Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/acpi_amba.c | 122
-rw-r--r--  drivers/acpi/acpi_apd.c | 2
-rw-r--r--  drivers/acpi/acpi_platform.c | 21
-rw-r--r--  drivers/acpi/acpi_processor.c | 69
-rw-r--r--  drivers/acpi/acpi_video.c | 7
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 2
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 6
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 2
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 2
-rw-r--r--  drivers/acpi/acpica/dbcmds.c | 2
-rw-r--r--  drivers/acpi/acpica/dbconvert.c | 5
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 3
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 3
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 3
-rw-r--r--  drivers/acpi/acpica/evgpeinit.c | 2
-rw-r--r--  drivers/acpi/acpica/evregion.c | 2
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 4
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 4
-rw-r--r--  drivers/acpi/acpica/nseval.c | 3
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 137
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 5
-rw-r--r--  drivers/acpi/acpica/tbprint.c | 7
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 4
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 40
-rw-r--r--  drivers/acpi/acpica/utcache.c | 2
-rw-r--r--  drivers/acpi/acpica/utnonansi.c | 246
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 2
-rw-r--r--  drivers/acpi/acpica/utxferror.c | 3
-rw-r--r--  drivers/acpi/acpica/utxfinit.c | 67
-rw-r--r--  drivers/acpi/apei/apei-base.c | 6
-rw-r--r--  drivers/acpi/apei/einj.c | 15
-rw-r--r--  drivers/acpi/apei/erst.c | 3
-rw-r--r--  drivers/acpi/apei/ghes.c | 23
-rw-r--r--  drivers/acpi/bgrt.c | 10
-rw-r--r--  drivers/acpi/bus.c | 29
-rw-r--r--  drivers/acpi/cppc_acpi.c | 237
-rw-r--r--  drivers/acpi/ec_sys.c | 3
-rw-r--r--  drivers/acpi/fan.c | 2
-rw-r--r--  drivers/acpi/internal.h | 13
-rw-r--r--  drivers/acpi/nfit.c | 798
-rw-r--r--  drivers/acpi/nfit.h | 30
-rw-r--r--  drivers/acpi/osl.c | 158
-rw-r--r--  drivers/acpi/pci_irq.c | 29
-rw-r--r--  drivers/acpi/pmic/intel_pmic_crc.c | 7
-rw-r--r--  drivers/acpi/processor_driver.c | 2
-rw-r--r--  drivers/acpi/processor_idle.c | 66
-rw-r--r--  drivers/acpi/property.c | 1
-rw-r--r--  drivers/acpi/resource.c | 14
-rw-r--r--  drivers/acpi/scan.c | 1
-rw-r--r--  drivers/acpi/sleep.c | 36
-rw-r--r--  drivers/acpi/tables.c | 12
-rw-r--r--  drivers/acpi/utils.c | 6
-rw-r--r--  drivers/android/binder.c | 31
-rw-r--r--  drivers/ata/Kconfig | 11
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 108
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/ahci_mvebu.c | 14
-rw-r--r--  drivers/ata/ahci_octeon.c | 105
-rw-r--r--  drivers/ata/ahci_platform.c | 1
-rw-r--r--  drivers/ata/ahci_xgene.c | 4
-rw-r--r--  drivers/ata/libahci.c | 55
-rw-r--r--  drivers/ata/libata-scsi.c | 4
-rw-r--r--  drivers/ata/pata_at91.c | 3
-rw-r--r--  drivers/ata/pata_bf54x.c | 2
-rw-r--r--  drivers/ata/pata_hpt366.c | 13
-rw-r--r--  drivers/ata/pata_macio.c | 2
-rw-r--r--  drivers/ata/sata_via.c | 133
-rw-r--r--  drivers/atm/firestream.c | 2
-rw-r--r--  drivers/base/bus.c | 15
-rw-r--r--  drivers/base/component.c | 2
-rw-r--r--  drivers/base/dd.c | 24
-rw-r--r--  drivers/base/dma-coherent.c | 53
-rw-r--r--  drivers/base/firmware_class.c | 87
-rw-r--r--  drivers/base/memory.c | 34
-rw-r--r--  drivers/base/power/clock_ops.c | 89
-rw-r--r--  drivers/base/power/domain.c | 60
-rw-r--r--  drivers/base/power/domain_governor.c | 64
-rw-r--r--  drivers/base/power/opp/core.c | 1079
-rw-r--r--  drivers/base/power/opp/cpu.c | 22
-rw-r--r--  drivers/base/power/opp/debugfs.c | 85
-rw-r--r--  drivers/base/power/opp/opp.h | 74
-rw-r--r--  drivers/base/power/trace.c | 4
-rw-r--r--  drivers/base/power/wakeup.c | 2
-rw-r--r--  drivers/base/property.c | 37
-rw-r--r--  drivers/base/regmap/internal.h | 16
-rw-r--r--  drivers/base/regmap/regcache-flat.c | 20
-rw-r--r--  drivers/base/regmap/regcache.c | 45
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 104
-rw-r--r--  drivers/base/regmap/regmap-mmio.c | 259
-rw-r--r--  drivers/base/regmap/regmap.c | 241
-rw-r--r--  drivers/bcma/Kconfig | 5
-rw-r--r--  drivers/bcma/Makefile | 1
-rw-r--r--  drivers/bcma/bcma_private.h | 19
-rw-r--r--  drivers/bcma/driver_chipcommon.c | 46
-rw-r--r--  drivers/bcma/driver_chipcommon_pflash.c | 49
-rw-r--r--  drivers/bcma/driver_chipcommon_pmu.c | 94
-rw-r--r--  drivers/bcma/driver_chipcommon_sflash.c | 1
-rw-r--r--  drivers/bcma/driver_gpio.c | 1
-rw-r--r--  drivers/bcma/driver_mips.c | 66
-rw-r--r--  drivers/bcma/host_pci.c | 2
-rw-r--r--  drivers/bcma/main.c | 19
-rw-r--r--  drivers/bcma/scan.c | 5
-rw-r--r--  drivers/block/Kconfig | 10
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/aoe/aoeblk.c | 2
-rw-r--r--  drivers/block/aoe/aoecmd.c | 4
-rw-r--r--  drivers/block/brd.c | 4
-rw-r--r--  drivers/block/cpqarray.c | 1820
-rw-r--r--  drivers/block/cpqarray.h | 126
-rw-r--r--  drivers/block/cryptoloop.c | 48
-rw-r--r--  drivers/block/drbd/drbd_int.h | 20
-rw-r--r--  drivers/block/drbd/drbd_main.c | 16
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 61
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 56
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 43
-rw-r--r--  drivers/block/ida_cmd.h | 349
-rw-r--r--  drivers/block/ida_ioctl.h | 87
-rw-r--r--  drivers/block/loop.c | 6
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 270
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h | 11
-rw-r--r--  drivers/block/nbd.c | 334
-rw-r--r--  drivers/block/null_blk.c | 3
-rw-r--r--  drivers/block/paride/pd.c | 4
-rw-r--r--  drivers/block/paride/pt.c | 4
-rw-r--r--  drivers/block/rbd.c | 20
-rw-r--r--  drivers/block/virtio_blk.c | 11
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 20
-rw-r--r--  drivers/block/xen-blkfront.c | 6
-rw-r--r--  drivers/bluetooth/Kconfig | 11
-rw-r--r--  drivers/bluetooth/Makefile | 1
-rw-r--r--  drivers/bluetooth/ath3k.c | 9
-rw-r--r--  drivers/bluetooth/btbcm.c | 3
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 4
-rw-r--r--  drivers/bluetooth/hci_ag6xx.c | 337
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 3
-rw-r--r--  drivers/bluetooth/hci_intel.c | 4
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 6
-rw-r--r--  drivers/bluetooth/hci_uart.h | 8
-rw-r--r--  drivers/bus/Kconfig | 10
-rw-r--r--  drivers/bus/arm-cci.c | 621
-rw-r--r--  drivers/bus/imx-weim.c | 2
-rw-r--r--  drivers/bus/mvebu-mbus.c | 52
-rw-r--r--  drivers/bus/sunxi-rsb.c | 4
-rw-r--r--  drivers/bus/uniphier-system-bus.c | 2
-rw-r--r--  drivers/char/Kconfig | 3
-rw-r--r--  drivers/char/agp/frontend.c | 2
-rw-r--r--  drivers/char/agp/uninorth-agp.c | 1
-rw-r--r--  drivers/char/hw_random/Kconfig | 15
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/bcm63xx-rng.c | 12
-rw-r--r--  drivers/char/hw_random/exynos-rng.c | 10
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 10
-rw-r--r--  drivers/char/hw_random/pic32-rng.c | 155
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 11
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 13
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 2
-rw-r--r--  drivers/char/mem.c | 2
-rw-r--r--  drivers/char/nvram.c | 12
-rw-r--r--  drivers/char/nwbutton.c | 5
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 21
-rw-r--r--  drivers/char/ppdev.c | 376
-rw-r--r--  drivers/char/raw.c | 4
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 19
-rw-r--r--  drivers/char/tpm/tpm.h | 7
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 22
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 196
-rw-r--r--  drivers/char/tpm/tpm_eventlog.c | 14
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 253
-rw-r--r--  drivers/char/ttyprintk.c | 2
-rw-r--r--  drivers/char/xillybus/xillybus_core.c | 4
-rw-r--r--  drivers/clk/Kconfig | 30
-rw-r--r--  drivers/clk/Makefile | 5
-rw-r--r--  drivers/clk/at91/clk-generated.c | 99
-rw-r--r--  drivers/clk/at91/clk-h32mx.c | 40
-rw-r--r--  drivers/clk/at91/clk-main.c | 330
-rw-r--r--  drivers/clk/at91/clk-master.c | 98
-rw-r--r--  drivers/clk/at91/clk-peripheral.c | 137
-rw-r--r--  drivers/clk/at91/clk-pll.c | 150
-rw-r--r--  drivers/clk/at91/clk-plldiv.c | 44
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 100
-rw-r--r--  drivers/clk/at91/clk-slow.c | 43
-rw-r--r--  drivers/clk/at91/clk-smd.c | 60
-rw-r--r--  drivers/clk/at91/clk-system.c | 96
-rw-r--r--  drivers/clk/at91/clk-usb.c | 127
-rw-r--r--  drivers/clk/at91/clk-utmi.c | 80
-rw-r--r--  drivers/clk/at91/pmc.c | 426
-rw-r--r--  drivers/clk/at91/pmc.h | 98
-rw-r--r--  drivers/clk/bcm/clk-bcm2835-aux.c | 4
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 47
-rw-r--r--  drivers/clk/bcm/clk-cygnus.c | 59
-rw-r--r--  drivers/clk/bcm/clk-iproc-pll.c | 41
-rw-r--r--  drivers/clk/bcm/clk-iproc.h | 43
-rw-r--r--  drivers/clk/clk-axi-clkgen.c | 170
-rw-r--r--  drivers/clk/clk-composite.c | 2
-rw-r--r--  drivers/clk/clk-divider.c | 18
-rw-r--r--  drivers/clk/clk-efm32gg.c | 2
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 15
-rw-r--r--  drivers/clk/clk-fixed-rate.c | 18
-rw-r--r--  drivers/clk/clk-fractional-divider.c | 2
-rw-r--r--  drivers/clk/clk-gate.c | 2
-rw-r--r--  drivers/clk/clk-gpio.c | 163
-rw-r--r--  drivers/clk/clk-max77686.c | 3
-rw-r--r--  drivers/clk/clk-max77802.c | 2
-rw-r--r--  drivers/clk/clk-mb86s7x.c | 4
-rw-r--r--  drivers/clk/clk-multiplier.c | 2
-rw-r--r--  drivers/clk/clk-mux.c | 2
-rw-r--r--  drivers/clk/clk-palmas.c | 16
-rw-r--r--  drivers/clk/clk-pwm.c | 2
-rw-r--r--  drivers/clk/clk-s2mps11.c | 111
-rw-r--r--  drivers/clk/clk-scpi.c | 2
-rw-r--r--  drivers/clk/clk-si514.c | 2
-rw-r--r--  drivers/clk/clk-si5351.c | 2
-rw-r--r--  drivers/clk/clk-si570.c | 2
-rw-r--r--  drivers/clk/clk-vt8500.c | 100
-rw-r--r--  drivers/clk/clk-xgene.c | 109
-rw-r--r--  drivers/clk/clk.c | 221
-rw-r--r--  drivers/clk/clkdev.c | 31
-rw-r--r--  drivers/clk/h8300/clk-div.c | 6
-rw-r--r--  drivers/clk/h8300/clk-h8s2678.c | 4
-rw-r--r--  drivers/clk/hisilicon/clk-hi3620.c | 18
-rw-r--r--  drivers/clk/hisilicon/clk-hi6220-stub.c | 2
-rw-r--r--  drivers/clk/hisilicon/clk-hi6220.c | 26
-rw-r--r--  drivers/clk/hisilicon/clk-hip04.c | 6
-rw-r--r--  drivers/clk/hisilicon/clk-hix5hd2.c | 60
-rw-r--r--  drivers/clk/imx/clk-busy.c | 4
-rw-r--r--  drivers/clk/imx/clk-fixup-div.c | 5
-rw-r--r--  drivers/clk/imx/clk-fixup-mux.c | 2
-rw-r--r--  drivers/clk/imx/clk-gate-exclusive.c | 2
-rw-r--r--  drivers/clk/imx/clk-imx6q.c | 132
-rw-r--r--  drivers/clk/imx/clk-imx6ul.c | 65
-rw-r--r--  drivers/clk/imx/clk.h | 2
-rw-r--r--  drivers/clk/mediatek/clk-gate.c | 8
-rw-r--r--  drivers/clk/mediatek/clk-gate.h | 2
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 10
-rw-r--r--  drivers/clk/mediatek/reset.c | 2
-rw-r--r--  drivers/clk/meson/clkc.c | 2
-rw-r--r--  drivers/clk/mmp/reset.c | 2
-rw-r--r--  drivers/clk/mvebu/Kconfig | 2
-rw-r--r--  drivers/clk/mvebu/common.c | 13
-rw-r--r--  drivers/clk/mvebu/dove-divider.c | 3
-rw-r--r--  drivers/clk/mvebu/kirkwood.c | 2
-rw-r--r--  drivers/clk/mxs/clk-div.c | 2
-rw-r--r--  drivers/clk/mxs/clk.h | 2
-rw-r--r--  drivers/clk/nxp/Makefile | 1
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-ccu.c | 4
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-cgu.c | 2
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-creg.c | 226
-rw-r--r--  drivers/clk/nxp/clk-lpc32xx.c | 15
-rw-r--r--  drivers/clk/pxa/clk-pxa25x.c | 20
-rw-r--r--  drivers/clk/pxa/clk-pxa27x.c | 40
-rw-r--r--  drivers/clk/pxa/clk-pxa3xx.c | 11
-rw-r--r--  drivers/clk/qcom/Kconfig | 8
-rw-r--r--  drivers/clk/qcom/Makefile | 1
-rw-r--r--  drivers/clk/qcom/clk-rcg.c | 4
-rw-r--r--  drivers/clk/qcom/common.c | 30
-rw-r--r--  drivers/clk/qcom/gcc-ipq4019.c | 1354
-rw-r--r--  drivers/clk/qcom/gcc-ipq806x.c | 37
-rw-r--r--  drivers/clk/qcom/gcc-msm8660.c | 32
-rw-r--r--  drivers/clk/qcom/gcc-msm8916.c | 18
-rw-r--r--  drivers/clk/qcom/gcc-msm8960.c | 46
-rw-r--r--  drivers/clk/qcom/gcc-msm8974.c | 1
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 98
-rw-r--r--  drivers/clk/qcom/gdsc.c | 89
-rw-r--r--  drivers/clk/qcom/gdsc.h | 34
-rw-r--r--  drivers/clk/qcom/mmcc-msm8960.c | 35
-rw-r--r--  drivers/clk/qcom/mmcc-msm8974.c | 15
-rw-r--r--  drivers/clk/qcom/mmcc-msm8996.c | 157
-rw-r--r--  drivers/clk/qcom/reset.c | 2
-rw-r--r--  drivers/clk/qcom/reset.h | 2
-rw-r--r--  drivers/clk/renesas/Makefile (renamed from drivers/clk/shmobile/Makefile) | 0
-rw-r--r--  drivers/clk/renesas/clk-div6.c (renamed from drivers/clk/shmobile/clk-div6.c) | 3
-rw-r--r--  drivers/clk/renesas/clk-div6.h (renamed from drivers/clk/shmobile/clk-div6.h) | 4
-rw-r--r--  drivers/clk/renesas/clk-emev2.c (renamed from drivers/clk/shmobile/clk-emev2.c) | 0
-rw-r--r--  drivers/clk/renesas/clk-mstp.c (renamed from drivers/clk/shmobile/clk-mstp.c) | 2
-rw-r--r--  drivers/clk/renesas/clk-r8a73a4.c (renamed from drivers/clk/shmobile/clk-r8a73a4.c) | 2
-rw-r--r--  drivers/clk/renesas/clk-r8a7740.c (renamed from drivers/clk/shmobile/clk-r8a7740.c) | 2
-rw-r--r--  drivers/clk/renesas/clk-r8a7778.c (renamed from drivers/clk/shmobile/clk-r8a7778.c) | 2
-rw-r--r--  drivers/clk/renesas/clk-r8a7779.c (renamed from drivers/clk/shmobile/clk-r8a7779.c) | 2
-rw-r--r--  drivers/clk/renesas/clk-rcar-gen2.c (renamed from drivers/clk/shmobile/clk-rcar-gen2.c) | 2
-rw-r--r--  drivers/clk/renesas/clk-rz.c (renamed from drivers/clk/shmobile/clk-rz.c) | 2
-rw-r--r--  drivers/clk/renesas/clk-sh73a0.c (renamed from drivers/clk/shmobile/clk-sh73a0.c) | 2
-rw-r--r--  drivers/clk/renesas/r8a7795-cpg-mssr.c (renamed from drivers/clk/shmobile/r8a7795-cpg-mssr.c) | 255
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c (renamed from drivers/clk/shmobile/renesas-cpg-mssr.c) | 7
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.h (renamed from drivers/clk/shmobile/renesas-cpg-mssr.h) | 2
-rw-r--r--  drivers/clk/rockchip/clk-cpu.c | 8
-rw-r--r--  drivers/clk/rockchip/clk-inverter.c | 8
-rw-r--r--  drivers/clk/rockchip/clk-mmc-phase.c | 8
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 5
-rw-r--r--  drivers/clk/rockchip/clk-rk3036.c | 37
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 27
-rw-r--r--  drivers/clk/rockchip/clk-rk3228.c | 56
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 38
-rw-r--r--  drivers/clk/rockchip/clk-rk3368.c | 175
-rw-r--r--  drivers/clk/rockchip/clk.c | 68
-rw-r--r--  drivers/clk/rockchip/clk.h | 28
-rw-r--r--  drivers/clk/rockchip/softrst.c | 2
-rw-r--r--  drivers/clk/samsung/Kconfig | 19
-rw-r--r--  drivers/clk/samsung/Makefile | 4
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos4415.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c | 10
-rw-r--r--  drivers/clk/samsung/clk-exynos5260.c | 36
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos5433.c | 208
-rw-r--r--  drivers/clk/samsung/clk-exynos5440.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos7.c | 12
-rw-r--r--  drivers/clk/samsung/clk-s3c2410.c | 2
-rw-r--r--  drivers/clk/samsung/clk-s3c2412.c | 4
-rw-r--r--  drivers/clk/samsung/clk-s3c2443.c | 8
-rw-r--r--  drivers/clk/samsung/clk-s3c64xx.c | 8
-rw-r--r--  drivers/clk/samsung/clk-s5pv210.c | 10
-rw-r--r--  drivers/clk/sirf/clk-atlas7.c | 2
-rw-r--r--  drivers/clk/socfpga/clk-gate-a10.c | 6
-rw-r--r--  drivers/clk/socfpga/clk-periph-a10.c | 7
-rw-r--r--  drivers/clk/socfpga/clk-pll-a10.c | 2
-rw-r--r--  drivers/clk/spear/spear1310_clock.c | 21
-rw-r--r--  drivers/clk/spear/spear1340_clock.c | 16
-rw-r--r--  drivers/clk/spear/spear3xx_clock.c | 8
-rw-r--r--  drivers/clk/spear/spear6xx_clock.c | 6
-rw-r--r--  drivers/clk/st/clk-flexgen.c | 4
-rw-r--r--  drivers/clk/st/clkgen-fsyn.c | 8
-rw-r--r--  drivers/clk/st/clkgen-mux.c | 13
-rw-r--r--  drivers/clk/sunxi/clk-a10-hosc.c | 3
-rw-r--r--  drivers/clk/sunxi/clk-a10-ve.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-a20-gmac.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-factors.c | 123
-rw-r--r--  drivers/clk/sunxi/clk-factors.h | 26
-rw-r--r--  drivers/clk/sunxi/clk-mod0.c | 23
-rw-r--r--  drivers/clk/sunxi/clk-simple-gates.c | 4
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-apb0-gates.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sun6i-ar100.c | 235
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-apb0.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-bus-gates.c | 3
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-mbus.c | 132
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-core.c | 86
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-mmc.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 863
-rw-r--r--  drivers/clk/sunxi/clk-usb.c | 26
-rw-r--r--  drivers/clk/tegra/clk-audio-sync.c | 2
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra-fixed.c | 6
-rw-r--r--  drivers/clk/tegra/clk-tegra114.c | 3
-rw-r--r--  drivers/clk/tegra/clk-tegra20.c | 10
-rw-r--r--  drivers/clk/tegra/clk.c | 2
-rw-r--r--  drivers/clk/ti/Kconfig | 6
-rw-r--r--  drivers/clk/ti/Makefile | 6
-rw-r--r--  drivers/clk/ti/adpll.c | 983
-rw-r--r--  drivers/clk/ti/apll.c | 20
-rw-r--r--  drivers/clk/ti/clk-814x.c | 53
-rw-r--r--  drivers/clk/ti/clk.c | 4
-rw-r--r--  drivers/clk/ti/clkt_dpll.c | 6
-rw-r--r--  drivers/clk/ti/clockdomain.c | 2
-rw-r--r--  drivers/clk/ti/composite.c | 10
-rw-r--r--  drivers/clk/ti/divider.c | 2
-rw-r--r--  drivers/clk/ti/dpll.c | 27
-rw-r--r--  drivers/clk/ti/dpll3xxx.c | 17
-rw-r--r--  drivers/clk/ti/dpll44xx.c | 8
-rw-r--r--  drivers/clk/ti/gate.c | 2
-rw-r--r--  drivers/clk/ti/mux.c | 6
-rw-r--r--  drivers/clk/ux500/abx500-clk.c | 5
-rw-r--r--  drivers/clk/ux500/u8500_of_clk.c | 74
-rw-r--r--  drivers/clk/ux500/u8540_clk.c | 80
-rw-r--r--  drivers/clk/versatile/clk-icst.c | 2
-rw-r--r--  drivers/clk/versatile/clk-impd1.c | 3
-rw-r--r--  drivers/clk/versatile/clk-realview.c | 8
-rw-r--r--  drivers/clk/versatile/clk-sp810.c | 4
-rw-r--r--  drivers/clk/versatile/clk-vexpress-osc.c | 2
-rw-r--r--  drivers/clk/x86/clk-lpt.c | 4
-rw-r--r--  drivers/clk/zynq/clkc.c | 3
-rw-r--r--  drivers/clocksource/Kconfig | 1
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 136
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 18
-rw-r--r--  drivers/clocksource/exynos_mct.c | 2
-rw-r--r--  drivers/clocksource/rockchip_timer.c | 21
-rw-r--r--  drivers/clocksource/time-lpc32xx.c | 66
-rw-r--r--  drivers/clocksource/time-pistachio.c | 4
-rw-r--r--  drivers/cpufreq/Kconfig | 1
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 218
-rw-r--r--  drivers/cpufreq/amd_freq_sensitivity.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 303
-rw-r--r--  drivers/cpufreq/cpufreq.c | 414
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 282
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 766
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 261
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 445
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.h | 30
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c | 18
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c | 10
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 10
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 453
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 222
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/governors/menu.c | 97
-rw-r--r--  drivers/crypto/Kconfig | 7
-rw-r--r--  drivers/crypto/atmel-aes.c | 10
-rw-r--r--  drivers/crypto/atmel-sha-regs.h | 4
-rw-r--r--  drivers/crypto/atmel-sha.c | 200
-rw-r--r--  drivers/crypto/atmel-tdes.c | 4
-rw-r--r--  drivers/crypto/caam/ctrl.c | 2
-rw-r--r--  drivers/crypto/caam/jr.c | 2
-rw-r--r--  drivers/crypto/caam/regs.h | 3
-rw-r--r--  drivers/crypto/ccp/Makefile | 2
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 39
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 12
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 52
-rw-r--r--  drivers/crypto/ccp/ccp-crypto.h | 22
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 533
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 471
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 155
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 381
-rw-r--r--  drivers/crypto/ccp/ccp-pci.c | 23
-rw-r--r--  drivers/crypto/ccp/ccp-platform.c | 48
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 26
-rw-r--r--  drivers/crypto/marvell/cesa.c | 2
-rw-r--r--  drivers/crypto/marvell/cesa.h | 3
-rw-r--r--  drivers/crypto/marvell/hash.c | 106
-rw-r--r--  drivers/crypto/nx/nx-842.c | 2
-rw-r--r--  drivers/crypto/omap-aes.c | 97
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 1
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_user.h | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_hw_arbiter.c | 19
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_uclo.h | 42
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 70
-rw-r--r--  drivers/crypto/qat/qat_common/qat_uclo.c | 2
-rw-r--r--  drivers/crypto/rockchip/Makefile | 1
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c | 28
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.h | 56
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c | 20
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c | 404
-rw-r--r--  drivers/crypto/s5p-sss.c | 12
-rw-r--r--  drivers/crypto/sahara.c | 19
-rw-r--r--  drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 5
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 4
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c | 4
-rw-r--r--  drivers/devfreq/Kconfig | 2
-rw-r--r--  drivers/dma-buf/dma-buf.c | 19
-rw-r--r--  drivers/dma/Kconfig | 16
-rw-r--r--  drivers/dma/Makefile | 2
-rw-r--r--  drivers/dma/acpi-dma.c | 2
-rw-r--r--  drivers/dma/dmaengine.c | 1
-rw-r--r--  drivers/dma/dw/core.c | 34
-rw-r--r--  drivers/dma/dw/regs.h | 2
-rw-r--r--  drivers/dma/edma.c | 126
-rw-r--r--  drivers/dma/ep93xx_dma.c | 28
-rw-r--r--  drivers/dma/hsu/hsu.c | 13
-rw-r--r--  drivers/dma/hsu/hsu.h | 3
-rw-r--r--  drivers/dma/idma64.c | 3
-rw-r--r--  drivers/dma/idma64.h | 4
-rw-r--r--  drivers/dma/ioat/dma.c | 268
-rw-r--r--  drivers/dma/ioat/dma.h | 23
-rw-r--r--  drivers/dma/ioat/hw.h | 2
-rw-r--r--  drivers/dma/ioat/init.c | 49
-rw-r--r--  drivers/dma/ioat/prep.c | 2
-rw-r--r--  drivers/dma/iop-adma.c | 8
-rw-r--r--  drivers/dma/mic_x100_dma.c | 2
-rw-r--r--  drivers/dma/mv_xor.c | 4
-rw-r--r--  drivers/dma/omap-dma.c | 34
-rw-r--r--  drivers/dma/pl330.c | 101
-rw-r--r--  drivers/dma/pxa_dma.c | 39
-rw-r--r--  drivers/dma/qcom/Kconfig | 29
-rw-r--r--  drivers/dma/qcom/Makefile | 3
-rw-r--r--  drivers/dma/qcom/bam_dma.c (renamed from drivers/dma/qcom_bam_dma.c) | 37
-rw-r--r--  drivers/dma/qcom/hidma.c | 706
-rw-r--r--  drivers/dma/qcom/hidma.h | 160
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 302
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.h | 39
-rw-r--r--  drivers/dma/qcom/hidma_mgmt_sys.c | 295
-rw-r--r--  drivers/dma/sh/Kconfig | 6
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 2
-rw-r--r--  drivers/dma/sh/shdmac.c | 2
-rw-r--r--  drivers/dma/sirf-dma.c | 10
-rw-r--r--  drivers/dma/sun4i-dma.c | 1
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 47
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c | 206
-rw-r--r--  drivers/edac/Kconfig | 26
-rw-r--r--  drivers/edac/Makefile | 2
-rw-r--r--  drivers/edac/altera_edac.c | 492
-rw-r--r--  drivers/edac/amd64_edac.c | 2
-rw-r--r--  drivers/edac/debugfs.c | 2
-rw-r--r--  drivers/edac/edac_mc.c | 64
-rw-r--r--  drivers/edac/edac_pci.c | 67
-rw-r--r--  drivers/edac/mce_amd.c | 335
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 26
-rw-r--r--  drivers/edac/xgene_edac.c | 70
-rw-r--r--  drivers/extcon/extcon-arizona.c | 4
-rw-r--r--  drivers/extcon/extcon-gpio.c | 2
-rw-r--r--  drivers/extcon/extcon-max14577.c | 3
-rw-r--r--  drivers/extcon/extcon-max77693.c | 12
-rw-r--r--  drivers/extcon/extcon-max77843.c | 5
-rw-r--r--  drivers/extcon/extcon-max8997.c | 3
-rw-r--r--  drivers/extcon/extcon-palmas.c | 53
-rw-r--r--  drivers/extcon/extcon-rt8973a.c | 8
-rw-r--r--  drivers/extcon/extcon-sm5502.c | 8
-rw-r--r--  drivers/firewire/core-cdev.c | 4
-rw-r--r--  drivers/firewire/nosy.c | 18
-rw-r--r--  drivers/firewire/ohci.c | 5
-rw-r--r--  drivers/firmware/Kconfig | 20
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/arm_scpi.c | 17
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_nvram.c | 5
-rw-r--r--  drivers/firmware/efi/arm-init.c | 32
-rw-r--r--  drivers/firmware/efi/efi.c | 41
-rw-r--r--  drivers/firmware/efi/efivars.c | 4
-rw-r--r--  drivers/firmware/efi/esrt.c | 5
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 6
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 44
-rw-r--r--  drivers/firmware/efi/libstub/arm32-stub.c | 17
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 112
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 7
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 19
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 14
-rw-r--r--  drivers/firmware/efi/libstub/random.c | 135
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 113
-rw-r--r--  drivers/firmware/efi/vars.c | 16
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 4
-rw-r--r--  drivers/firmware/psci.c | 120
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 773
-rw-r--r--  drivers/gpio/Kconfig | 91
-rw-r--r--  drivers/gpio/Makefile | 10
-rw-r--r--  drivers/gpio/devres.c | 2
-rw-r--r--  drivers/gpio/gpio-104-dio-48e.c | 430
-rw-r--r--  drivers/gpio/gpio-104-idi-48.c | 30
-rw-r--r--  drivers/gpio/gpio-104-idio-16.c | 27
-rw-r--r--  drivers/gpio/gpio-74xx-mmio.c | 11
-rw-r--r--  drivers/gpio/gpio-adnp.c | 11
-rw-r--r--  drivers/gpio/gpio-adp5520.c | 13
-rw-r--r--  drivers/gpio/gpio-adp5588.c | 4
-rw-r--r--  drivers/gpio/gpio-amd8111.c | 7
-rw-r--r--  drivers/gpio/gpio-arizona.c | 12
-rw-r--r--  drivers/gpio/gpio-ath79.c | 264
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 2
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 13
-rw-r--r--  drivers/gpio/gpio-clps711x.c | 11
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 9
-rw-r--r--  drivers/gpio/gpio-cs5535.c | 20
-rw-r--r--  drivers/gpio/gpio-da9052.c | 11
-rw-r--r--  drivers/gpio/gpio-da9055.c | 16
-rw-r--r--  drivers/gpio/gpio-davinci.c | 5
-rw-r--r--  drivers/gpio/gpio-dln2.c | 16
-rw-r--r--  drivers/gpio/gpio-ep93xx.c | 2
-rw-r--r--  drivers/gpio/gpio-f7188x.c | 53
-rw-r--r--  drivers/gpio/gpio-ge.c | 2
-rw-r--r--  drivers/gpio/gpio-generic.c | 11
-rw-r--r--  drivers/gpio/gpio-ich.c | 51
-rw-r--r--  drivers/gpio/gpio-iop.c | 2
-rw-r--r--  drivers/gpio/gpio-janz-ttl.c | 12
-rw-r--r--  drivers/gpio/gpio-kempld.c | 11
-rw-r--r--  drivers/gpio/gpio-ks8695.c | 12
-rw-r--r--  drivers/gpio/gpio-lp3943.c | 12
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c | 2
-rw-r--r--  drivers/gpio/gpio-lynxpoint.c | 4
-rw-r--r--  drivers/gpio/gpio-mc9s08dz60.c | 13
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 25
-rw-r--r--  drivers/gpio/gpio-menz127.c | 199
-rw-r--r--  drivers/gpio/gpio-moxart.c | 5
-rw-r--r--  drivers/gpio/gpio-mpc5200.c | 1
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 255
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 11
-rw-r--r--  drivers/gpio/gpio-mxc.c | 6
-rw-r--r--  drivers/gpio/gpio-octeon.c | 10
-rw-r--r--  drivers/gpio/gpio-omap.c | 57
-rw-r--r--  drivers/gpio/gpio-palmas.c | 12
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 13
-rw-r--r--  drivers/gpio/gpio-pcf857x.c | 10
-rw-r--r--  drivers/gpio/gpio-pisosr.c | 183
-rw-r--r--  drivers/gpio/gpio-pxa.c | 4
-rw-r--r--  drivers/gpio/gpio-rc5t583.c | 12
-rw-r--r--  drivers/gpio/gpio-rdc321x.c | 13
-rw-r--r--  drivers/gpio/gpio-sch.c | 11
-rw-r--r--  drivers/gpio/gpio-sch311x.c | 8
-rw-r--r--  drivers/gpio/gpio-spear-spics.c | 2
-rw-r--r--  drivers/gpio/gpio-sta2x11.c | 2
-rw-r--r--  drivers/gpio/gpio-stp-xway.c | 2
-rw-r--r--  drivers/gpio/gpio-sx150x.c | 18
-rw-r--r--  drivers/gpio/gpio-syscon.c | 11
-rw-r--r--  drivers/gpio/gpio-tb10x.c | 22
-rw-r--r--  drivers/gpio/gpio-tc3589x.c | 13
-rw-r--r--  drivers/gpio/gpio-tegra.c | 2
-rw-r--r--  drivers/gpio/gpio-timberdale.c | 25
-rw-r--r--  drivers/gpio/gpio-tpic2810.c | 170
-rw-r--r--  drivers/gpio/gpio-tps65086.c | 139
-rw-r--r--  drivers/gpio/gpio-tps65218.c | 222
-rw-r--r--  drivers/gpio/gpio-tps6586x.c | 12
-rw-r--r--  drivers/gpio/gpio-tps65910.c | 12
-rw-r--r--  drivers/gpio/gpio-tps65912.c | 174
-rw-r--r--  drivers/gpio/gpio-ts4800.c | 81
-rw-r--r--  drivers/gpio/gpio-ts5500.c | 9
-rw-r--r--  drivers/gpio/gpio-twl6040.c | 9
-rw-r--r--  drivers/gpio/gpio-ucb1400.c | 3
-rw-r--r--  drivers/gpio/gpio-viperboard.c | 24
-rw-r--r--  drivers/gpio/gpio-vx855.c | 12
-rw-r--r--  drivers/gpio/gpio-wm831x.c | 12
-rw-r--r--  drivers/gpio/gpio-wm8350.c | 12
-rw-r--r--  drivers/gpio/gpio-wm8994.c | 17
-rw-r--r--  drivers/gpio/gpio-ws16c48.c | 427
-rw-r--r--  drivers/gpio/gpio-xgene-sb.c | 266
-rw-r--r--  drivers/gpio/gpio-xgene.c | 16
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 18
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 51
-rw-r--r--  drivers/gpio/gpiolib.c | 883
-rw-r--r--  drivers/gpio/gpiolib.h | 79
-rw-r--r--  drivers/gpu/drm/amd/acp/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 118
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 402
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 120
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 110
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 125
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 33
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/Makefile | 14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 74
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_endian.h | 38
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 21
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h | 18
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 23
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 14
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 8
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 8
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 13
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 87
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 376
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 12
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 39
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 33
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 27
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 9
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 27
-rw-r--r--  drivers/gpu/drm/drm_edid_load.c | 17
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 13
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 127
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.h | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 12
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 13
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 7
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.h | 3
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 6
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c | 13
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 10
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 123
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.h | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 13
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 13
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 5
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 81
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 13
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 1697
-rw-r--r--  drivers/gpu/drm/radeon/cypress_dpm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 170
-rw-r--r--  drivers/gpu/drm/radeon/ni_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ib.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 92
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/rs780_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/rv740_dpm.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 969
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/vce_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 13
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 22
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 79
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/sti/sti_awg_utils.c | 78
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.c | 204
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 141
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c | 78
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 479
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 108
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 400
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.h | 31
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c | 451
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c | 146
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.h | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_plane.c | 63
-rw-r--r--  drivers/gpu/drm/sti/sti_plane.h | 17
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 295
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.c | 125
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.h | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 200
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.h | 5
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 6
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 12
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 37
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 123
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/vc4/vc4_irq.c | 58
-rw-r--r--  drivers/gpu/drm/vc4/vc4_v3d.c | 1
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 5
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 163
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 19
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 179
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 453
-rw-r--r--  drivers/gpu/host1x/bus.c | 4
-rw-r--r--  drivers/gpu/host1x/cdma.c | 8
-rw-r--r--  drivers/gpu/host1x/job.c | 12
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c | 79
-rw-r--r--  drivers/gpu/ipu-v3/ipu-dmfc.c | 8
-rw-r--r--  drivers/hid/Kconfig | 6
-rw-r--r--  drivers/hid/Makefile | 1
-rw-r--r--  drivers/hid/hid-cmedia.c | 168
-rw-r--r--  drivers/hid/hid-core.c | 107
-rw-r--r--  drivers/hid/hid-corsair.c | 3
-rw-r--r--  drivers/hid/hid-dr.c | 4
-rw-r--r--  drivers/hid/hid-ids.h | 15
-rw-r--r--  drivers/hid/hid-lenovo.c | 16
-rw-r--r--  drivers/hid/hid-lg.c | 10
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 701
-rw-r--r--  drivers/hid/hid-microsoft.c | 8
-rw-r--r--  drivers/hid/hid-multitouch.c | 31
-rw-r--r--  drivers/hid/hid-penmount.c | 8
-rw-r--r--  drivers/hid/hid-rmi.c | 11
-rw-r--r--  drivers/hid/hid-sony.c | 182
-rw-r--r--  drivers/hid/hid-thingm.c | 135
-rw-r--r--  drivers/hid/hid-wiimote-modules.c | 14
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 60
-rw-r--r--  drivers/hid/uhid.c | 2
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 73
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 3
-rw-r--r--  drivers/hid/wacom_sys.c | 465
-rw-r--r--  drivers/hid/wacom_wac.c | 260
-rw-r--r--  drivers/hid/wacom_wac.h | 8
-rw-r--r--  drivers/hsi/clients/nokia-modem.c | 2
-rw-r--r--  drivers/hsi/clients/ssi_protocol.c | 16
-rw-r--r--  drivers/hv/channel.c | 36
-rw-r--r--  drivers/hv/channel_mgmt.c | 262
-rw-r--r--  drivers/hv/connection.c | 20
-rw-r--r--  drivers/hv/hv.c | 36
-rw-r--r--  drivers/hv/hv_fcopy.c | 2
-rw-r--r--  drivers/hv/hv_kvp.c | 2
-rw-r--r--  drivers/hv/hv_snapshot.c | 2
-rw-r--r--  drivers/hv/hv_util.c | 1
-rw-r--r--  drivers/hv/hv_utils_transport.c | 3
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 42
-rw-r--r--  drivers/hv/ring_buffer.c | 31
-rw-r--r--  drivers/hv/vmbus_drv.c | 117
-rw-r--r--  drivers/hwmon/Kconfig | 31
-rw-r--r--  drivers/hwmon/Makefile | 4
-rw-r--r--  drivers/hwmon/iio_hwmon.c | 11
-rw-r--r--  drivers/hwmon/ltc2990.c | 161
-rw-r--r--  drivers/hwmon/max1111.c | 6
-rw-r--r--  drivers/hwmon/nsa320-hwmon.c | 215
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 44
-rw-r--r--  drivers/hwmon/pmbus/Kconfig | 4
-rw-r--r--  drivers/hwmon/pmbus/adm1275.c | 56
-rw-r--r--  drivers/hwmon/scpi-hwmon.c | 14
-rw-r--r--  drivers/hwmon/vexpress-hwmon.c (renamed from drivers/hwmon/vexpress.c) | 0
-rw-r--r--  drivers/hwtracing/coresight/Kconfig | 1
-rw-r--r--  drivers/hwtracing/coresight/Makefile | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 293
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 393
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.h | 32
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm.h | 142
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x-sysfs.c | 1272
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm3x.c | 1687
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 37
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 21
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 15
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator-qcom.c | 19
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 25
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 35
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c | 23
-rw-r--r--  drivers/hwtracing/coresight/coresight.c | 388
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c | 3
-rw-r--r--  drivers/hwtracing/intel_th/Kconfig | 1
-rw-r--r--  drivers/hwtracing/intel_th/core.c | 30
-rw-r--r--  drivers/hwtracing/intel_th/gth.c | 32
-rw-r--r--  drivers/hwtracing/intel_th/gth.h | 3
-rw-r--r--  drivers/hwtracing/intel_th/intel_th.h | 41
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 9
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 12
-rw-r--r--  drivers/hwtracing/intel_th/sth.c | 11
-rw-r--r--  drivers/hwtracing/stm/Kconfig | 16
-rw-r--r--  drivers/hwtracing/stm/Makefile | 2
-rw-r--r--  drivers/hwtracing/stm/core.c | 175
-rw-r--r--  drivers/hwtracing/stm/dummy_stm.c | 71
-rw-r--r--  drivers/hwtracing/stm/heartbeat.c | 130
-rw-r--r--  drivers/hwtracing/stm/policy.c | 25
-rw-r--r--  drivers/hwtracing/stm/stm.h | 2
-rw-r--r--  drivers/i2c/busses/Kconfig | 10
-rw-r--r--  drivers/i2c/busses/i2c-bcm-iproc.c | 177
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 14
-rw-r--r--  drivers/i2c/busses/i2c-designware-baytrail.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 42
-rw-r--r--  drivers/i2c/busses/i2c-octeon.c | 307
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 48
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 977
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c | 88
-rw-r--r--  drivers/i2c/i2c-boardinfo.c | 6
-rw-r--r--  drivers/i2c/i2c-core.c | 49
-rw-r--r--  drivers/i2c/i2c-dev.c | 14
-rw-r--r--  drivers/i2c/i2c-mux.c | 8
-rw-r--r--  drivers/i2c/i2c-smbus.c | 8
-rw-r--r--  drivers/i2c/i2c-stub.c | 8
-rw-r--r--  drivers/i2c/muxes/Kconfig | 9
-rw-r--r--  drivers/i2c/muxes/Makefile | 2
-rw-r--r--  drivers/i2c/muxes/i2c-demux-pinctrl.c | 291
-rw-r--r--  drivers/ide/hpt366.c | 9
-rw-r--r--  drivers/ide/icside.c | 2
-rw-r--r--  drivers/ide/palm_bk3710.c | 2
-rw-r--r--  drivers/ide/pdc202xx_new.c | 1
-rw-r--r--  drivers/ide/pmac.c | 1
-rw-r--r--  drivers/idle/intel_idle.c | 230
-rw-r--r--  drivers/iio/accel/Kconfig | 2
-rw-r--r--  drivers/iio/accel/bmc150-accel-core.c | 7
-rw-r--r--  drivers/iio/accel/mma8452.c | 206
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 16
-rw-r--r--  drivers/iio/adc/Kconfig | 61
-rw-r--r--  drivers/iio/adc/Makefile | 5
-rw-r--r--  drivers/iio/adc/at91-sama5d2_adc.c | 508
-rw-r--r--  drivers/iio/adc/axp288_adc.c | 2
-rw-r--r--  drivers/iio/adc/fsl-imx25-gcq.c | 417
-rw-r--r--  drivers/iio/adc/ina2xx-adc.c | 155
-rw-r--r--  drivers/iio/adc/max1363.c | 12
-rw-r--r--  drivers/iio/adc/mcp320x.c | 31
-rw-r--r--  drivers/iio/adc/mcp3422.c | 9
-rw-r--r--  drivers/iio/adc/mxs-lradc.c (renamed from drivers/staging/iio/adc/mxs-lradc.c) | 13
-rw-r--r--  drivers/iio/adc/palmas_gpadc.c | 6
-rw-r--r--  drivers/iio/adc/ti-adc081c.c | 2
-rw-r--r--  drivers/iio/adc/ti-adc0832.c | 288
-rw-r--r--  drivers/iio/adc/ti-ads1015.c | 612
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dmaengine.c | 7
-rw-r--r--  drivers/iio/chemical/Kconfig | 14
-rw-r--r--  drivers/iio/chemical/Makefile | 1
-rw-r--r--  drivers/iio/chemical/atlas-ph-sensor.c | 509
-rw-r--r--  drivers/iio/chemical/vz89x.c | 2
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.c | 6
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.h | 8
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_trigger.c | 52
-rw-r--r--  drivers/iio/dac/Kconfig | 39
-rw-r--r--  drivers/iio/dac/Makefile | 3
-rw-r--r--  drivers/iio/dac/ad5064.c | 391
-rw-r--r--  drivers/iio/dac/ad5761.c | 430
-rw-r--r--  drivers/iio/dac/mcp4725.c | 87
-rw-r--r--  drivers/iio/dac/stx104.c | 152
-rw-r--r--  drivers/iio/dac/vf610_dac.c | 298
-rw-r--r--  drivers/iio/gyro/bmg160_core.c | 9
-rw-r--r--  drivers/iio/gyro/st_gyro_core.c | 15
-rw-r--r--  drivers/iio/health/Kconfig | 32
-rw-r--r--  drivers/iio/health/Makefile | 2
-rw-r--r--  drivers/iio/health/afe4403.c | 708
-rw-r--r--  drivers/iio/health/afe4404.c | 679
-rw-r--r--  drivers/iio/health/afe440x.h | 191
-rw-r--r--  drivers/iio/health/max30100.c | 84
-rw-r--r--  drivers/iio/humidity/Kconfig | 6
-rw-r--r--  drivers/iio/humidity/dht11.c | 77
-rw-r--r--  drivers/iio/humidity/hdc100x.c | 2
-rw-r--r--  drivers/iio/humidity/htu21.c | 2
-rw-r--r--  drivers/iio/humidity/si7005.c | 3
-rw-r--r--  drivers/iio/humidity/si7020.c | 3
-rw-r--r--  drivers/iio/imu/inv_mpu6050/Kconfig | 23
-rw-r--r--  drivers/iio/imu/inv_mpu6050/Makefile | 8
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c | 24
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 458
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c | 208
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 37
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 54
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c | 98
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c | 32
-rw-r--r--  drivers/iio/industrialio-buffer.c | 60
-rw-r--r--  drivers/iio/industrialio-core.c | 1
-rw-r--r--  drivers/iio/light/apds9960.c | 3
-rw-r--r--  drivers/iio/light/bh1750.c | 2
-rw-r--r--  drivers/iio/light/jsa1212.c | 2
-rw-r--r--  drivers/iio/light/opt3001.c | 156
-rw-r--r--  drivers/iio/magnetometer/Kconfig | 33
-rw-r--r--  drivers/iio/magnetometer/Makefile | 4
-rw-r--r--  drivers/iio/magnetometer/ak8975.c | 4
-rw-r--r--  drivers/iio/magnetometer/hmc5843.h (renamed from drivers/staging/iio/magnetometer/hmc5843.h) | 5
-rw-r--r--  drivers/iio/magnetometer/hmc5843_core.c (renamed from drivers/staging/iio/magnetometer/hmc5843_core.c) | 138
-rw-r--r--  drivers/iio/magnetometer/hmc5843_i2c.c (renamed from drivers/staging/iio/magnetometer/hmc5843_i2c.c) | 3
-rw-r--r--  drivers/iio/magnetometer/hmc5843_spi.c (renamed from drivers/staging/iio/magnetometer/hmc5843_spi.c) | 3
-rw-r--r--  drivers/iio/magnetometer/st_magn.h | 1
-rw-r--r--  drivers/iio/magnetometer/st_magn_core.c | 4
-rw-r--r--  drivers/iio/potentiometer/Kconfig | 12
-rw-r--r--  drivers/iio/potentiometer/Makefile | 1
-rw-r--r--  drivers/iio/potentiometer/mcp4531.c | 2
-rw-r--r--  drivers/iio/potentiometer/tpl0102.c | 166
-rw-r--r--  drivers/iio/pressure/Kconfig | 43
-rw-r--r--  drivers/iio/pressure/Makefile | 2
-rw-r--r--  drivers/iio/pressure/mpl115.c | 66
-rw-r--r--  drivers/iio/pressure/mpl115.h | 24
-rw-r--r--  drivers/iio/pressure/mpl115_i2c.c | 67
-rw-r--r--  drivers/iio/pressure/mpl115_spi.c | 106
-rw-r--r--  drivers/iio/pressure/ms5611.h | 4
-rw-r--r--  drivers/iio/pressure/ms5611_core.c | 122
-rw-r--r--  drivers/iio/pressure/ms5611_i2c.c | 11
-rw-r--r--  drivers/iio/pressure/ms5611_spi.c | 12
-rw-r--r--  drivers/iio/pressure/ms5637.c | 2
-rw-r--r--  drivers/iio/pressure/st_pressure_core.c | 8
-rw-r--r--  drivers/iio/pressure/t5403.c | 2
-rw-r--r--  drivers/iio/proximity/pulsedlight-lidar-lite-v2.c | 2
-rw-r--r--  drivers/iio/temperature/mlx90614.c | 2
-rw-r--r--  drivers/iio/temperature/tmp006.c | 2
-rw-r--r--  drivers/iio/temperature/tsys01.c | 2
-rw-r--r--  drivers/iio/temperature/tsys02d.c | 2
-rw-r--r--  drivers/infiniband/Kconfig | 3
-rw-r--r--  drivers/infiniband/Makefile | 1
-rw-r--r--  drivers/infiniband/core/cache.c | 15
-rw-r--r--  drivers/infiniband/core/cma.c | 22
-rw-r--r--  drivers/infiniband/core/cma_configfs.c | 31
-rw-r--r--  drivers/infiniband/core/device.c | 43
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 37
-rw-r--r--  drivers/infiniband/core/iwcm.c | 190
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c | 12
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 14
-rw-r--r--  drivers/infiniband/core/iwpm_util.h | 2
-rw-r--r--  drivers/infiniband/core/packer.c | 14
-rw-r--r--  drivers/infiniband/core/sa_query.c | 18
-rw-r--r--  drivers/infiniband/core/ucm.c | 8
-rw-r--r--  drivers/infiniband/core/ucma.c | 6
-rw-r--r--  drivers/infiniband/core/ud_header.c | 23
-rw-r--r--  drivers/infiniband/core/umem.c | 2
-rw-r--r--  drivers/infiniband/core/umem_odp.c | 8
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 33
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 80
-rw-r--r--  drivers/infiniband/core/verbs.c | 206
-rw-r--r--  drivers/infiniband/hw/Makefile | 1
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 16
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 274
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 9
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 72
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 49
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 12
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 5
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 107
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 99
-rw-r--r--  drivers/infiniband/hw/i40iw/Kconfig | 7
-rw-r--r--  drivers/infiniband/hw/i40iw/Makefile | 9
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 570
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 4137
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.h | 456
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 4743
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_d.h | 1713
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hmc.c | 821
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hmc.h | 241
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hw.c | 730
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 1910
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_osdep.h | 215
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_p.h | 106
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_pble.c | 618
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_pble.h | 131
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 1436
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.h | 183
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_register.h | 1030
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_status.h | 100
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_type.h | 1312
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ucontext.h | 107
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_uk.c | 1204
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_user.h | 442
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c | 1270
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2437
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.h | 173
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_vf.c | 85
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_vf.h | 62
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_virtchnl.c | 748
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_virtchnl.h | 124
-rw-r--r--  drivers/infiniband/hw/mlx4/Kconfig | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 79
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 104
-rw-r--r--  drivers/infiniband/hw/mlx5/gsi.c | 548
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_virt.c | 194
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 166
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 132
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 118
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 601
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 271
-rw-r--r--  drivers/infiniband/hw/mlx5/user.h | 7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 3
-rw-r--r--  drivers/infiniband/hw/nes/Kconfig | 1
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 25
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 361
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h | 11
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 44
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h | 7
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 7
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h | 8
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 77
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.h | 5
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 33
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 4
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 16
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c | 4
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 38
-rw-r--r--  drivers/infiniband/hw/qib/Kconfig | 2
-rw-r--r--  drivers/infiniband/hw/qib/Makefile | 10
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 33
-rw-r--r--  drivers/infiniband/hw/qib/qib_common.h | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_cq.c | 545
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c | 71
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 6
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 25
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 186
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 338
-rw-r--r--  drivers/infiniband/hw/qib/qib_mmap.c | 174
-rw-r--r--  drivers/infiniband/hw/qib/qib_mr.c | 490
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 1178
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 409
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 191
-rw-r--r--  drivers/infiniband/hw/qib/qib_sdma.c | 41
-rw-r--r--  drivers/infiniband/hw/qib/qib_srq.c | 380
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 85
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c | 79
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 142
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 1223
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 812
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs_mcast.c | 363
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 10
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 2
-rw-r--r--  drivers/infiniband/sw/Makefile | 1
-rw-r--r--  drivers/infiniband/sw/rdmavt/Kconfig | 6
-rw-r--r--  drivers/infiniband/sw/rdmavt/Makefile | 13
-rw-r--r--  drivers/infiniband/sw/rdmavt/ah.c | 196
-rw-r--r--  drivers/infiniband/sw/rdmavt/ah.h | 59
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.c (renamed from drivers/staging/rdma/hfi1/cq.c) | 325
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.h | 64
-rw-r--r--  drivers/infiniband/sw/rdmavt/dma.c | 184
-rw-r--r--  drivers/infiniband/sw/rdmavt/dma.h | 53
-rw-r--r--  drivers/infiniband/sw/rdmavt/mad.c | 171
-rw-r--r--  drivers/infiniband/sw/rdmavt/mad.h | 60
-rw-r--r--  drivers/infiniband/sw/rdmavt/mcast.c (renamed from drivers/staging/rdma/hfi1/verbs_mcast.c) | 262
-rw-r--r--  drivers/infiniband/sw/rdmavt/mcast.h | 58
-rw-r--r--  drivers/infiniband/sw/rdmavt/mmap.c (renamed from drivers/staging/rdma/hfi1/mmap.c) | 142
-rw-r--r--  drivers/infiniband/sw/rdmavt/mmap.h | 63
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c | 830
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.h | 92
-rw-r--r--  drivers/infiniband/sw/rdmavt/pd.c | 119
-rw-r--r--  drivers/infiniband/sw/rdmavt/pd.h | 58
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 1696
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.h | 69
-rw-r--r--  drivers/infiniband/sw/rdmavt/srq.c (renamed from drivers/staging/rdma/hfi1/srq.c) | 204
-rw-r--r--  drivers/infiniband/sw/rdmavt/srq.h | 62
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace.c | 49
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace.h | 187
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c | 873
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.h | 104
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 45
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 65
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 5
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 11
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 7
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 7
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 38
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 819
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 74
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 40
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 935
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h | 31
-rw-r--r--  drivers/input/Kconfig | 2
-rw-r--r--  drivers/input/Makefile | 2
-rw-r--r--  drivers/input/input-compat.c | 6
-rw-r--r--  drivers/input/input-compat.h | 14
-rw-r--r--  drivers/input/input.c | 2
-rw-r--r--  drivers/input/keyboard/Kconfig | 2
-rw-r--r--  drivers/input/keyboard/goldfish_events.c | 17
-rw-r--r--  drivers/input/keyboard/snvs_pwrkey.c | 8
-rw-r--r--  drivers/input/keyboard/spear-keyboard.c | 6
-rw-r--r--  drivers/input/misc/ati_remote2.c | 36
-rw-r--r--  drivers/input/misc/ims-pcu.c | 4
-rw-r--r--  drivers/input/misc/powermate.c | 3
-rw-r--r--  drivers/input/misc/rotary_encoder.c | 403
-rw-r--r--  drivers/input/misc/uinput.c | 4
-rw-r--r--  drivers/input/mouse/Kconfig | 10
-rw-r--r--  drivers/input/mouse/Makefile | 1
-rw-r--r--  drivers/input/mouse/byd.c | 508
-rw-r--r--  drivers/input/mouse/byd.h | 18
-rw-r--r--  drivers/input/mouse/cyapa.c | 22
-rw-r--r--  drivers/input/mouse/cyapa.h | 14
-rw-r--r--  drivers/input/mouse/cyapa_gen3.c | 108
-rw-r--r--  drivers/input/mouse/cyapa_gen5.c | 99
-rw-r--r--  drivers/input/mouse/cyapa_gen6.c | 4
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 14
-rw-r--r--  drivers/input/mouse/psmouse.h | 1
-rw-r--r--  drivers/input/mouse/synaptics.c | 5
-rw-r--r--  drivers/input/rmi4/Kconfig | 63
-rw-r--r--  drivers/input/rmi4/Makefile | 13
-rw-r--r--  drivers/input/rmi4/rmi_2d_sensor.c | 329
-rw-r--r--  drivers/input/rmi4/rmi_2d_sensor.h | 87
-rw-r--r--  drivers/input/rmi4/rmi_bus.c | 419
-rw-r--r--  drivers/input/rmi4/rmi_bus.h | 182
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 1054
-rw-r--r--  drivers/input/rmi4/rmi_driver.h | 105
-rw-r--r--  drivers/input/rmi4/rmi_f01.c | 624
-rw-r--r--  drivers/input/rmi4/rmi_f11.c | 1317
-rw-r--r--  drivers/input/rmi4/rmi_f12.c | 457
-rw-r--r--  drivers/input/rmi4/rmi_f30.c | 407
-rw-r--r--  drivers/input/rmi4/rmi_i2c.c | 397
-rw-r--r--  drivers/input/rmi4/rmi_spi.c | 589
-rw-r--r--  drivers/input/touchscreen/Kconfig | 26
-rw-r--r--  drivers/input/touchscreen/Makefile | 2
-rw-r--r--  drivers/input/touchscreen/ad7879-i2c.c | 10
-rw-r--r--  drivers/input/touchscreen/ad7879-spi.c | 10
-rw-r--r--  drivers/input/touchscreen/ad7879.c | 160
-rw-r--r--  drivers/input/touchscreen/cyttsp_core.c | 194
-rw-r--r--  drivers/input/touchscreen/cyttsp_core.h | 10
-rw-r--r--  drivers/input/touchscreen/cyttsp_i2c.c | 10
-rw-r--r--  drivers/input/touchscreen/cyttsp_spi.c | 10
-rw-r--r--  drivers/input/touchscreen/fsl-imx25-tcq.c | 596
-rw-r--r--  drivers/input/touchscreen/melfas_mip4.c | 1543
-rw-r--r--  drivers/input/touchscreen/stmpe-ts.c | 31
-rw-r--r--  drivers/input/touchscreen/sur40.c | 21
-rw-r--r--  drivers/input/touchscreen/wdt87xx_i2c.c | 2
-rw-r--r--  drivers/iommu/Kconfig | 42
-rw-r--r--  drivers/iommu/Makefile | 2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 1
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 50
-rw-r--r--  drivers/iommu/arm-smmu.c | 79
-rw-r--r--  drivers/iommu/dma-iommu.c | 4
-rw-r--r--  drivers/iommu/exynos-iommu.c | 608
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 846
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 34
-rw-r--r--  drivers/iommu/io-pgtable.c | 5
-rw-r--r--  drivers/iommu/io-pgtable.h | 53
-rw-r--r--  drivers/iommu/iommu.c | 6
-rw-r--r--  drivers/iommu/mtk_iommu.c | 736
-rw-r--r--  drivers/iommu/of_iommu.c | 1
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 214
-rw-r--r--  drivers/irqchip/Kconfig | 42
-rw-r--r--  drivers/irqchip/Makefile | 8
-rw-r--r--  drivers/irqchip/irq-alpine-msi.c | 293
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 156
-rw-r--r--  drivers/irqchip/irq-ath79-cpu.c | 97
-rw-r--r--  drivers/irqchip/irq-ath79-misc.c | 189
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.c | 14
-rw-r--r--  drivers/irqchip/irq-atmel-aic-common.h | 7
-rw-r--r--  drivers/irqchip/irq-atmel-aic.c | 9
-rw-r--r--  drivers/irqchip/irq-atmel-aic5.c | 9
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 1
-rw-r--r--  drivers/irqchip/irq-bcm6345-l1.c | 364
-rw-r--r--  drivers/irqchip/irq-gic-realview.c | 44
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c | 14
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 10
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 349
-rw-r--r--  drivers/irqchip/irq-gic.c | 2
-rw-r--r--  drivers/irqchip/irq-mbigen.c | 38
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 354
-rw-r--r--  drivers/irqchip/irq-mvebu-odmi.c | 236
-rw-r--r--  drivers/irqchip/irq-mxs.c | 2
-rw-r--r--  drivers/irqchip/irq-sunxi-nmi.c | 4
-rw-r--r--  drivers/irqchip/irq-tango.c | 232
-rw-r--r--  drivers/irqchip/irq-tegra.c | 14
-rw-r--r--  drivers/irqchip/irq-ts4800.c | 2
-rw-r--r--  drivers/isdn/Makefile | 3
-rw-r--r--  drivers/isdn/hardware/eicon/debug.c | 4
-rw-r--r--  drivers/isdn/hardware/eicon/divamnt.c | 30
-rw-r--r--  drivers/isdn/hardware/mISDN/ipac.h | 41
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNipac.c | 2
-rw-r--r--  drivers/isdn/hisax/isac.c | 15
-rw-r--r--  drivers/isdn/i4l/Kconfig | 10
-rw-r--r--  drivers/isdn/i4l/isdn_tty.c | 12
-rw-r--r--  drivers/isdn/mISDN/clock.c | 69
-rw-r--r--  drivers/isdn/mISDN/socket.c | 3
-rw-r--r--  drivers/leds/Kconfig | 9
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/dell-led.c | 126
-rw-r--r--  drivers/leds/led-class.c | 2
-rw-r--r--  drivers/leds/led-core.c | 45
-rw-r--r--  drivers/leds/led-triggers.c | 13
-rw-r--r--  drivers/leds/leds-88pm860x.c | 12
-rw-r--r--  drivers/leds/leds-da903x.c | 12
-rw-r--r--  drivers/leds/leds-gpio.c | 6
-rw-r--r--  drivers/leds/leds-is31fl32xx.c | 508
-rw-r--r--  drivers/leds/leds-lm3533.c | 12
-rw-r--r--  drivers/leds/leds-lp3944.c | 7
-rw-r--r--  drivers/leds/leds-lp8788.c | 14
-rw-r--r--  drivers/leds/leds-max8997.c | 14
-rw-r--r--  drivers/leds/leds-s3c24xx.c | 19
-rw-r--r--  drivers/leds/leds-wm831x-status.c | 13
-rw-r--r--  drivers/lguest/interrupts_and_traps.c | 6
-rw-r--r--  drivers/lguest/lg.h | 1
-rw-r--r--  drivers/lguest/x86/core.c | 6
-rw-r--r--  drivers/lightnvm/core.c | 26
-rw-r--r--  drivers/lightnvm/gennvm.c | 91
-rw-r--r--  drivers/lightnvm/gennvm.h | 6
-rw-r--r--  drivers/lightnvm/rrpc.c | 203
-rw-r--r--  drivers/lightnvm/rrpc.h | 16
-rw-r--r--  drivers/macintosh/macio_asic.c | 1
-rw-r--r--  drivers/mailbox/Kconfig | 37
-rw-r--r--  drivers/mailbox/Makefile | 8
-rw-r--r--  drivers/mailbox/hi6220-mailbox.c | 395
-rw-r--r--drivers/mailbox/mailbox-test.c69
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c284
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c111
-rw-r--r--drivers/mailbox/rockchip-mailbox.c283
-rw-r--r--drivers/mailbox/ti-msgmgr.c639
-rw-r--r--drivers/md/Kconfig11
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/bcache/super.c46
-rw-r--r--drivers/md/bitmap.c25
-rw-r--r--drivers/md/bitmap.h4
-rw-r--r--drivers/md/dm-cache-metadata.c122
-rw-r--r--drivers/md/dm-cache-metadata.h4
-rw-r--r--drivers/md/dm-cache-policy-mq.c1473
-rw-r--r--drivers/md/dm-cache-policy-smq.c92
-rw-r--r--drivers/md/dm-cache-target.c16
-rw-r--r--drivers/md/dm-crypt.c95
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-ioctl.c5
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-mpath.c216
-rw-r--r--drivers/md/dm-path-selector.h5
-rw-r--r--drivers/md/dm-queue-length.c37
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-round-robin.c85
-rw-r--r--drivers/md/dm-service-time.c35
-rw-r--r--drivers/md/dm-snap.c11
-rw-r--r--drivers/md/dm-table.c66
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/dm-thin-metadata.c7
-rw-r--r--drivers/md/dm-thin.c23
-rw-r--r--drivers/md/dm-verity-fec.c2
-rw-r--r--drivers/md/dm-verity-target.c12
-rw-r--r--drivers/md/dm.c602
-rw-r--r--drivers/md/dm.h4
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/md/multipath.c4
-rw-r--r--drivers/md/raid1.c10
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c63
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c135
-rw-r--r--drivers/media/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb-frontends/Kconfig8
-rw-r--r--drivers/media/dvb-frontends/Makefile1
-rw-r--r--drivers/media/dvb-frontends/au8522.h1
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c7
-rw-r--r--drivers/media/dvb-frontends/au8522_priv.h11
-rw-r--r--drivers/media/dvb-frontends/cx24120.c4
-rw-r--r--drivers/media/dvb-frontends/dib0090.c12
-rw-r--r--drivers/media/dvb-frontends/dib9000.c4
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c11
-rw-r--r--drivers/media/dvb-frontends/mn88473.c (renamed from drivers/staging/media/mn88473/mn88473.c)388
-rw-r--r--drivers/media/dvb-frontends/mn88473.h14
-rw-r--r--drivers/media/dvb-frontends/mn88473_priv.h (renamed from drivers/staging/media/mn88473/mn88473_priv.h)7
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c151
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h4
-rw-r--r--drivers/media/dvb-frontends/rtl2832_priv.h1
-rw-r--r--drivers/media/dvb-frontends/stv0900_core.c7
-rw-r--r--drivers/media/i2c/adv7604.c3
-rw-r--r--drivers/media/i2c/tc358743.c30
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/media-device.c132
-rw-r--r--drivers/media/media-entity.c76
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c26
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c16
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c10
-rw-r--r--drivers/media/pci/pt3/pt3.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c38
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c10
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c21
-rw-r--r--drivers/media/pci/saa7134/saa7134.h1
-rw-r--r--drivers/media/pci/ttpci/av7110.c13
-rw-r--r--drivers/media/pci/ttpci/budget.c32
-rw-r--r--drivers/media/platform/coda/coda-bit.c10
-rw-r--r--drivers/media/platform/coda/coda-common.c106
-rw-r--r--drivers/media/platform/coda/coda.h3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c12
-rw-r--r--drivers/media/platform/omap3isp/isp.c213
-rw-r--r--drivers/media/platform/omap3isp/isp.h4
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c13
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.h1
-rw-r--r--drivers/media/platform/soc_camera/Kconfig29
-rw-r--r--drivers/media/platform/soc_camera/Makefile3
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c4
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c478
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c39
-rw-r--r--drivers/media/platform/ti-vpe/cal.c70
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.c32
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c39
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c2
-rw-r--r--drivers/media/rc/ati_remote.c47
-rw-r--r--drivers/media/rc/igorplugusb.c17
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-rm-ks.c56
-rw-r--r--drivers/media/rc/lirc_dev.c7
-rw-r--r--drivers/media/rc/mceusb.c5
-rw-r--r--drivers/media/rc/nuvoton-cir.c106
-rw-r--r--drivers/media/rc/nuvoton-cir.h3
-rw-r--r--drivers/media/rc/rc-main.c47
-rw-r--r--drivers/media/rc/sunxi-cir.c1
-rw-r--r--drivers/media/tuners/xc4000.c2
-rw-r--r--drivers/media/usb/airspy/airspy.c11
-rw-r--r--drivers/media/usb/au0828/au0828-cards.c4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c354
-rw-r--r--drivers/media/usb/au0828/au0828-input.c4
-rw-r--r--drivers/media/usb/au0828/au0828-video.c268
-rw-r--r--drivers/media/usb/au0828/au0828.h15
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c24
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-dvb.c8
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c95
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c41
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c66
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/gspca/ov519.c43
-rw-r--r--drivers/media/usb/gspca/touptek.c8
-rw-r--r--drivers/media/usb/gspca/w996Xcf.c8
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c3
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-io.c2
-rw-r--r--drivers/media/usb/siano/smsusb.c6
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c20
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h12
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c291
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c10
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c33
-rw-r--r--drivers/memory/Kconfig8
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/fsl_ifc.c2
-rw-r--r--drivers/memory/mtk-smi.c273
-rw-r--r--drivers/memory/omap-gpmc.c30
-rw-r--r--drivers/memstick/host/r592.c3
-rw-r--r--drivers/message/fusion/mptbase.c42
-rw-r--r--drivers/mfd/Kconfig100
-rw-r--r--drivers/mfd/Makefile9
-rw-r--r--drivers/mfd/act8945a.c102
-rw-r--r--drivers/mfd/as3711.c4
-rw-r--r--drivers/mfd/axp20x-i2c.c104
-rw-r--r--drivers/mfd/axp20x-rsb.c80
-rw-r--r--drivers/mfd/axp20x.c105
-rw-r--r--drivers/mfd/cs47l24-tables.c10
-rw-r--r--drivers/mfd/da9062-core.c23
-rw-r--r--drivers/mfd/da9063-i2c.c36
-rw-r--r--drivers/mfd/db8500-prcmu.c7
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c203
-rw-r--r--drivers/mfd/intel-lpss-acpi.c12
-rw-r--r--drivers/mfd/intel-lpss-pci.c31
-rw-r--r--drivers/mfd/intel-lpss.c1
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c26
-rw-r--r--drivers/mfd/ipaq-micro.c2
-rw-r--r--drivers/mfd/max77686.c88
-rw-r--r--drivers/mfd/menelaus.c2
-rw-r--r--drivers/mfd/mt6397-core.c105
-rw-r--r--drivers/mfd/rc5t583.c4
-rw-r--r--drivers/mfd/stmpe.c35
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/tps65010.c21
-rw-r--r--drivers/mfd/tps65086.c149
-rw-r--r--drivers/mfd/tps65090.c5
-rw-r--r--drivers/mfd/tps65912-core.c240
-rw-r--r--drivers/mfd/tps65912-i2c.c162
-rw-r--r--drivers/mfd/tps65912-irq.c217
-rw-r--r--drivers/mfd/tps65912-spi.c160
-rw-r--r--drivers/mfd/wm5102-tables.c16
-rw-r--r--drivers/mfd/wm5110-tables.c82
-rw-r--r--drivers/mfd/wm8998-tables.c12
-rw-r--r--drivers/misc/Kconfig282
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot.c4
-rw-r--r--drivers/misc/apds990x.c8
-rw-r--r--drivers/misc/arm-charlcd.c24
-rw-r--r--drivers/misc/atmel-ssc.c1
-rw-r--r--drivers/misc/bh1770glc.c8
-rw-r--r--drivers/misc/c2port/core.c8
-rw-r--r--drivers/misc/cxl/Makefile1
-rw-r--r--drivers/misc/cxl/api.c83
-rw-r--r--drivers/misc/cxl/base.c32
-rw-r--r--drivers/misc/cxl/context.c11
-rw-r--r--drivers/misc/cxl/cxl.h288
-rw-r--r--drivers/misc/cxl/debugfs.c4
-rw-r--r--drivers/misc/cxl/fault.c25
-rw-r--r--drivers/misc/cxl/file.c28
-rw-r--r--drivers/misc/cxl/flash.c538
-rw-r--r--drivers/misc/cxl/guest.c1177
-rw-r--r--drivers/misc/cxl/hcalls.c647
-rw-r--r--drivers/misc/cxl/hcalls.h204
-rw-r--r--drivers/misc/cxl/irq.c309
-rw-r--r--drivers/misc/cxl/main.c122
-rw-r--r--drivers/misc/cxl/native.c469
-rw-r--r--drivers/misc/cxl/of.c513
-rw-r--r--drivers/misc/cxl/pci.c268
-rw-r--r--drivers/misc/cxl/sysfs.c128
-rw-r--r--drivers/misc/cxl/trace.h193
-rw-r--r--drivers/misc/cxl/vphb.c167
-rw-r--r--drivers/misc/eeprom/Kconfig6
-rw-r--r--drivers/misc/eeprom/at24.c130
-rw-r--r--drivers/misc/eeprom/at25.c148
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c332
-rw-r--r--drivers/misc/genwqe/card_sysfs.c2
-rw-r--r--drivers/misc/ibmasm/ibmasm.h9
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c8
-rw-r--r--drivers/misc/lkdtm.c158
-rw-r--r--drivers/misc/mei/Kconfig6
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/amthif.c130
-rw-r--r--drivers/misc/mei/bus-fixup.c41
-rw-r--r--drivers/misc/mei/bus.c57
-rw-r--r--drivers/misc/mei/client.c189
-rw-r--r--drivers/misc/mei/client.h27
-rw-r--r--drivers/misc/mei/debugfs.c65
-rw-r--r--drivers/misc/mei/hbm.c24
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/hw-me.c10
-rw-r--r--drivers/misc/mei/hw-txe.c10
-rw-r--r--drivers/misc/mei/hw.h32
-rw-r--r--drivers/misc/mei/init.c20
-rw-r--r--drivers/misc/mei/interrupt.c94
-rw-r--r--drivers/misc/mei/main.c106
-rw-r--r--drivers/misc/mei/mei-trace.c2
-rw-r--r--drivers/misc/mei/mei-trace.h40
-rw-r--r--drivers/misc/mei/mei_dev.h118
-rw-r--r--drivers/misc/mei/pci-me.c7
-rw-r--r--drivers/misc/mei/pci-txe.c4
-rw-r--r--drivers/misc/mei/wd.c391
-rw-r--r--drivers/misc/mic/Kconfig44
-rw-r--r--drivers/misc/mic/Makefile1
-rw-r--r--drivers/misc/mic/bus/Makefile1
-rw-r--r--drivers/misc/mic/bus/cosm_bus.h2
-rw-r--r--drivers/misc/mic/bus/vop_bus.c203
-rw-r--r--drivers/misc/mic/bus/vop_bus.h140
-rw-r--r--drivers/misc/mic/card/Makefile1
-rw-r--r--drivers/misc/mic/card/mic_device.c89
-rw-r--r--drivers/misc/mic/card/mic_device.h3
-rw-r--r--drivers/misc/mic/card/mic_virtio.c634
-rw-r--r--drivers/misc/mic/card/mic_virtio.h76
-rw-r--r--drivers/misc/mic/card/mic_x100.c1
-rw-r--r--drivers/misc/mic/cosm/cosm_main.c13
-rw-r--r--drivers/misc/mic/host/Makefile2
-rw-r--r--drivers/misc/mic/host/mic_boot.c125
-rw-r--r--drivers/misc/mic/host/mic_debugfs.c190
-rw-r--r--drivers/misc/mic/host/mic_device.h9
-rw-r--r--drivers/misc/mic/host/mic_fops.c222
-rw-r--r--drivers/misc/mic/host/mic_fops.h32
-rw-r--r--drivers/misc/mic/host/mic_main.c49
-rw-r--r--drivers/misc/mic/host/mic_virtio.c811
-rw-r--r--drivers/misc/mic/host/mic_x100.c19
-rw-r--r--drivers/misc/mic/scif/scif_dma.c41
-rw-r--r--drivers/misc/mic/scif/scif_rma.c9
-rw-r--r--drivers/misc/mic/vop/Makefile9
-rw-r--r--drivers/misc/mic/vop/vop_debugfs.c232
-rw-r--r--drivers/misc/mic/vop/vop_main.c755
-rw-r--r--drivers/misc/mic/vop/vop_main.h (renamed from drivers/misc/mic/host/mic_virtio.h)129
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c1165
-rw-r--r--drivers/misc/panel.c (renamed from drivers/staging/panel/panel.c)136
-rw-r--r--drivers/misc/pch_phub.c10
-rw-r--r--drivers/misc/sgi-gru/grufault.c3
-rw-r--r--drivers/misc/sram.c5
-rw-r--r--drivers/misc/ti-st/st_core.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/mmc/card/block.c52
-rw-r--r--drivers/mmc/card/mmc_test.c1
-rw-r--r--drivers/mmc/card/sdio_uart.c14
-rw-r--r--drivers/mmc/core/core.c29
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/host.c7
-rw-r--r--drivers/mmc/core/mmc.c4
-rw-r--r--drivers/mmc/core/mmc_ops.c19
-rw-r--r--drivers/mmc/core/pwrseq_simple.c1
-rw-r--r--drivers/mmc/core/sd.c2
-rw-r--r--drivers/mmc/core/sd_ops.c7
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/core/sdio_ops.c3
-rw-r--r--drivers/mmc/host/Kconfig25
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c11
-rw-r--r--drivers/mmc/host/bfin_sdh.c3
-rw-r--r--drivers/mmc/host/davinci_mmc.c15
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c31
-rw-r--r--drivers/mmc/host/dw_mmc-pltfm.c19
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c7
-rw-r--r--drivers/mmc/host/dw_mmc.c101
-rw-r--r--drivers/mmc/host/dw_mmc.h6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c2
-rw-r--r--drivers/mmc/host/mmc_spi.c6
-rw-r--r--drivers/mmc/host/mmci.c1
-rw-r--r--drivers/mmc/host/mtk-sd.c19
-rw-r--r--drivers/mmc/host/mxcmmc.c3
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c9
-rw-r--r--drivers/mmc/host/pxamci.c6
-rw-r--r--drivers/mmc/host/s3cmci.c3
-rw-r--r--drivers/mmc/host/sdhci-acpi.c47
-rw-r--r--drivers/mmc/host/sdhci-bcm2835.c14
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c38
-rw-r--r--drivers/mmc/host/sdhci-iproc.c40
-rw-r--r--drivers/mmc/host/sdhci-msm.c25
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c109
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c53
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c19
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c40
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pic32.c257
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c48
-rw-r--r--drivers/mmc/host/sdhci-st.c40
-rw-r--r--drivers/mmc/host/sdhci-tegra.c76
-rw-r--r--drivers/mmc/host/sdhci.c539
-rw-r--r--drivers/mmc/host/sdhci.h8
-rw-r--r--drivers/mmc/host/sdricoh_cs.c26
-rw-r--r--drivers/mmc/host/sh_mmcif.c4
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c54
-rw-r--r--drivers/mmc/host/sunxi-mmc.c95
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c15
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c29
-rw-r--r--drivers/mmc/host/usdhi6rol0.c4
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm47xxpart.c42
-rw-r--r--drivers/mtd/bcm63xxpart.c182
-rw-r--r--drivers/mtd/devices/block2mtd.c6
-rw-r--r--drivers/mtd/devices/docg3.c5
-rw-r--r--drivers/mtd/devices/mtdram.c5
-rw-r--r--drivers/mtd/mtdpart.c5
-rw-r--r--drivers/mtd/mtdswap.c24
-rw-r--r--drivers/mtd/nand/Kconfig10
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel_nand.c89
-rw-r--r--drivers/mtd/nand/atmel_nand_ecc.h9
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h3
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c42
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c3
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c73
-rw-r--r--drivers/mtd/nand/hisi504_nand.c1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c3
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c7
-rw-r--r--drivers/mtd/nand/nand_base.c88
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_bch.c27
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c28
-rw-r--r--drivers/mtd/nand/plat_nand.c1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c191
-rw-r--r--drivers/mtd/nand/qcom_nandc.c2223
-rw-r--r--drivers/mtd/nand/s3c2410.c3
-rw-r--r--drivers/mtd/nand/sunxi_nand.c287
-rw-r--r--drivers/mtd/nand/vf610_nfc.c2
-rw-r--r--drivers/mtd/onenand/omap2.c2
-rw-r--r--drivers/mtd/onenand/onenand_base.c32
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c5
-rw-r--r--drivers/mtd/spi-nor/Kconfig3
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c167
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c4
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c246
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c2
-rw-r--r--drivers/mtd/tests/oobtest.c49
-rw-r--r--drivers/mtd/ubi/misc.c49
-rw-r--r--drivers/mtd/ubi/ubi.h16
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/bonding/bond_3ad.c44
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c104
-rw-r--r--drivers/net/bonding/bond_options.c7
-rw-r--r--drivers/net/can/Kconfig66
-rw-r--r--drivers/net/can/Makefile17
-rw-r--r--drivers/net/can/ifi_canfd/Kconfig8
-rw-r--r--drivers/net/can/ifi_canfd/Makefile5
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c944
-rw-r--r--drivers/net/can/rcar_can.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c87
-rw-r--r--drivers/net/can/usb/ems_usb.c8
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/Makefile4
-rw-r--r--drivers/net/dsa/bcm_sf2.c18
-rw-r--r--drivers/net/dsa/bcm_sf2.h2
-rw-r--r--drivers/net/dsa/mv88e6123.c (renamed from drivers/net/dsa/mv88e6123_61_65.c)20
-rw-r--r--drivers/net/dsa/mv88e6171.c8
-rw-r--r--drivers/net/dsa/mv88e6352.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx.c768
-rw-r--r--drivers/net/dsa/mv88e6xxx.h37
-rw-r--r--drivers/net/ethernet/3com/3c59x.c12
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h10
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c39
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c388
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c42
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c53
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h11
-rw-r--r--drivers/net/ethernet/apm/xgene/Makefile3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.c734
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.h295
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h6
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c482
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h35
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c12
-rw-r--r--drivers/net/ethernet/arc/emac.h60
-rw-r--r--drivers/net/ethernet/arc/emac_main.c35
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c39
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c41
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig10
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c41
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h19
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c57
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c185
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c363
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h54
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c325
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c63
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c26
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h6
-rw-r--r--drivers/net/ethernet/cadence/macb.c149
-rw-r--r--drivers/net/ethernet/cadence/macb.h6
-rw-r--r--drivers/net/ethernet/cavium/Kconfig13
-rw-r--r--drivers/net/ethernet/cavium/Makefile1
-rw-r--r--drivers/net/ethernet/cavium/octeon/Makefile (renamed from drivers/net/ethernet/octeon/Makefile)0
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c (renamed from drivers/net/ethernet/octeon/octeon_mgmt.c)0
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h55
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c40
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c36
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c78
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/Makefile1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h57
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c235
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c464
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h310
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c47
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c135
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h219
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_values.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c385
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c143
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h22
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c45
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h35
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c133
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h48
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c23
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c196
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c222
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.h348
-rw-r--r--drivers/net/ethernet/freescale/fec.h38
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c404
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c104
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c106
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c33
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c76
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c196
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c48
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c48
-rw-r--r--drivers/net/ethernet/intel/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h5
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c30
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c85
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c12
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c11
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h87
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c1012
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h232
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c776
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c328
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c296
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c538
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h48
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c878
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h89
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c283
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h87
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c125
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_prototype.h15
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c826
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h80
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c124
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c121
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c41
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c213
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c18
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c959
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c20
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c143
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c398
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h103
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c4
-rw-r--r--drivers/net/ethernet/marvell/Kconfig22
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c572
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c487
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.h182
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c18
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig17
-rw-r--r--drivers/net/ethernet/mediatek/Makefile5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c1808
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h421
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c362
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c228
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c302
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c152
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c328
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c429
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c247
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c170
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c272
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
-rw-r--r--drivers/net/ethernet/octeon/Kconfig14
-rw-r--r--drivers/net/ethernet/pasemi/Kconfig5
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c50
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h51
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c519
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h2619
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c155
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2221
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c586
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c46
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c334
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c81
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c15
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h40
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c1105
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c11
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/renesas/Kconfig4
-rw-r--r--drivers/net/ethernet/renesas/ravb.h4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c79
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c25
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c105
-rw-r--r--drivers/net/ethernet/rocker/Makefile1
-rw-r--r--drivers/net/ethernet/rocker/rocker.c5495
-rw-r--r--drivers/net/ethernet/rocker/rocker.h583
-rw-r--r--drivers/net/ethernet/rocker/rocker_hw.h467
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2909
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2958
-rw-r--r--drivers/net/ethernet/rocker/rocker_tlv.c53
-rw-r--r--drivers/net/ethernet/rocker/rocker_tlv.h201
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/Makefile2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c91
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h38
-rw-r--r--drivers/net/ethernet/sfc/efx.h3
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c184
-rw-r--r--drivers/net/ethernet/sfc/tx.c10
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c87
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h63
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h330
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h77
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c111
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c226
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c154
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c41
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c495
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c133
-rw-r--r--drivers/net/ethernet/sun/Kconfig16
-rw-r--r--drivers/net/ethernet/sun/Makefile2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c468
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1733
-rw-r--r--drivers/net/ethernet/sun/sunvnet.h114
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c1732
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.h145
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c14
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c1
-rw-r--r--drivers/net/geneve.c204
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c12
-rw-r--r--drivers/net/hamradio/dmascc.c13
-rw-r--r--drivers/net/hyperv/hyperv_net.h11
-rw-r--r--drivers/net/hyperv/netvsc_drv.c75
-rw-r--r--drivers/net/hyperv/rndis_filter.c35
-rw-r--r--drivers/net/ieee802154/at86rf230.c25
-rw-r--r--drivers/net/ieee802154/mrf24j40.c1
-rw-r--r--drivers/net/ipvlan/ipvlan.h10
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c46
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c22
-rw-r--r--drivers/net/irda/irtty-sir.c10
-rw-r--r--drivers/net/macsec.c3297
-rw-r--r--drivers/net/macvlan.c12
-rw-r--r--drivers/net/macvtap.c9
-rw-r--r--drivers/net/phy/Kconfig22
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/at803x.c36
-rw-r--r--drivers/net/phy/bcm7xxx.c64
-rw-r--r--drivers/net/phy/dp83848.c88
-rw-r--r--drivers/net/phy/fixed_phy.c11
-rw-r--r--drivers/net/phy/marvell.c52
-rw-r--r--drivers/net/phy/mdio-cavium.c153
-rw-r--r--drivers/net/phy/mdio-cavium.h119
-rw-r--r--drivers/net/phy/mdio-octeon.c280
-rw-r--r--drivers/net/phy/mdio-sun4i.c4
-rw-r--r--drivers/net/phy/mdio-thunder.c154
-rw-r--r--drivers/net/phy/micrel.c9
-rw-r--r--drivers/net/phy/spi_ks8995.c306
-rw-r--r--drivers/net/ppp/ppp_generic.c47
-rw-r--r--drivers/net/ppp/ppp_mppe.c99
-rw-r--r--drivers/net/rionet.c277
-rw-r--r--drivers/net/team/team.c24
-rw-r--r--drivers/net/tun.c29
-rw-r--r--drivers/net/usb/cdc_mbim.c9
-rw-r--r--drivers/net/usb/cdc_ncm.c7
-rw-r--r--drivers/net/usb/lan78xx.c357
-rw-r--r--drivers/net/usb/lan78xx.h1
-rw-r--r--drivers/net/usb/plusb.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c4
-rw-r--r--drivers/net/veth.c26
-rw-r--r--drivers/net/virtio_net.c64
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c20
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c199
-rw-r--r--drivers/net/vxlan.c690
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig6
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c933
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.h87
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c46
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c41
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h169
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c54
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c146
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c39
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c64
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c171
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h49
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h11
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c162
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h92
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.c79
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_aic.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h65
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h4
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h6
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h73
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c172
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c112
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c46
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c174
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c105
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c640
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c45
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c152
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c180
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c57
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h68
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c22
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c216
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c116
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h157
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c111
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h139
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c230
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c495
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c203
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h142
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c858
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c432
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c87
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c15
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c29
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.h4
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco.h4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c16
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c38
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c40
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c9
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/README10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c274
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c61
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h90
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h49
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c325
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h50
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c310
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c51
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c7
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h18
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.h20
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c3495
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h693
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h152
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c68
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c23
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h22
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c19
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c9
-rw-r--r--drivers/net/wireless/st/cw1200/pm.h9
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c4
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h4
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c66
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig2
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c86
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h1
-rw-r--r--drivers/net/xen-netback/common.h2
-rw-r--r--drivers/net/xen-netback/netback.c65
-rw-r--r--drivers/net/xen-netback/xenbus.c91
-rw-r--r--drivers/nfc/microread/i2c.c8
-rw-r--r--drivers/nfc/pn544/i2c.c14
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c36
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c30
-rw-r--r--drivers/ntb/ntb_transport.c31
-rw-r--r--drivers/ntb/test/ntb_perf.c78
-rw-r--r--drivers/nvdimm/blk.c18
-rw-r--r--drivers/nvdimm/btt.c21
-rw-r--r--drivers/nvdimm/bus.c135
-rw-r--r--drivers/nvdimm/core.c111
-rw-r--r--drivers/nvdimm/dimm_devs.c6
-rw-r--r--drivers/nvdimm/e820.c2
-rw-r--r--drivers/nvdimm/namespace_devs.c7
-rw-r--r--drivers/nvdimm/nd.h8
-rw-r--r--drivers/nvdimm/pfn.h23
-rw-r--r--drivers/nvdimm/pfn_devs.c63
-rw-r--r--drivers/nvdimm/pmem.c265
-rw-r--r--drivers/nvdimm/region.c12
-rw-r--r--drivers/nvme/host/Kconfig6
-rw-r--r--drivers/nvme/host/Makefile10
-rw-r--r--drivers/nvme/host/core.c166
-rw-r--r--drivers/nvme/host/lightnvm.c63
-rw-r--r--drivers/nvme/host/nvme.h14
-rw-r--r--drivers/nvme/host/pci.c286
-rw-r--r--drivers/nvmem/Kconfig24
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/core.c145
-rw-r--r--drivers/nvmem/imx-ocotp.c2
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c330
-rw-r--r--drivers/nvmem/mtk-efuse.c110
-rw-r--r--drivers/nvmem/rockchip-efuse.c90
-rw-r--r--drivers/nvmem/sunxi_sid.c9
-rw-r--r--drivers/of/base.c15
-rw-r--r--drivers/of/fdt.c51
-rw-r--r--drivers/of/fdt_address.c11
-rw-r--r--drivers/of/of_mdio.c14
-rw-r--r--drivers/of/of_pci.c1
-rw-r--r--drivers/of/of_reserved_mem.c4
-rw-r--r--drivers/of/resolver.c7
-rw-r--r--drivers/of/unittest.c5
-rw-r--r--drivers/oprofile/oprofilefs.c4
-rw-r--r--drivers/parisc/Kconfig2
-rw-r--r--drivers/parisc/eisa_enumerator.c4
-rw-r--r--drivers/pci/Kconfig11
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/access.c275
-rw-r--r--drivers/pci/bus.c10
-rw-r--r--drivers/pci/host/Kconfig47
-rw-r--r--drivers/pci/host/Makefile6
-rw-r--r--drivers/pci/host/pci-dra7xx.c11
-rw-r--r--drivers/pci/host/pci-exynos.c13
-rw-r--r--drivers/pci/host/pci-host-common.c194
-rw-r--r--drivers/pci/host/pci-host-common.h47
-rw-r--r--drivers/pci/host/pci-host-generic.c181
-rw-r--r--drivers/pci/host/pci-hyperv.c2346
-rw-r--r--drivers/pci/host/pci-imx6.c186
-rw-r--r--drivers/pci/host/pci-keystone.c13
-rw-r--r--drivers/pci/host/pci-layerscape.c1
-rw-r--r--drivers/pci/host/pci-tegra.c85
-rw-r--r--drivers/pci/host/pci-thunder-ecam.c403
-rw-r--r--drivers/pci/host/pci-thunder-pem.c346
-rw-r--r--drivers/pci/host/pcie-altera.c3
-rw-r--r--drivers/pci/host/pcie-designware-plat.c138
-rw-r--r--drivers/pci/host/pcie-designware.c44
-rw-r--r--drivers/pci/host/pcie-designware.h6
-rw-r--r--drivers/pci/host/pcie-qcom.c12
-rw-r--r--drivers/pci/host/pcie-rcar.c14
-rw-r--r--drivers/pci/host/pcie-spear13xx.c14
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c881
-rw-r--r--drivers/pci/host/pcie-xilinx.c191
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c8
-rw-r--r--drivers/pci/iov.c14
-rw-r--r--drivers/pci/pci-label.c2
-rw-r--r--drivers/pci/pci-sysfs.c96
-rw-r--r--drivers/pci/pci.c40
-rw-r--r--drivers/pci/pci.h17
-rw-r--r--drivers/pci/pcie/Kconfig7
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c90
-rw-r--r--drivers/pci/pcie/pme.c11
-rw-r--r--drivers/pci/probe.c45
-rw-r--r--drivers/pci/quirks.c46
-rw-r--r--drivers/pci/remove.c5
-rw-r--r--drivers/pci/rom.c83
-rw-r--r--drivers/pci/setup-bus.c1
-rw-r--r--drivers/pci/setup-res.c6
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/db1xxx_ss.c11
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c1
-rw-r--r--drivers/perf/arm_pmu.c109
-rw-r--r--drivers/phy/Kconfig16
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/phy-dm816x-usb.c4
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c83
-rw-r--r--drivers/phy/phy-rockchip-dp.c151
-rw-r--r--drivers/phy/phy-rockchip-emmc.c229
-rw-r--r--drivers/phy/phy-rockchip-usb.c233
-rw-r--r--drivers/phy/phy-twl4030-usb.c4
-rw-r--r--drivers/pinctrl/Kconfig52
-rw-r--r--drivers/pinctrl/Makefile16
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c33
-rw-r--r--drivers/pinctrl/core.c35
-rw-r--r--drivers/pinctrl/core.h4
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c53
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6ul.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c1
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c39
-rw-r--r--drivers/pinctrl/mediatek/Kconfig22
-rw-r--r--drivers/pinctrl/mediatek/Makefile12
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2701.c585
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6397.c10
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c379
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8127.c8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8135.c8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8173.c8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c55
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.h12
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h2323
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h1936
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c143
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h21
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c137
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c182
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c4
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c148
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c2312
-rw-r--r--drivers/pinctrl/pinctrl-pic32.h141
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c24
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c369
-rw-r--r--drivers/pinctrl/pinctrl-single.c8
-rw-r--r--drivers/pinctrl/pinctrl-st.c1
-rw-r--r--drivers/pinctrl/pinctrl-xway.c17
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c2
-rw-r--r--drivers/pinctrl/pinmux.c13
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c3
-rw-r--r--drivers/pinctrl/qcom/Kconfig8
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c453
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c30
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig56
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile7
-rw-r--r--drivers/pinctrl/sh-pfc/core.c28
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c690
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c1136
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c774
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c624
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c1405
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c915
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c412
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h98
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c18
-rw-r--r--drivers/pinctrl/stm32/Kconfig16
-rw-r--r--drivers/pinctrl/stm32/Makefile5
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c829
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h51
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32f429.c1591
-rw-r--r--drivers/pinctrl/sunxi/Kconfig36
-rw-r--r--drivers/pinctrl/sunxi/Makefile4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c601
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c32
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c106
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c9
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c28
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h21
-rw-r--r--drivers/pinctrl/tegra/Kconfig30
-rw-r--r--drivers/pinctrl/tegra/Makefile7
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c (renamed from drivers/pinctrl/pinctrl-tegra-xusb.c)4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c (renamed from drivers/pinctrl/pinctrl-tegra.c)4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h (renamed from drivers/pinctrl/pinctrl-tegra.h)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra114.c (renamed from drivers/pinctrl/pinctrl-tegra114.c)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra124.c (renamed from drivers/pinctrl/pinctrl-tegra124.c)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c (renamed from drivers/pinctrl/pinctrl-tegra20.c)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra210.c (renamed from drivers/pinctrl/pinctrl-tegra210.c)0
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra30.c (renamed from drivers/pinctrl/pinctrl-tegra30.c)0
-rw-r--r--drivers/pinctrl/uniphier/Kconfig14
-rw-r--r--drivers/pinctrl/uniphier/Makefile14
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c (renamed from drivers/pinctrl/uniphier/pinctrl-proxstream2.c)0
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c (renamed from drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c)0
-rw-r--r--drivers/platform/Kconfig3
-rw-r--r--drivers/platform/goldfish/Kconfig19
-rw-r--r--drivers/platform/goldfish/Makefile2
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c182
-rw-r--r--drivers/platform/x86/Kconfig15
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/alienware-wmi.c286
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c9
-rw-r--r--drivers/platform/x86/dell-laptop.c370
-rw-r--r--drivers/platform/x86/dell-rbtn.c15
-rw-r--r--drivers/platform/x86/dell-smbios.c193
-rw-r--r--drivers/platform/x86/dell-smbios.h46
-rw-r--r--drivers/platform/x86/dell-wmi.c238
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c8
-rw-r--r--drivers/platform/x86/hp-wmi.c46
-rw-r--r--drivers/platform/x86/hp_accel.c6
-rw-r--r--drivers/platform/x86/ideapad-laptop.c14
-rw-r--r--drivers/platform/x86/intel-hid.c5
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c56
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c48
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c35
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c15
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c8
-rw-r--r--drivers/platform/x86/toshiba_acpi.c115
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c4
-rw-r--r--drivers/power/88pm860x_charger.c2
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ab8500_btemp.c15
-rw-r--r--drivers/power/ab8500_charger.c16
-rw-r--r--drivers/power/ab8500_fg.c15
-rw-r--r--drivers/power/abx500_chargalg.c14
-rw-r--r--drivers/power/act8945a_charger.c359
-rw-r--r--drivers/power/avs/rockchip-io-domain.c58
-rw-r--r--drivers/power/bq2415x_charger.c22
-rw-r--r--drivers/power/bq24735-charger.c146
-rw-r--r--drivers/power/bq27xxx_battery.c12
-rw-r--r--drivers/power/bq27xxx_battery_i2c.c24
-rw-r--r--drivers/power/charger-manager.c27
-rw-r--r--drivers/power/collie_battery.c3
-rw-r--r--drivers/power/goldfish_battery.c17
-rw-r--r--drivers/power/ipaq_micro_battery.c4
-rw-r--r--drivers/power/isp1704_charger.c19
-rw-r--r--drivers/power/jz4740-battery.c2
-rw-r--r--drivers/power/lp8788-charger.c2
-rw-r--r--drivers/power/pm2301_charger.c22
-rw-r--r--drivers/power/power_supply_sysfs.c3
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/arm-versatile-reboot.c39
-rw-r--r--drivers/powercap/intel_rapl.c221
-rw-r--r--drivers/ptp/ptp_chardev.c27
-rw-r--r--drivers/pwm/Kconfig2
-rw-r--r--drivers/pwm/pwm-brcmstb.c4
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c2
-rw-r--r--drivers/pwm/pwm-img.c5
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c5
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c74
-rw-r--r--drivers/rapidio/Kconfig8
-rw-r--r--drivers/rapidio/devices/Makefile1
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2720
-rw-r--r--drivers/rapidio/devices/tsi721.c1034
-rw-r--r--drivers/rapidio/devices/tsi721.h87
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c397
-rw-r--r--drivers/rapidio/rio-driver.c12
-rw-r--r--drivers/rapidio/rio-scan.c135
-rw-r--r--drivers/rapidio/rio.c441
-rw-r--r--drivers/rapidio/rio.h5
-rw-r--r--drivers/regulator/Kconfig28
-rw-r--r--drivers/regulator/Makefile9
-rw-r--r--drivers/regulator/act8865-regulator.c8
-rw-r--r--drivers/regulator/act8945a-regulator.c165
-rw-r--r--drivers/regulator/ad5398.c6
-rw-r--r--drivers/regulator/axp20x-regulator.c59
-rw-r--r--drivers/regulator/core.c75
-rw-r--r--drivers/regulator/da9210-regulator.c5
-rw-r--r--drivers/regulator/fan53555.c18
-rw-r--r--drivers/regulator/gpio-regulator.c6
-rw-r--r--drivers/regulator/helpers.c23
-rw-r--r--drivers/regulator/hi655x-regulator.c227
-rw-r--r--drivers/regulator/lp872x.c38
-rw-r--r--drivers/regulator/ltc3589.c15
-rw-r--r--drivers/regulator/max77620-regulator.c813
-rw-r--r--drivers/regulator/max77686-regulator.c (renamed from drivers/regulator/max77686.c)0
-rw-r--r--drivers/regulator/max77802-regulator.c (renamed from drivers/regulator/max77802.c)0
-rw-r--r--drivers/regulator/mt6397-regulator.c15
-rw-r--r--drivers/regulator/of_regulator.c21
-rw-r--r--drivers/regulator/pv88060-regulator.c8
-rw-r--r--drivers/regulator/pv88090-regulator.c8
-rw-r--r--drivers/regulator/pwm-regulator.c29
-rw-r--r--drivers/regulator/s2mps11.c43
-rw-r--r--drivers/regulator/s5m8767.c13
-rw-r--r--drivers/regulator/tps65912-regulator.c613
-rw-r--r--drivers/regulator/vexpress-regulator.c (renamed from drivers/regulator/vexpress.c)0
-rw-r--r--drivers/remoteproc/Kconfig9
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/remoteproc_core.c4
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c36
-rw-r--r--drivers/remoteproc/st_remoteproc.c297
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c1
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c10
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c6
-rw-r--r--drivers/reset/reset-ath79.c2
-rw-r--r--drivers/reset/reset-berlin.c5
-rw-r--r--drivers/reset/reset-lpc18xx.c2
-rw-r--r--drivers/reset/reset-pistachio.c154
-rw-r--r--drivers/reset/reset-socfpga.c2
-rw-r--r--drivers/reset/reset-sunxi.c2
-rw-r--r--drivers/reset/reset-zynq.c2
-rw-r--r--drivers/reset/sti/reset-syscfg.c2
-rw-r--r--drivers/rtc/Kconfig252
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/class.c13
-rw-r--r--drivers/rtc/interface.c54
-rw-r--r--drivers/rtc/rtc-abx80x.c252
-rw-r--r--drivers/rtc/rtc-as3722.c2
-rw-r--r--drivers/rtc/rtc-asm9260.c355
-rw-r--r--drivers/rtc/rtc-ds1305.c4
-rw-r--r--drivers/rtc/rtc-ds1307.c411
-rw-r--r--drivers/rtc/rtc-ds1685.c9
-rw-r--r--drivers/rtc/rtc-ds3232.c476
-rw-r--r--drivers/rtc/rtc-ds3234.c171
-rw-r--r--drivers/rtc/rtc-generic.c12
-rw-r--r--drivers/rtc/rtc-hym8563.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c6
-rw-r--r--drivers/rtc/rtc-max77686.c565
-rw-r--r--drivers/rtc/rtc-max77802.c502
-rw-r--r--drivers/rtc/rtc-mcp795.c10
-rw-r--r--drivers/rtc/rtc-mt6397.c1
-rw-r--r--drivers/rtc/rtc-palmas.c3
-rw-r--r--drivers/rtc/rtc-pcf2123.c271
-rw-r--r--drivers/rtc/rtc-pcf2127.c335
-rw-r--r--drivers/rtc/rtc-pcf85063.c162
-rw-r--r--drivers/rtc/rtc-pcf8523.c25
-rw-r--r--drivers/rtc/rtc-pic32.c411
-rw-r--r--drivers/rtc/rtc-rv3029c2.c723
-rw-r--r--drivers/rtc/rtc-rv8803.c67
-rw-r--r--drivers/rtc/rtc-rx6110.c402
-rw-r--r--drivers/rtc/rtc-rx8025.c27
-rw-r--r--drivers/rtc/rtc-s3c.c19
-rw-r--r--drivers/rtc/rtc-s5m.c8
-rw-r--r--drivers/rtc/rtc-sysfs.c35
-rw-r--r--drivers/rtc/rtc-tps6586x.c2
-rw-r--r--drivers/rtc/rtc-tps65910.c2
-rw-r--r--drivers/rtc/rtc-tps80031.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c13
-rw-r--r--drivers/s390/block/dasd_alias.c216
-rw-r--r--drivers/s390/block/dasd_devmap.c10
-rw-r--r--drivers/s390/block/dasd_diag.c69
-rw-r--r--drivers/s390/block/dasd_eckd.c347
-rw-r--r--drivers/s390/block/dasd_eckd.h3
-rw-r--r--drivers/s390/block/dasd_fba.c28
-rw-r--r--drivers/s390/block/dasd_genhd.c4
-rw-r--r--drivers/s390/block/dasd_int.h11
-rw-r--r--drivers/s390/block/dasd_ioctl.c38
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c18
-rw-r--r--drivers/s390/block/scm_blk.c2
-rw-r--r--drivers/s390/char/con3215.c3
-rw-r--r--drivers/s390/char/monreader.c10
-rw-r--r--drivers/s390/char/sclp_cmd.c27
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c6
-rw-r--r--drivers/s390/char/tape_core.c4
-rw-r--r--drivers/s390/char/vmlogrdr.c6
-rw-r--r--drivers/s390/cio/blacklist.c9
-rw-r--r--drivers/s390/cio/ccwreq.c13
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/device.c23
-rw-r--r--drivers/s390/net/lcs.c4
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c15
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/NCR5380.c133
-rw-r--r--drivers/scsi/aacraid/aachba.c27
-rw-r--r--drivers/scsi/aacraid/aacraid.h14
-rw-r--r--drivers/scsi/aacraid/commctrl.c13
-rw-r--r--drivers/scsi/aacraid/comminit.c6
-rw-r--r--drivers/scsi/aacraid/commsup.c69
-rw-r--r--drivers/scsi/aacraid/dpcsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c202
-rw-r--r--drivers/scsi/aacraid/src.c30
-rw-r--r--drivers/scsi/aha1542.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c1
-rw-r--r--drivers/scsi/arm/acornscsi.c3
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/atari_NCR5380.c133
-rw-r--r--drivers/scsi/be2iscsi/be.h20
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c867
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h148
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c62
-rw-r--r--drivers/scsi/be2iscsi/be_main.c313
-rw-r--r--drivers/scsi/be2iscsi/be_main.h25
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c573
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h2
-rw-r--r--drivers/scsi/bfa/bfa_core.c19
-rw-r--r--drivers/scsi/bfa/bfa_cs.h41
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c5
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.h17
-rw-r--r--drivers/scsi/cxlflash/common.h9
-rw-r--r--drivers/scsi/cxlflash/main.c224
-rw-r--r--drivers/scsi/cxlflash/main.h5
-rw-r--r--drivers/scsi/cxlflash/superpipe.c195
-rw-r--r--drivers/scsi/cxlflash/superpipe.h1
-rw-r--r--drivers/scsi/device_handler/Kconfig8
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c979
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c7
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c38
-rw-r--r--drivers/scsi/dpt_i2o.c3
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c5
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c3
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c36
-rw-r--r--drivers/scsi/fdomain.c2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c13
-rw-r--r--drivers/scsi/gdth.c7
-rw-r--r--drivers/scsi/gdth_proc.c11
-rw-r--r--drivers/scsi/hisi_sas/Makefile2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h43
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c136
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c100
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c2214
-rw-r--r--drivers/scsi/hosts.c29
-rw-r--r--drivers/scsi/hpsa.c52
-rw-r--r--drivers/scsi/hpsa.h3
-rw-r--r--drivers/scsi/hpsa_cmd.h5
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c122
-rw-r--r--drivers/scsi/ibmvscsi/viosrp.h26
-rw-r--r--drivers/scsi/imm.c7
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c5
-rw-r--r--drivers/scsi/iscsi_tcp.c54
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/libiscsi_tcp.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c43
-rw-r--r--drivers/scsi/mac53c94.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h353
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c1079
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c1287
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h136
-rw-r--r--drivers/scsi/mesh.c2
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h82
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h127
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h22
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h117
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_raid.h5
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_sas.h10
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_tool.h5
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_type.h5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c186
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h47
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c37
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c272
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c25
-rw-r--r--drivers/scsi/mvumi.c4
-rw-r--r--drivers/scsi/osd/osd_initiator.c3
-rw-r--r--drivers/scsi/ppa.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c201
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h34
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c146
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h57
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c119
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c70
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c73
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c79
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c6
-rw-r--r--drivers/scsi/qlogicpti.c4
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_common.c12
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_pm.c10
-rw-r--r--drivers/scsi/scsi_sas_internal.h2
-rw-r--r--drivers/scsi/scsi_scan.c23
-rw-r--r--drivers/scsi/scsi_sysfs.c112
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/sd.c51
-rw-r--r--drivers/scsi/sd.h7
-rw-r--r--drivers/scsi/sg.c3
-rw-r--r--drivers/scsi/sim710.c3
-rw-r--r--drivers/scsi/snic/snic_ctl.c2
-rw-r--r--drivers/scsi/st.c128
-rw-r--r--drivers/scsi/stex.c153
-rw-r--r--drivers/scsi/storvsc_drv.c5
-rw-r--r--drivers/scsi/ufs/Kconfig1
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c155
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h9
-rw-r--r--drivers/scsi/ufs/ufs.h35
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h151
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c19
-rw-r--r--drivers/scsi/ufs/ufshcd.c1211
-rw-r--r--drivers/scsi/ufs/ufshcd.h46
-rw-r--r--drivers/scsi/ufs/ufshci.h4
-rw-r--r--drivers/scsi/ufs/unipro.h22
-rw-r--r--drivers/sh/superhyway/superhyway.c2
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/fsl/qe/gpio.c2
-rw-r--r--drivers/soc/fsl/qe/qe_common.c66
-rw-r--r--drivers/soc/fsl/qe/qe_ic.c11
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c41
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c60
-rw-r--r--drivers/soc/rockchip/pm_domains.c34
-rw-r--r--drivers/soc/samsung/Kconfig13
-rw-r--r--drivers/soc/samsung/Makefile2
-rw-r--r--drivers/soc/samsung/exynos-pmu.c141
-rw-r--r--drivers/soc/samsung/exynos-pmu.h44
-rw-r--r--drivers/soc/samsung/exynos3250-pmu.c175
-rw-r--r--drivers/soc/samsung/exynos4-pmu.c222
-rw-r--r--drivers/soc/samsung/exynos5250-pmu.c195
-rw-r--r--drivers/soc/samsung/exynos5420-pmu.c280
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c5
-rw-r--r--drivers/soc/ti/knav_qmss.h4
-rw-r--r--drivers/soc/ti/knav_qmss_acc.c14
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c22
-rw-r--r--drivers/spi/Kconfig96
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/spi-axi-spi-engine.c591
-rw-r--r--drivers/spi/spi-bcm2835.c5
-rw-r--r--drivers/spi/spi-bcm2835aux.c72
-rw-r--r--drivers/spi/spi-dw-mid.c4
-rw-r--r--drivers/spi/spi-dw-mmio.c5
-rw-r--r--drivers/spi/spi-imx.c339
-rw-r--r--drivers/spi/spi-lp8841-rtc.c256
-rw-r--r--drivers/spi/spi-omap2-mcspi.c62
-rw-r--r--drivers/spi/spi-pl022.c7
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c8
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c13
-rw-r--r--drivers/spi/spi-pxa2xx.c151
-rw-r--r--drivers/spi/spi-pxa2xx.h37
-rw-r--r--drivers/spi/spi-rockchip.c82
-rw-r--r--drivers/spi/spi-ti-qspi.c139
-rw-r--r--drivers/spi/spi.c428
-rw-r--r--drivers/spmi/spmi-pmic-arb.c153
-rw-r--r--drivers/staging/Kconfig10
-rw-r--r--drivers/staging/Makefile10
-rw-r--r--drivers/staging/android/Kconfig9
-rw-r--r--drivers/staging/android/ashmem.c47
-rw-r--r--drivers/staging/android/ion/hisilicon/hi6220_ion.c5
-rw-r--r--drivers/staging/android/ion/ion.c139
-rw-r--r--drivers/staging/android/ion/ion.h20
-rw-r--r--drivers/staging/android/ion/ion_carveout_heap.c8
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c4
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_test.c4
-rw-r--r--drivers/staging/android/ion/tegra/tegra_ion.c7
-rw-r--r--drivers/staging/android/lowmemorykiller.c26
-rw-r--r--drivers/staging/android/sw_sync.c191
-rw-r--r--drivers/staging/android/sw_sync.h8
-rw-r--r--drivers/staging/android/sync.c469
-rw-r--r--drivers/staging/android/sync.h241
-rw-r--r--drivers/staging/android/sync_debug.c221
-rw-r--r--drivers/staging/android/timed_gpio.c5
-rw-r--r--drivers/staging/android/trace/sync.h44
-rw-r--r--drivers/staging/android/uapi/ashmem.h1
-rw-r--r--drivers/staging/android/uapi/sync.h37
-rw-r--r--drivers/staging/board/armadillo800eva.c1
-rw-r--r--drivers/staging/board/board.c1
-rw-r--r--drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c9
-rw-r--r--drivers/staging/comedi/TODO1
-rw-r--r--drivers/staging/comedi/comedi.h599
-rw-r--r--drivers/staging/comedi/comedi_fops.c86
-rw-r--r--drivers/staging/comedi/comedi_pcmcia.h3
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c4
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c68
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c141
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.c3
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c47
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c18
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c4
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c2
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c32
-rw-r--r--drivers/staging/comedi/drivers/mite.c72
-rw-r--r--drivers/staging/comedi/drivers/mite.h3
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_c_common.c0
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c637
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c20
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c2
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c5
-rw-r--r--drivers/staging/comedi/drivers/s626.c33
-rw-r--r--drivers/staging/dgap/Kconfig6
-rw-r--r--drivers/staging/dgap/Makefile1
-rw-r--r--drivers/staging/dgap/dgap.c7079
-rw-r--r--drivers/staging/dgap/dgap.h1229
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c57
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h8
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c2
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c8
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h1
-rw-r--r--drivers/staging/dgnc/dgnc_pci.h1
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c54
-rw-r--r--drivers/staging/dgnc/digi.h32
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c55
-rw-r--r--drivers/staging/fbtft/Kconfig12
-rw-r--r--drivers/staging/fbtft/Makefile2
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c4
-rw-r--r--drivers/staging/fbtft/fb_hx8340bn.c144
-rw-r--r--drivers/staging/fbtft/fb_hx8347d.c9
-rw-r--r--drivers/staging/fbtft/fb_hx8353d.c52
-rw-r--r--drivers/staging/fbtft/fb_hx8357d.c144
-rw-r--r--drivers/staging/fbtft/fb_hx8357d.h60
-rw-r--r--drivers/staging/fbtft/fb_ili9163.c189
-rw-r--r--drivers/staging/fbtft/fb_ili9320.c13
-rw-r--r--drivers/staging/fbtft/fb_ili9325.c81
-rw-r--r--drivers/staging/fbtft/fb_ili9340.c34
-rw-r--r--drivers/staging/fbtft/fb_ili9341.c69
-rw-r--r--drivers/staging/fbtft/fb_ili9481.c33
-rw-r--r--drivers/staging/fbtft/fb_ili9486.c35
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c6
-rw-r--r--drivers/staging/fbtft/fb_s6d02a1.c42
-rw-r--r--drivers/staging/fbtft/fb_ssd1305.c216
-rw-r--r--drivers/staging/fbtft/fb_ssd1325.c205
-rw-r--r--drivers/staging/fbtft/fb_st7735r.c42
-rw-r--r--drivers/staging/fbtft/fb_tinylcd.c28
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c4
-rw-r--r--drivers/staging/fbtft/fb_uc1701.c27
-rw-r--r--drivers/staging/fbtft/fbtft-bus.c8
-rw-r--r--drivers/staging/fbtft/fbtft-core.c36
-rw-r--r--drivers/staging/fbtft/fbtft.h8
-rw-r--r--drivers/staging/fbtft/fbtft_device.c75
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig3
-rw-r--r--drivers/staging/fsl-mc/bus/Makefile2
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c333
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c125
-rw-r--r--drivers/staging/fsl-mc/bus/mc-allocator.c201
-rw-r--r--drivers/staging/fsl-mc/bus/mc-bus.c24
-rw-r--r--drivers/staging/fsl-mc/bus/mc-msi.c276
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c9
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h2
-rw-r--r--drivers/staging/fsl-mc/include/mc-private.h41
-rw-r--r--drivers/staging/fsl-mc/include/mc.h26
-rw-r--r--drivers/staging/fwserial/dma_fifo.c10
-rw-r--r--drivers/staging/fwserial/fwserial.c17
-rw-r--r--drivers/staging/fwserial/fwserial.h1
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c8
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c20
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c34
-rw-r--r--drivers/staging/gdm72xx/Kconfig63
-rw-r--r--drivers/staging/gdm72xx/Makefile6
-rw-r--r--drivers/staging/gdm72xx/TODO2
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c438
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.h74
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.c700
-rw-r--r--drivers/staging/gdm72xx/gdm_sdio.h63
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c789
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.h78
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.c815
-rw-r--r--drivers/staging/gdm72xx/gdm_wimax.h49
-rw-r--r--drivers/staging/gdm72xx/hci.h213
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c156
-rw-r--r--drivers/staging/gdm72xx/netlink_k.h25
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.c158
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.h21
-rw-r--r--drivers/staging/gdm72xx/usb_boot.c363
-rw-r--r--drivers/staging/gdm72xx/usb_boot.h22
-rw-r--r--drivers/staging/gdm72xx/usb_ids.h86
-rw-r--r--drivers/staging/gdm72xx/wm_ioctl.h101
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c18
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c50
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c15
-rw-r--r--drivers/staging/i4l/Documentation/README.act2000104
-rw-r--r--drivers/staging/i4l/Documentation/README.icn148
-rw-r--r--drivers/staging/i4l/Documentation/README.pcbit40
-rw-r--r--drivers/staging/i4l/Documentation/README.sc281
-rw-r--r--drivers/staging/i4l/Kconfig13
-rw-r--r--drivers/staging/i4l/Makefile5
-rw-r--r--drivers/staging/i4l/TODO3
-rw-r--r--drivers/staging/i4l/act2000/Kconfig (renamed from drivers/isdn/act2000/Kconfig)0
-rw-r--r--drivers/staging/i4l/act2000/Makefile (renamed from drivers/isdn/act2000/Makefile)0
-rw-r--r--drivers/staging/i4l/act2000/act2000.h (renamed from drivers/isdn/act2000/act2000.h)0
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c (renamed from drivers/isdn/act2000/act2000_isa.c)0
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.h (renamed from drivers/isdn/act2000/act2000_isa.h)0
-rw-r--r--drivers/staging/i4l/act2000/capi.c (renamed from drivers/isdn/act2000/capi.c)0
-rw-r--r--drivers/staging/i4l/act2000/capi.h (renamed from drivers/isdn/act2000/capi.h)0
-rw-r--r--drivers/staging/i4l/act2000/module.c (renamed from drivers/isdn/act2000/module.c)0
-rw-r--r--drivers/staging/i4l/icn/Kconfig (renamed from drivers/isdn/icn/Kconfig)0
-rw-r--r--drivers/staging/i4l/icn/Makefile (renamed from drivers/isdn/icn/Makefile)0
-rw-r--r--drivers/staging/i4l/icn/icn.c (renamed from drivers/isdn/icn/icn.c)2
-rw-r--r--drivers/staging/i4l/icn/icn.h (renamed from drivers/isdn/icn/icn.h)0
-rw-r--r--drivers/staging/i4l/pcbit/Kconfig (renamed from drivers/isdn/pcbit/Kconfig)0
-rw-r--r--drivers/staging/i4l/pcbit/Makefile (renamed from drivers/isdn/pcbit/Makefile)0
-rw-r--r--drivers/staging/i4l/pcbit/callbacks.c (renamed from drivers/isdn/pcbit/callbacks.c)0
-rw-r--r--drivers/staging/i4l/pcbit/callbacks.h (renamed from drivers/isdn/pcbit/callbacks.h)0
-rw-r--r--drivers/staging/i4l/pcbit/capi.c (renamed from drivers/isdn/pcbit/capi.c)0
-rw-r--r--drivers/staging/i4l/pcbit/capi.h (renamed from drivers/isdn/pcbit/capi.h)0
-rw-r--r--drivers/staging/i4l/pcbit/drv.c (renamed from drivers/isdn/pcbit/drv.c)0
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c (renamed from drivers/isdn/pcbit/edss1.c)0
-rw-r--r--drivers/staging/i4l/pcbit/edss1.h (renamed from drivers/isdn/pcbit/edss1.h)0
-rw-r--r--drivers/staging/i4l/pcbit/layer2.c (renamed from drivers/isdn/pcbit/layer2.c)0
-rw-r--r--drivers/staging/i4l/pcbit/layer2.h (renamed from drivers/isdn/pcbit/layer2.h)0
-rw-r--r--drivers/staging/i4l/pcbit/module.c (renamed from drivers/isdn/pcbit/module.c)0
-rw-r--r--drivers/staging/i4l/pcbit/pcbit.h (renamed from drivers/isdn/pcbit/pcbit.h)0
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio-light28
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/Makefile1
-rw-r--r--drivers/staging/iio/TODO8
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h15
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c4
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c3
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c3
-rw-r--r--drivers/staging/iio/adc/Kconfig18
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c78
-rw-r--r--drivers/staging/iio/adc/ad7280a.c4
-rw-r--r--drivers/staging/iio/adc/ad7606.h10
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c53
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c32
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c32
-rw-r--r--drivers/staging/iio/adc/ad7816.c4
-rw-r--r--drivers/staging/iio/adc/spear_adc.c33
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c15
-rw-r--r--drivers/staging/iio/cdc/ad7150.c34
-rw-r--r--drivers/staging/iio/cdc/ad7746.c4
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c8
-rw-r--r--drivers/staging/iio/light/isl29018.c75
-rw-r--r--drivers/staging/iio/light/isl29028.c31
-rw-r--r--drivers/staging/iio/light/tsl2583.c92
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c8
-rw-r--r--drivers/staging/iio/magnetometer/Kconfig40
-rw-r--r--drivers/staging/iio/magnetometer/Makefile7
-rw-r--r--drivers/staging/iio/meter/ade7754.c5
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c5
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c6
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c7
-rw-r--r--drivers/staging/iio/meter/ade7854.c25
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c12
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c39
-rw-r--r--drivers/staging/iio/trigger/Kconfig10
-rw-r--r--drivers/staging/iio/trigger/Makefile1
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c216
-rw-r--r--drivers/staging/lustre/Kconfig4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h61
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h7
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_string.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h3
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h23
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-dlc.h122
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h129
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h103
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetctl.h104
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h104
-rw-r--r--drivers/staging/lustre/include/linux/lnet/nidstr.h9
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h9
-rw-r--r--drivers/staging/lustre/include/linux/lnet/types.h51
-rw-r--r--drivers/staging/lustre/lnet/Kconfig14
-rw-r--r--drivers/staging/lustre/lnet/Makefile2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c705
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h159
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c1048
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c8
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c631
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h31
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c789
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c197
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c11
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c175
-rw-r--r--drivers/staging/lustre/lnet/libcfs/Makefile (renamed from drivers/staging/lustre/lustre/libcfs/Makefile)7
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c (renamed from drivers/staging/lustre/lustre/libcfs/debug.c)35
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c (renamed from drivers/staging/lustre/lustre/libcfs/fail.c)3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c (renamed from drivers/staging/lustre/lustre/libcfs/hash.c)153
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c (renamed from drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c)19
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c (renamed from drivers/staging/lustre/lustre/libcfs/libcfs_lock.c)12
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c (renamed from drivers/staging/lustre/lustre/libcfs/libcfs_mem.c)14
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c (renamed from drivers/staging/lustre/lustre/libcfs/libcfs_string.c)77
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c)78
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c)0
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c)117
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h)0
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c)7
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c)23
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c)2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-module.c)85
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c)0
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c (renamed from drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c)31
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c (renamed from drivers/staging/lustre/lustre/libcfs/module.c)303
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c (renamed from drivers/staging/lustre/lustre/libcfs/prng.c)13
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c (renamed from drivers/staging/lustre/lustre/libcfs/tracefile.c)181
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h (renamed from drivers/staging/lustre/lustre/libcfs/tracefile.h)93
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c (renamed from drivers/staging/lustre/lustre/libcfs/workitem.c)50
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile2
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c113
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c1534
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c315
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c82
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c105
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c21
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c725
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c114
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c258
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c149
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c14
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c121
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c1025
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c119
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c235
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c677
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c189
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c208
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c363
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c350
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h63
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c537
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h181
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c482
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c32
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c55
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c453
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h184
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h332
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c25
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.h14
-rw-r--r--drivers/staging/lustre/lustre/Kconfig4
-rw-r--r--drivers/staging/lustre/lustre/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c32
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c16
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c33
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h35
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c69
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c14
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h491
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h7
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h18
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h57
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h24
-rw-r--r--drivers/staging/lustre/lustre/include/lu_ref.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h63
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h797
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h101
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h20
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h252
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h133
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h55
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h91
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h20
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h33
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_kernelcomm.h55
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h27
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lite.h50
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h15
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h20
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h86
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h13
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h128
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_ver.h18
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h132
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h21
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h182
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h21
-rw-r--r--drivers/staging/lustre/lustre/include/uapi_kernelcomm.h (renamed from drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h)72
-rw-r--r--drivers/staging/lustre/lustre/lclient/glimpse.c3
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c81
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_misc.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c25
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c64
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h15
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c107
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c184
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c91
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c19
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c235
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c73
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c41
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c175
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c457
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c39
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h175
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c228
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c54
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_rmtacl.c18
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c42
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c51
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c106
-rw-r--r--drivers/staging/lustre/lustre/llite/remote_perm.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c136
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c107
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c112
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c20
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h13
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c80
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c51
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c45
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c22
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h10
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c303
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h93
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c48
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c32
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c135
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c203
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c117
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c20
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c36
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c9
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c49
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c64
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c7
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c14
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c34
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h16
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c15
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c176
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c40
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c204
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c183
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/acl.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c161
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c98
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c56
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c113
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c57
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c109
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c (renamed from drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c)80
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c30
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c41
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c243
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c110
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c15
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c66
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c114
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c499
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_internal.h5
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c69
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c371
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h41
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c10
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c32
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c140
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c213
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c39
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c365
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c132
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c5
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c68
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c176
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c145
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c28
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c60
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c76
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c133
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c93
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c35
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c34
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c91
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c17
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c33
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c22
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c309
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c272
-rw-r--r--drivers/staging/media/Kconfig6
-rw-r--r--drivers/staging/media/Makefile4
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c7
-rw-r--r--drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h4
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c11
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c14
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h7
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c7
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c24
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c20
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c100
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/mn88472/mn88472.c12
-rw-r--r--drivers/staging/media/mn88473/Kconfig7
-rw-r--r--drivers/staging/media/mn88473/Makefile5
-rw-r--r--drivers/staging/media/mn88473/TODO21
-rw-r--r--drivers/staging/media/mx2/Kconfig15
-rw-r--r--drivers/staging/media/mx2/Makefile3
-rw-r--r--drivers/staging/media/mx2/TODO10
-rw-r--r--drivers/staging/media/mx2/mx2_camera.c (renamed from drivers/media/platform/soc_camera/mx2_camera.c)0
-rw-r--r--drivers/staging/media/mx3/Kconfig15
-rw-r--r--drivers/staging/media/mx3/Makefile3
-rw-r--r--drivers/staging/media/mx3/TODO10
-rw-r--r--drivers/staging/media/mx3/mx3_camera.c (renamed from drivers/media/platform/soc_camera/mx3_camera.c)12
-rw-r--r--drivers/staging/media/omap1/Kconfig13
-rw-r--r--drivers/staging/media/omap1/Makefile3
-rw-r--r--drivers/staging/media/omap1/TODO8
-rw-r--r--drivers/staging/media/omap1/omap1_camera.c (renamed from drivers/media/platform/soc_camera/omap1_camera.c)0
-rw-r--r--drivers/staging/media/omap4iss/iss.c213
-rw-r--r--drivers/staging/media/omap4iss/iss.h6
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c15
-rw-r--r--drivers/staging/media/omap4iss/iss_video.h1
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c374
-rw-r--r--drivers/staging/most/aim-network/networking.c11
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.c4
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.h7
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c80
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.h2
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_sysfs.c4
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c21
-rw-r--r--drivers/staging/most/mostcore/core.c194
-rw-r--r--drivers/staging/most/mostcore/mostcore.h3
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c87
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.h1
-rw-r--r--drivers/staging/netlogic/platform_net.c14
-rw-r--r--drivers/staging/netlogic/xlr_net.c201
-rw-r--r--drivers/staging/netlogic/xlr_net.h978
-rw-r--r--drivers/staging/nvec/TODO2
-rw-r--r--drivers/staging/nvec/nvec.c99
-rw-r--r--drivers/staging/nvec/nvec.h5
-rw-r--r--drivers/staging/nvec/nvec_paz00.c13
-rw-r--r--drivers/staging/nvec/nvec_power.c14
-rw-r--r--drivers/staging/nvec/nvec_ps2.c2
-rw-r--r--drivers/staging/octeon-usb/TODO7
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c1094
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h536
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c28
-rw-r--r--drivers/staging/octeon/ethernet-mem.c4
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c291
-rw-r--r--drivers/staging/octeon/ethernet-rx.c60
-rw-r--r--drivers/staging/octeon/ethernet-spi.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c62
-rw-r--r--drivers/staging/octeon/ethernet.c68
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h5
-rw-r--r--drivers/staging/panel/Kconfig278
-rw-r--r--drivers/staging/panel/Makefile1
-rw-r--r--drivers/staging/panel/TODO8
-rw-r--r--drivers/staging/panel/lcd-panel-cgram.txt24
-rw-r--r--drivers/staging/rdma/hfi1/Kconfig14
-rw-r--r--drivers/staging/rdma/hfi1/Makefile10
-rw-r--r--drivers/staging/rdma/hfi1/affinity.c430
-rw-r--r--drivers/staging/rdma/hfi1/affinity.h91
-rw-r--r--drivers/staging/rdma/hfi1/aspm.h309
-rw-r--r--drivers/staging/rdma/hfi1/chip.c2462
-rw-r--r--drivers/staging/rdma/hfi1/chip.h151
-rw-r--r--drivers/staging/rdma/hfi1/chip_registers.h20
-rw-r--r--drivers/staging/rdma/hfi1/common.h12
-rw-r--r--drivers/staging/rdma/hfi1/debugfs.c332
-rw-r--r--drivers/staging/rdma/hfi1/debugfs.h5
-rw-r--r--drivers/staging/rdma/hfi1/device.c5
-rw-r--r--drivers/staging/rdma/hfi1/device.h5
-rw-r--r--drivers/staging/rdma/hfi1/diag.c105
-rw-r--r--drivers/staging/rdma/hfi1/dma.c17
-rw-r--r--drivers/staging/rdma/hfi1/driver.c348
-rw-r--r--drivers/staging/rdma/hfi1/efivar.c11
-rw-r--r--drivers/staging/rdma/hfi1/efivar.h5
-rw-r--r--drivers/staging/rdma/hfi1/eprom.c117
-rw-r--r--drivers/staging/rdma/hfi1/eprom.h7
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c557
-rw-r--r--drivers/staging/rdma/hfi1/firmware.c587
-rw-r--r--drivers/staging/rdma/hfi1/hfi.h259
-rw-r--r--drivers/staging/rdma/hfi1/init.c192
-rw-r--r--drivers/staging/rdma/hfi1/intr.c29
-rw-r--r--drivers/staging/rdma/hfi1/iowait.h126
-rw-r--r--drivers/staging/rdma/hfi1/keys.c356
-rw-r--r--drivers/staging/rdma/hfi1/mad.c1048
-rw-r--r--drivers/staging/rdma/hfi1/mad.h14
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.c292
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.h73
-rw-r--r--drivers/staging/rdma/hfi1/mr.c473
-rw-r--r--drivers/staging/rdma/hfi1/opa_compat.h20
-rw-r--r--drivers/staging/rdma/hfi1/pcie.c278
-rw-r--r--drivers/staging/rdma/hfi1/pio.c365
-rw-r--r--drivers/staging/rdma/hfi1/pio.h118
-rw-r--r--drivers/staging/rdma/hfi1/pio_copy.c73
-rw-r--r--drivers/staging/rdma/hfi1/platform.c893
-rw-r--r--drivers/staging/rdma/hfi1/platform.h (renamed from drivers/staging/rdma/hfi1/platform_config.h)58
-rw-r--r--drivers/staging/rdma/hfi1/qp.c1642
-rw-r--r--drivers/staging/rdma/hfi1/qp.h198
-rw-r--r--drivers/staging/rdma/hfi1/qsfp.c270
-rw-r--r--drivers/staging/rdma/hfi1/qsfp.h57
-rw-r--r--drivers/staging/rdma/hfi1/rc.c765
-rw-r--r--drivers/staging/rdma/hfi1/ruc.c373
-rw-r--r--drivers/staging/rdma/hfi1/sdma.c365
-rw-r--r--drivers/staging/rdma/hfi1/sdma.h116
-rw-r--r--drivers/staging/rdma/hfi1/sdma_txreq.h135
-rw-r--r--drivers/staging/rdma/hfi1/sysfs.c136
-rw-r--r--drivers/staging/rdma/hfi1/trace.c53
-rw-r--r--drivers/staging/rdma/hfi1/trace.h1477
-rw-r--r--drivers/staging/rdma/hfi1/twsi.c205
-rw-r--r--drivers/staging/rdma/hfi1/twsi.h9
-rw-r--r--drivers/staging/rdma/hfi1/uc.c164
-rw-r--r--drivers/staging/rdma/hfi1/ud.c250
-rw-r--r--drivers/staging/rdma/hfi1/user_exp_rcv.c1044
-rw-r--r--drivers/staging/rdma/hfi1/user_exp_rcv.h13
-rw-r--r--drivers/staging/rdma/hfi1/user_pages.c74
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.c634
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.h11
-rw-r--r--drivers/staging/rdma/hfi1/verbs.c1667
-rw-r--r--drivers/staging/rdma/hfi1/verbs.h824
-rw-r--r--drivers/staging/rdma/hfi1/verbs_txreq.c149
-rw-r--r--drivers/staging/rdma/hfi1/verbs_txreq.h116
-rw-r--r--drivers/staging/rtl8188eu/Makefile2
-rw-r--r--drivers/staging/rtl8188eu/TODO2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c137
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c15
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c92
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_iol.c13
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c25
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c22
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c77
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c17
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c28
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c30
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c91
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c26
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c50
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c26
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c15
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c3
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h4
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h29
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h8
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211_ext.h290
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h9
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h2
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h214
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseqcmd.h28
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h1
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c9
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c8
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c14
-rw-r--r--drivers/staging/rtl8192e/dot11d.h3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c56
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c14
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib.h3
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c102
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c48
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c63
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c4
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/dot11d.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c92
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c49
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c26
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c79
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c14
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c24
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c45
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c65
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c38
-rw-r--r--drivers/staging/rtl8192u/r819xU_phy.c3
-rw-r--r--drivers/staging/rtl8712/TODO12
-rw-r--r--drivers/staging/rtl8712/drv_types.h1
-rw-r--r--drivers/staging/rtl8712/ieee80211.c2
-rw-r--r--drivers/staging/rtl8712/ieee80211.h112
-rw-r--r--drivers/staging/rtl8712/os_intfs.c3
-rw-r--r--drivers/staging/rtl8712/recv_linux.c7
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c8
-rw-r--r--drivers/staging/rtl8712/rtl8712_io.c77
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c60
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_io.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c10
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_rtl.c45
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c24
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c12
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp_ioctl.h118
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c27
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c45
-rw-r--r--drivers/staging/rtl8712/usb_intf.c22
-rw-r--r--drivers/staging/rtl8712/usb_ops.c26
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c9
-rw-r--r--drivers/staging/rtl8712/xmit_linux.c17
-rw-r--r--drivers/staging/rtl8723au/TODO5
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c193
-rw-r--r--drivers/staging/rtl8723au/core/rtw_cmd.c3
-rw-r--r--drivers/staging/rtl8723au/core/rtw_efuse.c4
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme.c49
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme_ext.c36
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c169
-rw-r--r--drivers/staging/rtl8723au/core/rtw_security.c8
-rw-r--r--drivers/staging/rtl8723au/core/rtw_sta_mgt.c44
-rw-r--r--drivers/staging/rtl8723au/core/rtw_xmit.c116
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c6
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c2
-rw-r--r--drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c2
-rw-r--r--drivers/staging/rtl8723au/hal/hal_com.c2
-rw-r--r--drivers/staging/rtl8723au/hal/odm.c8
-rw-r--r--drivers/staging/rtl8723au/hal/odm_HWConfig.c4
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c46
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_cmd.c6
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c16
-rw-r--r--drivers/staging/rtl8723au/hal/usb_halinit.c9
-rw-r--r--drivers/staging/rtl8723au/hal/usb_ops_linux.c6
-rw-r--r--drivers/staging/rtl8723au/include/odm_HWConfig.h2
-rw-r--r--drivers/staging/rtl8723au/include/osdep_service.h6
-rw-r--r--drivers/staging/rtl8723au/include/rtl8723a_cmd.h2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_ap.h3
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c43
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_ops_linux.c9
-rw-r--r--drivers/staging/rtl8723au/os_dep/xmit_linux.c2
-rw-r--r--drivers/staging/rts5208/ms.c140
-rw-r--r--drivers/staging/rts5208/rtsx.c20
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c31
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c163
-rw-r--r--drivers/staging/rts5208/sd.c28
-rw-r--r--drivers/staging/rts5208/spi.c4
-rw-r--r--drivers/staging/rts5208/xd.c40
-rw-r--r--drivers/staging/skein/threefish_block.c2145
-rw-r--r--drivers/staging/slicoss/slic.h2
-rw-r--r--drivers/staging/slicoss/slicoss.c80
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c132
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c200
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c38
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.h3
-rw-r--r--drivers/staging/sm750fb/ddk750_help.h1
-rw-r--r--drivers/staging/sm750fb/ddk750_hwi2c.c38
-rw-r--r--drivers/staging/sm750fb/ddk750_mode.c161
-rw-r--r--drivers/staging/sm750fb/ddk750_power.c54
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h11
-rw-r--r--drivers/staging/sm750fb/ddk750_reg.h3002
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c6
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h5
-rw-r--r--drivers/staging/sm750fb/sm750.c204
-rw-r--r--drivers/staging/sm750fb/sm750.h4
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c148
-rw-r--r--drivers/staging/sm750fb/sm750_accel.h278
-rw-r--r--drivers/staging/sm750fb/sm750_cursor.c62
-rw-r--r--drivers/staging/sm750fb/sm750_help.h56
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c211
-rw-r--r--drivers/staging/speakup/buffers.c4
-rw-r--r--drivers/staging/speakup/devsynth.c4
-rw-r--r--drivers/staging/speakup/fakekey.c6
-rw-r--r--drivers/staging/speakup/i18n.c5
-rw-r--r--drivers/staging/speakup/keyhelp.c22
-rw-r--r--drivers/staging/speakup/kobjects.c54
-rw-r--r--drivers/staging/speakup/main.c35
-rw-r--r--drivers/staging/speakup/serialio.c10
-rw-r--r--drivers/staging/speakup/speakup_acntpc.c5
-rw-r--r--drivers/staging/speakup/speakup_acntsa.c4
-rw-r--r--drivers/staging/speakup/speakup_apollo.c4
-rw-r--r--drivers/staging/speakup/speakup_audptr.c4
-rw-r--r--drivers/staging/speakup/speakup_bns.c4
-rw-r--r--drivers/staging/speakup/speakup_decext.c28
-rw-r--r--drivers/staging/speakup/speakup_decpc.c4
-rw-r--r--drivers/staging/speakup/speakup_dectlk.c4
-rw-r--r--drivers/staging/speakup/speakup_dtlk.c4
-rw-r--r--drivers/staging/speakup/speakup_dummy.c4
-rw-r--r--drivers/staging/speakup/speakup_keypc.c4
-rw-r--r--drivers/staging/speakup/speakup_ltlk.c4
-rw-r--r--drivers/staging/speakup/speakup_soft.c3
-rw-r--r--drivers/staging/speakup/speakup_spkout.c4
-rw-r--r--drivers/staging/speakup/speakup_txprt.c4
-rw-r--r--drivers/staging/speakup/spk_priv.h4
-rw-r--r--drivers/staging/speakup/spk_priv_keyinfo.h4
-rw-r--r--drivers/staging/speakup/spkguide.txt1
-rw-r--r--drivers/staging/speakup/synth.c2
-rw-r--r--drivers/staging/speakup/varhandlers.c50
-rw-r--r--drivers/staging/staging.c19
-rw-r--r--drivers/staging/ste_rmi4/Kconfig9
-rw-r--r--drivers/staging/ste_rmi4/Makefile4
-rw-r--r--drivers/staging/ste_rmi4/TODO7
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c1140
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h46
-rw-r--r--drivers/staging/unisys/MAINTAINERS2
-rw-r--r--drivers/staging/unisys/include/guestlinuxdebug.h13
-rw-r--r--drivers/staging/unisys/include/iochannel.h14
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h237
-rw-r--r--drivers/staging/unisys/visorbus/vbusdeviceinfo.h7
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c97
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c22
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c165
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c8
-rw-r--r--drivers/staging/unisys/visorinput/Kconfig2
-rw-r--r--drivers/staging/unisys/visorinput/ultrainputreport.h45
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c10
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c68
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c24
-rw-r--r--drivers/staging/vt6655/card.c22
-rw-r--r--drivers/staging/vt6655/channel.c67
-rw-r--r--drivers/staging/vt6655/channel.h2
-rw-r--r--drivers/staging/vt6655/device_main.c31
-rw-r--r--drivers/staging/vt6655/key.c8
-rw-r--r--drivers/staging/vt6655/mac.c503
-rw-r--r--drivers/staging/vt6655/mac.h84
-rw-r--r--drivers/staging/vt6655/power.c66
-rw-r--r--drivers/staging/vt6655/power.h8
-rw-r--r--drivers/staging/vt6655/rf.c178
-rw-r--r--drivers/staging/vt6655/rxtx.c12
-rw-r--r--drivers/staging/vt6656/device.h4
-rw-r--r--drivers/staging/vt6656/main_usb.c30
-rw-r--r--drivers/staging/vt6656/power.c2
-rw-r--r--drivers/staging/vt6656/rf.c4
-rw-r--r--drivers/staging/vt6656/rxtx.c12
-rw-r--r--drivers/staging/vt6656/usbpipe.c21
-rw-r--r--drivers/staging/wilc1000/Makefile6
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.c426
-rw-r--r--drivers/staging/wilc1000/coreconfigurator.h97
-rw-r--r--drivers/staging/wilc1000/host_interface.c2081
-rw-r--r--drivers/staging/wilc1000/host_interface.h135
-rw-r--r--drivers/staging/wilc1000/linux_mon.c134
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c514
-rw-r--r--drivers/staging/wilc1000/linux_wlan_common.h166
-rw-r--r--drivers/staging/wilc1000/wilc_debugfs.c53
-rw-r--r--drivers/staging/wilc1000/wilc_msgqueue.c143
-rw-r--r--drivers/staging/wilc1000/wilc_msgqueue.h110
-rw-r--r--drivers/staging/wilc1000/wilc_sdio.c152
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c51
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c1156
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.h1
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h17
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c583
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h25
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c91
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.h18
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h31
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c41
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h5
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c171
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c132
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c8
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c10
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c6
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h1
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c36
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c31
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c20
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c26
-rw-r--r--drivers/staging/xgifb/vb_def.h1
-rw-r--r--drivers/staging/xgifb/vb_init.c3
-rw-r--r--drivers/staging/xgifb/vb_setmode.c43
-rw-r--r--drivers/staging/xgifb/vb_struct.h2
-rw-r--r--drivers/staging/xgifb/vb_table.h3
-rw-r--r--drivers/staging/xgifb/vgatypes.h7
-rw-r--r--drivers/target/iscsi/iscsi_target.c86
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c98
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c93
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c45
-rw-r--r--drivers/target/loopback/tcm_loop.c46
-rw-r--r--drivers/target/sbp/sbp_target.c95
-rw-r--r--drivers/target/target_core_configfs.c203
-rw-r--r--drivers/target/target_core_device.c41
-rw-r--r--drivers/target/target_core_fabric_configfs.c250
-rw-r--r--drivers/target/target_core_iblock.c34
-rw-r--r--drivers/target/target_core_internal.h9
-rw-r--r--drivers/target/target_core_spc.c3
-rw-r--r--drivers/target/target_core_stat.c41
-rw-r--r--drivers/target/target_core_tmr.c1
-rw-r--r--drivers/target/target_core_tpg.c21
-rw-r--r--drivers/target/target_core_transport.c66
-rw-r--r--drivers/target/target_core_user.c267
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c20
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c44
-rw-r--r--drivers/thermal/Kconfig21
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/intel_pch_thermal.c6
-rw-r--r--drivers/thermal/mtk_thermal.c625
-rw-r--r--drivers/thermal/of-thermal.c81
-rw-r--r--drivers/thermal/rcar_thermal.c3
-rw-r--r--drivers/thermal/rockchip_thermal.c239
-rw-r--r--drivers/thermal/samsung/Kconfig1
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c19
-rw-r--r--drivers/thermal/tegra_soctherm.c2
-rw-r--r--drivers/thermal/thermal_core.c13
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c10
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/amiserial.c28
-rw-r--r--drivers/tty/cyclades.c22
-rw-r--r--drivers/tty/ehv_bytechan.c40
-rw-r--r--drivers/tty/goldfish.c42
-rw-r--r--drivers/tty/hvc/hvc_vio.c29
-rw-r--r--drivers/tty/hvc/hvc_xen.c81
-rw-r--r--drivers/tty/isicom.c3
-rw-r--r--drivers/tty/mxser.c21
-rw-r--r--drivers/tty/n_gsm.c22
-rw-r--r--drivers/tty/n_hdlc.c19
-rw-r--r--drivers/tty/n_tty.c117
-rw-r--r--drivers/tty/nozomi.c2
-rw-r--r--drivers/tty/pty.c93
-rw-r--r--drivers/tty/rocket.c18
-rw-r--r--drivers/tty/rocket_int.h1
-rw-r--r--drivers/tty/serial/68328serial.c1322
-rw-r--r--drivers/tty/serial/8250/8250.h14
-rw-r--r--drivers/tty/serial/8250/8250_accent.c13
-rw-r--r--drivers/tty/serial/8250/8250_acorn.c2
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c146
-rw-r--r--drivers/tty/serial/8250/8250_boca.c41
-rw-r--r--drivers/tty/serial/8250/8250_core.c30
-rw-r--r--drivers/tty/serial/8250/8250_dw.c137
-rw-r--r--drivers/tty/serial/8250/8250_early.c38
-rw-r--r--drivers/tty/serial/8250/8250_exar_st16c554.c17
-rw-r--r--drivers/tty/serial/8250/8250_fourport.c28
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c7
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c27
-rw-r--r--drivers/tty/serial/8250/8250_hub6.c2
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c14
-rw-r--r--drivers/tty/serial/8250/8250_moxa.c157
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c51
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_omap.c40
-rw-r--r--drivers/tty/serial/8250/8250_pci.c147
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c48
-rw-r--r--drivers/tty/serial/8250/8250_port.c462
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c2
-rw-r--r--drivers/tty/serial/8250/Kconfig51
-rw-r--r--drivers/tty/serial/8250/Makefile2
-rw-r--r--drivers/tty/serial/8250/serial_cs.c90
-rw-r--r--drivers/tty/serial/Kconfig42
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c24
-rw-r--r--drivers/tty/serial/arc_uart.c1
-rw-r--r--drivers/tty/serial/atmel_serial.c180
-rw-r--r--drivers/tty/serial/clps711x.c10
-rw-r--r--drivers/tty/serial/crisv10.c33
-rw-r--r--drivers/tty/serial/digicolor-usart.c9
-rw-r--r--drivers/tty/serial/earlycon.c118
-rw-r--r--drivers/tty/serial/ifx6x60.c3
-rw-r--r--drivers/tty/serial/imx.c5
-rw-r--r--drivers/tty/serial/jsm/jsm_tty.c13
-rw-r--r--drivers/tty/serial/m32r_sio.c134
-rw-r--r--drivers/tty/serial/m32r_sio.h49
-rw-r--r--drivers/tty/serial/meson_uart.c8
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c8
-rw-r--r--drivers/tty/serial/mpsc.c178
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c650
-rw-r--r--drivers/tty/serial/omap-serial.c2
-rw-r--r--drivers/tty/serial/samsung.c24
-rw-r--r--drivers/tty/serial/sc16is7xx.c24
-rw-r--r--drivers/tty/serial/serial_core.c125
-rw-r--r--drivers/tty/serial/serial_ks8695.c2
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c3
-rw-r--r--drivers/tty/serial/sh-sci.c178
-rw-r--r--drivers/tty/serial/sh-sci.h15
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/tty/serial/uartlite.c61
-rw-r--r--drivers/tty/serial/xilinx_uartps.c543
-rw-r--r--drivers/tty/serial/zs.c4
-rw-r--r--drivers/tty/synclink.c23
-rw-r--r--drivers/tty/synclink_gt.c19
-rw-r--r--drivers/tty/synclinkmp.c63
-rw-r--r--drivers/tty/tty_audit.c237
-rw-r--r--drivers/tty/tty_buffer.c39
-rw-r--r--drivers/tty/tty_io.c266
-rw-r--r--drivers/tty/tty_ioctl.c12
-rw-r--r--drivers/tty/tty_ldisc.c198
-rw-r--r--drivers/tty/tty_mutex.c8
-rw-r--r--drivers/tty/tty_port.c11
-rw-r--r--drivers/tty/vt/keyboard.c14
-rw-r--r--drivers/tty/vt/selection.c2
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c13
-rw-r--r--drivers/usb/chipidea/core.c3
-rw-r--r--drivers/usb/chipidea/debug.c5
-rw-r--r--drivers/usb/chipidea/otg_fsm.c29
-rw-r--r--drivers/usb/chipidea/otg_fsm.h2
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/class/cdc-acm.c74
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/usbtmc.c331
-rw-r--r--drivers/usb/common/common.c23
-rw-r--r--drivers/usb/common/usb-otg-fsm.c87
-rw-r--r--drivers/usb/core/Makefile2
-rw-r--r--drivers/usb/core/buffer.c18
-rw-r--r--drivers/usb/core/config.c37
-rw-r--r--drivers/usb/core/devices.c26
-rw-r--r--drivers/usb/core/devio.c301
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/usb/core/file.c9
-rw-r--r--drivers/usb/core/hcd-pci.c12
-rw-r--r--drivers/usb/core/hcd.c114
-rw-r--r--drivers/usb/core/hub.c122
-rw-r--r--drivers/usb/core/hub.h7
-rw-r--r--drivers/usb/core/of.c47
-rw-r--r--drivers/usb/core/sysfs.c68
-rw-r--r--drivers/usb/core/urb.c3
-rw-r--r--drivers/usb/core/usb.c16
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/core.c1884
-rw-r--r--drivers/usb/dwc2/core.h151
-rw-r--r--drivers/usb/dwc2/gadget.c125
-rw-r--r--drivers/usb/dwc2/hcd.c2255
-rw-r--r--drivers/usb/dwc2/hcd.h134
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c49
-rw-r--r--drivers/usb/dwc2/hcd_intr.c174
-rw-r--r--drivers/usb/dwc2/hcd_queue.c1941
-rw-r--r--drivers/usb/dwc2/platform.c38
-rw-r--r--drivers/usb/dwc3/core.c79
-rw-r--r--drivers/usb/dwc3/core.h11
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c5
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c9
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/ep0.c9
-rw-r--r--drivers/usb/dwc3/gadget.c41
-rw-r--r--drivers/usb/gadget/composite.c152
-rw-r--r--drivers/usb/gadget/config.c9
-rw-r--r--drivers/usb/gadget/configfs.c37
-rw-r--r--drivers/usb/gadget/function/f_acm.c6
-rw-r--r--drivers/usb/gadget/function/f_ecm.c2
-rw-r--r--drivers/usb/gadget/function/f_eem.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c159
-rw-r--r--drivers/usb/gadget/function/f_hid.c2
-rw-r--r--drivers/usb/gadget/function/f_loopback.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c8
-rw-r--r--drivers/usb/gadget/function/f_midi.c211
-rw-r--r--drivers/usb/gadget/function/f_ncm.c2
-rw-r--r--drivers/usb/gadget/function/f_obex.c3
-rw-r--r--drivers/usb/gadget/function/f_phonet.c2
-rw-r--r--drivers/usb/gadget/function/f_printer.c2
-rw-r--r--drivers/usb/gadget/function/f_rndis.c7
-rw-r--r--drivers/usb/gadget/function/f_serial.c2
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/function/f_subset.c2
-rw-r--r--drivers/usb/gadget/function/f_tcm.c195
-rw-r--r--drivers/usb/gadget/function/f_uac1.c3
-rw-r--r--drivers/usb/gadget/function/f_uac2.c3
-rw-r--r--drivers/usb/gadget/function/rndis.c20
-rw-r--r--drivers/usb/gadget/function/tcm.h2
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c198
-rw-r--r--drivers/usb/gadget/legacy/Kconfig3
-rw-r--r--drivers/usb/gadget/legacy/inode.c32
-rw-r--r--drivers/usb/gadget/udc/Kconfig7
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c36
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c5
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c2
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c103
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c8
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c530
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.h11
-rw-r--r--drivers/usb/gadget/udc/udc-core.c36
-rw-r--r--drivers/usb/host/Kconfig18
-rw-r--r--drivers/usb/host/Makefile3
-rw-r--r--drivers/usb/host/bcma-hcd.c83
-rw-r--r--drivers/usb/host/ehci-atmel.c6
-rw-r--r--drivers/usb/host/ehci-dbg.c477
-rw-r--r--drivers/usb/host/ehci-fsl.c24
-rw-r--r--drivers/usb/host/ehci-hcd.c22
-rw-r--r--drivers/usb/host/ehci-hub.c6
-rw-r--r--drivers/usb/host/ehci-msm.c66
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-platform.c6
-rw-r--r--drivers/usb/host/ehci-q.c104
-rw-r--r--drivers/usb/host/ehci-sched.c524
-rw-r--r--drivers/usb/host/ehci-st.c6
-rw-r--r--drivers/usb/host/ehci-timer.c5
-rw-r--r--drivers/usb/host/ehci.h99
-rw-r--r--drivers/usb/host/fotg210-hcd.c15
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c7
-rw-r--r--drivers/usb/host/max3421-hcd.c16
-rw-r--r--drivers/usb/host/ohci-at91.c10
-rw-r--r--drivers/usb/host/ohci-nxp.c87
-rw-r--r--drivers/usb/host/ohci-platform.c6
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-st.c6
-rw-r--r--drivers/usb/host/ohci.h2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c15
-rw-r--r--drivers/usb/host/pci-quirks.c3
-rw-r--r--drivers/usb/host/r8a66597-hcd.c11
-rw-r--r--drivers/usb/host/u132-hcd.c18
-rw-r--r--drivers/usb/host/xhci-hub.c27
-rw-r--r--drivers/usb/host/xhci-mem.c186
-rw-r--r--drivers/usb/host/xhci-mtk.c10
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-plat.c19
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-ring.c137
-rw-r--r--drivers/usb/host/xhci.c32
-rw-r--r--drivers/usb/host/xhci.h17
-rw-r--r--drivers/usb/misc/chaoskey.c122
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1543
-rw-r--r--drivers/usb/mon/mon_main.c9
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musbhsdma.c8
-rw-r--r--drivers/usb/musb/sunxi.c1
-rw-r--r--drivers/usb/musb/tusb6010_omap.c4
-rw-r--r--drivers/usb/musb/ux500_dma.c3
-rw-r--r--drivers/usb/phy/phy-am335x.c1
-rw-r--r--drivers/usb/phy/phy-generic.c11
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c72
-rw-r--r--drivers/usb/renesas_usbhs/Kconfig2
-rw-r--r--drivers/usb/renesas_usbhs/Makefile2
-rw-r--r--drivers/usb/renesas_usbhs/common.c14
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c24
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h20
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c8
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c6
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h2
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c54
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.h3
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/console.c6
-rw-r--r--drivers/usb/serial/cp210x.c307
-rw-r--r--drivers/usb/serial/cyberjack.c3
-rw-r--r--drivers/usb/serial/cypress_m8.c14
-rw-r--r--drivers/usb/serial/digi_acceleport.c27
-rw-r--r--drivers/usb/serial/ftdi_sio.c14
-rw-r--r--drivers/usb/serial/ftdi_sio.h8
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h8
-rw-r--r--drivers/usb/serial/garmin_gps.c51
-rw-r--r--drivers/usb/serial/io_edgeport.c4
-rw-r--r--drivers/usb/serial/iuu_phoenix.c4
-rw-r--r--drivers/usb/serial/keyspan.c2
-rw-r--r--drivers/usb/serial/kl5kusb105.c3
-rw-r--r--drivers/usb/serial/mct_u232.c11
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/mos7840.c8
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/quatech2.c2
-rw-r--r--drivers/usb/serial/safe_serial.c11
-rw-r--r--drivers/usb/storage/debug.c12
-rw-r--r--drivers/usb/storage/debug.h3
-rw-r--r--drivers/usb/storage/ene_ub6250.c4
-rw-r--r--drivers/usb/storage/scsiglue.c2
-rw-r--r--drivers/usb/storage/sddr09.c18
-rw-r--r--drivers/usb/storage/uas.c59
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/usb/usbip/usbip_common.c11
-rw-r--r--drivers/usb/usbip/usbip_event.c5
-rw-r--r--drivers/usb/usbip/usbip_protocol.txt358
-rw-r--r--drivers/usb/usbip/vhci_hcd.c88
-rw-r--r--drivers/usb/usbip/vhci_rx.c30
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c19
-rw-r--r--drivers/usb/usbip/vhci_tx.c14
-rw-r--r--drivers/usb/wusbcore/crypto.c30
-rw-r--r--drivers/usb/wusbcore/wusbhc.h2
-rw-r--r--drivers/vfio/pci/Kconfig4
-rw-r--r--drivers/vfio/pci/Makefile1
-rw-r--r--drivers/vfio/pci/vfio_pci.c175
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c45
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c280
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c17
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h39
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c9
-rw-r--r--drivers/vfio/vfio.c70
-rw-r--r--drivers/vhost/net.c80
-rw-r--r--drivers/vhost/scsi.c101
-rw-r--r--drivers/vhost/test.c2
-rw-r--r--drivers/vhost/vhost.c69
-rw-r--r--drivers/vhost/vhost.h5
-rw-r--r--drivers/video/fbdev/Kconfig7
-rw-r--r--drivers/video/fbdev/acornfb.c4
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c14
-rw-r--r--drivers/video/fbdev/amba-clcd.c19
-rw-r--r--drivers/video/fbdev/atafb.c3
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c11
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c1
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c1
-rw-r--r--drivers/video/fbdev/au1100fb.c22
-rw-r--r--drivers/video/fbdev/bf537-lq035.c23
-rw-r--r--drivers/video/fbdev/bt431.h43
-rw-r--r--drivers/video/fbdev/bt455.h68
-rw-r--r--drivers/video/fbdev/da8xx-fb.c7
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c8
-rw-r--r--drivers/video/fbdev/exynos/Kconfig6
-rw-r--r--drivers/video/fbdev/exynos/Makefile6
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi.c7
-rw-r--r--drivers/video/fbdev/gbefb.c8
-rw-r--r--drivers/video/fbdev/imsttfb.c1
-rw-r--r--drivers/video/fbdev/imxfb.c12
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h1
-rw-r--r--drivers/video/fbdev/metronomefb.c6
-rw-r--r--drivers/video/fbdev/mx3fb.c9
-rw-r--r--drivers/video/fbdev/n411.c12
-rw-r--r--drivers/video/fbdev/nuc900fb.c8
-rw-r--r--drivers/video/fbdev/offb.c4
-rw-r--r--drivers/video/fbdev/omap/lcd_h3.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_osk.c3
-rw-r--r--drivers/video/fbdev/omap/lcd_palmtt.c2
-rw-r--r--drivers/video/fbdev/omap/lcdc.c16
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c22
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c12
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c602
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c6
-rw-r--r--drivers/video/fbdev/pxa168fb.c8
-rw-r--r--drivers/video/fbdev/pxafb.c4
-rw-r--r--drivers/video/fbdev/s3c-fb.c7
-rw-r--r--drivers/video/fbdev/s3c2410fb.c8
-rw-r--r--drivers/video/fbdev/sa1100fb.c8
-rw-r--r--drivers/video/fbdev/simplefb.c4
-rw-r--r--drivers/video/fbdev/sis/init301.c10
-rw-r--r--drivers/video/fbdev/skeletonfb.c17
-rw-r--r--drivers/video/fbdev/sunxvr1000.c42
-rw-r--r--drivers/video/fbdev/sunxvr2500.c39
-rw-r--r--drivers/video/fbdev/sunxvr500.c42
-rw-r--r--drivers/virt/fsl_hypervisor.c5
-rw-r--r--drivers/virtio/Kconfig2
-rw-r--r--drivers/virtio/virtio_balloon.c128
-rw-r--r--drivers/virtio/virtio_mmio.c67
-rw-r--r--drivers/virtio/virtio_pci_common.c2
-rw-r--r--drivers/virtio/virtio_pci_common.h6
-rw-r--r--drivers/virtio/virtio_pci_legacy.c42
-rw-r--r--drivers/virtio/virtio_pci_modern.c72
-rw-r--r--drivers/virtio/virtio_ring.c439
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c11
-rw-r--r--drivers/w1/masters/omap_hdq.c1
-rw-r--r--drivers/w1/w1.c1
-rw-r--r--drivers/watchdog/Kconfig67
-rw-r--r--drivers/watchdog/Makefile4
-rw-r--r--drivers/watchdog/atlas7_wdt.c5
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c3
-rw-r--r--drivers/watchdog/da9063_wdt.c3
-rw-r--r--drivers/watchdog/digicolor_wdt.c3
-rw-r--r--drivers/watchdog/dw_wdt.c323
-rw-r--r--drivers/watchdog/ebc-c384_wdt.c188
-rw-r--r--drivers/watchdog/hpwdt.c20
-rw-r--r--drivers/watchdog/imgpdc_wdt.c3
-rw-r--r--drivers/watchdog/imx2_wdt.c77
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c3
-rw-r--r--drivers/watchdog/mei_wdt.c724
-rw-r--r--drivers/watchdog/meson_wdt.c3
-rw-r--r--drivers/watchdog/moxart_wdt.c3
-rw-r--r--drivers/watchdog/mtk_wdt.c3
-rw-r--r--drivers/watchdog/ni903x_wdt.c270
-rw-r--r--drivers/watchdog/pnx4008_wdt.c42
-rw-r--r--drivers/watchdog/qcom-wdt.c3
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--drivers/watchdog/s3c2410_wdt.c19
-rw-r--r--drivers/watchdog/sbsa_gwdt.c408
-rw-r--r--drivers/watchdog/sunxi_wdt.c3
-rw-r--r--drivers/watchdog/tangox_wdt.c14
-rw-r--r--drivers/watchdog/w83627hf_wdt.c22
-rw-r--r--drivers/watchdog/watchdog_core.c4
-rw-r--r--drivers/watchdog/watchdog_dev.c207
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--drivers/xen/Kconfig23
-rw-r--r--drivers/xen/balloon.c17
-rw-r--r--drivers/xen/events/events_2l.c6
-rw-r--r--drivers/xen/events/events_base.c30
-rw-r--r--drivers/xen/events/events_fifo.c1
-rw-r--r--drivers/xen/features.c2
-rw-r--r--drivers/xen/grant-table.c1
-rw-r--r--drivers/xen/platform-pci.c22
-rw-r--r--drivers/xen/sys-hypervisor.c59
-rw-r--r--drivers/xen/xen-balloon.c14
-rw-r--r--drivers/xen/xen-pciback/conf_space.c2
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c2
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xen-scsiback.c281
-rw-r--r--drivers/xen/xen-selfballoon.c1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c13
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c13
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c1
-rw-r--r--drivers/xen/xenfs/xensyms.c1
-rw-r--r--drivers/zorro/zorro-sysfs.c3
4170 files changed, 284225 insertions, 142934 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index cb648a49543a..edeb2d1d99be 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -43,6 +43,7 @@ acpi-y += pci_root.o pci_link.o pci_irq.o
acpi-y += acpi_lpss.o acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
+acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
acpi-y += int340x_thermal.o
acpi-y += power.o
acpi-y += event.o
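
A note on the kbuild idiom used above (illustrative; standard kbuild behavior, not part of the patch):

    # With CONFIG_ARM_AMBA=y the new line expands to
    #     acpi-y += acpi_amba.o      (linked into the acpi composite object)
    # With the option unset it expands to
    #     acpi-  += acpi_amba.o      (the "acpi-" list is never referenced,
    #                                  so the file is not built)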
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
new file mode 100644
index 000000000000..2a61b54ab968
--- /dev/null
+++ b/drivers/acpi/acpi_amba.c
@@ -0,0 +1,122 @@
+
+/*
+ * ACPI support for the AMBA bus type.
+ *
+ * Copyright (C) 2015, Linaro Ltd
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/amba/bus.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static const struct acpi_device_id amba_id_list[] = {
+ {"ARMH0061", 0}, /* PL061 GPIO Device */
+ {"", 0},
+};
+
+static void amba_register_dummy_clk(void)
+{
+ static struct clk *amba_dummy_clk;
+
+ /* If the dummy clock has already been registered, nothing to do */
+ if (amba_dummy_clk)
+ return;
+
+ amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL,
+ CLK_IS_ROOT, 0);
+ clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
+}
+
+static int amba_handler_attach(struct acpi_device *adev,
+ const struct acpi_device_id *id)
+{
+ struct amba_device *dev;
+ struct resource_entry *rentry;
+ struct list_head resource_list;
+ bool address_found = false;
+ int irq_no = 0;
+ int ret;
+
+ /* If the ACPI node already has a physical device attached, skip it. */
+ if (adev->physical_node_count)
+ return 0;
+
+ dev = amba_device_alloc(dev_name(&adev->dev), 0, 0);
+ if (!dev) {
+ dev_err(&adev->dev, "%s(): amba_device_alloc() failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+ if (ret < 0)
+ goto err_free;
+
+ list_for_each_entry(rentry, &resource_list, node) {
+ switch (resource_type(rentry->res)) {
+ case IORESOURCE_MEM:
+ if (!address_found) {
+ dev->res = *rentry->res;
+ address_found = true;
+ }
+ break;
+ case IORESOURCE_IRQ:
+ if (irq_no < AMBA_NR_IRQS)
+ dev->irq[irq_no++] = rentry->res->start;
+ break;
+ default:
+ dev_warn(&adev->dev, "Invalid resource\n");
+ break;
+ }
+ }
+
+ acpi_dev_free_resource_list(&resource_list);
+
+ /*
+ * If the ACPI node has a parent and that parent has a physical device
+ * attached to it, that physical device should be the parent of
+ * the amba device we are about to create.
+ */
+ if (adev->parent)
+ dev->dev.parent = acpi_get_first_physical_node(adev->parent);
+
+ ACPI_COMPANION_SET(&dev->dev, adev);
+
+ ret = amba_device_add(dev, &iomem_resource);
+ if (ret) {
+ dev_err(&adev->dev, "%s(): amba_device_add() failed (%d)\n",
+ __func__, ret);
+ goto err_free;
+ }
+
+ return 1;
+
+err_free:
+ amba_device_put(dev);
+ return ret;
+}
+
+static struct acpi_scan_handler amba_handler = {
+ .ids = amba_id_list,
+ .attach = amba_handler_attach,
+};
+
+void __init acpi_amba_init(void)
+{
+ amba_register_dummy_clk();
+ acpi_scan_add_handler(&amba_handler);
+}
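
The scan handler matches on the device's _HID, so supporting another fixed-clock AMBA peripheral is a one-line addition to the ID table. A sketch (the PL011 entry below is a hypothetical example, not part of this patch):

    static const struct acpi_device_id amba_id_list[] = {
    	{"ARMH0061", 0},	/* PL061 GPIO Device */
    	{"ARMH0011", 0},	/* hypothetical: PL011 UART */
    	{"", 0},
    };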
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index d507cf6deda0..f245bf35bedb 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -143,7 +143,9 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
/* Generic apd devices */
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
{ "AMD0010", APD_ADDR(cz_i2c_desc) },
+ { "AMDI0010", APD_ADDR(cz_i2c_desc) },
{ "AMD0020", APD_ADDR(cz_uart_desc) },
+ { "AMDI0020", APD_ADDR(cz_uart_desc) },
{ "AMD0030", },
#endif
#ifdef CONFIG_ARM64
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 296b7a14893a..159f7f19abce 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -43,7 +43,6 @@ static const struct acpi_device_id forbidden_id_list[] = {
struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
{
struct platform_device *pdev = NULL;
- struct acpi_device *acpi_parent;
struct platform_device_info pdevinfo;
struct resource_entry *rentry;
struct list_head resource_list;
@@ -62,7 +61,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
if (count < 0) {
return NULL;
} else if (count > 0) {
- resources = kmalloc(count * sizeof(struct resource),
+ resources = kzalloc(count * sizeof(struct resource),
GFP_KERNEL);
if (!resources) {
dev_err(&adev->dev, "No memory for resources\n");
@@ -82,22 +81,8 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
* attached to it, that physical device should be the parent of the
* platform device we are about to create.
*/
- pdevinfo.parent = NULL;
- acpi_parent = adev->parent;
- if (acpi_parent) {
- struct acpi_device_physical_node *entry;
- struct list_head *list;
-
- mutex_lock(&acpi_parent->physical_node_lock);
- list = &acpi_parent->physical_node_list;
- if (!list_empty(list)) {
- entry = list_first_entry(list,
- struct acpi_device_physical_node,
- node);
- pdevinfo.parent = entry->dev;
- }
- mutex_unlock(&acpi_parent->physical_node_lock);
- }
+ pdevinfo.parent = adev->parent ?
+ acpi_get_first_physical_node(adev->parent) : NULL;
pdevinfo.name = dev_name(&adev->dev);
pdevinfo.id = -1;
pdevinfo.res = resources;
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 6979186dbd4b..0d92d0f915e9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device)
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+#ifdef CONFIG_X86
+static bool acpi_hwp_native_thermal_lvt_set;
+static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
+ u32 lvl,
+ void *context,
+ void **rv)
+{
+ u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
+ u32 capbuf[2];
+ struct acpi_osc_context osc_context = {
+ .uuid_str = sb_uuid_str,
+ .rev = 1,
+ .cap.length = 8,
+ .cap.pointer = capbuf,
+ };
+
+ if (acpi_hwp_native_thermal_lvt_set)
+ return AE_CTRL_TERMINATE;
+
+ capbuf[0] = 0x0000;
+ capbuf[1] = 0x1000; /* set bit 12 */
+
+ if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
+ if (osc_context.ret.pointer && osc_context.ret.length > 1) {
+ u32 *capbuf_ret = osc_context.ret.pointer;
+
+ if (capbuf_ret[1] & 0x1000) {
+ acpi_handle_info(handle,
+ "_OSC native thermal LVT Acked\n");
+ acpi_hwp_native_thermal_lvt_set = true;
+ }
+ }
+ kfree(osc_context.ret.pointer);
+ }
+
+ return AE_OK;
+}
+
+void __init acpi_early_processor_osc(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP)) {
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ acpi_hwp_native_thermal_lvt_osc,
+ NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
+ acpi_hwp_native_thermal_lvt_osc,
+ NULL, NULL);
+ }
+}
+#endif
+
/*
* The following ACPI IDs are known to be suitable for representing as
* processor devices.
@@ -514,7 +566,24 @@ static struct acpi_scan_handler processor_handler = {
},
};
+static int acpi_processor_container_attach(struct acpi_device *dev,
+ const struct acpi_device_id *id)
+{
+ return 1;
+}
+
+static const struct acpi_device_id processor_container_ids[] = {
+ { ACPI_PROCESSOR_CONTAINER_HID, },
+ { }
+};
+
+static struct acpi_scan_handler processor_container_handler = {
+ .ids = processor_container_ids,
+ .attach = acpi_processor_container_attach,
+};
+
void __init acpi_processor_init(void)
{
acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
+ acpi_scan_add_handler(&processor_container_handler);
}
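
For reference, the two DWORDs handed to _OSC here follow the usual query-word/capability-word layout; a sketch of what the code above sets (the macro names are invented for illustration):

    #define OSC_QUERY_DWORD		0	/* capbuf[0]: status/query word */
    #define OSC_SUPPORT_DWORD		1	/* capbuf[1]: capability bits */
    #define HWP_NATIVE_THERMAL_LVT	(1u << 12)	/* == 0x1000 */

    capbuf[OSC_QUERY_DWORD] = 0;
    capbuf[OSC_SUPPORT_DWORD] = HWP_NATIVE_THERMAL_LVT;

The handshake counts as acknowledged when the same bit comes back set in the returned capability buffer, which is exactly what the capbuf_ret[1] & 0x1000 test above checks.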
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index a76f8be1bfe7..4361bc98ef4c 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -218,13 +218,6 @@ struct acpi_video_device {
struct thermal_cooling_device *cooling_dev;
};
-static const char device_decode[][30] = {
- "motherboard VGA device",
- "PCI VGA device",
- "AGP VGA device",
- "UNKNOWN",
-};
-
static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data);
static void acpi_video_device_rebind(struct acpi_video_bus *video);
static void acpi_video_device_bind(struct acpi_video_bus *video,
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 55c8197036f3..51b073b68f16 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -165,7 +165,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
/* Initialization sequencing */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_reg_methods_enabled, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
/* Misc */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index e4977fac9c1d..9562a10a1a18 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -85,7 +85,7 @@ union acpi_parse_object;
#define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */
#define ACPI_MAX_MUTEX 5
-#define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
+#define ACPI_NUM_MUTEX (ACPI_MAX_MUTEX+1)
/* Lock structure for reader/writer interfaces */
@@ -103,11 +103,11 @@ struct acpi_rw_lock {
#define ACPI_LOCK_HARDWARE 1
#define ACPI_MAX_LOCK 1
-#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1
+#define ACPI_NUM_LOCK (ACPI_MAX_LOCK+1)
/* This Thread ID means that the mutex is not in use (unlocked) */
-#define ACPI_MUTEX_NOT_ACQUIRED (acpi_thread_id) 0
+#define ACPI_MUTEX_NOT_ACQUIRED ((acpi_thread_id) 0)
/* This Thread ID means an invalid thread ID */
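
The added parentheses matter whenever these macros appear inside a larger expression. Illustrative:

    /* Before: ACPI_NUM_MUTEX expands to ACPI_MAX_MUTEX+1, so        */
    i % ACPI_NUM_MUTEX	/* parsed as (i % ACPI_MAX_MUTEX) + 1        */

    /* After: ACPI_NUM_MUTEX expands to (ACPI_MAX_MUTEX+1), so       */
    i % ACPI_NUM_MUTEX	/* parsed as i % (ACPI_MAX_MUTEX + 1)        */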
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 9684ed61284d..022d69cb345a 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -88,7 +88,7 @@
*/
acpi_status acpi_ns_initialize_objects(void);
-acpi_status acpi_ns_initialize_devices(void);
+acpi_status acpi_ns_initialize_devices(u32 flags);
/*
* nsload - Namespace loading
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 52f6bee52d47..5faeab41e302 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -1125,7 +1125,7 @@ const union acpi_predefined_info acpi_gbl_resource_names[] = {
PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
};
-static const union acpi_predefined_info acpi_gbl_scope_names[] = {
+const union acpi_predefined_info acpi_gbl_scope_names[] = {
{{"_GPE", 0, 0}},
{{"_PR_", 0, 0}},
{{"_SB_", 0, 0}},
diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c
index 7ec62c461280..772178c96ccf 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -348,7 +348,7 @@ void acpi_db_display_table_info(char *table_arg)
} else {
/* If the pointer is null, the table has been unloaded */
- ACPI_INFO((AE_INFO, "%4.4s - Table has been unloaded",
+ ACPI_INFO(("%4.4s - Table has been unloaded",
table_desc->signature.ascii));
}
}
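
This is the first of many hunks below with the same shape: ACPI_INFO() no longer takes the AE_INFO module-information argument, presumably because the macro now supplies it internally (inferred from the call sites; the macro definition itself is not part of this section). Callers keep the double parentheses and pass only the printf-style payload:

    ACPI_INFO((AE_INFO, "Enabled %u new GPEs", count));	/* old */
    ACPI_INFO(("Enabled %u new GPEs", count));		/* new */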
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 9fee88f1c654..68f4e0f4b095 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -408,7 +408,7 @@ void acpi_db_dump_pld_buffer(union acpi_object *obj_desc)
new_buffer = acpi_db_encode_pld_buffer(pld_info);
if (!new_buffer) {
- return;
+ goto exit;
}
/* The two bit-packed buffers should match */
@@ -479,6 +479,7 @@ void acpi_db_dump_pld_buffer(union acpi_object *obj_desc)
pld_info->horizontal_offset);
}
- ACPI_FREE(pld_info);
ACPI_FREE(new_buffer);
+exit:
+ ACPI_FREE(pld_info);
}
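
The fix is the classic single-exit cleanup pattern: the early return leaked pld_info once acpi_db_encode_pld_buffer() failed, so the error path now falls through to a label that frees it. A minimal sketch of the resulting control flow:

    new_buffer = acpi_db_encode_pld_buffer(pld_info);
    if (!new_buffer)
    	goto exit;		/* was "return;", leaking pld_info */
    /* ... compare and dump the buffers ... */
    ACPI_FREE(new_buffer);
    exit:
    ACPI_FREE(pld_info);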
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 6a72047aae1c..1982310e6d83 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -809,8 +809,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
if (method_desc->method.
info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
if (walk_state) {
- ACPI_INFO((AE_INFO,
- "Marking method %4.4s as Serialized "
+ ACPI_INFO(("Marking method %4.4s as Serialized "
"because of AE_ALREADY_EXISTS error",
walk_state->method_node->name.
ascii));
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index c303e9d9266f..a91de2b4603c 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -524,8 +524,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
arg = arg->common.next;
}
- ACPI_INFO((AE_INFO,
- "Actual Package length (%u) is larger than "
+ ACPI_INFO(("Actual Package length (%u) is larger than "
"NumElements field (%u), truncated",
i, element_count));
} else if (i < element_count) {
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9275e626ed8d..447fa1cac64f 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -499,8 +499,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
}
if (gpe_enabled_count) {
- ACPI_INFO((AE_INFO,
- "Enabled %u GPEs in block %02X to %02X",
+ ACPI_INFO(("Enabled %u GPEs in block %02X to %02X",
gpe_enabled_count, (u32)gpe_block->block_base_number,
(u32)(gpe_block->block_base_number +
(gpe_block->gpe_count - 1))));
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 9fdd8d09141b..7dc75474c897 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -281,7 +281,7 @@ void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
}
if (walk_info.count) {
- ACPI_INFO((AE_INFO, "Enabled %u new GPEs", walk_info.count));
+ ACPI_INFO(("Enabled %u new GPEs", walk_info.count));
}
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 47092b4d633c..63924d1c737a 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -600,7 +600,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
if (region_obj2->extra.method_REG == NULL ||
region_obj->region.handler == NULL ||
- !acpi_gbl_reg_methods_enabled) {
+ !acpi_gbl_namespace_initialized) {
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 011df210b7b2..f74161301037 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -252,7 +252,7 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_SUCCESS(status)) {
- ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ ACPI_INFO(("Dynamic OEM Table Load:"));
acpi_tb_print_table_header(0, table);
}
@@ -472,7 +472,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
/* Install the new table into the local data structures */
- ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
+ ACPI_INFO(("Dynamic OEM Table Load:"));
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 28eb861c44eb..5aa21c4eda1d 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -123,8 +123,10 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
* op is intended for use by disassemblers in order to properly
* disassemble control method invocations. The opcode or group of
* opcodes should be surrounded by an "if (0)" clause to ensure that
- * AML interpreters never see the opcode.
+ * AML interpreters never see the opcode. Thus, something is
+ * wrong if an external opcode ever gets here.
*/
+ ACPI_ERROR((AE_INFO, "Executed External Op"));
status = AE_OK;
goto cleanup;
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 65d58bea4320..5d59cfcef6f4 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -378,8 +378,7 @@ void acpi_ns_exec_module_code_list(void)
acpi_ut_remove_reference(prev);
}
- ACPI_INFO((AE_INFO,
- "Executed %u blocks of module-level executable AML code",
+ ACPI_INFO(("Executed %u blocks of module-level executable AML code",
method_count));
ACPI_FREE(info);
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index bd75d46234a4..d4aa8b696ee9 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -46,6 +46,7 @@
#include "acnamesp.h"
#include "acdispat.h"
#include "acinterp.h"
+#include "acevents.h"
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsinit")
@@ -83,6 +84,8 @@ acpi_status acpi_ns_initialize_objects(void)
ACPI_FUNCTION_TRACE(ns_initialize_objects);
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Completing Initialization of ACPI Objects\n"));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
@@ -133,82 +136,108 @@ acpi_status acpi_ns_initialize_objects(void)
*
******************************************************************************/
-acpi_status acpi_ns_initialize_devices(void)
+acpi_status acpi_ns_initialize_devices(u32 flags)
{
- acpi_status status;
+ acpi_status status = AE_OK;
struct acpi_device_walk_info info;
ACPI_FUNCTION_TRACE(ns_initialize_devices);
- /* Init counters */
+ if (!(flags & ACPI_NO_DEVICE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Initializing ACPI Devices\n"));
- info.device_count = 0;
- info.num_STA = 0;
- info.num_INI = 0;
+ /* Init counters */
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- "Initializing Device/Processor/Thermal objects "
- "and executing _INI/_STA methods:\n"));
+ info.device_count = 0;
+ info.num_STA = 0;
+ info.num_INI = 0;
- /* Tree analysis: find all subtrees that contain _INI methods */
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ "Initializing Device/Processor/Thermal objects "
+ "and executing _INI/_STA methods:\n"));
- status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, FALSE,
- acpi_ns_find_ini_methods, NULL, &info,
- NULL);
- if (ACPI_FAILURE(status)) {
- goto error_exit;
- }
+ /* Tree analysis: find all subtrees that contain _INI methods */
+
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, FALSE,
+ acpi_ns_find_ini_methods, NULL,
+ &info, NULL);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ /* Allocate the evaluation information block */
- /* Allocate the evaluation information block */
+ info.evaluate_info =
+ ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
+ if (!info.evaluate_info) {
+ status = AE_NO_MEMORY;
+ goto error_exit;
+ }
- info.evaluate_info =
- ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
- if (!info.evaluate_info) {
- status = AE_NO_MEMORY;
- goto error_exit;
+ /*
+ * Execute the "global" _INI method that may appear at the root.
+ * This support is provided for Windows compatibility (Vista+) and
+ * is not part of the ACPI specification.
+ */
+ info.evaluate_info->prefix_node = acpi_gbl_root_node;
+ info.evaluate_info->relative_pathname = METHOD_NAME__INI;
+ info.evaluate_info->parameters = NULL;
+ info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info.evaluate_info);
+ if (ACPI_SUCCESS(status)) {
+ info.num_INI++;
+ }
}
/*
- * Execute the "global" _INI method that may appear at the root. This
- * support is provided for Windows compatibility (Vista+) and is not
- * part of the ACPI specification.
+ * Run all _REG methods
+ *
+ * Note: Any objects accessed by the _REG methods will be automatically
+ * initialized, even if they contain executable AML (see the call to
+ * acpi_ns_initialize_objects below).
*/
- info.evaluate_info->prefix_node = acpi_gbl_root_node;
- info.evaluate_info->relative_pathname = METHOD_NAME__INI;
- info.evaluate_info->parameters = NULL;
- info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+ if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "[Init] Executing _REG OpRegion methods\n"));
- status = acpi_ns_evaluate(info.evaluate_info);
- if (ACPI_SUCCESS(status)) {
- info.num_INI++;
+ status = acpi_ev_initialize_op_regions();
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
}
- /* Walk namespace to execute all _INIs on present devices */
+ if (!(flags & ACPI_NO_DEVICE_INIT)) {
- status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, FALSE,
- acpi_ns_init_one_device, NULL, &info,
- NULL);
+ /* Walk namespace to execute all _INIs on present devices */
- /*
- * Any _OSI requests should be completed by now. If the BIOS has
- * requested any Windows OSI strings, we will always truncate
- * I/O addresses to 16 bits -- for Windows compatibility.
- */
- if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
- acpi_gbl_truncate_io_addresses = TRUE;
- }
+ status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, FALSE,
+ acpi_ns_init_one_device, NULL,
+ &info, NULL);
- ACPI_FREE(info.evaluate_info);
- if (ACPI_FAILURE(status)) {
- goto error_exit;
- }
+ /*
+ * Any _OSI requests should be completed by now. If the BIOS has
+ * requested any Windows OSI strings, we will always truncate
+ * I/O addresses to 16 bits -- for Windows compatibility.
+ */
+ if (acpi_gbl_osi_data >= ACPI_OSI_WIN_2000) {
+ acpi_gbl_truncate_io_addresses = TRUE;
+ }
- ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
- " Executed %u _INI methods requiring %u _STA executions "
- "(examined %u objects)\n",
- info.num_INI, info.num_STA, info.device_count));
+ ACPI_FREE(info.evaluate_info);
+ if (ACPI_FAILURE(status)) {
+ goto error_exit;
+ }
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+ " Executed %u _INI methods requiring %u _STA executions "
+ "(examined %u objects)\n",
+ info.num_INI, info.num_STA,
+ info.device_count));
+ }
return_ACPI_STATUS(status);
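
With the new parameter, callers can run the two phases independently. Illustrative call shapes (the flag names are the ones tested in the hunk above):

    acpi_ns_initialize_devices(0);				/* _REG plus _INI/_STA */
    acpi_ns_initialize_devices(ACPI_NO_ADDRESS_SPACE_INIT);	/* devices only, skip _REG */
    acpi_ns_initialize_devices(ACPI_NO_DEVICE_INIT);		/* _REG only */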
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index b661a1e013fb..4dc6108de4ff 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -267,8 +267,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
if (!reload &&
acpi_gbl_disable_ssdt_table_install &&
ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
- ACPI_INFO((AE_INFO,
- "Ignoring installation of %4.4s at %8.8X%8.8X",
+ ACPI_INFO(("Ignoring installation of %4.4s at %8.8X%8.8X",
new_table_desc.signature.ascii,
ACPI_FORMAT_UINT64(address)));
goto release_and_exit;
@@ -432,7 +431,7 @@ finish_override:
return;
}
- ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X"
+ ACPI_INFO(("%4.4s 0x%8.8X%8.8X"
" %s table override, new table: 0x%8.8X%8.8X",
old_table_desc->signature.ascii,
ACPI_FORMAT_UINT64(old_table_desc->address),
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index fd4146d4ff49..26d61dbace0a 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -132,7 +132,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
header->signature, ACPI_FORMAT_UINT64(address),
header->length));
} else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
@@ -144,7 +144,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
+ ACPI_INFO(("RSDP 0x%8.8X%8.8X %06X (v%.2d %-6.6s)",
ACPI_FORMAT_UINT64(address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
@@ -158,8 +158,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
- ACPI_INFO((AE_INFO,
- "%-4.4s 0x%8.8X%8.8X"
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X"
" %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
local_header.signature, ACPI_FORMAT_UINT64(address),
local_header.length, local_header.revision,
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 3269bef371d7..9240c76d2823 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -174,9 +174,7 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
new_table);
- ACPI_INFO((AE_INFO,
- "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
- new_table->length));
+ ACPI_INFO(("Forced DSDT copy: length 0x%05X copied locally, original unmapped", new_table->length));
return (new_table);
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 278666e39563..3151968c10d1 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -47,6 +47,7 @@
#include "accommon.h"
#include "acnamesp.h"
#include "actables.h"
+#include "acevents.h"
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbxfload")
@@ -68,6 +69,25 @@ acpi_status __init acpi_load_tables(void)
ACPI_FUNCTION_TRACE(acpi_load_tables);
+ /*
+ * Install the default operation region handlers. These are the
+ * handlers that are defined by the ACPI specification to be
+ * "always accessible" -- namely, system_memory, system_IO, and
+ * PCI_Config. This also means that no _REG methods need to be
+ * run for these address spaces. We need to have these handlers
+ * installed before any AML code can be executed, especially any
+ * module-level code (11/2015).
+ * Note that we allow OSPMs to install their own region handlers
+ * between acpi_initialize_subsystem() and acpi_load_tables() to use
+ * their customized default region handlers.
+ */
+ status = acpi_ev_install_region_handlers();
+ if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "During Region initialization"));
+ return_ACPI_STATUS(status);
+ }
+
/* Load the namespace from the tables */
status = acpi_tb_load_namespace();
@@ -83,6 +103,20 @@ acpi_status __init acpi_load_tables(void)
"While loading namespace from ACPI tables"));
}
+ if (!acpi_gbl_group_module_level_code) {
+ /*
+ * Initialize the objects that remain uninitialized. This
+ * runs the executable AML that may be part of the
+ * declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ acpi_gbl_namespace_initialized = TRUE;
return_ACPI_STATUS(status);
}
@@ -206,9 +240,7 @@ acpi_status acpi_tb_load_namespace(void)
}
if (!tables_failed) {
- ACPI_INFO((AE_INFO,
- "%u ACPI AML tables successfully acquired and loaded\n",
- tables_loaded));
+ ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded\n", tables_loaded));
} else {
ACPI_ERROR((AE_INFO,
"%u table load failures, %u successful",
@@ -301,7 +333,7 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
/* Install the table and load it into the namespace */
- ACPI_INFO((AE_INFO, "Host-directed Dynamic ACPI Table Load:"));
+ ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
status = acpi_tb_install_standard_table(ACPI_PTR_TO_PHYSADDR(table),
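
The net effect of the tbxfload.c hunks above is an ordering change: the default operation region handlers are now installed inside acpi_load_tables() instead of acpi_enable_subsystem(), and AE_ALREADY_EXISTS is tolerated so that a host can slot its own handlers in first. A minimal sketch of the host-side sequence this enables, assuming a hypothetical my_mem_handler override (illustrative only, not part of this patch):

    #include <acpi/acpi.h>

    /* Hypothetical override for the system_memory address space. */
    extern acpi_status my_mem_handler(u32 function,
                                      acpi_physical_address address,
                                      u32 bit_width, u64 *value,
                                      void *handler_context,
                                      void *region_context);

    static acpi_status __init early_acpi_bring_up(void)
    {
            acpi_status status;

            status = acpi_initialize_subsystem();
            if (ACPI_FAILURE(status))
                    return status;

            /* Optional: claim system_memory before the defaults go in. */
            status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                            ACPI_ADR_SPACE_SYSTEM_MEMORY,
                            my_mem_handler, NULL, NULL);
            if (ACPI_FAILURE(status))
                    return status;

            /*
             * Installs the remaining default handlers (ours is skipped
             * via AE_ALREADY_EXISTS), then loads the namespace.
             */
            return acpi_load_tables();
    }
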
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index c9a720f2274a..f8e9978888e1 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -245,7 +245,7 @@ void *acpi_os_acquire_object(struct acpi_memory_list *cache)
acpi_status status;
void *object;
- ACPI_FUNCTION_NAME(os_acquire_object);
+ ACPI_FUNCTION_TRACE(os_acquire_object);
if (!cache) {
return_PTR(NULL);
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index c427a5cda465..d5c3adf19bd0 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -140,6 +140,67 @@ int acpi_ut_stricmp(char *string1, char *string2)
return (c1 - c2);
}
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
+ * functions. This is the size of the Destination buffer.
+ *
+ * RETURN: TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ * the result of the operation will not overflow the output string
+ * buffer.
+ *
+ * NOTE: These functions are typically only helpful for processing
+ * user input and command lines. For most ACPICA code, the
+ * required buffer length is precisely calculated before buffer
+ * allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+ if (strlen(source) >= dest_size) {
+ return (TRUE);
+ }
+
+ strcpy(dest, source);
+ return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+ if ((strlen(dest) + strlen(source)) >= dest_size) {
+ return (TRUE);
+ }
+
+ strcat(dest, source);
+ return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+ acpi_size dest_size,
+ char *source, acpi_size max_transfer_length)
+{
+ acpi_size actual_transfer_length;
+
+ actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+
+ if ((strlen(dest) + actual_transfer_length) >= dest_size) {
+ return (TRUE);
+ }
+
+ strncat(dest, source, max_transfer_length);
+ return (FALSE);
+}
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_strtoul64
@@ -155,7 +216,15 @@ int acpi_ut_stricmp(char *string1, char *string2)
* 32-bit or 64-bit conversion, depending on the current mode
* of the interpreter.
*
- * NOTE: Does not support Octal strings, not needed.
+ * NOTES: acpi_gbl_integer_byte_width should be set to the proper width.
+ * For the core ACPICA code, this width depends on the DSDT
+ * version. For iASL, the default byte width is always 8.
+ *
+ * Does not support Octal strings, not needed at this time.
+ *
+ *              There is an earlier version of the function after this one,
+ *              below. It is slightly different from this one, and the two
+ *              may eventually need to be merged (01/2016).
*
******************************************************************************/
@@ -171,7 +240,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
u8 sign_of0x = 0;
u8 term = 0;
- ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
+ ACPI_FUNCTION_TRACE_STR(ut_strtoul64, string);
switch (base) {
case ACPI_ANY_BASE:
@@ -318,63 +387,162 @@ error_exit:
}
}
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#ifdef _OBSOLETE_FUNCTIONS
+/* TBD: use version in ACPICA main code base? */
+/* DONE: 01/2016 */
+
/*******************************************************************************
*
- * FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ * FUNCTION: strtoul64
*
- * PARAMETERS: Adds a "DestSize" parameter to each of the standard string
- * functions. This is the size of the Destination buffer.
+ * PARAMETERS: string - Null terminated string
+ *              terminator          - Where a pointer to the terminating byte
+ * is returned
+ * base - Radix of the string
*
- * RETURN: TRUE if the operation would overflow the destination buffer.
+ * RETURN: Converted value
*
- * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
- * the result of the operation will not overflow the output string
- * buffer.
- *
- * NOTE: These functions are typically only helpful for processing
- * user input and command lines. For most ACPICA code, the
- * required buffer length is precisely calculated before buffer
- * allocation, so the use of these functions is unnecessary.
+ * DESCRIPTION: Convert a string into an unsigned value.
*
******************************************************************************/
-u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+acpi_status strtoul64(char *string, u32 base, u64 *ret_integer)
{
+ u32 index;
+ u32 sign;
+ u64 return_value = 0;
+ acpi_status status = AE_OK;
- if (strlen(source) >= dest_size) {
- return (TRUE);
+ *ret_integer = 0;
+
+ switch (base) {
+ case 0:
+ case 8:
+ case 10:
+ case 16:
+
+ break;
+
+ default:
+ /*
+ * The specified Base parameter is not in the domain of
+ * this function:
+ */
+ return (AE_BAD_PARAMETER);
}
- strcpy(dest, source);
- return (FALSE);
-}
+ /* Skip over any white space in the buffer: */
-u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
-{
+ while (isspace((int)*string) || *string == '\t') {
+ ++string;
+ }
- if ((strlen(dest) + strlen(source)) >= dest_size) {
- return (TRUE);
+ /*
+ * The buffer may contain an optional plus or minus sign.
+	 * If it does, then skip over it but remember what it was:
+ */
+ if (*string == '-') {
+ sign = ACPI_SIGN_NEGATIVE;
+ ++string;
+ } else if (*string == '+') {
+ ++string;
+ sign = ACPI_SIGN_POSITIVE;
+ } else {
+ sign = ACPI_SIGN_POSITIVE;
}
- strcat(dest, source);
- return (FALSE);
-}
+ /*
+ * If the input parameter Base is zero, then we need to
+ * determine if it is octal, decimal, or hexadecimal:
+ */
+ if (base == 0) {
+ if (*string == '0') {
+ if (tolower((int)*(++string)) == 'x') {
+ base = 16;
+ ++string;
+ } else {
+ base = 8;
+ }
+ } else {
+ base = 10;
+ }
+ }
-u8
-acpi_ut_safe_strncat(char *dest,
- acpi_size dest_size,
- char *source, acpi_size max_transfer_length)
-{
- acpi_size actual_transfer_length;
+ /*
+ * For octal and hexadecimal bases, skip over the leading
+ * 0 or 0x, if they are present.
+ */
+ if (base == 8 && *string == '0') {
+ string++;
+ }
- actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+ if (base == 16 && *string == '0' && tolower((int)*(++string)) == 'x') {
+ string++;
+ }
- if ((strlen(dest) + actual_transfer_length) >= dest_size) {
- return (TRUE);
+ /* Main loop: convert the string to an unsigned long */
+
+ while (*string) {
+ if (isdigit((int)*string)) {
+ index = ((u8)*string) - '0';
+ } else {
+ index = (u8)toupper((int)*string);
+ if (isupper((int)index)) {
+ index = index - 'A' + 10;
+ } else {
+ goto error_exit;
+ }
+ }
+
+ if (index >= base) {
+ goto error_exit;
+ }
+
+ /* Check to see if value is out of range: */
+
+ if (return_value > ((ACPI_UINT64_MAX - (u64)index) / (u64)base)) {
+ goto error_exit;
+ } else {
+ return_value *= base;
+ return_value += index;
+ }
+
+ ++string;
}
- strncat(dest, source, max_transfer_length);
- return (FALSE);
+ /* If a minus sign was present, then "the conversion is negated": */
+
+ if (sign == ACPI_SIGN_NEGATIVE) {
+ return_value = (ACPI_UINT32_MAX - return_value) + 1;
+ }
+
+ *ret_integer = return_value;
+ return (status);
+
+error_exit:
+ switch (base) {
+ case 8:
+
+ status = AE_BAD_OCTAL_CONSTANT;
+ break;
+
+ case 10:
+
+ status = AE_BAD_DECIMAL_CONSTANT;
+ break;
+
+ case 16:
+
+ status = AE_BAD_HEX_CONSTANT;
+ break;
+
+ default:
+
+ /* Base validated above */
+
+ break;
+ }
+
+ return (status);
}
#endif
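
Since the safe string helpers now lead the file, their contract is worth spelling out: each returns TRUE when the operation would overflow dest, so callers bail out rather than truncate. A small hypothetical example of assembling a path in a command-line tool (function name and buffer sizing invented):

    static u8 build_pathname(char *buf, acpi_size buf_size,
                             char *dir, char *file)
    {
            if (acpi_ut_safe_strcpy(buf, buf_size, dir))
                    return (TRUE);  /* would overflow */

            if (acpi_ut_safe_strcat(buf, buf_size, "/"))
                    return (TRUE);

            return (acpi_ut_safe_strcat(buf, buf_size, file));
    }
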
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index c7c2bb8f3559..60c406a8efcb 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -712,7 +712,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
/* Print summary */
if (!num_outstanding) {
- ACPI_INFO((AE_INFO, "No outstanding allocations"));
+ ACPI_INFO(("No outstanding allocations"));
} else {
ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
num_outstanding, num_outstanding));
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6fe59597b599..d9f15cbcd8a0 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -175,8 +175,7 @@ ACPI_EXPORT_SYMBOL(acpi_warning)
* TBD: module_name and line_number args are not needed, should be removed.
*
******************************************************************************/
-void ACPI_INTERNAL_VAR_XFACE
-acpi_info(const char *module_name, u32 line_number, const char *format, ...)
+void ACPI_INTERNAL_VAR_XFACE acpi_info(const char *format, ...)
{
va_list arg_list;
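
This signature change is what drives every ACPI_INFO((AE_INFO, ...)) to ACPI_INFO((...)) conversion earlier in the patch: AE_INFO supplies the module-name/line-number pair that acpi_info() no longer accepts. In rough outline (the real macros live in ACPICA's headers; this is a simplified sketch):

    /*
     * Before this patch the argument list began with AE_INFO, which
     * expands to the (_acpi_module_name, __LINE__) location pair:
     *
     *      ACPI_INFO((AE_INFO, "No outstanding allocations"));
     */

    /* After: only the format string and its arguments remain. */
    void acpi_info(const char *format, ...);

    /* Call sites therefore shrink to: */
    /*      ACPI_INFO(("No outstanding allocations"));          */
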
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 721b87cce908..75b5f27da267 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -154,21 +154,6 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
*/
acpi_gbl_early_initialization = FALSE;
- /*
- * Install the default operation region handlers. These are the
- * handlers that are defined by the ACPI specification to be
- * "always accessible" -- namely, system_memory, system_IO, and
- * PCI_Config. This also means that no _REG methods need to be
- * run for these address spaces. We need to have these handlers
- * installed before any AML code can be executed, especially any
- * module-level code (11/2015).
- */
- status = acpi_ev_install_region_handlers();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "During Region initialization"));
- return_ACPI_STATUS(status);
- }
#if (!ACPI_REDUCED_HARDWARE)
/* Enable ACPI mode */
@@ -260,23 +245,6 @@ acpi_status __init acpi_initialize_objects(u32 flags)
ACPI_FUNCTION_TRACE(acpi_initialize_objects);
- /*
- * Run all _REG methods
- *
- * Note: Any objects accessed by the _REG methods will be automatically
- * initialized, even if they contain executable AML (see the call to
- * acpi_ns_initialize_objects below).
- */
- acpi_gbl_reg_methods_enabled = TRUE;
- if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Executing _REG OpRegion methods\n"));
-
- status = acpi_ev_initialize_op_regions();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
- }
#ifdef ACPI_EXEC_APP
/*
* This call implements the "initialization file" option for acpi_exec.
@@ -299,32 +267,27 @@ acpi_status __init acpi_initialize_objects(u32 flags)
*/
if (acpi_gbl_group_module_level_code) {
acpi_ns_exec_module_code_list();
- }
-
- /*
- * Initialize the objects that remain uninitialized. This runs the
- * executable AML that may be part of the declaration of these objects:
- * operation_regions, buffer_fields, Buffers, and Packages.
- */
- if (!(flags & ACPI_NO_OBJECT_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Completing Initialization of ACPI Objects\n"));
- status = acpi_ns_initialize_objects();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ /*
+ * Initialize the objects that remain uninitialized. This
+ * runs the executable AML that may be part of the
+ * declaration of these objects:
+ * operation_regions, buffer_fields, Buffers, and Packages.
+ */
+ if (!(flags & ACPI_NO_OBJECT_INIT)) {
+ status = acpi_ns_initialize_objects();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
}
}
/*
- * Initialize all device objects in the namespace. This runs the device
- * _STA and _INI methods.
+ * Initialize all device/region objects in the namespace. This runs
+ * the device _STA and _INI methods and region _REG methods.
*/
- if (!(flags & ACPI_NO_DEVICE_INIT)) {
- ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
- "[Init] Initializing ACPI Devices\n"));
-
- status = acpi_ns_initialize_devices();
+ if (!(flags & (ACPI_NO_DEVICE_INIT | ACPI_NO_ADDRESS_SPACE_INIT))) {
+ status = acpi_ns_initialize_devices(flags);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index a2c8d7adb6eb..da370e1d31f4 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -536,7 +536,8 @@ int apei_resources_request(struct apei_resources *resources,
goto err_unmap_ioport;
}
- return 0;
+ goto arch_res_fini;
+
err_unmap_ioport:
list_for_each_entry(res, &resources->ioport, list) {
if (res == res_bak)
@@ -551,7 +552,8 @@ err_unmap_iomem:
release_mem_region(res->start, res->end - res->start);
}
arch_res_fini:
- apei_resources_fini(&arch_res);
+ if (arch_apei_filter_addr)
+ apei_resources_fini(&arch_res);
nvs_res_fini:
apei_resources_fini(&nvs_resources);
return rc;
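
The apei-base.c fix above swaps an early "return 0" for a jump into the shared teardown path, so the temporary arch_res list is released on success as well as on failure, and only when it was actually collected (i.e. when arch_apei_filter_addr is set). The shape of the fix, reduced to a sketch with invented helper names:

    /* Sketch only: collect/request stand in for the real steps. */
    static int request_with_temp_list(struct apei_resources *arch_res)
    {
            int rc;

            if (arch_apei_filter_addr) {
                    rc = collect_arch_resources(arch_res); /* hypothetical */
                    if (rc)
                            return rc;
            }

            rc = request_all_regions();                    /* hypothetical */

            /* Success and failure now share one teardown path... */
            if (arch_apei_filter_addr)
                    apei_resources_fini(arch_res);  /* ...only if collected */
            return rc;
    }
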
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 0431883653be..559c1173de1c 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -519,7 +519,7 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
u64 param3, u64 param4)
{
int rc;
- unsigned long pfn;
+ u64 base_addr, size;
/* If user manually set "flags", make sure it is legal */
if (flags && (flags &
@@ -545,10 +545,17 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
/*
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
- * better granularity and that target address is normal RAM.
+ * better granularity and that target address is normal RAM or
+ * NVDIMM.
*/
- pfn = PFN_DOWN(param1 & param2);
- if (!page_is_ram(pfn) || ((param2 & PAGE_MASK) != PAGE_MASK))
+ base_addr = param1 & param2;
+ size = ~param2 + 1;
+
+ if (((param2 & PAGE_MASK) != PAGE_MASK) ||
+ ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ != REGION_INTERSECTS) &&
+ (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
+ != REGION_INTERSECTS)))
return -EINVAL;
inject:
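
For reference, the mask arithmetic above: param2 is an address mask with its low bits clear, so param1 & param2 yields the aligned injection base and ~param2 + 1 the size of the window the mask leaves open. With concrete, made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t param1 = 0x123456789abcULL;    /* requested address */
            uint64_t param2 = 0xfffffffff000ULL;    /* page-granular mask */

            uint64_t base = param1 & param2;        /* 0x123456789000 */
            uint64_t size = ~param2 + 1;            /* 0x1000, one page */

            /*
             * The kernel then insists that [base, base + size) is either
             * System RAM or persistent memory via region_intersects().
             */
            printf("base=%#llx size=%#llx\n",
                   (unsigned long long)base, (unsigned long long)size);
            return 0;
    }
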
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6e6bc1059301..006c3894c6ea 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1207,6 +1207,9 @@ static int __init erst_init(void)
"Failed to allocate %lld bytes for persistent store error log.\n",
erst_erange.size);
+ /* Cleanup ERST Resources */
+ apei_resources_fini(&erst_resources);
+
return 0;
err_release_erange:
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 3dd9c462d22a..60746ef904e4 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,7 +26,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
@@ -79,6 +79,11 @@
((struct acpi_hest_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
+/*
+ * This driver isn't really modular; however, for the time being,
+ * continuing to use module_param is the easiest way to remain
+ * compatible with existing boot-arg use cases.
+ */
bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
@@ -1148,18 +1153,4 @@ err_ioremap_exit:
err:
return rc;
}
-
-static void __exit ghes_exit(void)
-{
- platform_driver_unregister(&ghes_platform_driver);
- ghes_estatus_pool_exit();
- ghes_ioremap_exit();
-}
-
-module_init(ghes_init);
-module_exit(ghes_exit);
-
-MODULE_AUTHOR("Huang Ying");
-MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:GHES");
+device_initcall(ghes_init);
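
This change and the bgrt.c one below follow the same recipe for code that can only be built in: keep module_param() so the existing boot argument (ghes.disable=1) still works, fold the MODULE_AUTHOR/DESCRIPTION text into a header comment, drop the unreachable exit path, and register through device_initcall(). In outline, for a hypothetical foo driver:

    /*
     * Before (module boilerplate on non-modular code):
     *
     *      #include <linux/module.h>
     *      module_init(foo_init);
     *      module_exit(foo_exit);  // never runs when built-in
     *      MODULE_LICENSE("GPL");
     */

    /* After: moduleparam.h still provides module_param(). */
    #include <linux/init.h>
    #include <linux/moduleparam.h>

    static bool foo_disable;
    module_param_named(disable, foo_disable, bool, 0);

    static int __init foo_init(void)
    {
            if (foo_disable)
                    return 0;
            /* real setup elided */
            return 0;
    }
    device_initcall(foo_init);
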
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index a83e3c62c5a9..75f128e766a9 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -1,4 +1,6 @@
/*
+ * BGRT boot graphic support
+ * Authors: Matthew Garrett, Josh Triplett <josh@joshtriplett.org>
* Copyright 2012 Red Hat, Inc <mjg@redhat.com>
* Copyright 2012 Intel Corporation
*
@@ -8,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysfs.h>
@@ -103,9 +104,4 @@ out_kobject:
kobject_put(bgrt_kobj);
return ret;
}
-
-module_init(bgrt_init);
-
-MODULE_AUTHOR("Matthew Garrett, Josh Triplett <josh@joshtriplett.org>");
-MODULE_DESCRIPTION("BGRT boot graphic support");
-MODULE_LICENSE("GPL");
+device_initcall(bgrt_init);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 891c42d1cd65..c068c829b453 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -479,24 +479,38 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device)
Device Matching
-------------------------------------------------------------------------- */
-static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
- const struct device *dev)
+/**
+ * acpi_get_first_physical_node - Get first physical node of an ACPI device
+ * @adev: ACPI device in question
+ *
+ * Return: First physical node of ACPI device @adev
+ */
+struct device *acpi_get_first_physical_node(struct acpi_device *adev)
{
struct mutex *physical_node_lock = &adev->physical_node_lock;
+ struct device *phys_dev;
mutex_lock(physical_node_lock);
if (list_empty(&adev->physical_node_list)) {
- adev = NULL;
+ phys_dev = NULL;
} else {
const struct acpi_device_physical_node *node;
node = list_first_entry(&adev->physical_node_list,
struct acpi_device_physical_node, node);
- if (node->dev != dev)
- adev = NULL;
+
+ phys_dev = node->dev;
}
mutex_unlock(physical_node_lock);
- return adev;
+ return phys_dev;
+}
+
+static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
+ const struct device *dev)
+{
+ const struct device *phys_dev = acpi_get_first_physical_node(adev);
+
+ return phys_dev && phys_dev == dev ? adev : NULL;
}
/**
@@ -1005,6 +1019,9 @@ static int __init acpi_bus_init(void)
goto error1;
}
+ /* Set capability bits for _OSC under processor scope */
+ acpi_early_processor_osc();
+
/*
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
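
With the lookup factored out, the primary-companion test in acpi_primary_dev_companion() collapses to a pointer comparison, and other users (the helper is exported via internal.h later in this patch) can share it. A trivial, hypothetical caller:

    /* Sketch: does this ACPI device already have a physical companion? */
    static bool has_physical_node(struct acpi_device *adev)
    {
            return acpi_get_first_physical_node(adev) != NULL;
    }
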
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6730f965b379..8adac69dba3d 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -39,6 +39,7 @@
#include <linux/cpufreq.h>
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <acpi/cppc_acpi.h>
/*
@@ -63,58 +64,140 @@ static struct mbox_chan *pcc_channel;
static void __iomem *pcc_comm_addr;
static u64 comm_base_addr;
static int pcc_subspace_idx = -1;
-static u16 pcc_cmd_delay;
static bool pcc_channel_acquired;
+static ktime_t deadline;
+static unsigned int pcc_mpar, pcc_mrtt;
+
+/* pcc mapped address + header size + offset within PCC subspace */
+#define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs))
/*
* Arbitrary Retries in case the remote processor is slow to respond
- * to PCC commands.
+ * to PCC commands. Keeping it high enough to cover emulators where
+ * the processors run painfully slowly.
*/
#define NUM_RETRIES 500
+static int check_pcc_chan(void)
+{
+ int ret = -EIO;
+ struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr;
+ ktime_t next_deadline = ktime_add(ktime_get(), deadline);
+
+ /* Retry in case the remote processor was too slow to catch up. */
+ while (!ktime_after(ktime_get(), next_deadline)) {
+ /*
+		 * Per spec, prior to boot the PCC space will be initialized by
+		 * the platform, which should have set the command completion
+		 * bit by the time PCC can be used by the OSPM.
+ */
+ if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
+ ret = 0;
+ break;
+ }
+ /*
+ * Reducing the bus traffic in case this loop takes longer than
+ * a few retries.
+ */
+ udelay(3);
+ }
+
+ return ret;
+}
+
static int send_pcc_cmd(u16 cmd)
{
- int retries, result = -EIO;
- struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
+ int ret = -EIO;
struct acpi_pcct_shared_memory *generic_comm_base =
(struct acpi_pcct_shared_memory *) pcc_comm_addr;
- u32 cmd_latency = pcct_ss->latency;
+ static ktime_t last_cmd_cmpl_time, last_mpar_reset;
+ static int mpar_count;
+ unsigned int time_delta;
- /* Min time OS should wait before sending next command. */
- udelay(pcc_cmd_delay);
+ /*
+ * For CMD_WRITE we know for a fact the caller should have checked
+ * the channel before writing to PCC space
+ */
+ if (cmd == CMD_READ) {
+ ret = check_pcc_chan();
+ if (ret)
+ return ret;
+ }
+
+ /*
+	 * Handle the Minimum Request Turnaround Time (MRTT):
+ * "The minimum amount of time that OSPM must wait after the completion
+ * of a command before issuing the next command, in microseconds"
+ */
+ if (pcc_mrtt) {
+ time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
+ if (pcc_mrtt > time_delta)
+ udelay(pcc_mrtt - time_delta);
+ }
+
+ /*
+	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
+	 * "The maximum number of periodic requests that the subspace channel can
+	 * support, reported in commands per minute. 0 indicates no limitation."
+	 *
+	 * This parameter should ideally be zero or large enough that it can
+	 * handle the maximum number of requests that all the cores in the
+	 * system can collectively generate. If it is not, we will follow the
+	 * spec and simply not send the request to the platform after hitting
+	 * the MPAR limit in any 60 s window.
+ */
+ if (pcc_mpar) {
+ if (mpar_count == 0) {
+ time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
+ if (time_delta < 60 * MSEC_PER_SEC) {
+ pr_debug("PCC cmd not sent due to MPAR limit");
+ return -EIO;
+ }
+ last_mpar_reset = ktime_get();
+ mpar_count = pcc_mpar;
+ }
+ mpar_count--;
+ }
/* Write to the shared comm region. */
- writew(cmd, &generic_comm_base->command);
+ writew_relaxed(cmd, &generic_comm_base->command);
/* Flip CMD COMPLETE bit */
- writew(0, &generic_comm_base->status);
+ writew_relaxed(0, &generic_comm_base->status);
/* Ring doorbell */
- result = mbox_send_message(pcc_channel, &cmd);
- if (result < 0) {
+ ret = mbox_send_message(pcc_channel, &cmd);
+ if (ret < 0) {
pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
- cmd, result);
- return result;
+ cmd, ret);
+ return ret;
}
- /* Wait for a nominal time to let platform process command. */
- udelay(cmd_latency);
-
- /* Retry in case the remote processor was too slow to catch up. */
- for (retries = NUM_RETRIES; retries > 0; retries--) {
- if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
- result = 0;
- break;
- }
+ /*
+	 * For READs we need to ensure the cmd completed so that the
+	 * ensuing read()s can proceed. For WRITEs we don't care,
+	 * because the actual write()s are done before coming here
+	 * and the next READ or WRITE will check whether the channel
+	 * is busy/free on entry to this call.
+	 *
+	 * If the Minimum Request Turnaround Time is non-zero, we need
+	 * to record the completion time of both READ and WRITE
+	 * commands for proper handling of MRTT, so we check for
+	 * pcc_mrtt in addition to CMD_READ.
+ */
+ if (cmd == CMD_READ || pcc_mrtt) {
+ ret = check_pcc_chan();
+ if (pcc_mrtt)
+ last_cmd_cmpl_time = ktime_get();
}
- mbox_client_txdone(pcc_channel, result);
- return result;
+ mbox_client_txdone(pcc_channel, ret);
+ return ret;
}
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
- if (ret)
+ if (ret < 0)
pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
*(u16 *)msg, ret);
else
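
Taken together, check_pcc_chan() and the bookkeeping in send_pcc_cmd() implement the two pacing rules the PCC specification imposes on the OSPM. A stripped-down model of just that pacing logic, reusing the driver's variable names for clarity (a sketch, not a drop-in replacement):

    #include <linux/ktime.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    static int pcc_pace_command(unsigned int pcc_mrtt, unsigned int pcc_mpar)
    {
            static ktime_t last_cmpl, last_mpar_reset;
            static int mpar_count;
            s64 delta;

            /* MRTT: minimum gap after the previous command completed. */
            if (pcc_mrtt) {
                    delta = ktime_us_delta(ktime_get(), last_cmpl);
                    if (delta < pcc_mrtt)
                            udelay(pcc_mrtt - delta);
            }

            /* MPAR: at most pcc_mpar commands in any 60 s window. */
            if (pcc_mpar) {
                    if (mpar_count == 0) {
                            delta = ktime_ms_delta(ktime_get(),
                                                   last_mpar_reset);
                            if (delta < 60 * MSEC_PER_SEC)
                                    return -EIO;    /* over budget, drop */
                            last_mpar_reset = ktime_get();
                            mpar_count = pcc_mpar;
                    }
                    mpar_count--;
            }

            /* ...send the command; on completion record the time: */
            last_cmpl = ktime_get();
            return 0;
    }
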
@@ -306,6 +389,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
{
struct acpi_pcct_hw_reduced *cppc_ss;
unsigned int len;
+ u64 usecs_lat;
if (pcc_subspace_idx >= 0) {
pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
@@ -335,7 +419,16 @@ static int register_pcc_channel(int pcc_subspace_idx)
*/
comm_base_addr = cppc_ss->base_address;
len = cppc_ss->length;
- pcc_cmd_delay = cppc_ss->min_turnaround_time;
+
+ /*
+ * cppc_ss->latency is just a Nominal value. In reality
+ * the remote processor could be much slower to reply.
+ * So add an arbitrary amount of wait on top of Nominal.
+ */
+ usecs_lat = NUM_RETRIES * cppc_ss->latency;
+ deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+ pcc_mrtt = cppc_ss->min_turnaround_time;
+ pcc_mpar = cppc_ss->max_access_rate;
pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len);
if (!pcc_comm_addr) {
@@ -546,29 +639,74 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
-static u64 get_phys_addr(struct cpc_reg *reg)
-{
- /* PCC communication addr space begins at byte offset 0x8. */
- if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
- return (u64)comm_base_addr + 0x8 + reg->address;
- else
- return reg->address;
-}
+/*
+ * Since cpc_read and cpc_write are called while holding pcc_lock, they should
+ * be as fast as possible. We have already mapped the PCC subspace during init,
+ * so we can access it directly.
+ */
-static void cpc_read(struct cpc_reg *reg, u64 *val)
+static int cpc_read(struct cpc_reg *reg, u64 *val)
{
- u64 addr = get_phys_addr(reg);
+ int ret_val = 0;
- acpi_os_read_memory((acpi_physical_address)addr,
- val, reg->bit_width);
+ *val = 0;
+ if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+ switch (reg->bit_width) {
+ case 8:
+ *val = readb_relaxed(vaddr);
+ break;
+ case 16:
+ *val = readw_relaxed(vaddr);
+ break;
+ case 32:
+ *val = readl_relaxed(vaddr);
+ break;
+ case 64:
+ *val = readq_relaxed(vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot read %u bit width from PCC\n",
+ reg->bit_width);
+ ret_val = -EFAULT;
+ }
+ } else
+ ret_val = acpi_os_read_memory((acpi_physical_address)reg->address,
+ val, reg->bit_width);
+ return ret_val;
}
-static void cpc_write(struct cpc_reg *reg, u64 val)
+static int cpc_write(struct cpc_reg *reg, u64 val)
{
- u64 addr = get_phys_addr(reg);
+ int ret_val = 0;
- acpi_os_write_memory((acpi_physical_address)addr,
- val, reg->bit_width);
+ if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ void __iomem *vaddr = GET_PCC_VADDR(reg->address);
+
+ switch (reg->bit_width) {
+ case 8:
+ writeb_relaxed(val, vaddr);
+ break;
+ case 16:
+ writew_relaxed(val, vaddr);
+ break;
+ case 32:
+ writel_relaxed(val, vaddr);
+ break;
+ case 64:
+ writeq_relaxed(val, vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot write %u bit width to PCC\n",
+ reg->bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ } else
+ ret_val = acpi_os_write_memory((acpi_physical_address)reg->address,
+ val, reg->bit_width);
+ return ret_val;
}
/**
@@ -604,7 +742,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
(ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
(nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ)) {
+ if (send_pcc_cmd(CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -662,7 +800,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
(reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
/* Ring doorbell once to update PCC subspace */
- if (send_pcc_cmd(CMD_READ)) {
+ if (send_pcc_cmd(CMD_READ) < 0) {
ret = -EIO;
goto out_err;
}
@@ -713,6 +851,13 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
spin_lock(&pcc_lock);
+ /* If this is PCC reg, check if channel is free before writing */
+ if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ ret = check_pcc_chan();
+ if (ret)
+ goto busy_channel;
+ }
+
/*
* Skip writing MIN/MAX until Linux knows how to come up with
* useful values.
@@ -722,10 +867,10 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
	/* Is this a PCC reg? */
if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
/* Ring doorbell so Remote can get our perf request. */
- if (send_pcc_cmd(CMD_WRITE))
+ if (send_pcc_cmd(CMD_WRITE) < 0)
ret = -EIO;
}
-
+busy_channel:
spin_unlock(&pcc_lock);
return ret;
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index bea8e425a8de..6c7dd7af789e 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -73,6 +73,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
loff_t init_off = *off;
int err = 0;
+ if (!write_support)
+ return -EINVAL;
+
if (*off >= EC_SPACE_SIZE)
return 0;
if (*off + count >= EC_SPACE_SIZE) {
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 6322db64b4a4..384cfc3083e1 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
-static struct dev_pm_ops acpi_fan_pm = {
+static const struct dev_pm_ops acpi_fan_pm = {
.resume = acpi_fan_resume,
.freeze = acpi_fan_suspend,
.thaw = acpi_fan_resume,
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 1e6833a5cd44..7c188472d9c2 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -20,6 +20,7 @@
#define PREFIX "ACPI: "
+void acpi_initrd_initialize_tables(void);
acpi_status acpi_os_initialize1(void);
void init_acpi_device_notify(void);
int acpi_scan_init(void);
@@ -29,6 +30,11 @@ void acpi_processor_init(void);
void acpi_platform_init(void);
void acpi_pnp_init(void);
void acpi_int340x_thermal_init(void);
+#ifdef CONFIG_ARM_AMBA
+void acpi_amba_init(void);
+#else
+static inline void acpi_amba_init(void) {}
+#endif
int acpi_sysfs_init(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
@@ -106,6 +112,7 @@ bool acpi_device_is_present(struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
+struct device *acpi_get_first_physical_node(struct acpi_device *adev);
/* --------------------------------------------------------------------------
Device Matching and Notification
@@ -138,6 +145,12 @@ void acpi_early_processor_set_pdc(void);
static inline void acpi_early_processor_set_pdc(void) {}
#endif
+#ifdef CONFIG_X86
+void acpi_early_processor_osc(void);
+#else
+static inline void acpi_early_processor_osc(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 35947ac87644..d0f35e63640b 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -21,6 +21,7 @@
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
+#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"
@@ -34,6 +35,18 @@ static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
+static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
+module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");
+
+/* after three payloads of overflow, it's dead, Jim */
+static unsigned int scrub_overflow_abort = 3;
+module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scrub_overflow_abort,
+ "Number of times we overflow ARS results before abort");
+
+static struct workqueue_struct *nfit_wq;
+
struct nfit_table_prev {
struct list_head spas;
struct list_head memdevs;
@@ -72,9 +85,90 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
return to_acpi_device(acpi_desc->dev);
}
+static int xlat_status(void *buf, unsigned int cmd)
+{
+ struct nd_cmd_clear_error *clear_err;
+ struct nd_cmd_ars_status *ars_status;
+ struct nd_cmd_ars_start *ars_start;
+ struct nd_cmd_ars_cap *ars_cap;
+ u16 flags;
+
+ switch (cmd) {
+ case ND_CMD_ARS_CAP:
+ ars_cap = buf;
+ if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+ return -ENOTTY;
+
+ /* Command failed */
+ if (ars_cap->status & 0xffff)
+ return -EIO;
+
+ /* No supported scan types for this range */
+ flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
+ if ((ars_cap->status >> 16 & flags) == 0)
+ return -ENOTTY;
+ break;
+ case ND_CMD_ARS_START:
+ ars_start = buf;
+ /* ARS is in progress */
+ if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+ return -EBUSY;
+
+ /* Command failed */
+ if (ars_start->status & 0xffff)
+ return -EIO;
+ break;
+ case ND_CMD_ARS_STATUS:
+ ars_status = buf;
+ /* Command failed */
+ if (ars_status->status & 0xffff)
+ return -EIO;
+ /* Check extended status (Upper two bytes) */
+ if (ars_status->status == NFIT_ARS_STATUS_DONE)
+ return 0;
+
+ /* ARS is in progress */
+ if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+ return -EBUSY;
+
+ /* No ARS performed for the current boot */
+ if (ars_status->status == NFIT_ARS_STATUS_NONE)
+ return -EAGAIN;
+
+ /*
+ * ARS interrupted, either we overflowed or some other
+ * agent wants the scan to stop. If we didn't overflow
+ * then just continue with the returned results.
+ */
+ if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+ if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
+ return -ENOSPC;
+ return 0;
+ }
+
+ /* Unknown status */
+ if (ars_status->status >> 16)
+ return -EIO;
+ break;
+ case ND_CMD_CLEAR_ERROR:
+ clear_err = buf;
+ if (clear_err->status & 0xffff)
+ return -EIO;
+ if (!clear_err->cleared)
+ return -EIO;
+ if (clear_err->length > clear_err->cleared)
+ return clear_err->cleared;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
- unsigned int buf_len)
+ unsigned int buf_len, int *cmd_rc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
const struct nd_cmd_desc *desc = NULL;
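
The new cmd_rc out-parameter lets callers separate transport failures (a negative return from the _DSM plumbing) from command-level status as translated by xlat_status(). A sketch mirroring the reworked ars_* helpers later in this patch:

    static int ars_cap_example(struct nvdimm_bus_descriptor *nd_desc,
                               struct acpi_nfit_system_address *spa)
    {
            struct nd_cmd_ars_cap cap = {
                    .address = spa->address,
                    .length = spa->length,
            };
            int cmd_rc, rc;

            rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &cap,
                                sizeof(cap), &cmd_rc);
            if (rc < 0)             /* transport / plumbing failed */
                    return rc;
            return cmd_rc;          /* 0, -ENOTTY, -EIO, ... from xlat_status() */
    }

In the scrub state machine below, -EBUSY from an ARS start or status command means a scrub is still in flight, and -ENOSPC from a status query means the results buffer overflowed and ars_continue() must be issued.
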
@@ -185,6 +279,8 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
* unfilled in the output buffer
*/
rc = buf_len - offset - in_buf.buffer.length;
+ if (cmd_rc)
+ *cmd_rc = xlat_status(buf, cmd);
} else {
dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
__func__, dimm_name, cmd_name, buf_len,
@@ -675,12 +771,11 @@ static struct attribute_group acpi_nfit_attribute_group = {
.attrs = acpi_nfit_attributes,
};
-const struct attribute_group *acpi_nfit_attribute_groups[] = {
+static const struct attribute_group *acpi_nfit_attribute_groups[] = {
&nvdimm_bus_attribute_group,
&acpi_nfit_attribute_group,
NULL,
};
-EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
@@ -917,7 +1012,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
if (!adev)
return;
- for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
+ for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
set_bit(i, &nd_desc->dsm_mask);
}
@@ -1105,7 +1200,7 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
writeq(cmd, mmio->addr.base + offset);
wmb_blk(nfit_blk);
- if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
+ if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
readq(mmio->addr.base + offset);
}
@@ -1141,7 +1236,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
memcpy_to_pmem(mmio->addr.aperture + offset,
iobuf + copied, c);
else {
- if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
+ if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
mmio_flush_range((void __force *)
mmio->addr.aperture + offset, c);
@@ -1328,13 +1423,13 @@ static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
memset(&flags, 0, sizeof(flags));
rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
- sizeof(flags));
+ sizeof(flags), NULL);
if (rc >= 0 && flags.status == 0)
nfit_blk->dimm_flags = flags.flags;
else if (rc == -ENOTTY) {
/* fall back to a conservative default */
- nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
+ nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
rc = 0;
} else
rc = -ENXIO;
@@ -1473,93 +1568,85 @@ static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
/* devm will free nfit_blk */
}
-static int ars_get_cap(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_cap *cmd, u64 addr, u64 length)
+static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
+ struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
- cmd->address = addr;
- cmd->length = length;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ int cmd_rc, rc;
- return nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
- sizeof(*cmd));
+ cmd->address = spa->address;
+ cmd->length = spa->length;
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
+ sizeof(*cmd), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
}
-static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_start *cmd, u64 addr, u64 length)
+static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
int rc;
+ int cmd_rc;
+ struct nd_cmd_ars_start ars_start;
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
- cmd->address = addr;
- cmd->length = length;
- cmd->type = ND_ARS_PERSISTENT;
+ memset(&ars_start, 0, sizeof(ars_start));
+ ars_start.address = spa->address;
+ ars_start.length = spa->length;
+ if (nfit_spa_type(spa) == NFIT_SPA_PM)
+ ars_start.type = ND_ARS_PERSISTENT;
+ else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
+ ars_start.type = ND_ARS_VOLATILE;
+ else
+ return -ENOTTY;
- while (1) {
- rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, cmd,
- sizeof(*cmd));
- if (rc)
- return rc;
- switch (cmd->status) {
- case 0:
- return 0;
- case 1:
- /* ARS unsupported, but we should never get here */
- return 0;
- case 6:
- /* ARS is in progress */
- msleep(1000);
- break;
- default:
- return -ENXIO;
- }
- }
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
+ sizeof(ars_start), &cmd_rc);
+
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
}
-static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_status *cmd, u32 size)
+static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
- int rc;
+ int rc, cmd_rc;
+ struct nd_cmd_ars_start ars_start;
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
+
+ memset(&ars_start, 0, sizeof(ars_start));
+ ars_start.address = ars_status->restart_address;
+ ars_start.length = ars_status->restart_length;
+ ars_start.type = ars_status->type;
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
+ sizeof(ars_start), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
+}
- while (1) {
- rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
- size);
- if (rc || cmd->status & 0xffff)
- return -ENXIO;
+static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
+ struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
+ int rc, cmd_rc;
- /* Check extended status (Upper two bytes) */
- switch (cmd->status >> 16) {
- case 0:
- return 0;
- case 1:
- /* ARS is in progress */
- msleep(1000);
- break;
- case 2:
- /* No ARS performed for the current boot */
- return 0;
- case 3:
- /* TODO: error list overflow support */
- default:
- return -ENXIO;
- }
- }
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
+ acpi_desc->ars_status_size, &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
}
static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
- struct nd_cmd_ars_status *ars_status, u64 start)
+ struct nd_cmd_ars_status *ars_status)
{
int rc;
u32 i;
- /*
- * The address field returned by ars_status should be either
- * less than or equal to the address we last started ARS for.
- * The (start, length) returned by ars_status should also have
- * non-zero overlap with the range we started ARS for.
- * If this is not the case, bail.
- */
- if (ars_status->address > start ||
- (ars_status->address + ars_status->length < start))
- return -ENXIO;
-
for (i = 0; i < ars_status->num_records; i++) {
rc = nvdimm_bus_add_poison(nvdimm_bus,
ars_status->records[i].err_address,
@@ -1571,118 +1658,56 @@ static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
return 0;
}
-static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
- struct nd_region_desc *ndr_desc)
+static void acpi_nfit_remove_resource(void *data)
{
- struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
- struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
- struct nd_cmd_ars_status *ars_status = NULL;
- struct nd_cmd_ars_start *ars_start = NULL;
- struct nd_cmd_ars_cap *ars_cap = NULL;
- u64 start, len, cur, remaining;
- u32 ars_status_size;
- int rc;
-
- ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
- if (!ars_cap)
- return -ENOMEM;
+ struct resource *res = data;
- start = ndr_desc->res->start;
- len = ndr_desc->res->end - ndr_desc->res->start + 1;
-
- /*
- * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
- * Scrub' flag in extended status is not set, skip this but continue
- * initialization
- */
- rc = ars_get_cap(nd_desc, ars_cap, start, len);
- if (rc == -ENOTTY) {
- dev_dbg(acpi_desc->dev,
- "Address Range Scrub is not implemented, won't create an error list\n");
- rc = 0;
- goto out;
- }
- if (rc)
- goto out;
-
- if ((ars_cap->status & 0xffff) ||
- !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
- dev_warn(acpi_desc->dev,
- "ARS unsupported (status: 0x%x), won't create an error list\n",
- ars_cap->status);
- goto out;
- }
-
- /*
- * Check if a full-range ARS has been run. If so, use those results
- * without having to start a new ARS.
- */
- ars_status_size = ars_cap->max_ars_out;
- ars_status = kzalloc(ars_status_size, GFP_KERNEL);
- if (!ars_status) {
- rc = -ENOMEM;
- goto out;
- }
+ remove_resource(res);
+}
- rc = ars_get_status(nd_desc, ars_status, ars_status_size);
- if (rc)
- goto out;
+static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
+ struct nd_region_desc *ndr_desc)
+{
+ struct resource *res, *nd_res = ndr_desc->res;
+ int is_pmem, ret;
- if (ars_status->address <= start &&
- (ars_status->address + ars_status->length >= start + len)) {
- rc = ars_status_process_records(nvdimm_bus, ars_status, start);
- goto out;
- }
+ /* No operation if the region is already registered as PMEM */
+ is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
+ IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
+ if (is_pmem == REGION_INTERSECTS)
+ return 0;
- /*
- * ARS_STATUS can overflow if the number of poison entries found is
- * greater than the maximum buffer size (ars_cap->max_ars_out)
- * To detect overflow, check if the length field of ars_status
- * is less than the length we supplied. If so, process the
- * error entries we got, adjust the start point, and start again
- */
- ars_start = kzalloc(sizeof(*ars_start), GFP_KERNEL);
- if (!ars_start)
+ res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
return -ENOMEM;
- cur = start;
- remaining = len;
- do {
- u64 done, end;
-
- rc = ars_do_start(nd_desc, ars_start, cur, remaining);
- if (rc)
- goto out;
-
- rc = ars_get_status(nd_desc, ars_status, ars_status_size);
- if (rc)
- goto out;
+ res->name = "Persistent Memory";
+ res->start = nd_res->start;
+ res->end = nd_res->end;
+ res->flags = IORESOURCE_MEM;
+ res->desc = IORES_DESC_PERSISTENT_MEMORY;
- rc = ars_status_process_records(nvdimm_bus, ars_status, cur);
- if (rc)
- goto out;
+ ret = insert_resource(&iomem_resource, res);
+ if (ret)
+ return ret;
- end = min(cur + remaining,
- ars_status->address + ars_status->length);
- done = end - cur;
- cur += done;
- remaining -= done;
- } while (remaining);
+ ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
+ if (ret) {
+ remove_resource(res);
+ return ret;
+ }
- out:
- kfree(ars_cap);
- kfree(ars_start);
- kfree(ars_status);
- return rc;
+ return 0;
}
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
struct acpi_nfit_memory_map *memdev,
- struct acpi_nfit_system_address *spa)
+ struct nfit_spa *nfit_spa)
{
struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
memdev->device_handle);
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
struct nd_blk_region_desc *ndbr_desc;
struct nfit_mem *nfit_mem;
int blk_valid = 0;
@@ -1718,7 +1743,9 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
ndbr_desc->enable = acpi_nfit_blk_region_enable;
ndbr_desc->disable = acpi_nfit_blk_region_disable;
ndbr_desc->do_io = acpi_desc->blk_do_io;
- if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
+ nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
return -ENOMEM;
break;
}
@@ -1738,7 +1765,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct resource res;
int count = 0, rc;
- if (nfit_spa->is_registered)
+ if (nfit_spa->nd_region)
return 0;
if (spa->range_index == 0) {
@@ -1775,47 +1802,332 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
}
nd_mapping = &nd_mappings[count++];
rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
- memdev, spa);
+ memdev, nfit_spa);
if (rc)
- return rc;
+ goto out;
}
ndr_desc->nd_mapping = nd_mappings;
ndr_desc->num_mappings = count;
rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
if (rc)
- return rc;
+ goto out;
nvdimm_bus = acpi_desc->nvdimm_bus;
if (nfit_spa_type(spa) == NFIT_SPA_PM) {
- rc = acpi_nfit_find_poison(acpi_desc, ndr_desc);
+ rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
if (rc) {
- dev_err(acpi_desc->dev,
- "error while performing ARS to find poison: %d\n",
+ dev_warn(acpi_desc->dev,
+ "failed to insert pmem resource to iomem: %d\n",
rc);
- return rc;
+ goto out;
}
- if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
- return -ENOMEM;
+
+ nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
+ rc = -ENOMEM;
} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
- if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
- return -ENOMEM;
+ nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
+ ndr_desc);
+ if (!nfit_spa->nd_region)
+ rc = -ENOMEM;
}
- nfit_spa->is_registered = 1;
+ out:
+ if (rc)
+ dev_err(acpi_desc->dev, "failed to register spa range %d\n",
+ nfit_spa->spa->range_index);
+ return rc;
+}
+
+static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
+ u32 max_ars)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nd_cmd_ars_status *ars_status;
+
+ if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
+ memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
+ return 0;
+ }
+
+ if (acpi_desc->ars_status)
+ devm_kfree(dev, acpi_desc->ars_status);
+ acpi_desc->ars_status = NULL;
+ ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
+ if (!ars_status)
+ return -ENOMEM;
+ acpi_desc->ars_status = ars_status;
+ acpi_desc->ars_status_size = max_ars;
return 0;
}
-static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
+static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
{
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ int rc;
+
+ if (!nfit_spa->max_ars) {
+ struct nd_cmd_ars_cap ars_cap;
+
+ memset(&ars_cap, 0, sizeof(ars_cap));
+ rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
+ if (rc < 0)
+ return rc;
+ nfit_spa->max_ars = ars_cap.max_ars_out;
+ nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
+ /* check that the supported scrub types match the spa type */
+ if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
+ ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
+ return -ENOTTY;
+ else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
+ ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
+ return -ENOTTY;
+ }
+
+ if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
+ return -ENOMEM;
+
+ rc = ars_get_status(acpi_desc);
+ if (rc < 0 && rc != -ENOSPC)
+ return rc;
+
+ if (ars_status_process_records(acpi_desc->nvdimm_bus,
+ acpi_desc->ars_status))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
+{
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ unsigned int overflow_retry = scrub_overflow_abort;
+ u64 init_ars_start = 0, init_ars_len = 0;
+ struct device *dev = acpi_desc->dev;
+ unsigned int tmo = scrub_timeout;
+ int rc;
+
+ if (nfit_spa->ars_done || !nfit_spa->nd_region)
+ return;
+
+ rc = ars_start(acpi_desc, nfit_spa);
+ /*
+ * If we timed out the initial scan we'll still be busy here,
+ * and will wait another timeout before giving up permanently.
+ */
+ if (rc < 0 && rc != -EBUSY)
+ return;
+
+ do {
+ u64 ars_start, ars_len;
+
+ if (acpi_desc->cancel)
+ break;
+ rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
+ if (rc == -ENOTTY)
+ break;
+ if (rc == -EBUSY && !tmo) {
+ dev_warn(dev, "range %d ars timeout, aborting\n",
+ spa->range_index);
+ break;
+ }
+
+ if (rc == -EBUSY) {
+ /*
+ * Note, entries may be appended to the list
+ * while the lock is dropped, but the workqueue
+			 * being active prevents entries from being
+			 * deleted / freed.
+ */
+ mutex_unlock(&acpi_desc->init_mutex);
+ ssleep(1);
+ tmo--;
+ mutex_lock(&acpi_desc->init_mutex);
+ continue;
+ }
+
+ /* we got some results, but there are more pending... */
+ if (rc == -ENOSPC && overflow_retry--) {
+ if (!init_ars_len) {
+ init_ars_len = acpi_desc->ars_status->length;
+ init_ars_start = acpi_desc->ars_status->address;
+ }
+ rc = ars_continue(acpi_desc);
+ }
+
+ if (rc < 0) {
+ dev_warn(dev, "range %d ars continuation failed\n",
+ spa->range_index);
+ break;
+ }
+
+ if (init_ars_len) {
+ ars_start = init_ars_start;
+ ars_len = init_ars_len;
+ } else {
+ ars_start = acpi_desc->ars_status->address;
+ ars_len = acpi_desc->ars_status->length;
+ }
+ dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
+ spa->range_index, ars_start, ars_len);
+ /* notify the region about new poison entries */
+ nvdimm_region_notify(nfit_spa->nd_region,
+ NVDIMM_REVALIDATE_POISON);
+ break;
+ } while (1);
+}
+
+static void acpi_nfit_scrub(struct work_struct *work)
+{
+ struct device *dev;
+ u64 init_scrub_length = 0;
struct nfit_spa *nfit_spa;
+ u64 init_scrub_address = 0;
+ bool init_ars_done = false;
+ struct acpi_nfit_desc *acpi_desc;
+ unsigned int tmo = scrub_timeout;
+ unsigned int overflow_retry = scrub_overflow_abort;
+
+ acpi_desc = container_of(work, typeof(*acpi_desc), work);
+ dev = acpi_desc->dev;
+ /*
+ * We scrub in 2 phases. The first phase waits for any platform
+ * firmware initiated scrubs to complete and then we go search for the
+ * affected spa regions to mark them scanned. In the second phase we
+ * initiate a directed scrub for every range that was not scrubbed in
+ * phase 1.
+ */
+
+ /* process platform firmware initiated scrubs */
+ retry:
+ mutex_lock(&acpi_desc->init_mutex);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
- int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+ struct nd_cmd_ars_status *ars_status;
+ struct acpi_nfit_system_address *spa;
+ u64 ars_start, ars_len;
+ int rc;
- if (rc)
- return rc;
+ if (acpi_desc->cancel)
+ break;
+
+ if (nfit_spa->nd_region)
+ continue;
+
+ if (init_ars_done) {
+ /*
+ * No need to re-query, we're now just
+ * reconciling all the ranges covered by the
+ * initial scrub
+ */
+ rc = 0;
+ } else
+ rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
+
+ if (rc == -ENOTTY) {
+ /* no ars capability, just register spa and move on */
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
+ continue;
+ }
+
+ if (rc == -EBUSY && !tmo) {
+ /* fallthrough to directed scrub in phase 2 */
+ dev_warn(dev, "timeout awaiting ars results, continuing...\n");
+ break;
+ } else if (rc == -EBUSY) {
+ mutex_unlock(&acpi_desc->init_mutex);
+ ssleep(1);
+ tmo--;
+ goto retry;
+ }
+
+ /* we got some results, but there are more pending... */
+ if (rc == -ENOSPC && overflow_retry--) {
+ ars_status = acpi_desc->ars_status;
+ /*
+ * Record the original scrub range, so that we
+ * can recall all the ranges impacted by the
+ * initial scrub.
+ */
+ if (!init_scrub_length) {
+ init_scrub_length = ars_status->length;
+ init_scrub_address = ars_status->address;
+ }
+ rc = ars_continue(acpi_desc);
+ if (rc == 0) {
+ mutex_unlock(&acpi_desc->init_mutex);
+ goto retry;
+ }
+ }
+
+ if (rc < 0) {
+ /*
+ * Initial scrub failed, we'll give it one more
+ * try below...
+ */
+ break;
+ }
+
+ /* We got some final results, record completed ranges */
+ ars_status = acpi_desc->ars_status;
+ if (init_scrub_length) {
+ ars_start = init_scrub_address;
+			ars_len = init_scrub_length;
+ } else {
+ ars_start = ars_status->address;
+ ars_len = ars_status->length;
+ }
+ spa = nfit_spa->spa;
+
+ if (!init_ars_done) {
+ init_ars_done = true;
+ dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
+ ars_start, ars_len);
+ }
+ if (ars_start <= spa->address && ars_start + ars_len
+ >= spa->address + spa->length)
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
}
+
+ /*
+ * For all the ranges not covered by an initial scrub we still
+ * want to see if there are errors, but it's ok to discover them
+ * asynchronously.
+ */
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ /*
+ * Flag all the ranges that still need scrubbing, but
+ * register them now to make data available.
+ */
+ if (nfit_spa->nd_region)
+ nfit_spa->ars_done = 1;
+ else
+ acpi_nfit_register_region(acpi_desc, nfit_spa);
+ }
+
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ acpi_nfit_async_scrub(acpi_desc, nfit_spa);
+ mutex_unlock(&acpi_desc->init_mutex);
+}
+
+static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
+{
+ struct nfit_spa *nfit_spa;
+ int rc;
+
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
+ /* BLK regions don't need to wait for ars results */
+ rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+ if (rc)
+ return rc;
+ }
+
+ queue_work(nfit_wq, &acpi_desc->work);
return 0;
}
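
One detail worth noting in acpi_nfit_insert_resource() above: insert_resource() is paired with devm_add_action() so the iomem entry is removed automatically at device teardown, with a manual remove_resource() as the fallback when registering the action itself fails. Reduced to its essentials:

    /* Sketch of the managed-insertion pattern used above. */
    static int managed_insert(struct device *dev, struct resource *res)
    {
            int ret;

            ret = insert_resource(&iomem_resource, res);
            if (ret)
                    return ret;

            ret = devm_add_action(dev, acpi_nfit_remove_resource, res);
            if (ret) {
                    /* Action not registered: undo by hand. */
                    remove_resource(res);
                    return ret;
            }
            return 0;
    }

Later kernels wrap this exact insert-then-register dance as devm_add_action_or_reset().
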
@@ -1901,15 +2213,64 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
-static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
+struct acpi_nfit_flush_work {
+ struct work_struct work;
+ struct completion cmp;
+};
+
+static void flush_probe(struct work_struct *work)
{
- struct nvdimm_bus_descriptor *nd_desc;
- struct acpi_nfit_desc *acpi_desc;
- struct device *dev = &adev->dev;
+ struct acpi_nfit_flush_work *flush;
- acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
- if (!acpi_desc)
- return ERR_PTR(-ENOMEM);
+ flush = container_of(work, typeof(*flush), work);
+ complete(&flush->cmp);
+}
+
+static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct device *dev = acpi_desc->dev;
+ struct acpi_nfit_flush_work flush;
+
+ /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
+ device_lock(dev);
+ device_unlock(dev);
+
+ /*
+	 * Scrub work could take tens of seconds, and userspace may give up,
+	 * so we need to be interruptible while waiting.
+ */
+ INIT_WORK_ONSTACK(&flush.work, flush_probe);
+ init_completion(&flush.cmp);
+ queue_work(nfit_wq, &flush.work);
+ return wait_for_completion_interruptible(&flush.cmp);
+}
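The flush above is a reusable kernel idiom: queue an on-stack work item behind everything already pending on a singlethread workqueue and wait on a completion, so the caller observes all previously queued work without holding any lock. A minimal sketch of the same pattern (all names hypothetical):

    #include <linux/workqueue.h>
    #include <linux/completion.h>

    struct flush_ctx {
            struct work_struct work;
            struct completion done;
    };

    static void flush_fn(struct work_struct *work)
    {
            struct flush_ctx *ctx = container_of(work, struct flush_ctx, work);

            /* on an ordered queue, runs only after earlier work finished */
            complete(&ctx->done);
    }

    /* returns 0 once prior work has drained, -ERESTARTSYS on signal */
    static int wait_for_prior_work(struct workqueue_struct *wq)
    {
            struct flush_ctx ctx;

            INIT_WORK_ONSTACK(&ctx.work, flush_fn);
            init_completion(&ctx.done);
            queue_work(wq, &ctx.work);
            return wait_for_completion_interruptible(&ctx.done);
    }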
+
+static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd)
+{
+ struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+
+ if (nvdimm)
+ return 0;
+ if (cmd != ND_CMD_ARS_START)
+ return 0;
+
+ /*
+ * The kernel and userspace may race to initiate a scrub, but
+ * the scrub thread is prepared to lose that initial race. It
+ * just needs guarantees that any ars it initiates are not
+ * interrupted by any intervening start requests from userspace.
+ */
+ if (work_busy(&acpi_desc->work))
+ return -EBUSY;
+
+ return 0;
+}
+
+void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
+{
+ struct nvdimm_bus_descriptor *nd_desc;
dev_set_drvdata(dev, acpi_desc);
acpi_desc->dev = dev;
@@ -1917,14 +2278,10 @@ static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
nd_desc = &acpi_desc->nd_desc;
nd_desc->provider_name = "ACPI.NFIT";
nd_desc->ndctl = acpi_nfit_ctl;
+ nd_desc->flush_probe = acpi_nfit_flush_probe;
+ nd_desc->clear_to_send = acpi_nfit_clear_to_send;
nd_desc->attr_groups = acpi_nfit_attribute_groups;
- acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
- if (!acpi_desc->nvdimm_bus) {
- devm_kfree(dev, acpi_desc);
- return ERR_PTR(-ENXIO);
- }
-
INIT_LIST_HEAD(&acpi_desc->spa_maps);
INIT_LIST_HEAD(&acpi_desc->spas);
INIT_LIST_HEAD(&acpi_desc->dcrs);
@@ -1935,9 +2292,9 @@ static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
INIT_LIST_HEAD(&acpi_desc->dimms);
mutex_init(&acpi_desc->spa_map_mutex);
mutex_init(&acpi_desc->init_mutex);
-
- return acpi_desc;
+ INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
+EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
static int acpi_nfit_add(struct acpi_device *adev)
{
@@ -1956,12 +2313,13 @@ static int acpi_nfit_add(struct acpi_device *adev)
return 0;
}
- acpi_desc = acpi_nfit_desc_init(adev);
- if (IS_ERR(acpi_desc)) {
- dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
- __func__, PTR_ERR(acpi_desc));
- return PTR_ERR(acpi_desc);
- }
+ acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
+ if (!acpi_desc)
+ return -ENOMEM;
+ acpi_nfit_desc_init(acpi_desc, &adev->dev);
+ acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
+ if (!acpi_desc->nvdimm_bus)
+ return -ENOMEM;
/*
* Save the acpi header for later and then skip it,
@@ -2000,6 +2358,8 @@ static int acpi_nfit_remove(struct acpi_device *adev)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
+ acpi_desc->cancel = 1;
+ flush_workqueue(nfit_wq);
nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
return 0;
}
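The remove path works only because the scrub worker cooperates: the `cancel` flag is checked inside the long-running loops, so `flush_workqueue()` returns promptly instead of waiting out a 90-second ARS timeout. A condensed sketch of that contract (names hypothetical):

    /* worker: poll the cancel flag between long-running steps */
    static void scrub_like_work(struct work_struct *work)
    {
            struct my_desc *d = container_of(work, struct my_desc, work);

            while (have_more_ranges(d)) {
                    if (d->cancel)
                            return;         /* lets flush_workqueue() finish */
                    process_one_range(d);
            }
    }

    /* teardown: raise the flag first, then drain the queue */
    static void my_teardown(struct my_desc *d)
    {
            d->cancel = 1;
            flush_workqueue(my_wq);
    }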
@@ -2024,12 +2384,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
}
if (!acpi_desc) {
- acpi_desc = acpi_nfit_desc_init(adev);
- if (IS_ERR(acpi_desc)) {
- dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
- __func__, PTR_ERR(acpi_desc));
+ acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
+ if (!acpi_desc)
goto out_unlock;
- }
+ acpi_nfit_desc_init(acpi_desc, &adev->dev);
+ acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
+ if (!acpi_desc->nvdimm_bus)
+ goto out_unlock;
+ } else {
+ /*
+ * Finish previous registration before considering new
+ * regions.
+ */
+ flush_workqueue(nfit_wq);
}
/* Evaluate _FIT */
@@ -2097,12 +2464,17 @@ static __init int nfit_init(void)
acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
+ nfit_wq = create_singlethread_workqueue("nfit");
+ if (!nfit_wq)
+ return -ENOMEM;
+
return acpi_bus_register_driver(&acpi_nfit_driver);
}
static __exit void nfit_exit(void)
{
acpi_bus_unregister_driver(&acpi_nfit_driver);
+ destroy_workqueue(nfit_wq);
}
module_init(nfit_init);
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 3d549a383659..c75576b2d50e 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -14,6 +14,7 @@
*/
#ifndef __NFIT_H__
#define __NFIT_H__
+#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/types.h>
#include <linux/uuid.h>
@@ -40,15 +41,32 @@ enum nfit_uuids {
NFIT_UUID_MAX,
};
+enum nfit_fic {
+ NFIT_FIC_BYTE = 0x101, /* byte-addressable energy backed */
+ NFIT_FIC_BLK = 0x201, /* block-addressable non-energy backed */
+ NFIT_FIC_BYTEN = 0x301, /* byte-addressable non-energy backed */
+};
+
enum {
- ND_BLK_READ_FLUSH = 1,
- ND_BLK_DCR_LATCH = 2,
+ NFIT_BLK_READ_FLUSH = 1,
+ NFIT_BLK_DCR_LATCH = 2,
+ NFIT_ARS_STATUS_DONE = 0,
+ NFIT_ARS_STATUS_BUSY = 1 << 16,
+ NFIT_ARS_STATUS_NONE = 2 << 16,
+ NFIT_ARS_STATUS_INTR = 3 << 16,
+ NFIT_ARS_START_BUSY = 6,
+ NFIT_ARS_CAP_NONE = 1,
+ NFIT_ARS_F_OVERFLOW = 1,
+ NFIT_ARS_TIMEOUT = 90,
};
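The NFIT_ARS_STATUS_* values encode the DSM's extended status, which firmware returns in the upper 16 bits of the ARS status field; the `<< 16` shifts make the constants directly comparable after masking. An illustrative decode, with the errno mapping chosen here only as an example rather than the driver's exact handling:

    static int ars_status_to_errno(u32 status)
    {
            /* extended status lives in bits 31:16 */
            switch (status & 0xffff0000) {
            case NFIT_ARS_STATUS_DONE:
                    return 0;
            case NFIT_ARS_STATUS_BUSY:
                    return -EBUSY;  /* scrub still running, poll again */
            case NFIT_ARS_STATUS_NONE:
                    return -ENODEV; /* no scrub data for the range */
            case NFIT_ARS_STATUS_INTR:
                    return -EINVAL; /* scrub was interrupted */
            default:
                    return -EIO;
            }
    }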
struct nfit_spa {
struct acpi_nfit_system_address *spa;
struct list_head list;
- int is_registered;
+ struct nd_region *nd_region;
+ unsigned int ars_done:1;
+ u32 clear_err_unit;
+ u32 max_ars;
};
struct nfit_dcr {
@@ -110,6 +128,10 @@ struct acpi_nfit_desc {
struct list_head idts;
struct nvdimm_bus *nvdimm_bus;
struct device *dev;
+ struct nd_cmd_ars_status *ars_status;
+ size_t ars_status_size;
+ struct work_struct work;
+ unsigned int cancel:1;
unsigned long dimm_dsm_force_en;
unsigned long bus_dsm_force_en;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
@@ -182,5 +204,5 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
const u8 *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz);
-extern const struct attribute_group *acpi_nfit_attribute_groups[];
+void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev);
#endif /* __NFIT_H__ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 67da6fb72274..814d5f83b75e 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -602,6 +602,14 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+ pr_warn(PREFIX
+ "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+ table->signature, table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+}
+
#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>
@@ -636,6 +644,7 @@ static const char * const table_sigs[] = {
#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);
#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
@@ -746,96 +755,125 @@ void __init acpi_initrd_override(void *data, size_t size)
}
}
}
-#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
-static void acpi_table_taint(struct acpi_table_header *table)
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address, u32 *length)
{
- pr_warn(PREFIX
- "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
- table->signature, table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
-}
+ int table_offset = 0;
+ int table_index = 0;
+ struct acpi_table_header *table;
+ u32 table_length;
+ *length = 0;
+ *address = 0;
+ if (!acpi_tables_addr)
+ return AE_OK;
-acpi_status
-acpi_os_table_override(struct acpi_table_header * existing_table,
- struct acpi_table_header ** new_table)
-{
- if (!existing_table || !new_table)
- return AE_BAD_PARAMETER;
+ while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return AE_OK;
+ }
- *new_table = NULL;
+ table_length = table->length;
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
- if (strncmp(existing_table->signature, "DSDT", 4) == 0)
- *new_table = (struct acpi_table_header *)AmlCode;
-#endif
- if (*new_table != NULL)
+ /* Only override each matching table once */
+ if (test_bit(table_index, acpi_initrd_installed) ||
+ memcmp(existing_table->signature, table->signature, 4) ||
+ memcmp(table->oem_table_id, existing_table->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+
+ *length = table_length;
+ *address = acpi_tables_addr + table_offset;
acpi_table_taint(existing_table);
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ set_bit(table_index, acpi_initrd_installed);
+ break;
+
+next_table:
+ table_offset += table_length;
+ table_index++;
+ }
return AE_OK;
}
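Both this override pass and the install pass below treat `acpi_initrd_installed` as a consume-once ledger, so a single initrd table can never be used twice. The underlying bitmap idiom, reduced to its essentials (names hypothetical):

    #include <linux/bitmap.h>

    #define NR_SLOTS 64
    static DECLARE_BITMAP(consumed, NR_SLOTS);

    static bool try_consume(int idx)
    {
            if (test_bit(idx, consumed))
                    return false;   /* already handed out once */
            set_bit(idx, consumed);
            return true;
    }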
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address *address,
- u32 *table_length)
+void __init acpi_initrd_initialize_tables(void)
{
-#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
- *table_length = 0;
- *address = 0;
- return AE_OK;
-#else
int table_offset = 0;
+ int table_index = 0;
+ u32 table_length;
struct acpi_table_header *table;
- *table_length = 0;
- *address = 0;
-
if (!acpi_tables_addr)
- return AE_OK;
-
- do {
- if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
- WARN_ON(1);
- return AE_OK;
- }
+ return;
+ while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
table = acpi_os_map_memory(acpi_tables_addr + table_offset,
ACPI_HEADER_SIZE);
-
if (table_offset + table->length > all_tables_size) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
WARN_ON(1);
- return AE_OK;
+ return;
}
- table_offset += table->length;
+ table_length = table->length;
- if (memcmp(existing_table->signature, table->signature, 4)) {
- acpi_os_unmap_memory(table,
- ACPI_HEADER_SIZE);
- continue;
- }
-
- /* Only override tables with matching oem id */
- if (memcmp(table->oem_table_id, existing_table->oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE)) {
- acpi_os_unmap_memory(table,
- ACPI_HEADER_SIZE);
- continue;
+ /* Skip already-consumed tables and RSDT/XSDT, which must not be installed */
+ if (test_bit(table_index, acpi_initrd_installed) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
}
- table_offset -= table->length;
- *table_length = table->length;
+ acpi_table_taint(table);
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- *address = acpi_tables_addr + table_offset;
- break;
- } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
+ acpi_install_table(acpi_tables_addr + table_offset, TRUE);
+ set_bit(table_index, acpi_initrd_installed);
+next_table:
+ table_offset += table_length;
+ table_index++;
+ }
+}
+#else
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address,
+ u32 *table_length)
+{
+ *table_length = 0;
+ *address = 0;
+ return AE_OK;
+}
+
+void __init acpi_initrd_initialize_tables(void)
+{
+}
+#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
- if (*address != 0)
+acpi_status
+acpi_os_table_override(struct acpi_table_header *existing_table,
+ struct acpi_table_header **new_table)
+{
+ if (!existing_table || !new_table)
+ return AE_BAD_PARAMETER;
+
+ *new_table = NULL;
+
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+ if (strncmp(existing_table->signature, "DSDT", 4) == 0)
+ *new_table = (struct acpi_table_header *)AmlCode;
+#endif
+ if (*new_table != NULL)
acpi_table_taint(existing_table);
return AE_OK;
-#endif
}
static irqreturn_t acpi_irq(int irq, void *dev_id)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index c8e169e46673..2c45dd3acc17 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
#define PREFIX "ACPI: "
@@ -387,6 +388,23 @@ static inline int acpi_isa_register_gsi(struct pci_dev *dev)
}
#endif
+static inline bool acpi_pci_irq_valid(struct pci_dev *dev, u8 pin)
+{
+#ifdef CONFIG_X86
+ /*
+ * On x86 irq line 0xff means "unknown" or "no connection"
+ * (PCI 3.0, Section 6.2.4, footnote on page 223).
+ */
+ if (dev->irq == 0xff) {
+ dev->irq = IRQ_NOTCONNECTED;
+ dev_warn(&dev->dev, "PCI INT %c: not connected\n",
+ pin_name(pin));
+ return false;
+ }
+#endif
+ return true;
+}
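IRQ_NOTCONNECTED is a sentinel with the top bit set, so it can never collide with a valid Linux IRQ number; a later request_irq() on such a device fails cleanly with -ENOTCONN instead of trying to wire up line 255. A consumer might react like this (handler and names are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static irqreturn_t my_handler(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int my_probe_irq(struct pci_dev *pdev, void *priv)
    {
            int ret = request_irq(pdev->irq, my_handler, IRQF_SHARED,
                                  "mydev", priv);
            if (ret == -ENOTCONN) {
                    /* pin read back 0xff: no interrupt is wired up */
                    dev_info(&pdev->dev, "no IRQ, falling back to polling\n");
                    return 0;
            }
            return ret;
    }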
+
int acpi_pci_irq_enable(struct pci_dev *dev)
{
struct acpi_prt_entry *entry;
@@ -431,11 +449,14 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
} else
gsi = -1;
- /*
- * No IRQ known to the ACPI subsystem - maybe the BIOS /
- * driver reported one, then use it. Exit in any case.
- */
if (gsi < 0) {
+ /*
+ * No IRQ known to the ACPI subsystem - the BIOS or the
+ * driver may have reported one, in which case use it.
+ * Exit in any case.
+ */
+ if (!acpi_pci_irq_valid(dev, pin))
+ return 0;
+
if (acpi_isa_register_gsi(dev))
dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
pin_name(pin));
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index 42df46a86c25..fcd1852dcdee 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/regmap.h>
@@ -205,7 +205,4 @@ static int __init intel_crc_pmic_opregion_driver_init(void)
{
return platform_driver_register(&intel_crc_pmic_opregion_driver);
}
-module_init(intel_crc_pmic_opregion_driver_init);
-
-MODULE_DESCRIPTION("CrystalCove ACPI operation region driver");
-MODULE_LICENSE("GPL");
+device_initcall(intel_crc_pmic_opregion_driver_init);
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 11154a330f07..d2fa8cb82d2b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -314,7 +314,6 @@ static int __init acpi_processor_driver_init(void)
if (result < 0)
return result;
- acpi_processor_syscore_init();
register_hotcpu_notifier(&acpi_cpu_notifier);
acpi_thermal_cpufreq_init();
acpi_processor_ppc_init();
@@ -330,7 +329,6 @@ static void __exit acpi_processor_driver_exit(void)
acpi_processor_ppc_exit();
acpi_thermal_cpufreq_exit();
unregister_hotcpu_notifier(&acpi_cpu_notifier);
- acpi_processor_syscore_exit();
driver_unregister(&acpi_processor_driver);
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 175c86bee3a9..444e3745c8b3 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -23,6 +23,7 @@
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/module.h>
#include <linux/acpi.h>
@@ -30,7 +31,6 @@
#include <linux/sched.h> /* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
-#include <linux/syscore_ops.h>
#include <acpi/processor.h>
/*
@@ -43,8 +43,6 @@
#include <asm/apic.h>
#endif
-#define PREFIX "ACPI: "
-
#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
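Defining pr_fmt() before the first include is what lets this patch delete the hand-written PREFIX from every message: the pr_*() macros paste the prefix into the format string at compile time. Roughly:

    #define pr_fmt(fmt) "ACPI: " fmt
    #include <linux/printk.h>

    static void example(void)
    {
            /* emits "ACPI: count given by _CST is not valid" at KERN_ERR */
            pr_err("count given by _CST is not valid\n");
    }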
@@ -61,8 +59,8 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
-static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
- acpi_cstate);
+static
+DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
static int disabled_by_idle_boot_param(void)
{
@@ -81,9 +79,9 @@ static int set_max_cstate(const struct dmi_system_id *id)
if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
return 0;
- printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
- " Override with \"processor.max_cstate=%d\"\n", id->ident,
- (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
+ pr_notice("%s detected - limiting to C%ld max_cstate."
+ " Override with \"processor.max_cstate=%d\"\n", id->ident,
+ (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
max_cstate = (long)id->driver_data;
@@ -194,42 +192,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
#endif
-#ifdef CONFIG_PM_SLEEP
-static u32 saved_bm_rld;
-
-static int acpi_processor_suspend(void)
-{
- acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
- return 0;
-}
-
-static void acpi_processor_resume(void)
-{
- u32 resumed_bm_rld = 0;
-
- acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
- if (resumed_bm_rld == saved_bm_rld)
- return;
-
- acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
-}
-
-static struct syscore_ops acpi_processor_syscore_ops = {
- .suspend = acpi_processor_suspend,
- .resume = acpi_processor_resume,
-};
-
-void acpi_processor_syscore_init(void)
-{
- register_syscore_ops(&acpi_processor_syscore_ops);
-}
-
-void acpi_processor_syscore_exit(void)
-{
- unregister_syscore_ops(&acpi_processor_syscore_ops);
-}
-#endif /* CONFIG_PM_SLEEP */
-
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
@@ -351,7 +313,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
/* There must be at least 2 elements */
if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
- printk(KERN_ERR PREFIX "not enough elements in _CST\n");
+ pr_err("not enough elements in _CST\n");
ret = -EFAULT;
goto end;
}
@@ -360,7 +322,7 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
/* Validate number of power states. */
if (count < 1 || count != cst->package.count - 1) {
- printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
+ pr_err("count given by _CST is not valid\n");
ret = -EFAULT;
goto end;
}
@@ -469,11 +431,9 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
* (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
*/
if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
- printk(KERN_WARNING
- "Limiting number of power states to max (%d)\n",
- ACPI_PROCESSOR_MAX_POWER);
- printk(KERN_WARNING
- "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
+ pr_warn("Limiting number of power states to max (%d)\n",
+ ACPI_PROCESSOR_MAX_POWER);
+ pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
break;
}
}
@@ -1097,8 +1057,8 @@ int acpi_processor_power_init(struct acpi_processor *pr)
retval = cpuidle_register_driver(&acpi_idle_driver);
if (retval)
return retval;
- printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
- acpi_idle_driver.name);
+ pr_debug("%s registered with cpuidle\n",
+ acpi_idle_driver.name);
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 2aee41655ce9..f2fd3fee588a 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -816,6 +816,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
next = adev->node.next;
if (next == head) {
child = NULL;
+ adev = ACPI_COMPANION(dev);
goto nondev;
}
adev = list_entry(next, struct acpi_device, node);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index d02fd53042a5..56241eb341f4 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -27,8 +27,20 @@
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
+static inline bool acpi_iospace_resource_valid(struct resource *res)
+{
+ /* On X86 IO space is limited to the [0 - 64K] IO port range */
+ return res->end < 0x10003;
+}
#else
#define valid_IRQ(i) (true)
+/*
+ * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
+ * addresses mapping IO space in CPU physical address space; such IO space
+ * resources can be placed anywhere in the 64-bit physical address space.
+ */
+static inline bool
+acpi_iospace_resource_valid(struct resource *res) { return true; }
#endif
static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
@@ -127,7 +139,7 @@ static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
- if (res->end >= 0x10003)
+ if (!acpi_iospace_resource_valid(res))
res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;
if (io_decode == ACPI_DECODE_16)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 407a3760e8de..5f28cf778349 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1930,6 +1930,7 @@ int __init acpi_scan_init(void)
acpi_memory_hotplug_init();
acpi_pnp_init();
acpi_int340x_thermal_init();
+ acpi_amba_init();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9cb975200cac..2a8b59644297 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -19,6 +19,7 @@
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>
@@ -677,6 +678,39 @@ static void acpi_sleep_suspend_setup(void)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */
+#ifdef CONFIG_PM_SLEEP
+static u32 saved_bm_rld;
+
+static int acpi_save_bm_rld(void)
+{
+ acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+ return 0;
+}
+
+static void acpi_restore_bm_rld(void)
+{
+ u32 resumed_bm_rld = 0;
+
+ acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+ if (resumed_bm_rld == saved_bm_rld)
+ return;
+
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+}
+
+static struct syscore_ops acpi_sleep_syscore_ops = {
+ .suspend = acpi_save_bm_rld,
+ .resume = acpi_restore_bm_rld,
+};
+
+void acpi_sleep_syscore_init(void)
+{
+ register_syscore_ops(&acpi_sleep_syscore_ops);
+}
+#else
+static inline void acpi_sleep_syscore_init(void) {}
+#endif /* CONFIG_PM_SLEEP */
+
#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
@@ -714,6 +748,7 @@ static int acpi_hibernation_enter(void)
static void acpi_hibernation_leave(void)
{
+ pm_set_resume_via_firmware();
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
@@ -839,6 +874,7 @@ int __init acpi_sleep_init(void)
sleep_states[ACPI_STATE_S0] = 1;
+ acpi_sleep_syscore_init();
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 6c0f0794aa82..f49c02442d65 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -32,6 +32,7 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
+#include "internal.h"
#define ACPI_MAX_TABLES 128
@@ -456,6 +457,7 @@ int __init acpi_table_init(void)
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
return -EINVAL;
+ acpi_initrd_initialize_tables();
check_multiple_madt();
return 0;
@@ -484,3 +486,13 @@ static int __init acpi_force_table_verification_setup(char *s)
}
early_param("acpi_force_table_verification", acpi_force_table_verification_setup);
+
+static int __init acpi_force_32bit_fadt_addr(char *s)
+{
+ pr_info("Forcing 32 Bit FADT addresses\n");
+ acpi_gbl_use32_bit_fadt_addresses = TRUE;
+
+ return 0;
+}
+
+early_param("acpi_force_32bit_fadt_addr", acpi_force_32bit_fadt_addr);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f2f9873bb5c3..050673f0c0b3 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -201,10 +201,6 @@ acpi_extract_package(union acpi_object *package,
u8 **pointer = NULL;
union acpi_object *element = &(package->package.elements[i]);
- if (!element) {
- return AE_BAD_DATA;
- }
-
switch (element->type) {
case ACPI_TYPE_INTEGER:
@@ -696,7 +692,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
mask = obj->integer.value;
else if (obj->type == ACPI_TYPE_BUFFER)
for (i = 0; i < obj->buffer.length && i < 8; i++)
- mask |= (((u8)obj->buffer.pointer[i]) << (i * 8));
+ mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
ACPI_FREE(obj);
/*
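The one-character `(u8)` to `(u64)` change above fixes a classic integer-promotion bug: a u8 operand of `<<` is promoted only to 32-bit int, so for i >= 4 the shift amount reaches 32 and beyond, the shift is undefined behavior, and the high function-mask bytes were silently lost. A standalone demonstration of the corrected form:

    #include <stdint.h>

    uint64_t build_mask(const uint8_t *buf, unsigned int len)
    {
            uint64_t mask = 0;
            unsigned int i;

            for (i = 0; i < len && i < 8; i++) {
                    /*
                     * (uint8_t)buf[i] << (i * 8) would promote only to
                     * 32-bit int; casting to uint64_t first keeps
                     * bytes 4..7 of the mask.
                     */
                    mask |= ((uint64_t)buf[i]) << (i * 8);
            }
            return mask;
    }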
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 7d00b7a015ea..16288e777ec3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1321,6 +1321,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
+ binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1522,18 +1523,24 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_offset;
}
off_end = (void *)offp + tr->offsets_size;
+ off_min = 0;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
+ *offp < off_min ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
- binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
- proc->pid, thread->pid, (u64)*offp);
+ binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+ proc->pid, thread->pid, (u64)*offp,
+ (u64)off_min,
+ (u64)(t->buffer->data_size -
+ sizeof(*fp)));
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
@@ -2737,6 +2744,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ if (unlikely(current->mm != proc->vma_vm_mm)) {
+ pr_err("current mm mismatch proc mm\n");
+ return -EINVAL;
+ }
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -2951,6 +2962,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
return -ENOMEM;
get_task_struct(current);
proc->tsk = current;
+ proc->vma_vm_mm = current->mm;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
@@ -3593,13 +3605,24 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
static int binder_proc_show(struct seq_file *m, void *unused)
{
+ struct binder_proc *itr;
struct binder_proc *proc = m->private;
int do_lock = !binder_debug_no_lock;
+ bool valid_proc = false;
if (do_lock)
binder_lock(__func__);
- seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, proc, 1);
+
+ hlist_for_each_entry(itr, &binder_procs, proc_node) {
+ if (itr == proc) {
+ valid_proc = true;
+ break;
+ }
+ }
+ if (valid_proc) {
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, proc, 1);
+ }
if (do_lock)
binder_unlock(__func__);
return 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 861643ea91b5..5083f85efea7 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -151,6 +151,15 @@ config AHCI_MVEBU
If unsure, say N.
+config AHCI_OCTEON
+ tristate "Cavium Octeon Soc Serial ATA"
+ depends on SATA_AHCI_PLATFORM && CAVIUM_OCTEON_SOC
+ default y
+ help
+ This option enables support for Cavium Octeon SoC Serial ATA.
+
+ If unsure, say N.
+
config AHCI_SUNXI
tristate "Allwinner sunxi AHCI SATA support"
depends on ARCH_SUNXI
@@ -355,7 +364,7 @@ config SATA_PROMISE
config SATA_RCAR
tristate "Renesas R-Car SATA support"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
This option enables support for Renesas R-Car Serial ATA.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index af45effac18c..18579521464e 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_OCTEON) += ahci_octeon.o
obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_TEGRA) += ahci_tegra.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 146dc0b8ec61..a83bbcc58b4c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -85,6 +85,7 @@ enum board_ids {
};
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void ahci_remove_one(struct pci_dev *dev);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -94,9 +95,13 @@ static bool is_mcp89_apple(struct pci_dev *pdev);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
#ifdef CONFIG_PM
-static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
-static int ahci_pci_device_resume(struct pci_dev *pdev);
+static int ahci_pci_device_runtime_suspend(struct device *dev);
+static int ahci_pci_device_runtime_resume(struct device *dev);
+#ifdef CONFIG_PM_SLEEP
+static int ahci_pci_device_suspend(struct device *dev);
+static int ahci_pci_device_resume(struct device *dev);
#endif
+#endif /* CONFIG_PM */
static struct scsi_host_template ahci_sht = {
AHCI_SHT("ahci"),
@@ -371,15 +376,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
@@ -563,16 +564,20 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ } /* terminate list */
};
+static const struct dev_pm_ops ahci_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ahci_pci_device_suspend, ahci_pci_device_resume)
+ SET_RUNTIME_PM_OPS(ahci_pci_device_runtime_suspend,
+ ahci_pci_device_runtime_resume, NULL)
+};
static struct pci_driver ahci_pci_driver = {
.name = DRV_NAME,
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
- .remove = ata_pci_remove_one,
-#ifdef CONFIG_PM
- .suspend = ahci_pci_device_suspend,
- .resume = ahci_pci_device_resume,
-#endif
+ .remove = ahci_remove_one,
+ .driver = {
+ .pm = &ahci_pci_pm_ops,
+ },
};
#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
@@ -801,42 +806,66 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
#ifdef CONFIG_PM
-static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static void ahci_pci_disable_interrupts(struct ata_host *host)
{
- struct ata_host *host = pci_get_drvdata(pdev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = hpriv->mmio;
u32 ctl;
- if (mesg.event & PM_EVENT_SUSPEND &&
- hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
- dev_err(&pdev->dev,
- "BIOS update required for suspend/resume\n");
- return -EIO;
- }
+ /* AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+}
- if (mesg.event & PM_EVENT_SLEEP) {
- /* AHCI spec rev1.1 section 8.3.3:
- * Software must disable interrupts prior to requesting a
- * transition of the HBA to D3 state.
- */
- ctl = readl(mmio + HOST_CTL);
- ctl &= ~HOST_IRQ_EN;
- writel(ctl, mmio + HOST_CTL);
- readl(mmio + HOST_CTL); /* flush */
- }
+static int ahci_pci_device_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
- return ata_pci_device_suspend(pdev, mesg);
+ ahci_pci_disable_interrupts(host);
+ return 0;
}
-static int ahci_pci_device_resume(struct pci_dev *pdev)
+static int ahci_pci_device_runtime_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct ata_host *host = pci_get_drvdata(pdev);
int rc;
- rc = ata_pci_device_do_resume(pdev);
+ rc = ahci_pci_reset_controller(host);
if (rc)
return rc;
+ ahci_pci_init_controller(host);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_pci_device_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_err(&pdev->dev,
+ "BIOS update required for suspend/resume\n");
+ return -EIO;
+ }
+
+ ahci_pci_disable_interrupts(host);
+ return ata_host_suspend(host, PMSG_SUSPEND);
+}
+
+static int ahci_pci_device_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
+ int rc;
/* Apple BIOS helpfully mangles the registers on resume */
if (is_mcp89_apple(pdev))
@@ -856,6 +885,8 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
}
#endif
+#endif /* CONFIG_PM */
+
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
@@ -1718,7 +1749,18 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- return ahci_host_activate(host, &ahci_sht);
+ rc = ahci_host_activate(host, &ahci_sht);
+ if (rc)
+ return rc;
+
+ pm_runtime_put_noidle(&pdev->dev);
+ return 0;
+}
+
+static void ahci_remove_one(struct pci_dev *pdev)
+{
+ pm_runtime_get_noresume(&pdev->dev);
+ ata_pci_remove_one(pdev);
}
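The put/get pair is deliberate bookkeeping: the PCI core keeps the device powered across probe, so probe drops one usage count (`_put_noidle`) to let an idle controller runtime-suspend, and remove takes it back (`_get_noresume`) so teardown never runs against a suspended device. The skeleton, stripped of AHCI specifics:

    #include <linux/pci.h>
    #include <linux/pm_runtime.h>

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            /* ... map BARs, activate the host ... */
            pm_runtime_put_noidle(&pdev->dev);      /* allow runtime suspend */
            return 0;
    }

    static void my_remove(struct pci_dev *pdev)
    {
            pm_runtime_get_noresume(&pdev->dev);    /* rebalance the count */
            /* ... detach and unregister ... */
    }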
module_pci_driver(ahci_pci_driver);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 167ba7e3b92e..70b06bcfb7e3 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -335,6 +335,7 @@ struct ahci_host_priv {
void __iomem * mmio; /* bus-independent mem map */
u32 cap; /* cap to use */
u32 cap2; /* cap2 to use */
+ u32 version; /* cached version */
u32 port_map; /* port map to use */
u32 saved_cap; /* saved initial cap */
u32 saved_cap2; /* saved initial cap2 */
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f7a7fa81740e..de7128d81e9c 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -112,12 +112,15 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
if (rc)
return rc;
- dram = mv_mbus_dram_info();
- if (!dram)
- return -ENODEV;
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-380-ahci")) {
+ dram = mv_mbus_dram_info();
+ if (!dram)
+ return -ENODEV;
- ahci_mvebu_mbus_config(hpriv, dram);
- ahci_mvebu_regret_option(hpriv);
+ ahci_mvebu_mbus_config(hpriv, dram);
+ ahci_mvebu_regret_option(hpriv);
+ }
rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
&ahci_platform_sht);
@@ -133,6 +136,7 @@ disable_resources:
static const struct of_device_id ahci_mvebu_of_match[] = {
{ .compatible = "marvell,armada-380-ahci", },
+ { .compatible = "marvell,armada-3700-ahci", },
{ },
};
MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
new file mode 100644
index 000000000000..ea865fe953e1
--- /dev/null
+++ b/drivers/ata/ahci_octeon.c
@@ -0,0 +1,105 @@
+/*
+ * SATA glue for Cavium Octeon III SoCs.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2010-2015 Cavium Networks
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/bitfield.h>
+
+#define CVMX_SATA_UCTL_SHIM_CFG 0xE8
+
+#define SATA_UCTL_ENDIAN_MODE_BIG 1
+#define SATA_UCTL_ENDIAN_MODE_LITTLE 0
+#define SATA_UCTL_ENDIAN_MODE_MASK 3
+
+#define SATA_UCTL_DMA_ENDIAN_MODE_SHIFT 8
+#define SATA_UCTL_CSR_ENDIAN_MODE_SHIFT 0
+#define SATA_UCTL_DMA_READ_CMD_SHIFT 12
+
+static int ahci_octeon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ void __iomem *base;
+ u64 cfg;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Platform resource[0] is missing\n");
+ return -ENODEV;
+ }
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ cfg = cvmx_readq_csr(base + CVMX_SATA_UCTL_SHIM_CFG);
+
+ cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT);
+ cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT);
+
+#ifdef __BIG_ENDIAN
+ cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
+ cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
+#else
+ cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
+ cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
+#endif
+
+ cfg |= 1 << SATA_UCTL_DMA_READ_CMD_SHIFT;
+
+ cvmx_writeq_csr(base + CVMX_SATA_UCTL_SHIM_CFG, cfg);
+
+ if (!node) {
+ dev_err(dev, "no device node, failed to add octeon sata\n");
+ return -ENODEV;
+ }
+
+ ret = of_platform_populate(node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to add ahci-platform core\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ahci_octeon_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id octeon_ahci_match[] = {
+ { .compatible = "cavium,octeon-7130-sata-uctl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_ahci_match);
+
+static struct platform_driver ahci_octeon_driver = {
+ .probe = ahci_octeon_probe,
+ .remove = ahci_octeon_remove,
+ .driver = {
+ .name = "octeon-ahci",
+ .of_match_table = octeon_ahci_match,
+ },
+};
+
+module_platform_driver(ahci_octeon_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. sata config.");
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 04975b851c23..40442332bfa7 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -76,6 +76,7 @@ static const struct of_device_id ahci_of_match[] = {
{ .compatible = "ibm,476gtr-ahci", },
{ .compatible = "snps,dwc-ahci", },
{ .compatible = "hisilicon,hisi-ahci", },
+ { .compatible = "cavium,octeon-7130-ahci", },
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 8e3f7faf00d3..73b19b277138 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -821,9 +821,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
__func__);
version = XGENE_AHCI_V1;
- }
- if (info->valid & ACPI_VALID_CID)
+ } else if (info->valid & ACPI_VALID_CID) {
version = XGENE_AHCI_V2;
+ }
}
}
#endif
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 85ea5142a095..3982054060b8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -225,6 +225,31 @@ static void ahci_enable_ahci(void __iomem *mmio)
WARN_ON(1);
}
+/**
+ * ahci_rpm_get_port - Make sure the port is powered on
+ * @ap: Port to power on
+ *
+ * Whenever there is a need to access the AHCI host registers outside of
+ * normal execution paths, call this function to make sure the host is
+ * actually powered on.
+ */
+static int ahci_rpm_get_port(struct ata_port *ap)
+{
+ return pm_runtime_get_sync(ap->dev);
+}
+
+/**
+ * ahci_rpm_put_port - Undoes ahci_rpm_get_port()
+ * @ap: Port to power down
+ *
+ * Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
+ * if it has no more active users.
+ */
+static void ahci_rpm_put_port(struct ata_port *ap)
+{
+ pm_runtime_put(ap->dev);
+}
+
static ssize_t ahci_show_host_caps(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -251,9 +276,8 @@ static ssize_t ahci_show_host_version(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
struct ahci_host_priv *hpriv = ap->host->private_data;
- void __iomem *mmio = hpriv->mmio;
- return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
+ return sprintf(buf, "%x\n", hpriv->version);
}
static ssize_t ahci_show_port_cmd(struct device *dev,
@@ -262,8 +286,13 @@ static ssize_t ahci_show_port_cmd(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
void __iomem *port_mmio = ahci_port_base(ap);
+ ssize_t ret;
+
+ ahci_rpm_get_port(ap);
+ ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+ ahci_rpm_put_port(ap);
- return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
+ return ret;
}
static ssize_t ahci_read_em_buffer(struct device *dev,
@@ -279,17 +308,20 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
size_t count;
int i;
+ ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
em_ctl = readl(mmio + HOST_EM_CTL);
if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
!(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return -EINVAL;
}
if (!(em_ctl & EM_CTL_MR)) {
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return -EAGAIN;
}
@@ -317,6 +349,7 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
}
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return i;
}
@@ -341,11 +374,13 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
size % 4 || size > hpriv->em_buf_sz)
return -EINVAL;
+ ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
em_ctl = readl(mmio + HOST_EM_CTL);
if (em_ctl & EM_CTL_TM) {
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return -EBUSY;
}
@@ -358,6 +393,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return size;
}
@@ -371,7 +407,9 @@ static ssize_t ahci_show_em_supported(struct device *dev,
void __iomem *mmio = hpriv->mmio;
u32 em_ctl;
+ ahci_rpm_get_port(ap);
em_ctl = readl(mmio + HOST_EM_CTL);
+ ahci_rpm_put_port(ap);
return sprintf(buf, "%s%s%s%s\n",
em_ctl & EM_CTL_LED ? "led " : "",
@@ -509,6 +547,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
/* record values to use during operation */
hpriv->cap = cap;
hpriv->cap2 = cap2;
+ hpriv->version = readl(mmio + HOST_VERSION);
hpriv->port_map = port_map;
if (!hpriv->start_engine)
@@ -1014,6 +1053,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
else
return -EINVAL;
+ ahci_rpm_get_port(ap);
spin_lock_irqsave(ap->lock, flags);
/*
@@ -1023,6 +1063,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
em_ctl = readl(mmio + HOST_EM_CTL);
if (em_ctl & EM_CTL_TM) {
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
return -EBUSY;
}
@@ -1050,6 +1091,8 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
emp->led_state = state;
spin_unlock_irqrestore(ap->lock, flags);
+ ahci_rpm_put_port(ap);
+
return size;
}
@@ -2215,6 +2258,8 @@ static void ahci_pmp_detach(struct ata_port *ap)
int ahci_port_resume(struct ata_port *ap)
{
+ ahci_rpm_get_port(ap);
+
ahci_power_up(ap);
ahci_start_port(ap);
@@ -2241,6 +2286,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
ata_port_freeze(ap);
}
+ ahci_rpm_put_port(ap);
return rc;
}
#endif
@@ -2356,11 +2402,10 @@ static void ahci_port_stop(struct ata_port *ap)
void ahci_print_info(struct ata_host *host, const char *scc_s)
{
struct ahci_host_priv *hpriv = host->private_data;
- void __iomem *mmio = hpriv->mmio;
u32 vers, cap, cap2, impl, speed;
const char *speed_s;
- vers = readl(mmio + HOST_VERSION);
+ vers = hpriv->version;
cap = hpriv->cap;
cap2 = hpriv->cap2;
impl = hpriv->port_map;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e417e1a1d02c..567859ce0512 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -174,13 +174,13 @@ static ssize_t ata_scsi_park_show(struct device *device,
struct ata_port *ap;
struct ata_link *link;
struct ata_device *dev;
- unsigned long flags, now;
+ unsigned long now;
unsigned int uninitialized_var(msecs);
int rc = 0;
ap = ata_shost_to_port(sdev->host);
- spin_lock_irqsave(ap->lock, flags);
+ spin_lock_irq(ap->lock);
dev = ata_scsi_find_dev(ap, sdev);
if (!dev) {
rc = -ENODEV;
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index ace0a4de3449..9f27b14009f9 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -30,8 +30,7 @@
#include <linux/ata_platform.h>
#include <linux/platform_data/atmel.h>
#include <linux/regmap.h>
-
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#define DRV_NAME "pata_at91"
#define DRV_VERSION "0.3"
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index dd7410019d15..ec748d31928d 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -36,8 +36,8 @@
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
+#include <linux/gpio.h>
#include <asm/dma.h>
-#include <asm/gpio.h>
#include <asm/portmux.h>
#define DRV_NAME "pata-bf54x"
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 0038dc4c06c7..e5fb7525a5df 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -176,17 +176,14 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
const char * const list[])
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
- int i = 0;
+ int i;
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
- while (list[i] != NULL) {
- if (!strcmp(list[i], model_num)) {
- pr_warn("%s is not supported for %s\n",
- modestr, list[i]);
- return 1;
- }
- i++;
+ i = match_string(list, -1, model_num);
+ if (i >= 0) {
+ pr_warn("%s is not supported for %s\n", modestr, list[i]);
+ return 1;
}
return 0;
}
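match_string() takes an array of strings, a size (or -1 for a NULL-terminated array, as here), and the needle, and returns the matching index or -EINVAL. Its use in miniature, with two entries taken from this driver's blacklist:

    #include <linux/string.h>

    static const char * const bad_models[] = {
            "WDC AC11000H",
            "WDC AC22100H",
            NULL    /* sentinel: lets callers pass -1 as the size */
    };

    static bool is_blacklisted(const char *model)
    {
            return match_string(bad_models, -1, model) >= 0;
    }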
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index e3d4b059fcd1..e347e7acd8ed 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/gfp.h>
+#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -30,7 +31,6 @@
#include <asm/macio.h>
#include <asm/io.h>
#include <asm/dbdma.h>
-#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/mediabay.h>
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 17d31fc009ab..0636d84fbefe 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -61,6 +61,7 @@ enum {
SATA_CHAN_ENAB = 0x40, /* SATA channel enable */
SATA_INT_GATE = 0x41, /* SATA interrupt gating */
SATA_NATIVE_MODE = 0x42, /* Native mode enable */
+ SVIA_MISC_3 = 0x46, /* Miscellaneous Control III */
PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */
PATA_PIO_TIMING = 0xAB, /* PATA timing register */
@@ -71,9 +72,18 @@ enum {
NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),
SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
+
+ SATA_HOTPLUG = (1 << 5), /* enable IRQ on hotplug */
+};
+
+struct svia_priv {
+ bool wd_workaround;
};
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+#ifdef CONFIG_PM_SLEEP
+static int svia_pci_device_resume(struct pci_dev *pdev);
+#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
@@ -85,6 +95,7 @@ static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
+static void vt6421_error_handler(struct ata_port *ap);
static const struct pci_device_id svia_pci_tbl[] = {
{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
@@ -105,7 +116,7 @@ static struct pci_driver svia_pci_driver = {
.probe = svia_init_one,
#ifdef CONFIG_PM_SLEEP
.suspend = ata_pci_device_suspend,
- .resume = ata_pci_device_resume,
+ .resume = svia_pci_device_resume,
#endif
.remove = ata_pci_remove_one,
};
@@ -137,6 +148,7 @@ static struct ata_port_operations vt6421_sata_ops = {
.inherits = &svia_base_ops,
.scr_read = svia_scr_read,
.scr_write = svia_scr_write,
+ .error_handler = vt6421_error_handler,
};
static struct ata_port_operations vt8251_ops = {
@@ -536,7 +548,67 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
return 0;
}
-static void svia_configure(struct pci_dev *pdev, int board_id)
+static void svia_wd_fix(struct pci_dev *pdev)
+{
+ u8 tmp8;
+
+ pci_read_config_byte(pdev, 0x52, &tmp8);
+ pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
+}
+
+static irqreturn_t vt6421_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);
+
+ /* if the IRQ was not handled, it might be a hotplug IRQ */
+ if (rc != IRQ_HANDLED) {
+ u32 serror;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ /* check for hotplug on port 0 */
+ svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
+ if (serror & SERR_PHYRDY_CHG) {
+ ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
+ ata_port_freeze(host->ports[0]);
+ rc = IRQ_HANDLED;
+ }
+ /* check for hotplug on port 1 */
+ svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
+ if (serror & SERR_PHYRDY_CHG) {
+ ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
+ ata_port_freeze(host->ports[1]);
+ rc = IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+
+ return rc;
+}
+
+static void vt6421_error_handler(struct ata_port *ap)
+{
+ struct svia_priv *hpriv = ap->host->private_data;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ u32 serror;
+
+ /* see svia_configure() for description */
+ if (!hpriv->wd_workaround) {
+ svia_scr_read(&ap->link, SCR_ERROR, &serror);
+ if (serror == 0x1000500) {
+ ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
+ svia_wd_fix(pdev);
+ hpriv->wd_workaround = true;
+ ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
+ }
+ }
+
+ ata_sff_error_handler(ap);
+}
+
+static void svia_configure(struct pci_dev *pdev, int board_id,
+ struct svia_priv *hpriv)
{
u8 tmp8;
@@ -572,6 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
+ /* enable IRQ on hotplug */
+ pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+ if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+ dev_dbg(&pdev->dev,
+ "enabling SATA hotplug (0x%x)\n",
+ (int) tmp8);
+ tmp8 |= SATA_HOTPLUG;
+ pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+ }
+
/*
* vt6420/1 has problems talking to some drives. The following
* is the fix from Joseph Chan <JosephChan@via.com.tw>.
@@ -593,11 +675,15 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
* https://bugzilla.kernel.org/show_bug.cgi?id=15173
* http://article.gmane.org/gmane.linux.ide/46352
* http://thread.gmane.org/gmane.linux.kernel/1062139
+ *
+ * As the fix slows down data transfer, apply it only if the error
+ * actually appears - see vt6421_error_handler()
+ * Apply the fix always on vt6420 as we don't know if SCR_ERROR can be
+ * read safely.
*/
- if (board_id == vt6420 || board_id == vt6421) {
- pci_read_config_byte(pdev, 0x52, &tmp8);
- tmp8 |= 1 << 2;
- pci_write_config_byte(pdev, 0x52, tmp8);
+ if (board_id == vt6420) {
+ svia_wd_fix(pdev);
+ hpriv->wd_workaround = true;
}
}
@@ -608,6 +694,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_host *host = NULL;
int board_id = (int) ent->driver_data;
const unsigned *bar_sizes;
+ struct svia_priv *hpriv;
ata_print_version_once(&pdev->dev, DRV_VERSION);
@@ -647,11 +734,39 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- svia_configure(pdev, board_id);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+ if (!hpriv)
+ return -ENOMEM;
+ host->private_data = hpriv;
+
+ svia_configure(pdev, board_id, hpriv);
pci_set_master(pdev);
- return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
- IRQF_SHARED, &svia_sht);
+ if (board_id == vt6421)
+ return ata_host_activate(host, pdev->irq, vt6421_interrupt,
+ IRQF_SHARED, &svia_sht);
+ else
+ return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
+ IRQF_SHARED, &svia_sht);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int svia_pci_device_resume(struct pci_dev *pdev)
+{
+ struct ata_host *host = pci_get_drvdata(pdev);
+ struct svia_priv *hpriv = host->private_data;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (hpriv->wd_workaround)
+ svia_wd_fix(pdev);
+ ata_host_resume(host);
+
+ return 0;
}
+#endif
module_pci_driver(svia_pci_driver);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 82f2ae0d7cc4..a969a7e443be 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -168,7 +168,7 @@ static char *res_strings[] = {
"reserved 14",
"Unrecognized cell",
"reserved 16",
- "reassemby abort: AAL5 abort",
+ "reassembly abort: AAL5 abort",
"packet purged",
"packet ageing timeout",
"channel ageing timeout",
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 500592486e88..6470eb8088f4 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -149,8 +149,7 @@ EXPORT_SYMBOL_GPL(bus_remove_file);
static void bus_release(struct kobject *kobj)
{
- struct subsys_private *priv =
- container_of(kobj, typeof(*priv), subsys.kobj);
+ struct subsys_private *priv = to_subsys_private(kobj);
struct bus_type *bus = priv->bus;
kfree(priv);
@@ -1019,13 +1018,11 @@ static void device_insertion_sort_klist(struct device *a, struct list_head *list
int (*compare)(const struct device *a,
const struct device *b))
{
- struct list_head *pos;
struct klist_node *n;
struct device_private *dev_prv;
struct device *b;
- list_for_each(pos, list) {
- n = container_of(pos, struct klist_node, n_node);
+ list_for_each_entry(n, list, n_node) {
dev_prv = to_device_private_bus(n);
b = dev_prv->device;
if (compare(a, b) <= 0) {
@@ -1042,8 +1039,7 @@ void bus_sort_breadthfirst(struct bus_type *bus,
const struct device *b))
{
LIST_HEAD(sorted_devices);
- struct list_head *pos, *tmp;
- struct klist_node *n;
+ struct klist_node *n, *tmp;
struct device_private *dev_prv;
struct device *dev;
struct klist *device_klist;
@@ -1051,8 +1047,7 @@ void bus_sort_breadthfirst(struct bus_type *bus,
device_klist = bus_get_device_klist(bus);
spin_lock(&device_klist->k_lock);
- list_for_each_safe(pos, tmp, &device_klist->k_list) {
- n = container_of(pos, struct klist_node, n_node);
+ list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
dev_prv = to_device_private_bus(n);
dev = dev_prv->device;
device_insertion_sort_klist(dev, &sorted_devices, compare);
@@ -1107,7 +1102,7 @@ struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
knode = klist_next(&iter->ki);
if (!knode)
return NULL;
- dev = container_of(knode, struct device_private, knode_bus)->device;
+ dev = to_device_private_bus(knode)->device;
if (!iter->type || iter->type == dev->type)
return dev;
}
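All three hunks in this file are the same mechanical conversion: list_for_each() plus a manual container_of() becomes list_for_each_entry(), which folds the pointer arithmetic into the iterator. The shape of the transformation, on a toy type:

    #include <linux/list.h>

    struct item {
            int val;
            struct list_head node;
    };

    static int sum_items(struct list_head *head)
    {
            struct item *it;
            int total = 0;

            /* iterator supplies container_of(pos, struct item, node) */
            list_for_each_entry(it, head, node)
                    total += it->val;
            return total;
    }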
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 04a1582e80bb..89b032f2ffd2 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -267,7 +267,7 @@ void component_match_add_release(struct device *master,
}
if (match->num == match->alloc) {
- size_t new_size = match ? match->alloc + 16 : 15;
+ size_t new_size = match->alloc + 16;
int ret;
ret = component_match_realloc(master, match, new_size);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index c4da2df62e02..16688f50729c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -560,6 +560,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
struct device_attach_data *data = _data;
struct device *dev = data->dev;
bool async_allowed;
+ int ret;
/*
* Check if device has already been claimed. This may
@@ -570,8 +571,17 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
if (dev->driver)
return -EBUSY;
- if (!driver_match_device(drv, dev))
+ ret = driver_match_device(drv, dev);
+ if (ret == 0) {
+ /* no match */
return 0;
+ } else if (ret == -EPROBE_DEFER) {
+ dev_dbg(dev, "Device match requests probe deferral\n");
+ driver_deferred_probe_add(dev);
+ } else if (ret < 0) {
+ dev_dbg(dev, "Bus failed to match device: %d", ret);
+ return ret;
+ } /* ret > 0 means positive match */
async_allowed = driver_allows_async_probing(drv);
@@ -691,6 +701,7 @@ void device_initial_probe(struct device *dev)
static int __driver_attach(struct device *dev, void *data)
{
struct device_driver *drv = data;
+ int ret;
/*
* Lock device and try to bind to it. We drop the error
@@ -702,8 +713,17 @@ static int __driver_attach(struct device *dev, void *data)
* is an error.
*/
- if (!driver_match_device(drv, dev))
+ ret = driver_match_device(drv, dev);
+ if (ret == 0) {
+ /* no match */
return 0;
+ } else if (ret == -EPROBE_DEFER) {
+ dev_dbg(dev, "Device match requests probe deferral\n");
+ driver_deferred_probe_add(dev);
+ } else if (ret < 0) {
+ dev_dbg(dev, "Bus failed to match device: %d", ret);
+ return ret;
+ } /* ret > 0 means positive match */
if (dev->parent) /* Needed for USB */
device_lock(dev->parent);
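With this change driver_match_device() can propagate a negative errno from the bus ->match() callback; -EPROBE_DEFER re-queues the device on the deferred-probe list instead of silently dropping it. A hedged sketch of a match implementation using the new contract (the bus, to_mybus_device() and mybus_ids_ready() are hypothetical):

static int mybus_match(struct device *dev, struct device_driver *drv)
{
	struct mybus_device *mdev = to_mybus_device(dev);

	/*
	 * Matching needs an ID table that another component provides;
	 * until it shows up, ask the core to retry this device later.
	 */
	if (!mybus_ids_ready())
		return -EPROBE_DEFER;

	return !strcmp(mdev->id, drv->name);	/* 1 = match, 0 = no match */
}

Returning plain 1/0 keeps the old boolean behaviour, so existing buses are unaffected.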
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 55b83983a9c0..bdf28f7dd5e8 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -2,6 +2,7 @@
* Coherent per-device memory handling.
* Borrowed from i386
*/
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,9 +18,9 @@ struct dma_coherent_mem {
spinlock_t spinlock;
};
-static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_addr,
- size_t size, int flags,
- struct dma_coherent_mem **mem)
+static bool dma_init_coherent_memory(
+ phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
+ struct dma_coherent_mem **mem)
{
struct dma_coherent_mem *dma_mem = NULL;
void __iomem *mem_base = NULL;
@@ -31,7 +32,10 @@ static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_add
if (!size)
goto out;
- mem_base = ioremap(phys_addr, size);
+ if (flags & DMA_MEMORY_MAP)
+ mem_base = memremap(phys_addr, size, MEMREMAP_WC);
+ else
+ mem_base = ioremap(phys_addr, size);
if (!mem_base)
goto out;
@@ -50,24 +54,28 @@ static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_add
spin_lock_init(&dma_mem->spinlock);
*mem = dma_mem;
-
- if (flags & DMA_MEMORY_MAP)
- return DMA_MEMORY_MAP;
-
- return DMA_MEMORY_IO;
+ return true;
out:
kfree(dma_mem);
- if (mem_base)
- iounmap(mem_base);
- return 0;
+ if (mem_base) {
+ if (flags & DMA_MEMORY_MAP)
+ memunmap(mem_base);
+ else
+ iounmap(mem_base);
+ }
+ return false;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
if (!mem)
return;
- iounmap(mem->virt_base);
+
+ if (mem->flags & DMA_MEMORY_MAP)
+ memunmap(mem->virt_base);
+ else
+ iounmap(mem->virt_base);
kfree(mem->bitmap);
kfree(mem);
}
@@ -88,15 +96,13 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
struct dma_coherent_mem *mem;
- int ret;
- ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
- &mem);
- if (ret == 0)
+ if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
+ &mem))
return 0;
if (dma_assign_coherent_memory(dev, mem) == 0)
- return ret;
+ return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;
dma_release_coherent_memory(mem);
return 0;
@@ -181,7 +187,10 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
*/
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
*ret = mem->virt_base + (pageno << PAGE_SHIFT);
- memset(*ret, 0, size);
+ if (mem->flags & DMA_MEMORY_MAP)
+ memset(*ret, 0, size);
+ else
+ memset_io(*ret, 0, size);
spin_unlock_irqrestore(&mem->spinlock, flags);
return 1;
@@ -281,9 +290,9 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
struct dma_coherent_mem *mem = rmem->priv;
if (!mem &&
- dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
- DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
- &mem) != DMA_MEMORY_MAP) {
+ !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+ DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
+ &mem)) {
pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return -ENODEV;
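The net effect of the conversion: DMA_MEMORY_MAP regions become ordinary kernel pointers via memremap(..., MEMREMAP_WC) and may be touched with plain memset()/memcpy(), while DMA_MEMORY_IO regions keep the __iomem annotation and must go through the *_io accessors. A condensed sketch of the pairing rules the patch enforces (error handling omitted):

if (flags & DMA_MEMORY_MAP) {
	void *base = memremap(phys, size, MEMREMAP_WC);	/* plain pointer */

	memset(base, 0, size);		/* ordinary stores are fine */
	memunmap(base);
} else {
	void __iomem *base = ioremap(phys, size);	/* __iomem pointer */

	memset_io(base, 0, size);	/* must use the io helpers */
	iounmap(base);
}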
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9250e564ebf..773fc3099769 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
+#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
@@ -257,7 +258,7 @@ static void __fw_free_buf(struct kref *ref)
vunmap(buf->data);
for (i = 0; i < buf->nr_pages; i++)
__free_page(buf->pages[i]);
- kfree(buf->pages);
+ vfree(buf->pages);
} else
#endif
vfree(buf->data);
@@ -291,40 +292,19 @@ static const char * const fw_path[] = {
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
-static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+static void fw_finish_direct_load(struct device *device,
+ struct firmware_buf *buf)
{
- int size;
- char *buf;
- int rc;
-
- if (!S_ISREG(file_inode(file)->i_mode))
- return -EINVAL;
- size = i_size_read(file_inode(file));
- if (size <= 0)
- return -EINVAL;
- buf = vmalloc(size);
- if (!buf)
- return -ENOMEM;
- rc = kernel_read(file, 0, buf, size);
- if (rc != size) {
- if (rc > 0)
- rc = -EIO;
- goto fail;
- }
- rc = security_kernel_fw_from_file(file, buf, size);
- if (rc)
- goto fail;
- fw_buf->data = buf;
- fw_buf->size = size;
- return 0;
-fail:
- vfree(buf);
- return rc;
+ mutex_lock(&fw_lock);
+ set_bit(FW_STATUS_DONE, &buf->status);
+ complete_all(&buf->completion);
+ mutex_unlock(&fw_lock);
}
static int fw_get_filesystem_firmware(struct device *device,
struct firmware_buf *buf)
{
+ loff_t size;
int i, len;
int rc = -ENOENT;
char *path;
@@ -334,8 +314,6 @@ static int fw_get_filesystem_firmware(struct device *device,
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
- struct file *file;
-
/* skip the unset customized path */
if (!fw_path[i][0])
continue;
@@ -347,28 +325,25 @@ static int fw_get_filesystem_firmware(struct device *device,
break;
}
- file = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(file))
+ buf->size = 0;
+ rc = kernel_read_file_from_path(path, &buf->data, &size,
+ INT_MAX, READING_FIRMWARE);
+ if (rc) {
+ if (rc == -ENOENT)
+ dev_dbg(device, "loading %s failed with error %d\n",
+ path, rc);
+ else
+ dev_warn(device, "loading %s failed with error %d\n",
+ path, rc);
continue;
- rc = fw_read_file_contents(file, buf);
- fput(file);
- if (rc)
- dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n",
- path, rc);
- else
- break;
+ }
+ dev_dbg(device, "direct-loading %s\n", buf->fw_id);
+ buf->size = size;
+ fw_finish_direct_load(device, buf);
+ break;
}
__putname(path);
- if (!rc) {
- dev_dbg(device, "firmware: direct-loading firmware %s\n",
- buf->fw_id);
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- complete_all(&buf->completion);
- mutex_unlock(&fw_lock);
- }
-
return rc;
}
@@ -660,7 +635,7 @@ static ssize_t firmware_loading_store(struct device *dev,
if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
for (i = 0; i < fw_buf->nr_pages; i++)
__free_page(fw_buf->pages[i]);
- kfree(fw_buf->pages);
+ vfree(fw_buf->pages);
fw_buf->pages = NULL;
fw_buf->page_array_size = 0;
fw_buf->nr_pages = 0;
@@ -685,8 +660,9 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: map pages failed\n",
__func__);
else
- rc = security_kernel_fw_from_file(NULL,
- fw_buf->data, fw_buf->size);
+ rc = security_kernel_post_read_file(NULL,
+ fw_buf->data, fw_buf->size,
+ READING_FIRMWARE);
/*
* Same logic as fw_load_abort, only the DONE bit
@@ -770,8 +746,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
buf->page_array_size * 2);
struct page **new_pages;
- new_pages = kmalloc(new_array_size * sizeof(void *),
- GFP_KERNEL);
+ new_pages = vmalloc(new_array_size * sizeof(void *));
if (!new_pages) {
fw_load_abort(fw_priv);
return -ENOMEM;
@@ -780,7 +755,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
buf->page_array_size * sizeof(void *));
memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
(new_array_size - buf->page_array_size));
- kfree(buf->pages);
+ vfree(buf->pages);
buf->pages = new_pages;
buf->page_array_size = new_array_size;
}
@@ -1051,7 +1026,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
}
if (fw_get_builtin_firmware(firmware, name)) {
- dev_dbg(device, "firmware: using built-in firmware %s\n", name);
+ dev_dbg(device, "using built-in %s\n", name);
return 0; /* assigned */
}
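The rewritten loop leans on kernel_read_file_from_path(), which handles the open, the size check, the vmalloc'd buffer and the post-read LSM hook in one call. A minimal sketch of a caller (the path is invented; READING_FIRMWARE tells the security layer what the data is for):

void *data;
loff_t size;
int rc;

rc = kernel_read_file_from_path("/lib/firmware/example.bin",
				&data, &size, INT_MAX, READING_FIRMWARE);
if (rc)
	return rc;

/* ... consume 'size' bytes at 'data' ... */
vfree(data);	/* the buffer comes from vmalloc() */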
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 213456c2b123..f46dba8b7092 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -251,7 +251,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
return ret;
}
-static int memory_block_change_state(struct memory_block *mem,
+int memory_block_change_state(struct memory_block *mem,
unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
@@ -439,6 +439,37 @@ print_block_size(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
/*
+ * Memory auto online policy.
+ */
+
+static ssize_t
+show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (memhp_auto_online)
+ return sprintf(buf, "online\n");
+ else
+ return sprintf(buf, "offline\n");
+}
+
+static ssize_t
+store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (sysfs_streq(buf, "online"))
+ memhp_auto_online = true;
+ else if (sysfs_streq(buf, "offline"))
+ memhp_auto_online = false;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
+ store_auto_online_blocks);
+
+/*
* Some architectures will have custom drivers to do this, and
* will not need to do it from userspace. The fake hot-add code
* as well as ppc64 will do all of their discovery in userspace
@@ -746,6 +777,7 @@ static struct attribute *memory_root_attrs[] = {
#endif
&dev_attr_block_size_bytes.attr,
+ &dev_attr_auto_online_blocks.attr,
NULL
};
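The store side relies on sysfs_streq(), which tolerates the single trailing newline that `echo online > /sys/devices/system/memory/auto_online_blocks` produces. Illustrative, self-checking comparisons:

/* All of these hold (sysfs_streq() returns bool): */
WARN_ON(!sysfs_streq("online\n", "online"));	/* one trailing \n is fine */
WARN_ON(!sysfs_streq("online", "online"));
WARN_ON(sysfs_streq("online\n\n", "online"));	/* only one newline allowed */
WARN_ON(sysfs_streq("off", "offline"));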
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 272a52ebafc0..0e64a1b5e62a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -137,6 +137,62 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
return __pm_clk_add(dev, NULL, clk);
}
+
+/**
+ * of_pm_clk_add_clks - Start using device clock(s) for power management.
+ * @dev: Device whose clock(s) is going to be used for power management.
+ *
+ * Add a series of clocks described in the 'clocks' device-tree node for
+ * a device to the list of clocks used for the power management of @dev.
+ * On success, returns the number of clocks added. Returns a negative
+ * error code if there are no clocks in the device node for the device
+ * or if adding a clock fails.
+ */
+int of_pm_clk_add_clks(struct device *dev)
+{
+ struct clk **clks;
+ unsigned int i, count;
+ int ret;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ count = of_count_phandle_with_args(dev->of_node, "clocks",
+ "#clock-cells");
+ if (count == 0)
+ return -ENODEV;
+
+ clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ clks[i] = of_clk_get(dev->of_node, i);
+ if (IS_ERR(clks[i])) {
+ ret = PTR_ERR(clks[i]);
+ goto error;
+ }
+
+ ret = pm_clk_add_clk(dev, clks[i]);
+ if (ret) {
+ clk_put(clks[i]);
+ goto error;
+ }
+ }
+
+ kfree(clks);
+
+ return i;
+
+error:
+ while (i--)
+ pm_clk_remove_clk(dev, clks[i]);
+
+ kfree(clks);
+
+ return ret;
+}
+
/**
* __pm_clk_remove - Destroy PM clock entry.
* @ce: PM clock entry to destroy.
@@ -198,6 +254,39 @@ void pm_clk_remove(struct device *dev, const char *con_id)
}
/**
+ * pm_clk_remove_clk - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @clk: Clock pointer
+ *
+ * Remove the clock pointed to by @clk from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+
+ if (!psd || !clk)
+ return;
+
+ spin_lock_irq(&psd->lock);
+
+ list_for_each_entry(ce, &psd->clock_list, node) {
+ if (clk == ce->clk)
+ goto remove;
+ }
+
+ spin_unlock_irq(&psd->lock);
+ return;
+
+ remove:
+ list_del(&ce->node);
+ spin_unlock_irq(&psd->lock);
+
+ __pm_clk_remove(ce);
+}
+
+/**
* pm_clk_init - Initialize a device's list of power management clocks.
* @dev: Device to initialize the list of PM clocks for.
*
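A hedged sketch of how a platform driver might use the new helpers during probe (driver shell hypothetical; assumes the device's DT node carries a standard 'clocks' list):

static int my_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	/*
	 * Pull every clock named in the 'clocks' property into the PM
	 * clock list; returns the number added, or a negative errno.
	 */
	ret = of_pm_clk_add_clks(&pdev->dev);
	if (ret < 0) {
		pm_clk_destroy(&pdev->dev);
		return ret;
	}

	return 0;
}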
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 301b785f9f56..56705b52758e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -104,6 +104,7 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
+ unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
s64 elapsed_ns;
int ret;
@@ -120,10 +121,10 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns <= genpd->power_on_latency_ns)
+ if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
- genpd->power_on_latency_ns = elapsed_ns;
+ genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "on", elapsed_ns);
@@ -133,6 +134,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
+ unsigned int state_idx = genpd->state_idx;
ktime_t time_start;
s64 elapsed_ns;
int ret;
@@ -149,10 +151,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns <= genpd->power_off_latency_ns)
+ if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
return ret;
- genpd->power_off_latency_ns = elapsed_ns;
+ genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "off", elapsed_ns);
@@ -485,8 +487,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (timed && runtime_pm)
time_start = ktime_get();
- genpd_start_dev(genpd, dev);
- genpd_restore_dev(genpd, dev);
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ goto err_poweroff;
+
+ ret = genpd_restore_dev(genpd, dev);
+ if (ret)
+ goto err_stop;
/* Update resume latency value if the measured time exceeds it. */
if (timed && runtime_pm) {
@@ -501,6 +508,17 @@ static int pm_genpd_runtime_resume(struct device *dev)
}
return 0;
+
+err_stop:
+ genpd_stop_dev(genpd, dev);
+err_poweroff:
+ if (!dev->power.irq_safe) {
+ mutex_lock(&genpd->lock);
+ genpd_poweroff(genpd, 0);
+ mutex_unlock(&genpd->lock);
+ }
+
+ return ret;
}
static bool pd_ignore_unused;
@@ -585,6 +603,8 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
|| atomic_read(&genpd->sd_count) > 0)
return;
+ /* Choose the deepest state when suspending */
+ genpd->state_idx = genpd->state_count - 1;
genpd_power_off(genpd, timed);
genpd->status = GPD_STATE_POWER_OFF;
@@ -1378,7 +1398,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
mutex_lock(&subdomain->lock);
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
subdomain->name);
ret = -EBUSY;
@@ -1508,6 +1528,20 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->dev_ops.start = pm_clk_resume;
}
+ if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
+ pr_warn("Initial state index out of bounds.\n");
+ genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
+ }
+
+ if (genpd->state_count > GENPD_MAX_NUM_STATES) {
+ pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
+ genpd->state_count = GENPD_MAX_NUM_STATES;
+ }
+
+ /* Use only one "off" state if there were no states declared */
+ if (genpd->state_count == 0)
+ genpd->state_count = 1;
+
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
@@ -1668,6 +1702,9 @@ struct generic_pm_domain *of_genpd_get_from_provider(
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
struct of_genpd_provider *provider;
+ if (!genpdspec)
+ return ERR_PTR(-EINVAL);
+
mutex_lock(&of_genpd_mutex);
/* Check if we have such a provider in our array */
@@ -1864,6 +1901,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
struct pm_domain_data *pm_data;
const char *kobj_path;
struct gpd_link *link;
+ char state[16];
int ret;
ret = mutex_lock_interruptible(&genpd->lock);
@@ -1872,7 +1910,13 @@ static int pm_genpd_summary_one(struct seq_file *s,
if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
- seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]);
+ if (genpd->status == GPD_STATE_POWER_OFF)
+ snprintf(state, sizeof(state), "%s-%u",
+ status_lookup[genpd->status], genpd->state_idx);
+ else
+ snprintf(state, sizeof(state), "%s",
+ status_lookup[genpd->status]);
+ seq_printf(s, "%-30s %-15s ", genpd->name, state);
/*
* Modifications on the list require holding locks on both
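genpd now keeps per-state latencies in genpd->states[] and, per the sync_poweroff hunk above, picks states[state_count - 1] as the deepest state when suspending. A sketch of a provider declaring two off-states before pm_genpd_init() (numbers invented; field names as used in the hunks above):

static struct generic_pm_domain my_domain;

static void my_domain_setup(void)
{
	/* Shallow retention state: cheap to enter and leave. */
	my_domain.states[0].power_off_latency_ns = 100000;	/* 100 us */
	my_domain.states[0].power_on_latency_ns = 200000;

	/* Deep power-off state: better savings, longer latencies. */
	my_domain.states[1].power_off_latency_ns = 1000000;	/* 1 ms */
	my_domain.states[1].power_on_latency_ns = 3000000;

	my_domain.state_count = 2;

	pm_genpd_init(&my_domain, NULL, true);
}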
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 1e937ac5f456..00a5436dd44b 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -98,7 +98,8 @@ static bool default_stop_ok(struct device *dev)
*
* This routine must be executed under the PM domain's lock.
*/
-static bool default_power_down_ok(struct dev_pm_domain *pd)
+static bool __default_power_down_ok(struct dev_pm_domain *pd,
+ unsigned int state)
{
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
@@ -106,27 +107,9 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
s64 min_off_time_ns;
s64 off_on_time_ns;
- if (genpd->max_off_time_changed) {
- struct gpd_link *link;
-
- /*
- * We have to invalidate the cached results for the masters, so
- * use the observation that default_power_down_ok() is not
- * going to be called for any master until this instance
- * returns.
- */
- list_for_each_entry(link, &genpd->slave_links, slave_node)
- link->master->max_off_time_changed = true;
-
- genpd->max_off_time_changed = false;
- genpd->cached_power_down_ok = false;
- genpd->max_off_time_ns = -1;
- } else {
- return genpd->cached_power_down_ok;
- }
+ off_on_time_ns = genpd->states[state].power_off_latency_ns +
+ genpd->states[state].power_on_latency_ns;
- off_on_time_ns = genpd->power_off_latency_ns +
- genpd->power_on_latency_ns;
min_off_time_ns = -1;
/*
@@ -186,8 +169,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
min_off_time_ns = constraint_ns;
}
- genpd->cached_power_down_ok = true;
-
/*
* If the computed minimum device off time is negative, there are no
* latency constraints, so the domain can spend arbitrary time in the
@@ -201,10 +182,45 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
* time and the time needed to turn the domain on is the maximum
* theoretical time this domain can spend in the "off" state.
*/
- genpd->max_off_time_ns = min_off_time_ns - genpd->power_on_latency_ns;
+ genpd->max_off_time_ns = min_off_time_ns -
+ genpd->states[state].power_on_latency_ns;
return true;
}
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+
+ if (!genpd->max_off_time_changed)
+ return genpd->cached_power_down_ok;
+
+ /*
+ * We have to invalidate the cached results for the masters, so
+ * use the observation that default_power_down_ok() is not
+ * going to be called for any master until this instance
+ * returns.
+ */
+ list_for_each_entry(link, &genpd->slave_links, slave_node)
+ link->master->max_off_time_changed = true;
+
+ genpd->max_off_time_ns = -1;
+ genpd->max_off_time_changed = false;
+ genpd->cached_power_down_ok = true;
+ genpd->state_idx = genpd->state_count - 1;
+
+ /* Find a state to power down to, starting from the deepest. */
+ while (!__default_power_down_ok(pd, genpd->state_idx)) {
+ if (genpd->state_idx == 0) {
+ genpd->cached_power_down_ok = false;
+ break;
+ }
+ genpd->state_idx--;
+ }
+
+ return genpd->cached_power_down_ok;
+}
+
static bool always_on_power_down_ok(struct dev_pm_domain *domain)
{
return false;
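A worked pass through the new search, with invented numbers: two states and a tightest per-device off-time budget of 5 ms.

/* state 1 (deepest): power_off + power_on = 8 ms   -> exceeds 5 ms, rejected
 * state 0:           power_off + power_on = 0.3 ms -> fits, accepted
 * Result: state_idx = 0, cached_power_down_ok = true, and
 * max_off_time_ns = 5 ms - states[0].power_on_latency_ns.
 * Only if state 0 had failed too would cached_power_down_ok end up false. */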
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index cf351d3dab1c..433b60092972 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -13,50 +13,52 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/export.h>
+#include <linux/regulator/consumer.h>
#include "opp.h"
/*
- * The root of the list of all devices. All device_opp structures branch off
- * from here, with each device_opp containing the list of opp it supports in
+ * The root of the list of all opp-tables. All opp_table structures branch off
+ * from here, with each opp_table containing the list of opps it supports in
* various states of availability.
*/
-static LIST_HEAD(dev_opp_list);
+static LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
-DEFINE_MUTEX(dev_opp_list_lock);
+DEFINE_MUTEX(opp_table_lock);
#define opp_rcu_lockdep_assert() \
do { \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&dev_opp_list_lock), \
- "Missing rcu_read_lock() or " \
- "dev_opp_list_lock protection"); \
+ !lockdep_is_held(&opp_table_lock), \
+ "Missing rcu_read_lock() or " \
+ "opp_table_lock protection"); \
} while (0)
-static struct device_list_opp *_find_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+static struct opp_device *_find_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
- list_for_each_entry(list_dev, &dev_opp->dev_list, node)
- if (list_dev->dev == dev)
- return list_dev;
+ list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+ if (opp_dev->dev == dev)
+ return opp_dev;
return NULL;
}
-static struct device_opp *_managed_opp(const struct device_node *np)
+static struct opp_table *_managed_opp(const struct device_node *np)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
- list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
- if (dev_opp->np == np) {
+ list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+ if (opp_table->np == np) {
/*
* Multiple devices can point to the same OPP table and
* so will have same node-pointer, np.
@@ -64,7 +66,7 @@ static struct device_opp *_managed_opp(const struct device_node *np)
* But the OPPs will be considered as shared only if the
* OPP table contains a "opp-shared" property.
*/
- return dev_opp->shared_opp ? dev_opp : NULL;
+ return opp_table->shared_opp ? opp_table : NULL;
}
}
@@ -72,24 +74,24 @@ static struct device_opp *_managed_opp(const struct device_node *np)
}
/**
- * _find_device_opp() - find device_opp struct using device pointer
- * @dev: device pointer used to lookup device OPPs
+ * _find_opp_table() - find opp_table struct using device pointer
+ * @dev: device pointer used to lookup OPP table
*
- * Search list of device OPPs for one containing matching device. Does a RCU
- * reader operation to grab the pointer needed.
+ * Search OPP table for one containing matching device. Does a RCU reader
+ * operation to grab the pointer needed.
*
- * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
+ * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
* -EINVAL based on type of error.
*
* Locking: For readers, this function must be called under rcu_read_lock().
- * device_opp is a RCU protected pointer, which means that device_opp is valid
+ * opp_table is a RCU protected pointer, which means that opp_table is valid
* as long as we are under RCU lock.
*
- * For Writers, this function must be called with dev_opp_list_lock held.
+ * For Writers, this function must be called with opp_table_lock held.
*/
-struct device_opp *_find_device_opp(struct device *dev)
+struct opp_table *_find_opp_table(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
opp_rcu_lockdep_assert();
@@ -98,9 +100,9 @@ struct device_opp *_find_device_opp(struct device *dev)
return ERR_PTR(-EINVAL);
}
- list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
- if (_find_list_dev(dev, dev_opp))
- return dev_opp;
+ list_for_each_entry_rcu(opp_table, &opp_tables, node)
+ if (_find_opp_dev(dev, opp_table))
+ return opp_table;
return ERR_PTR(-ENODEV);
}
@@ -213,16 +215,16 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
*/
unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
unsigned long clock_latency_ns;
rcu_read_lock();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
clock_latency_ns = 0;
else
- clock_latency_ns = dev_opp->clock_latency_ns_max;
+ clock_latency_ns = opp_table->clock_latency_ns_max;
rcu_read_unlock();
return clock_latency_ns;
@@ -230,6 +232,82 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
/**
+ * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max voltage latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *opp;
+ struct regulator *reg;
+ unsigned long latency_ns = 0;
+ unsigned long min_uV = ~0, max_uV = 0;
+ int ret;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ reg = opp_table->regulator;
+ if (IS_ERR(reg)) {
+ /* Regulator may not be required for device */
+ if (reg)
+ dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
+ PTR_ERR(reg));
+ rcu_read_unlock();
+ return 0;
+ }
+
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+ if (!opp->available)
+ continue;
+
+ if (opp->u_volt_min < min_uV)
+ min_uV = opp->u_volt_min;
+ if (opp->u_volt_max > max_uV)
+ max_uV = opp->u_volt_max;
+ }
+
+ rcu_read_unlock();
+
+ /*
+ * The caller needs to ensure that opp_table (and hence the regulator)
+ * isn't freed, while we are executing this routine.
+ */
+ ret = regulator_set_voltage_time(reg, min_uV, max_uV);
+ if (ret > 0)
+ latency_ns = ret * 1000;
+
+ return latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
+
+/**
+ * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
+ * nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max transition latency, in nanoseconds, to
+ * switch from one OPP to another.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+ return dev_pm_opp_get_max_volt_latency(dev) +
+ dev_pm_opp_get_max_clock_latency(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
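A hedged sketch of a consumer sizing its transition latency from the new helper, in the style of a cpufreq init callback (the driver is hypothetical; get_cpu_device() and the policy fields are real):

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	unsigned long latency;

	/* regulator ramp time + clock switch time, both in ns */
	latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;
	return 0;
}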
+
+/**
* dev_pm_opp_get_suspend_opp() - Get suspend opp
* @dev: device for which we do this operation
*
@@ -244,21 +322,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
*/
struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
opp_rcu_lockdep_assert();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
- !dev_opp->suspend_opp->available)
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
+ !opp_table->suspend_opp->available)
return NULL;
- return dev_opp->suspend_opp;
+ return opp_table->suspend_opp;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
/**
- * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
* @dev: device for which we do this operation
*
* Return: This function returns the number of available opps if there are any,
@@ -268,21 +346,21 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp;
int count = 0;
rcu_read_lock();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- count = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n",
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ count = PTR_ERR(opp_table);
+ dev_err(dev, "%s: OPP table not found (%d)\n",
__func__, count);
goto out_unlock;
}
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available)
count++;
}
@@ -299,7 +377,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
* @freq: frequency to search for
* @available: true/false - match for available opp
*
- * Return: Searches for exact match in the opp list and returns pointer to the
+ * Return: Searches for exact match in the opp table and returns pointer to the
* matching opp if found, else returns ERR_PTR in case of error and should
* be handled using IS_ERR. Error return values can be:
* EINVAL: for bad pointer
@@ -323,19 +401,20 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int r = PTR_ERR(opp_table);
+
+ dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
return ERR_PTR(r);
}
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available == available &&
temp_opp->rate == freq) {
opp = temp_opp;
@@ -371,7 +450,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -381,11 +460,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available && temp_opp->rate >= *freq) {
opp = temp_opp;
*freq = opp->rate;
@@ -421,7 +500,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
opp_rcu_lockdep_assert();
@@ -431,11 +510,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-EINVAL);
}
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp);
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table);
- list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
if (temp_opp->available) {
/* go to the next node, before choosing prev */
if (temp_opp->rate > *freq)
@@ -451,130 +530,343 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
-/* List-dev Helpers */
-static void _kfree_list_dev_rcu(struct rcu_head *head)
+/*
+ * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
+ * while clk returned here is used.
+ */
+static struct clk *_get_opp_clk(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct clk *clk;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ clk = ERR_CAST(opp_table);
+ goto unlock;
+ }
+
+ clk = opp_table->clk;
+ if (IS_ERR(clk))
+ dev_err(dev, "%s: No clock available for the device\n",
+ __func__);
+
+unlock:
+ rcu_read_unlock();
+ return clk;
+}
+
+static int _set_opp_voltage(struct device *dev, struct regulator *reg,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+{
+ int ret;
+
+ /* Regulator not available for device */
+ if (IS_ERR(reg)) {
+ dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
+ PTR_ERR(reg));
+ return 0;
+ }
+
+ dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
+ u_volt, u_volt_max);
+
+ ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
+ u_volt_max);
+ if (ret)
+ dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
+ __func__, u_volt_min, u_volt, u_volt_max, ret);
+
+ return ret;
+}
+
+/**
+ * dev_pm_opp_set_rate() - Configure new OPP based on frequency
+ * @dev: device for which we do this operation
+ * @target_freq: frequency to achieve
+ *
+ * This configures the power-supplies and clock source to the levels specified
+ * by the OPP corresponding to the target_freq.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *old_opp, *opp;
+ struct regulator *reg;
+ struct clk *clk;
+ unsigned long freq, old_freq;
+ unsigned long u_volt, u_volt_min, u_volt_max;
+ unsigned long ou_volt, ou_volt_min, ou_volt_max;
+ int ret;
+
+ if (unlikely(!target_freq)) {
+ dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
+ target_freq);
+ return -EINVAL;
+ }
+
+ clk = _get_opp_clk(dev);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ freq = clk_round_rate(clk, target_freq);
+ if ((long)freq <= 0)
+ freq = target_freq;
+
+ old_freq = clk_get_rate(clk);
+
+ /* Return early if nothing to do */
+ if (old_freq == freq) {
+ dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+ __func__, freq);
+ return 0;
+ }
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+ rcu_read_unlock();
+ return PTR_ERR(opp_table);
+ }
+
+ old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+ if (!IS_ERR(old_opp)) {
+ ou_volt = old_opp->u_volt;
+ ou_volt_min = old_opp->u_volt_min;
+ ou_volt_max = old_opp->u_volt_max;
+ } else {
+ dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
+ __func__, old_freq, PTR_ERR(old_opp));
+ }
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+ __func__, freq, ret);
+ rcu_read_unlock();
+ return ret;
+ }
+
+ u_volt = opp->u_volt;
+ u_volt_min = opp->u_volt_min;
+ u_volt_max = opp->u_volt_max;
+
+ reg = opp_table->regulator;
+
+ rcu_read_unlock();
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+ u_volt_max);
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+ u_volt_max);
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ if (clk_set_rate(clk, old_freq))
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (!IS_ERR(old_opp))
+ _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
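From a consumer's point of view the whole regulator/clock sequence collapses to one call; a hedged sketch of a scaling callback (the surrounding driver is hypothetical):

static int my_target(struct device *dev, unsigned long target_hz)
{
	/*
	 * Finds the OPP ceiling for target_hz, then:
	 *  - scaling up:   regulator triplet first, clk_set_rate() second
	 *  - scaling down: clk_set_rate() first, regulator second
	 * and restores the old voltage/frequency on failure.
	 */
	return dev_pm_opp_set_rate(dev, target_hz);
}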
+
+/* OPP-dev Helpers */
+static void _kfree_opp_dev_rcu(struct rcu_head *head)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
- list_dev = container_of(head, struct device_list_opp, rcu_head);
- kfree_rcu(list_dev, rcu_head);
+ opp_dev = container_of(head, struct opp_device, rcu_head);
+ kfree_rcu(opp_dev, rcu_head);
}
-static void _remove_list_dev(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static void _remove_opp_dev(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- opp_debug_unregister(list_dev, dev_opp);
- list_del(&list_dev->node);
- call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
- _kfree_list_dev_rcu);
+ opp_debug_unregister(opp_dev, opp_table);
+ list_del(&opp_dev->node);
+ call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
+ _kfree_opp_dev_rcu);
}
-struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+struct opp_device *_add_opp_dev(const struct device *dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
int ret;
- list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
- if (!list_dev)
+ opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
+ if (!opp_dev)
return NULL;
- /* Initialize list-dev */
- list_dev->dev = dev;
- list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+ /* Initialize opp-dev */
+ opp_dev->dev = dev;
+ list_add_rcu(&opp_dev->node, &opp_table->dev_list);
- /* Create debugfs entries for the dev_opp */
- ret = opp_debug_register(list_dev, dev_opp);
+ /* Create debugfs entries for the opp_table */
+ ret = opp_debug_register(opp_dev, opp_table);
if (ret)
dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
__func__, ret);
- return list_dev;
+ return opp_dev;
}
/**
- * _add_device_opp() - Find device OPP table or allocate a new one
+ * _add_opp_table() - Find OPP table or allocate a new one
* @dev: device for which we do this operation
*
* It tries to find an existing table first, if it couldn't find one, it
* allocates a new OPP table and returns that.
*
- * Return: valid device_opp pointer if success, else NULL.
+ * Return: valid opp_table pointer if success, else NULL.
*/
-static struct device_opp *_add_device_opp(struct device *dev)
+static struct opp_table *_add_opp_table(struct device *dev)
{
- struct device_opp *dev_opp;
- struct device_list_opp *list_dev;
+ struct opp_table *opp_table;
+ struct opp_device *opp_dev;
+ struct device_node *np;
+ int ret;
- /* Check for existing list for 'dev' first */
- dev_opp = _find_device_opp(dev);
- if (!IS_ERR(dev_opp))
- return dev_opp;
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (!IS_ERR(opp_table))
+ return opp_table;
/*
- * Allocate a new device OPP table. In the infrequent case where a new
+ * Allocate a new OPP table. In the infrequent case where a new
* device is needed to be added, we pay this penalty.
*/
- dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
- if (!dev_opp)
+ opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
+ if (!opp_table)
return NULL;
- INIT_LIST_HEAD(&dev_opp->dev_list);
+ INIT_LIST_HEAD(&opp_table->dev_list);
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- kfree(dev_opp);
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ kfree(opp_table);
return NULL;
}
- srcu_init_notifier_head(&dev_opp->srcu_head);
- INIT_LIST_HEAD(&dev_opp->opp_list);
+ /*
+ * Only required for backward compatibility with v1 bindings, but isn't
+ * harmful for other cases. And so we do it unconditionally.
+ */
+ np = of_node_get(dev->of_node);
+ if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "clock-latency", &val))
+ opp_table->clock_latency_ns_max = val;
+ of_property_read_u32(np, "voltage-tolerance",
+ &opp_table->voltage_tolerance_v1);
+ of_node_put(np);
+ }
+
+ /* Set regulator to a non-NULL error value */
+ opp_table->regulator = ERR_PTR(-ENXIO);
+
+ /* Find clk for the device */
+ opp_table->clk = clk_get(dev, NULL);
+ if (IS_ERR(opp_table->clk)) {
+ ret = PTR_ERR(opp_table->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
+ ret);
+ }
- /* Secure the device list modification */
- list_add_rcu(&dev_opp->node, &dev_opp_list);
- return dev_opp;
+ srcu_init_notifier_head(&opp_table->srcu_head);
+ INIT_LIST_HEAD(&opp_table->opp_list);
+
+ /* Secure the device table modification */
+ list_add_rcu(&opp_table->node, &opp_tables);
+ return opp_table;
}
/**
- * _kfree_device_rcu() - Free device_opp RCU handler
+ * _kfree_device_rcu() - Free opp_table RCU handler
* @head: RCU head
*/
static void _kfree_device_rcu(struct rcu_head *head)
{
- struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
+ struct opp_table *opp_table = container_of(head, struct opp_table,
+ rcu_head);
- kfree_rcu(device_opp, rcu_head);
+ kfree_rcu(opp_table, rcu_head);
}
/**
- * _remove_device_opp() - Removes a device OPP table
- * @dev_opp: device OPP table to be removed.
+ * _remove_opp_table() - Removes an OPP table
+ * @opp_table: OPP table to be removed.
*
- * Removes/frees device OPP table it it doesn't contain any OPPs.
+ * Removes/frees OPP table if it doesn't contain any OPPs.
*/
-static void _remove_device_opp(struct device_opp *dev_opp)
+static void _remove_opp_table(struct opp_table *opp_table)
{
- struct device_list_opp *list_dev;
+ struct opp_device *opp_dev;
+
+ if (!list_empty(&opp_table->opp_list))
+ return;
- if (!list_empty(&dev_opp->opp_list))
+ if (opp_table->supported_hw)
return;
- if (dev_opp->supported_hw)
+ if (opp_table->prop_name)
return;
- if (dev_opp->prop_name)
+ if (!IS_ERR(opp_table->regulator))
return;
- list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
- node);
+ /* Release clk */
+ if (!IS_ERR(opp_table->clk))
+ clk_put(opp_table->clk);
- _remove_list_dev(list_dev, dev_opp);
+ opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
+ node);
+
+ _remove_opp_dev(opp_dev, opp_table);
/* dev_list must be empty now */
- WARN_ON(!list_empty(&dev_opp->dev_list));
+ WARN_ON(!list_empty(&opp_table->dev_list));
- list_del_rcu(&dev_opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+ list_del_rcu(&opp_table->node);
+ call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
_kfree_device_rcu);
}
@@ -591,17 +883,17 @@ static void _kfree_opp_rcu(struct rcu_head *head)
/**
* _opp_remove() - Remove an OPP from a table definition
- * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
* @opp: pointer to the OPP to remove
* @notify: OPP_EVENT_REMOVE notification should be sent or not
*
- * This function removes an opp definition from the opp list.
+ * This function removes an opp definition from the opp table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* It is assumed that the caller holds required mutex for an RCU updater
* strategy.
*/
-static void _opp_remove(struct device_opp *dev_opp,
+static void _opp_remove(struct opp_table *opp_table,
struct dev_pm_opp *opp, bool notify)
{
/*
@@ -609,22 +901,23 @@ static void _opp_remove(struct device_opp *dev_opp,
* frequency/voltage list.
*/
if (notify)
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_REMOVE, opp);
opp_debug_remove_one(opp);
list_del_rcu(&opp->node);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
- _remove_device_opp(dev_opp);
+ _remove_opp_table(opp_table);
}
/**
- * dev_pm_opp_remove() - Remove an OPP from OPP list
+ * dev_pm_opp_remove() - Remove an OPP from OPP table
* @dev: device for which we do this operation
* @freq: OPP to remove with matching 'freq'
*
- * This function removes an opp from the opp list.
+ * This function removes an opp from the opp table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -633,17 +926,17 @@ static void _opp_remove(struct device_opp *dev_opp,
void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
struct dev_pm_opp *opp;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
bool found = false;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp))
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table))
goto unlock;
- list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
if (opp->rate == freq) {
found = true;
break;
@@ -656,14 +949,14 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
goto unlock;
}
- _opp_remove(dev_opp, opp, true);
+ _opp_remove(opp_table, opp, true);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
static struct dev_pm_opp *_allocate_opp(struct device *dev,
- struct device_opp **dev_opp)
+ struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
@@ -674,8 +967,8 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
INIT_LIST_HEAD(&opp->node);
- *dev_opp = _add_device_opp(dev);
- if (!*dev_opp) {
+ *opp_table = _add_opp_table(dev);
+ if (!*opp_table) {
kfree(opp);
return NULL;
}
@@ -683,22 +976,38 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
return opp;
}
+static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+ struct opp_table *opp_table)
+{
+ struct regulator *reg = opp_table->regulator;
+
+ if (!IS_ERR(reg) &&
+ !regulator_is_supported_voltage(reg, opp->u_volt_min,
+ opp->u_volt_max)) {
+ pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+ __func__, opp->u_volt_min, opp->u_volt_max);
+ return false;
+ }
+
+ return true;
+}
+
static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct device_opp *dev_opp)
+ struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
- struct list_head *head = &dev_opp->opp_list;
+ struct list_head *head = &opp_table->opp_list;
int ret;
/*
* Insert new OPP in order of increasing frequency and discard if
* already present.
*
- * Need to use &dev_opp->opp_list in the condition part of the 'for'
+ * Need to use &opp_table->opp_list in the condition part of the 'for'
* loop, don't replace it with head otherwise it will become an infinite
* loop.
*/
- list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+ list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
if (new_opp->rate > opp->rate) {
head = &opp->node;
continue;
@@ -716,14 +1025,20 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
0 : -EEXIST;
}
- new_opp->dev_opp = dev_opp;
+ new_opp->opp_table = opp_table;
list_add_rcu(&new_opp->node, head);
- ret = opp_debug_create_one(new_opp, dev_opp);
+ ret = opp_debug_create_one(new_opp, opp_table);
if (ret)
dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
__func__, ret);
+ if (!_opp_supported_by_regulators(new_opp, opp_table)) {
+ new_opp->available = false;
+ dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
+ __func__, new_opp->rate);
+ }
+
return 0;
}
@@ -734,14 +1049,14 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* @u_volt: Voltage in uVolts for this OPP
* @dynamic: Dynamically added OPPs.
*
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
*
* NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
* and freed by dev_pm_opp_of_remove_table.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -757,14 +1072,15 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
bool dynamic)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
+ unsigned long tol;
int ret;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- new_opp = _allocate_opp(dev, &dev_opp);
+ new_opp = _allocate_opp(dev, &opp_table);
if (!new_opp) {
ret = -ENOMEM;
goto unlock;
@@ -772,33 +1088,36 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
/* populate the opp table */
new_opp->rate = freq;
+ tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
new_opp->u_volt = u_volt;
+ new_opp->u_volt_min = u_volt - tol;
+ new_opp->u_volt_max = u_volt + tol;
new_opp->available = true;
new_opp->dynamic = dynamic;
- ret = _opp_add(dev, new_opp, dev_opp);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret)
goto free_opp;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(dev_opp, new_opp, false);
+ _opp_remove(opp_table, new_opp, false);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
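Worked numbers for the v1 path (values invented): with u_volt = 1100000 uV from the old-style binding and voltage_tolerance_v1 = 5 (percent) picked up in _add_opp_table():

tol        = 1100000 * 5 / 100;		/* = 55000 uV   */
u_volt_min = 1100000 - 55000;		/* = 1045000 uV */
u_volt_max = 1100000 + 55000;		/* = 1155000 uV */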
/* TODO: Support multiple regulators */
static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
- struct device_opp *dev_opp)
+ struct opp_table *opp_table)
{
u32 microvolt[3] = {0};
u32 val;
@@ -807,9 +1126,9 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
char name[NAME_MAX];
/* Search for "opp-microvolt-<name>" */
- if (dev_opp->prop_name) {
+ if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microvolt-%s",
- dev_opp->prop_name);
+ opp_table->prop_name);
prop = of_find_property(opp->np, name, NULL);
}
@@ -844,14 +1163,20 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
}
opp->u_volt = microvolt[0];
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
+
+ if (count == 1) {
+ opp->u_volt_min = opp->u_volt;
+ opp->u_volt_max = opp->u_volt;
+ } else {
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+ }
/* Search for "opp-microamp-<name>" */
prop = NULL;
- if (dev_opp->prop_name) {
+ if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microamp-%s",
- dev_opp->prop_name);
+ opp_table->prop_name);
prop = of_find_property(opp->np, name, NULL);
}
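The count == 1 branch accepts the single-value spelling of the v2 binding alongside the triplet. Both forms, shown as device-tree fragments inside a comment:

/* opp-microvolt = <975000>;                ->  min = max = target
 * opp-microvolt = <975000 970000 985000>;  ->  <target min max> triplet */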
@@ -878,7 +1203,7 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
* OPPs, which are available for those versions, based on its 'opp-supported-hw'
* property.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -887,44 +1212,44 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
unsigned int count)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
int ret = 0;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- dev_opp = _add_device_opp(dev);
- if (!dev_opp) {
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
ret = -ENOMEM;
goto unlock;
}
- /* Make sure there are no concurrent readers while updating dev_opp */
- WARN_ON(!list_empty(&dev_opp->opp_list));
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
- /* Do we already have a version hierarchy associated with dev_opp? */
- if (dev_opp->supported_hw) {
+ /* Do we already have a version hierarchy associated with opp_table? */
+ if (opp_table->supported_hw) {
dev_err(dev, "%s: Already have supported hardware list\n",
__func__);
ret = -EBUSY;
goto err;
}
- dev_opp->supported_hw = kmemdup(versions, count * sizeof(*versions),
+ opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
GFP_KERNEL);
- if (!dev_opp->supported_hw) {
+ if (!opp_table->supported_hw) {
ret = -ENOMEM;
goto err;
}
- dev_opp->supported_hw_count = count;
- mutex_unlock(&dev_opp_list_lock);
+ opp_table->supported_hw_count = count;
+ mutex_unlock(&opp_table_lock);
return 0;
err:
- _remove_device_opp(dev_opp);
+ _remove_opp_table(opp_table);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
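A sketch of a platform using the supported-hw interface (version values invented, read from fuses in practice): the driver publishes the chip's version words before parsing the table, and only OPP nodes whose 'opp-supported-hw' mask matches stay enabled.

static int my_init_opps(struct device *dev)
{
	u32 versions[] = { 0x2, 0x1 };	/* e.g. <speed-bin process-rev> */
	int ret;

	ret = dev_pm_opp_set_supported_hw(dev, versions,
					  ARRAY_SIZE(versions));
	if (ret)
		return ret;

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_supported_hw(dev);

	return ret;
}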
@@ -932,13 +1257,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
/**
* dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
- * @dev: Device for which supported-hw has to be set.
+ * @dev: Device for which supported-hw has to be put.
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_supported_hw(). Until this is called, the device_opp structure
+ * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
* will not be freed.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -946,42 +1271,43 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
*/
void dev_pm_opp_put_supported_hw(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- /* Check for existing list for 'dev' first */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
goto unlock;
}
- /* Make sure there are no concurrent readers while updating dev_opp */
- WARN_ON(!list_empty(&dev_opp->opp_list));
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
- if (!dev_opp->supported_hw) {
+ if (!opp_table->supported_hw) {
dev_err(dev, "%s: Doesn't have supported hardware list\n",
__func__);
goto unlock;
}
- kfree(dev_opp->supported_hw);
- dev_opp->supported_hw = NULL;
- dev_opp->supported_hw_count = 0;
+ kfree(opp_table->supported_hw);
+ opp_table->supported_hw = NULL;
+ opp_table->supported_hw_count = 0;
- /* Try freeing device_opp if this was the last blocking resource */
- _remove_device_opp(dev_opp);
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
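A minimal usage sketch for this pair of calls (the driver name and version values are hypothetical; per the WARN_ON above, dev_pm_opp_set_supported_hw() must run before any OPPs are added):

	static const u32 soc_versions[] = { 0x3, 0x1 };	/* hypothetical cuts */

	static int foo_init_opps(struct device *dev)
	{
		int ret;

		ret = dev_pm_opp_set_supported_hw(dev, soc_versions,
						  ARRAY_SIZE(soc_versions));
		if (ret)
			return ret;

		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			dev_pm_opp_put_supported_hw(dev);	/* undo on failure */

		return ret;
	}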
/**
* dev_pm_opp_set_prop_name() - Set prop-extn name
- * @dev: Device for which the regulator has to be set.
+ * @dev: Device for which the prop-name has to be set.
* @name: name to postfix to properties.
*
* This is required only for the V2 bindings, and it enables a platform to
@@ -989,7 +1315,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
* which the extension will apply are opp-microvolt and opp-microamp. OPP core
* should postfix the property name with -<name> while looking for them.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -997,42 +1323,42 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
*/
int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
int ret = 0;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- dev_opp = _add_device_opp(dev);
- if (!dev_opp) {
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
ret = -ENOMEM;
goto unlock;
}
- /* Make sure there are no concurrent readers while updating dev_opp */
- WARN_ON(!list_empty(&dev_opp->opp_list));
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
- /* Do we already have a prop-name associated with dev_opp? */
- if (dev_opp->prop_name) {
+ /* Do we already have a prop-name associated with opp_table? */
+ if (opp_table->prop_name) {
dev_err(dev, "%s: Already have prop-name %s\n", __func__,
- dev_opp->prop_name);
+ opp_table->prop_name);
ret = -EBUSY;
goto err;
}
- dev_opp->prop_name = kstrdup(name, GFP_KERNEL);
- if (!dev_opp->prop_name) {
+ opp_table->prop_name = kstrdup(name, GFP_KERNEL);
+ if (!opp_table->prop_name) {
ret = -ENOMEM;
goto err;
}
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return 0;
err:
- _remove_device_opp(dev_opp);
+ _remove_opp_table(opp_table);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
@@ -1040,13 +1366,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
/**
* dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
- * @dev: Device for which the regulator has to be set.
+ * @dev: Device for which the prop-name has to be put.
*
* This is required only for the V2 bindings, and is called for a matching
- * dev_pm_opp_set_prop_name(). Until this is called, the device_opp structure
+ * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
* will not be freed.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1054,45 +1380,154 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
*/
void dev_pm_opp_put_prop_name(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- /* Check for existing list for 'dev' first */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
goto unlock;
}
- /* Make sure there are no concurrent readers while updating dev_opp */
- WARN_ON(!list_empty(&dev_opp->opp_list));
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
- if (!dev_opp->prop_name) {
+ if (!opp_table->prop_name) {
dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
goto unlock;
}
- kfree(dev_opp->prop_name);
- dev_opp->prop_name = NULL;
+ kfree(opp_table->prop_name);
+ opp_table->prop_name = NULL;
- /* Try freeing device_opp if this was the last blocking resource */
- _remove_device_opp(dev_opp);
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
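As a sketch of the intended use (the "slow" suffix and the helper are hypothetical): with a prop-name of "slow", the core looks for opp-microvolt-slow and opp-microamp-slow in each OPP node instead of the bare property names.

	static int foo_init_opps(struct device *dev)
	{
		int ret;

		ret = dev_pm_opp_set_prop_name(dev, "slow");
		if (ret)
			return ret;

		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			dev_pm_opp_put_prop_name(dev);

		return ret;
	}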
-static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
+/**
+ * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * @dev: Device for which regulator name is being set.
+ * @name: Name of the regulator.
+ *
+ * In order to support OPP switching, the OPP layer needs to know the name of the
+ * device's regulator, as the core would be required to switch voltages as well.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+ int ret;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _add_opp_table(dev);
+ if (!opp_table) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ /* This should be called before OPPs are initialized */
+ if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* Already have a regulator set */
+ if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+ ret = -EBUSY;
+ goto err;
+ }
+ /* Allocate the regulator */
+ reg = regulator_get_optional(dev, name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "%s: no regulator (%s) found: %d\n",
+ __func__, name, ret);
+ goto err;
+ }
+
+ opp_table->regulator = reg;
+
+ mutex_unlock(&opp_table_lock);
+ return 0;
+
+err:
+ _remove_opp_table(opp_table);
+unlock:
+ mutex_unlock(&opp_table_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+
+/**
+ * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
+ * @dev: Device for which regulator was set.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulator(struct device *dev)
+{
+ struct opp_table *opp_table;
+
+ mutex_lock(&opp_table_lock);
+
+ /* Check for existing table for 'dev' first */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ dev_err(dev, "Failed to find opp_table: %ld\n",
+ PTR_ERR(opp_table));
+ goto unlock;
+ }
+
+ if (IS_ERR(opp_table->regulator)) {
+ dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+ goto unlock;
+ }
+
+ /* Make sure there are no concurrent readers while updating opp_table */
+ WARN_ON(!list_empty(&opp_table->opp_list));
+
+ regulator_put(opp_table->regulator);
+ opp_table->regulator = ERR_PTR(-ENXIO);
+
+ /* Try freeing opp_table if this was the last blocking resource */
+ _remove_opp_table(opp_table);
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
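A hedged sketch of the expected call order (the probe function and the "vdd-core" supply name are hypothetical): the regulator must be registered before the OPP table, and -EPROBE_DEFER is propagated when the supply is not ready yet.

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		ret = dev_pm_opp_set_regulator(dev, "vdd-core");
		if (ret)		/* may be -EPROBE_DEFER */
			return ret;

		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			dev_pm_opp_put_regulator(dev);

		return ret;
	}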
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
struct device_node *np)
{
- unsigned int count = dev_opp->supported_hw_count;
+ unsigned int count = opp_table->supported_hw_count;
u32 version;
int ret;
- if (!dev_opp->supported_hw)
+ if (!opp_table->supported_hw)
return true;
while (count--) {
@@ -1105,7 +1540,7 @@ static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
}
/* Both of these are bitwise masks of the versions */
- if (!(version & dev_opp->supported_hw[count]))
+ if (!(version & opp_table->supported_hw[count]))
return false;
}
@@ -1117,11 +1552,11 @@ static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
* @dev: device for which we do this operation
* @np: device node
*
- * This function adds an opp definition to the opp list and returns status. The
+ * This function adds an opp definition to the opp table and returns status. The
* opp can be controlled using dev_pm_opp_enable/disable functions and may be
* removed by dev_pm_opp_remove.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1137,16 +1572,16 @@ static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
*/
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
u64 rate;
u32 val;
int ret;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- new_opp = _allocate_opp(dev, &dev_opp);
+ new_opp = _allocate_opp(dev, &opp_table);
if (!new_opp) {
ret = -ENOMEM;
goto unlock;
@@ -1159,7 +1594,7 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
}
/* Check if the OPP supports hardware's hierarchy of versions or not */
- if (!_opp_is_supported(dev, dev_opp, np)) {
+ if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
goto free_opp;
}
@@ -1179,30 +1614,30 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- ret = opp_parse_supplies(new_opp, dev, dev_opp);
+ ret = opp_parse_supplies(new_opp, dev, opp_table);
if (ret)
goto free_opp;
- ret = _opp_add(dev, new_opp, dev_opp);
+ ret = _opp_add(dev, new_opp, opp_table);
if (ret)
goto free_opp;
/* OPP to select on device suspend */
if (of_property_read_bool(np, "opp-suspend")) {
- if (dev_opp->suspend_opp) {
+ if (opp_table->suspend_opp) {
dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, dev_opp->suspend_opp->rate,
+ __func__, opp_table->suspend_opp->rate,
new_opp->rate);
} else {
new_opp->suspend = true;
- dev_opp->suspend_opp = new_opp;
+ opp_table->suspend_opp = new_opp;
}
}
- if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
- dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+ if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+ opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
__func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
@@ -1213,13 +1648,13 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
return 0;
free_opp:
- _opp_remove(dev_opp, new_opp, false);
+ _opp_remove(opp_table, new_opp, false);
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
@@ -1229,11 +1664,11 @@ unlock:
* @freq: Frequency in Hz for this OPP
* @u_volt: Voltage in uVolts for this OPP
*
- * This function adds an opp definition to the opp list and returns status.
+ * This function adds an opp definition to the opp table and returns status.
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1265,7 +1700,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
* copy operation; returns 0 if no modification was needed or the modification
* was successful.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks to
* keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1274,7 +1709,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
static int _opp_set_availability(struct device *dev, unsigned long freq,
bool availability_req)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
int r = 0;
@@ -1283,18 +1718,18 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
if (!new_opp)
return -ENOMEM;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- /* Find the device_opp */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- r = PTR_ERR(dev_opp);
+ /* Find the opp_table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ r = PTR_ERR(opp_table);
dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
goto unlock;
}
/* Do we have the frequency? */
- list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
if (tmp_opp->rate == freq) {
opp = tmp_opp;
break;
@@ -1315,21 +1750,21 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
new_opp->available = availability_req;
list_replace_rcu(&opp->node, &new_opp->node);
- mutex_unlock(&dev_opp_list_lock);
- call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+ mutex_unlock(&opp_table_lock);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
/* Notify the change of the OPP availability */
if (availability_req)
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
- new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_ENABLE, new_opp);
else
- srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
- new_opp);
+ srcu_notifier_call_chain(&opp_table->srcu_head,
+ OPP_EVENT_DISABLE, new_opp);
return 0;
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
kfree(new_opp);
return r;
}
@@ -1343,7 +1778,7 @@ unlock:
* corresponding error value. It is meant to be used to make an OPP available
* after being temporarily made unavailable with dev_pm_opp_disable.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
@@ -1369,7 +1804,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* control by users to make this OPP not available until the circumstances are
* right to make it available again (with a call to dev_pm_opp_enable).
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU and mutex locks to keep the
* integrity of the internal data structures. Callers should ensure that
* this function is *NOT* called under RCU protection or in contexts where
@@ -1387,26 +1822,26 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
/**
* dev_pm_opp_get_notifier() - find notifier_head of the device with opp
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Return: pointer to notifier head if found, otherwise -ENODEV or
* -EINVAL based on the type of error, cast as a pointer. The value must be
* checked with IS_ERR to determine whether it is a valid pointer or an error.
*
- * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
- * protected pointer. The reason for the same is that the opp pointer which is
- * returned will remain valid for use with opp_get_{voltage, freq} only while
+ * Locking: This function must be called under rcu_read_lock(). opp_table is a
+ * RCU protected pointer. The reason for the same is that the opp pointer which
+ * is returned will remain valid for use with opp_get_{voltage, freq} only while
* under the locked area. The pointer returned must be used prior to unlocking
* with rcu_read_unlock() to maintain the integrity of the pointer.
*/
struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
{
- struct device_opp *dev_opp = _find_device_opp(dev);
+ struct opp_table *opp_table = _find_opp_table(dev);
- if (IS_ERR(dev_opp))
- return ERR_CAST(dev_opp); /* matching type */
+ if (IS_ERR(opp_table))
+ return ERR_CAST(opp_table); /* matching type */
- return &dev_opp->srcu_head;
+ return &opp_table->srcu_head;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
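The locking rule above suggests a calling pattern like the following sketch ('nb' is a caller-provided notifier_block): only the lookup runs under rcu_read_lock(), since SRCU notifier registration may sleep.

	static int foo_watch_opps(struct device *dev, struct notifier_block *nb)
	{
		struct srcu_notifier_head *nh;

		rcu_read_lock();
		nh = dev_pm_opp_get_notifier(dev);
		rcu_read_unlock();

		if (IS_ERR(nh))
			return PTR_ERR(nh);

		return srcu_notifier_chain_register(nh, nb);
	}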
@@ -1414,11 +1849,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
/**
* dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
* entries
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Free OPPs created using static entries present in DT.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
@@ -1426,38 +1861,38 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
*/
void dev_pm_opp_of_remove_table(struct device *dev)
{
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct dev_pm_opp *opp, *tmp;
- /* Hold our list modification lock here */
- mutex_lock(&dev_opp_list_lock);
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
- /* Check for existing list for 'dev' */
- dev_opp = _find_device_opp(dev);
- if (IS_ERR(dev_opp)) {
- int error = PTR_ERR(dev_opp);
+ /* Check for existing table for 'dev' */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ int error = PTR_ERR(opp_table);
if (error != -ENODEV)
- WARN(1, "%s: dev_opp: %d\n",
+ WARN(1, "%s: opp_table: %d\n",
IS_ERR_OR_NULL(dev) ?
"Invalid device" : dev_name(dev),
error);
goto unlock;
}
- /* Find if dev_opp manages a single device */
- if (list_is_singular(&dev_opp->dev_list)) {
+ /* Find if opp_table manages a single device */
+ if (list_is_singular(&opp_table->dev_list)) {
/* Free static OPPs */
- list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+ list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
if (!opp->dynamic)
- _opp_remove(dev_opp, opp, true);
+ _opp_remove(opp_table, opp, true);
}
} else {
- _remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+ _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
}
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
@@ -1478,22 +1913,22 @@ struct device_node *_of_get_opp_desc_node(struct device *dev)
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
struct device_node *np;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
int ret = 0, count = 0;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _managed_opp(opp_np);
- if (dev_opp) {
+ opp_table = _managed_opp(opp_np);
+ if (opp_table) {
/* OPPs are already managed */
- if (!_add_list_dev(dev, dev_opp))
+ if (!_add_opp_dev(dev, opp_table))
ret = -ENOMEM;
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
- /* We have opp-list node now, iterate over it and add OPPs */
+ /* We have the opp-table node now; iterate over it and add OPPs */
for_each_available_child_of_node(opp_np, np) {
count++;
@@ -1509,19 +1944,19 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
if (WARN_ON(!count))
return -ENOENT;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(dev);
- if (WARN_ON(IS_ERR(dev_opp))) {
- ret = PTR_ERR(dev_opp);
- mutex_unlock(&dev_opp_list_lock);
+ opp_table = _find_opp_table(dev);
+ if (WARN_ON(IS_ERR(opp_table))) {
+ ret = PTR_ERR(opp_table);
+ mutex_unlock(&opp_table_lock);
goto free_table;
}
- dev_opp->np = opp_np;
- dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+ opp_table->np = opp_np;
+ opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return 0;
@@ -1550,7 +1985,7 @@ static int _of_add_opp_table_v1(struct device *dev)
*/
nr = prop->length / sizeof(u32);
if (nr % 2) {
- dev_err(dev, "%s: Invalid OPP list\n", __func__);
+ dev_err(dev, "%s: Invalid OPP table\n", __func__);
return -EINVAL;
}
@@ -1570,11 +2005,11 @@ static int _of_add_opp_table_v1(struct device *dev)
/**
* dev_pm_opp_of_add_table() - Initialize opp table from device tree
- * @dev: device pointer used to lookup device OPPs.
+ * @dev: device pointer used to lookup OPP table.
*
* Register the initial OPP table with the OPP library for given device.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 9f0c15570f64..ba2bdbd932ef 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -31,7 +31,7 @@
* @table: Cpufreq table returned back to caller
*
* Generate a cpufreq table for a provided device; this assumes that the
- * opp list is already initialized and ready for usage.
+ * opp table is already initialized and ready for usage.
*
* This function allocates required memory for the cpufreq table. It is
* expected that the caller does the required maintenance such as freeing
@@ -44,7 +44,7 @@
* WARNING: It is important for the callers to ensure refreshing their copy of
* the table if any of the mentioned functions have been invoked in the interim.
*
- * Locking: The internal device_opp and opp structures are RCU protected.
+ * Locking: The internal opp_table and opp structures are RCU protected.
* Since we just use the regular accessor functions to access the internal data
* structures, we use RCU read lock inside this function. As a result, users of
* this function DO NOT need to use explicit locks when invoking it.
@@ -122,15 +122,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
/* Required only for V1 bindings, as v2 can manage it from DT itself */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
- struct device_list_opp *list_dev;
- struct device_opp *dev_opp;
+ struct opp_device *opp_dev;
+ struct opp_table *opp_table;
struct device *dev;
int cpu, ret = 0;
- mutex_lock(&dev_opp_list_lock);
+ mutex_lock(&opp_table_lock);
- dev_opp = _find_device_opp(cpu_dev);
- if (IS_ERR(dev_opp)) {
+ opp_table = _find_opp_table(cpu_dev);
+ if (IS_ERR(opp_table)) {
ret = -EINVAL;
goto unlock;
}
@@ -146,15 +146,15 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
continue;
}
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
__func__, cpu);
continue;
}
}
unlock:
- mutex_unlock(&dev_opp_list_lock);
+ mutex_unlock(&opp_table_lock);
return ret;
}
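A sketch of a v1-bindings caller (the CPU-device lookup is elided and the mask contents are platform knowledge): every CPU in the mask ends up sharing cpu_dev's opp_table via _add_opp_dev().

	static int foo_share_opps(struct device *cpu_dev)
	{
		cpumask_var_t mask;
		int ret;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(mask, cpu_possible_mask);	/* hypothetical domain */
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, mask);
		free_cpumask_var(mask);

		return ret;
	}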
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
index ddfe4773e922..ef1ae6b52042 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/base/power/opp/debugfs.c
@@ -34,9 +34,9 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
debugfs_remove_recursive(opp->dentry);
}
-int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp)
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
{
- struct dentry *pdentry = dev_opp->dentry;
+ struct dentry *pdentry = opp_table->dentry;
struct dentry *d;
char name[25]; /* 20 chars for 64 bit value + 5 (opp:\0) */
@@ -83,52 +83,52 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp)
return 0;
}
-static int device_opp_debug_create_dir(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static int opp_list_debug_create_dir(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- const struct device *dev = list_dev->dev;
+ const struct device *dev = opp_dev->dev;
struct dentry *d;
- opp_set_dev_name(dev, dev_opp->dentry_name);
+ opp_set_dev_name(dev, opp_table->dentry_name);
/* Create device specific directory */
- d = debugfs_create_dir(dev_opp->dentry_name, rootdir);
+ d = debugfs_create_dir(opp_table->dentry_name, rootdir);
if (!d) {
dev_err(dev, "%s: Failed to create debugfs dir\n", __func__);
return -ENOMEM;
}
- list_dev->dentry = d;
- dev_opp->dentry = d;
+ opp_dev->dentry = d;
+ opp_table->dentry = d;
return 0;
}
-static int device_opp_debug_create_link(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static int opp_list_debug_create_link(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- const struct device *dev = list_dev->dev;
+ const struct device *dev = opp_dev->dev;
char name[NAME_MAX];
struct dentry *d;
- opp_set_dev_name(list_dev->dev, name);
+ opp_set_dev_name(opp_dev->dev, name);
/* Create device specific directory link */
- d = debugfs_create_symlink(name, rootdir, dev_opp->dentry_name);
+ d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name);
if (!d) {
dev_err(dev, "%s: Failed to create link\n", __func__);
return -ENOMEM;
}
- list_dev->dentry = d;
+ opp_dev->dentry = d;
return 0;
}
/**
* opp_debug_register - add a device opp node to the debugfs 'opp' directory
- * @list_dev: list-dev pointer for device
- * @dev_opp: the device-opp being added
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the opp_table being added
*
* Dynamically adds a device-specific directory in the debugfs 'opp' directory. If
* the OPP table is shared with other devices, then links will be created for all
@@ -136,73 +136,72 @@ static int device_opp_debug_create_link(struct device_list_opp *list_dev,
*
* Return: 0 on success, otherwise negative error.
*/
-int opp_debug_register(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table)
{
if (!rootdir) {
pr_debug("%s: Uninitialized rootdir\n", __func__);
return -EINVAL;
}
- if (dev_opp->dentry)
- return device_opp_debug_create_link(list_dev, dev_opp);
+ if (opp_table->dentry)
+ return opp_list_debug_create_link(opp_dev, opp_table);
- return device_opp_debug_create_dir(list_dev, dev_opp);
+ return opp_list_debug_create_dir(opp_dev, opp_table);
}
-static void opp_migrate_dentry(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static void opp_migrate_dentry(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- struct device_list_opp *new_dev;
+ struct opp_device *new_dev;
const struct device *dev;
struct dentry *dentry;
- /* Look for next list-dev */
- list_for_each_entry(new_dev, &dev_opp->dev_list, node)
- if (new_dev != list_dev)
+ /* Look for next opp-dev */
+ list_for_each_entry(new_dev, &opp_table->dev_list, node)
+ if (new_dev != opp_dev)
break;
/* new_dev is guaranteed to be valid here */
dev = new_dev->dev;
debugfs_remove_recursive(new_dev->dentry);
- opp_set_dev_name(dev, dev_opp->dentry_name);
+ opp_set_dev_name(dev, opp_table->dentry_name);
- dentry = debugfs_rename(rootdir, list_dev->dentry, rootdir,
- dev_opp->dentry_name);
+ dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
+ opp_table->dentry_name);
if (!dentry) {
dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
- __func__, dev_name(list_dev->dev), dev_name(dev));
+ __func__, dev_name(opp_dev->dev), dev_name(dev));
return;
}
new_dev->dentry = dentry;
- dev_opp->dentry = dentry;
+ opp_table->dentry = dentry;
}
/**
* opp_debug_unregister - remove a device opp node from debugfs opp directory
- * @list_dev: list-dev pointer for device
- * @dev_opp: the device-opp being removed
+ * @opp_dev: opp-dev pointer for device
+ * @opp_table: the opp_table being removed
*
* Dynamically removes device specific directory from debugfs 'opp' directory.
*/
-void opp_debug_unregister(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+void opp_debug_unregister(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{
- if (list_dev->dentry == dev_opp->dentry) {
+ if (opp_dev->dentry == opp_table->dentry) {
/* Move the real dentry object under another device */
- if (!list_is_singular(&dev_opp->dev_list)) {
- opp_migrate_dentry(list_dev, dev_opp);
+ if (!list_is_singular(&opp_table->dev_list)) {
+ opp_migrate_dentry(opp_dev, opp_table);
goto out;
}
- dev_opp->dentry = NULL;
+ opp_table->dentry = NULL;
}
- debugfs_remove_recursive(list_dev->dentry);
+ debugfs_remove_recursive(opp_dev->dentry);
out:
- list_dev->dentry = NULL;
+ opp_dev->dentry = NULL;
}
static int __init opp_debug_init(void)
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index 690638ef36ee..f67f806fcf3a 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -22,13 +22,16 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+struct clk;
+struct regulator;
+
/* Lock to allow exclusive modification to the device and opp lists */
-extern struct mutex dev_opp_list_lock;
+extern struct mutex opp_table_lock;
/*
* Internal data structure organization with the OPP layer library is as
* follows:
- * dev_opp_list (root)
+ * opp_tables (root)
* |- device 1 (represents voltage domain 1)
* | |- opp 1 (availability, freq, voltage)
* | |- opp 2 ..
@@ -37,18 +40,18 @@ extern struct mutex dev_opp_list_lock;
* |- device 2 (represents the next voltage domain)
* ...
* `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
+ * device 1, 2.. are represented by opp_table structure while each opp
* is represented by the opp structure.
*/
/**
* struct dev_pm_opp - Generic OPP description structure
- * @node: opp list node. The nodes are maintained throughout the lifetime
+ * @node: opp table node. The nodes are maintained throughout the lifetime
* of boot. It is expected only an optimal set of OPPs are
* added to the library by the SoC framework.
- * RCU usage: opp list is traversed with RCU locks. node
+ * RCU usage: opp table is traversed with RCU locks. node
* modification is possible realtime, hence the modifications
- * are protected by the dev_opp_list_lock for integrity.
+ * are protected by the opp_table_lock for integrity.
* IMPORTANT: the opp nodes should be maintained in increasing
* order.
* @available: true/false - marks if this OPP as available or not
@@ -62,7 +65,7 @@ extern struct mutex dev_opp_list_lock;
* @u_amp: Maximum current drawn by the device in microamperes
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
- * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @opp_table: points back to the opp_table struct this opp belongs to
* @rcu_head: RCU callback head used for deferred freeing
* @np: OPP's device node.
* @dentry: debugfs dentry pointer (per opp)
@@ -84,7 +87,7 @@ struct dev_pm_opp {
unsigned long u_amp;
unsigned long clock_latency_ns;
- struct device_opp *dev_opp;
+ struct opp_table *opp_table;
struct rcu_head rcu_head;
struct device_node *np;
@@ -95,16 +98,16 @@ struct dev_pm_opp {
};
/**
- * struct device_list_opp - devices managed by 'struct device_opp'
+ * struct opp_device - devices managed by 'struct opp_table'
* @node: list node
* @dev: device to which the struct object belongs
* @rcu_head: RCU callback head used for deferred freeing
* @dentry: debugfs dentry pointer (per device)
*
- * This is an internal data structure maintaining the list of devices that are
- * managed by 'struct device_opp'.
+ * This is an internal data structure maintaining the devices that are managed
+ * by 'struct opp_table'.
*/
-struct device_list_opp {
+struct opp_device {
struct list_head node;
const struct device *dev;
struct rcu_head rcu_head;
@@ -115,16 +118,16 @@ struct device_list_opp {
};
/**
- * struct device_opp - Device opp structure
- * @node: list node - contains the devices with OPPs that
+ * struct opp_table - Device opp structure
+ * @node: table node - contains the devices with OPPs that
* have been registered. Nodes once added are not modified in this
- * list.
- * RCU usage: nodes are not modified in the list of device_opp,
- * however addition is possible and is secured by dev_opp_list_lock
+ * table.
+ * RCU usage: nodes are not modified once added to the table;
+ * however, addition is possible and is secured by opp_table_lock
* @srcu_head: notifier head to notify the OPP availability changes.
* @rcu_head: RCU callback head used for deferred freeing
* @dev_list: list of devices that share these OPPs
- * @opp_list: list of opps
+ * @opp_list: table of opps
* @np: struct device_node pointer for opp's DT node.
* @clock_latency_ns_max: Max clock latency in nanoseconds.
* @shared_opp: OPP is shared between multiple devices.
@@ -132,9 +135,13 @@ struct device_list_opp {
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
+ * @clk: Device's clock handle
+ * @regulator: Supply regulator
* @dentry: debugfs dentry pointer of the real device directory (not links).
* @dentry_name: Name of the real dentry.
*
+ * @voltage_tolerance_v1: In percentage, for v1 bindings only.
+ *
* This is an internal data structure maintaining the link to opps attached to
* a device. This structure is not meant to be shared to users as it is
* meant for book keeping and private to OPP library.
@@ -143,7 +150,7 @@ struct device_list_opp {
* need to wait for the grace period of both of them before freeing any
* resources. And so we have used kfree_rcu() from within call_srcu() handlers.
*/
-struct device_opp {
+struct opp_table {
struct list_head node;
struct srcu_notifier_head srcu_head;
@@ -153,12 +160,18 @@ struct device_opp {
struct device_node *np;
unsigned long clock_latency_ns_max;
+
+ /* For backward compatibility with v1 bindings */
+ unsigned int voltage_tolerance_v1;
+
bool shared_opp;
struct dev_pm_opp *suspend_opp;
unsigned int *supported_hw;
unsigned int supported_hw_count;
const char *prop_name;
+ struct clk *clk;
+ struct regulator *regulator;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
@@ -167,30 +180,27 @@ struct device_opp {
};
/* Routines internal to opp core */
-struct device_opp *_find_device_opp(struct device *dev);
-struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp);
+struct opp_table *_find_opp_table(struct device *dev);
+struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct device_node *_of_get_opp_desc_node(struct device *dev);
#ifdef CONFIG_DEBUG_FS
void opp_debug_remove_one(struct dev_pm_opp *opp);
-int opp_debug_create_one(struct dev_pm_opp *opp, struct device_opp *dev_opp);
-int opp_debug_register(struct device_list_opp *list_dev,
- struct device_opp *dev_opp);
-void opp_debug_unregister(struct device_list_opp *list_dev,
- struct device_opp *dev_opp);
+int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table);
+int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table);
+void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table);
#else
static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {}
static inline int opp_debug_create_one(struct dev_pm_opp *opp,
- struct device_opp *dev_opp)
+ struct opp_table *opp_table)
{ return 0; }
-static inline int opp_debug_register(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static inline int opp_debug_register(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{ return 0; }
-static inline void opp_debug_unregister(struct device_list_opp *list_dev,
- struct device_opp *dev_opp)
+static inline void opp_debug_unregister(struct opp_device *opp_dev,
+ struct opp_table *opp_table)
{ }
#endif /* DEBUG_FS */
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index a311cfa4c5bd..a6975795e7f3 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
}
EXPORT_SYMBOL(generate_pm_trace);
-extern char __tracedata_start, __tracedata_end;
+extern char __tracedata_start[], __tracedata_end[];
static int show_file_hash(unsigned int value)
{
int match;
char *tracedata;
match = 0;
- for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
+ for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
tracedata += 2 + sizeof(unsigned long)) {
unsigned short lineno = *(unsigned short *)tracedata;
const char *file = *(const char **)(tracedata + 2);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a1e0b9ab847a..5fb7718f256c 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -246,6 +246,8 @@ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
return -EEXIST;
}
dev->power.wakeup = ws;
+ if (dev->power.wakeirq)
+ device_wakeup_attach_irq(dev, dev->power.wakeirq);
spin_unlock_irq(&dev->power.lock);
return 0;
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index a163f2c59aa3..9b1a65debd49 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -218,7 +218,8 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
bool ret;
ret = __fwnode_property_present(fwnode, propname);
- if (ret == false && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ if (ret == false && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_present(fwnode->secondary, propname);
return ret;
}
@@ -423,7 +424,8 @@ EXPORT_SYMBOL_GPL(device_property_match_string);
int _ret_; \
_ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \
_val_, _nval_); \
- if (_ret_ == -EINVAL && _fwnode_ && !IS_ERR_OR_NULL(_fwnode_->secondary)) \
+ if (_ret_ == -EINVAL && !IS_ERR_OR_NULL(_fwnode_) && \
+ !IS_ERR_OR_NULL(_fwnode_->secondary)) \
_ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \
_proptype_, _val_, _nval_); \
_ret_; \
@@ -593,7 +595,8 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
int ret;
ret = __fwnode_property_read_string_array(fwnode, propname, val, nval);
- if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_read_string_array(fwnode->secondary,
propname, val, nval);
return ret;
@@ -621,7 +624,8 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
int ret;
ret = __fwnode_property_read_string(fwnode, propname, val);
- if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
+ if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
ret = __fwnode_property_read_string(fwnode->secondary,
propname, val);
return ret;
@@ -647,7 +651,7 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode,
const char *propname, const char *string)
{
const char **values;
- int nval, ret, i;
+ int nval, ret;
nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
if (nval < 0)
@@ -664,13 +668,9 @@ int fwnode_property_match_string(struct fwnode_handle *fwnode,
if (ret < 0)
goto out;
- ret = -ENODATA;
- for (i = 0; i < nval; i++) {
- if (!strcmp(values[i], string)) {
- ret = i;
- break;
- }
- }
+ ret = match_string(values, nval, string);
+ if (ret < 0)
+ ret = -ENODATA;
out:
kfree(values);
return ret;
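For reference, the removed loop is equivalent to this open-coded helper; match_string() returns the matching index or a negative errno, which the caller maps to -ENODATA to keep the old behavior:

	/* Illustration only: what match_string(values, nval, string) computes. */
	static int foo_match_string(const char **values, int nval,
				    const char *string)
	{
		int i;

		for (i = 0; i < nval; i++)
			if (!strcmp(values[i], string))
				return i;

		return -ENODATA;
	}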
@@ -820,11 +820,16 @@ void device_remove_property_set(struct device *dev)
* the pset. If there is no real firmware node (ACPI/DT) primary
* will hold the pset.
*/
- if (!is_pset_node(fwnode))
- fwnode = fwnode->secondary;
- if (!IS_ERR(fwnode) && is_pset_node(fwnode))
+ if (is_pset_node(fwnode)) {
+ set_primary_fwnode(dev, NULL);
pset_free_set(to_pset_node(fwnode));
- set_secondary_fwnode(dev, NULL);
+ } else {
+ fwnode = fwnode->secondary;
+ if (!IS_ERR(fwnode) && is_pset_node(fwnode)) {
+ set_secondary_fwnode(dev, NULL);
+ pset_free_set(to_pset_node(fwnode));
+ }
+ }
}
EXPORT_SYMBOL_GPL(device_remove_property_set);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 3df977054781..5c79526245c2 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -110,6 +110,7 @@ struct regmap {
/* number of bits to (left) shift the reg value when formatting*/
int reg_shift;
int reg_stride;
+ int reg_stride_order;
/* regcache specific members */
const struct regcache_ops *cache_ops;
@@ -263,4 +264,19 @@ static inline const char *regmap_name(const struct regmap *map)
return map->name;
}
+static inline unsigned int regmap_get_offset(const struct regmap *map,
+ unsigned int index)
+{
+ if (map->reg_stride_order >= 0)
+ return index << map->reg_stride_order;
+ else
+ return index * map->reg_stride;
+}
+
+static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
+ unsigned int reg)
+{
+ return reg >> map->reg_stride_order;
+}
+
#endif
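The intent of reg_stride_order is to turn the common power-of-two stride multiply into a shift; a standalone illustration (not kernel code):

	/* With reg_stride = 4, reg_stride_order = ilog2(4) = 2, so index 3
	 * maps to offset 3 << 2 = 12, identical to 3 * 4 but cheaper.
	 * A stride of 3 is not a power of two, so the order is -1 and the
	 * multiply path is used instead.
	 */
	static unsigned int offset_of(unsigned int index, int stride, int order)
	{
		return order >= 0 ? index << order : index * stride;
	}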
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 686c9e0b930e..3ee72550b1e3 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -16,20 +16,30 @@
#include "internal.h"
+static inline unsigned int regcache_flat_get_index(const struct regmap *map,
+ unsigned int reg)
+{
+ return regcache_get_index_by_order(map, reg);
+}
+
static int regcache_flat_init(struct regmap *map)
{
int i;
unsigned int *cache;
- map->cache = kcalloc(map->max_register + 1, sizeof(unsigned int),
- GFP_KERNEL);
+ if (!map || map->reg_stride_order < 0)
+ return -EINVAL;
+
+ map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
+ + 1, sizeof(unsigned int), GFP_KERNEL);
if (!map->cache)
return -ENOMEM;
cache = map->cache;
for (i = 0; i < map->num_reg_defaults; i++)
- cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
+ cache[regcache_flat_get_index(map, map->reg_defaults[i].reg)] =
+ map->reg_defaults[i].def;
return 0;
}
@@ -47,7 +57,7 @@ static int regcache_flat_read(struct regmap *map,
{
unsigned int *cache = map->cache;
- *value = cache[reg];
+ *value = cache[regcache_flat_get_index(map, reg)];
return 0;
}
@@ -57,7 +67,7 @@ static int regcache_flat_write(struct regmap *map, unsigned int reg,
{
unsigned int *cache = map->cache;
- cache[reg] = value;
+ cache[regcache_flat_get_index(map, reg)] = value;
return 0;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 348be3a35410..4170b7d95276 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -30,7 +30,7 @@ static int regcache_hw_init(struct regmap *map)
int i, j;
int ret;
int count;
- unsigned int val;
+ unsigned int reg, val;
void *tmp_buf;
if (!map->num_reg_defaults_raw)
@@ -57,7 +57,7 @@ static int regcache_hw_init(struct regmap *map)
bool cache_bypass = map->cache_bypass;
dev_warn(map->dev, "No cache defaults, reading back from HW\n");
- /* Bypass the cache access till data read from HW*/
+ /* Bypass the cache access till data is read from HW */
map->cache_bypass = true;
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf) {
@@ -65,29 +65,48 @@ static int regcache_hw_init(struct regmap *map)
goto err_free;
}
ret = regmap_raw_read(map, 0, tmp_buf,
- map->num_reg_defaults_raw);
+ map->cache_size_raw);
map->cache_bypass = cache_bypass;
- if (ret < 0)
- goto err_cache_free;
-
- map->reg_defaults_raw = tmp_buf;
- map->cache_free = 1;
+ if (ret == 0) {
+ map->reg_defaults_raw = tmp_buf;
+ map->cache_free = 1;
+ } else {
+ kfree(tmp_buf);
+ }
}
/* fill the reg_defaults */
for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
- if (regmap_volatile(map, i * map->reg_stride))
+ reg = i * map->reg_stride;
+
+ if (!regmap_readable(map, reg))
continue;
- val = regcache_get_val(map, map->reg_defaults_raw, i);
- map->reg_defaults[j].reg = i * map->reg_stride;
+
+ if (regmap_volatile(map, reg))
+ continue;
+
+ if (map->reg_defaults_raw) {
+ val = regcache_get_val(map, map->reg_defaults_raw, i);
+ } else {
+ bool cache_bypass = map->cache_bypass;
+
+ map->cache_bypass = true;
+ ret = regmap_read(map, reg, &val);
+ map->cache_bypass = cache_bypass;
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read %d: %d\n",
+ reg, ret);
+ goto err_free;
+ }
+ }
+
+ map->reg_defaults[j].reg = reg;
map->reg_defaults[j].def = val;
j++;
}
return 0;
-err_cache_free:
- kfree(tmp_buf);
err_free:
kfree(map->reg_defaults);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 9b0d202414d0..26f799e71c82 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -379,6 +379,7 @@ static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, data);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
+ irq_set_parent(virq, data->irq);
irq_set_noprobe(virq);
return 0;
@@ -655,13 +656,34 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
*
* @irq: Primary IRQ for the device
* @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ *
+ * This function also disposes of all mapped IRQs on the chip.
*/
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
+ unsigned int virq;
+ int hwirq;
+
if (!d)
return;
free_irq(irq, d);
+
+ /* Dispose of all virtual IRQs from the IRQ domain before removing it */
+ for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
+ /* Skip holes in the IRQ list (unused hwirqs have no mask) */
+ if (!d->chip->irqs[hwirq].mask)
+ continue;
+
+ /*
+ * Find the virtual IRQ mapped to this hwirq on the chip
+ * and, if one exists, dispose of it.
+ */
+ virq = irq_find_mapping(d->domain, hwirq);
+ if (virq)
+ irq_dispose_mapping(virq);
+ }
+
irq_domain_remove(d->domain);
kfree(d->type_buf);
kfree(d->type_buf_def);
@@ -674,6 +696,88 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+static void devm_regmap_irq_chip_release(struct device *dev, void *res)
+{
+ struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
+
+ regmap_del_irq_chip(d->irq, d);
+}
+
+static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
+{
+ struct regmap_irq_chip_data **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_regmap_add_irq_chip(): Resource-managed regmap_add_irq_chip()
+ *
+ * @dev: The device to which the irq_chip belongs.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The regmap_irq_chip data will automatically be released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ struct regmap_irq_chip_data **ptr, *d;
+ int ret;
+
+ ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
+ chip, &d);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = d;
+ devres_add(dev, ptr);
+ *data = d;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
+
+/**
+ * devm_regmap_del_irq_chip(): Resource managed regmap_del_irq_chip()
+ *
+ * @dev: Device for which the resource was allocated.
+ * @irq: Primary IRQ for the device
+ * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ */
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+ struct regmap_irq_chip_data *data)
+{
+ int rc;
+
+ WARN_ON(irq != data->irq);
+ rc = devres_release(dev, devm_regmap_irq_chip_release,
+ devm_regmap_irq_chip_match, data);
+
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
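A minimal probe sketch for the devres variant ('foo_irq_chip' and the regmap pointer are placeholders); no matching devm_regmap_del_irq_chip() call is needed on the normal unbind path:

	static const struct regmap_irq_chip foo_irq_chip = {
		.name = "foo",
		/* .status_base, .mask_base, .irqs, .num_irqs, etc. elided */
	};

	static int foo_probe(struct device *dev, struct regmap *map, int irq)
	{
		struct regmap_irq_chip_data *irq_data;

		/* Torn down automatically via devres when 'dev' unbinds */
		return devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT,
						0, &foo_irq_chip, &irq_data);
	}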
+
/**
* regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
*
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index eea51569f0eb..7526906ca080 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -25,26 +25,14 @@
struct regmap_mmio_context {
void __iomem *regs;
- unsigned reg_bytes;
unsigned val_bytes;
- unsigned pad_bytes;
struct clk *clk;
-};
-static inline void regmap_mmio_regsize_check(size_t reg_size)
-{
- switch (reg_size) {
- case 1:
- case 2:
- case 4:
-#ifdef CONFIG_64BIT
- case 8:
-#endif
- break;
- default:
- BUG();
- }
-}
+ void (*reg_write)(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val);
+ unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+ unsigned int reg);
+};
static int regmap_mmio_regbits_check(size_t reg_bits)
{
@@ -88,72 +76,62 @@ static int regmap_mmio_get_min_stride(size_t val_bits)
return min_stride;
}
-static inline void regmap_mmio_count_check(size_t count, u32 offset)
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeb(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
{
- BUG_ON(count <= offset);
+ writew(val, ctx->regs + reg);
}
-static inline unsigned int
-regmap_mmio_get_offset(const void *reg, size_t reg_size)
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
{
- switch (reg_size) {
- case 1:
- return *(u8 *)reg;
- case 2:
- return *(u16 *)reg;
- case 4:
- return *(u32 *)reg;
+ iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writel(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ iowrite32be(val, ctx->regs + reg);
+}
+
#ifdef CONFIG_64BIT
- case 8:
- return *(u64 *)reg;
-#endif
- default:
- BUG();
- }
+static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeq(val, ctx->regs + reg);
}
+#endif
-static int regmap_mmio_gather_write(void *context,
- const void *reg, size_t reg_size,
- const void *val, size_t val_size)
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
{
struct regmap_mmio_context *ctx = context;
- unsigned int offset;
int ret;
- regmap_mmio_regsize_check(reg_size);
-
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
- offset = regmap_mmio_get_offset(reg, reg_size);
-
- while (val_size) {
- switch (ctx->val_bytes) {
- case 1:
- writeb(*(u8 *)val, ctx->regs + offset);
- break;
- case 2:
- writew(*(u16 *)val, ctx->regs + offset);
- break;
- case 4:
- writel(*(u32 *)val, ctx->regs + offset);
- break;
-#ifdef CONFIG_64BIT
- case 8:
- writeq(*(u64 *)val, ctx->regs + offset);
- break;
-#endif
- default:
- /* Should be caught by regmap_mmio_check_config */
- BUG();
- }
- val_size -= ctx->val_bytes;
- val += ctx->val_bytes;
- offset += ctx->val_bytes;
- }
+ ctx->reg_write(ctx, reg, val);
if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
@@ -161,59 +139,56 @@ static int regmap_mmio_gather_write(void *context,
return 0;
}
-static int regmap_mmio_write(void *context, const void *data, size_t count)
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
{
- struct regmap_mmio_context *ctx = context;
- unsigned int offset = ctx->reg_bytes + ctx->pad_bytes;
+ return readb(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readw(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16be(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readl(ctx->regs + reg);
+}
- regmap_mmio_count_check(count, offset);
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread32be(ctx->regs + reg);
+}
- return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
- data + offset, count - offset);
+#ifdef CONFIG_64BIT
+static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readq(ctx->regs + reg);
}
+#endif
-static int regmap_mmio_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
{
struct regmap_mmio_context *ctx = context;
- unsigned int offset;
int ret;
- regmap_mmio_regsize_check(reg_size);
-
if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
}
- offset = regmap_mmio_get_offset(reg, reg_size);
-
- while (val_size) {
- switch (ctx->val_bytes) {
- case 1:
- *(u8 *)val = readb(ctx->regs + offset);
- break;
- case 2:
- *(u16 *)val = readw(ctx->regs + offset);
- break;
- case 4:
- *(u32 *)val = readl(ctx->regs + offset);
- break;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 *)val = readq(ctx->regs + offset);
- break;
-#endif
- default:
- /* Should be caught by regmap_mmio_check_config */
- BUG();
- }
- val_size -= ctx->val_bytes;
- val += ctx->val_bytes;
- offset += ctx->val_bytes;
- }
+ *val = ctx->reg_read(ctx, reg);
if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
@@ -232,14 +207,11 @@ static void regmap_mmio_free_context(void *context)
kfree(context);
}
-static struct regmap_bus regmap_mmio = {
+static const struct regmap_bus regmap_mmio = {
.fast_io = true,
- .write = regmap_mmio_write,
- .gather_write = regmap_mmio_gather_write,
- .read = regmap_mmio_read,
+ .reg_write = regmap_mmio_write,
+ .reg_read = regmap_mmio_read,
.free_context = regmap_mmio_free_context,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
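
Converting the bus from raw write/gather_write/read callbacks to reg_write/reg_read means the core hands over one register at a time, so no formatting buffer or endianness defaults are needed at the bus level. A minimal sketch of how the core consumes such a bus, paraphrasing (not quoting) the regmap internals:

	/*
	 * Illustrative paraphrase only: a per-register bus like the new
	 * regmap_mmio gets called directly, with no value marshalling.
	 */
	static int example_bus_reg_read(struct regmap *map, unsigned int reg,
					unsigned int *val)
	{
		return map->bus->reg_read(map->bus_context, reg, val);
	}
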
static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
@@ -265,24 +237,71 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
if (config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
- switch (config->reg_format_endian) {
- case REGMAP_ENDIAN_DEFAULT:
- case REGMAP_ENDIAN_NATIVE:
- break;
- default:
- return ERR_PTR(-EINVAL);
- }
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
- ctx->reg_bytes = config->reg_bits / 8;
- ctx->pad_bytes = config->pad_bits / 8;
ctx->clk = ERR_PTR(-ENODEV);
+	switch (config->val_format_endian) {
+ case REGMAP_ENDIAN_DEFAULT:
+ case REGMAP_ENDIAN_LITTLE:
+#ifdef __LITTLE_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ switch (config->val_bits) {
+ case 8:
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ break;
+ case 16:
+ ctx->reg_read = regmap_mmio_read16le;
+ ctx->reg_write = regmap_mmio_write16le;
+ break;
+ case 32:
+ ctx->reg_read = regmap_mmio_read32le;
+ ctx->reg_write = regmap_mmio_write32le;
+ break;
+#ifdef CONFIG_64BIT
+ case 64:
+ ctx->reg_read = regmap_mmio_read64le;
+ ctx->reg_write = regmap_mmio_write64le;
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ case REGMAP_ENDIAN_BIG:
+#ifdef __BIG_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ switch (config->val_bits) {
+ case 8:
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ break;
+ case 16:
+ ctx->reg_read = regmap_mmio_read16be;
+ ctx->reg_write = regmap_mmio_write16be;
+ break;
+ case 32:
+ ctx->reg_read = regmap_mmio_read32be;
+ ctx->reg_write = regmap_mmio_write32be;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+
if (clk_id == NULL)
return ctx;
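
For illustration, a hypothetical client configuration that would make the endianness switch above select the big-endian 32-bit accessors (the struct fields are real regmap_config members; the names example_cfg, dev and base are made up):

	/* Hypothetical usage sketch: this config would wire
	 * ctx->reg_read/reg_write to the ioread32be/iowrite32be based
	 * helpers chosen above. */
	static const struct regmap_config example_cfg = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
		.val_format_endian = REGMAP_ENDIAN_BIG,
	};
	/* map = devm_regmap_init_mmio(dev, base, &example_cfg); */
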
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index ee54e841de4a..df2d2ef5d6b3 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -19,6 +19,7 @@
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
+#include <linux/log2.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -557,6 +558,8 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
endian = REGMAP_ENDIAN_BIG;
else if (of_property_read_bool(np, "little-endian"))
endian = REGMAP_ENDIAN_LITTLE;
+ else if (of_property_read_bool(np, "native-endian"))
+ endian = REGMAP_ENDIAN_NATIVE;
/* If the endianness was specified in DT, use that */
if (endian != REGMAP_ENDIAN_DEFAULT)
@@ -638,6 +641,10 @@ struct regmap *__regmap_init(struct device *dev,
map->reg_stride = config->reg_stride;
else
map->reg_stride = 1;
+ if (is_power_of_2(map->reg_stride))
+ map->reg_stride_order = ilog2(map->reg_stride);
+ else
+ map->reg_stride_order = -1;
map->use_single_read = config->use_single_rw || !bus || !bus->read;
map->use_single_write = config->use_single_rw || !bus || !bus->write;
map->can_multi_write = config->can_multi_write && bus && bus->write;
@@ -1308,7 +1315,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (map->writeable_reg)
for (i = 0; i < val_len / map->format.val_bytes; i++)
if (!map->writeable_reg(map->dev,
- reg + (i * map->reg_stride)))
+ reg + regmap_get_offset(map, i)))
return -EINVAL;
if (!map->cache_bypass && map->format.parse_val) {
@@ -1316,7 +1323,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
int val_bytes = map->format.val_bytes;
for (i = 0; i < val_len / val_bytes; i++) {
ival = map->format.parse_val(val + (i * val_bytes));
- ret = regcache_write(map, reg + (i * map->reg_stride),
+ ret = regcache_write(map,
+ reg + regmap_get_offset(map, i),
ival);
if (ret) {
dev_err(map->dev,
@@ -1690,100 +1698,63 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
EXPORT_SYMBOL_GPL(regmap_raw_write);
/**
- * regmap_field_write(): Write a value to a single register field
- *
- * @field: Register field to write to
- * @val: Value to be written
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_field_write(struct regmap_field *field, unsigned int val)
-{
- return regmap_update_bits(field->regmap, field->reg,
- field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_field_write);
-
-/**
- * regmap_field_update_bits(): Perform a read/modify/write cycle
- * on the register field
+ * regmap_field_update_bits_base(): Perform a read/modify/write cycle
+ * on a register field, with change, async and force options
*
* @field: Register field to write to
* @mask: Bitmask to change
* @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: If true, perform the update asynchronously
+ * @force: If true, write the new value even if it is unchanged
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
mask = (mask << field->shift) & field->mask;
- return regmap_update_bits(field->regmap, field->reg,
- mask, val << field->shift);
+ return regmap_update_bits_base(field->regmap, field->reg,
+ mask, val << field->shift,
+ change, async, force);
}
-EXPORT_SYMBOL_GPL(regmap_field_update_bits);
+EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
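
The removed regmap_field_write() and regmap_field_update_bits() entry points are expected to survive as header-side wrappers around the new base helper; plausible forms (illustrative sketches, not quoted from include/linux/regmap.h):

	#define regmap_field_write(field, val) \
		regmap_field_update_bits_base(field, ~0, val, NULL, false, false)
	#define regmap_field_update_bits(field, mask, val) \
		regmap_field_update_bits_base(field, mask, val, NULL, false, false)
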
/**
- * regmap_fields_write(): Write a value to a single register field with port ID
- *
- * @field: Register field to write to
- * @id: port ID
- * @val: Value to be written
- *
- * A value of zero will be returned on success, a negative errno will
- * be returned in error cases.
- */
-int regmap_fields_write(struct regmap_field *field, unsigned int id,
- unsigned int val)
-{
- if (id >= field->id_size)
- return -EINVAL;
-
- return regmap_update_bits(field->regmap,
- field->reg + (field->id_offset * id),
- field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_fields_write);
-
-int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
- unsigned int val)
-{
- if (id >= field->id_size)
- return -EINVAL;
-
- return regmap_write_bits(field->regmap,
- field->reg + (field->id_offset * id),
- field->mask, val << field->shift);
-}
-EXPORT_SYMBOL_GPL(regmap_fields_force_write);
-
-/**
- * regmap_fields_update_bits(): Perform a read/modify/write cycle
- * on the register field
+ * regmap_fields_update_bits_base(): Perform a read/modify/write cycle
+ * on a register field with a port ID, with change, async and force
+ * options
*
* @field: Register field to write to
* @id: port ID
* @mask: Bitmask to change
* @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: If true, perform the update asynchronously
+ * @force: If true, write the new value even if it is unchanged
*
* A value of zero will be returned on success, a negative errno will
* be returned in error cases.
*/
-int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
- unsigned int mask, unsigned int val)
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
if (id >= field->id_size)
return -EINVAL;
mask = (mask << field->shift) & field->mask;
- return regmap_update_bits(field->regmap,
- field->reg + (field->id_offset * id),
- mask, val << field->shift);
+ return regmap_update_bits_base(field->regmap,
+ field->reg + (field->id_offset * id),
+ mask, val << field->shift,
+ change, async, force);
}
-EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
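
Likewise for the removed port-ID variants; sketches of the corresponding header-side wrappers, assuming the same pattern:

	#define regmap_fields_write(field, id, val) \
		regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false)
	#define regmap_fields_force_write(field, id, val) \
		regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true)
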
/*
* regmap_bulk_write(): Write multiple registers to the device
@@ -1846,8 +1817,9 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
goto out;
}
- ret = _regmap_write(map, reg + (i * map->reg_stride),
- ival);
+ ret = _regmap_write(map,
+ reg + regmap_get_offset(map, i),
+ ival);
if (ret != 0)
goto out;
}
@@ -2253,6 +2225,9 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
WARN_ON(!map->bus);
+ if (!map->bus || !map->bus->read)
+ return -EINVAL;
+
range = _regmap_range_lookup(map, reg);
if (range) {
ret = _regmap_select_page(map, &reg, range,
@@ -2416,7 +2391,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
* cost as we expect to hit the cache.
*/
for (i = 0; i < val_count; i++) {
- ret = _regmap_read(map, reg + (i * map->reg_stride),
+ ret = _regmap_read(map, reg + regmap_get_offset(map, i),
&v);
if (ret != 0)
goto out;
@@ -2568,7 +2543,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
} else {
for (i = 0; i < val_count; i++) {
unsigned int ival;
- ret = regmap_read(map, reg + (i * map->reg_stride),
+ ret = regmap_read(map, reg + regmap_get_offset(map, i),
&ival);
if (ret != 0)
return ret;
@@ -2648,138 +2623,36 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
}
/**
- * regmap_update_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- int ret;
-
- map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
- map->unlock(map->lock_arg);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits);
-
-/**
- * regmap_write_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_write_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- int ret;
-
- map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
- map->unlock(map->lock_arg);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_write_bits);
-
-/**
- * regmap_update_bits_async: Perform a read/modify/write cycle on the register
- * map asynchronously
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * With most buses the read must be done synchronously so this is most
- * useful for devices with a cache which do not need to interact with
- * the hardware to determine the current register value.
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits_async(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- int ret;
-
- map->lock(map->lock_arg);
-
- map->async = true;
-
- ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
-
- map->async = false;
-
- map->unlock(map->lock_arg);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits_async);
-
-/**
- * regmap_update_bits_check: Perform a read/modify/write cycle on the
- * register map and report if updated
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- * @change: Boolean indicating if a write was done
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits_check(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change)
-{
- int ret;
-
- map->lock(map->lock_arg);
- ret = _regmap_update_bits(map, reg, mask, val, change, false);
- map->unlock(map->lock_arg);
- return ret;
-}
-EXPORT_SYMBOL_GPL(regmap_update_bits_check);
-
-/**
- * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
- * register map asynchronously and report if
- * updated
+ * regmap_update_bits_base: Perform a read/modify/write cycle on the
+ * register map, with change, async and force options
*
* @map: Register map to update
* @reg: Register to update
* @mask: Bitmask to change
* @val: New value for bitmask
* @change: Boolean indicating if a write was done
+ * @async: If true, perform the update asynchronously
+ * @force: If true, write the new value even if it is unchanged
*
+ * If async is true, the update is performed asynchronously.
* With most buses the read must be done synchronously so this is most
* useful for devices with a cache which do not need to interact with
* the hardware to determine the current register value.
*
* Returns zero for success, a negative number on error.
*/
-int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val,
- bool *change)
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
{
int ret;
map->lock(map->lock_arg);
- map->async = true;
+ map->async = async;
- ret = _regmap_update_bits(map, reg, mask, val, change, false);
+ ret = _regmap_update_bits(map, reg, mask, val, change, force);
map->async = false;
@@ -2787,7 +2660,7 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
return ret;
}
-EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+EXPORT_SYMBOL_GPL(regmap_update_bits_base);
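
All five removed exported helpers collapse onto this one exported function, distinguished only by the change/async/force arguments. Sketches of the header-side replacements (illustrative, not part of this hunk):

	#define regmap_update_bits(map, reg, mask, val) \
		regmap_update_bits_base(map, reg, mask, val, NULL, false, false)
	#define regmap_write_bits(map, reg, mask, val) \
		regmap_update_bits_base(map, reg, mask, val, NULL, false, true)
	#define regmap_update_bits_async(map, reg, mask, val) \
		regmap_update_bits_base(map, reg, mask, val, NULL, true, false)
	#define regmap_update_bits_check(map, reg, mask, val, change) \
		regmap_update_bits_base(map, reg, mask, val, change, false, false)
	#define regmap_update_bits_check_async(map, reg, mask, val, change) \
		regmap_update_bits_base(map, reg, mask, val, change, true, false)
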
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 023d448ed3fa..efdc2ae8441a 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -70,6 +70,11 @@ config BCMA_DRIVER_MIPS
If unsure, say N
+config BCMA_PFLASH
+ bool
+ depends on BCMA_DRIVER_MIPS
+ default y
+
config BCMA_SFLASH
bool
depends on BCMA_DRIVER_MIPS
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index f32af9b76bcd..087948a1d20d 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,6 +1,7 @@
bcma-y += main.o scan.o core.o sprom.o
bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
bcma-y += driver_chipcommon_b.o
+bcma-$(CONFIG_BCMA_PFLASH) += driver_chipcommon_pflash.o
bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o
bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o
bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pci.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 38f156745d53..eda09090cb52 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -47,10 +47,6 @@ int bcma_sprom_get(struct bcma_bus *bus);
void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-extern struct platform_device bcma_pflash_dev;
-#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_b.c */
int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
@@ -62,6 +58,21 @@ void bcma_pmu_init(struct bcma_drv_cc *cc);
u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
+/**************************************************
+ * driver_chipcommon_pflash.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_PFLASH
+extern struct platform_device bcma_pflash_dev;
+int bcma_pflash_init(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_pflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "Parallel flash not supported\n");
+ return 0;
+}
+#endif /* CONFIG_BCMA_PFLASH */
+
#ifdef CONFIG_BCMA_SFLASH
/* driver_chipcommon_sflash.c */
int bcma_sflash_init(struct bcma_drv_cc *cc);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b7c8a8d4e6d1..921ce1834673 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,6 +15,8 @@
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
+static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
u32 mask, u32 value)
{
@@ -113,8 +115,37 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
return 0;
}
+static void bcma_core_chipcommon_flash_detect(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+ case BCMA_CC_FLASHT_STSER:
+ case BCMA_CC_FLASHT_ATSER:
+ bcma_debug(bus, "Found serial flash\n");
+ bcma_sflash_init(cc);
+ break;
+ case BCMA_CC_FLASHT_PARA:
+ bcma_debug(bus, "Found parallel flash\n");
+ bcma_pflash_init(cc);
+ break;
+ default:
+ bcma_err(bus, "Flash type not supported\n");
+ }
+
+ if (cc->core->id.rev == 38 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
+ bcma_debug(bus, "Found NAND flash\n");
+ bcma_nflash_init(cc);
+ }
+ }
+}
+
void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
{
+ struct bcma_bus *bus = cc->core->bus;
+
if (cc->early_setup_done)
return;
@@ -129,6 +160,12 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_early_init(cc);
+ if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
+ bcma_chipco_serial_init(cc);
+
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ bcma_core_chipcommon_flash_detect(cc);
+
cc->early_setup_done = true;
}
@@ -185,11 +222,12 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
ticks = 2;
else if (ticks > maxt)
ticks = maxt;
- bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
} else {
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
+ bus->chipinfo.id != BCMA_CHIP_ID_BCM47094 &&
bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
bcma_core_set_clockmode(cc->core,
ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
@@ -314,9 +352,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
return res;
}
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
{
+#if IS_BUILTIN(CONFIG_BCM47XX)
unsigned int irq;
u32 baud_base;
u32 i;
@@ -358,5 +396,5 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
ports[i].baud_base = baud_base;
ports[i].reg_shift = 0;
}
+#endif /* CONFIG_BCM47XX */
}
-#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_chipcommon_pflash.c b/drivers/bcma/driver_chipcommon_pflash.c
new file mode 100644
index 000000000000..3b497c9ee0d4
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_pflash.c
@@ -0,0 +1,49 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon parallel flash
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
+
+static struct physmap_flash_data bcma_pflash_data = {
+ .part_probe_types = part_probes,
+};
+
+static struct resource bcma_pflash_resource = {
+ .name = "bcma_pflash",
+ .flags = IORESOURCE_MEM,
+};
+
+struct platform_device bcma_pflash_dev = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &bcma_pflash_data,
+ },
+ .resource = &bcma_pflash_resource,
+ .num_resources = 1,
+};
+
+int bcma_pflash_init(struct bcma_drv_cc *cc)
+{
+ struct bcma_pflash *pflash = &cc->pflash;
+
+ pflash->present = true;
+
+ if (!(bcma_read32(cc->core, BCMA_CC_FLASH_CFG) & BCMA_CC_FLASH_CFG_DS))
+ bcma_pflash_data.width = 1;
+ else
+ bcma_pflash_data.width = 2;
+
+ bcma_pflash_resource.start = BCMA_SOC_FLASH2;
+	bcma_pflash_resource.end = BCMA_SOC_FLASH2 + BCMA_SOC_FLASH2_SZ - 1;
+
+ return 0;
+}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index fe0d48cb1778..f1eb4d3e1d57 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -15,44 +15,44 @@
u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
- return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+ return bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
{
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
u32 set)
{
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
- bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+ bcma_pmu_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
u32 offset, u32 mask, u32 set)
{
- bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
- bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR);
+ bcma_pmu_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
u32 set)
{
- bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
- bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
- bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset);
+ bcma_pmu_read32(cc, BCMA_CC_PMU_REGCTL_ADDR);
+ bcma_pmu_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set);
}
EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
@@ -60,18 +60,18 @@ static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc)
{
u32 ilp_ctl, alp_hz;
- if (!(bcma_cc_read32(cc, BCMA_CC_PMU_STAT) &
+ if (!(bcma_pmu_read32(cc, BCMA_CC_PMU_STAT) &
BCMA_CC_PMU_STAT_EXT_LPO_AVAIL))
return 0;
- bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
- BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
+ bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
+ BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
usleep_range(1000, 2000);
- ilp_ctl = bcma_cc_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
+ ilp_ctl = bcma_pmu_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK;
- bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
alp_hz = ilp_ctl * 32768 / 4;
return (alp_hz + 50000) / 100000 * 100;
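
A worked example of the conversion above, using an illustrative counter value:

	/*
	 * Worked example (illustrative value): ilp_ctl = 2441 gives
	 * alp_hz = 2441 * 32768 / 4 = 19,996,672 Hz. Then
	 * (19,996,672 + 50,000) / 100,000 = 200 by integer division,
	 * and 200 * 100 = 20,000, i.e. 20,000 kHz (20 MHz) rounded to
	 * the nearest 100 kHz.
	 */
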
@@ -127,8 +127,8 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
mask = (u32)~(BCMA_RES_4314_HT_AVAIL |
BCMA_RES_4314_MACPHY_CLK_AVAIL);
- bcma_cc_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
- bcma_cc_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
+ bcma_pmu_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
+ bcma_pmu_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
bcma_wait_value(cc->core, BCMA_CLKCTLST,
BCMA_CLKCTLST_HAVEHT, 0, 20000);
break;
@@ -140,7 +140,7 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
/* Flush */
if (cc->pmu.rev >= 2)
- bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
+ bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
/* TODO: Do we need to update OTP? */
}
@@ -195,9 +195,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
/* Set the resource masks. */
if (min_msk)
- bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
if (max_msk)
- bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
/*
* Add some delay; allow resources to come up and settle.
@@ -269,23 +269,33 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
void bcma_pmu_early_init(struct bcma_drv_cc *cc)
{
+ struct bcma_bus *bus = cc->core->bus;
u32 pmucap;
- pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
+ if (cc->core->id.rev >= 35 &&
+ cc->capabilities_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
+ cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU);
+ if (!cc->pmu.core)
+			bcma_warn(bus, "Couldn't find expected PMU core\n");
+ }
+ if (!cc->pmu.core)
+ cc->pmu.core = cc->core;
+
+ pmucap = bcma_pmu_read32(cc, BCMA_CC_PMU_CAP);
cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
- bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
- cc->pmu.rev, pmucap);
+ bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
+ pmucap);
}
void bcma_pmu_init(struct bcma_drv_cc *cc)
{
if (cc->pmu.rev == 1)
- bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
- ~BCMA_CC_PMU_CTL_NOILPONW);
+ bcma_pmu_mask32(cc, BCMA_CC_PMU_CTL,
+ ~BCMA_CC_PMU_CTL_NOILPONW);
else
- bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
- BCMA_CC_PMU_CTL_NOILPONW);
+ bcma_pmu_set32(cc, BCMA_CC_PMU_CTL,
+ BCMA_CC_PMU_CTL_NOILPONW);
bcma_pmu_pll_init(cc);
bcma_pmu_resources_init(cc);
@@ -472,8 +482,8 @@ u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
u32 value)
{
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
}
void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
@@ -497,20 +507,20 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
/* RMW only the P1 divider */
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
- tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
/* RMW only the int feedback divider */
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
- tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
- bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
tmp = BCMA_CC_PMU_CTL_PLL_UPD;
break;
@@ -646,7 +656,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
break;
}
- tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
- bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
+ tmp |= bcma_pmu_read32(cc, BCMA_CC_PMU_CTL);
+ bcma_pmu_write32(cc, BCMA_CC_PMU_CTL, tmp);
}
EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 7e11ef4cb7db..04d706ca5f43 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -38,6 +38,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
{ "M25P32", 0x15, 0x10000, 64, },
{ "M25P64", 0x16, 0x10000, 128, },
{ "M25FL128", 0x17, 0x10000, 256, },
+ { "MX25L25635F", 0x18, 0x10000, 512, },
{ NULL },
};
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 98067f757fb0..771a2a253440 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -192,6 +192,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572:
+ case BCMA_CHIP_ID_BCM47094:
chip->ngpio = 32;
break;
default:
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 24424f3fef96..96f171328200 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -14,8 +14,6 @@
#include <linux/bcma/bcma.h>
-#include <linux/mtd/physmap.h>
-#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
@@ -32,26 +30,6 @@ enum bcma_boot_dev {
BCMA_BOOT_DEV_NAND,
};
-static const char * const part_probes[] = { "bcm47xxpart", NULL };
-
-static struct physmap_flash_data bcma_pflash_data = {
- .part_probe_types = part_probes,
-};
-
-static struct resource bcma_pflash_resource = {
- .name = "bcma_pflash",
- .flags = IORESOURCE_MEM,
-};
-
-struct platform_device bcma_pflash_dev = {
- .name = "physmap-flash",
- .dev = {
- .platform_data = &bcma_pflash_data,
- },
- .resource = &bcma_pflash_resource,
- .num_resources = 1,
-};
-
/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
@@ -272,48 +250,11 @@ static enum bcma_boot_dev bcma_boot_dev(struct bcma_bus *bus)
return BCMA_BOOT_DEV_SERIAL;
}
-static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
+static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
{
struct bcma_bus *bus = mcore->core->bus;
- struct bcma_drv_cc *cc = &bus->drv_cc;
- struct bcma_pflash *pflash = &cc->pflash;
enum bcma_boot_dev boot_dev;
- switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
- case BCMA_CC_FLASHT_STSER:
- case BCMA_CC_FLASHT_ATSER:
- bcma_debug(bus, "Found serial flash\n");
- bcma_sflash_init(cc);
- break;
- case BCMA_CC_FLASHT_PARA:
- bcma_debug(bus, "Found parallel flash\n");
- pflash->present = true;
- pflash->window = BCMA_SOC_FLASH2;
- pflash->window_size = BCMA_SOC_FLASH2_SZ;
-
- if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
- BCMA_CC_FLASH_CFG_DS) == 0)
- pflash->buswidth = 1;
- else
- pflash->buswidth = 2;
-
- bcma_pflash_data.width = pflash->buswidth;
- bcma_pflash_resource.start = pflash->window;
- bcma_pflash_resource.end = pflash->window + pflash->window_size;
-
- break;
- default:
- bcma_err(bus, "Flash type not supported\n");
- }
-
- if (cc->core->id.rev == 38 ||
- bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
- if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
- bcma_debug(bus, "Found NAND flash\n");
- bcma_nflash_init(cc);
- }
- }
-
/* Determine flash type this SoC boots from */
boot_dev = bcma_boot_dev(bus);
switch (boot_dev) {
@@ -337,13 +278,10 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
{
- struct bcma_bus *bus = mcore->core->bus;
-
if (mcore->early_setup_done)
return;
- bcma_chipco_serial_init(&bus->drv_cc);
- bcma_core_mips_flash_detect(mcore);
+ bcma_core_mips_nvram_init(mcore);
mcore->early_setup_done = true;
}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 0856189c065f..cae5385cf499 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -294,7 +294,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index c466f752b067..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
return false;
}
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
struct bcma_device *core)
{
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
struct of_phandle_args out_irq;
int ret;
- if (!parent || !parent->dev.of_node)
+ if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
return 0;
ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
{
struct device_node *node;
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return;
+
node = bcma_of_find_child_device(parent, core);
if (node)
core->dev.of_node = node;
core->irq = bcma_of_get_irq(parent, core, 0);
}
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
- struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
- struct bcma_device *core, int num)
-{
- return 0;
-}
-#endif /* CONFIG_OF */
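
The deleted #ifdef stubs are replaced by IS_ENABLED() checks inside the functions themselves, so the OF paths are always compiled (and type-checked) but constant-folded away when CONFIG_OF_IRQ is off. A generic sketch of the pattern, with a made-up function name:

	/* Generic sketch (example_fill is a made-up name): the early
	 * return is a compile-time constant, so the compiler still sees
	 * the OF-specific code but emits none of it when CONFIG_OF_IRQ
	 * is disabled. */
	static void example_fill(struct platform_device *parent)
	{
		if (!IS_ENABLED(CONFIG_OF_IRQ))
			return;
		/* ... OF-specific work elided ... */
	}
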
unsigned int bcma_core_irq(struct bcma_device *core, int num)
{
@@ -350,7 +341,7 @@ static int bcma_register_devices(struct bcma_bus *bus)
bcma_register_core(bus, core);
}
-#ifdef CONFIG_BCMA_DRIVER_MIPS
+#ifdef CONFIG_BCMA_PFLASH
if (bus->drv_cc.pflash.present) {
err = platform_device_register(&bcma_pflash_dev);
if (err)
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index df806b9c5490..4a2d1b235fb5 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -98,6 +98,9 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_SHIM, "SHIM" },
{ BCMA_CORE_PCIE2, "PCIe Gen2" },
{ BCMA_CORE_ARM_CR4, "ARM CR4" },
+ { BCMA_CORE_GCI, "GCI" },
+ { BCMA_CORE_CMEM, "CNDS DDR2/3 memory controller" },
+ { BCMA_CORE_ARM_CA7, "ARM CA7" },
{ BCMA_CORE_DEFAULT, "Default" },
};
@@ -315,6 +318,8 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
switch (core->id.id) {
case BCMA_CORE_4706_MAC_GBIT_COMMON:
case BCMA_CORE_NS_CHIPCOMMON_B:
+ case BCMA_CORE_PMU:
+ case BCMA_CORE_GCI:
/* Not used yet: case BCMA_CORE_OOB_ROUTER: */
break;
default:
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 29819e719afa..39dd30b6ef86 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -110,16 +110,6 @@ source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
-config BLK_CPQ_DA
- tristate "Compaq SMART2 support"
- depends on PCI && VIRT_TO_BUS && 0
- help
- This is the driver for Compaq Smart Array controllers. Everyone
- using these boards should say Y here. See the file
- <file:Documentation/blockdev/cpqarray.txt> for the current list of
- boards supported by this driver, and for further information on the
- use of this driver.
-
config BLK_CPQ_CISS_DA
tristate "Compaq Smart Array 5xxx support"
depends on PCI
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 671329023ec2..1e9661e26f29 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..ec9d8610b25f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+ q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d048d2009e89..437b3a822f44 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -875,7 +875,7 @@ bio_pageinc(struct bio *bio)
* compound pages is no longer allowed by the kernel.
*/
page = compound_head(bv.bv_page);
- atomic_inc(&page->_count);
+ page_ref_inc(page);
}
}
@@ -888,7 +888,7 @@ bio_pagedec(struct bio *bio)
bio_for_each_segment(bv, bio, iter) {
page = compound_head(bv.bv_page);
- atomic_dec(&page->_count);
+ page_ref_dec(page);
}
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index cb27190e9f39..51a071e32221 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -341,7 +341,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(bio->bi_rw & REQ_DISCARD)) {
if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
- bio->bi_iter.bi_size & PAGE_MASK)
+ bio->bi_iter.bi_size & ~PAGE_MASK)
goto io_error;
discard_from_brd(brd, sector, bio->bi_iter.bi_size);
goto out;
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, err);
return err;
}
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
deleted file mode 100644
index f749df9e15cd..000000000000
--- a/drivers/block/cpqarray.c
+++ /dev/null
@@ -1,1820 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/bio.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/major.h>
-#include <linux/fs.h>
-#include <linux/blkpg.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/hdreg.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/scatterlist.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-
-#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-
-#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
-#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
-
-/* Embedded module documentation macros - see modules.h */
-/* Original author Chris Frantz - Compaq Computer Corporation */
-MODULE_AUTHOR("Compaq Computer Corporation");
-MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
-MODULE_LICENSE("GPL");
-
-#include "cpqarray.h"
-#include "ida_cmd.h"
-#include "smart1,2.h"
-#include "ida_ioctl.h"
-
-#define READ_AHEAD 128
-#define NR_CMDS 128 /* This could probably go as high as ~400 */
-
-#define MAX_CTLR 8
-#define CTLR_SHIFT 8
-
-#define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
-
-static DEFINE_MUTEX(cpqarray_mutex);
-static int nr_ctlr;
-static ctlr_info_t *hba[MAX_CTLR];
-
-static int eisa[8];
-
-#define NR_PRODUCTS ARRAY_SIZE(products)
-
-/* board_id = Subsystem Device ID & Vendor ID
- * product = Marketing Name for the board
- * access = Address of the struct of function pointers
- */
-static struct board_type products[] = {
- { 0x0040110E, "IDA", &smart1_access },
- { 0x0140110E, "IDA-2", &smart1_access },
- { 0x1040110E, "IAES", &smart1_access },
- { 0x2040110E, "SMART", &smart1_access },
- { 0x3040110E, "SMART-2/E", &smart2e_access },
- { 0x40300E11, "SMART-2/P", &smart2_access },
- { 0x40310E11, "SMART-2SL", &smart2_access },
- { 0x40320E11, "Smart Array 3200", &smart2_access },
- { 0x40330E11, "Smart Array 3100ES", &smart2_access },
- { 0x40340E11, "Smart Array 221", &smart2_access },
- { 0x40400E11, "Integrated Array", &smart4_access },
- { 0x40480E11, "Compaq Raid LC2", &smart4_access },
- { 0x40500E11, "Smart Array 4200", &smart4_access },
- { 0x40510E11, "Smart Array 4250ES", &smart4_access },
- { 0x40580E11, "Smart Array 431", &smart4_access },
-};
-
-/* define the PCI info for the PCI cards this driver can control */
-static const struct pci_device_id cpqarray_pci_device_id[] =
-{
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
- 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
- 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
- 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
- { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
- 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
- { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
- 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
- { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
- 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
- { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
-
-static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
-
-/* Debug... */
-#define DBG(s) do { s } while(0)
-/* Debug (general info)... */
-#define DBGINFO(s) do { } while(0)
-/* Debug Paranoid... */
-#define DBGP(s) do { } while(0)
-/* Debug Extra Paranoid... */
-#define DBGPX(s) do { } while(0)
-
-static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
-static void __iomem *remap_pci_mem(ulong base, ulong size);
-static int cpqarray_eisa_detect(void);
-static int pollcomplete(int ctlr);
-static void getgeometry(int ctlr);
-static void start_fwbk(int ctlr);
-
-static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
-static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
-
-static void free_hba(int i);
-static int alloc_cpqarray_hba(void);
-
-static int sendcmd(
- __u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int blk,
- unsigned int blkcnt,
- unsigned int log_unit );
-
-static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
-static void ida_release(struct gendisk *disk, fmode_t mode);
-static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
-static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
-
-static void do_ida_request(struct request_queue *q);
-static void start_io(ctlr_info_t *h);
-
-static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
-static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
-static inline void complete_command(cmdlist_t *cmd, int timeout);
-
-static irqreturn_t do_ida_intr(int irq, void *dev_id);
-static void ida_timer(unsigned long tdata);
-static int ida_revalidate(struct gendisk *disk);
-static int revalidate_allvol(ctlr_info_t *host);
-static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
-
-#ifdef CONFIG_PROC_FS
-static void ida_procinit(int i);
-#else
-static void ida_procinit(int i) {}
-#endif
-
-static inline drv_info_t *get_drv(struct gendisk *disk)
-{
- return disk->private_data;
-}
-
-static inline ctlr_info_t *get_host(struct gendisk *disk)
-{
- return disk->queue->queuedata;
-}
-
-
-static const struct block_device_operations ida_fops = {
- .owner = THIS_MODULE,
- .open = ida_unlocked_open,
- .release = ida_release,
- .ioctl = ida_ioctl,
- .getgeo = ida_getgeo,
- .revalidate_disk= ida_revalidate,
-};
-
-
-#ifdef CONFIG_PROC_FS
-
-static struct proc_dir_entry *proc_array;
-static const struct file_operations ida_proc_fops;
-
-/*
- * Get us a file in /proc/array that says something about each controller.
- * Create /proc/array if it doesn't exist yet.
- */
-static void __init ida_procinit(int i)
-{
- if (proc_array == NULL) {
- proc_array = proc_mkdir("driver/cpqarray", NULL);
- if (!proc_array) return;
- }
-
- proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
-}
-
-/*
- * Report information about this controller.
- */
-static int ida_proc_show(struct seq_file *m, void *v)
-{
- int i, ctlr;
- ctlr_info_t *h = (ctlr_info_t*)m->private;
- drv_info_t *drv;
-#ifdef CPQ_PROC_PRINT_QUEUES
- cmdlist_t *c;
- unsigned long flags;
-#endif
-
- ctlr = h->ctlr;
- seq_printf(m, "%s: Compaq %s Controller\n"
- " Board ID: 0x%08lx\n"
- " Firmware Revision: %c%c%c%c\n"
- " Controller Sig: 0x%08lx\n"
- " Memory Address: 0x%08lx\n"
- " I/O Port: 0x%04x\n"
- " IRQ: %d\n"
- " Logical drives: %d\n"
- " Physical drives: %d\n\n"
- " Current Q depth: %d\n"
- " Max Q depth since init: %d\n\n",
- h->devname,
- h->product_name,
- (unsigned long)h->board_id,
- h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
- (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
- (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
- h->log_drives, h->phys_drives,
- h->Qdepth, h->maxQsinceinit);
-
- seq_puts(m, "Logical Drive Info:\n");
-
- for(i=0; i<h->log_drives; i++) {
- drv = &h->drv[i];
- seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
- ctlr, i, drv->blk_size, drv->nr_blks);
- }
-
-#ifdef CPQ_PROC_PRINT_QUEUES
- spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
- seq_puts(m, "\nCurrent Queues:\n");
-
- c = h->reqQ;
- seq_printf(m, "reqQ = %p", c);
- if (c) c=c->next;
- while(c && c != h->reqQ) {
- seq_printf(m, "->%p", c);
- c=c->next;
- }
-
- c = h->cmpQ;
- seq_printf(m, "\ncmpQ = %p", c);
- if (c) c=c->next;
- while(c && c != h->cmpQ) {
- seq_printf(m, "->%p", c);
- c=c->next;
- }
-
- seq_putc(m, '\n');
- spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
-#endif
- seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
- h->nr_allocs, h->nr_frees);
- return 0;
-}
-
-static int ida_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ida_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations ida_proc_fops = {
- .owner = THIS_MODULE,
- .open = ida_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif /* CONFIG_PROC_FS */
-
-module_param_array(eisa, int, NULL, 0);
-
-static void release_io_mem(ctlr_info_t *c)
-{
- /* if IO mem was not protected do nothing */
- if( c->io_mem_addr == 0)
- return;
- release_region(c->io_mem_addr, c->io_mem_length);
- c->io_mem_addr = 0;
- c->io_mem_length = 0;
-}
-
-static void cpqarray_remove_one(int i)
-{
- int j;
- char buff[4];
-
- /* sendcmd will turn off interrupt, and send the flush...
- * To write all data in the battery backed cache to disks
- * no data returned, but don't want to send NULL to sendcmd */
- if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
- {
- printk(KERN_WARNING "Unable to flush cache on controller %d\n",
- i);
- }
- free_irq(hba[i]->intr, hba[i]);
- iounmap(hba[i]->vaddr);
- unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
- del_timer(&hba[i]->timer);
- remove_proc_entry(hba[i]->devname, proc_array);
- pci_free_consistent(hba[i]->pci_dev,
- NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
- hba[i]->cmd_pool_dhandle);
- kfree(hba[i]->cmd_pool_bits);
- for(j = 0; j < NWD; j++) {
- if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
- del_gendisk(ida_gendisk[i][j]);
- put_disk(ida_gendisk[i][j]);
- }
- blk_cleanup_queue(hba[i]->queue);
- release_io_mem(hba[i]);
- free_hba(i);
-}
-
-static void cpqarray_remove_one_pci(struct pci_dev *pdev)
-{
- int i;
- ctlr_info_t *tmp_ptr;
-
- if (pci_get_drvdata(pdev) == NULL) {
- printk( KERN_ERR "cpqarray: Unable to remove device \n");
- return;
- }
-
- tmp_ptr = pci_get_drvdata(pdev);
- i = tmp_ptr->ctlr;
- if (hba[i] == NULL) {
- printk(KERN_ERR "cpqarray: controller %d appears to have"
- "already been removed \n", i);
- return;
- }
- pci_set_drvdata(pdev, NULL);
-
- cpqarray_remove_one(i);
-}
-
-/* removing an instance that was not removed automatically..
- * must be an eisa card.
- */
-static void cpqarray_remove_one_eisa(int i)
-{
- if (hba[i] == NULL) {
- printk(KERN_ERR "cpqarray: controller %d appears to have"
- "already been removed \n", i);
- return;
- }
- cpqarray_remove_one(i);
-}
-
-/* pdev is NULL for eisa */
-static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
-{
- struct request_queue *q;
- int j;
-
- /*
- * register block devices
- * Find disks and fill in structs
- * Get an interrupt, set the Q depth and get into /proc
- */
-
- /* If this successful it should insure that we are the only */
- /* instance of the driver */
- if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
- goto Enomem4;
- }
- hba[i]->access.set_intr_mask(hba[i], 0);
- if (request_irq(hba[i]->intr, do_ida_intr, IRQF_SHARED,
- hba[i]->devname, hba[i]))
- {
- printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
- hba[i]->intr, hba[i]->devname);
- goto Enomem3;
- }
-
- for (j=0; j<NWD; j++) {
- ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
- if (!ida_gendisk[i][j])
- goto Enomem2;
- }
-
- hba[i]->cmd_pool = pci_alloc_consistent(
- hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
- &(hba[i]->cmd_pool_dhandle));
- hba[i]->cmd_pool_bits = kcalloc(
- DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
- GFP_KERNEL);
-
- if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
- goto Enomem1;
-
- memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
- printk(KERN_INFO "cpqarray: Finding drives on %s",
- hba[i]->devname);
-
- spin_lock_init(&hba[i]->lock);
- q = blk_init_queue(do_ida_request, &hba[i]->lock);
- if (!q)
- goto Enomem1;
-
- hba[i]->queue = q;
- q->queuedata = hba[i];
-
- getgeometry(i);
- start_fwbk(i);
-
- ida_procinit(i);
-
- if (pdev)
- blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
-
- /* This is a hardware imposed limit. */
- blk_queue_max_segments(q, SG_MAX);
-
- init_timer(&hba[i]->timer);
- hba[i]->timer.expires = jiffies + IDA_TIMER;
- hba[i]->timer.data = (unsigned long)hba[i];
- hba[i]->timer.function = ida_timer;
- add_timer(&hba[i]->timer);
-
- /* Enable IRQ now that spinlock and rate limit timer are set up */
- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
-
- for(j=0; j<NWD; j++) {
- struct gendisk *disk = ida_gendisk[i][j];
- drv_info_t *drv = &hba[i]->drv[j];
- sprintf(disk->disk_name, "ida/c%dd%d", i, j);
- disk->major = COMPAQ_SMART2_MAJOR + i;
- disk->first_minor = j<<NWD_SHIFT;
- disk->fops = &ida_fops;
- if (j && !drv->nr_blks)
- continue;
- blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
- set_capacity(disk, drv->nr_blks);
- disk->queue = hba[i]->queue;
- disk->private_data = drv;
- add_disk(disk);
- }
-
- /* done ! */
- return(i);
-
-Enomem1:
- nr_ctlr = i;
- kfree(hba[i]->cmd_pool_bits);
- if (hba[i]->cmd_pool)
- pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
- hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-Enomem2:
- while (j--) {
- put_disk(ida_gendisk[i][j]);
- ida_gendisk[i][j] = NULL;
- }
- free_irq(hba[i]->intr, hba[i]);
-Enomem3:
- unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
-Enomem4:
- if (pdev)
- pci_set_drvdata(pdev, NULL);
- release_io_mem(hba[i]);
- free_hba(i);
-
- printk( KERN_ERR "cpqarray: out of memory");
-
- return -1;
-}
-
-static int cpqarray_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int i;
-
- printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
- " bus %d dev %d func %d\n",
- pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- i = alloc_cpqarray_hba();
- if( i < 0 )
- return (-1);
- memset(hba[i], 0, sizeof(ctlr_info_t));
- sprintf(hba[i]->devname, "ida%d", i);
- hba[i]->ctlr = i;
- /* Initialize the pdev driver private data */
- pci_set_drvdata(pdev, hba[i]);
-
- if (cpqarray_pci_init(hba[i], pdev) != 0) {
- pci_set_drvdata(pdev, NULL);
- release_io_mem(hba[i]);
- free_hba(i);
- return -1;
- }
-
- return (cpqarray_register_ctlr(i, pdev));
-}
-
-static struct pci_driver cpqarray_pci_driver = {
- .name = "cpqarray",
- .probe = cpqarray_init_one,
- .remove = cpqarray_remove_one_pci,
- .id_table = cpqarray_pci_device_id,
-};
-
-/*
- * This is it. Find all the controllers and register them.
- * returns the number of block devices registered.
- */
-static int __init cpqarray_init(void)
-{
- int num_cntlrs_reg = 0;
- int i;
- int rc = 0;
-
- /* detect controllers */
- printk(DRIVER_NAME "\n");
-
- rc = pci_register_driver(&cpqarray_pci_driver);
- if (rc)
- return rc;
- cpqarray_eisa_detect();
-
- for (i=0; i < MAX_CTLR; i++) {
- if (hba[i] != NULL)
- num_cntlrs_reg++;
- }
-
- if (num_cntlrs_reg)
- return 0;
- else {
- pci_unregister_driver(&cpqarray_pci_driver);
- return -ENODEV;
- }
-}
-
-/* Function to find the first free pointer into our hba[] array */
-/* Returns -1 if no free entries are left. */
-static int alloc_cpqarray_hba(void)
-{
- int i;
-
- for(i=0; i< MAX_CTLR; i++) {
- if (hba[i] == NULL) {
- hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
- if(hba[i]==NULL) {
- printk(KERN_ERR "cpqarray: out of memory.\n");
- return (-1);
- }
- return (i);
- }
- }
- printk(KERN_WARNING "cpqarray: This driver supports a maximum"
- " of 8 controllers.\n");
- return(-1);
-}
-
-static void free_hba(int i)
-{
- kfree(hba[i]);
- hba[i]=NULL;
-}
-
-/*
- * Find the IO address of the controller, its IRQ and so forth. Fill
- * in some basic stuff into the ctlr_info_t structure.
- */
-static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
-{
- ushort vendor_id, device_id, command;
- unchar cache_line_size, latency_timer;
- unchar irq, revision;
- unsigned long addr[6];
- __u32 board_id;
-
- int i;
-
- c->pci_dev = pdev;
- pci_set_master(pdev);
- if (pci_enable_device(pdev)) {
- printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
- return -1;
- }
- vendor_id = pdev->vendor;
- device_id = pdev->device;
- revision = pdev->revision;
- irq = pdev->irq;
-
- for(i=0; i<6; i++)
- addr[i] = pci_resource_start(pdev, i);
-
- if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
- {
- printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
- return -1;
- }
-
- pci_read_config_word(pdev, PCI_COMMAND, &command);
- pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
-
- pci_read_config_dword(pdev, 0x2c, &board_id);
-
- /* check to see if controller has been disabled */
- if(!(command & 0x02)) {
- printk(KERN_WARNING
- "cpqarray: controller appears to be disabled\n");
- return(-1);
- }
-
-DBGINFO(
- printk("vendor_id = %x\n", vendor_id);
- printk("device_id = %x\n", device_id);
- printk("command = %x\n", command);
- for(i=0; i<6; i++)
- printk("addr[%d] = %lx\n", i, addr[i]);
- printk("revision = %x\n", revision);
- printk("irq = %x\n", irq);
- printk("cache_line_size = %x\n", cache_line_size);
- printk("latency_timer = %x\n", latency_timer);
- printk("board_id = %x\n", board_id);
-);
-
- c->intr = irq;
-
- for(i=0; i<6; i++) {
- if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
- { /* IO space */
- c->io_mem_addr = addr[i];
- c->io_mem_length = pci_resource_end(pdev, i)
- - pci_resource_start(pdev, i) + 1;
- if(!request_region( c->io_mem_addr, c->io_mem_length,
- "cpqarray"))
- {
- printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
- c->io_mem_addr = 0;
- c->io_mem_length = 0;
- }
- break;
- }
- }
-
- c->paddr = 0;
- for(i=0; i<6; i++)
- if (!(pci_resource_flags(pdev, i) &
- PCI_BASE_ADDRESS_SPACE_IO)) {
- c->paddr = pci_resource_start (pdev, i);
- break;
- }
- if (!c->paddr)
- return -1;
- c->vaddr = remap_pci_mem(c->paddr, 128);
- if (!c->vaddr)
- return -1;
- c->board_id = board_id;
-
- for(i=0; i<NR_PRODUCTS; i++) {
- if (board_id == products[i].board_id) {
- c->product_name = products[i].product_name;
- c->access = *(products[i].access);
- break;
- }
- }
- if (i == NR_PRODUCTS) {
- printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
- " to access the SMART Array controller %08lx\n",
- (unsigned long)board_id);
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Map (physical) PCI mem into (virtual) kernel space
- */
-static void __iomem *remap_pci_mem(ulong base, ulong size)
-{
- ulong page_base = ((ulong) base) & PAGE_MASK;
- ulong page_offs = ((ulong) base) - page_base;
- void __iomem *page_remapped = ioremap(page_base, page_offs+size);
-
- return (page_remapped ? (page_remapped + page_offs) : NULL);
-}
-
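/*
 * A minimal, self-contained sketch of the alignment arithmetic that
 * remap_pci_mem() above performs: round the physical base down to a page
 * boundary, map from there, and add the offset back so the caller gets a
 * pointer to the exact byte requested. The base address and page size here
 * are hypothetical, chosen only to illustrate the math.
 */
#include <stdio.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
	unsigned long base = 0xfebf0080UL;	/* hypothetical BAR address */
	unsigned long page_base = base & EX_PAGE_MASK;
	unsigned long page_offs = base - page_base;

	/* ioremap() would be asked for page_offs + size bytes at page_base */
	printf("page_base=%#lx page_offs=%#lx\n", page_base, page_offs);
	return 0;
}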
-#ifndef MODULE
-/*
- * Config string is a comma separated set of i/o addresses of EISA cards.
- */
-static int cpqarray_setup(char *str)
-{
- int i, ints[9];
-
- (void)get_options(str, ARRAY_SIZE(ints), ints);
-
- for(i=0; i<ints[0] && i<8; i++)
- eisa[i] = ints[i+1];
- return 1;
-}
-
-__setup("smart2=", cpqarray_setup);
-
-#endif
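/*
 * A user-space sketch of what cpqarray_setup() above does with a "smart2="
 * boot string, assuming only the get_options() contract visible in that
 * code: ints[0] receives the count of integers parsed and the values follow
 * at ints[1..]. The strtoul-based stand-in parser and the example addresses
 * are illustrative, not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>

static void parse_smart2(const char *str, int *ints, int nints)
{
	int n = 0;
	char *end;

	while (n < nints - 1 && *str) {
		ints[n + 1] = (int)strtoul(str, &end, 0);
		if (end == str)
			break;			/* no digits: stop */
		n++;
		str = (*end == ',') ? end + 1 : end;
	}
	ints[0] = n;
}

int main(void)
{
	int ints[9], i;

	parse_smart2("0x130,0x230", ints, 9);	/* hypothetical EISA bases */
	for (i = 0; i < ints[0] && i < 8; i++)
		printf("eisa[%d] = %#x\n", i, ints[i + 1]);
	return 0;
}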
-
-/*
- * Find an EISA controller's signature. Set up an hba if we find it.
- */
-static int cpqarray_eisa_detect(void)
-{
- int i=0, j;
- __u32 board_id;
- int intr;
- int ctlr;
- int num_ctlr = 0;
-
- while(i<8 && eisa[i]) {
- ctlr = alloc_cpqarray_hba();
- if(ctlr == -1)
- break;
- board_id = inl(eisa[i]+0xC80);
- for(j=0; j < NR_PRODUCTS; j++)
- if (board_id == products[j].board_id)
- break;
-
- if (j == NR_PRODUCTS) {
- printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
- " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
- continue;
- }
-
- memset(hba[ctlr], 0, sizeof(ctlr_info_t));
- hba[ctlr]->io_mem_addr = eisa[i];
- hba[ctlr]->io_mem_length = 0x7FF;
- if(!request_region(hba[ctlr]->io_mem_addr,
- hba[ctlr]->io_mem_length,
- "cpqarray"))
- {
- printk(KERN_WARNING "cpqarray: I/O range already in "
- "use addr = %lx length = %ld\n",
- hba[ctlr]->io_mem_addr,
- hba[ctlr]->io_mem_length);
- free_hba(ctlr);
- continue;
- }
-
- /*
- * Read the config register to find our interrupt
- */
- intr = inb(eisa[i]+0xCC0) >> 4;
- if (intr & 1) intr = 11;
- else if (intr & 2) intr = 10;
- else if (intr & 4) intr = 14;
- else if (intr & 8) intr = 15;
-
- hba[ctlr]->intr = intr;
- sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
- hba[ctlr]->product_name = products[j].product_name;
- hba[ctlr]->access = *(products[j].access);
- hba[ctlr]->ctlr = ctlr;
- hba[ctlr]->board_id = board_id;
- hba[ctlr]->pci_dev = NULL; /* not PCI */
-
-DBGINFO(
- printk("i = %d, j = %d\n", i, j);
- printk("irq = %x\n", intr);
- printk("product name = %s\n", products[j].product_name);
- printk("board_id = %x\n", board_id);
-);
-
- num_ctlr++;
- i++;
-
- if (cpqarray_register_ctlr(ctlr, NULL) == -1)
- printk(KERN_WARNING
- "cpqarray: Can't register EISA controller %d\n",
- ctlr);
-
- }
-
- return num_ctlr;
-}
-
-/*
- * Open. Make sure the device is really there.
- */
-static int ida_open(struct block_device *bdev, fmode_t mode)
-{
- drv_info_t *drv = get_drv(bdev->bd_disk);
- ctlr_info_t *host = get_host(bdev->bd_disk);
-
- DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
- /*
- * Root is allowed to open raw volume zero even if it's not configured
- * so array config can still work. I don't think I really like this,
- * but I'm already using way too many device nodes to claim another one
- * for "raw controller".
- */
- if (!drv->nr_blks) {
- if (!capable(CAP_SYS_RAWIO))
- return -ENXIO;
- if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
- return -ENXIO;
- }
- host->usage_count++;
- return 0;
-}
-
-static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
- int ret;
-
- mutex_lock(&cpqarray_mutex);
- ret = ida_open(bdev, mode);
- mutex_unlock(&cpqarray_mutex);
-
- return ret;
-}
-
-/*
- * Close. Just drop the usage count; nothing is synced here.
- */
-static void ida_release(struct gendisk *disk, fmode_t mode)
-{
- ctlr_info_t *host;
-
- mutex_lock(&cpqarray_mutex);
- host = get_host(disk);
- host->usage_count--;
- mutex_unlock(&cpqarray_mutex);
-}
-
-/*
- * Enqueuing and dequeuing functions for cmdlists.
- */
-static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
-{
- if (*Qptr == NULL) {
- *Qptr = c;
- c->next = c->prev = c;
- } else {
- c->prev = (*Qptr)->prev;
- c->next = (*Qptr);
- (*Qptr)->prev->next = c;
- (*Qptr)->prev = c;
- }
-}
-
-static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
-{
- if (c && c->next != c) {
- if (*Qptr == c) *Qptr = c->next;
- c->prev->next = c->next;
- c->next->prev = c->prev;
- } else {
- *Qptr = NULL;
- }
- return c;
-}
-
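/*
 * addQ() and removeQ() above maintain a circular, doubly linked list in
 * which *Qptr is the head and new commands are linked in just before it,
 * i.e. at the tail. A small user-space sketch of the same discipline, with
 * a stripped-down node type standing in for cmdlist_t:
 */
#include <stdio.h>

struct node { int id; struct node *prev, *next; };

static void addq(struct node **q, struct node *c)
{
	if (*q == NULL) {
		*q = c;
		c->next = c->prev = c;
	} else {
		c->prev = (*q)->prev;	/* link in at the tail... */
		c->next = *q;		/* ...just before the head */
		(*q)->prev->next = c;
		(*q)->prev = c;
	}
}

static struct node *removeq(struct node **q, struct node *c)
{
	if (c && c->next != c) {
		if (*q == c)
			*q = c->next;
		c->prev->next = c->next;
		c->next->prev = c->prev;
	} else {
		*q = NULL;		/* last element: queue is now empty */
	}
	return c;
}

int main(void)
{
	struct node a = { 1, NULL, NULL }, b = { 2, NULL, NULL }, *q = NULL;

	addq(&q, &a);
	addq(&q, &b);
	printf("head=%d tail=%d\n", q->id, q->prev->id);	/* head=1 tail=2 */
	removeq(&q, q);
	printf("head=%d\n", q->id);				/* head=2 */
	return 0;
}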
-/*
- * Get a request and submit it to the controller.
- * This routine needs to grab all the requests it possibly can from the
- * req Q and submit them. Interrupts are off (and need to be off) when you
- * are in here (either via the dummy do_ida_request functions or by being
- * called from the interrupt handler).
- */
-static void do_ida_request(struct request_queue *q)
-{
- ctlr_info_t *h = q->queuedata;
- cmdlist_t *c;
- struct request *creq;
- struct scatterlist tmp_sg[SG_MAX];
- int i, dir, seg;
-
-queue_next:
- creq = blk_peek_request(q);
- if (!creq)
- goto startio;
-
- BUG_ON(creq->nr_phys_segments > SG_MAX);
-
- if ((c = cmd_alloc(h,1)) == NULL)
- goto startio;
-
- blk_start_request(creq);
-
- c->ctlr = h->ctlr;
- c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
- c->hdr.size = sizeof(rblk_t) >> 2;
- c->size += sizeof(rblk_t);
-
- c->req.hdr.blk = blk_rq_pos(creq);
- c->rq = creq;
-DBGPX(
- printk("sector=%d, nr_sectors=%u\n",
- blk_rq_pos(creq), blk_rq_sectors(creq));
-);
- sg_init_table(tmp_sg, SG_MAX);
- seg = blk_rq_map_sg(q, creq, tmp_sg);
-
- /* Now do all the DMA Mappings */
- if (rq_data_dir(creq) == READ)
- dir = PCI_DMA_FROMDEVICE;
- else
- dir = PCI_DMA_TODEVICE;
- for( i=0; i < seg; i++)
- {
- c->req.sg[i].size = tmp_sg[i].length;
- c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
- sg_page(&tmp_sg[i]),
- tmp_sg[i].offset,
- tmp_sg[i].length, dir);
- }
-DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
- c->req.hdr.sg_cnt = seg;
- c->req.hdr.blk_cnt = blk_rq_sectors(creq);
- c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
- c->type = CMD_RWREQ;
-
- /* Put the request on the tail of the request queue */
- addQ(&h->reqQ, c);
- h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
-
- goto queue_next;
-
-startio:
- start_io(h);
-}
-
-/*
- * start_io submits everything on a controller's request queue
- * and moves it to the completion queue.
- *
- * Interrupts had better be off if you're in here
- */
-static void start_io(ctlr_info_t *h)
-{
- cmdlist_t *c;
-
- while((c = h->reqQ) != NULL) {
- /* Can't do anything if we're busy */
- if (h->access.fifo_full(h) == 0)
- return;
-
- /* Get the first entry from the request Q */
- removeQ(&h->reqQ, c);
- h->Qdepth--;
-
- /* Tell the controller to do our bidding */
- h->access.submit_command(h, c);
-
- /* Get onto the completion Q */
- addQ(&h->cmpQ, c);
- }
-}
-
-/*
- * Mark all buffers that cmd was responsible for as complete.
- */
-static inline void complete_command(cmdlist_t *cmd, int timeout)
-{
- struct request *rq = cmd->rq;
- int error = 0;
- int i, ddir;
-
- if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
- (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
- printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
- cmd->ctlr, cmd->hdr.unit);
- hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
- }
- if (cmd->req.hdr.rcode & RCODE_FATAL) {
- printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
- cmd->ctlr, cmd->hdr.unit);
- error = -EIO;
- }
- if (cmd->req.hdr.rcode & RCODE_INVREQ) {
- printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
- cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
- cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
- cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
- error = -EIO;
- }
- if (timeout)
- error = -EIO;
- /* unmap the DMA mapping for all the scatter gather elements */
- if (cmd->req.hdr.cmd == IDA_READ)
- ddir = PCI_DMA_FROMDEVICE;
- else
- ddir = PCI_DMA_TODEVICE;
- for(i=0; i<cmd->req.hdr.sg_cnt; i++)
- pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
- cmd->req.sg[i].size, ddir);
-
- DBGPX(printk("Done with %p\n", rq););
- __blk_end_request_all(rq, error);
-}
-
-/*
- * The controller will interrupt us upon completion of commands.
- * Find the command on the completion queue, remove it, tell the OS and
- * try to queue up more IO
- */
-static irqreturn_t do_ida_intr(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- cmdlist_t *c;
- unsigned long istat;
- unsigned long flags;
- __u32 a,a1;
-
- istat = h->access.intr_pending(h);
- /* Is this interrupt for us? */
- if (istat == 0)
- return IRQ_NONE;
-
- /*
- * If there are completed commands in the completion queue,
- * we had better do something about it.
- */
- spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
- if (istat & FIFO_NOT_EMPTY) {
- while((a = h->access.command_completed(h))) {
- a1 = a; a &= ~3;
- if ((c = h->cmpQ) == NULL)
- {
- printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
- continue;
- }
- while(c->busaddr != a) {
- c = c->next;
- if (c == h->cmpQ)
- break;
- }
- /*
- * If we've found the command, take it off the
- * completion Q and free it
- */
- if (c->busaddr == a) {
- removeQ(&h->cmpQ, c);
- /* Check for invalid command.
- * Controller returns command error,
- * But rcode = 0.
- */
-
- if((a1 & 0x03) && (c->req.hdr.rcode == 0))
- {
- c->req.hdr.rcode = RCODE_INVREQ;
- }
- if (c->type == CMD_RWREQ) {
- complete_command(c, 0);
- cmd_free(h, c, 1);
- } else if (c->type == CMD_IOCTL_PEND) {
- c->type = CMD_IOCTL_DONE;
- }
- continue;
- }
- }
- }
-
- /*
- * See if we can queue up some more IO
- */
- do_ida_request(h->queue);
- spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
- return IRQ_HANDLED;
-}
-
-/*
- * This timer was for timing out requests that hadn't completed within
- * IDA_TIMEOUT. That wasn't such a good idea. Now the timer is used to
- * reset a flags structure so we don't flood the user with
- * "Non-Fatal error" messages.
- */
-static void ida_timer(unsigned long tdata)
-{
- ctlr_info_t *h = (ctlr_info_t*)tdata;
-
- h->timer.expires = jiffies + IDA_TIMER;
- add_timer(&h->timer);
- h->misc_tflags = 0;
-}
-
-static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- drv_info_t *drv = get_drv(bdev->bd_disk);
-
- if (drv->cylinders) {
- geo->heads = drv->heads;
- geo->sectors = drv->sectors;
- geo->cylinders = drv->cylinders;
- } else {
- geo->heads = 0xff;
- geo->sectors = 0x3f;
- geo->cylinders = drv->nr_blks / (0xff*0x3f);
- }
-
- return 0;
-}
-
-/*
- * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
- * setting readahead and submitting commands from userspace to the controller.
- */
-static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- drv_info_t *drv = get_drv(bdev->bd_disk);
- ctlr_info_t *host = get_host(bdev->bd_disk);
- int error;
- ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
- ida_ioctl_t *my_io;
-
- switch(cmd) {
- case IDAGETDRVINFO:
- if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
- return -EFAULT;
- return 0;
- case IDAPASSTHRU:
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
- if (!my_io)
- return -ENOMEM;
- error = -EFAULT;
- if (copy_from_user(my_io, io, sizeof(*my_io)))
- goto out_passthru;
- error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
- if (error)
- goto out_passthru;
- error = -EFAULT;
- if (copy_to_user(io, my_io, sizeof(*my_io)))
- goto out_passthru;
- error = 0;
-out_passthru:
- kfree(my_io);
- return error;
- case IDAGETCTLRSIG:
- if (!arg) return -EINVAL;
- if (put_user(host->ctlr_sig, (int __user *)arg))
- return -EFAULT;
- return 0;
- case IDAREVALIDATEVOLS:
- if (MINOR(bdev->bd_dev) != 0)
- return -ENXIO;
- return revalidate_allvol(host);
- case IDADRIVERVERSION:
- if (!arg) return -EINVAL;
- if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
- return -EFAULT;
- return 0;
- case IDAGETPCIINFO:
-	{
- ida_pci_info_struct pciinfo;
-
- if (!arg) return -EINVAL;
- memset(&pciinfo, 0, sizeof(pciinfo));
- pciinfo.bus = host->pci_dev->bus->number;
- pciinfo.dev_fn = host->pci_dev->devfn;
- pciinfo.board_id = host->board_id;
- if(copy_to_user((void __user *) arg, &pciinfo,
- sizeof( ida_pci_info_struct)))
- return -EFAULT;
- return(0);
- }
-
- default:
- return -EINVAL;
- }
-
-}
-
-static int ida_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long param)
-{
- int ret;
-
- mutex_lock(&cpqarray_mutex);
- ret = ida_locked_ioctl(bdev, mode, cmd, param);
- mutex_unlock(&cpqarray_mutex);
-
- return ret;
-}
-
-/*
- * ida_ctlr_ioctl is for passing commands to the controller from userspace.
- * The command block (io) has already been copied to kernel space for us,
- * however, any elements in the sglist need to be copied to kernel space
- * or copied back to userspace.
- *
- * Only root may perform a controller passthru command; however, I'm not doing
- * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
- * putting a 64M buffer in the sglist is probably a *bad* idea.
- */
-static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
-{
- int ctlr = h->ctlr;
- cmdlist_t *c;
- void *p = NULL;
- unsigned long flags;
- int error;
-
- if ((c = cmd_alloc(h, 0)) == NULL)
- return -ENOMEM;
- c->ctlr = ctlr;
- c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
- c->hdr.size = sizeof(rblk_t) >> 2;
- c->size += sizeof(rblk_t);
-
- c->req.hdr.cmd = io->cmd;
- c->req.hdr.blk = io->blk;
- c->req.hdr.blk_cnt = io->blk_cnt;
- c->type = CMD_IOCTL_PEND;
-
- /* Pre submit processing */
- switch(io->cmd) {
- case PASSTHRU_A:
- p = memdup_user(io->sg[0].addr, io->sg[0].size);
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
- cmd_free(h, c, 0);
- return error;
- }
- c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
- sizeof(ida_ioctl_t),
- PCI_DMA_BIDIRECTIONAL);
- c->req.sg[0].size = io->sg[0].size;
- c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- c->req.hdr.sg_cnt = 1;
- break;
- case IDA_READ:
- case READ_FLASH_ROM:
- case SENSE_CONTROLLER_PERFORMANCE:
- p = kmalloc(io->sg[0].size, GFP_KERNEL);
- if (!p)
- {
- error = -ENOMEM;
- cmd_free(h, c, 0);
- return(error);
- }
-
- c->req.sg[0].size = io->sg[0].size;
- c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- c->req.hdr.sg_cnt = 1;
- break;
- case IDA_WRITE:
- case IDA_WRITE_MEDIA:
- case DIAG_PASS_THRU:
- case COLLECT_BUFFER:
- case WRITE_FLASH_ROM:
- p = memdup_user(io->sg[0].addr, io->sg[0].size);
- if (IS_ERR(p)) {
- error = PTR_ERR(p);
- cmd_free(h, c, 0);
- return error;
- }
- c->req.sg[0].size = io->sg[0].size;
- c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- c->req.hdr.sg_cnt = 1;
- break;
- default:
- c->req.sg[0].size = sizeof(io->c);
- c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- c->req.hdr.sg_cnt = 1;
- }
-
- /* Put the request on the tail of the request queue */
- spin_lock_irqsave(IDA_LOCK(ctlr), flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- start_io(h);
- spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
-
- /* Wait for completion */
- while(c->type != CMD_IOCTL_DONE)
- schedule();
-
- /* Unmap the DMA */
- pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
- PCI_DMA_BIDIRECTIONAL);
- /* Post submit processing */
- switch(io->cmd) {
- case PASSTHRU_A:
- pci_unmap_single(h->pci_dev, c->req.hdr.blk,
- sizeof(ida_ioctl_t),
- PCI_DMA_BIDIRECTIONAL);
- case IDA_READ:
- case DIAG_PASS_THRU:
- case SENSE_CONTROLLER_PERFORMANCE:
- case READ_FLASH_ROM:
- if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
- kfree(p);
- return -EFAULT;
- }
- /* fall through and free p */
- case IDA_WRITE:
- case IDA_WRITE_MEDIA:
- case COLLECT_BUFFER:
- case WRITE_FLASH_ROM:
- kfree(p);
- break;
- default:;
- /* Nothing to do */
- }
-
- io->rcode = c->req.hdr.rcode;
- cmd_free(h, c, 0);
- return(0);
-}
-
-/*
- * Commands are pre-allocated in a large block. Here we use a simple bitmap
- * scheme to suballocte them to the driver. Operations that are not time
- * critical (and can wait for kmalloc and possibly sleep) can pass in NULL
- * as the first argument to get a new command.
- */
-static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
-{
- cmdlist_t * c;
- int i;
- dma_addr_t cmd_dhandle;
-
- if (!get_from_pool) {
- c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
- sizeof(cmdlist_t), &cmd_dhandle);
- if(c==NULL)
- return NULL;
- } else {
- do {
- i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
- if (i == NR_CMDS)
- return NULL;
- } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
- c = h->cmd_pool + i;
- cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
- h->nr_allocs++;
- }
-
- memset(c, 0, sizeof(cmdlist_t));
- c->busaddr = cmd_dhandle;
- return c;
-}
-
-static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
-{
- int i;
-
- if (!got_from_pool) {
- pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
- c->busaddr);
- } else {
- i = c - h->cmd_pool;
- clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
- h->nr_frees++;
- }
-}
-
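/*
 * cmd_alloc()/cmd_free() above suballocate commands from a preallocated
 * pool using a bitmap: find a clear bit, set it, and use the bit index to
 * address both the command and its DMA handle. A single-threaded user-space
 * approximation (no atomic test_and_set_bit(), hypothetical pool size):
 */
#include <stdio.h>
#include <limits.h>

#define POOL_CMDS 128
#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

static unsigned long pool_bits[(POOL_CMDS + WORD_BITS - 1) / WORD_BITS];

static int pool_alloc(void)
{
	int i;

	for (i = 0; i < POOL_CMDS; i++) {
		unsigned long *w = &pool_bits[i / WORD_BITS];
		unsigned long m = 1UL << (i % WORD_BITS);

		if (!(*w & m)) {
			*w |= m;	/* the driver uses test_and_set_bit() */
			return i;	/* index into cmd_pool and its dhandle */
		}
	}
	return -1;			/* pool exhausted */
}

static void pool_free(int i)
{
	pool_bits[i / WORD_BITS] &= ~(1UL << (i % WORD_BITS));
}

int main(void)
{
	int a = pool_alloc(), b = pool_alloc();

	printf("allocated %d and %d\n", a, b);		/* 0 and 1 */
	pool_free(a);
	printf("reallocated %d\n", pool_alloc());	/* 0 again */
	(void)b;
	return 0;
}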
-/***********************************************************************
- name: sendcmd
- Send a command to an IDA using the memory mapped FIFO interface
- and wait for it to complete.
- This routine should only be called at init time.
-***********************************************************************/
-static int sendcmd(
- __u8 cmd,
- int ctlr,
- void *buff,
- size_t size,
- unsigned int blk,
- unsigned int blkcnt,
- unsigned int log_unit )
-{
- cmdlist_t *c;
- int complete;
- unsigned long temp;
- unsigned long i;
- ctlr_info_t *info_p = hba[ctlr];
-
- c = cmd_alloc(info_p, 1);
- if(!c)
- return IO_ERROR;
- c->ctlr = ctlr;
- c->hdr.unit = log_unit;
- c->hdr.prio = 0;
- c->hdr.size = sizeof(rblk_t) >> 2;
- c->size += sizeof(rblk_t);
-
- /* The request information. */
- c->req.hdr.next = 0;
- c->req.hdr.rcode = 0;
- c->req.bp = 0;
- c->req.hdr.sg_cnt = 1;
- c->req.hdr.reserved = 0;
-
- if (size == 0)
- c->req.sg[0].size = 512;
- else
- c->req.sg[0].size = size;
-
- c->req.hdr.blk = blk;
- c->req.hdr.blk_cnt = blkcnt;
- c->req.hdr.cmd = (unsigned char) cmd;
- c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
- buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- /*
- * Disable interrupt
- */
- info_p->access.set_intr_mask(info_p, 0);
- /* Make sure there is room in the command FIFO */
- /* Actually it should be completely empty at this time. */
- for (i = 200000; i > 0; i--) {
- temp = info_p->access.fifo_full(info_p);
- if (temp != 0) {
- break;
- }
- udelay(10);
-DBG(
- printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
- " waiting!\n", ctlr);
-);
- }
- /*
- * Send the cmd
- */
- info_p->access.submit_command(info_p, c);
- complete = pollcomplete(ctlr);
-
- pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
- c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
- if (complete != 1) {
- if (complete != c->busaddr) {
- printk( KERN_WARNING
- "cpqarray ida%d: idaSendPciCmd "
- "Invalid command list address returned! (%08lx)\n",
- ctlr, (unsigned long)complete);
- cmd_free(info_p, c, 1);
- return (IO_ERROR);
- }
- } else {
- printk( KERN_WARNING
-			"cpqarray ida%d: idaSendPciCmd timed out, "
- "No command list address returned!\n",
- ctlr);
- cmd_free(info_p, c, 1);
- return (IO_ERROR);
- }
-
- if (c->req.hdr.rcode & 0x00FE) {
- if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
- printk( KERN_WARNING
- "cpqarray ida%d: idaSendPciCmd, error: "
- "Controller failed at init time "
- "cmd: 0x%x, return code = 0x%x\n",
- ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
-
- cmd_free(info_p, c, 1);
- return (IO_ERROR);
- }
- }
- cmd_free(info_p, c, 1);
- return (IO_OK);
-}
-
-/*
- * revalidate_allvol is for online array config utilities. After a
- * utility reconfigures the drives in the array, it can use this function
- * (through an ioctl) to make the driver zap any previous disk structs for
- * that controller and get new ones.
- *
- * Right now I'm using the getgeometry() function to do this, but this
- * function should probably be finer grained and allow you to revalidate one
- * particular logical volume (instead of all of them on a particular
- * controller).
- */
-static int revalidate_allvol(ctlr_info_t *host)
-{
- int ctlr = host->ctlr;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(IDA_LOCK(ctlr), flags);
- if (host->usage_count > 1) {
- spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
- printk(KERN_WARNING "cpqarray: Device busy for volume"
- " revalidation (usage=%d)\n", host->usage_count);
- return -EBUSY;
- }
- host->usage_count++;
- spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
-
- /*
- * Set the partition and block size structures for all volumes
- * on this controller to zero. We will reread all of this data
- */
- set_capacity(ida_gendisk[ctlr][0], 0);
- for (i = 1; i < NWD; i++) {
- struct gendisk *disk = ida_gendisk[ctlr][i];
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- }
- memset(host->drv, 0, sizeof(drv_info_t)*NWD);
-
- /*
- * Tell the array controller not to give us any interrupts while
- * we check the new geometry. Then turn interrupts back on when
- * we're done.
- */
- host->access.set_intr_mask(host, 0);
- getgeometry(ctlr);
- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
-
- for(i=0; i<NWD; i++) {
- struct gendisk *disk = ida_gendisk[ctlr][i];
- drv_info_t *drv = &host->drv[i];
- if (i && !drv->nr_blks)
- continue;
- blk_queue_logical_block_size(host->queue, drv->blk_size);
- set_capacity(disk, drv->nr_blks);
- disk->queue = host->queue;
- disk->private_data = drv;
- if (i)
- add_disk(disk);
- }
-
- host->usage_count--;
- return 0;
-}
-
-static int ida_revalidate(struct gendisk *disk)
-{
- drv_info_t *drv = disk->private_data;
- set_capacity(disk, drv->nr_blks);
- return 0;
-}
-
-/********************************************************************
- name: pollcomplete
- Wait polling for a command to complete.
- The memory mapped FIFO is polled for the completion.
- Used only at init time, interrupts disabled.
- ********************************************************************/
-static int pollcomplete(int ctlr)
-{
- int done;
- int i;
-
- /* Wait (up to 2 seconds) for a command to complete */
-
- for (i = 200000; i > 0; i--) {
- done = hba[ctlr]->access.command_completed(hba[ctlr]);
- if (done == 0) {
- udelay(10); /* a short fixed delay */
- } else
- return (done);
- }
- /* Invalid address to tell caller we ran out of time */
- return 1;
-}
-/*****************************************************************
- start_fwbk
-  Starts the controller firmware's background processing.
- Currently only the Integrated Raid controller needs this done.
- If the PCI mem address registers are written to after this,
- data corruption may occur
-*****************************************************************/
-static void start_fwbk(int ctlr)
-{
- id_ctlr_t *id_ctlr_buf;
- int ret_code;
-
- if( (hba[ctlr]->board_id != 0x40400E11)
- && (hba[ctlr]->board_id != 0x40480E11) )
-
-	/* Not an Integrated Raid, so there is nothing for us to do */
- return;
- printk(KERN_DEBUG "cpqarray: Starting firmware's background"
- " processing\n");
-	/* The command does not return anything, but sendcmd() needs a
- buffer */
- id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
- if(id_ctlr_buf==NULL)
- {
- printk(KERN_WARNING "cpqarray: Out of memory. "
- "Unable to start background processing.\n");
- return;
- }
- ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
- id_ctlr_buf, 0, 0, 0, 0);
- if(ret_code != IO_OK)
- printk(KERN_WARNING "cpqarray: Unable to start"
- " background processing\n");
-
- kfree(id_ctlr_buf);
-}
-/*****************************************************************
- getgeometry
- Get ida logical volume geometry from the controller
-  This is a large section of code which once existed in two flavors.
-  It is used only at init time.
-*****************************************************************/
-static void getgeometry(int ctlr)
-{
- id_log_drv_t *id_ldrive;
- id_ctlr_t *id_ctlr_buf;
- sense_log_drv_stat_t *id_lstatus_buf;
- config_t *sense_config_buf;
- unsigned int log_unit, log_index;
- int ret_code, size;
- drv_info_t *drv;
- ctlr_info_t *info_p = hba[ctlr];
- int i;
-
- info_p->log_drv_map = 0;
-
- id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
- if (!id_ldrive) {
- printk( KERN_ERR "cpqarray: out of memory.\n");
- goto err_0;
- }
-
- id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
- if (!id_ctlr_buf) {
- printk( KERN_ERR "cpqarray: out of memory.\n");
- goto err_1;
- }
-
- id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
- if (!id_lstatus_buf) {
- printk( KERN_ERR "cpqarray: out of memory.\n");
- goto err_2;
- }
-
- sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
- if (!sense_config_buf) {
- printk( KERN_ERR "cpqarray: out of memory.\n");
- goto err_3;
- }
-
- info_p->phys_drives = 0;
- info_p->log_drv_map = 0;
- info_p->drv_assign_map = 0;
- info_p->drv_spare_map = 0;
- info_p->mp_failed_drv_map = 0; /* only initialized here */
- /* Get controllers info for this logical drive */
- ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
- if (ret_code == IO_ERROR) {
- /*
- * If can't get controller info, set the logical drive map to 0,
- * so the idastubopen will fail on all logical drives
- * on the controller.
- */
- printk(KERN_ERR "cpqarray: error sending ID controller\n");
- goto err_4;
- }
-
- info_p->log_drives = id_ctlr_buf->nr_drvs;
- for(i=0;i<4;i++)
- info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
- info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
-
- printk(" (%s)\n", info_p->product_name);
- /*
- * Initialize logical drive map to zero
- */
- log_index = 0;
- /*
- * Get drive geometry for all logical drives
- */
- if (id_ctlr_buf->nr_drvs > 16)
-		printk(KERN_WARNING "cpqarray ida%d: This driver supports a "
-			"maximum of 16 logical drives per controller. "
-			"Additional drives will not be detected.\n", ctlr);
-
- for (log_unit = 0;
- (log_index < id_ctlr_buf->nr_drvs)
- && (log_unit < NWD);
- log_unit++) {
- size = sizeof(sense_log_drv_stat_t);
-
- /*
- Send "Identify logical drive status" cmd
- */
- ret_code = sendcmd(SENSE_LOG_DRV_STAT,
- ctlr, id_lstatus_buf, size, 0, 0, log_unit);
- if (ret_code == IO_ERROR) {
- /*
- If can't get logical drive status, set
- the logical drive map to 0, so the
- idastubopen will fail for all logical drives
- on the controller.
- */
- info_p->log_drv_map = 0;
- printk( KERN_WARNING
- "cpqarray ida%d: idaGetGeometry - Controller"
- " failed to report status of logical drive %d\n"
- "Access to this controller has been disabled\n",
- ctlr, log_unit);
- goto err_4;
- }
- /*
- Make sure the logical drive is configured
- */
- if (id_lstatus_buf->status != LOG_NOT_CONF) {
- ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
- sizeof(id_log_drv_t), 0, 0, log_unit);
- /*
- If error, the bit for this
- logical drive won't be set and
- idastubopen will return error.
- */
- if (ret_code != IO_ERROR) {
- drv = &info_p->drv[log_unit];
- drv->blk_size = id_ldrive->blk_size;
- drv->nr_blks = id_ldrive->nr_blks;
- drv->cylinders = id_ldrive->drv.cyl;
- drv->heads = id_ldrive->drv.heads;
- drv->sectors = id_ldrive->drv.sect_per_track;
- info_p->log_drv_map |= (1 << log_unit);
-
- printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
- ctlr, log_unit, drv->blk_size, drv->nr_blks);
- ret_code = sendcmd(SENSE_CONFIG,
- ctlr, sense_config_buf,
- sizeof(config_t), 0, 0, log_unit);
- if (ret_code == IO_ERROR) {
- info_p->log_drv_map = 0;
- printk(KERN_ERR "cpqarray: error sending sense config\n");
- goto err_4;
- }
-
- info_p->phys_drives =
- sense_config_buf->ctlr_phys_drv;
- info_p->drv_assign_map
- |= sense_config_buf->drv_asgn_map;
- info_p->drv_assign_map
- |= sense_config_buf->spare_asgn_map;
- info_p->drv_spare_map
- |= sense_config_buf->spare_asgn_map;
- } /* end of if no error on id_ldrive */
- log_index = log_index + 1;
- } /* end of if logical drive configured */
- } /* end of for log_unit */
-
- /* Free all the buffers and return */
-err_4:
- kfree(sense_config_buf);
-err_3:
- kfree(id_lstatus_buf);
-err_2:
- kfree(id_ctlr_buf);
-err_1:
- kfree(id_ldrive);
-err_0:
- return;
-}
-
-static void __exit cpqarray_exit(void)
-{
- int i;
-
- pci_unregister_driver(&cpqarray_pci_driver);
-
- /* Double check that all controller entries have been removed */
- for(i=0; i<MAX_CTLR; i++) {
- if (hba[i] != NULL) {
- printk(KERN_WARNING "cpqarray: Removing EISA "
- "controller %d\n", i);
- cpqarray_remove_one_eisa(i);
- }
- }
-
- remove_proc_entry("driver/cpqarray", NULL);
-}
-
-module_init(cpqarray_init)
-module_exit(cpqarray_exit)
diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
deleted file mode 100644
index be73e9d579c5..000000000000
--- a/drivers/block/cpqarray.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * If you want to make changes, improve or add functionality to this
- * driver, you'll probably need the Compaq Array Controller Interface
- * Specification (Document number ECG086/1198)
- */
-#ifndef CPQARRAY_H
-#define CPQARRAY_H
-
-#ifdef __KERNEL__
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/timer.h>
-#endif
-
-#include "ida_cmd.h"
-
-#define IO_OK 0
-#define IO_ERROR 1
-#define NWD 16
-#define NWD_SHIFT 4
-
-#define IDA_TIMER (5*HZ)
-#define IDA_TIMEOUT (10*HZ)
-
-#define MISC_NONFATAL_WARN 0x01
-
-typedef struct {
- unsigned blk_size;
- unsigned nr_blks;
- unsigned cylinders;
- unsigned heads;
- unsigned sectors;
- int usage_count;
-} drv_info_t;
-
-#ifdef __KERNEL__
-
-struct ctlr_info;
-typedef struct ctlr_info ctlr_info_t;
-
-struct access_method {
- void (*submit_command)(ctlr_info_t *h, cmdlist_t *c);
- void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
- unsigned long (*fifo_full)(ctlr_info_t *h);
- unsigned long (*intr_pending)(ctlr_info_t *h);
- unsigned long (*command_completed)(ctlr_info_t *h);
-};
-
-struct board_type {
- __u32 board_id;
- char *product_name;
- struct access_method *access;
-};
-
-struct ctlr_info {
- int ctlr;
- char devname[8];
- __u32 log_drv_map;
- __u32 drv_assign_map;
- __u32 drv_spare_map;
- __u32 mp_failed_drv_map;
-
- char firm_rev[4];
- int ctlr_sig;
-
- int log_drives;
- int phys_drives;
-
- struct pci_dev *pci_dev; /* NULL if EISA */
- __u32 board_id;
- char *product_name;
-
- void __iomem *vaddr;
- unsigned long paddr;
- unsigned long io_mem_addr;
- unsigned long io_mem_length;
- int intr;
- int usage_count;
- drv_info_t drv[NWD];
- struct proc_dir_entry *proc;
-
- struct access_method access;
-
- cmdlist_t *reqQ;
- cmdlist_t *cmpQ;
- cmdlist_t *cmd_pool;
- dma_addr_t cmd_pool_dhandle;
- unsigned long *cmd_pool_bits;
- struct request_queue *queue;
- spinlock_t lock;
-
- unsigned int Qdepth;
- unsigned int maxQsinceinit;
-
- unsigned int nr_requests;
- unsigned int nr_allocs;
- unsigned int nr_frees;
- struct timer_list timer;
- unsigned int misc_tflags;
-};
-
-#define IDA_LOCK(i) (&hba[i]->lock)
-
-#endif
-
-#endif /* CPQARRAY_H */
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 99e773cb70d0..3d31761c0ed0 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -21,9 +21,9 @@
#include <linux/module.h>
+#include <crypto/skcipher.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/crypto.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <asm/uaccess.h>
@@ -46,7 +46,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
char *cipher;
char *mode;
char *cmsp = cms; /* c-m string pointer */
- struct crypto_blkcipher *tfm;
+ struct crypto_skcipher *tfm;
/* encryption breaks for non sector aligned offsets */
@@ -82,12 +82,12 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
*cmsp++ = ')';
*cmsp = 0;
- tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
+ err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
if (err != 0)
goto out_free_tfm;
@@ -96,17 +96,14 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
return 0;
out_free_tfm:
- crypto_free_blkcipher(tfm);
+ crypto_free_skcipher(tfm);
out:
return err;
}
-typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
- struct scatterlist *sg_out,
- struct scatterlist *sg_in,
- unsigned int nsg);
+typedef int (*encdec_cbc_t)(struct skcipher_request *req);
static int
cryptoloop_transfer(struct loop_device *lo, int cmd,
@@ -114,11 +111,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
- struct crypto_blkcipher *tfm = lo->key_data;
- struct blkcipher_desc desc = {
- .tfm = tfm,
- .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
- };
+ struct crypto_skcipher *tfm = lo->key_data;
+ SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg_out;
struct scatterlist sg_in;
@@ -127,6 +121,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
unsigned in_offs, out_offs;
int err;
+ skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ NULL, NULL);
+
sg_init_table(&sg_out, 1);
sg_init_table(&sg_in, 1);
@@ -135,13 +133,13 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
in_offs = raw_off;
out_page = loop_page;
out_offs = loop_off;
- encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
+ encdecfunc = crypto_skcipher_decrypt;
} else {
in_page = loop_page;
in_offs = loop_off;
out_page = raw_page;
out_offs = raw_off;
- encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
+ encdecfunc = crypto_skcipher_encrypt;
}
while (size > 0) {
@@ -152,10 +150,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
sg_set_page(&sg_in, in_page, sz, in_offs);
sg_set_page(&sg_out, out_page, sz, out_offs);
- desc.info = iv;
- err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+ skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
+ err = encdecfunc(req);
if (err)
- return err;
+ goto out;
IV++;
size -= sz;
@@ -163,7 +161,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
out_offs += sz;
}
- return 0;
+ err = 0;
+
+out:
+ skcipher_request_zero(req);
+ return err;
}
static int
@@ -175,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
static int
cryptoloop_release(struct loop_device *lo)
{
- struct crypto_blkcipher *tfm = lo->key_data;
+ struct crypto_skcipher *tfm = lo->key_data;
if (tfm != NULL) {
- crypto_free_blkcipher(tfm);
+ crypto_free_skcipher(tfm);
lo->key_data = NULL;
return 0;
}
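The cryptoloop conversion above follows the standard one-shot pattern for the new skcipher interface: allocate a synchronous tfm (the CRYPTO_ALG_ASYNC mask requests one), keep the request on the stack, point it at source and destination scatterlists plus an IV, and zero the request when done. A self-contained sketch of that pattern follows; the algorithm name, key length, and in-place buffer are placeholders, not cryptoloop's actual parameters:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Sketch only: encrypt one buffer in place with CBC-AES. */
static int example_encrypt(u8 *buf, unsigned int len,
			   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		sg_init_one(&sg, buf, len);	/* buf must not be on the stack */
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* scrub key material */
	}

	crypto_free_skcipher(tfm);
	return err;
}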
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 34bc84efc29e..7a1cf7eaa71d 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -26,13 +26,13 @@
#ifndef _DRBD_INT_H
#define _DRBD_INT_H
+#include <crypto/hash.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
-#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
@@ -724,11 +724,11 @@ struct drbd_connection {
struct list_head transfer_log; /* all requests not yet fully processed */
- struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
- struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
- struct crypto_hash *csums_tfm;
- struct crypto_hash *verify_tfm;
+ struct crypto_shash *cram_hmac_tfm;
+ struct crypto_ahash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
+ struct crypto_ahash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
+ struct crypto_ahash *csums_tfm;
+ struct crypto_ahash *verify_tfm;
void *int_dig_in;
void *int_dig_vv;
@@ -1327,8 +1327,8 @@ struct bm_extent {
#endif
#endif
-/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
- * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
+/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
+ * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
* Since we may live in a mixed-platform cluster,
* we limit us to a platform agnostic constant here for now.
* A followup commit may allow even bigger BIO sizes,
@@ -1524,8 +1524,8 @@ static inline void ov_out_of_sync_print(struct drbd_device *device)
}
-extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
+extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
+extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 5b43dfb79819..fa209773d494 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1340,7 +1340,7 @@ void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd
struct p_data *dp, int data_size)
{
if (peer_device->connection->peer_integrity_tfm)
- data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
@@ -1629,7 +1629,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
- crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+ crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -1718,7 +1718,7 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
- crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+ crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -2498,11 +2498,11 @@ void conn_free_crypto(struct drbd_connection *connection)
{
drbd_free_sock(connection);
- crypto_free_hash(connection->csums_tfm);
- crypto_free_hash(connection->verify_tfm);
- crypto_free_hash(connection->cram_hmac_tfm);
- crypto_free_hash(connection->integrity_tfm);
- crypto_free_hash(connection->peer_integrity_tfm);
+ crypto_free_ahash(connection->csums_tfm);
+ crypto_free_ahash(connection->verify_tfm);
+ crypto_free_shash(connection->cram_hmac_tfm);
+ crypto_free_ahash(connection->integrity_tfm);
+ crypto_free_ahash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index c055c5e12f24..1fd1dccebb6b 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_max_hw_sectors(q, max_hw_sectors);
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
- blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+ blk_queue_segment_boundary(q, PAGE_SIZE-1);
if (b) {
struct drbd_connection *connection = first_peer_device(device)->connection;
@@ -2160,19 +2160,34 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
}
struct crypto {
- struct crypto_hash *verify_tfm;
- struct crypto_hash *csums_tfm;
- struct crypto_hash *cram_hmac_tfm;
- struct crypto_hash *integrity_tfm;
+ struct crypto_ahash *verify_tfm;
+ struct crypto_ahash *csums_tfm;
+ struct crypto_shash *cram_hmac_tfm;
+ struct crypto_ahash *integrity_tfm;
};
static int
-alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
+alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
{
if (!tfm_name[0])
return NO_ERROR;
- *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
+ *tfm = crypto_alloc_shash(tfm_name, 0, 0);
+ if (IS_ERR(*tfm)) {
+ *tfm = NULL;
+ return err_alg;
+ }
+
+ return NO_ERROR;
+}
+
+static int
+alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
+{
+ if (!tfm_name[0])
+ return NO_ERROR;
+
+ *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(*tfm)) {
*tfm = NULL;
return err_alg;
@@ -2187,24 +2202,24 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
- rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
- ERR_CSUMS_ALG);
+ rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
+ ERR_CSUMS_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
- ERR_VERIFY_ALG);
+ rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
+ ERR_VERIFY_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
- ERR_INTEGRITY_ALG);
+ rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
+ ERR_INTEGRITY_ALG);
if (rv != NO_ERROR)
return rv;
if (new_net_conf->cram_hmac_alg[0] != 0) {
snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
new_net_conf->cram_hmac_alg);
- rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
- ERR_AUTH_ALG);
+ rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
+ ERR_AUTH_ALG);
}
return rv;
@@ -2212,10 +2227,10 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
static void free_crypto(struct crypto *crypto)
{
- crypto_free_hash(crypto->cram_hmac_tfm);
- crypto_free_hash(crypto->integrity_tfm);
- crypto_free_hash(crypto->csums_tfm);
- crypto_free_hash(crypto->verify_tfm);
+ crypto_free_shash(crypto->cram_hmac_tfm);
+ crypto_free_ahash(crypto->integrity_tfm);
+ crypto_free_ahash(crypto->csums_tfm);
+ crypto_free_ahash(crypto->verify_tfm);
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
@@ -2292,23 +2307,23 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(connection->net_conf, new_net_conf);
if (!rsr) {
- crypto_free_hash(connection->csums_tfm);
+ crypto_free_ahash(connection->csums_tfm);
connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL;
}
if (!ovr) {
- crypto_free_hash(connection->verify_tfm);
+ crypto_free_ahash(connection->verify_tfm);
connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL;
}
- crypto_free_hash(connection->integrity_tfm);
+ crypto_free_ahash(connection->integrity_tfm);
connection->integrity_tfm = crypto.integrity_tfm;
if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
/* Do this without trying to take connection->data.mutex again. */
__drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
- crypto_free_hash(connection->cram_hmac_tfm);
+ crypto_free_shash(connection->cram_hmac_tfm);
connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
mutex_unlock(&connection->resource->conf_update);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1957fe8601dc..050aaa1c0350 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1627,7 +1627,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
- digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
* here, together with its struct p_data?
@@ -1741,7 +1741,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
digest_size = 0;
if (peer_device->connection->peer_integrity_tfm) {
- digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
if (err)
return err;
@@ -3321,7 +3321,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
int p_proto, p_discard_my_data, p_two_primaries, cf;
struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
char integrity_alg[SHARED_SECRET_MAX] = "";
- struct crypto_hash *peer_integrity_tfm = NULL;
+ struct crypto_ahash *peer_integrity_tfm = NULL;
void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
@@ -3402,14 +3402,14 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
* change.
*/
- peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+ peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
if (!peer_integrity_tfm) {
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
integrity_alg);
goto disconnect;
}
- hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+ hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
int_dig_in = kmalloc(hash_size, GFP_KERNEL);
int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
if (!(int_dig_in && int_dig_vv)) {
@@ -3439,7 +3439,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
- crypto_free_hash(connection->peer_integrity_tfm);
+ crypto_free_ahash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
connection->peer_integrity_tfm = peer_integrity_tfm;
@@ -3457,7 +3457,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
disconnect_rcu_unlock:
rcu_read_unlock();
disconnect:
- crypto_free_hash(peer_integrity_tfm);
+ crypto_free_ahash(peer_integrity_tfm);
kfree(int_dig_in);
kfree(int_dig_vv);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -3469,15 +3469,15 @@ disconnect:
* return: NULL (alg name was "")
* ERR_PTR(error) if something goes wrong
* or the crypto hash ptr, if it worked out ok. */
-static struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
+static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
const char *alg, const char *name)
{
- struct crypto_hash *tfm;
+ struct crypto_ahash *tfm;
if (!alg[0])
return NULL;
- tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
alg, name, PTR_ERR(tfm));
@@ -3530,8 +3530,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
struct drbd_device *device;
struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
- struct crypto_hash *verify_tfm = NULL;
- struct crypto_hash *csums_tfm = NULL;
+ struct crypto_ahash *verify_tfm = NULL;
+ struct crypto_ahash *csums_tfm = NULL;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
const int apv = connection->agreed_pro_version;
@@ -3678,14 +3678,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_hash(peer_device->connection->verify_tfm);
+ crypto_free_ahash(peer_device->connection->verify_tfm);
peer_device->connection->verify_tfm = verify_tfm;
drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
}
if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_hash(peer_device->connection->csums_tfm);
+ crypto_free_ahash(peer_device->connection->csums_tfm);
peer_device->connection->csums_tfm = csums_tfm;
drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
}
@@ -3729,9 +3729,9 @@ disconnect:
mutex_unlock(&connection->resource->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
- crypto_free_hash(csums_tfm);
+ crypto_free_ahash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
- crypto_free_hash(verify_tfm);
+ crypto_free_ahash(verify_tfm);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
@@ -4925,14 +4925,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
{
struct drbd_socket *sock;
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
- struct scatterlist sg;
char *response = NULL;
char *right_response = NULL;
char *peers_ch = NULL;
unsigned int key_len;
char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
- struct hash_desc desc;
+ SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
struct packet_info pi;
struct net_conf *nc;
int err, rv;
@@ -4945,12 +4944,12 @@ static int drbd_do_auth(struct drbd_connection *connection)
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
- desc.tfm = connection->cram_hmac_tfm;
- desc.flags = 0;
+ desc->tfm = connection->cram_hmac_tfm;
+ desc->flags = 0;
- rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
+ rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
- drbd_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
+ drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
rv = -1;
goto fail;
}
@@ -5011,7 +5010,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
goto fail;
}
- resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
+ resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
drbd_err(connection, "kmalloc of response failed\n");
@@ -5019,10 +5018,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
goto fail;
}
- sg_init_table(&sg, 1);
- sg_set_buf(&sg, peers_ch, pi.size);
-
- rv = crypto_hash_digest(&desc, &sg, sg.length, response);
+ rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
if (rv) {
drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
@@ -5070,9 +5066,8 @@ static int drbd_do_auth(struct drbd_connection *connection)
goto fail;
}
- sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
-
- rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
+ rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
+ right_response);
if (rv) {
drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
rv = -1;
@@ -5091,6 +5086,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
kfree(peers_ch);
kfree(response);
kfree(right_response);
+ shash_desc_zero(desc);
return rv;
}
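With the shash conversion, drbd_do_auth() above keeps the HMAC state on the stack and hashes a flat buffer directly, with no scatterlist. A minimal sketch of that pattern, mirroring the calls in the hunk; the algorithm name and buffer sizes are illustrative:

#include <crypto/hash.h>
#include <linux/err.h>

/* Sketch only: one-shot keyed digest over a flat buffer via shash. */
static int example_hmac(const u8 *key, unsigned int keylen,
			const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* out must hold crypto_shash_digestsize(tfm) bytes */
		err = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);	/* scrub the on-stack state */
	}

	crypto_free_shash(tfm);
	return err;
}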
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index eff716c27b1f..4d87499f0d54 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -274,51 +274,56 @@ void drbd_request_endio(struct bio *bio)
complete_master_bio(device, &m);
}
-void drbd_csum_ee(struct crypto_hash *tfm, struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;
struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
- desc.tfm = tfm;
- desc.flags = 0;
+ ahash_request_set_tfm(req, tfm);
+ ahash_request_set_callback(req, 0, NULL, NULL);
sg_init_table(&sg, 1);
- crypto_hash_init(&desc);
+ crypto_ahash_init(req);
while ((tmp = page_chain_next(page))) {
/* all but the last page will be fully used */
sg_set_page(&sg, page, PAGE_SIZE, 0);
- crypto_hash_update(&desc, &sg, sg.length);
+ ahash_request_set_crypt(req, &sg, NULL, sg.length);
+ crypto_ahash_update(req);
page = tmp;
}
/* and now the last, possibly only partially used page */
len = peer_req->i.size & (PAGE_SIZE - 1);
sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
- crypto_hash_update(&desc, &sg, sg.length);
- crypto_hash_final(&desc, digest);
+ ahash_request_set_crypt(req, &sg, digest, sg.length);
+ crypto_ahash_finup(req);
+ ahash_request_zero(req);
}
-void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
{
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;
struct bio_vec bvec;
struct bvec_iter iter;
- desc.tfm = tfm;
- desc.flags = 0;
+ ahash_request_set_tfm(req, tfm);
+ ahash_request_set_callback(req, 0, NULL, NULL);
sg_init_table(&sg, 1);
- crypto_hash_init(&desc);
+ crypto_ahash_init(req);
bio_for_each_segment(bvec, bio, iter) {
sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
- crypto_hash_update(&desc, &sg, sg.length);
+ ahash_request_set_crypt(req, &sg, NULL, sg.length);
+ crypto_ahash_update(req);
}
- crypto_hash_final(&desc, digest);
+ ahash_request_set_crypt(req, NULL, digest, 0);
+ crypto_ahash_final(req);
+ ahash_request_zero(req);
}
/* MAYBE merge common code with w_e_end_ov_req */
@@ -337,7 +342,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
@@ -1113,7 +1118,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
* a real fix would be much more involved,
* introducing more locking mechanisms */
if (peer_device->connection->csums_tfm) {
- digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
@@ -1163,7 +1168,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
@@ -1235,7 +1240,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
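The drbd_csum_ee()/drbd_csum_bio() rewrites above drive the ahash interface incrementally: init once, one update per page or bio segment, then finup on the last piece with the digest as the output. Below is a condensed sketch of the same flow over two flat (kmalloc'd) buffers; the algorithm name is a placeholder, and the on-stack request form requires the synchronous tfm that the CRYPTO_ALG_ASYNC mask selects:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Sketch only: incremental ahash; finup() writes the digest. */
static int example_csum(const void *p1, unsigned int l1,
			const void *p2, unsigned int l2, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		AHASH_REQUEST_ON_STACK(req, tfm);

		ahash_request_set_tfm(req, tfm);
		ahash_request_set_callback(req, 0, NULL, NULL);
		sg_init_table(&sg, 1);

		err = crypto_ahash_init(req);
		if (!err) {
			sg_set_buf(&sg, p1, l1);
			ahash_request_set_crypt(req, &sg, NULL, l1);
			err = crypto_ahash_update(req);
		}
		if (!err) {
			sg_set_buf(&sg, p2, l2);	/* last piece */
			ahash_request_set_crypt(req, &sg, digest, l2);
			err = crypto_ahash_finup(req);
		}
		ahash_request_zero(req);
	}

	crypto_free_ahash(tfm);
	return err;
}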
diff --git a/drivers/block/ida_cmd.h b/drivers/block/ida_cmd.h
deleted file mode 100644
index 98b5746b3089..000000000000
--- a/drivers/block/ida_cmd.h
+++ /dev/null
@@ -1,349 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#ifndef ARRAYCMD_H
-#define ARRAYCMD_H
-
-#include <asm/types.h>
-#if 0
-#include <linux/blkdev.h>
-#endif
-
-/* for the Smart Array 42XX cards */
-#define S42XX_REQUEST_PORT_OFFSET 0x40
-#define S42XX_REPLY_INTR_MASK_OFFSET 0x34
-#define S42XX_REPLY_PORT_OFFSET 0x44
-#define S42XX_INTR_STATUS 0x30
-
-#define S42XX_INTR_OFF 0x08
-#define S42XX_INTR_PENDING 0x08
-
-#define COMMAND_FIFO 0x04
-#define COMMAND_COMPLETE_FIFO 0x08
-#define INTR_MASK 0x0C
-#define INTR_STATUS 0x10
-#define INTR_PENDING 0x14
-
-#define FIFO_NOT_EMPTY 0x01
-#define FIFO_NOT_FULL 0x02
-
-#define BIG_PROBLEM 0x40
-#define LOG_NOT_CONF 2
-
-#pragma pack(1)
-typedef struct {
- __u32 size;
- __u32 addr;
-} sg_t;
-
-#define RCODE_NONFATAL 0x02
-#define RCODE_FATAL 0x04
-#define RCODE_INVREQ 0x10
-typedef struct {
- __u16 next;
- __u8 cmd;
- __u8 rcode;
- __u32 blk;
- __u16 blk_cnt;
- __u8 sg_cnt;
- __u8 reserved;
-} rhdr_t;
-
-#define SG_MAX 32
-typedef struct {
- rhdr_t hdr;
- sg_t sg[SG_MAX];
- __u32 bp;
-} rblk_t;
-
-typedef struct {
- __u8 unit;
- __u8 prio;
- __u16 size;
-} chdr_t;
-
-#define CMD_RWREQ 0x00
-#define CMD_IOCTL_PEND 0x01
-#define CMD_IOCTL_DONE 0x02
-
-typedef struct cmdlist {
- chdr_t hdr;
- rblk_t req;
- __u32 size;
- int retry_cnt;
- __u32 busaddr;
- int ctlr;
- struct cmdlist *prev;
- struct cmdlist *next;
- struct request *rq;
- int type;
-} cmdlist_t;
-
-#define ID_CTLR 0x11
-typedef struct {
- __u8 nr_drvs;
- __u32 cfg_sig;
- __u8 firm_rev[4];
- __u8 rom_rev[4];
- __u8 hw_rev;
- __u32 bb_rev;
- __u32 drv_present_map;
- __u32 ext_drv_map;
- __u32 board_id;
- __u8 cfg_error;
- __u32 non_disk_bits;
- __u8 bad_ram_addr;
- __u8 cpu_rev;
- __u8 pdpi_rev;
- __u8 epic_rev;
- __u8 wcxc_rev;
- __u8 marketing_rev;
- __u8 ctlr_flags;
- __u8 host_flags;
- __u8 expand_dis;
- __u8 scsi_chips;
- __u32 max_req_blocks;
- __u32 ctlr_clock;
- __u8 drvs_per_bus;
- __u16 big_drv_present_map[8];
- __u16 big_ext_drv_map[8];
- __u16 big_non_disk_map[8];
- __u16 task_flags;
- __u8 icl_bus;
- __u8 red_modes;
- __u8 cur_red_mode;
- __u8 red_ctlr_stat;
- __u8 red_fail_reason;
- __u8 reserved[403];
-} id_ctlr_t;
-
-typedef struct {
- __u16 cyl;
- __u8 heads;
- __u8 xsig;
- __u8 psectors;
- __u16 wpre;
- __u8 maxecc;
- __u8 drv_ctrl;
- __u16 pcyls;
- __u8 pheads;
- __u16 landz;
- __u8 sect_per_track;
- __u8 cksum;
-} drv_param_t;
-
-#define ID_LOG_DRV 0x10
-typedef struct {
- __u16 blk_size;
- __u32 nr_blks;
- drv_param_t drv;
- __u8 fault_tol;
- __u8 reserved;
- __u8 bios_disable;
-} id_log_drv_t;
-
-#define ID_LOG_DRV_EXT 0x18
-typedef struct {
- __u32 log_drv_id;
- __u8 log_drv_label[64];
- __u8 reserved[418];
-} id_log_drv_ext_t;
-
-#define SENSE_LOG_DRV_STAT 0x12
-typedef struct {
- __u8 status;
- __u32 fail_map;
- __u16 read_err[32];
- __u16 write_err[32];
- __u8 drv_err_data[256];
- __u8 drq_timeout[32];
- __u32 blks_to_recover;
- __u8 drv_recovering;
- __u16 remap_cnt[32];
- __u32 replace_drv_map;
- __u32 act_spare_map;
- __u8 spare_stat;
- __u8 spare_repl_map[32];
- __u32 repl_ok_map;
- __u8 media_exch;
- __u8 cache_fail;
- __u8 expn_fail;
- __u8 unit_flags;
- __u16 big_fail_map[8];
- __u16 big_remap_map[128];
- __u16 big_repl_map[8];
- __u16 big_act_spare_map[8];
- __u8 big_spar_repl_map[128];
- __u16 big_repl_ok_map[8];
- __u8 big_drv_rebuild;
- __u8 reserved[36];
-} sense_log_drv_stat_t;
-
-#define START_RECOVER 0x13
-
-#define ID_PHYS_DRV 0x15
-typedef struct {
- __u8 scsi_bus;
- __u8 scsi_id;
- __u16 blk_size;
- __u32 nr_blks;
- __u32 rsvd_blks;
- __u8 drv_model[40];
- __u8 drv_sn[40];
- __u8 drv_fw[8];
- __u8 scsi_iq_bits;
- __u8 compaq_drv_stmp;
- __u8 last_fail;
- __u8 phys_drv_flags;
- __u8 phys_drv_flags1;
- __u8 scsi_lun;
- __u8 phys_drv_flags2;
- __u8 reserved;
- __u32 spi_speed_rules;
- __u8 phys_connector[2];
- __u8 phys_box_on_bus;
- __u8 phys_bay_in_box;
-} id_phys_drv_t;
-
-#define BLINK_DRV_LEDS 0x16
-typedef struct {
- __u32 blink_duration;
- __u32 reserved;
- __u8 blink[256];
- __u8 reserved1[248];
-} blink_drv_leds_t;
-
-#define SENSE_BLINK_LEDS 0x17
-typedef struct {
- __u32 blink_duration;
- __u32 btime_elap;
- __u8 blink[256];
- __u8 reserved1[248];
-} sense_blink_leds_t;
-
-#define IDA_READ 0x20
-#define IDA_WRITE 0x30
-#define IDA_WRITE_MEDIA 0x31
-#define RESET_TO_DIAG 0x40
-#define DIAG_PASS_THRU 0x41
-
-#define SENSE_CONFIG 0x50
-#define SET_CONFIG 0x51
-typedef struct {
- __u32 cfg_sig;
- __u16 compat_port;
- __u8 data_dist_mode;
- __u8 surf_an_ctrl;
- __u16 ctlr_phys_drv;
- __u16 log_unit_phys_drv;
- __u16 fault_tol_mode;
- __u8 phys_drv_param[16];
- drv_param_t drv;
- __u32 drv_asgn_map;
- __u16 dist_factor;
- __u32 spare_asgn_map;
- __u8 reserved[6];
- __u16 os;
- __u8 ctlr_order;
- __u8 extra_info;
- __u32 data_offs;
- __u8 parity_backedout_write_drvs;
- __u8 parity_dist_mode;
- __u8 parity_shift_fact;
- __u8 bios_disable_flag;
- __u32 blks_on_vol;
- __u32 blks_per_drv;
- __u8 scratch[16];
- __u16 big_drv_map[8];
- __u16 big_spare_map[8];
- __u8 ss_source_vol;
- __u8 mix_drv_cap_range;
- struct {
- __u16 big_drv_map[8];
- __u32 blks_per_drv;
- __u16 fault_tol_mode;
- __u16 dist_factor;
- } MDC_range[4];
- __u8 reserved1[248];
-} config_t;
-
-#define BYPASS_VOL_STATE 0x52
-#define SS_CREATE_VOL 0x53
-#define CHANGE_CONFIG 0x54
-#define SENSE_ORIG_CONF 0x55
-#define REORDER_LOG_DRV 0x56
-typedef struct {
- __u8 old_units[32];
-} reorder_log_drv_t;
-
-#define LABEL_LOG_DRV 0x57
-typedef struct {
- __u8 log_drv_label[64];
-} label_log_drv_t;
-
-#define SS_TO_VOL 0x58
-
-#define SET_SURF_DELAY 0x60
-typedef struct {
- __u16 delay;
- __u8 reserved[510];
-} surf_delay_t;
-
-#define SET_OVERHEAT_DELAY 0x61
-typedef struct {
- __u16 delay;
-} overhead_delay_t;
-
-#define SET_MP_DELAY
-typedef struct {
- __u16 delay;
- __u8 reserved[510];
-} mp_delay_t;
-
-#define PASSTHRU_A 0x91
-typedef struct {
- __u8 target;
- __u8 bus;
- __u8 lun;
- __u32 timeout;
- __u32 flags;
- __u8 status;
- __u8 error;
- __u8 cdb_len;
- __u8 sense_error;
- __u8 sense_key;
- __u32 sense_info;
- __u8 sense_code;
- __u8 sense_qual;
- __u32 residual;
- __u8 reserved[4];
- __u8 cdb[12];
-} scsi_param_t;
-
-#define RESUME_BACKGROUND_ACTIVITY 0x99
-#define SENSE_CONTROLLER_PERFORMANCE 0xa8
-#define FLUSH_CACHE 0xc2
-#define COLLECT_BUFFER 0xd2
-#define READ_FLASH_ROM 0xf6
-#define WRITE_FLASH_ROM 0xf7
-#pragma pack()
-
-#endif /* ARRAYCMD_H */
diff --git a/drivers/block/ida_ioctl.h b/drivers/block/ida_ioctl.h
deleted file mode 100644
index 888fff9caed0..000000000000
--- a/drivers/block/ida_ioctl.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#ifndef IDA_IOCTL_H
-#define IDA_IOCTL_H
-
-#include "ida_cmd.h"
-#include "cpqarray.h"
-
-#define IDAGETDRVINFO 0x27272828
-#define IDAPASSTHRU 0x28282929
-#define IDAGETCTLRSIG 0x29293030
-#define IDAREVALIDATEVOLS 0x30303131
-#define IDADRIVERVERSION 0x31313232
-#define IDAGETPCIINFO 0x32323333
-
-typedef struct _ida_pci_info_struct
-{
- unsigned char bus;
- unsigned char dev_fn;
- __u32 board_id;
-} ida_pci_info_struct;
-/*
- * Normally, the ioctl determines the logical unit for this command by
- * the major,minor number of the fd passed to ioctl. If you need to send
- * a command to a different/nonexistent unit (such as during config), you
- * can override the normal behavior by setting the unit valid bit. (Normally,
- * it should be zero) The controller the command is sent to is still
- * determined by the major number of the open device.
- */
-
-#define UNITVALID 0x80
-typedef struct {
- __u8 cmd;
- __u8 rcode;
- __u8 unit;
- __u32 blk;
- __u16 blk_cnt;
-
-/* currently, sg_cnt is assumed to be 1: only the 0th element of sg is used */
- struct {
- void __user *addr;
- size_t size;
- } sg[SG_MAX];
- int sg_cnt;
-
- union ctlr_cmds {
- drv_info_t drv;
- unsigned char buf[1024];
-
- id_ctlr_t id_ctlr;
- drv_param_t drv_param;
- id_log_drv_t id_log_drv;
- id_log_drv_ext_t id_log_drv_ext;
- sense_log_drv_stat_t sense_log_drv_stat;
- id_phys_drv_t id_phys_drv;
- blink_drv_leds_t blink_drv_leds;
- sense_blink_leds_t sense_blink_leds;
- config_t config;
- reorder_log_drv_t reorder_log_drv;
- label_log_drv_t label_log_drv;
- surf_delay_t surf_delay;
- overhead_delay_t overhead_delay;
- mp_delay_t mp_delay;
- scsi_param_t scsi_param;
- } c;
-} ida_ioctl_t;
-
-#endif /* IDA_IOCTL_H */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
+ /*
+ * This bio may be started from the middle of the 'bvec'
+ * because of bio splitting, so the offset into the bvec must
+ * be passed to the iov iterator
+ */
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
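
For reference, bi_bvec_done is the number of bytes of the current bvec that
the iterator has already consumed, so a bio split mid-bvec starts partway
into its first segment. A sketch of the resulting iterator setup, mirroring
the hunk above (nr_bytes stands in for blk_rq_bytes(cmd->rq)):

    struct iov_iter iter;
    struct bio_vec *bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);

    iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, bio_segments(bio), nr_bytes);
    /* Skip whatever part of the first bvec an earlier split consumed. */
    iter.iov_offset = bio->bi_iter.bi_bvec_done;
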
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 9b180dbbd03c..25824c1697c5 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -173,7 +173,13 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
struct request *rq;
+ if (mtip_check_surprise_removal(dd->pdev))
+ return NULL;
+
rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
+ if (IS_ERR(rq))
+ return NULL;
+
return blk_mq_rq_to_pdu(rq);
}
@@ -233,15 +239,9 @@ static void mtip_async_complete(struct mtip_port *port,
"Command tag %d failed due to TFE\n", tag);
}
- /* Unmap the DMA scatter list entries */
- dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
-
rq = mtip_rq_from_tag(dd, tag);
- if (unlikely(cmd->unaligned))
- up(&port->cmd_slot_unal);
-
- blk_mq_end_request(rq, status ? -EIO : 0);
+ blk_mq_complete_request(rq, status);
}
/*
@@ -581,6 +581,8 @@ static void mtip_completion(struct mtip_port *port,
dev_warn(&port->dd->pdev->dev,
"Internal command %d completed with TFE\n", tag);
+ command->comp_func = NULL;
+ command->comp_data = NULL;
complete(waiting);
}
@@ -618,8 +620,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
port = dd->port;
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
@@ -628,7 +628,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
cmd->comp_func(port, MTIP_TAG_INTERNAL,
cmd, PORT_IRQ_TF_ERR);
}
- goto handle_tfe_exit;
+ return;
}
/* clear the tag accumulator */
@@ -701,7 +701,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
fail_reason = "thermal shutdown";
}
if (buf[288] == 0xBF) {
- set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed. Secure erase required.\n");
fail_all_ncq_cmds = 1;
@@ -771,11 +771,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
-
-handle_tfe_exit:
- /* clear eh_active */
- clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
}
/*
@@ -1007,6 +1002,7 @@ static bool mtip_pause_ncq(struct mtip_port *port,
(fis->features == 0x27 || fis->features == 0x72 ||
fis->features == 0x62 || fis->features == 0x26))) {
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+ clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
/* Com reset after secure erase or lowlevel format */
mtip_restart_port(port);
clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
@@ -1021,12 +1017,14 @@ static bool mtip_pause_ncq(struct mtip_port *port,
*
* @port Pointer to port data structure
* @timeout Max duration to wait (ms)
+ * @atomic gfp_t flag indicating whether the caller context may block
*
* return value
* 0 Success
* -EBUSY Commands still active
*/
-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
+ gfp_t atomic)
{
unsigned long to;
unsigned int n;
@@ -1037,16 +1035,21 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
+ atomic == GFP_KERNEL) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
- msleep(100);
+ if (atomic == GFP_KERNEL)
+ msleep(100);
+ else {
+ cpu_relax();
+ udelay(100);
+ }
+
if (mtip_check_surprise_removal(port->dd->pdev))
goto err_fault;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
- goto err_fault;
/*
* Ignore s_active bit 0 of array element 0.
@@ -1099,6 +1102,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
int rv = 0;
+ unsigned long start;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1107,6 +1111,10 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
int_cmd = mtip_get_int_command(dd);
+ if (!int_cmd) {
+ dbg_printk(MTIP_DRV_NAME " Unable to allocate tag for PIO cmd\n");
+ return -EFAULT;
+ }
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1119,7 +1127,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port,
- MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
+ MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
dev_warn(&dd->pdev->dev,
"Failed to quiesce IO\n");
mtip_put_int_command(dd, int_cmd);
@@ -1162,6 +1170,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Populate the command header */
int_cmd->command_header->byte_count = 0;
+ start = jiffies;
+
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
@@ -1170,10 +1180,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
if ((rv = wait_for_completion_interruptible_timeout(
&wait,
msecs_to_jiffies(timeout))) <= 0) {
+
if (rv == -ERESTARTSYS) { /* interrupted */
dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %lu ms\n",
- fis->command, timeout);
+ "Internal command [%02X] was interrupted after %u ms\n",
+ fis->command,
+ jiffies_to_msecs(jiffies - start));
rv = -EINTR;
goto exec_ic_exit;
} else if (rv == 0) /* timeout */
@@ -2039,7 +2051,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
outbuf,
taskout,
DMA_TO_DEVICE);
- if (outbuf_dma == 0) {
+ if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -2056,7 +2068,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
inbuf_dma = pci_map_single(dd->pdev,
inbuf,
taskin, DMA_FROM_DEVICE);
- if (inbuf_dma == 0) {
+ if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -2890,6 +2902,42 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
return -EFAULT;
}
+static void mtip_softirq_done_fn(struct request *rq)
+{
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct driver_data *dd = rq->q->queuedata;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
+ cmd->direction);
+
+ if (unlikely(cmd->unaligned))
+ up(&dd->port->cmd_slot_unal);
+
+ blk_mq_end_request(rq, rq->errors);
+}
+
+static void mtip_abort_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
+
+ clear_bit(req->tag, dd->port->cmds_to_issue);
+ req->errors = -EIO;
+ mtip_softirq_done_fn(req);
+}
+
+static void mtip_queue_cmd(struct request *req, void *data,
+ bool reserved)
+{
+ struct driver_data *dd = data;
+
+ set_bit(req->tag, dd->port->cmds_to_issue);
+ blk_abort_request(req);
+}
+
/*
* service thread to issue queued commands
*
@@ -2902,7 +2950,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
static int mtip_service_thread(void *data)
{
struct driver_data *dd = (struct driver_data *)data;
- unsigned long slot, slot_start, slot_wrap;
+ unsigned long slot, slot_start, slot_wrap, to;
unsigned int num_cmd_slots = dd->slot_groups * 32;
struct mtip_port *port = dd->port;
@@ -2917,9 +2965,7 @@ static int mtip_service_thread(void *data)
* is in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
- !(port->flags & MTIP_PF_PAUSE_IO));
-
- set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+ (port->flags & MTIP_PF_SVC_THD_WORK));
if (kthread_should_stop() ||
test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
@@ -2929,6 +2975,8 @@ static int mtip_service_thread(void *data)
&dd->dd_flag)))
goto st_out;
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+
restart_eh:
/* Demux bits: start with error handling */
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
@@ -2939,6 +2987,32 @@ restart_eh:
if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
goto restart_eh;
+ if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
+ to = jiffies + msecs_to_jiffies(5000);
+
+ do {
+ mdelay(100);
+ } while (atomic_read(&dd->irq_workers_active) != 0 &&
+ time_before(jiffies, to));
+
+ if (atomic_read(&dd->irq_workers_active) != 0)
+ dev_warn(&dd->pdev->dev,
+ "Completion workers still active!");
+
+ spin_lock(dd->queue->queue_lock);
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_queue_cmd, dd);
+ spin_unlock(dd->queue->queue_lock);
+
+ set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
+
+ if (mtip_device_reset(dd))
+ blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ mtip_abort_cmd, dd);
+
+ clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
+ }
+
if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
@@ -2971,10 +3045,8 @@ restart_eh:
}
if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
- if (mtip_ftl_rebuild_poll(dd) < 0)
- set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
- &dd->dd_flag);
- clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
+ if (mtip_ftl_rebuild_poll(dd) == 0)
+ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
}
@@ -3089,7 +3161,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed.\n");
- /* TODO */
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
}
}
@@ -3263,20 +3335,25 @@ out1:
return rv;
}
-static void mtip_standby_drive(struct driver_data *dd)
+static int mtip_standby_drive(struct driver_data *dd)
{
- if (dd->sr)
- return;
+ int rv = 0;
+ if (dd->sr || !dd->port)
+ return -ENODEV;
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
- !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
- if (mtip_standby_immediate(dd->port))
+ !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
+ !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
+ rv = mtip_standby_immediate(dd->port);
+ if (rv)
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
+ }
+ return rv;
}
/*
@@ -3289,10 +3366,6 @@ static void mtip_standby_drive(struct driver_data *dd)
*/
static int mtip_hw_exit(struct driver_data *dd)
{
- /*
- * Send standby immediate (E0h) to the drive so that it
- * saves its state.
- */
if (!dd->sr) {
/* de-initialize the port. */
mtip_deinit_port(dd->port);
@@ -3334,8 +3407,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (!dd->sr && dd->port)
- mtip_standby_immediate(dd->port);
+ mtip_standby_drive(dd);
return 0;
}
@@ -3358,7 +3430,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
* Send standby immediate (E0h) to the drive
* so that it saves its state.
*/
- if (mtip_standby_immediate(dd->port) != 0) {
+ if (mtip_standby_drive(dd) != 0) {
dev_err(&dd->pdev->dev,
"Failed standby-immediate command\n");
return -EFAULT;
@@ -3596,6 +3668,28 @@ static int mtip_block_getgeo(struct block_device *dev,
return 0;
}
+static int mtip_block_open(struct block_device *dev, fmode_t mode)
+{
+ struct driver_data *dd;
+
+ if (dev && dev->bd_disk) {
+ dd = (struct driver_data *) dev->bd_disk->private_data;
+
+ if (dd) {
+ if (test_bit(MTIP_DDF_REMOVAL_BIT,
+ &dd->dd_flag)) {
+ return -ENODEV;
+ }
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+void mtip_block_release(struct gendisk *disk, fmode_t mode)
+{
+}
+
/*
* Block device operation function.
*
@@ -3603,6 +3697,8 @@ static int mtip_block_getgeo(struct block_device *dev,
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
+ .open = mtip_block_open,
+ .release = mtip_block_release,
.ioctl = mtip_block_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = mtip_block_compat_ioctl,
@@ -3664,10 +3760,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
rq_data_dir(rq))) {
return -ENODATA;
}
- if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
+ if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
+ test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
return -ENODATA;
- if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
- return -ENXIO;
}
if (rq->cmd_flags & REQ_DISCARD) {
@@ -3779,11 +3874,32 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
return 0;
}
+static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
+ bool reserved)
+{
+ struct driver_data *dd = req->q->queuedata;
+
+ if (reserved)
+ goto exit_handler;
+
+ if (test_bit(req->tag, dd->port->cmds_to_issue))
+ goto exit_handler;
+
+ if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
+ goto exit_handler;
+
+ wake_up_interruptible(&dd->port->svc_wait);
+exit_handler:
+ return BLK_EH_RESET_TIMER;
+}
+
static struct blk_mq_ops mtip_mq_ops = {
.queue_rq = mtip_queue_rq,
.map_queue = blk_mq_map_queue,
.init_request = mtip_init_cmd,
.exit_request = mtip_free_cmd,
+ .complete = mtip_softirq_done_fn,
+ .timeout = mtip_cmd_timeout,
};
/*
@@ -3850,7 +3966,6 @@ static int mtip_block_initialize(struct driver_data *dd)
mtip_hw_debugfs_init(dd);
-skip_create_disk:
memset(&dd->tags, 0, sizeof(dd->tags));
dd->tags.ops = &mtip_mq_ops;
dd->tags.nr_hw_queues = 1;
@@ -3860,12 +3975,13 @@ skip_create_disk:
dd->tags.numa_node = dd->numa_node;
dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
+ dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
rv = blk_mq_alloc_tag_set(&dd->tags);
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
- goto block_queue_alloc_init_error;
+ goto block_queue_alloc_tag_error;
}
/* Allocate the request queue. */
@@ -3880,6 +3996,7 @@ skip_create_disk:
dd->disk->queue = dd->queue;
dd->queue->queuedata = dd;
+skip_create_disk:
/* Initialize the protocol layer. */
wait_for_rebuild = mtip_hw_get_identify(dd);
if (wait_for_rebuild < 0) {
@@ -3976,8 +4093,9 @@ kthread_run_error:
read_capacity_error:
init_hw_cmds_error:
blk_cleanup_queue(dd->queue);
- blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_init_error:
+ blk_mq_free_tag_set(&dd->tags);
+block_queue_alloc_tag_error:
mtip_hw_debugfs_exit(dd);
disk_index_error:
spin_lock(&rssd_index_lock);
@@ -3994,6 +4112,22 @@ protocol_init_error:
return rv;
}
+static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+{
+ struct driver_data *dd = (struct driver_data *)data;
+ struct mtip_cmd *cmd;
+
+ if (likely(!reserv))
+ blk_mq_complete_request(rq, -ENODEV);
+ else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
+
+ cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
+ if (cmd->comp_func)
+ cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
+ cmd, -ENODEV);
+ }
+}
+
/*
* Block layer deinitialization function.
*
@@ -4025,12 +4159,23 @@ static int mtip_block_remove(struct driver_data *dd)
}
}
- if (!dd->sr)
- mtip_standby_drive(dd);
+ if (!dd->sr) {
+ /*
+ * Explicitly wait here for IOs to quiesce,
+ * as mtip_standby_drive() does not wait for outstanding I/O.
+ */
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
+ GFP_KERNEL))
+ mtip_standby_drive(dd);
+ }
else
dev_info(&dd->pdev->dev, "device %s surprise removal\n",
dd->disk->disk_name);
+ blk_mq_freeze_queue_start(dd->queue);
+ blk_mq_stop_hw_queues(dd->queue);
+ blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+
/*
* Delete our gendisk structure. This also removes the device
* from /dev
@@ -4040,7 +4185,8 @@ static int mtip_block_remove(struct driver_data *dd)
dd->bdev = NULL;
}
if (dd->disk) {
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4081,7 +4227,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
- del_gendisk(dd->disk);
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
+ del_gendisk(dd->disk);
if (dd->disk->queue) {
blk_cleanup_queue(dd->queue);
blk_mq_free_tag_set(&dd->tags);
@@ -4426,7 +4573,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
struct driver_data *dd = pci_get_drvdata(pdev);
unsigned long flags, to;
- set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+ set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
spin_lock_irqsave(&dev_lock, flags);
list_del_init(&dd->online_list);
@@ -4443,12 +4590,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
} while (atomic_read(&dd->irq_workers_active) != 0 &&
time_before(jiffies, to));
+ if (!dd->sr)
+ fsync_bdev(dd->bdev);
+
if (atomic_read(&dd->irq_workers_active) != 0) {
dev_warn(&dd->pdev->dev,
"Completion workers still active!\n");
}
- blk_mq_stop_hw_queues(dd->queue);
+ blk_set_queue_dying(dd->queue);
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
/* Clean up the block layer. */
mtip_block_remove(dd);
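
The timeout rework above leans on blk_mq_all_tag_busy_iter(), which walks
every in-flight tag and invokes a busy_tag_iter_fn callback. A minimal sketch
of the pattern under the signatures used in this series; my_requeue_one() is
a hypothetical callback:

    /* Called once per busy request; 'reserved' marks reserved tags. */
    static void my_requeue_one(struct request *rq, void *data, bool reserved)
    {
            struct driver_data *dd = data;

            if (!reserved)
                    set_bit(rq->tag, dd->port->cmds_to_issue);
    }

    /* Walk all in-flight requests of the (single) hardware queue. */
    blk_mq_all_tag_busy_iter(dd->tags.tags[0], my_requeue_one, dd);
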
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 3274784008eb..7617888f7944 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -134,16 +134,24 @@ enum {
MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */
+ MTIP_PF_TO_ACTIVE_BIT = 9, /* timeout handling */
MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
(1 << MTIP_PF_EH_ACTIVE_BIT) |
(1 << MTIP_PF_SE_ACTIVE_BIT) |
- (1 << MTIP_PF_DM_ACTIVE_BIT)),
+ (1 << MTIP_PF_DM_ACTIVE_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
MTIP_PF_ISSUE_CMDS_BIT = 5,
MTIP_PF_REBUILD_BIT = 6,
MTIP_PF_SVC_THD_STOP_BIT = 8,
+ MTIP_PF_SVC_THD_WORK = ((1 << MTIP_PF_EH_ACTIVE_BIT) |
+ (1 << MTIP_PF_ISSUE_CMDS_BIT) |
+ (1 << MTIP_PF_REBUILD_BIT) |
+ (1 << MTIP_PF_SVC_THD_STOP_BIT) |
+ (1 << MTIP_PF_TO_ACTIVE_BIT)),
+
/* below are bit numbers in 'dd_flag' defined in driver_data */
MTIP_DDF_SEC_LOCK_BIT = 0,
MTIP_DDF_REMOVE_PENDING_BIT = 1,
@@ -153,6 +161,7 @@ enum {
MTIP_DDF_RESUME_BIT = 6,
MTIP_DDF_INIT_DONE_BIT = 7,
MTIP_DDF_REBUILD_FAILED_BIT = 8,
+ MTIP_DDF_REMOVAL_BIT = 9,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
(1 << MTIP_DDF_SEC_LOCK_BIT) |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e4c5cc107934..08afbc7a2bb8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -57,10 +57,12 @@ struct nbd_device {
int blksize;
loff_t bytesize;
int xmit_timeout;
+ bool timedout;
bool disconnect; /* a disconnect has been requested by user */
struct timer_list timeout_timer;
- spinlock_t tasks_lock;
+ /* protects initialization and shutdown of the socket */
+ spinlock_t sock_lock;
struct task_struct *task_recv;
struct task_struct *task_send;
@@ -98,6 +100,11 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
return disk_to_dev(nbd->disk);
}
+static bool nbd_is_connected(struct nbd_device *nbd)
+{
+ return !!nbd->task_recv;
+}
+
static const char *nbdcmd_to_ascii(int cmd)
{
switch (cmd) {
@@ -110,6 +117,42 @@ static const char *nbdcmd_to_ascii(int cmd)
return "invalid";
}
+static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
+{
+ bdev->bd_inode->i_size = 0;
+ set_capacity(nbd->disk, 0);
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
+{
+ if (!nbd_is_connected(nbd))
+ return;
+
+ bdev->bd_inode->i_size = nbd->bytesize;
+ set_capacity(nbd->disk, nbd->bytesize >> 9);
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+}
+
+static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
+ int blocksize, int nr_blocks)
+{
+ int ret;
+
+ ret = set_blocksize(bdev, blocksize);
+ if (ret)
+ return ret;
+
+ nbd->blksize = blocksize;
+ nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
+
+ nbd_size_update(nbd, bdev);
+
+ return 0;
+}
+
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
int error = req->errors ? -EIO : 0;
@@ -129,13 +172,20 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req)
*/
static void sock_shutdown(struct nbd_device *nbd)
{
- if (!nbd->sock)
+ spin_lock_irq(&nbd->sock_lock);
+
+ if (!nbd->sock) {
+ spin_unlock_irq(&nbd->sock_lock);
return;
+ }
dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+ sockfd_put(nbd->sock);
nbd->sock = NULL;
- del_timer_sync(&nbd->timeout_timer);
+ spin_unlock_irq(&nbd->sock_lock);
+
+ del_timer(&nbd->timeout_timer);
}
static void nbd_xmit_timeout(unsigned long arg)
@@ -146,19 +196,16 @@ static void nbd_xmit_timeout(unsigned long arg)
if (list_empty(&nbd->queue_head))
return;
- nbd->disconnect = true;
+ spin_lock_irqsave(&nbd->sock_lock, flags);
- spin_lock_irqsave(&nbd->tasks_lock, flags);
+ nbd->timedout = true;
- if (nbd->task_recv)
- force_sig(SIGKILL, nbd->task_recv);
-
- if (nbd->task_send)
- force_sig(SIGKILL, nbd->task_send);
+ if (nbd->sock)
+ kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
+ spin_unlock_irqrestore(&nbd->sock_lock, flags);
- dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
+ dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
}
/*
@@ -171,7 +218,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
int result;
struct msghdr msg;
struct kvec iov;
- sigset_t blocked, oldset;
unsigned long pflags = current->flags;
if (unlikely(!sock)) {
@@ -181,11 +227,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
return -EINVAL;
}
- /* Allow interception of SIGKILL only
- * Don't allow other signals to interrupt the transmission */
- siginitsetinv(&blocked, sigmask(SIGKILL));
- sigprocmask(SIG_SETMASK, &blocked, &oldset);
-
current->flags |= PF_MEMALLOC;
do {
sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
@@ -212,7 +253,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
buf += result;
} while (size > 0);
- sigprocmask(SIG_SETMASK, &oldset, NULL);
tsk_restore_flags(current, pflags, PF_MEMALLOC);
if (!send && nbd->xmit_timeout)
@@ -402,31 +442,28 @@ static struct device_attribute pid_attr = {
.show = pid_show,
};
-static int nbd_thread_recv(struct nbd_device *nbd)
+static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
{
struct request *req;
int ret;
- unsigned long flags;
BUG_ON(nbd->magic != NBD_MAGIC);
sk_set_memalloc(nbd->sock->sk);
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_recv = current;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (ret) {
dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_recv = NULL;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
return ret;
}
+ nbd_size_update(nbd, bdev);
+
while (1) {
req = nbd_read_stat(nbd);
if (IS_ERR(req)) {
@@ -437,21 +474,11 @@ static int nbd_thread_recv(struct nbd_device *nbd)
nbd_end_request(nbd, req);
}
+ nbd_size_clear(nbd, bdev);
+
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_recv = NULL;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
-
- if (signal_pending(current)) {
- ret = kernel_dequeue_signal(NULL);
- dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
- task_pid_nr(current), current->comm, ret);
- mutex_lock(&nbd->tx_lock);
- sock_shutdown(nbd);
- mutex_unlock(&nbd->tx_lock);
- ret = -ETIMEDOUT;
- }
return ret;
}
@@ -544,11 +571,8 @@ static int nbd_thread_send(void *data)
{
struct nbd_device *nbd = data;
struct request *req;
- unsigned long flags;
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_send = current;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
set_user_nice(current, MIN_NICE);
while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
@@ -557,17 +581,6 @@ static int nbd_thread_send(void *data)
kthread_should_stop() ||
!list_empty(&nbd->waiting_queue));
- if (signal_pending(current)) {
- int ret = kernel_dequeue_signal(NULL);
-
- dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
- task_pid_nr(current), current->comm, ret);
- mutex_lock(&nbd->tx_lock);
- sock_shutdown(nbd);
- mutex_unlock(&nbd->tx_lock);
- break;
- }
-
/* extract request */
if (list_empty(&nbd->waiting_queue))
continue;
@@ -582,13 +595,7 @@ static int nbd_thread_send(void *data)
nbd_handle_req(nbd, req);
}
- spin_lock_irqsave(&nbd->tasks_lock, flags);
nbd->task_send = NULL;
- spin_unlock_irqrestore(&nbd->tasks_lock, flags);
-
- /* Clear maybe pending signals */
- if (signal_pending(current))
- kernel_dequeue_signal(NULL);
return 0;
}
@@ -618,8 +625,8 @@ static void nbd_request_handler(struct request_queue *q)
req, req->cmd_type);
if (unlikely(!nbd->sock)) {
- dev_err(disk_to_dev(nbd->disk),
- "Attempted send on closed socket\n");
+ dev_err_ratelimited(disk_to_dev(nbd->disk),
+ "Attempted send on closed socket\n");
req->errors++;
nbd_end_request(nbd, req);
spin_lock_irq(q->queue_lock);
@@ -636,6 +643,61 @@ static void nbd_request_handler(struct request_queue *q)
}
}
+static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
+{
+ int ret = 0;
+
+ spin_lock_irq(&nbd->sock_lock);
+
+ if (nbd->sock) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ nbd->sock = sock;
+
+out:
+ spin_unlock_irq(&nbd->sock_lock);
+
+ return ret;
+}
+
+/* Reset all properties of an NBD device */
+static void nbd_reset(struct nbd_device *nbd)
+{
+ nbd->disconnect = false;
+ nbd->timedout = false;
+ nbd->blksize = 1024;
+ nbd->bytesize = 0;
+ set_capacity(nbd->disk, 0);
+ nbd->flags = 0;
+ nbd->xmit_timeout = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+ del_timer_sync(&nbd->timeout_timer);
+}
+
+static void nbd_bdev_reset(struct block_device *bdev)
+{
+ set_device_ro(bdev, false);
+ bdev->bd_inode->i_size = 0;
+ if (max_part > 0) {
+ blkdev_reread_part(bdev);
+ bdev->bd_invalidated = 1;
+ }
+}
+
+static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
+{
+ if (nbd->flags & NBD_FLAG_READ_ONLY)
+ set_device_ro(bdev, true);
+ if (nbd->flags & NBD_FLAG_SEND_TRIM)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+ if (nbd->flags & NBD_FLAG_SEND_FLUSH)
+ blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+ else
+ blk_queue_flush(nbd->disk->queue, 0);
+}
+
static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
@@ -668,48 +730,40 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
return 0;
}
- case NBD_CLEAR_SOCK: {
- struct socket *sock = nbd->sock;
- nbd->sock = NULL;
+ case NBD_CLEAR_SOCK:
+ sock_shutdown(nbd);
nbd_clear_que(nbd);
BUG_ON(!list_empty(&nbd->queue_head));
BUG_ON(!list_empty(&nbd->waiting_queue));
kill_bdev(bdev);
- if (sock)
- sockfd_put(sock);
return 0;
- }
case NBD_SET_SOCK: {
- struct socket *sock;
int err;
- if (nbd->sock)
- return -EBUSY;
- sock = sockfd_lookup(arg, &err);
- if (sock) {
- nbd->sock = sock;
- if (max_part > 0)
- bdev->bd_invalidated = 1;
- nbd->disconnect = false; /* we're connected now */
- return 0;
- }
- return -EINVAL;
+ struct socket *sock = sockfd_lookup(arg, &err);
+
+ if (!sock)
+ return err;
+
+ err = nbd_set_socket(nbd, sock);
+ if (!err && max_part)
+ bdev->bd_invalidated = 1;
+
+ return err;
}
- case NBD_SET_BLKSIZE:
- nbd->blksize = arg;
- nbd->bytesize &= ~(nbd->blksize-1);
- bdev->bd_inode->i_size = nbd->bytesize;
- set_blocksize(bdev, nbd->blksize);
- set_capacity(nbd->disk, nbd->bytesize >> 9);
- return 0;
+ case NBD_SET_BLKSIZE: {
+ loff_t bsize = div_s64(nbd->bytesize, arg);
+
+ return nbd_size_set(nbd, bdev, arg, bsize);
+ }
case NBD_SET_SIZE:
- nbd->bytesize = arg & ~(nbd->blksize-1);
- bdev->bd_inode->i_size = nbd->bytesize;
- set_blocksize(bdev, nbd->blksize);
- set_capacity(nbd->disk, nbd->bytesize >> 9);
- return 0;
+ return nbd_size_set(nbd, bdev, nbd->blksize,
+ arg / nbd->blksize);
+
+ case NBD_SET_SIZE_BLOCKS:
+ return nbd_size_set(nbd, bdev, nbd->blksize, arg);
case NBD_SET_TIMEOUT:
nbd->xmit_timeout = arg * HZ;
@@ -725,16 +779,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
nbd->flags = arg;
return 0;
- case NBD_SET_SIZE_BLOCKS:
- nbd->bytesize = ((u64) arg) * nbd->blksize;
- bdev->bd_inode->i_size = nbd->bytesize;
- set_blocksize(bdev, nbd->blksize);
- set_capacity(nbd->disk, nbd->bytesize >> 9);
- return 0;
-
case NBD_DO_IT: {
struct task_struct *thread;
- struct socket *sock;
int error;
if (nbd->task_recv)
@@ -744,15 +790,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
mutex_unlock(&nbd->tx_lock);
- if (nbd->flags & NBD_FLAG_READ_ONLY)
- set_device_ro(bdev, true);
- if (nbd->flags & NBD_FLAG_SEND_TRIM)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
- nbd->disk->queue);
- if (nbd->flags & NBD_FLAG_SEND_FLUSH)
- blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
- else
- blk_queue_flush(nbd->disk->queue, 0);
+ nbd_parse_flags(nbd, bdev);
thread = kthread_run(nbd_thread_send, nbd, "%s",
nbd_name(nbd));
@@ -762,29 +800,24 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
}
nbd_dev_dbg_init(nbd);
- error = nbd_thread_recv(nbd);
+ error = nbd_thread_recv(nbd, bdev);
nbd_dev_dbg_close(nbd);
kthread_stop(thread);
mutex_lock(&nbd->tx_lock);
sock_shutdown(nbd);
- sock = nbd->sock;
- nbd->sock = NULL;
nbd_clear_que(nbd);
kill_bdev(bdev);
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
- set_device_ro(bdev, false);
- if (sock)
- sockfd_put(sock);
- nbd->flags = 0;
- nbd->bytesize = 0;
- bdev->bd_inode->i_size = 0;
- set_capacity(nbd->disk, 0);
- if (max_part > 0)
- blkdev_reread_part(bdev);
+ nbd_bdev_reset(bdev);
+
if (nbd->disconnect) /* user requested, ignore socket errors */
- return 0;
+ error = 0;
+ if (nbd->timedout)
+ error = -ETIMEDOUT;
+
+ nbd_reset(nbd);
+
return error;
}
@@ -892,50 +925,23 @@ static const struct file_operations nbd_dbg_flags_ops = {
static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
struct dentry *dir;
- struct dentry *f;
+
+ if (!nbd_dbg_dir)
+ return -EIO;
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
- if (IS_ERR_OR_NULL(dir)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n",
- nbd_name(nbd), PTR_ERR(dir));
- return PTR_ERR(dir);
+ if (!dir) {
+ dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
+ nbd_name(nbd));
+ return -EIO;
}
nbd->dbg_dir = dir;
- f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
-
- f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
-
- f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
-
- f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
-
- f = debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
- if (IS_ERR_OR_NULL(f)) {
- dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n",
- PTR_ERR(f));
- return PTR_ERR(f);
- }
+ debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
+ debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
+ debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
+ debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+ debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
return 0;
}
@@ -950,8 +956,8 @@ static int nbd_dbg_init(void)
struct dentry *dbg_dir;
dbg_dir = debugfs_create_dir("nbd", NULL);
- if (IS_ERR(dbg_dir))
- return PTR_ERR(dbg_dir);
+ if (!dbg_dir)
+ return -EIO;
nbd_dbg_dir = dbg_dir;
@@ -1069,7 +1075,7 @@ static int __init nbd_init(void)
nbd_dev[i].magic = NBD_MAGIC;
INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
spin_lock_init(&nbd_dev[i].queue_lock);
- spin_lock_init(&nbd_dev[i].tasks_lock);
+ spin_lock_init(&nbd_dev[i].sock_lock);
INIT_LIST_HEAD(&nbd_dev[i].queue_head);
mutex_init(&nbd_dev[i].tx_lock);
init_timer(&nbd_dev[i].timeout_timer);
@@ -1077,14 +1083,12 @@ static int __init nbd_init(void)
nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
init_waitqueue_head(&nbd_dev[i].active_wq);
init_waitqueue_head(&nbd_dev[i].waiting_wq);
- nbd_dev[i].blksize = 1024;
- nbd_dev[i].bytesize = 0;
disk->major = NBD_MAJOR;
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
disk->private_data = &nbd_dev[i];
sprintf(disk->disk_name, "nbd%d", i);
- set_capacity(disk, 0);
+ nbd_reset(&nbd_dev[i]);
add_disk(disk);
}
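
The nbd rework replaces signal-based teardown with a sock_lock that
serializes every access to nbd->sock, so the timeout timer can shut the
socket down directly instead of killing the I/O threads. A compressed sketch
of that rule, using the field names from the driver above:

    /* Timer context: take the lock, mark the timeout, kill the socket. */
    static void example_timeout(struct nbd_device *nbd)
    {
            unsigned long flags;

            spin_lock_irqsave(&nbd->sock_lock, flags);
            nbd->timedout = true;
            if (nbd->sock)          /* socket may already be cleared */
                    kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
            spin_unlock_irqrestore(&nbd->sock_lock, flags);
    }
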
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 64a7b5971b57..cab97593ba54 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -742,10 +742,11 @@ static int null_add_dev(void)
add_disk(disk);
+done:
mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
mutex_unlock(&lock);
-done:
+
return 0;
out_cleanup_lightnvm:
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 562b5a4ca7b7..78a39f736c64 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -126,7 +126,7 @@
*/
#include <linux/types.h>
-static bool verbose = 0;
+static int verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);
-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 1740d75e8a32..216a94fed5b4 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -117,7 +117,7 @@
*/
-static bool verbose = 0;
+static int verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
#include <asm/uaccess.h>
-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param_array(drive0, int, NULL, 0);
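
Both paride changes fix a type mismatch: the type argument of module_param()
must agree with the variable's C type, and these drivers treat 'verbose' as a
level rather than an on/off flag. Sketch (the exact level meanings are an
assumption, not spelled out in the hunks):

    static int verbose = 0;        /* 0 = quiet, higher = more output */
    module_param(verbose, int, 0); /* was 'bool', which cannot hold levels */
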
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4a876785b68c..94a1843b0426 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1847,14 +1847,12 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
if (osd_req->r_result < 0)
obj_request->result = osd_req->r_result;
- rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
-
/*
* We support a 64-bit length, but ultimately it has to be
* passed to the block layer, which just supports a 32-bit
* length field.
*/
- obj_request->xferred = osd_req->r_reply_op_len[0];
+ obj_request->xferred = osd_req->r_ops[0].outdata_len;
rbd_assert(obj_request->xferred < (u64)UINT_MAX);
opcode = osd_req->r_ops[0].op;
@@ -1955,7 +1953,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2004,7 +2002,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
rbd_dev = img_request->rbd_dev;
osdc = &rbd_dev->rbd_client->client->osdc;
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
- false, GFP_ATOMIC);
+ false, GFP_NOIO);
if (!osd_req)
return NULL; /* ENOMEM */
@@ -2506,7 +2504,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
bio_chain_clone_range(&bio_list,
&bio_offset,
clone_size,
- GFP_ATOMIC);
+ GFP_NOIO);
if (!obj_request->bio_list)
goto out_unwind;
} else if (type == OBJ_REQUEST_PAGES) {
@@ -5643,18 +5641,12 @@ static void rbd_sysfs_cleanup(void)
static int rbd_slab_init(void)
{
rbd_assert(!rbd_img_request_cache);
- rbd_img_request_cache = kmem_cache_create("rbd_img_request",
- sizeof (struct rbd_img_request),
- __alignof__(struct rbd_img_request),
- 0, NULL);
+ rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
if (!rbd_img_request_cache)
return -ENOMEM;
rbd_assert(!rbd_obj_request_cache);
- rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
- sizeof (struct rbd_obj_request),
- __alignof__(struct rbd_obj_request),
- 0, NULL);
+ rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
if (!rbd_obj_request_cache)
goto out_err;
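
KMEM_CACHE() is shorthand that derives the cache name, object size and
alignment from the struct itself, so the two rbd call sites above are
behavior-preserving rewrites. Sketch with a hypothetical type:

    struct foo_req { u64 id; };            /* hypothetical */
    static struct kmem_cache *foo_cache;

    foo_cache = KMEM_CACHE(foo_req, 0);
    /* expands to roughly:
     *   kmem_cache_create("foo_req", sizeof(struct foo_req),
     *                     __alignof__(struct foo_req), 0, NULL);
     */
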
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ca35495a5be..28cff0d23d82 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -477,8 +477,13 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
struct virtio_blk_config, wce,
&writeback);
+
+ /*
+ * If WCE is not configurable and flush is not available,
+ * assume no writeback cache is in use.
+ */
if (err)
- writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
+ writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);
return writeback;
}
@@ -833,14 +838,14 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
- VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
+ VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
};
static unsigned int features[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
- VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
+ VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
};
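
VIRTIO_BLK_F_WCE and VIRTIO_BLK_F_FLUSH name the same feature bit, FLUSH
being the current spelling and WCE the legacy alias, so the feature-table
edits above are renames rather than behavior changes. From the uapi header
(paraphrased from memory; worth checking include/uapi/linux/virtio_blk.h):

    #define VIRTIO_BLK_F_FLUSH  9                   /* flush supported */
    #define VIRTIO_BLK_F_WCE    VIRTIO_BLK_F_FLUSH  /* deprecated alias */
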
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 876763f7f13e..26aa080e243c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -23,8 +23,7 @@
#include <xen/grant_table.h>
#include "common.h"
-/* Enlarge the array size in order to fully show blkback name. */
-#define BLKBACK_NAME_LEN (20)
+/* The maximum length of the 'ring-ref%u' key name on the XenBus. */
#define RINGREF_NAME_LEN (20)
struct backend_info {
@@ -76,7 +75,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
else
devname = devpath;
- snprintf(buf, BLKBACK_NAME_LEN, "blkback.%d.%s", blkif->domid, devname);
+ snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
kfree(devpath);
return 0;
@@ -85,7 +84,7 @@ static int blkback_name(struct xen_blkif *blkif, char *buf)
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
int err;
- char name[BLKBACK_NAME_LEN];
+ char name[TASK_COMM_LEN];
struct xen_blkif_ring *ring;
int i;
@@ -618,6 +617,14 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
goto fail;
}
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-max-indirect-segments", "%u",
+ MAX_INDIRECT_SEGMENTS);
+ if (err)
+ dev_warn(&dev->dev,
+ "writing %s/feature-max-indirect-segments (%d)",
+ dev->nodename, err);
+
/* Multi-queue: advertise how many queues are supported by us. */
err = xenbus_printf(XBT_NIL, dev->nodename,
"multi-queue-max-queues", "%u", xenblk_max_queues);
@@ -849,11 +856,6 @@ again:
dev->nodename);
goto abort;
}
- err = xenbus_printf(xbt, dev->nodename, "feature-max-indirect-segments", "%u",
- MAX_INDIRECT_SEGMENTS);
- if (err)
- dev_warn(&dev->dev, "writing %s/feature-max-indirect-segments (%d)",
- dev->nodename, err);
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
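
Moving the feature-max-indirect-segments write to probe time also changes
its transaction scope: XBT_NIL writes the key immediately, outside any
XenBus transaction, which suits a capability that never changes after probe.
Sketch of the idiom, as used in the hunk above:

    /* One-time capability advertisement; no transaction needed. */
    err = xenbus_printf(XBT_NIL, dev->nodename,
                        "feature-max-indirect-segments", "%u",
                        MAX_INDIRECT_SEGMENTS);
    if (err)
            dev_warn(&dev->dev,
                     "writing feature-max-indirect-segments (%d)", err);
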
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 83eb9e6bf8b0..6405b6557792 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -125,8 +125,10 @@ static const struct block_device_operations xlvbd_block_fops;
*/
static unsigned int xen_blkif_max_segments = 32;
-module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
-MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
+module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
+ S_IRUGO);
+MODULE_PARM_DESC(max_indirect_segments,
+ "Maximum amount of segments in indirect requests (default is 32)");
static unsigned int xen_blkif_max_queues = 4;
module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
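
module_param_named() decouples the user-visible parameter name from the C
identifier, which is what lets the parameter surface as max_indirect_segments
while the variable keeps its old name. Sketch of the macro's shape:

    static unsigned int xen_blkif_max_segments = 32;
    /* visible as /sys/module/xen_blkfront/parameters/max_indirect_segments */
    module_param_named(max_indirect_segments, xen_blkif_max_segments,
                       uint, S_IRUGO);
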
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index ec6af1595062..cf50fd2e96df 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -169,6 +169,17 @@ config BT_HCIUART_QCA
Say Y here to compile support for QCA protocol.
+config BT_HCIUART_AG6XX
+ bool "Intel AG6XX protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ select BT_INTEL
+ help
+ The Intel/AG6XX protocol support enables Bluetooth HCI over a serial
+ port interface for Intel ibt 2.1 Bluetooth controllers.
+
+ Say Y here to compile support for Intel AG6XX protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 07c9cf381e5a..9c18939fc5c9 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -36,6 +36,7 @@ hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
+hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index fa893c3ec408..47ca4b39d306 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe05f) },
{ USB_DEVICE(0x0489, 0xe076) },
{ USB_DEVICE(0x0489, 0xe078) },
+ { USB_DEVICE(0x0489, 0xe095) },
{ USB_DEVICE(0x04c5, 0x1330) },
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x300d) },
{ USB_DEVICE(0x04CA, 0x300f) },
{ USB_DEVICE(0x04CA, 0x3010) },
+ { USB_DEVICE(0x04CA, 0x3014) },
{ USB_DEVICE(0x0930, 0x0219) },
{ USB_DEVICE(0x0930, 0x021c) },
{ USB_DEVICE(0x0930, 0x0220) },
@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x13d3, 0x3393) },
+ { USB_DEVICE(0x13d3, 0x3395) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },
+ { USB_DEVICE(0x13d3, 0x3472) },
{ USB_DEVICE(0x13d3, 0x3474) },
/* Atheros AR5BBU12 with sflash firmware */
@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
@@ -497,6 +505,7 @@ static int ath3k_probe(struct usb_interface *intf,
/* match device ID in ath3k blacklist table */
if (!id->driver_info) {
const struct usb_device_id *match;
+
match = usb_match_id(intf, ath3k_blist_tbl);
if (match)
id = match;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 0b697946e9bc..fdb44829ab6f 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -467,7 +467,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
err = request_firmware(&fw, fw_name, &hdev->dev);
if (err < 0) {
BT_INFO("%s: BCM: Patch %s not found", hdev->name, fw_name);
- return 0;
+ goto done;
}
btbcm_patchram(hdev, fw);
@@ -501,6 +501,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
+done:
btbcm_check_bdaddr(hdev);
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
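
The btbcm change turns an early 'return 0' into 'goto done' so that the
Bluetooth address check still runs when no patchram firmware ships for the
device. Control-flow sketch of the fixed function:

    err = request_firmware(&fw, fw_name, &hdev->dev);
    if (err < 0)
            goto done;      /* no patch available: not an error */

    /* ... download patchram, re-read version and features ... */

    done:
            btbcm_check_bdaddr(hdev);   /* now runs on both paths */
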
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 6ed8acfcfa9c..c6ef248de5e4 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -371,7 +371,7 @@ static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card,
if (firmwarestat == FIRMWARE_READY)
return 0;
- msleep(10);
+ msleep(100);
}
return -ETIMEDOUT;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a191e318fab8..0d4e372e426d 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
@@ -206,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -227,10 +229,12 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
new file mode 100644
index 000000000000..6923d17a022f
--- /dev/null
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -0,0 +1,337 @@
+/*
+ * Bluetooth HCI UART driver for Intel/AG6xx devices
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btintel.h"
+
+struct ag6xx_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+};
+
+struct pbn_entry {
+ __le32 addr;
+ __le32 plen;
+ __u8 data[0];
+} __packed;
+
+static int ag6xx_open(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx;
+
+ BT_DBG("hu %p", hu);
+
+ ag6xx = kzalloc(sizeof(*ag6xx), GFP_KERNEL);
+ if (!ag6xx)
+ return -ENOMEM;
+
+ skb_queue_head_init(&ag6xx->txq);
+
+ hu->priv = ag6xx;
+ return 0;
+}
+
+static int ag6xx_close(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&ag6xx->txq);
+ kfree_skb(ag6xx->rx_skb);
+ kfree(ag6xx);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int ag6xx_flush(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&ag6xx->txq);
+ return 0;
+}
+
+static struct sk_buff *ag6xx_dequeue(struct hci_uart *hu)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ag6xx->txq);
+ if (!skb)
+ return skb;
+
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ return skb;
+}
+
+static int ag6xx_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ skb_queue_tail(&ag6xx->txq, skb);
+ return 0;
+}
+
+static const struct h4_recv_pkt ag6xx_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
+static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct ag6xx_data *ag6xx = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx_recv_pkts,
+ ARRAY_SIZE(ag6xx_recv_pkts));
+ if (IS_ERR(ag6xx->rx_skb)) {
+ int err = PTR_ERR(ag6xx->rx_skb);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+ ag6xx->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static int intel_mem_write(struct hci_dev *hdev, u32 addr, u32 plen,
+ const void *data)
+{
+	/* A single HCI command can carry at most 247 bytes of patch data:
+	 * HCI cmd header (3), Intel mem write header (6), data (247).
+ */
+ while (plen > 0) {
+ struct sk_buff *skb;
+ u8 cmd_param[253], fragment_len = (plen > 247) ? 247 : plen;
+ __le32 leaddr = cpu_to_le32(addr);
+
+ memcpy(cmd_param, &leaddr, 4);
+ cmd_param[4] = 0;
+ cmd_param[5] = fragment_len;
+ memcpy(cmd_param + 6, data, fragment_len);
+
+ skb = __hci_cmd_sync(hdev, 0xfc8e, fragment_len + 6, cmd_param,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+ kfree_skb(skb);
+
+ plen -= fragment_len;
+ data += fragment_len;
+ addr += fragment_len;
+ }
+
+ return 0;
+}
+
+static int ag6xx_setup(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+ struct intel_version ver;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ char fwname[64];
+ bool patched = false;
+ int err;
+
+ hu->hdev->set_diag = btintel_set_diag;
+ hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+ err = btintel_enter_mfg(hdev);
+ if (err)
+ return err;
+
+ err = btintel_read_version(hdev, &ver);
+ if (err)
+ return err;
+
+ btintel_version_info(hdev, &ver);
+
+ /* The hardware platform number has a fixed value of 0x37 and
+ * for now only accept this single value.
+ */
+ if (ver.hw_platform != 0x37) {
+ bt_dev_err(hdev, "Unsupported Intel hardware platform: 0x%X",
+ ver.hw_platform);
+ return -EINVAL;
+ }
+
+ /* Only the hardware variant iBT 2.1 (AG6XX) is supported by this
+ * firmware setup method.
+ */
+ if (ver.hw_variant != 0x0a) {
+ bt_dev_err(hdev, "Unsupported Intel hardware variant: 0x%x",
+ ver.hw_variant);
+ return -EINVAL;
+ }
+
+ snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bddata",
+ ver.hw_platform, ver.hw_variant);
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to open Intel bddata file: %s (%d)",
+ fwname, err);
+ goto patch;
+ }
+ fw_ptr = fw->data;
+
+ bt_dev_info(hdev, "Applying bddata (%s)", fwname);
+
+ skb = __hci_cmd_sync_ev(hdev, 0xfc2f, fw->size, fw->data,
+ HCI_EV_CMD_STATUS, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Applying bddata failed (%ld)", PTR_ERR(skb));
+ release_firmware(fw);
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ release_firmware(fw);
+
+patch:
+	/* If no patch has been applied, fw_patch_num is always 0x00. A
+	 * non-zero value means the firmware is already patched and needs
+	 * no further patching.
+ */
+ if (ver.fw_patch_num) {
+ bt_dev_info(hdev, "Device is already patched. patch num: %02x",
+ ver.fw_patch_num);
+ patched = true;
+ goto complete;
+ }
+
+ snprintf(fwname, sizeof(fwname),
+ "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.pbn",
+ ver.hw_platform, ver.hw_variant, ver.hw_revision,
+ ver.fw_variant, ver.fw_revision, ver.fw_build_num,
+ ver.fw_build_ww, ver.fw_build_yy);
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+		bt_dev_err(hdev, "Failed to open Intel patch file: %s (%d)",
+ fwname, err);
+ goto complete;
+ }
+ fw_ptr = fw->data;
+
+ bt_dev_info(hdev, "Patching firmware file (%s)", fwname);
+
+ /* PBN patch file contains a list of binary patches to be applied on top
+ * of the embedded firmware. Each patch entry header contains the target
+ * address and patch size.
+ *
+ * Patch entry:
+ * | addr(le) | patch_len(le) | patch_data |
+ * | 4 Bytes | 4 Bytes | n Bytes |
+ *
+ * PBN file is terminated by a patch entry whose address is 0xffffffff.
+ */
+ while (fw->size > fw_ptr - fw->data) {
+ struct pbn_entry *pbn = (void *)fw_ptr;
+ u32 addr, plen;
+
+ if (pbn->addr == 0xffffffff) {
+ bt_dev_info(hdev, "Patching complete");
+ patched = true;
+ break;
+ }
+
+ addr = le32_to_cpu(pbn->addr);
+ plen = le32_to_cpu(pbn->plen);
+
+ if (fw->data + fw->size <= pbn->data + plen) {
+ bt_dev_info(hdev, "Invalid patch len (%d)", plen);
+ break;
+ }
+
+ bt_dev_info(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
+ fw->size);
+
+ err = intel_mem_write(hdev, addr, plen, pbn->data);
+ if (err) {
+ bt_dev_err(hdev, "Patching failed");
+ break;
+ }
+
+ fw_ptr = pbn->data + plen;
+ }
+
+ release_firmware(fw);
+
+complete:
+ /* Exit manufacturing mode and reset */
+ err = btintel_exit_mfg(hdev, true, patched);
+ if (err)
+ return err;
+
+ /* Set the event mask for Intel specific vendor events. This enables
+ * a few extra events that are useful during general operation.
+ */
+ btintel_set_event_mask_mfg(hdev, false);
+
+ btintel_check_bdaddr(hdev);
+ return 0;
+}
+
+static const struct hci_uart_proto ag6xx_proto = {
+ .id = HCI_UART_AG6XX,
+ .name = "AG6XX",
+ .manufacturer = 2,
+ .open = ag6xx_open,
+ .close = ag6xx_close,
+ .flush = ag6xx_flush,
+ .setup = ag6xx_setup,
+ .recv = ag6xx_recv,
+ .enqueue = ag6xx_enqueue,
+ .dequeue = ag6xx_dequeue,
+};
+
+int __init ag6xx_init(void)
+{
+ return hci_uart_register_proto(&ag6xx_proto);
+}
+
+int __exit ag6xx_deinit(void)
+{
+ return hci_uart_unregister_proto(&ag6xx_proto);
+}
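
The PBN loader above boils down to two small pieces of logic: walking length-prefixed patch entries until the 0xffffffff terminator, and splitting each entry into 247-byte memory writes. A minimal user-space sketch of the same wire format (illustrative only, not part of the patch; write_fragment() is a hypothetical stand-in for the 0xfc8e HCI memory-write command, and le32toh()/<endian.h> are glibc conveniences):

#include <endian.h>
#include <stddef.h>
#include <stdint.h>

struct pbn_entry {
	uint32_t addr;	/* little-endian target address */
	uint32_t plen;	/* little-endian patch length */
	uint8_t data[];	/* plen bytes of patch data */
};

/* Hypothetical transport hook, one call per <= 247-byte fragment. */
extern int write_fragment(uint32_t addr, const uint8_t *data, uint8_t len);

static int apply_pbn(const uint8_t *img, size_t size)
{
	const uint8_t *p = img;

	while ((size_t)(p - img) + sizeof(struct pbn_entry) <= size) {
		const struct pbn_entry *e = (const void *)p;
		uint32_t addr = le32toh(e->addr);
		uint32_t plen = le32toh(e->plen);

		if (e->addr == 0xffffffff)	/* terminator entry */
			return 0;
		if (plen > size - (size_t)(e->data - img))
			return -1;	/* truncated image */

		for (uint32_t off = 0; off < plen; off += 247) {
			uint8_t n = (plen - off > 247) ? 247 : plen - off;

			if (write_fragment(addr + off, e->data + off, n))
				return -1;
		}
		p = e->data + plen;
	}
	return -1;	/* ran off the end without seeing a terminator */
}
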
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5f3de181e744..d8881dc0600c 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -820,10 +820,13 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E3D", 0 },
{ "BCM2E3F", 0 },
{ "BCM2E40", 0 },
+ { "BCM2E54", 0 },
+ { "BCM2E55", 0 },
{ "BCM2E64", 0 },
{ "BCM2E65", 0 },
{ "BCM2E67", 0 },
{ "BCM2E7B", 0 },
+ { "BCM2E7C", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 3d63ea37bd4c..91d605147b10 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -488,7 +488,7 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
clear_bit(STATE_BOOTING, &intel->flags);
/* In case of timeout, try to continue anyway */
- if (err && err != ETIMEDOUT)
+ if (err && err != -ETIMEDOUT)
return err;
bt_dev_info(hdev, "Change controller speed to %d", speed);
@@ -581,7 +581,7 @@ static int intel_setup(struct hci_uart *hu)
clear_bit(STATE_BOOTING, &intel->flags);
/* In case of timeout, try to continue anyway */
- if (err && err != ETIMEDOUT)
+ if (err && err != -ETIMEDOUT)
return err;
set_bit(STATE_BOOTLOADER, &intel->flags);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 73202624133b..c00168a5bb80 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -804,6 +804,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_QCA
qca_init();
#endif
+#ifdef CONFIG_BT_HCIUART_AG6XX
+ ag6xx_init();
+#endif
return 0;
}
@@ -836,6 +839,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_QCA
qca_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_AG6XX
+ ag6xx_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 82c92f1b65b4..4814ff08f427 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 9
+#define HCI_UART_MAX_PROTO 10
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -46,6 +46,7 @@
#define HCI_UART_INTEL 6
#define HCI_UART_BCM 7
#define HCI_UART_QCA 8
+#define HCI_UART_AG6XX 9
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -182,3 +183,8 @@ int bcm_deinit(void);
int qca_init(void);
int qca_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_AG6XX
+int ag6xx_init(void);
+int ag6xx_deinit(void);
+#endif
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 9a92c072a485..d4a3a3133da5 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -34,15 +34,15 @@ config ARM_CCI400_PORT_CTRL
Low level power management driver for CCI400 cache coherent
interconnect for ARM platforms.
-config ARM_CCI500_PMU
- bool "ARM CCI500 PMU support"
+config ARM_CCI5xx_PMU
+ bool "ARM CCI-500/CCI-550 PMU support"
depends on (ARM && CPU_V7) || ARM64
depends on PERF_EVENTS
select ARM_CCI_PMU
help
- Support for PMU events monitoring on the ARM CCI-500 cache coherent
- interconnect. CCI-500 provides 8 independent event counters, which
- can count events pertaining to the slave/master interfaces as well
+ Support for PMU events monitoring on the ARM CCI-500/CCI-550 cache
+ coherent interconnects. Both of them provide 8 independent event counters,
+ which can count events pertaining to the slave/master interfaces as well
as the internal events to the CCI.
If unsure, say Y
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 577cc4bf6a9d..a49b28378d59 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -52,8 +52,9 @@ static const struct of_device_id arm_cci_matches[] = {
#ifdef CONFIG_ARM_CCI400_COMMON
{.compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
#endif
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
{ .compatible = "arm,cci-500", },
+ { .compatible = "arm,cci-550", },
#endif
{},
};
@@ -92,7 +93,7 @@ static const struct of_device_id arm_cci_matches[] = {
enum {
CCI_IF_SLAVE,
CCI_IF_MASTER,
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
CCI_IF_GLOBAL,
#endif
CCI_IF_MAX,
@@ -121,13 +122,12 @@ struct cci_pmu_model {
u32 fixed_hw_cntrs;
u32 num_hw_cntrs;
u32 cntr_size;
- u64 nformat_attrs;
- u64 nevent_attrs;
- struct dev_ext_attribute *format_attrs;
- struct dev_ext_attribute *event_attrs;
+ struct attribute **format_attrs;
+ struct attribute **event_attrs;
struct event_range event_ranges[CCI_IF_MAX];
int (*validate_hw_event)(struct cci_pmu *, unsigned long);
int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
+ void (*write_counters)(struct cci_pmu *, unsigned long *);
};
static struct cci_pmu_model cci_pmu_models[];
@@ -155,19 +155,24 @@ enum cci_models {
CCI400_R0,
CCI400_R1,
#endif
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
CCI500_R0,
+ CCI550_R0,
#endif
CCI_MODEL_MAX
};
+static void pmu_write_counters(struct cci_pmu *cci_pmu,
+ unsigned long *mask);
static ssize_t cci_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf);
static ssize_t cci_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
-#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
- { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config }
+#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \
+ &((struct dev_ext_attribute[]) { \
+ { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \
+ })[0].attr.attr
#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
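
The reworked CCI_EXT_ATTR_ENTRY above leans on a C99 property: a compound literal at file scope has static storage duration, so the macro can mint an anonymous dev_ext_attribute and hand back a pointer to its embedded struct attribute, letting the model tables become plain NULL-terminated attribute arrays with no runtime alloc_attrs() step. A self-contained sketch of the idiom (invented names, not the driver's):

struct ext_attr {
	struct { const char *name; } attr;	/* stand-in for the embedded attribute */
	const void *var;
};

#define EXT_ATTR_PTR(_name, _cfg)				\
	(&((struct ext_attr[]) {				\
		{ { .name = (_name) }, (const void *)(_cfg) }	\
	})[0].attr)

/* NULL-terminated table built entirely at compile time. */
static const void *format_attrs[] = {
	EXT_ATTR_PTR("event", "config:0-4"),
	EXT_ATTR_PTR("source", "config:5-7"),
	NULL,
};
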
@@ -242,12 +247,13 @@ enum cci400_perf_events {
static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
-static struct dev_ext_attribute cci400_pmu_format_attrs[] = {
+static struct attribute *cci400_pmu_format_attrs[] = {
CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
+ NULL
};
-static struct dev_ext_attribute cci400_r0_pmu_event_attrs[] = {
+static struct attribute *cci400_r0_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
@@ -279,9 +285,10 @@ static struct dev_ext_attribute cci400_r0_pmu_event_attrs[] = {
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
/* Special event for cycles counter */
CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
+ NULL
};
-static struct dev_ext_attribute cci400_r1_pmu_event_attrs[] = {
+static struct attribute *cci400_r1_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
@@ -325,6 +332,7 @@ static struct dev_ext_attribute cci400_r1_pmu_event_attrs[] = {
CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
/* Special event for cycles counter */
CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
+ NULL
};
static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
@@ -420,72 +428,68 @@ static inline struct cci_pmu_model *probe_cci_model(struct platform_device *pdev
}
#endif /* CONFIG_ARM_CCI400_PMU */
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
/*
- * CCI500 provides 8 independent event counters that can count
- * any of the events available.
- *
- * CCI500 PMU event id is an 9-bit value made of two parts.
+ * CCI5xx PMU event id is a 9-bit value made of two parts.
* bits [8:5] - Source for the event
- * 0x0-0x6 - Slave interfaces
- * 0x8-0xD - Master interfaces
- * 0xf - Global Events
- * 0x7,0xe - Reserved
- *
* bits [4:0] - Event code (specific to type of interface)
+ *
*/
/* Port ids */
-#define CCI500_PORT_S0 0x0
-#define CCI500_PORT_S1 0x1
-#define CCI500_PORT_S2 0x2
-#define CCI500_PORT_S3 0x3
-#define CCI500_PORT_S4 0x4
-#define CCI500_PORT_S5 0x5
-#define CCI500_PORT_S6 0x6
-
-#define CCI500_PORT_M0 0x8
-#define CCI500_PORT_M1 0x9
-#define CCI500_PORT_M2 0xa
-#define CCI500_PORT_M3 0xb
-#define CCI500_PORT_M4 0xc
-#define CCI500_PORT_M5 0xd
-
-#define CCI500_PORT_GLOBAL 0xf
-
-#define CCI500_PMU_EVENT_MASK 0x1ffUL
-#define CCI500_PMU_EVENT_SOURCE_SHIFT 0x5
-#define CCI500_PMU_EVENT_SOURCE_MASK 0xf
-#define CCI500_PMU_EVENT_CODE_SHIFT 0x0
-#define CCI500_PMU_EVENT_CODE_MASK 0x1f
-
-#define CCI500_PMU_EVENT_SOURCE(event) \
- ((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK)
-#define CCI500_PMU_EVENT_CODE(event) \
- ((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK)
-
-#define CCI500_SLAVE_PORT_MIN_EV 0x00
-#define CCI500_SLAVE_PORT_MAX_EV 0x1f
-#define CCI500_MASTER_PORT_MIN_EV 0x00
-#define CCI500_MASTER_PORT_MAX_EV 0x06
-#define CCI500_GLOBAL_PORT_MIN_EV 0x00
-#define CCI500_GLOBAL_PORT_MAX_EV 0x0f
-
-
-#define CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
- CCI_EXT_ATTR_ENTRY(_name, cci500_pmu_global_event_show, \
+#define CCI5xx_PORT_S0 0x0
+#define CCI5xx_PORT_S1 0x1
+#define CCI5xx_PORT_S2 0x2
+#define CCI5xx_PORT_S3 0x3
+#define CCI5xx_PORT_S4 0x4
+#define CCI5xx_PORT_S5 0x5
+#define CCI5xx_PORT_S6 0x6
+
+#define CCI5xx_PORT_M0 0x8
+#define CCI5xx_PORT_M1 0x9
+#define CCI5xx_PORT_M2 0xa
+#define CCI5xx_PORT_M3 0xb
+#define CCI5xx_PORT_M4 0xc
+#define CCI5xx_PORT_M5 0xd
+#define CCI5xx_PORT_M6 0xe
+
+#define CCI5xx_PORT_GLOBAL 0xf
+
+#define CCI5xx_PMU_EVENT_MASK 0x1ffUL
+#define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5
+#define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf
+#define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0
+#define CCI5xx_PMU_EVENT_CODE_MASK 0x1f
+
+#define CCI5xx_PMU_EVENT_SOURCE(event) \
+ ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
+#define CCI5xx_PMU_EVENT_CODE(event) \
+ ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
+
+#define CCI5xx_SLAVE_PORT_MIN_EV 0x00
+#define CCI5xx_SLAVE_PORT_MAX_EV 0x1f
+#define CCI5xx_MASTER_PORT_MIN_EV 0x00
+#define CCI5xx_MASTER_PORT_MAX_EV 0x06
+#define CCI5xx_GLOBAL_PORT_MIN_EV 0x00
+#define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f
+
+
+#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
+ CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
(unsigned long) _config)
-static ssize_t cci500_pmu_global_event_show(struct device *dev,
+static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
-static struct dev_ext_attribute cci500_pmu_format_attrs[] = {
+static struct attribute *cci5xx_pmu_format_attrs[] = {
CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
+ NULL,
};
-static struct dev_ext_attribute cci500_pmu_event_attrs[] = {
+static struct attribute *cci5xx_pmu_event_attrs[] = {
/* Slave events */
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
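
With the layout above, decoding an event id is two mask-and-shift operations; for example, hw_event 0x1e3 splits into source 0xf (the global interface) and code 0x3. A worked sketch (illustrative, using the same shifts and masks as the CCI5xx macros):

#include <stdio.h>

#define EVENT_SOURCE(e)	(((e) >> 5) & 0xf)	/* bits [8:5] */
#define EVENT_CODE(e)	((e) & 0x1f)		/* bits [4:0] */

int main(void)
{
	unsigned long ev = 0x1e3;

	/* Prints: source=0xf code=0x3 */
	printf("source=0x%lx code=0x%lx\n",
	       EVENT_SOURCE(ev), EVENT_CODE(ev));
	return 0;
}
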
@@ -530,63 +534,73 @@ static struct dev_ext_attribute cci500_pmu_event_attrs[] = {
CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),
/* Global events */
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
- CCI500_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
+ NULL
};
-static ssize_t cci500_pmu_global_event_show(struct device *dev,
+static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dev_ext_attribute *eattr = container_of(attr,
struct dev_ext_attribute, attr);
/* Global events have single fixed source code */
return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
- (unsigned long)eattr->var, CCI500_PORT_GLOBAL);
+ (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}
+/*
+ * CCI500 provides 8 independent event counters that can count
+ * any of the events available.
+ * CCI500 PMU event source ids
+ * 0x0-0x6 - Slave interfaces
+ * 0x8-0xD - Master interfaces
+ * 0xf - Global Events
+ * 0x7,0xe - Reserved
+ */
static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
unsigned long hw_event)
{
- u32 ev_source = CCI500_PMU_EVENT_SOURCE(hw_event);
- u32 ev_code = CCI500_PMU_EVENT_CODE(hw_event);
+ u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
+ u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
int if_type;
- if (hw_event & ~CCI500_PMU_EVENT_MASK)
+ if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
return -ENOENT;
switch (ev_source) {
- case CCI500_PORT_S0:
- case CCI500_PORT_S1:
- case CCI500_PORT_S2:
- case CCI500_PORT_S3:
- case CCI500_PORT_S4:
- case CCI500_PORT_S5:
- case CCI500_PORT_S6:
+ case CCI5xx_PORT_S0:
+ case CCI5xx_PORT_S1:
+ case CCI5xx_PORT_S2:
+ case CCI5xx_PORT_S3:
+ case CCI5xx_PORT_S4:
+ case CCI5xx_PORT_S5:
+ case CCI5xx_PORT_S6:
if_type = CCI_IF_SLAVE;
break;
- case CCI500_PORT_M0:
- case CCI500_PORT_M1:
- case CCI500_PORT_M2:
- case CCI500_PORT_M3:
- case CCI500_PORT_M4:
- case CCI500_PORT_M5:
+ case CCI5xx_PORT_M0:
+ case CCI5xx_PORT_M1:
+ case CCI5xx_PORT_M2:
+ case CCI5xx_PORT_M3:
+ case CCI5xx_PORT_M4:
+ case CCI5xx_PORT_M5:
if_type = CCI_IF_MASTER;
break;
- case CCI500_PORT_GLOBAL:
+ case CCI5xx_PORT_GLOBAL:
if_type = CCI_IF_GLOBAL;
break;
default:
@@ -599,7 +613,118 @@ static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
return -ENOENT;
}
-#endif /* CONFIG_ARM_CCI500_PMU */
+
+/*
+ * CCI550 provides 8 independent event counters that can count
+ * any of the events available.
+ * CCI550 PMU event source ids
+ * 0x0-0x6 - Slave interfaces
+ * 0x8-0xe - Master interfaces
+ * 0xf - Global Events
+ * 0x7 - Reserved
+ */
+static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
+ unsigned long hw_event)
+{
+ u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
+ u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
+ int if_type;
+
+ if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
+ return -ENOENT;
+
+ switch (ev_source) {
+ case CCI5xx_PORT_S0:
+ case CCI5xx_PORT_S1:
+ case CCI5xx_PORT_S2:
+ case CCI5xx_PORT_S3:
+ case CCI5xx_PORT_S4:
+ case CCI5xx_PORT_S5:
+ case CCI5xx_PORT_S6:
+ if_type = CCI_IF_SLAVE;
+ break;
+ case CCI5xx_PORT_M0:
+ case CCI5xx_PORT_M1:
+ case CCI5xx_PORT_M2:
+ case CCI5xx_PORT_M3:
+ case CCI5xx_PORT_M4:
+ case CCI5xx_PORT_M5:
+ case CCI5xx_PORT_M6:
+ if_type = CCI_IF_MASTER;
+ break;
+ case CCI5xx_PORT_GLOBAL:
+ if_type = CCI_IF_GLOBAL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
+ ev_code <= cci_pmu->model->event_ranges[if_type].max)
+ return hw_event;
+
+ return -ENOENT;
+}
+
+#endif /* CONFIG_ARM_CCI5xx_PMU */
+
+/*
+ * Program the CCI PMU counters which have PERF_HES_ARCH set
+ * with the event period and mark them ready before we enable
+ * the PMU.
+ */
+static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
+{
+ int i;
+ struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
+
+ DECLARE_BITMAP(mask, cci_pmu->num_cntrs);
+
+ bitmap_zero(mask, cci_pmu->num_cntrs);
+ for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
+ struct perf_event *event = cci_hw->events[i];
+
+ if (WARN_ON(!event))
+ continue;
+
+ /* Leave the events which are not counting */
+ if (event->hw.state & PERF_HES_STOPPED)
+ continue;
+ if (event->hw.state & PERF_HES_ARCH) {
+ set_bit(i, mask);
+ event->hw.state &= ~PERF_HES_ARCH;
+ }
+ }
+
+ pmu_write_counters(cci_pmu, mask);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
+{
+ u32 val;
+
+ /* Enable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
+{
+ cci_pmu_sync_counters(cci_pmu);
+ __cci_pmu_enable_nosync(cci_pmu);
+}
+
+/* Should be called with cci_pmu->hw_events->pmu_lock held */
+static void __cci_pmu_disable(void)
+{
+ u32 val;
+
+ /* Disable all the PMU counters. */
+ val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+ writel(val, cci_ctrl_base + CCI_PMCR);
+}
static ssize_t cci_pmu_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -633,8 +758,8 @@ static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offs
static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
int idx, unsigned int offset)
{
- return writel_relaxed(value, cci_pmu->base +
- CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
+ writel_relaxed(value, cci_pmu->base +
+ CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}
static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
@@ -647,12 +772,56 @@ static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
}
+static bool __maybe_unused
+pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
+{
+ return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
+}
+
static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
{
pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
}
/*
+ * For all counters on the CCI-PMU, disable any 'enabled' counters,
+ * saving the changed counters in the mask, so that we can restore
+ * it later using pmu_restore_counters. The mask is private to the
+ * caller. We cannot rely on the used_mask maintained by the CCI_PMU
+ * as it only tells us if the counter is assigned to perf_event or not.
+ * The state of the perf_event cannot be locked by the PMU layer, hence
+ * we check the individual counter status (which can be locked by
+ * cci_pmu->hw_events->pmu_lock).
+ *
+ * @mask should be initialised to empty by the caller.
+ */
+static void __maybe_unused
+pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ int i;
+
+ for (i = 0; i < cci_pmu->num_cntrs; i++) {
+ if (pmu_counter_is_enabled(cci_pmu, i)) {
+ set_bit(i, mask);
+ pmu_disable_counter(cci_pmu, i);
+ }
+ }
+}
+
+/*
+ * Restore the status of the counters. Reversal of the pmu_save_counters().
+ * For each counter set in the mask, enable the counter back.
+ */
+static void __maybe_unused
+pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ int i;
+
+ for_each_set_bit(i, mask, cci_pmu->num_cntrs)
+ pmu_enable_counter(cci_pmu, i);
+}
+
+/*
* Returns the number of programmable counters actually implemented
* by the cci
*/
@@ -754,18 +923,98 @@ static u32 pmu_read_counter(struct perf_event *event)
return value;
}
-static void pmu_write_counter(struct perf_event *event, u32 value)
+static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
{
- struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
- struct hw_perf_event *hw_counter = &event->hw;
- int idx = hw_counter->idx;
+ pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
+}
- if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
- dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ int i;
+ struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
+
+ for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
+ struct perf_event *event = cci_hw->events[i];
+
+ if (WARN_ON(!event))
+ continue;
+ pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
+ }
+}
+
+static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ if (cci_pmu->model->write_counters)
+ cci_pmu->model->write_counters(cci_pmu, mask);
else
- pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
+ __pmu_write_counters(cci_pmu, mask);
}
+#ifdef CONFIG_ARM_CCI5xx_PMU
+
+/*
+ * CCI-500/CCI-550 have advanced power-saving policies that can gate the
+ * clocks to the PMU counters, making writes to them ineffective.
+ * The only way to write to those counters is when the global counters
+ * are enabled and the particular counter is enabled.
+ *
+ * So we do the following :
+ *
+ * 1) Disable all the PMU counters, saving their current state
+ * 2) Enable the global PMU profiling, now that all counters are
+ * disabled.
+ *
+ * For each counter to be programmed, repeat steps 3-7:
+ *
+ * 3) Write an invalid event code to the event control register for the
+ *    counter, so that the counters are not modified.
+ * 4) Enable the counter control for the counter.
+ * 5) Set the counter value
+ * 6) Disable the counter
+ * 7) Restore the event in the target counter
+ *
+ * 8) Disable the global PMU.
+ * 9) Restore the status of the rest of the counters.
+ *
+ * We choose an event which for CCI-5xx is guaranteed not to count.
+ * We use the highest possible event code (0x1f) for the master interface 0.
+ */
+#define CCI5xx_INVALID_EVENT ((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
+ (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
+static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
+{
+ int i;
+ DECLARE_BITMAP(saved_mask, cci_pmu->num_cntrs);
+
+ bitmap_zero(saved_mask, cci_pmu->num_cntrs);
+ pmu_save_counters(cci_pmu, saved_mask);
+
+ /*
+ * Now that all the counters are disabled, we can safely turn the PMU on,
+ * without syncing the status of the counters
+ */
+ __cci_pmu_enable_nosync(cci_pmu);
+
+ for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
+ struct perf_event *event = cci_pmu->hw_events.events[i];
+
+ if (WARN_ON(!event))
+ continue;
+
+ pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
+ pmu_enable_counter(cci_pmu, i);
+ pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
+ pmu_disable_counter(cci_pmu, i);
+ pmu_set_event(cci_pmu, i, event->hw.config_base);
+ }
+
+ __cci_pmu_disable();
+
+ pmu_restore_counters(cci_pmu, saved_mask);
+}
+
+#endif /* CONFIG_ARM_CCI5xx_PMU */
+
static u64 pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -789,7 +1038,7 @@ static void pmu_read(struct perf_event *event)
pmu_event_update(event);
}
-void pmu_event_set_period(struct perf_event *event)
+static void pmu_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/*
@@ -800,7 +1049,14 @@ void pmu_event_set_period(struct perf_event *event)
*/
u64 val = 1ULL << 31;
local64_set(&hwc->prev_count, val);
- pmu_write_counter(event, val);
+
+ /*
+ * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose
+	 * values need to be sync-ed with the s/w state before the PMU is
+ * enabled.
+ * Mark this counter for sync.
+ */
+ hwc->state |= PERF_HES_ARCH;
}
static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
@@ -811,6 +1067,9 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
int idx, handled = IRQ_NONE;
raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Disable the PMU while we walk through the counters */
+ __cci_pmu_disable();
/*
* Iterate over counters and update the corresponding perf events.
* This should work regardless of whether we have per-counter overflow
@@ -818,13 +1077,10 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
*/
for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
struct perf_event *event = events->events[idx];
- struct hw_perf_event *hw_counter;
if (!event)
continue;
- hw_counter = &event->hw;
-
/* Did this counter overflow? */
if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
CCI_PMU_OVRFLW_FLAG))
@@ -837,6 +1093,9 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
pmu_event_set_period(event);
handled = IRQ_HANDLED;
}
+
+ /* Enable the PMU and sync possibly overflowed counters */
+ __cci_pmu_enable_sync(cci_pmu);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
return IRQ_RETVAL(handled);
@@ -875,16 +1134,12 @@ static void cci_pmu_enable(struct pmu *pmu)
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
unsigned long flags;
- u32 val;
if (!enabled)
return;
raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
-
- /* Enable all the PMU counters. */
- val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
- writel(val, cci_ctrl_base + CCI_PMCR);
+ __cci_pmu_enable_sync(cci_pmu);
raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
@@ -894,13 +1149,9 @@ static void cci_pmu_disable(struct pmu *pmu)
struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
unsigned long flags;
- u32 val;
raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
-
- /* Disable all the PMU counters. */
- val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
- writel(val, cci_ctrl_base + CCI_PMCR);
+ __cci_pmu_disable();
raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}
@@ -1176,9 +1427,8 @@ static int cci_pmu_event_init(struct perf_event *event)
static ssize_t pmu_cpumask_attr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct dev_ext_attribute *eattr = container_of(attr,
- struct dev_ext_attribute, attr);
- struct cci_pmu *cci_pmu = eattr->var;
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
cpumask_pr_args(&cci_pmu->cpus));
@@ -1187,13 +1437,11 @@ static ssize_t pmu_cpumask_attr_show(struct device *dev,
return n;
}
-static struct dev_ext_attribute pmu_cpumask_attr = {
- __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL),
- NULL, /* Populated in cci_pmu_init */
-};
+static struct device_attribute pmu_cpumask_attr =
+ __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);
static struct attribute *pmu_attrs[] = {
- &pmu_cpumask_attr.attr.attr,
+ &pmu_cpumask_attr.attr,
NULL,
};
@@ -1218,60 +1466,14 @@ static const struct attribute_group *pmu_attr_groups[] = {
NULL
};
-static struct attribute **alloc_attrs(struct platform_device *pdev,
- int n, struct dev_ext_attribute *source)
-{
- int i;
- struct attribute **attrs;
-
- /* Alloc n + 1 (for terminating NULL) */
- attrs = devm_kcalloc(&pdev->dev, n + 1, sizeof(struct attribute *),
- GFP_KERNEL);
- if (!attrs)
- return attrs;
- for(i = 0; i < n; i++)
- attrs[i] = &source[i].attr.attr;
- return attrs;
-}
-
-static int cci_pmu_init_attrs(struct cci_pmu *cci_pmu, struct platform_device *pdev)
-{
- const struct cci_pmu_model *model = cci_pmu->model;
- struct attribute **attrs;
-
- /*
- * All allocations below are managed, hence doesn't need to be
- * free'd explicitly in case of an error.
- */
-
- if (model->nevent_attrs) {
- attrs = alloc_attrs(pdev, model->nevent_attrs,
- model->event_attrs);
- if (!attrs)
- return -ENOMEM;
- pmu_event_attr_group.attrs = attrs;
- }
- if (model->nformat_attrs) {
- attrs = alloc_attrs(pdev, model->nformat_attrs,
- model->format_attrs);
- if (!attrs)
- return -ENOMEM;
- pmu_format_attr_group.attrs = attrs;
- }
- pmu_cpumask_attr.var = cci_pmu;
-
- return 0;
-}
-
static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
- char *name = cci_pmu->model->name;
+ const struct cci_pmu_model *model = cci_pmu->model;
+ char *name = model->name;
u32 num_cntrs;
- int rc;
- rc = cci_pmu_init_attrs(cci_pmu, pdev);
- if (rc)
- return rc;
+ pmu_event_attr_group.attrs = model->event_attrs;
+ pmu_format_attr_group.attrs = model->format_attrs;
cci_pmu->pmu = (struct pmu) {
.name = cci_pmu->model->name,
@@ -1314,7 +1516,7 @@ static int cci_pmu_cpu_notifier(struct notifier_block *self,
if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
break;
target = cpumask_any_but(cpu_online_mask, cpu);
- if (target < 0) // UP, last CPU
+ if (target >= nr_cpu_ids) // UP, last CPU
break;
/*
* TODO: migrate context once core races on event->ctx have
@@ -1336,9 +1538,7 @@ static struct cci_pmu_model cci_pmu_models[] = {
.num_hw_cntrs = 4,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
- .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs),
.event_attrs = cci400_r0_pmu_event_attrs,
- .nevent_attrs = ARRAY_SIZE(cci400_r0_pmu_event_attrs),
.event_ranges = {
[CCI_IF_SLAVE] = {
CCI400_R0_SLAVE_PORT_MIN_EV,
@@ -1358,9 +1558,7 @@ static struct cci_pmu_model cci_pmu_models[] = {
.num_hw_cntrs = 4,
.cntr_size = SZ_4K,
.format_attrs = cci400_pmu_format_attrs,
- .nformat_attrs = ARRAY_SIZE(cci400_pmu_format_attrs),
.event_attrs = cci400_r1_pmu_event_attrs,
- .nevent_attrs = ARRAY_SIZE(cci400_r1_pmu_event_attrs),
.event_ranges = {
[CCI_IF_SLAVE] = {
CCI400_R1_SLAVE_PORT_MIN_EV,
@@ -1375,31 +1573,54 @@ static struct cci_pmu_model cci_pmu_models[] = {
.get_event_idx = cci400_get_event_idx,
},
#endif
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
[CCI500_R0] = {
.name = "CCI_500",
.fixed_hw_cntrs = 0,
.num_hw_cntrs = 8,
.cntr_size = SZ_64K,
- .format_attrs = cci500_pmu_format_attrs,
- .nformat_attrs = ARRAY_SIZE(cci500_pmu_format_attrs),
- .event_attrs = cci500_pmu_event_attrs,
- .nevent_attrs = ARRAY_SIZE(cci500_pmu_event_attrs),
+ .format_attrs = cci5xx_pmu_format_attrs,
+ .event_attrs = cci5xx_pmu_event_attrs,
.event_ranges = {
[CCI_IF_SLAVE] = {
- CCI500_SLAVE_PORT_MIN_EV,
- CCI500_SLAVE_PORT_MAX_EV,
+ CCI5xx_SLAVE_PORT_MIN_EV,
+ CCI5xx_SLAVE_PORT_MAX_EV,
},
[CCI_IF_MASTER] = {
- CCI500_MASTER_PORT_MIN_EV,
- CCI500_MASTER_PORT_MAX_EV,
+ CCI5xx_MASTER_PORT_MIN_EV,
+ CCI5xx_MASTER_PORT_MAX_EV,
},
[CCI_IF_GLOBAL] = {
- CCI500_GLOBAL_PORT_MIN_EV,
- CCI500_GLOBAL_PORT_MAX_EV,
+ CCI5xx_GLOBAL_PORT_MIN_EV,
+ CCI5xx_GLOBAL_PORT_MAX_EV,
},
},
.validate_hw_event = cci500_validate_hw_event,
+ .write_counters = cci5xx_pmu_write_counters,
+ },
+ [CCI550_R0] = {
+ .name = "CCI_550",
+ .fixed_hw_cntrs = 0,
+ .num_hw_cntrs = 8,
+ .cntr_size = SZ_64K,
+ .format_attrs = cci5xx_pmu_format_attrs,
+ .event_attrs = cci5xx_pmu_event_attrs,
+ .event_ranges = {
+ [CCI_IF_SLAVE] = {
+ CCI5xx_SLAVE_PORT_MIN_EV,
+ CCI5xx_SLAVE_PORT_MAX_EV,
+ },
+ [CCI_IF_MASTER] = {
+ CCI5xx_MASTER_PORT_MIN_EV,
+ CCI5xx_MASTER_PORT_MAX_EV,
+ },
+ [CCI_IF_GLOBAL] = {
+ CCI5xx_GLOBAL_PORT_MIN_EV,
+ CCI5xx_GLOBAL_PORT_MAX_EV,
+ },
+ },
+ .validate_hw_event = cci550_validate_hw_event,
+ .write_counters = cci5xx_pmu_write_counters,
},
#endif
};
@@ -1419,11 +1640,15 @@ static const struct of_device_id arm_cci_pmu_matches[] = {
.data = &cci_pmu_models[CCI400_R1],
},
#endif
-#ifdef CONFIG_ARM_CCI500_PMU
+#ifdef CONFIG_ARM_CCI5xx_PMU
{
.compatible = "arm,cci-500-pmu,r0",
.data = &cci_pmu_models[CCI500_R0],
},
+ {
+ .compatible = "arm,cci-550-pmu,r0",
+ .data = &cci_pmu_models[CCI550_R0],
+ },
#endif
{},
};
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index e98d15eaa799..1827fc4d15c1 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
- for_each_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c43c3d2baf73..ce54a0160faa 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -948,6 +948,58 @@ void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
*res = mbus_state.pcie_io_aperture;
}
+int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
+{
+ const struct mbus_dram_target_info *dram;
+ int i;
+
+ /* Get dram info */
+ dram = mv_mbus_dram_info();
+ if (!dram) {
+ pr_err("missing DRAM information\n");
+ return -ENODEV;
+ }
+
+ /* Try to find matching DRAM window for phyaddr */
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ if (cs->base <= phyaddr &&
+ phyaddr <= (cs->base + cs->size - 1)) {
+ *target = dram->mbus_dram_target_id;
+ *attr = cs->mbus_attr;
+ return 0;
+ }
+ }
+
+ pr_err("invalid dram address %pa\n", &phyaddr);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info);
+
+int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target,
+ u8 *attr)
+{
+ int win;
+
+ for (win = 0; win < mbus_state.soc->num_wins; win++) {
+ u64 wbase;
+ int enabled;
+
+ mvebu_mbus_read_window(&mbus_state, win, &enabled, &wbase,
+ size, target, attr, NULL);
+
+ if (!enabled)
+ continue;
+
+ if (wbase <= phyaddr && phyaddr <= wbase + *size)
+ return win;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_io_win_info);
+
static __init int mvebu_mbus_debugfs_init(void)
{
struct mvebu_mbus_state *s = &mbus_state;
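
A sketch of how a driver might consume the new export (illustrative only; program_device_window() is a hypothetical device-specific helper, and the declaration of mvebu_mbus_get_dram_win_info() is assumed to sit in <linux/mbus.h> alongside the other mvebu_mbus helpers):

#include <linux/mbus.h>
#include <linux/types.h>

extern int program_device_window(u8 target, u8 attr, phys_addr_t buf);	/* hypothetical */

static int example_map_buffer(phys_addr_t buf)
{
	u8 target, attr;
	int ret;

	/* Resolve which DRAM chip-select window covers the buffer. */
	ret = mvebu_mbus_get_dram_win_info(buf, &target, &attr);
	if (ret)
		return ret;	/* buf lies outside every DRAM CS window */

	/* Device-specific: encode target/attr into a window register. */
	return program_device_window(target, attr, buf);
}
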
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 25996e256110..795c9d9c96a6 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -330,7 +330,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
cmd = RSB_CMD_RD32;
break;
default:
- dev_err(rsb->dev, "Invalid access width: %d\n", len);
+ dev_err(rsb->dev, "Invalid access width: %zd\n", len);
return -EINVAL;
}
@@ -372,7 +372,7 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
cmd = RSB_CMD_WR32;
break;
default:
- dev_err(rsb->dev, "Invalid access width: %d\n", len);
+ dev_err(rsb->dev, "Invalid access width: %zd\n", len);
return -EINVAL;
}
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 834a2aeaf27a..350b7309c26d 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -108,7 +108,7 @@ static int uniphier_system_bus_check_overlap(
for (i = 0; i < ARRAY_SIZE(priv->bank); i++) {
for (j = i + 1; j < ARRAY_SIZE(priv->bank); j++) {
- if (priv->bank[i].end > priv->bank[j].base ||
+ if (priv->bank[i].end > priv->bank[j].base &&
priv->bank[i].base < priv->bank[j].end) {
dev_err(priv->dev,
"region overlap between bank%d and bank%d\n",
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index a043107da2af..3ec0766ed5e9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -328,7 +328,8 @@ config JS_RTC
config GEN_RTC
tristate "Generic /dev/rtc emulation"
- depends on RTC!=y && !IA64 && !ARM && !M32R && !MIPS && !SPARC && !FRV && !S390 && !SUPERH && !AVR32 && !BLACKFIN && !UML
+ depends on RTC!=y
+ depends on ALPHA || M68K || MN10300 || PARISC || PPC || X86
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 09f17eb73486..0f64d149c98d 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -156,7 +156,7 @@ static pgprot_t agp_convert_mmap_flags(int prot)
{
unsigned long prot_bits;
- prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
+ prot_bits = calc_vm_prot_bits(prot, 0) | VM_SHARED;
return vm_get_page_prot(prot_bits);
}
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 05755441250c..fdced547ad59 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -10,7 +10,6 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/uninorth.h>
-#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/pmac_feature.h>
#include "agp.h"
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index ff00331bff49..67ee8b08ab53 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -77,7 +77,7 @@ config HW_RANDOM_ATMEL
config HW_RANDOM_BCM63XX
tristate "Broadcom BCM63xx Random Number Generator support"
- depends on BCM63XX
+ depends on BCM63XX || BMIPS_GENERIC
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -382,6 +382,19 @@ config HW_RANDOM_STM32
If unsure, say N.
+config HW_RANDOM_PIC32
+ tristate "Microchip PIC32 Random Number Generator support"
+ depends on HW_RANDOM && MACH_PIC32
+ default y
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on a PIC32.
+
+	  To compile this driver as a module, choose M here. The
+ module will be called pic32-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5ad397635128..f5a6fa7690e7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -33,3 +33,4 @@ obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
obj-$(CONFIG_HW_RANDOM_STM32) += stm32-rng.o
+obj-$(CONFIG_HW_RANDOM_PIC32) += pic32-rng.o
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index 4b31f1387f37..5132c9cde50d 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
+#include <linux/of.h>
#define RNG_CTRL 0x00
#define RNG_EN (1 << 0)
@@ -79,10 +80,8 @@ static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
static int bcm63xx_rng_probe(struct platform_device *pdev)
{
struct resource *r;
- struct clk *clk;
int ret;
struct bcm63xx_rng_priv *priv;
- struct hwrng *rng;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -132,10 +131,19 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id bcm63xx_rng_of_match[] = {
+ { .compatible = "brcm,bcm6368-rng", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
+#endif
+
static struct platform_driver bcm63xx_rng_driver = {
.probe = bcm63xx_rng_probe,
.driver = {
.name = "bcm63xx-rng",
+ .of_match_table = of_match_ptr(bcm63xx_rng_of_match),
},
};
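
of_match_ptr() is what keeps the new table harmless on non-DT builds; its definition in <linux/of.h> amounts to the following, so when CONFIG_OF is disabled the driver registers a NULL match table and the then-unreferenced table can be compiled out:

#ifdef CONFIG_OF
#define of_match_ptr(ptr)	(ptr)
#else
#define of_match_ptr(ptr)	NULL
#endif
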
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 30cf4623184f..ada081232528 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -144,8 +144,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
}
-#ifdef CONFIG_PM
-static int exynos_rng_runtime_suspend(struct device *dev)
+static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -155,7 +154,7 @@ static int exynos_rng_runtime_suspend(struct device *dev)
return 0;
}
-static int exynos_rng_runtime_resume(struct device *dev)
+static int __maybe_unused exynos_rng_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -163,12 +162,12 @@ static int exynos_rng_runtime_resume(struct device *dev)
return clk_prepare_enable(exynos_rng->clk);
}
-static int exynos_rng_suspend(struct device *dev)
+static int __maybe_unused exynos_rng_suspend(struct device *dev)
{
return pm_runtime_force_suspend(dev);
}
-static int exynos_rng_resume(struct device *dev)
+static int __maybe_unused exynos_rng_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct exynos_rng *exynos_rng = platform_get_drvdata(pdev);
@@ -180,7 +179,6 @@ static int exynos_rng_resume(struct device *dev)
return exynos_rng_configure(exynos_rng);
}
-#endif
static const struct dev_pm_ops exynos_rng_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(exynos_rng_suspend, exynos_rng_resume)
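
The conversion works because SET_SYSTEM_SLEEP_PM_OPS() (and its runtime counterpart) expand to nothing when the corresponding CONFIG_PM options are off, leaving the callbacks unreferenced; __maybe_unused then suppresses the defined-but-unused warning without any #ifdef fencing. The general shape, as a sketch with invented names:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware here */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;	/* bring the hardware back up here */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};
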
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 843d6f6aee7a..3b06c1d6cfb2 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -743,6 +743,16 @@ static const struct of_device_id n2rng_match[] = {
.compatible = "SUNW,kt-rng",
.data = (void *) 1,
},
+ {
+ .name = "random-number-generator",
+ .compatible = "ORCL,m4-rng",
+ .data = (void *) 1,
+ },
+ {
+ .name = "random-number-generator",
+ .compatible = "ORCL,m7-rng",
+ .data = (void *) 1,
+ },
{},
};
MODULE_DEVICE_TABLE(of, n2rng_match);
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
new file mode 100644
index 000000000000..108897bea2d0
--- /dev/null
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -0,0 +1,155 @@
+/*
+ * PIC32 RNG driver
+ *
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RNGCON 0x04
+#define TRNGEN BIT(8)
+#define PRNGEN BIT(9)
+#define PRNGCONT BIT(10)
+#define TRNGMOD BIT(11)
+#define SEEDLOAD BIT(12)
+#define RNGPOLY1 0x08
+#define RNGPOLY2 0x0C
+#define RNGNUMGEN1 0x10
+#define RNGNUMGEN2 0x14
+#define RNGSEED1 0x18
+#define RNGSEED2 0x1C
+#define RNGRCNT 0x20
+#define RCNT_MASK 0x7F
+
+struct pic32_rng {
+ void __iomem *base;
+ struct hwrng rng;
+ struct clk *clk;
+};
+
+/*
+ * The TRNG can generate up to 24Mbps. This is a timeout that should be safe
+ * enough given the instructions in the loop and that the TRNG may not always
+ * be at maximum rate.
+ */
+#define RNG_TIMEOUT 500
+
+static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+ u64 *data = buf;
+ u32 t;
+ unsigned int timeout = RNG_TIMEOUT;
+
+ if (max < 8)
+ return 0;
+
+ do {
+ t = readl(priv->base + RNGRCNT) & RCNT_MASK;
+ if (t == 64) {
+ /* TRNG value comes through the seed registers */
+ *data = ((u64)readl(priv->base + RNGSEED2) << 32) +
+ readl(priv->base + RNGSEED1);
+ return 8;
+ }
+ } while (wait && --timeout);
+
+ return -EIO;
+}
+
+static int pic32_rng_probe(struct platform_device *pdev)
+{
+ struct pic32_rng *priv;
+ struct resource *res;
+ u32 v;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* enable TRNG in enhanced mode */
+ v = TRNGEN | TRNGMOD;
+ writel(v, priv->base + RNGCON);
+
+ priv->rng.name = pdev->name;
+ priv->rng.read = pic32_rng_read;
+
+ ret = hwrng_register(&priv->rng);
+ if (ret)
+ goto err_register;
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int pic32_rng_remove(struct platform_device *pdev)
+{
+ struct pic32_rng *rng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&rng->rng);
+ writel(0, rng->base + RNGCON);
+ clk_disable_unprepare(rng->clk);
+ return 0;
+}
+
+static const struct of_device_id pic32_rng_of_match[] = {
+ { .compatible = "microchip,pic32mzda-rng", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_rng_of_match);
+
+static struct platform_driver pic32_rng_driver = {
+ .probe = pic32_rng_probe,
+ .remove = pic32_rng_remove,
+ .driver = {
+ .name = "pic32-rng",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pic32_rng_of_match),
+ },
+};
+
+module_platform_driver(pic32_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 RNG Driver");
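
Once hwrng_register() succeeds, the read callback above feeds the kernel's hwrng core and becomes reachable from user space via /dev/hwrng. An illustrative consumer (assumes the char device node exists and the driver is bound):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t v;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	if (read(fd, &v, sizeof(v)) != sizeof(v)) {	/* one 64-bit TRNG word */
		perror("read");
		close(fd);
		return 1;
	}
	printf("0x%016llx\n", (unsigned long long)v);
	close(fd);
	return 0;
}
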
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7fddd8696211..1e25b5205724 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -849,7 +849,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
smi_inc_stat(smi_info, complete_transactions);
handle_transaction_done(smi_info);
- si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ goto restart;
} else if (si_sm_result == SI_SM_HOSED) {
smi_inc_stat(smi_info, hosed_count);
@@ -866,7 +866,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
*/
return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
}
- si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+ goto restart;
}
/*
@@ -1363,12 +1363,12 @@ MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
" default scan of the interfaces identified via DMI");
#endif
module_param_named(tryplatform, si_tryplatform, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
" default scan of the interfaces identified via platform"
" interfaces like openfirmware");
#ifdef CONFIG_PCI
module_param_named(trypci, si_trypci, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
" default scan of the interfaces identified via pci");
#endif
module_param_named(trydefaults, si_trydefaults, bool, 0);
@@ -2690,6 +2690,9 @@ static int acpi_ipmi_probe(struct platform_device *dev)
unsigned long long tmp;
int rv = -EINVAL;
+ if (!si_tryacpi)
+ return 0;
+
handle = ACPI_HANDLE(&dev->dev);
if (!handle)
return -ENODEV;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 5f1c3d08ba65..8b3be8b92573 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -920,23 +920,18 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
msg_done_handler(ssif_info, -EIO, NULL, 0);
}
} else {
+ /* Ready to request the result. */
unsigned long oflags, *flags;
- bool got_alert;
ssif_inc_stat(ssif_info, sent_messages);
ssif_inc_stat(ssif_info, sent_messages_parts);
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
- got_alert = ssif_info->got_alert;
- if (got_alert) {
+ if (ssif_info->got_alert) {
+ /* The alert already arrived; request the result now. */
ssif_info->got_alert = false;
- ssif_info->waiting_alert = false;
- }
-
- if (got_alert) {
ipmi_ssif_unlock_cond(ssif_info, flags);
- /* The alert already happened, try now. */
- retry_timeout((unsigned long) ssif_info);
+ start_get(ssif_info);
} else {
/* Wait a jiffie then request the next message */
ssif_info->waiting_alert = true;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 096f0cef4da1..4facc7517a6a 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1140,7 +1140,7 @@ ipmi_nmi(unsigned int val, struct pt_regs *regs)
the timer. So do so. */
pretimeout_since_last_heartbeat = 1;
if (atomic_inc_and_test(&preop_panic_excl))
- panic(PFX "pre-timeout");
+ nmi_panic(regs, PFX "pre-timeout");
}
return NMI_HANDLED;
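[Note: panic() from NMI context can deadlock when another CPU is already panicking and has halted this one mid-panic; nmi_panic() lets only the first CPU through. A sketch of the idea — not the kernel's exact macro:]

static atomic_t panic_in_progress = ATOMIC_INIT(-1);

static void nmi_panic_sketch(const char *msg)
{
	int cpu = raw_smp_processor_id();
	int old = atomic_cmpxchg(&panic_in_progress, -1, cpu);

	if (old == -1)
		panic("%s", msg);	/* this CPU wins and panics */
	/* else: a panic is already running elsewhere; do not recurse */
}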
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 4f6f94c43412..71025c2f6bbb 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -695,7 +695,7 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
offset += file->f_pos;
case SEEK_SET:
/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
- if (IS_ERR_VALUE((unsigned long long)offset)) {
+ if ((unsigned long long)offset >= -MAX_ERRNO) {
ret = -EOVERFLOW;
break;
}
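[Note: the replacement test leans on the kernel convention that the top MAX_ERRNO (4095) values of the unsigned range encode -errno returns, so a file position landing there would be misread by userspace as an error. The boundary worked out, constants inlined:]

#define MAX_ERRNO_SKETCH 4095ULL

static inline bool collides_with_errno_range(unsigned long long offset)
{
	/* (unsigned long long)-4095 == 0xFFFFFFFFFFFFF001: the last 4095
	 * values of the range, which callers decode as -errno codes.
	 */
	return offset >= -MAX_ERRNO_SKETCH;
}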
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 01292328a456..678fa97e41fb 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -496,12 +496,12 @@ static void pc_set_checksum(void)
#ifdef CONFIG_PROC_FS
-static char *floppy_types[] = {
+static const char * const floppy_types[] = {
"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
"3.5'' 2.88M", "3.5'' 2.88M"
};
-static char *gfx_types[] = {
+static const char * const gfx_types[] = {
"EGA, VGA, ... (with BIOS)",
"CGA (40 cols)",
"CGA (80 cols)",
@@ -602,7 +602,7 @@ static void atari_set_checksum(void)
static struct {
unsigned char val;
- char *name;
+ const char *name;
} boot_prefs[] = {
{ 0x80, "TOS" },
{ 0x40, "ASV" },
@@ -611,7 +611,7 @@ static struct {
{ 0x00, "unspecified" }
};
-static char *languages[] = {
+static const char * const languages[] = {
"English (US)",
"German",
"French",
@@ -623,7 +623,7 @@ static char *languages[] = {
"Swiss (German)"
};
-static char *dateformat[] = {
+static const char * const dateformat[] = {
"MM%cDD%cYY",
"DD%cMM%cYY",
"YY%cMM%cDD",
@@ -634,7 +634,7 @@ static char *dateformat[] = {
"7 (undefined)"
};
-static char *colors[] = {
+static const char * const colors[] = {
"2", "4", "16", "256", "65536", "??", "??", "??"
};
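[Note: the `const char * const` form locks both levels — slots cannot be repointed and the strings cannot be modified through them — which also lets these tables live in read-only data. What each qualifier forbids (sketch):]

static const char * const floppy_types_sketch[] = { "none", "5.25'' 360k" };

static void const_levels_demo(void)
{
	/* floppy_types_sketch[0] = "other"; -- error: slot is const */
	/* floppy_types_sketch[0][0] = 'N';  -- error: chars are const */
	(void)floppy_types_sketch;	/* read-only access is fine */
}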
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index 76c490fa0511..0e184426db98 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -129,10 +129,9 @@ static void button_consume_callbacks (int bpcount)
static void button_sequence_finished (unsigned long parameters)
{
-#ifdef CONFIG_NWBUTTON_REBOOT /* Reboot using button is enabled */
- if (button_press_count == reboot_count)
+ if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
+ button_press_count == reboot_count)
kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */
-#endif /* CONFIG_NWBUTTON_REBOOT */
button_consume_callbacks (button_press_count);
bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
button_press_count = 0; /* Reset the button press counter */
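[Note: folding the #ifdef into IS_ENABLED() keeps the reboot branch visible to the compiler in every configuration — always parsed and type-checked — while constant-folding to nothing when CONFIG_NWBUTTON_REBOOT is off. The transformed check in isolation (sketch):]

static void sequence_finished_sketch(int presses, int reboot_presses)
{
	/* IS_ENABLED(CONFIG_NWBUTTON_REBOOT) expands to 1 or 0, so the
	 * whole branch disappears at compile time when the option is off.
	 */
	if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) && presses == reboot_presses)
		kill_cad_pid(SIGINT, 1);	/* ask init to reboot */
}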
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 45df4bf914f8..22c27652e46a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1349,7 +1349,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
/* TODO:disable interrupts instead of reset to preserve signal states */
reset_device(info);
- if (!tty || tty->termios.c_cflag & HUPCL) {
+ if (!tty || C_HUPCL(tty)) {
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
set_signals(info);
}
@@ -1390,7 +1390,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
get_signals(info);
- if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
+ if (info->netcount || (tty && C_CREAD(tty)))
rx_start(info);
spin_unlock_irqrestore(&info->lock, flags);
@@ -1733,7 +1733,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
mgslpc_send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock, flags);
info->serial_signals &= ~SerialSignal_RTS;
set_signals(info);
@@ -1762,7 +1762,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
mgslpc_send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock, flags);
info->serial_signals |= SerialSignal_RTS;
set_signals(info);
@@ -2306,8 +2306,7 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
mgslpc_change_params(info, tty);
/* Handle transition to B0 status */
- if (old_termios->c_cflag & CBAUD &&
- !(tty->termios.c_cflag & CBAUD)) {
+ if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock, flags);
set_signals(info);
@@ -2315,21 +2314,17 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
}
/* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) &&
- tty->termios.c_cflag & CBAUD) {
+ if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->serial_signals |= SerialSignal_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
info->serial_signals |= SerialSignal_RTS;
- }
spin_lock_irqsave(&info->lock, flags);
set_signals(info);
spin_unlock_irqrestore(&info->lock, flags);
}
/* Handle turning off CRTSCTS */
- if (old_termios->c_cflag & CRTSCTS &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
tx_release(tty);
}
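[Note: the C_HUPCL()/C_CREAD()/C_CRTSCTS()/C_BAUD() helpers this file switches to are thin wrappers over the c_cflag tests they replace; include/linux/tty.h defines them along these lines (paraphrased):]

#define _C_FLAG_SKETCH(tty, flag)	((tty)->termios.c_cflag & (flag))

#define C_HUPCL_SKETCH(tty)	_C_FLAG_SKETCH((tty), HUPCL)
#define C_CREAD_SKETCH(tty)	_C_FLAG_SKETCH((tty), CREAD)
#define C_CRTSCTS_SKETCH(tty)	_C_FLAG_SKETCH((tty), CRTSCTS)
#define C_BAUD_SKETCH(tty)	_C_FLAG_SKETCH((tty), CBAUD)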
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index ae0b42b66e55..f8a483c67b07 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -69,12 +69,13 @@
#include <linux/ppdev.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
#define PP_VERSION "ppdev: user-space parallel port driver"
#define CHRDEV "ppdev"
struct pp_struct {
- struct pardevice * pdev;
+ struct pardevice *pdev;
wait_queue_head_t irq_wait;
atomic_t irqc;
unsigned int flags;
@@ -98,18 +99,26 @@ struct pp_struct {
#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
static DEFINE_MUTEX(pp_do_mutex);
-static inline void pp_enable_irq (struct pp_struct *pp)
+
+/* define fixed sized ioctl cmd for y2038 migration */
+#define PPGETTIME32 _IOR(PP_IOCTL, 0x95, s32[2])
+#define PPSETTIME32 _IOW(PP_IOCTL, 0x96, s32[2])
+#define PPGETTIME64 _IOR(PP_IOCTL, 0x95, s64[2])
+#define PPSETTIME64 _IOW(PP_IOCTL, 0x96, s64[2])
+
+static inline void pp_enable_irq(struct pp_struct *pp)
{
struct parport *port = pp->pdev->port;
- port->ops->enable_irq (port);
+
+ port->ops->enable_irq(port);
}
-static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
- loff_t * ppos)
+static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
- char * kbuffer;
+ char *kbuffer;
ssize_t bytes_read = 0;
struct parport *pport;
int mode;
@@ -125,16 +134,15 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
return 0;
kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
- if (!kbuffer) {
+ if (!kbuffer)
return -ENOMEM;
- }
pport = pp->pdev->port;
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
- parport_set_timeout (pp->pdev,
- (file->f_flags & O_NONBLOCK) ?
- PARPORT_INACTIVITY_O_NONBLOCK :
- pp->default_inactivity);
+ parport_set_timeout(pp->pdev,
+ (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK :
+ pp->default_inactivity);
while (bytes_read == 0) {
ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
@@ -144,20 +152,17 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
int flags = 0;
size_t (*fn)(struct parport *, void *, size_t, int);
- if (pp->flags & PP_W91284PIC) {
+ if (pp->flags & PP_W91284PIC)
flags |= PARPORT_W91284PIC;
- }
- if (pp->flags & PP_FASTREAD) {
+ if (pp->flags & PP_FASTREAD)
flags |= PARPORT_EPP_FAST;
- }
- if (pport->ieee1284.mode & IEEE1284_ADDR) {
+ if (pport->ieee1284.mode & IEEE1284_ADDR)
fn = pport->ops->epp_read_addr;
- } else {
+ else
fn = pport->ops->epp_read_data;
- }
bytes_read = (*fn)(pport, kbuffer, need, flags);
} else {
- bytes_read = parport_read (pport, kbuffer, need);
+ bytes_read = parport_read(pport, kbuffer, need);
}
if (bytes_read != 0)
@@ -168,7 +173,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
break;
}
- if (signal_pending (current)) {
+ if (signal_pending(current)) {
bytes_read = -ERESTARTSYS;
break;
}
@@ -176,22 +181,22 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
cond_resched();
}
- parport_set_timeout (pp->pdev, pp->default_inactivity);
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
- if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read))
+ if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
bytes_read = -EFAULT;
- kfree (kbuffer);
- pp_enable_irq (pp);
+ kfree(kbuffer);
+ pp_enable_irq(pp);
return bytes_read;
}
-static ssize_t pp_write (struct file * file, const char __user * buf,
- size_t count, loff_t * ppos)
+static ssize_t pp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
- char * kbuffer;
+ char *kbuffer;
ssize_t bytes_written = 0;
ssize_t wrote;
int mode;
@@ -204,21 +209,21 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
}
kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
- if (!kbuffer) {
+ if (!kbuffer)
return -ENOMEM;
- }
+
pport = pp->pdev->port;
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
- parport_set_timeout (pp->pdev,
- (file->f_flags & O_NONBLOCK) ?
- PARPORT_INACTIVITY_O_NONBLOCK :
- pp->default_inactivity);
+ parport_set_timeout(pp->pdev,
+ (file->f_flags & O_NONBLOCK) ?
+ PARPORT_INACTIVITY_O_NONBLOCK :
+ pp->default_inactivity);
while (bytes_written < count) {
ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
- if (copy_from_user (kbuffer, buf + bytes_written, n)) {
+ if (copy_from_user(kbuffer, buf + bytes_written, n)) {
bytes_written = -EFAULT;
break;
}
@@ -226,20 +231,19 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
/* do a fast EPP write */
if (pport->ieee1284.mode & IEEE1284_ADDR) {
- wrote = pport->ops->epp_write_addr (pport,
+ wrote = pport->ops->epp_write_addr(pport,
kbuffer, n, PARPORT_EPP_FAST);
} else {
- wrote = pport->ops->epp_write_data (pport,
+ wrote = pport->ops->epp_write_data(pport,
kbuffer, n, PARPORT_EPP_FAST);
}
} else {
- wrote = parport_write (pp->pdev->port, kbuffer, n);
+ wrote = parport_write(pp->pdev->port, kbuffer, n);
}
if (wrote <= 0) {
- if (!bytes_written) {
+ if (!bytes_written)
bytes_written = wrote;
- }
break;
}
@@ -251,36 +255,36 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
break;
}
- if (signal_pending (current))
+ if (signal_pending(current))
break;
cond_resched();
}
- parport_set_timeout (pp->pdev, pp->default_inactivity);
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
- kfree (kbuffer);
- pp_enable_irq (pp);
+ kfree(kbuffer);
+ pp_enable_irq(pp);
return bytes_written;
}
-static void pp_irq (void *private)
+static void pp_irq(void *private)
{
struct pp_struct *pp = private;
if (pp->irqresponse) {
- parport_write_control (pp->pdev->port, pp->irqctl);
+ parport_write_control(pp->pdev->port, pp->irqctl);
pp->irqresponse = 0;
}
- atomic_inc (&pp->irqc);
- wake_up_interruptible (&pp->irq_wait);
+ atomic_inc(&pp->irqc);
+ wake_up_interruptible(&pp->irq_wait);
}
-static int register_device (int minor, struct pp_struct *pp)
+static int register_device(int minor, struct pp_struct *pp)
{
struct parport *port;
- struct pardevice * pdev = NULL;
+ struct pardevice *pdev = NULL;
char *name;
int fl;
@@ -288,30 +292,30 @@ static int register_device (int minor, struct pp_struct *pp)
if (name == NULL)
return -ENOMEM;
- port = parport_find_number (minor);
+ port = parport_find_number(minor);
if (!port) {
- printk (KERN_WARNING "%s: no associated port!\n", name);
- kfree (name);
+ printk(KERN_WARNING "%s: no associated port!\n", name);
+ kfree(name);
return -ENXIO;
}
fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
- pdev = parport_register_device (port, name, NULL,
- NULL, pp_irq, fl, pp);
- parport_put_port (port);
+ pdev = parport_register_device(port, name, NULL,
+ NULL, pp_irq, fl, pp);
+ parport_put_port(port);
if (!pdev) {
- printk (KERN_WARNING "%s: failed to register device!\n", name);
- kfree (name);
+ printk(KERN_WARNING "%s: failed to register device!\n", name);
+ kfree(name);
return -ENXIO;
}
pp->pdev = pdev;
- pr_debug("%s: registered pardevice\n", name);
+ dev_dbg(&pdev->dev, "registered pardevice\n");
return 0;
}
-static enum ieee1284_phase init_phase (int mode)
+static enum ieee1284_phase init_phase(int mode)
{
switch (mode & ~(IEEE1284_DEVICEID
| IEEE1284_ADDR)) {
@@ -322,11 +326,27 @@ static enum ieee1284_phase init_phase (int mode)
return IEEE1284_PH_FWD_IDLE;
}
+static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec)
+{
+ long to_jiffies;
+
+ if ((tv_sec < 0) || (tv_usec < 0))
+ return -EINVAL;
+
+ to_jiffies = usecs_to_jiffies(tv_usec);
+ to_jiffies += tv_sec * HZ;
+ if (to_jiffies <= 0)
+ return -EINVAL;
+
+ pdev->timeout = to_jiffies;
+ return 0;
+}
+
static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
unsigned int minor = iminor(file_inode(file));
struct pp_struct *pp = file->private_data;
- struct parport * port;
+ struct parport *port;
void __user *argp = (void __user *)arg;
/* First handle the cases that don't take arguments. */
@@ -337,19 +357,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int ret;
if (pp->flags & PP_CLAIMED) {
- pr_debug(CHRDEV "%x: you've already got it!\n", minor);
+ dev_dbg(&pp->pdev->dev, "you've already got it!\n");
return -EINVAL;
}
/* Deferred device registration. */
if (!pp->pdev) {
- int err = register_device (minor, pp);
- if (err) {
+ int err = register_device(minor, pp);
+
+ if (err)
return err;
- }
}
- ret = parport_claim_or_block (pp->pdev);
+ ret = parport_claim_or_block(pp->pdev);
if (ret < 0)
return ret;
@@ -357,7 +377,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* For interrupt-reporting to work, we need to be
* informed of each interrupt. */
- pp_enable_irq (pp);
+ pp_enable_irq(pp);
/* We may need to fix up the state machine. */
info = &pp->pdev->port->ieee1284;
@@ -365,15 +385,15 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
pp->saved_state.phase = info->phase;
info->mode = pp->state.mode;
info->phase = pp->state.phase;
- pp->default_inactivity = parport_set_timeout (pp->pdev, 0);
- parport_set_timeout (pp->pdev, pp->default_inactivity);
+ pp->default_inactivity = parport_set_timeout(pp->pdev, 0);
+ parport_set_timeout(pp->pdev, pp->default_inactivity);
return 0;
}
case PPEXCL:
if (pp->pdev) {
- pr_debug(CHRDEV "%x: too late for PPEXCL; "
- "already registered\n", minor);
+ dev_dbg(&pp->pdev->dev,
+ "too late for PPEXCL; already registered\n");
if (pp->flags & PP_EXCL)
/* But it's not really an error. */
return 0;
@@ -388,11 +408,12 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PPSETMODE:
{
int mode;
- if (copy_from_user (&mode, argp, sizeof (mode)))
+
+ if (copy_from_user(&mode, argp, sizeof(mode)))
return -EFAULT;
/* FIXME: validate mode */
pp->state.mode = mode;
- pp->state.phase = init_phase (mode);
+ pp->state.phase = init_phase(mode);
if (pp->flags & PP_CLAIMED) {
pp->pdev->port->ieee1284.mode = mode;
@@ -405,28 +426,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int mode;
- if (pp->flags & PP_CLAIMED) {
+ if (pp->flags & PP_CLAIMED)
mode = pp->pdev->port->ieee1284.mode;
- } else {
+ else
mode = pp->state.mode;
- }
- if (copy_to_user (argp, &mode, sizeof (mode))) {
+
+ if (copy_to_user(argp, &mode, sizeof(mode)))
return -EFAULT;
- }
return 0;
}
case PPSETPHASE:
{
int phase;
- if (copy_from_user (&phase, argp, sizeof (phase))) {
+
+ if (copy_from_user(&phase, argp, sizeof(phase)))
return -EFAULT;
- }
+
/* FIXME: validate phase */
pp->state.phase = phase;
- if (pp->flags & PP_CLAIMED) {
+ if (pp->flags & PP_CLAIMED)
pp->pdev->port->ieee1284.phase = phase;
- }
return 0;
}
@@ -434,38 +454,34 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int phase;
- if (pp->flags & PP_CLAIMED) {
+ if (pp->flags & PP_CLAIMED)
phase = pp->pdev->port->ieee1284.phase;
- } else {
+ else
phase = pp->state.phase;
- }
- if (copy_to_user (argp, &phase, sizeof (phase))) {
+ if (copy_to_user(argp, &phase, sizeof(phase)))
return -EFAULT;
- }
return 0;
}
case PPGETMODES:
{
unsigned int modes;
- port = parport_find_number (minor);
+ port = parport_find_number(minor);
if (!port)
return -ENODEV;
modes = port->modes;
parport_put_port(port);
- if (copy_to_user (argp, &modes, sizeof (modes))) {
+ if (copy_to_user(argp, &modes, sizeof(modes)))
return -EFAULT;
- }
return 0;
}
case PPSETFLAGS:
{
int uflags;
- if (copy_from_user (&uflags, argp, sizeof (uflags))) {
+ if (copy_from_user(&uflags, argp, sizeof(uflags)))
return -EFAULT;
- }
pp->flags &= ~PP_FLAGMASK;
pp->flags |= (uflags & PP_FLAGMASK);
return 0;
@@ -475,9 +491,8 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int uflags;
uflags = pp->flags & PP_FLAGMASK;
- if (copy_to_user (argp, &uflags, sizeof (uflags))) {
+ if (copy_to_user(argp, &uflags, sizeof(uflags)))
return -EFAULT;
- }
return 0;
}
} /* end switch() */
@@ -495,27 +510,28 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned char reg;
unsigned char mask;
int mode;
+ s32 time32[2];
+ s64 time64[2];
+ struct timespec64 ts;
int ret;
- struct timeval par_timeout;
- long to_jiffies;
case PPRSTATUS:
- reg = parport_read_status (port);
- if (copy_to_user (argp, &reg, sizeof (reg)))
+ reg = parport_read_status(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
return -EFAULT;
return 0;
case PPRDATA:
- reg = parport_read_data (port);
- if (copy_to_user (argp, &reg, sizeof (reg)))
+ reg = parport_read_data(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
return -EFAULT;
return 0;
case PPRCONTROL:
- reg = parport_read_control (port);
- if (copy_to_user (argp, &reg, sizeof (reg)))
+ reg = parport_read_control(port);
+ if (copy_to_user(argp, &reg, sizeof(reg)))
return -EFAULT;
return 0;
case PPYIELD:
- parport_yield_blocking (pp->pdev);
+ parport_yield_blocking(pp->pdev);
return 0;
case PPRELEASE:
@@ -525,45 +541,45 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
pp->state.phase = info->phase;
info->mode = pp->saved_state.mode;
info->phase = pp->saved_state.phase;
- parport_release (pp->pdev);
+ parport_release(pp->pdev);
pp->flags &= ~PP_CLAIMED;
return 0;
case PPWCONTROL:
- if (copy_from_user (&reg, argp, sizeof (reg)))
+ if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
- parport_write_control (port, reg);
+ parport_write_control(port, reg);
return 0;
case PPWDATA:
- if (copy_from_user (&reg, argp, sizeof (reg)))
+ if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
- parport_write_data (port, reg);
+ parport_write_data(port, reg);
return 0;
case PPFCONTROL:
- if (copy_from_user (&mask, argp,
- sizeof (mask)))
+ if (copy_from_user(&mask, argp,
+ sizeof(mask)))
return -EFAULT;
- if (copy_from_user (&reg, 1 + (unsigned char __user *) arg,
- sizeof (reg)))
+ if (copy_from_user(&reg, 1 + (unsigned char __user *) arg,
+ sizeof(reg)))
return -EFAULT;
- parport_frob_control (port, mask, reg);
+ parport_frob_control(port, mask, reg);
return 0;
case PPDATADIR:
- if (copy_from_user (&mode, argp, sizeof (mode)))
+ if (copy_from_user(&mode, argp, sizeof(mode)))
return -EFAULT;
if (mode)
- port->ops->data_reverse (port);
+ port->ops->data_reverse(port);
else
- port->ops->data_forward (port);
+ port->ops->data_forward(port);
return 0;
case PPNEGOT:
- if (copy_from_user (&mode, argp, sizeof (mode)))
+ if (copy_from_user(&mode, argp, sizeof(mode)))
return -EFAULT;
- switch ((ret = parport_negotiate (port, mode))) {
+ switch ((ret = parport_negotiate(port, mode))) {
case 0: break;
case -1: /* handshake failed, peripheral not IEEE 1284 */
ret = -EIO;
@@ -572,11 +588,11 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -ENXIO;
break;
}
- pp_enable_irq (pp);
+ pp_enable_irq(pp);
return ret;
case PPWCTLONIRQ:
- if (copy_from_user (&reg, argp, sizeof (reg)))
+ if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
/* Remember what to set the control lines to, for next
@@ -586,39 +602,50 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
case PPCLRIRQ:
- ret = atomic_read (&pp->irqc);
- if (copy_to_user (argp, &ret, sizeof (ret)))
+ ret = atomic_read(&pp->irqc);
+ if (copy_to_user(argp, &ret, sizeof(ret)))
return -EFAULT;
- atomic_sub (ret, &pp->irqc);
+ atomic_sub(ret, &pp->irqc);
return 0;
- case PPSETTIME:
- if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) {
+ case PPSETTIME32:
+ if (copy_from_user(time32, argp, sizeof(time32)))
return -EFAULT;
- }
- /* Convert to jiffies, place in pp->pdev->timeout */
- if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) {
- return -EINVAL;
- }
- to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
- to_jiffies += par_timeout.tv_sec * (long)HZ;
- if (to_jiffies <= 0) {
+
+ return pp_set_timeout(pp->pdev, time32[0], time32[1]);
+
+ case PPSETTIME64:
+ if (copy_from_user(time64, argp, sizeof(time64)))
+ return -EFAULT;
+
+ return pp_set_timeout(pp->pdev, time64[0], time64[1]);
+
+ case PPGETTIME32:
+ jiffies_to_timespec64(pp->pdev->timeout, &ts);
+ time32[0] = ts.tv_sec;
+ time32[1] = ts.tv_nsec / NSEC_PER_USEC;
+ if ((time32[0] < 0) || (time32[1] < 0))
return -EINVAL;
- }
- pp->pdev->timeout = to_jiffies;
+
+ if (copy_to_user(argp, time32, sizeof(time32)))
+ return -EFAULT;
+
return 0;
- case PPGETTIME:
- to_jiffies = pp->pdev->timeout;
- memset(&par_timeout, 0, sizeof(par_timeout));
- par_timeout.tv_sec = to_jiffies / HZ;
- par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
- if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
+ case PPGETTIME64:
+ jiffies_to_timespec64(pp->pdev->timeout, &ts);
+ time64[0] = ts.tv_sec;
+ time64[1] = ts.tv_nsec / NSEC_PER_USEC;
+ if ((time64[0] < 0) || (time64[1] < 0))
+ return -EINVAL;
+
+ if (copy_to_user(argp, time64, sizeof(time64)))
return -EFAULT;
+
return 0;
default:
- pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
+ dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd);
return -EINVAL;
}
@@ -629,13 +656,22 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
+
mutex_lock(&pp_do_mutex);
ret = pp_do_ioctl(file, cmd, arg);
mutex_unlock(&pp_do_mutex);
return ret;
}
-static int pp_open (struct inode * inode, struct file * file)
+#ifdef CONFIG_COMPAT
+static long pp_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int pp_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct pp_struct *pp;
@@ -643,16 +679,16 @@ static int pp_open (struct inode * inode, struct file * file)
if (minor >= PARPORT_MAX)
return -ENXIO;
- pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL);
+ pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL);
if (!pp)
return -ENOMEM;
pp->state.mode = IEEE1284_MODE_COMPAT;
- pp->state.phase = init_phase (pp->state.mode);
+ pp->state.phase = init_phase(pp->state.mode);
pp->flags = 0;
pp->irqresponse = 0;
- atomic_set (&pp->irqc, 0);
- init_waitqueue_head (&pp->irq_wait);
+ atomic_set(&pp->irqc, 0);
+ init_waitqueue_head(&pp->irq_wait);
/* Defer the actual device registration until the first claim.
* That way, we know whether or not the driver wants to have
@@ -664,7 +700,7 @@ static int pp_open (struct inode * inode, struct file * file)
return 0;
}
-static int pp_release (struct inode * inode, struct file * file)
+static int pp_release(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct pp_struct *pp = file->private_data;
@@ -673,10 +709,10 @@ static int pp_release (struct inode * inode, struct file * file)
compat_negot = 0;
if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
(pp->state.mode != IEEE1284_MODE_COMPAT)) {
- struct ieee1284_info *info;
+ struct ieee1284_info *info;
/* parport released, but not in compatibility mode */
- parport_claim_or_block (pp->pdev);
+ parport_claim_or_block(pp->pdev);
pp->flags |= PP_CLAIMED;
info = &pp->pdev->port->ieee1284;
pp->saved_state.mode = info->mode;
@@ -689,9 +725,9 @@ static int pp_release (struct inode * inode, struct file * file)
compat_negot = 2;
}
if (compat_negot) {
- parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
- pr_debug(CHRDEV "%x: negotiated back to compatibility "
- "mode because user-space forgot\n", minor);
+ parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT);
+ dev_dbg(&pp->pdev->dev,
+ "negotiated back to compatibility mode because user-space forgot\n");
}
if (pp->flags & PP_CLAIMED) {
@@ -702,7 +738,7 @@ static int pp_release (struct inode * inode, struct file * file)
pp->state.phase = info->phase;
info->mode = pp->saved_state.mode;
info->phase = pp->saved_state.phase;
- parport_release (pp->pdev);
+ parport_release(pp->pdev);
if (compat_negot != 1) {
pr_debug(CHRDEV "%x: released pardevice "
"because user-space forgot\n", minor);
@@ -711,25 +747,26 @@ static int pp_release (struct inode * inode, struct file * file)
if (pp->pdev) {
const char *name = pp->pdev->name;
- parport_unregister_device (pp->pdev);
- kfree (name);
+
+ parport_unregister_device(pp->pdev);
+ kfree(name);
pp->pdev = NULL;
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
}
- kfree (pp);
+ kfree(pp);
return 0;
}
/* No kernel lock held - fine */
-static unsigned int pp_poll (struct file * file, poll_table * wait)
+static unsigned int pp_poll(struct file *file, poll_table *wait)
{
struct pp_struct *pp = file->private_data;
unsigned int mask = 0;
- poll_wait (file, &pp->irq_wait, wait);
- if (atomic_read (&pp->irqc))
+ poll_wait(file, &pp->irq_wait, wait);
+ if (atomic_read(&pp->irqc))
mask |= POLLIN | POLLRDNORM;
return mask;
@@ -744,6 +781,9 @@ static const struct file_operations pp_fops = {
.write = pp_write,
.poll = pp_poll,
.unlocked_ioctl = pp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pp_compat_ioctl,
+#endif
.open = pp_open,
.release = pp_release,
};
@@ -765,13 +805,13 @@ static struct parport_driver pp_driver = {
.detach = pp_detach,
};
-static int __init ppdev_init (void)
+static int __init ppdev_init(void)
{
int err = 0;
- if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) {
- printk (KERN_WARNING CHRDEV ": unable to get major %d\n",
- PP_MAJOR);
+ if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
+ printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
+ PP_MAJOR);
return -EIO;
}
ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -781,11 +821,11 @@ static int __init ppdev_init (void)
}
err = parport_register_driver(&pp_driver);
if (err < 0) {
- printk (KERN_WARNING CHRDEV ": unable to register with parport\n");
+ printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
goto out_class;
}
- printk (KERN_INFO PP_VERSION "\n");
+ printk(KERN_INFO PP_VERSION "\n");
goto out;
out_class:
@@ -796,12 +836,12 @@ out:
return err;
}
-static void __exit ppdev_cleanup (void)
+static void __exit ppdev_cleanup(void)
{
/* Clean up all parport stuff */
parport_unregister_driver(&pp_driver);
class_destroy(ppdev_class);
- unregister_chrdev (PP_MAJOR, CHRDEV);
+ unregister_chrdev(PP_MAJOR, CHRDEV);
}
module_init(ppdev_init);
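[Note: PPGETTIME32/PPSETTIME32 reuse command numbers 0x95/0x96 yet stay distinct from their 64-bit twins, because _IOR()/_IOW() fold the argument size into the command word. Sketch of the encoding, field layout per the common asm-generic/ioctl.h scheme (dir:2 | size:14 | type:8 | nr:8):]

#define SKETCH_IOC(dir, size, type, nr) \
	(((unsigned int)(dir) << 30) | ((unsigned int)(size) << 16) | \
	 ((unsigned int)(type) << 8) | (unsigned int)(nr))

/* sizeof(s32[2]) == 8 while sizeof(s64[2]) == 16, so PPGETTIME32 and
 * PPGETTIME64 produce different command words despite sharing nr 0x95;
 * the same holds for the 0x96 setters.
 */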
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 9b9809b709a5..e83b2adc014a 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -334,10 +334,8 @@ static int __init raw_init(void)
cdev_init(&raw_cdev, &raw_fops);
ret = cdev_add(&raw_cdev, dev, max_raw_minors);
- if (ret) {
+ if (ret)
goto error_region;
- }
-
raw_class = class_create(THIS_MODULE, "raw");
if (IS_ERR(raw_class)) {
printk(KERN_ERR "Error creating raw class.\n");
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 45cc39aabeee..274dd0123237 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -88,6 +88,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops)
{
struct tpm_chip *chip;
+ int rc;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -136,11 +137,17 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
+ rc = devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
+ if (rc) {
+ put_device(&chip->dev);
+ return ERR_PTR(rc);
+ }
+
return chip;
}
EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
-static int tpm_dev_add_device(struct tpm_chip *chip)
+static int tpm_add_char_device(struct tpm_chip *chip)
{
int rc;
@@ -151,7 +158,6 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
- device_unregister(&chip->dev);
return rc;
}
@@ -162,16 +168,17 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
chip->devname, MAJOR(chip->dev.devt),
MINOR(chip->dev.devt), rc);
+ cdev_del(&chip->cdev);
return rc;
}
return rc;
}
-static void tpm_dev_del_device(struct tpm_chip *chip)
+static void tpm_del_char_device(struct tpm_chip *chip)
{
cdev_del(&chip->cdev);
- device_unregister(&chip->dev);
+ device_del(&chip->dev);
}
static int tpm1_chip_register(struct tpm_chip *chip)
@@ -222,7 +229,7 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
- rc = tpm_dev_add_device(chip);
+ rc = tpm_add_char_device(chip);
if (rc)
goto out_err;
@@ -274,6 +281,6 @@ void tpm_chip_unregister(struct tpm_chip *chip)
sysfs_remove_link(&chip->pdev->kobj, "ppi");
tpm1_chip_unregister(chip);
- tpm_dev_del_device(chip);
+ tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
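[Note: devm_add_action() queues put_device(&chip->dev) to run when the parent device detaches, so the reference taken by tpmm_chip_alloc() is released on every teardown path; if queueing itself fails, the reference must be dropped by hand, as the hunk does. The pattern in isolation (sketch — later kernels wrap exactly this as devm_add_action_or_reset()):]

static int hold_ref_until_detach(struct device *parent, struct device *d)
{
	int rc;

	rc = devm_add_action(parent, (void (*)(void *))put_device, d);
	if (rc) {
		put_device(d);	/* action never queued: drop the ref now */
		return rc;
	}
	return 0;		/* put_device(d) runs on parent detach */
}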
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 542a80cbfd9c..28b477e8da6a 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -128,13 +128,6 @@ enum tpm2_startup_types {
TPM2_SU_STATE = 0x0001,
};
-enum tpm2_start_method {
- TPM2_START_ACPI = 2,
- TPM2_START_FIFO = 6,
- TPM2_START_CRB = 7,
- TPM2_START_CRB_WITH_ACPI = 8,
-};
-
struct tpm_chip;
struct tpm_vendor_specific {
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 45a634016f95..b28e4da3d2cf 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -20,7 +20,11 @@
#include <keys/trusted-type.h>
enum tpm2_object_attributes {
- TPM2_ATTR_USER_WITH_AUTH = BIT(6),
+ TPM2_OA_USER_WITH_AUTH = BIT(6),
+};
+
+enum tpm2_session_attributes {
+ TPM2_SA_CONTINUE_SESSION = BIT(0),
};
struct tpm2_startup_in {
@@ -478,22 +482,18 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
tpm_buf_append_u8(&buf, payload->migratable);
/* public */
- if (options->policydigest)
- tpm_buf_append_u16(&buf, 14 + options->digest_len);
- else
- tpm_buf_append_u16(&buf, 14);
-
+ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH);
tpm_buf_append_u16(&buf, hash);
/* policy */
- if (options->policydigest) {
+ if (options->policydigest_len) {
tpm_buf_append_u32(&buf, 0);
- tpm_buf_append_u16(&buf, options->digest_len);
+ tpm_buf_append_u16(&buf, options->policydigest_len);
tpm_buf_append(&buf, options->policydigest,
- options->digest_len);
+ options->policydigest_len);
} else {
- tpm_buf_append_u32(&buf, TPM2_ATTR_USER_WITH_AUTH);
+ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
tpm_buf_append_u16(&buf, 0);
}
@@ -631,7 +631,7 @@ static int tpm2_unseal(struct tpm_chip *chip,
options->policyhandle ?
options->policyhandle : TPM2_RS_PW,
NULL /* nonce */, 0,
- 0 /* session_attributes */,
+ TPM2_SA_CONTINUE_SESSION,
options->blobauth /* hmac */,
TPM_DIGEST_SIZE);
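[Note: TPM2_SA_CONTINUE_SESSION is bit 0 of the per-command session attributes; setting it asks the TPM to keep the authorization session alive after the unseal instead of flushing it. For context, the helper receiving these arguments assembles the authorization area roughly like this (paraphrase of tpm2_buf_append_auth(), not part of this hunk):]

static void append_auth_sketch(struct tpm_buf *buf, u32 session_handle,
			       const u8 *nonce, u16 nonce_len,
			       u8 attributes, const u8 *hmac, u16 hmac_len)
{
	tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);	/* area size */
	tpm_buf_append_u32(buf, session_handle);
	tpm_buf_append_u16(buf, nonce_len);
	tpm_buf_append(buf, nonce, nonce_len);
	tpm_buf_append_u8(buf, attributes);	/* TPM2_SA_CONTINUE_SESSION */
	tpm_buf_append_u16(buf, hmac_len);
	tpm_buf_append(buf, hmac, hmac_len);
}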
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 8342cf51ffdc..a12b31940344 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -34,14 +34,6 @@ enum crb_defaults {
CRB_ACPI_START_INDEX = 1,
};
-struct acpi_tpm2 {
- struct acpi_table_header hdr;
- u16 platform_class;
- u16 reserved;
- u64 control_area_pa;
- u32 start_method;
-} __packed;
-
enum crb_ca_request {
CRB_CA_REQ_GO_IDLE = BIT(0),
CRB_CA_REQ_CMD_READY = BIT(1),
@@ -85,6 +77,8 @@ enum crb_flags {
struct crb_priv {
unsigned int flags;
+ struct resource res;
+ void __iomem *iobase;
struct crb_control_area __iomem *cca;
u8 __iomem *cmd;
u8 __iomem *rsp;
@@ -97,7 +91,7 @@ static u8 crb_status(struct tpm_chip *chip)
struct crb_priv *priv = chip->vendor.priv;
u8 sts = 0;
- if ((le32_to_cpu(ioread32(&priv->cca->start)) & CRB_START_INVOKE) !=
+ if ((ioread32(&priv->cca->start) & CRB_START_INVOKE) !=
CRB_START_INVOKE)
sts |= CRB_STS_COMPLETE;
@@ -113,7 +107,7 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
if (count < 6)
return -EIO;
- if (le32_to_cpu(ioread32(&priv->cca->sts)) & CRB_CA_STS_ERROR)
+ if (ioread32(&priv->cca->sts) & CRB_CA_STS_ERROR)
return -EIO;
memcpy_fromio(buf, priv->rsp, 6);
@@ -149,11 +143,11 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
struct crb_priv *priv = chip->vendor.priv;
int rc = 0;
- if (len > le32_to_cpu(ioread32(&priv->cca->cmd_size))) {
+ if (len > ioread32(&priv->cca->cmd_size)) {
dev_err(&chip->dev,
"invalid command count value %x %zx\n",
(unsigned int) len,
- (size_t) le32_to_cpu(ioread32(&priv->cca->cmd_size)));
+ (size_t) ioread32(&priv->cca->cmd_size));
return -E2BIG;
}
@@ -189,7 +183,7 @@ static void crb_cancel(struct tpm_chip *chip)
static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
{
struct crb_priv *priv = chip->vendor.priv;
- u32 cancel = le32_to_cpu(ioread32(&priv->cca->cancel));
+ u32 cancel = ioread32(&priv->cca->cancel);
return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
}
@@ -204,97 +198,145 @@ static const struct tpm_class_ops tpm_crb = {
.req_complete_val = CRB_STS_COMPLETE,
};
-static int crb_acpi_add(struct acpi_device *device)
+static int crb_init(struct acpi_device *device, struct crb_priv *priv)
{
struct tpm_chip *chip;
- struct acpi_tpm2 *buf;
+ int rc;
+
+ chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ chip->vendor.priv = priv;
+ chip->acpi_dev_handle = device->handle;
+ chip->flags = TPM_CHIP_FLAG_TPM2;
+
+ rc = tpm_get_timeouts(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm2_do_selftest(chip);
+ if (rc)
+ return rc;
+
+ return tpm_chip_register(chip);
+}
+
+static int crb_check_resource(struct acpi_resource *ares, void *data)
+{
+ struct crb_priv *priv = data;
+ struct resource res;
+
+ if (acpi_dev_resource_memory(ares, &res)) {
+ priv->res = res;
+ priv->res.name = NULL;
+ }
+
+ return 1;
+}
+
+static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+ u64 start, u32 size)
+{
+ struct resource new_res = {
+ .start = start,
+ .end = start + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+
+ /* Detect a 64 bit address on a 32 bit system */
+ if (start != new_res.start)
+ return ERR_PTR(-EINVAL);
+
+ if (!resource_contains(&priv->res, &new_res))
+ return devm_ioremap_resource(dev, &new_res);
+
+ return priv->iobase + (new_res.start - priv->res.start);
+}
+
+static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf)
+{
+ struct list_head resources;
+ struct device *dev = &device->dev;
+ u64 pa;
+ int ret;
+
+ INIT_LIST_HEAD(&resources);
+ ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
+ priv);
+ if (ret < 0)
+ return ret;
+ acpi_dev_free_resource_list(&resources);
+
+ if (resource_type(&priv->res) != IORESOURCE_MEM) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ }
+
+ priv->iobase = devm_ioremap_resource(dev, &priv->res);
+ if (IS_ERR(priv->iobase))
+ return PTR_ERR(priv->iobase);
+
+ priv->cca = crb_map_res(dev, priv, buf->control_address, 0x1000);
+ if (IS_ERR(priv->cca))
+ return PTR_ERR(priv->cca);
+
+ pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
+ (u64) ioread32(&priv->cca->cmd_pa_low);
+ priv->cmd = crb_map_res(dev, priv, pa, ioread32(&priv->cca->cmd_size));
+ if (IS_ERR(priv->cmd))
+ return PTR_ERR(priv->cmd);
+
+ memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
+ pa = le64_to_cpu(pa);
+ priv->rsp = crb_map_res(dev, priv, pa, ioread32(&priv->cca->rsp_size));
+ return PTR_ERR_OR_ZERO(priv->rsp);
+}
+
+static int crb_acpi_add(struct acpi_device *device)
+{
+ struct acpi_table_tpm2 *buf;
struct crb_priv *priv;
struct device *dev = &device->dev;
acpi_status status;
u32 sm;
- u64 pa;
int rc;
status = acpi_get_table(ACPI_SIG_TPM2, 1,
(struct acpi_table_header **) &buf);
- if (ACPI_FAILURE(status)) {
- dev_err(dev, "failed to get TPM2 ACPI table\n");
- return -ENODEV;
+ if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
+ dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
}
/* Should the FIFO driver handle this? */
- if (buf->start_method == TPM2_START_FIFO)
+ sm = buf->start_method;
+ if (sm == ACPI_TPM2_MEMORY_MAPPED)
return -ENODEV;
- chip = tpmm_chip_alloc(dev, &tpm_crb);
- if (IS_ERR(chip))
- return PTR_ERR(chip);
-
- chip->flags = TPM_CHIP_FLAG_TPM2;
-
- if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
- dev_err(dev, "TPM2 ACPI table has wrong size");
- return -EINVAL;
- }
-
- priv = (struct crb_priv *) devm_kzalloc(dev, sizeof(struct crb_priv),
- GFP_KERNEL);
- if (!priv) {
- dev_err(dev, "failed to devm_kzalloc for private data\n");
+ priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- }
-
- sm = le32_to_cpu(buf->start_method);
/* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs
* report only ACPI start but in practice seems to require both
* ACPI start and CRB start.
*/
- if (sm == TPM2_START_CRB || sm == TPM2_START_FIFO ||
+ if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED ||
!strcmp(acpi_device_hid(device), "MSFT0101"))
priv->flags |= CRB_FL_CRB_START;
- if (sm == TPM2_START_ACPI || sm == TPM2_START_CRB_WITH_ACPI)
+ if (sm == ACPI_TPM2_START_METHOD ||
+ sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)
priv->flags |= CRB_FL_ACPI_START;
- priv->cca = (struct crb_control_area __iomem *)
- devm_ioremap_nocache(dev, buf->control_area_pa, 0x1000);
- if (!priv->cca) {
- dev_err(dev, "ioremap of the control area failed\n");
- return -ENOMEM;
- }
-
- pa = ((u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_high)) << 32) |
- (u64) le32_to_cpu(ioread32(&priv->cca->cmd_pa_low));
- priv->cmd = devm_ioremap_nocache(dev, pa,
- ioread32(&priv->cca->cmd_size));
- if (!priv->cmd) {
- dev_err(dev, "ioremap of the command buffer failed\n");
- return -ENOMEM;
- }
-
- memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
- pa = le64_to_cpu(pa);
- priv->rsp = devm_ioremap_nocache(dev, pa,
- ioread32(&priv->cca->rsp_size));
- if (!priv->rsp) {
- dev_err(dev, "ioremap of the response buffer failed\n");
- return -ENOMEM;
- }
-
- chip->vendor.priv = priv;
-
- rc = tpm_get_timeouts(chip);
+ rc = crb_map_io(device, priv, buf);
if (rc)
return rc;
- chip->acpi_dev_handle = device->handle;
-
- rc = tpm2_do_selftest(chip);
- if (rc)
- return rc;
-
- return tpm_chip_register(chip);
+ return crb_init(device, priv);
}
static int crb_acpi_remove(struct acpi_device *device)
@@ -302,11 +344,11 @@ static int crb_acpi_remove(struct acpi_device *device)
struct device *dev = &device->dev;
struct tpm_chip *chip = dev_get_drvdata(dev);
- tpm_chip_unregister(chip);
-
if (chip->flags & TPM_CHIP_FLAG_TPM2)
tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_unregister(chip);
+
return 0;
}
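[Note: crb_map_res() exists to avoid double-mapping — when the requested window already lies inside the one region ioremapped from the ACPI resource list, it returns an offset into that mapping, and only out-of-range windows get a devm_ioremap_resource() of their own. The containment test it relies on boils down to (sketch of resource_contains() semantics, ignoring the flags check the real helper also performs):]

static bool range_contains_sketch(u64 outer_start, u64 outer_end,
				  u64 inner_start, u64 inner_end)
{
	return outer_start <= inner_start && inner_end <= outer_end;
}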
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index bd72fb04225e..4e6940acf639 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -232,7 +232,7 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
{
struct tcpa_event *event = v;
struct tcpa_event temp_event;
- char *tempPtr;
+ char *temp_ptr;
int i;
memcpy(&temp_event, event, sizeof(struct tcpa_event));
@@ -242,10 +242,16 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
temp_event.event_type = do_endian_conversion(event->event_type);
temp_event.event_size = do_endian_conversion(event->event_size);
- tempPtr = (char *)&temp_event;
+ temp_ptr = (char *) &temp_event;
- for (i = 0; i < sizeof(struct tcpa_event) + temp_event.event_size; i++)
- seq_putc(m, tempPtr[i]);
+ for (i = 0; i < (sizeof(struct tcpa_event) - 1) ; i++)
+ seq_putc(m, temp_ptr[i]);
+
+ temp_ptr = (char *) v;
+
+ for (i = (sizeof(struct tcpa_event) - 1);
+ i < (sizeof(struct tcpa_event) + temp_event.event_size); i++)
+ seq_putc(m, temp_ptr[i]);
return 0;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 8a3509cb10da..a507006728e0 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -28,7 +28,6 @@
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/freezer.h>
-#include <acpi/actbl2.h>
#include "tpm.h"
enum tis_access {
@@ -60,22 +59,18 @@ enum tis_int_flags {
};
enum tis_defaults {
- TIS_MEM_BASE = 0xFED40000,
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
};
struct tpm_info {
- unsigned long start;
- unsigned long len;
- unsigned int irq;
-};
-
-static struct tpm_info tis_default_info = {
- .start = TIS_MEM_BASE,
- .len = TIS_MEM_LEN,
- .irq = 0,
+ struct resource res;
+ /* irq > 0 means: use irq $irq;
+ * irq = 0 means: autoprobe for an irq;
+ * irq = -1 means: no irq support
+ */
+ int irq;
};
/* Some timeout values are needed before it is known whether the chip is
@@ -118,39 +113,11 @@ static inline int is_itpm(struct acpi_device *dev)
{
return has_hid(dev, "INTC0102");
}
-
-static inline int is_fifo(struct acpi_device *dev)
-{
- struct acpi_table_tpm2 *tbl;
- acpi_status st;
-
- /* TPM 1.2 FIFO */
- if (!has_hid(dev, "MSFT0101"))
- return 1;
-
- st = acpi_get_table(ACPI_SIG_TPM2, 1,
- (struct acpi_table_header **) &tbl);
- if (ACPI_FAILURE(st)) {
- dev_err(&dev->dev, "failed to get TPM2 ACPI table\n");
- return 0;
- }
-
- if (le32_to_cpu(tbl->start_method) != TPM2_START_FIFO)
- return 0;
-
- /* TPM 2.0 FIFO */
- return 1;
-}
#else
static inline int is_itpm(struct acpi_device *dev)
{
return 0;
}
-
-static inline int is_fifo(struct acpi_device *dev)
-{
- return 1;
-}
#endif
/* Before we attempt to access the TPM we must see that the valid bit is set.
@@ -716,9 +683,9 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
chip->acpi_dev_handle = acpi_dev_handle;
#endif
- chip->vendor.iobase = devm_ioremap(dev, tpm_info->start, tpm_info->len);
- if (!chip->vendor.iobase)
- return -EIO;
+ chip->vendor.iobase = devm_ioremap_resource(dev, &tpm_info->res);
+ if (IS_ERR(chip->vendor.iobase))
+ return PTR_ERR(chip->vendor.iobase);
/* Maximum timeouts */
chip->vendor.timeout_a = TIS_TIMEOUT_A_MAX;
@@ -807,7 +774,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
- if (interrupts) {
+ if (interrupts && tpm_info->irq != -1) {
if (tpm_info->irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
tpm_info->irq);
@@ -893,29 +860,29 @@ static int tpm_tis_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
-#ifdef CONFIG_PNP
static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
- const struct pnp_device_id *pnp_id)
+ const struct pnp_device_id *pnp_id)
{
- struct tpm_info tpm_info = tis_default_info;
+ struct tpm_info tpm_info = {};
acpi_handle acpi_dev_handle = NULL;
+ struct resource *res;
- tpm_info.start = pnp_mem_start(pnp_dev, 0);
- tpm_info.len = pnp_mem_len(pnp_dev, 0);
+ res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ tpm_info.res = *res;
if (pnp_irq_valid(pnp_dev, 0))
tpm_info.irq = pnp_irq(pnp_dev, 0);
else
- interrupts = false;
+ tpm_info.irq = -1;
-#ifdef CONFIG_ACPI
if (pnp_acpi_device(pnp_dev)) {
if (is_itpm(pnp_acpi_device(pnp_dev)))
itpm = true;
- acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;
+ acpi_dev_handle = ACPI_HANDLE(&pnp_dev->dev);
}
-#endif
return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle);
}
@@ -956,7 +923,6 @@ static struct pnp_driver tis_pnp_driver = {
module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
-#endif
#ifdef CONFIG_ACPI
static int tpm_check_resource(struct acpi_resource *ares, void *data)
@@ -964,11 +930,11 @@ static int tpm_check_resource(struct acpi_resource *ares, void *data)
struct tpm_info *tpm_info = (struct tpm_info *) data;
struct resource res;
- if (acpi_dev_resource_interrupt(ares, 0, &res)) {
+ if (acpi_dev_resource_interrupt(ares, 0, &res))
tpm_info->irq = res.start;
- } else if (acpi_dev_resource_memory(ares, &res)) {
- tpm_info->start = res.start;
- tpm_info->len = resource_size(&res);
+ else if (acpi_dev_resource_memory(ares, &res)) {
+ tpm_info->res = res;
+ tpm_info->res.name = NULL;
}
return 1;
@@ -976,14 +942,25 @@ static int tpm_check_resource(struct acpi_resource *ares, void *data)
static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
{
+ struct acpi_table_tpm2 *tbl;
+ acpi_status st;
struct list_head resources;
- struct tpm_info tpm_info = tis_default_info;
+ struct tpm_info tpm_info = {};
int ret;
- if (!is_fifo(acpi_dev))
+ st = acpi_get_table(ACPI_SIG_TPM2, 1,
+ (struct acpi_table_header **) &tbl);
+ if (ACPI_FAILURE(st) || tbl->header.length < sizeof(*tbl)) {
+ dev_err(&acpi_dev->dev,
+ FW_BUG "failed to get TPM2 ACPI table\n");
+ return -EINVAL;
+ }
+
+ if (tbl->start_method != ACPI_TPM2_MEMORY_MAPPED)
return -ENODEV;
INIT_LIST_HEAD(&resources);
+ tpm_info.irq = -1;
ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource,
&tpm_info);
if (ret < 0)
@@ -991,8 +968,11 @@ static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
acpi_dev_free_resource_list(&resources);
- if (!tpm_info.irq)
- interrupts = false;
+ if (resource_type(&tpm_info.res) != IORESOURCE_MEM) {
+ dev_err(&acpi_dev->dev,
+ FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ }
if (is_itpm(acpi_dev))
itpm = true;
@@ -1031,80 +1011,135 @@ static struct acpi_driver tis_acpi_driver = {
};
#endif
+static struct platform_device *force_pdev;
+
+static int tpm_tis_plat_probe(struct platform_device *pdev)
+{
+ struct tpm_info tpm_info = {};
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+ tpm_info.res = *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res) {
+ tpm_info.irq = res->start;
+ } else {
+ if (pdev != force_pdev)
+ tpm_info.irq = -1;
+ else
+ /* When forcing, auto-probe the IRQ */
+ tpm_info.irq = 0;
+ }
+
+ return tpm_tis_init(&pdev->dev, &tpm_info, NULL);
+}
+
+static int tpm_tis_plat_remove(struct platform_device *pdev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+
+ return 0;
+}
+
static struct platform_driver tis_drv = {
+ .probe = tpm_tis_plat_probe,
+ .remove = tpm_tis_plat_remove,
.driver = {
.name = "tpm_tis",
.pm = &tpm_tis_pm,
},
};
-static struct platform_device *pdev;
-
static bool force;
+#ifdef CONFIG_X86
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
+#endif
+
+static int tpm_tis_force_device(void)
+{
+ struct platform_device *pdev;
+ static const struct resource x86_resources[] = {
+ {
+ .start = 0xFED40000,
+ .end = 0xFED40000 + TIS_MEM_LEN - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ };
+
+ if (!force)
+ return 0;
+
+ /* The driver core will match the name tpm_tis of the device to
+ * the tpm_tis platform driver and complete the setup via
+ * tpm_tis_plat_probe
+ */
+ pdev = platform_device_register_simple("tpm_tis", -1, x86_resources,
+ ARRAY_SIZE(x86_resources));
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ force_pdev = pdev;
+
+ return 0;
+}
+
static int __init init_tis(void)
{
int rc;
-#ifdef CONFIG_PNP
- if (!force) {
- rc = pnp_register_driver(&tis_pnp_driver);
- if (rc)
- return rc;
- }
-#endif
+
+ rc = tpm_tis_force_device();
+ if (rc)
+ goto err_force;
+
+ rc = platform_driver_register(&tis_drv);
+ if (rc)
+ goto err_platform;
+
#ifdef CONFIG_ACPI
- if (!force) {
- rc = acpi_bus_register_driver(&tis_acpi_driver);
- if (rc) {
-#ifdef CONFIG_PNP
- pnp_unregister_driver(&tis_pnp_driver);
-#endif
- return rc;
- }
- }
+ rc = acpi_bus_register_driver(&tis_acpi_driver);
+ if (rc)
+ goto err_acpi;
#endif
- if (!force)
- return 0;
- rc = platform_driver_register(&tis_drv);
- if (rc < 0)
- return rc;
- pdev = platform_device_register_simple("tpm_tis", -1, NULL, 0);
- if (IS_ERR(pdev)) {
- rc = PTR_ERR(pdev);
- goto err_dev;
+ if (IS_ENABLED(CONFIG_PNP)) {
+ rc = pnp_register_driver(&tis_pnp_driver);
+ if (rc)
+ goto err_pnp;
}
- rc = tpm_tis_init(&pdev->dev, &tis_default_info, NULL);
- if (rc)
- goto err_init;
+
return 0;
-err_init:
- platform_device_unregister(pdev);
-err_dev:
- platform_driver_unregister(&tis_drv);
+
+err_pnp:
+#ifdef CONFIG_ACPI
+ acpi_bus_unregister_driver(&tis_acpi_driver);
+err_acpi:
+#endif
+ platform_driver_unregister(&tis_drv);
+err_platform:
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
+err_force:
return rc;
}
static void __exit cleanup_tis(void)
{
- struct tpm_chip *chip;
-#if defined(CONFIG_PNP) || defined(CONFIG_ACPI)
- if (!force) {
+ pnp_unregister_driver(&tis_pnp_driver);
#ifdef CONFIG_ACPI
- acpi_bus_unregister_driver(&tis_acpi_driver);
-#endif
-#ifdef CONFIG_PNP
- pnp_unregister_driver(&tis_pnp_driver);
+ acpi_bus_unregister_driver(&tis_acpi_driver);
#endif
- return;
- }
-#endif
- chip = dev_get_drvdata(&pdev->dev);
- tpm_chip_unregister(chip);
- tpm_tis_remove(chip);
- platform_device_unregister(pdev);
platform_driver_unregister(&tis_drv);
+
+ if (force_pdev)
+ platform_device_unregister(force_pdev);
}
module_init(init_tis);
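[Note: struct tpm_info now carries a three-state irq field (>0 fixed IRQ, 0 autoprobe, -1 none), which tpm_tis_init() consumes roughly as below — the autoprobe helper name is assumed from context rather than shown in this hunk:]

static void setup_irq_sketch(struct tpm_chip *chip, u32 intmask,
			     const struct tpm_info *info)
{
	if (info->irq > 0)		/* a concrete IRQ was provided */
		tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
					 info->irq);
	else if (info->irq == 0)	/* autoprobe */
		tpm_tis_probe_irq(chip, intmask);
	/* info->irq == -1: no interrupt support, stay in polling mode */
}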
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index a15ce4ef39cd..b098d2d0b7c4 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -171,7 +171,7 @@ static const struct tty_operations ttyprintk_ops = {
.ioctl = tpk_ioctl,
};
-static struct tty_port_operations null_ops = { };
+static const struct tty_port_operations null_ops = { };
static struct tty_driver *ttyprintk_driver;
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index 77d6c127e691..dcd19f3f182e 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -509,7 +509,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
channel->log2_element_size = ((format > 2) ?
2 : format);
- bytebufsize = channel->rd_buf_size = bufsize *
+ bytebufsize = bufsize *
(1 << channel->log2_element_size);
buffers = devm_kcalloc(dev, bufnum,
@@ -523,6 +523,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
if (!is_writebuf) {
channel->num_rd_buffers = bufnum;
+ channel->rd_buf_size = bytebufsize;
channel->rd_allow_partial = allowpartial;
channel->rd_synchronous = synchronous;
channel->rd_exclusive_open = exclusive_open;
@@ -533,6 +534,7 @@ static int xilly_setupchannels(struct xilly_endpoint *ep,
bufnum, bytebufsize);
} else if (channelnum > 0) {
channel->num_wr_buffers = bufnum;
+ channel->wr_buf_size = bytebufsize;
channel->seekable = seekable;
channel->wr_supports_nonempty = supports_nonempty;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index eca8e019e005..16f7d33421d8 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -6,9 +6,6 @@ config CLKDEV_LOOKUP
config HAVE_CLK_PREPARE
bool
-config HAVE_MACH_CLKDEV
- bool
-
config COMMON_CLK
bool
select HAVE_CLK_PREPARE
@@ -99,6 +96,14 @@ config COMMON_CLK_SI570
This driver supports Silicon Labs 570/571/598/599 programmable
clock generators.
+config COMMON_CLK_CDCE706
+ tristate "Clock driver for TI CDCE706 clock synthesizer"
+ depends on I2C
+ select REGMAP_I2C
+ select RATIONAL
+ ---help---
+ This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
+
config COMMON_CLK_CDCE925
tristate "Clock driver for TI CDCE925 devices"
depends on I2C
@@ -190,23 +195,14 @@ config COMMON_CLK_PWM
config COMMON_CLK_PXA
def_bool COMMON_CLK && ARCH_PXA
---help---
- Sypport for the Marvell PXA SoC.
-
-config COMMON_CLK_CDCE706
- tristate "Clock driver for TI CDCE706 clock synthesizer"
- depends on I2C
- select REGMAP_I2C
- select RATIONAL
- ---help---
- This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
+ Support for the Marvell PXA SoC.
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
-source "drivers/clk/qcom/Kconfig"
-
-endmenu
-
source "drivers/clk/mvebu/Kconfig"
-
+source "drivers/clk/qcom/Kconfig"
source "drivers/clk/samsung/Kconfig"
source "drivers/clk/tegra/Kconfig"
+source "drivers/clk/ti/Kconfig"
+
+endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index bae4be6501df..46869d696e4d 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -70,15 +70,14 @@ obj-$(CONFIG_COMMON_CLK_PXA) += pxa/
obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
-obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/
-obj-$(CONFIG_ARCH_RENESAS) += shmobile/
+obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
+obj-y += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_X86) += x86/
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index abc80949e1dd..e1aa210dd7aa 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -15,8 +15,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -28,8 +28,9 @@
struct clk_generated {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
struct clk_range range;
+ spinlock_t *lock;
u32 id;
u32 gckdiv;
u8 parent_id;
@@ -41,49 +42,52 @@ struct clk_generated {
static int clk_generated_enable(struct clk_hw *hw)
{
struct clk_generated *gck = to_clk_generated(hw);
- struct at91_pmc *pmc = gck->pmc;
- u32 tmp;
+ unsigned long flags;
pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
__func__, gck->gckdiv, gck->parent_id);
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR) &
- ~(AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK);
- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_GCKCSS(gck->parent_id)
- | AT91_PMC_PCR_CMD
- | AT91_PMC_PCR_GCKDIV(gck->gckdiv)
- | AT91_PMC_PCR_GCKEN);
- pmc_unlock(pmc);
+ spin_lock_irqsave(gck->lock, flags);
+ regmap_write(gck->regmap, AT91_PMC_PCR,
+ (gck->id & AT91_PMC_PCR_PID_MASK));
+ regmap_update_bits(gck->regmap, AT91_PMC_PCR,
+ AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
+ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
+ AT91_PMC_PCR_GCKCSS(gck->parent_id) |
+ AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_GCKDIV(gck->gckdiv) |
+ AT91_PMC_PCR_GCKEN);
+ spin_unlock_irqrestore(gck->lock, flags);
return 0;
}
static void clk_generated_disable(struct clk_hw *hw)
{
struct clk_generated *gck = to_clk_generated(hw);
- struct at91_pmc *pmc = gck->pmc;
- u32 tmp;
-
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_GCKEN;
- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
- pmc_unlock(pmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(gck->lock, flags);
+ regmap_write(gck->regmap, AT91_PMC_PCR,
+ (gck->id & AT91_PMC_PCR_PID_MASK));
+ regmap_update_bits(gck->regmap, AT91_PMC_PCR,
+ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
+ AT91_PMC_PCR_CMD);
+ spin_unlock_irqrestore(gck->lock, flags);
}
static int clk_generated_is_enabled(struct clk_hw *hw)
{
struct clk_generated *gck = to_clk_generated(hw);
- struct at91_pmc *pmc = gck->pmc;
- int ret;
+ unsigned long flags;
+ unsigned int status;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
- ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_GCKEN);
- pmc_unlock(pmc);
+ spin_lock_irqsave(gck->lock, flags);
+ regmap_write(gck->regmap, AT91_PMC_PCR,
+ (gck->id & AT91_PMC_PCR_PID_MASK));
+ regmap_read(gck->regmap, AT91_PMC_PCR, &status);
+ spin_unlock_irqrestore(gck->lock, flags);
- return ret;
+ return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
}
static unsigned long
@@ -214,13 +218,14 @@ static const struct clk_ops generated_ops = {
*/
static void clk_generated_startup(struct clk_generated *gck)
{
- struct at91_pmc *pmc = gck->pmc;
u32 tmp;
+ unsigned long flags;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR);
- pmc_unlock(pmc);
+ spin_lock_irqsave(gck->lock, flags);
+ regmap_write(gck->regmap, AT91_PMC_PCR,
+ (gck->id & AT91_PMC_PCR_PID_MASK));
+ regmap_read(gck->regmap, AT91_PMC_PCR, &tmp);
+ spin_unlock_irqrestore(gck->lock, flags);
gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
>> AT91_PMC_PCR_GCKCSS_OFFSET;
@@ -229,8 +234,8 @@ static void clk_generated_startup(struct clk_generated *gck)
}
static struct clk * __init
-at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
- const char **parent_names, u8 num_parents,
+at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char
+ *name, const char **parent_names, u8 num_parents,
u8 id, const struct clk_range *range)
{
struct clk_generated *gck;
@@ -249,7 +254,8 @@ at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
gck->id = id;
gck->hw.init = &init;
- gck->pmc = pmc;
+ gck->regmap = regmap;
+ gck->lock = lock;
gck->range = *range;
clk = clk_register(NULL, &gck->hw);
@@ -261,20 +267,20 @@ at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
return clk;
}
-void __init of_sama5d2_clk_generated_setup(struct device_node *np,
- struct at91_pmc *pmc)
+void __init of_sama5d2_clk_generated_setup(struct device_node *np)
{
int num;
u32 id;
const char *name;
struct clk *clk;
- int num_parents;
+ unsigned int num_parents;
const char *parent_names[GENERATED_SOURCE_MAX];
struct device_node *gcknp;
struct clk_range range = CLK_RANGE(0, 0);
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > GENERATED_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
@@ -283,6 +289,10 @@ void __init of_sama5d2_clk_generated_setup(struct device_node *np,
if (!num || num > PERIPHERAL_MAX)
return;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
for_each_child_of_node(np, gcknp) {
if (of_property_read_u32(gcknp, "reg", &id))
continue;
@@ -296,11 +306,14 @@ void __init of_sama5d2_clk_generated_setup(struct device_node *np,
of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
&range);
- clk = at91_clk_register_generated(pmc, name, parent_names,
- num_parents, id, &range);
+ clk = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
+ parent_names, num_parents,
+ id, &range);
if (IS_ERR(clk))
continue;
of_clk_add_provider(gcknp, of_clk_src_simple_get, clk);
}
}
+CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated",
+ of_sama5d2_clk_generated_setup);
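
clk-generated.c now takes the PMC register map from the parent syscon node
plus an explicit spinlock instead of the at91_pmc accessors. The lock is
still required because AT91_PMC_PCR is an indexed register: the peripheral
ID is written first, then the same offset is read or updated, and the two
steps must not interleave between clocks. A condensed sketch of that
pattern, assuming the field macros from <linux/clk/at91_pmc.h> used above:

#include <linux/clk/at91_pmc.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>

static unsigned int pmc_pcr_read_id(struct regmap *regmap, spinlock_t *lock,
				    u32 id)
{
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(lock, flags);
	/* step 1: select the peripheral whose PCR view we want */
	regmap_write(regmap, AT91_PMC_PCR, id & AT91_PMC_PCR_PID_MASK);
	/* step 2: read back that peripheral's control bits */
	regmap_read(regmap, AT91_PMC_PCR, &val);
	spin_unlock_irqrestore(lock, flags);

	return val;
}
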
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 61566bcefa53..819f5842fa66 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -15,15 +15,9 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
-#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
#include "pmc.h"
@@ -31,7 +25,7 @@
struct clk_sama5d4_h32mx {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_clk_sama5d4_h32mx(hw) container_of(hw, struct clk_sama5d4_h32mx, hw)
@@ -40,8 +34,10 @@ static unsigned long clk_sama5d4_h32mx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw);
+ unsigned int mckr;
- if (pmc_read(h32mxclk->pmc, AT91_PMC_MCKR) & AT91_PMC_H32MXDIV)
+ regmap_read(h32mxclk->regmap, AT91_PMC_MCKR, &mckr);
+ if (mckr & AT91_PMC_H32MXDIV)
return parent_rate / 2;
if (parent_rate > H32MX_MAX_FREQ)
@@ -70,18 +66,16 @@ static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw);
- struct at91_pmc *pmc = h32mxclk->pmc;
- u32 tmp;
+ u32 mckr = 0;
if (parent_rate != rate && (parent_rate / 2) != rate)
return -EINVAL;
- pmc_lock(pmc);
- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_H32MXDIV;
if ((parent_rate / 2) == rate)
- tmp |= AT91_PMC_H32MXDIV;
- pmc_write(pmc, AT91_PMC_MCKR, tmp);
- pmc_unlock(pmc);
+ mckr = AT91_PMC_H32MXDIV;
+
+ regmap_update_bits(h32mxclk->regmap, AT91_PMC_MCKR,
+ AT91_PMC_H32MXDIV, mckr);
return 0;
}
@@ -92,14 +86,18 @@ static const struct clk_ops h32mx_ops = {
.set_rate = clk_sama5d4_h32mx_set_rate,
};
-void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np)
{
struct clk_sama5d4_h32mx *h32mxclk;
struct clk_init_data init;
const char *parent_name;
+ struct regmap *regmap;
struct clk *clk;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
h32mxclk = kzalloc(sizeof(*h32mxclk), GFP_KERNEL);
if (!h32mxclk)
return;
@@ -113,7 +111,7 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
init.flags = CLK_SET_RATE_GATE;
h32mxclk->hw.init = &init;
- h32mxclk->pmc = pmc;
+ h32mxclk->regmap = regmap;
clk = clk_register(NULL, &h32mxclk->hw);
if (!clk) {
@@ -123,3 +121,5 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx",
+ of_sama5d4_clk_h32mx_setup);
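
As in clk-h32mx.c here, the setup functions become static and
self-registering: CLK_OF_DECLARE binds them to a compatible string, and the
PMC register map is fetched from the parent node rather than being passed in
by a central PMC driver. The shape of that path, with placeholder ("foo")
names:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static void __init of_foo_clk_setup(struct device_node *np)
{
	struct regmap *regmap;

	/* the clock node sits under the PMC node, which is a syscon */
	regmap = syscon_node_to_regmap(of_get_parent(np));
	if (IS_ERR(regmap))
		return;

	/* ...allocate the clk_hw, stash regmap, clk_register(),
	 * of_clk_add_provider(), exactly as in the functions above... */
}
CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", of_foo_clk_setup);
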
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index fd7247deabdc..58b5baca670c 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -13,13 +13,8 @@
#include <linux/clk/at91_pmc.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -34,18 +29,14 @@
struct clk_main_osc {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
};
#define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw)
struct clk_main_rc_osc {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
unsigned long frequency;
unsigned long accuracy;
};
@@ -54,51 +45,47 @@ struct clk_main_rc_osc {
struct clk_rm9200_main {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_clk_rm9200_main(hw) container_of(hw, struct clk_rm9200_main, hw)
struct clk_sam9x5_main {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
u8 parent;
};
#define to_clk_sam9x5_main(hw) container_of(hw, struct clk_sam9x5_main, hw)
-static irqreturn_t clk_main_osc_irq_handler(int irq, void *dev_id)
+static inline bool clk_main_osc_ready(struct regmap *regmap)
{
- struct clk_main_osc *osc = dev_id;
+ unsigned int status;
- wake_up(&osc->wait);
- disable_irq_nosync(osc->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_MOSCS;
}
static int clk_main_osc_prepare(struct clk_hw *hw)
{
struct clk_main_osc *osc = to_clk_main_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
+ struct regmap *regmap = osc->regmap;
u32 tmp;
- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
+ tmp &= ~MOR_KEY_MASK;
+
if (tmp & AT91_PMC_OSCBYPASS)
return 0;
if (!(tmp & AT91_PMC_MOSCEN)) {
tmp |= AT91_PMC_MOSCEN | AT91_PMC_KEY;
- pmc_write(pmc, AT91_CKGR_MOR, tmp);
+ regmap_write(regmap, AT91_CKGR_MOR, tmp);
}
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) {
- enable_irq(osc->irq);
- wait_event(osc->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS);
- }
+ while (!clk_main_osc_ready(regmap))
+ cpu_relax();
return 0;
}
@@ -106,9 +93,10 @@ static int clk_main_osc_prepare(struct clk_hw *hw)
static void clk_main_osc_unprepare(struct clk_hw *hw)
{
struct clk_main_osc *osc = to_clk_main_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+ struct regmap *regmap = osc->regmap;
+ u32 tmp;
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
if (tmp & AT91_PMC_OSCBYPASS)
return;
@@ -116,20 +104,22 @@ static void clk_main_osc_unprepare(struct clk_hw *hw)
return;
tmp &= ~(AT91_PMC_KEY | AT91_PMC_MOSCEN);
- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
+ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
}
static int clk_main_osc_is_prepared(struct clk_hw *hw)
{
struct clk_main_osc *osc = to_clk_main_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+ struct regmap *regmap = osc->regmap;
+ u32 tmp, status;
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
if (tmp & AT91_PMC_OSCBYPASS)
return 1;
- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS) &&
- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN));
+ regmap_read(regmap, AT91_PMC_SR, &status);
+
+ return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
}
static const struct clk_ops main_osc_ops = {
@@ -139,18 +129,16 @@ static const struct clk_ops main_osc_ops = {
};
static struct clk * __init
-at91_clk_register_main_osc(struct at91_pmc *pmc,
- unsigned int irq,
+at91_clk_register_main_osc(struct regmap *regmap,
const char *name,
const char *parent_name,
bool bypass)
{
- int ret;
struct clk_main_osc *osc;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !irq || !name || !parent_name)
+ if (!name || !parent_name)
return ERR_PTR(-EINVAL);
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
@@ -164,85 +152,70 @@ at91_clk_register_main_osc(struct at91_pmc *pmc,
init.flags = CLK_IGNORE_UNUSED;
osc->hw.init = &init;
- osc->pmc = pmc;
- osc->irq = irq;
-
- init_waitqueue_head(&osc->wait);
- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
- ret = request_irq(osc->irq, clk_main_osc_irq_handler,
- IRQF_TRIGGER_HIGH, name, osc);
- if (ret) {
- kfree(osc);
- return ERR_PTR(ret);
- }
+ osc->regmap = regmap;
if (bypass)
- pmc_write(pmc, AT91_CKGR_MOR,
- (pmc_read(pmc, AT91_CKGR_MOR) &
- ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) |
- AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
+ regmap_update_bits(regmap,
+ AT91_CKGR_MOR, MOR_KEY_MASK |
+ AT91_PMC_MOSCEN,
+ AT91_PMC_OSCBYPASS | AT91_PMC_KEY);
clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk)) {
- free_irq(irq, osc);
+ if (IS_ERR(clk))
kfree(osc);
- }
return clk;
}
-void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np)
{
struct clk *clk;
- unsigned int irq;
const char *name = np->name;
const char *parent_name;
+ struct regmap *regmap;
bool bypass;
of_property_read_string(np, "clock-output-names", &name);
bypass = of_property_read_bool(np, "atmel,osc-bypass");
parent_name = of_clk_get_parent_name(np, 0);
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
return;
- clk = at91_clk_register_main_osc(pmc, irq, name, parent_name, bypass);
+ clk = at91_clk_register_main_osc(regmap, name, parent_name, bypass);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc",
+ of_at91rm9200_clk_main_osc_setup);
-static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id)
+static bool clk_main_rc_osc_ready(struct regmap *regmap)
{
- struct clk_main_rc_osc *osc = dev_id;
+ unsigned int status;
- wake_up(&osc->wait);
- disable_irq_nosync(osc->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_MOSCRCS;
}
static int clk_main_rc_osc_prepare(struct clk_hw *hw)
{
struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
- u32 tmp;
+ struct regmap *regmap = osc->regmap;
+ unsigned int mor;
- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
- if (!(tmp & AT91_PMC_MOSCRCEN)) {
- tmp |= AT91_PMC_MOSCRCEN | AT91_PMC_KEY;
- pmc_write(pmc, AT91_CKGR_MOR, tmp);
- }
+ if (!(mor & AT91_PMC_MOSCRCEN))
+ regmap_update_bits(regmap, AT91_CKGR_MOR,
+ MOR_KEY_MASK | AT91_PMC_MOSCRCEN,
+ AT91_PMC_MOSCRCEN | AT91_PMC_KEY);
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS)) {
- enable_irq(osc->irq);
- wait_event(osc->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS);
- }
+ while (!clk_main_rc_osc_ready(regmap))
+ cpu_relax();
return 0;
}
@@ -250,23 +223,28 @@ static int clk_main_rc_osc_prepare(struct clk_hw *hw)
static void clk_main_rc_osc_unprepare(struct clk_hw *hw)
{
struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR);
+ struct regmap *regmap = osc->regmap;
+ unsigned int mor;
+
+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
- if (!(tmp & AT91_PMC_MOSCRCEN))
+ if (!(mor & AT91_PMC_MOSCRCEN))
return;
- tmp &= ~(MOR_KEY_MASK | AT91_PMC_MOSCRCEN);
- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY);
+ regmap_update_bits(regmap, AT91_CKGR_MOR,
+ MOR_KEY_MASK | AT91_PMC_MOSCRCEN, AT91_PMC_KEY);
}
static int clk_main_rc_osc_is_prepared(struct clk_hw *hw)
{
struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
- struct at91_pmc *pmc = osc->pmc;
+ struct regmap *regmap = osc->regmap;
+ unsigned int mor, status;
- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS) &&
- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCRCEN));
+ regmap_read(regmap, AT91_CKGR_MOR, &mor);
+ regmap_read(regmap, AT91_PMC_SR, &status);
+
+ return (mor & AT91_PMC_MOSCRCEN) && (status & AT91_PMC_MOSCRCS);
}
static unsigned long clk_main_rc_osc_recalc_rate(struct clk_hw *hw,
@@ -294,17 +272,15 @@ static const struct clk_ops main_rc_osc_ops = {
};
static struct clk * __init
-at91_clk_register_main_rc_osc(struct at91_pmc *pmc,
- unsigned int irq,
+at91_clk_register_main_rc_osc(struct regmap *regmap,
const char *name,
u32 frequency, u32 accuracy)
{
- int ret;
struct clk_main_rc_osc *osc;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !irq || !name || !frequency)
+ if (!name || !frequency)
return ERR_PTR(-EINVAL);
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
@@ -315,66 +291,56 @@ at91_clk_register_main_rc_osc(struct at91_pmc *pmc,
init.ops = &main_rc_osc_ops;
init.parent_names = NULL;
init.num_parents = 0;
- init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
+ init.flags = CLK_IGNORE_UNUSED;
osc->hw.init = &init;
- osc->pmc = pmc;
- osc->irq = irq;
+ osc->regmap = regmap;
osc->frequency = frequency;
osc->accuracy = accuracy;
- init_waitqueue_head(&osc->wait);
- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
- ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler,
- IRQF_TRIGGER_HIGH, name, osc);
- if (ret)
- return ERR_PTR(ret);
-
clk = clk_register(NULL, &osc->hw);
- if (IS_ERR(clk)) {
- free_irq(irq, osc);
+ if (IS_ERR(clk))
kfree(osc);
- }
return clk;
}
-void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np)
{
struct clk *clk;
- unsigned int irq;
u32 frequency = 0;
u32 accuracy = 0;
const char *name = np->name;
+ struct regmap *regmap;
of_property_read_string(np, "clock-output-names", &name);
of_property_read_u32(np, "clock-frequency", &frequency);
of_property_read_u32(np, "clock-accuracy", &accuracy);
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
return;
- clk = at91_clk_register_main_rc_osc(pmc, irq, name, frequency,
- accuracy);
+ clk = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc",
+ of_at91sam9x5_clk_main_rc_osc_setup);
-static int clk_main_probe_frequency(struct at91_pmc *pmc)
+static int clk_main_probe_frequency(struct regmap *regmap)
{
unsigned long prep_time, timeout;
- u32 tmp;
+ unsigned int mcfr;
timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT);
do {
prep_time = jiffies;
- tmp = pmc_read(pmc, AT91_CKGR_MCFR);
- if (tmp & AT91_PMC_MAINRDY)
+ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
+ if (mcfr & AT91_PMC_MAINRDY)
return 0;
usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT);
} while (time_before(prep_time, timeout));
@@ -382,34 +348,37 @@ static int clk_main_probe_frequency(struct at91_pmc *pmc)
return -ETIMEDOUT;
}
-static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc,
+static unsigned long clk_main_recalc_rate(struct regmap *regmap,
unsigned long parent_rate)
{
- u32 tmp;
+ unsigned int mcfr;
if (parent_rate)
return parent_rate;
pr_warn("Main crystal frequency not set, using approximate value\n");
- tmp = pmc_read(pmc, AT91_CKGR_MCFR);
- if (!(tmp & AT91_PMC_MAINRDY))
+ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr);
+ if (!(mcfr & AT91_PMC_MAINRDY))
return 0;
- return ((tmp & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV;
+ return ((mcfr & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV;
}
static int clk_rm9200_main_prepare(struct clk_hw *hw)
{
struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
- return clk_main_probe_frequency(clkmain->pmc);
+ return clk_main_probe_frequency(clkmain->regmap);
}
static int clk_rm9200_main_is_prepared(struct clk_hw *hw)
{
struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
+ unsigned int status;
+
+ regmap_read(clkmain->regmap, AT91_CKGR_MCFR, &status);
- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINRDY);
+ return status & AT91_PMC_MAINRDY ? 1 : 0;
}
static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
@@ -417,7 +386,7 @@ static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw,
{
struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw);
- return clk_main_recalc_rate(clkmain->pmc, parent_rate);
+ return clk_main_recalc_rate(clkmain->regmap, parent_rate);
}
static const struct clk_ops rm9200_main_ops = {
@@ -427,7 +396,7 @@ static const struct clk_ops rm9200_main_ops = {
};
static struct clk * __init
-at91_clk_register_rm9200_main(struct at91_pmc *pmc,
+at91_clk_register_rm9200_main(struct regmap *regmap,
const char *name,
const char *parent_name)
{
@@ -435,7 +404,7 @@ at91_clk_register_rm9200_main(struct at91_pmc *pmc,
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !name)
+ if (!name)
return ERR_PTR(-EINVAL);
if (!parent_name)
@@ -452,7 +421,7 @@ at91_clk_register_rm9200_main(struct at91_pmc *pmc,
init.flags = 0;
clkmain->hw.init = &init;
- clkmain->pmc = pmc;
+ clkmain->regmap = regmap;
clk = clk_register(NULL, &clkmain->hw);
if (IS_ERR(clk))
@@ -461,52 +430,54 @@ at91_clk_register_rm9200_main(struct at91_pmc *pmc,
return clk;
}
-void __init of_at91rm9200_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_main_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- clk = at91_clk_register_rm9200_main(pmc, name, parent_name);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91_clk_register_rm9200_main(regmap, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main",
+ of_at91rm9200_clk_main_setup);
-static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id)
+static inline bool clk_sam9x5_main_ready(struct regmap *regmap)
{
- struct clk_sam9x5_main *clkmain = dev_id;
+ unsigned int status;
- wake_up(&clkmain->wait);
- disable_irq_nosync(clkmain->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_MOSCSELS ? 1 : 0;
}
static int clk_sam9x5_main_prepare(struct clk_hw *hw)
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- struct at91_pmc *pmc = clkmain->pmc;
+ struct regmap *regmap = clkmain->regmap;
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
- enable_irq(clkmain->irq);
- wait_event(clkmain->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
- }
+ while (!clk_sam9x5_main_ready(regmap))
+ cpu_relax();
- return clk_main_probe_frequency(pmc);
+ return clk_main_probe_frequency(regmap);
}
static int clk_sam9x5_main_is_prepared(struct clk_hw *hw)
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
+ return clk_sam9x5_main_ready(clkmain->regmap);
}
static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw,
@@ -514,30 +485,28 @@ static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw,
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- return clk_main_recalc_rate(clkmain->pmc, parent_rate);
+ return clk_main_recalc_rate(clkmain->regmap, parent_rate);
}
static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
- struct at91_pmc *pmc = clkmain->pmc;
- u32 tmp;
+ struct regmap *regmap = clkmain->regmap;
+ unsigned int tmp;
if (index > 1)
return -EINVAL;
- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK;
+ regmap_read(regmap, AT91_CKGR_MOR, &tmp);
+ tmp &= ~MOR_KEY_MASK;
if (index && !(tmp & AT91_PMC_MOSCSEL))
- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
+ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL);
else if (!index && (tmp & AT91_PMC_MOSCSEL))
- pmc_write(pmc, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
+ regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL);
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) {
- enable_irq(clkmain->irq);
- wait_event(clkmain->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS);
- }
+ while (!clk_sam9x5_main_ready(regmap))
+ cpu_relax();
return 0;
}
@@ -545,8 +514,11 @@ static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index)
static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
{
struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+ unsigned int status;
+
+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN);
+ return status & AT91_PMC_MOSCEN ? 1 : 0;
}
static const struct clk_ops sam9x5_main_ops = {
@@ -558,18 +530,17 @@ static const struct clk_ops sam9x5_main_ops = {
};
static struct clk * __init
-at91_clk_register_sam9x5_main(struct at91_pmc *pmc,
- unsigned int irq,
+at91_clk_register_sam9x5_main(struct regmap *regmap,
const char *name,
const char **parent_names,
int num_parents)
{
- int ret;
struct clk_sam9x5_main *clkmain;
struct clk *clk = NULL;
struct clk_init_data init;
+ unsigned int status;
- if (!pmc || !irq || !name)
+ if (!name)
return ERR_PTR(-EINVAL);
if (!parent_names || !num_parents)
@@ -586,51 +557,42 @@ at91_clk_register_sam9x5_main(struct at91_pmc *pmc,
init.flags = CLK_SET_PARENT_GATE;
clkmain->hw.init = &init;
- clkmain->pmc = pmc;
- clkmain->irq = irq;
- clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) &
- AT91_PMC_MOSCEN);
- init_waitqueue_head(&clkmain->wait);
- irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN);
- ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler,
- IRQF_TRIGGER_HIGH, name, clkmain);
- if (ret)
- return ERR_PTR(ret);
+ clkmain->regmap = regmap;
+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+ clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
clk = clk_register(NULL, &clkmain->hw);
- if (IS_ERR(clk)) {
- free_irq(clkmain->irq, clkmain);
+ if (IS_ERR(clk))
kfree(clkmain);
- }
return clk;
}
-void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_main_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_names[2];
- int num_parents;
- unsigned int irq;
+ unsigned int num_parents;
const char *name = np->name;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > 2)
+ if (num_parents == 0 || num_parents > 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
of_property_read_string(np, "clock-output-names", &name);
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
- return;
-
- clk = at91_clk_register_sam9x5_main(pmc, irq, name, parent_names,
+ clk = at91_clk_register_sam9x5_main(regmap, name, parent_names,
num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main",
+ of_at91sam9x5_clk_main_setup);
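
Throughout clk-main.c the interrupt-driven handshake (request_irq() plus a
wait queue woken from the PMC interrupt) is replaced by polling AT91_PMC_SR
with cpu_relax(); the oscillator-ready bits settle quickly, so the busy-wait
is cheap and the driver no longer depends on PMC interrupts. The recurring
pattern, factored out as a sketch:

#include <linux/clk/at91_pmc.h>
#include <linux/regmap.h>

static bool pmc_sr_set(struct regmap *regmap, unsigned int mask)
{
	unsigned int status;

	regmap_read(regmap, AT91_PMC_SR, &status);
	return status & mask;
}

static void pmc_sr_wait(struct regmap *regmap, unsigned int mask)
{
	/* e.g. mask = AT91_PMC_MOSCS / MOSCRCS / MOSCSELS as above */
	while (!pmc_sr_set(regmap, mask))
		cpu_relax();
}
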
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 620ea323356b..d1021e106191 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -12,13 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -44,32 +39,26 @@ struct clk_master_layout {
struct clk_master {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
const struct clk_master_layout *layout;
const struct clk_master_characteristics *characteristics;
};
-static irqreturn_t clk_master_irq_handler(int irq, void *dev_id)
+static inline bool clk_master_ready(struct regmap *regmap)
{
- struct clk_master *master = (struct clk_master *)dev_id;
+ unsigned int status;
- wake_up(&master->wait);
- disable_irq_nosync(master->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_MCKRDY ? 1 : 0;
}
+
static int clk_master_prepare(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
- struct at91_pmc *pmc = master->pmc;
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY)) {
- enable_irq(master->irq);
- wait_event(master->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
- }
+ while (!clk_master_ready(master->regmap))
+ cpu_relax();
return 0;
}
@@ -78,7 +67,7 @@ static int clk_master_is_prepared(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
- return !!(pmc_read(master->pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY);
+ return clk_master_ready(master->regmap);
}
static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
@@ -88,18 +77,16 @@ static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
u8 div;
unsigned long rate = parent_rate;
struct clk_master *master = to_clk_master(hw);
- struct at91_pmc *pmc = master->pmc;
const struct clk_master_layout *layout = master->layout;
const struct clk_master_characteristics *characteristics =
master->characteristics;
- u32 tmp;
+ unsigned int mckr;
- pmc_lock(pmc);
- tmp = pmc_read(pmc, AT91_PMC_MCKR) & layout->mask;
- pmc_unlock(pmc);
+ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
+ mckr &= layout->mask;
- pres = (tmp >> layout->pres_shift) & MASTER_PRES_MASK;
- div = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX)
rate /= 3;
@@ -119,9 +106,11 @@ static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
static u8 clk_master_get_parent(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
- struct at91_pmc *pmc = master->pmc;
+ unsigned int mckr;
- return pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_CSS;
+ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
+
+ return mckr & AT91_PMC_CSS;
}
static const struct clk_ops master_ops = {
@@ -132,18 +121,17 @@ static const struct clk_ops master_ops = {
};
static struct clk * __init
-at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
+at91_clk_register_master(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics)
{
- int ret;
struct clk_master *master;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !irq || !name || !num_parents || !parent_names)
+ if (!name || !num_parents || !parent_names)
return ERR_PTR(-EINVAL);
master = kzalloc(sizeof(*master), GFP_KERNEL);
@@ -159,20 +147,10 @@ at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
master->hw.init = &init;
master->layout = layout;
master->characteristics = characteristics;
- master->pmc = pmc;
- master->irq = irq;
- init_waitqueue_head(&master->wait);
- irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
- ret = request_irq(master->irq, clk_master_irq_handler,
- IRQF_TRIGGER_HIGH, "clk-master", master);
- if (ret) {
- kfree(master);
- return ERR_PTR(ret);
- }
+ master->regmap = regmap;
clk = clk_register(NULL, &master->hw);
if (IS_ERR(clk)) {
- free_irq(master->irq, master);
kfree(master);
}
@@ -217,18 +195,18 @@ out_free_characteristics:
}
static void __init
-of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
+of_at91_clk_master_setup(struct device_node *np,
const struct clk_master_layout *layout)
{
struct clk *clk;
- int num_parents;
- unsigned int irq;
+ unsigned int num_parents;
const char *parent_names[MASTER_SOURCE_MAX];
const char *name = np->name;
struct clk_master_characteristics *characteristics;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > MASTER_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
@@ -239,11 +217,11 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
if (!characteristics)
return;
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
- goto out_free_characteristics;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
- clk = at91_clk_register_master(pmc, irq, name, num_parents,
+ clk = at91_clk_register_master(regmap, name, num_parents,
parent_names, layout,
characteristics);
if (IS_ERR(clk))
@@ -256,14 +234,16 @@ out_free_characteristics:
kfree(characteristics);
}
-void __init of_at91rm9200_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_master_setup(struct device_node *np)
{
- of_at91_clk_master_setup(np, pmc, &at91rm9200_master_layout);
+ of_at91_clk_master_setup(np, &at91rm9200_master_layout);
}
+CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master",
+ of_at91rm9200_clk_master_setup);
-void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_master_setup(struct device_node *np)
{
- of_at91_clk_master_setup(np, pmc, &at91sam9x5_master_layout);
+ of_at91_clk_master_setup(np, &at91sam9x5_master_layout);
}
+CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master",
+ of_at91sam9x5_clk_master_setup);
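
With the regmap conversion, clk_master_recalc_rate() no longer needs
pmc_lock() around a single MCKR read, since regmap serializes individual
accesses itself. Decoding then proceeds on the value it fetched, roughly
(MASTER_* constants and layout come from clk-master.c):

	unsigned int mckr, pres, div;

	regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
	mckr &= layout->mask;
	pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK;
	div  = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
	/* rate = parent_rate >> pres (or /3 on the div3-capable parts),
	 * then reduced by the SoC's master divider */
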
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 58f3b568e9cb..fd160728e990 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -12,11 +12,13 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
+DEFINE_SPINLOCK(pmc_pcr_lock);
+
#define PERIPHERAL_MAX 64
#define PERIPHERAL_AT91RM9200 0
@@ -33,7 +35,7 @@
struct clk_peripheral {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
u32 id;
};
@@ -41,8 +43,9 @@ struct clk_peripheral {
struct clk_sam9x5_peripheral {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
struct clk_range range;
+ spinlock_t *lock;
u32 id;
u32 div;
bool auto_div;
@@ -54,7 +57,6 @@ struct clk_sam9x5_peripheral {
static int clk_peripheral_enable(struct clk_hw *hw)
{
struct clk_peripheral *periph = to_clk_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
int offset = AT91_PMC_PCER;
u32 id = periph->id;
@@ -62,14 +64,14 @@ static int clk_peripheral_enable(struct clk_hw *hw)
return 0;
if (id > PERIPHERAL_ID_MAX)
offset = AT91_PMC_PCER1;
- pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id));
+
return 0;
}
static void clk_peripheral_disable(struct clk_hw *hw)
{
struct clk_peripheral *periph = to_clk_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
int offset = AT91_PMC_PCDR;
u32 id = periph->id;
@@ -77,21 +79,23 @@ static void clk_peripheral_disable(struct clk_hw *hw)
return;
if (id > PERIPHERAL_ID_MAX)
offset = AT91_PMC_PCDR1;
- pmc_write(pmc, offset, PERIPHERAL_MASK(id));
+ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id));
}
static int clk_peripheral_is_enabled(struct clk_hw *hw)
{
struct clk_peripheral *periph = to_clk_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
int offset = AT91_PMC_PCSR;
+ unsigned int status;
u32 id = periph->id;
if (id < PERIPHERAL_ID_MIN)
return 1;
if (id > PERIPHERAL_ID_MAX)
offset = AT91_PMC_PCSR1;
- return !!(pmc_read(pmc, offset) & PERIPHERAL_MASK(id));
+ regmap_read(periph->regmap, offset, &status);
+
+ return status & PERIPHERAL_MASK(id) ? 1 : 0;
}
static const struct clk_ops peripheral_ops = {
@@ -101,14 +105,14 @@ static const struct clk_ops peripheral_ops = {
};
static struct clk * __init
-at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
+at91_clk_register_peripheral(struct regmap *regmap, const char *name,
const char *parent_name, u32 id)
{
struct clk_peripheral *periph;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !name || !parent_name || id > PERIPHERAL_ID_MAX)
+ if (!name || !parent_name || id > PERIPHERAL_ID_MAX)
return ERR_PTR(-EINVAL);
periph = kzalloc(sizeof(*periph), GFP_KERNEL);
@@ -123,7 +127,7 @@ at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name,
periph->id = id;
periph->hw.init = &init;
- periph->pmc = pmc;
+ periph->regmap = regmap;
clk = clk_register(NULL, &periph->hw);
if (IS_ERR(clk))
@@ -160,53 +164,58 @@ static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
- u32 tmp;
+ unsigned long flags;
if (periph->id < PERIPHERAL_ID_MIN)
return 0;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_DIV_MASK;
- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_DIV(periph->div)
- | AT91_PMC_PCR_CMD
- | AT91_PMC_PCR_EN);
- pmc_unlock(pmc);
+ spin_lock_irqsave(periph->lock, flags);
+ regmap_write(periph->regmap, AT91_PMC_PCR,
+ (periph->id & AT91_PMC_PCR_PID_MASK));
+ regmap_update_bits(periph->regmap, AT91_PMC_PCR,
+ AT91_PMC_PCR_DIV_MASK | AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_EN,
+ AT91_PMC_PCR_DIV(periph->div) |
+ AT91_PMC_PCR_CMD |
+ AT91_PMC_PCR_EN);
+ spin_unlock_irqrestore(periph->lock, flags);
+
return 0;
}
static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
- u32 tmp;
+ unsigned long flags;
if (periph->id < PERIPHERAL_ID_MIN)
return;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_EN;
- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
- pmc_unlock(pmc);
+ spin_lock_irqsave(periph->lock, flags);
+ regmap_write(periph->regmap, AT91_PMC_PCR,
+ (periph->id & AT91_PMC_PCR_PID_MASK));
+ regmap_update_bits(periph->regmap, AT91_PMC_PCR,
+ AT91_PMC_PCR_EN | AT91_PMC_PCR_CMD,
+ AT91_PMC_PCR_CMD);
+ spin_unlock_irqrestore(periph->lock, flags);
}
static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
- int ret;
+ unsigned long flags;
+ unsigned int status;
if (periph->id < PERIPHERAL_ID_MIN)
return 1;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
- ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN);
- pmc_unlock(pmc);
+ spin_lock_irqsave(periph->lock, flags);
+ regmap_write(periph->regmap, AT91_PMC_PCR,
+ (periph->id & AT91_PMC_PCR_PID_MASK));
+ regmap_read(periph->regmap, AT91_PMC_PCR, &status);
+ spin_unlock_irqrestore(periph->lock, flags);
- return ret;
+ return status & AT91_PMC_PCR_EN ? 1 : 0;
}
static unsigned long
@@ -214,19 +223,20 @@ clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- struct at91_pmc *pmc = periph->pmc;
- u32 tmp;
+ unsigned long flags;
+ unsigned int status;
if (periph->id < PERIPHERAL_ID_MIN)
return parent_rate;
- pmc_lock(pmc);
- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK));
- tmp = pmc_read(pmc, AT91_PMC_PCR);
- pmc_unlock(pmc);
+ spin_lock_irqsave(periph->lock, flags);
+ regmap_write(periph->regmap, AT91_PMC_PCR,
+ (periph->id & AT91_PMC_PCR_PID_MASK));
+ regmap_read(periph->regmap, AT91_PMC_PCR, &status);
+ spin_unlock_irqrestore(periph->lock, flags);
- if (tmp & AT91_PMC_PCR_EN) {
- periph->div = PERIPHERAL_RSHIFT(tmp);
+ if (status & AT91_PMC_PCR_EN) {
+ periph->div = PERIPHERAL_RSHIFT(status);
periph->auto_div = false;
} else {
clk_sam9x5_peripheral_autodiv(periph);
@@ -318,15 +328,15 @@ static const struct clk_ops sam9x5_peripheral_ops = {
};
static struct clk * __init
-at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
- const char *parent_name, u32 id,
- const struct clk_range *range)
+at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name,
+ u32 id, const struct clk_range *range)
{
struct clk_sam9x5_peripheral *periph;
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !name || !parent_name)
+ if (!name || !parent_name)
return ERR_PTR(-EINVAL);
periph = kzalloc(sizeof(*periph), GFP_KERNEL);
@@ -342,7 +352,8 @@ at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
periph->id = id;
periph->hw.init = &init;
periph->div = 0;
- periph->pmc = pmc;
+ periph->regmap = regmap;
+ periph->lock = lock;
periph->auto_div = true;
periph->range = *range;
@@ -356,7 +367,7 @@ at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name,
}
static void __init
-of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
+of_at91_clk_periph_setup(struct device_node *np, u8 type)
{
int num;
u32 id;
@@ -364,6 +375,7 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
const char *parent_name;
const char *name;
struct device_node *periphclknp;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -373,6 +385,10 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
if (!num || num > PERIPHERAL_MAX)
return;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
for_each_child_of_node(np, periphclknp) {
if (of_property_read_u32(periphclknp, "reg", &id))
continue;
@@ -384,7 +400,7 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
name = periphclknp->name;
if (type == PERIPHERAL_AT91RM9200) {
- clk = at91_clk_register_peripheral(pmc, name,
+ clk = at91_clk_register_peripheral(regmap, name,
parent_name, id);
} else {
struct clk_range range = CLK_RANGE(0, 0);
@@ -393,7 +409,9 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
"atmel,clk-output-range",
&range);
- clk = at91_clk_register_sam9x5_peripheral(pmc, name,
+ clk = at91_clk_register_sam9x5_peripheral(regmap,
+ &pmc_pcr_lock,
+ name,
parent_name,
id, &range);
}
@@ -405,14 +423,17 @@ of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type)
}
}
-void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_periph_setup(struct device_node *np)
{
- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91RM9200);
+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200);
}
+CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral",
+ of_at91rm9200_clk_periph_setup);
-void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_periph_setup(struct device_node *np)
{
- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91SAM9X5);
+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5);
}
+CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral",
+ of_at91sam9x5_clk_periph_setup);
+
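
pmc_pcr_lock is defined once here and shared by everything that touches the
indexed PCR register: clk-peripheral.c takes it for the sam9x5 peripheral
clocks, and clk-generated.c passes &pmc_pcr_lock when registering generated
clocks. Presumably pmc.h carries the declaration so both files see it; a
sketch of that split:

#include <linux/spinlock.h>

/* clk-peripheral.c: the single definition */
DEFINE_SPINLOCK(pmc_pcr_lock);

/* pmc.h (assumed): the shared declaration */
extern spinlock_t pmc_pcr_lock;
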
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 18b60f4895a6..fb2e0b56d4b7 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -12,14 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -58,9 +52,7 @@ struct clk_pll_layout {
struct clk_pll {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
u8 id;
u8 div;
u8 range;
@@ -69,20 +61,19 @@ struct clk_pll {
const struct clk_pll_characteristics *characteristics;
};
-static irqreturn_t clk_pll_irq_handler(int irq, void *dev_id)
+static inline bool clk_pll_ready(struct regmap *regmap, int id)
{
- struct clk_pll *pll = (struct clk_pll *)dev_id;
+ unsigned int status;
- wake_up(&pll->wait);
- disable_irq_nosync(pll->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & PLL_STATUS_MASK(id) ? 1 : 0;
}
static int clk_pll_prepare(struct clk_hw *hw)
{
struct clk_pll *pll = to_clk_pll(hw);
- struct at91_pmc *pmc = pll->pmc;
+ struct regmap *regmap = pll->regmap;
const struct clk_pll_layout *layout = pll->layout;
const struct clk_pll_characteristics *characteristics =
pll->characteristics;
@@ -90,39 +81,34 @@ static int clk_pll_prepare(struct clk_hw *hw)
u32 mask = PLL_STATUS_MASK(id);
int offset = PLL_REG(id);
u8 out = 0;
- u32 pllr, icpr;
+ unsigned int pllr;
+ unsigned int status;
u8 div;
u16 mul;
- pllr = pmc_read(pmc, offset);
+ regmap_read(regmap, offset, &pllr);
div = PLL_DIV(pllr);
mul = PLL_MUL(pllr, layout);
- if ((pmc_read(pmc, AT91_PMC_SR) & mask) &&
+ regmap_read(regmap, AT91_PMC_SR, &status);
+ if ((status & mask) &&
(div == pll->div && mul == pll->mul))
return 0;
if (characteristics->out)
out = characteristics->out[pll->range];
- if (characteristics->icpll) {
- icpr = pmc_read(pmc, AT91_PMC_PLLICPR) & ~PLL_ICPR_MASK(id);
- icpr |= (characteristics->icpll[pll->range] <<
- PLL_ICPR_SHIFT(id));
- pmc_write(pmc, AT91_PMC_PLLICPR, icpr);
- }
- pllr &= ~layout->pllr_mask;
- pllr |= layout->pllr_mask &
- (pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
- (out << PLL_OUT_SHIFT) |
- ((pll->mul & layout->mul_mask) << layout->mul_shift));
- pmc_write(pmc, offset, pllr);
-
- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
- enable_irq(pll->irq);
- wait_event(pll->wait,
- pmc_read(pmc, AT91_PMC_SR) & mask);
- }
+ if (characteristics->icpll)
+ regmap_update_bits(regmap, AT91_PMC_PLLICPR, PLL_ICPR_MASK(id),
+ characteristics->icpll[pll->range] << PLL_ICPR_SHIFT(id));
+
+ regmap_update_bits(regmap, offset, layout->pllr_mask,
+ pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) |
+ (out << PLL_OUT_SHIFT) |
+ ((pll->mul & layout->mul_mask) << layout->mul_shift));
+
+ while (!clk_pll_ready(regmap, pll->id))
+ cpu_relax();
return 0;
}
@@ -130,32 +116,35 @@ static int clk_pll_prepare(struct clk_hw *hw)
static int clk_pll_is_prepared(struct clk_hw *hw)
{
struct clk_pll *pll = to_clk_pll(hw);
- struct at91_pmc *pmc = pll->pmc;
- return !!(pmc_read(pmc, AT91_PMC_SR) &
- PLL_STATUS_MASK(pll->id));
+ return clk_pll_ready(pll->regmap, pll->id);
}
static void clk_pll_unprepare(struct clk_hw *hw)
{
struct clk_pll *pll = to_clk_pll(hw);
- struct at91_pmc *pmc = pll->pmc;
- const struct clk_pll_layout *layout = pll->layout;
- int offset = PLL_REG(pll->id);
- u32 tmp = pmc_read(pmc, offset) & ~(layout->pllr_mask);
+ unsigned int mask = pll->layout->pllr_mask;
- pmc_write(pmc, offset, tmp);
+ regmap_update_bits(pll->regmap, PLL_REG(pll->id), mask, ~mask);
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll *pll = to_clk_pll(hw);
+ unsigned int pllr;
+ u16 mul;
+ u8 div;
- if (!pll->div || !pll->mul)
+ regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
+
+ div = PLL_DIV(pllr);
+ mul = PLL_MUL(pllr, pll->layout);
+
+ if (!div || !mul)
return 0;
- return (parent_rate / pll->div) * (pll->mul + 1);
+ return (parent_rate / div) * (mul + 1);
}
static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
@@ -308,7 +297,7 @@ static const struct clk_ops pll_ops = {
};
static struct clk * __init
-at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
+at91_clk_register_pll(struct regmap *regmap, const char *name,
const char *parent_name, u8 id,
const struct clk_pll_layout *layout,
const struct clk_pll_characteristics *characteristics)
@@ -316,9 +305,8 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
struct clk_pll *pll;
struct clk *clk = NULL;
struct clk_init_data init;
- int ret;
int offset = PLL_REG(id);
- u32 tmp;
+ unsigned int pllr;
if (id > PLL_MAX_ID)
return ERR_PTR(-EINVAL);
@@ -337,23 +325,13 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
pll->hw.init = &init;
pll->layout = layout;
pll->characteristics = characteristics;
- pll->pmc = pmc;
- pll->irq = irq;
- tmp = pmc_read(pmc, offset) & layout->pllr_mask;
- pll->div = PLL_DIV(tmp);
- pll->mul = PLL_MUL(tmp, layout);
- init_waitqueue_head(&pll->wait);
- irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
- ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
- id ? "clk-pllb" : "clk-plla", pll);
- if (ret) {
- kfree(pll);
- return ERR_PTR(ret);
- }
+ pll->regmap = regmap;
+ regmap_read(regmap, offset, &pllr);
+ pll->div = PLL_DIV(pllr);
+ pll->mul = PLL_MUL(pllr, layout);
clk = clk_register(NULL, &pll->hw);
if (IS_ERR(clk)) {
- free_irq(pll->irq, pll);
kfree(pll);
}
@@ -483,12 +461,12 @@ out_free_characteristics:
}
static void __init
-of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc,
+of_at91_clk_pll_setup(struct device_node *np,
const struct clk_pll_layout *layout)
{
u32 id;
- unsigned int irq;
struct clk *clk;
+ struct regmap *regmap;
const char *parent_name;
const char *name = np->name;
struct clk_pll_characteristics *characteristics;
@@ -500,15 +478,15 @@ of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc,
of_property_read_string(np, "clock-output-names", &name);
- characteristics = of_at91_clk_pll_get_characteristics(np);
- if (!characteristics)
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
return;
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
+ characteristics = of_at91_clk_pll_get_characteristics(np);
+ if (!characteristics)
return;
- clk = at91_clk_register_pll(pmc, irq, name, parent_name, id, layout,
+ clk = at91_clk_register_pll(regmap, name, parent_name, id, layout,
characteristics);
if (IS_ERR(clk))
goto out_free_characteristics;
@@ -520,26 +498,30 @@ out_free_characteristics:
kfree(characteristics);
}
-void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_pll_setup(struct device_node *np)
{
- of_at91_clk_pll_setup(np, pmc, &at91rm9200_pll_layout);
+ of_at91_clk_pll_setup(np, &at91rm9200_pll_layout);
}
+CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll",
+ of_at91rm9200_clk_pll_setup);
-void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np)
{
- of_at91_clk_pll_setup(np, pmc, &at91sam9g45_pll_layout);
+ of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout);
}
+CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll",
+ of_at91sam9g45_clk_pll_setup);
-void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np)
{
- of_at91_clk_pll_setup(np, pmc, &at91sam9g20_pllb_layout);
+ of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout);
}
+CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb",
+ of_at91sam9g20_clk_pllb_setup);
-void __init of_sama5d3_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_sama5d3_clk_pll_setup(struct device_node *np)
{
- of_at91_clk_pll_setup(np, pmc, &sama5d3_pll_layout);
+ of_at91_clk_pll_setup(np, &sama5d3_pll_layout);
}
+CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll",
+ of_sama5d3_clk_pll_setup);
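
clk_pll_recalc_rate() now derives DIV and MUL from the hardware register on
every call instead of trusting cached values, computing
fout = (fin / DIV) * (MUL + 1). A worked sketch with example numbers
(12 MHz crystal, DIV = 3, MUL = 49 gives 200 MHz):

static unsigned long pll_rate(unsigned long fin, u8 div, u16 mul)
{
	if (!div || !mul)
		return 0;
	/* 12000000 / 3 * (49 + 1) = 200000000 */
	return (fin / div) * (mul + 1);
}
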
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index ea226562bb40..2bed26481027 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -12,8 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -21,16 +21,18 @@
struct clk_plldiv {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_plldiv *plldiv = to_clk_plldiv(hw);
- struct at91_pmc *pmc = plldiv->pmc;
+ unsigned int mckr;
- if (pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_PLLADIV2)
+ regmap_read(plldiv->regmap, AT91_PMC_MCKR, &mckr);
+
+ if (mckr & AT91_PMC_PLLADIV2)
return parent_rate / 2;
return parent_rate;
@@ -57,18 +59,12 @@ static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_plldiv *plldiv = to_clk_plldiv(hw);
- struct at91_pmc *pmc = plldiv->pmc;
- u32 tmp;
- if (parent_rate != rate && (parent_rate / 2) != rate)
+ if ((parent_rate != rate) && (parent_rate / 2 != rate))
return -EINVAL;
- pmc_lock(pmc);
- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_PLLADIV2;
- if ((parent_rate / 2) == rate)
- tmp |= AT91_PMC_PLLADIV2;
- pmc_write(pmc, AT91_PMC_MCKR, tmp);
- pmc_unlock(pmc);
+ regmap_update_bits(plldiv->regmap, AT91_PMC_MCKR, AT91_PMC_PLLADIV2,
+ parent_rate != rate ? AT91_PMC_PLLADIV2 : 0);
return 0;
}
@@ -80,7 +76,7 @@ static const struct clk_ops plldiv_ops = {
};
static struct clk * __init
-at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
+at91_clk_register_plldiv(struct regmap *regmap, const char *name,
const char *parent_name)
{
struct clk_plldiv *plldiv;
@@ -98,7 +94,7 @@ at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
init.flags = CLK_SET_RATE_GATE;
plldiv->hw.init = &init;
- plldiv->pmc = pmc;
+ plldiv->regmap = regmap;
clk = clk_register(NULL, &plldiv->hw);
@@ -109,27 +105,27 @@ at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name,
}
static void __init
-of_at91_clk_plldiv_setup(struct device_node *np, struct at91_pmc *pmc)
+of_at91sam9x5_clk_plldiv_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- clk = at91_clk_register_plldiv(pmc, name, parent_name);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+ clk = at91_clk_register_plldiv(regmap, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
return;
}
-
-void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
- struct at91_pmc *pmc)
-{
- of_at91_clk_plldiv_setup(np, pmc);
-}
+CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv",
+ of_at91sam9x5_clk_plldiv_setup);
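
clk_plldiv_set_rate() collapses the locked read-modify-write into a single
regmap_update_bits() call, with regmap's own locking standing in for
pmc_lock(). Spelled out, the helper is equivalent to this sequence (the real
one also skips the write when nothing changed):

static int update_bits_by_hand(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val)
{
	unsigned int tmp;
	int ret;

	ret = regmap_read(map, reg, &tmp);	/* read */
	if (ret)
		return ret;
	tmp = (tmp & ~mask) | (val & mask);	/* modify */
	return regmap_write(map, reg, tmp);	/* write */
}
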
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 14b270b85fec..10f846cc8db1 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -12,10 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -24,6 +22,7 @@
#define PROG_STATUS_MASK(id) (1 << ((id) + 8))
#define PROG_PRES_MASK 0x7
+#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK)
#define PROG_MAX_RM9200_CSS 3
struct clk_programmable_layout {
@@ -34,7 +33,7 @@ struct clk_programmable_layout {
struct clk_programmable {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
u8 id;
const struct clk_programmable_layout *layout;
};
@@ -44,14 +43,12 @@ struct clk_programmable {
static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u32 pres;
struct clk_programmable *prog = to_clk_programmable(hw);
- struct at91_pmc *pmc = prog->pmc;
- const struct clk_programmable_layout *layout = prog->layout;
+ unsigned int pckr;
+
+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
- pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) &
- PROG_PRES_MASK;
- return parent_rate >> pres;
+ return parent_rate >> PROG_PRES(prog->layout, pckr);
}
static int clk_programmable_determine_rate(struct clk_hw *hw,
@@ -101,36 +98,36 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
- struct at91_pmc *pmc = prog->pmc;
- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask;
+ unsigned int mask = layout->css_mask;
+ unsigned int pckr = 0;
if (layout->have_slck_mck)
- tmp &= AT91_PMC_CSSMCK_MCK;
+ mask |= AT91_PMC_CSSMCK_MCK;
if (index > layout->css_mask) {
- if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
- tmp |= AT91_PMC_CSSMCK_MCK;
- return 0;
- } else {
+ if (index > PROG_MAX_RM9200_CSS && !layout->have_slck_mck)
return -EINVAL;
- }
+
+ pckr |= AT91_PMC_CSSMCK_MCK;
}
- pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index);
+ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), mask, pckr);
+
return 0;
}
static u8 clk_programmable_get_parent(struct clk_hw *hw)
{
- u32 tmp;
- u8 ret;
struct clk_programmable *prog = to_clk_programmable(hw);
- struct at91_pmc *pmc = prog->pmc;
const struct clk_programmable_layout *layout = prog->layout;
+ unsigned int pckr;
+ u8 ret;
+
+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
+
+ ret = pckr & layout->css_mask;
- tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
- ret = tmp & layout->css_mask;
- if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret)
+ if (layout->have_slck_mck && (pckr & AT91_PMC_CSSMCK_MCK) && !ret)
ret = PROG_MAX_RM9200_CSS + 1;
return ret;
@@ -140,26 +137,27 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_programmable *prog = to_clk_programmable(hw);
- struct at91_pmc *pmc = prog->pmc;
const struct clk_programmable_layout *layout = prog->layout;
unsigned long div = parent_rate / rate;
+ unsigned int pckr;
int shift = 0;
- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) &
- ~(PROG_PRES_MASK << layout->pres_shift);
+
+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
if (!div)
return -EINVAL;
shift = fls(div) - 1;
- if (div != (1<<shift))
+ if (div != (1 << shift))
return -EINVAL;
if (shift >= PROG_PRES_MASK)
return -EINVAL;
- pmc_write(pmc, AT91_PMC_PCKR(prog->id),
- tmp | (shift << layout->pres_shift));
+ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
+ PROG_PRES_MASK << layout->pres_shift,
+ shift << layout->pres_shift);
return 0;
}
@@ -173,7 +171,7 @@ static const struct clk_ops programmable_ops = {
};
static struct clk * __init
-at91_clk_register_programmable(struct at91_pmc *pmc,
+at91_clk_register_programmable(struct regmap *regmap,
const char *name, const char **parent_names,
u8 num_parents, u8 id,
const struct clk_programmable_layout *layout)
@@ -198,7 +196,7 @@ at91_clk_register_programmable(struct at91_pmc *pmc,
prog->id = id;
prog->layout = layout;
prog->hw.init = &init;
- prog->pmc = pmc;
+ prog->regmap = regmap;
clk = clk_register(NULL, &prog->hw);
if (IS_ERR(clk))
@@ -226,19 +224,20 @@ static const struct clk_programmable_layout at91sam9x5_programmable_layout = {
};
static void __init
-of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
+of_at91_clk_prog_setup(struct device_node *np,
const struct clk_programmable_layout *layout)
{
int num;
u32 id;
struct clk *clk;
- int num_parents;
+ unsigned int num_parents;
const char *parent_names[PROG_SOURCE_MAX];
const char *name;
struct device_node *progclknp;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > PROG_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
@@ -247,6 +246,10 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
if (!num || num > (PROG_ID_MAX + 1))
return;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
for_each_child_of_node(np, progclknp) {
if (of_property_read_u32(progclknp, "reg", &id))
continue;
@@ -254,7 +257,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
if (of_property_read_string(np, "clock-output-names", &name))
name = progclknp->name;
- clk = at91_clk_register_programmable(pmc, name,
+ clk = at91_clk_register_programmable(regmap, name,
parent_names, num_parents,
id, layout);
if (IS_ERR(clk))
@@ -265,20 +268,23 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
}
-void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout);
}
+CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable",
+ of_at91rm9200_clk_prog_setup);
-void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout);
}
+CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable",
+ of_at91sam9g45_clk_prog_setup);
-void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np)
{
- of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout);
+ of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout);
}
+CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable",
+ of_at91sam9x5_clk_prog_setup);
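Throughout this file the open-coded read-modify-write of PCKR collapses into regmap_update_bits(), which performs the whole sequence under the regmap's internal lock. A sketch of the equivalence (illustrative, with mask/val standing in for the fields being changed):

	/* before: read-modify-write, racy without an external lock */
	tmp = pmc_read(pmc, AT91_PMC_PCKR(id)) & ~mask;
	pmc_write(pmc, AT91_PMC_PCKR(id), tmp | (val & mask));

	/* after: a single call, serialized by the regmap core */
	regmap_update_bits(regmap, AT91_PMC_PCKR(id), mask, val);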
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 6f99a530ead6..61090b1146cf 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -12,17 +12,11 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
-#include <linux/slab.h>
#include <linux/clk/at91_pmc.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
#include "sckc.h"
@@ -58,7 +52,7 @@ struct clk_slow_rc_osc {
struct clk_sam9260_slow {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw)
@@ -251,7 +245,7 @@ at91_clk_register_slow_rc_osc(void __iomem *sckcr,
init.ops = &slow_rc_osc_ops;
init.parent_names = NULL;
init.num_parents = 0;
- init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED;
+ init.flags = CLK_IGNORE_UNUSED;
osc->hw.init = &init;
osc->sckcr = sckcr;
@@ -366,11 +360,11 @@ void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
{
struct clk *clk;
const char *parent_names[2];
- int num_parents;
+ unsigned int num_parents;
const char *name = np->name;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > 2)
+ if (num_parents == 0 || num_parents > 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
@@ -388,8 +382,11 @@ void __init of_at91sam9x5_clk_slow_setup(struct device_node *np,
static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw)
{
struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw);
+ unsigned int status;
+
+ regmap_read(slowck->regmap, AT91_PMC_SR, &status);
- return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL);
+ return status & AT91_PMC_OSCSEL ? 1 : 0;
}
static const struct clk_ops sam9260_slow_ops = {
@@ -397,7 +394,7 @@ static const struct clk_ops sam9260_slow_ops = {
};
static struct clk * __init
-at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
+at91_clk_register_sam9260_slow(struct regmap *regmap,
const char *name,
const char **parent_names,
int num_parents)
@@ -406,7 +403,7 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
struct clk *clk = NULL;
struct clk_init_data init;
- if (!pmc || !name)
+ if (!name)
return ERR_PTR(-EINVAL);
if (!parent_names || !num_parents)
@@ -423,7 +420,7 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
init.flags = 0;
slowck->hw.init = &init;
- slowck->pmc = pmc;
+ slowck->regmap = regmap;
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
@@ -432,26 +429,32 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
return clk;
}
-void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9260_clk_slow_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_names[2];
- int num_parents;
+ unsigned int num_parents;
const char *name = np->name;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
if (num_parents != 2)
return;
of_clk_parent_fill(np, parent_names, num_parents);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
of_property_read_string(np, "clock-output-names", &name);
- clk = at91_clk_register_sam9260_slow(pmc, name, parent_names,
+ clk = at91_clk_register_sam9260_slow(regmap, name, parent_names,
num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+
+CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow",
+ of_at91sam9260_clk_slow_setup);
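The int-to-unsigned change for num_parents (with `<= 0` becoming `== 0`) presumably tracks of_clk_get_parent_count() returning an unsigned count; a `<= 0` comparison on an unsigned value is equivalent to `== 0` anyway and reads as if negative returns were possible. The check now reads:

	unsigned int num_parents = of_clk_get_parent_count(np);

	/* a count cannot be negative; only 0 and "too many" are errors */
	if (num_parents == 0 || num_parents > 2)
		return;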
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
index a7f8501cfa05..3c04b069d5b8 100644
--- a/drivers/clk/at91/clk-smd.c
+++ b/drivers/clk/at91/clk-smd.c
@@ -12,8 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -24,7 +24,7 @@
struct at91sam9x5_clk_smd {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_at91sam9x5_clk_smd(hw) \
@@ -33,13 +33,13 @@ struct at91sam9x5_clk_smd {
static unsigned long at91sam9x5_clk_smd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u32 tmp;
- u8 smddiv;
struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
- struct at91_pmc *pmc = smd->pmc;
+ unsigned int smdr;
+ u8 smddiv;
+
+ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr);
+ smddiv = (smdr & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
- tmp = pmc_read(pmc, AT91_PMC_SMD);
- smddiv = (tmp & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT;
return parent_rate / (smddiv + 1);
}
@@ -67,40 +67,38 @@ static long at91sam9x5_clk_smd_round_rate(struct clk_hw *hw, unsigned long rate,
static int at91sam9x5_clk_smd_set_parent(struct clk_hw *hw, u8 index)
{
- u32 tmp;
struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
- struct at91_pmc *pmc = smd->pmc;
if (index > 1)
return -EINVAL;
- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMDS;
- if (index)
- tmp |= AT91_PMC_SMDS;
- pmc_write(pmc, AT91_PMC_SMD, tmp);
+
+ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMDS,
+ index ? AT91_PMC_SMDS : 0);
+
return 0;
}
static u8 at91sam9x5_clk_smd_get_parent(struct clk_hw *hw)
{
struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
- struct at91_pmc *pmc = smd->pmc;
+ unsigned int smdr;
- return pmc_read(pmc, AT91_PMC_SMD) & AT91_PMC_SMDS;
+ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr);
+
+ return smdr & AT91_PMC_SMDS;
}
static int at91sam9x5_clk_smd_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u32 tmp;
struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw);
- struct at91_pmc *pmc = smd->pmc;
unsigned long div = parent_rate / rate;
if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1))
return -EINVAL;
- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMD_DIV;
- tmp |= (div - 1) << SMD_DIV_SHIFT;
- pmc_write(pmc, AT91_PMC_SMD, tmp);
+
+ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMD_DIV,
+ (div - 1) << SMD_DIV_SHIFT);
return 0;
}
@@ -114,7 +112,7 @@ static const struct clk_ops at91sam9x5_smd_ops = {
};
static struct clk * __init
-at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
+at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents)
{
struct at91sam9x5_clk_smd *smd;
@@ -132,7 +130,7 @@ at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
smd->hw.init = &init;
- smd->pmc = pmc;
+ smd->regmap = regmap;
clk = clk_register(NULL, &smd->hw);
if (IS_ERR(clk))
@@ -141,26 +139,32 @@ at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name,
return clk;
}
-void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np)
{
struct clk *clk;
- int num_parents;
+ unsigned int num_parents;
const char *parent_names[SMD_SOURCE_MAX];
const char *name = np->name;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > SMD_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
- clk = at91sam9x5_clk_register_smd(pmc, name, parent_names,
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91sam9x5_clk_register_smd(regmap, name, parent_names,
num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd",
+ of_at91sam9x5_clk_smd_setup);
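The SMD divider field stores div - 1, so the valid range 1..SMD_MAX_DIV + 1 maps onto field values 0..SMD_MAX_DIV. A worked example with hypothetical rates:

	/* hypothetical: parent_rate = 48 MHz, requested rate = 12 MHz */
	unsigned long div = 48000000 / 12000000;	/* = 4 */

	/* 48000000 % 12000000 == 0 and 1 <= div <= SMD_MAX_DIV + 1,
	 * so (div - 1) = 3 is shifted into the AT91_PMC_SMD_DIV field
	 */
	regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMD_DIV,
			   (div - 1) << SMD_DIV_SHIFT);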
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index 3f5314344286..8f35d8172909 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -12,13 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/of_irq.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -29,9 +24,7 @@
#define to_clk_system(hw) container_of(hw, struct clk_system, hw)
struct clk_system {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
u8 id;
};
@@ -39,58 +32,54 @@ static inline int is_pck(int id)
{
return (id >= 8) && (id <= 15);
}
-static irqreturn_t clk_system_irq_handler(int irq, void *dev_id)
+
+static inline bool clk_system_ready(struct regmap *regmap, int id)
{
- struct clk_system *sys = (struct clk_system *)dev_id;
+ unsigned int status;
- wake_up(&sys->wait);
- disable_irq_nosync(sys->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & (1 << id) ? 1 : 0;
}
static int clk_system_prepare(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
- struct at91_pmc *pmc = sys->pmc;
- u32 mask = 1 << sys->id;
- pmc_write(pmc, AT91_PMC_SCER, mask);
+ regmap_write(sys->regmap, AT91_PMC_SCER, 1 << sys->id);
if (!is_pck(sys->id))
return 0;
- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) {
- if (sys->irq) {
- enable_irq(sys->irq);
- wait_event(sys->wait,
- pmc_read(pmc, AT91_PMC_SR) & mask);
- } else
- cpu_relax();
- }
+ while (!clk_system_ready(sys->regmap, sys->id))
+ cpu_relax();
+
return 0;
}
static void clk_system_unprepare(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
- struct at91_pmc *pmc = sys->pmc;
- pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id);
+ regmap_write(sys->regmap, AT91_PMC_SCDR, 1 << sys->id);
}
static int clk_system_is_prepared(struct clk_hw *hw)
{
struct clk_system *sys = to_clk_system(hw);
- struct at91_pmc *pmc = sys->pmc;
+ unsigned int status;
+
+ regmap_read(sys->regmap, AT91_PMC_SCSR, &status);
- if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id)))
+ if (!(status & (1 << sys->id)))
return 0;
if (!is_pck(sys->id))
return 1;
- return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id));
+ regmap_read(sys->regmap, AT91_PMC_SR, &status);
+
+ return status & (1 << sys->id) ? 1 : 0;
}
static const struct clk_ops system_ops = {
@@ -100,13 +89,12 @@ static const struct clk_ops system_ops = {
};
static struct clk * __init
-at91_clk_register_system(struct at91_pmc *pmc, const char *name,
- const char *parent_name, u8 id, int irq)
+at91_clk_register_system(struct regmap *regmap, const char *name,
+ const char *parent_name, u8 id)
{
struct clk_system *sys;
struct clk *clk = NULL;
struct clk_init_data init;
- int ret;
if (!parent_name || id > SYSTEM_MAX_ID)
return ERR_PTR(-EINVAL);
@@ -123,44 +111,33 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
sys->id = id;
sys->hw.init = &init;
- sys->pmc = pmc;
- sys->irq = irq;
- if (irq) {
- init_waitqueue_head(&sys->wait);
- irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
- ret = request_irq(sys->irq, clk_system_irq_handler,
- IRQF_TRIGGER_HIGH, name, sys);
- if (ret) {
- kfree(sys);
- return ERR_PTR(ret);
- }
- }
+ sys->regmap = regmap;
clk = clk_register(NULL, &sys->hw);
- if (IS_ERR(clk)) {
- if (irq)
- free_irq(sys->irq, sys);
+ if (IS_ERR(clk))
kfree(sys);
- }
return clk;
}
-static void __init
-of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_sys_setup(struct device_node *np)
{
int num;
- int irq = 0;
u32 id;
struct clk *clk;
const char *name;
struct device_node *sysclknp;
const char *parent_name;
+ struct regmap *regmap;
num = of_get_child_count(np);
if (num > (SYSTEM_MAX_ID + 1))
return;
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
for_each_child_of_node(np, sysclknp) {
if (of_property_read_u32(sysclknp, "reg", &id))
continue;
@@ -168,21 +145,14 @@ of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc)
if (of_property_read_string(np, "clock-output-names", &name))
name = sysclknp->name;
- if (is_pck(id))
- irq = irq_of_parse_and_map(sysclknp, 0);
-
parent_name = of_clk_get_parent_name(sysclknp, 0);
- clk = at91_clk_register_system(pmc, name, parent_name, id, irq);
+ clk = at91_clk_register_system(regmap, name, parent_name, id);
if (IS_ERR(clk))
continue;
of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk);
}
}
-
-void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
- struct at91_pmc *pmc)
-{
- of_at91_clk_sys_setup(np, pmc);
-}
+CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system",
+ of_at91rm9200_clk_sys_setup);
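With the PMC interrupt machinery gone, clk_system_prepare() busy-waits on the ready bit instead of sleeping on a wait queue. The implicit assumption (not stated in the patch) is that PCK ready bits settle quickly enough for a cpu_relax() loop in prepare context. The contrast, roughly:

	/* before: arm the PMC IRQ and sleep until the handler fires */
	wait_event(sys->wait, pmc_read(pmc, AT91_PMC_SR) & mask);

	/* after: poll the status register directly */
	while (!clk_system_ready(sys->regmap, sys->id))
		cpu_relax();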
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 8ab8502778a2..d80bdb0a8b02 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -12,8 +12,8 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -27,7 +27,7 @@
struct at91sam9x5_clk_usb {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
};
#define to_at91sam9x5_clk_usb(hw) \
@@ -35,7 +35,7 @@ struct at91sam9x5_clk_usb {
struct at91rm9200_clk_usb {
struct clk_hw hw;
- struct at91_pmc *pmc;
+ struct regmap *regmap;
u32 divisors[4];
};
@@ -45,13 +45,12 @@ struct at91rm9200_clk_usb {
static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u32 tmp;
- u8 usbdiv;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
+ unsigned int usbr;
+ u8 usbdiv;
- tmp = pmc_read(pmc, AT91_PMC_USB);
- usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
+ usbdiv = (usbr & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
}
@@ -109,33 +108,31 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
{
- u32 tmp;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
if (index > 1)
return -EINVAL;
- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS;
- if (index)
- tmp |= AT91_PMC_USBS;
- pmc_write(pmc, AT91_PMC_USB, tmp);
+
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS,
+ index ? AT91_PMC_USBS : 0);
+
return 0;
}
static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
+ unsigned int usbr;
- return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS;
+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
+
+ return usbr & AT91_PMC_USBS;
}
static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u32 tmp;
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
unsigned long div;
if (!rate)
@@ -145,9 +142,8 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
return -EINVAL;
- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
- tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT;
- pmc_write(pmc, AT91_PMC_USB, tmp);
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_OHCIUSBDIV,
+ (div - 1) << SAM9X5_USB_DIV_SHIFT);
return 0;
}
@@ -163,28 +159,28 @@ static const struct clk_ops at91sam9x5_usb_ops = {
static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
- pmc_write(pmc, AT91_PMC_USB,
- pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS);
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS,
+ AT91_PMC_USBS);
+
return 0;
}
static void at91sam9n12_clk_usb_disable(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
- pmc_write(pmc, AT91_PMC_USB,
- pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS);
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, 0);
}
static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw)
{
struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
+ unsigned int usbr;
- return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS);
+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
+
+ return usbr & AT91_PMC_USBS;
}
static const struct clk_ops at91sam9n12_usb_ops = {
@@ -197,7 +193,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
};
static struct clk * __init
-at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
+at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents)
{
struct at91sam9x5_clk_usb *usb;
@@ -216,7 +212,7 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
CLK_SET_RATE_PARENT;
usb->hw.init = &init;
- usb->pmc = pmc;
+ usb->regmap = regmap;
clk = clk_register(NULL, &usb->hw);
if (IS_ERR(clk))
@@ -226,7 +222,7 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
}
static struct clk * __init
-at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
+at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name)
{
struct at91sam9x5_clk_usb *usb;
@@ -244,7 +240,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
usb->hw.init = &init;
- usb->pmc = pmc;
+ usb->regmap = regmap;
clk = clk_register(NULL, &usb->hw);
if (IS_ERR(clk))
@@ -257,12 +253,12 @@ static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
- u32 tmp;
+ unsigned int pllbr;
u8 usbdiv;
- tmp = pmc_read(pmc, AT91_CKGR_PLLBR);
- usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
+ regmap_read(usb->regmap, AT91_CKGR_PLLBR, &pllbr);
+
+ usbdiv = (pllbr & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT;
if (usb->divisors[usbdiv])
return parent_rate / usb->divisors[usbdiv];
@@ -310,10 +306,8 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u32 tmp;
int i;
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
- struct at91_pmc *pmc = usb->pmc;
unsigned long div;
if (!rate)
@@ -323,10 +317,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
if (usb->divisors[i] == div) {
- tmp = pmc_read(pmc, AT91_CKGR_PLLBR) &
- ~AT91_PMC_USBDIV;
- tmp |= i << RM9200_USB_DIV_SHIFT;
- pmc_write(pmc, AT91_CKGR_PLLBR, tmp);
+ regmap_update_bits(usb->regmap, AT91_CKGR_PLLBR,
+ AT91_PMC_USBDIV,
+ i << RM9200_USB_DIV_SHIFT);
+
return 0;
}
}
@@ -341,7 +335,7 @@ static const struct clk_ops at91rm9200_usb_ops = {
};
static struct clk * __init
-at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
+at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name, const u32 *divisors)
{
struct at91rm9200_clk_usb *usb;
@@ -359,7 +353,7 @@ at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
init.flags = CLK_SET_RATE_PARENT;
usb->hw.init = &init;
- usb->pmc = pmc;
+ usb->regmap = regmap;
memcpy(usb->divisors, divisors, sizeof(usb->divisors));
clk = clk_register(NULL, &usb->hw);
@@ -369,35 +363,42 @@ at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name,
return clk;
}
-void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np)
{
struct clk *clk;
- int num_parents;
+ unsigned int num_parents;
const char *parent_names[USB_SOURCE_MAX];
const char *name = np->name;
+ struct regmap *regmap;
num_parents = of_clk_get_parent_count(np);
- if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
+ if (num_parents == 0 || num_parents > USB_SOURCE_MAX)
return;
of_clk_parent_fill(np, parent_names, num_parents);
of_property_read_string(np, "clock-output-names", &name);
- clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91sam9x5_clk_register_usb(regmap, name, parent_names,
+ num_parents);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb",
+ of_at91sam9x5_clk_usb_setup);
-void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -405,20 +406,26 @@ void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
of_property_read_string(np, "clock-output-names", &name);
- clk = at91sam9n12_clk_register_usb(pmc, name, parent_name);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91sam9n12_clk_register_usb(regmap, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb",
+ of_at91sam9n12_clk_usb_setup);
-void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc)
+static void __init of_at91rm9200_clk_usb_setup(struct device_node *np)
{
struct clk *clk;
const char *parent_name;
const char *name = np->name;
u32 divisors[4] = {0, 0, 0, 0};
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
if (!parent_name)
@@ -430,9 +437,15 @@ void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
of_property_read_string(np, "clock-output-names", &name);
- clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors);
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return;
+
+ clk = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb",
+ of_at91rm9200_clk_usb_setup);
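The sam9x5 USB recalc divides the parent by usbdiv + 1 with DIV_ROUND_CLOSEST. A worked example, assuming the usual 480 MHz UTMI parent:

	unsigned int usbdiv = 9;	/* OHCIUSBDIV field reads back 9 */
	unsigned long rate = DIV_ROUND_CLOSEST(480000000UL, usbdiv + 1UL);
	/* rate == 48000000, the 48 MHz expected by the OHCI controller */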
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index ca561e90a60f..61fcf399e58c 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -11,14 +11,9 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include "pmc.h"
@@ -26,37 +21,30 @@
struct clk_utmi {
struct clk_hw hw;
- struct at91_pmc *pmc;
- unsigned int irq;
- wait_queue_head_t wait;
+ struct regmap *regmap;
};
#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
-static irqreturn_t clk_utmi_irq_handler(int irq, void *dev_id)
+static inline bool clk_utmi_ready(struct regmap *regmap)
{
- struct clk_utmi *utmi = (struct clk_utmi *)dev_id;
+ unsigned int status;
- wake_up(&utmi->wait);
- disable_irq_nosync(utmi->irq);
+ regmap_read(regmap, AT91_PMC_SR, &status);
- return IRQ_HANDLED;
+ return status & AT91_PMC_LOCKU;
}
static int clk_utmi_prepare(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- struct at91_pmc *pmc = utmi->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) | AT91_PMC_UPLLEN |
- AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN;
+ unsigned int uckr = AT91_PMC_UPLLEN | AT91_PMC_UPLLCOUNT |
+ AT91_PMC_BIASEN;
- pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr);
- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU)) {
- enable_irq(utmi->irq);
- wait_event(utmi->wait,
- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
- }
+ while (!clk_utmi_ready(utmi->regmap))
+ cpu_relax();
return 0;
}
@@ -64,18 +52,15 @@ static int clk_utmi_prepare(struct clk_hw *hw)
static int clk_utmi_is_prepared(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- struct at91_pmc *pmc = utmi->pmc;
- return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU);
+ return clk_utmi_ready(utmi->regmap);
}
static void clk_utmi_unprepare(struct clk_hw *hw)
{
struct clk_utmi *utmi = to_clk_utmi(hw);
- struct at91_pmc *pmc = utmi->pmc;
- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN;
- pmc_write(pmc, AT91_CKGR_UCKR, tmp);
+ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0);
}
static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
@@ -93,10 +78,9 @@ static const struct clk_ops utmi_ops = {
};
static struct clk * __init
-at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
+at91_clk_register_utmi(struct regmap *regmap,
const char *name, const char *parent_name)
{
- int ret;
struct clk_utmi *utmi;
struct clk *clk = NULL;
struct clk_init_data init;
@@ -112,52 +96,36 @@ at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
init.flags = CLK_SET_RATE_GATE;
utmi->hw.init = &init;
- utmi->pmc = pmc;
- utmi->irq = irq;
- init_waitqueue_head(&utmi->wait);
- irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
- ret = request_irq(utmi->irq, clk_utmi_irq_handler,
- IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
- if (ret) {
- kfree(utmi);
- return ERR_PTR(ret);
- }
+ utmi->regmap = regmap;
clk = clk_register(NULL, &utmi->hw);
- if (IS_ERR(clk)) {
- free_irq(utmi->irq, utmi);
+ if (IS_ERR(clk))
kfree(utmi);
- }
return clk;
}
-static void __init
-of_at91_clk_utmi_setup(struct device_node *np, struct at91_pmc *pmc)
+static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np)
{
- unsigned int irq;
struct clk *clk;
const char *parent_name;
const char *name = np->name;
+ struct regmap *regmap;
parent_name = of_clk_get_parent_name(np, 0);
of_property_read_string(np, "clock-output-names", &name);
- irq = irq_of_parse_and_map(np, 0);
- if (!irq)
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
return;
- clk = at91_clk_register_utmi(pmc, irq, name, parent_name);
+ clk = at91_clk_register_utmi(regmap, name, parent_name);
if (IS_ERR(clk))
return;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
return;
}
-
-void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
- struct at91_pmc *pmc)
-{
- of_at91_clk_utmi_setup(np, pmc);
-}
+CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi",
+ of_at91sam9x5_clk_utmi_setup);
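clk_utmi_prepare() and clk_utmi_unprepare() use the two degenerate forms of regmap_update_bits(): passing the same value as the mask sets exactly those bits, and passing 0 clears them, in both cases leaving the rest of CKGR_UCKR untouched:

	/* set: mask == value */
	regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr);

	/* clear: value == 0 */
	regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0);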
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 8476b570779b..526df5ba042d 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -12,36 +12,13 @@
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <asm/proc-fns.h>
#include "pmc.h"
-void __iomem *at91_pmc_base;
-EXPORT_SYMBOL_GPL(at91_pmc_base);
-
-void at91rm9200_idle(void)
-{
- /*
- * Disable the processor clock. The processor will be automatically
- * re-enabled by an interrupt or by a reset.
- */
- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
-}
-
-void at91sam9_idle(void)
-{
- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK);
- cpu_do_idle();
-}
-
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range)
{
@@ -64,402 +41,3 @@ int of_at91_get_clk_range(struct device_node *np, const char *propname,
return 0;
}
EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
-
-static void pmc_irq_mask(struct irq_data *d)
-{
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
- pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq);
-}
-
-static void pmc_irq_unmask(struct irq_data *d)
-{
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
- pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq);
-}
-
-static int pmc_irq_set_type(struct irq_data *d, unsigned type)
-{
- if (type != IRQ_TYPE_LEVEL_HIGH) {
- pr_warn("PMC: type not supported (support only IRQ_TYPE_LEVEL_HIGH type)\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void pmc_irq_suspend(struct irq_data *d)
-{
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
- pmc->imr = pmc_read(pmc, AT91_PMC_IMR);
- pmc_write(pmc, AT91_PMC_IDR, pmc->imr);
-}
-
-static void pmc_irq_resume(struct irq_data *d)
-{
- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d);
-
- pmc_write(pmc, AT91_PMC_IER, pmc->imr);
-}
-
-static struct irq_chip pmc_irq = {
- .name = "PMC",
- .irq_disable = pmc_irq_mask,
- .irq_mask = pmc_irq_mask,
- .irq_unmask = pmc_irq_unmask,
- .irq_set_type = pmc_irq_set_type,
- .irq_suspend = pmc_irq_suspend,
- .irq_resume = pmc_irq_resume,
-};
-
-static struct lock_class_key pmc_lock_class;
-
-static int pmc_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct at91_pmc *pmc = h->host_data;
-
- irq_set_lockdep_class(virq, &pmc_lock_class);
-
- irq_set_chip_and_handler(virq, &pmc_irq,
- handle_level_irq);
- irq_set_chip_data(virq, pmc);
-
- return 0;
-}
-
-static int pmc_irq_domain_xlate(struct irq_domain *d,
- struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq,
- unsigned int *out_type)
-{
- struct at91_pmc *pmc = d->host_data;
- const struct at91_pmc_caps *caps = pmc->caps;
-
- if (WARN_ON(intsize < 1))
- return -EINVAL;
-
- *out_hwirq = intspec[0];
-
- if (!(caps->available_irqs & (1 << *out_hwirq)))
- return -EINVAL;
-
- *out_type = IRQ_TYPE_LEVEL_HIGH;
-
- return 0;
-}
-
-static const struct irq_domain_ops pmc_irq_ops = {
- .map = pmc_irq_map,
- .xlate = pmc_irq_domain_xlate,
-};
-
-static irqreturn_t pmc_irq_handler(int irq, void *data)
-{
- struct at91_pmc *pmc = (struct at91_pmc *)data;
- unsigned long sr;
- int n;
-
- sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR);
- if (!sr)
- return IRQ_NONE;
-
- for_each_set_bit(n, &sr, BITS_PER_LONG)
- generic_handle_irq(irq_find_mapping(pmc->irqdomain, n));
-
- return IRQ_HANDLED;
-}
-
-static const struct at91_pmc_caps at91rm9200_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
- AT91_PMC_PCK3RDY,
-};
-
-static const struct at91_pmc_caps at91sam9260_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY,
-};
-
-static const struct at91_pmc_caps at91sam9g45_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY,
-};
-
-static const struct at91_pmc_caps at91sam9n12_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB |
- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
-};
-
-static const struct at91_pmc_caps at91sam9x5_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS |
- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV,
-};
-
-static const struct at91_pmc_caps sama5d2_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
- AT91_PMC_CFDEV | AT91_PMC_GCKRDY,
-};
-
-static const struct at91_pmc_caps sama5d3_caps = {
- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY |
- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY |
- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY |
- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS |
- AT91_PMC_CFDEV,
-};
-
-static struct at91_pmc *__init at91_pmc_init(struct device_node *np,
- void __iomem *regbase, int virq,
- const struct at91_pmc_caps *caps)
-{
- struct at91_pmc *pmc;
-
- if (!regbase || !virq || !caps)
- return NULL;
-
- at91_pmc_base = regbase;
-
- pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
- if (!pmc)
- return NULL;
-
- spin_lock_init(&pmc->lock);
- pmc->regbase = regbase;
- pmc->virq = virq;
- pmc->caps = caps;
-
- pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc);
-
- if (!pmc->irqdomain)
- goto out_free_pmc;
-
- pmc_write(pmc, AT91_PMC_IDR, 0xffffffff);
- if (request_irq(pmc->virq, pmc_irq_handler,
- IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc))
- goto out_remove_irqdomain;
-
- return pmc;
-
-out_remove_irqdomain:
- irq_domain_remove(pmc->irqdomain);
-out_free_pmc:
- kfree(pmc);
-
- return NULL;
-}
-
-static const struct of_device_id pmc_clk_ids[] __initconst = {
- /* Slow oscillator */
- {
- .compatible = "atmel,at91sam9260-clk-slow",
- .data = of_at91sam9260_clk_slow_setup,
- },
- /* Main clock */
- {
- .compatible = "atmel,at91rm9200-clk-main-osc",
- .data = of_at91rm9200_clk_main_osc_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-main-rc-osc",
- .data = of_at91sam9x5_clk_main_rc_osc_setup,
- },
- {
- .compatible = "atmel,at91rm9200-clk-main",
- .data = of_at91rm9200_clk_main_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-main",
- .data = of_at91sam9x5_clk_main_setup,
- },
- /* PLL clocks */
- {
- .compatible = "atmel,at91rm9200-clk-pll",
- .data = of_at91rm9200_clk_pll_setup,
- },
- {
- .compatible = "atmel,at91sam9g45-clk-pll",
- .data = of_at91sam9g45_clk_pll_setup,
- },
- {
- .compatible = "atmel,at91sam9g20-clk-pllb",
- .data = of_at91sam9g20_clk_pllb_setup,
- },
- {
- .compatible = "atmel,sama5d3-clk-pll",
- .data = of_sama5d3_clk_pll_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-plldiv",
- .data = of_at91sam9x5_clk_plldiv_setup,
- },
- /* Master clock */
- {
- .compatible = "atmel,at91rm9200-clk-master",
- .data = of_at91rm9200_clk_master_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-master",
- .data = of_at91sam9x5_clk_master_setup,
- },
- /* System clocks */
- {
- .compatible = "atmel,at91rm9200-clk-system",
- .data = of_at91rm9200_clk_sys_setup,
- },
- /* Peripheral clocks */
- {
- .compatible = "atmel,at91rm9200-clk-peripheral",
- .data = of_at91rm9200_clk_periph_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-peripheral",
- .data = of_at91sam9x5_clk_periph_setup,
- },
- /* Programmable clocks */
- {
- .compatible = "atmel,at91rm9200-clk-programmable",
- .data = of_at91rm9200_clk_prog_setup,
- },
- {
- .compatible = "atmel,at91sam9g45-clk-programmable",
- .data = of_at91sam9g45_clk_prog_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-programmable",
- .data = of_at91sam9x5_clk_prog_setup,
- },
- /* UTMI clock */
-#if defined(CONFIG_HAVE_AT91_UTMI)
- {
- .compatible = "atmel,at91sam9x5-clk-utmi",
- .data = of_at91sam9x5_clk_utmi_setup,
- },
-#endif
- /* USB clock */
-#if defined(CONFIG_HAVE_AT91_USB_CLK)
- {
- .compatible = "atmel,at91rm9200-clk-usb",
- .data = of_at91rm9200_clk_usb_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-usb",
- .data = of_at91sam9x5_clk_usb_setup,
- },
- {
- .compatible = "atmel,at91sam9n12-clk-usb",
- .data = of_at91sam9n12_clk_usb_setup,
- },
-#endif
- /* SMD clock */
-#if defined(CONFIG_HAVE_AT91_SMD)
- {
- .compatible = "atmel,at91sam9x5-clk-smd",
- .data = of_at91sam9x5_clk_smd_setup,
- },
-#endif
-#if defined(CONFIG_HAVE_AT91_H32MX)
- {
- .compatible = "atmel,sama5d4-clk-h32mx",
- .data = of_sama5d4_clk_h32mx_setup,
- },
-#endif
-#if defined(CONFIG_HAVE_AT91_GENERATED_CLK)
- {
- .compatible = "atmel,sama5d2-clk-generated",
- .data = of_sama5d2_clk_generated_setup,
- },
-#endif
- { /*sentinel*/ }
-};
-
-static void __init of_at91_pmc_setup(struct device_node *np,
- const struct at91_pmc_caps *caps)
-{
- struct at91_pmc *pmc;
- struct device_node *childnp;
- void (*clk_setup)(struct device_node *, struct at91_pmc *);
- const struct of_device_id *clk_id;
- void __iomem *regbase = of_iomap(np, 0);
- int virq;
-
- if (!regbase)
- return;
-
- virq = irq_of_parse_and_map(np, 0);
- if (!virq)
- return;
-
- pmc = at91_pmc_init(np, regbase, virq, caps);
- if (!pmc)
- return;
- for_each_child_of_node(np, childnp) {
- clk_id = of_match_node(pmc_clk_ids, childnp);
- if (!clk_id)
- continue;
- clk_setup = clk_id->data;
- clk_setup(childnp, pmc);
- }
-}
-
-static void __init of_at91rm9200_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91rm9200_caps);
-}
-CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc",
- of_at91rm9200_pmc_setup);
-
-static void __init of_at91sam9260_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91sam9260_caps);
-}
-CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc",
- of_at91sam9260_pmc_setup);
-
-static void __init of_at91sam9g45_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91sam9g45_caps);
-}
-CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc",
- of_at91sam9g45_pmc_setup);
-
-static void __init of_at91sam9n12_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91sam9n12_caps);
-}
-CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc",
- of_at91sam9n12_pmc_setup);
-
-static void __init of_at91sam9x5_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &at91sam9x5_caps);
-}
-CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc",
- of_at91sam9x5_pmc_setup);
-
-static void __init of_sama5d2_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &sama5d2_caps);
-}
-CLK_OF_DECLARE(sama5d2_clk_pmc, "atmel,sama5d2-pmc",
- of_sama5d2_pmc_setup);
-
-static void __init of_sama5d3_pmc_setup(struct device_node *np)
-{
- of_at91_pmc_setup(np, &sama5d3_caps);
-}
-CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc",
- of_sama5d3_pmc_setup);
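Everything deleted above -- the IRQ chip, the capability tables, and the pmc_clk_ids dispatch walk -- is subsumed by the per-driver CLK_OF_DECLARE() entries, which record each init function in the __clk_of_table section that of_clk_init() scans during early boot. The macro's rough shape (kernel-version dependent, shown for orientation only):

	#define CLK_OF_DECLARE(name, compat, fn) \
		OF_DECLARE_1(clk, name, compat, fn)

	/* of_clk_init(NULL) walks the resulting table, matches each
	 * compatible string against the device tree and calls fn(np)
	 * for every match -- no central dispatch table needed
	 */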
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index f65739272779..5771fff0ee3f 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -14,8 +14,11 @@
#include <linux/io.h>
#include <linux/irqdomain.h>
+#include <linux/regmap.h>
#include <linux/spinlock.h>
+extern spinlock_t pmc_pcr_lock;
+
struct clk_range {
unsigned long min;
unsigned long max;
@@ -23,102 +26,7 @@ struct clk_range {
#define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
-struct at91_pmc_caps {
- u32 available_irqs;
-};
-
-struct at91_pmc {
- void __iomem *regbase;
- int virq;
- spinlock_t lock;
- const struct at91_pmc_caps *caps;
- struct irq_domain *irqdomain;
- u32 imr;
-};
-
-static inline void pmc_lock(struct at91_pmc *pmc)
-{
- spin_lock(&pmc->lock);
-}
-
-static inline void pmc_unlock(struct at91_pmc *pmc)
-{
- spin_unlock(&pmc->lock);
-}
-
-static inline u32 pmc_read(struct at91_pmc *pmc, int offset)
-{
- return readl(pmc->regbase + offset);
-}
-
-static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value)
-{
- writel(value, pmc->regbase + offset);
-}
-
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range);
-void of_at91sam9260_clk_slow_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_main_osc_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91rm9200_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_main_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9g45_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9g20_clk_pllb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_sama5d3_clk_pll_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_master_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_sys_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_periph_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9g45_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_prog_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91sam9x5_clk_utmi_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91rm9200_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9x5_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-void of_at91sam9n12_clk_usb_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_at91sam9x5_clk_smd_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_sama5d4_clk_h32mx_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
-void of_sama5d2_clk_generated_setup(struct device_node *np,
- struct at91_pmc *pmc);
-
#endif /* __PMC_H_ */
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index e4f89e28b5ec..3a177ade6e6c 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -38,8 +38,8 @@ static int bcm2835_aux_clk_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
- if (!reg)
- return -ENODEV;
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
onecell = devm_kmalloc(dev, sizeof(*onecell), GFP_KERNEL);
if (!onecell)
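This bcm2835-aux hunk is a plain error-handling fix: devm_ioremap_resource() returns an ERR_PTR()-encoded error and never NULL, so the old `if (!reg)` test could never fire. The idiomatic check:

	reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);	/* propagates -EBUSY, -ENOMEM, ... */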
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 015e687ffabe..c74ed3fd496d 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -88,10 +88,23 @@
#define CM_HSMDIV 0x08c
#define CM_OTPCTL 0x090
#define CM_OTPDIV 0x094
+#define CM_PCMCTL 0x098
+#define CM_PCMDIV 0x09c
#define CM_PWMCTL 0x0a0
#define CM_PWMDIV 0x0a4
+#define CM_SLIMCTL 0x0a8
+#define CM_SLIMDIV 0x0ac
#define CM_SMICTL 0x0b0
#define CM_SMIDIV 0x0b4
+/* no definition for 0x0b8 and 0x0bc */
+#define CM_TCNTCTL 0x0c0
+#define CM_TCNTDIV 0x0c4
+#define CM_TECCTL 0x0c8
+#define CM_TECDIV 0x0cc
+#define CM_TD0CTL 0x0d0
+#define CM_TD0DIV 0x0d4
+#define CM_TD1CTL 0x0d8
+#define CM_TD1DIV 0x0dc
#define CM_TSENSCTL 0x0e0
#define CM_TSENSDIV 0x0e4
#define CM_TIMERCTL 0x0e8
@@ -311,21 +324,18 @@ void __init bcm2835_init_clocks(void)
struct clk *clk;
int ret;
- clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT,
- 126000000);
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 126000000);
if (IS_ERR(clk))
pr_err("apb_pclk not registered\n");
- clk = clk_register_fixed_rate(NULL, "uart0_pclk", NULL, CLK_IS_ROOT,
- 3000000);
+ clk = clk_register_fixed_rate(NULL, "uart0_pclk", NULL, 0, 3000000);
if (IS_ERR(clk))
pr_err("uart0_pclk not registered\n");
ret = clk_register_clkdev(clk, NULL, "20201000.uart");
if (ret)
pr_err("uart0_pclk alias not registered\n");
- clk = clk_register_fixed_rate(NULL, "uart1_pclk", NULL, CLK_IS_ROOT,
- 125000000);
+ clk = clk_register_fixed_rate(NULL, "uart1_pclk", NULL, 0, 125000000);
if (IS_ERR(clk))
pr_err("uart1_pclk not registered\n");
ret = clk_register_clkdev(clk, NULL, "20215000.uart");
@@ -1060,16 +1070,7 @@ static long bcm2835_pll_divider_round_rate(struct clk_hw *hw,
static unsigned long bcm2835_pll_divider_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
- struct bcm2835_cprman *cprman = divider->cprman;
- const struct bcm2835_pll_divider_data *data = divider->data;
- u32 div = cprman_read(cprman, data->a2w_reg);
-
- div &= (1 << A2W_PLL_DIV_BITS) - 1;
- if (div == 0)
- div = 256;
-
- return parent_rate / div;
+ return clk_divider_ops.recalc_rate(hw, parent_rate);
}
static void bcm2835_pll_divider_off(struct clk_hw *hw)
@@ -1107,13 +1108,15 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
struct bcm2835_cprman *cprman = divider->cprman;
const struct bcm2835_pll_divider_data *data = divider->data;
- u32 cm;
- int ret;
+ u32 cm, div, max_div = 1 << A2W_PLL_DIV_BITS;
- ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
- if (ret)
- return ret;
+ div = DIV_ROUND_UP_ULL(parent_rate, rate);
+
+ div = min(div, max_div);
+ if (div == max_div)
+ div = 0;
+ cprman_write(cprman, data->a2w_reg, div);
cm = cprman_read(cprman, data->cm_reg);
cprman_write(cprman, data->cm_reg, cm | data->load_mask);
cprman_write(cprman, data->cm_reg, cm & ~data->load_mask);
@@ -1428,7 +1431,7 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
divider->div.reg = cprman->regs + data->a2w_reg;
divider->div.shift = A2W_PLL_DIV_SHIFT;
divider->div.width = A2W_PLL_DIV_BITS;
- divider->div.flags = 0;
+ divider->div.flags = CLK_DIVIDER_MAX_AT_ZERO;
divider->div.lock = &cprman->regs_lock;
divider->div.hw.init = &init;
divider->div.table = NULL;
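CLK_DIVIDER_MAX_AT_ZERO tells the generic divider code that a zero field means the maximum divider, 1 << A2W_PLL_DIV_BITS; that is what lets the custom recalc defer to clk_divider_ops.recalc_rate() and why set_rate encodes the maximum as 0. A worked example, assuming a hypothetical 2 GHz PLL parent:

	u32 max_div = 1 << A2W_PLL_DIV_BITS;			/* 256 */
	u32 div = DIV_ROUND_UP_ULL(2000000000ULL, 7812500);	/* = 256 */

	div = min(div, max_div);
	if (div == max_div)
		div = 0;	/* the hardware reads 0 as "divide by 256" */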
diff --git a/drivers/clk/bcm/clk-cygnus.c b/drivers/clk/bcm/clk-cygnus.c
index 3a228b6d4fee..464fdc4bc66b 100644
--- a/drivers/clk/bcm/clk-cygnus.c
+++ b/drivers/clk/bcm/clk-cygnus.c
@@ -268,3 +268,62 @@ static void __init cygnus_asiu_init(struct device_node *node)
iproc_asiu_setup(node, asiu_div, asiu_gate, ARRAY_SIZE(asiu_div));
}
CLK_OF_DECLARE(cygnus_asiu_clk, "brcm,cygnus-asiu-clk", cygnus_asiu_init);
+
+/*
+ * AUDIO PLL VCO frequency parameter table
+ *
+ * PLL output frequency = (ndiv_int + ndiv_frac / 2^20) *
+ * (parent clock rate / pdiv)
+ *
+ * On Cygnus, the parent is the 25 MHz oscillator
+ */
+static const struct iproc_pll_vco_param audiopll_vco_params[] = {
+ /* rate (Hz) ndiv_int ndiv_frac pdiv */
+ { 1354750204UL, 54, 199238, 1 },
+ { 1769470191UL, 70, 816639, 1 },
+};
+
+static const struct iproc_pll_ctrl audiopll = {
+ .flags = IPROC_CLK_PLL_NEEDS_SW_CFG | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_USER_MODE_ON | IPROC_CLK_PLL_RESET_ACTIVE_LOW,
+ .reset = RESET_VAL(0x5c, 0, 1),
+ .dig_filter = DF_VAL(0x48, 0, 3, 6, 4, 3, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 0),
+ .ndiv_int = REG_VAL(0x8, 0, 10),
+ .ndiv_frac = REG_VAL(0x8, 10, 20),
+ .pdiv = REG_VAL(0x44, 0, 4),
+ .vco_ctrl = VCO_CTRL_VAL(0x0c, 0x10),
+ .status = REG_VAL(0x54, 0, 1),
+ .macro_mode = REG_VAL(0x0, 0, 3),
+};
+
+static const struct iproc_clk_ctrl audiopll_clk[] = {
+ [BCM_CYGNUS_AUDIOPLL_CH0] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH0,
+ .flags = IPROC_CLK_AON |
+ IPROC_CLK_MCLK_DIV_BY_2,
+ .enable = ENABLE_VAL(0x14, 8, 10, 9),
+ .mdiv = REG_VAL(0x14, 0, 8),
+ },
+ [BCM_CYGNUS_AUDIOPLL_CH1] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH1,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x18, 8, 10, 9),
+ .mdiv = REG_VAL(0x18, 0, 8),
+ },
+ [BCM_CYGNUS_AUDIOPLL_CH2] = {
+ .channel = BCM_CYGNUS_AUDIOPLL_CH2,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x1c, 8, 10, 9),
+ .mdiv = REG_VAL(0x1c, 0, 8),
+ },
+};
+
+static void __init cygnus_audiopll_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &audiopll, audiopll_vco_params,
+ ARRAY_SIZE(audiopll_vco_params), audiopll_clk,
+ ARRAY_SIZE(audiopll_clk));
+}
+CLK_OF_DECLARE(cygnus_audiopll, "brcm,cygnus-audiopll",
+ cygnus_audiopll_clk_init);
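The audiopll_vco_params rows can be checked against the formula in the comment. For the first entry, with the 25 MHz parent and pdiv = 1:

	/* (ndiv_int + ndiv_frac / 2^20) * (25000000 / pdiv) */
	uint64_t rate = (((54ULL << 20) + 199238) * 25000000ULL) >> 20;
	/* rate == 1354750204, matching the table; the second row checks
	 * out the same way: (((70 << 20) + 816639) * 25 MHz) >> 20
	 * == 1769470191
	 */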
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index afd5891ac9e6..fd492a5dad12 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -25,6 +25,12 @@
#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT 30
+/*
+ * PLL MACRO_SELECT modes 0 to 5 choose pre-calculated PLL output frequencies
+ * from a look-up table. Mode 7 lets the user program the PLL clock dividers
+ */
+#define PLL_USER_MODE 7
+
/* number of delay loops waiting for PLL to lock */
#define LOCK_DELAY 100
@@ -215,7 +221,10 @@ static void __pll_put_in_reset(struct iproc_pll *pll)
const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
val = readl(pll->control_base + reset->offset);
- val &= ~(1 << reset->reset_shift | 1 << reset->p_reset_shift);
+ if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
+ val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
+ else
+ val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
@@ -236,7 +245,10 @@ static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);
val = readl(pll->control_base + reset->offset);
- val |= 1 << reset->reset_shift | 1 << reset->p_reset_shift;
+ if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
+ val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
+ else
+ val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
@@ -292,6 +304,16 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
/* put PLL in reset */
__pll_put_in_reset(pll);
+ /* set PLL in user mode before modifying PLL controls */
+ if (ctrl->flags & IPROC_CLK_PLL_USER_MODE_ON) {
+ val = readl(pll->control_base + ctrl->macro_mode.offset);
+ val &= ~(bit_mask(ctrl->macro_mode.width) <<
+ ctrl->macro_mode.shift);
+ val |= PLL_USER_MODE << ctrl->macro_mode.shift;
+ iproc_pll_write(pll, pll->control_base,
+ ctrl->macro_mode.offset, val);
+ }
+
iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);
val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);
@@ -505,7 +527,10 @@ static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
if (mdiv == 0)
mdiv = 256;
- clk->rate = parent_rate / mdiv;
+ if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
+ clk->rate = parent_rate / (mdiv * 2);
+ else
+ clk->rate = parent_rate / mdiv;
return clk->rate;
}
@@ -543,7 +568,10 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate == 0 || parent_rate == 0)
return -EINVAL;
- div = DIV_ROUND_UP(parent_rate, rate);
+ if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
+ div = DIV_ROUND_UP(parent_rate, rate * 2);
+ else
+ div = DIV_ROUND_UP(parent_rate, rate);
if (div > 256)
return -EINVAL;
@@ -555,7 +583,10 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val |= div << ctrl->mdiv.shift;
}
iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);
- clk->rate = parent_rate / div;
+ if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
+ clk->rate = parent_rate / (div * 2);
+ else
+ clk->rate = parent_rate / div;
return 0;
}
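With IPROC_CLK_MCLK_DIV_BY_2 set, both directions of the rate math carry the extra factor of two, so the hidden divide-by-2 stage stays transparent to clock consumers. With hypothetical numbers:

	/* parent_rate = 100 MHz, requested rate = 10 MHz */
	unsigned int div = DIV_ROUND_UP(100000000, 10000000 * 2);	/* 5 */

	/* recalc then reports parent_rate / (div * 2) = 10 MHz back */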
diff --git a/drivers/clk/bcm/clk-iproc.h b/drivers/clk/bcm/clk-iproc.h
index 8988de70a98c..2148b4ea9f28 100644
--- a/drivers/clk/bcm/clk-iproc.h
+++ b/drivers/clk/bcm/clk-iproc.h
@@ -61,6 +61,26 @@
#define IPROC_CLK_PLL_SPLIT_STAT_CTRL BIT(6)
/*
+ * Some PLLs have an additional divide-by-2 in the master clock
+ * calculation: MCLK = VCO_freq / (Mdiv * 2). This flag identifies
+ * such PLLs so the driver applies the modified calculation
+ */
+#define IPROC_CLK_MCLK_DIV_BY_2 BIT(7)
+
+/*
+ * Some PLLs provide a look-up table for the leaf clock frequencies and
+ * auto-calculate the VCO frequency parameters from the requested leaf
+ * clock frequency. These PLLs also have a user mode that lets the
+ * divider controls be programmed directly
+ */
+#define IPROC_CLK_PLL_USER_MODE_ON BIT(8)
+
+/*
+ * Some PLLs have an active-low reset
+ */
+#define IPROC_CLK_PLL_RESET_ACTIVE_LOW BIT(9)
+
+/*
* Parameters for VCO frequency configuration
*
* VCO frequency =
@@ -149,6 +169,7 @@ struct iproc_pll_ctrl {
struct iproc_clk_reg_op pdiv;
struct iproc_pll_vco_ctrl vco_ctrl;
struct iproc_clk_reg_op status;
+ struct iproc_clk_reg_op macro_mode;
};
/*
@@ -183,16 +204,16 @@ struct iproc_asiu_div {
unsigned int low_width;
};
-void __init iproc_armpll_setup(struct device_node *node);
-void __init iproc_pll_clk_setup(struct device_node *node,
- const struct iproc_pll_ctrl *pll_ctrl,
- const struct iproc_pll_vco_param *vco,
- unsigned int num_vco_entries,
- const struct iproc_clk_ctrl *clk_ctrl,
- unsigned int num_clks);
-void __init iproc_asiu_setup(struct device_node *node,
- const struct iproc_asiu_div *div,
- const struct iproc_asiu_gate *gate,
- unsigned int num_clks);
+void iproc_armpll_setup(struct device_node *node);
+void iproc_pll_clk_setup(struct device_node *node,
+ const struct iproc_pll_ctrl *pll_ctrl,
+ const struct iproc_pll_vco_param *vco,
+ unsigned int num_vco_entries,
+ const struct iproc_clk_ctrl *clk_ctrl,
+ unsigned int num_clks);
+void iproc_asiu_setup(struct device_node *node,
+ const struct iproc_asiu_div *div,
+ const struct iproc_asiu_gate *gate,
+ unsigned int num_clks);
#endif /* _CLK_IPROC_H */
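
To make the new flags concrete, here is a hypothetical table entry for one SoC PLL. This is a sketch only: the name audio_pll_ctrl, the macro_mode field values, and the REG_VAL() field helper are placeholders, not taken from this patch. With IPROC_CLK_MCLK_DIV_BY_2 set, a 1600 MHz VCO with mdiv = 4 yields MCLK = 1600000000 / (4 * 2) = 200 MHz rather than 400 MHz.

static const struct iproc_pll_ctrl audio_pll_ctrl = {
	/* opt into all three capabilities added above */
	.flags = IPROC_CLK_MCLK_DIV_BY_2 | IPROC_CLK_PLL_USER_MODE_ON |
		 IPROC_CLK_PLL_RESET_ACTIVE_LOW,
	/* user/macro mode select field: offset, shift, width (placeholders) */
	.macro_mode = REG_VAL(0x0, 0, 3),
};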
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 3bcd42fbb55e..3294db3b4e4e 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -16,19 +16,8 @@
#include <linux/module.h>
#include <linux/err.h>
-#define AXI_CLKGEN_V1_REG_UPDATE_ENABLE 0x04
-#define AXI_CLKGEN_V1_REG_CLK_OUT1 0x08
-#define AXI_CLKGEN_V1_REG_CLK_OUT2 0x0c
-#define AXI_CLKGEN_V1_REG_CLK_DIV 0x10
-#define AXI_CLKGEN_V1_REG_CLK_FB1 0x14
-#define AXI_CLKGEN_V1_REG_CLK_FB2 0x18
-#define AXI_CLKGEN_V1_REG_LOCK1 0x1c
-#define AXI_CLKGEN_V1_REG_LOCK2 0x20
-#define AXI_CLKGEN_V1_REG_LOCK3 0x24
-#define AXI_CLKGEN_V1_REG_FILTER1 0x28
-#define AXI_CLKGEN_V1_REG_FILTER2 0x2c
-
#define AXI_CLKGEN_V2_REG_RESET 0x40
+#define AXI_CLKGEN_V2_REG_CLKSEL 0x44
#define AXI_CLKGEN_V2_REG_DRP_CNTRL 0x70
#define AXI_CLKGEN_V2_REG_DRP_STATUS 0x74
@@ -51,40 +40,11 @@
#define MMCM_REG_FILTER1 0x4e
#define MMCM_REG_FILTER2 0x4f
-struct axi_clkgen;
-
-struct axi_clkgen_mmcm_ops {
- void (*enable)(struct axi_clkgen *axi_clkgen, bool enable);
- int (*write)(struct axi_clkgen *axi_clkgen, unsigned int reg,
- unsigned int val, unsigned int mask);
- int (*read)(struct axi_clkgen *axi_clkgen, unsigned int reg,
- unsigned int *val);
-};
-
struct axi_clkgen {
void __iomem *base;
- const struct axi_clkgen_mmcm_ops *mmcm_ops;
struct clk_hw clk_hw;
};
-static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
- bool enable)
-{
- axi_clkgen->mmcm_ops->enable(axi_clkgen, enable);
-}
-
-static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val, unsigned int mask)
-{
- return axi_clkgen->mmcm_ops->write(axi_clkgen, reg, val, mask);
-}
-
-static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
-{
- return axi_clkgen->mmcm_ops->read(axi_clkgen, reg, val);
-}
-
static uint32_t axi_clkgen_lookup_filter(unsigned int m)
{
switch (m) {
@@ -207,70 +167,6 @@ static void axi_clkgen_read(struct axi_clkgen *axi_clkgen,
*val = readl(axi_clkgen->base + reg);
}
-static unsigned int axi_clkgen_v1_map_mmcm_reg(unsigned int reg)
-{
- switch (reg) {
- case MMCM_REG_CLKOUT0_1:
- return AXI_CLKGEN_V1_REG_CLK_OUT1;
- case MMCM_REG_CLKOUT0_2:
- return AXI_CLKGEN_V1_REG_CLK_OUT2;
- case MMCM_REG_CLK_FB1:
- return AXI_CLKGEN_V1_REG_CLK_FB1;
- case MMCM_REG_CLK_FB2:
- return AXI_CLKGEN_V1_REG_CLK_FB2;
- case MMCM_REG_CLK_DIV:
- return AXI_CLKGEN_V1_REG_CLK_DIV;
- case MMCM_REG_LOCK1:
- return AXI_CLKGEN_V1_REG_LOCK1;
- case MMCM_REG_LOCK2:
- return AXI_CLKGEN_V1_REG_LOCK2;
- case MMCM_REG_LOCK3:
- return AXI_CLKGEN_V1_REG_LOCK3;
- case MMCM_REG_FILTER1:
- return AXI_CLKGEN_V1_REG_FILTER1;
- case MMCM_REG_FILTER2:
- return AXI_CLKGEN_V1_REG_FILTER2;
- default:
- return 0;
- }
-}
-
-static int axi_clkgen_v1_mmcm_write(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int val, unsigned int mask)
-{
- reg = axi_clkgen_v1_map_mmcm_reg(reg);
- if (reg == 0)
- return -EINVAL;
-
- axi_clkgen_write(axi_clkgen, reg, val);
-
- return 0;
-}
-
-static int axi_clkgen_v1_mmcm_read(struct axi_clkgen *axi_clkgen,
- unsigned int reg, unsigned int *val)
-{
- reg = axi_clkgen_v1_map_mmcm_reg(reg);
- if (reg == 0)
- return -EINVAL;
-
- axi_clkgen_read(axi_clkgen, reg, val);
-
- return 0;
-}
-
-static void axi_clkgen_v1_mmcm_enable(struct axi_clkgen *axi_clkgen,
- bool enable)
-{
- axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V1_REG_UPDATE_ENABLE, enable);
-}
-
-static const struct axi_clkgen_mmcm_ops axi_clkgen_v1_mmcm_ops = {
- .write = axi_clkgen_v1_mmcm_write,
- .read = axi_clkgen_v1_mmcm_read,
- .enable = axi_clkgen_v1_mmcm_enable,
-};
-
static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
{
unsigned int timeout = 10000;
@@ -286,7 +182,7 @@ static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen)
return val & 0xffff;
}
-static int axi_clkgen_v2_mmcm_read(struct axi_clkgen *axi_clkgen,
+static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int *val)
{
unsigned int reg_val;
@@ -310,7 +206,7 @@ static int axi_clkgen_v2_mmcm_read(struct axi_clkgen *axi_clkgen,
return 0;
}
-static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
+static int axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen,
unsigned int reg, unsigned int val, unsigned int mask)
{
unsigned int reg_val = 0;
@@ -321,7 +217,7 @@ static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
return ret;
if (mask != 0xffff) {
- axi_clkgen_v2_mmcm_read(axi_clkgen, reg, &reg_val);
+ axi_clkgen_mmcm_read(axi_clkgen, reg, &reg_val);
reg_val &= ~mask;
}
@@ -332,7 +228,7 @@ static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen,
return 0;
}
-static void axi_clkgen_v2_mmcm_enable(struct axi_clkgen *axi_clkgen,
+static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen,
bool enable)
{
unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE;
@@ -343,12 +239,6 @@ static void axi_clkgen_v2_mmcm_enable(struct axi_clkgen *axi_clkgen,
axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_RESET, val);
}
-static const struct axi_clkgen_mmcm_ops axi_clkgen_v2_mmcm_ops = {
- .write = axi_clkgen_v2_mmcm_write,
- .read = axi_clkgen_v2_mmcm_read,
- .enable = axi_clkgen_v2_mmcm_enable,
-};
-
static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw)
{
return container_of(clk_hw, struct axi_clkgen, clk_hw);
@@ -438,10 +328,7 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
tmp = (unsigned long long)(parent_rate / d) * m;
do_div(tmp, dout);
- if (tmp > ULONG_MAX)
- return ULONG_MAX;
-
- return tmp;
+ return min_t(unsigned long long, tmp, ULONG_MAX);
}
static int axi_clkgen_enable(struct clk_hw *clk_hw)
@@ -460,21 +347,38 @@ static void axi_clkgen_disable(struct clk_hw *clk_hw)
axi_clkgen_mmcm_enable(axi_clkgen, false);
}
+static int axi_clkgen_set_parent(struct clk_hw *clk_hw, u8 index)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+
+ axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_CLKSEL, index);
+
+ return 0;
+}
+
+static u8 axi_clkgen_get_parent(struct clk_hw *clk_hw)
+{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+ unsigned int parent;
+
+ axi_clkgen_read(axi_clkgen, AXI_CLKGEN_V2_REG_CLKSEL, &parent);
+
+ return parent;
+}
+
static const struct clk_ops axi_clkgen_ops = {
.recalc_rate = axi_clkgen_recalc_rate,
.round_rate = axi_clkgen_round_rate,
.set_rate = axi_clkgen_set_rate,
.enable = axi_clkgen_enable,
.disable = axi_clkgen_disable,
+ .set_parent = axi_clkgen_set_parent,
+ .get_parent = axi_clkgen_get_parent,
};
static const struct of_device_id axi_clkgen_ids[] = {
{
- .compatible = "adi,axi-clkgen-1.00.a",
- .data = &axi_clkgen_v1_mmcm_ops
- }, {
.compatible = "adi,axi-clkgen-2.00.a",
- .data = &axi_clkgen_v2_mmcm_ops,
},
{ },
};
@@ -485,10 +389,11 @@ static int axi_clkgen_probe(struct platform_device *pdev)
const struct of_device_id *id;
struct axi_clkgen *axi_clkgen;
struct clk_init_data init;
- const char *parent_name;
+ const char *parent_names[2];
const char *clk_name;
struct resource *mem;
struct clk *clk;
+ unsigned int i;
if (!pdev->dev.of_node)
return -ENODEV;
@@ -501,26 +406,29 @@ static int axi_clkgen_probe(struct platform_device *pdev)
if (!axi_clkgen)
return -ENOMEM;
- axi_clkgen->mmcm_ops = id->data;
-
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(axi_clkgen->base))
return PTR_ERR(axi_clkgen->base);
- parent_name = of_clk_get_parent_name(pdev->dev.of_node, 0);
- if (!parent_name)
+ init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
+ if (init.num_parents < 1 || init.num_parents > 2)
return -EINVAL;
+ for (i = 0; i < init.num_parents; i++) {
+ parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);
+ if (!parent_names[i])
+ return -EINVAL;
+ }
+
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
&clk_name);
init.name = clk_name;
init.ops = &axi_clkgen_ops;
- init.flags = CLK_SET_RATE_GATE;
- init.parent_names = &parent_name;
- init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ init.parent_names = parent_names;
axi_clkgen_mmcm_enable(axi_clkgen, false);
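
With .get_parent/.set_parent now routed to the CLKSEL register, a consumer can switch the generator between its two reference inputs at runtime. A minimal sketch, assuming a consumer device whose second input is wired up under the invented connection name "clkin2"; note CLK_SET_PARENT_GATE means the clock must be gated while reparenting.

#include <linux/clk.h>
#include <linux/err.h>

static int select_second_ref(struct device *dev)
{
	struct clk *gen = devm_clk_get(dev, NULL);	/* the axi-clkgen output */
	struct clk *ref = devm_clk_get(dev, "clkin2");	/* assumed name */

	if (IS_ERR(gen) || IS_ERR(ref))
		return -ENODEV;

	/* ends up in axi_clkgen_set_parent(), which writes CLKSEL */
	return clk_set_parent(gen, ref);
}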
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 4735de0660cc..1f903e1f86a2 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -19,8 +19,6 @@
#include <linux/err.h>
#include <linux/slab.h>
-#define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
-
static u8 clk_composite_get_parent(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index ded3ff4b91b9..00e035b51c69 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -28,8 +28,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
#define div_mask(width) ((1 << (width)) - 1)
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
@@ -305,9 +303,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
*/
maxdiv = min(ULONG_MAX / rate, maxdiv);
- for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) {
- if (!_is_valid_div(table, i, flags))
- continue;
+ for (i = _next_div(table, 0, flags); i <= maxdiv;
+ i = _next_div(table, i, flags)) {
if (rate * i == parent_rate_saved) {
/*
* It's the most ideal case if the requested rate can be
@@ -423,6 +420,12 @@ const struct clk_ops clk_divider_ops = {
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
+const struct clk_ops clk_divider_ro_ops = {
+ .recalc_rate = clk_divider_recalc_rate,
+ .round_rate = clk_divider_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
+
static struct clk *_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -446,7 +449,10 @@ static struct clk *_register_divider(struct device *dev, const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &clk_divider_ops;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_divider_ops;
init.flags = flags | CLK_IS_BASIC;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
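
The new clk_divider_ro_ops carry no .set_rate, so a divider registered with CLK_DIVIDER_READ_ONLY reports and rounds rates but never touches the register. A hedged sketch of a caller; the clock names and register offset are invented:

#include <linux/clk-provider.h>

static struct clk *register_status_div(void __iomem *base, spinlock_t *lock)
{
	/* firmware owns this divider; Linux may only observe it */
	return clk_register_divider(NULL, "sys_status_div", "sys_pll", 0,
				    base + 0x10, 0, 4,
				    CLK_DIVIDER_READ_ONLY, lock);
}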
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index bac4553f04b8..22e4c659704e 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -36,7 +36,7 @@ static void __init efm32gg_cmu_init(struct device_node *np)
}
clk[clk_HFXO] = clk_register_fixed_rate(NULL, "HFXO", NULL,
- CLK_IS_ROOT, 48000000);
+ 0, 48000000);
clk[clk_HFPERCLKUSART0] = clk_register_gate(NULL, "HFPERCLK.USART0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 0, 0, NULL);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 83de57aeceea..053448e2453d 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -23,8 +23,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
-
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -102,6 +100,19 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
}
EXPORT_SYMBOL_GPL(clk_register_fixed_factor);
+void clk_unregister_fixed_factor(struct clk *clk)
+{
+ struct clk_hw *hw;
+
+ hw = __clk_get_hw(clk);
+ if (!hw)
+ return;
+
+ clk_unregister(clk);
+ kfree(to_clk_fixed_factor(hw));
+}
+EXPORT_SYMBOL_GPL(clk_unregister_fixed_factor);
+
#ifdef CONFIG_OF
/**
* of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index f85ec8d1711f..cd9dc925b3f8 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -26,8 +26,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
-
static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -106,6 +104,19 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
}
EXPORT_SYMBOL_GPL(clk_register_fixed_rate);
+void clk_unregister_fixed_rate(struct clk *clk)
+{
+ struct clk_hw *hw;
+
+ hw = __clk_get_hw(clk);
+ if (!hw)
+ return;
+
+ clk_unregister(clk);
+ kfree(to_clk_fixed_rate(hw));
+}
+EXPORT_SYMBOL_GPL(clk_unregister_fixed_rate);
+
#ifdef CONFIG_OF
/**
* of_fixed_clk_setup() - Setup function for simple fixed rate clock
@@ -125,8 +136,7 @@ void of_fixed_clk_setup(struct device_node *node)
of_property_read_string(node, "clock-output-names", &clk_name);
clk = clk_register_fixed_rate_with_accuracy(NULL, clk_name, NULL,
- CLK_IS_ROOT, rate,
- accuracy);
+ 0, rate, accuracy);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
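
The new clk_unregister_fixed_factor()/clk_unregister_fixed_rate() helpers exist because a bare clk_unregister() would leak the wrapper structure allocated by the matching register call. A sketch of the intended pairing; the demo names and rate are invented:

#include <linux/clk-provider.h>
#include <linux/err.h>

static struct clk *demo_clk;

static int demo_init(void)
{
	demo_clk = clk_register_fixed_rate(NULL, "demo_24m", NULL, 0, 24000000);
	return PTR_ERR_OR_ZERO(demo_clk);
}

static void demo_exit(void)
{
	/* unregisters the clk and frees the struct clk_fixed_rate behind it */
	clk_unregister_fixed_rate(demo_clk);
}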
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 5c4955e33f7a..1abcd76b4993 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -16,8 +16,6 @@
#include <linux/slab.h>
#include <linux/rational.h>
-#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
-
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index de0b322f5f58..d0d8ec8e1f1b 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -26,8 +26,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-
/*
* It works on following logic:
*
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 7b09a265d79f..08f65acc5d57 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -20,6 +20,8 @@
#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
/**
* DOC: basic gpio gated clock which can be enabled and disabled
@@ -31,8 +33,6 @@
* parent - fixed parent. No clk_set_parent support
*/
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
static int clk_gpio_gate_enable(struct clk_hw *hw)
{
struct clk_gpio *clk = to_clk_gpio(hw);
@@ -201,134 +201,69 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
}
EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
-#ifdef CONFIG_OF
-/**
- * clk_register_get() has to be delayed, because -EPROBE_DEFER
- * can not be handled properly at of_clk_init() call time.
- */
-
-struct clk_gpio_delayed_register_data {
- const char *gpio_name;
- int num_parents;
- const char **parent_names;
- struct device_node *node;
- struct mutex lock;
- struct clk *clk;
- struct clk *(*clk_register_get)(const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned gpio, bool active_low);
-};
-
-static struct clk *of_clk_gpio_delayed_register_get(
- struct of_phandle_args *clkspec, void *_data)
+static int gpio_clk_driver_probe(struct platform_device *pdev)
{
- struct clk_gpio_delayed_register_data *data = _data;
- struct clk *clk;
+ struct device_node *node = pdev->dev.of_node;
+ const char **parent_names, *gpio_name;
+ unsigned int num_parents;
int gpio;
enum of_gpio_flags of_flags;
+ struct clk *clk;
+ bool active_low, is_mux;
- mutex_lock(&data->lock);
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents) {
+ parent_names = devm_kcalloc(&pdev->dev, num_parents,
+ sizeof(char *), GFP_KERNEL);
+ if (!parent_names)
+ return -ENOMEM;
- if (data->clk) {
- mutex_unlock(&data->lock);
- return data->clk;
+ of_clk_parent_fill(node, parent_names, num_parents);
+ } else {
+ parent_names = NULL;
}
- gpio = of_get_named_gpio_flags(data->node, data->gpio_name, 0,
- &of_flags);
+ is_mux = of_device_is_compatible(node, "gpio-mux-clock");
+
+ gpio_name = is_mux ? "select-gpios" : "enable-gpios";
+ gpio = of_get_named_gpio_flags(node, gpio_name, 0, &of_flags);
if (gpio < 0) {
- mutex_unlock(&data->lock);
if (gpio == -EPROBE_DEFER)
pr_debug("%s: %s: GPIOs not yet available, retry later\n",
- data->node->name, __func__);
+ node->name, __func__);
else
pr_err("%s: %s: Can't get '%s' DT property\n",
- data->node->name, __func__,
- data->gpio_name);
- return ERR_PTR(gpio);
+ node->name, __func__,
+ gpio_name);
+ return gpio;
}
- clk = data->clk_register_get(data->node->name, data->parent_names,
- data->num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
- if (IS_ERR(clk))
- goto out;
-
- data->clk = clk;
-out:
- mutex_unlock(&data->lock);
-
- return clk;
-}
-
-static struct clk *of_clk_gpio_gate_delayed_register_get(const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned gpio, bool active_low)
-{
- return clk_register_gpio_gate(NULL, name, parent_names ?
- parent_names[0] : NULL, gpio, active_low, 0);
-}
-
-static struct clk *of_clk_gpio_mux_delayed_register_get(const char *name,
- const char * const *parent_names, u8 num_parents, unsigned gpio,
- bool active_low)
-{
- return clk_register_gpio_mux(NULL, name, parent_names, num_parents,
- gpio, active_low, 0);
-}
-
-static void __init of_gpio_clk_setup(struct device_node *node,
- const char *gpio_name,
- struct clk *(*clk_register_get)(const char *name,
- const char * const *parent_names,
- u8 num_parents,
- unsigned gpio, bool active_low))
-{
- struct clk_gpio_delayed_register_data *data;
- const char **parent_names;
- int i, num_parents;
-
- num_parents = of_clk_get_parent_count(node);
- if (num_parents < 0)
- num_parents = 0;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return;
-
- if (num_parents) {
- parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
- if (!parent_names) {
- kfree(data);
- return;
- }
-
- for (i = 0; i < num_parents; i++)
- parent_names[i] = of_clk_get_parent_name(node, i);
- } else {
- parent_names = NULL;
- }
+ active_low = of_flags & OF_GPIO_ACTIVE_LOW;
- data->num_parents = num_parents;
- data->parent_names = parent_names;
- data->node = node;
- data->gpio_name = gpio_name;
- data->clk_register_get = clk_register_get;
- mutex_init(&data->lock);
+ if (is_mux)
+ clk = clk_register_gpio_mux(&pdev->dev, node->name,
+ parent_names, num_parents, gpio, active_low, 0);
+ else
+ clk = clk_register_gpio_gate(&pdev->dev, node->name,
+ parent_names ? parent_names[0] : NULL, gpio,
+ active_low, 0);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
- of_clk_add_provider(node, of_clk_gpio_delayed_register_get, data);
+ return of_clk_add_provider(node, of_clk_src_simple_get, clk);
}
-static void __init of_gpio_gate_clk_setup(struct device_node *node)
-{
- of_gpio_clk_setup(node, "enable-gpios",
- of_clk_gpio_gate_delayed_register_get);
-}
-CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup);
+static const struct of_device_id gpio_clk_match_table[] = {
+ { .compatible = "gpio-mux-clock" },
+ { .compatible = "gpio-gate-clock" },
+ { }
+};
-void __init of_gpio_mux_clk_setup(struct device_node *node)
-{
- of_gpio_clk_setup(node, "select-gpios",
- of_clk_gpio_mux_delayed_register_get);
-}
-CLK_OF_DECLARE(gpio_mux_clk, "gpio-mux-clock", of_gpio_mux_clk_setup);
-#endif
+static struct platform_driver gpio_clk_driver = {
+ .probe = gpio_clk_driver_probe,
+ .driver = {
+ .name = "gpio-clk",
+ .of_match_table = gpio_clk_match_table,
+ },
+};
+builtin_platform_driver(gpio_clk_driver);
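
The point of moving from CLK_OF_DECLARE to a platform driver is probe deferral: an of_clk_init() callback returns void and cannot retry, while a probe routine that returns -EPROBE_DEFER is simply re-run by the driver core once the GPIO provider shows up. A reduced sketch of that mechanism, not the driver itself:

#include <linux/of_gpio.h>
#include <linux/platform_device.h>

static int defer_aware_probe(struct platform_device *pdev)
{
	int gpio = of_get_named_gpio(pdev->dev.of_node, "enable-gpios", 0);

	if (gpio < 0)
		return gpio;	/* -EPROBE_DEFER makes the core retry later */

	return 0;
}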
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 446c2fe76dc2..9b6f2772e948 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -38,17 +38,14 @@ static struct clk_init_data max77686_clks_init[MAX77686_CLKS_NUM] = {
[MAX77686_CLK_AP] = {
.name = "32khz_ap",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
[MAX77686_CLK_CP] = {
.name = "32khz_cp",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
[MAX77686_CLK_PMIC] = {
.name = "32khz_pmic",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
};
diff --git a/drivers/clk/clk-max77802.c b/drivers/clk/clk-max77802.c
index 4a89f7979ba0..355dd2e522c3 100644
--- a/drivers/clk/clk-max77802.c
+++ b/drivers/clk/clk-max77802.c
@@ -39,12 +39,10 @@ static struct clk_init_data max77802_clks_init[MAX77802_CLKS_NUM] = {
[MAX77802_CLK_32K_AP] = {
.name = "32khz_ap",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
[MAX77802_CLK_32K_CP] = {
.name = "32khz_cp",
.ops = &max_gen_clk_ops,
- .flags = CLK_IS_ROOT,
},
};
diff --git a/drivers/clk/clk-mb86s7x.c b/drivers/clk/clk-mb86s7x.c
index f39c25a22f43..e0817754ca3e 100644
--- a/drivers/clk/clk-mb86s7x.c
+++ b/drivers/clk/clk-mb86s7x.c
@@ -217,7 +217,7 @@ static struct clk *crg11_get(struct of_phandle_args *clkspec, void *data)
init.name = clkp;
init.num_parents = 0;
init.ops = &crg_port_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
crgclk->hw.init = &init;
crgclk->cntrlr = cntrlr;
crgclk->domain = domain;
@@ -341,7 +341,7 @@ struct clk *mb86s7x_clclk_register(struct device *cpu_dev)
init.name = dev_name(cpu_dev);
init.ops = &clk_clc_ops;
- init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+ init.flags = CLK_GET_RATE_NOCACHE;
init.num_parents = 0;
return devm_clk_register(cpu_dev, &clc->hw);
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index fe7806506bf3..9e449c7b751c 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -14,8 +14,6 @@
#include <linux/of.h>
#include <linux/slab.h>
-#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
-
static unsigned long __get_mult(struct clk_multiplier *mult,
unsigned long rate,
unsigned long parent_rate)
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 5ed03c8a8df9..252188fd8bcd 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -26,8 +26,6 @@
* parent - parent is adjustable through clk_set_parent
*/
-#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-
static u8 clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 8e3039f0c3f9..9c0b8e6b1ab3 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -44,7 +44,7 @@ struct palmas_clock_info {
struct clk *clk;
struct clk_hw hw;
struct palmas *palmas;
- struct palmas_clk32k_desc *clk_desc;
+ const struct palmas_clk32k_desc *clk_desc;
int ext_control_pin;
};
@@ -125,10 +125,10 @@ static struct clk_ops palmas_clks_ops = {
struct palmas_clks_of_match_data {
struct clk_init_data init;
- struct palmas_clk32k_desc desc;
+ const struct palmas_clk32k_desc desc;
};
-static struct palmas_clks_of_match_data palmas_of_clk32kg = {
+static const struct palmas_clks_of_match_data palmas_of_clk32kg = {
.init = {
.name = "clk32kg",
.ops = &palmas_clks_ops,
@@ -144,7 +144,7 @@ static struct palmas_clks_of_match_data palmas_of_clk32kg = {
},
};
-static struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
+static const struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
.init = {
.name = "clk32kgaudio",
.ops = &palmas_clks_ops,
@@ -240,14 +240,14 @@ static int palmas_clks_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
struct device_node *node = pdev->dev.of_node;
- struct palmas_clks_of_match_data *match_data;
- const struct of_device_id *match;
+ const struct palmas_clks_of_match_data *match_data;
struct palmas_clock_info *cinfo;
struct clk *clk;
int ret;
- match = of_match_device(palmas_clks_of_match, &pdev->dev);
- match_data = (struct palmas_clks_of_match_data *)match->data;
+ match_data = of_device_get_match_data(&pdev->dev);
+ if (!match_data)
+ return 1;
cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
if (!cinfo)
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 328fcfcefd8c..883045814dac 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -95,7 +95,7 @@ static int clk_pwm_probe(struct platform_device *pdev)
init.name = clk_name;
init.ops = &clk_pwm_ops;
- init.flags = CLK_IS_BASIC | CLK_IS_ROOT;
+ init.flags = CLK_IS_BASIC;
init.num_parents = 0;
clk_pwm->pwm = pwm;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index d266299dfdb1..f8c83977c7fa 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -28,11 +28,6 @@
#include <linux/mfd/samsung/s5m8767.h>
#include <linux/mfd/samsung/core.h>
-#define s2mps11_name(a) (a->hw.init->name)
-
-static struct clk **clk_table;
-static struct clk_onecell_data clk_data;
-
enum {
S2MPS11_CLK_AP = 0,
S2MPS11_CLK_CP,
@@ -99,52 +94,19 @@ static struct clk_ops s2mps11_clk_ops = {
.recalc_rate = s2mps11_clk_recalc_rate,
};
+/* This s2mps11_clks_init structure is common to s2mps11, s2mps13 and s2mps14 */
static struct clk_init_data s2mps11_clks_init[S2MPS11_CLKS_NUM] = {
[S2MPS11_CLK_AP] = {
.name = "s2mps11_ap",
.ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
},
[S2MPS11_CLK_CP] = {
.name = "s2mps11_cp",
.ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
},
[S2MPS11_CLK_BT] = {
.name = "s2mps11_bt",
.ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
-};
-
-static struct clk_init_data s2mps13_clks_init[S2MPS11_CLKS_NUM] = {
- [S2MPS11_CLK_AP] = {
- .name = "s2mps13_ap",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
- [S2MPS11_CLK_CP] = {
- .name = "s2mps13_cp",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
- [S2MPS11_CLK_BT] = {
- .name = "s2mps13_bt",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
-};
-
-static struct clk_init_data s2mps14_clks_init[S2MPS11_CLKS_NUM] = {
- [S2MPS11_CLK_AP] = {
- .name = "s2mps14_ap",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
- },
- [S2MPS11_CLK_BT] = {
- .name = "s2mps14_bt",
- .ops = &s2mps11_clk_ops,
- .flags = CLK_IS_ROOT,
},
};
@@ -164,12 +126,9 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev,
return ERR_PTR(-EINVAL);
}
- for (i = 0; i < S2MPS11_CLKS_NUM; i++) {
- if (!clks_init[i].name)
- continue; /* Skip clocks not present in some devices */
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++)
of_property_read_string_index(clk_np, "clock-output-names", i,
&clks_init[i].name);
- }
return clk_np;
}
@@ -177,39 +136,38 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev,
static int s2mps11_clk_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct s2mps11_clk *s2mps11_clks, *s2mps11_clk;
+ struct s2mps11_clk *s2mps11_clks;
+ struct clk_onecell_data *clk_data;
unsigned int s2mps11_reg;
- struct clk_init_data *clks_init;
int i, ret = 0;
+ enum sec_device_type hwid = platform_get_device_id(pdev)->driver_data;
s2mps11_clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
- sizeof(*s2mps11_clk), GFP_KERNEL);
+ sizeof(*s2mps11_clks), GFP_KERNEL);
if (!s2mps11_clks)
return -ENOMEM;
- s2mps11_clk = s2mps11_clks;
+ clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
- clk_table = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
+ clk_data->clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
sizeof(struct clk *), GFP_KERNEL);
- if (!clk_table)
+ if (!clk_data->clks)
return -ENOMEM;
- switch(platform_get_device_id(pdev)->driver_data) {
+ switch (hwid) {
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
- clks_init = s2mps11_clks_init;
break;
case S2MPS13X:
s2mps11_reg = S2MPS13_REG_RTCCTRL;
- clks_init = s2mps13_clks_init;
break;
case S2MPS14X:
s2mps11_reg = S2MPS14_REG_RTCCTRL;
- clks_init = s2mps14_clks_init;
break;
case S5M8767X:
s2mps11_reg = S5M8767_REG_CTRL1;
- clks_init = s2mps11_clks_init;
break;
default:
dev_err(&pdev->dev, "Invalid device type\n");
@@ -217,46 +175,39 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
}
/* Store clocks of_node in first element of s2mps11_clks array */
- s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, clks_init);
+ s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, s2mps11_clks_init);
if (IS_ERR(s2mps11_clks->clk_np))
return PTR_ERR(s2mps11_clks->clk_np);
- for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) {
- if (!clks_init[i].name)
+ for (i = 0; i < S2MPS11_CLKS_NUM; i++) {
+ if (i == S2MPS11_CLK_CP && hwid == S2MPS14X)
continue; /* Skip clocks not present in some devices */
- s2mps11_clk->iodev = iodev;
- s2mps11_clk->hw.init = &clks_init[i];
- s2mps11_clk->mask = 1 << i;
- s2mps11_clk->reg = s2mps11_reg;
-
- s2mps11_clk->clk = devm_clk_register(&pdev->dev,
- &s2mps11_clk->hw);
- if (IS_ERR(s2mps11_clk->clk)) {
+ s2mps11_clks[i].iodev = iodev;
+ s2mps11_clks[i].hw.init = &s2mps11_clks_init[i];
+ s2mps11_clks[i].mask = 1 << i;
+ s2mps11_clks[i].reg = s2mps11_reg;
+
+ s2mps11_clks[i].clk = devm_clk_register(&pdev->dev,
+ &s2mps11_clks[i].hw);
+ if (IS_ERR(s2mps11_clks[i].clk)) {
dev_err(&pdev->dev, "Fail to register : %s\n",
- s2mps11_name(s2mps11_clk));
- ret = PTR_ERR(s2mps11_clk->clk);
+ s2mps11_clks_init[i].name);
+ ret = PTR_ERR(s2mps11_clks[i].clk);
goto err_reg;
}
- s2mps11_clk->lookup = clkdev_create(s2mps11_clk->clk,
- s2mps11_name(s2mps11_clk), NULL);
- if (!s2mps11_clk->lookup) {
+ s2mps11_clks[i].lookup = clkdev_create(s2mps11_clks[i].clk,
+ s2mps11_clks_init[i].name, NULL);
+ if (!s2mps11_clks[i].lookup) {
ret = -ENOMEM;
goto err_reg;
}
+ clk_data->clks[i] = s2mps11_clks[i].clk;
}
- for (i = 0; i < S2MPS11_CLKS_NUM; i++) {
- /* Skip clocks not present on S2MPS14 */
- if (!clks_init[i].name)
- continue;
- clk_table[i] = s2mps11_clks[i].clk;
- }
-
- clk_data.clks = clk_table;
- clk_data.clk_num = S2MPS11_CLKS_NUM;
+ clk_data->clk_num = S2MPS11_CLKS_NUM;
of_clk_add_provider(s2mps11_clks->clk_np, of_clk_src_onecell_get,
- &clk_data);
+ clk_data);
platform_set_drvdata(pdev, s2mps11_clks);
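
of_clk_src_onecell_get() resolves a consumer's "clocks = <&provider N>" phandle to clk_data->clks[N], which is why the driver keeps S2MPS11_CLKS_NUM slots and simply leaves the absent CP clock slot empty on S2MPS14. The generic provider pattern, sketched with invented names:

#include <linux/clk-provider.h>
#include <linux/of.h>

static struct clk_onecell_data demo_clk_data;

static int demo_add_provider(struct device_node *np, struct clk **clks,
			     unsigned int nr)
{
	demo_clk_data.clks = clks;	/* DT cell index == array index */
	demo_clk_data.clk_num = nr;

	return of_clk_add_provider(np, of_clk_src_onecell_get, &demo_clk_data);
}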
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 89e9ca78bb94..6962ee5d1e9a 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -155,7 +155,7 @@ scpi_clk_ops_init(struct device *dev, const struct of_device_id *match,
unsigned long min = 0, max = 0;
init.name = name;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.num_parents = 0;
init.ops = match->data;
sclk->hw.init = &init;
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 6af7dce54241..ceef25b0990b 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -313,7 +313,7 @@ static int si514_probe(struct i2c_client *client,
return -ENOMEM;
init.ops = &si514_clk_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.num_parents = 0;
data->hw.init = &init;
data->i2c_client = client;
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 850316ac8831..b1bc12c045d3 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -1495,7 +1495,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (drvdata->variant == SI5351_VARIANT_B) {
init.name = si5351_pll_names[2];
init.ops = &si5351_vxco_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
} else {
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index cf478aa9fa5d..d56648521a95 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -418,7 +418,7 @@ static int si570_probe(struct i2c_client *client,
return -ENOMEM;
init.ops = &si570_clk_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.num_parents = 0;
data->hw.init = &init;
data->i2c_client = client;
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 37e928846ec5..b0f76a84f1e9 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -355,7 +355,7 @@ CLK_OF_DECLARE(vt8500_device, "via,vt8500-device-clock", vtwm_device_clk_init);
#define WM8850_BITS_TO_VAL(m, d1, d2) \
((((m / 2) - 1) << 16) | ((d1 - 1) << 8) | d2)
-static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+static int vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *prediv)
{
unsigned long tclk;
@@ -365,7 +365,7 @@ static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
pr_err("%s: requested rate out of range\n", __func__);
*multiplier = 0;
*prediv = 1;
- return;
+ return -EINVAL;
}
if (rate <= parent_rate * 31)
/* use the prediv to double the resolution */
@@ -379,12 +379,15 @@ static void vt8500_find_pll_bits(unsigned long rate, unsigned long parent_rate,
if (tclk != rate)
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__,
rate, tclk);
+
+ return 0;
}
-static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+static int wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *divisor1, u32 *divisor2)
{
- u32 mul, div1, div2;
+ u32 mul, div1;
+ int div2;
u32 best_mul, best_div1, best_div2;
unsigned long tclk, rate_err, best_err;
@@ -403,7 +406,7 @@ static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
- return;
+ return 0;
}
if (rate_err < best_err) {
@@ -414,12 +417,19 @@ static void wm8650_find_pll_bits(unsigned long rate, unsigned long parent_rate,
}
}
+ if (best_err == (unsigned long)-1) {
+ pr_warn("%s: impossible rate %lu\n", __func__, rate);
+ return -EINVAL;
+ }
+
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
*multiplier = best_mul;
*divisor1 = best_div1;
*divisor2 = best_div2;
+
+ return 0;
}
static u32 wm8750_get_filter(u32 parent_rate, u32 divisor1)
@@ -449,10 +459,11 @@ static u32 wm8750_get_filter(u32 parent_rate, u32 divisor1)
return 0;
}
-static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+static int wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *filter, u32 *multiplier, u32 *divisor1, u32 *divisor2)
{
- u32 mul, div1, div2;
+ u32 mul;
+ int div1, div2;
u32 best_mul, best_div1, best_div2;
unsigned long tclk, rate_err, best_err;
@@ -472,7 +483,7 @@ static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
- return;
+ return 0;
}
if (rate_err < best_err) {
@@ -483,6 +494,11 @@ static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
}
}
+ if (best_err == (unsigned long)-1) {
+ pr_warn("%s: impossible rate %lu\n", __func__, rate);
+ return -EINVAL;
+ }
+
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
@@ -491,12 +507,15 @@ static void wm8750_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = best_mul;
*divisor1 = best_div1;
*divisor2 = best_div2;
+
+ return 0;
}
-static void wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
+static int wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
u32 *multiplier, u32 *divisor1, u32 *divisor2)
{
- u32 mul, div1, div2;
+ u32 mul;
+ int div1, div2;
u32 best_mul, best_div1, best_div2;
unsigned long tclk, rate_err, best_err;
@@ -516,7 +535,7 @@ static void wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = mul;
*divisor1 = div1;
*divisor2 = div2;
- return;
+ return 0;
}
if (rate_err < best_err) {
@@ -527,6 +546,11 @@ static void wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
}
}
+ if (best_err == (unsigned long)-1) {
+ pr_warn("%s: impossible rate %lu\n", __func__, rate);
+ return -EINVAL;
+ }
+
/* if we got here, it wasn't an exact match */
pr_warn("%s: requested rate %lu, found rate %lu\n", __func__, rate,
rate - best_err);
@@ -534,6 +558,8 @@ static void wm8850_find_pll_bits(unsigned long rate, unsigned long parent_rate,
*multiplier = best_mul;
*divisor1 = best_div1;
*divisor2 = best_div2;
+
+ return 0;
}
static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -543,31 +569,39 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
u32 filter, mul, div1, div2;
u32 pll_val;
unsigned long flags = 0;
+ int ret;
/* sanity check */
switch (pll->type) {
case PLL_TYPE_VT8500:
- vt8500_find_pll_bits(rate, parent_rate, &mul, &div1);
- pll_val = VT8500_BITS_TO_VAL(mul, div1);
+ ret = vt8500_find_pll_bits(rate, parent_rate, &mul, &div1);
+ if (!ret)
+ pll_val = VT8500_BITS_TO_VAL(mul, div1);
break;
case PLL_TYPE_WM8650:
- wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
- pll_val = WM8650_BITS_TO_VAL(mul, div1, div2);
+ ret = wm8650_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
+ if (!ret)
+ pll_val = WM8650_BITS_TO_VAL(mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
- pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
+ ret = wm8750_find_pll_bits(rate, parent_rate, &filter, &mul, &div1, &div2);
+ if (!ret)
+ pll_val = WM8750_BITS_TO_VAL(filter, mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- wm8850_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
- pll_val = WM8850_BITS_TO_VAL(mul, div1, div2);
+ ret = wm8850_find_pll_bits(rate, parent_rate, &mul, &div1, &div2);
+ if (!ret)
+ pll_val = WM8850_BITS_TO_VAL(mul, div1, div2);
break;
default:
pr_err("%s: invalid pll type\n", __func__);
- return 0;
+ ret = -EINVAL;
}
+ if (ret)
+ return ret;
+
spin_lock_irqsave(pll->lock, flags);
vt8500_pmc_wait_busy();
@@ -585,28 +619,36 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
long round_rate;
+ int ret;
switch (pll->type) {
case PLL_TYPE_VT8500:
- vt8500_find_pll_bits(rate, *prate, &mul, &div1);
- round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
+ ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1);
+ if (!ret)
+ round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
break;
case PLL_TYPE_WM8650:
- wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
- round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
+ ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ if (!ret)
+ round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
- round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+ ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+ if (!ret)
+ round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
- round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
+ ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ if (!ret)
+ round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
break;
default:
- round_rate = 0;
+ ret = -EINVAL;
}
+ if (ret)
+ return ret;
+
return round_rate;
}
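
All four *_find_pll_bits() helpers now share one convention: best_err starts at (unsigned long)-1 (i.e. ULONG_MAX) and is only lowered by a viable multiplier/divider combination, so an untouched best_err means the requested rate is unreachable and -EINVAL propagates out of round_rate/set_rate instead of garbage divider values. A stripped-down sketch of the pattern, without the driver's real validity constraints:

#include <linux/kernel.h>

static int find_best_div(unsigned long rate, unsigned long parent_rate,
			 u32 *best_div)
{
	unsigned long best_err = ULONG_MAX;
	u32 div;

	for (div = 1; div <= 64; div++) {
		unsigned long t = parent_rate / div;
		unsigned long err = t > rate ? t - rate : rate - t;

		if (err < best_err) {
			best_err = err;
			*best_div = div;
		}
	}

	/* in the real helpers, validity checks can leave best_err untouched */
	return best_err == ULONG_MAX ? -EINVAL : 0;
}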
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 10224b01b97c..d73450b60b28 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -29,7 +29,9 @@
#include <linux/of_address.h>
/* Register SCU_PCPPLL bit fields */
-#define N_DIV_RD(src) (((src) & 0x000001ff))
+#define N_DIV_RD(src) ((src) & 0x000001ff)
+#define SC_N_DIV_RD(src) ((src) & 0x0000007f)
+#define SC_OUTDIV2(src) (((src) & 0x00000100) >> 8)
/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src) (((src) & 0x07000000)>>24)
@@ -48,7 +50,7 @@ static inline u32 xgene_clk_read(void __iomem *csr)
static inline void xgene_clk_write(u32 data, void __iomem *csr)
{
- return writel_relaxed(data, csr);
+ writel_relaxed(data, csr);
}
/* PLL Clock */
@@ -63,6 +65,7 @@ struct xgene_clk_pll {
spinlock_t *lock;
u32 pll_offset;
enum xgene_pll_type type;
+ int version;
};
#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)
@@ -92,27 +95,37 @@ static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
- if (pllclk->type == PLL_TYPE_PCP) {
- /*
- * PLL VCO = Reference clock * NF
- * PCP PLL = PLL_VCO / 2
- */
- nout = 2;
- fvco = parent_rate * (N_DIV_RD(pll) + 4);
+ if (pllclk->version <= 1) {
+ if (pllclk->type == PLL_TYPE_PCP) {
+ /*
+ * PLL VCO = Reference clock * NF
+ * PCP PLL = PLL_VCO / 2
+ */
+ nout = 2;
+ fvco = parent_rate * (N_DIV_RD(pll) + 4);
+ } else {
+ /*
+ * Fref = Reference Clock / NREF;
+ * Fvco = Fref * NFB;
+ * Fout = Fvco / NOUT;
+ */
+ nref = CLKR_RD(pll) + 1;
+ nout = CLKOD_RD(pll) + 1;
+ nfb = CLKF_RD(pll);
+ fref = parent_rate / nref;
+ fvco = fref * nfb;
+ }
} else {
/*
- * Fref = Reference Clock / NREF;
- * Fvco = Fref * NFB;
- * Fout = Fvco / NOUT;
+ * fvco = Reference clock * FBDIVC
+ * PLL freq = fvco / NOUT
*/
- nref = CLKR_RD(pll) + 1;
- nout = CLKOD_RD(pll) + 1;
- nfb = CLKF_RD(pll);
- fref = parent_rate / nref;
- fvco = fref * nfb;
+ nout = SC_OUTDIV2(pll) ? 2 : 3;
+ fvco = parent_rate * SC_N_DIV_RD(pll);
}
- pr_debug("%s pll recalc rate %ld parent %ld\n", clk_hw_get_name(hw),
- fvco / nout, parent_rate);
+ pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
+ clk_hw_get_name(hw), fvco / nout, parent_rate,
+ pllclk->version);
return fvco / nout;
}
@@ -125,7 +138,7 @@ static const struct clk_ops xgene_clk_pll_ops = {
static struct clk *xgene_register_clk_pll(struct device *dev,
const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u32 pll_offset,
- u32 type, spinlock_t *lock)
+ u32 type, spinlock_t *lock, int version)
{
struct xgene_clk_pll *apmclk;
struct clk *clk;
@@ -144,6 +157,7 @@ static struct clk *xgene_register_clk_pll(struct device *dev,
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
+ apmclk->version = version;
apmclk->reg = reg;
apmclk->lock = lock;
apmclk->pll_offset = pll_offset;
@@ -160,26 +174,37 @@ static struct clk *xgene_register_clk_pll(struct device *dev,
return clk;
}
+static int xgene_pllclk_version(struct device_node *np)
+{
+ if (of_device_is_compatible(np, "apm,xgene-socpll-clock"))
+ return 1;
+ if (of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
+ return 1;
+ return 2;
+}
+
static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
{
- const char *clk_name = np->full_name;
- struct clk *clk;
- void __iomem *reg;
+ const char *clk_name = np->full_name;
+ struct clk *clk;
+ void __iomem *reg;
+ int version = xgene_pllclk_version(np);
- reg = of_iomap(np, 0);
- if (reg == NULL) {
- pr_err("Unable to map CSR register for %s\n", np->full_name);
- return;
- }
- of_property_read_string(np, "clock-output-names", &clk_name);
- clk = xgene_register_clk_pll(NULL,
- clk_name, of_clk_get_parent_name(np, 0),
- CLK_IS_ROOT, reg, 0, pll_type, &clk_lock);
- if (!IS_ERR(clk)) {
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
- pr_debug("Add %s clock PLL\n", clk_name);
- }
+ reg = of_iomap(np, 0);
+ if (reg == NULL) {
+ pr_err("Unable to map CSR register for %s\n", np->full_name);
+ return;
+ }
+ of_property_read_string(np, "clock-output-names", &clk_name);
+ clk = xgene_register_clk_pll(NULL,
+ clk_name, of_clk_get_parent_name(np, 0),
+ CLK_IS_ROOT, reg, 0, pll_type, &clk_lock,
+ version);
+ if (!IS_ERR(clk)) {
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ clk_register_clkdev(clk, clk_name, NULL);
+ pr_debug("Add %s clock PLL\n", clk_name);
+ }
}
static void xgene_socpllclk_init(struct device_node *np)
@@ -351,8 +376,8 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* Set new divider */
data = xgene_clk_read(pclk->param.divider_reg +
pclk->param.reg_divider_offset);
- data &= ~((1 << pclk->param.reg_divider_width) - 1)
- << pclk->param.reg_divider_shift;
+ data &= ~(((1 << pclk->param.reg_divider_width) - 1)
+ << pclk->param.reg_divider_shift);
data |= divider;
xgene_clk_write(data, pclk->param.divider_reg +
pclk->param.reg_divider_offset);
@@ -460,7 +485,7 @@ static void __init xgene_devclk_init(struct device_node *np)
rc = of_address_to_resource(np, i, &res);
if (rc != 0) {
if (i == 0) {
- pr_err("no DTS register for %s\n",
+ pr_err("no DTS register for %s\n",
np->full_name);
return;
}
@@ -518,4 +543,8 @@ err:
CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
+CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
+ xgene_socpllclk_init);
+CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
+ xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);
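
For v2 PLLs the recalc path reduces to fvco = parent_rate * SC_N_DIV_RD(pll), divided by 2 or 3 depending on the OUTDIV2 bit: for example, a 100 MHz reference with a feedback divider of 16 gives fvco = 1.6 GHz, so 800 MHz with OUTDIV2 set and roughly 533 MHz otherwise. Condensed into a sketch using the macros added above:

static unsigned long xgene_v2_pll_rate(unsigned long parent_rate, u32 pll)
{
	unsigned long fvco = parent_rate * SC_N_DIV_RD(pll);

	return fvco / (SC_OUTDIV2(pll) ? 2 : 3);
}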
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index b4db67a446c8..fb74dc1f7520 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -350,13 +350,12 @@ static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
{
if (!core || index >= core->num_parents)
return NULL;
- else if (!core->parents)
- return clk_core_lookup(core->parent_names[index]);
- else if (!core->parents[index])
- return core->parents[index] =
- clk_core_lookup(core->parent_names[index]);
- else
- return core->parents[index];
+
+ if (!core->parents[index])
+ core->parents[index] =
+ clk_core_lookup(core->parent_names[index]);
+
+ return core->parents[index];
}
struct clk_hw *
@@ -386,7 +385,7 @@ static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
ret = core->rate;
- if (core->flags & CLK_IS_ROOT)
+ if (!core->num_parents)
goto out;
if (!core->parent)
@@ -1067,30 +1066,12 @@ static int clk_fetch_parent_index(struct clk_core *core,
{
int i;
- if (!core->parents) {
- core->parents = kcalloc(core->num_parents,
- sizeof(struct clk *), GFP_KERNEL);
- if (!core->parents)
- return -ENOMEM;
- }
-
- /*
- * find index of new parent clock using cached parent ptrs,
- * or if not yet cached, use string name comparison and cache
- * them now to avoid future calls to clk_core_lookup.
- */
- for (i = 0; i < core->num_parents; i++) {
- if (core->parents[i] == parent)
- return i;
-
- if (core->parents[i])
- continue;
+ if (!parent)
+ return -EINVAL;
- if (!strcmp(core->parent_names[i], parent->name)) {
- core->parents[i] = clk_core_lookup(parent->name);
+ for (i = 0; i < core->num_parents; i++)
+ if (clk_core_get_parent_by_index(core, i) == parent)
return i;
- }
- }
return -EINVAL;
}
@@ -1677,56 +1658,14 @@ struct clk *clk_get_parent(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_parent);
-/*
- * .get_parent is mandatory for clocks with multiple possible parents. It is
- * optional for single-parent clocks. Always call .get_parent if it is
- * available and WARN if it is missing for multi-parent clocks.
- *
- * For single-parent clocks without .get_parent, first check to see if the
- * .parents array exists, and if so use it to avoid an expensive tree
- * traversal. If .parents does not exist then walk the tree.
- */
static struct clk_core *__clk_init_parent(struct clk_core *core)
{
- struct clk_core *ret = NULL;
- u8 index;
+ u8 index = 0;
- /* handle the trivial cases */
+ if (core->num_parents > 1 && core->ops->get_parent)
+ index = core->ops->get_parent(core->hw);
- if (!core->num_parents)
- goto out;
-
- if (core->num_parents == 1) {
- if (IS_ERR_OR_NULL(core->parent))
- core->parent = clk_core_lookup(core->parent_names[0]);
- ret = core->parent;
- goto out;
- }
-
- if (!core->ops->get_parent) {
- WARN(!core->ops->get_parent,
- "%s: multi-parent clocks must implement .get_parent\n",
- __func__);
- goto out;
- }
-
- /*
- * Do our best to cache parent clocks in core->parents. This prevents
- * unnecessary and expensive lookups. We don't set core->parent here;
- * that is done by the calling function.
- */
-
- index = core->ops->get_parent(core->hw);
-
- if (!core->parents)
- core->parents =
- kcalloc(core->num_parents, sizeof(struct clk *),
- GFP_KERNEL);
-
- ret = clk_core_get_parent_by_index(core, index);
-
-out:
- return ret;
+ return clk_core_get_parent_by_index(core, index);
}
static void clk_core_reparent(struct clk_core *core,
@@ -1809,13 +1748,13 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
/* try finding the new parent index */
if (parent) {
p_index = clk_fetch_parent_index(core, parent);
- p_rate = parent->rate;
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, core->name);
ret = p_index;
goto out;
}
+ p_rate = parent->rate;
}
/* propagate PRE_RATE_CHANGE notifications */
@@ -1902,6 +1841,10 @@ int clk_set_phase(struct clk *clk, int degrees)
clk_prepare_lock();
+ /* bail early if nothing to do */
+ if (degrees == clk->core->phase)
+ goto out;
+
trace_clk_set_phase(clk->core, degrees);
if (clk->core->ops->set_phase)
@@ -1912,6 +1855,7 @@ int clk_set_phase(struct clk *clk, int degrees)
if (!ret)
clk->core->phase = degrees;
+out:
clk_prepare_unlock();
return ret;
@@ -2218,7 +2162,7 @@ unlock:
*
* Dynamically removes a clk and all its child nodes from the
* debugfs clk directory if clk->dentry points to debugfs created by
- * clk_debug_register in __clk_init.
+ * clk_debug_register in __clk_core_init.
*/
static void clk_debug_unregister(struct clk_core *core)
{
@@ -2303,26 +2247,22 @@ static inline void clk_debug_unregister(struct clk_core *core)
#endif
/**
- * __clk_init - initialize the data structures in a struct clk
- * @dev: device initializing this clk, placeholder for now
- * @clk: clk being initialized
+ * __clk_core_init - initialize the data structures in a struct clk_core
+ * @core: clk_core being initialized
*
* Initializes the lists in struct clk_core, queries the hardware for the
* parent and rate and sets them both.
*/
-static int __clk_init(struct device *dev, struct clk *clk_user)
+static int __clk_core_init(struct clk_core *core)
{
int i, ret = 0;
struct clk_core *orphan;
struct hlist_node *tmp2;
- struct clk_core *core;
unsigned long rate;
- if (!clk_user)
+ if (!core)
return -EINVAL;
- core = clk_user->core;
-
clk_prepare_lock();
/* check to see if a clock with this name is already registered */
@@ -2337,22 +2277,29 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
if (core->ops->set_rate &&
!((core->ops->round_rate || core->ops->determine_rate) &&
core->ops->recalc_rate)) {
- pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
- __func__, core->name);
+ pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
+ __func__, core->name);
ret = -EINVAL;
goto out;
}
if (core->ops->set_parent && !core->ops->get_parent) {
- pr_warning("%s: %s must implement .get_parent & .set_parent\n",
- __func__, core->name);
+ pr_err("%s: %s must implement .get_parent & .set_parent\n",
+ __func__, core->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (core->num_parents > 1 && !core->ops->get_parent) {
+ pr_err("%s: %s must implement .get_parent as it has multi parents\n",
+ __func__, core->name);
ret = -EINVAL;
goto out;
}
if (core->ops->set_rate_and_parent &&
!(core->ops->set_parent && core->ops->set_rate)) {
- pr_warn("%s: %s must implement .set_parent & .set_rate\n",
+ pr_err("%s: %s must implement .set_parent & .set_rate\n",
__func__, core->name);
ret = -EINVAL;
goto out;
@@ -2364,37 +2311,12 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
"%s: invalid NULL in %s's .parent_names\n",
__func__, core->name);
- /*
- * Allocate an array of struct clk *'s to avoid unnecessary string
- * look-ups of clk's possible parents. This can fail for clocks passed
- * in to clk_init during early boot; thus any access to core->parents[]
- * must always check for a NULL pointer and try to populate it if
- * necessary.
- *
- * If core->parents is not NULL we skip this entire block. This allows
- * for clock drivers to statically initialize core->parents.
- */
- if (core->num_parents > 1 && !core->parents) {
- core->parents = kcalloc(core->num_parents, sizeof(struct clk *),
- GFP_KERNEL);
- /*
- * clk_core_lookup returns NULL for parents that have not been
- * clk_init'd; thus any access to clk->parents[] must check
- * for a NULL pointer. We can always perform lazy lookups for
- * missing parents later on.
- */
- if (core->parents)
- for (i = 0; i < core->num_parents; i++)
- core->parents[i] =
- clk_core_lookup(core->parent_names[i]);
- }
-
core->parent = __clk_init_parent(core);
/*
- * Populate core->parent if parent has already been __clk_init'd. If
- * parent has not yet been __clk_init'd then place clk in the orphan
- * list. If clk has set the CLK_IS_ROOT flag then place it in the root
+ * Populate core->parent if parent has already been clk_core_init'd. If
+ * parent has not yet been clk_core_init'd then place clk in the orphan
+ * list. If clk doesn't have any parents then place it in the root
* clk list.
*
* Every time a new clk is clk_init'd then we walk the list of orphan
@@ -2405,7 +2327,7 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
hlist_add_head(&core->child_node,
&core->parent->children);
core->orphan = core->parent->orphan;
- } else if (core->flags & CLK_IS_ROOT) {
+ } else if (!core->num_parents) {
hlist_add_head(&core->child_node, &clk_root_list);
core->orphan = false;
} else {
@@ -2454,24 +2376,15 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
core->rate = core->req_rate = rate;
/*
- * walk the list of orphan clocks and reparent any that are children of
- * this clock
+ * walk the list of orphan clocks and reparent any that have newly
+ * found a parent.
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
- if (orphan->num_parents && orphan->ops->get_parent) {
- i = orphan->ops->get_parent(orphan->hw);
- if (i >= 0 && i < orphan->num_parents &&
- !strcmp(core->name, orphan->parent_names[i]))
- clk_core_reparent(orphan, core);
- continue;
- }
+ struct clk_core *parent = __clk_init_parent(orphan);
- for (i = 0; i < orphan->num_parents; i++)
- if (!strcmp(core->name, orphan->parent_names[i])) {
- clk_core_reparent(orphan, core);
- break;
- }
- }
+ if (parent)
+ clk_core_reparent(orphan, parent);
+ }
/*
* optional platform-specific magic
@@ -2585,21 +2498,31 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
}
}
+ /* avoid unnecessary string look-ups of clk_core's possible parents. */
+ core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
+ GFP_KERNEL);
+ if (!core->parents) {
+ ret = -ENOMEM;
+ goto fail_parents;
+ }
+
INIT_HLIST_HEAD(&core->clks);
hw->clk = __clk_create_clk(hw, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
- goto fail_parent_names_copy;
+ goto fail_parents;
}
- ret = __clk_init(dev, hw->clk);
+ ret = __clk_core_init(core);
if (!ret)
return hw->clk;
__clk_free_clk(hw->clk);
hw->clk = NULL;
+fail_parents:
+ kfree(core->parents);
fail_parent_names_copy:
while (--i >= 0)
kfree_const(core->parent_names[i]);
@@ -2683,7 +2606,7 @@ void clk_unregister(struct clk *clk)
if (clk->core->ops == &clk_nodrv_ops) {
pr_err("%s: unregistered clock: %s\n", __func__,
clk->core->name);
- return;
+ goto unlock;
}
/*
* Assign empty clock ops for consumers that might still hold
@@ -2709,7 +2632,7 @@ void clk_unregister(struct clk *clk)
pr_warn("%s: unregistering prepared clock: %s\n",
__func__, clk->core->name);
kref_put(&clk->core->ref, __clk_release);
-
+unlock:
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);
@@ -3061,10 +2984,23 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
+EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
-int of_clk_get_parent_count(struct device_node *np)
+/**
+ * of_clk_get_parent_count() - Count the number of clocks a device node has
+ * @np: device node to count
+ *
+ * Returns: The number of clocks that are possible parents of this node
+ */
+unsigned int of_clk_get_parent_count(struct device_node *np)
{
- return of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ int count;
+
+ count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (count < 0)
+ return 0;
+
+ return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
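
Returning unsigned int, with 0 standing in for a missing or malformed "clocks" property, removes the negative-error check every caller used to open-code; the h8300 conversions below show the simplification. Typical caller shape after this change, sketched:

static int count_parents(struct device_node *np)
{
	unsigned int n = of_clk_get_parent_count(np);

	if (!n)		/* covers "no clocks property" and parse errors alike */
		return -ENOENT;

	return n;
}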
@@ -3214,6 +3150,9 @@ void __init of_clk_init(const struct of_device_id *matches)
for_each_matching_node_and_match(np, matches, &match) {
struct clock_provider *parent;
+ if (!of_device_is_available(np))
+ continue;
+
parent = kzalloc(sizeof(*parent), GFP_KERNEL);
if (!parent) {
list_for_each_entry_safe(clk_provider, next,
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 779b6ff0c7ad..eb20b941154b 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -353,11 +353,25 @@ void clkdev_drop(struct clk_lookup *cl)
}
EXPORT_SYMBOL(clkdev_drop);
+static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw,
+ const char *con_id,
+ const char *dev_id, ...)
+{
+ struct clk_lookup *cl;
+ va_list ap;
+
+ va_start(ap, dev_id);
+ cl = vclkdev_create(hw, con_id, dev_id, ap);
+ va_end(ap);
+
+ return cl;
+}
+
/**
* clk_register_clkdev - register one clock lookup for a struct clk
* @clk: struct clk to associate with all clk_lookups
* @con_id: connection ID string on device
- * @dev_id: format string describing device name
+ * @dev_id: string describing device name
*
* con_id or dev_id may be NULL as a wildcard, just as in the rest of
* clkdev.
@@ -368,17 +382,22 @@ EXPORT_SYMBOL(clkdev_drop);
* after clk_register().
*/
int clk_register_clkdev(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...)
+ const char *dev_id)
{
struct clk_lookup *cl;
- va_list ap;
if (IS_ERR(clk))
return PTR_ERR(clk);
- va_start(ap, dev_fmt);
- cl = vclkdev_create(__clk_get_hw(clk), con_id, dev_fmt, ap);
- va_end(ap);
+ /*
+ * Since dev_id can be NULL, and NULL is handled specially, we must
+	 * either pass a NULL format string or wrap dev_id in "%s".
+ */
+ if (dev_id)
+ cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, "%s",
+ dev_id);
+ else
+ cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, NULL);
return cl ? 0 : -ENOMEM;
}
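
With the varargs removed, dev_id is matched verbatim against the device name, so a literal '%' in a device name can no longer be misinterpreted as a conversion specifier. A hedged caller sketch (the device and connection names are illustrative):

    int err;

    /* dev_id is taken as-is; no printf-style expansion happens */
    err = clk_register_clkdev(clk, "uartclk", "serial8250.0");
    if (err)
    	pr_err("clkdev lookup registration failed: %d\n", err);
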
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index d71d01157dbb..4bf44a25d950 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -13,7 +13,7 @@ static DEFINE_SPINLOCK(clklock);
static void __init h8300_div_clk_setup(struct device_node *node)
{
- int num_parents;
+ unsigned int num_parents;
struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
@@ -22,7 +22,7 @@ static void __init h8300_div_clk_setup(struct device_node *node)
int offset;
num_parents = of_clk_get_parent_count(node);
- if (num_parents < 1) {
+ if (!num_parents) {
pr_err("%s: no parent found", clk_name);
return;
}
@@ -34,7 +34,7 @@ static void __init h8300_div_clk_setup(struct device_node *node)
}
offset = (unsigned long)divcr & 3;
offset = (3 - offset) * 8;
- divcr = (void *)((unsigned long)divcr & ~3);
+ divcr = (void __iomem *)((unsigned long)divcr & ~3);
parent_name = of_clk_get_parent_name(node, 0);
of_property_read_u32(node, "renesas,width", &width);
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index 6cf38dc1c929..c9c2fd575ef7 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -83,7 +83,7 @@ static const struct clk_ops pll_ops = {
static void __init h8s2678_pll_clk_setup(struct device_node *node)
{
- int num_parents;
+ unsigned int num_parents;
struct clk *clk;
const char *clk_name = node->name;
const char *parent_name;
@@ -91,7 +91,7 @@ static void __init h8s2678_pll_clk_setup(struct device_node *node)
struct clk_init_data init;
num_parents = of_clk_get_parent_count(node);
- if (num_parents < 1) {
+ if (!num_parents) {
pr_err("%s: no parent found", clk_name);
return;
}
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 7d03fe17d66f..d04a104ce1b4 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -78,15 +78,15 @@ static const char *const mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
/* fixed rate clocks */
static struct hisi_fixed_rate_clock hi3620_fixed_rate_clks[] __initdata = {
- { HI3620_OSC32K, "osc32k", NULL, CLK_IS_ROOT, 32768, },
- { HI3620_OSC26M, "osc26m", NULL, CLK_IS_ROOT, 26000000, },
- { HI3620_PCLK, "pclk", NULL, CLK_IS_ROOT, 26000000, },
- { HI3620_PLL_ARM0, "armpll0", NULL, CLK_IS_ROOT, 1600000000, },
- { HI3620_PLL_ARM1, "armpll1", NULL, CLK_IS_ROOT, 1600000000, },
- { HI3620_PLL_PERI, "armpll2", NULL, CLK_IS_ROOT, 1440000000, },
- { HI3620_PLL_USB, "armpll3", NULL, CLK_IS_ROOT, 1440000000, },
- { HI3620_PLL_HDMI, "armpll4", NULL, CLK_IS_ROOT, 1188000000, },
- { HI3620_PLL_GPU, "armpll5", NULL, CLK_IS_ROOT, 1300000000, },
+ { HI3620_OSC32K, "osc32k", NULL, 0, 32768, },
+ { HI3620_OSC26M, "osc26m", NULL, 0, 26000000, },
+ { HI3620_PCLK, "pclk", NULL, 0, 26000000, },
+ { HI3620_PLL_ARM0, "armpll0", NULL, 0, 1600000000, },
+ { HI3620_PLL_ARM1, "armpll1", NULL, 0, 1600000000, },
+ { HI3620_PLL_PERI, "armpll2", NULL, 0, 1440000000, },
+ { HI3620_PLL_USB, "armpll3", NULL, 0, 1440000000, },
+ { HI3620_PLL_HDMI, "armpll4", NULL, 0, 1188000000, },
+ { HI3620_PLL_GPU, "armpll5", NULL, 0, 1300000000, },
};
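
The flags column drops to 0 because the framework now infers a root clock from num_parents == 0 (see the "else if (!core->num_parents)" hunk in clk.c above); a NULL parent and no flags are all a fixed-rate root needs. A minimal sketch, with a hypothetical oscillator name and rate:

    /* registers a root clock: NULL parent, no CLK_IS_ROOT flag required */
    clk = clk_register_fixed_rate(NULL, "osc26m", NULL, 0, 26000000);
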
/* fixed factor clocks */
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
index 8afb40ef40ce..329a09214d12 100644
--- a/drivers/clk/hisilicon/clk-hi6220-stub.c
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -235,7 +235,7 @@ static int hi6220_stub_clk_probe(struct platform_device *pdev)
init.name = "acpu0";
init.ops = &hi6220_stub_clk_ops;
init.num_parents = 0;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
clk = devm_clk_register(dev, &stub_clk->hw);
if (IS_ERR(clk))
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index 4563343b6420..f02cb41d40a4 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -26,19 +26,19 @@
/* clocks in AO (always on) controller */
static struct hisi_fixed_rate_clock hi6220_fixed_rate_clks[] __initdata = {
- { HI6220_REF32K, "ref32k", NULL, CLK_IS_ROOT, 32764, },
- { HI6220_CLK_TCXO, "clk_tcxo", NULL, CLK_IS_ROOT, 19200000, },
- { HI6220_MMC1_PAD, "mmc1_pad", NULL, CLK_IS_ROOT, 100000000, },
- { HI6220_MMC2_PAD, "mmc2_pad", NULL, CLK_IS_ROOT, 100000000, },
- { HI6220_MMC0_PAD, "mmc0_pad", NULL, CLK_IS_ROOT, 200000000, },
- { HI6220_PLL_BBP, "bbppll0", NULL, CLK_IS_ROOT, 245760000, },
- { HI6220_PLL_GPU, "gpupll", NULL, CLK_IS_ROOT, 1000000000,},
- { HI6220_PLL1_DDR, "ddrpll1", NULL, CLK_IS_ROOT, 1066000000,},
- { HI6220_PLL_SYS, "syspll", NULL, CLK_IS_ROOT, 1200000000,},
- { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, CLK_IS_ROOT, 1200000000,},
- { HI6220_DDR_SRC, "ddr_sel_src", NULL, CLK_IS_ROOT, 1200000000,},
- { HI6220_PLL_MEDIA, "media_pll", NULL, CLK_IS_ROOT, 1440000000,},
- { HI6220_PLL_DDR, "ddrpll0", NULL, CLK_IS_ROOT, 1600000000,},
+ { HI6220_REF32K, "ref32k", NULL, 0, 32764, },
+ { HI6220_CLK_TCXO, "clk_tcxo", NULL, 0, 19200000, },
+ { HI6220_MMC1_PAD, "mmc1_pad", NULL, 0, 100000000, },
+ { HI6220_MMC2_PAD, "mmc2_pad", NULL, 0, 100000000, },
+ { HI6220_MMC0_PAD, "mmc0_pad", NULL, 0, 200000000, },
+ { HI6220_PLL_BBP, "bbppll0", NULL, 0, 245760000, },
+ { HI6220_PLL_GPU, "gpupll", NULL, 0, 1000000000,},
+ { HI6220_PLL1_DDR, "ddrpll1", NULL, 0, 1066000000,},
+ { HI6220_PLL_SYS, "syspll", NULL, 0, 1200000000,},
+ { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, 0, 1200000000,},
+ { HI6220_DDR_SRC, "ddr_sel_src", NULL, 0, 1200000000,},
+ { HI6220_PLL_MEDIA, "media_pll", NULL, 0, 1440000000,},
+ { HI6220_PLL_DDR, "ddrpll0", NULL, 0, 1600000000,},
};
static struct hisi_fixed_factor_clock hi6220_fixed_factor_clks[] __initdata = {
diff --git a/drivers/clk/hisilicon/clk-hip04.c b/drivers/clk/hisilicon/clk-hip04.c
index 8ca967308343..b38e03da1d02 100644
--- a/drivers/clk/hisilicon/clk-hip04.c
+++ b/drivers/clk/hisilicon/clk-hip04.c
@@ -36,9 +36,9 @@
/* fixed rate clocks */
static struct hisi_fixed_rate_clock hip04_fixed_rate_clks[] __initdata = {
- { HIP04_OSC50M, "osc50m", NULL, CLK_IS_ROOT, 50000000, },
- { HIP04_CLK_50M, "clk50m", NULL, CLK_IS_ROOT, 50000000, },
- { HIP04_CLK_168M, "clk168m", NULL, CLK_IS_ROOT, 168750000, },
+ { HIP04_OSC50M, "osc50m", NULL, 0, 50000000, },
+ { HIP04_CLK_50M, "clk50m", NULL, 0, 50000000, },
+ { HIP04_CLK_168M, "clk168m", NULL, 0, 168750000, },
};
static void __init hip04_clk_init(struct device_node *np)
diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c
index 0aaf29da8491..14b05efa3c2a 100644
--- a/drivers/clk/hisilicon/clk-hix5hd2.c
+++ b/drivers/clk/hisilicon/clk-hix5hd2.c
@@ -14,36 +14,36 @@
#include "clk.h"
static struct hisi_fixed_rate_clock hix5hd2_fixed_rate_clks[] __initdata = {
- { HIX5HD2_FIXED_1200M, "1200m", NULL, CLK_IS_ROOT, 1200000000, },
- { HIX5HD2_FIXED_400M, "400m", NULL, CLK_IS_ROOT, 400000000, },
- { HIX5HD2_FIXED_48M, "48m", NULL, CLK_IS_ROOT, 48000000, },
- { HIX5HD2_FIXED_24M, "24m", NULL, CLK_IS_ROOT, 24000000, },
- { HIX5HD2_FIXED_600M, "600m", NULL, CLK_IS_ROOT, 600000000, },
- { HIX5HD2_FIXED_300M, "300m", NULL, CLK_IS_ROOT, 300000000, },
- { HIX5HD2_FIXED_75M, "75m", NULL, CLK_IS_ROOT, 75000000, },
- { HIX5HD2_FIXED_200M, "200m", NULL, CLK_IS_ROOT, 200000000, },
- { HIX5HD2_FIXED_100M, "100m", NULL, CLK_IS_ROOT, 100000000, },
- { HIX5HD2_FIXED_40M, "40m", NULL, CLK_IS_ROOT, 40000000, },
- { HIX5HD2_FIXED_150M, "150m", NULL, CLK_IS_ROOT, 150000000, },
- { HIX5HD2_FIXED_1728M, "1728m", NULL, CLK_IS_ROOT, 1728000000, },
- { HIX5HD2_FIXED_28P8M, "28p8m", NULL, CLK_IS_ROOT, 28000000, },
- { HIX5HD2_FIXED_432M, "432m", NULL, CLK_IS_ROOT, 432000000, },
- { HIX5HD2_FIXED_345P6M, "345p6m", NULL, CLK_IS_ROOT, 345000000, },
- { HIX5HD2_FIXED_288M, "288m", NULL, CLK_IS_ROOT, 288000000, },
- { HIX5HD2_FIXED_60M, "60m", NULL, CLK_IS_ROOT, 60000000, },
- { HIX5HD2_FIXED_750M, "750m", NULL, CLK_IS_ROOT, 750000000, },
- { HIX5HD2_FIXED_500M, "500m", NULL, CLK_IS_ROOT, 500000000, },
- { HIX5HD2_FIXED_54M, "54m", NULL, CLK_IS_ROOT, 54000000, },
- { HIX5HD2_FIXED_27M, "27m", NULL, CLK_IS_ROOT, 27000000, },
- { HIX5HD2_FIXED_1500M, "1500m", NULL, CLK_IS_ROOT, 1500000000, },
- { HIX5HD2_FIXED_375M, "375m", NULL, CLK_IS_ROOT, 375000000, },
- { HIX5HD2_FIXED_187M, "187m", NULL, CLK_IS_ROOT, 187000000, },
- { HIX5HD2_FIXED_250M, "250m", NULL, CLK_IS_ROOT, 250000000, },
- { HIX5HD2_FIXED_125M, "125m", NULL, CLK_IS_ROOT, 125000000, },
- { HIX5HD2_FIXED_2P02M, "2m", NULL, CLK_IS_ROOT, 2000000, },
- { HIX5HD2_FIXED_50M, "50m", NULL, CLK_IS_ROOT, 50000000, },
- { HIX5HD2_FIXED_25M, "25m", NULL, CLK_IS_ROOT, 25000000, },
- { HIX5HD2_FIXED_83M, "83m", NULL, CLK_IS_ROOT, 83333333, },
+ { HIX5HD2_FIXED_1200M, "1200m", NULL, 0, 1200000000, },
+ { HIX5HD2_FIXED_400M, "400m", NULL, 0, 400000000, },
+ { HIX5HD2_FIXED_48M, "48m", NULL, 0, 48000000, },
+ { HIX5HD2_FIXED_24M, "24m", NULL, 0, 24000000, },
+ { HIX5HD2_FIXED_600M, "600m", NULL, 0, 600000000, },
+ { HIX5HD2_FIXED_300M, "300m", NULL, 0, 300000000, },
+ { HIX5HD2_FIXED_75M, "75m", NULL, 0, 75000000, },
+ { HIX5HD2_FIXED_200M, "200m", NULL, 0, 200000000, },
+ { HIX5HD2_FIXED_100M, "100m", NULL, 0, 100000000, },
+ { HIX5HD2_FIXED_40M, "40m", NULL, 0, 40000000, },
+ { HIX5HD2_FIXED_150M, "150m", NULL, 0, 150000000, },
+ { HIX5HD2_FIXED_1728M, "1728m", NULL, 0, 1728000000, },
+ { HIX5HD2_FIXED_28P8M, "28p8m", NULL, 0, 28000000, },
+ { HIX5HD2_FIXED_432M, "432m", NULL, 0, 432000000, },
+ { HIX5HD2_FIXED_345P6M, "345p6m", NULL, 0, 345000000, },
+ { HIX5HD2_FIXED_288M, "288m", NULL, 0, 288000000, },
+ { HIX5HD2_FIXED_60M, "60m", NULL, 0, 60000000, },
+ { HIX5HD2_FIXED_750M, "750m", NULL, 0, 750000000, },
+ { HIX5HD2_FIXED_500M, "500m", NULL, 0, 500000000, },
+ { HIX5HD2_FIXED_54M, "54m", NULL, 0, 54000000, },
+ { HIX5HD2_FIXED_27M, "27m", NULL, 0, 27000000, },
+ { HIX5HD2_FIXED_1500M, "1500m", NULL, 0, 1500000000, },
+ { HIX5HD2_FIXED_375M, "375m", NULL, 0, 375000000, },
+ { HIX5HD2_FIXED_187M, "187m", NULL, 0, 187000000, },
+ { HIX5HD2_FIXED_250M, "250m", NULL, 0, 250000000, },
+ { HIX5HD2_FIXED_125M, "125m", NULL, 0, 125000000, },
+ { HIX5HD2_FIXED_2P02M, "2m", NULL, 0, 2000000, },
+ { HIX5HD2_FIXED_50M, "50m", NULL, 0, 50000000, },
+ { HIX5HD2_FIXED_25M, "25m", NULL, 0, 25000000, },
+ { HIX5HD2_FIXED_83M, "83m", NULL, 0, 83333333, },
};
static const char *const sfc_mux_p[] __initconst = {
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 4bb1bc419b79..5cc99590f9a3 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -38,7 +38,7 @@ struct clk_busy_divider {
static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
{
- struct clk_divider *div = container_of(hw, struct clk_divider, hw);
+ struct clk_divider *div = to_clk_divider(hw);
return container_of(div, struct clk_busy_divider, div);
}
@@ -123,7 +123,7 @@ struct clk_busy_mux {
static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
{
- struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
+ struct clk_mux *mux = to_clk_mux(hw);
return container_of(mux, struct clk_busy_mux, mux);
}
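
Both helpers rely on the same two-step container_of recovery: the generic accessor (to_clk_divider() or to_clk_mux()) turns the clk_hw pointer back into the embedded basic clock, and a second container_of climbs out to the wrapper that embeds it. A sketch of the layout this assumes (the extra field is illustrative):

    struct clk_busy_mux {
    	struct clk_mux mux;	/* 'hw' lives inside this member */
    	void __iomem *reg;	/* illustrative wrapper-specific state */
    };
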
diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c
index 21db020b1f2d..ce5722732715 100644
--- a/drivers/clk/imx/clk-fixup-div.c
+++ b/drivers/clk/imx/clk-fixup-div.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include "clk.h"
-#define to_clk_div(_hw) container_of(_hw, struct clk_divider, hw)
#define div_mask(d) ((1 << (d->width)) - 1)
/**
@@ -35,7 +34,7 @@ struct clk_fixup_div {
static inline struct clk_fixup_div *to_clk_fixup_div(struct clk_hw *hw)
{
- struct clk_divider *divider = to_clk_div(hw);
+ struct clk_divider *divider = to_clk_divider(hw);
return container_of(divider, struct clk_fixup_div, divider);
}
@@ -60,7 +59,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw);
- struct clk_divider *div = to_clk_div(hw);
+ struct clk_divider *div = to_clk_divider(hw);
unsigned int divider, value;
unsigned long flags = 0;
u32 val;
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index 0d40b35c557c..c9b327e0a8dd 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -15,8 +15,6 @@
#include <linux/slab.h>
#include "clk.h"
-#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-
/**
* struct clk_fixup_mux - imx integer fixup multiplexer clock
* @mux: the parent class
diff --git a/drivers/clk/imx/clk-gate-exclusive.c b/drivers/clk/imx/clk-gate-exclusive.c
index c12f5f2e04dc..3bd9dee618b2 100644
--- a/drivers/clk/imx/clk-gate-exclusive.c
+++ b/drivers/clk/imx/clk-gate-exclusive.c
@@ -31,7 +31,7 @@ struct clk_gate_exclusive {
static int clk_gate_exclusive_enable(struct clk_hw *hw)
{
- struct clk_gate *gate = container_of(hw, struct clk_gate, hw);
+ struct clk_gate *gate = to_clk_gate(hw);
struct clk_gate_exclusive *exgate = container_of(gate,
struct clk_gate_exclusive, gate);
u32 val = readl(gate->reg);
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index f0efc6feeec2..02e18182fcb5 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -34,7 +34,9 @@ static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", };
static const char *audio_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
static const char *gpu_axi_sels[] = { "axi", "ahb", };
+static const char *pre_axi_sels[] = { "axi", "ahb", };
static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
+static const char *gpu2d_core_sels_2[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
@@ -44,15 +46,24 @@ static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu2_di0_sels[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu2_di1_sels[] = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu1_di0_sels_2[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0_podf", "ldb_di1_podf", };
+static const char *ipu1_di1_sels_2[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0_podf", "ldb_di1_podf", };
+static const char *ipu2_di0_sels_2[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0_podf", "ldb_di1_podf", };
+static const char *ipu2_di1_sels_2[] = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0_podf", "ldb_di1_podf", };
static const char *hsi_tx_sels[] = { "pll3_120m", "pll2_pfd2_396m", };
static const char *pcie_axi_sels[] = { "axi", "ahb", };
static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", };
static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
+static const char *enfc_sels_2[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd3_454m", "dummy", };
static const char *eim_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
static const char *eim_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *vdo_axi_sels[] = { "axi", "ahb", };
static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *uart_sels[] = { "pll3_80m", "osc", };
+static const char *ipg_per_sels[] = { "ipg", "osc", };
+static const char *ecspi_sels[] = { "pll3_60m", "osc", };
+static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", };
static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
"dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
"ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio_div", };
@@ -121,12 +132,19 @@ static unsigned int share_count_ssi2;
static unsigned int share_count_ssi3;
static unsigned int share_count_mipi_core_cfg;
static unsigned int share_count_spdif;
+static unsigned int share_count_prg0;
+static unsigned int share_count_prg1;
static inline int clk_on_imx6q(void)
{
return of_machine_is_compatible("fsl,imx6q");
}
+static inline int clk_on_imx6qp(void)
+{
+ return of_machine_is_compatible("fsl,imx6qp");
+}
+
static inline int clk_on_imx6dl(void)
{
return of_machine_is_compatible("fsl,imx6dl");
@@ -265,7 +283,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2);
clk[IMX6QDL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
clk[IMX6QDL_CLK_VIDEO_27M] = imx_clk_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 20);
- if (clk_on_imx6dl()) {
+ if (clk_on_imx6dl() || clk_on_imx6qp()) {
clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1);
clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1);
}
@@ -294,7 +312,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
}
- clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
+ clk[IMX6QDL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
+ clk[IMX6QDL_CLK_IPG_PER_SEL] = imx_clk_mux("ipg_per_sel", base + 0x1c, 6, 1, ipg_per_sels, ARRAY_SIZE(ipg_per_sels));
+ clk[IMX6QDL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
+ clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels_2, ARRAY_SIZE(gpu2d_core_sels_2));
+ } else {
+ clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
+ }
clk[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels));
clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
clk[IMX6QDL_CLK_IPU1_SEL] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
@@ -305,22 +331,40 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_IPU2_DI1_PRE_SEL] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
- clk[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
clk[IMX6QDL_CLK_HSI_TX_SEL] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels));
clk[IMX6QDL_CLK_PCIE_AXI_SEL] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
- clk[IMX6QDL_CLK_SSI1_SEL] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_SSI2_SEL] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_SSI3_SEL] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
- clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_fixup_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels), imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_fixup_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels), imx_cscmr1_fixup);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels_2, ARRAY_SIZE(ipu1_di0_sels_2), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels_2, ARRAY_SIZE(ipu1_di1_sels_2), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels_2, ARRAY_SIZE(ipu2_di0_sels_2), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels_2, ARRAY_SIZE(ipu2_di1_sels_2), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[IMX6QDL_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[IMX6QDL_CLK_SSI3_SEL] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels_2, ARRAY_SIZE(enfc_sels_2));
+ clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels));
+ clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
+ clk[IMX6QDL_CLK_PRE_AXI] = imx_clk_mux("pre_axi", base + 0x18, 1, 1, pre_axi_sels, ARRAY_SIZE(pre_axi_sels));
+ } else {
+ clk[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_SSI1_SEL] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_SSI2_SEL] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_SSI3_SEL] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
+ clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_fixup_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels), imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_fixup_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels), imx_cscmr1_fixup);
+ }
clk[IMX6QDL_CLK_VDO_AXI_SEL] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
clk[IMX6QDL_CLK_VPU_AXI_SEL] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
clk[IMX6QDL_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
@@ -335,23 +379,33 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
clk[IMX6QDL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
clk[IMX6QDL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
- clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
clk[IMX6QDL_CLK_ESAI_PRED] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3);
clk[IMX6QDL_CLK_ESAI_PODF] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3);
clk[IMX6QDL_CLK_ASRC_PRED] = imx_clk_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3);
clk[IMX6QDL_CLK_ASRC_PODF] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3);
clk[IMX6QDL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
clk[IMX6QDL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
- clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
- clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_IPG_PER] = imx_clk_divider("ipg_per", "ipg_per_sel", base + 0x1c, 0, 6);
+ clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "ecspi_sel", base + 0x38, 19, 6);
+ clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "can_sel", base + 0x20, 2, 6);
+ clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "uart_sel", base + 0x24, 0, 6);
+ clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0", 2, 7);
+ clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
+ } else {
+ clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
+		clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
+ clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
+ clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ }
clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
clk[IMX6QDL_CLK_GPU3D_CORE_PODF] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
clk[IMX6QDL_CLK_IPU1_PODF] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
clk[IMX6QDL_CLK_IPU2_PODF] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
- clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
clk[IMX6QDL_CLK_LDB_DI0_PODF] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0);
- clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
clk[IMX6QDL_CLK_LDB_DI1_PODF] = imx_clk_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0);
clk[IMX6QDL_CLK_IPU1_DI0_PRE] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
clk[IMX6QDL_CLK_IPU1_DI1_PRE] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
@@ -364,15 +418,19 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
clk[IMX6QDL_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
clk[IMX6QDL_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
- clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
clk[IMX6QDL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
clk[IMX6QDL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
clk[IMX6QDL_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
clk[IMX6QDL_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
clk[IMX6QDL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
clk[IMX6QDL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
- clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_fixup_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup);
- clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_fixup_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3);
+ clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
+ } else {
+ clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_fixup_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup);
+ clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_fixup_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup);
+ }
clk[IMX6QDL_CLK_VPU_AXI_PODF] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3);
clk[IMX6QDL_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3);
clk[IMX6QDL_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3);
@@ -380,7 +438,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
/* name parent_name reg shift width busy: reg, shift */
clk[IMX6QDL_CLK_AXI] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
clk[IMX6QDL_CLK_MMDC_CH0_AXI_PODF] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4);
- clk[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_MMDC_CH1_AXI_CG] = imx_clk_gate("mmdc_ch1_axi_cg", "periph2", base + 0x4, 18);
+ clk[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "mmdc_ch1_axi_cg", base + 0x14, 3, 3, base + 0x48, 2);
+ } else {
+ clk[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ }
clk[IMX6QDL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
clk[IMX6QDL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
@@ -432,8 +495,13 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_IPU1_DI1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4);
clk[IMX6QDL_CLK_IPU2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6);
clk[IMX6QDL_CLK_IPU2_DI0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8);
- clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
- clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_sel", base + 0x74, 12);
+ clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_sel", base + 0x74, 14);
+ } else {
+ clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
+ clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ }
clk[IMX6QDL_CLK_IPU2_DI1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
clk[IMX6QDL_CLK_HSI_TX] = imx_clk_gate2_shared("hsi_tx", "hsi_tx_podf", base + 0x74, 16, &share_count_mipi_core_cfg);
clk[IMX6QDL_CLK_MIPI_CORE_CFG] = imx_clk_gate2_shared("mipi_core_cfg", "video_27m", base + 0x74, 16, &share_count_mipi_core_cfg);
@@ -482,6 +550,16 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10);
clk[IMX6QDL_CLK_VDO_AXI] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12);
clk[IMX6QDL_CLK_VPU_AXI] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14);
+ if (clk_on_imx6qp()) {
+ clk[IMX6QDL_CLK_PRE0] = imx_clk_gate2("pre0", "pre_axi", base + 0x80, 16);
+ clk[IMX6QDL_CLK_PRE1] = imx_clk_gate2("pre1", "pre_axi", base + 0x80, 18);
+ clk[IMX6QDL_CLK_PRE2] = imx_clk_gate2("pre2", "pre_axi", base + 0x80, 20);
+ clk[IMX6QDL_CLK_PRE3] = imx_clk_gate2("pre3", "pre_axi", base + 0x80, 22);
+ clk[IMX6QDL_CLK_PRG0_AXI] = imx_clk_gate2_shared("prg0_axi", "ipu1_podf", base + 0x80, 24, &share_count_prg0);
+ clk[IMX6QDL_CLK_PRG1_AXI] = imx_clk_gate2_shared("prg1_axi", "ipu2_podf", base + 0x80, 26, &share_count_prg1);
+ clk[IMX6QDL_CLK_PRG0_APB] = imx_clk_gate2_shared("prg0_apb", "ipg", base + 0x80, 24, &share_count_prg0);
+ clk[IMX6QDL_CLK_PRG1_APB] = imx_clk_gate2_shared("prg1_apb", "ipg", base + 0x80, 26, &share_count_prg1);
+ }
clk[IMX6QDL_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7);
clk[IMX6QDL_CLK_CKO2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 08692d74b884..0f1f17a8f3ed 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -157,9 +157,9 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX6UL_PLL7_BYPASS], clks[IMX6UL_CLK_PLL7]);
clks[IMX6UL_CLK_PLL1_SYS] = imx_clk_fixed_factor("pll1_sys", "pll1_bypass", 1, 1);
- clks[IMX6UL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
- clks[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
- clks[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
+ clks[IMX6UL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
+ clks[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13);
+ clks[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13);
clks[IMX6UL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13);
clks[IMX6UL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13);
clks[IMX6UL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13);
@@ -196,8 +196,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock);
clks[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_gate("enet_ref_125m", "enet2_ref", base + 0xe0, 20);
- clks[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20);
- clks[IMX6UL_CLK_ENET_PTP] = imx_clk_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21);
+ clks[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20);
+ clks[IMX6UL_CLK_ENET_PTP] = imx_clk_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21);
clks[IMX6UL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio",
CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
@@ -210,8 +210,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
/* name parent_name mult div */
clks[IMX6UL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
- clks[IMX6UL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
- clks[IMX6UL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+ clks[IMX6UL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clks[IMX6UL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
clks[IMX6UL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8);
np = ccm_node;
@@ -219,34 +219,34 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
WARN_ON(!base);
clks[IMX6UL_CA7_SECONDARY_SEL] = imx_clk_mux("ca7_secondary_sel", base + 0xc, 3, 1, ca7_secondary_sels, ARRAY_SIZE(ca7_secondary_sels));
- clks[IMX6UL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
- clks[IMX6UL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
+ clks[IMX6UL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
+ clks[IMX6UL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
clks[IMX6UL_CLK_AXI_ALT_SEL] = imx_clk_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels));
- clks[IMX6UL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0);
- clks[IMX6UL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
- clks[IMX6UL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
+ clks[IMX6UL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0);
+ clks[IMX6UL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clks[IMX6UL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
clks[IMX6UL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
clks[IMX6UL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
- clks[IMX6UL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
+ clks[IMX6UL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
clks[IMX6UL_CLK_GPMI_SEL] = imx_clk_mux("gpmi_sel", base + 0x1c, 19, 1, gpmi_sels, ARRAY_SIZE(gpmi_sels));
- clks[IMX6UL_CLK_BCH_SEL] = imx_clk_mux("bch_sel", base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels));
+ clks[IMX6UL_CLK_BCH_SEL] = imx_clk_mux("bch_sel", base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels));
clks[IMX6UL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
clks[IMX6UL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
- clks[IMX6UL_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels));
+ clks[IMX6UL_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels));
clks[IMX6UL_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", base + 0x1c, 12, 2, sai_sels, ARRAY_SIZE(sai_sels));
- clks[IMX6UL_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels));
- clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
- clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
- clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
+ clks[IMX6UL_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels));
+ clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
+ clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels));
+ clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels));
clks[IMX6UL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels));
clks[IMX6UL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels));
clks[IMX6UL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
clks[IMX6UL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
- clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
- clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
+ clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
+ clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
- clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
+ clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
clks[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
clks[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
@@ -259,11 +259,11 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
clks[IMX6UL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
- clks[IMX6UL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
- clks[IMX6UL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clks[IMX6UL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
+ clks[IMX6UL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
clks[IMX6UL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
clks[IMX6UL_CLK_LCDIF_PODF] = imx_clk_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3);
- clks[IMX6UL_CLK_QSPI1_PDOF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
+ clks[IMX6UL_CLK_QSPI1_PDOF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3);
clks[IMX6UL_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3);
clks[IMX6UL_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6);
clks[IMX6UL_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6);
@@ -287,14 +287,14 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3);
clks[IMX6UL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3);
- clks[IMX6UL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
+ clks[IMX6UL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
clks[IMX6UL_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
clks[IMX6UL_CLK_AXI_PODF] = imx_clk_busy_divider("axi_podf", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
/* CCGR0 */
- clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
- clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
+ clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
+ clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -302,7 +302,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10);
clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12);
clks[IMX6UL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
- clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16);
+ clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16);
clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
clks[IMX6UL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20);
clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x68, 24);
@@ -331,7 +331,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2);
clks[IMX6UL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6);
clks[IMX6UL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8);
- clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
+ clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14);
clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
@@ -365,6 +365,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
/* CCGR5 */
clks[IMX6UL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
+ clks[IMX6UL_CLK_KPP] = imx_clk_gate2("kpp", "ipg", base + 0x7c, 8);
clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
clks[IMX6UL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
clks[IMX6UL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio);
@@ -391,10 +392,10 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART8_IPG] = imx_clk_gate2("uart8_ipg", "ipg", base + 0x80, 14);
clks[IMX6UL_CLK_UART8_SERIAL] = imx_clk_gate2("uart8_serial", "uart_podf", base + 0x80, 14);
clks[IMX6UL_CLK_WDOG3] = imx_clk_gate2("wdog3", "ipg", base + 0x80, 20);
- clks[IMX6UL_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24);
+ clks[IMX6UL_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24);
clks[IMX6UL_CLK_PWM5] = imx_clk_gate2("pwm5", "perclk", base + 0x80, 26);
clks[IMX6UL_CLK_PWM6] = imx_clk_gate2("pwm6", "perclk", base + 0x80, 28);
- clks[IMX6UL_CLK_PWM7] = imx_clk_gate2("Pwm7", "perclk", base + 0x80, 30);
+ clks[IMX6UL_CLK_PWM7] = imx_clk_gate2("pwm7", "perclk", base + 0x80, 30);
/* mask handshake of mmdc */
writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index c94ac5c26226..d942f5748d08 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -87,7 +87,7 @@ struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
static inline struct clk *imx_clk_fixed(const char *name, int rate)
{
- return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *imx_clk_divider(const char *name, const char *parent,
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 576bdb7c98b8..2a76901bf04b 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -25,7 +25,7 @@
static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_clk_gate(hw);
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
regmap_read(cg->regmap, cg->sta_ofs, &val);
@@ -37,7 +37,7 @@ static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
static int mtk_cg_bit_is_set(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_clk_gate(hw);
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
regmap_read(cg->regmap, cg->sta_ofs, &val);
@@ -49,14 +49,14 @@ static int mtk_cg_bit_is_set(struct clk_hw *hw)
static void mtk_cg_set_bit(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_clk_gate(hw);
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit));
}
static void mtk_cg_clr_bit(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_clk_gate(hw);
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
}
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 11e25c992948..b1821603b887 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -29,7 +29,7 @@ struct mtk_clk_gate {
u8 bit;
};
-static inline struct mtk_clk_gate *to_clk_gate(struct clk_hw *hw)
+static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
{
return container_of(hw, struct mtk_clk_gate, hw);
}
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index cf08db6c130c..5ada644e6200 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -58,8 +58,8 @@ void __init mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
for (i = 0; i < num; i++) {
const struct mtk_fixed_clk *rc = &clks[i];
- clk = clk_register_fixed_rate(NULL, rc->name, rc->parent,
- rc->parent ? 0 : CLK_IS_ROOT, rc->rate);
+ clk = clk_register_fixed_rate(NULL, rc->name, rc->parent, 0,
+ rc->rate);
if (IS_ERR(clk)) {
pr_err("Failed to register clk %s: %ld\n",
@@ -209,12 +209,14 @@ struct clk * __init mtk_clk_register_composite(const struct mtk_composite *mc,
mc->flags);
if (IS_ERR(clk)) {
- kfree(gate);
- kfree(mux);
+ ret = PTR_ERR(clk);
+ goto err_out;
}
return clk;
err_out:
+ kfree(div);
+ kfree(gate);
kfree(mux);
return ERR_PTR(ret);
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index 9e9fe4b19ac4..309049d41f1b 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -57,7 +57,7 @@ static int mtk_reset(struct reset_controller_dev *rcdev,
return mtk_reset_deassert(rcdev, id);
}
-static struct reset_control_ops mtk_reset_ops = {
+static const struct reset_control_ops mtk_reset_ops = {
.assert = mtk_reset_assert,
.deassert = mtk_reset_deassert,
.reset = mtk_reset,
diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
index c83ae1367abc..d920d410b51d 100644
--- a/drivers/clk/meson/clkc.c
+++ b/drivers/clk/meson/clkc.c
@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf,
}
void __init meson_clk_register_clks(const struct clk_conf *clk_confs,
- size_t nr_confs,
+ unsigned int nr_confs,
void __iomem *clk_base)
{
unsigned int i;
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
index b54da1fe73f0..b4e4d6aa2631 100644
--- a/drivers/clk/mmp/reset.c
+++ b/drivers/clk/mmp/reset.c
@@ -74,7 +74,7 @@ static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops mmp_clk_reset_ops = {
+static const struct reset_control_ops mmp_clk_reset_ops = {
.assert = mmp_clk_reset_assert,
.deassert = mmp_clk_reset_deassert,
};
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 27696255486d..eaee8f099c8c 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -11,7 +11,6 @@ config ARMADA_370_CLK
bool
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
- select MVEBU_CLK_COREDIV
config ARMADA_375_CLK
bool
@@ -29,7 +28,6 @@ config ARMADA_XP_CLK
bool
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
- select MVEBU_CLK_COREDIV
config DOVE_CLK
bool
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 28aac67e7b92..66be2e0c82b4 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -137,8 +137,8 @@ void __init mvebu_coreclk_setup(struct device_node *np,
of_property_read_string_index(np, "clock-output-names", 0,
&tclk_name);
rate = desc->get_tclk_freq(base);
- clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
- CLK_IS_ROOT, rate);
+ clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL, 0,
+ rate);
WARN_ON(IS_ERR(clk_data.clks[0]));
/* Register CPU clock */
@@ -150,8 +150,8 @@ void __init mvebu_coreclk_setup(struct device_node *np,
&& desc->is_sscg_enabled(base))
rate = desc->fix_sscg_deviation(rate);
- clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
- CLK_IS_ROOT, rate);
+ clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL, 0,
+ rate);
WARN_ON(IS_ERR(clk_data.clks[1]));
/* Register fixed-factor clocks derived from CPU clock */
@@ -174,8 +174,7 @@ void __init mvebu_coreclk_setup(struct device_node *np,
2 + desc->num_ratios, &name);
rate = desc->get_refclk_freq(base);
clk_data.clks[2 + desc->num_ratios] =
- clk_register_fixed_rate(NULL, name, NULL,
- CLK_IS_ROOT, rate);
+ clk_register_fixed_rate(NULL, name, NULL, 0, rate);
WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios]));
}
@@ -199,8 +198,6 @@ struct clk_gating_ctrl {
u32 saved_reg;
};
-#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-
static struct clk_gating_ctrl *ctrl;
static struct clk *clk_gating_get_src(
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 3e0b52daa35f..4091f3cfee19 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -225,8 +225,7 @@ static int dove_divider_init(struct device *dev, void __iomem *base,
* Create the core PLL clock. We treat this as a fixed rate
* clock as we don't know any better, and documentation is sparse.
*/
- clk = clk_register_fixed_rate(dev, core_pll[0], NULL, CLK_IS_ROOT,
- 2000000000UL);
+ clk = clk_register_fixed_rate(dev, core_pll[0], NULL, 0, 2000000000UL);
if (IS_ERR(clk))
return PTR_ERR(clk);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 99550f25975e..a2a8d614039d 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -256,8 +256,6 @@ static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
11, 1, 0 },
};
-#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-
static struct clk *clk_muxing_get_src(
struct of_phandle_args *clkspec, void *data)
{
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 049ee27d5a22..f75e989c578f 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -33,7 +33,7 @@ struct clk_div {
static inline struct clk_div *to_clk_div(struct clk_hw *hw)
{
- struct clk_divider *divider = container_of(hw, struct clk_divider, hw);
+ struct clk_divider *divider = to_clk_divider(hw);
return container_of(divider, struct clk_div, divider);
}
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
index a4590956d2a2..5a264a486ad9 100644
--- a/drivers/clk/mxs/clk.h
+++ b/drivers/clk/mxs/clk.h
@@ -38,7 +38,7 @@ struct clk *mxs_clk_frac(const char *name, const char *parent_name,
static inline struct clk *mxs_clk_fixed(const char *name, int rate)
{
- return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static inline struct clk *mxs_clk_gate(const char *name,
diff --git a/drivers/clk/nxp/Makefile b/drivers/clk/nxp/Makefile
index 607bd48c6563..d456ee6cc3d3 100644
--- a/drivers/clk/nxp/Makefile
+++ b/drivers/clk/nxp/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_ARCH_LPC18XX) += clk-lpc18xx-cgu.o
obj-$(CONFIG_ARCH_LPC18XX) += clk-lpc18xx-ccu.o
+obj-$(CONFIG_ARCH_LPC18XX) += clk-lpc18xx-creg.o
obj-$(CONFIG_ARCH_LPC32XX) += clk-lpc32xx.o
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 13aabbb3acbe..f7136b94fd0e 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -28,8 +28,6 @@
#define CCU_BRANCH_IS_BUS BIT(0)
#define CCU_BRANCH_HAVE_DIV2 BIT(1)
-#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-
struct lpc18xx_branch_clk_data {
const char **name;
int num;
@@ -222,7 +220,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran
div->width = 1;
div_hw = &div->hw;
- div_ops = &clk_divider_ops;
+ div_ops = &clk_divider_ro_ops;
}
branch->gate.reg = branch->offset + reg_base;
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index c924572fc9bc..2531174b399e 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -605,7 +605,7 @@ static void __init lpc18xx_cgu_register_source_clks(struct device_node *np,
/* Register the internal 12 MHz RC oscillator (IRC) */
clk = clk_register_fixed_rate(NULL, clk_src_names[CLK_SRC_IRC],
- NULL, CLK_IS_ROOT, 12000000);
+ NULL, 0, 12000000);
if (IS_ERR(clk))
pr_warn("%s: failed to register irc clk\n", __func__);
diff --git a/drivers/clk/nxp/clk-lpc18xx-creg.c b/drivers/clk/nxp/clk-lpc18xx-creg.c
new file mode 100644
index 000000000000..d44b61afa2dc
--- /dev/null
+++ b/drivers/clk/nxp/clk-lpc18xx-creg.c
@@ -0,0 +1,226 @@
+/*
+ * Clk driver for NXP LPC18xx/43xx Configuration Registers (CREG)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define LPC18XX_CREG_CREG0 0x004
+#define LPC18XX_CREG_CREG0_EN1KHZ BIT(0)
+#define LPC18XX_CREG_CREG0_EN32KHZ BIT(1)
+#define LPC18XX_CREG_CREG0_RESET32KHZ BIT(2)
+#define LPC18XX_CREG_CREG0_PD32KHZ BIT(3)
+
+#define to_clk_creg(_hw) container_of(_hw, struct clk_creg_data, hw)
+
+enum {
+ CREG_CLK_1KHZ,
+ CREG_CLK_32KHZ,
+ CREG_CLK_MAX,
+};
+
+struct clk_creg_data {
+ struct clk_hw hw;
+ const char *name;
+ struct regmap *reg;
+ unsigned int en_mask;
+ const struct clk_ops *ops;
+};
+
+#define CREG_CLK(_name, _emask, _ops) \
+{ \
+ .name = _name, \
+ .en_mask = LPC18XX_CREG_CREG0_##_emask, \
+ .ops = &_ops, \
+}
+
+static int clk_creg_32k_prepare(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+ int ret;
+
+ ret = regmap_update_bits(creg->reg, LPC18XX_CREG_CREG0,
+ LPC18XX_CREG_CREG0_PD32KHZ |
+ LPC18XX_CREG_CREG0_RESET32KHZ, 0);
+
+ /*
+ * Powering up the 32k oscillator takes a long while
+ * and sadly there aren't any status bits to poll.
+ */
+ msleep(2500);
+
+ return ret;
+}
+
+static void clk_creg_32k_unprepare(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+
+ regmap_update_bits(creg->reg, LPC18XX_CREG_CREG0,
+ LPC18XX_CREG_CREG0_PD32KHZ,
+ LPC18XX_CREG_CREG0_PD32KHZ);
+}
+
+static int clk_creg_32k_is_prepared(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+ u32 reg;
+
+ regmap_read(creg->reg, LPC18XX_CREG_CREG0, &reg);
+
+ return !(reg & LPC18XX_CREG_CREG0_PD32KHZ) &&
+ !(reg & LPC18XX_CREG_CREG0_RESET32KHZ);
+}
+
+static unsigned long clk_creg_1k_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return parent_rate / 32;
+}
+
+static int clk_creg_enable(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+
+ return regmap_update_bits(creg->reg, LPC18XX_CREG_CREG0,
+ creg->en_mask, creg->en_mask);
+}
+
+static void clk_creg_disable(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+
+ regmap_update_bits(creg->reg, LPC18XX_CREG_CREG0,
+ creg->en_mask, 0);
+}
+
+static int clk_creg_is_enabled(struct clk_hw *hw)
+{
+ struct clk_creg_data *creg = to_clk_creg(hw);
+ u32 reg;
+
+ regmap_read(creg->reg, LPC18XX_CREG_CREG0, &reg);
+
+ return !!(reg & creg->en_mask);
+}
+
+static const struct clk_ops clk_creg_32k = {
+ .enable = clk_creg_enable,
+ .disable = clk_creg_disable,
+ .is_enabled = clk_creg_is_enabled,
+ .prepare = clk_creg_32k_prepare,
+ .unprepare = clk_creg_32k_unprepare,
+ .is_prepared = clk_creg_32k_is_prepared,
+};
+
+static const struct clk_ops clk_creg_1k = {
+ .enable = clk_creg_enable,
+ .disable = clk_creg_disable,
+ .is_enabled = clk_creg_is_enabled,
+ .recalc_rate = clk_creg_1k_recalc_rate,
+};
+
+static struct clk_creg_data clk_creg_clocks[] = {
+ [CREG_CLK_1KHZ] = CREG_CLK("1khz_clk", EN1KHZ, clk_creg_1k),
+ [CREG_CLK_32KHZ] = CREG_CLK("32khz_clk", EN32KHZ, clk_creg_32k),
+};
+
+static struct clk *clk_register_creg_clk(struct device *dev,
+ struct clk_creg_data *creg_clk,
+ const char **parent_name,
+ struct regmap *syscon)
+{
+ struct clk_init_data init;
+
+ init.ops = creg_clk->ops;
+ init.name = creg_clk->name;
+ init.parent_names = parent_name;
+ init.num_parents = 1;
+
+ creg_clk->reg = syscon;
+ creg_clk->hw.init = &init;
+
+ if (dev)
+ return devm_clk_register(dev, &creg_clk->hw);
+
+ return clk_register(NULL, &creg_clk->hw);
+}
+
+static struct clk *clk_creg_early[CREG_CLK_MAX];
+static struct clk_onecell_data clk_creg_early_data = {
+ .clks = clk_creg_early,
+ .clk_num = CREG_CLK_MAX,
+};
+
+static void __init lpc18xx_creg_clk_init(struct device_node *np)
+{
+ const char *clk_32khz_parent;
+ struct regmap *syscon;
+
+ syscon = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(syscon)) {
+ pr_err("%s: syscon lookup failed\n", __func__);
+ return;
+ }
+
+ clk_32khz_parent = of_clk_get_parent_name(np, 0);
+
+ clk_creg_early[CREG_CLK_32KHZ] =
+ clk_register_creg_clk(NULL, &clk_creg_clocks[CREG_CLK_32KHZ],
+ &clk_32khz_parent, syscon);
+ clk_creg_early[CREG_CLK_1KHZ] = ERR_PTR(-EPROBE_DEFER);
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_creg_early_data);
+}
+CLK_OF_DECLARE(lpc18xx_creg_clk, "nxp,lpc1850-creg-clk", lpc18xx_creg_clk_init);
+
+static struct clk *clk_creg[CREG_CLK_MAX];
+static struct clk_onecell_data clk_creg_data = {
+ .clks = clk_creg,
+ .clk_num = CREG_CLK_MAX,
+};
+
+static int lpc18xx_creg_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *syscon;
+
+ syscon = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(syscon)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ return PTR_ERR(syscon);
+ }
+
+ clk_creg[CREG_CLK_32KHZ] = clk_creg_early[CREG_CLK_32KHZ];
+ clk_creg[CREG_CLK_1KHZ] =
+ clk_register_creg_clk(NULL, &clk_creg_clocks[CREG_CLK_1KHZ],
+ &clk_creg_clocks[CREG_CLK_32KHZ].name,
+ syscon);
+
+ return of_clk_add_provider(np, of_clk_src_onecell_get, &clk_creg_data);
+}
+
+static const struct of_device_id lpc18xx_creg_clk_of_match[] = {
+ { .compatible = "nxp,lpc1850-creg-clk" },
+ {},
+};
+
+static struct platform_driver lpc18xx_creg_clk_driver = {
+ .probe = lpc18xx_creg_clk_probe,
+ .driver = {
+ .name = "lpc18xx-creg-clk",
+ .of_match_table = lpc18xx_creg_clk_of_match,
+ },
+};
+builtin_platform_driver(lpc18xx_creg_clk_driver);
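
The 2.5 s msleep() in clk_creg_32k_prepare() above is only legal because it sits in the .prepare callback: in the common clock framework, .prepare/.unprepare run in process context and may sleep, while .enable/.disable run under the enable spinlock and must not. A consumer sketch (example_probe() and the NULL con_id are illustrative assumptions, not part of this driver):

#include <linux/clk.h>
#include <linux/device.h>

static int example_probe(struct device *dev)	/* hypothetical consumer */
{
	struct clk *clk;

	clk = devm_clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/*
	 * clk_prepare_enable() invokes .prepare first, in process
	 * context, which is where the oscillator start-up wait above
	 * happens; only then is the atomic .enable path taken.
	 */
	return clk_prepare_enable(clk);
}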
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 10dd0fdaa474..481b2646b496 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -87,7 +87,7 @@ enum {
enum {
/* Start from the last defined clock in dt bindings */
- LPC32XX_CLK_ADC_DIV = LPC32XX_CLK_ADC + 1,
+ LPC32XX_CLK_ADC_DIV = LPC32XX_CLK_HCLK_PLL + 1,
LPC32XX_CLK_ADC_RTC,
LPC32XX_CLK_TEST1,
LPC32XX_CLK_TEST2,
@@ -96,7 +96,6 @@ enum {
LPC32XX_CLK_OSC,
LPC32XX_CLK_SYS,
LPC32XX_CLK_PLL397X,
- LPC32XX_CLK_HCLK_PLL,
LPC32XX_CLK_HCLK_DIV_PERIPH,
LPC32XX_CLK_HCLK_DIV,
LPC32XX_CLK_HCLK,
@@ -589,7 +588,8 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
- u64 m_i, m, n, p, o = rate, i = *parent_rate, d = (u64)rate << 6;
+ u64 m_i, o = rate, i = *parent_rate, d = (u64)rate << 6;
+ u64 m = 0, n = 0, p = 0;
int p_i, n_i;
pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
@@ -1429,6 +1429,8 @@ static struct clk * __init lpc32xx_clk_register(u32 id)
hw = &clk_hw->hw0.div.hw;
else if (clk_hw->type == CLK_GATE)
hw = &clk_hw->hw0.gate.hw;
+ else
+ return ERR_PTR(-EINVAL);
hw->init = &clk_init;
clk = clk_register(NULL, hw);
@@ -1515,7 +1517,7 @@ static void __init lpc32xx_clk_init(struct device_node *np)
return;
}
- for (i = 0; i < LPC32XX_CLK_MAX; i++) {
+ for (i = 1; i < LPC32XX_CLK_MAX; i++) {
clk[i] = lpc32xx_clk_register(i);
if (IS_ERR(clk[i])) {
pr_err("failed to register %s clock: %ld\n",
@@ -1526,9 +1528,6 @@ static void __init lpc32xx_clk_init(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- /* For 13MHz osc valid output range of PLL is from 156MHz to 266.5MHz */
- clk_set_rate(clk[LPC32XX_CLK_HCLK_PLL], 208000000);
-
/* Set 48MHz rate of USB PLL clock */
clk_set_rate(clk[LPC32XX_CLK_USB_PLL], 48000000);
@@ -1555,7 +1554,7 @@ static void __init lpc32xx_usb_clk_init(struct device_node *np)
return;
}
- for (i = 0; i < LPC32XX_USB_CLK_MAX; i++) {
+ for (i = 1; i < LPC32XX_USB_CLK_MAX; i++) {
usb_clk[i] = lpc32xx_clk_register(i + LPC32XX_CLK_USB_OFFSET);
if (IS_ERR(usb_clk[i])) {
pr_err("failed to register %s clock: %ld\n",
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index b7747229db9a..a98b98e2a9e4 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -84,7 +84,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long cccr = CCCR;
+ unsigned long cccr = readl(CCCR);
unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
return parent_rate / m;
@@ -99,7 +99,7 @@ PARENTS(pxa25x_osc3) = { "osc_3_6864mhz", "osc_3_6864mhz" };
#define PXA25X_CKEN(dev_id, con_id, parents, mult, div, \
bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult, div, mult, div, \
- is_lp, &CKEN, CKEN_ ## bit, flags)
+ is_lp, CKEN, CKEN_ ## bit, flags)
#define PXA25X_PBUS95_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay) \
PXA25X_CKEN(dev_id, con_id, pxa25x_pbus95_parents, mult_hp, \
div_hp, bit, NULL, 0)
@@ -112,10 +112,10 @@ PARENTS(pxa25x_osc3) = { "osc_3_6864mhz", "osc_3_6864mhz" };
#define PXA25X_CKEN_1RATE(dev_id, con_id, bit, parents, delay) \
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
- &CKEN, CKEN_ ## bit, 0)
+ CKEN, CKEN_ ## bit, 0)
#define PXA25X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay) \
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
- &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
+ CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
static struct desc_clk_cken pxa25x_clocks[] __initdata = {
PXA25X_PBUS95_CKEN("pxa2xx-mci.0", NULL, MMC, 1, 5, 0),
@@ -162,7 +162,7 @@ MUX_RO_RATE_RO_OPS(clk_pxa25x_core, "core");
static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long cccr = CCCR;
+ unsigned long cccr = readl(CCCR);
unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
return (parent_rate / n2) * 2;
@@ -173,7 +173,7 @@ RATE_RO_OPS(clk_pxa25x_run, "run");
static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long clkcfg, cccr = CCCR;
+ unsigned long clkcfg, cccr = readl(CCCR);
unsigned int l, m, n2, t;
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -200,12 +200,10 @@ static void __init pxa25x_register_core(void)
static void __init pxa25x_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
- 3686400);
+ CLK_GET_RATE_NOCACHE, 3686400);
clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
- 32768);
- clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+ CLK_GET_RATE_NOCACHE, 32768);
+ clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
0, 26, 1);
clk_register_fixed_factor(NULL, "ppll_147_46mhz", "osc_3_6864mhz",
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 5b82d30baf9f..c40b1804f58c 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -85,7 +85,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
bool pxa27x_is_ppll_disabled(void)
{
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
return ccsr & (1 << CCCR_PPDIS_BIT);
}
@@ -93,7 +93,7 @@ bool pxa27x_is_ppll_disabled(void)
#define PXA27X_CKEN(dev_id, con_id, parents, mult_hp, div_hp, \
bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, 1, 1, mult_hp, div_hp, \
- is_lp, &CKEN, CKEN_ ## bit, flags)
+ is_lp, CKEN, CKEN_ ## bit, flags)
#define PXA27X_PBUS_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay) \
PXA27X_CKEN(dev_id, con_id, pxa27x_pbus_parents, mult_hp, \
div_hp, bit, pxa27x_is_ppll_disabled, 0)
@@ -106,10 +106,10 @@ PARENTS(pxa27x_membus) = { "lcd_base", "lcd_base" };
#define PXA27X_CKEN_1RATE(dev_id, con_id, bit, parents, delay) \
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
- &CKEN, CKEN_ ## bit, 0)
+ CKEN, CKEN_ ## bit, 0)
#define PXA27X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay) \
PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
- &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
+ CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
static struct desc_clk_cken pxa27x_clocks[] __initdata = {
PXA27X_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 2, 42, 1),
@@ -151,7 +151,7 @@ static unsigned long clk_pxa27x_cpll_get_rate(struct clk_hw *hw,
unsigned long clkcfg;
unsigned int t, ht;
unsigned int l, L, n2, N;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
t = clkcfg & (1 << 0);
@@ -171,8 +171,8 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned int l, osc_forced;
- unsigned long ccsr = CCSR;
- unsigned long cccr = CCCR;
+ unsigned long ccsr = readl(CCSR);
+ unsigned long cccr = readl(CCCR);
l = ccsr & CCSR_L_MASK;
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
@@ -193,7 +193,7 @@ static unsigned long clk_pxa27x_lcd_base_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_lcd_base_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -208,12 +208,12 @@ MUX_RO_RATE_RO_OPS(clk_pxa27x_lcd_base, "lcd_base");
static void __init pxa27x_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
13 * MHz);
clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
32768 * KHz);
- clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+ clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "ppll_312mhz", "osc_13mhz", 0, 24, 1);
}
@@ -222,7 +222,7 @@ static unsigned long clk_pxa27x_core_get_rate(struct clk_hw *hw,
{
unsigned long clkcfg;
unsigned int t, ht, b, osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -242,7 +242,7 @@ static u8 clk_pxa27x_core_get_parent(struct clk_hw *hw)
{
unsigned long clkcfg;
unsigned int t, ht, b, osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -263,7 +263,7 @@ MUX_RO_RATE_RO_OPS(clk_pxa27x_core, "core");
static unsigned long clk_pxa27x_run_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
unsigned int n2 = (ccsr & CCSR_N2_MASK) >> CCSR_N2_SHIFT;
return (parent_rate / n2) * 2;
@@ -285,7 +285,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
{
unsigned long clkcfg;
unsigned int b, osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
@@ -302,7 +302,7 @@ static unsigned long clk_pxa27x_system_bus_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_system_bus_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced;
- unsigned long ccsr = CCSR;
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
if (osc_forced)
@@ -318,8 +318,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned int a, l, osc_forced;
- unsigned long cccr = CCCR;
- unsigned long ccsr = CCSR;
+ unsigned long cccr = readl(CCCR);
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
a = cccr & (1 << CCCR_A_BIT);
@@ -337,8 +337,8 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
{
unsigned int osc_forced, a;
- unsigned long cccr = CCCR;
- unsigned long ccsr = CCSR;
+ unsigned long cccr = readl(CCCR);
+ unsigned long ccsr = readl(CCSR);
osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
a = cccr & (1 << CCCR_A_BIT);
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4af4eed5f89f..42bdaa772be0 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -284,15 +284,15 @@ static void __init pxa3xx_register_core(void)
static void __init pxa3xx_register_plls(void)
{
clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
13 * MHz);
clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
32768);
clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
- CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+ CLK_GET_RATE_NOCACHE,
120 * MHz);
- clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+ clk_register_fixed_rate(NULL, "clk_dummy", NULL, 0, 0);
clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1);
clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz",
0, 1, 2);
@@ -334,8 +334,7 @@ static void __init pxa3xx_base_clocks_init(void)
clk_register_clk_pxa3xx_system_bus();
clk_register_clk_pxa3xx_ac97();
clk_register_clk_pxa3xx_smemc();
- clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
- (void __iomem *)&OSCC, 11, 0, NULL);
+ clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0, OSCC, 11, 0, NULL);
clkdev_pxa_register(CLK_OSTIMER, "OSTIMER0", NULL,
clk_register_fixed_factor(NULL, "os-timer0",
"osc_13mhz", 0, 1, 4));
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index b552eceec2be..95e3b3e0fa1c 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -28,6 +28,14 @@ config APQ_MMCC_8084
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config IPQ_GCC_4019
+ tristate "IPQ4019 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+	  Support for the global clock controller on IPQ4019 devices.
+	  Say Y if you want to use peripheral devices such as UART, SPI,
+	  I2C, USB, SD/eMMC, etc.
+
config IPQ_GCC_806X
tristate "IPQ806x Global Clock Controller"
depends on COMMON_CLK_QCOM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index dc4280b85db1..2a25f4e75f49 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -14,6 +14,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index bfbb28f450c2..67ce7c146a6a 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -638,7 +638,6 @@ static int clk_rcg_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
src = ns_to_src(&rcg->s, ns);
- f.pre_div = ns_to_pre_div(&rcg->p, ns) + 1;
for (i = 0; i < num_parents; i++) {
if (src == rcg->s.parent_map[i].cfg) {
@@ -647,6 +646,9 @@ static int clk_rcg_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
}
}
+	/* bypass the pre-divider */
+ f.pre_div = 1;
+
/* let us find appropriate m/n values for this */
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index c112ebaba70d..f7c226ab4307 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -119,7 +119,6 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
fixed->hw.init = &init_data;
init_data.name = path;
- init_data.flags = CLK_IS_ROOT;
init_data.ops = &clk_fixed_rate_ops;
clk = devm_clk_register(dev, &fixed->hw);
@@ -185,6 +184,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
struct clk **clks;
struct qcom_reset_controller *reset;
struct qcom_cc *cc;
+ struct gdsc_desc *scd;
size_t num_clks = desc->num_clks;
struct clk_regmap **rclks = desc->clks;
@@ -213,7 +213,11 @@ int qcom_cc_really_probe(struct platform_device *pdev,
if (ret)
return ret;
- devm_add_action(dev, qcom_cc_del_clk_provider, pdev->dev.of_node);
+ ret = devm_add_action_or_reset(dev, qcom_cc_del_clk_provider,
+ pdev->dev.of_node);
+
+ if (ret)
+ return ret;
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
@@ -227,18 +231,28 @@ int qcom_cc_really_probe(struct platform_device *pdev,
if (ret)
return ret;
- devm_add_action(dev, qcom_cc_reset_unregister, &reset->rcdev);
+ ret = devm_add_action_or_reset(dev, qcom_cc_reset_unregister,
+ &reset->rcdev);
+
+ if (ret)
+ return ret;
if (desc->gdscs && desc->num_gdscs) {
- ret = gdsc_register(dev, desc->gdscs, desc->num_gdscs,
- &reset->rcdev, regmap);
+ scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
+ if (!scd)
+ return -ENOMEM;
+ scd->dev = dev;
+ scd->scs = desc->gdscs;
+ scd->num = desc->num_gdscs;
+ ret = gdsc_register(scd, &reset->rcdev, regmap);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
+ scd);
if (ret)
return ret;
}
- devm_add_action(dev, qcom_cc_gdsc_unregister, dev);
-
-
return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
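
The qcom_cc_really_probe() hunks above replace devm_add_action() with devm_add_action_or_reset(), and the difference is the failure path: if registering the action fails (an allocation error), devm_add_action() returns an error without ever running the cleanup, whereas the _or_reset variant executes the action immediately before returning, so the resource it guards cannot leak. A minimal sketch with hypothetical demo_* names:

#include <linux/device.h>

static void demo_teardown(void *data)
{
	/* Undo the setup below; runs on unbind or on registration failure. */
}

static int demo_probe(struct device *dev)
{
	int ret;

	/* ... set up something that demo_teardown() undoes ... */

	ret = devm_add_action_or_reset(dev, demo_teardown, NULL);
	if (ret)
		return ret;	/* demo_teardown() has already been called */

	return 0;
}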
diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
new file mode 100644
index 000000000000..3cd1af0af0d9
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq4019.c
@@ -0,0 +1,1354 @@
+/*
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-ipq4019.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+enum {
+ P_XO,
+ P_FEPLL200,
+ P_FEPLL500,
+ P_DDRPLL,
+ P_FEPLLWCSS2G,
+ P_FEPLLWCSS5G,
+ P_FEPLL125DLY,
+ P_DDRPLLAPSS,
+};
+
+static struct parent_map gcc_xo_200_500_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 1 },
+ { P_FEPLL500, 2 },
+};
+
+static const char * const gcc_xo_200_500[] = {
+ "xo",
+ "fepll200",
+ "fepll500",
+};
+
+static struct parent_map gcc_xo_200_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 1 },
+};
+
+static const char * const gcc_xo_200[] = {
+ "xo",
+ "fepll200",
+};
+
+static struct parent_map gcc_xo_200_spi_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 2 },
+};
+
+static const char * const gcc_xo_200_spi[] = {
+ "xo",
+ "fepll200",
+};
+
+static struct parent_map gcc_xo_sdcc1_500_map[] = {
+ { P_XO, 0 },
+ { P_DDRPLL, 1 },
+ { P_FEPLL500, 2 },
+};
+
+static const char * const gcc_xo_sdcc1_500[] = {
+ "xo",
+ "ddrpll",
+ "fepll500",
+};
+
+static struct parent_map gcc_xo_wcss2g_map[] = {
+ { P_XO, 0 },
+ { P_FEPLLWCSS2G, 1 },
+};
+
+static const char * const gcc_xo_wcss2g[] = {
+ "xo",
+ "fepllwcss2g",
+};
+
+static struct parent_map gcc_xo_wcss5g_map[] = {
+ { P_XO, 0 },
+ { P_FEPLLWCSS5G, 1 },
+};
+
+static const char * const gcc_xo_wcss5g[] = {
+ "xo",
+ "fepllwcss5g",
+};
+
+static struct parent_map gcc_xo_125_dly_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL125DLY, 1 },
+};
+
+static const char * const gcc_xo_125_dly[] = {
+ "xo",
+ "fepll125dly",
+};
+
+static struct parent_map gcc_xo_ddr_500_200_map[] = {
+ { P_XO, 0 },
+ { P_FEPLL200, 3 },
+ { P_FEPLL500, 2 },
+ { P_DDRPLLAPSS, 1 },
+};
+
+static const char * const gcc_xo_ddr_500_200[] = {
+ "xo",
+ "fepll200",
+ "fepll500",
+ "ddrpllapss",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static const struct freq_tbl ftbl_gcc_audio_pwm_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(200000000, P_FEPLL200, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 audio_clk_src = {
+ .cmd_rcgr = 0x1b000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_map,
+ .freq_tbl = ftbl_gcc_audio_pwm_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "audio_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_audio_ahb_clk = {
+ .halt_reg = 0x1b010,
+ .clkr = {
+ .enable_reg = 0x1b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_audio_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_audio_pwm_clk = {
+ .halt_reg = 0x1b00C,
+ .clkr = {
+ .enable_reg = 0x1b00C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_audio_pwm_clk",
+ .parent_names = (const char *[]){
+ "audio_clk_src",
+ },
+ .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_2_i2c_apps_clk[] = {
+ F(19200000, P_XO, 1, 2, 5),
+ F(24000000, P_XO, 1, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x200c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_2_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x2008,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_2_i2c_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x3010,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_2_spi_apps_clk[] = {
+ F(960000, P_XO, 12, 1, 4),
+ F(4800000, P_XO, 1, 1, 10),
+ F(9600000, P_XO, 1, 1, 5),
+ F(15000000, P_XO, 1, 1, 3),
+ F(19200000, P_XO, 1, 2, 5),
+ F(24000000, P_XO, 1, 1, 2),
+ F(48000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_spi_map,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_2_spi_apps_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_200_spi,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x2004,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_2_spi_apps_clk,
+ .parent_map = gcc_xo_200_spi_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_200_spi,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x300c,
+ .clkr = {
+ .enable_reg = 0x300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_2_apps_clk[] = {
+ F(1843200, P_FEPLL200, 1, 144, 15625),
+ F(3686400, P_FEPLL200, 1, 288, 15625),
+ F(7372800, P_FEPLL200, 1, 576, 15625),
+ F(14745600, P_FEPLL200, 1, 1152, 15625),
+ F(16000000, P_FEPLL200, 1, 2, 25),
+ F(24000000, P_XO, 1, 1, 2),
+ F(32000000, P_FEPLL200, 1, 4, 25),
+ F(40000000, P_FEPLL200, 1, 1, 5),
+ F(46400000, P_FEPLL200, 1, 29, 125),
+ F(48000000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_2_apps_clk,
+ .parent_map = gcc_xo_200_spi_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_200_spi,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x203c,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src",
+ },
+ .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x3034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_2_apps_clk,
+ .parent_map = gcc_xo_200_spi_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_200_spi,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x302c,
+ .clkr = {
+ .enable_reg = 0x302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp_clk[] = {
+ F(1250000, P_FEPLL200, 1, 16, 0),
+ F(2500000, P_FEPLL200, 1, 8, 0),
+ F(5000000, P_FEPLL200, 1, 4, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .parent_map = gcc_xo_200_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x8000,
+ .clkr = {
+ .enable_reg = 0x8000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gp1_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .parent_map = gcc_xo_200_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x9000,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gp2_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0xa004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_gp_clk,
+ .parent_map = gcc_xo_200_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0xa000,
+ .clkr = {
+ .enable_reg = 0xa000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gp3_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
+ F(144000, P_XO, 1, 3, 240),
+ F(400000, P_XO, 1, 1, 0),
+ F(20000000, P_FEPLL500, 1, 1, 25),
+ F(25000000, P_FEPLL500, 1, 1, 20),
+ F(50000000, P_FEPLL500, 1, 1, 10),
+ F(100000000, P_FEPLL500, 1, 1, 5),
+ F(193000000, P_DDRPLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x18004,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk,
+ .parent_map = gcc_xo_sdcc1_500_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_xo_sdcc1_500,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_apps_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(200000000, P_FEPLL200, 1, 0, 0),
+ F(500000000, P_FEPLL500, 1, 0, 0),
+ F(626000000, P_DDRPLLAPSS, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apps_clk_src = {
+ .cmd_rcgr = 0x1900c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_apps_clk,
+ .parent_map = gcc_xo_ddr_500_200_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apps_clk_src",
+ .parent_names = gcc_xo_ddr_500_200,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_apps_ahb_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(100000000, P_FEPLL200, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apps_ahb_clk_src = {
+ .cmd_rcgr = 0x19014,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_500_map,
+ .freq_tbl = ftbl_gcc_apps_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apps_ahb_clk_src",
+ .parent_names = gcc_xo_200_500,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_apss_ahb_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_ahb_clk",
+ .parent_names = (const char *[]){
+ "apps_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x1008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcd_xo_clk = {
+ .halt_reg = 0x2103c,
+ .clkr = {
+ .enable_reg = 0x2103c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcd_xo_clk",
+ .parent_names = (const char *[]){
+ "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .clkr = {
+ .enable_reg = 0x1300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .parent_names = (const char *[]){
+ "fepll125",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .parent_names = (const char *[]){
+ "fepll125",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ess_clk = {
+ .halt_reg = 0x12010,
+ .clkr = {
+ .enable_reg = 0x12010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ess_clk",
+ .parent_names = (const char *[]){
+ "fephy_125m_dly_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_imem_axi_clk = {
+ .halt_reg = 0xe004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_imem_axi_clk",
+ .parent_names = (const char *[]){
+ "fepll200",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_imem_cfg_ahb_clk = {
+ .halt_reg = 0xe008,
+ .clkr = {
+ .enable_reg = 0xe008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_imem_cfg_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_ahb_clk = {
+ .halt_reg = 0x1d00c,
+ .clkr = {
+ .enable_reg = 0x1d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_axi_m_clk = {
+ .halt_reg = 0x1d004,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_axi_m_clk",
+ .parent_names = (const char *[]){
+ "fepll200",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_axi_s_clk = {
+ .halt_reg = 0x1d008,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_axi_s_clk",
+ .parent_names = (const char *[]){
+ "fepll200",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+ .halt_reg = 0x1c008,
+ .clkr = {
+ .enable_reg = 0x1c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+ .halt_reg = 0x1c004,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x18010,
+ .clkr = {
+ .enable_reg = 0x18010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x1800c,
+ .clkr = {
+ .enable_reg = 0x1800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_tlmm_ahb_clk = {
+ .halt_reg = 0x5004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tlmm_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_master_clk = {
+ .halt_reg = 0x1e00c,
+ .clkr = {
+ .enable_reg = 0x1e00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2_master_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_sleep_clk = {
+ .halt_reg = 0x1e010,
+ .clkr = {
+ .enable_reg = 0x1e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2_sleep_clk",
+ .parent_names = (const char *[]){
+ "gcc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2_mock_utmi_clk = {
+ .halt_reg = 0x1e014,
+ .clkr = {
+ .enable_reg = 0x1e014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb2_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
+ F(2000000, P_FEPLL200, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1e000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_200_map,
+ .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_xo_200,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_usb3_master_clk = {
+ .halt_reg = 0x1e028,
+ .clkr = {
+ .enable_reg = 0x1e028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_master_clk",
+ .parent_names = (const char *[]){
+ "fepll125",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sleep_clk = {
+ .halt_reg = 0x1e02C,
+ .clkr = {
+ .enable_reg = 0x1e02C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_sleep_clk",
+ .parent_names = (const char *[]){
+ "gcc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mock_utmi_clk = {
+ .halt_reg = 0x1e030,
+ .clkr = {
+ .enable_reg = 0x1e030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "usb30_mock_utmi_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_fephy_dly_clk[] = {
+ F(125000000, P_FEPLL125DLY, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 fephy_125m_dly_clk_src = {
+ .cmd_rcgr = 0x12000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_125_dly_map,
+ .freq_tbl = ftbl_gcc_fephy_dly_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "fephy_125m_dly_clk_src",
+ .parent_names = gcc_xo_125_dly,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_wcss2g_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(250000000, P_FEPLLWCSS2G, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 wcss2g_clk_src = {
+ .cmd_rcgr = 0x1f000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gcc_wcss2g_clk,
+ .parent_map = gcc_xo_wcss2g_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "wcss2g_clk_src",
+ .parent_names = gcc_xo_wcss2g,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch gcc_wcss2g_clk = {
+ .halt_reg = 0x1f00C,
+ .clkr = {
+ .enable_reg = 0x1f00C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss2g_clk",
+ .parent_names = (const char *[]){
+ "wcss2g_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss2g_ref_clk = {
+ .halt_reg = 0x1f00C,
+ .clkr = {
+ .enable_reg = 0x1f00C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss2g_ref_clk",
+ .parent_names = (const char *[]){
+ "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss2g_rtc_clk = {
+ .halt_reg = 0x1f010,
+ .clkr = {
+ .enable_reg = 0x1f010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss2g_rtc_clk",
+ .parent_names = (const char *[]){
+ "gcc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_wcss5g_clk[] = {
+ F(48000000, P_XO, 1, 0, 0),
+ F(250000000, P_FEPLLWCSS5G, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 wcss5g_clk_src = {
+ .cmd_rcgr = 0x20000,
+ .hid_width = 5,
+ .parent_map = gcc_xo_wcss5g_map,
+ .freq_tbl = ftbl_gcc_wcss5g_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "wcss5g_clk_src",
+ .parent_names = gcc_xo_wcss5g,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_wcss5g_clk = {
+ .halt_reg = 0x2000c,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss5g_clk",
+ .parent_names = (const char *[]){
+ "wcss5g_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss5g_ref_clk = {
+ .halt_reg = 0x2000c,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss5g_ref_clk",
+ .parent_names = (const char *[]){
+ "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_wcss5g_rtc_clk = {
+ .halt_reg = 0x20010,
+ .clkr = {
+ .enable_reg = 0x20010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_wcss5g_rtc_clk",
+ .parent_names = (const char *[]){
+ "gcc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_ipq4019_clocks[] = {
+ [AUDIO_CLK_SRC] = &audio_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [GCC_USB3_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [GCC_APPS_CLK_SRC] = &apps_clk_src.clkr,
+ [GCC_APPS_AHB_CLK_SRC] = &apps_ahb_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [FEPHY_125M_DLY_CLK_SRC] = &fephy_125m_dly_clk_src.clkr,
+ [WCSS2G_CLK_SRC] = &wcss2g_clk_src.clkr,
+ [WCSS5G_CLK_SRC] = &wcss5g_clk_src.clkr,
+ [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+ [GCC_AUDIO_AHB_CLK] = &gcc_audio_ahb_clk.clkr,
+ [GCC_AUDIO_PWM_CLK] = &gcc_audio_pwm_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_DCD_XO_CLK] = &gcc_dcd_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_ESS_CLK] = &gcc_ess_clk.clkr,
+ [GCC_IMEM_AXI_CLK] = &gcc_imem_axi_clk.clkr,
+ [GCC_IMEM_CFG_AHB_CLK] = &gcc_imem_cfg_ahb_clk.clkr,
+ [GCC_PCIE_AHB_CLK] = &gcc_pcie_ahb_clk.clkr,
+ [GCC_PCIE_AXI_M_CLK] = &gcc_pcie_axi_m_clk.clkr,
+ [GCC_PCIE_AXI_S_CLK] = &gcc_pcie_axi_s_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_TLMM_AHB_CLK] = &gcc_tlmm_ahb_clk.clkr,
+ [GCC_USB2_MASTER_CLK] = &gcc_usb2_master_clk.clkr,
+ [GCC_USB2_SLEEP_CLK] = &gcc_usb2_sleep_clk.clkr,
+ [GCC_USB2_MOCK_UTMI_CLK] = &gcc_usb2_mock_utmi_clk.clkr,
+ [GCC_USB3_MASTER_CLK] = &gcc_usb3_master_clk.clkr,
+ [GCC_USB3_SLEEP_CLK] = &gcc_usb3_sleep_clk.clkr,
+ [GCC_USB3_MOCK_UTMI_CLK] = &gcc_usb3_mock_utmi_clk.clkr,
+ [GCC_WCSS2G_CLK] = &gcc_wcss2g_clk.clkr,
+ [GCC_WCSS2G_REF_CLK] = &gcc_wcss2g_ref_clk.clkr,
+ [GCC_WCSS2G_RTC_CLK] = &gcc_wcss2g_rtc_clk.clkr,
+ [GCC_WCSS5G_CLK] = &gcc_wcss5g_clk.clkr,
+ [GCC_WCSS5G_REF_CLK] = &gcc_wcss5g_ref_clk.clkr,
+ [GCC_WCSS5G_RTC_CLK] = &gcc_wcss5g_rtc_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_ipq4019_resets[] = {
+ [WIFI0_CPU_INIT_RESET] = { 0x1f008, 5 },
+ [WIFI0_RADIO_SRIF_RESET] = { 0x1f008, 4 },
+ [WIFI0_RADIO_WARM_RESET] = { 0x1f008, 3 },
+ [WIFI0_RADIO_COLD_RESET] = { 0x1f008, 2 },
+ [WIFI0_CORE_WARM_RESET] = { 0x1f008, 1 },
+ [WIFI0_CORE_COLD_RESET] = { 0x1f008, 0 },
+ [WIFI1_CPU_INIT_RESET] = { 0x20008, 5 },
+ [WIFI1_RADIO_SRIF_RESET] = { 0x20008, 4 },
+ [WIFI1_RADIO_WARM_RESET] = { 0x20008, 3 },
+ [WIFI1_RADIO_COLD_RESET] = { 0x20008, 2 },
+ [WIFI1_CORE_WARM_RESET] = { 0x20008, 1 },
+ [WIFI1_CORE_COLD_RESET] = { 0x20008, 0 },
+ [USB3_UNIPHY_PHY_ARES] = { 0x1e038, 5 },
+ [USB3_HSPHY_POR_ARES] = { 0x1e038, 4 },
+ [USB3_HSPHY_S_ARES] = { 0x1e038, 2 },
+ [USB2_HSPHY_POR_ARES] = { 0x1e01c, 4 },
+ [USB2_HSPHY_S_ARES] = { 0x1e01c, 2 },
+ [PCIE_PHY_AHB_ARES] = { 0x1d010, 11 },
+ [PCIE_AHB_ARES] = { 0x1d010, 10 },
+ [PCIE_PWR_ARES] = { 0x1d010, 9 },
+ [PCIE_PIPE_STICKY_ARES] = { 0x1d010, 8 },
+ [PCIE_AXI_M_STICKY_ARES] = { 0x1d010, 7 },
+ [PCIE_PHY_ARES] = { 0x1d010, 6 },
+ [PCIE_PARF_XPU_ARES] = { 0x1d010, 5 },
+ [PCIE_AXI_S_XPU_ARES] = { 0x1d010, 4 },
+ [PCIE_AXI_M_VMIDMT_ARES] = { 0x1d010, 3 },
+ [PCIE_PIPE_ARES] = { 0x1d010, 2 },
+ [PCIE_AXI_S_ARES] = { 0x1d010, 1 },
+ [PCIE_AXI_M_ARES] = { 0x1d010, 0 },
+ [ESS_RESET] = { 0x12008, 0},
+ [GCC_BLSP1_BCR] = {0x01000, 0},
+ [GCC_BLSP1_QUP1_BCR] = {0x02000, 0},
+ [GCC_BLSP1_UART1_BCR] = {0x02038, 0},
+ [GCC_BLSP1_QUP2_BCR] = {0x03008, 0},
+ [GCC_BLSP1_UART2_BCR] = {0x03028, 0},
+ [GCC_BIMC_BCR] = {0x04000, 0},
+ [GCC_TLMM_BCR] = {0x05000, 0},
+ [GCC_IMEM_BCR] = {0x0E000, 0},
+ [GCC_ESS_BCR] = {0x12008, 0},
+ [GCC_PRNG_BCR] = {0x13000, 0},
+ [GCC_BOOT_ROM_BCR] = {0x13008, 0},
+ [GCC_CRYPTO_BCR] = {0x16000, 0},
+ [GCC_SDCC1_BCR] = {0x18000, 0},
+ [GCC_SEC_CTRL_BCR] = {0x1A000, 0},
+ [GCC_AUDIO_BCR] = {0x1B008, 0},
+ [GCC_QPIC_BCR] = {0x1C000, 0},
+ [GCC_PCIE_BCR] = {0x1D000, 0},
+ [GCC_USB2_BCR] = {0x1E008, 0},
+ [GCC_USB2_PHY_BCR] = {0x1E018, 0},
+ [GCC_USB3_BCR] = {0x1E024, 0},
+ [GCC_USB3_PHY_BCR] = {0x1E034, 0},
+ [GCC_SYSTEM_NOC_BCR] = {0x21000, 0},
+ [GCC_PCNOC_BCR] = {0x2102C, 0},
+ [GCC_DCD_BCR] = {0x21038, 0},
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = {0x21064, 0},
+ [GCC_SNOC_BUS_TIMEOUT1_BCR] = {0x2106C, 0},
+ [GCC_SNOC_BUS_TIMEOUT2_BCR] = {0x21074, 0},
+ [GCC_SNOC_BUS_TIMEOUT3_BCR] = {0x2107C, 0},
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = {0x21084, 0},
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = {0x2108C, 0},
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = {0x21094, 0},
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = {0x2109C, 0},
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = {0x210A4, 0},
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = {0x210AC, 0},
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = {0x210B4, 0},
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = {0x210BC, 0},
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = {0x210C4, 0},
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = {0x210CC, 0},
+ [GCC_TCSR_BCR] = {0x22000, 0},
+ [GCC_MPM_BCR] = {0x24000, 0},
+ [GCC_SPDM_BCR] = {0x25000, 0},
+};
+
+static const struct regmap_config gcc_ipq4019_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x2dfff,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_ipq4019_desc = {
+ .config = &gcc_ipq4019_regmap_config,
+ .clks = gcc_ipq4019_clocks,
+ .num_clks = ARRAY_SIZE(gcc_ipq4019_clocks),
+ .resets = gcc_ipq4019_resets,
+ .num_resets = ARRAY_SIZE(gcc_ipq4019_resets),
+};
+
+static const struct of_device_id gcc_ipq4019_match_table[] = {
+ { .compatible = "qcom,gcc-ipq4019" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table);
+
+static int gcc_ipq4019_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
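+ /*
+ * The FEPLL and APSS DDR PLL blocks are not modelled as real
+ * PLLs here; register their outputs as fixed-rate placeholders
+ * so that clocks parented on them can be set up.
+ */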
+ clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000);
+ clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000);
+
+ return qcom_cc_probe(pdev, &gcc_ipq4019_desc);
+}
+
+static struct platform_driver gcc_ipq4019_driver = {
+ .probe = gcc_ipq4019_probe,
+ .driver = {
+ .name = "qcom,gcc-ipq4019",
+ .owner = THIS_MODULE,
+ .of_match_table = gcc_ipq4019_match_table,
+ },
+};
+
+static int __init gcc_ipq4019_init(void)
+{
+ return platform_driver_register(&gcc_ipq4019_driver);
+}
+core_initcall(gcc_ipq4019_init);
+
+static void __exit gcc_ipq4019_exit(void)
+{
+ platform_driver_unregister(&gcc_ipq4019_driver);
+}
+module_exit(gcc_ipq4019_exit);
+
+MODULE_ALIAS("platform:gcc-ipq4019");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM GCC IPQ4019 driver");
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index dd5402bac620..52a7d3959875 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -890,7 +890,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -906,7 +905,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -922,7 +920,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -938,7 +935,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -954,7 +950,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -970,7 +965,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1144,7 +1138,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1308,7 +1301,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1324,7 +1316,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1394,7 +1385,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1410,7 +1400,6 @@ static struct clk_branch dma_bam_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "dma_bam_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1425,7 +1414,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1442,7 +1430,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1457,7 +1444,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1472,7 +1458,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1487,7 +1472,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1504,7 +1488,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1563,7 +1546,6 @@ static struct clk_branch pcie_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1577,7 +1559,6 @@ static struct clk_branch pcie_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1591,7 +1572,6 @@ static struct clk_branch pcie_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1605,7 +1585,6 @@ static struct clk_branch pcie_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1659,7 +1638,6 @@ static struct clk_branch pcie1_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1673,7 +1651,6 @@ static struct clk_branch pcie1_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1687,7 +1664,6 @@ static struct clk_branch pcie1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1701,7 +1677,6 @@ static struct clk_branch pcie1_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1755,7 +1730,6 @@ static struct clk_branch pcie2_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1769,7 +1743,6 @@ static struct clk_branch pcie2_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1783,7 +1756,6 @@ static struct clk_branch pcie2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1797,7 +1769,6 @@ static struct clk_branch pcie2_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1887,7 +1858,6 @@ static struct clk_branch sata_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1901,7 +1871,6 @@ static struct clk_branch sata_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1915,7 +1884,6 @@ static struct clk_branch sfab_sata_s_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sfab_sata_s_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1929,7 +1897,6 @@ static struct clk_branch sata_phy_cfg_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_phy_cfg_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2139,7 +2106,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2218,7 +2184,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2234,7 +2199,6 @@ static struct clk_branch ebi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "ebi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2248,7 +2212,6 @@ static struct clk_branch ebi2_aon_clk = {
.hw.init = &(struct clk_init_data){
.name = "ebi2_always_on_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index ad413036f7c7..6dc55864979c 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -1479,7 +1479,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2027,7 +2026,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2041,7 +2039,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2055,7 +2052,6 @@ static struct clk_branch gsbi3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2069,7 +2065,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2083,7 +2078,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2097,7 +2091,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2111,7 +2104,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2125,7 +2117,6 @@ static struct clk_branch gsbi8_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi8_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2139,7 +2130,6 @@ static struct clk_branch gsbi9_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi9_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2153,7 +2143,6 @@ static struct clk_branch gsbi10_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi10_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2167,7 +2156,6 @@ static struct clk_branch gsbi11_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi11_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2181,7 +2169,6 @@ static struct clk_branch gsbi12_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi12_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2195,7 +2182,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2209,7 +2195,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2223,7 +2208,6 @@ static struct clk_branch usb_fs2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2237,7 +2221,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2251,7 +2234,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2265,7 +2247,6 @@ static struct clk_branch sdc2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2279,7 +2260,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2293,7 +2273,6 @@ static struct clk_branch sdc4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2307,7 +2286,6 @@ static struct clk_branch sdc5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2322,7 +2300,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2337,7 +2314,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2352,7 +2328,6 @@ static struct clk_branch adm1_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm1_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2367,7 +2342,6 @@ static struct clk_branch adm1_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm1_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2382,7 +2356,6 @@ static struct clk_branch modem_ahb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "modem_ahb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2397,7 +2370,6 @@ static struct clk_branch modem_ahb2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "modem_ahb2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2412,7 +2384,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2427,7 +2398,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2442,7 +2412,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2459,7 +2428,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 8cc9b2868b41..9c29080a84d8 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -2590,6 +2590,23 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
},
};
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_names = (const char *[]){
+ "bimc_ddr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_oxili_ahb_clk = {
.halt_reg = 0x59028,
.clkr = {
@@ -3227,6 +3244,7 @@ static struct clk_regmap *gcc_msm8916_clocks[] = {
[GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr,
[GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr,
[GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
};
static struct gdsc *gcc_msm8916_gdscs[] = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 983dd7dc89a7..eb551c75fba6 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -1546,7 +1546,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2143,7 +2142,6 @@ static struct clk_branch usb_hsic_hsio_cal_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_hsio_cal_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2293,7 +2291,6 @@ static struct clk_branch ce1_core_clk = {
.hw.init = &(struct clk_init_data){
.name = "ce1_core_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2307,7 +2304,6 @@ static struct clk_branch ce1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "ce1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2323,7 +2319,6 @@ static struct clk_branch dma_bam_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "dma_bam_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2339,7 +2334,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2355,7 +2349,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2371,7 +2364,6 @@ static struct clk_branch gsbi3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2387,7 +2379,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2403,7 +2394,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2419,7 +2409,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2435,7 +2424,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2451,7 +2439,6 @@ static struct clk_branch gsbi8_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi8_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2467,7 +2454,6 @@ static struct clk_branch gsbi9_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi9_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2483,7 +2469,6 @@ static struct clk_branch gsbi10_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi10_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2499,7 +2484,6 @@ static struct clk_branch gsbi11_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi11_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2515,7 +2499,6 @@ static struct clk_branch gsbi12_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi12_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2531,7 +2514,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2545,7 +2527,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2559,7 +2540,6 @@ static struct clk_branch usb_fs2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2575,7 +2555,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2589,7 +2568,6 @@ static struct clk_branch usb_hs3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2603,7 +2581,6 @@ static struct clk_branch usb_hs4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2617,7 +2594,6 @@ static struct clk_branch usb_hsic_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2633,7 +2609,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2649,7 +2624,6 @@ static struct clk_branch sdc2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2665,7 +2639,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2681,7 +2654,6 @@ static struct clk_branch sdc4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2697,7 +2669,6 @@ static struct clk_branch sdc5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2712,7 +2683,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2729,7 +2699,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2753,7 +2722,7 @@ static struct clk_rcg ce3_src = {
},
.freq_tbl = clk_tbl_ce3,
.clkr = {
- .enable_reg = 0x2c08,
+ .enable_reg = 0x36c0,
.enable_mask = BIT(7),
.hw.init = &(struct clk_init_data){
.name = "ce3_src",
@@ -2769,7 +2738,7 @@ static struct clk_branch ce3_core_clk = {
.halt_reg = 0x2fdc,
.halt_bit = 5,
.clkr = {
- .enable_reg = 0x36c4,
+ .enable_reg = 0x36cc,
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "ce3_core_clk",
@@ -2883,7 +2852,6 @@ static struct clk_branch sata_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2897,7 +2865,6 @@ static struct clk_branch sata_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2911,7 +2878,6 @@ static struct clk_branch sfab_sata_s_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sfab_sata_s_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2925,7 +2891,6 @@ static struct clk_branch sata_phy_cfg_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_phy_cfg_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2939,7 +2904,6 @@ static struct clk_branch pcie_phy_ref_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_phy_ref_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2953,7 +2917,6 @@ static struct clk_branch pcie_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2967,7 +2930,6 @@ static struct clk_branch pcie_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2982,7 +2944,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2997,7 +2958,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -3012,7 +2972,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -3029,7 +2988,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 335952db309b..00915209e7c5 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -1965,7 +1965,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
- .flags = CLK_IS_ROOT,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 16d7c323db49..c9b96f318d9c 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -30,6 +30,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
@@ -1320,7 +1321,7 @@ static struct clk_branch gcc_mmss_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_bimc_gfx_clk",
- .flags = CLK_SET_RATE_PARENT | CLK_IS_ROOT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2314,7 +2315,7 @@ static struct clk_branch gcc_bimc_gfx_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_bimc_gfx_clk",
- .flags = CLK_SET_RATE_PARENT | CLK_IS_ROOT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2814,7 +2815,6 @@ static struct clk_branch gcc_ufs_sys_clk_core_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_sys_clk_core_clk",
.ops = &clk_branch2_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2827,7 +2827,6 @@ static struct clk_branch gcc_ufs_tx_symbol_clk_core_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_clk_core_clk",
.ops = &clk_branch2_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -3059,6 +3058,83 @@ static struct clk_hw *gcc_msm8996_hws[] = {
&ufs_ice_core_postdiv_clk_src.hw,
};
+static struct gdsc aggre0_noc_gdsc = {
+ .gdscr = 0x81004,
+ .gds_hw_ctrl = 0x81028,
+ .pd = {
+ .name = "aggre0_noc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre0_noc_gdsc = {
+ .gdscr = 0x7d024,
+ .pd = {
+ .name = "hlos1_vote_aggre0_noc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_adsp_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_lpass_adsp",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_core_gdsc = {
+ .gdscr = 0x7d038,
+ .pd = {
+ .name = "hlos1_vote_lpass_core",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie1_gdsc = {
+ .gdscr = 0x6d004,
+ .pd = {
+ .name = "pcie1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie2_gdsc = {
+ .gdscr = 0x6e004,
+ .pd = {
+ .name = "pcie2",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *gcc_msm8996_clocks[] = {
[GPLL0_EARLY] = &gpll0_early.clkr,
[GPLL0] = &gpll0.clkr,
@@ -3245,6 +3321,18 @@ static struct clk_regmap *gcc_msm8996_clocks[] = {
[GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
};
+static struct gdsc *gcc_msm8996_gdscs[] = {
+ [AGGRE0_NOC_GDSC] = &aggre0_noc_gdsc,
+ [HLOS1_VOTE_AGGRE0_NOC_GDSC] = &hlos1_vote_aggre0_noc_gdsc,
+ [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc,
+ [HLOS1_VOTE_LPASS_CORE_GDSC] = &hlos1_vote_lpass_core_gdsc,
+ [USB30_GDSC] = &usb30_gdsc,
+ [PCIE0_GDSC] = &pcie0_gdsc,
+ [PCIE1_GDSC] = &pcie1_gdsc,
+ [PCIE2_GDSC] = &pcie2_gdsc,
+ [UFS_GDSC] = &ufs_gdsc,
+};
+
static const struct qcom_reset_map gcc_msm8996_resets[] = {
[GCC_SYSTEM_NOC_BCR] = { 0x4000 },
[GCC_CONFIG_NOC_BCR] = { 0x5000 },
@@ -3363,6 +3451,8 @@ static const struct qcom_cc_desc gcc_msm8996_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8996_clocks),
.resets = gcc_msm8996_resets,
.num_resets = ARRAY_SIZE(gcc_msm8996_resets),
+ .gdscs = gcc_msm8996_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs),
};
static const struct of_device_id gcc_msm8996_match_table[] = {
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index da9fad8b642b..f12d7b2bddd7 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
@@ -42,12 +43,12 @@
#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
-static int gdsc_is_enabled(struct gdsc *sc)
+static int gdsc_is_enabled(struct gdsc *sc, unsigned int reg)
{
u32 val;
int ret;
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ ret = regmap_read(sc->regmap, reg, &val);
if (ret)
return ret;
@@ -58,28 +59,46 @@ static int gdsc_toggle_logic(struct gdsc *sc, bool en)
{
int ret;
u32 val = en ? 0 : SW_COLLAPSE_MASK;
- u32 check = en ? PWR_ON_MASK : 0;
- unsigned long timeout;
+ ktime_t start;
+ unsigned int status_reg = sc->gdscr;
ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
if (ret)
return ret;
- timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
- do {
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
- if (ret)
- return ret;
+ /* If disabling a votable GDSC, don't poll on status */
+ if ((sc->flags & VOTABLE) && !en) {
+ /*
+ * Add a short delay here to ensure that an enable
+ * right after the disable does not put the GDSC in
+ * an unknown state.
+ */
+ udelay(TIMEOUT_US);
+ return 0;
+ }
- if ((val & PWR_ON_MASK) == check)
- return 0;
- } while (time_before(jiffies, timeout));
+ if (sc->gds_hw_ctrl) {
+ status_reg = sc->gds_hw_ctrl;
+ /*
+ * The GDS HW controller asserts/de-asserts the status bit soon
+ * after it receives a power on/off request from a master.
+ * The controller then takes around 8 XO cycles to start its
+ * internal state machine and update the status bit. During
+ * this time, the status bit does not reflect the true status
+ * of the core.
+ * Add a delay of 1 us between writing to the SW_COLLAPSE bit
+ * and polling the status bit.
+ */
+ udelay(1);
+ }
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
- if (ret)
- return ret;
+ start = ktime_get();
+ do {
+ if (gdsc_is_enabled(sc, status_reg) == en)
+ return 0;
+ } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);
- if ((val & PWR_ON_MASK) == check)
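+ /* Timed out; check the status once more before giving up. */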
+ if (gdsc_is_enabled(sc, status_reg) == en)
return 0;
return -ETIMEDOUT;
@@ -165,6 +184,7 @@ static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
int on, ret;
+ unsigned int reg;
/*
* Disable HW trigger: collapse/restore occur based on registers writes.
@@ -185,10 +205,18 @@ static int gdsc_init(struct gdsc *sc)
return ret;
}
- on = gdsc_is_enabled(sc);
+ reg = sc->gds_hw_ctrl ? sc->gds_hw_ctrl : sc->gdscr;
+ on = gdsc_is_enabled(sc, reg);
if (on < 0)
return on;
+ /*
+ * Votable GDSCs can be on due to votes from other masters.
+ * If a votable GDSC is on, make sure we hold a vote for it too.
+ */
+ if ((sc->flags & VOTABLE) && on)
+ gdsc_enable(&sc->pd);
+
if (on || (sc->pwrsts & PWRSTS_RET))
gdsc_force_mem_on(sc);
else
@@ -201,11 +229,14 @@ static int gdsc_init(struct gdsc *sc)
return 0;
}
-int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
+int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev, struct regmap *regmap)
{
int i, ret;
struct genpd_onecell_data *data;
+ struct device *dev = desc->dev;
+ struct gdsc **scs = desc->scs;
+ size_t num = desc->num;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -228,10 +259,30 @@ int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
data->domains[i] = &scs[i]->pd;
}
+ /* Add subdomains */
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ if (scs[i]->parent)
+ pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+ }
+
return of_genpd_add_provider_onecell(dev->of_node, data);
}
-void gdsc_unregister(struct device *dev)
+void gdsc_unregister(struct gdsc_desc *desc)
{
+ int i;
+ struct device *dev = desc->dev;
+ struct gdsc **scs = desc->scs;
+ size_t num = desc->num;
+
+ /* Remove subdomains */
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ if (scs[i]->parent)
+ pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+ }
of_genpd_del_provider(dev->of_node);
}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 5ded26884f08..3bf497c36bdf 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -20,18 +20,12 @@
struct regmap;
struct reset_controller_dev;
-/* Powerdomain allowable state bitfields */
-#define PWRSTS_OFF BIT(0)
-#define PWRSTS_RET BIT(1)
-#define PWRSTS_ON BIT(2)
-#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
-#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
-
/**
* struct gdsc - Globally Distributed Switch Controller
* @pd: generic power domain
* @regmap: regmap for MMIO accesses
* @gdscr: gsdc control register
+ * @gds_hw_ctrl: gds_hw_ctrl register
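+ * @parent: powerdomain to register this GDSC under as a genpd subdomain
+ * @flags: modifier flags for the GDSC behaviour, e.g. VOTABLE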
* @cxcs: offsets of branch registers to toggle mem/periph bits in
* @cxc_count: number of @cxcs
* @pwrsts: Possible powerdomain power states
@@ -41,28 +35,44 @@ struct reset_controller_dev;
*/
struct gdsc {
struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
struct regmap *regmap;
unsigned int gdscr;
+ unsigned int gds_hw_ctrl;
unsigned int *cxcs;
unsigned int cxc_count;
const u8 pwrsts;
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_OFF BIT(0)
+#define PWRSTS_RET BIT(1)
+#define PWRSTS_ON BIT(2)
+#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
+#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
+ const u8 flags;
+#define VOTABLE BIT(0)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
};
+struct gdsc_desc {
+ struct device *dev;
+ struct gdsc **scs;
+ size_t num;
+};
+
#ifdef CONFIG_QCOM_GDSC
-int gdsc_register(struct device *, struct gdsc **, size_t n,
- struct reset_controller_dev *, struct regmap *);
-void gdsc_unregister(struct device *);
+int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
+ struct regmap *);
+void gdsc_unregister(struct gdsc_desc *desc);
#else
-static inline int gdsc_register(struct device *d, struct gdsc **g, size_t n,
+static inline int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev,
struct regmap *r)
{
return -ENOSYS;
}
-static inline void gdsc_unregister(struct device *d) {};
+static inline void gdsc_unregister(struct gdsc_desc *desc) { }
#endif /* CONFIG_QCOM_GDSC */
#endif /* __QCOM_GDSC_H__ */
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 00e36192a1de..7f21421c87d6 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -1789,7 +1789,6 @@ static struct clk_branch gmem_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gmem_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1805,7 +1804,6 @@ static struct clk_branch ijpeg_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "ijpeg_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1821,7 +1819,6 @@ static struct clk_branch mmss_imem_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "mmss_imem_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1835,7 +1832,6 @@ static struct clk_branch jpegd_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "jpegd_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1851,7 +1847,6 @@ static struct clk_branch vcodec_axi_b_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_b_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1867,7 +1862,6 @@ static struct clk_branch vcodec_axi_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1883,7 +1877,6 @@ static struct clk_branch vcodec_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1897,7 +1890,6 @@ static struct clk_branch vfe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vfe_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1913,7 +1905,6 @@ static struct clk_branch mdp_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "mdp_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1929,7 +1920,6 @@ static struct clk_branch rot_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "rot_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1945,7 +1935,6 @@ static struct clk_branch vcap_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcap_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1961,7 +1950,6 @@ static struct clk_branch vpe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vpe_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1977,7 +1965,6 @@ static struct clk_branch gfx3d_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx3d_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1991,7 +1978,6 @@ static struct clk_branch amp_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "amp_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2005,7 +1991,6 @@ static struct clk_branch csi_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "csi_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2019,7 +2004,6 @@ static struct clk_branch dsi_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2035,7 +2019,6 @@ static struct clk_branch dsi_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2049,7 +2032,6 @@ static struct clk_branch dsi2_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi2_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2065,7 +2047,6 @@ static struct clk_branch dsi2_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi2_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2425,7 +2406,6 @@ static struct clk_branch gfx2d0_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx2d0_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2441,7 +2421,6 @@ static struct clk_branch gfx2d1_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx2d1_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2457,7 +2436,6 @@ static struct clk_branch gfx3d_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx3d_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2473,7 +2451,6 @@ static struct clk_branch hdmi_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2489,7 +2466,6 @@ static struct clk_branch hdmi_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2503,7 +2479,6 @@ static struct clk_branch ijpeg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "ijpeg_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2519,7 +2494,6 @@ static struct clk_branch mmss_imem_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "mmss_imem_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2533,7 +2507,6 @@ static struct clk_branch jpegd_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "jpegd_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2547,7 +2520,6 @@ static struct clk_branch mdp_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "mdp_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2561,7 +2533,6 @@ static struct clk_branch rot_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "rot_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2577,7 +2548,6 @@ static struct clk_branch smmu_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "smmu_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2591,7 +2561,6 @@ static struct clk_branch tv_enc_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "tv_enc_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2605,7 +2574,6 @@ static struct clk_branch vcap_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcap_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2621,7 +2589,6 @@ static struct clk_branch vcodec_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2635,7 +2602,6 @@ static struct clk_branch vfe_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vfe_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2649,7 +2615,6 @@ static struct clk_branch vpe_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vpe_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 9d790bcadf25..715e7cd94125 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2400,6 +2400,7 @@ static struct gdsc oxilicx_gdsc = {
.pd = {
.name = "oxilicx",
},
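+ /* oxilicx is a subdomain of oxili; gdsc_register() wires this up */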
+ .parent = &oxili_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2615,7 +2616,6 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8974_match_table);
static int mmcc_msm8974_probe(struct platform_device *pdev)
{
struct regmap *regmap;
- int ret;
regmap = qcom_cc_map(pdev, &mmcc_msm8974_desc);
if (IS_ERR(regmap))
@@ -2624,22 +2624,11 @@ static int mmcc_msm8974_probe(struct platform_device *pdev)
clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
- ret = qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
- if (ret)
- return ret;
-
- return pm_genpd_add_subdomain(&oxili_gdsc.pd, &oxilicx_gdsc.pd);
-}
-
-static int mmcc_msm8974_remove(struct platform_device *pdev)
-{
- pm_genpd_remove_subdomain(&oxili_gdsc.pd, &oxilicx_gdsc.pd);
- return 0;
+ return qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
}
static struct platform_driver mmcc_msm8974_driver = {
.probe = mmcc_msm8974_probe,
- .remove = mmcc_msm8974_remove,
.driver = {
.name = "mmcc-msm8974",
.of_match_table = mmcc_msm8974_match_table,
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 064f3eaa39d0..6df7ff36b416 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -32,6 +32,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
@@ -2917,6 +2918,144 @@ static struct clk_hw *mmcc_msm8996_hws[] = {
&gpll0_div.hw,
};
+static struct gdsc mmagic_video_gdsc = {
+ .gdscr = 0x119c,
+ .gds_hw_ctrl = 0x120c,
+ .pd = {
+ .name = "mmagic_video",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc mmagic_mdss_gdsc = {
+ .gdscr = 0x247c,
+ .gds_hw_ctrl = 0x2480,
+ .pd = {
+ .name = "mmagic_mdss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc mmagic_camss_gdsc = {
+ .gdscr = 0x3c4c,
+ .gds_hw_ctrl = 0x3c50,
+ .pd = {
+ .name = "mmagic_camss",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x1024,
+ .cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
+ .cxc_count = 3,
+ .pd = {
+ .name = "venus",
+ },
+ .parent = &mmagic_video_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x1040,
+ .cxcs = (unsigned int []){ 0x1048 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core1_gdsc = {
+ .gdscr = 0x1044,
+ .cxcs = (unsigned int []){ 0x104c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc camss_gdsc = {
+ .gdscr = 0x34a0,
+ .cxcs = (unsigned int []){ 0x36bc, 0x36c4 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "camss",
+ },
+ .parent = &mmagic_camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe0_gdsc = {
+ .gdscr = 0x3664,
+ .cxcs = (unsigned int []){ 0x36a8 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "vfe0",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe1_gdsc = {
+ .gdscr = 0x3674,
+ .cxcs = (unsigned int []){ 0x36ac },
+ .cxc_count = 1,
+ .pd = {
+ .name = "vfe0",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .cxcs = (unsigned int []){ 0x35a8, 0x35b0, 0x35c0, 0x35b8 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "jpeg",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc cpp_gdsc = {
+ .gdscr = 0x36d4,
+ .cxcs = (unsigned int []){ 0x36b0 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "cpp",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc fd_gdsc = {
+ .gdscr = 0x3b64,
+ .cxcs = (unsigned int []){ 0x3b68, 0x3b6c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "fd",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .cxcs = (unsigned int []){ 0x2310, 0x231c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss",
+ },
+ .parent = &mmagic_mdss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct clk_regmap *mmcc_msm8996_clocks[] = {
[MMPLL0_EARLY] = &mmpll0_early.clkr,
[MMPLL0_PLL] = &mmpll0.clkr,
@@ -3093,6 +3232,22 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
[FD_AHB_CLK] = &fd_ahb_clk.clkr,
};
+static struct gdsc *mmcc_msm8996_gdscs[] = {
+ [MMAGIC_VIDEO_GDSC] = &mmagic_video_gdsc,
+ [MMAGIC_MDSS_GDSC] = &mmagic_mdss_gdsc,
+ [MMAGIC_CAMSS_GDSC] = &mmagic_camss_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+ [CAMSS_GDSC] = &camss_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [CPP_GDSC] = &cpp_gdsc,
+ [FD_GDSC] = &fd_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
static const struct qcom_reset_map mmcc_msm8996_resets[] = {
[MMAGICAHB_BCR] = { 0x5020 },
[MMAGIC_CFG_BCR] = { 0x5050 },
@@ -3170,6 +3325,8 @@ static const struct qcom_cc_desc mmcc_msm8996_desc = {
.num_clks = ARRAY_SIZE(mmcc_msm8996_clocks),
.resets = mmcc_msm8996_resets,
.num_resets = ARRAY_SIZE(mmcc_msm8996_resets),
+ .gdscs = mmcc_msm8996_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs),
};
static const struct of_device_id mmcc_msm8996_match_table[] = {
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index 6c977d3a8590..0324d8daab9b 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -55,7 +55,7 @@ qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
return regmap_update_bits(rst->regmap, map->reg, mask, 0);
}
-struct reset_control_ops qcom_reset_ops = {
+const struct reset_control_ops qcom_reset_ops = {
.reset = qcom_reset,
.assert = qcom_reset_assert,
.deassert = qcom_reset_deassert,
diff --git a/drivers/clk/qcom/reset.h b/drivers/clk/qcom/reset.h
index 0e11e2130f97..cda877927d43 100644
--- a/drivers/clk/qcom/reset.h
+++ b/drivers/clk/qcom/reset.h
@@ -32,6 +32,6 @@ struct qcom_reset_controller {
#define to_qcom_reset_controller(r) \
container_of(r, struct qcom_reset_controller, rcdev);
-extern struct reset_control_ops qcom_reset_ops;
+extern const struct reset_control_ops qcom_reset_ops;
#endif
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/renesas/Makefile
index 7e2579b30326..7e2579b30326 100644
--- a/drivers/clk/shmobile/Makefile
+++ b/drivers/clk/renesas/Makefile
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 999994769450..0627860233cb 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -82,9 +82,8 @@ static unsigned long cpg_div6_clock_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct div6_clock *clock = to_div6_clock(hw);
- unsigned int div = (clk_readl(clock->reg) & CPG_DIV6_DIV_MASK) + 1;
- return parent_rate / div;
+ return parent_rate / clock->div;
}
static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
diff --git a/drivers/clk/shmobile/clk-div6.h b/drivers/clk/renesas/clk-div6.h
index 9a85a95188da..567b31d2bfa5 100644
--- a/drivers/clk/shmobile/clk-div6.h
+++ b/drivers/clk/renesas/clk-div6.h
@@ -1,5 +1,5 @@
-#ifndef __SHMOBILE_CLK_DIV6_H__
-#define __SHMOBILE_CLK_DIV6_H__
+#ifndef __RENESAS_CLK_DIV6_H__
+#define __RENESAS_CLK_DIV6_H__
struct clk *cpg_div6_register(const char *name, unsigned int num_parents,
const char **parent_names, void __iomem *reg);
diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/renesas/clk-emev2.c
index a91825471c79..a91825471c79 100644
--- a/drivers/clk/shmobile/clk-emev2.c
+++ b/drivers/clk/renesas/clk-emev2.c
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 3b09716ebda2..3d44e183aedd 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -14,7 +14,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 9326204bed9d..28d204bb659e 100644
--- a/drivers/clk/shmobile/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/clk/shmobile/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 1e6b1da58065..2f7ce6696b6c 100644
--- a/drivers/clk/shmobile/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/clk/shmobile/clk-r8a7778.c b/drivers/clk/renesas/clk-r8a7778.c
index b1741551fff2..40e3a501a50e 100644
--- a/drivers/clk/shmobile/clk-r8a7778.c
+++ b/drivers/clk/renesas/clk-r8a7778.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/shmobile/clk-r8a7779.c b/drivers/clk/renesas/clk-r8a7779.c
index 92275c5f2c60..cf2a37df03b1 100644
--- a/drivers/clk/shmobile/clk-r8a7779.c
+++ b/drivers/clk/renesas/clk-r8a7779.c
@@ -11,7 +11,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 841977240305..00e6aba4b9c0 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -11,7 +11,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/math64.h>
diff --git a/drivers/clk/shmobile/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 9766e3cb595f..f6312c62f16b 100644
--- a/drivers/clk/shmobile/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -10,7 +10,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
diff --git a/drivers/clk/shmobile/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index 8966f8bbfd72..eea38f6ea77e 100644
--- a/drivers/clk/shmobile/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -9,7 +9,7 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clk/shmobile.h>
+#include <linux/clk/renesas.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
diff --git a/drivers/clk/shmobile/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 13e994772dfd..b2198aef5ed4 100644
--- a/drivers/clk/shmobile/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -20,6 +20,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/slab.h>
#include <dt-bindings/clock/r8a7795-cpg-mssr.h>
@@ -61,6 +62,7 @@ enum r8a7795_clk_types {
CLK_TYPE_GEN3_PLL2,
CLK_TYPE_GEN3_PLL3,
CLK_TYPE_GEN3_PLL4,
+ CLK_TYPE_GEN3_SD,
};
static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
@@ -99,11 +101,18 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED("s3d1", R8A7795_CLK_S3D1, CLK_S3, 1, 1),
DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
+
+ DEF_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074),
+ DEF_SD("sd1", R8A7795_CLK_SD1, CLK_PLL1_DIV2, 0x0078),
+ DEF_SD("sd2", R8A7795_CLK_SD2, CLK_PLL1_DIV2, 0x0268),
+ DEF_SD("sd3", R8A7795_CLK_SD3, CLK_PLL1_DIV2, 0x026c),
+
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV2, 0x250),
+ DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
};
static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
@@ -120,8 +129,17 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S3D1),
DEF_MOD("scif2", 310, R8A7795_CLK_S3D4),
+ DEF_MOD("sdif3", 311, R8A7795_CLK_SD3),
+ DEF_MOD("sdif2", 312, R8A7795_CLK_SD2),
+ DEF_MOD("sdif1", 313, R8A7795_CLK_SD1),
+ DEF_MOD("sdif0", 314, R8A7795_CLK_SD0),
DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1),
DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1),
+ DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1),
+ DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A7795_CLK_S3D1),
+ DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1),
DEF_MOD("audmac0", 502, R8A7795_CLK_S3D4),
DEF_MOD("audmac1", 501, R8A7795_CLK_S3D4),
@@ -130,6 +148,21 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1),
DEF_MOD("hscif1", 519, R8A7795_CLK_S3D1),
DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
+ DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvd2", 601, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvd1", 602, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvd0", 603, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvb1", 606, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvb0", 607, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvi2", 609, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvi1", 610, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpvi0", 611, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpf2", 613, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpf1", 614, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpf0", 615, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpci1", 616, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpci0", 617, R8A7795_CLK_S2D1),
+ DEF_MOD("fcpcs", 619, R8A7795_CLK_S2D1),
DEF_MOD("vspd3", 620, R8A7795_CLK_S2D1),
DEF_MOD("vspd2", 621, R8A7795_CLK_S2D1),
DEF_MOD("vspd1", 622, R8A7795_CLK_S2D1),
@@ -147,6 +180,7 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("du2", 722, R8A7795_CLK_S2D1),
DEF_MOD("du1", 723, R8A7795_CLK_S2D1),
DEF_MOD("du0", 724, R8A7795_CLK_S2D1),
+ DEF_MOD("lvds", 727, R8A7795_CLK_S2D1),
DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI),
DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI),
DEF_MOD("etheravb", 812, R8A7795_CLK_S3D2),
@@ -159,6 +193,9 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("gpio2", 910, R8A7795_CLK_CP),
DEF_MOD("gpio1", 911, R8A7795_CLK_CP),
DEF_MOD("gpio0", 912, R8A7795_CLK_CP),
+ DEF_MOD("can-fd", 914, R8A7795_CLK_S3D2),
+ DEF_MOD("can-if1", 915, R8A7795_CLK_S3D4),
+ DEF_MOD("can-if0", 916, R8A7795_CLK_S3D4),
DEF_MOD("i2c6", 918, R8A7795_CLK_S3D2),
DEF_MOD("i2c5", 919, R8A7795_CLK_S3D2),
DEF_MOD("i2c4", 927, R8A7795_CLK_S3D2),
@@ -198,6 +235,221 @@ static const unsigned int r8a7795_crit_mod_clks[] __initconst = {
MOD_CLK_ID(408), /* INTC-AP (GIC) */
};
+/* ---------------------------------------------------------------------------
+ * SDn Clock
+ */
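+/*
+ * Each SDn control register packs two clock-stop bits (HCK/CK) and a
+ * two-stage divider selector (sd_srcfc in bits 4:2, sd_fc in bits 1:0).
+ */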
+#define CPG_SD_STP_HCK BIT(9)
+#define CPG_SD_STP_CK BIT(8)
+
+#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
+#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
+
+#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
+{ \
+ .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
+ ((stp_ck) ? CPG_SD_STP_CK : 0) | \
+ ((sd_srcfc) << 2) | \
+ ((sd_fc) << 0), \
+ .div = (sd_div), \
+}
+
+struct sd_div_table {
+ u32 val;
+ unsigned int div;
+};
+
+struct sd_clock {
+ struct clk_hw hw;
+ void __iomem *reg;
+ const struct sd_div_table *div_table;
+ unsigned int div_num;
+ unsigned int div_min;
+ unsigned int div_max;
+};
+
+/* SDn divider
+ * sd_srcfc sd_fc div
+ * stp_hck stp_ck (div) (div) = sd_srcfc x sd_fc
+ *-------------------------------------------------------------------
+ * 0 0 0 (1) 1 (4) 4
+ * 0 0 1 (2) 1 (4) 8
+ * 1 0 2 (4) 1 (4) 16
+ * 1 0 3 (8) 1 (4) 32
+ * 1 0 4 (16) 1 (4) 64
+ * 0 0 0 (1) 0 (2) 2
+ * 0 0 1 (2) 0 (2) 4
+ * 1 0 2 (4) 0 (2) 8
+ * 1 0 3 (8) 0 (2) 16
+ * 1 0 4 (16) 0 (2) 32
+ */
+static const struct sd_div_table cpg_sd_div_table[] = {
+/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32),
+};
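Editorial aside, not part of the patch: each entry's .val packs the two stop bits together with the SRCFC and FC fields, and the effective divider is the product of the two stage dividers from the table above. A minimal decoder sketch, with a hypothetical helper name:

/* Hypothetical decoder mirroring cpg_sd_div_table (illustration only). */
static unsigned int sd_div_from_val(u32 val)
{
	unsigned int srcfc = (val >> 2) & 0x7;	/* stage divider = 1 << srcfc */
	unsigned int fc = val & 0x3;		/* stage divider = 2 << fc */

	return (1 << srcfc) * (2 << fc);	/* e.g. srcfc = 2, fc = 1 -> 16 */
}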
+
+#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
+
+static int cpg_sd_clock_enable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ u32 val, sd_fc;
+ unsigned int i;
+
+ val = clk_readl(clock->reg);
+
+ sd_fc = val & CPG_SD_FC_MASK;
+ for (i = 0; i < clock->div_num; i++)
+ if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ val &= ~(CPG_SD_STP_MASK);
+ val |= clock->div_table[i].val & CPG_SD_STP_MASK;
+
+ clk_writel(val, clock->reg);
+
+ return 0;
+}
+
+static void cpg_sd_clock_disable(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ clk_writel(clk_readl(clock->reg) | CPG_SD_STP_MASK, clock->reg);
+}
+
+static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+
+ return !(clk_readl(clock->reg) & CPG_SD_STP_MASK);
+}
+
+static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned long rate = parent_rate;
+ u32 val, sd_fc;
+ unsigned int i;
+
+ val = clk_readl(clock->reg);
+
+ sd_fc = val & CPG_SD_FC_MASK;
+ for (i = 0; i < clock->div_num; i++)
+ if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ return DIV_ROUND_CLOSEST(rate, clock->div_table[i].div);
+}
+
+static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ unsigned int div;
+
+ if (!rate)
+ rate = 1;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ return clamp_t(unsigned int, div, clock->div_min, clock->div_max);
+}
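Worked numbers for the helper above (illustrative, assuming an 800 MHz parent):

/*
 * rate = 200 MHz: DIV_ROUND_CLOSEST(800M, 200M) = 4, within [2, 64] and
 *                 present in cpg_sd_div_table, so set_rate() succeeds.
 * rate = 250 MHz: div = 3 survives the clamp but has no table entry, so
 *                 round_rate() below reports 800M / 3 while set_rate()
 *                 would return -EINVAL.
 */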
+
+static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate);
+
+ return DIV_ROUND_CLOSEST(*parent_rate, div);
+}
+
+static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sd_clock *clock = to_sd_clock(hw);
+ unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
+ u32 val;
+ unsigned int i;
+
+ for (i = 0; i < clock->div_num; i++)
+ if (div == clock->div_table[i].div)
+ break;
+
+ if (i >= clock->div_num)
+ return -EINVAL;
+
+ val = clk_readl(clock->reg);
+ val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK);
+ val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK);
+ clk_writel(val, clock->reg);
+
+ return 0;
+}
+
+static const struct clk_ops cpg_sd_clock_ops = {
+ .enable = cpg_sd_clock_enable,
+ .disable = cpg_sd_clock_disable,
+ .is_enabled = cpg_sd_clock_is_enabled,
+ .recalc_rate = cpg_sd_clock_recalc_rate,
+ .round_rate = cpg_sd_clock_round_rate,
+ .set_rate = cpg_sd_clock_set_rate,
+};
+
+static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
+ void __iomem *base,
+ const char *parent_name)
+{
+ struct clk_init_data init;
+ struct sd_clock *clock;
+ struct clk *clk;
+ unsigned int i;
+
+ clock = kzalloc(sizeof(*clock), GFP_KERNEL);
+ if (!clock)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = core->name;
+ init.ops = &cpg_sd_clock_ops;
+ init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clock->reg = base + core->offset;
+ clock->hw.init = &init;
+ clock->div_table = cpg_sd_div_table;
+ clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
+
+ clock->div_max = clock->div_table[0].div;
+ clock->div_min = clock->div_max;
+ for (i = 1; i < clock->div_num; i++) {
+ clock->div_max = max(clock->div_max, clock->div_table[i].div);
+ clock->div_min = min(clock->div_min, clock->div_table[i].div);
+ }
+
+ clk = clk_register(NULL, &clock->hw);
+ if (IS_ERR(clk))
+ kfree(clock);
+
+ return clk;
+}
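From a consumer's perspective the result is an ordinary CCF clock. A minimal usage sketch; the device binding and target rate are assumptions, not taken from the patch:

static int sd_consumer_setup(struct device *dev)
{
	struct clk *sdclk = devm_clk_get(dev, NULL);	/* hypothetical consumer */
	int ret;

	if (IS_ERR(sdclk))
		return PTR_ERR(sdclk);

	/* enable clears the CPG_SD_STP_* bits via cpg_sd_clock_enable() */
	ret = clk_prepare_enable(sdclk);
	if (ret)
		return ret;

	/* selects the nearest divider from cpg_sd_div_table */
	return clk_set_rate(sdclk, 200000000);
}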
#define CPG_PLL0CR 0x00d8
#define CPG_PLL2CR 0x002c
@@ -323,6 +575,9 @@ struct clk * __init r8a7795_cpg_clk_register(struct device *dev,
mult = (((value >> 24) & 0x7f) + 1) * 2;
break;
+ case CLK_TYPE_GEN3_SD:
+ return cpg_sd_clk_register(core, base, __clk_get_name(parent));
+
default:
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/clk/shmobile/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 9a4d888164bb..58e24b326a48 100644
--- a/drivers/clk/shmobile/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -348,6 +348,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
#else
dev_dbg(dev, "Ignoring MSTP %s to prevent disabling\n",
mod->name);
+ kfree(clock);
return;
#endif
}
@@ -568,7 +569,11 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
if (error)
return error;
- devm_add_action(dev, cpg_mssr_del_clk_provider, np);
+ error = devm_add_action_or_reset(dev,
+ cpg_mssr_del_clk_provider,
+ np);
+ if (error)
+ return error;
error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
info->num_core_pm_clks);
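The switch to devm_add_action_or_reset() closes a small leak window: devm_add_action() can itself fail with -ENOMEM, and its return value was previously ignored, leaving the clock provider registered with no teardown. The _or_reset variant runs the action immediately in that case; its core is roughly:

static inline int devm_add_action_or_reset(struct device *dev,
					   void (*action)(void *), void *data)
{
	int ret = devm_add_action(dev, action, data);

	if (ret)
		action(data);	/* undo right away if registration failed */

	return ret;
}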
diff --git a/drivers/clk/shmobile/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index e09f03cbf086..952b6957233b 100644
--- a/drivers/clk/shmobile/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -53,6 +53,8 @@ enum clk_types {
DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
#define DEF_DIV6P1(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_DIV6P1, _parent, .offset = _offset)
+#define DEF_SD(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
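Usage sketch for the new macro, following the shape of the DEF_DIV6P1 entries above; the register offset here is illustrative, not taken from this patch:

	DEF_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074),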
/*
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index d07374f48caf..4e73ed5cab58 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -116,7 +116,7 @@ static void rockchip_cpuclk_set_dividers(struct rockchip_cpuclk *cpuclk,
pr_debug("%s: setting reg 0x%x to 0x%x\n",
__func__, clksel->reg, clksel->val);
- writel(clksel->val , cpuclk->reg_base + clksel->reg);
+ writel(clksel->val, cpuclk->reg_base + clksel->reg);
}
}
@@ -290,14 +290,14 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
pr_err("%s: could not lookup parent clock %s\n",
__func__, parent_names[0]);
ret = -EINVAL;
- goto free_cpuclk;
+ goto free_alt_parent;
}
ret = clk_notifier_register(clk, &cpuclk->clk_nb);
if (ret) {
pr_err("%s: failed to register clock notifier for %s\n",
__func__, name);
- goto free_cpuclk;
+ goto free_alt_parent;
}
if (nrates > 0) {
@@ -326,6 +326,8 @@ free_rate_table:
kfree(cpuclk->rate_table);
unregister_notifier:
clk_notifier_unregister(clk, &cpuclk->clk_nb);
+free_alt_parent:
+ clk_disable_unprepare(cpuclk->alt_parent);
free_cpuclk:
kfree(cpuclk);
return ERR_PTR(ret);
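The new free_alt_parent label balances the clk_prepare_enable(cpuclk->alt_parent) performed earlier in this function, so every failure past that point now disables the alternate parent again. The labels unwind in reverse order of acquisition, the usual kernel shape (schematic sketch, names illustrative):

/*
 *	ret = acquire_a();	if (ret) return ret;
 *	ret = acquire_b();	if (ret) goto release_a;
 *	ret = acquire_c();	if (ret) goto release_b;
 *	return 0;
 * release_b:	release(b);
 * release_a:	release(a);
 *	return ret;
 */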
diff --git a/drivers/clk/rockchip/clk-inverter.c b/drivers/clk/rockchip/clk-inverter.c
index 7cbf43beb3c6..dcb6e37f3da1 100644
--- a/drivers/clk/rockchip/clk-inverter.c
+++ b/drivers/clk/rockchip/clk-inverter.c
@@ -90,7 +90,7 @@ struct clk *rockchip_clk_register_inverter(const char *name,
inv_clock = kmalloc(sizeof(*inv_clock), GFP_KERNEL);
if (!inv_clock)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init.name = name;
init.num_parents = num_parents;
@@ -106,11 +106,7 @@ struct clk *rockchip_clk_register_inverter(const char *name,
clk = clk_register(NULL, &inv_clock->hw);
if (IS_ERR(clk))
- goto err_free;
+ kfree(inv_clock);
return clk;
-
-err_free:
- kfree(inv_clock);
- return NULL;
}
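Returning ERR_PTR(-ENOMEM) instead of NULL matters because callers of these registration helpers test the result with IS_ERR(), under which a bare NULL reads as success. A sketch of the typical caller shape:

	clk = rockchip_clk_register_inverter(...);
	if (IS_ERR(clk))
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));	/* now reports -ENOMEM */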
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index 2685644826a0..e0dc7e83403a 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -150,7 +150,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
if (!mmc_clock)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init.name = name;
init.num_parents = num_parents;
@@ -172,11 +172,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
- goto err_free;
+ kfree(mmc_clock);
return clk;
-
-err_free:
- kfree(mmc_clock);
- return NULL;
}
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index b7e66c9dd9f2..5de797e34d54 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -94,6 +94,11 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
unsigned int val;
int delay = 24000000, ret;
+ if (IS_ERR(grf)) {
+ pr_err("%s: grf regmap not available\n", __func__);
+ return PTR_ERR(grf);
+ }
+
while (delay > 0) {
ret = regmap_read(grf, pll->lock_offset, &val);
if (ret) {
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index bc7fbac83ab7..7cdb2d61f3e0 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -177,6 +177,8 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(0, "gpll_armclk", "gpll", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(0), 6, GFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
/*
* Clock-Architecture Diagram 2
*/
@@ -187,6 +189,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 8, GFLAGS),
COMPOSITE_NOGATE(0, "ddrphy2x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+ FACTOR(0, "ddrphy", "ddrphy2x", 0, 1, 2),
COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
RK2928_CLKSEL_CON(1), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
@@ -263,6 +266,8 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
COMPOSITE(0, "aclk_vcodec", mux_pll_src_3plls_p, 0,
RK2928_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 11, GFLAGS),
+ FACTOR_GATE(HCLK_VCODEC, "hclk_vcodec", "aclk_vcodec", 0, 1, 4,
+ RK2928_CLKGATE_CON(3), 12, GFLAGS),
COMPOSITE(0, "aclk_hvec", mux_pll_src_3plls_p, 0,
RK2928_CLKSEL_CON(20), 0, 2, MFLAGS, 2, 5, DFLAGS,
@@ -343,7 +348,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(16), 0, 2, MFLAGS, 2, 5, DFLAGS,
RK2928_CLKGATE_CON(10), 5, GFLAGS),
- COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
+ COMPOSITE_NOGATE(SCLK_MACPLL, "mac_pll_src", mux_pll_src_3plls_p, CLK_SET_RATE_NO_REPARENT,
RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
@@ -351,6 +356,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 6, GFLAGS),
+ FACTOR(0, "sclk_macref_out", "hclk_peri_src", 0, 1, 2),
MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
RK2928_CLKSEL_CON(31), 0, 1, MFLAGS),
@@ -376,11 +382,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(ACLK_VIO, "aclk_vio", "aclk_disp1_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 13, GFLAGS),
GATE(ACLK_LCDC, "aclk_lcdc", "aclk_disp1_pre", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
- GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 12, GFLAGS),
GATE(HCLK_LCDC, "hclk_lcdc", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(9), 5, GFLAGS),
- /* hclk_video gates */
- GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(3), 12, GFLAGS),
/* xin24m gates */
GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK2928_CLKGATE_CON(10), 0, GFLAGS),
@@ -404,7 +408,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
GATE(HCLK_OTG1, "hclk_otg1", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_I2S, "hclk_i2s", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_sfc", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 14, GFLAGS),
- GATE(0, "hclk_mac", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 15, GFLAGS),
+ GATE(HCLK_MAC, "hclk_mac", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS),
/* pclk_peri gates */
GATE(0, "pclk_peri_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS),
@@ -444,34 +448,11 @@ static void __init rk3036_clk_init(struct device_node *np)
rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
- /* xin12m is created by an cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
if (IS_ERR(clk))
pr_warn("%s: could not register clock usb480m: %ld\n",
__func__, PTR_ERR(clk));
- clk = clk_register_fixed_factor(NULL, "ddrphy", "ddrphy2x", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock ddrphy: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_vcodec_pre",
- "aclk_vcodec", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "sclk_macref_out",
- "hclk_peri_src", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock sclk_macref_out: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(rk3036_pll_clks,
ARRAY_SIZE(rk3036_pll_clks),
RK3036_GRF_SOC_STATUS0);
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 7f7444cbf6fc..40bab3901491 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -339,13 +339,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
INVERTER(0, "pclk_cif0", "pclkin_cif0",
RK2928_CLKSEL_CON(30), 8, IFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
/*
* the 480m are generated inside the usb block from these clocks,
* but they are also a source for the hsicphy clock.
*/
- GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(1), 5, GFLAGS),
- GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(1), 6, GFLAGS),
COMPOSITE(0, "mac_src", mux_mac_p, 0,
@@ -605,7 +607,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(SCLK_TIMER2, "timer2", "xin24m", 0,
RK2928_CLKGATE_CON(3), 2, GFLAGS),
- COMPOSITE_NOMUX(0, "sclk_tsadc", "xin24m", 0,
+ COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0,
RK2928_CLKSEL_CON(34), 0, 16, DFLAGS,
RK2928_CLKGATE_CON(2), 15, GFLAGS),
@@ -662,11 +664,11 @@ static struct clk_div_table div_rk3188_aclk_core_t[] = {
{ /* sentinel */ },
};
-PNAME(mux_hsicphy_p) = { "sclk_otgphy0", "sclk_otgphy1",
+PNAME(mux_hsicphy_p) = { "sclk_otgphy0_480m", "sclk_otgphy1_480m",
"gpll", "cpll" };
static struct rockchip_clk_branch rk3188_i2s0_fracmux __initdata =
- MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+ MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(3), 8, 2, MFLAGS);
static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
@@ -722,7 +724,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 9, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(7), 0,
RK2928_CLKGATE_CON(0), 10, GFLAGS,
&rk3188_i2s0_fracmux),
@@ -748,12 +750,12 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_cpu",
"pclk_peri",
+ "hclk_cpubus"
};
static void __init rk3188_common_clk_init(struct device_node *np)
{
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -763,17 +765,6 @@ static void __init rk3188_common_clk_init(struct device_node *np)
rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
- /* xin12m is created by an cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_branches(common_clk_branches,
ARRAY_SIZE(common_clk_branches));
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 981a50205339..7702d2855e9c 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -187,7 +187,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(7), 1, GFLAGS),
GATE(0, "ddrc", "ddrphy_pre", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(8), 5, GFLAGS),
- GATE(0, "ddrphy", "ddrphy_pre", CLK_IGNORE_UNUSED,
+ FACTOR_GATE(0, "ddrphy", "ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
RK2928_CLKGATE_CON(7), 0, GFLAGS),
/* PD_CORE */
@@ -240,13 +240,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
COMPOSITE(0, "aclk_vpu_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(32), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 11, GFLAGS),
- GATE(0, "hclk_vpu_src", "aclk_vpu_pre", 0,
+ FACTOR_GATE(0, "hclk_vpu_pre", "aclk_vpu_pre", 0, 1, 4,
RK2928_CLKGATE_CON(4), 4, GFLAGS),
COMPOSITE(0, "aclk_rkvdec_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 2, GFLAGS),
- GATE(0, "hclk_rkvdec_src", "aclk_rkvdec_pre", 0,
+ FACTOR_GATE(0, "hclk_rkvdec_pre", "aclk_rkvdec_pre", 0, 1, 4,
RK2928_CLKGATE_CON(4), 5, GFLAGS),
COMPOSITE(0, "sclk_vdec_cabac", mux_pll_src_4plls_p, 0,
@@ -285,7 +285,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKSEL_CON(23), 14, 2, MFLAGS, 8, 6, DFLAGS,
RK2928_CLKGATE_CON(3), 5, GFLAGS),
- GATE(0, "sclk_hdmi_hdcp", "xin24m", 0,
+ GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
RK2928_CLKGATE_CON(3), 7, GFLAGS),
COMPOSITE(0, "sclk_hdmi_cec", mux_sclk_hdmi_cec_p, 0,
@@ -364,13 +364,15 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(3), 1, GFLAGS),
MUX(0, "sclk_vop_src", mux_sclk_vop_src_p, 0,
RK2928_CLKSEL_CON(27), 0, 1, MFLAGS),
- DIV(0, "dclk_hdmiphy", "sclk_vop_src", 0,
+ DIV(DCLK_HDMI_PHY, "dclk_hdmiphy", "sclk_vop_src", 0,
RK2928_CLKSEL_CON(29), 0, 3, DFLAGS),
DIV(0, "sclk_vop_pre", "sclk_vop_src", 0,
RK2928_CLKSEL_CON(27), 8, 8, DFLAGS),
- MUX(0, "dclk_vop", mux_dclk_vop_p, 0,
+ MUX(DCLK_VOP, "dclk_vop", mux_dclk_vop_p, 0,
RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(9), 15, 1, MFLAGS, 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 3, GFLAGS),
@@ -422,7 +424,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "sclk_otgphy1", "xin24m", 0,
RK2928_CLKGATE_CON(1), 6, GFLAGS),
- COMPOSITE_NOMUX(0, "sclk_tsadc", "xin24m", 0,
+ COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0,
RK2928_CLKSEL_CON(24), 6, 10, DFLAGS,
RK2928_CLKGATE_CON(2), 8, GFLAGS),
@@ -503,7 +505,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "aclk_iep", "aclk_iep_pre", 0, RK2928_CLKGATE_CON(13), 2, GFLAGS),
GATE(0, "aclk_iep_noc", "aclk_iep_pre", 0, RK2928_CLKGATE_CON(13), 9, GFLAGS),
- GATE(0, "aclk_vop", "aclk_vop_pre", 0, RK2928_CLKGATE_CON(13), 5, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vop_pre", 0, RK2928_CLKGATE_CON(13), 5, GFLAGS),
GATE(0, "aclk_vop_noc", "aclk_vop_pre", 0, RK2928_CLKGATE_CON(13), 12, GFLAGS),
GATE(0, "aclk_hdcp", "aclk_hdcp_pre", 0, RK2928_CLKGATE_CON(14), 10, GFLAGS),
@@ -511,13 +513,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "hclk_rga", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 1, GFLAGS),
GATE(0, "hclk_iep", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 3, GFLAGS),
- GATE(0, "hclk_vop", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 6, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 6, GFLAGS),
GATE(0, "hclk_vio_ahb_arbi", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 7, GFLAGS),
GATE(0, "hclk_vio_noc", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 8, GFLAGS),
GATE(0, "hclk_vop_noc", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 13, GFLAGS),
GATE(0, "hclk_vio_h2p", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 7, GFLAGS),
GATE(0, "hclk_hdcp_mmu", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 12, GFLAGS),
- GATE(0, "pclk_hdmi_ctrl", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 6, GFLAGS),
GATE(0, "pclk_vio_h2p", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 8, GFLAGS),
GATE(0, "pclk_hdcp", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 11, GFLAGS),
@@ -582,7 +584,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(PCLK_UART0, "pclk_uart0", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 12, GFLAGS),
GATE(PCLK_UART1, "pclk_uart1", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 13, GFLAGS),
GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 14, GFLAGS),
- GATE(0, "pclk_tsadc", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 15, GFLAGS),
GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(10), 0, GFLAGS),
GATE(0, "pclk_cru", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(10), 1, GFLAGS),
GATE(0, "pclk_sgrf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(10), 2, GFLAGS),
@@ -590,7 +592,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_ddrphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 3, GFLAGS),
GATE(0, "pclk_acodecphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 5, GFLAGS),
- GATE(0, "pclk_hdmiphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(PCLK_HDMI_PHY, "pclk_hdmiphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 7, GFLAGS),
GATE(0, "pclk_vdacphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 8, GFLAGS),
GATE(0, "pclk_phy_noc", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
@@ -605,13 +607,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
/* PD_MMC */
MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
- MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK3228_SDIO_CON0, 1),
- MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 0),
MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3228_EMMC_CON0, 1),
- MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 0),
};
static const char *const rk3228_critical_clocks[] __initconst = {
@@ -624,7 +626,6 @@ static const char *const rk3228_critical_clocks[] __initconst = {
static void __init rk3228_clk_init(struct device_node *np)
{
void __iomem *reg_base;
- struct clk *clk;
reg_base = of_iomap(np, 0);
if (!reg_base) {
@@ -634,29 +635,6 @@ static void __init rk3228_clk_init(struct device_node *np)
rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
- /* xin12m is created by an cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "ddrphy_pre", "ddrphy4x", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock ddrphy_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_vpu_pre",
- "hclk_vpu_src", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_vpu_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_rkvdec_pre",
- "hclk_rkvdec_src", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_rkvdec_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
rockchip_clk_register_plls(rk3228_pll_clks,
ARRAY_SIZE(rk3228_pll_clks),
RK3228_GRF_SOC_STATUS0);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 984fc187d12e..3cb72163a512 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -195,8 +195,8 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
-PNAME(mux_usbphy480m_p) = { "sclk_otgphy1", "sclk_otgphy2",
- "sclk_otgphy0" };
+PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
+ "sclk_otgphy0_480m" };
PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
PNAME(mux_hsicphy12m_p) = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
@@ -333,6 +333,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(0), 7, GFLAGS),
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(4), 15, 1, MFLAGS, 0, 7, DFLAGS,
RK3288_CLKGATE_CON(4), 1, GFLAGS),
@@ -399,12 +401,10 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
*/
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vdpu", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
- /*
- * We introduce a virtul node of hclk_vodec_pre_v to split one clock
- * struct with a gate and a fix divider into two node in software.
- */
- GATE(0, "hclk_vcodec_pre_v", "aclk_vdpu", 0,
+
+ FACTOR_GATE(0, "hclk_vcodec_pre", "aclk_vdpu", 0, 1, 4,
RK3288_CLKGATE_CON(3), 10, GFLAGS),
+
GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 1, GFLAGS),
@@ -537,11 +537,11 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3288_CLKGATE_CON(4), 10, GFLAGS),
- GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 4, GFLAGS),
- GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 5, GFLAGS),
- GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", CLK_IGNORE_UNUSED,
+ GATE(SCLK_OTGPHY2, "sclk_otgphy2", "xin24m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 6, GFLAGS),
GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 7, GFLAGS),
@@ -888,24 +888,6 @@ static void __init rk3288_clk_init(struct device_node *np)
rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS);
- /* xin12m is created by an cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
-
- clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock usb480m: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_vcodec_pre",
- "hclk_vcodec_pre_v", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
/* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
if (IS_ERR(clk))
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 21f3ea909fab..a2bb12200465 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -121,7 +121,7 @@ PNAME(mux_i2s_2ch_p) = { "i2s_2ch_src", "i2s_2ch_frac",
"dummy", "xin12m" };
PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac",
"ext_i2s", "xin12m" };
-PNAME(mux_edp_24m_p) = { "dummy", "xin24m" };
+PNAME(mux_edp_24m_p) = { "xin24m", "dummy" };
PNAME(mux_vip_out_p) = { "vip_src", "xin24m" };
PNAME(mux_usbphy480m_p) = { "usbotg_out", "xin24m" };
PNAME(mux_hsic_usbphy480m_p) = { "usbotg_out", "dummy" };
@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
.core_reg = RK3368_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
- .mux_core_shift = 15,
+ .mux_core_shift = 7,
};
static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
@@ -218,36 +218,66 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
}
static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
- RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
- RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
- RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
- RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
+ RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
+ RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
+ RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
};
static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
- RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
- RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
- RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
- RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
- RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
- RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
- RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+ RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
+ RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
+ RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
+ RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
+ RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
+ RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
+ RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
};
+static struct rockchip_clk_branch rk3368_i2s_8ch_fracmux __initdata =
+ MUX(0, "i2s_8ch_pre", mux_i2s_8ch_pre_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(27), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_spdif_8ch_fracmux __initdata =
+ MUX(0, "spdif_8ch_pre", mux_spdif_8ch_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(31), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_i2s_2ch_fracmux __initdata =
+ MUX(0, "i2s_2ch_pre", mux_i2s_2ch_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(53), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_uart0_fracmux __initdata =
+ MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(33), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_uart1_fracmux __initdata =
+ MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(35), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_uart3_fracmux __initdata =
+ MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(39), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3368_uart4_fracmux __initdata =
+ MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(41), 8, 2, MFLAGS);
+
static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
/*
* Clock-Architecture Diagram 2
*/
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
MUX(SCLK_USBPHY480M, "usbphy_480m", mux_usbphy480m_p, CLK_SET_RATE_PARENT,
RK3368_CLKSEL_CON(13), 8, 1, MFLAGS),
@@ -299,7 +329,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE_NOGATE_DIVTBL(0, "ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED,
RK3368_CLKSEL_CON(13), 4, 1, MFLAGS, 0, 2, DFLAGS, div_ddrphy_t),
- GATE(0, "sclk_ddr", "ddrphy_div4", CLK_IGNORE_UNUSED,
+ FACTOR_GATE(0, "sclk_ddr", "ddrphy_src", CLK_IGNORE_UNUSED, 1, 4,
RK3368_CLKGATE_CON(6), 14, GFLAGS),
GATE(0, "sclk_ddr4x", "ddrphy_src", CLK_IGNORE_UNUSED,
RK3368_CLKGATE_CON(6), 15, GFLAGS),
@@ -337,11 +367,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE(0, "i2s_8ch_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(27), 12, 1, MFLAGS, 0, 7, DFLAGS,
RK3368_CLKGATE_CON(6), 1, GFLAGS),
- COMPOSITE_FRAC(0, "i2s_8ch_frac", "i2s_8ch_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(28), 0,
- RK3368_CLKGATE_CON(6), 2, GFLAGS),
- MUX(0, "i2s_8ch_pre", mux_i2s_8ch_pre_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(27), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s_8ch_frac", "i2s_8ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(28), 0,
+ RK3368_CLKGATE_CON(6), 2, GFLAGS,
+ &rk3368_i2s_8ch_fracmux),
COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "i2s_8ch_clkout", mux_i2s_8ch_clkout_p, 0,
RK3368_CLKSEL_CON(27), 15, 1, MFLAGS,
RK3368_CLKGATE_CON(6), 0, GFLAGS),
@@ -350,21 +379,21 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE(0, "spdif_8ch_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(31), 12, 1, MFLAGS, 0, 7, DFLAGS,
RK3368_CLKGATE_CON(6), 4, GFLAGS),
- COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(32), 0,
- RK3368_CLKGATE_CON(6), 5, GFLAGS),
- COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
- RK3368_CLKSEL_CON(31), 8, 2, MFLAGS,
- RK3368_CLKGATE_CON(6), 6, GFLAGS),
+ COMPOSITE_FRACMUX(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(32), 0,
+ RK3368_CLKGATE_CON(6), 5, GFLAGS,
+ &rk3368_spdif_8ch_fracmux),
+ GATE(SCLK_SPDIF_8CH, "sclk_spdif_8ch", "spdif_8ch_pre", CLK_SET_RATE_PARENT,
+ RK3368_CLKGATE_CON(6), 6, GFLAGS),
COMPOSITE(0, "i2s_2ch_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(53), 12, 1, MFLAGS, 0, 7, DFLAGS,
RK3368_CLKGATE_CON(5), 13, GFLAGS),
- COMPOSITE_FRAC(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(54), 0,
- RK3368_CLKGATE_CON(5), 14, GFLAGS),
- COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, 0,
- RK3368_CLKSEL_CON(53), 8, 2, MFLAGS,
- RK3368_CLKGATE_CON(5), 15, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(54), 0,
+ RK3368_CLKGATE_CON(5), 14, GFLAGS,
+ &rk3368_i2s_2ch_fracmux),
+ GATE(SCLK_I2S_2CH, "sclk_i2s_2ch", "i2s_2ch_pre", CLK_SET_RATE_PARENT,
+ RK3368_CLKGATE_CON(5), 15, GFLAGS),
COMPOSITE(0, "sclk_tsp", mux_pll_src_cpll_gpll_npll_p, 0,
RK3368_CLKSEL_CON(46), 6, 2, MFLAGS, 0, 5, DFLAGS,
@@ -384,18 +413,18 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
* Clock-Architecture Diagram 3
*/
- COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 6, GFLAGS),
- COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+ COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3368_CLKGATE_CON(4), 7, GFLAGS),
/*
- * We introduce a virtual node of hclk_vodec_pre_v to split one clock
- * struct with a gate and a fix divider into two node in software.
+	 * We use aclk_vdpu by default, per the GRF_SOC_CON0[7] setting in the
+	 * system, so we ignore the mux and model the clock nodes as follows:
*/
- GATE(0, "hclk_video_pre_v", "aclk_vdpu", 0,
+ FACTOR_GATE(0, "hclk_video_pre", "aclk_vdpu", 0, 1, 4,
RK3368_CLKGATE_CON(4), 8, GFLAGS),
COMPOSITE(0, "sclk_hevc_cabac_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
@@ -442,7 +471,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
RK3368_CLKGATE_CON(4), 13, GFLAGS),
GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
- RK3368_CLKGATE_CON(5), 12, GFLAGS),
+ RK3368_CLKGATE_CON(4), 12, GFLAGS),
COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
@@ -560,38 +589,34 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gpll_usb_usb_p, 0,
RK3368_CLKSEL_CON(33), 12, 2, MFLAGS, 0, 7, DFLAGS,
RK3368_CLKGATE_CON(2), 0, GFLAGS),
- COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(34), 0,
- RK3368_CLKGATE_CON(2), 1, GFLAGS),
- MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(33), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(34), 0,
+ RK3368_CLKGATE_CON(2), 1, GFLAGS,
+ &rk3368_uart0_fracmux),
COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
RK3368_CLKSEL_CON(35), 0, 7, DFLAGS,
RK3368_CLKGATE_CON(2), 2, GFLAGS),
- COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(36), 0,
- RK3368_CLKGATE_CON(2), 3, GFLAGS),
- MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(35), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(36), 0,
+ RK3368_CLKGATE_CON(2), 3, GFLAGS,
+ &rk3368_uart1_fracmux),
COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
RK3368_CLKSEL_CON(39), 0, 7, DFLAGS,
RK3368_CLKGATE_CON(2), 6, GFLAGS),
- COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(40), 0,
- RK3368_CLKGATE_CON(2), 7, GFLAGS),
- MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(39), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(40), 0,
+ RK3368_CLKGATE_CON(2), 7, GFLAGS,
+ &rk3368_uart3_fracmux),
COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
RK3368_CLKSEL_CON(41), 0, 7, DFLAGS,
RK3368_CLKGATE_CON(2), 8, GFLAGS),
- COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(42), 0,
- RK3368_CLKGATE_CON(2), 9, GFLAGS),
- MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
- RK3368_CLKSEL_CON(41), 8, 2, MFLAGS),
+ COMPOSITE_FRACMUX(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
+ RK3368_CLKSEL_CON(42), 0,
+ RK3368_CLKGATE_CON(2), 9, GFLAGS,
+ &rk3368_uart4_fracmux),
COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
RK3368_CLKSEL_CON(43), 6, 2, MFLAGS, 0, 5, DFLAGS,
@@ -842,24 +867,6 @@ static void __init rk3368_clk_init(struct device_node *np)
rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
- /* xin12m is created by a cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
- /* ddrphy_div4 is created by a cru-internal divider */
- clk = clk_register_fixed_factor(NULL, "ddrphy_div4", "ddrphy_src", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock xin12m: %ld\n",
- __func__, PTR_ERR(clk));
-
- clk = clk_register_fixed_factor(NULL, "hclk_video_pre",
- "hclk_video_pre_v", 0, 1, 4);
- if (IS_ERR(clk))
- pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
- __func__, PTR_ERR(clk));
-
/* Watchdog pclk is controlled by sgrf_soc_con3[7]. */
clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
if (IS_ERR(clk))
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index d9a0b5d4d47f..ec06350c78c4 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (gate_offset >= 0) {
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
- return ERR_PTR(-ENOMEM);
+ goto err_gate;
gate->flags = gate_flags;
gate->reg = base + gate_offset;
@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
if (div_width > 0) {
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
- return ERR_PTR(-ENOMEM);
+ goto err_div;
div->flags = div_flags;
div->reg = base + muxdiv_offset;
@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name,
div->width = div_width;
div->lock = lock;
div->table = div_table;
- div_ops = &clk_divider_ops;
+ div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
+ ? &clk_divider_ro_ops
+ : &clk_divider_ops;
}
clk = clk_register_composite(NULL, name, parent_names, num_parents,
@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
flags);
return clk;
+err_div:
+ kfree(gate);
+err_gate:
+ kfree(mux);
+ return ERR_PTR(-ENOMEM);
}
struct rockchip_clk_frac {
@@ -260,6 +267,53 @@ static struct clk *rockchip_clk_register_frac_branch(const char *name,
return clk;
}
+static struct clk *rockchip_clk_register_factor_branch(const char *name,
+ const char *const *parent_names, u8 num_parents,
+ void __iomem *base, unsigned int mult, unsigned int div,
+ int gate_offset, u8 gate_shift, u8 gate_flags,
+ unsigned long flags, spinlock_t *lock)
+{
+ struct clk *clk;
+ struct clk_gate *gate = NULL;
+ struct clk_fixed_factor *fix = NULL;
+
+ /* without gate, register a simple factor clock */
+ if (gate_offset == 0) {
+ return clk_register_fixed_factor(NULL, name,
+ parent_names[0], flags, mult,
+ div);
+ }
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ gate->flags = gate_flags;
+ gate->reg = base + gate_offset;
+ gate->bit_idx = gate_shift;
+ gate->lock = lock;
+
+ fix = kzalloc(sizeof(*fix), GFP_KERNEL);
+ if (!fix) {
+ kfree(gate);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fix->mult = mult;
+ fix->div = div;
+
+ clk = clk_register_composite(NULL, name, parent_names, num_parents,
+ NULL, NULL,
+ &fix->hw, &clk_fixed_factor_ops,
+ &gate->hw, &clk_gate_ops, flags);
+ if (IS_ERR(clk)) {
+ kfree(fix);
+ kfree(gate);
+ }
+
+ return clk;
+}
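clk_register_composite() stitches the two halves together: rate behaviour comes from clk_fixed_factor_ops on fix->hw, gating from clk_gate_ops on gate->hw. The resulting rate relation is simply:

	/* rate seen by children = parent_rate * fix->mult / fix->div,
	 * gated by bit gate_shift of the register at base + gate_offset */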
+
static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
@@ -395,6 +449,14 @@ void __init rockchip_clk_register_branches(
reg_base + list->muxdiv_offset,
list->div_shift, list->div_flags, &clk_lock);
break;
+ case branch_factor:
+ clk = rockchip_clk_register_factor_branch(
+ list->name, list->parent_names,
+ list->num_parents, reg_base,
+ list->div_shift, list->div_width,
+ list->gate_offset, list->gate_shift,
+ list->gate_flags, flags, &clk_lock);
+ break;
}
/* none of the cases above matched */
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index ff8bd23a93ec..39c198bbcbee 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -254,6 +254,7 @@ enum rockchip_clk_branch_type {
branch_gate,
branch_mmc,
branch_inverter,
+ branch_factor,
};
struct rockchip_clk_branch {
@@ -508,6 +509,33 @@ struct rockchip_clk_branch {
.div_flags = if, \
}
+#define FACTOR(_id, cname, pname, f, fm, fd) \
+ { \
+ .id = _id, \
+ .branch_type = branch_factor, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .div_shift = fm, \
+ .div_width = fd, \
+ }
+
+#define FACTOR_GATE(_id, cname, pname, f, fm, fd, go, gb, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_factor, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .div_shift = fm, \
+ .div_width = fd, \
+ .gate_offset = go, \
+ .gate_shift = gb, \
+ .gate_flags = gf, \
+ }
+
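Both macros reuse the div_shift/div_width fields to carry the factor's mult/div, which rockchip_clk_register_factor_branch() unpacks. Usage mirrors the rk3036 conversion earlier in this diff:

	FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
	FACTOR_GATE(HCLK_VCODEC, "hclk_vcodec", "aclk_vcodec", 0, 1, 4,
			RK2928_CLKGATE_CON(3), 12, GFLAGS),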
void rockchip_clk_init(struct device_node *np, void __iomem *base,
unsigned long nr_clks);
struct regmap *rockchip_clk_get_grf(void);
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
index 552f7bb15bc5..21218987bbc3 100644
--- a/drivers/clk/rockchip/softrst.c
+++ b/drivers/clk/rockchip/softrst.c
@@ -81,7 +81,7 @@ static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops rockchip_softrst_ops = {
+static const struct reset_control_ops rockchip_softrst_ops = {
.assert = rockchip_softrst_assert,
.deassert = rockchip_softrst_deassert,
};
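Constifying the ops table works because the reset core only dereferences it through a const pointer, so the structure can live in .rodata; nothing changes at the assignment site:

	softrst->rcdev.ops = &rockchip_softrst_ops;	/* const-qualified target */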
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 84196ecdaa12..20c5fe92ab4a 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -1,9 +1,17 @@
+# Recent Exynos platforms should just select COMMON_CLK_SAMSUNG:
config COMMON_CLK_SAMSUNG
- bool
- select COMMON_CLK
+ bool "Samsung Exynos clock controller support" if COMPILE_TEST
+ # Clocks on ARM64 SoCs (e.g. Exynos5433, Exynos7) are chosen by
+ # EXYNOS_ARM64_COMMON_CLK to avoid building them on ARMv7:
+ select EXYNOS_ARM64_COMMON_CLK if ARM64 && ARCH_EXYNOS
+
+config EXYNOS_ARM64_COMMON_CLK
+ bool "Samsung Exynos ARMv8-family clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+# For S3C24XX platforms, select the following symbols:
config S3C2410_COMMON_CLK
- bool
+ bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
help
Build the s3c2410 clock driver based on the common clock framework.
@@ -17,10 +25,9 @@ config S3C2410_COMMON_DCLK
framework.
config S3C2412_COMMON_CLK
- bool
+ bool "Samsung S3C2412 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
config S3C2443_COMMON_CLK
- bool
+ bool "Samsung S3C2443 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
-
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 5f6833ea355d..fc367d4b2902 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -10,11 +10,11 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
-obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos5433.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
-obj-$(CONFIG_ARCH_EXYNOS7) += clk-exynos7.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ac03e4fe2871..7b3d0f975987 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -500,19 +500,19 @@ PNAME(clkout_cpu_p4x12) = { "fout_apll_div_2", "none", "none", "none",
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
- FRATE(CLK_XXTI, "xxti", NULL, CLK_IS_ROOT, 0),
- FRATE(CLK_XUSBXTI, "xusbxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_XXTI, "xxti", NULL, 0, 0),
+ FRATE(CLK_XUSBXTI, "xusbxti", NULL, 0, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
- FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
+ FRATE(0, "sclk_hdmi24m", NULL, 0, 24000000),
FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", "hdmi", 0, 27000000),
- FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_usbphy0", NULL, 0, 48000000),
};
static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata = {
- FRATE(0, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "sclk_usbphy1", NULL, 0, 48000000),
};
static struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initdata = {
@@ -1251,7 +1251,7 @@ static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx)
fclk.id = CLK_FIN_PLL;
fclk.name = "fin_pll";
fclk.parent_name = NULL;
- fclk.flags = CLK_IS_ROOT;
+ fclk.flags = 0;
fclk.fixed_rate = finpll_f;
samsung_clk_register_fixed_rate(ctx, &fclk, 1);
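CLK_IS_ROOT was always a no-op hint in the common clock framework: a clock registered with a NULL parent is a root by construction, so the flag can be dropped wholesale across these tables. Registering an external oscillator stays as simple as (rate illustrative):

	clk = clk_register_fixed_rate(NULL, "fin_pll", NULL, 0, 24000000);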
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 92c39f6efec8..86ee06b226bd 100644
--- a/drivers/clk/samsung/clk-exynos4415.c
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -274,7 +274,7 @@ static struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initda
};
static struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initdata = {
- FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 27000000),
};
static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = {
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 5bebf8cb0d70..837197db4ffb 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -262,15 +262,15 @@ PNAME(mout_spdif_p) = { "sclk_audio0", "sclk_audio1", "sclk_audio2",
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5250_fixed_rate_ext_clks[] __initdata = {
- FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_FIN_PLL, "fin_pll", NULL, 0, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos5250_fixed_rate_clks[] __initdata = {
- FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
- FRATE(0, "sclk_dptxphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_uhostphy", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 24000000),
+ FRATE(0, "sclk_hdmi27m", NULL, 0, 27000000),
+ FRATE(0, "sclk_dptxphy", NULL, 0, 24000000),
+ FRATE(0, "sclk_uhostphy", NULL, 0, 48000000),
};
static struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index d1a29f6c1084..7a7ed075a573 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -1432,42 +1432,38 @@ static unsigned long top_clk_regs[] __initdata = {
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = {
FRATE(PHYCLK_DPTX_PHY_CH3_TXD_CLK, "phyclk_dptx_phy_ch3_txd_clk", NULL,
- CLK_IS_ROOT, 270000000),
+ 0, 270000000),
FRATE(PHYCLK_DPTX_PHY_CH2_TXD_CLK, "phyclk_dptx_phy_ch2_txd_clk", NULL,
- CLK_IS_ROOT, 270000000),
+ 0, 270000000),
FRATE(PHYCLK_DPTX_PHY_CH1_TXD_CLK, "phyclk_dptx_phy_ch1_txd_clk", NULL,
- CLK_IS_ROOT, 270000000),
+ 0, 270000000),
FRATE(PHYCLK_DPTX_PHY_CH0_TXD_CLK, "phyclk_dptx_phy_ch0_txd_clk", NULL,
- CLK_IS_ROOT, 270000000),
+ 0, 270000000),
FRATE(phyclk_hdmi_phy_tmds_clko, "phyclk_hdmi_phy_tmds_clko", NULL,
- CLK_IS_ROOT, 250000000),
+ 0, 250000000),
FRATE(PHYCLK_HDMI_PHY_PIXEL_CLKO, "phyclk_hdmi_phy_pixel_clko", NULL,
- CLK_IS_ROOT, 1660000000),
+ 0, 1660000000),
FRATE(PHYCLK_HDMI_LINK_O_TMDS_CLKHI, "phyclk_hdmi_link_o_tmds_clkhi",
- NULL, CLK_IS_ROOT, 125000000),
+ NULL, 0, 125000000),
FRATE(PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS,
"phyclk_mipi_dphy_4l_m_txbyte_clkhs" , NULL,
- CLK_IS_ROOT, 187500000),
+ 0, 187500000),
FRATE(PHYCLK_DPTX_PHY_O_REF_CLK_24M, "phyclk_dptx_phy_o_ref_clk_24m",
- NULL, CLK_IS_ROOT, 24000000),
+ NULL, 0, 24000000),
FRATE(PHYCLK_DPTX_PHY_CLK_DIV2, "phyclk_dptx_phy_clk_div2", NULL,
- CLK_IS_ROOT, 135000000),
+ 0, 135000000),
FRATE(PHYCLK_MIPI_DPHY_4L_M_RXCLKESC0,
- "phyclk_mipi_dphy_4l_m_rxclkesc0", NULL,
- CLK_IS_ROOT, 20000000),
+ "phyclk_mipi_dphy_4l_m_rxclkesc0", NULL, 0, 20000000),
FRATE(PHYCLK_USBHOST20_PHY_PHYCLOCK, "phyclk_usbhost20_phy_phyclock",
- NULL, CLK_IS_ROOT, 60000000),
+ NULL, 0, 60000000),
FRATE(PHYCLK_USBHOST20_PHY_FREECLK, "phyclk_usbhost20_phy_freeclk",
- NULL, CLK_IS_ROOT, 60000000),
+ NULL, 0, 60000000),
FRATE(PHYCLK_USBHOST20_PHY_CLK48MOHCI,
- "phyclk_usbhost20_phy_clk48mohci",
- NULL, CLK_IS_ROOT, 48000000),
+ "phyclk_usbhost20_phy_clk48mohci", NULL, 0, 48000000),
FRATE(PHYCLK_USBDRD30_UDRD30_PIPE_PCLK,
- "phyclk_usbdrd30_udrd30_pipe_pclk", NULL,
- CLK_IS_ROOT, 125000000),
+ "phyclk_usbdrd30_udrd30_pipe_pclk", NULL, 0, 125000000),
FRATE(PHYCLK_USBDRD30_UDRD30_PHYCLOCK,
- "phyclk_usbdrd30_udrd30_phyclock", NULL,
- CLK_IS_ROOT, 60000000),
+ "phyclk_usbdrd30_udrd30_phyclock", NULL, 0, 60000000),
};
PNAME(mout_memtop_pll_user_p) = {"fin_pll", "dout_mem_pll"};
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index d048dedd8b72..be03ed0fcb6b 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -480,16 +480,16 @@ PNAME(mout_group15_5800_p) = { "dout_osc_div", "mout_sw_aclk550_cam" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock
exynos5x_fixed_rate_ext_clks[] __initdata = {
- FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0),
+ FRATE(CLK_FIN_PLL, "fin_pll", NULL, 0, 0),
};
/* fixed rate clocks generated inside the soc */
static struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initdata = {
- FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000),
- FRATE(0, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000),
- FRATE(0, "mphy_refclk_ixtal24", NULL, CLK_IS_ROOT, 48000000),
- FRATE(0, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000),
+ FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 24000000),
+ FRATE(0, "sclk_pwi", NULL, 0, 24000000),
+ FRATE(0, "sclk_usbh20", NULL, 0, 48000000),
+ FRATE(0, "mphy_refclk_ixtal24", NULL, 0, 48000000),
+ FRATE(0, "sclk_usbh20_scan_clk", NULL, 0, 480000000),
};
static struct samsung_fixed_factor_clock
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index cee062c588de..128527b8fbeb 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -142,17 +142,6 @@ static unsigned long top_clk_regs[] __initdata = {
MUX_ENABLE_TOP_FSYS1,
MUX_ENABLE_TOP_PERIC0,
MUX_ENABLE_TOP_PERIC1,
- MUX_STAT_TOP0,
- MUX_STAT_TOP1,
- MUX_STAT_TOP2,
- MUX_STAT_TOP3,
- MUX_STAT_TOP4,
- MUX_STAT_TOP_MSCL,
- MUX_STAT_TOP_CAM1,
- MUX_STAT_TOP_FSYS0,
- MUX_STAT_TOP_FSYS1,
- MUX_STAT_TOP_PERIC0,
- MUX_STAT_TOP_PERIC1,
DIV_TOP0,
DIV_TOP1,
DIV_TOP2,
@@ -170,22 +159,6 @@ static unsigned long top_clk_regs[] __initdata = {
DIV_TOP_PERIC3,
DIV_TOP_PERIC4,
DIV_TOP_PLL_FREQ_DET,
- DIV_STAT_TOP0,
- DIV_STAT_TOP1,
- DIV_STAT_TOP2,
- DIV_STAT_TOP3,
- DIV_STAT_TOP4,
- DIV_STAT_TOP_MSCL,
- DIV_STAT_TOP_CAM10,
- DIV_STAT_TOP_CAM11,
- DIV_STAT_TOP_FSYS0,
- DIV_STAT_TOP_FSYS1,
- DIV_STAT_TOP_FSYS2,
- DIV_STAT_TOP_PERIC0,
- DIV_STAT_TOP_PERIC1,
- DIV_STAT_TOP_PERIC2,
- DIV_STAT_TOP_PERIC3,
- DIV_STAT_TOP_PLL_FREQ_DET,
ENABLE_ACLK_TOP,
ENABLE_SCLK_TOP,
ENABLE_SCLK_TOP_MSCL,
@@ -251,18 +224,18 @@ static struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initdata = {
static struct samsung_fixed_rate_clock top_fixed_clks[] __initdata = {
/* Xi2s{0|1}CDCLK input clock for I2S/PCM */
- FRATE(0, "ioclk_audiocdclk1", NULL, CLK_IS_ROOT, 100000000),
- FRATE(0, "ioclk_audiocdclk0", NULL, CLK_IS_ROOT, 100000000),
+ FRATE(0, "ioclk_audiocdclk1", NULL, 0, 100000000),
+ FRATE(0, "ioclk_audiocdclk0", NULL, 0, 100000000),
/* Xi2s1SDI input clock for SPDIF */
- FRATE(0, "ioclk_spdif_extclk", NULL, CLK_IS_ROOT, 100000000),
+ FRATE(0, "ioclk_spdif_extclk", NULL, 0, 100000000),
/* XspiCLK[4:0] input clock for SPI */
- FRATE(0, "ioclk_spi4_clk_in", NULL, CLK_IS_ROOT, 50000000),
- FRATE(0, "ioclk_spi3_clk_in", NULL, CLK_IS_ROOT, 50000000),
- FRATE(0, "ioclk_spi2_clk_in", NULL, CLK_IS_ROOT, 50000000),
- FRATE(0, "ioclk_spi1_clk_in", NULL, CLK_IS_ROOT, 50000000),
- FRATE(0, "ioclk_spi0_clk_in", NULL, CLK_IS_ROOT, 50000000),
+ FRATE(0, "ioclk_spi4_clk_in", NULL, 0, 50000000),
+ FRATE(0, "ioclk_spi3_clk_in", NULL, 0, 50000000),
+ FRATE(0, "ioclk_spi2_clk_in", NULL, 0, 50000000),
+ FRATE(0, "ioclk_spi1_clk_in", NULL, 0, 50000000),
+ FRATE(0, "ioclk_spi0_clk_in", NULL, 0, 50000000),
/* Xi2s1SCLK input clock for I2S1_BCLK */
- FRATE(0, "ioclk_i2s1_bclk_in", NULL, CLK_IS_ROOT, 12288000),
+ FRATE(0, "ioclk_i2s1_bclk_in", NULL, 0, 12288000),
};
static struct samsung_mux_clock top_mux_clks[] __initdata = {
@@ -490,9 +463,9 @@ static struct samsung_div_clock top_div_clks[] __initdata = {
DIV(CLK_DIV_SCLK_ISP_SENSOR1_A, "div_sclk_isp_sensor1_a",
"mout_sclk_isp_sensor1", DIV_TOP_CAM11, 8, 4),
DIV(CLK_DIV_SCLK_ISP_SENSOR0_B, "div_sclk_isp_sensor0_b",
- "div_sclk_isp_sensor0_a", DIV_TOP_CAM11, 12, 4),
+ "div_sclk_isp_sensor0_a", DIV_TOP_CAM11, 4, 4),
DIV(CLK_DIV_SCLK_ISP_SENSOR0_A, "div_sclk_isp_sensor0_a",
- "mout_sclk_isp_sensor0", DIV_TOP_CAM11, 8, 4),
+ "mout_sclk_isp_sensor0", DIV_TOP_CAM11, 0, 4),
/* DIV_TOP_FSYS0 */
DIV(CLK_DIV_SCLK_MMC1_B, "div_sclk_mmc1_b", "div_sclk_mmc1_a",
@@ -999,26 +972,12 @@ static unsigned long mif_clk_regs[] __initdata = {
MUX_ENABLE_MIF5,
MUX_ENABLE_MIF6,
MUX_ENABLE_MIF7,
- MUX_STAT_MIF0,
- MUX_STAT_MIF1,
- MUX_STAT_MIF2,
- MUX_STAT_MIF3,
- MUX_STAT_MIF4,
- MUX_STAT_MIF5,
- MUX_STAT_MIF6,
- MUX_STAT_MIF7,
DIV_MIF1,
DIV_MIF2,
DIV_MIF3,
DIV_MIF4,
DIV_MIF5,
DIV_MIF_PLL_FREQ_DET,
- DIV_STAT_MIF1,
- DIV_STAT_MIF2,
- DIV_STAT_MIF3,
- DIV_STAT_MIF4,
- DIV_STAT_MIF5,
- DIV_STAT_MIF_PLL_FREQ_DET,
ENABLE_ACLK_MIF0,
ENABLE_ACLK_MIF1,
ENABLE_ACLK_MIF2,
@@ -1565,7 +1524,6 @@ CLK_OF_DECLARE(exynos5433_cmu_mif, "samsung,exynos5433-cmu-mif",
static unsigned long peric_clk_regs[] __initdata = {
DIV_PERIC,
- DIV_STAT_PERIC,
ENABLE_ACLK_PERIC,
ENABLE_PCLK_PERIC0,
ENABLE_PCLK_PERIC1,
@@ -2012,11 +1970,6 @@ static unsigned long fsys_clk_regs[] __initdata = {
MUX_ENABLE_FSYS2,
MUX_ENABLE_FSYS3,
MUX_ENABLE_FSYS4,
- MUX_STAT_FSYS0,
- MUX_STAT_FSYS1,
- MUX_STAT_FSYS2,
- MUX_STAT_FSYS3,
- MUX_STAT_FSYS4,
MUX_IGNORE_FSYS2,
MUX_IGNORE_FSYS3,
ENABLE_ACLK_FSYS0,
@@ -2031,42 +1984,40 @@ static struct samsung_fixed_rate_clock fsys_fixed_clks[] __initdata = {
/* PHY clocks from USBDRD30_PHY */
FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY,
"phyclk_usbdrd30_udrd30_phyclock_phy", NULL,
- CLK_IS_ROOT, 60000000),
+ 0, 60000000),
FRATE(CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY,
"phyclk_usbdrd30_udrd30_pipe_pclk_phy", NULL,
- CLK_IS_ROOT, 125000000),
+ 0, 125000000),
/* PHY clocks from USBHOST30_PHY */
FRATE(CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY,
"phyclk_usbhost30_uhost30_phyclock_phy", NULL,
- CLK_IS_ROOT, 60000000),
+ 0, 60000000),
FRATE(CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY,
"phyclk_usbhost30_uhost30_pipe_pclk_phy", NULL,
- CLK_IS_ROOT, 125000000),
+ 0, 125000000),
/* PHY clocks from USBHOST20_PHY */
FRATE(CLK_PHYCLK_USBHOST20_PHY_FREECLK_PHY,
- "phyclk_usbhost20_phy_freeclk_phy", NULL, CLK_IS_ROOT,
- 60000000),
+ "phyclk_usbhost20_phy_freeclk_phy", NULL, 0, 60000000),
FRATE(CLK_PHYCLK_USBHOST20_PHY_PHYCLOCK_PHY,
- "phyclk_usbhost20_phy_phyclock_phy", NULL, CLK_IS_ROOT,
- 60000000),
+ "phyclk_usbhost20_phy_phyclock_phy", NULL, 0, 60000000),
FRATE(CLK_PHYCLK_USBHOST20_PHY_CLK48MOHCI_PHY,
"phyclk_usbhost20_phy_clk48mohci_phy", NULL,
- CLK_IS_ROOT, 48000000),
+ 0, 48000000),
FRATE(CLK_PHYCLK_USBHOST20_PHY_HSIC1_PHY,
- "phyclk_usbhost20_phy_hsic1_phy", NULL, CLK_IS_ROOT,
+ "phyclk_usbhost20_phy_hsic1_phy", NULL, 0,
60000000),
/* PHY clocks from UFS_PHY */
FRATE(CLK_PHYCLK_UFS_TX0_SYMBOL_PHY, "phyclk_ufs_tx0_symbol_phy",
- NULL, CLK_IS_ROOT, 300000000),
+ NULL, 0, 300000000),
FRATE(CLK_PHYCLK_UFS_RX0_SYMBOL_PHY, "phyclk_ufs_rx0_symbol_phy",
- NULL, CLK_IS_ROOT, 300000000),
+ NULL, 0, 300000000),
FRATE(CLK_PHYCLK_UFS_TX1_SYMBOL_PHY, "phyclk_ufs_tx1_symbol_phy",
- NULL, CLK_IS_ROOT, 300000000),
+ NULL, 0, 300000000),
FRATE(CLK_PHYCLK_UFS_RX1_SYMBOL_PHY, "phyclk_ufs_rx1_symbol_phy",
- NULL, CLK_IS_ROOT, 300000000),
+ NULL, 0, 300000000),
/* PHY clocks from LLI_PHY */
FRATE(CLK_PHYCLK_LLI_MPHY_TO_UFS_PHY, "phyclk_lli_mphy_to_ufs_phy",
- NULL, CLK_IS_ROOT, 26000000),
+ NULL, 0, 26000000),
};
static struct samsung_mux_clock fsys_mux_clks[] __initdata = {
@@ -2362,9 +2313,7 @@ CLK_OF_DECLARE(exynos5433_cmu_fsys, "samsung,exynos5433-cmu-fsys",
static unsigned long g2d_clk_regs[] __initdata = {
MUX_SEL_G2D0,
MUX_SEL_ENABLE_G2D0,
- MUX_SEL_STAT_G2D0,
DIV_G2D,
- DIV_STAT_G2D,
DIV_ENABLE_ACLK_G2D,
DIV_ENABLE_ACLK_G2D_SECURE_SMMU_G2D,
DIV_ENABLE_PCLK_G2D,
@@ -2520,16 +2469,9 @@ static unsigned long disp_clk_regs[] __initdata = {
MUX_ENABLE_DISP2,
MUX_ENABLE_DISP3,
MUX_ENABLE_DISP4,
- MUX_STAT_DISP0,
- MUX_STAT_DISP1,
- MUX_STAT_DISP2,
- MUX_STAT_DISP3,
- MUX_STAT_DISP4,
MUX_IGNORE_DISP2,
DIV_DISP,
DIV_DISP_PLL_FREQ_DET,
- DIV_STAT_DISP,
- DIV_STAT_DISP_PLL_FREQ_DET,
ENABLE_ACLK_DISP0,
ENABLE_ACLK_DISP1,
ENABLE_PCLK_DISP,
@@ -2604,18 +2546,16 @@ static struct samsung_fixed_factor_clock disp_fixed_factor_clks[] __initdata = {
static struct samsung_fixed_rate_clock disp_fixed_clks[] __initdata = {
/* PHY clocks from MIPI_DPHY1 */
- FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, CLK_IS_ROOT,
- 188000000),
- FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, CLK_IS_ROOT,
- 100000000),
+ FRATE(0, "phyclk_mipidphy1_bitclkdiv8_phy", NULL, 0, 188000000),
+ FRATE(0, "phyclk_mipidphy1_rxclkesc0_phy", NULL, 0, 100000000),
/* PHY clocks from MIPI_DPHY0 */
- FRATE(0, "phyclk_mipidphy0_bitclkdiv8_phy", NULL, CLK_IS_ROOT,
- 188000000),
- FRATE(0, "phyclk_mipidphy0_rxclkesc0_phy", NULL, CLK_IS_ROOT,
- 100000000),
+ FRATE(0, "phyclk_mipidphy0_bitclkdiv8_phy", NULL, 0, 188000000),
+ FRATE(0, "phyclk_mipidphy0_rxclkesc0_phy", NULL, 0, 100000000),
/* PHY clocks from HDMI_PHY */
- FRATE(0, "phyclk_hdmiphy_tmds_clko_phy", NULL, CLK_IS_ROOT, 300000000),
- FRATE(0, "phyclk_hdmiphy_pixel_clko_phy", NULL, CLK_IS_ROOT, 166000000),
+ FRATE(CLK_PHYCLK_HDMIPHY_TMDS_CLKO_PHY, "phyclk_hdmiphy_tmds_clko_phy",
+ NULL, 0, 300000000),
+ FRATE(CLK_PHYCLK_HDMIPHY_PIXEL_CLKO_PHY, "phyclk_hdmiphy_pixel_clko_phy",
+ NULL, 0, 166000000),
};
static struct samsung_mux_clock disp_mux_clks[] __initdata = {
@@ -2820,6 +2760,8 @@ static struct samsung_gate_clock disp_gate_clks[] __initdata = {
ENABLE_PCLK_DISP, 2, 0, 0),
GATE(CLK_PCLK_DECON_TV, "pclk_decon_tv", "div_pclk_disp",
ENABLE_PCLK_DISP, 1, 0, 0),
+ GATE(CLK_PCLK_DECON, "pclk_decon", "div_pclk_disp",
+ ENABLE_PCLK_DISP, 0, 0, 0),
/* ENABLE_SCLK_DISP */
GATE(CLK_PHYCLK_MIPIDPHY1_BITCLKDIV8, "phyclk_mipidphy1_bitclkdiv8",
@@ -2919,11 +2861,8 @@ static unsigned long aud_clk_regs[] __initdata = {
MUX_SEL_AUD1,
MUX_ENABLE_AUD0,
MUX_ENABLE_AUD1,
- MUX_STAT_AUD0,
DIV_AUD0,
DIV_AUD1,
- DIV_STAT_AUD0,
- DIV_STAT_AUD1,
ENABLE_ACLK_AUD,
ENABLE_PCLK_AUD,
ENABLE_SCLK_AUD0,
@@ -2937,9 +2876,9 @@ PNAME(mout_aud_pll_user_aud_p) = { "oscclk", "fout_aud_pll", };
PNAME(mout_sclk_aud_pcm_p) = { "mout_aud_pll_user", "ioclk_audiocdclk0",};
static struct samsung_fixed_rate_clock aud_fixed_clks[] __initdata = {
- FRATE(0, "ioclk_jtag_tclk", NULL, CLK_IS_ROOT, 33000000),
- FRATE(0, "ioclk_slimbus_clk", NULL, CLK_IS_ROOT, 25000000),
- FRATE(0, "ioclk_i2s_bclk", NULL, CLK_IS_ROOT, 50000000),
+ FRATE(0, "ioclk_jtag_tclk", NULL, 0, 33000000),
+ FRATE(0, "ioclk_slimbus_clk", NULL, 0, 25000000),
+ FRATE(0, "ioclk_i2s_bclk", NULL, 0, 50000000),
};
static struct samsung_mux_clock aud_mux_clks[] __initdata = {
@@ -3087,7 +3026,6 @@ PNAME(mout_aclk_bus2_400_p) = { "oscclk", "aclk_bus2_400", };
#define CMU_BUS_COMMON_CLK_REGS \
DIV_BUS, \
- DIV_STAT_BUS, \
ENABLE_ACLK_BUS, \
ENABLE_PCLK_BUS, \
ENABLE_IP_BUS0, \
@@ -3100,7 +3038,6 @@ static unsigned long bus01_clk_regs[] __initdata = {
static unsigned long bus2_clk_regs[] __initdata = {
MUX_SEL_BUS2,
MUX_ENABLE_BUS2,
- MUX_STAT_BUS2,
CMU_BUS_COMMON_CLK_REGS,
};
@@ -3259,11 +3196,8 @@ static unsigned long g3d_clk_regs[] __initdata = {
G3D_PLL_FREQ_DET,
MUX_SEL_G3D,
MUX_ENABLE_G3D,
- MUX_STAT_G3D,
DIV_G3D,
DIV_G3D_PLL_FREQ_DET,
- DIV_STAT_G3D,
- DIV_STAT_G3D_PLL_FREQ_DET,
ENABLE_ACLK_G3D,
ENABLE_PCLK_G3D,
ENABLE_SCLK_G3D,
@@ -3379,7 +3313,6 @@ CLK_OF_DECLARE(exynos5433_cmu_g3d, "samsung,exynos5433-cmu-g3d",
static unsigned long gscl_clk_regs[] __initdata = {
MUX_SEL_GSCL,
MUX_ENABLE_GSCL,
- MUX_STAT_GSCL,
ENABLE_ACLK_GSCL,
ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL0,
ENABLE_ACLK_GSCL_SECURE_SMMU_GSCL1,
@@ -3472,11 +3405,11 @@ static struct samsung_gate_clock gscl_gate_clks[] __initdata = {
/* ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL1 */
GATE(CLK_PCLK_SMMU_GSCL1, "pclk_smmu_gscl1", "mout_aclk_gscl_111_user",
- ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL0, 0, 0, 0),
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL1, 0, 0, 0),
/* ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2 */
GATE(CLK_PCLK_SMMU_GSCL2, "pclk_smmu_gscl2", "mout_aclk_gscl_111_user",
- ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL0, 0, 0, 0),
+ ENABLE_PCLK_GSCL_SECURE_SMMU_GSCL2, 0, 0, 0),
};
static struct samsung_cmu_info gscl_cmu_info __initdata = {
@@ -3543,15 +3476,9 @@ static unsigned long apollo_clk_regs[] __initdata = {
MUX_ENABLE_APOLLO0,
MUX_ENABLE_APOLLO1,
MUX_ENABLE_APOLLO2,
- MUX_STAT_APOLLO0,
- MUX_STAT_APOLLO1,
- MUX_STAT_APOLLO2,
DIV_APOLLO0,
DIV_APOLLO1,
DIV_APOLLO_PLL_FREQ_DET,
- DIV_STAT_APOLLO0,
- DIV_STAT_APOLLO1,
- DIV_STAT_APOLLO_PLL_FREQ_DET,
ENABLE_ACLK_APOLLO,
ENABLE_PCLK_APOLLO,
ENABLE_SCLK_APOLLO,
@@ -3735,15 +3662,9 @@ static unsigned long atlas_clk_regs[] __initdata = {
MUX_ENABLE_ATLAS0,
MUX_ENABLE_ATLAS1,
MUX_ENABLE_ATLAS2,
- MUX_STAT_ATLAS0,
- MUX_STAT_ATLAS1,
- MUX_STAT_ATLAS2,
DIV_ATLAS0,
DIV_ATLAS1,
DIV_ATLAS_PLL_FREQ_DET,
- DIV_STAT_ATLAS0,
- DIV_STAT_ATLAS1,
- DIV_STAT_ATLAS_PLL_FREQ_DET,
ENABLE_ACLK_ATLAS,
ENABLE_PCLK_ATLAS,
ENABLE_SCLK_ATLAS,
@@ -3937,10 +3858,7 @@ static unsigned long mscl_clk_regs[] __initdata = {
MUX_SEL_MSCL1,
MUX_ENABLE_MSCL0,
MUX_ENABLE_MSCL1,
- MUX_STAT_MSCL0,
- MUX_STAT_MSCL1,
DIV_MSCL,
- DIV_STAT_MSCL,
ENABLE_ACLK_MSCL,
ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER0,
ENABLE_ACLK_MSCL_SECURE_SMMU_M2MSCALER1,
@@ -4097,9 +4015,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mscl, "samsung,exynos5433-cmu-mscl",
static unsigned long mfc_clk_regs[] __initdata = {
MUX_SEL_MFC,
MUX_ENABLE_MFC,
- MUX_STAT_MFC,
DIV_MFC,
- DIV_STAT_MFC,
ENABLE_ACLK_MFC,
ENABLE_ACLK_MFC_SECURE_SMMU_MFC,
ENABLE_PCLK_MFC,
@@ -4207,9 +4123,7 @@ CLK_OF_DECLARE(exynos5433_cmu_mfc, "samsung,exynos5433-cmu-mfc",
static unsigned long hevc_clk_regs[] __initdata = {
MUX_SEL_HEVC,
MUX_ENABLE_HEVC,
- MUX_STAT_HEVC,
DIV_HEVC,
- DIV_STAT_HEVC,
ENABLE_ACLK_HEVC,
ENABLE_ACLK_HEVC_SECURE_SMMU_HEVC,
ENABLE_PCLK_HEVC,
@@ -4321,9 +4235,7 @@ CLK_OF_DECLARE(exynos5433_cmu_hevc, "samsung,exynos5433-cmu-hevc",
static unsigned long isp_clk_regs[] __initdata = {
MUX_SEL_ISP,
MUX_ENABLE_ISP,
- MUX_STAT_ISP,
DIV_ISP,
- DIV_STAT_ISP,
ENABLE_ACLK_ISP0,
ENABLE_ACLK_ISP1,
ENABLE_ACLK_ISP2,
@@ -4603,20 +4515,11 @@ static unsigned long cam0_clk_regs[] __initdata = {
MUX_ENABLE_CAM02,
MUX_ENABLE_CAM03,
MUX_ENABLE_CAM04,
- MUX_STAT_CAM00,
- MUX_STAT_CAM01,
- MUX_STAT_CAM02,
- MUX_STAT_CAM03,
- MUX_STAT_CAM04,
MUX_IGNORE_CAM01,
DIV_CAM00,
DIV_CAM01,
DIV_CAM02,
DIV_CAM03,
- DIV_STAT_CAM00,
- DIV_STAT_CAM01,
- DIV_STAT_CAM02,
- DIV_STAT_CAM03,
ENABLE_ACLK_CAM00,
ENABLE_ACLK_CAM01,
ENABLE_ACLK_CAM02,
@@ -4687,9 +4590,9 @@ PNAME(mout_sclk_pixelasync_lite_c_init_a_p) = {
static struct samsung_fixed_rate_clock cam0_fixed_clks[] __initdata = {
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY, "phyclk_rxbyteclkhs0_s4_phy",
- NULL, CLK_IS_ROOT, 100000000),
+ NULL, 0, 100000000),
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY, "phyclk_rxbyteclkhs0_s2a_phy",
- NULL, CLK_IS_ROOT, 100000000),
+ NULL, 0, 100000000),
};
static struct samsung_mux_clock cam0_mux_clks[] __initdata = {
@@ -4749,21 +4652,21 @@ static struct samsung_mux_clock cam0_mux_clks[] __initdata = {
MUX(CLK_MOUT_SCLK_LITE_FREECNT_C, "mout_sclk_lite_freecnt_c",
mout_sclk_lite_freecnt_c_p, MUX_SEL_CAM04, 24, 1),
MUX(CLK_MOUT_SCLK_LITE_FREECNT_B, "mout_sclk_lite_freecnt_b",
- mout_sclk_lite_freecnt_b_p, MUX_SEL_CAM04, 24, 1),
+ mout_sclk_lite_freecnt_b_p, MUX_SEL_CAM04, 20, 1),
MUX(CLK_MOUT_SCLK_LITE_FREECNT_A, "mout_sclk_lite_freecnt_a",
- mout_sclk_lite_freecnt_a_p, MUX_SEL_CAM04, 24, 1),
+ mout_sclk_lite_freecnt_a_p, MUX_SEL_CAM04, 16, 1),
MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_B, "mout_sclk_pixelasync_lite_c_b",
- mout_sclk_pixelasync_lite_c_b_p, MUX_SEL_CAM04, 24, 1),
+ mout_sclk_pixelasync_lite_c_b_p, MUX_SEL_CAM04, 12, 1),
MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_A, "mout_sclk_pixelasync_lite_c_a",
- mout_sclk_pixelasync_lite_c_a_p, MUX_SEL_CAM04, 24, 1),
+ mout_sclk_pixelasync_lite_c_a_p, MUX_SEL_CAM04, 8, 1),
MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_B,
"mout_sclk_pixelasync_lite_c_init_b",
mout_sclk_pixelasync_lite_c_init_b_p,
- MUX_SEL_CAM04, 24, 1),
+ MUX_SEL_CAM04, 4, 1),
MUX(CLK_MOUT_SCLK_PIXELASYNC_LITE_C_INIT_A,
"mout_sclk_pixelasync_lite_c_init_a",
mout_sclk_pixelasync_lite_c_init_a_p,
- MUX_SEL_CAM04, 24, 1),
+ MUX_SEL_CAM04, 0, 1),
};
static struct samsung_div_clock cam0_div_clks[] __initdata = {
@@ -5074,14 +4977,9 @@ static unsigned long cam1_clk_regs[] __initdata = {
MUX_ENABLE_CAM10,
MUX_ENABLE_CAM11,
MUX_ENABLE_CAM12,
- MUX_STAT_CAM10,
- MUX_STAT_CAM11,
- MUX_STAT_CAM12,
MUX_IGNORE_CAM11,
DIV_CAM10,
DIV_CAM11,
- DIV_STAT_CAM10,
- DIV_STAT_CAM11,
ENABLE_ACLK_CAM10,
ENABLE_ACLK_CAM11,
ENABLE_ACLK_CAM12,
@@ -5120,7 +5018,7 @@ PNAME(mout_aclk_lite_c_a_p) = { "mout_aclk_cam1_552_user",
static struct samsung_fixed_rate_clock cam1_fixed_clks[] __initdata = {
FRATE(CLK_PHYCLK_RXBYTEECLKHS0_S2B, "phyclk_rxbyteclkhs0_s2b_phy", NULL,
- CLK_IS_ROOT, 100000000),
+ 0, 100000000),
};
static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
@@ -5134,9 +5032,9 @@ static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
MUX(CLK_MOUT_ACLK_CAM1_333_USER, "mout_aclk_cam1_333_user",
mout_aclk_cam1_333_user_p, MUX_SEL_CAM10, 8, 1),
MUX(CLK_MOUT_ACLK_CAM1_400_USER, "mout_aclk_cam1_400_user",
- mout_aclk_cam1_400_user_p, MUX_SEL_CAM01, 4, 1),
+ mout_aclk_cam1_400_user_p, MUX_SEL_CAM10, 4, 1),
MUX(CLK_MOUT_ACLK_CAM1_552_USER, "mout_aclk_cam1_552_user",
- mout_aclk_cam1_552_user_p, MUX_SEL_CAM01, 0, 1),
+ mout_aclk_cam1_552_user_p, MUX_SEL_CAM10, 0, 1),
/* MUX_SEL_CAM11 */
MUX(CLK_MOUT_PHYCLK_RXBYTECLKHS0_S2B_USER,
@@ -5161,7 +5059,7 @@ static struct samsung_mux_clock cam1_mux_clks[] __initdata = {
static struct samsung_div_clock cam1_div_clks[] __initdata = {
/* DIV_CAM10 */
- DIV(CLK_DIV_SCLK_ISP_WPWM, "div_sclk_isp_wpwm",
+ DIV(CLK_DIV_SCLK_ISP_MPWM, "div_sclk_isp_mpwm",
"div_pclk_cam1_83", DIV_CAM10, 16, 2),
DIV(CLK_DIV_PCLK_CAM1_83, "div_pclk_cam1_83",
"mout_aclk_cam1_333_user", DIV_CAM10, 12, 2),
@@ -5355,7 +5253,7 @@ static struct samsung_gate_clock cam1_gate_clks[] __initdata = {
ENABLE_PCLK_CAM1, 5, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_ISP_I2C0, "pclk_isp_i2c0", "div_pclk_cam1_83",
ENABLE_PCLK_CAM1, 4, CLK_IGNORE_UNUSED, 0),
- GATE(CLK_PCLK_ISP_MPWM, "pclk_isp_wpwm", "div_pclk_cam1_83",
+ GATE(CLK_PCLK_ISP_MPWM, "pclk_isp_mpwm", "div_pclk_cam1_83",
ENABLE_PCLK_CAM1, 3, CLK_IGNORE_UNUSED, 0),
GATE(CLK_PCLK_FD, "pclk_fd", "div_pclk_fd",
ENABLE_PCLK_CAM1, 3, CLK_IGNORE_UNUSED, 0),
@@ -5388,7 +5286,7 @@ static struct samsung_gate_clock cam1_gate_clks[] __initdata = {
ENABLE_SCLK_CAM1, 5, 0, 0),
GATE(CLK_SCLK_ISP_SPI0, "sclk_isp_spi0", "mout_sclk_isp_spi0_user",
ENABLE_SCLK_CAM1, 4, 0, 0),
- GATE(CLK_SCLK_ISP_MPWM, "sclk_isp_wpwm", "div_sclk_isp_wpwm",
+ GATE(CLK_SCLK_ISP_MPWM, "sclk_isp_mpwm", "div_sclk_isp_mpwm",
ENABLE_SCLK_CAM1, 3, 0, 0),
GATE(CLK_PCLK_DBG_ISP, "sclk_dbg_isp", "div_pclk_dbg_cam1",
ENABLE_SCLK_CAM1, 2, 0, 0),
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 590813871ffe..c57cff1e1798 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -31,16 +31,16 @@ PNAME(mout_spi_p) = { "div125", "div200" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock exynos5440_fixed_rate_ext_clks[] __initdata = {
- FRATE(0, "xtal", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "xtal", NULL, 0, 0),
};
/* fixed rate clocks */
static struct samsung_fixed_rate_clock exynos5440_fixed_rate_clks[] __initdata = {
- FRATE(0, "ppll", NULL, CLK_IS_ROOT, 1000000000),
- FRATE(0, "usb_phy0", NULL, CLK_IS_ROOT, 60000000),
- FRATE(0, "usb_phy1", NULL, CLK_IS_ROOT, 60000000),
- FRATE(0, "usb_ohci12", NULL, CLK_IS_ROOT, 12000000),
- FRATE(0, "usb_ohci48", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(0, "ppll", NULL, 0, 1000000000),
+ FRATE(0, "usb_phy0", NULL, 0, 60000000),
+ FRATE(0, "usb_phy1", NULL, 0, 60000000),
+ FRATE(0, "usb_ohci12", NULL, 0, 12000000),
+ FRATE(0, "usb_ohci48", NULL, 0, 48000000),
};
/* fixed factor clocks */
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 55f8e2e24ab8..ad68d463b12c 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -894,10 +894,8 @@ PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_user_p) = { "fin_pll",
/* fixed rate clocks used in the FSYS0 block */
static struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
- FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
- CLK_IS_ROOT, 60000000),
- FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
- CLK_IS_ROOT, 125000000),
+ FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL, 0, 60000000),
+ FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL, 0, 125000000),
};
static unsigned long fsys0_clk_regs[] __initdata = {
@@ -1009,11 +1007,11 @@ PNAME(mout_phyclk_ufs20_rx1_user_p) = { "fin_pll", "phyclk_ufs20_rx1_symbol" };
/* fixed rate clocks used in the FSYS1 block */
static struct samsung_fixed_rate_clock fixed_rate_clks_fsys1[] __initdata = {
FRATE(PHYCLK_UFS20_TX0_SYMBOL, "phyclk_ufs20_tx0_symbol", NULL,
- CLK_IS_ROOT, 300000000),
+ 0, 300000000),
FRATE(PHYCLK_UFS20_RX0_SYMBOL, "phyclk_ufs20_rx0_symbol", NULL,
- CLK_IS_ROOT, 300000000),
+ 0, 300000000),
FRATE(PHYCLK_UFS20_RX1_SYMBOL, "phyclk_ufs20_rx1_symbol", NULL,
- CLK_IS_ROOT, 300000000),
+ 0, 300000000),
};
static unsigned long fsys1_clk_regs[] __initdata = {
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index 0945a8852299..d7b011c1fcf8 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -344,7 +344,7 @@ struct samsung_mux_clock s3c2442_muxes[] __initdata = {
*/
#define XTI 1
struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
- FRATE(XTI, "xti", NULL, CLK_IS_ROOT, 0),
+ FRATE(XTI, "xti", NULL, 0, 0),
};
static void __init s3c2410_common_clk_register_fixed_ext(
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index 44d6a9f4f5b2..effe3736ec6b 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -232,8 +232,8 @@ static struct notifier_block s3c2412_restart_handler = {
*/
#define XTI 1
struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
- FRATE(XTI, "xti", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "ext", NULL, CLK_IS_ROOT, 0),
+ FRATE(XTI, "xti", NULL, 0, 0),
+ FRATE(0, "ext", NULL, 0, 0),
};
static void __init s3c2412_common_clk_register_fixed_ext(
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 2c0a1ea3c80c..37562783b25e 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -371,10 +371,10 @@ static struct notifier_block s3c2443_restart_handler = {
 * Only necessary until the devicetree move is complete
*/
struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
- FRATE(0, "xti", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "ext", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "ext_i2s", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "ext_uart", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "xti", NULL, 0, 0),
+ FRATE(0, "ext", NULL, 0, 0),
+ FRATE(0, "ext_i2s", NULL, 0, 0),
+ FRATE(0, "ext_uart", NULL, 0, 0),
};
static void __init s3c2443_common_clk_register_fixed_ext(
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index d325ed1e196b..60aa775bd374 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -176,14 +176,14 @@ PNAME(audio2_p6410) = { "mout_epll", "dout_mpll", "fin_pll", "iiscdclk2",
/* Fixed rate clocks generated outside the SoC. */
FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_ext_clks) __initdata = {
- FRATE(0, "fin_pll", NULL, CLK_IS_ROOT, 0),
- FRATE(0, "xusbxti", NULL, CLK_IS_ROOT, 0),
+ FRATE(0, "fin_pll", NULL, 0, 0),
+ FRATE(0, "xusbxti", NULL, 0, 0),
};
/* Fixed rate clocks generated inside the SoC. */
FIXED_RATE_CLOCKS(s3c64xx_fixed_rate_clks) __initdata = {
- FRATE(CLK27M, "clk27m", NULL, CLK_IS_ROOT, 27000000),
- FRATE(CLK48M, "clk48m", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(CLK27M, "clk27m", NULL, 0, 27000000),
+ FRATE(CLK48M, "clk48m", NULL, 0, 48000000),
};
/* List of clock muxes present on all S3C64xx SoCs. */
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index 759aaf342bea..52302262045d 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -503,15 +503,15 @@ static const struct samsung_mux_clock s5p6442_mux_clks[] __initconst = {
/* S5PV210-specific fixed rate clocks generated inside the SoC. */
static const struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initconst = {
- FRATE(SCLK_HDMI27M, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
- FRATE(SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
- FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
- FRATE(SCLK_USBPHY1, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(SCLK_HDMI27M, "sclk_hdmi27m", NULL, 0, 27000000),
+ FRATE(SCLK_HDMIPHY, "sclk_hdmiphy", NULL, 0, 27000000),
+ FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, 0, 48000000),
+ FRATE(SCLK_USBPHY1, "sclk_usbphy1", NULL, 0, 48000000),
};
/* S5P6442-specific fixed rate clocks generated inside the SoC. */
static const struct samsung_fixed_rate_clock s5p6442_frate_clks[] __initconst = {
- FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 30000000),
+ FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, 0, 30000000),
};
/* Common clock dividers. */
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index 957aae63e7cc..d0c6c9a2d06a 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -1423,7 +1423,7 @@ static int atlas7_reset_module(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops atlas7_rst_ops = {
+static const struct reset_control_ops atlas7_rst_ops = {
.reset = atlas7_reset_module,
};
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
index 1cebf253e8fd..c2d572748167 100644
--- a/drivers/clk/socfpga/clk-gate-a10.c
+++ b/drivers/clk/socfpga/clk-gate-a10.c
@@ -115,7 +115,6 @@ static void __init __socfpga_gate_init(struct device_node *node,
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
int rc;
- int i = 0;
socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
if (WARN_ON(!socfpga_clk))
@@ -167,12 +166,9 @@ static void __init __socfpga_gate_init(struct device_node *node,
init.name = clk_name;
init.ops = ops;
init.flags = 0;
- while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] =
- of_clk_get_parent_name(node, i)) != NULL)
- i++;
+ init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
init.parent_names = parent_name;
- init.num_parents = i;
socfpga_clk->hw.hw.init = &init;
clk = clk_register(NULL, &socfpga_clk->hw.hw);
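
of_clk_parent_fill() folds the open-coded of_clk_get_parent_name() loop into a single call that both fills the array and returns the parent count. A sketch of the equivalence, reusing the names from the hunk above:

    /* Old pattern removed by this hunk: */
    int i = 0;
    while (i < SOCFPGA_MAX_PARENTS &&
           (parent_name[i] = of_clk_get_parent_name(node, i)) != NULL)
            i++;
    init.num_parents = i;

    /* New pattern, same result: */
    init.num_parents = of_clk_parent_fill(node, parent_name,
                                          SOCFPGA_MAX_PARENTS);
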
diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c
index 1f397cb72e89..70993f1e88bc 100644
--- a/drivers/clk/socfpga/clk-periph-a10.c
+++ b/drivers/clk/socfpga/clk-periph-a10.c
@@ -74,7 +74,7 @@ static __init void __socfpga_periph_init(struct device_node *node,
struct clk *clk;
struct socfpga_periph_clk *periph_clk;
const char *clk_name = node->name;
- const char *parent_name;
+ const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
int rc;
u32 fixed_div;
@@ -109,9 +109,8 @@ static __init void __socfpga_periph_init(struct device_node *node,
init.ops = ops;
init.flags = 0;
- parent_name = of_clk_get_parent_name(node, 0);
- init.num_parents = 1;
- init.parent_names = &parent_name;
+ init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS);
+ init.parent_names = parent_name;
periph_clk->hw.hw.init = &init;
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 402d630bd531..35fabe1a32c3 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -74,7 +74,7 @@ static struct clk_ops clk_pll_ops = {
.get_parent = clk_pll_get_parent,
};
-static struct __init clk * __socfpga_pll_init(struct device_node *node,
+static struct clk * __init __socfpga_pll_init(struct device_node *node,
const struct clk_ops *ops)
{
u32 reg;
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 009bd1410cfa..2f86e3f94efa 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -386,24 +386,20 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
{
struct clk *clk, *clk1;
- clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
- 32000);
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, 0, 32000);
clk_register_clkdev(clk, "osc_32k_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
- 24000000);
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, 0, 24000000);
clk_register_clkdev(clk, "osc_24m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
- 25000000);
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, 0, 25000000);
clk_register_clkdev(clk, "osc_25m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
- 125000000);
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, 0, 125000000);
clk_register_clkdev(clk, "gmii_pad_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
- CLK_IS_ROOT, 12288000);
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL, 0,
+ 12288000);
clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
/* clock derived from 32 KHz osc clk */
@@ -897,11 +893,10 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
&_lock);
clk_register_clkdev(clk, "ras_apb_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, CLK_IS_ROOT,
+ clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, 0,
50000000);
- clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, CLK_IS_ROOT,
- 50000000);
+ clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, 0, 50000000);
clk = clk_register_gate(NULL, "can0_clk", "apb_clk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN0_CLK_ENB, 0,
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 9c7abfd951ba..cbb19a90f2d6 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -443,24 +443,20 @@ void __init spear1340_clk_init(void __iomem *misc_base)
{
struct clk *clk, *clk1;
- clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
- 32000);
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, 0, 32000);
clk_register_clkdev(clk, "osc_32k_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
- 24000000);
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, 0, 24000000);
clk_register_clkdev(clk, "osc_24m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
- 25000000);
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, 0, 25000000);
clk_register_clkdev(clk, "osc_25m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
- 125000000);
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, 0, 125000000);
clk_register_clkdev(clk, "gmii_pad_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
- CLK_IS_ROOT, 12288000);
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL, 0,
+ 12288000);
clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
/* clock derived from 32 KHz osc clk */
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 404a55edd613..c403c66b6583 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -251,7 +251,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base,
struct clk *clk;
clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL,
- CLK_IS_ROOT, 125000000);
+ 0, 125000000);
clk_register_clkdev(clk, "smii_125m_pad", NULL);
clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
@@ -391,12 +391,10 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
{
struct clk *clk, *clk1, *ras_apb_clk;
- clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
- 32000);
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, 0, 32000);
clk_register_clkdev(clk, "osc_32k_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
- 24000000);
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, 0, 24000000);
clk_register_clkdev(clk, "osc_24m_clk", NULL);
/* clock derived from 32 KHz osc clk */
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index e24f85cd4300..7c9383c3c2c6 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -117,12 +117,10 @@ void __init spear6xx_clk_init(void __iomem *misc_base)
{
struct clk *clk, *clk1;
- clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
- 32000);
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, 0, 32000);
clk_register_clkdev(clk, "osc_32k_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, CLK_IS_ROOT,
- 30000000);
+ clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, 0, 30000000);
clk_register_clkdev(clk, "osc_30m_clk", NULL);
/* clock derived from 32 KHz osc clk */
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 24d99594c0b3..627267c7ec5c 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -244,10 +244,10 @@ static const char ** __init flexgen_get_parents(struct device_node *np,
int *num_parents)
{
const char **parents;
- int nparents;
+ unsigned int nparents;
nparents = of_clk_get_parent_count(np);
- if (WARN_ON(nparents <= 0))
+ if (WARN_ON(!nparents))
return NULL;
parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index ccb324d97160..dec4eaaecc00 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -574,12 +574,16 @@ static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
struct stm_fs params;
long hwrate = 0;
unsigned long flags = 0;
+ int ret;
if (!rate || !parent_rate)
return -EINVAL;
- if (!clk_fs660c32_vco_get_params(parent_rate, rate, &params))
- clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
+ ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
+ if (ret)
+ return ret;
+
+ clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n",
__func__, clk_hw_get_name(hw),
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 5dc5ce217960..b1e10ffe7a44 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -26,10 +26,10 @@ static const char ** __init clkgen_mux_get_parents(struct device_node *np,
int *num_parents)
{
const char **parents;
- int nparents;
+ unsigned int nparents;
nparents = of_clk_get_parent_count(np);
- if (WARN_ON(nparents <= 0))
+ if (WARN_ON(!nparents))
return ERR_PTR(-EINVAL);
parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
@@ -822,11 +822,10 @@ err:
if (!clk_data->clks[i])
continue;
- composite = container_of(__clk_get_hw(clk_data->clks[i]),
- struct clk_composite, hw);
- kfree(container_of(composite->gate_hw, struct clk_gate, hw));
- kfree(container_of(composite->rate_hw, struct clk_divider, hw));
- kfree(container_of(composite->mux_hw, struct clk_mux, hw));
+ composite = to_clk_composite(__clk_get_hw(clk_data->clks[i]));
+ kfree(to_clk_gate(composite->gate_hw));
+ kfree(to_clk_divider(composite->rate_hw));
+ kfree(to_clk_mux(composite->mux_hw));
}
kfree(clk_data->clks);
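
The to_clk_composite(), to_clk_gate(), to_clk_divider() and to_clk_mux() helpers used above are plain container_of() wrappers provided by the clk framework, so the hunk changes no behavior; the gate helper, for example, expands to:

    #define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
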
diff --git a/drivers/clk/sunxi/clk-a10-hosc.c b/drivers/clk/sunxi/clk-a10-hosc.c
index 0481d5d673d6..6b598c6a0213 100644
--- a/drivers/clk/sunxi/clk-a10-hosc.c
+++ b/drivers/clk/sunxi/clk-a10-hosc.c
@@ -15,9 +15,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
#define SUNXI_OSC24M_GATE 0
@@ -61,7 +61,6 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
goto err_free_gate;
of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
return;
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index 044c1717b762..d9ea22ec4e25 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -85,7 +85,7 @@ static int sunxi_ve_of_xlate(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sunxi_ve_reset_ops = {
+static const struct reset_control_ops sunxi_ve_reset_ops = {
.assert = sunxi_ve_reset_assert,
.deassert = sunxi_ve_reset_deassert,
};
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 1611b036421c..3437f734c9bf 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -17,7 +17,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -107,7 +106,6 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
goto iounmap_reg;
of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
return;
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 59428dbd607a..ddefe9668863 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -48,7 +48,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
u32 reg;
unsigned long rate;
struct clk_factors *factors = to_clk_factors(hw);
- struct clk_factors_config *config = factors->config;
+ const struct clk_factors_config *config = factors->config;
/* Fetch the register value */
reg = readl(factors->reg);
@@ -63,18 +63,28 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
if (config->pwidth != SUNXI_FACTORS_NOT_APPLICABLE)
p = FACTOR_GET(config->pshift, config->pwidth, reg);
- /* Calculate the rate */
- rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
+ if (factors->recalc) {
+ struct factors_request factors_req = {
+ .parent_rate = parent_rate,
+ .n = n,
+ .k = k,
+ .m = m,
+ .p = p,
+ };
- return rate;
-}
+ /* get mux details from mux clk structure */
+ if (factors->mux)
+ factors_req.parent_index =
+ (reg >> factors->mux->shift) &
+ factors->mux->mask;
-static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- struct clk_factors *factors = to_clk_factors(hw);
- factors->get_factors((u32 *)&rate, (u32)*parent_rate,
- NULL, NULL, NULL, NULL);
+ factors->recalc(&factors_req);
+
+ return factors_req.rate;
+ }
+
+ /* Calculate the rate */
+ rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
return rate;
}
@@ -82,6 +92,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
static int clk_factors_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
+ struct clk_factors *factors = to_clk_factors(hw);
struct clk_hw *parent, *best_parent = NULL;
int i, num_parents;
unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
@@ -89,6 +100,10 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
/* find the parent that can help provide the fastest rate <= rate */
num_parents = clk_hw_get_num_parents(hw);
for (i = 0; i < num_parents; i++) {
+ struct factors_request factors_req = {
+ .rate = req->rate,
+ .parent_index = i,
+ };
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
@@ -97,8 +112,9 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
else
parent_rate = clk_hw_get_rate(parent);
- child_rate = clk_factors_round_rate(hw, req->rate,
- &parent_rate);
+ factors_req.parent_rate = parent_rate;
+ factors->get_factors(&factors_req);
+ child_rate = factors_req.rate;
if (child_rate <= req->rate && child_rate > best_child_rate) {
best_parent = parent;
@@ -120,13 +136,16 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
- u8 n = 0, k = 0, m = 0, p = 0;
+ struct factors_request req = {
+ .rate = rate,
+ .parent_rate = parent_rate,
+ };
u32 reg;
struct clk_factors *factors = to_clk_factors(hw);
- struct clk_factors_config *config = factors->config;
+ const struct clk_factors_config *config = factors->config;
unsigned long flags = 0;
- factors->get_factors((u32 *)&rate, (u32)parent_rate, &n, &k, &m, &p);
+ factors->get_factors(&req);
if (factors->lock)
spin_lock_irqsave(factors->lock, flags);
@@ -135,10 +154,10 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
reg = readl(factors->reg);
/* Set up the new factors - macros do not do anything if width is 0 */
- reg = FACTOR_SET(config->nshift, config->nwidth, reg, n);
- reg = FACTOR_SET(config->kshift, config->kwidth, reg, k);
- reg = FACTOR_SET(config->mshift, config->mwidth, reg, m);
- reg = FACTOR_SET(config->pshift, config->pwidth, reg, p);
+ reg = FACTOR_SET(config->nshift, config->nwidth, reg, req.n);
+ reg = FACTOR_SET(config->kshift, config->kwidth, reg, req.k);
+ reg = FACTOR_SET(config->mshift, config->mwidth, reg, req.m);
+ reg = FACTOR_SET(config->pshift, config->pwidth, reg, req.p);
/* Apply them now */
writel(reg, factors->reg);
@@ -155,7 +174,6 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_factors_ops = {
.determine_rate = clk_factors_determine_rate,
.recalc_rate = clk_factors_recalc_rate,
- .round_rate = clk_factors_round_rate,
.set_rate = clk_factors_set_rate,
};
@@ -172,7 +190,7 @@ struct clk *sunxi_factors_register(struct device_node *node,
struct clk_hw *mux_hw = NULL;
const char *clk_name = node->name;
const char *parents[FACTORS_MAX_PARENTS];
- int i = 0;
+ int ret, i = 0;
/* if we have a mux, we will have >1 parents */
i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS);
@@ -188,21 +206,22 @@ struct clk *sunxi_factors_register(struct device_node *node,
factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
if (!factors)
- return NULL;
+ goto err_factors;
/* set up factors properties */
factors->reg = reg;
factors->config = data->table;
factors->get_factors = data->getter;
+ factors->recalc = data->recalc;
factors->lock = lock;
/* Add a gate if this factor clock can be gated */
if (data->enable) {
gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
- if (!gate) {
- kfree(factors);
- return NULL;
- }
+ if (!gate)
+ goto err_gate;
+
+ factors->gate = gate;
/* set up gate properties */
gate->reg = reg;
@@ -214,11 +233,10 @@ struct clk *sunxi_factors_register(struct device_node *node,
/* Add a mux if this factor clock can be muxed */
if (data->mux) {
mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
- if (!mux) {
- kfree(factors);
- kfree(gate);
- return NULL;
- }
+ if (!mux)
+ goto err_mux;
+
+ factors->mux = mux;
+		/* set up mux properties */
mux->reg = reg;
@@ -233,11 +251,44 @@ struct clk *sunxi_factors_register(struct device_node *node,
mux_hw, &clk_mux_ops,
&factors->hw, &clk_factors_ops,
gate_hw, &clk_gate_ops, 0);
+ if (IS_ERR(clk))
+ goto err_register;
- if (!IS_ERR(clk)) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
- }
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret)
+ goto err_provider;
return clk;
+
+err_provider:
+	/* TODO: the clk_composite allocated by clk_register_composite() leaks here. */
+ clk_unregister(clk);
+err_register:
+ kfree(mux);
+err_mux:
+ kfree(gate);
+err_gate:
+ kfree(factors);
+err_factors:
+ return NULL;
+}
+
+void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
+{
+ struct clk_hw *hw = __clk_get_hw(clk);
+ struct clk_factors *factors;
+ const char *name;
+
+ if (!hw)
+ return;
+
+ factors = to_clk_factors(hw);
+ name = clk_hw_get_name(hw);
+
+ of_clk_del_provider(node);
+	/* TODO: the clk_composite allocated by clk_register_composite() leaks here. */
+ clk_unregister(clk);
+ kfree(factors->mux);
+ kfree(factors->gate);
+ kfree(factors);
}
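
With the unwinding error path and the new sunxi_factors_unregister(), a platform driver can pair the two calls symmetrically across probe and remove. A hedged sketch with placeholder data (the same pattern appears for real in the sun6i-ar100 conversion later in this series):

    /* Sketch only; my_factors_data, my_lock and reg are placeholders. */
    static int my_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            clk = sunxi_factors_register(pdev->dev.of_node,
                                         &my_factors_data, &my_lock, reg);
            if (!clk)   /* sunxi_factors_register() returns NULL on error */
                    return -ENOMEM;

            platform_set_drvdata(pdev, clk);
            return 0;
    }

    static int my_remove(struct platform_device *pdev)
    {
            sunxi_factors_unregister(pdev->dev.of_node,
                                     platform_get_drvdata(pdev));
            return 0;
    }
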
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 171085ab5513..1e63c5b2d5f4 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -2,7 +2,6 @@
#define __MACH_SUNXI_CLK_FACTORS_H
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/spinlock.h>
#define SUNXI_FACTORS_NOT_APPLICABLE (0)
@@ -19,21 +18,36 @@ struct clk_factors_config {
u8 n_start;
};
+struct factors_request {
+ unsigned long rate;
+ unsigned long parent_rate;
+ u8 parent_index;
+ u8 n;
+ u8 k;
+ u8 m;
+ u8 p;
+};
+
struct factors_data {
int enable;
int mux;
int muxmask;
- struct clk_factors_config *table;
- void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
+ const struct clk_factors_config *table;
+ void (*getter)(struct factors_request *req);
+ void (*recalc)(struct factors_request *req);
const char *name;
};
struct clk_factors {
struct clk_hw hw;
void __iomem *reg;
- struct clk_factors_config *config;
- void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
+ const struct clk_factors_config *config;
+ void (*get_factors)(struct factors_request *req);
+ void (*recalc)(struct factors_request *req);
spinlock_t *lock;
+ /* for cleanup */
+ struct clk_mux *mux;
+ struct clk_gate *gate;
};
struct clk *sunxi_factors_register(struct device_node *node,
@@ -41,4 +55,6 @@ struct clk *sunxi_factors_register(struct device_node *node,
spinlock_t *lock,
void __iomem *reg);
+void sunxi_factors_unregister(struct device_node *node, struct clk *clk);
+
#endif
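
Under the new contract a getter receives a single struct factors_request carrying the requested rate and the parent rate, and writes back both the achievable rate and the factors, instead of taking six scalar pointers. A minimal sketch of a divider-only getter, modeled on the converted getters in this series:

    /* Sketch: a pure "m"-divider getter under the factors_request API. */
    static void example_get_factors(struct factors_request *req)
    {
            u8 div;

            /* the clock can only divide its parent */
            if (req->rate > req->parent_rate)
                    req->rate = req->parent_rate;

            div = DIV_ROUND_UP(req->parent_rate, req->rate);
            req->rate = req->parent_rate / div;
            req->m = div - 1;   /* the register field stores divider - 1 */
    }
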
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index d167e1efb927..b38d71cec74c 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -28,17 +29,16 @@
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_a10_get_mod0_factors(struct factors_request *req)
{
u8 div, calcm, calcp;
/* These clocks can only divide, so we will never be able to achieve
* frequencies higher than the parent frequency */
- if (*freq > parent_rate)
- *freq = parent_rate;
+ if (req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
if (div < 16)
calcp = 0;
@@ -51,18 +51,13 @@ static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
calcm = DIV_ROUND_UP(div, 1 << calcp);
- *freq = (parent_rate >> calcp) / calcm;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
-
- *m = calcm - 1;
- *p = calcp;
+ req->rate = (req->parent_rate >> calcp) / calcm;
+ req->m = calcm - 1;
+ req->p = calcp;
}
/* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun4i_a10_mod0_config = {
+static const struct clk_factors_config sun4i_a10_mod0_config = {
.mshift = 0,
.mwidth = 4,
.pshift = 16,
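
As a standalone illustration of the mod0 selection above (p is a power-of-two pre-divider, m the remaining linear divider, so rate = (parent_rate >> p) / (m + 1)), the following userspace program reproduces the arithmetic for a 24 MHz parent; the p thresholds beyond the div < 16 branch shown in the hunk are an assumption about the elided lines:

    #include <stdio.h>

    int main(void)
    {
            unsigned long parent = 24000000, rate = 400000;
            unsigned long div = (parent + rate - 1) / rate; /* DIV_ROUND_UP = 60 */
            unsigned int p;

            if (div < 16)                   /* as in the hunk */
                    p = 0;
            else if (div / 2 < 16)          /* assumed elided branches */
                    p = 1;
            else if (div / 4 < 16)
                    p = 2;
            else
                    p = 3;

            /* calcm = DIV_ROUND_UP(div, 1 << p); here p = 2, m + 1 = 15 */
            unsigned long m1 = (div + (1UL << p) - 1) >> p;

            printf("p=%u m=%lu -> %lu Hz\n", p, m1 - 1, (parent >> p) / m1);
            return 0;       /* prints: p=2 m=14 -> 400000 Hz */
    }
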
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index f4da52b5ca0e..a085c3bc127c 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -98,6 +98,8 @@ static void __init sunxi_simple_gates_init(struct device_node *node)
sunxi_simple_gates_setup(node, NULL, 0);
}
+CLK_OF_DECLARE(sun4i_a10_gates, "allwinner,sun4i-a10-gates-clk",
+ sunxi_simple_gates_init);
CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk",
@@ -130,6 +132,8 @@ CLK_OF_DECLARE(sun8i_a23_apb2, "allwinner,sun8i-a23-apb2-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun8i_a33_ahb1, "allwinner,sun8i-a33-ahb1-gates-clk",
sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a83t_apb0, "allwinner,sun8i-a83t-apb0-gates-clk",
+ sunxi_simple_gates_init);
CLK_OF_DECLARE(sun9i_a80_ahb0, "allwinner,sun9i-a80-ahb0-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun9i_a80_ahb1, "allwinner,sun9i-a80-ahb1-gates-clk",
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 23d042aabb4f..68021fa5ecd9 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -9,7 +9,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/clkdev.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -87,7 +86,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
clk_parent, 0, reg, i,
0, NULL);
WARN_ON(IS_ERR(clk_data->clks[i]));
- clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
j++;
}
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 20887686bdbe..84a187e55360 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -8,211 +8,97 @@
*
*/
+#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
-#define SUN6I_AR100_MAX_PARENTS 4
-#define SUN6I_AR100_SHIFT_MASK 0x3
-#define SUN6I_AR100_SHIFT_MAX SUN6I_AR100_SHIFT_MASK
-#define SUN6I_AR100_SHIFT_SHIFT 4
-#define SUN6I_AR100_DIV_MASK 0x1f
-#define SUN6I_AR100_DIV_MAX (SUN6I_AR100_DIV_MASK + 1)
-#define SUN6I_AR100_DIV_SHIFT 8
-#define SUN6I_AR100_MUX_MASK 0x3
-#define SUN6I_AR100_MUX_SHIFT 16
-
-struct ar100_clk {
- struct clk_hw hw;
- void __iomem *reg;
-};
-
-static inline struct ar100_clk *to_ar100_clk(struct clk_hw *hw)
-{
- return container_of(hw, struct ar100_clk, hw);
-}
-
-static unsigned long ar100_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct ar100_clk *clk = to_ar100_clk(hw);
- u32 val = readl(clk->reg);
- int shift = (val >> SUN6I_AR100_SHIFT_SHIFT) & SUN6I_AR100_SHIFT_MASK;
- int div = (val >> SUN6I_AR100_DIV_SHIFT) & SUN6I_AR100_DIV_MASK;
-
- return (parent_rate >> shift) / (div + 1);
-}
-
-static int ar100_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- int nparents = clk_hw_get_num_parents(hw);
- long best_rate = -EINVAL;
- int i;
-
- req->best_parent_hw = NULL;
-
- for (i = 0; i < nparents; i++) {
- unsigned long parent_rate;
- unsigned long tmp_rate;
- struct clk_hw *parent;
- unsigned long div;
- int shift;
-
- parent = clk_hw_get_parent_by_index(hw, i);
- parent_rate = clk_hw_get_rate(parent);
- div = DIV_ROUND_UP(parent_rate, req->rate);
-
- /*
- * The AR100 clk contains 2 divisors:
- * - one power of 2 divisor
- * - one regular divisor
- *
- * First check if we can safely shift (or divide by a power
- * of 2) without losing precision on the requested rate.
- */
- shift = ffs(div) - 1;
- if (shift > SUN6I_AR100_SHIFT_MAX)
- shift = SUN6I_AR100_SHIFT_MAX;
-
- div >>= shift;
-
- /*
- * Then if the divisor is still bigger than what the HW
- * actually supports, use a bigger shift (or power of 2
- * divider) value and accept to lose some precision.
- */
- while (div > SUN6I_AR100_DIV_MAX) {
- shift++;
- div >>= 1;
- if (shift > SUN6I_AR100_SHIFT_MAX)
- break;
- }
-
- /*
- * If the shift value (or power of 2 divider) is bigger
- * than what the HW actually support, skip this parent.
- */
- if (shift > SUN6I_AR100_SHIFT_MAX)
- continue;
-
- tmp_rate = (parent_rate >> shift) / div;
- if (!req->best_parent_hw || tmp_rate > best_rate) {
- req->best_parent_hw = parent;
- req->best_parent_rate = parent_rate;
- best_rate = tmp_rate;
- }
- }
-
- if (best_rate < 0)
- return best_rate;
-
- req->rate = best_rate;
-
- return 0;
-}
-
-static int ar100_set_parent(struct clk_hw *hw, u8 index)
-{
- struct ar100_clk *clk = to_ar100_clk(hw);
- u32 val = readl(clk->reg);
-
- if (index >= SUN6I_AR100_MAX_PARENTS)
- return -EINVAL;
-
- val &= ~(SUN6I_AR100_MUX_MASK << SUN6I_AR100_MUX_SHIFT);
- val |= (index << SUN6I_AR100_MUX_SHIFT);
- writel(val, clk->reg);
-
- return 0;
-}
+#include "clk-factors.h"
-static u8 ar100_get_parent(struct clk_hw *hw)
-{
- struct ar100_clk *clk = to_ar100_clk(hw);
- return (readl(clk->reg) >> SUN6I_AR100_MUX_SHIFT) &
- SUN6I_AR100_MUX_MASK;
-}
-
-static int ar100_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+/**
+ * sun6i_get_ar100_factors() - calculate p and m factors for the AR100 clock
+ *
+ * The AR100 rate is calculated as follows:
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+static void sun6i_get_ar100_factors(struct factors_request *req)
{
- unsigned long div = parent_rate / rate;
- struct ar100_clk *clk = to_ar100_clk(hw);
- u32 val = readl(clk->reg);
+ unsigned long div;
int shift;
- if (parent_rate % rate)
- return -EINVAL;
+ /* clock only divides */
+ if (req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
- shift = ffs(div) - 1;
- if (shift > SUN6I_AR100_SHIFT_MAX)
- shift = SUN6I_AR100_SHIFT_MAX;
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
- div >>= shift;
+ if (div < 32)
+ shift = 0;
+ else if (div >> 1 < 32)
+ shift = 1;
+ else if (div >> 2 < 32)
+ shift = 2;
+ else
+ shift = 3;
- if (div > SUN6I_AR100_DIV_MAX)
- return -EINVAL;
+ div >>= shift;
- val &= ~((SUN6I_AR100_SHIFT_MASK << SUN6I_AR100_SHIFT_SHIFT) |
- (SUN6I_AR100_DIV_MASK << SUN6I_AR100_DIV_SHIFT));
- val |= (shift << SUN6I_AR100_SHIFT_SHIFT) |
- (div << SUN6I_AR100_DIV_SHIFT);
- writel(val, clk->reg);
+ if (div > 32)
+ div = 32;
- return 0;
+ req->rate = (req->parent_rate >> shift) / div;
+ req->m = div - 1;
+ req->p = shift;
}
-static struct clk_ops ar100_ops = {
- .recalc_rate = ar100_recalc_rate,
- .determine_rate = ar100_determine_rate,
- .set_parent = ar100_set_parent,
- .get_parent = ar100_get_parent,
- .set_rate = ar100_set_rate,
+static const struct clk_factors_config sun6i_ar100_config = {
+ .mwidth = 5,
+ .mshift = 8,
+ .pwidth = 2,
+ .pshift = 4,
};
+static const struct factors_data sun6i_ar100_data = {
+ .mux = 16,
+ .muxmask = GENMASK(1, 0),
+ .table = &sun6i_ar100_config,
+ .getter = sun6i_get_ar100_factors,
+};
+
+static DEFINE_SPINLOCK(sun6i_ar100_lock);
+
static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
{
- const char *parents[SUN6I_AR100_MAX_PARENTS];
struct device_node *np = pdev->dev.of_node;
- const char *clk_name = np->name;
- struct clk_init_data init;
- struct ar100_clk *ar100;
struct resource *r;
+ void __iomem *reg;
struct clk *clk;
- int nparents;
-
- ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL);
- if (!ar100)
- return -ENOMEM;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ar100->reg = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ar100->reg))
- return PTR_ERR(ar100->reg);
+ reg = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
- nparents = of_clk_get_parent_count(np);
- if (nparents > SUN6I_AR100_MAX_PARENTS)
- nparents = SUN6I_AR100_MAX_PARENTS;
-
- of_clk_parent_fill(np, parents, nparents);
+ clk = sunxi_factors_register(np, &sun6i_ar100_data, &sun6i_ar100_lock,
+ reg);
+ if (!clk)
+ return -ENOMEM;
- of_property_read_string(np, "clock-output-names", &clk_name);
+ platform_set_drvdata(pdev, clk);
- init.name = clk_name;
- init.ops = &ar100_ops;
- init.parent_names = parents;
- init.num_parents = nparents;
- init.flags = 0;
+ return 0;
+}
- ar100->hw.init = &init;
+static int sun6i_a31_ar100_clk_remove(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk *clk = platform_get_drvdata(pdev);
- clk = clk_register(&pdev->dev, &ar100->hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ sunxi_factors_unregister(np, clk);
- return of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ return 0;
}
static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
@@ -227,6 +113,7 @@ static struct platform_driver sun6i_a31_ar100_clk_driver = {
.of_match_table = sun6i_a31_ar100_clk_dt_ids,
},
.probe = sun6i_a31_ar100_clk_probe,
+ .remove = sun6i_a31_ar100_clk_remove,
};
module_platform_driver(sun6i_a31_ar100_clk_driver);
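
A standalone illustration of sun6i_get_ar100_factors(): the code picks the smallest power-of-two shift that brings the linear divider under 32, then clamps the divider to 32. A userspace sketch assuming a 600 MHz parent:

    #include <stdio.h>

    int main(void)
    {
            unsigned long parent = 600000000, rate = 10000000;
            unsigned long div = (parent + rate - 1) / rate; /* DIV_ROUND_UP = 60 */
            int shift;

            if (div < 32)
                    shift = 0;
            else if (div >> 1 < 32)
                    shift = 1;
            else if (div >> 2 < 32)
                    shift = 2;
            else
                    shift = 3;

            div >>= shift;          /* 60 >> 1 = 30 */
            if (div > 32)
                    div = 32;

            /* p = 1, m = 29: (600 MHz >> 1) / 30 = 10 MHz exactly */
            printf("p=%d m=%lu -> %lu Hz\n", shift, div - 1,
                   (parent >> shift) / div);
            return 0;
    }
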
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index 7ba61103a6f5..2ea61debffc1 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -36,7 +36,7 @@ static struct clk *sun8i_a23_apb0_register(struct device_node *node,
/* The A23 APB0 clock is a standard 2 bit wide divider clock */
clk = clk_register_divider(NULL, clk_name, clk_parent, 0, reg,
- 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ 0, 2, 0, NULL);
if (IS_ERR(clk))
return clk;
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index e32d18ba252b..63fdb790df29 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -17,7 +17,6 @@
* GNU General Public License for more details.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -110,3 +109,5 @@ err_unmap:
CLK_OF_DECLARE(sun8i_h3_bus_gates, "allwinner,sun8i-h3-bus-gates-clk",
sun8i_h3_bus_gates_init);
+CLK_OF_DECLARE(sun8i_a83t_bus_gates, "allwinner,sun8i-a83t-bus-gates-clk",
+ sun8i_h3_bus_gates_init);
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index bf117a636d23..411d3033a96e 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -15,74 +15,106 @@
*/
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/of_address.h>
-#include "clk-factors.h"
+#define SUN8I_MBUS_ENABLE 31
+#define SUN8I_MBUS_MUX_SHIFT 24
+#define SUN8I_MBUS_MUX_MASK 0x3
+#define SUN8I_MBUS_DIV_SHIFT 0
+#define SUN8I_MBUS_DIV_WIDTH 3
+#define SUN8I_MBUS_MAX_PARENTS 4
-/**
- * sun8i_a23_get_mbus_factors() - calculates m factor for MBUS clocks
- * MBUS rate is calculated as follows
- * rate = parent_rate / (m + 1);
- */
+static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
-static void sun8i_a23_get_mbus_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void __init sun8i_a23_mbus_setup(struct device_node *node)
{
- u8 div;
+ int num_parents = of_clk_get_parent_count(node);
+ const char **parents;
+ const char *clk_name = node->name;
+ struct resource res;
+ struct clk_divider *div;
+ struct clk_gate *gate;
+ struct clk_mux *mux;
+ struct clk *clk;
+ void __iomem *reg;
+ int err;
- /*
- * These clocks can only divide, so we will never be able to
- * achieve frequencies higher than the parent frequency
- */
- if (*freq > parent_rate)
- *freq = parent_rate;
+ parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
+ if (!parents)
+ return;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (!reg) {
+ pr_err("Could not get registers for sun8i-mbus-clk\n");
+ goto err_free_parents;
+ }
- if (div > 8)
- div = 8;
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ goto err_unmap;
- *freq = parent_rate / div;
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ goto err_free_div;
- /* we were called to round the frequency, we can now return */
- if (m == NULL)
- return;
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto err_free_mux;
- *m = div - 1;
-}
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ of_clk_parent_fill(node, parents, num_parents);
-static struct clk_factors_config sun8i_a23_mbus_config = {
- .mshift = 0,
- .mwidth = 3,
-};
+ gate->reg = reg;
+ gate->bit_idx = SUN8I_MBUS_ENABLE;
+ gate->lock = &sun8i_a23_mbus_lock;
-static const struct factors_data sun8i_a23_mbus_data __initconst = {
- .enable = 31,
- .mux = 24,
- .muxmask = BIT(1) | BIT(0),
- .table = &sun8i_a23_mbus_config,
- .getter = sun8i_a23_get_mbus_factors,
-};
+ div->reg = reg;
+ div->shift = SUN8I_MBUS_DIV_SHIFT;
+ div->width = SUN8I_MBUS_DIV_WIDTH;
+ div->lock = &sun8i_a23_mbus_lock;
-static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
-
-static void __init sun8i_a23_mbus_setup(struct device_node *node)
-{
- struct clk *mbus;
- void __iomem *reg;
+ mux->reg = reg;
+ mux->shift = SUN8I_MBUS_MUX_SHIFT;
+ mux->mask = SUN8I_MBUS_MUX_MASK;
+ mux->lock = &sun8i_a23_mbus_lock;
- reg = of_iomap(node, 0);
- if (!reg) {
- pr_err("Could not get registers for a23-mbus-clk\n");
- return;
- }
+ clk = clk_register_composite(NULL, clk_name, parents, num_parents,
+ &mux->hw, &clk_mux_ops,
+ &div->hw, &clk_divider_ops,
+ &gate->hw, &clk_gate_ops,
+ 0);
+ if (IS_ERR(clk))
+ goto err_free_gate;
- mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
- &sun8i_a23_mbus_lock, reg);
+ err = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (err)
+ goto err_unregister_clk;
+ kfree(parents); /* parents is deep copied */
/* The MBUS clock needs to be always enabled */
- __clk_get(mbus);
- clk_prepare_enable(mbus);
+ __clk_get(clk);
+ clk_prepare_enable(clk);
+
+ return;
+
+err_unregister_clk:
+ /* TODO: The composite clock stuff will leak a bit here. */
+ clk_unregister(clk);
+err_free_gate:
+ kfree(gate);
+err_free_mux:
+ kfree(mux);
+err_free_div:
+ kfree(div);
+err_unmap:
+ iounmap(reg);
+ of_address_to_resource(node, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+err_free_parents:
+ kfree(parents);
}
CLK_OF_DECLARE(sun8i_a23_mbus, "allwinner,sun8i-a23-mbus-clk", sun8i_a23_mbus_setup);
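
The composite registered above chains a 2-bit mux (bits 25:24), a 3-bit linear
divider (bits 2:0) and a gate (bit 31) over a single register. A rough sketch of
how one raw register value maps to a rate, assuming the selected parent's rate is
already known (hypothetical helper, for illustration only):

	static unsigned long mbus_rate_from_reg(u32 reg, unsigned long parent_rate)
	{
		/* gate closed: no output */
		if (!(reg & BIT(SUN8I_MBUS_ENABLE)))
			return 0;

		/* 3-bit field at SUN8I_MBUS_DIV_SHIFT, linear: divisor = field + 1 */
		return parent_rate / (((reg >> SUN8I_MBUS_DIV_SHIFT) & 0x7) + 1);
	}
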
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
index 6c4c98324d3c..43f014f85803 100644
--- a/drivers/clk/sunxi/clk-sun9i-core.c
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -32,15 +33,14 @@
* p and m are named div1 and div2 in Allwinner's SDK
*/
-static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
- u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
+static void sun9i_a80_get_pll4_factors(struct factors_request *req)
{
int n;
int m = 1;
int p = 1;
/* Normalize value to a 6 MHz multiple (24 MHz / 4) */
- n = DIV_ROUND_UP(*freq, 6000000);
+ n = DIV_ROUND_UP(req->rate, 6000000);
/* If n is too large switch to steps of 12 MHz */
if (n > 255) {
@@ -60,18 +60,13 @@ static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
else if (n < 12)
n = 12;
- *freq = ((24000000 * n) >> p) / (m + 1);
-
- /* we were called to round the frequency, we can now return */
- if (n_ret == NULL)
- return;
-
- *n_ret = n;
- *m_ret = m;
- *p_ret = p;
+ req->rate = ((24000000 * n) >> p) / (m + 1);
+ req->n = n;
+ req->m = m;
+ req->p = p;
}
-static struct clk_factors_config sun9i_a80_pll4_config = {
+static const struct clk_factors_config sun9i_a80_pll4_config = {
.mshift = 18,
.mwidth = 1,
.nshift = 8,
@@ -111,30 +106,24 @@ CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_se
* rate = parent_rate / (m + 1);
*/
-static void sun9i_a80_get_gt_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_gt_factors(struct factors_request *req)
{
u32 div;
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
/* maximum divider is 4 */
if (div > 4)
div = 4;
- *freq = parent_rate / div;
-
- /* we were called to round the frequency, we can now return */
- if (!m)
- return;
-
- *m = div;
+ req->rate = req->parent_rate / div;
+ req->m = div;
}
-static struct clk_factors_config sun9i_a80_gt_config = {
+static const struct clk_factors_config sun9i_a80_gt_config = {
.mshift = 0,
.mwidth = 2,
};
@@ -176,30 +165,24 @@ CLK_OF_DECLARE(sun9i_a80_gt, "allwinner,sun9i-a80-gt-clk", sun9i_a80_gt_setup);
* rate = parent_rate >> p;
*/
-static void sun9i_a80_get_ahb_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_ahb_factors(struct factors_request *req)
{
u32 _p;
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
- _p = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+ _p = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
/* maximum p is 3 */
if (_p > 3)
_p = 3;
- *freq = parent_rate >> _p;
-
- /* we were called to round the frequency, we can now return */
- if (!p)
- return;
-
- *p = _p;
+ req->rate = req->parent_rate >> _p;
+ req->p = _p;
}
-static struct clk_factors_config sun9i_a80_ahb_config = {
+static const struct clk_factors_config sun9i_a80_ahb_config = {
.pshift = 0,
.pwidth = 2,
};
@@ -262,34 +245,25 @@ CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_se
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sun9i_a80_get_apb1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_apb1_factors(struct factors_request *req)
{
u32 div;
- u8 calcm, calcp;
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
/* Highest possible divider is 256 (p = 3, m = 31) */
if (div > 256)
div = 256;
- calcp = order_base_2(div);
- calcm = (parent_rate >> calcp) - 1;
- *freq = (parent_rate >> calcp) / (calcm + 1);
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
-
- *m = calcm;
- *p = calcp;
+ req->p = order_base_2(div);
+ req->m = (req->parent_rate >> req->p) - 1;
+ req->rate = (req->parent_rate >> req->p) / (req->m + 1);
}
-static struct clk_factors_config sun9i_a80_apb1_config = {
+static const struct clk_factors_config sun9i_a80_apb1_config = {
.mshift = 0,
.mwidth = 5,
.pshift = 16,
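
All of these getters are being converted to the factors_request interface from
clk-factors.h, which looks approximately like this (fields as used in this patch):

	struct factors_request {
		unsigned long rate;		/* in: requested rate, out: achieved rate */
		unsigned long parent_rate;	/* in: rate of the (selected) parent */
		u8 parent_index;		/* in: mux setting, where relevant */
		u8 n, k, m, p;			/* out: computed factors */
	};

Since the getters now always fill in the factors, the old convention of passing
NULL factor pointers when merely rounding a rate goes away, along with the early
returns seen in the removed lines.
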
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a9b176139aca..028dd832a39f 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -83,7 +83,7 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sun9i_mmc_reset_ops = {
+static const struct reset_control_ops sun9i_mmc_reset_ops = {
.assert = sun9i_mmc_reset_assert,
.deassert = sun9i_mmc_reset_deassert,
};
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 5ba2188ee99c..91de0a006773 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -28,214 +28,6 @@
static DEFINE_SPINLOCK(clk_lock);
-/**
- * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
- */
-
-#define SUN6I_AHB1_MAX_PARENTS 4
-#define SUN6I_AHB1_MUX_PARENT_PLL6 3
-#define SUN6I_AHB1_MUX_SHIFT 12
-/* un-shifted mask is what mux_clk expects */
-#define SUN6I_AHB1_MUX_MASK 0x3
-#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
- SUN6I_AHB1_MUX_MASK)
-
-#define SUN6I_AHB1_DIV_SHIFT 4
-#define SUN6I_AHB1_DIV_MASK (0x3 << SUN6I_AHB1_DIV_SHIFT)
-#define SUN6I_AHB1_DIV_GET(reg) ((reg & SUN6I_AHB1_DIV_MASK) >> \
- SUN6I_AHB1_DIV_SHIFT)
-#define SUN6I_AHB1_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_DIV_MASK) | \
- (div << SUN6I_AHB1_DIV_SHIFT))
-#define SUN6I_AHB1_PLL6_DIV_SHIFT 6
-#define SUN6I_AHB1_PLL6_DIV_MASK (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
-#define SUN6I_AHB1_PLL6_DIV_GET(reg) ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
- SUN6I_AHB1_PLL6_DIV_SHIFT)
-#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
- (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
-
-struct sun6i_ahb1_clk {
- struct clk_hw hw;
- void __iomem *reg;
-};
-
-#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
-
-static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
- unsigned long rate;
- u32 reg;
-
- /* Fetch the register value */
- reg = readl(ahb1->reg);
-
- /* apply pre-divider first if parent is pll6 */
- if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
- parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
-
- /* clk divider */
- rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
-
- return rate;
-}
-
-static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
- u8 parent, unsigned long parent_rate)
-{
- u8 div, calcp, calcm = 1;
-
- /*
- * clock can only divide, so we will never be able to achieve
- * frequencies higher than the parent frequency
- */
- if (parent_rate && rate > parent_rate)
- rate = parent_rate;
-
- div = DIV_ROUND_UP(parent_rate, rate);
-
- /* calculate pre-divider if parent is pll6 */
- if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
- if (div < 4)
- calcp = 0;
- else if (div / 2 < 4)
- calcp = 1;
- else if (div / 4 < 4)
- calcp = 2;
- else
- calcp = 3;
-
- calcm = DIV_ROUND_UP(div, 1 << calcp);
- } else {
- calcp = __roundup_pow_of_two(div);
- calcp = calcp > 3 ? 3 : calcp;
- }
-
- /* we were asked to pass back divider values */
- if (divp) {
- *divp = calcp;
- *pre_divp = calcm - 1;
- }
-
- return (parent_rate / calcm) >> calcp;
-}
-
-static int sun6i_ahb1_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- struct clk_hw *parent, *best_parent = NULL;
- int i, num_parents;
- unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
-
- /* find the parent that can help provide the fastest rate <= rate */
- num_parents = clk_hw_get_num_parents(hw);
- for (i = 0; i < num_parents; i++) {
- parent = clk_hw_get_parent_by_index(hw, i);
- if (!parent)
- continue;
- if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
- parent_rate = clk_hw_round_rate(parent, req->rate);
- else
- parent_rate = clk_hw_get_rate(parent);
-
- child_rate = sun6i_ahb1_clk_round(req->rate, NULL, NULL, i,
- parent_rate);
-
- if (child_rate <= req->rate && child_rate > best_child_rate) {
- best_parent = parent;
- best = parent_rate;
- best_child_rate = child_rate;
- }
- }
-
- if (!best_parent)
- return -EINVAL;
-
- req->best_parent_hw = best_parent;
- req->best_parent_rate = best;
- req->rate = best_child_rate;
-
- return 0;
-}
-
-static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
- unsigned long flags;
- u8 div, pre_div, parent;
- u32 reg;
-
- spin_lock_irqsave(&clk_lock, flags);
-
- reg = readl(ahb1->reg);
-
- /* need to know which parent is used to apply pre-divider */
- parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
- sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
-
- reg = SUN6I_AHB1_DIV_SET(reg, div);
- reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
- writel(reg, ahb1->reg);
-
- spin_unlock_irqrestore(&clk_lock, flags);
-
- return 0;
-}
-
-static const struct clk_ops sun6i_ahb1_clk_ops = {
- .determine_rate = sun6i_ahb1_clk_determine_rate,
- .recalc_rate = sun6i_ahb1_clk_recalc_rate,
- .set_rate = sun6i_ahb1_clk_set_rate,
-};
-
-static void __init sun6i_ahb1_clk_setup(struct device_node *node)
-{
- struct clk *clk;
- struct sun6i_ahb1_clk *ahb1;
- struct clk_mux *mux;
- const char *clk_name = node->name;
- const char *parents[SUN6I_AHB1_MAX_PARENTS];
- void __iomem *reg;
- int i;
-
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg))
- return;
-
- /* we have a mux, we will have >1 parents */
- i = of_clk_parent_fill(node, parents, SUN6I_AHB1_MAX_PARENTS);
- of_property_read_string(node, "clock-output-names", &clk_name);
-
- ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
- if (!ahb1)
- return;
-
- mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
- if (!mux) {
- kfree(ahb1);
- return;
- }
-
- /* set up clock properties */
- mux->reg = reg;
- mux->shift = SUN6I_AHB1_MUX_SHIFT;
- mux->mask = SUN6I_AHB1_MUX_MASK;
- mux->lock = &clk_lock;
- ahb1->reg = reg;
-
- clk = clk_register_composite(NULL, clk_name, parents, i,
- &mux->hw, &clk_mux_ops,
- &ahb1->hw, &sun6i_ahb1_clk_ops,
- NULL, NULL, 0);
-
- if (!IS_ERR(clk)) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
- }
-}
-CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
-
/* Maximum number of parents our clocks have */
#define SUNXI_MAX_PARENTS 5
@@ -246,49 +38,45 @@ CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_se
* parent_rate is always 24 MHz
*/
-static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_pll1_factors(struct factors_request *req)
{
u8 div;
/* Normalize value to a 6M multiple */
- div = *freq / 6000000;
- *freq = 6000000 * div;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
+ div = req->rate / 6000000;
+ req->rate = 6000000 * div;
/* m is always zero for pll1 */
- *m = 0;
+ req->m = 0;
/* k is 1 only on these cases */
- if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
- *k = 1;
+ if (req->rate >= 768000000 || req->rate == 42000000 ||
+ req->rate == 54000000)
+ req->k = 1;
else
- *k = 0;
+ req->k = 0;
/* p will be 3 for divs under 10 */
if (div < 10)
- *p = 3;
+ req->p = 3;
/* p will be 2 for divs between 10 - 20 and odd divs under 32 */
else if (div < 20 || (div < 32 && (div & 1)))
- *p = 2;
+ req->p = 2;
/* p will be 1 for even divs under 32, divs under 40 and odd pairs
* of divs between 40-62 */
else if (div < 40 || (div < 64 && (div & 2)))
- *p = 1;
+ req->p = 1;
/* any other entries have p = 0 */
else
- *p = 0;
+ req->p = 0;
/* calculate a suitable n based on k and p */
- div <<= *p;
- div /= (*k + 1);
- *n = div / 4;
+ div <<= req->p;
+ div /= (req->k + 1);
+ req->n = div / 4;
}
/**
@@ -297,15 +85,14 @@ static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
* rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
* parent_rate should always be 24 MHz
*/
-static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun6i_a31_get_pll1_factors(struct factors_request *req)
{
/*
* We can operate only on MHz; this will make our life easier
* later.
*/
- u32 freq_mhz = *freq / 1000000;
- u32 parent_freq_mhz = parent_rate / 1000000;
+ u32 freq_mhz = req->rate / 1000000;
+ u32 parent_freq_mhz = req->parent_rate / 1000000;
/*
* Round down the frequency to the closest multiple of either
@@ -319,28 +106,20 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
else
freq_mhz = round_freq_16;
- *freq = freq_mhz * 1000000;
-
- /*
- * If the factors pointer are null, we were just called to
- * round down the frequency.
- * Exit.
- */
- if (n == NULL)
- return;
+ req->rate = freq_mhz * 1000000;
/* If the frequency is a multiple of 32 MHz, k is always 3 */
if (!(freq_mhz % 32))
- *k = 3;
+ req->k = 3;
/* If the frequency is a multiple of 9 MHz, k is always 2 */
else if (!(freq_mhz % 9))
- *k = 2;
+ req->k = 2;
/* If the frequency is a multiple of 8 MHz, k is always 1 */
else if (!(freq_mhz % 8))
- *k = 1;
+ req->k = 1;
/* Otherwise, we don't use the k factor */
else
- *k = 0;
+ req->k = 0;
/*
* If the frequency is a multiple of 2 but not a multiple of
@@ -351,27 +130,28 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
* somehow relates to this frequency.
*/
if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
- *m = 2;
+ req->m = 2;
/*
* If the frequency is a multiple of 6MHz, but the factor is
* odd, m will be 3
*/
else if ((freq_mhz / 6) & 1)
- *m = 3;
+ req->m = 3;
/* Otherwise, we end up with m = 1 */
else
- *m = 1;
+ req->m = 1;
/* Calculate n thanks to the above factors we already got */
- *n = freq_mhz * (*m + 1) / ((*k + 1) * parent_freq_mhz) - 1;
+ req->n = freq_mhz * (req->m + 1) / ((req->k + 1) * parent_freq_mhz)
+ - 1;
/*
* If n ends up out of bounds and we can still decrease
* m, do it.
*/
- if ((*n + 1) > 31 && (*m + 1) > 1) {
- *n = (*n + 1) / 2 - 1;
- *m = (*m + 1) / 2 - 1;
+ if ((req->n + 1) > 31 && (req->m + 1) > 1) {
+ req->n = (req->n + 1) / 2 - 1;
+ req->m = (req->m + 1) / 2 - 1;
}
}
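
A worked example for the getter above (illustrative): requesting 1008 MHz from the
24 MHz parent gives freq_mhz = 1008, a multiple of 9 MHz but not of 32 MHz, so
k = 2; 1008 is a multiple of 6 MHz with an even quotient (1008 / 6 = 168), so
m = 1; then n = 1008 * (1 + 1) / ((2 + 1) * 24) - 1 = 27. Check:
24 MHz * (27 + 1) * (2 + 1) / (1 + 1) = 1008 MHz, and n + 1 = 28 stays within the
limit of 31.
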
@@ -382,45 +162,41 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
* parent_rate is always 24 MHz
*/
-static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun8i_a23_get_pll1_factors(struct factors_request *req)
{
u8 div;
/* Normalize value to a 6M multiple */
- div = *freq / 6000000;
- *freq = 6000000 * div;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
+ div = req->rate / 6000000;
+ req->rate = 6000000 * div;
/* m is always zero for pll1 */
- *m = 0;
+ req->m = 0;
/* k is 1 only on these cases */
- if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
- *k = 1;
+ if (req->rate >= 768000000 || req->rate == 42000000 ||
+ req->rate == 54000000)
+ req->k = 1;
else
- *k = 0;
+ req->k = 0;
/* p will be 2 for divs under 20 and odd divs under 32 */
if (div < 20 || (div < 32 && (div & 1)))
- *p = 2;
+ req->p = 2;
/* p will be 1 for even divs under 32, divs under 40 and odd pairs
* of divs between 40-62 */
else if (div < 40 || (div < 64 && (div & 2)))
- *p = 1;
+ req->p = 1;
/* any other entries have p = 0 */
else
- *p = 0;
+ req->p = 0;
/* calculate a suitable n based on k and p */
- div <<= *p;
- div /= (*k + 1);
- *n = div / 4 - 1;
+ div <<= req->p;
+ div /= (req->k + 1);
+ req->n = div / 4 - 1;
}
/**
@@ -430,29 +206,24 @@ static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
* parent_rate is always 24 MHz
*/
-static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_pll5_factors(struct factors_request *req)
{
u8 div;
/* Normalize value to a parent_rate multiple (24M) */
- div = *freq / parent_rate;
- *freq = parent_rate * div;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
+ div = req->rate / req->parent_rate;
+ req->rate = req->parent_rate * div;
if (div < 31)
- *k = 0;
+ req->k = 0;
else if (div / 2 < 31)
- *k = 1;
+ req->k = 1;
else if (div / 3 < 31)
- *k = 2;
+ req->k = 2;
else
- *k = 3;
+ req->k = 3;
- *n = DIV_ROUND_UP(div, (*k+1));
+ req->n = DIV_ROUND_UP(div, (req->k + 1));
}
/**
@@ -462,24 +233,19 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
* parent_rate is always 24 MHz
*/
-static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun6i_a31_get_pll6_factors(struct factors_request *req)
{
u8 div;
/* Normalize value to a parent_rate multiple (24M) */
- div = *freq / parent_rate;
- *freq = parent_rate * div;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
+ div = req->rate / req->parent_rate;
+ req->rate = req->parent_rate * div;
- *k = div / 32;
- if (*k > 3)
- *k = 3;
+ req->k = div / 32;
+ if (req->k > 3)
+ req->k = 3;
- *n = DIV_ROUND_UP(div, (*k+1)) - 1;
+ req->n = DIV_ROUND_UP(div, (req->k + 1)) - 1;
}
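
Illustrative numbers for the PLL6 getter above: a 600 MHz request on a 24 MHz
parent gives div = 25, k = 25 / 32 = 0 and n = DIV_ROUND_UP(25, 1) - 1 = 24,
matching 24 MHz * (24 + 1) * (0 + 1) = 600 MHz; a 1200 MHz request gives div = 50,
k = 1, n = 24, matching 24 MHz * 25 * 2 = 1200 MHz.
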
/**
@@ -488,37 +254,94 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
* rate = parent_rate >> p
*/
-static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun5i_a13_get_ahb_factors(struct factors_request *req)
{
u32 div;
/* divide only */
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
/*
* user manual says valid speed is 8k ~ 276M, but tests show it
* can work at speeds up to 300M, just after reparenting to pll6
*/
- if (*freq < 8000)
- *freq = 8000;
- if (*freq > 300000000)
- *freq = 300000000;
+ if (req->rate < 8000)
+ req->rate = 8000;
+ if (req->rate > 300000000)
+ req->rate = 300000000;
- div = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+ div = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
/* p = 0 ~ 3 */
if (div > 3)
div = 3;
- *freq = parent_rate >> div;
+ req->rate = req->parent_rate >> div;
- /* we were called to round the frequency, we can now return */
- if (p == NULL)
- return;
+ req->p = div;
+}
+
+#define SUN6I_AHB1_PARENT_PLL6 3
+
+/**
+ * sun6i_get_ahb1_factors() - calculates m, p factors for AHB1
+ * AHB rate is calculated as follows
+ * rate = parent_rate >> p
+ *
+ * if parent is pll6, then
+ * parent_rate = pll6 rate / (m + 1)
+ */
+
+static void sun6i_get_ahb1_factors(struct factors_request *req)
+{
+ u8 div, calcp, calcm = 1;
+
+ /*
+ * clock can only divide, so we will never be able to achieve
+ * frequencies higher than the parent frequency
+ */
+ if (req->parent_rate && req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
+
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
+
+ /* calculate pre-divider if parent is pll6 */
+ if (req->parent_index == SUN6I_AHB1_PARENT_PLL6) {
+ if (div < 4)
+ calcp = 0;
+ else if (div / 2 < 4)
+ calcp = 1;
+ else if (div / 4 < 4)
+ calcp = 2;
+ else
+ calcp = 3;
+
+ calcm = DIV_ROUND_UP(div, 1 << calcp);
+ } else {
+ calcp = __roundup_pow_of_two(div);
+ calcp = calcp > 3 ? 3 : calcp;
+ }
- *p = div;
+ req->rate = (req->parent_rate / calcm) >> calcp;
+ req->p = calcp;
+ req->m = calcm - 1;
+}
+
+/**
+ * sun6i_ahb1_recalc() - calculates AHB clock rate from m, p factors and
+ * parent index
+ */
+static void sun6i_ahb1_recalc(struct factors_request *req)
+{
+ req->rate = req->parent_rate;
+
+ /* apply pre-divider first if parent is pll6 */
+ if (req->parent_index == SUN6I_AHB1_PARENT_PLL6)
+ req->rate /= req->m + 1;
+
+ /* clk divider */
+ req->rate >>= req->p;
}
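
The two callbacks above are inverses of each other. For example (illustrative),
requesting 100 MHz while muxed to a 600 MHz PLL6 gives div = 6, hence calcp = 1
(since 6 / 2 < 4) and calcm = DIV_ROUND_UP(6, 1 << 1) = 3, so p = 1, m = 2 and
rate = (600 / 3) >> 1 = 100 MHz; feeding m = 2, p = 1 back through
sun6i_ahb1_recalc() reproduces (600 / (2 + 1)) >> 1 = 100 MHz.
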
/**
@@ -527,39 +350,34 @@ static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_apb1_factors(struct factors_request *req)
{
u8 calcm, calcp;
+ int div;
- if (parent_rate < *freq)
- *freq = parent_rate;
+ if (req->parent_rate < req->rate)
+ req->rate = req->parent_rate;
- parent_rate = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
/* Invalid rate! */
- if (parent_rate > 32)
+ if (div > 32)
return;
- if (parent_rate <= 4)
+ if (div <= 4)
calcp = 0;
- else if (parent_rate <= 8)
+ else if (div <= 8)
calcp = 1;
- else if (parent_rate <= 16)
+ else if (div <= 16)
calcp = 2;
else
calcp = 3;
- calcm = (parent_rate >> calcp) - 1;
+	calcm = (div >> calcp) - 1;
- *freq = (parent_rate >> calcp) / (calcm + 1);
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
-
- *m = calcm;
- *p = calcp;
+ req->rate = (req->parent_rate >> calcp) / (calcm + 1);
+ req->m = calcm;
+ req->p = calcp;
}
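
Illustrative numbers for the getter above: a 2 MHz request on a 24 MHz parent
gives div = 12, hence calcp = 2, calcm = (12 >> 2) - 1 = 2, and
rate = (24 MHz >> 2) / (2 + 1) = 2 MHz, so the m/p pair reproduces the requested
division by 12.
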
@@ -571,17 +389,16 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
* rate = (parent_rate >> p) / (m + 1);
*/
-static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
- u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun7i_a20_get_out_factors(struct factors_request *req)
{
u8 div, calcm, calcp;
/* These clocks can only divide, so we will never be able to achieve
* frequencies higher than the parent frequency */
- if (*freq > parent_rate)
- *freq = parent_rate;
+ if (req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
- div = DIV_ROUND_UP(parent_rate, *freq);
+ div = DIV_ROUND_UP(req->parent_rate, req->rate);
if (div < 32)
calcp = 0;
@@ -594,21 +411,16 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
calcm = DIV_ROUND_UP(div, 1 << calcp);
- *freq = (parent_rate >> calcp) / calcm;
-
- /* we were called to round the frequency, we can now return */
- if (n == NULL)
- return;
-
- *m = calcm - 1;
- *p = calcp;
+ req->rate = (req->parent_rate >> calcp) / calcm;
+ req->m = calcm - 1;
+ req->p = calcp;
}
/**
* sunxi_factors_clk_setup() - Setup function for factor clocks
*/
-static struct clk_factors_config sun4i_pll1_config = {
+static const struct clk_factors_config sun4i_pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -619,7 +431,7 @@ static struct clk_factors_config sun4i_pll1_config = {
.pwidth = 2,
};
-static struct clk_factors_config sun6i_a31_pll1_config = {
+static const struct clk_factors_config sun6i_a31_pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -629,7 +441,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
.n_start = 1,
};
-static struct clk_factors_config sun8i_a23_pll1_config = {
+static const struct clk_factors_config sun8i_a23_pll1_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -641,14 +453,14 @@ static struct clk_factors_config sun8i_a23_pll1_config = {
.n_start = 1,
};
-static struct clk_factors_config sun4i_pll5_config = {
+static const struct clk_factors_config sun4i_pll5_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
.kwidth = 2,
};
-static struct clk_factors_config sun6i_a31_pll6_config = {
+static const struct clk_factors_config sun6i_a31_pll6_config = {
.nshift = 8,
.nwidth = 5,
.kshift = 4,
@@ -656,12 +468,19 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
.n_start = 1,
};
-static struct clk_factors_config sun5i_a13_ahb_config = {
+static const struct clk_factors_config sun5i_a13_ahb_config = {
.pshift = 4,
.pwidth = 2,
};
-static struct clk_factors_config sun4i_apb1_config = {
+static const struct clk_factors_config sun6i_ahb1_config = {
+ .mshift = 6,
+ .mwidth = 2,
+ .pshift = 4,
+ .pwidth = 2,
+};
+
+static const struct clk_factors_config sun4i_apb1_config = {
.mshift = 0,
.mwidth = 5,
.pshift = 16,
@@ -669,7 +488,7 @@ static struct clk_factors_config sun4i_apb1_config = {
};
/* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun7i_a20_out_config = {
+static const struct clk_factors_config sun7i_a20_out_config = {
.mshift = 8,
.mwidth = 5,
.pshift = 20,
@@ -728,6 +547,14 @@ static const struct factors_data sun5i_a13_ahb_data __initconst = {
.getter = sun5i_a13_get_ahb_factors,
};
+static const struct factors_data sun6i_ahb1_data __initconst = {
+ .mux = 12,
+ .muxmask = BIT(1) | BIT(0),
+ .table = &sun6i_ahb1_config,
+ .getter = sun6i_get_ahb1_factors,
+ .recalc = sun6i_ahb1_recalc,
+};
+
static const struct factors_data sun4i_apb1_data __initconst = {
.mux = 24,
.muxmask = BIT(1) | BIT(0),
@@ -758,6 +585,61 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
return sunxi_factors_register(node, data, &clk_lock, reg);
}
+static void __init sun4i_pll1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun4i_pll1_data);
+}
+CLK_OF_DECLARE(sun4i_pll1, "allwinner,sun4i-a10-pll1-clk",
+ sun4i_pll1_clk_setup);
+
+static void __init sun6i_pll1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun6i_a31_pll1_data);
+}
+CLK_OF_DECLARE(sun6i_pll1, "allwinner,sun6i-a31-pll1-clk",
+ sun6i_pll1_clk_setup);
+
+static void __init sun8i_pll1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun8i_a23_pll1_data);
+}
+CLK_OF_DECLARE(sun8i_pll1, "allwinner,sun8i-a23-pll1-clk",
+ sun8i_pll1_clk_setup);
+
+static void __init sun7i_pll4_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun7i_a20_pll4_data);
+}
+CLK_OF_DECLARE(sun7i_pll4, "allwinner,sun7i-a20-pll4-clk",
+ sun7i_pll4_clk_setup);
+
+static void __init sun5i_ahb_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun5i_a13_ahb_data);
+}
+CLK_OF_DECLARE(sun5i_ahb, "allwinner,sun5i-a13-ahb-clk",
+ sun5i_ahb_clk_setup);
+
+static void __init sun6i_ahb1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun6i_ahb1_data);
+}
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk",
+ sun6i_ahb1_clk_setup);
+
+static void __init sun4i_apb1_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun4i_apb1_data);
+}
+CLK_OF_DECLARE(sun4i_apb1, "allwinner,sun4i-a10-apb1-clk",
+ sun4i_apb1_clk_setup);
+
+static void __init sun7i_out_clk_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun7i_a20_out_data);
+}
+CLK_OF_DECLARE(sun7i_out, "allwinner,sun7i-a20-out-clk",
+ sun7i_out_clk_setup);
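
The wrappers above all follow one pattern: a trivial __init stub binds the per-SoC
factors data to the shared setup helper, replacing the of_device_id match tables
removed further down. The general shape, with hypothetical names:

	static void __init foo_pll_clk_setup(struct device_node *node)
	{
		/* foo_pll_data stands in for any per-SoC factors_data */
		sunxi_factors_clk_setup(node, &foo_pll_data);
	}
	CLK_OF_DECLARE(foo_pll, "vendor,foo-pll-clk", foo_pll_clk_setup);
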
/**
@@ -782,8 +664,8 @@ static const struct mux_data sun8i_h3_ahb2_mux_data __initconst = {
.shift = 0,
};
-static void __init sunxi_mux_clk_setup(struct device_node *node,
- struct mux_data *data)
+static struct clk * __init sunxi_mux_clk_setup(struct device_node *node,
+ const struct mux_data *data)
{
struct clk *clk;
const char *clk_name = node->name;
@@ -792,21 +674,71 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
int i;
reg = of_iomap(node, 0);
+ if (!reg) {
+ pr_err("Could not map registers for mux-clk: %s\n",
+ of_node_full_name(node));
+ return NULL;
+ }
i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS);
- of_property_read_string(node, "clock-output-names", &clk_name);
+ if (of_property_read_string(node, "clock-output-names", &clk_name)) {
+ pr_err("%s: could not read clock-output-names from \"%s\"\n",
+ __func__, of_node_full_name(node));
+ goto out_unmap;
+ }
clk = clk_register_mux(NULL, clk_name, parents, i,
CLK_SET_RATE_PARENT, reg,
data->shift, SUNXI_MUX_GATE_WIDTH,
0, &clk_lock);
- if (clk) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register mux clock %s: %ld\n", __func__,
+ clk_name, PTR_ERR(clk));
+ goto out_unmap;
+ }
+
+ if (of_clk_add_provider(node, of_clk_src_simple_get, clk)) {
+ pr_err("%s: failed to add clock provider for %s\n",
+ __func__, clk_name);
+		clk_unregister_mux(clk);
+ goto out_unmap;
}
+
+ return clk;
+out_unmap:
+ iounmap(reg);
+ return NULL;
+}
+
+static void __init sun4i_cpu_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+
+ clk = sunxi_mux_clk_setup(node, &sun4i_cpu_mux_data);
+ if (!clk)
+ return;
+
+ /* Protect CPU clock */
+ __clk_get(clk);
+ clk_prepare_enable(clk);
+}
+CLK_OF_DECLARE(sun4i_cpu, "allwinner,sun4i-a10-cpu-clk",
+ sun4i_cpu_clk_setup);
+
+static void __init sun6i_ahb1_mux_clk_setup(struct device_node *node)
+{
+ sunxi_mux_clk_setup(node, &sun6i_a31_ahb1_mux_data);
}
+CLK_OF_DECLARE(sun6i_ahb1_mux, "allwinner,sun6i-a31-ahb1-mux-clk",
+ sun6i_ahb1_mux_clk_setup);
+static void __init sun8i_ahb2_clk_setup(struct device_node *node)
+{
+ sunxi_mux_clk_setup(node, &sun8i_h3_ahb2_mux_data);
+}
+CLK_OF_DECLARE(sun8i_ahb2, "allwinner,sun8i-h3-ahb2-clk",
+ sun8i_ahb2_clk_setup);
/**
@@ -865,7 +797,7 @@ static const struct div_data sun4i_apb0_data __initconst = {
};
static void __init sunxi_divider_clk_setup(struct device_node *node,
- struct div_data *data)
+ const struct div_data *data)
{
struct clk *clk;
const char *clk_name = node->name;
@@ -873,21 +805,77 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
void __iomem *reg;
reg = of_iomap(node, 0);
+ if (!reg) {
+ pr_err("Could not map registers for mux-clk: %s\n",
+ of_node_full_name(node));
+ return;
+ }
clk_parent = of_clk_get_parent_name(node, 0);
- of_property_read_string(node, "clock-output-names", &clk_name);
+ if (of_property_read_string(node, "clock-output-names", &clk_name)) {
+ pr_err("%s: could not read clock-output-names from \"%s\"\n",
+ __func__, of_node_full_name(node));
+ goto out_unmap;
+ }
clk = clk_register_divider_table(NULL, clk_name, clk_parent, 0,
reg, data->shift, data->width,
data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
data->table, &clk_lock);
- if (clk) {
- of_clk_add_provider(node, of_clk_src_simple_get, clk);
- clk_register_clkdev(clk, clk_name, NULL);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register divider clock %s: %ld\n",
+ __func__, clk_name, PTR_ERR(clk));
+ goto out_unmap;
}
+
+ if (of_clk_add_provider(node, of_clk_src_simple_get, clk)) {
+ pr_err("%s: failed to add clock provider for %s\n",
+ __func__, clk_name);
+ goto out_unregister;
+ }
+
+ if (clk_register_clkdev(clk, clk_name, NULL)) {
+ of_clk_del_provider(node);
+ goto out_unregister;
+ }
+
+ return;
+out_unregister:
+ clk_unregister_divider(clk);
+
+out_unmap:
+ iounmap(reg);
}
+static void __init sun4i_ahb_clk_setup(struct device_node *node)
+{
+ sunxi_divider_clk_setup(node, &sun4i_ahb_data);
+}
+CLK_OF_DECLARE(sun4i_ahb, "allwinner,sun4i-a10-ahb-clk",
+ sun4i_ahb_clk_setup);
+
+static void __init sun4i_apb0_clk_setup(struct device_node *node)
+{
+ sunxi_divider_clk_setup(node, &sun4i_apb0_data);
+}
+CLK_OF_DECLARE(sun4i_apb0, "allwinner,sun4i-a10-apb0-clk",
+ sun4i_apb0_clk_setup);
+
+static void __init sun4i_axi_clk_setup(struct device_node *node)
+{
+ sunxi_divider_clk_setup(node, &sun4i_axi_data);
+}
+CLK_OF_DECLARE(sun4i_axi, "allwinner,sun4i-a10-axi-clk",
+ sun4i_axi_clk_setup);
+
+static void __init sun8i_axi_clk_setup(struct device_node *node)
+{
+ sunxi_divider_clk_setup(node, &sun8i_a23_axi_data);
+}
+CLK_OF_DECLARE(sun8i_axi, "allwinner,sun8i-a23-axi-clk",
+ sun8i_axi_clk_setup);
+
/**
@@ -975,8 +963,8 @@ static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
* |________________________|
*/
-static void __init sunxi_divs_clk_setup(struct device_node *node,
- struct divs_data *data)
+static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
+ const struct divs_data *data)
{
struct clk_onecell_data *clk_data;
const char *parent;
@@ -997,13 +985,20 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
/* Set up factor clock that we will be dividing */
pclk = sunxi_factors_clk_setup(node, data->factors);
+ if (!pclk)
+ return NULL;
parent = __clk_get_name(pclk);
reg = of_iomap(node, 0);
+ if (!reg) {
+ pr_err("Could not map registers for divs-clk: %s\n",
+ of_node_full_name(node));
+ return NULL;
+ }
clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
if (!clk_data)
- return;
+ goto out_unmap;
clks = kcalloc(ndivs, sizeof(*clks), GFP_KERNEL);
if (!clks)
@@ -1081,146 +1076,54 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
clkflags);
WARN_ON(IS_ERR(clk_data->clks[i]));
- clk_register_clkdev(clks[i], clk_name, NULL);
}
/* Adjust to the real max */
clk_data->clk_num = i;
- of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
-
- return;
+ if (of_clk_add_provider(node, of_clk_src_onecell_get, clk_data)) {
+ pr_err("%s: failed to add clock provider for %s\n",
+ __func__, clk_name);
+ goto free_gate;
+ }
+ return clks;
free_gate:
kfree(gate);
free_clks:
kfree(clks);
free_clkdata:
kfree(clk_data);
+out_unmap:
+ iounmap(reg);
+ return NULL;
}
-
-
-/* Matches for factors clocks */
-static const struct of_device_id clk_factors_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,},
- {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
- {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
- {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
- {.compatible = "allwinner,sun5i-a13-ahb-clk", .data = &sun5i_a13_ahb_data,},
- {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
- {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
- {}
-};
-
-/* Matches for divider clocks */
-static const struct of_device_id clk_div_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,},
- {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
- {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
- {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
- {}
-};
-
-/* Matches for divided outputs */
-static const struct of_device_id clk_divs_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
- {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
- {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_divs_data,},
- {}
-};
-
-/* Matches for mux clocks */
-static const struct of_device_id clk_mux_match[] __initconst = {
- {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
- {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
- {.compatible = "allwinner,sun8i-h3-ahb2-clk", .data = &sun8i_h3_ahb2_mux_data,},
- {}
-};
-
-
-static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match,
- void *function)
-{
- struct device_node *np;
- const struct div_data *data;
- const struct of_device_id *match;
- void (*setup_function)(struct device_node *, const void *) = function;
-
- for_each_matching_node_and_match(np, clk_match, &match) {
- data = match->data;
- setup_function(np, data);
- }
-}
-
-static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
-{
- unsigned int i;
-
- /* Register divided output clocks */
- of_sunxi_table_clock_setup(clk_divs_match, sunxi_divs_clk_setup);
-
- /* Register factor clocks */
- of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
-
- /* Register divider clocks */
- of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
-
- /* Register mux clocks */
- of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
-
- /* Protect the clocks that needs to stay on */
- for (i = 0; i < nclocks; i++) {
- struct clk *clk = clk_get(NULL, clocks[i]);
-
- if (!IS_ERR(clk))
- clk_prepare_enable(clk);
- }
-}
-
-static const char *sun4i_a10_critical_clocks[] __initdata = {
- "pll5_ddr",
-};
-
-static void __init sun4i_a10_init_clocks(struct device_node *node)
+static void __init sun4i_pll5_clk_setup(struct device_node *node)
{
- sunxi_init_clocks(sun4i_a10_critical_clocks,
- ARRAY_SIZE(sun4i_a10_critical_clocks));
-}
-CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks);
+ struct clk **clks;
-static const char *sun5i_critical_clocks[] __initdata = {
- "cpu",
- "pll5_ddr",
-};
+ clks = sunxi_divs_clk_setup(node, &pll5_divs_data);
+ if (!clks)
+ return;
-static void __init sun5i_init_clocks(struct device_node *node)
-{
- sunxi_init_clocks(sun5i_critical_clocks,
- ARRAY_SIZE(sun5i_critical_clocks));
+ /* Protect PLL5_DDR */
+ __clk_get(clks[0]);
+ clk_prepare_enable(clks[0]);
}
-CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sun5i_init_clocks);
-CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sun5i_init_clocks);
-CLK_OF_DECLARE(sun5i_r8_clk_init, "allwinner,sun5i-r8", sun5i_init_clocks);
-CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
-
-static const char *sun6i_critical_clocks[] __initdata = {
- "cpu",
-};
+CLK_OF_DECLARE(sun4i_pll5, "allwinner,sun4i-a10-pll5-clk",
+ sun4i_pll5_clk_setup);
-static void __init sun6i_init_clocks(struct device_node *node)
+static void __init sun4i_pll6_clk_setup(struct device_node *node)
{
- sunxi_init_clocks(sun6i_critical_clocks,
- ARRAY_SIZE(sun6i_critical_clocks));
+ sunxi_divs_clk_setup(node, &pll6_divs_data);
}
-CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
-CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_h3_clk_init, "allwinner,sun8i-h3", sun6i_init_clocks);
+CLK_OF_DECLARE(sun4i_pll6, "allwinner,sun4i-a10-pll6-clk",
+ sun4i_pll6_clk_setup);
-static void __init sun9i_init_clocks(struct device_node *node)
+static void __init sun6i_pll6_clk_setup(struct device_node *node)
{
- sunxi_init_clocks(NULL, 0);
+ sunxi_divs_clk_setup(node, &sun6i_a31_pll6_divs_data);
}
-CLK_OF_DECLARE(sun9i_a80_clk_init, "allwinner,sun9i-a80", sun9i_init_clocks);
+CLK_OF_DECLARE(sun6i_pll6, "allwinner,sun6i-a31-pll6-clk",
+ sun6i_pll6_clk_setup);
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 67b8e38f4ee9..fe0c3d169377 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -76,7 +76,7 @@ static int sunxi_usb_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sunxi_usb_reset_ops = {
+static const struct reset_control_ops sunxi_usb_reset_ops = {
.assert = sunxi_usb_reset_assert,
.deassert = sunxi_usb_reset_deassert,
};
@@ -216,6 +216,18 @@ static void __init sun8i_a23_usb_setup(struct device_node *node)
}
CLK_OF_DECLARE(sun8i_a23_usb, "allwinner,sun8i-a23-usb-clk", sun8i_a23_usb_setup);
+static const struct usb_clk_data sun8i_h3_usb_clk_data __initconst = {
+ .clk_mask = BIT(19) | BIT(18) | BIT(17) | BIT(16) |
+ BIT(11) | BIT(10) | BIT(9) | BIT(8),
+ .reset_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+};
+
+static void __init sun8i_h3_usb_setup(struct device_node *node)
+{
+ sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
+}
+CLK_OF_DECLARE(sun8i_h3_usb, "allwinner,sun8i-h3-usb-clk", sun8i_h3_usb_setup);
+
static const struct usb_clk_data sun9i_a80_usb_mod_data __initconst = {
.clk_mask = BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1),
.reset_mask = BIT(19) | BIT(18) | BIT(17),
@@ -243,15 +255,3 @@ static void __init sun9i_a80_usb_phy_setup(struct device_node *node)
sunxi_usb_clk_setup(node, &sun9i_a80_usb_phy_data, &a80_usb_phy_lock);
}
CLK_OF_DECLARE(sun9i_a80_usb_phy, "allwinner,sun9i-a80-usb-phy-clk", sun9i_a80_usb_phy_setup);
-
-static const struct usb_clk_data sun8i_h3_usb_clk_data __initconst = {
- .clk_mask = BIT(19) | BIT(18) | BIT(17) | BIT(16) |
- BIT(11) | BIT(10) | BIT(9) | BIT(8),
- .reset_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
-};
-
-static void __init sun8i_h3_usb_setup(struct device_node *node)
-{
- sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
-}
-CLK_OF_DECLARE(sun8i_h3_usb, "allwinner,sun8i-h3-usb-clk", sun8i_h3_usb_setup);
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
index c0f7843e80e6..92d04ce2dee6 100644
--- a/drivers/clk/tegra/clk-audio-sync.c
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -72,7 +72,7 @@ struct clk *tegra_clk_register_sync_source(const char *name,
init.ops = &tegra_clk_sync_source_ops;
init.name = name;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
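
The CLK_IS_ROOT removals in the Tegra drivers rely on the framework treating any
clock with zero parents as a root, so the flag carries no information. A minimal
sketch of a fixed-rate root clock (names are illustrative):

	static struct clk * __init register_example_osc(void)
	{
		/* NULL parent name plus flags 0: a root clock by construction */
		return clk_register_fixed_rate(NULL, "example_osc", NULL, 0,
					       24000000);
	}
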
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 86a307b17eb0..19bfa07e24b1 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -995,7 +995,6 @@ static const struct clk_ops dfll_clk_ops = {
};
static struct clk_init_data dfll_clk_init_data = {
- .flags = CLK_IS_ROOT,
.ops = &dfll_clk_ops,
.num_parents = 0,
};
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index da0b5941c89f..d64ec7a1b976 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -52,8 +52,7 @@ int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks,
return -EINVAL;
}
- osc = clk_register_fixed_rate(NULL, "osc", NULL, CLK_IS_ROOT,
- *osc_freq);
+ osc = clk_register_fixed_rate(NULL, "osc", NULL, 0, *osc_freq);
dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, clks);
if (!dt_clk)
@@ -88,8 +87,7 @@ void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
/* clk_32k */
dt_clk = tegra_lookup_dt_id(tegra_clk_clk_32k, tegra_clks);
if (dt_clk) {
- clk = clk_register_fixed_rate(NULL, "clk_32k", NULL,
- CLK_IS_ROOT, 32768);
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768);
*dt_clk = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 4a24aa4bbdea..df47ec3169c3 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -972,8 +972,7 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base)
struct clk *clk;
/* clk_32k */
- clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, CLK_IS_ROOT,
- 32768);
+ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768);
clks[TEGRA114_CLK_CLK_32K] = clk;
/* clk_m_div2 */
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 7a48e986c4c9..7ad63837694f 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -837,15 +837,13 @@ static void __init tegra20_periph_clk_init(void)
clks[TEGRA20_CLK_PEX] = clk;
/* cdev1 */
- clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, CLK_IS_ROOT,
- 26000000);
+ clk = clk_register_fixed_rate(NULL, "cdev1_fixed", NULL, 0, 26000000);
clk = tegra_clk_register_periph_gate("cdev1", "cdev1_fixed", 0,
clk_base, 0, 94, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_CDEV1] = clk;
/* cdev2 */
- clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, CLK_IS_ROOT,
- 26000000);
+ clk = clk_register_fixed_rate(NULL, "cdev2_fixed", NULL, 0, 26000000);
clk = tegra_clk_register_periph_gate("cdev2", "cdev2_fixed", 0,
clk_base, 0, 93, periph_clk_enb_refcnt);
clks[TEGRA20_CLK_CDEV2] = clk;
@@ -879,8 +877,8 @@ static void __init tegra20_osc_clk_init(void)
input_freq = tegra20_clk_measure_input_freq();
/* clk_m */
- clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IS_ROOT |
- CLK_IGNORE_UNUSED, input_freq);
+ clk = clk_register_fixed_rate(NULL, "clk_m", NULL, CLK_IGNORE_UNUSED,
+ input_freq);
clks[TEGRA20_CLK_CLK_M] = clk;
/* pll_ref */
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 2a3a4fe803d6..f60fe2e344ca 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -271,7 +271,7 @@ void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
}
}
-static struct reset_control_ops rst_ops = {
+static const struct reset_control_ops rst_ops = {
.assert = tegra_clk_rst_assert,
.deassert = tegra_clk_rst_deassert,
};
diff --git a/drivers/clk/ti/Kconfig b/drivers/clk/ti/Kconfig
new file mode 100644
index 000000000000..271341787e67
--- /dev/null
+++ b/drivers/clk/ti/Kconfig
@@ -0,0 +1,6 @@
+config COMMON_CLK_TI_ADPLL
+ tristate "Clock driver for dm814x ADPLL"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ default y if SOC_TI81XX
+ ---help---
+ ADPLL clock driver for the dm814x SoC using common clock framework.
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index d4ac96087ccd..0deac9821039 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -1,3 +1,5 @@
+ifeq ($(CONFIG_ARCH_OMAP2PLUS), y)
+
obj-y += clk.o autoidle.o clockdomain.o
clk-common = dpll.o composite.o divider.o gate.o \
fixed-factor.o mux.o apll.o \
@@ -18,3 +20,7 @@ obj-$(CONFIG_SOC_AM43XX) += $(clk-common) dpll3xxx.o clk-43xx.o
ifdef CONFIG_ATAGS
obj-$(CONFIG_ARCH_OMAP3) += clk-3xxx-legacy.o
endif
+
+endif # CONFIG_ARCH_OMAP2PLUS
+
+obj-$(CONFIG_COMMON_CLK_TI_ADPLL) += adpll.o
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
new file mode 100644
index 000000000000..255cafb18336
--- /dev/null
+++ b/drivers/clk/ti/adpll.c
@@ -0,0 +1,983 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+
+#define ADPLL_PLLSS_MMR_LOCK_OFFSET	0x00	/* Managed by MPUPLL */
+#define ADPLL_PLLSS_MMR_LOCK_ENABLED 0x1f125B64
+#define ADPLL_PLLSS_MMR_UNLOCK_MAGIC 0x1eda4c3d
+
+#define ADPLL_PWRCTRL_OFFSET 0x00
+#define ADPLL_PWRCTRL_PONIN 5
+#define ADPLL_PWRCTRL_PGOODIN 4
+#define ADPLL_PWRCTRL_RET 3
+#define ADPLL_PWRCTRL_ISORET 2
+#define ADPLL_PWRCTRL_ISOSCAN 1
+#define ADPLL_PWRCTRL_OFFMODE 0
+
+#define ADPLL_CLKCTRL_OFFSET 0x04
+#define ADPLL_CLKCTRL_CLKDCOLDOEN 29
+#define ADPLL_CLKCTRL_IDLE 23
+#define ADPLL_CLKCTRL_CLKOUTEN 20
+#define ADPLL_CLKINPHIFSEL_ADPLL_S 19 /* REVISIT: which bit? */
+#define ADPLL_CLKCTRL_CLKOUTLDOEN_ADPLL_LJ 19
+#define ADPLL_CLKCTRL_ULOWCLKEN 18
+#define ADPLL_CLKCTRL_CLKDCOLDOPWDNZ 17
+#define ADPLL_CLKCTRL_M2PWDNZ 16
+#define ADPLL_CLKCTRL_M3PWDNZ_ADPLL_S 15
+#define ADPLL_CLKCTRL_LOWCURRSTDBY_ADPLL_S 13
+#define ADPLL_CLKCTRL_LPMODE_ADPLL_S 12
+#define ADPLL_CLKCTRL_REGM4XEN_ADPLL_S 10
+#define ADPLL_CLKCTRL_SELFREQDCO_ADPLL_LJ 10
+#define ADPLL_CLKCTRL_TINITZ 0
+
+#define ADPLL_TENABLE_OFFSET 0x08
+#define ADPLL_TENABLEDIV_OFFSET 0x8c
+
+#define ADPLL_M2NDIV_OFFSET 0x10
+#define ADPLL_M2NDIV_M2 16
+#define ADPLL_M2NDIV_M2_ADPLL_S_WIDTH 5
+#define ADPLL_M2NDIV_M2_ADPLL_LJ_WIDTH 7
+
+#define ADPLL_MN2DIV_OFFSET 0x14
+#define ADPLL_MN2DIV_N2 16
+
+#define ADPLL_FRACDIV_OFFSET 0x18
+#define ADPLL_FRACDIV_REGSD 24
+#define ADPLL_FRACDIV_FRACTIONALM 0
+#define ADPLL_FRACDIV_FRACTIONALM_MASK 0x3ffff
+
+#define ADPLL_BWCTRL_OFFSET 0x1c
+#define ADPLL_BWCTRL_BWCONTROL 1
+#define ADPLL_BWCTRL_BW_INCR_DECRZ 0
+
+#define ADPLL_RESERVED_OFFSET 0x20
+
+#define ADPLL_STATUS_OFFSET 0x24
+#define ADPLL_STATUS_PONOUT 31
+#define ADPLL_STATUS_PGOODOUT 30
+#define ADPLL_STATUS_LDOPWDN 29
+#define ADPLL_STATUS_RECAL_BSTATUS3 28
+#define ADPLL_STATUS_RECAL_OPPIN 27
+#define ADPLL_STATUS_PHASELOCK 10
+#define ADPLL_STATUS_FREQLOCK 9
+#define ADPLL_STATUS_BYPASSACK 8
+#define ADPLL_STATUS_LOSSREF 6
+#define ADPLL_STATUS_CLKOUTENACK 5
+#define ADPLL_STATUS_LOCK2 4
+#define ADPLL_STATUS_M2CHANGEACK 3
+#define ADPLL_STATUS_HIGHJITTER 1
+#define ADPLL_STATUS_BYPASS 0
+#define ADPLL_STATUS_PREPARED_MASK (BIT(ADPLL_STATUS_PHASELOCK) | \
+ BIT(ADPLL_STATUS_FREQLOCK))
+
+#define ADPLL_M3DIV_OFFSET 0x28 /* Only on MPUPLL */
+#define ADPLL_M3DIV_M3 0
+#define ADPLL_M3DIV_M3_WIDTH 5
+#define ADPLL_M3DIV_M3_MASK 0x1f
+
+#define ADPLL_RAMPCTRL_OFFSET 0x2c /* Only on MPUPLL */
+#define ADPLL_RAMPCTRL_CLKRAMPLEVEL 19
+#define ADPLL_RAMPCTRL_CLKRAMPRATE 16
+#define ADPLL_RAMPCTRL_RELOCK_RAMP_EN 0
+
+#define MAX_ADPLL_INPUTS 3
+#define MAX_ADPLL_OUTPUTS 4
+#define ADPLL_MAX_RETRIES 5
+
+#define to_dco(_hw) container_of(_hw, struct ti_adpll_dco_data, hw)
+#define to_adpll(_hw) container_of(_hw, struct ti_adpll_data, dco)
+#define to_clkout(_hw) container_of(_hw, struct ti_adpll_clkout_data, hw)
+
+enum ti_adpll_clocks {
+ TI_ADPLL_DCO,
+ TI_ADPLL_DCO_GATE,
+ TI_ADPLL_N2,
+ TI_ADPLL_M2,
+ TI_ADPLL_M2_GATE,
+ TI_ADPLL_BYPASS,
+ TI_ADPLL_HIF,
+ TI_ADPLL_DIV2,
+ TI_ADPLL_CLKOUT,
+ TI_ADPLL_CLKOUT2,
+ TI_ADPLL_M3,
+};
+
+#define TI_ADPLL_NR_CLOCKS (TI_ADPLL_M3 + 1)
+
+enum ti_adpll_inputs {
+ TI_ADPLL_CLKINP,
+ TI_ADPLL_CLKINPULOW,
+ TI_ADPLL_CLKINPHIF,
+};
+
+enum ti_adpll_s_outputs {
+ TI_ADPLL_S_DCOCLKLDO,
+ TI_ADPLL_S_CLKOUT,
+ TI_ADPLL_S_CLKOUTX2,
+ TI_ADPLL_S_CLKOUTHIF,
+};
+
+enum ti_adpll_lj_outputs {
+ TI_ADPLL_LJ_CLKDCOLDO,
+ TI_ADPLL_LJ_CLKOUT,
+ TI_ADPLL_LJ_CLKOUTLDO,
+};
+
+struct ti_adpll_platform_data {
+ const bool is_type_s;
+ const int nr_max_inputs;
+ const int nr_max_outputs;
+ const int output_index;
+};
+
+struct ti_adpll_clock {
+ struct clk *clk;
+ struct clk_lookup *cl;
+ void (*unregister)(struct clk *clk);
+};
+
+struct ti_adpll_dco_data {
+ struct clk_hw hw;
+};
+
+struct ti_adpll_clkout_data {
+ struct ti_adpll_data *adpll;
+ struct clk_gate gate;
+ struct clk_hw hw;
+};
+
+struct ti_adpll_data {
+ struct device *dev;
+ const struct ti_adpll_platform_data *c;
+ struct device_node *np;
+ unsigned long pa;
+ void __iomem *iobase;
+ void __iomem *regs;
+ spinlock_t lock; /* For ADPLL shared register access */
+ const char *parent_names[MAX_ADPLL_INPUTS];
+ struct clk *parent_clocks[MAX_ADPLL_INPUTS];
+ struct ti_adpll_clock *clocks;
+ struct clk_onecell_data outputs;
+ struct ti_adpll_dco_data dco;
+};
+
+static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
+ int output_index,
+ const char *postfix)
+{
+ const char *name;
+ int err;
+
+ if (output_index >= 0) {
+ err = of_property_read_string_index(d->np,
+ "clock-output-names",
+ output_index,
+ &name);
+ if (err)
+ return NULL;
+ } else {
+ const char *base_name = "adpll";
+ char *buf;
+
+		buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
+				   strlen(postfix) + 1, /* +1 for the NUL */
+				   GFP_KERNEL);
+ if (!buf)
+ return NULL;
+ sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
+ name = buf;
+ }
+
+ return name;
+}
+
+#define ADPLL_MAX_CON_ID 16 /* See MAX_CON_ID */
+
+static int ti_adpll_setup_clock(struct ti_adpll_data *d, struct clk *clock,
+ int index, int output_index, const char *name,
+ void (*unregister)(struct clk *clk))
+{
+ struct clk_lookup *cl;
+ const char *postfix = NULL;
+ char con_id[ADPLL_MAX_CON_ID];
+
+ d->clocks[index].clk = clock;
+ d->clocks[index].unregister = unregister;
+
+ /* Separate con_id in format "pll040dcoclkldo" to fit MAX_CON_ID */
+	postfix = strrchr(name, '.');
+	if (postfix && strlen(postfix) > 1) {
+ if (strlen(postfix) > ADPLL_MAX_CON_ID)
+ dev_warn(d->dev, "clock %s con_id lookup may fail\n",
+ name);
+ snprintf(con_id, 16, "pll%03lx%s", d->pa & 0xfff, postfix + 1);
+ cl = clkdev_create(clock, con_id, NULL);
+ if (!cl)
+ return -ENOMEM;
+ d->clocks[index].cl = cl;
+ } else {
+ dev_warn(d->dev, "no con_id for clock %s\n", name);
+ }
+
+ if (output_index < 0)
+ return 0;
+
+ d->outputs.clks[output_index] = clock;
+ d->outputs.clk_num++;
+
+ return 0;
+}
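
Consumers can then find the clock through clkdev using the generated con_id. For
an ADPLL whose low address bits are 0x040, a sketch of the consumer side (address
and names are hypothetical):

	static struct clk *example_get_dcoclkldo(void)
	{
		/* the con_id was built as "pll" + low 12 address bits + postfix */
		return clk_get(NULL, "pll040dcoclkldo");
	}
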
+
+static int ti_adpll_init_divider(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, char *name,
+ struct clk *parent_clock,
+ void __iomem *reg,
+ u8 shift, u8 width,
+ u8 clk_divider_flags)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, output_index, name);
+ if (!child_name)
+ return -EINVAL;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_divider(d->dev, child_name, parent_name, 0,
+ reg, shift, width, clk_divider_flags,
+ &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register divider %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ clk_unregister_divider);
+}
+
+static int ti_adpll_init_mux(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ char *name, struct clk *clk0,
+ struct clk *clk1,
+ void __iomem *reg,
+ u8 shift)
+{
+ const char *child_name;
+ const char *parents[2];
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
+ if (!child_name)
+ return -ENOMEM;
+ parents[0] = __clk_get_name(clk0);
+ parents[1] = __clk_get_name(clk1);
+ clock = clk_register_mux(d->dev, child_name, parents, 2, 0,
+ reg, shift, 1, 0, &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register mux %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
+ clk_unregister_mux);
+}
+
+static int ti_adpll_init_gate(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, char *name,
+ struct clk *parent_clock,
+ void __iomem *reg,
+ u8 bit_idx,
+ u8 clk_gate_flags)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, output_index, name);
+ if (!child_name)
+ return -EINVAL;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_gate(d->dev, child_name, parent_name, 0,
+ reg, bit_idx, clk_gate_flags,
+ &d->lock);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register gate %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ clk_unregister_gate);
+}
+
+static int ti_adpll_init_fixed_factor(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ char *name,
+ struct clk *parent_clock,
+ unsigned int mult,
+ unsigned int div)
+{
+ const char *child_name;
+ const char *parent_name;
+ struct clk *clock;
+
+ child_name = ti_adpll_clk_get_name(d, -ENODEV, name);
+ if (!child_name)
+ return -ENOMEM;
+
+ parent_name = __clk_get_name(parent_clock);
+ clock = clk_register_fixed_factor(d->dev, child_name, parent_name,
+ 0, mult, div);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ return ti_adpll_setup_clock(d, clock, index, -ENODEV, child_name,
+ clk_unregister);
+}
+
+static void ti_adpll_set_idle_bypass(struct ti_adpll_data *d)
+{
+ unsigned long flags;
+ u32 v;
+
+ spin_lock_irqsave(&d->lock, flags);
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ v |= BIT(ADPLL_CLKCTRL_IDLE);
+ writel_relaxed(v, d->regs + ADPLL_CLKCTRL_OFFSET);
+ spin_unlock_irqrestore(&d->lock, flags);
+}
+
+static void ti_adpll_clear_idle_bypass(struct ti_adpll_data *d)
+{
+ unsigned long flags;
+ u32 v;
+
+ spin_lock_irqsave(&d->lock, flags);
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ v &= ~BIT(ADPLL_CLKCTRL_IDLE);
+ writel_relaxed(v, d->regs + ADPLL_CLKCTRL_OFFSET);
+ spin_unlock_irqrestore(&d->lock, flags);
+}
+
+static bool ti_adpll_clock_is_bypass(struct ti_adpll_data *d)
+{
+ u32 v;
+
+ v = readl_relaxed(d->regs + ADPLL_STATUS_OFFSET);
+
+ return v & BIT(ADPLL_STATUS_BYPASS);
+}
+
+/*
+ * Locked and bypass are not actually mutually exclusive: if you only care
+ * about the DCO clock and not CLKOUT you can clear M2PWDNZ before enabling
+ * the PLL, resulting in status (FREQLOCK | PHASELOCK | BYPASS) after lock.
+ */
+static bool ti_adpll_is_locked(struct ti_adpll_data *d)
+{
+ u32 v = readl_relaxed(d->regs + ADPLL_STATUS_OFFSET);
+
+ return (v & ADPLL_STATUS_PREPARED_MASK) == ADPLL_STATUS_PREPARED_MASK;
+}
+
+static int ti_adpll_wait_lock(struct ti_adpll_data *d)
+{
+ int retries = ADPLL_MAX_RETRIES;
+
+ do {
+ if (ti_adpll_is_locked(d))
+ return 0;
+ usleep_range(200, 300);
+ } while (retries--);
+
+ dev_err(d->dev, "pll failed to lock\n");
+ return -ETIMEDOUT;
+}
+
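
An equivalent bounded poll can be written with the generic helper from
<linux/iopoll.h>; a minimal sketch, where the 1250000us total budget is
an assumption standing in for ADPLL_MAX_RETRIES iterations of
usleep_range(200, 300), not the driver's actual bound:

	#include <linux/iopoll.h>

	static int ti_adpll_wait_lock_poll(struct ti_adpll_data *d)
	{
		u32 v;
		int err;

		/* poll the status register until all lock bits are set */
		err = readl_poll_timeout(d->regs + ADPLL_STATUS_OFFSET, v,
					 (v & ADPLL_STATUS_PREPARED_MASK) ==
					 ADPLL_STATUS_PREPARED_MASK,
					 200, 1250000);
		if (err)
			dev_err(d->dev, "pll failed to lock\n");
		return err;
	}
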
+static int ti_adpll_prepare(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ ti_adpll_clear_idle_bypass(d);
+ ti_adpll_wait_lock(d);
+
+ return 0;
+}
+
+static void ti_adpll_unprepare(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ ti_adpll_set_idle_bypass(d);
+}
+
+static int ti_adpll_is_prepared(struct clk_hw *hw)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+
+ return ti_adpll_is_locked(d);
+}
+
+/*
+ * Note that the DCO clock is never subject to bypass: if the PLL is off,
+ * dcoclk is low.
+ */
+static unsigned long ti_adpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ti_adpll_dco_data *dco = to_dco(hw);
+ struct ti_adpll_data *d = to_adpll(dco);
+ u32 frac_m, divider, v;
+ u64 rate;
+ unsigned long flags;
+
+ if (ti_adpll_clock_is_bypass(d))
+ return 0;
+
+ spin_lock_irqsave(&d->lock, flags);
+ frac_m = readl_relaxed(d->regs + ADPLL_FRACDIV_OFFSET);
+ frac_m &= ADPLL_FRACDIV_FRACTIONALM_MASK;
+ rate = (u64)readw_relaxed(d->regs + ADPLL_MN2DIV_OFFSET) << 18;
+ rate += frac_m;
+ rate *= parent_rate;
+ divider = (readw_relaxed(d->regs + ADPLL_M2NDIV_OFFSET) + 1) << 18;
+ spin_unlock_irqrestore(&d->lock, flags);
+
+ do_div(rate, divider);
+
+ if (d->c->is_type_s) {
+ v = readl_relaxed(d->regs + ADPLL_CLKCTRL_OFFSET);
+ if (v & BIT(ADPLL_CLKCTRL_REGM4XEN_ADPLL_S))
+ rate *= 4;
+ rate *= 2;
+ }
+
+ return rate;
+}
+
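
In fixed point with 18 fractional bits, the computation above is:

	rate = ((M << 18) + frac_m) * parent_rate / ((N + 1) << 18)

where M is the multiplier word read at ADPLL_MN2DIV_OFFSET and N the
divider word at ADPLL_M2NDIV_OFFSET. As a worked example with an assumed
parent_rate of 20 MHz, M = 125, frac_m = 0 and N = 4, the DCO runs at
125 * 20 MHz / 5 = 500 MHz; a type-S PLL then doubles that to 1 GHz, or
multiplies it by eight when REGM4XEN is also set.
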
+/* PLL parent is always clkinp; bypass only affects the children */
+static u8 ti_adpll_get_parent(struct clk_hw *hw)
+{
+ return 0;
+}
+
+static const struct clk_ops ti_adpll_ops = {
+ .prepare = ti_adpll_prepare,
+ .unprepare = ti_adpll_unprepare,
+ .is_prepared = ti_adpll_is_prepared,
+ .recalc_rate = ti_adpll_recalc_rate,
+ .get_parent = ti_adpll_get_parent,
+};
+
+static int ti_adpll_init_dco(struct ti_adpll_data *d)
+{
+ struct clk_init_data init;
+ struct clk *clock;
+ const char *postfix;
+ int width, err;
+
+ d->outputs.clks = devm_kcalloc(d->dev, MAX_ADPLL_OUTPUTS,
+ sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!d->outputs.clks)
+ return -ENOMEM;
+
+ if (d->c->output_index < 0)
+ postfix = "dco";
+ else
+ postfix = NULL;
+
+ init.name = ti_adpll_clk_get_name(d, d->c->output_index, postfix);
+ if (!init.name)
+ return -EINVAL;
+
+ init.parent_names = d->parent_names;
+ init.num_parents = d->c->nr_max_inputs;
+ init.ops = &ti_adpll_ops;
+ init.flags = CLK_GET_RATE_NOCACHE;
+ d->dco.hw.init = &init;
+
+ if (d->c->is_type_s)
+ width = 5;
+ else
+ width = 4;
+
+ /* Internal input clock divider N2 */
+ err = ti_adpll_init_divider(d, TI_ADPLL_N2, -ENODEV, "n2",
+ d->parent_clocks[TI_ADPLL_CLKINP],
+ d->regs + ADPLL_MN2DIV_OFFSET,
+ ADPLL_MN2DIV_N2, width, 0);
+ if (err)
+ return err;
+
+ clock = devm_clk_register(d->dev, &d->dco.hw);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ return ti_adpll_setup_clock(d, clock, TI_ADPLL_DCO, d->c->output_index,
+ init.name, NULL);
+}
+
+static int ti_adpll_clkout_enable(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return clk_gate_ops.enable(gate_hw);
+}
+
+static void ti_adpll_clkout_disable(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+ clk_gate_ops.disable(gate_hw);
+}
+
+static int ti_adpll_clkout_is_enabled(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct clk_hw *gate_hw = &co->gate.hw;
+
+ __clk_hw_set_clk(gate_hw, hw);
+
+ return clk_gate_ops.is_enabled(gate_hw);
+}
+
+/* Setting PLL bypass puts clkout and clkoutx2 into bypass */
+static u8 ti_adpll_clkout_get_parent(struct clk_hw *hw)
+{
+ struct ti_adpll_clkout_data *co = to_clkout(hw);
+ struct ti_adpll_data *d = co->adpll;
+
+ return ti_adpll_clock_is_bypass(d);
+}
+
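
The three wrappers above reuse the stock gate ops on an embedded struct
clk_gate by first pointing the gate's clk_hw at the composite's struct
clk via __clk_hw_set_clk(). Reduced to its core, the idiom looks like
this sketch, with a hypothetical wrapper type rather than the driver's
own structures:

	struct my_composite {
		struct clk_hw hw;	/* registered with custom ops */
		struct clk_gate gate;	/* never registered itself */
	};

	static int my_composite_enable(struct clk_hw *hw)
	{
		struct my_composite *c =
			container_of(hw, struct my_composite, hw);

		/* borrow the composite's struct clk for the gate ops */
		__clk_hw_set_clk(&c->gate.hw, hw);
		return clk_gate_ops.enable(&c->gate.hw);
	}
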
+static int ti_adpll_init_clkout(struct ti_adpll_data *d,
+ enum ti_adpll_clocks index,
+ int output_index, int gate_bit,
+ char *name, struct clk *clk0,
+ struct clk *clk1)
+{
+ struct ti_adpll_clkout_data *co;
+ struct clk_init_data init;
+ struct clk_ops *ops;
+ const char *parent_names[2];
+ const char *child_name;
+ struct clk *clock;
+ int err;
+
+ co = devm_kzalloc(d->dev, sizeof(*co), GFP_KERNEL);
+ if (!co)
+ return -ENOMEM;
+ co->adpll = d;
+
+ err = of_property_read_string_index(d->np,
+ "clock-output-names",
+ output_index,
+ &child_name);
+ if (err)
+ return err;
+
+ ops = devm_kzalloc(d->dev, sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return -ENOMEM;
+
+ init.name = child_name;
+ init.ops = ops;
+ init.flags = CLK_IS_BASIC;
+ co->hw.init = &init;
+ parent_names[0] = __clk_get_name(clk0);
+ parent_names[1] = __clk_get_name(clk1);
+ init.parent_names = parent_names;
+ init.num_parents = 2;
+
+ ops->get_parent = ti_adpll_clkout_get_parent;
+ ops->determine_rate = __clk_mux_determine_rate;
+ if (gate_bit) {
+ co->gate.lock = &d->lock;
+ co->gate.reg = d->regs + ADPLL_CLKCTRL_OFFSET;
+ co->gate.bit_idx = gate_bit;
+ ops->enable = ti_adpll_clkout_enable;
+ ops->disable = ti_adpll_clkout_disable;
+ ops->is_enabled = ti_adpll_clkout_is_enabled;
+ }
+
+ clock = devm_clk_register(d->dev, &co->hw);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "failed to register output %s: %li\n",
+ name, PTR_ERR(clock));
+ return PTR_ERR(clock);
+ }
+
+ return ti_adpll_setup_clock(d, clock, index, output_index, child_name,
+ NULL);
+}
+
+static int ti_adpll_init_children_adpll_s(struct ti_adpll_data *d)
+{
+ int err;
+
+ if (!d->c->is_type_s)
+ return 0;
+
+ /* Internal mux, sources from divider N2 or clkinpulow */
+ err = ti_adpll_init_mux(d, TI_ADPLL_BYPASS, "bypass",
+ d->clocks[TI_ADPLL_N2].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPULOW],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_ULOWCLKEN);
+ if (err)
+ return err;
+
+ /* Internal divider M2, sources DCO */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M2, -ENODEV, "m2",
+ d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_M2NDIV_OFFSET,
+ ADPLL_M2NDIV_M2,
+ ADPLL_M2NDIV_M2_ADPLL_S_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Internal fixed divider, after M2 before clkout */
+ err = ti_adpll_init_fixed_factor(d, TI_ADPLL_DIV2, "div2",
+ d->clocks[TI_ADPLL_M2].clk,
+ 1, 2);
+ if (err)
+ return err;
+
+ /* Output clkout with a mux and gate, sources from div2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT, TI_ADPLL_S_CLKOUT,
+ ADPLL_CLKCTRL_CLKOUTEN, "clkout",
+ d->clocks[TI_ADPLL_DIV2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ /* Output clkoutx2 with a mux and gate, sources from M2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT2, TI_ADPLL_S_CLKOUTX2, 0,
+ "clkout2", d->clocks[TI_ADPLL_M2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ /* Internal mux, sources from DCO and clkinphif */
+ if (d->parent_clocks[TI_ADPLL_CLKINPHIF]) {
+ err = ti_adpll_init_mux(d, TI_ADPLL_HIF, "hif",
+ d->clocks[TI_ADPLL_DCO].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPHIF],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKINPHIFSEL_ADPLL_S);
+ if (err)
+ return err;
+ }
+
+ /* Output clkouthif with a divider M3, sources from hif */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M3, TI_ADPLL_S_CLKOUTHIF, "m3",
+ d->clocks[TI_ADPLL_HIF].clk,
+ d->regs + ADPLL_M3DIV_OFFSET,
+ ADPLL_M3DIV_M3,
+ ADPLL_M3DIV_M3_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Output clock dcoclkldo is the DCO */
+
+ return 0;
+}
+
+static int ti_adpll_init_children_adpll_lj(struct ti_adpll_data *d)
+{
+ int err;
+
+ if (d->c->is_type_s)
+ return 0;
+
+ /* Output clkdcoldo, gated output of DCO */
+ err = ti_adpll_init_gate(d, TI_ADPLL_DCO_GATE, TI_ADPLL_LJ_CLKDCOLDO,
+ "clkdcoldo", d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_CLKDCOLDOEN, 0);
+ if (err)
+ return err;
+
+ /* Internal divider M2, sources from DCO */
+ err = ti_adpll_init_divider(d, TI_ADPLL_M2, -ENODEV,
+ "m2", d->clocks[TI_ADPLL_DCO].clk,
+ d->regs + ADPLL_M2NDIV_OFFSET,
+ ADPLL_M2NDIV_M2,
+ ADPLL_M2NDIV_M2_ADPLL_LJ_WIDTH,
+ CLK_DIVIDER_ONE_BASED);
+ if (err)
+ return err;
+
+ /* Output clkoutldo, gated output of M2 */
+ err = ti_adpll_init_gate(d, TI_ADPLL_M2_GATE, TI_ADPLL_LJ_CLKOUTLDO,
+ "clkoutldo", d->clocks[TI_ADPLL_M2].clk,
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_CLKOUTLDOEN_ADPLL_LJ,
+ 0);
+ if (err)
+ return err;
+
+ /* Internal mux, sources from divider N2 or clkinpulow */
+ err = ti_adpll_init_mux(d, TI_ADPLL_BYPASS, "bypass",
+ d->clocks[TI_ADPLL_N2].clk,
+ d->parent_clocks[TI_ADPLL_CLKINPULOW],
+ d->regs + ADPLL_CLKCTRL_OFFSET,
+ ADPLL_CLKCTRL_ULOWCLKEN);
+ if (err)
+ return err;
+
+ /* Output clkout, sources M2 or bypass */
+ err = ti_adpll_init_clkout(d, TI_ADPLL_CLKOUT, TI_ADPLL_S_CLKOUT,
+ ADPLL_CLKCTRL_CLKOUTEN, "clkout",
+ d->clocks[TI_ADPLL_M2].clk,
+ d->clocks[TI_ADPLL_BYPASS].clk);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void ti_adpll_free_resources(struct ti_adpll_data *d)
+{
+ int i;
+
+ for (i = TI_ADPLL_M3; i >= 0; i--) {
+ struct ti_adpll_clock *ac = &d->clocks[i];
+
+ if (IS_ERR_OR_NULL(ac->clk))
+ continue;
+ if (ac->cl)
+ clkdev_drop(ac->cl);
+ if (ac->unregister)
+ ac->unregister(ac->clk);
+ }
+}
+
+/* MPU PLL manages the lock register for all PLLs */
+static void ti_adpll_unlock_all(void __iomem *reg)
+{
+ u32 v;
+
+ v = readl_relaxed(reg);
+ if (v == ADPLL_PLLSS_MMR_LOCK_ENABLED)
+ writel_relaxed(ADPLL_PLLSS_MMR_UNLOCK_MAGIC, reg);
+}
+
+static int ti_adpll_init_registers(struct ti_adpll_data *d)
+{
+ int register_offset = 0;
+
+ if (d->c->is_type_s) {
+ register_offset = 8;
+ ti_adpll_unlock_all(d->iobase + ADPLL_PLLSS_MMR_LOCK_OFFSET);
+ }
+
+ d->regs = d->iobase + register_offset + ADPLL_PWRCTRL_OFFSET;
+
+ return 0;
+}
+
+static int ti_adpll_init_inputs(struct ti_adpll_data *d)
+{
+ const char *error = "need at least %i inputs";
+ struct clk *clock;
+ int nr_inputs;
+
+ nr_inputs = of_clk_get_parent_count(d->np);
+ if (nr_inputs < d->c->nr_max_inputs) {
+ dev_err(d->dev, error, d->c->nr_max_inputs);
+ return -EINVAL;
+ }
+ of_clk_parent_fill(d->np, d->parent_names, nr_inputs);
+
+ clock = devm_clk_get(d->dev, d->parent_names[0]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinp\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINP] = clock;
+
+ clock = devm_clk_get(d->dev, d->parent_names[1]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinpulow clock\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINPULOW] = clock;
+
+ if (d->c->is_type_s) {
+ clock = devm_clk_get(d->dev, d->parent_names[2]);
+ if (IS_ERR(clock)) {
+ dev_err(d->dev, "could not get clkinphif clock\n");
+ return PTR_ERR(clock);
+ }
+ d->parent_clocks[TI_ADPLL_CLKINPHIF] = clock;
+ }
+
+ return 0;
+}
+
+static const struct ti_adpll_platform_data ti_adpll_type_s = {
+ .is_type_s = true,
+ .nr_max_inputs = MAX_ADPLL_INPUTS,
+ .nr_max_outputs = MAX_ADPLL_OUTPUTS,
+ .output_index = TI_ADPLL_S_DCOCLKLDO,
+};
+
+static const struct ti_adpll_platform_data ti_adpll_type_lj = {
+ .is_type_s = false,
+ .nr_max_inputs = MAX_ADPLL_INPUTS - 1,
+ .nr_max_outputs = MAX_ADPLL_OUTPUTS - 1,
+ .output_index = -EINVAL,
+};
+
+static const struct of_device_id ti_adpll_match[] = {
+ { .compatible = "ti,dm814-adpll-s-clock", &ti_adpll_type_s },
+ { .compatible = "ti,dm814-adpll-lj-clock", &ti_adpll_type_lj },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_adpll_match);
+
+static int ti_adpll_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ const struct ti_adpll_platform_data *pdata;
+ struct ti_adpll_data *d;
+ struct resource *res;
+ int err;
+
+ match = of_match_device(ti_adpll_match, dev);
+ if (match)
+ pdata = match->data;
+ else
+ return -ENODEV;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ d->dev = dev;
+ d->np = node;
+ d->c = pdata;
+ dev_set_drvdata(d->dev, d);
+ spin_lock_init(&d->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ d->pa = res->start;
+
+ d->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(d->iobase)) {
+ dev_err(dev, "could not get IO base: %li\n",
+ PTR_ERR(d->iobase));
+ return PTR_ERR(d->iobase);
+ }
+
+ err = ti_adpll_init_registers(d);
+ if (err)
+ return err;
+
+ err = ti_adpll_init_inputs(d);
+ if (err)
+ return err;
+
+ d->clocks = devm_kcalloc(d->dev, TI_ADPLL_NR_CLOCKS,
+ sizeof(struct ti_adpll_clock),
+ GFP_KERNEL);
+ if (!d->clocks)
+ return -ENOMEM;
+
+ err = ti_adpll_init_dco(d);
+ if (err) {
+ dev_err(dev, "could not register dco: %i\n", err);
+ goto free;
+ }
+
+ err = ti_adpll_init_children_adpll_s(d);
+ if (err)
+ goto free;
+ err = ti_adpll_init_children_adpll_lj(d);
+ if (err)
+ goto free;
+
+ err = of_clk_add_provider(d->np, of_clk_src_onecell_get, &d->outputs);
+ if (err)
+ goto free;
+
+ return 0;
+
+free:
+ WARN_ON(1);
+ ti_adpll_free_resources(d);
+
+ return err;
+}
+
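
The of_match_device() lookup at the top of the probe function above can
also be written with the generic accessor; a sketch, equivalent under
the same match table:

	/* sketch: same lookup via of_device_get_match_data() */
	pdata = of_device_get_match_data(dev);
	if (!pdata)
		return -ENODEV;
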
+static int ti_adpll_remove(struct platform_device *pdev)
+{
+ struct ti_adpll_data *d = dev_get_drvdata(&pdev->dev);
+
+ ti_adpll_free_resources(d);
+
+ return 0;
+}
+
+static struct platform_driver ti_adpll_driver = {
+ .driver = {
+ .name = "ti-adpll",
+ .of_match_table = ti_adpll_match,
+ },
+ .probe = ti_adpll_probe,
+ .remove = ti_adpll_remove,
+};
+
+static int __init ti_adpll_init(void)
+{
+ return platform_driver_register(&ti_adpll_driver);
+}
+core_initcall(ti_adpll_init);
+
+static void __exit ti_adpll_exit(void)
+{
+ platform_driver_unregister(&ti_adpll_driver);
+}
+module_exit(ti_adpll_exit);
+
+MODULE_DESCRIPTION("Clock driver for dm814x ADPLL");
+MODULE_ALIAS("platform:dm814-adpll-clock");
+MODULE_AUTHOR("Tony LIndgren <tony@atomide.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index b336a8c11e2a..6411e132faa2 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -140,11 +140,21 @@ static void __init omap_clk_register_apll(struct clk_hw *hw,
struct dpll_data *ad = clk_hw->dpll_data;
struct clk *clk;
- ad->clk_ref = of_clk_get(node, 0);
- ad->clk_bypass = of_clk_get(node, 1);
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-ref for %s not ready, retry\n",
+ node->name);
+ if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
+ return;
+
+ goto cleanup;
+ }
- if (IS_ERR(ad->clk_ref) || IS_ERR(ad->clk_bypass)) {
- pr_debug("clk-ref or clk-bypass for %s not ready, retry\n",
+ ad->clk_ref = __clk_get_hw(clk);
+
+ clk = of_clk_get(node, 1);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-bypass for %s not ready, retry\n",
node->name);
if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
return;
@@ -152,6 +162,8 @@ static void __init omap_clk_register_apll(struct clk_hw *hw,
goto cleanup;
}
+ ad->clk_bypass = __clk_get_hw(clk);
+
clk = clk_register(NULL, &clk_hw->hw);
if (!IS_ERR(clk)) {
of_clk_add_provider(node, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c
index 9e85fcc74cc9..52c6efc53731 100644
--- a/drivers/clk/ti/clk-814x.c
+++ b/drivers/clk/ti/clk-814x.c
@@ -5,8 +5,10 @@
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/ti.h>
+#include <linux/of_platform.h>
#include "clock.h"
@@ -27,11 +29,62 @@ static struct ti_dt_clk dm814_clks[] = {
{ .node_name = NULL },
};
+static bool timer_clocks_initialized;
+
+static int __init dm814x_adpll_early_init(void)
+{
+ struct device_node *np;
+
+ if (!timer_clocks_initialized)
+ return -ENODEV;
+
+ np = of_find_node_by_name(NULL, "pllss");
+ if (!np) {
+ pr_err("Could not find node for plls\n");
+ return -ENODEV;
+ }
+
+ of_platform_populate(np, NULL, NULL, NULL);
+
+ return 0;
+}
+core_initcall(dm814x_adpll_early_init);
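
The early initcall exists because the ADPLL blocks probe as platform
devices rather than through CLK_OF_DECLARE(): populating the "pllss"
node's children at core_initcall time lets the driver bind before the
init-clock enabling below runs. Note that of_find_node_by_name() returns
the node with its refcount raised and of_platform_populate() does not
consume that reference, so a fully tidy version would drop it; a sketch:

	of_platform_populate(np, NULL, NULL, NULL);
	of_node_put(np);	/* drop the reference taken by the lookup */
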
+
+static const char * const init_clocks[] = {
+ "pll040clkout", /* MPU 481c5040.adpll.clkout */
+ "pll290clkout", /* DDR 481c5290.adpll.clkout */
+};
+
+static int __init dm814x_adpll_enable_init_clocks(void)
+{
+ int i, err;
+
+ if (!timer_clocks_initialized)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(init_clocks); i++) {
+ struct clk *clock;
+
+ clock = clk_get(NULL, init_clocks[i]);
+ if (WARN(IS_ERR(clock), "could not find init clock %s\n",
+ init_clocks[i]))
+ continue;
+ err = clk_prepare_enable(clock);
+ if (WARN(err, "could not enable init clock %s\n",
+ init_clocks[i]))
+ continue;
+ }
+
+ return 0;
+}
+postcore_initcall(dm814x_adpll_enable_init_clocks);
+
int __init dm814x_dt_clk_init(void)
{
ti_dt_clocks_register(dm814_clks);
omap2_clk_disable_autoidle_all();
omap2_clk_enable_init_clocks(NULL, 0);
+ timer_clocks_initialized = true;
return 0;
}
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index b5bcd77e8d0f..5fcf247759ac 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -305,8 +305,8 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
case TI_CLK_FIXED:
fixed = setup->data;
- clk = clk_register_fixed_rate(NULL, setup->name, NULL,
- CLK_IS_ROOT, fixed->frequency);
+ clk = clk_register_fixed_rate(NULL, setup->name, NULL, 0,
+ fixed->frequency);
break;
case TI_CLK_MUX:
clk = ti_clk_register_mux(setup);
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index b5cc6f66ae5d..032c658a5f5e 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -254,7 +254,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
v >>= __ffs(dd->enable_mask);
if (_omap2_dpll_is_in_bypass(v))
- return clk_get_rate(dd->clk_bypass);
+ return clk_hw_get_rate(dd->clk_bypass);
v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg);
dpll_mult = v & dd->mult_mask;
@@ -262,7 +262,7 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
dpll_div = v & dd->div1_mask;
dpll_div >>= __ffs(dd->div1_mask);
- dpll_clk = (u64)clk_get_rate(dd->clk_ref) * dpll_mult;
+ dpll_clk = (u64)clk_hw_get_rate(dd->clk_ref) * dpll_mult;
do_div(dpll_clk, dpll_div + 1);
return dpll_clk;
@@ -301,7 +301,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
dd = clk->dpll_data;
- ref_rate = clk_get_rate(dd->clk_ref);
+ ref_rate = clk_hw_get_rate(dd->clk_ref);
clk_name = clk_hw_get_name(hw);
pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
clk_name, target_rate);
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index b9bc3b8df659..6cf9dd189a92 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -109,7 +109,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node)
struct clk_hw *clk_hw;
const char *clkdm_name = node->name;
int i;
- int num_clks;
+ unsigned int num_clks;
num_clks = of_clk_get_parent_count(node);
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index dbef218fe5ec..1cf70f452e1e 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -28,8 +28,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -236,14 +234,14 @@ cleanup:
static void __init of_ti_composite_clk_setup(struct device_node *node)
{
- int num_clks;
+ unsigned int num_clks;
int i;
struct clk_hw_omap_comp *cclk;
/* Number of component clocks to be put inside this clock */
num_clks = of_clk_get_parent_count(node);
- if (num_clks < 1) {
+ if (!num_clks) {
pr_err("composite clk %s must have component(s)\n", node->name);
return;
}
@@ -273,13 +271,13 @@ CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw,
int type)
{
- int num_parents;
+ unsigned int num_parents;
const char **parent_names;
struct component_clk *clk;
num_parents = of_clk_get_parent_count(node);
- if (num_parents < 1) {
+ if (!num_parents) {
pr_err("component-clock %s must have parent(s)\n", node->name);
return -EINVAL;
}
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index df2558350fc1..b4e5de16e561 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -26,8 +26,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
#define div_mask(d) ((1 << ((d)->width)) - 1)
static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 5519b386edc0..3bc9959f71c3 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -147,11 +147,22 @@ static void __init _register_dpll(struct clk_hw *hw,
struct dpll_data *dd = clk_hw->dpll_data;
struct clk *clk;
- dd->clk_ref = of_clk_get(node, 0);
- dd->clk_bypass = of_clk_get(node, 1);
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_debug("clk-ref missing for %s, retry later\n",
+ node->name);
+ if (!ti_clk_retry_init(node, hw, _register_dpll))
+ return;
- if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
- pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
+ goto cleanup;
+ }
+
+ dd->clk_ref = __clk_get_hw(clk);
+
+ clk = of_clk_get(node, 1);
+
+ if (IS_ERR(clk)) {
+ pr_debug("clk-bypass missing for %s, retry later\n",
node->name);
if (!ti_clk_retry_init(node, hw, _register_dpll))
return;
@@ -159,6 +170,8 @@ static void __init _register_dpll(struct clk_hw *hw,
goto cleanup;
}
+ dd->clk_bypass = __clk_get_hw(clk);
+
/* register the clock */
clk = clk_register(NULL, &clk_hw->hw);
@@ -251,8 +264,8 @@ struct clk *ti_clk_register_dpll(struct ti_clk *setup)
dd->recal_en_bit = dpll->recal_en_bit;
dd->recal_st_bit = dpll->recal_st_bit;
- dd->clk_ref = clk_ref;
- dd->clk_bypass = clk_bypass;
+ dd->clk_ref = __clk_get_hw(clk_ref);
+ dd->clk_bypass = __clk_get_hw(clk_bypass);
if (dpll->flags & CLKF_CORE)
ops = &omap3_dpll_core_ck_ops;
@@ -361,7 +374,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
init->ops = ops;
init->num_parents = of_clk_get_parent_count(node);
- if (init->num_parents < 1) {
+ if (!init->num_parents) {
pr_err("%s must have parent(s)\n", node->name);
goto cleanup;
}
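
With this conversion, dpll_data's clk_ref and clk_bypass members hold
struct clk_hw pointers instead of struct clk, so the rate and parent
checks in the hunks below use the clk_hw API directly rather than
calling __clk_get_hw() at every use. A sketch of the resulting access
pattern:

	/* sketch: clk_ref is now a struct clk_hw * */
	unsigned long ref_rate = clk_hw_get_rate(dd->clk_ref);

	if (clk_hw_get_parent(hw) != dd->clk_ref)
		return -EINVAL;
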
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index cc739291a3ce..88f2ce81ba55 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -98,7 +98,7 @@ static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
unsigned long fint;
u16 f = 0;
- fint = clk_get_rate(clk->dpll_data->clk_ref) / n;
+ fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;
pr_debug("clock: fint is %lu\n", fint);
@@ -460,12 +460,11 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
parent = clk_hw_get_parent(hw);
- if (clk_hw_get_rate(hw) ==
- clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
- WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
+ if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
+ WARN_ON(parent != dd->clk_bypass);
r = _omap3_noncore_dpll_bypass(clk);
} else {
- WARN_ON(parent != __clk_get_hw(dd->clk_ref));
+ WARN_ON(parent != dd->clk_ref);
r = _omap3_noncore_dpll_lock(clk);
}
@@ -513,13 +512,13 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
if (!dd)
return -EINVAL;
- if (clk_get_rate(dd->clk_bypass) == req->rate &&
+ if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
- req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
+ req->best_parent_hw = dd->clk_bypass;
} else {
req->rate = omap2_dpll_round_rate(hw, req->rate,
&req->best_parent_rate);
- req->best_parent_hw = __clk_get_hw(dd->clk_ref);
+ req->best_parent_hw = dd->clk_ref;
}
req->best_parent_rate = req->rate;
@@ -577,7 +576,7 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
if (!dd)
return -EINVAL;
- if (clk_hw_get_parent(hw) != __clk_get_hw(dd->clk_ref))
+ if (clk_hw_get_parent(hw) != dd->clk_ref)
return -EINVAL;
if (dd->last_rounded_rate == 0)
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
index 660d7436ac24..82c05b55a7be 100644
--- a/drivers/clk/ti/dpll44xx.c
+++ b/drivers/clk/ti/dpll44xx.c
@@ -94,7 +94,7 @@ static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
{
long fint, fout;
- fint = clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+ fint = clk_hw_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
fout = fint * dd->last_rounded_m;
if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
@@ -212,13 +212,13 @@ int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
if (!dd)
return -EINVAL;
- if (clk_get_rate(dd->clk_bypass) == req->rate &&
+ if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
- req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
+ req->best_parent_hw = dd->clk_bypass;
} else {
req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
&req->best_parent_rate);
- req->best_parent_hw = __clk_get_hw(dd->clk_ref);
+ req->best_parent_hw = dd->clk_ref;
}
req->best_parent_rate = req->rate;
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 5429d3534363..bc05f276f32b 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -24,8 +24,6 @@
#include "clock.h"
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index dab9ba88b9d6..44777ab6fdeb 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -26,8 +26,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__
-#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-
static u8 ti_clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
@@ -180,7 +178,7 @@ static void of_mux_clk_setup(struct device_node *node)
{
struct clk *clk;
void __iomem *reg;
- int num_parents;
+ unsigned int num_parents;
const char **parent_names;
u8 clk_mux_flags = 0;
u32 mask = 0;
@@ -263,7 +261,7 @@ struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
{
struct clk_mux *mux;
- int num_parents;
+ unsigned int num_parents;
u32 val;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index 222425d08ab6..a07c31e6f26d 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -40,8 +40,7 @@ static int ab8500_reg_clks(struct device *dev)
return ret;
/* ab8500_sysclk */
- clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK, 0);
clk_register_clkdev(clk, "sysclk", "ab8500-usb.0");
clk_register_clkdev(clk, "sysclk", "ab-iddet.0");
clk_register_clkdev(clk, "sysclk", "snd-soc-mop500.0");
@@ -68,7 +67,7 @@ static int ab8500_reg_clks(struct device *dev)
clk = clk_reg_sysctrl_gate_fixed_rate(dev, "ulpclk", NULL,
AB8500_SYSULPCLKCTRL1, AB8500_SYSULPCLKCTRL1_ULPCLKREQ,
AB8500_SYSULPCLKCTRL1_ULPCLKREQ,
- 38400000, 9000, CLK_IS_ROOT);
+ 38400000, 9000, 0);
clk_register_clkdev(clk, "ulpclk", "snd-soc-mop500.0");
/* ab8500_intclk */
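
These ux500 hunks, like the versatile, x86 and zynq ones further down,
only drop the CLK_IS_ROOT flag: it became a no-op once the clock core
started inferring root status from the absence of parents, so a root
clock is now registered with a NULL parent name and flags 0. A sketch
with a hypothetical clock name:

	/* "osc24m" is an illustrative name, not one of the ux500 clocks */
	clk = clk_register_fixed_rate(NULL, "osc24m", NULL, 0, 24000000);
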
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 271c09644652..9a736d939806 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -91,21 +91,21 @@ void u8500_clk_init(void)
/* Clock sources */
clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
prcmu_clk[PRCMU_PLLSOC0] = clk;
clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
prcmu_clk[PRCMU_PLLSOC1] = clk;
clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
prcmu_clk[PRCMU_PLLDDR] = clk;
/* FIXME: Add sys, ulp and int clocks here. */
rtc_clk = clk_register_fixed_rate(NULL, "rtc32k", NULL,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED,
+ CLK_IGNORE_UNUSED,
32768);
/* PRCMU clocks */
@@ -126,105 +126,101 @@ void u8500_clk_init(void)
clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent,
PRCMU_SGACLK, 0);
else
- clk = clk_reg_prcmu_gate("sgclk", NULL,
- PRCMU_SGACLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("sgclk", NULL, PRCMU_SGACLK, 0);
prcmu_clk[PRCMU_SGACLK] = clk;
- clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0);
prcmu_clk[PRCMU_UARTCLK] = clk;
- clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, 0);
prcmu_clk[PRCMU_MSP02CLK] = clk;
- clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0);
prcmu_clk[PRCMU_MSP1CLK] = clk;
- clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0);
prcmu_clk[PRCMU_I2CCLK] = clk;
- clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0);
prcmu_clk[PRCMU_SLIMCLK] = clk;
- clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0);
prcmu_clk[PRCMU_PER1CLK] = clk;
- clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0);
prcmu_clk[PRCMU_PER2CLK] = clk;
- clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0);
prcmu_clk[PRCMU_PER3CLK] = clk;
- clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0);
prcmu_clk[PRCMU_PER5CLK] = clk;
- clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0);
prcmu_clk[PRCMU_PER6CLK] = clk;
- clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0);
prcmu_clk[PRCMU_PER7CLK] = clk;
clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_LCDCLK] = clk;
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0);
prcmu_clk[PRCMU_BMLCLK] = clk;
clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_HSITXCLK] = clk;
clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_HSIRXCLK] = clk;
clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_HDMICLK] = clk;
- clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0);
prcmu_clk[PRCMU_APEATCLK] = clk;
clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_APETRACECLK] = clk;
- clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0);
prcmu_clk[PRCMU_MCDECLK] = clk;
- clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0);
prcmu_clk[PRCMU_IPI2CCLK] = clk;
- clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0);
prcmu_clk[PRCMU_DSIALTCLK] = clk;
- clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0);
prcmu_clk[PRCMU_DMACLK] = clk;
- clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0);
prcmu_clk[PRCMU_B2R2CLK] = clk;
clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_TVCLK] = clk;
- clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0);
prcmu_clk[PRCMU_SSPCLK] = clk;
- clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0);
prcmu_clk[PRCMU_RNGCLK] = clk;
- clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0);
prcmu_clk[PRCMU_UICCCLK] = clk;
- clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0);
prcmu_clk[PRCMU_TIMCLK] = clk;
clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK,
- 100000000,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ 100000000, CLK_SET_RATE_GATE);
prcmu_clk[PRCMU_SDMMCCLK] = clk;
clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
@@ -252,7 +248,7 @@ void u8500_clk_init(void)
prcmu_clk[PRCMU_DSI2ESCCLK] = clk;
clk = clk_reg_prcmu_scalable_rate("armss", NULL,
- PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED);
prcmu_clk[PRCMU_ARMSS] = clk;
twd_clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
index d7bcb7a86615..86549e59fb42 100644
--- a/drivers/clk/ux500/u8540_clk.c
+++ b/drivers/clk/ux500/u8540_clk.c
@@ -56,28 +56,28 @@ void u8540_clk_init(void)
/* Clock sources. */
/* Fixed ClockGen */
clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
clk_register_clkdev(clk, "soc0_pll", NULL);
clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
clk_register_clkdev(clk, "soc1_pll", NULL);
clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ CLK_IGNORE_UNUSED);
clk_register_clkdev(clk, "ddr_pll", NULL);
clk = clk_register_fixed_rate(NULL, "rtc32k", NULL,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED,
+ CLK_IGNORE_UNUSED,
32768);
clk_register_clkdev(clk, "clk32k", NULL);
clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
clk = clk_register_fixed_rate(NULL, "ulp38m4", NULL,
- CLK_IS_ROOT|CLK_IGNORE_UNUSED,
+ CLK_IGNORE_UNUSED,
38400000);
- clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0);
clk_register_clkdev(clk, NULL, "UART");
/* msp02clk needs an abx500 clk as parent. Handled by the abx500 clk driver */
@@ -85,120 +85,116 @@ void u8540_clk_init(void)
PRCMU_MSP02CLK, 0);
clk_register_clkdev(clk, NULL, "MSP02");
- clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0);
clk_register_clkdev(clk, NULL, "MSP1");
- clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0);
clk_register_clkdev(clk, NULL, "I2C");
- clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0);
clk_register_clkdev(clk, NULL, "slim");
- clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH1");
- clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH2");
- clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH3");
- clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH5");
- clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH6");
- clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0);
clk_register_clkdev(clk, NULL, "PERIPH7");
clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "lcd");
clk_register_clkdev(clk, "lcd", "mcde");
- clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0);
clk_register_clkdev(clk, NULL, "bml");
clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "hdmi");
clk_register_clkdev(clk, "hdmi", "mcde");
- clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0);
clk_register_clkdev(clk, NULL, "apeat");
- clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK, 0);
clk_register_clkdev(clk, NULL, "apetrace");
- clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0);
clk_register_clkdev(clk, NULL, "mcde");
clk_register_clkdev(clk, "mcde", "mcde");
clk_register_clkdev(clk, NULL, "dsilink.0");
clk_register_clkdev(clk, NULL, "dsilink.1");
clk_register_clkdev(clk, NULL, "dsilink.2");
- clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0);
clk_register_clkdev(clk, NULL, "ipi2");
- clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK,
- CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0);
clk_register_clkdev(clk, NULL, "dsialt");
- clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0);
clk_register_clkdev(clk, NULL, "dma40.0");
- clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0);
clk_register_clkdev(clk, NULL, "b2r2");
clk_register_clkdev(clk, NULL, "b2r2_core");
clk_register_clkdev(clk, NULL, "U8500-B2R2.0");
clk_register_clkdev(clk, NULL, "b2r2_1_core");
clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "tv");
clk_register_clkdev(clk, "tv", "mcde");
- clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0);
clk_register_clkdev(clk, NULL, "SSP");
- clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0);
clk_register_clkdev(clk, NULL, "rngclk");
- clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0);
clk_register_clkdev(clk, NULL, "uicc");
- clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0);
clk_register_clkdev(clk, NULL, "mtu0");
clk_register_clkdev(clk, NULL, "mtu1");
clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL,
PRCMU_SDMMCCLK, 100000000,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdmmc");
clk = clk_reg_prcmu_opp_volt_scalable("sdmmchclk", NULL,
PRCMU_SDMMCHCLK, 400000000,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "sdmmchclk");
- clk = clk_reg_prcmu_gate("hvaclk", NULL, PRCMU_HVACLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("hvaclk", NULL, PRCMU_HVACLK, 0);
clk_register_clkdev(clk, NULL, "hva");
- clk = clk_reg_prcmu_gate("g1clk", NULL, PRCMU_G1CLK, CLK_IS_ROOT);
+ clk = clk_reg_prcmu_gate("g1clk", NULL, PRCMU_G1CLK, 0);
clk_register_clkdev(clk, NULL, "g1");
clk = clk_reg_prcmu_scalable("spare1clk", NULL, PRCMU_SPARE1CLK, 0,
- CLK_IS_ROOT|CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE);
clk_register_clkdev(clk, "dsilcd", "mcde");
clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
@@ -244,7 +240,7 @@ void u8540_clk_init(void)
clk_register_clkdev(clk, "dsilp2", "mcde");
clk = clk_reg_prcmu_scalable_rate("armss", NULL,
- PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED);
+ PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED);
clk_register_clkdev(clk, "armss", NULL);
clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index 3bca438ecd19..5e9b65278e4c 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -170,7 +170,7 @@ static struct clk *icst_clk_setup(struct device *dev,
init.name = name;
init.ops = &icst_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
icst->map = map;
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 65c842a21c62..74c3216dbb00 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -98,8 +98,7 @@ void integrator_impd1_clk_init(void __iomem *base, unsigned int id)
/* Register the fixed rate PCLK */
imc->pclkname = kasprintf(GFP_KERNEL, "lm%x-pclk", id);
- pclk = clk_register_fixed_rate(NULL, imc->pclkname, NULL,
- CLK_IS_ROOT, 0);
+ pclk = clk_register_fixed_rate(NULL, imc->pclkname, NULL, 0, 0);
imc->pclk = pclk;
imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id);
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index bd4dd2463e23..c56efc70ac16 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -56,12 +56,11 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
struct clk *clk;
/* APB clock dummy */
- clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
clk_register_clkdev(clk, "apb_pclk", NULL);
/* 24 MHz clock */
- clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT,
- 24000000);
+ clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, 0, 24000000);
clk_register_clkdev(clk, NULL, "dev:uart0");
clk_register_clkdev(clk, NULL, "dev:uart1");
clk_register_clkdev(clk, NULL, "dev:uart2");
@@ -81,8 +80,7 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
/* 1 MHz clock */
- clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT,
- 1000000);
+ clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, 0, 1000000);
clk_register_clkdev(clk, NULL, "sp804");
/* ICST VCO clock */
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index e78755e0ef78..1fe1e8d970cf 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
int num = ARRAY_SIZE(parent_names);
char name[12];
struct clk_init_data init;
+ static int instance;
int i;
bool deprecated;
@@ -117,7 +118,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
- snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
+ snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
sp810->timerclken[i].sp810 = sp810;
sp810->timerclken[i].channel = i;
@@ -138,5 +139,6 @@ static void __init clk_sp810_of_setup(struct device_node *node)
}
of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
+ instance++;
}
CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
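
The static instance counter gives each SP810 its own namespace for the
timerclken outputs, so two controllers no longer register clocks with
colliding names. With name[12], the "sp810_%d_%d" pattern stays within
bounds for instances up to 999 and the four channels:

	/* instance 0 yields "sp810_0_0" .. "sp810_0_3",
	 * instance 1 yields "sp810_1_0" .. "sp810_1_3", and so on */
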
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 89c0609e180b..7e5add7d7752 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -94,7 +94,7 @@ static int vexpress_osc_probe(struct platform_device *pdev)
init.name = dev_name(&pdev->dev);
init.ops = &vexpress_osc_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.num_parents = 0;
osc->hw.init = &init;
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index f827083defc4..6b40eb89ae19 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -10,8 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -30,7 +28,7 @@ static int lpt_clk_probe(struct platform_device *pdev)
/* LPSS free running clock */
drvdata->name = "lpss_clk";
clk = clk_register_fixed_rate(&pdev->dev, drvdata->name, NULL,
- CLK_IS_ROOT, 100000000);
+ 0, 100000000);
if (IS_ERR(clk))
return PTR_ERR(clk);
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 38a65c3e62fc..88a2cab37f62 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -265,8 +265,7 @@ static void __init zynq_clk_setup(struct device_node *np)
pr_warn("ps_clk frequency not specified, using 33 MHz.\n");
tmp = 33333333;
}
- ps_clk = clk_register_fixed_rate(NULL, "ps_clk", NULL, CLK_IS_ROOT,
- tmp);
+ ps_clk = clk_register_fixed_rate(NULL, "ps_clk", NULL, 0, tmp);
/* PLLs */
clk = clk_register_zynq_pll("armpll_int", "ps_clk", SLCR_ARMPLL_CTRL,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 33db7406c0e2..c346be650892 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -160,6 +160,7 @@ config CLKSRC_EFM32
config CLKSRC_LPC32XX
bool "Clocksource for LPC32XX" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
+ depends on ARM
select CLKSRC_MMIO
select CLKSRC_OF
help
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index c64d543d64bf..5152b3898155 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -32,6 +32,14 @@
#define CNTTIDR 0x08
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
+#define CNTACR(n) (0x40 + ((n) * 4))
+#define CNTACR_RPCT BIT(0)
+#define CNTACR_RVCT BIT(1)
+#define CNTACR_RFRQ BIT(2)
+#define CNTACR_RVOFF BIT(3)
+#define CNTACR_RWVT BIT(4)
+#define CNTACR_RWPT BIT(5)
+
#define CNTVCT_LO 0x08
#define CNTVCT_HI 0x0c
#define CNTFRQ 0x10
@@ -67,7 +75,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
static struct clock_event_device __percpu *arch_timer_evt;
-static bool arch_timer_use_virtual = true;
+static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
@@ -263,14 +271,22 @@ static void __arch_timer_setup(unsigned type,
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->cpumask = cpumask_of(smp_processor_id());
- if (arch_timer_use_virtual) {
- clk->irq = arch_timer_ppi[VIRT_PPI];
+ clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
clk->set_next_event = arch_timer_set_next_event_virt;
- } else {
- clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+ break;
+ case PHYS_SECURE_PPI:
+ case PHYS_NONSECURE_PPI:
+ case HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
clk->set_next_event = arch_timer_set_next_event_phys;
+ break;
+ default:
+ BUG();
}
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
@@ -279,10 +295,12 @@ static void __arch_timer_setup(unsigned type,
clk->cpumask = cpu_all_mask;
if (arch_timer_mem_use_virtual) {
clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
clk->set_next_event =
arch_timer_set_next_event_virt_mem;
} else {
clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
clk->set_next_event =
arch_timer_set_next_event_phys_mem;
}
@@ -338,17 +356,20 @@ static void arch_counter_set_user_access(void)
arch_timer_set_cntkctl(cntkctl);
}
+static bool arch_timer_has_nonsecure_ppi(void)
+{
+ return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
+ arch_timer_ppi[PHYS_NONSECURE_PPI]);
+}
+
static int arch_timer_setup(struct clock_event_device *clk)
{
__arch_timer_setup(ARCH_CP15_TIMER, clk);
- if (arch_timer_use_virtual)
- enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
- else {
- enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
- }
+ enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+
+ if (arch_timer_has_nonsecure_ppi())
+ enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
arch_counter_set_user_access();
if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
@@ -390,7 +411,7 @@ static void arch_timer_banner(unsigned type)
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
type & ARCH_CP15_TIMER ?
- arch_timer_use_virtual ? "virt" : "phys" :
+ (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
"",
type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
type & ARCH_MEM_TIMER ?
@@ -460,7 +481,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
+ if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
@@ -490,13 +511,9 @@ static void arch_timer_stop(struct clock_event_device *clk)
pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
clk->irq, smp_processor_id());
- if (arch_timer_use_virtual)
- disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
- else {
- disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
- }
+ disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
+ if (arch_timer_has_nonsecure_ppi())
+ disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
clk->set_state_shutdown(clk);
}
@@ -562,12 +579,14 @@ static int __init arch_timer_register(void)
goto out;
}
- if (arch_timer_use_virtual) {
- ppi = arch_timer_ppi[VIRT_PPI];
+ ppi = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case VIRT_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_virt,
"arch_timer", arch_timer_evt);
- } else {
- ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+ break;
+ case PHYS_SECURE_PPI:
+ case PHYS_NONSECURE_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
@@ -578,6 +597,13 @@ static int __init arch_timer_register(void)
free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
arch_timer_evt);
}
+ break;
+ case HYP_PPI:
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ break;
+ default:
+ BUG();
}
if (err) {
@@ -602,15 +628,10 @@ static int __init arch_timer_register(void)
out_unreg_notify:
unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
- if (arch_timer_use_virtual)
- free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
- else {
- free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
+ if (arch_timer_has_nonsecure_ppi())
+ free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
arch_timer_evt);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
- arch_timer_evt);
- }
out_free:
free_percpu(arch_timer_evt);
@@ -697,12 +718,25 @@ static void __init arch_timer_init(void)
*
* If no interrupt provided for virtual timer, we'll have to
* stick to the physical timer. It'd better be accessible...
+ *
+ * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
+ * accesses to CNTP_*_EL1 registers are silently redirected to
+ * their CNTHP_*_EL2 counterparts, and use a different PPI
+ * number.
*/
if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
- arch_timer_use_virtual = false;
+ bool has_ppi;
- if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
- !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ if (is_kernel_in_hyp_mode()) {
+ arch_timer_uses_ppi = HYP_PPI;
+ has_ppi = !!arch_timer_ppi[HYP_PPI];
+ } else {
+ arch_timer_uses_ppi = PHYS_SECURE_PPI;
+ has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
+ !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
+ }
+
+ if (!has_ppi) {
pr_warn("arch_timer: No interrupt available, giving up\n");
return;
}
@@ -735,7 +769,7 @@ static void __init arch_timer_of_init(struct device_node *np)
*/
if (IS_ENABLED(CONFIG_ARM) &&
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
- arch_timer_use_virtual = false;
+ arch_timer_uses_ppi = PHYS_SECURE_PPI;
arch_timer_init();
}
@@ -757,7 +791,6 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
- iounmap(cntctlbase);
/*
* Try to find a virtual capable frame. Otherwise fall back to a
@@ -765,20 +798,31 @@ static void __init arch_timer_mem_init(struct device_node *np)
*/
for_each_available_child_of_node(np, frame) {
int n;
+ u32 cntacr;
if (of_property_read_u32(frame, "frame-number", &n)) {
pr_err("arch_timer: Missing frame-number\n");
- of_node_put(best_frame);
of_node_put(frame);
- return;
+ goto out;
}
- if (cnttidr & CNTTIDR_VIRT(n)) {
+ /* Try enabling everything, and see what sticks */
+ cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+ CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+ writel_relaxed(cntacr, cntctlbase + CNTACR(n));
+ cntacr = readl_relaxed(cntctlbase + CNTACR(n));
+
+ if ((cnttidr & CNTTIDR_VIRT(n)) &&
+ !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
of_node_put(best_frame);
best_frame = frame;
arch_timer_mem_use_virtual = true;
break;
}
+
+ if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
+ continue;
+
of_node_put(best_frame);
best_frame = of_node_get(frame);
}
@@ -786,24 +830,26 @@ static void __init arch_timer_mem_init(struct device_node *np)
base = arch_counter_base = of_iomap(best_frame, 0);
if (!base) {
pr_err("arch_timer: Can't map frame's registers\n");
- of_node_put(best_frame);
- return;
+ goto out;
}
if (arch_timer_mem_use_virtual)
irq = irq_of_parse_and_map(best_frame, 1);
else
irq = irq_of_parse_and_map(best_frame, 0);
- of_node_put(best_frame);
+
if (!irq) {
pr_err("arch_timer: Frame missing %s irq",
arch_timer_mem_use_virtual ? "virt" : "phys");
- return;
+ goto out;
}
arch_timer_detect_rate(base, np);
arch_timer_mem_register(base, irq);
arch_timer_common_init();
+out:
+ iounmap(cntctlbase);
+ of_node_put(best_frame);
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
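
The "try enabling everything" probe relies on unimplemented CNTACR
rights reading back as zero: write the full mask, read it back, and keep
only frames where the needed bits stuck. The bit test used above is the
usual all-bits-set idiom; spelled out as a sketch:

	/* !(~cntacr & mask) is equivalent to (cntacr & mask) == mask */
	bool virt_ok = (cntacr & (CNTACR_RWVT | CNTACR_RVCT)) ==
		       (CNTACR_RWVT | CNTACR_RVCT);
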
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index d189d8cb69f7..9df0d1699d22 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -16,6 +16,7 @@
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -174,6 +175,7 @@ static int gt_clockevents_init(struct clock_event_device *clk)
clk->set_state_shutdown = gt_clockevent_shutdown;
clk->set_state_periodic = gt_clockevent_set_periodic;
clk->set_state_oneshot = gt_clockevent_shutdown;
+ clk->set_state_oneshot_stopped = gt_clockevent_shutdown;
clk->set_next_event = gt_clockevent_set_next_event;
clk->cpumask = cpumask_of(cpu);
clk->rating = 300;
@@ -221,6 +223,21 @@ static u64 notrace gt_sched_clock_read(void)
}
#endif
+static unsigned long gt_read_long(void)
+{
+ return readl_relaxed(gt_base + GT_COUNTER0);
+}
+
+static struct delay_timer gt_delay_timer = {
+ .read_current_timer = gt_read_long,
+};
+
+static void __init gt_delay_timer_init(void)
+{
+ gt_delay_timer.freq = gt_clk_rate;
+ register_current_timer_delay(&gt_delay_timer);
+}
+
static void __init gt_clocksource_init(void)
{
writel(0, gt_base + GT_CONTROL);
@@ -317,6 +334,7 @@ static void __init global_timer_of_register(struct device_node *np)
/* Immediately configure the timer on the boot CPU */
gt_clocksource_init();
gt_clockevents_init(this_cpu_ptr(gt_evt));
+ gt_delay_timer_init();
return;
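
Registering the global timer with register_current_timer_delay() lets ARM's udelay() poll the counter instead of running a calibrated busy loop, which stays accurate when cpufreq scales the CPU clock. A userspace analogue of the idea, using CLOCK_MONOTONIC as the stand-in counter (illustrative only, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t read_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void delay_ns(uint64_t ns)
{
	uint64_t start = read_counter();

	/* Spin on the counter: unlike a loop-based delay, the wait time
	 * does not stretch when the CPU clock slows down. */
	while (read_counter() - start < ns)
		;
}

int main(void)
{
	delay_ns(1000000); /* ~1 ms */
	puts("done");
	return 0;
}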
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ff44082a0827..be09bc0b5e26 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -313,6 +313,7 @@ static struct clock_event_device mct_comp_device = {
.set_state_periodic = mct_set_state_periodic,
.set_state_shutdown = mct_set_state_shutdown,
.set_state_oneshot = mct_set_state_shutdown,
+ .set_state_oneshot_stopped = mct_set_state_shutdown,
.tick_resume = mct_set_state_shutdown,
};
@@ -452,6 +453,7 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
evt->set_state_periodic = set_state_periodic;
evt->set_state_shutdown = set_state_shutdown;
evt->set_state_oneshot = set_state_shutdown;
+ evt->set_state_oneshot_stopped = set_state_shutdown;
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
evt->rating = 450;
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index 8c77a529d0d4..b991b288c803 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -122,23 +122,23 @@ static void __init rk_timer_init(struct device_node *np)
pclk = of_clk_get_by_name(np, "pclk");
if (IS_ERR(pclk)) {
pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
- return;
+ goto out_unmap;
}
if (clk_prepare_enable(pclk)) {
pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
- return;
+ goto out_unmap;
}
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk)) {
pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
- return;
+ goto out_timer_clk;
}
if (clk_prepare_enable(timer_clk)) {
pr_err("Failed to enable timer clock\n");
- return;
+ goto out_timer_clk;
}
bc_timer.freq = clk_get_rate(timer_clk);
@@ -146,7 +146,7 @@ static void __init rk_timer_init(struct device_node *np)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
- return;
+ goto out_irq;
}
ce->name = TIMER_NAME;
@@ -164,10 +164,19 @@ static void __init rk_timer_init(struct device_node *np)
ret = request_irq(irq, rk_timer_interrupt, IRQF_TIMER, TIMER_NAME, ce);
if (ret) {
pr_err("Failed to initialize '%s': %d\n", TIMER_NAME, ret);
- return;
+ goto out_irq;
}
clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
+
+ return;
+
+out_irq:
+ clk_disable_unprepare(timer_clk);
+out_timer_clk:
+ clk_disable_unprepare(pclk);
+out_unmap:
+ iounmap(bc_timer.base);
}
CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
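
The rockchip rework converts every early return into a goto ladder that unwinds in reverse acquisition order, so a late failure no longer leaks the mapping or the enabled clocks. A compilable userspace model of the same ladder (the resources here are stand-ins, not the driver's):

#include <stdio.h>
#include <stdlib.h>

static int timer_init(void)
{
	void *base;
	FILE *pclk, *timer_clk;

	base = malloc(16);                   /* stands in for the ioremap */
	if (!base)
		return -1;

	pclk = fopen("/dev/null", "r");      /* stands in for pclk */
	if (!pclk)
		goto out_unmap;

	timer_clk = fopen("/dev/null", "r"); /* stands in for the timer clock */
	if (!timer_clk)
		goto out_pclk;

	/* ...IRQ setup and registration would go here; a failure there
	 * would first release timer_clk, then fall through the ladder... */
	return 0; /* success keeps every resource alive */

out_pclk:
	fclose(pclk);
out_unmap:
	free(base);
	return -1;
}

int main(void)
{
	return timer_init() ? 1 : 0;
}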
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index 1316876b487a..daae61e8c820 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -43,6 +44,7 @@
struct lpc32xx_clock_event_ddata {
struct clock_event_device evtdev;
void __iomem *base;
+ u32 ticks_per_jiffy;
};
/* Needed for the sched clock */
@@ -53,6 +55,15 @@ static u64 notrace lpc32xx_read_sched_clock(void)
return readl(clocksource_timer_counter);
}
+static unsigned long lpc32xx_delay_timer_read(void)
+{
+ return readl(clocksource_timer_counter);
+}
+
+static struct delay_timer lpc32xx_delay_timer = {
+ .read_current_timer = lpc32xx_delay_timer_read,
+};
+
static int lpc32xx_clkevt_next_event(unsigned long delta,
struct clock_event_device *evtdev)
{
@@ -60,14 +71,13 @@ static int lpc32xx_clkevt_next_event(unsigned long delta,
container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
/*
- * Place timer in reset and program the delta in the prescale
- * register (PR). When the prescale counter matches the value
- * in PR the counter register is incremented and the compare
- * match will trigger. After setup the timer is released from
- * reset and enabled.
+ * Place timer in reset and program the delta in the match
+ * channel 0 (MR0). When the timer counter matches the value
+ * in MR0 register the match will trigger an interrupt.
+ * After setup the timer is released from reset and enabled.
*/
writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
- writel_relaxed(delta, ddata->base + LPC32XX_TIMER_PR);
+ writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0);
writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
return 0;
@@ -86,11 +96,39 @@ static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev)
static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev)
{
+ struct lpc32xx_clock_event_ddata *ddata =
+ container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
/*
* When using oneshot, we must also disable the timer
* to wait for the first call to set_next_event().
*/
- return lpc32xx_clkevt_shutdown(evtdev);
+ writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);
+
+ /* Enable interrupt, reset on match and stop on match (MCR). */
+ writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
+ LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR);
+ return 0;
+}
+
+static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev)
+{
+ struct lpc32xx_clock_event_ddata *ddata =
+ container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
+ /* Enable interrupt and reset on match. */
+ writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R,
+ ddata->base + LPC32XX_TIMER_MCR);
+
+ /*
+ * Place timer in reset and program the delta in the match
+ * channel 0 (MR0).
+ */
+ writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
+ writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0);
+ writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
+
+ return 0;
}
static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
@@ -108,11 +146,13 @@ static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = {
.evtdev = {
.name = "lpc3220 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.set_next_event = lpc32xx_clkevt_next_event,
.set_state_shutdown = lpc32xx_clkevt_shutdown,
.set_state_oneshot = lpc32xx_clkevt_oneshot,
+ .set_state_periodic = lpc32xx_clkevt_periodic,
},
};
@@ -162,6 +202,8 @@ static int __init lpc32xx_clocksource_init(struct device_node *np)
}
clocksource_timer_counter = base + LPC32XX_TIMER_TC;
+ lpc32xx_delay_timer.freq = rate;
+ register_current_timer_delay(&lpc32xx_delay_timer);
sched_clock_register(lpc32xx_read_sched_clock, 32, rate);
return 0;
@@ -210,18 +252,16 @@ static int __init lpc32xx_clockevent_init(struct device_node *np)
/*
* Disable timer and clear any pending interrupt (IR) on match
- * channel 0 (MR0). Configure a compare match value of 1 on MR0
- * and enable interrupt, reset on match and stop on match (MCR).
+ * channel 0 (MR0). Clear the prescaler as it's not used.
*/
writel_relaxed(0, base + LPC32XX_TIMER_TCR);
+ writel_relaxed(0, base + LPC32XX_TIMER_PR);
writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR);
- writel_relaxed(1, base + LPC32XX_TIMER_MR0);
- writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
- LPC32XX_TIMER_MCR_MR0S, base + LPC32XX_TIMER_MCR);
rate = clk_get_rate(clk);
lpc32xx_clk_event_ddata.base = base;
+ lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev,
rate, 1, -1);
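
Periodic mode reloads match channel 0 with ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ), computed once at init. As a quick sanity check, assuming an (illustrative) 13 MHz timer clock and HZ=100, that is 130000 counts per tick:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long rate = 13000000; /* timer clock in Hz (assumed) */
	unsigned int hz = 100;         /* scheduler tick rate (assumed) */

	printf("ticks per jiffy: %lu\n", DIV_ROUND_CLOSEST(rate, hz)); /* 130000 */
	return 0;
}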
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
index 3269d9ef7a18..376e59bc5fa0 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/time-pistachio.c
@@ -163,7 +163,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
if (IS_ERR(periph_regs)) {
- pr_err("cannot get peripheral regmap (%lu)\n",
+ pr_err("cannot get peripheral regmap (%ld)\n",
PTR_ERR(periph_regs));
return;
}
@@ -176,7 +176,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
sys_clk = of_clk_get_by_name(node, "sys");
if (IS_ERR(sys_clk)) {
- pr_err("clock get failed (%lu)\n", PTR_ERR(sys_clk));
+ pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
return;
}
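
The pistachio fix matters because PTR_ERR() returns a signed long holding a negative errno; printing it with %lu renders it as a huge unsigned value, while %ld shows the real code. A self-contained demonstration (simplified ERR_PTR/PTR_ERR macros, LP64 assumed):

#include <stdio.h>

#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))

int main(void)
{
	void *clk = ERR_PTR(-2); /* -ENOENT */

	printf("%%lu prints: %lu\n", (unsigned long)PTR_ERR(clk)); /* 18446744073709551614 */
	printf("%%ld prints: %ld\n", PTR_ERR(clk));                /* -2 */
	return 0;
}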
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index f93511031177..a7f45853c103 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -19,6 +19,7 @@ config CPU_FREQ
if CPU_FREQ
config CPU_FREQ_GOV_COMMON
+ select IRQ_WORK
bool
config CPU_FREQ_BOOST_SW
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 51eef87bbc37..fb5712141040 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -70,6 +70,8 @@ struct acpi_cpufreq_data {
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
cpumask_var_t freqdomain_cpus;
+ void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
+ u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};
/* acpi_perf_data is a pointer to percpu data. */
@@ -243,125 +245,119 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
}
}
-struct msr_addr {
- u32 reg;
-};
+static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
+{
+ u32 val, dummy;
-struct io_addr {
- u16 port;
- u8 bit_width;
-};
+ rdmsr(MSR_IA32_PERF_CTL, val, dummy);
+ return val;
+}
+
+static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
+{
+ u32 lo, hi;
+
+ rdmsr(MSR_IA32_PERF_CTL, lo, hi);
+ lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
+ wrmsr(MSR_IA32_PERF_CTL, lo, hi);
+}
+
+static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
+{
+ u32 val, dummy;
+
+ rdmsr(MSR_AMD_PERF_CTL, val, dummy);
+ return val;
+}
+
+static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
+{
+ wrmsr(MSR_AMD_PERF_CTL, val, 0);
+}
+
+static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
+{
+ u32 val;
+
+ acpi_os_read_port(reg->address, &val, reg->bit_width);
+ return val;
+}
+
+static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
+{
+ acpi_os_write_port(reg->address, val, reg->bit_width);
+}
struct drv_cmd {
- unsigned int type;
- const struct cpumask *mask;
- union {
- struct msr_addr msr;
- struct io_addr io;
- } addr;
+ struct acpi_pct_register *reg;
u32 val;
+ union {
+ void (*write)(struct acpi_pct_register *reg, u32 val);
+ u32 (*read)(struct acpi_pct_register *reg);
+ } func;
};
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
- u32 h;
- switch (cmd->type) {
- case SYSTEM_INTEL_MSR_CAPABLE:
- case SYSTEM_AMD_MSR_CAPABLE:
- rdmsr(cmd->addr.msr.reg, cmd->val, h);
- break;
- case SYSTEM_IO_CAPABLE:
- acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
- &cmd->val,
- (u32)cmd->addr.io.bit_width);
- break;
- default:
- break;
- }
+ cmd->val = cmd->func.read(cmd->reg);
}
-/* Called via smp_call_function_many(), on the target CPUs */
-static void do_drv_write(void *_cmd)
+static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
- struct drv_cmd *cmd = _cmd;
- u32 lo, hi;
+ struct acpi_processor_performance *perf = to_perf_data(data);
+ struct drv_cmd cmd = {
+ .reg = &perf->control_register,
+ .func.read = data->cpu_freq_read,
+ };
+ int err;
- switch (cmd->type) {
- case SYSTEM_INTEL_MSR_CAPABLE:
- rdmsr(cmd->addr.msr.reg, lo, hi);
- lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
- wrmsr(cmd->addr.msr.reg, lo, hi);
- break;
- case SYSTEM_AMD_MSR_CAPABLE:
- wrmsr(cmd->addr.msr.reg, cmd->val, 0);
- break;
- case SYSTEM_IO_CAPABLE:
- acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
- cmd->val,
- (u32)cmd->addr.io.bit_width);
- break;
- default:
- break;
- }
+ err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
+ WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
+ return cmd.val;
}
-static void drv_read(struct drv_cmd *cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
{
- int err;
- cmd->val = 0;
+ struct drv_cmd *cmd = _cmd;
- err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
- WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */
+ cmd->func.write(cmd->reg, cmd->val);
}
-static void drv_write(struct drv_cmd *cmd)
+static void drv_write(struct acpi_cpufreq_data *data,
+ const struct cpumask *mask, u32 val)
{
+ struct acpi_processor_performance *perf = to_perf_data(data);
+ struct drv_cmd cmd = {
+ .reg = &perf->control_register,
+ .val = val,
+ .func.write = data->cpu_freq_write,
+ };
int this_cpu;
this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, cmd->mask))
- do_drv_write(cmd);
- smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
+ if (cpumask_test_cpu(this_cpu, mask))
+ do_drv_write(&cmd);
+
+ smp_call_function_many(mask, do_drv_write, &cmd, 1);
put_cpu();
}
-static u32
-get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
+static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
- struct acpi_processor_performance *perf;
- struct drv_cmd cmd;
+ u32 val;
if (unlikely(cpumask_empty(mask)))
return 0;
- switch (data->cpu_feature) {
- case SYSTEM_INTEL_MSR_CAPABLE:
- cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
- break;
- case SYSTEM_AMD_MSR_CAPABLE:
- cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
- break;
- case SYSTEM_IO_CAPABLE:
- cmd.type = SYSTEM_IO_CAPABLE;
- perf = to_perf_data(data);
- cmd.addr.io.port = perf->control_register.address;
- cmd.addr.io.bit_width = perf->control_register.bit_width;
- break;
- default:
- return 0;
- }
-
- cmd.mask = mask;
- drv_read(&cmd);
+ val = drv_read(data, mask);
- pr_debug("get_cur_val = %u\n", cmd.val);
+ pr_debug("get_cur_val = %u\n", val);
- return cmd.val;
+ return val;
}
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
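
The acpi-cpufreq refactor above replaces the per-call switch on data->cpu_feature with read/write callbacks chosen once in acpi_cpufreq_cpu_init(), so do_drv_read()/do_drv_write() collapse to a single indirect call. A standalone model of that dispatch (userspace C; the MSR is faked with a variable and the callback signatures are simplified):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_perf_ctl = 0x0a;

static uint32_t freq_read_msr(void)      { return fake_perf_ctl; }
static void freq_write_msr(uint32_t val) { fake_perf_ctl = val; }

struct driver_data {
	uint32_t (*cpu_freq_read)(void);
	void (*cpu_freq_write)(uint32_t val);
};

int main(void)
{
	/* Selected once at init time, the way acpi_cpufreq_cpu_init()
	 * picks the intel/amd/io helpers. */
	struct driver_data data = {
		.cpu_freq_read  = freq_read_msr,
		.cpu_freq_write = freq_write_msr,
	};

	data.cpu_freq_write(0x10);
	printf("PERF_CTL = 0x%x\n", data.cpu_freq_read());
	return 0;
}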
@@ -416,7 +412,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
{
struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
- struct drv_cmd cmd;
+ const struct cpumask *mask;
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
@@ -434,42 +430,21 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
} else {
pr_debug("Already at target state (P%d)\n",
next_perf_state);
- goto out;
+ return 0;
}
}
- switch (data->cpu_feature) {
- case SYSTEM_INTEL_MSR_CAPABLE:
- cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
- cmd.val = (u32) perf->states[next_perf_state].control;
- break;
- case SYSTEM_AMD_MSR_CAPABLE:
- cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
- cmd.val = (u32) perf->states[next_perf_state].control;
- break;
- case SYSTEM_IO_CAPABLE:
- cmd.type = SYSTEM_IO_CAPABLE;
- cmd.addr.io.port = perf->control_register.address;
- cmd.addr.io.bit_width = perf->control_register.bit_width;
- cmd.val = (u32) perf->states[next_perf_state].control;
- break;
- default:
- result = -ENODEV;
- goto out;
- }
-
- /* cpufreq holds the hotplug lock, so we are safe from here on */
- if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
- cmd.mask = policy->cpus;
- else
- cmd.mask = cpumask_of(policy->cpu);
+ /*
+ * The core won't allow CPUs to go away until the governor has been
+ * stopped, so we can rely on the stability of policy->cpus.
+ */
+ mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
+ cpumask_of(policy->cpu) : policy->cpus;
- drv_write(&cmd);
+ drv_write(data, mask, perf->states[next_perf_state].control);
if (acpi_pstate_strict) {
- if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
+ if (!check_freqs(mask, data->freq_table[index].frequency,
data)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
@@ -480,7 +455,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
if (!result)
perf->state = next_perf_state;
-out:
return result;
}
@@ -540,8 +514,10 @@ static int boost_notify(struct notifier_block *nb, unsigned long action,
*/
switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
break;
@@ -740,15 +716,21 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_io;
+ data->cpu_freq_write = cpu_freq_write_io;
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
pr_debug("HARDWARE addr space\n");
if (check_est_cpu(cpu)) {
data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_intel;
+ data->cpu_freq_write = cpu_freq_write_intel;
break;
}
if (check_amd_hwpstate_cpu(cpu)) {
data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+ data->cpu_freq_read = cpu_freq_read_amd;
+ data->cpu_freq_write = cpu_freq_write_amd;
break;
}
result = -ENODEV;
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index f6b79ab0070b..404360cad25c 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -21,7 +21,7 @@
#include <asm/msr.h>
#include <asm/cpufeature.h>
-#include "cpufreq_governor.h"
+#include "cpufreq_ondemand.h"
#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL 0xc0010080
#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE 0xc0010081
@@ -45,10 +45,10 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
long d_actual, d_reference;
struct msr actual, reference;
struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
- struct dbs_data *od_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *od_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = od_data->tuners;
- struct od_cpu_dbs_info_s *od_info =
- od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
+ struct od_policy_dbs_info *od_info = to_dbs_info(policy_dbs);
if (!od_info->freq_table)
return freq_next;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 0ca74d070058..5f8dbe640a20 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -4,9 +4,6 @@
* Copyright (C) 2014 Linaro.
* Viresh Kumar <viresh.kumar@linaro.org>
*
- * The OPP code in function set_target() is reused from
- * drivers/cpufreq/omap-cpufreq.c
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -31,9 +28,8 @@
struct private_data {
struct device *cpu_dev;
- struct regulator *cpu_reg;
struct thermal_cooling_device *cdev;
- unsigned int voltage_tolerance; /* in percentage */
+ const char *reg_name;
};
static struct freq_attr *cpufreq_dt_attr[] = {
@@ -44,175 +40,128 @@ static struct freq_attr *cpufreq_dt_attr[] = {
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
- struct dev_pm_opp *opp;
- struct cpufreq_frequency_table *freq_table = policy->freq_table;
- struct clk *cpu_clk = policy->clk;
struct private_data *priv = policy->driver_data;
- struct device *cpu_dev = priv->cpu_dev;
- struct regulator *cpu_reg = priv->cpu_reg;
- unsigned long volt = 0, tol = 0;
- int volt_old = 0;
- unsigned int old_freq, new_freq;
- long freq_Hz, freq_exact;
- int ret;
- freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
- if (freq_Hz <= 0)
- freq_Hz = freq_table[index].frequency * 1000;
-
- freq_exact = freq_Hz;
- new_freq = freq_Hz / 1000;
- old_freq = clk_get_rate(cpu_clk) / 1000;
+ return dev_pm_opp_set_rate(priv->cpu_dev,
+ policy->freq_table[index].frequency * 1000);
+}
- if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq;
+/*
+ * An earlier version of opp-v1 bindings used to name the regulator
+ * "cpu0-supply", we still need to handle that for backwards compatibility.
+ */
+static const char *find_supply_name(struct device *dev)
+{
+ struct device_node *np;
+ struct property *pp;
+ int cpu = dev->id;
+ const char *name = NULL;
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(cpu_dev, "failed to find OPP for %ld\n",
- freq_Hz);
- return PTR_ERR(opp);
- }
- volt = dev_pm_opp_get_voltage(opp);
- opp_freq = dev_pm_opp_get_freq(opp);
- rcu_read_unlock();
- tol = volt * priv->voltage_tolerance / 100;
- volt_old = regulator_get_voltage(cpu_reg);
- dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
- opp_freq / 1000, volt);
- }
+ np = of_node_get(dev->of_node);
- dev_dbg(cpu_dev, "%u MHz, %d mV --> %u MHz, %ld mV\n",
- old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
- new_freq / 1000, volt ? volt / 1000 : -1);
+ /* This must be valid for sure */
+ if (WARN_ON(!np))
+ return NULL;
- /* scaling up? scale voltage before frequency */
- if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
- ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
- if (ret) {
- dev_err(cpu_dev, "failed to scale voltage up: %d\n",
- ret);
- return ret;
+ /* Try "cpu0" for older DTs */
+ if (!cpu) {
+ pp = of_find_property(np, "cpu0-supply", NULL);
+ if (pp) {
+ name = "cpu0";
+ goto node_put;
}
}
- ret = clk_set_rate(cpu_clk, freq_exact);
- if (ret) {
- dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
- if (!IS_ERR(cpu_reg) && volt_old > 0)
- regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- return ret;
+ pp = of_find_property(np, "cpu-supply", NULL);
+ if (pp) {
+ name = "cpu";
+ goto node_put;
}
- /* scaling down? scale voltage after frequency */
- if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
- ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
- if (ret) {
- dev_err(cpu_dev, "failed to scale voltage down: %d\n",
- ret);
- clk_set_rate(cpu_clk, old_freq * 1000);
- }
- }
-
- return ret;
+ dev_dbg(dev, "no regulator for cpu%d\n", cpu);
+node_put:
+ of_node_put(np);
+ return name;
}
-static int allocate_resources(int cpu, struct device **cdev,
- struct regulator **creg, struct clk **cclk)
+static int resources_available(void)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
struct clk *cpu_clk;
int ret = 0;
- char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
+ const char *name;
- cpu_dev = get_cpu_device(cpu);
+ cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
- pr_err("failed to get cpu%d device\n", cpu);
+ pr_err("failed to get cpu0 device\n");
return -ENODEV;
}
- /* Try "cpu0" for older DTs */
- if (!cpu)
- reg = reg_cpu0;
- else
- reg = reg_cpu;
-
-try_again:
- cpu_reg = regulator_get_optional(cpu_dev, reg);
- ret = PTR_ERR_OR_ZERO(cpu_reg);
+ cpu_clk = clk_get(cpu_dev, NULL);
+ ret = PTR_ERR_OR_ZERO(cpu_clk);
if (ret) {
/*
- * If cpu's regulator supply node is present, but regulator is
- * not yet registered, we should try defering probe.
+ * If cpu's clk node is present, but clock is not yet
+ * registered, we should try deferring probe.
*/
- if (ret == -EPROBE_DEFER) {
- dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
- cpu);
- return ret;
- }
-
- /* Try with "cpu-supply" */
- if (reg == reg_cpu0) {
- reg = reg_cpu;
- goto try_again;
- }
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(cpu_dev, "clock not ready, retry\n");
+ else
+ dev_err(cpu_dev, "failed to get clock: %d\n", ret);
- dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
+ return ret;
}
- cpu_clk = clk_get(cpu_dev, NULL);
- ret = PTR_ERR_OR_ZERO(cpu_clk);
- if (ret) {
- /* put regulator */
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
+ clk_put(cpu_clk);
+
+ name = find_supply_name(cpu_dev);
+ /* Platform doesn't require regulator */
+ if (!name)
+ return 0;
+ cpu_reg = regulator_get_optional(cpu_dev, name);
+ ret = PTR_ERR_OR_ZERO(cpu_reg);
+ if (ret) {
/*
- * If cpu's clk node is present, but clock is not yet
- * registered, we should try defering probe.
+ * If cpu's regulator supply node is present, but regulator is
+ * not yet registered, we should try deferring probe.
*/
if (ret == -EPROBE_DEFER)
- dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
+ dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
else
- dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
- ret);
- } else {
- *cdev = cpu_dev;
- *creg = cpu_reg;
- *cclk = cpu_clk;
+ dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
+
+ return ret;
}
- return ret;
+ regulator_put(cpu_reg);
+ return 0;
}
static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
- struct device_node *np;
struct private_data *priv;
struct device *cpu_dev;
- struct regulator *cpu_reg;
struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
- unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
- bool need_update = false;
+ bool opp_v1 = false;
+ const char *name;
int ret;
- ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
- if (ret) {
- pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
- return ret;
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
}
- np = of_node_get(cpu_dev->of_node);
- if (!np) {
- dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
- ret = -ENOENT;
- goto out_put_reg_clk;
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
+ return ret;
}
/* Get OPP-sharing information from "operating-points-v2" bindings */
@@ -223,9 +172,23 @@ static int cpufreq_init(struct cpufreq_policy *policy)
* finding shared-OPPs for backward compatibility.
*/
if (ret == -ENOENT)
- need_update = true;
+ opp_v1 = true;
else
- goto out_node_put;
+ goto out_put_clk;
+ }
+
+ /*
+ * OPP layer will be taking care of regulators now, but it needs to know
+ * the name of the regulator first.
+ */
+ name = find_supply_name(cpu_dev);
+ if (name) {
+ ret = dev_pm_opp_set_regulator(cpu_dev, name);
+ if (ret) {
+ dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
+ policy->cpu, ret);
+ goto out_put_clk;
+ }
}
/*
@@ -246,12 +209,12 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*/
ret = dev_pm_opp_get_opp_count(cpu_dev);
if (ret <= 0) {
- pr_debug("OPP table is not ready, deferring probe\n");
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
ret = -EPROBE_DEFER;
goto out_free_opp;
}
- if (need_update) {
+ if (opp_v1) {
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
if (!pd || !pd->independent_clocks)
@@ -265,10 +228,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
-
- of_property_read_u32(np, "clock-latency", &transition_latency);
- } else {
- transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -277,62 +236,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
-
- if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
-
- if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq = 0;
-
- /*
- * Disable any OPPs where the connected regulator isn't able to
- * provide the specified voltage and record minimum and maximum
- * voltage levels.
- */
- while (1) {
- struct dev_pm_opp *opp;
- unsigned long opp_uV, tol_uV;
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- break;
- }
- opp_uV = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- tol_uV = opp_uV * priv->voltage_tolerance / 100;
- if (regulator_is_supported_voltage(cpu_reg,
- opp_uV - tol_uV,
- opp_uV + tol_uV)) {
- if (opp_uV < min_uV)
- min_uV = opp_uV;
- if (opp_uV > max_uV)
- max_uV = opp_uV;
- } else {
- dev_pm_opp_disable(cpu_dev, opp_freq);
- }
-
- opp_freq++;
- }
-
- ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
- if (ret > 0)
- transition_latency += ret * 1000;
- }
+ priv->reg_name = name;
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
- pr_err("failed to init cpufreq table: %d\n", ret);
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_priv;
}
priv->cpu_dev = cpu_dev;
- priv->cpu_reg = cpu_reg;
policy->driver_data = priv;
-
policy->clk = cpu_clk;
rcu_read_lock();
@@ -357,9 +270,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
- policy->cpuinfo.transition_latency = transition_latency;
+ transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
+ if (!transition_latency)
+ transition_latency = CPUFREQ_ETERNAL;
- of_node_put(np);
+ policy->cpuinfo.transition_latency = transition_latency;
return 0;
@@ -369,12 +284,10 @@ out_free_priv:
kfree(priv);
out_free_opp:
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
-out_node_put:
- of_node_put(np);
-out_put_reg_clk:
+ if (name)
+ dev_pm_opp_put_regulator(cpu_dev);
+out_put_clk:
clk_put(cpu_clk);
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
return ret;
}
@@ -386,9 +299,10 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ if (priv->reg_name)
+ dev_pm_opp_put_regulator(priv->cpu_dev);
+
clk_put(policy->clk);
- if (!IS_ERR(priv->cpu_reg))
- regulator_put(priv->cpu_reg);
kfree(priv);
return 0;
@@ -441,9 +355,6 @@ static struct cpufreq_driver dt_cpufreq_driver = {
static int dt_cpufreq_probe(struct platform_device *pdev)
{
- struct device *cpu_dev;
- struct regulator *cpu_reg;
- struct clk *cpu_clk;
int ret;
/*
@@ -453,19 +364,15 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
*
* FIXME: Is checking this only for CPU0 sufficient ?
*/
- ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = resources_available();
if (ret)
return ret;
- clk_put(cpu_clk);
- if (!IS_ERR(cpu_reg))
- regulator_put(cpu_reg);
-
dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
- dev_err(cpu_dev, "failed register driver: %d\n", ret);
+ dev_err(&pdev->dev, "failed register driver: %d\n", ret);
return ret;
}
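
find_supply_name() now centralizes the regulator lookup: legacy opp-v1 DTs name the CPU0 regulator "cpu0-supply", newer ones use "cpu-supply", and a missing property simply means the platform has no CPU regulator. A compilable model of that fallback order (of_find_property() is stubbed; only the ordering is the point):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool has_property(const char *prop)
{
	/* Stand-in for of_find_property(); pretend only "cpu-supply" exists. */
	return strcmp(prop, "cpu-supply") == 0;
}

static const char *find_supply_name(int cpu)
{
	/* Try the legacy "cpu0" name first, but only for CPU0. */
	if (cpu == 0 && has_property("cpu0-supply"))
		return "cpu0";
	if (has_property("cpu-supply"))
		return "cpu";
	return NULL; /* platform simply has no CPU regulator */
}

int main(void)
{
	const char *name = find_supply_name(0);

	printf("supply name: %s\n", name ? name : "(none)");
	return 0;
}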
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e979ec78b695..e93405f0eac4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -38,48 +38,10 @@ static inline bool policy_is_inactive(struct cpufreq_policy *policy)
return cpumask_empty(policy->cpus);
}
-static bool suitable_policy(struct cpufreq_policy *policy, bool active)
-{
- return active == !policy_is_inactive(policy);
-}
-
-/* Finds Next Acive/Inactive policy */
-static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
- bool active)
-{
- do {
- /* No more policies in the list */
- if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
- return NULL;
-
- policy = list_next_entry(policy, policy_list);
- } while (!suitable_policy(policy, active));
-
- return policy;
-}
-
-static struct cpufreq_policy *first_policy(bool active)
-{
- struct cpufreq_policy *policy;
-
- /* No policies in the list */
- if (list_empty(&cpufreq_policy_list))
- return NULL;
-
- policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
- policy_list);
-
- if (!suitable_policy(policy, active))
- policy = next_policy(policy, active);
-
- return policy;
-}
-
/* Macros to iterate over CPU policies */
-#define for_each_suitable_policy(__policy, __active) \
- for (__policy = first_policy(__active); \
- __policy; \
- __policy = next_policy(__policy, __active))
+#define for_each_suitable_policy(__policy, __active) \
+ list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
+ if ((__active) == !policy_is_inactive(__policy))
#define for_each_active_policy(__policy) \
for_each_suitable_policy(__policy, true)
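
The iterator macros are rebuilt on list_for_each_entry() plus a trailing if that filters by active state, dropping the hand-rolled first_policy()/next_policy() walkers. A standalone model of the same "iterate then filter" macro shape over a plain array (note the trailing if means callers should brace any else of their own):

#include <stdbool.h>
#include <stdio.h>

struct policy { int id; bool active; };

#define for_each_suitable_policy(p, list, n, act)	\
	for ((p) = (list); (p) < (list) + (n); (p)++)	\
		if ((act) == (p)->active)

int main(void)
{
	struct policy policies[] = { {0, true}, {1, false}, {2, true} };
	struct policy *p;

	for_each_suitable_policy(p, policies, 3, true)
		printf("active policy %d\n", p->id);
	return 0;
}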
@@ -102,7 +64,6 @@ static LIST_HEAD(cpufreq_governor_list);
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
-DEFINE_MUTEX(cpufreq_governor_lock);
/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
@@ -113,10 +74,9 @@ static inline bool has_target(void)
}
/* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy,
- unsigned int event);
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
-static void handle_update(struct work_struct *work);
+static int cpufreq_start_governor(struct cpufreq_policy *policy);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -818,12 +778,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
ssize_t ret;
down_read(&policy->rwsem);
-
- if (fattr->show)
- ret = fattr->show(policy, buf);
- else
- ret = -EIO;
-
+ ret = fattr->show(policy, buf);
up_read(&policy->rwsem);
return ret;
@@ -838,18 +793,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
get_online_cpus();
- if (!cpu_online(policy->cpu))
- goto unlock;
-
- down_write(&policy->rwsem);
-
- if (fattr->store)
+ if (cpu_online(policy->cpu)) {
+ down_write(&policy->rwsem);
ret = fattr->store(policy, buf, count);
- else
- ret = -EIO;
+ up_write(&policy->rwsem);
+ }
- up_write(&policy->rwsem);
-unlock:
put_online_cpus();
return ret;
@@ -959,6 +908,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return cpufreq_add_dev_symlink(policy);
}
+__weak struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return NULL;
+}
+
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
struct cpufreq_governor *gov = NULL;
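
cpufreq_default_governor() above (and cpufreq_fallback_governor() further down) are __weak stubs returning NULL; whichever governor is built in provides the strong definition that the linker prefers. A minimal sketch of the weak-symbol mechanism (GCC/Clang attribute; the governor name is illustrative):

#include <stdio.h>

/* The fallback: overridden by any strong definition linked elsewhere,
 * e.g. a separate file containing
 *   const char *default_governor(void) { return "ondemand"; }
 */
__attribute__((weak)) const char *default_governor(void)
{
	return NULL; /* nothing built in */
}

int main(void)
{
	const char *name = default_governor();

	printf("default governor: %s\n", name ? name : "(none)");
	return 0;
}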
@@ -968,11 +922,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
/* Update governor of new_policy to the governor used before hotplug */
gov = find_governor(policy->last_governor);
- if (gov)
+ if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
- else
- gov = CPUFREQ_DEFAULT_GOVERNOR;
+ } else {
+ gov = cpufreq_default_governor();
+ if (!gov)
+ return -ENODATA;
+ }
new_policy.governor = gov;
@@ -996,36 +953,42 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (cpumask_test_cpu(cpu, policy->cpus))
return 0;
+ down_write(&policy->rwsem);
if (has_target()) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
- return ret;
+ goto unlock;
}
}
- down_write(&policy->rwsem);
cpumask_set_cpu(cpu, policy->cpus);
- up_write(&policy->rwsem);
if (has_target()) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
- if (!ret)
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
- if (ret) {
+ ret = cpufreq_start_governor(policy);
+ if (ret)
pr_err("%s: Failed to start governor\n", __func__);
- return ret;
- }
}
- return 0;
+unlock:
+ up_write(&policy->rwsem);
+ return ret;
+}
+
+static void handle_update(struct work_struct *work)
+{
+ struct cpufreq_policy *policy =
+ container_of(work, struct cpufreq_policy, update);
+ unsigned int cpu = policy->cpu;
+ pr_debug("handle_update for cpu %u called\n", cpu);
+ cpufreq_update_policy(cpu);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
struct cpufreq_policy *policy;
+ int ret;
if (WARN_ON(!dev))
return NULL;
@@ -1043,7 +1006,13 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
goto err_free_rcpumask;
- kobject_init(&policy->kobj, &ktype_cpufreq);
+ ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+ cpufreq_global_kobject, "policy%u", cpu);
+ if (ret) {
+ pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ goto err_free_real_cpus;
+ }
+
INIT_LIST_HEAD(&policy->policy_list);
init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
@@ -1054,6 +1023,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
policy->cpu = cpu;
return policy;
+err_free_real_cpus:
+ free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
@@ -1158,16 +1129,6 @@ static int cpufreq_online(unsigned int cpu)
cpumask_copy(policy->related_cpus, policy->cpus);
/* Remember CPUs present at the policy creation time. */
cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
-
- /* Name and add the kobject */
- ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
- "policy%u",
- cpumask_first(policy->related_cpus));
- if (ret) {
- pr_err("%s: failed to add policy->kobj: %d\n", __func__,
- ret);
- goto out_exit_policy;
- }
}
/*
@@ -1309,9 +1270,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return ret;
}
-static void cpufreq_offline_prepare(unsigned int cpu)
+static void cpufreq_offline(unsigned int cpu)
{
struct cpufreq_policy *policy;
+ int ret;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1321,13 +1283,13 @@ static void cpufreq_offline_prepare(unsigned int cpu)
return;
}
+ down_write(&policy->rwsem);
if (has_target()) {
- int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret)
pr_err("%s: Failed to stop governor\n", __func__);
}
- down_write(&policy->rwsem);
cpumask_clear_cpu(cpu, policy->cpus);
if (policy_is_inactive(policy)) {
@@ -1340,39 +1302,24 @@ static void cpufreq_offline_prepare(unsigned int cpu)
/* Nominate new CPU */
policy->cpu = cpumask_any(policy->cpus);
}
- up_write(&policy->rwsem);
/* Start governor again for active policy */
if (!policy_is_inactive(policy)) {
if (has_target()) {
- int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
- if (!ret)
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-
+ ret = cpufreq_start_governor(policy);
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
- } else if (cpufreq_driver->stop_cpu) {
- cpufreq_driver->stop_cpu(policy);
- }
-}
-static void cpufreq_offline_finish(unsigned int cpu)
-{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-
- if (!policy) {
- pr_debug("%s: No cpu_data found\n", __func__);
- return;
+ goto unlock;
}
- /* Only proceed for inactive policies */
- if (!policy_is_inactive(policy))
- return;
+ if (cpufreq_driver->stop_cpu)
+ cpufreq_driver->stop_cpu(policy);
/* If cpu is last user of policy, free policy */
if (has_target()) {
- int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
if (ret)
pr_err("%s: Failed to exit governor\n", __func__);
}
@@ -1386,6 +1333,9 @@ static void cpufreq_offline_finish(unsigned int cpu)
cpufreq_driver->exit(policy);
policy->freq_table = NULL;
}
+
+unlock:
+ up_write(&policy->rwsem);
}
/**
@@ -1401,10 +1351,8 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (!policy)
return;
- if (cpu_online(cpu)) {
- cpufreq_offline_prepare(cpu);
- cpufreq_offline_finish(cpu);
- }
+ if (cpu_online(cpu))
+ cpufreq_offline(cpu);
cpumask_clear_cpu(cpu, policy->real_cpus);
remove_cpu_dev_symlink(policy, cpu);
@@ -1413,15 +1361,6 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
cpufreq_policy_free(policy, true);
}
-static void handle_update(struct work_struct *work)
-{
- struct cpufreq_policy *policy =
- container_of(work, struct cpufreq_policy, update);
- unsigned int cpu = policy->cpu;
- pr_debug("handle_update for cpu %u called\n", cpu);
- cpufreq_update_policy(cpu);
-}
-
/**
* cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
* in deep trouble.
@@ -1457,9 +1396,17 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
{
struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
+ unsigned long flags;
- if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
- return cpufreq_driver->get(cpu);
+ read_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
+ ret_freq = cpufreq_driver->get(cpu);
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return ret_freq;
+ }
+
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
policy = cpufreq_cpu_get(cpu);
if (policy) {
@@ -1540,6 +1487,27 @@ unsigned int cpufreq_get(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_get);
+static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
+{
+ unsigned int new_freq;
+
+ if (cpufreq_suspended)
+ return 0;
+
+ new_freq = cpufreq_driver->get(policy->cpu);
+ if (!new_freq)
+ return 0;
+
+ if (!policy->cur) {
+ pr_debug("cpufreq: Driver did not initialize current freq\n");
+ policy->cur = new_freq;
+ } else if (policy->cur != new_freq && has_target()) {
+ cpufreq_out_of_sync(policy, new_freq);
+ }
+
+ return new_freq;
+}
+
static struct subsys_interface cpufreq_interface = {
.name = "cpufreq",
.subsys = &cpu_subsys,
@@ -1584,6 +1552,7 @@ EXPORT_SYMBOL(cpufreq_generic_suspend);
void cpufreq_suspend(void)
{
struct cpufreq_policy *policy;
+ int ret;
if (!cpufreq_driver)
return;
@@ -1594,7 +1563,11 @@ void cpufreq_suspend(void)
pr_debug("%s: Suspending Governors\n", __func__);
for_each_active_policy(policy) {
- if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+ down_write(&policy->rwsem);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ up_write(&policy->rwsem);
+
+ if (ret)
pr_err("%s: Failed to stop governor for policy: %p\n",
__func__, policy);
else if (cpufreq_driver->suspend
@@ -1616,6 +1589,7 @@ suspend:
void cpufreq_resume(void)
{
struct cpufreq_policy *policy;
+ int ret;
if (!cpufreq_driver)
return;
@@ -1628,25 +1602,19 @@ void cpufreq_resume(void)
pr_debug("%s: Resuming Governors\n", __func__);
for_each_active_policy(policy) {
- if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+ if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
pr_err("%s: Failed to resume driver: %p\n", __func__,
policy);
- else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
- || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
- pr_err("%s: Failed to start governor for policy: %p\n",
- __func__, policy);
- }
-
- /*
- * schedule call cpufreq_update_policy() for first-online CPU, as that
- * wouldn't be hotplugged-out on suspend. It will verify that the
- * current freq is in sync with what we believe it to be.
- */
- policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
- if (WARN_ON(!policy))
- return;
+ } else {
+ down_write(&policy->rwsem);
+ ret = cpufreq_start_governor(policy);
+ up_write(&policy->rwsem);
- schedule_work(&policy->update);
+ if (ret)
+ pr_err("%s: Failed to start governor for policy: %p\n",
+ __func__, policy);
+ }
+ }
}
/**
@@ -1846,7 +1814,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int old_target_freq = target_freq;
- int retval = -EINVAL;
+ struct cpufreq_frequency_table *freq_table;
+ int index, retval;
if (cpufreq_disabled())
return -ENODEV;
@@ -1873,34 +1842,28 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
policy->restore_freq = policy->cur;
if (cpufreq_driver->target)
- retval = cpufreq_driver->target(policy, target_freq, relation);
- else if (cpufreq_driver->target_index) {
- struct cpufreq_frequency_table *freq_table;
- int index;
-
- freq_table = cpufreq_frequency_get_table(policy->cpu);
- if (unlikely(!freq_table)) {
- pr_err("%s: Unable to find freq_table\n", __func__);
- goto out;
- }
+ return cpufreq_driver->target(policy, target_freq, relation);
- retval = cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index);
- if (unlikely(retval)) {
- pr_err("%s: Unable to find matching freq\n", __func__);
- goto out;
- }
+ if (!cpufreq_driver->target_index)
+ return -EINVAL;
- if (freq_table[index].frequency == policy->cur) {
- retval = 0;
- goto out;
- }
+ freq_table = cpufreq_frequency_get_table(policy->cpu);
+ if (unlikely(!freq_table)) {
+ pr_err("%s: Unable to find freq_table\n", __func__);
+ return -EINVAL;
+ }
- retval = __target_index(policy, freq_table, index);
+ retval = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &index);
+ if (unlikely(retval)) {
+ pr_err("%s: Unable to find matching freq\n", __func__);
+ return retval;
}
-out:
- return retval;
+ if (freq_table[index].frequency == policy->cur)
+ return 0;
+
+ return __target_index(policy, freq_table, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1920,20 +1883,14 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
-static int __cpufreq_governor(struct cpufreq_policy *policy,
- unsigned int event)
+__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
- int ret;
+ return NULL;
+}
- /* Only must be defined when default governor is known to have latency
- restrictions, like e.g. conservative or ondemand.
- That this is the case is already ensured in Kconfig
- */
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
- struct cpufreq_governor *gov = &cpufreq_gov_performance;
-#else
- struct cpufreq_governor *gov = NULL;
-#endif
+static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
+{
+ int ret;
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
@@ -1948,12 +1905,14 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
policy->governor->max_transition_latency) {
- if (!gov)
- return -EINVAL;
- else {
+ struct cpufreq_governor *gov = cpufreq_fallback_governor();
+
+ if (gov) {
pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
policy->governor->name, gov->name);
policy->governor = gov;
+ } else {
+ return -EINVAL;
}
}
@@ -1963,21 +1922,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
- mutex_lock(&cpufreq_governor_lock);
- if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
- || (!policy->governor_enabled
- && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
- mutex_unlock(&cpufreq_governor_lock);
- return -EBUSY;
- }
-
- if (event == CPUFREQ_GOV_STOP)
- policy->governor_enabled = false;
- else if (event == CPUFREQ_GOV_START)
- policy->governor_enabled = true;
-
- mutex_unlock(&cpufreq_governor_lock);
-
ret = policy->governor->governor(policy, event);
if (!ret) {
@@ -1985,14 +1929,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->governor->initialized++;
else if (event == CPUFREQ_GOV_POLICY_EXIT)
policy->governor->initialized--;
- } else {
- /* Restore original values */
- mutex_lock(&cpufreq_governor_lock);
- if (event == CPUFREQ_GOV_STOP)
- policy->governor_enabled = true;
- else if (event == CPUFREQ_GOV_START)
- policy->governor_enabled = false;
- mutex_unlock(&cpufreq_governor_lock);
}
if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
@@ -2002,6 +1938,17 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
return ret;
}
+static int cpufreq_start_governor(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
+ cpufreq_update_current_freq(policy);
+
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_START);
+ return ret ? ret : cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+}
+
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
int err;
@@ -2138,8 +2085,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return cpufreq_driver->setpolicy(new_policy);
}
- if (new_policy->governor == policy->governor)
- goto out;
+ if (new_policy->governor == policy->governor) {
+ pr_debug("cpufreq: governor limits update\n");
+ return cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ }
pr_debug("governor switch\n");
@@ -2147,7 +2096,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
old_gov = policy->governor;
/* end old governor */
if (old_gov) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
/* This can happen due to race with other operations */
pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
@@ -2155,10 +2104,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
- up_write(&policy->rwsem);
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- down_write(&policy->rwsem);
-
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
pr_err("%s: Failed to Exit Governor: %s (%d)\n",
__func__, old_gov->name, ret);
@@ -2168,32 +2114,27 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
/* start new governor */
policy->governor = new_policy->governor;
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+ ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
if (!ret) {
- ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
- if (!ret)
- goto out;
-
- up_write(&policy->rwsem);
- __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- down_write(&policy->rwsem);
+ ret = cpufreq_start_governor(policy);
+ if (!ret) {
+ pr_debug("cpufreq: governor change\n");
+ return 0;
+ }
+ cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
}
/* new governor failed, so re-start old one */
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+ if (cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
policy->governor = NULL;
else
- __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ cpufreq_start_governor(policy);
}
return ret;
-
- out:
- pr_debug("governor: change or update limits\n");
- return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
/**
@@ -2224,19 +2165,11 @@ int cpufreq_update_policy(unsigned int cpu)
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
- new_policy.cur = cpufreq_driver->get(cpu);
+ new_policy.cur = cpufreq_update_current_freq(policy);
if (WARN_ON(!new_policy.cur)) {
ret = -EIO;
goto unlock;
}
-
- if (!policy->cur) {
- pr_debug("Driver did not initialize current freq\n");
- policy->cur = new_policy.cur;
- } else {
- if (policy->cur != new_policy.cur && has_target())
- cpufreq_out_of_sync(policy, new_policy.cur);
- }
}
ret = cpufreq_set_policy(policy, &new_policy);
@@ -2260,11 +2193,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
- cpufreq_offline_prepare(cpu);
- break;
-
- case CPU_POST_DEAD:
- cpufreq_offline_finish(cpu);
+ cpufreq_offline(cpu);
break;
case CPU_DOWN_FAILED:
@@ -2297,8 +2226,11 @@ static int cpufreq_boost_set_sw(int state)
__func__);
break;
}
+
+ down_write(&policy->rwsem);
policy->user_policy.max = policy->max;
- __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ up_write(&policy->rwsem);
}
}
@@ -2384,7 +2316,7 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
* submitted by the CPU Frequency driver.
*
* Registers a CPU Frequency driver to this core code. This code
- * returns zero on success, -EBUSY when another driver got here first
+ * returns zero on success, -EEXIST when another driver got here first
* (and isn't unregistered in the meantime).
*
*/
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 606ad74abe6e..bf4913f6453b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -14,6 +14,22 @@
#include <linux/slab.h>
#include "cpufreq_governor.h"
+struct cs_policy_dbs_info {
+ struct policy_dbs_info policy_dbs;
+ unsigned int down_skip;
+ unsigned int requested_freq;
+};
+
+static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+{
+ return container_of(policy_dbs, struct cs_policy_dbs_info, policy_dbs);
+}
+
+struct cs_dbs_tuners {
+ unsigned int down_threshold;
+ unsigned int freq_step;
+};
+
/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
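
The conservative governor now embeds the common policy_dbs_info at the head of its per-policy structure and recovers the outer object with container_of(), instead of the old per-CPU cs_cpu_dbs_info array. A self-contained model of that recovery (userspace C with a local container_of definition):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct policy_dbs_info { int sampling_rate; };

struct cs_policy_dbs_info {
	struct policy_dbs_info policy_dbs; /* common part, embedded */
	unsigned int down_skip;
	unsigned int requested_freq;
};

int main(void)
{
	struct cs_policy_dbs_info cs = { .requested_freq = 1200000 };
	struct policy_dbs_info *common = &cs.policy_dbs;

	/* Given only the common pointer, get back the governor-specific one. */
	struct cs_policy_dbs_info *back =
		container_of(common, struct cs_policy_dbs_info, policy_dbs);

	printf("requested_freq = %u\n", back->requested_freq);
	return 0;
}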
@@ -21,21 +37,6 @@
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
-static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
-
-static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event);
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_conservative = {
- .name = "conservative",
- .governor = cs_cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
-};
-
static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
struct cpufreq_policy *policy)
{
@@ -57,27 +58,28 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
* Any frequency increase takes it to the maximum frequency. Frequency reduction
* happens at minimum steps of 5% (default) of maximum frequency
*/
-static void cs_check_cpu(int cpu, unsigned int load)
+static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
- struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
- struct dbs_data *dbs_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
/*
* break out if we 'cannot' reduce the speed as the user might
* want freq_step to be zero
*/
if (cs_tuners->freq_step == 0)
- return;
+ goto out;
/* Check for frequency increase */
- if (load > cs_tuners->up_threshold) {
+ if (load > dbs_data->up_threshold) {
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
if (dbs_info->requested_freq == policy->max)
- return;
+ goto out;
dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
@@ -86,12 +88,12 @@ static void cs_check_cpu(int cpu, unsigned int load)
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_H);
- return;
+ goto out;
}
/* if sampling_down_factor is active break out early */
- if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
- return;
+ if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
+ goto out;
dbs_info->down_skip = 0;
/* Check for frequency decrease */
@@ -101,7 +103,7 @@ static void cs_check_cpu(int cpu, unsigned int load)
* if we cannot reduce the frequency anymore, break out early
*/
if (policy->cur == policy->min)
- return;
+ goto out;
freq_target = get_freq_target(cs_tuners, policy);
if (dbs_info->requested_freq > freq_target)
@@ -111,58 +113,25 @@ static void cs_check_cpu(int cpu, unsigned int load)
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_L);
- return;
}
-}
-
-static unsigned int cs_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
-{
- struct dbs_data *dbs_data = policy->governor_data;
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-
- if (modify_all)
- dbs_check_cpu(dbs_data, policy->cpu);
- return delay_for_sampling_rate(cs_tuners->sampling_rate);
+ out:
+ return dbs_data->sampling_rate;
}
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- struct cpufreq_freqs *freq = data;
- struct cs_cpu_dbs_info_s *dbs_info =
- &per_cpu(cs_cpu_dbs_info, freq->cpu);
- struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
-
- if (!policy)
- return 0;
-
- /* policy isn't governed by conservative governor */
- if (policy->governor != &cpufreq_gov_conservative)
- return 0;
-
- /*
- * we only care if our internally tracked freq moves outside the 'valid'
- * ranges of frequency available to us otherwise we do not change it
- */
- if (dbs_info->requested_freq > policy->max
- || dbs_info->requested_freq < policy->min)
- dbs_info->requested_freq = freq->new;
-
- return 0;
-}
+ void *data);
static struct notifier_block cs_cpufreq_notifier_block = {
.notifier_call = dbs_cpufreq_notifier,
};
/************************** sysfs interface ************************/
-static struct common_dbs_data cs_dbs_cdata;
+static struct dbs_governor cs_dbs_gov;
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -170,22 +139,7 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- cs_tuners->sampling_down_factor = input;
- return count;
-}
-
-static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
- size_t count)
-{
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- unsigned int input;
- int ret;
- ret = sscanf(buf, "%u", &input);
-
- if (ret != 1)
- return -EINVAL;
-
- cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
+ dbs_data->sampling_down_factor = input;
return count;
}
@@ -200,7 +154,7 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
- cs_tuners->up_threshold = input;
+ dbs_data->up_threshold = input;
return count;
}
@@ -214,7 +168,7 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= cs_tuners->up_threshold)
+ input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -224,8 +178,7 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- unsigned int input, j;
+ unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -235,21 +188,14 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
if (input > 1)
input = 1;
- if (input == cs_tuners->ignore_nice_load) /* nothing to do */
+ if (input == dbs_data->ignore_nice_load) /* nothing to do */
return count;
- cs_tuners->ignore_nice_load = input;
+ dbs_data->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
- for_each_online_cpu(j) {
- struct cs_cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(cs_cpu_dbs_info, j);
- dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->cdbs.prev_cpu_wall, 0);
- if (cs_tuners->ignore_nice_load)
- dbs_info->cdbs.prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
+ gov_update_cpu_data(dbs_data);
+
return count;
}
@@ -275,55 +221,47 @@ static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
return count;
}
-show_store_one(cs, sampling_rate);
-show_store_one(cs, sampling_down_factor);
-show_store_one(cs, up_threshold);
-show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice_load);
-show_store_one(cs, freq_step);
-declare_show_sampling_rate_min(cs);
-
-gov_sys_pol_attr_rw(sampling_rate);
-gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(up_threshold);
-gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice_load);
-gov_sys_pol_attr_rw(freq_step);
-gov_sys_pol_attr_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes_gov_sys[] = {
- &sampling_rate_min_gov_sys.attr,
- &sampling_rate_gov_sys.attr,
- &sampling_down_factor_gov_sys.attr,
- &up_threshold_gov_sys.attr,
- &down_threshold_gov_sys.attr,
- &ignore_nice_load_gov_sys.attr,
- &freq_step_gov_sys.attr,
+gov_show_one_common(sampling_rate);
+gov_show_one_common(sampling_down_factor);
+gov_show_one_common(up_threshold);
+gov_show_one_common(ignore_nice_load);
+gov_show_one_common(min_sampling_rate);
+gov_show_one(cs, down_threshold);
+gov_show_one(cs, freq_step);
+
+gov_attr_rw(sampling_rate);
+gov_attr_rw(sampling_down_factor);
+gov_attr_rw(up_threshold);
+gov_attr_rw(ignore_nice_load);
+gov_attr_ro(min_sampling_rate);
+gov_attr_rw(down_threshold);
+gov_attr_rw(freq_step);
+
+static struct attribute *cs_attributes[] = {
+ &min_sampling_rate.attr,
+ &sampling_rate.attr,
+ &sampling_down_factor.attr,
+ &up_threshold.attr,
+ &down_threshold.attr,
+ &ignore_nice_load.attr,
+ &freq_step.attr,
NULL
};
-static struct attribute_group cs_attr_group_gov_sys = {
- .attrs = dbs_attributes_gov_sys,
- .name = "conservative",
-};
+/************************** sysfs end ************************/
-static struct attribute *dbs_attributes_gov_pol[] = {
- &sampling_rate_min_gov_pol.attr,
- &sampling_rate_gov_pol.attr,
- &sampling_down_factor_gov_pol.attr,
- &up_threshold_gov_pol.attr,
- &down_threshold_gov_pol.attr,
- &ignore_nice_load_gov_pol.attr,
- &freq_step_gov_pol.attr,
- NULL
-};
+static struct policy_dbs_info *cs_alloc(void)
+{
+ struct cs_policy_dbs_info *dbs_info;
-static struct attribute_group cs_attr_group_gov_pol = {
- .attrs = dbs_attributes_gov_pol,
- .name = "conservative",
-};
+ dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
+ return dbs_info ? &dbs_info->policy_dbs : NULL;
+}
-/************************** sysfs end ************************/
+static void cs_free(struct policy_dbs_info *policy_dbs)
+{
+ kfree(to_dbs_info(policy_dbs));
+}
static int cs_init(struct dbs_data *dbs_data, bool notify)
{
@@ -335,11 +273,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
return -ENOMEM;
}
- tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
- tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice_load = 0;
tuners->freq_step = DEF_FREQUENCY_STEP;
+ dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ dbs_data->ignore_nice_load = 0;
dbs_data->tuners = tuners;
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
@@ -361,35 +299,66 @@ static void cs_exit(struct dbs_data *dbs_data, bool notify)
kfree(dbs_data->tuners);
}
-define_get_cpu_dbs_routines(cs_cpu_dbs_info);
+static void cs_start(struct cpufreq_policy *policy)
+{
+ struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->down_skip = 0;
+ dbs_info->requested_freq = policy->cur;
+}
-static struct common_dbs_data cs_dbs_cdata = {
- .governor = GOV_CONSERVATIVE,
- .attr_group_gov_sys = &cs_attr_group_gov_sys,
- .attr_group_gov_pol = &cs_attr_group_gov_pol,
- .get_cpu_cdbs = get_cpu_cdbs,
- .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+static struct dbs_governor cs_dbs_gov = {
+ .gov = {
+ .name = "conservative",
+ .governor = cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+ },
+ .kobj_type = { .default_attrs = cs_attributes },
.gov_dbs_timer = cs_dbs_timer,
- .gov_check_cpu = cs_check_cpu,
+ .alloc = cs_alloc,
+ .free = cs_free,
.init = cs_init,
.exit = cs_exit,
- .mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
+ .start = cs_start,
};
-static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event)
+#define CPU_FREQ_GOV_CONSERVATIVE (&cs_dbs_gov.gov)
+
+static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
{
- return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
+ struct cpufreq_freqs *freq = data;
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
+ struct cs_policy_dbs_info *dbs_info;
+
+ if (!policy)
+ return 0;
+
+ /* policy isn't governed by conservative governor */
+ if (policy->governor != CPU_FREQ_GOV_CONSERVATIVE)
+ return 0;
+
+ dbs_info = to_dbs_info(policy->governor_data);
+ /*
+ * we only care if our internally tracked freq moves outside the 'valid'
+ * ranges of frequency available to us otherwise we do not change it
+ */
+ if (dbs_info->requested_freq > policy->max
+ || dbs_info->requested_freq < policy->min)
+ dbs_info->requested_freq = freq->new;
+
+ return 0;
}
static int __init cpufreq_gov_dbs_init(void)
{
- return cpufreq_register_governor(&cpufreq_gov_conservative);
+ return cpufreq_register_governor(CPU_FREQ_GOV_CONSERVATIVE);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
- cpufreq_unregister_governor(&cpufreq_gov_conservative);
+ cpufreq_unregister_governor(CPU_FREQ_GOV_CONSERVATIVE);
}
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
@@ -399,6 +368,11 @@ MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return CPU_FREQ_GOV_CONSERVATIVE;
+}
+
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e0d111024d48..10a5cfeae8c5 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -18,95 +18,193 @@
#include <linux/export.h>
#include <linux/kernel_stat.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include "cpufreq_governor.h"
-static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
-{
- if (have_governor_per_policy())
- return dbs_data->cdata->attr_group_gov_pol;
- else
- return dbs_data->cdata->attr_group_gov_sys;
-}
+static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
+
+static DEFINE_MUTEX(gov_dbs_data_mutex);
-void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
+/* Common sysfs tunables */
+/**
+ * store_sampling_rate - update the sampling rate, effective immediately if needed.
+ *
+ * If the new rate is smaller than the old one, simply updating
+ * dbs.sampling_rate may not be enough. For example, if the original
+ * sampling_rate was 1 second and the user, needing an immediate reaction
+ * from the ondemand governor but unsure whether a higher frequency will be
+ * required, requests a new rate of 10 ms, the governor may apply the change
+ * too late; up to 1 second later. Thus, if we are reducing the sampling
+ * rate, we need to make the new value effective immediately.
+ *
+ * This must be called with dbs_data->mutex held, otherwise traversing
+ * policy_dbs_list isn't safe.
+ */
+ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ size_t count)
{
- struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- struct cpufreq_policy *policy = cdbs->shared->policy;
- unsigned int sampling_rate;
- unsigned int max_load = 0;
- unsigned int ignore_nice;
- unsigned int j;
+ struct policy_dbs_info *policy_dbs;
+ unsigned int rate;
+ int ret;
+ ret = sscanf(buf, "%u", &rate);
+ if (ret != 1)
+ return -EINVAL;
- if (dbs_data->cdata->governor == GOV_ONDEMAND) {
- struct od_cpu_dbs_info_s *od_dbs_info =
- dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);
+ /*
+ * We are operating under dbs_data->mutex and so the list and its
+ * entries can't be freed concurrently.
+ */
+ list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ mutex_lock(&policy_dbs->timer_mutex);
/*
- * Sometimes, the ondemand governor uses an additional
- * multiplier to give long delays. So apply this multiplier to
- * the 'sampling_rate', so as to keep the wake-up-from-idle
- * detection logic a bit conservative.
+ * On 32-bit architectures this may race with the
+ * sample_delay_ns read in dbs_update_util_handler(), but that
+ * really doesn't matter. If the read returns a value that's
+ * too big, the sample will be skipped, but the next invocation
+ * of dbs_update_util_handler() (when the update has been
+ * completed) will take a sample.
+ *
+ * If this runs in parallel with dbs_work_handler(), we may end
+ * up overwriting the sample_delay_ns value that it has just
+ * written, but it will be corrected next time a sample is
+ * taken, so it shouldn't be significant.
*/
- sampling_rate = od_tuners->sampling_rate;
- sampling_rate *= od_dbs_info->rate_mult;
+ gov_update_sample_delay(policy_dbs, 0);
+ mutex_unlock(&policy_dbs->timer_mutex);
+ }
- ignore_nice = od_tuners->ignore_nice_load;
- } else {
- sampling_rate = cs_tuners->sampling_rate;
- ignore_nice = cs_tuners->ignore_nice_load;
+ return count;
+}
+EXPORT_SYMBOL_GPL(store_sampling_rate);
+
+/**
+ * gov_update_cpu_data - Update CPU load data.
+ * @dbs_data: Top-level governor data pointer.
+ *
+ * Update CPU load data for all CPUs in the domain governed by @dbs_data
+ * (that may be a single policy or a bunch of them if governor tunables are
+ * system-wide).
+ *
+ * Call under the @dbs_data mutex.
+ */
+void gov_update_cpu_data(struct dbs_data *dbs_data)
+{
+ struct policy_dbs_info *policy_dbs;
+
+ list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ unsigned int j;
+
+ for_each_cpu(j, policy_dbs->policy->cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
+ dbs_data->io_is_busy);
+ if (dbs_data->ignore_nice_load)
+ j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ }
}
+}
+EXPORT_SYMBOL_GPL(gov_update_cpu_data);
+
+static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
+{
+ return container_of(kobj, struct dbs_data, kobj);
+}
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+ return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dbs_data *dbs_data = to_dbs_data(kobj);
+ struct governor_attr *gattr = to_gov_attr(attr);
+
+ return gattr->show(dbs_data, buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dbs_data *dbs_data = to_dbs_data(kobj);
+ struct governor_attr *gattr = to_gov_attr(attr);
+ int ret = -EBUSY;
+
+ mutex_lock(&dbs_data->mutex);
+
+ if (dbs_data->usage_count)
+ ret = gattr->store(dbs_data, buf, count);
+
+ mutex_unlock(&dbs_data->mutex);
+
+ return ret;
+}
+
+/*
+ * Sysfs Ops for accessing governor attributes.
+ *
+ * All show/store invocations for governor-specific sysfs attributes first
+ * call the show/store callbacks below; the attribute-specific callback is
+ * then invoked from within them.
+ */
+static const struct sysfs_ops governor_sysfs_ops = {
+ .show = governor_show,
+ .store = governor_store,
+};
+
+unsigned int dbs_update(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ unsigned int ignore_nice = dbs_data->ignore_nice_load;
+ unsigned int max_load = 0;
+ unsigned int sampling_rate, io_busy, j;
+
+ /*
+ * Sometimes governors may use an additional multiplier to increase
+ * sample delays temporarily. Apply that multiplier to sampling_rate
+ * so as to keep the wake-up-from-idle detection logic a bit
+ * conservative.
+ */
+ sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
+ /*
+ * For the purpose of ondemand, waiting for disk IO is an indication
+ * that you're performance critical, and not that the system is actually
+ * idle, so do not add the iowait time to the CPU idle time then.
+ */
+ io_busy = dbs_data->io_is_busy;
/* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info *j_cdbs;
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
u64 cur_wall_time, cur_idle_time;
unsigned int idle_time, wall_time;
unsigned int load;
- int io_busy = 0;
-
- j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);
- /*
- * For the purpose of ondemand, waiting for disk IO is
- * an indication that you're performance critical, and
- * not that the system is actually idle. So do not add
- * the iowait time to the cpu idle time.
- */
- if (dbs_data->cdata->governor == GOV_ONDEMAND)
- io_busy = od_tuners->io_is_busy;
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
- wall_time = (unsigned int)
- (cur_wall_time - j_cdbs->prev_cpu_wall);
+ wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
j_cdbs->prev_cpu_wall = cur_wall_time;
- if (cur_idle_time < j_cdbs->prev_cpu_idle)
- cur_idle_time = j_cdbs->prev_cpu_idle;
-
- idle_time = (unsigned int)
- (cur_idle_time - j_cdbs->prev_cpu_idle);
- j_cdbs->prev_cpu_idle = cur_idle_time;
+ if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
+ idle_time = 0;
+ } else {
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ j_cdbs->prev_cpu_idle = cur_idle_time;
+ }
if (ignore_nice) {
- u64 cur_nice;
- unsigned long cur_nice_jiffies;
-
- cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
- cdbs->prev_cpu_nice;
- /*
- * Assumption: nice time between sampling periods will
- * be less than 2^32 jiffies for 32 bit sys
- */
- cur_nice_jiffies = (unsigned long)
- cputime64_to_jiffies64(cur_nice);
+ u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- cdbs->prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += jiffies_to_usecs(cur_nice_jiffies);
+ idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
+ j_cdbs->prev_cpu_nice = cur_nice;
}
if (unlikely(!wall_time || wall_time < idle_time))
@@ -128,10 +226,10 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
* dropped down. So we perform the copy only once, upon the
* first wake-up from idle.)
*
- * Detecting this situation is easy: the governor's deferrable
- * timer would not have fired during CPU-idle periods. Hence
- * an unusually large 'wall_time' (as compared to the sampling
- * rate) indicates this scenario.
+ * Detecting this situation is easy: the governor's utilization
+ * update handler would not have run during CPU-idle periods.
+ * Hence, an unusually large 'wall_time' (as compared to the
+ * sampling rate) indicates this scenario.
*
* prev_load can be zero in two cases and we must recalculate it
* for both cases:
@@ -156,222 +254,224 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
if (load > max_load)
max_load = load;
}
-
- dbs_data->cdata->gov_check_cpu(cpu, max_load);
+ return max_load;
}
-EXPORT_SYMBOL_GPL(dbs_check_cpu);
+EXPORT_SYMBOL_GPL(dbs_update);
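
For reference, the load figure returned by dbs_update() is simply the busy share of the wall time elapsed since the previous sample, expressed in percent. A standalone check of that arithmetic, with invented counter values (all in microseconds):

#include <stdio.h>
#include <stdint.h>

static unsigned int load_percent(uint64_t prev_wall, uint64_t cur_wall,
				 uint64_t prev_idle, uint64_t cur_idle)
{
	unsigned int wall_time = (unsigned int)(cur_wall - prev_wall);
	unsigned int idle_time = cur_idle > prev_idle ?
				 (unsigned int)(cur_idle - prev_idle) : 0;

	/* Degenerate sample; the kernel handles this case specially. */
	if (!wall_time || wall_time < idle_time)
		return 0;

	return 100 * (wall_time - idle_time) / wall_time;
}

int main(void)
{
	/* 10 ms of wall time, 2.5 ms of it idle -> 75% load */
	printf("load = %u%%\n",
	       load_percent(1000000, 1010000, 400000, 402500));
	return 0;
}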
-void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay)
+static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+ unsigned int delay_us)
{
- struct dbs_data *dbs_data = policy->governor_data;
- struct cpu_dbs_info *cdbs;
+ struct cpufreq_policy *policy = policy_dbs->policy;
int cpu;
+ gov_update_sample_delay(policy_dbs, delay_us);
+ policy_dbs->last_sample_time = 0;
+
for_each_cpu(cpu, policy->cpus) {
- cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
- cdbs->timer.expires = jiffies + delay;
- add_timer_on(&cdbs->timer, cpu);
+ struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+ cpufreq_set_update_util_data(cpu, &cdbs->update_util);
}
}
-EXPORT_SYMBOL_GPL(gov_add_timers);
-static inline void gov_cancel_timers(struct cpufreq_policy *policy)
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
- struct dbs_data *dbs_data = policy->governor_data;
- struct cpu_dbs_info *cdbs;
int i;
- for_each_cpu(i, policy->cpus) {
- cdbs = dbs_data->cdata->get_cpu_cdbs(i);
- del_timer_sync(&cdbs->timer);
- }
-}
+ for_each_cpu(i, policy->cpus)
+ cpufreq_set_update_util_data(i, NULL);
-void gov_cancel_work(struct cpu_common_dbs_info *shared)
-{
- /* Tell dbs_timer_handler() to skip queuing up work items. */
- atomic_inc(&shared->skip_work);
- /*
- * If dbs_timer_handler() is already running, it may not notice the
- * incremented skip_work, so wait for it to complete to prevent its work
- * item from being queued up after the cancel_work_sync() below.
- */
- gov_cancel_timers(shared->policy);
- /*
- * In case dbs_timer_handler() managed to run and spawn a work item
- * before the timers have been canceled, wait for that work item to
- * complete and then cancel all of the timers set up by it. If
- * dbs_timer_handler() runs again at that point, it will see the
- * positive value of skip_work and won't spawn any more work items.
- */
- cancel_work_sync(&shared->work);
- gov_cancel_timers(shared->policy);
- atomic_set(&shared->skip_work, 0);
+ synchronize_sched();
}
-EXPORT_SYMBOL_GPL(gov_cancel_work);
-/* Will return if we need to evaluate cpu load again or not */
-static bool need_load_eval(struct cpu_common_dbs_info *shared,
- unsigned int sampling_rate)
+static void gov_cancel_work(struct cpufreq_policy *policy)
{
- if (policy_is_shared(shared->policy)) {
- ktime_t time_now = ktime_get();
- s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
-
- /* Do nothing if we recently have sampled */
- if (delta_us < (s64)(sampling_rate / 2))
- return false;
- else
- shared->time_stamp = time_now;
- }
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
- return true;
+ gov_clear_update_util(policy_dbs->policy);
+ irq_work_sync(&policy_dbs->irq_work);
+ cancel_work_sync(&policy_dbs->work);
+ atomic_set(&policy_dbs->work_count, 0);
+ policy_dbs->work_in_progress = false;
}
static void dbs_work_handler(struct work_struct *work)
{
- struct cpu_common_dbs_info *shared = container_of(work, struct
- cpu_common_dbs_info, work);
+ struct policy_dbs_info *policy_dbs;
struct cpufreq_policy *policy;
- struct dbs_data *dbs_data;
- unsigned int sampling_rate, delay;
- bool eval_load;
-
- policy = shared->policy;
- dbs_data = policy->governor_data;
+ struct dbs_governor *gov;
- /* Kill all timers */
- gov_cancel_timers(policy);
+ policy_dbs = container_of(work, struct policy_dbs_info, work);
+ policy = policy_dbs->policy;
+ gov = dbs_governor_of(policy);
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-
- sampling_rate = cs_tuners->sampling_rate;
- } else {
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-
- sampling_rate = od_tuners->sampling_rate;
- }
-
- eval_load = need_load_eval(shared, sampling_rate);
+ /*
+ * Make sure cpufreq_governor_limits() isn't evaluating load and the
+ * ondemand governor isn't updating the sampling rate in parallel.
+ */
+ mutex_lock(&policy_dbs->timer_mutex);
+ gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
+ mutex_unlock(&policy_dbs->timer_mutex);
+ /* Allow the utilization update handler to queue up more work. */
+ atomic_set(&policy_dbs->work_count, 0);
/*
- * Make sure cpufreq_governor_limits() isn't evaluating load in
- * parallel.
+ * If the update below is reordered with respect to the sample delay
+ * modification, the utilization update handler may end up using a stale
+ * sample delay value.
*/
- mutex_lock(&shared->timer_mutex);
- delay = dbs_data->cdata->gov_dbs_timer(policy, eval_load);
- mutex_unlock(&shared->timer_mutex);
+ smp_wmb();
+ policy_dbs->work_in_progress = false;
+}
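
The smp_wmb() above pairs with the smp_rmb() in dbs_update_util_handler() further down: the new sample delay must be visible before work_in_progress is observed as clear. A userspace sketch of that ordering contract, using C11 release/acquire in place of the kernel barriers; the variable names mirror the patch, everything else is simplified for illustration.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic long long sample_delay_ns;
static atomic_bool work_in_progress;

static void work_handler_side(long long new_delay)
{
	atomic_store_explicit(&sample_delay_ns, new_delay,
			      memory_order_relaxed);
	/* Release: the delay store above can't be reordered past this. */
	atomic_store_explicit(&work_in_progress, false, memory_order_release);
}

static bool util_handler_side(long long *delay)
{
	/* Acquire: pairs with the release store in work_handler_side(). */
	if (atomic_load_explicit(&work_in_progress, memory_order_acquire))
		return false;	/* work pending; skip this sample */

	*delay = atomic_load_explicit(&sample_delay_ns, memory_order_relaxed);
	return true;
}

int main(void)
{
	long long d;

	work_handler_side(10000000);	/* 10 ms */
	return util_handler_side(&d) ? 0 : 1;
}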
- atomic_dec(&shared->skip_work);
+static void dbs_irq_work(struct irq_work *irq_work)
+{
+ struct policy_dbs_info *policy_dbs;
- gov_add_timers(policy, delay);
+ policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
+ schedule_work_on(smp_processor_id(), &policy_dbs->work);
}
-static void dbs_timer_handler(unsigned long data)
+static void dbs_update_util_handler(struct update_util_data *data, u64 time,
+ unsigned long util, unsigned long max)
{
- struct cpu_dbs_info *cdbs = (struct cpu_dbs_info *)data;
- struct cpu_common_dbs_info *shared = cdbs->shared;
+ struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
+ struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
+ u64 delta_ns, lst;
/*
- * Timer handler may not be allowed to queue the work at the moment,
- * because:
- * - Another timer handler has done that
- * - We are stopping the governor
- * - Or we are updating the sampling rate of the ondemand governor
+ * The work may not be allowed to be queued up right now.
+ * Possible reasons:
+ * - Work has already been queued up or is in progress.
+ * - It is too early (too little time from the previous sample).
*/
- if (atomic_inc_return(&shared->skip_work) > 1)
- atomic_dec(&shared->skip_work);
- else
- queue_work(system_wq, &shared->work);
-}
+ if (policy_dbs->work_in_progress)
+ return;
-static void set_sampling_rate(struct dbs_data *dbs_data,
- unsigned int sampling_rate)
-{
- if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
- cs_tuners->sampling_rate = sampling_rate;
- } else {
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- od_tuners->sampling_rate = sampling_rate;
+ /*
+ * If the reads below are reordered before the check above, the value
+ * of sample_delay_ns used in the computation may be stale.
+ */
+ smp_rmb();
+ lst = READ_ONCE(policy_dbs->last_sample_time);
+ delta_ns = time - lst;
+ if ((s64)delta_ns < policy_dbs->sample_delay_ns)
+ return;
+
+ /*
+ * If the policy is not shared, the irq_work may be queued up right away
+ * at this point. Otherwise, we need to ensure that only one of the
+ * CPUs sharing the policy will do that.
+ */
+ if (policy_dbs->is_shared) {
+ if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
+ return;
+
+ /*
+ * If another CPU updated last_sample_time in the meantime, we
+ * shouldn't be here, so clear the work counter and bail out.
+ */
+ if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
+ atomic_set(&policy_dbs->work_count, 0);
+ return;
+ }
}
+
+ policy_dbs->last_sample_time = time;
+ policy_dbs->work_in_progress = true;
+ irq_work_queue(&policy_dbs->irq_work);
}
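
For shared policies, atomic_add_unless(&work_count, 1, 1) lets exactly one CPU claim the right to queue the irq_work. A userspace model of that claim step, with the kernel primitive approximated by a compare-exchange from 0 to 1 (an assumption made purely for illustration):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int work_count;

/* Returns true for exactly one caller until the count is reset. */
static bool claim_sample(void)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&work_count, &expected, 1);
}

int main(void)
{
	printf("first claim: %d\n", claim_sample());	/* 1 */
	printf("second claim: %d\n", claim_sample());	/* 0 */
	atomic_store(&work_count, 0);			/* work completed */
	printf("after reset: %d\n", claim_sample());	/* 1 */
	return 0;
}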
-static int alloc_common_dbs_info(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata)
+static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
+ struct dbs_governor *gov)
{
- struct cpu_common_dbs_info *shared;
+ struct policy_dbs_info *policy_dbs;
int j;
- /* Allocate memory for the common information for policy->cpus */
- shared = kzalloc(sizeof(*shared), GFP_KERNEL);
- if (!shared)
- return -ENOMEM;
+ /* Allocate memory for per-policy governor data. */
+ policy_dbs = gov->alloc();
+ if (!policy_dbs)
+ return NULL;
- /* Set shared for all CPUs, online+offline */
- for_each_cpu(j, policy->related_cpus)
- cdata->get_cpu_cdbs(j)->shared = shared;
+ policy_dbs->policy = policy;
+ mutex_init(&policy_dbs->timer_mutex);
+ atomic_set(&policy_dbs->work_count, 0);
+ init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
+ INIT_WORK(&policy_dbs->work, dbs_work_handler);
- mutex_init(&shared->timer_mutex);
- atomic_set(&shared->skip_work, 0);
- INIT_WORK(&shared->work, dbs_work_handler);
- return 0;
+ /* Set policy_dbs for all CPUs, online+offline */
+ for_each_cpu(j, policy->related_cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
+
+ j_cdbs->policy_dbs = policy_dbs;
+ j_cdbs->update_util.func = dbs_update_util_handler;
+ }
+ return policy_dbs;
}
-static void free_common_dbs_info(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata)
+static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
+ struct dbs_governor *gov)
{
- struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
- struct cpu_common_dbs_info *shared = cdbs->shared;
int j;
- mutex_destroy(&shared->timer_mutex);
+ mutex_destroy(&policy_dbs->timer_mutex);
- for_each_cpu(j, policy->cpus)
- cdata->get_cpu_cdbs(j)->shared = NULL;
+ for_each_cpu(j, policy_dbs->policy->related_cpus) {
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
- kfree(shared);
+ j_cdbs->policy_dbs = NULL;
+ j_cdbs->update_util.func = NULL;
+ }
+ gov->free(policy_dbs);
}
-static int cpufreq_governor_init(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data,
- struct common_dbs_data *cdata)
+static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct dbs_data *dbs_data;
+ struct policy_dbs_info *policy_dbs;
unsigned int latency;
- int ret;
+ int ret = 0;
/* State should be equivalent to EXIT */
if (policy->governor_data)
return -EBUSY;
- if (dbs_data) {
- if (WARN_ON(have_governor_per_policy()))
- return -EINVAL;
+ policy_dbs = alloc_policy_dbs_info(policy, gov);
+ if (!policy_dbs)
+ return -ENOMEM;
- ret = alloc_common_dbs_info(policy, cdata);
- if (ret)
- return ret;
+ /* Protect gov->gdbs_data against concurrent updates. */
+ mutex_lock(&gov_dbs_data_mutex);
+ dbs_data = gov->gdbs_data;
+ if (dbs_data) {
+ if (WARN_ON(have_governor_per_policy())) {
+ ret = -EINVAL;
+ goto free_policy_dbs_info;
+ }
+ policy_dbs->dbs_data = dbs_data;
+ policy->governor_data = policy_dbs;
+
+ mutex_lock(&dbs_data->mutex);
dbs_data->usage_count++;
- policy->governor_data = dbs_data;
- return 0;
+ list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
+ mutex_unlock(&dbs_data->mutex);
+ goto out;
}
dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
- if (!dbs_data)
- return -ENOMEM;
-
- ret = alloc_common_dbs_info(policy, cdata);
- if (ret)
- goto free_dbs_data;
+ if (!dbs_data) {
+ ret = -ENOMEM;
+ goto free_policy_dbs_info;
+ }
- dbs_data->cdata = cdata;
- dbs_data->usage_count = 1;
+ INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
+ mutex_init(&dbs_data->mutex);
- ret = cdata->init(dbs_data, !policy->governor->initialized);
+ ret = gov->init(dbs_data, !policy->governor->initialized);
if (ret)
- goto free_common_dbs_info;
+ goto free_policy_dbs_info;
/* policy latency is in ns. Convert it to us first */
latency = policy->cpuinfo.transition_latency / 1000;
@@ -381,216 +481,156 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
/* Bring kernel and HW constraints together */
dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
MIN_LATENCY_MULTIPLIER * latency);
- set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
- latency * LATENCY_MULTIPLIER));
+ dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
+ LATENCY_MULTIPLIER * latency);
if (!have_governor_per_policy())
- cdata->gdbs_data = dbs_data;
+ gov->gdbs_data = dbs_data;
- policy->governor_data = dbs_data;
+ policy->governor_data = policy_dbs;
- ret = sysfs_create_group(get_governor_parent_kobj(policy),
- get_sysfs_attr(dbs_data));
- if (ret)
- goto reset_gdbs_data;
+ policy_dbs->dbs_data = dbs_data;
+ dbs_data->usage_count = 1;
+ list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
- return 0;
+ gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
+ ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
+ get_governor_parent_kobj(policy),
+ "%s", gov->gov.name);
+ if (!ret)
+ goto out;
+
+ /* Failure, so roll back. */
+ pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);
-reset_gdbs_data:
policy->governor_data = NULL;
if (!have_governor_per_policy())
- cdata->gdbs_data = NULL;
- cdata->exit(dbs_data, !policy->governor->initialized);
-free_common_dbs_info:
- free_common_dbs_info(policy, cdata);
-free_dbs_data:
+ gov->gdbs_data = NULL;
+ gov->exit(dbs_data, !policy->governor->initialized);
kfree(dbs_data);
+
+free_policy_dbs_info:
+ free_policy_dbs_info(policy_dbs, gov);
+
+out:
+ mutex_unlock(&gov_dbs_data_mutex);
return ret;
}
-static int cpufreq_governor_exit(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
- struct common_dbs_data *cdata = dbs_data->cdata;
- struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ int count;
- /* State should be equivalent to INIT */
- if (!cdbs->shared || cdbs->shared->policy)
- return -EBUSY;
+ /* Protect gov->gdbs_data against concurrent updates. */
+ mutex_lock(&gov_dbs_data_mutex);
+
+ mutex_lock(&dbs_data->mutex);
+ list_del(&policy_dbs->list);
+ count = --dbs_data->usage_count;
+ mutex_unlock(&dbs_data->mutex);
- if (!--dbs_data->usage_count) {
- sysfs_remove_group(get_governor_parent_kobj(policy),
- get_sysfs_attr(dbs_data));
+ if (!count) {
+ kobject_put(&dbs_data->kobj);
policy->governor_data = NULL;
if (!have_governor_per_policy())
- cdata->gdbs_data = NULL;
+ gov->gdbs_data = NULL;
- cdata->exit(dbs_data, policy->governor->initialized == 1);
+ gov->exit(dbs_data, policy->governor->initialized == 1);
+ mutex_destroy(&dbs_data->mutex);
kfree(dbs_data);
} else {
policy->governor_data = NULL;
}
- free_common_dbs_info(policy, cdata);
+ free_policy_dbs_info(policy_dbs, gov);
+
+ mutex_unlock(&gov_dbs_data_mutex);
return 0;
}
-static int cpufreq_governor_start(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
- struct common_dbs_data *cdata = dbs_data->cdata;
- unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
- struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
- struct cpu_common_dbs_info *shared = cdbs->shared;
- int io_busy = 0;
+ struct dbs_governor *gov = dbs_governor_of(policy);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ unsigned int sampling_rate, ignore_nice, j;
+ unsigned int io_busy;
if (!policy->cur)
return -EINVAL;
- /* State should be equivalent to INIT */
- if (!shared || shared->policy)
- return -EBUSY;
+ policy_dbs->is_shared = policy_is_shared(policy);
+ policy_dbs->rate_mult = 1;
- if (cdata->governor == GOV_CONSERVATIVE) {
- struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-
- sampling_rate = cs_tuners->sampling_rate;
- ignore_nice = cs_tuners->ignore_nice_load;
- } else {
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-
- sampling_rate = od_tuners->sampling_rate;
- ignore_nice = od_tuners->ignore_nice_load;
- io_busy = od_tuners->io_is_busy;
- }
-
- shared->policy = policy;
- shared->time_stamp = ktime_get();
+ sampling_rate = dbs_data->sampling_rate;
+ ignore_nice = dbs_data->ignore_nice_load;
+ io_busy = dbs_data->io_is_busy;
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
+ struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
unsigned int prev_load;
- j_cdbs->prev_cpu_idle =
- get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
- prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
- j_cdbs->prev_cpu_idle);
- j_cdbs->prev_load = 100 * prev_load /
- (unsigned int)j_cdbs->prev_cpu_wall;
+ prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
+ j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;
if (ignore_nice)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
-
- __setup_timer(&j_cdbs->timer, dbs_timer_handler,
- (unsigned long)j_cdbs,
- TIMER_DEFERRABLE | TIMER_IRQSAFE);
}
- if (cdata->governor == GOV_CONSERVATIVE) {
- struct cs_cpu_dbs_info_s *cs_dbs_info =
- cdata->get_cpu_dbs_info_s(cpu);
-
- cs_dbs_info->down_skip = 0;
- cs_dbs_info->requested_freq = policy->cur;
- } else {
- struct od_ops *od_ops = cdata->gov_ops;
- struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);
-
- od_dbs_info->rate_mult = 1;
- od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
- od_ops->powersave_bias_init_cpu(cpu);
- }
+ gov->start(policy);
- gov_add_timers(policy, delay_for_sampling_rate(sampling_rate));
+ gov_set_update_util(policy_dbs, sampling_rate);
return 0;
}
-static int cpufreq_governor_stop(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
- struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
- struct cpu_common_dbs_info *shared = cdbs->shared;
-
- /* State should be equivalent to START */
- if (!shared || !shared->policy)
- return -EBUSY;
-
- gov_cancel_work(shared);
- shared->policy = NULL;
-
+ gov_cancel_work(policy);
return 0;
}
-static int cpufreq_governor_limits(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data)
+static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
- struct common_dbs_data *cdata = dbs_data->cdata;
- unsigned int cpu = policy->cpu;
- struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
- /* State should be equivalent to START */
- if (!cdbs->shared || !cdbs->shared->policy)
- return -EBUSY;
+ mutex_lock(&policy_dbs->timer_mutex);
+
+ if (policy->max < policy->cur)
+ __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > policy->cur)
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+
+ gov_update_sample_delay(policy_dbs, 0);
- mutex_lock(&cdbs->shared->timer_mutex);
- if (policy->max < cdbs->shared->policy->cur)
- __cpufreq_driver_target(cdbs->shared->policy, policy->max,
- CPUFREQ_RELATION_H);
- else if (policy->min > cdbs->shared->policy->cur)
- __cpufreq_driver_target(cdbs->shared->policy, policy->min,
- CPUFREQ_RELATION_L);
- dbs_check_cpu(dbs_data, cpu);
- mutex_unlock(&cdbs->shared->timer_mutex);
+ mutex_unlock(&policy_dbs->timer_mutex);
return 0;
}
-int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata, unsigned int event)
+int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
- struct dbs_data *dbs_data;
- int ret;
-
- /* Lock governor to block concurrent initialization of governor */
- mutex_lock(&cdata->mutex);
-
- if (have_governor_per_policy())
- dbs_data = policy->governor_data;
- else
- dbs_data = cdata->gdbs_data;
-
- if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
- ret = -EINVAL;
- goto unlock;
- }
-
- switch (event) {
- case CPUFREQ_GOV_POLICY_INIT:
- ret = cpufreq_governor_init(policy, dbs_data, cdata);
- break;
- case CPUFREQ_GOV_POLICY_EXIT:
- ret = cpufreq_governor_exit(policy, dbs_data);
- break;
- case CPUFREQ_GOV_START:
- ret = cpufreq_governor_start(policy, dbs_data);
- break;
- case CPUFREQ_GOV_STOP:
- ret = cpufreq_governor_stop(policy, dbs_data);
- break;
- case CPUFREQ_GOV_LIMITS:
- ret = cpufreq_governor_limits(policy, dbs_data);
- break;
- default:
- ret = -EINVAL;
+ if (event == CPUFREQ_GOV_POLICY_INIT) {
+ return cpufreq_governor_init(policy);
+ } else if (policy->governor_data) {
+ switch (event) {
+ case CPUFREQ_GOV_POLICY_EXIT:
+ return cpufreq_governor_exit(policy);
+ case CPUFREQ_GOV_START:
+ return cpufreq_governor_start(policy);
+ case CPUFREQ_GOV_STOP:
+ return cpufreq_governor_stop(policy);
+ case CPUFREQ_GOV_LIMITS:
+ return cpufreq_governor_limits(policy);
+ }
}
-
-unlock:
- mutex_unlock(&cdata->mutex);
-
- return ret;
+ return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 91e767a058a7..61ff82fe0613 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -18,6 +18,7 @@
#define _CPUFREQ_GOVERNOR_H
#include <linux/atomic.h>
+#include <linux/irq_work.h>
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
@@ -41,96 +42,68 @@
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/*
- * Macro for creating governors sysfs routines
- *
- * - gov_sys: One governor instance per whole system
- * - gov_pol: One governor instance per policy
+ * Abbreviations:
+ * dbs: used as a short form for demand based switching. It helps to keep
+ *      variable names smaller and simpler
+ * cdbs: common dbs
+ * od_*: On-demand governor
+ * cs_*: Conservative governor
*/
-/* Create attributes */
-#define gov_sys_attr_ro(_name) \
-static struct global_attr _name##_gov_sys = \
-__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
-
-#define gov_sys_attr_rw(_name) \
-static struct global_attr _name##_gov_sys = \
-__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
-
-#define gov_pol_attr_ro(_name) \
-static struct freq_attr _name##_gov_pol = \
-__ATTR(_name, 0444, show_##_name##_gov_pol, NULL)
-
-#define gov_pol_attr_rw(_name) \
-static struct freq_attr _name##_gov_pol = \
-__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+/* Governor demand based switching data (per-policy or global). */
+struct dbs_data {
+ int usage_count;
+ void *tuners;
+ unsigned int min_sampling_rate;
+ unsigned int ignore_nice_load;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int io_is_busy;
-#define gov_sys_pol_attr_rw(_name) \
- gov_sys_attr_rw(_name); \
- gov_pol_attr_rw(_name)
+ struct kobject kobj;
+ struct list_head policy_dbs_list;
+ /*
+ * Protect concurrent updates to governor tunables from sysfs,
+ * policy_dbs_list and usage_count.
+ */
+ struct mutex mutex;
+};
-#define gov_sys_pol_attr_ro(_name) \
- gov_sys_attr_ro(_name); \
- gov_pol_attr_ro(_name)
+/* Governor's specific attributes */
+struct dbs_data;
+struct governor_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
+ ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
+ size_t count);
+};
-/* Create show/store routines */
-#define show_one(_gov, file_name) \
-static ssize_t show_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
+#define gov_show_one(_gov, file_name) \
+static ssize_t show_##file_name \
+(struct dbs_data *dbs_data, char *buf) \
{ \
- struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
- return sprintf(buf, "%u\n", tuners->file_name); \
-} \
- \
-static ssize_t show_##file_name##_gov_pol \
-(struct cpufreq_policy *policy, char *buf) \
-{ \
- struct dbs_data *dbs_data = policy->governor_data; \
struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
return sprintf(buf, "%u\n", tuners->file_name); \
}
-#define store_one(_gov, file_name) \
-static ssize_t store_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
-{ \
- struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
- return store_##file_name(dbs_data, buf, count); \
-} \
- \
-static ssize_t store_##file_name##_gov_pol \
-(struct cpufreq_policy *policy, const char *buf, size_t count) \
+#define gov_show_one_common(file_name) \
+static ssize_t show_##file_name \
+(struct dbs_data *dbs_data, char *buf) \
{ \
- struct dbs_data *dbs_data = policy->governor_data; \
- return store_##file_name(dbs_data, buf, count); \
+ return sprintf(buf, "%u\n", dbs_data->file_name); \
}
-#define show_store_one(_gov, file_name) \
-show_one(_gov, file_name); \
-store_one(_gov, file_name)
+#define gov_attr_ro(_name) \
+static struct governor_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
-/* create helper routines */
-#define define_get_cpu_dbs_routines(_dbs_info) \
-static struct cpu_dbs_info *get_cpu_cdbs(int cpu) \
-{ \
- return &per_cpu(_dbs_info, cpu).cdbs; \
-} \
- \
-static void *get_cpu_dbs_info_s(int cpu) \
-{ \
- return &per_cpu(_dbs_info, cpu); \
-}
-
-/*
- * Abbreviations:
- * dbs: used as a shortform for demand based switching It helps to keep variable
- * names smaller, simpler
- * cdbs: common dbs
- * od_*: On-demand governor
- * cs_*: Conservative governor
- */
+#define gov_attr_rw(_name) \
+static struct governor_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
/* Common to all CPUs of a policy */
-struct cpu_common_dbs_info {
+struct policy_dbs_info {
struct cpufreq_policy *policy;
/*
* Per policy mutex that serializes load evaluation from limit-change
@@ -138,11 +111,27 @@ struct cpu_common_dbs_info {
*/
struct mutex timer_mutex;
- ktime_t time_stamp;
- atomic_t skip_work;
+ u64 last_sample_time;
+ s64 sample_delay_ns;
+ atomic_t work_count;
+ struct irq_work irq_work;
struct work_struct work;
+ /* dbs_data may be shared between multiple policy objects */
+ struct dbs_data *dbs_data;
+ struct list_head list;
+ /* Multiplier for increasing sample delay temporarily. */
+ unsigned int rate_mult;
+ /* Status indicators */
+ bool is_shared; /* This object is used by multiple CPUs */
+ bool work_in_progress; /* Work is being queued up or in progress */
};
+static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
+ unsigned int delay_us)
+{
+ policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
+}
+
/* Per cpu structures */
struct cpu_dbs_info {
u64 prev_cpu_idle;
@@ -155,54 +144,14 @@ struct cpu_dbs_info {
* wake-up from idle.
*/
unsigned int prev_load;
- struct timer_list timer;
- struct cpu_common_dbs_info *shared;
-};
-
-struct od_cpu_dbs_info_s {
- struct cpu_dbs_info cdbs;
- struct cpufreq_frequency_table *freq_table;
- unsigned int freq_lo;
- unsigned int freq_lo_jiffies;
- unsigned int freq_hi_jiffies;
- unsigned int rate_mult;
- unsigned int sample_type:1;
-};
-
-struct cs_cpu_dbs_info_s {
- struct cpu_dbs_info cdbs;
- unsigned int down_skip;
- unsigned int requested_freq;
-};
-
-/* Per policy Governors sysfs tunables */
-struct od_dbs_tuners {
- unsigned int ignore_nice_load;
- unsigned int sampling_rate;
- unsigned int sampling_down_factor;
- unsigned int up_threshold;
- unsigned int powersave_bias;
- unsigned int io_is_busy;
-};
-
-struct cs_dbs_tuners {
- unsigned int ignore_nice_load;
- unsigned int sampling_rate;
- unsigned int sampling_down_factor;
- unsigned int up_threshold;
- unsigned int down_threshold;
- unsigned int freq_step;
+ struct update_util_data update_util;
+ struct policy_dbs_info *policy_dbs;
};
/* Common Governor data across policies */
-struct dbs_data;
-struct common_dbs_data {
- /* Common across governors */
- #define GOV_ONDEMAND 0
- #define GOV_CONSERVATIVE 1
- int governor;
- struct attribute_group *attr_group_gov_sys; /* one governor - system */
- struct attribute_group *attr_group_gov_pol; /* one governor - policy */
+struct dbs_governor {
+ struct cpufreq_governor gov;
+ struct kobj_type kobj_type;
/*
* Common data for platforms that don't set
@@ -210,74 +159,32 @@ struct common_dbs_data {
*/
struct dbs_data *gdbs_data;
- struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
- void *(*get_cpu_dbs_info_s)(int cpu);
- unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy,
- bool modify_all);
- void (*gov_check_cpu)(int cpu, unsigned int load);
+ unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
+ struct policy_dbs_info *(*alloc)(void);
+ void (*free)(struct policy_dbs_info *policy_dbs);
int (*init)(struct dbs_data *dbs_data, bool notify);
void (*exit)(struct dbs_data *dbs_data, bool notify);
-
- /* Governor specific ops, see below */
- void *gov_ops;
-
- /*
- * Protects governor's data (struct dbs_data and struct common_dbs_data)
- */
- struct mutex mutex;
+ void (*start)(struct cpufreq_policy *policy);
};
-/* Governor Per policy data */
-struct dbs_data {
- struct common_dbs_data *cdata;
- unsigned int min_sampling_rate;
- int usage_count;
- void *tuners;
-};
+static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
+{
+ return container_of(policy->governor, struct dbs_governor, gov);
+}
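
dbs_governor_of() relies on struct cpufreq_governor being embedded in struct dbs_governor, so container_of() can recover the wrapper from the member pointer the cpufreq core hands back. A self-contained userspace illustration of that pattern, using stand-in types rather than the kernel ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gov {
	const char *name;
};

struct dbs_gov {
	struct gov gov;		/* embedded, like struct cpufreq_governor */
	int private_data;
};

int main(void)
{
	struct dbs_gov g = { .gov = { .name = "demo" }, .private_data = 42 };
	struct gov *inner = &g.gov;	/* what the core hands back */
	struct dbs_gov *outer = container_of(inner, struct dbs_gov, gov);

	printf("%s: %d\n", outer->gov.name, outer->private_data);
	return 0;
}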
-/* Governor specific ops, will be passed to dbs_data->gov_ops */
+/* Governor specific operations */
struct od_ops {
- void (*powersave_bias_init_cpu)(int cpu);
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation);
- void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
};
-static inline int delay_for_sampling_rate(unsigned int sampling_rate)
-{
- int delay = usecs_to_jiffies(sampling_rate);
-
- /* We want all CPUs to do sampling nearly on same jiffy */
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
-
- return delay;
-}
-
-#define declare_show_sampling_rate_min(_gov) \
-static ssize_t show_sampling_rate_min_gov_sys \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
-{ \
- struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
- return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
-} \
- \
-static ssize_t show_sampling_rate_min_gov_pol \
-(struct cpufreq_policy *policy, char *buf) \
-{ \
- struct dbs_data *dbs_data = policy->governor_data; \
- return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
-}
-
-extern struct mutex cpufreq_governor_lock;
-
-void gov_add_timers(struct cpufreq_policy *policy, unsigned int delay);
-void gov_cancel_work(struct cpu_common_dbs_info *shared);
-void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- struct common_dbs_data *cdata, unsigned int event);
+unsigned int dbs_update(struct cpufreq_policy *policy);
+int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event);
void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
+ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ size_t count);
+void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index eae51070c034..acd80272ded6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -16,7 +16,8 @@
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
-#include "cpufreq_governor.h"
+
+#include "cpufreq_ondemand.h"
/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
@@ -27,24 +28,10 @@
#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
-static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
-
static struct od_ops od_ops;
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static struct cpufreq_governor cpufreq_gov_ondemand;
-#endif
-
static unsigned int default_powersave_bias;
-static void ondemand_powersave_bias_init_cpu(int cpu)
-{
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-
- dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
- dbs_info->freq_lo = 0;
-}
-
/*
* Not all CPUs want IO time to be accounted as busy; this depends on how
* efficient idling at a higher frequency/voltage is.
@@ -70,8 +57,8 @@ static int should_io_be_busy(void)
/*
* Find right freq to be set now with powersave_bias on.
- * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
- * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
+ * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
+ * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
*/
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation)
@@ -79,15 +66,15 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
unsigned int index = 0;
- unsigned int jiffies_total, jiffies_hi, jiffies_lo;
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
- policy->cpu);
- struct dbs_data *dbs_data = policy->governor_data;
+ unsigned int delay_hi_us;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
if (!dbs_info->freq_table) {
dbs_info->freq_lo = 0;
- dbs_info->freq_lo_jiffies = 0;
+ dbs_info->freq_lo_delay_us = 0;
return freq_next;
}
@@ -110,31 +97,30 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
/* Find out how long we have to be in hi and lo freqs */
if (freq_hi == freq_lo) {
dbs_info->freq_lo = 0;
- dbs_info->freq_lo_jiffies = 0;
+ dbs_info->freq_lo_delay_us = 0;
return freq_lo;
}
- jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
- jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
- jiffies_hi += ((freq_hi - freq_lo) / 2);
- jiffies_hi /= (freq_hi - freq_lo);
- jiffies_lo = jiffies_total - jiffies_hi;
+ delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
+ delay_hi_us += (freq_hi - freq_lo) / 2;
+ delay_hi_us /= freq_hi - freq_lo;
+ dbs_info->freq_hi_delay_us = delay_hi_us;
dbs_info->freq_lo = freq_lo;
- dbs_info->freq_lo_jiffies = jiffies_lo;
- dbs_info->freq_hi_jiffies = jiffies_hi;
+ dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
return freq_hi;
}
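
The delay_hi_us computation above splits one sampling period between freq_hi and freq_lo so that the time-weighted average frequency equals freq_avg, rounding to the nearest microsecond. A standalone check with invented frequencies; the 64-bit intermediate only guards the illustrative product against overflow:

#include <stdio.h>

int main(void)
{
	unsigned int freq_lo = 1000000, freq_hi = 2000000;	/* kHz */
	unsigned int freq_avg = 1600000;			/* kHz */
	unsigned int sampling_rate = 10000;			/* us */
	unsigned long long tmp;
	unsigned int delay_hi_us;

	tmp = (unsigned long long)(freq_avg - freq_lo) * sampling_rate;
	tmp += (freq_hi - freq_lo) / 2;		/* round to nearest */
	delay_hi_us = (unsigned int)(tmp / (freq_hi - freq_lo));

	/* Expect 6000 us at freq_hi and 4000 us at freq_lo:
	 * (2000000*6000 + 1000000*4000) / 10000 = 1600000 kHz average. */
	printf("hi: %u us, lo: %u us\n",
	       delay_hi_us, sampling_rate - delay_hi_us);
	return 0;
}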
-static void ondemand_powersave_bias_init(void)
+static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
- int i;
- for_each_online_cpu(i) {
- ondemand_powersave_bias_init_cpu(i);
- }
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->freq_table = cpufreq_frequency_get_table(policy->cpu);
+ dbs_info->freq_lo = 0;
}
static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
- struct dbs_data *dbs_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
if (od_tuners->powersave_bias)
@@ -152,21 +138,21 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
* (default), then we try to increase frequency. Else, we adjust the frequency
* proportional to load.
*/
-static void od_check_cpu(int cpu, unsigned int load)
+static void od_update(struct cpufreq_policy *policy)
{
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
- struct dbs_data *dbs_data = policy->governor_data;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
dbs_info->freq_lo = 0;
/* Check for frequency increase */
- if (load > od_tuners->up_threshold) {
+ if (load > dbs_data->up_threshold) {
/* If switching to max speed, apply sampling_down_factor */
if (policy->cur < policy->max)
- dbs_info->rate_mult =
- od_tuners->sampling_down_factor;
+ policy_dbs->rate_mult = dbs_data->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
} else {
/* Calculate the next frequency proportional to load */
@@ -177,177 +163,70 @@ static void od_check_cpu(int cpu, unsigned int load)
freq_next = min_f + load * (max_f - min_f) / 100;
/* No longer fully busy, reset rate_mult */
- dbs_info->rate_mult = 1;
+ policy_dbs->rate_mult = 1;
- if (!od_tuners->powersave_bias) {
- __cpufreq_driver_target(policy, freq_next,
- CPUFREQ_RELATION_C);
- return;
- }
+ if (od_tuners->powersave_bias)
+ freq_next = od_ops.powersave_bias_target(policy,
+ freq_next,
+ CPUFREQ_RELATION_L);
- freq_next = od_ops.powersave_bias_target(policy, freq_next,
- CPUFREQ_RELATION_L);
__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
}
}
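
When the CPU is not fully busy, od_update() picks the next frequency proportionally to load between the policy limits. A standalone check of that formula with made-up limits:

#include <stdio.h>

int main(void)
{
	unsigned int min_f = 800000, max_f = 2400000;	/* kHz */

	/* freq_next = min_f + load * (max_f - min_f) / 100 */
	for (unsigned int load = 0; load <= 100; load += 25)
		printf("load %3u%% -> %u kHz\n",
		       load, min_f + load * (max_f - min_f) / 100);
	return 0;
}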
-static unsigned int od_dbs_timer(struct cpufreq_policy *policy, bool modify_all)
+static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
{
- struct dbs_data *dbs_data = policy->governor_data;
- unsigned int cpu = policy->cpu;
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
- cpu);
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- int delay = 0, sample_type = dbs_info->sample_type;
-
- if (!modify_all)
- goto max_delay;
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ int sample_type = dbs_info->sample_type;
/* Common NORMAL_SAMPLE setup */
dbs_info->sample_type = OD_NORMAL_SAMPLE;
- if (sample_type == OD_SUB_SAMPLE) {
- delay = dbs_info->freq_lo_jiffies;
+ /*
+ * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
+ * it in that case.
+ */
+ if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
__cpufreq_driver_target(policy, dbs_info->freq_lo,
CPUFREQ_RELATION_H);
- } else {
- dbs_check_cpu(dbs_data, cpu);
- if (dbs_info->freq_lo) {
- /* Setup timer for SUB_SAMPLE */
- dbs_info->sample_type = OD_SUB_SAMPLE;
- delay = dbs_info->freq_hi_jiffies;
- }
+ return dbs_info->freq_lo_delay_us;
}
-max_delay:
- if (!delay)
- delay = delay_for_sampling_rate(od_tuners->sampling_rate
- * dbs_info->rate_mult);
-
- return delay;
-}
-
-/************************** sysfs interface ************************/
-static struct common_dbs_data od_dbs_cdata;
-
-/**
- * update_sampling_rate - update sampling rate effective immediately if needed.
- * @new_rate: new sampling rate
- *
- * If new rate is smaller than the old, simply updating
- * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
- * original sampling_rate was 1 second and the requested new sampling rate is 10
- * ms because the user needs immediate reaction from ondemand governor, but not
- * sure if higher frequency will be required or not, then, the governor may
- * change the sampling rate too late; up to 1 second later. Thus, if we are
- * reducing the sampling rate, we need to make the new value effective
- * immediately.
- */
-static void update_sampling_rate(struct dbs_data *dbs_data,
- unsigned int new_rate)
-{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- struct cpumask cpumask;
- int cpu;
-
- od_tuners->sampling_rate = new_rate = max(new_rate,
- dbs_data->min_sampling_rate);
-
- /*
- * Lock governor so that governor start/stop can't execute in parallel.
- */
- mutex_lock(&od_dbs_cdata.mutex);
-
- cpumask_copy(&cpumask, cpu_online_mask);
-
- for_each_cpu(cpu, &cpumask) {
- struct cpufreq_policy *policy;
- struct od_cpu_dbs_info_s *dbs_info;
- struct cpu_dbs_info *cdbs;
- struct cpu_common_dbs_info *shared;
- unsigned long next_sampling, appointed_at;
-
- dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
- cdbs = &dbs_info->cdbs;
- shared = cdbs->shared;
-
- /*
- * A valid shared and shared->policy means governor hasn't
- * stopped or exited yet.
- */
- if (!shared || !shared->policy)
- continue;
-
- policy = shared->policy;
-
- /* clear all CPUs of this policy */
- cpumask_andnot(&cpumask, &cpumask, policy->cpus);
+ od_update(policy);
- /*
- * Update sampling rate for CPUs whose policy is governed by
- * dbs_data. In case of governor_per_policy, only a single
- * policy will be governed by dbs_data, otherwise there can be
- * multiple policies that are governed by the same dbs_data.
- */
- if (dbs_data != policy->governor_data)
- continue;
-
- /*
- * Checking this for any CPU should be fine, timers for all of
- * them are scheduled together.
- */
- next_sampling = jiffies + usecs_to_jiffies(new_rate);
- appointed_at = dbs_info->cdbs.timer.expires;
-
- if (time_before(next_sampling, appointed_at)) {
- gov_cancel_work(shared);
- gov_add_timers(policy, usecs_to_jiffies(new_rate));
-
- }
+ if (dbs_info->freq_lo) {
+ /* Setup timer for SUB_SAMPLE */
+ dbs_info->sample_type = OD_SUB_SAMPLE;
+ return dbs_info->freq_hi_delay_us;
}
- mutex_unlock(&od_dbs_cdata.mutex);
+ return dbs_data->sampling_rate * policy_dbs->rate_mult;
}
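
Note that od_dbs_timer() now returns the delay until the next sample in microseconds, with rate_mult stretching that delay while the CPU runs flat out. A hypothetical illustration:

	unsigned int sampling_rate = 10000;	/* us, hypothetical tunable */
	unsigned int rate_mult = 4;		/* sampling_down_factor in effect */
	unsigned int next_delay_us = sampling_rate * rate_mult;	/* 40000 us */
	/* At policy->max the governor re-evaluates 4x less often. */
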
-static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
- size_t count)
-{
- unsigned int input;
- int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
-
- update_sampling_rate(dbs_data, input);
- return count;
-}
+/************************** sysfs interface ************************/
+static struct dbs_governor od_dbs_gov;
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
size_t count)
{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- unsigned int j;
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- od_tuners->io_is_busy = !!input;
+ dbs_data->io_is_busy = !!input;
/* we need to re-evaluate prev_cpu_idle */
- for_each_online_cpu(j) {
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
- j);
- dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
- }
+ gov_update_cpu_data(dbs_data);
+
return count;
}
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
size_t count)
{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -357,40 +236,43 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
return -EINVAL;
}
- od_tuners->up_threshold = input;
+ dbs_data->up_threshold = input;
return count;
}
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
- unsigned int input, j;
+ struct policy_dbs_info *policy_dbs;
+ unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- od_tuners->sampling_down_factor = input;
+
+ dbs_data->sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
- for_each_online_cpu(j) {
- struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
- j);
- dbs_info->rate_mult = 1;
+ list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ /*
+ * Doing this without locking might lead to using different
+ * rate_mult values in od_update() and od_dbs_timer().
+ */
+ mutex_lock(&policy_dbs->timer_mutex);
+ policy_dbs->rate_mult = 1;
+ mutex_unlock(&policy_dbs->timer_mutex);
}
+
return count;
}
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
{
- struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- unsigned int j;
-
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -398,22 +280,14 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
if (input > 1)
input = 1;
- if (input == od_tuners->ignore_nice_load) { /* nothing to do */
+ if (input == dbs_data->ignore_nice_load) { /* nothing to do */
return count;
}
- od_tuners->ignore_nice_load = input;
+ dbs_data->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
- for_each_online_cpu(j) {
- struct od_cpu_dbs_info_s *dbs_info;
- dbs_info = &per_cpu(od_cpu_dbs_info, j);
- dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
- &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
- if (od_tuners->ignore_nice_load)
- dbs_info->cdbs.prev_cpu_nice =
- kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ gov_update_cpu_data(dbs_data);
- }
return count;
}
@@ -421,6 +295,7 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
size_t count)
{
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ struct policy_dbs_info *policy_dbs;
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -432,59 +307,54 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
input = 1000;
od_tuners->powersave_bias = input;
- ondemand_powersave_bias_init();
+
+ list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list)
+ ondemand_powersave_bias_init(policy_dbs->policy);
+
return count;
}
-show_store_one(od, sampling_rate);
-show_store_one(od, io_is_busy);
-show_store_one(od, up_threshold);
-show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice_load);
-show_store_one(od, powersave_bias);
-declare_show_sampling_rate_min(od);
-
-gov_sys_pol_attr_rw(sampling_rate);
-gov_sys_pol_attr_rw(io_is_busy);
-gov_sys_pol_attr_rw(up_threshold);
-gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(ignore_nice_load);
-gov_sys_pol_attr_rw(powersave_bias);
-gov_sys_pol_attr_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes_gov_sys[] = {
- &sampling_rate_min_gov_sys.attr,
- &sampling_rate_gov_sys.attr,
- &up_threshold_gov_sys.attr,
- &sampling_down_factor_gov_sys.attr,
- &ignore_nice_load_gov_sys.attr,
- &powersave_bias_gov_sys.attr,
- &io_is_busy_gov_sys.attr,
+gov_show_one_common(sampling_rate);
+gov_show_one_common(up_threshold);
+gov_show_one_common(sampling_down_factor);
+gov_show_one_common(ignore_nice_load);
+gov_show_one_common(min_sampling_rate);
+gov_show_one_common(io_is_busy);
+gov_show_one(od, powersave_bias);
+
+gov_attr_rw(sampling_rate);
+gov_attr_rw(io_is_busy);
+gov_attr_rw(up_threshold);
+gov_attr_rw(sampling_down_factor);
+gov_attr_rw(ignore_nice_load);
+gov_attr_rw(powersave_bias);
+gov_attr_ro(min_sampling_rate);
+
+static struct attribute *od_attributes[] = {
+ &min_sampling_rate.attr,
+ &sampling_rate.attr,
+ &up_threshold.attr,
+ &sampling_down_factor.attr,
+ &ignore_nice_load.attr,
+ &powersave_bias.attr,
+ &io_is_busy.attr,
NULL
};
-static struct attribute_group od_attr_group_gov_sys = {
- .attrs = dbs_attributes_gov_sys,
- .name = "ondemand",
-};
+/************************** sysfs end ************************/
-static struct attribute *dbs_attributes_gov_pol[] = {
- &sampling_rate_min_gov_pol.attr,
- &sampling_rate_gov_pol.attr,
- &up_threshold_gov_pol.attr,
- &sampling_down_factor_gov_pol.attr,
- &ignore_nice_load_gov_pol.attr,
- &powersave_bias_gov_pol.attr,
- &io_is_busy_gov_pol.attr,
- NULL
-};
+static struct policy_dbs_info *od_alloc(void)
+{
+ struct od_policy_dbs_info *dbs_info;
-static struct attribute_group od_attr_group_gov_pol = {
- .attrs = dbs_attributes_gov_pol,
- .name = "ondemand",
-};
+ dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
+ return dbs_info ? &dbs_info->policy_dbs : NULL;
+}
-/************************** sysfs end ************************/
+static void od_free(struct policy_dbs_info *policy_dbs)
+{
+ kfree(to_dbs_info(policy_dbs));
+}
static int od_init(struct dbs_data *dbs_data, bool notify)
{
@@ -503,7 +373,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
put_cpu();
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
- tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+ dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
@@ -511,17 +381,17 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
*/
dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
- tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
/* For correct statistics, we need 10 ticks for each measure */
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
jiffies_to_usecs(10);
}
- tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice_load = 0;
+ dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+ dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- tuners->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
@@ -532,33 +402,38 @@ static void od_exit(struct dbs_data *dbs_data, bool notify)
kfree(dbs_data->tuners);
}
-define_get_cpu_dbs_routines(od_cpu_dbs_info);
+static void od_start(struct cpufreq_policy *policy)
+{
+ struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ ondemand_powersave_bias_init(policy);
+}
static struct od_ops od_ops = {
- .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
.powersave_bias_target = generic_powersave_bias_target,
- .freq_increase = dbs_freq_increase,
};
-static struct common_dbs_data od_dbs_cdata = {
- .governor = GOV_ONDEMAND,
- .attr_group_gov_sys = &od_attr_group_gov_sys,
- .attr_group_gov_pol = &od_attr_group_gov_pol,
- .get_cpu_cdbs = get_cpu_cdbs,
- .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
+static struct dbs_governor od_dbs_gov = {
+ .gov = {
+ .name = "ondemand",
+ .governor = cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+ },
+ .kobj_type = { .default_attrs = od_attributes },
.gov_dbs_timer = od_dbs_timer,
- .gov_check_cpu = od_check_cpu,
- .gov_ops = &od_ops,
+ .alloc = od_alloc,
+ .free = od_free,
.init = od_init,
.exit = od_exit,
- .mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
+ .start = od_start,
};
+#define CPU_FREQ_GOV_ONDEMAND (&od_dbs_gov.gov)
+
static void od_set_powersave_bias(unsigned int powersave_bias)
{
- struct cpufreq_policy *policy;
- struct dbs_data *dbs_data;
- struct od_dbs_tuners *od_tuners;
unsigned int cpu;
cpumask_t done;
@@ -567,22 +442,25 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
get_online_cpus();
for_each_online_cpu(cpu) {
- struct cpu_common_dbs_info *shared;
+ struct cpufreq_policy *policy;
+ struct policy_dbs_info *policy_dbs;
+ struct dbs_data *dbs_data;
+ struct od_dbs_tuners *od_tuners;
if (cpumask_test_cpu(cpu, &done))
continue;
- shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
- if (!shared)
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy || policy->governor != CPU_FREQ_GOV_ONDEMAND)
continue;
- policy = shared->policy;
- cpumask_or(&done, &done, policy->cpus);
-
- if (policy->governor != &cpufreq_gov_ondemand)
+ policy_dbs = policy->governor_data;
+ if (!policy_dbs)
continue;
- dbs_data = policy->governor_data;
+ cpumask_or(&done, &done, policy->cpus);
+
+ dbs_data = policy_dbs->dbs_data;
od_tuners = dbs_data->tuners;
od_tuners->powersave_bias = default_powersave_bias;
}
@@ -605,30 +483,14 @@ void od_unregister_powersave_bias_handler(void)
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
-static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
- unsigned int event)
-{
- return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
-}
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
- .name = "ondemand",
- .governor = od_cpufreq_governor_dbs,
- .max_transition_latency = TRANSITION_LATENCY_LIMIT,
- .owner = THIS_MODULE,
-};
-
static int __init cpufreq_gov_dbs_init(void)
{
- return cpufreq_register_governor(&cpufreq_gov_ondemand);
+ return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
- cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+ cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
@@ -638,6 +500,11 @@ MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return CPU_FREQ_GOV_ONDEMAND;
+}
+
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
new file mode 100644
index 000000000000..f0121db3cd9e
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -0,0 +1,30 @@
+/*
+ * Header file for CPUFreq ondemand governor and related code.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+struct od_policy_dbs_info {
+ struct policy_dbs_info policy_dbs;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_lo;
+ unsigned int freq_lo_delay_us;
+ unsigned int freq_hi_delay_us;
+ unsigned int sample_type:1;
+};
+
+static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+{
+ return container_of(policy_dbs, struct od_policy_dbs_info, policy_dbs);
+}
+
+struct od_dbs_tuners {
+ unsigned int powersave_bias;
+};
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index cf117deb39b1..af9f4b96f5a8 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -33,10 +33,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
return 0;
}
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_performance = {
+static struct cpufreq_governor cpufreq_gov_performance = {
.name = "performance",
.governor = cpufreq_governor_performance,
.owner = THIS_MODULE,
@@ -52,6 +49,19 @@ static void __exit cpufreq_gov_performance_exit(void)
cpufreq_unregister_governor(&cpufreq_gov_performance);
}
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_performance;
+}
+#endif
+#ifndef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+struct cpufreq_governor *cpufreq_fallback_governor(void)
+{
+ return &cpufreq_gov_performance;
+}
+#endif
+
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index e3b874c235ea..b8b400232a74 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -33,10 +33,7 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
return 0;
}
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_powersave = {
+static struct cpufreq_governor cpufreq_gov_powersave = {
.name = "powersave",
.governor = cpufreq_governor_powersave,
.owner = THIS_MODULE,
@@ -57,6 +54,11 @@ MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_powersave;
+}
+
fs_initcall(cpufreq_gov_powersave_init);
#else
module_init(cpufreq_gov_powersave_init);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 4dbf1db16aca..4d16f45ee1da 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -89,10 +89,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
return rc;
}
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_userspace = {
+static struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
.governor = cpufreq_governor_userspace,
.store_setspeed = cpufreq_set,
@@ -116,6 +113,11 @@ MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+ return &cpufreq_gov_userspace;
+}
+
fs_initcall(cpufreq_gov_userspace_init);
#else
module_init(cpufreq_gov_userspace_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd83d477e32d..30fe323c4551 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -64,6 +64,25 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
+/**
+ * struct sample - Store performance sample
+ * @core_pct_busy: Ratio of APERF/MPERF in percent, which is the actual
+ * performance during the last sample period
+ * @busy_scaled: Scaled busy value which is used to calculate the next
+ * P state. This can be different from core_pct_busy
+ * to account for CPU idle periods
+ * @aperf: Difference of actual performance frequency clock count
+ * read from APERF MSR between last and current sample
+ * @mperf: Difference of maximum performance frequency clock count
+ * read from MPERF MSR between last and current sample
+ * @tsc: Difference of time stamp counter between last and
+ * current sample
+ * @freq: Effective frequency calculated from APERF/MPERF
+ * @time: Current time from scheduler
+ *
+ * This structure is used in the cpudata structure to store performance sample
+ * data for choosing next P State.
+ */
struct sample {
int32_t core_pct_busy;
int32_t busy_scaled;
@@ -71,9 +90,23 @@ struct sample {
u64 mperf;
u64 tsc;
int freq;
- ktime_t time;
+ u64 time;
};
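
A minimal sketch of how the APERF/MPERF deltas stored here become core_pct_busy, mirroring intel_pstate_calc_busy() as modified by this patch (fixed-point helpers assumed to match the driver's):

	static void calc_core_busy_sketch(struct sample *sample)
	{
		/* APERF counts cycles at the actual frequency, MPERF at the
		 * guaranteed frequency; their ratio is delivered performance
		 * as a percentage. */
		int64_t core_pct = int_tofp(sample->aperf) * int_tofp(100);

		core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
		sample->core_pct_busy = (int32_t)core_pct;
	}
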
+/**
+ * struct pstate_data - Store P state data
+ * @current_pstate: Current requested P state
+ * @min_pstate: Min P state possible for this platform
+ * @max_pstate: Max P state possible for this platform
+ * @max_pstate_physical: This is the physical max P state for a processor.
+ * This can be higher than the max_pstate which can
+ * be limited by platform thermal design power limits
+ * @scaling: Scaling factor to convert frequency to cpufreq
+ * frequency units
+ * @turbo_pstate: Max Turbo P state possible for this platform
+ *
+ * Stores the per cpu model P state limits and current P state.
+ */
struct pstate_data {
int current_pstate;
int min_pstate;
@@ -83,6 +116,19 @@ struct pstate_data {
int turbo_pstate;
};
+/**
+ * struct vid_data - Stores voltage information data
+ * @min: VID data for this platform corresponding to
+ * the lowest P state
+ * @max: VID data corresponding to the highest P State.
+ * @turbo: VID data for turbo P state
+ * @ratio: Ratio of (vid max - vid min) /
+ * (max P state - Min P State)
+ *
+ * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
+ * This data is used on Atom platforms, where, in addition to the target
+ * P state, voltage data must be specified to select the next P state.
+ */
struct vid_data {
int min;
int max;
@@ -90,6 +136,18 @@ struct vid_data {
int32_t ratio;
};
+/**
+ * struct _pid - Stores PID data
+ * @setpoint: Target set point for busyness or performance
+ * @integral: Storage for accumulated error values
+ * @p_gain: PID proportional gain
+ * @i_gain: PID integral gain
+ * @d_gain: PID derivative gain
+ * @deadband: PID deadband
+ * @last_err: Last error storage for integral part of PID calculation
+ *
+ * Stores PID coefficients and last error for PID controller.
+ */
struct _pid {
int setpoint;
int32_t integral;
@@ -100,16 +158,33 @@ struct _pid {
int32_t last_err;
};
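
A condensed sketch of how these fields drive one controller step, simplified from the driver's pid_calc() (the integral clamp applied by the real code is omitted here):

	static int32_t pid_step_sketch(struct _pid *pid, int32_t busy)
	{
		int32_t fp_error = pid->setpoint - busy;
		int32_t pterm, dterm;

		if (abs(fp_error) <= pid->deadband)
			return 0;	/* inside the deadband: no adjustment */

		pterm = mul_fp(pid->p_gain, fp_error);
		pid->integral += fp_error;	/* clamped in the real driver */
		dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
		pid->last_err = fp_error;

		return pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	}
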
+/**
+ * struct cpudata - Per CPU instance data storage
+ * @cpu: CPU number for this instance data
+ * @update_util: CPUFreq utility callback information
+ * @pstate: Stores P state limits for this CPU
+ * @vid: Stores VID limits for this CPU
+ * @pid: Stores PID parameters for this CPU
+ * @last_sample_time: Last Sample time
+ * @prev_aperf: Last APERF value read from APERF MSR
+ * @prev_mperf: Last MPERF value read from MPERF MSR
+ * @prev_tsc: Last timestamp counter (TSC) value
+ * @prev_cummulative_iowait: IO wait time difference between the last and
+ * current samples
+ * @sample: Storage for storing last Sample data
+ *
+ * This structure stores per CPU instance data for all CPUs.
+ */
struct cpudata {
int cpu;
- struct timer_list timer;
+ struct update_util_data update_util;
struct pstate_data pstate;
struct vid_data vid;
struct _pid pid;
- ktime_t last_sample_time;
+ u64 last_sample_time;
u64 prev_aperf;
u64 prev_mperf;
u64 prev_tsc;
@@ -118,8 +193,22 @@ struct cpudata {
};
static struct cpudata **all_cpu_data;
+
+/**
+ * struct pstate_adjust_policy - Stores static PID configuration data
+ * @sample_rate_ms: PID calculation sample rate in ms
+ * @sample_rate_ns: Sample rate in ns, derived from @sample_rate_ms
+ * @deadband: PID deadband
+ * @setpoint: PID Setpoint
+ * @p_gain_pct: PID proportional gain
+ * @i_gain_pct: PID integral gain
+ * @d_gain_pct: PID derivative gain
+ *
+ * Stores per CPU model static PID configuration data.
+ */
struct pstate_adjust_policy {
int sample_rate_ms;
+ s64 sample_rate_ns;
int deadband;
int setpoint;
int p_gain_pct;
@@ -127,17 +216,36 @@ struct pstate_adjust_policy {
int i_gain_pct;
};
+/**
+ * struct pstate_funcs - Per CPU model specific callbacks
+ * @get_max: Callback to get maximum non turbo effective P state
+ * @get_max_physical: Callback to get maximum non turbo physical P state
+ * @get_min: Callback to get minimum P state
+ * @get_turbo: Callback to get turbo P state
+ * @get_scaling: Callback to get frequency scaling factor
+ * @get_val: Callback to convert P state to actual MSR write value
+ * @get_vid: Callback to get VID data for Atom platforms
+ * @get_target_pstate: Callback to calculate the next P state to use
+ *
+ * Core and Atom CPU models have different ways to get P state limits. This
+ * structure is used to store those callbacks.
+ */
struct pstate_funcs {
int (*get_max)(void);
int (*get_max_physical)(void);
int (*get_min)(void);
int (*get_turbo)(void);
int (*get_scaling)(void);
- void (*set)(struct cpudata*, int pstate);
+ u64 (*get_val)(struct cpudata*, int pstate);
void (*get_vid)(struct cpudata *);
int32_t (*get_target_pstate)(struct cpudata *);
};
+/**
+ * struct cpu_defaults - Per CPU model default config data
+ * @pid_policy: PID config data
+ * @funcs: Callback function data
+ */
struct cpu_defaults {
struct pstate_adjust_policy pid_policy;
struct pstate_funcs funcs;
@@ -150,6 +258,34 @@ static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
+
+/**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo: User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled: Platform turbo status, either read from the
+ * MSR_IA32_MISC_ENABLE MSR or set when the maximum available
+ * P state matches the maximum turbo P state
+ * @max_perf_pct: Effective maximum performance limit in percent; the
+ * minimum of the limit enforced by the cpufreq policy and the
+ * user-set limit via the intel_pstate sysfs interface
+ * @min_perf_pct: Effective minimum performance limit in percent; the
+ * maximum of the limit enforced by the cpufreq policy and the
+ * user-set limit via the intel_pstate sysfs interface
+ * @max_perf: Scaled value between 0 and 255 corresponding to max_perf_pct;
+ * used to limit the max P state
+ * @min_perf: Scaled value between 0 and 255 corresponding to min_perf_pct;
+ * used to limit the min P state
+ * @max_policy_pct: The maximum performance in percentage enforced by
+ * cpufreq setpolicy interface
+ * @max_sysfs_pct: The maximum performance in percentage enforced by
+ * intel pstate sysfs interface
+ * @min_policy_pct: The minimum performance in percentage enforced by
+ * cpufreq setpolicy interface
+ * @min_sysfs_pct: The minimum performance in percentage enforced by
+ * intel pstate sysfs interface
+ *
+ * Storage for user and policy defined limits.
+ */
struct perf_limits {
int no_turbo;
int turbo_disabled;
@@ -197,8 +333,8 @@ static struct perf_limits *limits = &powersave_limits;
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
int deadband, int integral) {
- pid->setpoint = setpoint;
- pid->deadband = deadband;
+ pid->setpoint = int_tofp(setpoint);
+ pid->deadband = int_tofp(deadband);
pid->integral = int_tofp(integral);
pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
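
pid_reset() now pre-converts setpoint and deadband with int_tofp(), so pid_calc() can compare them against fixed-point busyness directly. For reference, the driver's fixed-point helpers are roughly the following (a sketch, assuming the usual 8 fractional bits):

	#define FRAC_BITS 8
	#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
	#define fp_toint(X) ((X) >> FRAC_BITS)

	static inline int32_t mul_fp(int32_t x, int32_t y)
	{
		return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
	}
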
@@ -224,9 +360,9 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
int32_t pterm, dterm, fp_error;
int32_t integral_limit;
- fp_error = int_tofp(pid->setpoint) - busy;
+ fp_error = pid->setpoint - busy;
- if (abs(fp_error) <= int_tofp(pid->deadband))
+ if (abs(fp_error) <= pid->deadband)
return 0;
pterm = mul_fp(pid->p_gain, fp_error);
@@ -286,7 +422,7 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
-static void intel_pstate_hwp_set(void)
+static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
u64 value, cap;
@@ -296,9 +432,7 @@ static void intel_pstate_hwp_set(void)
hw_max = HWP_HIGHEST_PERF(cap);
range = hw_max - hw_min;
- get_online_cpus();
-
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, cpumask) {
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
adj_range = limits->min_perf_pct * range / 100;
min = hw_min + adj_range;
@@ -317,7 +451,12 @@ static void intel_pstate_hwp_set(void)
value |= HWP_MAX_PERF(max);
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
+}
+static void intel_pstate_hwp_set_online_cpus(void)
+{
+ get_online_cpus();
+ intel_pstate_hwp_set(cpu_online_mask);
put_online_cpus();
}
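
To make the percent-to-HWP-range mapping concrete, a hypothetical example of the arithmetic in intel_pstate_hwp_set():

	int hw_min = 1, hw_max = 35;		/* hypothetical HWP caps */
	int range = hw_max - hw_min;		/* 34 */
	int min = hw_min + 25 * range / 100;	/* min_perf_pct = 25 -> 9 */
	int max = hw_min + 100 * range / 100;	/* max_perf_pct = 100 -> 35 */
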
@@ -439,7 +578,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
limits->no_turbo = clamp_t(int, input, 0, 1);
if (hwp_active)
- intel_pstate_hwp_set();
+ intel_pstate_hwp_set_online_cpus();
return count;
}
@@ -465,7 +604,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
int_tofp(100));
if (hwp_active)
- intel_pstate_hwp_set();
+ intel_pstate_hwp_set_online_cpus();
return count;
}
@@ -490,7 +629,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
int_tofp(100));
if (hwp_active)
- intel_pstate_hwp_set();
+ intel_pstate_hwp_set_online_cpus();
return count;
}
@@ -531,6 +670,9 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
+ /* First disable HWP notification interrupt as we don't process them */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
@@ -558,7 +700,7 @@ static int atom_get_turbo_pstate(void)
return value & 0x7F;
}
-static void atom_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
u64 val;
int32_t vid_fp;
@@ -578,9 +720,7 @@ static void atom_set_pstate(struct cpudata *cpudata, int pstate)
if (pstate > cpudata->pstate.max_pstate)
vid = cpudata->vid.turbo;
- val |= vid;
-
- wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+ return val | vid;
}
static int silvermont_get_scaling(void)
@@ -704,7 +844,7 @@ static inline int core_get_scaling(void)
return 100000;
}
-static void core_set_pstate(struct cpudata *cpudata, int pstate)
+static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
u64 val;
@@ -712,7 +852,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
if (limits->no_turbo && !limits->turbo_disabled)
val |= (u64)1 << 32;
- wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+ return val;
}
static int knl_get_turbo_pstate(void)
@@ -743,7 +883,7 @@ static struct cpu_defaults core_params = {
.get_min = core_get_min_pstate,
.get_turbo = core_get_turbo_pstate,
.get_scaling = core_get_scaling,
- .set = core_set_pstate,
+ .get_val = core_get_val,
.get_target_pstate = get_target_pstate_use_performance,
},
};
@@ -762,7 +902,7 @@ static struct cpu_defaults silvermont_params = {
.get_max_physical = atom_get_max_pstate,
.get_min = atom_get_min_pstate,
.get_turbo = atom_get_turbo_pstate,
- .set = atom_set_pstate,
+ .get_val = atom_get_val,
.get_scaling = silvermont_get_scaling,
.get_vid = atom_get_vid,
.get_target_pstate = get_target_pstate_use_cpu_load,
@@ -783,7 +923,7 @@ static struct cpu_defaults airmont_params = {
.get_max_physical = atom_get_max_pstate,
.get_min = atom_get_min_pstate,
.get_turbo = atom_get_turbo_pstate,
- .set = atom_set_pstate,
+ .get_val = atom_get_val,
.get_scaling = airmont_get_scaling,
.get_vid = atom_get_vid,
.get_target_pstate = get_target_pstate_use_cpu_load,
@@ -805,7 +945,7 @@ static struct cpu_defaults knl_params = {
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
.get_scaling = core_get_scaling,
- .set = core_set_pstate,
+ .get_val = core_get_val,
.get_target_pstate = get_target_pstate_use_performance,
},
};
@@ -824,33 +964,32 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
* policy, or by cpu specific default values determined through
* experimentation.
*/
- max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+ max_perf_adj = fp_toint(max_perf * limits->max_perf);
*max = clamp_t(int, max_perf_adj,
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
- min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+ min_perf = fp_toint(max_perf * limits->min_perf);
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
+static inline void intel_pstate_record_pstate(struct cpudata *cpu, int pstate)
{
- int max_perf, min_perf;
-
- if (force) {
- update_turbo_state();
-
- intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
-
- pstate = clamp_t(int, pstate, min_perf, max_perf);
-
- if (pstate == cpu->pstate.current_pstate)
- return;
- }
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
-
cpu->pstate.current_pstate = pstate;
+}
+
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ int pstate = cpu->pstate.min_pstate;
- pstate_funcs.set(cpu, pstate);
+ intel_pstate_record_pstate(cpu, pstate);
+ /*
+ * Generally, there is no guarantee that this code will always run on
+ * the CPU being updated, so force the register update to run on the
+ * right CPU.
+ */
+ wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
+ pstate_funcs.get_val(cpu, pstate));
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
@@ -863,7 +1002,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
- intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+
+ intel_pstate_set_min_pstate(cpu);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
@@ -874,16 +1014,10 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
core_pct = int_tofp(sample->aperf) * int_tofp(100);
core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
- sample->freq = fp_toint(
- mul_fp(int_tofp(
- cpu->pstate.max_pstate_physical *
- cpu->pstate.scaling / 100),
- core_pct));
-
sample->core_pct_busy = (int32_t)core_pct;
}
-static inline void intel_pstate_sample(struct cpudata *cpu)
+static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
u64 aperf, mperf;
unsigned long flags;
@@ -893,14 +1027,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
tsc = rdtsc();
- if ((cpu->prev_mperf == mperf) || (cpu->prev_tsc == tsc)) {
+ if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
local_irq_restore(flags);
- return;
+ return false;
}
local_irq_restore(flags);
cpu->last_sample_time = cpu->sample.time;
- cpu->sample.time = ktime_get();
+ cpu->sample.time = time;
cpu->sample.aperf = aperf;
cpu->sample.mperf = mperf;
cpu->sample.tsc = tsc;
@@ -908,27 +1042,23 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
cpu->sample.mperf -= cpu->prev_mperf;
cpu->sample.tsc -= cpu->prev_tsc;
- intel_pstate_calc_busy(cpu);
-
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
cpu->prev_tsc = tsc;
+ /*
+ * The first time this function is invoked in a given cycle, all of the
+ * previous sample data fields are equal to zero or stale and they must
+ * be populated with meaningful numbers for things to work, so assume
+ * that sample.time will always be reset before setting the utilization
+ * update hook and make the caller skip the sample then.
+ */
+ return !!cpu->last_sample_time;
}
-static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
-{
- int delay;
-
- delay = msecs_to_jiffies(50);
- mod_timer_pinned(&cpu->timer, jiffies + delay);
-}
-
-static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
+static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- int delay;
-
- delay = msecs_to_jiffies(pid_params.sample_rate_ms);
- mod_timer_pinned(&cpu->timer, jiffies + delay);
+ return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
+ cpu->pstate.scaling, cpu->sample.mperf);
}
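
A worked example of get_avg_frequency() with hypothetical numbers:

	/* max_pstate_physical = 24, scaling = 100000 kHz per P state unit,
	 * aperf = 50000, mperf = 100000 over the last sample period: */
	u64 avg_khz = div64_u64(24ULL * 50000 * 100000, 100000);
	/* avg_khz == 1200000: the CPU averaged 1.2 GHz, half its 2.4 GHz max. */
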
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -954,7 +1084,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
mperf = cpu->sample.mperf + delta_iowait_mperf;
cpu->prev_cummulative_iowait = cummulative_iowait;
-
/*
* The load can be estimated as the ratio of the mperf counter
* running at a constant frequency during active periods
@@ -970,8 +1099,9 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
int32_t core_busy, max_pstate, current_pstate, sample_ratio;
- s64 duration_us;
- u32 sample_time;
+ u64 duration_ns;
+
+ intel_pstate_calc_busy(cpu);
/*
* core_busy is the ratio of actual performance to max
@@ -990,25 +1120,41 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
/*
- * Since we have a deferred timer, it will not fire unless
- * we are in C0. So, determine if the actual elapsed time
- * is significantly greater (3x) than our sample interval. If it
- * is, then we were idle for a long enough period of time
- * to adjust our busyness.
+ * Since our utilization update callback will not run unless we are
+ * in C0, check if the actual elapsed time is significantly greater (3x)
+ * than our sample interval. If it is, then we were idle for a long
+ * enough period of time to adjust our busyness.
*/
- sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
- duration_us = ktime_us_delta(cpu->sample.time,
- cpu->last_sample_time);
- if (duration_us > sample_time * 3) {
- sample_ratio = div_fp(int_tofp(sample_time),
- int_tofp(duration_us));
+ duration_ns = cpu->sample.time - cpu->last_sample_time;
+ if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
+ sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
+ int_tofp(duration_ns));
core_busy = mul_fp(core_busy, sample_ratio);
+ } else {
+ sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+ if (sample_ratio < int_tofp(1))
+ core_busy = 0;
}
cpu->sample.busy_scaled = core_busy;
return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
}
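
The 3x check above deflates busyness after a long idle stretch; a hypothetical illustration:

	s64 sample_rate_ns = 10 * NSEC_PER_MSEC;	/* hypothetical */
	u64 duration_ns = 40 * NSEC_PER_MSEC;		/* CPU idled in between */
	int32_t sample_ratio = div_fp(int_tofp(sample_rate_ns),
				      int_tofp(duration_ns));	/* 0.25 fixed point */
	/* mul_fp(core_busy, sample_ratio) quarters core_busy, preventing the
	 * PID from ramping the P state off a stale busy reading. */
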
+static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+ int max_perf, min_perf;
+
+ update_turbo_state();
+
+ intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+ pstate = clamp_t(int, pstate, min_perf, max_perf);
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+ intel_pstate_record_pstate(cpu, pstate);
+ wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
+}
+
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
int from, target_pstate;
@@ -1018,7 +1164,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
target_pstate = pstate_funcs.get_target_pstate(cpu);
- intel_pstate_set_pstate(cpu, target_pstate, true);
+ intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
trace_pstate_sample(fp_toint(sample->core_pct_busy),
@@ -1028,26 +1174,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
sample->mperf,
sample->aperf,
sample->tsc,
- sample->freq);
+ get_avg_frequency(cpu));
}
-static void intel_hwp_timer_func(unsigned long __data)
+static void intel_pstate_update_util(struct update_util_data *data, u64 time,
+ unsigned long util, unsigned long max)
{
- struct cpudata *cpu = (struct cpudata *) __data;
-
- intel_pstate_sample(cpu);
- intel_hwp_set_sample_time(cpu);
-}
+ struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+ u64 delta_ns = time - cpu->sample.time;
-static void intel_pstate_timer_func(unsigned long __data)
-{
- struct cpudata *cpu = (struct cpudata *) __data;
+ if ((s64)delta_ns >= pid_params.sample_rate_ns) {
+ bool sample_taken = intel_pstate_sample(cpu, time);
- intel_pstate_sample(cpu);
-
- intel_pstate_adjust_busy_pstate(cpu);
-
- intel_pstate_set_sample_time(cpu);
+ if (sample_taken && !hwp_active)
+ intel_pstate_adjust_busy_pstate(cpu);
+ }
}
#define ICPU(model, policy) \
@@ -1095,24 +1236,17 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu->cpu = cpunum;
- if (hwp_active)
+ if (hwp_active) {
intel_pstate_hwp_enable(cpu);
+ pid_params.sample_rate_ms = 50;
+ pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
+ }
intel_pstate_get_cpu_pstates(cpu);
- init_timer_deferrable(&cpu->timer);
- cpu->timer.data = (unsigned long)cpu;
- cpu->timer.expires = jiffies + HZ/100;
-
- if (!hwp_active)
- cpu->timer.function = intel_pstate_timer_func;
- else
- cpu->timer.function = intel_hwp_timer_func;
-
intel_pstate_busy_pid_reset(cpu);
- intel_pstate_sample(cpu);
- add_timer_on(&cpu->timer, cpunum);
+ cpu->update_util.func = intel_pstate_update_util;
pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
@@ -1128,7 +1262,36 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
if (!cpu)
return 0;
sample = &cpu->sample;
- return sample->freq;
+ return get_avg_frequency(cpu);
+}
+
+static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
+{
+ struct cpudata *cpu = all_cpu_data[cpu_num];
+
+ /* Prevent intel_pstate_update_util() from using stale data. */
+ cpu->sample.time = 0;
+ cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+}
+
+static void intel_pstate_clear_update_util_hook(unsigned int cpu)
+{
+ cpufreq_set_update_util_data(cpu, NULL);
+ synchronize_sched();
+}
+
+static void intel_pstate_set_performance_limits(struct perf_limits *limits)
+{
+ limits->no_turbo = 0;
+ limits->turbo_disabled = 0;
+ limits->max_perf_pct = 100;
+ limits->max_perf = int_tofp(1);
+ limits->min_perf_pct = 100;
+ limits->min_perf = int_tofp(1);
+ limits->max_policy_pct = 100;
+ limits->max_sysfs_pct = 100;
+ limits->min_policy_pct = 0;
+ limits->min_sysfs_pct = 0;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
@@ -1136,17 +1299,20 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
- policy->max >= policy->cpuinfo.max_freq) {
- pr_debug("intel_pstate: set performance\n");
+ intel_pstate_clear_update_util_hook(policy->cpu);
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
- if (hwp_active)
- intel_pstate_hwp_set();
- return 0;
+ if (policy->max >= policy->cpuinfo.max_freq) {
+ pr_debug("intel_pstate: set performance\n");
+ intel_pstate_set_performance_limits(limits);
+ goto out;
+ }
+ } else {
+ pr_debug("intel_pstate: set powersave\n");
+ limits = &powersave_limits;
}
- pr_debug("intel_pstate: set powersave\n");
- limits = &powersave_limits;
limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
@@ -1172,8 +1338,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
int_tofp(100));
+ out:
+ intel_pstate_set_update_util_hook(policy->cpu);
+
if (hwp_active)
- intel_pstate_hwp_set();
+ intel_pstate_hwp_set(policy->cpus);
return 0;
}
@@ -1196,11 +1365,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
- del_timer_sync(&all_cpu_data[cpu_num]->timer);
+ intel_pstate_clear_update_util_hook(cpu_num);
+
if (hwp_active)
return;
- intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
+ intel_pstate_set_min_pstate(cpu);
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -1260,6 +1430,7 @@ static int intel_pstate_msrs_not_valid(void)
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
+ pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
pid_params.p_gain_pct = policy->p_gain_pct;
pid_params.i_gain_pct = policy->i_gain_pct;
pid_params.d_gain_pct = policy->d_gain_pct;
@@ -1274,7 +1445,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_min = funcs->get_min;
pstate_funcs.get_turbo = funcs->get_turbo;
pstate_funcs.get_scaling = funcs->get_scaling;
- pstate_funcs.set = funcs->set;
+ pstate_funcs.get_val = funcs->get_val;
pstate_funcs.get_vid = funcs->get_vid;
pstate_funcs.get_target_pstate = funcs->get_target_pstate;
@@ -1397,6 +1568,11 @@ static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
+static const struct x86_cpu_id hwp_support_ids[] __initconst = {
+ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+ {}
+};
+
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
@@ -1406,17 +1582,16 @@ static int __init intel_pstate_init(void)
if (no_load)
return -ENODEV;
+ if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
+ copy_cpu_funcs(&core_params.funcs);
+ hwp_active++;
+ goto hwp_cpu_matched;
+ }
+
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id)
return -ENODEV;
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists())
- return -ENODEV;
-
cpu_def = (struct cpu_defaults *)id->driver_data;
copy_pid_params(&cpu_def->pid_policy);
@@ -1425,17 +1600,20 @@ static int __init intel_pstate_init(void)
if (intel_pstate_msrs_not_valid())
return -ENODEV;
+hwp_cpu_matched:
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists())
+ return -ENODEV;
+
pr_info("Intel P-state driver initializing.\n");
all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
return -ENOMEM;
- if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
- pr_info("intel_pstate: HWP enabled\n");
- hwp_active++;
- }
-
if (!hwp_active && hwp_only)
goto out;
@@ -1446,12 +1624,15 @@ static int __init intel_pstate_init(void)
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
+ if (hwp_active)
+ pr_info("intel_pstate: HWP enabled\n");
+
return rc;
out:
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
- del_timer_sync(&all_cpu_data[cpu]->timer);
+ intel_pstate_clear_update_util_hook(cpu);
kfree(all_cpu_data[cpu]);
}
}
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 547890fd9572..39ac78c94be0 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -28,6 +28,8 @@
#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <trace/events/power.h>
#include <asm/cputhreads.h>
#include <asm/firmware.h>
@@ -43,15 +45,39 @@
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static bool rebooting, throttled, occ_reset;
+static const char * const throttle_reason[] = {
+ "No throttling",
+ "Power Cap",
+ "Processor Over Temperature",
+ "Power Supply Failure",
+ "Over Current",
+ "OCC Reset"
+};
+
+enum throttle_reason_type {
+ NO_THROTTLE = 0,
+ POWERCAP,
+ CPU_OVERTEMP,
+ POWER_SUPPLY_FAILURE,
+ OVERCURRENT,
+ OCC_RESET_THROTTLE,
+ OCC_MAX_REASON
+};
+
static struct chip {
unsigned int id;
bool throttled;
+ bool restore;
+ u8 throttle_reason;
cpumask_t mask;
struct work_struct throttle;
- bool restore;
+ int throttle_turbo;
+ int throttle_sub_turbo;
+ int reason[OCC_MAX_REASON];
} *chips;
static int nr_chips;
+static DEFINE_PER_CPU(struct chip *, chip_info);
/*
* Note: The set of pstates consists of contiguous integers, the
@@ -183,6 +209,42 @@ static struct freq_attr *powernv_cpu_freq_attr[] = {
NULL,
};
+#define throttle_attr(name, member) \
+static ssize_t name##_show(struct cpufreq_policy *policy, char *buf) \
+{ \
+ struct chip *chip = per_cpu(chip_info, policy->cpu); \
+ \
+ return sprintf(buf, "%u\n", chip->member); \
+} \
+ \
+static struct freq_attr throttle_attr_##name = __ATTR_RO(name) \
+
+throttle_attr(unthrottle, reason[NO_THROTTLE]);
+throttle_attr(powercap, reason[POWERCAP]);
+throttle_attr(overtemp, reason[CPU_OVERTEMP]);
+throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
+throttle_attr(overcurrent, reason[OVERCURRENT]);
+throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
+throttle_attr(turbo_stat, throttle_turbo);
+throttle_attr(sub_turbo_stat, throttle_sub_turbo);
+
+static struct attribute *throttle_attrs[] = {
+ &throttle_attr_unthrottle.attr,
+ &throttle_attr_powercap.attr,
+ &throttle_attr_overtemp.attr,
+ &throttle_attr_supply_fault.attr,
+ &throttle_attr_overcurrent.attr,
+ &throttle_attr_occ_reset.attr,
+ &throttle_attr_turbo_stat.attr,
+ &throttle_attr_sub_turbo_stat.attr,
+ NULL,
+};
+
+static const struct attribute_group throttle_attr_grp = {
+ .name = "throttle_stats",
+ .attrs = throttle_attrs,
+};
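
For reference, throttle_attr(unthrottle, reason[NO_THROTTLE]) expands to a show routine and attribute like this:

	static ssize_t unthrottle_show(struct cpufreq_policy *policy, char *buf)
	{
		struct chip *chip = per_cpu(chip_info, policy->cpu);

		return sprintf(buf, "%u\n", chip->reason[NO_THROTTLE]);
	}

	static struct freq_attr throttle_attr_unthrottle = __ATTR_RO(unthrottle);

Once sysfs_create_group() runs in powernv_cpufreq_cpu_init() below, these counters appear under the policy's throttle_stats directory in sysfs.
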
+
/* Helper routines */
/* Access helpers to power mgt SPR */
@@ -311,34 +373,36 @@ static inline unsigned int get_nominal_index(void)
static void powernv_cpufreq_throttle_check(void *data)
{
+ struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
- int pmsr_pmax, i;
+ int pmsr_pmax;
pmsr = get_pmspr(SPRN_PMSR);
-
- for (i = 0; i < nr_chips; i++)
- if (chips[i].id == cpu_to_chip_id(cpu))
- break;
+ chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
pmsr_pmax = (s8)PMSR_MAX(pmsr);
if (pmsr_pmax != powernv_pstate_info.max) {
- if (chips[i].throttled)
+ if (chip->throttled)
goto next;
- chips[i].throttled = true;
- if (pmsr_pmax < powernv_pstate_info.nominal)
- pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
- cpu, chips[i].id, pmsr_pmax,
- powernv_pstate_info.nominal);
- else
- pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
- cpu, chips[i].id, pmsr_pmax,
- powernv_pstate_info.max);
- } else if (chips[i].throttled) {
- chips[i].throttled = false;
- pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
- chips[i].id, pmsr_pmax);
+ chip->throttled = true;
+ if (pmsr_pmax < powernv_pstate_info.nominal) {
+ pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+ cpu, chip->id, pmsr_pmax,
+ powernv_pstate_info.nominal);
+ chip->throttle_sub_turbo++;
+ } else {
+ chip->throttle_turbo++;
+ }
+ trace_powernv_throttle(chip->id,
+ throttle_reason[chip->throttle_reason],
+ pmsr_pmax);
+ } else if (chip->throttled) {
+ chip->throttled = false;
+ trace_powernv_throttle(chip->id,
+ throttle_reason[chip->throttle_reason],
+ pmsr_pmax);
}
/* Check if Psafe_mode_active is set in PMSR. */
@@ -356,7 +420,7 @@ next:
if (throttled) {
pr_info("PMSR = %16lx\n", pmsr);
- pr_crit("CPU Frequency could be throttled\n");
+ pr_warn("CPU Frequency could be throttled\n");
}
}
@@ -397,6 +461,21 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
for (i = 0; i < threads_per_core; i++)
cpumask_set_cpu(base + i, policy->cpus);
+ if (!policy->driver_data) {
+ int ret;
+
+ ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
+ if (ret) {
+ pr_info("Failed to create throttle stats directory for cpu %d\n",
+ policy->cpu);
+ return ret;
+ }
+ /*
+ * policy->driver_data is used as a flag for one-time
+ * creation of throttle sysfs files.
+ */
+ policy->driver_data = policy;
+ }
return cpufreq_table_validate_and_show(policy, powernv_freqs);
}
@@ -423,18 +502,19 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
{
struct chip *chip = container_of(work, struct chip, throttle);
unsigned int cpu;
- cpumask_var_t mask;
+ cpumask_t mask;
- smp_call_function_any(&chip->mask,
+ get_online_cpus();
+ cpumask_and(&mask, &chip->mask, cpu_online_mask);
+ smp_call_function_any(&mask,
powernv_cpufreq_throttle_check, NULL, 0);
if (!chip->restore)
- return;
+ goto out;
chip->restore = false;
- cpumask_copy(mask, &chip->mask);
- for_each_cpu_and(cpu, mask, cpu_online_mask) {
- int index, tcpu;
+ for_each_cpu(cpu, &mask) {
+ int index;
struct cpufreq_policy policy;
cpufreq_get_policy(&policy, cpu);
@@ -442,20 +522,12 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
policy.cur,
CPUFREQ_RELATION_C, &index);
powernv_cpufreq_target_index(&policy, index);
- for_each_cpu(tcpu, policy.cpus)
- cpumask_clear_cpu(tcpu, mask);
+ cpumask_andnot(&mask, &mask, policy.cpus);
}
+out:
+ put_online_cpus();
}
-static char throttle_reason[][30] = {
- "No throttling",
- "Power Cap",
- "Processor Over Temperature",
- "Power Supply Failure",
- "Over Current",
- "OCC Reset"
- };
-
static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
unsigned long msg_type, void *_msg)
{
@@ -481,7 +553,7 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
*/
if (!throttled) {
throttled = true;
- pr_crit("CPU frequency is throttled for duration\n");
+ pr_warn("CPU frequency is throttled for duration\n");
}
break;
@@ -505,23 +577,20 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
return 0;
}
- if (omsg.throttle_status &&
- omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
- pr_info("OCC: Chip %u Pmax reduced due to %s\n",
- (unsigned int)omsg.chip,
- throttle_reason[omsg.throttle_status]);
- else if (!omsg.throttle_status)
- pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
- throttle_reason[omsg.throttle_status]);
- else
- return 0;
-
for (i = 0; i < nr_chips; i++)
- if (chips[i].id == omsg.chip) {
- if (!omsg.throttle_status)
- chips[i].restore = true;
- schedule_work(&chips[i].throttle);
- }
+ if (chips[i].id == omsg.chip)
+ break;
+
+ if (omsg.throttle_status >= 0 &&
+ omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
+ chips[i].throttle_reason = omsg.throttle_status;
+ chips[i].reason[omsg.throttle_status]++;
+ }
+
+ if (!omsg.throttle_status)
+ chips[i].restore = true;
+
+ schedule_work(&chips[i].throttle);
}
return 0;
}
@@ -566,21 +635,33 @@ static int init_chip_info(void)
}
}
- chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+ chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
if (!chips)
return -ENOMEM;
for (i = 0; i < nr_chips; i++) {
chips[i].id = chip[i];
- chips[i].throttled = false;
cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
- chips[i].restore = false;
+ for_each_cpu(cpu, &chips[i].mask)
+ per_cpu(chip_info, cpu) = &chips[i];
}
return 0;
}
+static inline void clean_chip_info(void)
+{
+ kfree(chips);
+}
+
+static inline void unregister_all_notifiers(void)
+{
+ opal_message_notifier_unregister(OPAL_MSG_OCC,
+ &powernv_cpufreq_opal_nb);
+ unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+}
+
static int __init powernv_cpufreq_init(void)
{
int rc = 0;
@@ -591,28 +672,35 @@ static int __init powernv_cpufreq_init(void)
/* Discover pstates from device tree and init */
rc = init_powernv_pstates();
- if (rc) {
- pr_info("powernv-cpufreq disabled. System does not support PState control\n");
- return rc;
- }
+ if (rc)
+ goto out;
/* Populate chip info */
rc = init_chip_info();
if (rc)
- return rc;
+ goto out;
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
- return cpufreq_register_driver(&powernv_cpufreq_driver);
+
+ rc = cpufreq_register_driver(&powernv_cpufreq_driver);
+ if (!rc)
+ return 0;
+
+ pr_info("Failed to register the cpufreq driver (%d)\n", rc);
+ unregister_all_notifiers();
+ clean_chip_info();
+out:
+ pr_info("Platform driver disabled. System does not support PState control\n");
+ return rc;
}
module_init(powernv_cpufreq_init);
static void __exit powernv_cpufreq_exit(void)
{
- unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
- opal_message_notifier_unregister(OPAL_MSG_OCC,
- &powernv_cpufreq_opal_nb);
cpufreq_unregister_driver(&powernv_cpufreq_driver);
+ unregister_all_notifiers();
+ clean_chip_info();
}
module_exit(powernv_cpufreq_exit);
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 096377232747..46fee1539cc8 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -319,7 +319,7 @@ static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx)
local_irq_save(flags);
/* Set new the CCCR and prepare CCLKCFG */
- CCCR = pxa_freq_settings[idx].cccr;
+ writel(pxa_freq_settings[idx].cccr, CCCR);
cclkcfg = pxa_freq_settings[idx].cclkcfg;
asm volatile(" \n\
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 051a8a8224cd..a145b319d171 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -576,10 +576,8 @@ static struct cpufreq_driver s5pv210_driver = {
.get = cpufreq_generic_get,
.init = s5pv210_cpu_init,
.name = "s5pv210",
-#ifdef CONFIG_PM
.suspend = cpufreq_generic_suspend,
.resume = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
-#endif
};
static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 0742b3296673..03d38c291de6 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -196,11 +196,11 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static void get_typical_interval(struct menu_device *data)
+static unsigned int get_typical_interval(struct menu_device *data)
{
int i, divisor;
- unsigned int max, thresh;
- uint64_t avg, stddev;
+ unsigned int max, thresh, avg;
+ uint64_t sum, variance;
thresh = UINT_MAX; /* Discard outliers above this value */
@@ -208,55 +208,52 @@ again:
/* First calculate the average of past intervals */
max = 0;
- avg = 0;
+ sum = 0;
divisor = 0;
for (i = 0; i < INTERVALS; i++) {
unsigned int value = data->intervals[i];
if (value <= thresh) {
- avg += value;
+ sum += value;
divisor++;
if (value > max)
max = value;
}
}
if (divisor == INTERVALS)
- avg >>= INTERVAL_SHIFT;
+ avg = sum >> INTERVAL_SHIFT;
else
- do_div(avg, divisor);
+ avg = div_u64(sum, divisor);
- /* Then try to determine standard deviation */
- stddev = 0;
+ /* Then try to determine variance */
+ variance = 0;
for (i = 0; i < INTERVALS; i++) {
unsigned int value = data->intervals[i];
if (value <= thresh) {
- int64_t diff = value - avg;
- stddev += diff * diff;
+ int64_t diff = (int64_t)value - avg;
+ variance += diff * diff;
}
}
if (divisor == INTERVALS)
- stddev >>= INTERVAL_SHIFT;
+ variance >>= INTERVAL_SHIFT;
else
- do_div(stddev, divisor);
+ do_div(variance, divisor);
/*
- * The typical interval is obtained when standard deviation is small
- * or standard deviation is small compared to the average interval.
- *
- * int_sqrt() formal parameter type is unsigned long. When the
- * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
- * the resulting squared standard deviation exceeds the input domain
- * of int_sqrt on platforms where unsigned long is 32 bits in size.
- * In such case reject the candidate average.
+ * The typical interval is obtained when standard deviation is
+ * small (stddev <= 20 us, variance <= 400 us^2) or standard
+ * deviation is small compared to the average interval (avg >
+ * 6*stddev, avg^2 > 36*variance). The average is smaller than
+ * UINT_MAX aka U32_MAX, so computing its square does not
+ * overflow a u64. We simply reject this candidate average if
+ * the standard deviation is greater than 715 s (which is
+ * rather unlikely).
*
* Use this result only if there is no timer to wake us up sooner.
*/
- if (likely(stddev <= ULONG_MAX)) {
- stddev = int_sqrt(stddev);
- if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
- || stddev <= 20) {
- if (data->next_timer_us > avg)
- data->predicted_us = avg;
- return;
+ if (likely(variance <= U64_MAX/36)) {
+ if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
+ || variance <= 400) {
+ return avg;
}
}
@@ -270,7 +267,7 @@ again:
* with sporadic activity with a bunch of short pauses.
*/
if ((divisor * 4) <= INTERVALS * 3)
- return;
+ return UINT_MAX;
thresh = max - 1;
goto again;
@@ -287,6 +284,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
unsigned int interactivity_req;
+ unsigned int expected_interval;
unsigned long nr_iowaiters, cpu_load;
if (data->needs_update) {
@@ -313,32 +311,43 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->correction_factor[data->bucket],
RESOLUTION * DECAY);
- get_typical_interval(data);
-
- /*
- * Performance multiplier defines a minimum predicted idle
- * duration / latency ratio. Adjust the latency limit if
- * necessary.
- */
- interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
- if (latency_req > interactivity_req)
- latency_req = interactivity_req;
+ expected_interval = get_typical_interval(data);
+ expected_interval = min(expected_interval, data->next_timer_us);
if (CPUIDLE_DRIVER_STATE_START > 0) {
- data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
+ struct cpuidle_state *s = &drv->states[CPUIDLE_DRIVER_STATE_START];
+ unsigned int polling_threshold;
+
/*
* We want to default to C1 (hlt), not to busy polling
- * unless the timer is happening really really soon.
+ * unless the timer is happening really really soon, or
+ * C1's exit latency exceeds the user configured limit.
*/
- if (interactivity_req > 20 &&
- !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
- dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
+ polling_threshold = max_t(unsigned int, 20, s->target_residency);
+ if (data->next_timer_us > polling_threshold &&
+ latency_req > s->exit_latency && !s->disabled &&
+ !dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
+ else
+ data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
} else {
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
}
/*
+ * Use the lowest expected idle interval to pick the idle state.
+ */
+ data->predicted_us = min(data->predicted_us, expected_interval);
+
+ /*
+ * Use the performance multiplier and the user-configurable
+ * latency_req to determine the maximum exit latency.
+ */
+ interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+ if (latency_req > interactivity_req)
+ latency_req = interactivity_req;
+
+ /*
* Find the idle state with the lowest power while satisfying
* our constraints.
*/
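The rewritten test works entirely in integer arithmetic: avg > 6 * stddev is squared into avg^2 > 36 * variance, which needs no int_sqrt(), and the variance <= U64_MAX / 36 guard keeps the multiplication from overflowing (sqrt(U64_MAX / 36) is roughly 715 seconds in microseconds, matching the comment above). A self-contained sketch of the same criterion, with illustrative names:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch of the overflow-safe variance test in get_typical_interval();
 * 'intervals' plays the role of INTERVALS.
 */
static bool typical_interval_detected(unsigned int avg, u64 variance,
				      int divisor, int intervals)
{
	if (variance > U64_MAX / 36)
		return false;			/* stddev > ~715 s: reject */

	/* avg <= U32_MAX, so (u64)avg * avg cannot overflow a u64 */
	if ((u64)avg * avg > variance * 36 && divisor * 4 >= intervals * 3)
		return true;			/* avg > 6 * stddev */

	return variance <= 400;			/* stddev <= 20 us */
}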
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 07d494276aad..477fffdb4f49 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -296,6 +296,7 @@ config CRYPTO_DEV_OMAP_AES
depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
select CRYPTO_AES
select CRYPTO_BLKCIPHER
+ select CRYPTO_ENGINE
help
OMAP processors have an AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
@@ -487,7 +488,7 @@ config CRYPTO_DEV_IMGTEC_HASH
config CRYPTO_DEV_SUN4I_SS
tristate "Support for Allwinner Security System cryptographic accelerator"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI && !64BIT
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_AES
@@ -507,6 +508,10 @@ config CRYPTO_DEV_ROCKCHIP
depends on OF && ARCH_ROCKCHIP
select CRYPTO_AES
select CRYPTO_DES
+ select CRYPTO_MD5
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
select CRYPTO_BLKCIPHER
help
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 3eb3f1279fb7..e3d40a8dfffb 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -369,12 +369,6 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
return len ? block_size - len : 0;
}
-static inline struct aead_request *
-aead_request_cast(struct crypto_async_request *req)
-{
- return container_of(req, struct aead_request, base);
-}
-
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
struct atmel_aes_dev *aes_dd = NULL;
@@ -2085,9 +2079,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
- if (!aes_dd->io_base) {
+ if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(aes_dd->io_base);
goto res_err;
}
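devm_ioremap_resource() never returns NULL; it returns an ERR_PTR-encoded errno, so the old !ptr check could not catch a failure and -ENOMEM misreported the cause. The canonical pattern, which this patch also applies to atmel-sha and atmel-tdes below:

/* Canonical error handling for devm_ioremap_resource(). */
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
	return PTR_ERR(base);	/* propagate the real errno, never -ENOMEM */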
diff --git a/drivers/crypto/atmel-sha-regs.h b/drivers/crypto/atmel-sha-regs.h
index 83b2d7425666..e08897109cab 100644
--- a/drivers/crypto/atmel-sha-regs.h
+++ b/drivers/crypto/atmel-sha-regs.h
@@ -8,6 +8,8 @@
#define SHA_CR_START (1 << 0)
#define SHA_CR_FIRST (1 << 4)
#define SHA_CR_SWRST (1 << 8)
+#define SHA_CR_WUIHV (1 << 12)
+#define SHA_CR_WUIEHV (1 << 13)
#define SHA_MR 0x04
#define SHA_MR_MODE_MASK (0x3 << 0)
@@ -15,6 +17,8 @@
#define SHA_MR_MODE_AUTO 0x1
#define SHA_MR_MODE_PDC 0x2
#define SHA_MR_PROCDLY (1 << 4)
+#define SHA_MR_UIHV (1 << 5)
+#define SHA_MR_UIEHV (1 << 6)
#define SHA_MR_ALGO_SHA1 (0 << 8)
#define SHA_MR_ALGO_SHA256 (1 << 8)
#define SHA_MR_ALGO_SHA384 (2 << 8)
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8bf9914d4d15..97e34799e077 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -53,6 +53,7 @@
#define SHA_FLAGS_FINUP BIT(16)
#define SHA_FLAGS_SG BIT(17)
+#define SHA_FLAGS_ALGO_MASK GENMASK(22, 18)
#define SHA_FLAGS_SHA1 BIT(18)
#define SHA_FLAGS_SHA224 BIT(19)
#define SHA_FLAGS_SHA256 BIT(20)
@@ -60,11 +61,12 @@
#define SHA_FLAGS_SHA512 BIT(22)
#define SHA_FLAGS_ERROR BIT(23)
#define SHA_FLAGS_PAD BIT(24)
+#define SHA_FLAGS_RESTORE BIT(25)
#define SHA_OP_UPDATE 1
#define SHA_OP_FINAL 2
-#define SHA_BUFFER_LEN PAGE_SIZE
+#define SHA_BUFFER_LEN (PAGE_SIZE / 16)
#define ATMEL_SHA_DMA_THRESHOLD 56
@@ -73,10 +75,15 @@ struct atmel_sha_caps {
bool has_dualbuff;
bool has_sha224;
bool has_sha_384_512;
+ bool has_uihv;
};
struct atmel_sha_dev;
+/*
+ * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
struct atmel_sha_reqctx {
struct atmel_sha_dev *dd;
unsigned long flags;
@@ -95,7 +102,7 @@ struct atmel_sha_reqctx {
size_t block_size;
- u8 buffer[0] __aligned(sizeof(u32));
+ u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};
struct atmel_sha_ctx {
@@ -122,6 +129,7 @@ struct atmel_sha_dev {
spinlock_t lock;
int err;
struct tasklet_struct done_task;
+ struct tasklet_struct queue_task;
unsigned long flags;
struct crypto_queue queue;
@@ -317,7 +325,8 @@ static int atmel_sha_init(struct ahash_request *req)
static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
- u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
+ u32 valmr = SHA_MR_MODE_AUTO;
+ unsigned int i, hashsize = 0;
if (likely(dma)) {
if (!dd->caps.has_dma)
@@ -329,22 +338,62 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}
- if (ctx->flags & SHA_FLAGS_SHA1)
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_SHA1:
valmr |= SHA_MR_ALGO_SHA1;
- else if (ctx->flags & SHA_FLAGS_SHA224)
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA224:
valmr |= SHA_MR_ALGO_SHA224;
- else if (ctx->flags & SHA_FLAGS_SHA256)
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA256:
valmr |= SHA_MR_ALGO_SHA256;
- else if (ctx->flags & SHA_FLAGS_SHA384)
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA384:
valmr |= SHA_MR_ALGO_SHA384;
- else if (ctx->flags & SHA_FLAGS_SHA512)
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA512:
valmr |= SHA_MR_ALGO_SHA512;
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ default:
+ break;
+ }
/* Setting CR_FIRST only for the first iteration */
- if (!(ctx->digcnt[0] || ctx->digcnt[1]))
- valcr = SHA_CR_FIRST;
+ if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
+ atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+ } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
+ const u32 *hash = (const u32 *)ctx->digest;
+
+ /*
+ * Restore the hardware context: update the User Initialize
+ * Hash Value (UIHV) with the value saved when the latest
+ * 'update' operation completed on this very same crypto
+ * request.
+ */
+ ctx->flags &= ~SHA_FLAGS_RESTORE;
+ atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+ for (i = 0; i < hashsize / sizeof(u32); ++i)
+ atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
+ atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+ valmr |= SHA_MR_UIHV;
+ }
+ /*
+ * WARNING: If the UIHV feature is not available, the hardware CANNOT
+ * process concurrent requests: the internal registers used to store
+ * the hash/digest are still set to the partial digest output values
+ * computed during the latest round.
+ */
- atmel_sha_write(dd, SHA_CR, valcr);
atmel_sha_write(dd, SHA_MR, valmr);
}
@@ -713,23 +762,31 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
u32 *hash = (u32 *)ctx->digest;
- int i;
+ unsigned int i, hashsize;
- if (ctx->flags & SHA_FLAGS_SHA1)
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA224)
- for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA256)
- for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else if (ctx->flags & SHA_FLAGS_SHA384)
- for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
- else
- for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
- hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+ case SHA_FLAGS_SHA1:
+ hashsize = SHA1_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA224:
+ case SHA_FLAGS_SHA256:
+ hashsize = SHA256_DIGEST_SIZE;
+ break;
+
+ case SHA_FLAGS_SHA384:
+ case SHA_FLAGS_SHA512:
+ hashsize = SHA512_DIGEST_SIZE;
+ break;
+
+ default:
+ /* Should not happen... */
+ return;
+ }
+
+ for (i = 0; i < hashsize / sizeof(u32); ++i)
+ hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+ ctx->flags |= SHA_FLAGS_RESTORE;
}
static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -788,7 +845,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
req->base.complete(&req->base, err);
/* handle new request */
- tasklet_schedule(&dd->done_task);
+ tasklet_schedule(&dd->queue_task);
}
static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
@@ -922,36 +979,17 @@ static int atmel_sha_update(struct ahash_request *req)
static int atmel_sha_final(struct ahash_request *req)
{
struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
- struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
- struct atmel_sha_dev *dd = tctx->dd;
-
- int err = 0;
ctx->flags |= SHA_FLAGS_FINUP;
if (ctx->flags & SHA_FLAGS_ERROR)
return 0; /* uncompleted hash is not needed */
- if (ctx->bufcnt) {
- return atmel_sha_enqueue(req, SHA_OP_FINAL);
- } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
- err = atmel_sha_hw_init(dd);
- if (err)
- goto err1;
-
- dd->flags |= SHA_FLAGS_BUSY;
- err = atmel_sha_final_req(dd);
- } else {
+ if (ctx->flags & SHA_FLAGS_PAD)
/* copy ready hash (+ finalize hmac) */
return atmel_sha_finish(req);
- }
-
-err1:
- if (err != -EINPROGRESS)
- /* done_task will not finish it, so do it here */
- atmel_sha_finish_req(req, err);
- return err;
+ return atmel_sha_enqueue(req, SHA_OP_FINAL);
}
static int atmel_sha_finup(struct ahash_request *req)
@@ -979,11 +1017,27 @@ static int atmel_sha_digest(struct ahash_request *req)
return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
+
+static int atmel_sha_export(struct ahash_request *req, void *out)
+{
+ const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(out, ctx, sizeof(*ctx));
+ return 0;
+}
+
+static int atmel_sha_import(struct ahash_request *req, const void *in)
+{
+ struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+
+ memcpy(ctx, in, sizeof(*ctx));
+ return 0;
+}
+
static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct atmel_sha_reqctx) +
- SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
+ sizeof(struct atmel_sha_reqctx));
return 0;
}
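Bounding the request context with a fixed buffer makes export/import a plain memcpy of the whole struct, so a partial hash can be checkpointed and resumed. A hedged usage sketch against the generic ahash API; the function name, scatterlists, and lengths are illustrative, and asynchronous (-EINPROGRESS) completion handling is elided:

#include <crypto/hash.h>
#include <linux/slab.h>

/* Sketch: checkpoint and resume a partial hash through the ahash API. */
static int hash_in_two_steps(struct crypto_ahash *tfm,
			     struct ahash_request *req,
			     struct scatterlist *sg1, unsigned int len1,
			     struct scatterlist *sg2, unsigned int len2,
			     u8 *digest)
{
	u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	int ret;

	if (!state)
		return -ENOMEM;

	ahash_request_set_crypt(req, sg1, NULL, len1);
	ret = crypto_ahash_init(req) ?: crypto_ahash_update(req);
	if (!ret)
		ret = crypto_ahash_export(req, state);	/* checkpoint */
	if (!ret)
		ret = crypto_ahash_import(req, state);	/* resume, maybe later */
	if (!ret) {
		ahash_request_set_crypt(req, sg2, digest, len2);
		ret = crypto_ahash_finup(req);		/* finish remainder */
	}
	kfree(state);
	return ret;
}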
@@ -995,8 +1049,11 @@ static struct ahash_alg sha_1_256_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha1",
.cra_driver_name = "atmel-sha1",
@@ -1016,8 +1073,11 @@ static struct ahash_alg sha_1_256_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha256",
.cra_driver_name = "atmel-sha256",
@@ -1039,8 +1099,11 @@ static struct ahash_alg sha_224_alg = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha224",
.cra_driver_name = "atmel-sha224",
@@ -1062,8 +1125,11 @@ static struct ahash_alg sha_384_512_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha384",
.cra_driver_name = "atmel-sha384",
@@ -1083,8 +1149,11 @@ static struct ahash_alg sha_384_512_algs[] = {
.final = atmel_sha_final,
.finup = atmel_sha_finup,
.digest = atmel_sha_digest,
+ .export = atmel_sha_export,
+ .import = atmel_sha_import,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct atmel_sha_reqctx),
.base = {
.cra_name = "sha512",
.cra_driver_name = "atmel-sha512",
@@ -1100,16 +1169,18 @@ static struct ahash_alg sha_384_512_algs[] = {
},
};
+static void atmel_sha_queue_task(unsigned long data)
+{
+ struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+
+ atmel_sha_handle_queue(dd, NULL);
+}
+
static void atmel_sha_done_task(unsigned long data)
{
struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
int err = 0;
- if (!(SHA_FLAGS_BUSY & dd->flags)) {
- atmel_sha_handle_queue(dd, NULL);
- return;
- }
-
if (SHA_FLAGS_CPU & dd->flags) {
if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
@@ -1272,14 +1343,23 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
dd->caps.has_dualbuff = 0;
dd->caps.has_sha224 = 0;
dd->caps.has_sha_384_512 = 0;
+ dd->caps.has_uihv = 0;
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
+ case 0x510:
+ dd->caps.has_dma = 1;
+ dd->caps.has_dualbuff = 1;
+ dd->caps.has_sha224 = 1;
+ dd->caps.has_sha_384_512 = 1;
+ dd->caps.has_uihv = 1;
+ break;
case 0x420:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
dd->caps.has_sha224 = 1;
dd->caps.has_sha_384_512 = 1;
+ dd->caps.has_uihv = 1;
break;
case 0x410:
dd->caps.has_dma = 1;
@@ -1366,6 +1446,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
(unsigned long)sha_dd);
+ tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
+ (unsigned long)sha_dd);
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
@@ -1404,9 +1486,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
- if (!sha_dd->io_base) {
+ if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(sha_dd->io_base);
goto res_err;
}
@@ -1464,6 +1546,7 @@ err_sha_dma:
iclk_unprepare:
clk_unprepare(sha_dd->iclk);
res_err:
+ tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
sha_dd_err:
dev_err(dev, "initialization failed.\n");
@@ -1484,6 +1567,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
atmel_sha_unregister_algs(sha_dd);
+ tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
if (sha_dd->caps.has_dma)
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 2c7a628d0375..bf467d7be35c 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1417,9 +1417,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
- if (!tdes_dd->io_base) {
+ if (IS_ERR(tdes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
- err = -ENOMEM;
+ err = PTR_ERR(tdes_dd->io_base);
goto res_err;
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 69d4a1326fee..44d30b45f3cc 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -534,7 +534,7 @@ static int caam_probe(struct platform_device *pdev)
* long pointers in master configuration register
*/
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
- MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
+ MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index f7e0d8d4c3da..6fd63a600614 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -65,7 +65,7 @@ static int caam_reset_hw_jr(struct device *dev)
/*
* Shutdown JobR independent of platform property code
*/
-int caam_jr_shutdown(struct device *dev)
+static int caam_jr_shutdown(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
dma_addr_t inpbusaddr, outbusaddr;
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index a8a79975682f..0ba9c40597dc 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -455,7 +455,8 @@ struct caam_ctrl {
#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
-#define MCFGR_BURST_64 0x00000001 /* Max burst size */
+#define MCFGR_LARGE_BURST 0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64 0x00000001 /* 64-byte burst size */
/* JRSTART register offsets */
#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 55a1f3951578..b750592cc936 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o ccp-platform.o
+ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o
ccp-$(CONFIG_PCI) += ccp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index d89f20c04266..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -220,6 +220,42 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
return ccp_aes_cmac_finup(req);
}
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
+ state.null_msg = rctx->null_msg;
+ memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_aes_cmac_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->null_msg = state.null_msg;
+ memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -352,10 +388,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
alg->final = ccp_aes_cmac_final;
alg->finup = ccp_aes_cmac_finup;
alg->digest = ccp_aes_cmac_digest;
+ alg->export = ccp_aes_cmac_export;
+ alg->import = ccp_aes_cmac_import;
alg->setkey = ccp_aes_cmac_setkey;
halg = &alg->halg;
halg->digestsize = AES_BLOCK_SIZE;
+ halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
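The export/import pair copies through a zeroed, stack-local ccp_aes_cmac_exp_ctx rather than casting the caller's buffer: 'out'/'in' may be unaligned, and zeroing first ensures no stale padding bytes leak into exported state. The same pattern in isolation (struct exp_ctx, struct req_ctx, and 'count' are illustrative):

/* Pattern: bounce exported state through a zeroed local struct. */
static int export_state(const struct req_ctx *rctx, void *out)
{
	struct exp_ctx state;

	memset(&state, 0, sizeof(state));	/* no padding bytes escape */
	state.count = rctx->count;		/* copy only the named fields */
	memcpy(out, &state, sizeof(state));	/* memcpy handles unaligned 'out' */
	return 0;
}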
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 7984f910884d..89291c15015c 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) AES crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -259,6 +259,7 @@ static struct crypto_alg ccp_aes_rfc3686_defaults = {
struct ccp_aes_def {
enum ccp_aes_mode mode;
+ unsigned int version;
const char *name;
const char *driver_name;
unsigned int blocksize;
@@ -269,6 +270,7 @@ struct ccp_aes_def {
static struct ccp_aes_def aes_algs[] = {
{
.mode = CCP_AES_MODE_ECB,
+ .version = CCP_VERSION(3, 0),
.name = "ecb(aes)",
.driver_name = "ecb-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -277,6 +279,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CBC,
+ .version = CCP_VERSION(3, 0),
.name = "cbc(aes)",
.driver_name = "cbc-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -285,6 +288,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CFB,
+ .version = CCP_VERSION(3, 0),
.name = "cfb(aes)",
.driver_name = "cfb-aes-ccp",
.blocksize = AES_BLOCK_SIZE,
@@ -293,6 +297,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_OFB,
+ .version = CCP_VERSION(3, 0),
.name = "ofb(aes)",
.driver_name = "ofb-aes-ccp",
.blocksize = 1,
@@ -301,6 +306,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CTR,
+ .version = CCP_VERSION(3, 0),
.name = "ctr(aes)",
.driver_name = "ctr-aes-ccp",
.blocksize = 1,
@@ -309,6 +315,7 @@ static struct ccp_aes_def aes_algs[] = {
},
{
.mode = CCP_AES_MODE_CTR,
+ .version = CCP_VERSION(3, 0),
.name = "rfc3686(ctr(aes))",
.driver_name = "rfc3686-ctr-aes-ccp",
.blocksize = 1,
@@ -357,8 +364,11 @@ static int ccp_register_aes_alg(struct list_head *head,
int ccp_register_aes_algs(struct list_head *head)
{
int i, ret;
+ unsigned int ccpversion = ccp_version();
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+ if (aes_algs[i].version > ccpversion)
+ continue;
ret = ccp_register_aes_alg(head, &aes_algs[i]);
if (ret)
return ret;
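Each algorithm descriptor now carries the minimum CCP version it needs, and registration skips entries newer than the hardware. This assumes CCP_VERSION() packs major/minor so a plain integer compare orders versions correctly, e.g.:

/* Assumed encoding -- the real macro lives in include/linux/ccp.h. */
#define CCP_VERSION(maj, min)	(((maj) << 4) | (min))

/* CCP_VERSION(3, 0) entries register on a v3 part; a hypothetical
 * CCP_VERSION(5, 0) entry would be skipped there.
 */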
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index d14b3f28e010..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) SHA crypto API support
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -207,6 +207,46 @@ static int ccp_sha_digest(struct ahash_request *req)
return ccp_sha_finup(req);
}
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
+ state.type = rctx->type;
+ state.msg_bits = rctx->msg_bits;
+ state.first = rctx->first;
+ memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+ state.buf_count = rctx->buf_count;
+ memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+ /* 'out' may not be aligned so memcpy from local variable */
+ memcpy(out, &state, sizeof(state));
+
+ return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+ struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct ccp_sha_exp_ctx state;
+
+ /* 'in' may not be aligned so memcpy to local variable */
+ memcpy(&state, in, sizeof(state));
+
+ memset(rctx, 0, sizeof(*rctx));
+ rctx->type = state.type;
+ rctx->msg_bits = state.msg_bits;
+ rctx->first = state.first;
+ memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+ rctx->buf_count = state.buf_count;
+ memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+ return 0;
+}
+
static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int key_len)
{
@@ -304,6 +344,7 @@ static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
}
struct ccp_sha_def {
+ unsigned int version;
const char *name;
const char *drv_name;
enum ccp_sha_type type;
@@ -313,6 +354,7 @@ struct ccp_sha_def {
static struct ccp_sha_def sha_algs[] = {
{
+ .version = CCP_VERSION(3, 0),
.name = "sha1",
.drv_name = "sha1-ccp",
.type = CCP_SHA_TYPE_1,
@@ -320,6 +362,7 @@ static struct ccp_sha_def sha_algs[] = {
.block_size = SHA1_BLOCK_SIZE,
},
{
+ .version = CCP_VERSION(3, 0),
.name = "sha224",
.drv_name = "sha224-ccp",
.type = CCP_SHA_TYPE_224,
@@ -327,6 +370,7 @@ static struct ccp_sha_def sha_algs[] = {
.block_size = SHA224_BLOCK_SIZE,
},
{
+ .version = CCP_VERSION(3, 0),
.name = "sha256",
.drv_name = "sha256-ccp",
.type = CCP_SHA_TYPE_256,
@@ -403,9 +447,12 @@ static int ccp_register_sha_alg(struct list_head *head,
alg->final = ccp_sha_final;
alg->finup = ccp_sha_finup;
alg->digest = ccp_sha_digest;
+ alg->export = ccp_sha_export;
+ alg->import = ccp_sha_import;
halg = &alg->halg;
halg->digestsize = def->digest_size;
+ halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
@@ -440,8 +487,11 @@ static int ccp_register_sha_alg(struct list_head *head,
int ccp_register_sha_algs(struct list_head *head)
{
int i, ret;
+ unsigned int ccpversion = ccp_version();
for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
+ if (sha_algs[i].version > ccpversion)
+ continue;
ret = ccp_register_sha_alg(head, &sha_algs[i]);
if (ret)
return ret;
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 76a96f0f44c6..a326ec20bfa8 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_aes_cmac_exp_ctx {
+ unsigned int null_msg;
+
+ u8 iv[AES_BLOCK_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[AES_BLOCK_SIZE];
+};
+
/***** SHA related defines *****/
#define MAX_SHA_CONTEXT_SIZE SHA256_DIGEST_SIZE
#define MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
struct ccp_cmd cmd;
};
+struct ccp_sha_exp_ctx {
+ enum ccp_sha_type type;
+
+ u64 msg_bits;
+
+ unsigned int first;
+
+ u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+ unsigned int buf_count;
+ u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
/***** Common Context Structure *****/
struct ccp_ctx {
int (*complete)(struct crypto_async_request *req, int ret);
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
new file mode 100644
index 000000000000..7d5eab49179e
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -0,0 +1,533 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+
+static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
+{
+ struct ccp_cmd_queue *cmd_q = op->cmd_q;
+ struct ccp_device *ccp = cmd_q->ccp;
+ void __iomem *cr_addr;
+ u32 cr0, cmd;
+ unsigned int i;
+ int ret = 0;
+
+ /* We could read a status register to see how many free slots
+ * are actually available, but reading that register resets it
+ * and you could lose some error information.
+ */
+ cmd_q->free_slots--;
+
+ cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
+ | (op->jobid << REQ0_JOBID_SHIFT)
+ | REQ0_WAIT_FOR_WRITE;
+
+ if (op->soc)
+ cr0 |= REQ0_STOP_ON_COMPLETE
+ | REQ0_INT_ON_COMPLETE;
+
+ if (op->ioc || !cmd_q->free_slots)
+ cr0 |= REQ0_INT_ON_COMPLETE;
+
+ /* Start at CMD_REQ1 */
+ cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
+
+ mutex_lock(&ccp->req_mutex);
+
+ /* Write CMD_REQ1 through CMD_REQx first */
+ for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
+ iowrite32(*(cr + i), cr_addr);
+
+ /* Tell the CCP to start */
+ wmb();
+ iowrite32(cr0, ccp->io_regs + CMD_REQ0);
+
+ mutex_unlock(&ccp->req_mutex);
+
+ if (cr0 & REQ0_INT_ON_COMPLETE) {
+ /* Wait for the job to complete */
+ ret = wait_event_interruptible(cmd_q->int_queue,
+ cmd_q->int_rcvd);
+ if (ret || cmd_q->cmd_error) {
+ /* On error delete all related jobs from the queue */
+ cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+
+ if (!ret)
+ ret = -EIO;
+ } else if (op->soc) {
+ /* Delete just head job from the queue on SoC */
+ cmd = DEL_Q_ACTIVE
+ | (cmd_q->id << DEL_Q_ID_SHIFT)
+ | op->jobid;
+
+ iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
+ }
+
+ cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 0;
+ }
+
+ return ret;
+}
+
+static int ccp_perform_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
+ | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
+ | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
+ | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->u.aes.mode == CCP_AES_MODE_CFB)
+ cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_xts_aes(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
+ | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
+ | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ if (op->init)
+ cr[0] |= REQ1_INIT;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_sha(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
+ | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
+ | REQ1_INIT;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->eom) {
+ cr[0] |= REQ1_EOM;
+ cr[4] = lower_32_bits(op->u.sha.msg_bits);
+ cr[5] = upper_32_bits(op->u.sha.msg_bits);
+ } else {
+ cr[4] = 0;
+ cr[5] = 0;
+ }
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_rsa(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
+ | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
+ | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->u.rsa.input_len - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
+ | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_passthru(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
+ | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
+ | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM)
+ cr[1] = op->src.u.dma.length - 1;
+ else
+ cr[1] = op->dst.u.dma.length - 1;
+
+ if (op->src.type == CCP_MEMTYPE_SYSTEM) {
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+
+ if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
+ } else {
+ cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
+ cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
+ }
+
+ if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+ } else {
+ cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
+ cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
+ }
+
+ if (op->eom)
+ cr[0] |= REQ1_EOM;
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_perform_ecc(struct ccp_op *op)
+{
+ u32 cr[6];
+
+ /* Fill out the register contents for REQ1 through REQ6 */
+ cr[0] = REQ1_ECC_AFFINE_CONVERT
+ | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
+ | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
+ | REQ1_EOM;
+ cr[1] = op->src.u.dma.length - 1;
+ cr[2] = ccp_addr_lo(&op->src.u.dma);
+ cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->src.u.dma);
+ cr[4] = ccp_addr_lo(&op->dst.u.dma);
+ cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
+ | ccp_addr_hi(&op->dst.u.dma);
+
+ return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
+}
+
+static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
+ u32 trng_value;
+ int len = min_t(int, sizeof(trng_value), max);
+
+ /*
+ * Locking is provided by the caller so we can update device
+ * hwrng-related fields safely
+ */
+ trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
+ if (!trng_value) {
+ /* Zero is returned if no data is available or if a
+ * bad-entropy error is present. Assume an error if
+ * we exceed TRNG_RETRIES reads of zero.
+ */
+ if (ccp->hwrng_retries++ > TRNG_RETRIES)
+ return -EIO;
+
+ return 0;
+ }
+
+ /* Reset the counter and save the rng value */
+ ccp->hwrng_retries = 0;
+ memcpy(data, &trng_value, len);
+
+ return len;
+}
+
+static int ccp_init(struct ccp_device *ccp)
+{
+ struct device *dev = ccp->dev;
+ struct ccp_cmd_queue *cmd_q;
+ struct dma_pool *dma_pool;
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+ unsigned int qmr, qim, i;
+ int ret;
+
+ /* Find available queues */
+ qim = 0;
+ qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+
+ /* Allocate a dma pool for this queue */
+ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
+ ccp->name, i);
+ dma_pool = dma_pool_create(dma_pool_name, dev,
+ CCP_DMAPOOL_MAX_SIZE,
+ CCP_DMAPOOL_ALIGN, 0);
+ if (!dma_pool) {
+ dev_err(dev, "unable to allocate dma pool\n");
+ ret = -ENOMEM;
+ goto e_pool;
+ }
+
+ cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
+ ccp->cmd_q_count++;
+
+ cmd_q->ccp = ccp;
+ cmd_q->id = i;
+ cmd_q->dma_pool = dma_pool;
+
+ /* Reserve 2 KSB regions for the queue */
+ cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
+ cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
+ ccp->ksb_count -= 2;
+
+ /* Preset some register values and masks that are queue
+ * number dependent
+ */
+ cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
+ (CMD_Q_STATUS_INCR * i);
+ cmd_q->int_ok = 1 << (i * 2);
+ cmd_q->int_err = 1 << ((i * 2) + 1);
+
+ cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
+
+ init_waitqueue_head(&cmd_q->int_queue);
+
+ /* Build queue interrupt mask (two interrupts per queue) */
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+
+#ifdef CONFIG_ARM64
+ /* For arm64 set the recommended queue cache settings */
+ iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+ (CMD_Q_CACHE_INC * i));
+#endif
+
+ dev_dbg(dev, "queue #%u available\n", i);
+ }
+ if (ccp->cmd_q_count == 0) {
+ dev_notice(dev, "no command queues available\n");
+ ret = -EIO;
+ goto e_pool;
+ }
+ dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
+
+ /* Disable and clear interrupts until ready */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ /* Request an irq */
+ ret = ccp->get_irq(ccp);
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_pool;
+ }
+
+ /* Initialize the queues used to wait for KSB space and suspend */
+ init_waitqueue_head(&ccp->ksb_queue);
+ init_waitqueue_head(&ccp->suspend_queue);
+
+ /* Create a kthread for each queue */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct task_struct *kthread;
+
+ cmd_q = &ccp->cmd_q[i];
+
+ kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
+ "%s-q%u", ccp->name, cmd_q->id);
+ if (IS_ERR(kthread)) {
+ dev_err(dev, "error creating queue thread (%ld)\n",
+ PTR_ERR(kthread));
+ ret = PTR_ERR(kthread);
+ goto e_kthread;
+ }
+
+ cmd_q->kthread = kthread;
+ wake_up_process(kthread);
+ }
+
+ /* Register the RNG */
+ ccp->hwrng.name = ccp->rngname;
+ ccp->hwrng.read = ccp_trng_read;
+ ret = hwrng_register(&ccp->hwrng);
+ if (ret) {
+ dev_err(dev, "error registering hwrng (%d)\n", ret);
+ goto e_kthread;
+ }
+
+ ccp_add_device(ccp);
+
+ /* Enable interrupts */
+ iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
+
+ return 0;
+
+e_kthread:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ ccp->free_irq(ccp);
+
+e_pool:
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ return ret;
+}
+
+static void ccp_destroy(struct ccp_device *ccp)
+{
+ struct ccp_cmd_queue *cmd_q;
+ struct ccp_cmd *cmd;
+ unsigned int qim, i;
+
+ /* Remove this device from the list of available units first */
+ ccp_del_device(ccp);
+
+ /* Unregister the RNG */
+ hwrng_unregister(&ccp->hwrng);
+
+ /* Stop the queue kthreads */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ if (ccp->cmd_q[i].kthread)
+ kthread_stop(ccp->cmd_q[i].kthread);
+
+ /* Build queue interrupt mask (two interrupt masks per queue) */
+ qim = 0;
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+ qim |= cmd_q->int_ok | cmd_q->int_err;
+ }
+
+ /* Disable and clear interrupts */
+ iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ ioread32(cmd_q->reg_int_status);
+ ioread32(cmd_q->reg_status);
+ }
+ iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
+
+ ccp->free_irq(ccp);
+
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ dma_pool_destroy(ccp->cmd_q[i].dma_pool);
+
+ /* Flush the cmd and backlog queue */
+ while (!list_empty(&ccp->cmd)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+ while (!list_empty(&ccp->backlog)) {
+ /* Invoke the callback directly with an error code */
+ cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
+ list_del(&cmd->entry);
+ cmd->callback(cmd->data, -ENODEV);
+ }
+}
+
+static irqreturn_t ccp_irq_handler(int irq, void *data)
+{
+ struct device *dev = data;
+ struct ccp_device *ccp = dev_get_drvdata(dev);
+ struct ccp_cmd_queue *cmd_q;
+ u32 q_int, status;
+ unsigned int i;
+
+ status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ cmd_q = &ccp->cmd_q[i];
+
+ q_int = status & (cmd_q->int_ok | cmd_q->int_err);
+ if (q_int) {
+ cmd_q->int_status = status;
+ cmd_q->q_status = ioread32(cmd_q->reg_status);
+ cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
+
+ /* On error, only save the first error value */
+ if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
+ cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
+
+ cmd_q->int_rcvd = 1;
+
+ /* Acknowledge the interrupt and wake the kthread */
+ iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
+ wake_up_interruptible(&cmd_q->int_queue);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ccp_actions ccp3_actions = {
+ .perform_aes = ccp_perform_aes,
+ .perform_xts_aes = ccp_perform_xts_aes,
+ .perform_sha = ccp_perform_sha,
+ .perform_rsa = ccp_perform_rsa,
+ .perform_passthru = ccp_perform_passthru,
+ .perform_ecc = ccp_perform_ecc,
+ .init = ccp_init,
+ .destroy = ccp_destroy,
+ .irqhandler = ccp_irq_handler,
+};
+
+struct ccp_vdata ccpv3 = {
+ .version = CCP_VERSION(3, 0),
+ .perform = &ccp3_actions,
+};
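ccpv3 binds the version number to the v3 ops table; generic code is expected to reach the hardware only through the device's vdata, keeping ccp-dev.c free of v3 register knowledge. A hedged sketch of the assumed dispatch:

/* Sketch: assumed dispatch from generic code into the per-version ops. */
static int ccp_dispatch_aes(struct ccp_op *op)
{
	struct ccp_device *ccp = op->cmd_q->ccp;

	return ccp->vdata->perform->perform_aes(op);	/* v3: ccp_perform_aes() */
}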
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 861bacc1bb94..4dbc18727235 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -16,6 +16,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/rwlock_types.h>
+#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
@@ -37,20 +39,107 @@ struct ccp_tasklet_data {
struct ccp_cmd *cmd;
};
-static struct ccp_device *ccp_dev;
-static inline struct ccp_device *ccp_get_device(void)
+/* List of CCPs, CCP count, read-write access lock, and access functions
+ *
+ * Lock structure: get ccp_unit_lock for reading whenever we need to
+ * examine the CCP list. While holding it for reading we can acquire
+ * the RR lock to update the round-robin next-CCP pointer. The unit lock
+ * must be acquired before the RR lock.
+ *
+ * If the unit lock is acquired for writing, we have total control over
+ * the list, so there's no value in getting the RR lock.
+ */
+static DEFINE_RWLOCK(ccp_unit_lock);
+static LIST_HEAD(ccp_units);
+
+/* Round-robin counter */
+static DEFINE_SPINLOCK(ccp_rr_lock);
+static struct ccp_device *ccp_rr;
+
+/* Ever-increasing value to produce unique unit numbers */
+static atomic_t ccp_unit_ordinal;
+unsigned int ccp_increment_unit_ordinal(void)
{
- return ccp_dev;
+ return atomic_inc_return(&ccp_unit_ordinal);
}
-static inline void ccp_add_device(struct ccp_device *ccp)
+/**
+ * ccp_add_device - add a CCP device to the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Put this CCP on the unit list, which makes it available
+ * for use.
+ */
+void ccp_add_device(struct ccp_device *ccp)
{
- ccp_dev = ccp;
+ unsigned long flags;
+
+ write_lock_irqsave(&ccp_unit_lock, flags);
+ list_add_tail(&ccp->entry, &ccp_units);
+ if (!ccp_rr)
+ /* We already have the list lock (we're first) so this
+ * pointer can't change on us. Set its initial value.
+ */
+ ccp_rr = ccp;
+ write_unlock_irqrestore(&ccp_unit_lock, flags);
}
-static inline void ccp_del_device(struct ccp_device *ccp)
+/**
+ * ccp_del_device - remove a CCP device from the list
+ *
+ * @ccp: ccp_device struct pointer
+ *
+ * Remove this unit from the list of devices. If the next device
+ * up for use is this one, adjust the pointer. If this is the last
+ * device, NULL the pointer.
+ */
+void ccp_del_device(struct ccp_device *ccp)
{
- ccp_dev = NULL;
+ unsigned long flags;
+
+ write_lock_irqsave(&ccp_unit_lock, flags);
+ if (ccp_rr == ccp) {
+ /* ccp_unit_lock is read/write; any read access
+ * will be suspended while we make changes to the
+ * list and RR pointer.
+ */
+ if (list_is_last(&ccp_rr->entry, &ccp_units))
+ ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+ entry);
+ else
+ ccp_rr = list_next_entry(ccp_rr, entry);
+ }
+ list_del(&ccp->entry);
+ if (list_empty(&ccp_units))
+ ccp_rr = NULL;
+ write_unlock_irqrestore(&ccp_unit_lock, flags);
+}
+
+static struct ccp_device *ccp_get_device(void)
+{
+ unsigned long flags;
+ struct ccp_device *dp = NULL;
+
+ /* We round-robin through the unit list.
+ * The (ccp_rr) pointer refers to the next unit to use.
+ */
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ if (!list_empty(&ccp_units)) {
+ spin_lock(&ccp_rr_lock);
+ dp = ccp_rr;
+ if (list_is_last(&ccp_rr->entry, &ccp_units))
+ ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
+ entry);
+ else
+ ccp_rr = list_next_entry(ccp_rr, entry);
+ spin_unlock(&ccp_rr_lock);
+ }
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return dp;
}
/**
@@ -60,14 +149,41 @@ static inline void ccp_del_device(struct ccp_device *ccp)
*/
int ccp_present(void)
{
- if (ccp_get_device())
- return 0;
+ unsigned long flags;
+ int ret;
- return -ENODEV;
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ ret = list_empty(&ccp_units);
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);
/**
+ * ccp_version - get the version of the CCP device
+ *
+ * Returns the version from the first unit on the list;
+ * otherwise zero if no CCP device is present
+ */
+unsigned int ccp_version(void)
+{
+ struct ccp_device *dp;
+ unsigned long flags;
+ int ret = 0;
+
+ read_lock_irqsave(&ccp_unit_lock, flags);
+ if (!list_empty(&ccp_units)) {
+ dp = list_first_entry(&ccp_units, struct ccp_device, entry);
+ ret = dp->vdata->version;
+ }
+ read_unlock_irqrestore(&ccp_unit_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ccp_version);
+
+/**
* ccp_enqueue_cmd - queue an operation for processing by the CCP
*
* @cmd: ccp_cmd struct to be processed
@@ -221,7 +337,12 @@ static void ccp_do_cmd_complete(unsigned long data)
complete(&tdata->completion);
}
-static int ccp_cmd_queue_thread(void *data)
+/**
+ * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
+ *
+ * @data: thread-specific data
+ */
+int ccp_cmd_queue_thread(void *data)
{
struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
struct ccp_cmd *cmd;
@@ -257,35 +378,6 @@ static int ccp_cmd_queue_thread(void *data)
return 0;
}
-static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
- u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
-
- /*
- * Locking is provided by the caller so we can update device
- * hwrng-related fields safely
- */
- trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
- if (!trng_value) {
- /* Zero is returned if not data is available or if a
- * bad-entropy error is present. Assume an error if
- * we exceed TRNG_RETRIES reads of zero.
- */
- if (ccp->hwrng_retries++ > TRNG_RETRIES)
- return -EIO;
-
- return 0;
- }
-
- /* Reset the counter and save the rng value */
- ccp->hwrng_retries = 0;
- memcpy(data, &trng_value, len);
-
- return len;
-}
-
/**
* ccp_alloc_struct - allocate and initialize the ccp_device struct
*
@@ -309,253 +401,11 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
ccp->ksb_count = KSB_COUNT;
ccp->ksb_start = 0;
- return ccp;
-}
-
-/**
- * ccp_init - initialize the CCP device
- *
- * @ccp: ccp_device struct
- */
-int ccp_init(struct ccp_device *ccp)
-{
- struct device *dev = ccp->dev;
- struct ccp_cmd_queue *cmd_q;
- struct dma_pool *dma_pool;
- char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
- unsigned int qmr, qim, i;
- int ret;
-
- /* Find available queues */
- qim = 0;
- qmr = ioread32(ccp->io_regs + Q_MASK_REG);
- for (i = 0; i < MAX_HW_QUEUES; i++) {
- if (!(qmr & (1 << i)))
- continue;
-
- /* Allocate a dma pool for this queue */
- snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
- dma_pool = dma_pool_create(dma_pool_name, dev,
- CCP_DMAPOOL_MAX_SIZE,
- CCP_DMAPOOL_ALIGN, 0);
- if (!dma_pool) {
- dev_err(dev, "unable to allocate dma pool\n");
- ret = -ENOMEM;
- goto e_pool;
- }
-
- cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
- ccp->cmd_q_count++;
-
- cmd_q->ccp = ccp;
- cmd_q->id = i;
- cmd_q->dma_pool = dma_pool;
-
- /* Reserve 2 KSB regions for the queue */
- cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
- cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
- ccp->ksb_count -= 2;
-
- /* Preset some register values and masks that are queue
- * number dependent
- */
- cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
- (CMD_Q_STATUS_INCR * i);
- cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
- (CMD_Q_STATUS_INCR * i);
- cmd_q->int_ok = 1 << (i * 2);
- cmd_q->int_err = 1 << ((i * 2) + 1);
-
- cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
-
- init_waitqueue_head(&cmd_q->int_queue);
-
- /* Build queue interrupt mask (two interrupts per queue) */
- qim |= cmd_q->int_ok | cmd_q->int_err;
+ ccp->ord = ccp_increment_unit_ordinal();
+ snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
+ snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
-#ifdef CONFIG_ARM64
- /* For arm64 set the recommended queue cache settings */
- iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
- (CMD_Q_CACHE_INC * i));
-#endif
-
- dev_dbg(dev, "queue #%u available\n", i);
- }
- if (ccp->cmd_q_count == 0) {
- dev_notice(dev, "no command queues available\n");
- ret = -EIO;
- goto e_pool;
- }
- dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);
-
- /* Disable and clear interrupts until ready */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- ioread32(cmd_q->reg_int_status);
- ioread32(cmd_q->reg_status);
- }
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
- /* Request an irq */
- ret = ccp->get_irq(ccp);
- if (ret) {
- dev_err(dev, "unable to allocate an IRQ\n");
- goto e_pool;
- }
-
- /* Initialize the queues used to wait for KSB space and suspend */
- init_waitqueue_head(&ccp->ksb_queue);
- init_waitqueue_head(&ccp->suspend_queue);
-
- /* Create a kthread for each queue */
- for (i = 0; i < ccp->cmd_q_count; i++) {
- struct task_struct *kthread;
-
- cmd_q = &ccp->cmd_q[i];
-
- kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
- "ccp-q%u", cmd_q->id);
- if (IS_ERR(kthread)) {
- dev_err(dev, "error creating queue thread (%ld)\n",
- PTR_ERR(kthread));
- ret = PTR_ERR(kthread);
- goto e_kthread;
- }
-
- cmd_q->kthread = kthread;
- wake_up_process(kthread);
- }
-
- /* Register the RNG */
- ccp->hwrng.name = "ccp-rng";
- ccp->hwrng.read = ccp_trng_read;
- ret = hwrng_register(&ccp->hwrng);
- if (ret) {
- dev_err(dev, "error registering hwrng (%d)\n", ret);
- goto e_kthread;
- }
-
- /* Make the device struct available before enabling interrupts */
- ccp_add_device(ccp);
-
- /* Enable interrupts */
- iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);
-
- return 0;
-
-e_kthread:
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
-
- ccp->free_irq(ccp);
-
-e_pool:
- for (i = 0; i < ccp->cmd_q_count; i++)
- dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
- return ret;
-}
-
-/**
- * ccp_destroy - tear down the CCP device
- *
- * @ccp: ccp_device struct
- */
-void ccp_destroy(struct ccp_device *ccp)
-{
- struct ccp_cmd_queue *cmd_q;
- struct ccp_cmd *cmd;
- unsigned int qim, i;
-
- /* Remove general access to the device struct */
- ccp_del_device(ccp);
-
- /* Unregister the RNG */
- hwrng_unregister(&ccp->hwrng);
-
- /* Stop the queue kthreads */
- for (i = 0; i < ccp->cmd_q_count; i++)
- if (ccp->cmd_q[i].kthread)
- kthread_stop(ccp->cmd_q[i].kthread);
-
- /* Build queue interrupt mask (two interrupt masks per queue) */
- qim = 0;
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
- qim |= cmd_q->int_ok | cmd_q->int_err;
- }
-
- /* Disable and clear interrupts */
- iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- ioread32(cmd_q->reg_int_status);
- ioread32(cmd_q->reg_status);
- }
- iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);
-
- ccp->free_irq(ccp);
-
- for (i = 0; i < ccp->cmd_q_count; i++)
- dma_pool_destroy(ccp->cmd_q[i].dma_pool);
-
- /* Flush the cmd and backlog queue */
- while (!list_empty(&ccp->cmd)) {
- /* Invoke the callback directly with an error code */
- cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
- list_del(&cmd->entry);
- cmd->callback(cmd->data, -ENODEV);
- }
- while (!list_empty(&ccp->backlog)) {
- /* Invoke the callback directly with an error code */
- cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
- list_del(&cmd->entry);
- cmd->callback(cmd->data, -ENODEV);
- }
-}
-
-/**
- * ccp_irq_handler - handle interrupts generated by the CCP device
- *
- * @irq: the irq associated with the interrupt
- * @data: the data value supplied when the irq was created
- */
-irqreturn_t ccp_irq_handler(int irq, void *data)
-{
- struct device *dev = data;
- struct ccp_device *ccp = dev_get_drvdata(dev);
- struct ccp_cmd_queue *cmd_q;
- u32 q_int, status;
- unsigned int i;
-
- status = ioread32(ccp->io_regs + IRQ_STATUS_REG);
-
- for (i = 0; i < ccp->cmd_q_count; i++) {
- cmd_q = &ccp->cmd_q[i];
-
- q_int = status & (cmd_q->int_ok | cmd_q->int_err);
- if (q_int) {
- cmd_q->int_status = status;
- cmd_q->q_status = ioread32(cmd_q->reg_status);
- cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);
-
- /* On error, only save the first error value */
- if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
- cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
-
- cmd_q->int_rcvd = 1;
-
- /* Acknowledge the interrupt and wake the kthread */
- iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
- wake_up_interruptible(&cmd_q->int_queue);
- }
- }
-
- return IRQ_HANDLED;
+ return ccp;
}
#ifdef CONFIG_PM
@@ -577,41 +427,22 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
}
#endif
-#ifdef CONFIG_X86
-static const struct x86_cpu_id ccp_support[] = {
- { X86_VENDOR_AMD, 22, },
- { },
-};
-#endif
-
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
int ret;
- if (!x86_match_cpu(ccp_support))
- return -ENODEV;
-
- switch (cpuinfo->x86) {
- case 22:
- if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
- return -ENODEV;
-
- ret = ccp_pci_init();
- if (ret)
- return ret;
-
- /* Don't leave the driver loaded if init failed */
- if (!ccp_get_device()) {
- ccp_pci_exit();
- return -ENODEV;
- }
-
- return 0;
+ ret = ccp_pci_init();
+ if (ret)
+ return ret;
- break;
+ /* Don't leave the driver loaded if init failed */
+ if (ccp_present() != 0) {
+ ccp_pci_exit();
+ return -ENODEV;
}
+
+ return 0;
#endif
#ifdef CONFIG_ARM64
@@ -622,7 +453,7 @@ static int __init ccp_mod_init(void)
return ret;
/* Don't leave the driver loaded if init failed */
- if (!ccp_get_device()) {
+ if (ccp_present() != 0) {
ccp_platform_exit();
return -ENODEV;
}
@@ -636,13 +467,7 @@ static int __init ccp_mod_init(void)
static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
- struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
-
- switch (cpuinfo->x86) {
- case 22:
- ccp_pci_exit();
- break;
- }
+ ccp_pci_exit();
#endif
#ifdef CONFIG_ARM64
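With the CPU-model table gone, ccp_mod_init() no longer second-guesses the hardware; it simply asks whether a CCP registered. ccp_present() returns 0 when a device is available and -ENODEV otherwise, so a consumer can gate its setup on that. A minimal sketch (my_setup, my_ccp_path and my_sw_fallback are hypothetical helpers, not part of this patch):

/* Sketch: prefer the CCP when one is registered, otherwise take a
 * software path. ccp_present() returns 0 if a CCP device is present. */
static int my_setup(void)
{
	if (ccp_present() != 0)
		return my_sw_fallback();	/* hypothetical helper */

	return my_ccp_path();			/* hypothetical helper */
}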
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 6ff89031fb96..7745d0be491d 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -23,6 +23,7 @@
#include <linux/hw_random.h>
#include <linux/bitops.h>
+#define MAX_CCP_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
#define MAX_HW_QUEUES 5
@@ -140,6 +141,29 @@
#define CCP_ECC_RESULT_OFFSET 60
#define CCP_ECC_RESULT_SUCCESS 0x0001
+struct ccp_op;
+
+/* Structure for computation functions that are device-specific */
+struct ccp_actions {
+ int (*perform_aes)(struct ccp_op *);
+ int (*perform_xts_aes)(struct ccp_op *);
+ int (*perform_sha)(struct ccp_op *);
+ int (*perform_rsa)(struct ccp_op *);
+ int (*perform_passthru)(struct ccp_op *);
+ int (*perform_ecc)(struct ccp_op *);
+ int (*init)(struct ccp_device *);
+ void (*destroy)(struct ccp_device *);
+ irqreturn_t (*irqhandler)(int, void *);
+};
+
+/* Structure to hold CCP version-specific values */
+struct ccp_vdata {
+ unsigned int version;
+ struct ccp_actions *perform;
+};
+
+extern struct ccp_vdata ccpv3;
+
struct ccp_device;
struct ccp_cmd;
@@ -184,6 +208,13 @@ struct ccp_cmd_queue {
} ____cacheline_aligned;
struct ccp_device {
+ struct list_head entry;
+
+ struct ccp_vdata *vdata;
+ unsigned int ord;
+ char name[MAX_CCP_NAME_LEN];
+ char rngname[MAX_CCP_NAME_LEN];
+
struct device *dev;
/*
@@ -258,18 +289,132 @@ struct ccp_device {
unsigned int axcache;
};
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_KSB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE__LAST,
+};
+
+struct ccp_dma_info {
+ dma_addr_t address;
+ unsigned int offset;
+ unsigned int length;
+ enum dma_data_direction dir;
+};
+
+struct ccp_dm_workarea {
+ struct device *dev;
+ struct dma_pool *dma_pool;
+ unsigned int length;
+
+ u8 *address;
+ struct ccp_dma_info dma;
+};
+
+struct ccp_sg_workarea {
+ struct scatterlist *sg;
+ int nents;
+
+ struct scatterlist *dma_sg;
+ struct device *dma_dev;
+ unsigned int dma_count;
+ enum dma_data_direction dma_dir;
+
+ unsigned int sg_used;
+
+ u64 bytes_left;
+};
+
+struct ccp_data {
+ struct ccp_sg_workarea sg_wa;
+ struct ccp_dm_workarea dm_wa;
+};
+
+struct ccp_mem {
+ enum ccp_memtype type;
+ union {
+ struct ccp_dma_info dma;
+ u32 ksb;
+ } u;
+};
+
+struct ccp_aes_op {
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+};
+
+struct ccp_xts_aes_op {
+ enum ccp_aes_action action;
+ enum ccp_xts_aes_unit_size unit_size;
+};
+
+struct ccp_sha_op {
+ enum ccp_sha_type type;
+ u64 msg_bits;
+};
+
+struct ccp_rsa_op {
+ u32 mod_size;
+ u32 input_len;
+};
+
+struct ccp_passthru_op {
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+};
+
+struct ccp_ecc_op {
+ enum ccp_ecc_function function;
+};
+
+struct ccp_op {
+ struct ccp_cmd_queue *cmd_q;
+
+ u32 jobid;
+ u32 ioc;
+ u32 soc;
+ u32 ksb_key;
+ u32 ksb_ctx;
+ u32 init;
+ u32 eom;
+
+ struct ccp_mem src;
+ struct ccp_mem dst;
+
+ union {
+ struct ccp_aes_op aes;
+ struct ccp_xts_aes_op xts;
+ struct ccp_sha_op sha;
+ struct ccp_rsa_op rsa;
+ struct ccp_passthru_op passthru;
+ struct ccp_ecc_op ecc;
+ } u;
+};
+
+static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
+{
+ return lower_32_bits(info->address + info->offset);
+}
+
+static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
+{
+ return upper_32_bits(info->address + info->offset) & 0x0000ffff;
+}
+
int ccp_pci_init(void);
void ccp_pci_exit(void);
int ccp_platform_init(void);
void ccp_platform_exit(void);
+void ccp_add_device(struct ccp_device *ccp);
+void ccp_del_device(struct ccp_device *ccp);
+
struct ccp_device *ccp_alloc_struct(struct device *dev);
-int ccp_init(struct ccp_device *ccp);
-void ccp_destroy(struct ccp_device *ccp);
bool ccp_queues_suspended(struct ccp_device *ccp);
-
-irqreturn_t ccp_irq_handler(int irq, void *data);
+int ccp_cmd_queue_thread(void *data);
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
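The ccp_actions/ccp_vdata pair added above turns version-specific behaviour into a table lookup: each probed device carries a vdata pointer, and call sites dispatch through ccp->vdata->perform instead of calling fixed functions. A sketch of how a version table might be wired up — the v3 handler names and the version encoding are assumptions, not this patch's actual contents:

/* Sketch, assumed handler names: one table per hardware generation. */
static struct ccp_actions ccp3_actions = {
	.perform_aes	= ccp3_perform_aes,	/* assumed name */
	.init		= ccp3_init,		/* assumed name */
	.destroy	= ccp3_destroy,		/* assumed name */
	.irqhandler	= ccp3_irq_handler,	/* assumed name */
};

struct ccp_vdata ccpv3 = {
	.version = 3,				/* assumed encoding */
	.perform = &ccp3_actions,
};

A call site then reads cmd_q->ccp->vdata->perform->perform_aes(&op), as the ccp-ops.c hunks below show.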
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 6613aee79b87..eefdf595f758 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -13,124 +13,12 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/kthread.h>
-#include <linux/sched.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/ccp.h>
-#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
+#include <linux/ccp.h>
#include "ccp-dev.h"
-enum ccp_memtype {
- CCP_MEMTYPE_SYSTEM = 0,
- CCP_MEMTYPE_KSB,
- CCP_MEMTYPE_LOCAL,
- CCP_MEMTYPE__LAST,
-};
-
-struct ccp_dma_info {
- dma_addr_t address;
- unsigned int offset;
- unsigned int length;
- enum dma_data_direction dir;
-};
-
-struct ccp_dm_workarea {
- struct device *dev;
- struct dma_pool *dma_pool;
- unsigned int length;
-
- u8 *address;
- struct ccp_dma_info dma;
-};
-
-struct ccp_sg_workarea {
- struct scatterlist *sg;
- int nents;
-
- struct scatterlist *dma_sg;
- struct device *dma_dev;
- unsigned int dma_count;
- enum dma_data_direction dma_dir;
-
- unsigned int sg_used;
-
- u64 bytes_left;
-};
-
-struct ccp_data {
- struct ccp_sg_workarea sg_wa;
- struct ccp_dm_workarea dm_wa;
-};
-
-struct ccp_mem {
- enum ccp_memtype type;
- union {
- struct ccp_dma_info dma;
- u32 ksb;
- } u;
-};
-
-struct ccp_aes_op {
- enum ccp_aes_type type;
- enum ccp_aes_mode mode;
- enum ccp_aes_action action;
-};
-
-struct ccp_xts_aes_op {
- enum ccp_aes_action action;
- enum ccp_xts_aes_unit_size unit_size;
-};
-
-struct ccp_sha_op {
- enum ccp_sha_type type;
- u64 msg_bits;
-};
-
-struct ccp_rsa_op {
- u32 mod_size;
- u32 input_len;
-};
-
-struct ccp_passthru_op {
- enum ccp_passthru_bitwise bit_mod;
- enum ccp_passthru_byteswap byte_swap;
-};
-
-struct ccp_ecc_op {
- enum ccp_ecc_function function;
-};
-
-struct ccp_op {
- struct ccp_cmd_queue *cmd_q;
-
- u32 jobid;
- u32 ioc;
- u32 soc;
- u32 ksb_key;
- u32 ksb_ctx;
- u32 init;
- u32 eom;
-
- struct ccp_mem src;
- struct ccp_mem dst;
-
- union {
- struct ccp_aes_op aes;
- struct ccp_xts_aes_op xts;
- struct ccp_sha_op sha;
- struct ccp_rsa_op rsa;
- struct ccp_passthru_op passthru;
- struct ccp_ecc_op ecc;
- } u;
-};
-
/* SHA initial context values */
static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
@@ -152,253 +40,6 @@ static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
-static u32 ccp_addr_lo(struct ccp_dma_info *info)
-{
- return lower_32_bits(info->address + info->offset);
-}
-
-static u32 ccp_addr_hi(struct ccp_dma_info *info)
-{
- return upper_32_bits(info->address + info->offset) & 0x0000ffff;
-}
-
-static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
-{
- struct ccp_cmd_queue *cmd_q = op->cmd_q;
- struct ccp_device *ccp = cmd_q->ccp;
- void __iomem *cr_addr;
- u32 cr0, cmd;
- unsigned int i;
- int ret = 0;
-
- /* We could read a status register to see how many free slots
- * are actually available, but reading that register resets it
- * and you could lose some error information.
- */
- cmd_q->free_slots--;
-
- cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
- | (op->jobid << REQ0_JOBID_SHIFT)
- | REQ0_WAIT_FOR_WRITE;
-
- if (op->soc)
- cr0 |= REQ0_STOP_ON_COMPLETE
- | REQ0_INT_ON_COMPLETE;
-
- if (op->ioc || !cmd_q->free_slots)
- cr0 |= REQ0_INT_ON_COMPLETE;
-
- /* Start at CMD_REQ1 */
- cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;
-
- mutex_lock(&ccp->req_mutex);
-
- /* Write CMD_REQ1 through CMD_REQx first */
- for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
- iowrite32(*(cr + i), cr_addr);
-
- /* Tell the CCP to start */
- wmb();
- iowrite32(cr0, ccp->io_regs + CMD_REQ0);
-
- mutex_unlock(&ccp->req_mutex);
-
- if (cr0 & REQ0_INT_ON_COMPLETE) {
- /* Wait for the job to complete */
- ret = wait_event_interruptible(cmd_q->int_queue,
- cmd_q->int_rcvd);
- if (ret || cmd_q->cmd_error) {
- /* On error delete all related jobs from the queue */
- cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
- | op->jobid;
-
- iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
-
- if (!ret)
- ret = -EIO;
- } else if (op->soc) {
- /* Delete just head job from the queue on SoC */
- cmd = DEL_Q_ACTIVE
- | (cmd_q->id << DEL_Q_ID_SHIFT)
- | op->jobid;
-
- iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
- }
-
- cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
-
- cmd_q->int_rcvd = 0;
- }
-
- return ret;
-}
-
-static int ccp_perform_aes(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
- | (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
- | (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
- | (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- if (op->u.aes.mode == CCP_AES_MODE_CFB)
- cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- if (op->init)
- cr[0] |= REQ1_INIT;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_xts_aes(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
- | (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
- | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT);
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- if (op->init)
- cr[0] |= REQ1_INIT;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_sha(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
- | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
- | REQ1_INIT;
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
-
- if (op->eom) {
- cr[0] |= REQ1_EOM;
- cr[4] = lower_32_bits(op->u.sha.msg_bits);
- cr[5] = upper_32_bits(op->u.sha.msg_bits);
- } else {
- cr[4] = 0;
- cr[5] = 0;
- }
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_rsa(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
- | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
- | (op->ksb_key << REQ1_KEY_KSB_SHIFT)
- | REQ1_EOM;
- cr[1] = op->u.rsa.input_len - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
- | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_passthru(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
- | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
- | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);
-
- if (op->src.type == CCP_MEMTYPE_SYSTEM)
- cr[1] = op->src.u.dma.length - 1;
- else
- cr[1] = op->dst.u.dma.length - 1;
-
- if (op->src.type == CCP_MEMTYPE_SYSTEM) {
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
-
- if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
- cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
- } else {
- cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
- cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
- }
-
- if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
- } else {
- cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
- cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
- }
-
- if (op->eom)
- cr[0] |= REQ1_EOM;
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
-static int ccp_perform_ecc(struct ccp_op *op)
-{
- u32 cr[6];
-
- /* Fill out the register contents for REQ1 through REQ6 */
- cr[0] = REQ1_ECC_AFFINE_CONVERT
- | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
- | (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
- | REQ1_EOM;
- cr[1] = op->src.u.dma.length - 1;
- cr[2] = ccp_addr_lo(&op->src.u.dma);
- cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->src.u.dma);
- cr[4] = ccp_addr_lo(&op->dst.u.dma);
- cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
- | ccp_addr_hi(&op->dst.u.dma);
-
- return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
-}
-
static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
{
int start;
@@ -837,7 +478,7 @@ static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
op.u.passthru.byte_swap = byte_swap;
- return ccp_perform_passthru(&op);
+ return cmd_q->ccp->vdata->perform->perform_passthru(&op);
}
static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
@@ -969,7 +610,7 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
}
}
- ret = ccp_perform_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_src;
@@ -1131,7 +772,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.soc = 1;
}
- ret = ccp_perform_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1296,7 +937,7 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
if (!src.sg_wa.bytes_left)
op.eom = 1;
- ret = ccp_perform_xts_aes(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_xts_aes(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1453,7 +1094,7 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
if (sha->final && !src.sg_wa.bytes_left)
op.eom = 1;
- ret = ccp_perform_sha(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_sha(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_data;
@@ -1633,7 +1274,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.rsa.mod_size = rsa->key_size;
op.u.rsa.input_len = i_len;
- ret = ccp_perform_rsa(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_rsa(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1758,7 +1399,7 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
op.dst.u.dma.offset = dst.sg_wa.sg_used;
op.dst.u.dma.length = op.src.u.dma.length;
- ret = ccp_perform_passthru(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -1870,7 +1511,7 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.ecc.function = cmd->u.ecc.function;
- ret = ccp_perform_ecc(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
@@ -2034,7 +1675,7 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.ecc.function = cmd->u.ecc.function;
- ret = ccp_perform_ecc(&op);
+ ret = cmd_q->ccp->vdata->perform->perform_ecc(&op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 7690467c42f8..0bf262e36b6b 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -59,9 +59,11 @@ static int ccp_get_msix_irqs(struct ccp_device *ccp)
ccp_pci->msix_count = ret;
for (v = 0; v < ccp_pci->msix_count; v++) {
/* Set the interrupt names and request the irqs */
- snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
+ snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
+ ccp->name, v);
ccp_pci->msix[v].vector = msix_entry[v].vector;
- ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
+ ret = request_irq(ccp_pci->msix[v].vector,
+ ccp->vdata->perform->irqhandler,
0, ccp_pci->msix[v].name, dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
@@ -94,7 +96,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
return ret;
ccp->irq = pdev->irq;
- ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+ ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
+ ccp->name, dev);
if (ret) {
dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
goto e_msi;
@@ -179,6 +182,12 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto e_err;
ccp->dev_specific = ccp_pci;
+ ccp->vdata = (struct ccp_vdata *)id->driver_data;
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
@@ -221,7 +230,7 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_set_drvdata(dev, ccp);
- ret = ccp_init(ccp);
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
goto e_iomap;
@@ -251,7 +260,7 @@ static void ccp_pci_remove(struct pci_dev *pdev)
if (!ccp)
return;
- ccp_destroy(ccp);
+ ccp->vdata->perform->destroy(ccp);
pci_iounmap(pdev, ccp->io_map);
@@ -312,7 +321,7 @@ static int ccp_pci_resume(struct pci_dev *pdev)
#endif
static const struct pci_device_id ccp_pci_table[] = {
- { PCI_VDEVICE(AMD, 0x1537), },
+ { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
/* Last entry must be zero */
{ 0, }
};
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index 66dd7c9d08c3..351f28d8c336 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -1,7 +1,7 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
- * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ * Copyright (C) 2014,2016 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*
@@ -32,6 +32,33 @@ struct ccp_platform {
int coherent;
};
+static const struct acpi_device_id ccp_acpi_match[];
+static const struct of_device_id ccp_of_match[];
+
+static struct ccp_vdata *ccp_get_of_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ const struct of_device_id *match;
+
+ match = of_match_node(ccp_of_match, pdev->dev.of_node);
+ if (match && match->data)
+ return (struct ccp_vdata *)match->data;
+#endif
+	return NULL;
+}
+
+static struct ccp_vdata *ccp_get_acpi_version(struct platform_device *pdev)
+{
+#ifdef CONFIG_ACPI
+ const struct acpi_device_id *match;
+
+ match = acpi_match_device(ccp_acpi_match, &pdev->dev);
+ if (match && match->driver_data)
+ return (struct ccp_vdata *)match->driver_data;
+#endif
+	return NULL;
+}
+
static int ccp_get_irq(struct ccp_device *ccp)
{
struct device *dev = ccp->dev;
@@ -43,7 +70,8 @@ static int ccp_get_irq(struct ccp_device *ccp)
return ret;
ccp->irq = ret;
- ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+ ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
+ ccp->name, dev);
if (ret) {
dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
return ret;
@@ -106,6 +134,13 @@ static int ccp_platform_probe(struct platform_device *pdev)
goto e_err;
ccp->dev_specific = ccp_platform;
+ ccp->vdata = pdev->dev.of_node ? ccp_get_of_version(pdev)
+ : ccp_get_acpi_version(pdev);
+ if (!ccp->vdata || !ccp->vdata->version) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
@@ -137,7 +172,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ccp);
- ret = ccp_init(ccp);
+ ret = ccp->vdata->perform->init(ccp);
if (ret)
goto e_err;
@@ -155,7 +190,7 @@ static int ccp_platform_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
- ccp_destroy(ccp);
+ ccp->vdata->perform->destroy(ccp);
dev_notice(dev, "disabled\n");
@@ -214,7 +249,7 @@ static int ccp_platform_resume(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id ccp_acpi_match[] = {
- { "AMDI0C00", 0 },
+ { "AMDI0C00", (kernel_ulong_t)&ccpv3 },
{ },
};
MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
@@ -222,7 +257,8 @@ MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id ccp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a" },
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&ccpv3 },
{ },
};
MODULE_DEVICE_TABLE(of, ccp_of_match);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index e52496a172d0..2296934455fc 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1031,6 +1031,18 @@ static int aead_perform(struct aead_request *req, int encrypt,
BUG_ON(ivsize && !req->iv);
memcpy(crypt->iv, req->iv, ivsize);
+ buf = chainup_buffers(dev, req->src, crypt->auth_len,
+ &src_hook, flags, src_direction);
+ req_ctx->src = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
+ if (!buf)
+ goto free_buf_src;
+
+ lastlen = buf->buf_len;
+ if (lastlen >= authsize)
+ crypt->icv_rev_aes = buf->phys_addr +
+ buf->buf_len - authsize;
+
req_ctx->dst = NULL;
if (req->src != req->dst) {
@@ -1055,20 +1067,6 @@ static int aead_perform(struct aead_request *req, int encrypt,
}
}
- buf = chainup_buffers(dev, req->src, crypt->auth_len,
- &src_hook, flags, src_direction);
- req_ctx->src = src_hook.next;
- crypt->src_buf = src_hook.phys_next;
- if (!buf)
- goto free_buf_src;
-
- if (!encrypt || !req_ctx->dst) {
- lastlen = buf->buf_len;
- if (lastlen >= authsize)
- crypt->icv_rev_aes = buf->phys_addr +
- buf->buf_len - authsize;
- }
-
if (unlikely(lastlen < authsize)) {
/* The 12 hmac bytes are scattered,
* we need to copy them into a safe buffer */
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index c0656e7f37b5..80239ae69527 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
cesa->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cesa->regs))
- return -ENOMEM;
+ return PTR_ERR(cesa->regs);
ret = mv_cesa_dev_dma_init(cesa);
if (ret)
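The cesa.c change is the standard ERR_PTR idiom: devm_ioremap_resource() encodes the failure reason in the returned pointer, so the probe should propagate that code with PTR_ERR() rather than flattening every failure to -ENOMEM:

/* Sketch: propagate the encoded errno instead of inventing one. */
void __iomem *base;

base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
	return PTR_ERR(base);	/* may be -EBUSY or -EINVAL, not only -ENOMEM */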
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index bd985e72520b..74071e45ada0 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -588,6 +588,7 @@ struct mv_cesa_ahash_dma_req {
struct mv_cesa_tdma_req base;
u8 *padding;
dma_addr_t padding_dma;
+ u8 *cache;
dma_addr_t cache_dma;
};
@@ -609,7 +610,7 @@ struct mv_cesa_ahash_req {
struct mv_cesa_ahash_std_req std;
} req;
struct mv_cesa_op_ctx op_tmpl;
- u8 *cache;
+ u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
unsigned int cache_ptr;
u64 len;
int src_nents;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 683cca9ac3c4..7ca2e0f9dc2e 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -45,69 +45,25 @@ mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
return mv_cesa_req_dma_iter_next_op(&iter->base);
}
-static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
- gfp_t flags)
+static inline int
+mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
- struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;
-
- creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
- &dreq->cache_dma);
- if (!creq->cache)
- return -ENOMEM;
-
- return 0;
-}
-
-static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
- gfp_t flags)
-{
- creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
- if (!creq->cache)
+ req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
+ &req->cache_dma);
+ if (!req->cache)
return -ENOMEM;
return 0;
}
-static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
-{
- struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int ret;
-
- if (creq->cache)
- return 0;
-
- if (creq->req.base.type == CESA_DMA_REQ)
- ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
- else
- ret = mv_cesa_ahash_std_alloc_cache(creq, flags);
-
- return ret;
-}
-
-static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
-{
- dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
- creq->req.dma.cache_dma);
-}
-
-static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
-{
- kfree(creq->cache);
-}
-
-static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
+static inline void
+mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
- if (!creq->cache)
+ if (!req->cache)
return;
- if (creq->req.base.type == CESA_DMA_REQ)
- mv_cesa_ahash_dma_free_cache(creq);
- else
- mv_cesa_ahash_std_free_cache(creq);
-
- creq->cache = NULL;
+ dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
+ req->cache_dma);
}
static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
@@ -146,6 +102,7 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
+ mv_cesa_ahash_dma_free_cache(&creq->req.dma);
mv_cesa_dma_cleanup(&creq->req.dma.base);
}
@@ -161,8 +118,6 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- mv_cesa_ahash_free_cache(creq);
-
if (creq->req.base.type == CESA_DMA_REQ)
mv_cesa_ahash_dma_last_cleanup(req);
}
@@ -445,14 +400,6 @@ static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
- int ret;
-
- if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
- !creq->last_req) {
- ret = mv_cesa_ahash_alloc_cache(req);
- if (ret)
- return ret;
- }
if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
*cached = true;
@@ -505,10 +452,17 @@ mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
gfp_t flags)
{
struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
+ int ret;
if (!creq->cache_ptr)
return 0;
+ ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
+ if (ret)
+ return ret;
+
+ memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
+
return mv_cesa_dma_add_data_transfer(chain,
CESA_SA_DATA_SRAM_OFFSET,
ahashdreq->cache_dma,
@@ -848,10 +802,6 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
if (!cache_ptr)
return 0;
- ret = mv_cesa_ahash_alloc_cache(req);
- if (ret)
- return ret;
-
memcpy(creq->cache, cache, cache_ptr);
creq->cache_ptr = cache_ptr;
@@ -860,9 +810,14 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
static int mv_cesa_md5_init(struct ahash_request *req)
{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+ creq->state[0] = MD5_H0;
+ creq->state[1] = MD5_H1;
+ creq->state[2] = MD5_H2;
+ creq->state[3] = MD5_H3;
mv_cesa_ahash_init(req, &tmpl, true);
@@ -923,9 +878,15 @@ struct ahash_alg mv_md5_alg = {
static int mv_cesa_sha1_init(struct ahash_request *req)
{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
+ creq->state[0] = SHA1_H0;
+ creq->state[1] = SHA1_H1;
+ creq->state[2] = SHA1_H2;
+ creq->state[3] = SHA1_H3;
+ creq->state[4] = SHA1_H4;
mv_cesa_ahash_init(req, &tmpl, false);
@@ -986,9 +947,18 @@ struct ahash_alg mv_sha1_alg = {
static int mv_cesa_sha256_init(struct ahash_request *req)
{
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_op_ctx tmpl = { };
mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+ creq->state[0] = SHA256_H0;
+ creq->state[1] = SHA256_H1;
+ creq->state[2] = SHA256_H2;
+ creq->state[3] = SHA256_H3;
+ creq->state[4] = SHA256_H4;
+ creq->state[5] = SHA256_H5;
+ creq->state[6] = SHA256_H6;
+ creq->state[7] = SHA256_H7;
mv_cesa_ahash_init(req, &tmpl, false);
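Net effect of the hash.c rework: the partial-block cache now lives inline in mv_cesa_ahash_req, so the std path needs no allocation at all, and the DMA path copies the cached bytes into a pool buffer only at the moment they are chained into a transfer. A sketch of that lazy-copy step (pool and dma_handle stand in for the driver's cache_pool plumbing):

/* Sketch: copy the inline cache into a DMA-able buffer on demand. */
buf = dma_pool_alloc(pool, flags, &dma_handle);
if (!buf)
	return -ENOMEM;
memcpy(buf, creq->cache, creq->cache_ptr);
/* ... hand dma_handle to the TDMA descriptor chain ... */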
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 046c1c45411b..d94e25df503b 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -308,7 +308,7 @@ int nx842_crypto_compress(struct crypto_tfm *tfm,
h = !n && add_header ? hdrsize : 0;
if (ignore)
- pr_warn("interal error, ignore is set %x\n", ignore);
+ pr_warn("internal error, ignore is set %x\n", ignore);
ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
if (ret)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index dd355bd19474..d420ec751c7c 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -36,6 +36,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
@@ -152,13 +153,10 @@ struct omap_aes_dev {
unsigned long flags;
int err;
- spinlock_t lock;
- struct crypto_queue queue;
-
struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
struct ablkcipher_request *req;
+ struct crypto_engine *engine;
/*
* total is used by PIO mode for bookkeeping so introduce
@@ -532,9 +530,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
pr_debug("err: %d\n", err);
- dd->flags &= ~FLAGS_BUSY;
-
- req->base.complete(&req->base, err);
+ crypto_finalize_request(dd->engine, req, err);
}
static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -604,34 +600,25 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
- struct crypto_async_request *async_req, *backlog;
- struct omap_aes_ctx *ctx;
- struct omap_aes_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0, len;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = ablkcipher_enqueue_request(&dd->queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&dd->queue);
- async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
+ return crypto_transfer_request_to_engine(dd->engine, req);
- if (!async_req)
- return ret;
+ return 0;
+}
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+ struct omap_aes_reqctx *rctx;
+ int len;
- req = ablkcipher_request_cast(async_req);
+ if (!dd)
+ return -ENODEV;
/* assign new request to device */
dd->req = req;
@@ -662,16 +649,20 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd->ctx = ctx;
ctx->dd = dd;
- err = omap_aes_write_ctrl(dd);
- if (!err)
- err = omap_aes_crypt_dma_start(dd);
- if (err) {
- /* aes_task will not finish it, so do it here */
- omap_aes_finish_req(dd, err);
- tasklet_schedule(&dd->queue_task);
- }
+ return omap_aes_write_ctrl(dd);
+}
- return ret; /* return ret, which is enqueue return value */
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+
+ if (!dd)
+ return -ENODEV;
+
+ return omap_aes_crypt_dma_start(dd);
}
static void omap_aes_done_task(unsigned long data)
@@ -704,18 +695,10 @@ static void omap_aes_done_task(unsigned long data)
}
omap_aes_finish_req(dd, 0);
- omap_aes_handle_queue(dd, NULL);
pr_debug("exit\n");
}
-static void omap_aes_queue_task(unsigned long data)
-{
- struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-
- omap_aes_handle_queue(dd, NULL);
-}
-
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -1175,9 +1158,6 @@ static int omap_aes_probe(struct platform_device *pdev)
dd->dev = dev;
platform_set_drvdata(pdev, dd);
- spin_lock_init(&dd->lock);
- crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
-
err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
omap_aes_get_res_pdev(dd, pdev, &res);
if (err)
@@ -1209,7 +1189,6 @@ static int omap_aes_probe(struct platform_device *pdev)
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
- tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
err = omap_aes_dma_init(dd);
if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
@@ -1250,7 +1229,20 @@ static int omap_aes_probe(struct platform_device *pdev)
}
}
+ /* Initialize crypto engine */
+ dd->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dd->engine)
+ goto err_algs;
+
+ dd->engine->prepare_request = omap_aes_prepare_req;
+ dd->engine->crypt_one_request = omap_aes_crypt_req;
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine;
+
return 0;
+err_engine:
+ crypto_engine_exit(dd->engine);
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
@@ -1260,7 +1252,6 @@ err_algs:
omap_aes_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
pm_runtime_disable(dev);
err_res:
dd = NULL;
@@ -1286,8 +1277,8 @@ static int omap_aes_remove(struct platform_device *pdev)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
+ crypto_engine_exit(dd->engine);
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
dd = NULL;
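The omap-aes conversion drops the driver's private crypto_queue, spinlock and queue tasklet in favour of the generic crypto engine: requests are handed to the engine, which calls back into prepare_request and crypt_one_request in order. The wiring reduces to the calls below (in this API generation the second argument of crypto_engine_alloc_init() selects a realtime kthread):

/* Sketch of the crypto engine setup used by the probe above. */
dd->engine = crypto_engine_alloc_init(dev, 1);
if (!dd->engine)
	return -ENOMEM;

dd->engine->prepare_request = omap_aes_prepare_req;
dd->engine->crypt_one_request = omap_aes_crypt_req;
err = crypto_engine_start(dd->engine);
if (err)
	crypto_engine_exit(dd->engine);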
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index f96d427e502c..5a07208ce778 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -55,8 +55,8 @@
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
-#define ADF_C62X_DEVICE_NAME "c62x"
-#define ADF_C62XVF_DEVICE_NAME "c62xvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index e78a1d7d88fc..b40d9c8dad96 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -121,7 +121,6 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarting_notify(accel_dev);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
- adf_dev_restore(accel_dev);
if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
/* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
index ef5988afd4c6..b5484bfa6996 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_user.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_user.h
@@ -58,7 +58,7 @@ struct adf_user_cfg_key_val {
uint64_t padding3;
};
enum adf_cfg_val_type type;
-};
+} __packed;
struct adf_user_cfg_section {
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
@@ -70,7 +70,7 @@ struct adf_user_cfg_section {
struct adf_user_cfg_section *next;
uint64_t padding3;
};
-};
+} __packed;
struct adf_user_cfg_ctl_data {
union {
@@ -78,5 +78,5 @@ struct adf_user_cfg_ctl_data {
uint64_t padding;
};
uint8_t device_id;
-};
+} __packed;
#endif
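Marking these structures __packed pins their layout: they cross the user/kernel boundary, so compiler-inserted padding on one side but not the other would corrupt the copy. A toy illustration of what the attribute changes:

/* Sketch: without __packed the compiler may pad 'id' out to the
 * alignment of the following u64; __packed fixes sizeof() to the
 * sum of the members. */
struct demo_unpacked {
	uint8_t id;
	uint64_t val;
};			/* sizeof() is typically 16 */

struct demo_packed {
	uint8_t id;
	uint64_t val;
} __packed;		/* sizeof() is exactly 9 */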
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
index f267d9e42e0b..d7dd18d9bef8 100644
--- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -49,7 +49,6 @@
#include "adf_transport_internal.h"
#define ADF_ARB_NUM 4
-#define ADF_ARB_REQ_RING_NUM 8
#define ADF_ARB_REG_SIZE 0x4
#define ADF_ARB_WTR_SIZE 0x20
#define ADF_ARB_OFFSET 0x30000
@@ -64,15 +63,6 @@
ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
(ADF_ARB_REG_SLOT * index), value)
-#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
- ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
- ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
- ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
- (ADF_ARB_REG_SIZE * index), value)
-
#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
(ADF_ARB_REG_SIZE * index), value)
@@ -99,15 +89,6 @@ int adf_init_arb(struct adf_accel_dev *accel_dev)
for (arb = 0; arb < ADF_ARB_NUM; arb++)
WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
- /* Setup service weighting */
- for (arb = 0; arb < ADF_ARB_NUM; arb++)
- for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
- WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
-
- /* Setup ring response ordering */
- for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
- WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
-
/* Setup worker queue registers */
for (i = 0; i < hw_data->num_engines; i++)
WRITE_CSR_ARB_WQCFG(csr, i, i);
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
index d97db990955d..5d1ee7e53492 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
@@ -112,27 +112,27 @@ enum icp_qat_uof_mem_region {
};
enum icp_qat_uof_regtype {
- ICP_NO_DEST,
- ICP_GPA_REL,
- ICP_GPA_ABS,
- ICP_GPB_REL,
- ICP_GPB_ABS,
- ICP_SR_REL,
- ICP_SR_RD_REL,
- ICP_SR_WR_REL,
- ICP_SR_ABS,
- ICP_SR_RD_ABS,
- ICP_SR_WR_ABS,
- ICP_DR_REL,
- ICP_DR_RD_REL,
- ICP_DR_WR_REL,
- ICP_DR_ABS,
- ICP_DR_RD_ABS,
- ICP_DR_WR_ABS,
- ICP_LMEM,
- ICP_LMEM0,
- ICP_LMEM1,
- ICP_NEIGH_REL,
+ ICP_NO_DEST = 0,
+ ICP_GPA_REL = 1,
+ ICP_GPA_ABS = 2,
+ ICP_GPB_REL = 3,
+ ICP_GPB_ABS = 4,
+ ICP_SR_REL = 5,
+ ICP_SR_RD_REL = 6,
+ ICP_SR_WR_REL = 7,
+ ICP_SR_ABS = 8,
+ ICP_SR_RD_ABS = 9,
+ ICP_SR_WR_ABS = 10,
+ ICP_DR_REL = 19,
+ ICP_DR_RD_REL = 20,
+ ICP_DR_WR_REL = 21,
+ ICP_DR_ABS = 22,
+ ICP_DR_RD_ABS = 23,
+ ICP_DR_WR_ABS = 24,
+ ICP_LMEM = 26,
+ ICP_LMEM0 = 27,
+ ICP_LMEM1 = 28,
+ ICP_NEIGH_REL = 31,
};
enum icp_qat_css_fwtype {
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 59e4c3af15ed..1e8852a8a057 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1064,8 +1064,7 @@ static int qat_alg_aead_init(struct crypto_aead *tfm,
if (IS_ERR(ctx->hash_tfm))
return PTR_ERR(ctx->hash_tfm);
ctx->qat_hash_alg = hash;
- crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
- sizeof(struct qat_crypto_request));
+ crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
@@ -1114,8 +1113,7 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
spin_lock_init(&ctx->lock);
- tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
- sizeof(struct qat_crypto_request);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
ctx->tfm = tfm;
return 0;
}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 51c594fdacdc..e5c0727d4876 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -340,14 +340,16 @@ static int qat_rsa_enc(struct akcipher_request *req)
if (!ret)
return -EINPROGRESS;
-unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.enc.m);
- else
- if (!dma_mapping_error(dev, qat_req->in.enc.m))
- dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
- DMA_TO_DEVICE);
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
@@ -356,15 +358,14 @@ unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.enc.c))
dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
DMA_FROM_DEVICE);
-unmap_in_params:
- if (!dma_mapping_error(dev, qat_req->phy_in))
- dma_unmap_single(dev, qat_req->phy_in,
- sizeof(struct qat_rsa_input_params),
- DMA_TO_DEVICE);
- if (!dma_mapping_error(dev, qat_req->phy_out))
- dma_unmap_single(dev, qat_req->phy_out,
- sizeof(struct qat_rsa_output_params),
- DMA_TO_DEVICE);
+unmap_src:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.enc.m);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.enc.m))
+ dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+ DMA_TO_DEVICE);
return ret;
}
@@ -472,14 +473,16 @@ static int qat_rsa_dec(struct akcipher_request *req)
if (!ret)
return -EINPROGRESS;
-unmap_src:
- if (qat_req->src_align)
- dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
- qat_req->in.dec.c);
- else
- if (!dma_mapping_error(dev, qat_req->in.dec.c))
- dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
- DMA_TO_DEVICE);
+
+ if (!dma_mapping_error(dev, qat_req->phy_out))
+ dma_unmap_single(dev, qat_req->phy_out,
+ sizeof(struct qat_rsa_output_params),
+ DMA_TO_DEVICE);
+unmap_in_params:
+ if (!dma_mapping_error(dev, qat_req->phy_in))
+ dma_unmap_single(dev, qat_req->phy_in,
+ sizeof(struct qat_rsa_input_params),
+ DMA_TO_DEVICE);
unmap_dst:
if (qat_req->dst_align)
dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
@@ -488,15 +491,14 @@ unmap_dst:
if (!dma_mapping_error(dev, qat_req->out.dec.m))
dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
DMA_FROM_DEVICE);
-unmap_in_params:
- if (!dma_mapping_error(dev, qat_req->phy_in))
- dma_unmap_single(dev, qat_req->phy_in,
- sizeof(struct qat_rsa_input_params),
- DMA_TO_DEVICE);
- if (!dma_mapping_error(dev, qat_req->phy_out))
- dma_unmap_single(dev, qat_req->phy_out,
- sizeof(struct qat_rsa_output_params),
- DMA_TO_DEVICE);
+unmap_src:
+ if (qat_req->src_align)
+ dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+ qat_req->in.dec.c);
+ else
+ if (!dma_mapping_error(dev, qat_req->in.dec.c))
+ dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+ DMA_TO_DEVICE);
return ret;
}
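The RSA rework reorders the error labels so the unwind runs strictly in reverse order of setup: the parameter blocks mapped last are unmapped first, and the source buffer mapped first is released last. That is the usual kernel goto-cleanup shape; a generic sketch with placeholder setup_*/teardown_* helpers:

/* Sketch: each label undoes the step that succeeded before the
 * failing one, in reverse order of acquisition. */
	a = setup_a();
	if (!a)
		return -ENOMEM;
	b = setup_b();
	if (!b)
		goto undo_a;
	c = setup_c();
	if (!c)
		goto undo_b;
	return 0;

undo_b:
	teardown_b(b);
undo_a:
	teardown_a(a);
	return -ENOMEM;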
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 25d15f19c2b3..9b961b37a282 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -688,7 +688,7 @@ static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
int mflag = 0;
struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
- for (ae = 0; ae <= max_ae; ae++) {
+ for (ae = 0; ae < max_ae; ae++) {
if (!test_bit(ae,
(unsigned long *)&handle->hal_handle->ae_mask))
continue;
diff --git a/drivers/crypto/rockchip/Makefile b/drivers/crypto/rockchip/Makefile
index 7051c6c715f3..30f91297b4b6 100644
--- a/drivers/crypto/rockchip/Makefile
+++ b/drivers/crypto/rockchip/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rk_crypto.o
rk_crypto-objs := rk3288_crypto.o \
rk3288_crypto_ablkcipher.o \
+ rk3288_crypto_ahash.o
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index da9c73dce4af..af508258d2ea 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -208,6 +208,8 @@ static void rk_crypto_tasklet_cb(unsigned long data)
if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
dev->ablk_req = ablkcipher_request_cast(async_req);
+ else
+ dev->ahash_req = ahash_request_cast(async_req);
err = dev->start(dev);
if (err)
dev->complete(dev, err);
@@ -220,6 +222,9 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
&rk_cbc_des_alg,
&rk_ecb_des3_ede_alg,
&rk_cbc_des3_ede_alg,
+ &rk_ahash_sha1,
+ &rk_ahash_sha256,
+ &rk_ahash_md5,
};
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
@@ -229,15 +234,24 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
- err = crypto_register_alg(&rk_cipher_algs[i]->alg);
+ if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ err = crypto_register_alg(
+ &rk_cipher_algs[i]->alg.crypto);
+ else
+ err = crypto_register_ahash(
+ &rk_cipher_algs[i]->alg.hash);
if (err)
goto err_cipher_algs;
}
return 0;
err_cipher_algs:
- for (k = 0; k < i; k++)
- crypto_unregister_alg(&rk_cipher_algs[k]->alg);
+ for (k = 0; k < i; k++) {
+		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
+ crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
+ else
+			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
+ }
return err;
}
@@ -245,8 +259,12 @@ static void rk_crypto_unregister(void)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++)
- crypto_unregister_alg(&rk_cipher_algs[i]->alg);
+ for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
+ if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
+ crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
+ else
+ crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+ }
}
static void rk_crypto_action(void *data)
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index e499c2c6c903..d7b71fea320b 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -6,6 +6,10 @@
#include <crypto/algapi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <crypto/internal/hash.h>
+
+#include <crypto/md5.h>
+#include <crypto/sha.h>
#define _SBF(v, f) ((v) << (f))
@@ -149,6 +153,28 @@
#define RK_CRYPTO_TDES_KEY3_0 0x0130
#define RK_CRYPTO_TDES_KEY3_1 0x0134
+/* HASH */
+#define RK_CRYPTO_HASH_CTRL 0x0180
+#define RK_CRYPTO_HASH_SWAP_DO BIT(3)
+#define RK_CRYPTO_HASH_SWAP_DI BIT(2)
+#define RK_CRYPTO_HASH_SHA1 _SBF(0x00, 0)
+#define RK_CRYPTO_HASH_MD5 _SBF(0x01, 0)
+#define RK_CRYPTO_HASH_SHA256 _SBF(0x02, 0)
+#define RK_CRYPTO_HASH_PRNG _SBF(0x03, 0)
+
+#define RK_CRYPTO_HASH_STS 0x0184
+#define RK_CRYPTO_HASH_DONE BIT(0)
+
+#define RK_CRYPTO_HASH_MSG_LEN 0x0188
+#define RK_CRYPTO_HASH_DOUT_0 0x018c
+#define RK_CRYPTO_HASH_DOUT_1 0x0190
+#define RK_CRYPTO_HASH_DOUT_2 0x0194
+#define RK_CRYPTO_HASH_DOUT_3 0x0198
+#define RK_CRYPTO_HASH_DOUT_4 0x019c
+#define RK_CRYPTO_HASH_DOUT_5 0x01a0
+#define RK_CRYPTO_HASH_DOUT_6 0x01a4
+#define RK_CRYPTO_HASH_DOUT_7 0x01a8
+
#define CRYPTO_READ(dev, offset) \
readl_relaxed(((dev)->reg + (offset)))
#define CRYPTO_WRITE(dev, offset, val) \
@@ -166,6 +192,7 @@ struct rk_crypto_info {
struct crypto_queue queue;
struct tasklet_struct crypto_tasklet;
struct ablkcipher_request *ablk_req;
+ struct ahash_request *ahash_req;
/* device lock */
spinlock_t lock;
@@ -195,15 +222,36 @@ struct rk_crypto_info {
void (*unload_data)(struct rk_crypto_info *dev);
};
+/* the private variable of hash */
+struct rk_ahash_ctx {
+ struct rk_crypto_info *dev;
+ /* for fallback */
+ struct crypto_ahash *fallback_tfm;
+};
+
+/* the private variable of hash for fallback */
+struct rk_ahash_rctx {
+ struct ahash_request fallback_req;
+};
+
/* the private variable of cipher */
struct rk_cipher_ctx {
struct rk_crypto_info *dev;
unsigned int keylen;
};
+enum alg_type {
+ ALG_TYPE_HASH,
+ ALG_TYPE_CIPHER,
+};
+
struct rk_crypto_tmp {
- struct rk_crypto_info *dev;
- struct crypto_alg alg;
+ struct rk_crypto_info *dev;
+ union {
+ struct crypto_alg crypto;
+ struct ahash_alg hash;
+ } alg;
+ enum alg_type type;
};
extern struct rk_crypto_tmp rk_ecb_aes_alg;
@@ -213,4 +261,8 @@ extern struct rk_crypto_tmp rk_cbc_des_alg;
extern struct rk_crypto_tmp rk_ecb_des3_ede_alg;
extern struct rk_crypto_tmp rk_cbc_des3_ede_alg;
+extern struct rk_crypto_tmp rk_ahash_sha1;
+extern struct rk_crypto_tmp rk_ahash_sha256;
+extern struct rk_crypto_tmp rk_ahash_md5;
+
#endif
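rk_crypto_tmp is now a tagged union, letting one registration table hold both ciphers and hashes; the type field tells the (un)registration loops which union member, and which crypto API call, applies. A sketch of entries of each kind (field values elided):

/* Sketch: the tag selects the union member and the API to call. */
struct rk_crypto_tmp example_cipher = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = { .cra_name = "ecb(aes)", /* ... */ },
};

struct rk_crypto_tmp example_hash = {
	.type = ALG_TYPE_HASH,
	.alg.hash = { .halg.base = { .cra_name = "sha1", /* ... */ } },
};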
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index d98b681f6c06..b5a3afe222e4 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -336,7 +336,7 @@ static int rk_ablk_cra_init(struct crypto_tfm *tfm)
struct crypto_alg *alg = tfm->__crt_alg;
struct rk_crypto_tmp *algt;
- algt = container_of(alg, struct rk_crypto_tmp, alg);
+ algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);
ctx->dev = algt->dev;
ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
@@ -357,7 +357,8 @@ static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
}
struct rk_crypto_tmp rk_ecb_aes_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-rk",
.cra_priority = 300,
@@ -381,7 +382,8 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
};
struct rk_crypto_tmp rk_cbc_aes_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-rk",
.cra_priority = 300,
@@ -406,7 +408,8 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
};
struct rk_crypto_tmp rk_ecb_des_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-rk",
.cra_priority = 300,
@@ -430,7 +433,8 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
};
struct rk_crypto_tmp rk_cbc_des_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-rk",
.cra_priority = 300,
@@ -455,7 +459,8 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
};
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-ede-rk",
.cra_priority = 300,
@@ -480,7 +485,8 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
};
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
- .alg = {
+ .type = ALG_TYPE_CIPHER,
+ .alg.crypto = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-ede-rk",
.cra_priority = 300,
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
new file mode 100644
index 000000000000..718588219f75
--- /dev/null
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -0,0 +1,404 @@
+/*
+ * Crypto acceleration support for Rockchip RK3288
+ *
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Zain Wang <zain.wang@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
+ */
+#include "rk3288_crypto.h"
+
+/*
+ * The hardware cannot hash a zero-length message, so return the
+ * precomputed hash of the empty message in that case.
+ */
+
+static int zero_message_process(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int rk_digest_size = crypto_ahash_digestsize(tfm);
+
+ switch (rk_digest_size) {
+ case SHA1_DIGEST_SIZE:
+ memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
+ break;
+ case SHA256_DIGEST_SIZE:
+ memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
+ break;
+ case MD5_DIGEST_SIZE:
+ memcpy(req->result, md5_zero_message_hash, rk_digest_size);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rk_ahash_crypto_complete(struct rk_crypto_info *dev, int err)
+{
+ if (dev->ahash_req->base.complete)
+ dev->ahash_req->base.complete(&dev->ahash_req->base, err);
+}
+
+static void rk_ahash_reg_init(struct rk_crypto_info *dev)
+{
+ int reg_status = 0;
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
+ RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+ reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
+ reg_status &= (~RK_CRYPTO_HASH_FLUSH);
+ reg_status |= _SBF(0xffff, 16);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);
+
+ memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
+ RK_CRYPTO_HRDMA_DONE_ENA);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
+ RK_CRYPTO_HRDMA_DONE_INT);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, dev->mode |
+ RK_CRYPTO_HASH_SWAP_DO);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
+ RK_CRYPTO_BYTESWAP_BRFIFO |
+ RK_CRYPTO_BYTESWAP_BTFIFO);
+
+ CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
+}
+
+static int rk_ahash_init(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_init(&rctx->fallback_req);
+}
+
+static int rk_ahash_update(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+
+ return crypto_ahash_update(&rctx->fallback_req);
+}
+
+static int rk_ahash_final(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_final(&rctx->fallback_req);
+}
+
+static int rk_ahash_finup(struct ahash_request *req)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rctx->fallback_req.nbytes = req->nbytes;
+ rctx->fallback_req.src = req->src;
+ rctx->fallback_req.result = req->result;
+
+ return crypto_ahash_finup(&rctx->fallback_req);
+}
+
+static int rk_ahash_import(struct ahash_request *req, const void *in)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_import(&rctx->fallback_req, in);
+}
+
+static int rk_ahash_export(struct ahash_request *req, void *out)
+{
+ struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+ rctx->fallback_req.base.flags = req->base.flags &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_ahash_export(&rctx->fallback_req, out);
+}
+
+static int rk_ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
+ struct rk_crypto_info *dev = NULL;
+ unsigned long flags;
+ int ret;
+
+ if (!req->nbytes)
+ return zero_message_process(req);
+
+ dev = tctx->dev;
+ dev->total = req->nbytes;
+ dev->left_bytes = req->nbytes;
+ dev->aligned = 0;
+ dev->mode = 0;
+ dev->align_size = 4;
+ dev->sg_dst = NULL;
+ dev->sg_src = req->src;
+ dev->first = req->src;
+ dev->nents = sg_nents(req->src);
+
+ switch (crypto_ahash_digestsize(tfm)) {
+ case SHA1_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_SHA1;
+ break;
+ case SHA256_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_SHA256;
+ break;
+ case MD5_DIGEST_SIZE:
+ dev->mode = RK_CRYPTO_HASH_MD5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rk_ahash_reg_init(dev);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ret = crypto_enqueue_request(&dev->queue, &req->base);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ tasklet_schedule(&dev->crypto_tasklet);
+
+ /*
+ * The engine needs some time to process the data after the last DMA
+ * transfer finishes.
+ *
+ * That time depends on the length of the final chunk, so a fixed delay
+ * cannot be used here. Sleeping 10-50 us keeps this poll loop from
+ * hammering the status register while still responding quickly once the
+ * DMA completes.
+ */
+ while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
+ usleep_range(10, 50);
+
+ memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
+ crypto_ahash_digestsize(tfm));
+
+ return 0;
+}
+
+static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
+{
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
+ CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
+ CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
+ (RK_CRYPTO_HASH_START << 16));
+}
+
+static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
+{
+ int err;
+
+ err = dev->load_data(dev, dev->sg_src, NULL);
+ if (!err)
+ crypto_ahash_dma_start(dev);
+ return err;
+}
+
+static int rk_ahash_start(struct rk_crypto_info *dev)
+{
+ return rk_ahash_set_data_start(dev);
+}
+
+static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
+{
+ int err = 0;
+
+ dev->unload_data(dev);
+ if (dev->left_bytes) {
+ if (dev->aligned) {
+ if (sg_is_last(dev->sg_src)) {
+ dev_warn(dev->dev, "[%s:%d], Lack of data\n",
+ __func__, __LINE__);
+ err = -ENOMEM;
+ goto out_rx;
+ }
+ dev->sg_src = sg_next(dev->sg_src);
+ }
+ err = rk_ahash_set_data_start(dev);
+ } else {
+ dev->complete(dev, 0);
+ }
+
+out_rx:
+ return err;
+}
+
+static int rk_cra_hash_init(struct crypto_tfm *tfm)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct rk_crypto_tmp *algt;
+ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+
+ const char *alg_name = crypto_tfm_alg_name(tfm);
+
+ algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+
+ tctx->dev = algt->dev;
+ tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
+ if (!tctx->dev->addr_vir) {
+ dev_err(tctx->dev->dev, "failed to allocate a page for addr_vir\n");
+ return -ENOMEM;
+ }
+ tctx->dev->start = rk_ahash_start;
+ tctx->dev->update = rk_ahash_crypto_rx;
+ tctx->dev->complete = rk_ahash_crypto_complete;
+
+ /* for fallback */
+ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(tctx->fallback_tfm)) {
+ dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
+ /* don't leak the page allocated above */
+ free_page((unsigned long)tctx->dev->addr_vir);
+ return PTR_ERR(tctx->fallback_tfm);
+ }
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct rk_ahash_rctx) +
+ crypto_ahash_reqsize(tctx->fallback_tfm));
+
+ return tctx->dev->enable_clk(tctx->dev);
+}
+
+static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+{
+ struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ free_page((unsigned long)tctx->dev->addr_vir);
+ return tctx->dev->disable_clk(tctx->dev);
+}
+
+struct rk_crypto_tmp rk_ahash_sha1 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "rk-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+struct rk_crypto_tmp rk_ahash_sha256 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "rk-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
+
+struct rk_crypto_tmp rk_ahash_md5 = {
+ .type = ALG_TYPE_HASH,
+ .alg.hash = {
+ .init = rk_ahash_init,
+ .update = rk_ahash_update,
+ .final = rk_ahash_final,
+ .finup = rk_ahash_finup,
+ .export = rk_ahash_export,
+ .import = rk_ahash_import,
+ .digest = rk_ahash_digest,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "rk-md5",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct rk_ahash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = rk_cra_hash_init,
+ .cra_exit = rk_cra_hash_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }
+ }
+};
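
For context, a minimal sketch of how a caller would drive the "sha1"/"rk-sha1"
ahash registered above through the standard kernel crypto API; the helper name
and error handling are illustrative, and a real asynchronous caller would also
wait out an -EINPROGRESS completion:

	#include <crypto/hash.h>
	#include <linux/scatterlist.h>

	static int example_sha1_digest(const u8 *data, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_ahash("sha1", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg, data, len);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, out, len);

		ret = crypto_ahash_digest(req); /* may return -EINPROGRESS */

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return ret;
	}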
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index f214a8755827..5f161a9777e3 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -224,6 +224,7 @@ static inline struct samsung_aes_variant *find_s5p_sss_version
{
if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
const struct of_device_id *match;
+
match = of_match_node(s5p_sss_dt_match,
pdev->dev.of_node);
return (struct samsung_aes_variant *)match->data;
@@ -382,7 +383,7 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
void __iomem *keystart;
if (iv)
- memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
if (keylen == AES_KEYSIZE_256)
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
@@ -391,13 +392,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
else
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
- memcpy(keystart, key, keylen);
+ memcpy_toio(keystart, key, keylen);
}
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
-
uint32_t aes_control;
int err;
unsigned long flags;
@@ -518,7 +518,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct s5p_aes_dev *dev = ctx->dev;
if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
- pr_err("request size is not exact amount of AES blocks\n");
+ dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
}
@@ -566,7 +566,7 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
- struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->dev = s5p_dev;
tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
@@ -701,7 +701,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_algs;
}
- pr_info("s5p-sss driver registered\n");
+ dev_info(dev, "s5p-sss driver registered\n");
return 0;
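
The memcpy_toio() conversions above are needed because dev->aes_ioaddr is an
__iomem mapping; a plain memcpy() into MMIO space is not guaranteed to work on
every architecture. The general idiom, sketched with illustrative names:

	void __iomem *regs = ioremap(phys_base, size);

	memcpy_toio(regs + KEY_OFFSET, key, keylen);	/* MMIO-safe copy */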
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 6c4f91c5e6b3..c3f3d89e4831 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -182,7 +182,6 @@ struct sahara_sha_reqctx {
u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
u8 context[SHA256_DIGEST_SIZE + 4];
- struct mutex mutex;
unsigned int mode;
unsigned int digest_size;
unsigned int context_size;
@@ -1096,7 +1095,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
if (!req->nbytes && !last)
return 0;
- mutex_lock(&rctx->mutex);
rctx->last = last;
if (!rctx->active) {
@@ -1109,7 +1107,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
mutex_unlock(&dev->queue_mutex);
wake_up_process(dev->kthread);
- mutex_unlock(&rctx->mutex);
return ret;
}
@@ -1137,8 +1134,6 @@ static int sahara_sha_init(struct ahash_request *req)
rctx->context_size = rctx->digest_size + 4;
rctx->active = 0;
- mutex_init(&rctx->mutex);
-
return 0;
}
@@ -1167,26 +1162,18 @@ static int sahara_sha_digest(struct ahash_request *req)
static int sahara_sha_export(struct ahash_request *req, void *out)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
- memcpy(out, ctx, sizeof(struct sahara_ctx));
- memcpy(out + sizeof(struct sahara_sha_reqctx), rctx,
- sizeof(struct sahara_sha_reqctx));
+ memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
return 0;
}
static int sahara_sha_import(struct ahash_request *req, const void *in)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
- memcpy(ctx, in, sizeof(struct sahara_ctx));
- memcpy(rctx, in + sizeof(struct sahara_sha_reqctx),
- sizeof(struct sahara_sha_reqctx));
+ memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
return 0;
}
@@ -1272,6 +1259,7 @@ static struct ahash_alg sha_v3_algs[] = {
.export = sahara_sha_export,
.import = sahara_sha_import,
.halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "sahara-sha1",
@@ -1299,6 +1287,7 @@ static struct ahash_alg sha_v4_algs[] = {
.export = sahara_sha_export,
.import = sahara_sha_import,
.halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "sahara-sha256",
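
The halg.statesize additions above are what make the slimmed-down export/import
pair safe: the crypto core sizes the caller's state buffer from statesize, so
it must cover exactly the struct sahara_sha_reqctx that sahara_sha_export()
copies out. A hedged sketch of the caller-side pairing:

	u8 state[sizeof(struct sahara_sha_reqctx)];	/* == halg.statesize */

	crypto_ahash_export(req, state);	/* save the partial-hash state */
	crypto_ahash_import(req2, state);	/* resume it on another request */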
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index a19ee127edca..7be3fbcd8d78 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -251,11 +251,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
spaces = readl(ss->base + SS_FCSR);
rx_cnt = SS_RXFIFO_SPACES(spaces);
tx_cnt = SS_TXFIFO_SPACES(spaces);
- dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+ dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
mode,
oi, mi.length, ileft, areq->nbytes, rx_cnt,
- oo, mo.length, oleft, areq->nbytes, tx_cnt,
- todo, ob);
+ oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);
if (tx_cnt == 0)
continue;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 4c243c1ffc7f..790f7cadc1ed 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1440,9 +1440,9 @@ static int ux500_cryp_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "[%s]: ioremap failed!", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index d6fdc583ce5d..574e87c7f2b8 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1659,9 +1659,9 @@ static int ux500_hash_probe(struct platform_device *pdev)
device_data->phybase = res->start;
device_data->base = devm_ioremap_resource(dev, res);
- if (!device_data->base) {
+ if (IS_ERR(device_data->base)) {
dev_err(dev, "%s: ioremap() failed!\n", __func__);
- ret = -ENOMEM;
+ ret = PTR_ERR(device_data->base);
goto out;
}
spin_lock_init(&device_data->ctx_lock);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 64281bb2f650..4de78c552251 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -61,7 +61,7 @@ config DEVFREQ_GOV_USERSPACE
Sets the frequency at the user specified one.
This governor returns the user configured frequency if there
has been an input to /sys/devices/.../power/devfreq_set_freq.
- Otherwise, the governor does not change the frequnecy
+ Otherwise, the governor does not change the frequency
given at the initialization.
comment "DEVFREQ Drivers"
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 9810d1df0691..4a2c07ee6677 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -259,6 +259,7 @@ static long dma_buf_ioctl(struct file *file,
struct dma_buf *dmabuf;
struct dma_buf_sync sync;
enum dma_data_direction direction;
+ int ret;
dmabuf = file->private_data;
@@ -285,11 +286,11 @@ static long dma_buf_ioctl(struct file *file,
}
if (sync.flags & DMA_BUF_SYNC_END)
- dma_buf_end_cpu_access(dmabuf, direction);
+ ret = dma_buf_end_cpu_access(dmabuf, direction);
else
- dma_buf_begin_cpu_access(dmabuf, direction);
+ ret = dma_buf_begin_cpu_access(dmabuf, direction);
- return 0;
+ return ret;
default:
return -ENOTTY;
}
@@ -611,15 +612,19 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
* @dmabuf: [in] buffer to complete cpu access for.
* @direction: [in] length of range for cpu access.
*
- * This call must always succeed.
+ * Can return negative error values, returns 0 on success.
*/
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
+int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
{
+ int ret = 0;
+
WARN_ON(!dmabuf);
if (dmabuf->ops->end_cpu_access)
- dmabuf->ops->end_cpu_access(dmabuf, direction);
+ ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
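
With end_cpu_access now able to fail, both halves of a CPU-access bracket
return a status; a minimal sketch of the resulting calling pattern:

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... CPU reads/writes of the buffer ... */

	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;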
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 79b1390f2016..d96d87c56f2e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -341,12 +341,13 @@ config MV_XOR
config MXS_DMA
bool "MXS DMA support"
- depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
+ depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
select STMP_DEVICE
select DMA_ENGINE
help
Support the MXS DMA engine. This engine, including APBH-DMA
- and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
+ and APBX-DMA, is integrated into Freescale
+ i.MX23/28/MX6Q/MX6DL/MX6UL chips.
config MX3_IPU
bool "MX3x Image Processing Unit support"
@@ -408,15 +409,6 @@ config PXA_DMA
16 to 32 channels for peripheral to memory or memory to memory
transfers.
-config QCOM_BAM_DMA
- tristate "QCOM BAM DMA support"
- depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- ---help---
- Enable support for the QCOM BAM DMA controller. This controller
- provides DMA capabilities for a variety of on-chip devices.
-
config SIRF_DMA
tristate "CSR SiRFprimaII/SiRFmarco DMA support"
depends on ARCH_SIRF
@@ -539,6 +531,8 @@ config ZX_DMA
# driver files
source "drivers/dma/bestcomm/Kconfig"
+source "drivers/dma/qcom/Kconfig"
+
source "drivers/dma/dw/Kconfig"
source "drivers/dma/hsu/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2dd0a067a0ca..6084127c1486 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
@@ -67,4 +66,5 @@ obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-y += qcom/
obj-y += xilinx/
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index eed6bda01790..4a748c3435d7 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -438,7 +438,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
return ERR_PTR(-ENODEV);
}
- dev_dbg(dev, "found DMA channel \"%s\" at index %d\n", name, index);
+ dev_dbg(dev, "Looking for DMA channel \"%s\" at index %d...\n", name, index);
return acpi_dma_request_slave_chan_by_index(dev, index);
}
EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c50a247be2e0..0cb259c59916 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -496,6 +496,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
caps->src_addr_widths = device->src_addr_widths;
caps->dst_addr_widths = device->dst_addr_widths;
caps->directions = device->directions;
+ caps->max_burst = device->max_burst;
caps->residue_granularity = device->residue_granularity;
caps->descriptor_reuse = device->descriptor_reuse;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5ad0ec1f0e29..97199b3c25a2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
if (dwc->initialized == true)
return;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
- } else {
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- }
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;
- if (!dws || dws->dma_dev != chan->device->dev)
+ if (dws->dma_dev != chan->device->dev)
return false;
/* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/
+ /*
+ * We need controller-specific data to set up slave transfers.
+ */
+ if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+ return -EINVAL;
+ }
+
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+
+ /* Clear custom channel configuration */
+ dwc->src_id = 0;
+ dwc->dst_id = 0;
+
+ dwc->src_master = 0;
+ dwc->dst_master = 0;
+
dwc->initialized = false;
/* Disable interrupts */
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 241ff2b1402b..0a50c18d85b8 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -150,7 +150,7 @@ enum dw_dma_msize {
#define DWC_CTLL_DST_INC (0<<7) /* DAR update/not */
#define DWC_CTLL_DST_DEC (1<<7)
#define DWC_CTLL_DST_FIX (2<<7)
-#define DWC_CTLL_SRC_INC (0<<7) /* SAR update/not */
+#define DWC_CTLL_SRC_INC (0<<9) /* SAR update/not */
#define DWC_CTLL_SRC_DEC (1<<9)
#define DWC_CTLL_SRC_FIX (2<<9)
#define DWC_CTLL_DST_MSIZE(n) ((n)<<11) /* burst, #elements */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e3d7fcb69b4c..04070baab78a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -869,6 +869,13 @@ static int edma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void edma_synchronize(struct dma_chan *chan)
+{
+ struct edma_chan *echan = to_edma_chan(chan);
+
+ vchan_synchronize(&echan->vchan);
+}
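+
+/*
+ * Usage sketch (illustrative): device_synchronize backs the generic
+ * dmaengine_synchronize() call, which clients place between terminating a
+ * channel and freeing memory that in-flight completion callbacks could
+ * still touch:
+ *
+ *	dmaengine_terminate_async(chan);
+ *	dmaengine_synchronize(chan);
+ *	kfree(buf);
+ */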
+
static int edma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
@@ -1231,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct edma_desc *edesc;
dma_addr_t src_addr, dst_addr;
enum dma_slave_buswidth dev_width;
+ bool use_intermediate = false;
u32 burst;
int i, ret, nslots;
@@ -1272,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
* but the synchronization is difficult to achieve with Cyclic and
* cannot be guaranteed, so we error out early.
*/
- if (nslots > MAX_NR_SG)
- return NULL;
+ if (nslots > MAX_NR_SG) {
+ /*
+ * If the burst and period sizes are the same, we can put
+ * the full buffer into a single period and activate
+ * intermediate interrupts. This will produce interrupts
+ * after each burst, which is also after each desired period.
+ */
+ if (burst == period_len) {
+ period_len = buf_len;
+ nslots = 2;
+ use_intermediate = true;
+ } else {
+ return NULL;
+ }
+ }
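+
+ /*
+ * Worked example (illustrative numbers): burst == period_len == 32 bytes
+ * on a 64 KiB buffer would need on the order of 2048 slots, far above
+ * MAX_NR_SG; folded into a single buffer-sized period with ITCINTEN, an
+ * interrupt still fires per 32-byte burst, i.e. once per original period.
+ */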
edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
GFP_ATOMIC);
@@ -1351,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
/*
* Enable period interrupt only if it is requested
*/
- if (tx_flags & DMA_PREP_INTERRUPT)
+ if (tx_flags & DMA_PREP_INTERRUPT) {
edesc->pset[i].param.opt |= TCINTEN;
+
+ /* Also enable intermediate interrupts if necessary */
+ if (use_intermediate)
+ edesc->pset[i].param.opt |= ITCINTEN;
+ }
}
/* Place the cyclic channel to highest priority queue */
@@ -1365,36 +1391,36 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
static void edma_completion_handler(struct edma_chan *echan)
{
struct device *dev = echan->vchan.chan.device->dev;
- struct edma_desc *edesc = echan->edesc;
-
- if (!edesc)
- return;
+ struct edma_desc *edesc;
spin_lock(&echan->vchan.lock);
- if (edesc->cyclic) {
- vchan_cyclic_callback(&edesc->vdesc);
- spin_unlock(&echan->vchan.lock);
- return;
- } else if (edesc->processed == edesc->pset_nr) {
- edesc->residue = 0;
- edma_stop(echan);
- vchan_cookie_complete(&edesc->vdesc);
- echan->edesc = NULL;
-
- dev_dbg(dev, "Transfer completed on channel %d\n",
- echan->ch_num);
- } else {
- dev_dbg(dev, "Sub transfer completed on channel %d\n",
- echan->ch_num);
-
- edma_pause(echan);
-
- /* Update statistics for tx_status */
- edesc->residue -= edesc->sg_len;
- edesc->residue_stat = edesc->residue;
- edesc->processed_stat = edesc->processed;
+ edesc = echan->edesc;
+ if (edesc) {
+ if (edesc->cyclic) {
+ vchan_cyclic_callback(&edesc->vdesc);
+ spin_unlock(&echan->vchan.lock);
+ return;
+ } else if (edesc->processed == edesc->pset_nr) {
+ edesc->residue = 0;
+ edma_stop(echan);
+ vchan_cookie_complete(&edesc->vdesc);
+ echan->edesc = NULL;
+
+ dev_dbg(dev, "Transfer completed on channel %d\n",
+ echan->ch_num);
+ } else {
+ dev_dbg(dev, "Sub transfer completed on channel %d\n",
+ echan->ch_num);
+
+ edma_pause(echan);
+
+ /* Update statistics for tx_status */
+ edesc->residue -= edesc->sg_len;
+ edesc->residue_stat = edesc->residue;
+ edesc->processed_stat = edesc->processed;
+ }
+ edma_execute(echan);
}
- edma_execute(echan);
spin_unlock(&echan->vchan.lock);
}
@@ -1563,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
- struct platform_device *tc_pdev;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF) || !tc)
- return;
-
- tc_pdev = of_find_device_by_node(tc->node);
- if (!tc_pdev) {
- pr_err("%s: TPTC device is not found\n", __func__);
- return;
- }
- if (!pm_runtime_enabled(&tc_pdev->dev))
- pm_runtime_enable(&tc_pdev->dev);
-
- if (enable)
- ret = pm_runtime_get_sync(&tc_pdev->dev);
- else
- ret = pm_runtime_put_sync(&tc_pdev->dev);
-
- if (ret < 0)
- pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
- enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -1625,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
echan->hw_triggered ? "HW" : "SW");
- edma_tc_set_pm_state(echan->tc, true);
-
return 0;
err_slot:
@@ -1663,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
echan->alloced = false;
}
- edma_tc_set_pm_state(echan->tc, false);
echan->tc = NULL;
echan->hw_triggered = false;
@@ -1837,6 +1834,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
s_ddev->device_pause = edma_dma_pause;
s_ddev->device_resume = edma_dma_resume;
s_ddev->device_terminate_all = edma_terminate_all;
+ s_ddev->device_synchronize = edma_synchronize;
s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
@@ -1862,6 +1860,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
m_ddev->device_pause = edma_dma_pause;
m_ddev->device_resume = edma_dma_resume;
m_ddev->device_terminate_all = edma_terminate_all;
+ m_ddev->device_synchronize = edma_synchronize;
m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
@@ -2408,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
int i;
for (i = 0; i < ecc->num_channels; i++) {
- if (echan[i].alloced) {
+ if (echan[i].alloced)
edma_setup_interrupt(&echan[i], false);
- edma_tc_set_pm_state(echan[i].tc, false);
- }
}
return 0;
@@ -2441,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
/* Set up channel -> slot mapping for the entry slot */
edma_set_chmap(&echan[i], echan[i].slot[0]);
-
- edma_tc_set_pm_state(echan[i].tc, true);
}
}
@@ -2466,7 +2461,8 @@ static struct platform_driver edma_driver = {
static int edma_tptc_probe(struct platform_device *pdev)
{
- return 0;
+ pm_runtime_enable(&pdev->dev);
+ return pm_runtime_get_sync(&pdev->dev);
}
static struct platform_driver edma_tptc_driver = {
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 57ff46284f15..21f08cc3352b 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -421,23 +421,25 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
desc->size);
}
- switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
- case M2P_INTERRUPT_STALL:
- /* Disable interrupts */
- control = readl(edmac->regs + M2P_CONTROL);
- control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
- m2p_set_control(edmac, control);
-
- return INTERRUPT_DONE;
-
- case M2P_INTERRUPT_NFB:
- if (ep93xx_dma_advance_active(edmac))
- m2p_fill_desc(edmac);
+ /*
+ * Even the latest E2 silicon revision sometimes asserts the STALL
+ * interrupt instead of NFB. Therefore we treat them equally, deciding
+ * based on the amount of data still left to transfer.
+ */
+ if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
+ return INTERRUPT_UNKNOWN;
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2p_fill_desc(edmac);
return INTERRUPT_NEXT_BUFFER;
}
- return INTERRUPT_UNKNOWN;
+ /* Disable interrupts */
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ return INTERRUPT_DONE;
}
/*
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index eef145edb936..ee510515ce18 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
if (hsuc->direction == DMA_MEM_TO_DEV) {
bsr = config->dst_maxburst;
- mtsr = config->dst_addr_width;
+ mtsr = config->src_addr_width;
} else if (hsuc->direction == DMA_DEV_TO_MEM) {
bsr = config->src_maxburst;
- mtsr = config->src_addr_width;
+ mtsr = config->dst_addr_width;
}
hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
- return sr;
+ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}
irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
struct hsu_dma_desc *desc = hsuc->desc;
- size_t bytes = desc->length;
+ size_t bytes = 0;
int i;
- i = desc->active % HSU_DMA_CHAN_NR_DESC;
+ for (i = desc->active; i < desc->nents; i++)
+ bytes += desc->sg[i].len;
+
+ i = HSU_DMA_CHAN_NR_DESC - 1;
do {
bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
} while (--i >= 0);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 578a8ee8cd05..6b070c22b1df 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
+#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))
/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 3cb7b2c78197..1953e57505f4 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -289,6 +289,9 @@ static void idma64_desc_fill(struct idma64_chan *idma64c,
/* Trigger an interrupt after the last block is transferred */
lli->ctllo |= IDMA64C_CTLL_INT_EN;
+
+ /* Disable LLP transfer in the last block */
+ lli->ctllo &= ~(IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
}
static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index 8423f13ed0da..6b816878e5e7 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -16,7 +16,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "virt-dma.h"
@@ -71,7 +71,7 @@
#define IDMA64C_CFGH_SRC_PER(x) ((x) << 0) /* src peripheral */
#define IDMA64C_CFGH_DST_PER(x) ((x) << 4) /* dst peripheral */
#define IDMA64C_CFGH_RD_ISSUE_THD(x) ((x) << 8)
-#define IDMA64C_CFGH_RW_ISSUE_THD(x) ((x) << 18)
+#define IDMA64C_CFGH_WR_ISSUE_THD(x) ((x) << 18)
/* Interrupt registers */
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 21539d5c54c3..bd09961443b1 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -31,6 +31,7 @@
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
+#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
@@ -290,24 +291,30 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
}
static struct ioat_ring_ent *
-ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
struct ioat_dma_descriptor *hw;
struct ioat_ring_ent *desc;
struct ioatdma_device *ioat_dma;
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+ int chunk;
dma_addr_t phys;
+ u8 *pos;
+ off_t offs;
ioat_dma = to_ioatdma_device(chan->device);
- hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
- if (!hw)
- return NULL;
+
+ chunk = idx / IOAT_DESCS_PER_2M;
+ idx &= (IOAT_DESCS_PER_2M - 1);
+ offs = idx * IOAT_DESC_SZ;
+ pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
+ phys = ioat_chan->descs[chunk].hw + offs;
+ hw = (struct ioat_dma_descriptor *)pos;
memset(hw, 0, sizeof(*hw));
desc = kmem_cache_zalloc(ioat_cache, flags);
- if (!desc) {
- pci_pool_free(ioat_dma->dma_pool, hw, phys);
+ if (!desc)
return NULL;
- }
dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = ioat_tx_submit_unlock;
@@ -318,32 +325,63 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
- struct ioatdma_device *ioat_dma;
-
- ioat_dma = to_ioatdma_device(chan->device);
- pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
kmem_cache_free(ioat_cache, desc);
}
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
+ struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
struct ioat_ring_ent **ring;
- int descs = 1 << order;
- int i;
-
- if (order > ioat_get_max_alloc_order())
- return NULL;
+ int total_descs = 1 << order;
+ int i, chunks;
/* allocate the array to hold the software ring */
- ring = kcalloc(descs, sizeof(*ring), flags);
+ ring = kcalloc(total_descs, sizeof(*ring), flags);
if (!ring)
return NULL;
- for (i = 0; i < descs; i++) {
- ring[i] = ioat_alloc_ring_ent(c, flags);
+
+ ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+
+ for (i = 0; i < chunks; i++) {
+ struct ioat_descs *descs = &ioat_chan->descs[i];
+
+ descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
+ SZ_2M, &descs->hw, flags);
+ if (!descs->virt && (i > 0)) {
+ int idx;
+
+ /* unwind the chunks that were already allocated */
+ for (idx = 0; idx < i; idx++) {
+ descs = &ioat_chan->descs[idx];
+ dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ descs->virt, descs->hw);
+ descs->virt = NULL;
+ descs->hw = 0;
+ }
+
+ ioat_chan->desc_chunks = 0;
+ kfree(ring);
+ return NULL;
+ }
+ }
+
+ for (i = 0; i < total_descs; i++) {
+ ring[i] = ioat_alloc_ring_ent(c, i, flags);
if (!ring[i]) {
+ int idx;
+
while (i--)
ioat_free_ring_ent(ring[i], c);
+
+ for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
+ dma_free_coherent(to_dev(ioat_chan),
+ SZ_2M,
+ ioat_chan->descs[idx].virt,
+ ioat_chan->descs[idx].hw);
+ ioat_chan->descs[idx].virt = NULL;
+ ioat_chan->descs[idx].hw = 0;
+ }
+
+ ioat_chan->desc_chunks = 0;
kfree(ring);
return NULL;
}
@@ -351,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
}
/* link descs */
- for (i = 0; i < descs-1; i++) {
+ for (i = 0; i < total_descs-1; i++) {
struct ioat_ring_ent *next = ring[i+1];
struct ioat_dma_descriptor *hw = ring[i]->hw;
@@ -362,114 +400,6 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
return ring;
}
-static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
-{
- /* reshape differs from normal ring allocation in that we want
- * to allocate a new software ring while only
- * extending/truncating the hardware ring
- */
- struct dma_chan *c = &ioat_chan->dma_chan;
- const u32 curr_size = ioat_ring_size(ioat_chan);
- const u16 active = ioat_ring_active(ioat_chan);
- const u32 new_size = 1 << order;
- struct ioat_ring_ent **ring;
- u32 i;
-
- if (order > ioat_get_max_alloc_order())
- return false;
-
- /* double check that we have at least 1 free descriptor */
- if (active == curr_size)
- return false;
-
- /* when shrinking, verify that we can hold the current active
- * set in the new ring
- */
- if (active >= new_size)
- return false;
-
- /* allocate the array to hold the software ring */
- ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
- if (!ring)
- return false;
-
- /* allocate/trim descriptors as needed */
- if (new_size > curr_size) {
- /* copy current descriptors to the new ring */
- for (i = 0; i < curr_size; i++) {
- u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_chan->ring[curr_idx];
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* add new descriptors to the ring */
- for (i = curr_size; i < new_size; i++) {
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
- if (!ring[new_idx]) {
- while (i--) {
- u16 new_idx = (ioat_chan->tail+i) &
- (new_size-1);
-
- ioat_free_ring_ent(ring[new_idx], c);
- }
- kfree(ring);
- return false;
- }
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* hw link new descriptors */
- for (i = curr_size-1; i < new_size; i++) {
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
- struct ioat_ring_ent *next =
- ring[(new_idx+1) & (new_size-1)];
- struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
- hw->next = next->txd.phys;
- }
- } else {
- struct ioat_dma_descriptor *hw;
- struct ioat_ring_ent *next;
-
- /* copy current descriptors to the new ring, dropping the
- * removed descriptors
- */
- for (i = 0; i < new_size; i++) {
- u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
- u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
- ring[new_idx] = ioat_chan->ring[curr_idx];
- set_desc_id(ring[new_idx], new_idx);
- }
-
- /* free deleted descriptors */
- for (i = new_size; i < curr_size; i++) {
- struct ioat_ring_ent *ent;
-
- ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
- ioat_free_ring_ent(ent, c);
- }
-
- /* fix up hardware ring */
- hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
- next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
- hw->next = next->txd.phys;
- }
-
- dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
- __func__, new_size);
-
- kfree(ioat_chan->ring);
- ioat_chan->ring = ring;
- ioat_chan->alloc_order = order;
-
- return true;
-}
-
/**
* ioat_check_space_lock - verify space and grab ring producer lock
* @ioat: ioat,3 channel (ring) to operate on
@@ -478,9 +408,6 @@ static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
__acquires(&ioat_chan->prep_lock)
{
- bool retry;
-
- retry:
spin_lock_bh(&ioat_chan->prep_lock);
/* never allow the last descriptor to be consumed, we need at
* least one free at all times to allow for on-the-fly ring
@@ -493,24 +420,8 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
ioat_chan->produce = num_descs;
return 0; /* with ioat->prep_lock held */
}
- retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
spin_unlock_bh(&ioat_chan->prep_lock);
- /* is another cpu already trying to expand the ring? */
- if (retry)
- goto retry;
-
- spin_lock_bh(&ioat_chan->cleanup_lock);
- spin_lock_bh(&ioat_chan->prep_lock);
- retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
- clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
- spin_unlock_bh(&ioat_chan->prep_lock);
- spin_unlock_bh(&ioat_chan->cleanup_lock);
-
- /* if we were able to expand the ring retry the allocation */
- if (retry)
- goto retry;
-
dev_dbg_ratelimited(to_dev(ioat_chan),
"%s: ring full! num_descs: %d (%x:%x:%x)\n",
__func__, num_descs, ioat_chan->head,
@@ -823,19 +734,6 @@ static void check_active(struct ioatdma_chan *ioat_chan)
if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
- else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
- /* if the ring is idle, empty, and oversized try to step
- * down the size
- */
- reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
-
- /* keep shrinking until we get back to our minimum
- * default size
- */
- if (ioat_chan->alloc_order > ioat_get_alloc_order())
- mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
- }
-
}
void ioat_timer_event(unsigned long data)
@@ -916,40 +814,6 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
return dma_cookie_status(c, cookie, txstate);
}
-static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
-{
- struct pci_dev *pdev = ioat_dma->pdev;
- int irq = pdev->irq, i;
-
- if (!is_bwd_ioat(pdev))
- return 0;
-
- switch (ioat_dma->irq_mode) {
- case IOAT_MSIX:
- for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
- struct msix_entry *msix = &ioat_dma->msix_entries[i];
- struct ioatdma_chan *ioat_chan;
-
- ioat_chan = ioat_chan_by_index(ioat_dma, i);
- devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
- }
-
- pci_disable_msix(pdev);
- break;
- case IOAT_MSI:
- pci_disable_msi(pdev);
- /* fall through */
- case IOAT_INTX:
- devm_free_irq(&pdev->dev, irq, ioat_dma);
- break;
- default:
- return 0;
- }
- ioat_dma->irq_mode = IOAT_NOIRQ;
-
- return ioat_dma_setup_interrupts(ioat_dma);
-}
-
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
/* throw away whatever the channel was doing and get it
@@ -989,9 +853,21 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
}
}
+ if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
+ ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
+ ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
+ ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
+ }
+
err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
- if (!err)
- err = ioat_irq_reinit(ioat_dma);
+ if (!err) {
+ if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
+ writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
+ writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
+ writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
+ }
+ }
if (err)
dev_err(&pdev->dev, "Failed to reset: %d\n", err);
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8f48074789f..a9bc1a15b0d1 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -62,7 +62,6 @@ enum ioat_irq_mode {
* struct ioatdma_device - internal representation of a IOAT device
* @pdev: PCI-Express device
* @reg_base: MMIO register space base address
- * @dma_pool: for allocating DMA descriptors
* @completion_pool: DMA buffers for completion ops
* @sed_hw_pool: DMA super descriptor pools
* @dma_dev: embedded struct dma_device
@@ -76,8 +75,7 @@ enum ioat_irq_mode {
struct ioatdma_device {
struct pci_dev *pdev;
void __iomem *reg_base;
- struct pci_pool *dma_pool;
- struct pci_pool *completion_pool;
+ struct dma_pool *completion_pool;
#define MAX_SED_POOLS 5
struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
struct dma_device dma_dev;
@@ -88,6 +86,16 @@ struct ioatdma_device {
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
+
+ /* shadow version for CB3.3 chan reset errata workaround */
+ u64 msixtba0;
+ u64 msixdata0;
+ u32 msixpba;
+};
+
+struct ioat_descs {
+ void *virt;
+ dma_addr_t hw;
};
struct ioatdma_chan {
@@ -100,7 +108,6 @@ struct ioatdma_chan {
#define IOAT_COMPLETION_ACK 1
#define IOAT_RESET_PENDING 2
#define IOAT_KOBJ_INIT_FAIL 3
- #define IOAT_RESHAPE_PENDING 4
#define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
@@ -133,6 +140,8 @@ struct ioatdma_chan {
u16 produce;
struct ioat_ring_ent **ring;
spinlock_t prep_lock;
+ struct ioat_descs descs[2];
+ int desc_chunks;
};
struct ioat_sysfs_entry {
@@ -302,10 +311,8 @@ static inline bool is_ioat_bug(unsigned long err)
}
#define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
- (min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
- (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+#define IOAT_MAX_DESCS 65536
+#define IOAT_DESCS_PER_2M 32768
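+
+/*
+ * 2 MiB chunk / 64 B descriptor (IOAT_DESC_SZ) = 32768 per chunk, so the
+ * 65536-descriptor maximum spans the two chunks in ioat_chan->descs[].
+ */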
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 690e3b4f8202..8e67895bcca3 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -73,6 +73,8 @@
int system_has_dca_enabled(struct pci_dev *pdev);
+#define IOAT_DESC_SZ 64
+
struct ioat_dma_descriptor {
uint32_t size;
union {
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4ef0c5e07912..efdee1a69fc4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -28,6 +28,7 @@
#include <linux/prefetch.h>
#include <linux/dca.h>
#include <linux/aer.h>
+#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
@@ -136,14 +137,6 @@ int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
"high-water mark for pushing ioat descriptors (default: 4)");
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
- "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
-int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
- "ioat+: upper limit for ring size (default: 16)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
@@ -504,23 +497,14 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
struct pci_dev *pdev = ioat_dma->pdev;
struct device *dev = &pdev->dev;
- /* DMA coherent memory pool for DMA descriptor allocations */
- ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
- sizeof(struct ioat_dma_descriptor),
- 64, 0);
- if (!ioat_dma->dma_pool) {
- err = -ENOMEM;
- goto err_dma_pool;
- }
-
- ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+ ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
sizeof(u64),
SMP_CACHE_BYTES,
SMP_CACHE_BYTES);
if (!ioat_dma->completion_pool) {
err = -ENOMEM;
- goto err_completion_pool;
+ goto err_out;
}
ioat_enumerate_channels(ioat_dma);
@@ -546,10 +530,8 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
err_self_test:
ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
- pci_pool_destroy(ioat_dma->completion_pool);
-err_completion_pool:
- pci_pool_destroy(ioat_dma->dma_pool);
-err_dma_pool:
+ dma_pool_destroy(ioat_dma->completion_pool);
+err_out:
return err;
}
@@ -559,8 +541,7 @@ static int ioat_register(struct ioatdma_device *ioat_dma)
if (err) {
ioat_disable_interrupts(ioat_dma);
- pci_pool_destroy(ioat_dma->completion_pool);
- pci_pool_destroy(ioat_dma->dma_pool);
+ dma_pool_destroy(ioat_dma->completion_pool);
}
return err;
@@ -576,8 +557,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
dma_async_device_unregister(dma);
- pci_pool_destroy(ioat_dma->dma_pool);
- pci_pool_destroy(ioat_dma->completion_pool);
+ dma_pool_destroy(ioat_dma->completion_pool);
INIT_LIST_HEAD(&dma->channels);
}
@@ -666,10 +646,19 @@ static void ioat_free_chan_resources(struct dma_chan *c)
ioat_free_ring_ent(desc, c);
}
+ for (i = 0; i < ioat_chan->desc_chunks; i++) {
+ dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ ioat_chan->descs[i].virt,
+ ioat_chan->descs[i].hw);
+ ioat_chan->descs[i].virt = NULL;
+ ioat_chan->descs[i].hw = 0;
+ }
+ ioat_chan->desc_chunks = 0;
+
kfree(ioat_chan->ring);
ioat_chan->ring = NULL;
ioat_chan->alloc_order = 0;
- pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+ dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
ioat_chan->completion_dma);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +690,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
ioat_chan->completion =
- pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+ dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
GFP_KERNEL, &ioat_chan->completion_dma);
if (!ioat_chan->completion)
return -ENOMEM;
@@ -712,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
writel(((u64)ioat_chan->completion_dma) >> 32,
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- order = ioat_get_alloc_order();
+ order = IOAT_MAX_ORDER;
ring = ioat_alloc_ring(c, order, GFP_KERNEL);
if (!ring)
return -ENOMEM;
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
index 6bb4a13a8fbd..243421af888f 100644
--- a/drivers/dma/ioat/prep.c
+++ b/drivers/dma/ioat/prep.c
@@ -26,7 +26,7 @@
#include "hw.h"
#include "dma.h"
-#define MAX_SCF 1024
+#define MAX_SCF 256
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e4f43125e0fb..f039cfadf17b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1300,10 +1300,10 @@ static int iop_adma_probe(struct platform_device *pdev)
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL);
+ adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
+ plat_data->pool_size,
+ &adev->dma_desc_pool,
+ GFP_KERNEL);
if (!adev->dma_desc_pool_virt) {
ret = -ENOMEM;
goto err_free_adev;
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 068e920ecb68..1502b24b7c7d 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -483,7 +483,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
mic_dma_intr_handler, mic_dma_thread_fn,
"mic dma_channel", ch, ch->ch_num);
if (IS_ERR(ch->cookie))
- return IS_ERR(ch->cookie);
+ return PTR_ERR(ch->cookie);
return 0;
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 14091f878f80..3922a5d56806 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -964,8 +964,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
* requires that we explicitly flush the writes
*/
mv_chan->dma_desc_pool_virt =
- dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
- &mv_chan->dma_desc_pool, GFP_KERNEL);
+ dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
+ GFP_KERNEL);
if (!mv_chan->dma_desc_pool_virt)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 9794b073d7d7..1e984e18c126 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
unsigned dma_sig;
bool cyclic;
bool paused;
+ bool running;
int dma_ch;
struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
/* Enable channel */
omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+ c->running = true;
}
static void omap_dma_stop(struct omap_chan *c)
@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val);
}
+
+ c->running = false;
}
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
struct omap_chan *c = to_omap_dma_chan(chan);
struct virt_dma_desc *vd;
enum dma_status ret;
- uint32_t ccr;
unsigned long flags;
- ccr = omap_dma_chan_read(c, CCR);
- /* The channel is no longer active, handle the completion right away */
- if (!(ccr & CCR_ENABLE))
- omap_dma_callback(c->dma_ch, 0, c);
-
ret = dma_cookie_status(chan, cookie, txstate);
+
+ if (!c->paused && c->running) {
+ uint32_t ccr = omap_dma_chan_read(c, CCR);
+ /*
+ * The channel is no longer active, set the return value
+ * accordingly
+ */
+ if (!(ccr & CCR_ENABLE))
+ ret = DMA_COMPLETE;
+ }
+
if (ret == DMA_COMPLETE || !txstate)
return ret;
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
- d->cicr = CICR_DROP_IE;
- if (tx_flags & DMA_PREP_INTERRUPT)
- d->cicr |= CICR_FRAME_IE;
+ d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
d->csdp = data_type;
@@ -1009,6 +1017,13 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void omap_dma_synchronize(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int omap_dma_pause(struct dma_chan *chan)
{
struct omap_chan *c = to_omap_dma_chan(chan);
@@ -1112,6 +1127,7 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.device_pause = omap_dma_pause;
od->ddev.device_resume = omap_dma_resume;
od->ddev.device_terminate_all = omap_dma_terminate_all;
+ od->ddev.device_synchronize = omap_dma_synchronize;
od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 17ee758b419f..372b4359da97 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -33,6 +33,9 @@
#define PL330_MAX_CHAN 8
#define PL330_MAX_IRQS 32
#define PL330_MAX_PERI 32
+#define PL330_MAX_BURST 16
+
+#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0)
enum pl330_cachectrl {
CCTRL0, /* Noncacheable and nonbufferable */
@@ -488,6 +491,17 @@ struct pl330_dmac {
/* Peripheral channels connected to this DMAC */
unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
+ int quirks;
+};
+
+static struct pl330_of_quirks {
+ char *quirk;
+ int id;
+} of_quirks[] = {
+ {
+ .quirk = "arm,pl330-broken-no-flushp",
+ .id = PL330_QUIRK_BROKEN_NO_FLUSHP,
+ }
};
struct dma_pl330_desc {
@@ -1137,47 +1151,67 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
return off;
}
-static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
+ u8 buf[], const struct _xfer_spec *pxs,
+ int cyc)
{
int off = 0;
+ enum pl330_cond cond;
+
+ if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+ cond = BURST;
+ else
+ cond = SINGLE;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
- off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
+ off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
off += _emit_ST(dry_run, &buf[off], ALWAYS);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+
+ if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+ off += _emit_FLUSHP(dry_run, &buf[off],
+ pxs->desc->peri);
}
return off;
}
-static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_memtodev(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
+ enum pl330_cond cond;
+
+ if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+ cond = BURST;
+ else
+ cond = SINGLE;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
off += _emit_LD(dry_run, &buf[off], ALWAYS);
- off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+ off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
+
+ if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+ off += _emit_FLUSHP(dry_run, &buf[off],
+ pxs->desc->peri);
}
return off;
}
-static int _bursts(unsigned dry_run, u8 buf[],
+static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
switch (pxs->desc->rqtype) {
case DMA_MEM_TO_DEV:
- off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
+ off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
break;
case DMA_DEV_TO_MEM:
- off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
+ off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
break;
case DMA_MEM_TO_MEM:
off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
@@ -1191,7 +1225,7 @@ static int _bursts(unsigned dry_run, u8 buf[],
}
/* Returns bytes consumed and updates bursts */
-static inline int _loop(unsigned dry_run, u8 buf[],
+static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
unsigned long *bursts, const struct _xfer_spec *pxs)
{
int cyc, cycmax, szlp, szlpend, szbrst, off;
@@ -1199,7 +1233,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
struct _arg_LPEND lpend;
if (*bursts == 1)
- return _bursts(dry_run, buf, pxs, 1);
+ return _bursts(pl330, dry_run, buf, pxs, 1);
/* Max iterations possible in DMALP is 256 */
if (*bursts >= 256*256) {
@@ -1217,7 +1251,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
}
szlp = _emit_LP(1, buf, 0, 0);
- szbrst = _bursts(1, buf, pxs, 1);
+ szbrst = _bursts(pl330, 1, buf, pxs, 1);
lpend.cond = ALWAYS;
lpend.forever = false;
@@ -1249,7 +1283,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
ljmp1 = off;
- off += _bursts(dry_run, &buf[off], pxs, cyc);
+ off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);
lpend.cond = ALWAYS;
lpend.forever = false;
@@ -1272,8 +1306,9 @@ static inline int _loop(unsigned dry_run, u8 buf[],
return off;
}
-static inline int _setup_loops(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs)
+static inline int _setup_loops(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
{
struct pl330_xfer *x = &pxs->desc->px;
u32 ccr = pxs->ccr;
@@ -1282,15 +1317,16 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[],
while (bursts) {
c = bursts;
- off += _loop(dry_run, &buf[off], &c, pxs);
+ off += _loop(pl330, dry_run, &buf[off], &c, pxs);
bursts -= c;
}
return off;
}
-static inline int _setup_xfer(unsigned dry_run, u8 buf[],
- const struct _xfer_spec *pxs)
+static inline int _setup_xfer(struct pl330_dmac *pl330,
+ unsigned dry_run, u8 buf[],
+ const struct _xfer_spec *pxs)
{
struct pl330_xfer *x = &pxs->desc->px;
int off = 0;
@@ -1301,7 +1337,7 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
/* Setup Loop(s) */
- off += _setup_loops(dry_run, &buf[off], pxs);
+ off += _setup_loops(pl330, dry_run, &buf[off], pxs);
return off;
}
@@ -1310,8 +1346,9 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
* A req is a sequence of one or more xfer units.
* Returns the number of bytes taken to setup the MC for the req.
*/
-static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
- unsigned index, struct _xfer_spec *pxs)
+static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
+ struct pl330_thread *thrd, unsigned index,
+ struct _xfer_spec *pxs)
{
struct _pl330_req *req = &thrd->req[index];
struct pl330_xfer *x;
@@ -1328,7 +1365,7 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
return -EINVAL;
- off += _setup_xfer(dry_run, &buf[off], pxs);
+ off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
/* DMASEV peripheral/event */
off += _emit_SEV(dry_run, &buf[off], thrd->ev);
@@ -1422,7 +1459,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
xs.desc = desc;
/* First dry run to check if req is acceptable */
- ret = _setup_req(1, thrd, idx, &xs);
+ ret = _setup_req(pl330, 1, thrd, idx, &xs);
if (ret < 0)
goto xfer_exit;
@@ -1436,7 +1473,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
/* Hook the request */
thrd->lstenq = idx;
thrd->req[idx].desc = desc;
- _setup_req(0, thrd, idx, &xs);
+ _setup_req(pl330, 0, thrd, idx, &xs);
ret = 0;
@@ -2781,6 +2818,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
struct resource *res;
int i, ret, irq;
int num_chan;
+ struct device_node *np = adev->dev.of_node;
pdat = dev_get_platdata(&adev->dev);
@@ -2800,6 +2838,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
+ /* get quirk */
+ for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
+ if (of_property_read_bool(np, of_quirks[i].quirk))
+ pl330->quirks |= of_quirks[i].id;
+
res = &adev->res;
pl330->base = devm_ioremap_resource(&adev->dev, res);
if (IS_ERR(pl330->base))
@@ -2895,6 +2938,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
+ 1 : PL330_MAX_BURST);
ret = dma_async_device_register(pd);
if (ret) {
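
With pd->max_burst exported above, dmaengine clients can discover the quirk's effect at runtime instead of hard-coding burst lengths. A minimal consumer-side sketch (hypothetical code, not part of this patch; assumes the channel was requested elsewhere):

#include <linux/kernel.h>
#include <linux/dmaengine.h>

/* Clamp the requested burst to what this PL330 instance advertises, so
 * controllers with the broken-flushp quirk (max_burst == 1) fall back
 * to single transfers automatically. */
static int example_config_burst(struct dma_chan *chan,
				struct dma_slave_config *cfg)
{
	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps))
		cfg->src_maxburst = min_t(u32, cfg->src_maxburst,
					  caps.max_burst);
	return dmaengine_slave_config(chan, cfg);
}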
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index debca824bed6..77c1c44009d8 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -122,6 +122,7 @@ struct pxad_chan {
struct pxad_device {
struct dma_device slave;
int nr_chans;
+ int nr_requestors;
void __iomem *base;
struct pxad_phy *phys;
spinlock_t phy_lock; /* Phy association */
@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
return;
/* clear the channel mapping in DRCMR */
- if (chan->drcmr <= DRCMR_CHLNUM) {
+ if (chan->drcmr <= pdev->nr_requestors) {
reg = pxad_drcmr(chan->drcmr);
writel_relaxed(0, chan->phy->base + reg);
}
@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
+ struct pxad_device *pdev;
u32 reg, dalgn;
if (!phy->vchan)
@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
"%s(); phy=%p(%d) misaligned=%d\n", __func__,
phy, phy->idx, misaligned);
- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
+ if (phy->vchan->drcmr <= pdev->nr_requestors) {
reg = pxad_drcmr(phy->vchan->drcmr);
writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
}
@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
{
u32 maxburst = 0, dev_addr = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
*dcmd = 0;
if (dir == DMA_DEV_TO_MEM) {
@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
*dcmd |= PXA_DCMD_INCTRGADDR;
- if (chan->drcmr <= DRCMR_CHLNUM)
+ if (chan->drcmr <= pdev->nr_requestors)
*dcmd |= PXA_DCMD_FLOWSRC;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
*dcmd |= PXA_DCMD_INCSRCADDR;
- if (chan->drcmr <= DRCMR_CHLNUM)
+ if (chan->drcmr <= pdev->nr_requestors)
*dcmd |= PXA_DCMD_FLOWTRG;
}
if (dir == DMA_MEM_TO_MEM)
@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
static int pxad_init_dmadev(struct platform_device *op,
struct pxad_device *pdev,
- unsigned int nr_phy_chans)
+ unsigned int nr_phy_chans,
+ unsigned int nr_requestors)
{
int ret;
unsigned int i;
struct pxad_chan *c;
pdev->nr_chans = nr_phy_chans;
+ pdev->nr_requestors = nr_requestors;
INIT_LIST_HEAD(&pdev->slave.channels);
pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
- int ret, dma_channels = 0;
+ int ret, dma_channels = 0, nb_requestors = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
return PTR_ERR(pdev->base);
of_id = of_match_device(pxad_dt_ids, &op->dev);
- if (of_id)
+ if (of_id) {
of_property_read_u32(op->dev.of_node, "#dma-channels",
&dma_channels);
- else if (pdata && pdata->dma_channels)
+ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+ &nb_requestors);
+		if (ret) {
+			dev_warn(pdev->slave.dev,
+				 "#dma-requests missing in OF, using default 32: %d\n",
+				 ret);
+			nb_requestors = 32;
+		}
+ } else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
- else
+ nb_requestors = pdata->nb_requestors;
+ } else {
dma_channels = 32; /* default 32 channel */
+ }
dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
@@ -1423,7 +1439,7 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.descriptor_reuse = true;
pdev->slave.dev = &op->dev;
- ret = pxad_init_dmadev(op, pdev, dma_channels);
+ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
if (ret) {
dev_err(pdev->slave.dev, "unable to register\n");
return ret;
@@ -1442,7 +1458,8 @@ static int pxad_probe(struct platform_device *op)
platform_set_drvdata(op, pdev);
pxad_init_debugfs(pdev);
- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
+	dev_info(pdev->slave.dev, "initialized %d channels (%d requestors)\n",
+		 dma_channels, nb_requestors);
return 0;
}
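
The probe now treats "#dma-requests" as optional and falls back to 32. A reusable sketch of that optional-u32-property pattern (hypothetical helper, not part of this patch):

#include <linux/of.h>

/* Read a u32 property, returning a caller-supplied default when the
 * property is missing or malformed. */
static u32 example_read_u32_default(struct device_node *np,
				    const char *prop, u32 def)
{
	u32 val;

	if (of_property_read_u32(np, prop, &val))
		return def;
	return val;
}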
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
new file mode 100644
index 000000000000..a7761c4025f4
--- /dev/null
+++ b/drivers/dma/qcom/Kconfig
@@ -0,0 +1,29 @@
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+	help
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
+config QCOM_HIDMA_MGMT
+ tristate "Qualcomm Technologies HIDMA Management support"
+ select DMA_ENGINE
+ help
+	  Enable support for the Qualcomm Technologies HIDMA management
+	  interface. Each DMA device requires one management interface
+	  driver for basic initialization before the QCOM_HIDMA channel
+	  driver can start managing the channels. In a virtualized
+	  environment, the guest OS runs the QCOM_HIDMA channel driver
+	  and the host runs the QCOM_HIDMA_MGMT management driver.
+
+config QCOM_HIDMA
+ tristate "Qualcomm Technologies HIDMA Channel support"
+ select DMA_ENGINE
+ help
+ Enable support for the Qualcomm Technologies HIDMA controller.
+ The HIDMA controller supports optimized buffer copies
+	  (user to kernel, kernel to kernel, etc.). It only supports
+	  the memcpy interface; the core is not intended for
+	  general-purpose slave DMA.
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
new file mode 100644
index 000000000000..bfea6990229f
--- /dev/null
+++ b/drivers/dma/qcom/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
+obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
+hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5a250cdc8376..d5e0a9c3ad5d 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -49,13 +49,13 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
-#include "dmaengine.h"
-#include "virt-dma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
struct bam_desc_hw {
- u32 addr; /* Buffer physical address */
- u16 size; /* Buffer size in bytes */
- u16 flags;
+ __le32 addr; /* Buffer physical address */
+ __le16 size; /* Buffer size in bytes */
+ __le16 flags;
};
#define DESC_FLAG_INT BIT(15)
@@ -502,8 +502,8 @@ static int bam_alloc_chan(struct dma_chan *chan)
return 0;
/* allocate FIFO descriptor space, but only if necessary */
- bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
- &bchan->fifo_phys, GFP_KERNEL);
+ bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+ &bchan->fifo_phys, GFP_KERNEL);
if (!bchan->fifo_virt) {
dev_err(bdev->dev, "Failed to allocate desc fifo\n");
@@ -538,8 +538,8 @@ static void bam_free_chan(struct dma_chan *chan)
bam_reset_channel(bchan);
spin_unlock_irqrestore(&bchan->vc.lock, flags);
- dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
- bchan->fifo_phys);
+ dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+ bchan->fifo_phys);
bchan->fifo_virt = NULL;
/* mask irq for pipe/channel */
@@ -632,14 +632,15 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
unsigned int curr_offset = 0;
do {
- desc->addr = sg_dma_address(sg) + curr_offset;
+ desc->addr = cpu_to_le32(sg_dma_address(sg) +
+ curr_offset);
if (remainder > BAM_MAX_DATA_SIZE) {
- desc->size = BAM_MAX_DATA_SIZE;
+ desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE);
remainder -= BAM_MAX_DATA_SIZE;
curr_offset += BAM_MAX_DATA_SIZE;
} else {
- desc->size = remainder;
+ desc->size = cpu_to_le16(remainder);
remainder = 0;
}
@@ -915,9 +916,11 @@ static void bam_start_dma(struct bam_chan *bchan)
/* set any special flags on the last descriptor */
if (async_desc->num_desc == async_desc->xfer_len)
- desc[async_desc->xfer_len - 1].flags = async_desc->flags;
+ desc[async_desc->xfer_len - 1].flags =
+ cpu_to_le16(async_desc->flags);
else
- desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+ desc[async_desc->xfer_len - 1].flags |=
+ cpu_to_le16(DESC_FLAG_INT);
if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
u32 partial = MAX_DESCRIPTORS - bchan->tail;
@@ -1231,9 +1234,9 @@ static int bam_dma_remove(struct platform_device *pdev)
bam_dma_terminate_all(&bdev->channels[i].vc.chan);
tasklet_kill(&bdev->channels[i].vc.task);
- dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
- bdev->channels[i].fifo_virt,
- bdev->channels[i].fifo_phys);
+ dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
+ bdev->channels[i].fifo_virt,
+ bdev->channels[i].fifo_phys);
}
tasklet_kill(&bdev->task);
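
Since bam_desc_hw now uses explicit __le32/__le16 fields, CPU-side reads must also go through the byte-order helpers; sparse will flag any direct access. A minimal read-back sketch (hypothetical helper, not part of this patch):

#include <asm/byteorder.h>

/* Recover the CPU-order transfer size from a hardware descriptor. */
static u16 example_bam_desc_size(const struct bam_desc_hw *desc)
{
	return le16_to_cpu(desc->size);
}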
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
new file mode 100644
index 000000000000..cccc78efbca9
--- /dev/null
+++ b/drivers/dma/qcom/hidma.c
@@ -0,0 +1,706 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/* Linux Foundation elects GPLv2 license only. */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/property.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+
+#include "../dmaengine.h"
+#include "hidma.h"
+
+/*
+ * The default idle time is 2 seconds. It can be overridden at
+ * runtime by writing to
+ * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
+ */
+#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
+#define HIDMA_ERR_INFO_SW 0xFF
+#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
+#define HIDMA_NR_DEFAULT_DESC 10
+
+static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
+{
+ return container_of(dmadev, struct hidma_dev, ddev);
+}
+
+static inline
+struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
+{
+ return container_of(_lldevp, struct hidma_dev, lldev);
+}
+
+static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
+{
+ return container_of(dmach, struct hidma_chan, chan);
+}
+
+static inline
+struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct hidma_desc, desc);
+}
+
+static void hidma_free(struct hidma_dev *dmadev)
+{
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+}
+
+static unsigned int nr_desc_prm;
+module_param(nr_desc_prm, uint, 0644);
+MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (0 = use device default)");
+
+/* process completed descriptors */
+static void hidma_process_completed(struct hidma_chan *mchan)
+{
+ struct dma_device *ddev = mchan->chan.device;
+ struct hidma_dev *mdma = to_hidma_dev(ddev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t last_cookie;
+ struct hidma_desc *mdesc;
+ unsigned long irqflags;
+ struct list_head list;
+
+ INIT_LIST_HEAD(&list);
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(mdesc, &list, node) {
+ enum dma_status llstat;
+
+ desc = &mdesc->desc;
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ dma_cookie_complete(desc);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+ if (desc->callback && (llstat == DMA_COMPLETE))
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&list, &mchan->free);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+/*
+ * Called once for each submitted descriptor.
+ * PM is locked once for each descriptor that is currently
+ * in execution.
+ */
+static void hidma_callback(void *data)
+{
+ struct hidma_desc *mdesc = data;
+ struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
+ struct dma_device *ddev = mchan->chan.device;
+ struct hidma_dev *dmadev = to_hidma_dev(ddev);
+ unsigned long irqflags;
+ bool queued = false;
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (mdesc->node.next) {
+ /* Delete from the active list, add to completed list */
+ list_move_tail(&mdesc->node, &mchan->completed);
+ queued = true;
+
+ /* calculate the next running descriptor */
+ mchan->running = list_first_entry(&mchan->active,
+ struct hidma_desc, node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ hidma_process_completed(mchan);
+
+ if (queued) {
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+}
+
+static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
+{
+ struct hidma_chan *mchan;
+ struct dma_device *ddev;
+
+ mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
+ if (!mchan)
+ return -ENOMEM;
+
+ ddev = &dmadev->ddev;
+ mchan->dma_sig = dma_sig;
+ mchan->dmadev = dmadev;
+ mchan->chan.device = ddev;
+ dma_cookie_init(&mchan->chan);
+
+ INIT_LIST_HEAD(&mchan->free);
+ INIT_LIST_HEAD(&mchan->prepared);
+ INIT_LIST_HEAD(&mchan->active);
+ INIT_LIST_HEAD(&mchan->completed);
+
+ spin_lock_init(&mchan->lock);
+ list_add_tail(&mchan->chan.device_node, &ddev->channels);
+ dmadev->ddev.chancnt++;
+ return 0;
+}
+
+static void hidma_issue_task(unsigned long arg)
+{
+ struct hidma_dev *dmadev = (struct hidma_dev *)arg;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ hidma_ll_start(dmadev->lldev);
+}
+
+static void hidma_issue_pending(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!mchan->running) {
+ struct hidma_desc *desc = list_first_entry(&mchan->active,
+ struct hidma_desc,
+ node);
+ mchan->running = desc;
+ }
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* PM will be released in hidma_callback function. */
+ status = pm_runtime_get(dmadev->ddev.dev);
+ if (status < 0)
+ tasklet_schedule(&dmadev->task);
+ else
+ hidma_ll_start(dmadev->lldev);
+}
+
+static enum dma_status hidma_tx_status(struct dma_chan *dmach,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dmach, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
+ unsigned long flags;
+ dma_cookie_t runcookie;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (mchan->running)
+ runcookie = mchan->running->desc.cookie;
+ else
+ runcookie = -EINVAL;
+
+ if (runcookie == cookie)
+ ret = DMA_PAUSED;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+
+ return ret;
+}
+
+/*
+ * Submit descriptor to hardware.
+ * Lock the PM for each descriptor we are sending.
+ */
+static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct hidma_chan *mchan = to_hidma_chan(txd->chan);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ struct hidma_desc *mdesc;
+ unsigned long irqflags;
+ dma_cookie_t cookie;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (!hidma_ll_isenabled(dmadev->lldev)) {
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return -ENODEV;
+ }
+
+ mdesc = container_of(txd, struct hidma_desc, desc);
+ spin_lock_irqsave(&mchan->lock, irqflags);
+
+ /* Move descriptor to active */
+ list_move_tail(&mdesc->node, &mchan->active);
+
+ /* Update cookie */
+ cookie = dma_cookie_assign(txd);
+
+ hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return cookie;
+}
+
+static int hidma_alloc_chan_resources(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ struct hidma_desc *mdesc, *tmp;
+ unsigned long irqflags;
+ LIST_HEAD(descs);
+ unsigned int i;
+ int rc = 0;
+
+ if (mchan->allocated)
+ return 0;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < dmadev->nr_descriptors; i++) {
+ mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
+ if (!mdesc) {
+ rc = -ENOMEM;
+ break;
+ }
+ dma_async_tx_descriptor_init(&mdesc->desc, dmach);
+ mdesc->desc.tx_submit = hidma_tx_submit;
+
+ rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
+ "DMA engine", hidma_callback, mdesc,
+ &mdesc->tre_ch);
+ if (rc) {
+ dev_err(dmach->device->dev,
+ "channel alloc failed at %u\n", i);
+ kfree(mdesc);
+ break;
+ }
+ list_add_tail(&mdesc->node, &descs);
+ }
+
+ if (rc) {
+ /* return the allocated descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+ hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
+ kfree(mdesc);
+ }
+ return rc;
+ }
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&descs, &mchan->free);
+ mchan->allocated = true;
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+ return 1;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_desc *mdesc = NULL;
+ struct hidma_dev *mdma = mchan->dmadev;
+ unsigned long irqflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ if (!mdesc)
+ return NULL;
+
+ hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+ src, dest, len, flags);
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return &mdesc->desc;
+}
+
+static int hidma_terminate_channel(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan = to_hidma_chan(chan);
+ struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+ struct hidma_desc *tmp, *mdesc;
+ unsigned long irqflags;
+ LIST_HEAD(list);
+ int rc;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ /* give completed requests a chance to finish */
+ hidma_process_completed(mchan);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_init(&mchan->active, &list);
+ list_splice_init(&mchan->prepared, &list);
+ list_splice_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ /* this suspends the existing transfer */
+ rc = hidma_ll_pause(dmadev->lldev);
+ if (rc) {
+ dev_err(dmadev->ddev.dev, "channel did not pause\n");
+ goto out;
+ }
+
+ /* return all user requests */
+ list_for_each_entry_safe(mdesc, tmp, &list, node) {
+ struct dma_async_tx_descriptor *txd = &mdesc->desc;
+ dma_async_tx_callback callback = mdesc->desc.callback;
+ void *param = mdesc->desc.callback_param;
+
+ dma_descriptor_unmap(txd);
+
+ if (callback)
+ callback(param);
+
+ dma_run_dependencies(txd);
+
+ /* move myself to free_list */
+ list_move(&mdesc->node, &mchan->free);
+ }
+
+ rc = hidma_ll_resume(dmadev->lldev);
+out:
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return rc;
+}
+
+static int hidma_terminate_all(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan = to_hidma_chan(chan);
+ struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+ int rc;
+
+ rc = hidma_terminate_channel(chan);
+ if (rc)
+ return rc;
+
+ /* reinitialize the hardware */
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ rc = hidma_ll_setup(dmadev->lldev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return rc;
+}
+
+static void hidma_free_chan_resources(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *mdma = mchan->dmadev;
+ struct hidma_desc *mdesc, *tmp;
+ unsigned long irqflags;
+ LIST_HEAD(descs);
+
+ /* terminate running transactions and free descriptors */
+ hidma_terminate_channel(dmach);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+
+ /* Move data */
+ list_splice_tail_init(&mchan->free, &descs);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+ hidma_ll_free(mdma->lldev, mdesc->tre_ch);
+ list_del(&mdesc->node);
+ kfree(mdesc);
+ }
+
+ mchan->allocated = 0;
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+static int hidma_pause(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan;
+ struct hidma_dev *dmadev;
+
+ mchan = to_hidma_chan(chan);
+ dmadev = to_hidma_dev(mchan->chan.device);
+ if (!mchan->paused) {
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (hidma_ll_pause(dmadev->lldev))
+ dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+ mchan->paused = true;
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+ return 0;
+}
+
+static int hidma_resume(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan;
+ struct hidma_dev *dmadev;
+ int rc = 0;
+
+ mchan = to_hidma_chan(chan);
+ dmadev = to_hidma_dev(mchan->chan.device);
+ if (mchan->paused) {
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ rc = hidma_ll_resume(dmadev->lldev);
+ if (!rc)
+ mchan->paused = false;
+ else
+ dev_err(dmadev->ddev.dev,
+ "failed to resume the channel");
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+ return rc;
+}
+
+static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+
+ /*
+ * All interrupts are request driven.
+ * HW doesn't send an interrupt by itself.
+ */
+ return hidma_ll_inthandler(chirq, lldev);
+}
+
+static int hidma_probe(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev;
+ struct resource *trca_resource;
+ struct resource *evca_resource;
+ int chirq;
+ void __iomem *evca;
+ void __iomem *trca;
+ int rc;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+ if (IS_ERR(trca)) {
+		rc = PTR_ERR(trca);
+ goto bailout;
+ }
+
+ evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+ if (IS_ERR(evca)) {
+		rc = PTR_ERR(evca);
+ goto bailout;
+ }
+
+ /*
+ * This driver only handles the channel IRQs.
+ * Common IRQ is handled by the management driver.
+ */
+ chirq = platform_get_irq(pdev, 0);
+ if (chirq < 0) {
+ rc = -ENODEV;
+ goto bailout;
+ }
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+ if (!dmadev) {
+ rc = -ENOMEM;
+ goto bailout;
+ }
+
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+ spin_lock_init(&dmadev->lock);
+ dmadev->ddev.dev = &pdev->dev;
+ pm_runtime_get_sync(dmadev->ddev.dev);
+
+ dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+ if (WARN_ON(!pdev->dev.dma_mask)) {
+ rc = -ENXIO;
+ goto dmafree;
+ }
+
+ dmadev->dev_evca = evca;
+ dmadev->evca_resource = evca_resource;
+ dmadev->dev_trca = trca;
+ dmadev->trca_resource = trca_resource;
+ dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+ dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
+ dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
+ dmadev->ddev.device_tx_status = hidma_tx_status;
+ dmadev->ddev.device_issue_pending = hidma_issue_pending;
+ dmadev->ddev.device_pause = hidma_pause;
+ dmadev->ddev.device_resume = hidma_resume;
+ dmadev->ddev.device_terminate_all = hidma_terminate_all;
+ dmadev->ddev.copy_align = 8;
+
+ device_property_read_u32(&pdev->dev, "desc-count",
+ &dmadev->nr_descriptors);
+
+ if (!dmadev->nr_descriptors && nr_desc_prm)
+ dmadev->nr_descriptors = nr_desc_prm;
+
+ if (!dmadev->nr_descriptors)
+ dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
+
+ dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+
+ /* Set DMA mask to 64 bits. */
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+		dev_warn(&pdev->dev, "unable to set coherent mask to 64 bits\n");
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ goto dmafree;
+ }
+
+ dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
+ dmadev->nr_descriptors, dmadev->dev_trca,
+ dmadev->dev_evca, dmadev->chidx);
+ if (!dmadev->lldev) {
+ rc = -EPROBE_DEFER;
+ goto dmafree;
+ }
+
+ rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
+ "qcom-hidma", dmadev->lldev);
+ if (rc)
+ goto uninit;
+
+ rc = hidma_chan_init(dmadev, 0);
+ if (rc)
+ goto uninit;
+
+ rc = dma_async_device_register(&dmadev->ddev);
+ if (rc)
+ goto uninit;
+
+ dmadev->irq = chirq;
+ tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+ dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
+ platform_set_drvdata(pdev, dmadev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return 0;
+
+uninit:
+ hidma_ll_uninit(dmadev->lldev);
+dmafree:
+ if (dmadev)
+ hidma_free(dmadev);
+bailout:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return rc;
+}
+
+static int hidma_remove(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ dma_async_device_unregister(&dmadev->ddev);
+ devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ hidma_ll_uninit(dmadev->lldev);
+ hidma_free(dmadev);
+
+ dev_info(&pdev->dev, "HI-DMA engine removed\n");
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_acpi_ids[] = {
+ {"QCOM8061"},
+ {},
+};
+#endif
+
+static const struct of_device_id hidma_match[] = {
+ {.compatible = "qcom,hidma-1.0",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hidma_match);
+
+static struct platform_driver hidma_driver = {
+ .probe = hidma_probe,
+ .remove = hidma_remove,
+ .driver = {
+ .name = "hidma",
+ .of_match_table = hidma_match,
+ .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
+ },
+};
+
+module_platform_driver(hidma_driver);
+MODULE_LICENSE("GPL v2");
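
As the Kconfig text notes, the engine exposes only memcpy. A minimal consumer-side sketch of driving it through the generic dmaengine API (hypothetical code, not part of this patch; assumes the caller already DMA-mapped src and dst):

#include <linux/dmaengine.h>

static int example_hidma_copy(struct dma_chan *chan, dma_addr_t dst,
			      dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* hidma_prep_dma_memcpy() is reached through the device op */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);	/* calls hidma_tx_submit() */
	dma_async_issue_pending(chan);	/* calls hidma_issue_pending() */

	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}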
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
new file mode 100644
index 000000000000..231e306f6d87
--- /dev/null
+++ b/drivers/dma/qcom/hidma.h
@@ -0,0 +1,160 @@
+/*
+ * Qualcomm Technologies HIDMA data structures
+ *
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QCOM_HIDMA_H
+#define QCOM_HIDMA_H
+
+#include <linux/kfifo.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+
+#define TRE_SIZE 32 /* each TRE is 32 bytes */
+#define TRE_CFG_IDX 0
+#define TRE_LEN_IDX 1
+#define TRE_SRC_LOW_IDX 2
+#define TRE_SRC_HI_IDX 3
+#define TRE_DEST_LOW_IDX 4
+#define TRE_DEST_HI_IDX 5
+
+struct hidma_tx_status {
+ u8 err_info; /* error record in this transfer */
+ u8 err_code; /* completion code */
+};
+
+struct hidma_tre {
+ atomic_t allocated; /* if this channel is allocated */
+ bool queued; /* flag whether this is pending */
+ u16 status; /* status */
+ u32 chidx; /* index of the tre */
+ u32 dma_sig; /* signature of the tre */
+ const char *dev_name; /* name of the device */
+ void (*callback)(void *data); /* requester callback */
+	void *data;			/* data associated with this channel */
+	struct hidma_lldev *lldev;	/* low-level DMA device pointer */
+	u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
+	u32 tre_index;			/* offset where this TRE was written */
+	u32 int_flags;			/* interrupt flags */
+};
+
+struct hidma_lldev {
+ bool initialized; /* initialized flag */
+ u8 trch_state; /* trch_state of the device */
+ u8 evch_state; /* evch_state of the device */
+ u8 chidx; /* channel index in the core */
+ u32 nr_tres; /* max number of configs */
+ spinlock_t lock; /* reentrancy */
+ struct hidma_tre *trepool; /* trepool of user configs */
+ struct device *dev; /* device */
+ void __iomem *trca; /* Transfer Channel address */
+ void __iomem *evca; /* Event Channel address */
+ struct hidma_tre
+ **pending_tre_list; /* Pointers to pending TREs */
+ struct hidma_tx_status
+			*tx_status_list; /* status of pending TREs */
+ s32 pending_tre_count; /* Number of TREs pending */
+
+ void *tre_ring; /* TRE ring */
+ dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */
+ u32 tre_ring_size; /* Byte size of the ring */
+ u32 tre_processed_off; /* last processed TRE */
+
+ void *evre_ring; /* EVRE ring */
+ dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */
+ u32 evre_ring_size; /* Byte size of the ring */
+ u32 evre_processed_off; /* last processed EVRE */
+
+ u32 tre_write_offset; /* TRE write location */
+ struct tasklet_struct task; /* task delivering notifications */
+ DECLARE_KFIFO_PTR(handoff_fifo,
+ struct hidma_tre *); /* pending TREs FIFO */
+};
+
+struct hidma_desc {
+ struct dma_async_tx_descriptor desc;
+	/* linked-list node for this channel */
+ struct list_head node;
+ u32 tre_ch;
+};
+
+struct hidma_chan {
+ bool paused;
+ bool allocated;
+ char dbg_name[16];
+ u32 dma_sig;
+
+ /*
+	 * Active descriptor on this channel. It is used by the
+	 * DMA-complete notification to locate the descriptor that
+	 * initiated the transfer.
+ */
+ struct dentry *debugfs;
+ struct dentry *stats;
+ struct hidma_dev *dmadev;
+ struct hidma_desc *running;
+
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head active;
+ struct list_head completed;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+};
+
+struct hidma_dev {
+ int irq;
+ int chidx;
+ u32 nr_descriptors;
+
+ struct hidma_lldev *lldev;
+ void __iomem *dev_trca;
+ struct resource *trca_resource;
+ void __iomem *dev_evca;
+ struct resource *evca_resource;
+
+ /* used to protect the pending channel list*/
+ spinlock_t lock;
+ struct dma_device ddev;
+
+ struct dentry *debugfs;
+ struct dentry *stats;
+
+ /* Task delivering issue_pending */
+ struct tasklet_struct task;
+};
+
+int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
+ const char *dev_name,
+ void (*callback)(void *data), void *data, u32 *tre_ch);
+
+void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
+enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
+bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
+void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
+void hidma_ll_start(struct hidma_lldev *llhndl);
+int hidma_ll_pause(struct hidma_lldev *llhndl);
+int hidma_ll_resume(struct hidma_lldev *llhndl);
+void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
+ dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+int hidma_ll_setup(struct hidma_lldev *lldev);
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
+ void __iomem *trca, void __iomem *evca,
+ u8 chidx);
+int hidma_ll_uninit(struct hidma_lldev *llhndl);
+irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
+ u8 err_code);
+#endif
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
new file mode 100644
index 000000000000..ef491b893f40
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -0,0 +1,302 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine Management interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
+
+#include "hidma_mgmt.h"
+
+#define HIDMA_QOS_N_OFFSET 0x300
+#define HIDMA_CFG_OFFSET 0x400
+#define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C
+#define HIDMA_MAX_XACTIONS_OFFSET 0x420
+#define HIDMA_HW_VERSION_OFFSET 0x424
+#define HIDMA_CHRESET_TIMEOUT_OFFSET 0x418
+
+#define HIDMA_MAX_WR_XACTIONS_MASK GENMASK(4, 0)
+#define HIDMA_MAX_RD_XACTIONS_MASK GENMASK(4, 0)
+#define HIDMA_WEIGHT_MASK GENMASK(6, 0)
+#define HIDMA_MAX_BUS_REQ_LEN_MASK GENMASK(15, 0)
+#define HIDMA_CHRESET_TIMEOUT_MASK GENMASK(19, 0)
+
+#define HIDMA_MAX_WR_XACTIONS_BIT_POS 16
+#define HIDMA_MAX_BUS_WR_REQ_BIT_POS 16
+#define HIDMA_WRR_BIT_POS 8
+#define HIDMA_PRIORITY_BIT_POS 15
+
+#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
+#define HIDMA_MAX_CHANNEL_WEIGHT 15
+
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
+{
+ unsigned int i;
+ u32 val;
+
+ if (!is_power_of_2(mgmtdev->max_write_request) ||
+ (mgmtdev->max_write_request < 128) ||
+ (mgmtdev->max_write_request > 1024)) {
+ dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
+ mgmtdev->max_write_request);
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(mgmtdev->max_read_request) ||
+ (mgmtdev->max_read_request < 128) ||
+ (mgmtdev->max_read_request > 1024)) {
+ dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
+ mgmtdev->max_read_request);
+ return -EINVAL;
+ }
+
+ if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max_wr_xactions cannot be bigger than %ld\n",
+ HIDMA_MAX_WR_XACTIONS_MASK);
+ return -EINVAL;
+ }
+
+ if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max_rd_xactions cannot be bigger than %ld\n",
+ HIDMA_MAX_RD_XACTIONS_MASK);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mgmtdev->dma_channels; i++) {
+ if (mgmtdev->priority[i] > 1) {
+ dev_err(&mgmtdev->pdev->dev,
+ "priority can be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max value of weight can be %d.\n",
+ HIDMA_MAX_CHANNEL_WEIGHT);
+ return -EINVAL;
+ }
+
+ /* weight needs to be at least one */
+ if (mgmtdev->weight[i] == 0)
+ mgmtdev->weight[i] = 1;
+ }
+
+ pm_runtime_get_sync(&mgmtdev->pdev->dev);
+ val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+ val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
+ val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
+ val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
+ val |= mgmtdev->max_read_request;
+ writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+
+ val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+ val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
+ val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
+ val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
+ val |= mgmtdev->max_rd_xactions;
+ writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+
+ mgmtdev->hw_version =
+ readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
+ mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
+ mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;
+
+ for (i = 0; i < mgmtdev->dma_channels; i++) {
+ u32 weight = mgmtdev->weight[i];
+ u32 priority = mgmtdev->priority[i];
+
+ val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+ val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
+ val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
+ val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
+ val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
+ writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+ }
+
+ val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+ val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
+ val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
+ writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+
+ pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
+ pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
+
+static int hidma_mgmt_probe(struct platform_device *pdev)
+{
+ struct hidma_mgmt_dev *mgmtdev;
+ struct resource *res;
+ void __iomem *virtaddr;
+ int irq;
+ int rc;
+ u32 val;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ virtaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(virtaddr)) {
+		rc = PTR_ERR(virtaddr);
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "irq resources not found\n");
+ rc = irq;
+ goto out;
+ }
+
+ mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
+ if (!mgmtdev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mgmtdev->pdev = pdev;
+ mgmtdev->addrsize = resource_size(res);
+ mgmtdev->virtaddr = virtaddr;
+
+ rc = device_property_read_u32(&pdev->dev, "dma-channels",
+ &mgmtdev->dma_channels);
+ if (rc) {
+ dev_err(&pdev->dev, "number of channels missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev,
+ "channel-reset-timeout-cycles",
+ &mgmtdev->chreset_timeout_cycles);
+ if (rc) {
+ dev_err(&pdev->dev, "channel reset timeout missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
+ &mgmtdev->max_write_request);
+ if (rc) {
+ dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
+ &mgmtdev->max_read_request);
+ if (rc) {
+ dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
+ &mgmtdev->max_wr_xactions);
+ if (rc) {
+ dev_err(&pdev->dev, "max-write-transactions missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
+ &mgmtdev->max_rd_xactions);
+ if (rc) {
+ dev_err(&pdev->dev, "max-read-transactions missing\n");
+ goto out;
+ }
+
+ mgmtdev->priority = devm_kcalloc(&pdev->dev,
+ mgmtdev->dma_channels,
+ sizeof(*mgmtdev->priority),
+ GFP_KERNEL);
+ if (!mgmtdev->priority) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mgmtdev->weight = devm_kcalloc(&pdev->dev,
+ mgmtdev->dma_channels,
+ sizeof(*mgmtdev->weight), GFP_KERNEL);
+ if (!mgmtdev->weight) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = hidma_mgmt_setup(mgmtdev);
+ if (rc) {
+ dev_err(&pdev->dev, "setup failed\n");
+ goto out;
+ }
+
+ /* start the HW */
+ val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+ val |= 1;
+ writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+
+ rc = hidma_mgmt_init_sys(mgmtdev);
+ if (rc) {
+ dev_err(&pdev->dev, "sysfs setup failed\n");
+ goto out;
+ }
+
+ dev_info(&pdev->dev,
+ "HW rev: %d.%d @ %pa with %d physical channels\n",
+ mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
+ &res->start, mgmtdev->dma_channels);
+
+ platform_set_drvdata(pdev, mgmtdev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ return 0;
+out:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return rc;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
+ {"QCOM8060"},
+ {},
+};
+#endif
+
+static const struct of_device_id hidma_mgmt_match[] = {
+ {.compatible = "qcom,hidma-mgmt-1.0",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
+
+static struct platform_driver hidma_mgmt_driver = {
+ .probe = hidma_mgmt_probe,
+ .driver = {
+ .name = "hidma-mgmt",
+ .of_match_table = hidma_mgmt_match,
+ .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
+ },
+};
+
+module_platform_driver(hidma_mgmt_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h
new file mode 100644
index 000000000000..f7daf33769f4
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.h
@@ -0,0 +1,39 @@
+/*
+ * Qualcomm Technologies HIDMA Management common header
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+struct hidma_mgmt_dev {
+ u8 hw_version_major;
+ u8 hw_version_minor;
+
+ u32 max_wr_xactions;
+ u32 max_rd_xactions;
+ u32 max_write_request;
+ u32 max_read_request;
+ u32 dma_channels;
+ u32 chreset_timeout_cycles;
+ u32 hw_version;
+ u32 *priority;
+ u32 *weight;
+
+ /* Hardware device constants */
+ void __iomem *virtaddr;
+ resource_size_t addrsize;
+
+ struct kobject **chroots;
+ struct platform_device *pdev;
+};
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev);
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev);
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
new file mode 100644
index 000000000000..d61f1068a34b
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt_sys.c
@@ -0,0 +1,295 @@
+/*
+ * Qualcomm Technologies HIDMA Management SYS interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "hidma_mgmt.h"
+
+struct hidma_chan_attr {
+ struct hidma_mgmt_dev *mdev;
+ int index;
+ struct kobj_attribute attr;
+};
+
+struct hidma_mgmt_fileinfo {
+ char *name;
+ int mode;
+ int (*get)(struct hidma_mgmt_dev *mdev);
+ int (*set)(struct hidma_mgmt_dev *mdev, u64 val);
+};
+
+#define IMPLEMENT_GETSET(name) \
+static int get_##name(struct hidma_mgmt_dev *mdev) \
+{ \
+ return mdev->name; \
+} \
+static int set_##name(struct hidma_mgmt_dev *mdev, u64 val) \
+{ \
+ u64 tmp; \
+ int rc; \
+ \
+ tmp = mdev->name; \
+ mdev->name = val; \
+ rc = hidma_mgmt_setup(mdev); \
+ if (rc) \
+ mdev->name = tmp; \
+ return rc; \
+}
+
+#define DECLARE_ATTRIBUTE(name, mode) \
+ {#name, mode, get_##name, set_##name}
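
For reference, IMPLEMENT_GETSET(dma_channels) expands to the following pair of accessors (illustrative expansion of the macro above, with comments added):

/* Illustrative expansion of IMPLEMENT_GETSET(dma_channels): */
static int get_dma_channels(struct hidma_mgmt_dev *mdev)
{
	return mdev->dma_channels;
}

static int set_dma_channels(struct hidma_mgmt_dev *mdev, u64 val)
{
	u64 tmp;
	int rc;

	tmp = mdev->dma_channels;
	mdev->dma_channels = val;
	rc = hidma_mgmt_setup(mdev);	/* re-validate and reprogram HW */
	if (rc)
		mdev->dma_channels = tmp;	/* roll back on failure */
	return rc;
}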
+
+IMPLEMENT_GETSET(hw_version_major)
+IMPLEMENT_GETSET(hw_version_minor)
+IMPLEMENT_GETSET(max_wr_xactions)
+IMPLEMENT_GETSET(max_rd_xactions)
+IMPLEMENT_GETSET(max_write_request)
+IMPLEMENT_GETSET(max_read_request)
+IMPLEMENT_GETSET(dma_channels)
+IMPLEMENT_GETSET(chreset_timeout_cycles)
+
+static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+ u64 tmp;
+ int rc;
+
+ if (i >= mdev->dma_channels)
+ return -EINVAL;
+
+ tmp = mdev->priority[i];
+ mdev->priority[i] = val;
+ rc = hidma_mgmt_setup(mdev);
+ if (rc)
+ mdev->priority[i] = tmp;
+ return rc;
+}
+
+static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+ u64 tmp;
+ int rc;
+
+ if (i >= mdev->dma_channels)
+ return -EINVAL;
+
+ tmp = mdev->weight[i];
+ mdev->weight[i] = val;
+ rc = hidma_mgmt_setup(mdev);
+ if (rc)
+ mdev->weight[i] = tmp;
+ return rc;
+}
+
+static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = {
+ DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO),
+ DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO),
+ DECLARE_ATTRIBUTE(dma_channels, S_IRUGO),
+ DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_write_request, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_read_request, S_IRUGO),
+};
+
+static ssize_t show_values(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ buf[0] = 0;
+
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+ sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev));
+ break;
+ }
+ }
+ return strlen(buf);
+}
+
+static ssize_t set_values(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ unsigned long tmp;
+ unsigned int i;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+ rc = hidma_mgmt_files[i].set(mdev, tmp);
+ if (rc)
+ return rc;
+
+ break;
+ }
+ }
+ return count;
+}
+
+static ssize_t show_values_channel(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct hidma_chan_attr *chattr;
+ struct hidma_mgmt_dev *mdev;
+
+ buf[0] = 0;
+ chattr = container_of(attr, struct hidma_chan_attr, attr);
+ mdev = chattr->mdev;
+ if (strcmp(attr->attr.name, "priority") == 0)
+ sprintf(buf, "%d\n", mdev->priority[chattr->index]);
+ else if (strcmp(attr->attr.name, "weight") == 0)
+ sprintf(buf, "%d\n", mdev->weight[chattr->index]);
+
+ return strlen(buf);
+}
+
+static ssize_t set_values_channel(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hidma_chan_attr *chattr;
+ struct hidma_mgmt_dev *mdev;
+ unsigned long tmp;
+ int rc;
+
+ chattr = container_of(attr, struct hidma_chan_attr, attr);
+ mdev = chattr->mdev;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc)
+ return rc;
+
+ if (strcmp(attr->attr.name, "priority") == 0) {
+ rc = set_priority(mdev, chattr->index, tmp);
+ if (rc)
+ return rc;
+ } else if (strcmp(attr->attr.name, "weight") == 0) {
+ rc = set_weight(mdev, chattr->index, tmp);
+ if (rc)
+ return rc;
+ }
+ return count;
+}
+
+static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode)
+{
+ struct device_attribute *attrs;
+ char *name_copy;
+
+ attrs = devm_kmalloc(&dev->pdev->dev,
+ sizeof(struct device_attribute), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ attrs->attr.name = name_copy;
+ attrs->attr.mode = mode;
+ attrs->show = show_values;
+ attrs->store = set_values;
+ sysfs_attr_init(&attrs->attr);
+
+ return device_create_file(&dev->pdev->dev, attrs);
+}
+
+static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name,
+ int mode, int index,
+ struct kobject *parent)
+{
+ struct hidma_chan_attr *chattr;
+ char *name_copy;
+
+ chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL);
+ if (!chattr)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ chattr->mdev = mdev;
+ chattr->index = index;
+ chattr->attr.attr.name = name_copy;
+ chattr->attr.attr.mode = mode;
+ chattr->attr.show = show_values_channel;
+ chattr->attr.store = set_values_channel;
+ sysfs_attr_init(&chattr->attr.attr);
+
+ return sysfs_create_file(parent, &chattr->attr.attr);
+}
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev)
+{
+	unsigned int i;
+	int rc;
+	struct kobject *chanops;
+
+	mdev->chroots = devm_kcalloc(&mdev->pdev->dev, mdev->dma_channels,
+				     sizeof(*mdev->chroots), GFP_KERNEL);
+ if (!mdev->chroots)
+ return -ENOMEM;
+
+ chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj);
+ if (!chanops)
+ return -ENOMEM;
+
+ /* create each channel directory here */
+ for (i = 0; i < mdev->dma_channels; i++) {
+ char name[20];
+
+ snprintf(name, sizeof(name), "chan%d", i);
+ mdev->chroots[i] = kobject_create_and_add(name, chanops);
+ if (!mdev->chroots[i])
+ return -ENOMEM;
+ }
+
+ /* populate common parameters */
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name,
+ hidma_mgmt_files[i].mode);
+ if (rc)
+ return rc;
+ }
+
+ /* populate parameters that are per channel */
+ for (i = 0; i < mdev->dma_channels; i++) {
+ rc = create_sysfs_entry_channel(mdev, "priority",
+						(S_IRUGO | S_IWUSR), i,
+ mdev->chroots[i]);
+ if (rc)
+ return rc;
+
+ rc = create_sysfs_entry_channel(mdev, "weight",
+						(S_IRUGO | S_IWUSR), i,
+ mdev->chroots[i]);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index f32c430eb16c..6e0685f1a838 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -12,7 +12,7 @@ config RENESAS_DMA
config SH_DMAE_BASE
bool "Renesas SuperH DMA Engine support"
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
default y
@@ -41,7 +41,7 @@ endif
config RCAR_DMAC
tristate "Renesas R-Car Gen2 DMA Controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
help
This driver supports the general purpose DMA controller found in the
@@ -49,7 +49,7 @@ config RCAR_DMAC
config RENESAS_USB_DMAC
tristate "Renesas USB-DMA Controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 7820d07e7bee..dfb17926297b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -413,7 +413,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
u16 dmaor;
/* Clear all channels and enable the DMAC globally. */
- rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
+ rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
rcar_dmac_write(dmac, RCAR_DMAOR,
RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
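
GENMASK(h, l) builds a contiguous mask of bits l..h, so the write now clears exactly the channels this DMAC instance implements instead of a fixed 15-bit value. A quick illustration (hypothetical channel counts, compile-checkable sketch):

#include <linux/bitops.h>

static const u32 example_chclr_8ch = GENMASK(7, 0);   /* 0x000000ff */
static const u32 example_chclr_15ch = GENMASK(14, 0); /* 0x00007fff, the old constant */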
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 11707df1a689..80d86402490e 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -699,7 +699,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
struct resource *chan, *dmars, *errirq_res, *chanirq_res;
if (pdev->dev.of_node)
- pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+ pdata = of_device_get_match_data(&pdev->dev);
else
pdata = dev_get_platdata(&pdev->dev);
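[Editor's note] of_device_get_match_data() collapses the of_match_device() lookup and the ->data dereference into one call, returning NULL when there is no match. A sketch of the two forms, mirroring the conversion above:

	/* Before: explicit match-table lookup, then fetch ->data. */
	const struct of_device_id *match;

	match = of_match_device(sh_dmae_of_match, &pdev->dev);
	pdata = match ? match->data : NULL;

	/* After: the helper performs the same lookup internally. */
	pdata = of_device_get_match_data(&pdev->dev);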
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 22ea2419ee56..e48350e65089 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -989,7 +989,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
return 0;
}
-static int sirfsoc_dma_runtime_suspend(struct device *dev)
+static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
@@ -997,7 +997,7 @@ static int sirfsoc_dma_runtime_suspend(struct device *dev)
return 0;
}
-static int sirfsoc_dma_runtime_resume(struct device *dev)
+static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
int ret;
@@ -1010,8 +1010,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int sirfsoc_dma_pm_suspend(struct device *dev)
+static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
@@ -1062,7 +1061,7 @@ static int sirfsoc_dma_pm_suspend(struct device *dev)
return 0;
}
-static int sirfsoc_dma_pm_resume(struct device *dev)
+static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
{
struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
struct sirfsoc_dma_regs *save = &sdma->regs_save;
@@ -1121,7 +1120,6 @@ static int sirfsoc_dma_pm_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
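[Editor's note] Marking the PM callbacks __maybe_unused lets the CONFIG_PM_SLEEP #ifdef go away: SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() expand to nothing when the corresponding option is off, and the attribute suppresses the resulting "defined but not used" warnings. A minimal sketch of the pattern, with hypothetical names:

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* save controller state; only referenced when PM sleep is on */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* restore controller state */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};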
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 1661d518224a..e0df233dde92 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1271,6 +1271,7 @@ static const struct of_device_id sun4i_dma_match[] = {
{ .compatible = "allwinner,sun4i-a10-dma" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, sun4i_dma_match);
static struct platform_driver sun4i_dma_driver = {
.probe = sun4i_dma_probe,
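[Editor's note] MODULE_DEVICE_TABLE(of, sun4i_dma_match) exports the compatible strings into the module's alias table, so udev can autoload the driver when a matching device-tree node appears; without it the module binds only if loaded by hand. Roughly, modinfo then reports an alias of the form below (format quoted from memory, so treat it as approximate):

	alias: of:N*T*Callwinner,sun4i-a10-dma*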
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 935da8192f59..3871f29e523d 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1292,40 +1292,19 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
.support_separate_wcount_reg = true,
};
-
-static const struct of_device_id tegra_dma_of_match[] = {
- {
- .compatible = "nvidia,tegra148-apbdma",
- .data = &tegra148_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra114-apbdma",
- .data = &tegra114_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra30-apbdma",
- .data = &tegra30_dma_chip_data,
- }, {
- .compatible = "nvidia,tegra20-apbdma",
- .data = &tegra20_dma_chip_data,
- }, {
- },
-};
-MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
-
static int tegra_dma_probe(struct platform_device *pdev)
{
struct resource *res;
struct tegra_dma *tdma;
int ret;
int i;
- const struct tegra_dma_chip_data *cdata = NULL;
- const struct of_device_id *match;
+ const struct tegra_dma_chip_data *cdata;
- match = of_match_device(tegra_dma_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
+ cdata = of_device_get_match_data(&pdev->dev);
+ if (!cdata) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
return -ENODEV;
}
- cdata = match->data;
tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
sizeof(struct tegra_dma_channel), GFP_KERNEL);
@@ -1612,6 +1591,24 @@ static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
};
+static const struct of_device_id tegra_dma_of_match[] = {
+ {
+ .compatible = "nvidia,tegra148-apbdma",
+ .data = &tegra148_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra114-apbdma",
+ .data = &tegra114_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra30-apbdma",
+ .data = &tegra30_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra20-apbdma",
+ .data = &tegra20_dma_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
+
static struct platform_driver tegra_dmac_driver = {
.driver = {
.name = "tegra-apbdma",
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 6f4b5017ca3b..ef67f278e076 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
@@ -190,8 +191,7 @@ struct xilinx_vdma_tx_descriptor {
* @desc_offset: TX descriptor registers offset
* @lock: Descriptor operation lock
* @pending_list: Descriptors waiting
- * @active_desc: Active descriptor
- * @allocated_desc: Allocated descriptor
+ * @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
@@ -206,6 +206,7 @@ struct xilinx_vdma_tx_descriptor {
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
*/
struct xilinx_vdma_chan {
struct xilinx_vdma_device *xdev;
@@ -213,8 +214,7 @@ struct xilinx_vdma_chan {
u32 desc_offset;
spinlock_t lock;
struct list_head pending_list;
- struct xilinx_vdma_tx_descriptor *active_desc;
- struct xilinx_vdma_tx_descriptor *allocated_desc;
+ struct list_head active_list;
struct list_head done_list;
struct dma_chan common;
struct dma_pool *desc_pool;
@@ -229,6 +229,7 @@ struct xilinx_vdma_chan {
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
+ u32 desc_pendingcount;
};
/**
@@ -254,6 +255,9 @@ struct xilinx_vdma_device {
container_of(chan, struct xilinx_vdma_chan, common)
#define to_vdma_tx_descriptor(tx) \
container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+ readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+ cond, delay_us, timeout_us)
/* IO accessors */
static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
@@ -342,19 +346,11 @@ static struct xilinx_vdma_tx_descriptor *
xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
-
- if (chan->allocated_desc)
- return chan->allocated_desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
- spin_lock_irqsave(&chan->lock, flags);
- chan->allocated_desc = desc;
- spin_unlock_irqrestore(&chan->lock, flags);
-
INIT_LIST_HEAD(&desc->segments);
return desc;
@@ -412,9 +408,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
xilinx_vdma_free_desc_list(chan, &chan->pending_list);
xilinx_vdma_free_desc_list(chan, &chan->done_list);
-
- xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
- chan->active_desc = NULL;
+ xilinx_vdma_free_desc_list(chan, &chan->active_list);
spin_unlock_irqrestore(&chan->lock, flags);
}
@@ -560,18 +554,17 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
*/
static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
+ u32 val;
vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
/* Wait for the hardware to halt */
- do {
- if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_HALTED)
- break;
- } while (loop--);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+ (val & XILINX_VDMA_DMASR_HALTED), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "Cannot stop channel %p: %x\n",
chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
chan->err = true;
@@ -586,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
*/
static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
+ u32 val;
vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
/* Wait for the hardware to start */
- do {
- if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_HALTED))
- break;
- } while (loop--);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+ !(val & XILINX_VDMA_DMASR_HALTED), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "Cannot start channel %p: %x\n",
chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -614,45 +606,39 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
- struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
+ struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
u32 reg;
- struct xilinx_vdma_tx_segment *head, *tail = NULL;
+ struct xilinx_vdma_tx_segment *tail_segment;
+ /* This function is called with the channel lock held */
if (chan->err)
return;
- spin_lock_irqsave(&chan->lock, flags);
-
- /* There's already an active descriptor, bail out. */
- if (chan->active_desc)
- goto out_unlock;
-
if (list_empty(&chan->pending_list))
- goto out_unlock;
+ return;
desc = list_first_entry(&chan->pending_list,
struct xilinx_vdma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
/* If it is SG mode and hardware is busy, cannot submit */
if (chan->has_sg && xilinx_vdma_is_running(chan) &&
!xilinx_vdma_is_idle(chan)) {
dev_dbg(chan->dev, "DMA controller still busy\n");
- goto out_unlock;
+ return;
}
/*
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
*/
- if (chan->has_sg) {
- head = list_first_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- tail = list_entry(desc->segments.prev,
- struct xilinx_vdma_tx_segment, node);
-
- vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
- }
+ if (chan->has_sg)
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+ desc->async_tx.phys);
/* Configure the hardware using info in the config structure */
reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -662,6 +648,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
else
reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
+ /* Configure the channel with the number of pending frame buffers */
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+ chan->desc_pendingcount);
+
/*
* With SG, start with circular mode, so that BDs can be fetched.
* In direct register mode, if not parking, enable circular mode
@@ -690,16 +680,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
xilinx_vdma_start(chan);
if (chan->err)
- goto out_unlock;
+ return;
/* Start the transfer */
if (chan->has_sg) {
- vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+ vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+ tail_segment->phys);
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
- list_for_each_entry(segment, &desc->segments, node) {
+ list_for_each_entry(desc, &chan->pending_list, node) {
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_vdma_tx_segment, node);
vdma_desc_write(chan,
XILINX_VDMA_REG_START_ADDRESS(i++),
segment->hw.buf_addr);
@@ -707,7 +700,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
}
if (!last)
- goto out_unlock;
+ return;
/* HW expects these parameters to be same for one transaction */
vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
@@ -716,11 +709,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
}
- list_del(&desc->node);
- chan->active_desc = desc;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
}
/**
@@ -730,8 +720,11 @@ out_unlock:
static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
{
struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
+ spin_lock_irqsave(&chan->lock, flags);
xilinx_vdma_start_transfer(chan);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -742,24 +735,17 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
*/
static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
{
- struct xilinx_vdma_tx_descriptor *desc;
- unsigned long flags;
+ struct xilinx_vdma_tx_descriptor *desc, *next;
- spin_lock_irqsave(&chan->lock, flags);
+ /* This function is called with the channel lock held */
+ if (list_empty(&chan->active_list))
+ return;
- desc = chan->active_desc;
- if (!desc) {
- dev_dbg(chan->dev, "no running descriptors\n");
- goto out_unlock;
+ list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ list_del(&desc->node);
+ dma_cookie_complete(&desc->async_tx);
+ list_add_tail(&desc->node, &chan->done_list);
}
-
- dma_cookie_complete(&desc->async_tx);
- list_add_tail(&desc->node, &chan->done_list);
-
- chan->active_desc = NULL;
-
-out_unlock:
- spin_unlock_irqrestore(&chan->lock, flags);
}
/**
@@ -770,21 +756,17 @@ out_unlock:
*/
static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
{
- int loop = XILINX_VDMA_LOOP_COUNT;
+ int err;
u32 tmp;
vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
- tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
- XILINX_VDMA_DMACR_RESET;
-
/* Wait for the hardware to finish reset */
- do {
- tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
- XILINX_VDMA_DMACR_RESET;
- } while (loop-- && tmp);
+ err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
+ !(tmp & XILINX_VDMA_DMACR_RESET), 0,
+ XILINX_VDMA_LOOP_COUNT);
- if (!loop) {
+ if (err) {
dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -793,7 +775,7 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
chan->err = false;
- return 0;
+ return err;
}
/**
@@ -870,8 +852,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
}
if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+ spin_lock(&chan->lock);
xilinx_vdma_complete_descriptor(chan);
xilinx_vdma_start_transfer(chan);
+ spin_unlock(&chan->lock);
}
tasklet_schedule(&chan->tasklet);
@@ -879,6 +863,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
}
/**
+ * append_desc_queue - Queue a descriptor to the pending list
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_vdma_chan *chan,
+ struct xilinx_vdma_tx_descriptor *desc)
+{
+ struct xilinx_vdma_tx_segment *tail_segment;
+ struct xilinx_vdma_tx_descriptor *tail_desc;
+
+ if (list_empty(&chan->pending_list))
+ goto append;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ */
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_vdma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment, node);
+ tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+append:
+ list_add_tail(&desc->node, &chan->pending_list);
+ chan->desc_pendingcount++;
+
+ if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+ dev_dbg(chan->dev, "desc pendingcount is too high\n");
+ chan->desc_pendingcount = chan->num_frms;
+ }
+}
+
+/**
* xilinx_vdma_tx_submit - Submit DMA transaction
* @tx: Async transaction descriptor
*
@@ -906,11 +928,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
- /* Append the transaction to the pending transactions queue. */
- list_add_tail(&desc->node, &chan->pending_list);
-
- /* Free the allocated desc */
- chan->allocated_desc = NULL;
+ /* Put this transaction onto the tail of the pending queue */
+ append_desc_queue(chan, desc);
spin_unlock_irqrestore(&chan->lock, flags);
@@ -973,13 +992,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
else
hw->buf_addr = xt->src_start;
- /* Link the previous next descriptor to current */
- if (!list_empty(&desc->segments)) {
- prev = list_last_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
- }
-
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
@@ -988,7 +1000,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
- prev->hw.next_desc = segment->phys;
+ desc->async_tx.phys = segment->phys;
return &desc->async_tx;
@@ -1127,10 +1139,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
chan->dev = xdev->dev;
chan->xdev = xdev;
chan->has_sg = xdev->has_sg;
+ chan->desc_pendingcount = 0x0;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
+ INIT_LIST_HEAD(&chan->active_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
@@ -1222,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+ if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
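[Editor's note] readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> rereads addr into val until cond becomes true or timeout_us microseconds elapse, returning 0 on success and -ETIMEDOUT otherwise; that is why the converted halt/start/reset paths test err rather than a loop counter. A hedged, stand-alone sketch of the same wait:

	u32 val;
	int err;

	/* Poll DMASR until the HALTED bit is set; the register names are
	 * the ones used above, the "regs" base is illustrative. */
	err = readl_poll_timeout(regs + XILINX_VDMA_REG_DMASR, val,
				 val & XILINX_VDMA_DMASR_HALTED,
				 0, XILINX_VDMA_LOOP_COUNT);
	if (err)
		pr_err("halt timed out, status 0x%x\n", val);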
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index ef25000a5bc6..37755e63cc28 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -367,14 +367,30 @@ config EDAC_OCTEON_PCI
Support for error detection and correction on the
Cavium Octeon family of SOCs.
-config EDAC_ALTERA_MC
- bool "Altera SDRAM Memory Controller EDAC"
+config EDAC_ALTERA
+ bool "Altera SOCFPGA ECC"
depends on EDAC_MM_EDAC=y && ARCH_SOCFPGA
help
Support for error detection and correction on the
- Altera SDRAM memory controller. Note that the
- preloader must initialize the SDRAM before loading
- the kernel.
+ Altera SOCs. This must be selected for SDRAM ECC.
+ Note that the preloader must initialize the SDRAM
+ before loading the kernel.
+
+config EDAC_ALTERA_L2C
+ bool "Altera L2 Cache ECC"
+ depends on EDAC_ALTERA=y
+ select CACHE_L2X0
+ help
+ Support for error detection and correction on the
+ Altera L2 cache Memory for Altera SoCs. This option
+ requires L2 cache so it will force that selection.
+
+config EDAC_ALTERA_OCRAM
+ bool "Altera On-Chip RAM ECC"
+ depends on EDAC_ALTERA=y && SRAM && GENERIC_ALLOCATOR
+ help
+ Support for error detection and correction on the
+ Altera On-Chip RAM Memory for Altera SoCs.
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index be163e20fe56..f9e4a3e0e6e9 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -67,6 +67,6 @@ obj-$(CONFIG_EDAC_OCTEON_L2C) += octeon_edac-l2c.o
obj-$(CONFIG_EDAC_OCTEON_LMC) += octeon_edac-lmc.o
obj-$(CONFIG_EDAC_OCTEON_PCI) += octeon_edac-pci.o
-obj-$(CONFIG_EDAC_ALTERA_MC) += altera_edac.o
+obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 929640981d8a..63e42098726d 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1,5 +1,5 @@
/*
- * Copyright Altera Corporation (C) 2014-2015. All rights reserved.
+ * Copyright Altera Corporation (C) 2014-2016. All rights reserved.
* Copyright 2011-2012 Calxeda, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -17,8 +17,10 @@
* Adapted from the highbank_mc_edac driver.
*/
+#include <asm/cacheflush.h>
#include <linux/ctype.h>
#include <linux/edac.h>
+#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -34,6 +36,7 @@
#define EDAC_MOD_STR "altera_edac"
#define EDAC_VERSION "1"
+#define EDAC_DEVICE "Altera"
static const struct altr_sdram_prv_data c5_data = {
.ecc_ctrl_offset = CV_CTLCFG_OFST,
@@ -75,6 +78,31 @@ static const struct altr_sdram_prv_data a10_data = {
.ue_set_mask = A10_DIAGINT_TDERRA_MASK,
};
+/************************** EDAC Device Defines **************************/
+
+/* OCRAM ECC Management Group Defines */
+#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET 0x04
+#define ALTR_OCR_ECC_EN BIT(0)
+#define ALTR_OCR_ECC_INJS BIT(1)
+#define ALTR_OCR_ECC_INJD BIT(2)
+#define ALTR_OCR_ECC_SERR BIT(3)
+#define ALTR_OCR_ECC_DERR BIT(4)
+
+/* L2 ECC Management Group Defines */
+#define ALTR_MAN_GRP_L2_ECC_OFFSET 0x00
+#define ALTR_L2_ECC_EN BIT(0)
+#define ALTR_L2_ECC_INJS BIT(1)
+#define ALTR_L2_ECC_INJD BIT(2)
+
+#define ALTR_UE_TRIGGER_CHAR 'U' /* Trigger for UE */
+#define ALTR_TRIGGER_READ_WRD_CNT 32 /* Line size x 4 */
+#define ALTR_TRIG_OCRAM_BYTE_SIZE 128 /* Line size x 4 */
+#define ALTR_TRIG_L2C_BYTE_SIZE 4096 /* Full Page */
+
+/*********************** EDAC Memory Controller Functions ****************/
+
+/* The SDRAM controller uses the EDAC Memory Controller framework. */
+
static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id)
{
struct mem_ctl_info *mci = dev_id;
@@ -504,6 +532,466 @@ static struct platform_driver altr_sdram_edac_driver = {
module_platform_driver(altr_sdram_edac_driver);
+/************************* EDAC Parent Probe *************************/
+
+static const struct of_device_id altr_edac_device_of_match[];
+
+static const struct of_device_id altr_edac_of_match[] = {
+ { .compatible = "altr,socfpga-ecc-manager" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_of_match);
+
+static int altr_edac_probe(struct platform_device *pdev)
+{
+ of_platform_populate(pdev->dev.of_node, altr_edac_device_of_match,
+ NULL, &pdev->dev);
+ return 0;
+}
+
+static struct platform_driver altr_edac_driver = {
+ .probe = altr_edac_probe,
+ .driver = {
+ .name = "socfpga_ecc_manager",
+ .of_match_table = altr_edac_of_match,
+ },
+};
+module_platform_driver(altr_edac_driver);
+
+/************************* EDAC Device Functions *************************/
+
+/*
+ * EDAC Device Functions (shared between various IPs).
+ * The discrete memories use the EDAC Device framework. The probe
+ * and error handling functions are very similar between memories
+ * so they are shared. The memory allocation and freeing for EDAC
+ * trigger testing are different for each memory.
+ */
+
+const struct edac_device_prv_data ocramecc_data;
+const struct edac_device_prv_data l2ecc_data;
+
+struct edac_device_prv_data {
+ int (*setup)(struct platform_device *pdev, void __iomem *base);
+ int ce_clear_mask;
+ int ue_clear_mask;
+ char dbgfs_name[20];
+ void * (*alloc_mem)(size_t size, void **other);
+ void (*free_mem)(void *p, size_t size, void *other);
+ int ecc_enable_mask;
+ int ce_set_mask;
+ int ue_set_mask;
+ int trig_alloc_sz;
+};
+
+struct altr_edac_device_dev {
+ void __iomem *base;
+ int sb_irq;
+ int db_irq;
+ const struct edac_device_prv_data *data;
+ struct dentry *debugfs_dir;
+ char *edac_dev_name;
+};
+
+static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
+{
+ irqreturn_t ret_value = IRQ_NONE;
+ struct edac_device_ctl_info *dci = dev_id;
+ struct altr_edac_device_dev *drvdata = dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+
+ if (irq == drvdata->sb_irq) {
+ if (priv->ce_clear_mask)
+ writel(priv->ce_clear_mask, drvdata->base);
+ edac_device_handle_ce(dci, 0, 0, drvdata->edac_dev_name);
+ ret_value = IRQ_HANDLED;
+ } else if (irq == drvdata->db_irq) {
+ if (priv->ue_clear_mask)
+ writel(priv->ue_clear_mask, drvdata->base);
+ edac_device_handle_ue(dci, 0, 0, drvdata->edac_dev_name);
+ panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+ ret_value = IRQ_HANDLED;
+ } else {
+ WARN_ON(1);
+ }
+
+ return ret_value;
+}
+
+static ssize_t altr_edac_device_trig(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+
+{
+ u32 *ptemp, i, error_mask;
+ int result = 0;
+ u8 trig_type;
+ unsigned long flags;
+ struct edac_device_ctl_info *edac_dci = file->private_data;
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+ void *generic_ptr = edac_dci->dev;
+
+ if (!user_buf || get_user(trig_type, user_buf))
+ return -EFAULT;
+
+ if (!priv->alloc_mem)
+ return -ENOMEM;
+
+ /*
+ * Note that generic_ptr is initialized to the device pointer, but
+ * some alloc_mem() implementations override it to return their own data.
+ */
+ ptemp = priv->alloc_mem(priv->trig_alloc_sz, &generic_ptr);
+ if (!ptemp) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Inject: Buffer Allocation error\n");
+ return -ENOMEM;
+ }
+
+ if (trig_type == ALTR_UE_TRIGGER_CHAR)
+ error_mask = priv->ue_set_mask;
+ else
+ error_mask = priv->ce_set_mask;
+
+ edac_printk(KERN_ALERT, EDAC_DEVICE,
+ "Trigger Error Mask (0x%X)\n", error_mask);
+
+ local_irq_save(flags);
+ /* write ECC corrupted data out. */
+ for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) {
+ /* Read data so we're in the correct state */
+ rmb();
+ if (ACCESS_ONCE(ptemp[i]))
+ result = -1;
+ /* Toggle Error bit (it is latched), leave ECC enabled */
+ writel(error_mask, drvdata->base);
+ writel(priv->ecc_enable_mask, drvdata->base);
+ ptemp[i] = i;
+ }
+ /* Ensure it has been written out */
+ wmb();
+ local_irq_restore(flags);
+
+ if (result)
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Mem Not Cleared\n");
+
+ /* Read out written data. ECC error caused here */
+ for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++)
+ if (ACCESS_ONCE(ptemp[i]) != i)
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Read doesn't match written data\n");
+
+ if (priv->free_mem)
+ priv->free_mem(ptemp, priv->trig_alloc_sz, generic_ptr);
+
+ return count;
+}
+
+static const struct file_operations altr_edac_device_inject_fops = {
+ .open = simple_open,
+ .write = altr_edac_device_trig,
+ .llseek = generic_file_llseek,
+};
+
+static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
+ const struct edac_device_prv_data *priv)
+{
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+
+ if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
+ return;
+
+ drvdata->debugfs_dir = edac_debugfs_create_dir(drvdata->edac_dev_name);
+ if (!drvdata->debugfs_dir)
+ return;
+
+ if (!edac_debugfs_create_file(priv->dbgfs_name, S_IWUSR,
+ drvdata->debugfs_dir, edac_dci,
+ &altr_edac_device_inject_fops))
+ debugfs_remove_recursive(drvdata->debugfs_dir);
+}
+
+static const struct of_device_id altr_edac_device_of_match[] = {
+#ifdef CONFIG_EDAC_ALTERA_L2C
+ { .compatible = "altr,socfpga-l2-ecc", .data = (void *)&l2ecc_data },
+#endif
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
+ { .compatible = "altr,socfpga-ocram-ecc",
+ .data = (void *)&ocramecc_data },
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_device_of_match);
+
+/*
+ * altr_edac_device_probe()
+ * This is a generic EDAC device driver that will support
+ * various Altera memory devices such as the L2 cache ECC and
+ * OCRAM ECC as well as the memories for other peripherals.
+ * Module specific initialization is done through the match data
+ * attached to the compatible string in the device tree.
+ */
+static int altr_edac_device_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct altr_edac_device_dev *drvdata;
+ struct resource *r;
+ int res = 0;
+ struct device_node *np = pdev->dev.of_node;
+ char *ecc_name = (char *)np->name;
+ static int dev_instance;
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to open devm\n");
+ return -ENOMEM;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to get mem resource\n");
+ res = -ENODEV;
+ goto fail;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
+ dev_name(&pdev->dev))) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s:Error requesting mem region\n", ecc_name);
+ res = -EBUSY;
+ goto fail;
+ }
+
+ dci = edac_device_alloc_ctl_info(sizeof(*drvdata), ecc_name,
+ 1, ecc_name, 1, 0, NULL, 0,
+ dev_instance++);
+
+ if (!dci) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: Unable to allocate EDAC device\n", ecc_name);
+ res = -ENOMEM;
+ goto fail;
+ }
+
+ drvdata = dci->pvt_info;
+ dci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dci);
+ drvdata->edac_dev_name = ecc_name;
+
+ drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!drvdata->base)
+ goto fail1;
+
+ /* Get driver specific data for this EDAC device */
+ drvdata->data = of_match_node(altr_edac_device_of_match, np)->data;
+
+ /* Check specific dependencies for the module */
+ if (drvdata->data->setup) {
+ res = drvdata->data->setup(pdev, drvdata->base);
+ if (res)
+ goto fail1;
+ }
+
+ drvdata->sb_irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
+ altr_edac_device_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res)
+ goto fail1;
+
+ drvdata->db_irq = platform_get_irq(pdev, 1);
+ res = devm_request_irq(&pdev->dev, drvdata->db_irq,
+ altr_edac_device_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res)
+ goto fail1;
+
+ dci->mod_name = "Altera ECC Manager";
+ dci->dev_name = drvdata->edac_dev_name;
+
+ res = edac_device_add_device(dci);
+ if (res)
+ goto fail1;
+
+ altr_create_edacdev_dbgfs(dci, drvdata->data);
+
+ devres_close_group(&pdev->dev, NULL);
+
+ return 0;
+
+fail1:
+ edac_device_free_ctl_info(dci);
+fail:
+ devres_release_group(&pdev->dev, NULL);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s:Error setting up EDAC device: %d\n", ecc_name, res);
+
+ return res;
+}
+
+static int altr_edac_device_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+ struct altr_edac_device_dev *drvdata = dci->pvt_info;
+
+ debugfs_remove_recursive(drvdata->debugfs_dir);
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+
+ return 0;
+}
+
+static struct platform_driver altr_edac_device_driver = {
+ .probe = altr_edac_device_probe,
+ .remove = altr_edac_device_remove,
+ .driver = {
+ .name = "altr_edac_device",
+ .of_match_table = altr_edac_device_of_match,
+ },
+};
+module_platform_driver(altr_edac_device_driver);
+
+/*********************** OCRAM EDAC Device Functions *********************/
+
+#ifdef CONFIG_EDAC_ALTERA_OCRAM
+
+static void *ocram_alloc_mem(size_t size, void **other)
+{
+ struct device_node *np;
+ struct gen_pool *gp;
+ void *sram_addr;
+
+ np = of_find_compatible_node(NULL, NULL, "altr,socfpga-ocram-ecc");
+ if (!np)
+ return NULL;
+
+ gp = of_gen_pool_get(np, "iram", 0);
+ of_node_put(np);
+ if (!gp)
+ return NULL;
+
+ sram_addr = (void *)gen_pool_alloc(gp, size);
+ if (!sram_addr)
+ return NULL;
+
+ memset(sram_addr, 0, size);
+ /* Ensure data is written out */
+ wmb();
+
+ /* Remember this handle for freeing later */
+ *other = gp;
+
+ return sram_addr;
+}
+
+static void ocram_free_mem(void *p, size_t size, void *other)
+{
+ gen_pool_free((struct gen_pool *)other, (u32)p, size);
+}
+
+/*
+ * altr_ocram_check_deps()
+ * Test for OCRAM cache ECC dependencies upon entry because
+ * platform specific startup should have initialized the
+ * On-Chip RAM memory and enabled the ECC.
+ * Can't turn on ECC here because accessing uninitialized
+ * memory would cause CE/UE errors, possibly triggering an abort.
+ */
+static int altr_ocram_check_deps(struct platform_device *pdev,
+ void __iomem *base)
+{
+ if (readl(base) & ALTR_OCR_ECC_EN)
+ return 0;
+
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "OCRAM: No ECC present or ECC disabled.\n");
+ return -ENODEV;
+}
+
+const struct edac_device_prv_data ocramecc_data = {
+ .setup = altr_ocram_check_deps,
+ .ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
+ .ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
+ .dbgfs_name = "altr_ocram_trigger",
+ .alloc_mem = ocram_alloc_mem,
+ .free_mem = ocram_free_mem,
+ .ecc_enable_mask = ALTR_OCR_ECC_EN,
+ .ce_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJS),
+ .ue_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJD),
+ .trig_alloc_sz = ALTR_TRIG_OCRAM_BYTE_SIZE,
+};
+
+#endif /* CONFIG_EDAC_ALTERA_OCRAM */
+
+/********************* L2 Cache EDAC Device Functions ********************/
+
+#ifdef CONFIG_EDAC_ALTERA_L2C
+
+static void *l2_alloc_mem(size_t size, void **other)
+{
+ struct device *dev = *other;
+ void *ptemp = devm_kzalloc(dev, size, GFP_KERNEL);
+
+ if (!ptemp)
+ return NULL;
+
+ /* Make sure everything is written out */
+ wmb();
+
+ /*
+ * Clean all cache levels up to LoC (includes L2)
+ * This ensures the corrupted data is written into
+ * L2 cache for readback test (which causes ECC error).
+ */
+ flush_cache_all();
+
+ return ptemp;
+}
+
+static void l2_free_mem(void *p, size_t size, void *other)
+{
+ struct device *dev = other;
+
+ if (dev && p)
+ devm_kfree(dev, p);
+}
+
+/*
+ * altr_l2_check_deps()
+ * Test for L2 cache ECC dependencies upon entry because
+ * platform specific startup should have initialized the L2
+ * memory and enabled the ECC.
+ * Bail if ECC is not enabled.
+ * Note that L2 Cache Enable is forced at build time.
+ */
+static int altr_l2_check_deps(struct platform_device *pdev,
+ void __iomem *base)
+{
+ if (readl(base) & ALTR_L2_ECC_EN)
+ return 0;
+
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "L2: No ECC present, or ECC disabled\n");
+ return -ENODEV;
+}
+
+const struct edac_device_prv_data l2ecc_data = {
+ .setup = altr_l2_check_deps,
+ .ce_clear_mask = 0,
+ .ue_clear_mask = 0,
+ .dbgfs_name = "altr_l2_trigger",
+ .alloc_mem = l2_alloc_mem,
+ .free_mem = l2_free_mem,
+ .ecc_enable_mask = ALTR_L2_ECC_EN,
+ .ce_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJS),
+ .ue_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJD),
+ .trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+};
+
+#endif /* CONFIG_EDAC_ALTERA_L2C */
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
-MODULE_DESCRIPTION("EDAC Driver for Altera SDRAM Controller");
+MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
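[Editor's note] struct edac_device_prv_data acts as a small ops table: the probe, IRQ, and trigger paths stay generic, while each memory (OCRAM, L2) supplies its masks and alloc/free callbacks through the device-tree match data. A condensed sketch of how the injection path dispatches through it; inject_error() is a hypothetical helper, not a function in the driver:

static int inject_error(struct altr_edac_device_dev *drvdata, bool ue)
{
	const struct edac_device_prv_data *priv = drvdata->data;
	u32 mask = ue ? priv->ue_set_mask : priv->ce_set_mask;

	writel(mask, drvdata->base);			/* latch the inject bit */
	writel(priv->ecc_enable_mask, drvdata->base);	/* keep ECC enabled */
	return 0;
}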
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9eee13ef83a5..d87a47547ba5 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 chan_off;
u64 dram_base = get_dram_base(pvt, range);
u64 hole_off = f10_dhar_offset(pvt);
- u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
if (hi_rng) {
/*
diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c
index 54d2f668cb0a..92dbb7e2320c 100644
--- a/drivers/edac/debugfs.c
+++ b/drivers/edac/debugfs.c
@@ -53,7 +53,7 @@ int __init edac_debugfs_init(void)
void edac_debugfs_exit(void)
{
- debugfs_remove(edac_debugfs);
+ debugfs_remove_recursive(edac_debugfs);
}
int edac_create_debugfs_nodes(struct mem_ctl_info *mci)
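[Editor's note] debugfs_remove() removes only the dentry it is handed, so files created under the EDAC directory were previously leaked on exit; debugfs_remove_recursive() tears down the whole subtree, and both tolerate NULL/ERR_PTR arguments. A tiny sketch ("example" and poll_msec are hypothetical):

	static u32 poll_msec;
	struct dentry *dir;

	dir = debugfs_create_dir("example", NULL);
	debugfs_create_u32("poll_msec", 0644, dir, &poll_msec);
	debugfs_remove_recursive(dir);	/* removes poll_msec, then example/ */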
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 8adfc167c2e3..1472f48c8ac6 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -535,60 +535,21 @@ static void edac_mc_workq_function(struct work_struct *work_req)
mutex_lock(&mem_ctls_mutex);
- /* if this control struct has movd to offline state, we are done */
- if (mci->op_state == OP_OFFLINE) {
+ if (mci->op_state != OP_RUNNING_POLL) {
mutex_unlock(&mem_ctls_mutex);
return;
}
- /* Only poll controllers that are running polled and have a check */
- if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+ if (edac_mc_assert_error_check_and_clear())
mci->edac_check(mci);
mutex_unlock(&mem_ctls_mutex);
- /* Reschedule */
+ /* Queue ourselves again. */
edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
- * edac_mc_workq_setup
- * initialize a workq item for this mci
- * passing in the new delay period in msec
- *
- * locking model:
- *
- * called with the mem_ctls_mutex held
- */
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
-{
- edac_dbg(0, "\n");
-
- /* if this instance is not in the POLL state, then simply return */
- if (mci->op_state != OP_RUNNING_POLL)
- return;
-
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-
- edac_queue_work(&mci->work, msecs_to_jiffies(msec));
-}
-
-/*
- * edac_mc_workq_teardown
- * stop the workq processing on this mci
- *
- * locking model:
- *
- * called WITHOUT lock held
- */
-static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
-{
- mci->op_state = OP_OFFLINE;
-
- edac_stop_work(&mci->work);
-}
-
-/*
* edac_mc_reset_delay_period(unsigned long value)
*
* user space has updated our poll period value, need to
@@ -771,12 +732,12 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
goto fail1;
}
- /* If there IS a check routine, then we are running POLLED */
- if (mci->edac_check != NULL) {
- /* This instance is NOW RUNNING */
+ if (mci->edac_check) {
mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
+
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
@@ -823,15 +784,16 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
return NULL;
}
+ /* mark MCI offline: */
+ mci->op_state = OP_OFFLINE;
+
if (!del_mc_from_global_list(mci))
edac_mc_owner = NULL;
- mutex_unlock(&mem_ctls_mutex);
- /* flush workq processes */
- edac_mc_workq_teardown(mci);
+ mutex_unlock(&mem_ctls_mutex);
- /* marking MCI offline */
- mci->op_state = OP_OFFLINE;
+ if (mci->edac_check)
+ edac_stop_work(&mci->work);
/* remove from sysfs */
edac_remove_sysfs_mci_device(mci);
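[Editor's note] With the setup/teardown helpers folded away, polling becomes the standard self-rescheduling delayed-work idiom: the work function re-queues itself only while op_state is OP_RUNNING_POLL, and teardown flips the state under the mutex before cancelling, so an in-flight instance exits cleanly. A condensed sketch of the loop, mirroring edac_mc_workq_function() above:

static void poll_fn(struct work_struct *work)
{
	struct delayed_work *d_work = to_delayed_work(work);
	struct mem_ctl_info *mci = container_of(d_work, struct mem_ctl_info,
						work);

	mutex_lock(&mem_ctls_mutex);
	if (mci->op_state != OP_RUNNING_POLL) {		/* being torn down */
		mutex_unlock(&mem_ctls_mutex);
		return;
	}
	mci->edac_check(mci);				/* poll the hardware */
	mutex_unlock(&mem_ctls_mutex);

	/* queue ourselves again */
	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}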
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 99685388d3fb..8f2f2899a7a2 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -195,55 +195,24 @@ static void edac_pci_workq_function(struct work_struct *work_req)
mutex_lock(&edac_pci_ctls_mutex);
- if (pci->op_state == OP_RUNNING_POLL) {
- /* we might be in POLL mode, but there may NOT be a poll func
- */
- if ((pci->edac_check != NULL) && edac_pci_get_check_errors())
- pci->edac_check(pci);
-
- /* if we are on a one second period, then use round */
- msec = edac_pci_get_poll_msec();
- if (msec == 1000)
- delay = round_jiffies_relative(msecs_to_jiffies(msec));
- else
- delay = msecs_to_jiffies(msec);
-
- /* Reschedule only if we are in POLL mode */
- edac_queue_work(&pci->work, delay);
+ if (pci->op_state != OP_RUNNING_POLL) {
+ mutex_unlock(&edac_pci_ctls_mutex);
+ return;
}
- mutex_unlock(&edac_pci_ctls_mutex);
-}
-
-/*
- * edac_pci_workq_setup()
- * initialize a workq item for this edac_pci instance
- * passing in the new delay period in msec
- *
- * locking model:
- * called when 'edac_pci_ctls_mutex' is locked
- */
-static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
- unsigned int msec)
-{
- edac_dbg(0, "\n");
+ if (edac_pci_get_check_errors())
+ pci->edac_check(pci);
- INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+ /* if we are on a one second period, then use round */
+ msec = edac_pci_get_poll_msec();
+ if (msec == 1000)
+ delay = round_jiffies_relative(msecs_to_jiffies(msec));
+ else
+ delay = msecs_to_jiffies(msec);
- edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
-}
+ edac_queue_work(&pci->work, delay);
-/*
- * edac_pci_workq_teardown()
- * stop the workq processing on this edac_pci instance
- */
-static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
-{
- edac_dbg(0, "\n");
-
- pci->op_state = OP_OFFLINE;
-
- edac_stop_work(&pci->work);
+ mutex_unlock(&edac_pci_ctls_mutex);
}
/*
@@ -289,10 +258,12 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
goto fail1;
}
- if (pci->edac_check != NULL) {
+ if (pci->edac_check) {
pci->op_state = OP_RUNNING_POLL;
- edac_pci_workq_setup(pci, 1000);
+ INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+ edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
+
} else {
pci->op_state = OP_RUNNING_INTERRUPT;
}
@@ -350,8 +321,8 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
mutex_unlock(&edac_pci_ctls_mutex);
- /* stop the workq timer */
- edac_pci_workq_teardown(pci);
+ if (pci->edac_check)
+ edac_stop_work(&pci->work);
edac_printk(KERN_INFO, EDAC_PCI,
"Removed device %d for %s %s: DEV %s\n",
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index e3a945ce374b..49768c08ac07 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -147,6 +147,135 @@ static const char * const mc6_mce_desc[] = {
"Status Register File",
};
+/* Scalable MCA error strings */
+static const char * const f17h_ls_mce_desc[] = {
+ "Load queue parity",
+ "Store queue parity",
+ "Miss address buffer payload parity",
+ "L1 TLB parity",
+ "", /* reserved */
+ "DC tag error type 6",
+ "DC tag error type 1",
+ "Internal error type 1",
+ "Internal error type 2",
+ "Sys Read data error thread 0",
+ "Sys read data error thread 1",
+ "DC tag error type 2",
+ "DC data error type 1 (poison comsumption)",
+ "DC data error type 2",
+ "DC data error type 3",
+ "DC tag error type 4",
+ "L2 TLB parity",
+ "PDC parity error",
+ "DC tag error type 3",
+ "DC tag error type 5",
+ "L2 fill data error",
+};
+
+static const char * const f17h_if_mce_desc[] = {
+ "microtag probe port parity error",
+ "IC microtag or full tag multi-hit error",
+ "IC full tag parity",
+ "IC data array parity",
+ "Decoupling queue phys addr parity error",
+ "L0 ITLB parity error",
+ "L1 ITLB parity error",
+ "L2 ITLB parity error",
+ "BPQ snoop parity on Thread 0",
+ "BPQ snoop parity on Thread 1",
+ "L1 BTB multi-match error",
+ "L2 BTB multi-match error",
+};
+
+static const char * const f17h_l2_mce_desc[] = {
+ "L2M tag multi-way-hit error",
+ "L2M tag ECC error",
+ "L2M data ECC error",
+ "HW assert",
+};
+
+static const char * const f17h_de_mce_desc[] = {
+ "uop cache tag parity error",
+ "uop cache data parity error",
+ "Insn buffer parity error",
+ "Insn dispatch queue parity error",
+ "Fetch address FIFO parity",
+ "Patch RAM data parity",
+ "Patch RAM sequencer parity",
+ "uop buffer parity"
+};
+
+static const char * const f17h_ex_mce_desc[] = {
+ "Watchdog timeout error",
+ "Phy register file parity",
+ "Flag register file parity",
+ "Immediate displacement register file parity",
+ "Address generator payload parity",
+ "EX payload parity",
+ "Checkpoint queue parity",
+ "Retire dispatch queue parity",
+};
+
+static const char * const f17h_fp_mce_desc[] = {
+ "Physical register file parity",
+ "Freelist parity error",
+ "Schedule queue parity",
+ "NSQ parity error",
+ "Retire queue parity",
+ "Status register file parity",
+};
+
+static const char * const f17h_l3_mce_desc[] = {
+ "Shadow tag macro ECC error",
+ "Shadow tag macro multi-way-hit error",
+ "L3M tag ECC error",
+ "L3M tag multi-way-hit error",
+ "L3M data ECC error",
+ "XI parity, L3 fill done channel error",
+ "L3 victim queue parity",
+ "L3 HW assert",
+};
+
+static const char * const f17h_cs_mce_desc[] = {
+ "Illegal request from transport layer",
+ "Address violation",
+ "Security violation",
+ "Illegal response from transport layer",
+ "Unexpected response",
+ "Parity error on incoming request or probe response data",
+ "Parity error on incoming read response data",
+ "Atomic request parity",
+ "ECC error on probe filter access",
+};
+
+static const char * const f17h_pie_mce_desc[] = {
+ "HW assert",
+ "Internal PIE register security violation",
+ "Error on GMI link",
+ "Poison data written to internal PIE register",
+};
+
+static const char * const f17h_umc_mce_desc[] = {
+ "DRAM ECC error",
+ "Data poison error on DRAM",
+ "SDP parity error",
+ "Advanced peripheral bus error",
+ "Command/address parity error",
+ "Write data CRC error",
+};
+
+static const char * const f17h_pb_mce_desc[] = {
+ "Parameter Block RAM ECC error",
+};
+
+static const char * const f17h_psp_mce_desc[] = {
+ "PSP RAM ECC or parity error",
+};
+
+static const char * const f17h_smu_mce_desc[] = {
+ "SMU RAM ECC or parity error",
+};
+
static bool f12h_mc0_mce(u16 ec, u8 xec)
{
bool ret = false;
@@ -691,6 +820,177 @@ static void decode_mc6_mce(struct mce *m)
pr_emerg(HW_ERR "Corrupted MC6 MCE info?\n");
}
+static void decode_f17h_core_errors(const char *ip_name, u8 xec,
+ unsigned int mca_type)
+{
+ const char * const *error_desc_array;
+ size_t len;
+
+ pr_emerg(HW_ERR "%s Error: ", ip_name);
+
+ switch (mca_type) {
+ case SMCA_LS:
+ error_desc_array = f17h_ls_mce_desc;
+ len = ARRAY_SIZE(f17h_ls_mce_desc) - 1;
+
+ if (xec == 0x4) {
+ pr_cont("Unrecognized LS MCA error code.\n");
+ return;
+ }
+ break;
+
+ case SMCA_IF:
+ error_desc_array = f17h_if_mce_desc;
+ len = ARRAY_SIZE(f17h_if_mce_desc) - 1;
+ break;
+
+ case SMCA_L2_CACHE:
+ error_desc_array = f17h_l2_mce_desc;
+ len = ARRAY_SIZE(f17h_l2_mce_desc) - 1;
+ break;
+
+ case SMCA_DE:
+ error_desc_array = f17h_de_mce_desc;
+ len = ARRAY_SIZE(f17h_de_mce_desc) - 1;
+ break;
+
+ case SMCA_EX:
+ error_desc_array = f17h_ex_mce_desc;
+ len = ARRAY_SIZE(f17h_ex_mce_desc) - 1;
+ break;
+
+ case SMCA_FP:
+ error_desc_array = f17h_fp_mce_desc;
+ len = ARRAY_SIZE(f17h_fp_mce_desc) - 1;
+ break;
+
+ case SMCA_L3_CACHE:
+ error_desc_array = f17h_l3_mce_desc;
+ len = ARRAY_SIZE(f17h_l3_mce_desc) - 1;
+ break;
+
+ default:
+ pr_cont("Corrupted MCA core error info.\n");
+ return;
+ }
+
+ if (xec > len) {
+ pr_cont("Unrecognized %s MCA bank error code.\n",
+ amd_core_mcablock_names[mca_type]);
+ return;
+ }
+
+ pr_cont("%s.\n", error_desc_array[xec]);
+}
+
+static void decode_df_errors(u8 xec, unsigned int mca_type)
+{
+ const char * const *error_desc_array;
+ size_t len;
+
+ pr_emerg(HW_ERR "Data Fabric Error: ");
+
+ switch (mca_type) {
+ case SMCA_CS:
+ error_desc_array = f17h_cs_mce_desc;
+ len = ARRAY_SIZE(f17h_cs_mce_desc) - 1;
+ break;
+
+ case SMCA_PIE:
+ error_desc_array = f17h_pie_mce_desc;
+ len = ARRAY_SIZE(f17h_pie_mce_desc) - 1;
+ break;
+
+ default:
+ pr_cont("Corrupted MCA Data Fabric info.\n");
+ return;
+ }
+
+ if (xec > len) {
+ pr_cont("Unrecognized %s MCA bank error code.\n",
+ amd_df_mcablock_names[mca_type]);
+ return;
+ }
+
+ pr_cont("%s.\n", error_desc_array[xec]);
+}
+
+/* Decode errors according to Scalable MCA specification */
+static void decode_smca_errors(struct mce *m)
+{
+ u32 addr = MSR_AMD64_SMCA_MCx_IPID(m->bank);
+ unsigned int hwid, mca_type, i;
+ u8 xec = XEC(m->status, xec_mask);
+ const char * const *error_desc_array;
+ const char *ip_name;
+ u32 low, high;
+ size_t len;
+
+ if (rdmsr_safe(addr, &low, &high)) {
+ pr_emerg("Invalid IP block specified, error information is unreliable.\n");
+ return;
+ }
+
+ hwid = high & MCI_IPID_HWID;
+ mca_type = (high & MCI_IPID_MCATYPE) >> 16;
+
+ pr_emerg(HW_ERR "MC%d IPID value: 0x%08x%08x\n", m->bank, high, low);
+
+ /*
+ * Based on hwid and mca_type values, decode errors from respective IPs.
+ * Note: mca_type values make sense only in the context of an hwid.
+ */
+ for (i = 0; i < ARRAY_SIZE(amd_hwids); i++)
+ if (amd_hwids[i].hwid == hwid)
+ break;
+
+ switch (i) {
+ case SMCA_F17H_CORE:
+ ip_name = (mca_type == SMCA_L3_CACHE) ?
+ "L3 Cache" : "F17h Core";
+ return decode_f17h_core_errors(ip_name, xec, mca_type);
+ break;
+
+ case SMCA_DF:
+ return decode_df_errors(xec, mca_type);
+ break;
+
+ case SMCA_UMC:
+ error_desc_array = f17h_umc_mce_desc;
+ len = ARRAY_SIZE(f17h_umc_mce_desc) - 1;
+ break;
+
+ case SMCA_PB:
+ error_desc_array = f17h_pb_mce_desc;
+ len = ARRAY_SIZE(f17h_pb_mce_desc) - 1;
+ break;
+
+ case SMCA_PSP:
+ error_desc_array = f17h_psp_mce_desc;
+ len = ARRAY_SIZE(f17h_psp_mce_desc) - 1;
+ break;
+
+ case SMCA_SMU:
+ error_desc_array = f17h_smu_mce_desc;
+ len = ARRAY_SIZE(f17h_smu_mce_desc) - 1;
+ break;
+
+ default:
+ pr_emerg(HW_ERR "HWID:%d does not match any existing IPs.\n", hwid);
+ return;
+ }
+
+ ip_name = amd_hwids[i].name;
+ pr_emerg(HW_ERR "%s Error: ", ip_name);
+
+ if (xec > len) {
+ pr_cont("Unrecognized %s MCA bank error code.\n", ip_name);
+ return;
+ }
+
+ pr_cont("%s.\n", error_desc_array[xec]);
+}
+
static inline void amd_decode_err_code(u16 ec)
{
if (INT_ERROR(ec)) {
@@ -752,6 +1052,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
struct mce *m = (struct mce *)data;
struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
int ecc;
+ u32 ebx = cpuid_ebx(0x80000007);
if (amd_filter_mce(m))
return NOTIFY_STOP;
@@ -769,11 +1070,20 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 == 0x15 || c->x86 == 0x16)
+ if (c->x86 >= 0x15)
pr_cont("|%s|%s",
((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
+ if (!!(ebx & BIT(3))) {
+ u32 low, high;
+ u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
+
+ if (!rdmsr_safe(addr, &low, &high) &&
+ (low & MCI_CONFIG_MCAX))
+ pr_cont("|%s", ((m->status & MCI_STATUS_TCC) ? "TCC" : "-"));
+ }
+
/* do the two bits[14:13] together */
ecc = (m->status >> 45) & 0x3;
if (ecc)
@@ -784,6 +1094,11 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->status & MCI_STATUS_ADDRV)
pr_emerg(HW_ERR "MC%d Error Address: 0x%016llx\n", m->bank, m->addr);
+ if (!!(ebx & BIT(3))) {
+ decode_smca_errors(m);
+ goto err_code;
+ }
+
if (!fam_ops)
goto err_code;
@@ -834,6 +1149,7 @@ static struct notifier_block amd_mce_dec_nb = {
static int __init mce_amd_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
+ u32 ebx;
if (c->x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
@@ -888,10 +1204,18 @@ static int __init mce_amd_init(void)
fam_ops->mc2_mce = f16h_mc2_mce;
break;
+ case 0x17:
+ ebx = cpuid_ebx(0x80000007);
+ xec_mask = 0x3f;
+ if (!(ebx & BIT(3))) {
+ printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
+ goto err_out;
+ }
+ break;
+
default:
printk(KERN_WARNING "Huh? What family is it: 0x%x?!\n", c->x86);
- kfree(fam_ops);
- fam_ops = NULL;
+ goto err_out;
}
pr_info("MCE: In-kernel MCE decoding enabled.\n");
@@ -899,6 +1223,11 @@ static int __init mce_amd_init(void)
mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
+
+err_out:
+ kfree(fam_ops);
+ fam_ops = NULL;
+ return -EINVAL;
}
early_initcall(mce_amd_init);
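[Editor's note] The Scalable MCA paths key off CPUID leaf 0x80000007: bit 3 of EBX advertises SMCA support, and only then are the per-bank SMCA MSRs (IPID, CONFIG) defined. A hedged sketch of the gate, with "bank" as a placeholder value:

	u32 ebx = cpuid_ebx(0x80000007);
	int bank = 0;		/* hypothetical bank number */

	if (ebx & BIT(3)) {	/* SMCA supported */
		u32 low, high;

		/* rdmsr_safe() returns nonzero if the read faults, so an
		 * unimplemented register degrades gracefully. */
		if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high))
			pr_info("IPID: 0x%08x%08x\n", high, low);
	}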
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index b7139c160baf..ca63d0da8889 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1244,7 +1244,7 @@ static struct platform_driver * const drivers[] = {
static int __init mpc85xx_mc_init(void)
{
int res = 0;
- u32 pvr = 0;
+ u32 __maybe_unused pvr = 0;
printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
"(C) 2006 Montavista Software\n");
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f5c6b97c8958..93f0d4120289 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1839,8 +1839,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
n_tads, gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
+ (u32)(1 << TAD_SOCK(reg)),
+ (u32)TAD_CH(reg) + 1,
(u32)TAD_TGT0(reg),
(u32)TAD_TGT1(reg),
(u32)TAD_TGT2(reg),
@@ -2118,7 +2118,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
ch_way = TAD_CH(reg) + 1;
- sck_way = TAD_SOCK(reg) + 1;
+ sck_way = 1 << TAD_SOCK(reg);
if (ch_way == 3)
idx = addr >> 6;
@@ -2175,7 +2175,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
n_tads,
addr,
limit,
- (u32)TAD_SOCK(reg),
+ sck_way,
ch_way,
offset,
idx,
@@ -2190,18 +2190,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
offset, addr);
return -EINVAL;
}
- addr -= offset;
- /* Store the low bits [0:6] of the addr */
- ch_addr = addr & 0x7f;
- /* Remove socket wayness and remove 6 bits */
- addr >>= 6;
- addr = div_u64(addr, sck_xch);
-#if 0
- /* Divide by channel way */
- addr = addr / ch_way;
-#endif
- /* Recover the last 6 bits */
- ch_addr |= addr << 6;
+
+ ch_addr = addr - offset;
+ ch_addr >>= (6 + shiftup);
+ ch_addr /= ch_way * sck_way;
+ ch_addr <<= (6 + shiftup);
+ ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
/*
* Step 3) Decode rank
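[Editor's note] The rewritten channel-address math removes the interleave in one pass: subtract the region offset, shift out the low 6+shiftup bits preserved across interleaving, divide by the combined channel and socket ways, shift back, and re-insert the preserved bits. A worked example with hypothetical values (shiftup = 0, ch_way = 2, sck_way = 2, offset = 0):

	u64 addr = 0x1040, offset = 0, ch_addr;

	ch_addr = addr - offset;		/* 0x1040 */
	ch_addr >>= 6;				/* 0x41: drop preserved low bits */
	ch_addr /= 2 * 2;			/* 0x10: strip 4-way interleave */
	ch_addr <<= 6;				/* 0x400 */
	ch_addr |= addr & ((1 << 6) - 1);	/* low bits are 0 -> 0x400 */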
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 41f876414a18..bf19b6e3bd12 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -61,6 +61,7 @@ struct xgene_edac {
struct regmap *mcba_map;
struct regmap *mcbb_map;
struct regmap *efuse_map;
+ struct regmap *rb_map;
void __iomem *pcp_csr;
spinlock_t lock;
struct dentry *dfs;
@@ -1057,7 +1058,7 @@ static bool xgene_edac_l3_promote_to_uc_err(u32 l3cesr, u32 l3celr)
case 0x041:
return true;
}
- } else if (L3C_ELR_ERRSYN(l3celr) == 9)
+ } else if (L3C_ELR_ERRWAY(l3celr) == 9)
return true;
return false;
@@ -1353,6 +1354,17 @@ static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
#define GLBL_MDED_ERRH 0x0848
#define GLBL_MDED_ERRHMASK 0x084c
+/* IO Bus Registers */
+#define RBCSR 0x0000
+#define STICKYERR_MASK BIT(0)
+#define RBEIR 0x0008
+#define AGENT_OFFLINE_ERR_MASK BIT(30)
+#define UNIMPL_RBPAGE_ERR_MASK BIT(29)
+#define WORD_ALIGNED_ERR_MASK BIT(28)
+#define PAGE_ACCESS_ERR_MASK BIT(27)
+#define WRITE_ACCESS_MASK BIT(26)
+#define RBERRADDR_RD(src) ((src) & 0x03FFFFFF)
+
static const char * const soc_mem_err_v1[] = {
"10GbE0",
"10GbE1",
@@ -1470,6 +1482,51 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
u32 err_addr_hi;
u32 reg;
+ /* If the register bus resource isn't available, just skip it */
+ if (!ctx->edac->rb_map)
+ goto rb_skip;
+
+ /*
+ * Check RB access errors
+ * 1. Out of range
+ * 2. Un-implemented page
+ * 3. Un-aligned access
+ * 4. Offline slave IP
+ */
+ if (regmap_read(ctx->edac->rb_map, RBCSR, &reg))
+ return;
+ if (reg & STICKYERR_MASK) {
+ bool write;
+ u32 address;
+
+ dev_err(edac_dev->dev, "IOB bus access error(s)\n");
+ if (regmap_read(ctx->edac->rb_map, RBEIR, &reg))
+ return;
+ write = reg & WRITE_ACCESS_MASK ? 1 : 0;
+ address = RBERRADDR_RD(reg);
+ if (reg & AGENT_OFFLINE_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s access to offline agent error\n",
+ write ? "write" : "read");
+ if (reg & UNIMPL_RBPAGE_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s access to unimplemented page error\n",
+ write ? "write" : "read");
+ if (reg & WORD_ALIGNED_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s word aligned access error\n",
+ write ? "write" : "read");
+ if (reg & PAGE_ACCESS_ERR_MASK)
+ dev_err(edac_dev->dev,
+ "IOB bus %s to page out of range access error\n",
+ write ? "write" : "read");
+ if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
+ return;
+ if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
+ return;
+ }
+rb_skip:
+
/* IOB Bridge agent transaction error interrupt */
reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
if (!reg)
@@ -1852,6 +1909,17 @@ static int xgene_edac_probe(struct platform_device *pdev)
goto out_err;
}
+ /*
+ * NOTE: The register bus resource is optional for compatibility
+ * reasons.
+ */
+ edac->rb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "regmap-rb");
+ if (IS_ERR(edac->rb_map)) {
+ dev_warn(edac->dev, "missing syscon regmap rb\n");
+ edac->rb_map = NULL;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
edac->pcp_csr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(edac->pcp_csr)) {
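[Editor's note] syscon_regmap_lookup_by_phandle() returns an ERR_PTR rather than NULL on failure, so an optional resource is handled by collapsing the error to NULL and testing before each use, exactly as the probe and report paths above do. A minimal sketch:

	struct regmap *rb_map;
	u32 reg;

	rb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						 "regmap-rb");
	if (IS_ERR(rb_map))
		rb_map = NULL;	/* optional: older DTs lack the phandle */

	if (rb_map && !regmap_read(rb_map, RBCSR, &reg) &&
	    (reg & STICKYERR_MASK))
		handle_rb_error(rb_map);	/* hypothetical helper */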
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index c121d01a5cd6..1d8e0a57bd51 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -185,7 +185,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
break;
};
- mutex_lock(&arizona->dapm->card->dapm_mutex);
+ snd_soc_dapm_mutex_lock(arizona->dapm);
arizona->hpdet_clamp = clamp;
@@ -227,7 +227,7 @@ static void arizona_extcon_hp_clamp(struct arizona_extcon_info *info,
ret);
}
- mutex_unlock(&arizona->dapm->card->dapm_mutex);
+ snd_soc_dapm_mutex_unlock(arizona->dapm);
}
static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 279ff8f6637d..d023789f0fda 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -126,7 +126,7 @@ static int gpio_extcon_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&data->work, gpio_extcon_work);
/*
- * Request the interrput of gpio to detect whether external connector
+ * Request the interrupt of gpio to detect whether external connector
* is attached or detached.
*/
ret = devm_request_any_context_irq(&pdev->dev, data->irq,
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index b30ab97ce75f..852a7112f451 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -150,6 +150,7 @@ enum max14577_muic_acc_type {
static const unsigned int max14577_extcon_cable[] = {
EXTCON_USB,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
@@ -454,6 +455,8 @@ static int max14577_muic_chg_handler(struct max14577_muic_info *info)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
case MAX14577_CHARGER_TYPE_DEDICATED_CHG:
extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_DCP,
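Note on the pattern repeated across the MUIC drivers below: an SDP (Standard Downstream Port, i.e. an ordinary USB host port) supplies both a data connection and charging current, so each handler now reports EXTCON_CHG_USB_SDP alongside EXTCON_USB. A sketch of the common shape, assuming kernel context; report_sdp() is a hypothetical helper, not part of the patch:

```c
#include <linux/extcon.h>

/* Hypothetical helper showing the shape of the change: when a
 * Standard Downstream Port attaches or detaches, report the data
 * connection and the SDP charging capability together, so charger
 * drivers listening for EXTCON_CHG_USB_SDP see the event too. */
static void report_sdp(struct extcon_dev *edev, bool attached)
{
	extcon_set_cable_state_(edev, EXTCON_USB, attached);
	extcon_set_cable_state_(edev, EXTCON_CHG_USB_SDP, attached);
}
```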
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index fdf8f5d4d4e9..f17cb76b567c 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -204,6 +204,7 @@ enum max77693_muic_acc_type {
static const unsigned int max77693_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
@@ -512,8 +513,11 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
break;
case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
dock_id = EXTCON_DOCK;
- if (!attached)
+ if (!attached) {
extcon_set_cable_state_(info->edev, EXTCON_USB, false);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ false);
+ }
break;
default:
dev_err(info->dev, "failed to detect %s dock device\n",
@@ -601,6 +605,8 @@ static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
if (ret < 0)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
case MAX77693_MUIC_GND_MHL:
case MAX77693_MUIC_GND_MHL_VB:
@@ -830,6 +836,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
*/
extcon_set_cable_state_(info->edev, EXTCON_USB,
attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
if (!cable_attached)
extcon_set_cable_state_(info->edev, EXTCON_DOCK,
@@ -899,6 +907,8 @@ static int max77693_muic_chg_handler(struct max77693_muic_info *info)
extcon_set_cable_state_(info->edev, EXTCON_USB,
attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
/* Only TA cable */
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index 74dfb7f4f277..b188bd650efa 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -122,6 +122,7 @@ enum max77843_muic_charger_type {
static const unsigned int max77843_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_CDP,
EXTCON_CHG_USB_FAST,
@@ -486,6 +487,8 @@ static int max77843_muic_chg_handler(struct max77843_muic_info *info)
return ret;
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
case MAX77843_MUIC_CHG_DOWNSTREAM:
ret = max77843_muic_set_path(info,
@@ -803,7 +806,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
/* Clear IRQ bits before request IRQs */
ret = regmap_bulk_read(max77843->regmap_muic,
MAX77843_MUIC_REG_INT1, info->status,
- MAX77843_MUIC_IRQ_NUM);
+ MAX77843_MUIC_STATUS_NUM);
if (ret) {
dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
goto err_muic_irq;
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index b2b13b3dce14..9a89320d09a8 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -148,6 +148,7 @@ struct max8997_muic_info {
static const unsigned int max8997_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_CHG_USB_FAST,
EXTCON_CHG_USB_SLOW,
@@ -334,6 +335,8 @@ static int max8997_muic_handle_usb(struct max8997_muic_info *info,
break;
case MAX8997_USB_DEVICE:
extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
break;
default:
dev_err(info->dev, "failed to detect %s usb cable\n",
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 93c30a885740..8b3226dca1d9 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -216,11 +216,23 @@ static int palmas_usb_probe(struct platform_device *pdev)
return PTR_ERR(palmas_usb->id_gpiod);
}
+ palmas_usb->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+ GPIOD_IN);
+ if (IS_ERR(palmas_usb->vbus_gpiod)) {
+ dev_err(&pdev->dev, "failed to get vbus gpio\n");
+ return PTR_ERR(palmas_usb->vbus_gpiod);
+ }
+
if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
palmas_usb->enable_id_detection = false;
palmas_usb->enable_gpio_id_detection = true;
}
+ if (palmas_usb->enable_vbus_detection && palmas_usb->vbus_gpiod) {
+ palmas_usb->enable_vbus_detection = false;
+ palmas_usb->enable_gpio_vbus_detection = true;
+ }
+
if (palmas_usb->enable_gpio_id_detection) {
u32 debounce;
@@ -266,7 +278,7 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->id_irq,
NULL, palmas_id_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas_usb_id", palmas_usb);
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
@@ -304,13 +316,46 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->vbus_irq, NULL,
palmas_vbus_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas_usb_vbus", palmas_usb);
if (status < 0) {
dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->vbus_irq, status);
return status;
}
+ } else if (palmas_usb->enable_gpio_vbus_detection) {
+ /* remux GPIO_1 as VBUSDET */
+ status = palmas_update_bits(palmas,
+ PALMAS_PU_PD_OD_BASE,
+ PALMAS_PRIMARY_SECONDARY_PAD1,
+ PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK,
+ (1 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT));
+ if (status < 0) {
+ dev_err(&pdev->dev, "can't remux GPIO1\n");
+ return status;
+ }
+
+ palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+ PALMAS_VBUS_OTG_IRQ);
+ palmas_usb->gpio_vbus_irq = gpiod_to_irq(palmas_usb->vbus_gpiod);
+ if (palmas_usb->gpio_vbus_irq < 0) {
+ dev_err(&pdev->dev, "failed to get vbus irq\n");
+ return palmas_usb->gpio_vbus_irq;
+ }
+ status = devm_request_threaded_irq(&pdev->dev,
+ palmas_usb->gpio_vbus_irq,
+ NULL,
+ palmas_vbus_irq_handler,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT,
+ "palmas_usb_vbus",
+ palmas_usb);
+ if (status < 0) {
+ dev_err(&pdev->dev,
+ "failed to request handler for vbus irq\n");
+ return status;
+ }
}
palmas_enable_irq(palmas_usb);
@@ -337,6 +382,8 @@ static int palmas_usb_suspend(struct device *dev)
if (device_may_wakeup(dev)) {
if (palmas_usb->enable_vbus_detection)
enable_irq_wake(palmas_usb->vbus_irq);
+ if (palmas_usb->enable_gpio_vbus_detection)
+ enable_irq_wake(palmas_usb->gpio_vbus_irq);
if (palmas_usb->enable_id_detection)
enable_irq_wake(palmas_usb->id_irq);
if (palmas_usb->enable_gpio_id_detection)
@@ -352,6 +399,8 @@ static int palmas_usb_resume(struct device *dev)
if (device_may_wakeup(dev)) {
if (palmas_usb->enable_vbus_detection)
disable_irq_wake(palmas_usb->vbus_irq);
+ if (palmas_usb->enable_gpio_vbus_detection)
+ disable_irq_wake(palmas_usb->gpio_vbus_irq);
if (palmas_usb->enable_id_detection)
disable_irq_wake(palmas_usb->id_irq);
if (palmas_usb->enable_gpio_id_detection)
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index e1bb82809bef..97e074d70eca 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -93,6 +93,7 @@ static struct reg_data rt8973a_reg_data[] = {
static const unsigned int rt8973a_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_JIG,
EXTCON_NONE,
@@ -398,6 +399,9 @@ static int rt8973a_muic_cable_handler(struct rt8973a_muic_info *info,
/* Change the state of external accessory */
extcon_set_cable_state_(info->edev, id, attached);
+ if (id == EXTCON_USB)
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
return 0;
}
@@ -663,7 +667,7 @@ MODULE_DEVICE_TABLE(of, rt8973a_dt_match);
#ifdef CONFIG_PM_SLEEP
static int rt8973a_muic_suspend(struct device *dev)
{
- struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *i2c = to_i2c_client(dev);
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
enable_irq_wake(info->irq);
@@ -673,7 +677,7 @@ static int rt8973a_muic_suspend(struct device *dev)
static int rt8973a_muic_resume(struct device *dev)
{
- struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *i2c = to_i2c_client(dev);
struct rt8973a_muic_info *info = i2c_get_clientdata(i2c);
disable_irq_wake(info->irq);
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 7aac3cc7efd7..df769a17e736 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -95,6 +95,7 @@ static struct reg_data sm5502_reg_data[] = {
static const unsigned int sm5502_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
EXTCON_CHG_USB_DCP,
EXTCON_NONE,
};
@@ -411,6 +412,9 @@ static int sm5502_muic_cable_handler(struct sm5502_muic_info *info,
/* Change the state of external accessory */
extcon_set_cable_state_(info->edev, id, attached);
+ if (id == EXTCON_USB)
+ extcon_set_cable_state_(info->edev, EXTCON_CHG_USB_SDP,
+ attached);
return 0;
}
@@ -655,7 +659,7 @@ MODULE_DEVICE_TABLE(of, sm5502_dt_match);
#ifdef CONFIG_PM_SLEEP
static int sm5502_muic_suspend(struct device *dev)
{
- struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *i2c = to_i2c_client(dev);
struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
enable_irq_wake(info->irq);
@@ -665,7 +669,7 @@ static int sm5502_muic_suspend(struct device *dev)
static int sm5502_muic_resume(struct device *dev)
{
- struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *i2c = to_i2c_client(dev);
struct sm5502_muic_info *info = i2c_get_clientdata(i2c);
disable_irq_wake(info->irq);
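The conversions above swap open-coded container_of() arithmetic for the to_i2c_client() helper, which is itself a container_of() wrapper. A user-space re-creation of the idiom, for illustration:

```c
#include <stddef.h>
#include <stdio.h>

/* User-space re-creation of the kernel's container_of() idiom that
 * to_i2c_client() wraps: recover the enclosing structure from a
 * pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };

struct i2c_client {
	char name[16];
	struct device dev;
};

#define to_i2c_client(d) container_of(d, struct i2c_client, dev)

int main(void)
{
	struct i2c_client client = { .name = "rt8973a", .dev = { .id = 3 } };
	struct device *dev = &client.dev;

	printf("client name: %s\n", to_i2c_client(dev)->name);
	return 0;
}
```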
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 36a7c2d89a01..aee149bdf4c0 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -221,7 +221,7 @@ struct inbound_phy_packet_event {
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
- if (is_compat_task())
+ if (in_compat_syscall())
return compat_ptr(value);
else
return (void __user *)(unsigned long)value;
@@ -229,7 +229,7 @@ static void __user *u64_to_uptr(u64 value)
static u64 uptr_to_u64(void __user *ptr)
{
- if (is_compat_task())
+ if (in_compat_syscall())
return ptr_to_compat(ptr);
else
return (u64)(unsigned long)ptr;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 76b2d390f6ec..631c977b0da5 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -33,6 +33,7 @@
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
@@ -413,17 +414,18 @@ static void
packet_irq_handler(struct pcilynx *lynx)
{
struct client *client;
- u32 tcode_mask, tcode;
+ u32 tcode_mask, tcode, timestamp;
size_t length;
- struct timeval tv;
+ struct timespec64 ts64;
/* FIXME: Also report rcv_speed. */
length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;
- do_gettimeofday(&tv);
- lynx->rcv_buffer[0] = (__force __le32)tv.tv_usec;
+ ktime_get_real_ts64(&ts64);
+ timestamp = ts64.tv_nsec / NSEC_PER_USEC;
+ lynx->rcv_buffer[0] = (__force __le32)timestamp;
if (length == PHY_PACKET_SIZE)
tcode_mask = 1 << TCODE_PHY_PACKET;
@@ -444,14 +446,16 @@ static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
struct client *client;
- struct timeval tv;
+ struct timespec64 ts64;
+ u32 timestamp;
- do_gettimeofday(&tv);
+ ktime_get_real_ts64(&ts64);
+ timestamp = ts64.tv_nsec / NSEC_PER_USEC;
spin_lock(&lynx->client_list_lock);
list_for_each_entry(client, &lynx->client_list, link)
- packet_buffer_put(&client->buffer, &tv.tv_usec, 4);
+ packet_buffer_put(&client->buffer, &timestamp, 4);
spin_unlock(&lynx->client_list_lock);
}
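The timestamp change above drops the y2038-unsafe struct timeval and keeps only the sub-second microsecond fraction from a 64-bit-safe timespec. A user-space sketch of the same conversion, with clock_gettime() standing in for ktime_get_real_ts64():

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_USEC 1000L

/* Derive the sub-second microsecond stamp the packet buffer expects
 * from a 64-bit-safe timespec, mirroring the patch. */
static uint32_t usec_timestamp(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);	/* stands in for ktime_get_real_ts64() */
	return (uint32_t)(ts.tv_nsec / NSEC_PER_USEC);
}

int main(void)
{
	printf("timestamp: %u us\n", usec_timestamp());
	return 0;
}
```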
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index c2f5117fd8cb..8bf89267dc25 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2278,9 +2278,10 @@ static int ohci_enable(struct fw_card *card,
u32 lps, version, irqs;
int i, ret;
- if (software_reset(ohci)) {
+ ret = software_reset(ohci);
+ if (ret < 0) {
ohci_err(ohci, "failed to reset ohci card\n");
- return -EBUSY;
+ return ret;
}
/*
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 49a3a1185bb6..6664f1108c7c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -161,6 +161,26 @@ config RASPBERRYPI_FIRMWARE
This option enables support for communicating with the firmware on the
Raspberry Pi.
+config FW_CFG_SYSFS
+ tristate "QEMU fw_cfg device support in sysfs"
+ depends on SYSFS && (ARM || ARM64 || PPC_PMAC || SPARC || X86)
+ depends on HAS_IOPORT_MAP
+ default n
+ help
+ Say Y or M here to enable the exporting of the QEMU firmware
+ configuration (fw_cfg) file entries via sysfs. Entries are
+ found under /sys/firmware/fw_cfg when this option is enabled
+ and loaded.
+
+config FW_CFG_SYSFS_CMDLINE
+ bool "QEMU fw_cfg device parameter parsing"
+ depends on FW_CFG_SYSFS
+ help
+ Allow the qemu_fw_cfg device to be initialized via the kernel
+ command line or using a module parameter.
+ WARNING: Using incorrect parameters (base address in particular)
+ may crash your system.
+
config QCOM_SCM
bool
depends on ARM || ARM64
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 48dd4175297e..474bada56fcd 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
+obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 6174db80c663..7e3e595c9f30 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -80,7 +80,7 @@
#define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
#define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK)
-#define MAX_RX_TIMEOUT (msecs_to_jiffies(20))
+#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
enum scpi_error_codes {
SCPI_SUCCESS = 0, /* Success */
@@ -231,7 +231,8 @@ struct _scpi_sensor_info {
};
struct sensor_value {
- __le32 val;
+ __le32 lo_val;
+ __le32 hi_val;
} __packed;
static struct scpi_drvinfo *scpi_info;
@@ -373,7 +374,7 @@ static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
ret = -ETIMEDOUT;
else
/* first status word */
- ret = le32_to_cpu(msg->status);
+ ret = msg->status;
out:
if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
scpi_process_cmd(scpi_chan, msg->cmd);
@@ -525,15 +526,17 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
return ret;
}
-int scpi_sensor_get_value(u16 sensor, u32 *val)
+int scpi_sensor_get_value(u16 sensor, u64 *val)
{
+ __le16 id = cpu_to_le16(sensor);
struct sensor_value buf;
int ret;
- ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &sensor, sizeof(sensor),
+ ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
&buf, sizeof(buf));
if (!ret)
- *val = le32_to_cpu(buf.val);
+ *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
+ le32_to_cpu(buf.lo_val);
return ret;
}
@@ -699,7 +702,7 @@ static int scpi_probe(struct platform_device *pdev)
cl->rx_callback = scpi_handle_remote_msg;
cl->tx_prepare = scpi_tx_prepare;
cl->tx_block = true;
- cl->tx_tout = 50;
+ cl->tx_tout = 20;
cl->knows_txdone = false; /* controller can't ack */
INIT_LIST_HEAD(&pchan->rx_pending);
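The sensor widening above transports a reading as two little-endian 32-bit words and recombines them into a u64. A minimal sketch of the combining step, assuming the le32-to-host conversion has already been done:

```c
#include <stdint.h>
#include <stdio.h>

/* Two 32-bit halves of a 64-bit SCPI sensor reading, already in
 * host byte order here (the driver uses le32_to_cpu() first). */
struct sensor_value {
	uint32_t lo_val;
	uint32_t hi_val;
} __attribute__((packed));

static uint64_t sensor_to_u64(const struct sensor_value *v)
{
	return (uint64_t)v->hi_val << 32 | v->lo_val;
}

int main(void)
{
	struct sensor_value v = { .lo_val = 0x89abcdef, .hi_val = 0x01234567 };

	printf("value = 0x%016llx\n", (unsigned long long)sensor_to_u64(&v));
	return 0;
}
```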
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 0c2f0a61b0ea..0b631e5b5b84 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -94,15 +94,14 @@ static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
found:
__ioread32_copy(nvram_buf, header, sizeof(*header) / 4);
- header = (struct nvram_header *)nvram_buf;
- nvram_len = header->len;
+ nvram_len = ((struct nvram_header *)(nvram_buf))->len;
if (nvram_len > size) {
pr_err("The nvram size according to the header seems to be bigger than the partition on flash\n");
nvram_len = size;
}
if (nvram_len >= NVRAM_SPACE) {
pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
- header->len, NVRAM_SPACE - 1);
+ nvram_len, NVRAM_SPACE - 1);
nvram_len = NVRAM_SPACE - 1;
}
/* proceed reading data after header */
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 9e15d571b53c..8714f8c271ba 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -61,8 +61,8 @@ static int __init uefi_init(void)
char vendor[100] = "unknown";
int i, retval;
- efi.systab = early_memremap(efi_system_table,
- sizeof(efi_system_table_t));
+ efi.systab = early_memremap_ro(efi_system_table,
+ sizeof(efi_system_table_t));
if (efi.systab == NULL) {
pr_warn("Unable to map EFI system table.\n");
return -ENOMEM;
@@ -86,8 +86,8 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff);
/* Show what we know for posterity */
- c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
- sizeof(vendor) * sizeof(efi_char16_t));
+ c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
+ sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = c16[i];
@@ -100,8 +100,8 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff, vendor);
table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
- config_tables = early_memremap(efi_to_phys(efi.systab->tables),
- table_size);
+ config_tables = early_memremap_ro(efi_to_phys(efi.systab->tables),
+ table_size);
if (config_tables == NULL) {
pr_warn("Unable to map EFI config table array.\n");
retval = -ENOMEM;
@@ -185,7 +185,7 @@ void __init efi_init(void)
efi_system_table = params.system_table;
memmap.phys_map = params.mmap;
- memmap.map = early_memremap(params.mmap, params.mmap_size);
+ memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
if (memmap.map == NULL) {
/*
* If we are booting via UEFI, the UEFI memory map is the only
@@ -203,7 +203,19 @@ void __init efi_init(void)
reserve_regions();
early_memunmap(memmap.map, params.mmap_size);
- memblock_mark_nomap(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
+
+ if (IS_ENABLED(CONFIG_ARM)) {
+ /*
+ * ARM currently does not allow ioremap_cache() to be called on
+ * memory regions that are covered by struct page. So remove the
+ * UEFI memory map from the linear mapping.
+ */
+ memblock_mark_nomap(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ } else {
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+ }
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2cd37dad67a6..3a69ed5ecfcb 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -182,6 +182,7 @@ static int generic_ops_register(void)
{
generic_ops.get_variable = efi.get_variable;
generic_ops.set_variable = efi.set_variable;
+ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
@@ -326,38 +327,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
return end;
}
-/*
- * We can't ioremap data in EFI boot services RAM, because we've already mapped
- * it as RAM. So, look it up in the existing EFI memory map instead. Only
- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
- */
-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
-{
- struct efi_memory_map *map;
- void *p;
- map = efi.memmap;
- if (!map)
- return NULL;
- if (WARN_ON(!map->map))
- return NULL;
- for (p = map->map; p < map->map_end; p += map->desc_size) {
- efi_memory_desc_t *md = p;
- u64 size = md->num_pages << EFI_PAGE_SHIFT;
- u64 end = md->phys_addr + size;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
- if (!md->virt_addr)
- continue;
- if (phys_addr >= md->phys_addr && phys_addr < end) {
- phys_addr += md->virt_addr - md->phys_addr;
- return (__force void __iomem *)(unsigned long)phys_addr;
- }
- }
- return NULL;
-}
-
static __initdata efi_config_table_type_t common_tables[] = {
{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
@@ -586,7 +555,8 @@ static __initdata char memory_type_name[][20] = {
"ACPI Memory NVS",
"Memory Mapped I/O",
"MMIO Port Space",
- "PAL Code"
+ "PAL Code",
+ "Persistent Memory",
};
char * __init efi_md_typeattr_format(char *buf, size_t size,
@@ -613,13 +583,16 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+ EFI_MEMORY_NV |
EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
- snprintf(pos, size, "|%3s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ snprintf(pos, size,
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+ attr & EFI_MEMORY_NV ? "NV" : "",
attr & EFI_MEMORY_XP ? "XP" : "",
attr & EFI_MEMORY_RP ? "RP" : "",
attr & EFI_MEMORY_WP ? "WP" : "",
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 10e6774ab2a2..096adcbcb5a9 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -231,7 +231,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
static inline bool is_compat(void)
{
- if (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())
+ if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall())
return true;
return false;
@@ -386,7 +386,7 @@ static const struct sysfs_ops efivar_attr_ops = {
static void efivar_release(struct kobject *kobj)
{
- struct efivar_entry *var = container_of(kobj, struct efivar_entry, kobj);
+ struct efivar_entry *var = to_efivar_entry(kobj);
kfree(var);
}
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 22c5285f7705..75feb3f5829b 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -167,14 +167,11 @@ static struct kset *esrt_kset;
static int esre_create_sysfs_entry(void *esre, int entry_num)
{
struct esre_entry *entry;
- char name[20];
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- sprintf(name, "entry%d", entry_num);
-
entry->kobj.kset = esrt_kset;
if (esrt->fw_resource_version == 1) {
@@ -182,7 +179,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
entry->esre.esre1 = esre;
rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
- "%s", name);
+ "entry%d", entry_num);
if (rc) {
kfree(entry);
return rc;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index aaf9c0bab42e..da99bbb74aeb 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -23,6 +23,10 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
lib-y := efi-stub-helper.o
@@ -36,7 +40,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
$(patsubst %.c,lib-%.o,$(arm-deps))
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64-stub.o
+lib-$(CONFIG_ARM64) += arm64-stub.o random.o
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 3397902e4040..414deb85c2e5 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -18,6 +18,8 @@
#include "efistub.h"
+bool __nokaslr;
+
static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
{
static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
@@ -190,6 +192,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi(sys_table, "Booting Linux Kernel...\n");
+ status = check_platform_features(sys_table);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
/*
* Get a handle to the loaded image protocol. This is used to get
* information about the running image, such as size and the command
@@ -207,14 +213,6 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi_err(sys_table, "Failed to find DRAM base\n");
goto fail;
}
- status = handle_kernel_image(sys_table, image_addr, &image_size,
- &reserve_addr,
- &reserve_size,
- dram_base, image);
- if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel\n");
- goto fail;
- }
/*
* Get the command line from EFI, using the LOADED_IMAGE
@@ -224,7 +222,28 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
cmdline_ptr = efi_convert_cmdline(sys_table, image, &cmdline_size);
if (!cmdline_ptr) {
pr_efi_err(sys_table, "getting command line via LOADED_IMAGE_PROTOCOL\n");
- goto fail_free_image;
+ goto fail;
+ }
+
+ /* check whether 'nokaslr' was passed on the command line */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ static const u8 default_cmdline[] = CONFIG_CMDLINE;
+ const u8 *str, *cmdline = cmdline_ptr;
+
+ if (IS_ENABLED(CONFIG_CMDLINE_FORCE))
+ cmdline = default_cmdline;
+ str = strstr(cmdline, "nokaslr");
+ if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
+ __nokaslr = true;
+ }
+
+ status = handle_kernel_image(sys_table, image_addr, &image_size,
+ &reserve_addr,
+ &reserve_size,
+ dram_base, image);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Failed to relocate kernel\n");
+ goto fail_free_cmdline;
}
status = efi_parse_options(cmdline_ptr);
@@ -244,7 +263,7 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Failed to load device tree!\n");
- goto fail_free_cmdline;
+ goto fail_free_image;
}
}
@@ -286,12 +305,11 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_free(sys_table, initrd_size, initrd_addr);
efi_free(sys_table, fdt_size, fdt_addr);
-fail_free_cmdline:
- efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
-
fail_free_image:
efi_free(sys_table, image_size, *image_addr);
efi_free(sys_table, reserve_size, reserve_addr);
+fail_free_cmdline:
+ efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
}
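The nokaslr detection above accepts the option only at the start of the command line or directly after a space. A standalone version of that check; like the stub, it does not test the trailing boundary:

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* The option counts only if it is the first word or is preceded by
 * a space. (Like the stub's check, this would also match a longer
 * token such as "nokaslrfoo".) */
static bool cmdline_has_nokaslr(const char *cmdline)
{
	const char *str = strstr(cmdline, "nokaslr");

	return str && (str == cmdline || str[-1] == ' ');
}

int main(void)
{
	printf("%d\n", cmdline_has_nokaslr("console=ttyAMA0 nokaslr"));	/* 1 */
	printf("%d\n", cmdline_has_nokaslr("console=ttyAMA0,nokaslr"));	/* 0 */
	return 0;
}
```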
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 495ebd657e38..6f42be4d0084 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -9,6 +9,23 @@
#include <linux/efi.h>
#include <asm/efi.h>
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+{
+ int block;
+
+ /* non-LPAE kernels can run anywhere */
+ if (!IS_ENABLED(CONFIG_ARM_LPAE))
+ return EFI_SUCCESS;
+
+ /* LPAE kernels need compatible hardware */
+ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
+ if (block < 5) {
+ pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
unsigned long *image_addr,
unsigned long *image_size,
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 78dfbd34b6bf..a90f6459f5c6 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -12,37 +12,87 @@
#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/sections.h>
+#include <asm/sysreg.h>
-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
- unsigned long *image_size,
- unsigned long *reserve_addr,
- unsigned long *reserve_size,
- unsigned long dram_base,
- efi_loaded_image_t *image)
+#include "efistub.h"
+
+extern bool __nokaslr;
+
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+{
+ u64 tg;
+
+ /* UEFI mandates support for 4 KB granularity, no need to check */
+ if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+ return EFI_SUCCESS;
+
+ tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
+ if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+ pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
+ else
+ pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
+efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
- unsigned long nr_pages;
void *old_image_addr = (void *)*image_addr;
unsigned long preferred_offset;
+ u64 phys_seed = 0;
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ if (!__nokaslr) {
+ status = efi_get_random_bytes(sys_table_arg,
+ sizeof(phys_seed),
+ (u8 *)&phys_seed);
+ if (status == EFI_NOT_FOUND) {
+ pr_efi(sys_table_arg, "EFI_RNG_PROTOCOL unavailable, no randomness supplied\n");
+ } else if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "efi_get_random_bytes() failed\n");
+ return status;
+ }
+ } else {
+ pr_efi(sys_table_arg, "KASLR disabled on kernel command line\n");
+ }
+ }
/*
* The preferred offset of the kernel Image is TEXT_OFFSET bytes beyond
* a 2 MB aligned base, which itself may be lower than dram_base, as
* long as the resulting offset equals or exceeds it.
*/
- preferred_offset = round_down(dram_base, SZ_2M) + TEXT_OFFSET;
+ preferred_offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;
if (preferred_offset < dram_base)
- preferred_offset += SZ_2M;
+ preferred_offset += MIN_KIMG_ALIGN;
- /* Relocate the image, if required. */
kernel_size = _edata - _text;
- if (*image_addr != preferred_offset) {
- kernel_memsize = kernel_size + (_end - _edata);
+ kernel_memsize = kernel_size + (_end - _edata);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
+ /*
+ * If KASLR is enabled, and we have some randomness available,
+ * locate the kernel at a randomized offset in physical memory.
+ */
+ *reserve_size = kernel_memsize + TEXT_OFFSET;
+ status = efi_random_alloc(sys_table_arg, *reserve_size,
+ MIN_KIMG_ALIGN, reserve_addr,
+ phys_seed);
+ *image_addr = *reserve_addr + TEXT_OFFSET;
+ } else {
/*
- * First, try a straight allocation at the preferred offset.
+ * Else, try a straight allocation at the preferred offset.
* This will work around the issue where, if dram_base == 0x0,
* efi_low_alloc() refuses to allocate at 0x0 (to prevent the
* address of the allocation to be mistaken for a FAIL return
@@ -52,27 +102,31 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
* Mustang), we can still place the kernel at the address
* 'dram_base + TEXT_OFFSET'.
*/
+ if (*image_addr == preferred_offset)
+ return EFI_SUCCESS;
+
*image_addr = *reserve_addr = preferred_offset;
- nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
- EFI_PAGE_SIZE;
+ *reserve_size = round_up(kernel_memsize, EFI_ALLOC_ALIGN);
+
status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
- EFI_LOADER_DATA, nr_pages,
+ EFI_LOADER_DATA,
+ *reserve_size / EFI_PAGE_SIZE,
(efi_physical_addr_t *)reserve_addr);
- if (status != EFI_SUCCESS) {
- kernel_memsize += TEXT_OFFSET;
- status = efi_low_alloc(sys_table_arg, kernel_memsize,
- SZ_2M, reserve_addr);
+ }
- if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
- return status;
- }
- *image_addr = *reserve_addr + TEXT_OFFSET;
+ if (status != EFI_SUCCESS) {
+ *reserve_size = kernel_memsize + TEXT_OFFSET;
+ status = efi_low_alloc(sys_table_arg, *reserve_size,
+ MIN_KIMG_ALIGN, reserve_addr);
+
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+ *reserve_size = 0;
+ return status;
}
- memcpy((void *)*image_addr, old_image_addr, kernel_size);
- *reserve_size = kernel_memsize;
+ *image_addr = *reserve_addr + TEXT_OFFSET;
}
-
+ memcpy((void *)*image_addr, old_image_addr, kernel_size);
return EFI_SUCCESS;
}
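The placement math above: the preferred address is TEXT_OFFSET bytes past the MIN_KIMG_ALIGN boundary at or below dram_base, bumped up one alignment unit if that would land below the start of DRAM. A sketch with invented TEXT_OFFSET and MIN_KIMG_ALIGN values; the real ones come from the arm64 build:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; not taken from a real kernel config. */
#define TEXT_OFFSET	0x80000ULL
#define MIN_KIMG_ALIGN	0x200000ULL	/* 2 MB */

#define round_down(x, a)	((x) & ~((a) - 1))

static uint64_t preferred_offset(uint64_t dram_base)
{
	uint64_t offset = round_down(dram_base, MIN_KIMG_ALIGN) + TEXT_OFFSET;

	/* Never place the image below the start of DRAM */
	if (offset < dram_base)
		offset += MIN_KIMG_ALIGN;
	return offset;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)preferred_offset(0x80000000));	/* 0x80080000 */
	printf("0x%llx\n", (unsigned long long)preferred_offset(0x80100000));	/* 0x80280000 */
	return 0;
}
```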
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index f07d4a67fa76..29ed2f9b218c 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -649,6 +649,10 @@ static u8 *efi_utf16_to_utf8(u8 *dst, const u16 *src, int n)
return dst;
}
+#ifndef MAX_CMDLINE_ADDRESS
+#define MAX_CMDLINE_ADDRESS ULONG_MAX
+#endif
+
/*
* Convert the unicode UEFI command line to ASCII to pass to kernel.
* Size of memory allocated return in *cmd_line_len.
@@ -684,7 +688,8 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
options_bytes++; /* NUL termination */
- status = efi_low_alloc(sys_table_arg, options_bytes, 0, &cmdline_addr);
+ status = efi_high_alloc(sys_table_arg, options_bytes, 0,
+ &cmdline_addr, MAX_CMDLINE_ADDRESS);
if (status != EFI_SUCCESS)
return NULL;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 6b6548fda089..ee49cd23ee63 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,6 +5,16 @@
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
+/*
+ * __init annotations should not be used in the EFI stub, since the code is
+ * either included in the decompressor (x86, ARM) where they have no effect,
+ * or the whole stub is __init annotated at the section level (arm64), by
+ * renaming the sections, in which case the __init annotation will be
+ * redundant, and will result in section names like .init.init.text, and our
+ * linker script does not expect that.
+ */
+#undef __init
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
@@ -43,4 +53,13 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
unsigned long desc_size, efi_memory_desc_t *runtime_map,
int *count);
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table,
+ unsigned long size, u8 *out);
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed);
+
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index cf7b7d46302a..6dba78aef337 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -147,6 +147,20 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (status)
goto fdt_set_fail;
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ efi_status_t efi_status;
+
+ efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
+ (u8 *)&fdt_val64);
+ if (efi_status == EFI_SUCCESS) {
+ status = fdt_setprop(fdt, node, "kaslr-seed",
+ &fdt_val64, sizeof(fdt_val64));
+ if (status)
+ goto fdt_set_fail;
+ } else if (efi_status != EFI_NOT_FOUND) {
+ return efi_status;
+ }
+ }
return EFI_SUCCESS;
fdt_set_fail:
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
new file mode 100644
index 000000000000..53f6d3fe6d86
--- /dev/null
+++ b/drivers/firmware/efi/libstub/random.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+
+struct efi_rng_protocol {
+ efi_status_t (*get_info)(struct efi_rng_protocol *,
+ unsigned long *, efi_guid_t *);
+ efi_status_t (*get_rng)(struct efi_rng_protocol *,
+ efi_guid_t *, unsigned long, u8 *out);
+};
+
+efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
+ unsigned long size, u8 *out)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_status_t status;
+ struct efi_rng_protocol *rng;
+
+ status = efi_call_early(locate_protocol, &rng_proto, NULL,
+ (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ return rng->get_rng(rng, NULL, size, out);
+}
+
+/*
+ * Return the number of slots covered by this entry, i.e., the number of
+ * addresses it covers that are suitably aligned and supply enough room
+ * for the allocation.
+ */
+static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
+ unsigned long size,
+ unsigned long align)
+{
+ u64 start, end;
+
+ if (md->type != EFI_CONVENTIONAL_MEMORY)
+ return 0;
+
+ start = round_up(md->phys_addr, align);
+ end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size,
+ align);
+
+ if (start > end)
+ return 0;
+
+ return (end - start + 1) / align;
+}
+
+/*
+ * The UEFI memory descriptors have a virtual address field that is only used
+ * when installing the virtual mapping using SetVirtualAddressMap(). Since it
+ * is unused here, we can reuse it to keep track of each descriptor's slot
+ * count.
+ */
+#define MD_NUM_SLOTS(md) ((md)->virt_addr)
+
+efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *addr,
+ unsigned long random_seed)
+{
+ unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ efi_status_t status;
+ efi_memory_desc_t *memory_map;
+ int map_offset;
+
+ status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
+ &desc_size, NULL, NULL);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (align < EFI_ALLOC_ALIGN)
+ align = EFI_ALLOC_ALIGN;
+
+ /* count the suitable slots in each memory map entry */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ unsigned long slots;
+
+ slots = get_entry_num_slots(md, size, align);
+ MD_NUM_SLOTS(md) = slots;
+ total_slots += slots;
+ }
+
+	/* pick a pseudo-random slot index in the range [0, total_slots) */
+ target_slot = (total_slots * (u16)random_seed) >> 16;
+
+ /*
+ * target_slot is now a value in the range [0, total_slots), and so
+ * it corresponds with exactly one of the suitable slots we recorded
+ * when iterating over the memory map the first time around.
+ *
+ * So iterate over the memory map again, subtracting the number of
+ * slots of each entry at each iteration, until we have found the entry
+ * that covers our chosen slot. Use the residual value of target_slot
+ * to calculate the randomly chosen address, and allocate it directly
+ * using EFI_ALLOCATE_ADDRESS.
+ */
+ for (map_offset = 0; map_offset < map_size; map_offset += desc_size) {
+ efi_memory_desc_t *md = (void *)memory_map + map_offset;
+ efi_physical_addr_t target;
+ unsigned long pages;
+
+ if (target_slot >= MD_NUM_SLOTS(md)) {
+ target_slot -= MD_NUM_SLOTS(md);
+ continue;
+ }
+
+ target = round_up(md->phys_addr, align) + target_slot * align;
+ pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+
+ status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, pages, &target);
+ if (status == EFI_SUCCESS)
+ *addr = target;
+ break;
+ }
+
+ efi_call_early(free_pool, memory_map);
+
+ return status;
+}
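The slot selection above reduces the seed to 16 bits and scales it by the slot count, a multiply-shift that yields an index in [0, total_slots) without a modulo. A minimal demonstration:

```c
#include <stdint.h>
#include <stdio.h>

/* Treat the low 16 bits of the seed as a fixed-point fraction in
 * [0, 1) and multiply by the slot count, giving an index in
 * [0, total_slots). */
static unsigned long pick_slot(unsigned long total_slots, unsigned long seed)
{
	return (total_slots * (uint16_t)seed) >> 16;
}

int main(void)
{
	printf("%lu\n", pick_slot(1000, 0x0000));	/* 0   */
	printf("%lu\n", pick_slot(1000, 0x8000));	/* 500 */
	printf("%lu\n", pick_slot(1000, 0xffff));	/* 999 */
	return 0;
}
```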
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 228bbf910461..de6953039af6 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -61,63 +61,23 @@
*/
static DEFINE_SPINLOCK(efi_runtime_lock);
-/*
- * Some runtime services calls can be reentrant under NMI, even if the table
- * above says they are not. (source: UEFI Specification v2.4A)
- *
- * Table 32. Functions that may be called after Machine Check, INIT and NMI
- * +----------------------------+------------------------------------------+
- * | Function | Called after Machine Check, INIT and NMI |
- * +----------------------------+------------------------------------------+
- * | GetTime() | Yes, even if previously busy. |
- * | GetVariable() | Yes, even if previously busy |
- * | GetNextVariableName() | Yes, even if previously busy |
- * | QueryVariableInfo() | Yes, even if previously busy |
- * | SetVariable() | Yes, even if previously busy |
- * | UpdateCapsule() | Yes, even if previously busy |
- * | QueryCapsuleCapabilities() | Yes, even if previously busy |
- * | ResetSystem() | Yes, even if previously busy |
- * +----------------------------+------------------------------------------+
- *
- * In order to prevent deadlocks under NMI, the wrappers for these functions
- * may only grab the efi_runtime_lock or rtc_lock spinlocks if !efi_in_nmi().
- * However, not all of the services listed are reachable through NMI code paths,
- * so the the special handling as suggested by the UEFI spec is only implemented
- * for QueryVariableInfo() and SetVariable(), as these can be reached in NMI
- * context through efi_pstore_write().
- */
-
-/*
- * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"),
- * the EFI specification requires that callers of the time related runtime
- * functions serialize with other CMOS accesses in the kernel, as the EFI time
- * functions may choose to also use the legacy CMOS RTC.
- */
-__weak DEFINE_SPINLOCK(rtc_lock);
-
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_time, tm, tc);
spin_unlock(&efi_runtime_lock);
- spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
static efi_status_t virt_efi_set_time(efi_time_t *tm)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&efi_runtime_lock);
status = efi_call_virt(set_time, tm);
spin_unlock(&efi_runtime_lock);
- spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
@@ -125,27 +85,21 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
efi_bool_t *pending,
efi_time_t *tm)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
spin_unlock(&efi_runtime_lock);
- spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&efi_runtime_lock);
status = efi_call_virt(set_wakeup_time, enabled, tm);
spin_unlock(&efi_runtime_lock);
- spin_unlock_irqrestore(&rtc_lock, flags);
return status;
}
@@ -155,13 +109,12 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
unsigned long *data_size,
void *data)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_variable, name, vendor, attr, data_size,
data);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -169,12 +122,11 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_next_variable, name_size, name, vendor);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -184,13 +136,12 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
unsigned long data_size,
void *data)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(set_variable, name, vendor, attr, data_size,
data);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -199,15 +150,14 @@ virt_efi_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data)
{
- unsigned long flags;
efi_status_t status;
- if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ if (!spin_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
status = efi_call_virt(set_variable, name, vendor, attr, data_size,
data);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -217,27 +167,45 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
u64 *remaining_space,
u64 *max_variable_size)
{
- unsigned long flags;
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(query_variable_info, attr, storage_space,
remaining_space, max_variable_size);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
+ return status;
+}
+
+static efi_status_t
+virt_efi_query_variable_info_nonblocking(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (!spin_trylock(&efi_runtime_lock))
+ return EFI_NOT_READY;
+
+ status = efi_call_virt(query_variable_info, attr, storage_space,
+ remaining_space, max_variable_size);
+ spin_unlock(&efi_runtime_lock);
return status;
}
static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
{
- unsigned long flags;
efi_status_t status;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(get_next_high_mono_count, count);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -246,26 +214,23 @@ static void virt_efi_reset_system(int reset_type,
unsigned long data_size,
efi_char16_t *data)
{
- unsigned long flags;
-
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
__efi_call_virt(reset_system, reset_type, status, data_size, data);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
}
static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
unsigned long count,
unsigned long sg_list)
{
- unsigned long flags;
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(update_capsule, capsules, count, sg_list);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -274,16 +239,15 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
u64 *max_size,
int *reset_type)
{
- unsigned long flags;
efi_status_t status;
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
- spin_lock_irqsave(&efi_runtime_lock, flags);
+ spin_lock(&efi_runtime_lock);
status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
reset_type);
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ spin_unlock(&efi_runtime_lock);
return status;
}
@@ -300,6 +264,7 @@ void efi_native_runtime_setup(void)
efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
efi.reset_system = virt_efi_reset_system;
efi.query_variable_info = virt_efi_query_variable_info;
+ efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nonblocking;
efi.update_capsule = virt_efi_update_capsule;
efi.query_capsule_caps = virt_efi_query_capsule_caps;
}
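The nonblocking variants above take the lock with spin_trylock() and return EFI_NOT_READY on contention rather than spinning, which makes them callable from contexts that must not block. The same pattern sketched with a pthread mutex standing in for the spinlock; the EFI_NOT_READY value here is illustrative, not the spec-defined encoding:

```c
#include <pthread.h>
#include <stdio.h>

#define EFI_SUCCESS	0
#define EFI_NOT_READY	6	/* illustrative value only */

static pthread_mutex_t runtime_lock = PTHREAD_MUTEX_INITIALIZER;

/* If the lock is contended, bail out with EFI_NOT_READY instead of
 * blocking, leaving the caller free to retry later. */
static long query_nonblocking(void)
{
	long status;

	if (pthread_mutex_trylock(&runtime_lock) != 0)
		return EFI_NOT_READY;

	status = EFI_SUCCESS;	/* the firmware call would go here */
	pthread_mutex_unlock(&runtime_lock);
	return status;
}

int main(void)
{
	printf("status = %ld\n", query_nonblocking());
	return 0;
}
```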
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 7f2ea21c730d..0ac594c0a234 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -300,7 +300,18 @@ check_var_size(u32 attributes, unsigned long size)
if (!fops->query_variable_store)
return EFI_UNSUPPORTED;
- return fops->query_variable_store(attributes, size);
+ return fops->query_variable_store(attributes, size, false);
+}
+
+static efi_status_t
+check_var_size_nonblocking(u32 attributes, unsigned long size)
+{
+ const struct efivar_operations *fops = __efivars->ops;
+
+ if (!fops->query_variable_store)
+ return EFI_UNSUPPORTED;
+
+ return fops->query_variable_store(attributes, size, true);
}
static int efi_status_to_err(efi_status_t status)
@@ -681,7 +692,8 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
if (!spin_trylock_irqsave(&__efivars->lock, flags))
return -EBUSY;
- status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
+ status = check_var_size_nonblocking(attributes,
+ size + ucs2_strsize(name, 1024));
if (status != EFI_SUCCESS) {
spin_unlock_irqrestore(&__efivars->lock, flags);
return -ENOSPC;
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 72791232e46b..81037e5fe301 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -319,6 +319,9 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
+ case ISCSI_BOOT_ETH_PREFIX_LEN:
+ str += sprintf(str, "%d\n", nic->subnet_mask_prefix);
+ break;
case ISCSI_BOOT_ETH_ORIGIN:
str += sprintf(str, "%d\n", nic->origin);
break;
@@ -460,6 +463,7 @@ static umode_t ibft_check_nic_for(void *data, int type)
if (address_not_null(nic->ip_addr))
rc = S_IRUGO;
break;
+ case ISCSI_BOOT_ETH_PREFIX_LEN:
case ISCSI_BOOT_ETH_SUBNET_MASK:
if (nic->subnet_mask_prefix)
rc = S_IRUGO;
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index f25cd79c8a79..11bfee8b79a9 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -14,6 +14,7 @@
#define pr_fmt(fmt) "psci: " fmt
#include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/of.h>
@@ -21,10 +22,12 @@
#include <linux/printk.h>
#include <linux/psci.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
#include <linux/suspend.h>
#include <uapi/linux/psci.h>
+#include <asm/cpuidle.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
#include <asm/smp_plat.h>
@@ -244,6 +247,123 @@ static int __init psci_features(u32 psci_func_id)
psci_func_id, 0, 0);
}
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+ int i, ret, count = 0;
+ u32 *psci_states;
+ struct device_node *state_node;
+
+ /*
+ * If the PSCI cpu_suspend function hook has not been initialized
+ * idle states must not be enabled, so bail out
+ */
+ if (!psci_ops.cpu_suspend)
+ return -EOPNOTSUPP;
+
+ /* Count idle states */
+ while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+ count))) {
+ count++;
+ of_node_put(state_node);
+ }
+
+ if (!count)
+ return -ENODEV;
+
+ psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+ if (!psci_states)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ u32 state;
+
+ state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+ ret = of_property_read_u32(state_node,
+ "arm,psci-suspend-param",
+ &state);
+ if (ret) {
+ pr_warn(" * %s missing arm,psci-suspend-param property\n",
+ state_node->full_name);
+ of_node_put(state_node);
+ goto free_mem;
+ }
+
+ of_node_put(state_node);
+ pr_debug("psci-power-state %#x index %d\n", state, i);
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ ret = -EINVAL;
+ goto free_mem;
+ }
+ psci_states[i] = state;
+ }
+ /* Idle states parsed correctly, initialize per-cpu pointer */
+ per_cpu(psci_power_state, cpu) = psci_states;
+ return 0;
+
+free_mem:
+ kfree(psci_states);
+ return ret;
+}
+
+int psci_cpu_init_idle(unsigned int cpu)
+{
+ struct device_node *cpu_node;
+ int ret;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return -ENODEV;
+
+ ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
+static int psci_suspend_finisher(unsigned long index)
+{
+ u32 *state = __this_cpu_read(psci_power_state);
+
+ return psci_ops.cpu_suspend(state[index - 1],
+ virt_to_phys(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(unsigned long index)
+{
+ int ret;
+ u32 *state = __this_cpu_read(psci_power_state);
+ /*
+	 * idle state index 0 corresponds to wfi; it should never be called
+ * from the cpu_suspend operations
+ */
+ if (WARN_ON_ONCE(!index))
+ return -EINVAL;
+
+ if (!psci_power_state_loses_context(state[index - 1]))
+ ret = psci_ops.cpu_suspend(state[index - 1], 0);
+ else
+ ret = cpu_suspend(index, psci_suspend_finisher);
+
+ return ret;
+}
+
+/* ARM specific CPU idle operations */
+#ifdef CONFIG_ARM
+static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+ .suspend = psci_cpu_suspend_enter,
+ .init = psci_dt_cpu_init_idle,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+#endif
+#endif
+
static int psci_system_suspend(unsigned long unused)
{
return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
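On the suspend path above, cpuidle index 0 is plain WFI and never reaches PSCI, so the firmware state table is indexed with (index - 1). A sketch of that mapping, with made-up suspend parameters:

```c
#include <stdio.h>

/* Invented arm,psci-suspend-param values for illustration only. */
static const unsigned int psci_states[] = { 0x0010000, 0x1010000 };

/* cpuidle index 0 is plain WFI and is handled without firmware, so
 * the PSCI state table starts at cpuidle index 1. */
static int enter_idle(unsigned long index)
{
	if (index == 0)		/* mirrors WARN_ON_ONCE(!index) in the driver */
		return -1;

	printf("cpu_suspend(param=%#x)\n", psci_states[index - 1]);
	return 0;
}

int main(void)
{
	enter_idle(1);	/* first firmware-backed state */
	enter_idle(2);	/* second firmware-backed state */
	return 0;
}
```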
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
new file mode 100644
index 000000000000..815c4a5cae54
--- /dev/null
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -0,0 +1,773 @@
+/*
+ * drivers/firmware/qemu_fw_cfg.c
+ *
+ * Copyright 2015 Carnegie Mellon University
+ *
+ * Expose entries from QEMU's firmware configuration (fw_cfg) device in
+ * sysfs (read-only, under "/sys/firmware/qemu_fw_cfg/...").
+ *
+ * The fw_cfg device may be instantiated via either an ACPI node (on x86
+ * and select subsets of aarch64), a Device Tree node (on arm), or using
+ * a kernel module (or command line) parameter with the following syntax:
+ *
+ * [fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>]
+ * or
+ * [fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>]
+ *
+ * where:
+ * <size> := size of ioport or mmio range
+ * <base> := physical base address of ioport or mmio range
+ * <ctrl_off> := (optional) offset of control register
+ * <data_off> := (optional) offset of data register
+ *
+ * e.g.:
+ * fw_cfg.ioport=2@0x510:0:1 (the default on x86)
+ * or
+ * fw_cfg.mmio=0xA@0x9020000:8:0 (the default on arm)
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+
+MODULE_AUTHOR("Gabriel L. Somlo <somlo@cmu.edu>");
+MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
+MODULE_LICENSE("GPL");
+
+/* selector key values for "well-known" fw_cfg entries */
+#define FW_CFG_SIGNATURE 0x00
+#define FW_CFG_ID 0x01
+#define FW_CFG_FILE_DIR 0x19
+
+/* size in bytes of fw_cfg signature */
+#define FW_CFG_SIG_SIZE 4
+
+/* fw_cfg "file name" is up to 56 characters (including terminating nul) */
+#define FW_CFG_MAX_FILE_PATH 56
+
+/* fw_cfg file directory entry type */
+struct fw_cfg_file {
+ u32 size;
+ u16 select;
+ u16 reserved;
+ char name[FW_CFG_MAX_FILE_PATH];
+};
+
+/* fw_cfg device i/o register addresses */
+static bool fw_cfg_is_mmio;
+static phys_addr_t fw_cfg_p_base;
+static resource_size_t fw_cfg_p_size;
+static void __iomem *fw_cfg_dev_base;
+static void __iomem *fw_cfg_reg_ctrl;
+static void __iomem *fw_cfg_reg_data;
+
+/* atomic access to fw_cfg device (potentially slow i/o, so using mutex) */
+static DEFINE_MUTEX(fw_cfg_dev_lock);
+
+/* pick appropriate endianness for selector key */
+static inline u16 fw_cfg_sel_endianness(u16 key)
+{
+ return fw_cfg_is_mmio ? cpu_to_be16(key) : cpu_to_le16(key);
+}
+
+/* read chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static inline void fw_cfg_read_blob(u16 key,
+ void *buf, loff_t pos, size_t count)
+{
+ u32 glk;
+ acpi_status status;
+
+ /* If we have ACPI, ensure mutual exclusion against any potential
+ * device access by the firmware, e.g. via AML methods:
+ */
+ status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk);
+ if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) {
+ /* Should never get here */
+ WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n");
+ memset(buf, 0, count);
+ return;
+ }
+
+ mutex_lock(&fw_cfg_dev_lock);
+ iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
+ while (pos-- > 0)
+ ioread8(fw_cfg_reg_data);
+ ioread8_rep(fw_cfg_reg_data, buf, count);
+ mutex_unlock(&fw_cfg_dev_lock);
+
+ acpi_release_global_lock(glk);
+}
+
+/* clean up fw_cfg device i/o */
+static void fw_cfg_io_cleanup(void)
+{
+ if (fw_cfg_is_mmio) {
+ iounmap(fw_cfg_dev_base);
+ release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+ } else {
+ ioport_unmap(fw_cfg_dev_base);
+ release_region(fw_cfg_p_base, fw_cfg_p_size);
+ }
+}
+
+/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
+#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
+# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+# define FW_CFG_CTRL_OFF 0x08
+# define FW_CFG_DATA_OFF 0x00
+# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32)) /* ppc/mac,sun4m */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x02
+# elif (defined(CONFIG_X86) || defined(CONFIG_SPARC64)) /* x86, sun4u */
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x01
+# else
+# warning "QEMU FW_CFG may not be available on this architecture!"
+# define FW_CFG_CTRL_OFF 0x00
+# define FW_CFG_DATA_OFF 0x01
+# endif
+#endif
+
+/* initialize fw_cfg device i/o from platform data */
+static int fw_cfg_do_platform_probe(struct platform_device *pdev)
+{
+ char sig[FW_CFG_SIG_SIZE];
+ struct resource *range, *ctrl, *data;
+
+ /* acquire i/o range details */
+ fw_cfg_is_mmio = false;
+ range = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!range) {
+ fw_cfg_is_mmio = true;
+ range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!range)
+ return -EINVAL;
+ }
+ fw_cfg_p_base = range->start;
+ fw_cfg_p_size = resource_size(range);
+
+ if (fw_cfg_is_mmio) {
+ if (!request_mem_region(fw_cfg_p_base,
+ fw_cfg_p_size, "fw_cfg_mem"))
+ return -EBUSY;
+ fw_cfg_dev_base = ioremap(fw_cfg_p_base, fw_cfg_p_size);
+ if (!fw_cfg_dev_base) {
+ release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+ return -EFAULT;
+ }
+ } else {
+ if (!request_region(fw_cfg_p_base,
+ fw_cfg_p_size, "fw_cfg_io"))
+ return -EBUSY;
+ fw_cfg_dev_base = ioport_map(fw_cfg_p_base, fw_cfg_p_size);
+ if (!fw_cfg_dev_base) {
+ release_region(fw_cfg_p_base, fw_cfg_p_size);
+ return -EFAULT;
+ }
+ }
+
+ /* were custom register offsets provided (e.g. on the command line)? */
+ ctrl = platform_get_resource_byname(pdev, IORESOURCE_REG, "ctrl");
+ data = platform_get_resource_byname(pdev, IORESOURCE_REG, "data");
+ if (ctrl && data) {
+ fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start;
+ fw_cfg_reg_data = fw_cfg_dev_base + data->start;
+ } else {
+ /* use architecture-specific offsets */
+ fw_cfg_reg_ctrl = fw_cfg_dev_base + FW_CFG_CTRL_OFF;
+ fw_cfg_reg_data = fw_cfg_dev_base + FW_CFG_DATA_OFF;
+ }
+
+ /* verify fw_cfg device signature */
+ fw_cfg_read_blob(FW_CFG_SIGNATURE, sig, 0, FW_CFG_SIG_SIZE);
+ if (memcmp(sig, "QEMU", FW_CFG_SIG_SIZE) != 0) {
+ fw_cfg_io_cleanup();
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir. */
+static u32 fw_cfg_rev;
+
+static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
+{
+ return sprintf(buf, "%u\n", fw_cfg_rev);
+}
+
+static const struct {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
+} fw_cfg_rev_attr = {
+ .attr = { .name = "rev", .mode = S_IRUSR },
+ .show = fw_cfg_showrev,
+};
+
+/* fw_cfg_sysfs_entry type */
+struct fw_cfg_sysfs_entry {
+ struct kobject kobj;
+ struct fw_cfg_file f;
+ struct list_head list;
+};
+
+/* get fw_cfg_sysfs_entry from kobject member */
+static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
+{
+ return container_of(kobj, struct fw_cfg_sysfs_entry, kobj);
+}
+
+/* fw_cfg_sysfs_attribute type */
+struct fw_cfg_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct fw_cfg_sysfs_entry *entry, char *buf);
+};
+
+/* get fw_cfg_sysfs_attribute from attribute member */
+static inline struct fw_cfg_sysfs_attribute *to_attr(struct attribute *attr)
+{
+ return container_of(attr, struct fw_cfg_sysfs_attribute, attr);
+}
+
+/* global cache of fw_cfg_sysfs_entry objects */
+static LIST_HEAD(fw_cfg_entry_cache);
+
+/* kobjects removed lazily by kernel, mutual exclusion needed */
+static DEFINE_SPINLOCK(fw_cfg_cache_lock);
+
+static inline void fw_cfg_sysfs_cache_enlist(struct fw_cfg_sysfs_entry *entry)
+{
+ spin_lock(&fw_cfg_cache_lock);
+ list_add_tail(&entry->list, &fw_cfg_entry_cache);
+ spin_unlock(&fw_cfg_cache_lock);
+}
+
+static inline void fw_cfg_sysfs_cache_delist(struct fw_cfg_sysfs_entry *entry)
+{
+ spin_lock(&fw_cfg_cache_lock);
+ list_del(&entry->list);
+ spin_unlock(&fw_cfg_cache_lock);
+}
+
+static void fw_cfg_sysfs_cache_cleanup(void)
+{
+ struct fw_cfg_sysfs_entry *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
+ /* will end up invoking fw_cfg_sysfs_cache_delist()
+ * via each object's release() method (i.e. destructor)
+ */
+ kobject_put(&entry->kobj);
+ }
+}
+
+/* default_attrs: per-entry attributes and show methods */
+
+#define FW_CFG_SYSFS_ATTR(_attr) \
+struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
+ .attr = { .name = __stringify(_attr), .mode = S_IRUSR }, \
+ .show = fw_cfg_sysfs_show_##_attr, \
+}
+
+static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%u\n", e->f.size);
+}
+
+static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%u\n", e->f.select);
+}
+
+static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+ return sprintf(buf, "%s\n", e->f.name);
+}
+
+static FW_CFG_SYSFS_ATTR(size);
+static FW_CFG_SYSFS_ATTR(key);
+static FW_CFG_SYSFS_ATTR(name);
+
+static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
+ &fw_cfg_sysfs_attr_size.attr,
+ &fw_cfg_sysfs_attr_key.attr,
+ &fw_cfg_sysfs_attr_name.attr,
+ NULL,
+};
+
+/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
+static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
+ char *buf)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+ struct fw_cfg_sysfs_attribute *attr = to_attr(a);
+
+ return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops fw_cfg_sysfs_attr_ops = {
+ .show = fw_cfg_sysfs_attr_show,
+};
+
+/* release: destructor, to be called via kobject_put() */
+static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+ fw_cfg_sysfs_cache_delist(entry);
+ kfree(entry);
+}
+
+/* kobj_type: ties together all properties required to register an entry */
+static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+ .default_attrs = fw_cfg_sysfs_entry_attrs,
+ .sysfs_ops = &fw_cfg_sysfs_attr_ops,
+ .release = fw_cfg_sysfs_release_entry,
+};
+
+/* raw-read method and attribute */
+static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+ if (pos > entry->f.size)
+ return -EINVAL;
+
+ if (count > entry->f.size - pos)
+ count = entry->f.size - pos;
+
+ fw_cfg_read_blob(entry->f.select, buf, pos, count);
+ return count;
+}
+
+static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+ .attr = { .name = "raw", .mode = S_IRUSR },
+ .read = fw_cfg_sysfs_read_raw,
+};
+
+/*
+ * Create a kset subdirectory matching each '/' delimited dirname token
+ * in 'name', starting with sysfs kset/folder 'dir'; At the end, create
+ * a symlink directed at the given 'target'.
+ * NOTE: We do this on a best-effort basis, since 'name' is not guaranteed
+ * to be a well-behaved path name. Whenever a symlink vs. kset directory
+ * name collision occurs, the kernel will issue big scary warnings while
+ * refusing to add the offending link or directory. We follow up with our
+ * own, slightly less scary error messages explaining the situation :)
+ */
+static int fw_cfg_build_symlink(struct kset *dir,
+ struct kobject *target, const char *name)
+{
+	int ret = -EINVAL;	/* in case 'name' yields no basename token */
+ struct kset *subdir;
+ struct kobject *ko;
+ char *name_copy, *p, *tok;
+
+ if (!dir || !target || !name || !*name)
+ return -EINVAL;
+
+ /* clone a copy of name for parsing */
+ name_copy = p = kstrdup(name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ /* create folders for each dirname token, then symlink for basename */
+ while ((tok = strsep(&p, "/")) && *tok) {
+
+ /* last (basename) token? If so, add symlink here */
+ if (!p || !*p) {
+ ret = sysfs_create_link(&dir->kobj, target, tok);
+ break;
+ }
+
+ /* does the current dir contain an item named after tok ? */
+ ko = kset_find_obj(dir, tok);
+ if (ko) {
+ /* drop reference added by kset_find_obj */
+ kobject_put(ko);
+
+			/* ko MUST be a kset - we're about to use it as one! */
+ if (ko->ktype != dir->kobj.ktype) {
+ ret = -EINVAL;
+ break;
+ }
+
+ /* descend into already existing subdirectory */
+ dir = to_kset(ko);
+ } else {
+ /* create new subdirectory kset */
+ subdir = kzalloc(sizeof(struct kset), GFP_KERNEL);
+ if (!subdir) {
+ ret = -ENOMEM;
+ break;
+ }
+ subdir->kobj.kset = dir;
+ subdir->kobj.ktype = dir->kobj.ktype;
+ ret = kobject_set_name(&subdir->kobj, "%s", tok);
+ if (ret) {
+ kfree(subdir);
+ break;
+ }
+ ret = kset_register(subdir);
+ if (ret) {
+ kfree(subdir);
+ break;
+ }
+
+ /* descend into newly created subdirectory */
+ dir = subdir;
+ }
+ }
+
+ /* we're done with cloned copy of name */
+ kfree(name_copy);
+ return ret;
+}
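
A concrete example of the best-effort tree building described in the comment above, as invoked from fw_cfg_register_file() further down. The entry name "etc/e820" is a hypothetical fw_cfg path used purely for illustration:

	/*
	 * For an entry named "etc/e820" this creates the kset directory
	 * by_name/etc/ (if not already present) and then the symlink
	 * by_name/etc/e820 pointing at the entry's by_key kobject.
	 */
	fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, "etc/e820");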
+
+/* recursively unregister fw_cfg/by_name/ kset directory tree */
+static void fw_cfg_kset_unregister_recursive(struct kset *kset)
+{
+ struct kobject *k, *next;
+
+ list_for_each_entry_safe(k, next, &kset->list, entry)
+ /* all set members are ksets too, but check just in case... */
+ if (k->ktype == kset->kobj.ktype)
+ fw_cfg_kset_unregister_recursive(to_kset(k));
+
+ /* symlinks are cleanly and automatically removed with the directory */
+ kset_unregister(kset);
+}
+
+/* kobjects & kset representing top-level, by_key, and by_name folders */
+static struct kobject *fw_cfg_top_ko;
+static struct kobject *fw_cfg_sel_ko;
+static struct kset *fw_cfg_fname_kset;
+
+/* register an individual fw_cfg file */
+static int fw_cfg_register_file(const struct fw_cfg_file *f)
+{
+ int err;
+ struct fw_cfg_sysfs_entry *entry;
+
+ /* allocate new entry */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ /* set file entry information */
+ memcpy(&entry->f, f, sizeof(struct fw_cfg_file));
+
+ /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+ fw_cfg_sel_ko, "%d", entry->f.select);
+ if (err)
+ goto err_register;
+
+ /* add raw binary content access */
+ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+ if (err)
+ goto err_add_raw;
+
+ /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
+ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->f.name);
+
+ /* success, add entry to global cache */
+ fw_cfg_sysfs_cache_enlist(entry);
+ return 0;
+
+err_add_raw:
+ kobject_del(&entry->kobj);
+err_register:
+ kfree(entry);
+ return err;
+}
+
+/* iterate over all fw_cfg directory entries, registering each one */
+static int fw_cfg_register_dir_entries(void)
+{
+ int ret = 0;
+ u32 count, i;
+ struct fw_cfg_file *dir;
+ size_t dir_size;
+
+ fw_cfg_read_blob(FW_CFG_FILE_DIR, &count, 0, sizeof(count));
+ count = be32_to_cpu(count);
+ dir_size = count * sizeof(struct fw_cfg_file);
+
+ dir = kmalloc(dir_size, GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ fw_cfg_read_blob(FW_CFG_FILE_DIR, dir, sizeof(count), dir_size);
+
+ for (i = 0; i < count; i++) {
+ dir[i].size = be32_to_cpu(dir[i].size);
+ dir[i].select = be16_to_cpu(dir[i].select);
+ ret = fw_cfg_register_file(&dir[i]);
+ if (ret)
+ break;
+ }
+
+ kfree(dir);
+ return ret;
+}
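
For reference, the wire format decoded above: a big-endian entry count followed by that many packed records, themselves big-endian, which is why size and select are byte-swapped before registration. A sketch of the layout, assuming the blob follows QEMU's fw_cfg file-directory convention (the struct name is illustrative):

	/* sketch only -- the driver reads count and the records separately */
	struct fw_cfg_files {
		__be32			count;	/* number of entries */
		struct fw_cfg_file	f[];	/* 'count' packed records */
	};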
+
+/* unregister top-level or by_key folder */
+static inline void fw_cfg_kobj_cleanup(struct kobject *kobj)
+{
+ kobject_del(kobj);
+ kobject_put(kobj);
+}
+
+static int fw_cfg_sysfs_probe(struct platform_device *pdev)
+{
+ int err;
+
+ /* NOTE: If we supported multiple fw_cfg devices, we'd first create
+ * a subdirectory named after e.g. pdev->id, then hang per-device
+ * by_key (and by_name) subdirectories underneath it. However, only
+	 * one fw_cfg device exists system-wide, so if one was already found
+ * earlier, we might as well stop here.
+ */
+ if (fw_cfg_sel_ko)
+ return -EBUSY;
+
+ /* create by_key and by_name subdirs of /sys/firmware/qemu_fw_cfg/ */
+ err = -ENOMEM;
+ fw_cfg_sel_ko = kobject_create_and_add("by_key", fw_cfg_top_ko);
+ if (!fw_cfg_sel_ko)
+ goto err_sel;
+ fw_cfg_fname_kset = kset_create_and_add("by_name", NULL, fw_cfg_top_ko);
+ if (!fw_cfg_fname_kset)
+ goto err_name;
+
+ /* initialize fw_cfg device i/o from platform data */
+ err = fw_cfg_do_platform_probe(pdev);
+ if (err)
+ goto err_probe;
+
+ /* get revision number, add matching top-level attribute */
+ fw_cfg_read_blob(FW_CFG_ID, &fw_cfg_rev, 0, sizeof(fw_cfg_rev));
+ fw_cfg_rev = le32_to_cpu(fw_cfg_rev);
+ err = sysfs_create_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+ if (err)
+ goto err_rev;
+
+ /* process fw_cfg file directory entry, registering each file */
+ err = fw_cfg_register_dir_entries();
+ if (err)
+ goto err_dir;
+
+ /* success */
+ pr_debug("fw_cfg: loaded.\n");
+ return 0;
+
+err_dir:
+ fw_cfg_sysfs_cache_cleanup();
+ sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+err_rev:
+ fw_cfg_io_cleanup();
+err_probe:
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+err_name:
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+err_sel:
+ return err;
+}
+
+static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+{
+ pr_debug("fw_cfg: unloading.\n");
+ fw_cfg_sysfs_cache_cleanup();
+ fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+ fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+ fw_cfg_io_cleanup();
+ return 0;
+}
+
+static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
+ { .compatible = "qemu,fw-cfg-mmio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fw_cfg_sysfs_mmio_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id fw_cfg_sysfs_acpi_match[] = {
+ { "QEMU0002", },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
+#endif
+
+static struct platform_driver fw_cfg_sysfs_driver = {
+ .probe = fw_cfg_sysfs_probe,
+ .remove = fw_cfg_sysfs_remove,
+ .driver = {
+ .name = "fw_cfg",
+ .of_match_table = fw_cfg_sysfs_mmio_match,
+ .acpi_match_table = ACPI_PTR(fw_cfg_sysfs_acpi_match),
+ },
+};
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+
+static struct platform_device *fw_cfg_cmdline_dev;
+
+/* this probably belongs in e.g. include/linux/types.h,
+ * but right now we are the only ones doing it...
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define __PHYS_ADDR_PREFIX "ll"
+#else
+#define __PHYS_ADDR_PREFIX ""
+#endif
+
+/* use special scanf/printf modifier for phys_addr_t, resource_size_t */
+#define PH_ADDR_SCAN_FMT "@%" __PHYS_ADDR_PREFIX "i%n" \
+ ":%" __PHYS_ADDR_PREFIX "i" \
+ ":%" __PHYS_ADDR_PREFIX "i%n"
+
+#define PH_ADDR_PR_1_FMT "0x%" __PHYS_ADDR_PREFIX "x@" \
+ "0x%" __PHYS_ADDR_PREFIX "x"
+
+#define PH_ADDR_PR_3_FMT PH_ADDR_PR_1_FMT \
+ ":%" __PHYS_ADDR_PREFIX "u" \
+ ":%" __PHYS_ADDR_PREFIX "u"
+
+static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
+{
+ struct resource res[3] = {};
+ char *str;
+ phys_addr_t base;
+ resource_size_t size, ctrl_off, data_off;
+ int processed, consumed = 0;
+
+ /* only one fw_cfg device can exist system-wide, so if one
+ * was processed on the command line already, we might as
+ * well stop here.
+ */
+ if (fw_cfg_cmdline_dev) {
+ /* avoid leaking previously registered device */
+ platform_device_unregister(fw_cfg_cmdline_dev);
+ return -EINVAL;
+ }
+
+ /* consume "<size>" portion of command line argument */
+ size = memparse(arg, &str);
+
+ /* get "@<base>[:<ctrl_off>:<data_off>]" chunks */
+ processed = sscanf(str, PH_ADDR_SCAN_FMT,
+ &base, &consumed,
+ &ctrl_off, &data_off, &consumed);
+
+ /* sscanf() must process precisely 1 or 3 chunks:
+ * <base> is mandatory, optionally followed by <ctrl_off>
+ * and <data_off>;
+ * there must be no extra characters after the last chunk,
+ * so str[consumed] must be '\0'.
+ */
+ if (str[consumed] ||
+ (processed != 1 && processed != 3))
+ return -EINVAL;
+
+ res[0].start = base;
+ res[0].end = base + size - 1;
+ res[0].flags = !strcmp(kp->name, "mmio") ? IORESOURCE_MEM :
+ IORESOURCE_IO;
+
+ /* insert register offsets, if provided */
+ if (processed > 1) {
+ res[1].name = "ctrl";
+ res[1].start = ctrl_off;
+ res[1].flags = IORESOURCE_REG;
+ res[2].name = "data";
+ res[2].start = data_off;
+ res[2].flags = IORESOURCE_REG;
+ }
+
+ /* "processed" happens to nicely match the number of resources
+ * we need to pass in to this platform device.
+ */
+ fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
+ PLATFORM_DEVID_NONE, res, processed);
+ if (IS_ERR(fw_cfg_cmdline_dev))
+ return PTR_ERR(fw_cfg_cmdline_dev);
+
+ return 0;
+}
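
A worked example of the parsing above, using the x86 default quoted in the file-header comment: for "ioport=2@0x510:0:1", memparse() consumes "2" as the size, sscanf() then matches all three chunks, so processed == 3 and three resources are registered:

	/*
	 *	res[0] = { .start = 0x510, .end = 0x511,
	 *		   .flags = IORESOURCE_IO }
	 *	res[1] = { .name = "ctrl", .start = 0,
	 *		   .flags = IORESOURCE_REG }
	 *	res[2] = { .name = "data", .start = 1,
	 *		   .flags = IORESOURCE_REG }
	 *
	 * With "ioport=2@0x510" alone, processed == 1 and only res[0]
	 * is passed to platform_device_register_simple().
	 */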
+
+static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
+{
+ /* stay silent if device was not configured via the command
+ * line, or if the parameter name (ioport/mmio) doesn't match
+ * the device setting
+ */
+ if (!fw_cfg_cmdline_dev ||
+ (!strcmp(kp->name, "mmio") ^
+ (fw_cfg_cmdline_dev->resource[0].flags == IORESOURCE_MEM)))
+ return 0;
+
+ switch (fw_cfg_cmdline_dev->num_resources) {
+ case 1:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start);
+ case 3:
+ return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
+ resource_size(&fw_cfg_cmdline_dev->resource[0]),
+ fw_cfg_cmdline_dev->resource[0].start,
+ fw_cfg_cmdline_dev->resource[1].start,
+ fw_cfg_cmdline_dev->resource[2].start);
+ }
+
+ /* Should never get here */
+ WARN(1, "Unexpected number of resources: %d\n",
+ fw_cfg_cmdline_dev->num_resources);
+ return 0;
+}
+
+static const struct kernel_param_ops fw_cfg_cmdline_param_ops = {
+ .set = fw_cfg_cmdline_set,
+ .get = fw_cfg_cmdline_get,
+};
+
+device_param_cb(ioport, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+
+#endif /* CONFIG_FW_CFG_SYSFS_CMDLINE */
+
+static int __init fw_cfg_sysfs_init(void)
+{
+ int ret;
+
+ /* create /sys/firmware/qemu_fw_cfg/ top level directory */
+ fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
+ if (!fw_cfg_top_ko)
+ return -ENOMEM;
+
+ ret = platform_driver_register(&fw_cfg_sysfs_driver);
+ if (ret)
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+
+ return ret;
+}
+
+static void __exit fw_cfg_sysfs_exit(void)
+{
+ platform_driver_unregister(&fw_cfg_sysfs_driver);
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+ platform_device_unregister(fw_cfg_cmdline_dev);
+#endif
+
+ /* clean up /sys/firmware/qemu_fw_cfg/ */
+ fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+}
+
+module_init(fw_cfg_sysfs_init);
+module_exit(fw_cfg_sysfs_exit);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c88dd24a4b1f..5f3429f0bf46 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -126,6 +126,16 @@ config GPIO_AMDPT
driver for GPIO functionality on Promontory IOHub
	  Requires ACPI ASL code to enumerate as a platform device.
+config GPIO_ATH79
+ tristate "Atheros AR71XX/AR724X/AR913X GPIO support"
+ default y if ATH79
+ depends on ATH79 || COMPILE_TEST
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Select this option to enable GPIO driver for
+ Atheros AR71XX/AR724X/AR913X SoC devices.
+
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -256,10 +266,17 @@ config GPIO_LYNXPOINT
config GPIO_MB86S7X
bool "GPIO support for Fujitsu MB86S7x Platforms"
- depends on ARCH_MB86S7X
+ depends on ARCH_MB86S7X || COMPILE_TEST
help
Say yes here to support the GPIO controller in Fujitsu MB86S70 SoCs.
+config GPIO_MENZ127
+ tristate "MEN 16Z127 GPIO support"
+ depends on MCB
+ select GPIO_GENERIC
+ help
+	  Say yes here to support the MEN 16Z127 GPIO Controller.
+
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
depends on LANTIQ && SOC_XWAY
@@ -270,7 +287,7 @@ config GPIO_MM_LANTIQ
config GPIO_MOXART
bool "MOXART GPIO support"
- depends on ARCH_MOXART
+ depends on ARCH_MOXART || COMPILE_TEST
select GPIO_GENERIC
help
Select this option to enable GPIO driver for
@@ -281,12 +298,14 @@ config GPIO_MPC5200
depends on PPC_MPC52xx
config GPIO_MPC8XXX
- bool "MPC512x/MPC8xxx GPIO support"
+ bool "MPC512x/MPC8xxx/QorIQ GPIO support"
depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
- FSL_SOC_BOOKE || PPC_86xx
+ FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \
+ COMPILE_TEST
+ select GPIO_GENERIC
help
Say Y here if you're going to use hardware that connects to the
- MPC512x/831x/834x/837x/8572/8610 GPIOs.
+ MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
config GPIO_MVEBU
def_bool y
@@ -339,7 +358,7 @@ config GPIO_PXA
config GPIO_RCAR
tristate "Renesas R-Car GPIO"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support GPIO on Renesas R-Car SoCs.
@@ -380,6 +399,14 @@ config GPIO_TB10X
select GENERIC_IRQ_CHIP
select OF_GPIO
+config GPIO_TS4800
+ tristate "TS-4800 DIO blocks and compatibles"
+ depends on OF_GPIO
+ depends on SOC_IMX51 || COMPILE_TEST
+ select GPIO_GENERIC
+ help
+	  This driver supports TS-4800 FPGA GPIO controllers.
+
config GPIO_TZ1090
bool "Toumaz Xenif TZ1090 GPIO support"
depends on SOC_TZ1090
@@ -433,6 +460,7 @@ config GPIO_XGENE_SB
tristate "APM X-Gene GPIO standby controller support"
depends on ARCH_XGENE && OF_GPIO
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
This driver supports the GPIO block within the APM X-Gene
Standby Domain. Say yes here to enable the GPIO functionality.
@@ -487,6 +515,15 @@ endmenu
menu "Port-mapped I/O GPIO drivers"
depends on X86 # Unconditional I/O space access
+config GPIO_104_DIO_48E
+ tristate "ACCES 104-DIO-48E GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Enables GPIO support for the ACCES 104-DIO-48E family. The base port
+ address for the device may be configured via the dio_48e_base module
+ parameter. The interrupt line number for the device may be configured
+ via the dio_48e_irq module parameter.
+
config GPIO_104_IDIO_16
tristate "ACCES 104-IDIO-16 GPIO support"
select GPIOLIB_IRQCHIP
@@ -506,10 +543,10 @@ config GPIO_104_IDI_48
via the idi_48_irq module parameter.
config GPIO_F7188X
- tristate "F71869, F71869A, F71882FG and F71889F GPIO support"
+ tristate "F71869, F71869A, F71882FG, F71889F and F81866 GPIO support"
help
This option enables support for GPIOs found on Fintek Super-I/O
- chips F71869, F71869A, F71882FG and F71889F.
+ chips F71869, F71869A, F71882FG, F71889F and F81866.
To compile this driver as a module, choose M here: the module will
be called f7188x-gpio.
@@ -570,6 +607,15 @@ config GPIO_TS5500
blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
LCD port.
+config GPIO_WS16C48
+ tristate "WinSystems WS16C48 GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Enables GPIO support for the WinSystems WS16C48. The base port address
+ for the device may be configured via the ws16c48_base module
+ parameter. The interrupt line number for the device may be configured
+ via the ws16c48_irq module parameter.
+
endmenu
menu "I2C GPIO expanders"
@@ -702,6 +748,14 @@ config GPIO_SX150X
8 bits: sx1508q
16 bits: sx1509q
+config GPIO_TPIC2810
+ tristate "TPIC2810 8-Bit I2C GPO expander"
+ help
+ Say yes here to enable the GPO driver for the TI TPIC2810 chip.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-tpic2810.
+
endmenu
menu "MFD GPIO expanders"
@@ -844,6 +898,19 @@ config GPIO_TIMBERDALE
---help---
Add support for the GPIO IP in the timberdale FPGA.
+config GPIO_TPS65086
+ tristate "TI TPS65086 GPO"
+ depends on MFD_TPS65086
+ help
+ This driver supports the GPO on TI TPS65086x PMICs.
+
+config GPIO_TPS65218
+ tristate "TPS65218 GPIO"
+ depends on MFD_TPS65218
+ help
+ Select this option to enable GPIO driver for the TPS65218
+ chip family.
+
config GPIO_TPS6586X
bool "TPS6586X GPIO"
depends on MFD_TPS6586X
@@ -860,7 +927,7 @@ config GPIO_TPS65910
config GPIO_TPS65912
tristate "TI TPS65912 GPIO"
- depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ depends on MFD_TPS65912
help
This driver supports TPS65912 gpio chip
@@ -1011,6 +1078,12 @@ config GPIO_MC33880
SPI driver for Freescale MC33880 high-side/low-side switch.
This provides GPIO interface supporting inputs and outputs.
+config GPIO_PISOSR
+ tristate "Generic parallel-in/serial-out shift register"
+ help
+	  GPIO driver for SPI-compatible parallel-in/serial-out shift
+	  registers. These are input-only devices.
+
endmenu
menu "SPI or I2C GPIO expanders"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ece7d7cbdc80..1e0b74f3b1ed 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
+obj-$(CONFIG_GPIO_104_DIO_48E) += gpio-104-dio-48e.o
obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
obj-$(CONFIG_GPIO_104_IDI_48) += gpio-104-idi-48.o
obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
@@ -23,7 +24,7 @@ obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
-obj-$(CONFIG_ATH79) += gpio-ath79.o
+obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
+obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
@@ -75,6 +77,7 @@ obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o
obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
+obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
@@ -95,9 +98,13 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
+obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
+obj-$(CONFIG_GPIO_TPS65086) += gpio-tps65086.o
+obj-$(CONFIG_GPIO_TPS65218) += gpio-tps65218.o
obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
+obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o
obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
@@ -111,6 +118,7 @@ obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
+obj-$(CONFIG_GPIO_WS16C48) += gpio-ws16c48.o
obj-$(CONFIG_GPIO_XGENE) += gpio-xgene.o
obj-$(CONFIG_GPIO_XGENE_SB) += gpio-xgene-sb.o
obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 903fcf4d04a0..b760cbbb41d8 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -155,7 +155,7 @@ struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
suffixes[i]);
desc = fwnode_get_named_gpiod(child, prop_name);
- if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+ if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
break;
}
if (IS_ERR(desc)) {
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
new file mode 100644
index 000000000000..448a903089ef
--- /dev/null
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -0,0 +1,430 @@
+/*
+ * GPIO driver for the ACCES 104-DIO-48E
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+static unsigned dio_48e_base;
+module_param(dio_48e_base, uint, 0);
+MODULE_PARM_DESC(dio_48e_base, "ACCES 104-DIO-48E base address");
+static unsigned dio_48e_irq;
+module_param(dio_48e_irq, uint, 0);
+MODULE_PARM_DESC(dio_48e_irq, "ACCES 104-DIO-48E interrupt line number");
+
+/**
+ * struct dio48e_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @io_state: bit I/O state (whether bit is set to input or output)
+ * @out_state: output bits state
+ * @control: Control registers state
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @base: base port address of the GPIO device
+ * @irq: Interrupt line number
+ * @irq_mask: I/O bits affected by interrupts
+ */
+struct dio48e_gpio {
+ struct gpio_chip chip;
+ unsigned char io_state[6];
+ unsigned char out_state[6];
+ unsigned char control[2];
+ spinlock_t lock;
+ unsigned base;
+ unsigned irq;
+ unsigned char irq_mask;
+};
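
The 48 lines map onto two 82C55-style groups of three 8-bit ports, with a control register after each group (base + 3 and base + 7); that is where the "port > 2 ? port + 1 : port" translation in the get/set paths below comes from. A hedged helper sketch of that mapping (the driver inlines the arithmetic instead):

	static unsigned dio48e_port_reg(unsigned base, unsigned offset)
	{
		unsigned port = offset / 8;	/* 0..5 */

		/* ports 3-5 sit past the first control register */
		return base + (port > 2 ? port + 1 : port);
	}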
+
+static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+
+ return !!(dio48egpio->io_state[port] & mask);
+}
+
+static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned io_port = offset / 8;
+ const unsigned control_port = io_port / 2;
+ const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
+ unsigned long flags;
+ unsigned control;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ /* Check if configuring Port C */
+ if (io_port == 2 || io_port == 5) {
+ /* Port C can be configured by nibble */
+ if (offset % 8 > 3) {
+ dio48egpio->io_state[io_port] |= 0xF0;
+ dio48egpio->control[control_port] |= BIT(3);
+ } else {
+ dio48egpio->io_state[io_port] |= 0x0F;
+ dio48egpio->control[control_port] |= BIT(0);
+ }
+ } else {
+ dio48egpio->io_state[io_port] |= 0xFF;
+ if (io_port == 0 || io_port == 3)
+ dio48egpio->control[control_port] |= BIT(4);
+ else
+ dio48egpio->control[control_port] |= BIT(1);
+ }
+
+ control = BIT(7) | dio48egpio->control[control_port];
+ outb(control, control_addr);
+ control &= ~BIT(7);
+ outb(control, control_addr);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+
+ return 0;
+}
+
+static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned io_port = offset / 8;
+ const unsigned control_port = io_port / 2;
+ const unsigned mask = BIT(offset % 8);
+ const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
+ const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
+ unsigned long flags;
+ unsigned control;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ /* Check if configuring Port C */
+ if (io_port == 2 || io_port == 5) {
+ /* Port C can be configured by nibble */
+ if (offset % 8 > 3) {
+ dio48egpio->io_state[io_port] &= 0x0F;
+ dio48egpio->control[control_port] &= ~BIT(3);
+ } else {
+ dio48egpio->io_state[io_port] &= 0xF0;
+ dio48egpio->control[control_port] &= ~BIT(0);
+ }
+ } else {
+ dio48egpio->io_state[io_port] &= 0x00;
+ if (io_port == 0 || io_port == 3)
+ dio48egpio->control[control_port] &= ~BIT(4);
+ else
+ dio48egpio->control[control_port] &= ~BIT(1);
+ }
+
+ if (value)
+ dio48egpio->out_state[io_port] |= mask;
+ else
+ dio48egpio->out_state[io_port] &= ~mask;
+
+ control = BIT(7) | dio48egpio->control[control_port];
+ outb(control, control_addr);
+
+ outb(dio48egpio->out_state[io_port], dio48egpio->base + out_port);
+
+ control &= ~BIT(7);
+ outb(control, control_addr);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+
+ return 0;
+}
+
+static int dio48e_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ const unsigned in_port = (port > 2) ? port + 1 : port;
+ unsigned long flags;
+ unsigned port_state;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ /* ensure that GPIO is set for input */
+ if (!(dio48egpio->io_state[port] & mask)) {
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+ return -EINVAL;
+ }
+
+ port_state = inb(dio48egpio->base + in_port);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+
+ return !!(port_state & mask);
+}
+
+static void dio48e_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ const unsigned out_port = (port > 2) ? port + 1 : port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ if (value)
+ dio48egpio->out_state[port] |= mask;
+ else
+ dio48egpio->out_state[port] &= ~mask;
+
+ outb(dio48egpio->out_state[port], dio48egpio->base + out_port);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+}
+
+static void dio48e_irq_ack(struct irq_data *data)
+{
+}
+
+static void dio48e_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ unsigned long flags;
+
+ /* only bit 3 on each respective Port C supports interrupts */
+ if (offset != 19 && offset != 43)
+ return;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ if (offset == 19)
+ dio48egpio->irq_mask &= ~BIT(0);
+ else
+ dio48egpio->irq_mask &= ~BIT(1);
+
+ if (!dio48egpio->irq_mask)
+ /* disable interrupts */
+ inb(dio48egpio->base + 0xB);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+}
+
+static void dio48e_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ unsigned long flags;
+
+ /* only bit 3 on each respective Port C supports interrupts */
+ if (offset != 19 && offset != 43)
+ return;
+
+ spin_lock_irqsave(&dio48egpio->lock, flags);
+
+ if (!dio48egpio->irq_mask) {
+ /* enable interrupts */
+ outb(0x00, dio48egpio->base + 0xF);
+ outb(0x00, dio48egpio->base + 0xB);
+ }
+
+ if (offset == 19)
+ dio48egpio->irq_mask |= BIT(0);
+ else
+ dio48egpio->irq_mask |= BIT(1);
+
+ spin_unlock_irqrestore(&dio48egpio->lock, flags);
+}
+
+static int dio48e_irq_set_type(struct irq_data *data, unsigned flow_type)
+{
+ const unsigned long offset = irqd_to_hwirq(data);
+
+ /* only bit 3 on each respective Port C supports interrupts */
+ if (offset != 19 && offset != 43)
+ return -EINVAL;
+
+ if (flow_type != IRQ_TYPE_NONE && flow_type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ return 0;
+}
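
The magic offsets in the checks above, spelled out as a worked example:

	/*
	 * Worked example: offset 19 == 2 * 8 + 3 (first group's Port C,
	 * bit 3) and offset 43 == 5 * 8 + 3 (second group's Port C,
	 * bit 3). Every other line, and any trigger other than
	 * IRQ_TYPE_NONE or IRQ_TYPE_EDGE_RISING, is rejected.
	 */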
+
+static struct irq_chip dio48e_irqchip = {
+ .name = "104-dio-48e",
+ .irq_ack = dio48e_irq_ack,
+ .irq_mask = dio48e_irq_mask,
+ .irq_unmask = dio48e_irq_unmask,
+ .irq_set_type = dio48e_irq_set_type
+};
+
+static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
+{
+ struct dio48e_gpio *const dio48egpio = dev_id;
+ struct gpio_chip *const chip = &dio48egpio->chip;
+ const unsigned long irq_mask = dio48egpio->irq_mask;
+ unsigned long gpio;
+
+ for_each_set_bit(gpio, &irq_mask, 2)
+ generic_handle_irq(irq_find_mapping(chip->irqdomain,
+ 19 + gpio*24));
+
+ spin_lock(&dio48egpio->lock);
+
+ outb(0x00, dio48egpio->base + 0xF);
+
+ spin_unlock(&dio48egpio->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int __init dio48e_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dio48e_gpio *dio48egpio;
+ const unsigned base = dio_48e_base;
+ const unsigned extent = 16;
+ const char *const name = dev_name(dev);
+ int err;
+ const unsigned irq = dio_48e_irq;
+
+ dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
+ if (!dio48egpio)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base, extent, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base, base + extent);
+ return -EBUSY;
+ }
+
+ dio48egpio->chip.label = name;
+ dio48egpio->chip.parent = dev;
+ dio48egpio->chip.owner = THIS_MODULE;
+ dio48egpio->chip.base = -1;
+ dio48egpio->chip.ngpio = 48;
+ dio48egpio->chip.get_direction = dio48e_gpio_get_direction;
+ dio48egpio->chip.direction_input = dio48e_gpio_direction_input;
+ dio48egpio->chip.direction_output = dio48e_gpio_direction_output;
+ dio48egpio->chip.get = dio48e_gpio_get;
+ dio48egpio->chip.set = dio48e_gpio_set;
+ dio48egpio->base = base;
+ dio48egpio->irq = irq;
+
+ spin_lock_init(&dio48egpio->lock);
+
+ dev_set_drvdata(dev, dio48egpio);
+
+ err = gpiochip_add_data(&dio48egpio->chip, dio48egpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ /* initialize all GPIO as output */
+ outb(0x80, base + 3);
+ outb(0x00, base);
+ outb(0x00, base + 1);
+ outb(0x00, base + 2);
+ outb(0x00, base + 3);
+ outb(0x80, base + 7);
+ outb(0x00, base + 4);
+ outb(0x00, base + 5);
+ outb(0x00, base + 6);
+ outb(0x00, base + 7);
+
+ /* disable IRQ by default */
+ inb(base + 0xB);
+
+ err = gpiochip_irqchip_add(&dio48egpio->chip, &dio48e_irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "Could not add irqchip (%d)\n", err);
+ goto err_gpiochip_remove;
+ }
+
+ err = request_irq(irq, dio48e_irq_handler, 0, name, dio48egpio);
+ if (err) {
+ dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ goto err_gpiochip_remove;
+ }
+
+ return 0;
+
+err_gpiochip_remove:
+ gpiochip_remove(&dio48egpio->chip);
+ return err;
+}
+
+static int dio48e_remove(struct platform_device *pdev)
+{
+ struct dio48e_gpio *const dio48egpio = platform_get_drvdata(pdev);
+
+ free_irq(dio48egpio->irq, dio48egpio);
+ gpiochip_remove(&dio48egpio->chip);
+
+ return 0;
+}
+
+static struct platform_device *dio48e_device;
+
+static struct platform_driver dio48e_driver = {
+ .driver = {
+ .name = "104-dio-48e"
+ },
+ .remove = dio48e_remove
+};
+
+static void __exit dio48e_exit(void)
+{
+ platform_device_unregister(dio48e_device);
+ platform_driver_unregister(&dio48e_driver);
+}
+
+static int __init dio48e_init(void)
+{
+ int err;
+
+ dio48e_device = platform_device_alloc(dio48e_driver.driver.name, -1);
+ if (!dio48e_device)
+ return -ENOMEM;
+
+ err = platform_device_add(dio48e_device);
+ if (err)
+ goto err_platform_device;
+
+ err = platform_driver_probe(&dio48e_driver, dio48e_probe);
+ if (err)
+ goto err_platform_driver;
+
+ return 0;
+
+err_platform_driver:
+ platform_device_del(dio48e_device);
+err_platform_device:
+ platform_device_put(dio48e_device);
+ return err;
+}
+
+module_init(dio48e_init);
+module_exit(dio48e_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 52eed328ce99..e37cd4cdda35 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -39,7 +39,6 @@ MODULE_PARM_DESC(idi_48_irq, "ACCES 104-IDI-48 interrupt line number");
* @ack_lock: synchronization lock to prevent IRQ handler race conditions
* @irq_mask: input bits affected by interrupts
* @base: base port address of the GPIO device
- * @extent: extent of port address region of the GPIO device
* @irq: Interrupt line number
* @cos_enb: Change-Of-State IRQ enable boundaries mask
*/
@@ -49,7 +48,6 @@ struct idi_48_gpio {
spinlock_t ack_lock;
unsigned char irq_mask[6];
unsigned base;
- unsigned extent;
unsigned irq;
unsigned char cos_enb;
};
@@ -227,11 +225,10 @@ static int __init idi_48_probe(struct platform_device *pdev)
if (!idi48gpio)
return -ENOMEM;
- if (!request_region(base, extent, name)) {
- dev_err(dev, "Unable to lock %s port addresses (0x%X-0x%X)\n",
- name, base, base + extent);
- err = -EBUSY;
- goto err_lock_io_port;
+ if (!devm_request_region(dev, base, extent, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base, base + extent);
+ return -EBUSY;
}
idi48gpio->chip.label = name;
@@ -243,7 +240,6 @@ static int __init idi_48_probe(struct platform_device *pdev)
idi48gpio->chip.direction_input = idi_48_gpio_direction_input;
idi48gpio->chip.get = idi_48_gpio_get;
idi48gpio->base = base;
- idi48gpio->extent = extent;
idi48gpio->irq = irq;
spin_lock_init(&idi48gpio->lock);
@@ -253,7 +249,7 @@ static int __init idi_48_probe(struct platform_device *pdev)
err = gpiochip_add_data(&idi48gpio->chip, idi48gpio);
if (err) {
dev_err(dev, "GPIO registering failed (%d)\n", err);
- goto err_gpio_register;
+ return err;
}
/* Disable IRQ by default */
@@ -264,23 +260,20 @@ static int __init idi_48_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_TYPE_NONE);
if (err) {
dev_err(dev, "Could not add irqchip (%d)\n", err);
- goto err_gpiochip_irqchip_add;
+ goto err_gpiochip_remove;
}
- err = request_irq(irq, idi_48_irq_handler, 0, name, idi48gpio);
+ err = request_irq(irq, idi_48_irq_handler, IRQF_SHARED, name,
+ idi48gpio);
if (err) {
dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- goto err_request_irq;
+ goto err_gpiochip_remove;
}
return 0;
-err_request_irq:
-err_gpiochip_irqchip_add:
+err_gpiochip_remove:
gpiochip_remove(&idi48gpio->chip);
-err_gpio_register:
- release_region(base, extent);
-err_lock_io_port:
return err;
}
@@ -290,7 +283,6 @@ static int idi_48_remove(struct platform_device *pdev)
free_irq(idi48gpio->irq, idi48gpio);
gpiochip_remove(&idi48gpio->chip);
- release_region(idi48gpio->base, idi48gpio->extent);
return 0;
}
@@ -340,4 +332,4 @@ module_exit(idi_48_exit);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDI-48 GPIO driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 4d69b50b2d84..ecc85fe9323d 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -38,7 +38,6 @@ MODULE_PARM_DESC(idio_16_irq, "ACCES 104-IDIO-16 interrupt line number");
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
* @base: base port address of the GPIO device
- * @extent: extent of port address region of the GPIO device
* @irq: Interrupt line number
* @out_state: output bits state
*/
@@ -47,7 +46,6 @@ struct idio_16_gpio {
spinlock_t lock;
unsigned long irq_mask;
unsigned base;
- unsigned extent;
unsigned irq;
unsigned out_state;
};
@@ -201,11 +199,10 @@ static int __init idio_16_probe(struct platform_device *pdev)
if (!idio16gpio)
return -ENOMEM;
- if (!request_region(base, extent, name)) {
- dev_err(dev, "Unable to lock %s port addresses (0x%X-0x%X)\n",
- name, base, base + extent);
- err = -EBUSY;
- goto err_lock_io_port;
+ if (!devm_request_region(dev, base, extent, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base, base + extent);
+ return -EBUSY;
}
idio16gpio->chip.label = name;
@@ -219,7 +216,6 @@ static int __init idio_16_probe(struct platform_device *pdev)
idio16gpio->chip.get = idio_16_gpio_get;
idio16gpio->chip.set = idio_16_gpio_set;
idio16gpio->base = base;
- idio16gpio->extent = extent;
idio16gpio->irq = irq;
idio16gpio->out_state = 0xFFFF;
@@ -230,7 +226,7 @@ static int __init idio_16_probe(struct platform_device *pdev)
err = gpiochip_add_data(&idio16gpio->chip, idio16gpio);
if (err) {
dev_err(dev, "GPIO registering failed (%d)\n", err);
- goto err_gpio_register;
+ return err;
}
/* Disable IRQ by default */
@@ -241,23 +237,19 @@ static int __init idio_16_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_TYPE_NONE);
if (err) {
dev_err(dev, "Could not add irqchip (%d)\n", err);
- goto err_gpiochip_irqchip_add;
+ goto err_gpiochip_remove;
}
err = request_irq(irq, idio_16_irq_handler, 0, name, idio16gpio);
if (err) {
dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- goto err_request_irq;
+ goto err_gpiochip_remove;
}
return 0;
-err_request_irq:
-err_gpiochip_irqchip_add:
+err_gpiochip_remove:
gpiochip_remove(&idio16gpio->chip);
-err_gpio_register:
- release_region(base, extent);
-err_lock_io_port:
return err;
}
@@ -267,7 +259,6 @@ static int idio_16_remove(struct platform_device *pdev)
free_irq(idio16gpio->irq, idio16gpio);
gpiochip_remove(&idio16gpio->chip);
- release_region(idio16gpio->base, idio16gpio->extent);
return 0;
}
@@ -317,4 +308,4 @@ module_exit(idio_16_exit);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 372b0e01adc6..0475e8ec96d0 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -140,15 +140,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- return gpiochip_add_data(&priv->gc, priv);
-}
-
-static int mmio_74xx_gpio_remove(struct platform_device *pdev)
-{
- struct mmio_74xx_gpio_priv *priv = platform_get_drvdata(pdev);
-
- gpiochip_remove(&priv->gc);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
}
static struct platform_driver mmio_74xx_gpio_driver = {
@@ -157,7 +149,6 @@ static struct platform_driver mmio_74xx_gpio_driver = {
.of_match_table = mmio_74xx_gpio_ids,
},
.probe = mmio_74xx_gpio_probe,
- .remove = mmio_74xx_gpio_remove,
};
module_platform_driver(mmio_74xx_gpio_driver);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index fb5b47b69f14..8ff7b0d3eac6 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -265,7 +265,7 @@ static int adnp_gpio_setup(struct adnp *adnp, unsigned int num_gpios)
chip->of_node = chip->parent->of_node;
chip->owner = THIS_MODULE;
- err = gpiochip_add_data(chip, adnp);
+ err = devm_gpiochip_add_data(&adnp->client->dev, chip, adnp);
if (err)
return err;
@@ -520,14 +520,6 @@ static int adnp_i2c_probe(struct i2c_client *client,
return 0;
}
-static int adnp_i2c_remove(struct i2c_client *client)
-{
- struct adnp *adnp = i2c_get_clientdata(client);
-
- gpiochip_remove(&adnp->gpio);
- return 0;
-}
-
static const struct i2c_device_id adnp_i2c_id[] = {
{ "gpio-adnp" },
{ },
@@ -546,7 +538,6 @@ static struct i2c_driver adnp_i2c_driver = {
.of_match_table = adnp_of_match,
},
.probe = adnp_i2c_probe,
- .remove = adnp_i2c_remove,
.id_table = adnp_i2c_id,
};
module_i2c_driver(adnp_i2c_driver);
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 4fa7ff1fec9a..abf199609546 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -153,7 +153,7 @@ static int adp5520_gpio_probe(struct platform_device *pdev)
goto err;
}
- ret = gpiochip_add_data(&dev->gpio_chip, dev);
+ ret = devm_gpiochip_add_data(&pdev->dev, &dev->gpio_chip, dev);
if (ret)
goto err;
@@ -164,22 +164,11 @@ err:
return ret;
}
-static int adp5520_gpio_remove(struct platform_device *pdev)
-{
- struct adp5520_gpio *dev;
-
- dev = platform_get_drvdata(pdev);
- gpiochip_remove(&dev->gpio_chip);
-
- return 0;
-}
-
static struct platform_driver adp5520_gpio_driver = {
.driver = {
.name = "adp5520-gpio",
},
.probe = adp5520_gpio_probe,
- .remove = adp5520_gpio_remove,
};
module_platform_driver(adp5520_gpio_driver);
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 19a0eba1e942..c0f718b12317 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -414,7 +414,7 @@ static int adp5588_gpio_probe(struct i2c_client *client,
}
}
- ret = gpiochip_add_data(&dev->gpio_chip, dev);
+ ret = devm_gpiochip_add_data(&client->dev, &dev->gpio_chip, dev);
if (ret)
goto err_irq;
@@ -457,8 +457,6 @@ static int adp5588_gpio_remove(struct i2c_client *client)
if (dev->irq_base)
free_irq(dev->client->irq, dev);
- gpiochip_remove(&dev->gpio_chip);
-
return 0;
}
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index c7040fffc5b4..30ad7d7c1678 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -25,6 +25,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gpio.h>
@@ -204,7 +205,8 @@ found:
gp.pmbase &= 0x0000FF00;
if (gp.pmbase == 0)
goto out;
- if (!request_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE, "AMD GPIO")) {
+ if (!devm_request_region(&pdev->dev, gp.pmbase + PMBASE_OFFSET,
+ PMBASE_SIZE, "AMD GPIO")) {
dev_err(&pdev->dev, "AMD GPIO region 0x%x already in use!\n",
gp.pmbase + PMBASE_OFFSET);
err = -EBUSY;
@@ -213,7 +215,6 @@ found:
gp.pm = ioport_map(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
if (!gp.pm) {
dev_err(&pdev->dev, "Couldn't map io port into io memory\n");
- release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
err = -ENOMEM;
goto out;
}
@@ -228,7 +229,6 @@ found:
printk(KERN_ERR "GPIO registering failed (%d)\n",
err);
ioport_unmap(gp.pm);
- release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
goto out;
}
out:
@@ -239,7 +239,6 @@ static void __exit amd_gpio_exit(void)
{
gpiochip_remove(&gp.chip);
ioport_unmap(gp.pm);
- release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
}
module_init(amd_gpio_init);
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index e910c1f41d93..991370494922 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -132,7 +132,8 @@ static int arizona_gpio_probe(struct platform_device *pdev)
else
arizona_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&arizona_gpio->gpio_chip, arizona_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip,
+ arizona_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
@@ -147,18 +148,9 @@ err:
return ret;
}
-static int arizona_gpio_remove(struct platform_device *pdev)
-{
- struct arizona_gpio *arizona_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&arizona_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver arizona_gpio_driver = {
.driver.name = "arizona-gpio",
.probe = arizona_gpio_probe,
- .remove = arizona_gpio_remove,
};
module_platform_driver(arizona_gpio_driver);
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index d13dd133a907..c4f4cddc7c1a 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -1,12 +1,11 @@
/*
* Atheros AR71XX/AR724X/AR913X GPIO API support
*
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
* Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
- * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -15,118 +14,204 @@
#include <linux/gpio/driver.h>
#include <linux/platform_data/gpio-ath79.h>
#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#define AR71XX_GPIO_REG_OE 0x00
+#define AR71XX_GPIO_REG_IN 0x04
+#define AR71XX_GPIO_REG_SET 0x0c
+#define AR71XX_GPIO_REG_CLEAR 0x10
-#include <asm/mach-ath79/ar71xx_regs.h>
+#define AR71XX_GPIO_REG_INT_ENABLE 0x14
+#define AR71XX_GPIO_REG_INT_TYPE 0x18
+#define AR71XX_GPIO_REG_INT_POLARITY 0x1c
+#define AR71XX_GPIO_REG_INT_PENDING 0x20
+#define AR71XX_GPIO_REG_INT_MASK 0x24
struct ath79_gpio_ctrl {
- struct gpio_chip chip;
+ struct gpio_chip gc;
void __iomem *base;
spinlock_t lock;
+ unsigned long both_edges;
};
-static void ath79_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static struct ath79_gpio_ctrl *irq_data_to_ath79_gpio(struct irq_data *data)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- if (value)
- __raw_writel(BIT(gpio), ctrl->base + AR71XX_GPIO_REG_SET);
- else
- __raw_writel(BIT(gpio), ctrl->base + AR71XX_GPIO_REG_CLEAR);
+ return container_of(gc, struct ath79_gpio_ctrl, gc);
}
-static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ return readl(ctrl->base + reg);
+}
- return (__raw_readl(ctrl->base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
+static void ath79_gpio_write(struct ath79_gpio_ctrl *ctrl,
+ unsigned reg, u32 val)
+{
+ return writel(val, ctrl->base + reg);
}
-static int ath79_gpio_direction_input(struct gpio_chip *chip,
- unsigned offset)
+static bool ath79_gpio_update_bits(
+ struct ath79_gpio_ctrl *ctrl, unsigned reg, u32 mask, u32 bits)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ u32 old_val, new_val;
+
+ old_val = ath79_gpio_read(ctrl, reg);
+ new_val = (old_val & ~mask) | (bits & mask);
+
+ if (new_val != old_val)
+ ath79_gpio_write(ctrl, reg, new_val);
+
+ return new_val != old_val;
+}
+
+static void ath79_gpio_irq_unmask(struct irq_data *data)
+{
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+}
- __raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
- ctrl->base + AR71XX_GPIO_REG_OE);
+static void ath79_gpio_irq_mask(struct irq_data *data)
+{
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
+ unsigned long flags;
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
spin_unlock_irqrestore(&ctrl->lock, flags);
-
- return 0;
}
-static int ath79_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+static void ath79_gpio_irq_enable(struct irq_data *data)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+}
- if (value)
- __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_SET);
- else
- __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
-
- __raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) | BIT(offset),
- ctrl->base + AR71XX_GPIO_REG_OE);
+static void ath79_gpio_irq_disable(struct irq_data *data)
+{
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
+ unsigned long flags;
+ spin_lock_irqsave(&ctrl->lock, flags);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
spin_unlock_irqrestore(&ctrl->lock, flags);
-
- return 0;
}
-static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int ath79_gpio_irq_set_type(struct irq_data *data,
+ unsigned int flow_type)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
+ struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
+ u32 mask = BIT(irqd_to_hwirq(data));
+ u32 type = 0, polarity = 0;
unsigned long flags;
+ bool disabled;
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ polarity |= mask;
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_EDGE_BOTH:
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ polarity |= mask;
+ case IRQ_TYPE_LEVEL_LOW:
+ type |= mask;
+ break;
+
+ default:
+ return -EINVAL;
+ }
spin_lock_irqsave(&ctrl->lock, flags);
- __raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) | BIT(offset),
- ctrl->base + AR71XX_GPIO_REG_OE);
+ if (flow_type == IRQ_TYPE_EDGE_BOTH) {
+ ctrl->both_edges |= mask;
+ polarity = ~ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
+ } else {
+ ctrl->both_edges &= ~mask;
+ }
+
+ /* As the IRQ configuration can't be loaded atomically, we
+ * have to disable the interrupt while the configuration state
+ * is invalid.
+ */
+ disabled = ath79_gpio_update_bits(
+ ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
+
+ ath79_gpio_update_bits(
+ ctrl, AR71XX_GPIO_REG_INT_TYPE, mask, type);
+ ath79_gpio_update_bits(
+ ctrl, AR71XX_GPIO_REG_INT_POLARITY, mask, polarity);
+
+ if (disabled)
+ ath79_gpio_update_bits(
+ ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
}
-static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
+static struct irq_chip ath79_gpio_irqchip = {
+ .name = "gpio-ath79",
+ .irq_enable = ath79_gpio_irq_enable,
+ .irq_disable = ath79_gpio_irq_disable,
+ .irq_mask = ath79_gpio_irq_mask,
+ .irq_unmask = ath79_gpio_irq_unmask,
+ .irq_set_type = ath79_gpio_irq_set_type,
+};
+
+static void ath79_gpio_irq_handler(struct irq_desc *desc)
{
- struct ath79_gpio_ctrl *ctrl = gpiochip_get_data(chip);
- unsigned long flags;
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct ath79_gpio_ctrl *ctrl =
+ container_of(gc, struct ath79_gpio_ctrl, gc);
+ unsigned long flags, pending;
+ u32 both_edges, state;
+ int irq;
+
+ chained_irq_enter(irqchip, desc);
spin_lock_irqsave(&ctrl->lock, flags);
- if (value)
- __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_SET);
- else
- __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
+ pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
- __raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
- ctrl->base + AR71XX_GPIO_REG_OE);
+ /* Update the polarity of the both-edge IRQs */
+ both_edges = ctrl->both_edges & pending;
+ if (both_edges) {
+ state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
+ both_edges, ~state);
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
- return 0;
-}
+ if (pending) {
+ for_each_set_bit(irq, &pending, gc->ngpio)
+ generic_handle_irq(
+ irq_linear_revmap(gc->irqdomain, irq));
+ }
-static const struct gpio_chip ath79_gpio_chip = {
- .label = "ath79",
- .get = ath79_gpio_get_value,
- .set = ath79_gpio_set_value,
- .direction_input = ath79_gpio_direction_input,
- .direction_output = ath79_gpio_direction_output,
- .base = 0,
-};
+ chained_irq_exit(irqchip, desc);
+}
static const struct of_device_id ath79_gpio_of_match[] = {
{ .compatible = "qca,ar7100-gpio" },
@@ -147,6 +232,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
+ platform_set_drvdata(pdev, ctrl);
if (np) {
err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
@@ -154,10 +240,6 @@ static int ath79_gpio_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "ngpios property is not valid\n");
return err;
}
- if (ath79_gpio_count >= 32) {
- dev_err(&pdev->dev, "ngpios must be less than 32\n");
- return -EINVAL;
- }
oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
} else if (pdata) {
ath79_gpio_count = pdata->ngpios;
@@ -167,6 +249,11 @@ static int ath79_gpio_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (ath79_gpio_count >= 32) {
+ dev_err(&pdev->dev, "ngpios must be less than 32\n");
+ return -EINVAL;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctrl->base = devm_ioremap_nocache(
&pdev->dev, res->start, resource_size(res));
@@ -174,21 +261,53 @@ static int ath79_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&ctrl->lock);
- memcpy(&ctrl->chip, &ath79_gpio_chip, sizeof(ctrl->chip));
- ctrl->chip.parent = &pdev->dev;
- ctrl->chip.ngpio = ath79_gpio_count;
- if (oe_inverted) {
- ctrl->chip.direction_input = ar934x_gpio_direction_input;
- ctrl->chip.direction_output = ar934x_gpio_direction_output;
+ err = bgpio_init(&ctrl->gc, &pdev->dev, 4,
+ ctrl->base + AR71XX_GPIO_REG_IN,
+ ctrl->base + AR71XX_GPIO_REG_SET,
+ ctrl->base + AR71XX_GPIO_REG_CLEAR,
+ oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
+ oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
+ 0);
+ if (err) {
+ dev_err(&pdev->dev, "bgpio_init failed\n");
+ return err;
}
+ /* Use base 0 to stay compatible with legacy platforms */
+ ctrl->gc.base = 0;
- err = gpiochip_add_data(&ctrl->chip, ctrl);
+ err = gpiochip_add_data(&ctrl->gc, ctrl);
if (err) {
dev_err(&pdev->dev,
"cannot add AR71xx GPIO chip, error=%d", err);
return err;
}
+ if (np && !of_property_read_bool(np, "interrupt-controller"))
+ return 0;
+
+ err = gpiochip_irqchip_add(&ctrl->gc, &ath79_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(&pdev->dev, "failed to add gpiochip_irqchip\n");
+ goto gpiochip_remove;
+ }
+
+ gpiochip_set_chained_irqchip(&ctrl->gc, &ath79_gpio_irqchip,
+ platform_get_irq(pdev, 0),
+ ath79_gpio_irq_handler);
+
+ return 0;
+
+gpiochip_remove:
+ gpiochip_remove(&ctrl->gc);
+ return err;
+}
+
+static int ath79_gpio_remove(struct platform_device *pdev)
+{
+ struct ath79_gpio_ctrl *ctrl = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&ctrl->gc);
return 0;
}
@@ -198,6 +317,7 @@ static struct platform_driver ath79_gpio_driver = {
.of_match_table = ath79_gpio_of_match,
},
.probe = ath79_gpio_probe,
+ .remove = ath79_gpio_remove,
};
module_platform_driver(ath79_gpio_driver);
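
The AR71xx interrupt unit has no native both-edge trigger, so the driver above emulates IRQ_TYPE_EDGE_BOTH: it points the polarity register at the inverse of the line's current level, then re-arms from the chained handler after every hit. A minimal sketch of that re-arm step, using the register offsets defined above (ath79_rearm_both_edge is an illustrative name, not part of the driver):

#include <linux/io.h>

#define AR71XX_GPIO_REG_IN		0x04
#define AR71XX_GPIO_REG_INT_POLARITY	0x1c

/* Point the polarity at the opposite of the line's current level so
 * the next transition, whichever direction it takes, raises the IRQ.
 * The caller is assumed to hold the controller spinlock.
 */
static void ath79_rearm_both_edge(void __iomem *base, u32 line_mask)
{
	u32 level = readl(base + AR71XX_GPIO_REG_IN);
	u32 pol = readl(base + AR71XX_GPIO_REG_INT_POLARITY);

	pol = (pol & ~line_mask) | (~level & line_mask);
	writel(pol, base + AR71XX_GPIO_REG_INT_POLARITY);
}
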
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index b6c5abe85daf..2fd38d598f3d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -630,7 +630,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
bcm_kona_gpio_reset(kona_gpio);
- ret = gpiochip_add_data(chip, kona_gpio);
+ ret = devm_gpiochip_add_data(dev, chip, kona_gpio);
if (ret < 0) {
dev_err(dev, "Couldn't add GPIO chip -- %d\n", ret);
goto err_irq_domain;
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index d7644251e869..42d51c59ed50 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -233,17 +233,14 @@ static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct list_head *pos;
+ struct brcmstb_gpio_bank *bank;
/* Interrupts weren't properly cleared during probe */
BUG_ON(!priv || !chip);
chained_irq_enter(chip, desc);
- list_for_each(pos, &priv->bank_list) {
- struct brcmstb_gpio_bank *bank =
- list_entry(pos, struct brcmstb_gpio_bank, node);
+ list_for_each_entry(bank, &priv->bank_list, node)
brcmstb_gpio_irq_bank_handler(bank);
- }
chained_irq_exit(chip, desc);
}
@@ -280,7 +277,6 @@ static int brcmstb_gpio_sanity_check_banks(struct device *dev,
static int brcmstb_gpio_remove(struct platform_device *pdev)
{
struct brcmstb_gpio_priv *priv = platform_get_drvdata(pdev);
- struct list_head *pos;
struct brcmstb_gpio_bank *bank;
int ret = 0;
@@ -293,10 +289,9 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
* You can lose return values below, but we report all errors, and it's
* more important to actually perform all of the steps.
*/
- list_for_each(pos, &priv->bank_list) {
- bank = list_entry(pos, struct brcmstb_gpio_bank, node);
+ list_for_each_entry(bank, &priv->bank_list, node)
gpiochip_remove(&bank->gc);
- }
+
if (priv->reboot_notifier.notifier_call) {
ret = unregister_reboot_notifier(&priv->reboot_notifier);
if (ret)
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index c84f9551f108..5a690256af9b 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -67,15 +67,7 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
platform_set_drvdata(pdev, gc);
- return gpiochip_add_data(gc, NULL);
-}
-
-static int clps711x_gpio_remove(struct platform_device *pdev)
-{
- struct gpio_chip *gc = platform_get_drvdata(pdev);
-
- gpiochip_remove(gc);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}
static const struct of_device_id __maybe_unused clps711x_gpio_ids[] = {
@@ -90,7 +82,6 @@ static struct platform_driver clps711x_gpio_driver = {
.of_match_table = of_match_ptr(clps711x_gpio_ids),
},
.probe = clps711x_gpio_probe,
- .remove = clps711x_gpio_remove,
};
module_platform_driver(clps711x_gpio_driver);
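
The clps711x hunk above is the simplest instance of the conversion repeated throughout this series: gpiochip_add_data() becomes devm_gpiochip_add_data(), and the .remove callback that existed only to call gpiochip_remove() is deleted, since the devres core now does that on unbind. A minimal sketch of the resulting shape, with a hypothetical foo driver:

#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int foo_gpio_probe(struct platform_device *pdev)
{
	struct gpio_chip *gc;

	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
	if (!gc)
		return -ENOMEM;

	/* ... fill in label, ngpio and the accessor callbacks ... */

	/* Managed registration: gpiochip_remove() runs automatically
	 * when the device unbinds, so no .remove callback is needed.
	 */
	return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}

The same rewrite also flattens probe() error paths: once registration is managed, a failure in a later step can simply return instead of unwinding through a goto label.
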
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 7865ef0d3352..7c446d118cd6 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -345,7 +345,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
cg->chip.dbg_show = crystalcove_gpio_dbg_show;
cg->regmap = pmic->regmap;
- retval = gpiochip_add_data(&cg->chip, cg);
+ retval = devm_gpiochip_add_data(&pdev->dev, &cg->chip, cg);
if (retval) {
dev_warn(&pdev->dev, "add gpio chip error: %d\n", retval);
return retval;
@@ -359,14 +359,10 @@ static int crystalcove_gpio_probe(struct platform_device *pdev)
if (retval) {
dev_warn(&pdev->dev, "request irq failed: %d\n", retval);
- goto out_remove_gpio;
+ return retval;
}
return 0;
-
-out_remove_gpio:
- gpiochip_remove(&cg->chip);
- return retval;
}
static int crystalcove_gpio_remove(struct platform_device *pdev)
@@ -374,7 +370,6 @@ static int crystalcove_gpio_remove(struct platform_device *pdev)
struct crystalcove_gpio *cg = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
- gpiochip_remove(&cg->chip);
if (irq >= 0)
free_irq(irq, cg);
return 0;
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index eccb712e09fb..90278b19aa0e 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -320,13 +320,13 @@ static int cs5535_gpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
- goto done;
+ return err;
}
if (!devm_request_region(&pdev->dev, res->start, resource_size(res),
pdev->name)) {
dev_err(&pdev->dev, "can't request region\n");
- goto done;
+ return err;
}
/* set up the driver-specific struct */
@@ -348,19 +348,10 @@ static int cs5535_gpio_probe(struct platform_device *pdev)
mask_orig, mask);
/* finally, register with the generic GPIO API */
- err = gpiochip_add_data(&cs5535_gpio_chip.chip, &cs5535_gpio_chip);
+ err = devm_gpiochip_add_data(&pdev->dev, &cs5535_gpio_chip.chip,
+ &cs5535_gpio_chip);
if (err)
- goto done;
-
- return 0;
-
-done:
- return err;
-}
-
-static int cs5535_gpio_remove(struct platform_device *pdev)
-{
- gpiochip_remove(&cs5535_gpio_chip.chip);
+ return err;
return 0;
}
@@ -370,7 +361,6 @@ static struct platform_driver cs5535_gpio_driver = {
.name = DRV_NAME,
},
.probe = cs5535_gpio_probe,
- .remove = cs5535_gpio_remove,
};
module_platform_driver(cs5535_gpio_driver);
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index f9b3247ad14b..e29553b7ccdb 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -214,7 +214,7 @@ static int da9052_gpio_probe(struct platform_device *pdev)
if (pdata && pdata->gpio_base)
gpio->gp.base = pdata->gpio_base;
- ret = gpiochip_add_data(&gpio->gp, gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -225,17 +225,8 @@ static int da9052_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int da9052_gpio_remove(struct platform_device *pdev)
-{
- struct da9052_gpio *gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&gpio->gp);
- return 0;
-}
-
static struct platform_driver da9052_gpio_driver = {
.probe = da9052_gpio_probe,
- .remove = da9052_gpio_remove,
.driver = {
.name = "da9052-gpio",
},
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 18210fb2cb13..2c2c18dc6c4f 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -151,31 +151,19 @@ static int da9055_gpio_probe(struct platform_device *pdev)
if (pdata && pdata->gpio_base)
gpio->gp.base = pdata->gpio_base;
- ret = gpiochip_add_data(&gpio->gp, gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
- goto err_mem;
+ return ret;
}
platform_set_drvdata(pdev, gpio);
return 0;
-
-err_mem:
- return ret;
-}
-
-static int da9055_gpio_remove(struct platform_device *pdev)
-{
- struct da9055_gpio *gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&gpio->gp);
- return 0;
}
static struct platform_driver da9055_gpio_driver = {
.probe = da9055_gpio_probe,
- .remove = da9055_gpio_remove,
.driver = {
.name = "da9055-gpio",
},
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index cd007a67b302..dd262f00295d 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -258,6 +258,8 @@ static int davinci_gpio_probe(struct platform_device *pdev)
spin_lock_init(&chips[i].lock);
regs = gpio2regs(base);
+ if (!regs)
+ return -ENXIO;
chips[i].regs = regs;
chips[i].set_data = &regs->set_data;
chips[i].clr_data = &regs->clr_data;
@@ -433,8 +435,7 @@ static struct irq_chip *davinci_gpio_get_irq_chip(unsigned int irq)
{
static struct irq_chip_type gpio_unbanked;
- gpio_unbanked = *container_of(irq_get_chip(irq),
- struct irq_chip_type, chip);
+ gpio_unbanked = *irq_data_get_chip_type(irq_get_irq_data(irq));
return &gpio_unbanked.chip;
};
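
For reference, irq_data_get_chip_type() is a <linux/irq.h> helper that wraps exactly the container_of() the old davinci code spelled out by hand; paraphrased from the header, it is:

static inline struct irq_chip_type *
irq_data_get_chip_type(struct irq_data *d)
{
	return container_of(d->chip, struct irq_chip_type, chip);
}
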
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index e11a7d126e74..f7a60a441e95 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -479,40 +479,32 @@ static int dln2_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dln2);
- ret = gpiochip_add_data(&dln2->gpio, dln2);
+ ret = devm_gpiochip_add_data(dev, &dln2->gpio, dln2);
if (ret < 0) {
dev_err(dev, "failed to add gpio chip: %d\n", ret);
- goto out;
+ return ret;
}
ret = gpiochip_irqchip_add(&dln2->gpio, &dln2_gpio_irqchip, 0,
handle_simple_irq, IRQ_TYPE_NONE);
if (ret < 0) {
dev_err(dev, "failed to add irq chip: %d\n", ret);
- goto out_gpiochip_remove;
+ return ret;
}
ret = dln2_register_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV,
dln2_gpio_event);
if (ret) {
dev_err(dev, "failed to register event cb: %d\n", ret);
- goto out_gpiochip_remove;
+ return ret;
}
return 0;
-
-out_gpiochip_remove:
- gpiochip_remove(&dln2->gpio);
-out:
- return ret;
}
static int dln2_gpio_remove(struct platform_device *pdev)
{
- struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
-
dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
- gpiochip_remove(&dln2->gpio);
return 0;
}
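
Note what stays behind in dln2_gpio_remove(): devres actions run after the .remove body, in reverse registration order, so the gpiochip added with devm in probe() is torn down only after the DLN2 event callback has been unregistered. A sketch of that shape, with hypothetical foo_* names:

#include <linux/platform_device.h>

/* Hypothetical event-callback teardown, standing in for
 * dln2_unregister_event_cb() above.
 */
void foo_unregister_event_cb(struct platform_device *pdev);

static int foo_gpio_remove(struct platform_device *pdev)
{
	/* Quiesce the event source first; devres then invokes
	 * gpiochip_remove() for us after this function returns.
	 */
	foo_unregister_event_cb(pdev);
	return 0;
}
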
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index ad279078fed7..d054219e18b9 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -339,7 +339,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, struct device *dev,
gc->to_irq = ep93xx_gpio_to_irq;
}
- return gpiochip_add_data(gc, NULL);
+ return devm_gpiochip_add_data(dev, gc, NULL);
}
static int ep93xx_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index d62fd6bbaf82..daac2d480db1 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -1,5 +1,5 @@
/*
- * GPIO driver for Fintek Super-I/O F71869, F71869A, F71882 and F71889
+ * GPIO driver for Fintek Super-I/O F71869, F71869A, F71882, F71889 and F81866
*
* Copyright (C) 2010-2013 LaCie
*
@@ -36,14 +36,16 @@
#define SIO_F71869A_ID 0x1007 /* F71869A chipset ID */
#define SIO_F71882_ID 0x0541 /* F71882 chipset ID */
#define SIO_F71889_ID 0x0909 /* F71889 chipset ID */
+#define SIO_F81866_ID 0x1010 /* F81866 chipset ID */
-enum chips { f71869, f71869a, f71882fg, f71889f };
+enum chips { f71869, f71869a, f71882fg, f71889f, f81866 };
static const char * const f7188x_names[] = {
"f71869",
"f71869a",
"f71882fg",
"f71889f",
+ "f81866",
};
struct f7188x_sio {
@@ -190,6 +192,18 @@ static struct f7188x_gpio_bank f71889_gpio_bank[] = {
F7188X_GPIO_BANK(70, 8, 0x80),
};
+static struct f7188x_gpio_bank f81866_gpio_bank[] = {
+ F7188X_GPIO_BANK(0, 8, 0xF0),
+ F7188X_GPIO_BANK(10, 8, 0xE0),
+ F7188X_GPIO_BANK(20, 8, 0xD0),
+ F7188X_GPIO_BANK(30, 8, 0xC0),
+ F7188X_GPIO_BANK(40, 8, 0xB0),
+ F7188X_GPIO_BANK(50, 8, 0xA0),
+ F7188X_GPIO_BANK(60, 8, 0x90),
+ F7188X_GPIO_BANK(70, 8, 0x80),
+ F7188X_GPIO_BANK(80, 8, 0x88),
+};
+
static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
int err;
@@ -318,6 +332,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
data->nr_bank = ARRAY_SIZE(f71889_gpio_bank);
data->bank = f71889_gpio_bank;
break;
+ case f81866:
+ data->nr_bank = ARRAY_SIZE(f81866_gpio_bank);
+ data->bank = f81866_gpio_bank;
+ break;
default:
return -ENODEV;
}
@@ -332,37 +350,16 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
bank->chip.parent = &pdev->dev;
bank->data = data;
- err = gpiochip_add_data(&bank->chip, bank);
+ err = devm_gpiochip_add_data(&pdev->dev, &bank->chip, bank);
if (err) {
dev_err(&pdev->dev,
"Failed to register gpiochip %d: %d\n",
i, err);
- goto err_gpiochip;
+ return err;
}
}
return 0;
-
-err_gpiochip:
- for (i = i - 1; i >= 0; i--) {
- struct f7188x_gpio_bank *bank = &data->bank[i];
- gpiochip_remove(&bank->chip);
- }
-
- return err;
-}
-
-static int f7188x_gpio_remove(struct platform_device *pdev)
-{
- int i;
- struct f7188x_gpio_data *data = platform_get_drvdata(pdev);
-
- for (i = 0; i < data->nr_bank; i++) {
- struct f7188x_gpio_bank *bank = &data->bank[i];
- gpiochip_remove(&bank->chip);
- }
-
- return 0;
}
static int __init f7188x_find(int addr, struct f7188x_sio *sio)
@@ -395,6 +392,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
case SIO_F71889_ID:
sio->type = f71889f;
break;
+ case SIO_F81866_ID:
+ sio->type = f81866;
+ break;
default:
pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid);
goto err;
@@ -455,7 +455,6 @@ static struct platform_driver f7188x_gpio_driver = {
.name = DRVNAME,
},
.probe = f7188x_gpio_probe,
- .remove = f7188x_gpio_remove,
};
static int __init f7188x_gpio_init(void)
@@ -485,6 +484,6 @@ static void __exit f7188x_gpio_exit(void)
}
module_exit(f7188x_gpio_exit);
-MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71869, F71869A, F71882FG and F71889F");
+MODULE_DESCRIPTION("GPIO driver for Super-I/O chips F71869, F71869A, F71882FG, F71889F and F81866");
MODULE_AUTHOR("Simon Guinot <simon.guinot@sequanux.org>");
MODULE_LICENSE("GPL");
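
F81866 support amounts to one more bank table plus the enum/ID plumbing: each Super-I/O GPIO bank is described as data, so the probe loop needs no chip-specific code. A sketch of the table idea with assumed field names (the real layout comes from the F7188X_GPIO_BANK() macro defined earlier in the driver):

#include <linux/types.h>

struct bank_desc {
	unsigned int base;	/* first GPIO number of the bank */
	unsigned int ngpio;	/* lines in the bank */
	u8 regbase;		/* Super-I/O register base of the bank */
};

static const struct bank_desc f81866_banks[] = {
	{  0, 8, 0xF0 }, { 10, 8, 0xE0 }, { 20, 8, 0xD0 },
	{ 30, 8, 0xC0 }, { 40, 8, 0xB0 }, { 50, 8, 0xA0 },
	{ 60, 8, 0x90 }, { 70, 8, 0x80 }, { 80, 8, 0x88 },
};
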
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index cbbec838a9d1..8650b2916f87 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -89,7 +89,7 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
gc->of_node = pdev->dev.of_node;
/* This function adds a memory mapped GPIO chip */
- ret = gpiochip_add_data(gc, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, gc, NULL);
if (ret)
goto err0;
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 2a4f2333a50b..54cddfa98f50 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -628,15 +628,7 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gc);
- return gpiochip_add_data(gc, NULL);
-}
-
-static int bgpio_pdev_remove(struct platform_device *pdev)
-{
- struct gpio_chip *gc = platform_get_drvdata(pdev);
-
- gpiochip_remove(gc);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}
static const struct platform_device_id bgpio_id_table[] = {
@@ -657,7 +649,6 @@ static struct platform_driver bgpio_driver = {
},
.id_table = bgpio_id_table,
.probe = bgpio_pdev_probe,
- .remove = bgpio_pdev_remove,
};
module_platform_driver(bgpio_driver);
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index a4893386abbf..4f6d643516b7 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -20,6 +20,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gpio.h>
@@ -384,8 +385,8 @@ static struct ichx_desc avoton_desc = {
.use_outlvl_cache = true,
};
-static int ichx_gpio_request_regions(struct resource *res_base,
- const char *name, u8 use_gpio)
+static int ichx_gpio_request_regions(struct device *dev,
+ struct resource *res_base, const char *name, u8 use_gpio)
{
int i;
@@ -395,34 +396,12 @@ static int ichx_gpio_request_regions(struct resource *res_base,
for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) {
if (!(use_gpio & (1 << i)))
continue;
- if (!request_region(
+ if (!devm_request_region(dev,
res_base->start + ichx_priv.desc->regs[0][i],
ichx_priv.desc->reglen[i], name))
- goto request_err;
+ return -EBUSY;
}
return 0;
-
-request_err:
- /* Clean up: release already requested regions, if any */
- for (i--; i >= 0; i--) {
- if (!(use_gpio & (1 << i)))
- continue;
- release_region(res_base->start + ichx_priv.desc->regs[0][i],
- ichx_priv.desc->reglen[i]);
- }
- return -EBUSY;
-}
-
-static void ichx_gpio_release_regions(struct resource *res_base, u8 use_gpio)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) {
- if (!(use_gpio & (1 << i)))
- continue;
- release_region(res_base->start + ichx_priv.desc->regs[0][i],
- ichx_priv.desc->reglen[i]);
- }
}
static int ichx_gpio_probe(struct platform_device *pdev)
@@ -468,7 +447,7 @@ static int ichx_gpio_probe(struct platform_device *pdev)
spin_lock_init(&ichx_priv.lock);
res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
ichx_priv.use_gpio = ich_info->use_gpio;
- err = ichx_gpio_request_regions(res_base, pdev->name,
+ err = ichx_gpio_request_regions(&pdev->dev, res_base, pdev->name,
ichx_priv.use_gpio);
if (err)
return err;
@@ -489,8 +468,8 @@ static int ichx_gpio_probe(struct platform_device *pdev)
goto init;
}
- if (!request_region(res_pm->start, resource_size(res_pm),
- pdev->name)) {
+ if (!devm_request_region(&pdev->dev, res_pm->start,
+ resource_size(res_pm), pdev->name)) {
pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
goto init;
}
@@ -502,31 +481,19 @@ init:
err = gpiochip_add_data(&ichx_priv.chip, NULL);
if (err) {
pr_err("Failed to register GPIOs\n");
- goto add_err;
+ return err;
}
pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
return 0;
-
-add_err:
- ichx_gpio_release_regions(ichx_priv.gpio_base, ichx_priv.use_gpio);
- if (ichx_priv.pm_base)
- release_region(ichx_priv.pm_base->start,
- resource_size(ichx_priv.pm_base));
- return err;
}
static int ichx_gpio_remove(struct platform_device *pdev)
{
gpiochip_remove(&ichx_priv.chip);
- ichx_gpio_release_regions(ichx_priv.gpio_base, ichx_priv.use_gpio);
- if (ichx_priv.pm_base)
- release_region(ichx_priv.pm_base->start,
- resource_size(ichx_priv.pm_base));
-
return 0;
}
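
The gpio-ich change extends the managed-resource idea to I/O ports: devm_request_region() records each claimed range against the device, so a failure mid-loop simply returns -EBUSY and devres releases whatever was already claimed, deleting both the unwind loop and ichx_gpio_release_regions(). The pattern, sketched with assumed per-chip offset/length tables:

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/ioport.h>

static int claim_regions(struct device *dev, struct resource *base,
			 const u8 *offs, const u8 *lens, int n, u8 use_mask)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!(use_mask & BIT(i)))
			continue;
		/* Regions claimed so far are released by devres if
		 * probe() later fails -- no manual rollback needed.
		 */
		if (!devm_request_region(dev, base->start + offs[i],
					 lens[i], dev_name(dev)))
			return -EBUSY;
	}
	return 0;
}
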
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index fb65e5850e0c..860c535922fd 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -114,7 +114,7 @@ static int iop3xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- return gpiochip_add_data(&iop3xx_chip, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &iop3xx_chip, NULL);
}
static struct platform_driver iop3xx_gpio_driver = {
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 482aa0353868..a8d0a6b8025a 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -182,7 +182,7 @@ static int ttl_probe(struct platform_device *pdev)
gpio->base = -1;
gpio->ngpio = 20;
- ret = gpiochip_add_data(gpio, NULL);
+ ret = devm_gpiochip_add_data(dev, gpio, NULL);
if (ret) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
@@ -191,21 +191,11 @@ static int ttl_probe(struct platform_device *pdev)
return 0;
}
-static int ttl_remove(struct platform_device *pdev)
-{
- struct ttl_module *mod = platform_get_drvdata(pdev);
-
- gpiochip_remove(&mod->gpio);
-
- return 0;
-}
-
static struct platform_driver ttl_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = ttl_probe,
- .remove = ttl_remove,
};
module_platform_driver(ttl_driver);
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 01117747b965..701f1510328c 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -178,7 +178,7 @@ static int kempld_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = gpiochip_add_data(chip, gpio);
+ ret = devm_gpiochip_add_data(dev, chip, gpio);
if (ret) {
dev_err(dev, "Could not register GPIO chip\n");
return ret;
@@ -190,20 +190,11 @@ static int kempld_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int kempld_gpio_remove(struct platform_device *pdev)
-{
- struct kempld_gpio_data *gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&gpio->chip);
- return 0;
-}
-
static struct platform_driver kempld_gpio_driver = {
.driver = {
.name = "kempld-gpio",
},
.probe = kempld_gpio_probe,
- .remove = kempld_gpio_remove,
};
module_platform_driver(kempld_gpio_driver);
diff --git a/drivers/gpio/gpio-ks8695.c b/drivers/gpio/gpio-ks8695.c
index 9f86ed9c753b..179723d02f55 100644
--- a/drivers/gpio/gpio-ks8695.c
+++ b/drivers/gpio/gpio-ks8695.c
@@ -205,18 +205,6 @@ static int ks8695_gpio_to_irq(struct gpio_chip *gc, unsigned int pin)
return gpio_irq[pin];
}
-/*
- * Map IRQ number to GPIO line.
- */
-int irq_to_gpio(unsigned int irq)
-{
- if ((irq < KS8695_IRQ_EXTERN0) || (irq > KS8695_IRQ_EXTERN3))
- return -EINVAL;
-
- return (irq - KS8695_IRQ_EXTERN0);
-}
-EXPORT_SYMBOL(irq_to_gpio);
-
/* GPIOLIB interface */
static struct gpio_chip ks8695_gpio_chip = {
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 1c8e2ae26938..6dc6725403ec 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -204,15 +204,8 @@ static int lp3943_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lp3943_gpio);
- return gpiochip_add_data(&lp3943_gpio->chip, lp3943_gpio);
-}
-
-static int lp3943_gpio_remove(struct platform_device *pdev)
-{
- struct lp3943_gpio *lp3943_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&lp3943_gpio->chip);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &lp3943_gpio->chip,
+ lp3943_gpio);
}
static const struct of_device_id lp3943_gpio_of_match[] = {
@@ -223,7 +216,6 @@ MODULE_DEVICE_TABLE(of, lp3943_gpio_of_match);
static struct platform_driver lp3943_gpio_driver = {
.probe = lp3943_gpio_probe,
- .remove = lp3943_gpio_remove,
.driver = {
.name = "lp3943-gpio",
.of_match_table = lp3943_gpio_of_match,
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 4cecf4ce96c1..d39014daeef9 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -547,7 +547,7 @@ static int lpc32xx_gpio_probe(struct platform_device *pdev)
lpc32xx_gpiochip[i].chip.of_gpio_n_cells = 3;
lpc32xx_gpiochip[i].chip.of_node = pdev->dev.of_node;
}
- gpiochip_add_data(&lpc32xx_gpiochip[i].chip,
+ devm_gpiochip_add_data(&pdev->dev, &lpc32xx_gpiochip[i].chip,
&lpc32xx_gpiochip[i]);
}
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 13107772be4f..9df015e85ad9 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -370,7 +370,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->can_sleep = false;
gc->parent = dev;
- ret = gpiochip_add_data(gc, lg);
+ ret = devm_gpiochip_add_data(dev, gc, lg);
if (ret) {
dev_err(dev, "failed adding lp-gpio chip\n");
return ret;
@@ -439,9 +439,7 @@ MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
static int lp_gpio_remove(struct platform_device *pdev)
{
- struct lp_gpio *lg = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- gpiochip_remove(&lg->chip);
return 0;
}
diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c
index ba22fb92a6e7..14f252f9eb29 100644
--- a/drivers/gpio/gpio-mc9s08dz60.c
+++ b/drivers/gpio/gpio-mc9s08dz60.c
@@ -103,17 +103,7 @@ static int mc9s08dz60_probe(struct i2c_client *client,
mc9s->client = client;
i2c_set_clientdata(client, mc9s);
- return gpiochip_add_data(&mc9s->chip, mc9s);
-}
-
-static int mc9s08dz60_remove(struct i2c_client *client)
-{
- struct mc9s08dz60 *mc9s;
-
- mc9s = i2c_get_clientdata(client);
-
- gpiochip_remove(&mc9s->chip);
- return 0;
+ return devm_gpiochip_add_data(&client->dev, &mc9s->chip, mc9s);
}
static const struct i2c_device_id mc9s08dz60_id[] = {
@@ -128,7 +118,6 @@ static struct i2c_driver mc9s08dz60_i2c_driver = {
.name = "mc9s08dz60",
},
.probe = mc9s08dz60_probe,
- .remove = mc9s08dz60_remove,
.id_table = mc9s08dz60_id,
};
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index c767879e4dd9..47e486910aab 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -31,6 +31,7 @@
#define MCP_TYPE_S17 1
#define MCP_TYPE_008 2
#define MCP_TYPE_017 3
+#define MCP_TYPE_S18 4
/* Registers are all 8 bits wide.
*
@@ -48,6 +49,7 @@
# define IOCON_HAEN (1 << 3)
# define IOCON_ODR (1 << 2)
# define IOCON_INTPOL (1 << 1)
+# define IOCON_INTCC (1)
#define MCP_GPPU 0x06
#define MCP_INTF 0x07
#define MCP_INTCAP 0x08
@@ -617,6 +619,12 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23s17";
break;
+
+ case MCP_TYPE_S18:
+ mcp->ops = &mcp23s17_ops;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23s18";
+ break;
#endif /* CONFIG_SPI_MASTER */
#if IS_ENABLED(CONFIG_I2C)
@@ -657,8 +665,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
of_property_read_bool(mcp->chip.parent->of_node,
"microchip,irq-active-high");
- if (type == MCP_TYPE_017)
- mirror = pdata->mirror;
+ mirror = pdata->mirror;
}
if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN) || mirror ||
@@ -674,6 +681,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (mirror)
status |= IOCON_MIRROR | (IOCON_MIRROR << 8);
+ if (type == MCP_TYPE_S18)
+ status |= IOCON_INTCC | (IOCON_INTCC << 8);
+
status = mcp->ops->write(mcp, MCP_IOCON, status);
if (status < 0)
goto fail;
@@ -735,6 +745,10 @@ static const struct of_device_id mcp23s08_spi_of_match[] = {
.compatible = "microchip,mcp23s17",
.data = (void *) MCP_TYPE_S17,
},
+ {
+ .compatible = "microchip,mcp23s18",
+ .data = (void *) MCP_TYPE_S18,
+ },
/* NOTE: The use of the mcp prefix is deprecated and will be removed. */
{
.compatible = "mcp,mcp23s08",
@@ -803,6 +817,8 @@ static int mcp230xx_probe(struct i2c_client *client,
pdata = devm_kzalloc(&client->dev,
sizeof(struct mcp23s08_platform_data),
GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
pdata->base = -1;
}
}
@@ -969,8 +985,8 @@ static int mcp23s08_probe(struct spi_device *spi)
goto fail;
if (pdata->base != -1)
- pdata->base += (type == MCP_TYPE_S17) ? 16 : 8;
- ngpio += (type == MCP_TYPE_S17) ? 16 : 8;
+ pdata->base += data->mcp[addr]->chip.ngpio;
+ ngpio += data->mcp[addr]->chip.ngpio;
}
data->ngpio = ngpio;
@@ -1012,6 +1028,7 @@ static int mcp23s08_remove(struct spi_device *spi)
static const struct spi_device_id mcp23s08_ids[] = {
{ "mcp23s08", MCP_TYPE_S08 },
{ "mcp23s17", MCP_TYPE_S17 },
+ { "mcp23s18", MCP_TYPE_S18 },
{ },
};
MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
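
With a third SPI variant present, hard-coding "16 for S17, 8 otherwise" when stacking chips on one bus no longer scales; the probe loop now advances the GPIO base by each sub-chip's own ngpio. The accumulation, as a standalone sketch (assign_bases and its parameters are illustrative, not driver names):

/* Lay out stacked chips as one contiguous GPIO range. Each chip
 * contributes its own line count, so an 8- or 16-line variant
 * needs no special casing. Returns the total number of lines.
 */
static unsigned int assign_bases(unsigned int first_base,
				 const unsigned int *ngpios,
				 unsigned int *bases, int nchips)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < nchips; i++) {
		bases[i] = first_base + total;	/* chip i starts here */
		total += ngpios[i];
	}
	return total;
}
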
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
new file mode 100644
index 000000000000..c5c9599a3a71
--- /dev/null
+++ b/drivers/gpio/gpio-menz127.c
@@ -0,0 +1,199 @@
+/*
+ * MEN 16Z127 GPIO driver
+ *
+ * Copyright (C) 2016 MEN Mikroelektronik GmbH (www.men.de)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/mcb.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+
+#define MEN_Z127_CTRL 0x00
+#define MEN_Z127_PSR 0x04
+#define MEN_Z127_IRQR 0x08
+#define MEN_Z127_GPIODR 0x0c
+#define MEN_Z127_IER1 0x10
+#define MEN_Z127_IER2 0x14
+#define MEN_Z127_DBER 0x18
+#define MEN_Z127_ODER 0x1C
+#define GPIO_TO_DBCNT_REG(gpio) (((gpio) * 4) + 0x80)
+
+#define MEN_Z127_DB_MIN_US 50
+/* 16-bit compare register; each count represents 50us */
+#define MEN_Z127_DB_MAX_US (0xffff * MEN_Z127_DB_MIN_US)
+#define MEN_Z127_DB_IN_RANGE(db) ((db >= MEN_Z127_DB_MIN_US) && \
+ (db <= MEN_Z127_DB_MAX_US))
+
+struct men_z127_gpio {
+ struct gpio_chip gc;
+ void __iomem *reg_base;
+ struct mcb_device *mdev;
+ struct resource *mem;
+};
+
+static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
+ unsigned debounce)
+{
+ struct men_z127_gpio *priv = gpiochip_get_data(gc);
+ struct device *dev = &priv->mdev->dev;
+ unsigned int rnd;
+ u32 db_en, db_cnt;
+
+ if (!MEN_Z127_DB_IN_RANGE(debounce)) {
+ dev_err(dev, "debounce value %u out of range", debounce);
+ return -EINVAL;
+ }
+
+ if (debounce > 0) {
+ /* round up or down depending on MSB-1 */
+ rnd = fls(debounce) - 1;
+
+ if (rnd && (debounce & BIT(rnd - 1)))
+ debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
+ else
+ debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
+
+ if (debounce > MEN_Z127_DB_MAX_US)
+ debounce = MEN_Z127_DB_MAX_US;
+
+ /* 50us per register unit */
+ debounce /= 50;
+ }
+
+ spin_lock(&gc->bgpio_lock);
+
+ db_en = readl(priv->reg_base + MEN_Z127_DBER);
+
+ if (debounce == 0) {
+ db_en &= ~BIT(gpio);
+ db_cnt = 0;
+ } else {
+ db_en |= BIT(gpio);
+ db_cnt = debounce;
+ }
+
+ writel(db_en, priv->reg_base + MEN_Z127_DBER);
+ writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
+
+ spin_unlock(&gc->bgpio_lock);
+
+ return 0;
+}
+
+static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
+{
+ struct men_z127_gpio *priv = gpiochip_get_data(gc);
+ u32 od_en;
+
+ if (gpio_pin >= gc->ngpio)
+ return -EINVAL;
+
+ spin_lock(&gc->bgpio_lock);
+ od_en = readl(priv->reg_base + MEN_Z127_ODER);
+
+ if (gpiochip_line_is_open_drain(gc, gpio_pin))
+ od_en |= BIT(gpio_pin);
+ else
+ od_en &= ~BIT(gpio_pin);
+
+ writel(od_en, priv->reg_base + MEN_Z127_ODER);
+ spin_unlock(&gc->bgpio_lock);
+
+ return 0;
+}
+
+static int men_z127_probe(struct mcb_device *mdev,
+ const struct mcb_device_id *id)
+{
+ struct men_z127_gpio *men_z127_gpio;
+ struct device *dev = &mdev->dev;
+ int ret;
+
+ men_z127_gpio = devm_kzalloc(dev, sizeof(struct men_z127_gpio),
+ GFP_KERNEL);
+ if (!men_z127_gpio)
+ return -ENOMEM;
+
+ men_z127_gpio->mem = mcb_request_mem(mdev, dev_name(dev));
+ if (IS_ERR(men_z127_gpio->mem)) {
+ dev_err(dev, "failed to request device memory");
+ return PTR_ERR(men_z127_gpio->mem);
+ }
+
+ men_z127_gpio->reg_base = ioremap(men_z127_gpio->mem->start,
+ resource_size(men_z127_gpio->mem));
+ if (men_z127_gpio->reg_base == NULL) {
+ ret = -ENXIO;
+ goto err_release;
+ }
+
+ men_z127_gpio->mdev = mdev;
+ mcb_set_drvdata(mdev, men_z127_gpio);
+
+ ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
+ men_z127_gpio->reg_base + MEN_Z127_PSR,
+ men_z127_gpio->reg_base + MEN_Z127_CTRL,
+ NULL,
+ men_z127_gpio->reg_base + MEN_Z127_GPIODR,
+ NULL, 0);
+ if (ret)
+ goto err_unmap;
+
+ men_z127_gpio->gc.set_debounce = men_z127_debounce;
+ men_z127_gpio->gc.request = men_z127_request;
+
+ ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
+ if (ret) {
+ dev_err(dev, "failed to register MEN 16Z127 GPIO controller");
+ goto err_unmap;
+ }
+
+ dev_info(dev, "MEN 16Z127 GPIO driver registered\n");
+
+ return 0;
+
+err_unmap:
+ iounmap(men_z127_gpio->reg_base);
+err_release:
+ mcb_release_mem(men_z127_gpio->mem);
+ return ret;
+}
+
+static void men_z127_remove(struct mcb_device *mdev)
+{
+ struct men_z127_gpio *men_z127_gpio = mcb_get_drvdata(mdev);
+
+ gpiochip_remove(&men_z127_gpio->gc);
+ iounmap(men_z127_gpio->reg_base);
+ mcb_release_mem(men_z127_gpio->mem);
+}
+
+static const struct mcb_device_id men_z127_ids[] = {
+ { .device = 0x7f },
+ { }
+};
+MODULE_DEVICE_TABLE(mcb, men_z127_ids);
+
+static struct mcb_driver men_z127_driver = {
+ .driver = {
+ .name = "z127-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = men_z127_probe,
+ .remove = men_z127_remove,
+ .id_table = men_z127_ids,
+};
+module_mcb_driver(men_z127_driver);
+
+MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
+MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("mcb:16z127");
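
The 16Z127 stores debounce as a 16-bit count of 50us units, which is where the 50 .. 0xffff*50 microsecond range above comes from. The conversion, rendered standalone for reference (this sketch uses roundup()/rounddown(), which are defined for non-power-of-two multiples like 50, whereas the driver itself uses round_up()/round_down(); the MSB-1 heuristic for the rounding direction is the driver's own):

#include <linux/bitops.h>
#include <linux/kernel.h>

#define DB_MIN_US 50			/* hardware granularity */
#define DB_MAX_US (0xffff * DB_MIN_US)	/* 16-bit count register */

static u32 usecs_to_db_count(unsigned int us)
{
	unsigned int msb;

	if (!us)
		return 0;		/* debounce disabled */

	/* Round up or down depending on the bit below the MSB,
	 * e.g. 125us -> 150us -> count 3.
	 */
	msb = fls(us) - 1;
	if (msb && (us & BIT(msb - 1)))
		us = roundup(us, DB_MIN_US);
	else
		us = rounddown(us, DB_MIN_US);

	if (us > DB_MAX_US)
		us = DB_MAX_US;

	return us / DB_MIN_US;
}
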
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index ca604538ebf7..f02d0b490978 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -57,13 +57,10 @@ static int moxart_gpio_probe(struct platform_device *pdev)
gc->label = "moxart-gpio";
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
- gc->bgpio_data = gc->read_reg(gc->reg_set);
gc->base = 0;
- gc->ngpio = 32;
- gc->parent = dev;
gc->owner = THIS_MODULE;
- ret = gpiochip_add_data(gc, NULL);
+ ret = devm_gpiochip_add_data(dev, gc, NULL);
if (ret) {
dev_err(dev, "%s: gpiochip_add failed\n",
dev->of_node->full_name);
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 0e5a6709f27d..fc10cf59691c 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -25,7 +25,6 @@
#include <linux/of_platform.h>
#include <linux/module.h>
-#include <asm/gpio.h>
#include <asm/mpc52xx.h>
#include <sysdev/fsl_soc.h>
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 9d40787e66c0..425501c39527 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -1,7 +1,8 @@
/*
- * GPIOs on MPC512x/8349/8572/8610 and compatible
+ * GPIOs on MPC512x/8349/8572/8610/QorIQ and compatible
*
* Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
+ * Copyright (C) 2016 Freescale Semiconductor Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -14,11 +15,12 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <linux/gpio/driver.h>
#define MPC8XXX_GPIO_PINS 32
@@ -31,32 +33,17 @@
#define GPIO_ICR2 0x18
struct mpc8xxx_gpio_chip {
- struct of_mm_gpio_chip mm_gc;
+ struct gpio_chip gc;
+ void __iomem *regs;
raw_spinlock_t lock;
- /*
- * shadowed data register to be able to clear/set output pins in
- * open drain mode safely
- */
- u32 data;
+ int (*direction_output)(struct gpio_chip *chip,
+ unsigned offset, int value);
+
struct irq_domain *irq;
unsigned int irqn;
- const void *of_dev_id_data;
};
-static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
-{
- return 1u << (MPC8XXX_GPIO_PINS - 1 - gpio);
-}
-
-static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm)
-{
- struct mpc8xxx_gpio_chip *mpc8xxx_gc =
- container_of(mm, struct mpc8xxx_gpio_chip, mm_gc);
-
- mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT);
-}
-
/* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs
* defined as output cannot be determined by reading GPDAT register,
* so we use shadow data register instead. The status of input pins
@@ -65,117 +52,36 @@ static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm)
static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
u32 val;
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
u32 out_mask, out_shadow;
- out_mask = in_be32(mm->regs + GPIO_DIR);
-
- val = in_be32(mm->regs + GPIO_DAT) & ~out_mask;
- out_shadow = mpc8xxx_gc->data & out_mask;
-
- return !!((val | out_shadow) & mpc8xxx_gpio2mask(gpio));
-}
-
-static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
-
- return in_be32(mm->regs + GPIO_DAT) & mpc8xxx_gpio2mask(gpio);
-}
-
-static void mpc8xxx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
-{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
-
- if (val)
- mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio);
- else
- mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(gpio);
-
- out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
-
- raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
-}
-
-static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits)
-{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
- unsigned long flags;
- int i;
-
- raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
-
- for (i = 0; i < gc->ngpio; i++) {
- if (*mask == 0)
- break;
- if (__test_and_clear_bit(i, mask)) {
- if (test_bit(i, bits))
- mpc8xxx_gc->data |= mpc8xxx_gpio2mask(i);
- else
- mpc8xxx_gc->data &= ~mpc8xxx_gpio2mask(i);
- }
- }
-
- out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
-
- raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
-}
-
-static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
-{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
- struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
- unsigned long flags;
+ out_mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
+ val = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
+ out_shadow = gc->bgpio_data & out_mask;
- raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
-
- clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
-
- raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
-
- return 0;
+ return !!((val | out_shadow) & gc->pin2mask(gc, gpio));
}
-static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc,
+ unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
- unsigned long flags;
-
- mpc8xxx_gpio_set(gc, gpio, val);
-
- raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
-
- setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
-
- raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
-
- return 0;
-}
-
-static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
-{
/* GPIO 28..31 are input only on MPC5121 */
if (gpio >= 28)
return -EINVAL;
- return mpc8xxx_gpio_dir_out(gc, gpio, val);
+ return mpc8xxx_gc->direction_output(gc, gpio, val);
}
-static int mpc5125_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+static int mpc5125_gpio_dir_out(struct gpio_chip *gc,
+ unsigned int gpio, int val)
{
+ struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
/* GPIO 0..3 are input only on MPC5125 */
if (gpio <= 3)
return -EINVAL;
- return mpc8xxx_gpio_dir_out(gc, gpio, val);
+ return mpc8xxx_gc->direction_output(gc, gpio, val);
}
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -192,10 +98,11 @@ static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned int mask;
- mask = in_be32(mm->regs + GPIO_IER) & in_be32(mm->regs + GPIO_IMR);
+ mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
+ & gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
if (mask)
generic_handle_irq(irq_linear_revmap(mpc8xxx_gc->irq,
32 - ffs(mask)));
@@ -206,12 +113,14 @@ static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ | gc->pin2mask(gc, irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -219,12 +128,14 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ & ~(gc->pin2mask(gc, irqd_to_hwirq(d))));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}
@@ -232,29 +143,32 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
static void mpc8xxx_irq_ack(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
- out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IER,
+ gc->pin2mask(gc, irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- setbits32(mm->regs + GPIO_ICR,
- mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ | gc->pin2mask(gc, irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrbits32(mm->regs + GPIO_ICR,
- mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
+ gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ & ~(gc->pin2mask(gc, irqd_to_hwirq(d))));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -268,17 +182,17 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long gpio = irqd_to_hwirq(d);
void __iomem *reg;
unsigned int shift;
unsigned long flags;
if (gpio < 16) {
- reg = mm->regs + GPIO_ICR;
+ reg = mpc8xxx_gc->regs + GPIO_ICR;
shift = (15 - gpio) * 2;
} else {
- reg = mm->regs + GPIO_ICR2;
+ reg = mpc8xxx_gc->regs + GPIO_ICR2;
shift = (15 - (gpio % 16)) * 2;
}
@@ -286,20 +200,22 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrsetbits_be32(reg, 3 << shift, 2 << shift);
+ gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ | (2 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrsetbits_be32(reg, 3 << shift, 1 << shift);
+ gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ | (1 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- clrbits32(reg, 3 << shift);
+ gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -354,8 +270,6 @@ static const struct mpc8xxx_gpio_devtype mpc8572_gpio_devtype = {
};
static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
- .gpio_dir_out = mpc8xxx_gpio_dir_out,
- .gpio_get = mpc8xxx_gpio_get,
.irq_set_type = mpc8xxx_irq_set_type,
};
@@ -374,9 +288,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
- struct of_mm_gpio_chip *mm_gc;
- struct gpio_chip *gc;
- const struct of_device_id *id;
+ struct gpio_chip *gc;
const struct mpc8xxx_gpio_devtype *devtype =
of_device_get_match_data(&pdev->dev);
int ret;
@@ -389,12 +301,34 @@ static int mpc8xxx_probe(struct platform_device *pdev)
raw_spin_lock_init(&mpc8xxx_gc->lock);
- mm_gc = &mpc8xxx_gc->mm_gc;
- gc = &mm_gc->gc;
+ mpc8xxx_gc->regs = of_iomap(np, 0);
+ if (!mpc8xxx_gc->regs)
+ return -ENOMEM;
+
+ gc = &mpc8xxx_gc->gc;
+
+ if (of_property_read_bool(np, "little-endian")) {
+ ret = bgpio_init(gc, &pdev->dev, 4,
+ mpc8xxx_gc->regs + GPIO_DAT,
+ NULL, NULL,
+ mpc8xxx_gc->regs + GPIO_DIR, NULL,
+ BGPIOF_BIG_ENDIAN);
+ if (ret)
+ goto err;
+ dev_dbg(&pdev->dev, "GPIO registers are LITTLE endian\n");
+ } else {
+ ret = bgpio_init(gc, &pdev->dev, 4,
+ mpc8xxx_gc->regs + GPIO_DAT,
+ NULL, NULL,
+ mpc8xxx_gc->regs + GPIO_DIR, NULL,
+ BGPIOF_BIG_ENDIAN
+ | BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ if (ret)
+ goto err;
+ dev_dbg(&pdev->dev, "GPIO registers are BIG endian\n");
+ }
- mm_gc->save_regs = mpc8xxx_gpio_save_regs;
- gc->ngpio = MPC8XXX_GPIO_PINS;
- gc->direction_input = mpc8xxx_gpio_dir_in;
+ mpc8xxx_gc->direction_output = gc->direction_output;
if (!devtype)
devtype = &mpc8xxx_gpio_devtype_default;
@@ -405,18 +339,22 @@ static int mpc8xxx_probe(struct platform_device *pdev)
*/
mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
- gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out;
- gc->get = devtype->gpio_get ?: mpc8xxx_gpio_get;
- gc->set = mpc8xxx_gpio_set;
- gc->set_multiple = mpc8xxx_gpio_set_multiple;
+ if (devtype->gpio_dir_out)
+ gc->direction_output = devtype->gpio_dir_out;
+ if (devtype->gpio_get)
+ gc->get = devtype->gpio_get;
+
gc->to_irq = mpc8xxx_gpio_to_irq;
- ret = of_mm_gpiochip_add_data(np, mm_gc, mpc8xxx_gc);
- if (ret)
- return ret;
+ ret = gpiochip_add_data(gc, mpc8xxx_gc);
+ if (ret) {
+ pr_err("%s: GPIO chip registration failed with status %d\n",
+ np->full_name, ret);
+ goto err;
+ }
mpc8xxx_gc->irqn = irq_of_parse_and_map(np, 0);
- if (mpc8xxx_gc->irqn == NO_IRQ)
+ if (!mpc8xxx_gc->irqn)
return 0;
mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
@@ -424,18 +362,16 @@ static int mpc8xxx_probe(struct platform_device *pdev)
if (!mpc8xxx_gc->irq)
return 0;
- id = of_match_node(mpc8xxx_gpio_ids, np);
- if (id)
- mpc8xxx_gc->of_dev_id_data = id->data;
-
/* ack and mask all irqs */
- out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
- out_be32(mm_gc->regs + GPIO_IMR, 0);
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
+ gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
-
return 0;
+err:
+ iounmap(mpc8xxx_gc->regs);
+ return ret;
}
static int mpc8xxx_remove(struct platform_device *pdev)
@@ -447,7 +383,8 @@ static int mpc8xxx_remove(struct platform_device *pdev)
irq_domain_remove(mpc8xxx_gc->irq);
}
- of_mm_gpiochip_remove(&mpc8xxx_gc->mm_gc);
+ gpiochip_remove(&mpc8xxx_gc->gc);
+ iounmap(mpc8xxx_gc->regs);
return 0;
}
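
The mpc8xxx rewrite drops the powerpc-only in_be32()/out_be32() accessors and the private shadow register in favour of gpio-generic, which also lets the driver build for the little-endian QorIQ parts the title now mentions. Two bgpio_init() flags encode the layout: BGPIOF_BIG_ENDIAN reverses the bit numbering (pin 0 is the most significant bit on these controllers), and BGPIOF_BIG_ENDIAN_BYTE_ORDER additionally byte-swaps the register accesses; a node marked little-endian keeps the first flag but drops the swap. The selection, as a short sketch (mpc8xxx_bgpio_flags is an illustrative name):

#include <linux/gpio/driver.h>
#include <linux/of.h>

static unsigned long mpc8xxx_bgpio_flags(struct device_node *np)
{
	/* Pin 0 is the MSB on these controllers in either case. */
	unsigned long flags = BGPIOF_BIG_ENDIAN;

	/* The registers themselves are big-endian unless the DT
	 * node carries the "little-endian" property.
	 */
	if (!of_property_read_bool(np, "little-endian"))
		flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;

	return flags;
}
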
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index a5eacc1dff09..11c6582ef0a6 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -756,7 +756,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
BUG();
}
- gpiochip_add_data(&mvchip->chip, mvchip);
+ devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
/* Some gpio controllers do not provide irq support */
if (!of_irq_count(np))
@@ -777,16 +777,14 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
if (mvchip->irqbase < 0) {
dev_err(&pdev->dev, "no irqs\n");
- err = mvchip->irqbase;
- goto err_gpiochip_add;
+ return mvchip->irqbase;
}
gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
mvchip->membase, handle_level_irq);
if (!gc) {
dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
- err = -ENOMEM;
- goto err_gpiochip_add;
+ return -ENOMEM;
}
gc->private = mvchip;
@@ -828,9 +826,6 @@ err_generic_chip:
IRQ_LEVEL | IRQ_NOPROBE);
kfree(gc);
-err_gpiochip_add:
- gpiochip_remove(&mvchip->chip);
-
return err;
}
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 7fd21cb53c81..1b342a3842c8 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -462,14 +462,14 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
pdev->id * 32;
- err = gpiochip_add_data(&port->gc, port);
+ err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
if (err)
goto out_bgio;
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
if (irq_base < 0) {
err = irq_base;
- goto out_gpiochip_remove;
+ goto out_bgio;
}
port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
@@ -492,8 +492,6 @@ out_irqdomain_remove:
irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
-out_gpiochip_remove:
- gpiochip_remove(&port->gc);
out_bgio:
dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
return err;
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 7665ebcd0c1d..47aead1ed1cc 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -117,7 +117,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip->get = octeon_gpio_get;
chip->direction_output = octeon_gpio_dir_out;
chip->set = octeon_gpio_set;
- err = gpiochip_add_data(chip, gpio);
+ err = devm_gpiochip_add_data(&pdev->dev, chip, gpio);
if (err)
goto out;
@@ -126,13 +126,6 @@ out:
return err;
}
-static int octeon_gpio_remove(struct platform_device *pdev)
-{
- struct gpio_chip *chip = dev_get_platdata(&pdev->dev);
- gpiochip_remove(chip);
- return 0;
-}
-
static struct of_device_id octeon_gpio_match[] = {
{
.compatible = "cavium,octeon-3860-gpio",
@@ -147,7 +140,6 @@ static struct platform_driver octeon_gpio_driver = {
.of_match_table = octeon_gpio_match,
},
.probe = octeon_gpio_probe,
- .remove = octeon_gpio_remove,
};
module_platform_driver(octeon_gpio_driver);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 189f672bebc1..551dfa9d97ab 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -66,7 +66,6 @@ struct gpio_bank {
u32 irq_usage;
u32 dbck_enable_mask;
bool dbck_enabled;
- struct device *dev;
bool is_mpuio;
bool dbck_flag;
bool loses_context;
@@ -627,7 +626,7 @@ static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
unsigned long flags;
if (bank->non_wakeup_gpios & gpio_bit) {
- dev_err(bank->dev,
+ dev_err(bank->chip.parent,
"Unable to modify wakeup on non-wakeup GPIO%d\n",
offset);
return -EINVAL;
@@ -669,7 +668,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
* enable the bank module.
*/
if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_get_sync(chip->parent);
raw_spin_lock_irqsave(&bank->lock, flags);
omap_enable_gpio_module(bank, offset);
@@ -698,7 +697,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
* disable the bank module.
*/
if (!BANK_USED(bank))
- pm_runtime_put(bank->dev);
+ pm_runtime_put(chip->parent);
}
/*
@@ -723,7 +722,7 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
if (WARN_ON(!isr_reg))
goto exit;
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_get_sync(bank->chip.parent);
while (1) {
u32 isr_saved, level_mask = 0;
@@ -776,7 +775,7 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
}
}
exit:
- pm_runtime_put(bank->dev);
+ pm_runtime_put(bank->chip.parent);
return IRQ_HANDLED;
}
@@ -826,7 +825,7 @@ static void omap_gpio_irq_bus_lock(struct irq_data *data)
struct gpio_bank *bank = omap_irq_data_get_bank(data);
if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_get_sync(bank->chip.parent);
}
static void gpio_irq_bus_sync_unlock(struct irq_data *data)
@@ -838,7 +837,7 @@ static void gpio_irq_bus_sync_unlock(struct irq_data *data)
* disable the bank module.
*/
if (!BANK_USED(bank))
- pm_runtime_put(bank->dev);
+ pm_runtime_put(bank->chip.parent);
}
static void omap_gpio_ack_irq(struct irq_data *d)
@@ -1100,7 +1099,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
ret = gpiochip_add_data(&bank->chip, bank);
if (ret) {
- dev_err(bank->dev, "Could not register gpio chip %d\n", ret);
+ dev_err(bank->chip.parent,
+ "Could not register gpio chip %d\n", ret);
return ret;
}
@@ -1114,7 +1114,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
*/
irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
if (irq_base < 0) {
- dev_err(bank->dev, "Couldn't allocate IRQ numbers\n");
+ dev_err(bank->chip.parent, "Couldn't allocate IRQ numbers\n");
return -ENODEV;
}
#endif
@@ -1131,15 +1131,17 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
IRQ_TYPE_NONE);
if (ret) {
- dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret);
+ dev_err(bank->chip.parent,
+ "Couldn't add irqchip to gpiochip %d\n", ret);
gpiochip_remove(&bank->chip);
return -ENODEV;
}
gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
- ret = devm_request_irq(bank->dev, bank->irq, omap_gpio_irq_handler,
- 0, dev_name(bank->dev), bank);
+ ret = devm_request_irq(bank->chip.parent, bank->irq,
+ omap_gpio_irq_handler,
+ 0, dev_name(bank->chip.parent), bank);
if (ret)
gpiochip_remove(&bank->chip);
@@ -1196,7 +1198,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
return bank->irq;
}
- bank->dev = dev;
bank->chip.parent = dev;
bank->chip.owner = THIS_MODULE;
bank->dbck_flag = pdata->dbck_flag;
@@ -1235,9 +1236,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
}
if (bank->dbck_flag) {
- bank->dbck = devm_clk_get(bank->dev, "dbclk");
+ bank->dbck = devm_clk_get(dev, "dbclk");
if (IS_ERR(bank->dbck)) {
- dev_err(bank->dev,
+ dev_err(dev,
"Could not get gpio dbck. Disable debounce\n");
bank->dbck_flag = false;
} else {
@@ -1247,9 +1248,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bank);
- pm_runtime_enable(bank->dev);
- pm_runtime_irq_safe(bank->dev);
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_enable(dev);
+ pm_runtime_irq_safe(dev);
+ pm_runtime_get_sync(dev);
if (bank->is_mpuio)
omap_mpuio_init(bank);
@@ -1258,14 +1259,14 @@ static int omap_gpio_probe(struct platform_device *pdev)
ret = omap_gpio_chip_init(bank, irqc);
if (ret) {
- pm_runtime_put_sync(bank->dev);
- pm_runtime_disable(bank->dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
omap_gpio_show_rev(bank);
- pm_runtime_put(bank->dev);
+ pm_runtime_put(dev);
list_add_tail(&bank->node, &omap_gpio_list);
@@ -1278,7 +1279,7 @@ static int omap_gpio_remove(struct platform_device *pdev)
list_del(&bank->node);
gpiochip_remove(&bank->chip);
- pm_runtime_disable(bank->dev);
+ pm_runtime_disable(&pdev->dev);
if (bank->dbck_flag)
clk_unprepare(bank->dbck);
@@ -1348,7 +1349,7 @@ static int omap_gpio_runtime_suspend(struct device *dev)
update_gpio_context_count:
if (bank->get_context_loss_count)
bank->context_loss_count =
- bank->get_context_loss_count(bank->dev);
+ bank->get_context_loss_count(dev);
omap_gpio_dbck_disable(bank);
raw_spin_unlock_irqrestore(&bank->lock, flags);
@@ -1378,7 +1379,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
if (bank->get_context_loss_count)
bank->context_loss_count =
- bank->get_context_loss_count(bank->dev);
+ bank->get_context_loss_count(dev);
}
omap_gpio_dbck_enable(bank);
@@ -1398,7 +1399,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
if (!bank->get_context_loss_count) {
omap_gpio_restore_context(bank);
} else {
- c = bank->get_context_loss_count(bank->dev);
+ c = bank->get_context_loss_count(dev);
if (c != bank->context_loss_count) {
omap_gpio_restore_context(bank);
} else {
@@ -1481,7 +1482,7 @@ void omap2_gpio_prepare_for_idle(int pwr_mode)
bank->power_mode = pwr_mode;
- pm_runtime_put_sync_suspend(bank->dev);
+ pm_runtime_put_sync_suspend(bank->chip.parent);
}
}
@@ -1493,7 +1494,7 @@ void omap2_gpio_resume_after_idle(void)
if (!BANK_USED(bank) || !bank->loses_context)
continue;
- pm_runtime_get_sync(bank->dev);
+ pm_runtime_get_sync(bank->chip.parent);
}
}
#endif
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index fdfb3b1e0def..6f27b3d94d53 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -195,7 +195,8 @@ static int palmas_gpio_probe(struct platform_device *pdev)
else
palmas_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&palmas_gpio->gpio_chip, palmas_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &palmas_gpio->gpio_chip,
+ palmas_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -205,20 +206,11 @@ static int palmas_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int palmas_gpio_remove(struct platform_device *pdev)
-{
- struct palmas_gpio *palmas_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&palmas_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver palmas_gpio_driver = {
.driver.name = "palmas-gpio",
.driver.owner = THIS_MODULE,
.driver.of_match_table = of_palmas_gpio_match,
.probe = palmas_gpio_probe,
- .remove = palmas_gpio_remove,
};
static int __init palmas_gpio_init(void)
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 23196c5fc17c..e66084c295fb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -18,6 +18,7 @@
#include <linux/i2c.h>
#include <linux/platform_data/pca953x.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <linux/of_platform.h>
#include <linux/acpi.h>
@@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
switch (chip->chip_type) {
case PCA953X_TYPE:
ret = i2c_smbus_write_word_data(chip->client,
- reg << 1, (u16) *val);
+ reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
break;
case PCA957X_TYPE:
ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
@@ -367,9 +368,11 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
memcpy(reg_val, chip->reg_output, NBANK(chip));
mutex_lock(&chip->i2c_lock);
for(bank=0; bank<NBANK(chip); bank++) {
- unsigned bankmask = mask[bank/4] >> ((bank % 4) * 8);
+ unsigned bankmask = mask[bank / sizeof(*mask)] >>
+ ((bank % sizeof(*mask)) * 8);
if(bankmask) {
- unsigned bankval = bits[bank/4] >> ((bank % 4) * 8);
+ unsigned bankval = bits[bank / sizeof(*bits)] >>
+ ((bank % sizeof(*bits)) * 8);
reg_val[bank] = (reg_val[bank] & ~bankmask) | bankval;
}
}
@@ -754,7 +757,7 @@ static int pca953x_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = gpiochip_add_data(&chip->gpio_chip, chip);
+ ret = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
if (ret)
return ret;
@@ -789,8 +792,6 @@ static int pca953x_remove(struct i2c_client *client)
}
}
- gpiochip_remove(&chip->gpio_chip);
-
return 0;
}
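
A note on the set_multiple() hunk above: mask and bits are arrays of unsigned long, so the old bank/4 indexing silently assumed a 32-bit long and read the wrong element on 64-bit machines. Dividing by sizeof(*mask) makes the byte-bank extraction width-independent. A standalone model of the arithmetic (plain C, example values invented):

/* Portable extraction of 8-bit banks from an unsigned long bitmap,
 * mirroring the fixed pca953x_gpio_set_multiple() indexing. */
#include <stdio.h>

int main(void)
{
	unsigned long mask[1] = { 0x00ff00f0UL };	/* invented bitmap */
	unsigned int bank;

	/* one 8-bit bank per byte of unsigned long: 4 on ILP32, 8 on LP64 */
	for (bank = 0; bank < sizeof(*mask); bank++) {
		unsigned int bankmask = (mask[bank / sizeof(*mask)] >>
					 ((bank % sizeof(*mask)) * 8)) & 0xff;
		printf("bank %u: 0x%02x\n", bank, bankmask);
	}
	return 0;
}
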
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 709cd3fc2a70..169c09aa33c8 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -372,7 +372,7 @@ static int pcf857x_probe(struct i2c_client *client,
gpio->out = ~n_latch;
gpio->status = gpio->out;
- status = gpiochip_add_data(&gpio->chip, gpio);
+ status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
if (status < 0)
goto fail;
@@ -383,7 +383,7 @@ static int pcf857x_probe(struct i2c_client *client,
IRQ_TYPE_NONE);
if (status) {
dev_err(&client->dev, "cannot add irqchip\n");
- goto fail_irq;
+ goto fail;
}
status = devm_request_threaded_irq(&client->dev, client->irq,
@@ -391,7 +391,7 @@ static int pcf857x_probe(struct i2c_client *client,
IRQF_TRIGGER_FALLING | IRQF_SHARED,
dev_name(&client->dev), gpio);
if (status)
- goto fail_irq;
+ goto fail;
gpiochip_set_chained_irqchip(&gpio->chip, &pcf857x_irq_chip,
client->irq, NULL);
@@ -413,9 +413,6 @@ static int pcf857x_probe(struct i2c_client *client,
return 0;
-fail_irq:
- gpiochip_remove(&gpio->chip);
-
fail:
dev_dbg(&client->dev, "probe error %d for '%s'\n", status,
client->name);
@@ -440,7 +437,6 @@ static int pcf857x_remove(struct i2c_client *client)
}
}
- gpiochip_remove(&gpio->chip);
return status;
}
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
new file mode 100644
index 000000000000..cb14b8d1d512
--- /dev/null
+++ b/drivers/gpio/gpio-pisosr.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spi/spi.h>
+
+#define DEFAULT_NGPIO 8
+
+/**
+ * struct pisosr_gpio - GPIO driver data
+ * @chip: GPIO controller chip
+ * @spi: SPI device pointer
+ * @buffer: Buffer for device reads
+ * @buffer_size: Size of buffer
+ * @load_gpio: GPIO pin used to load input into device
+ * @lock: Protects read sequences
+ */
+struct pisosr_gpio {
+ struct gpio_chip chip;
+ struct spi_device *spi;
+ u8 *buffer;
+ size_t buffer_size;
+ struct gpio_desc *load_gpio;
+ struct mutex lock;
+};
+
+static int pisosr_gpio_refresh(struct pisosr_gpio *gpio)
+{
+ int ret;
+
+ mutex_lock(&gpio->lock);
+
+ if (gpio->load_gpio) {
+ gpiod_set_value_cansleep(gpio->load_gpio, 1);
+ udelay(1); /* registers load time (~10ns) */
+ gpiod_set_value_cansleep(gpio->load_gpio, 0);
+ udelay(1); /* registers recovery time (~5ns) */
+ }
+
+ ret = spi_read(gpio->spi, gpio->buffer, gpio->buffer_size);
+
+ mutex_unlock(&gpio->lock);
+
+ return ret;
+}
+
+static int pisosr_gpio_get_direction(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is always an input */
+ return 1;
+}
+
+static int pisosr_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is always an input */
+ return 0;
+}
+
+static int pisosr_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ /* This device is input only */
+ return -EINVAL;
+}
+
+static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct pisosr_gpio *gpio = gpiochip_get_data(chip);
+
+ /* Refresh may not always be needed */
+ pisosr_gpio_refresh(gpio);
+
+ return (gpio->buffer[offset / 8] >> (offset % 8)) & 0x1;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "pisosr-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = pisosr_gpio_get_direction,
+ .direction_input = pisosr_gpio_direction_input,
+ .direction_output = pisosr_gpio_direction_output,
+ .get = pisosr_gpio_get,
+ .base = -1,
+ .ngpio = DEFAULT_NGPIO,
+ .can_sleep = true,
+};
+
+static int pisosr_gpio_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct pisosr_gpio *gpio;
+ int ret;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, gpio);
+
+ gpio->chip = template_chip;
+ gpio->chip.parent = dev;
+ of_property_read_u16(dev->of_node, "ngpios", &gpio->chip.ngpio);
+
+ gpio->spi = spi;
+
+ gpio->buffer_size = DIV_ROUND_UP(gpio->chip.ngpio, 8);
+ gpio->buffer = devm_kzalloc(dev, gpio->buffer_size, GFP_KERNEL);
+ if (!gpio->buffer)
+ return -ENOMEM;
+
+ gpio->load_gpio = devm_gpiod_get_optional(dev, "load", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio->load_gpio)) {
+ ret = PTR_ERR(gpio->load_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Unable to allocate load GPIO\n");
+ return ret;
+ }
+
+ mutex_init(&gpio->lock);
+
+ ret = gpiochip_add_data(&gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(dev, "Unable to register gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pisosr_gpio_remove(struct spi_device *spi)
+{
+ struct pisosr_gpio *gpio = spi_get_drvdata(spi);
+
+ gpiochip_remove(&gpio->chip);
+
+ mutex_destroy(&gpio->lock);
+
+ return 0;
+}
+
+static const struct spi_device_id pisosr_gpio_id_table[] = {
+ { "pisosr-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, pisosr_gpio_id_table);
+
+static const struct of_device_id pisosr_gpio_of_match_table[] = {
+ { .compatible = "pisosr-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pisosr_gpio_of_match_table);
+
+static struct spi_driver pisosr_gpio_driver = {
+ .driver = {
+ .name = "pisosr-gpio",
+ .of_match_table = pisosr_gpio_of_match_table,
+ },
+ .probe = pisosr_gpio_probe,
+ .remove = pisosr_gpio_remove,
+ .id_table = pisosr_gpio_id_table,
+};
+module_spi_driver(pisosr_gpio_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("SPI Compatible PISO Shift Register GPIO Driver");
+MODULE_LICENSE("GPL v2");
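
For reference, the bit lookup in pisosr_gpio_get() above packs one line per buffer bit: line "offset" lives at bit (offset % 8) of byte (offset / 8) of the SPI read buffer. A self-contained model (plain C; the buffer contents are invented stand-ins for a real shift-register read):

/* Model of pisosr_gpio_get()'s lookup into the SPI read buffer. */
#include <stdio.h>

static int pisosr_bit(const unsigned char *buf, unsigned int offset)
{
	return (buf[offset / 8] >> (offset % 8)) & 0x1;
}

int main(void)
{
	unsigned char buf[2] = { 0xa5, 0x01 };	/* invented register state */
	unsigned int offset;

	for (offset = 0; offset < 16; offset++)
		printf("gpio %2u = %d\n", offset, pisosr_bit(buf, offset));
	return 0;
}
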
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index b2b7b78664b8..76ac906b4d78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
ret = pinctrl_gpio_direction_output(chip->base + offset);
- if (!ret)
- return 0;
+ if (ret)
+ return ret;
spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 1e2d210b3369..1d6100fa312a 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -136,15 +136,8 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rc5t583_gpio);
- return gpiochip_add_data(&rc5t583_gpio->gpio_chip, rc5t583_gpio);
-}
-
-static int rc5t583_gpio_remove(struct platform_device *pdev)
-{
- struct rc5t583_gpio *rc5t583_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&rc5t583_gpio->gpio_chip);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &rc5t583_gpio->gpio_chip,
+ rc5t583_gpio);
}
static struct platform_driver rc5t583_gpio_driver = {
@@ -152,7 +145,6 @@ static struct platform_driver rc5t583_gpio_driver = {
.name = "rc5t583-gpio",
},
.probe = rc5t583_gpio_probe,
- .remove = rc5t583_gpio_remove,
};
static int __init rc5t583_gpio_init(void)
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 96ddee3f464a..ec945b90f54d 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -194,23 +194,14 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "registering %d GPIOs\n",
rdc321x_gpio_dev->chip.ngpio);
- return gpiochip_add_data(&rdc321x_gpio_dev->chip, rdc321x_gpio_dev);
-}
-
-static int rdc321x_gpio_remove(struct platform_device *pdev)
-{
- struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
-
- gpiochip_remove(&rdc321x_gpio_dev->chip);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &rdc321x_gpio_dev->chip,
+ rdc321x_gpio_dev);
}
static struct platform_driver rdc321x_gpio_driver = {
.driver.name = "rdc321x-gpio",
.driver.owner = THIS_MODULE,
.probe = rdc321x_gpio_probe,
- .remove = rdc321x_gpio_remove,
};
module_platform_driver(rdc321x_gpio_driver);
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 5314ee4b947d..e85e7539cf5d 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -215,15 +215,7 @@ static int sch_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sch);
- return gpiochip_add_data(&sch->chip, sch);
-}
-
-static int sch_gpio_remove(struct platform_device *pdev)
-{
- struct sch_gpio *sch = platform_get_drvdata(pdev);
-
- gpiochip_remove(&sch->chip);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &sch->chip, sch);
}
static struct platform_driver sch_gpio_driver = {
@@ -231,7 +223,6 @@ static struct platform_driver sch_gpio_driver = {
.name = "sch_gpio",
},
.probe = sch_gpio_probe,
- .remove = sch_gpio_remove,
};
module_platform_driver(sch_gpio_driver);
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index 1cbd77a04e7b..a03b38ee2e02 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -12,6 +12,7 @@
* (at your option) any later version.
*/
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -228,7 +229,8 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
int err, i;
/* we can register all GPIO data registers at once */
- if (!request_region(pdata->runtime_reg + GP1, 6, DRV_NAME)) {
+ if (!devm_request_region(&pdev->dev, pdata->runtime_reg + GP1, 6,
+ DRV_NAME)) {
dev_err(&pdev->dev, "Failed to request region 0x%04x-0x%04x.\n",
pdata->runtime_reg + GP1, pdata->runtime_reg + GP1 + 5);
return -EBUSY;
@@ -273,7 +275,6 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
return 0;
exit_err:
- release_region(pdata->runtime_reg + GP1, 6);
/* release already registered chips */
for (--i; i >= 0; i--)
gpiochip_remove(&priv->blocks[i].chip);
@@ -282,12 +283,9 @@ exit_err:
static int sch311x_gpio_remove(struct platform_device *pdev)
{
- struct sch311x_pdev_data *pdata = dev_get_platdata(&pdev->dev);
struct sch311x_gpio_priv *priv = platform_get_drvdata(pdev);
int i;
- release_region(pdata->runtime_reg + GP1, 6);
-
for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) {
gpiochip_remove(&priv->blocks[i].chip);
dev_info(&pdev->dev,
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 50fb09080a6b..7ffd16495286 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -165,7 +165,7 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.owner = THIS_MODULE;
spics->last_off = -1;
- ret = gpiochip_add_data(&spics->chip, spics);
+ ret = devm_gpiochip_add_data(&pdev->dev, &spics->chip, spics);
if (ret) {
dev_err(&pdev->dev, "unable to add gpio chip\n");
return ret;
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 83af1cb36333..0d5b8c525dd9 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -409,7 +409,7 @@ static int gsta_probe(struct platform_device *dev)
goto err_free_descs;
}
- err = gpiochip_add_data(&chip->gpio, chip);
+ err = devm_gpiochip_add_data(&dev->dev, &chip->gpio, chip);
if (err < 0) {
dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
-err);
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index d11dd48570b2..19e654f88b3a 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -258,7 +258,7 @@ static int xway_stp_probe(struct platform_device *pdev)
ret = xway_stp_hw_init(chip);
if (!ret)
- ret = gpiochip_add_data(&chip->gc, chip);
+ ret = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
if (!ret)
dev_info(&pdev->dev, "Init done\n");
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index e6cff1cabd0c..d387eb524bf3 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -687,7 +687,7 @@ static int sx150x_probe(struct i2c_client *client,
if (rc < 0)
return rc;
- rc = gpiochip_add_data(&chip->gpio_chip, chip);
+ rc = devm_gpiochip_add_data(&client->dev, &chip->gpio_chip, chip);
if (rc)
return rc;
@@ -696,25 +696,12 @@ static int sx150x_probe(struct i2c_client *client,
pdata->irq_summary,
pdata->irq_base);
if (rc < 0)
- goto probe_fail_post_gpiochip_add;
+ return rc;
}
i2c_set_clientdata(client, chip);
return 0;
-probe_fail_post_gpiochip_add:
- gpiochip_remove(&chip->gpio_chip);
- return rc;
-}
-
-static int sx150x_remove(struct i2c_client *client)
-{
- struct sx150x_chip *chip;
-
- chip = i2c_get_clientdata(client);
- gpiochip_remove(&chip->gpio_chip);
-
- return 0;
}
static struct i2c_driver sx150x_driver = {
@@ -723,7 +710,6 @@ static struct i2c_driver sx150x_driver = {
.of_match_table = of_match_ptr(sx150x_of_match),
},
.probe = sx150x_probe,
- .remove = sx150x_remove,
.id_table = sx150x_id,
};
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index e5c5b6205886..24b6d643ecdb 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -238,15 +238,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- return gpiochip_add_data(&priv->chip, priv);
-}
-
-static int syscon_gpio_remove(struct platform_device *pdev)
-{
- struct syscon_gpio_priv *priv = platform_get_drvdata(pdev);
-
- gpiochip_remove(&priv->chip);
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
}
static struct platform_driver syscon_gpio_driver = {
@@ -255,7 +247,6 @@ static struct platform_driver syscon_gpio_driver = {
.of_match_table = syscon_gpio_ids,
},
.probe = syscon_gpio_probe,
- .remove = syscon_gpio_remove,
};
module_platform_driver(syscon_gpio_driver);
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 5eaec20ddbc7..80b6959ae995 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -205,10 +205,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
tb10x_gpio->gc.can_sleep = false;
- ret = gpiochip_add_data(&tb10x_gpio->gc, tb10x_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tb10x_gpio->gc, tb10x_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not add gpiochip.\n");
- goto fail_gpiochip_registration;
+ return ret;
}
platform_set_drvdata(pdev, tb10x_gpio);
@@ -219,7 +219,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(&pdev->dev, "No interrupt specified.\n");
- goto fail_get_irq;
+ return ret;
}
tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
@@ -229,14 +229,13 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
IRQF_TRIGGER_NONE | IRQF_SHARED,
dev_name(&pdev->dev), tb10x_gpio);
if (ret != 0)
- goto fail_request_irq;
+ return ret;
tb10x_gpio->domain = irq_domain_add_linear(dn,
tb10x_gpio->gc.ngpio,
&irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
- ret = -ENOMEM;
- goto fail_irq_domain;
+ return -ENOMEM;
}
ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
@@ -244,7 +243,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
- goto fail_irq_domain;
+ return ret;
gc = tb10x_gpio->domain->gc->gc[0];
gc->reg_base = tb10x_gpio->base;
@@ -258,14 +257,6 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
}
return 0;
-
-fail_irq_domain:
-fail_request_irq:
-fail_get_irq:
- gpiochip_remove(&tb10x_gpio->gc);
-fail_gpiochip_registration:
-fail_ioremap:
- return ret;
}
static int tb10x_gpio_remove(struct platform_device *pdev)
@@ -278,7 +269,6 @@ static int tb10x_gpio_remove(struct platform_device *pdev)
kfree(tb10x_gpio->domain->gc);
irq_domain_remove(tb10x_gpio->domain);
}
- gpiochip_remove(&tb10x_gpio->gc);
return 0;
}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 05a27ec55add..4f566e6b81f1 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -272,7 +272,8 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
return ret;
}
- ret = gpiochip_add_data(&tc3589x_gpio->chip, tc3589x_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tc3589x_gpio->chip,
+ tc3589x_gpio);
if (ret) {
dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
return ret;
@@ -299,20 +300,10 @@ static int tc3589x_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int tc3589x_gpio_remove(struct platform_device *pdev)
-{
- struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tc3589x_gpio->chip);
-
- return 0;
-}
-
static struct platform_driver tc3589x_gpio_driver = {
.driver.name = "tc3589x-gpio",
.driver.owner = THIS_MODULE,
.probe = tc3589x_gpio_probe,
- .remove = tc3589x_gpio_remove,
};
static int __init tc3589x_gpio_init(void)
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9a1a7e2ef388..790bb111b2cb 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -545,7 +545,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tegra_gpio_chip.of_node = pdev->dev.of_node;
- ret = gpiochip_add_data(&tegra_gpio_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tegra_gpio_chip, NULL);
if (ret < 0) {
irq_domain_remove(irq_domain);
return ret;
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index a6de10c5275b..85ed608c2b27 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -237,12 +237,6 @@ static int timbgpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- dev_err(dev, "Unable to get resource\n");
- return -EINVAL;
- }
-
tgpio = devm_kzalloc(dev, sizeof(struct timbgpio), GFP_KERNEL);
if (!tgpio) {
dev_err(dev, "Memory alloc failed\n");
@@ -252,17 +246,10 @@ static int timbgpio_probe(struct platform_device *pdev)
spin_lock_init(&tgpio->lock);
- if (!devm_request_mem_region(dev, iomem->start, resource_size(iomem),
- DRIVER_NAME)) {
- dev_err(dev, "Region already claimed\n");
- return -EBUSY;
- }
-
- tgpio->membase = devm_ioremap(dev, iomem->start, resource_size(iomem));
- if (!tgpio->membase) {
- dev_err(dev, "Cannot ioremap\n");
- return -ENOMEM;
- }
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tgpio->membase = devm_ioremap_resource(dev, iomem);
+ if (IS_ERR(tgpio->membase))
+ return PTR_ERR(tgpio->membase);
gc = &tgpio->gpio;
@@ -279,7 +266,7 @@ static int timbgpio_probe(struct platform_device *pdev)
gc->ngpio = pdata->nr_pins;
gc->can_sleep = false;
- err = gpiochip_add_data(gc, tgpio);
+ err = devm_gpiochip_add_data(&pdev->dev, gc, tgpio);
if (err)
return err;
@@ -320,8 +307,6 @@ static int timbgpio_remove(struct platform_device *pdev)
irq_set_handler_data(irq, NULL);
}
- gpiochip_remove(&tgpio->gpio);
-
return 0;
}
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
new file mode 100644
index 000000000000..9f020aa4b067
--- /dev/null
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#define TPIC2810_WS_COMMAND 0x44
+
+/**
+ * struct tpic2810 - GPIO driver data
+ * @chip: GPIO controller chip
+ * @client: I2C device pointer
+ * @buffer: Buffer for device register
+ * @lock: Protects write sequences
+ */
+struct tpic2810 {
+ struct gpio_chip chip;
+ struct i2c_client *client;
+ u8 buffer;
+ struct mutex lock;
+};
+
+static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value);
+
+static int tpic2810_get_direction(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is always an output */
+ return 0;
+}
+
+static int tpic2810_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is output only */
+ return -EINVAL;
+}
+
+static int tpic2810_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ /* This device is always an output */
+ tpic2810_set(chip, offset, value);
+ return 0;
+}
+
+static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct tpic2810 *gpio = gpiochip_get_data(chip);
+
+ mutex_lock(&gpio->lock);
+
+ if (value)
+ gpio->buffer |= BIT(offset);
+ else
+ gpio->buffer &= ~BIT(offset);
+
+ i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
+ gpio->buffer);
+
+ mutex_unlock(&gpio->lock);
+}
+
+static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct tpic2810 *gpio = gpiochip_get_data(chip);
+
+ mutex_lock(&gpio->lock);
+
+ /* clear bits under mask */
+ gpio->buffer &= ~(*mask);
+ /* set bits under mask */
+ gpio->buffer |= ((*mask) & (*bits));
+
+ i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
+ gpio->buffer);
+
+ mutex_unlock(&gpio->lock);
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tpic2810",
+ .owner = THIS_MODULE,
+ .get_direction = tpic2810_get_direction,
+ .direction_input = tpic2810_direction_input,
+ .direction_output = tpic2810_direction_output,
+ .set = tpic2810_set,
+ .set_multiple = tpic2810_set_multiple,
+ .base = -1,
+ .ngpio = 8,
+ .can_sleep = true,
+};
+
+static const struct of_device_id tpic2810_of_match_table[] = {
+ { .compatible = "ti,tpic2810" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tpic2810_of_match_table);
+
+static int tpic2810_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tpic2810 *gpio;
+ int ret;
+
+ gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, gpio);
+
+ gpio->chip = template_chip;
+ gpio->chip.parent = &client->dev;
+
+ gpio->client = client;
+
+ mutex_init(&gpio->lock);
+
+ ret = gpiochip_add_data(&gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(&client->dev, "Unable to register gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tpic2810_remove(struct i2c_client *client)
+{
+ struct tpic2810 *gpio = i2c_get_clientdata(client);
+
+ gpiochip_remove(&gpio->chip);
+
+ return 0;
+}
+
+static const struct i2c_device_id tpic2810_id_table[] = {
+ { "tpic2810", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, tpic2810_id_table);
+
+static struct i2c_driver tpic2810_driver = {
+ .driver = {
+ .name = "tpic2810",
+ .of_match_table = tpic2810_of_match_table,
+ },
+ .probe = tpic2810_probe,
+ .remove = tpic2810_remove,
+ .id_table = tpic2810_id_table,
+};
+module_i2c_driver(tpic2810_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPIC2810 8-Bit LED Driver GPIO Driver");
+MODULE_LICENSE("GPL v2");
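
The read-modify-write in tpic2810_set_multiple() above is the classic masked update: clear every bit under the mask, then OR in the masked new values, so untouched lines keep their state. A standalone model (plain C, values invented):

/* Masked update as done in tpic2810_set_multiple(). */
#include <stdio.h>

int main(void)
{
	unsigned char buffer = 0xf0;	/* current register shadow */
	unsigned char mask = 0x3c;	/* lines selected for update */
	unsigned char bits = 0x14;	/* new values for those lines */

	buffer &= ~mask;		/* clear bits under mask */
	buffer |= (mask & bits);	/* set bits under mask */

	printf("result: 0x%02x\n", buffer);	/* prints 0xd4 */
	return 0;
}
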
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
new file mode 100644
index 000000000000..8e25f01ac314
--- /dev/null
+++ b/drivers/gpio/gpio-tps65086.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65912 driver
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/tps65086.h>
+
+struct tps65086_gpio {
+ struct gpio_chip chip;
+ struct tps65086 *tps;
+};
+
+static int tps65086_gpio_get_direction(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is output only */
+ return 0;
+}
+
+static int tps65086_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ /* This device is output only */
+ return -EINVAL;
+}
+
+static int tps65086_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct tps65086_gpio *gpio = gpiochip_get_data(chip);
+
+ /* Set the initial value */
+ regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
+
+ return 0;
+}
+
+static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct tps65086_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->tps->regmap, TPS65086_GPOCTRL, &val);
+ if (ret < 0)
+ return ret;
+
+ return val & BIT(4 + offset);
+}
+
+static void tps65086_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct tps65086_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
+}
+
+static struct gpio_chip template_chip = {
+ .label = "tps65086-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = tps65086_gpio_get_direction,
+ .direction_input = tps65086_gpio_direction_input,
+ .direction_output = tps65086_gpio_direction_output,
+ .get = tps65086_gpio_get,
+ .set = tps65086_gpio_set,
+ .base = -1,
+ .ngpio = 4,
+ .can_sleep = true,
+};
+
+static int tps65086_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65086_gpio *gpio;
+ int ret;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gpio);
+
+ gpio->tps = dev_get_drvdata(pdev->dev.parent);
+ gpio->chip = template_chip;
+ gpio->chip.parent = gpio->tps->dev;
+
+ ret = gpiochip_add_data(&gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps65086_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65086_gpio *gpio = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&gpio->chip);
+
+ return 0;
+}
+
+static const struct platform_device_id tps65086_gpio_id_table[] = {
+ { "tps65086-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65086_gpio_id_table);
+
+static struct platform_driver tps65086_gpio_driver = {
+ .driver = {
+ .name = "tps65086-gpio",
+ },
+ .probe = tps65086_gpio_probe,
+ .remove = tps65086_gpio_remove,
+ .id_table = tps65086_gpio_id_table,
+};
+module_platform_driver(tps65086_gpio_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65086 GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
new file mode 100644
index 000000000000..313c0e484607
--- /dev/null
+++ b/drivers/gpio/gpio-tps65218.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2015 Verifone Int.
+ *
+ * Author: Nicolas Saenz Julienne <nicolassaenzj@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This driver is based on the gpio-tps65912 implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/tps65218.h>
+
+struct tps65218_gpio {
+ struct tps65218 *tps65218;
+ struct gpio_chip gpio_chip;
+};
+
+static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
+ struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+ unsigned int val;
+ int ret;
+
+ ret = tps65218_reg_read(tps65218, TPS65218_REG_ENABLE2, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & (TPS65218_ENABLE2_GPIO1 << offset));
+}
+
+static void tps65218_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
+ struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+
+ if (value)
+ tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
+ else
+ tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
+}
+
+static int tps65218_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ /* Only drives GPOs */
+ tps65218_gpio_set(gc, offset, value);
+ return 0;
+}
+
+static int tps65218_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+ return -EPERM;
+}
+
+static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
+ struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+ int ret;
+
+ if (gpiochip_line_is_open_source(gc, offset)) {
+ dev_err(gc->parent, "can't work as open source\n");
+ return -EINVAL;
+ }
+
+ switch (offset) {
+ case 0:
+ if (!gpiochip_line_is_open_drain(gc, offset)) {
+ dev_err(gc->parent, "GPO1 works only as open drain\n");
+ return -EINVAL;
+ }
+
+ /* Disable sequencer for GPO1 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_SEQ7,
+ TPS65218_SEQ7_GPO1_SEQ_MASK,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ /* Setup GPO1 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG1,
+ TPS65218_CONFIG1_IO1_SEL,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ break;
+ case 1:
+ /* GPO2 is push-pull by default, but can be set as open drain. */
+ if (gpiochip_line_is_open_drain(gc, offset)) {
+ ret = tps65218_clear_bits(tps65218,
+ TPS65218_REG_CONFIG1,
+ TPS65218_CONFIG1_GPO2_BUF,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+ }
+
+ /* Setup GPO2 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG1,
+ TPS65218_CONFIG1_IO1_SEL,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ break;
+
+ case 2:
+ if (!gpiochip_line_is_open_drain(gc, offset)) {
+ dev_err(gc->parent, "GPO3 works only as open drain\n");
+ return -EINVAL;
+ }
+
+ /* Disable sequencer for GPO3 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_SEQ7,
+ TPS65218_SEQ7_GPO3_SEQ_MASK,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ /* Setup GPO3 */
+ ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG2,
+ TPS65218_CONFIG2_DC12_RST,
+ TPS65218_PROTECT_L1);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct gpio_chip template_chip = {
+ .label = "gpio-tps65218",
+ .owner = THIS_MODULE,
+ .request = tps65218_gpio_request,
+ .direction_output = tps65218_gpio_output,
+ .direction_input = tps65218_gpio_input,
+ .get = tps65218_gpio_get,
+ .set = tps65218_gpio_set,
+ .can_sleep = true,
+ .ngpio = 3,
+ .base = -1,
+};
+
+static int tps65218_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65218 *tps65218 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65218_gpio *tps65218_gpio;
+ int ret;
+
+ tps65218_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps65218_gpio),
+ GFP_KERNEL);
+ if (!tps65218_gpio)
+ return -ENOMEM;
+
+ tps65218_gpio->tps65218 = tps65218;
+ tps65218_gpio->gpio_chip = template_chip;
+ tps65218_gpio->gpio_chip.parent = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ tps65218_gpio->gpio_chip.of_node = pdev->dev.of_node;
+#endif
+
+ ret = gpiochip_add_data(&tps65218_gpio->gpio_chip, tps65218_gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, tps65218_gpio);
+
+ return ret;
+}
+
+static int tps65218_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65218_gpio *tps65218_gpio = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&tps65218_gpio->gpio_chip);
+
+ return 0;
+}
+
+static const struct of_device_id tps65218_dt_match[] = {
+ { .compatible = "ti,tps65218-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tps65218_dt_match);
+
+static struct platform_driver tps65218_gpio_driver = {
+ .driver = {
+ .name = "tps65218-gpio",
+ .of_match_table = of_match_ptr(tps65218_dt_match)
+ },
+ .probe = tps65218_gpio_probe,
+ .remove = tps65218_gpio_remove,
+};
+
+module_platform_driver(tps65218_gpio_driver);
+
+MODULE_AUTHOR("Nicolas Saenz Julienne <nicolassaenzj@gmail.com>");
+MODULE_DESCRIPTION("GPO interface for TPS65218 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65218-gpio");
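
One small idiom worth calling out in tps65218_gpio_get() above: !!(val & bit) collapses a masked register read into exactly 0 or 1, which is what gpiolib expects from a .get callback. A trivial model (plain C, register value invented):

/* The !!(val & bit) normalization used in tps65218_gpio_get(). */
#include <stdio.h>

int main(void)
{
	unsigned int val = 0x40;	/* invented ENABLE2 register value */
	unsigned int bit = 1u << 6;	/* invented per-line enable bit */

	printf("%d\n", !!(val & bit));	/* prints 1, never 0x40 */
	return 0;
}
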
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index 87de5486a29e..c88bdc8ee2c9 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -117,7 +117,8 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
else
tps6586x_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&tps6586x_gpio->gpio_chip, tps6586x_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tps6586x_gpio->gpio_chip,
+ tps6586x_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -128,19 +129,10 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int tps6586x_gpio_remove(struct platform_device *pdev)
-{
- struct tps6586x_gpio *tps6586x_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tps6586x_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver tps6586x_gpio_driver = {
.driver.name = "tps6586x-gpio",
.driver.owner = THIS_MODULE,
.probe = tps6586x_gpio_probe,
- .remove = tps6586x_gpio_remove,
};
static int __init tps6586x_gpio_init(void)
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index e81eee7627a3..cdbd7c740043 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -170,7 +170,8 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
}
skip_init:
- ret = gpiochip_add_data(&tps65910_gpio->gpio_chip, tps65910_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tps65910_gpio->gpio_chip,
+ tps65910_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -181,19 +182,10 @@ skip_init:
return ret;
}
-static int tps65910_gpio_remove(struct platform_device *pdev)
-{
- struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tps65910_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver tps65910_gpio_driver = {
.driver.name = "tps65910-gpio",
.driver.owner = THIS_MODULE,
.probe = tps65910_gpio_probe,
- .remove = tps65910_gpio_remove,
};
static int __init tps65910_gpio_init(void)
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 4f2029c7da3a..acfd30a13a56 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -1,151 +1,149 @@
/*
- * Copyright 2011 Texas Instruments Inc.
+ * GPIO driver for TI TPS65912x PMICs
*
- * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This driver is based on wm8350 implementation.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the Arizona GPIO driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
#include <linux/gpio.h>
-#include <linux/mfd/core.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
+
#include <linux/mfd/tps65912.h>
-struct tps65912_gpio_data {
- struct tps65912 *tps65912;
+struct tps65912_gpio {
struct gpio_chip gpio_chip;
+ struct tps65912 *tps;
};
-static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+static int tps65912_gpio_get_direction(struct gpio_chip *gc,
+ unsigned offset)
{
- struct tps65912_gpio_data *tps65912_gpio = gpiochip_get_data(gc);
- struct tps65912 *tps65912 = tps65912_gpio->tps65912;
- int val;
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
+ int ret, val;
- if (val & GPIO_STS_MASK)
- return 1;
+ ret = regmap_read(gpio->tps->regmap, TPS65912_GPIO1 + offset, &val);
+ if (ret)
+ return ret;
- return 0;
+ if (val & GPIO_CFG_MASK)
+ return GPIOF_DIR_OUT;
+ else
+ return GPIOF_DIR_IN;
}
-static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65912_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
- struct tps65912_gpio_data *tps65912_gpio = gpiochip_get_data(gc);
- struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- if (value)
- tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK);
- else
- tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK);
+ return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK, 0);
}
-static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65912_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int value)
{
- struct tps65912_gpio_data *tps65912_gpio = gpiochip_get_data(gc);
- struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
/* Set the initial value */
- tps65912_gpio_set(gc, offset, value);
+ regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+
+ return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_CFG_MASK, GPIO_CFG_MASK);
+}
+
+static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
+ int ret, val;
+
+ ret = regmap_read(gpio->tps->regmap, TPS65912_GPIO1 + offset, &val);
+ if (ret)
+ return ret;
+
+ if (val & GPIO_STS_MASK)
+ return 1;
- return tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
- GPIO_CFG_MASK);
+ return 0;
}
-static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
+static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
{
- struct tps65912_gpio_data *tps65912_gpio = gpiochip_get_data(gc);
- struct tps65912 *tps65912 = tps65912_gpio->tps65912;
+ struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
- GPIO_CFG_MASK);
+ regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
}
static struct gpio_chip template_chip = {
- .label = "tps65912",
+ .label = "tps65912-gpio",
.owner = THIS_MODULE,
- .direction_input = tps65912_gpio_input,
- .direction_output = tps65912_gpio_output,
+ .get_direction = tps65912_gpio_get_direction,
+ .direction_input = tps65912_gpio_direction_input,
+ .direction_output = tps65912_gpio_direction_output,
.get = tps65912_gpio_get,
.set = tps65912_gpio_set,
- .can_sleep = true,
- .ngpio = 5,
.base = -1,
+ .ngpio = 5,
+ .can_sleep = true,
};
static int tps65912_gpio_probe(struct platform_device *pdev)
{
- struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
- struct tps65912_board *pdata = dev_get_platdata(tps65912->dev);
- struct tps65912_gpio_data *tps65912_gpio;
+ struct tps65912 *tps = dev_get_drvdata(pdev->dev.parent);
+ struct tps65912_gpio *gpio;
int ret;
- tps65912_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps65912_gpio),
- GFP_KERNEL);
- if (tps65912_gpio == NULL)
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
return -ENOMEM;
- tps65912_gpio->tps65912 = tps65912;
- tps65912_gpio->gpio_chip = template_chip;
- tps65912_gpio->gpio_chip.parent = &pdev->dev;
- if (pdata && pdata->gpio_base)
- tps65912_gpio->gpio_chip.base = pdata->gpio_base;
+ gpio->tps = dev_get_drvdata(pdev->dev.parent);
+ gpio->gpio_chip = template_chip;
+ gpio->gpio_chip.parent = tps->dev;
- ret = gpiochip_add_data(&tps65912_gpio->gpio_chip, tps65912_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip,
+ gpio);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register gpiochip, %d\n", ret);
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, tps65912_gpio);
-
- return ret;
-}
+ platform_set_drvdata(pdev, gpio);
-static int tps65912_gpio_remove(struct platform_device *pdev)
-{
- struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&tps65912_gpio->gpio_chip);
return 0;
}
+static const struct platform_device_id tps65912_gpio_id_table[] = {
+ { "tps65912-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65912_gpio_id_table);
+
static struct platform_driver tps65912_gpio_driver = {
.driver = {
.name = "tps65912-gpio",
},
.probe = tps65912_gpio_probe,
- .remove = tps65912_gpio_remove,
+ .id_table = tps65912_gpio_id_table,
};
+module_platform_driver(tps65912_gpio_driver);
-static int __init tps65912_gpio_init(void)
-{
- return platform_driver_register(&tps65912_gpio_driver);
-}
-subsys_initcall(tps65912_gpio_init);
-
-static void __exit tps65912_gpio_exit(void)
-{
- platform_driver_unregister(&tps65912_gpio_driver);
-}
-module_exit(tps65912_gpio_exit);
-
-MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
-MODULE_DESCRIPTION("GPIO interface for TPS65912 PMICs");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65912 GPIO driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps65912-gpio");
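
The rewritten tps65912_gpio_direction_output() above is careful about ordering: it latches the requested level via GPIO_SET_MASK before flipping GPIO_CFG_MASK to output mode, so the pin never drives a stale value. A standalone model of that two-step update (plain C; the mask values are invented stand-ins, not the real TPS65912 bit definitions):

/* Ordering model: write the level first, then switch to output. */
#include <stdio.h>

#define FAKE_SET_MASK	0x01	/* invented stand-in for GPIO_SET_MASK */
#define FAKE_CFG_MASK	0x04	/* invented stand-in for GPIO_CFG_MASK */

static unsigned char reg;	/* fake GPIOx register */

/* same shape as regmap_update_bits(map, reg, mask, val) */
static void update_bits(unsigned char mask, unsigned char val)
{
	reg = (reg & ~mask) | (val & mask);
}

int main(void)
{
	update_bits(FAKE_SET_MASK, FAKE_SET_MASK);	/* 1: latch level high */
	update_bits(FAKE_CFG_MASK, FAKE_CFG_MASK);	/* 2: now drive as output */
	printf("reg = 0x%02x\n", reg);			/* prints 0x05 */
	return 0;
}
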
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
new file mode 100644
index 000000000000..0c144a72f9af
--- /dev/null
+++ b/drivers/gpio/gpio-ts4800.c
@@ -0,0 +1,81 @@
+/*
+ * GPIO driver for the TS-4800 board
+ *
+ * Copyright (c) 2016 - Savoir-faire Linux
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define DEFAULT_PIN_NUMBER 16
+#define INPUT_REG_OFFSET 0x00
+#define OUTPUT_REG_OFFSET 0x02
+#define DIRECTION_REG_OFFSET 0x04
+
+static int ts4800_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct gpio_chip *chip;
+ struct resource *res;
+ void __iomem *base_addr;
+ int retval;
+ u32 ngpios;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(struct gpio_chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base_addr))
+ return PTR_ERR(base_addr);
+
+ node = pdev->dev.of_node;
+ if (!node)
+ return -EINVAL;
+
+ retval = of_property_read_u32(node, "ngpios", &ngpios);
+ if (retval == -EINVAL)
+ ngpios = DEFAULT_PIN_NUMBER;
+ else if (retval)
+ return retval;
+
+ retval = bgpio_init(chip, &pdev->dev, 2, base_addr + INPUT_REG_OFFSET,
+ base_addr + OUTPUT_REG_OFFSET, NULL,
+ base_addr + DIRECTION_REG_OFFSET, NULL, 0);
+ if (retval) {
+ dev_err(&pdev->dev, "bgpio_init failed\n");
+ return retval;
+ }
+
+ chip->ngpio = ngpios;
+
+ platform_set_drvdata(pdev, chip);
+
+ return devm_gpiochip_add_data(&pdev->dev, chip, NULL);
+}
+
+static const struct of_device_id ts4800_gpio_of_match[] = {
+ { .compatible = "technologic,ts4800-gpio", },
+ {},
+};
+
+static struct platform_driver ts4800_gpio_driver = {
+ .driver = {
+ .name = "ts4800-gpio",
+ .of_match_table = ts4800_gpio_of_match,
+ },
+ .probe = ts4800_gpio_probe,
+};
+
+module_platform_driver_probe(ts4800_gpio_driver, ts4800_gpio_probe);
+
+MODULE_AUTHOR("Julien Grossholtz <julien.grossholtz@savoirfairelinux.com>");
+MODULE_DESCRIPTION("TS4800 FPGA GPIO driver");
+MODULE_LICENSE("GPL v2");
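
The new ts4800 driver leans on bgpio_init(), the generic memory-mapped GPIO helper, which fills in all the gpio_chip accessors given the register width and the data/set/direction register addresses. An annotated restatement of the call made in ts4800_gpio_probe() above, with the argument roles as understood from the generic MMIO GPIO helper (a reading aid, not new code):

/* Annotated shape of the bgpio_init() call in ts4800_gpio_probe(). */
retval = bgpio_init(chip,				/* gpio_chip to fill in */
		    &pdev->dev,				/* parent device */
		    2,					/* register width: 2 bytes */
		    base_addr + INPUT_REG_OFFSET,	/* "dat": read line state */
		    base_addr + OUTPUT_REG_OFFSET,	/* "set": write line state */
		    NULL,				/* no separate clear register */
		    base_addr + DIRECTION_REG_OFFSET,	/* "dirout" register */
		    NULL,				/* no separate dirin register */
		    0);					/* default flags */
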
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 5f945083f9d8..6cfeba07f882 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -409,7 +409,7 @@ static int ts5500_dio_probe(struct platform_device *pdev)
break;
}
- ret = gpiochip_add_data(&priv->gpio_chip, priv);
+ ret = devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
if (ret) {
dev_err(dev, "failed to register the gpio chip\n");
return ret;
@@ -418,13 +418,10 @@ static int ts5500_dio_probe(struct platform_device *pdev)
ret = ts5500_enable_irq(priv);
if (ret) {
dev_err(dev, "invalid interrupt %d\n", priv->hwirq);
- goto cleanup;
+ return ret;
}
return 0;
-cleanup:
- gpiochip_remove(&priv->gpio_chip);
- return ret;
}
static int ts5500_dio_remove(struct platform_device *pdev)
@@ -432,7 +429,7 @@ static int ts5500_dio_remove(struct platform_device *pdev)
struct ts5500_priv *priv = platform_get_drvdata(pdev);
ts5500_disable_irq(priv);
- gpiochip_remove(&priv->gpio_chip);
+
return 0;
}
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index 8e9e9853f3bd..b780314cdfc9 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -100,7 +100,7 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
twl6040gpo_chip.of_node = twl6040_core_dev->of_node;
#endif
- ret = gpiochip_add_data(&twl6040gpo_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &twl6040gpo_chip, NULL);
if (ret < 0) {
dev_err(&pdev->dev, "could not register gpiochip, %d\n", ret);
twl6040gpo_chip.ngpio = 0;
@@ -109,12 +109,6 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
return ret;
}
-static int gpo_twl6040_remove(struct platform_device *pdev)
-{
- gpiochip_remove(&twl6040gpo_chip);
- return 0;
-}
-
/* Note: this hardware lives inside an I2C-based multi-function device. */
MODULE_ALIAS("platform:twl6040-gpo");
@@ -123,7 +117,6 @@ static struct platform_driver gpo_twl6040_driver = {
.name = "twl6040-gpo",
},
.probe = gpo_twl6040_probe,
- .remove = gpo_twl6040_remove,
};
module_platform_driver(gpo_twl6040_driver);
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 2c5cd46bfa6e..5dbe31bf6699 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -67,7 +67,7 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
ucb->gc.set = ucb1400_gpio_set;
ucb->gc.can_sleep = true;
- err = gpiochip_add_data(&ucb->gc, ucb);
+ err = devm_gpiochip_add_data(&dev->dev, &ucb->gc, ucb);
if (err)
goto err;
@@ -90,7 +90,6 @@ static int ucb1400_gpio_remove(struct platform_device *dev)
return err;
}
- gpiochip_remove(&ucb->gc);
return err;
}
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index 1170b035cb92..dec47aafd5cd 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -410,10 +410,10 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpioa.get = vprbrd_gpioa_get;
vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
- ret = gpiochip_add_data(&vb_gpio->gpioa, vb_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpioa, vb_gpio);
if (ret < 0) {
dev_err(vb_gpio->gpioa.parent, "could not add gpio a");
- goto err_gpioa;
+ return ret;
}
/* registering gpio b */
@@ -427,37 +427,21 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpiob.get = vprbrd_gpiob_get;
vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
- ret = gpiochip_add_data(&vb_gpio->gpiob, vb_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &vb_gpio->gpiob, vb_gpio);
if (ret < 0) {
dev_err(vb_gpio->gpiob.parent, "could not add gpio b");
- goto err_gpiob;
+ return ret;
}
platform_set_drvdata(pdev, vb_gpio);
return ret;
-
-err_gpiob:
- gpiochip_remove(&vb_gpio->gpioa);
-
-err_gpioa:
- return ret;
-}
-
-static int vprbrd_gpio_remove(struct platform_device *pdev)
-{
- struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&vb_gpio->gpiob);
-
- return 0;
}
static struct platform_driver vprbrd_gpio_driver = {
.driver.name = "viperboard-gpio",
.driver.owner = THIS_MODULE,
.probe = vprbrd_gpio_probe,
- .remove = vprbrd_gpio_remove,
};
static int __init vprbrd_gpio_init(void)
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 764999cc0794..8cdb9f7ec7e0 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -259,16 +259,7 @@ static int vx855gpio_probe(struct platform_device *pdev)
vx855gpio_gpio_setup(vg);
- return gpiochip_add_data(&vg->gpio, vg);
-}
-
-static int vx855gpio_remove(struct platform_device *pdev)
-{
- struct vx855_gpio *vg = platform_get_drvdata(pdev);
-
- gpiochip_remove(&vg->gpio);
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &vg->gpio, vg);
}
static struct platform_driver vx855gpio_driver = {
@@ -276,7 +267,6 @@ static struct platform_driver vx855gpio_driver = {
.name = MODULE_NAME,
},
.probe = vx855gpio_probe,
- .remove = vx855gpio_remove,
};
module_platform_driver(vx855gpio_driver);
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 98390070fb64..18cb0f534b91 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -259,7 +259,8 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
else
wm831x_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&wm831x_gpio->gpio_chip, wm831x_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &wm831x_gpio->gpio_chip,
+ wm831x_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -270,19 +271,10 @@ static int wm831x_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int wm831x_gpio_remove(struct platform_device *pdev)
-{
- struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&wm831x_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver wm831x_gpio_driver = {
.driver.name = "wm831x-gpio",
.driver.owner = THIS_MODULE,
.probe = wm831x_gpio_probe,
- .remove = wm831x_gpio_remove,
};
static int __init wm831x_gpio_init(void)
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 0a306b4baa73..07d45a3b205a 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -125,7 +125,8 @@ static int wm8350_gpio_probe(struct platform_device *pdev)
else
wm8350_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&wm8350_gpio->gpio_chip, wm8350_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &wm8350_gpio->gpio_chip,
+ wm8350_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
return ret;
@@ -136,19 +137,10 @@ static int wm8350_gpio_probe(struct platform_device *pdev)
return ret;
}
-static int wm8350_gpio_remove(struct platform_device *pdev)
-{
- struct wm8350_gpio_data *wm8350_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&wm8350_gpio->gpio_chip);
- return 0;
-}
-
static struct platform_driver wm8350_gpio_driver = {
.driver.name = "wm8350-gpio",
.driver.owner = THIS_MODULE,
.probe = wm8350_gpio_probe,
- .remove = wm8350_gpio_remove,
};
static int __init wm8350_gpio_init(void)
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 3ae4c1597494..b089df99a0d0 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -261,34 +261,23 @@ static int wm8994_gpio_probe(struct platform_device *pdev)
else
wm8994_gpio->gpio_chip.base = -1;
- ret = gpiochip_add_data(&wm8994_gpio->gpio_chip, wm8994_gpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &wm8994_gpio->gpio_chip,
+ wm8994_gpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, wm8994_gpio);
return ret;
-
-err:
- return ret;
-}
-
-static int wm8994_gpio_remove(struct platform_device *pdev)
-{
- struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&wm8994_gpio->gpio_chip);
- return 0;
}
static struct platform_driver wm8994_gpio_driver = {
.driver.name = "wm8994-gpio",
.driver.owner = THIS_MODULE,
.probe = wm8994_gpio_probe,
- .remove = wm8994_gpio_remove,
};
static int __init wm8994_gpio_init(void)
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
new file mode 100644
index 000000000000..51f41e8fd21e
--- /dev/null
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -0,0 +1,427 @@
+/*
+ * GPIO driver for the WinSystems WS16C48
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+static unsigned ws16c48_base;
+module_param(ws16c48_base, uint, 0);
+MODULE_PARM_DESC(ws16c48_base, "WinSystems WS16C48 base address");
+static unsigned ws16c48_irq;
+module_param(ws16c48_irq, uint, 0);
+MODULE_PARM_DESC(ws16c48_irq, "WinSystems WS16C48 interrupt line number");
+
+/**
+ * struct ws16c48_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @io_state: bit I/O state (whether bit is set to input or output)
+ * @out_state: output bits state
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @irq_mask: I/O bits affected by interrupts
+ * @flow_mask: IRQ flow type mask for the respective I/O bits
+ * @base: base port address of the GPIO device
+ * @irq: Interrupt line number
+ */
+struct ws16c48_gpio {
+ struct gpio_chip chip;
+ unsigned char io_state[6];
+ unsigned char out_state[6];
+ spinlock_t lock;
+ unsigned long irq_mask;
+ unsigned long flow_mask;
+ unsigned base;
+ unsigned irq;
+};
+
+static int ws16c48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+
+ return !!(ws16c48gpio->io_state[port] & mask);
+}
+
+static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ ws16c48gpio->io_state[port] |= mask;
+ ws16c48gpio->out_state[port] &= ~mask;
+ outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+
+ return 0;
+}
+
+static int ws16c48_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ ws16c48gpio->io_state[port] &= ~mask;
+ if (value)
+ ws16c48gpio->out_state[port] |= mask;
+ else
+ ws16c48gpio->out_state[port] &= ~mask;
+ outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+
+ return 0;
+}
+
+static int ws16c48_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+ unsigned port_state;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ /* ensure that GPIO is set for input */
+ if (!(ws16c48gpio->io_state[port] & mask)) {
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ return -EINVAL;
+ }
+
+ port_state = inb(ws16c48gpio->base + port);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+
+ return !!(port_state & mask);
+}
+
+static void ws16c48_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ /* ensure that GPIO is set for output */
+ if (ws16c48gpio->io_state[port] & mask) {
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ return;
+ }
+
+ if (value)
+ ws16c48gpio->out_state[port] |= mask;
+ else
+ ws16c48gpio->out_state[port] &= ~mask;
+ outb(ws16c48gpio->out_state[port], ws16c48gpio->base + port);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+}
+
+static void ws16c48_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ const unsigned port = offset / 8;
+ const unsigned mask = BIT(offset % 8);
+ unsigned long flags;
+ unsigned port_state;
+
+ /* only the first 3 ports support interrupts */
+ if (port > 2)
+ return;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ port_state = ws16c48gpio->irq_mask >> (8*port);
+
+ outb(0x80, ws16c48gpio->base + 7);
+ outb(port_state & ~mask, ws16c48gpio->base + 8 + port);
+ outb(port_state | mask, ws16c48gpio->base + 8 + port);
+ outb(0xC0, ws16c48gpio->base + 7);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+}
+
+static void ws16c48_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ const unsigned long mask = BIT(offset);
+ const unsigned port = offset / 8;
+ unsigned long flags;
+
+ /* only the first 3 ports support interrupts */
+ if (port > 2)
+ return;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ ws16c48gpio->irq_mask &= ~mask;
+
+ outb(0x80, ws16c48gpio->base + 7);
+ outb(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
+ outb(0xC0, ws16c48gpio->base + 7);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+}
+
+static void ws16c48_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ const unsigned long mask = BIT(offset);
+ const unsigned port = offset / 8;
+ unsigned long flags;
+
+ /* only the first 3 ports support interrupts */
+ if (port > 2)
+ return;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ ws16c48gpio->irq_mask |= mask;
+
+ outb(0x80, ws16c48gpio->base + 7);
+ outb(ws16c48gpio->irq_mask >> (8*port), ws16c48gpio->base + 8 + port);
+ outb(0xC0, ws16c48gpio->base + 7);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+}
+
+static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
+ const unsigned long offset = irqd_to_hwirq(data);
+ const unsigned long mask = BIT(offset);
+ const unsigned port = offset / 8;
+ unsigned long flags;
+
+ /* only the first 3 ports support interrupts */
+ if (port > 2)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ws16c48gpio->lock, flags);
+
+ switch (flow_type) {
+ case IRQ_TYPE_NONE:
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ ws16c48gpio->flow_mask |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ws16c48gpio->flow_mask &= ~mask;
+ break;
+ default:
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ return -EINVAL;
+ }
+
+ outb(0x40, ws16c48gpio->base + 7);
+ outb(ws16c48gpio->flow_mask >> (8*port), ws16c48gpio->base + 8 + port);
+ outb(0xC0, ws16c48gpio->base + 7);
+
+ spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip ws16c48_irqchip = {
+ .name = "ws16c48",
+ .irq_ack = ws16c48_irq_ack,
+ .irq_mask = ws16c48_irq_mask,
+ .irq_unmask = ws16c48_irq_unmask,
+ .irq_set_type = ws16c48_irq_set_type
+};
+
+static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
+{
+ struct ws16c48_gpio *const ws16c48gpio = dev_id;
+ struct gpio_chip *const chip = &ws16c48gpio->chip;
+ unsigned long int_pending;
+ unsigned long port;
+ unsigned long int_id;
+ unsigned long gpio;
+
+ int_pending = inb(ws16c48gpio->base + 6) & 0x7;
+ if (!int_pending)
+ return IRQ_NONE;
+
+ /* loop until all pending interrupts are handled */
+ do {
+ for_each_set_bit(port, &int_pending, 3) {
+ int_id = inb(ws16c48gpio->base + 8 + port);
+ for_each_set_bit(gpio, &int_id, 8)
+ generic_handle_irq(irq_find_mapping(
+ chip->irqdomain, gpio + 8*port));
+ }
+
+ int_pending = inb(ws16c48gpio->base + 6) & 0x7;
+ } while (int_pending);
+
+ return IRQ_HANDLED;
+}
+
+static int __init ws16c48_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ws16c48_gpio *ws16c48gpio;
+ const unsigned base = ws16c48_base;
+ const unsigned extent = 16;
+ const char *const name = dev_name(dev);
+ int err;
+ const unsigned irq = ws16c48_irq;
+
+ ws16c48gpio = devm_kzalloc(dev, sizeof(*ws16c48gpio), GFP_KERNEL);
+ if (!ws16c48gpio)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base, extent, name)) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base, base + extent);
+ return -EBUSY;
+ }
+
+ ws16c48gpio->chip.label = name;
+ ws16c48gpio->chip.parent = dev;
+ ws16c48gpio->chip.owner = THIS_MODULE;
+ ws16c48gpio->chip.base = -1;
+ ws16c48gpio->chip.ngpio = 48;
+ ws16c48gpio->chip.get_direction = ws16c48_gpio_get_direction;
+ ws16c48gpio->chip.direction_input = ws16c48_gpio_direction_input;
+ ws16c48gpio->chip.direction_output = ws16c48_gpio_direction_output;
+ ws16c48gpio->chip.get = ws16c48_gpio_get;
+ ws16c48gpio->chip.set = ws16c48_gpio_set;
+ ws16c48gpio->base = base;
+ ws16c48gpio->irq = irq;
+
+ spin_lock_init(&ws16c48gpio->lock);
+
+ dev_set_drvdata(dev, ws16c48gpio);
+
+ err = gpiochip_add_data(&ws16c48gpio->chip, ws16c48gpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ /* Disable IRQ by default */
+ outb(0x80, base + 7);
+ outb(0, base + 8);
+ outb(0, base + 9);
+ outb(0, base + 10);
+ outb(0xC0, base + 7);
+
+ err = gpiochip_irqchip_add(&ws16c48gpio->chip, &ws16c48_irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "Could not add irqchip (%d)\n", err);
+ goto err_gpiochip_remove;
+ }
+
+ err = request_irq(irq, ws16c48_irq_handler, IRQF_SHARED, name,
+ ws16c48gpio);
+ if (err) {
+ dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ goto err_gpiochip_remove;
+ }
+
+ return 0;
+
+err_gpiochip_remove:
+ gpiochip_remove(&ws16c48gpio->chip);
+ return err;
+}
+
+static int ws16c48_remove(struct platform_device *pdev)
+{
+ struct ws16c48_gpio *const ws16c48gpio = platform_get_drvdata(pdev);
+
+ free_irq(ws16c48gpio->irq, ws16c48gpio);
+ gpiochip_remove(&ws16c48gpio->chip);
+
+ return 0;
+}
+
+static struct platform_device *ws16c48_device;
+
+static struct platform_driver ws16c48_driver = {
+ .driver = {
+ .name = "ws16c48"
+ },
+ .remove = ws16c48_remove
+};
+
+static void __exit ws16c48_exit(void)
+{
+ platform_device_unregister(ws16c48_device);
+ platform_driver_unregister(&ws16c48_driver);
+}
+
+static int __init ws16c48_init(void)
+{
+ int err;
+
+ ws16c48_device = platform_device_alloc(ws16c48_driver.driver.name, -1);
+ if (!ws16c48_device)
+ return -ENOMEM;
+
+ err = platform_device_add(ws16c48_device);
+ if (err)
+ goto err_platform_device;
+
+ err = platform_driver_probe(&ws16c48_driver, ws16c48_probe);
+ if (err)
+ goto err_platform_driver;
+
+ return 0;
+
+err_platform_driver:
+ platform_device_del(ws16c48_device);
+err_platform_device:
+ platform_device_put(ws16c48_device);
+ return err;
+}
+
+module_init(ws16c48_init);
+module_exit(ws16c48_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("WinSystems WS16C48 GPIO driver");
+MODULE_LICENSE("GPL v2");
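
Every callback in the new driver repeats the same decomposition: the 48 lines are spread over six 8-bit data ports at base..base+5, so a flat GPIO offset splits into a port index and a bit mask. An illustrative helper (not part of the patch) showing the arithmetic that ws16c48_gpio_get() and friends inline:

#include <linux/bitops.h>
#include <linux/io.h>

/* Illustrative only: map a flat GPIO offset (0..47) onto its data
 * port and bit, mirroring the port/mask math used in the driver. */
static int ws16c48_read_line(unsigned base, unsigned offset)
{
	const unsigned port = offset / 8;	/* ports at base..base+5 */
	const unsigned mask = BIT(offset % 8);	/* bit within that port */

	return !!(inb(base + port) & mask);
}

Since the card is not discoverable, the module parameters above supply the board resources at load time, e.g. ws16c48_base=0x120 ws16c48_irq=5 (values illustrative).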
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 282004deb5d4..31cbcb84cfaf 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -2,8 +2,9 @@
* AppliedMicro X-Gene SoC GPIO-Standby Driver
*
* Copyright (c) 2014, Applied Micro Circuits Corporation
- * Author: Tin Huynh <tnhuynh@apm.com>.
- * Y Vo <yvo@apm.com>.
+ * Author: Tin Huynh <tnhuynh@apm.com>.
+ * Y Vo <yvo@apm.com>.
+ * Quan Nguyen <qnguyen@apm.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -28,9 +29,14 @@
#include "gpiolib.h"
-#define XGENE_MAX_GPIO_DS 22
-#define XGENE_MAX_GPIO_DS_IRQ 6
+/* Common property names */
+#define XGENE_NIRQ_PROPERTY "apm,nr-irqs"
+#define XGENE_NGPIO_PROPERTY "apm,nr-gpios"
+#define XGENE_IRQ_START_PROPERTY "apm,irq-start"
+#define XGENE_DFLT_MAX_NGPIO 22
+#define XGENE_DFLT_MAX_NIRQ 6
+#define XGENE_DFLT_IRQ_START_PIN 8
#define GPIO_MASK(x) (1U << ((x) % 32))
#define MPA_GPIO_INT_LVL 0x0290
@@ -39,19 +45,32 @@
#define MPA_GPIO_IN_ADDR 0x02a4
#define MPA_GPIO_SEL_LO 0x0294
+#define GPIO_INT_LEVEL_H 0x000001
+#define GPIO_INT_LEVEL_L 0x000000
+
/**
* struct xgene_gpio_sb - GPIO-Standby private data structure.
* @gc: memory-mapped GPIO controllers.
- * @irq: Mapping GPIO pins and interrupt number
- * nirq: Number of GPIO pins that supports interrupt
+ * @regs: GPIO register base address
+ * @irq_domain: GPIO interrupt domain
+ * @irq_start: First GPIO pin that supports interrupts
+ * @nirq: Number of GPIO pins that support interrupts
+ * @parent_irq_base: First parent HWIRQ number
*/
struct xgene_gpio_sb {
struct gpio_chip gc;
- u32 *irq;
- u32 nirq;
+ void __iomem *regs;
+ struct irq_domain *irq_domain;
+ u16 irq_start;
+ u16 nirq;
+ u16 parent_irq_base;
};
-static void xgene_gpio_set_bit(struct gpio_chip *gc, void __iomem *reg, u32 gpio, int val)
+#define HWIRQ_TO_GPIO(priv, hwirq) ((hwirq) + (priv)->irq_start)
+#define GPIO_TO_HWIRQ(priv, gpio) ((gpio) - (priv)->irq_start)
+
+static void xgene_gpio_set_bit(struct gpio_chip *gc,
+ void __iomem *reg, u32 gpio, int val)
{
u32 data;
@@ -63,23 +82,170 @@ static void xgene_gpio_set_bit(struct gpio_chip *gc, void __iomem *reg, u32 gpio
gc->write_reg(reg, data);
}
-static int apm_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
+static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
+ int gpio = HWIRQ_TO_GPIO(priv, d->hwirq);
+ int lvl_type = GPIO_INT_LEVEL_H;
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ lvl_type = GPIO_INT_LEVEL_H;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
+ lvl_type = GPIO_INT_LEVEL_L;
+ break;
+ default:
+ break;
+ }
+
+ xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpio * 2, 1);
+ xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_INT_LVL,
+ d->hwirq, lvl_type);
+
+ /* Propagate IRQ type setting to parent */
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ return irq_chip_set_type_parent(d, IRQ_TYPE_EDGE_RISING);
+ else
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip xgene_gpio_sb_irq_chip = {
+ .name = "sbgpio",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = xgene_gpio_sb_irq_set_type,
+};
+
+static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
{
struct xgene_gpio_sb *priv = gpiochip_get_data(gc);
+ struct irq_fwspec fwspec;
+
+ if ((gpio < priv->irq_start) ||
+ (gpio >= HWIRQ_TO_GPIO(priv, priv->nirq)))
+ return -ENXIO;
+
+ if (gc->parent->of_node)
+ fwspec.fwnode = of_node_to_fwnode(gc->parent->of_node);
+ else
+ fwspec.fwnode = gc->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = GPIO_TO_HWIRQ(priv, gpio);
+ fwspec.param[1] = IRQ_TYPE_NONE;
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static void xgene_gpio_sb_domain_activate(struct irq_domain *d,
+ struct irq_data *irq_data)
+{
+ struct xgene_gpio_sb *priv = d->host_data;
+ u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+
+ if (gpiochip_lock_as_irq(&priv->gc, gpio)) {
+ dev_err(priv->gc.parent,
+ "Unable to configure XGene GPIO standby pin %d as IRQ\n",
+ gpio);
+ return;
+ }
+
+ xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpio * 2, 1);
+}
+
+static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
+ struct irq_data *irq_data)
+{
+ struct xgene_gpio_sb *priv = d->host_data;
+ u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
+
+ gpiochip_unlock_as_irq(&priv->gc, gpio);
+ xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpio * 2, 0);
+}
+
+static int xgene_gpio_sb_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct xgene_gpio_sb *priv = d->host_data;
+
+ if ((fwspec->param_count != 2) ||
+ (fwspec->param[0] >= priv->nirq))
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+}
+
+static int xgene_gpio_sb_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct xgene_gpio_sb *priv = domain->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int i;
+
+ hwirq = fwspec->param[0];
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &xgene_gpio_sb_irq_chip, priv);
- if (priv->irq[gpio])
- return priv->irq[gpio];
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ if (is_of_node(parent_fwspec.fwnode)) {
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0; /* SPI */
+ /* Skip SGIs and PPIs */
+ parent_fwspec.param[1] = hwirq + priv->parent_irq_base - 32;
+ parent_fwspec.param[2] = fwspec->param[1];
+ } else if (is_fwnode_irqchip(parent_fwspec.fwnode)) {
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = hwirq + priv->parent_irq_base;
+ parent_fwspec.param[1] = fwspec->param[1];
+ } else
+ return -EINVAL;
- return -ENXIO;
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
}
+static void xgene_gpio_sb_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d;
+ unsigned int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ d = irq_domain_get_irq_data(domain, virq + i);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops xgene_gpio_sb_domain_ops = {
+ .translate = xgene_gpio_sb_domain_translate,
+ .alloc = xgene_gpio_sb_domain_alloc,
+ .free = xgene_gpio_sb_domain_free,
+ .activate = xgene_gpio_sb_domain_activate,
+ .deactivate = xgene_gpio_sb_domain_deactivate,
+};
+
static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv;
- u32 ret, i;
- u32 default_lines[] = {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D};
+ int ret;
struct resource *res;
void __iomem *regs;
+ struct irq_domain *parent_domain = NULL;
+ struct fwnode_handle *fwnode;
+ u32 val32;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -90,6 +256,18 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
+ priv->regs = regs;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret > 0) {
+ priv->parent_irq_base = irq_get_irq_data(ret)->hwirq;
+ parent_domain = irq_get_irq_data(ret)->domain;
+ }
+ if (!parent_domain) {
+ dev_err(&pdev->dev, "unable to obtain parent domain\n");
+ return -ENODEV;
+ }
+
ret = bgpio_init(&priv->gc, &pdev->dev, 4,
regs + MPA_GPIO_IN_ADDR,
regs + MPA_GPIO_OUT_ADDR, NULL,
@@ -97,30 +275,51 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->gc.to_irq = apm_gpio_sb_to_irq;
- priv->gc.ngpio = XGENE_MAX_GPIO_DS;
+ priv->gc.to_irq = xgene_gpio_sb_to_irq;
- priv->nirq = XGENE_MAX_GPIO_DS_IRQ;
+ /* Retrieve the start IRQ pin; use the default if the property is not found */
+ priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
+ if (!device_property_read_u32(&pdev->dev,
+ XGENE_IRQ_START_PROPERTY, &val32))
+ priv->irq_start = val32;
- priv->irq = devm_kzalloc(&pdev->dev, sizeof(u32) * XGENE_MAX_GPIO_DS,
- GFP_KERNEL);
- if (!priv->irq)
- return -ENOMEM;
+ /* Retrieve the number of IRQs; use the default if the property is not found */
+ priv->nirq = XGENE_DFLT_MAX_NIRQ;
+ if (!device_property_read_u32(&pdev->dev, XGENE_NIRQ_PROPERTY, &val32))
+ priv->nirq = val32;
- for (i = 0; i < priv->nirq; i++) {
- priv->irq[default_lines[i]] = platform_get_irq(pdev, i);
- xgene_gpio_set_bit(&priv->gc, regs + MPA_GPIO_SEL_LO,
- default_lines[i] * 2, 1);
- xgene_gpio_set_bit(&priv->gc, regs + MPA_GPIO_INT_LVL, i, 1);
- }
+ /* Retrieve the number of GPIOs; use the default if the property is not found */
+ priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
+ if (!device_property_read_u32(&pdev->dev, XGENE_NGPIO_PROPERTY, &val32))
+ priv->gc.ngpio = val32;
+
+ dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
+ priv->gc.ngpio, priv->nirq, priv->irq_start);
platform_set_drvdata(pdev, priv);
- ret = gpiochip_add_data(&priv->gc, priv);
- if (ret)
- dev_err(&pdev->dev, "failed to register X-Gene GPIO Standby driver\n");
+ if (pdev->dev.of_node)
+ fwnode = of_node_to_fwnode(pdev->dev.of_node);
else
- dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
+ fwnode = pdev->dev.fwnode;
+
+ priv->irq_domain = irq_domain_create_hierarchy(parent_domain,
+ 0, priv->nirq, fwnode,
+ &xgene_gpio_sb_domain_ops, priv);
+ if (!priv->irq_domain)
+ return -ENODEV;
+
+ priv->gc.irqdomain = priv->irq_domain;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register X-Gene GPIO Standby driver\n");
+ irq_domain_remove(priv->irq_domain);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
if (priv->nirq > 0) {
/* Register interrupt handlers for gpio signaled acpi events */
@@ -138,7 +337,8 @@ static int xgene_gpio_sb_remove(struct platform_device *pdev)
acpi_gpiochip_free_interrupts(&priv->gc);
}
- gpiochip_remove(&priv->gc);
+ irq_domain_remove(priv->irq_domain);
+
return 0;
}
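
With the hierarchical domain in place, consumers no longer index a per-pin IRQ table: gpiod_to_irq() funnels into xgene_gpio_sb_to_irq(), which builds a two-cell fwspec that the new domain ops translate and allocate toward the parent interrupt controller. A hedged consumer-side sketch, assuming the defaults above (irq_start = 8, nirq = 6) and a hypothetical "wake" consumer name:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

/* Hypothetical consumer: only standby pins 8..13 map to IRQs with the
 * default properties; anything else makes gpiod_to_irq() fail. */
static int foo_request_wake_irq(struct device *dev, irq_handler_t handler)
{
	struct gpio_desc *desc;
	int irq;

	desc = devm_gpiod_get(dev, "wake", GPIOD_IN);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	irq = gpiod_to_irq(desc);	/* via xgene_gpio_sb_to_irq() */
	if (irq < 0)
		return irq;

	return devm_request_irq(dev, irq, handler, 0, "foo-wake", dev);
}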
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 592e9cdf9c53..0dc916191689 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -173,6 +173,11 @@ static int xgene_gpio_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err;
+ }
+
gpio->base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
if (!gpio->base) {
@@ -193,7 +198,7 @@ static int xgene_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gpio);
- err = gpiochip_add_data(&gpio->chip, gpio);
+ err = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
if (err) {
dev_err(&pdev->dev,
"failed to register gpiochip.\n");
@@ -207,14 +212,6 @@ err:
return err;
}
-static int xgene_gpio_remove(struct platform_device *pdev)
-{
- struct xgene_gpio *gpio = platform_get_drvdata(pdev);
-
- gpiochip_remove(&gpio->chip);
- return 0;
-}
-
static const struct of_device_id xgene_gpio_of_match[] = {
{ .compatible = "apm,xgene-gpio", },
{},
@@ -228,7 +225,6 @@ static struct platform_driver xgene_gpio_driver = {
.pm = XGENE_GPIO_PM_OPS,
},
.probe = xgene_gpio_probe,
- .remove = xgene_gpio_remove,
};
module_platform_driver(xgene_gpio_driver);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 540cbc88c7a2..682070d20f00 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -71,29 +71,29 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
* controller uses pin controller and the mapping is not contiguous the
* offset might be different.
*/
-static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin)
+static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_device *gdev, int pin)
{
struct gpio_pin_range *pin_range;
/* If there are no ranges in this chip, use 1:1 mapping */
- if (list_empty(&chip->pin_ranges))
+ if (list_empty(&gdev->pin_ranges))
return pin;
- list_for_each_entry(pin_range, &chip->pin_ranges, node) {
+ list_for_each_entry(pin_range, &gdev->pin_ranges, node) {
const struct pinctrl_gpio_range *range = &pin_range->range;
int i;
if (range->pins) {
for (i = 0; i < range->npins; i++) {
if (range->pins[i] == pin)
- return range->base + i - chip->base;
+ return range->base + i - gdev->base;
}
} else {
if (pin >= range->pin_base &&
pin < range->pin_base + range->npins) {
unsigned gpio_base;
- gpio_base = range->base - chip->base;
+ gpio_base = range->base - gdev->base;
return gpio_base + pin - range->pin_base;
}
}
@@ -102,7 +102,7 @@ static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin)
return -EINVAL;
}
#else
-static inline int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip,
+static inline int acpi_gpiochip_pin_to_gpio_offset(struct gpio_device *gdev,
int pin)
{
return pin;
@@ -134,7 +134,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
if (!chip)
return ERR_PTR(-EPROBE_DEFER);
- offset = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ offset = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin);
if (offset < 0)
return ERR_PTR(offset);
@@ -202,7 +202,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
if (!handler)
return AE_BAD_PARAMETER;
- pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin);
if (pin < 0)
return AE_BAD_PARAMETER;
@@ -673,7 +673,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
struct gpio_desc *desc;
bool found;
- pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+ pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin);
if (pin < 0) {
status = AE_BAD_PARAMETER;
goto out;
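
The offset helper now walks the pin ranges that have moved into struct gpio_device. For the contiguous-range branch, the arithmetic reduces to the sketch below (an illustrative helper, not code from this patch):

#include <linux/pinctrl/pinctrl.h>

/* Illustrative sketch of the contiguous-range branch above: turn a
 * pin-controller pin number into a chip-relative GPIO offset. */
static int pin_to_gpio_offset(const struct pinctrl_gpio_range *range,
			      int gdev_base, int pin)
{
	if (pin < range->pin_base || pin >= range->pin_base + range->npins)
		return -EINVAL;	/* pin not covered by this range */

	/* range->base is a global GPIO number; rebase to the chip */
	return (range->base - gdev_base) + (pin - range->pin_base);
}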
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 405dfcaadc4c..932e510aec50 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -180,7 +180,7 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
* Remove this redundant call (along with the corresponding
* unlock) when those drivers have been fixed.
*/
- ret = gpiochip_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ ret = gpiochip_lock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
if (ret < 0)
goto err_put_kn;
@@ -194,7 +194,7 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
return 0;
err_unlock:
- gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ gpiochip_unlock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
err_put_kn:
sysfs_put(data->value_kn);
@@ -212,7 +212,7 @@ static void gpio_sysfs_free_irq(struct device *dev)
data->irq_flags = 0;
free_irq(data->irq, data);
- gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ gpiochip_unlock_as_irq(desc->gdev->chip, gpio_chip_hwgpio(desc));
sysfs_put(data->value_kn);
}
@@ -547,6 +547,7 @@ static struct class gpio_class = {
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
struct gpio_chip *chip;
+ struct gpio_device *gdev;
struct gpiod_data *data;
unsigned long flags;
int status;
@@ -565,12 +566,13 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
return -EINVAL;
}
- chip = desc->chip;
+ gdev = desc->gdev;
+ chip = gdev->chip;
mutex_lock(&sysfs_lock);
/* check if chip is being removed */
- if (!chip || !chip->cdev) {
+ if (!chip || !gdev->mockdev) {
status = -ENODEV;
goto err_unlock;
}
@@ -605,7 +607,7 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (chip->names && chip->names[offset])
ioname = chip->names[offset];
- dev = device_create_with_groups(&gpio_class, chip->parent,
+ dev = device_create_with_groups(&gpio_class, &gdev->dev,
MKDEV(0, 0), data, gpio_groups,
ioname ? ioname : "gpio%u",
desc_to_gpio(desc));
@@ -716,9 +718,11 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(gpiod_unexport);
-int gpiochip_sysfs_register(struct gpio_chip *chip)
+int gpiochip_sysfs_register(struct gpio_device *gdev)
{
struct device *dev;
+ struct device *parent;
+ struct gpio_chip *chip = gdev->chip;
/*
* Many systems add gpio chips for SOC support very early,
@@ -729,8 +733,17 @@ int gpiochip_sysfs_register(struct gpio_chip *chip)
if (!gpio_class.p)
return 0;
+ /*
+ * For sysfs backward compatibility, keep parenting the class
+ * device to the gpio_chip parent field, if set.
+ */
+ if (chip->parent)
+ parent = chip->parent;
+ else
+ parent = &gdev->dev;
+
/* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, chip->parent,
+ dev = device_create_with_groups(&gpio_class, parent,
MKDEV(0, 0),
chip, gpiochip_groups,
"gpiochip%d", chip->base);
@@ -738,30 +751,31 @@ int gpiochip_sysfs_register(struct gpio_chip *chip)
return PTR_ERR(dev);
mutex_lock(&sysfs_lock);
- chip->cdev = dev;
+ gdev->mockdev = dev;
mutex_unlock(&sysfs_lock);
return 0;
}
-void gpiochip_sysfs_unregister(struct gpio_chip *chip)
+void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
struct gpio_desc *desc;
+ struct gpio_chip *chip = gdev->chip;
unsigned int i;
- if (!chip->cdev)
+ if (!gdev->mockdev)
return;
- device_unregister(chip->cdev);
+ device_unregister(gdev->mockdev);
/* prevent further gpiod exports */
mutex_lock(&sysfs_lock);
- chip->cdev = NULL;
+ gdev->mockdev = NULL;
mutex_unlock(&sysfs_lock);
/* unregister gpiod class devices owned by sysfs */
for (i = 0; i < chip->ngpio; i++) {
- desc = &chip->desc[i];
+ desc = &gdev->descs[i];
if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
gpiod_free(desc);
}
@@ -771,7 +785,7 @@ static int __init gpiolib_sysfs_init(void)
{
int status;
unsigned long flags;
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
status = class_register(&gpio_class);
if (status < 0)
@@ -784,8 +798,8 @@ static int __init gpiolib_sysfs_init(void)
* registered, and so arch_initcall() can always gpio_export().
*/
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list) {
- if (chip->cdev)
+ list_for_each_entry(gdev, &gpio_devices, list) {
+ if (gdev->mockdev)
continue;
/*
@@ -798,12 +812,11 @@ static int __init gpiolib_sysfs_init(void)
* gpio_lock prevents us from doing this.
*/
spin_unlock_irqrestore(&gpio_lock, flags);
- status = gpiochip_sysfs_register(chip);
+ status = gpiochip_sysfs_register(gdev);
spin_lock_irqsave(&gpio_lock, flags);
}
spin_unlock_irqrestore(&gpio_lock, flags);
-
return status;
}
postcore_initcall(gpiolib_sysfs_init);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5c1ba879f889..b747c76fd2b1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -16,6 +16,11 @@
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/idr.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -42,6 +47,14 @@
#define extra_checks 0
#endif
+/* Device and char device-related information */
+static DEFINE_IDA(gpio_ida);
+static dev_t gpio_devt;
+#define GPIO_DEV_MAX 256 /* 256 GPIO chip devices supported */
+static struct bus_type gpio_bus_type = {
+ .name = "gpio",
+};
+
/* gpio_lock prevents conflicts during gpio_desc[] table updates.
* While any GPIO is requested, its gpio_chip is not removable;
* each GPIO's "requested" flag serves as a lock and refcount.
@@ -50,12 +63,12 @@ DEFINE_SPINLOCK(gpio_lock);
static DEFINE_MUTEX(gpio_lookup_lock);
static LIST_HEAD(gpio_lookup_list);
-LIST_HEAD(gpio_chips);
-
+LIST_HEAD(gpio_devices);
static void gpiochip_free_hogs(struct gpio_chip *chip);
static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static bool gpiolib_initialized;
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
@@ -67,15 +80,16 @@ static inline void desc_set_label(struct gpio_desc *d, const char *label)
*/
struct gpio_desc *gpio_to_desc(unsigned gpio)
{
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list) {
- if (chip->base <= gpio && chip->base + chip->ngpio > gpio) {
+ list_for_each_entry(gdev, &gpio_devices, list) {
+ if (gdev->base <= gpio &&
+ gdev->base + gdev->ngpio > gpio) {
spin_unlock_irqrestore(&gpio_lock, flags);
- return &chip->desc[gpio - chip->base];
+ return &gdev->descs[gpio - gdev->base];
}
}
@@ -94,10 +108,12 @@ EXPORT_SYMBOL_GPL(gpio_to_desc);
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
u16 hwnum)
{
- if (hwnum >= chip->ngpio)
+ struct gpio_device *gdev = chip->gpiodev;
+
+ if (hwnum >= gdev->ngpio)
return ERR_PTR(-EINVAL);
- return &chip->desc[hwnum];
+ return &gdev->descs[hwnum];
}
/**
@@ -107,7 +123,7 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
*/
int desc_to_gpio(const struct gpio_desc *desc)
{
- return desc->chip->base + (desc - &desc->chip->desc[0]);
+ return desc->gdev->base + (desc - &desc->gdev->descs[0]);
}
EXPORT_SYMBOL_GPL(desc_to_gpio);
@@ -118,23 +134,25 @@ EXPORT_SYMBOL_GPL(desc_to_gpio);
*/
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- return desc ? desc->chip : NULL;
+ if (!desc || !desc->gdev || !desc->gdev->chip)
+ return NULL;
+ return desc->gdev->chip;
}
EXPORT_SYMBOL_GPL(gpiod_to_chip);
/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
static int gpiochip_find_base(int ngpio)
{
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
int base = ARCH_NR_GPIOS - ngpio;
- list_for_each_entry_reverse(chip, &gpio_chips, list) {
+ list_for_each_entry_reverse(gdev, &gpio_devices, list) {
/* found a free space? */
- if (chip->base + chip->ngpio <= base)
+ if (gdev->base + gdev->ngpio <= base)
break;
else
/* nope, check the space right before the chip */
- base = chip->base - ngpio;
+ base = gdev->base - ngpio;
}
if (gpio_is_valid(base)) {
@@ -187,57 +205,45 @@ EXPORT_SYMBOL_GPL(gpiod_get_direction);
* Return -EBUSY if the new chip overlaps with some other chip's integer
* space.
*/
-static int gpiochip_add_to_list(struct gpio_chip *chip)
+static int gpiodev_add_to_list(struct gpio_device *gdev)
{
- struct gpio_chip *iterator;
- struct gpio_chip *previous = NULL;
+ struct gpio_device *prev, *next;
- if (list_empty(&gpio_chips)) {
- list_add_tail(&chip->list, &gpio_chips);
+ if (list_empty(&gpio_devices)) {
+ /* initial entry in list */
+ list_add_tail(&gdev->list, &gpio_devices);
return 0;
}
- list_for_each_entry(iterator, &gpio_chips, list) {
- if (iterator->base >= chip->base + chip->ngpio) {
- /*
- * Iterator is the first GPIO chip so there is no
- * previous one
- */
- if (!previous) {
- goto found;
- } else {
- /*
- * We found a valid range(means
- * [base, base + ngpio - 1]) between previous
- * and iterator chip.
- */
- if (previous->base + previous->ngpio
- <= chip->base)
- goto found;
- }
- }
- previous = iterator;
+ next = list_entry(gpio_devices.next, struct gpio_device, list);
+ if (gdev->base + gdev->ngpio <= next->base) {
+ /* add before first entry */
+ list_add(&gdev->list, &gpio_devices);
+ return 0;
}
- /*
- * We are beyond the last chip in the list and iterator now
- * points to the head.
- * Let iterator point to the last chip in the list.
- */
-
- iterator = list_last_entry(&gpio_chips, struct gpio_chip, list);
- if (iterator->base + iterator->ngpio <= chip->base) {
- list_add(&chip->list, &iterator->list);
+ prev = list_entry(gpio_devices.prev, struct gpio_device, list);
+ if (prev->base + prev->ngpio <= gdev->base) {
+ /* add behind last entry */
+ list_add_tail(&gdev->list, &gpio_devices);
return 0;
}
- dev_err(chip->parent,
- "GPIO integer space overlap, cannot add chip\n");
- return -EBUSY;
+ list_for_each_entry_safe(prev, next, &gpio_devices, list) {
+ /* at the end of the list */
+ if (&next->list == &gpio_devices)
+ break;
-found:
- list_add_tail(&chip->list, &iterator->list);
- return 0;
+ /* add between prev and next */
+ if (prev->base + prev->ngpio <= gdev->base
+ && gdev->base + gdev->ngpio <= next->base) {
+ list_add(&gdev->list, &prev->list);
+ return 0;
+ }
+ }
+
+ dev_err(&gdev->dev, "GPIO integer space overlap, cannot add chip\n");
+ return -EBUSY;
}
/**
@@ -245,23 +251,23 @@ found:
*/
static struct gpio_desc *gpio_name_to_desc(const char * const name)
{
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list) {
+ list_for_each_entry(gdev, &gpio_devices, list) {
int i;
- for (i = 0; i != chip->ngpio; ++i) {
- struct gpio_desc *gpio = &chip->desc[i];
+ for (i = 0; i != gdev->ngpio; ++i) {
+ struct gpio_desc *desc = &gdev->descs[i];
- if (!gpio->name || !name)
+ if (!desc->name || !name)
continue;
- if (!strcmp(gpio->name, name)) {
+ if (!strcmp(desc->name, name)) {
spin_unlock_irqrestore(&gpio_lock, flags);
- return gpio;
+ return desc;
}
}
}
@@ -279,6 +285,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name)
*/
static int gpiochip_set_desc_names(struct gpio_chip *gc)
{
+ struct gpio_device *gdev = gc->gpiodev;
int i;
if (!gc->names)
@@ -290,19 +297,208 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
gpio = gpio_name_to_desc(gc->names[i]);
if (gpio)
- dev_warn(gc->parent, "Detected name collision for "
- "GPIO name '%s'\n",
+ dev_warn(&gdev->dev,
+ "Detected name collision for GPIO name '%s'\n",
gc->names[i]);
}
/* Then add all names to the GPIO descriptors */
for (i = 0; i != gc->ngpio; ++i)
- gc->desc[i].name = gc->names[i];
+ gdev->descs[i].name = gc->names[i];
+
+ return 0;
+}
+
+/**
+ * gpio_ioctl() - ioctl handler for the GPIO chardev
+ */
+static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct gpio_device *gdev = filp->private_data;
+ struct gpio_chip *chip = gdev->chip;
+ int __user *ip = (int __user *)arg;
+
+ /* We fail any subsequent ioctl()s when the chip is gone */
+ if (!chip)
+ return -ENODEV;
+
+ /* Fill in the struct and pass to userspace */
+ if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
+ struct gpiochip_info chipinfo;
+
+ strncpy(chipinfo.name, dev_name(&gdev->dev),
+ sizeof(chipinfo.name));
+ chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
+ strncpy(chipinfo.label, gdev->label,
+ sizeof(chipinfo.label));
+ chipinfo.label[sizeof(chipinfo.label)-1] = '\0';
+ chipinfo.lines = gdev->ngpio;
+ if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+ return -EFAULT;
+ return 0;
+ } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
+ struct gpioline_info lineinfo;
+ struct gpio_desc *desc;
+
+ if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+ return -EFAULT;
+ if (lineinfo.line_offset >= gdev->ngpio)
+ return -EINVAL;
+
+ desc = &gdev->descs[lineinfo.line_offset];
+ if (desc->name) {
+ strncpy(lineinfo.name, desc->name,
+ sizeof(lineinfo.name));
+ lineinfo.name[sizeof(lineinfo.name)-1] = '\0';
+ } else {
+ lineinfo.name[0] = '\0';
+ }
+ if (desc->label) {
+ strncpy(lineinfo.consumer, desc->label,
+ sizeof(lineinfo.consumer));
+ lineinfo.consumer[sizeof(lineinfo.consumer)-1] = '\0';
+ } else {
+ lineinfo.consumer[0] = '\0';
+ }
+
+ /*
+ * Userspace only needs to know that the kernel is using
+ * this GPIO so that it won't try to use it.
+ */
+ lineinfo.flags = 0;
+ if (test_bit(FLAG_REQUESTED, &desc->flags) ||
+ test_bit(FLAG_IS_HOGGED, &desc->flags) ||
+ test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
+ test_bit(FLAG_EXPORT, &desc->flags) ||
+ test_bit(FLAG_SYSFS, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_KERNEL;
+ if (test_bit(FLAG_IS_OUT, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_IS_OUT;
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+ if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
+ return -EFAULT;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/**
+ * gpio_chrdev_open() - open the chardev for ioctl operations
+ * @inode: inode for this chardev
+ * @filp: file struct for storing private data
+ * Returns 0 on success
+ */
+static int gpio_chrdev_open(struct inode *inode, struct file *filp)
+{
+ struct gpio_device *gdev = container_of(inode->i_cdev,
+ struct gpio_device, chrdev);
+ /* Fail on open if the backing gpiochip is gone */
+ if (!gdev || !gdev->chip)
+ return -ENODEV;
+ get_device(&gdev->dev);
+ filp->private_data = gdev;
return 0;
}
/**
+ * gpio_chrdev_release() - close chardev after ioctl operations
+ * @inode: inode for this chardev
+ * @filp: file struct for storing private data
+ * Returns 0 on success
+ */
+static int gpio_chrdev_release(struct inode *inode, struct file *filp)
+{
+ struct gpio_device *gdev = container_of(inode->i_cdev,
+ struct gpio_device, chrdev);
+
+ if (!gdev)
+ return -ENODEV;
+ put_device(&gdev->dev);
+ return 0;
+}
+
+
+static const struct file_operations gpio_fileops = {
+ .release = gpio_chrdev_release,
+ .open = gpio_chrdev_open,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = gpio_ioctl,
+ .compat_ioctl = gpio_ioctl,
+};
+
+static void gpiodevice_release(struct device *dev)
+{
+ struct gpio_device *gdev = dev_get_drvdata(dev);
+
+ cdev_del(&gdev->chrdev);
+ list_del(&gdev->list);
+ ida_simple_remove(&gpio_ida, gdev->id);
+ kfree(gdev->label);
+ kfree(gdev->descs);
+ kfree(gdev);
+}
+
+static int gpiochip_setup_dev(struct gpio_device *gdev)
+{
+ int status;
+
+ cdev_init(&gdev->chrdev, &gpio_fileops);
+ gdev->chrdev.owner = THIS_MODULE;
+ gdev->chrdev.kobj.parent = &gdev->dev.kobj;
+ gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id);
+ status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1);
+ if (status < 0)
+ chip_warn(gdev->chip, "failed to add char device %d:%d\n",
+ MAJOR(gpio_devt), gdev->id);
+ else
+ chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
+ MAJOR(gpio_devt), gdev->id);
+ status = device_add(&gdev->dev);
+ if (status)
+ goto err_remove_chardev;
+
+ status = gpiochip_sysfs_register(gdev);
+ if (status)
+ goto err_remove_device;
+
+ /* From this point, the .release() function cleans up gpio_device */
+ gdev->dev.release = gpiodevice_release;
+ get_device(&gdev->dev);
+ pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
+ __func__, gdev->base, gdev->base + gdev->ngpio - 1,
+ dev_name(&gdev->dev), gdev->chip->label ? : "generic");
+
+ return 0;
+
+err_remove_device:
+ device_del(&gdev->dev);
+err_remove_chardev:
+ cdev_del(&gdev->chrdev);
+ return status;
+}
+
+static void gpiochip_setup_devs(void)
+{
+ struct gpio_device *gdev;
+ int err;
+
+ list_for_each_entry(gdev, &gpio_devices, list) {
+ err = gpiochip_setup_dev(gdev);
+ if (err)
+ pr_err("%s: Failed to initialize gpio device (%d)\n",
+ dev_name(&gdev->dev), err);
+ }
+}
+
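
The ioctls above are the first userspace-visible chardev ABI for GPIO chips; each chip now appears as /dev/gpiochipN. A minimal userspace sketch (assuming such a node has been created and that <linux/gpio.h> is the uapi header added by this series):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiochip_info info;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* GPIO_GET_CHIPINFO_IOCTL fills in name, label and line count */
	if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) == 0)
		printf("%s (%s): %u lines\n", info.name, info.label,
		       info.lines);
	close(fd);
	return 0;
}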
+/**
* gpiochip_add_data() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
* Context: potentially before irqs will work
@@ -316,6 +512,9 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
* the gpio framework's arch_initcall(). Otherwise sysfs initialization
* for GPIOs will fail rudely.
*
+ * gpiochip_add_data() must only be called after gpiolib initialization,
+ * ie after core_initcall().
+ *
* If chip->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
*/
@@ -323,43 +522,106 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
{
unsigned long flags;
int status = 0;
- unsigned id;
+ unsigned i;
int base = chip->base;
- struct gpio_desc *descs;
+ struct gpio_device *gdev;
- descs = kcalloc(chip->ngpio, sizeof(descs[0]), GFP_KERNEL);
- if (!descs)
+ /*
+ * First: allocate and populate the internal state container, and
+ * set up the struct device.
+ */
+ gdev = kzalloc(sizeof(*gdev), GFP_KERNEL);
+ if (!gdev)
return -ENOMEM;
+ gdev->dev.bus = &gpio_bus_type;
+ gdev->chip = chip;
+ chip->gpiodev = gdev;
+ if (chip->parent) {
+ gdev->dev.parent = chip->parent;
+ gdev->dev.of_node = chip->parent->of_node;
+ } else {
+#ifdef CONFIG_OF_GPIO
+ /* If the gpiochip has an assigned OF node this takes precedence */
+ if (chip->of_node)
+ gdev->dev.of_node = chip->of_node;
+#endif
+ }
+ gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL);
+ if (gdev->id < 0) {
+ status = gdev->id;
+ goto err_free_gdev;
+ }
+ dev_set_name(&gdev->dev, "gpiochip%d", gdev->id);
+ device_initialize(&gdev->dev);
+ dev_set_drvdata(&gdev->dev, gdev);
+ if (chip->parent && chip->parent->driver)
+ gdev->owner = chip->parent->driver->owner;
+ else if (chip->owner)
+ /* TODO: remove chip->owner */
+ gdev->owner = chip->owner;
+ else
+ gdev->owner = THIS_MODULE;
- chip->data = data;
+ gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
+ if (!gdev->descs) {
+ status = -ENOMEM;
+ goto err_free_gdev;
+ }
if (chip->ngpio == 0) {
chip_err(chip, "tried to insert a GPIO chip with zero lines\n");
- return -EINVAL;
+ status = -EINVAL;
+ goto err_free_descs;
+ }
+
+ if (chip->label)
+ gdev->label = kstrdup(chip->label, GFP_KERNEL);
+ else
+ gdev->label = kstrdup("unknown", GFP_KERNEL);
+ if (!gdev->label) {
+ status = -ENOMEM;
+ goto err_free_descs;
}
+ gdev->ngpio = chip->ngpio;
+ gdev->data = data;
+
spin_lock_irqsave(&gpio_lock, flags);
+ /*
+ * TODO: this allocates a Linux GPIO number base in the global
+ * GPIO numberspace for this chip. In the long run we want to
+ * get *rid* of this numberspace and use only descriptors, but
+ * it may be a pipe dream. It will not happen before we get rid
+ * of the sysfs interface anyway.
+ */
if (base < 0) {
base = gpiochip_find_base(chip->ngpio);
if (base < 0) {
status = base;
spin_unlock_irqrestore(&gpio_lock, flags);
- goto err_free_descs;
+ goto err_free_label;
}
+ /*
+ * TODO: it should not be necessary to reflect the assigned
+ * base outside of the GPIO subsystem. Go over drivers and
+ * see if anyone makes use of this, else drop this and assign
+ * a poison instead.
+ */
chip->base = base;
}
+ gdev->base = base;
- status = gpiochip_add_to_list(chip);
+ status = gpiodev_add_to_list(gdev);
if (status) {
spin_unlock_irqrestore(&gpio_lock, flags);
- goto err_free_descs;
+ goto err_free_label;
}
- for (id = 0; id < chip->ngpio; id++) {
- struct gpio_desc *desc = &descs[id];
+ for (i = 0; i < chip->ngpio; i++) {
+ struct gpio_desc *desc = &gdev->descs[i];
- desc->chip = chip;
+ desc->gdev = gdev;
/* REVISIT: most hardware initializes GPIOs as inputs (often
* with pullups enabled) so power usage is minimized. Linux
@@ -370,17 +632,12 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
- chip->desc = descs;
-
spin_unlock_irqrestore(&gpio_lock, flags);
#ifdef CONFIG_PINCTRL
- INIT_LIST_HEAD(&chip->pin_ranges);
+ INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
- if (!chip->owner && chip->parent && chip->parent->driver)
- chip->owner = chip->parent->driver->owner;
-
status = gpiochip_set_desc_names(chip);
if (status)
goto err_remove_from_list;
@@ -391,14 +648,19 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
acpi_gpiochip_add(chip);
- status = gpiochip_sysfs_register(chip);
- if (status)
- goto err_remove_chip;
-
- pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
- chip->base, chip->base + chip->ngpio - 1,
- chip->label ? : "generic");
-
+ /*
+ * By first adding the chardev, and then adding the device,
+ * we get a device node entry in sysfs under
+ * /sys/bus/gpio/devices/gpiochipN/dev that can be used for
+ * coldplug of device nodes and other udev business.
+ * We can do this only if gpiolib has been initialized.
+ * Otherwise, defer until later.
+ */
+ if (gpiolib_initialized) {
+ status = gpiochip_setup_dev(gdev);
+ if (status)
+ goto err_remove_chip;
+ }
return 0;
err_remove_chip:
@@ -407,21 +669,33 @@ err_remove_chip:
of_gpiochip_remove(chip);
err_remove_from_list:
spin_lock_irqsave(&gpio_lock, flags);
- list_del(&chip->list);
+ list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
- chip->desc = NULL;
+err_free_label:
+ kfree(gdev->label);
err_free_descs:
- kfree(descs);
-
+ kfree(gdev->descs);
+err_free_gdev:
+ ida_simple_remove(&gpio_ida, gdev->id);
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
- chip->base, chip->base + chip->ngpio - 1,
- chip->label ? : "generic");
+ gdev->base, gdev->base + gdev->ngpio - 1,
+ chip->label ? : "generic");
+ kfree(gdev);
return status;
}
EXPORT_SYMBOL_GPL(gpiochip_add_data);
/**
+ * gpiochip_get_data() - get per-subdriver data for the chip
+ */
+void *gpiochip_get_data(struct gpio_chip *chip)
+{
+ return chip->gpiodev->data;
+}
+EXPORT_SYMBOL_GPL(gpiochip_get_data);
+
+/**
* gpiochip_remove() - unregister a gpio_chip
* @chip: the chip to unregister
*
@@ -429,39 +703,123 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data);
*/
void gpiochip_remove(struct gpio_chip *chip)
{
+ struct gpio_device *gdev = chip->gpiodev;
struct gpio_desc *desc;
unsigned long flags;
- unsigned id;
+ unsigned i;
bool requested = false;
- gpiochip_sysfs_unregister(chip);
-
+ /* FIXME: should the legacy sysfs handling be moved to gpio_device? */
+ gpiochip_sysfs_unregister(gdev);
+ /* Numb the device, cancelling all outstanding operations */
+ gdev->chip = NULL;
gpiochip_irqchip_remove(chip);
-
acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
+ /*
+ * We accept no more calls into the driver from this point, so
+ * NULL the driver data pointer
+ */
+ gdev->data = NULL;
spin_lock_irqsave(&gpio_lock, flags);
- for (id = 0; id < chip->ngpio; id++) {
- desc = &chip->desc[id];
- desc->chip = NULL;
+ for (i = 0; i < gdev->ngpio; i++) {
+ desc = &gdev->descs[i];
if (test_bit(FLAG_REQUESTED, &desc->flags))
requested = true;
}
- list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
if (requested)
- dev_crit(chip->parent,
+ dev_crit(&gdev->dev,
"REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
- kfree(chip->desc);
- chip->desc = NULL;
+ /*
+ * The gpiochip side puts its use of the device to rest here:
+ * if there are no userspace clients, the chardev and device will
+ * be removed, else it will be dangling until the last user is
+ * gone.
+ */
+ put_device(&gdev->dev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
+static void devm_gpio_chip_release(struct device *dev, void *res)
+{
+ struct gpio_chip *chip = *(struct gpio_chip **)res;
+
+ gpiochip_remove(chip);
+}
+
+static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
+{
+ struct gpio_chip **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ return *r == data;
+}
+
+/**
+ * devm_gpiochip_add_data() - Resource managed gpiochip_add_data()
+ * @dev: the device pointer to which the gpio_chip belongs
+ * @chip: the chip to register, with chip->base initialized
+ * Context: potentially before irqs will work
+ *
+ * Returns a negative errno if the chip can't be registered, such as
+ * because the chip->base is invalid or already associated with a
+ * different chip. Otherwise it returns zero as a success code.
+ *
+ * The gpio chip will automatically be released when the device is unbound.
+ */
+int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip,
+ void *data)
+{
+ struct gpio_chip **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = gpiochip_add_data(chip, data);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = chip;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_gpiochip_add_data);
+
+/**
+ * devm_gpiochip_remove() - Resource managed gpiochip_remove()
+ * @dev: device for which the resource was allocated
+ * @chip: the chip to remove
+ *
+ * A gpio_chip with any GPIOs still requested may not be removed.
+ */
+void devm_gpiochip_remove(struct device *dev, struct gpio_chip *chip)
+{
+ int ret;
+
+ ret = devres_release(dev, devm_gpio_chip_release,
+ devm_gpio_chip_match, chip);
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(devm_gpiochip_remove);
+
/**
* gpiochip_find() - iterator for locating a specific gpio_chip
* @data: data to pass to match function
@@ -477,17 +835,21 @@ struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip,
void *data))
{
+ struct gpio_device *gdev;
struct gpio_chip *chip;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list)
- if (match(chip, data))
+ list_for_each_entry(gdev, &gpio_devices, list)
+ if (match(gdev->chip, data))
break;
/* No match? */
- if (&chip->list == &gpio_chips)
+ if (&gdev->list == &gpio_devices)
chip = NULL;
+ else
+ chip = gdev->chip;
+
spin_unlock_irqrestore(&gpio_lock, flags);
return chip;
@@ -617,14 +979,14 @@ static int gpiochip_irq_reqres(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
- if (!try_module_get(chip->owner))
+ if (!try_module_get(chip->gpiodev->owner))
return -ENODEV;
if (gpiochip_lock_as_irq(chip, d->hwirq)) {
chip_err(chip,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
- module_put(chip->owner);
+ module_put(chip->gpiodev->owner);
return -EINVAL;
}
return 0;
@@ -635,7 +997,7 @@ static void gpiochip_irq_relres(struct irq_data *d)
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
gpiochip_unlock_as_irq(chip, d->hwirq);
- module_put(chip->owner);
+ module_put(chip->gpiodev->owner);
}
static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -785,7 +1147,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
*/
int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_request_gpio(chip->base + offset);
+ return pinctrl_request_gpio(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_request);
@@ -796,7 +1158,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
*/
void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset)
{
- pinctrl_free_gpio(chip->base + offset);
+ pinctrl_free_gpio(chip->gpiodev->base + offset);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_free);
@@ -814,6 +1176,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
unsigned int gpio_offset, const char *pin_group)
{
struct gpio_pin_range *pin_range;
+ struct gpio_device *gdev = chip->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
@@ -826,7 +1189,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
pin_range->range.id = gpio_offset;
pin_range->range.gc = chip;
pin_range->range.name = chip->label;
- pin_range->range.base = chip->base + gpio_offset;
+ pin_range->range.base = gdev->base + gpio_offset;
pin_range->pctldev = pctldev;
ret = pinctrl_get_group_pins(pctldev, pin_group,
@@ -843,7 +1206,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip,
gpio_offset, gpio_offset + pin_range->range.npins - 1,
pinctrl_dev_get_devname(pctldev), pin_group);
- list_add_tail(&pin_range->node, &chip->pin_ranges);
+ list_add_tail(&pin_range->node, &gdev->pin_ranges);
return 0;
}
@@ -863,6 +1226,7 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
unsigned int npins)
{
struct gpio_pin_range *pin_range;
+ struct gpio_device *gdev = chip->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
@@ -875,7 +1239,7 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
pin_range->range.id = gpio_offset;
pin_range->range.gc = chip;
pin_range->range.name = chip->label;
- pin_range->range.base = chip->base + gpio_offset;
+ pin_range->range.base = gdev->base + gpio_offset;
pin_range->range.pin_base = pin_offset;
pin_range->range.npins = npins;
pin_range->pctldev = pinctrl_find_and_add_gpio_range(pinctl_name,
@@ -891,7 +1255,7 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
pinctl_name,
pin_offset, pin_offset + npins - 1);
- list_add_tail(&pin_range->node, &chip->pin_ranges);
+ list_add_tail(&pin_range->node, &gdev->pin_ranges);
return 0;
}
@@ -904,8 +1268,9 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);
void gpiochip_remove_pin_ranges(struct gpio_chip *chip)
{
struct gpio_pin_range *pin_range, *tmp;
+ struct gpio_device *gdev = chip->gpiodev;
- list_for_each_entry_safe(pin_range, tmp, &chip->pin_ranges, node) {
+ list_for_each_entry_safe(pin_range, tmp, &gdev->pin_ranges, node) {
list_del(&pin_range->node);
pinctrl_remove_gpio_range(pin_range->pctldev,
&pin_range->range);
@@ -922,7 +1287,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges);
*/
static int __gpiod_request(struct gpio_desc *desc, const char *label)
{
- struct gpio_chip *chip = desc->chip;
+ struct gpio_chip *chip = desc->gdev->chip;
int status;
unsigned long flags;
@@ -971,27 +1336,50 @@ done:
return status;
}
+/*
+ * This descriptor validation needs to be inserted verbatim into each
+ * function taking a descriptor, so we need to use a preprocessor
+ * macro to avoid endless duplication.
+ */
+#define VALIDATE_DESC(desc) do { \
+ if (!desc || !desc->gdev) { \
+ pr_warn("%s: invalid GPIO\n", __func__); \
+ return -EINVAL; \
+ } \
+ if (!desc->gdev->chip) { \
+ dev_warn(&desc->gdev->dev, \
+ "%s: backing chip is gone\n", __func__); \
+ return 0; \
+ } } while (0)
+
+#define VALIDATE_DESC_VOID(desc) do { \
+ if (!desc || !desc->gdev) { \
+ pr_warn("%s: invalid GPIO\n", __func__); \
+ return; \
+ } \
+ if (!desc->gdev->chip) { \
+ dev_warn(&desc->gdev->dev, \
+ "%s: backing chip is gone\n", __func__); \
+ return; \
+ } } while (0)
+
int gpiod_request(struct gpio_desc *desc, const char *label)
{
int status = -EPROBE_DEFER;
- struct gpio_chip *chip;
-
- if (!desc) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
+ struct gpio_device *gdev;
- chip = desc->chip;
- if (!chip)
- goto done;
+ VALIDATE_DESC(desc);
+ gdev = desc->gdev;
- if (try_module_get(chip->owner)) {
+ if (try_module_get(gdev->owner)) {
status = __gpiod_request(desc, label);
if (status < 0)
- module_put(chip->owner);
+ module_put(gdev->owner);
+ else
+ get_device(&gdev->dev);
}
-done:
if (status)
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -1010,7 +1398,7 @@ static bool __gpiod_free(struct gpio_desc *desc)
spin_lock_irqsave(&gpio_lock, flags);
- chip = desc->chip;
+ chip = desc->gdev->chip;
if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
if (chip->free) {
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -1033,10 +1421,12 @@ static bool __gpiod_free(struct gpio_desc *desc)
void gpiod_free(struct gpio_desc *desc)
{
- if (desc && __gpiod_free(desc))
- module_put(desc->chip->owner);
- else
+ if (desc && desc->gdev && __gpiod_free(desc)) {
+ module_put(desc->gdev->owner);
+ put_device(&desc->gdev->dev);
+ } else {
WARN_ON(extra_checks);
+ }
}
/**
@@ -1059,7 +1449,7 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
if (offset >= chip->ngpio)
return NULL;
- desc = &chip->desc[offset];
+ desc = &chip->gpiodev->descs[offset];
if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
return NULL;
@@ -1111,7 +1501,8 @@ void gpiochip_free_own_desc(struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
-/* Drivers MUST set GPIO direction before making get/set calls. In
+/*
+ * Drivers MUST set GPIO direction before making get/set calls. In
* some cases this is done in early boot, before IRQs are enabled.
*
* As a rule these aren't called more than once (except for drivers
@@ -1134,12 +1525,9 @@ int gpiod_direction_input(struct gpio_desc *desc)
struct gpio_chip *chip;
int status = -EINVAL;
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
+ VALIDATE_DESC(desc);
+ chip = desc->gdev->chip;
- chip = desc->chip;
if (!chip->get || !chip->direction_input) {
gpiod_warn(desc,
"%s: missing get() or direction_input() operations\n",
@@ -1178,7 +1566,7 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags))
return gpiod_direction_input(desc);
- chip = desc->chip;
+ chip = desc->gdev->chip;
if (!chip->set || !chip->direction_output) {
gpiod_warn(desc,
"%s: missing set() or direction_output() operations\n",
@@ -1207,10 +1595,7 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
*/
int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
+ VALIDATE_DESC(desc);
return _gpiod_direction_output_raw(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
@@ -1229,10 +1614,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
+ VALIDATE_DESC(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return _gpiod_direction_output_raw(desc, value);
@@ -1251,12 +1633,8 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
struct gpio_chip *chip;
- if (!desc || !desc->chip) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
-
- chip = desc->chip;
+ VALIDATE_DESC(desc);
+ chip = desc->gdev->chip;
if (!chip->set || !chip->set_debounce) {
gpiod_dbg(desc,
"%s: missing set() or set_debounce() operations\n",
@@ -1276,6 +1654,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_debounce);
*/
int gpiod_is_active_low(const struct gpio_desc *desc)
{
+ VALIDATE_DESC(desc);
return test_bit(FLAG_ACTIVE_LOW, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
@@ -1308,7 +1687,7 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
int offset;
int value;
- chip = desc->chip;
+ chip = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
value = chip->get ? chip->get(chip, offset) : -EIO;
value = value < 0 ? value : !!value;
@@ -1328,10 +1707,9 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
*/
int gpiod_get_raw_value(const struct gpio_desc *desc)
{
- if (!desc)
- return 0;
+ VALIDATE_DESC(desc);
/* Should be using gpio_get_value_cansleep() */
- WARN_ON(desc->chip->can_sleep);
+ WARN_ON(desc->gdev->chip->can_sleep);
return _gpiod_get_raw_value(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
@@ -1349,10 +1727,10 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
int gpiod_get_value(const struct gpio_desc *desc)
{
int value;
- if (!desc)
- return 0;
+
+ VALIDATE_DESC(desc);
/* Should be using gpio_get_value_cansleep() */
- WARN_ON(desc->chip->can_sleep);
+ WARN_ON(desc->gdev->chip->can_sleep);
value = _gpiod_get_raw_value(desc);
if (value < 0)
@@ -1373,7 +1751,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value);
static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
{
int err = 0;
- struct gpio_chip *chip = desc->chip;
+ struct gpio_chip *chip = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
@@ -1400,7 +1778,7 @@ static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value)
static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value)
{
int err = 0;
- struct gpio_chip *chip = desc->chip;
+ struct gpio_chip *chip = desc->gdev->chip;
int offset = gpio_chip_hwgpio(desc);
if (value) {
@@ -1423,7 +1801,7 @@ static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value)
{
struct gpio_chip *chip;
- chip = desc->chip;
+ chip = desc->gdev->chip;
trace_gpio_value(desc_to_gpio(desc), 0, value);
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
_gpio_set_open_drain_value(desc, value);
@@ -1471,7 +1849,7 @@ static void gpiod_set_array_value_priv(bool raw, bool can_sleep,
int i = 0;
while (i < array_size) {
- struct gpio_chip *chip = desc_array[i]->chip;
+ struct gpio_chip *chip = desc_array[i]->gdev->chip;
unsigned long mask[BITS_TO_LONGS(chip->ngpio)];
unsigned long bits[BITS_TO_LONGS(chip->ngpio)];
int count = 0;
@@ -1505,7 +1883,8 @@ static void gpiod_set_array_value_priv(bool raw, bool can_sleep,
count++;
}
i++;
- } while ((i < array_size) && (desc_array[i]->chip == chip));
+ } while ((i < array_size) &&
+ (desc_array[i]->gdev->chip == chip));
/* push collected bits to outputs */
if (count != 0)
gpio_chip_set_multiple(chip, mask, bits);
@@ -1525,10 +1904,9 @@ static void gpiod_set_array_value_priv(bool raw, bool can_sleep,
*/
void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
- if (!desc)
- return;
- /* Should be using gpio_set_value_cansleep() */
- WARN_ON(desc->chip->can_sleep);
+ VALIDATE_DESC_VOID(desc);
+ /* Should be using gpiod_set_raw_value_cansleep() */
+ WARN_ON(desc->gdev->chip->can_sleep);
_gpiod_set_raw_value(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
@@ -1546,10 +1924,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
*/
void gpiod_set_value(struct gpio_desc *desc, int value)
{
- if (!desc)
- return;
- /* Should be using gpio_set_value_cansleep() */
- WARN_ON(desc->chip->can_sleep);
+ VALIDATE_DESC_VOID(desc);
+ /* Should be using gpiod_set_value_cansleep() */
+ WARN_ON(desc->gdev->chip->can_sleep);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
_gpiod_set_raw_value(desc, value);
@@ -1607,9 +1984,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value);
*/
int gpiod_cansleep(const struct gpio_desc *desc)
{
- if (!desc)
- return 0;
- return desc->chip->can_sleep;
+ VALIDATE_DESC(desc);
+ return desc->gdev->chip->can_sleep;
}
EXPORT_SYMBOL_GPL(gpiod_cansleep);
@@ -1625,9 +2001,8 @@ int gpiod_to_irq(const struct gpio_desc *desc)
struct gpio_chip *chip;
int offset;
- if (!desc)
- return -EINVAL;
- chip = desc->chip;
+ VALIDATE_DESC(desc);
+ chip = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
}
@@ -1646,14 +2021,14 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return -EINVAL;
- if (test_bit(FLAG_IS_OUT, &chip->desc[offset].flags)) {
+ if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
chip_err(chip,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+ set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
@@ -1671,10 +2046,37 @@ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return;
- clear_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+ clear_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
+bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return false;
+
+ return test_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+}
+EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
+
+bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return false;
+
+ return test_bit(FLAG_OPEN_DRAIN, &chip->gpiodev->descs[offset].flags);
+}
+EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain);
+
+bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return false;
+
+ return test_bit(FLAG_OPEN_SOURCE, &chip->gpiodev->descs[offset].flags);
+}
+EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
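As a hedged illustration of what these predicates enable, a driver's
direction handler could consult the per-line flags before reprogramming
a pad; the bar_* accessors below are hypothetical, only the
gpiochip_line_is_*() helpers come from this patch:

static int bar_direction_output(struct gpio_chip *chip, unsigned offset,
				int value)
{
	/* A line locked for use as an IRQ must not be driven as output */
	if (gpiochip_line_is_irq(chip, offset))
		return -EBUSY;

	/* Honor open drain/source before actively driving the line */
	if (gpiochip_line_is_open_drain(chip, offset))
		bar_enable_open_drain(chip, offset);	/* hypothetical */
	else if (gpiochip_line_is_open_source(chip, offset))
		bar_enable_open_source(chip, offset);	/* hypothetical */

	bar_write_direction(chip, offset, value);	/* hypothetical */
	return 0;
}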
+
/**
* gpiod_get_raw_value_cansleep() - return a gpio's raw value
* @desc: gpio whose value will be returned
@@ -1687,8 +2089,7 @@ EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
might_sleep_if(extra_checks);
- if (!desc)
- return 0;
+ VALIDATE_DESC(desc);
return _gpiod_get_raw_value(desc);
}
EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);
@@ -1707,9 +2108,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
int value;
might_sleep_if(extra_checks);
- if (!desc)
- return 0;
-
+ VALIDATE_DESC(desc);
value = _gpiod_get_raw_value(desc);
if (value < 0)
return value;
@@ -1734,8 +2133,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_value_cansleep);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
- if (!desc)
- return;
+ VALIDATE_DESC_VOID(desc);
_gpiod_set_raw_value(desc, value);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
@@ -1753,9 +2151,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value_cansleep);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
might_sleep_if(extra_checks);
- if (!desc)
- return;
-
+ VALIDATE_DESC_VOID(desc);
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
_gpiod_set_raw_value(desc, value);
@@ -1873,9 +2269,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
return desc;
}
-static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
+static struct gpio_desc *acpi_find_gpio(struct device *dev,
+ const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags)
+ enum gpiod_flags flags,
+ enum gpio_lookup_flags *lookupflags)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_gpio_info info;
@@ -1906,10 +2304,16 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id,
desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
if (IS_ERR(desc))
return desc;
+
+ if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
+ info.gpioint) {
+ dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+ return ERR_PTR(-ENOENT);
+ }
}
if (info.polarity == GPIO_ACTIVE_LOW)
- *flags |= GPIO_ACTIVE_LOW;
+ *lookupflags |= GPIO_ACTIVE_LOW;
return desc;
}
@@ -2172,7 +2576,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
desc = of_find_gpio(dev, con_id, idx, &lookupflags);
} else if (ACPI_COMPANION(dev)) {
dev_dbg(dev, "using ACPI for GPIO lookup\n");
- desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
+ desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
}
}
@@ -2358,8 +2762,8 @@ static void gpiochip_free_hogs(struct gpio_chip *chip)
int id;
for (id = 0; id < chip->ngpio; id++) {
- if (test_bit(FLAG_IS_HOGGED, &chip->desc[id].flags))
- gpiochip_free_own_desc(&chip->desc[id]);
+ if (test_bit(FLAG_IS_HOGGED, &chip->gpiodev->descs[id].flags))
+ gpiochip_free_own_desc(&chip->gpiodev->descs[id]);
}
}
@@ -2456,17 +2860,41 @@ void gpiod_put_array(struct gpio_descs *descs)
}
EXPORT_SYMBOL_GPL(gpiod_put_array);
+static int __init gpiolib_dev_init(void)
+{
+ int ret;
+
+ /* Register GPIO sysfs bus */
+ ret = bus_register(&gpio_bus_type);
+ if (ret < 0) {
+ pr_err("gpiolib: could not register GPIO bus type\n");
+ return ret;
+ }
+
+ ret = alloc_chrdev_region(&gpio_devt, 0, GPIO_DEV_MAX, "gpiochip");
+ if (ret < 0) {
+ pr_err("gpiolib: failed to allocate char dev region\n");
+ bus_unregister(&gpio_bus_type);
+ } else {
+ gpiolib_initialized = true;
+ gpiochip_setup_devs();
+ }
+ return ret;
+}
+core_initcall(gpiolib_dev_init);
+
#ifdef CONFIG_DEBUG_FS
-static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
unsigned i;
- unsigned gpio = chip->base;
- struct gpio_desc *gdesc = &chip->desc[0];
+ struct gpio_chip *chip = gdev->chip;
+ unsigned gpio = gdev->base;
+ struct gpio_desc *gdesc = &gdev->descs[0];
int is_out;
int is_irq;
- for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
+ for (i = 0; i < gdev->ngpio; i++, gpio++, gdesc++) {
if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) {
if (gdesc->name) {
seq_printf(s, " gpio-%-3d (%-20.20s)\n",
@@ -2492,16 +2920,16 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
{
unsigned long flags;
- struct gpio_chip *chip = NULL;
+ struct gpio_device *gdev = NULL;
loff_t index = *pos;
s->private = "";
spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list)
+ list_for_each_entry(gdev, &gpio_devices, list)
if (index-- == 0) {
spin_unlock_irqrestore(&gpio_lock, flags);
- return chip;
+ return gdev;
}
spin_unlock_irqrestore(&gpio_lock, flags);
@@ -2511,14 +2939,14 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
unsigned long flags;
- struct gpio_chip *chip = v;
+ struct gpio_device *gdev = v;
void *ret = NULL;
spin_lock_irqsave(&gpio_lock, flags);
- if (list_is_last(&chip->list, &gpio_chips))
+ if (list_is_last(&gdev->list, &gpio_devices))
ret = NULL;
else
- ret = list_entry(chip->list.next, struct gpio_chip, list);
+ ret = list_entry(gdev->list.next, struct gpio_device, list);
spin_unlock_irqrestore(&gpio_lock, flags);
s->private = "\n";
@@ -2533,15 +2961,24 @@ static void gpiolib_seq_stop(struct seq_file *s, void *v)
static int gpiolib_seq_show(struct seq_file *s, void *v)
{
- struct gpio_chip *chip = v;
- struct device *dev;
+ struct gpio_device *gdev = v;
+ struct gpio_chip *chip = gdev->chip;
+ struct device *parent;
+
+ if (!chip) {
+ seq_printf(s, "%s%s: (dangling chip)", (char *)s->private,
+ dev_name(&gdev->dev));
+ return 0;
+ }
- seq_printf(s, "%sGPIOs %d-%d", (char *)s->private,
- chip->base, chip->base + chip->ngpio - 1);
- dev = chip->parent;
- if (dev)
- seq_printf(s, ", %s/%s", dev->bus ? dev->bus->name : "no-bus",
- dev_name(dev));
+ seq_printf(s, "%s%s: GPIOs %d-%d", (char *)s->private,
+ dev_name(&gdev->dev),
+ gdev->base, gdev->base + gdev->ngpio - 1);
+ parent = chip->parent;
+ if (parent)
+ seq_printf(s, ", parent: %s/%s",
+ parent->bus ? parent->bus->name : "no-bus",
+ dev_name(parent));
if (chip->label)
seq_printf(s, ", %s", chip->label);
if (chip->can_sleep)
@@ -2551,7 +2988,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
if (chip->dbg_show)
chip->dbg_show(s, chip);
else
- gpiolib_dbg_show(s, chip);
+ gpiolib_dbg_show(s, gdev);
return 0;
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 99ed3b00ffe9..e30e5fdb1214 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -12,14 +12,67 @@
#ifndef GPIOLIB_H
#define GPIOLIB_H
+#include <linux/gpio/driver.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
enum of_gpio_flags;
enum gpiod_flags;
struct acpi_device;
/**
+ * struct gpio_device - internal state container for GPIO devices
+ * @id: numerical ID number for the GPIO chip
+ * @dev: the GPIO device struct
+ * @chrdev: character device for the GPIO device
+ * @mockdev: class device used by the deprecated sysfs interface (may be
+ * NULL)
+ * @owner: helps prevent removal of modules exporting active GPIOs
+ * @chip: pointer to the corresponding gpiochip, holding static
+ * data for this device
+ * @descs: array of ngpio descriptors.
+ * @ngpio: the number of GPIO lines on this GPIO device, equal to the size
+ * of the @descs array.
+ * @base: GPIO base in the DEPRECATED global Linux GPIO numberspace, assigned
+ * at device creation time.
+ * @label: a descriptive name for the GPIO device, such as the part number
+ * or name of the IP component in a System on Chip.
+ * @data: per-instance data assigned by the driver
+ * @list: links gpio_device:s together for traversal
+ *
+ * This state container holds most of the runtime variable data
+ * for a GPIO device and can hold references and live on after the
+ * GPIO chip has been removed, if it is still being used from
+ * userspace.
+ */
+struct gpio_device {
+ int id;
+ struct device dev;
+ struct cdev chrdev;
+ struct device *mockdev;
+ struct module *owner;
+ struct gpio_chip *chip;
+ struct gpio_desc *descs;
+ int base;
+ u16 ngpio;
+ char *label;
+ void *data;
+ struct list_head list;
+
+#ifdef CONFIG_PINCTRL
+ /*
+ * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally
+ * describe the actual pin range which they serve in an SoC. This
+ * information would be used by pinctrl subsystem to configure
+ * corresponding pins for gpio usage.
+ */
+ struct list_head pin_ranges;
+#endif
+};
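With the split between static gpio_chip data and runtime gpio_device
state, the driver-data lookup becomes a one-line indirection through the
chip's gpiodev back-pointer; a sketch of the accessor shape (the real
helper is gpiochip_get_data() in <linux/gpio/driver.h>):

static inline void *example_gpiochip_get_data(struct gpio_chip *chip)
{
	/* chip holds static data; gpio_device holds the runtime state,
	 * including the per-instance driver data set at registration.
	 */
	return chip->gpiodev->data;
}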
+
+/**
* struct acpi_gpio_info - ACPI GPIO specific information
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
* @active_low: in case of @gpioint, the pin is active low
@@ -90,10 +143,10 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
extern struct spinlock gpio_lock;
-extern struct list_head gpio_chips;
+extern struct list_head gpio_devices;
struct gpio_desc {
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long flags;
/* flag symbols are bit numbers */
#define FLAG_REQUESTED 0
@@ -122,7 +175,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
*/
static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
{
- return desc - &desc->chip->desc[0];
+ return desc - &desc->gdev->descs[0];
}
/* With descriptor prefix */
@@ -149,31 +202,31 @@ static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
/* With chip prefix */
#define chip_emerg(chip, fmt, ...) \
- pr_emerg("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_emerg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_crit(chip, fmt, ...) \
- pr_crit("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_crit(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_err(chip, fmt, ...) \
- pr_err("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_err(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_warn(chip, fmt, ...) \
- pr_warn("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_warn(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_info(chip, fmt, ...) \
- pr_info("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_info(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#define chip_dbg(chip, fmt, ...) \
- pr_debug("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+ dev_dbg(&chip->gpiodev->dev, "(%s): " fmt, chip->label, ##__VA_ARGS__)
#ifdef CONFIG_GPIO_SYSFS
-int gpiochip_sysfs_register(struct gpio_chip *chip);
-void gpiochip_sysfs_unregister(struct gpio_chip *chip);
+int gpiochip_sysfs_register(struct gpio_device *gdev);
+void gpiochip_sysfs_unregister(struct gpio_device *gdev);
#else
-static inline int gpiochip_sysfs_register(struct gpio_chip *chip)
+static inline int gpiochip_sysfs_register(struct gpio_device *gdev)
{
return 0;
}
-static inline void gpiochip_sysfs_unregister(struct gpio_chip *chip)
+static inline void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
}
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 0f734ee05274..ca77ec10147c 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,10 +1,14 @@
-menu "ACP Configuration"
+menu "ACP (Audio CoProcessor) Configuration"
config DRM_AMD_ACP
- bool "Enable ACP IP support"
+ bool "Enable AMD Audio CoProcessor IP support"
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
Choose this option to enable ACP IP support for AMD SOCs.
+ This adds the ACP (Audio CoProcessor) IP driver and wires
+ it up into the amdgpu driver. The ACP block provides the DMA
+ engine for the i2s-based ALSA driver. It is required for audio
+ on APUs which utilize an i2s codec.
endmenu
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d0489722fc7e..b77489dec6e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -141,7 +141,6 @@ extern unsigned amdgpu_pcie_lane_cap;
#define CIK_CURSOR_HEIGHT 128
struct amdgpu_device;
-struct amdgpu_fence;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
@@ -287,9 +286,11 @@ struct amdgpu_ring_funcs {
struct amdgpu_ib *ib);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
+ void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+ void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
@@ -346,13 +347,15 @@ struct amdgpu_fence_driver {
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
- uint64_t sync_seq;
- atomic64_t last_seq;
+ uint32_t sync_seq;
+ atomic_t last_seq;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
struct timer_list fallback_timer;
- wait_queue_head_t fence_queue;
+ unsigned num_fences_mask;
+ spinlock_t lock;
+ struct fence **fences;
};
/* some special values for the owner field */
@@ -362,19 +365,6 @@ struct amdgpu_fence_driver {
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-struct amdgpu_fence {
- struct fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
- uint64_t seq;
-
- /* filp or special value for fence creator */
- void *owner;
-
- wait_queue_t fence_wake;
-};
-
struct amdgpu_user_fence {
/* write-back bo */
struct amdgpu_bo *bo;
@@ -386,16 +376,15 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
- struct amdgpu_fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
@@ -434,6 +423,8 @@ struct amdgpu_bo_list_entry {
struct ttm_validate_buffer tv;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
};
struct amdgpu_bo_va_mapping {
@@ -445,7 +436,6 @@ struct amdgpu_bo_va_mapping {
/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
- struct mutex mutex;
/* protected by bo being reserved */
struct list_head bo_list;
struct fence *last_pt_update;
@@ -540,11 +530,14 @@ int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
* Assumption is that there won't be hole (all object on same
* alignment).
*/
+
+#define AMDGPU_SA_NUM_FENCE_LISTS 32
+
struct amdgpu_sa_manager {
wait_queue_head_t wq;
struct amdgpu_bo *bo;
struct list_head *hole;
- struct list_head flist[AMDGPU_MAX_RINGS];
+ struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
struct list_head olist;
unsigned size;
uint64_t gpu_addr;
@@ -596,6 +589,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
/*
* GART structures, functions & helpers
@@ -726,7 +721,6 @@ struct amdgpu_ib {
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
- struct amdgpu_fence *fence;
struct amdgpu_user_fence *user;
struct amdgpu_vm *vm;
unsigned vm_id;
@@ -845,7 +839,6 @@ struct amdgpu_vm_id {
struct amdgpu_vm {
/* tree of virtual addresses mapped */
- spinlock_t it_lock;
struct rb_root va;
/* protecting invalidated */
@@ -882,6 +875,13 @@ struct amdgpu_vm_manager_id {
struct list_head list;
struct fence *active;
atomic_long_t owner;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
};
struct amdgpu_vm_manager {
@@ -917,8 +917,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid,
- uint64_t pd_addr);
+ unsigned vm_id, uint64_t pd_addr,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
@@ -1006,7 +1009,7 @@ struct amdgpu_bo_list {
struct amdgpu_bo *gds_obj;
struct amdgpu_bo *gws_obj;
struct amdgpu_bo *oa_obj;
- bool has_userptr;
+ unsigned first_userptr;
unsigned num_entries;
struct amdgpu_bo_list_entry *array;
};
@@ -1133,10 +1136,9 @@ struct amdgpu_gfx {
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ib, void *owner,
- struct fence *last_vm_update,
+ struct amdgpu_ib *ib, struct fence *last_vm_update,
struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
@@ -1155,7 +1157,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
-struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
/*
* CS.
@@ -1197,6 +1198,7 @@ struct amdgpu_job {
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
+ struct fence *fence; /* the hw fence */
uint32_t num_ibs;
void *owner;
struct amdgpu_user_fence uf;
@@ -1589,6 +1591,7 @@ struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -2012,7 +2015,6 @@ struct amdgpu_device {
struct amdgpu_sdma sdma;
/* uvd */
- bool has_uvd;
struct amdgpu_uvd uvd;
/* vce */
@@ -2032,6 +2034,7 @@ struct amdgpu_device {
/* tracking pinned memory */
u64 vram_pin_size;
+ u64 invisible_pin_size;
u64 gart_pin_size;
/* amdkfd interface */
@@ -2059,20 +2062,6 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
/*
- * Cast helper
- */
-extern const struct fence_ops amdgpu_fence_ops;
-static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
-{
- struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
-
- if (__f->base.ops == &amdgpu_fence_ops)
- return __f;
-
- return NULL;
-}
-
-/*
* Registers read & write functions.
*/
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
@@ -2186,10 +2175,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
+#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
@@ -2314,12 +2305,15 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index fa948dcbdd5d..0020a0ea43ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,6 +63,10 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+ return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -142,10 +146,6 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 4792f9d0b7d4..eacd810fc09b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -91,7 +91,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
- bool has_userptr = false;
+ unsigned last_entry = 0, first_userptr = num_entries;
unsigned i;
int r;
@@ -101,8 +101,9 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
for (i = 0; i < num_entries; ++i) {
- struct amdgpu_bo_list_entry *entry = &array[i];
+ struct amdgpu_bo_list_entry *entry;
struct drm_gem_object *gobj;
+ struct amdgpu_bo *bo;
struct mm_struct *usermm;
gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
@@ -111,19 +112,24 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
goto error_free;
}
- entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
drm_gem_object_unreference_unlocked(gobj);
- entry->priority = min(info[i].bo_priority,
- AMDGPU_BO_LIST_MAX_PRIORITY);
- usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);
+
+ usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm) {
if (usermm != current->mm) {
- amdgpu_bo_unref(&entry->robj);
+ amdgpu_bo_unref(&bo);
r = -EPERM;
goto error_free;
}
- has_userptr = true;
+ entry = &array[--first_userptr];
+ } else {
+ entry = &array[last_entry++];
}
+
+ entry->robj = bo;
+ entry->priority = min(info[i].bo_priority,
+ AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
@@ -145,7 +151,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
list->gds_obj = gds_obj;
list->gws_obj = gws_obj;
list->oa_obj = oa_obj;
- list->has_userptr = has_userptr;
+ list->first_userptr = first_userptr;
list->array = array;
list->num_entries = num_entries;
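The loop above fills the entry array from both ends so that a single
index separates the two classes of BOs; a condensed, hedged restatement
of the idea, where is_userptr() stands in for the
amdgpu_ttm_tt_get_usermm() check:

unsigned last_entry = 0, first_userptr = num_entries;
unsigned i;

for (i = 0; i < num_entries; ++i) {
	struct amdgpu_bo_list_entry *entry;

	if (is_userptr(&info[i]))	/* hypothetical predicate */
		entry = &array[--first_userptr];
	else
		entry = &array[last_entry++];
	/* ... fill *entry from info[i] ... */
}
/* Result: [0, last_entry) plain BOs, [first_userptr, num_entries)
 * userptr BOs, with last_entry == first_userptr when done.
 */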
@@ -194,6 +200,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
list_add_tail(&list->array[i].tv.head,
&bucket[priority]);
+ list->array[i].user_pages = NULL;
}
/* Connect the sorted buckets in the output list. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 7a4b101e10c6..6043dc7c3a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
struct drm_device *ddev = adev->ddev;
struct drm_crtc *crtc;
uint32_t line_time_us, vblank_lines;
+ struct cgs_mode_info *mode_info;
if (info == NULL)
return -EINVAL;
+ mode_info = info->mode_info;
+
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
info->display_count++;
}
- if (info->mode_info != NULL &&
+ if (mode_info != NULL &&
crtc->enabled && amdgpu_crtc->enabled &&
amdgpu_crtc->hw_mode.clock) {
line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
@@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2);
- info->mode_info->vblank_time_us = vblank_lines * line_time_us;
- info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- info->mode_info->ref_clock = adev->clock.spll.reference_freq;
- info->mode_info++;
+ mode_info->vblank_time_us = vblank_lines * line_time_us;
+ mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ mode_info = NULL;
}
}
}
@@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
return 0;
}
+
+static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+{
+ CGS_FUNC_ADEV;
+
+ adev->pm.dpm_enabled = enabled;
+
+ return 0;
+}
+
/** \brief evaluate acpi namespace object, handle or pathname must be valid
* \param cgs_device
* \param info input/output arguments for the control method
@@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
+ amdgpu_cgs_notify_dpm_enabled,
amdgpu_cgs_call_acpi_method,
amdgpu_cgs_query_system_info,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 52c3eb96b199..9392e50a7ba4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -25,6 +25,7 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/list_sort.h>
+#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -111,6 +112,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;
+ p->uf_entry.user_pages = NULL;
drm_gem_object_unreference_unlocked(gobj);
return 0;
@@ -297,6 +299,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
list_for_each_entry(lobj, validated, tv.head) {
struct amdgpu_bo *bo = lobj->robj;
+ bool binding_userptr = false;
struct mm_struct *usermm;
uint32_t domain;
@@ -304,6 +307,15 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
if (usermm && usermm != current->mm)
return -EPERM;
+ /* Check if we have user pages and nobody bound the BO already */
+ if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
+ size_t size = sizeof(struct page *);
+
+ size *= bo->tbo.ttm->num_pages;
+ memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+ binding_userptr = true;
+ }
+
if (bo->pin_count)
continue;
@@ -334,6 +346,11 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
}
return r;
}
+
+ if (binding_userptr) {
+ drm_free_large(lobj->user_pages);
+ lobj->user_pages = NULL;
+ }
}
return 0;
}
@@ -342,15 +359,18 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
bool need_mmap_lock = false;
+ unsigned i, tries = 10;
int r;
INIT_LIST_HEAD(&p->validated);
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) {
- need_mmap_lock = p->bo_list->has_userptr;
+ need_mmap_lock = p->bo_list->first_userptr !=
+ p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
}
@@ -363,9 +383,81 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
- if (unlikely(r != 0))
- goto error_reserve;
+ while (1) {
+ struct list_head need_pages;
+ unsigned i;
+
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
+ &duplicates);
+ if (unlikely(r != 0))
+ goto error_free_pages;
+
+ /* Without a BO list we don't have userptr BOs */
+ if (!p->bo_list)
+ break;
+
+ INIT_LIST_HEAD(&need_pages);
+ for (i = p->bo_list->first_userptr;
+ i < p->bo_list->num_entries; ++i) {
+
+ e = &p->bo_list->array[i];
+
+ if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+ &e->user_invalidated) && e->user_pages) {
+
+ /* We acquired a page array, but somebody
+ * invalidated it. Free it and try again
+ */
+ release_pages(e->user_pages,
+ e->robj->tbo.ttm->num_pages,
+ false);
+ drm_free_large(e->user_pages);
+ e->user_pages = NULL;
+ }
+
+ if (e->robj->tbo.ttm->state != tt_bound &&
+ !e->user_pages) {
+ list_del(&e->tv.head);
+ list_add(&e->tv.head, &need_pages);
+
+ amdgpu_bo_unreserve(e->robj);
+ }
+ }
+
+ if (list_empty(&need_pages))
+ break;
+
+ /* Unreserve everything again. */
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+ /* We tried too often, just abort */
+ if (!--tries) {
+ r = -EDEADLK;
+ goto error_free_pages;
+ }
+
+ /* Fill the page arrays for all userptrs. */
+ list_for_each_entry(e, &need_pages, tv.head) {
+ struct ttm_tt *ttm = e->robj->tbo.ttm;
+
+ e->user_pages = drm_calloc_large(ttm->num_pages,
+ sizeof(struct page *));
+ if (!e->user_pages) {
+ r = -ENOMEM;
+ goto error_free_pages;
+ }
+
+ r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
+ if (r) {
+ drm_free_large(e->user_pages);
+ e->user_pages = NULL;
+ goto error_free_pages;
+ }
+ }
+
+ /* And try again. */
+ list_splice(&need_pages, &p->validated);
+ }
amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
@@ -397,10 +489,26 @@ error_validate:
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
}
-error_reserve:
+error_free_pages:
+
if (need_mmap_lock)
up_read(&current->mm->mmap_sem);
+ if (p->bo_list) {
+ for (i = p->bo_list->first_userptr;
+ i < p->bo_list->num_entries; ++i) {
+ e = &p->bo_list->array[i];
+
+ if (!e->user_pages)
+ continue;
+
+ release_pages(e->user_pages,
+ e->robj->tbo.ttm->num_pages,
+ false);
+ drm_free_large(e->user_pages);
+ }
+ }
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2139da773da6..612117478b57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,6 +62,12 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1479,7 +1485,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev))
+ if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f0ed974bd4e0..3fb405b3a614 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
return true;
- fence_put(*f);
+ fence_put(fence);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 74a2f8a6be1f..f1e17d60055a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -555,6 +555,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
static int __init amdgpu_init(void)
{
+ amdgpu_sync_init();
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
@@ -577,6 +578,7 @@ static void __exit amdgpu_exit(void)
amdgpu_amdkfd_fini();
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
+ amdgpu_sync_fini();
}
module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 97db196dc6f8..d81f1f4883a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,9 +47,30 @@
* that the the relevant GPU caches have been flushed.
*/
+struct amdgpu_fence {
+ struct fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+};
+
static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+/*
+ * Cast helper
+ */
+static const struct fence_ops amdgpu_fence_ops;
+static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
+{
+ struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
+
+ if (__f->base.ops == &amdgpu_fence_ops)
+ return __f;
+
+ return NULL;
+}
+
/**
* amdgpu_fence_write - write a fence value
*
@@ -82,7 +103,7 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
if (drv->cpu_addr)
seq = le32_to_cpu(*drv->cpu_addr);
else
- seq = lower_32_bits(atomic64_read(&drv->last_seq));
+ seq = atomic_read(&drv->last_seq);
return seq;
}
@@ -91,32 +112,45 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
- * @owner: creator of the fence
- * @fence: amdgpu fence object
+ * @f: resulting fence object
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
- struct amdgpu_fence **fence)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_fence *fence;
+ struct fence *old, **ptr;
+ uint32_t seq;
- /* we are protected by the ring emission mutex */
- *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
- if ((*fence) == NULL) {
+ fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
+ if (fence == NULL)
return -ENOMEM;
- }
- (*fence)->seq = ++ring->fence_drv.sync_seq;
- (*fence)->ring = ring;
- (*fence)->owner = owner;
- fence_init(&(*fence)->base, &amdgpu_fence_ops,
- &ring->fence_drv.fence_queue.lock,
- adev->fence_context + ring->idx,
- (*fence)->seq);
+
+ seq = ++ring->fence_drv.sync_seq;
+ fence->ring = ring;
+ fence_init(&fence->base, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx,
+ seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- (*fence)->seq,
- AMDGPU_FENCE_FLAG_INT);
+ seq, AMDGPU_FENCE_FLAG_INT);
+
+ ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ /* This function can't be called concurrently anyway, otherwise
+ * emitting the fence would mess up the hardware ring buffer.
+ */
+ old = rcu_dereference_protected(*ptr, 1);
+ if (old && !fence_is_signaled(old)) {
+ DRM_INFO("rcu slot is busy\n");
+ fence_wait(old, false);
+ }
+
+ rcu_assign_pointer(*ptr, fence_get(&fence->base));
+
+ *f = &fence->base;
+
return 0;
}
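To make the slot arithmetic concrete: num_hw_submission is a power of
two, so masking the 32-bit sequence number selects a fence slot, and a
slot is only reused once its previous occupant has signaled. A hedged
sketch of the mapping, assuming the mask is set up as in this patch:

static struct fence **fence_slot(struct amdgpu_fence_driver *drv,
				 uint32_t seq)
{
	/* With num_hw_submission = 4 (mask = 3): seq 5 -> slot 1,
	 * seq 6 -> slot 2, seq 9 -> slot 1 again, so at most four
	 * unsignaled fences can coexist on the ring.
	 */
	return &drv->fences[seq & drv->num_fences_mask];
}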
@@ -134,89 +168,48 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
}
/**
- * amdgpu_fence_activity - check for fence activity
+ * amdgpu_fence_process - check for fence activity
*
* @ring: pointer to struct amdgpu_ring
*
* Checks the current fence value and calculates the last
- * signalled fence value. Returns true if activity occured
- * on the ring, and the fence_queue should be waken up.
+ * signalled fence value. Wakes the fence queue if the
+ * sequence number has increased.
*/
-static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
+void amdgpu_fence_process(struct amdgpu_ring *ring)
{
- uint64_t seq, last_seq, last_emitted;
- unsigned count_loop = 0;
- bool wake = false;
-
- /* Note there is a scenario here for an infinite loop but it's
- * very unlikely to happen. For it to happen, the current polling
- * process need to be interrupted by another process and another
- * process needs to update the last_seq btw the atomic read and
- * xchg of the current process.
- *
- * More over for this to go in infinite loop there need to be
- * continuously new fence signaled ie amdgpu_fence_read needs
- * to return a different value each time for both the currently
- * polling process and the other process that xchg the last_seq
- * btw atomic read and xchg of the current process. And the
- * value the other process set as last seq must be higher than
- * the seq value we just read. Which means that current process
- * need to be interrupted after amdgpu_fence_read and before
- * atomic xchg.
- *
- * To be even more safe we count the number of time we loop and
- * we bail after 10 loop just accepting the fact that we might
- * have temporarly set the last_seq not to the true real last
- * seq but to an older one.
- */
- last_seq = atomic64_read(&ring->fence_drv.last_seq);
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ uint32_t seq, last_seq;
+ int r;
+
do {
- last_emitted = ring->fence_drv.sync_seq;
+ last_seq = atomic_read(&ring->fence_drv.last_seq);
seq = amdgpu_fence_read(ring);
- seq |= last_seq & 0xffffffff00000000LL;
- if (seq < last_seq) {
- seq &= 0xffffffff;
- seq |= last_emitted & 0xffffffff00000000LL;
- }
- if (seq <= last_seq || seq > last_emitted) {
- break;
- }
- /* If we loop over we don't want to return without
- * checking if a fence is signaled as it means that the
- * seq we just read is different from the previous on.
- */
- wake = true;
- last_seq = seq;
- if ((count_loop++) > 10) {
- /* We looped over too many time leave with the
- * fact that we might have set an older fence
- * seq then the current real last seq as signaled
- * by the hw.
- */
- break;
- }
- } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
+ } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
- if (seq < last_emitted)
+ if (seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
- return wake;
-}
+ while (last_seq != seq) {
+ struct fence *fence, **ptr;
-/**
- * amdgpu_fence_process - process a fence
- *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
- *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
- */
-void amdgpu_fence_process(struct amdgpu_ring *ring)
-{
- if (amdgpu_fence_activity(ring))
- wake_up_all(&ring->fence_drv.fence_queue);
+ ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+
+ /* There is always exactly one thread signaling this fence slot */
+ fence = rcu_dereference_protected(*ptr, 1);
+ rcu_assign_pointer(*ptr, NULL);
+
+ BUG_ON(!fence);
+
+ r = fence_signal(fence);
+ if (!r)
+ FENCE_TRACE(fence, "signaled from irq context\n");
+ else
+ BUG();
+
+ fence_put(fence);
+ }
}
/**
@@ -234,77 +227,6 @@ static void amdgpu_fence_fallback(unsigned long arg)
}
/**
- * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
- *
- * @ring: ring the fence is associated with
- * @seq: sequence number
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if the fence has signaled (current fence value
- * is >= requested value) or false if it has not (current fence
- * value is < the requested value. Helper function for
- * amdgpu_fence_signaled().
- */
-static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
-{
- if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
- return true;
-
- /* poll new last sequence at least once */
- amdgpu_fence_process(ring);
- if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
- return true;
-
- return false;
-}
-
-/*
- * amdgpu_ring_wait_seq - wait for seq of the specific ring to signal
- * @ring: ring to wait on for the seq number
- * @seq: seq number wait for
- *
- * return value:
- * 0: seq signaled, and gpu not hang
- * -EINVAL: some paramter is not valid
- */
-static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
-{
- BUG_ON(!ring);
- if (seq > ring->fence_drv.sync_seq)
- return -EINVAL;
-
- if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
- return 0;
-
- amdgpu_fence_schedule_fallback(ring);
- wait_event(ring->fence_drv.fence_queue,
- amdgpu_fence_seq_signaled(ring, seq));
-
- return 0;
-}
-
-/**
- * amdgpu_fence_wait_next - wait for the next fence to signal
- *
- * @adev: amdgpu device pointer
- * @ring: ring index the fence is associated with
- *
- * Wait for the next fence on the requested ring to signal (all asics).
- * Returns 0 if the next fence has passed, error for all other cases.
- * Caller must hold ring lock.
- */
-int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
-{
- uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-
- if (seq >= ring->fence_drv.sync_seq)
- return -ENOENT;
-
- return amdgpu_fence_ring_wait_seq(ring, seq);
-}
-
-/**
* amdgpu_fence_wait_empty - wait for all fences to signal
*
* @adev: amdgpu device pointer
@@ -312,16 +234,28 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
*
* Wait for all fences on the requested ring to signal (all asics).
* Returns 0 if the fences have passed, error for all other cases.
- * Caller must hold ring lock.
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ring->fence_drv.sync_seq;
+ uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ struct fence *fence, **ptr;
+ int r;
if (!seq)
return 0;
- return amdgpu_fence_ring_wait_seq(ring, seq);
+ ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ rcu_read_lock();
+ fence = rcu_dereference(*ptr);
+ if (!fence || !fence_get_rcu(fence)) {
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ r = fence_wait(fence, false);
+ fence_put(fence);
+ return r;
}
/**
@@ -341,13 +275,10 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
* but it's ok to report slightly wrong fence count here.
*/
amdgpu_fence_process(ring);
- emitted = ring->fence_drv.sync_seq
- - atomic64_read(&ring->fence_drv.last_seq);
- /* to avoid 32bits warp around */
- if (emitted > 0x10000000)
- emitted = 0x10000000;
-
- return (unsigned)emitted;
+ emitted = 0x100000000ull;
+ emitted -= atomic_read(&ring->fence_drv.last_seq);
+ emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ return lower_32_bits(emitted);
}
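
The wrap-safe arithmetic in amdgpu_fence_count_emitted biases the subtraction by 2^32 so it cannot underflow when sync_seq has already wrapped past last_seq. A worked example with assumed values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t last_seq = 0xfffffffeu;   /* last signaled, about to wrap */
		uint32_t sync_seq = 0x00000003u;   /* last emitted, already wrapped */

		/* Bias by 2^32 so the subtraction cannot underflow, then keep
		 * only the low 32 bits of the result. */
		uint64_t emitted = 0x100000000ull;
		emitted -= last_seq;
		emitted += sync_seq;

		printf("emitted = %u\n", (uint32_t)emitted);   /* prints 5 */
		return 0;
	}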
/**
@@ -379,7 +310,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
}
- amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
+ amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
amdgpu_irq_get(adev, irq_src, irq_type);
ring->fence_drv.irq_src = irq_src;
@@ -397,25 +328,36 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* for the requested ring.
*
* @ring: ring to init the fence driver on
+ * @num_hw_submission: number of entries on the hardware queue
*
* Init the fence driver for the requested ring (all asics).
* Helper function for amdgpu_fence_driver_init().
*/
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
+ unsigned num_hw_submission)
{
long timeout;
int r;
+ /* Check that num_hw_submission is a power of two */
+ if ((num_hw_submission & (num_hw_submission - 1)) != 0)
+ return -EINVAL;
+
ring->fence_drv.cpu_addr = NULL;
ring->fence_drv.gpu_addr = 0;
ring->fence_drv.sync_seq = 0;
- atomic64_set(&ring->fence_drv.last_seq, 0);
+ atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
(unsigned long)ring);
- init_waitqueue_head(&ring->fence_drv.fence_queue);
+ ring->fence_drv.num_fences_mask = num_hw_submission - 1;
+ spin_lock_init(&ring->fence_drv.lock);
+ ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
+ GFP_KERNEL);
+ if (!ring->fence_drv.fences)
+ return -ENOMEM;
timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
if (timeout == 0) {
@@ -429,7 +371,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
timeout = MAX_SCHEDULE_TIMEOUT;
}
r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
- amdgpu_sched_hw_submission,
+ num_hw_submission,
timeout, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
@@ -477,10 +419,9 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev)
*/
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
- int i, r;
+ unsigned i, j;
+ int r;
- if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
- kmem_cache_destroy(amdgpu_fence_slab);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -491,13 +432,18 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
/* no need to trigger GPU reset as we are unloading */
amdgpu_fence_driver_force_completion(adev);
}
- wake_up_all(&ring->fence_drv.fence_queue);
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
+ for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+ fence_put(ring->fence_drv.fences[j]);

+ kfree(ring->fence_drv.fences);
ring->fence_drv.initialized = false;
}
+
+ if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+ kmem_cache_destroy(amdgpu_fence_slab);
}
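
The num_hw_submission & (num_hw_submission - 1) test in amdgpu_fence_driver_init_ring above rejects non-power-of-two queue depths, so the num_fences_mask indexing stays a valid modulo. A small sketch of why the bit trick works (illustrative helpers, not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	/* A power of two has exactly one bit set, so clearing the lowest set
	 * bit with n & (n - 1) must leave zero. */
	static bool is_power_of_two(unsigned int n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	/* With a power-of-two slot count, masking equals the modulo used to
	 * pick a fence slot: seq % n == (seq & (n - 1)). */
	static unsigned int slot_for_seq(unsigned int seq, unsigned int n)
	{
		return seq & (n - 1);
	}

	int main(void)
	{
		printf("%d %d %d\n", is_power_of_two(1), is_power_of_two(6),
		       is_power_of_two(8));                    /* 1 0 1 */
		printf("slot %u\n", slot_for_seq(0x1000002u, 16u));  /* slot 2 */
		return 0;
	}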
/**
@@ -594,103 +540,57 @@ static const char *amdgpu_fence_get_timeline_name(struct fence *f)
}
/**
- * amdgpu_fence_is_signaled - test if fence is signaled
- *
- * @f: fence to test
+ * amdgpu_fence_enable_signaling - enable signalling on fence
+ * @f: fence to enable signaling on
*
- * Test the fence sequence number if it is already signaled. If it isn't
- * signaled start fence processing. Returns True if the fence is signaled.
+ * This function is called with the fence lock held and makes sure that
+ * the fallback timer is running, so the fence still gets signaled from
+ * the timer even if the interrupt is never delivered.
*/
-static bool amdgpu_fence_is_signaled(struct fence *f)
+static bool amdgpu_fence_enable_signaling(struct fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
- if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
- return true;
-
- amdgpu_fence_process(ring);
+ if (!timer_pending(&ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(ring);
- if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
- return true;
+ FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
- return false;
+ return true;
}
/**
- * amdgpu_fence_check_signaled - callback from fence_queue
+ * amdgpu_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
*
- * this function is called with fence_queue lock held, which is also used
- * for the fence locking itself, so unlocked variants are used for
- * fence_signal, and remove_wait_queue.
+ * Free up the fence memory after the RCU grace period.
*/
-static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
+static void amdgpu_fence_free(struct rcu_head *rcu)
{
- struct amdgpu_fence *fence;
- struct amdgpu_device *adev;
- u64 seq;
- int ret;
-
- fence = container_of(wait, struct amdgpu_fence, fence_wake);
- adev = fence->ring->adev;
-
- /*
- * We cannot use amdgpu_fence_process here because we're already
- * in the waitqueue, in a call from wake_up_all.
- */
- seq = atomic64_read(&fence->ring->fence_drv.last_seq);
- if (seq >= fence->seq) {
- ret = fence_signal_locked(&fence->base);
- if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
- else
- FENCE_TRACE(&fence->base, "was already signaled\n");
-
- __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
- fence_put(&fence->base);
- } else
- FENCE_TRACE(&fence->base, "pending\n");
- return 0;
+ struct fence *f = container_of(rcu, struct fence, rcu);
+ struct amdgpu_fence *fence = to_amdgpu_fence(f);
+ kmem_cache_free(amdgpu_fence_slab, fence);
}
/**
- * amdgpu_fence_enable_signaling - enable signalling on fence
+ * amdgpu_fence_release - callback that fence can be freed
+ *
* @fence: fence
*
- * This function is called with fence_queue lock held, and adds a callback
- * to fence_queue that checks if this fence is signaled, and if so it
- * signals the fence and removes itself.
+ * This function is called when the reference count becomes zero.
+ * It just schedules an RCU callback to free the fence.
*/
-static bool amdgpu_fence_enable_signaling(struct fence *f)
-{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- struct amdgpu_ring *ring = fence->ring;
-
- if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
- return false;
-
- fence->fence_wake.flags = 0;
- fence->fence_wake.private = NULL;
- fence->fence_wake.func = amdgpu_fence_check_signaled;
- __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
- fence_get(f);
- if (!timer_pending(&ring->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(ring);
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
- return true;
-}
-
static void amdgpu_fence_release(struct fence *f)
{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- kmem_cache_free(amdgpu_fence_slab, fence);
+ call_rcu(&f->rcu, amdgpu_fence_free);
}
-const struct fence_ops amdgpu_fence_ops = {
+static const struct fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .signaled = amdgpu_fence_is_signaled,
.wait = fence_default_wait,
.release = amdgpu_fence_release,
};
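
amdgpu_fence_release defers the actual free with call_rcu so that lock-free readers still inside rcu_dereference() on a fence slot cannot see the memory disappear under them. A generic kernel-style sketch of the same pattern, with made-up type names:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	/* Made-up object type; only the rcu_head placement matters. */
	struct obj {
		struct rcu_head rcu;
		int payload;
	};

	static void obj_free_rcu(struct rcu_head *rcu)
	{
		kfree(container_of(rcu, struct obj, rcu));
	}

	static void obj_release(struct obj *o)
	{
		/* Readers that looked the object up via rcu_dereference() may
		 * still be running; defer the free past a grace period. */
		call_rcu(&o->rcu, obj_free_rcu);
	}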
@@ -714,9 +614,9 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
amdgpu_fence_process(ring);
seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
- seq_printf(m, "Last signaled fence 0x%016llx\n",
- (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
- seq_printf(m, "Last emitted 0x%016llx\n",
+ seq_printf(m, "Last signaled fence 0x%08x\n",
+ atomic_read(&ring->fence_drv.last_seq));
+ seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7a47c45b2131..fa6a27bff298 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include <linux/ktime.h>
+#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -140,25 +141,40 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
- struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = rbo->adev;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = bo->adev;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
+
+ struct amdgpu_bo_list_entry vm_pd;
+ struct list_head list, duplicates;
+ struct ttm_validate_buffer tv;
+ struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
int r;
- r = amdgpu_bo_reserve(rbo, true);
+
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
+
+ tv.bo = &bo->tbo;
+ tv.shared = true;
+ list_add(&tv.head, &list);
+
+ amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
return;
}
- bo_va = amdgpu_vm_bo_find(vm, rbo);
+ bo_va = amdgpu_vm_bo_find(vm, bo);
if (bo_va) {
if (--bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
}
}
- amdgpu_bo_unreserve(rbo);
+ ttm_eu_backoff_reservation(&ticket, &list);
}
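
amdgpu_gem_object_close now needs the BO and the VM page directory reserved together, which is what ttm_eu_reserve_buffers provides: it runs the ww_mutex acquire dance over the whole list so the lock order cannot deadlock. A condensed kernel-style sketch of that calling convention (reserve_pair is a hypothetical helper):

	#include <linux/list.h>
	#include <drm/ttm/ttm_execbuf_util.h>

	/* Hypothetical helper: reserve two buffers together; on success both
	 * are locked until ttm_eu_backoff_reservation() drops them again. */
	static int reserve_pair(struct ttm_buffer_object *a,
				struct ttm_buffer_object *b)
	{
		struct ttm_validate_buffer tv[2] = {
			{ .bo = a, .shared = true },
			{ .bo = b, .shared = true },
		};
		struct ww_acquire_ctx ticket;
		struct list_head list, duplicates;
		int r;

		INIT_LIST_HEAD(&list);
		INIT_LIST_HEAD(&duplicates);
		list_add(&tv[0].head, &list);
		list_add(&tv[1].head, &list);

		r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
		if (r)
			return r;

		/* ... operate on the reserved buffers ... */

		ttm_eu_backoff_reservation(&ticket, &list);
		return 0;
	}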
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -243,12 +259,10 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_USERPTR_REGISTER))
return -EINVAL;
- if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && (
- !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) ||
- !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) {
+ if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
+ !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
- /* if we want to write to it we must require anonymous
- memory and install a MMU notifier */
+ /* if we want to write to it we must install a MMU notifier */
return -EACCES;
}
@@ -274,18 +288,23 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
down_read(&current->mm->mmap_sem);
+
+ r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
+ bo->tbo.ttm->pages);
+ if (r)
+ goto unlock_mmap_sem;
+
r = amdgpu_bo_reserve(bo, true);
- if (r) {
- up_read(&current->mm->mmap_sem);
- goto release_object;
- }
+ if (r)
+ goto free_pages;
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
amdgpu_bo_unreserve(bo);
- up_read(&current->mm->mmap_sem);
if (r)
- goto release_object;
+ goto free_pages;
+
+ up_read(&current->mm->mmap_sem);
}
r = drm_gem_handle_create(filp, gobj, &handle);
@@ -297,6 +316,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
args->handle = handle;
return 0;
+free_pages:
+ release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
+
+unlock_mmap_sem:
+ up_read(&current->mm->mmap_sem);
+
release_object:
drm_gem_object_unreference_unlocked(gobj);
@@ -569,11 +594,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
tv.shared = true;
list_add(&tv.head, &list);
- if (args->operation == AMDGPU_VA_OP_MAP) {
- tv_pd.bo = &fpriv->vm.page_directory->tbo;
- tv_pd.shared = true;
- list_add(&tv_pd.head, &list);
- }
+ tv_pd.bo = &fpriv->vm.page_directory->tbo;
+ tv_pd.shared = true;
+ list_add(&tv_pd.head, &list);
+
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r) {
drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index db14a7bbb8f4..8443cea6821a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -85,14 +85,13 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*
* @adev: amdgpu_device pointer
* @ib: IB object to free
+ * @f: the fence the SA bo needs to wait on for the IB allocation
*
* Free an IB (all asics).
*/
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f)
{
- amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base);
- if (ib->fence)
- fence_put(&ib->fence->base);
+ amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
/**
@@ -101,7 +100,6 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
* @adev: amdgpu_device pointer
* @num_ibs: number of IBs to schedule
* @ibs: IB objects to schedule
- * @owner: owner for creating the fences
* @f: fence created during this submission
*
* Schedule an IB on the associated ring (all asics).
@@ -118,14 +116,14 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib)
* to SI there was just a DE IB.
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, void *owner,
- struct fence *last_vm_update,
+ struct amdgpu_ib *ibs, struct fence *last_vm_update,
struct fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct amdgpu_ctx *ctx, *old_ctx;
struct amdgpu_vm *vm;
+ struct fence *hwf;
unsigned i;
int r = 0;
@@ -153,13 +151,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (vm) {
/* do context switch */
- amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr);
-
- if (ring->funcs->emit_gds_switch)
- amdgpu_ring_emit_gds_switch(ring, ib->vm_id,
- ib->gds_base, ib->gds_size,
- ib->gws_base, ib->gws_size,
- ib->oa_base, ib->oa_size);
+ amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
+ ib->gds_base, ib->gds_size,
+ ib->gws_base, ib->gws_size,
+ ib->oa_base, ib->oa_size);
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
@@ -171,6 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ib->ctx != ctx || ib->vm != vm) {
ring->current_ctx = old_ctx;
+ if (ib->vm_id)
+ amdgpu_vm_reset_id(adev, ib->vm_id);
amdgpu_ring_undo(ring);
return -EINVAL;
}
@@ -178,10 +175,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
ring->current_ctx = ctx;
}
- r = amdgpu_fence_emit(ring, owner, &ib->fence);
+ if (vm) {
+ if (ring->funcs->emit_hdp_invalidate)
+ amdgpu_ring_emit_hdp_invalidate(ring);
+ }
+
+ r = amdgpu_fence_emit(ring, &hwf);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
ring->current_ctx = old_ctx;
+ if (ib->vm_id)
+ amdgpu_vm_reset_id(adev, ib->vm_id);
amdgpu_ring_undo(ring);
return r;
}
@@ -195,7 +199,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
if (f)
- *f = fence_get(&ib->fence->base);
+ *f = fence_get(hwf);
amdgpu_ring_commit(ring);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index f594cfaa97e5..762cfdb85147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
return r;
}
+ adev->ddev->vblank_disable_allowed = true;
+
/* enable msi */
adev->irq.msi_enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 90e52f7e17a0..9c9b19e2f353 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -70,9 +70,13 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free(struct amdgpu_job *job)
{
unsigned i;
+ struct fence *f;
+ /* use sched fence if available */
+ f = job->base.s_fence ? &job->base.s_fence->base : job->fence;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(job->adev, &job->ibs[i]);
+ amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
+ fence_put(job->fence);
amdgpu_bo_unref(&job->uf.bo);
amdgpu_sync_free(&job->sync);
@@ -148,7 +152,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
}
trace_amdgpu_sched_run_job(job);
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job->owner,
+ r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
job->sync.last_vm_update, &fence);
if (r) {
DRM_ERROR("Error scheduling IBs (%d)\n", r);
@@ -156,6 +160,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
}
err:
+ job->fence = fence;
amdgpu_job_free(job);
return fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7805a8706af7..aef70db16832 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -382,8 +382,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->mc.real_vram_size;
+ vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
+ vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
vram_gtt.gtt_size = adev->mc.gtt_size;
vram_gtt.gtt_size -= adev->gart_pin_size;
return copy_to_user(out, &vram_gtt,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d7ec9bd6755f..9f4a45cd2aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,7 +48,8 @@ struct amdgpu_mn {
/* protected by adev->mn_lock */
struct hlist_node node;
- /* objects protected by mm->mmap_sem */
+ /* objects protected by lock */
+ struct mutex lock;
struct rb_root objects;
};
@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
kfree(node);
}
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn);
@@ -105,6 +106,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
}
/**
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
+ *
+ * @node: the node with the BOs to unmap
+ * @start: start of the address range, inclusive
+ * @end: end of the address range, inclusive
+ *
+ * We block for all BOs and unmap them by moving them
+ * back into the system domain.
+ */
+static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+ unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_bo *bo;
+ long r;
+
+ list_for_each_entry(bo, &node->bos, mn_list) {
+
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
+ continue;
+
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+ continue;
+ }
+
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ true, false, MAX_SCHEDULE_TIMEOUT);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+ amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ if (r)
+ DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+ amdgpu_bo_unreserve(bo);
+ }
+}
+
+/**
+ * amdgpu_mn_invalidate_page - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @address: address of the invalidated page
+ *
+ * Invalidation of a single page. Blocks for all BOs mapping it
+ * and unmaps them by moving them back into the system domain.
+ */
+static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+ struct interval_tree_node *it;
+
+ mutex_lock(&rmn->lock);
+
+ it = interval_tree_iter_first(&rmn->objects, address, address);
+ if (it) {
+ struct amdgpu_mn_node *node;
+
+ node = container_of(it, struct amdgpu_mn_node, it);
+ amdgpu_mn_invalidate_node(node, address, address);
+ }
+
+ mutex_unlock(&rmn->lock);
+}
+
+/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
*
* @mn: our notifier
@@ -126,44 +197,24 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
+ mutex_lock(&rmn->lock);
+
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
- struct amdgpu_bo *bo;
- long r;
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
- end))
- continue;
-
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- amdgpu_bo_unreserve(bo);
- }
+ amdgpu_mn_invalidate_node(node, start, end);
}
+
+ mutex_unlock(&rmn->lock);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
+ .invalidate_page = amdgpu_mn_invalidate_page,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
};
@@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev;
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
+ mutex_init(&rmn->lock);
rmn->objects = RB_ROOT;
r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node);
@@ -256,7 +308,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) {
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
return -ENOMEM;
}
}
@@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects);
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
return 0;
}
@@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return;
}
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node);
}
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
mutex_unlock(&adev->mn_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9a025a77958d..e557fc1f17c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -308,7 +308,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
bool is_iomem;
- int r;
+ long r;
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
@@ -319,14 +319,20 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
}
return 0;
}
+
+ r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
+
r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
- if (r) {
+ if (r)
return r;
- }
+
bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
- if (ptr) {
+ if (ptr)
*ptr = bo->kptr;
- }
+
return 0;
}
@@ -418,9 +424,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
bo->adev->vram_pin_size += amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size += amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p pin failed\n", bo);
@@ -450,9 +458,11 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
- else
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ } else
bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
} else {
dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
@@ -470,6 +480,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}
+static const char *amdgpu_vram_names[] = {
+ "UNKNOWN",
+ "GDDR1",
+ "DDR2",
+ "GDDR3",
+ "GDDR4",
+ "GDDR5",
+ "HBM",
+ "DDR3"
+};
+
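
Indexing amdgpu_vram_names[] directly by adev->mc.vram_type assumes the type value always stays within the table. A defensive variant would clamp out-of-range values first; a hedged sketch (not what the patch itself does):

	#include <linux/kernel.h>

	/* Hypothetical bounds-checked lookup; entry 0 is "UNKNOWN". */
	static const char *vram_name(unsigned int type)
	{
		if (type >= ARRAY_SIZE(amdgpu_vram_names))
			type = 0;
		return amdgpu_vram_names[type];
	}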
int amdgpu_bo_init(struct amdgpu_device *adev)
{
/* Add an MTRR for the VRAM */
@@ -478,8 +499,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->mc.mc_vram_size >> 20,
(unsigned long long)adev->mc.aper_size >> 20);
- DRM_INFO("RAM width %dbits DDR\n",
- adev->mc.vram_width);
+ DRM_INFO("RAM width %dbits %s\n",
+ adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
return amdgpu_ttm_init(adev);
}
@@ -602,6 +623,10 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
+ /* Can't move a pinned BO to visible VRAM */
+ if (abo->pin_count > 0)
+ return -EINVAL;
+
/* hurrah the memory is not visible ! */
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 3cb6d6c413c7..e9c6ae6ed2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
+ if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
}
@@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle)
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
- if (amdgpu_dpm == 0)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- }
+ if (adev->pp_enabled)
+ adev->pm.dpm_enabled = true;
#endif
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 56c07e3fdb33..972eed2ef787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -236,7 +236,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- r = amdgpu_fence_driver_init_ring(ring);
+ r = amdgpu_fence_driver_init_ring(ring,
+ amdgpu_sched_hw_submission);
if (r)
return r;
}
@@ -352,30 +353,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
}
}
-/**
- * amdgpu_ring_from_fence - get ring from fence
- *
- * @f: fence structure
- *
- * Extract the ring a fence belongs to. Handles both scheduler as
- * well as hardware fences.
- */
-struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f)
-{
- struct amdgpu_fence *a_fence;
- struct amd_sched_fence *s_fence;
-
- s_fence = to_amd_sched_fence(f);
- if (s_fence)
- return container_of(s_fence->sched, struct amdgpu_ring, sched);
-
- a_fence = to_amdgpu_fence(f);
- if (a_fence)
- return a_fence->ring;
-
- return NULL;
-}
-
/*
* Debugfs info
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 2faf03bcda21..8bf84efafb04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -60,9 +60,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
INIT_LIST_HEAD(&sa_manager->flist[i]);
- }
r = amdgpu_bo_create(adev, size, align, true, domain,
0, NULL, NULL, &sa_manager->bo);
@@ -228,11 +227,9 @@ static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
unsigned soffset, eoffset, wasted;
int i;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (!list_empty(&sa_manager->flist[i])) {
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
+ if (!list_empty(&sa_manager->flist[i]))
return true;
- }
- }
soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
@@ -265,12 +262,11 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
/* go over all fence list and try to find the closest sa_bo
* of the current last
*/
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
struct amdgpu_sa_bo *sa_bo;
- if (list_empty(&sa_manager->flist[i])) {
+ if (list_empty(&sa_manager->flist[i]))
continue;
- }
sa_bo = list_first_entry(&sa_manager->flist[i],
struct amdgpu_sa_bo, flist);
@@ -299,7 +295,9 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
}
if (best_bo) {
- uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
+ uint32_t idx = best_bo->fence->context;
+
+ idx %= AMDGPU_SA_NUM_FENCE_LISTS;
++tries[idx];
sa_manager->hole = best_bo->olist.prev;
@@ -315,8 +313,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
struct amdgpu_sa_bo **sa_bo,
unsigned size, unsigned align)
{
- struct fence *fences[AMDGPU_MAX_RINGS];
- unsigned tries[AMDGPU_MAX_RINGS];
+ struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
+ unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
unsigned count;
int i, r;
signed long t;
@@ -338,7 +336,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
spin_lock(&sa_manager->wq.lock);
do {
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
fences[i] = NULL;
tries[i] = 0;
}
@@ -355,7 +353,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
/* see if we can skip over some allocations */
} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
- for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+ for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
if (fences[i])
fences[count++] = fence_get(fences[i]);
@@ -397,8 +395,9 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
spin_lock(&sa_manager->wq.lock);
if (fence && !fence_is_signaled(fence)) {
uint32_t idx;
+
(*sa_bo)->fence = fence_get(fence);
- idx = amdgpu_ring_from_fence(fence)->idx;
+ idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
} else {
amdgpu_sa_bo_remove_locked(*sa_bo);
@@ -410,25 +409,6 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
#if defined(CONFIG_DEBUG_FS)
-static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
-{
- struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
- struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
-
- if (a_fence)
- seq_printf(m, " protected by 0x%016llx on ring %d",
- a_fence->seq, a_fence->ring->idx);
-
- if (s_fence) {
- struct amdgpu_ring *ring;
-
-
- ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
- seq_printf(m, " protected by 0x%016x on ring %d",
- s_fence->base.seqno, ring->idx);
- }
-}
-
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
@@ -445,8 +425,11 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
}
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
+
if (i->fence)
- amdgpu_sa_bo_dump_fence(i->fence, m);
+ seq_printf(m, " protected by 0x%08x on context %d",
+ i->fence->seqno, i->fence->context);
+
seq_printf(m, "\n");
}
spin_unlock(&sa_manager->wq.lock);
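
With the switch from per-ring to per-context fence lists, the list index is simply the fence context reduced modulo AMDGPU_SA_NUM_FENCE_LISTS. Since fence contexts come from a global monotonic counter, consecutive contexts spread evenly over the lists; a quick user-space illustration (the list count is an assumed stand-in):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_FENCE_LISTS 32u   /* assumed stand-in for AMDGPU_SA_NUM_FENCE_LISTS */

	int main(void)
	{
		/* Consecutive contexts from the global counter land on
		 * consecutive lists, so the modulo spreads them evenly. */
		for (uint64_t ctx = 100; ctx < 105; ++ctx)
			printf("context %llu -> list %llu\n",
			       (unsigned long long)ctx,
			       (unsigned long long)(ctx % NUM_FENCE_LISTS));
		return 0;
	}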
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c15be00de904..c48b4fce5e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -37,6 +37,8 @@ struct amdgpu_sync_entry {
struct fence *fence;
};
+static struct kmem_cache *amdgpu_sync_slab;
+
/**
* amdgpu_sync_create - zero init sync object
*
@@ -50,14 +52,18 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
sync->last_vm_update = NULL;
}
+/**
+ * amdgpu_sync_same_dev - test if a fence belongs to us
+ *
+ * @adev: amdgpu device to use for the test
+ * @f: fence to test
+ *
+ * Test if the fence was issued by us.
+ */
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
{
- struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
- if (a_fence)
- return a_fence->ring->adev == adev;
-
if (s_fence) {
struct amdgpu_ring *ring;
@@ -68,17 +74,31 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
return false;
}
-static bool amdgpu_sync_test_owner(struct fence *f, void *owner)
+/**
+ * amdgpu_sync_get_owner - extract the owner of a fence
+ *
+ * @f: fence to get the owner from
+ *
+ * Extract who originally created the fence.
+ */
+static void *amdgpu_sync_get_owner(struct fence *f)
{
- struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
if (s_fence)
- return s_fence->owner == owner;
- if (a_fence)
- return a_fence->owner == owner;
- return false;
+ return s_fence->owner;
+
+ return AMDGPU_FENCE_OWNER_UNDEFINED;
}
+/**
+ * amdgpu_sync_keep_later - Keep the later fence
+ *
+ * @keep: existing fence to test
+ * @fence: new fence
+ *
+ * Either keep the existing fence or the new one, depending on which one is later.
+ */
static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
{
if (*keep && fence_is_later(*keep, fence))
@@ -104,7 +124,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
if (amdgpu_sync_same_dev(adev, f) &&
- amdgpu_sync_test_owner(f, AMDGPU_FENCE_OWNER_VM))
+ amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
hash_for_each_possible(sync->fences, e, node, f->context) {
@@ -115,7 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
}
- e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL);
+ e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!e)
return -ENOMEM;
@@ -124,18 +144,6 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
}
-static void *amdgpu_sync_get_owner(struct fence *f)
-{
- struct amdgpu_fence *a_fence = to_amdgpu_fence(f);
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
-
- if (s_fence)
- return s_fence->owner;
- else if (a_fence)
- return a_fence->owner;
- return AMDGPU_FENCE_OWNER_UNDEFINED;
-}
-
/**
* amdgpu_sync_resv - sync to a reservation object
*
@@ -208,7 +216,7 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
f = e->fence;
hash_del(&e->node);
- kfree(e);
+ kmem_cache_free(amdgpu_sync_slab, e);
if (!fence_is_signaled(f))
return f;
@@ -231,7 +239,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
hash_del(&e->node);
fence_put(e->fence);
- kfree(e);
+ kmem_cache_free(amdgpu_sync_slab, e);
}
return 0;
@@ -253,8 +261,34 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
hash_for_each_safe(sync->fences, i, tmp, e, node) {
hash_del(&e->node);
fence_put(e->fence);
- kfree(e);
+ kmem_cache_free(amdgpu_sync_slab, e);
}
fence_put(sync->last_vm_update);
}
+
+/**
+ * amdgpu_sync_init - init sync object subsystem
+ *
+ * Allocate the slab allocator.
+ */
+int amdgpu_sync_init(void)
+{
+ amdgpu_sync_slab = kmem_cache_create(
+ "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!amdgpu_sync_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * amdgpu_sync_fini - fini sync object subsystem
+ *
+ * Free the slab allocator.
+ */
+void amdgpu_sync_fini(void)
+{
+ kmem_cache_destroy(amdgpu_sync_slab);
+}
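
The sync entries move from kmalloc to a dedicated slab cache, created once and destroyed at module teardown, exactly as amdgpu_sync_init/fini above do. A minimal kernel-style sketch of that lifecycle with illustrative names:

	#include <linux/errno.h>
	#include <linux/slab.h>

	struct entry { int payload; };    /* illustrative object type */

	static struct kmem_cache *entry_slab;

	static int entry_init(void)       /* called once at module init */
	{
		entry_slab = kmem_cache_create("entry", sizeof(struct entry),
					       0, SLAB_HWCACHE_ALIGN, NULL);
		return entry_slab ? 0 : -ENOMEM;
	}

	static void entry_exit(void)      /* called once at module exit */
	{
		/* Every object must have been freed back to the cache first. */
		kmem_cache_destroy(entry_slab);
	}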
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9ccdd189d717..6f3369de232f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -384,9 +384,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
+ struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ /* Can't move a pinned BO */
+ abo = container_of(bo, struct amdgpu_bo, tbo);
+ if (WARN_ON_ONCE(abo->pin_count > 0))
+ return -EINVAL;
+
adev = amdgpu_get_adev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
amdgpu_move_null(bo, new_mem);
@@ -494,29 +500,32 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
/*
* TTM backend functions.
*/
+struct amdgpu_ttm_gup_task_list {
+ struct list_head list;
+ struct task_struct *task;
+};
+
struct amdgpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct amdgpu_device *adev;
- u64 offset;
- uint64_t userptr;
- struct mm_struct *usermm;
- uint32_t userflags;
+ struct ttm_dma_tt ttm;
+ struct amdgpu_device *adev;
+ u64 offset;
+ uint64_t userptr;
+ struct mm_struct *usermm;
+ uint32_t userflags;
+ spinlock_t guptasklock;
+ struct list_head guptasks;
+ atomic_t mmu_invalidations;
};
-/* prepare the sg table with the user pages */
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
- struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned pinned = 0, nents;
- int r;
-
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
- enum dma_data_direction direction = write ?
- DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ unsigned pinned = 0;
+ int r;
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
- /* check that we only pin down anonymous memory
+ /* check that we only use anonymous memory
to prevent problems with writeback */
unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
struct vm_area_struct *vma;
@@ -529,10 +538,20 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
do {
unsigned num_pages = ttm->num_pages - pinned;
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
- struct page **pages = ttm->pages + pinned;
+ struct page **p = pages + pinned;
+ struct amdgpu_ttm_gup_task_list guptask;
+
+ guptask.task = current;
+ spin_lock(&gtt->guptasklock);
+ list_add(&guptask.list, &gtt->guptasks);
+ spin_unlock(&gtt->guptasklock);
+
+ r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+
+ spin_lock(&gtt->guptasklock);
+ list_del(&guptask.list);
+ spin_unlock(&gtt->guptasklock);
- r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
if (r < 0)
goto release_pages;
@@ -540,6 +559,25 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
} while (pinned < ttm->num_pages);
+ return 0;
+
+release_pages:
+ release_pages(pages, pinned, 0);
+ return r;
+}
+
+/* prepare the sg table with the user pages */
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+ struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned nents;
+ int r;
+
+ int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ enum dma_data_direction direction = write ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
@@ -558,9 +596,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
release_sg:
kfree(ttm->sg);
-
-release_pages:
- release_pages(ttm->pages, pinned, 0);
return r;
}
@@ -587,7 +622,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
sg_free_table(ttm->sg);
@@ -783,6 +818,10 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
gtt->userptr = addr;
gtt->usermm = current->mm;
gtt->userflags = flags;
+ spin_lock_init(&gtt->guptasklock);
+ INIT_LIST_HEAD(&gtt->guptasks);
+ atomic_set(&gtt->mmu_invalidations, 0);
+
return 0;
}
@@ -800,21 +839,40 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_gup_task_list *entry;
unsigned long size;
- if (gtt == NULL)
- return false;
-
- if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+ if (gtt == NULL || !gtt->userptr)
return false;
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
+ spin_lock(&gtt->guptasklock);
+ list_for_each_entry(entry, &gtt->guptasks, list) {
+ if (entry->task == current) {
+ spin_unlock(&gtt->guptasklock);
+ return false;
+ }
+ }
+ spin_unlock(&gtt->guptasklock);
+
+ atomic_inc(&gtt->mmu_invalidations);
+
return true;
}
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ int prev_invalidated = *last_invalidated;
+
+ *last_invalidated = atomic_read(&gtt->mmu_invalidations);
+ return prev_invalidated != *last_invalidated;
+}
+
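
The mmu_invalidations counter introduced for the TTM userptr path lets callers detect whether an invalidation ran between pinning the pages and binding them: snapshot the counter first, then recheck before committing. A simplified user-space sketch of the idea:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int invalidations;  /* bumped by the notifier side */

	static int snapshot(void)
	{
		return atomic_load(&invalidations);
	}

	/* Any change since the snapshot means the pinned pages may be stale
	 * and the pin has to be retried. */
	static bool still_valid(int snap)
	{
		return atomic_load(&invalidations) == snap;
	}

	int main(void)
	{
		int snap = snapshot();
		atomic_fetch_add(&invalidations, 1);   /* notifier fires */
		printf("still valid: %d\n", still_valid(snap));   /* prints 0 */
		return 0;
	}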
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 1de82bf4fc79..338da80006b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -241,32 +241,28 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring = &adev->uvd.ring;
- int i, r;
+ unsigned size;
+ void *ptr;
+ int i;
if (adev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
- uint32_t handle = atomic_read(&adev->uvd.handles[i]);
- if (handle != 0) {
- struct fence *fence;
+ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
- amdgpu_uvd_note_usage(adev);
+ if (i == AMDGPU_MAX_UVD_HANDLES)
+ return 0;
- r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
- if (r) {
- DRM_ERROR("Error destroying UVD (%d)!\n", r);
- continue;
- }
+ size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+ ptr = adev->uvd.cpu_addr;
- fence_wait(fence, false);
- fence_put(fence);
+ adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+ if (!adev->uvd.saved_bo)
+ return -ENOMEM;
- adev->uvd.filp[i] = NULL;
- atomic_set(&adev->uvd.handles[i], 0);
- }
- }
+ memcpy(adev->uvd.saved_bo, ptr, size);
return 0;
}
@@ -275,23 +271,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
unsigned size;
void *ptr;
- const struct common_firmware_header *hdr;
- unsigned offset;
if (adev->uvd.vcpu_bo == NULL)
return -EINVAL;
- hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
- (adev->uvd.fw->size) - offset);
-
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
- size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr = adev->uvd.cpu_addr;
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
- memset(ptr, 0, size);
+ if (adev->uvd.saved_bo != NULL) {
+ memcpy(ptr, adev->uvd.saved_bo, size);
+ kfree(adev->uvd.saved_bo);
+ adev->uvd.saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned offset;
+
+ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
+ (adev->uvd.fw->size) - offset);
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ memset(ptr, 0, size);
+ }
return 0;
}
@@ -539,13 +541,6 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
return -EINVAL;
}
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
- DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
- return r;
- }
-
r = amdgpu_bo_kmap(bo, &ptr);
if (r) {
DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
@@ -886,8 +881,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib,
- AMDGPU_FENCE_OWNER_UNDEFINED, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = f;
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 39c3aa60381a..4bec0c108cea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -425,8 +425,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, AMDGPU_FENCE_OWNER_UNDEFINED,
- NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = f;
if (r)
goto err;
@@ -487,9 +487,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ job->fence = f;
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d9dc8bea5e98..b6c011b83641 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -95,6 +95,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
entry->priority = 0;
entry->tv.bo = &vm->page_directory->tbo;
entry->tv.shared = true;
+ entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -188,6 +189,13 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (!is_later && owner == (long)id &&
pd_addr == id->pd_gpu_addr) {
+ r = amdgpu_sync_fence(ring->adev, sync,
+ id->mgr_id->active);
+ if (r) {
+ mutex_unlock(&adev->vm_manager.lock);
+ return r;
+ }
+
fence_put(id->mgr_id->active);
id->mgr_id->active = fence_get(fence);
@@ -234,19 +242,68 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vmid: vmid number to use
+ * @vm_id: vmid number to use
* @pd_addr: address of the page directory
*
* Emit a VM flush when it is necessary.
*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vmid,
- uint64_t pd_addr)
+ unsigned vm_id, uint64_t pd_addr,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size)
{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+ bool gds_switch_needed = ring->funcs->emit_gds_switch && (
+ mgr_id->gds_base != gds_base ||
+ mgr_id->gds_size != gds_size ||
+ mgr_id->gws_base != gws_base ||
+ mgr_id->gws_size != gws_size ||
+ mgr_id->oa_base != oa_base ||
+ mgr_id->oa_size != oa_size);
+
+ if (ring->funcs->emit_pipeline_sync && (
+ pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
+ amdgpu_ring_emit_pipeline_sync(ring);
+
if (pd_addr != AMDGPU_VM_NO_FLUSH) {
- trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid);
- amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr);
+ trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
+ amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
}
+
+ if (gds_switch_needed) {
+ mgr_id->gds_base = gds_base;
+ mgr_id->gds_size = gds_size;
+ mgr_id->gws_base = gws_base;
+ mgr_id->gws_size = gws_size;
+ mgr_id->oa_base = oa_base;
+ mgr_id->oa_size = oa_size;
+ amdgpu_ring_emit_gds_switch(ring, vm_id,
+ gds_base, gds_size,
+ gws_base, gws_size,
+ oa_base, oa_size);
+ }
+}
+
+/**
+ * amdgpu_vm_reset_id - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ * @vm_id: vmid number to use
+ *
+ * Reset saved GDS, GWS and OA to force a switch on the next flush.
+ */
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
+{
+ struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+
+ mgr_id->gds_base = 0;
+ mgr_id->gds_size = 0;
+ mgr_id->gws_base = 0;
+ mgr_id->gws_size = 0;
+ mgr_id->oa_base = 0;
+ mgr_id->oa_size = 0;
}
/**
@@ -810,7 +867,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
while (start != mapping->it.last + 1) {
uint64_t last;
- last = min((uint64_t)mapping->it.last, start + max_size);
+ last = min((uint64_t)mapping->it.last, start + max_size - 1);
r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
start, last, flags, addr,
fence);
@@ -818,7 +875,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
return r;
start = last + 1;
- addr += max_size;
+ addr += max_size * AMDGPU_GPU_PAGE_SIZE;
}
return 0;
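
The two one-line fixes in this hunk are about inclusive bounds and units: it.last is inclusive, so a chunk of max_size pages must end at start + max_size - 1, and the destination address advances in bytes, not pages. A worked example with assumed numbers:

	#include <stdint.h>
	#include <stdio.h>

	#define GPU_PAGE_SIZE 4096ull   /* assumed stand-in for AMDGPU_GPU_PAGE_SIZE */

	int main(void)
	{
		uint64_t start = 0, it_last = 9;   /* 10 pages, inclusive range */
		uint64_t max_size = 4;             /* pages per update chunk */
		uint64_t addr = 0x100000;          /* destination address in bytes */

		while (start != it_last + 1) {
			uint64_t last = start + max_size - 1;   /* inclusive end */

			if (last > it_last)
				last = it_last;
			printf("map pages %llu..%llu at 0x%llx\n",
			       (unsigned long long)start, (unsigned long long)last,
			       (unsigned long long)addr);
			start = last + 1;
			addr += max_size * GPU_PAGE_SIZE;       /* bytes, not pages */
		}
		return 0;
	}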
@@ -914,22 +971,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
int r;
- spin_lock(&vm->freed_lock);
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- spin_unlock(&vm->freed_lock);
+
r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
0, NULL);
kfree(mapping);
if (r)
return r;
- spin_lock(&vm->freed_lock);
}
- spin_unlock(&vm->freed_lock);
-
return 0;
}
@@ -956,9 +1009,8 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
bo_va = list_first_entry(&vm->invalidated,
struct amdgpu_bo_va, vm_status);
spin_unlock(&vm->status_lock);
- mutex_lock(&bo_va->mutex);
+
r = amdgpu_vm_bo_update(adev, bo_va, NULL);
- mutex_unlock(&bo_va->mutex);
if (r)
return r;
@@ -1002,7 +1054,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
INIT_LIST_HEAD(&bo_va->vm_status);
- mutex_init(&bo_va->mutex);
+
list_add_tail(&bo_va->bo_list, &bo->va);
return bo_va;
@@ -1054,9 +1106,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr);
- spin_unlock(&vm->it_lock);
if (it) {
struct amdgpu_bo_va_mapping *tmp;
tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1080,13 +1130,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
mapping->offset = offset;
mapping->flags = flags;
- mutex_lock(&bo_va->mutex);
list_add(&mapping->list, &bo_va->invalids);
- mutex_unlock(&bo_va->mutex);
- spin_lock(&vm->it_lock);
interval_tree_insert(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
- trace_amdgpu_vm_bo_map(bo_va, mapping);
/* Make sure the page tables are allocated */
saddr >>= amdgpu_vm_block_size;
@@ -1130,6 +1175,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
+ entry->user_pages = NULL;
vm->page_tables[pt_idx].addr = 0;
}
@@ -1137,9 +1183,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
error_free:
list_del(&mapping->list);
- spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
kfree(mapping);
@@ -1168,7 +1212,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- mutex_lock(&bo_va->mutex);
+
list_for_each_entry(mapping, &bo_va->valids, list) {
if (mapping->it.start == saddr)
break;
@@ -1182,25 +1226,18 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
break;
}
- if (&mapping->list == &bo_va->invalids) {
- mutex_unlock(&bo_va->mutex);
+ if (&mapping->list == &bo_va->invalids)
return -ENOENT;
- }
}
- mutex_unlock(&bo_va->mutex);
+
list_del(&mapping->list);
- spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- if (valid) {
- spin_lock(&vm->freed_lock);
+ if (valid)
list_add(&mapping->list, &vm->freed);
- spin_unlock(&vm->freed_lock);
- } else {
+ else
kfree(mapping);
- }
return 0;
}
@@ -1229,23 +1266,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
- spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
- spin_lock(&vm->freed_lock);
list_add(&mapping->list, &vm->freed);
- spin_unlock(&vm->freed_lock);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
- spin_lock(&vm->it_lock);
interval_tree_remove(&mapping->it, &vm->va);
- spin_unlock(&vm->it_lock);
kfree(mapping);
}
+
fence_put(bo_va->last_pt_update);
- mutex_destroy(&bo_va->mutex);
kfree(bo_va);
}
@@ -1298,8 +1329,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
- spin_lock_init(&vm->it_lock);
- spin_lock_init(&vm->freed_lock);
+
pd_size = amdgpu_vm_directory_size(adev);
pd_entries = amdgpu_vm_num_pdes(adev);
@@ -1386,6 +1416,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_vm_id *id = &vm->ids[i];
@@ -1410,9 +1441,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
INIT_LIST_HEAD(&adev->vm_manager.ids_lru);
/* skip over VMID 0, since it is the system VM */
- for (i = 1; i < adev->vm_manager.num_ids; ++i)
+ for (i = 1; i < adev->vm_manager.num_ids; ++i) {
+ amdgpu_vm_reset_id(adev, i);
list_add_tail(&adev->vm_manager.ids[i].list,
&adev->vm_manager.ids_lru);
+ }
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 474ca02b0949..1f9109d3348b 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -3017,7 +3017,6 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
&memory_level->MinVddcPhases);
memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 1;
memory_level->UpH = 0;
memory_level->DownH = 100;
memory_level->VoltageDownH = 0;
@@ -3376,7 +3375,6 @@ static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
- graphic_level->EnabledForActivity = 1;
return 0;
}
@@ -3407,6 +3405,7 @@ static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
PPSMC_DISPLAY_WATERMARK_HIGH;
}
+ pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
@@ -3450,6 +3449,8 @@ static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
return ret;
}
+ pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
if ((dpm_table->mclk_table.count >= 2) &&
((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
pi->smc_state_table.MemoryLevel[1].MinVddc =
@@ -4381,26 +4382,6 @@ static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
}
}
}
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- levels = 0;
- tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- levels++;
- if (levels) {
- ret = ci_dpm_force_state_pcie(adev, level);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- }
} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
if ((!pi->sclk_dpm_key_disabled) &&
pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
@@ -5395,30 +5376,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev)
ci_update_current_ps(adev, boot_ps);
- if (adev->irq.installed &&
- amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
-#if 0
- PPSMC_Result result;
-#endif
- ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
- CISLANDS_TEMP_RANGE_MAX);
- if (ret) {
- DRM_ERROR("ci_thermal_set_temperature_range failed\n");
- return ret;
- }
- amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
-
-#if 0
- result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
-
- if (result != PPSMC_Result_OK)
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
-#endif
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 192ab13e9f05..bddc9ba11495 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2028,8 +2028,6 @@ static int cik_common_early_init(void *handle)
adev->asic_funcs = &cik_asic_funcs;
- adev->has_uvd = true;
-
adev->rev_id = cik_get_rev_id(adev);
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 266db15daf2c..d3ac3298fba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -261,6 +261,13 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
+static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 1);
+}
+
/**
* cik_sdma_ring_emit_fence - emit a fence on the DMA ring
*
@@ -636,8 +643,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
- r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
- NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
@@ -663,7 +669,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
err1:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -816,6 +823,30 @@ static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
}
/**
+ * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (CIK).
+ */
+static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
+ SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
+ SDMA_POLL_REG_MEM_EXTRA_M));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq); /* reference */
+ amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
+}
+
+/**
* cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
@@ -1270,8 +1301,10 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.parse_cs = NULL,
.emit_ib = cik_sdma_ring_emit_ib,
.emit_fence = cik_sdma_ring_emit_fence,
+ .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
.test_ring = cik_sdma_ring_test_ring,
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
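
The new cik_sdma pipeline sync makes the SDMA engine itself poll the fence location until the sequence number matches (EXTRA_FUNC(3) selects "equal"). A host-side analogue of the wait the packet encodes, names invented for illustration:

/* What the POLL_REG_MEM packet amounts to, expressed as CPU code. */
static void poll_fence_equal(u32 *fence_cpu_addr, u32 seq)
{
	while ((READ_ONCE(*fence_cpu_addr) & 0xfffffff) != (seq & 0xfffffff))
		cpu_relax();
}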
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index e3ff809a0cae..6de2ce535e37 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1668,6 +1668,9 @@ static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
int i;
+ if (!amdgpu_audio)
+ return;
+
if (!adev->mode_info.audio.enabled)
return;
@@ -1973,7 +1976,7 @@ static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
-static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v10_0_afmt_init(struct amdgpu_device *adev)
{
int i;
@@ -1986,8 +1989,16 @@ static void dce_v10_0_afmt_init(struct amdgpu_device *adev)
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
+ } else {
+ int j;
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ return -ENOMEM;
}
}
+ return 0;
}
static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
@@ -2064,8 +2075,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
target_fb = fb;
- }
- else {
+ } else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
}
@@ -2079,9 +2089,9 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic)
+ if (atomic) {
fb_location = amdgpu_bo_gpu_offset(rbo);
- else {
+ } else {
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(rbo);
@@ -2700,13 +2710,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
dce_v10_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
if (amdgpu_crtc->enabled) {
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
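
Replacing drm_vblank_pre/post_modeset() with drm_vblank_off/on() moves the DPMS paths onto the API that also wakes and rejects vblank waiters while the CRTC is down; a sketch of the pairing, with the condition variable invented:

if (dpms_on)
	drm_vblank_on(dev, amdgpu_crtc->crtc_id);	/* re-arm vblanks */
else
	drm_vblank_off(dev, amdgpu_crtc->crtc_id);	/* fail new waits */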
@@ -2980,8 +2990,6 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- adev->mode_info.mode_config_initialized = true;
-
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.max_width = 16384;
@@ -3012,7 +3020,9 @@ static int dce_v10_0_sw_init(void *handle)
return -EINVAL;
/* setup afmt */
- dce_v10_0_afmt_init(adev);
+ r = dce_v10_0_afmt_init(adev);
+ if (r)
+ return r;
r = dce_v10_0_audio_init(adev);
if (r)
@@ -3020,7 +3030,8 @@ static int dce_v10_0_sw_init(void *handle)
drm_kms_helper_poll_init(adev->ddev);
- return r;
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
}
static int dce_v10_0_sw_fini(void *handle)
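
The afmt_init conversions in all three DCE files use the same unwind-on-partial-failure idiom. A generic, self-contained sketch of that pattern (function and parameters invented):

/* Allocate n objects; on the first failure free objects 0..i-1 and
 * return -ENOMEM so the caller can abort initialization cleanly. */
static int alloc_all(void **objs, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		objs[i] = kzalloc(size, GFP_KERNEL);
		if (!objs[i]) {
			while (--i >= 0) {
				kfree(objs[i]);
				objs[i] = NULL;
			}
			return -ENOMEM;
		}
	}
	return 0;
}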
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 6b6c9b6879ae..e9ccc6b787f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1658,6 +1658,9 @@ static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
{
int i;
+ if (!amdgpu_audio)
+ return;
+
if (!adev->mode_info.audio.enabled)
return;
@@ -1963,7 +1966,7 @@ static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
-static void dce_v11_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
{
int i;
@@ -1976,8 +1979,16 @@ static void dce_v11_0_afmt_init(struct amdgpu_device *adev)
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
+ } else {
+ int j;
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ return -ENOMEM;
}
}
+ return 0;
}
static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
@@ -2054,8 +2065,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
target_fb = fb;
- }
- else {
+ } else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
}
@@ -2069,9 +2079,9 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic)
+ if (atomic) {
fb_location = amdgpu_bo_gpu_offset(rbo);
- else {
+ } else {
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(rbo);
@@ -2691,13 +2701,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
dce_v11_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
if (amdgpu_crtc->enabled) {
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2961,7 +2971,7 @@ static int dce_v11_0_sw_init(void *handle)
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
if (r)
- return r;
+ return r;
}
for (i = 8; i < 20; i += 2) {
@@ -2973,9 +2983,7 @@ static int dce_v11_0_sw_init(void *handle)
/* HPD hotplug */
r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
if (r)
- return r;
-
- adev->mode_info.mode_config_initialized = true;
+ return r;
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
@@ -2994,6 +3002,7 @@ static int dce_v11_0_sw_init(void *handle)
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
+
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
r = dce_v11_0_crtc_init(adev, i);
@@ -3007,7 +3016,9 @@ static int dce_v11_0_sw_init(void *handle)
return -EINVAL;
/* setup afmt */
- dce_v11_0_afmt_init(adev);
+ r = dce_v11_0_afmt_init(adev);
+ if (r)
+ return r;
r = dce_v11_0_audio_init(adev);
if (r)
@@ -3015,7 +3026,8 @@ static int dce_v11_0_sw_init(void *handle)
drm_kms_helper_poll_init(adev->ddev);
- return r;
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
}
static int dce_v11_0_sw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 56bea36a6b18..e56b55d8c280 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1639,6 +1639,9 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
int i;
+ if (!amdgpu_audio)
+ return;
+
if (!adev->mode_info.audio.enabled)
return;
@@ -1910,7 +1913,7 @@ static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
-static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
{
int i;
@@ -1923,8 +1926,16 @@ static void dce_v8_0_afmt_init(struct amdgpu_device *adev)
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
+ } else {
+ int j;
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ return -ENOMEM;
}
}
+ return 0;
}
static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
@@ -2001,8 +2012,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (atomic) {
amdgpu_fb = to_amdgpu_framebuffer(fb);
target_fb = fb;
- }
- else {
+ } else {
amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
}
@@ -2016,9 +2026,9 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
if (unlikely(r != 0))
return r;
- if (atomic)
+ if (atomic) {
fb_location = amdgpu_bo_gpu_offset(rbo);
- else {
+ } else {
r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(rbo);
@@ -2612,13 +2622,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_on(dev, amdgpu_crtc->crtc_id);
dce_v8_0_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id);
+ drm_vblank_off(dev, amdgpu_crtc->crtc_id);
if (amdgpu_crtc->enabled) {
dce_v8_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -2890,8 +2900,6 @@ static int dce_v8_0_sw_init(void *handle)
if (r)
return r;
- adev->mode_info.mode_config_initialized = true;
-
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.max_width = 16384;
@@ -2922,7 +2930,9 @@ static int dce_v8_0_sw_init(void *handle)
return -EINVAL;
/* setup afmt */
- dce_v8_0_afmt_init(adev);
+ r = dce_v8_0_afmt_init(adev);
+ if (r)
+ return r;
r = dce_v8_0_audio_init(adev);
if (r)
@@ -2930,7 +2940,8 @@ static int dce_v8_0_sw_init(void *handle)
drm_kms_helper_poll_init(adev->ddev);
- return r;
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
}
static int dce_v8_0_sw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4411b94775db..bb8709066fd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1925,6 +1925,25 @@ static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
/**
+ * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate on the cp.
+ */
+static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0) |
+ WR_CONFIRM));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 1);
+}
+
+/**
* gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
*
* @adev: amdgpu_device pointer
@@ -2117,8 +2136,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
- NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
@@ -2145,7 +2163,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
err2:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -3023,6 +3042,26 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
return 0;
}
+/**
+ * gfx_v7_0_ring_emit_pipeline_sync - sync the command pipeline via the CP
+ *
+ * @ring: the ring to emit the commands to
+ *
+ * Sync the command pipeline with the PFP, i.e. wait for everything
+ * previously submitted to be completed.
+ */
+static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ if (usepfp) {
+ /* sync CE with ME to prevent CE fetching CEIB before the context switch is done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+}
+
/*
* vm
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3054,14 +3093,6 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
- if (usepfp) {
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
- amdgpu_ring_write(ring, 0);
- }
-
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
@@ -5142,9 +5173,11 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
+ .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -5158,9 +5191,11 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
+ .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v7_0_ring_test_ring,
.test_ib = gfx_v7_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
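
With pipeline_sync and hdp_invalidate exposed as their own ring hooks, a submission can be bracketed as sketched below; the exact call order belongs to the scheduler, so treat this as illustrative, with vm_id, pd_addr and ib assumed in scope:

ring->funcs->emit_hdp_flush(ring);		/* make GPU writes visible */
ring->funcs->emit_pipeline_sync(ring);		/* wait for prior work */
ring->funcs->emit_vm_flush(ring, vm_id, pd_addr);
ring->funcs->emit_ib(ring, ib);
ring->funcs->emit_hdp_invalidate(ring);		/* drop stale HDP contents */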
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1b85c001f860..f0c7b3596480 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -706,8 +706,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
- NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err2;
@@ -733,7 +732,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
}
err2:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
@@ -1262,8 +1262,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* schedule the ib on the ring */
- r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
- NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r) {
DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
goto fail;
@@ -1291,7 +1290,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
fail:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
return r;
}
@@ -4589,6 +4589,18 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}
+static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0) |
+ WR_CONFIRM));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 1);
+}
+
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
@@ -4682,8 +4694,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
}
-static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
@@ -4706,6 +4717,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
+}
+
+static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
+{
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -5028,9 +5045,11 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
+ .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -5044,9 +5063,11 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
+ .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 711840a23bd3..05b0353d3880 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -339,7 +339,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
tmp = RREG32(mmHDP_MISC_CNTL);
- tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
WREG32(mmHDP_MISC_CNTL, tmp);
tmp = RREG32(mmHDP_HOST_PATH_CNTL);
@@ -903,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle)
gmc_v7_0_set_gart_funcs(adev);
gmc_v7_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -927,6 +919,14 @@ static int gmc_v7_0_sw_init(void *handle)
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 757803ae7c4a..02deb3229405 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -386,7 +386,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
tmp = RREG32(mmHDP_MISC_CNTL);
- tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
WREG32(mmHDP_MISC_CNTL, tmp);
tmp = RREG32(mmHDP_HOST_PATH_CNTL);
@@ -863,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle)
gmc_v8_0_set_gart_funcs(adev);
gmc_v8_0_set_irq_funcs(adev);
- if (adev->flags & AMD_IS_APU) {
- adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- } else {
- u32 tmp = RREG32(mmMC_SEQ_MISC0);
- tmp &= MC_SEQ_MISC0__MT__MASK;
- adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
- }
-
return 0;
}
@@ -881,12 +873,27 @@ static int gmc_v8_0_late_init(void *handle)
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
+#define mmMC_SEQ_MISC0_FIJI 0xA71
+
static int gmc_v8_0_sw_init(void *handle)
{
int r;
int dma_bits;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->flags & AMD_IS_APU) {
+ adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ } else {
+ u32 tmp;
+
+ if (adev->asic_type == CHIP_FIJI)
+ tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
+ else
+ tmp = RREG32(mmMC_SEQ_MISC0);
+ tmp &= MC_SEQ_MISC0__MT__MASK;
+ adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+ }
+
r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index dddb8d6a81f3..6e0a86a563f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -32,8 +32,8 @@
#include "oss/oss_2_4_d.h"
#include "oss/oss_2_4_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
@@ -300,6 +300,13 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 1);
+}
+
/**
* sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
*
@@ -694,8 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
- NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
@@ -721,7 +727,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
err1:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -874,6 +881,31 @@ static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
}
/**
+ * sdma_v2_4_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (VI).
+ */
+static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq); /* reference */
+ amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+}
+
+/**
* sdma_v2_4_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
@@ -1274,8 +1306,10 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
.parse_cs = NULL,
.emit_ib = sdma_v2_4_ring_emit_ib,
.emit_fence = sdma_v2_4_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
.test_ring = sdma_v2_4_ring_test_ring,
.test_ib = sdma_v2_4_ring_test_ib,
.insert_nop = sdma_v2_4_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 19e02f7a06f3..8c8ca98dd129 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -410,6 +410,14 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
+static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+ SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+ amdgpu_ring_write(ring, mmHDP_DEBUG0);
+ amdgpu_ring_write(ring, 1);
+}
+
/**
* sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
*
@@ -845,8 +853,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_ib_schedule(ring, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED,
- NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
goto err1;
@@ -871,7 +878,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
}
err1:
fence_put(f);
- amdgpu_ib_free(adev, &ib);
+ amdgpu_ib_free(adev, &ib, NULL);
+ fence_put(f);
err0:
amdgpu_wb_free(adev, index);
return r;
@@ -1024,6 +1032,31 @@ static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib
}
/**
+ * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed (VI).
+ */
+static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+ SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq); /* reference */
+ amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+}
+
+/**
* sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
@@ -1541,8 +1574,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
.parse_cs = NULL,
.emit_ib = sdma_v3_0_ring_emit_ib,
.emit_fence = sdma_v3_0_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
+ .emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
.test_ring = sdma_v3_0_ring_test_ring,
.test_ib = sdma_v3_0_ring_test_ib,
.insert_nop = sdma_v3_0_ring_insert_nop,
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b6f7d7bff929..0f14199cf716 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -307,7 +307,7 @@ static int tonga_ih_sw_fini(void *handle)
amdgpu_irq_fini(adev);
amdgpu_ih_ring_fini(adev);
- amdgpu_irq_add_domain(adev);
+ amdgpu_irq_remove_domain(adev);
return 0;
}
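
The tonga_ih change fixes a plain init/fini symmetry bug: sw_fini was adding the IRQ domain a second time instead of removing it. The intended shape, sketched with an invented function name:

static int ih_sw_fini(struct amdgpu_device *adev)
{
	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);
	amdgpu_irq_remove_domain(adev);	/* undo add_domain, don't repeat it */
	return 0;
}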
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index c606ccb38d8b..cb463753115b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v4_2_hw_fini(adev);
if (r)
return r;
- r = uvd_v4_2_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e3c852d9d79a..16476d80f475 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
- r = uvd_v5_0_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3375e614ac67..d49379145ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ r = uvd_v6_0_hw_fini(adev);
+ if (r)
+ return r;
+
/* Skip this for APU for now */
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_uvd_suspend(adev);
if (r)
return r;
}
- r = uvd_v6_0_hw_fini(adev);
- if (r)
- return r;
return r;
}
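
All three UVD suspend paths get the same reordering: stop the hardware before saving its state, rather than the other way around. Condensed, with a placeholder version prefix:

r = uvd_vX_0_hw_fini(adev);	/* quiesce the engine first */
if (r)
	return r;

return amdgpu_uvd_suspend(adev);	/* then stash state/firmware */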
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index b72cf063df1a..1c120efa292c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1071,26 +1071,22 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_TOPAZ:
- adev->has_uvd = false;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = 0x1;
break;
case CHIP_FIJI:
- adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
- adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
- adev->has_uvd = true;
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index d2b49c026cf6..07ac724e3ec9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -107,7 +107,7 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (iminor(inode) != 0)
return -ENODEV;
- is_32bit_user_mode = is_compat_task();
+ is_32bit_user_mode = in_compat_syscall();
if (is_32bit_user_mode == true) {
dev_warn(kfd_device,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index c34c393e9aea..d5e19b5fbbfb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
union SQ_CMD_BITS *in_reg_sq_cmd,
union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
{
- int status;
+ int status = 0;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct HsaDbgWaveMsgAMDGen2 *pMsg;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a902ae037398..ac005796b71c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -311,7 +311,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
goto err_process_pqm_init;
/* init process apertures*/
- process->is_32bit_user_mode = is_compat_task();
+ process->is_32bit_user_mode = in_compat_syscall();
if (kfd_init_apertures(process) != 0)
goto err_init_apretures;
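
Both amdkfd call sites switch from is_compat_task() to in_compat_syscall(). The distinction: the latter tests the ABI of the current syscall rather than a per-task flag, which matters on x86 where a 64-bit task can enter through the 32-bit syscall path. Minimal sketch:

#include <linux/compat.h>

/* True when this particular syscall used the compat (32-bit) entry,
 * even if the calling task is otherwise a 64-bit process. */
bool is_32bit_user_mode = in_compat_syscall();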
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
index 9d4347dd6125..dfe78799100d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_7_2_enum.h
@@ -6225,6 +6225,12 @@ typedef enum TCC_CACHE_POLICIES {
TCC_CACHE_POLICY_STREAM = 0x1,
TCC_CACHE_POLICY_BYPASS = 0x2,
} TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+ MTYPE_NC_NV = 0x0,
+ MTYPE_NC = 0x1,
+ MTYPE_CC = 0x2,
+ MTYPE_UC = 0x3,
+} MTYPE;
typedef enum PERFMON_COUNTER_MODE {
PERFMON_COUNTER_MODE_ACCUM = 0x0,
PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index aec38fc3834f..ab84d4947247 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)(
void *cgs_device,
struct cgs_display_info *info);
+typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+
typedef int (*cgs_call_acpi_method)(void *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
@@ -644,6 +646,8 @@ struct cgs_ops {
cgs_set_clockgating_state set_clockgating_state;
/* display manager */
cgs_get_active_displays_info get_active_displays_info;
+ /* notify dpm enabled */
+ cgs_notify_dpm_enabled notify_dpm_enabled;
/* ACPI */
cgs_call_acpi_method call_acpi_method;
/* get system info */
@@ -734,8 +738,12 @@ struct cgs_device
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
CGS_CALL(set_clockgating_state, dev, block_type, state)
+#define cgs_notify_dpm_enabled(dev, enabled) \
+ CGS_CALL(notify_dpm_enabled, dev, enabled)
+
#define cgs_get_active_displays_info(dev, info) \
CGS_CALL(get_active_displays_info, dev, info)
+
#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
#define cgs_query_system_info(dev, sys_info) \
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index e195bf59da86..043e6ebab575 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,17 +1,17 @@
subdir-ccflags-y += -Iinclude/drm \
- -Idrivers/gpu/drm/amd/powerplay/inc/ \
- -Idrivers/gpu/drm/amd/include/asic_reg \
- -Idrivers/gpu/drm/amd/include \
- -Idrivers/gpu/drm/amd/powerplay/smumgr\
- -Idrivers/gpu/drm/amd/powerplay/hwmgr \
- -Idrivers/gpu/drm/amd/powerplay/eventmgr
+ -I$(FULL_AMD_PATH)/powerplay/inc/ \
+ -I$(FULL_AMD_PATH)/include/asic_reg \
+ -I$(FULL_AMD_PATH)/include \
+ -I$(FULL_AMD_PATH)/powerplay/smumgr\
+ -I$(FULL_AMD_PATH)/powerplay/hwmgr \
+ -I$(FULL_AMD_PATH)/powerplay/eventmgr
AMD_PP_PATH = ../powerplay
PP_LIBS = smumgr hwmgr eventmgr
-AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
+AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
include $(AMD_POWERPLAY)
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 6b52c78cb404..56856a2864d1 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = {
reset_display_configCounter_tasks,
update_dal_configuration_tasks,
vari_bright_resume_tasks,
- block_adjust_power_state_tasks,
setup_asic_tasks,
enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
enable_dynamic_state_management_tasks,
enable_clock_power_gatings_tasks,
enable_disable_bapm_tasks,
initialize_thermal_controller_tasks,
- reset_boot_state_tasks,
+ get_2d_performance_state_tasks,
+ set_performance_state_tasks,
adjust_power_state_tasks,
enable_disable_fps_tasks,
notify_hw_power_source_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index b8d6a82c1be2..5682490337e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -241,6 +241,11 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DynamicUVDState);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEDPM);
+
cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
@@ -744,8 +749,9 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
clock = hwmgr->display_config.min_core_set_clock;
if (clock == 0)
- printk(KERN_ERR "[ powerplay ] min_core_set_clock not set\n");
+ printk(KERN_INFO "[ powerplay ] min_core_set_clock not set\n");
if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 5cca2ecc6bea..89f31bc5b68b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
for(count = 0; count < table->VceLevelCount; count++) {
table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
table->VceLevel[count].MinVoltage |=
(mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
table->VceLevel[count].MinVoltage |=
@@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
for (count = 0; count < table->SamuLevelCount; count++) {
/* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
VOLTAGE_SCALE) << VDDC_SHIFT;
@@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
table->UvdBootLevel = 0;
for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
@@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
fiji_populate_smc_voltage_tables(hwmgr, table);
+ table->SystemFlags = 0;
+
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition))
table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
@@ -2997,6 +3002,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->MemoryThermThrottleEnable = 1;
table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
result = fiji_populate_vr_config(hwmgr, table);
PP_ASSERT_WITH_CODE(0 == result,
@@ -4275,7 +4281,6 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
dpm_table->mclk_table.dpm_levels
[dpm_table->mclk_table.count - 1].value = mclk;
-
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_OD6PlusinACSupport) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -4886,6 +4891,10 @@ static void fiji_print_current_perforce_level(
activity_percent >>= 8;
seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+
+ seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+ seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
}
static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
@@ -5192,6 +5201,67 @@ static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
+static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
+ const struct fiji_performance_level *pl2)
+{
+ return ((pl1->memory_clock == pl2->memory_clock) &&
+ (pl1->engine_clock == pl2->engine_clock) &&
+ (pl1->pcie_gen == pl2->pcie_gen) &&
+ (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+ const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
+ const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
+ int i;
+
+ if (equal == NULL || psa == NULL || psb == NULL)
+ return -EINVAL;
+
+ /* If the two states don't even have the same number of performance levels, they cannot be the same state. */
+ if (psa->performance_level_count != psb->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < psa->performance_level_count; i++) {
+ if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+ /* If we have found even one performance level pair that is different, the states are different. */
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
+ *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+ *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+ *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+ *equal &= (psa->acp_clk == psb->acp_clk);
+
+ return 0;
+}
+
+bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+ bool is_update_required = false;
+ struct cgs_display_info info = {0,0,NULL};
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+ if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+ if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+ is_update_required = true;
+*/
+ return is_update_required;
+}
+
+
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.backend_init = &fiji_hwmgr_backend_init,
.backend_fini = &tonga_hwmgr_backend_fini,
@@ -5227,6 +5297,8 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
.set_fan_control_mode = fiji_set_fan_control_mode,
.get_fan_control_mode = fiji_get_fan_control_mode,
+ .check_states_equal = fiji_check_states_equal,
+ .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
.get_pp_table = fiji_get_pp_table,
.set_pp_table = fiji_set_pp_table,
.force_clock_level = fiji_force_clock_level,
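
A plausible caller of the new check_states_equal hook, shown only as a shape sketch; cur_hw_ps and new_hw_ps are assumed pp_hw_power_state pointers already in scope:

bool equal = false;

/* Skip reprogramming when the requested state matches the current one. */
if (!hwmgr->hwmgr_func->check_states_equal(hwmgr, cur_hw_ps,
					   new_hw_ps, &equal) && equal)
	return 0;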
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
index 22e273b1c1c5..a16f7cd4c238 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -29,6 +29,7 @@
#include "smu73_discrete.h"
#include "ppatomctrl.h"
#include "fiji_ppsmc.h"
+#include "pp_endian.h"
#define FIJI_MAX_HARDWARE_POWERLEVELS 2
#define FIJI_AT_DFLT 30
@@ -347,15 +348,4 @@ int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
-#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
-
-#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
-#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
-
-#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
-#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
-
-#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
-
#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 9deadabbc81c..72cfecc4f9f7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -34,6 +34,11 @@ static int phm_run_table(struct pp_hwmgr *hwmgr,
int result = 0;
phm_table_function *function;
+ if (rt_table->function_list == NULL) {
+ printk(KERN_INFO "[ powerplay ] this function is not implemented!\n");
+ return 0;
+ }
+
for (function = rt_table->function_list; NULL != *function; function++) {
int tmp = (*function)(hwmgr, input, output, temp_storage, result);
@@ -57,9 +62,9 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
int result = 0;
void *temp_storage = NULL;
- if (hwmgr == NULL || rt_table == NULL || rt_table->function_list == NULL) {
+ if (hwmgr == NULL || rt_table == NULL) {
printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
- return 0; /*temp return ture because some function not implement on some asic */
+ return -EINVAL;
}
if (0 != rt_table->storage_size) {
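
Net effect of the functiontables change, written out as the new contract: NULL arguments are a hard caller error, while an ASIC that never populated a function list is a quiet no-op:

if (hwmgr == NULL || rt_table == NULL)
	return -EINVAL;			/* caller bug: hard failure */
if (rt_table->function_list == NULL)
	return 0;			/* not implemented on this ASIC */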
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index be31bed2538a..fa208ada6892 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
+
if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) &&
acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION))
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
@@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
+ int ret = 1;
+ bool enabled;
PHM_FUNC_CHECK(hwmgr);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface)) {
if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
- return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
+ ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
} else {
- return phm_dispatch_table(hwmgr,
+ ret = phm_dispatch_table(hwmgr,
&(hwmgr->enable_dynamic_state_management),
NULL, NULL);
}
- return 0;
+
+ enabled = ret == 0 ? true : false;
+
+ cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+ return ret;
}
int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
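
phm_enable_dynamic_state_management now reports its outcome through the new CGS callback, so the amdgpu side always learns whether DPM actually came up. The essential flow, with a hypothetical helper standing in for the two enable paths:

int ret = try_enable_dpm(hwmgr);	/* hypothetical; 0 on success */

cgs_notify_dpm_enabled(hwmgr->device, ret == 0);
return ret;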
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index aec4f8346d9c..0d5d8372953e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -5185,7 +5185,6 @@ tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n", mclk/100, sclk/100);
-
offset = data->soft_regs_start + offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
activity_percent += 0x80;
@@ -5193,6 +5192,9 @@ tonga_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+ seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+ seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
}
static int tonga_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index 49168d262ccc..f88d3bbe6671 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -28,6 +28,7 @@
#include "ppatomctrl.h"
#include "ppinterrupt.h"
#include "tonga_powertune.h"
+#include "pp_endian.h"
#define TONGA_MAX_HARDWARE_POWERLEVELS 2
#define TONGA_DYNCLK_NUMBER_OF_TREND_COEFFICIENTS 15
@@ -386,17 +387,6 @@ typedef struct tonga_hwmgr tonga_hwmgr;
#define TONGA_UNUSED_GPIO_PIN 0x7F
-#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
-#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
-
-#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
-#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
-
-#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
-#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
-
-#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
-
int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
int tonga_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
int tonga_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 34f4bef3691f..b156481b50e8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -512,8 +512,10 @@ static int get_cac_tdp_table(
hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
- if (NULL == hwmgr->dyn_state.cac_dtp_table)
+ if (NULL == hwmgr->dyn_state.cac_dtp_table) {
+ kfree(tdp_table);
return -ENOMEM;
+ }
memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h b/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h
new file mode 100644
index 000000000000..f49d1963fe85
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_endian.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _PP_ENDIAN_H_
+#define _PP_ENDIAN_H_
+
+#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X)
+#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X)
+
+#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X)
+#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X)
+
+#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X))
+#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X))
+
+#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X))
+
+#endif /* _PP_ENDIAN_H_ */
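
The new shared header keeps the in-place conversion semantics of the per-ASIC macro copies it replaces. Usage sketch with an illustrative local:

uint32_t sclk = 60000;			/* host (CPU) byte order */

CONVERT_FROM_HOST_TO_SMC_UL(sclk);	/* sclk is now big-endian, SMC order */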
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 504f035d1843..fc9e3d1dd409 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -32,6 +32,27 @@ struct pp_instance;
#define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
+enum AVFS_BTC_STATUS {
+ AVFS_BTC_BOOT = 0,
+ AVFS_BTC_BOOT_STARTEDSMU,
+ AVFS_LOAD_VIRUS,
+ AVFS_BTC_VIRUS_LOADED,
+ AVFS_BTC_VIRUS_FAIL,
+ AVFS_BTC_COMPLETED_PREVIOUSLY,
+ AVFS_BTC_ENABLEAVFS,
+ AVFS_BTC_STARTED,
+ AVFS_BTC_FAILED,
+ AVFS_BTC_RESTOREVFT_FAILED,
+ AVFS_BTC_SAVEVFT_FAILED,
+ AVFS_BTC_DPMTABLESETUP_FAILED,
+ AVFS_BTC_COMPLETED_UNSAVED,
+ AVFS_BTC_COMPLETED_SAVED,
+ AVFS_BTC_COMPLETED_RESTORED,
+ AVFS_BTC_DISABLED,
+ AVFS_BTC_NOTSUPPORTED,
+ AVFS_BTC_SMUMSG_ERROR
+};
+
struct pp_smumgr_func {
int (*smu_init)(struct pp_smumgr *smumgr);
int (*smu_fini)(struct pp_smumgr *smumgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
index 8cd22d9c9140..b4eb483215b1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h
@@ -23,24 +23,6 @@
#ifndef _FIJI_SMUMANAGER_H_
#define _FIJI_SMUMANAGER_H_
-enum AVFS_BTC_STATUS {
- AVFS_BTC_BOOT = 0,
- AVFS_BTC_BOOT_STARTEDSMU,
- AVFS_LOAD_VIRUS,
- AVFS_BTC_VIRUS_LOADED,
- AVFS_BTC_VIRUS_FAIL,
- AVFS_BTC_STARTED,
- AVFS_BTC_FAILED,
- AVFS_BTC_RESTOREVFT_FAILED,
- AVFS_BTC_SAVEVFT_FAILED,
- AVFS_BTC_DPMTABLESETUP_FAILED,
- AVFS_BTC_COMPLETED_UNSAVED,
- AVFS_BTC_COMPLETED_SAVED,
- AVFS_BTC_COMPLETED_RESTORED,
- AVFS_BTC_DISABLED,
- AVFS_BTC_NOTSUPPORTED,
- AVFS_BTC_SMUMSG_ERROR
-};
struct fiji_smu_avfs {
enum AVFS_BTC_STATUS AvfsBtcStatus;
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 87c78eecea64..dc115aea352b 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -84,12 +84,33 @@ static bool amd_sched_fence_enable_signaling(struct fence *f)
return true;
}
-static void amd_sched_fence_release(struct fence *f)
+/**
+ * amd_sched_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the fence memory after the RCU grace period.
+ */
+static void amd_sched_fence_free(struct rcu_head *rcu)
{
+ struct fence *f = container_of(rcu, struct fence, rcu);
struct amd_sched_fence *fence = to_amd_sched_fence(f);
kmem_cache_free(sched_fence_slab, fence);
}
+/**
+ * amd_sched_fence_release - callback run once the fence can be freed
+ *
+ * @f: fence to release
+ *
+ * This function is called when the reference count becomes zero.
+ * It just schedules freeing the fence memory after an RCU grace period.
+ */
+static void amd_sched_fence_release(struct fence *f)
+{
+ call_rcu(&f->rcu, amd_sched_fence_free);
+}
+
const struct fence_ops amd_sched_fence_ops = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
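
The new release path is the standard call_rcu() deferred-free idiom: the final reference drop only schedules the free, and the memory stays valid for concurrent RCU readers until a grace period has elapsed. A generic sketch of the same pattern, with illustrative names not taken from this patch:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct rcu_head rcu;
	int payload;
};

static void item_free(struct rcu_head *rcu)
{
	/* Runs only after all pre-existing RCU readers have finished. */
	kfree(container_of(rcu, struct item, rcu));
}

static void item_release(struct item *it)
{
	/* Called on the last reference drop; freeing is deferred so
	 * readers under rcu_read_lock() can still dereference it. */
	call_rcu(&it->rcu, item_free);
}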
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 56b829f97699..3ac1ae4d8caf 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -57,14 +57,13 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
DRM_ERROR("failed to map control registers area\n");
ret = PTR_ERR(hdlcd->mmio);
hdlcd->mmio = NULL;
- goto fail;
+ return ret;
}
version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
DRM_ERROR("unknown product id: 0x%x\n", version);
- ret = -EINVAL;
- goto fail;
+ return -EINVAL;
}
DRM_INFO("found ARM HDLCD version r%dp%d\n",
(version & HDLCD_VERSION_MAJOR_MASK) >> 8,
@@ -73,7 +72,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(drm->dev);
if (ret && ret != -ENODEV)
- goto fail;
+ return ret;
ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
if (ret)
@@ -101,8 +100,6 @@ irq_fail:
drm_crtc_cleanup(&hdlcd->crtc);
setup_fail:
of_reserved_mem_device_release(drm->dev);
-fail:
- devm_clk_put(drm->dev, hdlcd->clk);
return ret;
}
@@ -412,7 +409,6 @@ err_unload:
pm_runtime_put_sync(drm->dev);
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
- devm_clk_put(dev, hdlcd->clk);
err_free:
drm_dev_unref(drm);
@@ -436,10 +432,6 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_put_sync(drm->dev);
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
- if (!IS_ERR(hdlcd->clk)) {
- devm_clk_put(drm->dev, hdlcd->clk);
- hdlcd->clk = NULL;
- }
drm_mode_config_cleanup(drm);
drm_dev_unregister(drm);
drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6e731db31aa4..aca7f9cc6109 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
release:
for_each_sg(sgt->sgl, sg, num, i)
- page_cache_release(sg_page(sg));
+ put_page(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
- page_cache_release(sg_page(sg));
+ put_page(sg_page(sg));
}
sg_free_table(sgt);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index f221e2dc1b0d..a965e7e8ad6e 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -497,13 +497,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/* ast is different - we will force move buffers out of VRAM */
static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -617,7 +610,6 @@ static void ast_crtc_commit(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.dpms = ast_crtc_dpms,
- .mode_fixup = ast_crtc_mode_fixup,
.mode_set = ast_crtc_mode_set,
.mode_set_base = ast_crtc_mode_set_base,
.disable = ast_crtc_disable,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 9863291a9a54..58c4f785cf84 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -121,13 +121,6 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
cfg);
}
-static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
{
struct drm_device *dev = c->dev;
@@ -261,7 +254,6 @@ static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
- .mode_fixup = atmel_hlcdc_crtc_mode_fixup,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
@@ -349,4 +341,3 @@ fail:
atmel_hlcdc_crtc_destroy(&crtc->base);
return ret;
}
-
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 1ffe9c329c46..d65dcaee3832 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -558,7 +558,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (!state->base.crtc || !fb)
return 0;
- crtc_state = s->state->crtc_states[drm_crtc_index(s->crtc)];
+ crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
mode = &crtc_state->adjusted_mode;
state->src_x = s->src_x;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 7f1a3604b19f..b332b4d3b0e2 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -182,8 +182,8 @@ static const struct pci_device_id bochs_pci_tbl[] = {
{
.vendor = 0x1234,
.device = 0x1111,
- .subvendor = 0x1af4,
- .subdevice = 0x1100,
+ .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+ .subdevice = PCI_SUBDEVICE_ID_QEMU,
.driver_data = BOCHS_QEMU_STDVGA,
},
{
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 317c27f2a50b..96926f09e0c9 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -30,13 +30,6 @@ static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
-static bool bochs_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -135,7 +128,6 @@ static const struct drm_crtc_funcs bochs_crtc_funcs = {
static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
.dpms = bochs_crtc_dpms,
- .mode_fixup = bochs_crtc_mode_fixup,
.mode_set = bochs_crtc_mode_set,
.mode_set_base = bochs_crtc_mode_set_base,
.prepare = bochs_crtc_prepare,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index b1619e29a564..7bc394ec9fb3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -33,8 +33,9 @@ static struct drm_driver driver;
/* only bind to the cirrus chip in qemu */
static const struct pci_device_id pciidlist[] = {
- { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
- 0, 0 },
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
+ PCI_SUBVENDOR_ID_REDHAT_QUMRANET, PCI_SUBDEVICE_ID_QEMU,
+ 0, 0, 0 },
{ PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
0x0001, 0, 0, 0 },
{0,}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 432ce9440e09..d3d8d7bfcc57 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -91,18 +91,6 @@ static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
WREG_GFX(0xe, gr0e);
}
-/*
- * The core passes the desired mode to the CRTC code to see whether any
- * CRTC-specific modifications need to be made to it. We're in a position
- * to just pass that straight through, so this does nothing
- */
-static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
{
struct cirrus_device *cdev = crtc->dev->dev_private;
@@ -372,7 +360,6 @@ static const struct drm_crtc_funcs cirrus_crtc_funcs = {
static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
.dpms = cirrus_crtc_dpms,
- .mode_fixup = cirrus_crtc_mode_fixup,
.mode_set = cirrus_crtc_mode_set,
.mode_set_base = cirrus_crtc_mode_set_base,
.prepare = cirrus_crtc_prepare,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 092620c6ff32..8ee1db866e80 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
/**
@@ -376,6 +377,58 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
/**
+ * drm_atomic_replace_property_blob - replace a blob property
+ * @blob: a pointer to the member blob to be replaced
+ * @new_blob: the new blob to replace with
+ * @replaced: whether the blob has been replaced
+ *
+ * This helper returns nothing: it drops the reference on the old blob,
+ * takes one on the new blob, and reports any change through @replaced.
+ */
+static void
+drm_atomic_replace_property_blob(struct drm_property_blob **blob,
+ struct drm_property_blob *new_blob,
+ bool *replaced)
+{
+ struct drm_property_blob *old_blob = *blob;
+
+ if (old_blob == new_blob)
+ return;
+
+ if (old_blob)
+ drm_property_unreference_blob(old_blob);
+ if (new_blob)
+ drm_property_reference_blob(new_blob);
+ *blob = new_blob;
+ *replaced = true;
+
+ return;
+}
+
+static int
+drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ bool *replaced)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property_blob *new_blob = NULL;
+
+ if (blob_id != 0) {
+ new_blob = drm_property_lookup_blob(dev, blob_id);
+ if (new_blob == NULL)
+ return -EINVAL;
+ if (expected_size > 0 && expected_size != new_blob->length)
+ return -EINVAL;
+ }
+
+ drm_atomic_replace_property_blob(blob, new_blob, replaced);
+
+ return 0;
+}
+
+/**
* drm_atomic_crtc_set_property - set property on CRTC
* @crtc: the drm CRTC to set a property on
* @state: the state object to update with the new property value
@@ -397,6 +450,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
+ bool replaced = false;
int ret;
if (property == config->prop_active)
@@ -407,8 +461,31 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
drm_property_unreference_blob(mode);
return ret;
- }
- else if (crtc->funcs->atomic_set_property)
+ } else if (property == config->degamma_lut_property) {
+ ret = drm_atomic_replace_property_blob_from_id(crtc,
+ &state->degamma_lut,
+ val,
+ -1,
+ &replaced);
+ state->color_mgmt_changed = replaced;
+ return ret;
+ } else if (property == config->ctm_property) {
+ ret = drm_atomic_replace_property_blob_from_id(crtc,
+ &state->ctm,
+ val,
+ sizeof(struct drm_color_ctm),
+ &replaced);
+ state->color_mgmt_changed = replaced;
+ return ret;
+ } else if (property == config->gamma_lut_property) {
+ ret = drm_atomic_replace_property_blob_from_id(crtc,
+ &state->gamma_lut,
+ val,
+ -1,
+ &replaced);
+ state->color_mgmt_changed = replaced;
+ return ret;
+ } else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
return -EINVAL;
@@ -444,6 +521,12 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = state->active;
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+ else if (property == config->degamma_lut_property)
+ *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
+ else if (property == config->ctm_property)
+ *val = (state->ctm) ? state->ctm->base.id : 0;
+ else if (property == config->gamma_lut_property)
+ *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else
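
drm_atomic_replace_property_blob() above is a reference-counted pointer swap: drop the reference held through the state, take one on the incoming object, and record that something changed. A loose restatement of the same idiom with kref, for illustration only; like the helper, it relies on the caller holding its own reference to the new object:

#include <linux/kref.h>
#include <linux/types.h>

static void replace_ref(struct kref **slot, struct kref *new_obj,
			bool *replaced, void (*release)(struct kref *))
{
	struct kref *old_obj = *slot;

	if (old_obj == new_obj)
		return;

	/* Safe to put before get: the caller still holds a reference
	 * to new_obj from the lookup that produced it. */
	if (old_obj)
		kref_put(old_obj, release);
	if (new_obj)
		kref_get(new_obj);
	*slot = new_obj;
	*replaced = true;
}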
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4da4f2a49078..4befe25c81c7 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
- crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ plane->state->crtc);
if (WARN_ON(!crtc_state))
return;
@@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
if (plane_state->crtc) {
- crtc_state =
- state->crtc_states[drm_crtc_index(plane_state->crtc)];
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ plane_state->crtc);
if (WARN_ON(!crtc_state))
return;
@@ -86,43 +87,104 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
-static bool
-check_pending_encoder_assignment(struct drm_atomic_state *state,
- struct drm_encoder *new_encoder)
+static int handle_conflicting_encoders(struct drm_atomic_state *state,
+ bool disable_conflicting_encoders)
{
- struct drm_connector *connector;
struct drm_connector_state *conn_state;
- int i;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ unsigned encoder_mask = 0;
+ int i, ret;
+ /*
+ * First loop, find all newly assigned encoders from the connectors
+ * part of the state. If the same encoder is assigned to multiple
+ * connectors bail out.
+ */
for_each_connector_in_state(state, connector, conn_state, i) {
- if (conn_state->best_encoder != new_encoder)
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+ struct drm_encoder *new_encoder;
+
+ if (!conn_state->crtc)
continue;
- /* encoder already assigned and we're trying to re-steal it! */
- if (connector->state->best_encoder != conn_state->best_encoder)
- return false;
+ if (funcs->atomic_best_encoder)
+ new_encoder = funcs->atomic_best_encoder(connector, conn_state);
+ else
+ new_encoder = funcs->best_encoder(connector);
+
+ if (new_encoder) {
+ if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
+ new_encoder->base.id, new_encoder->name,
+ connector->base.id, connector->name);
+
+ return -EINVAL;
+ }
+
+ encoder_mask |= 1 << drm_encoder_index(new_encoder);
+ }
}
- return true;
-}
+ if (!encoder_mask)
+ return 0;
-static struct drm_crtc *
-get_current_crtc_for_encoder(struct drm_device *dev,
- struct drm_encoder *encoder)
-{
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_connector *connector;
+ /*
+ * Second loop, iterate over all connectors not part of the state.
+ *
+ * If a conflicting encoder is found and disable_conflicting_encoders
+ * is not set, an error is returned. Userspace can provide a solution
+ * through the atomic ioctl.
+ *
+ * If the flag is set conflicting connectors are removed from the crtc
+ * and the crtc is disabled if no encoder is left. This preserves
+ * compatibility with the legacy set_config behavior.
+ */
+ drm_for_each_connector(connector, state->dev) {
+ struct drm_crtc_state *crtc_state;
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+ if (drm_atomic_get_existing_connector_state(state, connector))
+ continue;
- drm_for_each_connector(connector, dev) {
- if (connector->state->best_encoder != encoder)
+ encoder = connector->state->best_encoder;
+ if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
continue;
- return connector->state->crtc;
+ if (!disable_conflicting_encoders) {
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
+ encoder->base.id, encoder->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ conn_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
+ encoder->base.id, encoder->name,
+ conn_state->crtc->base.id, conn_state->crtc->name,
+ connector->base.id, connector->name);
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, conn_state->crtc);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+ if (ret)
+ return ret;
+
+ if (!crtc_state->connector_mask) {
+ ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
+ NULL);
+ if (ret < 0)
+ return ret;
+
+ crtc_state->active = false;
+ }
}
- return NULL;
+ return 0;
}
static void
@@ -166,70 +228,44 @@ set_best_encoder(struct drm_atomic_state *state,
conn_state->best_encoder = encoder;
}
-static int
+static void
steal_encoder(struct drm_atomic_state *state,
- struct drm_encoder *encoder,
- struct drm_crtc *encoder_crtc)
+ struct drm_encoder *encoder)
{
- struct drm_mode_config *config = &state->dev->mode_config;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
+ int i;
- /*
- * We can only steal an encoder coming from a connector, which means we
- * must already hold the connection_mutex.
- */
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
- DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
- encoder->base.id, encoder->name,
- encoder_crtc->base.id, encoder_crtc->name);
-
- crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
- crtc_state->connectors_changed = true;
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ struct drm_crtc *encoder_crtc;
- list_for_each_entry(connector, &config->connector_list, head) {
- if (connector->state->best_encoder != encoder)
+ if (connector_state->best_encoder != encoder)
continue;
- DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
+ encoder_crtc = connector->state->crtc;
- connector_state = drm_atomic_get_connector_state(state,
- connector);
- if (IS_ERR(connector_state))
- return PTR_ERR(connector_state);
-
- if (connector_state->best_encoder != encoder)
- continue;
+ DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
+ encoder->base.id, encoder->name,
+ encoder_crtc->base.id, encoder_crtc->name);
set_best_encoder(state, connector_state, NULL);
- }
- return 0;
+ crtc_state = drm_atomic_get_existing_crtc_state(state, encoder_crtc);
+ crtc_state->connectors_changed = true;
+
+ return;
+ }
}
static int
-update_connector_routing(struct drm_atomic_state *state, int conn_idx)
+update_connector_routing(struct drm_atomic_state *state,
+ struct drm_connector *connector,
+ struct drm_connector_state *connector_state)
{
const struct drm_connector_helper_funcs *funcs;
struct drm_encoder *new_encoder;
- struct drm_crtc *encoder_crtc;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
struct drm_crtc_state *crtc_state;
- int idx, ret;
-
- connector = state->connectors[conn_idx];
- connector_state = state->connector_states[conn_idx];
-
- if (!connector)
- return 0;
DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
connector->base.id,
@@ -237,16 +273,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
if (connector->state->crtc != connector_state->crtc) {
if (connector->state->crtc) {
- idx = drm_crtc_index(connector->state->crtc);
-
- crtc_state = state->crtc_states[idx];
+ crtc_state = drm_atomic_get_existing_crtc_state(state, connector->state->crtc);
crtc_state->connectors_changed = true;
}
if (connector_state->crtc) {
- idx = drm_crtc_index(connector_state->crtc);
-
- crtc_state = state->crtc_states[idx];
+ crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
crtc_state->connectors_changed = true;
}
}
@@ -298,34 +330,11 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
return 0;
}
- if (!check_pending_encoder_assignment(state, new_encoder)) {
- DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
- connector->base.id,
- connector->name);
- return -EINVAL;
- }
-
- encoder_crtc = get_current_crtc_for_encoder(state->dev,
- new_encoder);
-
- if (encoder_crtc) {
- ret = steal_encoder(state, new_encoder, encoder_crtc);
- if (ret) {
- DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
- return ret;
- }
- }
-
- if (WARN_ON(!connector_state->crtc))
- return -EINVAL;
+ steal_encoder(state, new_encoder);
set_best_encoder(state, connector_state, new_encoder);
- idx = drm_crtc_index(connector_state->crtc);
-
- crtc_state = state->crtc_states[idx];
+ crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
crtc_state->connectors_changed = true;
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
@@ -366,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
if (!conn_state->crtc || !conn_state->best_encoder)
continue;
- crtc_state =
- state->crtc_states[drm_crtc_index(conn_state->crtc)];
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ conn_state->crtc);
/*
* Each encoder has at most one connector (since we always steal
@@ -488,13 +497,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
}
}
+ ret = handle_conflicting_encoders(state, state->legacy_set_config);
+ if (ret)
+ return ret;
+
for_each_connector_in_state(state, connector, connector_state, i) {
/*
* This only sets crtc->mode_changed for routing changes,
* drivers must set crtc->mode_changed themselves when connector
* properties need to be updated.
*/
- ret = update_connector_routing(state, i);
+ ret = update_connector_routing(state, connector,
+ connector_state);
if (ret)
return ret;
}
@@ -666,7 +680,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_conn_state->crtc)
continue;
- old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
+ old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
+ old_conn_state->crtc);
if (!old_crtc_state->active ||
!drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
@@ -1761,28 +1776,18 @@ static int update_output_state(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
struct drm_connector_state *conn_state;
- int ret, i, j;
+ int ret, i;
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
state->acquire_ctx);
if (ret)
return ret;
- /* First grab all affected connector/crtc states. */
- for (i = 0; i < set->num_connectors; i++) {
- conn_state = drm_atomic_get_connector_state(state,
- set->connectors[i]);
- if (IS_ERR(conn_state))
- return PTR_ERR(conn_state);
- }
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- return ret;
- }
+ /* First disable all connectors on the target crtc. */
+ ret = drm_atomic_add_affected_connectors(state, set->crtc);
+ if (ret)
+ return ret;
- /* Then recompute connector->crtc links and crtc enabling state. */
for_each_connector_in_state(state, connector, conn_state, i) {
if (conn_state->crtc == set->crtc) {
ret = drm_atomic_set_crtc_for_connector(conn_state,
@@ -1790,16 +1795,19 @@ static int update_output_state(struct drm_atomic_state *state,
if (ret)
return ret;
}
+ }
- for (j = 0; j < set->num_connectors; j++) {
- if (set->connectors[j] == connector) {
- ret = drm_atomic_set_crtc_for_connector(conn_state,
- set->crtc);
- if (ret)
- return ret;
- break;
- }
- }
+ /* Then set all connectors from set->connectors on the target crtc */
+ for (i = 0; i < set->num_connectors; i++) {
+ conn_state = drm_atomic_get_connector_state(state,
+ set->connectors[i]);
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state,
+ set->crtc);
+ if (ret)
+ return ret;
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
@@ -1842,6 +1850,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
if (!state)
return -ENOMEM;
+ state->legacy_set_config = true;
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
ret = __drm_atomic_helper_set_config(set, state);
@@ -2488,8 +2497,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
*/
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
- if (crtc->state)
+ if (crtc->state) {
drm_property_unreference_blob(crtc->state->mode_blob);
+ drm_property_unreference_blob(crtc->state->degamma_lut);
+ drm_property_unreference_blob(crtc->state->ctm);
+ drm_property_unreference_blob(crtc->state->gamma_lut);
+ }
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
@@ -2513,10 +2526,17 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
if (state->mode_blob)
drm_property_reference_blob(state->mode_blob);
+ if (state->degamma_lut)
+ drm_property_reference_blob(state->degamma_lut);
+ if (state->ctm)
+ drm_property_reference_blob(state->ctm);
+ if (state->gamma_lut)
+ drm_property_reference_blob(state->gamma_lut);
state->mode_changed = false;
state->active_changed = false;
state->planes_changed = false;
state->connectors_changed = false;
+ state->color_mgmt_changed = false;
state->event = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
@@ -2557,6 +2577,9 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
drm_property_unreference_blob(state->mode_blob);
+ drm_property_unreference_blob(state->degamma_lut);
+ drm_property_unreference_blob(state->ctm);
+ drm_property_unreference_blob(state->gamma_lut);
}
EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
@@ -2870,3 +2893,98 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+
+/**
+ * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @start: first entry to update (currently unused by this helper)
+ * @size: size of the tables
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that support color management through the DEGAMMA_LUT/GAMMA_LUT
+ * properties.
+ */
+void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue,
+ uint32_t start, uint32_t size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_property_blob *blob = NULL;
+ struct drm_color_lut *blob_data;
+ int i, ret = 0;
+
+ state = drm_atomic_state_alloc(crtc->dev);
+ if (!state)
+ return;
+
+ blob = drm_property_create_blob(dev,
+ sizeof(struct drm_color_lut) * size,
+ NULL);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ blob = NULL;
+ goto fail;
+ }
+
+ /* Prepare GAMMA_LUT with the legacy values. */
+ blob_data = (struct drm_color_lut *) blob->data;
+ for (i = 0; i < size; i++) {
+ blob_data[i].red = red[i];
+ blob_data[i].green = green[i];
+ blob_data[i].blue = blue[i];
+ }
+
+ state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+retry:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ /* Reset DEGAMMA_LUT and CTM properties. */
+ ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+ config->degamma_lut_property, 0);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+ config->ctm_property, 0);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+ config->gamma_lut_property, blob->base.id);
+ if (ret)
+ goto fail;
+
+ ret = drm_atomic_commit(state);
+ if (ret)
+ goto fail;
+
+ /* Driver takes ownership of state on successful commit. */
+
+ drm_property_unreference_blob(blob);
+
+ return;
+fail:
+ if (ret == -EDEADLK)
+ goto backoff;
+
+ drm_atomic_state_free(state);
+ drm_property_unreference_blob(blob);
+
+ return;
+backoff:
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+
+ goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
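
handle_conflicting_encoders() above detects double-booked encoders with a bitmask keyed by drm_encoder_index(), which works because an encoder may legitimately appear at most once. A minimal sketch of that duplicate-detection idiom, with hypothetical names; like the helper, it assumes fewer than 32 distinct indices:

#include <linux/errno.h>

static int check_unique(const unsigned int *idx, int n)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (mask & (1U << idx[i]))
			return -EINVAL;	/* same index claimed twice */
		mask |= 1U << idx[i];
	}

	return 0;
}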
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index bd93453afa61..b3654404abd0 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -186,7 +186,8 @@ void drm_bridge_disable(struct drm_bridge *bridge)
drm_bridge_disable(bridge->next);
- bridge->funcs->disable(bridge);
+ if (bridge->funcs->disable)
+ bridge->funcs->disable(bridge);
}
EXPORT_SYMBOL(drm_bridge_disable);
@@ -206,7 +207,8 @@ void drm_bridge_post_disable(struct drm_bridge *bridge)
if (!bridge)
return;
- bridge->funcs->post_disable(bridge);
+ if (bridge->funcs->post_disable)
+ bridge->funcs->post_disable(bridge);
drm_bridge_post_disable(bridge->next);
}
@@ -256,7 +258,8 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)
drm_bridge_pre_enable(bridge->next);
- bridge->funcs->pre_enable(bridge);
+ if (bridge->funcs->pre_enable)
+ bridge->funcs->pre_enable(bridge);
}
EXPORT_SYMBOL(drm_bridge_pre_enable);
@@ -276,7 +279,8 @@ void drm_bridge_enable(struct drm_bridge *bridge)
if (!bridge)
return;
- bridge->funcs->enable(bridge);
+ if (bridge->funcs->enable)
+ bridge->funcs->enable(bridge);
drm_bridge_enable(bridge->next);
}
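
With the NULL checks above, a bridge driver may leave hooks it has no use for unset. A hypothetical bridge that only needs enable/disable could now register as:

#include <drm/drm_crtc.h>

static void my_bridge_enable(struct drm_bridge *bridge)
{
	/* power up this link in the encoder chain */
}

static void my_bridge_disable(struct drm_bridge *bridge)
{
	/* power it back down */
}

static const struct drm_bridge_funcs my_bridge_funcs = {
	.enable = my_bridge_enable,
	.disable = my_bridge_disable,
	/* .pre_enable and .post_disable intentionally left NULL */
};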
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 84514001dcef..e08f962288d9 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -430,9 +430,7 @@ EXPORT_SYMBOL(drm_framebuffer_init);
static void __drm_framebuffer_unregister(struct drm_device *dev,
struct drm_framebuffer *fb)
{
- mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
- mutex_unlock(&dev->mode_config.idr_mutex);
+ drm_mode_object_put(dev, &fb->base);
fb->base.id = 0;
}
@@ -1554,6 +1552,41 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "DEGAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.degamma_lut_size_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "CTM", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.ctm_property = prop;
+
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB,
+ "GAMMA_LUT", 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_property = prop;
+
+ prop = drm_property_create_range(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "GAMMA_LUT_SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.gamma_lut_size_property = prop;
+
return 0;
}
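
The DEGAMMA_LUT and GAMMA_LUT blobs created above carry arrays of struct drm_color_lut, the 16-bit-per-channel entry type this series adds. A sketch of filling an identity ramp into such an array, assuming a hypothetical 256-entry table:

#include <linux/kernel.h>
#include <drm/drm_mode.h>

#define LUT_SIZE 256

static void fill_identity_lut(struct drm_color_lut *lut)
{
	int i;

	for (i = 0; i < LUT_SIZE; i++) {
		/* Spread entries linearly across the full 16-bit range. */
		u16 v = (i * 0xffff) / (LUT_SIZE - 1);

		lut[i].red = v;
		lut[i].green = v;
		lut[i].blue = v;
	}
}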
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7539eea4ccbc..79555d2b1b87 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -1075,3 +1075,36 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
+
+/**
+ * drm_helper_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets a driver enable the color correction properties on a
+ * CRTC: the degamma LUT, CSC matrix and gamma LUT blobs that userspace can
+ * set, plus the two immutable properties exposing the LUT sizes to userspace.
+ */
+void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+ int degamma_lut_size,
+ int gamma_lut_size)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->ctm_property, 0);
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_property, 0);
+
+ drm_object_attach_property(&crtc->base,
+ config->degamma_lut_size_property,
+ degamma_lut_size);
+ drm_object_attach_property(&crtc->base,
+ config->gamma_lut_size_property,
+ gamma_lut_size);
+}
+EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
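
A driver opts into these properties per CRTC, typically right after the CRTC is initialized. A hypothetical call site advertising a 256-entry degamma LUT and a 1024-entry gamma LUT:

#include <drm/drm_crtc_helper.h>

static void my_crtc_color_init(struct drm_crtc *crtc)
{
	drm_helper_crtc_enable_color_mgmt(crtc, 256, 1024);
}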
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7d58f594cffe..df64ed1c0139 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -179,7 +179,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
{
struct drm_dp_aux_msg msg;
unsigned int retry;
- int err;
+ int err = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
@@ -187,6 +187,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
msg.buffer = buffer;
msg.size = size;
+ mutex_lock(&aux->hw_mutex);
+
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@@ -195,25 +197,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
*/
for (retry = 0; retry < 32; retry++) {
- mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
- mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;
- return err;
+ goto unlock;
}
switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
- return -EPROTO;
- return err;
+ err = -EPROTO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_NACK:
- return -EIO;
+ err = -EIO;
+ goto unlock;
case DP_AUX_NATIVE_REPLY_DEFER:
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -222,7 +223,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}
DRM_DEBUG_KMS("too many retries, giving up\n");
- return -EIO;
+ err = -EIO;
+
+unlock:
+ mutex_unlock(&aux->hw_mutex);
+ return err;
}
/**
@@ -544,9 +549,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
- mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
- mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
@@ -685,6 +688,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
memset(&msg, 0, sizeof(msg));
+ mutex_lock(&aux->hw_mutex);
+
for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -739,6 +744,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);
+ mutex_unlock(&aux->hw_mutex);
+
return err;
}
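
The locking rework above holds hw_mutex across the whole retry loop instead of taking it per attempt, and funnels every exit through one unlock label. A simplified restatement of the shape (the real function also maps short reads and NACKs to -EPROTO/-EIO):

#include <linux/errno.h>
#include <linux/mutex.h>

static int do_transfer_locked(struct mutex *lock,
			      int (*xfer)(void *ctx), void *ctx)
{
	int retry, err;

	mutex_lock(lock);

	for (retry = 0; retry < 32; retry++) {
		err = xfer(ctx);
		if (err == -EBUSY)
			continue;	/* retry without dropping the lock */
		goto unlock;		/* success or a hard failure */
	}

	err = -EIO;			/* too many retries, give up */
unlock:
	mutex_unlock(lock);
	return err;
}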
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 27fbd79d0daf..e17fbdaf874b 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return -EINVAL;
+
port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
- if (!mstb)
+ if (!mstb) {
+ drm_dp_put_port(port);
return -EINVAL;
+ }
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
+ drm_dp_put_port(port);
return ret;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fdb1eb014586..558ef9fc39e6 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 0x0f - 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 0x10 - 1024x768@60Hz */
@@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = {
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
+ 704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
int i, j, m, modes = 0;
struct drm_display_mode *mode;
- u8 *est = ((u8 *)timing) + 5;
+ u8 *est = ((u8 *)timing) + 6;
for (i = 0; i < 6; i++) {
for (j = 7; j >= 0; j--) {
@@ -3308,7 +3308,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
u8 *cea;
u8 *name;
u8 *db;
- int sad_count = 0;
+ int total_sad_count = 0;
int mnl;
int dbl;
@@ -3322,6 +3322,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
name = NULL;
drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+ /* max: 13 bytes EDID, 16 bytes ELD */
for (mnl = 0; name && mnl < 13; mnl++) {
if (name[mnl] == 0x0a)
break;
@@ -3350,11 +3351,15 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
+ int sad_count;
+
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
- sad_count = dbl / 3;
- if (dbl >= 1)
- memcpy(eld + 20 + mnl, &db[1], dbl);
+ sad_count = min(dbl / 3, 15 - total_sad_count);
+ if (sad_count >= 1)
+ memcpy(eld + 20 + mnl + total_sad_count * 3,
+ &db[1], sad_count * 3);
+ total_sad_count += sad_count;
break;
case SPEAKER_BLOCK:
/* Speaker Allocation Data Block */
@@ -3371,13 +3376,13 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
}
}
}
- eld[5] |= sad_count << 4;
+ eld[5] |= total_sad_count << 4;
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
- drm_eld_size(eld), sad_count);
+ drm_eld_size(eld), total_sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
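
The clamp added above exists because the fixed-size ELD buffer has room for at most 15 SADs of 3 bytes each, while an EDID may carry several audio data blocks. A worked example of the bound, with hypothetical block sizes:

#include <linux/kernel.h>

static int demo_sad_clamp(void)
{
	int total_sad_count = 0, dbl, sad_count;

	dbl = 30;	/* first audio block: 10 SADs * 3 bytes */
	sad_count = min(dbl / 3, 15 - total_sad_count);	/* = 10 */
	total_sad_count += sad_count;

	dbl = 30;	/* second audio block, same size */
	sad_count = min(dbl / 3, 15 - total_sad_count);	/* = 5: room for 5 only */
	total_sad_count += sad_count;

	return total_sad_count;	/* 15; the copy never overruns the ELD */
}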
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 698b8c3b09d9..9a401aed98e0 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -170,16 +170,11 @@ static void *edid_load(struct drm_connector *connector, const char *name,
int i, valid_extensions = 0;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
- builtin = 0;
- for (i = 0; i < GENERIC_EDIDS; i++) {
- if (strcmp(name, generic_edid_name[i]) == 0) {
- fwdata = generic_edid[i];
- fwsize = sizeof(generic_edid[i]);
- builtin = 1;
- break;
- }
- }
- if (!builtin) {
+ builtin = match_string(generic_edid_name, GENERIC_EDIDS, name);
+ if (builtin >= 0) {
+ fwdata = generic_edid[builtin];
+ fwsize = sizeof(generic_edid[builtin]);
+ } else {
struct platform_device *pdev;
int err;
@@ -252,7 +247,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
}
DRM_INFO("Got %s EDID base block and %d extension%s from "
- "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
+ "\"%s\" for connector \"%s\"\n", (builtin >= 0) ? "built-in" :
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
name, connector_name);
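
match_string() returns the index of the first array entry equal to the given string, or -EINVAL when nothing matches, which is why the rewritten code can test builtin >= 0. A minimal illustration with a hypothetical name table:

#include <linux/kernel.h>
#include <linux/string.h>

static const char * const names[] = { "800x600", "1024x768", "1280x1024" };

static int lookup(const char *name)
{
	/* index into names[] on success, -EINVAL otherwise */
	return match_string(names, ARRAY_SIZE(names), name);
}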
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 2e8c77e71e1f..da0c5320789f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
fail:
while (i--)
- page_cache_release(pages[i]);
+ put_page(pages[i]);
drm_free_large(pages);
return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
mark_page_accessed(pages[i]);
/* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
drm_free_large(pages);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e5df53b6e229..1f500a1b9969 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -109,8 +109,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
if (IS_ERR(cma_obj))
return cma_obj;
- cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
- &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
+ cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
if (!cma_obj->vaddr) {
dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
size);
@@ -192,8 +192,8 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
cma_obj = to_drm_gem_cma_obj(gem_obj);
if (cma_obj->vaddr) {
- dma_free_writecombine(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
+ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
} else if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
}
@@ -324,9 +324,8 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(cma_obj->base.dev->dev, vma,
- cma_obj->vaddr, cma_obj->paddr,
- vma->vm_end - vma->vm_start);
+ ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
+ cma_obj->paddr, vma->vm_end - vma->vm_start);
if (ret)
drm_gem_vm_close(vma);
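
dma_alloc_wc()/dma_free_wc()/dma_mmap_wc() are straight renames of the _writecombine helpers; the calling conventions are unchanged. A minimal allocate/free pair using the new names:

#include <linux/dma-mapping.h>

static void *alloc_scanout_buffer(struct device *dev, size_t size,
				  dma_addr_t *dma)
{
	/* Write-combined mapping, suitable for CPU-filled scanout memory. */
	return dma_alloc_wc(dev, size, dma, GFP_KERNEL | __GFP_NOWARN);
}

static void free_scanout_buffer(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma)
{
	dma_free_wc(dev, size, vaddr, dma);
}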
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 6e6a9c58d404..f5d80839a90c 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -47,7 +47,17 @@
static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
{
- return of_driver_match_device(dev, drv);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+ /* attempt OF style match */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ /* compare DSI device and driver names */
+ if (!strcmp(dsi->name, drv->name))
+ return 1;
+
+ return 0;
}
static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
@@ -129,14 +139,20 @@ static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
return device_add(&dsi->dev);
}
+#if IS_ENABLED(CONFIG_OF)
static struct mipi_dsi_device *
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
{
- struct mipi_dsi_device *dsi;
struct device *dev = host->dev;
+ struct mipi_dsi_device_info info = { };
int ret;
u32 reg;
+ if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+ dev_err(dev, "modalias failure on %s\n", node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+
ret = of_property_read_u32(node, "reg", &reg);
if (ret) {
dev_err(dev, "device node %s has no valid reg property: %d\n",
@@ -144,32 +160,111 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
return ERR_PTR(-EINVAL);
}
- if (reg > 3) {
- dev_err(dev, "device node %s has invalid reg property: %u\n",
- node->full_name, reg);
+ info.channel = reg;
+ info.node = of_node_get(node);
+
+ return mipi_dsi_device_register_full(host, &info);
+}
+#else
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+/**
+ * mipi_dsi_device_register_full - create a MIPI DSI device
+ * @host: DSI host to which this device is connected
+ * @info: pointer to template containing DSI device information
+ *
+ * Create a MIPI DSI device using the device information provided in the
+ * mipi_dsi_device_info template.
+ *
+ * Returns:
+ * A pointer to the newly created MIPI DSI device, or an error pointer on
+ * failure.
+ */
+struct mipi_dsi_device *
+mipi_dsi_device_register_full(struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info)
+{
+ struct mipi_dsi_device *dsi;
+ struct device *dev = host->dev;
+ int ret;
+
+ if (!info) {
+ dev_err(dev, "invalid mipi_dsi_device_info pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (info->channel > 3) {
+ dev_err(dev, "invalid virtual channel: %u\n", info->channel);
return ERR_PTR(-EINVAL);
}
dsi = mipi_dsi_device_alloc(host);
if (IS_ERR(dsi)) {
- dev_err(dev, "failed to allocate DSI device %s: %ld\n",
- node->full_name, PTR_ERR(dsi));
+ dev_err(dev, "failed to allocate DSI device %ld\n",
+ PTR_ERR(dsi));
return dsi;
}
- dsi->dev.of_node = of_node_get(node);
- dsi->channel = reg;
+ dsi->dev.of_node = info->node;
+ dsi->channel = info->channel;
+ strlcpy(dsi->name, info->type, sizeof(dsi->name));
ret = mipi_dsi_device_add(dsi);
if (ret) {
- dev_err(dev, "failed to add DSI device %s: %d\n",
- node->full_name, ret);
+ dev_err(dev, "failed to add DSI device %d\n", ret);
kfree(dsi);
return ERR_PTR(ret);
}
return dsi;
}
+EXPORT_SYMBOL(mipi_dsi_device_register_full);
+
+/**
+ * mipi_dsi_device_unregister - unregister MIPI DSI device
+ * @dsi: DSI peripheral device
+ */
+void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi)
+{
+ device_unregister(&dsi->dev);
+}
+EXPORT_SYMBOL(mipi_dsi_device_unregister);
+
+static DEFINE_MUTEX(host_lock);
+static LIST_HEAD(host_list);
+
+/**
+ * of_find_mipi_dsi_host_by_node() - find the MIPI DSI host matching a
+ * device tree node
+ * @node: device tree node
+ *
+ * Returns:
+ * A pointer to the MIPI DSI host corresponding to @node or NULL if no
+ * such device exists (or has not been registered yet).
+ */
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
+{
+ struct mipi_dsi_host *host;
+
+ mutex_lock(&host_lock);
+
+ list_for_each_entry(host, &host_list, list) {
+ if (host->dev->of_node == node) {
+ mutex_unlock(&host_lock);
+ return host;
+ }
+ }
+
+ mutex_unlock(&host_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_mipi_dsi_host_by_node);
int mipi_dsi_host_register(struct mipi_dsi_host *host)
{
@@ -182,6 +277,10 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
of_mipi_dsi_device_add(host, node);
}
+ mutex_lock(&host_lock);
+ list_add_tail(&host->list, &host_list);
+ mutex_unlock(&host_lock);
+
return 0;
}
EXPORT_SYMBOL(mipi_dsi_host_register);
@@ -190,7 +289,7 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- device_unregister(&dsi->dev);
+ mipi_dsi_device_unregister(dsi);
return 0;
}
@@ -198,6 +297,10 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
{
device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+
+ mutex_lock(&host_lock);
+ list_del_init(&host->list);
+ mutex_unlock(&host_lock);
}
EXPORT_SYMBOL(mipi_dsi_host_unregister);
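
With mipi_dsi_device_register_full() exported, a host driver can create DSI peripherals without a device-tree node by filling the info template directly; matching then falls back to the name comparison added in mipi_dsi_device_match(). A hypothetical non-DT call site:

#include <drm/drm_mipi_dsi.h>

static struct mipi_dsi_device *add_panel(struct mipi_dsi_host *host)
{
	struct mipi_dsi_device_info info = {
		.type = "hypothetical-panel",	/* must match a DSI driver name */
		.channel = 0,			/* virtual channel, must be <= 3 */
		.node = NULL,			/* no OF node in the non-DT case */
	};

	return mipi_dsi_device_register_full(host, &info);
}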
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 937a77520f58..281c6eca20a8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -761,9 +761,9 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
down_read(&mm->mmap_sem);
while (pinned < npages) {
- ret = get_user_pages(task, mm, ptr, npages - pinned,
- !etnaviv_obj->userptr.ro, 0,
- pvec + pinned, NULL);
+ ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
+ !etnaviv_obj->userptr.ro, 0,
+ pvec + pinned, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index d13303ce530d..09198d0b5814 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1110,8 +1110,8 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
if (!cmdbuf)
return NULL;
- cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
- GFP_KERNEL);
+ cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
+ GFP_KERNEL);
if (!cmdbuf->vaddr) {
kfree(cmdbuf);
return NULL;
@@ -1125,8 +1125,8 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
- dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
- cmdbuf->vaddr, cmdbuf->paddr);
+ dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
+ cmdbuf->paddr);
kfree(cmdbuf);
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f17d39279596..baddf33fb475 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -94,7 +94,7 @@ comment "Sub-drivers"
config DRM_EXYNOS_G2D
bool "G2D"
- depends on !VIDEO_SAMSUNG_S5P_G2D
+ depends on VIDEO_SAMSUNG_S5P_G2D=n
select FRAME_VECTOR
help
Choose this option if you want to use Exynos G2D for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 968b31c522b2..23d2f958739b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -2,10 +2,10 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fbdev.o \
- exynos_drm_fb.o exynos_drm_gem.o exynos_drm_core.o \
- exynos_drm_plane.o
+exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
+ exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 7f55ba6771c6..011211e4167d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
return 0;
err:
- list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+ list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
if (subdrv->close)
subdrv->close(dev, subdrv->dev, file);
}
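
The one-liner above matters: list_for_each_entry_reverse() anchored at &subdrv->list walked a list rooted at the failing entry itself, while the _continue_reverse variant starts from the current cursor and unwinds only the subdrivers whose open already succeeded. The generic shape of that rollback idiom, with illustrative types:

#include <linux/list.h>

struct sub {
	struct list_head list;
	int (*open)(struct sub *s);
	void (*close)(struct sub *s);
};

static int open_all(struct list_head *head)
{
	struct sub *s;
	int ret;

	list_for_each_entry(s, head, list) {
		ret = s->open(s);
		if (ret)
			goto rollback;
	}

	return 0;

rollback:
	/* Walk backwards from (and excluding) the entry that failed. */
	list_for_each_entry_continue_reverse(s, head, list)
		s->close(s);

	return ret;
}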
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d614194644c8..81cc5537cf25 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -199,17 +199,6 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
return exynos_fb->dma_addr[index];
}
-static void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper = private->fb_helper;
-
- if (fb_helper)
- drm_fb_helper_hotplug_event(fb_helper);
- else
- exynos_drm_fbdev_init(dev);
-}
-
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 4ae860c44f1d..72d7c0b7c216 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -317,3 +317,14 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
}
+
+void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
+ else
+ exynos_drm_fbdev_init(dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index e16d7f0ae192..330eef87f718 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -15,9 +15,30 @@
#ifndef _EXYNOS_DRM_FBDEV_H_
#define _EXYNOS_DRM_FBDEV_H_
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
int exynos_drm_fbdev_init(struct drm_device *dev);
-int exynos_drm_fbdev_reinit(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+void exynos_drm_output_poll_changed(struct drm_device *dev);
+
+#else
+
+static inline int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+
+#define exynos_drm_output_poll_changed (NULL)
+
+#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 51d484ae9f49..018449f8d557 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -888,7 +888,7 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
* clock. On these SoCs the bootloader may enable it but any
* power domain off/on will reset it to disable state.
*/
- if (ctx->driver_data != &exynos5_fimd_driver_data ||
+ if (ctx->driver_data != &exynos5_fimd_driver_data &&
ctx->driver_data != &exynos5420_fimd_driver_data)
return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 9869d70e9e54..a0def0be6d65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -129,7 +129,7 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
} else
val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
- regmap_write(mic->sysreg, DSD_CFG_MUX, val);
+ ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
DRM_ERROR("mic: Failed to read system register\n");
}
@@ -457,6 +457,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
DRM_ERROR("mic: Failed to get system register.\n");
+ ret = PTR_ERR(mic->sysreg);
goto err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index d86227236f55..50185ac347b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -11,9 +11,10 @@
#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
@@ -57,11 +58,12 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
}
static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
-
{
struct drm_plane_state *state = &exynos_state->base;
- struct drm_crtc *crtc = exynos_state->base.crtc;
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, crtc);
+ struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
unsigned int src_x, src_y;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 7574db2da413..4ed7798533f9 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -62,13 +62,6 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
DCU_UPDATE_MODE_READREG);
}
-static bool fsl_dcu_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -127,7 +120,6 @@ static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
.disable = fsl_dcu_drm_disable_crtc,
.enable = fsl_dcu_drm_crtc_enable,
- .mode_fixup = fsl_dcu_drm_crtc_mode_fixup,
.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 6126546295e9..17db4b4749d5 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -116,7 +116,7 @@ static const struct gma_limit_t cdv_intel_limits[] = {
.p1 = {.min = 1, .max = 10},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
.find_pll = cdv_intel_find_dp_pll,
- }
+ }
};
#define _wait_for(COND, MS, W) ({ \
@@ -245,7 +245,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
/* We don't know what the other fields of these regs are, so
* leave them in place.
*/
- /*
+ /*
* The BIT 14:13 of 0x8010/0x8030 is used to select the ref clk
* for the pipe A/B. Display spec 1.06 has wrong definition.
* Correct definition is like below:
@@ -256,7 +256,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
*
* if DPLLA sets 01 and DPLLB sets 02, both use clk from DPLLA
*
- */
+ */
ret = cdv_sb_read(dev, ref_sfr, &ref_value);
if (ret)
return ret;
@@ -646,7 +646,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
* for DP/eDP. When using SSC clock, the ref clk is 100MHz. Otherwise
* it will be 27MHz. From the VBIOS code it seems that pipe A chooses

* 27MHz for DP/eDP while the Pipe B chooses the 100MHz.
- */
+ */
if (pipe == 0)
refclk = 27000;
else
@@ -659,7 +659,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
drm_mode_debug_printmodeline(adjusted_mode);
-
+
limit = gma_crtc->clock_funcs->limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
@@ -721,7 +721,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf |= PIPE_6BPC;
} else
pipeconf |= PIPE_8BPC;
-
+
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -974,7 +974,6 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.dpms = gma_crtc_dpms,
- .mode_fixup = gma_crtc_mode_fixup,
.mode_set = cdv_intel_crtc_mode_set,
.mode_set_base = gma_pipe_set_base,
.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 7bb1f1aff932..c52f9adf5e04 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
* FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
* be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
*/
-static int __deprecated
+static int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 927082148d4d..5bf765de2517 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -478,13 +478,6 @@ int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
-bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
void gma_crtc_prepare(struct drm_crtc *crtc)
{
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index 78b9f986a6e5..b2491c65f053 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -75,9 +75,6 @@ extern void gma_crtc_load_lut(struct drm_crtc *crtc);
extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, u32 start, u32 size);
extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
-extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
extern void gma_crtc_prepare(struct drm_crtc *crtc);
extern void gma_crtc_commit(struct drm_crtc *crtc);
extern void gma_crtc_disable(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index acd38344b302..92e3f93ee682 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -1026,10 +1026,8 @@ mrst_crtc_mode_set_exit:
const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
.dpms = mdfld_crtc_dpms,
- .mode_fixup = gma_crtc_mode_fixup,
.mode_set = mdfld_crtc_mode_set,
.mode_set_base = mdfld__intel_pipe_set_base,
.prepare = gma_crtc_prepare,
.commit = gma_crtc_commit,
};
-
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 1048f0c7c6ce..da9fd34b9550 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -657,7 +657,6 @@ pipe_set_base_exit:
const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.dpms = oaktrail_crtc_dpms,
- .mode_fixup = gma_crtc_mode_fixup,
.mode_set = oaktrail_crtc_mode_set,
.mode_set_base = oaktrail_pipe_set_base,
.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index dcdbc37e55e1..398015be87e4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -430,7 +430,6 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.dpms = gma_crtc_dpms,
- .mode_fixup = gma_crtc_mode_fixup,
.mode_set = psb_intel_crtc_mode_set,
.mode_set_base = gma_pipe_set_base,
.prepare = gma_crtc_prepare,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 20e82008b8b6..30798cbc6fc0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -758,10 +758,10 @@ static int i915_drm_resume(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_display_resume(dev);
-
intel_dp_mst_resume(dev);
+ intel_display_resume(dev);
+
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 10480939159c..daba7ebb9699 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
- IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+ IS_SKL_GT3(dev) || \
+ IS_SKL_GT4(dev))
+
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3d31d3ac589e..dabc08987b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(src);
- page_cache_release(page);
+ put_page(page);
vaddr += PAGE_SIZE;
}
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
vaddr += PAGE_SIZE;
}
obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
obj->dirty = 0;
@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- page_cache_release(sg_page_iter_page(&sg_iter));
+ put_page(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
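
The renames above follow the kernel-wide change in which page_cache_release() was removed in favor of its underlying put_page(). A hedged sketch of the reference pattern the err_pages loop implements, mirroring the hunk (kernel context assumed):

#include <linux/scatterlist.h>
#include <linux/mm.h>

/* drop our reference on every page backing an sg table */
static void release_backing_pages(struct sg_table *st, unsigned int nents)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, nents, 0)
		put_page(sg_page_iter_page(&sg_iter));
}
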
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 1f3eef6fb345..0506016e18e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -228,25 +228,20 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
return ret;
}
-static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
+static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- bool was_interruptible;
int ret;
- mutex_lock(&dev->struct_mutex);
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
ret = i915_gem_object_set_to_gtt_domain(obj, false);
-
- dev_priv->mm.interruptible = was_interruptible;
mutex_unlock(&dev->struct_mutex);
- if (unlikely(ret))
- DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
+ return ret;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
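
The hunk above reflects end_cpu_access growing an int return: instead of forcing non-interruptible waits and logging a DRM_ERROR on failure, the driver can take its lock interruptibly and let the error (typically -ERESTARTSYS/-EINTR) propagate so userspace restarts the ioctl. A minimal sketch of that pattern, not the exact i915 code:

#include <linux/mutex.h>

static int example_end_cpu_access(struct mutex *lock,
				  int (*flush)(void *obj), void *obj)
{
	int ret = mutex_lock_interruptible(lock);

	if (ret)
		return ret;	/* signal pending: let the caller restart */
	ret = flush(obj);	/* e.g. move the object back to the GTT domain */
	mutex_unlock(lock);
	return ret;
}
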
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 598198543dcd..a2b938ec01a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -34,8 +34,8 @@
* set of these objects.
*
* Fences are used to detile GTT memory mappings. They're also connected to the
- * hardware frontbuffer render tracking and hence interract with frontbuffer
- * conmpression. Furthermore on older platforms fences are required for tiled
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
* objects used by the display engine. They can also be used by the render
* engine - they're required for blitter commands and are optional for render
* commands. But on gen4+ both display (with the exception of fbc) and rendering
@@ -46,8 +46,8 @@
*
* Finally note that because fences are such a restricted resource they're
* dynamically associated with objects. Furthermore fence state is committed to
- * the hardware lazily to avoid unecessary stalls on gen2/3. Therefore code must
- * explictly call i915_gem_object_get_fence() to synchronize fencing status
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
* for cpu access. Also note that some code wants an unfenced view, for those
* cases the fence can be removed forcefully with i915_gem_object_put_fence().
*
@@ -527,7 +527,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
+ * 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 4b09c840d493..4d30b60defda 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
- down_read(&mm->mmap_sem);
- while (pinned < npages) {
- ret = get_user_pages(work->task, mm,
- obj->userptr.ptr + pinned * PAGE_SIZE,
- npages - pinned,
- !obj->userptr.read_only, 0,
- pvec + pinned, NULL);
- if (ret < 0)
- break;
-
- pinned += ret;
+ ret = -EFAULT;
+ if (atomic_inc_not_zero(&mm->mm_users)) {
+ down_read(&mm->mmap_sem);
+ while (pinned < npages) {
+ ret = get_user_pages_remote
+ (work->task, mm,
+ obj->userptr.ptr + pinned * PAGE_SIZE,
+ npages - pinned,
+ !obj->userptr.read_only, 0,
+ pvec + pinned, NULL);
+ if (ret < 0)
+ break;
+
+ pinned += ret;
+ }
+ up_read(&mm->mmap_sem);
+ mmput(mm);
}
- up_read(&mm->mmap_sem);
}
mutex_lock(&dev->struct_mutex);
@@ -683,7 +688,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
obj->dirty = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef5ab3f..1c212205d0e7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- for (;;) {
+ do {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- }
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8b7b8b64b008..6e0d8283daa6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10542,7 +10542,8 @@ found:
goto fail;
}
- if (drm_atomic_commit(state)) {
+ ret = drm_atomic_commit(state);
+ if (ret) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
goto fail;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a2bd698fe2f7..937e77228466 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -506,6 +506,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ intel_connector->unregister(intel_connector);
+
/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@@ -519,11 +521,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
- drm_modeset_unlock_all(dev);
- intel_connector->unregister(intel_connector);
-
- drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce80244..5c6080fd0968 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So we don't need an immediate wrap
+ * and only need to wait for the reserved size, measured
+ * from the start of the ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
+ /* We're using qword write, seqno should be aligned to 8 bytes. */
+ BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
/* w/a for post sync ops following a GPGPU operation we
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf,
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+ /* We're thrashing one dword of HWS. */
+ intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
return intel_logical_ring_advance_and_submit(request);
}
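
The hunk above switches the seqno write to a qword, which requires the hardware status page slot to be 8-byte aligned; BUILD_BUG_ON turns a violated invariant into a compile failure instead of a runtime hang. A small illustration of that guard (EXAMPLE_HWS_INDEX is a made-up dword index):

#include <linux/bug.h>
#include <linux/types.h>

#define EXAMPLE_HWS_INDEX 0x30		/* dword index into the status page */

static inline u64 example_hws_gtt_address(u64 hws_base)
{
	BUILD_BUG_ON(EXAMPLE_HWS_INDEX & 1);	/* qword write needs even index */
	return hws_base + EXAMPLE_HWS_INDEX * sizeof(u32);
}
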
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30a8403a8f4f..cd9fe609aefb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -478,11 +478,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* and as part of the cleanup in the hw state restore we also redisable
* the vga plane.
*/
- if (!HAS_PCH_SPLIT(dev)) {
- drm_modeset_lock_all(dev);
+ if (!HAS_PCH_SPLIT(dev))
intel_display_resume(dev);
- drm_modeset_unlock_all(dev);
- }
dev_priv->modeset_restore = MODESET_DONE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 347d4df49a9b..8ed3cf34f82d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
struct drm_framebuffer *fb = pstate->fb;
+ uint32_t width = 0, height = 0;
+
+ width = drm_rect_width(&intel_pstate->src) >> 16;
+ height = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(pstate->rotation))
+ swap(width, height);
/* for planar format */
if (fb->pixel_format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
- return intel_crtc->config->pipe_src_w *
- intel_crtc->config->pipe_src_h *
+ return width * height *
drm_format_plane_cpp(fb->pixel_format, 0);
else /* uv-plane data rate */
- return (intel_crtc->config->pipe_src_w/2) *
- (intel_crtc->config->pipe_src_h/2) *
+ return (width / 2) * (height / 2) *
drm_format_plane_cpp(fb->pixel_format, 1);
}
/* for packed formats */
- return intel_crtc->config->pipe_src_w *
- intel_crtc->config->pipe_src_h *
- drm_format_plane_cpp(fb->pixel_format, 0);
+ return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
}
/*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_framebuffer *fb = plane->state->fb;
int id = skl_wm_plane_id(intel_plane);
- if (fb == NULL)
+ if (!to_intel_plane_state(plane->state)->visible)
continue;
+
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t plane_blocks, y_plane_blocks = 0;
int id = skl_wm_plane_id(intel_plane);
- if (pstate->fb == NULL)
+ if (!to_intel_plane_state(pstate)->visible)
continue;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
{
struct drm_plane *plane = &intel_plane->base;
struct drm_framebuffer *fb = plane->state->fb;
+ struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(plane->state);
uint32_t latency = dev_priv->wm.skl_latency[level];
uint32_t method1, method2;
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
uint8_t cpp;
+ uint32_t width = 0, height = 0;
- if (latency == 0 || !cstate->base.active || !fb)
+ if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
return false;
+ width = drm_rect_width(&intel_pstate->src) >> 16;
+ height = drm_rect_height(&intel_pstate->src) >> 16;
+
+ if (intel_rotation_90_or_270(plane->state->rotation))
+ swap(width, height);
+
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
cpp, latency);
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- cstate->pipe_src_w,
- cpp, fb->modifier[0],
+ width,
+ cpp,
+ fb->modifier[0],
latency);
- plane_bytes_per_line = cstate->pipe_src_w * cpp;
+ plane_bytes_per_line = width * cpp;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+ if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+ /* This is tied to WaForceContextSaveRestoreNonCoherent */
+ if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
/*
* Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj;
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ unsigned flags = PIN_OFFSET_BIAS | 4096;
int ret;
if (HAS_LLC(dev_priv) && !obj->stolen) {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
if (ret)
return ret;
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return -ENOMEM;
}
} else {
- ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+ flags | PIN_MAPPABLE);
if (ret)
return ret;
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
- * falls off the end. So only need to to wait for the
- * reserved size after flushing out the remainder.
+ * falls off the end. So we don't need an immediate wrap
+ * and only need to wait for the reserved size, measured
+ * from the start of the ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
- need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;
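
The comment rewrite above (and its twin in intel_lrc.c) describes the ring-space decision: if the request plus its reserved tail no longer fits before the wrap point, the writer wraps and must wait until the actual remainder plus the reserved size is free, i.e. enough space measured from the start of the ring. A distilled sketch of that decision, with illustrative parameter names:

static unsigned int ring_wait_bytes(unsigned int total, unsigned int space,
				    unsigned int remain_usable,
				    unsigned int remain_actual,
				    unsigned int reserved)
{
	if (total > remain_usable)
		return remain_actual + reserved;	/* wrap; wait from ring start */
	if (total > space)
		return total;				/* no wrap, just wait */
	return 0;					/* fits immediately */
}
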
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 436d8f2b8682..68b6f69aa682 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ if (IS_HASWELL(dev))
+ dev_priv->uncore.funcs.force_wake_put =
+ fw_domains_put_with_fifo;
+ else
+ dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 2a95d10e9d92..a24631fdf4ad 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -225,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
if (!iores)
return -ENXIO;
- platform_set_drvdata(pdev, hdmi);
-
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
@@ -245,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+ ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+ /*
+ * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+ * which would have called the encoder cleanup. Do it manually.
+ */
+ if (ret)
+ drm_encoder_cleanup(encoder);
+
+ return ret;
}
static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9876e0f0c3e1..e26dcdec2aba 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -326,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
{
struct imx_drm_device *imxdrm = drm->dev_private;
struct imx_drm_crtc *imx_drm_crtc;
- int ret;
/*
* The vblank arrays are dimensioned by MAX_CRTC - we can't
@@ -351,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
*new_crtc = imx_drm_crtc;
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- goto err_register;
-
drm_crtc_helper_add(crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
@@ -362,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL);
return 0;
-
-err_register:
- imxdrm->crtc[--imxdrm->pipes] = NULL;
- kfree(imx_drm_crtc);
- return ret;
}
EXPORT_SYMBOL_GPL(imx_drm_add_crtc);
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 588827844f30..681ec6eb77d9 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -72,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref)
int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
int x, int y)
{
- struct drm_gem_cma_object *cma_obj;
- unsigned long eba;
- int active;
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_DEBUG_KMS("entry is null.\n");
- return -EFAULT;
+ struct drm_gem_cma_object *cma_obj[3];
+ unsigned long eba, ubo, vbo;
+ int active, i;
+
+ for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
+ cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
+ if (!cma_obj[i]) {
+ DRM_DEBUG_KMS("plane %d entry is null.\n", i);
+ return -EFAULT;
+ }
}
- dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
- &cma_obj->paddr, x, y);
-
- eba = cma_obj->paddr + fb->offsets[0] +
+ eba = cma_obj[0]->paddr + fb->offsets[0] +
fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+ if (eba & 0x7) {
+ DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
+ DRM_DEBUG_KMS("pitches out of range.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
+ DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ ipu_plane->stride[0] = fb->pitches[0];
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ /*
+ * Multiplanar formats have to meet the following restrictions:
+ * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+ * - EBA, UBO and VBO are a multiple of 8
+ * - UBO and VBO are unsigned and not larger than 0xfffff8
+ * - Only EBA may be changed while scanout is active
+ * - The strides of U and V planes must be identical.
+ */
+ ubo = cma_obj[1]->paddr + fb->offsets[1] +
+ fb->pitches[1] * y / 2 + x / 2 - eba;
+ vbo = cma_obj[2]->paddr + fb->offsets[2] +
+ fb->pitches[2] * y / 2 + x / 2 - eba;
+
+ if ((ubo & 0x7) || (vbo & 0x7)) {
+ DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
+ return -EINVAL;
+ }
+
+ if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
+ DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
+ (ipu_plane->v_offset != vbo))) {
+ DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] != fb->pitches[2]) {
+ DRM_DEBUG_KMS("U/V pitches must be identical.\n");
+ return -EINVAL;
+ }
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
+ DRM_DEBUG_KMS("U/V pitches out of range.\n");
+ return -EINVAL;
+ }
+
+ if (ipu_plane->enabled &&
+ (ipu_plane->stride[1] != fb->pitches[1])) {
+ DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
+ return -EINVAL;
+ }
+
+ ipu_plane->u_offset = ubo;
+ ipu_plane->v_offset = vbo;
+ ipu_plane->stride[1] = fb->pitches[1];
+
+ dev_dbg(ipu_plane->base.dev->dev,
+ "phys = %pad %pad %pad, x = %d, y = %d",
+ &cma_obj[0]->paddr, &cma_obj[1]->paddr,
+ &cma_obj[2]->paddr, x, y);
+ break;
+ default:
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
+ &cma_obj[0]->paddr, x, y);
+ break;
+ }
+
if (ipu_plane->enabled) {
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
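
The restrictions checked above come from the IPUv3 addressing model: the hardware takes one base address (EBA) plus unsigned U/V offsets (UBO/VBO) that must be multiples of 8 and at most 0xfffff8. A worked example of the offset math for a tightly packed YUV420 frame, assuming U follows Y and V follows U (illustrative layout, not the only valid one):

#include <linux/errno.h>

static int compute_yuv420_offsets(unsigned long y_base,
				  unsigned int pitch_y, unsigned int height,
				  unsigned long *ubo, unsigned long *vbo)
{
	unsigned long u_base = y_base + pitch_y * height;		/* U plane */
	unsigned long v_base = u_base + (pitch_y / 2) * (height / 2);	/* V plane */

	*ubo = u_base - y_base;
	*vbo = v_base - y_base;

	if ((*ubo | *vbo) & 0x7)
		return -EINVAL;		/* not a multiple of 8 */
	if (*ubo > 0xfffff8 || *vbo > 0xfffff8)
		return -EINVAL;		/* offset field overflow */
	return 0;
}
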
@@ -201,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
}
}
- ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w);
- if (ret) {
- dev_err(dev, "initializing dmfc channel failed with %d\n", ret);
- return ret;
- }
-
ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
calc_bandwidth(crtc_w, crtc_h,
calc_vref(mode)), 64);
@@ -215,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
return ret;
}
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
+
ipu_cpmem_zero(ipu_plane->ipu_ch);
ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
@@ -233,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
if (interlaced)
ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
+ if (fb->pixel_format == DRM_FORMAT_YUV420) {
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ ipu_plane->stride[1],
+ ipu_plane->u_offset,
+ ipu_plane->v_offset);
+ } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
+ ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+ ipu_plane->stride[1],
+ ipu_plane->v_offset,
+ ipu_plane->u_offset);
+ }
+
ipu_plane->w = src_w;
ipu_plane->h = src_h;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 3a443b413c60..4448fd4ad4eb 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -29,6 +29,10 @@ struct ipu_plane {
int w;
int h;
+ unsigned int u_offset;
+ unsigned int v_offset;
+ unsigned int stride[2];
+
bool enabled;
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index af8b4c19cf15..14e64e08909e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -92,18 +92,6 @@ static inline void mga_wait_busy(struct mga_device *mdev)
} while ((status & 0x01) && time_before(jiffies, timeout));
}
-/*
- * The core passes the desired mode to the CRTC code to see whether any
- * CRTC-specific modifications need to be made to it. We're in a position
- * to just pass that straight through, so this does nothing
- */
-static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
#define P_ARRAY_SIZE 9
static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
@@ -1410,7 +1398,6 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
static const struct drm_crtc_helper_funcs mga_helper_funcs = {
.disable = mga_crtc_disable,
.dpms = mga_crtc_dpms,
- .mode_fixup = mga_crtc_mode_fixup,
.mode_set = mga_crtc_mode_set,
.mode_set_base = mga_crtc_mode_set_base,
.prepare = mga_crtc_prepare,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b04a64664673..65428cf233ce 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -196,7 +196,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
int msm_hdmi_pll_8960_init(struct platform_device *pdev);
int msm_hdmi_pll_8996_init(struct platform_device *pdev);
#else
-static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev);
+static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
{
return -ENODEV;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index c77e3d4e2c5c..e233acf52334 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -147,13 +147,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
kfree(mdp4_crtc);
}
-static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
[VG1] = 1,
@@ -501,7 +494,6 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
- .mode_fixup = mdp4_crtc_mode_fixup,
.mode_set_nofb = mdp4_crtc_mode_set_nofb,
.disable = mdp4_crtc_disable,
.enable = mdp4_crtc_enable,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index a064d9712234..9673b9520b6a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -185,13 +185,6 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc)
kfree(mdp5_crtc);
}
-static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/*
* blend_setup() - blend all the planes of a CRTC
*
@@ -627,7 +620,6 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
- .mode_fixup = mdp5_crtc_mode_fixup,
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
.disable = mdp5_crtc_disable,
.enable = mdp5_crtc_enable,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d52910e2c26c..c03b96709179 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -467,9 +467,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
struct msm_file_private *ctx = file->driver_priv;
struct msm_kms *kms = priv->kms;
- if (kms)
- kms->funcs->preclose(kms, file);
-
mutex_lock(&dev->struct_mutex);
if (ctx == priv->lastctx)
priv->lastctx = NULL;
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaada179..e32222c3d44f 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -55,7 +55,6 @@ struct msm_kms_funcs {
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
/* cleanup: */
- void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
};
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f04397d43a7..55ccbf006b5e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -227,13 +227,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
}
-static bool
-nv_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void
nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
@@ -1093,7 +1086,6 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.dpms = nv_crtc_dpms,
.prepare = nv_crtc_prepare,
.commit = nv_crtc_commit,
- .mode_fixup = nv_crtc_mode_fixup,
.mode_set = nv_crtc_mode_set,
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
index 4bef72a9d106..3fda594700e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.c
@@ -59,9 +59,11 @@ static void
nv40_perfctr_next(struct nvkm_pm *pm, struct nvkm_perfdom *dom)
{
struct nvkm_device *device = pm->engine.subdev.device;
- if (pm->sequence != pm->sequence) {
+ struct nv40_pm *nv40pm = container_of(pm, struct nv40_pm, base);
+
+ if (nv40pm->sequence != pm->sequence) {
nvkm_wr32(device, 0x400084, 0x00000020);
- pm->sequence = pm->sequence;
+ nv40pm->sequence = pm->sequence;
}
}
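
The nv40 fix above is a classic self-comparison bug: pm->sequence != pm->sequence is never true, so the counter re-arm path was dead code; the fix compares against a per-chip last-seen copy. The bug in distilled form (structs illustrative):

struct example_pm { int sequence; };
struct example_chip { struct example_pm base; int last_sequence; };

static void example_next(struct example_chip *chip)
{
	if (chip->last_sequence != chip->base.sequence) {	/* real change test */
		/* ... re-arm hardware counters here ... */
		chip->last_sequence = chip->base.sequence;
	}
}
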
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 3fd5a0b4d4cf..747f26a55e43 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -279,7 +279,7 @@ static int dvic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (pdev->dev.of_node)
+ if (!pdev->dev.of_node)
return -ENODEV;
r = dvic_probe_of(pdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index ae7dd625e19f..36485c2137ce 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -1180,7 +1180,7 @@ static int dsicm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->pdev = pdev;
- if (pdev->dev.of_node)
+ if (!pdev->dev.of_node)
return -ENODEV;
r = dsicm_probe_of(pdev);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 04097dab8589..075f2bb44867 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -332,13 +332,6 @@ static void omap_crtc_destroy(struct drm_crtc *crtc)
kfree(omap_crtc);
}
-static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void omap_crtc_enable(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -475,7 +468,6 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
- .mode_fixup = omap_crtc_mode_fixup,
.mode_set_nofb = omap_crtc_mode_set_nofb,
.disable = omap_crtc_disable,
.enable = omap_crtc_enable,
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9f94576c435d..de275a5be1db 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -597,10 +597,9 @@ static int omap_dmm_remove(struct platform_device *dev)
kfree(omap_dmm->engines);
if (omap_dmm->refill_va)
- dma_free_writecombine(omap_dmm->dev,
- REFILL_BUFFER_SIZE * omap_dmm->num_engines,
- omap_dmm->refill_va,
- omap_dmm->refill_pa);
+ dma_free_wc(omap_dmm->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ omap_dmm->refill_va, omap_dmm->refill_pa);
if (omap_dmm->dummy_page)
__free_page(omap_dmm->dummy_page);
@@ -725,9 +724,9 @@ static int omap_dmm_probe(struct platform_device *dev)
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
/* alloc refill memory */
- omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
- REFILL_BUFFER_SIZE * omap_dmm->num_engines,
- &omap_dmm->refill_pa, GFP_KERNEL);
+ omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ &omap_dmm->refill_pa, GFP_KERNEL);
if (!omap_dmm->refill_va) {
dev_err(&dev->dev, "could not allocate refill memory\n");
goto fail;
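
The omapdrm hunks above adopt the dma_alloc_wc()/dma_free_wc() names that replaced dma_alloc_writecombine()/dma_free_writecombine() (the old names remained as compatibility aliases when the rename landed). A minimal usage sketch: the free must be called with the same size/cpu/dma triple the allocation returned.

#include <linux/dma-mapping.h>

static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	/* write-combined CPU mapping; DMA address returned via *dma */
	return dma_alloc_wc(dev, size, dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t size,
			      void *cpu, dma_addr_t dma)
{
	dma_free_wc(dev, size, cpu, dma);
}
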
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index cc36a8dc9bd4..907154f5b67c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1313,8 +1313,8 @@ void omap_gem_free_object(struct drm_gem_object *obj)
}
if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
- dma_free_writecombine(dev->dev, obj->size,
- omap_obj->vaddr, omap_obj->paddr);
+ dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
+ omap_obj->paddr);
} else if (omap_obj->vaddr) {
vunmap(omap_obj->vaddr);
} else if (obj->import_attach) {
@@ -1412,9 +1412,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
/* Allocate memory if needed. */
if (flags & OMAP_BO_MEM_DMA_API) {
- omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
- &omap_obj->paddr,
- GFP_KERNEL);
+ omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
+ &omap_obj->paddr,
+ GFP_KERNEL);
if (!omap_obj->vaddr)
goto err_release;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 3cf8aab23a39..af267c35d813 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -97,11 +97,12 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
return omap_gem_get_pages(obj, &pages, true);
}
-static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
- enum dma_data_direction dir)
+static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
+ enum dma_data_direction dir)
{
struct drm_gem_object *obj = buffer->priv;
omap_gem_put_pages(obj);
+ return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 2164c999052c..ceb20486dacf 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -847,6 +847,7 @@ static const struct drm_display_mode innolux_g121x1_l03_mode = {
.vsync_end = 768 + 38 + 1,
.vtotal = 768 + 38 + 1 + 0,
.vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc innolux_g121x1_l03 = {
@@ -982,6 +983,29 @@ static const struct panel_desc lg_lb070wv8 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
};
+static const struct drm_display_mode lg_lp120up1_mode = {
+ .clock = 162300,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 4,
+ .vsync_end = 1280 + 4 + 4,
+ .vtotal = 1280 + 4 + 4 + 12,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp120up1 = {
+ .modes = &lg_lp120up1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 267,
+ .height = 183,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -1177,6 +1201,42 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct display_timing urt_umsh_8596md_timing = {
+ .pixelclock = { 33260000, 33260000, 33260000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 41, 41, 41 },
+ .hback_porch = { 216 - 128, 216 - 128, 216 - 128 },
+ .hsync_len = { 71, 128, 128 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 10, 10, 10 },
+ .vback_porch = { 35 - 2, 35 - 2, 35 - 2 },
+ .vsync_len = { 2, 2, 2 },
+ .flags = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_NEGEDGE |
+ DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc urt_umsh_8596md_lvds = {
+ .timings = &urt_umsh_8596md_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
+static const struct panel_desc urt_umsh_8596md_parallel = {
+ .timings = &urt_umsh_8596md_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct of_device_id platform_of_match[] = {
{
.compatible = "ampire,am800480r3tmqwa1h",
@@ -1257,6 +1317,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
}, {
+ .compatible = "lg,lp120up1",
+ .data = &lg_lp120up1,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
@@ -1281,6 +1344,24 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ .compatible = "urt,umsh-8596md-t",
+ .data = &urt_umsh_8596md_parallel,
+ }, {
+ .compatible = "urt,umsh-8596md-1t",
+ .data = &urt_umsh_8596md_parallel,
+ }, {
+ .compatible = "urt,umsh-8596md-7t",
+ .data = &urt_umsh_8596md_parallel,
+ }, {
+ .compatible = "urt,umsh-8596md-11t",
+ .data = &urt_umsh_8596md_lvds,
+ }, {
+ .compatible = "urt,umsh-8596md-19t",
+ .data = &urt_umsh_8596md_lvds,
+ }, {
+ .compatible = "urt,umsh-8596md-20t",
+ .data = &urt_umsh_8596md_parallel,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 43e5f503d1c5..030409a3ee4e 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(user_bo);
+ qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
+ qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
+ qcrtc->hot_spot_x = hot_x;
+ qcrtc->hot_spot_y = hot_y;
+
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = qcrtc->cur_x;
- cmd->u.set.position.y = qcrtc->cur_y;
+ cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
@@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_MOVE;
- cmd->u.position.x = qcrtc->cur_x;
- cmd->u.position.y = qcrtc->cur_y;
+ cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
+ cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
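
The qxl hunks above reconcile two coordinate conventions: userspace positions the cursor by its top-left corner, while the device expects the hotspot position, so the driver tracks the hotspot and adds it to every position sent, compensating cur_x/cur_y when a new image changes the hotspot so the on-screen point does not jump. A worked example with illustrative numbers (cursor at (100, 50), old hotspot (4, 4), new hotspot (12, 2)):

struct cursor_state { int cur_x, cur_y, hot_x, hot_y; };

static void cursor_set_image(struct cursor_state *c, int new_hot_x,
			     int new_hot_y)
{
	/* keep the device point (cur + hot) fixed across the image swap */
	c->cur_x += c->hot_x - new_hot_x;	/* 100 + (4 - 12) = 92 */
	c->cur_y += c->hot_y - new_hot_y;	/*  50 + (4 -  2) = 52 */
	c->hot_x = new_hot_x;			/* device x: 92 + 12 = 104 = 100 + 4 */
	c->hot_y = new_hot_y;			/* device y: 52 +  2 =  54 =  50 + 4 */
}
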
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6e6b9b1519b8..3f3897eb458c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -135,6 +135,8 @@ struct qxl_crtc {
int index;
int cur_x;
int cur_y;
+ int hot_spot_x;
+ int hot_spot_y;
};
struct qxl_output {
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index ec1593a6a561..f66c33dd21a3 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -66,9 +66,10 @@ int atom_debug = 0;
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
-static uint32_t atom_arg_mask[8] =
- { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
-0xFF000000 };
+static uint32_t atom_arg_mask[8] = {
+ 0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000,
+ 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
+};
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 801dd60ac192..b80b08f71cb4 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -1665,11 +1667,11 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
}
int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
+ struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
if (ASIC_IS_DCE4(rdev))
return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6af832545bc5..afa9db1dc0e3 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -37,10 +37,10 @@
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
static char *voltage_names[] = {
- "0.4V", "0.6V", "0.8V", "1.2V"
+ "0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
- "0dB", "3.5dB", "6dB", "9.5dB"
+ "0dB", "3.5dB", "6dB", "9.5dB"
};
/***** radeon AUX functions *****/
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 1603751b1164..edd05cdb0cd8 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+
break;
case 2:
case 3:
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 69556f5e247e..38e5123708e7 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -1163,12 +1163,11 @@ u32 btc_valid_sclk[40] =
155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000
};
-static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
-{
- { 10000, 30000, RADEON_SCLK_UP },
- { 15000, 30000, RADEON_SCLK_UP },
- { 20000, 30000, RADEON_SCLK_UP },
- { 25000, 30000, RADEON_SCLK_UP }
+static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = {
+ { 10000, 30000, RADEON_SCLK_UP },
+ { 15000, 30000, RADEON_SCLK_UP },
+ { 20000, 30000, RADEON_SCLK_UP },
+ { 25000, 30000, RADEON_SCLK_UP }
};
void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
@@ -1637,14 +1636,14 @@ static int btc_init_smc_table(struct radeon_device *rdev,
cypress_populate_smc_voltage_tables(rdev, table);
switch (rdev->pm.int_thermal_type) {
- case THERMAL_TYPE_EVERGREEN:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+ case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
break;
- case THERMAL_TYPE_NONE:
+ case THERMAL_TYPE_NONE:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
break;
- default:
+ default:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
break;
}
@@ -1860,37 +1859,37 @@ static bool btc_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
case MC_SEQ_RAS_TIMING >> 2:
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
break;
- case MC_SEQ_CAS_TIMING >> 2:
+ case MC_SEQ_CAS_TIMING >> 2:
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING >> 2:
+ case MC_SEQ_MISC_TIMING >> 2:
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING2 >> 2:
+ case MC_SEQ_MISC_TIMING2 >> 2:
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D0 >> 2:
+ case MC_SEQ_RD_CTL_D0 >> 2:
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D1 >> 2:
+ case MC_SEQ_RD_CTL_D1 >> 2:
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D0 >> 2:
+ case MC_SEQ_WR_CTL_D0 >> 2:
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D1 >> 2:
+ case MC_SEQ_WR_CTL_D1 >> 2:
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
break;
- case MC_PMG_CMD_EMRS >> 2:
+ case MC_PMG_CMD_EMRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS >> 2:
+ case MC_PMG_CMD_MRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS1 >> 2:
+ case MC_PMG_CMD_MRS1 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
break;
- default:
+ default:
result = false;
break;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 4a09947be244..35e0fc3ae8a7 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -192,9 +192,9 @@ static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
- struct ci_power_info *pi = rdev->pm.dpm.priv;
+ struct ci_power_info *pi = rdev->pm.dpm.priv;
- return pi;
+ return pi;
}
static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
@@ -1632,7 +1632,7 @@ static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
else
power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
- ci_set_power_limit(rdev, power_limit);
+ ci_set_power_limit(rdev, power_limit);
if (pi->caps_automatic_dc_transition) {
if (ac_power)
@@ -2017,9 +2017,9 @@ static void ci_enable_display_gap(struct radeon_device *rdev)
{
u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
- tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
- tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
- DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
+ tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
+ tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}
@@ -2938,8 +2938,8 @@ static int ci_populate_single_memory_level(struct radeon_device *rdev,
memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
- memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
- memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+ memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
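
Context for the re-indented conversions: the SMC consumes its tables big-endian, so each voltage is scaled by VOLTAGE_SCALE and then byte-swapped with cpu_to_be32() before being handed to the firmware. A user-space sketch with htonl() standing in for cpu_to_be32(); the scale factor is assumed here for illustration:

```c
#include <arpa/inet.h>	/* htonl(), standing in for the kernel's cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>

#define VOLTAGE_SCALE 4	/* assumed scale factor, for illustration only */

int main(void)
{
	uint32_t min_vddc = 900;	/* host-endian value, e.g. millivolts */
	uint32_t smc_field = htonl(min_vddc * VOLTAGE_SCALE);

	printf("SMC field: 0x%08x\n", (unsigned)smc_field);	/* big-endian bytes */
	return 0;
}
```
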
@@ -3152,7 +3152,7 @@ static int ci_calculate_sclk_params(struct radeon_device *rdev,
spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 |= SPLL_DITHEN;
if (pi->caps_sclk_ss_support) {
struct radeon_atom_ss ss;
@@ -3229,7 +3229,7 @@ static int ci_populate_single_graphic_level(struct radeon_device *rdev,
graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
- graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+ graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
@@ -4393,7 +4393,7 @@ static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
break;
case MC_SEQ_CAS_TIMING >> 2:
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
- break;
+ break;
case MC_SEQ_MISC_TIMING >> 2:
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
break;
@@ -4625,7 +4625,7 @@ static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
if (ret)
goto init_mc_done;
- ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+ ret = ci_copy_vbios_mc_reg_table(table, ci_table);
if (ret)
goto init_mc_done;
@@ -4916,7 +4916,7 @@ static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *
allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
- rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
return 0;
@@ -5517,7 +5517,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct ci_ps *ps;
@@ -5693,8 +5693,8 @@ int ci_dpm_init(struct radeon_device *rdev)
return ret;
}
- pi->dll_default_on = false;
- pi->sram_end = SMC_RAM_END;
+ pi->dll_default_on = false;
+ pi->sram_end = SMC_RAM_END;
pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
@@ -5734,9 +5734,9 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->caps_uvd_dpm = true;
pi->caps_vce_dpm = true;
- ci_get_leakage_voltages(rdev);
- ci_patch_dependency_tables_with_leakage(rdev);
- ci_set_private_data_variables_based_on_pptable(rdev);
+ ci_get_leakage_voltages(rdev);
+ ci_patch_dependency_tables_with_leakage(rdev);
+ ci_set_private_data_variables_based_on_pptable(rdev);
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
@@ -5839,7 +5839,7 @@ int ci_dpm_init(struct radeon_device *rdev)
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
else
rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
- }
+ }
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
@@ -5860,7 +5860,7 @@ int ci_dpm_init(struct radeon_device *rdev)
#endif
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
+ &frev, &crev, &data_offset)) {
pi->caps_sclk_ss_support = true;
pi->caps_mclk_ss_support = true;
pi->dynamic_ss = true;
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 35c6f648ba04..24760ee3063e 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -194,11 +194,11 @@ PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
return PPSMC_Result_OK;
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
- if ((tmp & CKEN) == 0)
+ tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ if ((tmp & CKEN) == 0)
break;
- udelay(1);
- }
+ udelay(1);
+ }
return PPSMC_Result_OK;
}
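
ci_wait_for_smc_inactive() above is a bounded poll: re-read SMC_SYSCON_CLOCK_CNTL_0 until CKEN clears, sleeping 1 us between reads, for at most rdev->usec_timeout iterations — and, as the hunk shows, it returns PPSMC_Result_OK whether or not the bit cleared. A sketch of the same loop that instead reports the timeout to its caller; the register and the delay are stubbed out:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CKEN (1u << 24)			/* illustrative bit position */

static uint32_t fake_cntl = CKEN;	/* stands in for SMC_SYSCON_CLOCK_CNTL_0 */
static int reads;

static uint32_t read_reg(void)		/* stand-in for RREG32_SMC() */
{
	if (++reads > 3)		/* pretend the clock gates after a few reads */
		fake_cntl &= ~CKEN;
	return fake_cntl;
}

static void delay_us(unsigned int us) { (void)us; }	/* stand-in for udelay() */

static bool wait_clock_gated(unsigned int usec_timeout)
{
	for (unsigned int i = 0; i < usec_timeout; i++) {
		if ((read_reg() & CKEN) == 0)
			return true;	/* clock gated: SMC inactive */
		delay_us(1);
	}
	return false;			/* timed out */
}

int main(void)
{
	printf("gated: %d\n", wait_clock_gated(100));
	return 0;
}
```
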
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 06001400ce8b..8ac82df2efde 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1712,7 +1712,7 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
*/
u32 cik_get_xclk(struct radeon_device *rdev)
{
- u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 reference_clock = rdev->clock.spll.reference_freq;
if (rdev->flags & RADEON_IS_IGP) {
if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
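
This hunk only re-indents the local in cik_get_xclk(), which derives the crystal clock from the SPLL reference frequency and, on IGP parts, consults GENERAL_PWRMGT's GPU_COUNTER_CLK bit (visible in the context). A sketch of that shape — the divide-by-two on the IGP path is an assumption, since the hunk's context ends before the return statements:

```c
#include <stdint.h>
#include <stdio.h>

#define RADEON_IS_IGP	(1u << 0)	/* illustrative flag bit */
#define GPU_COUNTER_CLK	(1u << 15)	/* illustrative bit position */

static uint32_t read_general_pwrmgt(void) { return GPU_COUNTER_CLK; }	/* stub */

static uint32_t get_xclk(uint32_t flags, uint32_t reference_freq)
{
	if (flags & RADEON_IS_IGP) {
		if (read_general_pwrmgt() & GPU_COUNTER_CLK)
			return reference_freq / 2;	/* assumed divisor */
	}
	return reference_freq;
}

int main(void)
{
	printf("%u\n", (unsigned)get_xclk(RADEON_IS_IGP, 27000));
	return 0;
}
```
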
@@ -2343,9 +2343,13 @@ out:
*/
static void cik_tiling_mode_table_init(struct radeon_device *rdev)
{
- const u32 num_tile_mode_states = 32;
- const u32 num_secondary_tile_mode_states = 16;
- u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+ u32 *tile = rdev->config.cik.tile_mode_array;
+ u32 *macrotile = rdev->config.cik.macrotile_mode_array;
+ const u32 num_tile_mode_states =
+ ARRAY_SIZE(rdev->config.cik.tile_mode_array);
+ const u32 num_secondary_tile_mode_states =
+ ARRAY_SIZE(rdev->config.cik.macrotile_mode_array);
+ u32 reg_offset, split_equal_to_row_size;
u32 num_pipe_configs;
u32 num_rbs = rdev->config.cik.max_backends_per_se *
rdev->config.cik.max_shader_engines;
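
The rewrite that follows is the interesting part of this commit: the old cik_tiling_mode_table_init() ran a 32-way switch for every reg_offset and every pipe configuration, computing gb_tile_moden and writing it out one register at a time; the new code points tile/macrotile at the cached arrays, sizes the loops with ARRAY_SIZE() instead of hard-coded 32 and 16, zeroes both arrays up front, assigns only the populated indices directly, and then flushes each array to the hardware in a single loop per table. A compact, self-contained sketch of that fill-then-flush pattern — the offset and values are illustrative, and wreg32() stands in for WREG32():

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void wreg32(uint32_t offset, uint32_t val)	/* stand-in for WREG32() */
{
	printf("reg[0x%04x] = 0x%08x\n", (unsigned)offset, (unsigned)val);
}

int main(void)
{
	uint32_t tile[32];
	const uint32_t tile_mode0 = 0x9910;	/* illustrative GB_TILE_MODE0 offset */

	memset(tile, 0, sizeof(tile));		/* unused slots stay 0, as before */
	tile[0] = 0x01010001;			/* only populated entries assigned */
	tile[5] = 0x00020002;

	for (uint32_t i = 0; i < ARRAY_SIZE(tile); i++)
		wreg32(tile_mode0 + i * 4, tile[i]);	/* one flush loop per table */
	return 0;
}
```
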
@@ -2367,1032 +2371,669 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
if (num_pipe_configs > 8)
num_pipe_configs = 16;
- if (num_pipe_configs == 16) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else if (num_pipe_configs == 8) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_2_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else if (num_pipe_configs == 4) {
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ tile[reg_offset] = 0;
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ macrotile[reg_offset] = 0;
+
+ switch (num_pipe_configs) {
+ case 16:
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+ break;
+
+ case 8:
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_2_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+ break;
+
+ case 4:
if (num_rbs == 4) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_16x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
} else if (num_rbs < 4) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16));
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
- NUM_BANKS(ADDR_SURF_4_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else if (num_pipe_configs == 2) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
- break;
- case 1:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
- break;
- case 2:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 3:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
- break;
- case 4:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 5:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
- break;
- case 6:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
- break;
- case 7:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- TILE_SPLIT(split_equal_to_row_size));
- break;
- case 8:
- gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- PIPE_CONFIG(ADDR_SURF_P2);
- break;
- case 9:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2));
- break;
- case 10:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 11:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 12:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 13:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
- break;
- case 14:
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 16:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 17:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 27:
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2));
- break;
- case 28:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 29:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- case 30:
- gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
- MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P2) |
- SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 1:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 2:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 3:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 4:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 5:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 6:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- case 8:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 9:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 10:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 11:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 12:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 13:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
- NUM_BANKS(ADDR_SURF_16_BANK));
- break;
- case 14:
- gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
- NUM_BANKS(ADDR_SURF_8_BANK));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16));
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
+ tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
}
- } else
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+ break;
+
+ case 2:
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
+ tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
+ tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ TILE_SPLIT(split_equal_to_row_size));
+ tile[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P2);
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2));
+ tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P2) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+
+ macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+ macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+ macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]);
+ break;
+
+ default:
DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
+ }
}
/**
@@ -9709,13 +9350,13 @@ uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
- u32 cntl_reg, u32 status_reg)
+ u32 cntl_reg, u32 status_reg)
{
int r, i;
struct atom_clock_dividers dividers;
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index ca058589ddef..a4edd0702718 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1620,14 +1620,14 @@ static int cypress_init_smc_table(struct radeon_device *rdev,
cypress_populate_smc_voltage_tables(rdev, table);
switch (rdev->pm.int_thermal_type) {
- case THERMAL_TYPE_EVERGREEN:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
+ case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
break;
- case THERMAL_TYPE_NONE:
+ case THERMAL_TYPE_NONE:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
break;
- default:
+ default:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
break;
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 2ad462896896..76c4bdf21b20 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1140,7 +1140,7 @@ static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
int r, i;
struct atom_clock_dividers dividers;
- r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
clock, false, &dividers);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 46f87d4aaf31..9e93205eb9e4 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1816,8 +1816,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (idx_value & 0xfffffff0) +
- ((u64)(tmp & 0xff) << 32);
+ (idx_value & 0xfffffff0) +
+ ((u64)(tmp & 0xff) << 32);
ib[idx + 0] = offset;
ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -1862,8 +1862,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1897,8 +1897,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1925,8 +1925,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- radeon_get_ib_value(p, idx+1) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ radeon_get_ib_value(p, idx+1) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2098,8 +2098,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2239,8 +2239,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffff8;
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -2261,8 +2261,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffffc;
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -2283,8 +2283,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffffc;
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 3cf04a2f44bb..f766c967a284 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -206,7 +206,7 @@ void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder,
* build an AVI Info Frame
*/
void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
- unsigned char *buffer, size_t size)
+ unsigned char *buffer, size_t size)
{
uint8_t *frame = buffer + 3;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 2d71da448487..d0240743a17c 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2640,7 +2640,7 @@ static int kv_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct kv_ps *ps;
@@ -2738,7 +2738,7 @@ int kv_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- pi->sram_end = SMC_RAM_END;
+ pi->sram_end = SMC_RAM_END;
/* Enabling nb dpm on an asrock system prevents dpm from working */
if (rdev->pdev->subsystem_vendor == 0x1849)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 158872eb78e4..b88d63c9be99 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1257,7 +1257,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
tmp = RREG32_CG(CG_CGTT_LOCAL_0);
tmp &= ~0x00380000;
WREG32_CG(CG_CGTT_LOCAL_0, tmp);
- tmp = RREG32_CG(CG_CGTT_LOCAL_1);
+ tmp = RREG32_CG(CG_CGTT_LOCAL_1);
tmp &= ~0x0e000000;
WREG32_CG(CG_CGTT_LOCAL_1, tmp);
}
@@ -2634,7 +2634,7 @@ int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
struct atom_clock_dividers dividers;
int r, i;
- r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
ecclk, false, &dividers);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c3d531a1114b..4a601f990562 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -725,9 +725,9 @@ extern int ni_mc_load_microcode(struct radeon_device *rdev);
struct ni_power_info *ni_get_pi(struct radeon_device *rdev)
{
- struct ni_power_info *pi = rdev->pm.dpm.priv;
+ struct ni_power_info *pi = rdev->pm.dpm.priv;
- return pi;
+ return pi;
}
struct ni_ps *ni_get_ps(struct radeon_ps *rps)
@@ -1096,9 +1096,9 @@ static void ni_stop_smc(struct radeon_device *rdev)
static int ni_process_firmware_header(struct radeon_device *rdev)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
u32 tmp;
int ret;
@@ -1202,14 +1202,14 @@ static int ni_enter_ulp_state(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
if (pi->gfx_clock_gating) {
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
- WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
RREG32(GB_ADDR_CONFIG);
- }
+ }
WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
- ~HOST_SMC_MSG_MASK);
+ ~HOST_SMC_MSG_MASK);
udelay(25000);
@@ -1321,12 +1321,12 @@ static void ni_populate_mvdd_value(struct radeon_device *rdev,
u32 mclk,
NISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
if (!pi->mvdd_control) {
voltage->index = eg_pi->mvdd_high_index;
- voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
+ voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
return;
}
@@ -1510,47 +1510,47 @@ int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
u32 mc_cg_config;
switch (arb_freq_src) {
- case MC_CG_ARB_FREQ_F0:
+ case MC_CG_ARB_FREQ_F0:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
break;
- case MC_CG_ARB_FREQ_F1:
+ case MC_CG_ARB_FREQ_F1:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
break;
- case MC_CG_ARB_FREQ_F2:
+ case MC_CG_ARB_FREQ_F2:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
break;
- case MC_CG_ARB_FREQ_F3:
+ case MC_CG_ARB_FREQ_F3:
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
break;
- default:
+ default:
return -EINVAL;
}
switch (arb_freq_dest) {
- case MC_CG_ARB_FREQ_F0:
+ case MC_CG_ARB_FREQ_F0:
WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
break;
- case MC_CG_ARB_FREQ_F1:
+ case MC_CG_ARB_FREQ_F1:
WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
break;
- case MC_CG_ARB_FREQ_F2:
+ case MC_CG_ARB_FREQ_F2:
WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
break;
- case MC_CG_ARB_FREQ_F3:
+ case MC_CG_ARB_FREQ_F3:
WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
@@ -1621,9 +1621,7 @@ static int ni_populate_memory_timing_parameters(struct radeon_device *rdev,
(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);
- radeon_atom_set_engine_dram_timings(rdev,
- pl->sclk,
- pl->mclk);
+ radeon_atom_set_engine_dram_timings(rdev, pl->sclk, pl->mclk);
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -1867,9 +1865,9 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
- if (pi->mem_gddr5)
- mpll_dq_func_cntl &= ~PDNB;
- mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
+ if (pi->mem_gddr5)
+ mpll_dq_func_cntl &= ~PDNB;
+ mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS;
mclk_pwrmgt_cntl |= (MRDCKA0_RESET |
@@ -1891,15 +1889,15 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
MRDCKD1_PDNB);
dll_cntl |= (MRDCKA0_BYPASS |
- MRDCKA1_BYPASS |
- MRDCKB0_BYPASS |
- MRDCKB1_BYPASS |
- MRDCKC0_BYPASS |
- MRDCKC1_BYPASS |
- MRDCKD0_BYPASS |
- MRDCKD1_BYPASS);
-
- spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ MRDCKA1_BYPASS |
+ MRDCKB0_BYPASS |
+ MRDCKB1_BYPASS |
+ MRDCKC0_BYPASS |
+ MRDCKC1_BYPASS |
+ MRDCKD0_BYPASS |
+ MRDCKD1_BYPASS);
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
@@ -2089,7 +2087,7 @@ static int ni_populate_sclk_value(struct radeon_device *rdev,
static int ni_init_smc_spll_table(struct radeon_device *rdev)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
SMC_NISLANDS_SPLL_DIV_TABLE *spll_table;
NISLANDS_SMC_SCLK_VALUE sclk_params;
@@ -2311,8 +2309,8 @@ static int ni_convert_power_level_to_smc(struct radeon_device *rdev,
NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
int ret;
bool dll_state_on;
u16 std_vddc;
@@ -2391,8 +2389,8 @@ static int ni_populate_smc_t(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
NISLANDS_SMC_SWSTATE *smc_state)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *state = ni_get_ps(radeon_state);
u32 a_t;
u32 t_l, t_h;
@@ -2451,8 +2449,8 @@ static int ni_populate_power_containment_values(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
NISLANDS_SMC_SWSTATE *smc_state)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *state = ni_get_ps(radeon_state);
u32 prev_sclk;
@@ -2595,7 +2593,7 @@ static int ni_enable_power_containment(struct radeon_device *rdev,
struct radeon_ps *radeon_new_state,
bool enable)
{
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
PPSMC_Result smc_result;
int ret = 0;
@@ -2625,7 +2623,7 @@ static int ni_convert_power_state_to_smc(struct radeon_device *rdev,
struct radeon_ps *radeon_state,
NISLANDS_SMC_SWSTATE *smc_state)
{
- struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *state = ni_get_ps(radeon_state);
int i, ret;
@@ -2770,46 +2768,46 @@ static bool ni_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
bool result = true;
switch (in_reg) {
- case MC_SEQ_RAS_TIMING >> 2:
+ case MC_SEQ_RAS_TIMING >> 2:
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
break;
- case MC_SEQ_CAS_TIMING >> 2:
+ case MC_SEQ_CAS_TIMING >> 2:
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING >> 2:
+ case MC_SEQ_MISC_TIMING >> 2:
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING2 >> 2:
+ case MC_SEQ_MISC_TIMING2 >> 2:
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D0 >> 2:
+ case MC_SEQ_RD_CTL_D0 >> 2:
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D1 >> 2:
+ case MC_SEQ_RD_CTL_D1 >> 2:
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D0 >> 2:
+ case MC_SEQ_WR_CTL_D0 >> 2:
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D1 >> 2:
+ case MC_SEQ_WR_CTL_D1 >> 2:
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
break;
- case MC_PMG_CMD_EMRS >> 2:
+ case MC_PMG_CMD_EMRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS >> 2:
+ case MC_PMG_CMD_MRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS1 >> 2:
+ case MC_PMG_CMD_MRS1 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
break;
- case MC_SEQ_PMG_TIMING >> 2:
+ case MC_SEQ_PMG_TIMING >> 2:
*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
break;
- case MC_PMG_CMD_MRS2 >> 2:
+ case MC_PMG_CMD_MRS2 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
break;
- default:
+ default:
result = false;
break;
}
@@ -2876,9 +2874,9 @@ static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table;
u8 module_index = rv770_get_memory_module_index(rdev);
- table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
+ table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
@@ -2896,25 +2894,25 @@ static int ni_initialize_mc_reg_table(struct radeon_device *rdev)
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
ret = ni_copy_vbios_mc_reg_table(table, ni_table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
ni_set_s0_mc_reg_index(ni_table);
ret = ni_set_mc_special_registers(rdev, ni_table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
ni_set_valid_flag(ni_table);
init_mc_done:
- kfree(table);
+ kfree(table);
return ret;
}
@@ -2994,7 +2992,7 @@ static int ni_populate_mc_reg_table(struct radeon_device *rdev,
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *boot_state = ni_get_ps(radeon_boot_state);
SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
@@ -3025,7 +3023,7 @@ static int ni_upload_mc_reg_table(struct radeon_device *rdev,
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state);
SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table;
u16 address;
@@ -3142,7 +3140,7 @@ static int ni_initialize_smc_cac_tables(struct radeon_device *rdev)
struct ni_power_info *ni_pi = ni_get_pi(rdev);
PP_NIslands_CACTABLES *cac_tables = NULL;
int i, ret;
- u32 reg;
+ u32 reg;
if (ni_pi->enable_cac == false)
return 0;
@@ -3422,13 +3420,13 @@ static int ni_pcie_performance_request(struct radeon_device *rdev,
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
- (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
+ (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
if (eg_pi->pcie_performance_request_registered == false)
radeon_acpi_pcie_notify_device_ready(rdev);
eg_pi->pcie_performance_request_registered = true;
return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) &&
- eg_pi->pcie_performance_request_registered) {
+ eg_pi->pcie_performance_request_registered) {
eg_pi->pcie_performance_request_registered = false;
return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise);
}
@@ -3441,12 +3439,12 @@ static int ni_advertise_gen2_capability(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 tmp;
- tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
- (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
- pi->pcie_gen2 = true;
- else
+ if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+ (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
+ pi->pcie_gen2 = true;
+ else
pi->pcie_gen2 = false;
if (!pi->pcie_gen2)
@@ -3458,8 +3456,8 @@ static int ni_advertise_gen2_capability(struct radeon_device *rdev)
static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
bool enable)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
- u32 tmp, bif;
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ u32 tmp, bif;
tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
@@ -3502,7 +3500,7 @@ static void ni_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
if (enable)
WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
else
- WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
+ WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}
void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
@@ -3563,7 +3561,7 @@ void ni_update_current_ps(struct radeon_device *rdev,
{
struct ni_ps *new_ps = ni_get_ps(rps);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
eg_pi->current_rps = *rps;
ni_pi->current_ps = *new_ps;
@@ -3575,7 +3573,7 @@ void ni_update_requested_ps(struct radeon_device *rdev,
{
struct ni_ps *new_ps = ni_get_ps(rps);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
- struct ni_power_info *ni_pi = ni_get_pi(rdev);
+ struct ni_power_info *ni_pi = ni_get_pi(rdev);
eg_pi->requested_rps = *rps;
ni_pi->requested_ps = *new_ps;
@@ -3591,8 +3589,8 @@ int ni_dpm_enable(struct radeon_device *rdev)
if (pi->gfx_clock_gating)
ni_cg_clockgating_default(rdev);
- if (btc_dpm_enabled(rdev))
- return -EINVAL;
+ if (btc_dpm_enabled(rdev))
+ return -EINVAL;
if (pi->mg_clock_gating)
ni_mg_clockgating_default(rdev);
if (eg_pi->ls_clock_gating)
@@ -3991,7 +3989,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
union pplib_clock_info *clock_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
struct ni_ps *ps;
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
index da310a70c0f0..827ccc87cbc3 100644
--- a/drivers/gpu/drm/radeon/ni_reg.h
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -109,6 +109,8 @@
#define NI_DP_MSE_SAT2 0x7398
#define NI_DP_MSE_SAT_UPDATE 0x739c
+# define NI_DP_MSE_SAT_UPDATE_MASK 0x3
+# define NI_DP_MSE_16_MTP_KEEPOUT 0x100
#define NI_DIG_BE_CNTL 0x7140
# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ed121042247f..f86ab695ee8f 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -235,8 +235,8 @@ int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
fb_div |= 1;
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
+ if (r)
+ return r;
/* assert PLL_RESET */
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
@@ -1490,7 +1490,7 @@ static int r600_mc_init(struct radeon_device *rdev)
rdev->fastfb_working = true;
}
}
- }
+ }
}
radeon_update_bandwidth_info(rdev);
@@ -4574,7 +4574,7 @@ uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 2f36fa1576e0..b69c8de35bd3 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1671,8 +1671,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (idx_value & 0xfffffff0) +
- ((u64)(tmp & 0xff) << 32);
+ (idx_value & 0xfffffff0) +
+ ((u64)(tmp & 0xff) << 32);
ib[idx + 0] = offset;
ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
@@ -1712,8 +1712,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- idx_value +
- ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+ idx_value +
+ ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
@@ -1764,8 +1764,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -1876,8 +1876,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffff8;
ib[idx+2] = upper_32_bits(offset) & 0xff;
@@ -1898,8 +1898,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
offset = reloc->gpu_offset +
- (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
- ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
+ (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
+ ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = offset & 0xfffffffc;
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index fa2154493cf1..6a4b020dd0b4 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -844,7 +844,7 @@ int r600_get_platform_caps(struct radeon_device *rdev)
struct radeon_mode_info *mode_info = &rdev->mode_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -874,7 +874,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
union fan_info *fan_info;
ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
int ret, i;
@@ -1070,7 +1070,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
ext_hdr->usVCETableOffset) {
VCEClockInfoArray *array = (VCEClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
(mode_info->atom_context->bios + data_offset +
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e85894ade95c..e82a99cb2459 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -215,7 +215,7 @@ void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
* build an HDMI Video Info Frame
*/
void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
- unsigned char *buffer, size_t size)
+ unsigned char *buffer, size_t size)
{
uint8_t *frame = buffer + 3;
@@ -312,7 +312,7 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
}
void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+ struct radeon_crtc *crtc, unsigned int clock)
{
struct radeon_encoder *radeon_encoder;
struct radeon_encoder_atom_dig *dig;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index de9a2ffcf5f7..f8097a0e7a79 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2095,7 +2095,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
struct radeon_i2c_bus_rec i2c_bus;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -2575,7 +2575,7 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
bool valid;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
@@ -2666,7 +2666,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
bool valid;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 56482e35d43e..fd8c4d317e60 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+ return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
/**
* radeon_atpx_call - call an ATPX method
*
@@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
- /* make sure required functions are enabled */
- /* dGPU power control is required */
- atpx->functions.power_cntl = true;
-
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a9b01bcf7d0a..432480ff9d22 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -34,7 +34,6 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#endif /* CONFIG_PPC_PMAC */
/* from radeon_legacy_encoder.c */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4197ca1bb1e4..4fd1a961012d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+#endif
+
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -1155,9 +1161,9 @@ static void radeon_check_arguments(struct radeon_device *rdev)
radeon_vm_size = 4;
}
- /*
- * Max GPUVM size for Cayman, SI and CI are 40 bits.
- */
+ /*
+ * Max GPUVM size for Cayman, SI and CI is 40 bits.
+ */
if (radeon_vm_size > 1024) {
dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
@@ -1433,7 +1439,7 @@ int radeon_device_init(struct radeon_device *rdev,
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- if (rdev->flags & RADEON_IS_PX)
+ if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
runtime = true;
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
if (runtime)
@@ -1895,7 +1901,7 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase "
- "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+ "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
return -EINVAL;
}
rdev->debugfs[rdev->debugfs_count].files = files;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index e29096b2fa6b..fcc7483d3f7b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -407,7 +407,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
unsigned repcnt = 4;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
- down_read(&rdev->exclusive_lock);
+ down_read(&rdev->exclusive_lock);
if (work->fence) {
struct radeon_fence *fence;
@@ -919,7 +919,7 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
*den /= tmp;
/* make sure numerator is large enough */
- if (*nom < nom_min) {
+ if (*nom < nom_min) {
tmp = DIV_ROUND_UP(nom_min, *nom);
*nom *= tmp;
*den *= tmp;
@@ -959,7 +959,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
- if (*fb_div > fb_div_max) {
+ if (*fb_div > fb_div_max) {
*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
*fb_div = fb_div_max;
}
@@ -1683,10 +1683,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
- if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
- radeon_fbdev_init(rdev);
- drm_kms_helper_poll_init(rdev->ddev);
- }
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
/* do pm late init */
ret = radeon_pm_late_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index df7a1719c841..de504ea29c06 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -89,8 +89,16 @@ static int radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary,
WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1);
do {
+ unsigned value1, value2;
+ udelay(10);
temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset);
- } while ((temp & 0x1) && retries++ < 10000);
+
+ value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK;
+ value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT;
+
+ if (!value1 && !value2)
+ break;
+ } while (retries++ < 50);
if (retries > 50)
DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset);
@@ -150,7 +158,7 @@ static int radeon_dp_mst_update_stream_attribs(struct radeon_connector *mst_conn
return 0;
}
-static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
+static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp)
{
struct drm_device *dev = mst->base.dev;
struct radeon_device *rdev = dev->dev_private;
@@ -158,6 +166,8 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
uint32_t val, temp;
uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe);
int retries = 0;
+ uint32_t x = drm_fixp2int(avg_time_slots_per_mtp);
+ uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26);
val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y);
@@ -165,6 +175,7 @@ static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y)
do {
temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset);
+ udelay(10);
} while ((temp & 0x1) && (retries++ < 10000));
if (retries >= 10000)
@@ -246,14 +257,8 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(radeon_connector);
}
-static int radeon_connector_dpms(struct drm_connector *connector, int mode)
-{
- DRM_DEBUG_KMS("\n");
- return 0;
-}
-
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = {
- .dpms = radeon_connector_dpms,
+ .dpms = drm_helper_connector_dpms,
.detect = radeon_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = radeon_dp_mst_connector_destroy,
@@ -394,7 +399,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
int ret, slots;
-
+ s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp;
if (!ASIC_IS_DCE5(rdev)) {
DRM_ERROR("got mst dpms on non-DCE5\n");
return;
@@ -456,7 +461,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
mst_enc->enc_active = true;
radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary);
- radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0);
+
+ fixed_pbn = drm_int2fixp(mst_enc->pbn);
+ fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div);
+ avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot);
+ radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp);
atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0,
mst_enc->fe);
@@ -510,6 +519,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
{
struct radeon_encoder_mst *mst_enc;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_connector_atom_dig *dig_connector;
int bpp = 24;
mst_enc = radeon_encoder->enc_priv;
@@ -523,22 +533,11 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
- {
- struct radeon_connector_atom_dig *dig_connector;
- int ret;
-
- dig_connector = mst_enc->connector->con_priv;
- ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
- dig_connector->dpcd, adjusted_mode->clock,
- &dig_connector->dp_lane_count,
- &dig_connector->dp_clock);
- if (ret) {
- dig_connector->dp_lane_count = 0;
- dig_connector->dp_clock = 0;
- }
- DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
- dig_connector->dp_lane_count, dig_connector->dp_clock);
- }
+ dig_connector = mst_enc->connector->con_priv;
+ dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+ dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+ DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ dig_connector->dp_lane_count, dig_connector->dp_clock);
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d2e628eea53d..0e3143acb565 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -38,9 +38,9 @@
#include <linux/vga_switcheroo.h>
/* object hierarchy -
- this contains a helper + a radeon fb
- the helper contains a pointer to radeon framebuffer baseclass.
-*/
+ * this contains a helper + a radeon fb
+ * the helper contains a pointer to radeon framebuffer baseclass.
+ */
struct radeon_fbdev {
struct drm_fb_helper helper;
struct radeon_framebuffer rfb;
@@ -292,7 +292,8 @@ out_unref:
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
@@ -325,6 +326,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
int bpp_sel = 32;
int ret;
+ /* don't enable fbdev if no connectors */
+ if (list_empty(&rdev->ddev->mode_config.connector_list))
+ return 0;
+
/* select 8 bpp console on RN50 or 16MB cards */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
@@ -377,11 +382,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
- fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ if (rdev->mode_info.rfbdev)
+ fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
+ if (!rdev->mode_info.rfbdev)
+ return false;
+
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
@@ -389,12 +398,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fbdev_restore_mode(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index c39ce1f05703..92ce0e533bc0 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -274,7 +274,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
- rdev->accel_working = false;
+ rdev->accel_working = false;
return r;
} else {
@@ -304,7 +304,7 @@ static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
}
static struct drm_info_list radeon_debugfs_sa_list[] = {
- {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 979f3bf65f2c..1e9304d1c88f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (r) {
return r;
}
+ rdev->ddev->vblank_disable_allowed = true;
+
/* enable msi */
rdev->msi_enabled = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 32b338ff436b..478d4099b0d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
RADEON_CRTC_DISP_REQ_EN_B));
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_on(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ if (dev->num_crtcs > radeon_crtc->crtc_id)
+ drm_vblank_off(dev, radeon_crtc->crtc_id);
if (radeon_crtc->crtc_id)
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
else {
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 88dc973fb209..868c3ba2efaa 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -818,52 +818,52 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) &
~(RADEON_TMDS_TRANSMITTER_PLLRST);
- if (rdev->family == CHIP_R200 ||
- rdev->family == CHIP_R100 ||
- ASIC_IS_R300(rdev))
- tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
- else /* RV chips got this bit reversed */
- tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
-
- fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
- (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
- RADEON_FP_CRTC_DONT_SHADOW_HEND));
-
- fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
-
- fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
- RADEON_FP_DFP_SYNC_SEL |
- RADEON_FP_CRT_SYNC_SEL |
- RADEON_FP_CRTC_LOCK_8DOT |
- RADEON_FP_USE_SHADOW_EN |
- RADEON_FP_CRTC_USE_SHADOW_VEND |
- RADEON_FP_CRT_SYNC_ALT);
-
- if (1) /* FIXME rgbBits == 8 */
- fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
- else
- fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
-
- if (radeon_crtc->crtc_id == 0) {
- if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
- fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
- if (radeon_encoder->rmx_type != RMX_OFF)
- fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
- else
- fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
- } else
- fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
- } else {
- if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
- fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
- fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
- } else
- fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
- }
-
- WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
- WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
- WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
+ if (rdev->family == CHIP_R200 ||
+ rdev->family == CHIP_R100 ||
+ ASIC_IS_R300(rdev))
+ tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN);
+ else /* RV chips got this bit reversed */
+ tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN;
+
+ fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) |
+ (RADEON_FP_CRTC_DONT_SHADOW_VPAR |
+ RADEON_FP_CRTC_DONT_SHADOW_HEND));
+
+ fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN);
+
+ fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN |
+ RADEON_FP_DFP_SYNC_SEL |
+ RADEON_FP_CRT_SYNC_SEL |
+ RADEON_FP_CRTC_LOCK_8DOT |
+ RADEON_FP_USE_SHADOW_EN |
+ RADEON_FP_CRTC_USE_SHADOW_VEND |
+ RADEON_FP_CRT_SYNC_ALT);
+
+ if (1) /* FIXME rgbBits == 8 */
+ fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */
+ else
+ fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */
+
+ if (radeon_crtc->crtc_id == 0) {
+ if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+ if (radeon_encoder->rmx_type != RMX_OFF)
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX;
+ else
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1;
+ } else
+ fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2;
+ } else {
+ if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) {
+ fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK;
+ fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2;
+ } else
+ fp_gen_cntl |= RADEON_FP_SEL_CRTC2;
+ }
+
+ WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl);
+ WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl);
+ WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
if (rdev->is_atom_bios)
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fb6ad143873f..2d901bf28a94 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -214,8 +214,8 @@ int radeon_bo_create(struct radeon_device *rdev,
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
- RADEON_GEM_DOMAIN_GTT |
- RADEON_GEM_DOMAIN_CPU);
+ RADEON_GEM_DOMAIN_GTT |
+ RADEON_GEM_DOMAIN_CPU);
bo->flags = flags;
/* PCI GART is always snooped */
@@ -799,6 +799,10 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
+ /* Can't move a pinned BO to visible VRAM */
+ if (rbo->pin_count > 0)
+ return -EINVAL;
+
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
@@ -848,7 +852,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
*
*/
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
- bool shared)
+ bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
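
The new check in radeon_bo_fault_reserve_notify() above bails out before trying to migrate a pinned BO into CPU-visible VRAM. A sketch of the resulting control flow, assuming pin_count > 0 means the buffer's placement must not change; the struct below is an illustrative stub:

    #include <errno.h>

    struct pinned_bo { int pin_count; };

    static int fault_reserve_notify(struct pinned_bo *rbo,
                                    unsigned long offset, unsigned long size,
                                    unsigned long visible_vram_size)
    {
            if (offset + size <= visible_vram_size)
                    return 0;       /* already CPU-visible, nothing to do */
            if (rbo->pin_count > 0)
                    return -EINVAL; /* pinned: must not be moved */
            /* ...otherwise restrict placement to visible VRAM and revalidate */
            return 0;
    }
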
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 7a98823bacd1..38226d925a5b 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -79,7 +79,7 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
- } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index e6ad54cdfa62..b0eb28e8fb73 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -56,7 +56,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
}
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
- struct radeon_semaphore *semaphore)
+ struct radeon_semaphore *semaphore)
{
struct radeon_ring *ring = &rdev->ring[ridx];
@@ -73,7 +73,7 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
}
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
- struct radeon_semaphore *semaphore)
+ struct radeon_semaphore *semaphore)
{
struct radeon_ring *ring = &rdev->ring[ridx];
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e06ac546a90f..7dddfdce85e6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -397,9 +397,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
+ struct radeon_bo *rbo;
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
+ /* Can't move a pinned BO */
+ rbo = container_of(bo, struct radeon_bo, tbo);
+ if (WARN_ON_ONCE(rbo->pin_count > 0))
+ return -EINVAL;
+
rdev = radeon_get_rdev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
radeon_move_null(bo, new_mem);
@@ -554,8 +560,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
struct page **pages = ttm->pages + pinned;
- r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
+ r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
if (r < 0)
goto release_pages;
@@ -610,7 +615,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
sg_free_table(ttm->sg);
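
Three independent fixes land in radeon_ttm.c above: radeon_bo_move() now refuses to move pinned BOs (recovering the radeon_bo from the embedded ttm_buffer_object via container_of()), get_user_pages() loses its task/mm arguments and implicitly acts on current->mm, and page_cache_release() becomes put_page(). The container_of() recovery in isolation, with stub types standing in for the driver's:

    #include <stddef.h>

    /* Same pointer arithmetic the hunk relies on: step back from the
     * embedded member to the enclosing structure. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ttm_bo  { int mem_type; };
    struct wrap_bo { int pin_count; struct ttm_bo tbo; };

    static int move_allowed(struct ttm_bo *bo)
    {
            struct wrap_bo *rbo = container_of(bo, struct wrap_bo, tbo);

            return rbo->pin_count == 0;     /* pinned BOs must stay put */
    }
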
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6edcb5485092..6fe9e4e76284 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -722,9 +722,11 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
return r;
}
-/* multiple fence commands without any stream commands in between can
- crash the vcpu so just try to emmit a dummy create/destroy msg to
- avoid this */
+/*
+ * multiple fence commands without any stream commands in between can
+ * crash the vcpu so just try to emit a dummy create/destroy msg to
+ * avoid this
+ */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence)
{
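
Per the reflowed comment above, back-to-back fence commands with no stream command in between can crash the UVD VCPU, so a dummy create/destroy message pair is emitted. A hypothetical usage sketch; radeon_uvd_get_destroy_msg() is assumed here to mirror the create helper declared above:

    /* Hypothetical; both helpers are assumed to take (rdev, ring, handle,
     * fence) as in the declaration above. */
    static int uvd_emit_dummy_msgs(struct radeon_device *rdev, int ring,
                                   uint32_t handle, struct radeon_fence **fence)
    {
            int r = radeon_uvd_get_create_msg(rdev, ring, handle, NULL);

            if (r)
                    return r;
            return radeon_uvd_get_destroy_msg(rdev, ring, handle, fence);
    }
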
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 566a1a01f6d1..c1c619facb47 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -166,7 +166,7 @@ int radeon_vce_init(struct radeon_device *rdev)
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
atomic_set(&rdev->vce.handles[i], 0);
rdev->vce.filp[i] = NULL;
- }
+ }
return 0;
}
@@ -389,7 +389,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
}
if (fence)
@@ -446,7 +446,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
}
if (fence)
@@ -769,18 +769,18 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_unlock_commit(rdev, ring, false);
for (i = 0; i < rdev->usec_timeout; i++) {
- if (vce_v1_0_get_rptr(rdev, ring) != rptr)
- break;
- DRM_UDELAY(1);
+ if (vce_v1_0_get_rptr(rdev, ring) != rptr)
+ break;
+ DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
} else {
- DRM_ERROR("radeon: ring %d test failed\n",
- ring->idx);
- r = -ETIMEDOUT;
+ DRM_ERROR("radeon: ring %d test failed\n",
+ ring->idx);
+ r = -ETIMEDOUT;
}
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 3979632b9225..a1358748cea5 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -611,15 +611,16 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
*/
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
- uint32_t hw_flags = 0;
- hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
- hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
- hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- hw_flags |= R600_PTE_SYSTEM;
- hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
- }
- return hw_flags;
+ uint32_t hw_flags = 0;
+
+ hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ hw_flags |= R600_PTE_SYSTEM;
+ hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return hw_flags;
}
/**
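
radeon_vm_page_flags() above translates the driver's RADEON_VM_PAGE_* flags into R600_PTE_* hardware PTE bits. The same mapping pattern as a self-contained sketch; the bit values below are made up for illustration:

    #include <stdint.h>

    #define VM_PAGE_VALID    (1u << 0)      /* hypothetical bit assignments */
    #define VM_PAGE_READABLE (1u << 1)
    #define PTE_VALID        (1u << 0)
    #define PTE_READABLE     (1u << 5)

    static uint32_t map_page_flags(uint32_t flags)
    {
            uint32_t hw = 0;

            hw |= (flags & VM_PAGE_VALID) ? PTE_VALID : 0;
            hw |= (flags & VM_PAGE_READABLE) ? PTE_READABLE : 0;
            return hw;
    }
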
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index cb0afe78abed..94b48fc1e266 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -795,7 +795,7 @@ static int rs780_parse_power_table(struct radeon_device *rdev)
union pplib_clock_info *clock_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
struct igp_ps *ps;
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 97e5a6f1ce58..25e29303b119 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -209,7 +209,7 @@ static struct rv6xx_sclk_stepping rv6xx_next_vco_step(struct radeon_device *rdev
static bool rv6xx_can_step_post_div(struct radeon_device *rdev,
struct rv6xx_sclk_stepping *cur,
- struct rv6xx_sclk_stepping *target)
+ struct rv6xx_sclk_stepping *target)
{
return (cur->post_divider > target->post_divider) &&
((cur->vco_frequency * target->post_divider) <=
@@ -239,7 +239,7 @@ static bool rv6xx_reached_stepping_target(struct radeon_device *rdev,
static void rv6xx_generate_steps(struct radeon_device *rdev,
u32 low, u32 high,
- u32 start_index, u8 *end_index)
+ u32 start_index, u8 *end_index)
{
struct rv6xx_sclk_stepping cur;
struct rv6xx_sclk_stepping target;
@@ -1356,23 +1356,23 @@ static void rv6xx_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
enum radeon_dpm_event_src dpm_event_src;
switch (sources) {
- case 0:
- default:
+ case 0:
+ default:
want_thermal_protection = false;
break;
- case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
break;
- case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
break;
- case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
(1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
- want_thermal_protection = true;
+ want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
break;
}
@@ -1879,7 +1879,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
union pplib_clock_info *clock_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
struct rv6xx_ps *ps;
diff --git a/drivers/gpu/drm/radeon/rv740_dpm.c b/drivers/gpu/drm/radeon/rv740_dpm.c
index c4c8da501da8..4b850824fe06 100644
--- a/drivers/gpu/drm/radeon/rv740_dpm.c
+++ b/drivers/gpu/drm/radeon/rv740_dpm.c
@@ -36,28 +36,28 @@ u32 rv740_get_decoded_reference_divider(u32 encoded_ref)
u32 ref = 0;
switch (encoded_ref) {
- case 0:
+ case 0:
ref = 1;
break;
- case 16:
+ case 16:
ref = 2;
break;
- case 17:
+ case 17:
ref = 3;
break;
- case 18:
+ case 18:
ref = 2;
break;
- case 19:
+ case 19:
ref = 3;
break;
- case 20:
+ case 20:
ref = 4;
break;
- case 21:
+ case 21:
ref = 5;
break;
- default:
+ default:
DRM_ERROR("Invalid encoded Reference Divider\n");
ref = 0;
break;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index e830c8935db0..a010decf59af 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -345,27 +345,27 @@ static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv)
int ret = 0;
switch (postdiv) {
- case 1:
+ case 1:
*encoded_postdiv = 0;
break;
- case 2:
+ case 2:
*encoded_postdiv = 1;
break;
- case 4:
+ case 4:
*encoded_postdiv = 2;
break;
- case 8:
+ case 8:
*encoded_postdiv = 3;
break;
- case 16:
+ case 16:
*encoded_postdiv = 4;
break;
- default:
+ default:
ret = -EINVAL;
break;
}
- return ret;
+ return ret;
}
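
The switch above maps a power-of-two post divider (1, 2, 4, 8, 16) to its encoding (0-4), which is simply log2 of the divider. A sketch of the equivalent computation (the GCC/Clang __builtin_ctz builtin is assumed):

    #include <stdint.h>

    static int encode_postdiv(uint32_t postdiv)
    {
            if (postdiv == 0 || postdiv > 16 || (postdiv & (postdiv - 1)))
                    return -1;              /* not a power of two in range */
            return __builtin_ctz(postdiv);  /* 1->0, 2->1, 4->2, 8->3, 16->4 */
    }
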
u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
@@ -1175,15 +1175,15 @@ static int rv770_init_smc_table(struct radeon_device *rdev,
rv770_populate_smc_mvdd_table(rdev, table);
switch (rdev->pm.int_thermal_type) {
- case THERMAL_TYPE_RV770:
- case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
+ case THERMAL_TYPE_RV770:
+ case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
break;
- case THERMAL_TYPE_NONE:
+ case THERMAL_TYPE_NONE:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
break;
- case THERMAL_TYPE_EXTERNAL_GPIO:
- default:
+ case THERMAL_TYPE_EXTERNAL_GPIO:
+ default:
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
break;
}
@@ -1567,18 +1567,18 @@ void rv770_reset_smio_status(struct radeon_device *rdev)
sw_smio_index =
(RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT;
switch (sw_smio_index) {
- case 3:
+ case 3:
vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL);
break;
- case 2:
+ case 2:
vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL);
break;
- case 1:
+ case 1:
vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL);
break;
- case 0:
+ case 0:
return;
- default:
+ default:
vid_smio_cntl = pi->s0_vid_lower_smio_cntl;
break;
}
@@ -1817,21 +1817,21 @@ static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
enum radeon_dpm_event_src dpm_event_src;
switch (sources) {
- case 0:
- default:
+ case 0:
+ default:
want_thermal_protection = false;
break;
- case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
break;
- case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
break;
- case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
(1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
@@ -2273,7 +2273,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
union pplib_clock_info *clock_info;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
struct rv7xx_ps *ps;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f878d6962da5..ae21550fe767 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1307,7 +1307,7 @@ int si_get_allowed_info_register(struct radeon_device *rdev,
*/
u32 si_get_xclk(struct radeon_device *rdev)
{
- u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 reference_clock = rdev->clock.spll.reference_freq;
u32 tmp;
tmp = RREG32(CG_CLKPIN_CNTL_2);
@@ -2442,8 +2442,10 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
*/
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
- const u32 num_tile_mode_states = 32;
- u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
+ u32 *tile = rdev->config.si.tile_mode_array;
+ const u32 num_tile_mode_states =
+ ARRAY_SIZE(rdev->config.si.tile_mode_array);
+ u32 reg_offset, split_equal_to_row_size;
switch (rdev->config.si.mem_row_size_in_kb) {
case 1:
@@ -2458,491 +2460,442 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
break;
}
- if ((rdev->family == CHIP_TAHITI) ||
- (rdev->family == CHIP_PITCAIRN)) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0: /* non-AA compressed depth or any compressed stencil */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 1: /* 2xAA/4xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 2: /* 8xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 8: /* 1D and 1D Array Surfaces */
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 9: /* Displayable maps. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 10: /* Display 8bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 11: /* Display 16bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 12: /* Display 32bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 13: /* Thin. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 14: /* Thin 8 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 15: /* Thin 16 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 16: /* Thin 32 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 17: /* Thin 64 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- case 21: /* 8 bpp PRT. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 22: /* 16 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 23: /* 32 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 24: /* 64 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 25: /* 128 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else if ((rdev->family == CHIP_VERDE) ||
- (rdev->family == CHIP_OLAND) ||
- (rdev->family == CHIP_HAINAN)) {
- for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
- switch (reg_offset) {
- case 0: /* non-AA compressed depth or any compressed stencil */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 1: /* 2xAA/4xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 2: /* 8xAA compressed depth only */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 8: /* 1D and 1D Array Surfaces */
- gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 9: /* Displayable maps. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 10: /* Display 8bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 11: /* Display 16bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 12: /* Display 32bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 13: /* Thin. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 14: /* Thin 8 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 15: /* Thin 16 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 16: /* Thin 32 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 17: /* Thin 64 bpp. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P4_8x16) |
- TILE_SPLIT(split_equal_to_row_size) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 21: /* 8 bpp PRT. */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 22: /* 16 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
- break;
- case 23: /* 32 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 24: /* 64 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
- NUM_BANKS(ADDR_SURF_16_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
- break;
- case 25: /* 128 bpp PRT */
- gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
- MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
- PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
- TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
- NUM_BANKS(ADDR_SURF_8_BANK) |
- BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
- BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
- MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
- break;
- default:
- gb_tile_moden = 0;
- break;
- }
- rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
- WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
- }
- } else
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ tile[reg_offset] = 0;
+
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ /* non-AA compressed depth or any compressed stencil */
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 2xAA/4xAA compressed depth only */
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 8xAA compressed depth only */
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+ tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+ tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+ tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+ tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 1D and 1D Array Surfaces */
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Displayable maps. */
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 8bpp. */
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 16bpp. */
+ tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 32bpp. */
+ tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Thin. */
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 8 bpp. */
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Thin 16 bpp. */
+ tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Thin 32 bpp. */
+ tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* Thin 64 bpp. */
+ tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+ /* 8 bpp PRT. */
+ tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 16 bpp PRT */
+ tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 32 bpp PRT */
+ tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 64 bpp PRT */
+ tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 128 bpp PRT */
+ tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ break;
+
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ /* non-AA compressed depth or any compressed stencil */
+ tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 2xAA/4xAA compressed depth only */
+ tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 8xAA compressed depth only */
+ tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
+ tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
+ tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
+ tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
+ tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
+ tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 1D and 1D Array Surfaces */
+ tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Displayable maps. */
+ tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 8bpp. */
+ tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* Display 16bpp. */
+ tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Display 32bpp. */
+ tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin. */
+ tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 8 bpp. */
+ tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 16 bpp. */
+ tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 32 bpp. */
+ tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* Thin 64 bpp. */
+ tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ TILE_SPLIT(split_equal_to_row_size) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 8 bpp PRT. */
+ tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 16 bpp PRT */
+ tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
+ /* 32 bpp PRT */
+ tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 64 bpp PRT */
+ tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
+ /* 128 bpp PRT */
+ tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
+ NUM_BANKS(ADDR_SURF_8_BANK) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
+ break;
+
+ default:
DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
+ }
}
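
The rewrite above drops the two near-identical per-register switch statements in favor of a tile[] table: zero every entry, let each chip branch fill in only the indices it defines, then write the whole table back in one loop. The shape of that pattern, with a stand-in register writer and a placeholder value:

    #include <stdint.h>

    #define NUM_TILE_MODES 32       /* mirrors ARRAY_SIZE(tile_mode_array) above */

    /* write32() stands in for WREG32(); the real per-chip encodings are in
     * the hunk above. */
    static void program_tile_modes(void (*write32)(uint32_t reg, uint32_t val),
                                   uint32_t base_reg)
    {
            uint32_t tile[NUM_TILE_MODES];
            uint32_t i;

            for (i = 0; i < NUM_TILE_MODES; i++)
                    tile[i] = 0;            /* undefined modes are written as 0 */
            tile[0] = 0x1;                  /* placeholder value only */
            for (i = 0; i < NUM_TILE_MODES; i++)
                    write32(base_reg + i * 4, tile[i]);
    }
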
static void si_select_se_sh(struct radeon_device *rdev,
@@ -7314,7 +7267,7 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
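
si_get_gpu_clock_counter() above latches the 64-bit GPU clock with a register write and assembles it from two 32-bit halves; the surrounding mutex keeps the latch and both reads atomic with respect to other callers. The combine step in isolation, with read32() standing in for RREG32():

    #include <stdint.h>

    static uint64_t read_clock_counter(uint32_t (*read32)(int reg),
                                       int lsb_reg, int msb_reg)
    {
            return (uint64_t)read32(lsb_reg) |
                   ((uint64_t)read32(msb_reg) << 32);
    }
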
@@ -7775,33 +7728,33 @@ static void si_program_aspm(struct radeon_device *rdev)
int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
{
- unsigned i;
+ unsigned i;
- /* make sure VCEPLL_CTLREQ is deasserted */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+ /* make sure VCEPLL_CTLREQ is deasserted */
+ WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
- mdelay(10);
+ mdelay(10);
- /* assert UPLL_CTLREQ */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
+ /* assert UPLL_CTLREQ */
+ WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
- /* wait for CTLACK and CTLACK2 to get asserted */
- for (i = 0; i < 100; ++i) {
- uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
- if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
- break;
- mdelay(10);
- }
+ /* wait for CTLACK and CTLACK2 to get asserted */
+ for (i = 0; i < 100; ++i) {
+ uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
+ if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
+ break;
+ mdelay(10);
+ }
- /* deassert UPLL_CTLREQ */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
+ /* deassert UPLL_CTLREQ */
+ WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
- if (i == 100) {
- DRM_ERROR("Timeout setting UVD clocks!\n");
- return -ETIMEDOUT;
- }
+ if (i == 100) {
+ DRM_ERROR("Timeout setting UVD clocks!\n");
+ return -ETIMEDOUT;
+ }
- return 0;
+ return 0;
}
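
si_vce_send_vcepll_ctlreq() above is a request/ack handshake: deassert the request bit, assert it, poll for both ACK bits a bounded number of times, deassert, and return -ETIMEDOUT if the loop expired. A self-contained sketch; register access is simulated here, so on this stub the ACK never arrives, whereas on hardware the masked read would observe it:

    #include <stdint.h>

    #define REQ_MASK (1u << 0)      /* illustrative bit positions */
    #define ACK1     (1u << 1)
    #define ACK2     (1u << 2)

    static uint32_t ctl_reg;        /* simulated register */
    static uint32_t reg_read(void) { return ctl_reg; }
    static void reg_rmw(uint32_t set, uint32_t keep_mask)
    {
            ctl_reg = (ctl_reg & keep_mask) | set;  /* like WREG32_SMC_P() */
    }
    static void sleep_ms(int ms) { (void)ms; }      /* stands in for mdelay() */

    static int send_ctlreq(void)
    {
            unsigned int i;

            reg_rmw(0, ~REQ_MASK);                  /* ensure REQ deasserted */
            sleep_ms(10);
            reg_rmw(REQ_MASK, ~REQ_MASK);           /* assert REQ */

            for (i = 0; i < 100; ++i) {             /* wait for both ACKs */
                    if ((reg_read() & (ACK1 | ACK2)) == (ACK1 | ACK2))
                            break;
                    sleep_ms(10);
            }

            reg_rmw(0, ~REQ_MASK);                  /* deassert REQ */
            return (i == 100) ? -1 : 0;             /* -1 on timeout */
    }
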
int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index a82b891ae1fe..af4df81c4e0c 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -499,7 +499,7 @@ static const struct si_cac_config_reg lcac_pitcairn[] =
static const struct si_cac_config_reg cac_override_pitcairn[] =
{
- { 0xFFFFFFFF }
+ { 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_pitcairn =
@@ -991,7 +991,7 @@ static const struct si_cac_config_reg lcac_cape_verde[] =
static const struct si_cac_config_reg cac_override_cape_verde[] =
{
- { 0xFFFFFFFF }
+ { 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_cape_verde =
@@ -1762,9 +1762,9 @@ static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev);
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
- struct si_power_info *pi = rdev->pm.dpm.priv;
+ struct si_power_info *pi = rdev->pm.dpm.priv;
- return pi;
+ return pi;
}
static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
@@ -2926,9 +2926,11 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};
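
The two entries added above extend si_dpm_quirk_list, a zero-terminated table keyed on PCI vendor/device/subsystem IDs whose non-zero clock fields cap sclk/mclk on boards known to be unstable at stock speeds. A sketch of how such a table is walked; the field names are inferred from the initializers above:

    #include <stdint.h>

    struct dpm_quirk {      /* layout inferred from the table above */
            uint16_t vendor, device, subvendor, subdevice;
            uint32_t max_sclk, max_mclk;    /* 0 == no cap from this entry */
    };

    static void apply_quirks(const struct dpm_quirk *q,
                             uint16_t ven, uint16_t dev,
                             uint16_t sven, uint16_t sdev,
                             uint32_t *max_sclk, uint32_t *max_mclk)
    {
            for (; q->vendor; q++) {        /* { 0, 0, 0, 0 } terminates */
                    if (q->vendor != ven || q->device != dev ||
                        q->subvendor != sven || q->subdevice != sdev)
                            continue;
                    if (q->max_sclk)
                            *max_sclk = q->max_sclk;
                    if (q->max_mclk)
                            *max_mclk = q->max_mclk;
                    break;
            }
    }
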
@@ -3008,6 +3010,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
++p;
}
+ /* limit mclk on all R7 370 parts for stability */
+ if (rdev->pdev->device == 0x6811 &&
+ rdev->pdev->revision == 0x81)
+ max_mclk = 120000;
if (rps->vce_active) {
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
@@ -3150,9 +3156,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
}
}
- for (i = 0; i < ps->performance_level_count; i++)
- btc_adjust_clock_combinations(rdev, max_limits,
- &ps->performance_levels[i]);
+ for (i = 0; i < ps->performance_level_count; i++)
+ btc_adjust_clock_combinations(rdev, max_limits,
+ &ps->performance_levels[i]);
for (i = 0; i < ps->performance_level_count; i++) {
if (ps->performance_levels[i].vddc < min_vce_voltage)
@@ -3291,7 +3297,7 @@ static void si_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
case 0:
default:
want_thermal_protection = false;
- break;
+ break;
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
@@ -3493,7 +3499,7 @@ static int si_process_firmware_header(struct radeon_device *rdev)
if (ret)
return ret;
- si_pi->state_table_start = tmp;
+ si_pi->state_table_start = tmp;
ret = si_read_smc_sram_dword(rdev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
@@ -3652,7 +3658,7 @@ static void si_program_response_times(struct radeon_device *rdev)
si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
- backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
+ backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
@@ -3760,7 +3766,7 @@ static void si_setup_bsp(struct radeon_device *rdev)
&pi->pbsu);
- pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
+ pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
WREG32(CG_BSP, pi->dsp);
@@ -4308,7 +4314,7 @@ static int si_populate_memory_timing_parameters(struct radeon_device *rdev,
radeon_atom_set_engine_dram_timings(rdev,
pl->sclk,
- pl->mclk);
+ pl->mclk);
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
@@ -4343,7 +4349,7 @@ static int si_do_program_memory_timing_parameters(struct radeon_device *rdev,
si_pi->sram_end);
if (ret)
break;
- }
+ }
return ret;
}
@@ -4821,9 +4827,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(2);
- spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
- spll_func_cntl_3 |= SPLL_DITHEN;
+ spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+ spll_func_cntl_3 |= SPLL_DITHEN;
if (pi->sclk_ss) {
struct radeon_atom_ss ss;
@@ -4930,15 +4936,15 @@ static int si_populate_mclk_value(struct radeon_device *rdev,
tmp = freq_nom / reference_clock;
tmp = tmp * tmp;
if (radeon_atombios_get_asic_ss_info(rdev, &ss,
- ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
u32 clks = reference_clock * 5 / ss.rate;
u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
- mpll_ss1 &= ~CLKV_MASK;
- mpll_ss1 |= CLKV(clkv);
+ mpll_ss1 &= ~CLKV_MASK;
+ mpll_ss1 |= CLKV(clkv);
- mpll_ss2 &= ~CLKS_MASK;
- mpll_ss2 |= CLKS(clks);
+ mpll_ss2 &= ~CLKS_MASK;
+ mpll_ss2 |= CLKS(clks);
}
}
@@ -5265,7 +5271,7 @@ static int si_convert_power_state_to_smc(struct radeon_device *rdev,
ni_pi->enable_power_containment = false;
ret = si_populate_sq_ramping_values(rdev, radeon_state, smc_state);
- if (ret)
+ if (ret)
ni_pi->enable_sq_ramping = false;
return si_populate_smc_t(rdev, radeon_state, smc_state);
@@ -5436,46 +5442,46 @@ static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
case MC_SEQ_RAS_TIMING >> 2:
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
break;
- case MC_SEQ_CAS_TIMING >> 2:
+ case MC_SEQ_CAS_TIMING >> 2:
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING >> 2:
+ case MC_SEQ_MISC_TIMING >> 2:
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
break;
- case MC_SEQ_MISC_TIMING2 >> 2:
+ case MC_SEQ_MISC_TIMING2 >> 2:
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D0 >> 2:
+ case MC_SEQ_RD_CTL_D0 >> 2:
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
break;
- case MC_SEQ_RD_CTL_D1 >> 2:
+ case MC_SEQ_RD_CTL_D1 >> 2:
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D0 >> 2:
+ case MC_SEQ_WR_CTL_D0 >> 2:
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
break;
- case MC_SEQ_WR_CTL_D1 >> 2:
+ case MC_SEQ_WR_CTL_D1 >> 2:
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
break;
- case MC_PMG_CMD_EMRS >> 2:
+ case MC_PMG_CMD_EMRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS >> 2:
+ case MC_PMG_CMD_MRS >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
break;
- case MC_PMG_CMD_MRS1 >> 2:
+ case MC_PMG_CMD_MRS1 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
break;
- case MC_SEQ_PMG_TIMING >> 2:
+ case MC_SEQ_PMG_TIMING >> 2:
*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
break;
- case MC_PMG_CMD_MRS2 >> 2:
+ case MC_PMG_CMD_MRS2 >> 2:
*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
break;
- case MC_SEQ_WR_CTL_2 >> 2:
+ case MC_SEQ_WR_CTL_2 >> 2:
*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
break;
- default:
+ default:
result = false;
break;
}
@@ -5562,19 +5568,19 @@ static int si_initialize_mc_reg_table(struct radeon_device *rdev)
WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
- ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
- if (ret)
- goto init_mc_done;
+ ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+ if (ret)
+ goto init_mc_done;
- ret = si_copy_vbios_mc_reg_table(table, si_table);
- if (ret)
- goto init_mc_done;
+ ret = si_copy_vbios_mc_reg_table(table, si_table);
+ if (ret)
+ goto init_mc_done;
si_set_s0_mc_reg_index(si_table);
ret = si_set_mc_special_registers(rdev, si_table);
- if (ret)
- goto init_mc_done;
+ if (ret)
+ goto init_mc_done;
si_set_valid_flag(si_table);
@@ -5715,10 +5721,10 @@ static int si_upload_mc_reg_table(struct radeon_device *rdev,
static void si_enable_voltage_control(struct radeon_device *rdev, bool enable)
{
- if (enable)
- WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
- else
- WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
+ if (enable)
+ WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
+ else
+ WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
}
static enum radeon_pcie_gen si_get_maximum_link_speed(struct radeon_device *rdev,
@@ -6820,7 +6826,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct ni_ps *ps;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index cd0862809adf..f0d5c1724f55 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -787,8 +787,8 @@ static void sumo_program_acpi_power_level(struct radeon_device *rdev)
struct atom_clock_dividers dividers;
int ret;
- ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- pi->acpi_pl.sclk,
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ pi->acpi_pl.sclk,
false, &dividers);
if (ret)
return;
@@ -1462,7 +1462,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct sumo_ps *ps;
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index d34bfcdab9be..6730367ac228 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -369,8 +369,8 @@ static void trinity_gfx_powergating_initialize(struct radeon_device *rdev)
int ret;
u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT;
- ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- 25000, false, &dividers);
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ 25000, false, &dividers);
if (ret)
return;
@@ -587,8 +587,8 @@ static void trinity_set_divider_value(struct radeon_device *rdev,
u32 value;
u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE;
- ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- sclk, false, &dividers);
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk, false, &dividers);
if (ret)
return;
@@ -597,8 +597,8 @@ static void trinity_set_divider_value(struct radeon_device *rdev,
value |= CLK_DIVIDER(dividers.post_div);
WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value);
- ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- sclk/2, false, &dividers);
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk/2, false, &dividers);
if (ret)
return;
@@ -1045,14 +1045,14 @@ static int trinity_set_thermal_temperature_range(struct radeon_device *rdev,
int low_temp = 0 * 1000;
int high_temp = 255 * 1000;
- if (low_temp < min_temp)
+ if (low_temp < min_temp)
low_temp = min_temp;
- if (high_temp > max_temp)
+ if (high_temp > max_temp)
high_temp = max_temp;
- if (high_temp < low_temp) {
+ if (high_temp < low_temp) {
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
- return -EINVAL;
- }
+ return -EINVAL;
+ }
WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK);
WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK);
@@ -1737,7 +1737,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
struct _NonClockInfoArray *non_clock_info_array;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
+ u16 data_offset;
u8 frev, crev;
u8 *power_state_offset;
struct sumo_ps *ps;
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index cdeaab7c7b1e..fce214482e72 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -53,7 +53,7 @@ static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated)
WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
WREG32(VCE_CGTT_CLK_OVERRIDE, 0);
- } else {
+ } else {
tmp = RREG32(VCE_CLOCK_GATING_B);
tmp |= 0xe7;
tmp &= ~0xe70000;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 51e9e8ce551a..d9f06cc361fa 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -491,14 +491,6 @@ static void rcar_du_crtc_disable(struct drm_crtc *crtc)
rcrtc->outputs = 0;
}
-static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* TODO Fixup modes */
- return true;
-}
-
static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -531,7 +523,6 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
- .mode_fixup = rcar_du_crtc_mode_fixup,
.disable = rcar_du_crtc_disable,
.enable = rcar_du_crtc_enable,
.atomic_begin = rcar_du_crtc_atomic_begin,
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 3d3cf2f8891e..d5cfef75fc80 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -271,8 +271,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (!iores)
return -ENXIO;
- platform_set_drvdata(pdev, hdmi);
-
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
/*
* If we failed to find the CRTC(s) which this encoder is
@@ -293,7 +291,16 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
- return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+ ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data);
+
+ /*
+ * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
+ * which would have called the encoder cleanup. Do it manually.
+ */
+ if (ret)
+ drm_encoder_cleanup(encoder);
+
+ return ret;
}
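The fix above closes a leak: if dw_hdmi_bind() fails, the component framework never calls dw_hdmi_rockchip_unbind(), so the encoder initialized just before would never be cleaned up. A sketch of the general pattern, with init_encoder()/cleanup_encoder()/bind_core() as hypothetical stand-ins:

```c
extern int init_encoder(void);
extern void cleanup_encoder(void);
extern int bind_core(void);

int bind_with_cleanup(void)
{
	int ret;

	ret = init_encoder();	/* we own this resource... */
	if (ret)
		return ret;

	ret = bind_core();	/* ...until the core takes ownership */
	if (ret)
		cleanup_encoder();	/* unbind never runs; undo manually */

	return ret;
}
```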
static void dw_hdmi_rockchip_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 896da09e49ee..f556a8f4fde6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -251,6 +251,27 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
return 0;
}
+static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+ struct drm_file *file_priv)
+{
+ struct rockchip_drm_private *priv = crtc->dev->dev_private;
+ int pipe = drm_crtc_index(crtc);
+
+ if (pipe < ROCKCHIP_MAX_CRTC &&
+ priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->cancel_pending_vblank)
+ priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
+}
+
+static void rockchip_drm_preclose(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+}
+
void rockchip_drm_lastclose(struct drm_device *dev)
{
struct rockchip_drm_private *priv = dev->dev_private;
@@ -281,6 +302,7 @@ static struct drm_driver rockchip_drm_driver = {
DRIVER_PRIME | DRIVER_ATOMIC,
.load = rockchip_drm_load,
.unload = rockchip_drm_unload,
+ .preclose = rockchip_drm_preclose,
.lastclose = rockchip_drm_lastclose,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rockchip_drm_crtc_enable_vblank,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 3529f692edb8..00d17d71aa4c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,6 +40,7 @@ struct rockchip_crtc_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
void (*wait_for_update)(struct drm_crtc *crtc);
+ void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
};
struct rockchip_atomic_commit {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fd370548d7d7..a619f120f801 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -499,10 +499,25 @@ err_disable_hclk:
static void vop_crtc_disable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
+ int i;
if (!vop->is_enabled)
return;
+ /*
+ * We need to make sure that all windows are disabled before we
+ * disable that crtc. Otherwise we might try to scan from a destroyed
+ * buffer later.
+ */
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ spin_unlock(&vop->reg_lock);
+ }
+
drm_crtc_vblank_off(crtc);
/*
@@ -549,6 +564,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->fb;
struct vop_win *vop_win = to_vop_win(plane);
struct vop_plane_state *vop_plane_state = to_vop_plane_state(state);
@@ -563,12 +579,13 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
DRM_PLANE_HELPER_NO_SCALING;
- crtc = crtc ? crtc : plane->state->crtc;
- /*
- * Both crtc or plane->state->crtc can be null.
- */
if (!crtc || !fb)
goto out_disable;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
src->x1 = state->src_x;
src->y1 = state->src_y;
src->x2 = state->src_x + state->src_w;
@@ -580,8 +597,8 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
clip.x1 = 0;
clip.y1 = 0;
- clip.x2 = crtc->mode.hdisplay;
- clip.y2 = crtc->mode.vdisplay;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
ret = drm_plane_helper_check_update(plane, crtc, state->fb,
src, dest, &clip,
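The clip-rectangle change above is the key point of this hunk: an atomic check must validate against the state being committed, not against what the hardware currently runs. drm_atomic_get_existing_crtc_state() returns the CRTC state already pulled into this atomic transaction (hence the WARN_ON: a plane update always brings its CRTC state along), and the clip comes from its adjusted_mode rather than crtc->mode. A minimal sketch of the distinction, with hypothetical simplified types:

```c
/* "crtc->mode" is what the hardware runs right now;
 * "crtc_state->adjusted_mode" is what this commit will program. */
struct mode { int hdisplay, vdisplay; };
struct crtc { struct mode mode; };			/* live hw state */
struct crtc_state { struct mode adjusted_mode; };	/* pending state */

static void clip_from_pending(const struct crtc_state *cs, int *x2, int *y2)
{
	/* Validate against the mode being committed, not the live one. */
	*x2 = cs->adjusted_mode.hdisplay;
	*y2 = cs->adjusted_mode.vdisplay;
}
```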
@@ -873,10 +890,30 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
}
+static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
+ struct drm_file *file_priv)
+{
+ struct drm_device *drm = crtc->dev;
+ struct vop *vop = to_vop(crtc);
+ struct drm_pending_vblank_event *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ e = vop->event;
+ if (e && e->base.file_priv == file_priv) {
+ vop->event = NULL;
+
+ e->base.destroy(&e->base);
+ file_priv->event_space += sizeof(e->event);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
static const struct rockchip_crtc_funcs private_crtc_funcs = {
.enable_vblank = vop_crtc_enable_vblank,
.disable_vblank = vop_crtc_disable_vblank,
.wait_for_update = vop_crtc_wait_for_update,
+ .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
};
static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
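vop_crtc_cancel_pending_vblank() above implements the per-file cleanup wired up through the new preclose hook: under event_lock it checks whether the one armed flip event belongs to the closing file, steals it so the IRQ path will not send it, destroys it, and refunds the per-file event_space quota (this kernel era accounts pending-event memory per file). A simplified sketch of the check-and-steal pattern, with hypothetical types standing in for drm_pending_vblank_event:

```c
#include <pthread.h>
#include <stdlib.h>
#include <stddef.h>

struct file_ctx { size_t event_space; };
struct pending_event { struct file_ctx *owner; size_t size; };

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending_event *pending;	/* at most one armed event */

void cancel_pending_event(struct file_ctx *closing)
{
	pthread_mutex_lock(&event_lock);
	if (pending && pending->owner == closing) {
		struct pending_event *e = pending;

		pending = NULL;			/* IRQ path won't send it */
		closing->event_space += e->size; /* refund the quota */
		free(e);
	}
	pthread_mutex_unlock(&event_lock);
}
```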
@@ -885,9 +922,6 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
{
struct vop *vop = to_vop(crtc);
- if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
- return false;
-
adjusted_mode->clock =
clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
@@ -1108,7 +1142,7 @@ static int vop_create_crtc(struct vop *vop)
const struct vop_data *vop_data = vop->data;
struct device *dev = vop->dev;
struct drm_device *drm_dev = vop->drm_dev;
- struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+ struct drm_plane *primary = NULL, *cursor = NULL, *plane, *tmp;
struct drm_crtc *crtc = &vop->crtc;
struct device_node *port;
int ret;
@@ -1148,7 +1182,7 @@ static int vop_create_crtc(struct vop *vop)
ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
&vop_crtc_funcs, NULL);
if (ret)
- return ret;
+ goto err_cleanup_planes;
drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
@@ -1181,6 +1215,7 @@ static int vop_create_crtc(struct vop *vop)
if (!port) {
DRM_ERROR("no port node found in %s\n",
dev->of_node->full_name);
+ ret = -ENOENT;
goto err_cleanup_crtc;
}
@@ -1194,7 +1229,8 @@ static int vop_create_crtc(struct vop *vop)
err_cleanup_crtc:
drm_crtc_cleanup(crtc);
err_cleanup_planes:
- list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+ list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+ head)
drm_plane_cleanup(plane);
return ret;
}
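Two fixes land in the error paths above: failures now propagate (the missing -ENOENT and the goto on CRTC init failure), and the cleanup loop switches to list_for_each_entry_safe() because drm_plane_cleanup() unlinks the plane from mode_config.plane_list, so the plain iterator would chase a freed link. A sketch of why the _safe variant is needed, using a hypothetical singly linked list in place of the DRM plane list:

```c
struct node { struct node *next; };

extern void unlink_and_cleanup(struct node *n);	/* invalidates n */

void cleanup_all(struct node *head)
{
	struct node *n = head, *next;

	while (n) {
		next = n->next;		/* cache before n goes away */
		unlink_and_cleanup(n);
		n = next;
	}
}
```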
@@ -1202,9 +1238,28 @@ err_cleanup_planes:
static void vop_destroy_crtc(struct vop *vop)
{
struct drm_crtc *crtc = &vop->crtc;
+ struct drm_device *drm_dev = vop->drm_dev;
+ struct drm_plane *plane, *tmp;
rockchip_unregister_crtc_funcs(crtc);
of_node_put(crtc->port);
+
+ /*
+ * We need to clean up the planes now. Why?
+ *
+ * The planes are "&vop->win[i].base". That means the memory is
+ * all part of the big "struct vop" chunk of memory. That memory
+ * was devm allocated and associated with this component. We need to
+ * free it ourselves before vop_unbind() finishes.
+ */
+ list_for_each_entry_safe(plane, tmp, &drm_dev->mode_config.plane_list,
+ head)
+ vop_plane_destroy(plane);
+
+ /*
+ * Destroy CRTC after vop_plane_destroy() since vop_disable_plane()
+ * references the CRTC.
+ */
drm_crtc_cleanup(crtc);
}
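The comments above spell out an ordering constraint worth keeping in mind: dependents are destroyed first. The plane memory is devm-allocated as part of the vop structure and vanishes when the component unbinds, so the planes must be cleaned up explicitly here, and only once they are gone can the CRTC go, since plane teardown still dereferences it. A sketch of the ordering with hypothetical types:

```c
struct crtc { int hw_id; };
struct plane { struct crtc *crtc; };

extern void destroy_plane(struct plane *p);	/* may read p->crtc */
extern void destroy_crtc(struct crtc *c);

void teardown(struct plane *planes, int nplanes, struct crtc *crtc)
{
	int i;

	for (i = 0; i < nplanes; i++)
		destroy_plane(&planes[i]);	/* dependents first */
	destroy_crtc(crtc);	/* nothing references it anymore */
}
```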
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 27342fd76e90..88643ab160bf 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -359,13 +359,6 @@ static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
scrtc->dpms = mode;
}
-static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
{
shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -431,7 +424,6 @@ static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
.dpms = shmob_drm_crtc_dpms,
- .mode_fixup = shmob_drm_crtc_mode_fixup,
.prepare = shmob_drm_crtc_mode_prepare,
.commit = shmob_drm_crtc_mode_commit,
.mode_set = shmob_drm_crtc_mode_set,
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index 00d0698be9d3..a516eb869f6f 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -7,6 +7,7 @@
#include "sti_awg_utils.h"
#define AWG_OPCODE_OFFSET 10
+#define AWG_MAX_ARG 0x3ff
enum opcode {
SET,
@@ -34,6 +35,8 @@ static int awg_generate_instr(enum opcode opcode,
/* skip, repeat and replay arg should not exceed 1023.
* If the user wants to exceed this value, the instruction should be
* duplicated and arg should be adjusted for each duplicated instruction.
+ *
+ * mux_sel is used in case of SAV/EAV synchronization.
*/
while (arg_tmp > 0) {
@@ -65,7 +68,7 @@ static int awg_generate_instr(enum opcode opcode,
mux = 0;
data_enable = 0;
- arg &= (0x3ff);
+ arg &= AWG_MAX_ARG;
break;
case REPEAT:
case REPLAY:
@@ -76,13 +79,13 @@ static int awg_generate_instr(enum opcode opcode,
mux = 0;
data_enable = 0;
- arg &= (0x3ff);
+ arg &= AWG_MAX_ARG;
break;
case JUMP:
mux = 0;
data_enable = 0;
arg |= 0x40; /* for jump instruction 7th bit is 1 */
- arg &= 0x3ff;
+ arg &= AWG_MAX_ARG;
break;
case STOP:
arg = 0;
@@ -110,68 +113,75 @@ static int awg_generate_instr(enum opcode opcode,
return 0;
}
-int sti_awg_generate_code_data_enable_mode(
+static int awg_generate_line_signal(
struct awg_code_generation_params *fwparams,
struct awg_timing *timing)
{
long int val;
- long int data_en;
int ret = 0;
- if (timing->trailing_lines > 0) {
- /* skip trailing lines */
- val = timing->blanking_level;
- data_en = 0;
- ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
-
- val = timing->trailing_lines - 1;
- data_en = 0;
- ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
- }
-
if (timing->trailing_pixels > 0) {
/* skip trailing pixel */
val = timing->blanking_level;
- data_en = 0;
- ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
val = timing->trailing_pixels - 1;
- data_en = 0;
- ret |= awg_generate_instr(SKIP, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
}
/* set DE signal high */
val = timing->blanking_level;
- data_en = 1;
ret |= awg_generate_instr((timing->trailing_pixels > 0) ? SET : RPLSET,
- val, 0, data_en, fwparams);
+ val, 0, 1, fwparams);
if (timing->blanking_pixels > 0) {
/* skip the number of active pixel */
val = timing->active_pixels - 1;
- data_en = 1;
- ret |= awg_generate_instr(SKIP, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(SKIP, val, 0, 1, fwparams);
/* set DE signal low */
val = timing->blanking_level;
- data_en = 0;
- ret |= awg_generate_instr(SET, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(SET, val, 0, 0, fwparams);
+ }
+
+ return ret;
+}
+
+int sti_awg_generate_code_data_enable_mode(
+ struct awg_code_generation_params *fwparams,
+ struct awg_timing *timing)
+{
+ long int val, tmp_val;
+ int ret = 0;
+
+ if (timing->trailing_lines > 0) {
+ /* skip trailing lines */
+ val = timing->blanking_level;
+ ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
+
+ val = timing->trailing_lines - 1;
+ ret |= awg_generate_instr(REPLAY, val, 0, 0, fwparams);
}
- /* replay the sequence as many active lines defined */
- val = timing->active_lines - 1;
- data_en = 0;
- ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+ tmp_val = timing->active_lines - 1;
+
+ while (tmp_val > 0) {
+ /* generate DE signal for each line */
+ ret |= awg_generate_line_signal(fwparams, timing);
+ /* replay the sequence as many active lines defined */
+ ret |= awg_generate_instr(REPLAY,
+ min_t(int, AWG_MAX_ARG, tmp_val),
+ 0, 0, fwparams);
+ tmp_val -= AWG_MAX_ARG;
+ }
if (timing->blanking_lines > 0) {
/* skip blanking lines */
val = timing->blanking_level;
- data_en = 0;
- ret |= awg_generate_instr(RPLSET, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
val = timing->blanking_lines - 1;
- data_en = 0;
- ret |= awg_generate_instr(REPLAY, val, 0, data_en, fwparams);
+ ret |= awg_generate_instr(REPLAY, val, 0, 0, fwparams);
}
return ret;
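The restructuring above exists because REPLAY's argument is only 10 bits wide: an active-line count above AWG_MAX_ARG (0x3ff = 1023) must be split across several REPLAY instructions, regenerating the per-line DE sequence before each one, which is what the min_t/subtract loop does. A sketch of the chunking idea, with emit_line_signal()/emit_replay() as hypothetical emitters:

```c
#define MAX_ARG 0x3ff	/* 10-bit instruction argument: 1023 max */

extern void emit_line_signal(void);	/* the sequence REPLAY repeats */
extern void emit_replay(int count);

void emit_replays(long total)		/* total = active_lines - 1 */
{
	while (total > 0) {
		emit_line_signal();
		emit_replay(total < MAX_ARG ? (int)total : MAX_ARG);
		total -= MAX_ARG;
	}
}
```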
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index afed2171beb9..3d2fa3ab33df 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -75,13 +75,13 @@ static int sti_compositor_bind(struct device *dev,
switch (desc[i].type) {
case STI_VID_SUBDEV:
compo->vid[vid_id++] =
- sti_vid_create(compo->dev, desc[i].id,
+ sti_vid_create(compo->dev, drm_dev, desc[i].id,
compo->regs + desc[i].offset);
break;
case STI_MIXER_MAIN_SUBDEV:
case STI_MIXER_AUX_SUBDEV:
compo->mixer[mixer_id++] =
- sti_mixer_create(compo->dev, desc[i].id,
+ sti_mixer_create(compo->dev, drm_dev, desc[i].id,
compo->regs + desc[i].offset);
break;
case STI_GPD_SUBDEV:
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index de11c7cfb02f..505620c7c2c8 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -56,6 +56,7 @@ static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
/* accept the provided drm_display_mode, do not fix it up */
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
return true;
}
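The fixup above no longer just accepts the mode: drm_mode_set_crtcinfo() derives the crtc_* timing fields (crtc_hdisplay and friends) that later consumers such as the VTG line/pixel helpers read, and CRTC_INTERLACE_HALVE_V halves the vertical values for interlaced modes. A minimal sketch of where that call belongs:

```c
#include <drm/drm_modes.h>

static bool crtc_mode_fixup(const struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode)
{
	/* Populate adjusted_mode->crtc_hdisplay, crtc_vtotal, ...;
	 * interlaced modes get their vertical timings halved. */
	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
	return true;
}
```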
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 807863106b8d..3abb400151ac 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -5,12 +5,10 @@
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
@@ -74,6 +72,82 @@ static const uint32_t cursor_supported_formats[] = {
#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(cursor->regs + reg))
+
+static void cursor_dbg_vpo(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void cursor_dbg_size(struct seq_file *s, u32 val)
+{
+ seq_printf(s, "\t%d x %d", val & 0x07FF, (val >> 16) & 0x07FF);
+}
+
+static void cursor_dbg_pml(struct seq_file *s,
+ struct sti_cursor *cursor, u32 val)
+{
+ if (cursor->pixmap.paddr == val)
+ seq_printf(s, "\tVirt @: %p", cursor->pixmap.base);
+}
+
+static void cursor_dbg_cml(struct seq_file *s,
+ struct sti_cursor *cursor, u32 val)
+{
+ if (cursor->clut_paddr == val)
+ seq_printf(s, "\tVirt @: %p", cursor->clut);
+}
+
+static int cursor_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "%s: (vaddr = 0x%p)",
+ sti_plane_to_str(&cursor->plane), cursor->regs);
+
+ DBGFS_DUMP(CUR_CTL);
+ DBGFS_DUMP(CUR_VPO);
+ cursor_dbg_vpo(s, readl(cursor->regs + CUR_VPO));
+ DBGFS_DUMP(CUR_PML);
+ cursor_dbg_pml(s, cursor, readl(cursor->regs + CUR_PML));
+ DBGFS_DUMP(CUR_PMP);
+ DBGFS_DUMP(CUR_SIZE);
+ cursor_dbg_size(s, readl(cursor->regs + CUR_SIZE));
+ DBGFS_DUMP(CUR_CML);
+ cursor_dbg_cml(s, cursor, readl(cursor->regs + CUR_CML));
+ DBGFS_DUMP(CUR_AWS);
+ DBGFS_DUMP(CUR_AWE);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list cursor_debugfs_files[] = {
+ { "cursor", cursor_dbg_show, 0, NULL },
+};
+
+static int cursor_debugfs_init(struct sti_cursor *cursor,
+ struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
+ cursor_debugfs_files[i].data = cursor;
+
+ return drm_debugfs_create_files(cursor_debugfs_files,
+ ARRAY_SIZE(cursor_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
{
u8 *dst = cursor->pixmap.base;
@@ -110,35 +184,31 @@ static void sti_cursor_init(struct sti_cursor *cursor)
(b * 5);
}
-static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
+ struct drm_plane_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
struct drm_crtc *crtc = state->crtc;
- struct sti_mixer *mixer = to_sti_mixer(crtc);
struct drm_framebuffer *fb = state->fb;
- struct drm_display_mode *mode = &crtc->mode;
- int dst_x = state->crtc_x;
- int dst_y = state->crtc_y;
- int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_w, src_h;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ mode = &crtc_state->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
/* src_x are in 16.16 format */
- int src_w = state->src_w >> 16;
- int src_h = state->src_h >> 16;
- bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
- struct drm_gem_cma_object *cma_obj;
- u32 y, x;
- u32 val;
-
- DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- drm_plane->base.id, sti_plane_to_str(plane));
- DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
-
- dev_dbg(cursor->dev, "%s %s\n", __func__,
- sti_plane_to_str(plane));
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
if (src_w < STI_CURS_MIN_SIZE ||
src_h < STI_CURS_MIN_SIZE ||
@@ -146,7 +216,7 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
src_h > STI_CURS_MAX_SIZE) {
DRM_ERROR("Invalid cursor size (%dx%d)\n",
src_w, src_h);
- return;
+ return -EINVAL;
}
/* If the cursor size has changed, re-allocated the pixmap */
@@ -157,29 +227,57 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
cursor->height = src_h;
if (cursor->pixmap.base)
- dma_free_writecombine(cursor->dev,
- cursor->pixmap.size,
- cursor->pixmap.base,
- cursor->pixmap.paddr);
+ dma_free_wc(cursor->dev, cursor->pixmap.size,
+ cursor->pixmap.base, cursor->pixmap.paddr);
cursor->pixmap.size = cursor->width * cursor->height;
- cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
- cursor->pixmap.size,
- &cursor->pixmap.paddr,
- GFP_KERNEL | GFP_DMA);
+ cursor->pixmap.base = dma_alloc_wc(cursor->dev,
+ cursor->pixmap.size,
+ &cursor->pixmap.paddr,
+ GFP_KERNEL | GFP_DMA);
if (!cursor->pixmap.base) {
DRM_ERROR("Failed to allocate memory for pixmap\n");
- return;
+ return -EINVAL;
}
}
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
+ if (!drm_fb_cma_get_gem_obj(fb, 0)) {
DRM_ERROR("Can't get CMA GEM object for fb\n");
- return;
+ return -EINVAL;
}
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
+
+ return 0;
+}
+
+static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_cursor *cursor = to_sti_cursor(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y;
+ struct drm_gem_cma_object *cma_obj;
+ u32 y, x;
+ u32 val;
+
+ if (!crtc || !fb)
+ return;
+
+ mode = &crtc->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+
/* Convert ARGB8888 to CLUT8 */
sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
@@ -193,21 +291,21 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
val = y << 16 | x;
writel(val, cursor->regs + CUR_AWE);
- if (first_prepare) {
- /* Set and fetch CLUT */
- writel(cursor->clut_paddr, cursor->regs + CUR_CML);
- writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
- }
-
/* Set memory location, size, and position */
writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
writel(cursor->width, cursor->regs + CUR_PMP);
writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
y = sti_vtg_get_line_number(*mode, dst_y);
- x = sti_vtg_get_pixel_number(*mode, dst_y);
+ x = sti_vtg_get_pixel_number(*mode, dst_x);
writel((y << 16) | x, cursor->regs + CUR_VPO);
+ /* Set and fetch CLUT */
+ writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+ writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
+
+ sti_plane_update_fps(plane, true, false);
+
plane->status = STI_PLANE_UPDATED;
}
@@ -215,7 +313,6 @@ static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -224,13 +321,15 @@ static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+ .atomic_check = sti_cursor_atomic_check,
.atomic_update = sti_cursor_atomic_update,
.atomic_disable = sti_cursor_atomic_disable,
};
@@ -252,8 +351,8 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
/* Allocate clut buffer */
size = 0x100 * sizeof(unsigned short);
- cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
- GFP_KERNEL | GFP_DMA);
+ cursor->clut = dma_alloc_wc(dev, size, &cursor->clut_paddr,
+ GFP_KERNEL | GFP_DMA);
if (!cursor->clut) {
DRM_ERROR("Failed to allocate memory for cursor clut\n");
@@ -283,10 +382,13 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
+ if (cursor_debugfs_init(cursor, drm_dev->primary))
+ DRM_ERROR("CURSOR debugfs setup failed\n");
+
return &cursor->plane.drm_plane;
err_plane:
- dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
+ dma_free_wc(dev, size, cursor->clut, cursor->clut_paddr);
err_clut:
devm_kfree(dev, cursor);
return NULL;
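The dma_alloc_writecombine()/dma_free_writecombine() conversions in this file are mechanical: the pair was renamed dma_alloc_wc()/dma_free_wc() in this kernel cycle with identical semantics, returning CPU-write-combined memory suitable for scanout data. A minimal sketch of the renamed pair:

```c
#include <linux/dma-mapping.h>

static void *alloc_pixmap(struct device *dev, size_t size,
			  dma_addr_t *paddr)
{
	/* write-combined: CPU writes are buffered, fine for scanout */
	return dma_alloc_wc(dev, size, paddr, GFP_KERNEL);
}

static void free_pixmap(struct device *dev, size_t size,
			void *base, dma_addr_t paddr)
{
	dma_free_wc(dev, size, base, paddr);
}
```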
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 506b5626f3ed..6bd6abaa5a70 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -20,6 +20,7 @@
#include "sti_crtc.h"
#include "sti_drv.h"
+#include "sti_plane.h"
#define DRIVER_NAME "sti"
#define DRIVER_DESC "STMicroelectronics SoC DRM"
@@ -30,6 +31,130 @@
#define STI_MAX_FB_HEIGHT 4096
#define STI_MAX_FB_WIDTH 4096
+static int sti_drm_fps_get(void *data, u64 *val)
+{
+ struct drm_device *drm_dev = data;
+ struct drm_plane *p;
+ unsigned int i = 0;
+
+ *val = 0;
+ list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ *val |= plane->fps_info.output << i;
+ i++;
+ }
+
+ return 0;
+}
+
+static int sti_drm_fps_set(void *data, u64 val)
+{
+ struct drm_device *drm_dev = data;
+ struct drm_plane *p;
+ unsigned int i = 0;
+
+ list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ plane->fps_info.output = (val >> i) & 1;
+ i++;
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sti_drm_fps_fops,
+ sti_drm_fps_get, sti_drm_fps_set, "%llu\n");
+
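The get/set pair above packs one bit per plane into the u64, so a single debugfs write toggles fps tracing plane by plane. DEFINE_SIMPLE_ATTRIBUTE generates the file_operations for such a knob from the get/set callbacks, with the "%llu\n" format governing how reads print and writes parse the value. A self-contained sketch of the macro mechanics with a hypothetical knob:

```c
#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 knob_value;

static int knob_get(void *data, u64 *val)
{
	*val = knob_value;
	return 0;
}

static int knob_set(void *data, u64 val)
{
	knob_value = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(knob_fops, knob_get, knob_set, "%llu\n");

/* then: debugfs_create_file("knob", 0644, parent, NULL, &knob_fops); */
```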
+static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_plane *p;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(p, &dev->mode_config.plane_list, head) {
+ struct sti_plane *plane = to_sti_plane(p);
+
+ seq_printf(s, "%s%s\n",
+ plane->fps_info.fps_str,
+ plane->fps_info.fips_str);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list sti_drm_dbg_list[] = {
+ {"fps_get", sti_drm_fps_dbg_show, 0},
+};
+
+static int sti_drm_debugfs_create(struct dentry *root,
+ struct drm_minor *minor,
+ const char *name,
+ const struct file_operations *fops)
+{
+ struct drm_device *dev = minor->dev;
+ struct drm_info_node *node;
+ struct dentry *ent;
+
+ ent = debugfs_create_file(name, S_IRUGO | S_IWUSR, root, dev, fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *)fops;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
+static int sti_drm_dbg_init(struct drm_minor *minor)
+{
+ int ret;
+
+ ret = drm_debugfs_create_files(sti_drm_dbg_list,
+ ARRAY_SIZE(sti_drm_dbg_list),
+ minor->debugfs_root, minor);
+ if (ret)
+ goto err;
+
+ ret = sti_drm_debugfs_create(minor->debugfs_root, minor, "fps_show",
+ &sti_drm_fps_fops);
+ if (ret)
+ goto err;
+
+ DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
+ return 0;
+err:
+ DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
+ return ret;
+}
+
+void sti_drm_dbg_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(sti_drm_dbg_list,
+ ARRAY_SIZE(sti_drm_dbg_list), minor);
+
+ drm_debugfs_remove_files((struct drm_info_list *)&sti_drm_fps_fops,
+ 1, minor);
+}
+
static void sti_atomic_schedule(struct sti_private *private,
struct drm_atomic_state *state)
{
@@ -181,18 +306,9 @@ static const struct file_operations sti_driver_fops = {
.release = drm_release,
};
-static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
-{
- /* we want to be able to write in mmapped buffer */
- flags |= O_RDWR;
- return drm_gem_prime_export(dev, obj, flags);
-}
-
static struct drm_driver sti_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
- DRIVER_GEM | DRIVER_PRIME,
+ DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
.load = sti_load,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -207,7 +323,7 @@ static struct drm_driver sti_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = sti_gem_prime_export,
+ .gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@@ -215,6 +331,9 @@ static struct drm_driver sti_driver = {
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .debugfs_init = sti_drm_dbg_init,
+ .debugfs_cleanup = sti_drm_dbg_cleanup,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 45cbe2bf7dd6..25f76632002c 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
@@ -156,6 +157,69 @@ static void dvo_awg_configure(struct sti_dvo *dvo, u32 *awg_ram_code, int nb)
writel(DVO_AWG_CTRL_EN, dvo->regs + DVO_AWG_DIGSYNC_CTRL);
}
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(dvo->regs + reg))
+
+static void dvo_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
+{
+ unsigned int i;
+
+ seq_puts(s, "\n\n");
+ seq_puts(s, " DVO AWG microcode:");
+ for (i = 0; i < AWG_MAX_INST; i++) {
+ if (i % 8 == 0)
+ seq_printf(s, "\n %04X:", i);
+ seq_printf(s, " %04X", readl(reg + i * 4));
+ }
+}
+
+static int dvo_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
+ DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
+ DBGFS_DUMP(DVO_DOF_CFG);
+ DBGFS_DUMP(DVO_LUT_PROG_LOW);
+ DBGFS_DUMP(DVO_LUT_PROG_MID);
+ DBGFS_DUMP(DVO_LUT_PROG_HIGH);
+ dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list dvo_debugfs_files[] = {
+ { "dvo", dvo_dbg_show, 0, NULL },
+};
+
+static void dvo_debugfs_exit(struct sti_dvo *dvo, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(dvo_debugfs_files,
+ ARRAY_SIZE(dvo_debugfs_files),
+ minor);
+}
+
+static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
+ dvo_debugfs_files[i].data = dvo;
+
+ return drm_debugfs_create_files(dvo_debugfs_files,
+ ARRAY_SIZE(dvo_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
static void sti_dvo_disable(struct drm_bridge *bridge)
{
struct sti_dvo *dvo = bridge->driver_private;
@@ -345,12 +409,14 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
DRM_DEBUG_DRIVER("\n");
- if (!dvo->panel)
+ if (!dvo->panel) {
dvo->panel = of_drm_find_panel(dvo->panel_node);
+ if (dvo->panel)
+ drm_panel_attach(dvo->panel, connector);
+ }
if (dvo->panel)
- if (!drm_panel_attach(dvo->panel, connector))
- return connector_status_connected;
+ return connector_status_connected;
return connector_status_disconnected;
}
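The detect rework above fixes a subtle polling bug: drm_panel_attach() fails once the panel already has a connector, so attaching on every detect call made every poll after the first report disconnected. Attaching exactly once, at the moment the panel is first found, makes detect idempotent. A sketch of the attach-once pattern, with find_panel()/attach() as hypothetical stand-ins for of_drm_find_panel()/drm_panel_attach():

```c
struct panel;
extern struct panel *find_panel(void);
extern int attach(struct panel *p);	/* fails if already attached */

static struct panel *cached;

int detect_connected(void)
{
	if (!cached) {
		cached = find_panel();
		if (cached)
			attach(cached);	/* do this exactly once */
	}
	return cached != NULL;		/* stable on later polls */
}
```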
@@ -453,6 +519,9 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ if (dvo_debugfs_init(dvo, drm_dev->primary))
+ DRM_ERROR("DVO debugfs setup failed\n");
+
return 0;
err_sysfs:
@@ -467,6 +536,9 @@ static void sti_dvo_unbind(struct device *dev,
struct device *master, void *data)
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ dvo_debugfs_exit(dvo, drm_dev->primary);
drm_bridge_remove(dvo->bridge);
}
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index f9a1d92c9d95..ff3d3e7e7704 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -6,9 +6,7 @@
* License terms: GNU General Public License (GPL), version 2
*/
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-
+#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -32,10 +30,23 @@
#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
#define GDP_ARGB1555 0x06
#define GDP_ARGB4444 0x07
-#define GDP_CLUT8 0x0B
-#define GDP_YCBR888 0x10
-#define GDP_YCBR422R 0x12
-#define GDP_AYCBR8888 0x15
+
+#define GDP2STR(fmt) { GDP_ ## fmt, #fmt }
+
+static struct gdp_format_to_str {
+ int format;
+ char name[20];
+} gdp_format_to_str[] = {
+ GDP2STR(RGB565),
+ GDP2STR(RGB888),
+ GDP2STR(RGB888_32),
+ GDP2STR(XBGR8888),
+ GDP2STR(ARGB8565),
+ GDP2STR(ARGB8888),
+ GDP2STR(ABGR8888),
+ GDP2STR(ARGB1555),
+ GDP2STR(ARGB4444)
+ };
#define GAM_GDP_CTL_OFFSET 0x00
#define GAM_GDP_AGC_OFFSET 0x04
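The GDP2STR table introduced above relies on the preprocessor's token-pasting and stringizing operators so each format code and its printable name stay in sync from a single token; the debugfs dump can then print "ARGB8888" instead of a raw 0x05. A standalone sketch of the same trick with hypothetical format macros:

```c
#include <stdio.h>

#define FMT_RGB565   0x00
#define FMT_ARGB8888 0x05

/* ## pastes the token onto FMT_, # stringizes it */
#define FMT2STR(f) { FMT_ ## f, #f }

static const struct { int code; const char *name; } fmt_names[] = {
	FMT2STR(RGB565),
	FMT2STR(ARGB8888),
};

int main(void)
{
	/* prints "0x05 -> ARGB8888" */
	printf("0x%02X -> %s\n", fmt_names[1].code, fmt_names[1].name);
	return 0;
}
```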
@@ -97,6 +108,7 @@ struct sti_gdp_node_list {
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
* @node_list: array of node list
+ * @vtg: registered vtg
*/
struct sti_gdp {
struct sti_plane plane;
@@ -108,6 +120,7 @@ struct sti_gdp {
struct notifier_block vtg_field_nb;
bool is_curr_top;
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
+ struct sti_vtg *vtg;
};
#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
@@ -121,12 +134,224 @@ static const uint32_t gdp_supported_formats[] = {
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
- DRM_FORMAT_AYUV,
- DRM_FORMAT_YUV444,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_C8,
};
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(gdp->regs + reg ## _OFFSET))
+
+static void gdp_dbg_ctl(struct seq_file *s, int val)
+{
+ int i;
+
+ seq_puts(s, "\tColor:");
+ for (i = 0; i < ARRAY_SIZE(gdp_format_to_str); i++) {
+ if (gdp_format_to_str[i].format == (val & 0x1F)) {
+ seq_puts(s, gdp_format_to_str[i].name);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(gdp_format_to_str))
+ seq_puts(s, "<UNKNOWN>");
+
+ seq_printf(s, "\tWaitNextVsync:%d", val & WAIT_NEXT_VSYNC ? 1 : 0);
+}
+
+static void gdp_dbg_vpo(struct seq_file *s, int val)
+{
+ seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_vps(struct seq_file *s, int val)
+{
+ seq_printf(s, "\txds:%4d\tyds:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_size(struct seq_file *s, int val)
+{
+ seq_printf(s, "\t%d x %d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+}
+
+static void gdp_dbg_nvn(struct seq_file *s, struct sti_gdp *gdp, int val)
+{
+ void *base = NULL;
+ unsigned int i;
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+ if (gdp->node_list[i].top_field_paddr == val) {
+ base = gdp->node_list[i].top_field;
+ break;
+ }
+ if (gdp->node_list[i].btm_field_paddr == val) {
+ base = gdp->node_list[i].btm_field;
+ break;
+ }
+ }
+
+ if (base)
+ seq_printf(s, "\tVirt @: %p", base);
+}
+
+static void gdp_dbg_ppt(struct seq_file *s, int val)
+{
+ if (val & GAM_GDP_PPT_IGNORE)
+ seq_puts(s, "\tNot displayed on mixer!");
+}
+
+static void gdp_dbg_mst(struct seq_file *s, int val)
+{
+ if (val & 1)
+ seq_puts(s, "\tBUFFER UNDERFLOW!");
+}
+
+static int gdp_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+ struct drm_crtc *crtc = drm_plane->crtc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "%s: (vaddr = 0x%p)",
+ sti_plane_to_str(&gdp->plane), gdp->regs);
+
+ DBGFS_DUMP(GAM_GDP_CTL);
+ gdp_dbg_ctl(s, readl(gdp->regs + GAM_GDP_CTL_OFFSET));
+ DBGFS_DUMP(GAM_GDP_AGC);
+ DBGFS_DUMP(GAM_GDP_VPO);
+ gdp_dbg_vpo(s, readl(gdp->regs + GAM_GDP_VPO_OFFSET));
+ DBGFS_DUMP(GAM_GDP_VPS);
+ gdp_dbg_vps(s, readl(gdp->regs + GAM_GDP_VPS_OFFSET));
+ DBGFS_DUMP(GAM_GDP_PML);
+ DBGFS_DUMP(GAM_GDP_PMP);
+ DBGFS_DUMP(GAM_GDP_SIZE);
+ gdp_dbg_size(s, readl(gdp->regs + GAM_GDP_SIZE_OFFSET));
+ DBGFS_DUMP(GAM_GDP_NVN);
+ gdp_dbg_nvn(s, gdp, readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+ DBGFS_DUMP(GAM_GDP_KEY1);
+ DBGFS_DUMP(GAM_GDP_KEY2);
+ DBGFS_DUMP(GAM_GDP_PPT);
+ gdp_dbg_ppt(s, readl(gdp->regs + GAM_GDP_PPT_OFFSET));
+ DBGFS_DUMP(GAM_GDP_CML);
+ DBGFS_DUMP(GAM_GDP_MST);
+ gdp_dbg_mst(s, readl(gdp->regs + GAM_GDP_MST_OFFSET));
+
+ seq_puts(s, "\n\n");
+ if (!crtc)
+ seq_puts(s, " Not connected to any DRM CRTC\n");
+ else
+ seq_printf(s, " Connected to DRM CRTC #%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static void gdp_node_dump_node(struct seq_file *s, struct sti_gdp_node *node)
+{
+ seq_printf(s, "\t@:0x%p", node);
+ seq_printf(s, "\n\tCTL 0x%08X", node->gam_gdp_ctl);
+ gdp_dbg_ctl(s, node->gam_gdp_ctl);
+ seq_printf(s, "\n\tAGC 0x%08X", node->gam_gdp_agc);
+ seq_printf(s, "\n\tVPO 0x%08X", node->gam_gdp_vpo);
+ gdp_dbg_vpo(s, node->gam_gdp_vpo);
+ seq_printf(s, "\n\tVPS 0x%08X", node->gam_gdp_vps);
+ gdp_dbg_vps(s, node->gam_gdp_vps);
+ seq_printf(s, "\n\tPML 0x%08X", node->gam_gdp_pml);
+ seq_printf(s, "\n\tPMP 0x%08X", node->gam_gdp_pmp);
+ seq_printf(s, "\n\tSIZE 0x%08X", node->gam_gdp_size);
+ gdp_dbg_size(s, node->gam_gdp_size);
+ seq_printf(s, "\n\tNVN 0x%08X", node->gam_gdp_nvn);
+ seq_printf(s, "\n\tKEY1 0x%08X", node->gam_gdp_key1);
+ seq_printf(s, "\n\tKEY2 0x%08X", node->gam_gdp_key2);
+ seq_printf(s, "\n\tPPT 0x%08X", node->gam_gdp_ppt);
+ gdp_dbg_ppt(s, node->gam_gdp_ppt);
+ seq_printf(s, "\n\tCML 0x%08X", node->gam_gdp_cml);
+ seq_puts(s, "\n");
+}
+
+static int gdp_node_dbg_show(struct seq_file *s, void *arg)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ unsigned int b;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ for (b = 0; b < GDP_NODE_NB_BANK; b++) {
+ seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
+ gdp_node_dump_node(s, gdp->node_list[b].top_field);
+ seq_printf(s, "\n%s[%d].btm", sti_plane_to_str(&gdp->plane), b);
+ gdp_node_dump_node(s, gdp->node_list[b].btm_field);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list gdp0_debugfs_files[] = {
+ { "gdp0", gdp_dbg_show, 0, NULL },
+ { "gdp0_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp1_debugfs_files[] = {
+ { "gdp1", gdp_dbg_show, 0, NULL },
+ { "gdp1_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp2_debugfs_files[] = {
+ { "gdp2", gdp_dbg_show, 0, NULL },
+ { "gdp2_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list gdp3_debugfs_files[] = {
+ { "gdp3", gdp_dbg_show, 0, NULL },
+ { "gdp3_node", gdp_node_dbg_show, 0, NULL },
+};
+
+static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
+{
+ unsigned int i;
+ struct drm_info_list *gdp_debugfs_files;
+ int nb_files;
+
+ switch (gdp->plane.desc) {
+ case STI_GDP_0:
+ gdp_debugfs_files = gdp0_debugfs_files;
+ nb_files = ARRAY_SIZE(gdp0_debugfs_files);
+ break;
+ case STI_GDP_1:
+ gdp_debugfs_files = gdp1_debugfs_files;
+ nb_files = ARRAY_SIZE(gdp1_debugfs_files);
+ break;
+ case STI_GDP_2:
+ gdp_debugfs_files = gdp2_debugfs_files;
+ nb_files = ARRAY_SIZE(gdp2_debugfs_files);
+ break;
+ case STI_GDP_3:
+ gdp_debugfs_files = gdp3_debugfs_files;
+ nb_files = ARRAY_SIZE(gdp3_debugfs_files);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_files; i++)
+ gdp_debugfs_files[i].data = gdp;
+
+ return drm_debugfs_create_files(gdp_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
+}
+
static int sti_gdp_fourcc2format(int fourcc)
{
switch (fourcc) {
@@ -146,14 +371,6 @@ static int sti_gdp_fourcc2format(int fourcc)
return GDP_RGB565;
case DRM_FORMAT_RGB888:
return GDP_RGB888;
- case DRM_FORMAT_AYUV:
- return GDP_AYCBR8888;
- case DRM_FORMAT_YUV444:
- return GDP_YCBR888;
- case DRM_FORMAT_VYUY:
- return GDP_YCBR422R;
- case DRM_FORMAT_C8:
- return GDP_CLUT8;
}
return -1;
}
@@ -163,7 +380,6 @@ static int sti_gdp_get_alpharange(int format)
switch (format) {
case GDP_ARGB8565:
case GDP_ARGB8888:
- case GDP_AYCBR8888:
case GDP_ABGR8888:
return GAM_GDP_ALPHARANGE_255;
}
@@ -240,9 +456,6 @@ end:
*/
static void sti_gdp_disable(struct sti_gdp *gdp)
{
- struct drm_plane *drm_plane = &gdp->plane.drm_plane;
- struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
- struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
unsigned int i;
DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
@@ -253,8 +466,7 @@ static void sti_gdp_disable(struct sti_gdp *gdp)
gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
}
- if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
+ if (sti_vtg_unregister_client(gdp->vtg, &gdp->vtg_field_nb))
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
if (gdp->clk_pix)
@@ -312,8 +524,7 @@ static void sti_gdp_init(struct sti_gdp *gdp)
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
- base = dma_alloc_writecombine(gdp->dev,
- size, &dma_addr, GFP_KERNEL | GFP_DMA);
+ base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -380,20 +591,140 @@ static void sti_gdp_init(struct sti_gdp *gdp)
}
}
-static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+/**
+ * sti_gdp_get_dst
+ * @dev: device
+ * @dst: requested destination size
+ * @src: source size
+ *
+ * Return the cropped / clamped destination size
+ *
+ * RETURNS:
+ * cropped / clamped destination size
+ */
+static int sti_gdp_get_dst(struct device *dev, int dst, int src)
+{
+ if (dst == src)
+ return dst;
+
+ if (dst < src) {
+ dev_dbg(dev, "WARNING: GDP scale not supported, will crop\n");
+ return dst;
+ }
+
+ dev_dbg(dev, "WARNING: GDP scale not supported, will clamp\n");
+ return src;
+}
+
+static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
+ struct drm_plane_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
struct drm_crtc *crtc = state->crtc;
struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
struct drm_framebuffer *fb = state->fb;
bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
+ struct drm_crtc_state *crtc_state;
struct sti_mixer *mixer;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
+ int format;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!crtc || !fb)
+ return 0;
+
+ mixer = to_sti_mixer(crtc);
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ mode = &crtc_state->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+ src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+
+ format = sti_gdp_fourcc2format(fb->pixel_format);
+ if (format == -1) {
+ DRM_ERROR("Format not supported by GDP %.4s\n",
+ (char *)&fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (!drm_fb_cma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return -EINVAL;
+ }
+
+ if (first_prepare) {
+ /* Register gdp callback */
+ gdp->vtg = mixer->id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux;
+ if (sti_vtg_register_client(gdp->vtg,
+ &gdp->vtg_field_nb, crtc)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -EINVAL;
+ }
+
+ /* Set and enable gdp clock */
+ if (gdp->clk_pix) {
+ struct clk *clkp;
+ int rate = mode->clock * 1000;
+ int res;
+
+ /*
+ * According to the mixer used, the gdp pixel clock
+ * should have a different parent clock.
+ */
+ if (mixer->id == STI_MIXER_MAIN)
+ clkp = gdp->clk_main_parent;
+ else
+ clkp = gdp->clk_aux_parent;
+
+ if (clkp)
+ clk_set_parent(gdp->clk_pix, clkp);
+
+ res = clk_set_rate(gdp->clk_pix, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+ rate);
+ return -EINVAL;
+ }
+
+ if (clk_prepare_enable(gdp->clk_pix)) {
+ DRM_ERROR("Failed to prepare/enable gdp\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->base.id, sti_plane_to_str(plane));
+ DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_plane_to_str(plane),
+ dst_w, dst_h, dst_x, dst_y,
+ src_w, src_h, src_x, src_y);
+
+ return 0;
+}
+
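sti_gdp_atomic_check() above absorbs the validation that used to sit in atomic_update: format lookup, CMA GEM check, VTG registration and pixel-clock setup. The payoff is that an impossible configuration now fails the commit with -EINVAL instead of being silently dropped mid-update, and atomic_update (below) can assume a validated state. A sketch of that check/update contract, with hypothetical validate_*() helpers:

```c
struct plane_state { int format; int has_buffer; };

extern int validate_format(const struct plane_state *s);
extern int validate_buffer(const struct plane_state *s);
extern void program_hardware(const struct plane_state *s);

int plane_atomic_check(struct plane_state *s)
{
	int ret;

	ret = validate_format(s);	/* reject the commit up front */
	if (ret)
		return ret;
	return validate_buffer(s);
}

void plane_atomic_update(struct plane_state *s)
{
	/* by contract, s already passed plane_atomic_check() */
	program_hardware(s);
}
```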
+static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_gdp *gdp = to_sti_gdp(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_x, src_y, src_w, src_h;
struct drm_gem_cma_object *cma_obj;
struct sti_gdp_node_list *list;
struct sti_gdp_node_list *curr_list;
@@ -403,13 +734,10 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
int format;
unsigned int depth, bpp;
u32 ydo, xdo, yds, xds;
- int res;
- /* Manage the case where crtc is null (disabled) */
- if (!crtc)
+ if (!crtc || !fb)
return;
- mixer = to_sti_mixer(crtc);
mode = &crtc->mode;
dst_x = state->crtc_x;
dst_y = state->crtc_y;
@@ -418,16 +746,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
-
- DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
- drm_plane->base.id, sti_plane_to_str(plane));
- DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
- sti_plane_to_str(plane),
- dst_w, dst_h, dst_x, dst_y,
- src_w, src_h, src_x, src_y);
+ src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+ src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
list = sti_gdp_get_free_nodes(gdp);
top_field = list->top_field;
@@ -440,20 +760,11 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
format = sti_gdp_fourcc2format(fb->pixel_format);
- if (format == -1) {
- DRM_ERROR("Format not supported by GDP %.4s\n",
- (char *)&fb->pixel_format);
- return;
- }
top_field->gam_gdp_ctl |= format;
top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
- return;
- }
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->pixel_format,
@@ -465,12 +776,9 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
top_field->gam_gdp_pml += src_x * (bpp >> 3);
top_field->gam_gdp_pml += src_y * fb->pitches[0];
- /* input parameters */
- top_field->gam_gdp_pmp = fb->pitches[0];
- top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
- clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
-
- /* output parameters */
+ /* output parameters (clamped / cropped) */
+ dst_w = sti_gdp_get_dst(gdp->dev, dst_w, src_w);
+ dst_h = sti_gdp_get_dst(gdp->dev, dst_h, src_h);
ydo = sti_vtg_get_line_number(*mode, dst_y);
yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
xdo = sti_vtg_get_pixel_number(*mode, dst_x);
@@ -478,6 +786,11 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
top_field->gam_gdp_vpo = (ydo << 16) | xdo;
top_field->gam_gdp_vps = (yds << 16) | xds;
+ /* input parameters */
+ src_w = dst_w;
+ top_field->gam_gdp_pmp = fb->pitches[0];
+ top_field->gam_gdp_size = src_h << 16 | src_w;
+
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
top_field->gam_gdp_nvn = list->btm_field_paddr;
@@ -488,44 +801,6 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
fb->pitches[0];
- if (first_prepare) {
- /* Register gdp callback */
- if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
- compo->vtg_main : compo->vtg_aux,
- &gdp->vtg_field_nb, crtc)) {
- DRM_ERROR("Cannot register VTG notifier\n");
- return;
- }
-
- /* Set and enable gdp clock */
- if (gdp->clk_pix) {
- struct clk *clkp;
- int rate = mode->clock * 1000;
-
- /* According to the mixer used, the gdp pixel clock
- * should have a different parent clock. */
- if (mixer->id == STI_MIXER_MAIN)
- clkp = gdp->clk_main_parent;
- else
- clkp = gdp->clk_aux_parent;
-
- if (clkp)
- clk_set_parent(gdp->clk_pix, clkp);
-
- res = clk_set_rate(gdp->clk_pix, rate);
- if (res < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
- rate);
- return;
- }
-
- if (clk_prepare_enable(gdp->clk_pix)) {
- DRM_ERROR("Failed to prepare/enable gdp\n");
- return;
- }
- }
- }
-
/* Update the NVN field of the 'right' field of the current GDP node
* (being used by the HW) with the address of the updated ('free') top
* field GDP node.
@@ -574,6 +849,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
}
end:
+ sti_plane_update_fps(plane, true, false);
+
plane->status = STI_PLANE_UPDATED;
}
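
For context on the NVN handling above: every GDP node ends with an NVN word
holding the physical address of the next node, so the hardware simply walks a
linked list at each VSYNC. A simplified model (sketch only; the struct and
field names are illustrative, not the driver's):

    /* top and bottom field nodes of a free list point at each other; the
     * hardware keeps looping on the in-use pair until software repoints the
     * in-use node's nvn at a freshly written free pair.
     */
    struct gdp_node_model {
        /* ...format, pitch, viewport registers... */
        unsigned int nvn; /* physical address of the next node */
    };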
@@ -581,7 +858,6 @@ static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -590,13 +866,15 @@ static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+ .atomic_check = sti_gdp_atomic_check,
.atomic_update = sti_gdp_atomic_update,
.atomic_disable = sti_gdp_atomic_disable,
};
@@ -640,6 +918,9 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
sti_plane_init_property(&gdp->plane, type);
+ if (gdp_debugfs_init(gdp, drm_dev->primary))
+ DRM_ERROR("GDP debugfs setup failed\n");
+
return &gdp->plane.drm_plane;
err:
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 49cce833f2c8..ec0d017eaf1a 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -326,6 +326,103 @@ static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
}
}
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(hda->regs + reg))
+
+static void hda_dbg_cfg(struct seq_file *s, int val)
+{
+ seq_puts(s, "\tAWG ");
+ seq_puts(s, val & CFG_AWG_ASYNC_EN ? "enabled" : "disabled");
+}
+
+static void hda_dbg_awg_microcode(struct seq_file *s, void __iomem *reg)
+{
+ unsigned int i;
+
+ seq_puts(s, "\n\n");
+ seq_puts(s, " HDA AWG microcode:");
+ for (i = 0; i < AWG_MAX_INST; i++) {
+ if (i % 8 == 0)
+ seq_printf(s, "\n %04X:", i);
+ seq_printf(s, " %04X", readl(reg + i * 4));
+ }
+}
+
+static void hda_dbg_video_dacs_ctrl(struct seq_file *s, void __iomem *reg)
+{
+ u32 val = readl(reg);
+ u32 mask;
+
+ switch ((u32)reg & VIDEO_DACS_CONTROL_MASK) {
+ case VIDEO_DACS_CONTROL_SYSCFG2535:
+ mask = DAC_CFG_HD_OFF_MASK;
+ break;
+ case VIDEO_DACS_CONTROL_SYSCFG5072:
+ mask = DAC_CFG_HD_HZUVW_OFF_MASK;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Warning: DACS ctrl register not supported!");
+ return;
+ }
+
+ seq_puts(s, "\n");
+ seq_printf(s, "\n %-25s 0x%08X", "VIDEO_DACS_CONTROL", val);
+ seq_puts(s, "\tHD DACs ");
+ seq_puts(s, val & mask ? "disabled" : "enabled");
+}
+
+static int hda_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
+ DBGFS_DUMP(HDA_ANA_CFG);
+ hda_dbg_cfg(s, readl(hda->regs + HDA_ANA_CFG));
+ DBGFS_DUMP(HDA_ANA_SCALE_CTRL_Y);
+ DBGFS_DUMP(HDA_ANA_SCALE_CTRL_CB);
+ DBGFS_DUMP(HDA_ANA_SCALE_CTRL_CR);
+ DBGFS_DUMP(HDA_ANA_ANC_CTRL);
+ DBGFS_DUMP(HDA_ANA_SRC_Y_CFG);
+ DBGFS_DUMP(HDA_ANA_SRC_C_CFG);
+ hda_dbg_awg_microcode(s, hda->regs + HDA_SYNC_AWGI);
+ if (hda->video_dacs_ctrl)
+ hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list hda_debugfs_files[] = {
+ { "hda", hda_dbg_show, 0, NULL },
+};
+
+static void hda_debugfs_exit(struct sti_hda *hda, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(hda_debugfs_files,
+ ARRAY_SIZE(hda_debugfs_files),
+ minor);
+}
+
+static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
+ hda_debugfs_files[i].data = hda;
+
+ return drm_debugfs_create_files(hda_debugfs_files,
+ ARRAY_SIZE(hda_debugfs_files),
+ minor->debugfs_root, minor);
+}
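
The drm_info_list / drm_debugfs_create_files() pairing used here (and repeated
below for HDMI, HQVDP and the mixers) registers each entry as a read-only
seq_file under the DRM minor's debugfs directory; with debugfs mounted in its
conventional place the dump is then readable as /sys/kernel/debug/dri/0/hda
(the mount point and minor number are typical values, not something this patch
guarantees).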
+
/**
* Configure AWG, writing instructions
*
@@ -685,6 +782,12 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
goto err_sysfs;
}
+ /* force to disable hd dacs at startup */
+ hda_enable_hd_dacs(hda, false);
+
+ if (hda_debugfs_init(hda, drm_dev->primary))
+ DRM_ERROR("HDA debugfs setup failed\n");
+
return 0;
err_sysfs:
@@ -697,7 +800,10 @@ err_connector:
static void sti_hda_unbind(struct device *dev,
struct device *master, void *data)
{
- /* do nothing */
+ struct sti_hda *hda = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ hda_debugfs_exit(hda, drm_dev->primary);
}
static const struct component_ops sti_hda_ops = {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index cd501563c0cc..6ef0715bd5b9 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/hdmi.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
@@ -51,9 +52,18 @@
#define HDMI_SW_DI_2_PKT_WORD4 0x0614
#define HDMI_SW_DI_2_PKT_WORD5 0x0618
#define HDMI_SW_DI_2_PKT_WORD6 0x061C
+#define HDMI_SW_DI_3_HEAD_WORD 0x0620
+#define HDMI_SW_DI_3_PKT_WORD0 0x0624
+#define HDMI_SW_DI_3_PKT_WORD1 0x0628
+#define HDMI_SW_DI_3_PKT_WORD2 0x062C
+#define HDMI_SW_DI_3_PKT_WORD3 0x0630
+#define HDMI_SW_DI_3_PKT_WORD4 0x0634
+#define HDMI_SW_DI_3_PKT_WORD5 0x0638
+#define HDMI_SW_DI_3_PKT_WORD6 0x063C
#define HDMI_IFRAME_SLOT_AVI 1
#define HDMI_IFRAME_SLOT_AUDIO 2
+#define HDMI_IFRAME_SLOT_VENDOR 3
#define XCAT(prefix, x, suffix) prefix ## x ## suffix
#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
@@ -65,6 +75,8 @@
#define HDMI_SW_DI_N_PKT_WORD5(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD5)
#define HDMI_SW_DI_N_PKT_WORD6(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD6)
+#define HDMI_SW_DI_MAX_WORD 7
+
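
The two-level XCAT indirection lets the slot argument expand before the token
paste, so the new slot-3 registers resolve at compile time. Expansion shown as
comments (sketch):

    /* HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR)
     *   -> XCAT(HDMI_SW_DI_, 3, _HEAD_WORD)   after argument expansion
     *   -> HDMI_SW_DI_3_HEAD_WORD             i.e. 0x0620
     */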
#define HDMI_IFRAME_DISABLED 0x0
#define HDMI_IFRAME_SINGLE_SHOT 0x1
#define HDMI_IFRAME_FIELD 0x2
@@ -117,6 +129,8 @@ struct sti_hdmi_connector {
struct drm_connector drm_connector;
struct drm_encoder *encoder;
struct sti_hdmi *hdmi;
+ struct drm_property *colorspace_property;
+ struct drm_property *hdmi_mode_property;
};
#define to_sti_hdmi_connector(x) \
@@ -217,8 +231,10 @@ static void hdmi_config(struct sti_hdmi *hdmi)
/* Clear overrun and underrun fifo */
conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR;
- /* Enable HDMI mode not DVI */
- conf |= HDMI_CFG_HDMI_NOT_DVI | HDMI_CFG_ESS_NOT_OESS;
+ /* Select encryption type and the framing mode */
+ conf |= HDMI_CFG_ESS_NOT_OESS;
+ if (hdmi->hdmi_mode == HDMI_MODE_HDMI)
+ conf |= HDMI_CFG_HDMI_NOT_DVI;
/* Enable sink term detection */
conf |= HDMI_CFG_SINK_TERM_DET_EN;
@@ -241,6 +257,47 @@ static void hdmi_config(struct sti_hdmi *hdmi)
hdmi_write(hdmi, conf, HDMI_CFG);
}
+/*
+ * Helper to reset an infoframe slot
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ * @slot: infoframe to reset
+ */
+static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
+ u32 slot)
+{
+ u32 val, i;
+ u32 head_offset, pack_offset;
+
+ switch (slot) {
+ case HDMI_IFRAME_SLOT_AVI:
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
+ break;
+ case HDMI_IFRAME_SLOT_AUDIO:
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
+ break;
+ case HDMI_IFRAME_SLOT_VENDOR:
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_VENDOR);
+ break;
+ default:
+ DRM_ERROR("unsupported infoframe slot: %#x\n", slot);
+ return;
+ }
+
+ /* Disable transmission for the selected slot */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, slot);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ /* Reset info frame registers */
+ hdmi_write(hdmi, 0x0, head_offset);
+ for (i = 0; i < HDMI_SW_DI_MAX_WORD; i++)
+ hdmi_write(hdmi, 0x0, pack_offset + i * sizeof(u32));
+}
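
HDMI_IFRAME_CFG_DI_N() itself is outside this hunk; judging from the
hdmi_dbg_sw_di_cfg() decoder added further down, each data-island slot owns a
4-bit field of HDMI_SW_DI_CFG. A hedged reconstruction (an assumption, not the
driver header):

    /* assumed layout, inferred from the debugfs decoder below */
    #define HDMI_IFRAME_CFG_DI_N(mode, n) ((mode) << (((n) - 1) * 4))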
+
/**
* Helper to concatenate infoframe in 32 bits word
*
@@ -266,12 +323,13 @@ static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
* @data: infoframe to write
* @size: size to write
*/
-static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
+static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi,
+ const u8 *data,
+ size_t size)
{
const u8 *ptr = data;
u32 val, slot, mode, i;
u32 head_offset, pack_offset;
- size_t size;
switch (*ptr) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -279,17 +337,19 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
mode = HDMI_IFRAME_FIELD;
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI);
- size = HDMI_AVI_INFOFRAME_SIZE;
break;
-
case HDMI_INFOFRAME_TYPE_AUDIO:
slot = HDMI_IFRAME_SLOT_AUDIO;
mode = HDMI_IFRAME_FRAME;
head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AUDIO);
pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AUDIO);
- size = HDMI_AUDIO_INFOFRAME_SIZE;
break;
-
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ slot = HDMI_IFRAME_SLOT_VENDOR;
+ mode = HDMI_IFRAME_FRAME;
+ head_offset = HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_VENDOR);
+ pack_offset = HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_VENDOR);
+ break;
default:
DRM_ERROR("unsupported infoframe type: %#x\n", *ptr);
return;
@@ -308,8 +368,9 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
/*
* Each subpack contains 4 bytes
* The First Bytes of the first subpacket must contain the checksum
- * Packet size in increase by one.
+ * Packet size is increased by one.
*/
+ size = size - HDMI_INFOFRAME_HEADER_SIZE + 1;
for (i = 0; i < size; i += sizeof(u32)) {
size_t num;
@@ -321,7 +382,7 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi, const u8 *data)
/* Enable transmission slot for updated infoframe */
val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
- val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, slot);
+ val |= HDMI_IFRAME_CFG_DI_N(mode, slot);
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
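
hdmi_infoframe_subpack() (its signature is visible in the hunk header above)
concatenates up to four payload bytes into one register word. A standalone
sketch, assuming byte 0 lands in the low byte:

    #include <stddef.h>
    #include <stdint.h>

    static uint32_t subpack(const uint8_t *ptr, size_t size)
    {
        uint32_t value = 0;
        size_t i;

        /* last byte first, so ptr[0] ends up in bits 7:0 */
        for (i = size; i > 0; i--)
            value = (value << 8) | ptr[i - 1];
        return value;
    }
    /* e.g. subpack((uint8_t []){0xAA, 0xBB, 0xCC, 0xDD}, 4) == 0xDDCCBBAA */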
@@ -352,7 +413,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
}
/* fixed infoframe configuration not linked to the mode */
- infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ infoframe.colorspace = hdmi->colorspace;
infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
@@ -362,7 +423,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
return ret;
}
- hdmi_infoframe_write_infopack(hdmi, buffer);
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
return 0;
}
@@ -398,7 +459,49 @@ static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
return ret;
}
- hdmi_infoframe_write_infopack(hdmi, buffer);
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+
+ return 0;
+}
+
+/*
+ * Prepare and configure the VS infoframe
+ *
+ * Vendor Specific infoframes are transmitted once per frame and
+ * contain vendor-specific information.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Returns a negative value on error
+ */
+#define HDMI_VENDOR_INFOFRAME_MAX_SIZE 6
+static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
+{
+ struct drm_display_mode *mode = &hdmi->mode;
+ struct hdmi_vendor_infoframe infoframe;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_VENDOR_INFOFRAME_MAX_SIZE];
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe, mode);
+ if (ret < 0) {
+ /*
+ * Reaching this statement does not mean the vendor infoframe
+ * failed: it just tells us that no vendor infoframe is
+ * needed for the selected mode. Only 4k and stereoscopic 3D
+ * modes require one, so simply return 0.
+ */
+ return 0;
+ }
+
+ ret = hdmi_vendor_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack VS infoframe: %d\n", ret);
+ return ret;
+ }
+
+ hdmi_infoframe_write_infopack(hdmi, buffer, ret);
return 0;
}
@@ -448,6 +551,172 @@ static void hdmi_swreset(struct sti_hdmi *hdmi)
clk_disable_unprepare(hdmi->clk_audio);
}
+#define DBGFS_PRINT_STR(str1, str2) seq_printf(s, "%-24s %s\n", str1, str2)
+#define DBGFS_PRINT_INT(str1, int2) seq_printf(s, "%-24s %d\n", str1, int2)
+#define DBGFS_DUMP(str, reg) seq_printf(s, "%s %-25s 0x%08X", str, #reg, \
+ hdmi_read(hdmi, reg))
+#define DBGFS_DUMP_DI(reg, slot) DBGFS_DUMP("\n", reg(slot))
+
+static void hdmi_dbg_cfg(struct seq_file *s, int val)
+{
+ int tmp;
+
+ seq_puts(s, "\t");
+ tmp = val & HDMI_CFG_HDMI_NOT_DVI;
+ DBGFS_PRINT_STR("mode:", tmp ? "HDMI" : "DVI");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_HDCP_EN;
+ DBGFS_PRINT_STR("HDCP:", tmp ? "enable" : "disable");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_ESS_NOT_OESS;
+ DBGFS_PRINT_STR("HDCP mode:", tmp ? "ESS enable" : "OESS enable");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_SINK_TERM_DET_EN;
+ DBGFS_PRINT_STR("Sink term detection:", tmp ? "enable" : "disable");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_H_SYNC_POL_NEG;
+ DBGFS_PRINT_STR("Hsync polarity:", tmp ? "inverted" : "normal");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_V_SYNC_POL_NEG;
+ DBGFS_PRINT_STR("Vsync polarity:", tmp ? "inverted" : "normal");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = val & HDMI_CFG_422_EN;
+ DBGFS_PRINT_STR("YUV422 format:", tmp ? "enable" : "disable");
+}
+
+static void hdmi_dbg_sta(struct seq_file *s, int val)
+{
+ int tmp;
+
+ seq_puts(s, "\t");
+ tmp = (val & HDMI_STA_DLL_LCK);
+ DBGFS_PRINT_STR("pll:", tmp ? "locked" : "not locked");
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_STA_HOT_PLUG);
+ DBGFS_PRINT_STR("hdmi cable:", tmp ? "connected" : "not connected");
+}
+
+static void hdmi_dbg_sw_di_cfg(struct seq_file *s, int val)
+{
+ int tmp;
+ char *const en_di[] = {"no transmission",
+ "single transmission",
+ "once every field",
+ "once every frame"};
+
+ seq_puts(s, "\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 1));
+ DBGFS_PRINT_STR("Data island 1:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 2)) >> 4;
+ DBGFS_PRINT_STR("Data island 2:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 3)) >> 8;
+ DBGFS_PRINT_STR("Data island 3:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 4)) >> 12;
+ DBGFS_PRINT_STR("Data island 4:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 5)) >> 16;
+ DBGFS_PRINT_STR("Data island 5:", en_di[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, 6)) >> 20;
+ DBGFS_PRINT_STR("Data island 6:", en_di[tmp]);
+}
+
+static int hdmi_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
+ DBGFS_DUMP("\n", HDMI_CFG);
+ hdmi_dbg_cfg(s, hdmi_read(hdmi, HDMI_CFG));
+ DBGFS_DUMP("", HDMI_INT_EN);
+ DBGFS_DUMP("\n", HDMI_STA);
+ hdmi_dbg_sta(s, hdmi_read(hdmi, HDMI_STA));
+ DBGFS_DUMP("", HDMI_ACTIVE_VID_XMIN);
+ seq_puts(s, "\t");
+ DBGFS_PRINT_INT("Xmin:", hdmi_read(hdmi, HDMI_ACTIVE_VID_XMIN));
+ DBGFS_DUMP("", HDMI_ACTIVE_VID_XMAX);
+ seq_puts(s, "\t");
+ DBGFS_PRINT_INT("Xmax:", hdmi_read(hdmi, HDMI_ACTIVE_VID_XMAX));
+ DBGFS_DUMP("", HDMI_ACTIVE_VID_YMIN);
+ seq_puts(s, "\t");
+ DBGFS_PRINT_INT("Ymin:", hdmi_read(hdmi, HDMI_ACTIVE_VID_YMIN));
+ DBGFS_DUMP("", HDMI_ACTIVE_VID_YMAX);
+ seq_puts(s, "\t");
+ DBGFS_PRINT_INT("Ymax:", hdmi_read(hdmi, HDMI_ACTIVE_VID_YMAX));
+ DBGFS_DUMP("", HDMI_SW_DI_CFG);
+ hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
+
+ seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
+ HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_AVI);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_AVI);
+ seq_puts(s, "\n");
+ seq_printf(s, "\n AUDIO Infoframe (Data Island slot N=%d):",
+ HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_AUDIO);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_AUDIO);
+ seq_puts(s, "\n");
+ seq_printf(s, "\n VENDOR SPECIFIC Infoframe (Data Island slot N=%d):",
+ HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD0, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD1, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD2, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD3, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD4, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD5, HDMI_IFRAME_SLOT_VENDOR);
+ DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list hdmi_debugfs_files[] = {
+ { "hdmi", hdmi_dbg_show, 0, NULL },
+};
+
+static void hdmi_debugfs_exit(struct sti_hdmi *hdmi, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(hdmi_debugfs_files,
+ ARRAY_SIZE(hdmi_debugfs_files),
+ minor);
+}
+
+static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
+ hdmi_debugfs_files[i].data = hdmi;
+
+ return drm_debugfs_create_files(hdmi_debugfs_files,
+ ARRAY_SIZE(hdmi_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
static void sti_hdmi_disable(struct drm_bridge *bridge)
{
struct sti_hdmi *hdmi = bridge->driver_private;
@@ -468,6 +737,11 @@ static void sti_hdmi_disable(struct drm_bridge *bridge)
/* Stop the phy */
hdmi->phy_ops->stop(hdmi);
+ /* Reset info frame transmission */
+ hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_AVI);
+ hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_AUDIO);
+ hdmi_infoframe_reset(hdmi, HDMI_IFRAME_SLOT_VENDOR);
+
/* Set the default channel data to be a dark red */
hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT);
hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT);
@@ -523,6 +797,10 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
if (hdmi_audio_infoframe_config(hdmi))
DRM_ERROR("Unable to configure AUDIO infoframe\n");
+ /* Program VS infoframe */
+ if (hdmi_vendor_infoframe_config(hdmi))
+ DRM_ERROR("Unable to configure VS infoframe\n");
+
/* Sw reset */
hdmi_swreset(hdmi);
}
@@ -664,12 +942,97 @@ static void sti_hdmi_connector_destroy(struct drm_connector *connector)
kfree(hdmi_connector);
}
+static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
+ struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+ struct drm_property *prop;
+
+ /* colorspace property */
+ hdmi->colorspace = DEFAULT_COLORSPACE_MODE;
+ prop = drm_property_create_enum(drm_dev, 0, "colorspace",
+ colorspace_mode_names,
+ ARRAY_SIZE(colorspace_mode_names));
+ if (!prop) {
+ DRM_ERROR("fails to create colorspace property\n");
+ return;
+ }
+ hdmi_connector->colorspace_property = prop;
+ drm_object_attach_property(&connector->base, prop, hdmi->colorspace);
+
+ /* hdmi_mode property */
+ hdmi->hdmi_mode = DEFAULT_HDMI_MODE;
+ prop = drm_property_create_enum(drm_dev, 0, "hdmi_mode",
+ hdmi_mode_names,
+ ARRAY_SIZE(hdmi_mode_names));
+ if (!prop) {
+ DRM_ERROR("fails to create colorspace property\n");
+ return;
+ }
+ hdmi_connector->hdmi_mode_property = prop;
+ drm_object_attach_property(&connector->base, prop, hdmi->hdmi_mode);
+}
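
Once attached, both properties go through the generic atomic property path, so
no driver-specific ioctl is needed; for example, with libdrm's modetest tool
the framing can be switched to DVI via "modetest -w 35:hdmi_mode:1", where 35
is a made-up connector object id and value 1 maps to "dvi" in the enum table
added to sti_hdmi.h below.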
+
+static int
+sti_hdmi_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ if (property == hdmi_connector->colorspace_property) {
+ hdmi->colorspace = val;
+ return 0;
+ }
+
+ if (property == hdmi_connector->hdmi_mode_property) {
+ hdmi->hdmi_mode = val;
+ return 0;
+ }
+
+ DRM_ERROR("failed to set hdmi connector property\n");
+ return -EINVAL;
+}
+
+static int
+sti_hdmi_connector_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ if (property == hdmi_connector->colorspace_property) {
+ *val = hdmi->colorspace;
+ return 0;
+ }
+
+ if (property == hdmi_connector->hdmi_mode_property) {
+ *val = hdmi->hdmi_mode;
+ return 0;
+ }
+
+ DRM_ERROR("failed to get hdmi connector property\n");
+ return -EINVAL;
+}
+
static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = sti_hdmi_connector_detect,
.destroy = sti_hdmi_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
+ .set_property = drm_atomic_helper_connector_set_property,
+ .atomic_set_property = sti_hdmi_connector_set_property,
+ .atomic_get_property = sti_hdmi_connector_get_property,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -729,6 +1092,9 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
drm_connector_helper_add(drm_connector,
&sti_hdmi_connector_helper_funcs);
+ /* initialise property */
+ sti_hdmi_connector_init_property(drm_dev, drm_connector);
+
err = drm_connector_register(drm_connector);
if (err)
goto err_connector;
@@ -742,6 +1108,9 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
/* Enable default interrupts */
hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
+ if (hdmi_debugfs_init(hdmi, drm_dev->primary))
+ DRM_ERROR("HDMI debugfs setup failed\n");
+
return 0;
err_sysfs:
@@ -755,7 +1124,10 @@ err_connector:
static void sti_hdmi_unbind(struct device *dev,
struct device *master, void *data)
{
- /* do nothing */
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ hdmi_debugfs_exit(hdmi, drm_dev->primary);
}
static const struct component_ops sti_hdmi_ops = {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 3d22390e1f3b..ef3a94583bbd 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -7,15 +7,14 @@
#ifndef _STI_HDMI_H_
#define _STI_HDMI_H_
+#include <linux/hdmi.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#define HDMI_STA 0x0010
#define HDMI_STA_DLL_LCK BIT(5)
-
-#define HDMI_STA_HOT_PLUG_SHIFT 4
-#define HDMI_STA_HOT_PLUG (1 << HDMI_STA_HOT_PLUG_SHIFT)
+#define HDMI_STA_HOT_PLUG BIT(4)
struct sti_hdmi;
@@ -24,6 +23,27 @@ struct hdmi_phy_ops {
void (*stop)(struct sti_hdmi *hdmi);
};
+/* values for the framing mode property */
+enum sti_hdmi_modes {
+ HDMI_MODE_HDMI,
+ HDMI_MODE_DVI,
+};
+
+static const struct drm_prop_enum_list hdmi_mode_names[] = {
+ { HDMI_MODE_HDMI, "hdmi" },
+ { HDMI_MODE_DVI, "dvi" },
+};
+
+#define DEFAULT_HDMI_MODE HDMI_MODE_HDMI
+
+static const struct drm_prop_enum_list colorspace_mode_names[] = {
+ { HDMI_COLORSPACE_RGB, "rgb" },
+ { HDMI_COLORSPACE_YUV422, "yuv422" },
+ { HDMI_COLORSPACE_YUV444, "yuv444" },
+};
+
+#define DEFAULT_COLORSPACE_MODE HDMI_COLORSPACE_RGB
+
/**
* STI hdmi structure
*
@@ -44,6 +64,9 @@ struct hdmi_phy_ops {
* @wait_event: wait event
* @event_received: wait event status
* @reset: reset control of the hdmi phy
+ * @ddc_adapt: i2c ddc adapter
+ * @colorspace: current colorspace selected
+ * @hdmi_mode: select framing for HDMI or DVI
*/
struct sti_hdmi {
struct device dev;
@@ -64,6 +87,8 @@ struct sti_hdmi {
bool event_received;
struct reset_control *reset;
struct i2c_adapter *ddc_adapt;
+ enum hdmi_colorspace colorspace;
+ enum sti_hdmi_modes hdmi_mode;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 43861b52261d..e05b0dc523ff 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -4,14 +4,11 @@
* License terms: GNU General Public License (GPL), version 2
*/
-#include <linux/clk.h>
#include <linux/component.h>
#include <linux/firmware.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -329,8 +326,6 @@ struct sti_hqvdp_cmd {
* @reset: reset control
* @vtg_nb: notifier to handle VTG Vsync
* @btm_field_pending: is there any bottom field (interlaced frame) to display
- * @curr_field_count: number of field updates
- * @last_field_count: number of field updates since last fps measure
* @hqvdp_cmd: buffer of commands
* @hqvdp_cmd_paddr: physical address of hqvdp_cmd
* @vtg: vtg for main data path
@@ -346,10 +341,8 @@ struct sti_hqvdp {
struct reset_control *reset;
struct notifier_block vtg_nb;
bool btm_field_pending;
- unsigned int curr_field_count;
- unsigned int last_field_count;
void *hqvdp_cmd;
- dma_addr_t hqvdp_cmd_paddr;
+ u32 hqvdp_cmd_paddr;
struct sti_vtg *vtg;
bool xp70_initialized;
};
@@ -372,8 +365,8 @@ static const uint32_t hqvdp_supported_formats[] = {
*/
static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
{
- int curr_cmd, next_cmd;
- dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ u32 curr_cmd, next_cmd;
+ u32 cmd = hqvdp->hqvdp_cmd_paddr;
int i;
curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
@@ -400,8 +393,8 @@ static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
*/
static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
{
- int curr_cmd;
- dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ u32 curr_cmd;
+ u32 cmd = hqvdp->hqvdp_cmd_paddr;
unsigned int i;
curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
@@ -417,6 +410,246 @@ static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
}
/**
+ * sti_hqvdp_get_next_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for the next hqvdp_cmd that will be used by the FW.
+ *
+ * RETURNS:
+ * the offset of the next command that will be used.
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_next_cmd(struct sti_hqvdp *hqvdp)
+{
+ int next_cmd;
+ dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ unsigned int i;
+
+ next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ for (i = 0; i < NB_VDP_CMD; i++) {
+ if (cmd == next_cmd)
+ return i * sizeof(struct sti_hqvdp_cmd);
+
+ cmd += sizeof(struct sti_hqvdp_cmd);
+ }
+
+ return -1;
+}
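
All three lookup helpers map a physical address read back from a mailbox
register onto a byte offset into the hqvdp_cmd array. Worked numbers
(illustrative, not real addresses):

    /* with hqvdp_cmd_paddr = 0x40000000 and sizeof(struct sti_hqvdp_cmd)
     * = 0x1000, a mailbox readback of 0x40002000 matches at i = 2, so the
     * helper returns 2 * 0x1000 = 0x2000: the byte offset of command #2.
     */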
+
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(hqvdp->regs + reg))
+
+static const char *hqvdp_dbg_get_lut(u32 *coef)
+{
+ if (!memcmp(coef, coef_lut_a_legacy, 16))
+ return "LUT A";
+ if (!memcmp(coef, coef_lut_b, 16))
+ return "LUT B";
+ if (!memcmp(coef, coef_lut_c_y_legacy, 16))
+ return "LUT C Y";
+ if (!memcmp(coef, coef_lut_c_c_legacy, 16))
+ return "LUT C C";
+ if (!memcmp(coef, coef_lut_d_y_legacy, 16))
+ return "LUT D Y";
+ if (!memcmp(coef, coef_lut_d_c_legacy, 16))
+ return "LUT D C";
+ if (!memcmp(coef, coef_lut_e_y_legacy, 16))
+ return "LUT E Y";
+ if (!memcmp(coef, coef_lut_e_c_legacy, 16))
+ return "LUT E C";
+ if (!memcmp(coef, coef_lut_f_y_legacy, 16))
+ return "LUT F Y";
+ if (!memcmp(coef, coef_lut_f_c_legacy, 16))
+ return "LUT F C";
+ return "<UNKNOWN>";
+}
+
+static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c)
+{
+ int src_w, src_h, dst_w, dst_h;
+
+ seq_puts(s, "\n\tTOP:");
+ seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config);
+ switch (c->top.config) {
+ case TOP_CONFIG_PROGRESSIVE:
+ seq_puts(s, "\tProgressive");
+ break;
+ case TOP_CONFIG_INTER_TOP:
+ seq_puts(s, "\tInterlaced, top field");
+ break;
+ case TOP_CONFIG_INTER_BTM:
+ seq_puts(s, "\tInterlaced, bottom field");
+ break;
+ default:
+ seq_puts(s, "\t<UNKNOWN>");
+ break;
+ }
+
+ seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format);
+ seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma);
+ seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma);
+ seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch);
+ seq_printf(s, "\n\t %-20s 0x%08X", "CSrcPitch",
+ c->top.chroma_src_pitch);
+ seq_printf(s, "\n\t %-20s 0x%08X", "InputFrameSize",
+ c->top.input_frame_size);
+ seq_printf(s, "\t%dx%d",
+ c->top.input_frame_size & 0x0000FFFF,
+ c->top.input_frame_size >> 16);
+ seq_printf(s, "\n\t %-20s 0x%08X", "InputViewportSize",
+ c->top.input_viewport_size);
+ src_w = c->top.input_viewport_size & 0x0000FFFF;
+ src_h = c->top.input_viewport_size >> 16;
+ seq_printf(s, "\t%dx%d", src_w, src_h);
+
+ seq_puts(s, "\n\tHVSRC:");
+ seq_printf(s, "\n\t %-20s 0x%08X", "OutputPictureSize",
+ c->hvsrc.output_picture_size);
+ dst_w = c->hvsrc.output_picture_size & 0x0000FFFF;
+ dst_h = c->hvsrc.output_picture_size >> 16;
+ seq_printf(s, "\t%dx%d", dst_w, dst_h);
+ seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl);
+
+ seq_printf(s, "\n\t %-20s %s", "yh_coef",
+ hqvdp_dbg_get_lut(c->hvsrc.yh_coef));
+ seq_printf(s, "\n\t %-20s %s", "ch_coef",
+ hqvdp_dbg_get_lut(c->hvsrc.ch_coef));
+ seq_printf(s, "\n\t %-20s %s", "yv_coef",
+ hqvdp_dbg_get_lut(c->hvsrc.yv_coef));
+ seq_printf(s, "\n\t %-20s %s", "cv_coef",
+ hqvdp_dbg_get_lut(c->hvsrc.cv_coef));
+
+ seq_printf(s, "\n\t %-20s", "ScaleH");
+ if (dst_w > src_w)
+ seq_printf(s, " %d/1", dst_w / src_w);
+ else
+ seq_printf(s, " 1/%d", src_w / dst_w);
+
+ seq_printf(s, "\n\t %-20s", "tScaleV");
+ if (dst_h > src_h)
+ seq_printf(s, " %d/1", dst_h / src_h);
+ else
+ seq_printf(s, " 1/%d", src_h / dst_h);
+
+ seq_puts(s, "\n\tCSDI:");
+ seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config);
+ switch (c->csdi.config) {
+ case CSDI_CONFIG_PROG:
+ seq_puts(s, "Bypass");
+ break;
+ case CSDI_CONFIG_INTER_DIR:
+ seq_puts(s, "Deinterlace, directional");
+ break;
+ default:
+ seq_puts(s, "<UNKNOWN>");
+ break;
+ }
+
+ seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2);
+ seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config);
+}
+
+static int hqvdp_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int cmd, cmd_offset, infoxp70;
+ void *virt;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "%s: (vaddr = 0x%p)",
+ sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
+
+ DBGFS_DUMP(HQVDP_MBX_IRQ_TO_XP70);
+ DBGFS_DUMP(HQVDP_MBX_INFO_HOST);
+ DBGFS_DUMP(HQVDP_MBX_IRQ_TO_HOST);
+ DBGFS_DUMP(HQVDP_MBX_INFO_XP70);
+ infoxp70 = readl(hqvdp->regs + HQVDP_MBX_INFO_XP70);
+ seq_puts(s, "\tFirmware state: ");
+ if (infoxp70 & INFO_XP70_FW_READY)
+ seq_puts(s, "idle and ready");
+ else if (infoxp70 & INFO_XP70_FW_PROCESSING)
+ seq_puts(s, "processing a picture");
+ else if (infoxp70 & INFO_XP70_FW_INITQUEUES)
+ seq_puts(s, "programming queues");
+ else
+ seq_puts(s, "NOT READY");
+
+ DBGFS_DUMP(HQVDP_MBX_SW_RESET_CTRL);
+ DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL1);
+ if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
+ & STARTUP_CTRL1_RST_DONE)
+ seq_puts(s, "\tReset is done");
+ else
+ seq_puts(s, "\tReset is NOT done");
+ DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL2);
+ if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2)
+ & STARTUP_CTRL2_FETCH_EN)
+ seq_puts(s, "\tFetch is enabled");
+ else
+ seq_puts(s, "\tFetch is NOT enabled");
+ DBGFS_DUMP(HQVDP_MBX_GP_STATUS);
+ DBGFS_DUMP(HQVDP_MBX_NEXT_CMD);
+ DBGFS_DUMP(HQVDP_MBX_CURRENT_CMD);
+ DBGFS_DUMP(HQVDP_MBX_SOFT_VSYNC);
+ if (!(readl(hqvdp->regs + HQVDP_MBX_SOFT_VSYNC) & 3))
+ seq_puts(s, "\tHW Vsync");
+ else
+ seq_puts(s, "\tSW Vsync ?!?!");
+
+ /* Last command */
+ cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+ cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ seq_puts(s, "\n\n Last command: unknown");
+ } else {
+ virt = hqvdp->hqvdp_cmd + cmd_offset;
+ seq_printf(s, "\n\n Last command: address @ 0x%x (0x%p)",
+ cmd, virt);
+ hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
+ }
+
+ /* Next command */
+ cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+ cmd_offset = sti_hqvdp_get_next_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ seq_puts(s, "\n\n Next command: unknown");
+ } else {
+ virt = hqvdp->hqvdp_cmd + cmd_offset;
+ seq_printf(s, "\n\n Next command address: @ 0x%x (0x%p)",
+ cmd, virt);
+ hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
+ }
+
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list hqvdp_debugfs_files[] = {
+ { "hqvdp", hqvdp_dbg_show, 0, NULL },
+};
+
+static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
+ hqvdp_debugfs_files[i].data = hqvdp;
+
+ return drm_debugfs_create_files(hqvdp_debugfs_files,
+ ARRAY_SIZE(hqvdp_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
+/**
* sti_hqvdp_update_hvsrc
* @orient: horizontal or vertical
* @scale: scaling/zoom factor
@@ -580,7 +813,7 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
top_cmd_offest = sti_hqvdp_get_curr_cmd(hqvdp);
if ((btm_cmd_offset == -1) || (top_cmd_offest == -1)) {
- DRM_ERROR("Cannot get cmds, skip btm field\n");
+ DRM_DEBUG_DRIVER("Warning: no cmd, will skip field\n");
return -EBUSY;
}
@@ -599,11 +832,12 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
hqvdp->regs + HQVDP_MBX_NEXT_CMD);
- hqvdp->curr_field_count++;
hqvdp->btm_field_pending = false;
dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
__func__, hqvdp->hqvdp_cmd_paddr);
+
+ sti_plane_update_fps(&hqvdp->plane, false, true);
}
return 0;
@@ -612,19 +846,21 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
{
int size;
+ dma_addr_t dma_addr;
hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
/* Allocate memory for the VDP commands */
size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
- hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
- &hqvdp->hqvdp_cmd_paddr,
- GFP_KERNEL | GFP_DMA);
+ hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
+ &dma_addr,
+ GFP_KERNEL | GFP_DMA);
if (!hqvdp->hqvdp_cmd) {
DRM_ERROR("Failed to allocate memory for VDP cmd\n");
return;
}
+ hqvdp->hqvdp_cmd_paddr = (u32)dma_addr;
memset(hqvdp->hqvdp_cmd, 0, size);
}
@@ -670,7 +906,7 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
DRM_DEBUG_DRIVER("\n");
if (hqvdp->xp70_initialized) {
- DRM_INFO("HQVDP XP70 already initialized\n");
+ DRM_DEBUG_DRIVER("HQVDP XP70 already initialized\n");
return;
}
@@ -775,53 +1011,131 @@ out:
release_firmware(firmware);
}
-static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
+ struct drm_plane_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
struct drm_crtc *crtc = state->crtc;
- struct sti_mixer *mixer = to_sti_mixer(crtc);
struct drm_framebuffer *fb = state->fb;
- struct drm_display_mode *mode = &crtc->mode;
- int dst_x = state->crtc_x;
- int dst_y = state->crtc_y;
- int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
- /* src_x are in 16.16 format */
- int src_x = state->src_x >> 16;
- int src_y = state->src_y >> 16;
- int src_w = state->src_w >> 16;
- int src_h = state->src_h >> 16;
bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
- struct drm_gem_cma_object *cma_obj;
- struct sti_hqvdp_cmd *cmd;
- int scale_h, scale_v;
- int cmd_offset;
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_x, src_y, src_w, src_h;
+
+ /* no need for further checks if the plane is being disabled */
+ if (!crtc || !fb)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ mode = &crtc_state->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+ src_w, src_h,
+ dst_w, dst_h)) {
+ DRM_ERROR("Scaling beyond HW capabilities\n");
+ return -EINVAL;
+ }
+
+ if (!drm_fb_cma_get_gem_obj(fb, 0)) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Input / output size
+ * Align to upper even value
+ */
+ dst_w = ALIGN(dst_w, 2);
+ dst_h = ALIGN(dst_h, 2);
+
+ if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+ (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+ (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+ (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+ DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+ src_w, src_h,
+ dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ if (first_prepare) {
+ /* Start HQVDP XP70 coprocessor */
+ sti_hqvdp_start_xp70(hqvdp);
+
+ /* Prevent VTG shutdown */
+ if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Failed to prepare/enable pix main clk\n");
+ return -EINVAL;
+ }
+
+ /* Register VTG Vsync callback to handle bottom fields */
+ if (sti_vtg_register_client(hqvdp->vtg,
+ &hqvdp->vtg_nb,
+ crtc)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -EINVAL;
+ }
+ }
DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
- crtc->base.id, sti_mixer_to_str(mixer),
+ crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
sti_plane_to_str(plane),
dst_w, dst_h, dst_x, dst_y,
src_w, src_h, src_x, src_y);
+ return 0;
+}
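
ALIGN(x, 2) rounds up to the next even value before the min/max bounds are
checked. Standalone check (the macro body matches the kernel's for a
power-of-two alignment):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        printf("%d %d\n", ALIGN(721, 2), ALIGN(720, 2)); /* prints: 722 720 */
        return 0;
    }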
+
+static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
+ struct drm_plane_state *oldstate)
+{
+ struct drm_plane_state *state = drm_plane->state;
+ struct sti_plane *plane = to_sti_plane(drm_plane);
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_display_mode *mode;
+ int dst_x, dst_y, dst_w, dst_h;
+ int src_x, src_y, src_w, src_h;
+ struct drm_gem_cma_object *cma_obj;
+ struct sti_hqvdp_cmd *cmd;
+ int scale_h, scale_v;
+ int cmd_offset;
+
+ if (!crtc || !fb)
+ return;
+
+ mode = &crtc->mode;
+ dst_x = state->crtc_x;
+ dst_y = state->crtc_y;
+ dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ /* src_x are in 16.16 format */
+ src_x = state->src_x >> 16;
+ src_y = state->src_y >> 16;
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
if (cmd_offset == -1) {
- DRM_ERROR("No available hqvdp_cmd now\n");
+ DRM_DEBUG_DRIVER("Warning: no cmd, will skip frame\n");
return;
}
cmd = hqvdp->hqvdp_cmd + cmd_offset;
- if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
- src_w, src_h,
- dst_w, dst_h)) {
- DRM_ERROR("Scaling beyond HW capabilities\n");
- return;
- }
-
/* Static parameters, defaulting to progressive mode */
cmd->top.config = TOP_CONFIG_PROGRESSIVE;
cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
@@ -836,10 +1150,6 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- if (!cma_obj) {
- DRM_ERROR("Can't get CMA GEM object for fb\n");
- return;
- }
DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
(char *)&fb->pixel_format,
@@ -860,16 +1170,6 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
dst_w = ALIGN(dst_w, 2);
dst_h = ALIGN(dst_h, 2);
- if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
- (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
- (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
- (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
- DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
- src_w, src_h,
- dst_w, dst_h);
- return;
- }
-
cmd->top.input_viewport_size = src_h << 16 | src_w;
cmd->top.input_frame_size = src_h << 16 | src_w;
cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
@@ -900,30 +1200,9 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
scale_v = SCALE_FACTOR * dst_h / src_h;
sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
- if (first_prepare) {
- /* Start HQVDP XP70 coprocessor */
- sti_hqvdp_start_xp70(hqvdp);
-
- /* Prevent VTG shutdown */
- if (clk_prepare_enable(hqvdp->clk_pix_main)) {
- DRM_ERROR("Failed to prepare/enable pix main clk\n");
- return;
- }
-
- /* Register VTG Vsync callback to handle bottom fields */
- if (sti_vtg_register_client(hqvdp->vtg,
- &hqvdp->vtg_nb,
- crtc)) {
- DRM_ERROR("Cannot register VTG notifier\n");
- return;
- }
- }
-
writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
hqvdp->regs + HQVDP_MBX_NEXT_CMD);
- hqvdp->curr_field_count++;
-
/* Interlaced : get ready to display the bottom field at next Vsync */
if (fb->flags & DRM_MODE_FB_INTERLACED)
hqvdp->btm_field_pending = true;
@@ -931,6 +1210,8 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
__func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+ sti_plane_update_fps(plane, true, true);
+
plane->status = STI_PLANE_UPDATED;
}
@@ -938,7 +1219,6 @@ static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
struct drm_plane_state *oldstate)
{
struct sti_plane *plane = to_sti_plane(drm_plane);
- struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
if (!drm_plane->crtc) {
DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
@@ -947,13 +1227,15 @@ static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
}
DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
- drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+ drm_plane->crtc->base.id,
+ sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
drm_plane->base.id, sti_plane_to_str(plane));
plane->status = STI_PLANE_DISABLING;
}
static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+ .atomic_check = sti_hqvdp_atomic_check,
.atomic_update = sti_hqvdp_atomic_update,
.atomic_disable = sti_hqvdp_atomic_disable,
};
@@ -983,6 +1265,9 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
+ if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
+ DRM_ERROR("HQVDP debugfs setup failed\n");
+
return &hqvdp->plane.drm_plane;
}
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 49db835dce03..e7425c38fc93 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -75,6 +75,145 @@ static inline void sti_mixer_reg_write(struct sti_mixer *mixer,
writel(val, mixer->regs + reg_id);
}
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ sti_mixer_reg_read(mixer, reg))
+
+static void mixer_dbg_ctl(struct seq_file *s, int val)
+{
+ unsigned int i;
+ int count = 0;
+ char *const disp_layer[] = {"BKG", "VID0", "VID1", "GDP0",
+ "GDP1", "GDP2", "GDP3"};
+
+ seq_puts(s, "\tEnabled: ");
+ for (i = 0; i < 7; i++) {
+ if (val & 1) {
+ seq_printf(s, "%s ", disp_layer[i]);
+ count++;
+ }
+ val = val >> 1;
+ }
+
+ val = val >> 2;
+ if (val & 1) {
+ seq_puts(s, "CURS ");
+ count++;
+ }
+ if (!count)
+ seq_puts(s, "Nothing");
+}
+
+static void mixer_dbg_crb(struct seq_file *s, int val)
+{
+ int i;
+
+ seq_puts(s, "\tDepth: ");
+ for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
+ switch (val & GAM_DEPTH_MASK_ID) {
+ case GAM_DEPTH_VID0_ID:
+ seq_puts(s, "VID0");
+ break;
+ case GAM_DEPTH_VID1_ID:
+ seq_puts(s, "VID1");
+ break;
+ case GAM_DEPTH_GDP0_ID:
+ seq_puts(s, "GDP0");
+ break;
+ case GAM_DEPTH_GDP1_ID:
+ seq_puts(s, "GDP1");
+ break;
+ case GAM_DEPTH_GDP2_ID:
+ seq_puts(s, "GDP2");
+ break;
+ case GAM_DEPTH_GDP3_ID:
+ seq_puts(s, "GDP3");
+ break;
+ default:
+ seq_puts(s, "---");
+ }
+
+ if (i < GAM_MIXER_NB_DEPTH_LEVEL - 1)
+ seq_puts(s, " < ");
+ val = val >> 3;
+ }
+}
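
GAM_MIXER_CRB therefore packs the whole z-order as consecutive 3-bit plane
ids, lowest depth first. A worked decode, assuming GAM_DEPTH_MASK_ID is 0x7
and the *_ID codes run VID0=1 .. GDP3=6 (assumed values, not the header's):

    /* val = 0x58D1: level 0 reads val & 7 = 1 (VID0), then val >>= 3 and the
     * next level is decoded, yielding VID0 < VID1 < GDP0 < GDP1 < GDP2.
     */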
+
+static void mixer_dbg_mxn(struct seq_file *s, void *addr)
+{
+ int i;
+
+ for (i = 1; i < 8; i++)
+ seq_printf(s, "-0x%08X", (int)readl(addr + i * 4));
+}
+
+static int mixer_dbg_show(struct seq_file *s, void *arg)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "%s: (vaddr = 0x%p)",
+ sti_mixer_to_str(mixer), mixer->regs);
+
+ DBGFS_DUMP(GAM_MIXER_CTL);
+ mixer_dbg_ctl(s, sti_mixer_reg_read(mixer, GAM_MIXER_CTL));
+ DBGFS_DUMP(GAM_MIXER_BKC);
+ DBGFS_DUMP(GAM_MIXER_BCO);
+ DBGFS_DUMP(GAM_MIXER_BCS);
+ DBGFS_DUMP(GAM_MIXER_AVO);
+ DBGFS_DUMP(GAM_MIXER_AVS);
+ DBGFS_DUMP(GAM_MIXER_CRB);
+ mixer_dbg_crb(s, sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
+ DBGFS_DUMP(GAM_MIXER_ACT);
+ DBGFS_DUMP(GAM_MIXER_MBP);
+ DBGFS_DUMP(GAM_MIXER_MX0);
+ mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list mixer0_debugfs_files[] = {
+ { "mixer_main", mixer_dbg_show, 0, NULL },
+};
+
+static struct drm_info_list mixer1_debugfs_files[] = {
+ { "mixer_aux", mixer_dbg_show, 0, NULL },
+};
+
+static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+{
+ unsigned int i;
+ struct drm_info_list *mixer_debugfs_files;
+ int nb_files;
+
+ switch (mixer->id) {
+ case STI_MIXER_MAIN:
+ mixer_debugfs_files = mixer0_debugfs_files;
+ nb_files = ARRAY_SIZE(mixer0_debugfs_files);
+ break;
+ case STI_MIXER_AUX:
+ mixer_debugfs_files = mixer1_debugfs_files;
+ nb_files = ARRAY_SIZE(mixer1_debugfs_files);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_files; i++)
+ mixer_debugfs_files[i].data = mixer;
+
+ return drm_debugfs_create_files(mixer_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
+}
+
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
{
u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
@@ -237,7 +376,9 @@ void sti_mixer_set_matrix(struct sti_mixer *mixer)
mixerColorSpaceMatIdentity[i]);
}
-struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+struct sti_mixer *sti_mixer_create(struct device *dev,
+ struct drm_device *drm_dev,
+ int id,
void __iomem *baseaddr)
{
struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
@@ -258,5 +399,8 @@ struct sti_mixer *sti_mixer_create(struct device *dev, int id,
DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
sti_mixer_to_str(mixer), mixer->regs);
+ if (mixer_debugfs_init(mixer, drm_dev->primary))
+ DRM_ERROR("MIXER debugfs setup failed\n");
+
return mixer;
}
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index efb1a9a5ba86..6f35fc086873 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -42,7 +42,9 @@ struct sti_mixer {
const char *sti_mixer_to_str(struct sti_mixer *mixer);
-struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+struct sti_mixer *sti_mixer_create(struct device *dev,
+ struct drm_device *drm_dev,
+ int id,
void __iomem *baseaddr);
int sti_mixer_set_plane_status(struct sti_mixer *mixer,
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index 2e5c751910c5..f10c98d3f012 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -43,6 +43,69 @@ const char *sti_plane_to_str(struct sti_plane *plane)
}
}
+#define STI_FPS_INTERVAL_MS 3000
+
+static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
+{
+ struct timespec tmp_ts = timespec_sub(lhs, rhs);
+ u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
+
+ do_div(tmp_ns, NSEC_PER_MSEC);
+
+ return (u32)tmp_ns;
+}
+
+void sti_plane_update_fps(struct sti_plane *plane,
+ bool new_frame,
+ bool new_field)
+{
+ struct timespec now;
+ struct sti_fps_info *fps;
+ int fpks, fipks, ms_since_last, num_frames, num_fields;
+
+ getrawmonotonic(&now);
+
+ /* Compute number of frame updates */
+ fps = &plane->fps_info;
+
+ if (new_field)
+ fps->curr_field_counter++;
+
+ /* do not perform the fps calculation if new_frame is false */
+ if (!new_frame)
+ return;
+
+ fps->curr_frame_counter++;
+ ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+ num_frames = fps->curr_frame_counter - fps->last_frame_counter;
+
+ if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
+ return;
+
+ fps->last_timestamp = now;
+ fps->last_frame_counter = fps->curr_frame_counter;
+ fpks = (num_frames * 1000000) / ms_since_last;
+ snprintf(plane->fps_info.fps_str, FPS_LENGTH, "%-6s @ %d.%.3d fps",
+ sti_plane_to_str(plane), fpks / 1000, fpks % 1000);
+
+ if (fps->curr_field_counter) {
+ /* Compute number of field updates */
+ num_fields = fps->curr_field_counter - fps->last_field_counter;
+ fps->last_field_counter = fps->curr_field_counter;
+ fipks = (num_fields * 1000000) / ms_since_last;
+ snprintf(plane->fps_info.fips_str,
+ FPS_LENGTH, " - %d.%.3d field/sec",
+ fipks / 1000, fipks % 1000);
+ } else {
+ plane->fps_info.fips_str[0] = '\0';
+ }
+
+ if (fps->output)
+ DRM_INFO("%s%s\n",
+ plane->fps_info.fps_str,
+ plane->fps_info.fips_str);
+}
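
The rate is held in milli-frames-per-second so the debug string gets three
decimals without floating point. Worked numbers (standalone, illustrative):

    #include <stdio.h>

    int main(void)
    {
        int num_frames = 180, ms_since_last = 3003;        /* ~3 s window */
        int fpks = (num_frames * 1000000) / ms_since_last; /* 59940 */

        printf("%d.%.3d fps\n", fpks / 1000, fpks % 1000); /* "59.940 fps" */
        return 0;
    }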
+
static void sti_plane_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index 86f1e6fc81b9..c50a3b9f5d37 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -50,6 +50,18 @@ enum sti_plane_status {
STI_PLANE_DISABLED,
};
+#define FPS_LENGTH 64
+struct sti_fps_info {
+ bool output;
+ unsigned int curr_frame_counter;
+ unsigned int last_frame_counter;
+ unsigned int curr_field_counter;
+ unsigned int last_field_counter;
+ struct timespec last_timestamp;
+ char fps_str[FPS_LENGTH];
+ char fips_str[FPS_LENGTH];
+};
+
/**
* STI plane structure
*
@@ -57,15 +69,20 @@ enum sti_plane_status {
* @desc: plane type & id
* @status: to know the status of the plane
* @zorder: plane z-order
+ * @fps_info: frame per second info
*/
struct sti_plane {
struct drm_plane drm_plane;
enum sti_plane_desc desc;
enum sti_plane_status status;
int zorder;
+ struct sti_fps_info fps_info;
};
const char *sti_plane_to_str(struct sti_plane *plane);
+void sti_plane_update_fps(struct sti_plane *plane,
+ bool new_frame,
+ bool new_field);
void sti_plane_init_property(struct sti_plane *plane,
enum drm_plane_type type);
#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 24a3735b88fd..2c99016443e5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include "sti_crtc.h"
+#include "sti_vtg.h"
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
@@ -85,19 +86,7 @@
#define TVO_VIP_SEL_INPUT_BYPASSED 1
#define TVO_SYNC_MAIN_VTG_SET_REF 0x00
-#define TVO_SYNC_MAIN_VTG_SET_1 0x01
-#define TVO_SYNC_MAIN_VTG_SET_2 0x02
-#define TVO_SYNC_MAIN_VTG_SET_3 0x03
-#define TVO_SYNC_MAIN_VTG_SET_4 0x04
-#define TVO_SYNC_MAIN_VTG_SET_5 0x05
-#define TVO_SYNC_MAIN_VTG_SET_6 0x06
#define TVO_SYNC_AUX_VTG_SET_REF 0x10
-#define TVO_SYNC_AUX_VTG_SET_1 0x11
-#define TVO_SYNC_AUX_VTG_SET_2 0x12
-#define TVO_SYNC_AUX_VTG_SET_3 0x13
-#define TVO_SYNC_AUX_VTG_SET_4 0x14
-#define TVO_SYNC_AUX_VTG_SET_5 0x15
-#define TVO_SYNC_AUX_VTG_SET_6 0x16
#define TVO_SYNC_HD_DCS_SHIFT 8
@@ -106,6 +95,8 @@
#define ENCODER_CRTC_MASK (BIT(0) | BIT(1))
+#define TVO_MIN_HD_HEIGHT 720
+
/* enum listing the supported output data format */
enum sti_tvout_video_out_type {
STI_TVOUT_VIDEO_OUT_RGB,
@@ -269,6 +260,31 @@ static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
}
/**
+ * Set preformatter matrix
+ *
+ * @tvout: tvout structure
+ * @mode: display mode structure
+ */
+static void tvout_preformatter_set_matrix(struct sti_tvout *tvout,
+ struct drm_display_mode *mode)
+{
+ unsigned int i;
+ const u32 *pf_matrix;
+
+ if (mode->vdisplay >= TVO_MIN_HD_HEIGHT)
+ pf_matrix = rgb_to_ycbcr_709;
+ else
+ pf_matrix = rgb_to_ycbcr_601;
+
+ for (i = 0; i < 8; i++) {
+ tvout_write(tvout, *(pf_matrix + i),
+ TVO_CSC_MAIN_M0 + (i * 4));
+ tvout_write(tvout, *(pf_matrix + i),
+ TVO_CSC_AUX_M0 + (i * 4));
+ }
+}
+
+/**
* Start VIP block for DVO output
*
* @tvout: pointer on tvout structure
@@ -280,24 +296,26 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
struct device_node *node = tvout->dev->of_node;
bool sel_input_logic_inverted = false;
u32 tvo_in_vid_format;
- int val;
+ int val, tmp;
dev_dbg(tvout->dev, "%s\n", __func__);
if (main_path) {
DRM_DEBUG_DRIVER("main vip for DVO\n");
- /* Select the input sync for dvo = VTG set 4 */
- val = TVO_SYNC_MAIN_VTG_SET_4 << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
- val |= TVO_SYNC_MAIN_VTG_SET_4 << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
- val |= TVO_SYNC_MAIN_VTG_SET_4;
+ /* Select the input sync for dvo */
+ tmp = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_DVO;
+ val = tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
+ val |= tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
+ val |= tmp;
tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for DVO\n");
- /* Select the input sync for dvo = VTG set 4 */
- val = TVO_SYNC_AUX_VTG_SET_4 << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
- val |= TVO_SYNC_AUX_VTG_SET_4 << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
- val |= TVO_SYNC_AUX_VTG_SET_4;
+ /* Select the input sync for dvo */
+ tmp = TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_DVO;
+ val = tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT;
+ val |= tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT;
+ val |= tmp;
tvout_write(tvout, val, TVO_DVO_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
@@ -308,9 +326,8 @@ static void tvout_dvo_start(struct sti_tvout *tvout, bool main_path)
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* Set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_DVO,
- TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+ /* Set clipping mode */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_DVO, TVO_VIP_CLIP_DISABLED);
/* Set round mode (rounded to 8-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_DVO, TVO_VIP_RND_8BIT_ROUNDED);
@@ -345,13 +362,17 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
if (main_path) {
DRM_DEBUG_DRIVER("main vip for hdmi\n");
- /* select the input sync for hdmi = VTG set 1 */
- tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ /* select the input sync for hdmi */
+ tvout_write(tvout,
+ TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDMI,
+ TVO_HDMI_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for hdmi\n");
- /* select the input sync for hdmi = VTG set 1 */
- tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ /* select the input sync for hdmi */
+ tvout_write(tvout,
+ TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDMI,
+ TVO_HDMI_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
@@ -361,9 +382,8 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
- TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+ /* set clipping mode */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI, TVO_VIP_CLIP_DISABLED);
/* set round mode (rounded to 8-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
@@ -397,13 +417,19 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
dev_dbg(tvout->dev, "%s\n", __func__);
if (main_path) {
- val = TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
- val |= TVO_SYNC_MAIN_VTG_SET_3;
+ DRM_DEBUG_DRIVER("main vip for HDF\n");
+ /* Select the input sync for HD analog and HD DCS */
+ val = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDDCS;
+ val = val << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_HDF;
tvout_write(tvout, val, TVO_HD_SYNC_SEL);
tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
- val = TVO_SYNC_AUX_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
- val |= TVO_SYNC_AUX_VTG_SET_3;
+ DRM_DEBUG_DRIVER("aux vip for HDF\n");
+ /* Select the input sync for HD analog and HD DCS */
+ val = TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDDCS;
+ val = val << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_AUX_VTG_SET_REF | VTG_SYNC_ID_HDF;
tvout_write(tvout, val, TVO_HD_SYNC_SEL);
tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
@@ -414,8 +440,8 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* set clipping mode (EAV/SAV clipping) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_EAV_SAV);
+ /* set clipping mode */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_DISABLED);
/* set round mode (rounded to 10-bit per component) */
tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
@@ -436,6 +462,157 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
}
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(tvout->regs + reg))
+
+static void tvout_dbg_vip(struct seq_file *s, int val)
+{
+ int r, g, b, tmp, mask;
+ char *const reorder[] = {"Y_G", "Cb_B", "Cr_R"};
+ char *const clipping[] = {"No", "EAV/SAV", "Limited range RGB/Y",
+ "Limited range Cb/Cr", "decided by register"};
+ char *const round[] = {"8-bit", "10-bit", "12-bit"};
+ char *const input_sel[] = {"Main (color matrix enabled)",
+ "Main (color matrix by-passed)",
+ "", "", "", "", "", "",
+ "Aux (color matrix enabled)",
+ "Aux (color matrix by-passed)",
+ "", "", "", "", "", "Force value"};
+
+ seq_puts(s, "\t");
+ mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT;
+ r = (val & mask) >> TVO_VIP_REORDER_R_SHIFT;
+ mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT;
+ g = (val & mask) >> TVO_VIP_REORDER_G_SHIFT;
+ mask = TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT;
+ b = (val & mask) >> TVO_VIP_REORDER_B_SHIFT;
+ seq_printf(s, "%-24s %s->%s %s->%s %s->%s\n", "Reorder:",
+ reorder[r], reorder[TVO_VIP_REORDER_CR_R_SEL],
+ reorder[g], reorder[TVO_VIP_REORDER_Y_G_SEL],
+ reorder[b], reorder[TVO_VIP_REORDER_CB_B_SEL]);
+ seq_puts(s, "\t\t\t\t\t");
+ mask = TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT;
+ tmp = (val & mask) >> TVO_VIP_CLIP_SHIFT;
+ seq_printf(s, "%-24s %s\n", "Clipping:", clipping[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ mask = TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT;
+ tmp = (val & mask) >> TVO_VIP_RND_SHIFT;
+ seq_printf(s, "%-24s input data rounded to %s per component\n",
+ "Round:", round[tmp]);
+ seq_puts(s, "\t\t\t\t\t");
+ tmp = (val & TVO_VIP_SEL_INPUT_MASK);
+ seq_printf(s, "%-24s %s", "Input selection:", input_sel[tmp]);
+}
+
+static void tvout_dbg_hd_dac_cfg(struct seq_file *s, int val)
+{
+ seq_printf(s, "\t%-24s %s", "HD DAC:",
+ val & 1 ? "disabled" : "enabled");
+}
+
+static int tvout_dbg_show(struct seq_file *s, void *data)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
+
+ seq_puts(s, "\n\n HDMI encoder: ");
+ crtc = tvout->hdmi->crtc;
+ if (crtc) {
+ seq_printf(s, "connected to %s path",
+ sti_crtc_is_main(crtc) ? "main" : "aux");
+ DBGFS_DUMP(TVO_HDMI_SYNC_SEL);
+ DBGFS_DUMP(TVO_VIP_HDMI);
+ tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_HDMI));
+ } else {
+ seq_puts(s, "disabled");
+ }
+
+ seq_puts(s, "\n\n DVO encoder: ");
+ crtc = tvout->dvo->crtc;
+ if (crtc) {
+ seq_printf(s, "connected to %s path",
+ sti_crtc_is_main(crtc) ? "main" : "aux");
+ DBGFS_DUMP(TVO_DVO_SYNC_SEL);
+ DBGFS_DUMP(TVO_DVO_CONFIG);
+ DBGFS_DUMP(TVO_VIP_DVO);
+ tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_DVO));
+ } else {
+ seq_puts(s, "disabled");
+ }
+
+ seq_puts(s, "\n\n HDA encoder: ");
+ crtc = tvout->hda->crtc;
+ if (crtc) {
+ seq_printf(s, "connected to %s path",
+ sti_crtc_is_main(crtc) ? "main" : "aux");
+ DBGFS_DUMP(TVO_HD_SYNC_SEL);
+ DBGFS_DUMP(TVO_HD_DAC_CFG_OFF);
+ tvout_dbg_hd_dac_cfg(s,
+ readl(tvout->regs + TVO_HD_DAC_CFG_OFF));
+ DBGFS_DUMP(TVO_VIP_HDF);
+ tvout_dbg_vip(s, readl(tvout->regs + TVO_VIP_HDF));
+ } else {
+ seq_puts(s, "disabled");
+ }
+
+ seq_puts(s, "\n\n main path configuration");
+ DBGFS_DUMP(TVO_CSC_MAIN_M0);
+ DBGFS_DUMP(TVO_CSC_MAIN_M1);
+ DBGFS_DUMP(TVO_CSC_MAIN_M2);
+ DBGFS_DUMP(TVO_CSC_MAIN_M3);
+ DBGFS_DUMP(TVO_CSC_MAIN_M4);
+ DBGFS_DUMP(TVO_CSC_MAIN_M5);
+ DBGFS_DUMP(TVO_CSC_MAIN_M6);
+ DBGFS_DUMP(TVO_CSC_MAIN_M7);
+ DBGFS_DUMP(TVO_MAIN_IN_VID_FORMAT);
+
+ seq_puts(s, "\n\n auxiliary path configuration");
+ DBGFS_DUMP(TVO_CSC_AUX_M0);
+ DBGFS_DUMP(TVO_CSC_AUX_M1);
+ DBGFS_DUMP(TVO_CSC_AUX_M2);
+ DBGFS_DUMP(TVO_CSC_AUX_M3);
+ DBGFS_DUMP(TVO_CSC_AUX_M4);
+ DBGFS_DUMP(TVO_CSC_AUX_M5);
+ DBGFS_DUMP(TVO_CSC_AUX_M6);
+ DBGFS_DUMP(TVO_CSC_AUX_M7);
+ DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list tvout_debugfs_files[] = {
+ { "tvout", tvout_dbg_show, 0, NULL },
+};
+
+static void tvout_debugfs_exit(struct sti_tvout *tvout, struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(tvout_debugfs_files,
+ ARRAY_SIZE(tvout_debugfs_files),
+ minor);
+}
+
+static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
+ tvout_debugfs_files[i].data = tvout;
+
+ return drm_debugfs_create_files(tvout_debugfs_files,
+ ARRAY_SIZE(tvout_debugfs_files),
+ minor->debugfs_root, minor);
+}
+
static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
@@ -446,10 +623,6 @@ static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
{
}
-static void sti_tvout_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
{
struct sti_tvout_encoder *sti_encoder = to_sti_tvout_encoder(encoder);
@@ -462,10 +635,12 @@ static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
.destroy = sti_tvout_encoder_destroy,
};
-static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
+static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
+ tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
}
@@ -480,8 +655,7 @@ static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs sti_dvo_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
.mode_set = sti_tvout_encoder_mode_set,
- .prepare = sti_tvout_encoder_prepare,
- .commit = sti_dvo_encoder_commit,
+ .enable = sti_dvo_encoder_enable,
.disable = sti_dvo_encoder_disable,
};
@@ -512,10 +686,12 @@ sti_tvout_create_dvo_encoder(struct drm_device *dev,
return drm_encoder;
}
-static void sti_hda_encoder_commit(struct drm_encoder *encoder)
+static void sti_hda_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
+ tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
}
@@ -533,8 +709,7 @@ static void sti_hda_encoder_disable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
.mode_set = sti_tvout_encoder_mode_set,
- .prepare = sti_tvout_encoder_prepare,
- .commit = sti_hda_encoder_commit,
+ .commit = sti_hda_encoder_enable,
.disable = sti_hda_encoder_disable,
};
@@ -563,10 +738,12 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
return drm_encoder;
}
-static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
+static void sti_hdmi_encoder_enable(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
+ tvout_preformatter_set_matrix(tvout, &encoder->crtc->mode);
+
tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
}
@@ -581,8 +758,7 @@ static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = {
.dpms = sti_tvout_encoder_dpms,
.mode_set = sti_tvout_encoder_mode_set,
- .prepare = sti_tvout_encoder_prepare,
- .commit = sti_hdmi_encoder_commit,
+ .commit = sti_hdmi_encoder_enable,
.disable = sti_hdmi_encoder_disable,
};
@@ -628,26 +804,24 @@ static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
if (tvout->hda)
drm_encoder_cleanup(tvout->hda);
tvout->hda = NULL;
+
+ if (tvout->dvo)
+ drm_encoder_cleanup(tvout->dvo);
+ tvout->dvo = NULL;
}
static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
{
struct sti_tvout *tvout = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
- unsigned int i;
tvout->drm_dev = drm_dev;
- /* set preformatter matrix */
- for (i = 0; i < 8; i++) {
- tvout_write(tvout, rgb_to_ycbcr_601[i],
- TVO_CSC_MAIN_M0 + (i * 4));
- tvout_write(tvout, rgb_to_ycbcr_601[i],
- TVO_CSC_AUX_M0 + (i * 4));
- }
-
sti_tvout_create_encoders(drm_dev, tvout);
+ if (tvout_debugfs_init(tvout, drm_dev->primary))
+ DRM_ERROR("TVOUT debugfs setup failed\n");
+
return 0;
}
@@ -655,8 +829,11 @@ static void sti_tvout_unbind(struct device *dev, struct device *master,
void *data)
{
struct sti_tvout *tvout = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
sti_tvout_destroy_encoders(tvout);
+
+ tvout_debugfs_exit(tvout, drm_dev->primary);
}
static const struct component_ops sti_tvout_ops = {
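The VTG_SET_REF rework above builds each sync-select value from a path base (TVO_SYNC_MAIN_VTG_SET_REF 0x00 or TVO_SYNC_AUX_VTG_SET_REF 0x10) ORed with a VTG sync output id, rather than a hard-coded set number. A sketch of the DVO case; the pad-sync shift values here are assumed for illustration, only the composition pattern comes from tvout_dvo_start():

	#define TVO_SYNC_MAIN_VTG_SET_REF	0x00
	#define VTG_SYNC_ID_DVO			4
	/* shifts assumed for illustration; see the real defines in sti_tvout.c */
	#define TVO_SYNC_DVO_PAD_VSYNC_SHIFT	8
	#define TVO_SYNC_DVO_PAD_HSYNC_SHIFT	16

	static unsigned int tvo_dvo_sync_sel(void)
	{
		unsigned int tmp = TVO_SYNC_MAIN_VTG_SET_REF | VTG_SYNC_ID_DVO; /* 0x04 */

		/* the same id feeds the vsync pad, hsync pad and data fields */
		return (tmp << TVO_SYNC_DVO_PAD_VSYNC_SHIFT) |
		       (tmp << TVO_SYNC_DVO_PAD_HSYNC_SHIFT) | tmp;
	}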
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index a8254cc362a1..5a2c5dc3687b 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -42,6 +42,104 @@
#define VID_MPR1_BT709 0x0AC50000
#define VID_MPR2_BT709 0x07150545
#define VID_MPR3_BT709 0x00000AE8
+/* YCbCr to RGB BT601:
+ * R = Y+1.3711Cr
+ * G = Y-0.6992Cr-0.3359Cb
+ * B = Y+1.7344Cb
+ */
+#define VID_MPR0_BT601 0x0A800000
+#define VID_MPR1_BT601 0x0AAF0000
+#define VID_MPR2_BT601 0x094E0754
+#define VID_MPR3_BT601 0x00000ADD
+
+#define VID_MIN_HD_HEIGHT 720
+
+#define DBGFS_DUMP(reg) seq_printf(s, "\n %-25s 0x%08X", #reg, \
+ readl(vid->regs + reg))
+
+static void vid_dbg_ctl(struct seq_file *s, int val)
+{
+ val = val >> 30;
+ seq_puts(s, "\t");
+
+ if (!(val & 1))
+ seq_puts(s, "NOT ");
+ seq_puts(s, "ignored on main mixer - ");
+
+ if (!(val & 2))
+ seq_puts(s, "NOT ");
+ seq_puts(s, "ignored on aux mixer");
+}
+
+static void vid_dbg_vpo(struct seq_file *s, int val)
+{
+ seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void vid_dbg_vps(struct seq_file *s, int val)
+{
+ seq_printf(s, "\txds:%4d\tyds:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+}
+
+static void vid_dbg_mst(struct seq_file *s, int val)
+{
+ if (val & 1)
+ seq_puts(s, "\tBUFFER UNDERFLOW!");
+}
+
+static int vid_dbg_show(struct seq_file *s, void *arg)
+{
+ struct drm_info_node *node = s->private;
+ struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
+ struct drm_device *dev = node->minor->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
+
+ DBGFS_DUMP(VID_CTL);
+ vid_dbg_ctl(s, readl(vid->regs + VID_CTL));
+ DBGFS_DUMP(VID_ALP);
+ DBGFS_DUMP(VID_CLF);
+ DBGFS_DUMP(VID_VPO);
+ vid_dbg_vpo(s, readl(vid->regs + VID_VPO));
+ DBGFS_DUMP(VID_VPS);
+ vid_dbg_vps(s, readl(vid->regs + VID_VPS));
+ DBGFS_DUMP(VID_KEY1);
+ DBGFS_DUMP(VID_KEY2);
+ DBGFS_DUMP(VID_MPR0);
+ DBGFS_DUMP(VID_MPR1);
+ DBGFS_DUMP(VID_MPR2);
+ DBGFS_DUMP(VID_MPR3);
+ DBGFS_DUMP(VID_MST);
+ vid_dbg_mst(s, readl(vid->regs + VID_MST));
+ DBGFS_DUMP(VID_BC);
+ DBGFS_DUMP(VID_TINT);
+ DBGFS_DUMP(VID_CSAT);
+ seq_puts(s, "\n");
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static struct drm_info_list vid_debugfs_files[] = {
+ { "vid", vid_dbg_show, 0, NULL },
+};
+
+static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
+ vid_debugfs_files[i].data = vid;
+
+ return drm_debugfs_create_files(vid_debugfs_files,
+ ARRAY_SIZE(vid_debugfs_files),
+ minor->debugfs_root, minor);
+}
void sti_vid_commit(struct sti_vid *vid,
struct drm_plane_state *state)
@@ -52,6 +150,7 @@ void sti_vid_commit(struct sti_vid *vid,
int dst_y = state->crtc_y;
int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ int src_h = state->src_h >> 16;
u32 val, ydo, xdo, yds, xds;
/* Input / output size
@@ -71,6 +170,19 @@ void sti_vid_commit(struct sti_vid *vid,
writel((ydo << 16) | xdo, vid->regs + VID_VPO);
writel((yds << 16) | xds, vid->regs + VID_VPS);
+
+ /* Color conversion parameters */
+ if (src_h >= VID_MIN_HD_HEIGHT) {
+ writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
+ writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
+ writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
+ writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
+ } else {
+ writel(VID_MPR0_BT601, vid->regs + VID_MPR0);
+ writel(VID_MPR1_BT601, vid->regs + VID_MPR1);
+ writel(VID_MPR2_BT601, vid->regs + VID_MPR2);
+ writel(VID_MPR3_BT601, vid->regs + VID_MPR3);
+ }
}
void sti_vid_disable(struct sti_vid *vid)
@@ -91,20 +203,14 @@ static void sti_vid_init(struct sti_vid *vid)
/* Opaque */
writel(VID_ALP_OPAQUE, vid->regs + VID_ALP);
- /* Color conversion parameters */
- writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
- writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
- writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
- writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
-
/* Brightness, contrast, tint, saturation */
writel(VID_BC_DFLT, vid->regs + VID_BC);
writel(VID_TINT_DFLT, vid->regs + VID_TINT);
writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
}
-struct sti_vid *sti_vid_create(struct device *dev, int id,
- void __iomem *baseaddr)
+struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
+ int id, void __iomem *baseaddr)
{
struct sti_vid *vid;
@@ -120,5 +226,8 @@ struct sti_vid *sti_vid_create(struct device *dev, int id,
sti_vid_init(vid);
+ if (vid_debugfs_init(vid, drm_dev->primary))
+ DRM_ERROR("VID debugfs setup failed\n");
+
return vid;
}
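Note that drm_plane_state source coordinates are 16.16 fixed point, hence the src_h >> 16 in sti_vid_commit() before comparing against VID_MIN_HD_HEIGHT. A standalone sketch of the matrix selection rule:

	#include <linux/types.h>

	#define VID_MIN_HD_HEIGHT 720

	/* true: program the BT709 coefficients; false: BT601 (SD sources) */
	static bool vid_use_bt709(u32 src_h_fixed_point)
	{
		u32 src_h = src_h_fixed_point >> 16;	/* drop the 16.16 fraction */

		return src_h >= VID_MIN_HD_HEIGHT;
	}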
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 5dea4791f1d6..6c842344f3d8 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -23,7 +23,7 @@ struct sti_vid {
void sti_vid_commit(struct sti_vid *vid,
struct drm_plane_state *state);
void sti_vid_disable(struct sti_vid *vid);
-struct sti_vid *sti_vid_create(struct device *dev, int id,
- void __iomem *baseaddr);
+struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
+ int id, void __iomem *baseaddr);
#endif
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index d56630c60039..32c7986b63ab 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -15,8 +15,8 @@
#include "sti_vtg.h"
-#define VTG_TYPE_MASTER 0
-#define VTG_TYPE_SLAVE_BY_EXT0 1
+#define VTG_MODE_MASTER 0
+#define VTG_MODE_SLAVE_BY_EXT0 1
/* registers offset */
#define VTG_MODE 0x0000
@@ -64,6 +64,9 @@
/* Delay introduced by the HDMI in nb of pixel */
#define HDMI_DELAY (5)
+/* Delay introduced by the DVO in number of pixels */
+#define DVO_DELAY (2)
+
/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
#define AWG_DELAY_HD (-9)
#define AWG_DELAY_ED (-8)
@@ -71,13 +74,61 @@
LIST_HEAD(vtg_lookup);
+/*
+ * STI VTG register offset structure
+ *
+ * @h_hd: stores the VTG_H_HD_x register offset
+ * @top_v_vd: stores the VTG_TOP_V_VD_x register offset
+ * @bot_v_vd: stores the VTG_BOT_V_VD_x register offset
+ * @top_v_hd: stores the VTG_TOP_V_HD_x register offset
+ * @bot_v_hd: stores the VTG_BOT_V_HD_x register offset
+ */
+struct sti_vtg_regs_offs {
+ u32 h_hd;
+ u32 top_v_vd;
+ u32 bot_v_vd;
+ u32 top_v_hd;
+ u32 bot_v_hd;
+};
+
+#define VTG_MAX_SYNC_OUTPUT 4
+static const struct sti_vtg_regs_offs vtg_regs_offs[VTG_MAX_SYNC_OUTPUT] = {
+ { VTG_H_HD_1,
+ VTG_TOP_V_VD_1, VTG_BOT_V_VD_1, VTG_TOP_V_HD_1, VTG_BOT_V_HD_1 },
+ { VTG_H_HD_2,
+ VTG_TOP_V_VD_2, VTG_BOT_V_VD_2, VTG_TOP_V_HD_2, VTG_BOT_V_HD_2 },
+ { VTG_H_HD_3,
+ VTG_TOP_V_VD_3, VTG_BOT_V_VD_3, VTG_TOP_V_HD_3, VTG_BOT_V_HD_3 },
+ { VTG_H_HD_4,
+ VTG_TOP_V_VD_4, VTG_BOT_V_VD_4, VTG_TOP_V_HD_4, VTG_BOT_V_HD_4 }
+};
+
+/*
+ * STI VTG synchronisation parameters structure
+ *
+ * @hsync: sample number falling and rising edge
+ * @vsync_line_top: vertical top field line number falling and rising edge
+ * @vsync_line_bot: vertical bottom field line number falling and rising edge
+ * @vsync_off_top: vertical top field sample number rising and falling edge
+ * @vsync_off_bot: vertical bottom field sample number rising and falling edge
+ */
+struct sti_vtg_sync_params {
+ u32 hsync;
+ u32 vsync_line_top;
+ u32 vsync_line_bot;
+ u32 vsync_off_top;
+ u32 vsync_off_bot;
+};
+
/**
* STI VTG structure
*
* @dev: pointer to device driver
- * @data: data associated to the device
+ * @np: device node
+ * @regs: register mapping
+ * @sync_params: synchronisation parameters used to generate timings
* @irq: VTG irq
- * @type: VTG type (main or aux)
+ * @irq_status: store the IRQ status value
* @notifier_list: notifier callback
* @crtc: the CRTC for vblank event
* @slave: slave vtg
@@ -87,6 +138,7 @@ struct sti_vtg {
struct device *dev;
struct device_node *np;
void __iomem *regs;
+ struct sti_vtg_sync_params sync_params[VTG_MAX_SYNC_OUTPUT];
int irq;
u32 irq_status;
struct raw_notifier_head notifier_list;
@@ -146,13 +198,69 @@ static void vtg_set_output_window(void __iomem *regs,
writel(video_bottom_field_stop, regs + VTG_VID_BFS);
}
+static void vtg_set_hsync_vsync_pos(struct sti_vtg_sync_params *sync,
+ int delay,
+ const struct drm_display_mode *mode)
+{
+ long clocksperline, start, stop;
+ u32 risesync_top, fallsync_top;
+ u32 risesync_offs_top, fallsync_offs_top;
+
+ clocksperline = mode->htotal;
+
+ /* Get the hsync position */
+ start = 0;
+ stop = mode->hsync_end - mode->hsync_start;
+
+ start += delay;
+ stop += delay;
+
+ if (start < 0)
+ start += clocksperline;
+ else if (start >= clocksperline)
+ start -= clocksperline;
+
+ if (stop < 0)
+ stop += clocksperline;
+ else if (stop >= clocksperline)
+ stop -= clocksperline;
+
+ sync->hsync = (stop << 16) | start;
+
+ /* Get the vsync position */
+ if (delay >= 0) {
+ risesync_top = 1;
+ fallsync_top = risesync_top;
+ fallsync_top += mode->vsync_end - mode->vsync_start;
+
+ fallsync_offs_top = (u32)delay;
+ risesync_offs_top = (u32)delay;
+ } else {
+ risesync_top = mode->vtotal;
+ fallsync_top = mode->vsync_end - mode->vsync_start;
+
+ fallsync_offs_top = clocksperline + delay;
+ risesync_offs_top = clocksperline + delay;
+ }
+
+ sync->vsync_line_top = (fallsync_top << 16) | risesync_top;
+ sync->vsync_off_top = (fallsync_offs_top << 16) | risesync_offs_top;
+
+ /* Only progressive supported for now */
+ sync->vsync_line_bot = sync->vsync_line_top;
+ sync->vsync_off_bot = sync->vsync_off_top;
+}
+
static void vtg_set_mode(struct sti_vtg *vtg,
- int type, const struct drm_display_mode *mode)
+ int type,
+ struct sti_vtg_sync_params *sync,
+ const struct drm_display_mode *mode)
{
- u32 tmp;
+ unsigned int i;
if (vtg->slave)
- vtg_set_mode(vtg->slave, VTG_TYPE_SLAVE_BY_EXT0, mode);
+ vtg_set_mode(vtg->slave, VTG_MODE_SLAVE_BY_EXT0,
+ vtg->sync_params, mode);
/* Set the number of clock cycles per line */
writel(mode->htotal, vtg->regs + VTG_CLKLN);
@@ -163,57 +271,31 @@ static void vtg_set_mode(struct sti_vtg *vtg,
/* Program output window */
vtg_set_output_window(vtg->regs, mode);
- /* prepare VTG set 1 for HDMI */
- tmp = (mode->hsync_end - mode->hsync_start + HDMI_DELAY) << 16;
- tmp |= HDMI_DELAY;
- writel(tmp, vtg->regs + VTG_H_HD_1);
-
- tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
- tmp |= 1;
- writel(tmp, vtg->regs + VTG_TOP_V_VD_1);
- writel(tmp, vtg->regs + VTG_BOT_V_VD_1);
-
- tmp = HDMI_DELAY << 16;
- tmp |= HDMI_DELAY;
- writel(tmp, vtg->regs + VTG_TOP_V_HD_1);
- writel(tmp, vtg->regs + VTG_BOT_V_HD_1);
-
- /* prepare VTG set 2 for for HD DCS */
- tmp = (mode->hsync_end - mode->hsync_start) << 16;
- writel(tmp, vtg->regs + VTG_H_HD_2);
-
- tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
- tmp |= 1;
- writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
- writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
- writel(0, vtg->regs + VTG_TOP_V_HD_2);
- writel(0, vtg->regs + VTG_BOT_V_HD_2);
-
- /* prepare VTG set 3 for HD Analog in HD mode */
- tmp = (mode->hsync_end - mode->hsync_start + AWG_DELAY_HD) << 16;
- tmp |= mode->htotal + AWG_DELAY_HD;
- writel(tmp, vtg->regs + VTG_H_HD_3);
-
- tmp = (mode->vsync_end - mode->vsync_start) << 16;
- tmp |= mode->vtotal;
- writel(tmp, vtg->regs + VTG_TOP_V_VD_3);
- writel(tmp, vtg->regs + VTG_BOT_V_VD_3);
-
- tmp = (mode->htotal + AWG_DELAY_HD) << 16;
- tmp |= mode->htotal + AWG_DELAY_HD;
- writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
- writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
-
- /* Prepare VTG set 4 for DVO */
- tmp = (mode->hsync_end - mode->hsync_start) << 16;
- writel(tmp, vtg->regs + VTG_H_HD_4);
-
- tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
- tmp |= 1;
- writel(tmp, vtg->regs + VTG_TOP_V_VD_4);
- writel(tmp, vtg->regs + VTG_BOT_V_VD_4);
- writel(0, vtg->regs + VTG_TOP_V_HD_4);
- writel(0, vtg->regs + VTG_BOT_V_HD_4);
+ /* Set hsync and vsync position for HDMI */
+ vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDMI - 1], HDMI_DELAY, mode);
+
+ /* Set hsync and vsync position for HD DCS */
+ vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDDCS - 1], 0, mode);
+
+ /* Set hsync and vsync position for HDF */
+ vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_HDF - 1], AWG_DELAY_HD, mode);
+
+ /* Set hsync and vsync position for DVO */
+ vtg_set_hsync_vsync_pos(&sync[VTG_SYNC_ID_DVO - 1], DVO_DELAY, mode);
+
+ /* Program the sync outputs */
+ for (i = 0; i < VTG_MAX_SYNC_OUTPUT; i++) {
+ writel(sync[i].hsync,
+ vtg->regs + vtg_regs_offs[i].h_hd);
+ writel(sync[i].vsync_line_top,
+ vtg->regs + vtg_regs_offs[i].top_v_vd);
+ writel(sync[i].vsync_line_bot,
+ vtg->regs + vtg_regs_offs[i].bot_v_vd);
+ writel(sync[i].vsync_off_top,
+ vtg->regs + vtg_regs_offs[i].top_v_hd);
+ writel(sync[i].vsync_off_bot,
+ vtg->regs + vtg_regs_offs[i].bot_v_hd);
+ }
/* mode */
writel(type, vtg->regs + VTG_MODE);
@@ -231,7 +313,7 @@ void sti_vtg_set_config(struct sti_vtg *vtg,
const struct drm_display_mode *mode)
{
/* write configuration */
- vtg_set_mode(vtg, VTG_TYPE_MASTER, mode);
+ vtg_set_mode(vtg, VTG_MODE_MASTER, vtg->sync_params, mode);
vtg_reset(vtg);
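To see why vtg_set_hsync_vsync_pos() wraps start/stop modulo clocksperline, take the HDF output with AWG_DELAY_HD = -9 on a 1920x1080 mode (htotal = 2200, hsync width 44): start = 0 - 9 wraps to 2191, stop = 44 - 9 = 35, and both land in one register as (stop << 16) | start. A sketch of just the wrap step:

	/* wrap a clock position into [0, clocksperline) after applying a delay */
	static long vtg_wrap_pos(long pos, long clocksperline)
	{
		if (pos < 0)
			pos += clocksperline;
		else if (pos >= clocksperline)
			pos -= clocksperline;
		return pos;
	}

	/* vtg_wrap_pos(0 - 9, 2200) == 2191; vtg_wrap_pos(44 - 9, 2200) == 35 */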
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
index cd2439f89d05..f1dcdf9c2342 100644
--- a/drivers/gpu/drm/sti/sti_vtg.h
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -10,6 +10,11 @@
#define VTG_TOP_FIELD_EVENT 1
#define VTG_BOTTOM_FIELD_EVENT 2
+#define VTG_SYNC_ID_HDMI 1
+#define VTG_SYNC_ID_HDDCS 2
+#define VTG_SYNC_ID_HDF 3
+#define VTG_SYNC_ID_DVO 4
+
struct sti_vtg;
struct drm_display_mode;
struct notifier_block;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 33add93b4ed9..3b0d8c392b70 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -175,8 +175,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
- dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
- bo->paddr);
+ dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
}
@@ -233,8 +232,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else {
size_t size = bo->gem.size;
- bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
- GFP_KERNEL | __GFP_NOWARN);
+ bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
"failed to allocate buffer of size %zu\n",
@@ -472,8 +471,8 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
- bo->paddr, gem->size);
+ ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
+ gem->size);
if (ret) {
drm_gem_vm_close(vma);
return ret;
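dma_alloc_wc(), dma_mmap_wc() and dma_free_wc() are straight renames of the old *_writecombine helpers with identical semantics, so these hunks are mechanical. A minimal allocation sketch (the device pointer and size are placeholders):

	#include <linux/dma-mapping.h>

	static void *example_wc_alloc(struct device *dev, size_t size,
				      dma_addr_t *paddr)
	{
		/* CPU-visible write-combined buffer; returns NULL on failure */
		return dma_alloc_wc(dev, size, paddr, GFP_KERNEL | __GFP_NOWARN);
	}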
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 4e19d0f9cc30..077ae9b2865d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err;
copy_highpage(to_page, from_page);
- page_cache_release(from_page);
+ put_page(from_page);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
copy_highpage(to_page, from_page);
set_page_dirty(to_page);
mark_page_accessed(to_page);
- page_cache_release(to_page);
+ put_page(to_page);
}
ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index c427499133d6..fd1eb9d03f0b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -423,8 +423,8 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
}
if (ufb->obj->base.import_attach) {
- dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
- DMA_FROM_DEVICE);
+ ret = dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
+ DMA_FROM_DEVICE);
}
unlock:
@@ -536,7 +536,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_gfree:
- drm_gem_object_unreference(&ufbdev->ufb.obj->base);
+ drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 2a0a784ab6ee..d7528e0d8442 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
return ret;
}
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
*handle_p = handle;
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 160ef2a08b89..b87afee44995 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -279,14 +279,6 @@ static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
}
-static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-
-{
- return true;
-}
-
#if 0
static int
udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -402,7 +394,6 @@ static void udl_crtc_commit(struct drm_crtc *crtc)
static const struct drm_crtc_helper_funcs udl_helper_funcs = {
.dpms = udl_crtc_dpms,
- .mode_fixup = udl_crtc_mode_fixup,
.mode_set = udl_crtc_mode_set,
.prepare = udl_crtc_prepare,
.commit = udl_crtc_commit,
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 22278bcfc60e..9807bc9d296e 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -398,9 +398,8 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
- ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
- bo->base.vaddr, bo->base.paddr,
- vma->vm_end - vma->vm_start);
+ ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
+ bo->base.paddr, vma->vm_end - vma->vm_start);
if (ret)
drm_gem_vm_close(vma);
@@ -499,11 +498,12 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo))
return PTR_ERR(bo);
- ret = copy_from_user(bo->base.vaddr,
+ if (copy_from_user(bo->base.vaddr,
(void __user *)(uintptr_t)args->data,
- args->size);
- if (ret != 0)
+ args->size)) {
+ ret = -EFAULT;
goto fail;
+ }
/* Clear the rest of the memory from allocating from the BO
* cache.
*/
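The vc4_create_shader_bo_ioctl() fix above matters because copy_from_user() returns the number of bytes it could not copy, never a negative errno; passing that count through as a return value would hand a positive number back to user space. The idiomatic mapping:

	#include <linux/uaccess.h>

	static int example_copy_args(void *dst, const void __user *src, size_t size)
	{
		if (copy_from_user(dst, src, size))
			return -EFAULT;	/* translate "bytes left" into an errno */
		return 0;
	}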
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index f53fe6cd72be..fa2ad15d4f62 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -52,7 +52,7 @@ struct vc4_dev {
/* Protects bo_cache and the BO stats. */
struct mutex bo_lock;
- /* Sequence number for the last job queued in job_list.
+ /* Sequence number for the last job queued in bin_job_list.
* Starts at 0 (no jobs emitted).
*/
uint64_t emit_seqno;
@@ -62,11 +62,19 @@ struct vc4_dev {
*/
uint64_t finished_seqno;
- /* List of all struct vc4_exec_info for jobs to be executed.
- * The first job in the list is the one currently programmed
- * into ct0ca/ct1ca for execution.
+ /* List of all struct vc4_exec_info for jobs to be executed in
+ * the binner. The first job in the list is the one currently
+ * programmed into ct0ca for execution.
*/
- struct list_head job_list;
+ struct list_head bin_job_list;
+
+ /* List of all struct vc4_exec_info for jobs that have
+ * completed binning and are ready for rendering. The first
+ * job in the list is the one currently programmed into ct1ca
+ * for execution.
+ */
+ struct list_head render_job_list;
+
/* List of the finished vc4_exec_infos waiting to be freed by
* job_done_work.
*/
@@ -296,11 +304,20 @@ struct vc4_exec_info {
};
static inline struct vc4_exec_info *
-vc4_first_job(struct vc4_dev *vc4)
+vc4_first_bin_job(struct vc4_dev *vc4)
+{
+ if (list_empty(&vc4->bin_job_list))
+ return NULL;
+ return list_first_entry(&vc4->bin_job_list, struct vc4_exec_info, head);
+}
+
+static inline struct vc4_exec_info *
+vc4_first_render_job(struct vc4_dev *vc4)
{
- if (list_empty(&vc4->job_list))
+ if (list_empty(&vc4->render_job_list))
return NULL;
- return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
+ return list_first_entry(&vc4->render_job_list,
+ struct vc4_exec_info, head);
}
/**
@@ -414,7 +431,9 @@ int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void vc4_submit_next_job(struct drm_device *dev);
+void vc4_submit_next_bin_job(struct drm_device *dev);
+void vc4_submit_next_render_job(struct drm_device *dev);
+void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 202aa1544acc..8d4384f8b78d 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -141,10 +141,10 @@ vc4_save_hang_state(struct drm_device *dev)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_get_hang_state *state;
struct vc4_hang_state *kernel_state;
- struct vc4_exec_info *exec;
+ struct vc4_exec_info *exec[2];
struct vc4_bo *bo;
unsigned long irqflags;
- unsigned int i, unref_list_count;
+ unsigned int i, j, unref_list_count, prev_idx;
kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
if (!kernel_state)
@@ -153,37 +153,55 @@ vc4_save_hang_state(struct drm_device *dev)
state = &kernel_state->user_state;
spin_lock_irqsave(&vc4->job_lock, irqflags);
- exec = vc4_first_job(vc4);
- if (!exec) {
+ exec[0] = vc4_first_bin_job(vc4);
+ exec[1] = vc4_first_render_job(vc4);
+ if (!exec[0] && !exec[1]) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
- unref_list_count = 0;
- list_for_each_entry(bo, &exec->unref_list, unref_head)
- unref_list_count++;
+ /* Get the bos from both binner and renderer into hang state. */
+ state->bo_count = 0;
+ for (i = 0; i < 2; i++) {
+ if (!exec[i])
+ continue;
+
+ unref_list_count = 0;
+ list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
+ unref_list_count++;
+ state->bo_count += exec[i]->bo_count + unref_list_count;
+ }
+
+ kernel_state->bo = kcalloc(state->bo_count,
+ sizeof(*kernel_state->bo), GFP_ATOMIC);
- state->bo_count = exec->bo_count + unref_list_count;
- kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
- GFP_ATOMIC);
if (!kernel_state->bo) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
- for (i = 0; i < exec->bo_count; i++) {
- drm_gem_object_reference(&exec->bo[i]->base);
- kernel_state->bo[i] = &exec->bo[i]->base;
- }
+ prev_idx = 0;
+ for (i = 0; i < 2; i++) {
+ if (!exec[i])
+ continue;
- list_for_each_entry(bo, &exec->unref_list, unref_head) {
- drm_gem_object_reference(&bo->base.base);
- kernel_state->bo[i] = &bo->base.base;
- i++;
+ for (j = 0; j < exec[i]->bo_count; j++) {
+ drm_gem_object_reference(&exec[i]->bo[j]->base);
+ kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+ }
+
+ list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
+ drm_gem_object_reference(&bo->base.base);
+ kernel_state->bo[j + prev_idx] = &bo->base.base;
+ j++;
+ }
+ prev_idx = j + 1;
}
- state->start_bin = exec->ct0ca;
- state->start_render = exec->ct1ca;
+ if (exec[0])
+ state->start_bin = exec[0]->ct0ca;
+ if (exec[1])
+ state->start_render = exec[1]->ct1ca;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
@@ -267,13 +285,15 @@ vc4_hangcheck_elapsed(unsigned long data)
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
- struct vc4_exec_info *exec;
+ struct vc4_exec_info *bin_exec, *render_exec;
spin_lock_irqsave(&vc4->job_lock, irqflags);
- exec = vc4_first_job(vc4);
+
+ bin_exec = vc4_first_bin_job(vc4);
+ render_exec = vc4_first_render_job(vc4);
/* If idle, we can stop watching for hangs. */
- if (!exec) {
+ if (!bin_exec && !render_exec) {
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
}
@@ -284,9 +304,12 @@ vc4_hangcheck_elapsed(unsigned long data)
/* If we've made any progress in execution, rearm the timer
* and wait.
*/
- if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
- exec->last_ct0ca = ct0ca;
- exec->last_ct1ca = ct1ca;
+ if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
+ (render_exec && ct1ca != render_exec->last_ct1ca)) {
+ if (bin_exec)
+ bin_exec->last_ct0ca = ct0ca;
+ if (render_exec)
+ render_exec->last_ct1ca = ct1ca;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
vc4_queue_hangcheck(dev);
return;
@@ -386,11 +409,13 @@ vc4_flush_caches(struct drm_device *dev)
* The job_lock should be held during this.
*/
void
-vc4_submit_next_job(struct drm_device *dev)
+vc4_submit_next_bin_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_exec_info *exec = vc4_first_job(vc4);
+ struct vc4_exec_info *exec;
+again:
+ exec = vc4_first_bin_job(vc4);
if (!exec)
return;
@@ -400,11 +425,40 @@ vc4_submit_next_job(struct drm_device *dev)
V3D_WRITE(V3D_BPOA, 0);
V3D_WRITE(V3D_BPOS, 0);
- if (exec->ct0ca != exec->ct0ea)
+ /* Either put the job in the binner if it uses the binner, or
+ * immediately move it to the to-be-rendered queue.
+ */
+ if (exec->ct0ca != exec->ct0ea) {
submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+ } else {
+ vc4_move_job_to_render(dev, exec);
+ goto again;
+ }
+}
+
+void
+vc4_submit_next_render_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_render_job(vc4);
+
+ if (!exec)
+ return;
+
submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}
+void
+vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ bool was_empty = list_empty(&vc4->render_job_list);
+
+ list_move_tail(&exec->head, &vc4->render_job_list);
+ if (was_empty)
+ vc4_submit_next_render_job(dev);
+}
+
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
@@ -443,14 +497,14 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
exec->seqno = seqno;
vc4_update_bo_seqnos(exec, seqno);
- list_add_tail(&exec->head, &vc4->job_list);
+ list_add_tail(&exec->head, &vc4->bin_job_list);
/* If no job was executing, kick ours off. Otherwise, it'll
- * get started when the previous job's frame done interrupt
+ * get started when the previous job's flush done interrupt
* occurs.
*/
- if (vc4_first_job(vc4) == exec) {
- vc4_submit_next_job(dev);
+ if (vc4_first_bin_job(vc4) == exec) {
+ vc4_submit_next_bin_job(dev);
vc4_queue_hangcheck(dev);
}
@@ -859,7 +913,8 @@ vc4_gem_init(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- INIT_LIST_HEAD(&vc4->job_list);
+ INIT_LIST_HEAD(&vc4->bin_job_list);
+ INIT_LIST_HEAD(&vc4->render_job_list);
INIT_LIST_HEAD(&vc4->job_done_list);
INIT_LIST_HEAD(&vc4->seqno_cb_list);
spin_lock_init(&vc4->job_lock);
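With the queues split, a job now flows bin_job_list (CT0CA) -> render_job_list (CT1CA) -> job_done_list, and the render queue is kicked exactly when the hand-off finds it empty. A condensed sketch of that hand-off, assuming vc4->job_lock is held as in vc4_move_job_to_render():

	/* caller holds vc4->job_lock; exec has finished (or skipped) binning */
	static void example_handoff(struct drm_device *dev, struct vc4_exec_info *exec)
	{
		struct vc4_dev *vc4 = to_vc4_dev(dev);
		bool render_was_idle = list_empty(&vc4->render_job_list);

		list_move_tail(&exec->head, &vc4->render_job_list);
		if (render_was_idle)
			vc4_submit_next_render_job(dev); /* programs CT1CA/CT1EA */
	}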
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 56272ca98ab7..d8b864925fd3 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -47,6 +47,7 @@ struct vc4_hdmi {
void __iomem *hdmicore_regs;
void __iomem *hd_regs;
int hpd_gpio;
+ bool hpd_active_low;
struct clk *pixel_clock;
struct clk *hsm_clock;
@@ -166,7 +167,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_dev *vc4 = to_vc4_dev(dev);
if (vc4->hdmi->hpd_gpio) {
- if (gpio_get_value(vc4->hdmi->hpd_gpio))
+ if (gpio_get_value_cansleep(vc4->hdmi->hpd_gpio) ^
+ vc4->hdmi->hpd_active_low)
return connector_status_connected;
else
return connector_status_disconnected;
@@ -517,11 +519,17 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
* we'll use the HDMI core's register.
*/
if (of_find_property(dev->of_node, "hpd-gpios", &value)) {
- hdmi->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
+ enum of_gpio_flags hpd_gpio_flags;
+
+ hdmi->hpd_gpio = of_get_named_gpio_flags(dev->of_node,
+ "hpd-gpios", 0,
+ &hpd_gpio_flags);
if (hdmi->hpd_gpio < 0) {
ret = hdmi->hpd_gpio;
goto err_unprepare_hsm;
}
+
+ hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
}
vc4->hdmi = hdmi;
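The detect fix above XORs the raw GPIO level with the polarity reported by of_get_named_gpio_flags(), so an active-low hot-plug line reads as connected while the pin is physically low. A minimal sketch:

	#include <linux/gpio.h>

	static bool example_hpd_connected(int gpio, bool active_low)
	{
		/* cansleep variant: HPD lines may sit behind I2C/SPI expanders */
		return gpio_get_value_cansleep(gpio) ^ active_low;
	}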
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 78a21357fb2d..b0104a346a74 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -30,6 +30,10 @@
* disables that specific interrupt, and 0s written are ignored
* (reading either one returns the set of enabled interrupts).
*
+ * When we take a binning flush done interrupt, we need to submit the
+ * next frame for binning and move the finished frame to the render
+ * thread.
+ *
* When we take a render frame interrupt, we need to wake the
* processes waiting for some frame to be done, and get the next frame
* submitted ASAP (so the hardware doesn't sit idle when there's work
@@ -44,6 +48,7 @@
#include "vc4_regs.h"
#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
+ V3D_INT_FLDONE | \
V3D_INT_FRDONE)
DECLARE_WAIT_QUEUE_HEAD(render_wait);
@@ -77,7 +82,7 @@ vc4_overflow_mem_work(struct work_struct *work)
unsigned long irqflags;
spin_lock_irqsave(&vc4->job_lock, irqflags);
- current_exec = vc4_first_job(vc4);
+ current_exec = vc4_first_bin_job(vc4);
if (current_exec) {
vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
list_add_tail(&vc4->overflow_mem->unref_head,
@@ -98,17 +103,43 @@ vc4_overflow_mem_work(struct work_struct *work)
}
static void
-vc4_irq_finish_job(struct drm_device *dev)
+vc4_irq_finish_bin_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
+
+ if (!exec)
+ return;
+
+ vc4_move_job_to_render(dev, exec);
+ vc4_submit_next_bin_job(dev);
+}
+
+static void
+vc4_cancel_bin_job(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
+
+ if (!exec)
+ return;
+
+ list_move_tail(&exec->head, &vc4->bin_job_list);
+ vc4_submit_next_bin_job(dev);
+}
+
+static void
+vc4_irq_finish_render_job(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_exec_info *exec = vc4_first_job(vc4);
+ struct vc4_exec_info *exec = vc4_first_render_job(vc4);
if (!exec)
return;
vc4->finished_seqno++;
list_move_tail(&exec->head, &vc4->job_done_list);
- vc4_submit_next_job(dev);
+ vc4_submit_next_render_job(dev);
wake_up_all(&vc4->job_wait_queue);
schedule_work(&vc4->job_done_work);
@@ -125,9 +156,10 @@ vc4_irq(int irq, void *arg)
barrier();
intctl = V3D_READ(V3D_INTCTL);
- /* Acknowledge the interrupts we're handling here. The render
- * frame done interrupt will be cleared, while OUTOMEM will
- * stay high until the underlying cause is cleared.
+ /* Acknowledge the interrupts we're handling here. The binner
+ * last flush / render frame done interrupt will be cleared,
+ * while OUTOMEM will stay high until the underlying cause is
+ * cleared.
*/
V3D_WRITE(V3D_INTCTL, intctl);
@@ -138,9 +170,16 @@ vc4_irq(int irq, void *arg)
status = IRQ_HANDLED;
}
+ if (intctl & V3D_INT_FLDONE) {
+ spin_lock(&vc4->job_lock);
+ vc4_irq_finish_bin_job(dev);
+ spin_unlock(&vc4->job_lock);
+ status = IRQ_HANDLED;
+ }
+
if (intctl & V3D_INT_FRDONE) {
spin_lock(&vc4->job_lock);
- vc4_irq_finish_job(dev);
+ vc4_irq_finish_render_job(dev);
spin_unlock(&vc4->job_lock);
status = IRQ_HANDLED;
}
@@ -205,6 +244,7 @@ void vc4_irq_reset(struct drm_device *dev)
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
spin_lock_irqsave(&vc4->job_lock, irqflags);
- vc4_irq_finish_job(dev);
+ vc4_cancel_bin_job(dev);
+ vc4_irq_finish_render_job(dev);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 31de5d17bc85..e6d3c6028341 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -268,6 +268,7 @@ static int vc4_v3d_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id vc4_v3d_dt_match[] = {
+ { .compatible = "brcm,bcm2835-v3d" },
{ .compatible = "brcm,vc4-v3d" },
{}
};
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index d0cbd5ecd7f0..7e2a12c4fed2 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (NULL != (page = vsg->pages[i])) {
if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
- page_cache_release(page);
+ put_page(page);
}
}
case dr_via_pages_alloc:
@@ -239,8 +239,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
if (NULL == vsg->pages)
return -ENOMEM;
down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm,
- (unsigned long)xfer->mem_addr,
+ ret = get_user_pages((unsigned long)xfer->mem_addr,
vsg->num_pages,
(vsg->direction == DMA_FROM_DEVICE),
0, vsg->pages, NULL);
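The via hunk tracks the 4.6 API cleanup in which get_user_pages() lost its task/mm arguments and now always operates on current->mm. A sketch of the new call under those assumptions (the address and count are placeholders):

	#include <linux/mm.h>

	/* pin nr_pages of a user buffer; caller holds current->mm->mmap_sem */
	static long example_pin(unsigned long uaddr, unsigned long nr_pages,
				int write, struct page **pages)
	{
		return get_user_pages(uaddr, nr_pages, write, 0 /* force */,
				      pages, NULL);
	}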
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 429aa311685a..4854dac87e24 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -237,13 +237,6 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
return 0;
}
-static bool virtio_gpu_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -277,7 +270,6 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
.enable = virtio_gpu_crtc_enable,
.disable = virtio_gpu_crtc_disable,
- .mode_fixup = virtio_gpu_crtc_mode_fixup,
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_check = virtio_gpu_crtc_atomic_check,
};
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 58704f0a4607..531d22025fec 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -25,6 +25,8 @@
*
**************************************************************************/
+#include <linux/kernel.h>
+
#ifdef __KERNEL__
#include <drm/vmwgfx_drm.h>
@@ -36,7 +38,6 @@
#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
#endif /* ARRAY_SIZE */
-#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
@@ -987,12 +988,12 @@ svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
const surf_size_struct *pixel_size,
surf_size_struct *block_size)
{
- block_size->width = DIV_ROUND_UP(pixel_size->width,
- desc->block_size.width);
- block_size->height = DIV_ROUND_UP(pixel_size->height,
- desc->block_size.height);
- block_size->depth = DIV_ROUND_UP(pixel_size->depth,
- desc->block_size.depth);
+ block_size->width = __KERNEL_DIV_ROUND_UP(pixel_size->width,
+ desc->block_size.width);
+ block_size->height = __KERNEL_DIV_ROUND_UP(pixel_size->height,
+ desc->block_size.height);
+ block_size->depth = __KERNEL_DIV_ROUND_UP(pixel_size->depth,
+ desc->block_size.depth);
}
static inline bool
@@ -1100,8 +1101,9 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
const u32 bw = desc->block_size.width, bh = desc->block_size.height;
const u32 bd = desc->block_size.depth;
- const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
- const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+ const u32 rowstride = __KERNEL_DIV_ROUND_UP(width, bw) *
+ desc->bytes_per_block;
+ const u32 imgstride = __KERNEL_DIV_ROUND_UP(height, bh) * rowstride;
const u32 offset = (z / bd * imgstride +
y / bh * rowstride +
x / bw * desc->bytes_per_block);
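__KERNEL_DIV_ROUND_UP() from <linux/kernel.h> computes the same ceiling division the removed local macro did: (n + d - 1) / d. For a 1930-pixel-wide surface with 4-pixel blocks that is (1930 + 3) / 4 = 483 blocks. A one-function sketch:

	#include <linux/kernel.h>

	static inline u32 blocks_for(u32 pixels, u32 block_dim)
	{
		/* ceil(pixels / block_dim) without floating point */
		return __KERNEL_DIV_ROUND_UP(pixels, block_dim);
	}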
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0ee76e523a90..6cbb7d4bdd11 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -195,7 +195,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_MASTER | DRM_AUTH),
VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
vmw_kms_update_layout_ioctl,
- DRM_MASTER),
+ DRM_MASTER | DRM_CONTROL_ALLOW),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
@@ -1204,6 +1204,7 @@ static int vmw_master_set(struct drm_device *dev,
}
dev_priv->active_master = vmaster;
+ drm_sysfs_hotplug_event(dev);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5cb1b1687cd4..019a6ca3e8e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20150810"
+#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 9
+#define VMWGFX_DRIVER_MINOR 10
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -407,8 +407,11 @@ struct vmw_private {
void *fb_info;
enum vmw_display_unit_type active_display_unit;
struct vmw_legacy_display *ldu_priv;
- struct vmw_screen_object_display *sou_priv;
struct vmw_overlay *overlay_priv;
+ struct drm_property *hotplug_mode_update_property;
+ struct drm_property *implicit_placement_property;
+ unsigned num_implicit;
+ struct vmw_framebuffer *implicit_fb;
/*
* Context and surface management.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5da5de0cb522..723ba16c6084 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3009,6 +3009,26 @@ out_unref:
return ret;
}
+/**
+ * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXGenMips body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+
+ return vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3297,7 +3317,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_clear_depthstencil_view, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
&vmw_cmd_dx_check_subresource, true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b221a8c40282..4742ec4ead27 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -236,8 +236,8 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
- du->cursor_x = x + crtc->x;
- du->cursor_y = y + crtc->y;
+ du->cursor_x = x + du->set_gui_x;
+ du->cursor_y = y + du->set_gui_y;
/*
* FIXME: Unclear whether there's any global state touched by the
@@ -663,9 +663,8 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
break;
case vmw_du_screen_object:
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
- clips, num_clips, increment,
- true,
- NULL);
+ clips, NULL, num_clips,
+ increment, true, NULL);
break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
@@ -1109,6 +1108,22 @@ int vmw_kms_present(struct vmw_private *dev_priv,
return 0;
}
+static void
+vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
+{
+ if (dev_priv->hotplug_mode_update_property)
+ return;
+
+ dev_priv->hotplug_mode_update_property =
+ drm_property_create_range(dev_priv->dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "hotplug_mode_update", 0, 1);
+
+ if (!dev_priv->hotplug_mode_update_property)
+ return;
+
+}
+
int vmw_kms_init(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -1121,6 +1136,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.max_width = dev_priv->texture_max_width;
dev->mode_config.max_height = dev_priv->texture_max_height;
+ drm_mode_create_suggested_offset_properties(dev);
+ vmw_kms_create_hotplug_mode_update_property(dev_priv);
+
ret = vmw_kms_stdu_init_display(dev_priv);
if (ret) {
ret = vmw_kms_sou_init_display(dev_priv);
@@ -1360,15 +1378,28 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
du->pref_active = true;
du->gui_x = rects[du->unit].x;
du->gui_y = rects[du->unit].y;
+ drm_object_property_set_value
+ (&con->base, dev->mode_config.suggested_x_property,
+ du->gui_x);
+ drm_object_property_set_value
+ (&con->base, dev->mode_config.suggested_y_property,
+ du->gui_y);
} else {
du->pref_width = 800;
du->pref_height = 600;
du->pref_active = false;
+ drm_object_property_set_value
+ (&con->base, dev->mode_config.suggested_x_property,
+ 0);
+ drm_object_property_set_value
+ (&con->base, dev->mode_config.suggested_y_property,
+ 0);
}
con->status = vmw_du_connector_detect(con, true);
}
mutex_unlock(&dev->mode_config.mutex);
+ drm_sysfs_hotplug_event(dev);
return 0;
}
@@ -1591,6 +1622,12 @@ int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
+ struct vmw_display_unit *du = vmw_connector_to_du(connector);
+ struct vmw_private *dev_priv = vmw_priv(connector->dev);
+
+ if (property == dev_priv->implicit_placement_property)
+ du->is_implicit = val;
+
return 0;
}
@@ -2096,3 +2133,119 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
return 0;
}
+
+/**
+ * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @du: The display unit of the crtc.
+ */
+void vmw_kms_del_active(struct vmw_private *dev_priv,
+ struct vmw_display_unit *du)
+{
+ lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+ if (du->active_implicit) {
+ if (--(dev_priv->num_implicit) == 0)
+ dev_priv->implicit_fb = NULL;
+ du->active_implicit = false;
+ }
+}
+
+/**
+ * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @du: The display unit of the crtc.
+ * @vfb: The implicit framebuffer
+ *
+ * Registers a binding to an implicit framebuffer.
+ */
+void vmw_kms_add_active(struct vmw_private *dev_priv,
+ struct vmw_display_unit *du,
+ struct vmw_framebuffer *vfb)
+{
+ lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+ WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
+
+ if (!du->active_implicit && du->is_implicit) {
+ dev_priv->implicit_fb = vfb;
+ du->active_implicit = true;
+ dev_priv->num_implicit++;
+ }
+}
+
+/**
+ * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
+ *
+ * @dev_priv: Pointer to device-private struct.
+ * @crtc: The crtc we want to flip.
+ *
+ * Returns true or false depending on whether it's OK to flip this crtc,
+ * based on the criterion that there must not be more than one implicit
+ * frame-buffer active at any one time.
+ */
+bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+ if (!du->is_implicit)
+ return true;
+
+ if (dev_priv->num_implicit != 1)
+ return false;
+
+ return true;
+}
+
+/**
+ * vmw_kms_update_implicit_fb - Update the implicit fb.
+ *
+ * @dev_priv: Pointer to device-private struct.
+ * @crtc: The crtc the new implicit frame-buffer is bound to.
+ */
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc)
+{
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ struct vmw_framebuffer *vfb;
+
+ lockdep_assert_held_once(&dev_priv->dev->mode_config.mutex);
+
+ if (!du->is_implicit)
+ return;
+
+ vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
+ WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
+ dev_priv->implicit_fb != vfb);
+
+ dev_priv->implicit_fb = vfb;
+}
+
+/**
+ * vmw_kms_create_implicit_placement_property - Set up the implicit placement
+ * property.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @immutable: Whether the property is immutable.
+ *
+ * Sets up the implicit placement property unless it's already set up.
+ */
+void
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
+ bool immutable)
+{
+ if (dev_priv->implicit_placement_property)
+ return;
+
+ dev_priv->implicit_placement_property =
+ drm_property_create_range(dev_priv->dev,
+ immutable ?
+ DRM_MODE_PROP_IMMUTABLE : 0,
+ "implicit_placement", 0, 1);
+
+}
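The new helpers above centralize bookkeeping that vmwgfx_scrn.c previously kept in its private vmw_screen_object_display. A self-contained sketch of the counting pattern they implement; the demo_* types below are illustrative stand-ins, not the real vmw_private or vmw_display_unit layout.

#include <stdio.h>
#include <stdbool.h>

struct demo_priv {
	int num_implicit;        /* CRTCs currently bound to the implicit fb */
	const char *implicit_fb; /* the single shared implicit framebuffer   */
};

struct demo_du {
	bool is_implicit;
	bool active_implicit;
};

static void demo_add_active(struct demo_priv *p, struct demo_du *du,
			    const char *fb)
{
	if (!du->active_implicit && du->is_implicit) {
		p->implicit_fb = fb;           /* last binder wins */
		du->active_implicit = true;
		p->num_implicit++;
	}
}

static void demo_del_active(struct demo_priv *p, struct demo_du *du)
{
	if (du->active_implicit) {
		if (--p->num_implicit == 0)
			p->implicit_fb = NULL; /* nobody left using it */
		du->active_implicit = false;
	}
}

static bool demo_flippable(const struct demo_priv *p, const struct demo_du *du)
{
	/* Flipping is safe unless several CRTCs share the implicit fb. */
	return !du->is_implicit || p->num_implicit == 1;
}

int main(void)
{
	struct demo_priv p = { 0, NULL };
	struct demo_du a = { true, false }, b = { true, false };

	demo_add_active(&p, &a, "fb0");
	printf("one CRTC: flippable=%d\n", demo_flippable(&p, &a)); /* 1 */
	demo_add_active(&p, &b, "fb0");
	printf("two CRTCs: flippable=%d\n", demo_flippable(&p, &a)); /* 0 */
	demo_del_active(&p, &b);
	demo_del_active(&p, &a);
	printf("implicit_fb=%s\n", p.implicit_fb ? p.implicit_fb : "(null)");
	return 0;
}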
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index edd81503516d..57203212c501 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -178,6 +178,9 @@ struct vmw_display_unit {
int gui_x;
int gui_y;
bool is_implicit;
+ bool active_implicit;
+ int set_gui_x;
+ int set_gui_y;
};
#define vmw_crtc_to_du(x) \
@@ -254,6 +257,18 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
+void vmw_kms_del_active(struct vmw_private *dev_priv,
+ struct vmw_display_unit *du);
+void vmw_kms_add_active(struct vmw_private *dev_priv,
+ struct vmw_display_unit *du,
+ struct vmw_framebuffer *vfb);
+bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc);
+void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
+ bool immutable);
+
/*
* Legacy display unit functions - vmwgfx_ldu.c
@@ -287,6 +302,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
unsigned num_clips, int increment,
bool interruptible,
struct vmw_fence_obj **out_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b6fa44fe8929..63ccd9871ec9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -288,6 +288,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
crtc->y = set->y;
crtc->mode = *mode;
crtc->enabled = true;
+ ldu->base.set_gui_x = set->x;
+ ldu->base.set_gui_y = set->y;
vmw_ldu_add_active(dev_priv, ldu, vfb);
@@ -375,8 +377,19 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_object_attach_property(&connector->base,
- dev->mode_config.dirty_info_property,
- 1);
+ dev->mode_config.dirty_info_property,
+ 1);
+ drm_object_attach_property(&connector->base,
+ dev_priv->hotplug_mode_update_property, 1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
+ if (dev_priv->implicit_placement_property)
+ drm_object_attach_property
+ (&connector->base,
+ dev_priv->implicit_placement_property,
+ 1);
return 0;
}
@@ -412,6 +425,8 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
if (ret != 0)
goto err_vblank_cleanup;
+ vmw_kms_create_implicit_placement_property(dev_priv, true);
+
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_ldu_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index c5a1a08b0449..0ea22fd112c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -74,19 +74,6 @@ struct vmw_kms_sou_dirty_cmd {
SVGA3dCmdBlitSurfaceToScreen body;
};
-
-/*
- * Other structs.
- */
-
-struct vmw_screen_object_display {
- unsigned num_implicit;
-
- struct vmw_framebuffer *implicit_fb;
- SVGAFifoCmdDefineGMRFB cur;
- struct vmw_dma_buffer *pinned_gmrfb;
-};
-
/**
* Display unit using screen objects.
*/
@@ -97,7 +84,6 @@ struct vmw_screen_object_unit {
struct vmw_dma_buffer *buffer; /**< Backing store buffer */
bool defined;
- bool active_implicit;
};
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
@@ -116,33 +102,6 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}
-static void vmw_sou_del_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou)
-{
- struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-
- if (sou->active_implicit) {
- if (--(ld->num_implicit) == 0)
- ld->implicit_fb = NULL;
- sou->active_implicit = false;
- }
-}
-
-static void vmw_sou_add_active(struct vmw_private *vmw_priv,
- struct vmw_screen_object_unit *sou,
- struct vmw_framebuffer *vfb)
-{
- struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-
- BUG_ON(!ld->num_implicit && ld->implicit_fb);
-
- if (!sou->active_implicit && sou->base.is_implicit) {
- ld->implicit_fb = vfb;
- sou->active_implicit = true;
- ld->num_implicit++;
- }
-}
-
/**
* Send the fifo command to create a screen.
*/
@@ -185,6 +144,8 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
cmd->obj.root.x = sou->base.gui_x;
cmd->obj.root.y = sou->base.gui_y;
}
+ sou->base.set_gui_x = cmd->obj.root.x;
+ sou->base.set_gui_y = cmd->obj.root.y;
/* Ok to assume that buffer is pinned in vram */
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
@@ -323,13 +284,13 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return -EINVAL;
}
- /* sou only supports one fb active at the time */
+ /* Only one active implicit frame-buffer at a time. */
if (sou->base.is_implicit &&
- dev_priv->sou_priv->implicit_fb && vfb &&
- !(dev_priv->sou_priv->num_implicit == 1 &&
- sou->active_implicit) &&
- dev_priv->sou_priv->implicit_fb != vfb) {
- DRM_ERROR("Multiple framebuffers not supported\n");
+ dev_priv->implicit_fb && vfb &&
+ !(dev_priv->num_implicit == 1 &&
+ sou->base.active_implicit) &&
+ dev_priv->implicit_fb != vfb) {
+ DRM_ERROR("Multiple implicit framebuffers not supported.\n");
return -EINVAL;
}
@@ -351,7 +312,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
crtc->y = 0;
crtc->enabled = false;
- vmw_sou_del_active(dev_priv, sou);
+ vmw_kms_del_active(dev_priv, &sou->base);
vmw_sou_backing_free(dev_priv, sou);
@@ -415,7 +376,7 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return ret;
}
- vmw_sou_add_active(dev_priv, sou, vfb);
+ vmw_kms_add_active(dev_priv, &sou->base, vfb);
connector->encoder = encoder;
encoder->crtc = crtc;
@@ -428,39 +389,6 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
return 0;
}
-/**
- * Returns if this unit can be page flipped.
- * Must be called with the mode_config mutex held.
- */
-static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
-
- if (!sou->base.is_implicit)
- return true;
-
- if (dev_priv->sou_priv->num_implicit != 1)
- return false;
-
- return true;
-}
-
-/**
- * Update the implicit fb to the current fb of this crtc.
- * Must be called with the mode_config mutex held.
- */
-static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
-
- BUG_ON(!sou->base.is_implicit);
-
- dev_priv->sou_priv->implicit_fb =
- vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
-}
-
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -470,30 +398,27 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct vmw_fence_obj *fence = NULL;
- struct drm_clip_rect clips;
+ struct drm_vmw_rect vclips;
int ret;
- /* require ScreenObject support for page flipping */
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
- if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
+ if (!vmw_kms_crtc_flippable(dev_priv, crtc))
return -EINVAL;
crtc->primary->fb = fb;
/* do a full screen dirty update */
- clips.x1 = clips.y1 = 0;
- clips.x2 = fb->width;
- clips.y2 = fb->height;
+ vclips.x = crtc->x;
+ vclips.y = crtc->y;
+ vclips.w = crtc->mode.hdisplay;
+ vclips.h = crtc->mode.vdisplay;
if (vfb->dmabuf)
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
- &clips, 1, 1,
+ NULL, &vclips, 1, 1,
true, &fence);
else
ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
- &clips, NULL, NULL,
+ NULL, &vclips, NULL,
0, 0, 1, 1, &fence);
@@ -521,7 +446,7 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
vmw_fence_obj_unreference(&fence);
if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_sou_update_implicit_fb(dev_priv, crtc);
+ vmw_kms_update_implicit_fb(dev_priv, crtc);
return ret;
@@ -586,13 +511,12 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
encoder = &sou->base.encoder;
connector = &sou->base.connector;
- sou->active_implicit = false;
-
+ sou->base.active_implicit = false;
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
sou->base.pref_mode = NULL;
- sou->base.is_implicit = true;
+ sou->base.is_implicit = false;
drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -611,8 +535,19 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_object_attach_property(&connector->base,
- dev->mode_config.dirty_info_property,
- 1);
+ dev->mode_config.dirty_info_property,
+ 1);
+ drm_object_attach_property(&connector->base,
+ dev_priv->hotplug_mode_update_property, 1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
+ if (dev_priv->implicit_placement_property)
+ drm_object_attach_property
+ (&connector->base,
+ dev_priv->implicit_placement_property,
+ sou->base.is_implicit);
return 0;
}
@@ -622,11 +557,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
int i, ret;
- if (dev_priv->sou_priv) {
- DRM_INFO("sou system already on\n");
- return -EINVAL;
- }
-
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
DRM_INFO("Not using screen objects,"
" missing cap SCREEN_OBJECT_2\n");
@@ -634,21 +564,19 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
}
ret = -ENOMEM;
- dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
- if (unlikely(!dev_priv->sou_priv))
- goto err_no_mem;
-
- dev_priv->sou_priv->num_implicit = 0;
- dev_priv->sou_priv->implicit_fb = NULL;
+ dev_priv->num_implicit = 0;
+ dev_priv->implicit_fb = NULL;
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
if (unlikely(ret != 0))
- goto err_free;
+ return ret;
ret = drm_mode_create_dirty_info_property(dev);
if (unlikely(ret != 0))
goto err_vblank_cleanup;
+ vmw_kms_create_implicit_placement_property(dev_priv, false);
+
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
@@ -660,10 +588,6 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
err_vblank_cleanup:
drm_vblank_cleanup(dev);
-err_free:
- kfree(dev_priv->sou_priv);
- dev_priv->sou_priv = NULL;
-err_no_mem:
return ret;
}
@@ -671,13 +595,8 @@ int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
drm_vblank_cleanup(dev);
- kfree(dev_priv->sou_priv);
-
return 0;
}
@@ -738,6 +657,11 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
int i;
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
cmd->header.size = sizeof(cmd->body) + region_size;
@@ -875,6 +799,11 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
*/
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
vmw_fifo_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_dmabuf_blit) *
dirty->num_hits);
@@ -909,6 +838,8 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
* @dev_priv: Pointer to the device private structure.
* @framebuffer: Pointer to the dma-buffer backed framebuffer.
* @clips: Array of clip rects.
+ * @vclips: Alternate array of clip rects. Either @clips or @vclips must
+ * be NULL.
* @num_clips: Number of clip rects in @clips.
* @increment: Increment to use when looping over @clips.
* @interruptible: Whether to perform waits interruptible if possible.
@@ -922,6 +853,7 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
unsigned num_clips, int increment,
bool interruptible,
struct vmw_fence_obj **out_fence)
@@ -945,7 +877,7 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
dirty.clip = vmw_sou_dmabuf_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
num_clips;
- ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
+ ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
0, 0, num_clips, increment, &dirty);
vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
@@ -967,6 +899,11 @@ out_revert:
*/
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
+ if (!dirty->num_hits) {
+ vmw_fifo_commit(dirty->dev_priv, 0);
+ return;
+ }
+
vmw_fifo_commit(dirty->dev_priv,
sizeof(struct vmw_kms_sou_readback_blit) *
dirty->num_hits);
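All three fifo-commit callbacks in this file now start with the same guard: when no clip produced a blit, they commit zero bytes and return. Committing zero bytes still releases the FIFO space reserved before the clip loop, so the reservation is never left dangling. A runnable sketch of that reserve/commit contract, with invented demo_* stand-ins for the driver's FIFO calls.

#include <stdio.h>
#include <stddef.h>

/* Stand-in FIFO with a single outstanding reservation (illustrative only). */
static size_t demo_reserved;

static void demo_fifo_reserve(size_t bytes)
{
	demo_reserved = bytes;           /* space handed to the caller */
}

static void demo_fifo_commit(size_t bytes)
{
	printf("commit %zu of %zu reserved bytes\n", bytes, demo_reserved);
	demo_reserved = 0;               /* reservation released either way */
}

static void demo_commit_blits(size_t num_hits, size_t blit_size)
{
	if (!num_hits) {
		/* Nothing hit any clip: a zero-byte commit still releases
		 * the reservation taken before the clip loop. */
		demo_fifo_commit(0);
		return;
	}
	demo_fifo_commit(num_hits * blit_size);
}

int main(void)
{
	demo_fifo_reserve(4 * 32);
	demo_commit_blits(0, 32);        /* the case the patch now handles */
	demo_fifo_reserve(4 * 32);
	demo_commit_blits(3, 32);
	return 0;
}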
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 4ef5ffd7189d..b949102ad864 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -96,7 +96,6 @@ struct vmw_stdu_surface_copy {
* content_vfbs dimensions, then this is a pointer into the
* corresponding field in content_vfbs. If not, then this
 * is a separate buffer to which content_vfbs will blit.
- * @content_fb: holds the rendered content, can be a surface or DMA buffer
* @content_type: content_fb type
* @defined: true if the current display unit has been initialized
*/
@@ -104,8 +103,6 @@ struct vmw_screen_target_display_unit {
struct vmw_display_unit base;
struct vmw_surface *display_srf;
- struct drm_framebuffer *content_fb;
-
enum stdu_content_type content_fb_type;
bool defined;
@@ -122,22 +119,6 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
*****************************************************************************/
/**
- * vmw_stdu_pin_display - pins the resource associated with the display surface
- *
- * @stdu: contains the display surface
- *
- * Since the display surface can either be a private surface allocated by us,
- * or it can point to the content surface, we use this function to not pin the
- * same resource twice.
- */
-static int vmw_stdu_pin_display(struct vmw_screen_target_display_unit *stdu)
-{
- return vmw_resource_pin(&stdu->display_srf->res, false);
-}
-
-
-
-/**
* vmw_stdu_unpin_display - unpins the resource associated with display surface
*
* @stdu: contains the display surface
@@ -153,13 +134,7 @@ static void vmw_stdu_unpin_display(struct vmw_screen_target_display_unit *stdu)
struct vmw_resource *res = &stdu->display_srf->res;
vmw_resource_unpin(res);
-
- if (stdu->content_fb_type != SAME_AS_DISPLAY) {
- vmw_resource_unreference(&res);
- stdu->content_fb_type = SAME_AS_DISPLAY;
- }
-
- stdu->display_srf = NULL;
+ vmw_surface_unreference(&stdu->display_srf);
}
}
@@ -185,6 +160,9 @@ static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
*
* @dev_priv: VMW DRM device
* @stdu: display unit to create a Screen Target for
+ * @mode: The mode to set.
+ * @crtc_x: X coordinate of screen target relative to framebuffer origin.
+ * @crtc_y: Y coordinate of screen target relative to framebuffer origin.
*
 * Creates an STDU that we can use later. This function is called whenever the
* framebuffer size changes.
@@ -193,7 +171,9 @@ static void vmw_stdu_crtc_destroy(struct drm_crtc *crtc)
* 0 on success, error code on failure
*/
static int vmw_stdu_define_st(struct vmw_private *dev_priv,
- struct vmw_screen_target_display_unit *stdu)
+ struct vmw_screen_target_display_unit *stdu,
+ struct drm_display_mode *mode,
+ int crtc_x, int crtc_y)
{
struct {
SVGA3dCmdHeader header;
@@ -211,17 +191,19 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.stid = stdu->base.unit;
- cmd->body.width = stdu->display_srf->base_size.width;
- cmd->body.height = stdu->display_srf->base_size.height;
+ cmd->body.width = mode->hdisplay;
+ cmd->body.height = mode->vdisplay;
cmd->body.flags = (0 == cmd->body.stid) ? SVGA_STFLAG_PRIMARY : 0;
cmd->body.dpi = 0;
- cmd->body.xRoot = stdu->base.crtc.x;
- cmd->body.yRoot = stdu->base.crtc.y;
-
- if (!stdu->base.is_implicit) {
+ if (stdu->base.is_implicit) {
+ cmd->body.xRoot = crtc_x;
+ cmd->body.yRoot = crtc_y;
+ } else {
cmd->body.xRoot = stdu->base.gui_x;
cmd->body.yRoot = stdu->base.gui_y;
}
+ stdu->base.set_gui_x = cmd->body.xRoot;
+ stdu->base.set_gui_y = cmd->body.yRoot;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -392,126 +374,43 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
return ret;
}
-
-
/**
- * vmw_stdu_crtc_set_config - Sets a mode
+ * vmw_stdu_bind_fb - Bind an fb to a defined screen target
*
- * @set: mode parameters
- *
- * This function is the device-specific portion of the DRM CRTC mode set.
- * For the SVGA device, we do this by defining a Screen Target, binding a
- * GB Surface to that target, and finally update the screen target.
+ * @dev_priv: Pointer to a device private struct.
+ * @crtc: The crtc holding the screen target.
+ * @mode: The mode currently used by the screen target. Must be non-NULL.
+ * @new_fb: The new framebuffer to bind. Must be non-NULL.
*
* RETURNS:
- * 0 on success, error code otherwise
+ * 0 on success, error code on failure.
*/
-static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *new_fb)
{
- struct vmw_private *dev_priv;
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer *vfb;
+ struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
+ struct vmw_surface *new_display_srf = NULL;
+ enum stdu_content_type new_content_type;
struct vmw_framebuffer_surface *new_vfbs;
- struct drm_display_mode *mode;
- struct drm_framebuffer *new_fb;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
-
- if (!set || !set->crtc)
- return -EINVAL;
-
- crtc = set->crtc;
- crtc->x = set->x;
- crtc->y = set->y;
- stdu = vmw_crtc_to_stdu(crtc);
- mode = set->mode;
- new_fb = set->fb;
- dev_priv = vmw_priv(crtc->dev);
-
-
- if (set->num_connectors > 1) {
- DRM_ERROR("Too many connectors\n");
- return -EINVAL;
- }
-
- if (set->num_connectors == 1 &&
- set->connectors[0] != &stdu->base.connector) {
- DRM_ERROR("Connectors don't match %p %p\n",
- set->connectors[0], &stdu->base.connector);
- return -EINVAL;
- }
-
-
- /* Since they always map one to one these are safe */
- connector = &stdu->base.connector;
- encoder = &stdu->base.encoder;
-
-
- /*
- * After this point the CRTC will be considered off unless a new fb
- * is bound
- */
- if (stdu->defined) {
- /* Unbind current surface by binding an invalid one */
- ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
- if (unlikely(ret != 0))
- return ret;
-
- /* Update Screen Target, display will now be blank */
- if (crtc->primary->fb) {
- vmw_stdu_update_st(dev_priv, stdu);
- if (unlikely(ret != 0))
- return ret;
- }
-
- crtc->primary->fb = NULL;
- crtc->enabled = false;
- encoder->crtc = NULL;
- connector->encoder = NULL;
-
- vmw_stdu_unpin_display(stdu);
- stdu->content_fb = NULL;
- stdu->content_fb_type = SAME_AS_DISPLAY;
-
- ret = vmw_stdu_destroy_st(dev_priv, stdu);
- /* The hardware is hung, give up */
- if (unlikely(ret != 0))
- return ret;
- }
-
-
- /* Any of these conditions means the caller wants CRTC off */
- if (set->num_connectors == 0 || !mode || !new_fb)
- return 0;
-
-
- if (set->x + mode->hdisplay > new_fb->width ||
- set->y + mode->vdisplay > new_fb->height) {
- DRM_ERROR("Set outside of framebuffer\n");
- return -EINVAL;
- }
+ int ret;
- stdu->content_fb = new_fb;
- vfb = vmw_framebuffer_to_vfb(stdu->content_fb);
+ WARN_ON_ONCE(!stdu->defined);
- if (vfb->dmabuf)
- stdu->content_fb_type = SEPARATE_DMA;
+ if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
+ new_fb->height == mode->vdisplay)
+ new_content_type = SAME_AS_DISPLAY;
+ else if (vfb->dmabuf)
+ new_content_type = SEPARATE_DMA;
+ else
+ new_content_type = SEPARATE_SURFACE;
- /*
- * If the requested mode is different than the width and height
- * of the FB or if the content buffer is a DMA buf, then allocate
- * a display FB that matches the dimension of the mode
- */
- if (mode->hdisplay != new_fb->width ||
- mode->vdisplay != new_fb->height ||
- stdu->content_fb_type != SAME_AS_DISPLAY) {
+ if (new_content_type != SAME_AS_DISPLAY &&
+ !stdu->display_srf) {
struct vmw_surface content_srf;
struct drm_vmw_size display_base_size = {0};
- struct vmw_surface *display_srf;
-
display_base_size.width = mode->hdisplay;
display_base_size.height = mode->vdisplay;
@@ -521,7 +420,7 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
* If content buffer is a DMA buf, then we have to construct
* surface info
*/
- if (stdu->content_fb_type == SEPARATE_DMA) {
+ if (new_content_type == SEPARATE_DMA) {
switch (new_fb->bits_per_pixel) {
case 32:
@@ -538,17 +437,13 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
default:
DRM_ERROR("Invalid format\n");
- ret = -EINVAL;
- goto err_unref_content;
+ return -EINVAL;
}
content_srf.flags = 0;
content_srf.mip_levels[0] = 1;
content_srf.multisample_count = 0;
} else {
-
- stdu->content_fb_type = SEPARATE_SURFACE;
-
new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
content_srf = *new_vfbs->surface;
}
@@ -563,26 +458,136 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
content_srf.multisample_count,
0,
display_base_size,
- &display_srf);
+ &new_display_srf);
if (unlikely(ret != 0)) {
- DRM_ERROR("Cannot allocate a display FB.\n");
- goto err_unref_content;
+ DRM_ERROR("Could not allocate screen target surface.\n");
+ return ret;
}
-
- stdu->display_srf = display_srf;
- } else {
+ } else if (new_content_type == SAME_AS_DISPLAY) {
new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
- stdu->display_srf = new_vfbs->surface;
+ new_display_srf = vmw_surface_reference(new_vfbs->surface);
}
+ if (new_display_srf) {
+ /* Pin new surface before flipping */
+ ret = vmw_resource_pin(&new_display_srf->res, false);
+ if (ret)
+ goto out_srf_unref;
+
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &new_display_srf->res);
+ if (ret)
+ goto out_srf_unpin;
+
+ /* Unpin and unreference old surface */
+ vmw_stdu_unpin_display(stdu);
- ret = vmw_stdu_pin_display(stdu);
- if (unlikely(ret != 0)) {
- stdu->display_srf = NULL;
- goto err_unref_content;
+ /* Transfer the reference */
+ stdu->display_srf = new_display_srf;
+ new_display_srf = NULL;
}
- vmw_svga_enable(dev_priv);
+ crtc->primary->fb = new_fb;
+ stdu->content_fb_type = new_content_type;
+ return 0;
+
+out_srf_unpin:
+ vmw_resource_unpin(&new_display_srf->res);
+out_srf_unref:
+ vmw_surface_unreference(&new_display_srf);
+ return ret;
+}
+
+/**
+ * vmw_stdu_crtc_set_config - Sets a mode
+ *
+ * @set: mode parameters
+ *
+ * This function is the device-specific portion of the DRM CRTC mode set.
+ * For the SVGA device, we do this by defining a Screen Target, binding a
+ * GB Surface to that target, and finally updating the screen target.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
+{
+ struct vmw_private *dev_priv;
+ struct vmw_framebuffer *vfb;
+ struct vmw_screen_target_display_unit *stdu;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *new_fb;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ bool turning_off;
+ int ret;
+
+
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ crtc = set->crtc;
+ stdu = vmw_crtc_to_stdu(crtc);
+ mode = set->mode;
+ new_fb = set->fb;
+ dev_priv = vmw_priv(crtc->dev);
+ turning_off = set->num_connectors == 0 || !mode || !new_fb;
+ vfb = (new_fb) ? vmw_framebuffer_to_vfb(new_fb) : NULL;
+
+ if (set->num_connectors > 1) {
+ DRM_ERROR("Too many connectors\n");
+ return -EINVAL;
+ }
+
+ if (set->num_connectors == 1 &&
+ set->connectors[0] != &stdu->base.connector) {
+ DRM_ERROR("Connectors don't match %p %p\n",
+ set->connectors[0], &stdu->base.connector);
+ return -EINVAL;
+ }
+
+ if (!turning_off && (set->x + mode->hdisplay > new_fb->width ||
+ set->y + mode->vdisplay > new_fb->height)) {
+ DRM_ERROR("Set outside of framebuffer\n");
+ return -EINVAL;
+ }
+
+ /* Only one active implicit frame-buffer at a time. */
+ if (!turning_off && stdu->base.is_implicit && dev_priv->implicit_fb &&
+ !(dev_priv->num_implicit == 1 && stdu->base.active_implicit)
+ && dev_priv->implicit_fb != vfb) {
+ DRM_ERROR("Multiple implicit framebuffers not supported.\n");
+ return -EINVAL;
+ }
+
+ /* Since they always map one to one these are safe */
+ connector = &stdu->base.connector;
+ encoder = &stdu->base.encoder;
+
+ if (stdu->defined) {
+ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (ret)
+ return ret;
+
+ vmw_stdu_unpin_display(stdu);
+ (void) vmw_stdu_update_st(dev_priv, stdu);
+ vmw_kms_del_active(dev_priv, &stdu->base);
+
+ ret = vmw_stdu_destroy_st(dev_priv, stdu);
+ if (ret)
+ return ret;
+
+ crtc->primary->fb = NULL;
+ crtc->enabled = false;
+ encoder->crtc = NULL;
+ connector->encoder = NULL;
+ stdu->content_fb_type = SAME_AS_DISPLAY;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ }
+
+ if (turning_off)
+ return 0;
/*
* Steps to displaying a surface, assume surface is already
@@ -592,35 +597,33 @@ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
* 3. update that screen target (this is done later by
* vmw_kms_stdu_do_surface_dirty_or_present)
*/
- ret = vmw_stdu_define_st(dev_priv, stdu);
- if (unlikely(ret != 0))
- goto err_unpin_display_and_content;
+ /*
+ * Note on error handling: We can't really restore the crtc to
+ * it's original state on error, but we at least update the
+ * current state to what's submitted to hardware to enable
+ * future recovery.
+ */
+ vmw_svga_enable(dev_priv);
+ ret = vmw_stdu_define_st(dev_priv, stdu, mode, set->x, set->y);
+ if (ret)
+ return ret;
- ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
- if (unlikely(ret != 0))
- goto err_unpin_destroy_st;
+ crtc->x = set->x;
+ crtc->y = set->y;
+ crtc->mode = *mode;
+ ret = vmw_stdu_bind_fb(dev_priv, crtc, mode, new_fb);
+ if (ret)
+ return ret;
+ vmw_kms_add_active(dev_priv, &stdu->base, vfb);
+ crtc->enabled = true;
connector->encoder = encoder;
encoder->crtc = crtc;
- crtc->mode = *mode;
- crtc->primary->fb = new_fb;
- crtc->enabled = true;
-
- return ret;
-
-err_unpin_destroy_st:
- vmw_stdu_destroy_st(dev_priv, stdu);
-err_unpin_display_and_content:
- vmw_stdu_unpin_display(stdu);
-err_unref_content:
- stdu->content_fb = NULL;
- return ret;
+ return 0;
}
-
-
/**
* vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
*
@@ -648,59 +651,34 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_screen_target_display_unit *stdu;
+ struct drm_vmw_rect vclips;
+ struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
int ret;
- if (crtc == NULL)
- return -EINVAL;
-
dev_priv = vmw_priv(crtc->dev);
stdu = vmw_crtc_to_stdu(crtc);
- crtc->primary->fb = new_fb;
- stdu->content_fb = new_fb;
-
- if (stdu->display_srf) {
- /*
- * If the display surface is the same as the content surface
- * then remove the reference
- */
- if (stdu->content_fb_type == SAME_AS_DISPLAY) {
- if (stdu->defined) {
- /* Unbind the current surface */
- ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
- if (unlikely(ret != 0))
- goto err_out;
- }
- vmw_stdu_unpin_display(stdu);
- stdu->display_srf = NULL;
- }
- }
-
-
- if (!new_fb) {
- /* Blanks the display */
- (void) vmw_stdu_update_st(dev_priv, stdu);
-
- return 0;
- }
+ if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
+ return -EINVAL;
- if (stdu->content_fb_type == SAME_AS_DISPLAY) {
- stdu->display_srf = vmw_framebuffer_to_vfbs(new_fb)->surface;
- ret = vmw_stdu_pin_display(stdu);
- if (ret) {
- stdu->display_srf = NULL;
- goto err_out;
- }
+ ret = vmw_stdu_bind_fb(dev_priv, crtc, &crtc->mode, new_fb);
+ if (ret)
+ return ret;
- /* Bind display surface */
- ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
- if (unlikely(ret != 0))
- goto err_unpin_display_and_content;
- }
+ if (stdu->base.is_implicit)
+ vmw_kms_update_implicit_fb(dev_priv, crtc);
- /* Update display surface: after this point everything is bound */
- ret = vmw_stdu_update_st(dev_priv, stdu);
- if (unlikely(ret != 0))
+ vclips.x = crtc->x;
+ vclips.y = crtc->y;
+ vclips.w = crtc->mode.hdisplay;
+ vclips.h = crtc->mode.vdisplay;
+ if (vfb->dmabuf)
+ ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL, &vclips,
+ 1, 1, true, false);
+ else
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, &vclips,
+ NULL, 0, 0, 1, 1, NULL);
+ if (ret)
return ret;
if (event) {
@@ -721,14 +699,7 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
vmw_fifo_flush(dev_priv, false);
}
- return ret;
-
-err_unpin_display_and_content:
- vmw_stdu_unpin_display(stdu);
-err_out:
- crtc->primary->fb = NULL;
- stdu->content_fb = NULL;
- return ret;
+ return 0;
}
@@ -1138,7 +1109,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
stdu->base.pref_active = (unit == 0);
stdu->base.pref_width = dev_priv->initial_width;
stdu->base.pref_height = dev_priv->initial_height;
- stdu->base.is_implicit = true;
+ stdu->base.is_implicit = false;
drm_connector_init(dev, connector, &vmw_stdu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -1159,7 +1130,17 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
-
+ drm_object_attach_property(&connector->base,
+ dev_priv->hotplug_mode_update_property, 1);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
+ if (dev_priv->implicit_placement_property)
+ drm_object_attach_property
+ (&connector->base,
+ dev_priv->implicit_placement_property,
+ stdu->base.is_implicit);
return 0;
}
@@ -1224,6 +1205,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
+ vmw_kms_create_implicit_placement_property(dev_priv, false);
+
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
ret = vmw_stdu_init(dev_priv, i);
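The rewritten STDU bind path (vmw_stdu_bind_fb above) pins and binds the new display surface before unpinning the old one, so a failure at any step leaves the previous binding intact. A self-contained sketch of that swap-with-rollback ordering; the demo_* names are invented, and the pin here always succeeds for brevity.

#include <stdio.h>

/* Illustrative resource with a pin count; not the real vmw_resource. */
struct demo_res {
	const char *name;
	int pins;
};

static int demo_pin(struct demo_res *r)
{
	r->pins++;
	return 0;                        /* pretend pinning succeeded */
}

static void demo_unpin(struct demo_res *r)
{
	r->pins--;
}

/* Swap the bound surface: acquire the new one fully before releasing
 * the old, so any failure leaves *bound untouched. */
static int demo_rebind(struct demo_res **bound, struct demo_res *new_res)
{
	int ret = demo_pin(new_res);
	if (ret)
		return ret;              /* old binding still valid */

	if (*bound)
		demo_unpin(*bound);
	*bound = new_res;                /* transfer ownership */
	return 0;
}

int main(void)
{
	struct demo_res a = { "old", 0 }, b = { "new", 0 };
	struct demo_res *bound = NULL;

	demo_rebind(&bound, &a);
	demo_rebind(&bound, &b);
	printf("bound=%s old.pins=%d new.pins=%d\n",
	       bound->name, a.pins, b.pins);
	return 0;
}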
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index dd2dbb9746ce..c27858ae0552 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -83,8 +83,10 @@ static int host1x_device_parse_dt(struct host1x_device *device,
if (of_match_node(driver->subdevs, np) &&
of_device_is_available(np)) {
err = host1x_subdev_add(device, np);
- if (err < 0)
+ if (err < 0) {
+ of_node_put(np);
return err;
+ }
}
}
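The host1x fix above addresses a common device-tree iteration bug: for_each_child_of_node() takes a reference on each visited node and drops it on the next iteration, so returning from inside the loop must call of_node_put() on the node still held. A standalone sketch of the refcount discipline, using an invented demo_node in place of struct device_node.

#include <stdio.h>

/* Stand-in for a device-tree node refcount (illustrative only). */
struct demo_node {
	int refcount;
};

static struct demo_node *demo_get(struct demo_node *np)
{
	np->refcount++;
	return np;
}

static void demo_put(struct demo_node *np)
{
	np->refcount--;
}

/* Iterating children takes a reference per visited node; an early
 * return must drop the one still held, as the host1x fix does. */
static int demo_scan(struct demo_node *children, int n, int fail_at)
{
	for (int i = 0; i < n; i++) {
		struct demo_node *np = demo_get(&children[i]);

		if (i == fail_at) {
			demo_put(np);    /* the previously leaked reference */
			return -1;
		}
		demo_put(np);            /* normally dropped by the iterator */
	}
	return 0;
}

int main(void)
{
	struct demo_node kids[3] = { {0}, {0}, {0} };

	demo_scan(kids, 3, 1);
	printf("refcounts: %d %d %d\n",
	       kids[0].refcount, kids[1].refcount, kids[2].refcount);
	return 0;
}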
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 5a8c8d55317a..a18db4d5347c 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -52,8 +52,8 @@ static void host1x_pushbuffer_destroy(struct push_buffer *pb)
struct host1x *host1x = cdma_to_host1x(cdma);
if (pb->phys != 0)
- dma_free_writecombine(host1x->dev, pb->size_bytes + 4,
- pb->mapped, pb->phys);
+ dma_free_wc(host1x->dev, pb->size_bytes + 4, pb->mapped,
+ pb->phys);
pb->mapped = NULL;
pb->phys = 0;
@@ -76,8 +76,8 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
pb->pos = 0;
/* allocate and map pushbuffer memory */
- pb->mapped = dma_alloc_writecombine(host1x->dev, pb->size_bytes + 4,
- &pb->phys, GFP_KERNEL);
+ pb->mapped = dma_alloc_wc(host1x->dev, pb->size_bytes + 4, &pb->phys,
+ GFP_KERNEL);
if (!pb->mapped)
goto fail;
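dma_alloc_writecombine()/dma_free_writecombine() were renamed to dma_alloc_wc()/dma_free_wc(); the semantics are unchanged, and the size/address pair passed to free must match the allocation. A hedged kernel-style sketch of the pairing; the demo_* functions are invented and error handling is trimmed.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static void *demo_buf;
static dma_addr_t demo_phys;

static int demo_alloc(struct device *dev, size_t size)
{
	/* Write-combined, CPU-mapped DMA memory; phys handle via demo_phys. */
	demo_buf = dma_alloc_wc(dev, size, &demo_phys, GFP_KERNEL);
	if (!demo_buf)
		return -ENOMEM;
	return 0;
}

static void demo_free(struct device *dev, size_t size)
{
	/* Must mirror the alloc: same device, size, mapping, and handle. */
	dma_free_wc(dev, size, demo_buf, demo_phys);
	demo_buf = NULL;
}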
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 63bd63f3c7df..b4515d544039 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -225,7 +225,7 @@ unpin:
return 0;
}
-static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
{
int i = 0;
u32 last_page = ~0;
@@ -467,9 +467,8 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
size += g->words * sizeof(u32);
}
- job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
- &job->gather_copy,
- GFP_KERNEL);
+ job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
+ GFP_KERNEL);
if (!job->gather_copy_mapped) {
job->gather_copy_mapped = NULL;
return -ENOMEM;
@@ -578,9 +577,8 @@ void host1x_job_unpin(struct host1x_job *job)
job->num_unpins = 0;
if (job->gather_copy_size)
- dma_free_writecombine(job->channel->dev, job->gather_copy_size,
- job->gather_copy_mapped,
- job->gather_copy);
+ dma_free_wc(job->channel->dev, job->gather_copy_size,
+ job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 883a314cd83a..6494a4d28171 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format)
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved);
void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
- u32 pixel_format, int stride,
- int u_offset, int v_offset)
+ unsigned int uv_stride,
+ unsigned int u_offset, unsigned int v_offset)
{
- switch (pixel_format) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YUV422P:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
- break;
- case V4L2_PIX_FMT_YVU420:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
- break;
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
- ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1);
- ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
- ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8);
- break;
- }
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
+ ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full);
void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch,
u32 pixel_format, int stride, int height)
{
- int u_offset, v_offset;
+ int fourcc, u_offset, v_offset;
int uv_stride = 0;
- switch (pixel_format) {
- case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
+ fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format);
+ switch (fourcc) {
+ case DRM_FORMAT_YUV420:
uv_stride = stride / 2;
u_offset = stride * height;
v_offset = u_offset + (uv_stride * height / 2);
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, v_offset);
break;
- case V4L2_PIX_FMT_YUV422P:
+ case DRM_FORMAT_YVU420:
+ uv_stride = stride / 2;
+ v_offset = stride * height;
+ u_offset = v_offset + (uv_stride * height / 2);
+ break;
+ case DRM_FORMAT_YUV422:
uv_stride = stride / 2;
u_offset = stride * height;
v_offset = u_offset + (uv_stride * height);
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, v_offset);
break;
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV16:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ uv_stride = stride;
u_offset = stride * height;
- ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride,
- u_offset, 0);
+ v_offset = 0;
break;
+ default:
+ return;
}
+ ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset);
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar);
@@ -684,17 +672,25 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUV420:
- case V4L2_PIX_FMT_YVU420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = U_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
v_offset = V_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
+ case V4L2_PIX_FMT_YVU420:
+ offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
+ u_offset = U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
+ v_offset, u_offset);
+ break;
case V4L2_PIX_FMT_YUV422P:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
u_offset = U2_OFFSET(pix, image->rect.left,
@@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
v_offset = V2_OFFSET(pix, image->rect.left,
image->rect.top) - offset;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV12:
@@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
image->rect.top) - offset;
v_offset = 0;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV16:
@@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
image->rect.top) - offset;
v_offset = 0;
- ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat,
- pix->bytesperline,
+ ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_UYVY:
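After this change, ipu_cpmem_set_yuv_planar_full() takes precomputed uv_stride/u_offset/v_offset and the per-format arithmetic moves to the callers, as the hunks above show. A runnable sketch of that offset math for three of the formats; the demo_* names and enum are invented for illustration. For YUV420 at stride 640 and height 480: u_offset = 640*480 = 307200, uv_stride = 320, v_offset = 307200 + 320*240 = 384000.

#include <stdio.h>

enum demo_fmt { DEMO_YUV420, DEMO_YVU420, DEMO_NV12 };

static void demo_offsets(enum demo_fmt fmt, int stride, int height,
			 int *uv_stride, int *u_off, int *v_off)
{
	switch (fmt) {
	case DEMO_YUV420:
		*uv_stride = stride / 2;
		*u_off = stride * height;                  /* U plane after Y */
		*v_off = *u_off + *uv_stride * height / 2; /* V after U       */
		break;
	case DEMO_YVU420:                                  /* planes swapped  */
		*uv_stride = stride / 2;
		*v_off = stride * height;
		*u_off = *v_off + *uv_stride * height / 2;
		break;
	case DEMO_NV12:                                    /* interleaved UV  */
		*uv_stride = stride;
		*u_off = stride * height;
		*v_off = 0;
		break;
	}
}

int main(void)
{
	int uvs, u, v;

	demo_offsets(DEMO_YUV420, 640, 480, &uvs, &u, &v);
	printf("YUV420 640x480: uv_stride=%d u=%d v=%d\n", uvs, u, v);
	/* prints uv_stride=320 u=307200 v=384000 */
	return 0;
}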
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 042c3958e2a0..837b1ec22800 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -350,11 +350,13 @@ out:
}
EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
-int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
+void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
{
struct ipu_dmfc_priv *priv = dmfc->priv;
u32 dmfc_gen1;
+ mutex_lock(&priv->mutex);
+
dmfc_gen1 = readl(priv->base + DMFC_GENERAL1);
if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines)
@@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width)
writel(dmfc_gen1, priv->base + DMFC_GENERAL1);
- return 0;
+ mutex_unlock(&priv->mutex);
}
-EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel);
+EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot);
struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel)
{
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 513a16cc6e18..411722570035 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -196,6 +196,12 @@ config HID_PRODIKEYS
multimedia keyboard, but will lack support for the musical keyboard
and some additional multimedia keys.
+config HID_CMEDIA
+ tristate "CMedia CM6533 HID audio jack controls"
+ depends on HID
+ ---help---
+ Support for CMedia CM6533 HID audio jack controls.
+
config HID_CP2112
tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
depends on USB_HID && I2C && GPIOLIB
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 00011fee08b9..be56ab6f75a8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
+obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o
obj-$(CONFIG_HID_CORSAIR) += hid-corsair.o
obj-$(CONFIG_HID_CP2112) += hid-cp2112.o
obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o
diff --git a/drivers/hid/hid-cmedia.c b/drivers/hid/hid-cmedia.c
new file mode 100644
index 000000000000..7230f8513681
--- /dev/null
+++ b/drivers/hid/hid-cmedia.c
@@ -0,0 +1,168 @@
+/*
+ * HID driver for CMedia CM6533 audio jack controls
+ *
+ * Copyright (C) 2015 Ben Chen <ben_chen@bizlinktech.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Ben Chen");
+MODULE_DESCRIPTION("CM6533 HID jack controls");
+MODULE_LICENSE("GPL");
+
+#define CM6533_JD_TYPE_COUNT 1
+#define CM6533_JD_RAWEV_LEN 16
+#define CM6533_JD_SFX_OFFSET 8
+
+/*
+ * CM6533 audio jack HID raw events:
+ *
+ * Plug in:
+ * 01000600 002083xx 080008c0 10000000
+ * about 3 seconds later...
+ * 01000a00 002083xx 08000380 10000000
+ * 01000600 002083xx 08000380 10000000
+ *
+ * Plug out:
+ * 01000400 002083xx 080008c0 x0000000
+ */
+
+static const u8 ji_sfx[] = { 0x08, 0x00, 0x08, 0xc0 };
+static const u8 ji_in[] = { 0x01, 0x00, 0x06, 0x00 };
+static const u8 ji_out[] = { 0x01, 0x00, 0x04, 0x00 };
+
+static int jack_switch_types[CM6533_JD_TYPE_COUNT] = {
+ SW_HEADPHONE_INSERT,
+};
+
+struct cmhid {
+ struct input_dev *input_dev;
+ struct hid_device *hid;
+ unsigned short switch_map[CM6533_JD_TYPE_COUNT];
+};
+
+static void hp_ev(struct hid_device *hid, struct cmhid *cm, int value)
+{
+ input_report_switch(cm->input_dev, SW_HEADPHONE_INSERT, value);
+ input_sync(cm->input_dev);
+}
+
+static int cmhid_raw_event(struct hid_device *hid, struct hid_report *report,
+ u8 *data, int len)
+{
+ struct cmhid *cm = hid_get_drvdata(hid);
+
+ if (len != CM6533_JD_RAWEV_LEN)
+ goto out;
+ if (memcmp(data+CM6533_JD_SFX_OFFSET, ji_sfx, sizeof(ji_sfx)))
+ goto out;
+
+ if (!memcmp(data, ji_out, sizeof(ji_out))) {
+ hp_ev(hid, cm, 0);
+ goto out;
+ }
+ if (!memcmp(data, ji_in, sizeof(ji_in))) {
+ hp_ev(hid, cm, 1);
+ goto out;
+ }
+
+out:
+ return 0;
+}
+
+static int cmhid_input_configured(struct hid_device *hid,
+ struct hid_input *hidinput)
+{
+ struct input_dev *input_dev = hidinput->input;
+ struct cmhid *cm = hid_get_drvdata(hid);
+ int i;
+
+ cm->input_dev = input_dev;
+ memcpy(cm->switch_map, jack_switch_types, sizeof(cm->switch_map));
+ input_dev->evbit[0] = BIT(EV_SW);
+ for (i = 0; i < CM6533_JD_TYPE_COUNT; i++)
+ input_set_capability(cm->input_dev,
+ EV_SW, jack_switch_types[i]);
+ return 0;
+}
+
+static int cmhid_input_mapping(struct hid_device *hid,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ return -1;
+}
+
+static int cmhid_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+ int ret;
+ struct cmhid *cm;
+
+ cm = kzalloc(sizeof(struct cmhid), GFP_KERNEL);
+ if (!cm) {
+ ret = -ENOMEM;
+ goto allocfail;
+ }
+
+ cm->hid = hid;
+
+ hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
+ hid_set_drvdata(hid, cm);
+
+ ret = hid_parse(hid);
+ if (ret) {
+ hid_err(hid, "parse failed\n");
+ goto fail;
+ }
+
+ ret = hid_hw_start(hid, HID_CONNECT_DEFAULT | HID_CONNECT_HIDDEV_FORCE);
+ if (ret) {
+ hid_err(hid, "hw start failed\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ kfree(cm);
+allocfail:
+ return ret;
+}
+
+static void cmhid_remove(struct hid_device *hid)
+{
+ struct cmhid *cm = hid_get_drvdata(hid);
+
+ hid_hw_stop(hid);
+ kfree(cm);
+}
+
+static const struct hid_device_id cmhid_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, cmhid_devices);
+
+static struct hid_driver cmhid_driver = {
+ .name = "cm6533_jd",
+ .id_table = cmhid_devices,
+ .raw_event = cmhid_raw_event,
+ .input_configured = cmhid_input_configured,
+ .probe = cmhid_probe,
+ .remove = cmhid_remove,
+ .input_mapping = cmhid_input_mapping,
+};
+module_hid_driver(cmhid_driver);
+
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 7e89288b1537..4f9c5c6deaed 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1075,7 +1075,7 @@ static u32 s32ton(__s32 value, unsigned n)
* Extract/implement a data field from/to a little endian report (bit array).
*
* Code sort-of follows HID spec:
- * http://www.usb.org/developers/devclass_docs/HID1_11.pdf
+ * http://www.usb.org/developers/hidpage/HID1_11.pdf
*
* While the USB HID spec allows unlimited length bit fields in "report
* descriptors", most devices never use more than 16 bits.
@@ -1083,20 +1083,37 @@ static u32 s32ton(__s32 value, unsigned n)
* Search linux-kernel and linux-usb-devel archives for "hid-core extract".
*/
-__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
- unsigned offset, unsigned n)
-{
- u64 x;
+static u32 __extract(u8 *report, unsigned offset, int n)
+{
+ unsigned int idx = offset / 8;
+ unsigned int bit_nr = 0;
+ unsigned int bit_shift = offset % 8;
+ int bits_to_copy = 8 - bit_shift;
+ u32 value = 0;
+ u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
+
+ while (n > 0) {
+ value |= ((u32)report[idx] >> bit_shift) << bit_nr;
+ n -= bits_to_copy;
+ bit_nr += bits_to_copy;
+ bits_to_copy = 8;
+ bit_shift = 0;
+ idx++;
+ }
+
+ return value & mask;
+}
- if (n > 32)
+u32 hid_field_extract(const struct hid_device *hid, u8 *report,
+ unsigned offset, unsigned n)
+{
+ if (n > 32) {
hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
n, current->comm);
+ n = 32;
+ }
- report += offset >> 3; /* adjust byte index */
- offset &= 7; /* now only need bit offset into one byte */
- x = get_unaligned_le64(report);
- x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */
- return (u32) x;
+ return __extract(report, offset, n);
}
EXPORT_SYMBOL_GPL(hid_field_extract);
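The new __extract() above replaces a 64-bit unaligned load with a byte-wise loop, so fields near the end of a report no longer read past the buffer, and n is clamped to 32. A standalone, runnable copy of the loop with a small worked example; demo_extract is just a rename for experimenting outside the kernel.

#include <stdio.h>
#include <stdint.h>

static uint32_t demo_extract(const uint8_t *report, unsigned offset, int n)
{
	unsigned idx = offset / 8;
	unsigned bit_nr = 0;
	unsigned bit_shift = offset % 8;
	int bits_to_copy = 8 - bit_shift;
	uint32_t value = 0;
	uint32_t mask = n < 32 ? (1U << n) - 1 : ~0U;

	while (n > 0) {
		/* Take the usable bits of this byte, place them at bit_nr. */
		value |= ((uint32_t)report[idx] >> bit_shift) << bit_nr;
		n -= bits_to_copy;
		bit_nr += bits_to_copy;
		bits_to_copy = 8;
		bit_shift = 0;
		idx++;
	}
	return value & mask;
}

int main(void)
{
	/* 12-bit field at bit offset 4 of the little endian bit stream
	 * { 0xAB, 0xCD }: high nibble of byte 0 gives the low bits, so
	 * the field is (0xCD << 4) | 0xA = 0xCDA. */
	const uint8_t report[] = { 0xAB, 0xCD };

	printf("0x%03X\n", demo_extract(report, 4, 12)); /* 0xCDA */
	return 0;
}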
@@ -1106,31 +1123,56 @@ EXPORT_SYMBOL_GPL(hid_field_extract);
* The data mangled in the bit stream remains in little endian
* order the whole time. It make more sense to talk about
* endianness of register values by considering a register
- * a "cached" copy of the little endiad bit stream.
+ * a "cached" copy of the little endian bit stream.
*/
-static void implement(const struct hid_device *hid, __u8 *report,
- unsigned offset, unsigned n, __u32 value)
+
+static void __implement(u8 *report, unsigned offset, int n, u32 value)
+{
+ unsigned int idx = offset / 8;
+ unsigned int size = offset + n;
+ unsigned int bit_shift = offset % 8;
+ int bits_to_set = 8 - bit_shift;
+ u8 bit_mask = 0xff << bit_shift;
+
+ while (n - bits_to_set >= 0) {
+ report[idx] &= ~bit_mask;
+ report[idx] |= value << bit_shift;
+ value >>= bits_to_set;
+ n -= bits_to_set;
+ bits_to_set = 8;
+ bit_mask = 0xff;
+ bit_shift = 0;
+ idx++;
+ }
+
+	/* last partial byte: mask off bits beyond the field */
+ if (n) {
+ if (size % 8)
+ bit_mask &= (1U << (size % 8)) - 1;
+ report[idx] &= ~bit_mask;
+ report[idx] |= (value << bit_shift) & bit_mask;
+ }
+}
+
+static void implement(const struct hid_device *hid, u8 *report,
+ unsigned offset, unsigned n, u32 value)
{
- u64 x;
- u64 m = (1ULL << n) - 1;
+ u64 m;
- if (n > 32)
+ if (n > 32) {
hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
__func__, n, current->comm);
+ n = 32;
+ }
+ m = (1ULL << n) - 1;
if (value > m)
hid_warn(hid, "%s() called with too large value %d! (%s)\n",
__func__, value, current->comm);
WARN_ON(value > m);
value &= m;
- report += offset >> 3;
- offset &= 7;
-
- x = get_unaligned_le64(report);
- x &= ~(m << offset);
- x |= ((u64)value) << offset;
- put_unaligned_le64(x, report);
+ __implement(report, offset, n, value);
}
/*
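__implement() is the write-side mirror of __extract(): it clears the target bit range byte by byte and ors in the value, masking the final partial byte so neighbouring fields are untouched. A standalone, runnable copy with a round-trip check; demo_implement is a rename for experimenting outside the kernel.

#include <stdio.h>
#include <stdint.h>

static void demo_implement(uint8_t *report, unsigned offset, int n,
			   uint32_t value)
{
	unsigned idx = offset / 8;
	unsigned size = offset + n;
	unsigned bit_shift = offset % 8;
	int bits_to_set = 8 - bit_shift;
	uint8_t bit_mask = 0xff << bit_shift;

	while (n - bits_to_set >= 0) {
		/* Clear the destination bits, then or in this byte's share. */
		report[idx] &= ~bit_mask;
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_mask = 0xff;
		bit_shift = 0;
		idx++;
	}

	/* Final partial byte: keep bits beyond the field intact. */
	if (n) {
		if (size % 8)
			bit_mask &= (1U << (size % 8)) - 1;
		report[idx] &= ~bit_mask;
		report[idx] |= (value << bit_shift) & bit_mask;
	}
}

int main(void)
{
	uint8_t report[2] = { 0x0B, 0x00 };

	/* Write the 12-bit value 0xCDA at bit offset 4; the low nibble
	 * of byte 0 (0xB) must survive untouched. */
	demo_implement(report, 4, 12, 0xCDA);
	printf("%02X %02X\n", report[0], report[1]); /* AB CD */
	return 0;
}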
@@ -1251,6 +1293,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
/* Ignore report if ErrorRollOver */
if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
value[n] >= min && value[n] <= max &&
+ value[n] - min < field->maxusage &&
field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
goto exit;
}
@@ -1263,11 +1306,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
}
if (field->value[n] >= min && field->value[n] <= max
+ && field->value[n] - min < field->maxusage
&& field->usage[field->value[n] - min].hid
&& search(value, field->value[n], count))
hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
if (value[n] >= min && value[n] <= max
+ && value[n] - min < field->maxusage
&& field->usage[value[n] - min].hid
&& search(field->value, value[n], count))
hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
@@ -1891,6 +1936,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
@@ -1919,6 +1965,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
@@ -1932,6 +1979,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
@@ -2003,6 +2053,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
@@ -2051,6 +2102,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
{ }
};
@@ -2615,9 +2667,10 @@ int hid_add_device(struct hid_device *hdev)
/*
* Scan generic devices for group information
*/
- if (hid_ignore_special_drivers ||
- (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver))) {
+ if (hid_ignore_special_drivers) {
+ hdev->group = HID_GROUP_GENERIC;
+ } else if (!hdev->group &&
+ !hid_match_id(hdev, hid_have_special_driver)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 58551964ce86..717704e9ae07 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -595,6 +595,9 @@ static int corsair_input_mapping(struct hid_device *dev,
{
int gkey;
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD)
+ return 0;
+
gkey = corsair_usage_to_gkey(usage->hid & HID_USAGE);
if (gkey != 0) {
hid_map_usage_clear(input, usage, bit, max, EV_KEY,
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 1d78ba3b799e..8fd4bf77f264 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -151,7 +151,7 @@ static inline int drff_init(struct hid_device *hid)
* descriptor. In any case, it's a wonder it works on Windows.
*
* Usage Page (Desktop), ; Generic desktop controls (01h)
- * Usage (Joystik), ; Joystik (04h, application collection)
+ * Usage (Joystick), ; Joystick (04h, application collection)
* Collection (Application),
* Collection (Logical),
* Report Size (8),
@@ -207,7 +207,7 @@ static inline int drff_init(struct hid_device *hid)
/* Fixed report descriptor for PID 0x011 joystick */
static __u8 pid0011_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
- 0x09, 0x04, /* Usage (Joystik), */
+ 0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x14, /* Logical Minimum (0), */
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6ff6e78ac54..c6eaff5f8845 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -61,6 +61,9 @@
#define USB_VENDOR_ID_AIREN 0x1a2c
#define USB_DEVICE_ID_AIREN_SLIMPLUS 0x0002
+#define USB_VENDOR_ID_AKAI 0x2011
+#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
@@ -246,6 +249,7 @@
#define USB_VENDOR_ID_CMEDIA 0x0d8c
#define USB_DEVICE_ID_CM109 0x000e
+#define USB_DEVICE_ID_CM6533 0x0022
#define USB_VENDOR_ID_CODEMERCS 0x07c0
#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
@@ -672,6 +676,7 @@
#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
#define USB_DEVICE_ID_MS_OFFICE_KB 0x0048
#define USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0 0x009d
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K 0x00b4
#define USB_DEVICE_ID_MS_NE4K 0x00db
#define USB_DEVICE_ID_MS_NE4K_JP 0x00dc
#define USB_DEVICE_ID_MS_LK6K 0x00f9
@@ -679,7 +684,10 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_NE7K 0x071d
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1 0x0732
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_600 0x0750
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
+#define USB_DEVICE_ID_MS_COMFORT_KEYBOARD 0x00e3
#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
@@ -800,6 +808,7 @@
#define USB_VENDOR_ID_QUANTA 0x0408
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
#define USB_VENDOR_ID_RAZER 0x1532
@@ -839,6 +848,7 @@
#define USB_VENDOR_ID_SEMICO 0x1a2c
#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD 0x0023
+#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2 0x0027
#define USB_VENDOR_ID_SENNHEISER 0x1395
#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
@@ -872,6 +882,9 @@
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
#define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER 0x1000
+#define USB_VENDOR_ID_SINO_LITE 0x1345
+#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
+
#define USB_VENDOR_ID_SOUNDGRAPH 0x15c2
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046
@@ -1047,7 +1060,7 @@
#define USB_DEVICE_ID_RI_KA_WEBMAIL 0x1320 /* Webmail Notifier */
#define USB_VENDOR_ID_MULTIPLE_1781 0x1781
-#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD 0x0a8d
+#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD 0x0a9d
#define USB_VENDOR_ID_DRACAL_RAPHNET 0x289b
#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 0125e356bd8d..1ac4ff4d57a6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -184,21 +184,31 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
unsigned char byte2, unsigned char byte3)
{
int ret;
- unsigned char buf[] = {0x18, byte2, byte3};
+ unsigned char *buf;
+
+ buf = kzalloc(3, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = 0x18;
+ buf[1] = byte2;
+ buf[2] = byte3;
switch (hdev->product) {
case USB_DEVICE_ID_LENOVO_CUSBKBD:
- ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, 0x13, buf, 3,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
break;
case USB_DEVICE_ID_LENOVO_CBTKBD:
- ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+ ret = hid_hw_output_report(hdev, buf, 3);
break;
default:
ret = -EINVAL;
break;
}
+ kfree(buf);
+
return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
}
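Aside: the motivation here is that buffers passed to hid_hw_raw_request() and hid_hw_output_report() may be handed to the USB layer for DMA, and on-stack memory is not DMA-safe on all configurations, hence the switch to a kzalloc'd buffer. A sketch of the general pattern the fix follows, helper name hypothetical:

static int send_cmd_dma_safe(struct hid_device *hdev, const u8 *src, size_t len)
{
	u8 *buf = kmemdup(src, len, GFP_KERNEL); /* heap copy is DMA-safe */
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = hid_hw_output_report(hdev, buf, len);
	kfree(buf);
	return ret < 0 ? ret : 0;
}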
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c690fae02cf8..feb2be71f77c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -61,7 +61,7 @@
*/
static __u8 df_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
@@ -127,7 +127,7 @@ static __u8 df_rdesc_fixed[] = {
static __u8 dfp_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
@@ -175,7 +175,7 @@ static __u8 dfp_rdesc_fixed[] = {
static __u8 fv_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
@@ -242,7 +242,7 @@ static __u8 fv_rdesc_fixed[] = {
static __u8 momo_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
@@ -288,7 +288,7 @@ static __u8 momo_rdesc_fixed[] = {
static __u8 momo2_rdesc_fixed[] = {
0x05, 0x01, /* Usage Page (Desktop), */
-0x09, 0x04, /* Usage (Joystik), */
+0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x95, 0x01, /* Report Count (1), */
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index bd2ab476c65e..2e2515a4c070 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -15,13 +15,19 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kfifo.h>
#include <linux/input/mt.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/fixp-arith.h>
#include <asm/unaligned.h>
+#include "usbhid/usbhid.h"
#include "hid-ids.h"
MODULE_LICENSE("GPL");
@@ -773,6 +779,589 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
}
}
+/* -------------------------------------------------------------------------- */
+/* 0x8123: Force feedback support */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_FF_GET_INFO 0x01
+#define HIDPP_FF_RESET_ALL 0x11
+#define HIDPP_FF_DOWNLOAD_EFFECT 0x21
+#define HIDPP_FF_SET_EFFECT_STATE 0x31
+#define HIDPP_FF_DESTROY_EFFECT 0x41
+#define HIDPP_FF_GET_APERTURE 0x51
+#define HIDPP_FF_SET_APERTURE 0x61
+#define HIDPP_FF_GET_GLOBAL_GAINS 0x71
+#define HIDPP_FF_SET_GLOBAL_GAINS 0x81
+
+#define HIDPP_FF_EFFECT_STATE_GET 0x00
+#define HIDPP_FF_EFFECT_STATE_STOP 0x01
+#define HIDPP_FF_EFFECT_STATE_PLAY 0x02
+#define HIDPP_FF_EFFECT_STATE_PAUSE 0x03
+
+#define HIDPP_FF_EFFECT_CONSTANT 0x00
+#define HIDPP_FF_EFFECT_PERIODIC_SINE 0x01
+#define HIDPP_FF_EFFECT_PERIODIC_SQUARE 0x02
+#define HIDPP_FF_EFFECT_PERIODIC_TRIANGLE 0x03
+#define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP 0x04
+#define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN 0x05
+#define HIDPP_FF_EFFECT_SPRING 0x06
+#define HIDPP_FF_EFFECT_DAMPER 0x07
+#define HIDPP_FF_EFFECT_FRICTION 0x08
+#define HIDPP_FF_EFFECT_INERTIA 0x09
+#define HIDPP_FF_EFFECT_RAMP 0x0A
+
+#define HIDPP_FF_EFFECT_AUTOSTART 0x80
+
+#define HIDPP_FF_EFFECTID_NONE -1
+#define HIDPP_FF_EFFECTID_AUTOCENTER -2
+
+#define HIDPP_FF_MAX_PARAMS 20
+#define HIDPP_FF_RESERVED_SLOTS 1
+
+struct hidpp_ff_private_data {
+ struct hidpp_device *hidpp;
+ u8 feature_index;
+ u8 version;
+ u16 gain;
+ s16 range;
+ u8 slot_autocenter;
+ u8 num_effects;
+ int *effect_ids;
+ struct workqueue_struct *wq;
+ atomic_t workqueue_size;
+};
+
+struct hidpp_ff_work_data {
+ struct work_struct work;
+ struct hidpp_ff_private_data *data;
+ int effect_id;
+ u8 command;
+ u8 params[HIDPP_FF_MAX_PARAMS];
+ u8 size;
+};
+
+static const signed short hiddpp_ff_effects[] = {
+ FF_CONSTANT,
+ FF_PERIODIC,
+ FF_SINE,
+ FF_SQUARE,
+ FF_SAW_UP,
+ FF_SAW_DOWN,
+ FF_TRIANGLE,
+ FF_SPRING,
+ FF_DAMPER,
+ FF_AUTOCENTER,
+ FF_GAIN,
+ -1
+};
+
+static const signed short hiddpp_ff_effects_v2[] = {
+ FF_RAMP,
+ FF_FRICTION,
+ FF_INERTIA,
+ -1
+};
+
+static const u8 HIDPP_FF_CONDITION_CMDS[] = {
+ HIDPP_FF_EFFECT_SPRING,
+ HIDPP_FF_EFFECT_FRICTION,
+ HIDPP_FF_EFFECT_DAMPER,
+ HIDPP_FF_EFFECT_INERTIA
+};
+
+static const char *HIDPP_FF_CONDITION_NAMES[] = {
+ "spring",
+ "friction",
+ "damper",
+ "inertia"
+};
+
+
+static u8 hidpp_ff_find_effect(struct hidpp_ff_private_data *data, int effect_id)
+{
+ int i;
+
+ for (i = 0; i < data->num_effects; i++)
+ if (data->effect_ids[i] == effect_id)
+ return i+1;
+
+ return 0;
+}
+
+static void hidpp_ff_work_handler(struct work_struct *w)
+{
+ struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work);
+ struct hidpp_ff_private_data *data = wd->data;
+ struct hidpp_report response;
+ u8 slot;
+ int ret;
+
+ /* add slot number if needed */
+ switch (wd->effect_id) {
+ case HIDPP_FF_EFFECTID_AUTOCENTER:
+ wd->params[0] = data->slot_autocenter;
+ break;
+ case HIDPP_FF_EFFECTID_NONE:
+ /* leave slot as zero */
+ break;
+ default:
+ /* find current slot for effect */
+ wd->params[0] = hidpp_ff_find_effect(data, wd->effect_id);
+ break;
+ }
+
+ /* send command and wait for reply */
+ ret = hidpp_send_fap_command_sync(data->hidpp, data->feature_index,
+ wd->command, wd->params, wd->size, &response);
+
+ if (ret) {
+ hid_err(data->hidpp->hid_dev, "Failed to send command to device!\n");
+ goto out;
+ }
+
+ /* parse return data */
+ switch (wd->command) {
+ case HIDPP_FF_DOWNLOAD_EFFECT:
+ slot = response.fap.params[0];
+ if (slot > 0 && slot <= data->num_effects) {
+ if (wd->effect_id >= 0)
+ /* regular effect uploaded */
+ data->effect_ids[slot-1] = wd->effect_id;
+ else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
+ /* autocenter spring uploaded */
+ data->slot_autocenter = slot;
+ }
+ break;
+ case HIDPP_FF_DESTROY_EFFECT:
+ if (wd->effect_id >= 0)
+ /* regular effect destroyed */
+ data->effect_ids[wd->params[0]-1] = -1;
+ else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
+ /* autocenter spring destroyed */
+ data->slot_autocenter = 0;
+ break;
+ case HIDPP_FF_SET_GLOBAL_GAINS:
+ data->gain = (wd->params[0] << 8) + wd->params[1];
+ break;
+ case HIDPP_FF_SET_APERTURE:
+ data->range = (wd->params[0] << 8) + wd->params[1];
+ break;
+ default:
+ /* no action needed */
+ break;
+ }
+
+out:
+ atomic_dec(&data->workqueue_size);
+ kfree(wd);
+}
+
+static int hidpp_ff_queue_work(struct hidpp_ff_private_data *data, int effect_id, u8 command, u8 *params, u8 size)
+{
+ struct hidpp_ff_work_data *wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+ int s;
+
+ if (!wd)
+ return -ENOMEM;
+
+ INIT_WORK(&wd->work, hidpp_ff_work_handler);
+
+ wd->data = data;
+ wd->effect_id = effect_id;
+ wd->command = command;
+ wd->size = size;
+ memcpy(wd->params, params, size);
+
+ atomic_inc(&data->workqueue_size);
+ queue_work(data->wq, &wd->work);
+
+ /* warn about excessive queue size */
+ s = atomic_read(&data->workqueue_size);
+ if (s >= 20 && s % 20 == 0)
+ hid_warn(data->hidpp->hid_dev, "Force feedback command queue contains %d commands, causing substantial delays!", s);
+
+ return 0;
+}
+
+static int hidpp_ff_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 params[20];
+ u8 size;
+ int force;
+
+ /* set common parameters */
+ params[2] = effect->replay.length >> 8;
+ params[3] = effect->replay.length & 255;
+ params[4] = effect->replay.delay >> 8;
+ params[5] = effect->replay.delay & 255;
+
+ switch (effect->type) {
+ case FF_CONSTANT:
+ force = (effect->u.constant.level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+ params[1] = HIDPP_FF_EFFECT_CONSTANT;
+ params[6] = force >> 8;
+ params[7] = force & 255;
+ params[8] = effect->u.constant.envelope.attack_level >> 7;
+ params[9] = effect->u.constant.envelope.attack_length >> 8;
+ params[10] = effect->u.constant.envelope.attack_length & 255;
+ params[11] = effect->u.constant.envelope.fade_level >> 7;
+ params[12] = effect->u.constant.envelope.fade_length >> 8;
+ params[13] = effect->u.constant.envelope.fade_length & 255;
+ size = 14;
+ dbg_hid("Uploading constant force level=%d in dir %d = %d\n",
+ effect->u.constant.level,
+ effect->direction, force);
+ dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+ effect->u.constant.envelope.attack_level,
+ effect->u.constant.envelope.attack_length,
+ effect->u.constant.envelope.fade_level,
+ effect->u.constant.envelope.fade_length);
+ break;
+ case FF_PERIODIC:
+ {
+ switch (effect->u.periodic.waveform) {
+ case FF_SINE:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_SINE;
+ break;
+ case FF_SQUARE:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_SQUARE;
+ break;
+ case FF_SAW_UP:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP;
+ break;
+ case FF_SAW_DOWN:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN;
+ break;
+ case FF_TRIANGLE:
+ params[1] = HIDPP_FF_EFFECT_PERIODIC_TRIANGLE;
+ break;
+ default:
+ hid_err(data->hidpp->hid_dev, "Unexpected periodic waveform type %i!\n", effect->u.periodic.waveform);
+ return -EINVAL;
+ }
+ force = (effect->u.periodic.magnitude * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+ params[6] = effect->u.periodic.magnitude >> 8;
+ params[7] = effect->u.periodic.magnitude & 255;
+ params[8] = effect->u.periodic.offset >> 8;
+ params[9] = effect->u.periodic.offset & 255;
+ params[10] = effect->u.periodic.period >> 8;
+ params[11] = effect->u.periodic.period & 255;
+ params[12] = effect->u.periodic.phase >> 8;
+ params[13] = effect->u.periodic.phase & 255;
+ params[14] = effect->u.periodic.envelope.attack_level >> 7;
+ params[15] = effect->u.periodic.envelope.attack_length >> 8;
+ params[16] = effect->u.periodic.envelope.attack_length & 255;
+ params[17] = effect->u.periodic.envelope.fade_level >> 7;
+ params[18] = effect->u.periodic.envelope.fade_length >> 8;
+ params[19] = effect->u.periodic.envelope.fade_length & 255;
+ size = 20;
+ dbg_hid("Uploading periodic force mag=%d/dir=%d, offset=%d, period=%d ms, phase=%d\n",
+ effect->u.periodic.magnitude, effect->direction,
+ effect->u.periodic.offset,
+ effect->u.periodic.period,
+ effect->u.periodic.phase);
+ dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+ effect->u.periodic.envelope.attack_level,
+ effect->u.periodic.envelope.attack_length,
+ effect->u.periodic.envelope.fade_level,
+ effect->u.periodic.envelope.fade_length);
+ break;
+ }
+ case FF_RAMP:
+ params[1] = HIDPP_FF_EFFECT_RAMP;
+ force = (effect->u.ramp.start_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+ params[6] = force >> 8;
+ params[7] = force & 255;
+ force = (effect->u.ramp.end_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+ params[8] = force >> 8;
+ params[9] = force & 255;
+ params[10] = effect->u.ramp.envelope.attack_level >> 7;
+ params[11] = effect->u.ramp.envelope.attack_length >> 8;
+ params[12] = effect->u.ramp.envelope.attack_length & 255;
+ params[13] = effect->u.ramp.envelope.fade_level >> 7;
+ params[14] = effect->u.ramp.envelope.fade_length >> 8;
+ params[15] = effect->u.ramp.envelope.fade_length & 255;
+ size = 16;
+ dbg_hid("Uploading ramp force level=%d -> %d in dir %d = %d\n",
+ effect->u.ramp.start_level,
+ effect->u.ramp.end_level,
+ effect->direction, force);
+ dbg_hid(" envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+ effect->u.ramp.envelope.attack_level,
+ effect->u.ramp.envelope.attack_length,
+ effect->u.ramp.envelope.fade_level,
+ effect->u.ramp.envelope.fade_length);
+ break;
+ case FF_FRICTION:
+ case FF_INERTIA:
+ case FF_SPRING:
+ case FF_DAMPER:
+ params[1] = HIDPP_FF_CONDITION_CMDS[effect->type - FF_SPRING];
+ params[6] = effect->u.condition[0].left_saturation >> 9;
+ params[7] = (effect->u.condition[0].left_saturation >> 1) & 255;
+ params[8] = effect->u.condition[0].left_coeff >> 8;
+ params[9] = effect->u.condition[0].left_coeff & 255;
+ params[10] = effect->u.condition[0].deadband >> 9;
+ params[11] = (effect->u.condition[0].deadband >> 1) & 255;
+ params[12] = effect->u.condition[0].center >> 8;
+ params[13] = effect->u.condition[0].center & 255;
+ params[14] = effect->u.condition[0].right_coeff >> 8;
+ params[15] = effect->u.condition[0].right_coeff & 255;
+ params[16] = effect->u.condition[0].right_saturation >> 9;
+ params[17] = (effect->u.condition[0].right_saturation >> 1) & 255;
+ size = 18;
+ dbg_hid("Uploading %s force left coeff=%d, left sat=%d, right coeff=%d, right sat=%d\n",
+ HIDPP_FF_CONDITION_NAMES[effect->type - FF_SPRING],
+ effect->u.condition[0].left_coeff,
+ effect->u.condition[0].left_saturation,
+ effect->u.condition[0].right_coeff,
+ effect->u.condition[0].right_saturation);
+ dbg_hid(" deadband=%d, center=%d\n",
+ effect->u.condition[0].deadband,
+ effect->u.condition[0].center);
+ break;
+ default:
+ hid_err(data->hidpp->hid_dev, "Unexpected force type %i!\n", effect->type);
+ return -EINVAL;
+ }
+
+ return hidpp_ff_queue_work(data, effect->id, HIDPP_FF_DOWNLOAD_EFFECT, params, size);
+}
+
+static int hidpp_ff_playback(struct input_dev *dev, int effect_id, int value)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 params[2];
+
+ params[1] = value ? HIDPP_FF_EFFECT_STATE_PLAY : HIDPP_FF_EFFECT_STATE_STOP;
+
+ dbg_hid("St%sing playback of effect %d.\n", value?"art":"opp", effect_id);
+
+ return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_SET_EFFECT_STATE, params, ARRAY_SIZE(params));
+}
+
+static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 slot = 0;
+
+ dbg_hid("Erasing effect %d.\n", effect_id);
+
+ return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_DESTROY_EFFECT, &slot, 1);
+}
+
+static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 params[18];
+
+ dbg_hid("Setting autocenter to %d.\n", magnitude);
+
+ /* start a standard spring effect */
+ params[1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART;
+ /* zero delay and duration */
+ params[2] = params[3] = params[4] = params[5] = 0;
+ /* set coeff to 25% of saturation */
+ params[8] = params[14] = magnitude >> 11;
+ params[9] = params[15] = (magnitude >> 3) & 255;
+ params[6] = params[16] = magnitude >> 9;
+ params[7] = params[17] = (magnitude >> 1) & 255;
+ /* zero deadband and center */
+ params[10] = params[11] = params[12] = params[13] = 0;
+
+ hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_AUTOCENTER, HIDPP_FF_DOWNLOAD_EFFECT, params, ARRAY_SIZE(params));
+}
+
+static void hidpp_ff_set_gain(struct input_dev *dev, u16 gain)
+{
+ struct hidpp_ff_private_data *data = dev->ff->private;
+ u8 params[4];
+
+ dbg_hid("Setting gain to %d.\n", gain);
+
+ params[0] = gain >> 8;
+ params[1] = gain & 255;
+ params[2] = 0; /* no boost */
+ params[3] = 0;
+
+ hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_NONE, HIDPP_FF_SET_GLOBAL_GAINS, params, ARRAY_SIZE(params));
+}
+
+static ssize_t hidpp_ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hid = to_hid_device(dev);
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ struct input_dev *idev = hidinput->input;
+ struct hidpp_ff_private_data *data = idev->ff->private;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", data->range);
+}
+
+static ssize_t hidpp_ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct hid_device *hid = to_hid_device(dev);
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ struct input_dev *idev = hidinput->input;
+ struct hidpp_ff_private_data *data = idev->ff->private;
+ u8 params[2];
+ int range = simple_strtoul(buf, NULL, 10);
+
+ range = clamp(range, 180, 900);
+
+ params[0] = range >> 8;
+ params[1] = range & 0x00FF;
+
+ hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_NONE, HIDPP_FF_SET_APERTURE, params, ARRAY_SIZE(params));
+
+ return count;
+}
+
+static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp_ff_range_show, hidpp_ff_range_store);
+
+static void hidpp_ff_destroy(struct ff_device *ff)
+{
+ struct hidpp_ff_private_data *data = ff->private;
+
+ kfree(data->effect_ids);
+}
+
+static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+{
+ struct hid_device *hid = hidpp->hid_dev;
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ struct input_dev *dev = hidinput->input;
+ const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
+ const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
+ struct ff_device *ff;
+ struct hidpp_report response;
+ struct hidpp_ff_private_data *data;
+ int error, j, num_slots;
+ u8 version;
+
+ if (!dev) {
+ hid_err(hid, "Struct input_dev not set!\n");
+ return -EINVAL;
+ }
+
+ /* Get firmware release */
+ version = bcdDevice & 255;
+
+ /* Set supported force feedback capabilities */
+ for (j = 0; hiddpp_ff_effects[j] >= 0; j++)
+ set_bit(hiddpp_ff_effects[j], dev->ffbit);
+ if (version > 1)
+ for (j = 0; hiddpp_ff_effects_v2[j] >= 0; j++)
+ set_bit(hiddpp_ff_effects_v2[j], dev->ffbit);
+
+ /* Read number of slots available in device */
+ error = hidpp_send_fap_command_sync(hidpp, feature_index,
+ HIDPP_FF_GET_INFO, NULL, 0, &response);
+ if (error) {
+ if (error < 0)
+ return error;
+ hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+ __func__, error);
+ return -EPROTO;
+ }
+
+ num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+ error = input_ff_create(dev, num_slots);
+
+ if (error) {
+ hid_err(dev, "Failed to create FF device!\n");
+ return error;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
+ if (!data->effect_ids) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ data->hidpp = hidpp;
+ data->feature_index = feature_index;
+ data->version = version;
+ data->slot_autocenter = 0;
+ data->num_effects = num_slots;
+ for (j = 0; j < num_slots; j++)
+ data->effect_ids[j] = -1;
+
+ ff = dev->ff;
+ ff->private = data;
+
+ ff->upload = hidpp_ff_upload_effect;
+ ff->erase = hidpp_ff_erase_effect;
+ ff->playback = hidpp_ff_playback;
+ ff->set_gain = hidpp_ff_set_gain;
+ ff->set_autocenter = hidpp_ff_set_autocenter;
+ ff->destroy = hidpp_ff_destroy;
+
+ /* reset all forces */
+ error = hidpp_send_fap_command_sync(hidpp, feature_index,
+ HIDPP_FF_RESET_ALL, NULL, 0, &response);
+
+ /* Read current Range */
+ error = hidpp_send_fap_command_sync(hidpp, feature_index,
+ HIDPP_FF_GET_APERTURE, NULL, 0, &response);
+ if (error)
+ hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
+ data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
+
+ /* Create sysfs interface */
+ error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+ if (error)
+ hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
+
+ /* Read the current gain values */
+ error = hidpp_send_fap_command_sync(hidpp, feature_index,
+ HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
+ if (error)
+ hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
+ data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
+ /* ignore boost value at response.fap.params[2] */
+
+ /* init the hardware command queue */
+ data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+ atomic_set(&data->workqueue_size, 0);
+
+ /* initialize with zero autocenter to get wheel in usable state */
+ hidpp_ff_set_autocenter(dev, 0);
+
+ hid_info(hid, "Force feeback support loaded (firmware release %d).\n", version);
+
+ return 0;
+}
+
+static int hidpp_ff_deinit(struct hid_device *hid)
+{
+ struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ struct input_dev *dev = hidinput->input;
+ struct hidpp_ff_private_data *data;
+
+ if (!dev) {
+ hid_err(hid, "Struct input_dev not found!\n");
+ return -EINVAL;
+ }
+
+ hid_info(hid, "Unloading HID++ force feedback.\n");
+ data = dev->ff->private;
+ if (!data) {
+ hid_err(hid, "Private data not found!\n");
+ return -EINVAL;
+ }
+
+ destroy_workqueue(data->wq);
+ device_remove_file(&hid->dev, &dev_attr_range);
+
+ return 0;
+}
+
+
/* ************************************************************************** */
/* */
/* Device Support */
@@ -1301,121 +1890,22 @@ static int k400_connect(struct hid_device *hdev, bool connected)
#define HIDPP_PAGE_G920_FORCE_FEEDBACK 0x8123
-/* Using session ID = 1 */
-#define CMD_G920_FORCE_GET_APERTURE 0x51
-#define CMD_G920_FORCE_SET_APERTURE 0x61
-
-struct g920_private_data {
- u8 force_feature;
- u16 range;
-};
-
-static ssize_t g920_range_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hid = to_hid_device(dev);
- struct hidpp_device *hidpp = hid_get_drvdata(hid);
- struct g920_private_data *pdata;
-
- pdata = hidpp->private_data;
- if (!pdata) {
- hid_err(hid, "Private driver data not found!\n");
- return -EINVAL;
- }
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", pdata->range);
-}
-
-static ssize_t g920_range_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hid_device *hid = to_hid_device(dev);
- struct hidpp_device *hidpp = hid_get_drvdata(hid);
- struct g920_private_data *pdata;
- struct hidpp_report response;
- u8 params[2];
- int ret;
- u16 range = simple_strtoul(buf, NULL, 10);
-
- pdata = hidpp->private_data;
- if (!pdata) {
- hid_err(hid, "Private driver data not found!\n");
- return -EINVAL;
- }
-
- if (range < 180)
- range = 180;
- else if (range > 900)
- range = 900;
-
- params[0] = range >> 8;
- params[1] = range & 0x00FF;
-
- ret = hidpp_send_fap_command_sync(hidpp, pdata->force_feature,
- CMD_G920_FORCE_SET_APERTURE, params, 2, &response);
- if (ret)
- return ret;
-
- pdata->range = range;
- return count;
-}
-
-static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, g920_range_show, g920_range_store);
-
-static int g920_allocate(struct hid_device *hdev)
-{
- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct g920_private_data *pdata;
-
- pdata = devm_kzalloc(&hdev->dev, sizeof(struct g920_private_data),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- hidpp->private_data = pdata;
-
- return 0;
-}
-
static int g920_get_config(struct hidpp_device *hidpp)
{
- struct g920_private_data *pdata = hidpp->private_data;
- struct hidpp_report response;
u8 feature_type;
u8 feature_index;
int ret;
- pdata = hidpp->private_data;
- if (!pdata) {
- hid_err(hidpp->hid_dev, "Private driver data not found!\n");
- return -EINVAL;
- }
-
/* Find feature and store for later use */
ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
&feature_index, &feature_type);
if (ret)
return ret;
- pdata->force_feature = feature_index;
-
- /* Read current Range */
- ret = hidpp_send_fap_command_sync(hidpp, feature_index,
- CMD_G920_FORCE_GET_APERTURE, NULL, 0, &response);
- if (ret > 0) {
- hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
- __func__, ret);
- return -EPROTO;
- }
- if (ret)
- return ret;
-
- pdata->range = get_unaligned_be16(&response.fap.params[0]);
-
- /* Create sysfs interface */
- ret = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+ ret = hidpp_ff_init(hidpp, feature_index);
if (ret)
- hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d\n", ret);
+ hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
+ ret);
return 0;
}
@@ -1739,10 +2229,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = k400_allocate(hdev);
if (ret)
goto allocate_fail;
- } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- ret = g920_allocate(hdev);
- if (ret)
- goto allocate_fail;
}
INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -1825,7 +2311,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hid_hw_open_failed:
hid_device_io_stop(hdev);
if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- device_remove_file(&hdev->dev, &dev_attr_range);
hid_hw_close(hdev);
hid_hw_stop(hdev);
}
@@ -1843,7 +2328,7 @@ static void hidpp_remove(struct hid_device *hdev)
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- device_remove_file(&hdev->dev, &dev_attr_range);
+ hidpp_ff_deinit(hdev);
hid_hw_close(hdev);
}
hid_hw_stop(hdev);
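Aside: a worked example of the direction scaling used throughout hidpp_ff_upload_effect(). The input layer encodes effect->direction as 0..0xffff for 0..360 degrees, and fixp_sin16() returns sin() scaled to +/-0x7fff, so the expression reduces to force = level * sin(direction). Illustrative helper, not part of the patch:

#include <linux/fixp-arith.h>

static s32 hidpp_ff_scale(s32 level, u16 direction)
{
	/* 0x4000 -> 90 deg  -> sin =  0x7fff -> force ~=  level */
	/* 0xc000 -> 270 deg -> sin = -0x7fff -> force ~= -level */
	return (level * fixp_sin16((direction * 360) >> 16)) >> 15;
}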
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 77a2cf3e4afe..e924d555536c 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -272,6 +272,12 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_PRESENTER },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
.driver_data = MS_ERGONOMY | MS_RDESC_3K },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600),
+ .driver_data = MS_ERGONOMY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
@@ -286,6 +292,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_HIDINPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
.driver_data = MS_HIDINPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD),
+ .driver_data = MS_ERGONOMY},
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 296d4991560e..c741f5e50a66 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -396,6 +396,11 @@ static void mt_feature_mapping(struct hid_device *hdev,
td->is_buttonpad = true;
break;
+ case 0xff0000c5:
+ /* Retrieve the Win8 blob once to enable some devices */
+ if (usage->usage_index == 0)
+ mt_get_feature(hdev, field->report);
+ break;
}
}
@@ -1133,6 +1138,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+ if (ret)
+ dev_warn(&hdev->dev, "Cannot allocate sysfs group for %s\n",
+ hdev->name);
mt_set_maxcontacts(hdev);
mt_set_input_mode(hdev);
@@ -1145,8 +1153,31 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
#ifdef CONFIG_PM
+static void mt_release_contacts(struct hid_device *hid)
+{
+ struct hid_input *hidinput;
+
+ list_for_each_entry(hidinput, &hid->inputs, list) {
+ struct input_dev *input_dev = hidinput->input;
+ struct input_mt *mt = input_dev->mt;
+ int i;
+
+ if (mt) {
+ for (i = 0; i < mt->num_slots; i++) {
+ input_mt_slot(input_dev, i);
+ input_mt_report_slot_state(input_dev,
+ MT_TOOL_FINGER,
+ false);
+ }
+ input_mt_sync_frame(input_dev);
+ input_sync(input_dev);
+ }
+ }
+}
+
static int mt_reset_resume(struct hid_device *hdev)
{
+ mt_release_contacts(hdev);
mt_set_maxcontacts(hdev);
mt_set_input_mode(hdev);
return 0;
diff --git a/drivers/hid/hid-penmount.c b/drivers/hid/hid-penmount.c
index c11dce85cd18..d90383f788e2 100644
--- a/drivers/hid/hid-penmount.c
+++ b/drivers/hid/hid-penmount.c
@@ -23,8 +23,12 @@ static int penmount_input_mapping(struct hid_device *hdev,
struct hid_usage *usage, unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
- hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
- return 1;
+ if (((usage->hid - 1) & HID_USAGE) == 0) {
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ } else {
+ return -1;
+ }
}
return 0;
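Aside: a worked illustration of the first-button test, with assumed usage values. Button usages live on page 0x09 and are 1-based:

static bool is_first_button(u32 hid_usage)
{
	/* 0x00090001 -> ((usage - 1) & HID_USAGE) == 0x0000 -> BTN_TOUCH    */
	/* 0x00090002 -> ((usage - 1) & HID_USAGE) == 0x0001 -> dropped (-1) */
	return ((hid_usage - 1) & HID_USAGE) == 0;
}

Only the primary touch button is mapped; any additional buttons the firmware declares are now ignored rather than all mapped to BTN_TOUCH.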
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 67cd059a8f46..9cd2ca34a6be 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -594,6 +594,9 @@ static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
int ret;
u8 buf[RMI_F11_CTRL_REG_COUNT];
+ if (!(data->device_flags & RMI_DEVICE))
+ return 0;
+
ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
RMI_F11_CTRL_REG_COUNT);
if (ret)
@@ -613,6 +616,9 @@ static int rmi_post_reset(struct hid_device *hdev)
struct rmi_data *data = hid_get_drvdata(hdev);
int ret;
+ if (!(data->device_flags & RMI_DEVICE))
+ return 0;
+
ret = rmi_reset_attn_mode(hdev);
if (ret) {
hid_err(hdev, "can not set rmi mode\n");
@@ -640,6 +646,11 @@ static int rmi_post_reset(struct hid_device *hdev)
static int rmi_post_resume(struct hid_device *hdev)
{
+ struct rmi_data *data = hid_get_drvdata(hdev);
+
+ if (!(data->device_flags & RMI_DEVICE))
+ return 0;
+
return rmi_reset_attn_mode(hdev);
}
#endif /* CONFIG_PM */
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 9b8db0e0ef1c..310436a54a3f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -50,6 +50,7 @@
#define MOTION_CONTROLLER_BT BIT(8)
#define NAVIGATION_CONTROLLER_USB BIT(9)
#define NAVIGATION_CONTROLLER_BT BIT(10)
+#define SINO_LITE_CONTROLLER BIT(11)
#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -74,7 +75,7 @@
* axis values. Additionally, the controller only has 20 actual, physical axes
* so there are several unused axes in between the used ones.
*/
-static __u8 sixaxis_rdesc[] = {
+static u8 sixaxis_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -152,7 +153,7 @@ static __u8 sixaxis_rdesc[] = {
};
/* PS/3 Motion controller */
-static __u8 motion_rdesc[] = {
+static u8 motion_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
@@ -249,9 +250,9 @@ static __u8 motion_rdesc[] = {
};
/* PS/3 Navigation controller */
-static __u8 navigation_rdesc[] = {
+static u8 navigation_rdesc[] = {
0x05, 0x01, /* Usage Page (Desktop), */
- 0x09, 0x04, /* Usage (Joystik), */
+ 0x09, 0x04, /* Usage (Joystick), */
0xA1, 0x01, /* Collection (Application), */
0xA1, 0x02, /* Collection (Logical), */
0x85, 0x01, /* Report ID (1), */
@@ -809,7 +810,7 @@ static u8 dualshock4_bt_rdesc[] = {
0xC0 /* End Collection */
};
-static __u8 ps3remote_rdesc[] = {
+static u8 ps3remote_rdesc[] = {
0x05, 0x01, /* GUsagePage Generic Desktop */
0x09, 0x05, /* LUsage 0x05 [Game Pad] */
0xA1, 0x01, /* MCollection Application (mouse, keyboard) */
@@ -817,14 +818,18 @@ static __u8 ps3remote_rdesc[] = {
/* Use collection 1 for joypad buttons */
0xA1, 0x02, /* MCollection Logical (interrelated data) */
- /* Ignore the 1st byte, maybe it is used for a controller
- * number but it's not needed for correct operation */
+ /*
+ * Ignore the 1st byte, maybe it is used for a controller
+ * number but it's not needed for correct operation
+ */
0x75, 0x08, /* GReportSize 0x08 [8] */
0x95, 0x01, /* GReportCount 0x01 [1] */
0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
- /* Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
- * buttons multiple keypresses are allowed */
+ /*
+ * Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
+ * buttons multiple keypresses are allowed
+ */
0x05, 0x09, /* GUsagePage Button */
0x19, 0x01, /* LUsageMinimum 0x01 [Button 1 (primary/trigger)] */
0x29, 0x18, /* LUsageMaximum 0x18 [Button 24] */
@@ -849,8 +854,10 @@ static __u8 ps3remote_rdesc[] = {
0x95, 0x01, /* GReportCount 0x01 [1] */
0x80, /* MInput */
- /* Ignore bytes from 6th to 11th, 6th to 10th are always constant at
- * 0xff and 11th is for press indication */
+ /*
+ * Ignore bytes from 6th to 11th, 6th to 10th are always constant at
+ * 0xff and 11th is for press indication
+ */
0x75, 0x08, /* GReportSize 0x08 [8] */
0x95, 0x06, /* GReportCount 0x06 [6] */
0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
@@ -929,7 +936,7 @@ static const unsigned int buzz_keymap[] = {
/*
* The controller has 4 remote buzzers, each with one LED and 5
* buttons.
- *
+ *
* We use the mapping chosen by the controller, which is:
*
* Key Offset
@@ -943,15 +950,15 @@ static const unsigned int buzz_keymap[] = {
* So, for example, the orange button on the third buzzer is mapped to
* BTN_TRIGGER_HAPPY14
*/
- [ 1] = BTN_TRIGGER_HAPPY1,
- [ 2] = BTN_TRIGGER_HAPPY2,
- [ 3] = BTN_TRIGGER_HAPPY3,
- [ 4] = BTN_TRIGGER_HAPPY4,
- [ 5] = BTN_TRIGGER_HAPPY5,
- [ 6] = BTN_TRIGGER_HAPPY6,
- [ 7] = BTN_TRIGGER_HAPPY7,
- [ 8] = BTN_TRIGGER_HAPPY8,
- [ 9] = BTN_TRIGGER_HAPPY9,
+ [1] = BTN_TRIGGER_HAPPY1,
+ [2] = BTN_TRIGGER_HAPPY2,
+ [3] = BTN_TRIGGER_HAPPY3,
+ [4] = BTN_TRIGGER_HAPPY4,
+ [5] = BTN_TRIGGER_HAPPY5,
+ [6] = BTN_TRIGGER_HAPPY6,
+ [7] = BTN_TRIGGER_HAPPY7,
+ [8] = BTN_TRIGGER_HAPPY8,
+ [9] = BTN_TRIGGER_HAPPY9,
[10] = BTN_TRIGGER_HAPPY10,
[11] = BTN_TRIGGER_HAPPY11,
[12] = BTN_TRIGGER_HAPPY12,
@@ -973,33 +980,33 @@ static enum power_supply_property sony_battery_props[] = {
};
struct sixaxis_led {
- __u8 time_enabled; /* the total time the led is active (0xff means forever) */
- __u8 duty_length; /* how long a cycle is in deciseconds (0 means "really fast") */
- __u8 enabled;
- __u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
- __u8 duty_on; /* % of duty_length the led is on (0xff mean 100%) */
+ u8 time_enabled; /* the total time the led is active (0xff means forever) */
+ u8 duty_length; /* how long a cycle is in deciseconds (0 means "really fast") */
+ u8 enabled;
+ u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
+ u8 duty_on; /* % of duty_length the led is on (0xff mean 100%) */
} __packed;
struct sixaxis_rumble {
- __u8 padding;
- __u8 right_duration; /* Right motor duration (0xff means forever) */
- __u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
- __u8 left_duration; /* Left motor duration (0xff means forever) */
- __u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
+ u8 padding;
+ u8 right_duration; /* Right motor duration (0xff means forever) */
+ u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
+ u8 left_duration; /* Left motor duration (0xff means forever) */
+ u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
} __packed;
struct sixaxis_output_report {
- __u8 report_id;
+ u8 report_id;
struct sixaxis_rumble rumble;
- __u8 padding[4];
- __u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
+ u8 padding[4];
+ u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
struct sixaxis_led led[4]; /* LEDx at (4 - x) */
struct sixaxis_led _reserved; /* LED5, not actually soldered */
} __packed;
union sixaxis_output_report_01 {
struct sixaxis_output_report data;
- __u8 buf[36];
+ u8 buf[36];
};
struct motion_output_report_02 {
@@ -1028,30 +1035,30 @@ struct sony_sc {
struct led_classdev *leds[MAX_LEDS];
unsigned long quirks;
struct work_struct state_worker;
- void(*send_output_report)(struct sony_sc*);
+ void (*send_output_report)(struct sony_sc *);
struct power_supply *battery;
struct power_supply_desc battery_desc;
int device_id;
- __u8 *output_report_dmabuf;
+ u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
- __u8 left;
- __u8 right;
+ u8 left;
+ u8 right;
#endif
- __u8 mac_address[6];
- __u8 worker_initialized;
- __u8 cable_state;
- __u8 battery_charging;
- __u8 battery_capacity;
- __u8 led_state[MAX_LEDS];
- __u8 resume_led_state[MAX_LEDS];
- __u8 led_delay_on[MAX_LEDS];
- __u8 led_delay_off[MAX_LEDS];
- __u8 led_count;
+ u8 mac_address[6];
+ u8 worker_initialized;
+ u8 cable_state;
+ u8 battery_charging;
+ u8 battery_capacity;
+ u8 led_state[MAX_LEDS];
+ u8 resume_led_state[MAX_LEDS];
+ u8 led_delay_on[MAX_LEDS];
+ u8 led_delay_off[MAX_LEDS];
+ u8 led_count;
};
-static __u8 *sixaxis_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *sixaxis_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
*rsize = sizeof(sixaxis_rdesc);
@@ -1072,7 +1079,7 @@ static u8 *navigation_fixup(struct hid_device *hdev, u8 *rdesc,
return navigation_rdesc;
}
-static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
*rsize = sizeof(ps3remote_rdesc);
@@ -1113,11 +1120,14 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
-static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
+ if (sc->quirks & SINO_LITE_CONTROLLER)
+ return rdesc;
+
/*
* Some Sony RF receivers wrongly declare the mouse pointer as a
* a constant non-data variable.
@@ -1164,12 +1174,12 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
-static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
- static const __u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
+ static const u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
unsigned long flags;
int offset;
- __u8 cable_state, battery_capacity, battery_charging;
+ u8 cable_state, battery_capacity, battery_charging;
/*
* The sixaxis is charging if the battery value is 0xee
@@ -1184,7 +1194,7 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
battery_charging = !(rd[offset] & 0x01);
cable_state = 1;
} else {
- __u8 index = rd[offset] <= 5 ? rd[offset] : 5;
+ u8 index = rd[offset] <= 5 ? rd[offset] : 5;
battery_capacity = sixaxis_battery_capacity[index];
battery_charging = 0;
cable_state = 0;
@@ -1197,14 +1207,14 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
spin_unlock_irqrestore(&sc->lock, flags);
}
-static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
unsigned long flags;
int n, offset;
- __u8 cable_state, battery_capacity, battery_charging;
+ u8 cable_state, battery_capacity, battery_charging;
/*
* Battery and touchpad data starts at byte 30 in the USB report and
@@ -1254,7 +1264,7 @@ static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
* follows the data for the first.
*/
for (n = 0; n < 2; n++) {
- __u16 x, y;
+ u16 x, y;
x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
@@ -1270,7 +1280,7 @@ static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
}
static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
- __u8 *rd, int size)
+ u8 *rd, int size)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
@@ -1394,7 +1404,7 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
{
const int buf_size =
max(SIXAXIS_REPORT_0xF2_SIZE, SIXAXIS_REPORT_0xF5_SIZE);
- __u8 *buf;
+ u8 *buf;
int ret;
buf = kmalloc(buf_size, GFP_KERNEL);
@@ -1420,8 +1430,10 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
}
ret = hid_hw_output_report(hdev, buf, 1);
- if (ret < 0)
- hid_err(hdev, "can't set operational mode: step 3\n");
+ if (ret < 0) {
+ hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+ ret = 0;
+ }
out:
kfree(buf);
@@ -1431,8 +1443,8 @@ out:
static int sixaxis_set_operational_bt(struct hid_device *hdev)
{
- static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
- __u8 *buf;
+ static const u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
+ u8 *buf;
int ret;
buf = kmemdup(report, sizeof(report), GFP_KERNEL);
@@ -1453,7 +1465,7 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev)
*/
static int dualshock4_set_operational_bt(struct hid_device *hdev)
{
- __u8 *buf;
+ u8 *buf;
int ret;
buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
@@ -1470,7 +1482,7 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
static void sixaxis_set_leds_from_id(struct sony_sc *sc)
{
- static const __u8 sixaxis_leds[10][4] = {
+ static const u8 sixaxis_leds[10][4] = {
{ 0x01, 0x00, 0x00, 0x00 },
{ 0x00, 0x01, 0x00, 0x00 },
{ 0x00, 0x00, 0x01, 0x00 },
@@ -1497,7 +1509,7 @@ static void sixaxis_set_leds_from_id(struct sony_sc *sc)
static void dualshock4_set_leds_from_id(struct sony_sc *sc)
{
/* The first 4 color/index entries match what the PS4 assigns */
- static const __u8 color_code[7][3] = {
+ static const u8 color_code[7][3] = {
/* Blue */ { 0x00, 0x00, 0x01 },
/* Red */ { 0x01, 0x00, 0x00 },
/* Green */ { 0x00, 0x01, 0x00 },
@@ -1525,7 +1537,7 @@ static void buzz_set_leds(struct sony_sc *sc)
&hdev->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next,
struct hid_report, list);
- __s32 *value = report->field[0]->value;
+ s32 *value = report->field[0]->value;
BUILD_BUG_ON(MAX_LEDS < 4);
@@ -1619,7 +1631,7 @@ static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
struct hid_device *hdev = to_hid_device(dev);
struct sony_sc *drv_data = hid_get_drvdata(hdev);
int n;
- __u8 new_on, new_off;
+ u8 new_on, new_off;
if (!drv_data) {
hid_err(hdev, "No device data\n");
@@ -1690,8 +1702,8 @@ static int sony_leds_init(struct sony_sc *sc)
const char *name_fmt;
static const char * const ds4_name_str[] = { "red", "green", "blue",
"global" };
- __u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
- __u8 use_hw_blink[MAX_LEDS] = { 0 };
+ u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
+ u8 use_hw_blink[MAX_LEDS] = { 0 };
BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
@@ -1719,7 +1731,7 @@ static int sony_leds_init(struct sony_sc *sc)
name_len = 0;
name_fmt = "%s:%s";
} else if (sc->quirks & NAVIGATION_CONTROLLER) {
- static const __u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
+ static const u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
memcpy(sc->led_state, navigation_leds, sizeof(navigation_leds));
sc->led_count = 1;
@@ -1796,7 +1808,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
static const union sixaxis_output_report_01 default_report = {
.buf = {
0x01,
- 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0x01, 0xff, 0x00, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0x27, 0x10, 0x00, 0x32,
0xff, 0x27, 0x10, 0x00, 0x32,
@@ -1842,7 +1854,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
}
}
- hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report,
+ hid_hw_raw_request(sc->hdev, report->report_id, (u8 *)report,
sizeof(struct sixaxis_output_report),
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
}
@@ -1850,7 +1862,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
static void dualshock4_send_output_report(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
- __u8 *buf = sc->output_report_dmabuf;
+ u8 *buf = sc->output_report_dmabuf;
int offset;
if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
@@ -1910,7 +1922,7 @@ static void motion_send_output_report(struct sony_sc *sc)
report->rumble = max(sc->right, sc->left);
#endif
- hid_hw_output_report(hdev, (__u8 *)report, MOTION_REPORT_0x02_SIZE);
+ hid_hw_output_report(hdev, (u8 *)report, MOTION_REPORT_0x02_SIZE);
}
static inline void sony_send_output_report(struct sony_sc *sc)
@@ -1922,6 +1934,7 @@ static inline void sony_send_output_report(struct sony_sc *sc)
static void sony_state_worker(struct work_struct *work)
{
struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+
sc->send_output_report(sc);
}
@@ -2142,7 +2155,7 @@ static int sony_get_bt_devaddr(struct sony_sc *sc)
static int sony_check_add(struct sony_sc *sc)
{
- __u8 *buf = NULL;
+ u8 *buf = NULL;
int n, ret;
if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) ||
@@ -2253,7 +2266,7 @@ static void sony_release_device_id(struct sony_sc *sc)
}
static inline void sony_init_output_report(struct sony_sc *sc,
- void(*send_output_report)(struct sony_sc*))
+ void (*send_output_report)(struct sony_sc *))
{
sc->send_output_report = send_output_report;
@@ -2441,7 +2454,7 @@ static int sony_suspend(struct hid_device *hdev, pm_message_t message)
/*
* On suspend save the current LED state,
* stop running force-feedback and blank the LEDS.
- */
+ */
if (SONY_LED_SUPPORT || SONY_FF_SUPPORT) {
struct sony_sc *sc = hid_get_drvdata(hdev);
@@ -2501,8 +2514,10 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = VAIO_RDESC_CONSTANT },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
.driver_data = VAIO_RDESC_CONSTANT },
- /* Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
- * Logitech joystick from the device descriptor. */
+ /*
+ * Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
+ * Logitech joystick from the device descriptor.
+ */
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER),
.driver_data = BUZZ_CONTROLLER },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER),
@@ -2521,6 +2536,9 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ /* Nyko Core Controller for PS3 */
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
+ .driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
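Aside: dualshock4_parse_report() above decodes the touchpad's packed coordinate format, where each touch point stores two 12-bit values in three bytes and the shared middle byte is split between them. A self-contained sketch of the unpacking, helper name hypothetical:

static void ds4_unpack_touch(const u8 *p, u16 *x, u16 *y)
{
	*x = p[1] | ((u16)(p[2] & 0x0f) << 8);        /* low 12 bits  */
	*y = ((p[2] & 0xf0) >> 4) | ((u16)p[3] << 4); /* high 12 bits */
}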
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
index b95d3978c272..847a497cd472 100644
--- a/drivers/hid/hid-thingm.c
+++ b/drivers/hid/hid-thingm.c
@@ -14,7 +14,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include "hid-ids.h"
@@ -56,7 +55,6 @@ struct thingm_rgb {
struct thingm_led red;
struct thingm_led green;
struct thingm_led blue;
- struct work_struct work;
u8 num;
};
@@ -79,9 +77,13 @@ static int thingm_send(struct thingm_device *tdev, u8 buf[REPORT_SIZE])
buf[0], buf[1], buf[2], buf[3], buf[4],
buf[5], buf[6], buf[7], buf[8]);
+ mutex_lock(&tdev->lock);
+
ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ mutex_unlock(&tdev->lock);
+
return ret < 0 ? ret : 0;
}
@@ -89,16 +91,31 @@ static int thingm_recv(struct thingm_device *tdev, u8 buf[REPORT_SIZE])
{
int ret;
+ /*
+ * A read consists of two operations: sending the read command
+ * and the actual read from the device. Use the mutex to protect
+ * the full sequence of both operations.
+ */
+ mutex_lock(&tdev->lock);
+
+ ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret < 0)
+ goto err;
+
ret = hid_hw_raw_request(tdev->hdev, buf[0], buf, REPORT_SIZE,
HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
if (ret < 0)
- return ret;
+ goto err;
+
+ ret = 0;
hid_dbg(tdev->hdev, "<- %d %c %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx %02hhx\n",
buf[0], buf[1], buf[2], buf[3], buf[4],
buf[5], buf[6], buf[7], buf[8]);
-
- return 0;
+err:
+ mutex_unlock(&tdev->lock);
+ return ret;
}
static int thingm_version(struct thingm_device *tdev)
@@ -106,10 +123,6 @@ static int thingm_version(struct thingm_device *tdev)
u8 buf[REPORT_SIZE] = { REPORT_ID, 'v', 0, 0, 0, 0, 0, 0, 0 };
int err;
- err = thingm_send(tdev, buf);
- if (err)
- return err;
-
err = thingm_recv(tdev, buf);
if (err)
return err;
@@ -131,25 +144,17 @@ static int thingm_write_color(struct thingm_rgb *rgb)
return thingm_send(rgb->tdev, buf);
}
-static void thingm_work(struct work_struct *work)
-{
- struct thingm_rgb *rgb = container_of(work, struct thingm_rgb, work);
-
- mutex_lock(&rgb->tdev->lock);
-
- if (thingm_write_color(rgb))
- hid_err(rgb->tdev->hdev, "failed to write color\n");
-
- mutex_unlock(&rgb->tdev->lock);
-}
-
-static void thingm_led_set(struct led_classdev *ldev,
- enum led_brightness brightness)
+static int thingm_led_set(struct led_classdev *ldev,
+ enum led_brightness brightness)
{
struct thingm_led *led = container_of(ldev, struct thingm_led, ldev);
+ int ret;
+
+ ret = thingm_write_color(led->rgb);
+ if (ret)
+ hid_err(led->rgb->tdev->hdev, "failed to write color\n");
- /* the ledclass has already stored the brightness value */
- schedule_work(&led->rgb->work);
+ return ret;
}
static int thingm_init_rgb(struct thingm_rgb *rgb)
@@ -162,10 +167,11 @@ static int thingm_init_rgb(struct thingm_rgb *rgb)
"thingm%d:red:led%d", minor, rgb->num);
rgb->red.ldev.name = rgb->red.name;
rgb->red.ldev.max_brightness = 255;
- rgb->red.ldev.brightness_set = thingm_led_set;
+ rgb->red.ldev.brightness_set_blocking = thingm_led_set;
rgb->red.rgb = rgb;
- err = led_classdev_register(&rgb->tdev->hdev->dev, &rgb->red.ldev);
+ err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
+ &rgb->red.ldev);
if (err)
return err;
@@ -174,46 +180,27 @@ static int thingm_init_rgb(struct thingm_rgb *rgb)
"thingm%d:green:led%d", minor, rgb->num);
rgb->green.ldev.name = rgb->green.name;
rgb->green.ldev.max_brightness = 255;
- rgb->green.ldev.brightness_set = thingm_led_set;
+ rgb->green.ldev.brightness_set_blocking = thingm_led_set;
rgb->green.rgb = rgb;
- err = led_classdev_register(&rgb->tdev->hdev->dev, &rgb->green.ldev);
+ err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
+ &rgb->green.ldev);
if (err)
- goto unregister_red;
+ return err;
/* Register the blue diode */
snprintf(rgb->blue.name, sizeof(rgb->blue.name),
"thingm%d:blue:led%d", minor, rgb->num);
rgb->blue.ldev.name = rgb->blue.name;
rgb->blue.ldev.max_brightness = 255;
- rgb->blue.ldev.brightness_set = thingm_led_set;
+ rgb->blue.ldev.brightness_set_blocking = thingm_led_set;
rgb->blue.rgb = rgb;
- err = led_classdev_register(&rgb->tdev->hdev->dev, &rgb->blue.ldev);
- if (err)
- goto unregister_green;
-
- INIT_WORK(&rgb->work, thingm_work);
-
- return 0;
-
-unregister_green:
- led_classdev_unregister(&rgb->green.ldev);
-
-unregister_red:
- led_classdev_unregister(&rgb->red.ldev);
-
+ err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
+ &rgb->blue.ldev);
return err;
}
-static void thingm_remove_rgb(struct thingm_rgb *rgb)
-{
- led_classdev_unregister(&rgb->red.ldev);
- led_classdev_unregister(&rgb->green.ldev);
- led_classdev_unregister(&rgb->blue.ldev);
- flush_work(&rgb->work);
-}
-
static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct thingm_device *tdev;
@@ -229,17 +216,13 @@ static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
err = hid_parse(hdev);
if (err)
- goto error;
-
- err = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
- if (err)
- goto error;
+ return err;
mutex_init(&tdev->lock);
err = thingm_version(tdev);
if (err)
- goto stop;
+ return err;
hid_dbg(hdev, "firmware version: %c.%c\n",
tdev->version.major, tdev->version.minor);
@@ -250,17 +233,18 @@ static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!tdev->fwinfo) {
hid_err(hdev, "unsupported firmware %c\n", tdev->version.major);
- err = -ENODEV;
- goto stop;
+ return -ENODEV;
}
tdev->rgb = devm_kzalloc(&hdev->dev,
sizeof(struct thingm_rgb) * tdev->fwinfo->numrgb,
GFP_KERNEL);
- if (!tdev->rgb) {
- err = -ENOMEM;
- goto stop;
- }
+ if (!tdev->rgb)
+ return -ENOMEM;
+
+ err = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (err)
+ return err;
for (i = 0; i < tdev->fwinfo->numrgb; ++i) {
struct thingm_rgb *rgb = tdev->rgb + i;
@@ -269,28 +253,12 @@ static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
rgb->num = tdev->fwinfo->first + i;
err = thingm_init_rgb(rgb);
if (err) {
- while (--i >= 0)
- thingm_remove_rgb(tdev->rgb + i);
- goto stop;
+ hid_hw_stop(hdev);
+ return err;
}
}
return 0;
-stop:
- hid_hw_stop(hdev);
-error:
- return err;
-}
-
-static void thingm_remove(struct hid_device *hdev)
-{
- struct thingm_device *tdev = hid_get_drvdata(hdev);
- int i;
-
- hid_hw_stop(hdev);
-
- for (i = 0; i < tdev->fwinfo->numrgb; ++i)
- thingm_remove_rgb(tdev->rgb + i);
}
static const struct hid_device_id thingm_table[] = {
@@ -302,7 +270,6 @@ MODULE_DEVICE_TABLE(hid, thingm_table);
static struct hid_driver thingm_driver = {
.name = "thingm",
.probe = thingm_probe,
- .remove = thingm_remove,
.id_table = thingm_table,
};
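Aside: the essential pattern behind the thingm_recv() change is that a blink(1) query is a two-step feature-report transaction, and both steps must happen under one lock so a concurrent LED write cannot interleave. A minimal sketch, assuming a caller-supplied mutex:

static int feature_query(struct hid_device *hdev, struct mutex *lock,
			 u8 *buf, size_t len)
{
	int ret;

	mutex_lock(lock);
	/* step 1: send the command via SET_REPORT */
	ret = hid_hw_raw_request(hdev, buf[0], buf, len,
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	if (ret >= 0)
		/* step 2: fetch the answer via GET_REPORT */
		ret = hid_hw_raw_request(hdev, buf[0], buf, len,
					 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	mutex_unlock(lock);
	return ret < 0 ? ret : 0;
}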
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 4390eee2ce84..c830ed39348f 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -2049,9 +2049,11 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
* -----+------------------------------+-----+-----+
* The single bits Yaw, Roll, Pitch in the lower right corner specify
* whether the wiimote is rotating fast (0) or slow (1). Speed for slow
- * roation is 440 deg/s and for fast rotation 2000 deg/s. To get a
- * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
- * and 9 for slow.
+ * rotation is 8192/440 units / deg/s and for fast rotation 8192/2000
+ * units / deg/s. To get a linear scale for fast rotation we multiply
+ * by 2000/440 = ~4.5454 and scale both fast and slow by 9 to match the
+ * previous scale reported by this driver.
+ * This leaves a linear scale with 8192*9/440 (~167.564) units / deg/s.
* If the wiimote is not rotating the sensor reports 2^13 = 8192.
* Ext specifies whether an extension is connected to the motionp.
* which is parsed by wiimote-core.
@@ -2070,15 +2072,15 @@ static void wiimod_mp_in_mp(struct wiimote_data *wdata, const __u8 *ext)
z -= 8192;
if (!(ext[3] & 0x02))
- x *= 18;
+ x = (x * 2000 * 9) / 440;
else
x *= 9;
if (!(ext[4] & 0x02))
- y *= 18;
+ y = (y * 2000 * 9) / 440;
else
y *= 9;
if (!(ext[3] & 0x01))
- z *= 18;
+ z = (z * 2000 * 9) / 440;
else
z *= 9;
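
A quick check of the arithmetic in the rewritten comment, assuming raw counts with the 8192 bias already removed: one deg/s is 8192/440 counts at the slow rate and 8192/2000 counts at the fast rate, so multiplying slow readings by 9 and fast readings by 2000*9/440 puts both on a common 8192*9/440 (~167.56) units per deg/s scale:

    /* sketch only: 'raw' is a signed reading minus the 8192 centre */
    int slow = raw * 9;                /* 8192*9/440 units per deg/s */
    int fast = (raw * 2000 * 9) / 440; /* same scale as 'slow'       */
    /* converting back: deg_per_s = value * 440 / (8192 * 9) */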
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b9216938a718..2e021ba8ff05 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -283,17 +283,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
+ u16 size;
+ int args_len;
+ int index = 0;
+
+ i2c_hid_dbg(ihid, "%s\n", __func__);
+
+ if (data_len > ihid->bufsize)
+ return -EINVAL;
- /* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
- u16 size = 2 /* size */ +
+ size = 2 /* size */ +
(reportID ? 1 : 0) /* reportID */ +
data_len /* buf */;
- int args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ args_len = (reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
2 /* dataRegister */ +
size /* args */;
- int index = 0;
-
- i2c_hid_dbg(ihid, "%s\n", __func__);
if (!use_data && maxOutputLength == 0)
return -ENOSYS;
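
For reference, the two lengths hoisted above describe the HID-over-I2C output/SET_REPORT payload: 'size' counts the 2-byte length prefix, the optional report ID and the data itself, and 'args_len' adds the 2-byte data-register address plus one extra byte when the report ID is >= 0x0F (the sentinel value that pushes the real ID into a third byte). A worked example with illustrative values:

    u8  reportID = 0x05;  /* hypothetical report ID      */
    u16 data_len = 6;     /* hypothetical payload length */

    u16 size     = 2 + (reportID ? 1 : 0) + data_len;     /* = 9  */
    int args_len = (reportID >= 0x0F ? 1 : 0) + 2 + size; /* = 11 */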
@@ -1108,13 +1112,30 @@ static int i2c_hid_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid = ihid->hid;
- int ret = 0;
+ int ret;
int wake_status;
- if (hid->driver && hid->driver->suspend)
+ if (hid->driver && hid->driver->suspend) {
+ /*
+ * Wake up the device so that I/O issued by the HID
+ * driver's suspend code can succeed.
+ */
+ ret = pm_runtime_resume(dev);
+ if (ret < 0)
+ return ret;
+
ret = hid->driver->suspend(hid, PMSG_SUSPEND);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!pm_runtime_suspended(dev)) {
+ /* Save some power */
+ i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+
+ disable_irq(ihid->irq);
+ }
- disable_irq(ihid->irq);
if (device_may_wakeup(&client->dev)) {
wake_status = enable_irq_wake(ihid->irq);
if (!wake_status)
@@ -1124,10 +1145,7 @@ static int i2c_hid_suspend(struct device *dev)
wake_status);
}
- /* Save some power */
- i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
-
- return ret;
+ return 0;
}
static int i2c_hid_resume(struct device *dev)
@@ -1138,11 +1156,6 @@ static int i2c_hid_resume(struct device *dev)
struct hid_device *hid = ihid->hid;
int wake_status;
- enable_irq(ihid->irq);
- ret = i2c_hid_hwreset(client);
- if (ret)
- return ret;
-
if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
wake_status = disable_irq_wake(ihid->irq);
if (!wake_status)
@@ -1152,6 +1165,16 @@ static int i2c_hid_resume(struct device *dev)
wake_status);
}
+ /* We'll resume to full power */
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ enable_irq(ihid->irq);
+ ret = i2c_hid_hwreset(client);
+ if (ret)
+ return ret;
+
if (hid->driver && hid->driver->reset_resume) {
ret = hid->driver->reset_resume(hid);
return ret;
@@ -1191,6 +1214,7 @@ static const struct dev_pm_ops i2c_hid_pm = {
static const struct i2c_device_id i2c_hid_id_table[] = {
{ "hid", 0 },
+ { "hid-over-i2c", 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, i2c_hid_id_table);
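
The reshuffled PM callbacks follow a standard runtime-PM pattern: wake the device before the HID driver's ->suspend() runs (it may need to do I/O), only power down and mask the IRQ if runtime PM has not already suspended the device, and on resume force the runtime-PM bookkeeping back to 'active' before resetting the hardware. A condensed sketch, not the driver code itself:

    static int example_suspend(struct device *dev)
    {
            int ret = pm_runtime_resume(dev); /* wake for I/O */

            if (ret < 0)
                    return ret;
            /* ... the HID driver's ->suspend() runs here ... */
            if (!pm_runtime_suspended(dev))
                    ; /* power down and disable the IRQ once */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            pm_runtime_disable(dev);    /* resync runtime PM:   */
            pm_runtime_set_active(dev); /* the device is at     */
            pm_runtime_enable(dev);     /* full power afterward */
            /* ... re-enable the IRQ and reset the device ... */
            return 0;
    }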
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index e094c572b86e..16b6f11a0700 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -384,7 +384,7 @@ struct uhid_create_req_compat {
static int uhid_event_from_user(const char __user *buffer, size_t len,
struct uhid_event *event)
{
- if (is_compat_task()) {
+ if (in_compat_syscall()) {
u32 type;
if (get_user(type, buffer))
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ad71160b9ea4..ae83af649a60 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -951,14 +951,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
return ret;
}
-static void usbhid_restart_queues(struct usbhid_device *usbhid)
-{
- if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- usbhid_restart_out_queue(usbhid);
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- usbhid_restart_ctrl_queue(usbhid);
-}
-
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -1404,6 +1396,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
usb_kill_urb(usbhid->urbout);
}
+static void hid_restart_io(struct hid_device *hid)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (clear_halt || reset_pending)
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+ spin_unlock_irq(&usbhid->lock);
+
+ if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
+ return;
+
+ if (!clear_halt) {
+ if (hid_start_in(hid) < 0)
+ hid_io_error(hid);
+ }
+
+ spin_lock_irq(&usbhid->lock);
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+}
+
/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
@@ -1453,14 +1476,14 @@ static int hid_post_reset(struct usb_interface *intf)
return 1;
}
+ /* No need to do another reset or clear a halted endpoint */
spin_lock_irq(&usbhid->lock);
clear_bit(HID_RESET_PENDING, &usbhid->iofl);
+ clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
+
+ hid_restart_io(hid);
return 0;
}
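
hid_restart_io() folds usbhid_restart_queues() and the old resume logic into one helper with a consistent shape: sample the flag bits, update state under the spinlock, submit the input URB with the lock dropped, then restart the out/ctrl queues under the lock again. Reduced to its locking skeleton (a sketch, not the function itself):

    int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);

    spin_lock_irq(&usbhid->lock);
    /* clear HID_SUSPENDED, schedule reset work if needed */
    spin_unlock_irq(&usbhid->lock);

    if (!clear_halt)
            ; /* hid_start_in() runs here, outside the lock */

    spin_lock_irq(&usbhid->lock);
    /* restart the out and ctrl queues */
    spin_unlock_irq(&usbhid->lock);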
@@ -1483,25 +1506,9 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
{
- struct usbhid_device *usbhid = hid->driver_data;
- int status;
-
- spin_lock_irq(&usbhid->lock);
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
-
- usbhid_restart_queues(usbhid);
- spin_unlock_irq(&usbhid->lock);
-
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
+ int status = 0;
+ hid_restart_io(hid);
if (driver_suspended && hid->driver && hid->driver->resume)
status = hid->driver->resume(hid);
return status;
@@ -1570,12 +1577,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
static int hid_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata (intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- if (!test_bit(HID_STARTED, &usbhid->iofl))
- return 0;
-
status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
@@ -1584,10 +1587,8 @@ static int hid_resume(struct usb_interface *intf)
static int hid_reset_resume(struct usb_interface *intf)
{
struct hid_device *hid = usb_get_intfdata(intf);
- struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 7dd0953cd70f..ed2f68edc8f1 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -55,6 +55,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
@@ -106,6 +107,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
@@ -140,6 +142,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5cb21dd91094..ccf1883318c3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -152,6 +152,25 @@ static void wacom_feature_mapping(struct hid_device *hdev,
hid_data->inputmode = field->report->id;
hid_data->inputmode_index = usage->usage_index;
break;
+
+ case HID_UP_DIGITIZER:
+ if (field->report->id == 0x0B &&
+ (field->application == WACOM_G9_DIGITIZER ||
+ field->application == WACOM_G11_DIGITIZER)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
+
+ case WACOM_G9_PAGE:
+ case WACOM_G11_PAGE:
+ if (field->report->id == 0x03 &&
+ (field->application == WACOM_G9_TOUCHSCREEN ||
+ field->application == WACOM_G11_TOUCHSCREEN)) {
+ wacom->wacom_wac.mode_report = field->report->id;
+ wacom->wacom_wac.mode_value = 0;
+ }
+ break;
}
}
@@ -322,26 +341,41 @@ static int wacom_hid_set_device_mode(struct hid_device *hdev)
return 0;
}
-static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
- int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev,
+ struct wacom_wac *wacom_wac)
{
- unsigned char *rep_data;
+ u8 *rep_data;
+ struct hid_report *r;
+ struct hid_report_enum *re;
+ int length;
int error = -ENOMEM, limit = 0;
- rep_data = kzalloc(length, GFP_KERNEL);
+ if (wacom_wac->mode_report < 0)
+ return 0;
+
+ re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ r = re->report_id_hash[wacom_wac->mode_report];
+ if (!r)
+ return -EINVAL;
+
+ rep_data = hid_alloc_report_buf(r, GFP_KERNEL);
if (!rep_data)
- return error;
+ return -ENOMEM;
+
+ length = hid_report_len(r);
do {
- rep_data[0] = report_id;
- rep_data[1] = mode;
+ rep_data[0] = wacom_wac->mode_report;
+ rep_data[1] = wacom_wac->mode_value;
error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
length, 1);
if (error >= 0)
error = wacom_get_report(hdev, HID_FEATURE_REPORT,
rep_data, length, 1);
- } while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
+ } while (error >= 0 &&
+ rep_data[1] != wacom_wac->mode_report &&
+ limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
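
The rewritten helper sizes its buffer from the report descriptor rather than from a caller-supplied length, the usual pattern when a feature report is written and then read back for verification. The allocation step in isolation (report_id stands in for wacom_wac->mode_report):

    struct hid_report_enum *re = &hdev->report_enum[HID_FEATURE_REPORT];
    struct hid_report *r = re->report_id_hash[report_id];
    u8 *buf;
    int len;

    if (!r)
            return -EINVAL;
    buf = hid_alloc_report_buf(r, GFP_KERNEL); /* descriptor-sized */
    if (!buf)
            return -ENOMEM;
    len = hid_report_len(r); /* on-wire length, report ID included */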
@@ -411,32 +445,41 @@ static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
static int wacom_query_tablet_data(struct hid_device *hdev,
struct wacom_features *features)
{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
if (hdev->bus == BUS_BLUETOOTH)
return wacom_bt_query_tablet_data(hdev, 1, features);
- if (features->type == HID_GENERIC)
- return wacom_hid_set_device_mode(hdev);
-
- if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
- if (features->type > TABLETPC) {
- /* MT Tablet PC touch */
- return wacom_set_device_mode(hdev, 3, 4, 4);
- }
- else if (features->type == WACOM_24HDT) {
- return wacom_set_device_mode(hdev, 18, 3, 2);
- }
- else if (features->type == WACOM_27QHDT) {
- return wacom_set_device_mode(hdev, 131, 3, 2);
- }
- else if (features->type == BAMBOO_PAD) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
- }
- } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
- if (features->type <= BAMBOO_PT) {
- return wacom_set_device_mode(hdev, 2, 2, 2);
+ if (features->type != HID_GENERIC) {
+ if (features->device_type & WACOM_DEVICETYPE_TOUCH) {
+ if (features->type > TABLETPC) {
+ /* MT Tablet PC touch */
+ wacom_wac->mode_report = 3;
+ wacom_wac->mode_value = 4;
+ } else if (features->type == WACOM_24HDT) {
+ wacom_wac->mode_report = 18;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == WACOM_27QHDT) {
+ wacom_wac->mode_report = 131;
+ wacom_wac->mode_value = 2;
+ } else if (features->type == BAMBOO_PAD) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
+ } else if (features->device_type & WACOM_DEVICETYPE_PEN) {
+ if (features->type <= BAMBOO_PT) {
+ wacom_wac->mode_report = 2;
+ wacom_wac->mode_value = 2;
+ }
}
}
+ wacom_set_device_mode(hdev, wacom_wac);
+
+ if (features->type == HID_GENERIC)
+ return wacom_hid_set_device_mode(hdev);
+
return 0;
}
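
The branches above turn the old per-type wacom_set_device_mode() calls into data; the resulting (mode_report, mode_value) pairs are unchanged from the deleted code:

    /*
     *   device class                      mode_report  mode_value
     *   MT Tablet PC touch (> TABLETPC)        3            4
     *   WACOM_24HDT                           18            2
     *   WACOM_27QHDT                         131            2
     *   BAMBOO_PAD, pen <= BAMBOO_PT           2            2
     */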
@@ -1357,6 +1400,9 @@ static void wacom_clean_inputs(struct wacom *wacom)
wacom->wacom_wac.pen_input = NULL;
wacom->wacom_wac.touch_input = NULL;
wacom->wacom_wac.pad_input = NULL;
+ wacom->wacom_wac.pen_registered = false;
+ wacom->wacom_wac.touch_registered = false;
+ wacom->wacom_wac.pad_registered = false;
wacom_destroy_leds(wacom);
}
@@ -1494,123 +1540,6 @@ static void wacom_calculate_res(struct wacom_features *features)
features->unitExpo);
}
-static void wacom_wireless_work(struct work_struct *work)
-{
- struct wacom *wacom = container_of(work, struct wacom, work);
- struct usb_device *usbdev = wacom->usbdev;
- struct wacom_wac *wacom_wac = &wacom->wacom_wac;
- struct hid_device *hdev1, *hdev2;
- struct wacom *wacom1, *wacom2;
- struct wacom_wac *wacom_wac1, *wacom_wac2;
- int error;
-
- /*
- * Regardless if this is a disconnect or a new tablet,
- * remove any existing input and battery devices.
- */
-
- wacom_destroy_battery(wacom);
-
- /* Stylus interface */
- hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
- wacom1 = hid_get_drvdata(hdev1);
- wacom_wac1 = &(wacom1->wacom_wac);
- wacom_clean_inputs(wacom1);
-
- /* Touch interface */
- hdev2 = usb_get_intfdata(usbdev->config->interface[2]);
- wacom2 = hid_get_drvdata(hdev2);
- wacom_wac2 = &(wacom2->wacom_wac);
- wacom_clean_inputs(wacom2);
-
- if (wacom_wac->pid == 0) {
- hid_info(wacom->hdev, "wireless tablet disconnected\n");
- wacom_wac1->shared->type = 0;
- } else {
- const struct hid_device_id *id = wacom_ids;
-
- hid_info(wacom->hdev, "wireless tablet connected with PID %x\n",
- wacom_wac->pid);
-
- while (id->bus) {
- if (id->vendor == USB_VENDOR_ID_WACOM &&
- id->product == wacom_wac->pid)
- break;
- id++;
- }
-
- if (!id->bus) {
- hid_info(wacom->hdev, "ignoring unknown PID.\n");
- return;
- }
-
- /* Stylus interface */
- wacom_wac1->features =
- *((struct wacom_features *)id->driver_data);
- wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PEN;
- wacom_set_default_phy(&wacom_wac1->features);
- wacom_calculate_res(&wacom_wac1->features);
- snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
- wacom_wac1->features.name);
- if (wacom_wac1->features.type < BAMBOO_PEN ||
- wacom_wac1->features.type > BAMBOO_PT) {
- snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
- wacom_wac1->features.name);
- wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
- }
- wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max;
- wacom_wac1->shared->type = wacom_wac1->features.type;
- wacom_wac1->pid = wacom_wac->pid;
- error = wacom_allocate_inputs(wacom1) ||
- wacom_register_inputs(wacom1);
- if (error)
- goto fail;
-
- /* Touch interface */
- if (wacom_wac1->features.touch_max ||
- (wacom_wac1->features.type >= INTUOSHT &&
- wacom_wac1->features.type <= BAMBOO_PT)) {
- wacom_wac2->features =
- *((struct wacom_features *)id->driver_data);
- wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
- wacom_set_default_phy(&wacom_wac2->features);
- wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
- wacom_calculate_res(&wacom_wac2->features);
- snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
- "%s (WL) Finger",wacom_wac2->features.name);
- if (wacom_wac1->features.touch_max)
- wacom_wac2->features.device_type |= WACOM_DEVICETYPE_TOUCH;
- if (wacom_wac1->features.type >= INTUOSHT &&
- wacom_wac1->features.type <= BAMBOO_PT) {
- snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
- "%s (WL) Pad",wacom_wac2->features.name);
- wacom_wac2->features.device_type |= WACOM_DEVICETYPE_PAD;
- }
- wacom_wac2->pid = wacom_wac->pid;
- error = wacom_allocate_inputs(wacom2) ||
- wacom_register_inputs(wacom2);
- if (error)
- goto fail;
-
- if ((wacom_wac1->features.type == INTUOSHT ||
- wacom_wac1->features.type == INTUOSHT2) &&
- wacom_wac1->features.touch_max)
- wacom_wac->shared->touch_input = wacom_wac2->touch_input;
- }
-
- error = wacom_initialize_battery(wacom);
- if (error)
- goto fail;
- }
-
- return;
-
-fail:
- wacom_clean_inputs(wacom1);
- wacom_clean_inputs(wacom2);
- return;
-}
-
void wacom_battery_work(struct work_struct *work)
{
struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1642,7 +1571,7 @@ static size_t wacom_compute_pktlen(struct hid_device *hdev)
return size;
}
-static void wacom_update_name(struct wacom *wacom)
+static void wacom_update_name(struct wacom *wacom, const char *suffix)
{
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom_wac->features;
@@ -1678,68 +1607,28 @@ static void wacom_update_name(struct wacom *wacom)
/* Append the device type to the name */
snprintf(wacom_wac->pen_name, sizeof(wacom_wac->pen_name),
- "%s Pen", name);
+ "%s%s Pen", name, suffix);
snprintf(wacom_wac->touch_name, sizeof(wacom_wac->touch_name),
- "%s Finger", name);
+ "%s%s Finger", name, suffix);
snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name),
- "%s Pad", name);
+ "%s%s Pad", name, suffix);
}
-static int wacom_probe(struct hid_device *hdev,
- const struct hid_device_id *id)
+static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct usb_device *dev = interface_to_usbdev(intf);
- struct wacom *wacom;
- struct wacom_wac *wacom_wac;
- struct wacom_features *features;
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ struct hid_device *hdev = wacom->hdev;
int error;
unsigned int connect_mask = HID_CONNECT_HIDRAW;
- if (!id->driver_data)
- return -EINVAL;
-
- hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
- /* hid-core sets this quirk for the boot interface */
- hdev->quirks &= ~HID_QUIRK_NOGET;
-
- wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
- if (!wacom)
- return -ENOMEM;
-
- hid_set_drvdata(hdev, wacom);
- wacom->hdev = hdev;
-
- /* ask for the report descriptor to be loaded by HID */
- error = hid_parse(hdev);
- if (error) {
- hid_err(hdev, "parse failed\n");
- goto fail_parse;
- }
-
- wacom_wac = &wacom->wacom_wac;
- wacom_wac->features = *((struct wacom_features *)id->driver_data);
- features = &wacom_wac->features;
features->pktlen = wacom_compute_pktlen(hdev);
- if (features->pktlen > WACOM_PKGLEN_MAX) {
- error = -EINVAL;
- goto fail_pktlen;
- }
-
- if (features->check_for_hid_type && features->hid_type != hdev->type) {
- error = -ENODEV;
- goto fail_type;
- }
-
- wacom->usbdev = dev;
- wacom->intf = intf;
- mutex_init(&wacom->lock);
- INIT_WORK(&wacom->work, wacom_wireless_work);
+ if (features->pktlen > WACOM_PKGLEN_MAX)
+ return -EINVAL;
error = wacom_allocate_inputs(wacom);
if (error)
- goto fail_allocate_inputs;
+ return error;
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
@@ -1752,7 +1641,7 @@ static int wacom_probe(struct hid_device *hdev,
} else if ((features->pktlen != WACOM_PKGLEN_BPAD_TOUCH) &&
(features->pktlen != WACOM_PKGLEN_BPAD_TOUCH_USB)) {
error = -ENODEV;
- goto fail_shared_data;
+ goto fail_allocate_inputs;
}
}
@@ -1772,14 +1661,14 @@ static int wacom_probe(struct hid_device *hdev,
error ? "Ignoring" : "Assuming pen");
if (error)
- goto fail_shared_data;
+ goto fail_parsed;
features->device_type |= WACOM_DEVICETYPE_PEN;
}
wacom_calculate_res(features);
- wacom_update_name(wacom);
+ wacom_update_name(wacom, wireless ? " (WL)" : "");
error = wacom_add_shared_data(hdev);
if (error)
@@ -1796,14 +1685,6 @@ static int wacom_probe(struct hid_device *hdev,
if (error)
goto fail_register_inputs;
- if (hdev->bus == BUS_BLUETOOTH) {
- error = device_create_file(&hdev->dev, &dev_attr_speed);
- if (error)
- hid_warn(hdev,
- "can't create sysfs speed attribute err: %d\n",
- error);
- }
-
if (features->type == HID_GENERIC)
connect_mask |= HID_CONNECT_DRIVER;
@@ -1814,8 +1695,10 @@ static int wacom_probe(struct hid_device *hdev,
goto fail_hw_start;
}
- /* Note that if query fails it is not a hard failure */
- wacom_query_tablet_data(hdev, features);
+ if (!wireless) {
+ /* Note that if query fails it is not a hard failure */
+ wacom_query_tablet_data(hdev, features);
+ }
/* touch only Bamboo doesn't support pen */
if ((features->type == BAMBOO_TOUCH) &&
@@ -1844,18 +1727,169 @@ static int wacom_probe(struct hid_device *hdev,
return 0;
fail_hw_start:
- if (hdev->bus == BUS_BLUETOOTH)
- device_remove_file(&hdev->dev, &dev_attr_speed);
+ hid_hw_stop(hdev);
fail_register_inputs:
wacom_clean_inputs(wacom);
wacom_destroy_battery(wacom);
fail_battery:
wacom_remove_shared_data(wacom);
fail_shared_data:
- wacom_clean_inputs(wacom);
+fail_parsed:
fail_allocate_inputs:
+ wacom_clean_inputs(wacom);
+ return error;
+}
+
+static void wacom_wireless_work(struct work_struct *work)
+{
+ struct wacom *wacom = container_of(work, struct wacom, work);
+ struct usb_device *usbdev = wacom->usbdev;
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct hid_device *hdev1, *hdev2;
+ struct wacom *wacom1, *wacom2;
+ struct wacom_wac *wacom_wac1, *wacom_wac2;
+ int error;
+
+ /*
+ * Regardless of whether this is a disconnect or a new tablet,
+ * remove any existing input and battery devices.
+ */
+
+ wacom_destroy_battery(wacom);
+
+ /* Stylus interface */
+ hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
+ wacom1 = hid_get_drvdata(hdev1);
+ wacom_wac1 = &(wacom1->wacom_wac);
+ wacom_clean_inputs(wacom1);
+
+ /* Touch interface */
+ hdev2 = usb_get_intfdata(usbdev->config->interface[2]);
+ wacom2 = hid_get_drvdata(hdev2);
+ wacom_wac2 = &(wacom2->wacom_wac);
+ wacom_clean_inputs(wacom2);
+
+ if (wacom_wac->pid == 0) {
+ hid_info(wacom->hdev, "wireless tablet disconnected\n");
+ wacom_wac1->shared->type = 0;
+ } else {
+ const struct hid_device_id *id = wacom_ids;
+
+ hid_info(wacom->hdev, "wireless tablet connected with PID %x\n",
+ wacom_wac->pid);
+
+ while (id->bus) {
+ if (id->vendor == USB_VENDOR_ID_WACOM &&
+ id->product == wacom_wac->pid)
+ break;
+ id++;
+ }
+
+ if (!id->bus) {
+ hid_info(wacom->hdev, "ignoring unknown PID.\n");
+ return;
+ }
+
+ /* Stylus interface */
+ wacom_wac1->features =
+ *((struct wacom_features *)id->driver_data);
+
+ wacom_wac1->pid = wacom_wac->pid;
+ hid_hw_stop(hdev1);
+ error = wacom_parse_and_register(wacom1, true);
+ if (error)
+ goto fail;
+
+ /* Touch interface */
+ if (wacom_wac1->features.touch_max ||
+ (wacom_wac1->features.type >= INTUOSHT &&
+ wacom_wac1->features.type <= BAMBOO_PT)) {
+ wacom_wac2->features =
+ *((struct wacom_features *)id->driver_data);
+ wacom_wac2->pid = wacom_wac->pid;
+ hid_hw_stop(hdev2);
+ error = wacom_parse_and_register(wacom2, true);
+ if (error)
+ goto fail;
+ }
+
+ error = wacom_initialize_battery(wacom);
+ if (error)
+ goto fail;
+ }
+
+ return;
+
+fail:
+ wacom_clean_inputs(wacom1);
+ wacom_clean_inputs(wacom2);
+ return;
+}
+
+static int wacom_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct wacom *wacom;
+ struct wacom_wac *wacom_wac;
+ struct wacom_features *features;
+ int error;
+
+ if (!id->driver_data)
+ return -EINVAL;
+
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+ /* hid-core sets this quirk for the boot interface */
+ hdev->quirks &= ~HID_QUIRK_NOGET;
+
+ wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
+ if (!wacom)
+ return -ENOMEM;
+
+ hid_set_drvdata(hdev, wacom);
+ wacom->hdev = hdev;
+
+ wacom_wac = &wacom->wacom_wac;
+ wacom_wac->features = *((struct wacom_features *)id->driver_data);
+ features = &wacom_wac->features;
+
+ if (features->check_for_hid_type && features->hid_type != hdev->type) {
+ error = -ENODEV;
+ goto fail_type;
+ }
+
+ wacom_wac->hid_data.inputmode = -1;
+ wacom_wac->mode_report = -1;
+
+ wacom->usbdev = dev;
+ wacom->intf = intf;
+ mutex_init(&wacom->lock);
+ INIT_WORK(&wacom->work, wacom_wireless_work);
+
+ /* ask for the report descriptor to be loaded by HID */
+ error = hid_parse(hdev);
+ if (error) {
+ hid_err(hdev, "parse failed\n");
+ goto fail_parse;
+ }
+
+ error = wacom_parse_and_register(wacom, false);
+ if (error)
+ goto fail_parse;
+
+ if (hdev->bus == BUS_BLUETOOTH) {
+ error = device_create_file(&hdev->dev, &dev_attr_speed);
+ if (error)
+ hid_warn(hdev,
+ "can't create sysfs speed attribute err: %d\n",
+ error);
+ }
+
+ return 0;
+
fail_type:
-fail_pktlen:
fail_parse:
kfree(wacom);
hid_set_drvdata(hdev, NULL);
@@ -1865,6 +1899,11 @@ fail_parse:
static void wacom_remove(struct hid_device *hdev)
{
struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+
+ if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
+ hid_hw_close(hdev);
hid_hw_stop(hdev);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 99ef77fcfb80..02c4efea241c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -575,16 +575,102 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
return 1;
}
+static int wacom_intuos_get_tool_type(int tool_id)
+{
+ int tool_type;
+
+ switch (tool_id) {
+ case 0x812: /* Inking pen */
+ case 0x801: /* Intuos3 Inking pen */
+ case 0x120802: /* Intuos4/5 Inking Pen */
+ case 0x012:
+ tool_type = BTN_TOOL_PENCIL;
+ break;
+
+ case 0x822: /* Pen */
+ case 0x842:
+ case 0x852:
+ case 0x823: /* Intuos3 Grip Pen */
+ case 0x813: /* Intuos3 Classic Pen */
+ case 0x885: /* Intuos3 Marker Pen */
+ case 0x802: /* Intuos4/5 13HD/24HD General Pen */
+ case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
+ case 0x8e2: /* IntuosHT2 pen */
+ case 0x022:
+ case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
+ case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
+ case 0x160802: /* Cintiq 13HD Pro Pen */
+ case 0x180802: /* DTH2242 Pen */
+ case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
+ tool_type = BTN_TOOL_PEN;
+ break;
+
+ case 0x832: /* Stroke pen */
+ case 0x032:
+ tool_type = BTN_TOOL_BRUSH;
+ break;
+
+ case 0x007: /* Mouse 4D and 2D */
+ case 0x09c:
+ case 0x094:
+ case 0x017: /* Intuos3 2D Mouse */
+ case 0x806: /* Intuos4 Mouse */
+ tool_type = BTN_TOOL_MOUSE;
+ break;
+
+ case 0x096: /* Lens cursor */
+ case 0x097: /* Intuos3 Lens cursor */
+ case 0x006: /* Intuos4 Lens cursor */
+ tool_type = BTN_TOOL_LENS;
+ break;
+
+ case 0x82a: /* Eraser */
+ case 0x85a:
+ case 0x91a:
+ case 0xd1a:
+ case 0x0fa:
+ case 0x82b: /* Intuos3 Grip Pen Eraser */
+ case 0x81b: /* Intuos3 Classic Pen Eraser */
+ case 0x91b: /* Intuos3 Airbrush Eraser */
+ case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
+ case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+ case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+ case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+ case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
+ case 0x18080a: /* DTH2242 Eraser */
+ case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+ tool_type = BTN_TOOL_RUBBER;
+ break;
+
+ case 0xd12:
+ case 0x912:
+ case 0x112:
+ case 0x913: /* Intuos3 Airbrush */
+ case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
+ case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
+ tool_type = BTN_TOOL_AIRBRUSH;
+ break;
+
+ default: /* Unknown tool */
+ tool_type = BTN_TOOL_PEN;
+ break;
+ }
+ return tool_type;
+}
+
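
Factoring the switch out into wacom_intuos_get_tool_type() leaves the enter-report path with a single assignment (see the later hunk) and makes the fallback explicit: an unrecognised tool ID degrades to a plain pen rather than being dropped. A few illustrative mappings:

    wacom_intuos_get_tool_type(0x822);  /* Pen     -> BTN_TOOL_PEN  */
    wacom_intuos_get_tool_type(0x096);  /* Lens    -> BTN_TOOL_LENS */
    wacom_intuos_get_tool_type(0xfff);  /* unknown -> BTN_TOOL_PEN  */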
static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
struct input_dev *input = wacom->pen_input;
- int idx = 0;
+ int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
- /* tool number */
- if (features->type == INTUOS)
- idx = data[1] & 0x01;
+ if (!(((data[1] & 0xfc) == 0xc0) || /* in prox */
+ ((data[1] & 0xfe) == 0x20) || /* in range */
+ ((data[1] & 0xfe) == 0x80))) /* out prox */
+ return 0;
/* Enter report */
if ((data[1] & 0xfc) == 0xc0) {
@@ -596,116 +682,24 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
- switch (wacom->id[idx]) {
- case 0x812: /* Inking pen */
- case 0x801: /* Intuos3 Inking pen */
- case 0x120802: /* Intuos4/5 Inking Pen */
- case 0x012:
- wacom->tool[idx] = BTN_TOOL_PENCIL;
- break;
-
- case 0x822: /* Pen */
- case 0x842:
- case 0x852:
- case 0x823: /* Intuos3 Grip Pen */
- case 0x813: /* Intuos3 Classic Pen */
- case 0x885: /* Intuos3 Marker Pen */
- case 0x802: /* Intuos4/5 13HD/24HD General Pen */
- case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
- case 0x022:
- case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
- case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
- case 0x160802: /* Cintiq 13HD Pro Pen */
- case 0x180802: /* DTH2242 Pen */
- case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
- wacom->tool[idx] = BTN_TOOL_PEN;
- break;
-
- case 0x832: /* Stroke pen */
- case 0x032:
- wacom->tool[idx] = BTN_TOOL_BRUSH;
- break;
-
- case 0x007: /* Mouse 4D and 2D */
- case 0x09c:
- case 0x094:
- case 0x017: /* Intuos3 2D Mouse */
- case 0x806: /* Intuos4 Mouse */
- wacom->tool[idx] = BTN_TOOL_MOUSE;
- break;
-
- case 0x096: /* Lens cursor */
- case 0x097: /* Intuos3 Lens cursor */
- case 0x006: /* Intuos4 Lens cursor */
- wacom->tool[idx] = BTN_TOOL_LENS;
- break;
-
- case 0x82a: /* Eraser */
- case 0x85a:
- case 0x91a:
- case 0xd1a:
- case 0x0fa:
- case 0x82b: /* Intuos3 Grip Pen Eraser */
- case 0x81b: /* Intuos3 Classic Pen Eraser */
- case 0x91b: /* Intuos3 Airbrush Eraser */
- case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
- case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
- case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
- case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
- case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
- case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
- case 0x18080a: /* DTH2242 Eraser */
- case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
- wacom->tool[idx] = BTN_TOOL_RUBBER;
- break;
-
- case 0xd12:
- case 0x912:
- case 0x112:
- case 0x913: /* Intuos3 Airbrush */
- case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
- case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
- wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
- break;
-
- default: /* Unknown tool */
- wacom->tool[idx] = BTN_TOOL_PEN;
- break;
- }
+ wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
+
return 1;
}
- /*
- * don't report events for invalid data
- */
- /* older I4 styli don't work with new Cintiqs */
- if ((!((wacom->id[idx] >> 20) & 0x01) &&
- (features->type == WACOM_21UX2)) ||
- /* Only large Intuos support Lense Cursor */
- (wacom->tool[idx] == BTN_TOOL_LENS &&
- (features->type == INTUOS3 ||
- features->type == INTUOS3S ||
- features->type == INTUOS4 ||
- features->type == INTUOS4S ||
- features->type == INTUOS5 ||
- features->type == INTUOS5S ||
- features->type == INTUOSPM ||
- features->type == INTUOSPS)) ||
- /* Cintiq doesn't send data when RDY bit isn't set */
- (features->type == CINTIQ && !(data[1] & 0x40)))
- return 1;
+ /* in Range */
+ if ((data[1] & 0xfe) == 0x20) {
+ if (features->type != INTUOSHT2)
+ wacom->shared->stylus_in_proximity = true;
- wacom->shared->stylus_in_proximity = true;
- if (wacom->shared->touch_down)
+ /* in Range while exiting */
+ if (wacom->reporting_data) {
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
+ return 2;
+ }
return 1;
-
- /* in Range while exiting */
- if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_PRESSURE, 0);
- input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
- return 2;
}
/* Exit report */
@@ -750,13 +744,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
return 2;
}
- /* don't report other events if we don't know the ID */
- if (!wacom->id[idx]) {
- /* but reschedule a read of the current tool */
- wacom_intuos_schedule_prox_event(wacom);
- return 1;
- }
-
return 0;
}
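
The early return added at the top of wacom_intuos_inout() documents the only three byte-1 patterns the function handles; anything else now returns 0 so the caller continues with general event processing. The accepted patterns, as a self-contained predicate derived from the masks above (a sketch):

    /* enter/prox:  (data[1] & 0xfc) == 0xc0  (tool ID follows)
     * in range:    (data[1] & 0xfe) == 0x20
     * out of prox: (data[1] & 0xfe) == 0x80 */
    static bool example_is_inout_packet(u8 b)
    {
            return ((b & 0xfc) == 0xc0) ||
                   ((b & 0xfe) == 0x20) ||
                   ((b & 0xfe) == 0x80);
    }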
@@ -897,6 +884,36 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
data[0] != WACOM_REPORT_INTUOS_PEN)
return 0;
+ if (wacom->shared->touch_down)
+ return 1;
+
+ /* don't report events if we don't know the tool ID */
+ if (!wacom->id[idx]) {
+ /* but reschedule a read of the current tool */
+ wacom_intuos_schedule_prox_event(wacom);
+ return 1;
+ }
+
+ /*
+ * don't report events for invalid data
+ */
+ /* older I4 styli don't work with new Cintiqs */
+ if ((!((wacom->id[idx] >> 20) & 0x01) &&
+ (features->type == WACOM_21UX2)) ||
+ /* Only large Intuos models support the Lens Cursor */
+ (wacom->tool[idx] == BTN_TOOL_LENS &&
+ (features->type == INTUOS3 ||
+ features->type == INTUOS3S ||
+ features->type == INTUOS4 ||
+ features->type == INTUOS4S ||
+ features->type == INTUOS5 ||
+ features->type == INTUOS5S ||
+ features->type == INTUOSPM ||
+ features->type == INTUOSPS)) ||
+ /* Cintiq doesn't send data when RDY bit isn't set */
+ (features->type == CINTIQ && !(data[1] & 0x40)))
+ return 1;
+
x = (be16_to_cpup((__be16 *)&data[2]) << 1) | ((data[9] >> 1) & 1);
y = (be16_to_cpup((__be16 *)&data[4]) << 1) | (data[9] & 1);
distance = data[9] >> 2;
@@ -2409,6 +2426,17 @@ void wacom_setup_device_quirks(struct wacom *wacom)
}
/*
+ * Hack for the Bamboo One:
+ * the device presents a PAD/Touch interface like most Bamboos and even
+ * sends ghost PAD data on it. However, we must later disable this
+ * ghost interface, and we cannot detect it unless we set it here
+ * to WACOM_DEVICETYPE_PAD or WACOM_DEVICETYPE_TOUCH.
+ */
+ if (features->type == BAMBOO_PEN &&
+ features->pktlen == WACOM_PKGLEN_BBTOUCH3)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+ /*
* Raw Wacom-mode pen and touch events both come from interface
* 0, whose HID descriptor has an application usage of 0xFF0D
* (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 25baa7f29599..e2084d914c14 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -84,6 +84,12 @@
#define WACOM_DEVICETYPE_WL_MONITOR 0x0008
#define WACOM_VENDORDEFINED_PEN 0xff0d0001
+#define WACOM_G9_PAGE 0xff090000
+#define WACOM_G9_DIGITIZER (WACOM_G9_PAGE | 0x02)
+#define WACOM_G9_TOUCHSCREEN (WACOM_G9_PAGE | 0x11)
+#define WACOM_G11_PAGE 0xff110000
+#define WACOM_G11_DIGITIZER (WACOM_G11_PAGE | 0x02)
+#define WACOM_G11_TOUCHSCREEN (WACOM_G11_PAGE | 0x11)
#define WACOM_PEN_FIELD(f) (((f)->logical == HID_DG_STYLUS) || \
((f)->physical == HID_DG_STYLUS) || \
@@ -238,6 +244,8 @@ struct wacom_wac {
int ps_connected;
u8 bt_features;
u8 bt_high_speed;
+ int mode_report;
+ int mode_value;
struct hid_data hid_data;
};
diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c
index 7f82c911ad74..c000780d931f 100644
--- a/drivers/hsi/clients/nokia-modem.c
+++ b/drivers/hsi/clients/nokia-modem.c
@@ -281,6 +281,8 @@ static int nokia_modem_remove(struct device *dev)
#ifdef CONFIG_OF
static const struct of_device_id nokia_modem_of_match[] = {
{ .compatible = "nokia,n900-modem", },
+ { .compatible = "nokia,n950-modem", },
+ { .compatible = "nokia,n9-modem", },
{},
};
MODULE_DEVICE_TABLE(of, nokia_modem_of_match);
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index a38af68cf326..6595d2091268 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -521,13 +521,7 @@ static void ssip_start_rx(struct hsi_client *cl)
* high transition. Therefore we need to ignore the second UP event.
*/
if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
- if (ssi->main_state == INIT) {
- ssi->main_state = HANDSHAKE;
- spin_unlock(&ssi->lock);
- ssip_send_bootinfo_req_cmd(cl);
- } else {
- spin_unlock(&ssi->lock);
- }
+ spin_unlock(&ssi->lock);
return;
}
ssip_set_rxstate(ssi, RECV_READY);
@@ -671,6 +665,7 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
ssip_error(cl);
/* Fall through */
case INIT:
+ case HANDSHAKE:
spin_lock(&ssi->lock);
ssi->main_state = HANDSHAKE;
if (!ssi->waketest) {
@@ -688,9 +683,6 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
msg->complete = ssip_release_cmd;
hsi_async_write(cl, msg);
break;
- case HANDSHAKE:
- /* Ignore */
- break;
default:
dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
break;
@@ -939,9 +931,11 @@ static int ssip_pn_open(struct net_device *dev)
ssi->waketest = 1;
ssi_waketest(cl, 1); /* FIXME: To be removed */
}
- ssi->main_state = INIT;
+ ssi->main_state = HANDSHAKE;
spin_unlock_bh(&ssi->lock);
+ ssip_send_bootinfo_req_cmd(cl);
+
return 0;
}
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 1161d68a1863..56dd261f7142 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -219,6 +219,21 @@ error0:
}
EXPORT_SYMBOL_GPL(vmbus_open);
+/* Used for Hyper-V Socket: a guest client's connect() to the host */
+int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
+ const uuid_le *shv_host_servie_id)
+{
+ struct vmbus_channel_tl_connect_request conn_msg;
+
+ memset(&conn_msg, 0, sizeof(conn_msg));
+ conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
+ conn_msg.guest_endpoint_id = *shv_guest_servie_id;
+ conn_msg.host_service_id = *shv_host_servie_id;
+
+ return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
+}
+EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
+
/*
* create_gpadl_header - Creates a gpadl for the specified buffer
*/
@@ -624,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u64 aligned_data = 0;
int ret;
bool signal = false;
+ bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -643,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
- &signal);
+ &signal, lock);
/*
* Signalling the host is conditional on many factors:
@@ -659,6 +675,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
* If we cannot write to the ring-buffer; signal the host
* even if we may not have written anything. This is a rare
* enough condition that it should not matter.
+ * NOTE: in this case, the hvsock channel is an exception, because
+ * it looks like the host side's hvsock implementation has a
+ * throttling mechanism which can otherwise hurt performance.
*/
if (channel->signal_policy)
@@ -666,7 +685,8 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
else
kick_q = true;
- if (((ret == 0) && kick_q && signal) || (ret))
+ if (((ret == 0) && kick_q && signal) ||
+ (ret && !is_hvsock_channel(channel)))
vmbus_setevent(channel);
return ret;
@@ -719,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
+ bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
@@ -755,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+ &signal, lock);
/*
* Signalling the host is conditional on many factors:
@@ -818,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
+ bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -837,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+ &signal, lock);
if (ret == 0 && signal)
vmbus_setevent(channel);
@@ -862,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
struct kvec bufferlist[3];
u64 aligned_data = 0;
bool signal = false;
+ bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -900,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+ ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+ &signal, lock);
if (ret == 0 && signal)
vmbus_setevent(channel);
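
Each send path above now forwards a per-channel 'lock' flag into hv_ringbuffer_write(), making the ring-buffer spinlock optional. alloc_channel() defaults acquire_ring_lock to true (see the channel_mgmt.c hunk below), so behaviour is unchanged unless a channel owner that can guarantee a single writer clears it. The call shape, as a sketch:

    bool lock = channel->acquire_ring_lock; /* defaults to true */

    ret = hv_ringbuffer_write(&channel->outbound, bufferlist,
                              num_vecs, &signal, lock);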
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 1c1ad47042c5..38b682bab85a 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -28,12 +28,127 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/hyperv.h>
#include "hyperv_vmbus.h"
-static void init_vp_index(struct vmbus_channel *channel,
- const uuid_le *type_guid);
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
+
+static const struct vmbus_device vmbus_devs[] = {
+ /* IDE */
+ { .dev_type = HV_IDE,
+ HV_IDE_GUID,
+ .perf_device = true,
+ },
+
+ /* SCSI */
+ { .dev_type = HV_SCSI,
+ HV_SCSI_GUID,
+ .perf_device = true,
+ },
+
+ /* Fibre Channel */
+ { .dev_type = HV_FC,
+ HV_SYNTHFC_GUID,
+ .perf_device = true,
+ },
+
+ /* Synthetic NIC */
+ { .dev_type = HV_NIC,
+ HV_NIC_GUID,
+ .perf_device = true,
+ },
+
+ /* Network Direct */
+ { .dev_type = HV_ND,
+ HV_ND_GUID,
+ .perf_device = true,
+ },
+
+ /* PCIE */
+ { .dev_type = HV_PCIE,
+ HV_PCIE_GUID,
+ .perf_device = true,
+ },
+
+ /* Synthetic Frame Buffer */
+ { .dev_type = HV_FB,
+ HV_SYNTHVID_GUID,
+ .perf_device = false,
+ },
+
+ /* Synthetic Keyboard */
+ { .dev_type = HV_KBD,
+ HV_KBD_GUID,
+ .perf_device = false,
+ },
+
+ /* Synthetic MOUSE */
+ { .dev_type = HV_MOUSE,
+ HV_MOUSE_GUID,
+ .perf_device = false,
+ },
+
+ /* KVP */
+ { .dev_type = HV_KVP,
+ HV_KVP_GUID,
+ .perf_device = false,
+ },
+
+ /* Time Synch */
+ { .dev_type = HV_TS,
+ HV_TS_GUID,
+ .perf_device = false,
+ },
+
+ /* Heartbeat */
+ { .dev_type = HV_HB,
+ HV_HEART_BEAT_GUID,
+ .perf_device = false,
+ },
+
+ /* Shutdown */
+ { .dev_type = HV_SHUTDOWN,
+ HV_SHUTDOWN_GUID,
+ .perf_device = false,
+ },
+
+ /* File copy */
+ { .dev_type = HV_FCOPY,
+ HV_FCOPY_GUID,
+ .perf_device = false,
+ },
+
+ /* Backup */
+ { .dev_type = HV_BACKUP,
+ HV_VSS_GUID,
+ .perf_device = false,
+ },
+
+ /* Dynamic Memory */
+ { .dev_type = HV_DM,
+ HV_DM_GUID,
+ .perf_device = false,
+ },
+
+ /* Unknown GUID */
+ { .dev_type = HV_UNKOWN,
+ .perf_device = false,
+ },
+};
+
+static u16 hv_get_dev_type(const uuid_le *guid)
+{
+ u16 i;
+
+ for (i = HV_IDE; i < HV_UNKOWN; i++) {
+ if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
+ return i;
+ }
+ pr_info("Unknown GUID: %pUl\n", guid);
+ return i;
+}
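
hv_get_dev_type() scans vmbus_devs[] and returns HV_UNKOWN for anything not in the table; because the unknown slot is the last entry, the loop bound doubles as the fallback index. A hypothetical caller, mirroring how vmbus_process_offer() uses it later in this patch:

    u16 dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);

    if (vmbus_devs[dev_type].perf_device)
            ; /* spread this channel's interrupt load across CPUs */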
/**
* vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
@@ -144,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void)
return NULL;
channel->id = atomic_inc_return(&chan_num);
+ channel->acquire_ring_lock = true;
spin_lock_init(&channel->inbound_lock);
spin_lock_init(&channel->lock);
@@ -195,6 +311,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
vmbus_release_relid(relid);
BUG_ON(!channel->rescind);
+ BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
if (channel->target_cpu != get_cpu()) {
put_cpu();
@@ -206,9 +323,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
}
if (channel->primary_channel == NULL) {
- mutex_lock(&vmbus_connection.channel_mutex);
list_del(&channel->listentry);
- mutex_unlock(&vmbus_connection.channel_mutex);
primary_channel = channel;
} else {
@@ -251,6 +366,8 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
struct vmbus_channel *channel;
bool fnew = true;
unsigned long flags;
+ u16 dev_type;
+ int ret;
/* Make sure this is a new offer */
mutex_lock(&vmbus_connection.channel_mutex);
@@ -288,7 +405,9 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
goto err_free_chan;
}
- init_vp_index(newchannel, &newchannel->offermsg.offer.if_type);
+ dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);
+
+ init_vp_index(newchannel, dev_type);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
@@ -325,12 +444,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
if (!newchannel->device_obj)
goto err_deq_chan;
+ newchannel->device_obj->device_id = dev_type;
/*
* Add the new device to the bus. This will kick off device-driver
* binding which eventually invokes the device driver's AddDevice()
* method.
*/
- if (vmbus_device_register(newchannel->device_obj) != 0) {
+ mutex_lock(&vmbus_connection.channel_mutex);
+ ret = vmbus_device_register(newchannel->device_obj);
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ if (ret != 0) {
pr_err("unable to add child device object (relid %d)\n",
newchannel->offermsg.child_relid);
kfree(newchannel->device_obj);
@@ -358,37 +482,6 @@ err_free_chan:
free_channel(newchannel);
}
-enum {
- IDE = 0,
- SCSI,
- FC,
- NIC,
- ND_NIC,
- PCIE,
- MAX_PERF_CHN,
-};
-
-/*
- * This is an array of device_ids (device types) that are performance critical.
- * We attempt to distribute the interrupt load for these devices across
- * all available CPUs.
- */
-static const struct hv_vmbus_device_id hp_devs[] = {
- /* IDE */
- { HV_IDE_GUID, },
- /* Storage - SCSI */
- { HV_SCSI_GUID, },
- /* Storage - FC */
- { HV_SYNTHFC_GUID, },
- /* Network */
- { HV_NIC_GUID, },
- /* NetworkDirect Guest RDMA */
- { HV_ND_GUID, },
- /* PCI Express Pass Through */
- { HV_PCIE_GUID, },
-};
-
-
/*
* We use this state to statically distribute the channel interrupt load.
*/
@@ -405,22 +498,15 @@ static int next_numa_node_id;
* For pre-win8 hosts or non-performance critical channels we assign the
* first CPU in the first NUMA node.
*/
-static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
u32 cur_cpu;
- int i;
- bool perf_chn = false;
+ bool perf_chn = vmbus_devs[dev_type].perf_device;
struct vmbus_channel *primary = channel->primary_channel;
int next_node;
struct cpumask available_mask;
struct cpumask *alloced_mask;
- for (i = IDE; i < MAX_PERF_CHN; i++) {
- if (!uuid_le_cmp(*type_guid, hp_devs[i].guid)) {
- perf_chn = true;
- break;
- }
- }
if ((vmbus_proto_version == VERSION_WS2008) ||
(vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
/*
@@ -469,6 +555,17 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
cpumask_of_node(primary->numa_node));
cur_cpu = -1;
+
+ /*
+ * Normally the Hyper-V host doesn't create more subchannels than
+ * there are VCPUs on the node, but this can happen when the guest
+ * has not initialized all of the node's present VCPUs. Clear
+ * alloced_cpus_in_node to start over.
+ */
+ if (cpumask_equal(&primary->alloced_cpus_in_node,
+ cpumask_of_node(primary->numa_node)))
+ cpumask_clear(&primary->alloced_cpus_in_node);
+
while (true) {
cur_cpu = cpumask_next(cur_cpu, &available_mask);
if (cur_cpu >= nr_cpu_ids) {
@@ -498,6 +595,32 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
channel->target_vp = hv_context.vp_index[cur_cpu];
}
+static void vmbus_wait_for_unload(void)
+{
+ int cpu = smp_processor_id();
+ void *page_addr = hv_context.synic_message_page[cpu];
+ struct hv_message *msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
+ struct vmbus_channel_message_header *hdr;
+ bool unloaded = false;
+
+ while (1) {
+ if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) {
+ mdelay(10);
+ continue;
+ }
+
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+ unloaded = true;
+
+ vmbus_signal_eom(msg);
+
+ if (unloaded)
+ break;
+ }
+}
+
/*
* vmbus_unload_response - Handler for the unload response.
*/
@@ -510,7 +633,7 @@ static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
complete(&vmbus_connection.unload_event);
}
-void vmbus_initiate_unload(void)
+void vmbus_initiate_unload(bool crash)
{
struct vmbus_channel_message_header hdr;
@@ -523,7 +646,14 @@ void vmbus_initiate_unload(void)
hdr.msgtype = CHANNELMSG_UNLOAD;
vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
- wait_for_completion(&vmbus_connection.unload_event);
+ /*
+ * vmbus_initiate_unload() is also called on crash, and the crash can
+ * happen in an interrupt context, where scheduling is impossible.
+ */
+ if (!crash)
+ wait_for_completion(&vmbus_connection.unload_event);
+ else
+ vmbus_wait_for_unload();
}
/*
@@ -592,6 +722,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
struct device *dev;
rescind = (struct vmbus_channel_rescind_offer *)hdr;
+
+ mutex_lock(&vmbus_connection.channel_mutex);
channel = relid2channel(rescind->child_relid);
if (channel == NULL) {
@@ -600,7 +732,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* vmbus_process_offer(), we have already invoked
* vmbus_release_relid() on error.
*/
- return;
+ goto out;
}
spin_lock_irqsave(&channel->lock, flags);
@@ -608,6 +740,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
spin_unlock_irqrestore(&channel->lock, flags);
if (channel->device_obj) {
+ if (channel->chn_rescind_callback) {
+ channel->chn_rescind_callback(channel);
+ goto out;
+ }
/*
* We will have to unregister this device from the
* driver core.
@@ -621,7 +757,24 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
hv_process_channel_removal(channel,
channel->offermsg.child_relid);
}
+
+out:
+ mutex_unlock(&vmbus_connection.channel_mutex);
+}
+
+void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
+{
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ BUG_ON(!is_hvsock_channel(channel));
+
+ channel->rescind = true;
+ vmbus_device_unregister(channel->device_obj);
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
}
+EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
+
/*
* vmbus_onoffers_delivered -
@@ -825,6 +978,10 @@ struct vmbus_channel_message_table_entry
{CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response},
{CHANNELMSG_UNLOAD, 0, NULL},
{CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response},
+ {CHANNELMSG_18, 0, NULL},
+ {CHANNELMSG_19, 0, NULL},
+ {CHANNELMSG_20, 0, NULL},
+ {CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL},
};
/*
@@ -973,3 +1130,10 @@ bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
+
+void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
+ void (*chn_rescind_cb)(struct vmbus_channel *))
+{
+ channel->chn_rescind_callback = chn_rescind_cb;
+}
+EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 3dc5a9c7fad6..d02f1373dd98 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -88,8 +88,16 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* This has been the behavior pre-win8. This is not
* a perf issue and having all channel messages delivered on CPU 0
* would be ok.
+ * For post-win8 hosts, we support receiving channel messages on
+ * all the CPUs. This is needed for kexec to work correctly where
+ * the CPU attempting to connect may not be CPU 0.
*/
- msg->target_vcpu = 0;
+ if (version >= VERSION_WIN8_1) {
+ msg->target_vcpu = hv_context.vp_index[get_cpu()];
+ put_cpu();
+ } else {
+ msg->target_vcpu = 0;
+ }
/*
* Add to list before we send the request since we may
@@ -236,7 +244,7 @@ void vmbus_disconnect(void)
/*
* First send the unload request to the host.
*/
- vmbus_initiate_unload();
+ vmbus_initiate_unload(false);
if (vmbus_connection.work_queue) {
drain_workqueue(vmbus_connection.work_queue);
@@ -288,7 +296,8 @@ struct vmbus_channel *relid2channel(u32 relid)
struct list_head *cur, *tmp;
struct vmbus_channel *cur_sc;
- mutex_lock(&vmbus_connection.channel_mutex);
+ BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
+
list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
if (channel->offermsg.child_relid == relid) {
found_channel = channel;
@@ -307,7 +316,6 @@ struct vmbus_channel *relid2channel(u32 relid)
}
}
}
- mutex_unlock(&vmbus_connection.channel_mutex);
return found_channel;
}
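
With the mutex_lock()/mutex_unlock() pair removed from relid2channel(), the locking duty moves to the callers: the function asserts channel_mutex is held, and a caller must keep holding it for as long as it uses the returned channel, exactly as vmbus_onoffer_rescind() now does. In sketch form:

    mutex_lock(&vmbus_connection.channel_mutex);
    channel = relid2channel(relid);
    if (channel) {
            /* safe: the channel cannot be freed or unlinked from
             * chn_list while channel_mutex is held */
    }
    mutex_unlock(&vmbus_connection.channel_mutex);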
@@ -474,7 +482,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
/*
* vmbus_set_event - Send an event notification to the parent
*/
-int vmbus_set_event(struct vmbus_channel *channel)
+void vmbus_set_event(struct vmbus_channel *channel)
{
u32 child_relid = channel->offermsg.child_relid;
@@ -485,5 +493,5 @@ int vmbus_set_event(struct vmbus_channel *channel)
(child_relid >> 5));
}
- return hv_signal_event(channel->sig_event);
+ hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 11bca51ef5ff..a1c086ba3b9a 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -204,6 +204,8 @@ int hv_init(void)
sizeof(int) * NR_CPUS);
memset(hv_context.event_dpc, 0,
sizeof(void *) * NR_CPUS);
+ memset(hv_context.msg_dpc, 0,
+ sizeof(void *) * NR_CPUS);
memset(hv_context.clk_evt, 0,
sizeof(void *) * NR_CPUS);
@@ -295,8 +297,14 @@ void hv_cleanup(void)
* Cleanup the TSC page based CS.
*/
if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
- clocksource_change_rating(&hyperv_cs_tsc, 10);
- clocksource_unregister(&hyperv_cs_tsc);
+ /*
+ * A crash can happen in an interrupt context, where unregistering
+ * a clocksource is impossible and, in this case, redundant.
+ */
+ if (!oops_in_progress) {
+ clocksource_change_rating(&hyperv_cs_tsc, 10);
+ clocksource_unregister(&hyperv_cs_tsc);
+ }
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
@@ -337,22 +345,6 @@ int hv_post_message(union hv_connection_id connection_id,
return status & 0xFFFF;
}
-
-/*
- * hv_signal_event -
- * Signal an event on the specified connection using the hypervisor event IPC.
- *
- * This involves a hypercall.
- */
-int hv_signal_event(void *con_id)
-{
- u64 status;
-
- status = hv_do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL);
-
- return status & 0xFFFF;
-}
-
static int hv_ce_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -425,6 +417,13 @@ int hv_synic_alloc(void)
}
tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+ hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
+ if (hv_context.msg_dpc[cpu] == NULL) {
+ pr_err("Unable to allocate event dpc\n");
+ goto err;
+ }
+ tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
+
hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
if (hv_context.clk_evt[cpu] == NULL) {
pr_err("Unable to allocate clock event device\n");
@@ -466,6 +465,7 @@ err:
static void hv_synic_free_cpu(int cpu)
{
kfree(hv_context.event_dpc[cpu]);
+ kfree(hv_context.msg_dpc[cpu]);
kfree(hv_context.clk_evt[cpu]);
if (hv_context.synic_event_page[cpu])
free_page((unsigned long)hv_context.synic_event_page[cpu]);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index c37a71e13de0..23c70799ad8a 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -251,7 +251,6 @@ void hv_fcopy_onchannelcallback(void *context)
*/
fcopy_transaction.recv_len = recvlen;
- fcopy_transaction.recv_channel = channel;
fcopy_transaction.recv_req_id = requestid;
fcopy_transaction.fcopy_msg = fcopy_msg;
@@ -317,6 +316,7 @@ static void fcopy_on_reset(void)
int hv_fcopy_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
+ fcopy_transaction.recv_channel = srv->channel;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index d4ab81bcd515..9b9b370fe22a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -639,7 +639,6 @@ void hv_kvp_onchannelcallback(void *context)
*/
kvp_transaction.recv_len = recvlen;
- kvp_transaction.recv_channel = channel;
kvp_transaction.recv_req_id = requestid;
kvp_transaction.kvp_msg = kvp_msg;
@@ -688,6 +687,7 @@ int
hv_kvp_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
+ kvp_transaction.recv_channel = srv->channel;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 67def4a831c8..3fba14e88f03 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -263,7 +263,6 @@ void hv_vss_onchannelcallback(void *context)
*/
vss_transaction.recv_len = recvlen;
- vss_transaction.recv_channel = channel;
vss_transaction.recv_req_id = requestid;
vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
@@ -337,6 +336,7 @@ hv_vss_init(struct hv_util_service *srv)
return -ENOTSUPP;
}
recv_buffer = srv->recv_buffer;
+ vss_transaction.recv_channel = srv->channel;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 7994ec2e4151..d5acaa2d8e61 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -322,6 +322,7 @@ static int util_probe(struct hv_device *dev,
srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
+ srv->channel = dev->channel;
if (srv->util_init) {
ret = srv->util_init(srv);
if (ret) {
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 4f42c0e20c20..9a9983fa4531 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -310,6 +310,9 @@ struct hvutil_transport *hvutil_transport_init(const char *name,
return hvt;
err_free_hvt:
+ spin_lock(&hvt_list_lock);
+ list_del(&hvt->list);
+ spin_unlock(&hvt_list_lock);
kfree(hvt);
return NULL;
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4ebc796b4f33..12321b93a756 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -256,12 +256,6 @@ struct hv_monitor_page {
u8 rsvdz4[1984];
};
-/* Declare the various hypercall operations. */
-enum hv_call_code {
- HVCALL_POST_MESSAGE = 0x005c,
- HVCALL_SIGNAL_EVENT = 0x005d,
-};
-
/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
union hv_connection_id connectionid;
@@ -449,10 +443,11 @@ struct hv_context {
u32 vp_index[NR_CPUS];
/*
* Starting with win8, we can take channel interrupts on any CPU;
- * we will manage the tasklet that handles events on a per CPU
+ * we will manage the tasklets that handle events and messages on a per CPU
* basis.
*/
struct tasklet_struct *event_dpc[NR_CPUS];
+ struct tasklet_struct *msg_dpc[NR_CPUS];
/*
* To optimize the mapping of relid to channel, maintain
* per-cpu list of the channels based on their CPU affinity.
@@ -501,8 +496,6 @@ extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
void *payload, size_t payload_size);
-extern int hv_signal_event(void *con_id);
-
extern int hv_synic_alloc(void);
extern void hv_synic_free(void);
@@ -531,7 +524,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
struct kvec *kv_list,
- u32 kv_count, bool *signal);
+ u32 kv_count, bool *signal, bool lock);
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
void *buffer, u32 buflen, u32 *buffer_actual_len,
@@ -626,6 +619,30 @@ struct vmbus_channel_message_table_entry {
extern struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT];
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg)
+{
+ msg->header.message_type = HVMSG_NONE;
+
+ /*
+ * Make sure the write to MessageType (ie set to
+ * HVMSG_NONE) happens before we read the
+ * MessagePending and EOMing. Otherwise, the EOMing
+ * will not deliver any more messages since there is
+ * no empty slot
+ */
+ mb();
+
+ if (msg->header.message_flags.msg_pending) {
+ /*
+ * This will cause message queue rescan to
+ * possibly deliver another msg from the
+ * hypervisor
+ */
+ wrmsrl(HV_X64_MSR_EOM, 0);
+ }
+}
+
/* General vmbus interface */
struct hv_device *vmbus_device_create(const uuid_le *type,
@@ -650,9 +667,10 @@ void vmbus_disconnect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
-int vmbus_set_event(struct vmbus_channel *channel);
+void vmbus_set_event(struct vmbus_channel *channel);
void vmbus_on_event(unsigned long data);
+void vmbus_on_msg_dpc(unsigned long data);
int hv_kvp_init(struct hv_util_service *);
void hv_kvp_deinit(void);
@@ -665,7 +683,7 @@ void hv_vss_onchannelcallback(void *);
int hv_fcopy_init(struct hv_util_service *);
void hv_fcopy_deinit(void);
void hv_fcopy_onchannelcallback(void *);
-void vmbus_initiate_unload(void);
+void vmbus_initiate_unload(bool crash);
static inline void hv_poll_channel(struct vmbus_channel *channel,
void (*cb)(void *))
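vmbus_signal_eom() folds three formerly open-coded copies of the slot-free/EOM sequence into one helper (see the vmbus_drv.c hunks below). A minimal sketch of how a consumer is expected to use it; the handler body and name are illustrative only:

/* Illustrative only: the shape of a consumer built on the new helper. */
static void example_msg_consumer(struct hv_message *msg)
{
	if (msg->header.message_type == HVMSG_NONE)
		return;			/* this slot is empty */

	/* ... decode msg->u.payload and dispatch it here ... */

	/*
	 * Always last: mark the slot free; the barrier inside
	 * vmbus_signal_eom() orders that write against the
	 * msg_pending test that decides whether to write the EOM MSR.
	 */
	vmbus_signal_eom(msg);
}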
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index b53702ce692f..5613e2b5cff7 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
- struct kvec *kv_list, u32 kv_count, bool *signal)
+ struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -324,14 +324,15 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
u32 next_write_location;
u32 old_write;
u64 prev_indices = 0;
- unsigned long flags;
+ unsigned long flags = 0;
for (i = 0; i < kv_count; i++)
totalbytes_towrite += kv_list[i].iov_len;
totalbytes_towrite += sizeof(u64);
- spin_lock_irqsave(&outring_info->ring_lock, flags);
+ if (lock)
+ spin_lock_irqsave(&outring_info->ring_lock, flags);
hv_get_ringbuffer_availbytes(outring_info,
&bytes_avail_toread,
@@ -343,7 +344,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
* is empty since the read index == write index.
*/
if (bytes_avail_towrite <= totalbytes_towrite) {
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ if (lock)
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
@@ -374,7 +376,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
hv_set_next_write_location(outring_info, next_write_location);
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ if (lock)
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
*signal = hv_need_to_signal(old_write, outring_info);
return 0;
@@ -388,7 +391,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
- unsigned long flags;
struct vmpacket_descriptor desc;
u32 offset;
u32 packetlen;
@@ -397,7 +399,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
if (buflen <= 0)
return -EINVAL;
- spin_lock_irqsave(&inring_info->ring_lock, flags);
*buffer_actual_len = 0;
*requestid = 0;
@@ -412,7 +413,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
* No error is set when there is even no header, drivers are
* supposed to analyze buffer_actual_len.
*/
- goto out_unlock;
+ return ret;
}
next_read_location = hv_get_next_read_location(inring_info);
@@ -425,15 +426,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
*buffer_actual_len = packetlen;
*requestid = desc.trans_id;
- if (bytes_avail_toread < packetlen + offset) {
- ret = -EAGAIN;
- goto out_unlock;
- }
+ if (bytes_avail_toread < packetlen + offset)
+ return -EAGAIN;
- if (packetlen > buflen) {
- ret = -ENOBUFS;
- goto out_unlock;
- }
+ if (packetlen > buflen)
+ return -ENOBUFS;
next_read_location =
hv_get_next_readlocation_withoffset(inring_info, offset);
@@ -460,7 +457,5 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
-out_unlock:
- spin_unlock_irqrestore(&inring_info->ring_lock, flags);
return ret;
}
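With the ring-buffer lock now optional on the write side (and gone entirely on the read side), serialization becomes the caller's job. A hedged sketch of a send path threading the new flag through; the 'acquire_lock' and 'outbound' channel fields are assumptions named here only for illustration:

/*
 * Sketch only, not the driver's send path.
 */
int example_sendpacket(struct vmbus_channel *channel,
		       struct kvec *list, u32 count)
{
	bool signal = false;
	bool lock = channel->acquire_lock;	/* false if caller serializes */
	int ret;

	ret = hv_ringbuffer_write(&channel->outbound, list, count,
				  &signal, lock);
	if (!ret && signal)
		vmbus_set_event(channel);	/* now void, per this patch */

	return ret;
}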
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 328e4c3808e0..64713ff47e36 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,7 +45,6 @@
static struct acpi_device *hv_acpi_dev;
-static struct tasklet_struct msg_dpc;
static struct completion probe_event;
@@ -477,6 +476,24 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
}
static DEVICE_ATTR_RO(channel_vp_mapping);
+static ssize_t vendor_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t device_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ return sprintf(buf, "0x%x\n", hv_dev->device_id);
+}
+static DEVICE_ATTR_RO(device);
+
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_attrs[] = {
&dev_attr_id.attr,
@@ -502,6 +519,8 @@ static struct attribute *vmbus_attrs[] = {
&dev_attr_in_read_bytes_avail.attr,
&dev_attr_in_write_bytes_avail.attr,
&dev_attr_channel_vp_mapping.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
NULL,
};
ATTRIBUTE_GROUPS(vmbus);
@@ -562,6 +581,10 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
struct hv_driver *drv = drv_to_hv_drv(driver);
struct hv_device *hv_dev = device_to_hv_device(device);
+ /* The hv_sock driver handles all hv_sock offers. */
+ if (is_hvsock_channel(hv_dev->channel))
+ return drv->hvsock;
+
if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
return 1;
@@ -685,28 +708,10 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
if (dev->event_handler)
dev->event_handler(dev);
- msg->header.message_type = HVMSG_NONE;
-
- /*
- * Make sure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- mb();
-
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- wrmsrl(HV_X64_MSR_EOM, 0);
- }
+ vmbus_signal_eom(msg);
}
-static void vmbus_on_msg_dpc(unsigned long data)
+void vmbus_on_msg_dpc(unsigned long data)
{
int cpu = smp_processor_id();
void *page_addr = hv_context.synic_message_page[cpu];
@@ -716,52 +721,32 @@ static void vmbus_on_msg_dpc(unsigned long data)
struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
- while (1) {
- if (msg->header.message_type == HVMSG_NONE)
- /* no msg */
- break;
+ if (msg->header.message_type == HVMSG_NONE)
+ /* no msg */
+ return;
- hdr = (struct vmbus_channel_message_header *)msg->u.payload;
+ hdr = (struct vmbus_channel_message_header *)msg->u.payload;
- if (hdr->msgtype >= CHANNELMSG_COUNT) {
- WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
- goto msg_handled;
- }
+ if (hdr->msgtype >= CHANNELMSG_COUNT) {
+ WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
+ goto msg_handled;
+ }
- entry = &channel_message_table[hdr->msgtype];
- if (entry->handler_type == VMHT_BLOCKING) {
- ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
- if (ctx == NULL)
- continue;
+ entry = &channel_message_table[hdr->msgtype];
+ if (entry->handler_type == VMHT_BLOCKING) {
+ ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (ctx == NULL)
+ return;
- INIT_WORK(&ctx->work, vmbus_onmessage_work);
- memcpy(&ctx->msg, msg, sizeof(*msg));
+ INIT_WORK(&ctx->work, vmbus_onmessage_work);
+ memcpy(&ctx->msg, msg, sizeof(*msg));
- queue_work(vmbus_connection.work_queue, &ctx->work);
- } else
- entry->message_handler(hdr);
+ queue_work(vmbus_connection.work_queue, &ctx->work);
+ } else
+ entry->message_handler(hdr);
msg_handled:
- msg->header.message_type = HVMSG_NONE;
-
- /*
- * Make sure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- mb();
-
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- wrmsrl(HV_X64_MSR_EOM, 0);
- }
- }
+ vmbus_signal_eom(msg);
}
static void vmbus_isr(void)
@@ -814,7 +799,7 @@ static void vmbus_isr(void)
if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
hv_process_timer_expiration(msg, cpu);
else
- tasklet_schedule(&msg_dpc);
+ tasklet_schedule(hv_context.msg_dpc[cpu]);
}
}
@@ -838,8 +823,6 @@ static int vmbus_bus_init(void)
return ret;
}
- tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
-
ret = bus_register(&hv_bus);
if (ret)
goto err_cleanup;
@@ -957,6 +940,7 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le));
memcpy(&child_device_obj->dev_instance, instance,
sizeof(uuid_le));
+ child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */
return child_device_obj;
@@ -1268,7 +1252,7 @@ static void hv_kexec_handler(void)
int cpu;
hv_synic_clockevents_cleanup();
- vmbus_initiate_unload();
+ vmbus_initiate_unload(false);
for_each_online_cpu(cpu)
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
hv_cleanup();
@@ -1276,7 +1260,7 @@ static void hv_kexec_handler(void)
static void hv_crash_handler(struct pt_regs *regs)
{
- vmbus_initiate_unload();
+ vmbus_initiate_unload(true);
/*
* In crash handler we can't schedule synic cleanup for all CPUs,
* doing the cleanup for current CPU only. This should be sufficient
@@ -1334,7 +1318,8 @@ static void __exit vmbus_exit(void)
hv_synic_clockevents_cleanup();
vmbus_disconnect();
hv_remove_vmbus_irq();
- tasklet_kill(&msg_dpc);
+ for_each_online_cpu(cpu)
+ tasklet_kill(hv_context.msg_dpc[cpu]);
vmbus_free_channels();
if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
unregister_die_notifier(&hyperv_die_block);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 60fb80bd353d..5c2d13a687aa 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -685,6 +685,20 @@ config SENSORS_LTC2945
This driver can also be built as a module. If so, the module will
be called ltc2945.
+config SENSORS_LTC2990
+ tristate "Linear Technology LTC2990 (current monitoring mode only)"
+ depends on I2C
+ help
+ If you say yes here you get support for Linear Technology LTC2990
+ I2C System Monitor. The LTC2990 supports a combination of voltage,
+ current and temperature monitoring, but in addition to the Vcc supply
+ voltage and chip temperature, this driver currently only supports
+ reading two currents by measuring two differential voltages across
+ series resistors.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc2990.
+
config SENSORS_LTC4151
tristate "Linear Technology LTC4151"
depends on I2C
@@ -1127,7 +1141,7 @@ config SENSORS_NTC_THERMISTOR
Currently, this driver supports
NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333,
- and NCP03WF104 from Murata and B57330V2103 from EPCOS.
+ NCP03WF104 and NCP15XH103 from Murata and B57330V2103 from EPCOS.
This driver can also be built as a module. If so, the module
will be called ntc-thermistor.
@@ -1176,6 +1190,21 @@ config SENSORS_NCT7904
This driver can also be built as a module. If so, the module
will be called nct7904.
+config SENSORS_NSA320
+ tristate "ZyXEL NSA320 and compatible fan speed and temperature sensors"
+ depends on GPIOLIB && OF
+ depends on MACH_KIRKWOOD || COMPILE_TEST
+ help
+ If you say yes here you get support for hardware monitoring
+ for the ZyXEL NSA320 Media Server and other compatible devices
+ (probably the NSA325 and some NSA310 variants).
+
+ The sensor data is taken from a Holtek HT46R065 microcontroller
+ connected to GPIO lines.
+
+ This driver can also be built as a module. If so, the module
+ will be called nsa320-hwmon.
+
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 30c94df31465..58cc3acba7e7 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_SENSORS_LM95234) += lm95234.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
obj-$(CONFIG_SENSORS_LM95245) += lm95245.o
obj-$(CONFIG_SENSORS_LTC2945) += ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2990) += ltc2990.o
obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4222) += ltc4222.o
@@ -123,6 +124,7 @@ obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
obj-$(CONFIG_SENSORS_NCT7802) += nct7802.o
obj-$(CONFIG_SENSORS_NCT7904) += nct7904.o
+obj-$(CONFIG_SENSORS_NSA320) += nsa320-hwmon.o
obj-$(CONFIG_SENSORS_NTC_THERMISTOR) += ntc_thermistor.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
obj-$(CONFIG_SENSORS_PC87427) += pc87427.o
@@ -149,7 +151,7 @@ obj-$(CONFIG_SENSORS_TMP103) += tmp103.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
-obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress.o
+obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
obj-$(CONFIG_SENSORS_VT1211) += vt1211.o
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 17ae2eb26ce2..b550ba5fa58a 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -67,6 +67,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
enum iio_chan_type type;
struct iio_channel *channels;
const char *name = "iio_hwmon";
+ char *sname;
if (dev->of_node && dev->of_node->name)
name = dev->of_node->name;
@@ -144,7 +145,15 @@ static int iio_hwmon_probe(struct platform_device *pdev)
st->attr_group.attrs = st->attrs;
st->groups[0] = &st->attr_group;
- st->hwmon_dev = hwmon_device_register_with_groups(dev, name, st,
+
+ sname = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!sname) {
+ ret = -ENOMEM;
+ goto error_release_channels;
+ }
+
+ strreplace(sname, '-', '_');
+ st->hwmon_dev = hwmon_device_register_with_groups(dev, sname, st,
st->groups);
if (IS_ERR(st->hwmon_dev)) {
ret = PTR_ERR(st->hwmon_dev);
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
new file mode 100644
index 000000000000..8f8fe059ab48
--- /dev/null
+++ b/drivers/hwmon/ltc2990.c
@@ -0,0 +1,161 @@
+/*
+ * Driver for Linear Technology LTC2990 power monitor
+ *
+ * Copyright (C) 2014 Topic Embedded Products
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * License: GPLv2
+ *
+ * This driver assumes the chip is wired as a dual current monitor, and
+ * reports the voltage drop across two series resistors. It also reports
+ * the chip's internal temperature and Vcc power supply voltage.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define LTC2990_STATUS 0x00
+#define LTC2990_CONTROL 0x01
+#define LTC2990_TRIGGER 0x02
+#define LTC2990_TINT_MSB 0x04
+#define LTC2990_V1_MSB 0x06
+#define LTC2990_V2_MSB 0x08
+#define LTC2990_V3_MSB 0x0A
+#define LTC2990_V4_MSB 0x0C
+#define LTC2990_VCC_MSB 0x0E
+
+#define LTC2990_CONTROL_KELVIN BIT(7)
+#define LTC2990_CONTROL_SINGLE BIT(6)
+#define LTC2990_CONTROL_MEASURE_ALL (0x3 << 3)
+#define LTC2990_CONTROL_MODE_CURRENT 0x06
+#define LTC2990_CONTROL_MODE_VOLTAGE 0x07
+
+/* convert raw register value to sign-extended integer in 16-bit range */
+static int ltc2990_voltage_to_int(int raw)
+{
+ if (raw & BIT(14))
+ return -(0x4000 - (raw & 0x3FFF)) << 2;
+ else
+ return (raw & 0x3FFF) << 2;
+}
+
+/* Return the converted value from the given register in uV or mC */
+static int ltc2990_get_value(struct i2c_client *i2c, u8 reg, int *result)
+{
+ int val;
+
+ val = i2c_smbus_read_word_swapped(i2c, reg);
+ if (unlikely(val < 0))
+ return val;
+
+ switch (reg) {
+ case LTC2990_TINT_MSB:
+ /* internal temp, 0.0625 degrees/LSB, 13-bit */
+ val = (val & 0x1FFF) << 3;
+ *result = (val * 1000) >> 7;
+ break;
+ case LTC2990_V1_MSB:
+ case LTC2990_V3_MSB:
+ /* Vx-Vy, 19.42uV/LSB. Depends on mode. */
+ *result = ltc2990_voltage_to_int(val) * 1942 / (4 * 100);
+ break;
+ case LTC2990_VCC_MSB:
+ /* Vcc, 305.18μV/LSB, 2.5V offset */
+ *result = (ltc2990_voltage_to_int(val) * 30518 /
+ (4 * 100 * 1000)) + 2500;
+ break;
+ default:
+ return -EINVAL; /* won't happen, keep compiler happy */
+ }
+
+ return 0;
+}
+
+static ssize_t ltc2990_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ int value;
+ int ret;
+
+ ret = ltc2990_get_value(dev_get_drvdata(dev), attr->index, &value);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ltc2990_show_value, NULL,
+ LTC2990_TINT_MSB);
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc2990_show_value, NULL,
+ LTC2990_V1_MSB);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc2990_show_value, NULL,
+ LTC2990_V3_MSB);
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ltc2990_show_value, NULL,
+ LTC2990_VCC_MSB);
+
+static struct attribute *ltc2990_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr2_input.dev_attr.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ltc2990);
+
+static int ltc2990_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct device *hwmon_dev;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ /* Setup continuous mode, current monitor */
+ ret = i2c_smbus_write_byte_data(i2c, LTC2990_CONTROL,
+ LTC2990_CONTROL_MEASURE_ALL |
+ LTC2990_CONTROL_MODE_CURRENT);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Error: Failed to set control mode.\n");
+ return ret;
+ }
+ /* Trigger once to start continuous conversion */
+ ret = i2c_smbus_write_byte_data(i2c, LTC2990_TRIGGER, 1);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Error: Failed to start acquisition.\n");
+ return ret;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&i2c->dev,
+ i2c->name,
+ i2c,
+ ltc2990_groups);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc2990_i2c_id[] = {
+ { "ltc2990", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2990_i2c_id);
+
+static struct i2c_driver ltc2990_i2c_driver = {
+ .driver = {
+ .name = "ltc2990",
+ },
+ .probe = ltc2990_i2c_probe,
+ .id_table = ltc2990_i2c_id,
+};
+
+module_i2c_driver(ltc2990_i2c_driver);
+
+MODULE_DESCRIPTION("LTC2990 Sensor Driver");
+MODULE_AUTHOR("Topic Embedded Products");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 36544c4f653c..303d0c9df907 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
int max1111_read_channel(int channel)
{
+ if (!the_max1111 || !the_max1111->spi)
+ return -ENODEV;
+
return max1111_read(&the_max1111->spi->dev, channel);
}
EXPORT_SYMBOL(max1111_read_channel);
@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
{
struct max1111_data *data = spi_get_drvdata(spi);
+#ifdef CONFIG_SHARPSL_PM
+ the_max1111 = NULL;
+#endif
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
diff --git a/drivers/hwmon/nsa320-hwmon.c b/drivers/hwmon/nsa320-hwmon.c
new file mode 100644
index 000000000000..0517a265741f
--- /dev/null
+++ b/drivers/hwmon/nsa320-hwmon.c
@@ -0,0 +1,215 @@
+/*
+ * drivers/hwmon/nsa320-hwmon.c
+ *
+ * ZyXEL NSA320 Media Servers
+ * hardware monitoring
+ *
+ * Copyright (C) 2016 Adam Baker <linux@baker-net.org.uk>
+ * based on a board file driver
+ * Copyright (C) 2012 Peter Schildmann <linux@schildmann.info>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License v2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+/* Tests for error return values rely upon this value being < 0x80 */
+#define MAGIC_NUMBER 0x55
+
+/*
+ * The Zyxel hwmon MCU is a Holtek HT46R065 that is factory programmed
+ * to perform temperature and fan speed monitoring. It is read by taking
+ * the active pin low. The 32 bit output word is then clocked onto the
+ * data line. The MSB of the data word is a magic number to indicate it
+ * has been read correctly, the next byte is the fan speed (in hundreds
+ * of RPM) and the last two bytes are the temperature (in tenths of a
+ * degree).
+ */
+
+struct nsa320_hwmon {
+ struct mutex update_lock; /* lock GPIO operations */
+ unsigned long last_updated; /* jiffies */
+ unsigned long mcu_data;
+ struct gpio_desc *act;
+ struct gpio_desc *clk;
+ struct gpio_desc *data;
+};
+
+enum nsa320_inputs {
+ NSA320_TEMP = 0,
+ NSA320_FAN = 1,
+};
+
+static const char * const nsa320_input_names[] = {
+ [NSA320_TEMP] = "System Temperature",
+ [NSA320_FAN] = "Chassis Fan",
+};
+
+/*
+ * Although this protocol looks similar to SPI, the long delay
+ * between the active (aka chip select) signal and the shorter
+ * delay between clock pulses are needed for reliable operation.
+ * The delays provided are taken from the manufacturer's kernel;
+ * testing suggests they probably incorporate a reasonable safety
+ * margin. (The single device tested became unreliable if the
+ * delay was reduced to 1/10th of this value.)
+ */
+static s32 nsa320_hwmon_update(struct device *dev)
+{
+ u32 mcu_data;
+ u32 mask;
+ struct nsa320_hwmon *hwmon = dev_get_drvdata(dev);
+
+ mutex_lock(&hwmon->update_lock);
+
+ mcu_data = hwmon->mcu_data;
+
+ if (time_after(jiffies, hwmon->last_updated + HZ) || mcu_data == 0) {
+ gpiod_set_value(hwmon->act, 1);
+ msleep(100);
+
+ mcu_data = 0;
+ for (mask = BIT(31); mask; mask >>= 1) {
+ gpiod_set_value(hwmon->clk, 0);
+ usleep_range(100, 200);
+ gpiod_set_value(hwmon->clk, 1);
+ usleep_range(100, 200);
+ if (gpiod_get_value(hwmon->data))
+ mcu_data |= mask;
+ }
+
+ gpiod_set_value(hwmon->act, 0);
+ dev_dbg(dev, "Read raw MCU data %08x\n", mcu_data);
+
+ if ((mcu_data >> 24) != MAGIC_NUMBER) {
+ dev_dbg(dev, "Read invalid MCU data %08x\n", mcu_data);
+ mcu_data = -EIO;
+ } else {
+ hwmon->mcu_data = mcu_data;
+ hwmon->last_updated = jiffies;
+ }
+ }
+
+ mutex_unlock(&hwmon->update_lock);
+
+ return mcu_data;
+}
+
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int channel = to_sensor_dev_attr(attr)->index;
+
+ return sprintf(buf, "%s\n", nsa320_input_names[channel]);
+}
+
+static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ s32 mcu_data = nsa320_hwmon_update(dev);
+
+ if (mcu_data < 0)
+ return mcu_data;
+
+ return sprintf(buf, "%d\n", (mcu_data & 0xffff) * 100);
+}
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ s32 mcu_data = nsa320_hwmon_update(dev);
+
+ if (mcu_data < 0)
+ return mcu_data;
+
+ return sprintf(buf, "%d\n", ((mcu_data & 0xff0000) >> 16) * 100);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, NSA320_TEMP);
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, show_label, NULL, NSA320_FAN);
+static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
+
+static struct attribute *nsa320_attrs[] = {
+ &sensor_dev_attr_temp1_label.dev_attr.attr,
+ &dev_attr_temp1_input.attr,
+ &sensor_dev_attr_fan1_label.dev_attr.attr,
+ &dev_attr_fan1_input.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(nsa320);
+
+static const struct of_device_id of_nsa320_hwmon_match[] = {
+ { .compatible = "zyxel,nsa320-mcu", },
+ { },
+};
+
+static int nsa320_hwmon_probe(struct platform_device *pdev)
+{
+ struct nsa320_hwmon *hwmon;
+ struct device *classdev;
+
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ /* Look up the GPIO pins to use */
+ hwmon->act = devm_gpiod_get(&pdev->dev, "act", GPIOD_OUT_LOW);
+ if (IS_ERR(hwmon->act))
+ return PTR_ERR(hwmon->act);
+
+ hwmon->clk = devm_gpiod_get(&pdev->dev, "clk", GPIOD_OUT_HIGH);
+ if (IS_ERR(hwmon->clk))
+ return PTR_ERR(hwmon->clk);
+
+ hwmon->data = devm_gpiod_get(&pdev->dev, "data", GPIOD_IN);
+ if (IS_ERR(hwmon->data))
+ return PTR_ERR(hwmon->data);
+
+ mutex_init(&hwmon->update_lock);
+
+ classdev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "nsa320", hwmon, nsa320_groups);
+
+ return PTR_ERR_OR_ZERO(classdev);
+}
+
+/* All allocations use devres so remove() is not needed. */
+
+static struct platform_driver nsa320_hwmon_driver = {
+ .probe = nsa320_hwmon_probe,
+ .driver = {
+ .name = "nsa320-hwmon",
+ .of_match_table = of_match_ptr(of_nsa320_hwmon_match),
+ },
+};
+
+module_platform_driver(nsa320_hwmon_driver);
+
+MODULE_DEVICE_TABLE(of, of_nsa320_hwmon_match);
+MODULE_AUTHOR("Peter Schildmann <linux@schildmann.info>");
+MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>");
+MODULE_DESCRIPTION("NSA320 Hardware Monitoring");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:nsa320-hwmon");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index feed30646d91..faa6e8dfbaaf 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -54,6 +54,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
{ "ncp15wl333", TYPE_NCPXXWL333 },
{ "b57330v2103", TYPE_B57330V2103},
{ "ncp03wf104", TYPE_NCPXXWF104 },
+ { "ncp15xh103", TYPE_NCPXXXH103 },
{ },
};
@@ -173,6 +174,43 @@ static const struct ntc_compensation ncpXXwf104[] = {
{ .temp_c = 125, .ohm = 2522 },
};
+static const struct ntc_compensation ncpXXxh103[] = {
+ { .temp_c = -40, .ohm = 247565 },
+ { .temp_c = -35, .ohm = 181742 },
+ { .temp_c = -30, .ohm = 135128 },
+ { .temp_c = -25, .ohm = 101678 },
+ { .temp_c = -20, .ohm = 77373 },
+ { .temp_c = -15, .ohm = 59504 },
+ { .temp_c = -10, .ohm = 46222 },
+ { .temp_c = -5, .ohm = 36244 },
+ { .temp_c = 0, .ohm = 28674 },
+ { .temp_c = 5, .ohm = 22878 },
+ { .temp_c = 10, .ohm = 18399 },
+ { .temp_c = 15, .ohm = 14910 },
+ { .temp_c = 20, .ohm = 12169 },
+ { .temp_c = 25, .ohm = 10000 },
+ { .temp_c = 30, .ohm = 8271 },
+ { .temp_c = 35, .ohm = 6883 },
+ { .temp_c = 40, .ohm = 5762 },
+ { .temp_c = 45, .ohm = 4851 },
+ { .temp_c = 50, .ohm = 4105 },
+ { .temp_c = 55, .ohm = 3492 },
+ { .temp_c = 60, .ohm = 2985 },
+ { .temp_c = 65, .ohm = 2563 },
+ { .temp_c = 70, .ohm = 2211 },
+ { .temp_c = 75, .ohm = 1915 },
+ { .temp_c = 80, .ohm = 1666 },
+ { .temp_c = 85, .ohm = 1454 },
+ { .temp_c = 90, .ohm = 1275 },
+ { .temp_c = 95, .ohm = 1121 },
+ { .temp_c = 100, .ohm = 990 },
+ { .temp_c = 105, .ohm = 876 },
+ { .temp_c = 110, .ohm = 779 },
+ { .temp_c = 115, .ohm = 694 },
+ { .temp_c = 120, .ohm = 620 },
+ { .temp_c = 125, .ohm = 556 },
+};
+
/*
* The following compensation table is from the specification of EPCOS NTC
* Thermistors Datasheet
@@ -260,6 +298,8 @@ static const struct of_device_id ntc_match[] = {
.data = &ntc_thermistor_id[5]},
{ .compatible = "murata,ncp03wf104",
.data = &ntc_thermistor_id[6] },
+ { .compatible = "murata,ncp15xh103",
+ .data = &ntc_thermistor_id[7] },
/* Usage of vendor name "ntc" is deprecated */
{ .compatible = "ntc,ncp15wb473",
@@ -609,6 +649,10 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
data->comp = ncpXXwf104;
data->n_comp = ARRAY_SIZE(ncpXXwf104);
break;
+ case TYPE_NCPXXXH103:
+ data->comp = ncpXXxh103;
+ data->n_comp = ARRAY_SIZE(ncpXXxh103);
+ break;
default:
dev_err(&pdev->dev, "Unknown device type: %lu(%s)\n",
pdev_id->driver_data, pdev_id->name);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 7e5cc3d025ef..054d3d863802 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -31,8 +31,8 @@ config SENSORS_ADM1275
default n
help
If you say yes here you get hardware monitoring support for Analog
- Devices ADM1075, ADM1275, ADM1276, ADM1293, and ADM1294 Hot-Swap
- Controller and Digital Power Monitors.
+ Devices ADM1075, ADM1275, ADM1276, ADM1278, ADM1293, and ADM1294
+ Hot-Swap Controller and Digital Power Monitors.
This driver can also be built as a module. If so, the module will
be called adm1275.
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 188af4c89f40..3baa4f4a8c5e 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -24,7 +24,7 @@
#include <linux/bitops.h>
#include "pmbus.h"
-enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
+enum chips { adm1075, adm1275, adm1276, adm1278, adm1293, adm1294 };
#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
@@ -41,6 +41,10 @@ enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
#define ADM1075_IRANGE_25 BIT(3)
#define ADM1075_IRANGE_MASK (BIT(3) | BIT(4))
+#define ADM1278_TEMP1_EN BIT(3)
+#define ADM1278_VIN_EN BIT(2)
+#define ADM1278_VOUT_EN BIT(1)
+
#define ADM1293_IRANGE_25 0
#define ADM1293_IRANGE_50 BIT(6)
#define ADM1293_IRANGE_100 BIT(7)
@@ -54,6 +58,7 @@ enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
#define ADM1293_VAUX_EN BIT(1)
+#define ADM1278_PEAK_TEMP 0xd7
#define ADM1275_IOUT_WARN2_LIMIT 0xd7
#define ADM1275_DEVICE_CONFIG 0xd8
@@ -80,6 +85,7 @@ struct adm1275_data {
bool have_iout_min;
bool have_pin_min;
bool have_pin_max;
+ bool have_temp_max;
struct pmbus_driver_info info;
};
@@ -113,6 +119,13 @@ static const struct coefficients adm1276_coefficients[] = {
[4] = { 2115, 0, -1 }, /* power, vrange not set */
};
+static const struct coefficients adm1278_coefficients[] = {
+ [0] = { 19599, 0, -2 }, /* voltage */
+ [1] = { 800, 20475, -1 }, /* current */
+ [2] = { 6123, 0, -2 }, /* power */
+ [3] = { 42, 31880, -1 }, /* temperature */
+};
+
static const struct coefficients adm1293_coefficients[] = {
[0] = { 3333, -1, 0 }, /* voltage, vrange 1.2V */
[1] = { 5552, -5, -1 }, /* voltage, vrange 7.4V */
@@ -196,6 +209,11 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
return -ENXIO;
ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN);
break;
+ case PMBUS_VIRT_READ_TEMP_MAX:
+ if (!data->have_temp_max)
+ return -ENXIO;
+ ret = pmbus_read_word_data(client, 0, ADM1278_PEAK_TEMP);
+ break;
case PMBUS_VIRT_RESET_IOUT_HISTORY:
case PMBUS_VIRT_RESET_VOUT_HISTORY:
case PMBUS_VIRT_RESET_VIN_HISTORY:
@@ -204,6 +222,10 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
if (!data->have_pin_max)
return -ENXIO;
break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ if (!data->have_temp_max)
+ return -ENXIO;
+ break;
default:
ret = -ENODATA;
break;
@@ -245,6 +267,9 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
ret = pmbus_write_word_data(client, 0,
ADM1293_PIN_MIN, 0);
break;
+ case PMBUS_VIRT_RESET_TEMP_HISTORY:
+ ret = pmbus_write_word_data(client, 0, ADM1278_PEAK_TEMP, 0);
+ break;
default:
ret = -ENODATA;
break;
@@ -312,6 +337,7 @@ static const struct i2c_device_id adm1275_id[] = {
{ "adm1075", adm1075 },
{ "adm1275", adm1275 },
{ "adm1276", adm1276 },
+ { "adm1278", adm1278 },
{ "adm1293", adm1293 },
{ "adm1294", adm1294 },
{ }
@@ -329,6 +355,7 @@ static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *mid;
const struct coefficients *coefficients;
int vindex = -1, voindex = -1, cindex = -1, pindex = -1;
+ int tindex = -1;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -386,6 +413,7 @@ static int adm1275_probe(struct i2c_client *client,
info->format[PSC_VOLTAGE_OUT] = direct;
info->format[PSC_CURRENT_OUT] = direct;
info->format[PSC_POWER] = direct;
+ info->format[PSC_TEMPERATURE] = direct;
info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
info->read_word_data = adm1275_read_word_data;
@@ -460,6 +488,27 @@ static int adm1275_probe(struct i2c_client *client,
info->func[0] |=
PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
break;
+ case adm1278:
+ data->have_vout = true;
+ data->have_pin_max = true;
+ data->have_temp_max = true;
+
+ coefficients = adm1278_coefficients;
+ vindex = 0;
+ cindex = 1;
+ pindex = 2;
+ tindex = 3;
+
+ info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT;
+ if (config & ADM1278_TEMP1_EN)
+ info->func[0] |=
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ if (config & ADM1278_VIN_EN)
+ info->func[0] |= PMBUS_HAVE_VIN;
+ if (config & ADM1278_VOUT_EN)
+ info->func[0] |=
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+ break;
case adm1293:
case adm1294:
data->have_iout_min = true;
@@ -537,6 +586,11 @@ static int adm1275_probe(struct i2c_client *client,
info->b[PSC_POWER] = coefficients[pindex].b;
info->R[PSC_POWER] = coefficients[pindex].R;
}
+ if (tindex >= 0) {
+ info->m[PSC_TEMPERATURE] = coefficients[tindex].m;
+ info->b[PSC_TEMPERATURE] = coefficients[tindex].b;
+ info->R[PSC_TEMPERATURE] = coefficients[tindex].R;
+ }
return pmbus_do_probe(client, id, info);
}
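The new coefficients plug into PMBus "direct" format decoding, X = (Y * 10^-R - b) / m, where Y is the raw register word. A standalone worked example with the adm1278 temperature coefficients {m=42, b=31880, R=-1} and an invented raw value:

#include <stdio.h>

int main(void)
{
	/* adm1278 temperature coefficients from the table above */
	const int m = 42, b = 31880;	/* R = -1, so scale Y by 10 */
	int y = 3293;			/* invented PEAK_TEMP register word */

	int temp_c = (y * 10 - b) / m;	/* direct format: (Y*10^-R - b)/m */

	printf("raw %d -> %d degrees C\n", y, temp_c);	/* 25 C */
	return 0;
}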
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 7e20567bc369..912b449c8303 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -52,7 +52,7 @@ static int scpi_read_temp(void *dev, int *temp)
struct scpi_sensors *scpi_sensors = zone->scpi_sensors;
struct scpi_ops *scpi_ops = scpi_sensors->scpi_ops;
struct sensor_data *sensor = &scpi_sensors->data[zone->sensor_id];
- u32 value;
+ u64 value;
int ret;
ret = scpi_ops->sensor_get_value(sensor->info.sensor_id, &value);
@@ -70,7 +70,7 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
struct scpi_sensors *scpi_sensors = dev_get_drvdata(dev);
struct scpi_ops *scpi_ops = scpi_sensors->scpi_ops;
struct sensor_data *sensor;
- u32 value;
+ u64 value;
int ret;
sensor = container_of(attr, struct sensor_data, dev_attr_input);
@@ -79,7 +79,7 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
if (ret)
return ret;
- return sprintf(buf, "%u\n", value);
+ return sprintf(buf, "%llu\n", value);
}
static ssize_t
@@ -114,6 +114,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
{
u16 nr_sensors, i;
int num_temp = 0, num_volt = 0, num_current = 0, num_power = 0;
+ int num_energy = 0;
struct scpi_ops *scpi_ops;
struct device *hwdev, *dev = &pdev->dev;
struct scpi_sensors *scpi_sensors;
@@ -182,6 +183,13 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
"power%d_label", num_power + 1);
num_power++;
break;
+ case ENERGY:
+ snprintf(sensor->input, sizeof(sensor->input),
+ "energy%d_input", num_energy + 1);
+ snprintf(sensor->label, sizeof(sensor->input),
+ "energy%d_label", num_energy + 1);
+ num_energy++;
+ break;
default:
continue;
}
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress-hwmon.c
index 8ba419d343f8..8ba419d343f8 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress-hwmon.c
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index c85935f3525a..db0541031c72 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -4,6 +4,7 @@
menuconfig CORESIGHT
bool "CoreSight Tracing Support"
select ARM_AMBA
+ select PERF_EVENTS
help
This framework provides a kernel interface for the CoreSight debug
and trace drivers to register themselves with. It's intended to build
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 99f8e5f6256e..cf8c6d689747 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
coresight-replicator.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
+ coresight-etm3x-sysfs.o \
+ coresight-etm-perf.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 77d0f9c1118d..acbce79934d6 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Embedded Trace Buffer driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -10,8 +12,8 @@
* GNU General Public License for more details.
*/
+#include <asm/local.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -27,6 +29,11 @@
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
+#include <linux/circ_buf.h>
+#include <linux/mm.h>
+#include <linux/perf_event.h>
+
+#include <asm/local.h>
#include "coresight-priv.h"
@@ -64,6 +71,26 @@
#define ETB_FRAME_SIZE_WORDS 4
/**
+ * struct cs_buffers - keep track of a recording session's specifics
+ * @cur: index of the current buffer
+ * @nr_pages: max number of pages granted to us
+ * @offset: offset within the current buffer
+ * @data_size: how much we collected in this run
+ * @lost: non-zero if we had a HW buffer wrap around
+ * @snapshot: is this run in snapshot mode
+ * @data_pages: a handle to the ring buffer pages
+ */
+struct cs_buffers {
+ unsigned int cur;
+ unsigned int nr_pages;
+ unsigned long offset;
+ local_t data_size;
+ local_t lost;
+ bool snapshot;
+ void **data_pages;
+};
+
+/**
* struct etb_drvdata - specifics associated to an ETB component
* @base: memory mapped base address for this component.
* @dev: the device entity associated to this component.
@@ -71,10 +98,10 @@
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.etb" entry.
* @spinlock: only one at a time pls.
- * @in_use: synchronise user space access to etb buffer.
+ * @reading: synchronise user space access to etb buffer.
+ * @mode: this ETB is being used.
* @buf: area of memory where ETB buffer content gets sent.
* @buffer_depth: size of @buf.
- * @enable: this ETB is being used.
* @trigger_cntr: amount of words to store after a trigger.
*/
struct etb_drvdata {
@@ -84,10 +111,10 @@ struct etb_drvdata {
struct coresight_device *csdev;
struct miscdevice miscdev;
spinlock_t spinlock;
- atomic_t in_use;
+ local_t reading;
+ local_t mode;
u8 *buf;
u32 buffer_depth;
- bool enable;
u32 trigger_cntr;
};
@@ -132,18 +159,31 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int etb_enable(struct coresight_device *csdev)
+static int etb_enable(struct coresight_device *csdev, u32 mode)
{
- struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ u32 val;
unsigned long flags;
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
+ val = local_cmpxchg(&drvdata->mode,
+ CS_MODE_DISABLED, mode);
+ /*
+ * When accessing from Perf, a HW buffer can be handled
+ * by a single trace entity. In sysFS mode many tracers
+ * can be logging to the same HW buffer.
+ */
+ if (val == CS_MODE_PERF)
+ return -EBUSY;
+
+ /* Nothing to do, the tracer is already enabled. */
+ if (val == CS_MODE_SYSFS)
+ goto out;
spin_lock_irqsave(&drvdata->spinlock, flags);
etb_enable_hw(drvdata);
- drvdata->enable = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+out:
dev_info(drvdata->dev, "ETB enabled\n");
return 0;
}
@@ -244,17 +284,225 @@ static void etb_disable(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
- drvdata->enable = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
dev_info(drvdata->dev, "ETB disabled\n");
}
+static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
+ void **pages, int nr_pages, bool overwrite)
+{
+ int node;
+ struct cs_buffers *buf;
+
+ if (cpu == -1)
+ cpu = smp_processor_id();
+ node = cpu_to_node(cpu);
+
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+ if (!buf)
+ return NULL;
+
+ buf->snapshot = overwrite;
+ buf->nr_pages = nr_pages;
+ buf->data_pages = pages;
+
+ return buf;
+}
+
+static void etb_free_buffer(void *config)
+{
+ struct cs_buffers *buf = config;
+
+ kfree(buf);
+}
+
+static int etb_set_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config)
+{
+ int ret = 0;
+ unsigned long head;
+ struct cs_buffers *buf = sink_config;
+
+ /* wrap head around to the amount of space we have */
+ head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+
+ /* find the page to write to */
+ buf->cur = head / PAGE_SIZE;
+
+ /* and offset within that page */
+ buf->offset = head % PAGE_SIZE;
+
+ local_set(&buf->data_size, 0);
+
+ return ret;
+}
+
+static unsigned long etb_reset_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config, bool *lost)
+{
+ unsigned long size = 0;
+ struct cs_buffers *buf = sink_config;
+
+ if (buf) {
+ /*
+ * In snapshot mode ->data_size holds the new address of the
+ * ring buffer's head. The size itself is the whole address
+ * range since we want the latest information.
+ */
+ if (buf->snapshot)
+ handle->head = local_xchg(&buf->data_size,
+ buf->nr_pages << PAGE_SHIFT);
+
+ /*
+ * Tell the tracer PMU how much we got in this run and if
+ * something went wrong along the way. Nobody else can use
+ * this cs_buffers instance until we are done. As such
+ * resetting parameters here and squaring off with the ring
+ * buffer API in the tracer PMU is fine.
+ */
+ *lost = !!local_xchg(&buf->lost, 0);
+ size = local_xchg(&buf->data_size, 0);
+ }
+
+ return size;
+}
+
+static void etb_update_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config)
+{
+ int i, cur;
+ u8 *buf_ptr;
+ u32 read_ptr, write_ptr, capacity;
+ u32 status, read_data, to_read;
+ unsigned long offset;
+ struct cs_buffers *buf = sink_config;
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (!buf)
+ return;
+
+ capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+
+ CS_UNLOCK(drvdata->base);
+ etb_disable_hw(drvdata);
+
+ /* unit is in words, not bytes */
+ read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+ write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+
+ /*
+ * Entries should be aligned to the frame size. If they are not
+ * go back to the last alignment point to give decoding tools a
+ * chance to fix things.
+ */
+ if (write_ptr % ETB_FRAME_SIZE_WORDS) {
+ dev_err(drvdata->dev,
+ "write_ptr: %lu not aligned to formatter frame size\n",
+ (unsigned long)write_ptr);
+
+ write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
+ local_inc(&buf->lost);
+ }
+
+ /*
+ * Get a hold of the status register and see if a wrap around
+ * has occurred. If so adjust things accordingly. Otherwise
+ * start at the beginning and go until the write pointer has
+ * been reached.
+ */
+ status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
+ if (status & ETB_STATUS_RAM_FULL) {
+ local_inc(&buf->lost);
+ to_read = capacity;
+ read_ptr = write_ptr;
+ } else {
+ to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
+ to_read *= ETB_FRAME_SIZE_WORDS;
+ }
+
+ /*
+ * Make sure we don't overwrite data that hasn't been consumed yet.
+ * It is entirely possible that the HW buffer has more data than the
+ * ring buffer can currently handle. If so adjust the start address
+ * to take only the last traces.
+ *
+ * In snapshot mode we are looking to get the latest traces only and as
+ * such, we don't care about not overwriting data that hasn't been
+ * processed by user space.
+ */
+ if (!buf->snapshot && to_read > handle->size) {
+ u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
+
+ /* The new read pointer must be frame size aligned */
+ to_read = handle->size & mask;
+ /*
+ * Move the RAM read pointer up, keeping in mind that
+ * everything is in frame size units.
+ */
+ read_ptr = (write_ptr + drvdata->buffer_depth) -
+ to_read / ETB_FRAME_SIZE_WORDS;
+ /* Wrap around if need be */
+ if (read_ptr > (drvdata->buffer_depth - 1))
+ read_ptr -= drvdata->buffer_depth;
+ /* let the decoder know we've skipped ahead */
+ local_inc(&buf->lost);
+ }
+
+ /* finally tell HW where we want to start reading from */
+ writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+ cur = buf->cur;
+ offset = buf->offset;
+ for (i = 0; i < to_read; i += 4) {
+ buf_ptr = buf->data_pages[cur] + offset;
+ read_data = readl_relaxed(drvdata->base +
+ ETB_RAM_READ_DATA_REG);
+ *buf_ptr++ = read_data >> 0;
+ *buf_ptr++ = read_data >> 8;
+ *buf_ptr++ = read_data >> 16;
+ *buf_ptr++ = read_data >> 24;
+
+ offset += 4;
+ if (offset >= PAGE_SIZE) {
+ offset = 0;
+ cur++;
+ /* wrap around at the end of the buffer */
+ cur &= buf->nr_pages - 1;
+ }
+ }
+
+ /* reset ETB buffer for next run */
+ writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+ writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+
+ /*
+ * In snapshot mode all we have to do is communicate to
+ * perf_aux_output_end() the address of the current head. In full
+ * trace mode the same function expects a size to move rb->aux_head
+ * forward.
+ */
+ if (buf->snapshot)
+ local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
+ else
+ local_add(to_read, &buf->data_size);
+
+ etb_enable_hw(drvdata);
+ CS_LOCK(drvdata->base);
+}
+
static const struct coresight_ops_sink etb_sink_ops = {
.enable = etb_enable,
.disable = etb_disable,
+ .alloc_buffer = etb_alloc_buffer,
+ .free_buffer = etb_free_buffer,
+ .set_buffer = etb_set_buffer,
+ .reset_buffer = etb_reset_buffer,
+ .update_buffer = etb_update_buffer,
};
static const struct coresight_ops etb_cs_ops = {
@@ -266,7 +514,7 @@ static void etb_dump(struct etb_drvdata *drvdata)
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->enable) {
+ if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
etb_enable_hw(drvdata);
@@ -281,7 +529,7 @@ static int etb_open(struct inode *inode, struct file *file)
struct etb_drvdata *drvdata = container_of(file->private_data,
struct etb_drvdata, miscdev);
- if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+ if (local_cmpxchg(&drvdata->reading, 0, 1))
return -EBUSY;
dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -317,7 +565,7 @@ static int etb_release(struct inode *inode, struct file *file)
{
struct etb_drvdata *drvdata = container_of(file->private_data,
struct etb_drvdata, miscdev);
- atomic_set(&drvdata->in_use, 0);
+ local_set(&drvdata->reading, 0);
dev_dbg(drvdata->dev, "%s: released\n", __func__);
return 0;
@@ -489,15 +737,6 @@ err_misc_register:
return ret;
}
-static int etb_remove(struct amba_device *adev)
-{
- struct etb_drvdata *drvdata = amba_get_drvdata(adev);
-
- misc_deregister(&drvdata->miscdev);
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
@@ -537,14 +776,10 @@ static struct amba_driver etb_driver = {
.name = "coresight-etb10",
.owner = THIS_MODULE,
.pm = &etb_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = etb_probe,
- .remove = etb_remove,
.id_table = etb_ids,
};
-
-module_amba_driver(etb_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
+builtin_amba_driver(etb_driver);
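etb_set_buffer() maps the perf aux head onto a (page, offset) pair: wrap the head to the buffer span (nr_pages is a power of two), then split by page size. A standalone sketch of that arithmetic with invented inputs:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long nr_pages = 8;	/* perf hands out a power of two */
	unsigned long head = 0x6123;	/* invented aux head */

	/* wrap head around to the amount of space we have */
	head &= (nr_pages << PAGE_SHIFT) - 1;

	/* the page to write to, and the offset within that page */
	printf("page %lu, offset 0x%lx\n",
	       head / PAGE_SIZE, head % PAGE_SIZE);	/* page 6, 0x123 */
	return 0;
}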
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
new file mode 100644
index 000000000000..755125f7917f
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "coresight-priv.h"
+
+static struct pmu etm_pmu;
+static bool etm_perf_up;
+
+/**
+ * struct etm_event_data - Coresight specifics associated to an event
+ * @work: Handle to free allocated memory outside IRQ context.
+ * @mask: Hold the CPU(s) this event was set for.
+ * @snk_config: The sink configuration.
+ * @path: An array of path, each slot for one CPU.
+ */
+struct etm_event_data {
+ struct work_struct work;
+ cpumask_t mask;
+ void *snk_config;
+ struct list_head **path;
+};
+
+static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
+
+/* ETMv3.5/PTM's ETMCR is 'config' */
+PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
+PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
+
+static struct attribute *etm_config_formats_attr[] = {
+ &format_attr_cycacc.attr,
+ &format_attr_timestamp.attr,
+ NULL,
+};
+
+static struct attribute_group etm_pmu_format_group = {
+ .name = "format",
+ .attrs = etm_config_formats_attr,
+};
+
+static const struct attribute_group *etm_pmu_attr_groups[] = {
+ &etm_pmu_format_group,
+ NULL,
+};
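+
+/*
+ * With the format attributes above, user space can request these
+ * options directly, e.g. with something like
+ * "perf record -e cs_etm/cycacc,timestamp/ ..." - assuming
+ * CORESIGHT_ETM_PMU_NAME resolves to "cs_etm"; the exact event string
+ * depends on the perf tools in use.
+ */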
+
+static void etm_event_read(struct perf_event *event) {}
+
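+/*
+ * Only claim events whose attr.type matches the dynamically allocated
+ * type of etm_pmu; returning -ENOENT for anything else lets the perf
+ * core offer the event to other PMUs.
+ */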
+static int etm_event_init(struct perf_event *event)
+{
+ if (event->attr.type != etm_pmu.type)
+ return -ENOENT;
+
+ return 0;
+}
+
+static void free_event_data(struct work_struct *work)
+{
+ int cpu;
+ cpumask_t *mask;
+ struct etm_event_data *event_data;
+ struct coresight_device *sink;
+
+ event_data = container_of(work, struct etm_event_data, work);
+ mask = &event_data->mask;
+ /*
+ * First deal with the sink configuration. See comment in
+ * etm_setup_aux() about why we take the first available path.
+ */
+ if (event_data->snk_config) {
+ cpu = cpumask_first(mask);
+ sink = coresight_get_sink(event_data->path[cpu]);
+ if (sink_ops(sink)->free_buffer)
+ sink_ops(sink)->free_buffer(event_data->snk_config);
+ }
+
+ for_each_cpu(cpu, mask) {
+ if (event_data->path[cpu])
+ coresight_release_path(event_data->path[cpu]);
+ }
+
+ kfree(event_data->path);
+ kfree(event_data);
+}
+
+static void *alloc_event_data(int cpu)
+{
+ int size;
+ cpumask_t *mask;
+ struct etm_event_data *event_data;
+
+ /* First get memory for the session's data */
+ event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
+ if (!event_data)
+ return NULL;
+
+ /* Make sure nothing disappears under us */
+ get_online_cpus();
+ size = num_online_cpus();
+
+ mask = &event_data->mask;
+ if (cpu != -1)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_copy(mask, cpu_online_mask);
+ put_online_cpus();
+
+ /*
+ * Each CPU has a single path between source and destination. As such
+ * allocate an array using CPU numbers as indexes. That way a path
+ * for any CPU can easily be accessed at any given time. We proceed
+ * the same way for sessions involving a single CPU. The cost of
+ * unused memory when dealing with single CPU trace scenarios is small
+ * compared to the cost of searching through an optimized array.
+ */
+ event_data->path = kcalloc(size,
+ sizeof(struct list_head *), GFP_KERNEL);
+ if (!event_data->path) {
+ kfree(event_data);
+ return NULL;
+ }
+
+ return event_data;
+}
+
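+/*
+ * perf may call free_aux() from a context where sleeping isn't allowed,
+ * hence the deferral to a workqueue; free_event_data() above then
+ * releases the paths and the sink buffer from process context.
+ */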
+static void etm_free_aux(void *data)
+{
+ struct etm_event_data *event_data = data;
+
+ schedule_work(&event_data->work);
+}
+
+static void *etm_setup_aux(int event_cpu, void **pages,
+ int nr_pages, bool overwrite)
+{
+ int cpu;
+ cpumask_t *mask;
+ struct coresight_device *sink;
+ struct etm_event_data *event_data = NULL;
+
+ event_data = alloc_event_data(event_cpu);
+ if (!event_data)
+ return NULL;
+
+ INIT_WORK(&event_data->work, free_event_data);
+
+ mask = &event_data->mask;
+
+ /* Setup the path for each CPU in a trace session */
+ for_each_cpu(cpu, mask) {
+ struct coresight_device *csdev;
+
+ csdev = per_cpu(csdev_src, cpu);
+ if (!csdev)
+ goto err;
+
+ /*
+		 * Building a path doesn't enable it; it simply builds a
+ * list of devices from source to sink that can be
+ * referenced later when the path is actually needed.
+ */
+ event_data->path[cpu] = coresight_build_path(csdev);
+ if (!event_data->path[cpu])
+ goto err;
+ }
+
+	/*
+	 * In theory nothing prevents tracers in a trace session from being
+	 * associated with different sinks, nor from having a sink per tracer.
+	 * But until we have HW with this kind of topology and a way to convey
+	 * sink assignment from the perf cmd line, we need to assume tracers
+	 * in a trace session are using the same sink. Therefore pick the sink
+	 * found at the end of the first available path.
+	 */
+ cpu = cpumask_first(mask);
+ /* Grab the sink at the end of the path */
+ sink = coresight_get_sink(event_data->path[cpu]);
+ if (!sink)
+ goto err;
+
+ if (!sink_ops(sink)->alloc_buffer)
+ goto err;
+
+ /* Get the AUX specific data from the sink buffer */
+ event_data->snk_config =
+ sink_ops(sink)->alloc_buffer(sink, cpu, pages,
+ nr_pages, overwrite);
+ if (!event_data->snk_config)
+ goto err;
+
+out:
+ return event_data;
+
+err:
+ etm_free_aux(event_data);
+ event_data = NULL;
+ goto out;
+}
+
+static void etm_event_start(struct perf_event *event, int flags)
+{
+ int cpu = smp_processor_id();
+ struct etm_event_data *event_data;
+ struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+ struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+
+ if (!csdev)
+ goto fail;
+
+ /*
+ * Deal with the ring buffer API and get a handle on the
+ * session's information.
+ */
+ event_data = perf_aux_output_begin(handle, event);
+ if (!event_data)
+ goto fail;
+
+	/* We need a sink; there is no point in continuing without one */
+ sink = coresight_get_sink(event_data->path[cpu]);
+ if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
+ goto fail_end_stop;
+
+ /* Configure the sink */
+ if (sink_ops(sink)->set_buffer(sink, handle,
+ event_data->snk_config))
+ goto fail_end_stop;
+
+ /* Nothing will happen without a path */
+ if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+ goto fail_end_stop;
+
+ /* Tell the perf core the event is alive */
+ event->hw.state = 0;
+
+ /* Finally enable the tracer */
+ if (source_ops(csdev)->enable(csdev, &event->attr, CS_MODE_PERF))
+ goto fail_end_stop;
+
+out:
+ return;
+
+fail_end_stop:
+ perf_aux_output_end(handle, 0, true);
+fail:
+ event->hw.state = PERF_HES_STOPPED;
+ goto out;
+}
+
+static void etm_event_stop(struct perf_event *event, int mode)
+{
+ bool lost;
+ int cpu = smp_processor_id();
+ unsigned long size;
+ struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+ struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
+ struct etm_event_data *event_data = perf_get_aux(handle);
+
+ if (event->hw.state == PERF_HES_STOPPED)
+ return;
+
+ if (!csdev)
+ return;
+
+ sink = coresight_get_sink(event_data->path[cpu]);
+ if (!sink)
+ return;
+
+ /* stop tracer */
+ source_ops(csdev)->disable(csdev);
+
+ /* tell the core */
+ event->hw.state = PERF_HES_STOPPED;
+
+ if (mode & PERF_EF_UPDATE) {
+ if (WARN_ON_ONCE(handle->event != event))
+ return;
+
+ /* update trace information */
+ if (!sink_ops(sink)->update_buffer)
+ return;
+
+ sink_ops(sink)->update_buffer(sink, handle,
+ event_data->snk_config);
+
+ if (!sink_ops(sink)->reset_buffer)
+ return;
+
+ size = sink_ops(sink)->reset_buffer(sink, handle,
+ event_data->snk_config,
+ &lost);
+
+ perf_aux_output_end(handle, size, lost);
+ }
+
+	/* Disabling the path makes its elements available to other sessions */
+ coresight_disable_path(event_data->path[cpu]);
+}
+
+static int etm_event_add(struct perf_event *event, int mode)
+{
+ int ret = 0;
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (mode & PERF_EF_START) {
+ etm_event_start(event, 0);
+ if (hwc->state & PERF_HES_STOPPED)
+ ret = -EINVAL;
+ } else {
+ hwc->state = PERF_HES_STOPPED;
+ }
+
+ return ret;
+}
+
+static void etm_event_del(struct perf_event *event, int mode)
+{
+ etm_event_stop(event, PERF_EF_UPDATE);
+}
+
+int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{
+ char entry[sizeof("cpu9999999")];
+ int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
+ struct device *pmu_dev = etm_pmu.dev;
+ struct device *cs_dev = &csdev->dev;
+
+ sprintf(entry, "cpu%d", cpu);
+
+ if (!etm_perf_up)
+ return -EPROBE_DEFER;
+
+ if (link) {
+ ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
+ if (ret)
+ return ret;
+ per_cpu(csdev_src, cpu) = csdev;
+ } else {
+ sysfs_remove_link(&pmu_dev->kobj, entry);
+ per_cpu(csdev_src, cpu) = NULL;
+ }
+
+ return 0;
+}
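+
+/*
+ * The result of the above is a set of "cpuN" symlinks in the PMU's
+ * sysfs directory (presumably /sys/bus/event_source/devices/cs_etm/),
+ * each pointing at the CoreSight device of the tracer affined to that
+ * CPU, giving user space a way to correlate tracers with CPUs.
+ */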
+
+static int __init etm_perf_init(void)
+{
+ int ret;
+
+ etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;
+
+ etm_pmu.attr_groups = etm_pmu_attr_groups;
+ etm_pmu.task_ctx_nr = perf_sw_context;
+ etm_pmu.read = etm_event_read;
+ etm_pmu.event_init = etm_event_init;
+ etm_pmu.setup_aux = etm_setup_aux;
+ etm_pmu.free_aux = etm_free_aux;
+ etm_pmu.start = etm_event_start;
+ etm_pmu.stop = etm_event_stop;
+ etm_pmu.add = etm_event_add;
+ etm_pmu.del = etm_event_del;
+
+ ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
+ if (ret == 0)
+ etm_perf_up = true;
+
+ return ret;
+}
+device_initcall(etm_perf_init);
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
new file mode 100644
index 000000000000..87f5a134eb6f
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CORESIGHT_ETM_PERF_H
+#define _CORESIGHT_ETM_PERF_H
+
+struct coresight_device;
+
+#ifdef CONFIG_CORESIGHT
+int etm_perf_symlink(struct coresight_device *csdev, bool link);
+
+#else
+static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
+{ return -EINVAL; }
+
+#endif /* CONFIG_CORESIGHT */
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index b4481eb29304..51597cb2c08a 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -13,6 +13,7 @@
#ifndef _CORESIGHT_CORESIGHT_ETM_H
#define _CORESIGHT_CORESIGHT_ETM_H
+#include <asm/local.h>
#include <linux/spinlock.h>
#include "coresight-priv.h"
@@ -109,7 +110,10 @@
#define ETM_MODE_STALL BIT(2)
#define ETM_MODE_TIMESTAMP BIT(3)
#define ETM_MODE_CTXID BIT(4)
-#define ETM_MODE_ALL 0x1f
+#define ETM_MODE_ALL (ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
+ ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+ ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
+ ETM_MODE_EXCL_USER)
#define ETM_SQR_MASK 0x3
#define ETM_TRACEID_MASK 0x3f
@@ -136,35 +140,16 @@
#define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \
ETM_ADD_COMP_0 | \
ETM_EVENT_NOT_A)
+
/**
- * struct etm_drvdata - specifics associated to an ETM component
- * @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
- * @atclk: optional clock for the core parts of the ETM.
- * @csdev: component vitals needed by the framework.
- * @spinlock: only one at a time pls.
- * @cpu: the cpu this component is affined to.
- * @port_size: port size as reported by ETMCR bit 4-6 and 21.
- * @arch: ETM/PTM version number.
- * @use_cpu14: true if management registers need to be accessed via CP14.
- * @enable: is this ETM/PTM currently tracing.
- * @sticky_enable: true if ETM base configuration has been done.
- * @boot_enable:true if we should start tracing at boot time.
- * @os_unlock: true if access to management registers is allowed.
- * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
- * @nr_cntr: Number of counters as found in ETMCCR bit 13-15.
- * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19.
- * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22.
- * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
- * @etmccr: value of register ETMCCR.
- * @etmccer: value of register ETMCCER.
- * @traceid: value of the current ID for this component.
+ * struct etm_config - configuration information related to an ETM
* @mode: controls various modes supported by this ETM/PTM.
* @ctrl: used in conjunction with @mode.
* @trigger_event: setting for register ETMTRIGGER.
* @startstop_ctrl: setting for register ETMTSSCR.
* @enable_event: setting for register ETMTEEVR.
* @enable_ctrl1: setting for register ETMTECR1.
+ * @enable_ctrl2: setting for register ETMTECR2.
* @fifofull_level: setting for register ETMFFLR.
* @addr_idx: index for the address comparator selection.
* @addr_val: value for address comparator register.
@@ -189,36 +174,16 @@
* @ctxid_mask: mask applicable to all the context IDs.
* @sync_freq: Synchronisation frequency.
* @timestamp_event: Defines an event that requests the insertion
- of a timestamp into the trace stream.
+ * of a timestamp into the trace stream.
*/
-struct etm_drvdata {
- void __iomem *base;
- struct device *dev;
- struct clk *atclk;
- struct coresight_device *csdev;
- spinlock_t spinlock;
- int cpu;
- int port_size;
- u8 arch;
- bool use_cp14;
- bool enable;
- bool sticky_enable;
- bool boot_enable;
- bool os_unlock;
- u8 nr_addr_cmp;
- u8 nr_cntr;
- u8 nr_ext_inp;
- u8 nr_ext_out;
- u8 nr_ctxid_cmp;
- u32 etmccr;
- u32 etmccer;
- u32 traceid;
+struct etm_config {
u32 mode;
u32 ctrl;
u32 trigger_event;
u32 startstop_ctrl;
u32 enable_event;
u32 enable_ctrl1;
+ u32 enable_ctrl2;
u32 fifofull_level;
u8 addr_idx;
u32 addr_val[ETM_MAX_ADDR_CMP];
@@ -244,6 +209,56 @@ struct etm_drvdata {
u32 timestamp_event;
};
+/**
+ * struct etm_drvdata - specifics associated with an ETM component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated to this component.
+ * @atclk: optional clock for the core parts of the ETM.
+ * @csdev: component vitals needed by the framework.
+ * @spinlock: only one at a time pls.
+ * @cpu: the cpu this component is affined to.
+ * @port_size: port size as reported by ETMCR bit 4-6 and 21.
+ * @arch: ETM/PTM version number.
+ * @use_cp14:	true if management registers need to be accessed via CP14.
+ * @mode:	this tracer's mode, i.e. sysFS, Perf or disabled.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable: true if we should start tracing at boot time.
+ * @os_unlock: true if access to management registers is allowed.
+ * @nr_addr_cmp: Number of pairs of address comparators as found in ETMCCR.
+ * @nr_cntr: Number of counters as found in ETMCCR bit 13-15.
+ * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19.
+ * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22.
+ * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
+ * @etmccr: value of register ETMCCR.
+ * @etmccer: value of register ETMCCER.
+ * @traceid: value of the current ID for this component.
+ * @config: structure holding configuration parameters.
+ */
+struct etm_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *atclk;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ int cpu;
+ int port_size;
+ u8 arch;
+ bool use_cp14;
+ local_t mode;
+ bool sticky_enable;
+ bool boot_enable;
+ bool os_unlock;
+ u8 nr_addr_cmp;
+ u8 nr_cntr;
+ u8 nr_ext_inp;
+ u8 nr_ext_out;
+ u8 nr_ctxid_cmp;
+ u32 etmccr;
+ u32 etmccer;
+ u32 traceid;
+ struct etm_config config;
+};
+
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
ETM_ADDR_TYPE_SINGLE,
@@ -251,4 +266,39 @@ enum etm_addr_type {
ETM_ADDR_TYPE_START,
ETM_ADDR_TYPE_STOP,
};
+
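+/*
+ * ETM/PTM management registers can sit behind CP14 or in the memory
+ * mapped region depending on the implementation; these accessors pick
+ * the right route based on drvdata->use_cp14 so callers need not care.
+ */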
+static inline void etm_writel(struct etm_drvdata *drvdata,
+ u32 val, u32 off)
+{
+ if (drvdata->use_cp14) {
+ if (etm_writel_cp14(off, val)) {
+ dev_err(drvdata->dev,
+ "invalid CP14 access to ETM reg: %#x", off);
+ }
+ } else {
+ writel_relaxed(val, drvdata->base + off);
+ }
+}
+
+static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
+{
+ u32 val;
+
+ if (drvdata->use_cp14) {
+ if (etm_readl_cp14(off, &val)) {
+ dev_err(drvdata->dev,
+ "invalid CP14 access to ETM reg: %#x", off);
+ }
+ } else {
+ val = readl_relaxed(drvdata->base + off);
+ }
+
+ return val;
+}
+
+extern const struct attribute_group *coresight_etm_groups[];
+int etm_get_trace_id(struct etm_drvdata *drvdata);
+void etm_set_default(struct etm_config *config);
+void etm_config_trace_mode(struct etm_config *config);
+struct etm_config *get_etm_config(struct etm_drvdata *drvdata);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
new file mode 100644
index 000000000000..cbb4046c1070
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -0,0 +1,1272 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm.h"
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_addr_cmp;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+	unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_cntr;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ctxid_cmp_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_ctxid_cmp;
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ctxid_cmp);
+
+static ssize_t etmsr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long flags, val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ pm_runtime_get_sync(drvdata->dev);
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ CS_UNLOCK(drvdata->base);
+
+ val = etm_readl(drvdata, ETMSR);
+
+ CS_LOCK(drvdata->base);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ pm_runtime_put(drvdata->dev);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(etmsr);
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int i, ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val) {
+ spin_lock(&drvdata->spinlock);
+ memset(config, 0, sizeof(struct etm_config));
+ config->mode = ETM_MODE_EXCLUDE;
+ config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+		for (i = 0; i < drvdata->nr_addr_cmp; i++)
+			config->addr_type[i] = ETM_ADDR_TYPE_NONE;
+
+ etm_set_default(config);
+ spin_unlock(&drvdata->spinlock);
+ }
+
+ return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->mode;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->mode = val & ETM_MODE_ALL;
+
+ if (config->mode & ETM_MODE_EXCLUDE)
+ config->enable_ctrl1 |= ETMTECR1_INC_EXC;
+ else
+ config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
+
+ if (config->mode & ETM_MODE_CYCACC)
+ config->ctrl |= ETMCR_CYC_ACC;
+ else
+ config->ctrl &= ~ETMCR_CYC_ACC;
+
+ if (config->mode & ETM_MODE_STALL) {
+ if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+ dev_warn(drvdata->dev, "stall mode not supported\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ config->ctrl |= ETMCR_STALL_MODE;
+	} else {
+		config->ctrl &= ~ETMCR_STALL_MODE;
+	}
+
+ if (config->mode & ETM_MODE_TIMESTAMP) {
+ if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+ dev_warn(drvdata->dev, "timestamp not supported\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ config->ctrl |= ETMCR_TIMESTAMP_EN;
+	} else {
+		config->ctrl &= ~ETMCR_TIMESTAMP_EN;
+	}
+
+ if (config->mode & ETM_MODE_CTXID)
+ config->ctrl |= ETMCR_CTXID_SIZE;
+ else
+ config->ctrl &= ~ETMCR_CTXID_SIZE;
+
+ if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+ etm_config_trace_mode(config);
+
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+
+err_unlock:
+ spin_unlock(&drvdata->spinlock);
+ return ret;
+}
+static DEVICE_ATTR_RW(mode);
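+
+/*
+ * As an example, "echo 0x11 > mode" from a shell selects
+ * ETM_MODE_EXCLUDE and ETM_MODE_CTXID together (assuming the usual
+ * ETM_MODE_* assignments of bit 0 and bit 4 respectively), which the
+ * handler above translates to ETMTECR1_INC_EXC and ETMCR_CTXID_SIZE.
+ */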
+
+static ssize_t trigger_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->trigger_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->trigger_event = val & ETM_EVENT_MASK;
+
+ return size;
+}
+static DEVICE_ATTR_RW(trigger_event);
+
+static ssize_t enable_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->enable_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t enable_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->enable_event = val & ETM_EVENT_MASK;
+
+ return size;
+}
+static DEVICE_ATTR_RW(enable_event);
+
+static ssize_t fifofull_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->fifofull_level;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t fifofull_level_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->fifofull_level = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(fifofull_level);
+
+static ssize_t addr_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->addr_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_addr_cmp)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->addr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_single_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+
+ val = config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+
+ config->addr_val[idx] = val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val1 = config->addr_val[idx];
+ val2 = config->addr_val[idx + 1];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+ /* Lower address comparator cannot have a higher address value */
+ if (val1 > val2)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = val1;
+ config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+ config->addr_val[idx + 1] = val2;
+ config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+ config->enable_ctrl1 |= (1 << (idx/2));
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_range);
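+
+/*
+ * Ranges are programmed on an even/odd comparator pair, so a typical
+ * sequence would be "echo 0 > addr_idx" followed by
+ * "echo 0x8000 0x9000 > addr_range", arming comparators 0/1 and
+ * setting the matching include bit in ETMTECR1 via 1 << (idx / 2).
+ */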
+
+static ssize_t addr_start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_START;
+ config->startstop_ctrl |= (1 << idx);
+ config->enable_ctrl1 |= BIT(25);
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+ config->startstop_ctrl |= (1 << (idx + 16));
+ config->enable_ctrl1 |= ETMTECR1_START_STOP;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+static ssize_t addr_acctype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->addr_acctype[config->addr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_acctype_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->addr_acctype[config->addr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(addr_acctype);
+
+static ssize_t cntr_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->cntr_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_cntr)
+ return -EINVAL;
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->cntr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntr_rld_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->cntr_rld_val[config->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->cntr_rld_val[config->cntr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_val);
+
+static ssize_t cntr_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->cntr_event[config->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_event);
+
+static ssize_t cntr_rld_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->cntr_rld_event[config->cntr_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_event);
+
+static ssize_t cntr_val_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret = 0;
+ u32 val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ if (!local_read(&drvdata->mode)) {
+ spin_lock(&drvdata->spinlock);
+ for (i = 0; i < drvdata->nr_cntr; i++)
+			ret += sprintf(buf + ret, "counter %d: %x\n",
+ i, config->cntr_val[i]);
+ spin_unlock(&drvdata->spinlock);
+ return ret;
+ }
+
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ val = etm_readl(drvdata, ETMCNTVRn(i));
+		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
+ }
+
+ return ret;
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ config->cntr_val[config->cntr_idx] = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t seq_12_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_12_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_12_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_12_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_12_event);
+
+static ssize_t seq_21_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_21_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_21_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_21_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_21_event);
+
+static ssize_t seq_23_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_23_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_23_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_23_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_23_event);
+
+static ssize_t seq_31_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_31_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_31_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_31_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_31_event);
+
+static ssize_t seq_32_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_32_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_32_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_32_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_32_event);
+
+static ssize_t seq_13_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->seq_13_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_13_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->seq_13_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_13_event);
+
+static ssize_t seq_curr_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val, flags;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ if (!local_read(&drvdata->mode)) {
+ val = config->seq_curr_state;
+ goto out;
+ }
+
+ pm_runtime_get_sync(drvdata->dev);
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ CS_UNLOCK(drvdata->base);
+ val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+ CS_LOCK(drvdata->base);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ pm_runtime_put(drvdata->dev);
+out:
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_curr_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val > ETM_SEQ_STATE_MAX_VAL)
+ return -EINVAL;
+
+ config->seq_curr_state = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(seq_curr_state);
+
+static ssize_t ctxid_idx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->ctxid_idx;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ if (val >= drvdata->nr_ctxid_cmp)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->ctxid_idx = val;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_pid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = config->ctxid_vpid[config->ctxid_idx];
+ spin_unlock(&drvdata->spinlock);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_pid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long vpid, pid;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &vpid);
+ if (ret)
+ return ret;
+
+ pid = coresight_vpid_to_pid(vpid);
+
+ spin_lock(&drvdata->spinlock);
+ config->ctxid_pid[config->ctxid_idx] = pid;
+ config->ctxid_vpid[config->ctxid_idx] = vpid;
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_pid);
+
+static ssize_t ctxid_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->ctxid_mask;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->ctxid_mask = val;
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_mask);
+
+static ssize_t sync_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->sync_freq;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t sync_freq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->sync_freq = val & ETM_SYNC_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(sync_freq);
+
+static ssize_t timestamp_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ val = config->timestamp_event;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t timestamp_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etm_config *config = &drvdata->config;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ config->timestamp_event = val & ETM_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(timestamp_event);
+
+static ssize_t cpu_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->cpu;
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+static DEVICE_ATTR_RO(cpu);
+
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = etm_get_trace_id(drvdata);
+
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ drvdata->traceid = val & ETM_TRACEID_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+static struct attribute *coresight_etm_attrs[] = {
+ &dev_attr_nr_addr_cmp.attr,
+ &dev_attr_nr_cntr.attr,
+ &dev_attr_nr_ctxid_cmp.attr,
+ &dev_attr_etmsr.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_trigger_event.attr,
+ &dev_attr_enable_event.attr,
+ &dev_attr_fifofull_level.attr,
+ &dev_attr_addr_idx.attr,
+ &dev_attr_addr_single.attr,
+ &dev_attr_addr_range.attr,
+ &dev_attr_addr_start.attr,
+ &dev_attr_addr_stop.attr,
+ &dev_attr_addr_acctype.attr,
+ &dev_attr_cntr_idx.attr,
+ &dev_attr_cntr_rld_val.attr,
+ &dev_attr_cntr_event.attr,
+ &dev_attr_cntr_rld_event.attr,
+ &dev_attr_cntr_val.attr,
+ &dev_attr_seq_12_event.attr,
+ &dev_attr_seq_21_event.attr,
+ &dev_attr_seq_23_event.attr,
+ &dev_attr_seq_31_event.attr,
+ &dev_attr_seq_32_event.attr,
+ &dev_attr_seq_13_event.attr,
+ &dev_attr_seq_curr_state.attr,
+ &dev_attr_ctxid_idx.attr,
+ &dev_attr_ctxid_pid.attr,
+ &dev_attr_ctxid_mask.attr,
+ &dev_attr_sync_freq.attr,
+ &dev_attr_timestamp_event.attr,
+ &dev_attr_traceid.attr,
+ &dev_attr_cpu.attr,
+ NULL,
+};
+
+#define coresight_simple_func(name, offset) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
+ readl_relaxed(drvdata->base + offset)); \
+} \
+DEVICE_ATTR_RO(name)
+
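+/*
+ * For instance, coresight_simple_func(etmccr, ETMCCR) expands to an
+ * etmccr_show() handler that prints the live register value and to a
+ * read-only dev_attr_etmccr used in the "mgmt" group below.
+ */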
+coresight_simple_func(etmccr, ETMCCR);
+coresight_simple_func(etmccer, ETMCCER);
+coresight_simple_func(etmscr, ETMSCR);
+coresight_simple_func(etmidr, ETMIDR);
+coresight_simple_func(etmcr, ETMCR);
+coresight_simple_func(etmtraceidr, ETMTRACEIDR);
+coresight_simple_func(etmteevr, ETMTEEVR);
+coresight_simple_func(etmtssvr, ETMTSSCR);
+coresight_simple_func(etmtecr1, ETMTECR1);
+coresight_simple_func(etmtecr2, ETMTECR2);
+
+static struct attribute *coresight_etm_mgmt_attrs[] = {
+ &dev_attr_etmccr.attr,
+ &dev_attr_etmccer.attr,
+ &dev_attr_etmscr.attr,
+ &dev_attr_etmidr.attr,
+ &dev_attr_etmcr.attr,
+ &dev_attr_etmtraceidr.attr,
+ &dev_attr_etmteevr.attr,
+ &dev_attr_etmtssvr.attr,
+ &dev_attr_etmtecr1.attr,
+ &dev_attr_etmtecr2.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_etm_group = {
+ .attrs = coresight_etm_attrs,
+};
+
+static const struct attribute_group coresight_etm_mgmt_group = {
+ .attrs = coresight_etm_mgmt_attrs,
+ .name = "mgmt",
+};
+
+const struct attribute_group *coresight_etm_groups[] = {
+ &coresight_etm_group,
+ &coresight_etm_mgmt_group,
+ NULL,
+};
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index d630b7ece735..d83ab82672e4 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Program Flow Trace driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -27,14 +29,21 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
+#include <linux/perf_event.h>
#include <asm/sections.h>
#include "coresight-etm.h"
+#include "coresight-etm-perf.h"
+/*
+ * Not really modular but using module_param is the easiest way to
+ * remain consistent with existing use cases for now.
+ */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);
@@ -42,45 +51,16 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
static int etm_count;
static struct etm_drvdata *etmdrvdata[NR_CPUS];
-static inline void etm_writel(struct etm_drvdata *drvdata,
- u32 val, u32 off)
-{
- if (drvdata->use_cp14) {
- if (etm_writel_cp14(off, val)) {
- dev_err(drvdata->dev,
- "invalid CP14 access to ETM reg: %#x", off);
- }
- } else {
- writel_relaxed(val, drvdata->base + off);
- }
-}
-
-static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
-{
- u32 val;
-
- if (drvdata->use_cp14) {
- if (etm_readl_cp14(off, &val)) {
- dev_err(drvdata->dev,
- "invalid CP14 access to ETM reg: %#x", off);
- }
- } else {
- val = readl_relaxed(drvdata->base + off);
- }
-
- return val;
-}
-
/*
* Memory mapped writes to clear os lock are not supported on some processors
* and OS lock must be unlocked before any memory mapped access on such
* processors, otherwise memory mapped reads/writes will be invalid.
*/
-static void etm_os_unlock(void *info)
+static void etm_os_unlock(struct etm_drvdata *drvdata)
{
- struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
/* Writing any value to ETMOSLAR unlocks the trace registers */
etm_writel(drvdata, 0x0, ETMOSLAR);
+ drvdata->os_unlock = true;
isb();
}
@@ -215,36 +195,156 @@ static void etm_clr_prog(struct etm_drvdata *drvdata)
}
}
-static void etm_set_default(struct etm_drvdata *drvdata)
+void etm_set_default(struct etm_config *config)
{
int i;
- drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->enable_event = ETM_HARD_WIRE_RES_A;
+ if (WARN_ON_ONCE(!config))
+ return;
- drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
+ /*
+ * Taken verbatim from the TRM:
+ *
+ * To trace all memory:
+ * set bit [24] in register 0x009, the ETMTECR1, to 1
+ * set all other bits in register 0x009, the ETMTECR1, to 0
+ * set all bits in register 0x007, the ETMTECR2, to 0
+ * set register 0x008, the ETMTEEVR, to 0x6F (TRUE).
+ */
+ config->enable_ctrl1 = BIT(24);
+ config->enable_ctrl2 = 0x0;
+ config->enable_event = ETM_HARD_WIRE_RES_A;
- for (i = 0; i < drvdata->nr_cntr; i++) {
- drvdata->cntr_rld_val[i] = 0x0;
- drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
- drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
- drvdata->cntr_val[i] = 0x0;
+ config->trigger_event = ETM_DEFAULT_EVENT_VAL;
+
+ config->seq_12_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_21_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_23_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_31_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_32_event = ETM_DEFAULT_EVENT_VAL;
+ config->seq_13_event = ETM_DEFAULT_EVENT_VAL;
+ config->timestamp_event = ETM_DEFAULT_EVENT_VAL;
+
+ for (i = 0; i < ETM_MAX_CNTR; i++) {
+ config->cntr_rld_val[i] = 0x0;
+ config->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
+ config->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
+ config->cntr_val[i] = 0x0;
}
- drvdata->seq_curr_state = 0x0;
- drvdata->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
- drvdata->ctxid_pid[i] = 0x0;
- drvdata->ctxid_vpid[i] = 0x0;
+ config->seq_curr_state = 0x0;
+ config->ctxid_idx = 0x0;
+ for (i = 0; i < ETM_MAX_CTXID_CMP; i++) {
+ config->ctxid_pid[i] = 0x0;
+ config->ctxid_vpid[i] = 0x0;
}
- drvdata->ctxid_mask = 0x0;
+ config->ctxid_mask = 0x0;
+}
+
+void etm_config_trace_mode(struct etm_config *config)
+{
+ u32 flags, mode;
+
+ mode = config->mode;
+
+ mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
+
+ /* excluding kernel AND user space doesn't make sense */
+ if (mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+ return;
+
+	/* nothing to do if neither flag is set */
+ if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+ return;
+
+ flags = (1 << 0 | /* instruction execute */
+ 3 << 3 | /* ARM instruction */
+ 0 << 5 | /* No data value comparison */
+		 0 << 7 |	/* No exact match */
+ 0 << 8); /* Ignore context ID */
+
+ /* No need to worry about single address comparators. */
+ config->enable_ctrl2 = 0x0;
+
+ /* Bit 0 is address range comparator 1 */
+ config->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
+
+ /*
+ * On ETMv3.5:
+ * ETMACTRn[13,11] == Non-secure state comparison control
+ * ETMACTRn[12,10] == Secure state comparison control
+ *
+ * b00 == Match in all modes in this state
+	 * b01 == Do not match in any mode in this state
+	 * b10 == Match in all modes except user mode in this state
+ * b11 == Match only in user mode in this state
+ */
+
+ /* Tracing in secure mode is not supported at this time */
+ flags |= (0 << 12 | 1 << 10);
+
+ if (mode & ETM_MODE_EXCL_USER) {
+ /* exclude user, match all modes except user mode */
+ flags |= (1 << 13 | 0 << 11);
+ } else {
+ /* exclude kernel, match only in user mode */
+ flags |= (1 << 13 | 1 << 11);
+ }
+
+ /*
+ * The ETMEEVR register is already set to "hard wire A". As such
+ * all there is to do is setup an address comparator that spans
+ * the entire address range and configure the state and mode bits.
+ */
+ config->addr_val[0] = (u32) 0x0;
+ config->addr_val[1] = (u32) ~0x0;
+ config->addr_acctype[0] = flags;
+ config->addr_acctype[1] = flags;
+ config->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+ config->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+}
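+
+/*
+ * Worked example for the access type above: the base flags amount to
+ * 0x19 (instruction execute, ARM instruction) and the secure state
+ * bits add 0x400, so excluding user space programs 0x2419 into
+ * ETMACTRn while excluding the kernel programs 0x2C19.
+ */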
+
+#define ETM3X_SUPPORTED_OPTIONS (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN)
+
+static int etm_parse_event_config(struct etm_drvdata *drvdata,
+ struct perf_event_attr *attr)
+{
+ struct etm_config *config = &drvdata->config;
+
+ if (!attr)
+ return -EINVAL;
+
+ /* Clear configuration from previous run */
+ memset(config, 0, sizeof(struct etm_config));
+
+ if (attr->exclude_kernel)
+ config->mode = ETM_MODE_EXCL_KERN;
+
+ if (attr->exclude_user)
+ config->mode = ETM_MODE_EXCL_USER;
+
+ /* Always start from the default config */
+ etm_set_default(config);
+
+ /*
+ * By default the tracers are configured to trace the whole address
+ * range. Narrow the field only if requested by user space.
+ */
+ if (config->mode)
+ etm_config_trace_mode(config);
+
+ /*
+ * At this time only the cycle-accurate and timestamp options are
+ * available.
+ */
+ if (attr->config & ~ETM3X_SUPPORTED_OPTIONS)
+ return -EINVAL;
+
+ config->ctrl = attr->config;
+
+ return 0;
}
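A minimal sketch of a caller driving this parser (illustrative only; the function name is hypothetical and error handling is trimmed). Any bit in attr->config outside ETM3X_SUPPORTED_OPTIONS makes the parser return -EINVAL:

static int example_configure_session(struct etm_drvdata *drvdata)
{
	struct perf_event_attr attr = {
		.exclude_kernel = 1,		/* trace user space only */
		.config = ETMCR_TIMESTAMP_EN,	/* a supported option */
	};

	/* Resets the config, narrows the range, applies the options */
	return etm_parse_event_config(drvdata, &attr);
}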
static void etm_enable_hw(void *info)
@@ -252,6 +352,7 @@ static void etm_enable_hw(void *info)
int i;
u32 etmcr;
struct etm_drvdata *drvdata = info;
+ struct etm_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
@@ -265,65 +366,74 @@ static void etm_enable_hw(void *info)
etm_set_prog(drvdata);
etmcr = etm_readl(drvdata, ETMCR);
- etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
+ /* Clear settings from a previous run if need be */
+ etmcr &= ~ETM3X_SUPPORTED_OPTIONS;
etmcr |= drvdata->port_size;
- etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
- etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
- etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
- etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
- etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
- etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
+ etmcr |= ETMCR_ETM_EN;
+ etm_writel(drvdata, config->ctrl | etmcr, ETMCR);
+ etm_writel(drvdata, config->trigger_event, ETMTRIGGER);
+ etm_writel(drvdata, config->startstop_ctrl, ETMTSSCR);
+ etm_writel(drvdata, config->enable_event, ETMTEEVR);
+ etm_writel(drvdata, config->enable_ctrl1, ETMTECR1);
+ etm_writel(drvdata, config->fifofull_level, ETMFFLR);
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
- etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
- etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
+ etm_writel(drvdata, config->addr_val[i], ETMACVRn(i));
+ etm_writel(drvdata, config->addr_acctype[i], ETMACTRn(i));
}
for (i = 0; i < drvdata->nr_cntr; i++) {
- etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
- etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
- etm_writel(drvdata, drvdata->cntr_rld_event[i],
+ etm_writel(drvdata, config->cntr_rld_val[i], ETMCNTRLDVRn(i));
+ etm_writel(drvdata, config->cntr_event[i], ETMCNTENRn(i));
+ etm_writel(drvdata, config->cntr_rld_event[i],
ETMCNTRLDEVRn(i));
- etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
- }
- etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
- etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
- etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
- etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
- etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
- etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
- etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
+ etm_writel(drvdata, config->cntr_val[i], ETMCNTVRn(i));
+ }
+ etm_writel(drvdata, config->seq_12_event, ETMSQ12EVR);
+ etm_writel(drvdata, config->seq_21_event, ETMSQ21EVR);
+ etm_writel(drvdata, config->seq_23_event, ETMSQ23EVR);
+ etm_writel(drvdata, config->seq_31_event, ETMSQ31EVR);
+ etm_writel(drvdata, config->seq_32_event, ETMSQ32EVR);
+ etm_writel(drvdata, config->seq_13_event, ETMSQ13EVR);
+ etm_writel(drvdata, config->seq_curr_state, ETMSQR);
for (i = 0; i < drvdata->nr_ext_out; i++)
etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
- etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
- etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
- etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
+ etm_writel(drvdata, config->ctxid_pid[i], ETMCIDCVRn(i));
+ etm_writel(drvdata, config->ctxid_mask, ETMCIDCMR);
+ etm_writel(drvdata, config->sync_freq, ETMSYNCFR);
/* No external input selected */
etm_writel(drvdata, 0x0, ETMEXTINSELR);
- etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
+ etm_writel(drvdata, config->timestamp_event, ETMTSEVR);
/* No auxiliary control selected */
etm_writel(drvdata, 0x0, ETMAUXCR);
etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
/* No VMID comparator value selected */
etm_writel(drvdata, 0x0, ETMVMIDCVR);
- /* Ensures trace output is enabled from this ETM */
- etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
-
etm_clr_prog(drvdata);
CS_LOCK(drvdata->base);
dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
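Every register update above follows the same bracket; a minimal sketch of the pattern (illustrative only, assuming the CS_LOCK/CS_UNLOCK and prog-bit helpers used throughout this file):

static void example_program_etm(struct etm_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);	/* open the CoreSight software lock */
	etm_set_prog(drvdata);		/* halt the tracer; regs writable */

	/* ... configuration writes, as in etm_enable_hw() ... */

	etm_clr_prog(drvdata);		/* resume tracing with new config */
	CS_LOCK(drvdata->base);		/* re-engage the software lock */
}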
-static int etm_trace_id(struct coresight_device *csdev)
+static int etm_cpu_id(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->cpu;
+}
+
+int etm_get_trace_id(struct etm_drvdata *drvdata)
+{
unsigned long flags;
int trace_id = -1;
- if (!drvdata->enable)
+ if (!drvdata)
+ goto out;
+
+ if (!local_read(&drvdata->mode))
return drvdata->traceid;
- pm_runtime_get_sync(csdev->dev.parent);
+
+ pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -332,17 +442,41 @@ static int etm_trace_id(struct coresight_device *csdev)
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(csdev->dev.parent);
+ pm_runtime_put(drvdata->dev);
+out:
return trace_id;
+}
+
+static int etm_trace_id(struct coresight_device *csdev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return etm_get_trace_id(drvdata);
}
-static int etm_enable(struct coresight_device *csdev)
+static int etm_enable_perf(struct coresight_device *csdev,
+ struct perf_event_attr *attr)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+ return -EINVAL;
+
+ /* Configure the tracer based on the session's specifics */
+ etm_parse_event_config(drvdata, attr);
+ /* And enable it */
+ etm_enable_hw(drvdata);
+
+ return 0;
+}
+
+static int etm_enable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
- pm_runtime_get_sync(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
/*
@@ -357,16 +491,45 @@ static int etm_enable(struct coresight_device *csdev)
goto err;
}
- drvdata->enable = true;
drvdata->sticky_enable = true;
-
spin_unlock(&drvdata->spinlock);
dev_info(drvdata->dev, "ETM tracing enabled\n");
return 0;
+
err:
spin_unlock(&drvdata->spinlock);
- pm_runtime_put(csdev->dev.parent);
+ return ret;
+}
+
+static int etm_enable(struct coresight_device *csdev,
+ struct perf_event_attr *attr, u32 mode)
+{
+ int ret;
+ u32 val;
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
+
+ /* Someone is already using the tracer */
+ if (val)
+ return -EBUSY;
+
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ ret = etm_enable_sysfs(csdev);
+ break;
+ case CS_MODE_PERF:
+ ret = etm_enable_perf(csdev, attr);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ /* The tracer didn't start */
+ if (ret)
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
+
return ret;
}
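The local_cmpxchg() is what arbitrates between the sysFS and perf interfaces: only a transition away from CS_MODE_DISABLED succeeds, so exactly one subsystem owns the tracer at a time. A condensed sketch of the claim step (hypothetical helper name):

static int example_claim_tracer(struct etm_drvdata *drvdata, u32 new_mode)
{
	/* local_cmpxchg() returns the old mode; non-zero means busy */
	if (local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, new_mode))
		return -EBUSY;

	return 0;
}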
@@ -374,18 +537,16 @@ static void etm_disable_hw(void *info)
{
int i;
struct etm_drvdata *drvdata = info;
+ struct etm_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
etm_set_prog(drvdata);
- /* Program trace enable to low by using always false event */
- etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
-
/* Read back sequencer and counters for post trace analysis */
- drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+ config->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
for (i = 0; i < drvdata->nr_cntr; i++)
- drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
+ config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
etm_set_pwrdwn(drvdata);
CS_LOCK(drvdata->base);
@@ -393,7 +554,28 @@ static void etm_disable_hw(void *info)
dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
-static void etm_disable(struct coresight_device *csdev)
+static void etm_disable_perf(struct coresight_device *csdev)
+{
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+ return;
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Setting the prog bit disables tracing immediately */
+ etm_set_prog(drvdata);
+
+ /*
+ * There is no way to know when the tracer will be used again, so
+ * power it down.
+ */
+ etm_set_pwrdwn(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void etm_disable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -411,1235 +593,52 @@ static void etm_disable(struct coresight_device *csdev)
* ensures that register writes occur when cpu is powered.
*/
smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
- drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
put_online_cpus();
- pm_runtime_put(csdev->dev.parent);
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
-static const struct coresight_ops_source etm_source_ops = {
- .trace_id = etm_trace_id,
- .enable = etm_enable,
- .disable = etm_disable,
-};
-
-static const struct coresight_ops etm_cs_ops = {
- .source_ops = &etm_source_ops,
-};
-
-static ssize_t nr_addr_cmp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_addr_cmp;
- return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_addr_cmp);
-
-static ssize_t nr_cntr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_cntr;
- return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_cntr);
-
-static ssize_t nr_ctxid_cmp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_ctxid_cmp;
- return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ctxid_cmp);
-
-static ssize_t etmsr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long flags, val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
- CS_UNLOCK(drvdata->base);
-
- val = etm_readl(drvdata, ETMSR);
-
- CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-
- return sprintf(buf, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(etmsr);
-
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int i, ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val) {
- spin_lock(&drvdata->spinlock);
- drvdata->mode = ETM_MODE_EXCLUDE;
- drvdata->ctrl = 0x0;
- drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
- drvdata->startstop_ctrl = 0x0;
- drvdata->addr_idx = 0x0;
- for (i = 0; i < drvdata->nr_addr_cmp; i++) {
- drvdata->addr_val[i] = 0x0;
- drvdata->addr_acctype[i] = 0x0;
- drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
- }
- drvdata->cntr_idx = 0x0;
-
- etm_set_default(drvdata);
- spin_unlock(&drvdata->spinlock);
- }
-
- return size;
-}
-static DEVICE_ATTR_WO(reset);
-
-static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->mode;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->mode = val & ETM_MODE_ALL;
-
- if (drvdata->mode & ETM_MODE_EXCLUDE)
- drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
- else
- drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
-
- if (drvdata->mode & ETM_MODE_CYCACC)
- drvdata->ctrl |= ETMCR_CYC_ACC;
- else
- drvdata->ctrl &= ~ETMCR_CYC_ACC;
-
- if (drvdata->mode & ETM_MODE_STALL) {
- if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
- dev_warn(drvdata->dev, "stall mode not supported\n");
- ret = -EINVAL;
- goto err_unlock;
- }
- drvdata->ctrl |= ETMCR_STALL_MODE;
- } else
- drvdata->ctrl &= ~ETMCR_STALL_MODE;
-
- if (drvdata->mode & ETM_MODE_TIMESTAMP) {
- if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
- dev_warn(drvdata->dev, "timestamp not supported\n");
- ret = -EINVAL;
- goto err_unlock;
- }
- drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
- } else
- drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
-
- if (drvdata->mode & ETM_MODE_CTXID)
- drvdata->ctrl |= ETMCR_CTXID_SIZE;
- else
- drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-
-err_unlock:
- spin_unlock(&drvdata->spinlock);
- return ret;
-}
-static DEVICE_ATTR_RW(mode);
-
-static ssize_t trigger_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->trigger_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t trigger_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->trigger_event = val & ETM_EVENT_MASK;
-
- return size;
-}
-static DEVICE_ATTR_RW(trigger_event);
-
-static ssize_t enable_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->enable_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t enable_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->enable_event = val & ETM_EVENT_MASK;
-
- return size;
-}
-static DEVICE_ATTR_RW(enable_event);
-
-static ssize_t fifofull_level_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->fifofull_level;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t fifofull_level_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->fifofull_level = val;
-
- return size;
-}
-static DEVICE_ATTR_RW(fifofull_level);
-
-static ssize_t addr_idx_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->addr_idx;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val >= drvdata->nr_addr_cmp)
- return -EINVAL;
-
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->addr_idx = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_idx);
-
-static ssize_t addr_single_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
- return -EINVAL;
- }
-
- val = drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_single_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
- return -EINVAL;
- }
-
- drvdata->addr_val[idx] = val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_single);
-
-static ssize_t addr_range_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 idx;
- unsigned long val1, val2;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
- if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
- (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val1 = drvdata->addr_val[idx];
- val2 = drvdata->addr_val[idx + 1];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx %#lx\n", val1, val2);
-}
-
-static ssize_t addr_range_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val1, val2;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
- return -EINVAL;
- /* Lower address comparator cannot have a higher address value */
- if (val1 > val2)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
- if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
- (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = val1;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
- drvdata->addr_val[idx + 1] = val2;
- drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
- drvdata->enable_ctrl1 |= (1 << (idx/2));
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_range);
-
-static ssize_t addr_start_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val = drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_start_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
- drvdata->startstop_ctrl |= (1 << idx);
- drvdata->enable_ctrl1 |= BIT(25);
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_start);
-
-static ssize_t addr_stop_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val = drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_stop_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
- drvdata->startstop_ctrl |= (1 << (idx + 16));
- drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_stop);
-
-static ssize_t addr_acctype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->addr_acctype[drvdata->addr_idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t addr_acctype_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->addr_acctype[drvdata->addr_idx] = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(addr_acctype);
-
-static ssize_t cntr_idx_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->cntr_idx;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val >= drvdata->nr_cntr)
- return -EINVAL;
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_idx = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_idx);
-
-static ssize_t cntr_rld_val_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->cntr_rld_val[drvdata->cntr_idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_val_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_val);
-
-static ssize_t cntr_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->cntr_event[drvdata->cntr_idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_event);
-
-static ssize_t cntr_rld_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->cntr_rld_event[drvdata->cntr_idx];
- spin_unlock(&drvdata->spinlock);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t cntr_rld_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_rld_event);
-
-static ssize_t cntr_val_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int i, ret = 0;
- u32 val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (!drvdata->enable) {
- spin_lock(&drvdata->spinlock);
- for (i = 0; i < drvdata->nr_cntr; i++)
- ret += sprintf(buf, "counter %d: %x\n",
- i, drvdata->cntr_val[i]);
- spin_unlock(&drvdata->spinlock);
- return ret;
- }
-
- for (i = 0; i < drvdata->nr_cntr; i++) {
- val = etm_readl(drvdata, ETMCNTVRn(i));
- ret += sprintf(buf, "counter %d: %x\n", i, val);
- }
-
- return ret;
-}
-
-static ssize_t cntr_val_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_val[drvdata->cntr_idx] = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(cntr_val);
-
-static ssize_t seq_12_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_12_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_12_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_12_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_12_event);
-
-static ssize_t seq_21_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_21_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_21_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_21_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_21_event);
-
-static ssize_t seq_23_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_23_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_23_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_23_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_23_event);
-
-static ssize_t seq_31_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_31_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_31_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_31_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_31_event);
-
-static ssize_t seq_32_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_32_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_32_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_32_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_32_event);
-
-static ssize_t seq_13_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_13_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_13_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->seq_13_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_13_event);
-
-static ssize_t seq_curr_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val, flags;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (!drvdata->enable) {
- val = drvdata->seq_curr_state;
- goto out;
- }
-
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
-
- CS_UNLOCK(drvdata->base);
- val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
- CS_LOCK(drvdata->base);
-
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-out:
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t seq_curr_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val > ETM_SEQ_STATE_MAX_VAL)
- return -EINVAL;
-
- drvdata->seq_curr_state = val;
-
- return size;
-}
-static DEVICE_ATTR_RW(seq_curr_state);
-
-static ssize_t ctxid_idx_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->ctxid_idx;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static void etm_disable(struct coresight_device *csdev)
{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- if (val >= drvdata->nr_ctxid_cmp)
- return -EINVAL;
+ u32 mode;
+ struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
+ * As long as the tracer isn't disabled, another entity can't
+ * change its status. As such we can read the status here without
+ * fearing it will change under us.
*/
- spin_lock(&drvdata->spinlock);
- drvdata->ctxid_idx = val;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_idx);
-
-static ssize_t ctxid_pid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
- spin_unlock(&drvdata->spinlock);
+ mode = local_read(&drvdata->mode);
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_pid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long vpid, pid;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &vpid);
- if (ret)
- return ret;
-
- pid = coresight_vpid_to_pid(vpid);
-
- spin_lock(&drvdata->spinlock);
- drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
- drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
- spin_unlock(&drvdata->spinlock);
-
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_pid);
-
-static ssize_t ctxid_mask_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->ctxid_mask;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t ctxid_mask_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->ctxid_mask = val;
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_mask);
-
-static ssize_t sync_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->sync_freq;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t sync_freq_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->sync_freq = val & ETM_SYNC_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(sync_freq);
-
-static ssize_t timestamp_event_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->timestamp_event;
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t timestamp_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->timestamp_event = val & ETM_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(timestamp_event);
-
-static ssize_t cpu_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->cpu;
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
-
-}
-static DEVICE_ATTR_RO(cpu);
-
-static ssize_t traceid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long val, flags;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (!drvdata->enable) {
- val = drvdata->traceid;
- goto out;
+ switch (mode) {
+ case CS_MODE_DISABLED:
+ break;
+ case CS_MODE_SYSFS:
+ etm_disable_sysfs(csdev);
+ break;
+ case CS_MODE_PERF:
+ etm_disable_perf(csdev);
+ break;
+ default:
+ WARN_ON_ONCE(mode);
+ return;
}
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
- CS_UNLOCK(drvdata->base);
-
- val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
-
- CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-out:
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t traceid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- drvdata->traceid = val & ETM_TRACEID_MASK;
- return size;
+ if (mode)
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
}
-static DEVICE_ATTR_RW(traceid);
-
-static struct attribute *coresight_etm_attrs[] = {
- &dev_attr_nr_addr_cmp.attr,
- &dev_attr_nr_cntr.attr,
- &dev_attr_nr_ctxid_cmp.attr,
- &dev_attr_etmsr.attr,
- &dev_attr_reset.attr,
- &dev_attr_mode.attr,
- &dev_attr_trigger_event.attr,
- &dev_attr_enable_event.attr,
- &dev_attr_fifofull_level.attr,
- &dev_attr_addr_idx.attr,
- &dev_attr_addr_single.attr,
- &dev_attr_addr_range.attr,
- &dev_attr_addr_start.attr,
- &dev_attr_addr_stop.attr,
- &dev_attr_addr_acctype.attr,
- &dev_attr_cntr_idx.attr,
- &dev_attr_cntr_rld_val.attr,
- &dev_attr_cntr_event.attr,
- &dev_attr_cntr_rld_event.attr,
- &dev_attr_cntr_val.attr,
- &dev_attr_seq_12_event.attr,
- &dev_attr_seq_21_event.attr,
- &dev_attr_seq_23_event.attr,
- &dev_attr_seq_31_event.attr,
- &dev_attr_seq_32_event.attr,
- &dev_attr_seq_13_event.attr,
- &dev_attr_seq_curr_state.attr,
- &dev_attr_ctxid_idx.attr,
- &dev_attr_ctxid_pid.attr,
- &dev_attr_ctxid_mask.attr,
- &dev_attr_sync_freq.attr,
- &dev_attr_timestamp_event.attr,
- &dev_attr_traceid.attr,
- &dev_attr_cpu.attr,
- NULL,
-};
-
-#define coresight_simple_func(name, offset) \
-static ssize_t name##_show(struct device *_dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
- readl_relaxed(drvdata->base + offset)); \
-} \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(etmccr, ETMCCR);
-coresight_simple_func(etmccer, ETMCCER);
-coresight_simple_func(etmscr, ETMSCR);
-coresight_simple_func(etmidr, ETMIDR);
-coresight_simple_func(etmcr, ETMCR);
-coresight_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_simple_func(etmteevr, ETMTEEVR);
-coresight_simple_func(etmtssvr, ETMTSSCR);
-coresight_simple_func(etmtecr1, ETMTECR1);
-coresight_simple_func(etmtecr2, ETMTECR2);
-
-static struct attribute *coresight_etm_mgmt_attrs[] = {
- &dev_attr_etmccr.attr,
- &dev_attr_etmccer.attr,
- &dev_attr_etmscr.attr,
- &dev_attr_etmidr.attr,
- &dev_attr_etmcr.attr,
- &dev_attr_etmtraceidr.attr,
- &dev_attr_etmteevr.attr,
- &dev_attr_etmtssvr.attr,
- &dev_attr_etmtecr1.attr,
- &dev_attr_etmtecr2.attr,
- NULL,
-};
-static const struct attribute_group coresight_etm_group = {
- .attrs = coresight_etm_attrs,
-};
-
-
-static const struct attribute_group coresight_etm_mgmt_group = {
- .attrs = coresight_etm_mgmt_attrs,
- .name = "mgmt",
+static const struct coresight_ops_source etm_source_ops = {
+ .cpu_id = etm_cpu_id,
+ .trace_id = etm_trace_id,
+ .enable = etm_enable,
+ .disable = etm_disable,
};
-static const struct attribute_group *coresight_etm_groups[] = {
- &coresight_etm_group,
- &coresight_etm_mgmt_group,
- NULL,
+static const struct coresight_ops etm_cs_ops = {
+ .source_ops = &etm_source_ops,
};
static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
@@ -1658,7 +657,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
etmdrvdata[cpu]->os_unlock = true;
}
- if (etmdrvdata[cpu]->enable)
+ if (local_read(&etmdrvdata[cpu]->mode))
etm_enable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
break;
@@ -1671,7 +670,7 @@ static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
case CPU_DYING:
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (etmdrvdata[cpu]->enable)
+ if (local_read(&etmdrvdata[cpu]->mode))
etm_disable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
break;
@@ -1707,6 +706,9 @@ static void etm_init_arch_data(void *info)
u32 etmccr;
struct etm_drvdata *drvdata = info;
+ /* Make sure all registers are accessible */
+ etm_os_unlock(drvdata);
+
CS_UNLOCK(drvdata->base);
/* First dummy read */
@@ -1743,40 +745,9 @@ static void etm_init_arch_data(void *info)
CS_LOCK(drvdata->base);
}
-static void etm_init_default_data(struct etm_drvdata *drvdata)
+static void etm_init_trace_id(struct etm_drvdata *drvdata)
{
- /*
- * A trace ID of value 0 is invalid, so let's start at some
- * random value that fits in 7 bits and will be just as good.
- */
- static int etm3x_traceid = 0x10;
-
- u32 flags = (1 << 0 | /* instruction execute*/
- 3 << 3 | /* ARM instruction */
- 0 << 5 | /* No data value comparison */
- 0 << 7 | /* No exact mach */
- 0 << 8 | /* Ignore context ID */
- 0 << 10); /* Security ignored */
-
- /*
- * Initial configuration only - guarantees sources handled by
- * this driver have a unique ID at startup time but not between
- * all other types of sources. For that we lean on the core
- * framework.
- */
- drvdata->traceid = etm3x_traceid++;
- drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
- drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
- if (drvdata->nr_addr_cmp >= 2) {
- drvdata->addr_val[0] = (u32) _stext;
- drvdata->addr_val[1] = (u32) _etext;
- drvdata->addr_acctype[0] = flags;
- drvdata->addr_acctype[1] = flags;
- drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
- drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
- }
-
- etm_set_default(drvdata);
+ drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
}
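Trace IDs now come from a central allocator instead of a driver-local counter. A minimal sketch of the per-CPU mapping this series relies on (assuming the 0x10 seed from coresight-pmu.h; the function name is hypothetical):

static inline int example_trace_id(int cpu)
{
	/* CPU0 -> 0x10, CPU1 -> 0x12, ...; odd IDs stay free */
	return 0x10 + cpu * 2;
}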
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
@@ -1831,9 +802,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
get_online_cpus();
etmdrvdata[drvdata->cpu] = drvdata;
- if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
- drvdata->os_unlock = true;
-
if (smp_call_function_single(drvdata->cpu,
etm_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
@@ -1847,7 +815,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
ret = -EINVAL;
goto err_arch_supported;
}
- etm_init_default_data(drvdata);
+
+ etm_init_trace_id(drvdata);
+ etm_set_default(&drvdata->config);
desc->type = CORESIGHT_DEV_TYPE_SOURCE;
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -1861,6 +831,12 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
goto err_arch_supported;
}
+ ret = etm_perf_symlink(drvdata->csdev, true);
+ if (ret) {
+ coresight_unregister(drvdata->csdev);
+ goto err_arch_supported;
+ }
+
pm_runtime_put(&adev->dev);
dev_info(dev, "%s initialized\n", (char *)id->data);
@@ -1877,17 +853,6 @@ err_arch_supported:
return ret;
}
-static int etm_remove(struct amba_device *adev)
-{
- struct etm_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- if (--etm_count == 0)
- unregister_hotcpu_notifier(&etm_cpu_notifier);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int etm_runtime_suspend(struct device *dev)
{
@@ -1948,13 +913,9 @@ static struct amba_driver etm_driver = {
.name = "coresight-etm3x",
.owner = THIS_MODULE,
.pm = &etm_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = etm_probe,
- .remove = etm_remove,
.id_table = etm_ids,
};
-
-module_amba_driver(etm_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
+builtin_amba_driver(etm_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a6707642bb23..1c59bd36834c 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
@@ -32,6 +31,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
+#include <linux/perf_event.h>
#include <asm/sections.h>
#include "coresight-etm4x.h"
@@ -63,6 +63,13 @@ static bool etm4_arch_supported(u8 arch)
return true;
}
+static int etm4_cpu_id(struct coresight_device *csdev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->cpu;
+}
+
static int etm4_trace_id(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -72,7 +79,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
if (!drvdata->enable)
return drvdata->trcid;
- pm_runtime_get_sync(drvdata->dev);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -81,7 +87,6 @@ static int etm4_trace_id(struct coresight_device *csdev)
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
return trace_id;
}
@@ -182,12 +187,12 @@ static void etm4_enable_hw(void *info)
dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
-static int etm4_enable(struct coresight_device *csdev)
+static int etm4_enable(struct coresight_device *csdev,
+ struct perf_event_attr *attr, u32 mode)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
- pm_runtime_get_sync(drvdata->dev);
spin_lock(&drvdata->spinlock);
/*
@@ -207,7 +212,6 @@ static int etm4_enable(struct coresight_device *csdev)
return 0;
err:
spin_unlock(&drvdata->spinlock);
- pm_runtime_put(drvdata->dev);
return ret;
}
@@ -256,12 +260,11 @@ static void etm4_disable(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
put_online_cpus();
- pm_runtime_put(drvdata->dev);
-
dev_info(drvdata->dev, "ETM tracing disabled\n");
}
static const struct coresight_ops_source etm4_source_ops = {
+ .cpu_id = etm4_cpu_id,
.trace_id = etm4_trace_id,
.enable = etm4_enable,
.disable = etm4_disable,
@@ -2219,7 +2222,7 @@ static ssize_t name##_show(struct device *_dev, \
return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
readl_relaxed(drvdata->base + offset)); \
} \
-DEVICE_ATTR_RO(name)
+static DEVICE_ATTR_RO(name)
coresight_simple_func(trcoslsr, TRCOSLSR);
coresight_simple_func(trcpdcr, TRCPDCR);
@@ -2684,17 +2687,6 @@ err_coresight_register:
return ret;
}
-static int etm4_remove(struct amba_device *adev)
-{
- struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- if (--etm4_count == 0)
- unregister_hotcpu_notifier(&etm4_cpu_notifier);
-
- return 0;
-}
-
static struct amba_id etm4_ids[] = {
{ /* ETM 4.0 - Qualcomm */
.id = 0x0003b95d,
@@ -2712,10 +2704,9 @@ static struct amba_id etm4_ids[] = {
static struct amba_driver etm4x_driver = {
.drv = {
.name = "coresight-etm4x",
+ .suppress_bind_attrs = true,
},
.probe = etm4_probe,
- .remove = etm4_remove,
.id_table = etm4_ids,
};
-
-module_amba_driver(etm4x_driver);
+builtin_amba_driver(etm4x_driver);
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 2e36bde7fcb4..0600ca30649d 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Funnel driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -69,7 +70,6 @@ static int funnel_enable(struct coresight_device *csdev, int inport,
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
funnel_enable_hw(drvdata, inport);
dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
@@ -95,7 +95,6 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
funnel_disable_hw(drvdata, inport);
- pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
}
@@ -226,14 +225,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int funnel_remove(struct amba_device *adev)
-{
- struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int funnel_runtime_suspend(struct device *dev)
{
@@ -273,13 +264,9 @@ static struct amba_driver funnel_driver = {
.name = "coresight-funnel",
.owner = THIS_MODULE,
.pm = &funnel_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = funnel_probe,
- .remove = funnel_remove,
.id_table = funnel_ids,
};
-
-module_amba_driver(funnel_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Funnel driver");
+builtin_amba_driver(funnel_driver);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 62fcd98cc7cf..333eddaed339 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -34,6 +34,15 @@
#define TIMEOUT_US 100
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
+#define ETM_MODE_EXCL_KERN BIT(30)
+#define ETM_MODE_EXCL_USER BIT(31)
+
+enum cs_mode {
+ CS_MODE_DISABLED,
+ CS_MODE_SYSFS,
+ CS_MODE_PERF,
+};
+
static inline void CS_LOCK(void __iomem *addr)
{
do {
@@ -52,6 +61,12 @@ static inline void CS_UNLOCK(void __iomem *addr)
} while (0);
}
+void coresight_disable_path(struct list_head *path);
+int coresight_enable_path(struct list_head *path, u32 mode);
+struct coresight_device *coresight_get_sink(struct list_head *path);
+struct list_head *coresight_build_path(struct coresight_device *csdev);
+void coresight_release_path(struct list_head *path);
+
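These helpers carry the path lifecycle formerly buried in coresight_enable(). A minimal sketch of the intended call sequence (illustrative; it assumes coresight_build_path() reports failure with an ERR_PTR):

static int example_run_session(struct coresight_device *source)
{
	int ret;
	struct list_head *path;

	path = coresight_build_path(source);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = coresight_enable_path(path, CS_MODE_SYSFS);
	if (!ret) {
		/* ... trace session runs ... */
		coresight_disable_path(path);
	}

	coresight_release_path(path);
	return ret;
}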
#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
extern int etm_readl_cp14(u32 off, unsigned int *val);
extern int etm_writel_cp14(u32 off, u32 val);
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
index 584059e9e866..700f710e4bfa 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -15,7 +15,6 @@
#include <linux/clk.h>
#include <linux/coresight.h>
#include <linux/device.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -48,8 +47,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
-
CS_UNLOCK(drvdata->base);
/*
@@ -86,8 +83,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
CS_LOCK(drvdata->base);
- pm_runtime_put(drvdata->dev);
-
dev_info(drvdata->dev, "REPLICATOR disabled\n");
}
@@ -156,15 +151,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int replicator_remove(struct amba_device *adev)
-{
- struct replicator_state *drvdata = amba_get_drvdata(adev);
-
- pm_runtime_disable(&adev->dev);
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int replicator_runtime_suspend(struct device *dev)
{
@@ -206,10 +192,9 @@ static struct amba_driver replicator_driver = {
.drv = {
.name = "coresight-replicator-qcom",
.pm = &replicator_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = replicator_probe,
- .remove = replicator_remove,
.id_table = replicator_ids,
};
-
-module_amba_driver(replicator_driver);
+builtin_amba_driver(replicator_driver);
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 963ac197c253..4299c0569340 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Replicator driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
@@ -41,7 +42,6 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR enabled\n");
return 0;
}
@@ -51,7 +51,6 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "REPLICATOR disabled\n");
}
@@ -127,20 +126,6 @@ out_disable_pm:
return ret;
}
-static int replicator_remove(struct platform_device *pdev)
-{
- struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
-
- coresight_unregister(drvdata->csdev);
- pm_runtime_get_sync(&pdev->dev);
- if (!IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int replicator_runtime_suspend(struct device *dev)
{
@@ -175,15 +160,11 @@ static const struct of_device_id replicator_match[] = {
static struct platform_driver replicator_driver = {
.probe = replicator_probe,
- .remove = replicator_remove,
.driver = {
.name = "coresight-replicator",
.of_match_table = replicator_match,
.pm = &replicator_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
};
-
builtin_platform_driver(replicator_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index a57c7ec1661f..1be191f5d39c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Trace Memory Controller driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -124,7 +125,7 @@ struct tmc_drvdata {
bool reading;
char *buf;
dma_addr_t paddr;
- void __iomem *vaddr;
+ void *vaddr;
u32 size;
bool enable;
enum tmc_config_type config_type;
@@ -242,12 +243,9 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
unsigned long flags;
- pm_runtime_get_sync(drvdata->dev);
-
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
return -EBUSY;
}
@@ -268,7 +266,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
return 0;
}
-static int tmc_enable_sink(struct coresight_device *csdev)
+static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -381,8 +379,6 @@ out:
drvdata->enable = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-
dev_info(drvdata->dev, "TMC disabled\n");
}
@@ -766,23 +762,10 @@ err_misc_register:
err_devm_kzalloc:
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
dma_free_coherent(dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
+ drvdata->vaddr, drvdata->paddr);
return ret;
}
-static int tmc_remove(struct amba_device *adev)
-{
- struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
-
- misc_deregister(&drvdata->miscdev);
- coresight_unregister(drvdata->csdev);
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
- dma_free_coherent(drvdata->dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
-
- return 0;
-}
-
static struct amba_id tmc_ids[] = {
{
.id = 0x0003b961,
@@ -795,13 +778,9 @@ static struct amba_driver tmc_driver = {
.drv = {
.name = "coresight-tmc",
.owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
},
.probe = tmc_probe,
- .remove = tmc_remove,
.id_table = tmc_ids,
};
-
-module_amba_driver(tmc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
+builtin_amba_driver(tmc_driver);
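Note on the error-path fix above: the old call passed &drvdata->paddr and GFP_KERNEL where dma_free_coherent() expects the CPU virtual address and the DMA handle. A minimal sketch of the intended alloc/free pairing follows; the helper names are illustrative, while the fields match tmc_drvdata in this diff.

/*
 * Sketch only: dma_free_coherent() must mirror dma_alloc_coherent(),
 * taking the CPU virtual address (vaddr) and the DMA handle (paddr)
 * returned by the allocation.
 */
static int example_etr_buf_alloc(struct device *dev,
				 struct tmc_drvdata *drvdata)
{
	drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
					    &drvdata->paddr, GFP_KERNEL);
	return drvdata->vaddr ? 0 : -ENOMEM;
}

static void example_etr_buf_free(struct device *dev,
				 struct tmc_drvdata *drvdata)
{
	dma_free_coherent(dev, drvdata->size, drvdata->vaddr, drvdata->paddr);
}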
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 7214efd10db5..8fb09d9237ab 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -1,5 +1,7 @@
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
+ * Description: CoreSight Trace Port Interface Unit driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -11,7 +13,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -70,11 +71,10 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int tpiu_enable(struct coresight_device *csdev)
+static int tpiu_enable(struct coresight_device *csdev, u32 mode)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- pm_runtime_get_sync(csdev->dev.parent);
tpiu_enable_hw(drvdata);
dev_info(drvdata->dev, "TPIU enabled\n");
@@ -98,7 +98,6 @@ static void tpiu_disable(struct coresight_device *csdev)
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
tpiu_disable_hw(drvdata);
- pm_runtime_put(csdev->dev.parent);
dev_info(drvdata->dev, "TPIU disabled\n");
}
@@ -172,14 +171,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int tpiu_remove(struct amba_device *adev)
-{
- struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int tpiu_runtime_suspend(struct device *dev)
{
@@ -223,13 +214,9 @@ static struct amba_driver tpiu_driver = {
.name = "coresight-tpiu",
.owner = THIS_MODULE,
.pm = &tpiu_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = tpiu_probe,
- .remove = tpiu_remove,
.id_table = tpiu_ids,
};
-
-module_amba_driver(tpiu_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
+builtin_amba_driver(tpiu_driver);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 93738dfbf631..2ea5961092c1 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -11,7 +11,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
@@ -24,11 +23,28 @@
#include <linux/coresight.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
+/**
+ * struct coresight_node - elements of a path, from source to sink
+ * @csdev: Address of an element.
+ * @link: hook to the list.
+ */
+struct coresight_node {
+ struct coresight_device *csdev;
+ struct list_head link;
+};
+
+/*
+ * When operating Coresight drivers from the sysFS interface, only a single
+ * path can exist from a tracer (associated with a CPU) to a sink.
+ */
+static DEFINE_PER_CPU(struct list_head *, sysfs_path);
+
static int coresight_id_match(struct device *dev, void *data)
{
int trace_id, i_trace_id;
@@ -68,15 +84,12 @@ static int coresight_source_is_unique(struct coresight_device *csdev)
csdev, coresight_id_match);
}
-static int coresight_find_link_inport(struct coresight_device *csdev)
+static int coresight_find_link_inport(struct coresight_device *csdev,
+ struct coresight_device *parent)
{
int i;
- struct coresight_device *parent;
struct coresight_connection *conn;
- parent = container_of(csdev->path_link.next,
- struct coresight_device, path_link);
-
for (i = 0; i < parent->nr_outport; i++) {
conn = &parent->conns[i];
if (conn->child_dev == csdev)
@@ -89,15 +102,12 @@ static int coresight_find_link_inport(struct coresight_device *csdev)
return 0;
}
-static int coresight_find_link_outport(struct coresight_device *csdev)
+static int coresight_find_link_outport(struct coresight_device *csdev,
+ struct coresight_device *child)
{
int i;
- struct coresight_device *child;
struct coresight_connection *conn;
- child = container_of(csdev->path_link.prev,
- struct coresight_device, path_link);
-
for (i = 0; i < csdev->nr_outport; i++) {
conn = &csdev->conns[i];
if (conn->child_dev == child)
@@ -110,13 +120,13 @@ static int coresight_find_link_outport(struct coresight_device *csdev)
return 0;
}
-static int coresight_enable_sink(struct coresight_device *csdev)
+static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
{
int ret;
if (!csdev->enable) {
if (sink_ops(csdev)->enable) {
- ret = sink_ops(csdev)->enable(csdev);
+ ret = sink_ops(csdev)->enable(csdev, mode);
if (ret)
return ret;
}
@@ -138,14 +148,19 @@ static void coresight_disable_sink(struct coresight_device *csdev)
}
}
-static int coresight_enable_link(struct coresight_device *csdev)
+static int coresight_enable_link(struct coresight_device *csdev,
+ struct coresight_device *parent,
+ struct coresight_device *child)
{
int ret;
int link_subtype;
int refport, inport, outport;
- inport = coresight_find_link_inport(csdev);
- outport = coresight_find_link_outport(csdev);
+ if (!parent || !child)
+ return -EINVAL;
+
+ inport = coresight_find_link_inport(csdev, parent);
+ outport = coresight_find_link_outport(csdev, child);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
@@ -168,14 +183,19 @@ static int coresight_enable_link(struct coresight_device *csdev)
return 0;
}
-static void coresight_disable_link(struct coresight_device *csdev)
+static void coresight_disable_link(struct coresight_device *csdev,
+ struct coresight_device *parent,
+ struct coresight_device *child)
{
int i, nr_conns;
int link_subtype;
int refport, inport, outport;
- inport = coresight_find_link_inport(csdev);
- outport = coresight_find_link_outport(csdev);
+ if (!parent || !child)
+ return;
+
+ inport = coresight_find_link_inport(csdev, parent);
+ outport = coresight_find_link_outport(csdev, child);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
@@ -201,7 +221,7 @@ static void coresight_disable_link(struct coresight_device *csdev)
csdev->enable = false;
}
-static int coresight_enable_source(struct coresight_device *csdev)
+static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
{
int ret;
@@ -213,7 +233,7 @@ static int coresight_enable_source(struct coresight_device *csdev)
if (!csdev->enable) {
if (source_ops(csdev)->enable) {
- ret = source_ops(csdev)->enable(csdev);
+ ret = source_ops(csdev)->enable(csdev, NULL, mode);
if (ret)
return ret;
}
@@ -235,109 +255,188 @@ static void coresight_disable_source(struct coresight_device *csdev)
}
}
-static int coresight_enable_path(struct list_head *path)
+void coresight_disable_path(struct list_head *path)
{
- int ret = 0;
- struct coresight_device *cd;
-
- /*
- * At this point we have a full @path, from source to sink. The
- * sink is the first entry and the source the last one. Go through
- * all the components and enable them one by one.
- */
- list_for_each_entry(cd, path, path_link) {
- if (cd == list_first_entry(path, struct coresight_device,
- path_link)) {
- ret = coresight_enable_sink(cd);
- } else if (list_is_last(&cd->path_link, path)) {
- /*
- * Don't enable the source just yet - this needs to
- * happen at the very end when all links and sink
- * along the path have been configured properly.
- */
- ;
- } else {
- ret = coresight_enable_link(cd);
+ struct coresight_node *nd;
+ struct coresight_device *csdev, *parent, *child;
+
+ list_for_each_entry(nd, path, link) {
+ csdev = nd->csdev;
+
+ switch (csdev->type) {
+ case CORESIGHT_DEV_TYPE_SINK:
+ case CORESIGHT_DEV_TYPE_LINKSINK:
+ coresight_disable_sink(csdev);
+ break;
+ case CORESIGHT_DEV_TYPE_SOURCE:
+ /* sources are disabled from either sysFS or Perf */
+ break;
+ case CORESIGHT_DEV_TYPE_LINK:
+ parent = list_prev_entry(nd, link)->csdev;
+ child = list_next_entry(nd, link)->csdev;
+ coresight_disable_link(csdev, parent, child);
+ break;
+ default:
+ break;
}
- if (ret)
- goto err;
}
+}
- return 0;
-err:
- list_for_each_entry_continue_reverse(cd, path, path_link) {
- if (cd == list_first_entry(path, struct coresight_device,
- path_link)) {
- coresight_disable_sink(cd);
- } else if (list_is_last(&cd->path_link, path)) {
- ;
- } else {
- coresight_disable_link(cd);
+int coresight_enable_path(struct list_head *path, u32 mode)
+{
+ int ret = 0;
+ struct coresight_node *nd;
+ struct coresight_device *csdev, *parent, *child;
+
+ list_for_each_entry_reverse(nd, path, link) {
+ csdev = nd->csdev;
+
+ switch (csdev->type) {
+ case CORESIGHT_DEV_TYPE_SINK:
+ case CORESIGHT_DEV_TYPE_LINKSINK:
+ ret = coresight_enable_sink(csdev, mode);
+ if (ret)
+ goto err;
+ break;
+ case CORESIGHT_DEV_TYPE_SOURCE:
+ /* sources are enabled from either sysFS or Perf */
+ break;
+ case CORESIGHT_DEV_TYPE_LINK:
+ parent = list_prev_entry(nd, link)->csdev;
+ child = list_next_entry(nd, link)->csdev;
+ ret = coresight_enable_link(csdev, parent, child);
+ if (ret)
+ goto err;
+ break;
+ default:
+ goto err;
}
}
+out:
return ret;
+err:
+ coresight_disable_path(path);
+ goto out;
}
-static int coresight_disable_path(struct list_head *path)
+struct coresight_device *coresight_get_sink(struct list_head *path)
{
- struct coresight_device *cd;
+ struct coresight_device *csdev;
- list_for_each_entry_reverse(cd, path, path_link) {
- if (cd == list_first_entry(path, struct coresight_device,
- path_link)) {
- coresight_disable_sink(cd);
- } else if (list_is_last(&cd->path_link, path)) {
- /*
- * The source has already been stopped, no need
- * to do it again here.
- */
- ;
- } else {
- coresight_disable_link(cd);
+ if (!path)
+ return NULL;
+
+ csdev = list_last_entry(path, struct coresight_node, link)->csdev;
+ if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+ csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+ return NULL;
+
+ return csdev;
+}
+
+/**
+ * _coresight_build_path - recursively build a path from a @csdev to a sink.
+ * @csdev: The device to start from.
+ * @path: The list to add devices to.
+ *
+ * The tree of Coresight devices is traversed until an activated sink is
+ * found. From there the sink is added to the list along with all the
+ * devices that led to that point - the end result is a list from source
+ * to sink. In that list the source is the first device and the sink the
+ * last one.
+ */
+static int _coresight_build_path(struct coresight_device *csdev,
+ struct list_head *path)
+{
+ int i;
+ bool found = false;
+ struct coresight_node *node;
+ struct coresight_connection *conn;
+
+ /* An activated sink has been found. Enqueue the element */
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+ goto out;
+
+ /* Not a sink - recursively explore each port found on this element */
+ for (i = 0; i < csdev->nr_outport; i++) {
+ conn = &csdev->conns[i];
+ if (_coresight_build_path(conn->child_dev, path) == 0) {
+ found = true;
+ break;
}
}
+ if (!found)
+ return -ENODEV;
+
+out:
+ /*
+ * A path from this element to a sink has been found. The elements
+ * leading to the sink are already enqueued; all that is left to do
+ * is tell the PM runtime core we need this element and add a node
+ * for it.
+ */
+ node = kzalloc(sizeof(struct coresight_node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->csdev = csdev;
+ list_add(&node->link, path);
+ pm_runtime_get_sync(csdev->dev.parent);
+
return 0;
}
-static int coresight_build_paths(struct coresight_device *csdev,
- struct list_head *path,
- bool enable)
+struct list_head *coresight_build_path(struct coresight_device *csdev)
{
- int i, ret = -EINVAL;
- struct coresight_connection *conn;
+ struct list_head *path;
- list_add(&csdev->path_link, path);
+ path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!path)
+ return NULL;
- if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
- csdev->activated) {
- if (enable)
- ret = coresight_enable_path(path);
- else
- ret = coresight_disable_path(path);
- } else {
- for (i = 0; i < csdev->nr_outport; i++) {
- conn = &csdev->conns[i];
- if (coresight_build_paths(conn->child_dev,
- path, enable) == 0)
- ret = 0;
- }
+ INIT_LIST_HEAD(path);
+
+ if (_coresight_build_path(csdev, path)) {
+ kfree(path);
+ path = NULL;
}
- if (list_first_entry(path, struct coresight_device, path_link) != csdev)
- dev_err(&csdev->dev, "wrong device in %s\n", __func__);
+ return path;
+}
- list_del(&csdev->path_link);
+/**
+ * coresight_release_path - release a previously built path.
+ * @path: the path to release.
+ *
+ * Go through all the elements of a path and 1) remove each from the list
+ * and 2) free the memory allocated for each node.
+ */
+void coresight_release_path(struct list_head *path)
+{
+ struct coresight_device *csdev;
+ struct coresight_node *nd, *next;
- return ret;
+ list_for_each_entry_safe(nd, next, path, link) {
+ csdev = nd->csdev;
+
+ pm_runtime_put_sync(csdev->dev.parent);
+ list_del(&nd->link);
+ kfree(nd);
+ }
+
+ kfree(path);
+ path = NULL;
}
int coresight_enable(struct coresight_device *csdev)
{
int ret = 0;
- LIST_HEAD(path);
+ int cpu;
+ struct list_head *path;
mutex_lock(&coresight_mutex);
if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -348,22 +447,47 @@ int coresight_enable(struct coresight_device *csdev)
if (csdev->enable)
goto out;
- if (coresight_build_paths(csdev, &path, true)) {
- dev_err(&csdev->dev, "building path(s) failed\n");
+ path = coresight_build_path(csdev);
+ if (!path) {
+ pr_err("building path(s) failed\n");
goto out;
}
- if (coresight_enable_source(csdev))
- dev_err(&csdev->dev, "source enable failed\n");
+ ret = coresight_enable_path(path, CS_MODE_SYSFS);
+ if (ret)
+ goto err_path;
+
+ ret = coresight_enable_source(csdev, CS_MODE_SYSFS);
+ if (ret)
+ goto err_source;
+
+ /*
+ * When working from sysFS it is important to keep track
+ * of the paths that were created so that they can be
+ * undone in 'coresight_disable()'. Since there can only
+ * be a single session per tracer (when working from sysFS)
+ * a per-cpu variable will do just fine.
+ */
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ per_cpu(sysfs_path, cpu) = path;
+
out:
mutex_unlock(&coresight_mutex);
return ret;
+
+err_source:
+ coresight_disable_path(path);
+
+err_path:
+ coresight_release_path(path);
+ goto out;
}
EXPORT_SYMBOL_GPL(coresight_enable);
void coresight_disable(struct coresight_device *csdev)
{
- LIST_HEAD(path);
+ int cpu;
+ struct list_head *path;
mutex_lock(&coresight_mutex);
if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
@@ -373,9 +497,12 @@ void coresight_disable(struct coresight_device *csdev)
if (!csdev->enable)
goto out;
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ path = per_cpu(sysfs_path, cpu);
coresight_disable_source(csdev);
- if (coresight_build_paths(csdev, &path, false))
- dev_err(&csdev->dev, "releasing path(s) failed\n");
+ coresight_disable_path(path);
+ coresight_release_path(path);
+ per_cpu(sysfs_path, cpu) = NULL;
out:
mutex_unlock(&coresight_mutex);
@@ -481,6 +608,8 @@ static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
+ kfree(csdev->conns);
+ kfree(csdev->refcnt);
kfree(csdev);
}
@@ -536,7 +665,7 @@ static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
* are hooked-up with each newly added component.
*/
bus_for_each_dev(&coresight_bustype, NULL,
- csdev, coresight_orphan_match);
+ csdev, coresight_orphan_match);
}
@@ -568,6 +697,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
if (dev) {
conn->child_dev = to_coresight_device(dev);
+ /* and put reference from 'bus_find_device()' */
+ put_device(dev);
} else {
csdev->orphan = true;
conn->child_dev = NULL;
@@ -575,6 +706,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
}
}
+static int coresight_remove_match(struct device *dev, void *data)
+{
+ int i;
+ struct coresight_device *csdev, *iterator;
+ struct coresight_connection *conn;
+
+ csdev = data;
+ iterator = to_coresight_device(dev);
+
+ /* No need to check oneself */
+ if (csdev == iterator)
+ return 0;
+
+ /*
+ * Cycle through all the connections of that component. If we find
+ * a connection whose name matches @csdev, remove it.
+ */
+ for (i = 0; i < iterator->nr_outport; i++) {
+ conn = &iterator->conns[i];
+
+ if (conn->child_dev == NULL)
+ continue;
+
+ if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+ iterator->orphan = true;
+ conn->child_dev = NULL;
+ /* No need to continue */
+ break;
+ }
+ }
+
+ /*
+ * Returning '0' ensures that all known components on the
+ * bus will be checked.
+ */
+ return 0;
+}
+
+static void coresight_remove_conns(struct coresight_device *csdev)
+{
+ bus_for_each_dev(&coresight_bustype, NULL,
+ csdev, coresight_remove_match);
+}
+
/**
* coresight_timeout - loop until a bit has changed to a specific state.
* @addr: base address of the area of interest.
@@ -713,13 +888,8 @@ EXPORT_SYMBOL_GPL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
- mutex_lock(&coresight_mutex);
-
- kfree(csdev->conns);
+ /* Remove references of that device in the topology */
+ coresight_remove_conns(csdev);
device_unregister(&csdev->dev);
-
- mutex_unlock(&coresight_mutex);
}
EXPORT_SYMBOL_GPL(coresight_unregister);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index b0973617826f..b68da1888fd5 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -10,7 +10,6 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -86,7 +85,7 @@ static int of_coresight_alloc_memory(struct device *dev,
return -ENOMEM;
/* Children connected to this component via @outports */
- pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+ pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
sizeof(*pdata->child_names),
GFP_KERNEL);
if (!pdata->child_names)
diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig
index b7a9073d968b..1b412f8a56b5 100644
--- a/drivers/hwtracing/intel_th/Kconfig
+++ b/drivers/hwtracing/intel_th/Kconfig
@@ -1,5 +1,6 @@
config INTEL_TH
tristate "Intel(R) Trace Hub controller"
+ depends on HAS_DMA && HAS_IOMEM
help
Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that
produce, switch and output trace data from multiple hardware and
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 165d3001c301..4272f2ce5f6e 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -124,17 +124,34 @@ static struct device_type intel_th_source_device_type = {
.release = intel_th_device_release,
};
+static struct intel_th *to_intel_th(struct intel_th_device *thdev)
+{
+ /*
+ * subdevice tree is flat: if this one is not a switch, its
+ * parent must be
+ */
+ if (thdev->type != INTEL_TH_SWITCH)
+ thdev = to_intel_th_hub(thdev);
+
+ if (WARN_ON_ONCE(!thdev || thdev->type != INTEL_TH_SWITCH))
+ return NULL;
+
+ return dev_get_drvdata(thdev->dev.parent);
+}
+
static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
struct intel_th_device *thdev = to_intel_th_device(dev);
+ struct intel_th *th = to_intel_th(thdev);
char *node;
if (thdev->id >= 0)
- node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", 0, thdev->name,
- thdev->id);
+ node = kasprintf(GFP_KERNEL, "intel_th%d/%s%d", th->id,
+ thdev->name, thdev->id);
else
- node = kasprintf(GFP_KERNEL, "intel_th%d/%s", 0, thdev->name);
+ node = kasprintf(GFP_KERNEL, "intel_th%d/%s", th->id,
+ thdev->name);
return node;
}
@@ -319,6 +336,7 @@ static struct intel_th_subdevice {
unsigned nres;
unsigned type;
unsigned otype;
+ unsigned scrpd;
int id;
} intel_th_subdevices[TH_SUBDEVICE_MAX] = {
{
@@ -352,6 +370,7 @@ static struct intel_th_subdevice {
.id = 0,
.type = INTEL_TH_OUTPUT,
.otype = GTH_MSU,
+ .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC0_IS_ENABLED,
},
{
.nres = 2,
@@ -371,6 +390,7 @@ static struct intel_th_subdevice {
.id = 1,
.type = INTEL_TH_OUTPUT,
.otype = GTH_MSU,
+ .scrpd = SCRPD_MEM_IS_PRIM_DEST | SCRPD_MSC1_IS_ENABLED,
},
{
.nres = 2,
@@ -403,6 +423,7 @@ static struct intel_th_subdevice {
.name = "pti",
.type = INTEL_TH_OUTPUT,
.otype = GTH_PTI,
+ .scrpd = SCRPD_PTI_IS_PRIM_DEST,
},
{
.nres = 1,
@@ -477,6 +498,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
thdev->dev.devt = MKDEV(th->major, i);
thdev->output.type = subdev->otype;
thdev->output.port = -1;
+ thdev->output.scratchpad = subdev->scrpd;
}
err = device_add(&thdev->dev);
@@ -579,6 +601,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
}
th->dev = dev;
+ dev_set_drvdata(dev, th);
+
err = intel_th_populate(th, devres, ndevres, irq);
if (err)
goto err_chrdev;
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 2dc5378ccd3a..9beea0b54231 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -146,24 +146,6 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port)
iowrite32(val, gth->base + reg);
}
-/*static int gth_master_get(struct gth_device *gth, unsigned int master)
-{
- unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
- unsigned int shift = (master & 0x7) * 4;
- u32 val;
-
- if (master >= 256) {
- reg = REG_GTH_GSWTDEST;
- shift = 0;
- }
-
- val = ioread32(gth->base + reg);
- val &= (0xf << shift);
- val >>= shift;
-
- return val ? val & 0x7 : -1;
- }*/
-
static ssize_t master_attr_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -304,6 +286,10 @@ static int intel_th_gth_reset(struct gth_device *gth)
if (scratchpad & SCRPD_DEBUGGER_IN_USE)
return -EBUSY;
+ /* Always save/restore STH and TU registers in S0ix entry/exit */
+ scratchpad |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
+ iowrite32(scratchpad, gth->base + REG_GTH_SCRPD0);
+
/* output ports */
for (port = 0; port < 8; port++) {
if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
@@ -506,6 +492,10 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
if (!count)
dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
output->port);
+
+ reg = ioread32(gth->base + REG_GTH_SCRPD0);
+ reg &= ~output->scratchpad;
+ iowrite32(reg, gth->base + REG_GTH_SCRPD0);
}
/**
@@ -520,7 +510,7 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
struct intel_th_output *output)
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
- u32 scr = 0xfc0000;
+ u32 scr = 0xfc0000, scrpd;
int master;
spin_lock(&gth->gth_lock);
@@ -535,6 +525,10 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
output->active = true;
spin_unlock(&gth->gth_lock);
+ scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
+ scrpd |= output->scratchpad;
+ iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
+
iowrite32(scr, gth->base + REG_GTH_SCR);
iowrite32(0, gth->base + REG_GTH_SCR2);
}
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 3b714b7a61db..56f0d2620577 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -57,9 +57,6 @@ enum {
REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
};
-/* Externall debugger is using Intel TH */
-#define SCRPD_DEBUGGER_IN_USE BIT(24)
-
/* waiting for Pipeline Empty bit(s) to assert for GTH */
#define GTH_PLE_WAITLOOP_DEPTH 10000
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 57fd72b20fae..eedd09332db6 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -30,6 +30,7 @@ enum {
* struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
* @port: output port number, assigned by the switch
* @type: GTH_{MSU,CTP,PTI}
+ * @scratchpad: scratchpad bits to flag when this output is enabled
* @multiblock: true for multiblock output configuration
* @active: true when this output is enabled
*
@@ -41,6 +42,7 @@ enum {
struct intel_th_output {
int port;
unsigned int type;
+ unsigned int scratchpad;
bool multiblock;
bool active;
};
@@ -241,4 +243,43 @@ enum {
GTH_PTI = 4, /* MIPI-PTI */
};
+/*
+ * Scratchpad bits: tell firmware and external debuggers
+ * what we are up to.
+ */
+enum {
+ /* Memory is the primary destination */
+ SCRPD_MEM_IS_PRIM_DEST = BIT(0),
+ /* XHCI DbC is the primary destination */
+ SCRPD_DBC_IS_PRIM_DEST = BIT(1),
+ /* PTI is the primary destination */
+ SCRPD_PTI_IS_PRIM_DEST = BIT(2),
+ /* BSSB is the primary destination */
+ SCRPD_BSSB_IS_PRIM_DEST = BIT(3),
+ /* PTI is the alternate destination */
+ SCRPD_PTI_IS_ALT_DEST = BIT(4),
+ /* BSSB is the alternate destination */
+ SCRPD_BSSB_IS_ALT_DEST = BIT(5),
+ /* DeepSx exit occurred */
+ SCRPD_DEEPSX_EXIT = BIT(6),
+ /* S4 exit occurred */
+ SCRPD_S4_EXIT = BIT(7),
+ /* S5 exit occurred */
+ SCRPD_S5_EXIT = BIT(8),
+ /* MSU controller 0/1 is enabled */
+ SCRPD_MSC0_IS_ENABLED = BIT(9),
+ SCRPD_MSC1_IS_ENABLED = BIT(10),
+ /* Sx exit occurred */
+ SCRPD_SX_EXIT = BIT(11),
+ /* Trigger Unit is enabled */
+ SCRPD_TRIGGER_IS_ENABLED = BIT(12),
+ SCRPD_ODLA_IS_ENABLED = BIT(13),
+ SCRPD_SOCHAP_IS_ENABLED = BIT(14),
+ SCRPD_STH_IS_ENABLED = BIT(15),
+ SCRPD_DCIH_IS_ENABLED = BIT(16),
+ SCRPD_VER_IS_ENABLED = BIT(17),
+ /* External debugger is using Intel TH */
+ SCRPD_DEBUGGER_IN_USE = BIT(24),
+};
+
#endif
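The scratchpad bits above form a small protocol with firmware and external debuggers. A hedged sketch of the read-modify-write that gth.c performs on output enable/disable follows; the helper is illustrative, while the register and field names come from this diff.

/*
 * Illustrative helper only: each output declares its SCRPD_* bits in
 * intel_th_output::scratchpad. The GTH ORs them in on enable and clears
 * them on disable so firmware and debuggers can see which trace
 * destination is currently active.
 */
static void example_scrpd_update(void __iomem *base,
				 struct intel_th_output *output, bool enable)
{
	u32 scrpd = ioread32(base + REG_GTH_SCRPD0);

	if (enable)
		scrpd |= output->scratchpad;	/* e.g. SCRPD_PTI_IS_PRIM_DEST */
	else
		scrpd &= ~output->scratchpad;

	iowrite32(scrpd, base + REG_GTH_SCRPD0);
}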
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 70ca27e45602..d9d6022c5aca 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -408,7 +408,7 @@ msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
* Second time (wrap_count==1), it's just like any other block,
* containing data in the range of [MSC_BDESC..data_bytes].
*/
- if (iter->block == iter->start_block && iter->wrap_count) {
+ if (iter->block == iter->start_block && iter->wrap_count == 2) {
tocopy = DATA_IN_PAGE - data_bytes;
src += data_bytes;
}
@@ -1112,12 +1112,11 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
size = msc->nr_pages << PAGE_SHIFT;
if (!size)
- return 0;
+ goto put_count;
- if (off >= size) {
- len = 0;
+ if (off >= size)
goto put_count;
- }
+
if (off + len >= size)
len = size - off;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 641e87936064..bca7a2ac00d6 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -46,8 +46,6 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
if (IS_ERR(th))
return PTR_ERR(th);
- pci_set_drvdata(pdev, th);
-
return 0;
}
@@ -67,6 +65,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa126),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Apollo Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5a8e),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
+ /* Broxton */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 56101c33e10f..e1aee61dd7b3 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -94,10 +94,13 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
case STP_PACKET_TRIG:
if (flags & STP_PACKET_TIMESTAMPED)
reg += 4;
- iowrite8(*payload, sth->base + reg);
+ writeb_relaxed(*payload, sth->base + reg);
break;
case STP_PACKET_MERR:
+ if (size > 4)
+ size = 4;
+
sth_iowrite(&out->MERR, payload, size);
break;
@@ -107,8 +110,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
else
outp = (u64 __iomem *)&out->FLAG;
- size = 1;
- sth_iowrite(outp, payload, size);
+ size = 0;
+ writeb_relaxed(0, outp);
break;
case STP_PACKET_USER:
@@ -129,6 +132,8 @@ static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
sth_iowrite(outp, payload, size);
break;
+ default:
+ return -ENOTSUPP;
}
return size;
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 83e9f591a54b..847a39b35307 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -1,6 +1,7 @@
config STM
tristate "System Trace Module devices"
select CONFIGFS_FS
+ select SRCU
help
A System Trace Module (STM) is a device exporting data in System
Trace Protocol (STP) format as defined by MIPI STP standards.
@@ -8,6 +9,8 @@ config STM
Say Y here to enable System Trace Module device support.
+if STM
+
config STM_DUMMY
tristate "Dummy STM driver"
help
@@ -24,3 +27,16 @@ config STM_SOURCE_CONSOLE
If you want to send kernel console messages over STM devices,
say Y.
+
+config STM_SOURCE_HEARTBEAT
+ tristate "Heartbeat over STM devices"
+ help
+ This is a kernel space trace source that sends periodic
+ heartbeat messages to trace hosts over STM devices. It is
+ also useful for testing stm class drivers and the stm class
+ framework itself.
+
+ If you want to send heartbeat messages over STM devices,
+ say Y.
+
+endif
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index f9312c38dd7a..a9ce3d487e57 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -5,5 +5,7 @@ stm_core-y := core.o policy.o
obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
+obj-$(CONFIG_STM_SOURCE_HEARTBEAT) += stm_heartbeat.o
stm_console-y := console.o
+stm_heartbeat-y := heartbeat.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index b6445d9e5453..de80d45d8df9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -113,6 +113,7 @@ struct stm_device *stm_find_device(const char *buf)
stm = to_stm_device(dev);
if (!try_module_get(stm->owner)) {
+ /* matches class_find_device() above */
put_device(dev);
return NULL;
}
@@ -125,7 +126,7 @@ struct stm_device *stm_find_device(const char *buf)
* @stm: stm device, previously acquired by stm_find_device()
*
* This drops the module reference and device reference taken by
- * stm_find_device().
+ * stm_find_device() or stm_char_open().
*/
void stm_put_device(struct stm_device *stm)
{
@@ -185,6 +186,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
+ lockdep_assert_held(&stm->mc_lock);
+ lockdep_assert_held(&output->lock);
+
if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
return;
@@ -199,6 +203,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
+ lockdep_assert_held(&stm->mc_lock);
+ lockdep_assert_held(&output->lock);
+
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
@@ -233,7 +240,7 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
return -1;
}
-static unsigned int
+static int
stm_find_master_chan(struct stm_device *stm, unsigned int width,
unsigned int *mstart, unsigned int mend,
unsigned int *cstart, unsigned int cend)
@@ -288,12 +295,13 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
}
spin_lock(&stm->mc_lock);
+ spin_lock(&output->lock);
/* output is already assigned -- shouldn't happen */
if (WARN_ON_ONCE(output->nr_chans))
goto unlock;
ret = stm_find_master_chan(stm, width, &midx, mend, &cidx, cend);
- if (ret)
+ if (ret < 0)
goto unlock;
output->master = midx;
@@ -304,6 +312,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
ret = 0;
unlock:
+ spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
return ret;
@@ -312,11 +321,18 @@ unlock:
static void stm_output_free(struct stm_device *stm, struct stm_output *output)
{
spin_lock(&stm->mc_lock);
+ spin_lock(&output->lock);
if (output->nr_chans)
stm_output_disclaim(stm, output);
+ spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
}
+static void stm_output_init(struct stm_output *output)
+{
+ spin_lock_init(&output->lock);
+}
+
static int major_match(struct device *dev, const void *data)
{
unsigned int major = *(unsigned int *)data;
@@ -339,6 +355,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
if (!stmf)
return -ENOMEM;
+ stm_output_init(&stmf->output);
stmf->stm = to_stm_device(dev);
if (!try_module_get(stmf->stm->owner))
@@ -349,6 +366,8 @@ static int stm_char_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
err_free:
+ /* matches class_find_device() above */
+ put_device(dev);
kfree(stmf);
return err;
@@ -357,9 +376,19 @@ err_free:
static int stm_char_release(struct inode *inode, struct file *file)
{
struct stm_file *stmf = file->private_data;
+ struct stm_device *stm = stmf->stm;
+
+ if (stm->data->unlink)
+ stm->data->unlink(stm->data, stmf->output.master,
+ stmf->output.channel);
- stm_output_free(stmf->stm, &stmf->output);
- stm_put_device(stmf->stm);
+ stm_output_free(stm, &stmf->output);
+
+ /*
+ * matches the stm_char_open()'s
+ * class_find_device() + try_module_get()
+ */
+ stm_put_device(stm);
kfree(stmf);
return 0;
@@ -380,8 +409,8 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
return ret;
}
-static void stm_write(struct stm_data *data, unsigned int master,
- unsigned int channel, const char *buf, size_t count)
+static ssize_t stm_write(struct stm_data *data, unsigned int master,
+ unsigned int channel, const char *buf, size_t count)
{
unsigned int flags = STP_PACKET_TIMESTAMPED;
const unsigned char *p = buf, nil = 0;
@@ -393,9 +422,14 @@ static void stm_write(struct stm_data *data, unsigned int master,
sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
sz, p);
flags = 0;
+
+ if (sz < 0)
+ break;
}
data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return pos;
}
static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -406,6 +440,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
char *kbuf;
int err;
+ if (count + 1 > PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+
/*
* if no m/c have been assigned to this writer up to this
* point, use "default" policy entry
@@ -430,8 +467,8 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
return -EFAULT;
}
- stm_write(stm->data, stmf->output.master, stmf->output.channel, kbuf,
- count);
+ count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
+ kbuf, count);
kfree(kbuf);
@@ -515,10 +552,8 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
ret = stm->data->link(stm->data, stmf->output.master,
stmf->output.channel);
- if (ret) {
+ if (ret)
stm_output_free(stmf->stm, &stmf->output);
- stm_put_device(stmf->stm);
- }
err_free:
kfree(id);
@@ -618,7 +653,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
if (!stm_data->packet || !stm_data->sw_nchannels)
return -EINVAL;
- nmasters = stm_data->sw_end - stm_data->sw_start;
+ nmasters = stm_data->sw_end - stm_data->sw_start + 1;
stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
if (!stm)
return -ENOMEM;
@@ -641,6 +676,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
if (err)
goto err_device;
+ mutex_init(&stm->link_mutex);
spin_lock_init(&stm->link_lock);
INIT_LIST_HEAD(&stm->link_list);
@@ -654,6 +690,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
return 0;
err_device:
+ /* matches device_initialize() above */
put_device(&stm->dev);
err_free:
kfree(stm);
@@ -662,20 +699,28 @@ err_free:
}
EXPORT_SYMBOL_GPL(stm_register_device);
-static void __stm_source_link_drop(struct stm_source_device *src,
- struct stm_device *stm);
+static int __stm_source_link_drop(struct stm_source_device *src,
+ struct stm_device *stm);
void stm_unregister_device(struct stm_data *stm_data)
{
struct stm_device *stm = stm_data->stm;
struct stm_source_device *src, *iter;
- int i;
+ int i, ret;
- spin_lock(&stm->link_lock);
+ mutex_lock(&stm->link_mutex);
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
- __stm_source_link_drop(src, stm);
+ ret = __stm_source_link_drop(src, stm);
+ /*
+ * The src <-> stm link must not change while stm::link_mutex
+ * is held, so complain loudly if it has; in that situation,
+ * ret != 0 means this src is not connected to this stm, and it
+ * should otherwise be safe to proceed with the tear-down of stm.
+ */
+ WARN_ON_ONCE(ret);
}
- spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
synchronize_srcu(&stm_source_srcu);
@@ -686,7 +731,7 @@ void stm_unregister_device(struct stm_data *stm_data)
stp_policy_unbind(stm->policy);
mutex_unlock(&stm->policy_mutex);
- for (i = 0; i < stm->sw_nmasters; i++)
+ for (i = stm->data->sw_start; i <= stm->data->sw_end; i++)
stp_master_free(stm, i);
device_unregister(&stm->dev);
@@ -694,6 +739,17 @@ void stm_unregister_device(struct stm_data *stm_data)
}
EXPORT_SYMBOL_GPL(stm_unregister_device);
+/*
+ * stm::link_list access serialization uses a spinlock and a mutex; holding
+ * either of them guarantees that the list is stable; modification requires
+ * holding both of them.
+ *
+ * Lock ordering is as follows:
+ * stm::link_mutex
+ * stm::link_lock
+ * src::link_lock
+ */
+
/**
* stm_source_link_add() - connect an stm_source device to an stm device
* @src: stm_source device
@@ -710,6 +766,7 @@ static int stm_source_link_add(struct stm_source_device *src,
char *id;
int err;
+ mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
@@ -719,6 +776,7 @@ static int stm_source_link_add(struct stm_source_device *src,
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
id = kstrdup(src->data->name, GFP_KERNEL);
if (id) {
@@ -753,9 +811,9 @@ static int stm_source_link_add(struct stm_source_device *src,
fail_free_output:
stm_output_free(stm, &src->output);
- stm_put_device(stm);
fail_detach:
+ mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
@@ -764,6 +822,7 @@ fail_detach:
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
return err;
}
@@ -776,28 +835,55 @@ fail_detach:
* If @stm is @src::link, disconnect them from one another and put the
* reference on the @stm device.
*
- * Caller must hold stm::link_lock.
+ * Caller must hold stm::link_mutex.
*/
-static void __stm_source_link_drop(struct stm_source_device *src,
- struct stm_device *stm)
+static int __stm_source_link_drop(struct stm_source_device *src,
+ struct stm_device *stm)
{
struct stm_device *link;
+ int ret = 0;
+
+ lockdep_assert_held(&stm->link_mutex);
+ /* for stm::link_list modification, we hold both mutex and spinlock */
+ spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
- if (WARN_ON_ONCE(link != stm)) {
- spin_unlock(&src->link_lock);
- return;
+
+ /*
+ * The linked device may have changed since we last looked, because
+ * we weren't holding the src::link_lock back then; if this is the
+ * case, tell the caller to retry.
+ */
+ if (link != stm) {
+ ret = -EAGAIN;
+ goto unlock;
}
stm_output_free(link, &src->output);
- /* caller must hold stm::link_lock */
list_del_init(&src->link_entry);
/* matches stm_find_device() from stm_source_link_store() */
stm_put_device(link);
rcu_assign_pointer(src->link, NULL);
+unlock:
spin_unlock(&src->link_lock);
+ spin_unlock(&stm->link_lock);
+
+ /*
+ * Call the unlink callbacks for both source and stm, when we know
+ * that we have actually performed the unlinking.
+ */
+ if (!ret) {
+ if (src->data->unlink)
+ src->data->unlink(src->data);
+
+ if (stm->data->unlink)
+ stm->data->unlink(stm->data, src->output.master,
+ src->output.channel);
+ }
+
+ return ret;
}
/**
@@ -813,21 +899,29 @@ static void __stm_source_link_drop(struct stm_source_device *src,
static void stm_source_link_drop(struct stm_source_device *src)
{
struct stm_device *stm;
- int idx;
+ int idx, ret;
+retry:
idx = srcu_read_lock(&stm_source_srcu);
+ /*
+ * The stm device will be valid for the duration of this
+ * read section, but the link may change before we grab
+ * the src::link_lock in __stm_source_link_drop().
+ */
stm = srcu_dereference(src->link, &stm_source_srcu);
+ ret = 0;
if (stm) {
- if (src->data->unlink)
- src->data->unlink(src->data);
-
- spin_lock(&stm->link_lock);
- __stm_source_link_drop(src, stm);
- spin_unlock(&stm->link_lock);
+ mutex_lock(&stm->link_mutex);
+ ret = __stm_source_link_drop(src, stm);
+ mutex_unlock(&stm->link_mutex);
}
srcu_read_unlock(&stm_source_srcu, idx);
+
+ /* if it did change, retry */
+ if (ret == -EAGAIN)
+ goto retry;
}
static ssize_t stm_source_link_show(struct device *dev,
@@ -862,8 +956,10 @@ static ssize_t stm_source_link_store(struct device *dev,
return -EINVAL;
err = stm_source_link_add(src, link);
- if (err)
+ if (err) {
+ /* matches the stm_find_device() above */
stm_put_device(link);
+ }
return err ? : count;
}
@@ -925,6 +1021,7 @@ int stm_source_register_device(struct device *parent,
if (err)
goto err;
+ stm_output_init(&src->output);
spin_lock_init(&src->link_lock);
INIT_LIST_HEAD(&src->link_entry);
src->data = data;
@@ -973,9 +1070,9 @@ int stm_source_write(struct stm_source_data *data, unsigned int chan,
stm = srcu_dereference(src->link, &stm_source_srcu);
if (stm)
- stm_write(stm->data, src->output.master,
- src->output.channel + chan,
- buf, count);
+ count = stm_write(stm->data, src->output.master,
+ src->output.channel + chan,
+ buf, count);
else
count = -ENODEV;
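The lock-ordering comment added above is the key invariant of this rework. A minimal sketch of a stm::link_list modification under that discipline follows; the function is illustrative, while the field names match stm.h in this diff.

/*
 * Sketch of the documented rule: holding either stm::link_mutex or
 * stm::link_lock is enough to *read* link_list; *modifying* it requires
 * both, taken in mutex -> spinlock order, plus src::link_lock for the
 * source's own state. list_del_init() stands in for any modification.
 */
static void example_link_list_del(struct stm_device *stm,
				  struct stm_source_device *src)
{
	mutex_lock(&stm->link_mutex);	/* outer, sleepable */
	spin_lock(&stm->link_lock);
	spin_lock(&src->link_lock);

	list_del_init(&src->link_entry);

	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);
	mutex_unlock(&stm->link_mutex);
}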
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index 3709bef0b21f..310adf57e7a1 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -40,22 +40,75 @@ dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
return size;
}
-static struct stm_data dummy_stm = {
- .name = "dummy_stm",
- .sw_start = 0x0000,
- .sw_end = 0xffff,
- .sw_nchannels = 0xffff,
- .packet = dummy_stm_packet,
-};
+#define DUMMY_STM_MAX 32
+
+static struct stm_data dummy_stm[DUMMY_STM_MAX];
+
+static int nr_dummies = 4;
+
+module_param(nr_dummies, int, 0600);
+
+static unsigned int dummy_stm_nr;
+
+static unsigned int fail_mode;
+
+module_param(fail_mode, int, 0600);
+
+static int dummy_stm_link(struct stm_data *data, unsigned int master,
+ unsigned int channel)
+{
+ if (fail_mode && (channel & fail_mode))
+ return -EINVAL;
+
+ return 0;
+}
static int dummy_stm_init(void)
{
- return stm_register_device(NULL, &dummy_stm, THIS_MODULE);
+ int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies);
+
+ if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < __nr_dummies; i++) {
+ dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
+ if (!dummy_stm[i].name)
+ goto fail_unregister;
+
+ dummy_stm[i].sw_start = 0x0000;
+ dummy_stm[i].sw_end = 0xffff;
+ dummy_stm[i].sw_nchannels = 0xffff;
+ dummy_stm[i].packet = dummy_stm_packet;
+ dummy_stm[i].link = dummy_stm_link;
+
+ ret = stm_register_device(NULL, &dummy_stm[i], THIS_MODULE);
+ if (ret)
+ goto fail_free;
+ }
+
+ dummy_stm_nr = __nr_dummies;
+
+ return 0;
+
+fail_unregister:
+ for (i--; i >= 0; i--) {
+ stm_unregister_device(&dummy_stm[i]);
+fail_free:
+ kfree(dummy_stm[i].name);
+ }
+
+ return ret;
}
static void dummy_stm_exit(void)
{
- stm_unregister_device(&dummy_stm);
+ int i;
+
+ for (i = 0; i < dummy_stm_nr; i++) {
+ stm_unregister_device(&dummy_stm[i]);
+ kfree(dummy_stm[i].name);
+ }
}
module_init(dummy_stm_init);
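The new fail_mode parameter deserves a worked example: linking fails whenever the allocated channel has any of the fail_mode bits set, so fail_mode=1 fails every odd-numbered channel, which is a cheap way to exercise the stm core's error paths. A hedged restatement of the check, with an illustrative helper name:

/* Illustrative restatement of dummy_stm_link()'s test hook: with
 * fail_mode == 0x1, any odd-numbered channel refuses to link.
 */
static bool example_link_would_fail(unsigned int channel,
				    unsigned int fail_mode)
{
	return fail_mode && (channel & fail_mode);
}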
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
new file mode 100644
index 000000000000..0133571b506f
--- /dev/null
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -0,0 +1,130 @@
+/*
+ * Simple heartbeat STM source driver
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Heartbeat STM source will send periodic messages over STM devices to a
+ * trace host.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <linux/stm.h>
+
+#define STM_HEARTBEAT_MAX 32
+
+static int nr_devs = 4;
+static int interval_ms = 10;
+
+module_param(nr_devs, int, 0600);
+module_param(interval_ms, int, 0600);
+
+static struct stm_heartbeat {
+ struct stm_source_data data;
+ struct hrtimer hrtimer;
+ unsigned int active;
+} stm_heartbeat[STM_HEARTBEAT_MAX];
+
+static unsigned int nr_instances;
+
+static const char str[] = "heartbeat stm source driver is here to serve you";
+
+static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
+{
+ struct stm_heartbeat *heartbeat = container_of(hr, struct stm_heartbeat,
+ hrtimer);
+
+ stm_source_write(&heartbeat->data, 0, str, sizeof str);
+ if (heartbeat->active)
+ hrtimer_forward_now(hr, ms_to_ktime(interval_ms));
+
+ return heartbeat->active ? HRTIMER_RESTART : HRTIMER_NORESTART;
+}
+
+static int stm_heartbeat_link(struct stm_source_data *data)
+{
+ struct stm_heartbeat *heartbeat =
+ container_of(data, struct stm_heartbeat, data);
+
+ heartbeat->active = 1;
+ hrtimer_start(&heartbeat->hrtimer, ms_to_ktime(interval_ms),
+ HRTIMER_MODE_ABS);
+
+ return 0;
+}
+
+static void stm_heartbeat_unlink(struct stm_source_data *data)
+{
+ struct stm_heartbeat *heartbeat =
+ container_of(data, struct stm_heartbeat, data);
+
+ heartbeat->active = 0;
+ hrtimer_cancel(&heartbeat->hrtimer);
+}
+
+static int stm_heartbeat_init(void)
+{
+ int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs);
+
+ if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < __nr_instances; i++) {
+ stm_heartbeat[i].data.name =
+ kasprintf(GFP_KERNEL, "heartbeat.%d", i);
+ if (!stm_heartbeat[i].data.name)
+ goto fail_unregister;
+
+ stm_heartbeat[i].data.nr_chans = 1;
+ stm_heartbeat[i].data.link = stm_heartbeat_link;
+ stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
+ hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
+ stm_heartbeat[i].hrtimer.function =
+ stm_heartbeat_hrtimer_handler;
+
+ ret = stm_source_register_device(NULL, &stm_heartbeat[i].data);
+ if (ret)
+ goto fail_free;
+ }
+
+ nr_instances = __nr_instances;
+
+ return 0;
+
+fail_unregister:
+ for (i--; i >= 0; i--) {
+ stm_source_unregister_device(&stm_heartbeat[i].data);
+fail_free:
+ kfree(stm_heartbeat[i].data.name);
+ }
+
+ return ret;
+}
+
+static void stm_heartbeat_exit(void)
+{
+ int i;
+
+ for (i = 0; i < nr_instances; i++) {
+ stm_source_unregister_device(&stm_heartbeat[i].data);
+ kfree(stm_heartbeat[i].data.name);
+ }
+}
+
+module_init(stm_heartbeat_init);
+module_exit(stm_heartbeat_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_heartbeat driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 11ab6d01adf6..1db189657b2b 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy)
{
struct stm_device *stm = policy->stm;
+ /*
+ * stp_policy_release() will not end up here if the policy is already
+ * unbound; other callers should not get here either, as no link exists
+ * between this policy and anything else in that case
+ */
if (WARN_ON_ONCE(!policy->stm))
return;
- mutex_lock(&stm->policy_mutex);
- stm->policy = NULL;
- mutex_unlock(&stm->policy_mutex);
+ lockdep_assert_held(&stm->policy_mutex);
+ stm->policy = NULL;
policy->stm = NULL;
stm_put_device(stm);
@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy)
static void stp_policy_release(struct config_item *item)
{
struct stp_policy *policy = to_stp_policy(item);
+ struct stm_device *stm = policy->stm;
+ /* a policy *can* be unbound and still exist in configfs tree */
+ if (!stm)
+ return;
+
+ mutex_lock(&stm->policy_mutex);
stp_policy_unbind(policy);
+ mutex_unlock(&stm->policy_mutex);
+
kfree(policy);
}
@@ -320,10 +332,11 @@ stp_policies_make(struct config_group *group, const char *name)
/*
* node must look like <device_name>.<policy_name>, where
- * <device_name> is the name of an existing stm device and
- * <policy_name> is an arbitrary string
+ * <device_name> is the name of an existing stm device; may
+ * contain dots;
+ * <policy_name> is an arbitrary string; may not contain dots
*/
- p = strchr(devname, '.');
+ p = strrchr(devname, '.');
if (!p) {
kfree(devname);
return ERR_PTR(-EINVAL);
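The strchr() -> strrchr() switch matters once device names themselves contain dots, as with the multi-instance "dummy_stm.0" introduced earlier in this series. A hedged sketch of the split, with an illustrative helper name:

/*
 * Sketch of the node-name split performed in stp_policies_make():
 * cutting at the *last* dot lets the device part contain dots, e.g.
 *   "dummy_stm.0.my-policy" -> device "dummy_stm.0", policy "my-policy"
 * whereas strchr() would wrongly yield "dummy_stm" / "0.my-policy".
 */
static int example_split_node_name(char *devname, char **policy)
{
	char *p = strrchr(devname, '.');	/* last dot, not the first */

	if (!p)
		return -EINVAL;

	*p = '\0';		/* devname now names the stm device */
	*policy = p + 1;	/* policy name may not contain dots */
	return 0;
}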
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
index 95ece0292c99..4e8c6926260f 100644
--- a/drivers/hwtracing/stm/stm.h
+++ b/drivers/hwtracing/stm/stm.h
@@ -45,6 +45,7 @@ struct stm_device {
int major;
unsigned int sw_nmasters;
struct stm_data *data;
+ struct mutex link_mutex;
spinlock_t link_lock;
struct list_head link_list;
/* master allocation */
@@ -56,6 +57,7 @@ struct stm_device {
container_of((_d), struct stm_device, dev)
struct stm_output {
+ spinlock_t lock;
unsigned int master;
unsigned int channel;
unsigned int nr_chans;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 0299dfa746a3..faa8e6821fea 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -789,7 +789,7 @@ config I2C_QUP
config I2C_RIIC
tristate "Renesas RIIC adapter"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
If you say yes to this option, support will be included for the
Renesas RIIC I2C interface.
@@ -833,7 +833,7 @@ config I2C_SH7760
config I2C_SH_MOBILE
tristate "SuperH Mobile I2C Controller"
depends on HAS_DMA
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Renesas SH-Mobile processor.
@@ -908,7 +908,7 @@ config I2C_TEGRA
config I2C_UNIPHIER
tristate "UniPhier FIFO-less I2C controller"
- depends on ARCH_UNIPHIER
+ depends on ARCH_UNIPHIER || COMPILE_TEST
help
If you say yes to this option, support will be included for
the UniPhier FIFO-less I2C interface embedded in PH1-LD4, PH1-sLD8,
@@ -916,7 +916,7 @@ config I2C_UNIPHIER
config I2C_UNIPHIER_F
tristate "UniPhier FIFO-builtin I2C controller"
- depends on ARCH_UNIPHIER
+ depends on ARCH_UNIPHIER || COMPILE_TEST
help
If you say yes to this option, support will be included for
the UniPhier FIFO-builtin I2C interface embedded in PH1-Pro4,
@@ -985,7 +985,7 @@ config I2C_XLP9XX
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
help
If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 0419f5284609..b9f0fff4e723 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -58,11 +58,13 @@
#define IE_M_RX_FIFO_FULL_SHIFT 31
#define IE_M_RX_THLD_SHIFT 30
#define IE_M_START_BUSY_SHIFT 28
+#define IE_M_TX_UNDERRUN_SHIFT 27
#define IS_OFFSET 0x3c
#define IS_M_RX_FIFO_FULL_SHIFT 31
#define IS_M_RX_THLD_SHIFT 30
#define IS_M_START_BUSY_SHIFT 28
+#define IS_M_TX_UNDERRUN_SHIFT 27
#define M_TX_OFFSET 0x40
#define M_TX_WR_STATUS_SHIFT 31
@@ -76,7 +78,7 @@
#define M_RX_DATA_SHIFT 0
#define M_RX_DATA_MASK 0xff
-#define I2C_TIMEOUT_MESC 100
+#define I2C_TIMEOUT_MSEC 50000
#define M_TX_RX_FIFO_SIZE 64
enum bus_speed_index {
@@ -95,12 +97,17 @@ struct bcm_iproc_i2c_dev {
struct completion done;
int xfer_is_done;
+
+ struct i2c_msg *msg;
+
+ /* bytes that have been transferred */
+ unsigned int tx_bytes;
};
/*
* Can be expanded in the future if more interrupt status bits are utilized
*/
-#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT)
+#define ISR_MASK (BIT(IS_M_START_BUSY_SHIFT) | BIT(IS_M_TX_UNDERRUN_SHIFT))
static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
{
@@ -112,13 +119,95 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
if (!status)
return IRQ_NONE;
+ /* TX FIFO is empty and we have more data to send */
+ if (status & BIT(IS_M_TX_UNDERRUN_SHIFT)) {
+ struct i2c_msg *msg = iproc_i2c->msg;
+ unsigned int tx_bytes = msg->len - iproc_i2c->tx_bytes;
+ unsigned int i;
+ u32 val;
+
+ /* can only fill up to the FIFO size */
+ tx_bytes = min_t(unsigned int, tx_bytes, M_TX_RX_FIFO_SIZE);
+ for (i = 0; i < tx_bytes; i++) {
+ /* start from where we left over */
+ unsigned int idx = iproc_i2c->tx_bytes + i;
+
+ val = msg->buf[idx];
+
+ /* mark the last byte */
+ if (idx == msg->len - 1) {
+ u32 tmp;
+
+ val |= BIT(M_TX_WR_STATUS_SHIFT);
+
+ /*
+ * Since this is the last byte, we should
+ * now disable TX FIFO underrun interrupt
+ */
+ tmp = readl(iproc_i2c->base + IE_OFFSET);
+ tmp &= ~BIT(IE_M_TX_UNDERRUN_SHIFT);
+ writel(tmp, iproc_i2c->base + IE_OFFSET);
+ }
+
+ /* load data into TX FIFO */
+ writel(val, iproc_i2c->base + M_TX_OFFSET);
+ }
+ /* update number of transferred bytes */
+ iproc_i2c->tx_bytes += tx_bytes;
+ }
+
+ if (status & BIT(IS_M_START_BUSY_SHIFT)) {
+ iproc_i2c->xfer_is_done = 1;
+ complete_all(&iproc_i2c->done);
+ }
+
writel(status, iproc_i2c->base + IS_OFFSET);
- iproc_i2c->xfer_is_done = 1;
- complete_all(&iproc_i2c->done);
return IRQ_HANDLED;
}
+static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ u32 val;
+
+ /* put controller in reset */
+ val = readl(iproc_i2c->base + CFG_OFFSET);
+ val |= 1 << CFG_RESET_SHIFT;
+ val &= ~(1 << CFG_EN_SHIFT);
+ writel(val, iproc_i2c->base + CFG_OFFSET);
+
+ /* wait 100 usec per spec */
+ udelay(100);
+
+ /* bring controller out of reset */
+ val &= ~(1 << CFG_RESET_SHIFT);
+ writel(val, iproc_i2c->base + CFG_OFFSET);
+
+ /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
+ val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
+ writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+ /* disable all interrupts */
+ writel(0, iproc_i2c->base + IE_OFFSET);
+
+ /* clear all pending interrupts */
+ writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
+
+ return 0;
+}
+
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(iproc_i2c->base + CFG_OFFSET);
+ if (enable)
+ val |= BIT(CFG_EN_SHIFT);
+ else
+ val &= ~BIT(CFG_EN_SHIFT);
+ writel(val, iproc_i2c->base + CFG_OFFSET);
+}
+
static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg)
{
@@ -149,6 +238,12 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
default:
dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+
+ /* re-initialize i2c for recovery */
+ bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ bcm_iproc_i2c_init(iproc_i2c);
+ bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+
return -EIO;
}
}
@@ -159,7 +254,8 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
int ret, i;
u8 addr;
u32 val;
- unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC);
+ unsigned int tx_bytes;
+ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC);
/* check if bus is busy */
if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
@@ -168,13 +264,20 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
return -EBUSY;
}
+ iproc_i2c->msg = msg;
+
/* format and load slave address into the TX FIFO */
addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
writel(addr, iproc_i2c->base + M_TX_OFFSET);
- /* for a write transaction, load data into the TX FIFO */
+ /*
+ * For a write transaction, load data into the TX FIFO. Only allow
+ * loading up to TX FIFO size - 1 bytes of data since the first byte
+ * has been used up by the slave address
+ */
+ tx_bytes = min_t(unsigned int, msg->len, M_TX_RX_FIFO_SIZE - 1);
if (!(msg->flags & I2C_M_RD)) {
- for (i = 0; i < msg->len; i++) {
+ for (i = 0; i < tx_bytes; i++) {
val = msg->buf[i];
/* mark the last byte */
@@ -183,6 +286,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
writel(val, iproc_i2c->base + M_TX_OFFSET);
}
+ iproc_i2c->tx_bytes = tx_bytes;
}
/* mark as incomplete before starting the transaction */
@@ -194,13 +298,24 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
* transaction is done, i.e., the internal start_busy bit, transitions
* from 1 to 0.
*/
- writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET);
+ val = BIT(IE_M_START_BUSY_SHIFT);
+
+ /*
+ * If the TX data size is larger than the TX FIFO, we need to enable
+ * the TX underrun interrupt, which will be triggered when the TX FIFO
+ * is empty. When that happens we can then pump more data into the FIFO
+ */
+ if (!(msg->flags & I2C_M_RD) &&
+ msg->len > iproc_i2c->tx_bytes)
+ val |= BIT(IE_M_TX_UNDERRUN_SHIFT);
+
+ writel(val, iproc_i2c->base + IE_OFFSET);
/*
* Now we can activate the transfer. For a read operation, specify the
* number of bytes to read
*/
- val = 1 << M_CMD_START_BUSY_SHIFT;
+ val = BIT(M_CMD_START_BUSY_SHIFT);
if (msg->flags & I2C_M_RD) {
val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
(msg->len << M_CMD_RD_CNT_SHIFT);
@@ -283,7 +398,6 @@ static const struct i2c_algorithm bcm_iproc_algo = {
static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
/* need to reserve one byte in the FIFO for the slave address */
.max_read_len = M_TX_RX_FIFO_SIZE - 1,
- .max_write_len = M_TX_RX_FIFO_SIZE - 1,
};
static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
@@ -321,49 +435,6 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
return 0;
}
-static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
-{
- u32 val;
-
- /* put controller in reset */
- val = readl(iproc_i2c->base + CFG_OFFSET);
- val |= 1 << CFG_RESET_SHIFT;
- val &= ~(1 << CFG_EN_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
-
- /* wait 100 usec per spec */
- udelay(100);
-
- /* bring controller out of reset */
- val &= ~(1 << CFG_RESET_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
-
- /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
- val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
- writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
-
- /* disable all interrupts */
- writel(0, iproc_i2c->base + IE_OFFSET);
-
- /* clear all pending interrupts */
- writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
-
- return 0;
-}
-
-static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
- bool enable)
-{
- u32 val;
-
- val = readl(iproc_i2c->base + CFG_OFFSET);
- if (enable)
- val |= BIT(CFG_EN_SHIFT);
- else
- val &= ~BIT(CFG_EN_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
-}
-
static int bcm_iproc_i2c_probe(struct platform_device *pdev)
{
int irq, ret = 0;
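
A minimal user-space sketch of the refill strategy above (not the driver
itself): the first chunk is capped at FIFO size minus one because the slave
address takes one slot, and each later chunk, pumped on a TX underrun
event, may use the whole FIFO. The FIFO size and all names here are
illustrative assumptions.

#include <stdio.h>

#define FIFO_SIZE 64	/* assumed; the driver uses M_TX_RX_FIFO_SIZE */

static unsigned int sent;	/* bytes queued so far */

/* queue the next chunk; returns the number of bytes still pending */
static unsigned int refill(const unsigned char *buf, unsigned int len)
{
	/* the first chunk leaves one FIFO slot for the slave address */
	unsigned int space = sent ? FIFO_SIZE : FIFO_SIZE - 1;
	unsigned int n = (len - sent < space) ? len - sent : space;
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int idx = sent + i;

		/* the driver additionally ORs in a "last byte" status
		 * bit here and disables the underrun interrupt */
		printf("push 0x%02x%s\n", buf[idx],
		       idx == len - 1 ? " (last)" : "");
	}
	sent += n;
	return len - sent;
}

int main(void)
{
	unsigned char msg[150] = { 0xaa };

	while (refill(msg, sizeof(msg)))
		;	/* each further refill is driven by the underrun IRQ */
	return 0;
}
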
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 6b08d1607b7a..90bbd9f9dd8f 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -124,6 +124,8 @@
/**
* struct cdns_i2c - I2C device private data structure
+ *
+ * @dev: Pointer to device structure
* @membase: Base address of the I2C device
* @adap: I2C adapter instance
* @p_msg: Message pointer
@@ -171,7 +173,7 @@ struct cdns_platform_data {
clk_rate_change_nb)
/**
- * cdns_i2c_clear_bus_hold() - Clear bus hold bit
+ * cdns_i2c_clear_bus_hold - Clear bus hold bit
* @id: Pointer to driver data struct
*
* Helper to clear the controller's bus hold bit.
@@ -815,8 +817,8 @@ static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
}
/**
- * cdns_i2c_suspend - Suspend method for the driver
- * @_dev: Address of the platform_device structure
+ * cdns_i2c_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the platform_device structure
*
* Put the driver into low power mode.
*
@@ -833,10 +835,10 @@ static int __maybe_unused cdns_i2c_runtime_suspend(struct device *dev)
}
/**
- * cdns_i2c_resume - Resume from suspend
- * @_dev: Address of the platform_device structure
+ * cdns_i2c_runtime_resume - Runtime resume
+ * @dev: Address of the platform_device structure
*
- * Resume operation after suspend.
+ * Runtime resume callback.
*
* Return: 0 on success and error value on error
*/
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index e38c2bbba940..1590ad0a8081 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -11,7 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/acpi.h>
@@ -151,7 +150,3 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
return 0;
}
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 10fbd6d841e0..99b54be6ba73 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -634,7 +634,6 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
- mutex_lock(&dev->lock);
pm_runtime_get_sync(dev->dev);
reinit_completion(&dev->cmd_complete);
@@ -673,11 +672,12 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
/*
- * We must disable the adapter before unlocking the &dev->lock mutex
- * below. Otherwise the hardware might continue generating interrupts
- * which in turn causes a race condition with the following transfer.
- * Needs some more investigation if the additional interrupts are
- * a hardware bug or this driver doesn't handle them correctly yet.
+ * We must disable the adapter before returning and signaling the end
+ * of the current transfer. Otherwise the hardware might continue
+ * generating interrupts which in turn causes a race condition with
+ * the following transfer. Needs some more investigation if the
+ * additional interrupts are a hardware bug or this driver doesn't
+ * handle them correctly yet.
*/
__i2c_dw_enable(dev, false);
@@ -706,7 +706,6 @@ done:
done_nolock:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
- mutex_unlock(&dev->lock);
return ret;
}
@@ -860,7 +859,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
int r;
init_completion(&dev->cmd_complete);
- mutex_init(&dev->lock);
r = i2c_dw_init(dev);
if (r)
@@ -883,9 +881,17 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
return r;
}
+ /*
+ * Increment the PM usage count during adapter registration in order
+ * to avoid a possible spurious runtime suspend when the adapter
+ * device is registered to the device core, and an immediate resume in
+ * case the bus has I2C slaves that do transfers in their probe.
+ */
+ pm_runtime_get_noresume(dev->dev);
r = i2c_add_numbered_adapter(adap);
if (r)
dev_err(dev->dev, "failure adding adapter: %d\n", r);
+ pm_runtime_put_noidle(dev->dev);
return r;
}
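
The usage-count bracket added above can be modeled with a plain integer
standing in for the PM core's counter; this sketch only illustrates the
pattern, none of it is the real pm_runtime API.

#include <stdio.h>

static int usage_count;	/* models the device's PM usage counter */

static void get_noresume(void) { usage_count++; }	/* no resume side effect */
static void put_noidle(void)   { usage_count--; }	/* no idle side effect */

static void register_adapter(void)
{
	/* slaves probed here may already issue transfers; the held
	 * count keeps the modeled controller from suspending under them */
	printf("registering with usage_count=%d\n", usage_count);
}

int main(void)
{
	get_noresume();
	register_adapter();
	put_noidle();
	printf("after registration usage_count=%d\n", usage_count);
	return 0;
}
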
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 9ffb63a60f95..cd409e7fbc71 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -36,7 +36,6 @@
* @dev: driver model device node
* @base: IO registers pointer
* @cmd_complete: tx completion indicator
- * @lock: protect this struct and IO registers
* @clk: input reference clock
* @cmd_err: run time hardware error code
* @msgs: points to an array of messages currently being transferred
@@ -73,7 +72,6 @@ struct dw_i2c_dev {
struct device *dev;
void __iomem *base;
struct completion cmd_complete;
- struct mutex lock;
struct clk *clk;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
struct dw_pci_controller *controller;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 438f1b4964c0..d656657b805c 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -123,6 +123,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "80860F41", 0 },
{ "808622C1", 0 },
{ "AMD0010", ACCESS_INTR_MASK },
+ { "AMDI0010", ACCESS_INTR_MASK },
{ "AMDI0510", 0 },
{ "APMC0D0F", 0 },
{ }
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 27fa0cb09538..585a3b7915bd 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -184,7 +184,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
-#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292
+#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
@@ -193,9 +193,11 @@
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c
+#define PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS 0x2292
#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
+#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
@@ -204,10 +206,8 @@
#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2
-#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23
-#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
-#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
+#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3
#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index a2b132cef717..1ca7ef2314f7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -212,7 +212,7 @@ struct imx_i2c_struct {
struct imx_i2c_dma *dma;
};
-static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
+static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
.devtype = IMX1_I2C,
.regshift = IMX_I2C_REGSHIFT,
.clk_div = imx_i2c_clk_div,
@@ -222,7 +222,7 @@ static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
};
-static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
+static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
.devtype = IMX21_I2C,
.regshift = IMX_I2C_REGSHIFT,
.clk_div = imx_i2c_clk_div,
@@ -871,7 +871,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
if ((!i) && block_data)
msgs->buf[0] = len;
else
- msgs->buf[i] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+ msgs->buf[i] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
dev_dbg(&i2c_imx->adapter.dev,
"<%s> read byte: B%d=0x%X\n",
__func__, i, msgs->buf[i]);
@@ -916,7 +916,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_RSTA;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
- result = i2c_imx_bus_busy(i2c_imx, 1);
+ result = i2c_imx_bus_busy(i2c_imx, 1);
if (result)
goto fail0;
}
@@ -1192,7 +1192,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int i2c_imx_runtime_suspend(struct device *dev)
{
- struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
+ struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
clk_disable_unprepare(i2c_imx->clk);
@@ -1201,7 +1201,7 @@ static int i2c_imx_runtime_suspend(struct device *dev)
static int i2c_imx_runtime_resume(struct device *dev)
{
- struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
+ struct imx_i2c_struct *i2c_imx = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(i2c_imx->clk);
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..ba14a863b451 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -771,11 +771,16 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&clk_freq);
if (ret) {
- dev_err(&pdev->dev, "clock-frequency not specified in DT");
+ dev_err(&pdev->dev, "clock-frequency not specified in DT\n");
goto err;
}
i2c->speed = clk_freq / 1000;
+ if (i2c->speed == 0) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "clock-frequency minimum is 1000\n");
+ goto err;
+ }
jz4780_i2c_set_speed(i2c);
dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed);
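
The new guard matters because i2c->speed is the result of an integer
division; a stand-alone sketch of the failure mode it closes (the helper
name is hypothetical):

#include <stdio.h>

/* any clock-frequency below 1000 truncates to 0 kHz and would feed a
 * zero divisor into the later clock setup */
static int check_speed(unsigned int clk_freq)
{
	unsigned int speed_khz = clk_freq / 1000;

	if (speed_khz == 0)
		return -1;
	printf("bus speed %u kHz\n", speed_khz);
	return 0;
}

int main(void)
{
	check_speed(400000);		/* accepted: 400 kHz */
	if (check_speed(999))		/* rejected: would be 0 kHz */
		printf("rejected\n");
	return 0;
}
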
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index aec8e6ce38a4..453358b4d9ca 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -60,6 +60,7 @@
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
#define I2C_DMA_HARD_RST 0x0002
+#define I2C_DMA_4G_MODE 0x0001
#define I2C_DEFAULT_SPEED 100000 /* hz */
#define MAX_FS_MODE_SPEED 400000
@@ -88,6 +89,8 @@ enum DMA_REGS_OFFSET {
OFFSET_RX_MEM_ADDR = 0x20,
OFFSET_TX_LEN = 0x24,
OFFSET_RX_LEN = 0x28,
+ OFFSET_TX_4G_MODE = 0x54,
+ OFFSET_RX_4G_MODE = 0x58,
};
enum i2c_trans_st_rs {
@@ -133,6 +136,7 @@ struct mtk_i2c_compatible {
unsigned char dcm: 1;
unsigned char auto_restart: 1;
unsigned char aux_len_reg: 1;
+ unsigned char support_33bits: 1;
};
struct mtk_i2c {
@@ -182,6 +186,7 @@ static const struct mtk_i2c_compatible mt6577_compat = {
.dcm = 1,
.auto_restart = 0,
.aux_len_reg = 0,
+ .support_33bits = 0,
};
static const struct mtk_i2c_compatible mt6589_compat = {
@@ -190,6 +195,7 @@ static const struct mtk_i2c_compatible mt6589_compat = {
.dcm = 0,
.auto_restart = 0,
.aux_len_reg = 0,
+ .support_33bits = 0,
};
static const struct mtk_i2c_compatible mt8173_compat = {
@@ -198,6 +204,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
+ .support_33bits = 1,
};
static const struct of_device_id mtk_i2c_of_match[] = {
@@ -366,6 +373,11 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk,
return 0;
}
+static inline u32 mtk_i2c_set_4g_mode(dma_addr_t addr)
+{
+ return (addr & BIT_ULL(32)) ? I2C_DMA_4G_MODE : I2C_DMA_CLR_FLAG;
+}
+
static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
int num, int left_num)
{
@@ -373,6 +385,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
u16 start_reg;
u16 control_reg;
u16 restart_flag = 0;
+ u32 reg_4g_mode;
dma_addr_t rpaddr = 0;
dma_addr_t wpaddr = 0;
int ret;
@@ -439,6 +452,12 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
msgs->len, DMA_FROM_DEVICE);
if (dma_mapping_error(i2c->dev, rpaddr))
return -ENOMEM;
+
+ if (i2c->dev_comp->support_33bits) {
+ reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr);
+ writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
+ }
+
writel((u32)rpaddr, i2c->pdmabase + OFFSET_RX_MEM_ADDR);
writel(msgs->len, i2c->pdmabase + OFFSET_RX_LEN);
} else if (i2c->op == I2C_MASTER_WR) {
@@ -448,6 +467,12 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
msgs->len, DMA_TO_DEVICE);
if (dma_mapping_error(i2c->dev, wpaddr))
return -ENOMEM;
+
+ if (i2c->dev_comp->support_33bits) {
+ reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr);
+ writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE);
+ }
+
writel((u32)wpaddr, i2c->pdmabase + OFFSET_TX_MEM_ADDR);
writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN);
} else {
@@ -465,6 +490,15 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
msgs->len, DMA_TO_DEVICE);
return -ENOMEM;
}
+
+ if (i2c->dev_comp->support_33bits) {
+ reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr);
+ writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE);
+
+ reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr);
+ writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE);
+ }
+
writel((u32)wpaddr, i2c->pdmabase + OFFSET_TX_MEM_ADDR);
writel((u32)rpaddr, i2c->pdmabase + OFFSET_RX_MEM_ADDR);
writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN);
@@ -729,6 +763,14 @@ static int mtk_i2c_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (i2c->dev_comp->support_33bits) {
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(33));
+ if (ret) {
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
+ return ret;
+ }
+ }
+
ret = mtk_i2c_clock_enable(i2c);
if (ret) {
dev_err(&pdev->dev, "clock enable failed!\n");
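
A self-contained sketch of the 33-bit address split used above: the low
32 bits go into the DMA address register and bit 32 is routed through the
separate 4G-mode register. The two mode constants mirror the diff; the
sample addresses are made up.

#include <stdint.h>
#include <stdio.h>

#define I2C_DMA_4G_MODE		0x0001u
#define I2C_DMA_CLR_FLAG	0x0000u

/* mirrors mtk_i2c_set_4g_mode(): report whether bit 32 is set */
static uint32_t set_4g_mode(uint64_t addr)
{
	return (addr & (1ULL << 32)) ? I2C_DMA_4G_MODE : I2C_DMA_CLR_FLAG;
}

int main(void)
{
	uint64_t below = 0x7fff0000ULL;		/* below 4 GiB */
	uint64_t above = 0x17fff0000ULL;	/* bit 32 set */

	/* the cast models the truncated value written to the address reg */
	printf("reg=0x%08x mode=%u\n",
	       (unsigned)below, (unsigned)set_4g_mode(below));
	printf("reg=0x%08x mode=%u\n",
	       (unsigned)above, (unsigned)set_4g_mode(above));
	return 0;
}
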
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 32914ab42a19..46fb6c42934f 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -2,7 +2,7 @@
* (C) Copyright 2009-2010
* Nokia Siemens Networks, michael.lawnick.ext@nsn.com
*
- * Portions Copyright (C) 2010, 2011 Cavium Networks, Inc.
+ * Portions Copyright (C) 2010 - 2016 Cavium, Inc.
*
* This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
*
@@ -26,39 +26,48 @@
#define DRV_NAME "i2c-octeon"
-/* The previous out-of-tree version was implicitly version 1.0. */
-#define DRV_VERSION "2.0"
-
-/* register offsets */
-#define SW_TWSI 0x00
-#define TWSI_INT 0x10
+/* Register offsets */
+#define SW_TWSI 0x00
+#define TWSI_INT 0x10
/* Controller command patterns */
-#define SW_TWSI_V 0x8000000000000000ull
-#define SW_TWSI_EOP_TWSI_DATA 0x0C00000100000000ull
-#define SW_TWSI_EOP_TWSI_CTL 0x0C00000200000000ull
-#define SW_TWSI_EOP_TWSI_CLKCTL 0x0C00000300000000ull
-#define SW_TWSI_EOP_TWSI_STAT 0x0C00000300000000ull
-#define SW_TWSI_EOP_TWSI_RST 0x0C00000700000000ull
-#define SW_TWSI_OP_TWSI_CLK 0x0800000000000000ull
-#define SW_TWSI_R 0x0100000000000000ull
+#define SW_TWSI_V BIT_ULL(63) /* Valid bit */
+#define SW_TWSI_R BIT_ULL(56) /* Result or read bit */
+
+/* Controller opcode word (bits 60:57) */
+#define SW_TWSI_OP_SHIFT 57
+#define SW_TWSI_OP_TWSI_CLK (4ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_EOP (6ULL << SW_TWSI_OP_SHIFT) /* Extended opcode */
+
+/* Controller extended opcode word (bits 34:32) */
+#define SW_TWSI_EOP_SHIFT 32
+#define SW_TWSI_EOP_TWSI_DATA (SW_TWSI_OP_EOP | 1ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_CTL (SW_TWSI_OP_EOP | 2ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_CLKCTL (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_STAT (SW_TWSI_OP_EOP | 3ULL << SW_TWSI_EOP_SHIFT)
+#define SW_TWSI_EOP_TWSI_RST (SW_TWSI_OP_EOP | 7ULL << SW_TWSI_EOP_SHIFT)
/* Controller command and status bits */
-#define TWSI_CTL_CE 0x80
-#define TWSI_CTL_ENAB 0x40
-#define TWSI_CTL_STA 0x20
-#define TWSI_CTL_STP 0x10
-#define TWSI_CTL_IFLG 0x08
-#define TWSI_CTL_AAK 0x04
+#define TWSI_CTL_CE 0x80
+#define TWSI_CTL_ENAB 0x40 /* Bus enable */
+#define TWSI_CTL_STA 0x20 /* Master-mode start, HW clears when done */
+#define TWSI_CTL_STP 0x10 /* Master-mode stop, HW clears when done */
+#define TWSI_CTL_IFLG 0x08 /* HW event, SW writes 0 to ACK */
+#define TWSI_CTL_AAK 0x04 /* Assert ACK */
/* Some status values */
-#define STAT_START 0x08
-#define STAT_RSTART 0x10
-#define STAT_TXADDR_ACK 0x18
-#define STAT_TXDATA_ACK 0x28
-#define STAT_RXADDR_ACK 0x40
-#define STAT_RXDATA_ACK 0x50
-#define STAT_IDLE 0xF8
+#define STAT_START 0x08
+#define STAT_RSTART 0x10
+#define STAT_TXADDR_ACK 0x18
+#define STAT_TXDATA_ACK 0x28
+#define STAT_RXADDR_ACK 0x40
+#define STAT_RXDATA_ACK 0x50
+#define STAT_IDLE 0xF8
+
+/* TWSI_INT values */
+#define TWSI_INT_CORE_EN BIT_ULL(6)
+#define TWSI_INT_SDA_OVR BIT_ULL(8)
+#define TWSI_INT_SCL_OVR BIT_ULL(9)
struct octeon_i2c {
wait_queue_head_t queue;
@@ -66,23 +75,19 @@ struct octeon_i2c {
int irq;
u32 twsi_freq;
int sys_freq;
- resource_size_t twsi_phys;
void __iomem *twsi_base;
- resource_size_t regsize;
struct device *dev;
};
/**
- * octeon_i2c_write_sw - write an I2C core register.
- * @i2c: The struct octeon_i2c.
- * @eop_reg: Register selector.
- * @data: Value to be written.
+ * octeon_i2c_write_sw - write an I2C core register
+ * @i2c: The struct octeon_i2c
+ * @eop_reg: Register selector
+ * @data: Value to be written
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static void octeon_i2c_write_sw(struct octeon_i2c *i2c,
- u64 eop_reg,
- u8 data)
+static void octeon_i2c_write_sw(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
{
u64 tmp;
@@ -93,9 +98,9 @@ static void octeon_i2c_write_sw(struct octeon_i2c *i2c,
}
/**
- * octeon_i2c_read_sw - write an I2C core register.
- * @i2c: The struct octeon_i2c.
- * @eop_reg: Register selector.
+ * octeon_i2c_read_sw - read lower bits of an I2C core register
+ * @i2c: The struct octeon_i2c
+ * @eop_reg: Register selector
*
* Returns the data.
*
@@ -115,8 +120,8 @@ static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
/**
* octeon_i2c_write_int - write the TWSI_INT register
- * @i2c: The struct octeon_i2c.
- * @data: Value to be written.
+ * @i2c: The struct octeon_i2c
+ * @data: Value to be written
*/
static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
{
@@ -125,57 +130,52 @@ static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
}
/**
- * octeon_i2c_int_enable - enable the TS interrupt.
- * @i2c: The struct octeon_i2c.
+ * octeon_i2c_int_enable - enable the CORE interrupt
+ * @i2c: The struct octeon_i2c
*
* The interrupt will be asserted when there is non-STAT_IDLE state in
* the SW_TWSI_EOP_TWSI_STAT register.
*/
static void octeon_i2c_int_enable(struct octeon_i2c *i2c)
{
- octeon_i2c_write_int(i2c, 0x40);
+ octeon_i2c_write_int(i2c, TWSI_INT_CORE_EN);
}
-/**
- * octeon_i2c_int_disable - disable the TS interrupt.
- * @i2c: The struct octeon_i2c.
- */
+/* disable the CORE interrupt */
static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
{
+ /* clear TS/ST/IFLG events */
octeon_i2c_write_int(i2c, 0);
}
/**
- * octeon_i2c_unblock - unblock the bus.
- * @i2c: The struct octeon_i2c.
+ * octeon_i2c_unblock - unblock the bus
+ * @i2c: The struct octeon_i2c
*
- * If there was a reset while a device was driving 0 to bus,
- * bus is blocked. We toggle it free manually by some clock
- * cycles and send a stop.
+ * If there was a reset while a device was driving 0 to bus, bus is blocked.
+ * We toggle it free manually by some clock cycles and send a stop.
*/
static void octeon_i2c_unblock(struct octeon_i2c *i2c)
{
int i;
dev_dbg(i2c->dev, "%s\n", __func__);
+
for (i = 0; i < 9; i++) {
- octeon_i2c_write_int(i2c, 0x0);
+ octeon_i2c_write_int(i2c, 0);
udelay(5);
- octeon_i2c_write_int(i2c, 0x200);
+ octeon_i2c_write_int(i2c, TWSI_INT_SCL_OVR);
udelay(5);
}
- octeon_i2c_write_int(i2c, 0x300);
+ /* hand-crank a STOP */
+ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR | TWSI_INT_SCL_OVR);
udelay(5);
- octeon_i2c_write_int(i2c, 0x100);
+ octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR);
udelay(5);
- octeon_i2c_write_int(i2c, 0x0);
+ octeon_i2c_write_int(i2c, 0);
}
-/**
- * octeon_i2c_isr - the interrupt service routine.
- * @int: The irq, unused.
- * @dev_id: Our struct octeon_i2c.
- */
+/* interrupt service routine */
static irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
{
struct octeon_i2c *i2c = dev_id;
@@ -193,24 +193,20 @@ static int octeon_i2c_test_iflg(struct octeon_i2c *i2c)
}
/**
- * octeon_i2c_wait - wait for the IFLG to be set.
- * @i2c: The struct octeon_i2c.
+ * octeon_i2c_wait - wait for the IFLG to be set
+ * @i2c: The struct octeon_i2c
*
* Returns 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_wait(struct octeon_i2c *i2c)
{
- long result;
+ long time_left;
octeon_i2c_int_enable(i2c);
-
- result = wait_event_timeout(i2c->queue,
- octeon_i2c_test_iflg(i2c),
- i2c->adap.timeout);
-
+ time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
+ i2c->adap.timeout);
octeon_i2c_int_disable(i2c);
-
- if (result == 0) {
+ if (!time_left) {
dev_dbg(i2c->dev, "%s: timeout\n", __func__);
return -ETIMEDOUT;
}
@@ -219,18 +215,18 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
}
/**
- * octeon_i2c_start - send START to the bus.
- * @i2c: The struct octeon_i2c.
+ * octeon_i2c_start - send START to the bus
+ * @i2c: The struct octeon_i2c
*
* Returns 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_start(struct octeon_i2c *i2c)
{
- u8 data;
int result;
+ u8 data;
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB | TWSI_CTL_STA);
+ TWSI_CTL_ENAB | TWSI_CTL_STA);
result = octeon_i2c_wait(i2c);
if (result) {
@@ -243,7 +239,6 @@ static int octeon_i2c_start(struct octeon_i2c *i2c)
octeon_i2c_unblock(i2c);
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
TWSI_CTL_ENAB | TWSI_CTL_STA);
-
result = octeon_i2c_wait(i2c);
}
if (result)
@@ -259,34 +254,19 @@ static int octeon_i2c_start(struct octeon_i2c *i2c)
return 0;
}
-/**
- * octeon_i2c_stop - send STOP to the bus.
- * @i2c: The struct octeon_i2c.
- *
- * Returns 0 on success, otherwise a negative errno.
- */
-static int octeon_i2c_stop(struct octeon_i2c *i2c)
+/* send STOP to the bus */
+static void octeon_i2c_stop(struct octeon_i2c *i2c)
{
- u8 data;
-
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
TWSI_CTL_ENAB | TWSI_CTL_STP);
-
- data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
-
- if (data != STAT_IDLE) {
- dev_err(i2c->dev, "%s: bad status(0x%x)\n", __func__, data);
- return -EIO;
- }
- return 0;
}
/**
- * octeon_i2c_write - send data to the bus.
- * @i2c: The struct octeon_i2c.
- * @target: Target address.
- * @data: Pointer to the data to be sent.
- * @length: Length of the data.
+ * octeon_i2c_write - send data to the bus via low-level controller
+ * @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the data to be sent
+ * @length: Length of the data
*
* The address is sent over the bus, then the data.
*
@@ -311,6 +291,7 @@ static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
for (i = 0; i < length; i++) {
tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
+
if ((tmp != STAT_TXADDR_ACK) && (tmp != STAT_TXDATA_ACK)) {
dev_err(i2c->dev,
"%s: bad status before write (0x%x)\n",
@@ -330,20 +311,21 @@ static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
}
/**
- * octeon_i2c_read - receive data from the bus.
- * @i2c: The struct octeon_i2c.
- * @target: Target address.
- * @data: Pointer to the location to store the datae .
- * @length: Length of the data.
+ * octeon_i2c_read - receive data from the bus via low-level controller
+ * @i2c: The struct octeon_i2c
+ * @target: Target address
+ * @data: Pointer to the location to store the data
+ * @rlength: Length of the data
+ * @recv_len: True if the first received byte is the SMBus block length
*
* The address is sent over the bus, then the data is read.
*
* Returns 0 on success, otherwise a negative errno.
*/
static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
- u8 *data, int length)
+ u8 *data, u16 *rlength, bool recv_len)
{
- int i, result;
+ int i, result, length = *rlength;
u8 tmp;
if (length < 1)
@@ -353,7 +335,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
if (result)
return result;
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target<<1) | 1);
+ octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target << 1) | 1);
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
result = octeon_i2c_wait(i2c);
@@ -362,6 +344,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
for (i = 0; i < length; i++) {
tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
+
if ((tmp != STAT_RXDATA_ACK) && (tmp != STAT_RXADDR_ACK)) {
dev_err(i2c->dev,
"%s: bad status before read (0x%x)\n",
@@ -369,52 +352,59 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
return -EIO;
}
- if (i+1 < length)
+ if (i + 1 < length)
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB | TWSI_CTL_AAK);
+ TWSI_CTL_ENAB | TWSI_CTL_AAK);
else
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB);
+ TWSI_CTL_ENAB);
result = octeon_i2c_wait(i2c);
if (result)
return result;
data[i] = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_DATA);
+ if (recv_len && i == 0) {
+ if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
+ dev_err(i2c->dev,
+ "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
+ __func__, data[i]);
+ return -EPROTO;
+ }
+ length += data[i];
+ }
}
+ *rlength = length;
return 0;
}
/**
- * octeon_i2c_xfer - The driver's master_xfer function.
- * @adap: Pointer to the i2c_adapter structure.
- * @msgs: Pointer to the messages to be processed.
- * @num: Length of the MSGS array.
+ * octeon_i2c_xfer - The driver's master_xfer function
+ * @adap: Pointer to the i2c_adapter structure
+ * @msgs: Pointer to the messages to be processed
+ * @num: Length of the MSGS array
*
- * Returns the number of messages processed, or a negative errno on
- * failure.
+ * Returns the number of messages processed, or a negative errno on failure.
*/
-static int octeon_i2c_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs,
+static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
int num)
{
- struct i2c_msg *pmsg;
- int i;
- int ret = 0;
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ int i, ret = 0;
for (i = 0; ret == 0 && i < num; i++) {
- pmsg = &msgs[i];
+ struct i2c_msg *pmsg = &msgs[i];
+
dev_dbg(i2c->dev,
"Doing %s %d byte(s) to/from 0x%02x - %d of %d messages\n",
pmsg->flags & I2C_M_RD ? "read" : "write",
pmsg->len, pmsg->addr, i + 1, num);
if (pmsg->flags & I2C_M_RD)
ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
- pmsg->len);
+ &pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
else
ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf,
- pmsg->len);
+ pmsg->len);
}
octeon_i2c_stop(i2c);
@@ -423,7 +413,8 @@ static int octeon_i2c_xfer(struct i2c_adapter *adap,
static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
}
static const struct i2c_algorithm octeon_i2c_algo = {
@@ -438,10 +429,8 @@ static struct i2c_adapter octeon_i2c_ops = {
.timeout = HZ / 50,
};
-/**
- * octeon_i2c_setclock - Calculate and set clock divisors.
- */
-static int octeon_i2c_setclock(struct octeon_i2c *i2c)
+/* calculate and set clock divisors */
+static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
{
int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
@@ -449,8 +438,7 @@ static int octeon_i2c_setclock(struct octeon_i2c *i2c)
for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
/*
* An mdiv value of less than 2 seems to not work well
- * with ds1337 RTCs, so we constrain it to larger
- * values.
+ * with ds1337 RTCs, so we constrain it to larger values.
*/
for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) {
/*
@@ -460,6 +448,7 @@ static int octeon_i2c_setclock(struct octeon_i2c *i2c)
tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
tclk *= (1 << ndiv_idx);
thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
+
for (inc = 0; inc <= 1; inc++) {
thp_idx = thp_base + inc;
if (thp_idx < 5 || thp_idx > 0xff)
@@ -480,11 +469,9 @@ static int octeon_i2c_setclock(struct octeon_i2c *i2c)
}
octeon_i2c_write_sw(i2c, SW_TWSI_OP_TWSI_CLK, thp);
octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
-
- return 0;
}
-static int octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
+static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
{
u8 status;
int tries;
@@ -507,9 +494,10 @@ static int octeon_i2c_initlowlevel(struct octeon_i2c *i2c)
static int octeon_i2c_probe(struct platform_device *pdev)
{
- int irq, result = 0;
- struct octeon_i2c *i2c;
+ struct device_node *node = pdev->dev.of_node;
struct resource *res_mem;
+ struct octeon_i2c *i2c;
+ int irq, result = 0;
/* All adaptors have an irq. */
irq = platform_get_irq(pdev, 0);
@@ -518,31 +506,25 @@ static int octeon_i2c_probe(struct platform_device *pdev)
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c) {
- dev_err(&pdev->dev, "kzalloc failed\n");
result = -ENOMEM;
goto out;
}
i2c->dev = &pdev->dev;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (res_mem == NULL) {
- dev_err(i2c->dev, "found no memory resource\n");
- result = -ENXIO;
+ i2c->twsi_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(i2c->twsi_base)) {
+ result = PTR_ERR(i2c->twsi_base);
goto out;
}
- i2c->twsi_phys = res_mem->start;
- i2c->regsize = resource_size(res_mem);
/*
* "clock-rate" is a legacy binding, the official binding is
* "clock-frequency". Try the official one first and then
* fall back if it doesn't exist.
*/
- if (of_property_read_u32(pdev->dev.of_node,
- "clock-frequency", &i2c->twsi_freq) &&
- of_property_read_u32(pdev->dev.of_node,
- "clock-rate", &i2c->twsi_freq)) {
+ if (of_property_read_u32(node, "clock-frequency", &i2c->twsi_freq) &&
+ of_property_read_u32(node, "clock-rate", &i2c->twsi_freq)) {
dev_err(i2c->dev,
"no I2C 'clock-rate' or 'clock-frequency' property\n");
result = -ENXIO;
@@ -551,13 +533,6 @@ static int octeon_i2c_probe(struct platform_device *pdev)
i2c->sys_freq = octeon_get_io_clock_rate();
- if (!devm_request_mem_region(&pdev->dev, i2c->twsi_phys, i2c->regsize,
- res_mem->name)) {
- dev_err(i2c->dev, "request_mem_region failed\n");
- goto out;
- }
- i2c->twsi_base = devm_ioremap(&pdev->dev, i2c->twsi_phys, i2c->regsize);
-
init_waitqueue_head(&i2c->queue);
i2c->irq = irq;
@@ -569,21 +544,17 @@ static int octeon_i2c_probe(struct platform_device *pdev)
goto out;
}
- result = octeon_i2c_initlowlevel(i2c);
+ result = octeon_i2c_init_lowlevel(i2c);
if (result) {
dev_err(i2c->dev, "init low level failed\n");
goto out;
}
- result = octeon_i2c_setclock(i2c);
- if (result) {
- dev_err(i2c->dev, "clock init failed\n");
- goto out;
- }
+ octeon_i2c_set_clock(i2c);
i2c->adap = octeon_i2c_ops;
i2c->adap.dev.parent = &pdev->dev;
- i2c->adap.dev.of_node = pdev->dev.of_node;
+ i2c->adap.dev.of_node = node;
i2c_set_adapdata(&i2c->adap, i2c);
platform_set_drvdata(pdev, i2c);
@@ -592,8 +563,7 @@ static int octeon_i2c_probe(struct platform_device *pdev)
dev_err(i2c->dev, "failed to add adapter\n");
goto out;
}
- dev_info(i2c->dev, "version %s\n", DRV_VERSION);
-
+ dev_info(i2c->dev, "probed\n");
return 0;
out:
@@ -608,10 +578,8 @@ static int octeon_i2c_remove(struct platform_device *pdev)
return 0;
};
-static struct of_device_id octeon_i2c_match[] = {
- {
- .compatible = "cavium,octeon-3860-twsi",
- },
+static const struct of_device_id octeon_i2c_match[] = {
+ { .compatible = "cavium,octeon-3860-twsi", },
{},
};
MODULE_DEVICE_TABLE(of, octeon_i2c_match);
@@ -630,4 +598,3 @@ module_platform_driver(octeon_i2c_driver);
MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
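
The rewritten octeon_i2c_unblock() is the standard I2C bus-recovery
recipe; here is a user-space model of the same sequence, in which register
writes become prints and the udelay() spacing between steps is omitted:

#include <stdio.h>

/* stand-ins for the TWSI_INT override bits defined above */
#define SDA_OVR	(1u << 8)
#define SCL_OVR	(1u << 9)

static void write_int(unsigned int v)
{
	printf("TWSI_INT <- 0x%03x\n", v);
}

static void unblock(void)
{
	int i;

	/* nine clock pulses let a stuck slave finish shifting its byte */
	for (i = 0; i < 9; i++) {
		write_int(0);
		write_int(SCL_OVR);
	}
	/* then hand-crank a STOP with the same override sequence the
	 * driver uses */
	write_int(SDA_OVR | SCL_OVR);
	write_int(SDA_OVR);
	write_int(0);
}

int main(void)
{
	unblock();
	return 0;
}
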
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 93f2895383ee..23d1c167b5d7 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -85,8 +85,14 @@
/* SB800 constants */
#define SB800_PIIX4_SMB_IDX 0xcd6
-/* SB800 port is selected by bits 2:1 of the smb_en register (0x2c) */
+/*
+ * SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
+ * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
+ * Hudson-2/Bolton port is always selected by bits 2:1 of register 0x2f.
+ */
#define SB800_PIIX4_PORT_IDX 0x2c
+#define SB800_PIIX4_PORT_IDX_ALT 0x2e
+#define SB800_PIIX4_PORT_IDX_SEL 0x2f
#define SB800_PIIX4_PORT_IDX_MASK 0x06
/* insmod parameters */
@@ -136,8 +142,13 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
{ },
};
-/* SB800 globals */
+/*
+ * SB800 globals
+ * piix4_mutex_sb800 protects piix4_port_sel_sb800 and the pair
+ * of I/O ports at SB800_PIIX4_SMB_IDX.
+ */
static DEFINE_MUTEX(piix4_mutex_sb800);
+static u8 piix4_port_sel_sb800;
static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
" port 0", " port 2", " port 3", " port 4"
};
@@ -148,7 +159,7 @@ struct i2c_piix4_adapdata {
/* SB800 */
bool sb800_main;
- unsigned short port;
+ u8 port; /* Port number, shifted */
};
static int piix4_setup(struct pci_dev *PIIX4_dev,
@@ -254,7 +265,7 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id, u8 aux)
{
unsigned short piix4_smba;
- u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status;
+ u8 smba_en_lo, smba_en_hi, smb_en, smb_en_status, port_sel;
u8 i2ccfg, i2ccfg_offset = 0x10;
/* SB800 and later SMBus does not support forcing address */
@@ -334,6 +345,23 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
"SMBus Host Controller at 0x%x, revision %d\n",
piix4_smba, i2ccfg >> 4);
+ /* Find which register is used for port selection */
+ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ } else {
+ mutex_lock(&piix4_mutex_sb800);
+ outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
+ port_sel = inb_p(SB800_PIIX4_SMB_IDX + 1);
+ piix4_port_sel_sb800 = (port_sel & 0x01) ?
+ SB800_PIIX4_PORT_IDX_ALT :
+ SB800_PIIX4_PORT_IDX;
+ mutex_unlock(&piix4_mutex_sb800);
+ }
+
+ dev_info(&PIIX4_dev->dev,
+ "Using register 0x%02x for SMBus port selection\n",
+ (unsigned int)piix4_port_sel_sb800);
+
return piix4_smba;
}
@@ -563,12 +591,12 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
mutex_lock(&piix4_mutex_sb800);
- outb_p(SB800_PIIX4_PORT_IDX, SB800_PIIX4_SMB_IDX);
+ outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
port = adapdata->port;
- if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != (port << 1))
- outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | (port << 1),
+ if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
+ outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
SB800_PIIX4_SMB_IDX + 1);
retval = piix4_access(adap, addr, flags, read_write,
@@ -627,7 +655,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
static struct i2c_adapter *piix4_aux_adapter;
static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
- bool sb800_main, unsigned short port,
+ bool sb800_main, u8 port,
const char *name, struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
@@ -654,7 +682,7 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
adapdata->smba = smba;
adapdata->sb800_main = sb800_main;
- adapdata->port = port;
+ adapdata->port = port << 1;
/* set up the sysfs linkage to our parent device */
adap->dev.parent = &dev->dev;
@@ -790,7 +818,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
if (adapdata->smba) {
i2c_del_adapter(adap);
- if (adapdata->port == 0) {
+ if (adapdata->port == (0 << 1)) {
release_region(adapdata->smba, SMBIOSIZE);
if (adapdata->sb800_main)
release_region(SB800_PIIX4_SMB_IDX, 2);
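
The SB800 code above goes through an index/data port pair at
SB800_PIIX4_SMB_IDX; a user-space model with an array standing in for the
hidden register file (only the register offsets come from the diff, the
stored value is made up):

#include <stdio.h>

#define PORT_IDX	0x2c
#define PORT_IDX_ALT	0x2e
#define PORT_IDX_SEL	0x2f

static unsigned char regs[256];	/* models the indexed register file */
static unsigned char index_reg;

static void outb_idx(unsigned char v)	{ index_reg = v; }
static unsigned char inb_data(void)	{ return regs[index_reg]; }

int main(void)
{
	unsigned char sel, port_reg;

	regs[PORT_IDX_SEL] = 0x01;	/* pretend bit 0 is set */

	/* mirrors the detection above: bit 0 of register 0x2f decides
	 * which register carries the SMBus port selection */
	outb_idx(PORT_IDX_SEL);
	sel = inb_data();
	port_reg = (sel & 0x01) ? PORT_IDX_ALT : PORT_IDX;

	printf("port selection via register 0x%02x\n", port_reg);
	return 0;
}
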
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index fdcbdab808e9..23eaabb19f96 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -14,8 +14,12 @@
*
*/
+#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
@@ -24,6 +28,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
/* QUP Registers */
#define QUP_CONFIG 0x000
@@ -33,6 +38,7 @@
#define QUP_OPERATIONAL 0x018
#define QUP_ERROR_FLAGS 0x01c
#define QUP_ERROR_FLAGS_EN 0x020
+#define QUP_OPERATIONAL_MASK 0x028
#define QUP_HW_VERSION 0x030
#define QUP_MX_OUTPUT_CNT 0x100
#define QUP_OUT_FIFO_BASE 0x110
@@ -42,6 +48,7 @@
#define QUP_IN_FIFO_BASE 0x218
#define QUP_I2C_CLK_CTL 0x400
#define QUP_I2C_STATUS 0x404
+#define QUP_I2C_MASTER_GEN 0x408
/* QUP States and reset values */
#define QUP_RESET_STATE 0
@@ -51,6 +58,7 @@
#define QUP_STATE_VALID BIT(2)
#define QUP_I2C_MAST_GEN BIT(4)
+#define QUP_I2C_FLUSH BIT(6)
#define QUP_OPERATIONAL_RESET 0x000ff0
#define QUP_I2C_STATUS_RESET 0xfffffc
@@ -69,16 +77,22 @@
#define QUP_CLOCK_AUTO_GATE BIT(13)
#define I2C_MINI_CORE (2 << 8)
#define I2C_N_VAL 15
+#define I2C_N_VAL_V2 7
+
/* Most significant word offset in FIFO port */
#define QUP_MSW_SHIFT (I2C_N_VAL + 1)
/* Packing/Unpacking words in FIFOs, and IO modes */
#define QUP_OUTPUT_BLK_MODE (1 << 10)
+#define QUP_OUTPUT_BAM_MODE (3 << 10)
#define QUP_INPUT_BLK_MODE (1 << 12)
+#define QUP_INPUT_BAM_MODE (3 << 12)
+#define QUP_BAM_MODE (QUP_OUTPUT_BAM_MODE | QUP_INPUT_BAM_MODE)
#define QUP_UNPACK_EN BIT(14)
#define QUP_PACK_EN BIT(15)
#define QUP_REPACK_EN (QUP_UNPACK_EN | QUP_PACK_EN)
+#define QUP_V2_TAGS_EN 1
#define QUP_OUTPUT_BLOCK_SIZE(x)(((x) >> 0) & 0x03)
#define QUP_OUTPUT_FIFO_SIZE(x) (((x) >> 2) & 0x07)
@@ -90,6 +104,15 @@
#define QUP_TAG_DATA (2 << 8)
#define QUP_TAG_STOP (3 << 8)
#define QUP_TAG_REC (4 << 8)
+#define QUP_BAM_INPUT_EOT 0x93
+#define QUP_BAM_FLUSH_STOP 0x96
+
+/* QUP v2 tags */
+#define QUP_TAG_V2_START 0x81
+#define QUP_TAG_V2_DATAWR 0x82
+#define QUP_TAG_V2_DATAWR_STOP 0x83
+#define QUP_TAG_V2_DATARD 0x85
+#define QUP_TAG_V2_DATARD_STOP 0x87
/* Status, Error flags */
#define I2C_STATUS_WR_BUFFER_FULL BIT(0)
@@ -98,6 +121,36 @@
#define QUP_STATUS_ERROR_FLAGS 0x7c
#define QUP_READ_LIMIT 256
+#define SET_BIT 0x1
+#define RESET_BIT 0x0
+#define ONE_BYTE 0x1
+#define QUP_I2C_MX_CONFIG_DURING_RUN BIT(31)
+
+#define MX_TX_RX_LEN SZ_64K
+#define MX_BLOCKS (MX_TX_RX_LEN / QUP_READ_LIMIT)
+
+/* Max timeout in ms for 32k bytes */
+#define TOUT_MAX 300
+
+struct qup_i2c_block {
+ int count;
+ int pos;
+ int tx_tag_len;
+ int rx_tag_len;
+ int data_len;
+ u8 tags[6];
+};
+
+struct qup_i2c_tag {
+ u8 *start;
+ dma_addr_t addr;
+};
+
+struct qup_i2c_bam {
+ struct qup_i2c_tag tag;
+ struct dma_chan *dma;
+ struct scatterlist *sg;
+};
struct qup_i2c_dev {
struct device *dev;
@@ -114,6 +167,7 @@ struct qup_i2c_dev {
int in_blk_sz;
unsigned long one_byte_t;
+ struct qup_i2c_block blk;
struct i2c_msg *msg;
/* Current posion in user message buffer */
@@ -123,6 +177,19 @@ struct qup_i2c_dev {
/* QUP core errors */
u32 qup_err;
+ /* To check if this is the last msg */
+ bool is_last;
+
+ /* To configure when bus is in run state */
+ int config_run;
+
+ /* dma parameters */
+ bool is_dma;
+ struct dma_pool *dpool;
+ struct qup_i2c_tag start_tag;
+ struct qup_i2c_bam brx;
+ struct qup_i2c_bam btx;
+
struct completion xfer;
};
@@ -199,6 +266,14 @@ static int qup_i2c_poll_state(struct qup_i2c_dev *qup, u32 req_state)
return qup_i2c_poll_state_mask(qup, req_state, QUP_STATE_MASK);
}
+static void qup_i2c_flush(struct qup_i2c_dev *qup)
+{
+ u32 val = readl(qup->base + QUP_STATE);
+
+ val |= QUP_I2C_FLUSH;
+ writel(val, qup->base + QUP_STATE);
+}
+
static int qup_i2c_poll_state_valid(struct qup_i2c_dev *qup)
{
return qup_i2c_poll_state_mask(qup, 0, 0);
@@ -221,26 +296,62 @@ static int qup_i2c_change_state(struct qup_i2c_dev *qup, u32 state)
return 0;
}
-static int qup_i2c_wait_writeready(struct qup_i2c_dev *qup)
+/**
+ * qup_i2c_wait_ready - wait for a given number of bytes in the TX/RX path
+ * @qup: The qup_i2c_dev device
+ * @op: The bit/event to wait on
+ * @val: value of the bit to wait on, 0 or 1
+ * @len: The length the bytes to be transferred
+ */
+static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val,
+ int len)
{
unsigned long timeout;
u32 opflags;
u32 status;
+ u32 shift = __ffs(op);
- timeout = jiffies + HZ;
+ len *= qup->one_byte_t;
+ /* timeout after a wait of twice the max time */
+ timeout = jiffies + len * 4;
for (;;) {
opflags = readl(qup->base + QUP_OPERATIONAL);
status = readl(qup->base + QUP_I2C_STATUS);
- if (!(opflags & QUP_OUT_NOT_EMPTY) &&
- !(status & I2C_STATUS_BUS_ACTIVE))
- return 0;
+ if (((opflags & op) >> shift) == val) {
+ if ((op == QUP_OUT_NOT_EMPTY) && qup->is_last) {
+ if (!(status & I2C_STATUS_BUS_ACTIVE))
+ return 0;
+ } else {
+ return 0;
+ }
+ }
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- usleep_range(qup->one_byte_t, qup->one_byte_t * 2);
+ usleep_range(len, len * 2);
+ }
+}
+
+static void qup_i2c_set_write_mode_v2(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ /* Number of entries to shift out, including the tags */
+ int total = msg->len + qup->blk.tx_tag_len;
+
+ total |= qup->config_run;
+
+ if (total < qup->out_fifo_sz) {
+ /* FIFO mode */
+ writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
+ writel(total, qup->base + QUP_MX_WRITE_CNT);
+ } else {
+ /* BLOCK mode (transfer data on chunks) */
+ writel(QUP_OUTPUT_BLK_MODE | QUP_REPACK_EN,
+ qup->base + QUP_IO_MODE);
+ writel(total, qup->base + QUP_MX_OUTPUT_CNT);
}
}
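
The FIFO-versus-block decision in qup_i2c_set_write_mode_v2() boils down
to one comparison; a hedged sketch follows, where the FIFO size is an
assumed example value (the driver derives the real one from hardware):

#include <stdio.h>

enum io_mode { FIFO_MODE, BLOCK_MODE };

/* payload plus tag bytes that fit in the output FIFO go out in one
 * shot; anything larger is fed to the hardware in chunks */
static enum io_mode pick_mode(int msg_len, int tag_len, int fifo_sz)
{
	return (msg_len + tag_len < fifo_sz) ? FIFO_MODE : BLOCK_MODE;
}

int main(void)
{
	int fifo_sz = 64;	/* assumed size for illustration */

	printf("%s\n", pick_mode(12, 4, fifo_sz) == FIFO_MODE ?
	       "FIFO mode" : "block mode");
	printf("%s\n", pick_mode(300, 6, fifo_sz) == FIFO_MODE ?
	       "FIFO mode" : "block mode");
	return 0;
}
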
@@ -261,13 +372,45 @@ static void qup_i2c_set_write_mode(struct qup_i2c_dev *qup, struct i2c_msg *msg)
}
}
-static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int check_for_fifo_space(struct qup_i2c_dev *qup)
+{
+ int ret;
+
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ goto out;
+
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_FULL,
+ RESET_BIT, 4 * ONE_BYTE);
+ if (ret) {
+ /* FIFO is full, drain it out */
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto out;
+
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY,
+ RESET_BIT, 256 * ONE_BYTE);
+ if (ret) {
+ dev_err(qup->dev, "timeout waiting for TX FIFO to drain\n");
+ goto out;
+ }
+
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
u32 addr = msg->addr << 1;
u32 qup_tag;
- u32 opflags;
int idx;
u32 val;
+ int ret = 0;
if (qup->pos == 0) {
val = QUP_TAG_START | addr;
@@ -279,9 +422,9 @@ static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
while (qup->pos < msg->len) {
/* Check that there's space in the FIFO for our pair */
- opflags = readl(qup->base + QUP_OPERATIONAL);
- if (opflags & QUP_OUT_FULL)
- break;
+ ret = check_for_fifo_space(qup);
+ if (ret)
+ return ret;
if (qup->pos == msg->len - 1)
qup_tag = QUP_TAG_STOP;
@@ -300,11 +443,501 @@ static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
qup->pos++;
idx++;
}
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+
+ return ret;
}
-static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static void qup_i2c_set_blk_data(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ memset(&qup->blk, 0, sizeof(qup->blk));
+
+ qup->blk.data_len = msg->len;
+ qup->blk.count = (msg->len + QUP_READ_LIMIT - 1) / QUP_READ_LIMIT;
+
+ /* 4 bytes for first block and 2 writes for rest */
+ qup->blk.tx_tag_len = 4 + (qup->blk.count - 1) * 2;
+
+ /* There are 2 tag bytes that are read in to fifo for every block */
+ if (msg->flags & I2C_M_RD)
+ qup->blk.rx_tag_len = qup->blk.count * 2;
+}
+
+static int qup_i2c_send_data(struct qup_i2c_dev *qup, int tlen, u8 *tbuf,
+ int dlen, u8 *dbuf)
+{
+ u32 val = 0, idx = 0, pos = 0, i = 0, t;
+ int len = tlen + dlen;
+ u8 *buf = tbuf;
+ int ret = 0;
+
+ while (len > 0) {
+ ret = check_for_fifo_space(qup);
+ if (ret)
+ return ret;
+
+ t = (len >= 4) ? 4 : len;
+
+ while (idx < t) {
+ if (!i && (pos >= tlen)) {
+ buf = dbuf;
+ pos = 0;
+ i = 1;
+ }
+ val |= buf[pos++] << (idx++ * 8);
+ }
+
+ writel(val, qup->base + QUP_OUT_FIFO_BASE);
+ idx = 0;
+ val = 0;
+ len -= 4;
+ }
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+
+ return ret;
+}
+
+static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
+{
+ int data_len;
+
+ if (qup->blk.data_len > QUP_READ_LIMIT)
+ data_len = QUP_READ_LIMIT;
+ else
+ data_len = qup->blk.data_len;
+
+ return data_len;
+}
+
+static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
+ struct i2c_msg *msg, int is_dma)
+{
+ u16 addr = (msg->addr << 1) | ((msg->flags & I2C_M_RD) == I2C_M_RD);
+ int len = 0;
+ int data_len;
+
+ int last = (qup->blk.pos == (qup->blk.count - 1)) && (qup->is_last);
+
+ if (qup->blk.pos == 0) {
+ tags[len++] = QUP_TAG_V2_START;
+ tags[len++] = addr & 0xff;
+
+ if (msg->flags & I2C_M_TEN)
+ tags[len++] = addr >> 8;
+ }
+
+ /* Send _STOP commands for the last block */
+ if (last) {
+ if (msg->flags & I2C_M_RD)
+ tags[len++] = QUP_TAG_V2_DATARD_STOP;
+ else
+ tags[len++] = QUP_TAG_V2_DATAWR_STOP;
+ } else {
+ if (msg->flags & I2C_M_RD)
+ tags[len++] = QUP_TAG_V2_DATARD;
+ else
+ tags[len++] = QUP_TAG_V2_DATAWR;
+ }
+
+ data_len = qup_i2c_get_data_len(qup);
+
+ /* 0 implies 256 bytes */
+ if (data_len == QUP_READ_LIMIT)
+ tags[len++] = 0;
+ else
+ tags[len++] = data_len;
+
+ if ((msg->flags & I2C_M_RD) && last && is_dma) {
+ tags[len++] = QUP_BAM_INPUT_EOT;
+ tags[len++] = QUP_BAM_FLUSH_STOP;
+ }
+
+ return len;
+}
+
+static int qup_i2c_issue_xfer_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int data_len = 0, tag_len, index;
+ int ret;
+
+ tag_len = qup_i2c_set_tags(qup->blk.tags, qup, msg, 0);
+ index = msg->len - qup->blk.data_len;
+
+ /* only tags are written for read */
+ if (!(msg->flags & I2C_M_RD))
+ data_len = qup_i2c_get_data_len(qup);
+
+ ret = qup_i2c_send_data(qup, tag_len, qup->blk.tags,
+ data_len, &msg->buf[index]);
+ qup->blk.data_len -= data_len;
+
+ return ret;
+}
+
+static void qup_i2c_bam_cb(void *data)
+{
+ struct qup_i2c_dev *qup = data;
+
+ complete(&qup->xfer);
+}
+
+static int qup_sg_set_buf(struct scatterlist *sg, void *buf,
+ struct qup_i2c_tag *tg, unsigned int buflen,
+ struct qup_i2c_dev *qup, int map, int dir)
+{
+ int ret;
+
+ sg_set_buf(sg, buf, buflen);
+ ret = dma_map_sg(qup->dev, sg, 1, dir);
+ if (!ret)
+ return -EINVAL;
+
+ if (!map)
+ sg_dma_address(sg) = tg->addr + ((u8 *)buf - tg->start);
+
+ return 0;
+}
+
+static void qup_i2c_rel_dma(struct qup_i2c_dev *qup)
+{
+ if (qup->btx.dma)
+ dma_release_channel(qup->btx.dma);
+ if (qup->brx.dma)
+ dma_release_channel(qup->brx.dma);
+ qup->btx.dma = NULL;
+ qup->brx.dma = NULL;
+}
+
+static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
+{
+ int err;
+
+ if (!qup->btx.dma) {
+ qup->btx.dma = dma_request_slave_channel_reason(qup->dev, "tx");
+ if (IS_ERR(qup->btx.dma)) {
+ err = PTR_ERR(qup->btx.dma);
+ qup->btx.dma = NULL;
+ dev_err(qup->dev, "tx channel not available\n");
+ return err;
+ }
+ }
+
+ if (!qup->brx.dma) {
+ qup->brx.dma = dma_request_slave_channel_reason(qup->dev, "rx");
+ if (IS_ERR(qup->brx.dma)) {
+ dev_err(qup->dev, "rx channel not available\n");
+ err = PTR_ERR(qup->brx.dma);
+ qup->brx.dma = NULL;
+ qup_i2c_rel_dma(qup);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
+ int num)
+{
+ struct dma_async_tx_descriptor *txd, *rxd = NULL;
+ int ret = 0, idx = 0, limit = QUP_READ_LIMIT;
+ dma_cookie_t cookie_rx, cookie_tx;
+ u32 rx_nents = 0, tx_nents = 0, len, blocks, rem;
+ u32 i, tlen, tx_len, tx_buf = 0, rx_buf = 0, off = 0;
+ u8 *tags;
+
+ while (idx < num) {
+ blocks = (msg->len + limit) / limit;
+ rem = msg->len % limit;
+ tx_len = 0, len = 0, i = 0;
+
+ qup->is_last = (idx == (num - 1));
+
+ qup_i2c_set_blk_data(qup, msg);
+
+ if (msg->flags & I2C_M_RD) {
+ rx_nents += (blocks * 2) + 1;
+ tx_nents += 1;
+
+ while (qup->blk.pos < blocks) {
+ /* length set to '0' implies 256 bytes */
+ tlen = (i == (blocks - 1)) ? rem : 0;
+ tags = &qup->start_tag.start[off + len];
+ len += qup_i2c_set_tags(tags, qup, msg, 1);
+
+ /* scratch buf to read the start and len tags */
+ ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
+ &qup->brx.tag.start[0],
+ &qup->brx.tag,
+ 2, qup, 0, 0);
+
+ if (ret)
+ return ret;
+
+ ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
+ &msg->buf[limit * i],
+ NULL, tlen, qup,
+ 1, DMA_FROM_DEVICE);
+ if (ret)
+ return ret;
+
+ i++;
+ qup->blk.pos = i;
+ }
+ ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
+ &qup->start_tag.start[off],
+ &qup->start_tag, len, qup, 0, 0);
+ if (ret)
+ return ret;
+
+ off += len;
+ /* scratch buf to read the BAM EOT and FLUSH tags */
+ ret = qup_sg_set_buf(&qup->brx.sg[rx_buf++],
+ &qup->brx.tag.start[0],
+ &qup->brx.tag, 2,
+ qup, 0, 0);
+ if (ret)
+ return ret;
+ } else {
+ tx_nents += (blocks * 2);
+
+ while (qup->blk.pos < blocks) {
+ tlen = (i == (blocks - 1)) ? rem : 0;
+ tags = &qup->start_tag.start[off + tx_len];
+ len = qup_i2c_set_tags(tags, qup, msg, 1);
+
+ ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
+ tags,
+ &qup->start_tag, len,
+ qup, 0, 0);
+ if (ret)
+ return ret;
+
+ tx_len += len;
+ ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
+ &msg->buf[limit * i],
+ NULL, tlen, qup, 1,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+ i++;
+ qup->blk.pos = i;
+ }
+ off += tx_len;
+
+ if (idx == (num - 1)) {
+ len = 1;
+ if (rx_nents) {
+ qup->btx.tag.start[0] =
+ QUP_BAM_INPUT_EOT;
+ len++;
+ }
+ qup->btx.tag.start[len - 1] =
+ QUP_BAM_FLUSH_STOP;
+ ret = qup_sg_set_buf(&qup->btx.sg[tx_buf++],
+ &qup->btx.tag.start[0],
+ &qup->btx.tag, len,
+ qup, 0, 0);
+ if (ret)
+ return ret;
+ tx_nents += 1;
+ }
+ }
+ idx++;
+ msg++;
+ }
+
+ txd = dmaengine_prep_slave_sg(qup->btx.dma, qup->btx.sg, tx_nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
+ if (!txd) {
+ dev_err(qup->dev, "failed to get tx desc\n");
+ ret = -EINVAL;
+ goto desc_err;
+ }
+
+ if (!rx_nents) {
+ txd->callback = qup_i2c_bam_cb;
+ txd->callback_param = qup;
+ }
+
+ cookie_tx = dmaengine_submit(txd);
+ if (dma_submit_error(cookie_tx)) {
+ ret = -EINVAL;
+ goto desc_err;
+ }
+
+ dma_async_issue_pending(qup->btx.dma);
+
+ if (rx_nents) {
+ rxd = dmaengine_prep_slave_sg(qup->brx.dma, qup->brx.sg,
+ rx_nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!rxd) {
+ dev_err(qup->dev, "failed to get rx desc\n");
+ ret = -EINVAL;
+
+ /* abort TX descriptors */
+ dmaengine_terminate_all(qup->btx.dma);
+ goto desc_err;
+ }
+
+ rxd->callback = qup_i2c_bam_cb;
+ rxd->callback_param = qup;
+ cookie_rx = dmaengine_submit(rxd);
+ if (dma_submit_error(cookie_rx)) {
+ ret = -EINVAL;
+ goto desc_err;
+ }
+
+ dma_async_issue_pending(qup->brx.dma);
+ }
+
+ if (!wait_for_completion_timeout(&qup->xfer, TOUT_MAX * HZ)) {
+ dev_err(qup->dev, "normal trans timed out\n");
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret || qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+ msg--;
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
+ ret = -EIO;
+
+ if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+ dev_err(qup->dev, "change to run state timed out");
+ return ret;
+ }
+
+ if (rx_nents)
+ writel(QUP_BAM_INPUT_EOT,
+ qup->base + QUP_OUT_FIFO_BASE);
+
+ writel(QUP_BAM_FLUSH_STOP,
+ qup->base + QUP_OUT_FIFO_BASE);
+
+ qup_i2c_flush(qup);
+
+ /* wait for remaining interrupts to occur */
+ if (!wait_for_completion_timeout(&qup->xfer, HZ))
+ dev_err(qup->dev, "flush timed out\n");
+
+ qup_i2c_rel_dma(qup);
+ }
+ }
+
+ dma_unmap_sg(qup->dev, qup->btx.sg, tx_nents, DMA_TO_DEVICE);
+
+ if (rx_nents)
+ dma_unmap_sg(qup->dev, qup->brx.sg, rx_nents,
+ DMA_FROM_DEVICE);
+desc_err:
+ return ret;
+}
+
+static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int num)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret = 0;
+
+ enable_irq(qup->irq);
+ ret = qup_i2c_req_dma(qup);
+
+ if (ret)
+ goto out;
+
+ qup->bus_err = 0;
+ qup->qup_err = 0;
+
+ writel(0, qup->base + QUP_MX_INPUT_CNT);
+ writel(0, qup->base + QUP_MX_OUTPUT_CNT);
+
+ /* set BAM mode */
+ writel(QUP_REPACK_EN | QUP_BAM_MODE, qup->base + QUP_IO_MODE);
+
+ /* mask fifo irqs */
+ writel((0x3 << 8), qup->base + QUP_OPERATIONAL_MASK);
+
+ /* set RUN STATE */
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto out;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ qup->msg = msg;
+ ret = qup_i2c_bam_do_xfer(qup, qup->msg, num);
+out:
+ disable_irq(qup->irq);
+
+ qup->msg = NULL;
+ return ret;
+}
+
+static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
{
unsigned long left;
+ int ret = 0;
+
+ left = wait_for_completion_timeout(&qup->xfer, HZ);
+ if (!left) {
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = -ETIMEDOUT;
+ }
+
+ if (qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
+ ret = -EIO;
+ }
+ }
+
+ return ret;
+}
+
+static int qup_i2c_write_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ qup->msg = msg;
+ qup->pos = 0;
+ enable_irq(qup->irq);
+ qup_i2c_set_blk_data(qup, msg);
+ qup_i2c_set_write_mode_v2(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ do {
+ ret = qup_i2c_issue_xfer_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ goto err;
+
+ qup->blk.pos++;
+ } while (qup->blk.pos < qup->blk.count);
+
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, RESET_BIT, ONE_BYTE);
+
+err:
+ disable_irq(qup->irq);
+ qup->msg = NULL;
+
+ return ret;
+}
+
+static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
int ret;
qup->msg = msg;
@@ -325,30 +958,21 @@ static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
if (ret)
goto err;
- qup_i2c_issue_write(qup, msg);
-
- ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ ret = qup_i2c_issue_write(qup, msg);
if (ret)
goto err;
- left = wait_for_completion_timeout(&qup->xfer, HZ);
- if (!left) {
- writel(1, qup->base + QUP_SW_RESET);
- ret = -ETIMEDOUT;
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
goto err;
- }
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG)
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
- ret = -EIO;
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
goto err;
- }
} while (qup->pos < msg->len);
/* Wait for the outstanding data in the fifo to drain */
- ret = qup_i2c_wait_writeready(qup);
-
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, RESET_BIT, ONE_BYTE);
err:
disable_irq(qup->irq);
qup->msg = NULL;
@@ -370,6 +994,28 @@ static void qup_i2c_set_read_mode(struct qup_i2c_dev *qup, int len)
}
}
+static void qup_i2c_set_read_mode_v2(struct qup_i2c_dev *qup, int len)
+{
+ int tx_len = qup->blk.tx_tag_len;
+
+ len += qup->blk.rx_tag_len;
+ len |= qup->config_run;
+ tx_len |= qup->config_run;
+
+ if (len < qup->in_fifo_sz) {
+ /* FIFO mode */
+ writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
+ writel(tx_len, qup->base + QUP_MX_WRITE_CNT);
+ writel(len, qup->base + QUP_MX_READ_CNT);
+ } else {
+		/* BLOCK mode (transfer data in chunks) */
+ writel(QUP_INPUT_BLK_MODE | QUP_REPACK_EN,
+ qup->base + QUP_IO_MODE);
+ writel(tx_len, qup->base + QUP_MX_OUTPUT_CNT);
+ writel(len, qup->base + QUP_MX_INPUT_CNT);
+ }
+}
+
static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
u32 addr, len, val;
@@ -384,18 +1030,19 @@ static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
}
-static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
- u32 opflags;
u32 val = 0;
int idx;
+ int ret = 0;
for (idx = 0; qup->pos < msg->len; idx++) {
if ((idx & 1) == 0) {
			/* Check that the FIFO has data */
- opflags = readl(qup->base + QUP_OPERATIONAL);
- if (!(opflags & QUP_IN_NOT_EMPTY))
- break;
+ ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY,
+ SET_BIT, 4 * ONE_BYTE);
+ if (ret)
+ return ret;
/* Reading 2 words at time */
val = readl(qup->base + QUP_IN_FIFO_BASE);
@@ -405,18 +1052,94 @@ static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT;
}
}
+
+ return ret;
+}
+
+static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ u32 val;
+ int idx, pos = 0, ret = 0, total;
+
+ total = qup_i2c_get_data_len(qup);
+
+ /* 2 extra bytes for read tags */
+ while (pos < (total + 2)) {
+		/* Check that the FIFO has data */
+ ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY,
+ SET_BIT, 4 * ONE_BYTE);
+ if (ret) {
+			dev_err(qup->dev, "timeout for fifo not empty\n");
+ return ret;
+ }
+ val = readl(qup->base + QUP_IN_FIFO_BASE);
+
+ for (idx = 0; idx < 4; idx++, val >>= 8, pos++) {
+ /* first 2 bytes are tag bytes */
+ if (pos < 2)
+ continue;
+
+ if (pos >= (total + 2))
+ goto out;
+
+ msg->buf[qup->pos++] = val & 0xff;
+ }
+ }
+
+out:
+ qup->blk.data_len -= total;
+
+ return ret;
+}
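
qup_i2c_read_fifo_v2() drains the input FIFO one 32-bit word at a time and peels four bytes out of each word least-significant-byte first, discarding the two leading tag bytes. The same unpacking as a standalone sketch over a captured array of register reads (all names illustrative):

    #include <linux/types.h>

    /* Unpack 32-bit FIFO words into 'buf', skipping 'skip' leading tag
     * bytes (2 per block in the QUP v2 format); returns bytes copied. */
    static int example_unpack_fifo(const u32 *words, int nwords,
                                   u8 *buf, int len, int skip)
    {
            int i, pos = 0, out = 0;

            for (i = 0; i < nwords && out < len; i++) {
                    u32 val = words[i];
                    int b;

                    for (b = 0; b < 4 && out < len; b++, val >>= 8, pos++) {
                            if (pos < skip)
                                    continue;       /* tag byte, not payload */
                            buf[out++] = val & 0xff;
                    }
            }
            return out;
    }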
+
+static int qup_i2c_read_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ qup->msg = msg;
+ qup->pos = 0;
+ enable_irq(qup->irq);
+ qup_i2c_set_blk_data(qup, msg);
+ qup_i2c_set_read_mode_v2(qup, msg->len);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ do {
+ ret = qup_i2c_issue_xfer_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_read_fifo_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ qup->blk.pos++;
+ } while (qup->blk.pos < qup->blk.count);
+
+err:
+ disable_irq(qup->irq);
+ qup->msg = NULL;
+
+ return ret;
}
static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
- unsigned long left;
int ret;
qup->msg = msg;
qup->pos = 0;
enable_irq(qup->irq);
-
qup_i2c_set_read_mode(qup, msg->len);
ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
@@ -436,21 +1159,13 @@ static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
goto err;
do {
- left = wait_for_completion_timeout(&qup->xfer, HZ);
- if (!left) {
- writel(1, qup->base + QUP_SW_RESET);
- ret = -ETIMEDOUT;
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
goto err;
- }
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG)
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
- ret = -EIO;
+ ret = qup_i2c_read_fifo(qup, msg);
+ if (ret)
goto err;
- }
-
- qup_i2c_read_fifo(qup, msg);
} while (qup->pos < msg->len);
err:
@@ -513,6 +1228,87 @@ out:
return ret;
}
+static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret, len, idx = 0, use_dma = 0;
+
+ ret = pm_runtime_get_sync(qup->dev);
+ if (ret < 0)
+ goto out;
+
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
+ if (ret)
+ goto out;
+
+ /* Configure QUP as I2C mini core */
+ writel(I2C_MINI_CORE | I2C_N_VAL_V2, qup->base + QUP_CONFIG);
+ writel(QUP_V2_TAGS_EN, qup->base + QUP_I2C_MASTER_GEN);
+
+	if (qup->is_dma) {
+		/* All i2c_msgs should be transferred using either DMA or CPU */
+ for (idx = 0; idx < num; idx++) {
+ if (msgs[idx].len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len = (msgs[idx].len > qup->out_fifo_sz) ||
+ (msgs[idx].len > qup->in_fifo_sz);
+
+ if ((!is_vmalloc_addr(msgs[idx].buf)) && len) {
+ use_dma = 1;
+ } else {
+ use_dma = 0;
+ break;
+ }
+ }
+ }
+
+ do {
+ if (msgs[idx].len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (qup_i2c_poll_state_i2c_master(qup)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ qup->is_last = (idx == (num - 1));
+ if (idx)
+ qup->config_run = QUP_I2C_MX_CONFIG_DURING_RUN;
+ else
+ qup->config_run = 0;
+
+ reinit_completion(&qup->xfer);
+
+ if (use_dma) {
+ ret = qup_i2c_bam_xfer(adap, &msgs[idx], num);
+ } else {
+ if (msgs[idx].flags & I2C_M_RD)
+ ret = qup_i2c_read_one_v2(qup, &msgs[idx]);
+ else
+ ret = qup_i2c_write_one_v2(qup, &msgs[idx]);
+ }
+ } while ((idx++ < (num - 1)) && !use_dma && !ret);
+
+ if (!ret)
+ ret = qup_i2c_change_state(qup, QUP_RESET_STATE);
+
+ if (ret == 0)
+ ret = num;
+out:
+ pm_runtime_mark_last_busy(qup->dev);
+ pm_runtime_put_autosuspend(qup->dev);
+
+ return ret;
+}
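
The DMA gate in qup_i2c_xfer_v2() encodes a single rule: BAM is used only when every message in the transfer is larger than the FIFO and not vmalloc-backed (vmalloc buffers cannot be handed to dma_map_sg() directly); one ineligible message drops the whole transfer back to PIO. A compact restatement of that rule as a hypothetical helper, with zero-length messages assumed to be rejected separately as the driver does:

    #include <linux/i2c.h>
    #include <linux/mm.h>   /* is_vmalloc_addr() */

    /* Hypothetical helper mirroring the selection loop above. */
    static bool example_can_use_bam(struct i2c_msg *msgs, int num,
                                    size_t out_fifo_sz, size_t in_fifo_sz)
    {
            int i;

            for (i = 0; i < num; i++) {
                    if (is_vmalloc_addr(msgs[i].buf))
                            return false;
                    if (msgs[i].len <= out_fifo_sz && msgs[i].len <= in_fifo_sz)
                            return false;   /* small enough for the FIFO path */
            }
            return true;
    }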
+
static u32 qup_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
@@ -523,6 +1319,11 @@ static const struct i2c_algorithm qup_i2c_algo = {
.functionality = qup_i2c_func,
};
+static const struct i2c_algorithm qup_i2c_algo_v2 = {
+ .master_xfer = qup_i2c_xfer_v2,
+ .functionality = qup_i2c_func,
+};
+
/*
* The QUP block will issue a NACK and STOP on the bus when reaching
* the end of the read, the length of the read is specified as one byte
@@ -561,6 +1362,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
int ret, fs_div, hs_div;
int src_clk_freq;
u32 clk_freq = 100000;
+ int blocks;
qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
if (!qup)
@@ -572,6 +1374,68 @@ static int qup_i2c_probe(struct platform_device *pdev)
of_property_read_u32(node, "clock-frequency", &clk_freq);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) {
+ qup->adap.algo = &qup_i2c_algo;
+ qup->adap.quirks = &qup_i2c_quirks;
+ } else {
+ qup->adap.algo = &qup_i2c_algo_v2;
+ ret = qup_i2c_req_dma(qup);
+
+ if (ret == -EPROBE_DEFER)
+ goto fail_dma;
+ else if (ret != 0)
+ goto nodma;
+
+ blocks = (MX_BLOCKS << 1) + 1;
+ qup->btx.sg = devm_kzalloc(&pdev->dev,
+ sizeof(*qup->btx.sg) * blocks,
+ GFP_KERNEL);
+ if (!qup->btx.sg) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+ sg_init_table(qup->btx.sg, blocks);
+
+ qup->brx.sg = devm_kzalloc(&pdev->dev,
+ sizeof(*qup->brx.sg) * blocks,
+ GFP_KERNEL);
+ if (!qup->brx.sg) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+ sg_init_table(qup->brx.sg, blocks);
+
+ /* 2 tag bytes for each block + 5 for start, stop tags */
+ size = blocks * 2 + 5;
+ qup->dpool = dma_pool_create("qup_i2c-dma-pool", &pdev->dev,
+ size, 4, 0);
+
+ qup->start_tag.start = dma_pool_alloc(qup->dpool, GFP_KERNEL,
+ &qup->start_tag.addr);
+ if (!qup->start_tag.start) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+
+ qup->brx.tag.start = dma_pool_alloc(qup->dpool,
+ GFP_KERNEL,
+ &qup->brx.tag.addr);
+ if (!qup->brx.tag.start) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+
+ qup->btx.tag.start = dma_pool_alloc(qup->dpool,
+ GFP_KERNEL,
+ &qup->btx.tag.addr);
+ if (!qup->btx.tag.start) {
+ ret = -ENOMEM;
+ goto fail_dma;
+ }
+ qup->is_dma = true;
+ }
+
+nodma:
	/* We support frequencies up to FAST Mode (400 kHz) */
if (!clk_freq || clk_freq > 400000) {
dev_err(qup->dev, "clock frequency not supported %d\n",
@@ -667,10 +1531,10 @@ static int qup_i2c_probe(struct platform_device *pdev)
qup->out_blk_sz, qup->out_fifo_sz);
i2c_set_adapdata(&qup->adap, qup);
- qup->adap.algo = &qup_i2c_algo;
- qup->adap.quirks = &qup_i2c_quirks;
qup->adap.dev.parent = qup->dev;
qup->adap.dev.of_node = pdev->dev.of_node;
+ qup->is_last = true;
+
strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
@@ -689,6 +1553,11 @@ fail_runtime:
pm_runtime_set_suspended(qup->dev);
fail:
qup_i2c_disable_clocks(qup);
+fail_dma:
+ if (qup->btx.dma)
+ dma_release_channel(qup->btx.dma);
+ if (qup->brx.dma)
+ dma_release_channel(qup->brx.dma);
return ret;
}
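
The probe-time DMA bookkeeping above is sized by two small formulas: blocks = (MX_BLOCKS << 1) + 1 scatterlist entries, which reads as transmit plus receive blocks plus one spare, and a tag pool of blocks * 2 + 5 bytes (two tag bytes per block plus five for the start/stop tags). A worked example, assuming a hypothetical MX_BLOCKS of 12 purely for the arithmetic:

    /* Worked sizing example; EXAMPLE_MX_BLOCKS is an assumed value. */
    enum { EXAMPLE_MX_BLOCKS = 12 };

    static unsigned int example_sg_entries(void)
    {
            return (EXAMPLE_MX_BLOCKS << 1) + 1;    /* 12 tx + 12 rx + 1 = 25 */
    }

    static unsigned int example_tag_pool_bytes(void)
    {
            return example_sg_entries() * 2 + 5;    /* 25 * 2 + 5 = 55 */
    }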
@@ -696,6 +1565,18 @@ static int qup_i2c_remove(struct platform_device *pdev)
{
struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
+ if (qup->is_dma) {
+ dma_pool_free(qup->dpool, qup->start_tag.start,
+ qup->start_tag.addr);
+ dma_pool_free(qup->dpool, qup->brx.tag.start,
+ qup->brx.tag.addr);
+ dma_pool_free(qup->dpool, qup->btx.tag.start,
+ qup->btx.tag.addr);
+ dma_pool_destroy(qup->dpool);
+ dma_release_channel(qup->btx.dma);
+ dma_release_channel(qup->brx.dma);
+ }
+
disable_irq(qup->irq);
qup_i2c_disable_clocks(qup);
i2c_del_adapter(&qup->adap);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 1abeadc8ab79..68ecb5630ad5 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -611,7 +611,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (IS_ERR(priv->io))
return PTR_ERR(priv->io);
- priv->devtype = (enum rcar_i2c_type)of_match_device(rcar_i2c_dt_ids, dev)->data;
+ priv->devtype = (enum rcar_i2c_type)of_device_get_match_data(dev);
init_waitqueue_head(&priv->wait);
adap = &priv->adap;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index a0522fcc4ff8..929185a7296c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -805,9 +805,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->hw = &tegra20_i2c_hw;
if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_device(tegra_i2c_of_match, &pdev->dev);
- i2c_dev->hw = match->data;
+ i2c_dev->hw = of_device_get_match_data(&pdev->dev);
i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
"nvidia,tegra20-i2c-dvc");
} else if (pdev->id == 3) {
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 6efd20095d5d..74f54f2f471f 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -37,6 +37,8 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#define DRIVER_NAME "xiic-i2c"
@@ -66,6 +68,7 @@ enum xiic_endian {
* @endianness: big/little-endian byte order
*/
struct xiic_i2c {
+ struct device *dev;
void __iomem *base;
wait_queue_head_t wait;
struct i2c_adapter adap;
@@ -77,6 +80,7 @@ struct xiic_i2c {
struct i2c_msg *rx_msg;
int rx_pos;
enum xiic_endian endianness;
+ struct clk *clk;
};
@@ -164,6 +168,7 @@ struct xiic_i2c {
#define XIIC_RESET_MASK 0xAUL
+#define XIIC_PM_TIMEOUT 1000 /* ms */
/*
* The following constant is used for the device global interrupt enable
* register, to enable all interrupts for the device, this is the only bit
@@ -676,9 +681,13 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
+	err = pm_runtime_get_sync(i2c->dev);
+	if (err < 0) {
+		/* get_sync raises the usage count even on failure */
+		pm_runtime_put_noidle(i2c->dev);
+		return err;
+	}
+
err = xiic_busy(i2c);
if (err)
- return err;
+ goto out;
i2c->tx_msg = msgs;
i2c->nmsgs = num;
@@ -686,14 +695,20 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
xiic_start_xfer(i2c);
if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
- (i2c->state == STATE_DONE), HZ))
- return (i2c->state == STATE_DONE) ? num : -EIO;
- else {
+ (i2c->state == STATE_DONE), HZ)) {
+ err = (i2c->state == STATE_DONE) ? num : -EIO;
+ goto out;
+ } else {
i2c->tx_msg = NULL;
i2c->rx_msg = NULL;
i2c->nmsgs = 0;
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto out;
}
+out:
+ pm_runtime_mark_last_busy(i2c->dev);
+ pm_runtime_put_autosuspend(i2c->dev);
+ return err;
}
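
xiic_xfer() now brackets every transfer with the runtime-PM idiom: pm_runtime_get_sync() resumes the controller before it is touched, and the mark_last_busy()/put_autosuspend() pair lets it power back down XIIC_PM_TIMEOUT milliseconds after the last transfer. The general shape of the idiom, as a sketch with an illustrative device pointer:

    #include <linux/pm_runtime.h>

    /* Generic runtime-PM bracket around one hardware transaction. */
    static int example_do_transaction(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);     /* balance the usage count */
                    return ret;
            }

            /* ... talk to the hardware ... */
            ret = 0;

            pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
            pm_runtime_put_autosuspend(dev);        /* may idle off after the delay */
            return ret;
    }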
static u32 xiic_func(struct i2c_adapter *adap)
@@ -748,13 +763,28 @@ static int xiic_i2c_probe(struct platform_device *pdev)
mutex_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
+ i2c->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c->clk)) {
+ dev_err(&pdev->dev, "input clock not found.\n");
+ return PTR_ERR(i2c->clk);
+ }
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+ i2c->dev = &pdev->dev;
+ pm_runtime_enable(i2c->dev);
+ pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(i2c->dev);
+ pm_runtime_set_active(i2c->dev);
ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
xiic_process, IRQF_ONESHOT,
pdev->name, i2c);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
- return ret;
+ goto err_clk_dis;
}
/*
@@ -776,7 +806,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Failed to add adapter\n");
xiic_deinit(i2c);
- return ret;
+ goto err_clk_dis;
}
if (pdata) {
@@ -786,16 +816,30 @@ static int xiic_i2c_probe(struct platform_device *pdev)
}
return 0;
+
+err_clk_dis:
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(i2c->clk);
+ return ret;
}
static int xiic_i2c_remove(struct platform_device *pdev)
{
struct xiic_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
/* remove adapter & data */
i2c_del_adapter(&i2c->adap);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
xiic_deinit(i2c);
+ clk_disable_unprepare(i2c->clk);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -808,12 +852,42 @@ static const struct of_device_id xiic_of_match[] = {
MODULE_DEVICE_TABLE(of, xiic_of_match);
#endif
+static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xiic_i2c *i2c = platform_get_drvdata(pdev);
+
+ clk_disable(i2c->clk);
+
+ return 0;
+}
+
+static int __maybe_unused xiic_i2c_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct xiic_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_enable(i2c->clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable clock.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops xiic_dev_pm_ops = {
+	SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
+			   xiic_i2c_runtime_resume, NULL)
+};
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = xiic_i2c_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(xiic_of_match),
+ .pm = &xiic_dev_pm_ops,
},
};
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index 90e322959303..e33022e2d459 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -12,11 +12,11 @@
* GNU General Public License for more details.
*/
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
#include <linux/rwsem.h>
+#include <linux/slab.h>
#include "i2c-core.h"
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index ffe715d346d8..e584d88ee337 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -28,32 +28,32 @@
*/
#include <dt-bindings/i2c/i2c.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/acpi.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio.h>
-#include <linux/slab.h>
+#include <linux/hardirq.h>
#include <linux/i2c.h>
-#include <linux/init.h>
#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/completion.h>
-#include <linux/hardirq.h>
-#include <linux/irqflags.h>
-#include <linux/rwsem.h>
-#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
-#include <linux/acpi.h>
-#include <linux/jump_label.h>
-#include <asm/uaccess.h>
-#include <linux/err.h>
#include <linux/property.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
#include "i2c-core.h"
@@ -73,6 +73,7 @@ static struct device_type i2c_client_type;
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
static struct static_key i2c_trace_msg = STATIC_KEY_INIT_FALSE;
+static bool is_registered;
void i2c_transfer_trace_reg(void)
{
@@ -524,22 +525,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
-
-/* uevent helps with hotplug: modprobe -q $(MODALIAS) */
static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_client *client = to_i2c_client(dev);
int rc;
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
- if (add_uevent_var(env, "MODALIAS=%s%s",
- I2C_MODULE_PREFIX, client->name))
- return -ENOMEM;
- dev_dbg(dev, "uevent\n");
- return 0;
+ return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name);
}
/* i2c bus recovery routines */
@@ -1529,7 +1524,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
int res = 0;
/* Can't register until after driver model init */
- if (unlikely(WARN_ON(!i2c_bus_type.p))) {
+ if (WARN_ON(!is_registered)) {
res = -EAGAIN;
goto out_list;
}
@@ -1926,7 +1921,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
int res;
/* Can't register until after driver model init */
- if (unlikely(WARN_ON(!i2c_bus_type.p)))
+ if (WARN_ON(!is_registered))
return -EAGAIN;
/* add the driver to the list of i2c drivers in the driver core */
@@ -2104,6 +2099,9 @@ static int __init i2c_init(void)
retval = bus_register(&i2c_bus_type);
if (retval)
return retval;
+
+ is_registered = true;
+
#ifdef CONFIG_I2C_COMPAT
i2c_adapter_compat_class = class_compat_register("i2c-adapter");
if (!i2c_adapter_compat_class) {
@@ -2125,6 +2123,7 @@ class_err:
class_compat_unregister(i2c_adapter_compat_class);
bus_err:
#endif
+ is_registered = false;
bus_unregister(&i2c_bus_type);
return retval;
}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 2413ec9f8207..0b1108d3c2f3 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -22,17 +22,17 @@
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/notifier.h>
#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/i2c.h>
#include <linux/i2c-dev.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
/*
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 00fc5b1c7b66..d4022878b2f0 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -19,13 +19,13 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/acpi.h>
+#include <linux/slab.h>
/* multiplexer per channel data */
struct i2c_mux_priv {
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 94765a81970d..abb55d3e76f3 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -15,14 +15,14 @@
* GNU General Public License for more details.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
#include <linux/i2c.h>
#include <linux/i2c-smbus.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
struct i2c_smbus_alert {
unsigned int alert_edge_triggered:1;
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index af2a94e1140b..06af583d5101 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -17,13 +17,13 @@
#define DEBUG 1
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
#define MAX_CHIPS 10
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index f06b0e24673b..e280c8ecc0b5 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -72,4 +72,13 @@ config I2C_MUX_REG
This driver can also be built as a module. If so, the module
will be called i2c-mux-reg.
+config I2C_DEMUX_PINCTRL
+ tristate "pinctrl-based I2C demultiplexer"
+ depends on PINCTRL && OF
+ select OF_DYNAMIC
+ help
+ If you say yes to this option, support will be included for an I2C
+ demultiplexer that uses the pinctrl subsystem. This is useful if you
+ want to change the I2C master at run-time depending on features.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index e89799b76a92..7c267c29b191 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -3,6 +3,8 @@
obj-$(CONFIG_I2C_ARB_GPIO_CHALLENGE) += i2c-arb-gpio-challenge.o
+obj-$(CONFIG_I2C_DEMUX_PINCTRL) += i2c-demux-pinctrl.o
+
obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
new file mode 100644
index 000000000000..8de073aed001
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -0,0 +1,291 @@
+/*
+ * Pinctrl based I2C DeMultiplexer
+ *
+ * Copyright (C) 2015-16 by Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
+ * Copyright (C) 2015-16 by Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ *
+ * See the bindings doc for DTS setup and the sysfs doc for usage information.
+ * (look for filenames containing 'i2c-demux-pinctrl' in Documentation/)
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+struct i2c_demux_pinctrl_chan {
+ struct device_node *parent_np;
+ struct i2c_adapter *parent_adap;
+ struct of_changeset chgset;
+};
+
+struct i2c_demux_pinctrl_priv {
+ int cur_chan;
+ int num_chan;
+ struct device *dev;
+ const char *bus_name;
+ struct i2c_adapter cur_adap;
+ struct i2c_algorithm algo;
+ struct i2c_demux_pinctrl_chan chan[];
+};
+
+static struct property status_okay = { .name = "status", .length = 3, .value = "ok" };
+
+static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
+ struct i2c_adapter *parent = priv->chan[priv->cur_chan].parent_adap;
+
+ return __i2c_transfer(parent, msgs, num);
+}
+
+static u32 i2c_demux_functionality(struct i2c_adapter *adap)
+{
+ struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
+ struct i2c_adapter *parent = priv->chan[priv->cur_chan].parent_adap;
+
+ return parent->algo->functionality(parent);
+}
+
+static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 new_chan)
+{
+ struct i2c_adapter *adap;
+ struct pinctrl *p;
+ int ret;
+
+ ret = of_changeset_apply(&priv->chan[new_chan].chgset);
+ if (ret)
+ goto err;
+
+ adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
+ if (!adap) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ goto err_with_put;
+ }
+
+ priv->chan[new_chan].parent_adap = adap;
+ priv->cur_chan = new_chan;
+
+ /* Now fill out current adapter structure. cur_chan must be up to date */
+ priv->algo.master_xfer = i2c_demux_master_xfer;
+ priv->algo.functionality = i2c_demux_functionality;
+
+ snprintf(priv->cur_adap.name, sizeof(priv->cur_adap.name),
+ "i2c-demux (master i2c-%d)", i2c_adapter_id(adap));
+ priv->cur_adap.owner = THIS_MODULE;
+ priv->cur_adap.algo = &priv->algo;
+ priv->cur_adap.algo_data = priv;
+ priv->cur_adap.dev.parent = priv->dev;
+ priv->cur_adap.class = adap->class;
+ priv->cur_adap.retries = adap->retries;
+ priv->cur_adap.timeout = adap->timeout;
+ priv->cur_adap.quirks = adap->quirks;
+ priv->cur_adap.dev.of_node = priv->dev->of_node;
+ ret = i2c_add_adapter(&priv->cur_adap);
+ if (ret < 0)
+ goto err_with_put;
+
+ return 0;
+
+ err_with_put:
+ i2c_put_adapter(adap);
+ err:
+ dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
+ return ret;
+}
+
+static int i2c_demux_deactivate_master(struct i2c_demux_pinctrl_priv *priv)
+{
+ int ret, cur = priv->cur_chan;
+
+ if (cur < 0)
+ return 0;
+
+ i2c_del_adapter(&priv->cur_adap);
+ i2c_put_adapter(priv->chan[cur].parent_adap);
+
+ ret = of_changeset_revert(&priv->chan[cur].chgset);
+
+ priv->chan[cur].parent_adap = NULL;
+ priv->cur_chan = -EINVAL;
+
+ return ret;
+}
+
+static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_chan)
+{
+ int ret;
+
+ if (new_chan == priv->cur_chan)
+ return 0;
+
+ ret = i2c_demux_deactivate_master(priv);
+ if (ret)
+ return ret;
+
+ return i2c_demux_activate_master(priv, new_chan);
+}
+
+static ssize_t available_masters_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+ int count = 0, i;
+
+ for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c",
+ i, priv->chan[i].parent_np->full_name,
+ i == priv->num_chan - 1 ? '\n' : ' ');
+
+ return count;
+}
+static DEVICE_ATTR_RO(available_masters);
+
+static ssize_t current_master_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", priv->cur_chan);
+}
+
+static ssize_t current_master_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val >= priv->num_chan)
+ return -EINVAL;
+
+ ret = i2c_demux_change_master(priv, val);
+
+ return ret < 0 ? ret : count;
+}
+static DEVICE_ATTR_RW(current_master);
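
Together these two attributes form the driver's whole userspace interface: reading available_masters lists the selectable parents as "index:node-name" pairs, and writing an index to current_master runs i2c_demux_change_master(). A minimal userspace sketch; the sysfs path is an assumption and depends on the platform device name:

    #include <stdio.h>

    /* Hypothetical path; substitute the real platform device directory. */
    #define DEMUX_SYSFS "/sys/devices/platform/i2c-demux/current_master"

    int main(void)
    {
            FILE *f = fopen(DEMUX_SYSFS, "w");

            if (!f)
                    return 1;
            fprintf(f, "1\n");              /* select parent master #1 */
            return fclose(f) ? 1 : 0;
    }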
+
+static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct i2c_demux_pinctrl_priv *priv;
+ int num_chan, i, j, err;
+
+ num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL);
+ if (num_chan < 2) {
+ dev_err(&pdev->dev, "Need at least two I2C masters to switch\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
+ + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_chan; i++) {
+ struct device_node *adap_np;
+
+ adap_np = of_parse_phandle(np, "i2c-parent", i);
+ if (!adap_np) {
+ dev_err(&pdev->dev, "can't get phandle for parent %d\n", i);
+ err = -ENOENT;
+ goto err_rollback;
+ }
+ priv->chan[i].parent_np = adap_np;
+
+ of_changeset_init(&priv->chan[i].chgset);
+ of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay);
+ }
+
+ priv->num_chan = num_chan;
+ priv->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, priv);
+
+ /* switch to first parent as active master */
+ i2c_demux_activate_master(priv, 0);
+
+ err = device_create_file(&pdev->dev, &dev_attr_available_masters);
+ if (err)
+ goto err_rollback;
+
+ err = device_create_file(&pdev->dev, &dev_attr_current_master);
+ if (err)
+ goto err_rollback_available;
+
+ return 0;
+
+err_rollback_available:
+ device_remove_file(&pdev->dev, &dev_attr_available_masters);
+err_rollback:
+ for (j = 0; j < i; j++) {
+ of_node_put(priv->chan[j].parent_np);
+ of_changeset_destroy(&priv->chan[j].chgset);
+ }
+
+ return err;
+}
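
The probe and remove paths above walk the full of_changeset lifecycle: of_changeset_init() plus of_changeset_update_property() record the pending status = "ok" edit per parent node, of_changeset_apply()/of_changeset_revert() flip it live when masters are switched, and of_changeset_destroy() releases the records on teardown. A condensed sketch of the same lifecycle against a single node, calling the changeset API the way this driver does (node pointer and property are illustrative):

    #include <linux/of.h>

    static struct property example_status_okay = {
            .name = "status", .length = 3, .value = "ok",
    };

    /* Record, apply, revert and free one device-tree edit. */
    static int example_toggle_status(struct device_node *np)
    {
            struct of_changeset cs;
            int ret;

            of_changeset_init(&cs);
            ret = of_changeset_update_property(&cs, np, &example_status_okay);
            if (!ret)
                    ret = of_changeset_apply(&cs);  /* edit becomes visible */
            if (!ret)
                    ret = of_changeset_revert(&cs); /* and is undone again */

            of_changeset_destroy(&cs);
            return ret;
    }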
+
+static int i2c_demux_pinctrl_remove(struct platform_device *pdev)
+{
+ struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev);
+ int i;
+
+ device_remove_file(&pdev->dev, &dev_attr_current_master);
+ device_remove_file(&pdev->dev, &dev_attr_available_masters);
+
+ i2c_demux_deactivate_master(priv);
+
+ for (i = 0; i < priv->num_chan; i++) {
+ of_node_put(priv->chan[i].parent_np);
+ of_changeset_destroy(&priv->chan[i].chgset);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id i2c_demux_pinctrl_of_match[] = {
+ { .compatible = "i2c-demux-pinctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_demux_pinctrl_of_match);
+
+static struct platform_driver i2c_demux_pinctrl_driver = {
+ .driver = {
+ .name = "i2c-demux-pinctrl",
+ .of_match_table = i2c_demux_pinctrl_of_match,
+ },
+ .probe = i2c_demux_pinctrl_probe,
+ .remove = i2c_demux_pinctrl_remove,
+};
+module_platform_driver(i2c_demux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C demux driver");
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-demux-pinctrl");
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 696b6c1ec940..f94baadbf424 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -531,14 +531,9 @@ static const struct hpt_info hpt371n = {
.timings = &hpt37x_timings
};
-static int check_in_drive_list(ide_drive_t *drive, const char **list)
+static bool check_in_drive_list(ide_drive_t *drive, const char **list)
{
- char *m = (char *)&drive->id[ATA_ID_PROD];
-
- while (*list)
- if (!strcmp(*list++, m))
- return 1;
- return 0;
+ return match_string(list, -1, (char *)&drive->id[ATA_ID_PROD]) >= 0;
}
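
match_string() scans an array of strings (NULL-terminated when the count is -1) and returns the matching index or a negative errno, which is what lets the open-coded strcmp() loop above collapse into a one-liner. A quick usage sketch with illustrative names:

    #include <linux/string.h>
    #include <linux/types.h>

    static const char * const example_models[] = { "alpha", "beta", NULL };

    /* True when 'name' appears in the NULL-terminated list. */
    static bool example_in_list(const char *name)
    {
            return match_string(example_models, -1, name) >= 0;
    }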
static struct hpt_info *hpt3xx_get_info(struct device *dev)
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 9f0a48e39b8a..80e933b296f6 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -451,7 +451,7 @@ err_free:
return ret;
}
-static const struct ide_port_info icside_v6_port_info __initconst = {
+static const struct ide_port_info icside_v6_port_info = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 8012e43bf8f6..46427ea01753 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -325,6 +325,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
clk_enable(clk);
rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
/* NOTE: round *down* to meet minimum timings; we count in clocks */
ideclk_period = 1000000000UL / rate;
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 9ad014a7afc7..b33646be699c 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -28,7 +28,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#endif
#define DRV_NAME "pdc202xx_new"
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 96a345248224..7f0434f7e486 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -40,7 +40,6 @@
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/ide.h>
-#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/sections.h>
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cd4510a63375..c6935de425fa 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -65,7 +65,7 @@
#include <asm/mwait.h>
#include <asm/msr.h>
-#define INTEL_IDLE_VERSION "0.4"
+#define INTEL_IDLE_VERSION "0.4.1"
#define PREFIX "intel_idle: "
static struct cpuidle_driver intel_idle_driver = {
@@ -660,6 +660,35 @@ static struct cpuidle_state skl_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state skx_cstates[] = {
+ {
+ .name = "C1-SKX",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C1E-SKX",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-SKX",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 600,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
static struct cpuidle_state atom_cstates[] = {
{
.name = "C1E-ATM",
@@ -716,6 +745,26 @@ static struct cpuidle_state avn_cstates[] = {
{
.enter = NULL }
};
+static struct cpuidle_state knl_cstates[] = {
+ {
+ .name = "C1-KNL",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 1,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .name = "C6-KNL",
+ .desc = "MWAIT 0x10",
+ .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 120,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze },
+ {
+ .enter = NULL }
+};
/**
* intel_idle
@@ -798,8 +847,11 @@ static int cpu_hotplug_notify(struct notifier_block *n,
* driver in this case
*/
dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
- if (!dev->registered)
- intel_idle_cpu_init(hotcpu);
+ if (dev->registered)
+ break;
+
+ if (intel_idle_cpu_init(hotcpu))
+ return NOTIFY_BAD;
break;
}
@@ -884,12 +936,20 @@ static const struct idle_cpu idle_cpu_skl = {
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_skx = {
+ .state_table = skx_cstates,
+ .disable_promotion_to_c1e = true,
+};
static const struct idle_cpu idle_cpu_avn = {
.state_table = avn_cstates,
.disable_promotion_to_c1e = true,
};
+static const struct idle_cpu idle_cpu_knl = {
+ .state_table = knl_cstates,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -921,6 +981,10 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x56, idle_cpu_bdw),
ICPU(0x4e, idle_cpu_skl),
ICPU(0x5e, idle_cpu_skl),
+ ICPU(0x8e, idle_cpu_skl),
+ ICPU(0x9e, idle_cpu_skl),
+ ICPU(0x55, idle_cpu_skx),
+ ICPU(0x57, idle_cpu_knl),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -962,22 +1026,15 @@ static int __init intel_idle_probe(void)
icpu = (const struct idle_cpu *)id->driver_data;
cpuidle_state_table = icpu->state_table;
- if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
- lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
- else
- on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
-
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
- pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
- lapic_timer_reliable_states);
return 0;
}
/*
* intel_idle_cpuidle_devices_uninit()
- * unregister, free cpuidle_devices
+ * Unregisters the cpuidle devices.
*/
static void intel_idle_cpuidle_devices_uninit(void)
{
@@ -988,49 +1045,102 @@ static void intel_idle_cpuidle_devices_uninit(void)
dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
cpuidle_unregister_device(dev);
}
-
- free_percpu(intel_idle_cpuidle_devices);
- return;
}
/*
- * intel_idle_state_table_update()
- *
- * Update the default state_table for this CPU-id
+ * ivt_idle_state_table_update(void)
*
- * Currently used to access tuned IVT multi-socket targets
+ * Tune IVT multi-socket targets
* Assumption: num_sockets == (max_package_num + 1)
*/
-void intel_idle_state_table_update(void)
+static void ivt_idle_state_table_update(void)
{
/* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
- if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
- int cpu, package_num, num_sockets = 1;
-
- for_each_online_cpu(cpu) {
- package_num = topology_physical_package_id(cpu);
- if (package_num + 1 > num_sockets) {
- num_sockets = package_num + 1;
-
- if (num_sockets > 4) {
- cpuidle_state_table = ivt_cstates_8s;
- return;
- }
+ int cpu, package_num, num_sockets = 1;
+
+ for_each_online_cpu(cpu) {
+ package_num = topology_physical_package_id(cpu);
+ if (package_num + 1 > num_sockets) {
+ num_sockets = package_num + 1;
+
+ if (num_sockets > 4) {
+ cpuidle_state_table = ivt_cstates_8s;
+ return;
}
}
+ }
+
+ if (num_sockets > 2)
+ cpuidle_state_table = ivt_cstates_4s;
- if (num_sockets > 2)
- cpuidle_state_table = ivt_cstates_4s;
- /* else, 1 and 2 socket systems use default ivt_cstates */
+ /* else, 1 and 2 socket systems use default ivt_cstates */
+}
+
+/*
+ * sklh_idle_state_table_update(void)
+ *
+ * On SKL-H (model 0x5e) disable C8 and C9 if:
+ * C10 is enabled and SGX disabled
+ */
+static void sklh_idle_state_table_update(void)
+{
+ unsigned long long msr;
+ unsigned int eax, ebx, ecx, edx;
+
+ /* if PC10 disabled via cmdline intel_idle.max_cstate=7 or shallower */
+ if (max_cstate <= 7)
+ return;
+
+ /* if PC10 not present in CPUID.MWAIT.EDX */
+ if ((mwait_substates & (0xF << 28)) == 0)
+ return;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr);
+
+ /* PC10 is not enabled in PKG C-state limit */
+ if ((msr & 0xF) != 8)
+ return;
+
+ ecx = 0;
+ cpuid(7, &eax, &ebx, &ecx, &edx);
+
+ /* if SGX is present */
+ if (ebx & (1 << 2)) {
+
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+
+ /* if SGX is enabled */
+ if (msr & (1 << 18))
+ return;
+ }
+
+ skl_cstates[5].disabled = 1; /* C8-SKL */
+ skl_cstates[6].disabled = 1; /* C9-SKL */
+}
+
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ */
+
+static void intel_idle_state_table_update(void)
+{
+ switch (boot_cpu_data.x86_model) {
+
+ case 0x3e: /* IVT */
+ ivt_idle_state_table_update();
+ break;
+ case 0x5e: /* SKL-H */
+ sklh_idle_state_table_update();
+ break;
}
- return;
}
/*
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
*/
-static int __init intel_idle_cpuidle_driver_init(void)
+static void __init intel_idle_cpuidle_driver_init(void)
{
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
@@ -1063,6 +1173,14 @@ static int __init intel_idle_cpuidle_driver_init(void)
if (num_substates == 0)
continue;
+ /* if state marked as disabled, skip it */
+ if (cpuidle_state_table[cstate].disabled != 0) {
+			pr_debug(PREFIX "state %s is disabled\n",
+ cpuidle_state_table[cstate].name);
+ continue;
+ }
+
if (((mwait_cstate + 1) > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
@@ -1074,18 +1192,10 @@ static int __init intel_idle_cpuidle_driver_init(void)
drv->state_count += 1;
}
- if (icpu->auto_demotion_disable_flags)
- on_each_cpu(auto_demotion_disable, NULL, 1);
-
if (icpu->byt_auto_demotion_disable_flag) {
wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0);
wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0);
}
-
- if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */
- on_each_cpu(c1e_promotion_disable, NULL, 1);
-
- return 0;
}
@@ -1104,7 +1214,6 @@ static int intel_idle_cpu_init(int cpu)
if (cpuidle_register_device(dev)) {
pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
- intel_idle_cpuidle_devices_uninit();
return -EIO;
}
@@ -1129,40 +1238,51 @@ static int __init intel_idle_init(void)
if (retval)
return retval;
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
intel_idle_cpuidle_driver_init();
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
drv ? drv->name : "none");
+ free_percpu(intel_idle_cpuidle_devices);
return retval;
}
- intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
- return -ENOMEM;
-
cpu_notifier_register_begin();
for_each_online_cpu(i) {
retval = intel_idle_cpu_init(i);
if (retval) {
+ intel_idle_cpuidle_devices_uninit();
cpu_notifier_register_done();
cpuidle_unregister_driver(&intel_idle_driver);
+ free_percpu(intel_idle_cpuidle_devices);
return retval;
}
}
__register_cpu_notifier(&cpu_hotplug_notifier);
+ if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
+ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+ else
+ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
+
cpu_notifier_register_done();
+ pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
+ lapic_timer_reliable_states);
+
return 0;
}
static void __exit intel_idle_exit(void)
{
- intel_idle_cpuidle_devices_uninit();
- cpuidle_unregister_driver(&intel_idle_driver);
+ struct cpuidle_device *dev;
+ int i;
cpu_notifier_register_begin();
@@ -1170,9 +1290,15 @@ static void __exit intel_idle_exit(void)
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
__unregister_cpu_notifier(&cpu_hotplug_notifier);
+ for_each_possible_cpu(i) {
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ cpuidle_unregister_device(dev);
+ }
+
cpu_notifier_register_done();
- return;
+ cpuidle_unregister_driver(&intel_idle_driver);
+ free_percpu(intel_idle_cpuidle_devices);
}
module_init(intel_idle_init);
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 833ea9dd4464..b0d3ecf3318b 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -143,7 +143,7 @@ config MMA8452
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the following Freescale 3-axis
- accelerometers: MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC.
+ accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC.
To compile this driver as a module, choose M here: the module
will be called mma8452.
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index c73331f7782b..2072a31e813b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
{
int ret;
int axis = chan->scan_index;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmc150_accel_set_power_state(data, true);
@@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
}
ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
- &raw_val, 2);
+ &raw_val, sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
}
- *val = sign_extend32(raw_val >> chan->scan_type.shift,
+ *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift,
chan->scan_type.realbits - 1);
ret = bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
@@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = {
.realbits = (bits), \
.storagebits = 16, \
.shift = 16 - (bits), \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmc150_accel_event, \
.num_event_specs = 1 \
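
The bmc150 change above matters because each sample arrives as a little-endian 16-bit word with the significant bits left-aligned; reading into a __le16 and converting with le16_to_cpu() before the shift keeps the decode correct on big-endian hosts, and declaring IIO_LE lets buffered reads pass the raw value through untouched. The decode step in isolation, as a sketch for the 12-bit case (the sample value is illustrative):

    #include <linux/bitops.h>       /* sign_extend32() */
    #include <linux/types.h>
    #include <asm/byteorder.h>      /* le16_to_cpu() */

    /* Recover a signed 12-bit sample left-aligned in a little-endian
     * 16-bit register value (shift = 16 - 12 = 4). */
    static int example_decode_sample(__le16 raw)
    {
            return sign_extend32(le16_to_cpu(raw) >> 4, 11);
            /* e.g. raw 0xFFF0 -> 0x0FFF -> sign-extended -> -1 */
    }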
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index ccc632a7cf01..7f4994f32a90 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1,6 +1,7 @@
/*
* mma8452.c - Support for following Freescale 3-axis accelerometers:
*
+ * MMA8451Q (14 bit)
* MMA8452Q (12 bit)
* MMA8453Q (10 bit)
* MMA8652FC (12 bit)
@@ -15,7 +16,7 @@
*
* 7-bit I2C slave address 0x1c/0x1d (pin selectable)
*
- * TODO: orientation / freefall events, autosleep
+ * TODO: orientation events, autosleep
*/
#include <linux/module.h>
@@ -85,8 +86,9 @@
#define MMA8452_INT_FF_MT BIT(2)
#define MMA8452_INT_TRANS BIT(5)
-#define MMA8452_DEVICE_ID 0x2a
-#define MMA8453_DEVICE_ID 0x3a
+#define MMA8451_DEVICE_ID 0x1a
+#define MMA8452_DEVICE_ID 0x2a
+#define MMA8453_DEVICE_ID 0x3a
#define MMA8652_DEVICE_ID 0x4a
#define MMA8653_DEVICE_ID 0x5a
@@ -416,6 +418,51 @@ fail:
return ret;
}
+/* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
+static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
+{
+ int val;
+ const struct mma_chip_info *chip = data->chip_info;
+
+ val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ if (val < 0)
+ return val;
+
+ return !(val & MMA8452_FF_MT_CFG_OAE);
+}
+
+static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state)
+{
+ int val;
+ const struct mma_chip_info *chip = data->chip_info;
+
+ if ((state && mma8452_freefall_mode_enabled(data)) ||
+	    (!state && !mma8452_freefall_mode_enabled(data)))
+ return 0;
+
+ val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ if (val < 0)
+ return val;
+
+ if (state) {
+ val |= BIT(idx_x + chip->ev_cfg_chan_shift);
+ val |= BIT(idx_y + chip->ev_cfg_chan_shift);
+ val |= BIT(idx_z + chip->ev_cfg_chan_shift);
+ val &= ~MMA8452_FF_MT_CFG_OAE;
+ } else {
+ val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+ val |= MMA8452_FF_MT_CFG_OAE;
+ }
+
+	return mma8452_change_config(data, chip->ev_cfg, val);
+}
+
static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
int val, int val2)
{
@@ -609,12 +656,22 @@ static int mma8452_read_event_config(struct iio_dev *indio_dev,
const struct mma_chip_info *chip = data->chip_info;
int ret;
- ret = i2c_smbus_read_byte_data(data->client,
- data->chip_info->ev_cfg);
- if (ret < 0)
- return ret;
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ return mma8452_freefall_mode_enabled(data);
+ case IIO_EV_DIR_RISING:
+ if (mma8452_freefall_mode_enabled(data))
+ return 0;
+
+ ret = i2c_smbus_read_byte_data(data->client,
+ data->chip_info->ev_cfg);
+ if (ret < 0)
+ return ret;
- return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift));
+ return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift));
+ default:
+ return -EINVAL;
+ }
}
static int mma8452_write_event_config(struct iio_dev *indio_dev,
@@ -627,19 +684,35 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
const struct mma_chip_info *chip = data->chip_info;
int val;
- val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
- if (val < 0)
- return val;
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ return mma8452_set_freefall_mode(data, state);
+ case IIO_EV_DIR_RISING:
+ val = i2c_smbus_read_byte_data(data->client, chip->ev_cfg);
+ if (val < 0)
+ return val;
+
+ if (state) {
+ if (mma8452_freefall_mode_enabled(data)) {
+ val &= ~BIT(idx_x + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_y + chip->ev_cfg_chan_shift);
+ val &= ~BIT(idx_z + chip->ev_cfg_chan_shift);
+ val |= MMA8452_FF_MT_CFG_OAE;
+ }
+ val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ } else {
+ if (mma8452_freefall_mode_enabled(data))
+ return 0;
- if (state)
- val |= BIT(chan->scan_index + chip->ev_cfg_chan_shift);
- else
- val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ val &= ~BIT(chan->scan_index + chip->ev_cfg_chan_shift);
+ }
- val |= chip->ev_cfg_ele;
- val |= MMA8452_FF_MT_CFG_OAE;
+ val |= chip->ev_cfg_ele;
- return mma8452_change_config(data, chip->ev_cfg, val);
+ return mma8452_change_config(data, chip->ev_cfg, val);
+ default:
+ return -EINVAL;
+ }
}
static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
@@ -652,6 +725,16 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
if (src < 0)
return;
+ if (mma8452_freefall_mode_enabled(data)) {
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ ts);
+ return;
+ }
+
if (src & data->chip_info->ev_src_xe)
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
@@ -745,6 +828,27 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
return 0;
}
+static const struct iio_event_spec mma8452_freefall_event[] = {
+ {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD) |
+ BIT(IIO_EV_INFO_HIGH_PASS_FILTER_3DB)
+ },
+};
+
+static const struct iio_event_spec mma8652_freefall_event[] = {
+ {
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD)
+ },
+};
+
static const struct iio_event_spec mma8452_transient_event[] = {
{
.type = IIO_EV_TYPE_MAG,
@@ -781,6 +885,24 @@ static struct attribute_group mma8452_event_attribute_group = {
.attrs = mma8452_event_attributes,
};
+#define MMA8452_FREEFALL_CHANNEL(modifier) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = modifier, \
+ .scan_index = -1, \
+ .event_spec = mma8452_freefall_event, \
+ .num_event_specs = ARRAY_SIZE(mma8452_freefall_event), \
+}
+
+#define MMA8652_FREEFALL_CHANNEL(modifier) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = modifier, \
+ .scan_index = -1, \
+ .event_spec = mma8652_freefall_event, \
+ .num_event_specs = ARRAY_SIZE(mma8652_freefall_event), \
+}
+
#define MMA8452_CHANNEL(axis, idx, bits) { \
.type = IIO_ACCEL, \
.modified = 1, \
@@ -822,11 +944,20 @@ static struct attribute_group mma8452_event_attribute_group = {
.num_event_specs = ARRAY_SIZE(mma8452_motion_event), \
}
+static const struct iio_chan_spec mma8451_channels[] = {
+ MMA8452_CHANNEL(X, idx_x, 14),
+ MMA8452_CHANNEL(Y, idx_y, 14),
+ MMA8452_CHANNEL(Z, idx_z, 14),
+ IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
+};
+
static const struct iio_chan_spec mma8452_channels[] = {
MMA8452_CHANNEL(X, idx_x, 12),
MMA8452_CHANNEL(Y, idx_y, 12),
MMA8452_CHANNEL(Z, idx_z, 12),
IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
};
static const struct iio_chan_spec mma8453_channels[] = {
@@ -834,6 +965,7 @@ static const struct iio_chan_spec mma8453_channels[] = {
MMA8452_CHANNEL(Y, idx_y, 10),
MMA8452_CHANNEL(Z, idx_z, 10),
IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8452_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
};
static const struct iio_chan_spec mma8652_channels[] = {
@@ -841,6 +973,7 @@ static const struct iio_chan_spec mma8652_channels[] = {
MMA8652_CHANNEL(Y, idx_y, 12),
MMA8652_CHANNEL(Z, idx_z, 12),
IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8652_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
};
static const struct iio_chan_spec mma8653_channels[] = {
@@ -848,9 +981,11 @@ static const struct iio_chan_spec mma8653_channels[] = {
MMA8652_CHANNEL(Y, idx_y, 10),
MMA8652_CHANNEL(Z, idx_z, 10),
IIO_CHAN_SOFT_TIMESTAMP(idx_ts),
+ MMA8652_FREEFALL_CHANNEL(IIO_MOD_X_AND_Y_AND_Z),
};
enum {
+ mma8451,
mma8452,
mma8453,
mma8652,
@@ -858,17 +993,34 @@ enum {
};
static const struct mma_chip_info mma_chip_info_table[] = {
- [mma8452] = {
- .chip_id = MMA8452_DEVICE_ID,
- .channels = mma8452_channels,
- .num_channels = ARRAY_SIZE(mma8452_channels),
+ [mma8451] = {
+ .chip_id = MMA8451_DEVICE_ID,
+ .channels = mma8451_channels,
+ .num_channels = ARRAY_SIZE(mma8451_channels),
/*
* Hardware has fullscale of -2G, -4G, -8G corresponding to
- * raw value -2048 for 12 bit or -512 for 10 bit.
+ * raw value -8192 for 14 bit, -2048 for 12 bit or -512 for 10
+ * bit.
* The userspace interface uses m/s^2 and we declare micro units
-	 * So scale factor for 12 bit here is given by:
-	 *	g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
+	 * So scale factor for 14 bit here is given by:
+	 *	g * N * 1000000 / 8192 for N = 2, 4, 8 and g=9.80665
*/
+ .mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
+ .ev_cfg = MMA8452_TRANSIENT_CFG,
+ .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
+ .ev_cfg_chan_shift = 1,
+ .ev_src = MMA8452_TRANSIENT_SRC,
+ .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
+ .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
+ .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
+ .ev_ths = MMA8452_TRANSIENT_THS,
+ .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
+ .ev_count = MMA8452_TRANSIENT_COUNT,
+ },
+ [mma8452] = {
+ .chip_id = MMA8452_DEVICE_ID,
+ .channels = mma8452_channels,
+ .num_channels = ARRAY_SIZE(mma8452_channels),
.mma_scales = { {0, 9577}, {0, 19154}, {0, 38307} },
.ev_cfg = MMA8452_TRANSIENT_CFG,
.ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
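
The mma8451 scale table can be checked by hand: at 14-bit resolution the divisor is 8192, so each entry is g * N * 1000000 / 8192 micro-(m/s^2) per LSB. A tiny standalone check, ordinary userspace C and purely illustrative:

    #include <stdio.h>

    int main(void)
    {
            const double g = 9.80665;
            int n;

            for (n = 2; n <= 8; n <<= 1)
                    printf("+/-%dg: %.0f\n", n, g * n * 1000000 / 8192);
            /* prints 2394, 4788 and 9577 -- the .mma_scales micro values */
            return 0;
    }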
@@ -1049,6 +1201,7 @@ static int mma8452_reset(struct i2c_client *client)
}
static const struct of_device_id mma8452_dt_ids[] = {
+ { .compatible = "fsl,mma8451", .data = &mma_chip_info_table[mma8451] },
{ .compatible = "fsl,mma8452", .data = &mma_chip_info_table[mma8452] },
{ .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] },
{ .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] },
@@ -1085,6 +1238,7 @@ static int mma8452_probe(struct i2c_client *client,
return ret;
switch (ret) {
+ case MMA8451_DEVICE_ID:
case MMA8452_DEVICE_ID:
case MMA8453_DEVICE_ID:
case MMA8652_DEVICE_ID:
@@ -1190,6 +1344,10 @@ static int mma8452_probe(struct i2c_client *client,
if (ret < 0)
goto buffer_cleanup;
+ ret = mma8452_set_freefall_mode(data, false);
+ if (ret)
+ return ret;
+
return 0;
buffer_cleanup:
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 70f042797f15..a03a1417dd63 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -67,6 +67,8 @@
#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10
#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08
+#define ST_ACCEL_1_IHL_IRQ_ADDR 0x25
+#define ST_ACCEL_1_IHL_IRQ_MASK 0x02
#define ST_ACCEL_1_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 2 */
@@ -92,6 +94,8 @@
#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02
#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
+#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
+#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
#define ST_ACCEL_2_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 3 */
@@ -125,6 +129,8 @@
#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80
#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00
+#define ST_ACCEL_3_IHL_IRQ_ADDR 0x23
+#define ST_ACCEL_3_IHL_IRQ_MASK 0x40
#define ST_ACCEL_3_IG1_EN_ADDR 0x23
#define ST_ACCEL_3_IG1_EN_MASK 0x08
#define ST_ACCEL_3_MULTIREAD_BIT false
@@ -169,6 +175,8 @@
#define ST_ACCEL_5_DRDY_IRQ_ADDR 0x22
#define ST_ACCEL_5_DRDY_IRQ_INT1_MASK 0x04
#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
+#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
+#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
#define ST_ACCEL_5_IG1_EN_ADDR 0x21
#define ST_ACCEL_5_IG1_EN_MASK 0x08
#define ST_ACCEL_5_MULTIREAD_BIT false
@@ -292,6 +300,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
},
.multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
.bootime = 2,
@@ -355,6 +365,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
},
.multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
.bootime = 2,
@@ -430,6 +442,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
.ig1 = {
.en_addr = ST_ACCEL_3_IG1_EN_ADDR,
.en_mask = ST_ACCEL_3_IG1_EN_MASK,
@@ -537,6 +551,8 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.addr = ST_ACCEL_5_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
},
.multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
.bootime = 2, /* guess */
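The addr_ihl/mask_ihl pairs added above record, per sensor, where the
"interrupt active low" (IHL) bit lives. A hedged sketch of the
read-modify-write the st_sensors core is expected to perform with them
(plain SMBus access is used here purely for illustration):

#include <linux/i2c.h>

static int st_accel_set_irq_active_low(struct i2c_client *client,
				       u8 addr_ihl, u8 mask_ihl,
				       bool active_low)
{
	int val = i2c_smbus_read_byte_data(client, addr_ihl);

	if (val < 0)
		return val;

	if (active_low)
		val |= mask_ihl;
	else
		val &= ~mask_ihl;

	return i2c_smbus_write_byte_data(client, addr_ihl, val);
}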
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 283ded7747a9..82c718c515a0 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -131,6 +131,17 @@ config AT91_ADC
To compile this driver as a module, choose M here: the module will be
called at91_adc.
+config AT91_SAMA5D2_ADC
+ tristate "Atmel AT91 SAMA5D2 ADC"
+ depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to build support for the Atmel SAMA5D2 ADC which is
+ available on the SAMA5D2 SoC family.
+
+ To compile this driver as a module, choose M here: the module will be
+ called at91-sama5d2_adc.
+
config AXP288_ADC
tristate "X-Powers AXP288 ADC driver"
depends on MFD_AXP20X
@@ -184,6 +195,13 @@ config EXYNOS_ADC
To compile this driver as a module, choose M here: the module will be
called exynos_adc.
+config FSL_MX25_ADC
+ tristate "Freescale MX25 ADC driver"
+ depends on MFD_MX25_TSADC
+ help
+ Generic Conversion Queue driver for the general-purpose ADC in the
+ MX25. This driver supports single measurements using the MX25 ADC.
+
config HI8435
tristate "Holt Integrated Circuits HI-8435 threshold detector"
select IIO_TRIGGERED_EVENT
@@ -267,11 +285,11 @@ config MCP320X
called mcp320x.
config MCP3422
- tristate "Microchip Technology MCP3422/3/4/6/7/8 driver"
+ tristate "Microchip Technology MCP3421/2/3/4/5/6/7/8 driver"
depends on I2C
help
- Say yes here to build support for Microchip Technology's
- MCP3422, MCP3423, MCP3424, MCP3426, MCP3427 or MCP3428
+ Say yes here to build support for Microchip Technology's MCP3421,
+ MCP3422, MCP3423, MCP3424, MCP3425, MCP3426, MCP3427 or MCP3428
analog to digital converters.
This driver can also be built as a module. If so, the module will be
@@ -287,6 +305,20 @@ config MEN_Z188_ADC
This driver can also be built as a module. If so, the module will be
called men_z188_adc.
+config MXS_LRADC
+ tristate "Freescale i.MX23/i.MX28 LRADC"
+ depends on (ARCH_MXS || COMPILE_TEST) && HAS_IOMEM
+ depends on INPUT
+ select STMP_DEVICE
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the i.MX23/i.MX28 LRADC converter
+ built into these chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mxs-lradc.
+
config NAU7802
tristate "Nuvoton NAU7802 ADC driver"
depends on I2C
@@ -352,6 +384,16 @@ config TI_ADC081C
This driver can also be built as a module. If so, the module will be
called ti-adc081c.
+config TI_ADC0832
+ tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838"
+ depends on SPI
+ help
+ If you say yes here you get support for Texas Instruments ADC0831,
+ ADC0832, ADC0834, ADC0838 ADC chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-adc0832.
+
config TI_ADC128S052
tristate "Texas Instruments ADC128S052/ADC122S021/ADC124S021"
depends on SPI
@@ -362,6 +404,19 @@ config TI_ADC128S052
This driver can also be built as a module. If so, the module will be
called ti-adc128s052.
+config TI_ADS1015
+ tristate "Texas Instruments ADS1015 ADC"
+ depends on I2C && !SENSORS_ADS1015
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADS1015
+ ADC chip.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads1015.
+
config TI_ADS8688
tristate "Texas Instruments ADS8688"
depends on SPI && OF
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 6435780e9b71..0cb79210a4b0 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -14,11 +14,13 @@ obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
obj-$(CONFIG_AD799X) += ad799x.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
+obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
+obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
obj-$(CONFIG_HI8435) += hi8435.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
@@ -28,13 +30,16 @@ obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
obj-$(CONFIG_MCP3422) += mcp3422.o
obj-$(CONFIG_MEN_Z188_ADC) += men_z188_adc.o
+obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
+obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
+obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
new file mode 100644
index 000000000000..dbee13ad33a3
--- /dev/null
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -0,0 +1,508 @@
+/*
+ * Atmel ADC driver for SAMA5D2 devices and compatible.
+ *
+ * Copyright (C) 2015 Atmel,
+ * 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+/* Control Register */
+#define AT91_SAMA5D2_CR 0x00
+/* Software Reset */
+#define AT91_SAMA5D2_CR_SWRST BIT(0)
+/* Start Conversion */
+#define AT91_SAMA5D2_CR_START BIT(1)
+/* Touchscreen Calibration */
+#define AT91_SAMA5D2_CR_TSCALIB BIT(2)
+/* Comparison Restart */
+#define AT91_SAMA5D2_CR_CMPRST BIT(4)
+
+/* Mode Register */
+#define AT91_SAMA5D2_MR 0x04
+/* Trigger Selection */
+#define AT91_SAMA5D2_MR_TRGSEL(v) ((v) << 1)
+/* ADTRG */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG0 0
+/* TIOA0 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG1 1
+/* TIOA1 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG2 2
+/* TIOA2 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG3 3
+/* PWM event line 0 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG4 4
+/* PWM event line 1 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG5 5
+/* TIOA3 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG6 6
+/* RTCOUT0 */
+#define AT91_SAMA5D2_MR_TRGSEL_TRIG7 7
+/* Sleep Mode */
+#define AT91_SAMA5D2_MR_SLEEP BIT(5)
+/* Fast Wake Up */
+#define AT91_SAMA5D2_MR_FWUP BIT(6)
+/* Prescaler Rate Selection */
+#define AT91_SAMA5D2_MR_PRESCAL(v) ((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET)
+#define AT91_SAMA5D2_MR_PRESCAL_OFFSET 8
+#define AT91_SAMA5D2_MR_PRESCAL_MAX 0xff
+/* Startup Time */
+#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16)
+/* Analog Change */
+#define AT91_SAMA5D2_MR_ANACH BIT(23)
+/* Tracking Time */
+#define AT91_SAMA5D2_MR_TRACKTIM(v) ((v) << 24)
+#define AT91_SAMA5D2_MR_TRACKTIM_MAX 0xff
+/* Transfer Time */
+#define AT91_SAMA5D2_MR_TRANSFER(v) ((v) << 28)
+#define AT91_SAMA5D2_MR_TRANSFER_MAX 0x3
+/* Use Sequence Enable */
+#define AT91_SAMA5D2_MR_USEQ BIT(31)
+
+/* Channel Sequence Register 1 */
+#define AT91_SAMA5D2_SEQR1 0x08
+/* Channel Sequence Register 2 */
+#define AT91_SAMA5D2_SEQR2 0x0c
+/* Channel Enable Register */
+#define AT91_SAMA5D2_CHER 0x10
+/* Channel Disable Register */
+#define AT91_SAMA5D2_CHDR 0x14
+/* Channel Status Register */
+#define AT91_SAMA5D2_CHSR 0x18
+/* Last Converted Data Register */
+#define AT91_SAMA5D2_LCDR 0x20
+/* Interrupt Enable Register */
+#define AT91_SAMA5D2_IER 0x24
+/* Interrupt Disable Register */
+#define AT91_SAMA5D2_IDR 0x28
+/* Interrupt Mask Register */
+#define AT91_SAMA5D2_IMR 0x2c
+/* Interrupt Status Register */
+#define AT91_SAMA5D2_ISR 0x30
+/* Last Channel Trigger Mode Register */
+#define AT91_SAMA5D2_LCTMR 0x34
+/* Last Channel Compare Window Register */
+#define AT91_SAMA5D2_LCCWR 0x38
+/* Overrun Status Register */
+#define AT91_SAMA5D2_OVER 0x3c
+/* Extended Mode Register */
+#define AT91_SAMA5D2_EMR 0x40
+/* Compare Window Register */
+#define AT91_SAMA5D2_CWR 0x44
+/* Channel Gain Register */
+#define AT91_SAMA5D2_CGR 0x48
+/* Channel Offset Register */
+#define AT91_SAMA5D2_COR 0x4c
+/* Channel Data Register 0 */
+#define AT91_SAMA5D2_CDR0 0x50
+/* Analog Control Register */
+#define AT91_SAMA5D2_ACR 0x94
+/* Touchscreen Mode Register */
+#define AT91_SAMA5D2_TSMR 0xb0
+/* Touchscreen X Position Register */
+#define AT91_SAMA5D2_XPOSR 0xb4
+/* Touchscreen Y Position Register */
+#define AT91_SAMA5D2_YPOSR 0xb8
+/* Touchscreen Pressure Register */
+#define AT91_SAMA5D2_PRESSR 0xbc
+/* Trigger Register */
+#define AT91_SAMA5D2_TRGR 0xc0
+/* Correction Select Register */
+#define AT91_SAMA5D2_COSR 0xd0
+/* Correction Value Register */
+#define AT91_SAMA5D2_CVR 0xd4
+/* Channel Error Correction Register */
+#define AT91_SAMA5D2_CECR 0xd8
+/* Write Protection Mode Register */
+#define AT91_SAMA5D2_WPMR 0xe4
+/* Write Protection Status Register */
+#define AT91_SAMA5D2_WPSR 0xe8
+/* Version Register */
+#define AT91_SAMA5D2_VERSION 0xfc
+
+#define AT91_AT91_SAMA5D2_CHAN(num, addr) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .channel = num, \
+ .address = addr, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 12, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .datasheet_name = "CH"#num, \
+ .indexed = 1, \
+ }
+
+#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
+#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
+
+struct at91_adc_soc_info {
+ unsigned startup_time;
+ unsigned min_sample_rate;
+ unsigned max_sample_rate;
+};
+
+struct at91_adc_state {
+ void __iomem *base;
+ int irq;
+ struct clk *per_clk;
+ struct regulator *reg;
+ struct regulator *vref;
+ int vref_uv;
+ const struct iio_chan_spec *chan;
+ bool conversion_done;
+ u32 conversion_value;
+ struct at91_adc_soc_info soc_info;
+ wait_queue_head_t wq_data_available;
+ /*
+ * lock to prevent concurrent 'single conversion' requests through
+ * sysfs.
+ */
+ struct mutex lock;
+};
+
+static const struct iio_chan_spec at91_adc_channels[] = {
+ AT91_AT91_SAMA5D2_CHAN(0, 0x50),
+ AT91_AT91_SAMA5D2_CHAN(1, 0x54),
+ AT91_AT91_SAMA5D2_CHAN(2, 0x58),
+ AT91_AT91_SAMA5D2_CHAN(3, 0x5c),
+ AT91_AT91_SAMA5D2_CHAN(4, 0x60),
+ AT91_AT91_SAMA5D2_CHAN(5, 0x64),
+ AT91_AT91_SAMA5D2_CHAN(6, 0x68),
+ AT91_AT91_SAMA5D2_CHAN(7, 0x6c),
+ AT91_AT91_SAMA5D2_CHAN(8, 0x70),
+ AT91_AT91_SAMA5D2_CHAN(9, 0x74),
+ AT91_AT91_SAMA5D2_CHAN(10, 0x78),
+ AT91_AT91_SAMA5D2_CHAN(11, 0x7c),
+};
+
+static unsigned at91_adc_startup_time(unsigned startup_time_min,
+ unsigned adc_clk_khz)
+{
+ const unsigned startup_lookup[] = {
+ 0, 8, 16, 24,
+ 64, 80, 96, 112,
+ 512, 576, 640, 704,
+ 768, 832, 896, 960
+ };
+ unsigned ticks_min, i;
+
+ /*
+ * Since the ADC frequency has already been validated, the startup
+ * time constraint can always be met here.
+ */
+
+ ticks_min = startup_time_min * adc_clk_khz / 1000;
+ for (i = 0; i < ARRAY_SIZE(startup_lookup); i++)
+ if (startup_lookup[i] > ticks_min)
+ break;
+
+ return i;
+}
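+
+/*
+ * Worked example (illustrative values): with startup_time_min = 4 and a
+ * 20 MHz ADC clock (adc_clk_khz = 20000), ticks_min = 4 * 20000 / 1000 =
+ * 80. The first lookup entry above 80 is 96, at index 6, so 6 is
+ * returned and later programmed through AT91_SAMA5D2_MR_STARTUP().
+ */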
+
+static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
+{
+ struct iio_dev *indio_dev = iio_priv_to_dev(st);
+ unsigned f_per, prescal, startup;
+
+ f_per = clk_get_rate(st->per_clk);
+ prescal = (f_per / (2 * freq)) - 1;
+
+ startup = at91_adc_startup_time(st->soc_info.startup_time,
+ freq / 1000);
+
+ at91_adc_writel(st, AT91_SAMA5D2_MR,
+ AT91_SAMA5D2_MR_TRANSFER(2)
+ | AT91_SAMA5D2_MR_STARTUP(startup)
+ | AT91_SAMA5D2_MR_PRESCAL(prescal));
+
+ dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
+ freq, startup, prescal);
+}
+
+static unsigned at91_adc_get_sample_freq(struct at91_adc_state *st)
+{
+ unsigned f_adc, f_per = clk_get_rate(st->per_clk);
+ unsigned mr, prescal;
+
+ mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
+ prescal = (mr >> AT91_SAMA5D2_MR_PRESCAL_OFFSET)
+ & AT91_SAMA5D2_MR_PRESCAL_MAX;
+ f_adc = f_per / (2 * (prescal + 1));
+
+ return f_adc;
+}
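+
+/*
+ * Round-trip check (illustrative values): with f_per = 100 MHz and a
+ * requested freq of 1 MHz, at91_adc_setup_samp_freq() computes
+ * prescal = 100000000 / (2 * 1000000) - 1 = 49; reading back here gives
+ * f_adc = 100000000 / (2 * (49 + 1)) = 1 MHz again.
+ */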
+
+static irqreturn_t at91_adc_interrupt(int irq, void *private)
+{
+ struct iio_dev *indio = private;
+ struct at91_adc_state *st = iio_priv(indio);
+ u32 status = at91_adc_readl(st, AT91_SAMA5D2_ISR);
+ u32 imr = at91_adc_readl(st, AT91_SAMA5D2_IMR);
+
+ if (status & imr) {
+ st->conversion_value = at91_adc_readl(st, st->chan->address);
+ st->conversion_done = true;
+ wake_up_interruptible(&st->wq_data_available);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int at91_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+
+ st->chan = chan;
+
+ at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
+
+ ret = wait_event_interruptible_timeout(st->wq_data_available,
+ st->conversion_done,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ if (ret > 0) {
+ *val = st->conversion_value;
+ ret = IIO_VAL_INT;
+ st->conversion_done = false;
+ }
+
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, BIT(chan->channel));
+ at91_adc_writel(st, AT91_SAMA5D2_CHDR, BIT(chan->channel));
+
+ mutex_unlock(&st->lock);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_uv / 1000;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = at91_adc_get_sample_freq(st);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int at91_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ if (val < st->soc_info.min_sample_rate ||
+ val > st->soc_info.max_sample_rate)
+ return -EINVAL;
+
+ at91_adc_setup_samp_freq(st, val);
+
+ return 0;
+}
+
+static const struct iio_info at91_adc_info = {
+ .read_raw = &at91_adc_read_raw,
+ .write_raw = &at91_adc_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int at91_adc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct at91_adc_state *st;
+ struct resource *res;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &at91_adc_info;
+ indio_dev->channels = at91_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(at91_adc_channels);
+
+ st = iio_priv(indio_dev);
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "atmel,min-sample-rate-hz",
+ &st->soc_info.min_sample_rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "invalid or missing value for atmel,min-sample-rate-hz\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "atmel,max-sample-rate-hz",
+ &st->soc_info.max_sample_rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "invalid or missing value for atmel,max-sample-rate-hz\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "atmel,startup-time-ms",
+ &st->soc_info.startup_time);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "invalid or missing value for atmel,startup-time-ms\n");
+ return ret;
+ }
+
+ init_waitqueue_head(&st->wq_data_available);
+ mutex_init(&st->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ st->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(st->base))
+ return PTR_ERR(st->base);
+
+ st->irq = platform_get_irq(pdev, 0);
+ if (st->irq <= 0) {
+ if (!st->irq)
+ st->irq = -ENXIO;
+
+ return st->irq;
+ }
+
+ st->per_clk = devm_clk_get(&pdev->dev, "adc_clk");
+ if (IS_ERR(st->per_clk))
+ return PTR_ERR(st->per_clk);
+
+ st->reg = devm_regulator_get(&pdev->dev, "vddana");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
+ st->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(st->vref))
+ return PTR_ERR(st->vref);
+
+ ret = devm_request_irq(&pdev->dev, st->irq, at91_adc_interrupt, 0,
+ pdev->dev.driver->name, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->vref);
+ if (ret)
+ goto reg_disable;
+
+ st->vref_uv = regulator_get_voltage(st->vref);
+ if (st->vref_uv <= 0) {
+ ret = -EINVAL;
+ goto vref_disable;
+ }
+
+ at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
+ at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
+
+ at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
+
+ ret = clk_prepare_enable(st->per_clk);
+ if (ret)
+ goto vref_disable;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto per_clk_disable_unprepare;
+
+ dev_info(&pdev->dev, "version: %x\n",
+ readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
+
+ return 0;
+
+per_clk_disable_unprepare:
+ clk_disable_unprepare(st->per_clk);
+vref_disable:
+ regulator_disable(st->vref);
+reg_disable:
+ regulator_disable(st->reg);
+ return ret;
+}
+
+static int at91_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct at91_adc_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ clk_disable_unprepare(st->per_clk);
+
+ regulator_disable(st->vref);
+ regulator_disable(st->reg);
+
+ return 0;
+}
+
+static const struct of_device_id at91_adc_dt_match[] = {
+ {
+ .compatible = "atmel,sama5d2-adc",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, at91_adc_dt_match);
+
+static struct platform_driver at91_adc_driver = {
+ .probe = at91_adc_probe,
+ .remove = at91_adc_remove,
+ .driver = {
+ .name = "at91-sama5d2_adc",
+ .of_match_table = at91_adc_dt_match,
+ },
+};
+module_platform_driver(at91_adc_driver);
+
+MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
+MODULE_DESCRIPTION("Atmel AT91 SAMA5D2 ADC");
+MODULE_LICENSE("GPL v2");
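A hypothetical userspace check for the driver above (the file names follow
the standard IIO sysfs ABI for an indexed voltage channel; the device index
0 is an assumption):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/iio/devices/iio:device0/in_voltage0_raw", "r");
	int raw;

	if (!f || fscanf(f, "%d", &raw) != 1)
		return 1;
	fclose(f);

	/* in_voltage_scale is vref_mV / 2^12, per read_raw() above */
	printf("raw = %d\n", raw);
	return 0;
}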
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 0c904edd6c00..7fd24949c0c1 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -46,7 +46,7 @@ struct axp288_adc_info {
struct regmap *regmap;
};
-static const struct iio_chan_spec const axp288_adc_channels[] = {
+static const struct iio_chan_spec axp288_adc_channels[] = {
{
.indexed = 1,
.type = IIO_TEMP,
diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
new file mode 100644
index 000000000000..72b32c1ab257
--- /dev/null
+++ b/drivers/iio/adc/fsl-imx25-gcq.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This is the driver for the imx25 GCQ (Generic Conversion Queue)
+ * connected to the imx25 ADC.
+ */
+
+#include <dt-bindings/iio/adc/fsl-imx25-gcq.h>
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/imx25-tsadc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define MX25_GCQ_TIMEOUT (msecs_to_jiffies(2000))
+
+static const char * const driver_name = "mx25-gcq";
+
+enum mx25_gcq_cfgs {
+ MX25_CFG_XP = 0,
+ MX25_CFG_YP,
+ MX25_CFG_XN,
+ MX25_CFG_YN,
+ MX25_CFG_WIPER,
+ MX25_CFG_INAUX0,
+ MX25_CFG_INAUX1,
+ MX25_CFG_INAUX2,
+ MX25_NUM_CFGS,
+};
+
+struct mx25_gcq_priv {
+ struct regmap *regs;
+ struct completion completed;
+ struct clk *clk;
+ int irq;
+ struct regulator *vref[4];
+ u32 channel_vref_mv[MX25_NUM_CFGS];
+};
+
+#define MX25_CQG_CHAN(chan, id) {\
+ .type = IIO_VOLTAGE,\
+ .indexed = 1,\
+ .channel = chan,\
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE),\
+ .datasheet_name = id,\
+}
+
+static const struct iio_chan_spec mx25_gcq_channels[MX25_NUM_CFGS] = {
+ MX25_CQG_CHAN(MX25_CFG_XP, "xp"),
+ MX25_CQG_CHAN(MX25_CFG_YP, "yp"),
+ MX25_CQG_CHAN(MX25_CFG_XN, "xn"),
+ MX25_CQG_CHAN(MX25_CFG_YN, "yn"),
+ MX25_CQG_CHAN(MX25_CFG_WIPER, "wiper"),
+ MX25_CQG_CHAN(MX25_CFG_INAUX0, "inaux0"),
+ MX25_CQG_CHAN(MX25_CFG_INAUX1, "inaux1"),
+ MX25_CQG_CHAN(MX25_CFG_INAUX2, "inaux2"),
+};
+
+static const char * const mx25_gcq_refp_names[] = {
+ [MX25_ADC_REFP_YP] = "yp",
+ [MX25_ADC_REFP_XP] = "xp",
+ [MX25_ADC_REFP_INT] = "int",
+ [MX25_ADC_REFP_EXT] = "ext",
+};
+
+static irqreturn_t mx25_gcq_irq(int irq, void *data)
+{
+ struct mx25_gcq_priv *priv = data;
+ u32 stats;
+
+ regmap_read(priv->regs, MX25_ADCQ_SR, &stats);
+
+ if (stats & MX25_ADCQ_SR_EOQ) {
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR,
+ MX25_ADCQ_MR_EOQ_IRQ, MX25_ADCQ_MR_EOQ_IRQ);
+ complete(&priv->completed);
+ }
+
+ /* Disable conversion queue run */
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FQS, 0);
+
+ /* Acknowledge all possible irqs */
+ regmap_write(priv->regs, MX25_ADCQ_SR, MX25_ADCQ_SR_FRR |
+ MX25_ADCQ_SR_FUR | MX25_ADCQ_SR_FOR |
+ MX25_ADCQ_SR_EOQ | MX25_ADCQ_SR_PD);
+
+ return IRQ_HANDLED;
+}
+
+static int mx25_gcq_get_raw_value(struct device *dev,
+ struct iio_chan_spec const *chan,
+ struct mx25_gcq_priv *priv,
+ int *val)
+{
+ long timeout;
+ u32 data;
+
+ /* Setup the configuration we want to use */
+ regmap_write(priv->regs, MX25_ADCQ_ITEM_7_0,
+ MX25_ADCQ_ITEM(0, chan->channel));
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_EOQ_IRQ, 0);
+
+ /* Trigger queue for one run */
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FQS,
+ MX25_ADCQ_CR_FQS);
+
+ timeout = wait_for_completion_interruptible_timeout(
+ &priv->completed, MX25_GCQ_TIMEOUT);
+ if (timeout < 0) {
+ dev_err(dev, "ADC wait for measurement failed\n");
+ return timeout;
+ } else if (timeout == 0) {
+ dev_err(dev, "ADC timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ regmap_read(priv->regs, MX25_ADCQ_FIFO, &data);
+
+ *val = MX25_ADCQ_FIFO_DATA(data);
+
+ return IIO_VAL_INT;
+}
+
+static int mx25_gcq_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct mx25_gcq_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ ret = mx25_gcq_get_raw_value(&indio_dev->dev, chan, priv, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = priv->channel_vref_mv[chan->channel];
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
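+
+/*
+ * Scale example: IIO_VAL_FRACTIONAL_LOG2 above yields
+ * channel_vref_mv / 2^12. With the 2.5 V internal reference
+ * (channel_vref_mv = 2500, set in mx25_gcq_setup_cfgs() below), that is
+ * 2500 / 4096, roughly 0.61 mV per LSB.
+ */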
+
+static const struct iio_info mx25_gcq_iio_info = {
+ .read_raw = mx25_gcq_read_raw,
+};
+
+static const struct regmap_config mx25_gcq_regconfig = {
+ .max_register = 0x5c,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
+ struct mx25_gcq_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ struct device *dev = &pdev->dev;
+ unsigned int refp_used[4] = {};
+ int ret, i;
+
+ /*
+ * Set up all configuration registers with a default conversion
+ * configuration for each input
+ */
+ for (i = 0; i < MX25_NUM_CFGS; ++i)
+ regmap_write(priv->regs, MX25_ADCQ_CFG(i),
+ MX25_ADCQ_CFG_YPLL_OFF |
+ MX25_ADCQ_CFG_XNUR_OFF |
+ MX25_ADCQ_CFG_XPUL_OFF |
+ MX25_ADCQ_CFG_REFP_INT |
+ MX25_ADCQ_CFG_IN(i) |
+ MX25_ADCQ_CFG_REFN_NGND2);
+
+ /*
+ * First get all regulators to store them in channel_vref_mv if
+ * necessary. Later we use that information for proper IIO scale
+ * information.
+ */
+ priv->vref[MX25_ADC_REFP_INT] = NULL;
+ priv->vref[MX25_ADC_REFP_EXT] =
+ devm_regulator_get_optional(&pdev->dev, "vref-ext");
+ priv->vref[MX25_ADC_REFP_XP] =
+ devm_regulator_get_optional(&pdev->dev, "vref-xp");
+ priv->vref[MX25_ADC_REFP_YP] =
+ devm_regulator_get_optional(&pdev->dev, "vref-yp");
+
+ for_each_child_of_node(np, child) {
+ u32 reg;
+ u32 refp = MX25_ADCQ_CFG_REFP_INT;
+ u32 refn = MX25_ADCQ_CFG_REFN_NGND2;
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret) {
+ dev_err(dev, "Failed to get reg property\n");
+ return ret;
+ }
+
+ if (reg >= MX25_NUM_CFGS) {
+ dev_err(dev,
+ "reg value is greater than the number of available configuration registers\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(child, "fsl,adc-refp", &refp);
+ of_property_read_u32(child, "fsl,adc-refn", &refn);
+
+ switch (refp) {
+ case MX25_ADC_REFP_EXT:
+ case MX25_ADC_REFP_XP:
+ case MX25_ADC_REFP_YP:
+ if (IS_ERR(priv->vref[refp])) {
+ dev_err(dev, "Error, trying to use external voltage reference without a vref-%s regulator.",
+ mx25_gcq_refp_names[refp]);
+ return PTR_ERR(priv->vref[refp]);
+ }
+ priv->channel_vref_mv[reg] =
+ regulator_get_voltage(priv->vref[refp]);
+ /* Conversion from uV to mV */
+ priv->channel_vref_mv[reg] /= 1000;
+ break;
+ case MX25_ADC_REFP_INT:
+ priv->channel_vref_mv[reg] = 2500;
+ break;
+ default:
+ dev_err(dev, "Invalid positive reference %d\n", refp);
+ return -EINVAL;
+ }
+
+ ++refp_used[refp];
+
+ /*
+ * Shift the read values to the correct positions within the
+ * register.
+ */
+ refp = MX25_ADCQ_CFG_REFP(refp);
+ refn = MX25_ADCQ_CFG_REFN(refn);
+
+ if ((refp & MX25_ADCQ_CFG_REFP_MASK) != refp) {
+ dev_err(dev, "Invalid fsl,adc-refp property value\n");
+ return -EINVAL;
+ }
+ if ((refn & MX25_ADCQ_CFG_REFN_MASK) != refn) {
+ dev_err(dev, "Invalid fsl,adc-refn property value\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_CFG(reg),
+ MX25_ADCQ_CFG_REFP_MASK |
+ MX25_ADCQ_CFG_REFN_MASK,
+ refp | refn);
+ }
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_FRST | MX25_ADCQ_CR_QRST,
+ MX25_ADCQ_CR_FRST | MX25_ADCQ_CR_QRST);
+
+ regmap_write(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_PDMSK | MX25_ADCQ_CR_QSM_FQS);
+
+ /* Remove unused regulators */
+ for (i = 0; i != 4; ++i) {
+ if (!refp_used[i]) {
+ if (!IS_ERR_OR_NULL(priv->vref[i]))
+ devm_regulator_put(priv->vref[i]);
+ priv->vref[i] = NULL;
+ }
+ }
+
+ return 0;
+}
+
+static int mx25_gcq_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct mx25_gcq_priv *priv;
+ struct mx25_tsadc *tsadc = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *mem;
+ int ret;
+ int i;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ priv->regs = devm_regmap_init_mmio(dev, mem, &mx25_gcq_regconfig);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ return PTR_ERR(priv->regs);
+ }
+
+ init_completion(&priv->completed);
+
+ ret = mx25_gcq_setup_cfgs(pdev, priv);
+ if (ret)
+ return ret;
+
+ for (i = 0; i != 4; ++i) {
+ if (!priv->vref[i])
+ continue;
+
+ ret = regulator_enable(priv->vref[i]);
+ if (ret)
+ goto err_regulator_disable;
+ }
+
+ priv->clk = tsadc->clk;
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ goto err_vref_disable;
+ }
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq <= 0) {
+ dev_err(dev, "Failed to get IRQ\n");
+ ret = priv->irq;
+ if (!ret)
+ ret = -ENXIO;
+ goto err_clk_unprepare;
+ }
+
+ ret = request_irq(priv->irq, mx25_gcq_irq, 0, pdev->name, priv);
+ if (ret) {
+ dev_err(dev, "Failed requesting IRQ\n");
+ goto err_clk_unprepare;
+ }
+
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->channels = mx25_gcq_channels;
+ indio_dev->num_channels = ARRAY_SIZE(mx25_gcq_channels);
+ indio_dev->info = &mx25_gcq_iio_info;
+ indio_dev->name = driver_name;
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register iio device\n");
+ goto err_irq_free;
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return 0;
+
+err_irq_free:
+ free_irq(priv->irq, priv);
+err_clk_unprepare:
+ clk_disable_unprepare(priv->clk);
+err_vref_disable:
+ i = 4;
+err_regulator_disable:
+ for (; i-- > 0;) {
+ if (priv->vref[i])
+ regulator_disable(priv->vref[i]);
+ }
+ return ret;
+}
+
+static int mx25_gcq_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct mx25_gcq_priv *priv = iio_priv(indio_dev);
+ int i;
+
+ iio_device_unregister(indio_dev);
+ free_irq(priv->irq, priv);
+ clk_disable_unprepare(priv->clk);
+ for (i = 4; i-- > 0;) {
+ if (priv->vref[i])
+ regulator_disable(priv->vref[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mx25_gcq_ids[] = {
+ { .compatible = "fsl,imx25-gcq", },
+ { /* Sentinel */ }
+};
+
+static struct platform_driver mx25_gcq_driver = {
+ .driver = {
+ .name = "mx25-gcq",
+ .of_match_table = mx25_gcq_ids,
+ },
+ .probe = mx25_gcq_probe,
+ .remove = mx25_gcq_remove,
+};
+module_platform_driver(mx25_gcq_driver);
+
+MODULE_DESCRIPTION("ADC driver for Freescale mx25");
+MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index d803e5018a42..65909d5858b1 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -19,17 +19,18 @@
*
* Configurable 7-bit I2C slave address from 0x40 to 0x4F
*/
-#include <linux/module.h>
-#include <linux/kthread.h>
+
#include <linux/delay.h>
+#include <linux/i2c.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/sysfs.h>
-#include <linux/i2c.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/platform_data/ina2xx.h>
-
#include <linux/util_macros.h>
+#include <linux/platform_data/ina2xx.h>
+
/* INA2XX registers definition */
#define INA2XX_CONFIG 0x00
#define INA2XX_SHUNT_VOLTAGE 0x01 /* readonly */
@@ -38,7 +39,7 @@
#define INA2XX_CURRENT 0x04 /* readonly */
#define INA2XX_CALIBRATION 0x05
-#define INA226_ALERT_MASK 0x06
+#define INA226_ALERT_MASK GENMASK(2, 1)
#define INA266_CVRF BIT(3)
#define INA2XX_MAX_REGISTERS 8
@@ -113,7 +114,7 @@ struct ina2xx_chip_info {
struct mutex state_lock;
unsigned int shunt_resistor;
int avg;
- s64 prev_ns; /* track buffer capture time, check for underruns*/
+ s64 prev_ns; /* track buffer capture time, check for underruns */
int int_time_vbus; /* Bus voltage integration time uS */
int int_time_vshunt; /* Shunt voltage integration time uS */
bool allow_async_readout;
@@ -121,21 +122,21 @@ struct ina2xx_chip_info {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
- .config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
- .shunt_div = 100,
- .bus_voltage_shift = 3,
- .bus_voltage_lsb = 4000,
- .power_lsb = 20000,
- },
+ .config_default = INA219_CONFIG_DEFAULT,
+ .calibration_factor = 40960000,
+ .shunt_div = 100,
+ .bus_voltage_shift = 3,
+ .bus_voltage_lsb = 4000,
+ .power_lsb = 20000,
+ },
[ina226] = {
- .config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
- .shunt_div = 400,
- .bus_voltage_shift = 0,
- .bus_voltage_lsb = 1250,
- .power_lsb = 25000,
- },
+ .config_default = INA226_CONFIG_DEFAULT,
+ .calibration_factor = 5120000,
+ .shunt_div = 400,
+ .bus_voltage_shift = 0,
+ .bus_voltage_lsb = 1250,
+ .power_lsb = 25000,
+ },
};
static int ina2xx_read_raw(struct iio_dev *indio_dev,
@@ -149,7 +150,7 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = regmap_read(chip->regmap, chan->address, &regval);
- if (ret < 0)
+ if (ret)
return ret;
if (is_signed_reg(chan->address))
@@ -251,7 +252,7 @@ static int ina226_set_int_time_vbus(struct ina2xx_chip_info *chip,
return -EINVAL;
bits = find_closest(val_us, ina226_conv_time_tab,
- ARRAY_SIZE(ina226_conv_time_tab));
+ ARRAY_SIZE(ina226_conv_time_tab));
chip->int_time_vbus = ina226_conv_time_tab[bits];
@@ -270,7 +271,7 @@ static int ina226_set_int_time_vshunt(struct ina2xx_chip_info *chip,
return -EINVAL;
bits = find_closest(val_us, ina226_conv_time_tab,
- ARRAY_SIZE(ina226_conv_time_tab));
+ ARRAY_SIZE(ina226_conv_time_tab));
chip->int_time_vshunt = ina226_conv_time_tab[bits];
@@ -285,8 +286,8 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
- int ret;
unsigned int config, tmp;
+ int ret;
if (iio_buffer_enabled(indio_dev))
return -EBUSY;
@@ -294,8 +295,8 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
mutex_lock(&chip->state_lock);
ret = regmap_read(chip->regmap, INA2XX_CONFIG, &config);
- if (ret < 0)
- goto _err;
+ if (ret)
+ goto err;
tmp = config;
@@ -310,19 +311,19 @@ static int ina2xx_write_raw(struct iio_dev *indio_dev,
else
ret = ina226_set_int_time_vbus(chip, val2, &tmp);
break;
+
default:
ret = -EINVAL;
}
if (!ret && (tmp != config))
ret = regmap_write(chip->regmap, INA2XX_CONFIG, tmp);
-_err:
+err:
mutex_unlock(&chip->state_lock);
return ret;
}
-
static ssize_t ina2xx_allow_async_readout_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -355,6 +356,7 @@ static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
return -EINVAL;
chip->shunt_resistor = val;
+
return 0;
}
@@ -438,7 +440,6 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned short data[8];
int bit, ret, i = 0;
- unsigned long buffer_us, elapsed_us;
s64 time_a, time_b;
unsigned int alert;
@@ -462,8 +463,6 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
return ret;
alert &= INA266_CVRF;
- trace_printk("Conversion ready: %d\n", !!alert);
-
} while (!alert);
/*
@@ -488,19 +487,14 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
iio_push_to_buffers_with_timestamp(indio_dev,
(unsigned int *)data, time_a);
- buffer_us = (unsigned long)(time_b - time_a) / 1000;
- elapsed_us = (unsigned long)(time_a - chip->prev_ns) / 1000;
-
- trace_printk("uS: elapsed: %lu, buf: %lu\n", elapsed_us, buffer_us);
-
chip->prev_ns = time_a;
- return buffer_us;
+ return (unsigned long)(time_b - time_a) / 1000;
};
static int ina2xx_capture_thread(void *data)
{
- struct iio_dev *indio_dev = (struct iio_dev *)data;
+ struct iio_dev *indio_dev = data;
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip);
int buffer_us;
@@ -530,12 +524,13 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
unsigned int sampling_us = SAMPLING_PERIOD(chip);
- trace_printk("Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n",
- (unsigned int)(*indio_dev->active_scan_mask),
- 1000000/sampling_us, chip->avg);
+ dev_dbg(&indio_dev->dev, "Enabling buffer w/ scan_mask %02x, freq = %d, avg =%u\n",
+ (unsigned int)(*indio_dev->active_scan_mask),
+ 1000000 / sampling_us, chip->avg);
- trace_printk("Expected work period: %u us\n", sampling_us);
- trace_printk("Async readout mode: %d\n", chip->allow_async_readout);
+ dev_dbg(&indio_dev->dev, "Expected work period: %u us\n", sampling_us);
+ dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
+ chip->allow_async_readout);
chip->prev_ns = iio_get_time_ns();
@@ -575,8 +570,7 @@ static int ina2xx_debug_reg(struct iio_dev *indio_dev,
}
/* Possible integration times for vshunt and vbus */
-static IIO_CONST_ATTR_INT_TIME_AVAIL \
- ("0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.000140 0.000204 0.000332 0.000588 0.001100 0.002116 0.004156 0.008244");
static IIO_DEVICE_ATTR(in_allow_async_readout, S_IRUGO | S_IWUSR,
ina2xx_allow_async_readout_show,
@@ -598,21 +592,23 @@ static const struct attribute_group ina2xx_attribute_group = {
};
static const struct iio_info ina2xx_info = {
- .debugfs_reg_access = &ina2xx_debug_reg,
- .read_raw = &ina2xx_read_raw,
- .write_raw = &ina2xx_write_raw,
- .attrs = &ina2xx_attribute_group,
.driver_module = THIS_MODULE,
+ .attrs = &ina2xx_attribute_group,
+ .read_raw = ina2xx_read_raw,
+ .write_raw = ina2xx_write_raw,
+ .debugfs_reg_access = ina2xx_debug_reg,
};
/* Initialize the configuration and calibration registers. */
static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config)
{
u16 regval;
- int ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
+ int ret;
- if (ret < 0)
+ ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
+ if (ret)
return ret;
+
/*
* Set current LSB to 1mA, shunt is in uOhms
* (equation 13 in datasheet). We hardcode a Current_LSB
@@ -621,7 +617,7 @@ static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config)
* to the user for now.
*/
regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
- chip->shunt_resistor);
+ chip->shunt_resistor);
return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
}
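Worked example for the calibration value computed above (shunt value
assumed): on an INA226, calibration_factor = 5120000 per the config table
earlier in this diff, so a 2 mOhm shunt (shunt_resistor = 2000, in uOhm)
gives regval = DIV_ROUND_CLOSEST(5120000, 2000) = 2560.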
@@ -632,8 +628,8 @@ static int ina2xx_probe(struct i2c_client *client,
struct ina2xx_chip_info *chip;
struct iio_dev *indio_dev;
struct iio_buffer *buffer;
- int ret;
unsigned int val;
+ int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
@@ -641,8 +637,19 @@ static int ina2xx_probe(struct i2c_client *client,
chip = iio_priv(indio_dev);
+ /* This is only used for device removal purposes. */
+ i2c_set_clientdata(client, indio_dev);
+
+ chip->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ dev_err(&client->dev, "failed to allocate register map\n");
+ return PTR_ERR(chip->regmap);
+ }
+
chip->config = &ina2xx_config[id->driver_data];
+ mutex_init(&chip->state_lock);
+
if (of_property_read_u32(client->dev.of_node,
"shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata =
@@ -658,25 +665,6 @@ static int ina2xx_probe(struct i2c_client *client,
if (ret)
return ret;
- mutex_init(&chip->state_lock);
-
- /* This is only used for device removal purposes. */
- i2c_set_clientdata(client, indio_dev);
-
- indio_dev->name = id->name;
- indio_dev->channels = ina2xx_channels;
- indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels);
-
- indio_dev->dev.parent = &client->dev;
- indio_dev->info = &ina2xx_info;
- indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
-
- chip->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config);
- if (IS_ERR(chip->regmap)) {
- dev_err(&client->dev, "failed to allocate register map\n");
- return PTR_ERR(chip->regmap);
- }
-
/* Patch the current config register with default. */
val = chip->config->config_default;
@@ -687,24 +675,28 @@ static int ina2xx_probe(struct i2c_client *client,
}
ret = ina2xx_init(chip, val);
- if (ret < 0) {
- dev_err(&client->dev, "error configuring the device: %d\n",
- ret);
- return -ENODEV;
+ if (ret) {
+ dev_err(&client->dev, "error configuring the device\n");
+ return ret;
}
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = ina2xx_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels);
+ indio_dev->name = id->name;
+ indio_dev->info = &ina2xx_info;
+ indio_dev->setup_ops = &ina2xx_setup_ops;
+
buffer = devm_iio_kfifo_allocate(&indio_dev->dev);
if (!buffer)
return -ENOMEM;
- indio_dev->setup_ops = &ina2xx_setup_ops;
-
iio_device_attach_buffer(indio_dev, buffer);
return iio_device_register(indio_dev);
}
-
static int ina2xx_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
@@ -717,7 +709,6 @@ static int ina2xx_remove(struct i2c_client *client)
INA2XX_MODE_MASK, 0);
}
-
static const struct i2c_device_id ina2xx_id[] = {
{"ina219", ina219},
{"ina220", ina219},
@@ -726,7 +717,6 @@ static const struct i2c_device_id ina2xx_id[] = {
{"ina231", ina226},
{}
};
-
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
static struct i2c_driver ina2xx_driver = {
@@ -737,7 +727,6 @@ static struct i2c_driver ina2xx_driver = {
.remove = ina2xx_remove,
.id_table = ina2xx_id,
};
-
module_i2c_driver(ina2xx_driver);
MODULE_AUTHOR("Marc Titinger <marc.titinger@baylibre.com>");
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 929508e5266c..998dc3caad4c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11644] = {
.bits = 12,
- .int_vref_mv = 2048,
+ .int_vref_mv = 4096,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11645] = {
.bits = 12,
- .int_vref_mv = 4096,
+ .int_vref_mv = 2048,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1406,7 +1406,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11646] = {
.bits = 10,
- .int_vref_mv = 2048,
+ .int_vref_mv = 4096,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
},
[max11647] = {
.bits = 10,
- .int_vref_mv = 4096,
+ .int_vref_mv = 2048,
.mode_list = max11644_mode_list,
.num_modes = ARRAY_SIZE(max11644_mode_list),
.default_mode = s0to1,
@@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = {
{ "max11615", max11615 },
{ "max11616", max11616 },
{ "max11617", max11617 },
+ { "max11644", max11644 },
+ { "max11645", max11645 },
+ { "max11646", max11646 },
+ { "max11647", max11647 },
{}
};
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index d1c05f6eed18..a850ca7d1eda 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -187,26 +187,27 @@ out:
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
-#define MCP320X_VOLTAGE_CHANNEL_DIFF(num) \
+#define MCP320X_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
- .channel = (num * 2), \
- .channel2 = (num * 2 + 1), \
- .address = (num * 2), \
+ .channel = (chan1), \
+ .channel2 = (chan2), \
+ .address = (chan1), \
.differential = 1, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
static const struct iio_chan_spec mcp3201_channels[] = {
- MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
};
static const struct iio_chan_spec mcp3202_channels[] = {
MCP320X_VOLTAGE_CHANNEL(0),
MCP320X_VOLTAGE_CHANNEL(1),
- MCP320X_VOLTAGE_CHANNEL_DIFF(0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
};
static const struct iio_chan_spec mcp3204_channels[] = {
@@ -214,8 +215,10 @@ static const struct iio_chan_spec mcp3204_channels[] = {
MCP320X_VOLTAGE_CHANNEL(1),
MCP320X_VOLTAGE_CHANNEL(2),
MCP320X_VOLTAGE_CHANNEL(3),
- MCP320X_VOLTAGE_CHANNEL_DIFF(0),
- MCP320X_VOLTAGE_CHANNEL_DIFF(1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(2, 3),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(3, 2),
};
static const struct iio_chan_spec mcp3208_channels[] = {
@@ -227,10 +230,14 @@ static const struct iio_chan_spec mcp3208_channels[] = {
MCP320X_VOLTAGE_CHANNEL(5),
MCP320X_VOLTAGE_CHANNEL(6),
MCP320X_VOLTAGE_CHANNEL(7),
- MCP320X_VOLTAGE_CHANNEL_DIFF(0),
- MCP320X_VOLTAGE_CHANNEL_DIFF(1),
- MCP320X_VOLTAGE_CHANNEL_DIFF(2),
- MCP320X_VOLTAGE_CHANNEL_DIFF(3),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(1, 0),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(2, 3),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(3, 2),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(4, 5),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(5, 4),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(6, 7),
+ MCP320X_VOLTAGE_CHANNEL_DIFF(7, 6),
};
static const struct iio_info mcp320x_info = {
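With the two-argument macro, each polarity of a differential pair becomes
its own channel, and the standard IIO sysfs names follow from
(channel, channel2): MCP320X_VOLTAGE_CHANNEL_DIFF(0, 1) is exposed as
in_voltage0-voltage1_raw, and the swapped (1, 0) variant as
in_voltage1-voltage0_raw.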
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 6eca7aea8a37..d7b36efd2f3c 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -1,11 +1,12 @@
/*
- * mcp3422.c - driver for the Microchip mcp3422/3/4/6/7/8 chip family
+ * mcp3422.c - driver for the Microchip mcp3421/2/3/4/5/6/7/8 chip family
*
* Copyright (C) 2013, Angelo Compagnucci
* Author: Angelo Compagnucci <angelo.compagnucci@gmail.com>
*
* Datasheet: http://ww1.microchip.com/downloads/en/devicedoc/22088b.pdf
* http://ww1.microchip.com/downloads/en/DeviceDoc/22226a.pdf
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/22072b.pdf
*
* This driver exports the value of analog input voltage to sysfs, the
* voltage unit is nV.
@@ -338,7 +339,7 @@ static int mcp3422_probe(struct i2c_client *client,
u8 config;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!indio_dev)
@@ -357,6 +358,7 @@ static int mcp3422_probe(struct i2c_client *client,
switch (adc->id) {
case 1:
+ case 5:
indio_dev->channels = mcp3421_channels;
indio_dev->num_channels = ARRAY_SIZE(mcp3421_channels);
break;
@@ -395,6 +397,7 @@ static const struct i2c_device_id mcp3422_id[] = {
{ "mcp3422", 2 },
{ "mcp3423", 3 },
{ "mcp3424", 4 },
+ { "mcp3425", 5 },
{ "mcp3426", 6 },
{ "mcp3427", 7 },
{ "mcp3428", 8 },
@@ -421,5 +424,5 @@ static struct i2c_driver mcp3422_driver = {
module_i2c_driver(mcp3422_driver);
MODULE_AUTHOR("Angelo Compagnucci <angelo.compagnucci@gmail.com>");
-MODULE_DESCRIPTION("Microchip mcp3422/3/4/6/7/8 driver");
+MODULE_DESCRIPTION("Microchip mcp3421/2/3/4/5/6/7/8 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c
index bb1f15224ac8..33051b87aac2 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/iio/adc/mxs-lradc.c
@@ -443,7 +443,8 @@ static void mxs_lradc_setup_ts_channel(struct mxs_lradc *lradc, unsigned ch)
LRADC_CH_NUM_SAMPLES(lradc->over_sample_cnt - 1),
LRADC_CH(ch));
- /* from the datasheet:
+ /*
+ * from the datasheet:
* "Software must clear this register in preparation for a
* multi-cycle accumulation."
*/
@@ -504,7 +505,8 @@ static void mxs_lradc_setup_ts_pressure(struct mxs_lradc *lradc, unsigned ch1,
mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch1));
mxs_lradc_reg_wrt(lradc, reg, LRADC_CH(ch2));
- /* from the datasheet:
+ /*
+ * from the datasheet:
* "Software must clear this register in preparation for a
* multi-cycle accumulation."
*/
@@ -914,7 +916,8 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SCALE:
if (chan->type == IIO_TEMP) {
- /* From the datasheet, we have to multiply by 1.012 and
+ /*
+ * From the datasheet, we have to multiply by 1.012 and
* divide by 4
*/
*val = 0;
@@ -929,7 +932,8 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_OFFSET:
if (chan->type == IIO_TEMP) {
- /* The calculated value from the ADC is in Kelvin, we
+ /*
+ * The calculated value from the ADC is in Kelvin, we
* want Celsius for hwmon so the offset is -273.15
* The offset is applied before scaling so it is
* actually -273.15 * 4 / 1.012 = -1079.644268
@@ -1750,6 +1754,7 @@ static int mxs_lradc_remove(struct platform_device *pdev)
iio_triggered_buffer_cleanup(iio);
clk_disable_unprepare(lradc->clk);
+
return 0;
}
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index f42eb8a7d21f..2bbf0c521beb 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -534,7 +534,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(adc->irq, NULL,
palmas_gpadc_irq,
- IRQF_ONESHOT | IRQF_EARLY_RESUME, dev_name(adc->dev),
+ IRQF_ONESHOT, dev_name(adc->dev),
adc);
if (ret < 0) {
dev_err(adc->dev,
@@ -549,7 +549,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
adc->irq_auto_0 = platform_get_irq(pdev, 1);
ret = request_threaded_irq(adc->irq_auto_0, NULL,
palmas_gpadc_irq_auto,
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas-adc-auto-0", adc);
if (ret < 0) {
dev_err(adc->dev, "request auto0 irq %d failed: %d\n",
@@ -565,7 +565,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
adc->irq_auto_1 = platform_get_irq(pdev, 2);
ret = request_threaded_irq(adc->irq_auto_1, NULL,
palmas_gpadc_irq_auto,
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
"palmas-adc-auto-1", adc);
if (ret < 0) {
dev_err(adc->dev, "request auto1 irq %d failed: %d\n",
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 2c8374f86252..ecbc12138d58 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -73,7 +73,7 @@ static int adc081c_probe(struct i2c_client *client,
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
new file mode 100644
index 000000000000..0afeac0c9bad
--- /dev/null
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -0,0 +1,288 @@
+/*
+ * ADC0831/ADC0832/ADC0834/ADC0838 8-bit ADC driver
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: http://www.ti.com/lit/ds/symlink/adc0832-n.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/iio.h>
+#include <linux/regulator/consumer.h>
+
+enum {
+ adc0831,
+ adc0832,
+ adc0834,
+ adc0838,
+};
+
+struct adc0832 {
+ struct spi_device *spi;
+ struct regulator *reg;
+ struct mutex lock;
+ u8 mux_bits;
+
+ u8 tx_buf[2] ____cacheline_aligned;
+ u8 rx_buf[2];
+};
+
+#define ADC0832_VOLTAGE_CHANNEL(chan) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ }
+
+#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (chan1), \
+ .channel2 = (chan2), \
+ .differential = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+ }
+
+static const struct iio_chan_spec adc0831_channels[] = {
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+};
+
+static const struct iio_chan_spec adc0832_channels[] = {
+ ADC0832_VOLTAGE_CHANNEL(0),
+ ADC0832_VOLTAGE_CHANNEL(1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+};
+
+static const struct iio_chan_spec adc0834_channels[] = {
+ ADC0832_VOLTAGE_CHANNEL(0),
+ ADC0832_VOLTAGE_CHANNEL(1),
+ ADC0832_VOLTAGE_CHANNEL(2),
+ ADC0832_VOLTAGE_CHANNEL(3),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+};
+
+static const struct iio_chan_spec adc0838_channels[] = {
+ ADC0832_VOLTAGE_CHANNEL(0),
+ ADC0832_VOLTAGE_CHANNEL(1),
+ ADC0832_VOLTAGE_CHANNEL(2),
+ ADC0832_VOLTAGE_CHANNEL(3),
+ ADC0832_VOLTAGE_CHANNEL(4),
+ ADC0832_VOLTAGE_CHANNEL(5),
+ ADC0832_VOLTAGE_CHANNEL(6),
+ ADC0832_VOLTAGE_CHANNEL(7),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7),
+ ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6),
+};
+
+static int adc0831_adc_conversion(struct adc0832 *adc)
+{
+ struct spi_device *spi = adc->spi;
+ int ret;
+
+ ret = spi_read(spi, &adc->rx_buf, 2);
+ if (ret)
+ return ret;
+
+ /*
+ * Skip TRI-STATE and a leading zero
+ */
+ return (adc->rx_buf[0] << 2 & 0xff) | (adc->rx_buf[1] >> 6);
+}
+
+static int adc0832_adc_conversion(struct adc0832 *adc, int channel,
+ bool differential)
+{
+ struct spi_device *spi = adc->spi;
+ struct spi_transfer xfer = {
+ .tx_buf = adc->tx_buf,
+ .rx_buf = adc->rx_buf,
+ .len = 2,
+ };
+ int ret;
+
+ if (!adc->mux_bits)
+ return adc0831_adc_conversion(adc);
+
+ /* start bit */
+ adc->tx_buf[0] = 1 << (adc->mux_bits + 1);
+ /* single-ended or differential */
+ adc->tx_buf[0] |= differential ? 0 : (1 << adc->mux_bits);
+ /* odd / sign */
+ adc->tx_buf[0] |= (channel % 2) << (adc->mux_bits - 1);
+ /* select */
+ if (adc->mux_bits > 1)
+ adc->tx_buf[0] |= channel / 2;
+
+ /* align Data output BIT7 (MSB) to 8-bit boundary */
+ adc->tx_buf[0] <<= 1;
+
+ ret = spi_sync_transfer(spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ return adc->rx_buf[1];
+}
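+
+/*
+ * Bit-assembly example (illustrative): for an ADC0838 (mux_bits = 3)
+ * reading channel 5 single-ended, tx_buf[0] collects the start bit
+ * (1 << 4 = 0x10), the single-ended bit (1 << 3, giving 0x18), the
+ * odd/sign bit ((5 % 2) << 2, giving 0x1c) and the select bits
+ * (5 / 2 = 2, giving 0x1e); the final left shift puts 0x3c on the wire.
+ */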
+
+static int adc0832_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *channel, int *value,
+ int *shift, long mask)
+{
+ struct adc0832 *adc = iio_priv(iio);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ *value = adc0832_adc_conversion(adc, channel->channel,
+ channel->differential);
+ mutex_unlock(&adc->lock);
+ if (*value < 0)
+ return *value;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *value = regulator_get_voltage(adc->reg);
+ if (*value < 0)
+ return *value;
+
+ /* convert regulator output voltage to mV */
+ *value /= 1000;
+ *shift = 8;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info adc0832_info = {
+ .read_raw = adc0832_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int adc0832_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct adc0832 *adc;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->spi = spi;
+ mutex_init(&adc->lock);
+
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &adc0832_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ switch (spi_get_device_id(spi)->driver_data) {
+ case adc0831:
+ adc->mux_bits = 0;
+ indio_dev->channels = adc0831_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc0831_channels);
+ break;
+ case adc0832:
+ adc->mux_bits = 1;
+ indio_dev->channels = adc0832_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc0832_channels);
+ break;
+ case adc0834:
+ adc->mux_bits = 2;
+ indio_dev->channels = adc0834_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc0834_channels);
+ break;
+ case adc0838:
+ adc->mux_bits = 3;
+ indio_dev->channels = adc0838_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adc0838_channels);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adc->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(adc->reg))
+ return PTR_ERR(adc->reg);
+
+ ret = regulator_enable(adc->reg);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, indio_dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ regulator_disable(adc->reg);
+
+ return ret;
+}
+
+static int adc0832_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct adc0832 *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regulator_disable(adc->reg);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id adc0832_dt_ids[] = {
+ { .compatible = "ti,adc0831", },
+ { .compatible = "ti,adc0832", },
+ { .compatible = "ti,adc0834", },
+ { .compatible = "ti,adc0838", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adc0832_dt_ids);
+
+#endif
+
+static const struct spi_device_id adc0832_id[] = {
+ { "adc0831", adc0831 },
+ { "adc0832", adc0832 },
+ { "adc0834", adc0834 },
+ { "adc0838", adc0838 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adc0832_id);
+
+static struct spi_driver adc0832_driver = {
+ .driver = {
+ .name = "adc0832",
+ .of_match_table = of_match_ptr(adc0832_dt_ids),
+ },
+ .probe = adc0832_probe,
+ .remove = adc0832_remove,
+ .id_table = adc0832_id,
+};
+module_spi_driver(adc0832_driver);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("ADC0831/ADC0832/ADC0834/ADC0838 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
new file mode 100644
index 000000000000..73cbf0b54e54
--- /dev/null
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -0,0 +1,612 @@
+/*
+ * ADS1015 - Texas Instruments Analog-to-Digital Converter
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for the ADS1015 ADC. 7-bit I2C slave addresses:
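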
+ * * 0x48 - ADDR connected to Ground
+ * * 0x49 - ADDR connected to Vdd
+ * * 0x4A - ADDR connected to SDA
+ * * 0x4B - ADDR connected to SCL
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#include <linux/i2c/ads1015.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define ADS1015_DRV_NAME "ads1015"
+
+#define ADS1015_CONV_REG 0x00
+#define ADS1015_CFG_REG 0x01
+
+#define ADS1015_CFG_DR_SHIFT 5
+#define ADS1015_CFG_MOD_SHIFT 8
+#define ADS1015_CFG_PGA_SHIFT 9
+#define ADS1015_CFG_MUX_SHIFT 12
+
+#define ADS1015_CFG_DR_MASK GENMASK(7, 5)
+#define ADS1015_CFG_MOD_MASK BIT(8)
+#define ADS1015_CFG_PGA_MASK GENMASK(11, 9)
+#define ADS1015_CFG_MUX_MASK GENMASK(14, 12)
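+
+/*
+ * Example config word (illustrative): AIN0 single-ended (mux = 4),
+ * pga = 2 and data rate index 4 compose to
+ * (4 << 12) | (2 << 9) | (4 << 5) = 0x4480 before the mode bit.
+ */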
+
+/* device operating modes */
+#define ADS1015_CONTINUOUS 0
+#define ADS1015_SINGLESHOT 1
+
+#define ADS1015_SLEEP_DELAY_MS 2000
+#define ADS1015_DEFAULT_PGA 2
+#define ADS1015_DEFAULT_DATA_RATE 4
+#define ADS1015_DEFAULT_CHAN 0
+
+enum ads1015_channels {
+ ADS1015_AIN0_AIN1 = 0,
+ ADS1015_AIN0_AIN3,
+ ADS1015_AIN1_AIN3,
+ ADS1015_AIN2_AIN3,
+ ADS1015_AIN0,
+ ADS1015_AIN1,
+ ADS1015_AIN2,
+ ADS1015_AIN3,
+ ADS1015_TIMESTAMP,
+};
+
+static const unsigned int ads1015_data_rate[] = {
+ 128, 250, 490, 920, 1600, 2400, 3300, 3300
+};
+
+static const struct {
+ int scale;
+ int uscale;
+} ads1015_scale[] = {
+ {3, 0},
+ {2, 0},
+ {1, 0},
+ {0, 500000},
+ {0, 250000},
+ {0, 125000},
+ {0, 125000},
+ {0, 125000},
+};
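+
+/*
+ * Each entry above is the LSB size in mV for the corresponding PGA
+ * setting, i.e. full-scale range / 2048 for the 12-bit ADS1015
+ * (e.g. +/-6.144 V -> 6144 / 2048 = 3 mV per LSB).
+ */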
+
+#define ADS1015_V_CHAN(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .indexed = 1, \
+ .address = _addr, \
+ .channel = _chan, \
+ .channel2 = _chan2, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+struct ads1015_data {
+ struct regmap *regmap;
+ /*
+ * Protects ADC ops, e.g: concurrent sysfs/buffered
+ * data reads, configuration updates
+ */
+ struct mutex lock;
+ struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+};
+
+static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return (reg == ADS1015_CFG_REG);
+}
+
+static const struct regmap_config ads1015_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .max_register = ADS1015_CFG_REG,
+ .writeable_reg = ads1015_is_writeable_reg,
+};
+
+static const struct iio_chan_spec ads1015_channels[] = {
+ ADS1015_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
+ ADS1015_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
+ ADS1015_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
+ ADS1015_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
+ ADS1015_V_CHAN(0, ADS1015_AIN0),
+ ADS1015_V_CHAN(1, ADS1015_AIN1),
+ ADS1015_V_CHAN(2, ADS1015_AIN2),
+ ADS1015_V_CHAN(3, ADS1015_AIN3),
+ IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
+static int ads1015_set_power_state(struct ads1015_data *data, bool on)
+{
+ int ret;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ if (on) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ pm_runtime_put_noidle(dev);
+ } else {
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
+}
+
+static
+int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
+{
+ int ret, pga, dr, conv_time;
+ bool change;
+
+ if (chan < 0 || chan >= ADS1015_CHANNELS)
+ return -EINVAL;
+
+ pga = data->channel_data[chan].pga;
+ dr = data->channel_data[chan].data_rate;
+
+ ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MUX_MASK |
+ ADS1015_CFG_PGA_MASK,
+ chan << ADS1015_CFG_MUX_SHIFT |
+ pga << ADS1015_CFG_PGA_SHIFT,
+ &change);
+ if (ret < 0)
+ return ret;
+
+ if (change) {
+ conv_time = DIV_ROUND_UP(USEC_PER_SEC, ads1015_data_rate[dr]);
+ usleep_range(conv_time, conv_time + 1);
+ }
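+ /*
+  * e.g. at the default 1600 SPS (index 4) the wait above is
+  * DIV_ROUND_UP(1000000, 1600) = 625 us for one conversion.
+  */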
+
+ return regmap_read(data->regmap, ADS1015_CONV_REG, val);
+}
+
+static irqreturn_t ads1015_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ads1015_data *data = iio_priv(indio_dev);
+ s16 buf[8]; /* 1x s16 ADC val + 3x s16 padding + 4x s16 timestamp */
+ int chan, ret, res;
+
+ memset(buf, 0, sizeof(buf));
+
+ mutex_lock(&data->lock);
+ chan = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ ret = ads1015_get_adc_result(data, chan, &res);
+ if (ret < 0) {
+ mutex_unlock(&data->lock);
+ goto err;
+ }
+
+ buf[0] = res;
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ads1015_set_scale(struct ads1015_data *data, int chan,
+ int scale, int uscale)
+{
+ int i, ret, rindex = -1;
+
+ for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++)
+ if (ads1015_scale[i].scale == scale &&
+ ads1015_scale[i].uscale == uscale) {
+ rindex = i;
+ break;
+ }
+ if (rindex < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_PGA_MASK,
+ rindex << ADS1015_CFG_PGA_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ data->channel_data[chan].pga = rindex;
+
+ return 0;
+}
+
+static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
+{
+ int i, ret, rindex = -1;
+
+ for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
+ if (ads1015_data_rate[i] == rate) {
+ rindex = i;
+ break;
+ }
+ if (rindex < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_DR_MASK,
+ rindex << ADS1015_CFG_DR_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ data->channel_data[chan].data_rate = rindex;
+
+ return 0;
+}
+
+static int ads1015_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret, idx;
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ mutex_lock(&data->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ ret = ads1015_set_power_state(data, true);
+ if (ret < 0)
+ break;
+
+ ret = ads1015_get_adc_result(data, chan->address, val);
+ if (ret < 0) {
+ ads1015_set_power_state(data, false);
+ break;
+ }
+
+ /* 12 bit res, D0 is bit 4 in conversion register */
+ *val = sign_extend32(*val >> 4, 11);
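+ /*
+  * e.g. a conversion register value of 0x7ff0 becomes 0x7ff
+  * (+2047) and 0x8000 becomes -2048 after sign extension.
+  */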
+
+ ret = ads1015_set_power_state(data, false);
+ if (ret < 0)
+ break;
+
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ idx = data->channel_data[chan->address].pga;
+ *val = ads1015_scale[idx].scale;
+ *val2 = ads1015_scale[idx].uscale;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ idx = data->channel_data[chan->address].data_rate;
+ *val = ads1015_data_rate[idx];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->lock);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ads1015_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ads1015_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ ret = ads1015_set_scale(data, chan->address, val, val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ads1015_set_data_rate(data, chan->address, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->lock);
+
+ return ret;
+}
+
+static int ads1015_buffer_preenable(struct iio_dev *indio_dev)
+{
+ return ads1015_set_power_state(iio_priv(indio_dev), true);
+}
+
+static int ads1015_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ return ads1015_set_power_state(iio_priv(indio_dev), false);
+}
+
+static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
+ .preenable = ads1015_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = iio_triggered_buffer_predisable,
+ .postdisable = ads1015_buffer_postdisable,
+ .validate_scan_mask = &iio_validate_scan_mask_onehot,
+};
+
+static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
+static IIO_CONST_ATTR(sampling_frequency_available,
+ "128 250 490 920 1600 2400 3300");
+
+static struct attribute *ads1015_attributes[] = {
+ &iio_const_attr_scale_available.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ads1015_attribute_group = {
+ .attrs = ads1015_attributes,
+};
+
+static const struct iio_info ads1015_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = ads1015_read_raw,
+ .write_raw = ads1015_write_raw,
+ .attrs = &ads1015_attribute_group,
+};
+
+#ifdef CONFIG_OF
+static int ads1015_get_channels_config_of(struct i2c_client *client)
+{
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ struct device_node *node;
+
+ if (!client->dev.of_node ||
+ !of_get_next_child(client->dev.of_node, NULL))
+ return -EINVAL;
+
+ for_each_child_of_node(client->dev.of_node, node) {
+ u32 pval;
+ unsigned int channel;
+ unsigned int pga = ADS1015_DEFAULT_PGA;
+ unsigned int data_rate = ADS1015_DEFAULT_DATA_RATE;
+
+ if (of_property_read_u32(node, "reg", &pval)) {
+ dev_err(&client->dev, "invalid reg on %s\n",
+ node->full_name);
+ continue;
+ }
+
+ channel = pval;
+ if (channel >= ADS1015_CHANNELS) {
+ dev_err(&client->dev,
+ "invalid channel index %d on %s\n",
+ channel, node->full_name);
+ continue;
+ }
+
+ if (!of_property_read_u32(node, "ti,gain", &pval)) {
+ pga = pval;
+ if (pga > 6) {
+ dev_err(&client->dev, "invalid gain on %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(node, "ti,datarate", &pval)) {
+ data_rate = pval;
+ if (data_rate > 7) {
+ dev_err(&client->dev,
+ "invalid data_rate on %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+ }
+
+ data->channel_data[channel].pga = pga;
+ data->channel_data[channel].data_rate = data_rate;
+ }
+
+ return 0;
+}
+#endif
+
+static void ads1015_get_channels_config(struct i2c_client *client)
+{
+ unsigned int k;
+
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ads1015_data *data = iio_priv(indio_dev);
+ struct ads1015_platform_data *pdata = dev_get_platdata(&client->dev);
+
+ /* prefer platform data */
+ if (pdata) {
+ memcpy(data->channel_data, pdata->channel_data,
+ sizeof(data->channel_data));
+ return;
+ }
+
+#ifdef CONFIG_OF
+ if (!ads1015_get_channels_config_of(client))
+ return;
+#endif
+ /* fallback on default configuration */
+ for (k = 0; k < ADS1015_CHANNELS; ++k) {
+ data->channel_data[k].pga = ADS1015_DEFAULT_PGA;
+ data->channel_data[k].data_rate = ADS1015_DEFAULT_DATA_RATE;
+ }
+}
+
+static int ads1015_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct ads1015_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &ads1015_info;
+ indio_dev->name = ADS1015_DRV_NAME;
+ indio_dev->channels = ads1015_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ /* we need to keep this ABI the same as that of the hwmon ADS1015 driver */
+ ads1015_get_channels_config(client);
+
+ data->regmap = devm_regmap_init_i2c(client, &ads1015_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "Failed to allocate register map\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ads1015_trigger_handler,
+ &ads1015_buffer_setup_ops);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret)
+ goto err_buffer_cleanup;
+ pm_runtime_set_autosuspend_delay(&client->dev, ADS1015_SLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to register IIO device\n");
+ goto err_buffer_cleanup;
+ }
+
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
+}
+
+static int ads1015_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ /* power down single shot mode */
+ return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MOD_MASK,
+ ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT);
+}
+
+#ifdef CONFIG_PM
+static int ads1015_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MOD_MASK,
+ ADS1015_SINGLESHOT << ADS1015_CFG_MOD_SHIFT);
+}
+
+static int ads1015_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct ads1015_data *data = iio_priv(indio_dev);
+
+ return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+ ADS1015_CFG_MOD_MASK,
+ ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
+}
+#endif
+
+static const struct dev_pm_ops ads1015_pm_ops = {
+ SET_RUNTIME_PM_OPS(ads1015_runtime_suspend,
+ ads1015_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id ads1015_id[] = {
+ {"ads1015", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ads1015_id);
+
+static struct i2c_driver ads1015_driver = {
+ .driver = {
+ .name = ADS1015_DRV_NAME,
+ .pm = &ads1015_pm_ops,
+ },
+ .probe = ads1015_probe,
+ .remove = ads1015_remove,
+ .id_table = ads1015_id,
+};
+
+module_i2c_driver(ads1015_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("Texas Instruments ADS1015 ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index ebdb838d3a1c..9fabed47053d 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -93,12 +93,7 @@ static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(&queue->buffer);
- dmaengine_terminate_all(dmaengine_buffer->chan);
- /* FIXME: There is a slight chance of a race condition here.
- * dmaengine_terminate_all() does not guarantee that all transfer
- * callbacks have finished running. Need to introduce a
- * dmaengine_terminate_all_sync().
- */
+ dmaengine_terminate_sync(dmaengine_buffer->chan);
iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}
diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig
index f16de61be46d..f73290f84c90 100644
--- a/drivers/iio/chemical/Kconfig
+++ b/drivers/iio/chemical/Kconfig
@@ -4,6 +4,20 @@
menu "Chemical Sensors"
+config ATLAS_PH_SENSOR
+ tristate "Atlas Scientific OEM pH-SM sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select IRQ_WORK
+ help
+ Say Y here to build I2C interface support for the Atlas
+ Scientific OEM pH-SM sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atlas-ph-sensor.
+
config IAQCORE
tristate "AMS iAQ-Core VOC sensors"
depends on I2C
diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile
index 167861fadfab..b02202b41289 100644
--- a/drivers/iio/chemical/Makefile
+++ b/drivers/iio/chemical/Makefile
@@ -3,5 +3,6 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-ph-sensor.o
obj-$(CONFIG_IAQCORE) += ams-iaq-core.o
obj-$(CONFIG_VZ89X) += vz89x.o
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
new file mode 100644
index 000000000000..62b37cd8fb56
--- /dev/null
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -0,0 +1,509 @@
+/*
+ * atlas-ph-sensor.c - Support for Atlas Scientific OEM pH-SM sensor
+ *
+ * Copyright (C) 2015 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/irq_work.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/pm_runtime.h>
+
+#define ATLAS_REGMAP_NAME "atlas_ph_regmap"
+#define ATLAS_DRV_NAME "atlas_ph"
+
+#define ATLAS_REG_DEV_TYPE 0x00
+#define ATLAS_REG_DEV_VERSION 0x01
+
+#define ATLAS_REG_INT_CONTROL 0x04
+#define ATLAS_REG_INT_CONTROL_EN BIT(3)
+
+#define ATLAS_REG_PWR_CONTROL 0x06
+
+#define ATLAS_REG_CALIB_STATUS 0x0d
+#define ATLAS_REG_CALIB_STATUS_MASK 0x07
+#define ATLAS_REG_CALIB_STATUS_LOW BIT(0)
+#define ATLAS_REG_CALIB_STATUS_MID BIT(1)
+#define ATLAS_REG_CALIB_STATUS_HIGH BIT(2)
+
+#define ATLAS_REG_TEMP_DATA 0x0e
+#define ATLAS_REG_PH_DATA 0x16
+
+#define ATLAS_PH_INT_TIME_IN_US 450000
+
+struct atlas_data {
+ struct i2c_client *client;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ struct irq_work work;
+
+ __be32 buffer[4]; /* 32-bit pH data + 32-bit pad + 64-bit timestamp */
+};
+
+static const struct regmap_range atlas_volatile_ranges[] = {
+ regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL),
+ regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4),
+};
+
+static const struct regmap_access_table atlas_volatile_table = {
+ .yes_ranges = atlas_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(atlas_volatile_ranges),
+};
+
+static const struct regmap_config atlas_regmap_config = {
+ .name = ATLAS_REGMAP_NAME,
+
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .volatile_table = &atlas_volatile_table,
+ .max_register = ATLAS_REG_PH_DATA + 4,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct iio_chan_spec atlas_channels[] = {
+ {
+ .type = IIO_PH,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(1),
+ {
+ .type = IIO_TEMP,
+ .address = ATLAS_REG_TEMP_DATA,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .output = 1,
+ .scan_index = -1
+ },
+};
+
+static int atlas_set_powermode(struct atlas_data *data, int on)
+{
+ return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on);
+}
+
+static int atlas_set_interrupt(struct atlas_data *data, bool state)
+{
+ return regmap_update_bits(data->regmap, ATLAS_REG_INT_CONTROL,
+ ATLAS_REG_INT_CONTROL_EN,
+ state ? ATLAS_REG_INT_CONTROL_EN : 0);
+}
+
+static int atlas_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct atlas_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(&data->client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&data->client->dev);
+ return ret;
+ }
+
+ return atlas_set_interrupt(data, true);
+}
+
+static int atlas_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct atlas_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_triggered_buffer_predisable(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = atlas_set_interrupt(data, false);
+ if (ret)
+ return ret;
+
+ pm_runtime_mark_last_busy(&data->client->dev);
+ return pm_runtime_put_autosuspend(&data->client->dev);
+}
+
+static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
+ .postenable = atlas_buffer_postenable,
+ .predisable = atlas_buffer_predisable,
+};
+
+static void atlas_work_handler(struct irq_work *work)
+{
+ struct atlas_data *data = container_of(work, struct atlas_data, work);
+
+ iio_trigger_poll(data->trig);
+}
+
+static irqreturn_t atlas_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct atlas_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
+ (u8 *) &data->buffer, sizeof(data->buffer[0]));
+
+ if (!ret)
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t atlas_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct atlas_data *data = iio_priv(indio_dev);
+
+ irq_work_queue(&data->work);
+
+ return IRQ_HANDLED;
+}
+
+static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
+{
+ struct device *dev = &data->client->dev;
+ int suspended = pm_runtime_suspended(dev);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
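+ /*
+  * If the device was runtime-suspended, give it one integration
+  * time (450 ms) after power-up so the first reading is a fresh,
+  * presumably valid, conversion.
+  */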
+ if (suspended)
+ usleep_range(ATLAS_PH_INT_TIME_IN_US,
+ ATLAS_PH_INT_TIME_IN_US + 100000);
+
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
+ (u8 *) val, sizeof(*val));
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int atlas_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct atlas_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ int ret;
+ __be32 reg;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ (u8 *) &reg, sizeof(reg));
+ break;
+ case IIO_PH:
+ mutex_lock(&indio_dev->mlock);
+
+ if (iio_buffer_enabled(indio_dev))
+ ret = -EBUSY;
+ else
+ ret = atlas_read_ph_measurement(data, &reg);
+
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ *val = be32_to_cpu(reg);
+ ret = IIO_VAL_INT;
+ }
+ return ret;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 1; /* 0.01 */
+ *val2 = 100;
+ break;
+ case IIO_PH:
+ *val = 1; /* 0.001 */
+ *val2 = 1000;
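+ /* e.g. a raw reading of 7004 is reported as 7.004 pH */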
+ break;
+ default:
+ return -EINVAL;
+ }
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int atlas_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct atlas_data *data = iio_priv(indio_dev);
+ __be32 reg = cpu_to_be32(val);
+
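+ /*
+  * With the 0.01 scale reported for IIO_TEMP above, the 0..20000
+  * raw bound below corresponds to 0..200 deg C (inferred from the
+  * bounds, not quoted from the datasheet).
+  */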
+ if (val2 != 0 || val < 0 || val > 20000)
+ return -EINVAL;
+
+ if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_TEMP)
+ return -EINVAL;
+
+ return regmap_bulk_write(data->regmap, chan->address,
+ &reg, sizeof(reg));
+}
+
+static const struct iio_info atlas_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = atlas_read_raw,
+ .write_raw = atlas_write_raw,
+};
+
+static int atlas_check_calibration(struct atlas_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, ATLAS_REG_CALIB_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (!(val & ATLAS_REG_CALIB_STATUS_MASK)) {
+ dev_warn(dev, "device has not been calibrated\n");
+ return 0;
+ }
+
+ if (!(val & ATLAS_REG_CALIB_STATUS_LOW))
+ dev_warn(dev, "device missing low point calibration\n");
+
+ if (!(val & ATLAS_REG_CALIB_STATUS_MID))
+ dev_warn(dev, "device missing mid point calibration\n");
+
+ if (!(val & ATLAS_REG_CALIB_STATUS_HIGH))
+ dev_warn(dev, "device missing high point calibration\n");
+
+ return 0;
+}
+
+static int atlas_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct atlas_data *data;
+ struct iio_trigger *trig;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ indio_dev->info = &atlas_info;
+ indio_dev->name = ATLAS_DRV_NAME;
+ indio_dev->channels = atlas_channels;
+ indio_dev->num_channels = ARRAY_SIZE(atlas_channels);
+ indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &client->dev;
+
+ trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d",
+ indio_dev->name, indio_dev->id);
+
+ if (!trig)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->trig = trig;
+ trig->dev.parent = indio_dev->dev.parent;
+ trig->ops = &atlas_interrupt_trigger_ops;
+ iio_trigger_set_drvdata(trig, indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &atlas_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "regmap initialization failed\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret)
+ return ret;
+
+ if (client->irq <= 0) {
+ dev_err(&client->dev, "no valid irq defined\n");
+ return -EINVAL;
+ }
+
+ ret = atlas_check_calibration(data);
+ if (ret)
+ return ret;
+
+ ret = iio_trigger_register(trig);
+ if (ret) {
+ dev_err(&client->dev, "failed to register trigger\n");
+ return ret;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ &atlas_trigger_handler, &atlas_buffer_setup_ops);
+ if (ret) {
+ dev_err(&client->dev, "cannot setup iio trigger\n");
+ goto unregister_trigger;
+ }
+
+ init_irq_work(&data->work, atlas_work_handler);
+
+ /* interrupt pin toggles on new conversion */
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, atlas_interrupt_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "atlas_irq",
+ indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "request irq (%d) failed\n", client->irq);
+ goto unregister_buffer;
+ }
+
+ ret = atlas_set_powermode(data, 1);
+ if (ret) {
+ dev_err(&client->dev, "cannot power device on");
+ goto unregister_buffer;
+ }
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, 2500);
+ pm_runtime_use_autosuspend(&client->dev);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "unable to register device\n");
+ goto unregister_pm;
+ }
+
+ return 0;
+
+unregister_pm:
+ pm_runtime_disable(&client->dev);
+ atlas_set_powermode(data, 0);
+
+unregister_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+unregister_trigger:
+ iio_trigger_unregister(data->trig);
+
+ return ret;
+}
+
+static int atlas_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct atlas_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->trig);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ return atlas_set_powermode(data, 0);
+}
+
+#ifdef CONFIG_PM
+static int atlas_runtime_suspend(struct device *dev)
+{
+ struct atlas_data *data =
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return atlas_set_powermode(data, 0);
+}
+
+static int atlas_runtime_resume(struct device *dev)
+{
+ struct atlas_data *data =
+ iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+ return atlas_set_powermode(data, 1);
+}
+#endif
+
+static const struct dev_pm_ops atlas_pm_ops = {
+ SET_RUNTIME_PM_OPS(atlas_runtime_suspend,
+ atlas_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id atlas_id[] = {
+ { "atlas-ph-sm", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_id);
+
+static const struct of_device_id atlas_dt_ids[] = {
+ { .compatible = "atlas,ph-sm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, atlas_dt_ids);
+
+static struct i2c_driver atlas_driver = {
+ .driver = {
+ .name = ATLAS_DRV_NAME,
+ .of_match_table = of_match_ptr(atlas_dt_ids),
+ .pm = &atlas_pm_ops,
+ },
+ .probe = atlas_probe,
+ .remove = atlas_remove,
+ .id_table = atlas_id,
+};
+module_i2c_driver(atlas_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("Atlas Scientific pH-SM sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index b8b804923230..652649da500f 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -249,7 +249,7 @@ static int vz89x_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
data->xfer = vz89x_smbus_xfer;
else
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
i2c_set_clientdata(client, indio_dev);
data->client = client;
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 8447c31e27f2..f5a2d445d0c0 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -18,13 +18,15 @@
#include <asm/unaligned.h>
#include <linux/iio/common/st_sensors.h>
+#include "st_sensors_core.h"
+
static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
{
return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
}
-static int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
- u8 reg_addr, u8 mask, u8 data)
+int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+ u8 reg_addr, u8 mask, u8 data)
{
int err;
u8 new_data;
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.h b/drivers/iio/common/st_sensors/st_sensors_core.h
new file mode 100644
index 000000000000..cd88098ff6f1
--- /dev/null
+++ b/drivers/iio/common/st_sensors/st_sensors_core.h
@@ -0,0 +1,8 @@
+/*
+ * Local functions in the ST Sensors core
+ */
+#ifndef __ST_SENSORS_CORE_H
+#define __ST_SENSORS_CORE_H
+int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
+ u8 reg_addr, u8 mask, u8 data);
+#endif
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 3e907040c2c7..6a8c98327945 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -14,32 +14,65 @@
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/interrupt.h>
-
#include <linux/iio/common/st_sensors.h>
-
+#include "st_sensors_core.h"
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
- int err;
+ int err, irq;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ unsigned long irq_trig;
sdata->trig = iio_trigger_alloc("%s-trigger", indio_dev->name);
if (sdata->trig == NULL) {
- err = -ENOMEM;
dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
- goto iio_trigger_alloc_error;
+ return -ENOMEM;
}
- err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ irq = sdata->get_irq_data_ready(indio_dev);
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ /*
+ * If the IRQ is triggered on falling edge, we need to mark the
+ * interrupt as active low, if the hardware supports this.
+ */
+ if (irq_trig == IRQF_TRIGGER_FALLING) {
+ if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
+ dev_err(&indio_dev->dev,
+ "falling edge specified for IRQ but hardware "
+ "only support rising edge, will request "
+ "rising edge\n");
+ irq_trig = IRQF_TRIGGER_RISING;
+ } else {
+ /* Set up INT active low i.e. falling edge */
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->drdy_irq.addr_ihl,
+ sdata->sensor_settings->drdy_irq.mask_ihl, 1);
+ if (err < 0)
+ goto iio_trigger_free;
+ dev_info(&indio_dev->dev,
+ "interrupts on the falling edge\n");
+ }
+ } else if (irq_trig == IRQF_TRIGGER_RISING) {
+ dev_info(&indio_dev->dev,
+ "interrupts on the rising edge\n");
+
+ } else {
+ dev_err(&indio_dev->dev,
+ "unsupported IRQ trigger specified (%lx), only "
+ "rising and falling edges supported, enforce "
+ "rising edge\n", irq_trig);
+ irq_trig = IRQF_TRIGGER_RISING;
+ }
+ err = request_threaded_irq(irq,
iio_trigger_generic_data_rdy_poll,
NULL,
- IRQF_TRIGGER_RISING,
+ irq_trig,
sdata->trig->name,
sdata->trig);
if (err) {
dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
- goto request_irq_error;
+ goto iio_trigger_free;
}
iio_trigger_set_drvdata(sdata->trig, indio_dev);
@@ -57,9 +90,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
iio_trigger_register_error:
free_irq(sdata->get_irq_data_ready(indio_dev), sdata->trig);
-request_irq_error:
+iio_trigger_free:
iio_trigger_free(sdata->trig);
-iio_trigger_alloc_error:
return err;
}
EXPORT_SYMBOL(st_sensors_allocate_trigger);
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index e701e28fb1cd..a995139f907c 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -10,8 +10,10 @@ config AD5064
depends on (SPI_MASTER && I2C!=m) || I2C
help
Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
- AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668,
- AD5669R Digital to Analog Converter.
+ AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R, AD5627, AD5627R,
+ AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R, AD5666,
+ AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616,
+ LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to Analog Converters.
To compile this driver as a module, choose M here: the
module will be called ad5064.
@@ -111,6 +113,16 @@ config AD5755
To compile this driver as a module, choose M here: the
module will be called ad5755.
+config AD5761
+ tristate "Analog Devices AD5761/61R/21/21R DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5761, AD5761R, AD5721,
+ AD5721R Digital to Analog Converters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5761.
+
config AD5764
tristate "Analog Devices AD5764/64R/44/44R DAC driver"
depends on SPI_MASTER
@@ -176,11 +188,11 @@ config MAX5821
10 bits DAC.
config MCP4725
- tristate "MCP4725 DAC driver"
+ tristate "MCP4725/6 DAC driver"
depends on I2C
---help---
Say Y here if you want to build a driver for the Microchip
- MCP 4725 12-bit digital-to-analog converter (DAC) with I2C
+ MCP 4725/6 12-bit digital-to-analog converter (DAC) with I2C
interface.
To compile this driver as a module, choose M here: the module
@@ -196,4 +208,23 @@ config MCP4922
To compile this driver as a module, choose M here: the module
will be called mcp4922.
+config STX104
+ tristate "Apex Embedded Systems STX104 DAC driver"
+ depends on ISA
+ help
+ Say yes here to build support for the 2-channel DAC on the Apex
+ Embedded Systems STX104 integrated analog PC/104 card. The base port
+ addresses for the devices may be configured via the "base" module
+ parameter array.
+
+config VF610_DAC
+ tristate "Vybrid vf610 DAC driver"
+ depends on OF
+ depends on HAS_IOMEM
+ help
+ Say yes here to support the Vybrid board's digital-to-analog converter.
+
+ This driver can also be built as a module. If so, the module will
+ be called vf610_dac.
+
endmenu
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 63ae05633e0c..67b48429686d 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
obj-$(CONFIG_AD5449) += ad5449.o
obj-$(CONFIG_AD5755) += ad5755.o
+obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
@@ -21,3 +22,5 @@ obj-$(CONFIG_MAX517) += max517.o
obj-$(CONFIG_MAX5821) += max5821.o
obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4922) += mcp4922.o
+obj-$(CONFIG_STX104) += stx104.o
+obj-$(CONFIG_VF610_DAC) += vf610_dac.o
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index 81ca0081a019..6803e4a137cd 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -1,6 +1,9 @@
/*
- * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R,
- * AD5648, AD5666, AD5668, AD5669R Digital to analog converters driver
+ * AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5625, AD5625R,
+ * AD5627, AD5627R, AD5628, AD5629R, AD5645R, AD5647R, AD5648, AD5665, AD5665R,
+ * AD5666, AD5667, AD5667R, AD5668, AD5669R, LTC2606, LTC2607, LTC2609, LTC2616,
+ * LTC2617, LTC2619, LTC2626, LTC2627, LTC2629 Digital to analog converters
+ * driver
*
* Copyright 2011 Analog Devices Inc.
*
@@ -39,6 +42,9 @@
#define AD5064_CMD_RESET 0x7
#define AD5064_CMD_CONFIG 0x8
+#define AD5064_CMD_RESET_V2 0x5
+#define AD5064_CMD_CONFIG_V2 0x7
+
#define AD5064_CONFIG_DAISY_CHAIN_ENABLE BIT(1)
#define AD5064_CONFIG_INT_VREF_ENABLE BIT(0)
@@ -48,12 +54,25 @@
#define AD5064_LDAC_PWRDN_3STATE 0x3
/**
+ * enum ad5064_regmap_type - Register layout variant
+ * @AD5064_REGMAP_ADI: Old Analog Devices register map layout
+ * @AD5064_REGMAP_ADI2: New Analog Devices register map layout
+ * @AD5064_REGMAP_LTC: LTC register map layout
+ */
+enum ad5064_regmap_type {
+ AD5064_REGMAP_ADI,
+ AD5064_REGMAP_ADI2,
+ AD5064_REGMAP_LTC,
+};
+
+/**
* struct ad5064_chip_info - chip specific information
* @shared_vref: whether the vref supply is shared between channels
- * @internal_vref: internal reference voltage. 0 if the chip has no internal
- * vref.
+ * @internal_vref: internal reference voltage. 0 if the chip has no
+ * internal vref.
* @channel: channel specification
* @num_channels: number of channels
+ * @regmap_type: register map layout variant
*/
struct ad5064_chip_info {
@@ -61,6 +80,7 @@ struct ad5064_chip_info {
unsigned long internal_vref;
const struct iio_chan_spec *channels;
unsigned int num_channels;
+ enum ad5064_regmap_type regmap_type;
};
struct ad5064_state;
@@ -111,18 +131,43 @@ enum ad5064_type {
ID_AD5064,
ID_AD5064_1,
ID_AD5065,
+ ID_AD5625,
+ ID_AD5625R_1V25,
+ ID_AD5625R_2V5,
+ ID_AD5627,
+ ID_AD5627R_1V25,
+ ID_AD5627R_2V5,
ID_AD5628_1,
ID_AD5628_2,
ID_AD5629_1,
ID_AD5629_2,
+ ID_AD5645R_1V25,
+ ID_AD5645R_2V5,
+ ID_AD5647R_1V25,
+ ID_AD5647R_2V5,
ID_AD5648_1,
ID_AD5648_2,
+ ID_AD5665,
+ ID_AD5665R_1V25,
+ ID_AD5665R_2V5,
ID_AD5666_1,
ID_AD5666_2,
+ ID_AD5667,
+ ID_AD5667R_1V25,
+ ID_AD5667R_2V5,
ID_AD5668_1,
ID_AD5668_2,
ID_AD5669_1,
ID_AD5669_2,
+ ID_LTC2606,
+ ID_LTC2607,
+ ID_LTC2609,
+ ID_LTC2616,
+ ID_LTC2617,
+ ID_LTC2619,
+ ID_LTC2626,
+ ID_LTC2627,
+ ID_LTC2629,
};
static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
@@ -136,15 +181,27 @@ static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
static int ad5064_sync_powerdown_mode(struct ad5064_state *st,
const struct iio_chan_spec *chan)
{
- unsigned int val;
+ unsigned int val, address;
+ unsigned int shift;
int ret;
- val = (0x1 << chan->address);
+ if (st->chip_info->regmap_type == AD5064_REGMAP_LTC) {
+ val = 0;
+ address = chan->address;
+ } else {
+ if (st->chip_info->regmap_type == AD5064_REGMAP_ADI2)
+ shift = 4;
+ else
+ shift = 8;
+
+ val = (0x1 << chan->address);
+ address = 0;
- if (st->pwr_down[chan->channel])
- val |= st->pwr_down_mode[chan->channel] << 8;
+ if (st->pwr_down[chan->channel])
+ val |= st->pwr_down_mode[chan->channel] << shift;
+ }
- ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0);
+ ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, address, val, 0);
return ret;
}
@@ -155,6 +212,10 @@ static const char * const ad5064_powerdown_modes[] = {
"three_state",
};
+static const char * const ltc2617_powerdown_modes[] = {
+ "90kohm_to_gnd",
+};
+
static int ad5064_get_powerdown_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
@@ -185,6 +246,13 @@ static const struct iio_enum ad5064_powerdown_mode_enum = {
.set = ad5064_set_powerdown_mode,
};
+static const struct iio_enum ltc2617_powerdown_mode_enum = {
+ .items = ltc2617_powerdown_modes,
+ .num_items = ARRAY_SIZE(ltc2617_powerdown_modes),
+ .get = ad5064_get_powerdown_mode,
+ .set = ad5064_set_powerdown_mode,
+};
+
static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev,
uintptr_t private, const struct iio_chan_spec *chan, char *buf)
{
@@ -295,7 +363,19 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
{ },
};
-#define AD5064_CHANNEL(chan, addr, bits, _shift) { \
+static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ad5064_read_dac_powerdown,
+ .write = ad5064_write_dac_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ltc2617_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &ltc2617_powerdown_mode_enum),
+ { },
+};
+
+#define AD5064_CHANNEL(chan, addr, bits, _shift, _ext_info) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
@@ -309,145 +389,340 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
.storagebits = 16, \
.shift = (_shift), \
}, \
- .ext_info = ad5064_ext_info, \
+ .ext_info = (_ext_info), \
}
-#define DECLARE_AD5064_CHANNELS(name, bits, shift) \
+#define DECLARE_AD5064_CHANNELS(name, bits, shift, ext_info) \
const struct iio_chan_spec name[] = { \
- AD5064_CHANNEL(0, 0, bits, shift), \
- AD5064_CHANNEL(1, 1, bits, shift), \
- AD5064_CHANNEL(2, 2, bits, shift), \
- AD5064_CHANNEL(3, 3, bits, shift), \
- AD5064_CHANNEL(4, 4, bits, shift), \
- AD5064_CHANNEL(5, 5, bits, shift), \
- AD5064_CHANNEL(6, 6, bits, shift), \
- AD5064_CHANNEL(7, 7, bits, shift), \
+ AD5064_CHANNEL(0, 0, bits, shift, ext_info), \
+ AD5064_CHANNEL(1, 1, bits, shift, ext_info), \
+ AD5064_CHANNEL(2, 2, bits, shift, ext_info), \
+ AD5064_CHANNEL(3, 3, bits, shift, ext_info), \
+ AD5064_CHANNEL(4, 4, bits, shift, ext_info), \
+ AD5064_CHANNEL(5, 5, bits, shift, ext_info), \
+ AD5064_CHANNEL(6, 6, bits, shift, ext_info), \
+ AD5064_CHANNEL(7, 7, bits, shift, ext_info), \
}
-#define DECLARE_AD5065_CHANNELS(name, bits, shift) \
+#define DECLARE_AD5065_CHANNELS(name, bits, shift, ext_info) \
const struct iio_chan_spec name[] = { \
- AD5064_CHANNEL(0, 0, bits, shift), \
- AD5064_CHANNEL(1, 3, bits, shift), \
+ AD5064_CHANNEL(0, 0, bits, shift, ext_info), \
+ AD5064_CHANNEL(1, 3, bits, shift, ext_info), \
}
-static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8);
-static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6);
-static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4);
+static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4, ad5064_ext_info);
+
+static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8, ad5064_ext_info);
+static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6, ad5064_ext_info);
+static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4, ad5064_ext_info);
-static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8);
-static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6);
-static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4);
+static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5645_channels, 14, 2, ad5064_ext_info);
+static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0, ad5064_ext_info);
-static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4);
-static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0);
+static DECLARE_AD5064_CHANNELS(ltc2607_channels, 16, 0, ltc2617_ext_info);
+static DECLARE_AD5064_CHANNELS(ltc2617_channels, 14, 2, ltc2617_ext_info);
+static DECLARE_AD5064_CHANNELS(ltc2627_channels, 12, 4, ltc2617_ext_info);
static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
[ID_AD5024] = {
.shared_vref = false,
.channels = ad5024_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5025] = {
.shared_vref = false,
.channels = ad5025_channels,
.num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5044] = {
.shared_vref = false,
.channels = ad5044_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5045] = {
.shared_vref = false,
.channels = ad5045_channels,
.num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5064] = {
.shared_vref = false,
.channels = ad5064_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5064_1] = {
.shared_vref = true,
.channels = ad5064_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5065] = {
.shared_vref = false,
.channels = ad5065_channels,
.num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_AD5625] = {
+ .shared_vref = true,
+ .channels = ad5629_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5625R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5629_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5625R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5629_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5627] = {
+ .shared_vref = true,
+ .channels = ad5629_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5627R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5629_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5627R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5629_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
},
[ID_AD5628_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5024_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5628_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5024_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5629_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5629_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5629_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5629_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_AD5645R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5645_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5645R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5645_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5647R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5645_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5647R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5645_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
},
[ID_AD5648_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5044_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5648_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5044_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_AD5665] = {
+ .shared_vref = true,
+ .channels = ad5669_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5665R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5669_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5665R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5669_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI2
},
[ID_AD5666_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5064_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5666_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5064_channels,
.num_channels = 4,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_AD5667] = {
+ .shared_vref = true,
+ .channels = ad5669_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5667R_1V25] = {
+ .shared_vref = true,
+ .internal_vref = 1250000,
+ .channels = ad5669_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
+ },
+ [ID_AD5667R_2V5] = {
+ .shared_vref = true,
+ .internal_vref = 2500000,
+ .channels = ad5669_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_ADI2
},
[ID_AD5668_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5064_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5668_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5064_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5669_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5669_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
},
[ID_AD5669_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5669_channels,
.num_channels = 8,
+ .regmap_type = AD5064_REGMAP_ADI,
+ },
+ [ID_LTC2606] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2607_channels,
+ .num_channels = 1,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2607] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2607_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2609] = {
+ .shared_vref = false,
+ .internal_vref = 0,
+ .channels = ltc2607_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2616] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2617_channels,
+ .num_channels = 1,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2617] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2617_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2619] = {
+ .shared_vref = false,
+ .internal_vref = 0,
+ .channels = ltc2617_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2626] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2627_channels,
+ .num_channels = 1,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2627] = {
+ .shared_vref = true,
+ .internal_vref = 0,
+ .channels = ltc2627_channels,
+ .num_channels = 2,
+ .regmap_type = AD5064_REGMAP_LTC,
+ },
+ [ID_LTC2629] = {
+ .shared_vref = false,
+ .internal_vref = 0,
+ .channels = ltc2627_channels,
+ .num_channels = 4,
+ .regmap_type = AD5064_REGMAP_LTC,
},
};
@@ -469,6 +744,22 @@ static const char * const ad5064_vref_name(struct ad5064_state *st,
return st->chip_info->shared_vref ? "vref" : ad5064_vref_names[vref];
}
+static int ad5064_set_config(struct ad5064_state *st, unsigned int val)
+{
+ unsigned int cmd;
+
+ switch (st->chip_info->regmap_type) {
+ case AD5064_REGMAP_ADI2:
+ cmd = AD5064_CMD_CONFIG_V2;
+ break;
+ default:
+ cmd = AD5064_CMD_CONFIG;
+ break;
+ }
+
+ return ad5064_write(st, cmd, 0, val, 0);
+}
+
static int ad5064_probe(struct device *dev, enum ad5064_type type,
const char *name, ad5064_write_func write)
{
@@ -498,8 +789,7 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
if (!st->chip_info->internal_vref)
return ret;
st->use_internal_vref = true;
- ret = ad5064_write(st, AD5064_CMD_CONFIG, 0,
- AD5064_CONFIG_INT_VREF_ENABLE, 0);
+ ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE);
if (ret) {
dev_err(dev, "Failed to enable internal vref: %d\n",
ret);
@@ -628,9 +918,19 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
unsigned int addr, unsigned int val)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
+ unsigned int cmd_shift;
int ret;
- st->data.i2c[0] = (cmd << 4) | addr;
+ switch (st->chip_info->regmap_type) {
+ case AD5064_REGMAP_ADI2:
+ cmd_shift = 3;
+ break;
+ default:
+ cmd_shift = 4;
+ break;
+ }
+
+ st->data.i2c[0] = (cmd << cmd_shift) | addr;
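+ /*
+  * For instance, enabling the internal reference uses command
+  * byte (AD5064_CMD_CONFIG << 4) | 0 = 0x80 on the old layout and
+  * (AD5064_CMD_CONFIG_V2 << 3) | 0 = 0x38 on the ADI2 layout.
+  */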
put_unaligned_be16(val, &st->data.i2c[1]);
ret = i2c_master_send(i2c, st->data.i2c, 3);
@@ -653,12 +953,35 @@ static int ad5064_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id ad5064_i2c_ids[] = {
+ {"ad5625", ID_AD5625 },
+ {"ad5625r-1v25", ID_AD5625R_1V25 },
+ {"ad5625r-2v5", ID_AD5625R_2V5 },
+ {"ad5627", ID_AD5627 },
+ {"ad5627r-1v25", ID_AD5627R_1V25 },
+ {"ad5627r-2v5", ID_AD5627R_2V5 },
{"ad5629-1", ID_AD5629_1},
{"ad5629-2", ID_AD5629_2},
{"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */
+ {"ad5645r-1v25", ID_AD5645R_1V25 },
+ {"ad5645r-2v5", ID_AD5645R_2V5 },
+ {"ad5665", ID_AD5665 },
+ {"ad5665r-1v25", ID_AD5665R_1V25 },
+ {"ad5665r-2v5", ID_AD5665R_2V5 },
+ {"ad5667", ID_AD5667 },
+ {"ad5667r-1v25", ID_AD5667R_1V25 },
+ {"ad5667r-2v5", ID_AD5667R_2V5 },
{"ad5669-1", ID_AD5669_1},
{"ad5669-2", ID_AD5669_2},
{"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */
+ {"ltc2606", ID_LTC2606},
+ {"ltc2607", ID_LTC2607},
+ {"ltc2609", ID_LTC2609},
+ {"ltc2616", ID_LTC2616},
+ {"ltc2617", ID_LTC2617},
+ {"ltc2619", ID_LTC2619},
+ {"ltc2626", ID_LTC2626},
+ {"ltc2627", ID_LTC2627},
+ {"ltc2629", ID_LTC2629},
{}
};
MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
diff --git a/drivers/iio/dac/ad5761.c b/drivers/iio/dac/ad5761.c
new file mode 100644
index 000000000000..d6510d6928b3
--- /dev/null
+++ b/drivers/iio/dac/ad5761.c
@@ -0,0 +1,430 @@
+/*
+ * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter
+ *
+ * Copyright 2016 Qtechnology A/S
+ * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/bitops.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_data/ad5761.h>
+
+#define AD5761_ADDR(addr) ((addr & 0xf) << 16)
+#define AD5761_ADDR_NOOP 0x0
+#define AD5761_ADDR_DAC_WRITE 0x3
+#define AD5761_ADDR_CTRL_WRITE_REG 0x4
+#define AD5761_ADDR_SW_DATA_RESET 0x7
+#define AD5761_ADDR_DAC_READ 0xb
+#define AD5761_ADDR_CTRL_READ_REG 0xc
+#define AD5761_ADDR_SW_FULL_RESET 0xf
+
+#define AD5761_CTRL_USE_INTVREF BIT(5)
+#define AD5761_CTRL_ETS BIT(6)
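+
+/*
+ * Each transfer is a 24-bit frame: the register address sits in
+ * bits 19:16 (AD5761_ADDR() above) and the 16-bit payload in bits
+ * 15:0; the top nibble of the frame is left zero here.
+ */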
+
+/**
+ * struct ad5761_chip_info - chip specific information
+ * @int_vref: Value of the internal reference voltage in mV - 0 if external
+ * reference voltage is used
+ * @channel: channel specification
+ */
+
+struct ad5761_chip_info {
+ unsigned long int_vref;
+ const struct iio_chan_spec channel;
+};
+
+struct ad5761_range_params {
+ int m;
+ int c;
+};
+
+enum ad5761_supported_device_ids {
+ ID_AD5721,
+ ID_AD5721R,
+ ID_AD5761,
+ ID_AD5761R,
+};
+
+/**
+ * struct ad5761_state - driver instance specific data
+ * @spi: spi_device
+ * @vref_reg: reference voltage regulator
+ * @use_intref: true when the internal voltage reference is used
+ * @vref: actual voltage reference in mVolts
+ * @range: output range mode used
+ * @data: cache aligned spi buffer
+ */
+struct ad5761_state {
+ struct spi_device *spi;
+ struct regulator *vref_reg;
+
+ bool use_intref;
+ int vref;
+ enum ad5761_voltage_range range;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[3] ____cacheline_aligned;
+};
+
+static const struct ad5761_range_params ad5761_range_params[] = {
+ [AD5761_VOLTAGE_RANGE_M10V_10V] = {
+ .m = 80,
+ .c = 40,
+ },
+ [AD5761_VOLTAGE_RANGE_0V_10V] = {
+ .m = 40,
+ .c = 0,
+ },
+ [AD5761_VOLTAGE_RANGE_M5V_5V] = {
+ .m = 40,
+ .c = 20,
+ },
+ [AD5761_VOLTAGE_RANGE_0V_5V] = {
+ .m = 20,
+ .c = 0,
+ },
+ [AD5761_VOLTAGE_RANGE_M2V5_7V5] = {
+ .m = 40,
+ .c = 10,
+ },
+ [AD5761_VOLTAGE_RANGE_M3V_3V] = {
+ .m = 24,
+ .c = 12,
+ },
+ [AD5761_VOLTAGE_RANGE_0V_16V] = {
+ .m = 64,
+ .c = 0,
+ },
+ [AD5761_VOLTAGE_RANGE_0V_20V] = {
+ .m = 80,
+ .c = 0,
+ },
+};
+
+static int _ad5761_spi_write(struct ad5761_state *st, u8 addr, u16 val)
+{
+ st->data[0].d32 = cpu_to_be32(AD5761_ADDR(addr) | val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
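A sketch of the resulting wire frame, assuming addr = AD5761_ADDR_DAC_WRITE (0x3) and val = 0x8000:

	/*
	 * d32 = cpu_to_be32((0x3 << 16) | 0x8000) -> bytes 00 03 80 00;
	 * spi_write() sends d8[1..3], i.e. the 24-bit frame 03 80 00:
	 * bits [19:16] are the register address, bits [15:0] the data.
	 */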
+
+static int ad5761_spi_write(struct iio_dev *indio_dev, u8 addr, u16 val)
+{
+ struct ad5761_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = _ad5761_spi_write(st, addr, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int _ad5761_spi_read(struct ad5761_state *st, u8 addr, u16 *val)
+{
+ int ret;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .bits_per_word = 8,
+ .len = 3,
+ .cs_change = true,
+ }, {
+ .tx_buf = &st->data[1].d8[1],
+ .rx_buf = &st->data[2].d8[1],
+ .bits_per_word = 8,
+ .len = 3,
+ },
+ };
+
+ st->data[0].d32 = cpu_to_be32(AD5761_ADDR(addr));
+ st->data[1].d32 = cpu_to_be32(AD5761_ADDR(AD5761_ADDR_NOOP));
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+
+ *val = be32_to_cpu(st->data[2].d32);
+
+ return ret;
+}
+
+static int ad5761_spi_read(struct iio_dev *indio_dev, u8 addr, u16 *val)
+{
+ struct ad5761_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = _ad5761_spi_read(st, addr, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5761_spi_set_range(struct ad5761_state *st,
+ enum ad5761_voltage_range range)
+{
+ u16 aux;
+ int ret;
+
+ aux = (range & 0x7) | AD5761_CTRL_ETS;
+
+ if (st->use_intref)
+ aux |= AD5761_CTRL_USE_INTVREF;
+
+ ret = _ad5761_spi_write(st, AD5761_ADDR_SW_FULL_RESET, 0);
+ if (ret)
+ return ret;
+
+ ret = _ad5761_spi_write(st, AD5761_ADDR_CTRL_WRITE_REG, aux);
+ if (ret)
+ return ret;
+
+ st->range = range;
+
+ return 0;
+}
+
+static int ad5761_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct ad5761_state *st;
+ int ret;
+ u16 aux;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ad5761_spi_read(indio_dev, AD5761_ADDR_DAC_READ, &aux);
+ if (ret)
+ return ret;
+ *val = aux >> chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ st = iio_priv(indio_dev);
+ *val = st->vref * ad5761_range_params[st->range].m;
+ *val /= 10;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OFFSET:
+ st = iio_priv(indio_dev);
+ *val = -(1 << chan->scan_type.realbits);
+ *val *= ad5761_range_params[st->range].c;
+ *val /= ad5761_range_params[st->range].m;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
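Putting the scale and offset paths together, a worked example assuming an AD5761R on its internal 2500 mV reference in the -10V..+10V range (m = 80, c = 40, realbits = 16):

	/*
	 * scale  = (2500 * 80 / 10) / 2^16 = 20000 / 65536
	 *        ~= 0.3052 mV per LSB
	 * offset = -(2^16) * 40 / 80 = -32768 codes
	 * so V(out) = (raw + offset) * scale spans -10 V .. +10 V.
	 */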
+
+static int ad5761_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ u16 aux;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+	if (val2 || val < 0 || (val << chan->scan_type.shift) > 0xffff)
+ return -EINVAL;
+
+ aux = val << chan->scan_type.shift;
+
+ return ad5761_spi_write(indio_dev, AD5761_ADDR_DAC_WRITE, aux);
+}
+
+static const struct iio_info ad5761_info = {
+ .read_raw = &ad5761_read_raw,
+ .write_raw = &ad5761_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define AD5761_CHAN(_bits) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 16 - (_bits), \
+ }, \
+}
+
+static const struct ad5761_chip_info ad5761_chip_infos[] = {
+ [ID_AD5721] = {
+ .int_vref = 0,
+ .channel = AD5761_CHAN(12),
+ },
+ [ID_AD5721R] = {
+ .int_vref = 2500,
+ .channel = AD5761_CHAN(12),
+ },
+ [ID_AD5761] = {
+ .int_vref = 0,
+ .channel = AD5761_CHAN(16),
+ },
+ [ID_AD5761R] = {
+ .int_vref = 2500,
+ .channel = AD5761_CHAN(16),
+ },
+};
+
+static int ad5761_get_vref(struct ad5761_state *st,
+ const struct ad5761_chip_info *chip_info)
+{
+ int ret;
+
+ st->vref_reg = devm_regulator_get_optional(&st->spi->dev, "vref");
+ if (PTR_ERR(st->vref_reg) == -ENODEV) {
+		/* Use the internal reference */
+ if (!chip_info->int_vref) {
+ dev_err(&st->spi->dev,
+ "Voltage reference not found\n");
+ return -EIO;
+ }
+
+ st->use_intref = true;
+ st->vref = chip_info->int_vref;
+ return 0;
+ }
+
+ if (IS_ERR(st->vref_reg)) {
+ dev_err(&st->spi->dev,
+ "Error getting voltage reference regulator\n");
+ return PTR_ERR(st->vref_reg);
+ }
+
+ ret = regulator_enable(st->vref_reg);
+ if (ret) {
+ dev_err(&st->spi->dev,
+ "Failed to enable voltage reference\n");
+ return ret;
+ }
+
+ ret = regulator_get_voltage(st->vref_reg);
+ if (ret < 0) {
+ dev_err(&st->spi->dev,
+ "Failed to get voltage reference value\n");
+ goto disable_regulator_vref;
+ }
+
+ if (ret < 2000000 || ret > 3000000) {
+ dev_warn(&st->spi->dev,
+ "Invalid external voltage ref. value %d uV\n", ret);
+ ret = -EIO;
+ goto disable_regulator_vref;
+ }
+
+ st->vref = ret / 1000;
+ st->use_intref = false;
+
+ return 0;
+
+disable_regulator_vref:
+ regulator_disable(st->vref_reg);
+ st->vref_reg = NULL;
+ return ret;
+}
+
+static int ad5761_probe(struct spi_device *spi)
+{
+ struct iio_dev *iio_dev;
+ struct ad5761_state *st;
+ int ret;
+ const struct ad5761_chip_info *chip_info =
+ &ad5761_chip_infos[spi_get_device_id(spi)->driver_data];
+ enum ad5761_voltage_range voltage_range = AD5761_VOLTAGE_RANGE_0V_5V;
+ struct ad5761_platform_data *pdata = dev_get_platdata(&spi->dev);
+
+ iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(iio_dev);
+
+ st->spi = spi;
+ spi_set_drvdata(spi, iio_dev);
+
+ ret = ad5761_get_vref(st, chip_info);
+ if (ret)
+ return ret;
+
+ if (pdata)
+ voltage_range = pdata->voltage_range;
+
+ ret = ad5761_spi_set_range(st, voltage_range);
+ if (ret)
+ goto disable_regulator_err;
+
+ iio_dev->dev.parent = &spi->dev;
+ iio_dev->info = &ad5761_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->channels = &chip_info->channel;
+ iio_dev->num_channels = 1;
+ iio_dev->name = spi_get_device_id(st->spi)->name;
+ ret = iio_device_register(iio_dev);
+ if (ret)
+ goto disable_regulator_err;
+
+ return 0;
+
+disable_regulator_err:
+ if (!IS_ERR_OR_NULL(st->vref_reg))
+ regulator_disable(st->vref_reg);
+
+ return ret;
+}
+
+static int ad5761_remove(struct spi_device *spi)
+{
+ struct iio_dev *iio_dev = spi_get_drvdata(spi);
+ struct ad5761_state *st = iio_priv(iio_dev);
+
+ iio_device_unregister(iio_dev);
+
+ if (!IS_ERR_OR_NULL(st->vref_reg))
+ regulator_disable(st->vref_reg);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5761_id[] = {
+ {"ad5721", ID_AD5721},
+ {"ad5721r", ID_AD5721R},
+ {"ad5761", ID_AD5761},
+ {"ad5761r", ID_AD5761R},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5761_id);
+
+static struct spi_driver ad5761_driver = {
+ .driver = {
+ .name = "ad5761",
+ },
+ .probe = ad5761_probe,
+ .remove = ad5761_remove,
+ .id_table = ad5761_id,
+};
+module_spi_driver(ad5761_driver);
+
+MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices AD5721, AD5721R, AD5761, AD5761R driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index b4dde8315210..cca935c06f2b 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -1,5 +1,5 @@
/*
- * mcp4725.c - Support for Microchip MCP4725
+ * mcp4725.c - Support for Microchip MCP4725/6
*
* Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
*
@@ -134,6 +134,12 @@ static const char * const mcp4725_powerdown_modes[] = {
"500kohm_to_gnd"
};
+static const char * const mcp4726_powerdown_modes[] = {
+ "1kohm_to_gnd",
+ "125kohm_to_gnd",
+ "640kohm_to_gnd"
+};
+
static int mcp4725_get_powerdown_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
@@ -182,11 +188,24 @@ static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev,
return len;
}
-static const struct iio_enum mcp4725_powerdown_mode_enum = {
- .items = mcp4725_powerdown_modes,
- .num_items = ARRAY_SIZE(mcp4725_powerdown_modes),
- .get = mcp4725_get_powerdown_mode,
- .set = mcp4725_set_powerdown_mode,
+enum {
+ MCP4725,
+ MCP4726,
+};
+
+static const struct iio_enum mcp472x_powerdown_mode_enum[] = {
+ [MCP4725] = {
+ .items = mcp4725_powerdown_modes,
+ .num_items = ARRAY_SIZE(mcp4725_powerdown_modes),
+ .get = mcp4725_get_powerdown_mode,
+ .set = mcp4725_set_powerdown_mode,
+ },
+ [MCP4726] = {
+ .items = mcp4726_powerdown_modes,
+ .num_items = ARRAY_SIZE(mcp4726_powerdown_modes),
+ .get = mcp4725_get_powerdown_mode,
+ .set = mcp4725_set_powerdown_mode,
+ },
};
static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
@@ -196,19 +215,46 @@ static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
.write = mcp4725_write_powerdown,
.shared = IIO_SEPARATE,
},
- IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp4725_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &mcp4725_powerdown_mode_enum),
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE,
+ &mcp472x_powerdown_mode_enum[MCP4725]),
+ IIO_ENUM_AVAILABLE("powerdown_mode",
+ &mcp472x_powerdown_mode_enum[MCP4725]),
+ { },
+};
+
+static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = mcp4725_read_powerdown,
+ .write = mcp4725_write_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE,
+ &mcp472x_powerdown_mode_enum[MCP4726]),
+ IIO_ENUM_AVAILABLE("powerdown_mode",
+ &mcp472x_powerdown_mode_enum[MCP4726]),
{ },
};
-static const struct iio_chan_spec mcp4725_channel = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .output = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .ext_info = mcp4725_ext_info,
+static const struct iio_chan_spec mcp472x_channel[] = {
+ [MCP4725] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = mcp4725_ext_info,
+ },
+ [MCP4726] = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = mcp4726_ext_info,
+ },
};
static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
@@ -302,7 +348,7 @@ static int mcp4725_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->name = id->name;
indio_dev->info = &mcp4725_info;
- indio_dev->channels = &mcp4725_channel;
+ indio_dev->channels = &mcp472x_channel[id->driver_data];
indio_dev->num_channels = 1;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -316,7 +362,7 @@ static int mcp4725_probe(struct i2c_client *client,
}
pd = (inbuf[0] >> 1) & 0x3;
data->powerdown = pd > 0 ? true : false;
- data->powerdown_mode = pd ? pd-1 : 2; /* 500kohm_to_gnd */
+	data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
return iio_device_register(indio_dev);
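A worked decode of the power-on readback above, assuming inbuf[0] = 0x06:

	/*
	 * pd = (0x06 >> 1) & 0x3 = 3 -> powerdown = true,
	 * powerdown_mode = pd - 1 = 2, the largest resistor to gnd
	 * (500k on the MCP4725, 640k on the MCP4726);
	 * pd = 0 would mean powered up, with the mode defaulting to 2.
	 */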
@@ -329,7 +375,8 @@ static int mcp4725_remove(struct i2c_client *client)
}
static const struct i2c_device_id mcp4725_id[] = {
- { "mcp4725", 0 },
+ { "mcp4725", MCP4725 },
+ { "mcp4726", MCP4726 },
{ }
};
MODULE_DEVICE_TABLE(i2c, mcp4725_id);
@@ -346,5 +393,5 @@ static struct i2c_driver mcp4725_driver = {
module_i2c_driver(mcp4725_driver);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
-MODULE_DESCRIPTION("MCP4725 12-bit DAC");
+MODULE_DESCRIPTION("MCP4725/6 12-bit DAC");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c
new file mode 100644
index 000000000000..174f4b75ceed
--- /dev/null
+++ b/drivers/iio/dac/stx104.c
@@ -0,0 +1,152 @@
+/*
+ * DAC driver for the Apex Embedded Systems STX104
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#define STX104_NUM_CHAN 2
+
+#define STX104_CHAN(chan) { \
+ .type = IIO_VOLTAGE, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .indexed = 1, \
+ .output = 1 \
+}
+
+#define STX104_EXTENT 16
+/*
+ * The highest base address possible for an ISA device is 0x3FF; this results in
+ * 1024 possible base addresses. Dividing the number of possible base addresses
+ * by the address extent taken by each device results in the maximum number of
+ * devices on a system.
+ */
+#define MAX_NUM_STX104 (1024 / STX104_EXTENT)
+
+static unsigned base[MAX_NUM_STX104];
+static unsigned num_stx104;
+module_param_array(base, uint, &num_stx104, 0);
+MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
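Base addresses are passed at load time through the "base" array parameter; a hypothetical invocation for two cards jumpered to 0x300 and 0x310:

	# modprobe stx104 base=0x300,0x310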
+
+/**
+ * struct stx104_iio - IIO device private data structure
+ * @chan_out_states: channels' output states
+ * @base: base port address of the IIO device
+ */
+struct stx104_iio {
+ unsigned chan_out_states[STX104_NUM_CHAN];
+ unsigned base;
+};
+
+static int stx104_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct stx104_iio *const priv = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ *val = priv->chan_out_states[chan->channel];
+
+ return IIO_VAL_INT;
+}
+
+static int stx104_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct stx104_iio *const priv = iio_priv(indio_dev);
+ const unsigned chan_addr_offset = 2 * chan->channel;
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ priv->chan_out_states[chan->channel] = val;
+ outw(val, priv->base + 4 + chan_addr_offset);
+
+ return 0;
+}
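The DAC data registers sit at base + 4 (channel 0) and base + 6 (channel 1); for example, with base = 0x300, a write to channel 1 resolves to:

	/* chan_addr_offset = 2 * 1 = 2 -> outw(val, 0x300 + 4 + 2) = outw(val, 0x306) */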
+
+static const struct iio_info stx104_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = stx104_read_raw,
+ .write_raw = stx104_write_raw
+};
+
+static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
+ STX104_CHAN(0),
+ STX104_CHAN(1)
+};
+
+static int stx104_probe(struct device *dev, unsigned int id)
+{
+ struct iio_dev *indio_dev;
+ struct stx104_iio *priv;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ if (!devm_request_region(dev, base[id], STX104_EXTENT,
+ dev_name(dev))) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base[id], base[id] + STX104_EXTENT);
+ return -EBUSY;
+ }
+
+ indio_dev->info = &stx104_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = stx104_channels;
+ indio_dev->num_channels = STX104_NUM_CHAN;
+ indio_dev->name = dev_name(dev);
+
+ priv = iio_priv(indio_dev);
+ priv->base = base[id];
+
+ /* initialize DAC output to 0V */
+ outw(0, base[id] + 4);
+ outw(0, base[id] + 6);
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct isa_driver stx104_driver = {
+ .probe = stx104_probe,
+ .driver = {
+ .name = "stx104"
+ }
+};
+
+static void __exit stx104_exit(void)
+{
+ isa_unregister_driver(&stx104_driver);
+}
+
+static int __init stx104_init(void)
+{
+ return isa_register_driver(&stx104_driver, num_stx104);
+}
+
+module_init(stx104_init);
+module_exit(stx104_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Apex Embedded Systems STX104 DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/vf610_dac.c b/drivers/iio/dac/vf610_dac.c
new file mode 100644
index 000000000000..c4ec7779b394
--- /dev/null
+++ b/drivers/iio/dac/vf610_dac.c
@@ -0,0 +1,298 @@
+/*
+ * Freescale Vybrid vf610 DAC driver
+ *
+ * Copyright 2016 Toradex AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VF610_DACx_STATCTRL 0x20
+
+#define VF610_DAC_DACEN BIT(15)
+#define VF610_DAC_DACRFS BIT(14)
+#define VF610_DAC_LPEN BIT(11)
+
+#define VF610_DAC_DAT0(x) ((x) & 0xFFF)
+
+enum vf610_conversion_mode_sel {
+ VF610_DAC_CONV_HIGH_POWER,
+ VF610_DAC_CONV_LOW_POWER,
+};
+
+struct vf610_dac {
+ struct clk *clk;
+ struct device *dev;
+ enum vf610_conversion_mode_sel conv_mode;
+ void __iomem *regs;
+};
+
+static void vf610_dac_init(struct vf610_dac *info)
+{
+ int val;
+
+ info->conv_mode = VF610_DAC_CONV_LOW_POWER;
+ val = VF610_DAC_DACEN | VF610_DAC_DACRFS |
+ VF610_DAC_LPEN;
+ writel(val, info->regs + VF610_DACx_STATCTRL);
+}
+
+static void vf610_dac_exit(struct vf610_dac *info)
+{
+ int val;
+
+ val = readl(info->regs + VF610_DACx_STATCTRL);
+ val &= ~VF610_DAC_DACEN;
+ writel(val, info->regs + VF610_DACx_STATCTRL);
+}
+
+static int vf610_set_conversion_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct vf610_dac *info = iio_priv(indio_dev);
+ int val;
+
+ mutex_lock(&indio_dev->mlock);
+ info->conv_mode = mode;
+ val = readl(info->regs + VF610_DACx_STATCTRL);
+ if (mode)
+ val |= VF610_DAC_LPEN;
+ else
+ val &= ~VF610_DAC_LPEN;
+ writel(val, info->regs + VF610_DACx_STATCTRL);
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+static int vf610_get_conversion_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ return info->conv_mode;
+}
+
+static const char * const vf610_conv_modes[] = { "high-power", "low-power" };
+
+static const struct iio_enum vf610_conversion_mode = {
+ .items = vf610_conv_modes,
+ .num_items = ARRAY_SIZE(vf610_conv_modes),
+ .get = vf610_get_conversion_mode,
+ .set = vf610_set_conversion_mode,
+};
+
+static const struct iio_chan_spec_ext_info vf610_ext_info[] = {
+ IIO_ENUM("conversion_mode", IIO_SHARED_BY_DIR,
+ &vf610_conversion_mode),
+ {},
+};
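The enum surfaces in sysfs through the IIO core; assuming the usual shared-by-direction naming for an output channel (out_conversion_mode — an assumption, check the device's sysfs directory), switching modes would look like:

	# echo low-power > /sys/bus/iio/devices/iio:device0/out_conversion_mode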
+
+#define VF610_DAC_CHAN(_chan_type) { \
+ .type = (_chan_type), \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = vf610_ext_info, \
+}
+
+static const struct iio_chan_spec vf610_dac_iio_channels[] = {
+ VF610_DAC_CHAN(IIO_VOLTAGE),
+};
+
+static int vf610_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2,
+ long mask)
+{
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val = VF610_DAC_DAT0(readl(info->regs));
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+		/*
+		 * DACRFS is always 1 for a valid reference; the typical
+		 * reference voltage is 3.3V, per section 9.1.2.1 of the
+		 * Vybrid datasheet.
+		 */
+ *val = 3300 /* mV */;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
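With IIO_VAL_FRACTIONAL_LOG2, userspace computes scale as val / 2^val2; a worked example for this 12-bit DAC with the 3.3V reference:

	/*
	 * scale = 3300 / 2^12 = 3300 / 4096 ~= 0.8057 mV per LSB,
	 * so a raw code of 4095 corresponds to ~3299.2 mV.
	 */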
+
+static int vf610_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2,
+ long mask)
+{
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+ writel(VF610_DAC_DAT0(val), info->regs);
+ mutex_unlock(&indio_dev->mlock);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info vf610_dac_iio_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &vf610_read_raw,
+ .write_raw = &vf610_write_raw,
+};
+
+static const struct of_device_id vf610_dac_match[] = {
+ { .compatible = "fsl,vf610-dac", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_dac_match);
+
+static int vf610_dac_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct vf610_dac *info;
+ struct resource *mem;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct vf610_dac));
+ if (!indio_dev) {
+ dev_err(&pdev->dev, "Failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ info = iio_priv(indio_dev);
+ info->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ info->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
+
+ info->clk = devm_clk_get(&pdev->dev, "dac");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "Failed getting clock, err = %ld\n",
+ PTR_ERR(info->clk));
+ return PTR_ERR(info->clk);
+ }
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &vf610_dac_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = vf610_dac_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(vf610_dac_iio_channels);
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Could not prepare or enable the clock\n");
+ return ret;
+ }
+
+ vf610_dac_init(info);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register the device\n");
+ goto error_iio_device_register;
+ }
+
+ return 0;
+
+error_iio_device_register:
+ clk_disable_unprepare(info->clk);
+
+ return ret;
+}
+
+static int vf610_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ vf610_dac_exit(info);
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_dac_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_dac *info = iio_priv(indio_dev);
+
+ vf610_dac_exit(info);
+ clk_disable_unprepare(info->clk);
+
+ return 0;
+}
+
+static int vf610_dac_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct vf610_dac *info = iio_priv(indio_dev);
+ int ret;
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret)
+ return ret;
+
+ vf610_dac_init(info);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_dac_pm_ops, vf610_dac_suspend, vf610_dac_resume);
+
+static struct platform_driver vf610_dac_driver = {
+ .probe = vf610_dac_probe,
+ .remove = vf610_dac_remove,
+ .driver = {
+ .name = "vf610-dac",
+ .of_match_table = vf610_dac_match,
+ .pm = &vf610_dac_pm_ops,
+ },
+};
+module_platform_driver(vf610_dac_driver);
+
+MODULE_AUTHOR("Sanchayan Maity <sanchayan.maity@toradex.com>");
+MODULE_DESCRIPTION("Freescale VF610 DAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index bbce3b09ac45..4dac567e75b4 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
{
int ret;
- unsigned int raw_val;
+ __le16 raw_val;
mutex_lock(&data->mutex);
ret = bmg160_set_power_state(data, true);
@@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
}
ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
- 2);
+ sizeof(raw_val));
if (ret < 0) {
dev_err(data->dev, "Error reading axis %d\n", axis);
bmg160_set_power_state(data, false);
@@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
return ret;
}
- *val = sign_extend32(raw_val, 15);
+ *val = sign_extend32(le16_to_cpu(raw_val), 15);
ret = bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
if (ret < 0)
@@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = {
.sign = 's', \
.realbits = 16, \
.storagebits = 16, \
+ .endianness = IIO_LE, \
}, \
.event_spec = &bmg160_event, \
.num_event_specs = 1 \
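Reading into a __le16 with an explicit conversion keeps axis reads correct on big-endian hosts; a sketch of the decode for two sample byte pairs:

	/*
	 * bytes ff 7f -> le16_to_cpu() = 0x7fff -> sign_extend32(.., 15) =  32767
	 * bytes 00 80 -> le16_to_cpu() = 0x8000 -> sign_extend32(.., 15) = -32768
	 * IIO_LE in scan_type tells the core that buffered data keeps this layout.
	 */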
@@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
- data->buffer[i++] = ret;
+ data->buffer[i++] = val;
}
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 02eddcebeea3..110f95b6e52f 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -185,6 +185,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.drdy_irq = {
.addr = ST_GYRO_1_DRDY_IRQ_ADDR,
.mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+ /*
+ * The sensor has IHL (active low) and open
+ * drain settings, but only for INT1 and not
+ * for the DRDY line on INT2.
+ */
},
.multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
.bootime = 2,
@@ -248,6 +253,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.drdy_irq = {
.addr = ST_GYRO_2_DRDY_IRQ_ADDR,
.mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+ /*
+ * The sensor has IHL (active low) and open
+ * drain settings, but only for INT1 and not
+ * for the DRDY line on INT2.
+ */
},
.multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
.bootime = 2,
@@ -307,6 +317,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
.drdy_irq = {
.addr = ST_GYRO_3_DRDY_IRQ_ADDR,
.mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK,
+ /*
+ * The sensor has IHL (active low) and open
+ * drain settings, but only for INT1 and not
+ * for the DRDY line on INT2.
+ */
},
.multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
.bootime = 2,
diff --git a/drivers/iio/health/Kconfig b/drivers/iio/health/Kconfig
index a647679da805..c5f004a8e447 100644
--- a/drivers/iio/health/Kconfig
+++ b/drivers/iio/health/Kconfig
@@ -3,7 +3,35 @@
#
# When adding new entries keep the list in alphabetical order
-menu "Health sensors"
+menu "Health Sensors"
+
+menu "Heart Rate Monitors"
+
+config AFE4403
+ tristate "TI AFE4403 Heart Rate Monitor"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes to choose the Texas Instruments AFE4403
+ heart rate monitor and low-cost pulse oximeter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called afe4403.
+
+config AFE4404
+ tristate "TI AFE4404 heart rate and pulse oximeter sensor"
+ depends on I2C
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes to choose the Texas Instruments AFE4404
+ heart rate monitor and low-cost pulse oximeter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called afe4404.
config MAX30100
tristate "MAX30100 heart rate and pulse oximeter sensor"
@@ -19,3 +47,5 @@ config MAX30100
module will be called max30100.
endmenu
+
+endmenu
diff --git a/drivers/iio/health/Makefile b/drivers/iio/health/Makefile
index 7c475d7faad8..9955a2ae8df1 100644
--- a/drivers/iio/health/Makefile
+++ b/drivers/iio/health/Makefile
@@ -4,4 +4,6 @@
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AFE4403) += afe4403.o
+obj-$(CONFIG_AFE4404) += afe4404.o
obj-$(CONFIG_MAX30100) += max30100.o
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
new file mode 100644
index 000000000000..88e43f87b926
--- /dev/null
+++ b/drivers/iio/health/afe4403.c
@@ -0,0 +1,708 @@
+/*
+ * AFE4403 Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "afe440x.h"
+
+#define AFE4403_DRIVER_NAME "afe4403"
+
+/* AFE4403 Registers */
+#define AFE4403_TIAGAIN 0x20
+#define AFE4403_TIA_AMB_GAIN 0x21
+
+/* AFE4403 GAIN register fields */
+#define AFE4403_TIAGAIN_RES_MASK GENMASK(2, 0)
+#define AFE4403_TIAGAIN_RES_SHIFT 0
+#define AFE4403_TIAGAIN_CAP_MASK GENMASK(7, 3)
+#define AFE4403_TIAGAIN_CAP_SHIFT 3
+
+/* AFE4403 LEDCNTRL register fields */
+#define AFE440X_LEDCNTRL_LED1_MASK GENMASK(15, 8)
+#define AFE440X_LEDCNTRL_LED1_SHIFT 8
+#define AFE440X_LEDCNTRL_LED2_MASK GENMASK(7, 0)
+#define AFE440X_LEDCNTRL_LED2_SHIFT 0
+#define AFE440X_LEDCNTRL_LED_RANGE_MASK GENMASK(17, 16)
+#define AFE440X_LEDCNTRL_LED_RANGE_SHIFT 16
+
+/* AFE4403 CONTROL2 register fields */
+#define AFE440X_CONTROL2_PWR_DWN_TX BIT(2)
+#define AFE440X_CONTROL2_EN_SLOW_DIAG BIT(8)
+#define AFE440X_CONTROL2_DIAG_OUT_TRI BIT(10)
+#define AFE440X_CONTROL2_TX_BRDG_MOD BIT(11)
+#define AFE440X_CONTROL2_TX_REF_MASK GENMASK(18, 17)
+#define AFE440X_CONTROL2_TX_REF_SHIFT 17
+
+/* AFE4403 NULL fields */
+#define NULL_MASK 0
+#define NULL_SHIFT 0
+
+/* AFE4403 LEDCNTRL values */
+#define AFE440X_LEDCNTRL_RANGE_TX_HALF 0x1
+#define AFE440X_LEDCNTRL_RANGE_TX_FULL 0x2
+#define AFE440X_LEDCNTRL_RANGE_TX_OFF 0x3
+
+/* AFE4403 CONTROL2 values */
+#define AFE440X_CONTROL2_TX_REF_025 0x0
+#define AFE440X_CONTROL2_TX_REF_050 0x1
+#define AFE440X_CONTROL2_TX_REF_100 0x2
+#define AFE440X_CONTROL2_TX_REF_075 0x3
+
+/* AFE4403 CONTROL3 values */
+#define AFE440X_CONTROL3_CLK_DIV_2 0x0
+#define AFE440X_CONTROL3_CLK_DIV_4 0x2
+#define AFE440X_CONTROL3_CLK_DIV_6 0x3
+#define AFE440X_CONTROL3_CLK_DIV_8 0x4
+#define AFE440X_CONTROL3_CLK_DIV_12 0x5
+#define AFE440X_CONTROL3_CLK_DIV_1 0x7
+
+/* AFE4403 TIAGAIN_CAP values */
+#define AFE4403_TIAGAIN_CAP_5_P 0x0
+#define AFE4403_TIAGAIN_CAP_10_P 0x1
+#define AFE4403_TIAGAIN_CAP_20_P 0x2
+#define AFE4403_TIAGAIN_CAP_30_P 0x3
+#define AFE4403_TIAGAIN_CAP_55_P 0x8
+#define AFE4403_TIAGAIN_CAP_155_P 0x10
+
+/* AFE4403 TIAGAIN_RES values */
+#define AFE4403_TIAGAIN_RES_500_K 0x0
+#define AFE4403_TIAGAIN_RES_250_K 0x1
+#define AFE4403_TIAGAIN_RES_100_K 0x2
+#define AFE4403_TIAGAIN_RES_50_K 0x3
+#define AFE4403_TIAGAIN_RES_25_K 0x4
+#define AFE4403_TIAGAIN_RES_10_K 0x5
+#define AFE4403_TIAGAIN_RES_1_M 0x6
+#define AFE4403_TIAGAIN_RES_NONE 0x7
+
+/**
+ * struct afe4403_data - AFE4403 device instance data
+ * @dev: Device structure
+ * @spi: SPI device handle
+ * @regmap: Register map of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
+ */
+struct afe4403_data {
+ struct device *dev;
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct regulator *regulator;
+ struct iio_trigger *trig;
+ int irq;
+};
+
+enum afe4403_chan_id {
+ LED1,
+ ALED1,
+ LED2,
+ ALED2,
+ LED1_ALED1,
+ LED2_ALED2,
+ ILED1,
+ ILED2,
+};
+
+static const struct afe440x_reg_info afe4403_reg_info[] = {
+ [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, 0, NULL),
+ [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, 0, NULL),
+ [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, 0, NULL),
+ [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
+ [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
+ [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
+ [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED1),
+ [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED2),
+};
+
+static const struct iio_chan_spec afe4403_channels[] = {
+ /* ADC values */
+ AFE440X_INTENSITY_CHAN(LED1, "led1", 0),
+ AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2, "led2", 0),
+ AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+ /* LED current */
+ AFE440X_CURRENT_CHAN(ILED1, "led1"),
+ AFE440X_CURRENT_CHAN(ILED2, "led2"),
+};
+
+static const struct afe440x_val_table afe4403_res_table[] = {
+ { 500000 }, { 250000 }, { 100000 }, { 50000 },
+ { 25000 }, { 10000 }, { 1000000 }, { 0 },
+};
+AFE440X_TABLE_ATTR(tia_resistance_available, afe4403_res_table);
+
+static const struct afe440x_val_table afe4403_cap_table[] = {
+ { 0, 5000 }, { 0, 10000 }, { 0, 20000 }, { 0, 25000 },
+ { 0, 30000 }, { 0, 35000 }, { 0, 45000 }, { 0, 50000 },
+ { 0, 55000 }, { 0, 60000 }, { 0, 70000 }, { 0, 75000 },
+ { 0, 80000 }, { 0, 85000 }, { 0, 95000 }, { 0, 100000 },
+ { 0, 155000 }, { 0, 160000 }, { 0, 170000 }, { 0, 175000 },
+ { 0, 180000 }, { 0, 185000 }, { 0, 195000 }, { 0, 200000 },
+ { 0, 205000 }, { 0, 210000 }, { 0, 220000 }, { 0, 225000 },
+ { 0, 230000 }, { 0, 235000 }, { 0, 245000 }, { 0, 250000 },
+};
+AFE440X_TABLE_ATTR(tia_capacitance_available, afe4403_cap_table);
+
+static ssize_t afe440x_show_register(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+ unsigned int reg_val, type;
+ int vals[2];
+ int ret, val_len;
+
+ ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+ if (ret)
+ return ret;
+
+ reg_val &= afe440x_attr->mask;
+ reg_val >>= afe440x_attr->shift;
+
+ switch (afe440x_attr->type) {
+ case SIMPLE:
+ type = IIO_VAL_INT;
+ val_len = 1;
+ vals[0] = reg_val;
+ break;
+ case RESISTANCE:
+ case CAPACITANCE:
+ type = IIO_VAL_INT_PLUS_MICRO;
+ val_len = 2;
+ if (reg_val < afe440x_attr->table_size) {
+ vals[0] = afe440x_attr->val_table[reg_val].integer;
+ vals[1] = afe440x_attr->val_table[reg_val].fract;
+ break;
+ }
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
+ return iio_format_value(buf, type, val_len, vals);
+}
+
+static ssize_t afe440x_store_register(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+ int val, integer, fract, ret;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+ if (ret)
+ return ret;
+
+ switch (afe440x_attr->type) {
+ case SIMPLE:
+ val = integer;
+ break;
+ case RESISTANCE:
+ case CAPACITANCE:
+ for (val = 0; val < afe440x_attr->table_size; val++)
+ if (afe440x_attr->val_table[val].integer == integer &&
+ afe440x_attr->val_table[val].fract == fract)
+ break;
+ if (val == afe440x_attr->table_size)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
+ afe440x_attr->mask,
+ (val << afe440x_attr->shift));
+ if (ret)
+ return ret;
+
+ return count;
+}
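Tracing a store through the table lookup above, assuming "250000.000000" is written to tia_resistance1:

	/*
	 * iio_str_to_fixpoint() -> integer = 250000, fract = 0;
	 * the linear scan matches afe4403_res_table[1] (250 kOhm),
	 * so regmap_update_bits() writes 0x1 into the RES field.
	 */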
+
+static AFE440X_ATTR(tia_separate_en, AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+
+static AFE440X_ATTR(tia_resistance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
+static AFE440X_ATTR(tia_capacitance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+
+static AFE440X_ATTR(tia_resistance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
+static AFE440X_ATTR(tia_capacitance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+
+static struct attribute *afe440x_attributes[] = {
+ &afe440x_attr_tia_separate_en.dev_attr.attr,
+ &afe440x_attr_tia_resistance1.dev_attr.attr,
+ &afe440x_attr_tia_capacitance1.dev_attr.attr,
+ &afe440x_attr_tia_resistance2.dev_attr.attr,
+ &afe440x_attr_tia_capacitance2.dev_attr.attr,
+ &dev_attr_tia_resistance_available.attr,
+ &dev_attr_tia_capacitance_available.attr,
+ NULL
+};
+
+static const struct attribute_group afe440x_attribute_group = {
+ .attrs = afe440x_attributes
+};
+
+static int afe4403_read(struct afe4403_data *afe, unsigned int reg, u32 *val)
+{
+ u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
+ u8 rx[3];
+ int ret;
+
+ /* Enable reading from the device */
+ ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = spi_write_then_read(afe->spi, &reg, 1, rx, 3);
+ if (ret)
+ return ret;
+
+ *val = (rx[0] << 16) |
+ (rx[1] << 8) |
+ (rx[2]);
+
+ /* Disable reading from the device */
+ tx[3] = AFE440X_CONTROL0_WRITE;
+ ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
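A worked pass through the read sequence above, assuming the addressed register reads back as 0x012345:

	/*
	 * 1. write CONTROL0 with the READ bit to enter register-read mode;
	 * 2. clock out the address byte, then read rx[] = {0x01, 0x23, 0x45};
	 *    *val = (0x01 << 16) | (0x23 << 8) | 0x45 = 0x012345;
	 * 3. write CONTROL0 back to WRITE mode to restore normal operation.
	 */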
+
+static int afe4403_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+ int ret;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = afe4403_read(afe, reg_info.reg, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = regmap_read(afe->regmap, reg_info.offreg,
+ val);
+ if (ret)
+ return ret;
+ *val &= reg_info.mask;
+ *val >>= reg_info.shift;
+ return IIO_VAL_INT;
+ }
+ break;
+ case IIO_CURRENT:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(afe->regmap, reg_info.reg, val);
+ if (ret)
+ return ret;
+ *val &= reg_info.mask;
+ *val >>= reg_info.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int afe4403_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ return regmap_update_bits(afe->regmap,
+ reg_info.offreg,
+ reg_info.mask,
+ (val << reg_info.shift));
+ }
+ break;
+ case IIO_CURRENT:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return regmap_update_bits(afe->regmap,
+ reg_info.reg,
+ reg_info.mask,
+ (val << reg_info.shift));
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info afe4403_iio_info = {
+ .attrs = &afe440x_attribute_group,
+ .read_raw = afe4403_read_raw,
+ .write_raw = afe4403_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t afe4403_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret, bit, i = 0;
+ s32 buffer[8];
+ u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
+ u8 rx[3];
+
+ /* Enable reading from the device */
+ ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+ if (ret)
+ goto err;
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = spi_write_then_read(afe->spi,
+ &afe4403_reg_info[bit].reg, 1,
+ rx, 3);
+ if (ret)
+ goto err;
+
+ buffer[i++] = (rx[0] << 16) |
+ (rx[1] << 8) |
+ (rx[2]);
+ }
+
+ /* Disable reading from the device */
+ tx[3] = AFE440X_CONTROL0_WRITE;
+ ret = spi_write_then_read(afe->spi, tx, 4, NULL, 0);
+ if (ret)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_trigger_ops afe4403_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+#define AFE4403_TIMING_PAIRS \
+ { AFE440X_LED2STC, 0x000050 }, \
+ { AFE440X_LED2ENDC, 0x0003e7 }, \
+ { AFE440X_LED1LEDSTC, 0x0007d0 }, \
+ { AFE440X_LED1LEDENDC, 0x000bb7 }, \
+ { AFE440X_ALED2STC, 0x000438 }, \
+ { AFE440X_ALED2ENDC, 0x0007cf }, \
+ { AFE440X_LED1STC, 0x000820 }, \
+ { AFE440X_LED1ENDC, 0x000bb7 }, \
+ { AFE440X_LED2LEDSTC, 0x000000 }, \
+ { AFE440X_LED2LEDENDC, 0x0003e7 }, \
+ { AFE440X_ALED1STC, 0x000c08 }, \
+ { AFE440X_ALED1ENDC, 0x000f9f }, \
+ { AFE440X_LED2CONVST, 0x0003ef }, \
+ { AFE440X_LED2CONVEND, 0x0007cf }, \
+ { AFE440X_ALED2CONVST, 0x0007d7 }, \
+ { AFE440X_ALED2CONVEND, 0x000bb7 }, \
+ { AFE440X_LED1CONVST, 0x000bbf }, \
+ { AFE440X_LED1CONVEND, 0x009c3f }, \
+ { AFE440X_ALED1CONVST, 0x000fa7 }, \
+ { AFE440X_ALED1CONVEND, 0x001387 }, \
+ { AFE440X_ADCRSTSTCT0, 0x0003e8 }, \
+ { AFE440X_ADCRSTENDCT0, 0x0003eb }, \
+ { AFE440X_ADCRSTSTCT1, 0x0007d0 }, \
+ { AFE440X_ADCRSTENDCT1, 0x0007d3 }, \
+ { AFE440X_ADCRSTSTCT2, 0x000bb8 }, \
+ { AFE440X_ADCRSTENDCT2, 0x000bbb }, \
+ { AFE440X_ADCRSTSTCT3, 0x000fa0 }, \
+ { AFE440X_ADCRSTENDCT3, 0x000fa3 }, \
+ { AFE440X_PRPCOUNT, 0x009c3f }, \
+ { AFE440X_PDNCYCLESTC, 0x001518 }, \
+ { AFE440X_PDNCYCLEENDC, 0x00991f }
+
+static const struct reg_sequence afe4403_reg_sequences[] = {
+ AFE4403_TIMING_PAIRS,
+ { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN | 0x000007},
+ { AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES_1_M },
+ { AFE440X_LEDCNTRL, (0x14 << AFE440X_LEDCNTRL_LED1_SHIFT) |
+ (0x14 << AFE440X_LEDCNTRL_LED2_SHIFT) },
+ { AFE440X_CONTROL2, AFE440X_CONTROL2_TX_REF_050 <<
+ AFE440X_CONTROL2_TX_REF_SHIFT },
+};
+
+static const struct regmap_range afe4403_yes_ranges[] = {
+ regmap_reg_range(AFE440X_LED2VAL, AFE440X_LED1_ALED1VAL),
+};
+
+static const struct regmap_access_table afe4403_volatile_table = {
+ .yes_ranges = afe4403_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(afe4403_yes_ranges),
+};
+
+static const struct regmap_config afe4403_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 24,
+
+ .max_register = AFE440X_PDNCYCLEENDC,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &afe4403_volatile_table,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id afe4403_of_match[] = {
+ { .compatible = "ti,afe4403", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, afe4403_of_match);
+#endif
+
+static int __maybe_unused afe4403_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE,
+ AFE440X_CONTROL2_PDN_AFE);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(afe->regulator);
+ if (ret) {
+ dev_err(dev, "Unable to disable regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused afe4403_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(afe->regulator);
+ if (ret) {
+ dev_err(dev, "Unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(afe4403_pm_ops, afe4403_suspend, afe4403_resume);
+
+static int afe4403_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct afe4403_data *afe;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ afe = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ afe->dev = &spi->dev;
+ afe->spi = spi;
+ afe->irq = spi->irq;
+
+ afe->regmap = devm_regmap_init_spi(spi, &afe4403_regmap_config);
+ if (IS_ERR(afe->regmap)) {
+ dev_err(afe->dev, "Unable to allocate register map\n");
+ return PTR_ERR(afe->regmap);
+ }
+
+ afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+ if (IS_ERR(afe->regulator)) {
+ dev_err(afe->dev, "Unable to get regulator\n");
+ return PTR_ERR(afe->regulator);
+ }
+ ret = regulator_enable(afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
+ AFE440X_CONTROL0_SW_RESET);
+ if (ret) {
+ dev_err(afe->dev, "Unable to reset device\n");
+ goto err_disable_reg;
+ }
+
+ ret = regmap_multi_reg_write(afe->regmap, afe4403_reg_sequences,
+ ARRAY_SIZE(afe4403_reg_sequences));
+ if (ret) {
+ dev_err(afe->dev, "Unable to set register defaults\n");
+ goto err_disable_reg;
+ }
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = afe->dev;
+ indio_dev->channels = afe4403_channels;
+ indio_dev->num_channels = ARRAY_SIZE(afe4403_channels);
+ indio_dev->name = AFE4403_DRIVER_NAME;
+ indio_dev->info = &afe4403_iio_info;
+
+ if (afe->irq > 0) {
+ afe->trig = devm_iio_trigger_alloc(afe->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!afe->trig) {
+ dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+ ret = -ENOMEM;
+ goto err_disable_reg;
+ }
+
+ iio_trigger_set_drvdata(afe->trig, indio_dev);
+
+ afe->trig->ops = &afe4403_trigger_ops;
+ afe->trig->dev.parent = afe->dev;
+
+ ret = iio_trigger_register(afe->trig);
+ if (ret) {
+ dev_err(afe->dev, "Unable to register IIO trigger\n");
+ goto err_disable_reg;
+ }
+
+ ret = devm_request_threaded_irq(afe->dev, afe->irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL, IRQF_ONESHOT,
+ AFE4403_DRIVER_NAME,
+ afe->trig);
+ if (ret) {
+ dev_err(afe->dev, "Unable to request IRQ\n");
+ goto err_trig;
+ }
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ afe4403_trigger_handler, NULL);
+ if (ret) {
+ dev_err(afe->dev, "Unable to setup buffer\n");
+ goto err_trig;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(afe->dev, "Unable to register IIO device\n");
+ goto err_buff;
+ }
+
+ return 0;
+
+err_buff:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_trig:
+ if (afe->irq > 0)
+ iio_trigger_unregister(afe->trig);
+err_disable_reg:
+ regulator_disable(afe->regulator);
+
+ return ret;
+}
+
+static int afe4403_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ iio_device_unregister(indio_dev);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ if (afe->irq > 0)
+ iio_trigger_unregister(afe->trig);
+
+ ret = regulator_disable(afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to disable regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct spi_device_id afe4403_ids[] = {
+ { "afe4403", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, afe4403_ids);
+
+static struct spi_driver afe4403_spi_driver = {
+ .driver = {
+ .name = AFE4403_DRIVER_NAME,
+ .of_match_table = of_match_ptr(afe4403_of_match),
+ .pm = &afe4403_pm_ops,
+ },
+ .probe = afe4403_probe,
+ .remove = afe4403_remove,
+ .id_table = afe4403_ids,
+};
+module_spi_driver(afe4403_spi_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI AFE4403 Heart Rate and Pulse Oximeter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
new file mode 100644
index 000000000000..5096a4643784
--- /dev/null
+++ b/drivers/iio/health/afe4404.c
@@ -0,0 +1,679 @@
+/*
+ * AFE4404 Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#include "afe440x.h"
+
+#define AFE4404_DRIVER_NAME "afe4404"
+
+/* AFE4404 registers */
+#define AFE4404_TIA_GAIN_SEP 0x20
+#define AFE4404_TIA_GAIN 0x21
+#define AFE4404_PROG_TG_STC 0x34
+#define AFE4404_PROG_TG_ENDC 0x35
+#define AFE4404_LED3LEDSTC 0x36
+#define AFE4404_LED3LEDENDC 0x37
+#define AFE4404_CLKDIV_PRF 0x39
+#define AFE4404_OFFDAC 0x3a
+#define AFE4404_DEC 0x3d
+#define AFE4404_AVG_LED2_ALED2VAL 0x3f
+#define AFE4404_AVG_LED1_ALED1VAL 0x40
+
+/* AFE4404 GAIN register fields */
+#define AFE4404_TIA_GAIN_RES_MASK GENMASK(2, 0)
+#define AFE4404_TIA_GAIN_RES_SHIFT 0
+#define AFE4404_TIA_GAIN_CAP_MASK GENMASK(5, 3)
+#define AFE4404_TIA_GAIN_CAP_SHIFT 3
+
+/* AFE4404 LEDCNTRL register fields */
+#define AFE4404_LEDCNTRL_ILED1_MASK GENMASK(5, 0)
+#define AFE4404_LEDCNTRL_ILED1_SHIFT 0
+#define AFE4404_LEDCNTRL_ILED2_MASK GENMASK(11, 6)
+#define AFE4404_LEDCNTRL_ILED2_SHIFT 6
+#define AFE4404_LEDCNTRL_ILED3_MASK GENMASK(17, 12)
+#define AFE4404_LEDCNTRL_ILED3_SHIFT 12
+
+/* AFE4404 CONTROL2 register fields */
+#define AFE440X_CONTROL2_ILED_2X_MASK BIT(17)
+#define AFE440X_CONTROL2_ILED_2X_SHIFT 17
+
+/* AFE4404 CONTROL3 register fields */
+#define AFE440X_CONTROL3_OSC_ENABLE BIT(9)
+
+/* AFE4404 OFFDAC register current fields */
+#define AFE4404_OFFDAC_CURR_LED1_MASK GENMASK(9, 5)
+#define AFE4404_OFFDAC_CURR_LED1_SHIFT 5
+#define AFE4404_OFFDAC_CURR_LED2_MASK GENMASK(19, 15)
+#define AFE4404_OFFDAC_CURR_LED2_SHIFT 15
+#define AFE4404_OFFDAC_CURR_LED3_MASK GENMASK(4, 0)
+#define AFE4404_OFFDAC_CURR_LED3_SHIFT 0
+#define AFE4404_OFFDAC_CURR_ALED1_MASK GENMASK(14, 10)
+#define AFE4404_OFFDAC_CURR_ALED1_SHIFT 10
+#define AFE4404_OFFDAC_CURR_ALED2_MASK GENMASK(4, 0)
+#define AFE4404_OFFDAC_CURR_ALED2_SHIFT 0
+
+/* AFE4404 NULL fields */
+#define NULL_MASK 0
+#define NULL_SHIFT 0
+
+/* AFE4404 TIA_GAIN_CAP values */
+#define AFE4404_TIA_GAIN_CAP_5_P 0x0
+#define AFE4404_TIA_GAIN_CAP_2_5_P 0x1
+#define AFE4404_TIA_GAIN_CAP_10_P 0x2
+#define AFE4404_TIA_GAIN_CAP_7_5_P 0x3
+#define AFE4404_TIA_GAIN_CAP_20_P 0x4
+#define AFE4404_TIA_GAIN_CAP_17_5_P 0x5
+#define AFE4404_TIA_GAIN_CAP_25_P 0x6
+#define AFE4404_TIA_GAIN_CAP_22_5_P 0x7
+
+/* AFE4404 TIA_GAIN_RES values */
+#define AFE4404_TIA_GAIN_RES_500_K 0x0
+#define AFE4404_TIA_GAIN_RES_250_K 0x1
+#define AFE4404_TIA_GAIN_RES_100_K 0x2
+#define AFE4404_TIA_GAIN_RES_50_K 0x3
+#define AFE4404_TIA_GAIN_RES_25_K 0x4
+#define AFE4404_TIA_GAIN_RES_10_K 0x5
+#define AFE4404_TIA_GAIN_RES_1_M 0x6
+#define AFE4404_TIA_GAIN_RES_2_M 0x7
+
+/**
+ * struct afe4404_data - AFE4404 device instance data
+ * @dev: Device structure
+ * @regmap: Register map of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
+ */
+struct afe4404_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator *regulator;
+ struct iio_trigger *trig;
+ int irq;
+};
+
+enum afe4404_chan_id {
+ LED1,
+ ALED1,
+ LED2,
+ ALED2,
+ LED3,
+ LED1_ALED1,
+ LED2_ALED2,
+ ILED1,
+ ILED2,
+ ILED3,
+};
+
+static const struct afe440x_reg_info afe4404_reg_info[] = {
+ [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1),
+ [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED1),
+ [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED2),
+ [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED2),
+ [LED3] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
+ [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
+ [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
+ [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED1),
+ [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED2),
+ [ILED3] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED3),
+};
+
+static const struct iio_chan_spec afe4404_channels[] = {
+ /* ADC values */
+ AFE440X_INTENSITY_CHAN(LED1, "led1", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED2, "led2", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED3, "led3", BIT(IIO_CHAN_INFO_OFFSET)),
+ AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
+ AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+ /* LED current */
+ AFE440X_CURRENT_CHAN(ILED1, "led1"),
+ AFE440X_CURRENT_CHAN(ILED2, "led2"),
+ AFE440X_CURRENT_CHAN(ILED3, "led3"),
+};
+
+static const struct afe440x_val_table afe4404_res_table[] = {
+ { .integer = 500000, .fract = 0 },
+ { .integer = 250000, .fract = 0 },
+ { .integer = 100000, .fract = 0 },
+ { .integer = 50000, .fract = 0 },
+ { .integer = 25000, .fract = 0 },
+ { .integer = 10000, .fract = 0 },
+ { .integer = 1000000, .fract = 0 },
+ { .integer = 2000000, .fract = 0 },
+};
+AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table);
+
+static const struct afe440x_val_table afe4404_cap_table[] = {
+ { .integer = 0, .fract = 5000 },
+ { .integer = 0, .fract = 2500 },
+ { .integer = 0, .fract = 10000 },
+ { .integer = 0, .fract = 7500 },
+ { .integer = 0, .fract = 20000 },
+ { .integer = 0, .fract = 17500 },
+ { .integer = 0, .fract = 25000 },
+ { .integer = 0, .fract = 22500 },
+};
+AFE440X_TABLE_ATTR(tia_capacitance_available, afe4404_cap_table);
+
+static ssize_t afe440x_show_register(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+ unsigned int reg_val, type;
+ int vals[2];
+ int ret, val_len;
+
+ ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+ if (ret)
+ return ret;
+
+ reg_val &= afe440x_attr->mask;
+ reg_val >>= afe440x_attr->shift;
+
+ switch (afe440x_attr->type) {
+ case SIMPLE:
+ type = IIO_VAL_INT;
+ val_len = 1;
+ vals[0] = reg_val;
+ break;
+ case RESISTANCE:
+ case CAPACITANCE:
+ type = IIO_VAL_INT_PLUS_MICRO;
+ val_len = 2;
+ if (reg_val < afe440x_attr->table_size) {
+ vals[0] = afe440x_attr->val_table[reg_val].integer;
+ vals[1] = afe440x_attr->val_table[reg_val].fract;
+ break;
+ }
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
+ return iio_format_value(buf, type, val_len, vals);
+}
+
+static ssize_t afe440x_store_register(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
+ int val, integer, fract, ret;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+ if (ret)
+ return ret;
+
+ switch (afe440x_attr->type) {
+ case SIMPLE:
+ val = integer;
+ break;
+ case RESISTANCE:
+ case CAPACITANCE:
+ for (val = 0; val < afe440x_attr->table_size; val++)
+ if (afe440x_attr->val_table[val].integer == integer &&
+ afe440x_attr->val_table[val].fract == fract)
+ break;
+ if (val == afe440x_attr->table_size)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
+ afe440x_attr->mask,
+ (val << afe440x_attr->shift));
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static AFE440X_ATTR(tia_separate_en, AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+
+static AFE440X_ATTR(tia_resistance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
+static AFE440X_ATTR(tia_capacitance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+
+static AFE440X_ATTR(tia_resistance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
+static AFE440X_ATTR(tia_capacitance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+
+static struct attribute *afe440x_attributes[] = {
+ &afe440x_attr_tia_separate_en.dev_attr.attr,
+ &afe440x_attr_tia_resistance1.dev_attr.attr,
+ &afe440x_attr_tia_capacitance1.dev_attr.attr,
+ &afe440x_attr_tia_resistance2.dev_attr.attr,
+ &afe440x_attr_tia_capacitance2.dev_attr.attr,
+ &dev_attr_tia_resistance_available.attr,
+ &dev_attr_tia_capacitance_available.attr,
+ NULL
+};
+
+static const struct attribute_group afe440x_attribute_group = {
+ .attrs = afe440x_attributes
+};
+
+static int afe4404_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+ int ret;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(afe->regmap, reg_info.reg, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = regmap_read(afe->regmap, reg_info.offreg,
+ val);
+ if (ret)
+ return ret;
+ *val &= reg_info.mask;
+ *val >>= reg_info.shift;
+ return IIO_VAL_INT;
+ }
+ break;
+ case IIO_CURRENT:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(afe->regmap, reg_info.reg, val);
+ if (ret)
+ return ret;
+ *val &= reg_info.mask;
+ *val >>= reg_info.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
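+/*
+ * Note on the IIO_CURRENT scale above (an inference from the code, not
+ * spelled out in the patch): IIO reports current channels in
+ * milliamps, so a scale of 0.800000 means one raw LEDCNTRL step is
+ * 0.8 mA; the 0xf ILED1 default programmed below then corresponds to
+ * roughly 15 * 0.8 = 12 mA.
+ */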
+
+static int afe4404_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_OFFSET:
+ return regmap_update_bits(afe->regmap,
+ reg_info.offreg,
+ reg_info.mask,
+ (val << reg_info.shift));
+ }
+ break;
+ case IIO_CURRENT:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return regmap_update_bits(afe->regmap,
+ reg_info.reg,
+ reg_info.mask,
+ (val << reg_info.shift));
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info afe4404_iio_info = {
+ .attrs = &afe440x_attribute_group,
+ .read_raw = afe4404_read_raw,
+ .write_raw = afe4404_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static irqreturn_t afe4404_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret, bit, i = 0;
+ s32 buffer[10];
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = regmap_read(afe->regmap, afe4404_reg_info[bit].reg,
+ &buffer[i++]);
+ if (ret)
+ goto err;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_trigger_ops afe4404_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
+/* Default timings from the datasheet */
+#define AFE4404_TIMING_PAIRS \
+ { AFE440X_PRPCOUNT, 39999 }, \
+ { AFE440X_LED2LEDSTC, 0 }, \
+ { AFE440X_LED2LEDENDC, 398 }, \
+ { AFE440X_LED2STC, 80 }, \
+ { AFE440X_LED2ENDC, 398 }, \
+ { AFE440X_ADCRSTSTCT0, 5600 }, \
+ { AFE440X_ADCRSTENDCT0, 5606 }, \
+ { AFE440X_LED2CONVST, 5607 }, \
+ { AFE440X_LED2CONVEND, 6066 }, \
+ { AFE4404_LED3LEDSTC, 400 }, \
+ { AFE4404_LED3LEDENDC, 798 }, \
+ { AFE440X_ALED2STC, 480 }, \
+ { AFE440X_ALED2ENDC, 798 }, \
+ { AFE440X_ADCRSTSTCT1, 6068 }, \
+ { AFE440X_ADCRSTENDCT1, 6074 }, \
+ { AFE440X_ALED2CONVST, 6075 }, \
+ { AFE440X_ALED2CONVEND, 6534 }, \
+ { AFE440X_LED1LEDSTC, 800 }, \
+ { AFE440X_LED1LEDENDC, 1198 }, \
+ { AFE440X_LED1STC, 880 }, \
+ { AFE440X_LED1ENDC, 1198 }, \
+ { AFE440X_ADCRSTSTCT2, 6536 }, \
+ { AFE440X_ADCRSTENDCT2, 6542 }, \
+ { AFE440X_LED1CONVST, 6543 }, \
+ { AFE440X_LED1CONVEND, 7003 }, \
+ { AFE440X_ALED1STC, 1280 }, \
+ { AFE440X_ALED1ENDC, 1598 }, \
+ { AFE440X_ADCRSTSTCT3, 7005 }, \
+ { AFE440X_ADCRSTENDCT3, 7011 }, \
+ { AFE440X_ALED1CONVST, 7012 }, \
+ { AFE440X_ALED1CONVEND, 7471 }, \
+ { AFE440X_PDNCYCLESTC, 7671 }, \
+ { AFE440X_PDNCYCLEENDC, 39199 }
+
+static const struct reg_sequence afe4404_reg_sequences[] = {
+ AFE4404_TIMING_PAIRS,
+ { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
+ { AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES_50_K },
+ { AFE440X_LEDCNTRL, (0xf << AFE4404_LEDCNTRL_ILED1_SHIFT) |
+ (0x3 << AFE4404_LEDCNTRL_ILED2_SHIFT) |
+ (0x3 << AFE4404_LEDCNTRL_ILED3_SHIFT) },
+ { AFE440X_CONTROL2, AFE440X_CONTROL3_OSC_ENABLE },
+};
+
+static const struct regmap_range afe4404_yes_ranges[] = {
+ regmap_reg_range(AFE440X_LED2VAL, AFE440X_LED1_ALED1VAL),
+ regmap_reg_range(AFE4404_AVG_LED2_ALED2VAL, AFE4404_AVG_LED1_ALED1VAL),
+};
+
+static const struct regmap_access_table afe4404_volatile_table = {
+ .yes_ranges = afe4404_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(afe4404_yes_ranges),
+};
+
+static const struct regmap_config afe4404_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 24,
+
+ .max_register = AFE4404_AVG_LED1_ALED1VAL,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &afe4404_volatile_table,
+};
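+/*
+ * Reading of the regmap setup above (explanatory, not part of the
+ * patch): only the ADC result registers fall within afe4404_yes_ranges,
+ * so they are re-read from the hardware on every access, while all
+ * other registers are served from the rbtree register cache.
+ */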
+
+#ifdef CONFIG_OF
+static const struct of_device_id afe4404_of_match[] = {
+ { .compatible = "ti,afe4404", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, afe4404_of_match);
+#endif
+
+static int __maybe_unused afe4404_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE,
+ AFE440X_CONTROL2_PDN_AFE);
+ if (ret)
+ return ret;
+
+ ret = regulator_disable(afe->regulator);
+ if (ret) {
+ dev_err(dev, "Unable to disable regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused afe4404_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(afe->regulator);
+ if (ret) {
+ dev_err(dev, "Unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(afe->regmap, AFE440X_CONTROL2,
+ AFE440X_CONTROL2_PDN_AFE, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend, afe4404_resume);
+
+static int afe4404_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct afe4404_data *afe;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ afe = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ afe->dev = &client->dev;
+ afe->irq = client->irq;
+
+ afe->regmap = devm_regmap_init_i2c(client, &afe4404_regmap_config);
+ if (IS_ERR(afe->regmap)) {
+ dev_err(afe->dev, "Unable to allocate register map\n");
+ return PTR_ERR(afe->regmap);
+ }
+
+ afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+ if (IS_ERR(afe->regulator)) {
+ dev_err(afe->dev, "Unable to get regulator\n");
+ return PTR_ERR(afe->regulator);
+ }
+ ret = regulator_enable(afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
+ AFE440X_CONTROL0_SW_RESET);
+ if (ret) {
+ dev_err(afe->dev, "Unable to reset device\n");
+ goto disable_reg;
+ }
+
+ ret = regmap_multi_reg_write(afe->regmap, afe4404_reg_sequences,
+ ARRAY_SIZE(afe4404_reg_sequences));
+ if (ret) {
+ dev_err(afe->dev, "Unable to set register defaults\n");
+ goto disable_reg;
+ }
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = afe->dev;
+ indio_dev->channels = afe4404_channels;
+ indio_dev->num_channels = ARRAY_SIZE(afe4404_channels);
+ indio_dev->name = AFE4404_DRIVER_NAME;
+ indio_dev->info = &afe4404_iio_info;
+
+ if (afe->irq > 0) {
+ afe->trig = devm_iio_trigger_alloc(afe->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ indio_dev->id);
+ if (!afe->trig) {
+ dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+ ret = -ENOMEM;
+ goto disable_reg;
+ }
+
+ iio_trigger_set_drvdata(afe->trig, indio_dev);
+
+ afe->trig->ops = &afe4404_trigger_ops;
+ afe->trig->dev.parent = afe->dev;
+
+ ret = iio_trigger_register(afe->trig);
+ if (ret) {
+ dev_err(afe->dev, "Unable to register IIO trigger\n");
+ goto disable_reg;
+ }
+
+ ret = devm_request_threaded_irq(afe->dev, afe->irq,
+ iio_trigger_generic_data_rdy_poll,
+ NULL, IRQF_ONESHOT,
+ AFE4404_DRIVER_NAME,
+ afe->trig);
+ if (ret) {
+ dev_err(afe->dev, "Unable to request IRQ\n");
+ goto disable_reg;
+ }
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ afe4404_trigger_handler, NULL);
+ if (ret) {
+ dev_err(afe->dev, "Unable to setup buffer\n");
+ goto unregister_trigger;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(afe->dev, "Unable to register IIO device\n");
+ goto unregister_triggered_buffer;
+ }
+
+ return 0;
+
+unregister_triggered_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+unregister_trigger:
+ if (afe->irq > 0)
+ iio_trigger_unregister(afe->trig);
+disable_reg:
+ regulator_disable(afe->regulator);
+
+ return ret;
+}
+
+static int afe4404_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+ iio_device_unregister(indio_dev);
+
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ if (afe->irq > 0)
+ iio_trigger_unregister(afe->trig);
+
+ ret = regulator_disable(afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to disable regulator\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id afe4404_ids[] = {
+ { "afe4404", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, afe4404_ids);
+
+static struct i2c_driver afe4404_i2c_driver = {
+ .driver = {
+ .name = AFE4404_DRIVER_NAME,
+ .of_match_table = of_match_ptr(afe4404_of_match),
+ .pm = &afe4404_pm_ops,
+ },
+ .probe = afe4404_probe,
+ .remove = afe4404_remove,
+ .id_table = afe4404_ids,
+};
+module_i2c_driver(afe4404_i2c_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TI AFE4404 Heart Rate and Pulse Oximeter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe440x.h b/drivers/iio/health/afe440x.h
new file mode 100644
index 000000000000..c671ab78a23a
--- /dev/null
+++ b/drivers/iio/health/afe440x.h
@@ -0,0 +1,191 @@
+/*
+ * AFE440X Heart Rate Monitors and Low-Cost Pulse Oximeters
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _AFE440X_H
+#define _AFE440X_H
+
+/* AFE440X registers */
+#define AFE440X_CONTROL0 0x00
+#define AFE440X_LED2STC 0x01
+#define AFE440X_LED2ENDC 0x02
+#define AFE440X_LED1LEDSTC 0x03
+#define AFE440X_LED1LEDENDC 0x04
+#define AFE440X_ALED2STC 0x05
+#define AFE440X_ALED2ENDC 0x06
+#define AFE440X_LED1STC 0x07
+#define AFE440X_LED1ENDC 0x08
+#define AFE440X_LED2LEDSTC 0x09
+#define AFE440X_LED2LEDENDC 0x0a
+#define AFE440X_ALED1STC 0x0b
+#define AFE440X_ALED1ENDC 0x0c
+#define AFE440X_LED2CONVST 0x0d
+#define AFE440X_LED2CONVEND 0x0e
+#define AFE440X_ALED2CONVST 0x0f
+#define AFE440X_ALED2CONVEND 0x10
+#define AFE440X_LED1CONVST 0x11
+#define AFE440X_LED1CONVEND 0x12
+#define AFE440X_ALED1CONVST 0x13
+#define AFE440X_ALED1CONVEND 0x14
+#define AFE440X_ADCRSTSTCT0 0x15
+#define AFE440X_ADCRSTENDCT0 0x16
+#define AFE440X_ADCRSTSTCT1 0x17
+#define AFE440X_ADCRSTENDCT1 0x18
+#define AFE440X_ADCRSTSTCT2 0x19
+#define AFE440X_ADCRSTENDCT2 0x1a
+#define AFE440X_ADCRSTSTCT3 0x1b
+#define AFE440X_ADCRSTENDCT3 0x1c
+#define AFE440X_PRPCOUNT 0x1d
+#define AFE440X_CONTROL1 0x1e
+#define AFE440X_LEDCNTRL 0x22
+#define AFE440X_CONTROL2 0x23
+#define AFE440X_ALARM 0x29
+#define AFE440X_LED2VAL 0x2a
+#define AFE440X_ALED2VAL 0x2b
+#define AFE440X_LED1VAL 0x2c
+#define AFE440X_ALED1VAL 0x2d
+#define AFE440X_LED2_ALED2VAL 0x2e
+#define AFE440X_LED1_ALED1VAL 0x2f
+#define AFE440X_CONTROL3 0x31
+#define AFE440X_PDNCYCLESTC 0x32
+#define AFE440X_PDNCYCLEENDC 0x33
+
+/* CONTROL0 register fields */
+#define AFE440X_CONTROL0_REG_READ BIT(0)
+#define AFE440X_CONTROL0_TM_COUNT_RST BIT(1)
+#define AFE440X_CONTROL0_SW_RESET BIT(3)
+
+/* CONTROL1 register fields */
+#define AFE440X_CONTROL1_TIMEREN BIT(8)
+
+/* TIAGAIN register fields */
+#define AFE440X_TIAGAIN_ENSEPGAIN_MASK BIT(15)
+#define AFE440X_TIAGAIN_ENSEPGAIN_SHIFT 15
+
+/* CONTROL2 register fields */
+#define AFE440X_CONTROL2_PDN_AFE BIT(0)
+#define AFE440X_CONTROL2_PDN_RX BIT(1)
+#define AFE440X_CONTROL2_DYNAMIC4 BIT(3)
+#define AFE440X_CONTROL2_DYNAMIC3 BIT(4)
+#define AFE440X_CONTROL2_DYNAMIC2 BIT(14)
+#define AFE440X_CONTROL2_DYNAMIC1 BIT(20)
+
+/* CONTROL3 register fields */
+#define AFE440X_CONTROL3_CLKDIV GENMASK(2, 0)
+
+/* CONTROL0 values */
+#define AFE440X_CONTROL0_WRITE 0x0
+#define AFE440X_CONTROL0_READ 0x1
+
+struct afe440x_reg_info {
+ unsigned int reg;
+ unsigned int offreg;
+ unsigned int shift;
+ unsigned int mask;
+};
+
+#define AFE440X_REG_INFO(_reg, _offreg, _sm) \
+ { \
+ .reg = _reg, \
+ .offreg = _offreg, \
+ .shift = _sm ## _SHIFT, \
+ .mask = _sm ## _MASK, \
+ }
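+/*
+ * For illustration (a sketch, not part of the patch): given the
+ * _SHIFT/_MASK token pasting above, the ILED1 entry in afe4404.c
+ * expands roughly to
+ *
+ *	[ILED1] = {
+ *		.reg	= AFE440X_LEDCNTRL,
+ *		.offreg	= 0,
+ *		.shift	= AFE4404_LEDCNTRL_ILED1_SHIFT,
+ *		.mask	= AFE4404_LEDCNTRL_ILED1_MASK,
+ *	},
+ *
+ * with the AFE4404_LEDCNTRL_ILED1_{SHIFT,MASK} definitions presumably
+ * provided by the afe4404 driver itself.
+ */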
+
+#define AFE440X_INTENSITY_CHAN(_index, _name, _mask) \
+ { \
+ .type = IIO_INTENSITY, \
+ .channel = _index, \
+ .address = _index, \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 24, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+ .extend_name = _name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ _mask, \
+ }
+
+#define AFE440X_CURRENT_CHAN(_index, _name) \
+ { \
+ .type = IIO_CURRENT, \
+ .channel = _index, \
+ .address = _index, \
+ .scan_index = _index, \
+ .extend_name = _name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .output = true, \
+ }
+
+enum afe440x_reg_type {
+ SIMPLE,
+ RESISTANCE,
+ CAPACITANCE,
+};
+
+struct afe440x_val_table {
+ int integer;
+ int fract;
+};
+
+#define AFE440X_TABLE_ATTR(_name, _table) \
+static ssize_t _name ## _show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ ssize_t len = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(_table); i++) \
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06u ", \
+ _table[i].integer, \
+ _table[i].fract); \
+ \
+ buf[len - 1] = '\n'; \
+ \
+ return len; \
+} \
+static DEVICE_ATTR_RO(_name)
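+/*
+ * For illustration (a sketch, not part of the patch): the invocation
+ * AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table) in
+ * afe4404.c generates tia_resistance_available_show() plus
+ * DEVICE_ATTR_RO(tia_resistance_available), so reading the sysfs file
+ * prints each table entry as "%d.%06u", e.g.
+ *
+ *	500000.000000 250000.000000 ... 2000000.000000
+ *
+ * with the final trailing space rewritten to a newline.
+ */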
+
+struct afe440x_attr {
+ struct device_attribute dev_attr;
+ unsigned int reg;
+ unsigned int shift;
+ unsigned int mask;
+ enum afe440x_reg_type type;
+ const struct afe440x_val_table *val_table;
+ unsigned int table_size;
+};
+
+#define to_afe440x_attr(_dev_attr) \
+ container_of(_dev_attr, struct afe440x_attr, dev_attr)
+
+#define AFE440X_ATTR(_name, _reg, _field, _type, _table, _size) \
+ struct afe440x_attr afe440x_attr_##_name = { \
+ .dev_attr = __ATTR(_name, (S_IRUGO | S_IWUSR), \
+ afe440x_show_register, \
+ afe440x_store_register), \
+ .reg = _reg, \
+ .shift = _field ## _SHIFT, \
+ .mask = _field ## _MASK, \
+ .type = _type, \
+ .val_table = _table, \
+ .table_size = _size, \
+ }
+
+#endif /* _AFE440X_H */
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 9d1c81f91dd7..90ab8a2d2846 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -13,7 +13,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * TODO: allow LED current and pulse length controls via device tree properties
+ * TODO: enable pulse length controls via device tree properties
*/
#include <linux/module.h>
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -65,6 +66,7 @@
#define MAX30100_REG_SPO2_CONFIG_1600US 0x3
#define MAX30100_REG_LED_CONFIG 0x09
+#define MAX30100_REG_LED_CONFIG_LED_MASK 0x0f
#define MAX30100_REG_LED_CONFIG_RED_LED_SHIFT 4
#define MAX30100_REG_LED_CONFIG_24MA 0x07
@@ -111,6 +113,12 @@ static const struct regmap_config max30100_regmap_config = {
.volatile_reg = max30100_is_volatile_reg,
};
+static const unsigned int max30100_led_current_mapping[] = {
+ 4400, 7600, 11000, 14200, 17400,
+ 20800, 24000, 27100, 30600, 33800,
+ 37000, 40200, 43600, 46800, 50000
+};
+
static const unsigned long max30100_scan_masks[] = {0x3, 0};
static const struct iio_chan_spec max30100_channels[] = {
@@ -230,12 +238,13 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
mutex_lock(&data->lock);
- while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) {
+ while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
ret = max30100_read_measurement(data);
if (ret)
break;
iio_push_to_buffers(data->indio_dev, data->buffer);
+ cnt--;
}
mutex_unlock(&data->lock);
@@ -243,15 +252,76 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static int max30100_get_current_idx(unsigned int val, int *reg)
+{
+ int idx;
+
+ /* LED turned off */
+ if (val == 0) {
+ *reg = 0;
+ return 0;
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(max30100_led_current_mapping); idx++) {
+ if (max30100_led_current_mapping[idx] == val) {
+ *reg = idx + 1;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
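+/*
+ * Worked example (a sketch using the mapping table above): a device
+ * tree property of
+ *
+ *	maxim,led-current-microamp = <24000 50000>;
+ *
+ * matches indices 6 and 14, so the register fields become 7 and 15,
+ * consistent with MAX30100_REG_LED_CONFIG_24MA (0x07) for the RED LED;
+ * a value of 0 keeps the LED off.
+ */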
+
+static int max30100_led_init(struct max30100_data *data)
+{
+ struct device *dev = &data->client->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int val[2];
+ int reg, ret;
+
+ ret = of_property_read_u32_array(np, "maxim,led-current-microamp",
+ (unsigned int *) &val, 2);
+ if (ret) {
+ /* Default to 24 mA RED LED, 50 mA IR LED */
+ reg = (MAX30100_REG_LED_CONFIG_24MA <<
+ MAX30100_REG_LED_CONFIG_RED_LED_SHIFT) |
+ MAX30100_REG_LED_CONFIG_50MA;
+ dev_warn(dev, "no led-current-microamp set\n");
+
+ return regmap_write(data->regmap, MAX30100_REG_LED_CONFIG, reg);
+ }
+
+ /* RED LED current */
+ ret = max30100_get_current_idx(val[0], &reg);
+ if (ret) {
+ dev_err(dev, "invalid RED current setting %d", val[0]);
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, MAX30100_REG_LED_CONFIG,
+ MAX30100_REG_LED_CONFIG_LED_MASK <<
+ MAX30100_REG_LED_CONFIG_RED_LED_SHIFT,
+ reg << MAX30100_REG_LED_CONFIG_RED_LED_SHIFT);
+ if (ret)
+ return ret;
+
+ /* IR LED current */
+ ret = max30100_get_current_idx(val[1], &reg);
+ if (ret) {
+ dev_err(dev, "invalid IR current setting %d", val[1]);
+ return ret;
+ }
+
+ return regmap_update_bits(data->regmap, MAX30100_REG_LED_CONFIG,
+ MAX30100_REG_LED_CONFIG_LED_MASK, reg);
+}
+
static int max30100_chip_init(struct max30100_data *data)
{
int ret;
- /* RED IR LED = 24mA, IR LED = 50mA */
- ret = regmap_write(data->regmap, MAX30100_REG_LED_CONFIG,
- (MAX30100_REG_LED_CONFIG_24MA <<
- MAX30100_REG_LED_CONFIG_RED_LED_SHIFT) |
- MAX30100_REG_LED_CONFIG_50MA);
+ /* setup LED current settings */
+ ret = max30100_led_init(data);
if (ret)
return ret;
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 6a23698d347c..866dda133336 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -43,14 +43,16 @@ config SI7005
humidity and temperature sensor.
To compile this driver as a module, choose M here: the module
- will be called si7005.
+ will be called si7005. This driver also
+ supports the Hoperf TH02 Humidity and Temperature Sensor.
config SI7020
tristate "Si7013/20/21 Relative Humidity and Temperature Sensors"
depends on I2C
help
Say yes here to build support for the Silicon Labs Si7013/20/21
- Relative Humidity and Temperature Sensors.
+ Relative Humidity and Temperature Sensors. This driver also
+ supports the Hoperf TH06 Humidity and Temperature Sensor.
To compile this driver as a module, choose M here: the module
will be called si7020.
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index cfc5a051ab9f..20b500da94db 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -50,12 +50,32 @@
#define DHT11_EDGES_PER_READ (2 * DHT11_BITS_PER_READ + \
DHT11_EDGES_PREAMBLE + 1)
-/* Data transmission timing (nano seconds) */
+/*
+ * Data transmission timing:
+ * Data bits are encoded as pulse length (high time) on the data line.
+ * 0-bit: 22-30uS -- typically 26uS (AM2302)
+ * 1-bit: 68-75uS -- typically 70uS (AM2302)
+ * The actual timings also depend on the properties of the cable, with
+ * longer cables typically making pulses shorter.
+ *
+ * Our decoding depends on the time resolution of the system:
+ * timeres > 34uS ... don't know what a 1-tick pulse is
+ * 34uS > timeres > 30uS ... no problem (30kHz and 32kHz clocks)
+ * 30uS > timeres > 23uS ... don't know what a 2-tick pulse is
+ * timeres < 23uS ... no problem
+ *
+ * Luckily clocks in the 33-44kHz range are quite uncommon, so we can
+ * support most systems if the threshold for decoding a pulse as 1-bit
+ * is chosen carefully. If somebody really wants to support clocks around
+ * 40kHz, where this driver is most unreliable, there are two options:
+ * a) select an implementation using busy loop polling on those systems
+ * b) use the checksum to do some probabilistic decoding
+ */
#define DHT11_START_TRANSMISSION 18 /* ms */
-#define DHT11_SENSOR_RESPONSE 80000
-#define DHT11_START_BIT 50000
-#define DHT11_DATA_BIT_LOW 27000
-#define DHT11_DATA_BIT_HIGH 70000
+#define DHT11_MIN_TIMERES 34000 /* ns */
+#define DHT11_THRESHOLD 49000 /* ns */
+#define DHT11_AMBIG_LOW 23000 /* ns */
+#define DHT11_AMBIG_HIGH 30000 /* ns */
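+/*
+ * Worked example of the ambiguous window (a sketch using the figures
+ * above): with a 26us clock (inside the 23-30us window), a typical
+ * 26us 0-bit pulse can straddle two tick boundaries and be measured
+ * as 52us, which exceeds DHT11_THRESHOLD (49us) and is misread as a
+ * 1-bit. With a 32us clock, a 0-bit measures at most 32us and a 1-bit
+ * at least 64us, so the 49us threshold separates them cleanly.
+ */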
struct dht11 {
struct device *dev;
@@ -76,43 +96,39 @@ struct dht11 {
struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ];
};
-static unsigned char dht11_decode_byte(int *timing, int threshold)
+static unsigned char dht11_decode_byte(char *bits)
{
unsigned char ret = 0;
int i;
for (i = 0; i < 8; ++i) {
ret <<= 1;
- if (timing[i] >= threshold)
+ if (bits[i])
++ret;
}
return ret;
}
-static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
+static int dht11_decode(struct dht11 *dht11, int offset)
{
- int i, t, timing[DHT11_BITS_PER_READ], threshold;
+ int i, t;
+ char bits[DHT11_BITS_PER_READ];
unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
- threshold = DHT11_DATA_BIT_HIGH / timeres;
- if (DHT11_DATA_BIT_LOW / timeres + 1 >= threshold)
- pr_err("dht11: WARNING: decoding ambiguous\n");
-
- /* scale down with timeres and check validity */
for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
t = dht11->edges[offset + 2 * i + 2].ts -
dht11->edges[offset + 2 * i + 1].ts;
if (!dht11->edges[offset + 2 * i + 1].value)
return -EIO; /* lost synchronisation */
- timing[i] = t / timeres;
+ bits[i] = t > DHT11_THRESHOLD;
}
- hum_int = dht11_decode_byte(timing, threshold);
- hum_dec = dht11_decode_byte(&timing[8], threshold);
- temp_int = dht11_decode_byte(&timing[16], threshold);
- temp_dec = dht11_decode_byte(&timing[24], threshold);
- checksum = dht11_decode_byte(&timing[32], threshold);
+ hum_int = dht11_decode_byte(bits);
+ hum_dec = dht11_decode_byte(&bits[8]);
+ temp_int = dht11_decode_byte(&bits[16]);
+ temp_dec = dht11_decode_byte(&bits[24]);
+ checksum = dht11_decode_byte(&bits[32]);
if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
return -EIO;
@@ -161,12 +177,12 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
int *val, int *val2, long m)
{
struct dht11 *dht11 = iio_priv(iio_dev);
- int ret, timeres;
+ int ret, timeres, offset;
mutex_lock(&dht11->lock);
if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
timeres = ktime_get_resolution_ns();
- if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
+ if (timeres > DHT11_MIN_TIMERES) {
dev_err(dht11->dev, "timeresolution %dns too low\n",
timeres);
/* In theory a better clock could become available
@@ -176,6 +192,10 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
ret = -EAGAIN;
goto err;
}
+ if (timeres > DHT11_AMBIG_LOW && timeres < DHT11_AMBIG_HIGH)
+ dev_warn(dht11->dev,
+ "timeresolution: %dns - decoding ambiguous\n",
+ timeres);
reinit_completion(&dht11->completion);
@@ -208,11 +228,14 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
if (ret < 0)
goto err;
- ret = dht11_decode(dht11,
- dht11->num_edges == DHT11_EDGES_PER_READ ?
- DHT11_EDGES_PREAMBLE :
- DHT11_EDGES_PREAMBLE - 2,
- timeres);
+ offset = DHT11_EDGES_PREAMBLE +
+ dht11->num_edges - DHT11_EDGES_PER_READ;
+ for (; offset >= 0; --offset) {
+ ret = dht11_decode(dht11, offset);
+ if (!ret)
+ break;
+ }
+
if (ret)
goto err;
}
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index a7f61e881a49..fa4767613173 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -274,7 +274,7 @@ static int hdc100x_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/humidity/htu21.c b/drivers/iio/humidity/htu21.c
index d1636a74980e..11cbc38b450f 100644
--- a/drivers/iio/humidity/htu21.c
+++ b/drivers/iio/humidity/htu21.c
@@ -192,7 +192,7 @@ static int htu21_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
dev_err(&client->dev,
"Adapter does not support some i2c transaction\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index 91972ccd8aaf..6297766e93d0 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -135,7 +135,7 @@ static int si7005_probe(struct i2c_client *client,
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
@@ -170,6 +170,7 @@ static int si7005_probe(struct i2c_client *client,
static const struct i2c_device_id si7005_id[] = {
{ "si7005", 0 },
+ { "th02", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, si7005_id);
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index 71991b5c0658..ffc2ccf6374e 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -121,7 +121,7 @@ static int si7020_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_BYTE |
I2C_FUNC_SMBUS_READ_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
/* Reset device, loads default settings. */
ret = i2c_smbus_write_byte(client, SI7020CMD_RESET);
@@ -149,6 +149,7 @@ static int si7020_probe(struct i2c_client *client,
static const struct i2c_device_id si7020_id[] = {
{ "si7020", 0 },
+ { "th06", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, si7020_id);
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 8f8d1370ed8b..847455a2d6bb 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -3,15 +3,30 @@
#
config INV_MPU6050_IIO
- tristate "Invensense MPU6050 devices"
- depends on I2C && SYSFS
- depends on I2C_MUX
+ tristate
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+
+config INV_MPU6050_I2C
+ tristate "Invensense MPU6050 devices (I2C)"
+ depends on I2C_MUX
+ select INV_MPU6050_IIO
+ select REGMAP_I2C
help
This driver supports the Invensense MPU6050 devices.
This driver can also support MPU6500 in MPU6050 compatibility mode
and also in MPU6500 mode with some limitations.
It is a gyroscope/accelerometer combo device.
This driver can be built as a module. The module will be called
- inv-mpu6050.
+ inv-mpu6050-i2c.
+
+config INV_MPU6050_SPI
+ tristate "Invensense MPU6050 devices (SPI)"
+ depends on SPI_MASTER
+ select INV_MPU6050_IIO
+ select REGMAP_SPI
+ help
+ This driver supports the Invensense MPU6050 devices.
+ It is a gyroscope/accelerometer combo device.
+ This driver can be built as a module. The module will be called
+ inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/Makefile b/drivers/iio/imu/inv_mpu6050/Makefile
index f566f6a7b3a9..734af5e6cef9 100644
--- a/drivers/iio/imu/inv_mpu6050/Makefile
+++ b/drivers/iio/imu/inv_mpu6050/Makefile
@@ -3,4 +3,10 @@
#
obj-$(CONFIG_INV_MPU6050_IIO) += inv-mpu6050.o
-inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o inv_mpu_acpi.o
+inv-mpu6050-objs := inv_mpu_core.o inv_mpu_ring.o inv_mpu_trigger.o
+
+obj-$(CONFIG_INV_MPU6050_I2C) += inv-mpu6050-i2c.o
+inv-mpu6050-i2c-objs := inv_mpu_i2c.o inv_mpu_acpi.o
+
+obj-$(CONFIG_INV_MPU6050_SPI) += inv-mpu6050-spi.o
+inv-mpu6050-spi-objs := inv_mpu_spi.o
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 1c982a56acd5..2771106fd650 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -66,11 +66,11 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
union acpi_object *elem;
int j;
- elem = &(cpm->package.elements[i]);
+ elem = &cpm->package.elements[i];
for (j = 0; j < elem->package.count; ++j) {
union acpi_object *sub_elem;
- sub_elem = &(elem->package.elements[j]);
+ sub_elem = &elem->package.elements[j];
if (sub_elem->type == ACPI_TYPE_STRING)
strlcpy(info->type, sub_elem->string.pointer,
sizeof(info->type));
@@ -139,22 +139,23 @@ static int inv_mpu_process_acpi_config(struct i2c_client *client,
return 0;
}
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
st->mux_client = NULL;
- if (ACPI_HANDLE(&st->client->dev)) {
+ if (ACPI_HANDLE(&client->dev)) {
struct i2c_board_info info;
struct acpi_device *adev;
int ret = -1;
- adev = ACPI_COMPANION(&st->client->dev);
+ adev = ACPI_COMPANION(&client->dev);
memset(&info, 0, sizeof(info));
dmi_check_system(inv_mpu_dev_list);
switch (matched_product_name) {
case INV_MPU_ASUS_T100TA:
- ret = asus_acpi_get_sensor_info(adev, st->client,
+ ret = asus_acpi_get_sensor_info(adev, client,
&info);
break;
/* Add more matched product processing here */
@@ -166,7 +167,7 @@ int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
/* No matching DMI, so create device on INV6XX type */
unsigned short primary, secondary;
- ret = inv_mpu_process_acpi_config(st->client, &primary,
+ ret = inv_mpu_process_acpi_config(client, &primary,
&secondary);
if (!ret && secondary) {
char *name;
@@ -185,14 +186,15 @@ int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
st->mux_client = i2c_new_device(st->mux_adapter, &info);
if (!st->mux_client)
return -ENODEV;
-
}
return 0;
}
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st)
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client)
{
+ struct inv_mpu6050_state *st = iio_priv(dev_get_drvdata(&client->dev));
+
if (st->mux_client)
i2c_unregister_device(st->mux_client);
}
@@ -200,12 +202,12 @@ void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st)
#include "inv_mpu_iio.h"
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st)
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
{
return 0;
}
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st)
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client)
{
}
#endif
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f0e06093b5e8..d192953e9a38 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -39,6 +39,26 @@ static const int gyro_scale_6050[] = {133090, 266181, 532362, 1064724};
*/
static const int accel_scale[] = {598, 1196, 2392, 4785};
+static const struct inv_mpu6050_reg_map reg_set_6500 = {
+ .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
+ .lpf = INV_MPU6050_REG_CONFIG,
+ .user_ctrl = INV_MPU6050_REG_USER_CTRL,
+ .fifo_en = INV_MPU6050_REG_FIFO_EN,
+ .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
+ .accl_config = INV_MPU6050_REG_ACCEL_CONFIG,
+ .fifo_count_h = INV_MPU6050_REG_FIFO_COUNT_H,
+ .fifo_r_w = INV_MPU6050_REG_FIFO_R_W,
+ .raw_gyro = INV_MPU6050_REG_RAW_GYRO,
+ .raw_accl = INV_MPU6050_REG_RAW_ACCEL,
+ .temperature = INV_MPU6050_REG_TEMPERATURE,
+ .int_enable = INV_MPU6050_REG_INT_ENABLE,
+ .pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
+ .pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
+ .int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
+ .accl_offset = INV_MPU6500_REG_ACCEL_OFFSET,
+ .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET,
+};
+
static const struct inv_mpu6050_reg_map reg_set_6050 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG,
@@ -55,6 +75,8 @@ static const struct inv_mpu6050_reg_map reg_set_6050 = {
.pwr_mgmt_1 = INV_MPU6050_REG_PWR_MGMT_1,
.pwr_mgmt_2 = INV_MPU6050_REG_PWR_MGMT_2,
.int_pin_cfg = INV_MPU6050_REG_INT_PIN_CFG,
+ .accl_offset = INV_MPU6050_REG_ACCEL_OFFSET,
+ .gyro_offset = INV_MPU6050_REG_GYRO_OFFSET,
};
static const struct inv_mpu6050_chip_config chip_config_6050 = {
@@ -66,7 +88,13 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.accl_fs = INV_MPU6050_FS_02G,
};
-static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = {
+static const struct inv_mpu6050_hw hw_info[] = {
+ {
+ .num_reg = 117,
+ .name = "MPU6500",
+ .reg = &reg_set_6500,
+ .config = &chip_config_6050,
+ },
{
.num_reg = 117,
.name = "MPU6050",
@@ -75,134 +103,53 @@ static const struct inv_mpu6050_hw hw_info[INV_NUM_PARTS] = {
},
};
-int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 d)
-{
- return i2c_smbus_write_i2c_block_data(st->client, reg, 1, &d);
-}
-
-/*
- * The i2c read/write needs to happen in unlocked mode. As the parent
- * adapter is common. If we use locked versions, it will fail as
- * the mux adapter will lock the parent i2c adapter, while calling
- * select/deselect functions.
- */
-static int inv_mpu6050_write_reg_unlocked(struct inv_mpu6050_state *st,
- u8 reg, u8 d)
-{
- int ret;
- u8 buf[2];
- struct i2c_msg msg[1] = {
- {
- .addr = st->client->addr,
- .flags = 0,
- .len = sizeof(buf),
- .buf = buf,
- }
- };
-
- buf[0] = reg;
- buf[1] = d;
- ret = __i2c_transfer(st->client->adapter, msg, 1);
- if (ret != 1)
- return ret;
-
- return 0;
-}
-
-static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
- u32 chan_id)
-{
- struct iio_dev *indio_dev = mux_priv;
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
- int ret = 0;
-
- /* Use the same mutex which was used everywhere to protect power-op */
- mutex_lock(&indio_dev->mlock);
- if (!st->powerup_count) {
- ret = inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1,
- 0);
- if (ret)
- goto write_error;
-
- msleep(INV_MPU6050_REG_UP_TIME);
- }
- if (!ret) {
- st->powerup_count++;
- ret = inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg,
- st->client->irq |
- INV_MPU6050_BIT_BYPASS_EN);
- }
-write_error:
- mutex_unlock(&indio_dev->mlock);
-
- return ret;
-}
-
-static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
- void *mux_priv, u32 chan_id)
-{
- struct iio_dev *indio_dev = mux_priv;
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
-
- mutex_lock(&indio_dev->mlock);
- /* It doesn't really mattter, if any of the calls fails */
- inv_mpu6050_write_reg_unlocked(st, st->reg->int_pin_cfg,
- st->client->irq);
- st->powerup_count--;
- if (!st->powerup_count)
- inv_mpu6050_write_reg_unlocked(st, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_SLEEP);
- mutex_unlock(&indio_dev->mlock);
-
- return 0;
-}
-
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
{
- u8 d, mgmt_1;
+ unsigned int d, mgmt_1;
int result;
-
- /* switch clock needs to be careful. Only when gyro is on, can
- clock source be switched to gyro. Otherwise, it must be set to
- internal clock */
- if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
- result = i2c_smbus_read_i2c_block_data(st->client,
- st->reg->pwr_mgmt_1, 1, &mgmt_1);
- if (result != 1)
+ /*
+ * Switching the clock source needs care: only when the gyro is on
+ * can the clock source be switched to the gyro. Otherwise, it must
+ * be set to the internal clock.
+ */
+ if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) {
+ result = regmap_read(st->map, st->reg->pwr_mgmt_1, &mgmt_1);
+ if (result)
return result;
mgmt_1 &= ~INV_MPU6050_BIT_CLK_MASK;
}
- if ((INV_MPU6050_BIT_PWR_GYRO_STBY == mask) && (!en)) {
- /* turning off gyro requires switch to internal clock first.
- Then turn off gyro engine */
+ if ((mask == INV_MPU6050_BIT_PWR_GYRO_STBY) && (!en)) {
+ /*
+ * Turning off the gyro requires switching to the internal clock
+ * first; only then can the gyro engine be turned off.
+ */
mgmt_1 |= INV_CLK_INTERNAL;
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1, mgmt_1);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_1, mgmt_1);
if (result)
return result;
}
- result = i2c_smbus_read_i2c_block_data(st->client,
- st->reg->pwr_mgmt_2, 1, &d);
- if (result != 1)
+ result = regmap_read(st->map, st->reg->pwr_mgmt_2, &d);
+ if (result)
return result;
if (en)
d &= ~mask;
else
d |= mask;
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_2, d);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_2, d);
if (result)
return result;
if (en) {
/* Wait for output stabilize */
msleep(INV_MPU6050_TEMP_UP_TIME);
- if (INV_MPU6050_BIT_PWR_GYRO_STBY == mask) {
+ if (mask == INV_MPU6050_BIT_PWR_GYRO_STBY) {
/* switch internal clock to PLL */
mgmt_1 |= INV_CLK_PLL;
- result = inv_mpu6050_write_reg(st,
- st->reg->pwr_mgmt_1, mgmt_1);
+ result = regmap_write(st->map,
+ st->reg->pwr_mgmt_1, mgmt_1);
if (result)
return result;
}
@@ -218,25 +165,26 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
if (power_on) {
/* Already under indio-dev->mlock mutex */
if (!st->powerup_count)
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
- 0);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
if (!result)
st->powerup_count++;
} else {
st->powerup_count--;
if (!st->powerup_count)
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_SLEEP);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
}
if (result)
return result;
if (power_on)
- msleep(INV_MPU6050_REG_UP_TIME);
+ usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
+ INV_MPU6050_REG_UP_TIME_MAX);
return 0;
}
+EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
/**
* inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
@@ -257,59 +205,73 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result)
return result;
d = (INV_MPU6050_FSR_2000DPS << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
+ result = regmap_write(st->map, st->reg->gyro_config, d);
if (result)
return result;
d = INV_MPU6050_FILTER_20HZ;
- result = inv_mpu6050_write_reg(st, st->reg->lpf, d);
+ result = regmap_write(st->map, st->reg->lpf, d);
if (result)
return result;
d = INV_MPU6050_ONE_K_HZ / INV_MPU6050_INIT_FIFO_RATE - 1;
- result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+ result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
return result;
d = (INV_MPU6050_FS_02G << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
+ result = regmap_write(st->map, st->reg->accl_config, d);
if (result)
return result;
memcpy(&st->chip_config, hw_info[st->chip_type].config,
- sizeof(struct inv_mpu6050_chip_config));
+ sizeof(struct inv_mpu6050_chip_config));
result = inv_mpu6050_set_power_itg(st, false);
return result;
}
+static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg,
+ int axis, int val)
+{
+ int ind, result;
+ __be16 d = cpu_to_be16(val);
+
+ ind = (axis - IIO_MOD_X) * 2;
+ result = regmap_bulk_write(st->map, reg + ind, (u8 *)&d, 2);
+ if (result)
+ return -EINVAL;
+
+ return 0;
+}
+
static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg,
- int axis, int *val)
+ int axis, int *val)
{
int ind, result;
__be16 d;
ind = (axis - IIO_MOD_X) * 2;
- result = i2c_smbus_read_i2c_block_data(st->client, reg + ind, 2,
- (u8 *)&d);
- if (result != 2)
+ result = regmap_bulk_read(st->map, reg + ind, (u8 *)&d, 2);
+ if (result)
return -EINVAL;
*val = (short)be16_to_cpup(&d);
return IIO_VAL_INT;
}
-static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long mask) {
+static int
+inv_mpu6050_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
{
- int ret, result;
+ int result;
ret = IIO_VAL_INT;
result = 0;
@@ -323,16 +285,16 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ANGL_VEL:
if (!st->chip_config.gyro_fifo_enable ||
- !st->chip_config.enable) {
+ !st->chip_config.enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
goto error_read_raw;
}
- ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
- chan->channel2, val);
+ ret = inv_mpu6050_sensor_show(st, st->reg->raw_gyro,
+ chan->channel2, val);
if (!st->chip_config.gyro_fifo_enable ||
- !st->chip_config.enable) {
+ !st->chip_config.enable) {
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
@@ -341,16 +303,16 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
break;
case IIO_ACCEL:
if (!st->chip_config.accl_fifo_enable ||
- !st->chip_config.enable) {
+ !st->chip_config.enable) {
result = inv_mpu6050_switch_engine(st, true,
INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
goto error_read_raw;
}
ret = inv_mpu6050_sensor_show(st, st->reg->raw_accl,
- chan->channel2, val);
+ chan->channel2, val);
if (!st->chip_config.accl_fifo_enable ||
- !st->chip_config.enable) {
+ !st->chip_config.enable) {
result = inv_mpu6050_switch_engine(st, false,
INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
@@ -360,8 +322,8 @@ static int inv_mpu6050_read_raw(struct iio_dev *indio_dev,
case IIO_TEMP:
/* wait for stabilization */
msleep(INV_MPU6050_SENSOR_UP_TIME);
- inv_mpu6050_sensor_show(st, st->reg->temperature,
- IIO_MOD_X, val);
+ ret = inv_mpu6050_sensor_show(st, st->reg->temperature,
+ IIO_MOD_X, val);
break;
default:
ret = -EINVAL;
@@ -405,6 +367,20 @@ error_read_raw:
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
+ chan->channel2, val);
+ return IIO_VAL_INT;
+ case IIO_ACCEL:
+ ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
+ chan->channel2, val);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -418,8 +394,7 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
if (gyro_scale_6050[i] == val) {
d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st,
- st->reg->gyro_config, d);
+ result = regmap_write(st->map, st->reg->gyro_config, d);
if (result)
return result;
@@ -448,6 +423,7 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
return -EINVAL;
}
+
static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
{
int result, i;
@@ -456,8 +432,7 @@ static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
if (accel_scale[i] == val) {
d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
- result = inv_mpu6050_write_reg(st,
- st->reg->accl_config, d);
+ result = regmap_write(st->map, st->reg->accl_config, d);
if (result)
return result;
@@ -470,16 +445,17 @@ static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
}
static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask) {
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
struct inv_mpu6050_state *st = iio_priv(indio_dev);
int result;
mutex_lock(&indio_dev->mlock);
- /* we should only update scale when the chip is disabled, i.e.,
- not running */
+ /*
+ * We should only update the scale when the chip is disabled,
+ * i.e. not running.
+ */
if (st->chip_config.enable) {
result = -EBUSY;
goto error_write_raw;
@@ -502,6 +478,21 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
break;
}
break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ result = inv_mpu6050_sensor_set(st,
+ st->reg->gyro_offset,
+ chan->channel2, val);
+ break;
+ case IIO_ACCEL:
+ result = inv_mpu6050_sensor_set(st,
+ st->reg->accl_offset,
+ chan->channel2, val);
+ break;
+ default:
+ result = -EINVAL;
+ }
default:
result = -EINVAL;
break;
@@ -537,7 +528,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
i++;
data = d[i];
- result = inv_mpu6050_write_reg(st, st->reg->lpf, data);
+ result = regmap_write(st->map, st->reg->lpf, data);
if (result)
return result;
st->chip_config.lpf = data;
@@ -548,8 +539,9 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
/**
* inv_mpu6050_fifo_rate_store() - Set fifo rate.
*/
-static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t
+inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
s32 fifo_rate;
u8 d;
@@ -560,7 +552,7 @@ static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
if (kstrtoint(buf, 10, &fifo_rate))
return -EINVAL;
if (fifo_rate < INV_MPU6050_MIN_FIFO_RATE ||
- fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
+ fifo_rate > INV_MPU6050_MAX_FIFO_RATE)
return -EINVAL;
if (fifo_rate == st->chip_config.fifo_rate)
return count;
@@ -575,7 +567,7 @@ static ssize_t inv_mpu6050_fifo_rate_store(struct device *dev,
goto fifo_rate_fail;
d = INV_MPU6050_ONE_K_HZ / fifo_rate - 1;
- result = inv_mpu6050_write_reg(st, st->reg->sample_rate_div, d);
+ result = regmap_write(st->map, st->reg->sample_rate_div, d);
if (result)
goto fifo_rate_fail;
st->chip_config.fifo_rate = fifo_rate;
@@ -596,8 +588,9 @@ fifo_rate_fail:
/**
* inv_fifo_rate_show() - Get the current sampling rate.
*/
-static ssize_t inv_fifo_rate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t
+inv_fifo_rate_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
@@ -608,16 +601,18 @@ static ssize_t inv_fifo_rate_show(struct device *dev,
* inv_attr_show() - calling this function will show current
* parameters.
*/
-static ssize_t inv_attr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t inv_attr_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct inv_mpu6050_state *st = iio_priv(dev_to_iio_dev(dev));
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
s8 *m;
switch (this_attr->address) {
- /* In MPU6050, the two matrix are the same because gyro and accel
- are integrated in one chip */
+ /*
+ * In MPU6050, the two matrices are the same because the gyro and
+ * accel are integrated in one chip
+ */
case ATTR_GYRO_MATRIX:
case ATTR_ACCL_MATRIX:
m = st->plat_data.orientation;
@@ -654,14 +649,15 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
.type = _type, \
.modified = 1, \
.channel2 = _channel2, \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
.scan_index = _index, \
.scan_type = { \
.sign = 's', \
.realbits = 16, \
.storagebits = 16, \
- .shift = 0 , \
+ .shift = 0, \
.endianness = IIO_BE, \
}, \
}
@@ -674,7 +670,7 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
*/
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
| BIT(IIO_CHAN_INFO_OFFSET)
| BIT(IIO_CHAN_INFO_SCALE),
.scan_index = -1,
@@ -727,25 +723,25 @@ static const struct iio_info mpu_info = {
/**
* inv_check_and_setup_chip() - check and setup chip.
*/
-static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
- const struct i2c_device_id *id)
+static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
{
int result;
- st->chip_type = INV_MPU6050;
st->hw = &hw_info[st->chip_type];
st->reg = hw_info[st->chip_type].reg;
/* reset to make sure previous state are not there */
- result = inv_mpu6050_write_reg(st, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_H_RESET);
+ result = regmap_write(st->map, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_H_RESET);
if (result)
return result;
msleep(INV_MPU6050_POWER_UP_TIME);
- /* toggle power state. After reset, the sleep bit could be on
- or off depending on the OTP settings. Toggling power would
- make it in a definite state as well as making the hardware
- state align with the software state */
+ /*
+ * Toggle the power state. After reset, the sleep bit could be on
+ * or off depending on the OTP settings. Toggling power puts the
+ * chip in a definite state and aligns the hardware state with the
+ * software state.
+ */
result = inv_mpu6050_set_power_itg(st, false);
if (result)
return result;
@@ -754,65 +750,59 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st,
return result;
result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_ACCL_STBY);
+ INV_MPU6050_BIT_PWR_ACCL_STBY);
if (result)
return result;
result = inv_mpu6050_switch_engine(st, false,
- INV_MPU6050_BIT_PWR_GYRO_STBY);
+ INV_MPU6050_BIT_PWR_GYRO_STBY);
if (result)
return result;
return 0;
}
-/**
- * inv_mpu_probe() - probe function.
- * @client: i2c client.
- * @id: i2c device id.
- *
- * Returns 0 on success, a negative error code otherwise.
- */
-static int inv_mpu_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
+ int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type)
{
struct inv_mpu6050_state *st;
struct iio_dev *indio_dev;
struct inv_mpu6050_platform_data *pdata;
+ struct device *dev = regmap_get_device(regmap);
int result;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_I2C_BLOCK))
- return -ENOSYS;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->client = client;
+ st->chip_type = chip_type;
st->powerup_count = 0;
- pdata = dev_get_platdata(&client->dev);
+ st->irq = irq;
+ st->map = regmap;
+ pdata = dev_get_platdata(dev);
if (pdata)
st->plat_data = *pdata;
/* power is turned on inside check chip type */
- result = inv_check_and_setup_chip(st, id);
+ result = inv_check_and_setup_chip(st);
if (result)
return result;
+ if (inv_mpu_bus_setup)
+ inv_mpu_bus_setup(indio_dev);
+
result = inv_mpu6050_init_config(indio_dev);
if (result) {
- dev_err(&client->dev,
- "Could not initialize device.\n");
+ dev_err(dev, "Could not initialize device.\n");
return result;
}
- i2c_set_clientdata(client, indio_dev);
- indio_dev->dev.parent = &client->dev;
- /* id will be NULL when enumerated via ACPI */
- if (id)
- indio_dev->name = (char *)id->name;
+ dev_set_drvdata(dev, indio_dev);
+ indio_dev->dev.parent = dev;
+ /* name will be NULL when enumerated via ACPI */
+ if (name)
+ indio_dev->name = name;
else
- indio_dev->name = (char *)dev_name(&client->dev);
+ indio_dev->name = dev_name(dev);
indio_dev->channels = inv_mpu_channels;
indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
@@ -824,13 +814,12 @@ static int inv_mpu_probe(struct i2c_client *client,
inv_mpu6050_read_fifo,
NULL);
if (result) {
- dev_err(&st->client->dev, "configure buffer fail %d\n",
- result);
+ dev_err(dev, "configure buffer fail %d\n", result);
return result;
}
result = inv_mpu6050_probe_trigger(indio_dev);
if (result) {
- dev_err(&st->client->dev, "trigger probe fail %d\n", result);
+ dev_err(dev, "trigger probe fail %d\n", result);
goto out_unreg_ring;
}
@@ -838,102 +827,47 @@ static int inv_mpu_probe(struct i2c_client *client,
spin_lock_init(&st->time_stamp_lock);
result = iio_device_register(indio_dev);
if (result) {
- dev_err(&st->client->dev, "IIO register fail %d\n", result);
+ dev_err(dev, "IIO register fail %d\n", result);
goto out_remove_trigger;
}
- st->mux_adapter = i2c_add_mux_adapter(client->adapter,
- &client->dev,
- indio_dev,
- 0, 0, 0,
- inv_mpu6050_select_bypass,
- inv_mpu6050_deselect_bypass);
- if (!st->mux_adapter) {
- result = -ENODEV;
- goto out_unreg_device;
- }
-
- result = inv_mpu_acpi_create_mux_client(st);
- if (result)
- goto out_del_mux;
-
return 0;
-out_del_mux:
- i2c_del_mux_adapter(st->mux_adapter);
-out_unreg_device:
- iio_device_unregister(indio_dev);
out_remove_trigger:
inv_mpu6050_remove_trigger(st);
out_unreg_ring:
iio_triggered_buffer_cleanup(indio_dev);
return result;
}
+EXPORT_SYMBOL_GPL(inv_mpu_core_probe);
-static int inv_mpu_remove(struct i2c_client *client)
+int inv_mpu_core_remove(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
- inv_mpu_acpi_delete_mux_client(st);
- i2c_del_mux_adapter(st->mux_adapter);
iio_device_unregister(indio_dev);
- inv_mpu6050_remove_trigger(st);
+ inv_mpu6050_remove_trigger(iio_priv(indio_dev));
iio_triggered_buffer_cleanup(indio_dev);
return 0;
}
+EXPORT_SYMBOL_GPL(inv_mpu_core_remove);
+
#ifdef CONFIG_PM_SLEEP
static int inv_mpu_resume(struct device *dev)
{
- return inv_mpu6050_set_power_itg(
- iio_priv(i2c_get_clientdata(to_i2c_client(dev))), true);
+ return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), true);
}
static int inv_mpu_suspend(struct device *dev)
{
- return inv_mpu6050_set_power_itg(
- iio_priv(i2c_get_clientdata(to_i2c_client(dev))), false);
+ return inv_mpu6050_set_power_itg(iio_priv(dev_get_drvdata(dev)), false);
}
-static SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
-
-#define INV_MPU6050_PMOPS (&inv_mpu_pmops)
-#else
-#define INV_MPU6050_PMOPS NULL
#endif /* CONFIG_PM_SLEEP */
-/*
- * device id table is used to identify what device can be
- * supported by this driver
- */
-static const struct i2c_device_id inv_mpu_id[] = {
- {"mpu6050", INV_MPU6050},
- {"mpu6500", INV_MPU6500},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
-
-static const struct acpi_device_id inv_acpi_match[] = {
- {"INVN6500", 0},
- { },
-};
-
-MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
-
-static struct i2c_driver inv_mpu_driver = {
- .probe = inv_mpu_probe,
- .remove = inv_mpu_remove,
- .id_table = inv_mpu_id,
- .driver = {
- .name = "inv-mpu6050",
- .pm = INV_MPU6050_PMOPS,
- .acpi_match_table = ACPI_PTR(inv_acpi_match),
- },
-};
-
-module_i2c_driver(inv_mpu_driver);
+SIMPLE_DEV_PM_OPS(inv_mpu_pmops, inv_mpu_suspend, inv_mpu_resume);
+EXPORT_SYMBOL_GPL(inv_mpu_pmops);
MODULE_AUTHOR("Invensense Corporation");
MODULE_DESCRIPTION("Invensense device MPU6050 driver");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
new file mode 100644
index 000000000000..f581256d9d4c
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -0,0 +1,208 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include "inv_mpu_iio.h"
+
+static const struct regmap_config inv_mpu_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+/*
+ * The i2c reads/writes must use the unlocked transfer primitives,
+ * because the parent adapter is shared: the mux adapter already holds
+ * the parent i2c adapter's lock while calling the select/deselect
+ * functions, so the locked versions would deadlock.
+ */
+static int inv_mpu6050_write_reg_unlocked(struct i2c_client *client,
+ u8 reg, u8 d)
+{
+ int ret;
+ u8 buf[2] = {reg, d};
+ struct i2c_msg msg[1] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(buf),
+ .buf = buf,
+ }
+ };
+
+ ret = __i2c_transfer(client->adapter, msg, 1);
+ if (ret != 1)
+ return ret;
+
+ return 0;
+}
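A read under the same constraint must likewise bypass the adapter lock. A sketch of the unlocked-read counterpart — hypothetical, not part of the patch — assuming the mux already holds the parent adapter's lock:

	/* Hypothetical unlocked read, mirroring the write above. */
	static int inv_mpu6050_read_reg_unlocked(struct i2c_client *client,
						 u8 reg, u8 *val)
	{
		int ret;
		struct i2c_msg msg[2] = {
			{
				.addr = client->addr,
				.flags = 0,
				.len = 1,
				.buf = &reg,
			},
			{
				.addr = client->addr,
				.flags = I2C_M_RD,
				.len = 1,
				.buf = val,
			},
		};

		/* __i2c_transfer() skips the lock the mux already holds */
		ret = __i2c_transfer(client->adapter, msg, 2);
		if (ret != 2)
			return ret < 0 ? ret : -EIO;

		return 0;
	}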
+
+static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
+ u32 chan_id)
+{
+ struct i2c_client *client = mux_priv;
+ struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ /* Use the same mutex used elsewhere to protect the power state */
+ mutex_lock(&indio_dev->mlock);
+ if (!st->powerup_count) {
+ ret = inv_mpu6050_write_reg_unlocked(client,
+ st->reg->pwr_mgmt_1, 0);
+ if (ret)
+ goto write_error;
+
+ usleep_range(INV_MPU6050_REG_UP_TIME_MIN,
+ INV_MPU6050_REG_UP_TIME_MAX);
+ }
+ if (!ret) {
+ st->powerup_count++;
+ ret = inv_mpu6050_write_reg_unlocked(client,
+ st->reg->int_pin_cfg,
+ INV_MPU6050_INT_PIN_CFG |
+ INV_MPU6050_BIT_BYPASS_EN);
+ }
+write_error:
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
+ void *mux_priv, u32 chan_id)
+{
+ struct i2c_client *client = mux_priv;
+ struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ mutex_lock(&indio_dev->mlock);
+ /* It doesn't really matter if any of these calls fails */
+ inv_mpu6050_write_reg_unlocked(client, st->reg->int_pin_cfg,
+ INV_MPU6050_INT_PIN_CFG);
+ st->powerup_count--;
+ if (!st->powerup_count)
+ inv_mpu6050_write_reg_unlocked(client, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
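The powerup_count reference count means only the 0->1 transition wakes the chip and only the 1->0 transition puts it back to sleep, so nested or back-to-back select calls share a single power-up. Hypothetical helpers (not in the patch) that capture the counting discipline the two callbacks above inline:

	/* Illustrative only: select/deselect above open-code this pattern. */
	static int inv_mpu_power_get(struct inv_mpu6050_state *st,
				     struct i2c_client *client)
	{
		if (st->powerup_count++ == 0)	/* 0 -> 1: wake the chip */
			return inv_mpu6050_write_reg_unlocked(client,
							      st->reg->pwr_mgmt_1, 0);
		return 0;
	}

	static void inv_mpu_power_put(struct inv_mpu6050_state *st,
				      struct i2c_client *client)
	{
		if (--st->powerup_count == 0)	/* 1 -> 0: back to sleep */
			inv_mpu6050_write_reg_unlocked(client, st->reg->pwr_mgmt_1,
						       INV_MPU6050_BIT_SLEEP);
	}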
+
+/**
+ * inv_mpu_probe() - probe function.
+ * @client: i2c client.
+ * @id: i2c device id.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int inv_mpu_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct inv_mpu6050_state *st;
+ int result;
+ const char *name = id ? id->name : NULL;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EOPNOTSUPP;
+
+ regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ result = inv_mpu_core_probe(regmap, client->irq, name,
+ NULL, id->driver_data);
+ if (result < 0)
+ return result;
+
+ st = iio_priv(dev_get_drvdata(&client->dev));
+ st->mux_adapter = i2c_add_mux_adapter(client->adapter,
+ &client->dev,
+ client,
+ 0, 0, 0,
+ inv_mpu6050_select_bypass,
+ inv_mpu6050_deselect_bypass);
+ if (!st->mux_adapter) {
+ result = -ENODEV;
+ goto out_unreg_device;
+ }
+
+ result = inv_mpu_acpi_create_mux_client(client);
+ if (result)
+ goto out_del_mux;
+
+ return 0;
+
+out_del_mux:
+ i2c_del_mux_adapter(st->mux_adapter);
+out_unreg_device:
+ inv_mpu_core_remove(&client->dev);
+ return result;
+}
+
+static int inv_mpu_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+
+ inv_mpu_acpi_delete_mux_client(client);
+ i2c_del_mux_adapter(st->mux_adapter);
+
+ return inv_mpu_core_remove(&client->dev);
+}
+
+/*
+ * Device id table used to identify the devices supported by
+ * this driver.
+ */
+static const struct i2c_device_id inv_mpu_id[] = {
+ {"mpu6050", INV_MPU6050},
+ {"mpu6500", INV_MPU6500},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
+
+static const struct acpi_device_id inv_acpi_match[] = {
+ {"INVN6500", 0},
+ { },
+};
+
+MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
+
+static struct i2c_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(inv_acpi_match),
+ .name = "inv-mpu6050-i2c",
+ .pm = &inv_mpu_pmops,
+ },
+};
+
+module_i2c_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Invensense Corporation");
+MODULE_DESCRIPTION("Invensense device MPU6050 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index db0a4a2758ab..e302a49703bf 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/regmap.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
@@ -38,6 +39,9 @@
* @int_enable: Interrupt enable register.
* @pwr_mgmt_1: Controls chip's power state and clock source.
* @pwr_mgmt_2: Controls power state of individual sensors.
+ * @int_pin_cfg: Controls interrupt pin configuration.
+ * @accl_offset: Controls the accelerometer calibration offset.
+ * @gyro_offset: Controls the gyroscope calibration offset.
*/
struct inv_mpu6050_reg_map {
u8 sample_rate_div;
@@ -55,12 +59,15 @@ struct inv_mpu6050_reg_map {
u8 pwr_mgmt_1;
u8 pwr_mgmt_2;
u8 int_pin_cfg;
+ u8 accl_offset;
+ u8 gyro_offset;
};
/*device enum */
enum inv_devices {
INV_MPU6050,
INV_MPU6500,
+ INV_MPU6000,
INV_NUM_PARTS
};
@@ -107,9 +114,10 @@ struct inv_mpu6050_hw {
* @hw: Other hardware-specific information.
* @chip_type: chip type.
* @time_stamp_lock: spin lock to time stamp.
- * @client: i2c client handle.
* @plat_data: platform data.
* @timestamps: kfifo queue to store time stamp.
+ * @map: regmap pointer.
+ * @irq: interrupt number.
*/
struct inv_mpu6050_state {
#define TIMESTAMP_FIFO_SIZE 16
@@ -119,15 +127,19 @@ struct inv_mpu6050_state {
const struct inv_mpu6050_hw *hw;
enum inv_devices chip_type;
spinlock_t time_stamp_lock;
- struct i2c_client *client;
struct i2c_adapter *mux_adapter;
struct i2c_client *mux_client;
unsigned int powerup_count;
struct inv_mpu6050_platform_data plat_data;
DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
+ struct regmap *map;
+ int irq;
};
/*register and associated bit definition*/
+#define INV_MPU6050_REG_ACCEL_OFFSET 0x06
+#define INV_MPU6050_REG_GYRO_OFFSET 0x13
+
#define INV_MPU6050_REG_SAMPLE_RATE_DIV 0x19
#define INV_MPU6050_REG_CONFIG 0x1A
#define INV_MPU6050_REG_GYRO_CONFIG 0x1B
@@ -151,6 +163,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BIT_I2C_MST_EN 0x20
#define INV_MPU6050_BIT_FIFO_EN 0x40
#define INV_MPU6050_BIT_DMP_EN 0x80
+#define INV_MPU6050_BIT_I2C_IF_DIS 0x10
#define INV_MPU6050_REG_PWR_MGMT_1 0x6B
#define INV_MPU6050_BIT_H_RESET 0x80
@@ -167,10 +180,18 @@ struct inv_mpu6050_state {
#define INV_MPU6050_BYTES_PER_3AXIS_SENSOR 6
#define INV_MPU6050_FIFO_COUNT_BYTE 2
#define INV_MPU6050_FIFO_THRESHOLD 500
+
+/* mpu6500 registers */
+#define INV_MPU6500_REG_ACCEL_OFFSET 0x77
+
+/* delay time in milliseconds */
#define INV_MPU6050_POWER_UP_TIME 100
#define INV_MPU6050_TEMP_UP_TIME 100
#define INV_MPU6050_SENSOR_UP_TIME 30
-#define INV_MPU6050_REG_UP_TIME 5
+
+/* delay time in microseconds */
+#define INV_MPU6050_REG_UP_TIME_MIN 5000
+#define INV_MPU6050_REG_UP_TIME_MAX 10000
#define INV_MPU6050_TEMP_OFFSET 12421
#define INV_MPU6050_TEMP_SCALE 2941
@@ -185,6 +206,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_REG_INT_PIN_CFG 0x37
#define INV_MPU6050_BIT_BYPASS_EN 0x2
+#define INV_MPU6050_INT_PIN_CFG 0
/* init parameters */
#define INV_MPU6050_INIT_FIFO_RATE 50
@@ -252,5 +274,9 @@ int inv_reset_fifo(struct iio_dev *indio_dev);
int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask);
int inv_mpu6050_write_reg(struct inv_mpu6050_state *st, int reg, u8 val);
int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on);
-int inv_mpu_acpi_create_mux_client(struct inv_mpu6050_state *st);
-void inv_mpu_acpi_delete_mux_client(struct inv_mpu6050_state *st);
+int inv_mpu_acpi_create_mux_client(struct i2c_client *client);
+void inv_mpu_acpi_delete_mux_client(struct i2c_client *client);
+int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
+ int (*inv_mpu_bus_setup)(struct iio_dev *), int chip_type);
+int inv_mpu_core_remove(struct device *dev);
+extern const struct dev_pm_ops inv_mpu_pmops;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index ba27e277511f..d0700628ee6d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
@@ -41,23 +40,24 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
/* disable interrupt */
- result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+ result = regmap_write(st->map, st->reg->int_enable, 0);
if (result) {
- dev_err(&st->client->dev, "int_enable failed %d\n", result);
+ dev_err(regmap_get_device(st->map), "int_enable failed %d\n",
+ result);
return result;
}
/* disable the sensor output to FIFO */
- result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+ result = regmap_write(st->map, st->reg->fifo_en, 0);
if (result)
goto reset_fifo_fail;
/* disable fifo reading */
- result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+ result = regmap_write(st->map, st->reg->user_ctrl, 0);
if (result)
goto reset_fifo_fail;
/* reset FIFO*/
- result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
- INV_MPU6050_BIT_FIFO_RST);
+ result = regmap_write(st->map, st->reg->user_ctrl,
+ INV_MPU6050_BIT_FIFO_RST);
if (result)
goto reset_fifo_fail;
@@ -67,14 +67,14 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
st->chip_config.gyro_fifo_enable) {
- result = inv_mpu6050_write_reg(st, st->reg->int_enable,
- INV_MPU6050_BIT_DATA_RDY_EN);
+ result = regmap_write(st->map, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
if (result)
return result;
}
/* enable FIFO reading and I2C master interface*/
- result = inv_mpu6050_write_reg(st, st->reg->user_ctrl,
- INV_MPU6050_BIT_FIFO_EN);
+ result = regmap_write(st->map, st->reg->user_ctrl,
+ INV_MPU6050_BIT_FIFO_EN);
if (result)
goto reset_fifo_fail;
/* enable sensor output to FIFO */
@@ -83,16 +83,16 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
d |= INV_MPU6050_BITS_GYRO_OUT;
if (st->chip_config.accl_fifo_enable)
d |= INV_MPU6050_BIT_ACCEL_OUT;
- result = inv_mpu6050_write_reg(st, st->reg->fifo_en, d);
+ result = regmap_write(st->map, st->reg->fifo_en, d);
if (result)
goto reset_fifo_fail;
return 0;
reset_fifo_fail:
- dev_err(&st->client->dev, "reset fifo failed %d\n", result);
- result = inv_mpu6050_write_reg(st, st->reg->int_enable,
- INV_MPU6050_BIT_DATA_RDY_EN);
+ dev_err(regmap_get_device(st->map), "reset fifo failed %d\n", result);
+ result = regmap_write(st->map, st->reg->int_enable,
+ INV_MPU6050_BIT_DATA_RDY_EN);
return result;
}
@@ -109,7 +109,7 @@ irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
timestamp = iio_get_time_ns();
kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
- &st->time_stamp_lock);
+ &st->time_stamp_lock);
return IRQ_WAKE_THREAD;
}
@@ -143,10 +143,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
* read fifo_count register to know how many bytes inside FIFO
* right now
*/
- result = i2c_smbus_read_i2c_block_data(st->client,
- st->reg->fifo_count_h,
- INV_MPU6050_FIFO_COUNT_BYTE, data);
- if (result != INV_MPU6050_FIFO_COUNT_BYTE)
+ result = regmap_bulk_read(st->map, st->reg->fifo_count_h, data,
+ INV_MPU6050_FIFO_COUNT_BYTE);
+ if (result)
goto end_session;
fifo_count = be16_to_cpup((__be16 *)(&data[0]));
if (fifo_count < bytes_per_datum)
@@ -158,22 +157,21 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
goto flush_fifo;
/* Timestamp mismatch. */
if (kfifo_len(&st->timestamps) >
- fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
- goto flush_fifo;
+ fifo_count / bytes_per_datum + INV_MPU6050_TIME_STAMP_TOR)
+ goto flush_fifo;
while (fifo_count >= bytes_per_datum) {
- result = i2c_smbus_read_i2c_block_data(st->client,
- st->reg->fifo_r_w,
- bytes_per_datum, data);
- if (result != bytes_per_datum)
+ result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
+ data, bytes_per_datum);
+ if (result)
goto flush_fifo;
result = kfifo_out(&st->timestamps, &timestamp, 1);
/* when there is no timestamp, put timestamp as 0 */
- if (0 == result)
+ if (result == 0)
timestamp = 0;
result = iio_push_to_buffers_with_timestamp(indio_dev, data,
- timestamp);
+ timestamp);
if (result)
goto flush_fifo;
fifo_count -= bytes_per_datum;
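Note the changed error convention in this conversion: i2c_smbus_read_i2c_block_data() returns the number of bytes read, so the old code compared against the expected length, while regmap_bulk_read() returns 0 on success or a negative errno. Reduced to its shape:

	/* before: byte-count convention */
	ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf);
	if (ret != len)
		goto fail;	/* short read or negative errno */

	/* after: zero-or-errno convention */
	ret = regmap_bulk_read(map, reg, buf, len);
	if (ret)
		goto fail;	/* negative errno only */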
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
new file mode 100644
index 000000000000..dea6c4361de0
--- /dev/null
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -0,0 +1,98 @@
+/*
+* Copyright (C) 2015 Intel Corporation Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include "inv_mpu_iio.h"
+
+static const struct regmap_config inv_mpu_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
+{
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ ret = inv_mpu6050_set_power_itg(st, true);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->map, INV_MPU6050_REG_USER_CTRL,
+ INV_MPU6050_BIT_I2C_IF_DIS);
+ if (ret) {
+ inv_mpu6050_set_power_itg(st, false);
+ return ret;
+ }
+
+ return inv_mpu6050_set_power_itg(st, false);
+}
+
+static int inv_mpu_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ const char *name = id ? id->name : NULL;
+
+ regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return inv_mpu_core_probe(regmap, spi->irq, name,
+ inv_mpu_i2c_disable, id->driver_data);
+}
+
+static int inv_mpu_remove(struct spi_device *spi)
+{
+ return inv_mpu_core_remove(&spi->dev);
+}
+
+/*
+ * Device id table used to identify the devices supported by
+ * this driver.
+ */
+static const struct spi_device_id inv_mpu_id[] = {
+ {"mpu6000", INV_MPU6000},
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, inv_mpu_id);
+
+static const struct acpi_device_id inv_acpi_match[] = {
+ {"INVN6000", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
+
+static struct spi_driver inv_mpu_driver = {
+ .probe = inv_mpu_probe,
+ .remove = inv_mpu_remove,
+ .id_table = inv_mpu_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(inv_acpi_match),
+ .name = "inv-mpu6000-spi",
+ .pm = &inv_mpu_pmops,
+ },
+};
+
+module_spi_driver(inv_mpu_driver);
+
+MODULE_AUTHOR("Adriana Reus <adriana.reus@intel.com>");
+MODULE_DESCRIPTION("Invensense device MPU6000 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 844610c3a3a9..e8818d4dd4b8 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -19,19 +19,19 @@ static void inv_scan_query(struct iio_dev *indio_dev)
st->chip_config.gyro_fifo_enable =
test_bit(INV_MPU6050_SCAN_GYRO_X,
- indio_dev->active_scan_mask) ||
- test_bit(INV_MPU6050_SCAN_GYRO_Y,
- indio_dev->active_scan_mask) ||
- test_bit(INV_MPU6050_SCAN_GYRO_Z,
- indio_dev->active_scan_mask);
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_GYRO_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_GYRO_Z,
+ indio_dev->active_scan_mask);
st->chip_config.accl_fifo_enable =
test_bit(INV_MPU6050_SCAN_ACCL_X,
- indio_dev->active_scan_mask) ||
- test_bit(INV_MPU6050_SCAN_ACCL_Y,
- indio_dev->active_scan_mask) ||
- test_bit(INV_MPU6050_SCAN_ACCL_Z,
- indio_dev->active_scan_mask);
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_ACCL_Y,
+ indio_dev->active_scan_mask) ||
+ test_bit(INV_MPU6050_SCAN_ACCL_Z,
+ indio_dev->active_scan_mask);
}
/**
@@ -65,15 +65,15 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
return result;
} else {
- result = inv_mpu6050_write_reg(st, st->reg->fifo_en, 0);
+ result = regmap_write(st->map, st->reg->fifo_en, 0);
if (result)
return result;
- result = inv_mpu6050_write_reg(st, st->reg->int_enable, 0);
+ result = regmap_write(st->map, st->reg->int_enable, 0);
if (result)
return result;
- result = inv_mpu6050_write_reg(st, st->reg->user_ctrl, 0);
+ result = regmap_write(st->map, st->reg->user_ctrl, 0);
if (result)
return result;
@@ -101,7 +101,7 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
* @state: Desired trigger state
*/
static int inv_mpu_data_rdy_trigger_set_state(struct iio_trigger *trig,
- bool state)
+ bool state)
{
return inv_mpu6050_set_enable(iio_trigger_get_drvdata(trig), state);
}
@@ -123,7 +123,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
if (!st->trig)
return -ENOMEM;
- ret = devm_request_irq(&indio_dev->dev, st->client->irq,
+ ret = devm_request_irq(&indio_dev->dev, st->irq,
&iio_trigger_generic_data_rdy_poll,
IRQF_TRIGGER_RISING,
"inv_mpu",
@@ -131,7 +131,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev)
if (ret)
return ret;
- st->trig->dev.parent = &st->client->dev;
+ st->trig->dev.parent = regmap_get_device(st->map);
st->trig->ops = &inv_mpu_trigger_ops;
iio_trigger_set_drvdata(st->trig, indio_dev);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 139ae916225f..90462fcf5436 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -512,33 +512,41 @@ static ssize_t iio_buffer_show_enable(struct device *dev,
return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
+static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
+ unsigned int scan_index)
+{
+ const struct iio_chan_spec *ch;
+ unsigned int bytes;
+
+ ch = iio_find_channel_from_si(indio_dev, scan_index);
+ bytes = ch->scan_type.storagebits / 8;
+ if (ch->scan_type.repeat > 1)
+ bytes *= ch->scan_type.repeat;
+ return bytes;
+}
+
+static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
+{
+ return iio_storage_bytes_for_si(indio_dev,
+ indio_dev->scan_index_timestamp);
+}
+
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
const unsigned long *mask, bool timestamp)
{
- const struct iio_chan_spec *ch;
unsigned bytes = 0;
int length, i;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask,
indio_dev->masklength) {
- ch = iio_find_channel_from_si(indio_dev, i);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_si(indio_dev, i);
bytes = ALIGN(bytes, length);
bytes += length;
}
+
if (timestamp) {
- ch = iio_find_channel_from_si(indio_dev,
- indio_dev->scan_index_timestamp);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_timestamp(indio_dev);
bytes = ALIGN(bytes, length);
bytes += length;
}
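The new helper makes the buffer-layout rule explicit: each enabled channel contributes storagebits / 8 bytes (times repeat), aligned to its own size, and the timestamp is aligned last. Worked example for three 16-bit channels plus a 64-bit timestamp:

	/*
	 * 3 x 16-bit channels + 64-bit timestamp:
	 *
	 *   ch0: ALIGN(0, 2) + 2 = 2
	 *   ch1: ALIGN(2, 2) + 2 = 4
	 *   ch2: ALIGN(4, 2) + 2 = 6
	 *   ts:  ALIGN(6, 8) + 8 = 16  (2 bytes of padding before it)
	 *
	 * hence the "3x 16-bit channels + padding + 64-bit timestamp"
	 * layout in drivers such as hmc5843 further down.
	 */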
@@ -645,6 +653,7 @@ static int iio_verify_update(struct iio_dev *indio_dev,
unsigned int modes;
memset(config, 0, sizeof(*config));
+ config->watermark = ~0;
/*
* If there is just one buffer and we are removing it there is nothing
@@ -1288,7 +1297,6 @@ static int iio_buffer_add_demux(struct iio_buffer *buffer,
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
struct iio_buffer *buffer)
{
- const struct iio_chan_spec *ch;
int ret, in_ind = -1, out_ind, length;
unsigned in_loc = 0, out_loc = 0;
struct iio_demux_table *p = NULL;
@@ -1315,21 +1323,11 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
in_ind = find_next_bit(indio_dev->active_scan_mask,
indio_dev->masklength,
in_ind + 1);
- ch = iio_find_channel_from_si(indio_dev, in_ind);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_si(indio_dev, in_ind);
/* Make sure we are aligned */
in_loc = roundup(in_loc, length) + length;
}
- ch = iio_find_channel_from_si(indio_dev, in_ind);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_si(indio_dev, in_ind);
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
@@ -1340,13 +1338,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
}
/* Relies on scan_timestamp being last */
if (buffer->scan_timestamp) {
- ch = iio_find_channel_from_si(indio_dev,
- indio_dev->scan_index_timestamp);
- if (ch->scan_type.repeat > 1)
- length = ch->scan_type.storagebits / 8 *
- ch->scan_type.repeat;
- else
- length = ch->scan_type.storagebits / 8;
+ length = iio_storage_bytes_for_timestamp(indio_dev);
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index af7cc1e65656..70cb7eb0a75c 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -77,6 +77,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_VELOCITY] = "velocity",
[IIO_CONCENTRATION] = "concentration",
[IIO_RESISTANCE] = "resistance",
+ [IIO_PH] = "ph",
};
static const char * const iio_modifier_names[] = {
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index f6a07dc32ae4..a6af56ad10e1 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -769,7 +769,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
mutex_lock(&data->lock);
data->gesture_mode_running = 1;
- while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) {
+ while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) {
ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE,
&data->buffer, 4);
@@ -777,6 +777,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data)
goto err_read;
iio_push_to_buffers(data->indio_dev, data->buffer);
+ cnt--;
}
err_read:
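The loop fix keeps cnt an honest count of entries still owed: the old condition post-decremented cnt on every evaluation (driving it to -1 before the FIFO re-check) and then overwrote it with a boolean, whereas the new form consumes a count only after an entry has actually been read and pushed. Reduced to its shape, with hypothetical helpers:

	/* before: the condition both consumes and clobbers the count */
	while (cnt-- || (cnt = fifo_not_empty() > 0))
		read_and_push_entry();

	/* after: the count is consumed only on a successful push */
	while (cnt || (cnt = fifo_not_empty() > 0)) {
		if (read_and_push_entry() < 0)
			break;
		cnt--;
	}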
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 8b4164343f20..b05946604f80 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -241,7 +241,7 @@ static int bh1750_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index c4e8c6b6c3c3..99a62816c3b4 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -326,7 +326,7 @@ static int jsa1212_probe(struct i2c_client *client,
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
index 01e111e72d4b..b776c8ed4387 100644
--- a/drivers/iio/light/opt3001.c
+++ b/drivers/iio/light/opt3001.c
@@ -65,19 +65,25 @@
#define OPT3001_REG_EXPONENT(n) ((n) >> 12)
#define OPT3001_REG_MANTISSA(n) ((n) & 0xfff)
+#define OPT3001_INT_TIME_LONG 800000
+#define OPT3001_INT_TIME_SHORT 100000
+
/*
* Time to wait for conversion result to be ready. The device datasheet
- * worst-case max value is 880ms. Add some slack to be on the safe side.
+ * sect. 6.5 states results are ready after total integration time plus 3ms.
+ * This results in worst-case max values of 113ms or 883ms, respectively.
+ * Add some slack to be on the safe side.
*/
-#define OPT3001_RESULT_READY_TIMEOUT msecs_to_jiffies(1000)
+#define OPT3001_RESULT_READY_SHORT 150
+#define OPT3001_RESULT_READY_LONG 1000
struct opt3001 {
struct i2c_client *client;
struct device *dev;
struct mutex lock;
- u16 ok_to_ignore_lock:1;
- u16 result_ready:1;
+ bool ok_to_ignore_lock;
+ bool result_ready;
wait_queue_head_t result_ready_queue;
u16 result;
@@ -89,6 +95,8 @@ struct opt3001 {
u8 high_thresh_exp;
u8 low_thresh_exp;
+
+ bool use_irq;
};
struct opt3001_scale {
@@ -227,26 +235,30 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
u16 reg;
u8 exponent;
u16 value;
+ long timeout;
- /*
- * Enable the end-of-conversion interrupt mechanism. Note that doing
- * so will overwrite the low-level limit value however we will restore
- * this value later on.
- */
- ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
- OPT3001_LOW_LIMIT_EOC_ENABLE);
- if (ret < 0) {
- dev_err(opt->dev, "failed to write register %02x\n",
- OPT3001_LOW_LIMIT);
- return ret;
+ if (opt->use_irq) {
+ /*
+ * Enable the end-of-conversion interrupt mechanism. Note that
+ * doing so will overwrite the low-level limit value however we
+ * will restore this value later on.
+ */
+ ret = i2c_smbus_write_word_swapped(opt->client,
+ OPT3001_LOW_LIMIT,
+ OPT3001_LOW_LIMIT_EOC_ENABLE);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_LOW_LIMIT);
+ return ret;
+ }
+
+ /* Allow IRQ to access the device despite lock being set */
+ opt->ok_to_ignore_lock = true;
}
- /* Reset data-ready indicator flag (will be set in the IRQ routine) */
+ /* Reset data-ready indicator flag */
opt->result_ready = false;
- /* Allow IRQ to access the device despite lock being set */
- opt->ok_to_ignore_lock = true;
-
/* Configure for single-conversion mode and start a new conversion */
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
if (ret < 0) {
@@ -266,32 +278,69 @@ static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
goto err;
}
- /* Wait for the IRQ to indicate the conversion is complete */
- ret = wait_event_timeout(opt->result_ready_queue, opt->result_ready,
- OPT3001_RESULT_READY_TIMEOUT);
+ if (opt->use_irq) {
+ /* Wait for the IRQ to indicate the conversion is complete */
+ ret = wait_event_timeout(opt->result_ready_queue,
+ opt->result_ready,
+ msecs_to_jiffies(OPT3001_RESULT_READY_LONG));
+ } else {
+ /* Sleep for result ready time */
+ timeout = (opt->int_time == OPT3001_INT_TIME_SHORT) ?
+ OPT3001_RESULT_READY_SHORT : OPT3001_RESULT_READY_LONG;
+ msleep(timeout);
+
+ /* Check result ready flag */
+ ret = i2c_smbus_read_word_swapped(opt->client,
+ OPT3001_CONFIGURATION);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_CONFIGURATION);
+ goto err;
+ }
+
+ if (!(ret & OPT3001_CONFIGURATION_CRF)) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* Obtain value */
+ ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to read register %02x\n",
+ OPT3001_RESULT);
+ goto err;
+ }
+ opt->result = ret;
+ opt->result_ready = true;
+ }
err:
- /* Disallow IRQ to access the device while lock is active */
- opt->ok_to_ignore_lock = false;
+ if (opt->use_irq)
+ /* Disallow IRQ to access the device while lock is active */
+ opt->ok_to_ignore_lock = false;
if (ret == 0)
return -ETIMEDOUT;
else if (ret < 0)
return ret;
- /*
- * Disable the end-of-conversion interrupt mechanism by restoring the
- * low-level limit value (clearing OPT3001_LOW_LIMIT_EOC_ENABLE). Note
- * that selectively clearing those enable bits would affect the actual
- * limit value due to bit-overlap and therefore can't be done.
- */
- value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa;
- ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
- value);
- if (ret < 0) {
- dev_err(opt->dev, "failed to write register %02x\n",
- OPT3001_LOW_LIMIT);
- return ret;
+ if (opt->use_irq) {
+ /*
+ * Disable the end-of-conversion interrupt mechanism by
+ * restoring the low-level limit value (clearing
+ * OPT3001_LOW_LIMIT_EOC_ENABLE). Note that selectively clearing
+ * those enable bits would affect the actual limit value due to
+ * bit-overlap and therefore can't be done.
+ */
+ value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa;
+ ret = i2c_smbus_write_word_swapped(opt->client,
+ OPT3001_LOW_LIMIT,
+ value);
+ if (ret < 0) {
+ dev_err(opt->dev, "failed to write register %02x\n",
+ OPT3001_LOW_LIMIT);
+ return ret;
+ }
}
exponent = OPT3001_REG_EXPONENT(opt->result);
@@ -325,13 +374,13 @@ static int opt3001_set_int_time(struct opt3001 *opt, int time)
reg = ret;
switch (time) {
- case 100000:
+ case OPT3001_INT_TIME_SHORT:
reg &= ~OPT3001_CONFIGURATION_CT;
- opt->int_time = 100000;
+ opt->int_time = OPT3001_INT_TIME_SHORT;
break;
- case 800000:
+ case OPT3001_INT_TIME_LONG:
reg |= OPT3001_CONFIGURATION_CT;
- opt->int_time = 800000;
+ opt->int_time = OPT3001_INT_TIME_LONG;
break;
default:
return -EINVAL;
@@ -597,9 +646,9 @@ static int opt3001_configure(struct opt3001 *opt)
/* Reflect status of the device's integration time setting */
if (reg & OPT3001_CONFIGURATION_CT)
- opt->int_time = 800000;
+ opt->int_time = OPT3001_INT_TIME_LONG;
else
- opt->int_time = 100000;
+ opt->int_time = OPT3001_INT_TIME_SHORT;
/* Ensure device is in shutdown initially */
opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SHUTDOWN);
@@ -733,12 +782,18 @@ static int opt3001_probe(struct i2c_client *client,
return ret;
}
- ret = request_threaded_irq(irq, NULL, opt3001_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "opt3001", iio);
- if (ret) {
- dev_err(dev, "failed to request IRQ #%d\n", irq);
- return ret;
+ /* Make use of INT pin only if valid IRQ no. is given */
+ if (irq > 0) {
+ ret = request_threaded_irq(irq, NULL, opt3001_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "opt3001", iio);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ #%d\n", irq);
+ return ret;
+ }
+ opt->use_irq = true;
+ } else {
+ dev_dbg(opt->dev, "enabling interrupt-less operation\n");
}
return 0;
@@ -751,7 +806,8 @@ static int opt3001_remove(struct i2c_client *client)
int ret;
u16 reg;
- free_irq(client->irq, iio);
+ if (opt->use_irq)
+ free_irq(client->irq, iio);
ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
if (ret < 0) {
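These changes make the INT pin optional: the IRQ is requested, and use_irq set, only for a valid interrupt number; the conversion wait then degrades to a fixed msleep() plus a poll of the CRF (conversion ready) flag, and free_irq() is guarded to match on removal. The probe-side pattern in minimal form (handler, "mydev" and priv are stand-ins):

	/* Optional-IRQ probe pattern (sketch) */
	if (client->irq > 0) {
		ret = request_threaded_irq(client->irq, NULL, handler,
					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					   "mydev", iio);
		if (ret)
			return ret;
		priv->use_irq = true;
	} else {
		priv->use_irq = false;	/* poll the ready flag instead */
	}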
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 868abada3409..021dc5361f53 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -105,4 +105,37 @@ config IIO_ST_MAGN_SPI_3AXIS
depends on IIO_ST_MAGN_3AXIS
depends on IIO_ST_SENSORS_SPI
+config SENSORS_HMC5843
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config SENSORS_HMC5843_I2C
+ tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer (I2C)"
+ depends on I2C
+ select SENSORS_HMC5843
+ select REGMAP_I2C
+ help
+ Say Y here to add support for the Honeywell HMC5843, HMC5883 and
+ HMC5883L 3-Axis Magnetometer (digital compass).
+
+ This driver can also be compiled as a set of modules.
+ If so, these modules will be created:
+ - hmc5843_core (core functions)
+ - hmc5843_i2c (support for HMC5843, HMC5883, HMC5883L and HMC5983)
+
+config SENSORS_HMC5843_SPI
+ tristate "Honeywell HMC5983 3-Axis Magnetometer (SPI)"
+ depends on SPI_MASTER
+ select SENSORS_HMC5843
+ select REGMAP_SPI
+ help
+ Say Y here to add support for the Honeywell HMC5983 3-Axis Magnetometer
+ (digital compass).
+
+ This driver can also be compiled as a set of modules.
+ If so, these modules will be created:
+ - hmc5843_core (core functions)
+ - hmc5843_spi (support for HMC5983)
+
endmenu
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 2c72df458ec2..dd03fe524481 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -15,3 +15,7 @@ st_magn-$(CONFIG_IIO_BUFFER) += st_magn_buffer.o
obj-$(CONFIG_IIO_ST_MAGN_I2C_3AXIS) += st_magn_i2c.o
obj-$(CONFIG_IIO_ST_MAGN_SPI_3AXIS) += st_magn_spi.o
+
+obj-$(CONFIG_SENSORS_HMC5843) += hmc5843_core.o
+obj-$(CONFIG_SENSORS_HMC5843_I2C) += hmc5843_i2c.o
+obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index b13936dacc78..9c5c9ef3f1da 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -252,7 +252,7 @@ struct ak_def {
u8 data_regs[3];
};
-static struct ak_def ak_def_array[AK_MAX_TYPE] = {
+static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
{
.type = AK8975,
.raw_to_gauss = ak8975_raw_to_gauss,
@@ -360,7 +360,7 @@ static struct ak_def ak_def_array[AK_MAX_TYPE] = {
*/
struct ak8975_data {
struct i2c_client *client;
- struct ak_def *def;
+ const struct ak_def *def;
struct attribute_group attrs;
struct mutex lock;
u8 asa[3];
diff --git a/drivers/staging/iio/magnetometer/hmc5843.h b/drivers/iio/magnetometer/hmc5843.h
index 06f35d3828e4..76a5d7484d8d 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.h
+++ b/drivers/iio/magnetometer/hmc5843.h
@@ -7,8 +7,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * */
+ */
#ifndef HMC5843_CORE_H
#define HMC5843_CORE_H
@@ -38,7 +37,7 @@ enum hmc5843_ids {
* @regmap: hardware access register maps
* @variant: describe chip variants
* @buffer: 3x 16-bit channels + padding + 64-bit timestamp
- **/
+ */
struct hmc5843_data {
struct device *dev;
struct mutex lock;
diff --git a/drivers/staging/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index 394bc141a1b0..77882b466e0f 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -18,7 +18,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
#include <linux/module.h>
@@ -66,6 +65,33 @@
#define HMC5843_MEAS_CONF_NEGATIVE_BIAS 0x02
#define HMC5843_MEAS_CONF_MASK 0x03
+/*
+ * API for setting the measurement configuration to
+ * Normal, Positive bias and Negative bias
+ *
+ * From the datasheet:
+ * 0 - Normal measurement configuration (default): In normal measurement
+ * configuration the device follows normal measurement flow. Pins BP
+ * and BN are left floating and high impedance.
+ *
+ * 1 - Positive bias configuration: In positive bias configuration, a
+ * positive current is forced across the resistive load on pins BP
+ * and BN.
+ *
+ * 2 - Negative bias configuration. In negative bias configuration, a
+ * negative current is forced across the resistive load on pins BP
+ * and BN.
+ *
+ * 3 - Only available on HMC5983. Magnetic sensor is disabled.
+ * Temperature sensor is enabled.
+ */
+
+static const char *const hmc5843_meas_conf_modes[] = {"normal", "positivebias",
+ "negativebias"};
+
+static const char *const hmc5983_meas_conf_modes[] = {"normal", "positivebias",
+ "negativebias",
+ "disabled"};
/* Scaling factors: 10000000/Gain */
static const int hmc5843_regval_to_nanoscale[] = {
6173, 7692, 10309, 12821, 18868, 21739, 25641, 35714
@@ -174,24 +200,6 @@ static int hmc5843_read_measurement(struct hmc5843_data *data,
return IIO_VAL_INT;
}
-/*
- * API for setting the measurement configuration to
- * Normal, Positive bias and Negative bias
- *
- * From the datasheet:
- * 0 - Normal measurement configuration (default): In normal measurement
- * configuration the device follows normal measurement flow. Pins BP
- * and BN are left floating and high impedance.
- *
- * 1 - Positive bias configuration: In positive bias configuration, a
- * positive current is forced across the resistive load on pins BP
- * and BN.
- *
- * 2 - Negative bias configuration. In negative bias configuration, a
- * negative current is forced across the resistive load on pins BP
- * and BN.
- *
- */
static int hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf)
{
int ret;
@@ -205,48 +213,55 @@ static int hmc5843_set_meas_conf(struct hmc5843_data *data, u8 meas_conf)
}
static
-ssize_t hmc5843_show_measurement_configuration(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+int hmc5843_show_measurement_configuration(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
{
- struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
+ struct hmc5843_data *data = iio_priv(indio_dev);
unsigned int val;
int ret;
ret = regmap_read(data->regmap, HMC5843_CONFIG_REG_A, &val);
if (ret)
return ret;
- val &= HMC5843_MEAS_CONF_MASK;
- return sprintf(buf, "%d\n", val);
+ return val & HMC5843_MEAS_CONF_MASK;
}
static
-ssize_t hmc5843_set_measurement_configuration(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+int hmc5843_set_measurement_configuration(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int meas_conf)
{
- struct hmc5843_data *data = iio_priv(dev_to_iio_dev(dev));
- unsigned long meas_conf = 0;
- int ret;
+ struct hmc5843_data *data = iio_priv(indio_dev);
- ret = kstrtoul(buf, 10, &meas_conf);
- if (ret)
- return ret;
- if (meas_conf >= HMC5843_MEAS_CONF_MASK)
- return -EINVAL;
+ return hmc5843_set_meas_conf(data, meas_conf);
+}
- ret = hmc5843_set_meas_conf(data, meas_conf);
+static const struct iio_enum hmc5843_meas_conf_enum = {
+ .items = hmc5843_meas_conf_modes,
+ .num_items = ARRAY_SIZE(hmc5843_meas_conf_modes),
+ .get = hmc5843_show_measurement_configuration,
+ .set = hmc5843_set_measurement_configuration,
+};
- return (ret < 0) ? ret : count;
-}
+static const struct iio_chan_spec_ext_info hmc5843_ext_info[] = {
+ IIO_ENUM("meas_conf", true, &hmc5843_meas_conf_enum),
+ IIO_ENUM_AVAILABLE("meas_conf", &hmc5843_meas_conf_enum),
+ { },
+};
-static IIO_DEVICE_ATTR(meas_conf,
- S_IWUSR | S_IRUGO,
- hmc5843_show_measurement_configuration,
- hmc5843_set_measurement_configuration,
- 0);
+static const struct iio_enum hmc5983_meas_conf_enum = {
+ .items = hmc5983_meas_conf_modes,
+ .num_items = ARRAY_SIZE(hmc5983_meas_conf_modes),
+ .get = hmc5843_show_measurement_configuration,
+ .set = hmc5843_set_measurement_configuration,
+};
+
+static const struct iio_chan_spec_ext_info hmc5983_ext_info[] = {
+ IIO_ENUM("meas_conf", true, &hmc5983_meas_conf_enum),
+ IIO_ENUM_AVAILABLE("meas_conf", &hmc5983_meas_conf_enum),
+ { },
+};
static
ssize_t hmc5843_show_samp_freq_avail(struct device *dev,
@@ -459,6 +474,25 @@ done:
.storagebits = 16, \
.endianness = IIO_BE, \
}, \
+ .ext_info = hmc5843_ext_info, \
+ }
+
+#define HMC5983_CHANNEL(axis, idx) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ .ext_info = hmc5983_ext_info, \
}
static const struct iio_chan_spec hmc5843_channels[] = {
@@ -476,8 +510,14 @@ static const struct iio_chan_spec hmc5883_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(3),
};
+static const struct iio_chan_spec hmc5983_channels[] = {
+ HMC5983_CHANNEL(X, 0),
+ HMC5983_CHANNEL(Z, 1),
+ HMC5983_CHANNEL(Y, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
static struct attribute *hmc5843_attributes[] = {
- &iio_dev_attr_meas_conf.dev_attr.attr,
&iio_dev_attr_scale_available.dev_attr.attr,
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL
@@ -516,7 +556,7 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
ARRAY_SIZE(hmc5883l_regval_to_nanoscale),
},
[HMC5983_ID] = {
- .channels = hmc5883_channels,
+ .channels = hmc5983_channels,
.regval_to_samp_freq = hmc5983_regval_to_samp_freq,
.n_regval_to_samp_freq =
ARRAY_SIZE(hmc5983_regval_to_samp_freq),
@@ -565,14 +605,14 @@ static const unsigned long hmc5843_scan_masks[] = {0x7, 0};
int hmc5843_common_suspend(struct device *dev)
{
return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
- HMC5843_MODE_CONVERSION_CONTINUOUS);
+ HMC5843_MODE_SLEEP);
}
EXPORT_SYMBOL(hmc5843_common_suspend);
int hmc5843_common_resume(struct device *dev)
{
return hmc5843_set_mode(iio_priv(dev_get_drvdata(dev)),
- HMC5843_MODE_SLEEP);
+ HMC5843_MODE_CONVERSION_CONTINUOUS);
}
EXPORT_SYMBOL(hmc5843_common_resume);
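Converting the hand-rolled meas_conf sysfs attribute to IIO_ENUM buys validated, string-valued configuration plus a matching _available attribute for free: the get callback returns the current index into items[], and the set callback receives an index the IIO core has already range-checked against num_items. The generic shape, with hypothetical mydev_* names:

	static const char * const mydev_modes[] = { "normal", "positivebias" };

	static int mydev_get_mode(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan)
	{
		/* return the current index into mydev_modes[] */
		return mydev_read_mode(iio_priv(indio_dev));
	}

	static int mydev_set_mode(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  unsigned int mode)
	{
		/* mode is pre-validated: 0 <= mode < num_items */
		return mydev_write_mode(iio_priv(indio_dev), mode);
	}

	static const struct iio_enum mydev_mode_enum = {
		.items = mydev_modes,
		.num_items = ARRAY_SIZE(mydev_modes),
		.get = mydev_get_mode,
		.set = mydev_set_mode,
	};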
diff --git a/drivers/staging/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
index 3e06ceb32059..3de7f4426ac4 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_i2c.c
+++ b/drivers/iio/magnetometer/hmc5843_i2c.c
@@ -7,8 +7,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * */
+ */
#include <linux/module.h>
#include <linux/i2c.h>
diff --git a/drivers/staging/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
index 8be198058ea2..535f03a70d63 100644
--- a/drivers/staging/iio/magnetometer/hmc5843_spi.c
+++ b/drivers/iio/magnetometer/hmc5843_spi.c
@@ -6,8 +6,7 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * */
+ */
#include <linux/module.h>
#include <linux/spi/spi.h>
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 06a4d9c35581..9daca4681922 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev)
static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev)
{
}
+#define ST_MAGN_TRIGGER_SET_STATE NULL
#endif /* CONFIG_IIO_BUFFER */
#endif /* ST_MAGN_H */
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index b27f0146647b..501f858df413 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -175,6 +175,8 @@
#define ST_MAGN_3_BDU_MASK 0x10
#define ST_MAGN_3_DRDY_IRQ_ADDR 0x62
#define ST_MAGN_3_DRDY_INT_MASK 0x01
+#define ST_MAGN_3_IHL_IRQ_ADDR 0x63
+#define ST_MAGN_3_IHL_IRQ_MASK 0x04
#define ST_MAGN_3_FS_AVL_15000_GAIN 1500
#define ST_MAGN_3_MULTIREAD_BIT false
#define ST_MAGN_3_OUT_X_L_ADDR 0x68
@@ -480,6 +482,8 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.drdy_irq = {
.addr = ST_MAGN_3_DRDY_IRQ_ADDR,
.mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
+ .addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
+ .mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
},
.multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
.bootime = 2,
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index fd75db73e582..ffc735c168fb 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -17,4 +17,16 @@ config MCP4531
To compile this driver as a module, choose M here: the
module will be called mcp4531.
+config TPL0102
+ tristate "Texas Instruments digital potentiometer driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the Texas Instruments
+ TPL0102 and TPL0402 digital potentiometer chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tpl0102.
+
endmenu
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index 8afe49227012..b563b492b486 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -4,3 +4,4 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_MCP4531) += mcp4531.o
+obj-$(CONFIG_TPL0102) += tpl0102.o
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index a3f66874ee2e..0db67fe14766 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -159,7 +159,7 @@ static int mcp4531_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_err(dev, "SMBUS Word Data not supported\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
new file mode 100644
index 000000000000..313124b6fd59
--- /dev/null
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -0,0 +1,166 @@
+/*
+ * tpl0102.c - Support for Texas Instruments digital potentiometers
+ *
+ * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO: enable/disable hi-z output control
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+
+struct tpl0102_cfg {
+ int wipers;
+ int max_pos;
+ int kohms;
+};
+
+enum tpl0102_type {
+ CAT5140_503,
+ CAT5140_104,
+ TPL0102_104,
+ TPL0401_103,
+};
+
+static const struct tpl0102_cfg tpl0102_cfg[] = {
+ /* on-semiconductor parts */
+ [CAT5140_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
+ [CAT5140_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+ /* ti parts */
+ [TPL0102_104] = { .wipers = 2, .max_pos = 256, .kohms = 100 },
+ [TPL0401_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
+};
+
+struct tpl0102_data {
+ struct regmap *regmap;
+ unsigned long devid;
+};
+
+static const struct regmap_config tpl0102_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+#define TPL0102_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec tpl0102_channels[] = {
+ TPL0102_CHANNEL(0),
+ TPL0102_CHANNEL(1),
+};
+
+static int tpl0102_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct tpl0102_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW: {
+ int ret = regmap_read(data->regmap, chan->channel, val);
+
+ return ret ? ret : IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000 * tpl0102_cfg[data->devid].kohms;
+ *val2 = tpl0102_cfg[data->devid].max_pos;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int tpl0102_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct tpl0102_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_RAW)
+ return -EINVAL;
+
+ if (val >= tpl0102_cfg[data->devid].max_pos || val < 0)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, chan->channel, val);
+}
+
+static const struct iio_info tpl0102_info = {
+ .read_raw = tpl0102_read_raw,
+ .write_raw = tpl0102_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int tpl0102_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tpl0102_data *data;
+ struct iio_dev *indio_dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ data->devid = id->driver_data;
+ data->regmap = devm_regmap_init_i2c(client, &tpl0102_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(dev, "regmap initialization failed\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &tpl0102_info;
+ indio_dev->channels = tpl0102_channels;
+ indio_dev->num_channels = tpl0102_cfg[data->devid].wipers;
+ indio_dev->name = client->name;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct i2c_device_id tpl0102_id[] = {
+ { "cat5140-503", CAT5140_503 },
+ { "cat5140-104", CAT5140_104 },
+ { "tpl0102-104", TPL0102_104 },
+ { "tpl0401-103", TPL0401_103 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpl0102_id);
+
+static struct i2c_driver tpl0102_driver = {
+ .driver = {
+ .name = "tpl0102",
+ },
+ .probe = tpl0102_probe,
+ .id_table = tpl0102_id,
+};
+
+module_i2c_driver(tpl0102_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("TPL0102 digital potentiometer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 6f2e7c9ac23e..31c0e1fd2202 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -10,11 +10,11 @@ config BMP280
depends on I2C
select REGMAP_I2C
help
- Say yes here to build support for Bosch Sensortec BMP280
- pressure and temperature sensor.
+ Say yes here to build support for Bosch Sensortec BMP280
+ pressure and temperature sensor.
- To compile this driver as a module, choose M here: the module
- will be called bmp280.
+ To compile this driver as a module, choose M here: the module
+ will be called bmp280.
config HID_SENSOR_PRESS
depends on HID_SENSOR_HUB
@@ -27,18 +27,33 @@ config HID_SENSOR_PRESS
Say yes here to build support for the HID SENSOR
Pressure driver
- To compile this driver as a module, choose M here: the module
- will be called hid-sensor-press.
+ To compile this driver as a module, choose M here: the module
+ will be called hid-sensor-press.
config MPL115
+ tristate
+
+config MPL115_I2C
tristate "Freescale MPL115A2 pressure sensor driver"
depends on I2C
+ select MPL115
help
Say yes here to build support for the Freescale MPL115A2
pressure sensor connected via I2C.
- To compile this driver as a module, choose M here: the module
- will be called mpl115.
+ To compile this driver as a module, choose M here: the module
+ will be called mpl115_i2c.
+
+config MPL115_SPI
+ tristate "Freescale MPL115A1 pressure sensor driver"
+ depends on SPI_MASTER
+ select MPL115
+ help
+ Say yes here to build support for the Freescale MPL115A1
+ pressure sensor connected via SPI.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mpl115_spi.
config MPL3115
tristate "Freescale MPL3115A2 pressure sensor driver"
@@ -49,11 +64,13 @@ config MPL3115
Say yes here to build support for the Freescale MPL3115A2
pressure sensor / altimeter.
- To compile this driver as a module, choose M here: the module
- will be called mpl3115.
+ To compile this driver as a module, choose M here: the module
+ will be called mpl3115.
config MS5611
tristate "Measurement Specialties MS5611 pressure sensor driver"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Measurement Specialties
MS5611, MS5607 pressure and temperature sensors.
@@ -82,7 +99,7 @@ config MS5611_SPI
config MS5637
tristate "Measurement Specialties MS5637 pressure & temperature sensor"
depends on I2C
- select IIO_MS_SENSORS_I2C
+ select IIO_MS_SENSORS_I2C
help
If you say yes here you get support for the Measurement Specialties
MS5637 pressure and temperature sensor.
@@ -128,7 +145,7 @@ config T5403
Say yes here to build support for the EPCOS T5403 pressure sensor
connected via I2C.
- To compile this driver as a module, choose M here: the module
- will be called t5403.
+ To compile this driver as a module, choose M here: the module
+ will be called t5403.
endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 46571c96823f..d336af14f3fe 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_BMP280) += bmp280.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_MPL115) += mpl115.o
+obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
+obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
obj-$(CONFIG_MPL3115) += mpl3115.o
obj-$(CONFIG_MS5611) += ms5611_core.o
obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index a0d7deeac62f..73f2f0c46e62 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -1,5 +1,5 @@
/*
- * mpl115.c - Support for Freescale MPL115A2 pressure/temperature sensor
+ * mpl115.c - Support for Freescale MPL115A pressure/temperature sensor
*
* Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
@@ -7,17 +7,16 @@
* the GNU General Public License. See the file COPYING in the main
* directory of this archive for more details.
*
- * (7-bit I2C slave address 0x60)
- *
* TODO: shutdown pin
*
*/
#include <linux/module.h>
-#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/delay.h>
+#include "mpl115.h"
+
#define MPL115_PADC 0x00 /* pressure ADC output value, MSB first, 10 bit */
#define MPL115_TADC 0x02 /* temperature ADC output value, MSB first, 10 bit */
#define MPL115_A0 0x04 /* 12 bit integer, 3 bit fraction */
@@ -27,16 +26,18 @@
#define MPL115_CONVERT 0x12 /* convert temperature and pressure */
struct mpl115_data {
- struct i2c_client *client;
+ struct device *dev;
struct mutex lock;
s16 a0;
s16 b1, b2;
s16 c12;
+ const struct mpl115_ops *ops;
};
static int mpl115_request(struct mpl115_data *data)
{
- int ret = i2c_smbus_write_byte_data(data->client, MPL115_CONVERT, 0);
+ int ret = data->ops->write(data->dev, MPL115_CONVERT, 0);
+
if (ret < 0)
return ret;
@@ -57,12 +58,12 @@ static int mpl115_comp_pressure(struct mpl115_data *data, int *val, int *val2)
if (ret < 0)
goto done;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_PADC);
+ ret = data->ops->read(data->dev, MPL115_PADC);
if (ret < 0)
goto done;
padc = ret >> 6;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC);
+ ret = data->ops->read(data->dev, MPL115_TADC);
if (ret < 0)
goto done;
tadc = ret >> 6;
@@ -90,7 +91,7 @@ static int mpl115_read_temp(struct mpl115_data *data)
ret = mpl115_request(data);
if (ret < 0)
goto done;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_TADC);
+ ret = data->ops->read(data->dev, MPL115_TADC);
done:
mutex_unlock(&data->lock);
return ret;
@@ -145,66 +146,53 @@ static const struct iio_info mpl115_info = {
.driver_module = THIS_MODULE,
};
-static int mpl115_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+int mpl115_probe(struct device *dev, const char *name,
+ const struct mpl115_ops *ops)
{
struct mpl115_data *data;
struct iio_dev *indio_dev;
int ret;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
- data->client = client;
+ data->dev = dev;
+ data->ops = ops;
mutex_init(&data->lock);
- i2c_set_clientdata(client, indio_dev);
indio_dev->info = &mpl115_info;
- indio_dev->name = id->name;
- indio_dev->dev.parent = &client->dev;
+ indio_dev->name = name;
+ indio_dev->dev.parent = dev;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mpl115_channels;
indio_dev->num_channels = ARRAY_SIZE(mpl115_channels);
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_A0);
+ ret = data->ops->init(data->dev);
+ if (ret)
+ return ret;
+
+ ret = data->ops->read(data->dev, MPL115_A0);
if (ret < 0)
return ret;
data->a0 = ret;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_B1);
+ ret = data->ops->read(data->dev, MPL115_B1);
if (ret < 0)
return ret;
data->b1 = ret;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_B2);
+ ret = data->ops->read(data->dev, MPL115_B2);
if (ret < 0)
return ret;
data->b2 = ret;
- ret = i2c_smbus_read_word_swapped(data->client, MPL115_C12);
+ ret = data->ops->read(data->dev, MPL115_C12);
if (ret < 0)
return ret;
data->c12 = ret;
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
-
-static const struct i2c_device_id mpl115_id[] = {
- { "mpl115", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mpl115_id);
-
-static struct i2c_driver mpl115_driver = {
- .driver = {
- .name = "mpl115",
- },
- .probe = mpl115_probe,
- .id_table = mpl115_id,
-};
-module_i2c_driver(mpl115_driver);
+EXPORT_SYMBOL_GPL(mpl115_probe);
MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
MODULE_DESCRIPTION("Freescale MPL115 pressure/temperature driver");
diff --git a/drivers/iio/pressure/mpl115.h b/drivers/iio/pressure/mpl115.h
new file mode 100644
index 000000000000..01b652774dc3
--- /dev/null
+++ b/drivers/iio/pressure/mpl115.h
@@ -0,0 +1,24 @@
+/*
+ * Freescale MPL115A pressure/temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#ifndef _MPL115_H_
+#define _MPL115_H_
+
+struct mpl115_ops {
+ int (*init)(struct device *);
+ int (*read)(struct device *, u8);
+ int (*write)(struct device *, u8, u8);
+};
+
+int mpl115_probe(struct device *dev, const char *name,
+ const struct mpl115_ops *ops);
+
+#endif
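
The ops table above is what keeps the core transport-agnostic: each bus glue driver supplies its own register accessors and hands them to mpl115_probe(). A minimal sketch of a hypothetical backend (the example_* names are illustrative, not part of the patch):

#include <linux/device.h>

#include "mpl115.h"

static int example_init(struct device *dev)
{
	return 0;	/* nothing to set up for this transport */
}

static int example_read(struct device *dev, u8 address)
{
	/* a real backend returns the 16-bit register value at 'address'
	 * (the device sends MSB first), or a negative errno on failure */
	return 0;
}

static int example_write(struct device *dev, u8 address, u8 value)
{
	/* a real backend writes a single byte to 'address' */
	return 0;
}

static const struct mpl115_ops example_ops = {
	.init	= example_init,
	.read	= example_read,
	.write	= example_write,
};

/* a bus probe would then call: mpl115_probe(dev, "mpl115", &example_ops); */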
diff --git a/drivers/iio/pressure/mpl115_i2c.c b/drivers/iio/pressure/mpl115_i2c.c
new file mode 100644
index 000000000000..1a29be462f6e
--- /dev/null
+++ b/drivers/iio/pressure/mpl115_i2c.c
@@ -0,0 +1,67 @@
+/*
+ * Freescale MPL115A2 pressure/temperature sensor
+ *
+ * Copyright (c) 2014 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x60)
+ *
+ * Datasheet: http://www.nxp.com/files/sensors/doc/data_sheet/MPL115A2.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+#include "mpl115.h"
+
+static int mpl115_i2c_init(struct device *dev)
+{
+ return 0;
+}
+
+static int mpl115_i2c_read(struct device *dev, u8 address)
+{
+ return i2c_smbus_read_word_swapped(to_i2c_client(dev), address);
+}
+
+static int mpl115_i2c_write(struct device *dev, u8 address, u8 value)
+{
+ return i2c_smbus_write_byte_data(to_i2c_client(dev), address, value);
+}
+
+static const struct mpl115_ops mpl115_i2c_ops = {
+ .init = mpl115_i2c_init,
+ .read = mpl115_i2c_read,
+ .write = mpl115_i2c_write,
+};
+
+static int mpl115_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -EOPNOTSUPP;
+
+ return mpl115_probe(&client->dev, id->name, &mpl115_i2c_ops);
+}
+
+static const struct i2c_device_id mpl115_i2c_id[] = {
+ { "mpl115", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mpl115_i2c_id);
+
+static struct i2c_driver mpl115_i2c_driver = {
+ .driver = {
+ .name = "mpl115",
+ },
+ .probe = mpl115_i2c_probe,
+ .id_table = mpl115_i2c_id,
+};
+module_i2c_driver(mpl115_i2c_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Freescale MPL115A2 pressure/temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl115_spi.c b/drivers/iio/pressure/mpl115_spi.c
new file mode 100644
index 000000000000..9ebf55f5b3aa
--- /dev/null
+++ b/drivers/iio/pressure/mpl115_spi.c
@@ -0,0 +1,106 @@
+/*
+ * Freescale MPL115A1 pressure/temperature sensor
+ *
+ * Copyright (c) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Datasheet: http://www.nxp.com/files/sensors/doc/data_sheet/MPL115A1.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "mpl115.h"
+
+#define MPL115_SPI_WRITE(address) ((address) << 1)
+#define MPL115_SPI_READ(address) (0x80 | (address) << 1)
+
+struct mpl115_spi_buf {
+ u8 tx[4];
+ u8 rx[4];
+};
+
+static int mpl115_spi_init(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct mpl115_spi_buf *buf;
+
+ buf = devm_kzalloc(dev, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, buf);
+
+ return 0;
+}
+
+static int mpl115_spi_read(struct device *dev, u8 address)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct mpl115_spi_buf *buf = spi_get_drvdata(spi);
+ struct spi_transfer xfer = {
+ .tx_buf = buf->tx,
+ .rx_buf = buf->rx,
+ .len = 4,
+ };
+ int ret;
+
+ buf->tx[0] = MPL115_SPI_READ(address);
+ buf->tx[2] = MPL115_SPI_READ(address + 1);
+
+ ret = spi_sync_transfer(spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ return (buf->rx[1] << 8) | buf->rx[3];
+}
+
+static int mpl115_spi_write(struct device *dev, u8 address, u8 value)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct mpl115_spi_buf *buf = spi_get_drvdata(spi);
+ struct spi_transfer xfer = {
+ .tx_buf = buf->tx,
+ .len = 2,
+ };
+
+ buf->tx[0] = MPL115_SPI_WRITE(address);
+ buf->tx[1] = value;
+
+ return spi_sync_transfer(spi, &xfer, 1);
+}
+
+static const struct mpl115_ops mpl115_spi_ops = {
+ .init = mpl115_spi_init,
+ .read = mpl115_spi_read,
+ .write = mpl115_spi_write,
+};
+
+static int mpl115_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ return mpl115_probe(&spi->dev, id->name, &mpl115_spi_ops);
+}
+
+static const struct spi_device_id mpl115_spi_ids[] = {
+ { "mpl115", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mpl115_spi_ids);
+
+static struct spi_driver mpl115_spi_driver = {
+ .driver = {
+ .name = "mpl115",
+ },
+ .probe = mpl115_spi_probe,
+ .id_table = mpl115_spi_ids,
+};
+module_spi_driver(mpl115_spi_driver);
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("Freescale MPL115A1 pressure/temperature driver");
+MODULE_LICENSE("GPL");
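
A note on the framing above: the MPL115A1 presents each 16-bit value as two 8-bit registers, MSB at the address and LSB at address + 1, so mpl115_spi_read() packs both register-read commands into a single 4-byte full-duplex transfer. Byte by byte (as implied by the code, sketched here for clarity):

/*
 * tx: [ READ(addr) ][ dummy ][ READ(addr + 1) ][ dummy ]
 * rx: [   unused   ][  MSB  ][     unused     ][  LSB  ]
 *
 * hence the result (buf->rx[1] << 8) | buf->rx[3]
 */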
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 23b93c797dba..8b08e4b7e3a9 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -51,6 +51,8 @@ struct ms5611_state {
struct ms5611_chip_info *chip_info;
};
-int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type);
+int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
+ const char *name, int type);
+int ms5611_remove(struct iio_dev *indio_dev);
#endif /* _MS5611_H */
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 2f3d9b4aca4e..992ad8d3b67a 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -16,7 +16,11 @@
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#include "ms5611.h"
static bool ms5611_prom_is_valid(u16 *prom, size_t len)
@@ -133,17 +137,17 @@ static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf
t = 2000 + ((chip_info->prom[6] * dt) >> 23);
if (t < 2000) {
- s64 off2, sens2, t2;
+ s64 off2, sens2, t2, tmp;
t2 = (dt * dt) >> 31;
- off2 = (61 * (t - 2000) * (t - 2000)) >> 4;
- sens2 = off2 << 1;
+ tmp = (t - 2000) * (t - 2000);
+ off2 = (61 * tmp) >> 4;
+ sens2 = tmp << 1;
if (t < -1500) {
- s64 tmp = (t + 1500) * (t + 1500);
-
+ tmp = (t + 1500) * (t + 1500);
off2 += 15 * tmp;
- sens2 += (8 * tmp);
+ sens2 += 8 * tmp;
}
t -= t2;
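
The rework above only factors the shared square (t - 2000)^2 into tmp; numerically nothing changes. For reference, the low-temperature path still computes the MS5607 datasheet's second-order compensation (TEMP below 20 °C, i.e. t < 2000):

    T_2 = \frac{dT^2}{2^{31}}, \qquad
    OFF_2 = \frac{61\,(TEMP - 2000)^2}{2^4}, \qquad
    SENS_2 = 2\,(TEMP - 2000)^2

and additionally, below -15 °C (t < -1500):

    OFF_2 \mathrel{+}= 15\,(TEMP + 1500)^2, \qquad
    SENS_2 \mathrel{+}= 8\,(TEMP + 1500)^2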
@@ -173,6 +177,28 @@ static int ms5611_reset(struct iio_dev *indio_dev)
return 0;
}
+static irqreturn_t ms5611_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ms5611_state *st = iio_priv(indio_dev);
+ s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]);
+ mutex_unlock(&st->lock);
+ if (ret < 0)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static int ms5611_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -201,11 +227,25 @@ static int ms5611_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 10;
+ return IIO_VAL_INT;
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
}
return -EINVAL;
}
+static const unsigned long ms5611_scan_masks[] = {0x3, 0};
+
static struct ms5611_chip_info chip_info_tbl[] = {
[MS5611] = {
.temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
@@ -218,12 +258,29 @@ static struct ms5611_chip_info chip_info_tbl[] = {
static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_PRESSURE,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
},
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
- }
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 32,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
static const struct iio_info ms5611_info = {
@@ -234,6 +291,18 @@ static const struct iio_info ms5611_info = {
static int ms5611_init(struct iio_dev *indio_dev)
{
int ret;
+ struct regulator *vdd = devm_regulator_get(indio_dev->dev.parent,
+ "vdd");
+
+ /* Enable attached regulator if any. */
+ if (!IS_ERR(vdd)) {
+ ret = regulator_enable(vdd);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "failed to enable Vdd supply: %d\n", ret);
+ return ret;
+ }
+ }
ret = ms5611_reset(indio_dev);
if (ret < 0)
@@ -242,7 +311,8 @@ static int ms5611_init(struct iio_dev *indio_dev)
return ms5611_read_prom(indio_dev);
}
-int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type)
+int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
+ const char *name, int type)
{
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
@@ -250,20 +320,48 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type)
mutex_init(&st->lock);
st->chip_info = &chip_info_tbl[type];
indio_dev->dev.parent = dev;
- indio_dev->name = dev->driver->name;
+ indio_dev->name = name;
indio_dev->info = &ms5611_info;
indio_dev->channels = ms5611_channels;
indio_dev->num_channels = ARRAY_SIZE(ms5611_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->available_scan_masks = ms5611_scan_masks;
ret = ms5611_init(indio_dev);
if (ret < 0)
return ret;
- return devm_iio_device_register(dev, indio_dev);
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ms5611_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(dev, "unable to register iio device\n");
+ goto err_buffer_cleanup;
+ }
+
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return ret;
}
EXPORT_SYMBOL(ms5611_probe);
+int ms5611_remove(struct iio_dev *indio_dev)
+{
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(ms5611_remove);
+
MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>");
MODULE_DESCRIPTION("MS5611 core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 245797d1ecf0..7f6fc8eee922 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -92,19 +92,25 @@ static int ms5611_i2c_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_WRITE_BYTE |
I2C_FUNC_SMBUS_READ_WORD_DATA |
I2C_FUNC_SMBUS_READ_I2C_BLOCK))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
st->reset = ms5611_i2c_reset;
st->read_prom_word = ms5611_i2c_read_prom_word;
st->read_adc_temp_and_pressure = ms5611_i2c_read_adc_temp_and_pressure;
st->client = client;
- return ms5611_probe(indio_dev, &client->dev, id->driver_data);
+ return ms5611_probe(indio_dev, &client->dev, id->name, id->driver_data);
+}
+
+static int ms5611_i2c_remove(struct i2c_client *client)
+{
+ return ms5611_remove(i2c_get_clientdata(client));
}
static const struct i2c_device_id ms5611_id[] = {
@@ -120,6 +126,7 @@ static struct i2c_driver ms5611_driver = {
},
.id_table = ms5611_id,
.probe = ms5611_i2c_probe,
+ .remove = ms5611_i2c_remove,
};
module_i2c_driver(ms5611_driver);
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index aaa0c4ba91a7..5cc009e85f0e 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -90,6 +90,8 @@ static int ms5611_spi_probe(struct spi_device *spi)
if (!indio_dev)
return -ENOMEM;
+ spi_set_drvdata(spi, indio_dev);
+
spi->mode = SPI_MODE_0;
spi->max_speed_hz = 20000000;
spi->bits_per_word = 8;
@@ -103,8 +105,13 @@ static int ms5611_spi_probe(struct spi_device *spi)
st->read_adc_temp_and_pressure = ms5611_spi_read_adc_temp_and_pressure;
st->client = spi;
- return ms5611_probe(indio_dev, &spi->dev,
- spi_get_device_id(spi)->driver_data);
+ return ms5611_probe(indio_dev, &spi->dev, spi_get_device_id(spi)->name,
+ spi_get_device_id(spi)->driver_data);
+}
+
+static int ms5611_spi_remove(struct spi_device *spi)
+{
+ return ms5611_remove(spi_get_drvdata(spi));
}
static const struct spi_device_id ms5611_id[] = {
@@ -120,6 +127,7 @@ static struct spi_driver ms5611_driver = {
},
.id_table = ms5611_id,
.probe = ms5611_spi_probe,
+ .remove = ms5611_spi_remove,
};
module_spi_driver(ms5611_driver);
diff --git a/drivers/iio/pressure/ms5637.c b/drivers/iio/pressure/ms5637.c
index e8d0e0da938d..e68052c118e6 100644
--- a/drivers/iio/pressure/ms5637.c
+++ b/drivers/iio/pressure/ms5637.c
@@ -136,7 +136,7 @@ static int ms5637_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
dev_err(&client->dev,
"Adapter does not support some i2c transaction\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index b39a2fb0671c..172393ad34af 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -62,6 +62,8 @@
#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
+#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
+#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
@@ -100,6 +102,8 @@
#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
+#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
+#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
#define ST_PRESS_LPS25H_MULTIREAD_BIT true
#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
@@ -220,6 +224,8 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
.mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
+ .mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
},
.multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
.bootime = 2,
@@ -304,6 +310,8 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
.mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
.mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
+ .mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
},
.multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
.bootime = 2,
diff --git a/drivers/iio/pressure/t5403.c b/drivers/iio/pressure/t5403.c
index e11cd3938d67..2667e71721f5 100644
--- a/drivers/iio/pressure/t5403.c
+++ b/drivers/iio/pressure/t5403.c
@@ -221,7 +221,7 @@ static int t5403_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK))
- return -ENODEV;
+ return -EOPNOTSUPP;
ret = i2c_smbus_read_byte_data(client, T5403_SLAVE_ADDR);
if (ret < 0)
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index db35e04a0637..4f502386aa86 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -278,7 +278,7 @@ static int lidar_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
data->xfer = lidar_smbus_xfer;
else
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
indio_dev->info = &lidar_info;
indio_dev->name = LIDAR_DRV_NAME;
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index a570c2e2aac3..4b645fc672aa 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -516,7 +516,7 @@ static int mlx90614_probe(struct i2c_client *client,
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index e78c1069a6a9..18c9b43c02cb 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -205,7 +205,7 @@ static int tmp006_probe(struct i2c_client *client,
int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
+ return -EOPNOTSUPP;
if (!tmp006_check_identification(client)) {
dev_err(&client->dev, "no TMP006 sensor\n");
diff --git a/drivers/iio/temperature/tsys01.c b/drivers/iio/temperature/tsys01.c
index 05c12060ce8d..3e60c6189d98 100644
--- a/drivers/iio/temperature/tsys01.c
+++ b/drivers/iio/temperature/tsys01.c
@@ -190,7 +190,7 @@ static int tsys01_i2c_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
dev_err(&client->dev,
"Adapter does not support some i2c transaction\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
diff --git a/drivers/iio/temperature/tsys02d.c b/drivers/iio/temperature/tsys02d.c
index 4c1fbd52ea08..ab6fe8f6f2d1 100644
--- a/drivers/iio/temperature/tsys02d.c
+++ b/drivers/iio/temperature/tsys02d.c
@@ -137,7 +137,7 @@ static int tsys02d_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
dev_err(&client->dev,
"Adapter does not support some i2c transaction\n");
- return -ENODEV;
+ return -EOPNOTSUPP;
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dev_data));
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 8a8440c0eed1..6425c0e5d18a 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -68,6 +68,7 @@ source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
+source "drivers/infiniband/hw/i40iw/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/mlx5/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
@@ -82,4 +83,6 @@ source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
+source "drivers/infiniband/sw/rdmavt/Kconfig"
+
endif # INFINIBAND
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index dc21836b5a8d..fad0b44c356f 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND) += hw/
obj-$(CONFIG_INFINIBAND) += ulp/
+obj-$(CONFIG_INFINIBAND) += sw/
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 53343ffbff7a..cb00d59da456 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1043,8 +1043,8 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_port(device, port, tprops);
if (ret) {
- printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
- ret, device->name);
+ pr_warn("ib_query_port failed (%d) for %s\n",
+ ret, device->name);
goto err;
}
@@ -1067,8 +1067,8 @@ static void ib_cache_update(struct ib_device *device,
for (i = 0; i < pkey_cache->table_len; ++i) {
ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
if (ret) {
- printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
+ ret, device->name, i);
goto err;
}
}
@@ -1078,8 +1078,8 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_gid(device, port, i,
gid_cache->table + i, NULL);
if (ret) {
- printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
+ ret, device->name, i);
goto err;
}
}
@@ -1161,8 +1161,7 @@ int ib_cache_setup_one(struct ib_device *device)
GFP_KERNEL);
if (!device->cache.pkey_cache ||
!device->cache.lmc_cache) {
- printk(KERN_WARNING "Couldn't allocate cache "
- "for %s\n", device->name);
+ pr_warn("Couldn't allocate cache for %s\n", device->name);
return -ENOMEM;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9729639df407..93ab0ae97208 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1206,6 +1206,10 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
req->has_gid = true;
req->service_id = req_param->primary_path->service_id;
req->pkey = be16_to_cpu(req_param->primary_path->pkey);
+ if (req->pkey != req_param->bth_pkey)
+ pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
+ "RDMA CMA: in the future this may cause the request to be dropped\n",
+ req_param->bth_pkey, req->pkey);
break;
case IB_CM_SIDR_REQ_RECEIVED:
req->device = sidr_param->listen_id->device;
@@ -1213,6 +1217,10 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
req->has_gid = false;
req->service_id = sidr_param->service_id;
req->pkey = sidr_param->pkey;
+ if (req->pkey != sidr_param->bth_pkey)
+ pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
+ "RDMA CMA: in the future this may cause the request to be dropped\n",
+ sidr_param->bth_pkey, req->pkey);
break;
default:
return -EINVAL;
@@ -1713,7 +1721,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
break;
default:
- printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+ pr_err("RDMA CMA: unexpected IB CM event: %d\n",
ib_event->event);
goto out;
}
@@ -2186,8 +2194,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(id, id_priv->backlog);
if (ret)
- printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
- "listening on device %s\n", ret, cma_dev->device->name);
+ pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
+ ret, cma_dev->device->name);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -3239,7 +3247,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
event.status = 0;
break;
default:
- printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+ pr_err("RDMA CMA: unexpected IB CM event: %d\n",
ib_event->event);
goto out;
}
@@ -4003,8 +4011,8 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
if ((dev_addr->bound_dev_if == ndev->ifindex) &&
(net_eq(dev_net(ndev), dev_addr->net)) &&
memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
- printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
- ndev->name, &id_priv->id);
+ pr_info("RDMA CM addr change for ndev %s used by id %p\n",
+ ndev->name, &id_priv->id);
work = kzalloc(sizeof *work, GFP_KERNEL);
if (!work)
return -ENOMEM;
@@ -4287,7 +4295,7 @@ static int __init cma_init(void)
goto err;
if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
- printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+ pr_warn("RDMA CMA: failed to add netlink callback\n");
cma_configfs_init();
return 0;
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 18b112aa577e..41573df1d9fc 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -49,8 +49,6 @@ struct cma_dev_group {
char name[IB_DEVICE_NAME_MAX];
struct config_group device_group;
struct config_group ports_group;
- struct config_group *default_dev_group[2];
- struct config_group **default_ports_group;
struct cma_dev_port_group *ports;
};
@@ -158,7 +156,6 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
unsigned int i;
unsigned int ports_num;
struct cma_dev_port_group *ports;
- struct config_group **ports_group;
int err;
ibdev = cma_get_ib_dev(cma_dev);
@@ -169,9 +166,8 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
ports_num = ibdev->phys_port_cnt;
ports = kcalloc(ports_num, sizeof(*cma_dev_group->ports),
GFP_KERNEL);
- ports_group = kcalloc(ports_num + 1, sizeof(*ports_group), GFP_KERNEL);
- if (!ports || !ports_group) {
+ if (!ports) {
err = -ENOMEM;
goto free;
}
@@ -185,18 +181,16 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
config_group_init_type_name(&ports[i].group,
port_str,
&cma_port_group_type);
- ports_group[i] = &ports[i].group;
+ configfs_add_default_group(&ports[i].group,
+ &cma_dev_group->ports_group);
+
}
- ports_group[i] = NULL;
- cma_dev_group->default_ports_group = ports_group;
cma_dev_group->ports = ports;
return 0;
free:
kfree(ports);
- kfree(ports_group);
cma_dev_group->ports = NULL;
- cma_dev_group->default_ports_group = NULL;
return err;
}
@@ -220,9 +214,7 @@ static void release_cma_ports_group(struct config_item *item)
ports_group);
kfree(cma_dev_group->ports);
- kfree(cma_dev_group->default_ports_group);
cma_dev_group->ports = NULL;
- cma_dev_group->default_ports_group = NULL;
};
static struct configfs_item_operations cma_ports_item_ops = {
@@ -263,22 +255,17 @@ static struct config_group *make_cma_dev(struct config_group *group,
strncpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
- err = make_cma_ports(cma_dev_group, cma_dev);
- if (err)
- goto fail;
-
- cma_dev_group->ports_group.default_groups =
- cma_dev_group->default_ports_group;
config_group_init_type_name(&cma_dev_group->ports_group, "ports",
&cma_ports_group_type);
- cma_dev_group->device_group.default_groups
- = cma_dev_group->default_dev_group;
- cma_dev_group->default_dev_group[0] = &cma_dev_group->ports_group;
- cma_dev_group->default_dev_group[1] = NULL;
+ err = make_cma_ports(cma_dev_group, cma_dev);
+ if (err)
+ goto fail;
config_group_init_type_name(&cma_dev_group->device_group, name,
&cma_device_group_type);
+ configfs_add_default_group(&cma_dev_group->ports_group,
+ &cma_dev_group->device_group);
cma_deref_dev(cma_dev);
return &cma_dev_group->device_group;
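
The reordering above follows from the new configfs API: child groups are now attached with configfs_add_default_group() before the parent is registered, instead of being collected into a caller-owned NULL-terminated array. In outline (a sketch of the API contract, not code from this patch):

/*
 * old: parent.default_groups = array;     caller allocates/frees the
 *                                         NULL-terminated pointer array
 * new: config_group_init_type_name(&child, name, type);
 *      configfs_add_default_group(&child, &parent);
 *                                         configfs owns the linkage
 */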
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 94b80a51ab68..10979844026a 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -115,8 +115,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
- printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
- device->name, mandatory_table[i].name);
+ pr_warn("Device %s is missing mandatory function %s\n",
+ device->name, mandatory_table[i].name);
return -EINVAL;
}
}
@@ -255,8 +255,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
context = kmalloc(sizeof *context, GFP_KERNEL);
if (!context) {
- printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
- device->name, client->name);
+ pr_warn("Couldn't allocate client context for %s/%s\n",
+ device->name, client->name);
return -ENOMEM;
}
@@ -343,29 +343,29 @@ int ib_register_device(struct ib_device *device,
ret = read_port_immutable(device);
if (ret) {
- printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
- device->name);
+ pr_warn("Couldn't create per port immutable data %s\n",
+ device->name);
goto out;
}
ret = ib_cache_setup_one(device);
if (ret) {
- printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
+ pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
goto out;
}
memset(&device->attrs, 0, sizeof(device->attrs));
ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
- printk(KERN_WARNING "Couldn't query the device attributes\n");
+ pr_warn("Couldn't query the device attributes\n");
ib_cache_cleanup_one(device);
goto out;
}
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
- printk(KERN_WARNING "Couldn't register device %s with driver model\n",
- device->name);
+ pr_warn("Couldn't register device %s with driver model\n",
+ device->name);
ib_cache_cleanup_one(device);
goto out;
}
@@ -566,8 +566,8 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
goto out;
}
- printk(KERN_WARNING "No client context found for %s/%s\n",
- device->name, client->name);
+ pr_warn("No client context found for %s/%s\n",
+ device->name, client->name);
out:
spin_unlock_irqrestore(&device->client_data_lock, flags);
@@ -650,10 +650,23 @@ int ib_query_port(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr)
{
+ union ib_gid gid;
+ int err;
+
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
return -EINVAL;
- return device->query_port(device, port_num, port_attr);
+ memset(port_attr, 0, sizeof(*port_attr));
+ err = device->query_port(device, port_num, port_attr);
+ if (err || port_attr->subnet_prefix)
+ return err;
+
+ err = ib_query_gid(device, port_num, 0, &gid, NULL);
+ if (err)
+ return err;
+
+ port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
+ return 0;
}
EXPORT_SYMBOL(ib_query_port);
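
The fallback added above serves drivers whose query_port() does not fill in subnet_prefix: GID index 0 of an IB port is the port GID, whose upper 64 bits are the subnet prefix, so the value can be recovered via ib_query_gid() whenever the driver leaves the field at zero. A sketch of the relationship:

/*
 * GID[0] = subnet_prefix (64 bits, big-endian) : interface_id (64 bits)
 * port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix)
 */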
@@ -960,13 +973,13 @@ static int __init ib_core_init(void)
ret = class_register(&ib_class);
if (ret) {
- printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+ pr_warn("Couldn't create InfiniBand device class\n");
goto err_comp;
}
ret = ibnl_init();
if (ret) {
- printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+ pr_warn("Couldn't init IB netlink interface\n");
goto err_sysfs;
}
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 6ac3683c144b..cdbb1f1a6d97 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -150,8 +150,8 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
#ifdef DEBUG
if (fmr->ref_count !=0) {
- printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n",
- fmr, fmr->ref_count);
+ pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
+ fmr, fmr->ref_count);
}
#endif
}
@@ -167,7 +167,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
ret = ib_unmap_fmr(&fmr_list);
if (ret)
- printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
+ pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
spin_lock_irq(&pool->pool_lock);
list_splice(&unmap_list, &pool->free_list);
@@ -222,8 +222,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
device = pd->device;
if (!device->alloc_fmr || !device->dealloc_fmr ||
!device->map_phys_fmr || !device->unmap_fmr) {
- printk(KERN_INFO PFX "Device %s does not support FMRs\n",
- device->name);
+ pr_info(PFX "Device %s does not support FMRs\n", device->name);
return ERR_PTR(-ENOSYS);
}
@@ -233,13 +232,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
max_remaps = device->attrs.max_map_per_fmr;
pool = kmalloc(sizeof *pool, GFP_KERNEL);
- if (!pool) {
- printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
+ if (!pool)
return ERR_PTR(-ENOMEM);
- }
pool->cache_bucket = NULL;
-
pool->flush_function = params->flush_function;
pool->flush_arg = params->flush_arg;
@@ -251,7 +247,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
GFP_KERNEL);
if (!pool->cache_bucket) {
- printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
+ pr_warn(PFX "Failed to allocate cache in pool\n");
ret = -ENOMEM;
goto out_free_pool;
}
@@ -275,7 +271,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
"ib_fmr(%s)",
device->name);
if (IS_ERR(pool->thread)) {
- printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
+ pr_warn(PFX "couldn't start cleanup thread\n");
ret = PTR_ERR(pool->thread);
goto out_free_pool;
}
@@ -294,11 +290,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
for (i = 0; i < params->pool_size; ++i) {
fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
- if (!fmr) {
- printk(KERN_WARNING PFX "failed to allocate fmr "
- "struct for FMR %d\n", i);
+ if (!fmr)
goto out_fail;
- }
fmr->pool = pool;
fmr->remap_count = 0;
@@ -307,8 +300,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
if (IS_ERR(fmr->fmr)) {
- printk(KERN_WARNING PFX "fmr_create failed "
- "for FMR %d\n", i);
+ pr_warn(PFX "fmr_create failed for FMR %d\n",
+ i);
kfree(fmr);
goto out_fail;
}
@@ -363,8 +356,8 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
}
if (i < pool->pool_size)
- printk(KERN_WARNING PFX "pool still has %d regions registered\n",
- pool->pool_size - i);
+ pr_warn(PFX "pool still has %d regions registered\n",
+ pool->pool_size - i);
kfree(pool->cache_bucket);
kfree(pool);
@@ -463,7 +456,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
list_add(&fmr->list, &pool->free_list);
spin_unlock_irqrestore(&pool->pool_lock, flags);
- printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
+ pr_warn(PFX "fmr_map returns %d\n", result);
return ERR_PTR(result);
}
@@ -517,8 +510,8 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
#ifdef DEBUG
if (fmr->ref_count < 0)
- printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
- fmr, fmr->ref_count);
+ pr_warn(PFX "FMR %p has ref count %d < 0\n",
+ fmr, fmr->ref_count);
#endif
spin_unlock_irqrestore(&pool->pool_lock, flags);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index ff9163dc1596..e28a160cdab0 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -50,6 +50,8 @@
#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
#include "iwcm.h"
@@ -57,6 +59,16 @@ MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");
+static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
+ [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
+ [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+ [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+ [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
+ [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+ [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
+ [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
+};
+
static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
struct work_struct work;
@@ -402,6 +414,11 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ if (cm_id->mapped) {
+ iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
+ iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
+ }
+
(void)iwcm_deref_id(cm_id_priv);
}
@@ -426,6 +443,97 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
}
EXPORT_SYMBOL(iw_destroy_cm_id);
+/**
+ * iw_cm_check_wildcard - If the IP address is a wildcard, use the original
+ * @pm_addr: sockaddr containing the IP address to check for a wildcard
+ * @cm_addr: sockaddr containing the actual IP address
+ * @cm_outaddr: sockaddr in which to set the IP address, leaving the port as-is
+ *
+ * Checks pm_addr for a wildcard IP and, if found, sets cm_outaddr's
+ * IP address to the actual one (cm_addr).
+ */
+static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
+ struct sockaddr_storage *cm_addr,
+ struct sockaddr_storage *cm_outaddr)
+{
+ if (pm_addr->ss_family == AF_INET) {
+ struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
+
+ if (pm4_addr->sin_addr.s_addr == INADDR_ANY) {
+ struct sockaddr_in *cm4_addr =
+ (struct sockaddr_in *)cm_addr;
+ struct sockaddr_in *cm4_outaddr =
+ (struct sockaddr_in *)cm_outaddr;
+
+ cm4_outaddr->sin_addr = cm4_addr->sin_addr;
+ }
+ } else {
+ struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;
+
+ if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
+ struct sockaddr_in6 *cm6_addr =
+ (struct sockaddr_in6 *)cm_addr;
+ struct sockaddr_in6 *cm6_outaddr =
+ (struct sockaddr_in6 *)cm_outaddr;
+
+ cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
+ }
+ }
+}
+
+/**
+ * iw_cm_map - Use portmapper to map the ports
+ * @cm_id: connection manager pointer
+ * @active: Indicates the active side when true
+ * Returns nonzero on error, and only if iwpm_create_mapinfo() fails.
+ *
+ * Tries to add a mapping for a port using the portmapper. If the
+ * IP/port is mapped successfully, it checks the mapped remote IP
+ * address for a wildcard and replaces a zero IP address with the
+ * actual remote_addr.
+ */
+static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
+{
+ struct iwpm_dev_data pm_reg_msg;
+ struct iwpm_sa_data pm_msg;
+ int status;
+
+ cm_id->m_local_addr = cm_id->local_addr;
+ cm_id->m_remote_addr = cm_id->remote_addr;
+
+ memcpy(pm_reg_msg.dev_name, cm_id->device->name,
+ sizeof(pm_reg_msg.dev_name));
+ memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
+ sizeof(pm_reg_msg.if_name));
+
+ if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
+ !iwpm_valid_pid())
+ return 0;
+
+ cm_id->mapped = true;
+ pm_msg.loc_addr = cm_id->local_addr;
+ pm_msg.rem_addr = cm_id->remote_addr;
+ if (active)
+ status = iwpm_add_and_query_mapping(&pm_msg,
+ RDMA_NL_IWCM);
+ else
+ status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);
+
+ if (!status) {
+ cm_id->m_local_addr = pm_msg.mapped_loc_addr;
+ if (active) {
+ cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
+ iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
+ &cm_id->remote_addr,
+ &cm_id->m_remote_addr);
+ }
+ }
+
+ return iwpm_create_mapinfo(&cm_id->local_addr,
+ &cm_id->m_local_addr,
+ RDMA_NL_IWCM);
+}
+
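
Putting the new path together, iw_cm_map() runs through this sequence (a summary of the code above, not extra behaviour):

/*
 * iwpm_register_pid()             announce the client to the port-mapper
 *                                 daemon; if this fails or no valid pid
 *                                 exists, return 0 and keep the unmapped
 *                                 addresses
 * active side:
 *   iwpm_add_and_query_mapping()  map the local address and learn the
 *                                 peer's mapped address
 * passive side:
 *   iwpm_add_mapping()            map the local (listening) address
 * iwpm_create_mapinfo()           record the local <-> mapped pair; the
 *                                 only call whose failure reaches the
 *                                 caller
 */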
/*
* CM_ID <-- LISTEN
*
@@ -452,7 +560,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
case IW_CM_STATE_IDLE:
cm_id_priv->state = IW_CM_STATE_LISTEN;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+ ret = iw_cm_map(cm_id, false);
+ if (!ret)
+ ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
if (ret)
cm_id_priv->state = IW_CM_STATE_IDLE;
spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -582,39 +692,37 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id_priv->state != IW_CM_STATE_IDLE) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
/* Get the ib_qp given the QPN */
qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
if (!qp) {
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
cm_id->device->iwcm->add_ref(qp);
cm_id_priv->qp = qp;
cm_id_priv->state = IW_CM_STATE_CONN_SENT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- ret = cm_id->device->iwcm->connect(cm_id, iw_param);
- if (ret) {
- spin_lock_irqsave(&cm_id_priv->lock, flags);
- if (cm_id_priv->qp) {
- cm_id->device->iwcm->rem_ref(qp);
- cm_id_priv->qp = NULL;
- }
- spin_unlock_irqrestore(&cm_id_priv->lock, flags);
- BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
- cm_id_priv->state = IW_CM_STATE_IDLE;
- clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
- wake_up_all(&cm_id_priv->connect_wait);
- }
+ ret = iw_cm_map(cm_id, true);
+ if (!ret)
+ ret = cm_id->device->iwcm->connect(cm_id, iw_param);
+ if (!ret)
+ return 0; /* success */
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+ if (cm_id_priv->qp) {
+ cm_id->device->iwcm->rem_ref(qp);
+ cm_id_priv->qp = NULL;
+ }
+ cm_id_priv->state = IW_CM_STATE_IDLE;
+err:
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+ wake_up_all(&cm_id_priv->connect_wait);
return ret;
}
EXPORT_SYMBOL(iw_cm_connect);
@@ -656,8 +764,23 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
goto out;
cm_id->provider_data = iw_event->provider_data;
- cm_id->local_addr = iw_event->local_addr;
- cm_id->remote_addr = iw_event->remote_addr;
+ cm_id->m_local_addr = iw_event->local_addr;
+ cm_id->m_remote_addr = iw_event->remote_addr;
+ cm_id->local_addr = listen_id_priv->id.local_addr;
+
+ ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
+ &iw_event->remote_addr,
+ &cm_id->remote_addr,
+ RDMA_NL_IWCM);
+ if (ret) {
+ cm_id->remote_addr = iw_event->remote_addr;
+ } else {
+ iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
+ &iw_event->local_addr,
+ &cm_id->local_addr);
+ iw_event->local_addr = cm_id->local_addr;
+ iw_event->remote_addr = cm_id->remote_addr;
+ }
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
cm_id_priv->state = IW_CM_STATE_CONN_RECV;
@@ -753,8 +876,10 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
if (iw_event->status == 0) {
- cm_id_priv->id.local_addr = iw_event->local_addr;
- cm_id_priv->id.remote_addr = iw_event->remote_addr;
+ cm_id_priv->id.m_local_addr = iw_event->local_addr;
+ cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
+ iw_event->local_addr = cm_id_priv->id.local_addr;
+ iw_event->remote_addr = cm_id_priv->id.remote_addr;
cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
} else {
/* REJECTED or RESET */
@@ -1044,6 +1169,17 @@ EXPORT_SYMBOL(iw_cm_init_qp_attr);
static int __init iw_cm_init(void)
{
+ int ret;
+
+ ret = iwpm_init(RDMA_NL_IWCM);
+ if (ret)
+ pr_err("iw_cm: couldn't init iwpm\n");
+
+ ret = ibnl_add_client(RDMA_NL_IWCM, RDMA_NL_IWPM_NUM_OPS,
+ iwcm_nl_cb_table);
+ if (ret)
+ pr_err("iw_cm: couldn't register netlink callbacks\n");
+
iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
if (!iwcm_wq)
return -ENOMEM;
@@ -1063,6 +1199,8 @@ static void __exit iw_cm_cleanup(void)
{
unregister_net_sysctl_table(iwcm_ctl_table_hdr);
destroy_workqueue(iwcm_wq);
+ ibnl_remove_client(RDMA_NL_IWCM);
+ iwpm_exit(RDMA_NL_IWCM);
}
module_init(iw_cm_init);
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 22a3abee2a54..43e3fa27102b 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -88,8 +88,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);
if (ret)
goto pid_query_error;
- ret = ibnl_put_attr(skb, nlh, IWPM_IFNAME_SIZE,
- pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
+ ret = ibnl_put_attr(skb, nlh, IFNAMSIZ,
+ pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
if (ret)
goto pid_query_error;
ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,
@@ -394,7 +394,7 @@ register_pid_response_exit:
/* always for found nlmsg_request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
EXPORT_SYMBOL(iwpm_register_pid_cb);
@@ -463,7 +463,7 @@ add_mapping_response_exit:
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
EXPORT_SYMBOL(iwpm_add_mapping_cb);
@@ -555,7 +555,7 @@ query_mapping_response_exit:
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
@@ -749,7 +749,7 @@ int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
/* always for found request */
kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
barrier();
- wake_up(&nlmsg_request->waitq);
+ up(&nlmsg_request->sem);
return 0;
}
EXPORT_SYMBOL(iwpm_mapping_error_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 5fb089e91353..9b2bf2fb2b00 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -254,9 +254,9 @@ void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
}
int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
- struct sockaddr_storage *mapped_rem_addr,
- struct sockaddr_storage *remote_addr,
- u8 nl_client)
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr,
+ u8 nl_client)
{
struct hlist_node *tmp_hlist_node;
struct hlist_head *hash_bucket_head;
@@ -322,6 +322,8 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
nlmsg_request->nl_client = nl_client;
nlmsg_request->request_done = 0;
nlmsg_request->err_code = 0;
+ sema_init(&nlmsg_request->sem, 1);
+ down(&nlmsg_request->sem);
return nlmsg_request;
}
@@ -364,11 +366,9 @@ struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
{
int ret;
- init_waitqueue_head(&nlmsg_request->waitq);
- ret = wait_event_timeout(nlmsg_request->waitq,
- (nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
- if (!ret) {
+ ret = down_timeout(&nlmsg_request->sem, IWPM_NL_TIMEOUT);
+ if (ret) {
ret = -EINVAL;
pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
__func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
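
The waitqueue-to-semaphore conversion above closes a lost-wakeup window: previously the waitqueue was only initialised inside iwpm_wait_complete_req(), so a wake_up() issued by a fast netlink callback before that point went nowhere. With the semaphore, the token is taken when the request is created, and a later down_timeout() simply consumes the up() no matter when it arrived. The pattern, in brief (sketched from the code above):

/*
 * creation:  sema_init(&req->sem, 1); down(&req->sem);   count is now 0
 * waiter:    down_timeout(&req->sem, IWPM_NL_TIMEOUT)
 *            returns 0 once the callback posts, or -ETIME on timeout
 *            (mapped to -EINVAL above)
 * callback:  up(&req->sem)   cannot be lost, even if it runs before
 *            the waiter blocks
 */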
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index b7b9e194ce81..af1fc14a0d3d 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -69,7 +69,7 @@ struct iwpm_nlmsg_request {
u8 nl_client;
u8 request_done;
u16 err_code;
- wait_queue_head_t waitq;
+ struct semaphore sem;
struct kref kref;
};
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c
index 1b65986c0be3..19b1ee3279b4 100644
--- a/drivers/infiniband/core/packer.c
+++ b/drivers/infiniband/core/packer.c
@@ -44,7 +44,7 @@ static u64 value_read(int offset, int size, void *structure)
case 4: return be32_to_cpup((__be32 *) (structure + offset));
case 8: return be64_to_cpup((__be64 *) (structure + offset));
default:
- printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+ pr_warn("Field size %d bits not handled\n", size * 8);
return 0;
}
}
@@ -104,9 +104,8 @@ void ib_pack(const struct ib_field *desc,
} else {
if (desc[i].offset_bits % 8 ||
desc[i].size_bits % 8) {
- printk(KERN_WARNING "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits);
+ pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
}
if (desc[i].struct_size_bytes)
@@ -132,7 +131,7 @@ static void value_write(int offset, int size, u64 val, void *structure)
case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
default:
- printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+ pr_warn("Field size %d bits not handled\n", size * 8);
}
}
@@ -188,9 +187,8 @@ void ib_unpack(const struct ib_field *desc,
} else {
if (desc[i].offset_bits % 8 ||
desc[i].size_bits % 8) {
- printk(KERN_WARNING "Structure field %s of size %d "
- "bits is not byte-aligned\n",
- desc[i].field_name, desc[i].size_bits);
+ pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+ desc[i].field_name, desc[i].size_bits);
}
memcpy(structure + desc[i].struct_offset_bytes,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1e37f3515d98..8a09c0fb268d 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -864,13 +864,12 @@ static void update_sm_ah(struct work_struct *work)
struct ib_ah_attr ah_attr;
if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
- printk(KERN_WARNING "Couldn't query port\n");
+ pr_warn("Couldn't query port\n");
return;
}
new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
if (!new_ah) {
- printk(KERN_WARNING "Couldn't allocate new SM AH\n");
return;
}
@@ -880,16 +879,21 @@ static void update_sm_ah(struct work_struct *work)
new_ah->pkey_index = 0;
if (ib_find_pkey(port->agent->device, port->port_num,
IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
- printk(KERN_ERR "Couldn't find index for default PKey\n");
+ pr_err("Couldn't find index for default PKey\n");
memset(&ah_attr, 0, sizeof ah_attr);
ah_attr.dlid = port_attr.sm_lid;
ah_attr.sl = port_attr.sm_sl;
ah_attr.port_num = port->port_num;
+ if (port_attr.grh_required) {
+ ah_attr.ah_flags = IB_AH_GRH;
+ ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
+ ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
+ }
new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
if (IS_ERR(new_ah->ah)) {
- printk(KERN_WARNING "Couldn't create new SM AH\n");
+ pr_warn("Couldn't create new SM AH\n");
kfree(new_ah);
return;
}
@@ -1221,7 +1225,7 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
rec.net = NULL;
rec.ifindex = 0;
rec.gid_type = IB_GID_TYPE_IB;
- memset(rec.dmac, 0, ETH_ALEN);
+ eth_zero_addr(rec.dmac);
query->callback(status, &rec, query->context);
} else
query->callback(status, NULL, query->context);
@@ -1800,13 +1804,13 @@ static int __init ib_sa_init(void)
ret = ib_register_client(&sa_client);
if (ret) {
- printk(KERN_ERR "Couldn't register ib_sa client\n");
+ pr_err("Couldn't register ib_sa client\n");
goto err1;
}
ret = mcast_init();
if (ret) {
- printk(KERN_ERR "Couldn't initialize multicast handling\n");
+ pr_err("Couldn't initialize multicast handling\n");
goto err2;
}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 6b4e8a008bc0..4a9aa0433b07 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1234,7 +1234,7 @@ static int find_overflow_devnum(void)
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
"infiniband_cm");
if (ret) {
- printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+ pr_err("ucm: couldn't register dynamic device number\n");
return ret;
}
}
@@ -1329,19 +1329,19 @@ static int __init ib_ucm_init(void)
ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
"infiniband_cm");
if (ret) {
- printk(KERN_ERR "ucm: couldn't register device number\n");
+ pr_err("ucm: couldn't register device number\n");
goto error1;
}
ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
if (ret) {
- printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
+ pr_err("ucm: couldn't create abi_version attribute\n");
goto error2;
}
ret = ib_register_client(&ucm_client);
if (ret) {
- printk(KERN_ERR "ucm: couldn't register client\n");
+ pr_err("ucm: couldn't register client\n");
goto error3;
}
return 0;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 8b5a934e1133..dd3bcceadfde 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -314,7 +314,7 @@ static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
}
}
if (!event_found)
- printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
+ pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -1716,13 +1716,13 @@ static int __init ucma_init(void)
ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
if (ret) {
- printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+ pr_err("rdma_ucm: couldn't create abi_version attr\n");
goto err1;
}
ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
if (!ucma_ctl_table_hdr) {
- printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+ pr_err("rdma_ucm: couldn't register sysctl paths\n");
ret = -ENOMEM;
goto err2;
}
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 2116132568e7..29a45d2f8898 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -479,8 +479,8 @@ int ib_ud_header_unpack(void *buf,
buf += IB_LRH_BYTES;
if (header->lrh.link_version != 0) {
- printk(KERN_WARNING "Invalid LRH.link_version %d\n",
- header->lrh.link_version);
+ pr_warn("Invalid LRH.link_version %d\n",
+ header->lrh.link_version);
return -EINVAL;
}
@@ -496,20 +496,20 @@ int ib_ud_header_unpack(void *buf,
buf += IB_GRH_BYTES;
if (header->grh.ip_version != 6) {
- printk(KERN_WARNING "Invalid GRH.ip_version %d\n",
- header->grh.ip_version);
+ pr_warn("Invalid GRH.ip_version %d\n",
+ header->grh.ip_version);
return -EINVAL;
}
if (header->grh.next_header != 0x1b) {
- printk(KERN_WARNING "Invalid GRH.next_header 0x%02x\n",
- header->grh.next_header);
+ pr_warn("Invalid GRH.next_header 0x%02x\n",
+ header->grh.next_header);
return -EINVAL;
}
break;
default:
- printk(KERN_WARNING "Invalid LRH.link_next_header %d\n",
- header->lrh.link_next_header);
+ pr_warn("Invalid LRH.link_next_header %d\n",
+ header->lrh.link_next_header);
return -EINVAL;
}
@@ -525,14 +525,13 @@ int ib_ud_header_unpack(void *buf,
header->immediate_present = 1;
break;
default:
- printk(KERN_WARNING "Invalid BTH.opcode 0x%02x\n",
- header->bth.opcode);
+ pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
return -EINVAL;
}
if (header->bth.transport_header_version != 0) {
- printk(KERN_WARNING "Invalid BTH.transport_header_version %d\n",
- header->bth.transport_header_version);
+ pr_warn("Invalid BTH.transport_header_version %d\n",
+ header->bth.transport_header_version);
return -EINVAL;
}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 38acb3cfc545..fe4d2e1a8b58 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -188,7 +188,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
sg_list_start = umem->sg_head.sgl;
while (npages) {
- ret = get_user_pages(current, current->mm, cur_base,
+ ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
1, !umem->writable, page_list, vma_list);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e69bf266049d..75077a018675 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -572,10 +572,10 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
* complex (and doesn't gain us much performance in most use
* cases).
*/
- npages = get_user_pages(owning_process, owning_mm, user_virt,
- gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT, 0,
- local_page_list, NULL);
+ npages = get_user_pages_remote(owning_process, owning_mm,
+ user_virt, gup_num_pages,
+ access_mask & ODP_WRITE_ALLOWED_BIT,
+ 0, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
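Both get_user_pages() call sites above track the same mm API split: the plain call now operates implicitly on the calling task's address space, while pinning pages in another task's mm goes through the new get_user_pages_remote(). A sketch of the two call shapes at this point in the API's history (variable names are illustrative only):

	/* pin pages in the calling task's own address space */
	npages = get_user_pages(start, nr_pages, write, force,
				page_list, vma_list);

	/* pin pages in another task's address space; tsk/mm stay explicit */
	npages = get_user_pages_remote(tsk, mm, start, nr_pages,
				       write, force, page_list, NULL);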
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6c6fbff19752..6fdc7ecdaca0 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -402,7 +402,7 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
resp->hw_ver = attr->hw_ver;
resp->max_qp = attr->max_qp;
resp->max_qp_wr = attr->max_qp_wr;
- resp->device_cap_flags = attr->device_cap_flags;
+ resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
resp->max_sge = attr->max_sge;
resp->max_sge_rd = attr->max_sge_rd;
resp->max_cq = attr->max_cq;
@@ -1174,6 +1174,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
struct ib_pd *pd;
struct ib_mw *mw;
+ struct ib_udata udata;
int ret;
if (out_len < sizeof(resp))
@@ -1195,7 +1196,12 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
goto err_free;
}
- mw = pd->device->alloc_mw(pd, cmd.mw_type);
+ INIT_UDATA(&udata, buf + sizeof(cmd),
+ (unsigned long)cmd.response + sizeof(resp),
+ in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+ out_len - sizeof(resp));
+
+ mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
if (IS_ERR(mw)) {
ret = PTR_ERR(mw);
goto err_put;
@@ -3086,6 +3092,14 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
!capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
return -EPERM;
+ if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
+ return -EINVAL;
+
+ if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+ ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
+ (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
+ return -EINVAL;
+
if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
return -EINVAL;
@@ -3586,9 +3600,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
struct ib_udata *ucore,
struct ib_udata *uhw)
{
- struct ib_uverbs_ex_query_device_resp resp;
+ struct ib_uverbs_ex_query_device_resp resp = { {0} };
struct ib_uverbs_ex_query_device cmd;
- struct ib_device_attr attr;
+ struct ib_device_attr attr = {0};
int err;
if (ucore->inlen < sizeof(cmd))
@@ -3609,14 +3623,11 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
if (ucore->outlen < resp.response_length)
return -ENOSPC;
- memset(&attr, 0, sizeof(attr));
-
err = ib_dev->query_device(ib_dev, &attr, uhw);
if (err)
return err;
copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
- resp.comp_mask = 0;
if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
goto end;
@@ -3629,9 +3640,6 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
attr.odp_caps.per_transport_caps.uc_odp_caps;
resp.odp_caps.per_transport_caps.ud_odp_caps =
attr.odp_caps.per_transport_caps.ud_odp_caps;
- resp.odp_caps.reserved = 0;
-#else
- memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
#endif
resp.response_length += sizeof(resp.odp_caps);
@@ -3649,8 +3657,5 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
- if (err)
- return err;
-
- return 0;
+ return err;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 39680aed99dd..28ba2cc81535 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -683,12 +683,28 @@ out:
return ev_file;
}
+static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
+{
+ u64 mask;
+
+ if (command <= IB_USER_VERBS_CMD_OPEN_QP)
+ mask = ib_dev->uverbs_cmd_mask;
+ else
+ mask = ib_dev->uverbs_ex_cmd_mask;
+
+ if (mask & ((u64)1 << command))
+ return 0;
+
+ return -1;
+}
+
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr;
+ __u32 command;
__u32 flags;
int srcu_key;
ssize_t ret;
@@ -707,37 +723,34 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
- flags = (hdr.command &
- IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+ if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+ IB_USER_VERBS_CMD_COMMAND_MASK)) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (!flags) {
- __u32 command;
+ command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ if (verify_command_mask(ib_dev, command)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
- if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!file->ucontext &&
+ command != IB_USER_VERBS_CMD_GET_CONTEXT) {
+ ret = -EINVAL;
+ goto out;
+ }
- command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ flags = (hdr.command &
+ IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+ if (!flags) {
if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
!uverbs_cmd_table[command]) {
ret = -EINVAL;
goto out;
}
- if (!file->ucontext &&
- command != IB_USER_VERBS_CMD_GET_CONTEXT) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!(ib_dev->uverbs_cmd_mask & (1ull << command))) {
- ret = -ENOSYS;
- goto out;
- }
-
if (hdr.in_words * 4 != count) {
ret = -EINVAL;
goto out;
@@ -749,21 +762,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
hdr.out_words * 4);
} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
- __u32 command;
-
struct ib_uverbs_ex_cmd_hdr ex_hdr;
struct ib_udata ucore;
struct ib_udata uhw;
size_t written_count = count;
- if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
- IB_USER_VERBS_CMD_COMMAND_MASK)) {
- ret = -EINVAL;
- goto out;
- }
-
- command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
-
if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
!uverbs_ex_cmd_table[command]) {
ret = -ENOSYS;
@@ -775,11 +778,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
- if (!(ib_dev->uverbs_ex_cmd_mask & (1ull << command))) {
- ret = -ENOSYS;
- goto out;
- }
-
if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
ret = -EINVAL;
goto out;
@@ -1058,7 +1056,7 @@ static int find_overflow_devnum(void)
ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
"infiniband_verbs");
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+ pr_err("user_verbs: couldn't register dynamic device number\n");
return ret;
}
}
@@ -1279,14 +1277,14 @@ static int __init ib_uverbs_init(void)
ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
"infiniband_verbs");
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register device number\n");
+ pr_err("user_verbs: couldn't register device number\n");
goto out;
}
uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
if (IS_ERR(uverbs_class)) {
ret = PTR_ERR(uverbs_class);
- printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
+ pr_err("user_verbs: couldn't create class infiniband_verbs\n");
goto out_chrdev;
}
@@ -1294,13 +1292,13 @@ static int __init ib_uverbs_init(void)
ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
+ pr_err("user_verbs: couldn't create abi_version attribute\n");
goto out_class;
}
ret = ib_register_client(&uverbs_client);
if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register client\n");
+ pr_err("user_verbs: couldn't register client\n");
goto out_class;
}
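The verify_command_mask() helper added above replaces two open-coded bitmap tests: each device advertises the verbs it implements in a 64-bit mask, one for the legacy command range and one for the extended range. A provider conceptually opts in at registration time along these lines (the specific bits are illustrative, not from this patch):

	/* hypothetical provider setup; verify_command_mask() succeeds
	 * only for commands whose bit is set in the matching mask */
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP);
	ibdev->uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);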
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 5af6d024e053..15b8adbf39c0 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1551,6 +1551,46 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
}
EXPORT_SYMBOL(ib_check_mr_status);
+int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+ int state)
+{
+ if (!device->set_vf_link_state)
+ return -ENOSYS;
+
+ return device->set_vf_link_state(device, vf, port, state);
+}
+EXPORT_SYMBOL(ib_set_vf_link_state);
+
+int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_info *info)
+{
+ if (!device->get_vf_config)
+ return -ENOSYS;
+
+ return device->get_vf_config(device, vf, port, info);
+}
+EXPORT_SYMBOL(ib_get_vf_config);
+
+int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_stats *stats)
+{
+ if (!device->get_vf_stats)
+ return -ENOSYS;
+
+ return device->get_vf_stats(device, vf, port, stats);
+}
+EXPORT_SYMBOL(ib_get_vf_stats);
+
+int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+ int type)
+{
+ if (!device->set_vf_guid)
+ return -ENOSYS;
+
+ return device->set_vf_guid(device, vf, port, guid, type);
+}
+EXPORT_SYMBOL(ib_set_vf_guid);
+
/**
* ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
* and set it to the memory region.
@@ -1567,6 +1607,8 @@ EXPORT_SYMBOL(ib_check_mr_status);
* - The last sg element is allowed to have length less than page_size.
* - If sg_nents total byte length exceeds the mr max_num_sge * page_size
* then only max_num_sg entries will be mapped.
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, none of these
+ * constraints hold and the page_size argument is ignored.
*
* Returns the number of sg elements that were mapped to the memory region.
*
@@ -1657,3 +1699,167 @@ next_page:
return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the SQ.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_drain_cqe sdrain;
+ struct ib_send_wr swr = {}, *bad_swr;
+ int ret;
+
+ if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
+ WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+ "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+ return;
+ }
+
+ swr.wr_cqe = &sdrain.cqe;
+ sdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ ret = ib_post_send(qp, &swr, &bad_swr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR and block until its completion is reaped for the RQ.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {}, *bad_rwr;
+ int ret;
+
+ if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
+ WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+ "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+ return;
+ }
+
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ ret = ib_post_recv(qp, &rwr, &bad_rwr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ * application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+ if (qp->device->drain_sq)
+ qp->device->drain_sq(qp);
+ else
+ __ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ * application.
+ * @qp: queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that. Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+ if (qp->device->drain_rq)
+ qp->device->drain_rq(qp);
+ else
+ __ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ * application on both the RQ and SQ.
+ * @qp: queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+ ib_drain_sq(qp);
+ ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
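Taken together, ib_drain_sq/rq/qp give ULPs a uniform teardown step: the generic path moves the QP to IB_QPS_ERR, posts one marker WR per queue, and blocks until its completion is reaped, while providers may substitute their own drain_sq/drain_rq hooks. A minimal caller sketch, assuming the CQs were allocated with ib_alloc_cq() using a poll context other than IB_POLL_DIRECT (the wrapper name is hypothetical):

	static void ulp_teardown_qp(struct ib_qp *qp)
	{
		/* flushes both SQ and RQ; the QP ends up in error state */
		ib_drain_qp(qp);
		ib_destroy_qp(qp);
	}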
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index aded2a5cc2d5..c7ad0a4c8b15 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
+obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_NES) += nes/
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index f504ba73e5dc..d403231a4aff 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1877,7 +1877,7 @@ err:
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
struct net_device *dev;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
if (!dev)
@@ -1892,10 +1892,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct iwch_ep *ep;
struct rtable *rt;
int err = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
- if (cm_id->remote_addr.ss_family != PF_INET) {
+ if (cm_id->m_remote_addr.ss_family != PF_INET) {
err = -ENOSYS;
goto out;
}
@@ -1961,9 +1961,9 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
state_set(&ep->com, CONNECTING);
ep->tos = IPTOS_LOWDELAY;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
sizeof(ep->com.remote_addr));
/* send connect request to rnic */
@@ -1992,7 +1992,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
might_sleep();
- if (cm_id->local_addr.ss_family != PF_INET) {
+ if (cm_id->m_local_addr.ss_family != PF_INET) {
err = -ENOSYS;
goto fail1;
}
@@ -2008,7 +2008,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
/*
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 2734820d291b..42a7b8952d13 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -657,7 +657,8 @@ err:
return ERR_PTR(err);
}
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct iwch_dev *rhp;
struct iwch_pd *php;
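Both iWARP providers in this series grow the same third parameter: the alloc_mw() device hook now receives the ib_udata that ib_uverbs_alloc_mw() builds, so drivers can consume driver-private input or return driver-private output when a memory window is allocated. The updated hook shape is simply:

	struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
				  struct ib_udata *udata);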
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index cd2ff5f9518a..651711370d55 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -302,7 +302,7 @@ void _c4iw_free_ep(struct kref *kref)
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
@@ -314,12 +314,6 @@ void _c4iw_free_ep(struct kref *kref)
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
}
- if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
- print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
- iwpm_remove_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr);
- iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
- }
kfree(ep);
}
@@ -455,7 +449,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
@@ -485,12 +479,19 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
unsigned int flowclen = 80;
struct fw_flowc_wr *flowc;
int i;
+ u16 vlan = ep->l2t->vlan;
+ int nparams;
+
+ if (vlan == CPL_L2T_VLAN_NONE)
+ nparams = 8;
+ else
+ nparams = 9;
skb = get_skb(skb, flowclen, GFP_KERNEL);
flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
- FW_FLOWC_WR_NPARAMS_V(8));
+ FW_FLOWC_WR_NPARAMS_V(nparams));
flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
16)) | FW_WR_FLOWID_V(ep->hwtid));
@@ -511,9 +512,17 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
flowc->mnemval[7].val = cpu_to_be32(ep->emss);
- /* Pad WR to 16 byte boundary */
- flowc->mnemval[8].mnemonic = 0;
- flowc->mnemval[8].val = 0;
+ if (nparams == 9) {
+ u16 pri;
+
+ pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[8].val = cpu_to_be32(pri);
+ } else {
+ /* Pad WR to 16 byte boundary */
+ flowc->mnemval[8].mnemonic = 0;
+ flowc->mnemval[8].val = 0;
+ }
for (i = 0; i < 9; i++) {
flowc->mnemval[i].r4[0] = 0;
flowc->mnemval[i].r4[1] = 0;
@@ -568,54 +577,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-/*
- * c4iw_form_pm_msg - Form a port mapper message with mapping info
- */
-static void c4iw_form_pm_msg(struct c4iw_ep *ep,
- struct iwpm_sa_data *pm_msg)
-{
- memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
- sizeof(ep->com.remote_addr));
-}
-
-/*
- * c4iw_form_reg_msg - Form a port mapper message with dev info
- */
-static void c4iw_form_reg_msg(struct c4iw_dev *dev,
- struct iwpm_dev_data *pm_msg)
-{
- memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
- memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
- IWPM_IFNAME_SIZE);
-}
-
-static void c4iw_record_pm_msg(struct c4iw_ep *ep,
- struct iwpm_sa_data *pm_msg)
-{
- memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
- sizeof(ep->com.mapped_local_addr));
- memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
- sizeof(ep->com.mapped_remote_addr));
-}
-
-static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
-{
- int ret;
-
- print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
- print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");
-
- ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
- &child_ep->com.mapped_remote_addr,
- &child_ep->com.remote_addr, RDMA_NL_C4IW);
- if (ret)
- PDBG("Unable to find remote peer addr info - err %d\n", ret);
-
- return ret;
-}
-
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx, int use_ts, int ipv6)
{
@@ -645,13 +606,13 @@ static int send_connect(struct c4iw_ep *ep)
int wscale;
int win, sizev4, sizev6, wrlen;
struct sockaddr_in *la = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
struct sockaddr_in *ra = (struct sockaddr_in *)
- &ep->com.mapped_remote_addr;
+ &ep->com.remote_addr;
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
- &ep->com.mapped_remote_addr;
+ &ep->com.remote_addr;
int ret;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -710,7 +671,7 @@ static int send_connect(struct c4iw_ep *ep)
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP_V(ep->tos) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
@@ -1829,10 +1790,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
- sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
+ sin = (struct sockaddr_in *)&ep->com.local_addr;
req->le.lport = sin->sin_port;
req->le.u.ipv4.lip = sin->sin_addr.s_addr;
- sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
+ sin = (struct sockaddr_in *)&ep->com.remote_addr;
req->le.pport = sin->sin_port;
req->le.u.ipv4.pip = sin->sin_addr.s_addr;
req->tcb.t_state_to_astid =
@@ -1864,7 +1825,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
- DSCP_V(ep->tos) |
+ DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win));
req->tcb.opt2 = (__force __be32) (PACE_V(1) |
@@ -1928,7 +1889,7 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
struct dst_entry *dst, struct c4iw_dev *cdev,
- bool clear_mpa_v1, enum chip_type adapter_type)
+ bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
struct neighbour *n;
int err, step;
@@ -1958,7 +1919,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
goto out;
}
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
+ n, pdev, rt_tos2priority(tos));
if (!ep->l2t)
goto out;
ep->mtu = pdev->mtu;
@@ -2013,13 +1974,13 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
{
int err = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)
- &ep->com.cm_id->local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)
- &ep->com.cm_id->remote_addr;
+ &ep->com.cm_id->m_remote_addr;
struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
- &ep->com.cm_id->local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
- &ep->com.cm_id->remote_addr;
+ &ep->com.cm_id->m_remote_addr;
int iptype;
__u8 *ra;
@@ -2038,10 +1999,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
/* find a route */
- if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
+ if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, 0);
+ raddr->sin_port, ep->com.cm_id->tos);
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
} else {
@@ -2058,7 +2019,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
goto fail3;
}
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
- ep->com.dev->rdev.lldi.adapter_type);
+ ep->com.dev->rdev.lldi.adapter_type,
+ ep->com.cm_id->tos);
if (err) {
pr_err("%s - cannot alloc l2e.\n", __func__);
goto fail4;
@@ -2069,7 +2031,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
- ep->tos = 0;
+ ep->tos = ep->com.cm_id->tos;
/* send connect request to rnic */
err = send_connect(ep);
@@ -2109,10 +2071,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct sockaddr_in6 *ra6;
ep = lookup_atid(t, atid);
- la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
- ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
- la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
- ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;
+ la = (struct sockaddr_in *)&ep->com.local_addr;
+ ra = (struct sockaddr_in *)&ep->com.remote_addr;
+ la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
status, status2errno(status));
@@ -2154,7 +2116,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
(const u32 *)
@@ -2189,7 +2151,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
@@ -2391,6 +2353,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
u16 peer_mss = ntohs(req->tcpopt.mss);
int iptype;
unsigned short hdrs;
+ u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
parent_ep = lookup_stid(t, stid);
if (!parent_ep) {
@@ -2399,8 +2362,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
}
if (state_read(&parent_ep->com) != LISTEN) {
- printk(KERN_ERR "%s - listening ep not in LISTEN\n",
- __func__);
+ PDBG("%s - listening ep not in LISTEN\n", __func__);
goto reject;
}
@@ -2415,7 +2377,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(peer_port), peer_mss);
dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ tos);
} else {
PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
, __func__, parent_ep, hwtid,
@@ -2441,7 +2403,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
}
err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
- parent_ep->com.dev->rdev.lldi.adapter_type);
+ parent_ep->com.dev->rdev.lldi.adapter_type, tos);
if (err) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -2459,18 +2421,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
- /*
- * The mapped_local and mapped_remote addresses get setup with
- * the actual 4-tuple. The local address will be based on the
- * actual local address of the connection, but on the port number
- * of the parent listening endpoint. The remote address is
- * setup based on a query to the IWPM since we don't know what it
- * originally was before mapping. If no mapping was done, then
- * mapped_remote == remote, and mapped_local == local.
- */
if (iptype == 4) {
struct sockaddr_in *sin = (struct sockaddr_in *)
- &child_ep->com.mapped_local_addr;
+ &child_ep->com.local_addr;
sin->sin_family = PF_INET;
sin->sin_port = local_port;
@@ -2482,12 +2435,12 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
&parent_ep->com.local_addr)->sin_port;
sin->sin_addr.s_addr = *(__be32 *)local_ip;
- sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
+ sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
sin->sin_family = PF_INET;
sin->sin_port = peer_port;
sin->sin_addr.s_addr = *(__be32 *)peer_ip;
} else {
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = local_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
@@ -2498,18 +2451,15 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
&parent_ep->com.local_addr)->sin6_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = peer_port;
memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
}
- memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
- sizeof(child_ep->com.remote_addr));
- get_remote_addr(parent_ep, child_ep);
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
- child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ child_ep->tos = tos;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
@@ -2522,7 +2472,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
accept_cr(child_ep, skb, req);
set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
if (iptype == 6) {
- sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
@@ -2765,7 +2715,7 @@ out:
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
cxgb4_clip_release(
ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr,
@@ -3026,8 +2976,8 @@ static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
struct in_device *ind;
int found = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
ind = in_dev_get(dev->rdev.lldi.ports[0]);
if (!ind)
@@ -3072,8 +3022,8 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
struct in6_addr uninitialized_var(addr);
- struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
- struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+ struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
memcpy(la6->sin6_addr.s6_addr, &addr, 16);
@@ -3092,11 +3042,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in *raddr;
struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
__u8 *ra;
int iptype;
- int iwpm_err = 0;
if ((conn_param->ord > cur_max_read_depth(dev)) ||
(conn_param->ird > cur_max_read_depth(dev))) {
@@ -3144,47 +3091,17 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
insert_handle(dev, &dev->atid_idr, ep, ep->atid);
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
- memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
sizeof(ep->com.remote_addr));
- /* No port mapper available, go with the specified peer information */
- memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
- sizeof(ep->com.mapped_local_addr));
- memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
- sizeof(ep->com.mapped_remote_addr));
-
- c4iw_form_reg_msg(dev, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
- if (iwpm_err) {
- PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
- __func__, iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- c4iw_form_pm_msg(ep, &pm_msg);
- iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
- if (iwpm_err)
- PDBG("%s: Port Mapper query fail (err = %d).\n",
- __func__, iwpm_err);
- else
- c4iw_record_pm_msg(ep, &pm_msg);
- }
- if (iwpm_create_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
- iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
- err = -ENOMEM;
- goto fail1;
- }
- print_addr(&ep->com, __func__, "add_query/create_mapinfo");
- set_bit(RELEASE_MAPINFO, &ep->com.flags);
-
- laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
- raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
- laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
- raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;
+ laddr = (struct sockaddr_in *)&ep->com.local_addr;
+ raddr = (struct sockaddr_in *)&ep->com.remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
- if (cm_id->remote_addr.ss_family == AF_INET) {
+ if (cm_id->m_remote_addr.ss_family == AF_INET) {
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
@@ -3203,7 +3120,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ra, ntohs(raddr->sin_port));
ep->dst = find_route(dev, laddr->sin_addr.s_addr,
raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, 0);
+ raddr->sin_port, cm_id->tos);
} else {
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -3234,7 +3151,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
- ep->com.dev->rdev.lldi.adapter_type);
+ ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
goto fail3;
@@ -3245,7 +3162,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->l2t->idx);
state_set(&ep->com, CONNECTING);
- ep->tos = 0;
+ ep->tos = cm_id->tos;
/* send connect request to rnic */
err = send_connect(ep);
@@ -3269,7 +3186,7 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
int err;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
@@ -3302,7 +3219,7 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
int err;
struct sockaddr_in *sin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.local_addr;
if (dev->rdev.lldi.enable_fw_ofld_conn) {
do {
@@ -3343,9 +3260,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
int err = 0;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_listen_ep *ep;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
- int iwpm_err = 0;
might_sleep();
@@ -3360,7 +3274,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
ep->com.cm_id = cm_id;
ep->com.dev = dev;
ep->backlog = backlog;
- memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
sizeof(ep->com.local_addr));
/*
@@ -3369,10 +3283,10 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
if (dev->rdev.lldi.enable_fw_ofld_conn &&
ep->com.local_addr.ss_family == AF_INET)
ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
- cm_id->local_addr.ss_family, ep);
+ cm_id->m_local_addr.ss_family, ep);
else
ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
- cm_id->local_addr.ss_family, ep);
+ cm_id->m_local_addr.ss_family, ep);
if (ep->stid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
@@ -3381,36 +3295,9 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
}
insert_handle(dev, &dev->stid_idr, ep, ep->stid);
- /* No port mapper available, go with the specified info */
- memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
- sizeof(ep->com.mapped_local_addr));
-
- c4iw_form_reg_msg(dev, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
- if (iwpm_err) {
- PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
- __func__, iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
- sizeof(ep->com.local_addr));
- iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
- if (iwpm_err)
- PDBG("%s: Port Mapper query fail (err = %d).\n",
- __func__, iwpm_err);
- else
- memcpy(&ep->com.mapped_local_addr,
- &pm_msg.mapped_loc_addr,
- sizeof(ep->com.mapped_local_addr));
- }
- if (iwpm_create_mapinfo(&ep->com.local_addr,
- &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
- err = -ENOMEM;
- goto fail3;
- }
- print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");
+ memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
+ sizeof(ep->com.local_addr));
- set_bit(RELEASE_MAPINFO, &ep->com.flags);
state_set(&ep->com, LISTEN);
if (ep->com.local_addr.ss_family == AF_INET)
err = create_server4(dev, ep);
@@ -3421,7 +3308,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto out;
}
-fail3:
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
@@ -3456,7 +3342,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
goto done;
err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
0, 0, __func__);
- sin6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
@@ -3580,7 +3466,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
- (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+ (struct sockaddr_in6 *)&ep->com.local_addr;
cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
(const u32 *)&sin6->sin6_addr.s6_addr, 1);
}
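A recurring detail in the cm.c hunks: DSCP_V(ep->tos) became DSCP_V(ep->tos >> 2). The IPv4 TOS byte carries the 6-bit DSCP field above the 2-bit ECN field, so the raw tos from the cm_id has to be shifted before it is programmed into the hardware DSCP field; roughly (hypothetical helper for illustration):

	/* TOS byte layout: | DSCP (6 bits) | ECN (2 bits) | */
	static inline u8 tos_to_dscp(u8 tos)
	{
		return tos >> 2;	/* drop the ECN bits */
	}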
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index cf21df4a8bf5..b4eeb783573c 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -815,8 +815,15 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
}
}
out:
- if (wq)
+ if (wq) {
+ if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
+ if (t4_sq_empty(wq))
+ complete(&qhp->sq_drained);
+ if (t4_rq_empty(wq))
+ complete(&qhp->rq_drained);
+ }
spin_unlock(&qhp->lock);
+ }
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8024ea4417b8..ae2e8b23d2dd 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -87,17 +87,6 @@ struct c4iw_debugfs_data {
int pos;
};
-/* registered cxgb4 netlink callbacks */
-static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
- [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
- [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
- [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
- [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
- [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
- [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
- [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
-};
-
static int count_idrs(int id, void *p, void *data)
{
int *countp = data;
@@ -242,13 +231,13 @@ static int dump_qp(int id, void *p, void *data)
if (qp->ep) {
if (qp->ep->com.local_addr.ss_family == AF_INET) {
struct sockaddr_in *lsin = (struct sockaddr_in *)
- &qp->ep->com.local_addr;
+ &qp->ep->com.cm_id->local_addr;
struct sockaddr_in *rsin = (struct sockaddr_in *)
- &qp->ep->com.remote_addr;
+ &qp->ep->com.cm_id->remote_addr;
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &qp->ep->com.mapped_local_addr;
+ &qp->ep->com.cm_id->m_local_addr;
struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
- &qp->ep->com.mapped_remote_addr;
+ &qp->ep->com.cm_id->m_remote_addr;
cc = snprintf(qpd->buf + qpd->pos, space,
"rc qp sq id %u rq id %u state %u "
@@ -264,15 +253,15 @@ static int dump_qp(int id, void *p, void *data)
ntohs(mapped_rsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &qp->ep->com.local_addr;
+ &qp->ep->com.cm_id->local_addr;
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
- &qp->ep->com.remote_addr;
+ &qp->ep->com.cm_id->remote_addr;
struct sockaddr_in6 *mapped_lsin6 =
(struct sockaddr_in6 *)
- &qp->ep->com.mapped_local_addr;
+ &qp->ep->com.cm_id->m_local_addr;
struct sockaddr_in6 *mapped_rsin6 =
(struct sockaddr_in6 *)
- &qp->ep->com.mapped_remote_addr;
+ &qp->ep->com.cm_id->m_remote_addr;
cc = snprintf(qpd->buf + qpd->pos, space,
"rc qp sq id %u rq id %u state %u "
@@ -545,13 +534,13 @@ static int dump_ep(int id, void *p, void *data)
if (ep->com.local_addr.ss_family == AF_INET) {
struct sockaddr_in *lsin = (struct sockaddr_in *)
- &ep->com.local_addr;
+ &ep->com.cm_id->local_addr;
struct sockaddr_in *rsin = (struct sockaddr_in *)
- &ep->com.remote_addr;
+ &ep->com.cm_id->remote_addr;
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
- &ep->com.mapped_remote_addr;
+ &ep->com.cm_id->m_remote_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
@@ -569,13 +558,13 @@ static int dump_ep(int id, void *p, void *data)
ntohs(mapped_rsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &ep->com.local_addr;
+ &ep->com.cm_id->local_addr;
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
- &ep->com.remote_addr;
+ &ep->com.cm_id->remote_addr;
struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->m_local_addr;
struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_remote_addr;
+ &ep->com.cm_id->m_remote_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
@@ -610,9 +599,9 @@ static int dump_listen_ep(int id, void *p, void *data)
if (ep->com.local_addr.ss_family == AF_INET) {
struct sockaddr_in *lsin = (struct sockaddr_in *)
- &ep->com.local_addr;
+ &ep->com.cm_id->local_addr;
struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->m_local_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p state %d flags 0x%lx stid %d "
@@ -623,9 +612,9 @@ static int dump_listen_ep(int id, void *p, void *data)
ntohs(mapped_lsin->sin_port));
} else {
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
- &ep->com.local_addr;
+ &ep->com.cm_id->local_addr;
struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
- &ep->com.mapped_local_addr;
+ &ep->com.cm_id->m_local_addr;
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p state %d flags 0x%lx stid %d "
@@ -801,10 +790,9 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->lldi.vr->qp.size,
rdev->lldi.vr->cq.start,
rdev->lldi.vr->cq.size);
- PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p "
+ PDBG("udb %pR db_reg %p gts_reg %p "
"qpmask 0x%x cqmask 0x%x\n",
- (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (void *)pci_resource_start(rdev->lldi.pdev, 2),
+ &rdev->lldi.pdev->resource[2],
rdev->lldi.db_reg, rdev->lldi.gts_reg,
rdev->qpmask, rdev->cqmask);
@@ -1506,20 +1494,6 @@ static int __init c4iw_init_module(void)
printk(KERN_WARNING MOD
"could not create debugfs entry, continuing\n");
- if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
- c4iw_nl_cb_table))
- pr_err("%s[%u]: Failed to add netlink callback\n"
- , __func__, __LINE__);
-
- err = iwpm_init(RDMA_NL_C4IW);
- if (err) {
- pr_err("port mapper initialization failed with %d\n", err);
- ibnl_remove_client(RDMA_NL_C4IW);
- c4iw_cm_term();
- debugfs_remove_recursive(c4iw_debugfs_root);
- return err;
- }
-
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
return 0;
@@ -1537,8 +1511,6 @@ static void __exit c4iw_exit_module(void)
}
mutex_unlock(&dev_mutex);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
- iwpm_exit(RDMA_NL_C4IW);
- ibnl_remove_client(RDMA_NL_C4IW);
c4iw_cm_term();
debugfs_remove_recursive(c4iw_debugfs_root);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index fb2de75a0392..df43f871ab61 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -476,6 +476,8 @@ struct c4iw_qp {
wait_queue_head_t wait;
struct timer_list timer;
int sq_sig_all;
+ struct completion rq_drained;
+ struct completion sq_drained;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -753,7 +755,6 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
TIMEOUT = 4,
QP_REFERENCED = 5,
- RELEASE_MAPINFO = 6,
};
enum c4iw_ep_history {
@@ -790,8 +791,6 @@ struct c4iw_ep_common {
struct mutex mutex;
struct sockaddr_storage local_addr;
struct sockaddr_storage remote_addr;
- struct sockaddr_storage mapped_local_addr;
- struct sockaddr_storage mapped_remote_addr;
struct c4iw_wr_wait wr_wait;
unsigned long flags;
unsigned long history;
@@ -843,45 +842,6 @@ struct c4iw_ep {
struct c4iw_ep_stats stats;
};
-static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
- const char *msg)
-{
-
-#define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr))
-#define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port)
-#define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr))
-#define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port)
-
- if (c4iw_debug) {
- switch (epc->local_addr.ss_family) {
- case AF_INET:
- PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n",
- func, msg, SINA(&epc->local_addr),
- SINP(&epc->local_addr),
- SINP(&epc->mapped_local_addr),
- SINA(&epc->remote_addr),
- SINP(&epc->remote_addr),
- SINP(&epc->mapped_remote_addr));
- break;
- case AF_INET6:
- PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n",
- func, msg, SIN6A(&epc->local_addr),
- SIN6P(&epc->local_addr),
- SIN6P(&epc->mapped_local_addr),
- SIN6A(&epc->remote_addr),
- SIN6P(&epc->remote_addr),
- SIN6P(&epc->mapped_remote_addr));
- break;
- default:
- break;
- }
- }
-#undef SINA
-#undef SINP
-#undef SIN6A
-#undef SIN6P
-}
-
static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
return cm_id->provider_data;
@@ -961,7 +921,8 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr,
struct scatterlist *sg,
int sg_nents);
int c4iw_dealloc_mw(struct ib_mw *mw);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
struct ib_udata *udata);
@@ -1016,6 +977,8 @@ extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
+void c4iw_drain_rq(struct ib_qp *qp);
+void c4iw_drain_sq(struct ib_qp *qp);
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 7849890c4781..008be07d5604 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -34,6 +34,7 @@
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
+#include <rdma/ib_user_verbs.h>
#include "iw_cxgb4.h"
@@ -552,7 +553,8 @@ err:
return ERR_PTR(err);
}
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct c4iw_dev *rhp;
struct c4iw_pd *php;
@@ -617,12 +619,14 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
int ret = 0;
int length = roundup(max_num_sg * sizeof(u64), 32);
+ php = to_c4iw_pd(pd);
+ rhp = php->rhp;
+
if (mr_type != IB_MR_TYPE_MEM_REG ||
- max_num_sg > t4_max_fr_depth(use_dsgl))
+ max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+ use_dsgl))
return ERR_PTR(-EINVAL);
- php = to_c4iw_pd(pd);
- rhp = php->rhp;
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
if (!mhp) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index ec04272fbdc2..124682dc5709 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -339,7 +339,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
props->max_mr = c4iw_num_stags(&dev->rdev);
props->max_pd = T4_MAX_NUM_PD;
props->local_ca_ack_delay = 0;
- props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);
+ props->max_fast_reg_page_list_len =
+ t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);
return 0;
}
@@ -564,6 +565,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.get_protocol_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
+ dev->ibdev.drain_sq = c4iw_drain_sq;
+ dev->ibdev.drain_rq = c4iw_drain_rq;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e99345eb875a..e17fb5d5e033 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -606,7 +606,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
}
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_reg_wr *wr, u8 *len16, u8 t5dev)
+ struct ib_reg_wr *wr, u8 *len16, bool dsgl_supported)
{
struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
struct fw_ri_immd *imdp;
@@ -615,7 +615,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
int rem;
- if (mhp->mpl_len > t4_max_fr_depth(use_dsgl))
+ if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
return -EINVAL;
wqe->fr.qpbinde_to_dcacpu = 0;
@@ -629,7 +629,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
0xffffffff);
- if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+ if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
struct fw_ri_dsgl *sglp;
for (i = 0; i < mhp->mpl_len; i++)
@@ -808,9 +808,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_opcode = FW_RI_FR_NSMR_WR;
swsqe->opcode = FW_RI_FAST_REGISTER;
err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
- is_t5(
- qhp->rhp->rdev.lldi.adapter_type) ?
- 1 : 0);
+ qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
break;
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
@@ -1621,7 +1619,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
unsigned int sqsize, rqsize;
struct c4iw_ucontext *ucontext;
int ret;
- struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
+ struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
+ struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
PDBG("%s ib_pd %p\n", __func__, pd);
@@ -1697,6 +1696,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.max_ird = 0;
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
+ init_completion(&qhp->sq_drained);
+ init_completion(&qhp->rq_drained);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
@@ -1706,29 +1707,30 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
goto err2;
if (udata) {
- mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
- if (!mm1) {
+ sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
+ if (!sq_key_mm) {
ret = -ENOMEM;
goto err3;
}
- mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
- if (!mm2) {
+ rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
+ if (!rq_key_mm) {
ret = -ENOMEM;
goto err4;
}
- mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
- if (!mm3) {
+ sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
+ if (!sq_db_key_mm) {
ret = -ENOMEM;
goto err5;
}
- mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
- if (!mm4) {
+ rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
+ if (!rq_db_key_mm) {
ret = -ENOMEM;
goto err6;
}
if (t4_sq_onchip(&qhp->wq.sq)) {
- mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
- if (!mm5) {
+ ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
+ GFP_KERNEL);
+ if (!ma_sync_key_mm) {
ret = -ENOMEM;
goto err7;
}
@@ -1743,7 +1745,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
uresp.rq_size = qhp->wq.rq.size;
uresp.rq_memsize = qhp->wq.rq.memsize;
spin_lock(&ucontext->mmap_lock);
- if (mm5) {
+ if (ma_sync_key_mm) {
uresp.ma_sync_key = ucontext->key;
ucontext->key += PAGE_SIZE;
} else {
@@ -1761,28 +1763,29 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
if (ret)
goto err8;
- mm1->key = uresp.sq_key;
- mm1->addr = qhp->wq.sq.phys_addr;
- mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
- insert_mmap(ucontext, mm1);
- mm2->key = uresp.rq_key;
- mm2->addr = virt_to_phys(qhp->wq.rq.queue);
- mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
- insert_mmap(ucontext, mm2);
- mm3->key = uresp.sq_db_gts_key;
- mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa;
- mm3->len = PAGE_SIZE;
- insert_mmap(ucontext, mm3);
- mm4->key = uresp.rq_db_gts_key;
- mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa;
- mm4->len = PAGE_SIZE;
- insert_mmap(ucontext, mm4);
- if (mm5) {
- mm5->key = uresp.ma_sync_key;
- mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
- + PCIE_MA_SYNC_A) & PAGE_MASK;
- mm5->len = PAGE_SIZE;
- insert_mmap(ucontext, mm5);
+ sq_key_mm->key = uresp.sq_key;
+ sq_key_mm->addr = qhp->wq.sq.phys_addr;
+ sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+ insert_mmap(ucontext, sq_key_mm);
+ rq_key_mm->key = uresp.rq_key;
+ rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
+ rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+ insert_mmap(ucontext, rq_key_mm);
+ sq_db_key_mm->key = uresp.sq_db_gts_key;
+ sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
+ sq_db_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, sq_db_key_mm);
+ rq_db_key_mm->key = uresp.rq_db_gts_key;
+ rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
+ rq_db_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, rq_db_key_mm);
+ if (ma_sync_key_mm) {
+ ma_sync_key_mm->key = uresp.ma_sync_key;
+ ma_sync_key_mm->addr =
+ (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
+ PCIE_MA_SYNC_A) & PAGE_MASK;
+ ma_sync_key_mm->len = PAGE_SIZE;
+ insert_mmap(ucontext, ma_sync_key_mm);
}
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
@@ -1795,15 +1798,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
return &qhp->ibqp;
err8:
- kfree(mm5);
+ kfree(ma_sync_key_mm);
err7:
- kfree(mm4);
+ kfree(rq_db_key_mm);
err6:
- kfree(mm3);
+ kfree(sq_db_key_mm);
err5:
- kfree(mm2);
+ kfree(rq_key_mm);
err4:
- kfree(mm1);
+ kfree(sq_key_mm);
err3:
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
@@ -1888,3 +1891,17 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
+
+void c4iw_drain_sq(struct ib_qp *ibqp)
+{
+ struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+ wait_for_completion(&qp->sq_drained);
+}
+
+void c4iw_drain_rq(struct ib_qp *ibqp)
+{
+ struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+ wait_for_completion(&qp->rq_drained);
+}
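The cq.c, iw_cxgb4.h, provider.c and qp.c hunks wire up a provider-specific drain for iw_cxgb4: instead of posting a marker WR as the generic __ib_drain_sq()/__ib_drain_rq() do, the driver arms one completion per queue at QP creation, signals it from the CQ poll path once the queue is empty while the QP is out of RTS, and the drain hooks simply wait. A comment-level summary of the flow (paraphrasing the hunks above, not new code):

	/*
	 * c4iw_create_qp():    init_completion(&qhp->sq_drained);
	 * c4iw_poll_cq_one():  if (qhp->attr.state != C4IW_QP_STATE_RTS &&
	 *                          t4_sq_empty(wq))
	 *                              complete(&qhp->sq_drained);
	 * c4iw_drain_sq():     wait_for_completion(&qp->sq_drained);
	 *
	 * The RQ side mirrors this with rq_drained / t4_rq_empty().
	 */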
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 343e8daf2270..1e26669793c3 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -753,103 +753,4 @@ struct fw_ri_wr {
#define FW_RI_WR_P2PTYPE_G(x) \
(((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
-struct tcp_options {
- __be16 mss;
- __u8 wsf;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8:4;
- __u8 unknown:1;
- __u8:1;
- __u8 sack:1;
- __u8 tstamp:1;
-#else
- __u8 tstamp:1;
- __u8 sack:1;
- __u8:1;
- __u8 unknown:1;
- __u8:4;
-#endif
-};
-
-struct cpl_pass_accept_req {
- union opcode_tid ot;
- __be16 rsvd;
- __be16 len;
- __be32 hdr_len;
- __be16 vlan;
- __be16 l2info;
- __be32 tos_stid;
- struct tcp_options tcpopt;
-};
-
-/* cpl_pass_accept_req.hdr_len fields */
-#define SYN_RX_CHAN_S 0
-#define SYN_RX_CHAN_M 0xF
-#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
-#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
-
-#define TCP_HDR_LEN_S 10
-#define TCP_HDR_LEN_M 0x3F
-#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
-#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
-
-#define IP_HDR_LEN_S 16
-#define IP_HDR_LEN_M 0x3FF
-#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
-#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
-
-#define ETH_HDR_LEN_S 26
-#define ETH_HDR_LEN_M 0x1F
-#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
-#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
-
-/* cpl_pass_accept_req.l2info fields */
-#define SYN_MAC_IDX_S 0
-#define SYN_MAC_IDX_M 0x1FF
-#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
-#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
-
-#define SYN_XACT_MATCH_S 9
-#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
-#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
-
-#define SYN_INTF_S 12
-#define SYN_INTF_M 0xF
-#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
-#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
-
-struct ulptx_idata {
- __be32 cmd_more;
- __be32 len;
-};
-
-#define ULPTX_NSGE_S 0
-#define ULPTX_NSGE_M 0xFFFF
-#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
-
-#define RX_DACK_MODE_S 29
-#define RX_DACK_MODE_M 0x3
-#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
-#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
-
-#define RX_DACK_CHANGE_S 31
-#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
-#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
-
-enum { /* TCP congestion control algorithms */
- CONG_ALG_RENO,
- CONG_ALG_TAHOE,
- CONG_ALG_NEWRENO,
- CONG_ALG_HIGHSPEED
-};
-
-#define CONG_CNTRL_S 14
-#define CONG_CNTRL_M 0x3
-#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
-#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
-
-#define T5_ISS_S 18
-#define T5_ISS_V(x) ((x) << T5_ISS_S)
-#define T5_ISS_F T5_ISS_V(1U)
-
#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
new file mode 100644
index 000000000000..6e7d27a14061
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_I40IW
+ tristate "Intel(R) Ethernet X722 iWARP Driver"
+ depends on INET && I40E
+ select GENERIC_ALLOCATOR
+ ---help---
+	  Intel(R) Ethernet X722 iWARP RDMA driver. Requires INET,
+	  INFINIBAND and the I40E Ethernet driver to be enabled.
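
For reference, an illustrative .config fragment that satisfies the dependencies declared above (the module/built-in split is just one possible choice):

	CONFIG_INET=y
	CONFIG_INFINIBAND=m
	CONFIG_I40E=m
	CONFIG_INFINIBAND_I40IW=m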
diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile
new file mode 100644
index 000000000000..90068c03d217
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/Makefile
@@ -0,0 +1,9 @@
+ccflags-y := -Idrivers/net/ethernet/intel/i40e
+
+obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
+
+i40iw-objs :=\
+ i40iw_cm.o i40iw_ctrl.o \
+ i40iw_hmc.o i40iw_hw.o i40iw_main.o \
+ i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \
+ i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o
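
With the option enabled, the objects above build like any other in-tree module; an illustrative invocation from the top of the kernel tree:

	$ make M=drivers/infiniband/hw/i40iw modules

The ccflags-y line is what lets the i40iw sources find i40e_client.h in the sibling i40e driver directory.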
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
new file mode 100644
index 000000000000..819767681445
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -0,0 +1,570 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_IW_H
+#define I40IW_IW_H
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/crc32c.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
+#include <crypto/hash.h>
+
+#include "i40iw_status.h"
+#include "i40iw_osdep.h"
+#include "i40iw_d.h"
+#include "i40iw_hmc.h"
+
+#include <i40e_client.h>
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_ucontext.h"
+#include "i40iw_pble.h"
+#include "i40iw_verbs.h"
+#include "i40iw_cm.h"
+#include "i40iw_user.h"
+#include "i40iw_puda.h"
+
+#define I40IW_FW_VERSION 2
+#define I40IW_HW_VERSION 2
+
+#define I40IW_ARP_ADD 1
+#define I40IW_ARP_DELETE 2
+#define I40IW_ARP_RESOLVE 3
+
+#define I40IW_MACIP_ADD 1
+#define I40IW_MACIP_DELETE 2
+
+#define IW_CCQ_SIZE (I40IW_CQP_SW_SQSIZE_2048 + 1)
+#define IW_CEQ_SIZE 2048
+#define IW_AEQ_SIZE 2048
+
+#define RX_BUF_SIZE (1536 + 8)
+#define IW_REG0_SIZE (4 * 1024)
+#define IW_TX_TIMEOUT (6 * HZ)
+#define IW_FIRST_QPN 1
+#define IW_SW_CONTEXT_ALIGN 1024
+
+#define MAX_DPC_ITERATIONS 128
+
+#define I40IW_EVENT_TIMEOUT 100000
+#define I40IW_VCHNL_EVENT_TIMEOUT 100000
+
+#define I40IW_NO_VLAN 0xffff
+#define I40IW_NO_QSET 0xffff
+
+/* access to mcast filter list */
+#define IW_ADD_MCAST false
+#define IW_DEL_MCAST true
+
+#define I40IW_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
+#define I40IW_DRV_OPT_DISABLE_MPA_CRC 0x00000002
+#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
+#define I40IW_DRV_OPT_DISABLE_INTF 0x00000008
+#define I40IW_DRV_OPT_ENABLE_MSI 0x00000010
+#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
+#define I40IW_DRV_OPT_NO_INLINE_DATA 0x00000080
+#define I40IW_DRV_OPT_DISABLE_INT_MOD 0x00000100
+#define I40IW_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+#define I40IW_DRV_OPT_ENABLE_PAU 0x00000400
+#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
+
+#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
+#define IW_CFG_FPM_QP_COUNT 32768
+
+#define I40IW_MTU_TO_MSS 40
+#define I40IW_DEFAULT_MSS 1460
+
+struct i40iw_cqp_compl_info {
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ bool error;
+ u8 op_code;
+};
+
+#define i40iw_pr_err(fmt, args ...) pr_err("%s: "fmt, __func__, ## args)
+
+#define i40iw_pr_info(fmt, args ...) pr_info("%s: " fmt, __func__, ## args)
+
+#define i40iw_pr_warn(fmt, args ...) pr_warn("%s: " fmt, __func__, ## args)
+
+struct i40iw_cqp_request {
+ struct cqp_commands_info info;
+ wait_queue_head_t waitq;
+ struct list_head list;
+ atomic_t refcount;
+ void (*callback_fcn)(struct i40iw_cqp_request*, u32);
+ void *param;
+ struct i40iw_cqp_compl_info compl_info;
+ bool waiting;
+ bool request_done;
+ bool dynamic;
+};
+
+struct i40iw_cqp {
+ struct i40iw_sc_cqp sc_cqp;
+	spinlock_t req_lock; /* cqp request list */
+ wait_queue_head_t waitq;
+ struct i40iw_dma_mem sq;
+ struct i40iw_dma_mem host_ctx;
+ u64 *scratch_array;
+ struct i40iw_cqp_request *cqp_requests;
+ struct list_head cqp_avail_reqs;
+ struct list_head cqp_pending_reqs;
+};
+
+struct i40iw_device;
+
+struct i40iw_ccq {
+ struct i40iw_sc_cq sc_cq;
+ spinlock_t lock; /* ccq control */
+ wait_queue_head_t waitq;
+ struct i40iw_dma_mem mem_cq;
+ struct i40iw_dma_mem shadow_area;
+};
+
+struct i40iw_ceq {
+ struct i40iw_sc_ceq sc_ceq;
+ struct i40iw_dma_mem mem;
+ u32 irq;
+ u32 msix_idx;
+ struct i40iw_device *iwdev;
+ struct tasklet_struct dpc_tasklet;
+};
+
+struct i40iw_aeq {
+ struct i40iw_sc_aeq sc_aeq;
+ struct i40iw_dma_mem mem;
+};
+
+struct i40iw_arp_entry {
+ u32 ip_addr[4];
+ u8 mac_addr[ETH_ALEN];
+};
+
+enum init_completion_state {
+ INVALID_STATE = 0,
+ INITIAL_STATE,
+ CQP_CREATED,
+ HMC_OBJS_CREATED,
+ PBLE_CHUNK_MEM,
+ CCQ_CREATED,
+ AEQ_CREATED,
+ CEQ_CREATED,
+ ILQ_CREATED,
+ IEQ_CREATED,
+ INET_NOTIFIER,
+ IP_ADDR_REGISTERED,
+ RDMA_DEV_REGISTERED
+};
+
+struct i40iw_msix_vector {
+ u32 idx;
+ u32 irq;
+ u32 cpu_affinity;
+ u32 ceq_id;
+};
+
+#define I40IW_MSIX_TABLE_SIZE 65
+
+struct virtchnl_work {
+ struct work_struct work;
+ union {
+ struct i40iw_cqp_request *cqp_request;
+ struct i40iw_virtchnl_work_info work_info;
+ };
+};
+
+struct i40e_qvlist_info;
+
+struct i40iw_device {
+ struct i40iw_ib_device *iwibdev;
+ struct net_device *netdev;
+ wait_queue_head_t vchnl_waitq;
+ struct i40iw_sc_dev sc_dev;
+ struct i40iw_handler *hdl;
+ struct i40e_info *ldev;
+ struct i40e_client *client;
+ struct i40iw_hw hw;
+ struct i40iw_cm_core cm_core;
+ unsigned long *mem_resources;
+ unsigned long *allocated_qps;
+ unsigned long *allocated_cqs;
+ unsigned long *allocated_mrs;
+ unsigned long *allocated_pds;
+ unsigned long *allocated_arps;
+ struct i40iw_qp **qp_table;
+ bool msix_shared;
+ u32 msix_count;
+ struct i40iw_msix_vector *iw_msixtbl;
+ struct i40e_qvlist_info *iw_qvlist;
+
+ struct i40iw_hmc_pble_rsrc *pble_rsrc;
+ struct i40iw_arp_entry *arp_table;
+ struct i40iw_cqp cqp;
+ struct i40iw_ccq ccq;
+ u32 ceqs_count;
+ struct i40iw_ceq *ceqlist;
+ struct i40iw_aeq aeq;
+ u32 arp_table_size;
+ u32 next_arp_index;
+ spinlock_t resource_lock; /* hw resource access */
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 of_device_registered;
+
+ u32 device_cap_flags;
+ unsigned long db_start;
+ u8 resource_profile;
+ u8 max_rdma_vfs;
+ u8 max_enabled_vfs;
+ u8 max_sge;
+ u8 iw_status;
+ u8 send_term_ok;
+ bool push_mode; /* Initialized from parameter passed to driver */
+
+ /* x710 specific */
+ struct mutex pbl_mutex;
+ struct tasklet_struct dpc_tasklet;
+ struct workqueue_struct *virtchnl_wq;
+ struct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT];
+ struct i40iw_dma_mem obj_mem;
+ struct i40iw_dma_mem obj_next;
+ u8 *hmc_info_mem;
+ u32 sd_type;
+ struct workqueue_struct *param_wq;
+ atomic_t params_busy;
+ u32 mss;
+ enum init_completion_state init_state;
+ u16 mac_ip_table_idx;
+ atomic_t vchnl_msgs;
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 max_pd;
+ u32 next_qp;
+ u32 next_cq;
+ u32 next_pd;
+ u32 max_mr_size;
+ u32 max_qp_wr;
+ u32 max_cqe;
+ u32 mr_stagmask;
+ u32 mpa_version;
+ bool dcb;
+};
+
+struct i40iw_ib_device {
+ struct ib_device ibdev;
+ struct i40iw_device *iwdev;
+};
+
+struct i40iw_handler {
+ struct list_head list;
+ struct i40e_client *client;
+ struct i40iw_device device;
+ struct i40e_info ldev;
+};
+
+/**
+ * to_iwdev - get device
+ * @ibdev: ib device
+ **/
+static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev;
+}
+
+/**
+ * to_ucontext - get user context
+ * @ibucontext: ib user context
+ **/
+static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct i40iw_ucontext, ibucontext);
+}
+
+/**
+ * to_iwpd - get protection domain
+ * @ibpd: ib pd
+ **/
+static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct i40iw_pd, ibpd);
+}
+
+/**
+ * to_iwmr - get device memory region
+ * @ibmr: ib memory region
+ **/
+static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct i40iw_mr, ibmr);
+}
+
+/**
+ * to_iwmr_from_ibfmr - get device memory region
+ * @ibfmr: ib fmr
+ **/
+static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct i40iw_mr, ibfmr);
+}
+
+/**
+ * to_iwmw - get device memory window
+ * @ibmw: ib memory window
+ **/
+static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct i40iw_mr, ibmw);
+}
+
+/**
+ * to_iwcq - get completion queue
+ * @ibcq: ib cq
+ **/
+static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct i40iw_cq, ibcq);
+}
+
+/**
+ * to_iwqp - get device qp
+ * @ibqp: ib qp
+ **/
+static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct i40iw_qp, ibqp);
+}
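+
+/*
+ * Editor's note (not part of the original patch): each accessor above
+ * recovers the driver-private wrapper from the uverbs object embedded
+ * inside it, e.g.
+ *
+ *	struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ *
+ * container_of() is compile-time pointer arithmetic, so these casts
+ * have no runtime cost.
+ */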
+
+/* i40iw.c */
+void i40iw_add_ref(struct ib_qp *);
+void i40iw_rem_ref(struct ib_qp *);
+struct ib_qp *i40iw_get_qp(struct ib_device *, int);
+
+void i40iw_flush_wqes(struct i40iw_device *iwdev,
+ struct i40iw_qp *qp);
+
+void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
+ unsigned char *mac_addr,
+ __be32 *ip_addr,
+ bool ipv4,
+ u32 action);
+
+int i40iw_manage_apbvt(struct i40iw_device *iwdev,
+ u16 accel_local_port,
+ bool add_port);
+
+struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
+void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
+void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
+
+/**
+ * i40iw_alloc_resource - allocate a resource
+ * @iwdev: device pointer
+ * @resource_array: resource bit array
+ * @max_resources: maximum resource number
+ * @req_resource_num: allocated resource number
+ * @next: next free id
+ **/
+static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
+ unsigned long *resource_array,
+ u32 max_resources,
+ u32 *req_resource_num,
+ u32 *next)
+{
+ u32 resource_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->resource_lock, flags);
+ resource_num = find_next_zero_bit(resource_array, max_resources, *next);
+ if (resource_num >= max_resources) {
+ resource_num = find_first_zero_bit(resource_array, max_resources);
+ if (resource_num >= max_resources) {
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+ return -EOVERFLOW;
+ }
+ }
+ set_bit(resource_num, resource_array);
+ *next = resource_num + 1;
+ if (*next == max_resources)
+ *next = 0;
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+ *req_resource_num = resource_num;
+
+ return 0;
+}
+
+/**
+ * i40iw_is_resource_allocated - determine if resource is allocated
+ * @iwdev: device pointer
+ * @resource_array: resource array for the resource_num
+ * @resource_num: resource number to check
+ **/
+static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev,
+ unsigned long *resource_array,
+ u32 resource_num)
+{
+ bool bit_is_set;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->resource_lock, flags);
+
+ bit_is_set = test_bit(resource_num, resource_array);
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+
+ return bit_is_set;
+}
+
+/**
+ * i40iw_free_resource - free a resource
+ * @iwdev: device pointer
+ * @resource_array: resource array for the resource_num
+ * @resource_num: resource number to free
+ **/
+static inline void i40iw_free_resource(struct i40iw_device *iwdev,
+ unsigned long *resource_array,
+ u32 resource_num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->resource_lock, flags);
+ clear_bit(resource_num, resource_array);
+ spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+}
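+
+/*
+ * Editor's sketch (not part of the original patch): the three helpers
+ * above form a small id allocator over a bitmap. Typical use, here
+ * carving a QP number out of iwdev->allocated_qps (locals illustrative):
+ *
+ *	u32 qp_num;
+ *	int err = i40iw_alloc_resource(iwdev, iwdev->allocated_qps,
+ *				       iwdev->max_qp, &qp_num,
+ *				       &iwdev->next_qp);
+ *	if (err)
+ *		return err;	// -EOVERFLOW: every id is in use
+ *	...
+ *	i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+ */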
+
+/**
+ * to_iwhdl - Get the handler from the device pointer
+ * @iwdev: device pointer
+ **/
+static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev)
+{
+ return container_of(iw_dev, struct i40iw_handler, device);
+}
+
+struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
+
+/**
+ * i40iw_initialize_hw_resources - initialize hw resource tracking
+ * @iwdev: device pointer
+ */
+u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
+
+int i40iw_register_rdma_device(struct i40iw_device *iwdev);
+void i40iw_port_ibevent(struct i40iw_device *iwdev);
+int i40iw_cm_disconn(struct i40iw_qp *);
+void i40iw_cm_disconn_worker(void *);
+int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
+ struct sk_buff *);
+
+enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+ struct i40iw_cqp_request *cqp_request);
+enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
+ u8 *mac_addr, u8 *mac_index);
+int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+
+void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
+void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
+ struct i40iw_modify_qp_info *info, bool wait);
+
+enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cminfo,
+ enum i40iw_quad_entry_type etype,
+ enum i40iw_quad_hash_manage_type mtype,
+ void *cmnode,
+ bool wait);
+void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
+void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
+void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ u32 qp_num);
+enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+ struct i40iw_dma_mem *memptr,
+ u32 size, u32 mask);
+
+void i40iw_request_reset(struct i40iw_device *iwdev);
+void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
+void i40iw_setup_cm_core(struct i40iw_device *iwdev);
+void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
+void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
+void i40iw_process_aeq(struct i40iw_device *);
+void i40iw_next_iw_state(struct i40iw_qp *iwqp,
+ u8 state, u8 del_hash,
+ u8 term, u8 term_len);
+int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack);
+struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
+ u16 rem_port,
+ u32 *rem_addr,
+ u16 loc_port,
+ u32 *loc_addr,
+ bool add_refcnt);
+
+enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
+ struct i40iw_sc_qp *qp,
+ struct i40iw_qp_flush_info *info,
+ bool wait);
+
+void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src);
+struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd,
+ u64 addr,
+ u64 size,
+ int acc,
+ u64 *iova_start);
+
+int i40iw_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
+int i40iw_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
+int i40iw_net_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
new file mode 100644
index 000000000000..38f917a6c778
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -0,0 +1,4137 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/atomic.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/notifier.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/highmem.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
+#include <net/neighbour.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+#include <net/ip_fib.h>
+#include <net/tcp.h>
+#include <asm/checksum.h>
+
+#include "i40iw.h"
+
+static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
+static void i40iw_cm_post_event(struct i40iw_cm_event *event);
+static void i40iw_disconnect_worker(struct work_struct *work);
+
+/**
+ * i40iw_free_sqbuf - put back puda buffer if refcount = 0
+ * @dev: FPK device
+ * @bufp: puda buffer to free
+ */
+void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
+{
+ struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
+ struct i40iw_puda_rsrc *ilq = dev->ilq;
+
+ if (!atomic_dec_return(&buf->refcount))
+ i40iw_puda_ret_bufpool(ilq, buf);
+}
+
+/**
+ * i40iw_derive_hw_ird_setting - Calculate IRD
+ *
+ * @cm_ird: IRD of connection's node
+ *
+ * The ird from the connection is rounded to a supported HW
+ * setting (2,8,32,64) and then encoded for ird_size field of
+ * qp_ctx
+ */
+static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
+{
+ u8 encoded_ird_size;
+ u8 pof2_cm_ird = 1;
+
+	/* round up to next power of 2 */
+ while (pof2_cm_ird < cm_ird)
+ pof2_cm_ird *= 2;
+
+ /* ird_size field is encoded in qp_ctx */
+ switch (pof2_cm_ird) {
+ case I40IW_HW_IRD_SETTING_64:
+ encoded_ird_size = 3;
+ break;
+ case I40IW_HW_IRD_SETTING_32:
+ case I40IW_HW_IRD_SETTING_16:
+ encoded_ird_size = 2;
+ break;
+ case I40IW_HW_IRD_SETTING_8:
+ case I40IW_HW_IRD_SETTING_4:
+ encoded_ird_size = 1;
+ break;
+ case I40IW_HW_IRD_SETTING_2:
+ default:
+ encoded_ird_size = 0;
+ break;
+ }
+ return encoded_ird_size;
+}
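+
+/*
+ * Editor's worked example (not part of the original patch), assuming the
+ * I40IW_HW_IRD_SETTING_* macros equal their numeric suffixes: cm_ird = 20
+ * rounds up to pof2_cm_ird = 32 and encodes as 2; cm_ird = 2 encodes as 0.
+ */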
+
+/**
+ * i40iw_record_ird_ord - Record IRD/ORD passed in
+ * @cm_node: connection's node
+ * @conn_ird: connection IRD
+ * @conn_ord: connection ORD
+ */
+static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
+{
+ if (conn_ird > I40IW_MAX_IRD_SIZE)
+ conn_ird = I40IW_MAX_IRD_SIZE;
+
+ if (conn_ord > I40IW_MAX_ORD_SIZE)
+ conn_ord = I40IW_MAX_ORD_SIZE;
+
+ cm_node->ird_size = conn_ird;
+ cm_node->ord_size = conn_ord;
+}
+
+/**
+ * i40iw_copy_ip_ntohl - change network to host ip
+ * @dst: destination, host byte order
+ * @src: source, network (big endian) byte order
+ */
+void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
+{
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst = ntohl(*src);
+}
+
+/**
+ * i40iw_copy_ip_htonl - change host addr to network ip
+ * @dst: destination, network byte order
+ * @src: source, host byte order
+ */
+static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
+{
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst = htonl(*src);
+}
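+
+/*
+ * Editor's note (not part of the original patch): these two helpers swap
+ * all four 32-bit words of an IPv6 address. For IPv4 the driver keeps the
+ * address in loc_addr[0]/rem_addr[0] and converts it with a single
+ * htonl()/ntohl(), as in i40iw_fill_sockaddr4() below.
+ */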
+
+/**
+ * i40iw_fill_sockaddr4 - get addr info for passive connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_node->loc_port);
+ raddr->sin_port = htons(cm_node->rem_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
+ raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
+}
+
+/**
+ * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_node->loc_port);
+ raddr6->sin6_port = htons(cm_node->rem_port);
+
+ i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+}
+
+/**
+ * i40iw_get_addr_info - copy ip/tcp info from cm_node
+ * @cm_node: contains ip/tcp info
+ * @cm_info: to get a copy of the cm_node ip/tcp info
+ */
+static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
+ struct i40iw_cm_info *cm_info)
+{
+ cm_info->ipv4 = cm_node->ipv4;
+ cm_info->vlan_id = cm_node->vlan_id;
+ memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
+ memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
+ cm_info->loc_port = cm_node->loc_port;
+ cm_info->rem_port = cm_node->rem_port;
+}
+
+/**
+ * i40iw_get_cmevent_info - for cm event upcall
+ * @cm_node: connection's node
+ * @cm_id: upper layers cm struct for the event
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ struct iw_cm_event *event)
+{
+ memcpy(&event->local_addr, &cm_id->m_local_addr,
+ sizeof(event->local_addr));
+ memcpy(&event->remote_addr, &cm_id->m_remote_addr,
+ sizeof(event->remote_addr));
+ if (cm_node) {
+ event->private_data = (void *)cm_node->pdata_buf;
+ event->private_data_len = (u8)cm_node->pdata.size;
+ event->ird = cm_node->ird_size;
+ event->ord = cm_node->ord_size;
+ }
+}
+
+/**
+ * i40iw_send_cm_event - upcall cm's event handler
+ * @cm_node: connection's node
+ * @cm_id: upper layer's cm info struct
+ * @type: Event type to indicate
+ * @status: status for the event type
+ */
+static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type,
+ int status)
+{
+ struct iw_cm_event event;
+
+ memset(&event, 0, sizeof(event));
+ event.event = type;
+ event.status = status;
+ switch (type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ if (cm_node->ipv4)
+ i40iw_fill_sockaddr4(cm_node, &event);
+ else
+ i40iw_fill_sockaddr6(cm_node, &event);
+ event.provider_data = (void *)cm_node;
+ event.private_data = (void *)cm_node->pdata_buf;
+ event.private_data_len = (u8)cm_node->pdata.size;
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ i40iw_get_cmevent_info(cm_node, cm_id, &event);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ event.ird = cm_node->ird_size;
+ event.ord = cm_node->ord_size;
+ break;
+ case IW_CM_EVENT_DISCONNECT:
+ break;
+ case IW_CM_EVENT_CLOSE:
+ break;
+ default:
+ i40iw_pr_err("event type received type = %d\n", type);
+ return -1;
+ }
+ return cm_id->event_handler(cm_id, &event);
+}
+
+/**
+ * i40iw_create_event - create cm event
+ * @cm_node: connection's node
+ * @type: Event type to generate
+ */
+static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
+ enum i40iw_cm_event_type type)
+{
+ struct i40iw_cm_event *event;
+
+ if (!cm_node->cm_id)
+ return NULL;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+ if (!event)
+ return NULL;
+
+ event->type = type;
+ event->cm_node = cm_node;
+ memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
+ memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
+ event->cm_info.rem_port = cm_node->rem_port;
+ event->cm_info.loc_port = cm_node->loc_port;
+ event->cm_info.cm_id = cm_node->cm_id;
+
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
+ cm_node,
+ event,
+ type,
+ event->cm_info.loc_addr,
+ event->cm_info.rem_addr);
+
+ i40iw_cm_post_event(event);
+ return event;
+}
+
+/**
+ * i40iw_free_retrans_entry - free send entry
+ * @cm_node: connection's node
+ */
+static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_timer_entry *send_entry;
+
+ send_entry = cm_node->send_entry;
+ if (send_entry) {
+ cm_node->send_entry = NULL;
+ i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
+ kfree(send_entry);
+ atomic_dec(&cm_node->ref_count);
+ }
+}
+
+/**
+ * i40iw_cleanup_retrans_entry - free send entry with lock
+ * @cm_node: connection's node
+ */
+static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ i40iw_free_retrans_entry(cm_node);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+}
+
+static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
+{
+ if ((cm_node->rem_mac[0] == 0x0) &&
+ (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
+ ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
+ return true;
+ return false;
+}
+
+/**
+ * i40iw_form_cm_frame - get a free packet and build frame
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+ struct i40iw_kmem_info *options,
+ struct i40iw_kmem_info *hdr,
+ struct i40iw_kmem_info *pdata,
+ u8 flags)
+{
+ struct i40iw_puda_buf *sqbuf;
+ struct i40iw_sc_dev *dev = cm_node->dev;
+ u8 *buf;
+
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct ethhdr *ethh;
+ u16 packetsize;
+ u16 eth_hlen = ETH_HLEN;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ sqbuf = i40iw_puda_get_bufpool(dev->ilq);
+ if (!sqbuf)
+ return NULL;
+ buf = sqbuf->mem.va;
+
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata) {
+ pd_len = pdata->size;
+ if (!is_remote_ne020_or_chelsio(cm_node))
+ pd_len += MPA_ZERO_PAD_LEN;
+ }
+
+ if (cm_node->vlan_id < VLAN_TAG_PRESENT)
+ eth_hlen += 4;
+
+ if (cm_node->ipv4)
+ packetsize = sizeof(*iph) + sizeof(*tcph);
+ else
+ packetsize = sizeof(*ip6h) + sizeof(*tcph);
+ packetsize += opts_len + hdr_len + pd_len;
+
+ memset(buf, 0x00, eth_hlen + packetsize);
+
+ sqbuf->totallen = packetsize + eth_hlen;
+ sqbuf->maclen = eth_hlen;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = (void *)cm_node;
+
+ ethh = (struct ethhdr *)buf;
+ buf += eth_hlen;
+
+ if (cm_node->ipv4) {
+ sqbuf->ipv4 = true;
+
+ iph = (struct iphdr *)buf;
+ buf += sizeof(*iph);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
+ } else {
+ ethh->h_proto = htons(ETH_P_IP);
+ }
+
+ iph->version = IPVERSION;
+		iph->ihl = 5; /* 5 * 4-byte words, IP header len */
+ iph->tos = 0;
+ iph->tot_len = htons(packetsize);
+ iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+
+ iph->frag_off = htons(0x4000);
+ iph->ttl = 0x40;
+ iph->protocol = IPPROTO_TCP;
+ iph->saddr = htonl(cm_node->loc_addr[0]);
+ iph->daddr = htonl(cm_node->rem_addr[0]);
+ } else {
+ sqbuf->ipv4 = false;
+ ip6h = (struct ipv6hdr *)buf;
+ buf += sizeof(*ip6h);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
+ } else {
+ ethh->h_proto = htons(ETH_P_IPV6);
+ }
+ ip6h->version = 6;
+ ip6h->flow_lbl[0] = 0;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+ ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
+ ip6h->nexthdr = 6;
+ ip6h->hop_limit = 128;
+ i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+ }
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else {
+ tcph->ack_seq = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->fin = 1;
+ }
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
+ tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->doff << 2;
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pd_len)
+ memcpy(buf, pdata->addr, pd_len);
+
+ atomic_set(&sqbuf->refcount, 1);
+
+ return sqbuf;
+}
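+
+/*
+ * Editor's sketch of the frame assembled above (not part of the patch):
+ *
+ *	[eth, 14 or 18 bytes with VLAN][iph or ip6h][tcph][tcp options]
+ *	[mpa header][private data]
+ *
+ * sqbuf->totallen covers the whole frame; sqbuf->tcphlen is recomputed
+ * from tcph->doff to cover the TCP header plus options.
+ */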
+
+/**
+ * i40iw_send_reset - Send RST packet
+ * @cm_node: connection's node
+ */
+static int i40iw_send_reset(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_puda_buf *sqbuf;
+ int flags = SET_RST | SET_ACK;
+
+ sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
+ if (!sqbuf) {
+ i40iw_pr_err("no sqbuf\n");
+ return -1;
+ }
+
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
+}
+
+/**
+ * i40iw_active_open_err - send event for active side cm error
+ * @cm_node: connection's node
+ * @reset: Flag to send reset or not
+ */
+static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
+{
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_connect_errs++;
+ if (reset) {
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "%s cm_node=%p state=%d\n",
+ __func__,
+ cm_node,
+ cm_node->state);
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ }
+
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+}
+
+/**
+ * i40iw_passive_open_err - handle passive side cm error
+ * @cm_node: connection's node
+ * @reset: send reset or just free cm_node
+ */
+static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
+{
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_passive_errs++;
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "%s cm_node=%p state =%d\n",
+ __func__,
+ cm_node,
+ cm_node->state);
+ if (reset)
+ i40iw_send_reset(cm_node);
+ else
+ i40iw_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * i40iw_event_connect_error - to create connect error event
+ * @event: cm information for connect event
+ */
+static void i40iw_event_connect_error(struct i40iw_cm_event *event)
+{
+ struct i40iw_qp *iwqp;
+ struct iw_cm_id *cm_id;
+
+ cm_id = event->cm_node->cm_id;
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+
+ if (!iwqp || !iwqp->iwdev)
+ return;
+
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ i40iw_send_cm_event(event->cm_node, cm_id,
+ IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ cm_id->rem_ref(cm_id);
+ i40iw_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * i40iw_process_options - parse TCP options from received segment
+ * @cm_node: connection's node
+ * @optionsloc: pointer to start of options
+ * @optionsize: size of all options
+ * @syn_packet: flag if syn packet
+ */
+static int i40iw_process_options(struct i40iw_cm_node *cm_node,
+ u8 *optionsloc,
+ u32 optionsize,
+ u32 syn_packet)
+{
+ u32 tmp;
+ u32 offset = 0;
+ union all_known_options *all_options;
+ char got_mss_option = 0;
+
+ while (offset < optionsize) {
+ all_options = (union all_known_options *)(optionsloc + offset);
+ switch (all_options->as_base.optionnum) {
+ case OPTION_NUMBER_END:
+ offset = optionsize;
+ break;
+ case OPTION_NUMBER_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUMBER_MSS:
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "%s: MSS Length: %d Offset: %d Size: %d\n",
+ __func__,
+ all_options->as_mss.length,
+ offset,
+ optionsize);
+ got_mss_option = 1;
+ if (all_options->as_mss.length != 4)
+ return -1;
+ tmp = ntohs(all_options->as_mss.mss);
+ if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ break;
+ case OPTION_NUMBER_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale =
+ all_options->as_windowscale.shiftcount;
+ break;
+ default:
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "TCP Option not understood: %x\n",
+ all_options->as_base.optionnum);
+ break;
+ }
+ offset += all_options->as_base.length;
+ }
+ if (!got_mss_option && syn_packet)
+ cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
+ return 0;
+}
+
+/**
+ * i40iw_handle_tcp_options - process incoming TCP options
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ * @optionsize: size of options rcvd
+ * @passive: active or passive flag
+ */
+static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
+ struct tcphdr *tcph,
+ int optionsize,
+ int passive)
+{
+ u8 *optionsloc = (u8 *)&tcph[1];
+
+ if (optionsize) {
+ if (i40iw_process_options(cm_node,
+ optionsloc,
+ optionsize,
+ (u32)tcph->syn)) {
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "%s: Node %p, Sending RESET\n",
+ __func__,
+ cm_node);
+ if (passive)
+ i40iw_passive_open_err(cm_node, true);
+ else
+ i40iw_active_open_err(cm_node, true);
+ return -1;
+ }
+ }
+
+ cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
+ cm_node->tcp_cntxt.snd_wscale;
+
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
+ cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+ return 0;
+}
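+
+/*
+ * Editor's worked example (not part of the original patch): a peer
+ * advertising window = 0x2000 with snd_wscale = 7 yields
+ * snd_wnd = 0x2000 << 7 = 0x100000 bytes (1 MiB).
+ */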
+
+/**
+ * i40iw_build_mpa_v1 - build a MPA V1 frame
+ * @cm_node: connection's node
+ * @mpa_key: to do read0 or write0
+ */
+static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
+ void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+ break;
+ case MPA_KEY_REPLY:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+ break;
+ default:
+ break;
+ }
+ mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+ mpa_frame->rev = cm_node->mpa_frame_rev;
+ mpa_frame->priv_data_len = htons(cm_node->pdata.size);
+}
+
+/**
+ * i40iw_build_mpa_v2 - build a MPA V2 frame
+ * @cm_node: connection's node
+ * @start_addr: buffer start address
+ * @mpa_key: to do read0 or write0
+ */
+static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
+ void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
+ struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+
+ /* initialize the upper 5 bytes of the frame */
+ i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
+ mpa_frame->flags |= IETF_MPA_V2_FLAG;
+ mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
+
+ /* initialize RTR msg */
+ if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+ rtr_msg->ctrl_ird = IETF_NO_IRD_ORD;
+ rtr_msg->ctrl_ord = IETF_NO_IRD_ORD;
+ } else {
+ rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD : cm_node->ird_size;
+ rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD : cm_node->ord_size;
+ }
+
+ rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
+ rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+ rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ case MPA_KEY_REPLY:
+ switch (cm_node->send_rdma0_op) {
+ case SEND_RDMA_WRITE_ZERO:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+ break;
+ case SEND_RDMA_READ_ZERO:
+ rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
+ rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+}
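+
+/*
+ * Editor's note (not part of the original patch): each RTR control word
+ * built above is 16 bits, byte-swapped to big endian last. The low bits
+ * carry the IRD/ORD count, saturated at IETF_NO_IRD_ORD; the high bits of
+ * ctrl_ird carry the PEER_TO_PEER and FLPDU_ZERO_LEN flags, and the high
+ * bits of ctrl_ord carry the RDMA0 read/write opcode flags.
+ */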
+
+/**
+ * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
+ * @cm_node: connection's node
+ * @mpa: mpa: data buffer
+ * @mpa_key: to do read0 or write0
+ */
+static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
+ struct i40iw_kmem_info *mpa,
+ u8 mpa_key)
+{
+ int hdr_len = 0;
+
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V1:
+ hdr_len = sizeof(struct ietf_mpa_v1);
+ i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
+ break;
+ case IETF_MPA_V2:
+ hdr_len = sizeof(struct ietf_mpa_v2);
+ i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
+ break;
+ default:
+ break;
+ }
+
+ return hdr_len;
+}
+
+/**
+ * i40iw_send_mpa_request - active node send mpa request to passive node
+ * @cm_node: connection's node
+ */
+static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_puda_buf *sqbuf;
+
+ if (!cm_node) {
+ i40iw_pr_err("cm_node == NULL\n");
+ return -1;
+ }
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
+ cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REQUEST);
+ if (!cm_node->mpa_hdr.size) {
+ i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
+ return -1;
+ }
+
+ sqbuf = i40iw_form_cm_frame(cm_node,
+ NULL,
+ &cm_node->mpa_hdr,
+ &cm_node->pdata,
+ SET_ACK);
+ if (!sqbuf) {
+ i40iw_pr_err("sq_buf == NULL\n");
+ return -1;
+ }
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_send_mpa_reject - send mpa reject frame
+ * @cm_node: connection's node
+ * @pdata: reject data for connection
+ * @plen: length of reject data
+ */
+static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
+ const void *pdata,
+ u8 plen)
+{
+ struct i40iw_puda_buf *sqbuf;
+ struct i40iw_kmem_info priv_info;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
+ cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REPLY);
+
+ cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
+ priv_info.addr = (void *)pdata;
+ priv_info.size = plen;
+
+ sqbuf = i40iw_form_cm_frame(cm_node,
+ NULL,
+ &cm_node->mpa_hdr,
+ &priv_info,
+ SET_ACK | SET_FIN);
+ if (!sqbuf) {
+ i40iw_pr_err("no sqbuf\n");
+ return -ENOMEM;
+ }
+ cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_parse_mpa - process an IETF MPA frame
+ * @cm_node: connection's node
+ * @buffer: Data pointer
+ * @type: to return accept or reject
+ * @len: Len of mpa buffer
+ */
+static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
+{
+ struct ietf_mpa_v1 *mpa_frame;
+ struct ietf_mpa_v2 *mpa_v2_frame;
+ struct ietf_rtr_msg *rtr_msg;
+ int mpa_hdr_len;
+ int priv_data_len;
+
+ *type = I40IW_MPA_REQUEST_ACCEPT;
+
+ if (len < sizeof(struct ietf_mpa_v1)) {
+ i40iw_pr_err("ietf buffer small (%x)\n", len);
+ return -1;
+ }
+
+ mpa_frame = (struct ietf_mpa_v1 *)buffer;
+ mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+ priv_data_len = ntohs(mpa_frame->priv_data_len);
+
+ if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
+ i40iw_pr_err("large pri_data %d\n", priv_data_len);
+ return -1;
+ }
+ if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
+ i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
+ return -1;
+ }
+ if (mpa_frame->rev > cm_node->mpa_frame_rev) {
+ i40iw_pr_err("rev %d\n", mpa_frame->rev);
+ return -1;
+ }
+ cm_node->mpa_frame_rev = mpa_frame->rev;
+
+ if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
+ i40iw_pr_err("Unexpected MPA Key received\n");
+ return -1;
+ }
+ } else {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
+ i40iw_pr_err("Unexpected MPA Key received\n");
+ return -1;
+ }
+ }
+
+ if (priv_data_len + mpa_hdr_len > len) {
+ i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
+ priv_data_len, mpa_hdr_len, len);
+ return -1;
+ }
+ if (len > MAX_CM_BUFFER) {
+ i40iw_pr_err("ietf buffer large len = %d\n", len);
+ return -1;
+ }
+
+ switch (mpa_frame->rev) {
+ case IETF_MPA_V2:{
+ u16 ird_size;
+ u16 ord_size;
+ u16 ctrl_ord;
+ u16 ctrl_ird;
+
+ mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
+ mpa_hdr_len += IETF_RTR_MSG_SIZE;
+ rtr_msg = &mpa_v2_frame->rtr_msg;
+
+ /* parse rtr message */
+ ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+ ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+ ird_size = ctrl_ird & IETF_NO_IRD_ORD;
+ ord_size = ctrl_ord & IETF_NO_IRD_ORD;
+
+ if (!(ctrl_ird & IETF_PEER_TO_PEER))
+ return -1;
+
+ if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
+ cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
+ goto negotiate_done;
+ }
+
+ if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
+ /* responder */
+ if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
+ cm_node->ird_size = 1;
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+ } else {
+ /* initiator */
+ if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
+ return -1;
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+
+ if (cm_node->ird_size < ord_size)
+ /* no resources available */
+ return -1;
+ }
+
+negotiate_done:
+ if (ctrl_ord & IETF_RDMA0_READ)
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ else if (ctrl_ord & IETF_RDMA0_WRITE)
+ cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
+ else /* Not supported RDMA0 operation */
+ return -1;
+ i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
+ "MPAV2: Negotiated ORD: %d, IRD: %d\n",
+ cm_node->ord_size, cm_node->ird_size);
+ break;
+ }
+ break;
+ case IETF_MPA_V1:
+ default:
+ break;
+ }
+
+ memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
+ cm_node->pdata.size = priv_data_len;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
+ *type = I40IW_MPA_REQUEST_REJECT;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
+ cm_node->snd_mark_en = true;
+
+ return 0;
+}
+
+/**
+ * i40iw_schedule_cm_timer - schedule send or close timer
+ * @cm_node: connection's node
+ * @sqbuf: buffer to send
+ * @type: timer type, send or close
+ * @send_retrans: if rexmits to be done
+ * @close_when_complete: is cm_node to be removed
+ *
+ * note - cm_node needs to be protected before calling this. Encase in:
+ *		atomic_inc(&cm_node->ref_count);
+ *		i40iw_schedule_cm_timer(...);
+ *		i40iw_rem_ref_cm_node(cm_node);
+ */
+int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *sqbuf,
+ enum i40iw_timer_type type,
+ int send_retrans,
+ int close_when_complete)
+{
+ struct i40iw_sc_dev *dev = cm_node->dev;
+ struct i40iw_cm_core *cm_core = cm_node->cm_core;
+ struct i40iw_timer_entry *new_send;
+ int ret = 0;
+ u32 was_timer_set;
+ unsigned long flags;
+
+ new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ if (!new_send) {
+ i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+ return -ENOMEM;
+ }
+ new_send->retrycount = I40IW_DEFAULT_RETRYS;
+ new_send->retranscount = I40IW_DEFAULT_RETRANS;
+ new_send->sqbuf = sqbuf;
+ new_send->timetosend = jiffies;
+ new_send->type = type;
+ new_send->send_retrans = send_retrans;
+ new_send->close_when_complete = close_when_complete;
+
+ if (type == I40IW_TIMER_TYPE_CLOSE) {
+ new_send->timetosend += (HZ / 10);
+ if (cm_node->close_entry) {
+ kfree(new_send);
+ i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+ i40iw_pr_err("already close entry\n");
+ return -EINVAL;
+ }
+ cm_node->close_entry = new_send;
+ }
+
+ if (type == I40IW_TIMER_TYPE_SEND) {
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ cm_node->send_entry = new_send;
+ atomic_inc(&cm_node->ref_count);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
+
+ atomic_inc(&sqbuf->refcount);
+ i40iw_puda_send_buf(dev->ilq, sqbuf);
+ if (!send_retrans) {
+ i40iw_cleanup_retrans_entry(cm_node);
+ if (close_when_complete)
+ i40iw_rem_ref_cm_node(cm_node);
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = new_send->timetosend;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ return ret;
+}
+
+/**
+ * i40iw_retrans_expired - handle a packet that could not be retransmitted
+ * @cm_node: connection's node
+ */
+static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
+{
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ enum i40iw_cm_node_state state = cm_node->state;
+
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ switch (state) {
+ case I40IW_CM_STATE_SYN_RCVD:
+ case I40IW_CM_STATE_CLOSING:
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_LAST_ACK:
+ if (cm_node->cm_id)
+ cm_id->rem_ref(cm_id);
+ i40iw_send_reset(cm_node);
+ break;
+ default:
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_close_entry - for handling retry/timeouts
+ * @cm_node: connection's node
+ * @rem_node: flag for remove cm_node
+ */
+static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
+{
+ struct i40iw_timer_entry *close_entry = cm_node->close_entry;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct i40iw_qp *iwqp;
+ unsigned long flags;
+
+ if (!close_entry)
+ return;
+ iwqp = (struct i40iw_qp *)close_entry->sqbuf;
+ if (iwqp) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
+ iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
+ iwqp->last_aeq = I40IW_AE_RESET_SENT;
+ iwqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ i40iw_cm_disconn(iwqp);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else if (rem_node) {
+ /* TIME_WAIT state */
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ if (cm_id)
+ cm_id->rem_ref(cm_id);
+ kfree(close_entry);
+ cm_node->close_entry = NULL;
+}
+
+/**
+ * i40iw_cm_timer_tick - system's timer expired callback
+ * @pass: timer data, points to cm_core
+ */
+static void i40iw_cm_timer_tick(unsigned long pass)
+{
+ unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_timer_entry *send_entry, *close_entry;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
+ u32 settimer = 0;
+ unsigned long timetosend;
+ struct i40iw_sc_dev *dev;
+ unsigned long flags;
+
+ struct list_head timer_list;
+
+ INIT_LIST_HEAD(&timer_list);
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+ cm_node = container_of(list_node, struct i40iw_cm_node, list);
+ if (cm_node->close_entry || cm_node->send_entry) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->timer_entry, &timer_list);
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ list_for_each_safe(list_node, list_core_temp, &timer_list) {
+ cm_node = container_of(list_node,
+ struct i40iw_cm_node,
+ timer_entry);
+ close_entry = cm_node->close_entry;
+
+ if (close_entry) {
+ if (time_after(close_entry->timetosend, jiffies)) {
+ if (nexttimeout > close_entry->timetosend ||
+ !settimer) {
+ nexttimeout = close_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ i40iw_handle_close_entry(cm_node, 1);
+ }
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ goto done;
+ if (time_after(send_entry->timetosend, jiffies)) {
+ if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+ if ((nexttimeout > send_entry->timetosend) ||
+ !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ i40iw_free_retrans_entry(cm_node);
+ }
+ goto done;
+ }
+
+ if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
+ (cm_node->state == I40IW_CM_STATE_CLOSED)) {
+ i40iw_free_retrans_entry(cm_node);
+ goto done;
+ }
+
+ if (!send_entry->retranscount || !send_entry->retrycount) {
+ i40iw_free_retrans_entry(cm_node);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ i40iw_retrans_expired(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ goto done;
+ }
+ cm_node->cm_core->stats_pkt_retrans++;
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ dev = cm_node->dev;
+ atomic_inc(&send_entry->sqbuf->refcount);
+ i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ if (send_entry->send_retrans) {
+ send_entry->retranscount--;
+ timetosend = (I40IW_RETRY_TIMEOUT <<
+ (I40IW_DEFAULT_RETRANS -
+ send_entry->retranscount));
+
+ send_entry->timetosend = jiffies +
+ min(timetosend, I40IW_MAX_TIMEOUT);
+ if (nexttimeout > send_entry->timetosend || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ int close_when_complete;
+
+ close_when_complete = send_entry->close_when_complete;
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "cm_node=%p state=%d\n",
+ cm_node,
+ cm_node->state);
+ i40iw_free_retrans_entry(cm_node);
+ if (close_when_complete)
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+done:
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+
+ if (settimer) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (!timer_pending(&cm_core->tcp_timer)) {
+ cm_core->tcp_timer.expires = nexttimeout;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+}
+
+/**
+ * i40iw_send_syn - send SYN packet
+ * @cm_node: connection's node
+ * @sendack: flag to set ACK bit or not
+ */
+int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
+{
+ struct i40iw_puda_buf *sqbuf;
+ int flags = SET_SYN;
+ char optionsbuffer[sizeof(struct option_mss) +
+ sizeof(struct option_windowscale) +
+ sizeof(struct option_base) + TCP_OPTIONS_PADDING];
+ struct i40iw_kmem_info opts;
+
+ int optionssize = 0;
+ /* Sending MSS option */
+ union all_known_options *options;
+
+ opts.addr = optionsbuffer;
+ if (!cm_node) {
+ i40iw_pr_err("no cm_node\n");
+ return -EINVAL;
+ }
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_mss.optionnum = OPTION_NUMBER_MSS;
+ options->as_mss.length = sizeof(struct option_mss);
+ options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
+ optionssize += sizeof(struct option_mss);
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
+ options->as_windowscale.length = sizeof(struct option_windowscale);
+ options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+ optionssize += sizeof(struct option_windowscale);
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_end = OPTION_NUMBER_END;
+ optionssize += 1;
+
+ if (sendack)
+ flags |= SET_ACK;
+
+ opts.size = optionssize;
+
+ sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
+ if (!sqbuf) {
+ i40iw_pr_err("no sqbuf\n");
+ return -1;
+ }
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
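+
+/*
+ * Editor's sketch of the option bytes assembled above (not part of the
+ * patch), assuming the OPTION_NUMBER_* constants follow the standard TCP
+ * option kinds; shown for mss = 1460 and rcv_wscale = 8:
+ *
+ *	02 04 05 b4	MSS: kind 2, len 4, value 0x05b4
+ *	03 03 08	window scale: kind 3, len 3, shift 8
+ *	00		end of option list
+ */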
+
+/**
+ * i40iw_send_ack - Send ACK packet
+ * @cm_node: connection's node
+ */
+static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_puda_buf *sqbuf;
+
+ sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
+ if (sqbuf)
+ i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
+ else
+ i40iw_pr_err("no sqbuf\n");
+}
+
+/**
+ * i40iw_send_fin - Send FIN pkt
+ * @cm_node: connection's node
+ */
+static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_puda_buf *sqbuf;
+
+ sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
+ if (!sqbuf) {
+ i40iw_pr_err("no sqbuf\n");
+ return -1;
+ }
+ return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_find_node - find a cm node matching the given addr/port quad
+ * @cm_core: cm's core
+ * @rem_port: remote tcp port num
+ * @rem_addr: remote ip addr
+ * @loc_port: local tcp port num
+ * @loc_addr: local ip addr
+ * @add_refcnt: flag to increment refcount of cm_node
+ */
+struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
+ u16 rem_port,
+ u32 *rem_addr,
+ u16 loc_port,
+ u32 *loc_addr,
+ bool add_refcnt)
+{
+ struct list_head *hte;
+ struct i40iw_cm_node *cm_node;
+ unsigned long flags;
+
+ hte = &cm_core->connected_nodes;
+
+ /* walk the connected list and find the cm_node matching this addr/port quad */
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_entry(cm_node, hte, list) {
+ if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
+ (cm_node->loc_port == loc_port) &&
+ !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
+ (cm_node->rem_port == rem_port)) {
+ if (add_refcnt)
+ atomic_inc(&cm_node->ref_count);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ return cm_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ /* no owner node */
+ return NULL;
+}
+
+/**
+ * i40iw_find_listener - find a cm node listening on this addr-port pair
+ * @cm_core: cm's core
+ * @dst_addr: listener ip addr
+ * @dst_port: listener tcp port num
+ * @vlan_id: vlan id of the listener
+ * @listener_state: state to match with listen node's
+ */
+static struct i40iw_cm_listener *i40iw_find_listener(
+ struct i40iw_cm_core *cm_core,
+ u32 *dst_addr,
+ u16 dst_port,
+ u16 vlan_id,
+ enum i40iw_cm_listener_state listener_state)
+{
+ struct i40iw_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ u32 listen_addr[4];
+ u16 listen_port;
+ unsigned long flags;
+
+ /* walk the listener list and find a listen node matching addr/port/state */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+ listen_port = listen_node->loc_port;
+ /* compare addr/port; a zero listen address is a wildcard matching any local address */
+ if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
+ !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
+ (listen_port == dst_port) &&
+ (listener_state & listen_node->listener_state)) {
+ atomic_inc(&listen_node->ref_count);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return listen_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return NULL;
+}
+
+/**
+ * i40iw_add_hte_node - add a cm node to the hash table
+ * @cm_core: cm's core
+ * @cm_node: connection's node
+ */
+static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
+ struct i40iw_cm_node *cm_node)
+{
+ struct list_head *hte;
+ unsigned long flags;
+
+ if (!cm_node || !cm_core) {
+ i40iw_pr_err("cm_node or cm_core == NULL\n");
+ return;
+ }
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ /* get a handle on the hash table element (list head for this slot) */
+ hte = &cm_core->connected_nodes;
+ list_add_tail(&cm_node->list, hte);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+}
+
+/**
+ * i40iw_listen_port_in_use - determine if port is in use
+ * @cm_core: cm's core
+ * @port: Listen port number
+ */
+static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
+{
+ struct i40iw_cm_listener *listen_node;
+ unsigned long flags;
+ bool ret = false;
+
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ if (listen_node->loc_port == port) {
+ ret = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return ret;
+}
+
+/**
+ * i40iw_del_multiple_qhash - Remove qhash and child listens
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ */
+static enum i40iw_status_code i40iw_del_multiple_qhash(
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *cm_parent_listen_node)
+{
+ struct i40iw_cm_listener *child_listen_node;
+ enum i40iw_status_code ret = I40IW_ERR_CONFIG;
+ struct list_head *pos, *tpos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
+ child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
+ if (child_listen_node->ipv4)
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ else
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+ "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ list_del(pos);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ cm_info->vlan_id = child_listen_node->vlan_id;
+ ret = i40iw_manage_qhash(iwdev, cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "freed pointer = %p\n",
+ child_listen_node);
+ }
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+
+ return ret;
+}
+
+/**
+ * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
+ * @addr: local IPv6 address
+ * @vlan_id: vlan id for the given IPv6 address
+ * @mac: mac address for the given IPv6 address
+ *
+ * Returns the net_device of the IPv6 address and also sets the
+ * vlan id and mac for that address.
+ */
+static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+{
+ struct net_device *ip_dev = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr laddr6;
+
+ i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
+ if (vlan_id)
+ *vlan_id = I40IW_NO_VLAN;
+ if (mac)
+ eth_zero_addr(mac);
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ if (ip_dev->dev_addr && mac)
+ ether_addr_copy(mac, ip_dev->dev_addr);
+ break;
+ }
+ }
+ rcu_read_unlock();
+#endif
+ return ip_dev;
+}
+
+/**
+ * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
+ * @addr: local IPv4 address
+ */
+static u16 i40iw_get_vlan_ipv4(u32 *addr)
+{
+ struct net_device *netdev;
+ u16 vlan_id = I40IW_NO_VLAN;
+
+ netdev = ip_dev_find(&init_net, htonl(addr[0]));
+ if (netdev) {
+ vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ dev_put(netdev);
+ }
+ return vlan_id;
+}
+
+/**
+ * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv6 address
+ * on the adapter and adds the associated qhash filter
+ */
+static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp;
+ enum i40iw_status_code ret = 0;
+ struct i40iw_cm_listener *child_listen_node;
+ unsigned long flags;
+
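+ /* walk all netdevs (and VLANs) backed by this iwdev and clone the parent listener for each of their IPv6 addresses */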
+ rtnl_lock();
+ for_each_netdev_rcu(&init_net, ip_dev) {
+ if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
+ (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
+ (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ i40iw_pr_err("idev == NULL\n");
+ break;
+ }
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &ifp->addr,
+ rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+ child_listen_node =
+ kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ i40iw_pr_err("listener memory allocation\n");
+ ret = I40IW_ERR_NO_MEMORY;
+ goto exit;
+ }
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+
+ i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
+ ifp->addr.in6_u.u6_addr32);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+
+ ret = i40iw_manage_qhash(iwdev, cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (!ret) {
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ } else {
+ kfree(child_listen_node);
+ }
+ }
+ }
+ }
+exit:
+ rtnl_unlock();
+ return ret;
+}
+
+/**
+ * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv4 address
+ * on the adapter and adds the associated qhash filter
+ */
+static enum i40iw_status_code i40iw_add_mqh_4(
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *dev;
+ struct in_device *idev;
+ struct i40iw_cm_listener *child_listen_node;
+ enum i40iw_status_code ret = 0;
+ unsigned long flags;
+
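+ /* same walk as the IPv6 variant: one child listener and qhash entry per IPv4 address on matching netdevs */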
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
+ (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
+ (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ idev = in_dev_get(dev);
+ for_ifa(idev) {
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address,
+ rdma_vlan_dev_vlan_id(dev),
+ dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ i40iw_pr_err("listener memory allocation\n");
+ in_dev_put(idev);
+ ret = I40IW_ERR_NO_MEMORY;
+ goto exit;
+ }
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node,
+ cm_parent_listen_node,
+ sizeof(*child_listen_node));
+
+ child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+
+ ret = i40iw_manage_qhash(iwdev,
+ cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL,
+ true);
+ if (!ret) {
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ } else {
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
+ }
+ }
+ endfor_ifa(idev);
+ in_dev_put(idev);
+ }
+ }
+exit:
+ rtnl_unlock();
+ return ret;
+}
+
+/**
+ * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
+ * @cm_core: cm's core
+ * @listener: listener to be released
+ * @free_hanging_nodes: to free associated cm_nodes
+ * @apbvt_del: flag to delete the apbvt
+ */
+static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
+ struct i40iw_cm_listener *listener,
+ int free_hanging_nodes, bool apbvt_del)
+{
+ int ret = -EINVAL;
+ int err = 0;
+ struct list_head *list_pos;
+ struct list_head *list_temp;
+ struct i40iw_cm_node *cm_node;
+ struct list_head reset_list;
+ struct i40iw_cm_info nfo;
+ struct i40iw_cm_node *loopback;
+ enum i40iw_cm_node_state old_state;
+ unsigned long flags;
+
+ /* free non-accelerated child nodes for this listener */
+ INIT_LIST_HEAD(&reset_list);
+ if (free_hanging_nodes) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) {
+ cm_node = container_of(list_pos, struct i40iw_cm_node, list);
+ if ((cm_node->listener == listener) && !cm_node->accelerated) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->reset_entry, &reset_list);
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+
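+ /* tear down each collected node: non-loopback nodes get a reset, loopback partners are told the connect failed */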
+ list_for_each_safe(list_pos, list_temp, &reset_list) {
+ cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
+ loopback = cm_node->loopbackpartner;
+ if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
+ i40iw_rem_ref_cm_node(cm_node);
+ } else {
+ if (!loopback) {
+ i40iw_cleanup_retrans_entry(cm_node);
+ err = i40iw_send_reset(cm_node);
+ if (err) {
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_pr_err("send reset\n");
+ } else {
+ old_state = cm_node->state;
+ cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ } else {
+ struct i40iw_cm_event event;
+
+ event.cm_node = loopback;
+ memcpy(event.cm_info.rem_addr,
+ loopback->rem_addr, sizeof(event.cm_info.rem_addr));
+ memcpy(event.cm_info.loc_addr,
+ loopback->loc_addr, sizeof(event.cm_info.loc_addr));
+ event.cm_info.rem_port = loopback->rem_port;
+ event.cm_info.loc_port = loopback->loc_port;
+ event.cm_info.cm_id = loopback->cm_id;
+ event.cm_info.ipv4 = loopback->ipv4;
+ atomic_inc(&loopback->ref_count);
+ loopback->state = I40IW_CM_STATE_CLOSED;
+ i40iw_event_connect_error(&event);
+ cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ }
+ }
+
+ if (!atomic_dec_return(&listener->ref_count)) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_del(&listener->list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (listener->iwdev) {
+ if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
+ i40iw_manage_apbvt(listener->iwdev,
+ listener->loc_port,
+ I40IW_MANAGE_APBVT_DEL);
+
+ memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
+ nfo.loc_port = listener->loc_port;
+ nfo.ipv4 = listener->ipv4;
+ nfo.vlan_id = listener->vlan_id;
+
+ if (!list_empty(&listener->child_listen_list)) {
+ i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
+ } else {
+ if (listener->qhash_set)
+ i40iw_manage_qhash(listener->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ }
+ }
+
+ cm_core->stats_listen_destroyed++;
+ kfree(listener);
+ cm_core->stats_listen_nodes_destroyed++;
+ listener = NULL;
+ ret = 0;
+ }
+
+ if (listener) {
+ if (atomic_read(&listener->pend_accepts_cnt) > 0)
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s: listener (%p) pending accepts=%u\n",
+ __func__,
+ listener,
+ atomic_read(&listener->pend_accepts_cnt));
+ }
+
+ return ret;
+}
+
+/**
+ * i40iw_cm_del_listen - delete a listener
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @apbvt_del: flag to delete apbvt
+ */
+static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
+ struct i40iw_cm_listener *listener,
+ bool apbvt_del)
+{
+ listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL; /* going to be destroyed pretty soon */
+ return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
+}
+
+/**
+ * i40iw_addr_resolve_neigh - resolve neighbor address
+ * @iwdev: iwarp device structure
+ * @src_ip: local ip address
+ * @dst_ip: remote ip address
+ * @arpindex: index into the arp table, or negative if there is no entry
+ */
+static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
+ u32 src_ip,
+ u32 dst_ip,
+ int arpindex)
+{
+ struct rtable *rt;
+ struct neighbour *neigh;
+ int rc = arpindex;
+ struct net_device *netdev = iwdev->netdev;
+ __be32 dst_ipaddr = htonl(dst_ip);
+ __be32 src_ipaddr = htonl(src_ip);
+
+ rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
+ if (IS_ERR(rt)) {
+ i40iw_pr_err("ip_route_output\n");
+ return rc;
+ }
+
+ if (netif_is_bond_slave(netdev))
+ netdev = netdev_master_upper_dev_get(netdev);
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
+
+ rcu_read_lock();
+ if (neigh) {
+ if (neigh->nud_state & NUD_VALID) {
+ if (arpindex >= 0) {
+ if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
+ neigh->ha))
+ /* Mac address same as arp table */
+ goto resolve_neigh_exit;
+ i40iw_manage_arp_cache(iwdev,
+ iwdev->arp_table[arpindex].mac_addr,
+ &dst_ip,
+ true,
+ I40IW_ARP_DELETE);
+ }
+
+ i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
+ rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ }
+ resolve_neigh_exit:
+
+ rcu_read_unlock();
+ if (neigh)
+ neigh_release(neigh);
+
+ ip_rt_put(rt);
+ return rc;
+}
+
+/**
+ * i40iw_get_dst_ipv6 - get the route for a given IPv6 src/dst address pair
+ * @src_addr: local IPv6 address
+ * @dst_addr: remote IPv6 address
+ */
+static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr)
+{
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.daddr = dst_addr->sin6_addr;
+ fl6.saddr = src_addr->sin6_addr;
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ return dst;
+}
+
+/**
+ * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
+ * @iwdev: iwarp device structure
+ * @src: local ip address
+ * @dest: remote ip address
+ * @arpindex: index into the arp table, or negative if there is no entry
+ */
+static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
+ u32 *src,
+ u32 *dest,
+ int arpindex)
+{
+ struct neighbour *neigh;
+ int rc = arpindex;
+ struct net_device *netdev = iwdev->netdev;
+ struct dst_entry *dst;
+ struct sockaddr_in6 dst_addr;
+ struct sockaddr_in6 src_addr;
+
+ memset(&dst_addr, 0, sizeof(dst_addr));
+ dst_addr.sin6_family = AF_INET6;
+ i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
+ memset(&src_addr, 0, sizeof(src_addr));
+ src_addr.sin6_family = AF_INET6;
+ i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
+ dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
+ if (!dst || dst->error) {
+ if (dst) {
+ dst_release(dst);
+ i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
+ dst->error);
+ }
+ return rc;
+ }
+
+ if (netif_is_bond_slave(netdev))
+ netdev = netdev_master_upper_dev_get(netdev);
+
+ neigh = dst_neigh_lookup(dst, &dst_addr);
+
+ rcu_read_lock();
+ if (neigh) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
+ if (neigh->nud_state & NUD_VALID) {
+ if (arpindex >= 0) {
+ if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
+ neigh->ha)) {
+ /* Mac address same as in arp table */
+ goto resolve_neigh_exit6;
+ }
+ i40iw_manage_arp_cache(iwdev,
+ iwdev->arp_table[arpindex].mac_addr,
+ dest,
+ false,
+ I40IW_ARP_DELETE);
+ }
+ i40iw_manage_arp_cache(iwdev,
+ neigh->ha,
+ dest,
+ false,
+ I40IW_ARP_ADD);
+ rc = i40iw_arp_table(iwdev,
+ dest,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE);
+ } else {
+ neigh_event_send(neigh, NULL);
+ }
+ }
+
+ resolve_neigh_exit6:
+ rcu_read_unlock();
+ if (neigh)
+ neigh_release(neigh);
+ dst_release(dst);
+ return rc;
+}
+
+/**
+ * i40iw_ipv4_is_loopback - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
+{
+ return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
+}
+
+/**
+ * i40iw_ipv6_is_loopback - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
+{
+ struct in6_addr raddr6;
+
+ i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
+ return (!memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6));
+}
+
+/**
+ * i40iw_make_cm_node - create a new instance of a cm node
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ * @listener: passive connection's listener
+ */
+static struct i40iw_cm_node *i40iw_make_cm_node(
+ struct i40iw_cm_core *cm_core,
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *listener)
+{
+ struct i40iw_cm_node *cm_node;
+ struct timespec ts;
+ int oldarpindex;
+ int arpindex;
+ struct net_device *netdev = iwdev->netdev;
+
+ /* create an hte and cm_node for this instance */
+ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node specific transport info */
+ cm_node->ipv4 = cm_info->ipv4;
+ cm_node->vlan_id = cm_info->vlan_id;
+ memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+ memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+ cm_node->loc_port = cm_info->loc_port;
+ cm_node->rem_port = cm_info->rem_port;
+
+ cm_node->mpa_frame_rev = iwdev->mpa_version;
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ cm_node->ird_size = I40IW_MAX_IRD_SIZE;
+ cm_node->ord_size = I40IW_MAX_ORD_SIZE;
+
+ cm_node->listener = listener;
+ cm_node->cm_id = cm_info->cm_id;
+ ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
+ spin_lock_init(&cm_node->retrans_list_lock);
+
+ atomic_set(&cm_node->ref_count, 1);
+ /* associate our parent CM core */
+ cm_node->cm_core = cm_core;
+ cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
+ cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->tcp_cntxt.rcv_wnd =
+ I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
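+ /* seed the initial local sequence number from the nanosecond clock */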
+ ts = current_kernel_time();
+ cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
+ cm_node->tcp_cntxt.mss = iwdev->mss;
+
+ cm_node->iwdev = iwdev;
+ cm_node->dev = &iwdev->sc_dev;
+
+ if ((cm_node->ipv4 &&
+ i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+ (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
+ cm_node->rem_addr))) {
+ arpindex = i40iw_arp_table(iwdev,
+ cm_node->rem_addr,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE);
+ } else {
+ oldarpindex = i40iw_arp_table(iwdev,
+ cm_node->rem_addr,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE);
+ if (cm_node->ipv4)
+ arpindex = i40iw_addr_resolve_neigh(iwdev,
+ cm_info->loc_addr[0],
+ cm_info->rem_addr[0],
+ oldarpindex);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
+ cm_info->loc_addr,
+ cm_info->rem_addr,
+ oldarpindex);
+ else
+ arpindex = -EINVAL;
+ }
+ if (arpindex < 0) {
+ i40iw_pr_err("cm_node arpindex\n");
+ kfree(cm_node);
+ return NULL;
+ }
+ ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
+ i40iw_add_hte_node(cm_core, cm_node);
+ cm_core->stats_nodes_created++;
+ return cm_node;
+}
+
+/**
+ * i40iw_rem_ref_cm_node - destroy an instance of a cm node
+ * @cm_node: connection's node
+ */
+static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_cm_core *cm_core = cm_node->cm_core;
+ struct i40iw_qp *iwqp;
+ struct i40iw_cm_info nfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
+ if (atomic_dec_return(&cm_node->ref_count)) {
+ spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+ return;
+ }
+ list_del(&cm_node->list);
+ spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+
+ /* if the node is destroyed before connection was accelerated */
+ if (!cm_node->accelerated && cm_node->accept_pend) {
+ pr_err("node destroyed before established\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ }
+ if (cm_node->close_entry)
+ i40iw_handle_close_entry(cm_node, 0);
+ if (cm_node->listener) {
+ i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
+ } else {
+ if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
+ cm_node->apbvt_set && cm_node->iwdev) {
+ i40iw_manage_apbvt(cm_node->iwdev,
+ cm_node->loc_port,
+ I40IW_MANAGE_APBVT_DEL);
+ i40iw_get_addr_info(cm_node, &nfo);
+ if (cm_node->qhash_set) {
+ i40iw_manage_qhash(cm_node->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ cm_node->qhash_set = 0;
+ }
+ }
+ }
+
+ iwqp = cm_node->iwqp;
+ if (iwqp) {
+ iwqp->cm_node = NULL;
+ i40iw_rem_ref(&iwqp->ibqp);
+ cm_node->iwqp = NULL;
+ } else if (cm_node->qhash_set) {
+ i40iw_get_addr_info(cm_node, &nfo);
+ i40iw_manage_qhash(cm_node->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ cm_node->qhash_set = 0;
+ }
+
+ cm_node->cm_core->stats_nodes_destroyed++;
+ kfree(cm_node);
+}
+
+/**
+ * i40iw_handle_fin_pkt - FIN packet received
+ * @cm_node: connection's node
+ */
+static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
+{
+ u32 ret;
+
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_RCVD:
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_MPAREJ_RCVD:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_LAST_ACK;
+ i40iw_send_fin(cm_node);
+ break;
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_FIN_WAIT1:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSING;
+ i40iw_send_ack(cm_node);
+ /*
+ * Wait for ACK as this is simultaneous close.
+ * After we receive ACK, do not send anything.
+ * Just rm the node.
+ */
+ break;
+ case I40IW_CM_STATE_FIN_WAIT2:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_TIME_WAIT;
+ i40iw_send_ack(cm_node);
+ ret = i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
+ if (ret)
+ i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
+ break;
+ case I40IW_CM_STATE_TIME_WAIT:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ default:
+ i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_rst_pkt - process received RST packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ i40iw_cleanup_retrans_entry(cm_node);
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V2:
+ cm_node->mpa_frame_rev = IETF_MPA_V1;
+ /* peer rejected MPA v2: fall back to MPA v1 and resend the SYN */
+ cm_node->state = I40IW_CM_STATE_SYN_SENT;
+ if (i40iw_send_syn(cm_node, 0))
+ i40iw_active_open_err(cm_node, false);
+ break;
+ case IETF_MPA_V1:
+ default:
+ i40iw_active_open_err(cm_node, false);
+ break;
+ }
+ break;
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ atomic_add_return(1, &cm_node->passive_state);
+ break;
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_SYN_RCVD:
+ case I40IW_CM_STATE_LISTENING:
+ i40iw_pr_err("Bad state state = %d\n", cm_node->state);
+ i40iw_passive_open_err(cm_node, false);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ i40iw_active_open_err(cm_node, false);
+ break;
+ case I40IW_CM_STATE_CLOSED:
+ break;
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_LAST_ACK:
+ cm_node->cm_id->rem_ref(cm_node->cm_id);
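+ /* fall through */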
+ case I40IW_CM_STATE_TIME_WAIT:
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ int ret;
+ int datasize = rbuf->datalen;
+ u8 *dataloc = rbuf->data;
+
+ enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
+ u32 res_type;
+
+ ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
+ if (ret) {
+ if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
+ i40iw_active_open_err(cm_node, true);
+ else
+ i40iw_passive_open_err(cm_node, true);
+ return;
+ }
+
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_ESTABLISHED:
+ if (res_type == I40IW_MPA_REQUEST_REJECT)
+ i40iw_pr_err("state for reject\n");
+ cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
+ type = I40IW_CM_EVENT_MPA_REQ;
+ i40iw_send_ack(cm_node); /* ACK received MPA request */
+ atomic_set(&cm_node->passive_state,
+ I40IW_PASSIVE_STATE_INDICATED);
+ break;
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ i40iw_cleanup_retrans_entry(cm_node);
+ if (res_type == I40IW_MPA_REQUEST_REJECT) {
+ type = I40IW_CM_EVENT_MPA_REJECT;
+ cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
+ } else {
+ type = I40IW_CM_EVENT_CONNECTED;
+ cm_node->state = I40IW_CM_STATE_OFFLOADED;
+ i40iw_send_ack(cm_node);
+ }
+ break;
+ default:
+ pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
+ break;
+ }
+ i40iw_create_event(cm_node, type);
+}
+
+/**
+ * i40iw_indicate_pkt_err - Send up err event to cm
+ * @cm_node: connection's node
+ */
+static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ i40iw_active_open_err(cm_node, true);
+ break;
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_SYN_RCVD:
+ i40iw_passive_open_err(cm_node, true);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_check_syn - Check for error on received syn ack
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ */
+static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
+{
+ int err = 0;
+
+ if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
+ err = 1;
+ i40iw_active_open_err(cm_node, true);
+ }
+ return err;
+}
+
+/**
+ * i40iw_check_seq - check seq numbers if OK
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ */
+static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
+{
+ int err = 0;
+ u32 seq;
+ u32 ack_seq;
+ u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
+ u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ u32 rcv_wnd;
+
+ seq = ntohl(tcph->seq);
+ ack_seq = ntohl(tcph->ack_seq);
+ rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
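+ /* the peer's ACK must match our last sent sequence number and its segment must fall within the receive window */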
+ if (ack_seq != loc_seq_num)
+ err = -1;
+ else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
+ err = -1;
+ if (err) {
+ i40iw_pr_err("seq number\n");
+ i40iw_indicate_pkt_err(cm_node);
+ }
+ return err;
+}
+
+/**
+ * i40iw_handle_syn_pkt - handle a received SYN packet (passive side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int ret;
+ u32 inc_sequence;
+ int optionsize;
+ struct i40iw_cm_info nfo;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ /* Rcvd syn on active open connection */
+ i40iw_active_open_err(cm_node, true);
+ break;
+ case I40IW_CM_STATE_LISTENING:
+ /* Passive OPEN */
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ cm_node->cm_core->stats_backlog_drops++;
+ i40iw_passive_open_err(cm_node, false);
+ break;
+ }
+ ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (ret) {
+ i40iw_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ cm_node->accept_pend = 1;
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+
+ cm_node->state = I40IW_CM_STATE_SYN_RCVD;
+ i40iw_get_addr_info(cm_node, &nfo);
+ ret = i40iw_manage_qhash(cm_node->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ (void *)cm_node,
+ false);
+ cm_node->qhash_set = true;
+ break;
+ case I40IW_CM_STATE_CLOSED:
+ i40iw_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ case I40IW_CM_STATE_LAST_ACK:
+ case I40IW_CM_STATE_CLOSING:
+ case I40IW_CM_STATE_UNKNOWN:
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int ret;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_SENT:
+ i40iw_cleanup_retrans_entry(cm_node);
+ /* active open */
+ if (i40iw_check_syn(cm_node, tcph)) {
+ i40iw_pr_err("check syn fail\n");
+ return;
+ }
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ /* setup options */
+ ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
+ if (ret) {
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "cm_node=%p tcp_options failed\n",
+ cm_node);
+ break;
+ }
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ i40iw_send_ack(cm_node); /* ACK for the syn_ack */
+ ret = i40iw_send_mpa_request(cm_node);
+ if (ret) {
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "cm_node=%p i40iw_send_mpa_request failed\n",
+ cm_node);
+ break;
+ }
+ cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
+ break;
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ i40iw_passive_open_err(cm_node, true);
+ break;
+ case I40IW_CM_STATE_LISTENING:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_CLOSED:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ i40iw_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_LAST_ACK:
+ case I40IW_CM_STATE_OFFLOADED:
+ case I40IW_CM_STATE_CLOSING:
+ case I40IW_CM_STATE_UNKNOWN:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_handle_ack_pkt - process packet with ACK
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 inc_sequence;
+ int ret = 0;
+ int optionsize;
+ u32 datasize = rbuf->datalen;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+
+ if (i40iw_check_seq(cm_node, tcph))
+ return -EINVAL;
+
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_RCVD:
+ i40iw_cleanup_retrans_entry(cm_node);
+ ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (ret)
+ break;
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ cm_node->state = I40IW_CM_STATE_ESTABLISHED;
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ i40iw_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case I40IW_CM_STATE_ESTABLISHED:
+ i40iw_cleanup_retrans_entry(cm_node);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ i40iw_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ i40iw_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case I40IW_CM_STATE_LISTENING:
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_CLOSED:
+ i40iw_cleanup_retrans_entry(cm_node);
+ atomic_inc(&cm_node->ref_count);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_LAST_ACK:
+ case I40IW_CM_STATE_CLOSING:
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ if (!cm_node->accept_pend)
+ cm_node->cm_id->rem_ref(cm_node->cm_id);
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ case I40IW_CM_STATE_FIN_WAIT1:
+ i40iw_cleanup_retrans_entry(cm_node);
+ cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
+ break;
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_OFFLOADED:
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ case I40IW_CM_STATE_UNKNOWN:
+ default:
+ i40iw_cleanup_retrans_entry(cm_node);
+ break;
+ }
+ return ret;
+}
+
+/**
+ * i40iw_process_packet - process cm packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *rbuf)
+{
+ enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 fin_set = 0;
+ int ret;
+
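+ /* classify by TCP flags; FIN is tracked separately since it may accompany an ACK */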
+ if (tcph->rst) {
+ pkt_type = I40IW_PKT_TYPE_RST;
+ } else if (tcph->syn) {
+ pkt_type = I40IW_PKT_TYPE_SYN;
+ if (tcph->ack)
+ pkt_type = I40IW_PKT_TYPE_SYNACK;
+ } else if (tcph->ack) {
+ pkt_type = I40IW_PKT_TYPE_ACK;
+ }
+ if (tcph->fin)
+ fin_set = 1;
+
+ switch (pkt_type) {
+ case I40IW_PKT_TYPE_SYN:
+ i40iw_handle_syn_pkt(cm_node, rbuf);
+ break;
+ case I40IW_PKT_TYPE_SYNACK:
+ i40iw_handle_synack_pkt(cm_node, rbuf);
+ break;
+ case I40IW_PKT_TYPE_ACK:
+ ret = i40iw_handle_ack_pkt(cm_node, rbuf);
+ if (fin_set && !ret)
+ i40iw_handle_fin_pkt(cm_node);
+ break;
+ case I40IW_PKT_TYPE_RST:
+ i40iw_handle_rst_pkt(cm_node, rbuf);
+ break;
+ default:
+ if (fin_set &&
+ (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
+ i40iw_handle_fin_pkt(cm_node);
+ break;
+ }
+}
+
+/**
+ * i40iw_make_listen_node - create a listen node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ */
+static struct i40iw_cm_listener *i40iw_make_listen_node(
+ struct i40iw_cm_core *cm_core,
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info)
+{
+ struct i40iw_cm_listener *listener;
+ unsigned long flags;
+
+ /* cannot have multiple matching listeners */
+ listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
+ cm_info->loc_port,
+ cm_info->vlan_id,
+ I40IW_CM_LISTENER_EITHER_STATE);
+ if (listener &&
+ (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
+ atomic_dec(&listener->ref_count);
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "Not creating listener since it already exists\n");
+ return NULL;
+ }
+
+ if (!listener) {
+ /* create a CM listen node (a half node, used only to match incoming traffic) */
+ listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
+ if (!listener)
+ return NULL;
+ cm_core->stats_listen_nodes_created++;
+ memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
+ listener->loc_port = cm_info->loc_port;
+
+ INIT_LIST_HEAD(&listener->child_listen_list);
+
+ atomic_set(&listener->ref_count, 1);
+ } else {
+ listener->reused_node = 1;
+ }
+
+ listener->cm_id = cm_info->cm_id;
+ listener->ipv4 = cm_info->ipv4;
+ listener->vlan_id = cm_info->vlan_id;
+ atomic_set(&listener->pend_accepts_cnt, 0);
+ listener->cm_core = cm_core;
+ listener->iwdev = iwdev;
+
+ listener->backlog = cm_info->backlog;
+ listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;
+
+ if (!listener->reused_node) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_nodes);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+
+ return listener;
+}
+
+/**
+ * i40iw_create_cm_node - make a connection node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @private_data_len: length of private data for mpa request
+ * @private_data: pointer to private data for connection
+ * @cm_info: quad info for connection
+ */
+static struct i40iw_cm_node *i40iw_create_cm_node(
+ struct i40iw_cm_core *cm_core,
+ struct i40iw_device *iwdev,
+ u16 private_data_len,
+ void *private_data,
+ struct i40iw_cm_info *cm_info)
+{
+ int ret;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_listener *loopback_remotelistener;
+ struct i40iw_cm_node *loopback_remotenode;
+ struct i40iw_cm_info loopback_cm_info;
+
+ /* create a CM connection node */
+ cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
+ if (!cm_node)
+ return NULL;
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+
+ if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
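+ /* local and remote addresses match: loopback connection, so wire the two cm_nodes directly to each other */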
+ loopback_remotelistener = i40iw_find_listener(
+ cm_core,
+ cm_info->rem_addr,
+ cm_node->rem_port,
+ cm_node->vlan_id,
+ I40IW_CM_LISTENER_ACTIVE_STATE);
+ if (!loopback_remotelistener) {
+ i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+ } else {
+ loopback_cm_info = *cm_info;
+ loopback_cm_info.loc_port = cm_info->rem_port;
+ loopback_cm_info.rem_port = cm_info->loc_port;
+ loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
+ loopback_cm_info.ipv4 = cm_info->ipv4;
+ loopback_remotenode = i40iw_make_cm_node(cm_core,
+ iwdev,
+ &loopback_cm_info,
+ loopback_remotelistener);
+ if (!loopback_remotenode) {
+ i40iw_rem_ref_cm_node(cm_node);
+ return NULL;
+ }
+ cm_core->stats_loopbacks++;
+ loopback_remotenode->loopbackpartner = cm_node;
+ loopback_remotenode->tcp_cntxt.rcv_wscale =
+ I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->loopbackpartner = loopback_remotenode;
+ memcpy(loopback_remotenode->pdata_buf, private_data,
+ private_data_len);
+ loopback_remotenode->pdata.size = private_data_len;
+
+ cm_node->state = I40IW_CM_STATE_OFFLOADED;
+ cm_node->tcp_cntxt.rcv_nxt =
+ loopback_remotenode->tcp_cntxt.loc_seq_num;
+ loopback_remotenode->tcp_cntxt.rcv_nxt =
+ cm_node->tcp_cntxt.loc_seq_num;
+ cm_node->tcp_cntxt.max_snd_wnd =
+ loopback_remotenode->tcp_cntxt.rcv_wnd;
+ loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
+ loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
+ loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
+ loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
+ i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
+ }
+ return cm_node;
+ }
+
+ cm_node->pdata.size = private_data_len;
+ cm_node->pdata.addr = cm_node->pdata_buf;
+
+ memcpy(cm_node->pdata_buf, private_data, private_data_len);
+
+ cm_node->state = I40IW_CM_STATE_SYN_SENT;
+ ret = i40iw_send_syn(cm_node, 0);
+
+ if (ret) {
+ if (cm_node->ipv4)
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI4",
+ cm_node->rem_addr);
+ else
+ i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI6",
+ cm_node->rem_addr);
+ i40iw_rem_ref_cm_node(cm_node);
+ cm_node = NULL;
+ }
+
+ if (cm_node)
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_port,
+ cm_node,
+ cm_node->cm_id);
+
+ return cm_node;
+}
+
+/**
+ * i40iw_cm_reject - reject and teardown a connection
+ * @cm_node: connection's node
+ * @pdata: ptr to private data for reject
+ * @plen: size of private data
+ */
+static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
+{
+ int ret = 0;
+ int err;
+ int passive_state;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct i40iw_cm_node *loopback = cm_node->loopbackpartner;
+
+ if (cm_node->tcp_cntxt.client)
+ return ret;
+ i40iw_cleanup_retrans_entry(cm_node);
+
+ if (!loopback) {
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == I40IW_SEND_RESET_EVENT) {
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_rem_ref_cm_node(cm_node);
+ } else {
+ if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+ i40iw_rem_ref_cm_node(cm_node);
+ } else {
+ ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
+ if (ret) {
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ err = i40iw_send_reset(cm_node);
+ if (err)
+ i40iw_pr_err("send reset failed\n");
+ } else {
+ cm_id->add_ref(cm_id);
+ }
+ }
+ }
+ } else {
+ cm_node->cm_id = NULL;
+ if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+ i40iw_rem_ref_cm_node(cm_node);
+ i40iw_rem_ref_cm_node(loopback);
+ } else {
+ ret = i40iw_send_cm_event(loopback,
+ loopback->cm_id,
+ IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNREFUSED);
+ i40iw_rem_ref_cm_node(cm_node);
+ loopback->state = I40IW_CM_STATE_CLOSING;
+
+ cm_id = loopback->cm_id;
+ i40iw_rem_ref_cm_node(loopback);
+ cm_id->rem_ref(cm_id);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * i40iw_cm_close - close a cm connection
+ * @cm_node: connection's node
+ */
+static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
+{
+ int ret = 0;
+
+ if (!cm_node)
+ return -EINVAL;
+
+ switch (cm_node->state) {
+ case I40IW_CM_STATE_SYN_RCVD:
+ case I40IW_CM_STATE_SYN_SENT:
+ case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case I40IW_CM_STATE_ESTABLISHED:
+ case I40IW_CM_STATE_ACCEPTING:
+ case I40IW_CM_STATE_MPAREQ_SENT:
+ case I40IW_CM_STATE_MPAREQ_RCVD:
+ i40iw_cleanup_retrans_entry(cm_node);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_CLOSE_WAIT:
+ cm_node->state = I40IW_CM_STATE_LAST_ACK;
+ i40iw_send_fin(cm_node);
+ break;
+ case I40IW_CM_STATE_FIN_WAIT1:
+ case I40IW_CM_STATE_FIN_WAIT2:
+ case I40IW_CM_STATE_LAST_ACK:
+ case I40IW_CM_STATE_TIME_WAIT:
+ case I40IW_CM_STATE_CLOSING:
+ ret = -1;
+ break;
+ case I40IW_CM_STATE_LISTENING:
+ i40iw_cleanup_retrans_entry(cm_node);
+ i40iw_send_reset(cm_node);
+ break;
+ case I40IW_CM_STATE_MPAREJ_RCVD:
+ case I40IW_CM_STATE_UNKNOWN:
+ case I40IW_CM_STATE_INITED:
+ case I40IW_CM_STATE_CLOSED:
+ case I40IW_CM_STATE_LISTENER_DESTROYED:
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ case I40IW_CM_STATE_OFFLOADED:
+ if (cm_node->send_entry)
+ i40iw_pr_err("send_entry\n");
+ i40iw_rem_ref_cm_node(cm_node);
+ break;
+ }
+ return ret;
+}
+
+/**
+ * i40iw_receive_ilq - recv an ETHERNET packet, and process it
+ * through CM
+ * @dev: FPK dev struct
+ * @rbuf: receive buffer
+ */
+void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+{
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_listener *listener;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ struct i40iw_cm_info cm_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+ struct vlan_ethhdr *ethh;
+
+ /* if vlan, then maclen = 18 else 14 */
+ iph = (struct iphdr *)rbuf->iph;
+ memset(&cm_info, 0, sizeof(cm_info));
+
+ i40iw_debug_buf(dev,
+ I40IW_DEBUG_ILQ,
+ "RECEIVE ILQ BUFFER",
+ rbuf->mem.va,
+ rbuf->totallen);
+ ethh = (struct vlan_ethhdr *)rbuf->mem.va;
+
+ if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
+ cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s vlan_id=%d\n",
+ __func__,
+ cm_info.vlan_id);
+ } else {
+ cm_info.vlan_id = I40IW_NO_VLAN;
+ }
+ tcph = (struct tcphdr *)rbuf->tcph;
+
+ if (rbuf->ipv4) {
+ cm_info.loc_addr[0] = ntohl(iph->daddr);
+ cm_info.rem_addr[0] = ntohl(iph->saddr);
+ cm_info.ipv4 = true;
+ } else {
+ ip6h = (struct ipv6hdr *)rbuf->iph;
+ i40iw_copy_ip_ntohl(cm_info.loc_addr,
+ ip6h->daddr.in6_u.u6_addr32);
+ i40iw_copy_ip_ntohl(cm_info.rem_addr,
+ ip6h->saddr.in6_u.u6_addr32);
+ cm_info.ipv4 = false;
+ }
+ cm_info.loc_port = ntohs(tcph->dest);
+ cm_info.rem_port = ntohs(tcph->source);
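+ /* look for an existing connection on this quad; failing that, only a SYN can create one under a matching listener */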
+ cm_node = i40iw_find_node(cm_core,
+ cm_info.rem_port,
+ cm_info.rem_addr,
+ cm_info.loc_port,
+ cm_info.loc_addr,
+ true);
+
+ if (!cm_node) {
+ /* the only packet type accepted here is a SYN for a passive open */
+ if (!tcph->syn || tcph->ack)
+ return;
+ listener =
+ i40iw_find_listener(cm_core,
+ cm_info.loc_addr,
+ cm_info.loc_port,
+ cm_info.vlan_id,
+ I40IW_CM_LISTENER_ACTIVE_STATE);
+ if (!listener) {
+ cm_info.cm_id = NULL;
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s no listener found\n",
+ __func__);
+ return;
+ }
+ cm_info.cm_id = listener->cm_id;
+ cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
+ if (!cm_node) {
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s allocate node failed\n",
+ __func__);
+ atomic_dec(&listener->ref_count);
+ return;
+ }
+ if (!tcph->rst && !tcph->fin) {
+ cm_node->state = I40IW_CM_STATE_LISTENING;
+ } else {
+ i40iw_rem_ref_cm_node(cm_node);
+ return;
+ }
+ atomic_inc(&cm_node->ref_count);
+ } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
+ i40iw_rem_ref_cm_node(cm_node);
+ return;
+ }
+ i40iw_process_packet(cm_node, rbuf);
+ i40iw_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * i40iw_setup_cm_core - allocate a top level instance of a cm
+ * core
+ * @iwdev: iwarp device structure
+ */
+void i40iw_setup_cm_core(struct i40iw_device *iwdev)
+{
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+
+ cm_core->iwdev = iwdev;
+ cm_core->dev = &iwdev->sc_dev;
+
+ INIT_LIST_HEAD(&cm_core->connected_nodes);
+ INIT_LIST_HEAD(&cm_core->listen_nodes);
+
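+ /* a single shared timer drives retransmit and close processing for all cm nodes */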
+ init_timer(&cm_core->tcp_timer);
+ cm_core->tcp_timer.function = i40iw_cm_timer_tick;
+ cm_core->tcp_timer.data = (unsigned long)cm_core;
+
+ spin_lock_init(&cm_core->ht_lock);
+ spin_lock_init(&cm_core->listen_list_lock);
+
+ cm_core->event_wq = create_singlethread_workqueue("iwewq");
+ cm_core->disconn_wq = create_singlethread_workqueue("iwdwq");
+}
+
+/**
+ * i40iw_cleanup_cm_core - deallocate a top level instance of a
+ * cm core
+ * @cm_core: cm's core
+ */
+void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
+{
+ unsigned long flags;
+
+ if (!cm_core)
+ return;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (timer_pending(&cm_core->tcp_timer))
+ del_timer_sync(&cm_core->tcp_timer);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ destroy_workqueue(cm_core->event_wq);
+ destroy_workqueue(cm_core->disconn_wq);
+}
+
+/**
+ * i40iw_init_tcp_ctx - setup qp context
+ * @cm_node: connection's node
+ * @tcp_info: offload info for tcp
+ * @iwqp: associate qp for the connection
+ */
+static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
+ struct i40iw_tcp_offload_info *tcp_info,
+ struct i40iw_qp *iwqp)
+{
+ tcp_info->ipv4 = cm_node->ipv4;
+ tcp_info->drop_ooo_seg = true;
+ tcp_info->wscale = true;
+ tcp_info->ignore_tcp_opt = true;
+ tcp_info->ignore_tcp_uns_opt = true;
+ tcp_info->no_nagle = false;
+
+ tcp_info->ttl = I40IW_DEFAULT_TTL;
+ tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
+ tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
+ tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;
+
+ tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+ tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
+ tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
+ tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+ tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+
+ tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
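+ /* start with a two-segment initial congestion window */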
+ tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
+ tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+ tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
+ tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
+ cm_node->tcp_cntxt.rcv_wscale);
+
+ tcp_info->flow_label = 0;
+ tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
+ if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ tcp_info->insert_vlan_tag = true;
+ tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
+ }
+ if (cm_node->ipv4) {
+ tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
+ tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
+
+ tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
+ tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
+ tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(iwqp->iwdev,
+ &tcp_info->dest_ip_addr3,
+ true,
+ NULL,
+ I40IW_ARP_RESOLVE));
+ } else {
+ tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
+ tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
+ tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
+ tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
+ tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
+ tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
+ tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
+ tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
+ tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
+ tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
+ tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(
+ iwqp->iwdev,
+ &tcp_info->dest_ip_addr0,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE));
+ }
+}
+
+/**
+ * i40iw_cm_init_tsa_conn - setup qp for RTS
+ * @iwqp: associate qp for the connection
+ * @cm_node: connection's node
+ */
+static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
+ struct i40iw_cm_node *cm_node)
+{
+ struct i40iw_tcp_offload_info tcp_info;
+ struct i40iwarp_offload_info *iwarp_info;
+ struct i40iw_qp_host_ctx_info *ctx_info;
+ struct i40iw_device *iwdev = iwqp->iwdev;
+ struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
+
+ memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
+ iwarp_info = &iwqp->iwarp_info;
+ ctx_info = &iwqp->ctx_info;
+
+ ctx_info->tcp_info = &tcp_info;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ iwarp_info->ord_size = cm_node->ord_size;
+ iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
+
+ if (iwarp_info->ord_size == 1)
+ iwarp_info->ord_size = 2;
+
+ iwarp_info->rd_enable = true;
+ iwarp_info->rdmap_ver = 1;
+ iwarp_info->ddp_ver = 1;
+
+ iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
+
+ ctx_info->tcp_info_valid = true;
+ ctx_info->iwarp_info_valid = true;
+
+ i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
+ if (cm_node->snd_mark_en) {
+ iwarp_info->snd_mark_en = true;
+ iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
+ SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
+ }
+
+ cm_node->state = I40IW_CM_STATE_OFFLOADED;
+ tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+ tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
+
+ dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
+
+ /* once tcp_info is set, no need to do it again */
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+}
+
+/**
+ * i40iw_cm_disconn - when a connection is being closed
+ * @iwqp: associate qp for the connection
+ */
+int i40iw_cm_disconn(struct i40iw_qp *iwqp)
+{
+ struct disconn_work *work;
+ struct i40iw_device *iwdev = iwqp->iwdev;
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM; /* Timer will clean up */
+
+ i40iw_add_ref(&iwqp->ibqp);
+ work->iwqp = iwqp;
+ INIT_WORK(&work->work, i40iw_disconnect_worker);
+ queue_work(cm_core->disconn_wq, &work->work);
+ return 0;
+}
+
+/**
+ * i40iw_loopback_nop - Send a nop
+ * @qp: associated hw qp
+ */
+static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
+{
+ u64 *wqe;
+ u64 header;
+
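+ /* zero the three data quadwords, then write a NOP header with no completion requested */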
+ wqe = qp->qp_uk.sq_base->elem;
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+ LS_64(0, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+ set_64bit_val(wqe, 24, header);
+}
+
+/**
+ * i40iw_qp_disconnect - free qp and close cm
+ * @iwqp: associate qp for the connection
+ */
+static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_ib_device *iwibdev;
+
+ iwdev = to_iwdev(iwqp->ibqp.device);
+ if (!iwdev) {
+ i40iw_pr_err("iwdev == NULL\n");
+ return;
+ }
+
+ iwibdev = iwdev->iwibdev;
+
+ if (iwqp->active_conn) {
+ /* indicate this connection is NOT active */
+ iwqp->active_conn = 0;
+ } else {
+ /* Need to free the Last Streaming Mode Message */
+ if (iwqp->ietf_mem.va) {
+ if (iwqp->lsmm_mr)
+ iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
+ }
+ }
+
+ /* close the CM node down if it is still active */
+ if (iwqp->cm_node) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
+ i40iw_cm_close(iwqp->cm_node);
+ }
+}
+
+/**
+ * i40iw_cm_disconn_true - called by worker thread to disconnect qp
+ * @iwqp: associated qp for the connection
+ */
+static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
+{
+ struct iw_cm_id *cm_id;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+ u16 last_ae;
+ u8 original_hw_tcp_state;
+ u8 original_ibqp_state;
+ int disconn_status = 0;
+ int issue_disconn = 0;
+ int issue_close = 0;
+ int issue_flush = 0;
+ struct ib_event ibevent;
+ unsigned long flags;
+ int ret;
+
+ if (!iwqp) {
+ i40iw_pr_err("iwqp == NULL\n");
+ return;
+ }
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ cm_id = iwqp->cm_id;
+ /* make sure we haven't already closed this connection */
+ if (!cm_id) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return;
+ }
+
+ iwdev = to_iwdev(iwqp->ibqp.device);
+
+ original_hw_tcp_state = iwqp->hw_tcp_state;
+ original_ibqp_state = iwqp->ibqp_state;
+ last_ae = iwqp->last_aeq;
+
+ if (qp->term_flags) {
+ issue_disconn = 1;
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ /*
+ * When the term timer expires after cm_timer, don't want the
+ * terminate-handler to issue cm_disconn, which can re-free a QP
+ * even after its refcnt=0.
+ */
+ del_timer(&iwqp->terminate_timer);
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+ issue_disconn = 1;
+ if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
+ disconn_status = -ECONNRESET;
+ }
+
+ if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
+ (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
+ (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
+ (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (issue_flush && !iwqp->destroyed) {
+ /* Flush the queues */
+ i40iw_flush_wqes(iwdev, iwqp);
+
+ if (qp->term_flags) {
+ ibevent.device = iwqp->ibqp.device;
+ ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
+ IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
+ ibevent.element.qp = &iwqp->ibqp;
+ iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
+ }
+ }
+
+ if (cm_id && cm_id->event_handler) {
+ if (issue_disconn) {
+ ret = i40iw_send_cm_event(NULL,
+ cm_id,
+ IW_CM_EVENT_DISCONNECT,
+ disconn_status);
+
+ if (ret)
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "disconnect event failed %s: - cm_id = %p\n",
+ __func__, cm_id);
+ }
+ if (issue_close) {
+ i40iw_qp_disconnect(iwqp);
+ cm_id->provider_data = iwqp;
+ ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
+ if (ret)
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "close event failed %s: - cm_id = %p\n",
+ __func__, cm_id);
+ cm_id->rem_ref(cm_id);
+ }
+ }
+}
+
+/**
+ * i40iw_disconnect_worker - worker for connection close
+ * @work: pointer to disconn work structure
+ */
+static void i40iw_disconnect_worker(struct work_struct *work)
+{
+ struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+ struct i40iw_qp *iwqp = dwork->iwqp;
+
+ kfree(dwork);
+ i40iw_cm_disconn_true(iwqp);
+ i40iw_rem_ref(&iwqp->ibqp);
+}
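+
+/*
+ * Deferral flow (illustrative summary of the functions above):
+ *
+ *	i40iw_cm_disconn()            - takes a QP reference, queues work
+ *	  i40iw_disconnect_worker()   - frees the work item, then
+ *	    i40iw_cm_disconn_true()   - flush / disconnect / close upcalls
+ *	  i40iw_rem_ref(&iwqp->ibqp)  - drops the reference taken above
+ *
+ * Pushing the upcalls to a workqueue keeps them out of the possibly
+ * atomic context that detected the disconnect (note the GFP_ATOMIC
+ * allocation in i40iw_cm_disconn).
+ */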
+
+/**
+ * i40iw_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+ * @conn_param: accept parameters
+ */
+int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct i40iw_qp *iwqp;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_cm_node *cm_node;
+ struct ib_qp_attr attr;
+ int passive_state;
+ struct i40iw_ib_device *iwibdev;
+ struct ib_mr *ibmr;
+ struct i40iw_pd *iwpd;
+ u16 buf_len = 0;
+ struct i40iw_kmem_info accept;
+ enum i40iw_status_code status;
+ u64 tagged_offset;
+
+ memset(&attr, 0, sizeof(attr));
+ ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+
+ iwqp = to_iwqp(ibqp);
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->sc_dev;
+ cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
+
+ if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
+ cm_node->ipv4 = true;
+ cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
+ } else {
+ cm_node->ipv4 = false;
+ i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
+ }
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "Accept vlan_id=%d\n",
+ cm_node->vlan_id);
+ if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+ if (cm_node->loopbackpartner)
+ i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
+ i40iw_rem_ref_cm_node(cm_node);
+ return -EINVAL;
+ }
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == I40IW_SEND_RESET_EVENT) {
+ i40iw_rem_ref_cm_node(cm_node);
+ return -ECONNRESET;
+ }
+
+ cm_node->cm_core->stats_accepts++;
+ iwqp->cm_node = (void *)cm_node;
+ cm_node->iwqp = iwqp;
+
+ buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
+
+ status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
+
+ if (status)
+ return -ENOMEM;
+ cm_node->pdata.size = conn_param->private_data_len;
+ accept.addr = iwqp->ietf_mem.va;
+ accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
+ memcpy(accept.addr + accept.size, conn_param->private_data,
+ conn_param->private_data_len);
+
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ if ((cm_node->ipv4 &&
+ !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+ (!cm_node->ipv4 &&
+ !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
+ iwibdev = iwdev->iwibdev;
+ iwpd = iwqp->iwpd;
+ tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
+ ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
+ iwqp->ietf_mem.pa,
+ buf_len,
+ IB_ACCESS_LOCAL_WRITE,
+ &tagged_offset);
+ if (IS_ERR(ibmr)) {
+ i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
+ return -ENOMEM;
+ }
+
+ ibmr->pd = &iwpd->ibpd;
+ ibmr->device = iwpd->ibpd.device;
+ iwqp->lsmm_mr = ibmr;
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ if (is_remote_ne020_or_chelsio(cm_node))
+ dev->iw_priv_qp_ops->qp_send_lsmm(
+ &iwqp->sc_qp,
+ iwqp->ietf_mem.va,
+ (accept.size + conn_param->private_data_len),
+ ibmr->lkey);
+ else
+ dev->iw_priv_qp_ops->qp_send_lsmm(
+ &iwqp->sc_qp,
+ iwqp->ietf_mem.va,
+ (accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
+ ibmr->lkey);
+
+ } else {
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ i40iw_loopback_nop(&iwqp->sc_qp);
+ }
+
+ if (iwqp->page)
+ kunmap(iwqp->page);
+
+ iwqp->cm_id = cm_id;
+ cm_node->cm_id = cm_id;
+
+ cm_id->provider_data = (void *)iwqp;
+ iwqp->active_conn = 0;
+
+ cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ i40iw_cm_init_tsa_conn(iwqp, cm_node);
+ cm_id->add_ref(cm_id);
+ i40iw_add_ref(&iwqp->ibqp);
+
+ i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (cm_node->loopbackpartner) {
+ cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
+
+ /* copy entire MPA frame to our cm_node's frame */
+ memcpy(cm_node->loopbackpartner->pdata_buf,
+ conn_param->private_data,
+ conn_param->private_data_len);
+ i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
+ }
+
+ cm_node->accelerated = 1;
+ if (cm_node->accept_pend) {
+ if (!cm_node->listener)
+ i40iw_pr_err("cm_node->listener NULL for passive node\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_reject - registered call for connection to be rejected
+ * @cm_id: cm information for passive connection
+ * @pdata: private data to be sent
+ * @pdata_len: private data length
+ */
+int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_node *loopback;
+
+ cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
+ loopback = cm_node->loopbackpartner;
+ cm_node->cm_id = cm_id;
+ cm_node->pdata.size = pdata_len;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+ cm_node->cm_core->stats_rejects++;
+
+ if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
+ return -EINVAL;
+
+ if (loopback) {
+ memcpy(&loopback->pdata_buf, pdata, pdata_len);
+ loopback->pdata.size = pdata_len;
+ }
+
+ return i40iw_cm_reject(cm_node, pdata, pdata_len);
+}
+
+/**
+ * i40iw_connect - registered call for connection to be established
+ * @cm_id: cm information for active connection
+ * @conn_param: Information about the connection
+ */
+int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct i40iw_qp *iwqp;
+ struct i40iw_device *iwdev;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_info cm_info;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ int apbvt_set = 0;
+ enum i40iw_status_code status;
+
+ ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ iwqp = to_iwqp(ibqp);
+ if (!iwqp)
+ return -EINVAL;
+ iwdev = to_iwdev(iwqp->ibqp.device);
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ if (!(laddr->sin_port) || !(raddr->sin_port))
+ return -EINVAL;
+
+ iwqp->active_conn = 1;
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = iwqp;
+
+ /* set up the connection params for the node */
+ if (cm_id->remote_addr.ss_family == AF_INET) {
+ cm_info.ipv4 = true;
+ memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
+ memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+ cm_info.rem_port = ntohs(raddr->sin_port);
+ cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ cm_info.ipv4 = false;
+ i40iw_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ i40iw_copy_ip_ntohl(cm_info.rem_addr,
+ raddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ cm_info.rem_port = ntohs(raddr6->sin6_port);
+ i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
+ }
+ cm_info.cm_id = cm_id;
+ if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
+ (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
+ raddr6->sin6_addr.in6_u.u6_addr32,
+ sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
+ status = i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL,
+ true);
+ if (status)
+ return -EINVAL;
+ }
+ status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
+ if (status) {
+ i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ return -EINVAL;
+ }
+
+ apbvt_set = 1;
+ cm_id->add_ref(cm_id);
+ cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
+ conn_param->private_data_len,
+ (void *)conn_param->private_data,
+ &cm_info);
+ if (!cm_node) {
+ i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+
+ if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
+ cm_info.loc_port))
+ i40iw_manage_apbvt(iwdev,
+ cm_info.loc_port,
+ I40IW_MANAGE_APBVT_DEL);
+ cm_id->rem_ref(cm_id);
+ iwdev->cm_core.stats_connect_errs++;
+ return -ENOMEM;
+ }
+
+ i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
+ if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
+ !cm_node->ord_size)
+ cm_node->ord_size = 1;
+
+ cm_node->apbvt_set = apbvt_set;
+ cm_node->qhash_set = true;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ iwqp->cm_id = cm_id;
+ i40iw_add_ref(&iwqp->ibqp);
+ return 0;
+}
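+
+/*
+ * Unwind ordering above (for reference): on cm node allocation failure
+ * the qhash entry is deleted first, the apbvt entry only if no other
+ * listener still uses the local port, and the cm_id reference is
+ * dropped last - the reverse of the order in which they were taken.
+ */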
+
+/**
+ * i40iw_create_listen - registered call creating listener
+ * @cm_id: cm information for passive connection
+ * @backlog: max pending accept count
+ */
+int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_cm_listener *cm_listen_node;
+ struct i40iw_cm_info cm_info;
+ enum i40iw_status_code ret;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ bool wildcard = false;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ memset(&cm_info, 0, sizeof(cm_info));
+ if (laddr->sin_family == AF_INET) {
+ cm_info.ipv4 = true;
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+
+ if (laddr->sin_addr.s_addr != INADDR_ANY)
+ cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
+ else
+ wildcard = true;
+
+ } else {
+ cm_info.ipv4 = false;
+ i40iw_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
+ i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
+ &cm_info.vlan_id,
+ NULL);
+ else
+ wildcard = true;
+ }
+ cm_info.backlog = backlog;
+ cm_info.cm_id = cm_id;
+
+ cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
+ if (!cm_listen_node) {
+ i40iw_pr_err("cm_listen_node == NULL\n");
+ return -ENOMEM;
+ }
+
+ cm_id->provider_data = cm_listen_node;
+
+ if (!cm_listen_node->reused_node) {
+ if (wildcard) {
+ if (cm_info.ipv4)
+ ret = i40iw_add_mqh_4(iwdev,
+ &cm_info,
+ cm_listen_node);
+ else
+ ret = i40iw_add_mqh_6(iwdev,
+ &cm_info,
+ cm_listen_node);
+ if (ret)
+ goto error;
+
+ ret = i40iw_manage_apbvt(iwdev,
+ cm_info.loc_port,
+ I40IW_MANAGE_APBVT_ADD);
+
+ if (ret)
+ goto error;
+ } else {
+ ret = i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL,
+ true);
+ if (ret)
+ goto error;
+ cm_listen_node->qhash_set = true;
+ ret = i40iw_manage_apbvt(iwdev,
+ cm_info.loc_port,
+ I40IW_MANAGE_APBVT_ADD);
+ if (ret)
+ goto error;
+ }
+ }
+ cm_id->add_ref(cm_id);
+ cm_listen_node->cm_core->stats_listen_created++;
+ return 0;
+ error:
+ i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
+ return -EINVAL;
+}
+
+/**
+ * i40iw_destroy_listen - registered call to destroy listener
+ * @cm_id: cm information for passive connection
+ */
+int i40iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct i40iw_device *iwdev;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (cm_id->provider_data)
+ i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
+ else
+ i40iw_pr_err("cm_id->provider_data was NULL\n");
+
+ cm_id->rem_ref(cm_id);
+
+ return 0;
+}
+
+/**
+ * i40iw_cm_event_connected - handle connected active node
+ * @event: the info for cm_node of connection
+ */
+static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
+{
+ struct i40iw_qp *iwqp;
+ struct i40iw_device *iwdev;
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_sc_dev *dev;
+ struct ib_qp_attr attr;
+ struct iw_cm_id *cm_id;
+ int status;
+ bool read0;
+
+ cm_node = event->cm_node;
+ cm_id = cm_node->cm_id;
+ iwqp = (struct i40iw_qp *)cm_id->provider_data;
+ iwdev = to_iwdev(iwqp->ibqp.device);
+ dev = &iwdev->sc_dev;
+
+ if (iwqp->destroyed) {
+ status = -ETIMEDOUT;
+ goto error;
+ }
+ i40iw_cm_init_tsa_conn(iwqp, cm_node);
+ read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+ dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
+ if (iwqp->page)
+ kunmap(iwqp->page);
+ status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
+ if (status)
+ i40iw_pr_err("send cm event\n");
+
+ memset(&attr, 0, sizeof(attr));
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+ cm_node->accelerated = 1;
+ if (cm_node->accept_pend) {
+ if (!cm_node->listener)
+ i40iw_pr_err("listener is null for passive node\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+ return;
+
+error:
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ i40iw_send_cm_event(event->cm_node,
+ cm_id,
+ IW_CM_EVENT_CONNECT_REPLY,
+ status);
+ cm_id->rem_ref(cm_id);
+ i40iw_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * i40iw_cm_event_reset - handle reset
+ * @event: the info for cm_node of connection
+ */
+static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
+{
+ struct i40iw_cm_node *cm_node = event->cm_node;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct i40iw_qp *iwqp;
+
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+ if (!iwqp)
+ return;
+
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "reset event %p - cm_id = %p\n",
+ event->cm_node, cm_id);
+ iwqp->cm_id = NULL;
+
+ i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
+ i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
+}
+
+/**
+ * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
+ * @work: pointer to cm event info
+ */
+static void i40iw_cm_event_handler(struct work_struct *work)
+{
+ struct i40iw_cm_event *event = container_of(work,
+ struct i40iw_cm_event,
+ event_work);
+ struct i40iw_cm_node *cm_node;
+
+ if (!event || !event->cm_node || !event->cm_node->cm_core)
+ return;
+
+ cm_node = event->cm_node;
+
+ switch (event->type) {
+ case I40IW_CM_EVENT_MPA_REQ:
+ i40iw_send_cm_event(cm_node,
+ cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REQUEST,
+ 0);
+ break;
+ case I40IW_CM_EVENT_RESET:
+ i40iw_cm_event_reset(event);
+ break;
+ case I40IW_CM_EVENT_CONNECTED:
+ if (!event->cm_node->cm_id ||
+ (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
+ break;
+ i40iw_cm_event_connected(event);
+ break;
+ case I40IW_CM_EVENT_MPA_REJECT:
+ if (!event->cm_node->cm_id ||
+ (cm_node->state == I40IW_CM_STATE_OFFLOADED))
+ break;
+ i40iw_send_cm_event(cm_node,
+ cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNREFUSED);
+ break;
+ case I40IW_CM_EVENT_ABORTED:
+ if (!event->cm_node->cm_id ||
+ (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
+ break;
+ i40iw_event_connect_error(event);
+ break;
+ default:
+ i40iw_pr_err("event type = %d\n", event->type);
+ break;
+ }
+
+ event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
+ i40iw_rem_ref_cm_node(event->cm_node);
+ kfree(event);
+}
+
+/**
+ * i40iw_cm_post_event - queue event request for worker thread
+ * @event: cm node's info for the event upcall
+ */
+static void i40iw_cm_post_event(struct i40iw_cm_event *event)
+{
+ atomic_inc(&event->cm_node->ref_count);
+ event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
+ INIT_WORK(&event->event_work, i40iw_cm_event_handler);
+
+ queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+}
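+
+/*
+ * Reference pairing (summary of the two functions above): every
+ * i40iw_cm_post_event() takes one cm_node reference and one cm_id
+ * reference; i40iw_cm_event_handler() drops both and frees the event
+ * after the upcall completes. Callers therefore only allocate the
+ * event and fill in type, cm_node and cm_info before posting.
+ */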
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
new file mode 100644
index 000000000000..5f8ceb4a8e84
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -0,0 +1,456 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_CM_H
+#define I40IW_CM_H
+
+#define QUEUE_EVENTS
+
+#define I40IW_MANAGE_APBVT_DEL 0
+#define I40IW_MANAGE_APBVT_ADD 1
+
+#define I40IW_MPA_REQUEST_ACCEPT 1
+#define I40IW_MPA_REQUEST_REJECT 2
+
+/* IETF MPA -- defines, enums, structs */
+#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VERSION 1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE 20
+#define IETF_RTR_MSG_SIZE 4
+#define IETF_MPA_V2_FLAG 0x10
+#define SNDMARKER_SEQNMASK 0x000001FF
+
+#define I40IW_MAX_IETF_SIZE 32
+
+#define MPA_ZERO_PAD_LEN 4
+
+/* IETF RTR MSG Fields */
+#define IETF_PEER_TO_PEER 0x8000
+#define IETF_FLPDU_ZERO_LEN 0x4000
+#define IETF_RDMA0_WRITE 0x8000
+#define IETF_RDMA0_READ 0x4000
+#define IETF_NO_IRD_ORD 0x3FFF
+
+/* HW-supported IRD sizes */
+#define I40IW_HW_IRD_SETTING_2 2
+#define I40IW_HW_IRD_SETTING_4 4
+#define I40IW_HW_IRD_SETTING_8 8
+#define I40IW_HW_IRD_SETTING_16 16
+#define I40IW_HW_IRD_SETTING_32 32
+#define I40IW_HW_IRD_SETTING_64 64
+
+enum ietf_mpa_flags {
+ IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
+ IETF_MPA_FLAGS_CRC = 0x40, /* receive CRC */
+ IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
+};
+
+struct ietf_mpa_v1 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ u8 priv_data[0];
+};
+
+#define ietf_mpa_req_resp_frame ietf_mpa_frame
+
+struct ietf_rtr_msg {
+ __be16 ctrl_ird;
+ __be16 ctrl_ord;
+};
+
+struct ietf_mpa_v2 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ struct ietf_rtr_msg rtr_msg;
+ u8 priv_data[0];
+};
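+
+/*
+ * Size check (informational): struct ietf_mpa_v1 is 20 bytes before any
+ * private data (16-byte key + flags + rev + 16-bit priv_data_len),
+ * matching IETF_MPA_FRAME_SIZE; ietf_mpa_v2 adds the 4-byte rtr_msg
+ * (IETF_RTR_MSG_SIZE) for 24 bytes, and both fit within
+ * I40IW_MAX_IETF_SIZE.
+ */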
+
+struct i40iw_cm_node;
+enum i40iw_timer_type {
+ I40IW_TIMER_TYPE_SEND,
+ I40IW_TIMER_TYPE_RECV,
+ I40IW_TIMER_NODE_CLEANUP,
+ I40IW_TIMER_TYPE_CLOSE,
+};
+
+#define I40IW_PASSIVE_STATE_INDICATED 0
+#define I40IW_DO_NOT_SEND_RESET_EVENT 1
+#define I40IW_SEND_RESET_EVENT 2
+
+#define MAX_I40IW_IFS 4
+
+#define SET_ACK 0x1
+#define SET_SYN 0x2
+#define SET_FIN 0x4
+#define SET_RST 0x8
+
+#define TCP_OPTIONS_PADDING 3
+
+struct option_base {
+ u8 optionnum;
+ u8 length;
+};
+
+enum option_numbers {
+ OPTION_NUMBER_END,
+ OPTION_NUMBER_NONE,
+ OPTION_NUMBER_MSS,
+ OPTION_NUMBER_WINDOW_SCALE,
+ OPTION_NUMBER_SACK_PERM,
+ OPTION_NUMBER_SACK,
+ OPTION_NUMBER_WRITE0 = 0xbc
+};
+
+struct option_mss {
+ u8 optionnum;
+ u8 length;
+ __be16 mss;
+};
+
+struct option_windowscale {
+ u8 optionnum;
+ u8 length;
+ u8 shiftcount;
+};
+
+union all_known_options {
+ char as_end;
+ struct option_base as_base;
+ struct option_mss as_mss;
+ struct option_windowscale as_windowscale;
+};
+
+struct i40iw_timer_entry {
+ struct list_head list;
+ unsigned long timetosend; /* jiffies */
+ struct i40iw_puda_buf *sqbuf;
+ u32 type;
+ u32 retrycount;
+ u32 retranscount;
+ u32 context;
+ u32 send_retrans;
+ int close_when_complete;
+};
+
+#define I40IW_DEFAULT_RETRYS 64
+#define I40IW_DEFAULT_RETRANS 8
+#define I40IW_DEFAULT_TTL 0x40
+#define I40IW_DEFAULT_RTT_VAR 0x6
+#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF
+#define I40IW_DEFAULT_REXMIT_THRESH 8
+
+#define I40IW_RETRY_TIMEOUT HZ
+#define I40IW_SHORT_TIME 10
+#define I40IW_LONG_TIME (2 * HZ)
+#define I40IW_MAX_TIMEOUT ((unsigned long)(12 * HZ))
+
+#define I40IW_CM_HASHTABLE_SIZE 1024
+#define I40IW_CM_TCP_TIMER_INTERVAL 3000
+#define I40IW_CM_DEFAULT_MTU 1540
+#define I40IW_CM_DEFAULT_FRAME_CNT 10
+#define I40IW_CM_THREAD_STACK_SIZE 256
+#define I40IW_CM_DEFAULT_RCV_WND 64240
+#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc
+#define I40IW_CM_DEFAULT_RCV_WND_SCALE 2
+#define I40IW_CM_DEFAULT_FREE_PKTS 0x000A
+#define I40IW_CM_FREE_PKT_LO_WATERMARK 2
+
+#define I40IW_CM_DEFAULT_MSS 536
+
+#define I40IW_CM_DEF_SEQ 0x159bf75f
+#define I40IW_CM_DEF_LOCAL_ID 0x3b47
+
+#define I40IW_CM_DEF_SEQ2 0x18ed5740
+#define I40IW_CM_DEF_LOCAL_ID2 0xb807
+#define MAX_CM_BUFFER (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
+
+typedef u32 i40iw_addr_t;
+
+#define i40iw_cm_tsa_context i40iw_qp_context
+
+struct i40iw_qp;
+
+/* cm node transition states */
+enum i40iw_cm_node_state {
+ I40IW_CM_STATE_UNKNOWN,
+ I40IW_CM_STATE_INITED,
+ I40IW_CM_STATE_LISTENING,
+ I40IW_CM_STATE_SYN_RCVD,
+ I40IW_CM_STATE_SYN_SENT,
+ I40IW_CM_STATE_ONE_SIDE_ESTABLISHED,
+ I40IW_CM_STATE_ESTABLISHED,
+ I40IW_CM_STATE_ACCEPTING,
+ I40IW_CM_STATE_MPAREQ_SENT,
+ I40IW_CM_STATE_MPAREQ_RCVD,
+ I40IW_CM_STATE_MPAREJ_RCVD,
+ I40IW_CM_STATE_OFFLOADED,
+ I40IW_CM_STATE_FIN_WAIT1,
+ I40IW_CM_STATE_FIN_WAIT2,
+ I40IW_CM_STATE_CLOSE_WAIT,
+ I40IW_CM_STATE_TIME_WAIT,
+ I40IW_CM_STATE_LAST_ACK,
+ I40IW_CM_STATE_CLOSING,
+ I40IW_CM_STATE_LISTENER_DESTROYED,
+ I40IW_CM_STATE_CLOSED
+};
+
+enum mpa_frame_version {
+ IETF_MPA_V1 = 1,
+ IETF_MPA_V2 = 2
+};
+
+enum mpa_frame_key {
+ MPA_KEY_REQUEST,
+ MPA_KEY_REPLY
+};
+
+enum send_rdma0 {
+ SEND_RDMA_READ_ZERO = 1,
+ SEND_RDMA_WRITE_ZERO = 2
+};
+
+enum i40iw_tcpip_pkt_type {
+ I40IW_PKT_TYPE_UNKNOWN,
+ I40IW_PKT_TYPE_SYN,
+ I40IW_PKT_TYPE_SYNACK,
+ I40IW_PKT_TYPE_ACK,
+ I40IW_PKT_TYPE_FIN,
+ I40IW_PKT_TYPE_RST
+};
+
+/* CM context params */
+struct i40iw_cm_tcp_context {
+ u8 client;
+
+ u32 loc_seq_num;
+ u32 loc_ack_num;
+ u32 rem_ack_num;
+ u32 rcv_nxt;
+
+ u32 loc_id;
+ u32 rem_id;
+
+ u32 snd_wnd;
+ u32 max_snd_wnd;
+
+ u32 rcv_wnd;
+ u32 mss;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+
+ struct timeval sent_ts;
+};
+
+enum i40iw_cm_listener_state {
+ I40IW_CM_LISTENER_PASSIVE_STATE = 1,
+ I40IW_CM_LISTENER_ACTIVE_STATE = 2,
+ I40IW_CM_LISTENER_EITHER_STATE = 3
+};
+
+struct i40iw_cm_listener {
+ struct list_head list;
+ struct i40iw_cm_core *cm_core;
+ u8 loc_mac[ETH_ALEN];
+ u32 loc_addr[4];
+ u16 loc_port;
+ u32 map_loc_addr[4];
+ u16 map_loc_port;
+ struct iw_cm_id *cm_id;
+ atomic_t ref_count;
+ struct i40iw_device *iwdev;
+ atomic_t pend_accepts_cnt;
+ int backlog;
+ enum i40iw_cm_listener_state listener_state;
+ u32 reused_node;
+ u8 user_pri;
+ u16 vlan_id;
+ bool qhash_set;
+ bool ipv4;
+ struct list_head child_listen_list;
+};
+
+struct i40iw_kmem_info {
+ void *addr;
+ u32 size;
+};
+
+/* per connection node and node state information */
+struct i40iw_cm_node {
+ u32 loc_addr[4], rem_addr[4];
+ u16 loc_port, rem_port;
+ u32 map_loc_addr[4], map_rem_addr[4];
+ u16 map_loc_port, map_rem_port;
+ u16 vlan_id;
+ enum i40iw_cm_node_state state;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ atomic_t ref_count;
+ struct i40iw_qp *iwqp;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_cm_tcp_context tcp_cntxt;
+ struct i40iw_cm_core *cm_core;
+ struct i40iw_cm_node *loopbackpartner;
+ struct i40iw_timer_entry *send_entry;
+ struct i40iw_timer_entry *close_entry;
+ spinlock_t retrans_list_lock; /* cm transmit packet */
+ enum send_rdma0 send_rdma0_op;
+ u16 ird_size;
+ u16 ord_size;
+ u16 mpav2_ird_ord;
+ struct iw_cm_id *cm_id;
+ struct list_head list;
+ int accelerated;
+ struct i40iw_cm_listener *listener;
+ int apbvt_set;
+ int accept_pend;
+ struct list_head timer_entry;
+ struct list_head reset_entry;
+ atomic_t passive_state;
+ bool qhash_set;
+ u8 user_pri;
+ bool ipv4;
+ bool snd_mark_en;
+ u16 lsmm_size;
+ enum mpa_frame_version mpa_frame_rev;
+ struct i40iw_kmem_info pdata;
+ union {
+ struct ietf_mpa_v1 mpa_frame;
+ struct ietf_mpa_v2 mpa_v2_frame;
+ };
+
+ u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
+ struct i40iw_kmem_info mpa_hdr;
+};
+
+/*
+ * structure for client or CM to fill when making CM api calls;
+ * only need to set relevant data, based on op.
+ */
+struct i40iw_cm_info {
+ struct iw_cm_id *cm_id;
+ u16 loc_port;
+ u16 rem_port;
+ u32 loc_addr[4];
+ u32 rem_addr[4];
+ u16 map_loc_port;
+ u16 map_rem_port;
+ u32 map_loc_addr[4];
+ u32 map_rem_addr[4];
+ u16 vlan_id;
+ int backlog;
+ u16 user_pri;
+ bool ipv4;
+};
+
+/* CM event codes */
+enum i40iw_cm_event_type {
+ I40IW_CM_EVENT_UNKNOWN,
+ I40IW_CM_EVENT_ESTABLISHED,
+ I40IW_CM_EVENT_MPA_REQ,
+ I40IW_CM_EVENT_MPA_CONNECT,
+ I40IW_CM_EVENT_MPA_ACCEPT,
+ I40IW_CM_EVENT_MPA_REJECT,
+ I40IW_CM_EVENT_MPA_ESTABLISHED,
+ I40IW_CM_EVENT_CONNECTED,
+ I40IW_CM_EVENT_RESET,
+ I40IW_CM_EVENT_ABORTED
+};
+
+/* event to post to CM event handler */
+struct i40iw_cm_event {
+ enum i40iw_cm_event_type type;
+ struct i40iw_cm_info cm_info;
+ struct work_struct event_work;
+ struct i40iw_cm_node *cm_node;
+};
+
+struct i40iw_cm_core {
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_dev *dev;
+
+ struct list_head listen_nodes;
+ struct list_head connected_nodes;
+
+ struct timer_list tcp_timer;
+
+ struct workqueue_struct *event_wq;
+ struct workqueue_struct *disconn_wq;
+
+ spinlock_t ht_lock; /* manage hash table */
+ spinlock_t listen_list_lock; /* listen list */
+
+ u64 stats_nodes_created;
+ u64 stats_nodes_destroyed;
+ u64 stats_listen_created;
+ u64 stats_listen_destroyed;
+ u64 stats_listen_nodes_created;
+ u64 stats_listen_nodes_destroyed;
+ u64 stats_loopbacks;
+ u64 stats_accepts;
+ u64 stats_rejects;
+ u64 stats_connect_errs;
+ u64 stats_passive_errs;
+ u64 stats_pkt_retrans;
+ u64 stats_backlog_drops;
+};
+
+int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+ struct i40iw_puda_buf *sqbuf,
+ enum i40iw_timer_type type,
+ int send_retrans,
+ int close_when_complete);
+
+int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
+int i40iw_reject(struct iw_cm_id *, const void *, u8);
+int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
+int i40iw_create_listen(struct iw_cm_id *, int);
+int i40iw_destroy_listen(struct iw_cm_id *);
+
+int i40iw_cm_start(struct i40iw_device *);
+int i40iw_cm_stop(struct i40iw_device *);
+
+int i40iw_arp_table(struct i40iw_device *iwdev,
+ u32 *ip_addr,
+ bool ipv4,
+ u8 *mac_addr,
+ u32 action);
+
+#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
new file mode 100644
index 000000000000..f05802bf6ca0
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -0,0 +1,4743 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * i40iw_insert_wqe_hdr - write wqe header
+ * @wqe: cqp wqe for header
+ * @header: header for the cqp wqe
+ */
+static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+{
+ wmb(); /* make sure WQE is populated before polarity is set */
+ set_64bit_val(wqe, 24, header);
+}
+
+/**
+ * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
+ * @cqp: struct for cqp hw
+ * @val: cqp tail register value
+ * @tail:wqtail register value
+ * @error: cqp processing err
+ */
+static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
+ u32 *val,
+ u32 *tail,
+ u32 *error)
+{
+ if (cqp->dev->is_pf) {
+ *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
+ *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
+ *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
+ } else {
+ *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
+ *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
+ *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
+ }
+}
+
+/**
+ * i40iw_cqp_poll_registers - poll cqp registers
+ * @cqp: struct for cqp hw
+ * @tail: wqtail register value
+ * @count: how many times to try for completion
+ */
+static enum i40iw_status_code i40iw_cqp_poll_registers(
+ struct i40iw_sc_cqp *cqp,
+ u32 tail,
+ u32 count)
+{
+ u32 i = 0;
+ u32 newtail, error, val;
+
+ while (i < count) {
+ i++;
+ i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
+ if (error) {
+ error = (cqp->dev->is_pf) ?
+ i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
+ i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
+ return I40IW_ERR_CQP_COMPL_ERROR;
+ }
+ if (newtail != tail) {
+ /* SUCCESS */
+ I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+ return 0;
+ }
+ udelay(I40IW_SLEEP_COUNT);
+ }
+ return I40IW_ERR_TIMEOUT;
+}
+
+/**
+ * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
+ * @buf: ptr to fpm commit buffer
+ * @info: ptr to i40iw_hmc_obj_info struct
+ *
+ * parses fpm commit info and copies the base values
+ * of the hmc objects into info
+ */
+static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
+ u64 *buf,
+ struct i40iw_hmc_obj_info *info)
+{
+ u64 temp;
+ u32 i, j;
+ u32 low;
+
+ /* copy base values in obj_info */
+ for (i = I40IW_HMC_IW_QP, j = 0;
+ i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ get_64bit_val(buf, j, &temp);
+ info[i].base = RS_64_1(temp, 32) * 512;
+ low = (u32)(temp);
+ if (low)
+ info[i].cnt = low;
+ }
+ return 0;
+}
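+
+/*
+ * Commit-buffer layout assumed above: one 64-bit word per HMC object at
+ * an 8-byte stride, the low 32 bits holding the object count and the
+ * upper 32 bits the base in 512-byte units, e.g. for the QP object at
+ * offset 0:
+ *
+ *	get_64bit_val(buf, 0, &temp);
+ *	base = RS_64_1(temp, 32) * 512;	upper half, scaled
+ *	cnt  = (u32)temp;		lower half
+ */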
+
+/**
+ * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
+ * @buf: ptr to fpm query buffer
+ * @hmc_info: ptr to i40iw_hmc_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ *
+ * parses fpm query buffer and copies max_cnt and
+ * size values of hmc objects into hmc_info
+ */
+static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
+ u64 *buf,
+ struct i40iw_hmc_info *hmc_info,
+ struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
+{
+ u64 temp;
+ struct i40iw_hmc_obj_info *obj_info;
+ u32 i, j, size;
+ u16 max_pe_sds;
+
+ obj_info = hmc_info->hmc_obj;
+
+ get_64bit_val(buf, 0, &temp);
+ hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
+ max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);
+
+ /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
+ if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
+ max_pe_sds--;
+ hmc_fpm_misc->max_sds = max_pe_sds;
+ hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
+
+ for (i = I40IW_HMC_IW_QP, j = 8;
+ i <= I40IW_HMC_IW_ARP; i++, j += 8) {
+ get_64bit_val(buf, j, &temp);
+ if (i == I40IW_HMC_IW_QP)
+ obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+ else if (i == I40IW_HMC_IW_CQ)
+ obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+ else
+ obj_info[i].max_cnt = (u32)temp;
+
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[i].size = ((u64)1 << size);
+ }
+ for (i = I40IW_HMC_IW_MR, j = 48;
+ i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+ get_64bit_val(buf, j, &temp);
+ obj_info[i].max_cnt = (u32)temp;
+ size = (u32)RS_64_1(temp, 32);
+ obj_info[i].size = LS_64_1(1, size);
+ }
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+ get_64bit_val(buf, 64, &temp);
+ hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
+ if (!hmc_fpm_misc->xf_block_size)
+ return I40IW_ERR_INVALID_SIZE;
+ get_64bit_val(buf, 80, &temp);
+ hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
+ if (!hmc_fpm_misc->q1_block_size)
+ return I40IW_ERR_INVALID_SIZE;
+ return 0;
+}
+
+/**
+ * i40iw_sc_pd_init - initialize sc pd struct
+ * @dev: sc device struct
+ * @pd: sc pd ptr
+ * @pd_id: pd_id for allocated pd
+ */
+static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_pd *pd,
+ u16 pd_id)
+{
+ pd->size = sizeof(*pd);
+ pd->pd_id = pd_id;
+ pd->dev = dev;
+}
+
+/**
+ * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
+ * @wqsize: size of the wq (sq, rq, srq) to encoded_size
+ * @cqpsq: true for the cqp sq, whose encoded size is one more than other wqs
+ */
+u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
+{
+ u8 encoded_size = 0;
+
+ /* cqp sq's hw coded value starts from 1 for size of 4
+ * while it starts from 0 for qp wqs.
+ */
+ if (cqpsq)
+ encoded_size = 1;
+ wqsize >>= 2;
+ while (wqsize >>= 1)
+ encoded_size++;
+ return encoded_size;
+}
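+
+/*
+ * Worked example for the encoding above: a 4-entry cqp sq encodes to 1
+ * (4 >> 2 = 1, so the loop body never runs) and each doubling of the
+ * depth adds one, giving 10 for the 2048-entry maximum; a qp wq of the
+ * same depth encodes to one less, since its values start from 0.
+ */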
+
+/**
+ * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
+ * @cqp: IWARP control queue pair pointer
+ * @info: IWARP control queue pair init info pointer
+ *
+ * Initializes the object and context buffers for a control Queue Pair.
+ */
+static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
+ struct i40iw_cqp_init_info *info)
+{
+ u8 hw_sq_size;
+
+ if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
+ (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
+ ((info->sq_size & (info->sq_size - 1))))
+ return I40IW_ERR_INVALID_SIZE;
+
+ hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
+ cqp->size = sizeof(*cqp);
+ cqp->sq_size = info->sq_size;
+ cqp->hw_sq_size = hw_sq_size;
+ cqp->sq_base = info->sq;
+ cqp->host_ctx = info->host_ctx;
+ cqp->sq_pa = info->sq_pa;
+ cqp->host_ctx_pa = info->host_ctx_pa;
+ cqp->dev = info->dev;
+ cqp->struct_ver = info->struct_ver;
+ cqp->scratch_array = info->scratch_array;
+ cqp->polarity = 0;
+ cqp->en_datacenter_tcp = info->en_datacenter_tcp;
+ cqp->enabled_vf_count = info->enabled_vf_count;
+ cqp->hmc_profile = info->hmc_profile;
+ info->dev->cqp = cqp;
+
+ I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
+ "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
+ __func__, cqp->sq_size, cqp->hw_sq_size,
+ cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cqp_create - create cqp during bringup
+ * @cqp: struct for cqp hw
+ * @disable_pfpdus: whether pfpdus are to be disabled
+ * @maj_err: If error, major err number
+ * @min_err: If error, minor err number
+ */
+static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
+ bool disable_pfpdus,
+ u16 *maj_err,
+ u16 *min_err)
+{
+ u64 temp;
+ u32 cnt = 0, p1, p2, val = 0, err_code;
+ enum i40iw_status_code ret_code;
+
+ ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
+ &cqp->sdbuf,
+ 128,
+ I40IW_SD_BUF_ALIGNMENT);
+
+ if (ret_code)
+ goto exit;
+
+ temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
+ LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
+
+ if (disable_pfpdus)
+ temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);
+
+ set_64bit_val(cqp->host_ctx, 0, temp);
+ set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
+ temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
+ LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
+ set_64bit_val(cqp->host_ctx, 16, temp);
+ set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
+ set_64bit_val(cqp->host_ctx, 32, 0);
+ set_64bit_val(cqp->host_ctx, 40, 0);
+ set_64bit_val(cqp->host_ctx, 48, 0);
+ set_64bit_val(cqp->host_ctx, 56, 0);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
+ cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);
+
+ p1 = RS_32_1(cqp->host_ctx_pa, 32);
+ p2 = (u32)cqp->host_ctx_pa;
+
+ if (cqp->dev->is_pf) {
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
+ } else {
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
+ }
+ do {
+ if (cnt++ > I40IW_DONE_COUNT) {
+ i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
+ ret_code = I40IW_ERR_TIMEOUT;
+ /*
+ * read PFPE_CQPERRCODES register to get the minor
+ * and major error code
+ */
+ if (cqp->dev->is_pf)
+ err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
+ else
+ err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
+ *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
+ *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
+ goto exit;
+ }
+ udelay(I40IW_SLEEP_COUNT);
+ if (cqp->dev->is_pf)
+ val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
+ else
+ val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
+ } while (!val);
+
+exit:
+ if (!ret_code)
+ cqp->process_cqp_sds = i40iw_update_sds_noccq;
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_cqp_post_sq - post of cqp's sq
+ * @cqp: struct for cqp hw
+ */
+void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
+{
+ if (cqp->dev->is_pf)
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
+ else
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
+
+ i40iw_debug(cqp->dev,
+ I40IW_DEBUG_WQE,
+ "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
+ __func__,
+ cqp->sq_ring.head,
+ cqp->sq_ring.tail,
+ cqp->sq_ring.size);
+}
+
+/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+ u64 *wqe = NULL;
+ u32 wqe_idx;
+ enum i40iw_status_code ret_code;
+
+ if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
+ i40iw_debug(cqp->dev,
+ I40IW_DEBUG_WQE,
+ "%s: ring is full head %x tail %x size %x\n",
+ __func__,
+ cqp->sq_ring.head,
+ cqp->sq_ring.tail,
+ cqp->sq_ring.size);
+ return NULL;
+ }
+ I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+ if (!wqe_idx)
+ cqp->polarity = !cqp->polarity;
+
+ wqe = cqp->sq_base[wqe_idx].elem;
+ cqp->scratch_array[wqe_idx] = scratch;
+ I40IW_CQP_INIT_WQE(wqe);
+
+ return wqe;
+}
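+
+/*
+ * Every cqp command below follows the same submission pattern; shown
+ * here once for reference (payload/opcode are placeholders):
+ *
+ *	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ *	if (!wqe)
+ *		return I40IW_ERR_RING_FULL;
+ *	set_64bit_val(wqe, 16, payload);
+ *	header = LS_64(opcode, I40IW_CQPSQ_OPCODE) |
+ *		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ *	i40iw_insert_wqe_hdr(wqe, header);	wmb() before valid bit
+ *	if (post_sq)
+ *		i40iw_sc_cqp_post_sq(cqp);	ring the doorbell
+ */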
+
+/**
+ * i40iw_sc_cqp_destroy - destroy cqp during close
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
+{
+ u32 cnt = 0, val = 1;
+ enum i40iw_status_code ret_code = 0;
+ u32 cqpstat_addr;
+
+ if (cqp->dev->is_pf) {
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
+ i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
+ cqpstat_addr = I40E_PFPE_CCQPSTATUS;
+ } else {
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
+ i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
+ cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
+ }
+ do {
+ if (cnt++ > I40IW_DONE_COUNT) {
+ ret_code = I40IW_ERR_TIMEOUT;
+ break;
+ }
+ udelay(I40IW_SLEEP_COUNT);
+ val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
+ } while (val);
+
+ i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_ccq_arm - enable intr for control cq
+ * @ccq: ccq sc struct
+ */
+static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_seq_num;
+
+ /* write to cq doorbell shadow area */
+ /* arm next se should always be zero */
+ get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
+
+ sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
+ arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
+
+ arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
+ arm_seq_num++;
+
+ temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
+ LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
+ LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
+ LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);
+
+ set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
+
+ wmb(); /* make sure shadow area is updated before arming */
+
+ if (ccq->dev->is_pf)
+ i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
+ else
+ i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
+}
+
+/**
+ * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
+ * @ccq: ccq sc struct
+ * @info: completion q entry to return
+ */
+static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
+ struct i40iw_sc_cq *ccq,
+ struct i40iw_ccq_cqe_info *info)
+{
+ u64 qp_ctx, temp, temp1;
+ u64 *cqe;
+ struct i40iw_sc_cqp *cqp;
+ u32 wqe_idx;
+ u8 polarity;
+ enum i40iw_status_code ret_code = 0;
+
+ if (ccq->cq_uk.avoid_mem_cflct)
+ cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
+ else
+ cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);
+
+ get_64bit_val(cqe, 24, &temp);
+ polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
+ if (polarity != ccq->cq_uk.polarity)
+ return I40IW_ERR_QUEUE_EMPTY;
+
+ get_64bit_val(cqe, 8, &qp_ctx);
+ cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
+ info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
+ info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
+ if (info->error) {
+ info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
+ info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
+ }
+ wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
+ info->scratch = cqp->scratch_array[wqe_idx];
+
+ get_64bit_val(cqe, 16, &temp1);
+ info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
+ get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
+ info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
+ info->cqp = cqp;
+
+ /* move the head for cq */
+ I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
+ if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
+ ccq->cq_uk.polarity ^= 1;
+
+ /* update cq tail in cq shadow memory also */
+ I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
+ set_64bit_val(ccq->cq_uk.shadow_area,
+ 0,
+ I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
+ wmb(); /* write shadow area before tail */
+ I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+ return ret_code;
+}
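+
+/*
+ * Completion-polling note (summary of the steps above): a CQE is valid
+ * only when its valid bit matches the cq's current polarity; the
+ * polarity flips each time the head wraps back to index 0, and the new
+ * head is mirrored into the cq shadow area so hardware can tell how far
+ * software has consumed.
+ */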
+
+/**
+ * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
+ * @cqp: struct for cqp hw
+ * @op_code: cqp opcode for completion
+ * @info: completion q entry to return
+ */
+static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
+ struct i40iw_sc_cqp *cqp,
+ u8 op_code,
+ struct i40iw_ccq_cqe_info *compl_info)
+{
+ struct i40iw_ccq_cqe_info info;
+ struct i40iw_sc_cq *ccq;
+ enum i40iw_status_code ret_code = 0;
+ u32 cnt = 0;
+
+ memset(&info, 0, sizeof(info));
+ ccq = cqp->dev->ccq;
+ while (1) {
+ if (cnt++ > I40IW_DONE_COUNT)
+ return I40IW_ERR_TIMEOUT;
+
+ if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
+ udelay(I40IW_SLEEP_COUNT);
+ continue;
+ }
+
+ if (info.error) {
+ ret_code = I40IW_ERR_CQP_COMPL_ERROR;
+ break;
+ }
+ /* check that the returned opcode matches the one requested */
+ if (op_code != info.op_code) {
+ i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
+ "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
+ __func__, op_code, info.op_code);
+ }
+ /* success, exit out of the loop */
+ if (op_code == info.op_code)
+ break;
+ }
+
+ if (compl_info)
+ memcpy(compl_info, &info, sizeof(*compl_info));
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_manage_push_page - Handle push page
+ * @cqp: struct for cqp hw
+ * @info: push page info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_push_page(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_cqp_manage_push_page_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
+ return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, info->qs_handle);
+
+ header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
+ LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+ LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_manage_hmc_pm_func_table - manage hmc pm function table
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @vf_index: vf index for cqp
+ * @free_pm_fcn: true to free the pm function entry
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 vf_index,
+ bool free_pm_fcn,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ if (vf_index >= I40IW_MAX_VF_PER_PF)
+ return I40IW_ERR_INVALID_VF_ID;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
+ LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
+ LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_profile_type: type of profile to set
+ * @vf_num: vf number for profile
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 hmc_profile_type,
+ u8 vf_num, bool post_sq,
+ bool poll_registers)
+{
+ u64 *wqe;
+ u64 header;
+ u32 val, tail, error;
+ enum i40iw_status_code ret_code = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16,
+ (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
+ LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));
+
+ header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SET_HMC_RESOURCE_PROFILE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ if (poll_registers)
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
+ else
+ ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
+ I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
+ NULL);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
+{
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
+}
+
+/**
+ * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
+{
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
+}
+
+/**
+ * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @commit_fpm_mem: memory for fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_commit_fpm_values(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 hmc_fn_id,
+ struct i40iw_dma_mem *commit_fpm_mem,
+ bool post_sq,
+ u8 wait_type)
+{
+ u64 *wqe;
+ u64 header;
+ u32 tail, val, error;
+ enum i40iw_status_code ret_code = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, commit_fpm_mem->pa);
+
+ header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+
+ if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+ else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
+ ret_code = i40iw_sc_commit_fpm_values_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
+{
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
+}
+
+/**
+ * i40iw_sc_query_fpm_values - cqp wqe query fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @query_fpm_mem: memory for return fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_query_fpm_values(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 hmc_fn_id,
+ struct i40iw_dma_mem *query_fpm_mem,
+ bool post_sq,
+ u8 wait_type)
+{
+ u64 *wqe;
+ u64 header;
+ u32 tail, val, error;
+ enum i40iw_status_code ret_code = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, query_fpm_mem->pa);
+
+ header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ /* read the tail from CQP_TAIL register */
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+ else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
+ ret_code = i40iw_sc_query_fpm_values_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
+ * @cqp: struct for cqp hw
+ * @info: arp entry information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_add_arp_cache_entry_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 temp, header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 8, info->reach_max);
+
+ temp = info->mac_addr[5] |
+ LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) |
+ LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) |
+ LS_64_1(info->mac_addr[0], 40);
+
+ set_64bit_val(wqe, 16, temp);
+
+ header = info->arp_index |
+ LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+ LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
+ LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_del_arp_cache_entry - delete arp cache entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index to delete arp entry
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u16 arp_index,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ header = arp_index |
+ LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_query_arp_cache_entry - cqp wqe to query an arp cache entry by arp index
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index of the entry to query
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u16 arp_index,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ header = arp_index |
+ LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
+ * @cqp: struct for cqp hw
+ * @info: info for apbvt entry to add or delete
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_apbvt_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, info->port);
+
+ header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
+ * @cqp: struct for cqp hw
+ * @info: info for quad hash to manage
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ *
+ * This is called before connection establishment is started. For passive connections, when a
+ * listener is created, it is called with an entry type of I40IW_QHASH_TYPE_TCP_SYN and the local
+ * ip address and tcp port. When a SYN is received (passive connections) or
+ * sent (active connections), this routine is called with an entry type of
+ * I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad is passed in info.
+ *
+ * Once the iwarp connection is done and its state moves to RTS, the quad hash entry in
+ * the hardware points to the iwarp qp number and requires no further calls from the driver.
+ */
+static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_qhash_table_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 qw1 = 0;
+ u64 qw2 = 0;
+ u64 temp;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ temp = info->mac_addr[5] |
+ LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) |
+ LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) |
+ LS_64_1(info->mac_addr[0], 40);
+
+ set_64bit_val(wqe, 0, temp);
+
+ qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
+ LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
+ if (info->ipv4_valid) {
+ set_64bit_val(wqe,
+ 48,
+ LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
+ } else {
+ set_64bit_val(wqe,
+ 56,
+ LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
+ LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
+
+ set_64bit_val(wqe,
+ 48,
+ LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
+ LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
+ }
+ qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
+ if (info->vlan_valid)
+ qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
+ set_64bit_val(wqe, 16, qw2);
+ if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
+ qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe,
+ 40,
+ LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
+ LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
+ set_64bit_val(wqe,
+ 32,
+ LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
+ LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
+ } else {
+ set_64bit_val(wqe,
+ 32,
+ LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
+ }
+ }
+
+ set_64bit_val(wqe, 8, qw1);
+ temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
+ LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
+ LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
+ LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
+ LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
+ LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);
+
+ i40iw_insert_wqe_hdr(wqe, temp);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
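+
+/*
+ * Editorial usage sketch, not part of this patch: a listener adding a
+ * SYN entry keyed on its local ip/port only (the established entry added
+ * at SYN time carries the full quad, per the comment above). The helper
+ * name and the manage_add opcode parameter are illustrative assumptions;
+ * the i40iw_qhash_table_info fields are the ones used above.
+ */
+static enum i40iw_status_code i40iw_example_add_syn_qhash(
+ struct i40iw_sc_cqp *cqp,
+ u32 local_ipv4,
+ u16 local_port,
+ u8 *mac,
+ u32 manage_add)
+{
+ struct i40iw_qhash_table_info qinfo;
+
+ memset(&qinfo, 0, sizeof(qinfo));
+ qinfo.entry_type = I40IW_QHASH_TYPE_TCP_SYN;
+ qinfo.manage = manage_add; /* add-entry opcode (assumed) */
+ qinfo.ipv4_valid = true;
+ qinfo.dest_ip[0] = local_ipv4;
+ qinfo.dest_port = local_port;
+ memcpy(qinfo.mac_addr, mac, 6);
+ return i40iw_sc_manage_qhash_table_entry(cqp, &qinfo, 0, true);
+}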
+
+/**
+ * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
+ * @cqp: struct for cqp hw
+ * @info: mac addr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
+ struct i40iw_sc_cqp *cqp,
+ struct i40iw_local_mac_ipaddr_entry_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 temp, header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ temp = info->mac_addr[5] |
+ LS_64_1(info->mac_addr[4], 8) |
+ LS_64_1(info->mac_addr[3], 16) |
+ LS_64_1(info->mac_addr[2], 24) |
+ LS_64_1(info->mac_addr[1], 32) |
+ LS_64_1(info->mac_addr[0], 40);
+
+ set_64bit_val(wqe, 32, temp);
+
+ header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
+ LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @entry_idx: index of mac entry
+ * @ignore_ref_count: to force mac address entry delete
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 entry_idx,
+ u8 ignore_ref_count,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
+ LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+ LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cqp_nop - send a nop wqe
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 header;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_ceq_init - initialize ceq
+ * @ceq: ceq sc structure
+ * @info: ceq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
+ struct i40iw_ceq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
+ (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
+ return I40IW_ERR_INVALID_SIZE;
+
+ if (info->ceq_id >= I40IW_MAX_CEQID)
+ return I40IW_ERR_INVALID_CEQ_ID;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ ceq->size = sizeof(*ceq);
+ ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
+ ceq->ceq_id = info->ceq_id;
+ ceq->dev = info->dev;
+ ceq->elem_cnt = info->elem_cnt;
+ ceq->ceq_elem_pa = info->ceqe_pa;
+ ceq->virtual_map = info->virtual_map;
+
+ ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
+ ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
+ ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
+
+ ceq->tph_en = info->tph_en;
+ ceq->tph_val = info->tph_val;
+ ceq->polarity = 1;
+ I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
+ ceq->dev->ceq[info->ceq_id] = ceq;
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_ceq_create - create ceq wqe
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+
+ cqp = ceq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
+ set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+ header = ceq->ceq_id |
+ LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
+ LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
+ LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
+ * @ceq: ceq sc structure
+ */
+static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = ceq->dev->cqp;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
+}
+
+/**
+ * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
+ * @ceq: ceq sc structure
+ */
+static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = ceq->dev->cqp;
+ cqp->process_cqp_sds = i40iw_update_sds_noccq;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
+}
+
+/**
+ * i40iw_sc_cceq_create - create cceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
+{
+ enum i40iw_status_code ret_code;
+
+ ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
+ if (!ret_code)
+ ret_code = i40iw_sc_cceq_create_done(ceq);
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_ceq_destroy - destroy ceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+
+ cqp = ceq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+ header = ceq->ceq_id |
+ LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
+ LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
+ LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_process_ceq - process ceq
+ * @dev: sc device struct
+ * @ceq: ceq sc structure
+ */
+static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
+{
+ u64 temp;
+ u64 *ceqe;
+ struct i40iw_sc_cq *cq = NULL;
+ u8 polarity;
+
+ ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
+ get_64bit_val(ceqe, 0, &temp);
+ polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
+ if (polarity != ceq->polarity)
+ return cq;
+
+ cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);
+
+ I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
+ if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
+ ceq->polarity ^= 1;
+
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
+ else
+ i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);
+
+ return cq;
+}
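+
+/*
+ * Editorial usage sketch, not part of this patch: draining a ceq. The
+ * helper name is an illustrative assumption; i40iw_sc_process_ceq
+ * returns the cq a completion event points at, and NULL once the
+ * current element's valid bit no longer matches the ring polarity.
+ */
+static void i40iw_example_drain_ceq(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_ceq *ceq)
+{
+ struct i40iw_sc_cq *cq;
+
+ while ((cq = i40iw_sc_process_ceq(dev, ceq)) != NULL) {
+ /* poll completions on cq here before fetching the next event */
+ }
+}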
+
+/**
+ * i40iw_sc_aeq_init - initialize aeq
+ * @aeq: aeq structure ptr
+ * @info: aeq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
+ struct i40iw_aeq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
+ (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
+ return I40IW_ERR_INVALID_SIZE;
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ aeq->size = sizeof(*aeq);
+ aeq->polarity = 1;
+ aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
+ aeq->dev = info->dev;
+ aeq->elem_cnt = info->elem_cnt;
+
+ aeq->aeq_elem_pa = info->aeq_elem_pa;
+ I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
+ info->dev->aeq = aeq;
+
+ aeq->virtual_map = info->virtual_map;
+ aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
+ aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
+ aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
+ return 0;
+}
+
+/**
+ * i40iw_sc_aeq_create - create aeq
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = aeq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 32,
+ (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
+ set_64bit_val(wqe, 48,
+ (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+
+ header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
+ LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_aeq_destroy - destroy aeq during close
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = aeq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+ header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
+ LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_get_next_aeqe - get next aeq entry
+ * @aeq: aeq structure ptr
+ * @info: aeqe info to be returned
+ */
+static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
+ struct i40iw_aeqe_info *info)
+{
+ u64 temp, compl_ctx;
+ u64 *aeqe;
+ u16 wqe_idx;
+ u8 ae_src;
+ u8 polarity;
+
+ aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
+ get_64bit_val(aeqe, 0, &compl_ctx);
+ get_64bit_val(aeqe, 8, &temp);
+ polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);
+
+ if (aeq->polarity != polarity)
+ return I40IW_ERR_QUEUE_EMPTY;
+
+ i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);
+
+ ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
+ wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
+ info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
+ info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
+ info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
+ info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
+ info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
+ info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
+ switch (ae_src) {
+ case I40IW_AE_SOURCE_RQ:
+ case I40IW_AE_SOURCE_RQ_0011:
+ info->qp = true;
+ info->wqe_idx = wqe_idx;
+ info->compl_ctx = compl_ctx;
+ break;
+ case I40IW_AE_SOURCE_CQ:
+ case I40IW_AE_SOURCE_CQ_0110:
+ case I40IW_AE_SOURCE_CQ_1010:
+ case I40IW_AE_SOURCE_CQ_1110:
+ info->cq = true;
+ info->compl_ctx = LS_64_1(compl_ctx, 1);
+ break;
+ case I40IW_AE_SOURCE_SQ:
+ case I40IW_AE_SOURCE_SQ_0111:
+ info->qp = true;
+ info->sq = true;
+ info->wqe_idx = wqe_idx;
+ info->compl_ctx = compl_ctx;
+ break;
+ case I40IW_AE_SOURCE_IN_RR_WR:
+ case I40IW_AE_SOURCE_IN_RR_WR_1011:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
+ case I40IW_AE_SOURCE_OUT_RR:
+ case I40IW_AE_SOURCE_OUT_RR_1111:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->out_rdrsp = true;
+ break;
+ default:
+ break;
+ }
+ I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
+ if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
+ aeq->polarity ^= 1;
+ return 0;
+}
+
+/**
+ * i40iw_sc_repost_aeq_entries - repost completed aeq entries
+ * @dev: sc device struct
+ * @count: allocate count
+ */
+static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
+ u32 count)
+{
+ if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
+ return I40IW_ERR_INVALID_SIZE;
+
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
+ else
+ i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
+
+ return 0;
+}
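+
+/*
+ * Editorial usage sketch, not part of this patch: pairing the two
+ * functions above by pulling all pending async events and reposting the
+ * consumed entries in one write. The helper name is an illustrative
+ * assumption; counts above I40IW_MAX_AEQ_ALLOCATE_COUNT would be
+ * rejected by i40iw_sc_repost_aeq_entries.
+ */
+static void i40iw_example_drain_aeq(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_aeq *aeq)
+{
+ struct i40iw_aeqe_info info;
+ u32 processed = 0;
+
+ memset(&info, 0, sizeof(info));
+ while (!i40iw_sc_get_next_aeqe(aeq, &info)) {
+ processed++;
+ /* dispatch on info.ae_id; info.qp/info.cq tell the source */
+ memset(&info, 0, sizeof(info));
+ }
+ if (processed)
+ i40iw_sc_repost_aeq_entries(dev, processed);
+}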
+
+/**
+ * i40iw_sc_aeq_create_done - poll for aeq create cqp wqe to complete
+ * @aeq: aeq structure ptr
+ */
+static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = aeq->dev->cqp;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
+}
+
+/**
+ * i40iw_sc_aeq_destroy_done - poll for aeq destroy cqp wqe to complete during close
+ * @aeq: aeq structure ptr
+ */
+static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = aeq->dev->cqp;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
+}
+
+/**
+ * i40iw_sc_ccq_init - initialize control cq
+ * @cq: sc cq struct
+ * @info: info for control cq initialization
+ */
+static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
+ struct i40iw_ccq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
+ return I40IW_ERR_INVALID_SIZE;
+
+ if (info->ceq_id > I40IW_MAX_CEQID)
+ return I40IW_ERR_INVALID_CEQ_ID;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ cq->cq_pa = info->cq_pa;
+ cq->cq_uk.cq_base = info->cq_base;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->cq_uk.shadow_area = info->shadow_area;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ cq->cq_uk.cq_size = info->num_elem;
+ cq->cq_type = I40IW_CQ_TYPE_CQP;
+ cq->ceqe_mask = info->ceqe_mask;
+ I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
+
+ cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
+
+ cq->pbl_list = info->pbl_list;
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->cq_uk.polarity = true;
+
+ /* following are only for iw cqs so initialize them to zero */
+ cq->cq_uk.cqe_alloc_reg = NULL;
+ info->dev->ccq = cq;
+ return 0;
+}
+
+/**
+ * i40iw_sc_ccq_create_done - poll cqp for ccq create
+ * @ccq: ccq sc struct
+ */
+static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
+{
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = ccq->dev->cqp;
+ return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
+}
+
+/**
+ * i40iw_sc_ccq_create - create control cq
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: overflow flag for ccq
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
+ u64 scratch,
+ bool check_overflow,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ enum i40iw_status_code ret_code;
+
+ cqp = ccq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
+ set_64bit_val(wqe, 16,
+ LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
+ set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+ set_64bit_val(wqe, 48,
+ (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+ header = ccq->cq_uk.cq_id |
+ LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+ LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ ret_code = i40iw_sc_ccq_create_done(ccq);
+ if (ret_code)
+ return ret_code;
+ }
+ cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_ccq_destroy - destroy ccq during close
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+ enum i40iw_status_code ret_code = 0;
+ u32 tail, val, error;
+
+ cqp = ccq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
+ set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+
+ header = ccq->cq_uk.cq_id |
+ LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_cq_init - initialize completion q
+ * @cq: cq struct
+ * @info: cq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
+ struct i40iw_cq_init_info *info)
+{
+ u32 __iomem *cqe_alloc_reg = NULL;
+ enum i40iw_status_code ret_code;
+ u32 pble_obj_cnt;
+ u32 arm_offset;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ cq->cq_pa = info->cq_base_pa;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
+ if (i40iw_get_hw_addr(cq->dev))
+ cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
+ arm_offset);
+ info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
+ ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
+ if (ret_code)
+ return ret_code;
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->ceqe_mask = info->ceqe_mask;
+ cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
+
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_cq_create - create completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: flag for overflow check
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
+ u64 scratch,
+ bool check_overflow,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
+ return I40IW_ERR_INVALID_CQ_ID;
+
+ if (cq->ceq_id > I40IW_MAX_CEQID)
+ return I40IW_ERR_INVALID_CEQ_ID;
+
+ cqp = cq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe,
+ 16,
+ LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+
+ set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+ header = cq->cq_uk.cq_id |
+ LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cq_destroy - destroy completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+
+ cqp = cq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+
+ header = cq->cq_uk.cq_id |
+ LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_cq_modify - modify completion queue
+ * @cq: cq struct
+ * @info: modification info struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
+ struct i40iw_modify_cq_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+ u32 cq_size, ceq_id, first_pm_pbl_idx;
+ u8 pbl_chunk_size;
+ bool virtual_map, ceq_id_valid, check_overflow;
+ u32 pble_obj_cnt;
+
+ if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
+ return I40IW_ERR_INVALID_CEQ_ID;
+
+ pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->cq_resize && info->virtual_map &&
+ (info->first_pm_pbl_idx >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ cqp = cq->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ cq->pbl_list = info->pbl_list;
+ cq->cq_pa = info->cq_pa;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+
+ cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
+ if (info->ceq_change) {
+ ceq_id_valid = true;
+ ceq_id = info->ceq_id;
+ } else {
+ ceq_id_valid = cq->ceq_id_valid;
+ ceq_id = ceq_id_valid ? cq->ceq_id : 0;
+ }
+ virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
+ first_pm_pbl_idx = (info->cq_resize ?
+ (info->virtual_map ? info->first_pm_pbl_idx : 0) :
+ (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+ pbl_chunk_size = (info->cq_resize ?
+ (info->virtual_map ? info->pbl_chunk_size : 0) :
+ (cq->virtual_map ? cq->pbl_chunk_size : 0));
+ check_overflow = info->check_overflow_change ? info->check_overflow :
+ cq->check_overflow;
+ cq->cq_uk.cq_size = cq_size;
+ cq->ceq_id_valid = ceq_id_valid;
+ cq->ceq_id = ceq_id;
+ cq->virtual_map = virtual_map;
+ cq->first_pm_pbl_idx = first_pm_pbl_idx;
+ cq->pbl_chunk_size = pbl_chunk_size;
+ cq->check_overflow = check_overflow;
+
+ set_64bit_val(wqe, 0, cq_size);
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, 16,
+ LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+ header = cq->cq_uk.cq_id |
+ LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
+ LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
+ LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+ LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+ LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+ LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_init - initialize qp
+ * @qp: sc qp
+ * @info: initialization qp info
+ */
+static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
+ struct i40iw_qp_init_info *info)
+{
+ u32 __iomem *wqe_alloc_reg = NULL;
+ enum i40iw_status_code ret_code;
+ u32 pble_obj_cnt;
+ u8 wqe_size;
+ u32 offset;
+
+ qp->dev = info->pd->dev;
+ qp->sq_pa = info->sq_pa;
+ qp->rq_pa = info->rq_pa;
+ qp->hw_host_ctx_pa = info->host_ctx_pa;
+ qp->q2_pa = info->q2_pa;
+ qp->shadow_area_pa = info->shadow_area_pa;
+
+ qp->q2_buf = info->q2;
+ qp->pd = info->pd;
+ qp->hw_host_ctx = info->host_ctx;
+ offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
+ if (i40iw_get_hw_addr(qp->pd->dev))
+ wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+ offset);
+
+ info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
+ ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
+ if (ret_code)
+ return ret_code;
+ qp->virtual_map = info->virtual_map;
+
+ pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
+ (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ qp->llp_stream_handle = (void *)(-1);
+ qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
+
+ qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
+ false);
+ i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
+ __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
+ ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+ qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
+ (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
+ i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
+ "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
+ __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
+ qp->sq_tph_val = info->sq_tph_val;
+ qp->rq_tph_val = info->rq_tph_val;
+ qp->sq_tph_en = info->sq_tph_en;
+ qp->rq_tph_en = info->rq_tph_en;
+ qp->rcv_tph_en = info->rcv_tph_en;
+ qp->xmit_tph_en = info->xmit_tph_en;
+ qp->qs_handle = qp->pd->dev->qs_handle;
+ qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
+
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_create - create qp
+ * @qp: sc qp
+ * @info: qp create info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_create(
+ struct i40iw_sc_qp *qp,
+ struct i40iw_create_qp_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+
+ if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
+ (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
+ return I40IW_ERR_INVALID_QP_ID;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
+ LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
+ LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+ LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
+ LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
+ LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
+ LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
+ LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_modify - modify qp cqp wqe
+ * @qp: sc qp
+ * @info: modify qp info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_modify(
+ struct i40iw_sc_qp *qp,
+ struct i40iw_modify_qp_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ u8 term_actions = 0;
+ u8 term_len = 0;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
+ if (info->dont_send_fin)
+ term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
+ if (info->dont_send_term)
+ term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
+ if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
+ (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
+ term_len = info->termlen;
+ }
+
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
+ LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
+ LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
+ LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
+ LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
+ LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
+ LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
+ LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+ LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
+ LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
+ LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
+ LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
+ LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
+ LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
+ LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_destroy - cqp destroy qp
+ * @qp: sc qp
+ * @scratch: u64 saved to be used during cqp completion
+ * @remove_hash_idx: flag to remove hash idx
+ * @ignore_mw_bnd: memory window bind flag
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_destroy(
+ struct i40iw_sc_qp *qp,
+ u64 scratch,
+ bool remove_hash_idx,
+ bool ignore_mw_bnd,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+ LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
+ LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_flush_wqes - flush qp's wqes
+ * @qp: sc qp
+ * @info: flush information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
+ struct i40iw_sc_qp *qp,
+ struct i40iw_qp_flush_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 temp = 0;
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ bool flush_sq = false, flush_rq = false;
+
+ if (info->rq && !qp->flush_rq)
+ flush_rq = true;
+
+ if (info->sq && !qp->flush_sq)
+ flush_sq = true;
+
+ qp->flush_sq |= flush_sq;
+ qp->flush_rq |= flush_rq;
+ if (!flush_sq && !flush_rq) {
+ if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
+ return 0;
+ }
+
+ cqp = qp->pd->dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ if (info->userflushcode) {
+ if (flush_rq) {
+ temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
+ LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
+ }
+ if (flush_sq) {
+ temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
+ LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
+ }
+ }
+ set_64bit_val(wqe, 16, temp);
+
+ temp = (info->generate_ae) ?
+ info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
+
+ set_64bit_val(wqe, 8, temp);
+
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
+ LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
+ LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
+ LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
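+
+/*
+ * Editorial usage sketch, not part of this patch: flushing both work
+ * queues of a qp. The helper name is an illustrative assumption; note
+ * that the flush_sq/flush_rq latches above make a repeat request for an
+ * already-flushed queue a no-op.
+ */
+static enum i40iw_status_code i40iw_example_flush_qp(struct i40iw_sc_qp *qp,
+ u64 scratch)
+{
+ struct i40iw_qp_flush_info finfo;
+
+ memset(&finfo, 0, sizeof(finfo));
+ finfo.sq = true;
+ finfo.rq = true;
+ return i40iw_sc_qp_flush_wqes(qp, &finfo, scratch, true);
+}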
+
+/**
+ * i40iw_sc_qp_upload_context - upload qp's context
+ * @dev: sc device struct
+ * @info: upload context info ptr for return
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_upload_context(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_upload_context_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 16, info->buf_pa);
+
+ header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
+ LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
+ LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
+ LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_qp_setctx - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+static enum i40iw_status_code i40iw_sc_qp_setctx(
+ struct i40iw_sc_qp *qp,
+ u64 *qp_ctx,
+ struct i40iw_qp_host_ctx_info *info)
+{
+ struct i40iwarp_offload_info *iw;
+ struct i40iw_tcp_offload_info *tcp;
+ u64 qw0, qw3, qw7 = 0;
+
+ iw = info->iwarp_info;
+ tcp = info->tcp_info;
+ qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
+ LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
+ LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
+ LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
+ LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
+ LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
+ LS_64(info->push_idx, I40IWQPC_PPIDX) |
+ LS_64(info->push_mode_en, I40IWQPC_PMENA);
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+ qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
+ LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
+ LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
+
+ set_64bit_val(qp_ctx,
+ 128,
+ LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
+
+ set_64bit_val(qp_ctx,
+ 136,
+ LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
+ LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
+
+ set_64bit_val(qp_ctx,
+ 168,
+ LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
+ set_64bit_val(qp_ctx,
+ 176,
+ LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
+ LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
+ LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
+ LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
+
+ if (info->iwarp_info_valid) {
+ qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
+ LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
+
+ qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
+ set_64bit_val(qp_ctx, 144, qp->q2_pa);
+ set_64bit_val(qp_ctx,
+ 152,
+ LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
+
+ /*
+ * Hard-code IRD_SIZE to the hw limit, 128, in qpctx, i.e. matching an
+ * advertisable IRD of 64
+ */
+ iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
+ set_64bit_val(qp_ctx,
+ 160,
+ LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
+ LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
+ LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
+ LS_64(iw->rd_enable, I40IWQPC_RDOK) |
+ LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
+ LS_64(iw->bind_en, I40IWQPC_BINDEN) |
+ LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
+ LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
+ LS_64(1, I40IWQPC_IWARPMODE) |
+ LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
+ LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
+ LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
+ LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
+ LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
+ }
+ if (info->tcp_info_valid) {
+ qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
+ LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
+ LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
+ LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
+ LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
+ LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
+ LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
+
+ qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
+ LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
+ LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
+ LS_64(tcp->tos, I40IWQPC_TOS) |
+ LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
+ LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
+
+ qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
+ set_64bit_val(qp_ctx,
+ 32,
+ LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
+ LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
+
+ set_64bit_val(qp_ctx,
+ 40,
+ LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
+ LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
+
+ set_64bit_val(qp_ctx,
+ 48,
+ LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
+ LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
+ LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
+
+ qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
+ LS_64(tcp->wscale, I40IWQPC_WSCALE) |
+ LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
+ LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
+ LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
+ LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
+ LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
+
+ set_64bit_val(qp_ctx,
+ 72,
+ LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
+ LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
+ set_64bit_val(qp_ctx,
+ 80,
+ LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
+ LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
+
+ set_64bit_val(qp_ctx,
+ 88,
+ LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
+ LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
+ set_64bit_val(qp_ctx,
+ 96,
+ LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
+ LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
+ set_64bit_val(qp_ctx,
+ 104,
+ LS_64(tcp->srtt, I40IWQPC_SRTT) |
+ LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
+ set_64bit_val(qp_ctx,
+ 112,
+ LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
+ LS_64(tcp->cwnd, I40IWQPC_CWND));
+ set_64bit_val(qp_ctx,
+ 120,
+ LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
+ LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
+ set_64bit_val(qp_ctx,
+ 128,
+ LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
+ LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
+ set_64bit_val(qp_ctx,
+ 184,
+ LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
+ LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
+ set_64bit_val(qp_ctx,
+ 192,
+ LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
+ LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
+ }
+
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 56, qw7);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST_CTX WQE",
+ qp_ctx, I40IW_QP_CTX_SIZE);
+ return 0;
+}
+
+/**
+ * i40iw_sc_alloc_stag - mr stag alloc
+ * @dev: sc device struct
+ * @info: stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_alloc_stag(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_allocate_stag_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
+ LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
+ set_64bit_val(wqe,
+ 16,
+ LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+ set_64bit_val(wqe,
+ 40,
+ LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
+
+ header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_STAG_MR) |
+ LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+ LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+ LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
+ LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_mr_reg_non_shared - non-shared mr registration
+ * @dev: sc device struct
+ * @info: mr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_reg_ns_stag_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 temp;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ u32 pble_obj_cnt;
+ bool remote_access;
+ u8 addr_type;
+
+ if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+
+ pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+ if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
+ return I40IW_ERR_INVALID_PBLE_INDEX;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
+ set_64bit_val(wqe, 0, temp);
+
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+
+ set_64bit_val(wqe,
+ 16,
+ LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
+ LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+ if (!info->chunk_size) {
+ set_64bit_val(wqe, 32, info->reg_addr_pa);
+ set_64bit_val(wqe, 48, 0);
+ } else {
+ set_64bit_val(wqe, 32, 0);
+ set_64bit_val(wqe, 48, info->first_pm_pbl_index);
+ }
+ set_64bit_val(wqe, 40, info->hmc_fcn_index);
+ set_64bit_val(wqe, 56, 0);
+
+ addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
+ header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_STAG_MR) |
+ LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+ LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+ LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
+ LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
+ LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_mr_reg_shared - register shared memory region
+ * @dev: sc device struct
+ * @info: info for shared memory registration
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mr_reg_shared(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_register_shared_stag *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 temp, va64, fbo, header;
+ u32 va32;
+ bool remote_access;
+ u8 addr_type;
+
+ if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ va64 = (uintptr_t)(info->va);
+ va32 = (u32)(va64 & 0x00000000FFFFFFFF);
+ fbo = (u64)(va32 & (4096 - 1));
+
+ set_64bit_val(wqe,
+ 0,
+ (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
+
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+ temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
+ LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
+ LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
+ set_64bit_val(wqe, 16, temp);
+
+ addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
+ header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_STAG_MR) |
+ LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+ LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
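+
+/*
+ * Editorial note on the fbo computed above (illustrative, not part of
+ * this patch): it is the first-byte offset of the VA within its 4K page,
+ * e.g. va = 0x7f12345678 gives fbo = 0x678, so a zero-based shared stag
+ * still resolves to the correct byte inside the first page.
+ */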
+
+/**
+ * i40iw_sc_dealloc_stag - deallocate stag
+ * @dev: sc device struct
+ * @info: dealloc stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_dealloc_stag(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_dealloc_stag_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 header;
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe,
+ 16,
+ LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+
+ header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+ LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_query_stag - query hardware for stag
+ * @dev: sc device struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @stag_index: stag index for query
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
+ u64 scratch,
+ u32 stag_index,
+ bool post_sq)
+{
+ u64 header;
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 16,
+ LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
+
+ header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_mw_alloc - allocate a memory window
+ * @dev: sc device struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @mw_stag_index: stag index
+ * @pd_id: pd id for this mw
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mw_alloc(
+ struct i40iw_sc_dev *dev,
+ u64 scratch,
+ u32 mw_stag_index,
+ u16 pd_id,
+ bool post_sq)
+{
+ u64 header;
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe,
+ 16,
+ LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
+
+ header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_send_lsmm - send last streaming mode message
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ * @stag: stag of lsmm buffer
+ */
+static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
+ void *lsmm_buf,
+ u32 size,
+ i40iw_stag stag)
+{
+ u64 *wqe;
+ u64 header;
+ struct i40iw_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
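+	/* the lsmm is posted as a single fragment in wqe 0: buffer address
+	 * at offset 0, length and stag at offset 8
+	 */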
+ set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+
+ set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
+
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+ LS_64(1, I40IWQPSQ_STREAMMODE) |
+ LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
+ wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_send_lsmm_nostag - send last streaming mode message without a stag (privileged qp)
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ */
+static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
+ void *lsmm_buf,
+ u32 size)
+{
+ u64 *wqe;
+ u64 header;
+ struct i40iw_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+
+ set_64bit_val(wqe, 8, size);
+
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+ LS_64(1, I40IWQPSQ_STREAMMODE) |
+ LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
+ wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_send_rtt - send last read0 or write0
+ * @qp: sc qp struct
+ * @read: true to send read0, false to send write0
+ */
+static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
+{
+ u64 *wqe;
+ u64 header;
+ struct i40iw_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ if (read) {
+ header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
+ LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+ set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
+ } else {
+ header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+ }
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
+ wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_post_wqe0 - post wqe 0 with the given opcode
+ * @qp: sc qp struct
+ * @opcode: opcode to use for wqe0
+ */
+static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
+{
+ u64 *wqe;
+ u64 header;
+ struct i40iw_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+ switch (opcode) {
+ case I40IWQP_OP_NOP:
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ break;
+ case I40IWQP_OP_RDMA_SEND:
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
+ LS_64(1, I40IWQPSQ_STREAMMODE) |
+ LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+ break;
+ default:
+ i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
+ __func__);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
+ * @dev: ptr to i40iw_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_dma_mem query_fpm_mem;
+ struct i40iw_virt_mem virt_mem;
+ struct i40iw_vfdev *vf_dev = NULL;
+ u32 mem_size;
+ enum i40iw_status_code ret_code = 0;
+ bool poll_registers = true;
+ u16 iw_vf_idx;
+ u8 wait_type;
+
+ if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
+ (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
+ return I40IW_ERR_INVALID_HMCFN_ID;
+
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
+ dev->hmc_fn_id);
+ if (hmc_fn_id == dev->hmc_fn_id) {
+ hmc_info = dev->hmc_info;
+ query_fpm_mem.pa = dev->fpm_query_buf_pa;
+ query_fpm_mem.va = dev->fpm_query_buf;
+ } else {
+ vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
+ if (!vf_dev)
+ return I40IW_ERR_INVALID_VF_ID;
+
+ hmc_info = &vf_dev->hmc_info;
+ iw_vf_idx = vf_dev->iw_vf_idx;
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
+ hmc_info, hmc_info->hmc_obj);
+ if (!vf_dev->fpm_query_buf) {
+ if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
+ ret_code = i40iw_alloc_query_fpm_buf(dev,
+ &dev->vf_fpm_query_buf[iw_vf_idx]);
+ if (ret_code)
+ return ret_code;
+ }
+ vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
+ vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
+ }
+ query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
+ query_fpm_mem.va = vf_dev->fpm_query_buf;
+		/* hardware specific: this call is issued by the PF on
+		 * behalf of the VF, and i40iw_sc_query_fpm_values must
+		 * poll the ccq because the PF ccq is already created
+		 */
+ poll_registers = false;
+ }
+
+ hmc_info->hmc_fn_id = hmc_fn_id;
+
+ if (hmc_fn_id != dev->hmc_fn_id) {
+ ret_code =
+ i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
+ } else {
+ wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
+ (u8)I40IW_CQP_WAIT_POLL_CQ;
+
+ ret_code = i40iw_sc_query_fpm_values(
+ dev->cqp,
+ 0,
+ hmc_info->hmc_fn_id,
+ &query_fpm_mem,
+ true,
+ wait_type);
+ }
+ if (ret_code)
+ return ret_code;
+
+ /* parse the fpm_query_buf and fill hmc obj info */
+ ret_code =
+ i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
+ hmc_info,
+ &dev->hmc_fpm_misc);
+ if (ret_code)
+ return ret_code;
+ i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
+ query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
+
+ if (hmc_fn_id != dev->hmc_fn_id) {
+ i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
+
+ /* parse the fpm_commit_buf and fill hmc obj info */
+ i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj);
+ mem_size = sizeof(struct i40iw_hmc_sd_entry) *
+ (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
+ ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
+ if (ret_code)
+ return ret_code;
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+ }
+
+ /* fill size of objects which are fixed */
+ hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
+ hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+ hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
+ * populates fpm base address in hmc_info
+ * @dev: ptr to i40iw_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id)
+{
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_hmc_obj_info *obj_info;
+ u64 *buf;
+ struct i40iw_dma_mem commit_fpm_mem;
+ u32 i, j;
+ enum i40iw_status_code ret_code = 0;
+ bool poll_registers = true;
+ u8 wait_type;
+
+ if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
+ (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
+ return I40IW_ERR_INVALID_HMCFN_ID;
+
+ if (hmc_fn_id == dev->hmc_fn_id) {
+ hmc_info = dev->hmc_info;
+ } else {
+ hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
+ poll_registers = false;
+ }
+ if (!hmc_info)
+ return I40IW_ERR_BAD_PTR;
+
+ obj_info = hmc_info->hmc_obj;
+ buf = dev->fpm_commit_buf;
+
+ /* copy cnt values in commit buf */
+ for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
+ i++, j += 8)
+ set_64bit_val(buf, j, (u64)obj_info[i].cnt);
+
+ set_64bit_val(buf, 40, 0); /* APBVT rsvd */
+
+ commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
+ commit_fpm_mem.va = dev->fpm_commit_buf;
+ wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
+ (u8)I40IW_CQP_WAIT_POLL_CQ;
+ ret_code = i40iw_sc_commit_fpm_values(
+ dev->cqp,
+ 0,
+ hmc_info->hmc_fn_id,
+ &commit_fpm_mem,
+ true,
+ wait_type);
+
+ /* parse the fpm_commit_buf and fill hmc obj info */
+ if (!ret_code)
+ ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf, hmc_info->hmc_obj);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
+ commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
+
+ return ret_code;
+}
+
+/**
+ * cqp_sds_wqe_fill - fill cqp wqe for sd
+ * @cqp: struct for cqp hw
+ * @info: sd info for wqe
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
+ struct i40iw_update_sds_info *info,
+ u64 scratch)
+{
+ u64 data;
+ u64 header;
+ u64 *wqe;
+ int mem_entries, wqe_entries;
+ struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ I40IW_CQP_INIT_WQE(wqe);
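+	/* up to 3 sd entries are carried inline in the wqe; any remainder
+	 * is passed to hardware through the sdbuf dma buffer
+	 */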
+ wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
+ mem_entries = info->cnt - wqe_entries;
+
+ header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+ LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
+
+ if (mem_entries) {
+ memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
+ data = sdbuf->pa;
+ } else {
+ data = 0;
+ }
+ data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
+
+ set_64bit_val(wqe, 16, data);
+
+ switch (wqe_entries) {
+ case 3:
+ set_64bit_val(wqe, 48,
+ (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
+ LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
+
+ set_64bit_val(wqe, 56, info->entry[2].data);
+ /* fallthrough */
+ case 2:
+ set_64bit_val(wqe, 32,
+ (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
+ LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
+
+ set_64bit_val(wqe, 40, info->entry[1].data);
+ /* fallthrough */
+ case 1:
+ set_64bit_val(wqe, 0,
+ LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
+
+ set_64bit_val(wqe, 8, info->entry[0].data);
+ break;
+ default:
+ break;
+ }
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ return 0;
+}
+
+/**
+ * i40iw_update_pe_sds - post cqp wqe to update the sds
+ * @dev: ptr to i40iw_dev struct
+ * @info: sd info for the sd entries
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *info,
+ u64 scratch)
+{
+ struct i40iw_sc_cqp *cqp = dev->cqp;
+ enum i40iw_status_code ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
+ if (!ret_code)
+ i40iw_sc_cqp_post_sq(cqp);
+
+ return ret_code;
+}
+
+/**
+ * i40iw_update_sds_noccq - update sd before ccq created
+ * @dev: sc device struct
+ * @info: sd info for the sd entries
+ */
+enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *info)
+{
+ u32 error, val, tail;
+ struct i40iw_sc_cqp *cqp = dev->cqp;
+ enum i40iw_status_code ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, 0);
+ if (ret_code)
+ return ret_code;
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error)
+ return I40IW_ERR_CQP_COMPL_ERROR;
+
+ i40iw_sc_cqp_post_sq(cqp);
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+
+ return ret_code;
+}
+
+/**
+ * i40iw_sc_suspend_qp - suspend qp for param change
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_sc_qp *qp,
+ u64 scratch)
+{
+ u64 header;
+ u64 *wqe;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
+ LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_resume_qp - resume qp after suspend
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_sc_qp *qp,
+ u64 scratch)
+{
+ u64 header;
+ u64 *wqe;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 16,
+ LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
+
+ header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
+ LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
+ struct i40iw_sc_cqp *cqp,
+ u64 scratch,
+ u8 hmc_fn_id,
+ bool post_sq,
+ bool poll_registers)
+{
+ u64 header;
+ u64 *wqe;
+ u32 tail, val, error;
+ enum i40iw_status_code ret_code = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ set_64bit_val(wqe,
+ 16,
+ LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
+
+ header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+ i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+ if (error) {
+ ret_code = I40IW_ERR_CQP_COMPL_ERROR;
+ return ret_code;
+ }
+ if (post_sq) {
+ i40iw_sc_cqp_post_sq(cqp);
+ if (poll_registers)
+ /* check for cqp sq tail update */
+ ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
+ else
+ ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
+ I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
+ NULL);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_ring_full - check if cqp ring is full
+ * @cqp: struct for cqp hw
+ */
+static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
+{
+ return I40IW_RING_FULL_ERR(cqp->sq_ring);
+}
+
+/**
+ * i40iw_config_fpm_values - configure HMC objects
+ * @dev: sc device struct
+ * @qp_count: desired qp count
+ */
+enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
+{
+ struct i40iw_virt_mem virt_mem;
+ u32 i, mem_size;
+ u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
+ u32 powerof2;
+ u64 sd_needed, bytes_needed;
+ u32 loop_count = 0;
+
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
+ enum i40iw_status_code ret_code = 0;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "i40iw_sc_init_iw_hmc returned error_code = %d\n",
+ ret_code);
+ return ret_code;
+ }
+
+ bytes_needed = 0;
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+ bytes_needed +=
+ (hmc_info->hmc_obj[i].max_cnt) * (hmc_info->hmc_obj[i].size);
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s i[%04d] max_cnt[0x%04X] size[0x%04llx]\n",
+ __func__, i, hmc_info->hmc_obj[i].max_cnt,
+ hmc_info->hmc_obj[i].size);
+ }
+ sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up */
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
+ __func__, sd_needed, hmc_info->first_sd_index);
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: bytes_needed=0x%llx sd count %d where max sd is %d\n",
+ __func__, bytes_needed, hmc_info->sd_table.sd_cnt,
+ hmc_fpm_misc->max_sds);
+
+ qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
+ qpwantedoriginal = qpwanted;
+ mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
+ pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
+
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
+ qp_count, hmc_fpm_misc->max_sds,
+ hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
+
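+	/* iteratively trim the wanted qp, mr and pble counts until the
+	 * sd (segment descriptor) requirement fits within max_sds
+	 */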
+ do {
+ ++loop_count;
+ hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
+ min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
+ hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
+ hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
+ qpwanted * hmc_fpm_misc->ht_multiplier;
+ hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
+ hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
+ hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
+ hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
+
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+ hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
+ ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
+ hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
+ hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
+
+ /* How much memory is needed for all the objects. */
+ bytes_needed = 0;
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
+ bytes_needed +=
+ (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
+ sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;
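+		/* back off: after 1000 passes shrink qpwanted on every pass;
+		 * before that, shrink it every 10th pass while it is still
+		 * above two thirds of the original request
+		 */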
+ if ((loop_count > 1000) ||
+ ((!(loop_count % 10)) &&
+ (qpwanted > qpwantedoriginal * 2 / 3))) {
+ if (qpwanted > FPM_MULTIPLIER) {
+ qpwanted -= FPM_MULTIPLIER;
+ powerof2 = 1;
+ while (powerof2 < qpwanted)
+ powerof2 *= 2;
+ powerof2 /= 2;
+ qpwanted = powerof2;
+ } else {
+ qpwanted /= 2;
+ }
+ }
+ if (mrwanted > FPM_MULTIPLIER * 10)
+ mrwanted -= FPM_MULTIPLIER * 10;
+ if (pblewanted > FPM_MULTIPLIER * 1000)
+ pblewanted -= FPM_MULTIPLIER * 1000;
+ } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
+
+ bytes_needed = 0;
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
+ bytes_needed += (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s i[%04d] cnt[0x%04x] size[0x%04llx]\n",
+ __func__, i, hmc_info->hmc_obj[i].cnt,
+ hmc_info->hmc_obj[i].size);
+ }
+	sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up, don't truncate */
+
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
+ loop_count, sd_needed,
+ hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
+
+ ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "configure_iw_fpm returned error_code[x%08X]\n",
+ i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
+ return ret_code;
+ }
+
+ hmc_info->sd_table.sd_cnt = (u32)sd_needed;
+
+ mem_size = sizeof(struct i40iw_hmc_sd_entry) *
+ (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
+ ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: failed to allocate memory for sd_entry buffer\n",
+ __func__);
+ return ret_code;
+ }
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return ret_code;
+}
+
+/**
+ * i40iw_exec_cqp_cmd - execute a cqp cmd when a wqe is available
+ * @dev: rdma device
+ * @pcmdinfo: cqp command info
+ */
+static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
+ struct cqp_commands_info *pcmdinfo)
+{
+ enum i40iw_status_code status;
+ struct i40iw_dma_mem values_mem;
+
+ dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
+ switch (pcmdinfo->cqp_cmd) {
+ case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
+ status = i40iw_sc_del_local_mac_ipaddr_entry(
+ pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
+ pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
+ pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
+ pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
+ pcmdinfo->post_sq);
+ break;
+ case OP_CEQ_DESTROY:
+ status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
+ pcmdinfo->in.u.ceq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_AEQ_DESTROY:
+ status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
+ pcmdinfo->in.u.aeq_destroy.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_DELETE_ARP_CACHE_ENTRY:
+ status = i40iw_sc_del_arp_cache_entry(
+ pcmdinfo->in.u.del_arp_cache_entry.cqp,
+ pcmdinfo->in.u.del_arp_cache_entry.scratch,
+ pcmdinfo->in.u.del_arp_cache_entry.arp_index,
+ pcmdinfo->post_sq);
+ break;
+ case OP_MANAGE_APBVT_ENTRY:
+ status = i40iw_sc_manage_apbvt_entry(
+ pcmdinfo->in.u.manage_apbvt_entry.cqp,
+ &pcmdinfo->in.u.manage_apbvt_entry.info,
+ pcmdinfo->in.u.manage_apbvt_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_CEQ_CREATE:
+ status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
+ pcmdinfo->in.u.ceq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_AEQ_CREATE:
+ status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
+ pcmdinfo->in.u.aeq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
+ status = i40iw_sc_alloc_local_mac_ipaddr_entry(
+ pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
+ pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
+ status = i40iw_sc_add_local_mac_ipaddr_entry(
+ pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
+ &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
+ pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_MANAGE_QHASH_TABLE_ENTRY:
+ status = i40iw_sc_manage_qhash_table_entry(
+ pcmdinfo->in.u.manage_qhash_table_entry.cqp,
+ &pcmdinfo->in.u.manage_qhash_table_entry.info,
+ pcmdinfo->in.u.manage_qhash_table_entry.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_QP_MODIFY:
+ status = i40iw_sc_qp_modify(
+ pcmdinfo->in.u.qp_modify.qp,
+ &pcmdinfo->in.u.qp_modify.info,
+ pcmdinfo->in.u.qp_modify.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_QP_UPLOAD_CONTEXT:
+ status = i40iw_sc_qp_upload_context(
+ pcmdinfo->in.u.qp_upload_context.dev,
+ &pcmdinfo->in.u.qp_upload_context.info,
+ pcmdinfo->in.u.qp_upload_context.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_CQ_CREATE:
+ status = i40iw_sc_cq_create(
+ pcmdinfo->in.u.cq_create.cq,
+ pcmdinfo->in.u.cq_create.scratch,
+ pcmdinfo->in.u.cq_create.check_overflow,
+ pcmdinfo->post_sq);
+ break;
+ case OP_CQ_DESTROY:
+ status = i40iw_sc_cq_destroy(
+ pcmdinfo->in.u.cq_destroy.cq,
+ pcmdinfo->in.u.cq_destroy.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_QP_CREATE:
+ status = i40iw_sc_qp_create(
+ pcmdinfo->in.u.qp_create.qp,
+ &pcmdinfo->in.u.qp_create.info,
+ pcmdinfo->in.u.qp_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_QP_DESTROY:
+ status = i40iw_sc_qp_destroy(
+ pcmdinfo->in.u.qp_destroy.qp,
+ pcmdinfo->in.u.qp_destroy.scratch,
+ pcmdinfo->in.u.qp_destroy.remove_hash_idx,
+			pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_ALLOC_STAG:
+ status = i40iw_sc_alloc_stag(
+ pcmdinfo->in.u.alloc_stag.dev,
+ &pcmdinfo->in.u.alloc_stag.info,
+ pcmdinfo->in.u.alloc_stag.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_MR_REG_NON_SHARED:
+ status = i40iw_sc_mr_reg_non_shared(
+ pcmdinfo->in.u.mr_reg_non_shared.dev,
+ &pcmdinfo->in.u.mr_reg_non_shared.info,
+ pcmdinfo->in.u.mr_reg_non_shared.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_DEALLOC_STAG:
+ status = i40iw_sc_dealloc_stag(
+ pcmdinfo->in.u.dealloc_stag.dev,
+ &pcmdinfo->in.u.dealloc_stag.info,
+ pcmdinfo->in.u.dealloc_stag.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_MW_ALLOC:
+ status = i40iw_sc_mw_alloc(
+ pcmdinfo->in.u.mw_alloc.dev,
+ pcmdinfo->in.u.mw_alloc.scratch,
+ pcmdinfo->in.u.mw_alloc.mw_stag_index,
+ pcmdinfo->in.u.mw_alloc.pd_id,
+ pcmdinfo->post_sq);
+
+ break;
+ case OP_QP_FLUSH_WQES:
+ status = i40iw_sc_qp_flush_wqes(
+ pcmdinfo->in.u.qp_flush_wqes.qp,
+ &pcmdinfo->in.u.qp_flush_wqes.info,
+			pcmdinfo->in.u.qp_flush_wqes.scratch,
+			pcmdinfo->post_sq);
+ break;
+ case OP_ADD_ARP_CACHE_ENTRY:
+ status = i40iw_sc_add_arp_cache_entry(
+ pcmdinfo->in.u.add_arp_cache_entry.cqp,
+ &pcmdinfo->in.u.add_arp_cache_entry.info,
+ pcmdinfo->in.u.add_arp_cache_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_MANAGE_PUSH_PAGE:
+ status = i40iw_sc_manage_push_page(
+ pcmdinfo->in.u.manage_push_page.cqp,
+ &pcmdinfo->in.u.manage_push_page.info,
+ pcmdinfo->in.u.manage_push_page.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case OP_UPDATE_PE_SDS:
+ /* case I40IW_CQP_OP_UPDATE_PE_SDS */
+ status = i40iw_update_pe_sds(
+ pcmdinfo->in.u.update_pe_sds.dev,
+ &pcmdinfo->in.u.update_pe_sds.info,
+			pcmdinfo->in.u.update_pe_sds.scratch);
+
+ break;
+ case OP_MANAGE_HMC_PM_FUNC_TABLE:
+ status = i40iw_sc_manage_hmc_pm_func_table(
+ pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
+ pcmdinfo->in.u.manage_hmc_pm.scratch,
+ (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
+ pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
+ true);
+ break;
+ case OP_SUSPEND:
+ status = i40iw_sc_suspend_qp(
+ pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case OP_RESUME:
+ status = i40iw_sc_resume_qp(
+ pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case OP_MANAGE_VF_PBLE_BP:
+ status = i40iw_manage_vf_pble_bp(
+ pcmdinfo->in.u.manage_vf_pble_bp.cqp,
+ &pcmdinfo->in.u.manage_vf_pble_bp.info,
+ pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
+ break;
+ case OP_QUERY_FPM_VALUES:
+ values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
+ values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
+ status = i40iw_sc_query_fpm_values(
+ pcmdinfo->in.u.query_fpm_values.cqp,
+ pcmdinfo->in.u.query_fpm_values.scratch,
+ pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
+ &values_mem, true, I40IW_CQP_WAIT_EVENT);
+ break;
+ case OP_COMMIT_FPM_VALUES:
+ values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
+ values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
+ status = i40iw_sc_commit_fpm_values(
+ pcmdinfo->in.u.commit_fpm_values.cqp,
+ pcmdinfo->in.u.commit_fpm_values.scratch,
+ pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
+ &values_mem,
+ true,
+ I40IW_CQP_WAIT_EVENT);
+ break;
+ default:
+ status = I40IW_NOT_SUPPORTED;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40iw_process_cqp_cmd - process or queue a cqp command
+ * @dev: sc device struct
+ * @pcmdinfo: cqp command info
+ */
+enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
+ struct cqp_commands_info *pcmdinfo)
+{
+ enum i40iw_status_code status = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
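+	/* execute now if nothing is queued and the cqp ring has room,
+	 * otherwise queue the command for the tasklet to run later
+	 */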
+ if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
+ status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
+ else
+ list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
+
+/**
+ * i40iw_process_bh - called from tasklet for cqp list
+ * @dev: sc device struct
+ */
+enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
+{
+ enum i40iw_status_code status = 0;
+ struct cqp_commands_info *pcmdinfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
+ pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+
+ status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
+ if (status)
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
+
+/**
+ * i40iw_iwarp_opcode - extract the iwarp opcode from the offending packet
+ * @info: aeq info for the packet
+ * @pkt: packet for error
+ */
+static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
+{
+ u16 *mpa;
+ u32 opcode = 0xffffffff;
+
+ if (info->q2_data_written) {
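+		/* mpa[0] is the mpa length; the rdmap opcode is the low
+		 * nibble of the next word, which holds the ddp and rdmap
+		 * control bytes
+		 */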
+ mpa = (u16 *)pkt;
+ opcode = ntohs(mpa[1]) & 0xf;
+ }
+ return opcode;
+}
+
+/**
+ * i40iw_locate_mpa - return pointer to mpa in the pkt
+ * @pkt: packet with data
+ */
+static u8 *i40iw_locate_mpa(u8 *pkt)
+{
+ /* skip over ethernet header */
+ pkt += I40IW_MAC_HLEN;
+
+	/* skip over IP and TCP headers using the IP ihl and TCP data offset */
+ pkt += 4 * (pkt[0] & 0x0f);
+ pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+ return pkt;
+}
+
+/**
+ * i40iw_setup_termhdr - set up the terminate packet header
+ * @qp: sc qp ptr for pkt
+ * @hdr: term hdr
+ * @opcode: flush opcode for termhdr
+ * @layer_etype: error layer + error type
+ * @err: error code in the header
+ */
+static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
+ struct i40iw_terminate_hdr *hdr,
+ enum i40iw_flush_opcode opcode,
+ u8 layer_etype,
+ u8 err)
+{
+ qp->flush_code = opcode;
+ hdr->layer_etype = layer_etype;
+ hdr->error_code = err;
+}
+
+/**
+ * i40iw_bld_terminate_hdr - build terminate message header
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
+ struct i40iw_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ u16 ddp_seg_len;
+ int copy_len = 0;
+ u8 is_tagged = 0;
+ u32 opcode;
+ struct i40iw_terminate_hdr *termhdr;
+
+ termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
+ memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
+
+ if (info->q2_data_written) {
+ /* Use data from offending packet to fill in ddp & rdma hdrs */
+ pkt = i40iw_locate_mpa(pkt);
+ ddp_seg_len = ntohs(*(u16 *)pkt);
+ if (ddp_seg_len) {
+ copy_len = 2;
+ termhdr->hdrct = DDP_LEN_FLAG;
+ if (pkt[2] & 0x80) {
+ is_tagged = 1;
+ if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+ copy_len += TERM_DDP_LEN_TAGGED;
+ termhdr->hdrct |= DDP_HDR_FLAG;
+ }
+ } else {
+ if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+ copy_len += TERM_DDP_LEN_UNTAGGED;
+ termhdr->hdrct |= DDP_HDR_FLAG;
+ }
+
+ if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
+ if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
+ copy_len += TERM_RDMA_LEN;
+ termhdr->hdrct |= RDMA_HDR_FLAG;
+ }
+ }
+ }
+ }
+ }
+
+ opcode = i40iw_iwarp_opcode(info, pkt);
+
+ switch (info->ae_id) {
+ case I40IW_AE_AMP_UNALLOCATED_STAG:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
+ else
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
+ break;
+ case I40IW_AE_AMP_BOUNDS_VIOLATION:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ if (info->q2_data_written)
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
+ else
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
+ break;
+ case I40IW_AE_AMP_BAD_PD:
+ switch (opcode) {
+ case I40IW_OP_TYPE_RDMA_WRITE:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
+ break;
+ case I40IW_OP_TYPE_SEND_INV:
+ case I40IW_OP_TYPE_SEND_SOL_INV:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
+ }
+ break;
+ case I40IW_AE_AMP_INVALID_STAG:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
+ break;
+ case I40IW_AE_AMP_BAD_QP:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
+ break;
+ case I40IW_AE_AMP_BAD_STAG_KEY:
+ case I40IW_AE_AMP_BAD_STAG_INDEX:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ switch (opcode) {
+ case I40IW_OP_TYPE_SEND_INV:
+ case I40IW_OP_TYPE_SEND_SOL_INV:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
+ }
+ break;
+ case I40IW_AE_AMP_RIGHTS_VIOLATION:
+ case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case I40IW_AE_PRIV_OPERATION_DENIED:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
+ break;
+ case I40IW_AE_AMP_TO_WRAP:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
+ break;
+ case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
+ break;
+ case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
+ break;
+ case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
+ case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
+ break;
+ case I40IW_AE_LCE_QP_CATASTROPHIC:
+ case I40IW_AE_DDP_NO_L_BIT:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
+ break;
+ case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
+ case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
+ break;
+ case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
+ break;
+ case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+ if (is_tagged)
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
+ else
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
+ break;
+ case I40IW_AE_DDP_UBE_INVALID_MO:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
+ break;
+ case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
+ break;
+ case I40IW_AE_DDP_UBE_INVALID_QN:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
+ break;
+ case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
+ break;
+ case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
+ break;
+ default:
+ i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
+ break;
+ }
+
+ if (copy_len)
+ memcpy(termhdr + 1, pkt, copy_len);
+
+	if (qp->flush_code && !info->in_rdrsp_wr)
+ qp->sq_flush = (info->sq) ? true : false;
+
+ return sizeof(struct i40iw_terminate_hdr) + copy_len;
+}
+
+/**
+ * i40iw_terminate_send_fin() - Send fin for terminate message
+ * @qp: qp associated with received terminate AE
+ */
+void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
+{
+ /* Send the fin only */
+ i40iw_term_modify_qp(qp,
+ I40IW_QP_STATE_TERMINATE,
+ I40IWQP_TERM_SEND_FIN_ONLY,
+ 0);
+}
+
+/**
+ * i40iw_terminate_connection() - handle a bad AE and send terminate to the remote QP
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
+{
+ u8 termlen = 0;
+
+ if (qp->term_flags & I40IW_TERM_SENT)
+ return; /* Sanity check */
+
+	/* default to fatal; i40iw_bld_terminate_hdr may change the eventtype */
+ qp->eventtype = TERM_EVENT_QP_FATAL;
+ termlen = i40iw_bld_terminate_hdr(qp, info);
+ i40iw_terminate_start_timer(qp);
+ qp->term_flags |= I40IW_TERM_SENT;
+ i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
+ I40IWQP_TERM_SEND_TERM_ONLY, termlen);
+}
+
+/**
+ * i40iw_terminate_received - handle terminate received AE
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ u32 *mpa;
+ u8 ddp_ctl;
+ u8 rdma_ctl;
+ u16 aeq_id = 0;
+ struct i40iw_terminate_hdr *termhdr;
+
+ mpa = (u32 *)i40iw_locate_mpa(pkt);
+ if (info->q2_data_written) {
+ /* did not validate the frame - do it now */
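+		/* a well-formed terminate is an untagged, last ddp segment,
+		 * ddp version 1, on queue 2 with msn 1 and mo 0, carried in
+		 * rdmap version 1
+		 */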
+ ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
+ rdma_ctl = ntohl(mpa[0]) & 0xff;
+ if ((ddp_ctl & 0xc0) != 0x40)
+ aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
+ else if ((ddp_ctl & 0x03) != 1)
+ aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
+ else if (ntohl(mpa[2]) != 2)
+ aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
+ else if (ntohl(mpa[3]) != 1)
+ aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
+ else if (ntohl(mpa[4]) != 0)
+ aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
+ else if ((rdma_ctl & 0xc0) != 0x40)
+ aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+ info->ae_id = aeq_id;
+ if (info->ae_id) {
+ /* Bad terminate recvd - send back a terminate */
+ i40iw_terminate_connection(qp, info);
+ return;
+ }
+ }
+
+ qp->term_flags |= I40IW_TERM_RCVD;
+ qp->eventtype = TERM_EVENT_QP_FATAL;
+ termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
+ if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
+ termhdr->layer_etype == RDMAP_REMOTE_OP) {
+ i40iw_terminate_done(qp, 0);
+ } else {
+ i40iw_terminate_start_timer(qp);
+ i40iw_terminate_send_fin(qp);
+ }
+}
+
+/**
+ * i40iw_hw_stat_init - initialize HW stats table
+ * @devstat: pestat struct
+ * @fcn_idx: PCI fn id
+ * @hw: PF i40iw_hw structure.
+ * @is_pf: Is it a PF?
+ *
+ * Populate the HW stat table with the register offset addr for each
+ * stat and record the current counter values as the read baseline.
+ */
+static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
+ u8 fcn_idx,
+ struct i40iw_hw *hw, bool is_pf)
+{
+ u32 stat_reg_offset;
+ u32 stat_index;
+ struct i40iw_dev_hw_stat_offsets *stat_table =
+ &devstat->hw_stat_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+
+ devstat->hw = hw;
+
+ if (is_pf) {
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ I40E_GLPES_PFTCPRTXSEG(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
+
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
+		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
+				I40E_GLPES_PFIP6TXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+ I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+ I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+ I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+ I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+ I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+ I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+ I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+ I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+ I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+ I40E_GLPES_PFRDMAVINVLO(fcn_idx);
+ } else {
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ I40E_GLPES_VFTCPRTXSEG(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
+ stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
+
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
+		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
+				I40E_GLPES_VFIP6TXMCPKTSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+ I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+ I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+ I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+ I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+ I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+ I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+ I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+ I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+ I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
+ stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+ I40E_GLPES_VFRDMAVINVLO(fcn_idx);
+ }
+
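+	/* snapshot current counter values so later reads can compute deltas */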
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stat_index++) {
+ stat_reg_offset = stat_table->stat_offset_64[stat_index];
+ last_rd_stats->stat_value_64[stat_index] =
+ readq(devstat->hw->hw_addr + stat_reg_offset);
+ }
+
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stat_index++) {
+ stat_reg_offset = stat_table->stat_offset_32[stat_index];
+ last_rd_stats->stat_value_32[stat_index] =
+ i40iw_rd32(devstat->hw, stat_reg_offset);
+ }
+}
+
+/**
+ * i40iw_hw_stat_read_32 - read a 32-bit HW stat counter, accounting for roll-over
+ * @devstat: pestat struct
+ * @index: index in HW stat table which contains offset reg-addr
+ * @value: hw stat value
+ */
+static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
+ enum i40iw_hw_stat_index_32b index,
+ u64 *value)
+{
+ struct i40iw_dev_hw_stat_offsets *stat_table =
+ &devstat->hw_stat_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+ struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+ u64 new_stat_value = 0;
+ u32 stat_reg_offset = stat_table->stat_offset_32[index];
+
+ new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
+	/* roll-over case */
+ if (new_stat_value < last_rd_stats->stat_value_32[index])
+ hw_stats->stat_value_32[index] += new_stat_value;
+ else
+ hw_stats->stat_value_32[index] +=
+ new_stat_value - last_rd_stats->stat_value_32[index];
+ last_rd_stats->stat_value_32[index] = new_stat_value;
+ *value = hw_stats->stat_value_32[index];
+}
+
+/**
+ * i40iw_hw_stat_read_64 - read a 64-bit HW stat counter, accounting for roll-over
+ * @devstat: pestat struct
+ * @index: index in HW stat table which contains offset reg-addr
+ * @value: hw stat value
+ */
+static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
+ enum i40iw_hw_stat_index_64b index,
+ u64 *value)
+{
+ struct i40iw_dev_hw_stat_offsets *stat_table =
+ &devstat->hw_stat_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+ struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+ u64 new_stat_value = 0;
+ u32 stat_reg_offset = stat_table->stat_offset_64[index];
+
+ new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
+	/* roll-over case */
+ if (new_stat_value < last_rd_stats->stat_value_64[index])
+ hw_stats->stat_value_64[index] += new_stat_value;
+ else
+ hw_stats->stat_value_64[index] +=
+ new_stat_value - last_rd_stats->stat_value_64[index];
+ last_rd_stats->stat_value_64[index] = new_stat_value;
+ *value = hw_stats->stat_value_64[index];
+}
+
+/**
+ * i40iw_hw_stat_read_all - read all HW stat counters
+ * @devstat: pestat struct
+ * @stat_values: hw stats structure
+ *
+ * Read all the HW stat counters and populate the hw_stats structure
+ * of the passed-in dev's pestat as well as the copy in stat_values.
+ */
+static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
+ struct i40iw_dev_hw_stats *stat_values)
+{
+ u32 stat_index;
+
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stat_index++)
+ i40iw_hw_stat_read_32(devstat, stat_index,
+ &stat_values->stat_value_32[stat_index]);
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stat_index++)
+ i40iw_hw_stat_read_64(devstat, stat_index,
+ &stat_values->stat_value_64[stat_index]);
+}
+
+/**
+ * i40iw_hw_stat_refresh_all - update all HW stat structs
+ * @devstat: pestat struct
+ *
+ * Read all the HW stat counters to refresh the values in the hw_stats
+ * structure of the passed-in dev's pestat.
+ */
+static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+{
+ u64 stat_value;
+ u32 stat_index;
+
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+ stat_index++)
+ i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
+ for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+ stat_index++)
+ i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
+}
+
+static struct i40iw_cqp_ops iw_cqp_ops = {
+ i40iw_sc_cqp_init,
+ i40iw_sc_cqp_create,
+ i40iw_sc_cqp_post_sq,
+ i40iw_sc_cqp_get_next_send_wqe,
+ i40iw_sc_cqp_destroy,
+ i40iw_sc_poll_for_cqp_op_done
+};
+
+static struct i40iw_ccq_ops iw_ccq_ops = {
+ i40iw_sc_ccq_init,
+ i40iw_sc_ccq_create,
+ i40iw_sc_ccq_destroy,
+ i40iw_sc_ccq_create_done,
+ i40iw_sc_ccq_get_cqe_info,
+ i40iw_sc_ccq_arm
+};
+
+static struct i40iw_ceq_ops iw_ceq_ops = {
+ i40iw_sc_ceq_init,
+ i40iw_sc_ceq_create,
+ i40iw_sc_cceq_create_done,
+ i40iw_sc_cceq_destroy_done,
+ i40iw_sc_cceq_create,
+ i40iw_sc_ceq_destroy,
+ i40iw_sc_process_ceq
+};
+
+static struct i40iw_aeq_ops iw_aeq_ops = {
+ i40iw_sc_aeq_init,
+ i40iw_sc_aeq_create,
+ i40iw_sc_aeq_destroy,
+ i40iw_sc_get_next_aeqe,
+ i40iw_sc_repost_aeq_entries,
+ i40iw_sc_aeq_create_done,
+ i40iw_sc_aeq_destroy_done
+};
+
+/* iwarp pd ops */
+static struct i40iw_pd_ops iw_pd_ops = {
+ i40iw_sc_pd_init,
+};
+
+static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
+ i40iw_sc_qp_init,
+ i40iw_sc_qp_create,
+ i40iw_sc_qp_modify,
+ i40iw_sc_qp_destroy,
+ i40iw_sc_qp_flush_wqes,
+ i40iw_sc_qp_upload_context,
+ i40iw_sc_qp_setctx,
+ i40iw_sc_send_lsmm,
+ i40iw_sc_send_lsmm_nostag,
+ i40iw_sc_send_rtt,
+ i40iw_sc_post_wqe0,
+};
+
+static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
+ i40iw_sc_cq_init,
+ i40iw_sc_cq_create,
+ i40iw_sc_cq_destroy,
+ i40iw_sc_cq_modify,
+};
+
+static struct i40iw_mr_ops iw_mr_ops = {
+ i40iw_sc_alloc_stag,
+ i40iw_sc_mr_reg_non_shared,
+ i40iw_sc_mr_reg_shared,
+ i40iw_sc_dealloc_stag,
+ i40iw_sc_query_stag,
+ i40iw_sc_mw_alloc
+};
+
+static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
+ i40iw_sc_manage_push_page,
+ i40iw_sc_manage_hmc_pm_func_table,
+ i40iw_sc_set_hmc_resource_profile,
+ i40iw_sc_commit_fpm_values,
+ i40iw_sc_query_fpm_values,
+ i40iw_sc_static_hmc_pages_allocated,
+ i40iw_sc_add_arp_cache_entry,
+ i40iw_sc_del_arp_cache_entry,
+ i40iw_sc_query_arp_cache_entry,
+ i40iw_sc_manage_apbvt_entry,
+ i40iw_sc_manage_qhash_table_entry,
+ i40iw_sc_alloc_local_mac_ipaddr_entry,
+ i40iw_sc_add_local_mac_ipaddr_entry,
+ i40iw_sc_del_local_mac_ipaddr_entry,
+ i40iw_sc_cqp_nop,
+ i40iw_sc_commit_fpm_values_done,
+ i40iw_sc_query_fpm_values_done,
+ i40iw_sc_manage_hmc_pm_func_table_done,
+ i40iw_sc_suspend_qp,
+ i40iw_sc_resume_qp
+};
+
+static struct i40iw_hmc_ops iw_hmc_ops = {
+ i40iw_sc_init_iw_hmc,
+ i40iw_sc_parse_fpm_query_buf,
+ i40iw_sc_configure_iw_fpm,
+ i40iw_sc_parse_fpm_commit_buf,
+ i40iw_sc_create_hmc_obj,
+ i40iw_sc_del_hmc_obj,
+ NULL,
+ NULL
+};
+
+static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
+ i40iw_hw_stat_init,
+ i40iw_hw_stat_read_32,
+ i40iw_hw_stat_read_64,
+ i40iw_hw_stat_read_all,
+ i40iw_hw_stat_refresh_all
+};
+
+/**
+ * i40iw_device_init_pestat - Initialize the pestat structure
+ * @devstat: pestat struct
+ */
+enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
+{
+ devstat->ops = iw_device_pestat_ops;
+ return 0;
+}
+
+/**
+ * i40iw_device_init - Initialize IWARP device
+ * @dev: IWARP device pointer
+ * @info: IWARP init info
+ */
+enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+ struct i40iw_device_init_info *info)
+{
+ u32 val;
+ u32 vchnl_ver = 0;
+ u16 hmc_fcn = 0;
+ enum i40iw_status_code ret_code = 0;
+ u8 db_size;
+
+ spin_lock_init(&dev->cqp_lock);
+ INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */
+
+ i40iw_device_init_uk(&dev->dev_uk);
+
+ dev->debug_mask = info->debug_mask;
+
+ ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_DEV,
+ "%s: i40iw_device_init_pestat failed\n", __func__);
+ return ret_code;
+ }
+ dev->hmc_fn_id = info->hmc_fn_id;
+ dev->qs_handle = info->qs_handle;
+ dev->exception_lan_queue = info->exception_lan_queue;
+ dev->is_pf = info->is_pf;
+
+ dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
+ dev->fpm_query_buf = info->fpm_query_buf;
+
+ dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
+ dev->fpm_commit_buf = info->fpm_commit_buf;
+
+ dev->hw = info->hw;
+ dev->hw->hw_addr = info->bar0;
+
+ val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
+ dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
+
+ if (dev->is_pf) {
+ dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
+ dev->hmc_fn_id, dev->hw, true);
+ spin_lock_init(&dev->dev_pestat.stats_lock);
+ /* start the periodic stats_timer */
+ i40iw_hw_stats_start_timer(dev);
+ val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
+ db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
+ if ((db_size != I40IW_PE_DB_SIZE_4M) &&
+ (db_size != I40IW_PE_DB_SIZE_8M)) {
+ i40iw_debug(dev, I40IW_DEBUG_DEV,
+ "%s: PE doorbell is not enabled in CSR val 0x%x\n",
+ __func__, val);
+ ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
+ return ret_code;
+ }
+ dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
+ dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
+ } else {
+ dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
+ }
+
+ dev->cqp_ops = &iw_cqp_ops;
+ dev->ccq_ops = &iw_ccq_ops;
+ dev->ceq_ops = &iw_ceq_ops;
+ dev->aeq_ops = &iw_aeq_ops;
+ dev->cqp_misc_ops = &iw_cqp_misc_ops;
+ dev->iw_pd_ops = &iw_pd_ops;
+ dev->iw_priv_qp_ops = &iw_priv_qp_ops;
+ dev->iw_priv_cq_ops = &iw_priv_cq_ops;
+ dev->mr_ops = &iw_mr_ops;
+ dev->hmc_ops = &iw_hmc_ops;
+ dev->vchnl_if.vchnl_send = info->vchnl_send;
+ dev->vchnl_up = dev->vchnl_if.vchnl_send ? true : false;
+ if (!dev->is_pf) {
+ dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
+ ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
+ if (!ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_DEV,
+ "%s: Get Channel version rc = 0x%0x, version is %u\n",
+ __func__, ret_code, vchnl_ver);
+ ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
+ if (!ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_DEV,
+ "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
+ __func__, ret_code, hmc_fcn);
+ dev->hmc_fn_id = (u8)hmc_fcn;
+ }
+ }
+ }
+ dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
+
+ return ret_code;
+}
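+
+/*
+ * Illustrative only (editor's sketch, not part of the original patch):
+ * roughly how a caller might fill struct i40iw_device_init_info before
+ * calling i40iw_device_init(). Field names are taken from the
+ * assignments above; the bar0 type, the values and the wrapper function
+ * are assumptions, and the FPM query/commit DMA buffers are omitted.
+ */
+#if 0
+static enum i40iw_status_code example_device_bringup(struct i40iw_sc_dev *dev,
+ struct i40iw_hw *hw,
+ u8 __iomem *bar0)
+{
+ struct i40iw_device_init_info info = {};
+
+ info.hw = hw;
+ info.bar0 = bar0; /* becomes dev->hw->hw_addr */
+ info.is_pf = true; /* PF path: stats timer + doorbell size check */
+ info.hmc_fn_id = 0;
+ info.debug_mask = 0;
+ info.vchnl_send = NULL; /* no virtual channel, so vchnl_up stays false */
+ return i40iw_device_init(dev, &info);
+}
+#endif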
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
new file mode 100644
index 000000000000..aab88d65f805
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -0,0 +1,1713 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_D_H
+#define I40IW_D_H
+
+#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
+#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
+
+#define I40IW_PUSH_OFFSET (4 * 1024 * 1024)
+#define I40IW_PF_FIRST_PUSH_PAGE_INDEX 16
+#define I40IW_VF_PUSH_OFFSET ((8 + 64) * 1024)
+#define I40IW_VF_FIRST_PUSH_PAGE_INDEX 2
+
+#define I40IW_PE_DB_SIZE_4M 1
+#define I40IW_PE_DB_SIZE_8M 2
+
+#define I40IW_DDP_VER 1
+#define I40IW_RDMAP_VER 1
+
+#define I40IW_RDMA_MODE_RDMAC 0
+#define I40IW_RDMA_MODE_IETF 1
+
+#define I40IW_QP_STATE_INVALID 0
+#define I40IW_QP_STATE_IDLE 1
+#define I40IW_QP_STATE_RTS 2
+#define I40IW_QP_STATE_CLOSING 3
+#define I40IW_QP_STATE_RESERVED 4
+#define I40IW_QP_STATE_TERMINATE 5
+#define I40IW_QP_STATE_ERROR 6
+
+#define I40IW_STAG_STATE_INVALID 0
+#define I40IW_STAG_STATE_VALID 1
+
+#define I40IW_STAG_TYPE_SHARED 0
+#define I40IW_STAG_TYPE_NONSHARED 1
+
+#define I40IW_MAX_USER_PRIORITY 8
+
+#define LS_64_1(val, bits) ((u64)(uintptr_t)(val) << (bits))
+#define RS_64_1(val, bits) ((u64)(uintptr_t)(val) >> (bits))
+#define LS_32_1(val, bits) ((u32)((val) << (bits)))
+#define RS_32_1(val, bits) ((u32)((val) >> (bits)))
+#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+
+#define LS_64(val, field) (((u64)(val) << field ## _SHIFT) & (field ## _MASK))
+
+#define RS_64(val, field) ((u64)((val) & field ## _MASK) >> field ## _SHIFT)
+#define LS_32(val, field) (((val) << field ## _SHIFT) & (field ## _MASK))
+#define RS_32(val, field) (((val) & field ## _MASK) >> field ## _SHIFT)
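+
+/*
+ * Illustrative only (editor's sketch, not part of the original patch):
+ * every field below pairs a FIELD_SHIFT with a FIELD_MASK so that
+ * LS_64/RS_64 can pack a value into, or pull it back out of, a 64-bit
+ * descriptor word. EXAMPLE_FIELD is invented for this sketch and is
+ * not a hardware field.
+ */
+#if 0
+#define EXAMPLE_FIELD_SHIFT 32
+#define EXAMPLE_FIELD_MASK (0x3fULL << EXAMPLE_FIELD_SHIFT)
+
+static inline void example_field_roundtrip(void)
+{
+ u64 word = LS_64(0x2a, EXAMPLE_FIELD); /* pack 0x2a into bits 37:32 */
+ u64 val = RS_64(word, EXAMPLE_FIELD); /* unpack: val == 0x2a again */
+
+ (void)val;
+}
+#endif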
+
+#define TERM_DDP_LEN_TAGGED 14
+#define TERM_DDP_LEN_UNTAGGED 18
+#define TERM_RDMA_LEN 28
+#define RDMA_OPCODE_MASK 0x0f
+#define RDMA_READ_REQ_OPCODE 1
+#define Q2_BAD_FRAME_OFFSET 72
+#define CQE_MAJOR_DRV 0x8000
+
+#define I40IW_TERM_SENT 0x01
+#define I40IW_TERM_RCVD 0x02
+#define I40IW_TERM_DONE 0x04
+#define I40IW_MAC_HLEN 14
+
+#define I40IW_INVALID_WQE_INDEX 0xffffffff
+
+#define I40IW_CQP_WAIT_POLL_REGS 1
+#define I40IW_CQP_WAIT_POLL_CQ 2
+#define I40IW_CQP_WAIT_EVENT 3
+
+#define I40IW_CQP_INIT_WQE(wqe) memset(wqe, 0, 64)
+
+#define I40IW_GET_CURRENT_CQ_ELEMENT(_cq) \
+ ( \
+ &((_cq)->cq_base[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \
+ )
+#define I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(_cq) \
+ ( \
+ &(((struct i40iw_extended_cqe *) \
+ ((_cq)->cq_base))[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \
+ )
+
+#define I40IW_GET_CURRENT_AEQ_ELEMENT(_aeq) \
+ ( \
+ &_aeq->aeqe_base[I40IW_RING_GETCURRENT_TAIL(_aeq->aeq_ring)] \
+ )
+
+#define I40IW_GET_CURRENT_CEQ_ELEMENT(_ceq) \
+ ( \
+ &_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)] \
+ )
+
+#define I40IW_AE_SOURCE_RQ 0x1
+#define I40IW_AE_SOURCE_RQ_0011 0x3
+
+#define I40IW_AE_SOURCE_CQ 0x2
+#define I40IW_AE_SOURCE_CQ_0110 0x6
+#define I40IW_AE_SOURCE_CQ_1010 0xA
+#define I40IW_AE_SOURCE_CQ_1110 0xE
+
+#define I40IW_AE_SOURCE_SQ 0x5
+#define I40IW_AE_SOURCE_SQ_0111 0x7
+
+#define I40IW_AE_SOURCE_IN_RR_WR 0x9
+#define I40IW_AE_SOURCE_IN_RR_WR_1011 0xB
+#define I40IW_AE_SOURCE_OUT_RR 0xD
+#define I40IW_AE_SOURCE_OUT_RR_1111 0xF
+
+#define I40IW_TCP_STATE_NON_EXISTENT 0
+#define I40IW_TCP_STATE_CLOSED 1
+#define I40IW_TCP_STATE_LISTEN 2
+#define I40IW_STATE_SYN_SEND 3
+#define I40IW_TCP_STATE_SYN_RECEIVED 4
+#define I40IW_TCP_STATE_ESTABLISHED 5
+#define I40IW_TCP_STATE_CLOSE_WAIT 6
+#define I40IW_TCP_STATE_FIN_WAIT_1 7
+#define I40IW_TCP_STATE_CLOSING 8
+#define I40IW_TCP_STATE_LAST_ACK 9
+#define I40IW_TCP_STATE_FIN_WAIT_2 10
+#define I40IW_TCP_STATE_TIME_WAIT 11
+#define I40IW_TCP_STATE_RESERVED_1 12
+#define I40IW_TCP_STATE_RESERVED_2 13
+#define I40IW_TCP_STATE_RESERVED_3 14
+#define I40IW_TCP_STATE_RESERVED_4 15
+
+/* ILQ CQP hash table fields */
+#define I40IW_CQPSQ_QHASH_VLANID_SHIFT 32
+#define I40IW_CQPSQ_QHASH_VLANID_MASK \
+ ((u64)0xfff << I40IW_CQPSQ_QHASH_VLANID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_QPN_SHIFT 32
+#define I40IW_CQPSQ_QHASH_QPN_MASK \
+ ((u64)0x3ffff << I40IW_CQPSQ_QHASH_QPN_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT 0
+#define I40IW_CQPSQ_QHASH_QS_HANDLE_MASK ((u64)0x3ff << I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT 16
+#define I40IW_CQPSQ_QHASH_SRC_PORT_MASK \
+ ((u64)0xffff << I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT 0
+#define I40IW_CQPSQ_QHASH_DEST_PORT_MASK \
+ ((u64)0xffff << I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR0_SHIFT 32
+#define I40IW_CQPSQ_QHASH_ADDR0_MASK \
+ ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR0_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR1_SHIFT 0
+#define I40IW_CQPSQ_QHASH_ADDR1_MASK \
+ ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR1_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR2_SHIFT 32
+#define I40IW_CQPSQ_QHASH_ADDR2_MASK \
+ ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR2_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR3_SHIFT 0
+#define I40IW_CQPSQ_QHASH_ADDR3_MASK \
+ ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR3_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_WQEVALID_SHIFT 63
+#define I40IW_CQPSQ_QHASH_WQEVALID_MASK \
+ ((u64)0x1 << I40IW_CQPSQ_QHASH_WQEVALID_SHIFT)
+#define I40IW_CQPSQ_QHASH_OPCODE_SHIFT 32
+#define I40IW_CQPSQ_QHASH_OPCODE_MASK \
+ ((u64)0x3f << I40IW_CQPSQ_QHASH_OPCODE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_MANAGE_SHIFT 61
+#define I40IW_CQPSQ_QHASH_MANAGE_MASK \
+ ((u64)0x3 << I40IW_CQPSQ_QHASH_MANAGE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT 60
+#define I40IW_CQPSQ_QHASH_IPV4VALID_MASK \
+ ((u64)0x1 << I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_VLANVALID_SHIFT 59
+#define I40IW_CQPSQ_QHASH_VLANVALID_MASK \
+ ((u64)0x1 << I40IW_CQPSQ_QHASH_VLANVALID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT 42
+#define I40IW_CQPSQ_QHASH_ENTRYTYPE_MASK \
+ ((u64)0x7 << I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT)
+/* CQP Host Context */
+#define I40IW_CQPHC_EN_DC_TCP_SHIFT 0
+#define I40IW_CQPHC_EN_DC_TCP_MASK (1UL << I40IW_CQPHC_EN_DC_TCP_SHIFT)
+
+#define I40IW_CQPHC_SQSIZE_SHIFT 8
+#define I40IW_CQPHC_SQSIZE_MASK (0xfUL << I40IW_CQPHC_SQSIZE_SHIFT)
+
+#define I40IW_CQPHC_DISABLE_PFPDUS_SHIFT 1
+#define I40IW_CQPHC_DISABLE_PFPDUS_MASK (0x1UL << I40IW_CQPHC_DISABLE_PFPDUS_SHIFT)
+
+#define I40IW_CQPHC_ENABLED_VFS_SHIFT 32
+#define I40IW_CQPHC_ENABLED_VFS_MASK (0x3fULL << I40IW_CQPHC_ENABLED_VFS_SHIFT)
+
+#define I40IW_CQPHC_HMC_PROFILE_SHIFT 0
+#define I40IW_CQPHC_HMC_PROFILE_MASK (0x7ULL << I40IW_CQPHC_HMC_PROFILE_SHIFT)
+
+#define I40IW_CQPHC_SVER_SHIFT 24
+#define I40IW_CQPHC_SVER_MASK (0xffUL << I40IW_CQPHC_SVER_SHIFT)
+
+#define I40IW_CQPHC_SQBASE_SHIFT 9
+#define I40IW_CQPHC_SQBASE_MASK \
+ (0xfffffffffffffeULL << I40IW_CQPHC_SQBASE_SHIFT)
+
+#define I40IW_CQPHC_QPCTX_SHIFT 0
+#define I40IW_CQPHC_QPCTX_MASK \
+ (0xffffffffffffffffULL << I40IW_CQPHC_QPCTX_SHIFT)
+#define I40IW_CQPHC_SVER 1
+
+#define I40IW_CQP_SW_SQSIZE_4 4
+#define I40IW_CQP_SW_SQSIZE_2048 2048
+
+/* iWARP QP Doorbell shadow area */
+#define I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT 0
+#define I40IW_QP_DBSA_HW_SQ_TAIL_MASK \
+ (0x3fffUL << I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT)
+
+/* Completion Queue Doorbell shadow area */
+#define I40IW_CQ_DBSA_CQEIDX_SHIFT 0
+#define I40IW_CQ_DBSA_CQEIDX_MASK (0xfffffUL << I40IW_CQ_DBSA_CQEIDX_SHIFT)
+
+#define I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT 0
+#define I40IW_CQ_DBSA_SW_CQ_SELECT_MASK \
+ (0x3fffUL << I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_NEXT_SHIFT 14
+#define I40IW_CQ_DBSA_ARM_NEXT_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT 15
+#define I40IW_CQ_DBSA_ARM_NEXT_SE_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT 16
+#define I40IW_CQ_DBSA_ARM_SEQ_NUM_MASK \
+ (0x3UL << I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT)
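+
+/*
+ * Illustrative only (editor's sketch): composing a CQ arm doorbell
+ * shadow-area word from the fields above with LS_32. Whether hardware
+ * expects exactly this combination of bits is an assumption of the
+ * sketch, not something this header states.
+ */
+#if 0
+static inline u32 example_cq_arm_value(u32 cq_id, u32 arm_seq)
+{
+ return LS_32(cq_id, I40IW_CQ_DBSA_SW_CQ_SELECT) |
+ LS_32(1, I40IW_CQ_DBSA_ARM_NEXT) |
+ LS_32(arm_seq, I40IW_CQ_DBSA_ARM_SEQ_NUM);
+}
+#endif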
+
+/* CQP and iWARP Completion Queue */
+#define I40IW_CQ_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQ_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CCQ_OPRETVAL_SHIFT 0
+#define I40IW_CCQ_OPRETVAL_MASK (0xffffffffUL << I40IW_CCQ_OPRETVAL_SHIFT)
+
+#define I40IW_CQ_MINERR_SHIFT 0
+#define I40IW_CQ_MINERR_MASK (0xffffUL << I40IW_CQ_MINERR_SHIFT)
+
+#define I40IW_CQ_MAJERR_SHIFT 16
+#define I40IW_CQ_MAJERR_MASK (0xffffUL << I40IW_CQ_MAJERR_SHIFT)
+
+#define I40IW_CQ_WQEIDX_SHIFT 32
+#define I40IW_CQ_WQEIDX_MASK (0x3fffULL << I40IW_CQ_WQEIDX_SHIFT)
+
+#define I40IW_CQ_ERROR_SHIFT 55
+#define I40IW_CQ_ERROR_MASK (1ULL << I40IW_CQ_ERROR_SHIFT)
+
+#define I40IW_CQ_SQ_SHIFT 62
+#define I40IW_CQ_SQ_MASK (1ULL << I40IW_CQ_SQ_SHIFT)
+
+#define I40IW_CQ_VALID_SHIFT 63
+#define I40IW_CQ_VALID_MASK (1ULL << I40IW_CQ_VALID_SHIFT)
+
+#define I40IWCQ_PAYLDLEN_SHIFT 0
+#define I40IWCQ_PAYLDLEN_MASK (0xffffffffUL << I40IWCQ_PAYLDLEN_SHIFT)
+
+#define I40IWCQ_TCPSEQNUM_SHIFT 32
+#define I40IWCQ_TCPSEQNUM_MASK (0xffffffffULL << I40IWCQ_TCPSEQNUM_SHIFT)
+
+#define I40IWCQ_INVSTAG_SHIFT 0
+#define I40IWCQ_INVSTAG_MASK (0xffffffffUL << I40IWCQ_INVSTAG_SHIFT)
+
+#define I40IWCQ_QPID_SHIFT 32
+#define I40IWCQ_QPID_MASK (0x3ffffULL << I40IWCQ_QPID_SHIFT)
+
+#define I40IWCQ_PSHDROP_SHIFT 51
+#define I40IWCQ_PSHDROP_MASK (1ULL << I40IWCQ_PSHDROP_SHIFT)
+
+#define I40IWCQ_SRQ_SHIFT 52
+#define I40IWCQ_SRQ_MASK (1ULL << I40IWCQ_SRQ_SHIFT)
+
+#define I40IWCQ_STAG_SHIFT 53
+#define I40IWCQ_STAG_MASK (1ULL << I40IWCQ_STAG_SHIFT)
+
+#define I40IWCQ_SOEVENT_SHIFT 54
+#define I40IWCQ_SOEVENT_MASK (1ULL << I40IWCQ_SOEVENT_SHIFT)
+
+#define I40IWCQ_OP_SHIFT 56
+#define I40IWCQ_OP_MASK (0x3fULL << I40IWCQ_OP_SHIFT)
+
+/* CEQE format */
+#define I40IW_CEQE_CQCTX_SHIFT 0
+#define I40IW_CEQE_CQCTX_MASK \
+ (0x7fffffffffffffffULL << I40IW_CEQE_CQCTX_SHIFT)
+
+#define I40IW_CEQE_VALID_SHIFT 63
+#define I40IW_CEQE_VALID_MASK (1ULL << I40IW_CEQE_VALID_SHIFT)
+
+/* AEQE format */
+#define I40IW_AEQE_COMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_AEQE_COMPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_AEQE_QPCQID_SHIFT 0
+#define I40IW_AEQE_QPCQID_MASK (0x3ffffUL << I40IW_AEQE_QPCQID_SHIFT)
+
+#define I40IW_AEQE_WQDESCIDX_SHIFT 18
+#define I40IW_AEQE_WQDESCIDX_MASK (0x3fffULL << I40IW_AEQE_WQDESCIDX_SHIFT)
+
+#define I40IW_AEQE_OVERFLOW_SHIFT 33
+#define I40IW_AEQE_OVERFLOW_MASK (1ULL << I40IW_AEQE_OVERFLOW_SHIFT)
+
+#define I40IW_AEQE_AECODE_SHIFT 34
+#define I40IW_AEQE_AECODE_MASK (0xffffULL << I40IW_AEQE_AECODE_SHIFT)
+
+#define I40IW_AEQE_AESRC_SHIFT 50
+#define I40IW_AEQE_AESRC_MASK (0xfULL << I40IW_AEQE_AESRC_SHIFT)
+
+#define I40IW_AEQE_IWSTATE_SHIFT 54
+#define I40IW_AEQE_IWSTATE_MASK (0x7ULL << I40IW_AEQE_IWSTATE_SHIFT)
+
+#define I40IW_AEQE_TCPSTATE_SHIFT 57
+#define I40IW_AEQE_TCPSTATE_MASK (0xfULL << I40IW_AEQE_TCPSTATE_SHIFT)
+
+#define I40IW_AEQE_Q2DATA_SHIFT 61
+#define I40IW_AEQE_Q2DATA_MASK (0x3ULL << I40IW_AEQE_Q2DATA_SHIFT)
+
+#define I40IW_AEQE_VALID_SHIFT 63
+#define I40IW_AEQE_VALID_MASK (1ULL << I40IW_AEQE_VALID_SHIFT)
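+
+/*
+ * Illustrative only (editor's sketch): decoding one 64-bit AEQE word
+ * with the AEQE fields above. How 'qword' is fetched from the AEQ
+ * memory is outside this sketch.
+ */
+#if 0
+static inline bool example_decode_aeqe(u64 qword, u16 *ae_code, u8 *ae_src)
+{
+ if (!RS_64(qword, I40IW_AEQE_VALID))
+ return false; /* hardware has not written this entry yet */
+ *ae_code = (u16)RS_64(qword, I40IW_AEQE_AECODE);
+ *ae_src = (u8)RS_64(qword, I40IW_AEQE_AESRC);
+ return true;
+}
+#endif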
+
+/* CQP SQ WQES */
+#define I40IW_QP_TYPE_IWARP 1
+#define I40IW_QP_TYPE_UDA 2
+#define I40IW_QP_TYPE_CQP 4
+
+#define I40IW_CQ_TYPE_IWARP 1
+#define I40IW_CQ_TYPE_ILQ 2
+#define I40IW_CQ_TYPE_IEQ 3
+#define I40IW_CQ_TYPE_CQP 4
+
+#define I40IWQP_TERM_SEND_TERM_AND_FIN 0
+#define I40IWQP_TERM_SEND_TERM_ONLY 1
+#define I40IWQP_TERM_SEND_FIN_ONLY 2
+#define I40IWQP_TERM_DONOT_SEND_TERM_OR_FIN 3
+
+#define I40IW_CQP_OP_CREATE_QP 0
+#define I40IW_CQP_OP_MODIFY_QP 0x1
+#define I40IW_CQP_OP_DESTROY_QP 0x02
+#define I40IW_CQP_OP_CREATE_CQ 0x03
+#define I40IW_CQP_OP_MODIFY_CQ 0x04
+#define I40IW_CQP_OP_DESTROY_CQ 0x05
+#define I40IW_CQP_OP_CREATE_SRQ 0x06
+#define I40IW_CQP_OP_MODIFY_SRQ 0x07
+#define I40IW_CQP_OP_DESTROY_SRQ 0x08
+#define I40IW_CQP_OP_ALLOC_STAG 0x09
+#define I40IW_CQP_OP_REG_MR 0x0a
+#define I40IW_CQP_OP_QUERY_STAG 0x0b
+#define I40IW_CQP_OP_REG_SMR 0x0c
+#define I40IW_CQP_OP_DEALLOC_STAG 0x0d
+#define I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE 0x0e
+#define I40IW_CQP_OP_MANAGE_ARP 0x0f
+#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10
+#define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11
+#define I40IW_CQP_OP_MANAGE_PE_TEAM 0x12
+#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13
+#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14
+#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
+#define I40IW_CQP_OP_CREATE_CEQ 0x16
+#define I40IW_CQP_OP_DESTROY_CEQ 0x18
+#define I40IW_CQP_OP_CREATE_AEQ 0x19
+#define I40IW_CQP_OP_DESTROY_AEQ 0x1b
+#define I40IW_CQP_OP_CREATE_ADDR_VECT 0x1c
+#define I40IW_CQP_OP_MODIFY_ADDR_VECT 0x1d
+#define I40IW_CQP_OP_DESTROY_ADDR_VECT 0x1e
+#define I40IW_CQP_OP_UPDATE_PE_SDS 0x1f
+#define I40IW_CQP_OP_QUERY_FPM_VALUES 0x20
+#define I40IW_CQP_OP_COMMIT_FPM_VALUES 0x21
+#define I40IW_CQP_OP_FLUSH_WQES 0x22
+#define I40IW_CQP_OP_MANAGE_APBVT 0x23
+#define I40IW_CQP_OP_NOP 0x24
+#define I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25
+#define I40IW_CQP_OP_CREATE_UDA_MCAST_GROUP 0x26
+#define I40IW_CQP_OP_MODIFY_UDA_MCAST_GROUP 0x27
+#define I40IW_CQP_OP_DESTROY_UDA_MCAST_GROUP 0x28
+#define I40IW_CQP_OP_SUSPEND_QP 0x29
+#define I40IW_CQP_OP_RESUME_QP 0x2a
+#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b
+#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d
+
+#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16
+#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT)
+
+#define I40IW_UDA_QPSQ_OPCODE_SHIFT 32
+#define I40IW_UDA_QPSQ_OPCODE_MASK ((u64)0x3f << I40IW_UDA_QPSQ_OPCODE_SHIFT)
+
+#define I40IW_UDA_QPSQ_MACLEN_SHIFT 56
+#define I40IW_UDA_QPSQ_MACLEN_MASK \
+ ((u64)0x7f << I40IW_UDA_QPSQ_MACLEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_IPLEN_SHIFT 48
+#define I40IW_UDA_QPSQ_IPLEN_MASK \
+ ((u64)0x7f << I40IW_UDA_QPSQ_IPLEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_L4T_SHIFT 30
+#define I40IW_UDA_QPSQ_L4T_MASK \
+ ((u64)0x3 << I40IW_UDA_QPSQ_L4T_SHIFT)
+
+#define I40IW_UDA_QPSQ_IIPT_SHIFT 28
+#define I40IW_UDA_QPSQ_IIPT_MASK \
+ ((u64)0x3 << I40IW_UDA_QPSQ_IIPT_SHIFT)
+
+#define I40IW_UDA_QPSQ_L4LEN_SHIFT 24
+#define I40IW_UDA_QPSQ_L4LEN_MASK ((u64)0xf << I40IW_UDA_QPSQ_L4LEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_AVIDX_SHIFT 0
+#define I40IW_UDA_QPSQ_AVIDX_MASK ((u64)0xffff << I40IW_UDA_QPSQ_AVIDX_SHIFT)
+
+#define I40IW_UDA_QPSQ_VALID_SHIFT 63
+#define I40IW_UDA_QPSQ_VALID_MASK \
+ ((u64)0x1 << I40IW_UDA_QPSQ_VALID_SHIFT)
+
+#define I40IW_UDA_QPSQ_SIGCOMPL_SHIFT 62
+#define I40IW_UDA_QPSQ_SIGCOMPL_MASK ((u64)0x1 << I40IW_UDA_QPSQ_SIGCOMPL_SHIFT)
+
+#define I40IW_UDA_PAYLOADLEN_SHIFT 0
+#define I40IW_UDA_PAYLOADLEN_MASK ((u64)0x3fff << I40IW_UDA_PAYLOADLEN_SHIFT)
+
+#define I40IW_UDA_HDRLEN_SHIFT 16
+#define I40IW_UDA_HDRLEN_MASK ((u64)0x1ff << I40IW_UDA_HDRLEN_SHIFT)
+
+#define I40IW_VLAN_TAG_VALID_SHIFT 50
+#define I40IW_VLAN_TAG_VALID_MASK ((u64)0x1 << I40IW_VLAN_TAG_VALID_SHIFT)
+
+#define I40IW_UDA_L3PROTO_SHIFT 0
+#define I40IW_UDA_L3PROTO_MASK ((u64)0x3 << I40IW_UDA_L3PROTO_SHIFT)
+
+#define I40IW_UDA_L4PROTO_SHIFT 16
+#define I40IW_UDA_L4PROTO_MASK ((u64)0x3 << I40IW_UDA_L4PROTO_SHIFT)
+
+#define I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT 44
+#define I40IW_UDA_QPSQ_DOLOOPBACK_MASK \
+ ((u64)0x1 << I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT)
+
+/* CQP SQ WQE common fields */
+#define I40IW_CQPSQ_OPCODE_SHIFT 32
+#define I40IW_CQPSQ_OPCODE_MASK (0x3fULL << I40IW_CQPSQ_OPCODE_SHIFT)
+
+#define I40IW_CQPSQ_WQEVALID_SHIFT 63
+#define I40IW_CQPSQ_WQEVALID_MASK (1ULL << I40IW_CQPSQ_WQEVALID_SHIFT)
+
+#define I40IW_CQPSQ_TPHVAL_SHIFT 0
+#define I40IW_CQPSQ_TPHVAL_MASK (0xffUL << I40IW_CQPSQ_TPHVAL_SHIFT)
+
+#define I40IW_CQPSQ_TPHEN_SHIFT 60
+#define I40IW_CQPSQ_TPHEN_MASK (1ULL << I40IW_CQPSQ_TPHEN_SHIFT)
+
+#define I40IW_CQPSQ_PBUFADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_PBUFADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* Create/Modify/Destroy QP */
+
+#define I40IW_CQPSQ_QP_NEWMSS_SHIFT 32
+#define I40IW_CQPSQ_QP_NEWMSS_MASK (0x3fffULL << I40IW_CQPSQ_QP_NEWMSS_SHIFT)
+
+#define I40IW_CQPSQ_QP_TERMLEN_SHIFT 48
+#define I40IW_CQPSQ_QP_TERMLEN_MASK (0xfULL << I40IW_CQPSQ_QP_TERMLEN_SHIFT)
+
+#define I40IW_CQPSQ_QP_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_QP_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_QP_QPID_SHIFT 0
+#define I40IW_CQPSQ_QP_QPID_MASK (0x3FFFFUL)
+/* I40IWCQ_QPID_MASK */
+
+#define I40IW_CQPSQ_QP_OP_SHIFT 32
+#define I40IW_CQPSQ_QP_OP_MASK I40IWCQ_OP_MASK
+
+#define I40IW_CQPSQ_QP_ORDVALID_SHIFT 42
+#define I40IW_CQPSQ_QP_ORDVALID_MASK (1ULL << I40IW_CQPSQ_QP_ORDVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_TOECTXVALID_SHIFT 43
+#define I40IW_CQPSQ_QP_TOECTXVALID_MASK \
+ (1ULL << I40IW_CQPSQ_QP_TOECTXVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT 44
+#define I40IW_CQPSQ_QP_CACHEDVARVALID_MASK \
+ (1ULL << I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_VQ_SHIFT 45
+#define I40IW_CQPSQ_QP_VQ_MASK (1ULL << I40IW_CQPSQ_QP_VQ_SHIFT)
+
+#define I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT 46
+#define I40IW_CQPSQ_QP_FORCELOOPBACK_MASK \
+ (1ULL << I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT)
+
+#define I40IW_CQPSQ_QP_CQNUMVALID_SHIFT 47
+#define I40IW_CQPSQ_QP_CQNUMVALID_MASK \
+ (1ULL << I40IW_CQPSQ_QP_CQNUMVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_QPTYPE_SHIFT 48
+#define I40IW_CQPSQ_QP_QPTYPE_MASK (0x3ULL << I40IW_CQPSQ_QP_QPTYPE_SHIFT)
+
+#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52
+#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT)
+
+#define I40IW_CQPSQ_QP_STATRSRC_SHIFT 53
+#define I40IW_CQPSQ_QP_STATRSRC_MASK (1ULL << I40IW_CQPSQ_QP_STATRSRC_SHIFT)
+
+#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54
+#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \
+ (1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT)
+
+#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT 55
+#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_MASK \
+ (1ULL << I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT)
+
+#define I40IW_CQPSQ_QP_TERMACT_SHIFT 56
+#define I40IW_CQPSQ_QP_TERMACT_MASK (0x3ULL << I40IW_CQPSQ_QP_TERMACT_SHIFT)
+
+#define I40IW_CQPSQ_QP_RESETCON_SHIFT 58
+#define I40IW_CQPSQ_QP_RESETCON_MASK (1ULL << I40IW_CQPSQ_QP_RESETCON_SHIFT)
+
+#define I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT 59
+#define I40IW_CQPSQ_QP_ARPTABIDXVALID_MASK \
+ (1ULL << I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT 60
+#define I40IW_CQPSQ_QP_NEXTIWSTATE_MASK \
+ (0x7ULL << I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT)
+
+#define I40IW_CQPSQ_QP_DBSHADOWADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_QP_DBSHADOWADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* Create/Modify/Destroy CQ */
+#define I40IW_CQPSQ_CQ_CQSIZE_SHIFT 0
+#define I40IW_CQPSQ_CQ_CQSIZE_MASK (0x3ffffUL << I40IW_CQPSQ_CQ_CQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0
+#define I40IW_CQPSQ_CQ_CQCTX_MASK \
+ (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT)
+
+#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT 0
+#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_MASK \
+ (0x3ffffUL << I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CEQID_SHIFT 24
+#define I40IW_CQPSQ_CQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CQ_CEQID_SHIFT)
+
+#define I40IW_CQPSQ_CQ_OP_SHIFT 32
+#define I40IW_CQPSQ_CQ_OP_MASK (0x3fULL << I40IW_CQPSQ_CQ_OP_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CQRESIZE_SHIFT 43
+#define I40IW_CQPSQ_CQ_CQRESIZE_MASK (1ULL << I40IW_CQPSQ_CQ_CQRESIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT 44
+#define I40IW_CQPSQ_CQ_LPBLSIZE_MASK (3ULL << I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT 46
+#define I40IW_CQPSQ_CQ_CHKOVERFLOW_MASK \
+ (1ULL << I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT)
+
+#define I40IW_CQPSQ_CQ_VIRTMAP_SHIFT 47
+#define I40IW_CQPSQ_CQ_VIRTMAP_MASK (1ULL << I40IW_CQPSQ_CQ_VIRTMAP_SHIFT)
+
+#define I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT 48
+#define I40IW_CQPSQ_CQ_ENCEQEMASK_MASK \
+ (1ULL << I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT 49
+#define I40IW_CQPSQ_CQ_CEQIDVALID_MASK \
+ (1ULL << I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT)
+
+#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT 61
+#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_MASK \
+ (1ULL << I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT)
+
+/* Create/Modify/Destroy Shared Receive Queue */
+
+#define I40IW_CQPSQ_SRQ_RQSIZE_SHIFT 0
+#define I40IW_CQPSQ_SRQ_RQSIZE_MASK (0xfUL << I40IW_CQPSQ_SRQ_RQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT 4
+#define I40IW_CQPSQ_SRQ_RQWQESIZE_MASK \
+ (0x7UL << I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT 32
+#define I40IW_CQPSQ_SRQ_SRQLIMIT_MASK \
+ (0xfffULL << I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_SRQ_SRQCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_SRQ_PDID_SHIFT 16
+#define I40IW_CQPSQ_SRQ_PDID_MASK \
+ (0x7fffULL << I40IW_CQPSQ_SRQ_PDID_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQID_SHIFT 0
+#define I40IW_CQPSQ_SRQ_SRQID_MASK (0x7fffUL << I40IW_CQPSQ_SRQ_SRQID_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_SRQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_SRQ_VIRTMAP_SHIFT I40IW_CQPSQ_CQ_VIRTMAP_SHIFT
+#define I40IW_CQPSQ_SRQ_VIRTMAP_MASK I40IW_CQPSQ_CQ_VIRTMAP_MASK
+
+#define I40IW_CQPSQ_SRQ_TPHEN_SHIFT I40IW_CQPSQ_TPHEN_SHIFT
+#define I40IW_CQPSQ_SRQ_TPHEN_MASK I40IW_CQPSQ_TPHEN_MASK
+
+#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT 61
+#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_MASK \
+ (1ULL << I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT 6
+#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_MASK \
+ (0x3ffffffffffffffULL << I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_MASK \
+ (0xfffffffUL << I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Allocate/Register/Register Shared/Deallocate Stag */
+#define I40IW_CQPSQ_STAG_VA_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_STAG_VA_FBO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_STAG_STAGLEN_SHIFT 0
+#define I40IW_CQPSQ_STAG_STAGLEN_MASK \
+ (0x3fffffffffffULL << I40IW_CQPSQ_STAG_STAGLEN_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PDID_SHIFT 48
+#define I40IW_CQPSQ_STAG_PDID_MASK (0x7fffULL << I40IW_CQPSQ_STAG_PDID_SHIFT)
+
+#define I40IW_CQPSQ_STAG_KEY_SHIFT 0
+#define I40IW_CQPSQ_STAG_KEY_MASK (0xffUL << I40IW_CQPSQ_STAG_KEY_SHIFT)
+
+#define I40IW_CQPSQ_STAG_IDX_SHIFT 8
+#define I40IW_CQPSQ_STAG_IDX_MASK (0xffffffUL << I40IW_CQPSQ_STAG_IDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT 32
+#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_MASK \
+ (0xffffffULL << I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_MR_SHIFT 43
+#define I40IW_CQPSQ_STAG_MR_MASK (1ULL << I40IW_CQPSQ_STAG_MR_SHIFT)
+
+#define I40IW_CQPSQ_STAG_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_STAG_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT 46
+#define I40IW_CQPSQ_STAG_HPAGESIZE_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT)
+
+#define I40IW_CQPSQ_STAG_ARIGHTS_SHIFT 48
+#define I40IW_CQPSQ_STAG_ARIGHTS_MASK \
+ (0x1fULL << I40IW_CQPSQ_STAG_ARIGHTS_SHIFT)
+
+#define I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT 53
+#define I40IW_CQPSQ_STAG_REMACCENABLED_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT)
+
+#define I40IW_CQPSQ_STAG_VABASEDTO_SHIFT 59
+#define I40IW_CQPSQ_STAG_VABASEDTO_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_VABASEDTO_SHIFT)
+
+#define I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT 60
+#define I40IW_CQPSQ_STAG_USEHMCFNIDX_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_USEPFRID_SHIFT 61
+#define I40IW_CQPSQ_STAG_USEPFRID_MASK \
+ (1ULL << I40IW_CQPSQ_STAG_USEPFRID_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PBA_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_STAG_PBA_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT 0
+#define I40IW_CQPSQ_STAG_HMCFNIDX_MASK \
+ (0x3fUL << I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_MASK \
+ (0xfffffffUL << I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT)
+
+/* Query stag */
+#define I40IW_CQPSQ_QUERYSTAG_IDX_SHIFT I40IW_CQPSQ_STAG_IDX_SHIFT
+#define I40IW_CQPSQ_QUERYSTAG_IDX_MASK I40IW_CQPSQ_STAG_IDX_MASK
+
+/* Allocate Local IP Address Entry */
+
+/* Manage Local IP Address Table - MLIPA */
+#define I40IW_CQPSQ_MLIPA_IPV6LO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_MLIPA_IPV6LO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_MLIPA_IPV6HI_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_MLIPA_IPV6HI_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_MLIPA_IPV4_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_IPV4_MASK \
+ (0xffffffffUL << I40IW_CQPSQ_MLIPA_IPV4_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_MASK \
+ (0x3fUL << I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT 42
+#define I40IW_CQPSQ_MLIPA_IPV4VALID_MASK \
+ (1ULL << I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT 43
+#define I40IW_CQPSQ_MLIPA_IPV6VALID_MASK \
+ (1ULL << I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT 62
+#define I40IW_CQPSQ_MLIPA_FREEENTRY_MASK \
+ (1ULL << I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT 61
+#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_MASK \
+ (1ULL << I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC0_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_MAC0_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC0_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC1_SHIFT 8
+#define I40IW_CQPSQ_MLIPA_MAC1_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC1_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC2_SHIFT 16
+#define I40IW_CQPSQ_MLIPA_MAC2_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC2_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC3_SHIFT 24
+#define I40IW_CQPSQ_MLIPA_MAC3_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC3_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC4_SHIFT 32
+#define I40IW_CQPSQ_MLIPA_MAC4_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC4_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC5_SHIFT 40
+#define I40IW_CQPSQ_MLIPA_MAC5_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC5_SHIFT)
+
+/* Manage ARP Table - MAT */
+#define I40IW_CQPSQ_MAT_REACHMAX_SHIFT 0
+#define I40IW_CQPSQ_MAT_REACHMAX_MASK \
+ (0xffffffffUL << I40IW_CQPSQ_MAT_REACHMAX_SHIFT)
+
+#define I40IW_CQPSQ_MAT_MACADDR_SHIFT 0
+#define I40IW_CQPSQ_MAT_MACADDR_MASK \
+ (0xffffffffffffULL << I40IW_CQPSQ_MAT_MACADDR_SHIFT)
+
+#define I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT 0
+#define I40IW_CQPSQ_MAT_ARPENTRYIDX_MASK \
+ (0xfffUL << I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT)
+
+#define I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT 42
+#define I40IW_CQPSQ_MAT_ENTRYVALID_MASK \
+ (1ULL << I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT)
+
+#define I40IW_CQPSQ_MAT_PERMANENT_SHIFT 43
+#define I40IW_CQPSQ_MAT_PERMANENT_MASK \
+ (1ULL << I40IW_CQPSQ_MAT_PERMANENT_SHIFT)
+
+#define I40IW_CQPSQ_MAT_QUERY_SHIFT 44
+#define I40IW_CQPSQ_MAT_QUERY_MASK (1ULL << I40IW_CQPSQ_MAT_QUERY_SHIFT)
+
+/* Manage VF PBLE Backing Pages - MVPBP */
+#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT 0
+#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_MASK \
+ (0x3ffULL << I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT 16
+#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_MASK \
+ (0x1ffULL << I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_SD_INX_SHIFT 32
+#define I40IW_CQPSQ_MVPBP_SD_INX_MASK \
+ (0xfffULL << I40IW_CQPSQ_MVPBP_SD_INX_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT 62
+#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_MASK \
+ (0x1ULL << I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT 3
+#define I40IW_CQPSQ_MVPBP_PD_PLPBA_MASK \
+ (0x1fffffffffffffffULL << I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT)
+
+/* Manage Push Page - MPP */
+#define I40IW_INVALID_PUSH_PAGE_INDEX 0xffff
+
+#define I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT 0
+#define I40IW_CQPSQ_MPP_QS_HANDLE_MASK (0xffffUL << \
+ I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT)
+
+#define I40IW_CQPSQ_MPP_PPIDX_SHIFT 0
+#define I40IW_CQPSQ_MPP_PPIDX_MASK (0x3ffUL << I40IW_CQPSQ_MPP_PPIDX_SHIFT)
+
+#define I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT 62
+#define I40IW_CQPSQ_MPP_FREE_PAGE_MASK (1ULL << I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT)
+
+/* Upload Context - UCTX */
+#define I40IW_CQPSQ_UCTX_QPCTXADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_UCTX_QPCTXADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_UCTX_QPID_SHIFT 0
+#define I40IW_CQPSQ_UCTX_QPID_MASK (0x3ffffUL << I40IW_CQPSQ_UCTX_QPID_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_QPTYPE_SHIFT 48
+#define I40IW_CQPSQ_UCTX_QPTYPE_MASK (0xfULL << I40IW_CQPSQ_UCTX_QPTYPE_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT 61
+#define I40IW_CQPSQ_UCTX_RAWFORMAT_MASK \
+ (1ULL << I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT 62
+#define I40IW_CQPSQ_UCTX_FREEZEQP_MASK \
+ (1ULL << I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT)
+
+/* Manage HMC PM Function Table - MHMC */
+#define I40IW_CQPSQ_MHMC_VFIDX_SHIFT 0
+#define I40IW_CQPSQ_MHMC_VFIDX_MASK (0x7fUL << I40IW_CQPSQ_MHMC_VFIDX_SHIFT)
+
+#define I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT 62
+#define I40IW_CQPSQ_MHMC_FREEPMFN_MASK \
+ (1ULL << I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT)
+
+/* Set HMC Resource Profile - SHMCRP */
+#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT 0
+#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_MASK \
+ (0x7ULL << I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT)
+#define I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT 32
+#define I40IW_CQPSQ_SHMCRP_VFNUM_MASK (0x3fULL << I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT)
+
+/* Create/Destroy CEQ */
+#define I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT 0
+#define I40IW_CQPSQ_CEQ_CEQSIZE_MASK \
+ (0x1ffffUL << I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_CEQID_SHIFT 0
+#define I40IW_CQPSQ_CEQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CEQ_CEQID_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_CEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_CEQ_VMAP_SHIFT 47
+#define I40IW_CQPSQ_CEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_CEQ_VMAP_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_MASK \
+ (0xfffffffUL << I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Create/Destroy AEQ */
+#define I40IW_CQPSQ_AEQ_AEQECNT_SHIFT 0
+#define I40IW_CQPSQ_AEQ_AEQECNT_MASK \
+ (0x7ffffUL << I40IW_CQPSQ_AEQ_AEQECNT_SHIFT)
+
+#define I40IW_CQPSQ_AEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_AEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_AEQ_VMAP_SHIFT 47
+#define I40IW_CQPSQ_AEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_AEQ_VMAP_SHIFT)
+
+#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_MASK \
+ (0xfffffffUL << I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Commit FPM Values - CFPM */
+#define I40IW_CQPSQ_CFPM_HMCFNID_SHIFT 0
+#define I40IW_CQPSQ_CFPM_HMCFNID_MASK (0x3fUL << I40IW_CQPSQ_CFPM_HMCFNID_SHIFT)
+
+/* Flush WQEs - FWQE */
+#define I40IW_CQPSQ_FWQE_AECODE_SHIFT 0
+#define I40IW_CQPSQ_FWQE_AECODE_MASK (0xffffUL << I40IW_CQPSQ_FWQE_AECODE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_AESOURCE_SHIFT 16
+#define I40IW_CQPSQ_FWQE_AESOURCE_MASK \
+ (0xfUL << I40IW_CQPSQ_FWQE_AESOURCE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_RQMNERR_SHIFT 0
+#define I40IW_CQPSQ_FWQE_RQMNERR_MASK \
+ (0xffffUL << I40IW_CQPSQ_FWQE_RQMNERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_RQMJERR_SHIFT 16
+#define I40IW_CQPSQ_FWQE_RQMJERR_MASK \
+ (0xffffUL << I40IW_CQPSQ_FWQE_RQMJERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_SQMNERR_SHIFT 32
+#define I40IW_CQPSQ_FWQE_SQMNERR_MASK \
+ (0xffffULL << I40IW_CQPSQ_FWQE_SQMNERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_SQMJERR_SHIFT 48
+#define I40IW_CQPSQ_FWQE_SQMJERR_MASK \
+ (0xffffULL << I40IW_CQPSQ_FWQE_SQMJERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_QPID_SHIFT 0
+#define I40IW_CQPSQ_FWQE_QPID_MASK (0x3ffffULL << I40IW_CQPSQ_FWQE_QPID_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT 59
+#define I40IW_CQPSQ_FWQE_GENERATE_AE_MASK (1ULL << \
+ I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT 60
+#define I40IW_CQPSQ_FWQE_USERFLCODE_MASK \
+ (1ULL << I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT 61
+#define I40IW_CQPSQ_FWQE_FLUSHSQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT 62
+#define I40IW_CQPSQ_FWQE_FLUSHRQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT)
+
+/* Manage Accelerated Port Table - MAPT */
+#define I40IW_CQPSQ_MAPT_PORT_SHIFT 0
+#define I40IW_CQPSQ_MAPT_PORT_MASK (0xffffUL << I40IW_CQPSQ_MAPT_PORT_SHIFT)
+
+#define I40IW_CQPSQ_MAPT_ADDPORT_SHIFT 62
+#define I40IW_CQPSQ_MAPT_ADDPORT_MASK (1ULL << I40IW_CQPSQ_MAPT_ADDPORT_SHIFT)
+
+/* Update Protocol Engine SDs */
+#define I40IW_CQPSQ_UPESD_SDCMD_SHIFT 0
+#define I40IW_CQPSQ_UPESD_SDCMD_MASK (0xffffffffUL << I40IW_CQPSQ_UPESD_SDCMD_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT 0
+#define I40IW_CQPSQ_UPESD_SDDATALOW_MASK \
+ (0xffffffffUL << I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT 32
+#define I40IW_CQPSQ_UPESD_SDDATAHI_MASK \
+ (0xffffffffULL << I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT)
+#define I40IW_CQPSQ_UPESD_HMCFNID_SHIFT 0
+#define I40IW_CQPSQ_UPESD_HMCFNID_MASK \
+ (0x3fUL << I40IW_CQPSQ_UPESD_HMCFNID_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT 63
+#define I40IW_CQPSQ_UPESD_ENTRY_VALID_MASK \
+ ((u64)1 << I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT 0
+#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_MASK \
+ (0xfUL << I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT 7
+#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_MASK \
+ (0x1UL << I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT)
+
+/* Suspend QP */
+#define I40IW_CQPSQ_SUSPENDQP_QPID_SHIFT 0
+#define I40IW_CQPSQ_SUSPENDQP_QPID_MASK (0x3FFFFUL)
+/* I40IWCQ_QPID_MASK */
+
+/* Resume QP */
+#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT 0
+#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_MASK \
+ (0xffffffffUL << I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT)
+
+#define I40IW_CQPSQ_RESUMEQP_QPID_SHIFT 0
+#define I40IW_CQPSQ_RESUMEQP_QPID_MASK (0x3FFFFUL)
+/* I40IWCQ_QPID_MASK */
+
+/* IW QP Context */
+#define I40IWQPC_DDP_VER_SHIFT 0
+#define I40IWQPC_DDP_VER_MASK (3UL << I40IWQPC_DDP_VER_SHIFT)
+
+#define I40IWQPC_SNAP_SHIFT 2
+#define I40IWQPC_SNAP_MASK (1UL << I40IWQPC_SNAP_SHIFT)
+
+#define I40IWQPC_IPV4_SHIFT 3
+#define I40IWQPC_IPV4_MASK (1UL << I40IWQPC_IPV4_SHIFT)
+
+#define I40IWQPC_NONAGLE_SHIFT 4
+#define I40IWQPC_NONAGLE_MASK (1UL << I40IWQPC_NONAGLE_SHIFT)
+
+#define I40IWQPC_INSERTVLANTAG_SHIFT 5
+#define I40IWQPC_INSERTVLANTAG_MASK (1UL << I40IWQPC_INSERTVLANTAG_SHIFT)
+
+#define I40IWQPC_USESRQ_SHIFT 6
+#define I40IWQPC_USESRQ_MASK (1UL << I40IWQPC_USESRQ_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_SHIFT 7
+#define I40IWQPC_TIMESTAMP_MASK (1UL << I40IWQPC_TIMESTAMP_SHIFT)
+
+#define I40IWQPC_RQWQESIZE_SHIFT 8
+#define I40IWQPC_RQWQESIZE_MASK (3UL << I40IWQPC_RQWQESIZE_SHIFT)
+
+#define I40IWQPC_INSERTL2TAG2_SHIFT 11
+#define I40IWQPC_INSERTL2TAG2_MASK (1UL << I40IWQPC_INSERTL2TAG2_SHIFT)
+
+#define I40IWQPC_LIMIT_SHIFT 12
+#define I40IWQPC_LIMIT_MASK (3UL << I40IWQPC_LIMIT_SHIFT)
+
+#define I40IWQPC_DROPOOOSEG_SHIFT 15
+#define I40IWQPC_DROPOOOSEG_MASK (1UL << I40IWQPC_DROPOOOSEG_SHIFT)
+
+#define I40IWQPC_DUPACK_THRESH_SHIFT 16
+#define I40IWQPC_DUPACK_THRESH_MASK (7UL << I40IWQPC_DUPACK_THRESH_SHIFT)
+
+#define I40IWQPC_ERR_RQ_IDX_VALID_SHIFT 19
+#define I40IWQPC_ERR_RQ_IDX_VALID_MASK (1UL << I40IWQPC_ERR_RQ_IDX_VALID_SHIFT)
+
+#define I40IWQPC_DIS_VLAN_CHECKS_SHIFT 19
+#define I40IWQPC_DIS_VLAN_CHECKS_MASK (7UL << I40IWQPC_DIS_VLAN_CHECKS_SHIFT)
+
+#define I40IWQPC_RCVTPHEN_SHIFT 28
+#define I40IWQPC_RCVTPHEN_MASK (1UL << I40IWQPC_RCVTPHEN_SHIFT)
+
+#define I40IWQPC_XMITTPHEN_SHIFT 29
+#define I40IWQPC_XMITTPHEN_MASK (1ULL << I40IWQPC_XMITTPHEN_SHIFT)
+
+#define I40IWQPC_RQTPHEN_SHIFT 30
+#define I40IWQPC_RQTPHEN_MASK (1UL << I40IWQPC_RQTPHEN_SHIFT)
+
+#define I40IWQPC_SQTPHEN_SHIFT 31
+#define I40IWQPC_SQTPHEN_MASK (1ULL << I40IWQPC_SQTPHEN_SHIFT)
+
+#define I40IWQPC_PPIDX_SHIFT 32
+#define I40IWQPC_PPIDX_MASK (0x3ffULL << I40IWQPC_PPIDX_SHIFT)
+
+#define I40IWQPC_PMENA_SHIFT 47
+#define I40IWQPC_PMENA_MASK (1ULL << I40IWQPC_PMENA_SHIFT)
+
+#define I40IWQPC_RDMAP_VER_SHIFT 62
+#define I40IWQPC_RDMAP_VER_MASK (3ULL << I40IWQPC_RDMAP_VER_SHIFT)
+
+#define I40IWQPC_SQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_SQADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_RQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_RQADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_TTL_SHIFT 0
+#define I40IWQPC_TTL_MASK (0xffUL << I40IWQPC_TTL_SHIFT)
+
+#define I40IWQPC_RQSIZE_SHIFT 8
+#define I40IWQPC_RQSIZE_MASK (0xfUL << I40IWQPC_RQSIZE_SHIFT)
+
+#define I40IWQPC_SQSIZE_SHIFT 12
+#define I40IWQPC_SQSIZE_MASK (0xfUL << I40IWQPC_SQSIZE_SHIFT)
+
+#define I40IWQPC_SRCMACADDRIDX_SHIFT 16
+#define I40IWQPC_SRCMACADDRIDX_MASK (0x3fUL << I40IWQPC_SRCMACADDRIDX_SHIFT)
+
+#define I40IWQPC_AVOIDSTRETCHACK_SHIFT 23
+#define I40IWQPC_AVOIDSTRETCHACK_MASK (1UL << I40IWQPC_AVOIDSTRETCHACK_SHIFT)
+
+#define I40IWQPC_TOS_SHIFT 24
+#define I40IWQPC_TOS_MASK (0xffUL << I40IWQPC_TOS_SHIFT)
+
+#define I40IWQPC_SRCPORTNUM_SHIFT 32
+#define I40IWQPC_SRCPORTNUM_MASK (0xffffULL << I40IWQPC_SRCPORTNUM_SHIFT)
+
+#define I40IWQPC_DESTPORTNUM_SHIFT 48
+#define I40IWQPC_DESTPORTNUM_MASK (0xffffULL << I40IWQPC_DESTPORTNUM_SHIFT)
+
+#define I40IWQPC_DESTIPADDR0_SHIFT 32
+#define I40IWQPC_DESTIPADDR0_MASK \
+ (0xffffffffULL << I40IWQPC_DESTIPADDR0_SHIFT)
+
+#define I40IWQPC_DESTIPADDR1_SHIFT 0
+#define I40IWQPC_DESTIPADDR1_MASK \
+ (0xffffffffULL << I40IWQPC_DESTIPADDR1_SHIFT)
+
+#define I40IWQPC_DESTIPADDR2_SHIFT 32
+#define I40IWQPC_DESTIPADDR2_MASK \
+ (0xffffffffULL << I40IWQPC_DESTIPADDR2_SHIFT)
+
+#define I40IWQPC_DESTIPADDR3_SHIFT 0
+#define I40IWQPC_DESTIPADDR3_MASK \
+ (0xffffffffULL << I40IWQPC_DESTIPADDR3_SHIFT)
+
+#define I40IWQPC_SNDMSS_SHIFT 16
+#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT)
+
+#define I40IWQPC_VLANTAG_SHIFT 32
+#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
+
+#define I40IWQPC_ARPIDX_SHIFT 48
+#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+
+#define I40IWQPC_FLOWLABEL_SHIFT 0
+#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
+
+#define I40IWQPC_WSCALE_SHIFT 20
+#define I40IWQPC_WSCALE_MASK (1UL << I40IWQPC_WSCALE_SHIFT)
+
+#define I40IWQPC_KEEPALIVE_SHIFT 21
+#define I40IWQPC_KEEPALIVE_MASK (1UL << I40IWQPC_KEEPALIVE_SHIFT)
+
+#define I40IWQPC_IGNORE_TCP_OPT_SHIFT 22
+#define I40IWQPC_IGNORE_TCP_OPT_MASK (1UL << I40IWQPC_IGNORE_TCP_OPT_SHIFT)
+
+#define I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT 23
+#define I40IWQPC_IGNORE_TCP_UNS_OPT_MASK \
+ (1UL << I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT)
+
+#define I40IWQPC_TCPSTATE_SHIFT 28
+#define I40IWQPC_TCPSTATE_MASK (0xfUL << I40IWQPC_TCPSTATE_SHIFT)
+
+#define I40IWQPC_RCVSCALE_SHIFT 32
+#define I40IWQPC_RCVSCALE_MASK (0xfULL << I40IWQPC_RCVSCALE_SHIFT)
+
+#define I40IWQPC_SNDSCALE_SHIFT 40
+#define I40IWQPC_SNDSCALE_MASK (0xfULL << I40IWQPC_SNDSCALE_SHIFT)
+
+#define I40IWQPC_PDIDX_SHIFT 48
+#define I40IWQPC_PDIDX_MASK (0x7fffULL << I40IWQPC_PDIDX_SHIFT)
+
+#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT 16
+#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_MASK \
+ (0xffUL << I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT)
+
+#define I40IWQPC_KEEPALIVE_INTERVAL_SHIFT 24
+#define I40IWQPC_KEEPALIVE_INTERVAL_MASK \
+ (0xffUL << I40IWQPC_KEEPALIVE_INTERVAL_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_RECENT_SHIFT 0
+#define I40IWQPC_TIMESTAMP_RECENT_MASK \
+ (0xffffffffUL << I40IWQPC_TIMESTAMP_RECENT_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_AGE_SHIFT 32
+#define I40IWQPC_TIMESTAMP_AGE_MASK \
+ (0xffffffffULL << I40IWQPC_TIMESTAMP_AGE_SHIFT)
+
+#define I40IWQPC_SNDNXT_SHIFT 0
+#define I40IWQPC_SNDNXT_MASK (0xffffffffUL << I40IWQPC_SNDNXT_SHIFT)
+
+#define I40IWQPC_SNDWND_SHIFT 32
+#define I40IWQPC_SNDWND_MASK (0xffffffffULL << I40IWQPC_SNDWND_SHIFT)
+
+#define I40IWQPC_RCVNXT_SHIFT 0
+#define I40IWQPC_RCVNXT_MASK (0xffffffffUL << I40IWQPC_RCVNXT_SHIFT)
+
+#define I40IWQPC_RCVWND_SHIFT 32
+#define I40IWQPC_RCVWND_MASK (0xffffffffULL << I40IWQPC_RCVWND_SHIFT)
+
+#define I40IWQPC_SNDMAX_SHIFT 0
+#define I40IWQPC_SNDMAX_MASK (0xffffffffUL << I40IWQPC_SNDMAX_SHIFT)
+
+#define I40IWQPC_SNDUNA_SHIFT 32
+#define I40IWQPC_SNDUNA_MASK (0xffffffffULL << I40IWQPC_SNDUNA_SHIFT)
+
+#define I40IWQPC_SRTT_SHIFT 0
+#define I40IWQPC_SRTT_MASK (0xffffffffUL << I40IWQPC_SRTT_SHIFT)
+
+#define I40IWQPC_RTTVAR_SHIFT 32
+#define I40IWQPC_RTTVAR_MASK (0xffffffffULL << I40IWQPC_RTTVAR_SHIFT)
+
+#define I40IWQPC_SSTHRESH_SHIFT 0
+#define I40IWQPC_SSTHRESH_MASK (0xffffffffUL << I40IWQPC_SSTHRESH_SHIFT)
+
+#define I40IWQPC_CWND_SHIFT 32
+#define I40IWQPC_CWND_MASK (0xffffffffULL << I40IWQPC_CWND_SHIFT)
+
+#define I40IWQPC_SNDWL1_SHIFT 0
+#define I40IWQPC_SNDWL1_MASK (0xffffffffUL << I40IWQPC_SNDWL1_SHIFT)
+
+#define I40IWQPC_SNDWL2_SHIFT 32
+#define I40IWQPC_SNDWL2_MASK (0xffffffffULL << I40IWQPC_SNDWL2_SHIFT)
+
+#define I40IWQPC_ERR_RQ_IDX_SHIFT 32
+#define I40IWQPC_ERR_RQ_IDX_MASK (0x3fffULL << I40IWQPC_ERR_RQ_IDX_SHIFT)
+
+#define I40IWQPC_MAXSNDWND_SHIFT 0
+#define I40IWQPC_MAXSNDWND_MASK (0xffffffffUL << I40IWQPC_MAXSNDWND_SHIFT)
+
+#define I40IWQPC_REXMIT_THRESH_SHIFT 48
+#define I40IWQPC_REXMIT_THRESH_MASK (0x3fULL << I40IWQPC_REXMIT_THRESH_SHIFT)
+
+#define I40IWQPC_TXCQNUM_SHIFT 0
+#define I40IWQPC_TXCQNUM_MASK (0x1ffffUL << I40IWQPC_TXCQNUM_SHIFT)
+
+#define I40IWQPC_RXCQNUM_SHIFT 32
+#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
+
+#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_LASTBYTESENT_SHIFT 0
+#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
+
+#define I40IWQPC_SRQID_SHIFT 32
+#define I40IWQPC_SRQID_MASK (0xffULL << I40IWQPC_SRQID_SHIFT)
+
+#define I40IWQPC_ORDSIZE_SHIFT 0
+#define I40IWQPC_ORDSIZE_MASK (0x7fUL << I40IWQPC_ORDSIZE_SHIFT)
+
+#define I40IWQPC_IRDSIZE_SHIFT 16
+#define I40IWQPC_IRDSIZE_MASK (0x3UL << I40IWQPC_IRDSIZE_SHIFT)
+
+#define I40IWQPC_WRRDRSPOK_SHIFT 20
+#define I40IWQPC_WRRDRSPOK_MASK (1UL << I40IWQPC_WRRDRSPOK_SHIFT)
+
+#define I40IWQPC_RDOK_SHIFT 21
+#define I40IWQPC_RDOK_MASK (1UL << I40IWQPC_RDOK_SHIFT)
+
+#define I40IWQPC_SNDMARKERS_SHIFT 22
+#define I40IWQPC_SNDMARKERS_MASK (1UL << I40IWQPC_SNDMARKERS_SHIFT)
+
+#define I40IWQPC_BINDEN_SHIFT 23
+#define I40IWQPC_BINDEN_MASK (1UL << I40IWQPC_BINDEN_SHIFT)
+
+#define I40IWQPC_FASTREGEN_SHIFT 24
+#define I40IWQPC_FASTREGEN_MASK (1UL << I40IWQPC_FASTREGEN_SHIFT)
+
+#define I40IWQPC_PRIVEN_SHIFT 25
+#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
+
+#define I40IWQPC_LSMMPRESENT_SHIFT 26
+#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)
+
+#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
+#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
+
+#define I40IWQPC_IWARPMODE_SHIFT 28
+#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
+
+#define I40IWQPC_RCVMARKERS_SHIFT 29
+#define I40IWQPC_RCVMARKERS_MASK (1UL << I40IWQPC_RCVMARKERS_SHIFT)
+
+#define I40IWQPC_ALIGNHDRS_SHIFT 30
+#define I40IWQPC_ALIGNHDRS_MASK (1UL << I40IWQPC_ALIGNHDRS_SHIFT)
+
+#define I40IWQPC_RCVNOMPACRC_SHIFT 31
+#define I40IWQPC_RCVNOMPACRC_MASK (1UL << I40IWQPC_RCVNOMPACRC_SHIFT)
+
+#define I40IWQPC_RCVMARKOFFSET_SHIFT 33
+#define I40IWQPC_RCVMARKOFFSET_MASK (0x1ffULL << I40IWQPC_RCVMARKOFFSET_SHIFT)
+
+#define I40IWQPC_SNDMARKOFFSET_SHIFT 48
+#define I40IWQPC_SNDMARKOFFSET_MASK (0x1ffULL << I40IWQPC_SNDMARKOFFSET_SHIFT)
+
+#define I40IWQPC_QPCOMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_QPCOMPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_SQTPHVAL_SHIFT 0
+#define I40IWQPC_SQTPHVAL_MASK (0xffUL << I40IWQPC_SQTPHVAL_SHIFT)
+
+#define I40IWQPC_RQTPHVAL_SHIFT 8
+#define I40IWQPC_RQTPHVAL_MASK (0xffUL << I40IWQPC_RQTPHVAL_SHIFT)
+
+#define I40IWQPC_QSHANDLE_SHIFT 16
+#define I40IWQPC_QSHANDLE_MASK (0x3ffUL << I40IWQPC_QSHANDLE_SHIFT)
+
+#define I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT 32
+#define I40IWQPC_EXCEPTION_LAN_QUEUE_MASK (0xfffULL << \
+ I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR3_SHIFT 0
+#define I40IWQPC_LOCAL_IPADDR3_MASK \
+ (0xffffffffUL << I40IWQPC_LOCAL_IPADDR3_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR2_SHIFT 32
+#define I40IWQPC_LOCAL_IPADDR2_MASK \
+ (0xffffffffULL << I40IWQPC_LOCAL_IPADDR2_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR1_SHIFT 0
+#define I40IWQPC_LOCAL_IPADDR1_MASK \
+ (0xffffffffUL << I40IWQPC_LOCAL_IPADDR1_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR0_SHIFT 32
+#define I40IWQPC_LOCAL_IPADDR0_MASK \
+ (0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT)
+
+/* wqe size considering 32 bytes per wqe */
+#define I40IWQP_SW_MIN_WQSIZE 4 /* 128 bytes */
+#define I40IWQP_SW_MAX_WQSIZE 16384 /* 524288 bytes */
+
+#define I40IWQP_OP_RDMA_WRITE 0
+#define I40IWQP_OP_RDMA_READ 1
+#define I40IWQP_OP_RDMA_SEND 3
+#define I40IWQP_OP_RDMA_SEND_INV 4
+#define I40IWQP_OP_RDMA_SEND_SOL_EVENT 5
+#define I40IWQP_OP_RDMA_SEND_SOL_EVENT_INV 6
+#define I40IWQP_OP_BIND_MW 8
+#define I40IWQP_OP_FAST_REGISTER 9
+#define I40IWQP_OP_LOCAL_INVALIDATE 10
+#define I40IWQP_OP_RDMA_READ_LOC_INV 11
+#define I40IWQP_OP_NOP 12
+
+#define I40IW_RSVD_SHIFT 41
+#define I40IW_RSVD_MASK (0x7fffULL << I40IW_RSVD_SHIFT)
+
+/* iwarp QP SQ WQE common fields */
+#define I40IWQPSQ_OPCODE_SHIFT 32
+#define I40IWQPSQ_OPCODE_MASK (0x3fULL << I40IWQPSQ_OPCODE_SHIFT)
+
+#define I40IWQPSQ_ADDFRAGCNT_SHIFT 38
+#define I40IWQPSQ_ADDFRAGCNT_MASK (0x7ULL << I40IWQPSQ_ADDFRAGCNT_SHIFT)
+
+#define I40IWQPSQ_PUSHWQE_SHIFT 56
+#define I40IWQPSQ_PUSHWQE_MASK (1ULL << I40IWQPSQ_PUSHWQE_SHIFT)
+
+#define I40IWQPSQ_STREAMMODE_SHIFT 58
+#define I40IWQPSQ_STREAMMODE_MASK (1ULL << I40IWQPSQ_STREAMMODE_SHIFT)
+
+#define I40IWQPSQ_WAITFORRCVPDU_SHIFT 59
+#define I40IWQPSQ_WAITFORRCVPDU_MASK (1ULL << I40IWQPSQ_WAITFORRCVPDU_SHIFT)
+
+#define I40IWQPSQ_READFENCE_SHIFT 60
+#define I40IWQPSQ_READFENCE_MASK (1ULL << I40IWQPSQ_READFENCE_SHIFT)
+
+#define I40IWQPSQ_LOCALFENCE_SHIFT 61
+#define I40IWQPSQ_LOCALFENCE_MASK (1ULL << I40IWQPSQ_LOCALFENCE_SHIFT)
+
+#define I40IWQPSQ_SIGCOMPL_SHIFT 62
+#define I40IWQPSQ_SIGCOMPL_MASK (1ULL << I40IWQPSQ_SIGCOMPL_SHIFT)
+
+#define I40IWQPSQ_VALID_SHIFT 63
+#define I40IWQPSQ_VALID_MASK (1ULL << I40IWQPSQ_VALID_SHIFT)
+
+#define I40IWQPSQ_FRAG_TO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_FRAG_TO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPSQ_FRAG_LEN_SHIFT 0
+#define I40IWQPSQ_FRAG_LEN_MASK (0xffffffffUL << I40IWQPSQ_FRAG_LEN_SHIFT)
+
+#define I40IWQPSQ_FRAG_STAG_SHIFT 32
+#define I40IWQPSQ_FRAG_STAG_MASK (0xffffffffULL << I40IWQPSQ_FRAG_STAG_SHIFT)
+
+#define I40IWQPSQ_REMSTAGINV_SHIFT 0
+#define I40IWQPSQ_REMSTAGINV_MASK (0xffffffffUL << I40IWQPSQ_REMSTAGINV_SHIFT)
+
+#define I40IWQPSQ_INLINEDATAFLAG_SHIFT 57
+#define I40IWQPSQ_INLINEDATAFLAG_MASK (1ULL << I40IWQPSQ_INLINEDATAFLAG_SHIFT)
+
+#define I40IWQPSQ_INLINEDATALEN_SHIFT 48
+#define I40IWQPSQ_INLINEDATALEN_MASK \
+ (0x7fULL << I40IWQPSQ_INLINEDATALEN_SHIFT)
+
+/* iwarp send with push mode */
+#define I40IWQPSQ_WQDESCIDX_SHIFT 0
+#define I40IWQPSQ_WQDESCIDX_MASK (0x3fffUL << I40IWQPSQ_WQDESCIDX_SHIFT)
+
+/* rdma write */
+#define I40IWQPSQ_REMSTAG_SHIFT 0
+#define I40IWQPSQ_REMSTAG_MASK (0xffffffffUL << I40IWQPSQ_REMSTAG_SHIFT)
+
+#define I40IWQPSQ_REMTO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_REMTO_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* memory window */
+#define I40IWQPSQ_STAGRIGHTS_SHIFT 48
+#define I40IWQPSQ_STAGRIGHTS_MASK (0x1fULL << I40IWQPSQ_STAGRIGHTS_SHIFT)
+
+#define I40IWQPSQ_VABASEDTO_SHIFT 53
+#define I40IWQPSQ_VABASEDTO_MASK (1ULL << I40IWQPSQ_VABASEDTO_SHIFT)
+
+#define I40IWQPSQ_MWLEN_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_MWLEN_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPSQ_PARENTMRSTAG_SHIFT 0
+#define I40IWQPSQ_PARENTMRSTAG_MASK \
+ (0xffffffffUL << I40IWQPSQ_PARENTMRSTAG_SHIFT)
+
+#define I40IWQPSQ_MWSTAG_SHIFT 32
+#define I40IWQPSQ_MWSTAG_MASK (0xffffffffULL << I40IWQPSQ_MWSTAG_SHIFT)
+
+#define I40IWQPSQ_BASEVA_TO_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_BASEVA_TO_FBO_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* Local Invalidate */
+#define I40IWQPSQ_LOCSTAG_SHIFT 32
+#define I40IWQPSQ_LOCSTAG_MASK (0xffffffffULL << I40IWQPSQ_LOCSTAG_SHIFT)
+
+/* Fast Register */
+#define I40IWQPSQ_STAGKEY_SHIFT 0
+#define I40IWQPSQ_STAGKEY_MASK (0xffUL << I40IWQPSQ_STAGKEY_SHIFT)
+
+#define I40IWQPSQ_STAGINDEX_SHIFT 8
+#define I40IWQPSQ_STAGINDEX_MASK (0xffffffUL << I40IWQPSQ_STAGINDEX_SHIFT)
+
+#define I40IWQPSQ_COPYHOSTPBLS_SHIFT 43
+#define I40IWQPSQ_COPYHOSTPBLS_MASK (1ULL << I40IWQPSQ_COPYHOSTPBLS_SHIFT)
+
+#define I40IWQPSQ_LPBLSIZE_SHIFT 44
+#define I40IWQPSQ_LPBLSIZE_MASK (3ULL << I40IWQPSQ_LPBLSIZE_SHIFT)
+
+#define I40IWQPSQ_HPAGESIZE_SHIFT 46
+#define I40IWQPSQ_HPAGESIZE_MASK (3ULL << I40IWQPSQ_HPAGESIZE_SHIFT)
+
+#define I40IWQPSQ_STAGLEN_SHIFT 0
+#define I40IWQPSQ_STAGLEN_MASK (0x1ffffffffffULL << I40IWQPSQ_STAGLEN_SHIFT)
+
+#define I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT 48
+#define I40IWQPSQ_FIRSTPMPBLIDXLO_MASK \
+ (0xffffULL << I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT)
+
+#define I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT 0
+#define I40IWQPSQ_FIRSTPMPBLIDXHI_MASK \
+ (0xfffUL << I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT)
+
+#define I40IWQPSQ_PBLADDR_SHIFT 12
+#define I40IWQPSQ_PBLADDR_MASK (0xfffffffffffffULL << I40IWQPSQ_PBLADDR_SHIFT)
+
+/* iwarp QP RQ WQE common fields */
+#define I40IWQPRQ_ADDFRAGCNT_SHIFT I40IWQPSQ_ADDFRAGCNT_SHIFT
+#define I40IWQPRQ_ADDFRAGCNT_MASK I40IWQPSQ_ADDFRAGCNT_MASK
+
+#define I40IWQPRQ_VALID_SHIFT I40IWQPSQ_VALID_SHIFT
+#define I40IWQPRQ_VALID_MASK I40IWQPSQ_VALID_MASK
+
+#define I40IWQPRQ_COMPLCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPRQ_COMPLCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPRQ_FRAG_LEN_SHIFT I40IWQPSQ_FRAG_LEN_SHIFT
+#define I40IWQPRQ_FRAG_LEN_MASK I40IWQPSQ_FRAG_LEN_MASK
+
+#define I40IWQPRQ_STAG_SHIFT I40IWQPSQ_FRAG_STAG_SHIFT
+#define I40IWQPRQ_STAG_MASK I40IWQPSQ_FRAG_STAG_MASK
+
+#define I40IWQPRQ_TO_SHIFT I40IWQPSQ_FRAG_TO_SHIFT
+#define I40IWQPRQ_TO_MASK I40IWQPSQ_FRAG_TO_MASK
+
+/* Query FPM CQP buf */
+#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_QPS_MASK \
+ (0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_CQS_MASK \
+ (0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT)
+
+#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT 0
+#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_MASK \
+ (0x3fffUL << I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT 32
+#define I40IW_QUERY_FPM_MAX_PE_SDS_MASK \
+ (0x3fffULL << I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_CEQS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_CEQS_MASK \
+ (0xffUL << I40IW_QUERY_FPM_MAX_CEQS_SHIFT)
+
+#define I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT 32
+#define I40IW_QUERY_FPM_XFBLOCKSIZE_MASK \
+ (0xffffffffULL << I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT)
+
+#define I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT 32
+#define I40IW_QUERY_FPM_Q1BLOCKSIZE_MASK \
+ (0xffffffffULL << I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT)
+
+#define I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT 16
+#define I40IW_QUERY_FPM_HTMULTIPLIER_MASK \
+ (0xfUL << I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT)
+
+#define I40IW_QUERY_FPM_TIMERBUCKET_SHIFT 32
+#define I40IW_QUERY_FPM_TIMERBUCKET_MASK \
+ (0xffffULL << I40IW_QUERY_FPM_TIMERBUCKET_SHIFT)
+
+/* Static HMC pages allocated buf */
+#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT 0
+#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_MASK \
+ (0x3fUL << I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT)
+
+#define I40IW_HW_PAGE_SIZE 4096
+#define I40IW_DONE_COUNT 1000
+#define I40IW_SLEEP_COUNT 10
+
+enum {
+ I40IW_QUEUES_ALIGNMENT_MASK = (128 - 1),
+ I40IW_AEQ_ALIGNMENT_MASK = (256 - 1),
+ I40IW_Q2_ALIGNMENT_MASK = (256 - 1),
+ I40IW_CEQ_ALIGNMENT_MASK = (256 - 1),
+ I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
+ I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
+ I40IW_SHADOWAREA_MASK = (128 - 1),
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0,
+ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0
+};
+
+enum i40iw_alignment {
+ I40IW_CQP_ALIGNMENT = 0x200,
+ I40IW_AEQ_ALIGNMENT = 0x100,
+ I40IW_CEQ_ALIGNMENT = 0x100,
+ I40IW_CQ0_ALIGNMENT = 0x100,
+ I40IW_SD_BUF_ALIGNMENT = 0x100
+};
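+
+/*
+ * Illustrative only (editor's sketch): the *_ALIGNMENT_MASK values above
+ * are power-of-two minus one, so an address can be rounded up to the
+ * required alignment with the usual (va + mask) & ~mask idiom. The
+ * helper below is an assumed usage, not driver code.
+ */
+#if 0
+static inline unsigned long example_align_up(unsigned long va, unsigned long mask)
+{
+ return (va + mask) & ~mask; /* e.g. mask = I40IW_AEQ_ALIGNMENT_MASK */
+}
+#endif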
+
+#define I40IW_QP_WQE_MIN_SIZE 32
+#define I40IW_QP_WQE_MAX_SIZE 128
+
+#define I40IW_CQE_QTYPE_RQ 0
+#define I40IW_CQE_QTYPE_SQ 1
+
+#define I40IW_RING_INIT(_ring, _size) \
+ { \
+ (_ring).head = 0; \
+ (_ring).tail = 0; \
+ (_ring).size = (_size); \
+ }
+#define I40IW_RING_GETSIZE(_ring) ((_ring).size)
+#define I40IW_RING_GETCURRENT_HEAD(_ring) ((_ring).head)
+#define I40IW_RING_GETCURRENT_TAIL(_ring) ((_ring).tail)
+
+#define I40IW_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!I40IW_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = I40IW_ERR_RING_FULL; \
+ } \
+ }
+
+#define I40IW_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((I40IW_RING_WORK_AVAILABLE(_ring) + (_count)) < size) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = I40IW_ERR_RING_FULL; \
+ } \
+ }
+
+#define I40IW_RING_MOVE_TAIL(_ring) \
+ (_ring).tail = ((_ring).tail + 1) % (_ring).size
+
+#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
+ (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+
+#define I40IW_RING_SET_TAIL(_ring, _pos) \
+ (_ring).tail = (_pos) % (_ring).size
+
+#define I40IW_RING_FULL_ERR(_ring) \
+ ( \
+ (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 1)) \
+ )
+
+#define I40IW_ERR_RING_FULL2(_ring) \
+ ( \
+ (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 2)) \
+ )
+
+#define I40IW_ERR_RING_FULL3(_ring) \
+ ( \
+ (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 3)) \
+ )
+
+#define I40IW_RING_MORE_WORK(_ring) \
+ ( \
+ (I40IW_RING_WORK_AVAILABLE(_ring) != 0) \
+ )
+
+#define I40IW_RING_WORK_AVAILABLE(_ring) \
+ ( \
+ (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+ )
+
+#define I40IW_RING_GET_WQES_AVAILABLE(_ring) \
+ ( \
+ ((_ring).size - I40IW_RING_WORK_AVAILABLE(_ring) - 1) \
+ )
+
+#define I40IW_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
+ { \
+ index = I40IW_RING_GETCURRENT_HEAD(_ring); \
+ I40IW_RING_MOVE_HEAD(_ring, _retcode); \
+ }
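
/*
 * Illustrative sketch (not part of this patch): the I40IW_RING_* macros
 * above implement a circular queue that sacrifices one slot to tell
 * "full" from "empty", with no power-of-two requirement on the size.
 * The standalone snippet below models the same head/tail arithmetic;
 * struct and function names are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_ring {
	uint32_t head;	/* producer index, advanced by MOVE_HEAD */
	uint32_t tail;	/* consumer index, advanced by MOVE_TAIL */
	uint32_t size;	/* number of slots */
};

/* mirrors I40IW_RING_WORK_AVAILABLE */
static uint32_t demo_work_available(const struct demo_ring *r)
{
	return (r->head + r->size - r->tail) % r->size;
}

/* mirrors I40IW_RING_FULL_ERR: full when only one free slot remains */
static int demo_full(const struct demo_ring *r)
{
	return demo_work_available(r) == r->size - 1;
}

int main(void)
{
	struct demo_ring r = { .head = 0, .tail = 0, .size = 4 };

	while (!demo_full(&r))			/* produce until full */
		r.head = (r.head + 1) % r.size;
	printf("available at full: %u\n", demo_work_available(&r));	/* 3 */

	r.tail = (r.tail + 2) % r.size;		/* consume two entries */
	printf("available after consume: %u\n", demo_work_available(&r)); /* 1 */
	return 0;
}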
+
+/* Async Events codes */
+#define I40IW_AE_AMP_UNALLOCATED_STAG 0x0102
+#define I40IW_AE_AMP_INVALID_STAG 0x0103
+#define I40IW_AE_AMP_BAD_QP 0x0104
+#define I40IW_AE_AMP_BAD_PD 0x0105
+#define I40IW_AE_AMP_BAD_STAG_KEY 0x0106
+#define I40IW_AE_AMP_BAD_STAG_INDEX 0x0107
+#define I40IW_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define I40IW_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define I40IW_AE_AMP_TO_WRAP 0x010a
+#define I40IW_AE_AMP_FASTREG_SHARED 0x010b
+#define I40IW_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define I40IW_AE_AMP_FASTREG_MW_STAG 0x010d
+#define I40IW_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define I40IW_AE_AMP_FASTREG_PBL_TABLE_OVERFLOW 0x010f
+#define I40IW_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define I40IW_AE_AMP_INVALIDATE_SHARED 0x0111
+#define I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define I40IW_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define I40IW_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define I40IW_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define I40IW_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define I40IW_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define I40IW_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define I40IW_AE_AMP_WQE_INVALID_PARAMETER 0x0130
+#define I40IW_AE_BAD_CLOSE 0x0201
+#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define I40IW_AE_CQ_OPERATION_ERROR 0x0203
+#define I40IW_AE_PRIV_OPERATION_DENIED 0x011c
+#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define I40IW_AE_STAG_ZERO_INVALID 0x0206
+#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define I40IW_AE_SRQ_LIMIT 0x0209
+#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b
+#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220
+#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID 0x0302
+#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305
+#define I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define I40IW_AE_DDP_UBE_INVALID_QN 0x0307
+#define I40IW_AE_DDP_NO_L_BIT 0x0308
+#define I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define I40IW_AE_INVALID_ARP_ENTRY 0x0401
+#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define I40IW_AE_STALE_ARP_ENTRY 0x0403
+#define I40IW_AE_INVALID_WQE_LENGTH 0x0404
+#define I40IW_AE_INVALID_MAC_ENTRY 0x0405
+#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501
+#define I40IW_AE_LLP_CONNECTION_RESET 0x0502
+#define I40IW_AE_LLP_FIN_RECEIVED 0x0503
+#define I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506
+#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define I40IW_AE_LLP_SYN_RECEIVED 0x0508
+#define I40IW_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define I40IW_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define I40IW_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define I40IW_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define I40IW_AE_LLP_RX_VLAN_MISMATCH 0x050d
+#define I40IW_AE_RESOURCE_EXHAUSTION 0x0520
+#define I40IW_AE_RESET_SENT 0x0601
+#define I40IW_AE_TERMINATE_SENT 0x0602
+#define I40IW_AE_RESET_NOT_SENT 0x0603
+#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700
+#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define I40IW_AE_UDA_XMIT_FRAG_SEQ 0x0800
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0801
+#define I40IW_AE_UDA_XMIT_IPADDR_MISMATCH 0x0802
+#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900
+
+#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1
+#define OP_CEQ_DESTROY 2
+#define OP_AEQ_DESTROY 3
+#define OP_DELETE_ARP_CACHE_ENTRY 4
+#define OP_MANAGE_APBVT_ENTRY 5
+#define OP_CEQ_CREATE 6
+#define OP_AEQ_CREATE 7
+#define OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY 8
+#define OP_ADD_LOCAL_MAC_IPADDR_ENTRY 9
+#define OP_MANAGE_QHASH_TABLE_ENTRY 10
+#define OP_QP_MODIFY 11
+#define OP_QP_UPLOAD_CONTEXT 12
+#define OP_CQ_CREATE 13
+#define OP_CQ_DESTROY 14
+#define OP_QP_CREATE 15
+#define OP_QP_DESTROY 16
+#define OP_ALLOC_STAG 17
+#define OP_MR_REG_NON_SHARED 18
+#define OP_DEALLOC_STAG 19
+#define OP_MW_ALLOC 20
+#define OP_QP_FLUSH_WQES 21
+#define OP_ADD_ARP_CACHE_ENTRY 22
+#define OP_MANAGE_PUSH_PAGE 23
+#define OP_UPDATE_PE_SDS 24
+#define OP_MANAGE_HMC_PM_FUNC_TABLE 25
+#define OP_SUSPEND 26
+#define OP_RESUME 27
+#define OP_MANAGE_VF_PBLE_BP 28
+#define OP_QUERY_FPM_VALUES 29
+#define OP_COMMIT_FPM_VALUES 30
+#define OP_SIZE_CQP_STAT_ARRAY 31
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
new file mode 100644
index 000000000000..5484cbf55f0f
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
@@ -0,0 +1,821 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * i40iw_find_sd_index_limit - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the sd index limit (one past the last sd)
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40iw_hmc_rsrc_type.
+ */
+static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
+ u32 type,
+ u32 idx,
+ u32 cnt,
+ u32 *sd_idx,
+ u32 *sd_limit)
+{
+ u64 fpm_addr, fpm_limit;
+
+ fpm_addr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+ *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
+ *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
+ *sd_limit += 1;
+}
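
/*
 * Worked example (illustrative numbers only): with I40IW_HMC_DIRECT_BP_SIZE
 * of 0x200000 (2MB), an object type with FPM base 0x500000 and object size
 * 0x1000, creating cnt = 1024 objects from idx = 0 spans fpm_addr 0x500000
 * to fpm_limit 0x900000.  That gives sd_idx = 0x500000 / 0x200000 = 2 and
 * sd_limit = (0x8fffff / 0x200000) + 1 = 5, i.e. SDs 2..4 back the range
 * (sd_limit is exclusive).  i40iw_find_pd_index_limit() below runs the same
 * arithmetic at the 4KB I40IW_HMC_PAGED_BP_SIZE granule.
 */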
+
+/**
+ * i40iw_find_pd_index_limit - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_idx: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40iw_hmc_rsrc_type.
+ */
+static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
+ u32 type,
+ u32 idx,
+ u32 cnt,
+ u32 *pd_idx,
+ u32 *pd_limit)
+{
+ u64 fpm_addr, fpm_limit;
+
+ fpm_addr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+ *pd_idx = (u32)(fpm_addr / I40IW_HMC_PAGED_BP_SIZE);
+ *pd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
+ *pd_limit += 1;
+}
+
+/**
+ * i40iw_set_sd_entry - setup entry for sd programming
+ * @pa: physical addr
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static inline void i40iw_set_sd_entry(u64 pa,
+ u32 idx,
+ enum i40iw_sd_entry_type type,
+ struct update_sd_entry *entry)
+{
+ entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
+ (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
+ entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
+}
+
+/**
+ * i40iw_clr_sd_entry - setup entry for sd clear
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
+ struct update_sd_entry *entry)
+{
+ entry->data = (I40IW_HMC_MAX_BP_COUNT <<
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
+ (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
+ entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
+}
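
/*
 * Sketch of the command word i40iw_set_sd_entry() builds, under the i40e
 * register layout this hardware shares (the shift values - valid at bit 0,
 * type at bit 1, bp count at bit 2 - are assumptions based on the
 * I40E_PFHMC_SDDATALOW_* definitions, not restated from this patch).
 * A direct SD at 2MB-aligned physical address pa would encode as
 *
 *	data = pa | (512 << 2) | (1 << 1) | (1 << 0)
 *
 * with the address in the high bits and count/type/valid packed into the
 * low bits that alignment leaves free; i40iw_clr_sd_entry() writes the
 * same word minus the address and valid bit, which is what tears the
 * mapping down.
 */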
+
+/**
+ * i40iw_hmc_sd_one - setup 1 sd entry for cqp
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ * @pa: physical addr
+ * @sd_idx: sd index
+ * @type: paged or direct sd
+ * @setsd: flag to set or clear sd
+ */
+enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id,
+ u64 pa, u32 sd_idx,
+ enum i40iw_sd_entry_type type,
+ bool setsd)
+{
+ struct i40iw_update_sds_info sdinfo;
+
+ sdinfo.cnt = 1;
+ sdinfo.hmc_fn_id = hmc_fn_id;
+ if (setsd)
+ i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
+ else
+ i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);
+
+ return dev->cqp->process_cqp_sds(dev, &sdinfo);
+}
+
+/**
+ * i40iw_hmc_sd_grp - setup group of sd entries for cqp
+ * @dev: pointer to the device structure
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: sd index
+ * @sd_cnt: number of sd entries
+ * @setsd: flag to set or clear sd
+ */
+static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_info *hmc_info,
+ u32 sd_index,
+ u32 sd_cnt,
+ bool setsd)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+ struct i40iw_update_sds_info sdinfo;
+ u64 pa;
+ u32 i;
+ enum i40iw_status_code ret_code = 0;
+
+ memset(&sdinfo, 0, sizeof(sdinfo));
+ sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
+ for (i = sd_index; i < sd_index + sd_cnt; i++) {
+ sd_entry = &hmc_info->sd_table.sd_entry[i];
+ if (!sd_entry ||
+ (!sd_entry->valid && setsd) ||
+ (sd_entry->valid && !setsd))
+ continue;
+ if (setsd) {
+ pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa :
+ sd_entry->u.bp.addr.pa;
+ i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ } else {
+ i40iw_clr_sd_entry(i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ }
+ sdinfo.cnt++;
+ if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
+ ret_code);
+ return ret_code;
+ }
+ sdinfo.cnt = 0;
+ }
+ }
+ if (sdinfo.cnt)
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+
+ return ret_code;
+}
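
/*
 * Illustrative sketch (not part of this patch) of the batching pattern in
 * i40iw_hmc_sd_grp(): accumulate entries into a fixed-size array, flush
 * whenever it fills, then flush the remainder after the loop.  All names
 * below are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

#define BATCH_MAX 11	/* mirrors I40IW_MAX_SD_ENTRIES */

/* stand-in for dev->cqp->process_cqp_sds(): submit n queued entries */
static int flush_batch(const int *batch, size_t n)
{
	printf("flushing %zu entries starting with %d\n", n, batch[0]);
	return 0;
}

static int submit_all(const int *items, size_t count)
{
	int batch[BATCH_MAX];
	size_t n = 0, i;
	int ret = 0;

	for (i = 0; i < count; i++) {
		batch[n++] = items[i];
		if (n == BATCH_MAX) {	/* array full: flush mid-stream */
			ret = flush_batch(batch, n);
			if (ret)
				return ret;
			n = 0;
		}
	}
	if (n)				/* partial final batch */
		ret = flush_batch(batch, n);
	return ret;
}

int main(void)
{
	int items[25];
	size_t i;

	for (i = 0; i < 25; i++)
		items[i] = (int)i;
	return submit_all(items, 25);	/* flushes 11 + 11 + 3 */
}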
+
+/**
+ * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ */
+struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct i40iw_vfdev *vf_dev = NULL;
+ u16 idx;
+
+ for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
+ if (dev->vf_dev[idx] &&
+ ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
+ vf_dev = dev->vf_dev[idx];
+ break;
+ }
+ }
+ return vf_dev;
+}
+
+/**
+ * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ */
+struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id)
+{
+ struct i40iw_hmc_info *hmc_info = NULL;
+ u16 idx;
+
+ for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
+ if (dev->vf_dev[idx] &&
+ ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
+ hmc_info = &dev->vf_dev[idx]->hmc_info;
+ break;
+ }
+ }
+ return hmc_info;
+}
+
+/**
+ * i40iw_hmc_finish_add_sd_reg - program sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: create obj info
+ */
+static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *info)
+{
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+
+ if (!info->add_sd_cnt)
+ return 0;
+
+ return i40iw_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0],
+ info->add_sd_cnt, true);
+}
+
+/**
+ * i40iw_create_iw_hmc_obj - allocate backing store for hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to i40iw_hmc_iw_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ */
+enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *info)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx = 0, pd_lmt = 0;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 i, j;
+ bool pd_error = false;
+ enum i40iw_status_code ret_code = 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
+ __func__, info->rsrc_type, info->start_idx, info->count,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+ }
+
+ if (!dev->is_pf)
+ return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);
+
+ i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ return I40IW_ERR_INVALID_SD_INDEX;
+ }
+ i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx, &pd_lmt);
+
+ for (j = sd_idx; j < sd_lmt; j++) {
+ ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
+ j,
+ info->entry_type,
+ I40IW_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+
+ if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
+ ((dev->hmc_info == info->hmc_info) &&
+ (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
+ pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ (j + 1) * I40IW_HMC_MAX_BP_COUNT);
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
+ i, NULL);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ while (i && (i > pd_idx1)) {
+ i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
+ info->is_pf);
+ i--;
+ }
+ }
+ }
+ if (sd_entry->valid)
+ continue;
+
+ info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
+ info->add_sd_cnt++;
+ sd_entry->valid = true;
+ }
+ return i40iw_hmc_finish_add_sd_reg(dev, info);
+
+exit_sd_error:
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40IW_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ (j - 1) * I40IW_HMC_MAX_BP_COUNT);
+ pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ i40iw_prep_remove_pd_page(info->hmc_info, i);
+ break;
+ case I40IW_SD_TYPE_DIRECT:
+ i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40IW_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+
+ return ret_code;
+}
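
/*
 * Illustrative sketch (not part of this patch) of the reverse-order unwind
 * at exit_sd_error above: on a mid-loop failure, walk back over the
 * descriptors already touched and undo each one, so no half-initialized
 * state survives.  Names are hypothetical.
 */
#include <stdio.h>

static int setup_one(int i)	{ return i == 3 ? -1 : 0; }	/* fails at 3 */
static void undo_one(int i)	{ printf("undo %d\n", i); }

static int setup_range(int first, int limit)
{
	int j;

	for (j = first; j < limit; j++) {
		if (setup_one(j))
			goto unwind;
	}
	return 0;

unwind:
	while (j > first)	/* entry j never took effect: undo j-1 down */
		undo_one(--j);
	return -1;
}

int main(void)
{
	setup_range(0, 5);	/* prints undo 2, undo 1, undo 0 */
	return 0;
}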
+
+/**
+ * i40iw_finish_del_sd_reg - delete sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: delete obj info
+ * @reset: true if called before reset
+ */
+static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_del_obj_info *info,
+ bool reset)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+ enum i40iw_status_code ret_code = 0;
+ u32 i, sd_idx;
+ struct i40iw_dma_mem *mem;
+
+ if (dev->is_pf && !reset)
+ ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0],
+ info->del_sd_cnt, false);
+
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);
+
+ for (i = 0; i < info->del_sd_cnt; i++) {
+ sd_idx = info->hmc_info->sd_indexes[i];
+ sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
+ if (!sd_entry)
+ continue;
+ mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
+ &sd_entry->u.pd_table.pd_page_addr :
+ &sd_entry->u.bp.addr;
+
+ if (!mem || !mem->va)
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
+ else
+ i40iw_free_dma_mem(dev->hw, mem);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_del_iw_hmc_obj - remove pe hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to i40iw_hmc_del_obj_info struct
+ * @reset: true if called before reset
+ *
+ * This will de-populate the SDs and PDs.  It frees
+ * the memory for PDs and backing storage.  After this function returns,
+ * the caller should deallocate memory previously allocated for
+ * book-keeping information about PDs and backing storage.
+ */
+enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_del_obj_info *info,
+ bool reset)
+{
+ struct i40iw_hmc_pd_table *pd_table;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 i, j;
+ enum i40iw_status_code ret_code = 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
+ __func__, info->start_idx, info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC,
+ "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
+ __func__, info->start_idx, info->count,
+ info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+ }
+ if (!dev->is_pf) {
+ ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
+ info->count);
+ if (info->rsrc_type != I40IW_HMC_IW_PBLE)
+ return ret_code;
+ }
+
+ i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx, &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;
+
+ if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+ I40IW_SD_TYPE_PAGED)
+ continue;
+
+ rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
+ pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
+ info->is_pf);
+ if (ret_code) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
+ return ret_code;
+ }
+ }
+ }
+
+ i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
+ return I40IW_ERR_INVALID_SD_INDEX;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40IW_SD_TYPE_DIRECT:
+ ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
+ if (!ret_code) {
+ info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+ info->del_sd_cnt++;
+ }
+ break;
+ case I40IW_SD_TYPE_PAGED:
+ ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
+ if (!ret_code) {
+ info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+ info->del_sd_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return i40iw_finish_del_sd_reg(dev, info, reset);
+}
+
+/**
+ * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ */
+enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40iw_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40iw_status_code ret_code = 0;
+ struct i40iw_hmc_sd_entry *sd_entry;
+ bool dma_mem_alloc_done = false;
+ struct i40iw_dma_mem mem;
+ u64 alloc_len;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (type == I40IW_SD_TYPE_PAGED)
+ alloc_len = I40IW_HMC_PAGED_BP_SIZE;
+ else
+ alloc_len = direct_mode_sz;
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
+ I40IW_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (type == I40IW_SD_TYPE_PAGED) {
+ ret_code = i40iw_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40iw_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+
+ memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
+ } else {
+ memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
+ I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (ret_code)
+ if (dma_mem_alloc_done)
+ i40iw_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40iw_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the i40iw_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ *    aligned on a 4K boundary, and zeroed.
+ * 2. It should be 4K in size.
+ */
+enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40iw_dma_mem *rsrc_pg)
+{
+ enum i40iw_status_code ret_code = 0;
+ struct i40iw_hmc_pd_table *pd_table;
+ struct i40iw_hmc_pd_entry *pd_entry;
+ struct i40iw_dma_mem mem;
+ struct i40iw_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
+ return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+
+ sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
+ if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
+ return 0;
+
+ rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ ret_code = i40iw_allocate_dma_mem(hw, page,
+ I40IW_HMC_PAGED_BP_SIZE,
+ I40IW_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+ pd_entry->rsrc_pg = false;
+ }
+
+ memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
+ page_desc = page->pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40IW_INC_PD_REFCNT(pd_table);
+ if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
+ I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
+ else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
+ I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
+ hmc_info->hmc_fn_id);
+ }
+ I40IW_INC_BP_REFCNT(&pd_entry->bp);
+
+ return 0;
+}
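
/*
 * Worked example (illustrative only): i40iw_add_pd_table_entry() publishes
 * a backing page by writing "pa | 0x1" into the PD page at the entry's
 * slot.  For a 4KB page at physical address 0x7f000 and rel_pd_idx 3, the
 * u64 at pd_page_addr.va + 3 * 8 becomes 0x7f001: the low valid bit is
 * free because 4KB alignment keeps the low 12 address bits clear.
 */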
+
+/**
+ * i40iw_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: distinguishes a VF from a PF
+ *
+ * This function:
+ * 1. Marks the entry in pd table (for paged address mode) or in sd table
+ * (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ */
+enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info,
+ u32 idx,
+ bool is_pf)
+{
+ struct i40iw_hmc_pd_entry *pd_entry;
+ struct i40iw_hmc_pd_table *pd_table;
+ struct i40iw_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ struct i40iw_dma_mem *mem;
+ u64 *pd_addr;
+
+ sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt)
+ return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
+ return I40IW_ERR_INVALID_SD_TYPE;
+
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40IW_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ return 0;
+
+ pd_entry->valid = false;
+ I40IW_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ if (is_pf)
+ I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+ else
+ I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
+ hmc_info->hmc_fn_id);
+
+ if (!pd_entry->rsrc_pg) {
+ mem = &pd_entry->bp.addr;
+ if (!mem || !mem->va)
+ return I40IW_ERR_PARAM;
+ i40iw_free_dma_mem(hw, mem);
+ }
+ if (!pd_table->ref_cnt)
+ i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+
+ return 0;
+}
+
+/**
+ * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ */
+enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt)
+ return I40IW_ERR_NOT_READY;
+
+ I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
+ sd_entry->valid = false;
+
+ return 0;
+}
+
+/**
+ * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ */
+enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
+ u32 idx)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt)
+ return I40IW_ERR_NOT_READY;
+
+ sd_entry->valid = false;
+ I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ return 0;
+}
+
+/**
+ * i40iw_pf_init_vfhmc - init hmc info for vf driver instance
+ * @dev: pointer to i40iw_dev struct
+ * @vf_hmc_fn_id: hmc function id for vf driver
+ * @vf_cnt_array: array of cnt values of iwarp hmc objects
+ *
+ * Called by pf driver to initialize hmc_info for vf driver instance.
+ */
+enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
+ u8 vf_hmc_fn_id,
+ u32 *vf_cnt_array)
+{
+ struct i40iw_hmc_info *hmc_info;
+ enum i40iw_status_code ret_code = 0;
+ u32 i;
+
+ if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
+ (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
+ I40IW_MAX_PE_ENABLED_VF_COUNT)) {
+ i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n",
+ __func__, vf_hmc_fn_id);
+ return I40IW_ERR_INVALID_HMCFN_ID;
+ }
+
+ ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
+ if (ret_code)
+ return ret_code;
+
+ hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);
+
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
+ if (vf_cnt_array)
+ hmc_info->hmc_obj[i].cnt =
+ vf_cnt_array[i - I40IW_HMC_IW_QP];
+ else
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+ }
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.h b/drivers/infiniband/hw/i40iw/i40iw_hmc.h
new file mode 100644
index 000000000000..4c3fdd875621
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.h
@@ -0,0 +1,241 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_HMC_H
+#define I40IW_HMC_H
+
+#include "i40iw_d.h"
+
+struct i40iw_hw;
+enum i40iw_status_code;
+
+#define I40IW_HMC_MAX_BP_COUNT 512
+#define I40IW_MAX_SD_ENTRIES 11
+#define I40IW_HW_DBG_HMC_INVALID_BP_MARK 0xCA
+
+#define I40IW_HMC_INFO_SIGNATURE 0x484D5347
+#define I40IW_HMC_PD_CNT_IN_SD 512
+#define I40IW_HMC_DIRECT_BP_SIZE 0x200000
+#define I40IW_HMC_MAX_SD_COUNT 4096
+#define I40IW_HMC_PAGED_BP_SIZE 4096
+#define I40IW_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40IW_FIRST_VF_FPM_ID 16
+#define FPM_MULTIPLIER 1024
+
+#define I40IW_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40IW_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40IW_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40IW_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40IW_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40IW_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ */
+#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ i40iw_wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ * @hmc_fn_id: VF's function id
+ */
+#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ i40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID), \
+ ((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ (pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
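
/*
 * Worked example (illustrative; the shift values - PMSDIDX at bit 0,
 * PMSDPARTSEL at bit 15, PMPDIDX at bit 16 - are assumptions based on
 * the i40e register definitions, not restated from this patch):
 * I40IW_INVALIDATE_PF_HMC_PD(hw, 2, 3) would write
 * (2 << 0) | (1 << 15) | (3 << 16) = 0x38002 to I40E_PFHMC_PDINV.
 */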
+
+struct i40iw_hmc_obj_info {
+ u64 base;
+ u32 max_cnt;
+ u32 cnt;
+ u64 size;
+};
+
+enum i40iw_sd_entry_type {
+ I40IW_SD_TYPE_INVALID = 0,
+ I40IW_SD_TYPE_PAGED = 1,
+ I40IW_SD_TYPE_DIRECT = 2
+};
+
+struct i40iw_hmc_bp {
+ enum i40iw_sd_entry_type entry_type;
+ struct i40iw_dma_mem addr;
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40iw_hmc_pd_entry {
+ struct i40iw_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg;
+ bool valid;
+};
+
+struct i40iw_hmc_pd_table {
+ struct i40iw_dma_mem pd_page_addr;
+ struct i40iw_hmc_pd_entry *pd_entry;
+ struct i40iw_virt_mem pd_entry_virt_mem;
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40iw_hmc_sd_entry {
+ enum i40iw_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40iw_hmc_pd_table pd_table;
+ struct i40iw_hmc_bp bp;
+ } u;
+};
+
+struct i40iw_hmc_sd_table {
+ struct i40iw_virt_mem addr;
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40iw_hmc_sd_entry *sd_entry;
+};
+
+struct i40iw_hmc_info {
+ u32 signature;
+ u8 hmc_fn_id;
+ u16 first_sd_index;
+
+ struct i40iw_hmc_obj_info *hmc_obj;
+ struct i40iw_virt_mem hmc_obj_virt_mem;
+ struct i40iw_hmc_sd_table sd_table;
+ u16 sd_indexes[I40IW_HMC_MAX_SD_COUNT];
+};
+
+struct update_sd_entry {
+ u64 cmd;
+ u64 data;
+};
+
+struct i40iw_update_sds_info {
+ u32 cnt;
+ u8 hmc_fn_id;
+ struct update_sd_entry entry[I40IW_MAX_SD_ENTRIES];
+};
+
+struct i40iw_ccq_cqe_info;
+struct i40iw_hmc_fcn_info {
+ void (*callback_fcn)(struct i40iw_sc_dev *, void *,
+ struct i40iw_ccq_cqe_info *);
+ void *cqp_callback_param;
+ u32 vf_id;
+ u16 iw_vf_idx;
+ bool free_fcn;
+};
+
+enum i40iw_hmc_rsrc_type {
+ I40IW_HMC_IW_QP = 0,
+ I40IW_HMC_IW_CQ = 1,
+ I40IW_HMC_IW_SRQ = 2,
+ I40IW_HMC_IW_HTE = 3,
+ I40IW_HMC_IW_ARP = 4,
+ I40IW_HMC_IW_APBVT_ENTRY = 5,
+ I40IW_HMC_IW_MR = 6,
+ I40IW_HMC_IW_XF = 7,
+ I40IW_HMC_IW_XFFL = 8,
+ I40IW_HMC_IW_Q1 = 9,
+ I40IW_HMC_IW_Q1FL = 10,
+ I40IW_HMC_IW_TIMER = 11,
+ I40IW_HMC_IW_FSIMC = 12,
+ I40IW_HMC_IW_FSIAV = 13,
+ I40IW_HMC_IW_PBLE = 14,
+ I40IW_HMC_IW_MAX = 15,
+};
+
+struct i40iw_hmc_create_obj_info {
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_virt_mem add_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 add_sd_cnt;
+ enum i40iw_sd_entry_type entry_type;
+ bool is_pf;
+};
+
+struct i40iw_hmc_del_obj_info {
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_virt_mem del_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 del_sd_cnt;
+ bool is_pf;
+};
+
+enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf,
+ struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size);
+enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *info);
+enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_del_obj_info *info,
+ bool reset);
+enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id,
+ u64 pa, u32 sd_idx, enum i40iw_sd_entry_type type,
+ bool setsd);
+enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *info);
+struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id);
+struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id);
+enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info, u32 sd_index,
+ enum i40iw_sd_entry_type type, u64 direct_mode_sz);
+enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info, u32 pd_index,
+ struct i40iw_dma_mem *rsrc_pg);
+enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
+ struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf);
+enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx);
+enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx);
+
+#define ENTER_SHARED_FUNCTION()
+#define EXIT_SHARED_FUNCTION()
+
+#endif /* I40IW_HMC_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
new file mode 100644
index 000000000000..9fd302425563
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -0,0 +1,730 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include "i40iw.h"
+
+/**
+ * i40iw_initialize_hw_resources - initialize hw resource during open
+ * @iwdev: iwarp device
+ */
+u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
+{
+ unsigned long num_pds;
+ u32 resources_size;
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 arp_table_size;
+ u32 mrdrvbits;
+ void *resource_ptr;
+
+ max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
+ max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+ max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
+ arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
+ iwdev->max_cqe = 0xFFFFF;
+ num_pds = max_qp * 4;
+ resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
+ resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
+ resources_size += sizeof(struct i40iw_qp **) * max_qp;
+ iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);
+
+ if (!iwdev->mem_resources)
+ return -ENOMEM;
+
+ iwdev->max_qp = max_qp;
+ iwdev->max_mr = max_mr;
+ iwdev->max_cq = max_cq;
+ iwdev->max_pd = num_pds;
+ iwdev->arp_table_size = arp_table_size;
+ iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
+ resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);
+
+ iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+ IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+ iwdev->allocated_qps = resource_ptr;
+ iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
+ iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
+ iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
+ iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
+ iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
+ set_bit(0, iwdev->allocated_mrs);
+ set_bit(0, iwdev->allocated_qps);
+ set_bit(0, iwdev->allocated_cqs);
+ set_bit(0, iwdev->allocated_pds);
+ set_bit(0, iwdev->allocated_arps);
+
+ /* Following for ILQ/IEQ */
+ set_bit(1, iwdev->allocated_qps);
+ set_bit(1, iwdev->allocated_cqs);
+ set_bit(1, iwdev->allocated_pds);
+ set_bit(2, iwdev->allocated_cqs);
+ set_bit(2, iwdev->allocated_pds);
+
+ spin_lock_init(&iwdev->resource_lock);
+ mrdrvbits = 24 - get_count_order(iwdev->max_mr);
+ iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
+ return 0;
+}
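
/*
 * Illustrative sketch (not part of this patch) of the single-allocation
 * carving done by i40iw_initialize_hw_resources(): one zeroed block is
 * partitioned into consecutive bitmaps, each rounded up to whole unsigned
 * longs exactly as BITS_TO_LONGS() rounds.  Sizes here are made up.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_BITS_PER_LONG	(8 * sizeof(unsigned long))
#define DEMO_BITS_TO_LONGS(n)	(((n) + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG)

int main(void)
{
	const unsigned int max_qp = 1000, max_cq = 2000;	/* hypothetical */
	size_t qp_longs = DEMO_BITS_TO_LONGS(max_qp);
	size_t cq_longs = DEMO_BITS_TO_LONGS(max_cq);
	unsigned long *mem, *qp_bits, *cq_bits;

	mem = calloc(qp_longs + cq_longs, sizeof(unsigned long));
	if (!mem)
		return 1;

	qp_bits = mem;			/* first carve-out */
	cq_bits = &qp_bits[qp_longs];	/* next starts right after */
	printf("qp bitmap %zu longs at %p, cq bitmap %zu longs at %p\n",
	       qp_longs, (void *)qp_bits, cq_longs, (void *)cq_bits);
	free(mem);
	return 0;
}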
+
+/**
+ * i40iw_cqp_ce_handler - handle cqp completions
+ * @iwdev: iwarp device
+ * @cq: cq for cqp completions
+ * @arm: flag to arm after completions
+ */
+static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
+{
+ struct i40iw_cqp_request *cqp_request;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ u32 cqe_count = 0;
+ struct i40iw_ccq_cqe_info info;
+ int ret;
+
+ do {
+ memset(&info, 0, sizeof(info));
+ ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
+ if (ret)
+ break;
+ cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
+ if (info.error)
+ i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
+ info.op_code, info.maj_err_code, info.min_err_code);
+ if (cqp_request) {
+ cqp_request->compl_info.maj_err_code = info.maj_err_code;
+ cqp_request->compl_info.min_err_code = info.min_err_code;
+ cqp_request->compl_info.op_ret_val = info.op_ret_val;
+ cqp_request->compl_info.error = info.error;
+
+ if (cqp_request->waiting) {
+ cqp_request->request_done = true;
+ wake_up(&cqp_request->waitq);
+ i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+ } else {
+ if (cqp_request->callback_fcn)
+ cqp_request->callback_fcn(cqp_request, 1);
+ i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+ }
+ }
+
+ cqe_count++;
+ } while (1);
+
+ if (arm && cqe_count) {
+ i40iw_process_bh(dev);
+ dev->ccq_ops->ccq_arm(cq);
+ }
+}
+
+/**
+ * i40iw_iwarp_ce_handler - handle iwarp completions
+ * @iwdev: iwarp device
+ * @iwcq: iwarp cq receiving event
+ */
+static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
+ struct i40iw_sc_cq *iwcq)
+{
+ struct i40iw_cq *i40iwcq = iwcq->back_cq;
+
+ if (i40iwcq->ibcq.comp_handler)
+ i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
+ i40iwcq->ibcq.cq_context);
+}
+
+/**
+ * i40iw_puda_ce_handler - handle puda completion events
+ * @iwdev: iwarp device
+ * @cq: puda completion q for event
+ */
+static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
+ struct i40iw_sc_cq *cq)
+{
+ struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
+ enum i40iw_status_code status;
+ u32 compl_error;
+
+ do {
+ status = i40iw_puda_poll_completion(dev, cq, &compl_error);
+ if (status == I40IW_ERR_QUEUE_EMPTY)
+ break;
+ if (status) {
+ i40iw_pr_err("puda status = %d\n", status);
+ break;
+ }
+ if (compl_error) {
+ i40iw_pr_err("puda compl_err =0x%x\n", compl_error);
+ break;
+ }
+ } while (1);
+
+ dev->ccq_ops->ccq_arm(cq);
+}
+
+/**
+ * i40iw_process_ceq - handle ceq for completions
+ * @iwdev: iwarp device
+ * @ceq: ceq having cq for completion
+ */
+void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_sc_ceq *sc_ceq;
+ struct i40iw_sc_cq *cq;
+ bool arm = true;
+
+ sc_ceq = &ceq->sc_ceq;
+ do {
+ cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
+ if (!cq)
+ break;
+
+ if (cq->cq_type == I40IW_CQ_TYPE_CQP)
+ i40iw_cqp_ce_handler(iwdev, cq, arm);
+ else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
+ i40iw_iwarp_ce_handler(iwdev, cq);
+ else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
+ (cq->cq_type == I40IW_CQ_TYPE_IEQ))
+ i40iw_puda_ce_handler(iwdev, cq);
+ } while (1);
+}
+
+/**
+ * i40iw_next_iw_state - modify qp state
+ * @iwqp: iwarp qp to modify
+ * @state: next state for qp
+ * @del_hash: del hash
+ * @term: term message
+ * @termlen: length of term message
+ */
+void i40iw_next_iw_state(struct i40iw_qp *iwqp,
+ u8 state,
+ u8 del_hash,
+ u8 term,
+ u8 termlen)
+{
+ struct i40iw_modify_qp_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.next_iwarp_state = state;
+ info.remove_hash_idx = del_hash;
+ info.cq_num_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.dont_send_term = true;
+ info.dont_send_fin = true;
+ info.termlen = termlen;
+
+ if (term & I40IWQP_TERM_SEND_TERM_ONLY)
+ info.dont_send_term = false;
+ if (term & I40IWQP_TERM_SEND_FIN_ONLY)
+ info.dont_send_fin = false;
+ if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
+ info.reset_tcp_conn = true;
+ i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
+}
+
+/**
+ * i40iw_process_aeq - handle aeq events
+ * @iwdev: iwarp device
+ */
+void i40iw_process_aeq(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_aeq *aeq = &iwdev->aeq;
+ struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
+ struct i40iw_aeqe_info aeinfo;
+ struct i40iw_aeqe_info *info = &aeinfo;
+ int ret;
+ struct i40iw_qp *iwqp = NULL;
+ struct i40iw_sc_cq *cq = NULL;
+ struct i40iw_cq *iwcq = NULL;
+ struct i40iw_sc_qp *qp = NULL;
+ struct i40iw_qp_host_ctx_info *ctx_info = NULL;
+ unsigned long flags;
+
+ u32 aeqcnt = 0;
+
+ if (!sc_aeq->size)
+ return;
+
+ do {
+ memset(info, 0, sizeof(*info));
+ ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
+ if (ret)
+ break;
+
+ aeqcnt++;
+ i40iw_debug(dev, I40IW_DEBUG_AEQ,
+ "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
+ __func__, info->ae_id, info->qp, info->qp_cq_id);
+ if (info->qp) {
+ iwqp = iwdev->qp_table[info->qp_cq_id];
+ if (!iwqp) {
+ i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
+ continue;
+ }
+ qp = &iwqp->sc_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = info->tcp_state;
+ iwqp->hw_iwarp_state = info->iwarp_state;
+ iwqp->last_aeq = info->ae_id;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ ctx_info = &iwqp->ctx_info;
+ ctx_info->err_rq_idx_valid = true;
+ } else {
+ if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
+ continue;
+ }
+
+ switch (info->ae_id) {
+ case I40IW_AE_LLP_FIN_RECEIVED:
+ if (qp->term_flags)
+ continue;
+ if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
+ if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
+ (iwqp->ibqp_state == IB_QPS_RTS)) {
+ i40iw_next_iw_state(iwqp,
+ I40IW_QP_STATE_CLOSING, 0, 0, 0);
+ i40iw_cm_disconn(iwqp);
+ }
+ iwqp->cm_id->add_ref(iwqp->cm_id);
+ i40iw_schedule_cm_timer(iwqp->cm_node,
+ (struct i40iw_puda_buf *)iwqp,
+ I40IW_TIMER_TYPE_CLOSE, 1, 0);
+ }
+ break;
+ case I40IW_AE_LLP_CLOSE_COMPLETE:
+ if (qp->term_flags)
+ i40iw_terminate_done(qp, 0);
+ else
+ i40iw_cm_disconn(iwqp);
+ break;
+ case I40IW_AE_RESET_SENT:
+ i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
+ i40iw_cm_disconn(iwqp);
+ break;
+ case I40IW_AE_LLP_CONNECTION_RESET:
+ if (atomic_read(&iwqp->close_timer_started))
+ continue;
+ i40iw_cm_disconn(iwqp);
+ break;
+ case I40IW_AE_TERMINATE_SENT:
+ i40iw_terminate_send_fin(qp);
+ break;
+ case I40IW_AE_LLP_TERMINATE_RECEIVED:
+ i40iw_terminate_received(qp, info);
+ break;
+ case I40IW_AE_CQ_OPERATION_ERROR:
+ i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
+ info->ae_id);
+ cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
+ iwcq = (struct i40iw_cq *)cq->back_cq;
+
+ if (iwcq->ibcq.event_handler) {
+ struct ib_event ibevent;
+
+ ibevent.device = iwcq->ibcq.device;
+ ibevent.event = IB_EVENT_CQ_ERR;
+ ibevent.element.cq = &iwcq->ibcq;
+ iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
+ }
+ break;
+ case I40IW_AE_PRIV_OPERATION_DENIED:
+ case I40IW_AE_STAG_ZERO_INVALID:
+ case I40IW_AE_IB_RREQ_AND_Q1_FULL:
+ case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case I40IW_AE_DDP_UBE_INVALID_MO:
+ case I40IW_AE_DDP_UBE_INVALID_QN:
+ case I40IW_AE_DDP_NO_L_BIT:
+ case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case I40IW_AE_INVALID_ARP_ENTRY:
+ case I40IW_AE_INVALID_TCP_OPTION_RCVD:
+ case I40IW_AE_STALE_ARP_ENTRY:
+ case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+ case I40IW_AE_LLP_SYN_RECEIVED:
+ case I40IW_AE_LLP_TOO_MANY_RETRIES:
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ case I40IW_AE_LCE_QP_CATASTROPHIC:
+ case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
+ case I40IW_AE_LCE_CQ_CATASTROPHIC:
+ case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
+ case I40IW_AE_QP_SUSPEND_COMPLETE:
+ ctx_info->err_rq_idx_valid = false;
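+ /* fall through */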
+ default:
+ if (!info->sq && ctx_info->err_rq_idx_valid) {
+ ctx_info->err_rq_idx = info->wqe_idx;
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ iwqp->host_ctx.va,
+ ctx_info);
+ }
+ i40iw_terminate_connection(qp, info);
+ break;
+ }
+ } while (1);
+
+ if (aeqcnt)
+ dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
+}
+
+/**
+ * i40iw_manage_apbvt - add or delete tcp port
+ * @iwdev: iwarp device
+ * @accel_local_port: port for apbvt
+ * @add_port: add or delete port
+ */
+int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
+{
+ struct i40iw_apbvt_info *info;
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_apbvt_entry.info;
+
+ memset(info, 0, sizeof(*info));
+ info->add = add_port;
+ info->port = cpu_to_le16(accel_local_port);
+
+ cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Manage APBVT entry fail");
+ return status;
+}
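
/*
 * Illustrative sketch (not part of this patch) of the request/scratch
 * round trip the CQP helpers above and below all use: the submitter
 * stashes the request pointer in a scratch word that the hardware echoes
 * back in the completion, letting the completion handler find and wake
 * the original request.  Everything here is a hypothetical stand-in.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_request {
	int done;
	int status;
};

static uint64_t hw_echo;	/* models the scratch field echoed by hw */

static void submit(struct demo_request *req)
{
	hw_echo = (uintptr_t)req;	/* scratch = request pointer */
}

static void completion_handler(void)
{
	struct demo_request *req = (struct demo_request *)(uintptr_t)hw_echo;

	req->status = 0;	/* copy completion info back */
	req->done = 1;		/* would wake_up() a waiter in the driver */
}

int main(void)
{
	struct demo_request req = { 0, -1 };

	submit(&req);
	completion_handler();
	printf("done=%d status=%d\n", req.done, req.status);
	return 0;
}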
+
+/**
+ * i40iw_manage_arp_cache - manage hw arp cache
+ * @iwdev: iwarp device
+ * @mac_addr: mac address ptr
+ * @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating ipv4 when true
+ * @action: add, delete or modify
+ */
+void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
+ unsigned char *mac_addr,
+ __be32 *ip_addr,
+ bool ipv4,
+ u32 action)
+{
+ struct i40iw_add_arp_cache_entry_info *info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ int arp_index;
+
+ arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
+ if (arp_index == -1)
+ return;
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ if (action == I40IW_ARP_ADD) {
+ cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
+ info = &cqp_info->in.u.add_arp_cache_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->arp_index = cpu_to_le32(arp_index);
+ info->permanent = true;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+ } else {
+ cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
+ cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
+ }
+
+ cqp_info->post_sq = 1;
+ if (i40iw_handle_cqp_op(iwdev, cqp_request))
+ i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
+}
+
+/**
+ * i40iw_send_syn_cqp_callback - do syn/ack after qhash
+ * @cqp_request: qhash cqp completion
+ * @send_ack: flag send ack
+ */
+static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
+{
+ i40iw_send_syn(cqp_request->param, send_ack);
+}
+
+/**
+ * i40iw_manage_qhash - add or modify qhash
+ * @iwdev: iwarp device
+ * @cminfo: cm info for qhash
+ * @etype: type (syn or quad)
+ * @mtype: type of qhash
+ * @cmnode: cmnode associated with connection
+ * @wait: wait for completion
+ */
+enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cminfo,
+ enum i40iw_quad_entry_type etype,
+ enum i40iw_quad_hash_manage_type mtype,
+ void *cmnode,
+ bool wait)
+{
+ struct i40iw_qhash_table_info *info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_qhash_table_entry.info;
+ memset(info, 0, sizeof(*info));
+
+ info->manage = mtype;
+ info->entry_type = etype;
+ if (cminfo->vlan_id != 0xFFFF) {
+ info->vlan_valid = true;
+ info->vlan_id = cpu_to_le16(cminfo->vlan_id);
+ } else {
+ info->vlan_valid = false;
+ }
+
+ info->ipv4_valid = cminfo->ipv4;
+ ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
+ info->qp_num = cpu_to_le32(dev->ilq->qp_id);
+ info->dest_port = cpu_to_le16(cminfo->loc_port);
+ info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
+ info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
+ info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
+ info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
+ if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
+ info->src_port = cpu_to_le16(cminfo->rem_port);
+ info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
+ info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
+ info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
+ info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
+ }
+ if (cmnode) {
+ cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
+ cqp_request->param = (void *)cmnode;
+ }
+
+ if (info->ipv4_valid)
+ i40iw_debug(dev, I40IW_DEBUG_CM,
+ "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
+ __func__, (!mtype) ? "DELETE" : "ADD",
+ info->dest_ip,
+ info->dest_port, info->mac_addr, cminfo->vlan_id);
+ else
+ i40iw_debug(dev, I40IW_DEBUG_CM,
+ "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
+ __func__, (!mtype) ? "DELETE" : "ADD",
+ info->dest_ip,
+ info->dest_port, info->mac_addr, cminfo->vlan_id);
+ cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
+ cqp_info->post_sq = 1;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
+ return status;
+}
+
+/**
+ * i40iw_hw_flush_wqes - flush a qp's wqes
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ * @info: info for flush
+ * @wait: flag wait for completion
+ */
+enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
+ struct i40iw_sc_qp *qp,
+ struct i40iw_qp_flush_info *info,
+ bool wait)
+{
+ enum i40iw_status_code status;
+ struct i40iw_qp_flush_info *hw_info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+
+ cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_flush_wqes.qp = qp;
+ cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Flush WQE's fail");
+ return status;
+}
+
+/**
+ * i40iw_hw_manage_vf_pble_bp - manage vf pbles
+ * @iwdev: iwarp device
+ * @info: info for managing pble
+ * @wait: flag wait for completion
+ */
+enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
+ struct i40iw_manage_vf_pble_info *info,
+ bool wait)
+{
+ enum i40iw_status_code status;
+ struct i40iw_manage_vf_pble_info *hw_info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ if ((iwdev->init_state < CCQ_CREATED) && wait)
+ wait = false;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+
+ cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
+ return status;
+}
+
+/**
+ * i40iw_get_ib_wc - map an iwarp flush code to an IB work completion status
+ * @opcode: iwarp flush code
+ */
+static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
+{
+ switch (opcode) {
+ case FLUSH_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case FLUSH_REM_ACCESS_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case FLUSH_LOC_QP_OP_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case FLUSH_REM_OP_ERR:
+ return IB_WC_REM_OP_ERR;
+ case FLUSH_LOC_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case FLUSH_GENERAL_ERR:
+ return IB_WC_GENERAL_ERR;
+ case FLUSH_FATAL_ERR:
+ default:
+ return IB_WC_FATAL_ERR;
+ }
+}
+
+/**
+ * i40iw_set_flush_info - set flush info
+ * @pinfo: qp flush info to be filled in
+ * @min: minor err
+ * @maj: major err
+ * @opcode: flush error code
+ */
+static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
+ u16 *min,
+ u16 *maj,
+ enum i40iw_flush_opcode opcode)
+{
+ *min = (u16)i40iw_get_ib_wc(opcode);
+ *maj = CQE_MAJOR_DRV;
+ pinfo->userflushcode = true;
+}
+
+/**
+ * i40iw_flush_wqes - flush wqe for qp
+ * @iwdev: iwarp device
+ * @iwqp: qp to flush wqes
+ */
+void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
+{
+ struct i40iw_qp_flush_info info;
+ struct i40iw_qp_flush_info *pinfo = &info;
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
+ memset(pinfo, 0, sizeof(*pinfo));
+ info.sq = true;
+ info.rq = true;
+ if (qp->term_flags) {
+ i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
+ &pinfo->sq_major_code, qp->flush_code);
+ i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
+ &pinfo->rq_major_code, qp->flush_code);
+ }
+ (void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
new file mode 100644
index 000000000000..90e5af21737e
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -0,0 +1,1910 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+
+#include "i40iw.h"
+#include "i40iw_register.h"
+#include <net/netevent.h>
+#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
+#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
+#define CLIENT_IW_INTERFACE_VERSION_BUILD 00
+
+#define DRV_VERSION_MAJOR 0
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 123
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+ __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
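
DRV_VERSION relies on double macro expansion: stringifying the arguments directly would produce the macro names rather than "0.5.123". A minimal user-space sketch of the same idiom (STR/STR_ are illustrative stand-ins for the kernel's __stringify pair):

#include <stdio.h>

#define STR_(x) #x        /* stringify the token as received */
#define STR(x)  STR_(x)   /* expand x first, then stringify */

#define MAJOR 0
#define MINOR 5
#define BUILD 123

int main(void)
{
	/* adjacent string literals concatenate, just as in DRV_VERSION */
	printf("%s\n", STR(MAJOR) "." STR(MINOR) "." STR(BUILD)); /* 0.5.123 */
	return 0;
}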
+
+static int push_mode;
+module_param(push_mode, int, 0644);
+MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
+
+static int resource_profile;
+module_param(resource_profile, int, 0644);
+MODULE_PARM_DESC(resource_profile,
+ "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");
+
+static int max_rdma_vfs = 32;
+module_param(max_rdma_vfs, int, 0644);
+MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32, default=32");
+
+static int mpa_version = 2;
+module_param(mpa_version, int, 0644);
+MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");
+
+MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static struct i40e_client i40iw_client;
+static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
+
+static LIST_HEAD(i40iw_handlers);
+static spinlock_t i40iw_handler_lock;
+
+static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
+ u32 vf_id, u8 *msg, u16 len);
+
+static struct notifier_block i40iw_inetaddr_notifier = {
+ .notifier_call = i40iw_inetaddr_event
+};
+
+static struct notifier_block i40iw_inetaddr6_notifier = {
+ .notifier_call = i40iw_inet6addr_event
+};
+
+static struct notifier_block i40iw_net_notifier = {
+ .notifier_call = i40iw_net_event
+};
+
+static int i40iw_notifiers_registered;
+
+/**
+ * i40iw_find_i40e_handler - find a handler given a client info
+ * @ldev: pointer to a client info
+ */
+static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
+{
+ struct i40iw_handler *hdl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i40iw_handler_lock, flags);
+ list_for_each_entry(hdl, &i40iw_handlers, list) {
+ if (hdl->ldev.netdev == ldev->netdev) {
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return hdl;
+ }
+ }
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return NULL;
+}
+
+/**
+ * i40iw_find_netdev - find a handler given a netdev
+ * @netdev: pointer to net_device
+ */
+struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
+{
+ struct i40iw_handler *hdl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i40iw_handler_lock, flags);
+ list_for_each_entry(hdl, &i40iw_handlers, list) {
+ if (hdl->ldev.netdev == netdev) {
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return hdl;
+ }
+ }
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return NULL;
+}
+
+/**
+ * i40iw_add_handler - add a handler to the list
+ * @hdl: handler to be added to the handler list
+ */
+static void i40iw_add_handler(struct i40iw_handler *hdl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i40iw_handler_lock, flags);
+ list_add(&hdl->list, &i40iw_handlers);
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+}
+
+/**
+ * i40iw_del_handler - delete a handler from the list
+ * @hdl: handler to be deleted from the handler list
+ */
+static int i40iw_del_handler(struct i40iw_handler *hdl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&i40iw_handler_lock, flags);
+ list_del(&hdl->list);
+ spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+ return 0;
+}
+
+/**
+ * i40iw_enable_intr - set up device interrupts
+ * @dev: hardware control device structure
+ * @msix_id: id of the interrupt to be enabled
+ */
+static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
+{
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
+ else
+ i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
+}
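
The value written to the dynamic interrupt control register is an OR of single-bit enables plus a shifted ITR index. A self-contained sketch of that composition, with illustrative mask/shift constants (the real I40E_PFINT_DYN_CTLN_* values live in the hardware register headers):

#include <stdint.h>
#include <stdio.h>

/* stand-in values; not the actual hardware bit positions */
#define INTENA_MASK    0x1u  /* enable the interrupt */
#define CLEARPBA_MASK  0x2u  /* clear the pending-bit array entry */
#define ITR_INDX_SHIFT 3     /* position of the ITR index field */

int main(void)
{
	uint32_t val = INTENA_MASK | CLEARPBA_MASK | (3u << ITR_INDX_SHIFT);

	printf("dyn_ctl = 0x%x\n", val); /* 0x1b with these stand-in values */
	return 0;
}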
+
+/**
+ * i40iw_dpc - tasklet for aeq and ceq 0
+ * @data: iwarp device
+ */
+static void i40iw_dpc(unsigned long data)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)data;
+
+ if (iwdev->msix_shared)
+ i40iw_process_ceq(iwdev, iwdev->ceqlist);
+ i40iw_process_aeq(iwdev);
+ i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
+}
+
+/**
+ * i40iw_ceq_dpc - dpc handler for CEQ
+ * @data: data points to CEQ
+ */
+static void i40iw_ceq_dpc(unsigned long data)
+{
+ struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
+ struct i40iw_device *iwdev = iwceq->iwdev;
+
+ i40iw_process_ceq(iwdev, iwceq);
+ i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
+}
+
+/**
+ * i40iw_irq_handler - interrupt handler for aeq and ceq0
+ * @irq: Interrupt request number
+ * @data: iwarp device
+ */
+static irqreturn_t i40iw_irq_handler(int irq, void *data)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)data;
+
+ tasklet_schedule(&iwdev->dpc_tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40iw_destroy_cqp - destroy control qp
+ * @iwdev: iwarp device
+ * @free_hwcqp: true if the hardware cqp should be destroyed
+ *
+ * Issue destroy cqp request and
+ * free the resources associated with the cqp
+ */
+static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
+{
+ enum i40iw_status_code status = 0;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cqp *cqp = &iwdev->cqp;
+
+ if (free_hwcqp && dev->cqp_ops->cqp_destroy)
+ status = dev->cqp_ops->cqp_destroy(dev->cqp);
+ if (status)
+ i40iw_pr_err("destroy cqp failed");
+
+ i40iw_free_dma_mem(dev->hw, &cqp->sq);
+ kfree(cqp->scratch_array);
+ iwdev->cqp.scratch_array = NULL;
+
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
+}
+
+/**
+ * i40iw_disable_irq - disable device interrupts
+ * @dev: hardware control device structure
+ * @msix_vec: msix vector to disable irq
+ * @dev_id: parameter to pass to free_irq (used during irq setup)
+ *
+ * The function is called when destroying aeq/ceq
+ */
+static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
+ struct i40iw_msix_vector *msix_vec,
+ void *dev_id)
+{
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
+ else
+ i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
+ synchronize_irq(msix_vec->irq);
+ free_irq(msix_vec->irq, dev_id);
+}
+
+/**
+ * i40iw_destroy_aeq - destroy aeq
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Issue a destroy aeq request and
+ * free the resources associated with the aeq
+ * The function is called during driver unload
+ */
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+{
+ enum i40iw_status_code status = I40IW_ERR_NOT_READY;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_aeq *aeq = &iwdev->aeq;
+
+ if (!iwdev->msix_shared)
+ i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
+ if (reset)
+ goto exit;
+
+ if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
+ status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
+ if (status)
+ i40iw_pr_err("destroy aeq failed %d\n", status);
+
+exit:
+ i40iw_free_dma_mem(dev->hw, &aeq->mem);
+}
+
+/**
+ * i40iw_destroy_ceq - destroy ceq
+ * @iwdev: iwarp device
+ * @iwceq: ceq to be destroyed
+ * @reset: true if called before reset
+ *
+ * Issue a destroy ceq request and
+ * free the resources associated with the ceq
+ */
+static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
+ struct i40iw_ceq *iwceq,
+ bool reset)
+{
+ enum i40iw_status_code status;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+ if (reset)
+ goto exit;
+
+ status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
+ if (status) {
+ i40iw_pr_err("ceq destroy command failed %d\n", status);
+ goto exit;
+ }
+
+ status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
+ if (status)
+ i40iw_pr_err("ceq destroy completion failed %d\n", status);
+exit:
+ i40iw_free_dma_mem(dev->hw, &iwceq->mem);
+}
+
+/**
+ * i40iw_dele_ceqs - destroy all ceq's
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Go through all of the device ceq's and for each ceq
+ * disable the ceq interrupt and destroy the ceq
+ */
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+{
+ u32 i = 0;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_ceq *iwceq = iwdev->ceqlist;
+ struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
+
+ if (iwdev->msix_shared) {
+ i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
+ i40iw_destroy_ceq(iwdev, iwceq, reset);
+ iwceq++;
+ i++;
+ }
+
+ for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
+ i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
+ i40iw_destroy_ceq(iwdev, iwceq, reset);
+ }
+}
+
+/**
+ * i40iw_destroy_ccq - destroy control cq
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Issue destroy ccq request and
+ * free the resources associated with the ccq
+ */
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_ccq *ccq = &iwdev->ccq;
+ enum i40iw_status_code status = 0;
+
+ if (!reset)
+ status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
+ if (status)
+ i40iw_pr_err("ccq destroy failed %d\n", status);
+ i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
+}
+
+/* types of hmc objects */
+static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
+ I40IW_HMC_IW_QP,
+ I40IW_HMC_IW_CQ,
+ I40IW_HMC_IW_HTE,
+ I40IW_HMC_IW_ARP,
+ I40IW_HMC_IW_APBVT_ENTRY,
+ I40IW_HMC_IW_MR,
+ I40IW_HMC_IW_XF,
+ I40IW_HMC_IW_XFFL,
+ I40IW_HMC_IW_Q1,
+ I40IW_HMC_IW_Q1FL,
+ I40IW_HMC_IW_TIMER,
+};
+
+/**
+ * i40iw_close_hmc_objects_type - delete hmc objects of a given type
+ * @dev: hardware control device structure
+ * @obj_type: the hmc object type to be deleted
+ * @hmc_info: host memory info struct holding the hmc objects
+ * @is_pf: true if the function is PF otherwise false
+ * @reset: true if called before reset
+ */
+static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type obj_type,
+ struct i40iw_hmc_info *hmc_info,
+ bool is_pf,
+ bool reset)
+{
+ struct i40iw_hmc_del_obj_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = hmc_info;
+ info.rsrc_type = obj_type;
+ info.count = hmc_info->hmc_obj[obj_type].cnt;
+ info.is_pf = is_pf;
+ if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
+ i40iw_pr_err("del obj of type %d failed\n", obj_type);
+}
+
+/**
+ * i40iw_del_hmc_objects - remove all device hmc objects
+ * @dev: iwarp device
+ * @hmc_info: hmc_info to free
+ * @is_pf: true if the hmc_info belongs to the PF itself, false if it
+ * belongs to a VF or was allocated by the PF on behalf of a VF
+ * @reset: true if called before reset
+ */
+static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_info *hmc_info,
+ bool is_pf,
+ bool reset)
+{
+ unsigned int i;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
+ i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
+}
+
+/**
+ * i40iw_ceq_handler - interrupt handler for ceq
+ * @data: ceq pointer
+ */
+static irqreturn_t i40iw_ceq_handler(int irq, void *data)
+{
+ struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
+
+ if (iwceq->irq != irq)
+ i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
+ tasklet_schedule(&iwceq->dpc_tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40iw_create_hmc_obj_type - create hmc object of a given type
+ * @dev: hardware control device structure
+ * @info: information for the hmc object to create
+ */
+static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *info)
+{
+ return dev->hmc_ops->create_hmc_object(dev, info);
+}
+
+/**
+ * i40iw_create_hmc_objs - create all hmc objects for the device
+ * @iwdev: iwarp device
+ * @is_pf: true if the function is PF otherwise false
+ *
+ * Create the device hmc objects and allocate hmc pages
+ * Return 0 if successful, otherwise clean up and return error
+ */
+static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
+ bool is_pf)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_hmc_create_obj_info info;
+ enum i40iw_status_code status;
+ int i;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = dev->hmc_info;
+ info.is_pf = is_pf;
+ info.entry_type = iwdev->sd_type;
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ info.rsrc_type = iw_hmc_obj_types[i];
+ info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ status = i40iw_create_hmc_obj_type(dev, &info);
+ if (status) {
+ i40iw_pr_err("create obj type %d status = %d\n",
+ iw_hmc_obj_types[i], status);
+ break;
+ }
+ }
+ if (!status)
+ return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
+ dev->hmc_fn_id,
+ true, true));
+
+ while (i) {
+ i--;
+ /* destroy the hmc objects of a given type */
+ i40iw_close_hmc_objects_type(dev,
+ iw_hmc_obj_types[i],
+ dev->hmc_info,
+ is_pf,
+ false);
+ }
+ return status;
+}
+
+/**
+ * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
+ * @iwdev: iwarp device
+ * @memptr: points to the memory addresses
+ * @size: size of memory needed
+ * @mask: mask for the aligned memory
+ *
+ * Get aligned memory of the requested size and
+ * update the memptr to point to the new aligned memory
+ * Return 0 if successful, otherwise return no memory error
+ */
+enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+ struct i40iw_dma_mem *memptr,
+ u32 size,
+ u32 mask)
+{
+ unsigned long va, newva;
+ unsigned long extra;
+
+ va = (unsigned long)iwdev->obj_next.va;
+ newva = va;
+ if (mask)
+ newva = ALIGN(va, (mask + 1));
+ extra = newva - va;
+ memptr->va = (u8 *)va + extra;
+ memptr->pa = iwdev->obj_next.pa + extra;
+ memptr->size = size;
+ if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
+ return I40IW_ERR_NO_MEMORY;
+
+ iwdev->obj_next.va = memptr->va + size;
+ iwdev->obj_next.pa = memptr->pa + size;
+ return 0;
+}
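
The mask convention here is that alignment = mask + 1 (a power of two), so ALIGN(va, mask + 1) rounds va up until the low mask bits are clear. A runnable sketch of the arithmetic (the mask value is illustrative):

#include <assert.h>
#include <stdio.h>

/* round va up to the next (mask + 1)-byte boundary, as ALIGN() does */
static unsigned long align_up(unsigned long va, unsigned long mask)
{
	return (va + mask) & ~mask;
}

int main(void)
{
	unsigned long va = 0x1005, mask = 0x3f; /* 64-byte alignment */
	unsigned long newva = align_up(va, mask);

	printf("va=0x%lx newva=0x%lx extra=%lu\n", va, newva, newva - va);
	assert((newva & mask) == 0);
	return 0;
}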
+
+/**
+ * i40iw_create_cqp - create control qp
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the cqp and all the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
+{
+ enum i40iw_status_code status;
+ u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
+ struct i40iw_dma_mem mem;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cqp_init_info cqp_init_info;
+ struct i40iw_cqp *cqp = &iwdev->cqp;
+ u16 maj_err, min_err;
+ int i;
+
+ cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
+ if (!cqp->cqp_requests)
+ return I40IW_ERR_NO_MEMORY;
+ cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
+ if (!cqp->scratch_array) {
+ kfree(cqp->cqp_requests);
+ return I40IW_ERR_NO_MEMORY;
+ }
+ dev->cqp = &cqp->sc_cqp;
+ dev->cqp->dev = dev;
+ memset(&cqp_init_info, 0, sizeof(cqp_init_info));
+ status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
+ (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
+ I40IW_CQP_ALIGNMENT);
+ if (status)
+ goto exit;
+ status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
+ I40IW_HOST_CTX_ALIGNMENT_MASK);
+ if (status)
+ goto exit;
+ dev->cqp->host_ctx_pa = mem.pa;
+ dev->cqp->host_ctx = mem.va;
+ /* populate the cqp init info */
+ cqp_init_info.dev = dev;
+ cqp_init_info.sq_size = sqsize;
+ cqp_init_info.sq = cqp->sq.va;
+ cqp_init_info.sq_pa = cqp->sq.pa;
+ cqp_init_info.host_ctx_pa = mem.pa;
+ cqp_init_info.host_ctx = mem.va;
+ cqp_init_info.hmc_profile = iwdev->resource_profile;
+ cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
+ cqp_init_info.scratch_array = cqp->scratch_array;
+ status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
+ if (status) {
+		i40iw_pr_err("cqp init status %d\n", status);
+ goto exit;
+ }
+ status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
+ if (status) {
+ i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
+ status, maj_err, min_err);
+ goto exit;
+ }
+ spin_lock_init(&cqp->req_lock);
+ INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
+ INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
+ /* init the waitq of the cqp_requests and add them to the list */
+	for (i = 0; i < sqsize; i++) {
+ init_waitqueue_head(&cqp->cqp_requests[i].waitq);
+ list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
+ }
+ return 0;
+exit:
+ /* clean up the created resources */
+ i40iw_destroy_cqp(iwdev, false);
+ return status;
+}
+
+/**
+ * i40iw_create_ccq - create control cq
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the ccq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_dma_mem mem;
+ enum i40iw_status_code status;
+ struct i40iw_ccq_init_info info;
+ struct i40iw_ccq *ccq = &iwdev->ccq;
+
+ memset(&info, 0, sizeof(info));
+ dev->ccq = &ccq->sc_cq;
+ dev->ccq->dev = dev;
+ info.dev = dev;
+ ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
+ ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
+ status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
+ ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
+ if (status)
+ goto exit;
+ status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
+ I40IW_SHADOWAREA_MASK);
+ if (status)
+ goto exit;
+ ccq->sc_cq.back_cq = (void *)ccq;
+ /* populate the ccq init info */
+ info.cq_base = ccq->mem_cq.va;
+ info.cq_pa = ccq->mem_cq.pa;
+ info.num_elem = IW_CCQ_SIZE;
+ info.shadow_area = mem.va;
+ info.shadow_area_pa = mem.pa;
+ info.ceqe_mask = false;
+ info.ceq_id_valid = true;
+ info.shadow_read_threshold = 16;
+ status = dev->ccq_ops->ccq_init(dev->ccq, &info);
+ if (!status)
+ status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
+exit:
+ if (status)
+ i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
+ return status;
+}
+
+/**
+ * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
+ * @iwdev: iwarp device
+ * @msix_vec: interrupt vector information
+ * @iwceq: ceq associated with the vector
+ * @ceq_id: the id number of the iwceq
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
+ struct i40iw_ceq *iwceq,
+ u32 ceq_id,
+ struct i40iw_msix_vector *msix_vec)
+{
+ enum i40iw_status_code status;
+
+ if (iwdev->msix_shared && !ceq_id) {
+ tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+ status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
+ } else {
+ tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
+ status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
+ }
+
+ if (status) {
+ i40iw_pr_err("ceq irq config fail\n");
+ return I40IW_ERR_CONFIG;
+ }
+ msix_vec->ceq_id = ceq_id;
+ msix_vec->cpu_affinity = 0;
+
+ return 0;
+}
+
+/**
+ * i40iw_create_ceq - create completion event queue
+ * @iwdev: iwarp device
+ * @iwceq: pointer to the ceq resources to be created
+ * @ceq_id: the id number of the iwceq
+ *
+ * Return 0, if the ceq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
+ struct i40iw_ceq *iwceq,
+ u32 ceq_id)
+{
+ enum i40iw_status_code status;
+ struct i40iw_ceq_init_info info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ u64 scratch;
+
+ memset(&info, 0, sizeof(info));
+ info.ceq_id = ceq_id;
+ iwceq->iwdev = iwdev;
+ iwceq->mem.size = sizeof(struct i40iw_ceqe) *
+ iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+ status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
+ I40IW_CEQ_ALIGNMENT);
+ if (status)
+ goto exit;
+ info.ceqe_base = iwceq->mem.va;
+ info.ceqe_pa = iwceq->mem.pa;
+
+ info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+ iwceq->sc_ceq.ceq_id = ceq_id;
+ info.dev = dev;
+ scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
+ status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
+ if (!status)
+ status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
+
+exit:
+ if (status)
+ i40iw_free_dma_mem(dev->hw, &iwceq->mem);
+ return status;
+}
+
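+/**
+ * i40iw_request_reset - request a device reset from the lan driver
+ * @iwdev: iwarp device
+ */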
+void i40iw_request_reset(struct i40iw_device *iwdev)
+{
+ struct i40e_info *ldev = iwdev->ldev;
+
+ ldev->ops->request_reset(ldev, iwdev->client, 1);
+}
+
+/**
+ * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
+ * @iwdev: iwarp device
+ * @ldev: i40e lan device
+ *
+ * Allocate a list for all device completion event queues
+ * Create the ceq's and configure their msix interrupt vectors
+ * Return 0, if at least one ceq is successfully set up, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
+ struct i40e_info *ldev)
+{
+ u32 i;
+ u32 ceq_id;
+ struct i40iw_ceq *iwceq;
+ struct i40iw_msix_vector *msix_vec;
+ enum i40iw_status_code status = 0;
+ u32 num_ceqs;
+
+ if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
+ status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
+ iwdev->iw_qvlist);
+ if (status)
+ goto exit;
+ } else {
+ status = I40IW_ERR_BAD_PTR;
+ goto exit;
+ }
+
+ num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
+ iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
+ if (!iwdev->ceqlist) {
+ status = I40IW_ERR_NO_MEMORY;
+ goto exit;
+ }
+ i = (iwdev->msix_shared) ? 0 : 1;
+ for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
+ iwceq = &iwdev->ceqlist[ceq_id];
+ status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
+ if (status) {
+ i40iw_pr_err("create ceq status = %d\n", status);
+ break;
+ }
+
+ msix_vec = &iwdev->iw_msixtbl[i];
+ iwceq->irq = msix_vec->irq;
+ iwceq->msix_idx = msix_vec->idx;
+ status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
+ if (status) {
+ i40iw_destroy_ceq(iwdev, iwceq, false);
+ break;
+ }
+ i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
+ iwdev->ceqs_count++;
+ }
+
+exit:
+ if (status) {
+ if (!iwdev->ceqs_count) {
+ kfree(iwdev->ceqlist);
+ iwdev->ceqlist = NULL;
+ } else {
+ status = 0;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40iw_configure_aeq_vector - set up the msix vector for aeq
+ * @iwdev: iwarp device
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
+{
+ struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
+	int ret = 0;
+
+ if (!iwdev->msix_shared) {
+ tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+ ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
+ }
+ if (ret) {
+ i40iw_pr_err("aeq irq config fail\n");
+ return I40IW_ERR_CONFIG;
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_create_aeq - create async event queue
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the aeq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
+{
+ enum i40iw_status_code status;
+ struct i40iw_aeq_init_info info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_aeq *aeq = &iwdev->aeq;
+ u64 scratch = 0;
+ u32 aeq_size;
+
+ aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
+ iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+ memset(&info, 0, sizeof(info));
+ aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
+ status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
+ I40IW_AEQ_ALIGNMENT);
+ if (status)
+ goto exit;
+
+ info.aeqe_base = aeq->mem.va;
+ info.aeq_elem_pa = aeq->mem.pa;
+ info.elem_cnt = aeq_size;
+ info.dev = dev;
+ status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
+ if (status)
+ goto exit;
+ status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
+ if (!status)
+ status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
+exit:
+ if (status)
+ i40iw_free_dma_mem(dev->hw, &aeq->mem);
+ return status;
+}
+
+/**
+ * i40iw_setup_aeq - set up the device aeq
+ * @iwdev: iwarp device
+ *
+ * Create the aeq and configure its msix interrupt vector
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+
+ status = i40iw_create_aeq(iwdev);
+ if (status)
+ return status;
+
+ status = i40iw_configure_aeq_vector(iwdev);
+ if (status) {
+ i40iw_destroy_aeq(iwdev, false);
+ return status;
+ }
+
+ if (!iwdev->msix_shared)
+ i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
+ return 0;
+}
+
+/**
+ * i40iw_initialize_ilq - create iwarp local queue for cm
+ * @iwdev: iwarp device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
+{
+ struct i40iw_puda_rsrc_info info;
+ enum i40iw_status_code status;
+
+	memset(&info, 0, sizeof(info));
+	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
+ info.cq_id = 1;
+ info.qp_id = 0;
+ info.count = 1;
+ info.pd_id = 1;
+ info.sq_size = 8192;
+ info.rq_size = 8192;
+ info.buf_size = 1024;
+ info.tx_buf_cnt = 16384;
+ info.mss = iwdev->mss;
+ info.receive = i40iw_receive_ilq;
+ info.xmit_complete = i40iw_free_sqbuf;
+ status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+ if (status)
+ i40iw_pr_err("ilq create fail\n");
+ return status;
+}
+
+/**
+ * i40iw_initialize_ieq - create iwarp exception queue
+ * @iwdev: iwarp device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
+{
+ struct i40iw_puda_rsrc_info info;
+ enum i40iw_status_code status;
+
+	memset(&info, 0, sizeof(info));
+	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
+ info.cq_id = 2;
+ info.qp_id = iwdev->sc_dev.exception_lan_queue;
+ info.count = 1;
+ info.pd_id = 2;
+ info.sq_size = 8192;
+ info.rq_size = 8192;
+ info.buf_size = 2048;
+ info.mss = iwdev->mss;
+ info.tx_buf_cnt = 16384;
+ status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+ if (status)
+ i40iw_pr_err("ieq create fail\n");
+ return status;
+}
+
+/**
+ * i40iw_hmc_setup - create hmc objects for the device
+ * @iwdev: iwarp device
+ *
+ * Set up the device private memory space for the number and size of
+ * the hmc objects and create the objects
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
+{
+ enum i40iw_status_code status;
+
+ iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
+ status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
+ if (status)
+ goto exit;
+ status = i40iw_create_hmc_objs(iwdev, true);
+ if (status)
+ goto exit;
+ iwdev->init_state = HMC_OBJS_CREATED;
+exit:
+ return status;
+}
+
+/**
+ * i40iw_del_init_mem - deallocate memory resources
+ * @iwdev: iwarp device
+ */
+static void i40iw_del_init_mem(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+ i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
+ kfree(dev->hmc_info->sd_table.sd_entry);
+ dev->hmc_info->sd_table.sd_entry = NULL;
+ kfree(iwdev->mem_resources);
+ iwdev->mem_resources = NULL;
+ kfree(iwdev->ceqlist);
+ iwdev->ceqlist = NULL;
+ kfree(iwdev->iw_msixtbl);
+ iwdev->iw_msixtbl = NULL;
+ kfree(iwdev->hmc_info_mem);
+ iwdev->hmc_info_mem = NULL;
+}
+
+/**
+ * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
+ * @iwdev: iwarp device
+ * @idx: the index of the mac ip address to delete
+ */
+static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
+{
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status = 0;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request) {
+ i40iw_pr_err("cqp_request memory failed\n");
+ return;
+ }
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
+ cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
+}
+
+/**
+ * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
+ * @iwdev: iwarp device
+ * @mac_addr: pointer to mac address
+ * @idx: the index of the mac ip address to add
+ */
+static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
+ u8 *mac_addr,
+ u8 idx)
+{
+ struct i40iw_local_mac_ipaddr_entry_info *info;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status = 0;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request) {
+ i40iw_pr_err("cqp_request memory failed\n");
+ return I40IW_ERR_NO_MEMORY;
+ }
+
+ cqp_info = &cqp_request->info;
+
+ cqp_info->post_sq = 1;
+ info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ info->entry_idx = idx;
+	cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
+	cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+	cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
+ return status;
+}
+
+/**
+ * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
+ * @iwdev: iwarp device
+ * @mac_ip_tbl_idx: the index of the new mac ip address
+ *
+ * Allocate a mac ip address entry and update the mac_ip_tbl_idx
+ * to hold the index of the newly created mac ip address
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
+ u16 *mac_ip_tbl_idx)
+{
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status = 0;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request) {
+ i40iw_pr_err("cqp_request memory failed\n");
+ return I40IW_ERR_NO_MEMORY;
+ }
+
+ /* increment refcount, because we need the cqp request ret value */
+ atomic_inc(&cqp_request->refcount);
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (!status)
+ *mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
+ else
+ i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
+ /* decrement refcount and free the cqp request, if no longer used */
+ i40iw_put_cqp_request(iwcqp, cqp_request);
+ return status;
+}
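
The atomic_inc above takes an extra reference so op_ret_val can still be read after the completion path does its own put; i40iw_put_cqp_request then drops the extra hold. A single-threaded sketch of the get/use/put pattern (plain int in place of atomic_t; names hypothetical):

#include <stdio.h>

struct request { int refcount; int op_ret_val; };

static void put_request(struct request *r)
{
	if (--r->refcount == 0)
		printf("request returned to the free pool\n");
}

int main(void)
{
	struct request req = { .refcount = 1 };  /* reference held by submitter */

	req.refcount++;                  /* extra hold: result still needed */
	req.op_ret_val = 7;              /* completion path stores the result */
	put_request(&req);               /* completion path drops its reference */
	printf("result = %d\n", req.op_ret_val); /* safe: we still hold one */
	put_request(&req);               /* final put frees the request */
	return 0;
}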
+
+/**
+ * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
+ * @iwdev: iwarp device
+ * @macaddr: pointer to mac address
+ *
+ * Allocate a mac ip address entry and add it to the hw table
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
+ u8 *macaddr)
+{
+ enum i40iw_status_code status;
+
+ status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
+	if (!status) {
+		status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
+						    (u8)iwdev->mac_ip_table_idx);
+		if (status)
+			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
+	}
+ return status;
+}
+
+/**
+ * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
+ * @iwdev: iwarp device
+ */
+static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp;
+ __be32 local_ipaddr6[4];
+
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, ip_dev) {
+ if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
+ (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
+ (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ i40iw_pr_err("ipv6 inet device not found\n");
+ break;
+ }
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
+ rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
+ i40iw_copy_ip_ntohl(local_ipaddr6,
+ ifp->addr.in6_u.u6_addr32);
+ i40iw_manage_arp_cache(iwdev,
+ ip_dev->dev_addr,
+ local_ipaddr6,
+ false,
+ I40IW_ARP_ADD);
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
+ * @iwdev: iwarp device
+ */
+static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
+{
+ struct net_device *dev;
+ struct in_device *idev;
+ bool got_lock = true;
+ u32 ip_addr;
+
+ if (!rtnl_trylock())
+ got_lock = false;
+
+ for_each_netdev(&init_net, dev) {
+ if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
+ (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
+ (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ idev = in_dev_get(dev);
+ for_ifa(idev) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+ "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
+ rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
+
+ ip_addr = ntohl(ifa->ifa_address);
+ i40iw_manage_arp_cache(iwdev,
+ dev->dev_addr,
+ &ip_addr,
+ true,
+ I40IW_ARP_ADD);
+ }
+ endfor_ifa(idev);
+ in_dev_put(idev);
+ }
+ }
+ if (got_lock)
+ rtnl_unlock();
+}
+
+/**
+ * i40iw_add_mac_ip - add mac and ip addresses
+ * @iwdev: iwarp device
+ *
+ * Create and add a mac ip address entry to the hw table and
+ * ipv4/ipv6 addresses to the arp cache
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
+{
+ struct net_device *netdev = iwdev->netdev;
+ enum i40iw_status_code status;
+
+ status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
+ if (status)
+ return status;
+ i40iw_add_ipv4_addr(iwdev);
+ i40iw_add_ipv6_addr(iwdev);
+ return 0;
+}
+
+/**
+ * i40iw_wait_pe_ready - check if firmware is ready
+ * @hw: provides access to registers
+ */
+static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
+{
+ u32 statusfw;
+ u32 statuscpu0;
+ u32 statuscpu1;
+ u32 statuscpu2;
+ u32 retrycount = 0;
+
+ do {
+ statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
+		i40iw_pr_info("[%04d] fw load status[x%04X]\n", __LINE__, statusfw);
+ statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
+ i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
+ statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
+ i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
+ __LINE__, statuscpu1);
+ statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
+ i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
+ __LINE__, statuscpu2);
+ if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
+ break; /* SUCCESS */
+ mdelay(1000);
+ retrycount++;
+ } while (retrycount < 14);
+ i40iw_wr32(hw, 0xb4040, 0x4C104C5);
+}
+
+/**
+ * i40iw_initialize_dev - initialize device
+ * @iwdev: iwarp device
+ * @ldev: lan device information
+ *
+ * Allocate memory for the hmc objects and initialize iwdev
+ * Return 0 if successful, otherwise clean up the resources
+ * and return error
+ */
+static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+ struct i40e_info *ldev)
+{
+ enum i40iw_status_code status;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_device_init_info info;
+ struct i40iw_dma_mem mem;
+ u32 size;
+
+ memset(&info, 0, sizeof(info));
+ size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
+ (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
+ iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
+ if (!iwdev->hmc_info_mem) {
+ i40iw_pr_err("memory alloc fail\n");
+ return I40IW_ERR_NO_MEMORY;
+ }
+ iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
+ dev->hmc_info = &iwdev->hw.hmc;
+ dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
+ status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
+ if (status)
+ goto exit;
+ info.fpm_query_buf_pa = mem.pa;
+ info.fpm_query_buf = mem.va;
+ status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
+ I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
+ if (status)
+ goto exit;
+ info.fpm_commit_buf_pa = mem.pa;
+ info.fpm_commit_buf = mem.va;
+ info.hmc_fn_id = ldev->fid;
+ info.is_pf = (ldev->ftype) ? false : true;
+ info.bar0 = ldev->hw_addr;
+ info.hw = &iwdev->hw;
+ info.debug_mask = debug;
+ info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
+ info.exception_lan_queue = 1;
+ info.vchnl_send = i40iw_virtchnl_send;
+ status = i40iw_device_init(&iwdev->sc_dev, &info);
+exit:
+ if (status) {
+ kfree(iwdev->hmc_info_mem);
+ iwdev->hmc_info_mem = NULL;
+ }
+ return status;
+}
+
+/**
+ * i40iw_register_notifiers - register tcp ip notifiers
+ */
+static void i40iw_register_notifiers(void)
+{
+ if (!i40iw_notifiers_registered) {
+ register_inetaddr_notifier(&i40iw_inetaddr_notifier);
+ register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ register_netevent_notifier(&i40iw_net_notifier);
+ }
+ i40iw_notifiers_registered++;
+}
+
+/**
+ * i40iw_save_msix_info - copy msix vector information to iwarp device
+ * @iwdev: iwarp device
+ * @ldev: lan device information
+ *
+ * Allocate iwdev msix table and copy the ldev msix info to the table
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
+ struct i40e_info *ldev)
+{
+ struct i40e_qvlist_info *iw_qvlist;
+ struct i40e_qv_info *iw_qvinfo;
+ u32 ceq_idx;
+ u32 i;
+ u32 size;
+
+ iwdev->msix_count = ldev->msix_count;
+
+ size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
+ size += sizeof(struct i40e_qvlist_info);
+	size += sizeof(struct i40e_qv_info) * (iwdev->msix_count - 1);
+ iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
+
+ if (!iwdev->iw_msixtbl)
+ return I40IW_ERR_NO_MEMORY;
+ iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
+ iw_qvlist = iwdev->iw_qvlist;
+ iw_qvinfo = iw_qvlist->qv_info;
+ iw_qvlist->num_vectors = iwdev->msix_count;
+ if (iwdev->msix_count <= num_online_cpus())
+ iwdev->msix_shared = true;
+ for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
+ iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
+ iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
+ if (i == 0) {
+ iw_qvinfo->aeq_idx = 0;
+ if (iwdev->msix_shared)
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ else
+ iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
+ } else {
+ iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ }
+ iw_qvinfo->itr_idx = 3;
+ iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
+ }
+ return 0;
+}
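
The qvlist allocation sizes a structure that already embeds one qv_info element, so only msix_count - 1 extra elements are appended after it. A user-space sketch of this trailing-array sizing with simplified stand-in structs (i40e's real definitions differ):

#include <stdio.h>
#include <stdlib.h>

struct qv_info { unsigned int v_idx, ceq_idx, aeq_idx, itr_idx; };
struct qvlist_info {
	unsigned int num_vectors;
	struct qv_info qv_info[1];   /* first element lives in the struct */
};

int main(void)
{
	unsigned int count = 4;
	size_t size = sizeof(struct qvlist_info) +
		      sizeof(struct qv_info) * (count - 1);
	struct qvlist_info *q = calloc(1, size);

	if (!q)
		return 1;
	q->num_vectors = count;
	printf("%zu bytes for %u vectors\n", size, count);
	free(q);
	return 0;
}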
+
+/**
+ * i40iw_deinit_device - clean up the device resources
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ * @del_hdl: true if delete hdl entry
+ *
+ * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
+ * destroy the device queues and free the pble and the hmc objects
+ */
+static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
+{
+ struct i40e_info *ldev = iwdev->ldev;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+ i40iw_pr_info("state = %d\n", iwdev->init_state);
+
+ switch (iwdev->init_state) {
+ case RDMA_DEV_REGISTERED:
+ iwdev->iw_status = 0;
+ i40iw_port_ibevent(iwdev);
+ i40iw_destroy_rdma_device(iwdev->iwibdev);
+ /* fallthrough */
+ case IP_ADDR_REGISTERED:
+ if (!reset)
+ i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
+ /* fallthrough */
+ case INET_NOTIFIER:
+ if (i40iw_notifiers_registered > 0) {
+ i40iw_notifiers_registered--;
+ unregister_netevent_notifier(&i40iw_net_notifier);
+ unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
+ unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ }
+ /* fallthrough */
+ case CEQ_CREATED:
+ i40iw_dele_ceqs(iwdev, reset);
+ /* fallthrough */
+ case AEQ_CREATED:
+ i40iw_destroy_aeq(iwdev, reset);
+ /* fallthrough */
+ case IEQ_CREATED:
+ i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+ /* fallthrough */
+ case ILQ_CREATED:
+ i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+ /* fallthrough */
+ case CCQ_CREATED:
+ i40iw_destroy_ccq(iwdev, reset);
+ /* fallthrough */
+ case PBLE_CHUNK_MEM:
+ i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+ /* fallthrough */
+ case HMC_OBJS_CREATED:
+ i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+ /* fallthrough */
+ case CQP_CREATED:
+ i40iw_destroy_cqp(iwdev, !reset);
+ /* fallthrough */
+ case INITIAL_STATE:
+ i40iw_cleanup_cm_core(&iwdev->cm_core);
+ if (dev->is_pf)
+ i40iw_hw_stats_del_timer(dev);
+
+ i40iw_del_init_mem(iwdev);
+ break;
+ case INVALID_STATE:
+ /* fallthrough */
+ default:
+ i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
+ break;
+ }
+
+ if (del_hdl)
+ i40iw_del_handler(i40iw_find_i40e_handler(ldev));
+ kfree(iwdev->hdl);
+}
+
+/**
+ * i40iw_setup_init_state - set up the initial device struct
+ * @hdl: handler for iwarp device - one per instance
+ * @ldev: lan device information
+ * @client: iwarp client information, provided during registration
+ *
+ * Initialize the iwarp device and its hdl information
+ * using the ldev and client information
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
+ struct i40e_info *ldev,
+ struct i40e_client *client)
+{
+ struct i40iw_device *iwdev = &hdl->device;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+
+ memcpy(&hdl->ldev, ldev, sizeof(*ldev));
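+	/* resource profile 1 (weighted VF) is handled as profile 2 (even distribution) */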
+ if (resource_profile == 1)
+ resource_profile = 2;
+
+ iwdev->mpa_version = mpa_version;
+ iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
+ (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
+ I40IW_HMC_PROFILE_DEFAULT;
+ iwdev->max_rdma_vfs =
+ (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
+ iwdev->netdev = ldev->netdev;
+ hdl->client = client;
+ iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
+ if (!ldev->ftype)
+ iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
+ else
+ iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;
+
+ status = i40iw_save_msix_info(iwdev, ldev);
+ if (status)
+ goto exit;
+ iwdev->hw.dev_context = (void *)ldev->pcidev;
+ iwdev->hw.hw_addr = ldev->hw_addr;
+ status = i40iw_allocate_dma_mem(&iwdev->hw,
+ &iwdev->obj_mem, 8192, 4096);
+ if (status)
+ goto exit;
+ iwdev->obj_next = iwdev->obj_mem;
+ iwdev->push_mode = push_mode;
+ init_waitqueue_head(&iwdev->vchnl_waitq);
+ status = i40iw_initialize_dev(iwdev, ldev);
+exit:
+ if (status) {
+ kfree(iwdev->iw_msixtbl);
+ i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
+ iwdev->iw_msixtbl = NULL;
+ }
+ return status;
+}
+
+/**
+ * i40iw_open - client interface operation open for iwarp/uda device
+ * @ldev: lan device information
+ * @client: iwarp client information, provided during registration
+ *
+ * Called by the lan driver during the processing of client register
+ * Create device resources, set up queues, pble and hmc objects and
+ * register the device with the ib verbs interface
+ * Return 0 if successful, otherwise return error
+ */
+static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_dev *dev;
+ enum i40iw_status_code status;
+ struct i40iw_handler *hdl;
+
+ hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
+ if (!hdl)
+ return -ENOMEM;
+ iwdev = &hdl->device;
+ iwdev->hdl = hdl;
+ dev = &iwdev->sc_dev;
+ i40iw_setup_cm_core(iwdev);
+
+ dev->back_dev = (void *)iwdev;
+ iwdev->ldev = &hdl->ldev;
+ iwdev->client = client;
+ mutex_init(&iwdev->pbl_mutex);
+ i40iw_add_handler(hdl);
+
+ do {
+ status = i40iw_setup_init_state(hdl, ldev, client);
+ if (status)
+ break;
+ iwdev->init_state = INITIAL_STATE;
+ if (dev->is_pf)
+ i40iw_wait_pe_ready(dev->hw);
+ status = i40iw_create_cqp(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = CQP_CREATED;
+ status = i40iw_hmc_setup(iwdev);
+ if (status)
+ break;
+ status = i40iw_create_ccq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = CCQ_CREATED;
+ status = i40iw_initialize_ilq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = ILQ_CREATED;
+ status = i40iw_initialize_ieq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = IEQ_CREATED;
+ status = i40iw_setup_aeq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = AEQ_CREATED;
+ status = i40iw_setup_ceqs(iwdev, ldev);
+ if (status)
+ break;
+ iwdev->init_state = CEQ_CREATED;
+ status = i40iw_initialize_hw_resources(iwdev);
+ if (status)
+ break;
+ dev->ccq_ops->ccq_arm(dev->ccq);
+ status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
+ if (status)
+ break;
+ iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
+ i40iw_register_notifiers();
+ iwdev->init_state = INET_NOTIFIER;
+ status = i40iw_add_mac_ip(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = IP_ADDR_REGISTERED;
+ if (i40iw_register_rdma_device(iwdev)) {
+ i40iw_pr_err("register rdma device fail\n");
+ break;
+		}
+
+ iwdev->init_state = RDMA_DEV_REGISTERED;
+ iwdev->iw_status = 1;
+ i40iw_port_ibevent(iwdev);
+ i40iw_pr_info("i40iw_open completed\n");
+ return 0;
+ } while (0);
+
+ i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
+ i40iw_deinit_device(iwdev, false, false);
+ return -ERESTART;
+}
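
i40iw_open uses the do { } while (0) form so that any failing step can break out with init_state recording how far bring-up got; i40iw_deinit_device then unwinds from that state downward through its fall-through switch. A compact sketch of the idiom (steps and states invented for illustration):

#include <stdio.h>

enum state { INITIAL, STEP_A_DONE, STEP_B_DONE };

static int step_a(void) { return 0; }  /* succeeds */
static int step_b(void) { return -1; } /* fails, for demonstration */

static void unwind(enum state s)
{
	switch (s) {            /* tear down in reverse, falling through */
	case STEP_B_DONE:
		printf("undo step b\n");
		/* fall through */
	case STEP_A_DONE:
		printf("undo step a\n");
		/* fall through */
	case INITIAL:
		printf("free initial resources\n");
	}
}

int main(void)
{
	enum state st = INITIAL;

	do {
		if (step_a())
			break;
		st = STEP_A_DONE;
		if (step_b())
			break;
		st = STEP_B_DONE;
		return 0;       /* everything came up */
	} while (0);

	unwind(st);             /* one cleanup path for every failure */
	return 1;
}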
+
+/**
+ * i40iw_l2param_change - handle qs handles for qos and mss change
+ * @ldev: lan device information
+ * @client: client for parameter change
+ * @params: new parameters from L2
+ */
+static void i40iw_l2param_change(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params)
+{
+ struct i40iw_handler *hdl;
+ struct i40iw_device *iwdev;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return;
+
+ iwdev = &hdl->device;
+ if (params->mtu)
+ iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
+}
+
+/**
+ * i40iw_close - client interface operation close for iwarp/uda device
+ * @ldev: lan device information
+ * @client: client to close
+ * @reset: true if called before reset
+ *
+ * Called by the lan driver during the processing of client unregister
+ * Destroy and clean up the driver resources
+ */
+static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
+{
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return;
+
+ iwdev = &hdl->device;
+ destroy_workqueue(iwdev->virtchnl_wq);
+ i40iw_deinit_device(iwdev, reset, true);
+}
+
+/**
+ * i40iw_vf_reset - process VF reset
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id
+ *
+ * Called when a VF is reset by the PF
+ * Destroy and clean up the VF resources
+ */
+static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
+{
+ struct i40iw_handler *hdl;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_hmc_fcn_info hmc_fcn_info;
+ struct i40iw_virt_mem vf_dev_mem;
+ struct i40iw_vfdev *tmp_vfdev;
+ unsigned int i;
+ unsigned long flags;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return;
+
+ dev = &hdl->device.sc_dev;
+
+ for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
+ if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
+ continue;
+
+ /* free all resources allocated on behalf of vf */
+ tmp_vfdev = dev->vf_dev[i];
+ spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+ dev->vf_dev[i] = NULL;
+ spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+ i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
+ /* remove vf hmc function */
+ memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
+ hmc_fcn_info.vf_id = vf_id;
+ hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
+ hmc_fcn_info.free_fcn = true;
+ i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
+ /* free vf_dev */
+ vf_dev_mem.va = tmp_vfdev;
+ vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
+ sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
+ i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
+ break;
+ }
+}
+
+/**
+ * i40iw_vf_enable - enable a number of VFs
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @num_vfs: number of VFs for the PF
+ *
+ * Called when the number of VFs changes
+ */
+static void i40iw_vf_enable(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 num_vfs)
+{
+ struct i40iw_handler *hdl;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return;
+
+ if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
+ hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
+ else
+ hdl->device.max_enabled_vfs = num_vfs;
+}
+
+/**
+ * i40iw_vf_capable - check if VF capable
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id
+ *
+ * Return 1 if a VF slot is available or if VF is already RDMA enabled
+ * Return 0 otherwise
+ */
+static int i40iw_vf_capable(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id)
+{
+ struct i40iw_handler *hdl;
+ struct i40iw_sc_dev *dev;
+ unsigned int i;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return 0;
+
+ dev = &hdl->device.sc_dev;
+
+ for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
+ if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_virtchnl_receive - receive a message through the virtual channel
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id associated with the message
+ * @msg: message buffer pointer
+ * @len: length of the message
+ *
+ * Invoke virtual channel receive operation for the given msg
+ * Return 0 if successful, otherwise return error
+ */
+static int i40iw_virtchnl_receive(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id,
+ u8 *msg,
+ u16 len)
+{
+ struct i40iw_handler *hdl;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_device *iwdev;
+ int ret_code = I40IW_NOT_SUPPORTED;
+
+ if (!len || !msg)
+ return I40IW_ERR_PARAM;
+
+ hdl = i40iw_find_i40e_handler(ldev);
+ if (!hdl)
+ return I40IW_ERR_PARAM;
+
+ dev = &hdl->device.sc_dev;
+ iwdev = dev->back_dev;
+
+ i40iw_debug(dev, I40IW_DEBUG_VIRT, "msg %p, message length %u\n", msg, len);
+
+ if (dev->vchnl_if.vchnl_recv) {
+ ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
+ if (!dev->is_pf) {
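+ /* wake the VF thread waiting for this virtual channel response */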
+ atomic_dec(&iwdev->vchnl_msgs);
+ wake_up(&iwdev->vchnl_waitq);
+ }
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_virtchnl_send - send a message through the virtual channel
+ * @dev: iwarp device
+ * @vf_id: virtual function id associated with the message
+ * @msg: virtual channel message buffer pointer
+ * @len: length of the message
+ *
+ * Invoke virtual channel send operation for the given msg
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len)
+{
+ struct i40iw_device *iwdev;
+ struct i40e_info *ldev;
+ enum i40iw_status_code ret_code = I40IW_ERR_BAD_PTR;
+
+ if (!dev || !dev->back_dev)
+ return ret_code;
+
+ iwdev = dev->back_dev;
+ ldev = iwdev->ldev;
+
+ if (ldev && ldev->ops && ldev->ops->virtchnl_send)
+ ret_code = ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
+
+ return ret_code;
+}
+
+/* client interface functions */
+static struct i40e_client_ops i40e_ops = {
+ .open = i40iw_open,
+ .close = i40iw_close,
+ .l2_param_change = i40iw_l2param_change,
+ .virtchnl_receive = i40iw_virtchnl_receive,
+ .vf_reset = i40iw_vf_reset,
+ .vf_enable = i40iw_vf_enable,
+ .vf_capable = i40iw_vf_capable
+};
+
+/**
+ * i40iw_init_module - driver initialization function
+ *
+ * First function to call when the driver is loaded
+ * Register the driver as i40e client and port mapper client
+ */
+static int __init i40iw_init_module(void)
+{
+ int ret;
+
+ memset(&i40iw_client, 0, sizeof(i40iw_client));
+ i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
+ i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
+ i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
+ i40iw_client.ops = &i40e_ops;
+ memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
+ i40iw_client.type = I40E_CLIENT_IWARP;
+ spin_lock_init(&i40iw_handler_lock);
+ ret = i40e_register_client(&i40iw_client);
+ return ret;
+}
+
+/**
+ * i40iw_exit_module - driver exit clean up function
+ *
+ * The function is called just before the driver is unloaded
+ * Unregister the driver as i40e client and port mapper client
+ */
+static void __exit i40iw_exit_module(void)
+{
+ i40e_unregister_client(&i40iw_client);
+}
+
+module_init(i40iw_init_module);
+module_exit(i40iw_exit_module);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
new file mode 100644
index 000000000000..7e20493510e8
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -0,0 +1,215 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_OSDEP_H
+#define I40IW_OSDEP_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <net/tcp.h>
+#include <crypto/hash.h>
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#define STATS_TIMER_DELAY 1000
+
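+/**
+ * set_64bit_val - set 64 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @value: value to write
+ **/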
+static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
+{
+ wqe_words[byte_index >> 3] = value;
+}
+
+/**
+ * set_32bit_val - set 32 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @value: value to write
+ **/
+static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value)
+{
+ wqe_words[byte_index >> 2] = value;
+}
+
+/**
+ * get_64bit_val - read 64 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @value: read value
+ **/
+static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
+{
+ *value = wqe_words[byte_index >> 3];
+}
+
+/**
+ * get_32bit_val - read 32 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @value: return 32 bit value
+ **/
+static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value)
+{
+ *value = wqe_words[byte_index >> 2];
+}
+
+struct i40iw_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+struct i40iw_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+
+#define i40iw_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40iw " s, ##__VA_ARGS__); \
+} while (0)
+
+#define i40iw_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
+
+#define I40E_GLHMC_VFSDCMD(_i) (0x000C8000 + ((_i) * 4)) \
+ /* _i=0...31 */
+#define I40E_GLHMC_VFSDCMD_MAX_INDEX 31
+#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK (0xFFF \
+ << I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PF_SHIFT 16
+#define I40E_GLHMC_VFSDCMD_PF_MASK (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT)
+#define I40E_GLHMC_VFSDCMD_VF_SHIFT 20
+#define I40E_GLHMC_VFSDCMD_VF_MASK (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29
+#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK (0x3 \
+ << I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT 31
+#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT)
+
+#define I40E_GLHMC_VFSDDATAHIGH(_i) (0x000C8200 + ((_i) * 4)) \
+ /* _i=0...31 */
+#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX 31
+#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF \
+ << I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT)
+
+#define I40E_GLHMC_VFSDDATALOW(_i) (0x000C8100 + ((_i) * 4)) \
+ /* _i=0...31 */
+#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX 31
+#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK (0x1 \
+ << I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK (0x1 \
+ << I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK (0x3FF \
+ << I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK (0xFFFFF \
+ << I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT)
+
+#define I40E_GLPE_FWLDSTATUS 0x0000D200
+#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0
+#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK (0x1 \
+ << I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT 1
+#define I40E_GLPE_FWLDSTATUS_DONE_MASK (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT 2
+#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK (0x1 \
+ << I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT 3
+#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK (0x1 \
+ << I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT 4
+#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK (0x1 \
+ << I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT)
+
+struct i40iw_sc_dev;
+struct i40iw_sc_qp;
+struct i40iw_puda_buf;
+struct i40iw_puda_completion_info;
+struct i40iw_update_sds_info;
+struct i40iw_hmc_fcn_info;
+struct i40iw_virtchnl_work_info;
+struct i40iw_manage_vf_pble_info;
+struct i40iw_device;
+struct i40iw_hmc_info;
+struct i40iw_hw;
+
+u8 __iomem *i40iw_get_hw_addr(void *dev);
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev);
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr,
+ u32 length, u32 value);
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf);
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum);
+void i40iw_free_hash_desc(struct shash_desc *);
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **);
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+ struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *info);
+enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_fcn_info *hmcfcninfo);
+enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *values_mem,
+ u8 hmc_fn_id);
+enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *values_mem,
+ u8 hmc_fn_id);
+enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *mem);
+enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
+ struct i40iw_manage_vf_pble_info *info);
+void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
+void *i40iw_remove_head(struct list_head *list);
+
+void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
+void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
+void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp);
+void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
+
+enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
+ struct i40iw_manage_vf_pble_info *info,
+ bool wait);
+struct i40iw_dev_pestat;
+void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
+void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
+#define i40iw_mmiowb() mmiowb()
+void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
+u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg);
+#endif /* I40IW_OSDEP_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
new file mode 100644
index 000000000000..a0b8ca10d67e
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -0,0 +1,106 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_P_H
+#define I40IW_P_H
+
+#define PAUSE_TIMER_VALUE 0xFFFF
+#define REFRESH_THRESHOLD 0x7FFF
+#define HIGH_THRESHOLD 0x800
+#define LOW_THRESHOLD 0x200
+#define ALL_TC2PFC 0xFF
+
+void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
+ char *desc, u64 *buf, u32 size);
+/* init operations */
+enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+ struct i40iw_device_init_info *info);
+
+enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);
+
+void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
+
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
+ struct i40iw_fast_reg_stag_info *info,
+ bool post_sq);
+
+/* HMC/FPM functions */
+enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
+ u8 hmc_fn_id);
+
+enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
+ u32 *vf_cnt_array);
+
+/* cqp misc functions */
+
+void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
+
+void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
+
+void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
+
+enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_sc_qp *qp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_sc_qp *qp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp,
+ u64 scratch, u8 hmc_fn_id,
+ bool post_sq,
+ bool poll_registers);
+
+enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);
+
+void free_sd_mem(struct i40iw_sc_dev *dev);
+
+enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
+ struct cqp_commands_info *pcmdinfo);
+
+enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev);
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
+ struct i40iw_dma_mem *mem, u64 size,
+ u32 alignment);
+void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem);
+enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
+ struct i40iw_virt_mem *mem, u32 size);
+enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
+ struct i40iw_virt_mem *mem);
+u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
new file mode 100644
index 000000000000..ded853d2fad8
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -0,0 +1,618 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_status.h"
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+
+#include <linux/pci.h>
+#include <linux/genalloc.h>
+#include <linux/vmalloc.h>
+#include "i40iw_pble.h"
+#include "i40iw.h"
+
+struct i40iw_device;
+static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc);
+static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
+
+/**
+ * i40iw_destroy_pble_pool - destroy pool during module unload
+ * @pble_rsrc: pble resources
+ */
+void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+ struct list_head *clist;
+ struct list_head *tlist;
+ struct i40iw_chunk *chunk;
+ struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;
+
+ if (pinfo->pool) {
+ list_for_each_safe(clist, tlist, &pinfo->clist) {
+ chunk = list_entry(clist, struct i40iw_chunk, list);
+ if (chunk->type == I40IW_VMALLOC)
+ i40iw_free_vmalloc_mem(dev->hw, chunk);
+ kfree(chunk);
+ }
+ gen_pool_destroy(pinfo->pool);
+ }
+}
+
+/**
+ * i40iw_hmc_init_pble - Initialize pble resources during module load
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ */
+enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+ struct i40iw_hmc_info *hmc_info;
+ u32 fpm_idx = 0;
+
+ hmc_info = dev->hmc_info;
+ pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
+ /* Now start the pbles on a 4k boundary */
+ if (pble_rsrc->fpm_base_addr & 0xfff)
+ fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
+
+ pble_rsrc->unallocated_pble =
+ hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
+ pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
+
+ pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
+ pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
+ INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
+ if (!pble_rsrc->pinfo.pool)
+ goto error;
+
+ if (add_pble_pool(dev, pble_rsrc))
+ goto error;
+
+ return 0;
+
+error:
+ i40iw_destroy_pble_pool(dev, pble_rsrc);
+ return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
+ * @pble_rsrc: structure containing fpm address
+ * @idx: where to return indexes
+ */
+static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+{
+ idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
+ idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
+}
+
+/**
+ * add_sd_direct - add sd direct for pble
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource ptr
+ * @info: page info for sd
+ */
+static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_add_page_info *info)
+{
+ enum i40iw_status_code ret_code = 0;
+ struct sd_pd_idx *idx = &info->idx;
+ struct i40iw_chunk *chunk = info->chunk;
+ struct i40iw_hmc_info *hmc_info = info->hmc_info;
+ struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
+ u32 offset = 0;
+
+ if (!sd_entry->valid) {
+ if (dev->is_pf) {
+ ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
+ info->idx.sd_idx,
+ I40IW_SD_TYPE_DIRECT,
+ I40IW_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ return ret_code;
+ chunk->type = I40IW_DMA_COHERENT;
+ }
+ }
+ offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
+ chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
+ chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
+ chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
+ return 0;
+}
+
+/**
+ * i40iw_free_vmalloc_mem - free vmalloc during close
+ * @hw: hw struct
+ * @chunk: chunk information for vmalloc
+ */
+static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
+{
+ struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+ int i;
+
+ if (!chunk->pg_cnt)
+ goto done;
+ for (i = 0; i < chunk->pg_cnt; i++)
+ dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ done:
+ kfree(chunk->dmaaddrs);
+ chunk->dmaaddrs = NULL;
+ vfree(chunk->vaddr);
+ chunk->vaddr = NULL;
+ chunk->type = 0;
+}
+
+/**
+ * i40iw_get_vmalloc_mem - get 2M page for sd
+ * @hw: hardware address
+ * @chunk: chunk to add
+ * @pg_cnt: number of 4K pages
+ */
+static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
+ struct i40iw_chunk *chunk,
+ int pg_cnt)
+{
+ struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+ struct page *page;
+ u8 *addr;
+ u32 size;
+ int i;
+
+ chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
+ if (!chunk->dmaaddrs)
+ return I40IW_ERR_NO_MEMORY;
+ size = PAGE_SIZE * pg_cnt;
+ chunk->vaddr = vmalloc(size);
+ if (!chunk->vaddr) {
+ kfree(chunk->dmaaddrs);
+ chunk->dmaaddrs = NULL;
+ return I40IW_ERR_NO_MEMORY;
+ }
+ chunk->size = size;
+ addr = (u8 *)chunk->vaddr;
+ for (i = 0; i < pg_cnt; i++) {
+ page = vmalloc_to_page((void *)addr);
+ if (!page)
+ break;
+ chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
+ break;
+ addr += PAGE_SIZE;
+ }
+
+ chunk->pg_cnt = i;
+ chunk->type = I40IW_VMALLOC;
+ if (i == pg_cnt)
+ return 0;
+
+ i40iw_free_vmalloc_mem(hw, chunk);
+ return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * fpm_to_idx - given fpm address, get pble index
+ * @pble_rsrc: pble resource management
+ * @addr: fpm address for index
+ */
+static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
+{
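+ /* each pble is 8 bytes, so shift the byte offset down by 3 */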
+ return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
+}
+
+/**
+ * add_bp_pages - add backing pages for sd
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ * @info: page info for sd
+ */
+static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_add_page_info *info)
+{
+ u8 *addr;
+ struct i40iw_dma_mem mem;
+ struct i40iw_hmc_pd_entry *pd_entry;
+ struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
+ struct i40iw_hmc_info *hmc_info = info->hmc_info;
+ struct i40iw_chunk *chunk = info->chunk;
+ struct i40iw_manage_vf_pble_info vf_pble_info;
+ enum i40iw_status_code status = 0;
+ u32 rel_pd_idx = info->idx.rel_pd_idx;
+ u32 pd_idx = info->idx.pd_idx;
+ u32 i;
+
+ status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
+ if (status)
+ return I40IW_ERR_NO_MEMORY;
+ status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
+ info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
+ I40IW_HMC_DIRECT_BP_SIZE);
+ if (status) {
+ i40iw_free_vmalloc_mem(dev->hw, chunk);
+ return status;
+ }
+ if (!dev->is_pf) {
+ status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
+ fpm_to_idx(pble_rsrc,
+ pble_rsrc->next_fpm_addr),
+ (info->pages << PBLE_512_SHIFT));
+ if (status) {
+ i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status);
+ i40iw_free_vmalloc_mem(dev->hw, chunk);
+ return status;
+ }
+ }
+ addr = chunk->vaddr;
+ for (i = 0; i < info->pages; i++) {
+ mem.pa = chunk->dmaaddrs[i];
+ mem.size = PAGE_SIZE;
+ mem.va = (void *)(addr);
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
+ if (!pd_entry->valid) {
+ status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
+ if (status)
+ goto error;
+ addr += PAGE_SIZE;
+ } else {
+ i40iw_pr_err("pd entry is valid expecting to be invalid\n");
+ }
+ }
+ if (!dev->is_pf) {
+ vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
+ vf_pble_info.inv_pd_ent = false;
+ vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
+ vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
+ vf_pble_info.sd_index = info->idx.sd_idx;
+ status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
+ &vf_pble_info, true);
+ if (status) {
+ i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
+ goto error;
+ }
+ }
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ return 0;
+error:
+ i40iw_free_vmalloc_mem(dev->hw, chunk);
+ return status;
+}
+
+/**
+ * add_pble_pool - add an sd entry for pble resource
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ */
+static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+ struct i40iw_hmc_sd_entry *sd_entry;
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_chunk *chunk;
+ struct i40iw_add_page_info info;
+ struct sd_pd_idx *idx = &info.idx;
+ enum i40iw_status_code ret_code = 0;
+ enum i40iw_sd_entry_type sd_entry_type;
+ u64 sd_reg_val = 0;
+ u32 pages;
+
+ if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
+ return I40IW_ERR_NO_MEMORY;
+ if (pble_rsrc->next_fpm_addr & 0xfff) {
+ i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
+ return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+ }
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ return I40IW_ERR_NO_MEMORY;
+ hmc_info = dev->hmc_info;
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ get_sd_pd_idx(pble_rsrc, idx);
+ sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
+ pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
+ idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
+ pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
+ if (!pages) {
+ ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
+ goto error;
+ }
+ info.chunk = chunk;
+ info.hmc_info = hmc_info;
+ info.pages = pages;
+ info.sd_entry = sd_entry;
+ if (!sd_entry->valid) {
+ sd_entry_type = (!idx->rel_pd_idx &&
+ (pages == I40IW_HMC_PD_CNT_IN_SD) &&
+ dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
+ } else {
+ sd_entry_type = sd_entry->entry_type;
+ }
+ i40iw_debug(dev, I40IW_DEBUG_PBLE,
+ "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
+ pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
+ i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
+ sd_entry_type, sd_entry->valid);
+
+ if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
+ ret_code = add_sd_direct(dev, pble_rsrc, &info);
+ if (ret_code)
+ sd_entry_type = I40IW_SD_TYPE_PAGED;
+ else
+ pble_rsrc->stats_direct_sds++;
+
+ if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
+ ret_code = add_bp_pages(dev, pble_rsrc, &info);
+ if (ret_code)
+ goto error;
+ else
+ pble_rsrc->stats_paged_sds++;
+ }
+
+ if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
+ (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
+ i40iw_pr_err("could not allocate memory by gen_pool_addr_virt()\n");
+ ret_code = I40IW_ERR_NO_MEMORY;
+ goto error;
+ }
+ pble_rsrc->next_fpm_addr += chunk->size;
+ i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
+ pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+ pble_rsrc->unallocated_pble -= (chunk->size >> 3);
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
+ if (sd_entry->valid)
+ return 0;
+ if (dev->is_pf)
+ ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+ sd_reg_val, idx->sd_idx,
+ sd_entry->entry_type, true);
+ if (ret_code) {
+ i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
+ goto error;
+ }
+
+ sd_entry->valid = true;
+ return 0;
+ error:
+ kfree(chunk);
+ return ret_code;
+}
+
+/**
+ * free_lvl2 - free level 2 pble
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc)
+{
+ u32 i;
+ struct gen_pool *pool;
+ struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+ struct i40iw_pble_info *root = &lvl2->root;
+ struct i40iw_pble_info *leaf = lvl2->leaf;
+
+ pool = pble_rsrc->pinfo.pool;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ if (leaf->addr)
+ gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
+ else
+ break;
+ }
+
+ if (root->addr)
+ gen_pool_free(pool, root->addr, (root->cnt << 3));
+
+ kfree(lvl2->leaf);
+ lvl2->leaf = NULL;
+}
+
+/**
+ * get_lvl2_pble - get level 2 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ * @pool: pool pointer
+ */
+static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc,
+ struct gen_pool *pool)
+{
+ u32 lf4k, lflast, total, i;
+ u32 pblcnt = PBLE_PER_PAGE;
+ u64 *addr;
+ struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+ struct i40iw_pble_info *root = &lvl2->root;
+ struct i40iw_pble_info *leaf;
+
+ /* number of full 512-pble (4K) leaves */
+ lf4k = palloc->total_cnt >> 9;
+ lflast = palloc->total_cnt % PBLE_PER_PAGE;
+ total = (lflast == 0) ? lf4k : lf4k + 1;
+ lvl2->leaf_cnt = total;
+
+ leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
+ if (!leaf)
+ return I40IW_ERR_NO_MEMORY;
+ lvl2->leaf = leaf;
+ /* allocate pbles for the root */
+ root->addr = gen_pool_alloc(pool, (total << 3));
+ if (!root->addr) {
+ kfree(lvl2->leaf);
+ lvl2->leaf = NULL;
+ return I40IW_ERR_NO_MEMORY;
+ }
+ root->idx = fpm_to_idx(pble_rsrc,
+ (u64)gen_pool_virt_to_phys(pool, root->addr));
+ root->cnt = total;
+ addr = (u64 *)root->addr;
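+ /* fill the root page with the pble index of each leaf */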
+ for (i = 0; i < total; i++, leaf++) {
+ pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
+ leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
+ if (!leaf->addr)
+ goto error;
+ leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
+
+ leaf->cnt = pblcnt;
+ *addr = (u64)leaf->idx;
+ addr++;
+ }
+ palloc->level = I40IW_LEVEL_2;
+ pble_rsrc->stats_lvl2++;
+ return 0;
+ error:
+ free_lvl2(pble_rsrc, palloc);
+ return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * get_lvl1_pble - get level 1 pble resource
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ * @palloc: level 1 pble allocation
+ */
+static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc)
+{
+ u64 *addr;
+ struct gen_pool *pool;
+ struct i40iw_pble_info *lvl1 = &palloc->level1;
+
+ pool = pble_rsrc->pinfo.pool;
+ addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));
+
+ if (!addr)
+ return I40IW_ERR_NO_MEMORY;
+
+ palloc->level = I40IW_LEVEL_1;
+ lvl1->addr = (unsigned long)addr;
+ lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
+ (unsigned long)addr));
+ lvl1->cnt = palloc->total_cnt;
+ pble_rsrc->stats_lvl1++;
+ return 0;
+}
+
+/**
+ * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @pool: pointer to general purpose special memory pool descriptor
+ */
+static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc,
+ struct gen_pool *pool)
+{
+ enum i40iw_status_code status = 0;
+
+ status = get_lvl1_pble(dev, pble_rsrc, palloc);
+ if (status && (palloc->total_cnt > PBLE_PER_PAGE))
+ status = get_lvl2_pble(pble_rsrc, palloc, pool);
+ return status;
+}
+
+/**
+ * i40iw_get_pble - allocate pbles from the pool
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @pble_cnt: #of pbles requested
+ */
+enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc,
+ u32 pble_cnt)
+{
+ struct gen_pool *pool;
+ enum i40iw_status_code status = 0;
+ u32 max_sds = 0;
+ int i;
+
+ pool = pble_rsrc->pinfo.pool;
+ palloc->total_cnt = pble_cnt;
+ palloc->level = I40IW_LEVEL_0;
+ /* check first to see if we can get pbles without acquiring additional sds */
+ status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
+ if (!status)
+ goto exit;
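+ /* one sd covers 512 * 512 pbles, hence the shift by 18 */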
+ max_sds = (palloc->total_cnt >> 18) + 1;
+ for (i = 0; i < max_sds; i++) {
+ status = add_pble_pool(dev, pble_rsrc);
+ if (status)
+ break;
+ status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
+ if (!status)
+ break;
+ }
+exit:
+ if (!status)
+ pble_rsrc->stats_alloc_ok++;
+ else
+ pble_rsrc->stats_alloc_fail++;
+
+ return status;
+}
+
+/**
+ * i40iw_free_pble - put pbles back into pool
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble resource being freed
+ */
+void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc)
+{
+ struct gen_pool *pool;
+
+ pool = pble_rsrc->pinfo.pool;
+ if (palloc->level == I40IW_LEVEL_2)
+ free_lvl2(pble_rsrc, palloc);
+ else
+ gen_pool_free(pool, palloc->level1.addr,
+ (palloc->level1.cnt << 3));
+ pble_rsrc->stats_alloc_freed++;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.h b/drivers/infiniband/hw/i40iw/i40iw_pble.h
new file mode 100644
index 000000000000..7b1851d21cc0
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_PBLE_H
+#define I40IW_PBLE_H
+
+#define POOL_SHIFT 6
+#define PBLE_PER_PAGE 512
+#define I40IW_HMC_PAGED_BP_SHIFT 12
+#define PBLE_512_SHIFT 9
+
+enum i40iw_pble_level {
+ I40IW_LEVEL_0 = 0,
+ I40IW_LEVEL_1 = 1,
+ I40IW_LEVEL_2 = 2
+};
+
+enum i40iw_alloc_type {
+ I40IW_NO_ALLOC = 0,
+ I40IW_DMA_COHERENT = 1,
+ I40IW_VMALLOC = 2
+};
+
+struct i40iw_pble_info {
+ unsigned long addr;
+ u32 idx;
+ u32 cnt;
+};
+
+struct i40iw_pble_level2 {
+ struct i40iw_pble_info root;
+ struct i40iw_pble_info *leaf;
+ u32 leaf_cnt;
+};
+
+struct i40iw_pble_alloc {
+ u32 total_cnt;
+ enum i40iw_pble_level level;
+ union {
+ struct i40iw_pble_info level1;
+ struct i40iw_pble_level2 level2;
+ };
+};
+
+struct sd_pd_idx {
+ u32 sd_idx;
+ u32 pd_idx;
+ u32 rel_pd_idx;
+};
+
+struct i40iw_add_page_info {
+ struct i40iw_chunk *chunk;
+ struct i40iw_hmc_sd_entry *sd_entry;
+ struct i40iw_hmc_info *hmc_info;
+ struct sd_pd_idx idx;
+ u32 pages;
+};
+
+struct i40iw_chunk {
+ struct list_head list;
+ u32 size;
+ void *vaddr;
+ u64 fpm_addr;
+ u32 pg_cnt;
+ dma_addr_t *dmaaddrs;
+ enum i40iw_alloc_type type;
+};
+
+struct i40iw_pble_pool {
+ struct gen_pool *pool;
+ struct list_head clist;
+ u32 total_pble_alloc;
+ u32 free_pble_cnt;
+ u32 pool_shift;
+};
+
+struct i40iw_hmc_pble_rsrc {
+ u32 unallocated_pble;
+ u64 fpm_base_addr;
+ u64 next_fpm_addr;
+ struct i40iw_pble_pool pinfo;
+
+ u32 stats_direct_sds;
+ u32 stats_paged_sds;
+ u64 stats_alloc_ok;
+ u64 stats_alloc_fail;
+ u64 stats_alloc_freed;
+ u64 stats_lvl1;
+ u64 stats_lvl2;
+};
+
+void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc);
+enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc);
+void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc);
+enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_pble_rsrc *pble_rsrc,
+ struct i40iw_pble_alloc *palloc,
+ u32 pble_cnt);
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
new file mode 100644
index 000000000000..8eb400d8a7a0
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -0,0 +1,1436 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_puda.h"
+
+static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf);
+static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
+static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
+static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
+ *rsrc, bool initial);
+/**
+ * i40iw_puda_get_listbuf - get buffer from puda list
+ * @list: list to use for buffers (ILQ or IEQ)
+ */
+static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
+{
+ struct i40iw_puda_buf *buf = NULL;
+
+ if (!list_empty(list)) {
+ buf = (struct i40iw_puda_buf *)list->next;
+ list_del((struct list_head *)&buf->list);
+ }
+ return buf;
+}
+
+/**
+ * i40iw_puda_get_bufpool - return buffer from resource
+ * @rsrc: resource to use for buffer
+ */
+struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_puda_buf *buf = NULL;
+ struct list_head *list = &rsrc->bufpool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ buf = i40iw_puda_get_listbuf(list);
+ if (buf)
+ rsrc->avail_buf_count--;
+ else
+ rsrc->stats_buf_alloc_fail++;
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ return buf;
+}
+
+/**
+ * i40iw_puda_ret_bufpool - return buffer to rsrc list
+ * @rsrc: resource to use for buffer
+ * @buf: buffer to return to resource
+ */
+void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
+ struct i40iw_puda_buf *buf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ list_add(&buf->list, &rsrc->bufpool);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->avail_buf_count++;
+}
+
+/**
+ * i40iw_puda_post_recvbuf - set wqe for rcv buffer
+ * @rsrc: resource ptr
+ * @wqe_idx: wqe index to use
+ * @buf: puda buffer for rcv q
+ * @initial: flag if during init time
+ */
+static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
+ struct i40iw_puda_buf *buf, bool initial)
+{
+ u64 *wqe;
+ struct i40iw_sc_qp *qp = &rsrc->qp;
+ u64 offset24 = 0;
+
+ qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
+ wqe_idx, buf, wqe);
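+ /* set the valid bit on the initial post; toggle it on replenish */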
+ if (!initial)
+ get_64bit_val(wqe, 24, &offset24);
+
+ offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
+ set_64bit_val(wqe, 24, offset24);
+
+ set_64bit_val(wqe, 0, buf->mem.pa);
+ set_64bit_val(wqe, 8,
+ LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * i40iw_puda_replenish_rq - post rcv buffers
+ * @rsrc: resource to use for buffer
+ * @initial: flag if during init time
+ */
+static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
+ bool initial)
+{
+ u32 i;
+ u32 invalid_cnt = rsrc->rxq_invalid_cnt;
+ struct i40iw_puda_buf *buf = NULL;
+
+ for (i = 0; i < invalid_cnt; i++) {
+ buf = i40iw_puda_get_bufpool(rsrc);
+ if (!buf)
+ return I40IW_ERR_list_empty;
+ i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
+ initial);
+ rsrc->rx_wqe_idx =
+ ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
+ rsrc->rxq_invalid_cnt--;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_puda_alloc_buf - allocate mem for buffer
+ * @dev: iwarp device
+ * @length: length of buffer
+ */
+static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
+ u32 length)
+{
+ struct i40iw_puda_buf *buf = NULL;
+ struct i40iw_virt_mem buf_mem;
+ enum i40iw_status_code ret;
+
+ ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
+ sizeof(struct i40iw_puda_buf));
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s: error mem for buf\n", __func__);
+ return NULL;
+ }
+ buf = (struct i40iw_puda_buf *)buf_mem.va;
+ ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA,
+ "%s: error dma mem for buf\n", __func__);
+ i40iw_free_virt_mem(dev->hw, &buf_mem);
+ return NULL;
+ }
+ buf->buf_mem.va = buf_mem.va;
+ buf->buf_mem.size = buf_mem.size;
+ return buf;
+}
+
+/**
+ * i40iw_puda_dele_buf - delete buffer back to system
+ * @dev: iwarp device
+ * @buf: buffer to free
+ */
+static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf)
+{
+ i40iw_free_dma_mem(dev->hw, &buf->mem);
+ i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
+}
+
+/**
+ * i40iw_puda_get_next_send_wqe - return next wqe for processing
+ * @qp: puda qp for wqe
+ * @wqe_idx: wqe index for caller
+ */
+static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
+{
+ u64 *wqe = NULL;
+ enum i40iw_status_code ret_code = 0;
+
+ *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return wqe;
+ wqe = qp->sq_base[*wqe_idx].elem;
+
+ return wqe;
+}
+
+/**
+ * i40iw_puda_poll_info - poll cq for completion
+ * @cq: cq for poll
+ * @info: info return for successful completion
+ */
+static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
+ struct i40iw_puda_completion_info *info)
+{
+ u64 qword0, qword2, qword3;
+ u64 *cqe;
+ u64 comp_ctx;
+ bool valid_bit;
+ u32 major_err, minor_err;
+ bool error;
+
+ cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
+ get_64bit_val(cqe, 24, &qword3);
+ valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
+
+ if (valid_bit != cq->cq_uk.polarity)
+ return I40IW_ERR_QUEUE_EMPTY;
+
+ i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
+ error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
+ if (error) {
+ i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
+ major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
+ minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
+ info->compl_error = major_err << 16 | minor_err;
+ return I40IW_ERR_CQ_COMPL_ERROR;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
+ info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
+ info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
+
+ if (info->q_type == I40IW_CQE_QTYPE_RQ) {
+ info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
+ info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
+ info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
+ info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_puda_poll_completion - processes completion for cq
+ * @dev: iwarp device
+ * @cq: cq getting interrupt
+ * @compl_err: return any completion err
+ */
+enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_cq *cq, u32 *compl_err)
+{
+ struct i40iw_qp_uk *qp;
+ struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
+ struct i40iw_puda_completion_info info;
+ enum i40iw_status_code ret = 0;
+ struct i40iw_puda_buf *buf;
+ struct i40iw_puda_rsrc *rsrc;
+ void *sqwrid;
+ u8 cq_type = cq->cq_type;
+ unsigned long flags;
+
+ if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
+ rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
+ return I40IW_ERR_BAD_PTR;
+ }
+ memset(&info, 0, sizeof(info));
+ ret = i40iw_puda_poll_info(cq, &info);
+ *compl_err = info.compl_error;
+ if (ret == I40IW_ERR_QUEUE_EMPTY)
+ return ret;
+ if (ret)
+ goto done;
+
+ qp = info.qp;
+ if (!qp || !rsrc) {
+ ret = I40IW_ERR_BAD_PTR;
+ goto done;
+ }
+
+ if (qp->qp_id != rsrc->qp_id) {
+ ret = I40IW_ERR_BAD_PTR;
+ goto done;
+ }
+
+ if (info.q_type == I40IW_CQE_QTYPE_RQ) {
+ buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
+ /* Get all the tcpip information in the buf header */
+ ret = i40iw_puda_get_tcpip_info(&info, buf);
+ if (ret) {
+ rsrc->stats_rcvd_pkt_err++;
+ if (cq_type == I40IW_CQ_TYPE_ILQ) {
+ i40iw_ilq_putback_rcvbuf(&rsrc->qp,
+ info.wqe_idx);
+ } else {
+ i40iw_puda_ret_bufpool(rsrc, buf);
+ i40iw_puda_replenish_rq(rsrc, false);
+ }
+ goto done;
+ }
+
+ rsrc->stats_pkt_rcvd++;
+ rsrc->compl_rxwqe_idx = info.wqe_idx;
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
+ rsrc->receive(rsrc->dev, buf);
+ if (cq_type == I40IW_CQ_TYPE_ILQ)
+ i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
+ else
+ i40iw_puda_replenish_rq(rsrc, false);
+
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
+ sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
+ I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
+ rsrc->xmit_complete(rsrc->dev, sqwrid);
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ rsrc->tx_wqe_avail_cnt++;
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ if (!list_empty(&dev->ilq->txpend))
+ i40iw_puda_send_buf(dev->ilq, NULL);
+ }
+
+done:
+ I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
+ if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
+ cq_uk->polarity = !cq_uk->polarity;
+ /* update cq tail in cq shadow memory also */
+ I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
+ set_64bit_val(cq_uk->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
+ return 0;
+}
+
+/**
+ * i40iw_puda_send - complete send wqe for transmit
+ * @qp: puda qp for send
+ * @info: buffer information for transmit
+ */
+enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
+ struct i40iw_puda_send_info *info)
+{
+ u64 *wqe;
+ u32 iplen, l4len;
+ u64 header[2];
+ u32 wqe_idx;
+ u8 iipt;
+
+ /* number of 32 bits DWORDS in header */
+ l4len = info->tcplen >> 2;
+ if (info->ipv4) {
+ iipt = 3;
+ iplen = 5;
+ } else {
+ iipt = 1;
+ iplen = 10;
+ }
+
+ wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+ qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
+ /* Third line of WQE descriptor */
+ /* maclen is in words */
+ header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
+ LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
+ LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
+ LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
+ /* Fourth line of WQE descriptor */
+ header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
+ LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
+ LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);
+
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
+ set_64bit_val(wqe, 16, header[0]);
+ set_64bit_val(wqe, 24, header[1]);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
+ i40iw_qp_post_wr(&qp->qp_uk);
+ return 0;
+}
+
+/**
+ * i40iw_puda_send_buf - transmit puda buffer
+ * @rsrc: resource to use for buffer
+ * @buf: puda buffer to transmit
+ */
+void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
+{
+ struct i40iw_puda_send_info info;
+ enum i40iw_status_code ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ /* queue the buffer if no send wqe is available, or if this is
+ * a new buffer and others are already pending
+ */
+ if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
+ list_add_tail(&buf->list, &rsrc->txpend);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->stats_sent_pkt_q++;
+ if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s: adding to txpend\n", __func__);
+ return;
+ }
+ rsrc->tx_wqe_avail_cnt--;
+ /* if we are coming from a completion and have pending buffers
+ * then get one from the pending list
+ */
+ if (!buf) {
+ buf = i40iw_puda_get_listbuf(&rsrc->txpend);
+ if (!buf)
+ goto done;
+ }
+
+ info.scratch = (void *)buf;
+ info.paddr = buf->mem.pa;
+ info.len = buf->totallen;
+ info.tcplen = buf->tcphlen;
+ info.maclen = buf->maclen;
+ info.ipv4 = buf->ipv4;
+ info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
+
+ ret = i40iw_puda_send(&rsrc->qp, &info);
+ if (ret) {
+ rsrc->tx_wqe_avail_cnt++;
+ rsrc->stats_sent_pkt_q++;
+ list_add(&buf->list, &rsrc->txpend);
+ if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s: adding to puda_send\n", __func__);
+ } else {
+ rsrc->stats_pkt_sent++;
+ }
+done:
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+}
+
+/**
+ * i40iw_puda_qp_setctx - during init, set qp's context
+ * @rsrc: qp's resource
+ */
+static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_sc_qp *qp = &rsrc->qp;
+ u64 *qp_ctx = qp->hw_host_ctx;
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+ set_64bit_val(qp_ctx, 24,
+ LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
+ LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
+
+ set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
+ set_64bit_val(qp_ctx, 56, 0);
+ set_64bit_val(qp_ctx, 64, 1);
+
+ set_64bit_val(qp_ctx, 136,
+ LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
+ LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
+
+ set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));
+
+ set_64bit_val(qp_ctx, 168,
+ LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));
+
+ set_64bit_val(qp_ctx, 176,
+ LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
+ LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
+ LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));
+
+ i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
+ qp_ctx, I40IW_QP_CTX_SIZE);
+}
+
+/**
+ * i40iw_puda_qp_wqe - setup wqe for qp create
+ * @rsrc: resource for qp
+ */
+static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_sc_qp *qp = &rsrc->qp;
+ struct i40iw_sc_dev *dev = rsrc->dev;
+ struct i40iw_sc_cqp *cqp;
+ u64 *wqe;
+ u64 header;
+ struct i40iw_ccq_cqe_info compl_info;
+ enum i40iw_status_code status = 0;
+
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+ header = qp->qp_uk.qp_id |
+ LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
+ LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
+ LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
+ LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ set_64bit_val(wqe, 24, header);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
+ i40iw_sc_cqp_post_sq(cqp);
+ status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_CREATE_QP,
+ &compl_info);
+ return status;
+}
+
+/**
+ * i40iw_puda_qp_create - create qp for resource
+ * @rsrc: resource to use for buffer
+ */
+static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_sc_qp *qp = &rsrc->qp;
+ struct i40iw_qp_uk *ukqp = &qp->qp_uk;
+ enum i40iw_status_code ret = 0;
+ u32 sq_size, rq_size, t_size;
+ struct i40iw_dma_mem *mem;
+
+ sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
+ rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
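+ /* qpmem layout: sq ring, rq ring, shadow area, then qp context */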
+ t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
+ I40IW_QP_CTX_SIZE);
+ /* Get page aligned memory */
+ ret =
+ i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
+ I40IW_HW_PAGE_SIZE);
+ if (ret) {
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
+ return ret;
+ }
+
+ mem = &rsrc->qpmem;
+ memset(mem->va, 0, t_size);
+ qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
+ qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
+ qp->pd = &rsrc->sc_pd;
+ qp->qp_type = I40IW_QP_TYPE_UDA;
+ qp->dev = rsrc->dev;
+ qp->back_qp = (void *)rsrc;
+ qp->sq_pa = mem->pa;
+ qp->rq_pa = qp->sq_pa + sq_size;
+ ukqp->sq_base = mem->va;
+ ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
+ ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
+ qp->shadow_area_pa = qp->rq_pa + rq_size;
+ qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
+ qp->hw_host_ctx_pa =
+ qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
+ ukqp->qp_id = rsrc->qp_id;
+ ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
+ ukqp->rq_wrid_array = rsrc->rq_wrid_array;
+
+ ukqp->qp_id = rsrc->qp_id;
+ ukqp->sq_size = rsrc->sq_size;
+ ukqp->rq_size = rsrc->rq_size;
+
+ I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
+ I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
+ I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
+
+ if (qp->pd->dev->is_pf)
+ ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+ I40E_PFPE_WQEALLOC);
+ else
+ ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+ I40E_VFPE_WQEALLOC1);
+
+ qp->qs_handle = qp->dev->qs_handle;
+ i40iw_puda_qp_setctx(rsrc);
+ ret = i40iw_puda_qp_wqe(rsrc);
+ if (ret)
+ i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
+ return ret;
+}
+
+/**
+ * i40iw_puda_cq_create - create cq for resource
+ * @rsrc: resource for which to create the cq
+ */
+static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
+{
+ struct i40iw_sc_dev *dev = rsrc->dev;
+ struct i40iw_sc_cq *cq = &rsrc->cq;
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 header;
+ enum i40iw_status_code ret = 0;
+ u32 tsize, cqsize;
+ u32 shadow_read_threshold = 128;
+ struct i40iw_dma_mem *mem;
+ struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_cq_init_info info;
+ struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
+
+ cq->back_cq = (void *)rsrc;
+ cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
+ tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
+ ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
+ I40IW_CQ0_ALIGNMENT_MASK);
+ if (ret)
+ return ret;
+
+ mem = &rsrc->cqmem;
+ memset(&info, 0, sizeof(info));
+ info.dev = dev;
+ info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
+ I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
+ info.shadow_read_threshold = rsrc->cq_size >> 2;
+ info.ceq_id_valid = true;
+ info.cq_base_pa = mem->pa;
+ info.shadow_area_pa = mem->pa + cqsize;
+ init_info->cq_base = mem->va;
+ init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
+ init_info->cq_size = rsrc->cq_size;
+ init_info->cq_id = rsrc->cq_id;
+ ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
+ if (ret)
+ goto error;
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe) {
+ ret = I40IW_ERR_RING_FULL;
+ goto error;
+ }
+
+ set_64bit_val(wqe, 0, rsrc->cq_size);
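+ /* wqe cq context: the cq pointer, right-shifted one bit (RS_64_1) */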
+ set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+ set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+ set_64bit_val(wqe, 32, cq->cq_pa);
+
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+
+ header = rsrc->cq_id |
+ LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+ LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+ LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ set_64bit_val(wqe, 24, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ i40iw_sc_cqp_post_sq(dev->cqp);
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_CREATE_CQ,
+ &compl_info);
+
+error:
+ if (ret)
+ i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+ return ret;
+}
+
+/**
+ * i40iw_puda_dele_resources - delete all resources during close
+ * @dev: iwarp device
+ * @type: type of resource to delete
+ * @reset: true if reset chip
+ */
+void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+ enum puda_resource_type type,
+ bool reset)
+{
+ struct i40iw_ccq_cqe_info compl_info;
+ struct i40iw_puda_rsrc *rsrc;
+ struct i40iw_puda_buf *buf = NULL;
+ struct i40iw_puda_buf *nextbuf = NULL;
+ struct i40iw_virt_mem *vmem;
+ enum i40iw_status_code ret;
+
+ switch (type) {
+ case I40IW_PUDA_RSRC_TYPE_ILQ:
+ rsrc = dev->ilq;
+ vmem = &dev->ilq_mem;
+ break;
+ case I40IW_PUDA_RSRC_TYPE_IEQ:
+ rsrc = dev->ieq;
+ vmem = &dev->ieq_mem;
+ break;
+ default:
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
+ __func__, type);
+ return;
+ }
+
+ switch (rsrc->completion) {
+ case PUDA_HASH_CRC_COMPLETE:
+ i40iw_free_hash_desc(rsrc->hash_desc);
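+ /* fallthrough */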
+ case PUDA_QP_CREATED:
+ do {
+ if (reset)
+ break;
+ ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
+ 0, false, true, true);
+ if (ret)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s error ieq qp destroy\n",
+ __func__);
+
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_DESTROY_QP,
+ &compl_info);
+ if (ret)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s error ieq qp destroy done\n",
+ __func__);
+ } while (0);
+
+ i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
+ /* fallthrough */
+ case PUDA_CQ_CREATED:
+ do {
+ if (reset)
+ break;
+ ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
+ if (ret)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s error ieq cq destroy\n",
+ __func__);
+
+ ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+ I40IW_CQP_OP_DESTROY_CQ,
+ &compl_info);
+ if (ret)
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+ "%s error ieq qp destroy done\n",
+ __func__);
+ } while (0);
+
+ i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+ break;
+ default:
+ i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
+ break;
+ }
+ /* Free all allocated puda buffers for both tx and rx */
+ buf = rsrc->alloclist;
+ while (buf) {
+ nextbuf = buf->next;
+ i40iw_puda_dele_buf(dev, buf);
+ buf = nextbuf;
+ rsrc->alloc_buf_count--;
+ }
+ i40iw_free_virt_mem(dev->hw, vmem);
+}
+
+/**
+ * i40iw_puda_allocbufs - allocate buffers for resource
+ * @rsrc: resource for buffer allocation
+ * @count: number of buffers to create
+ */
+static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
+ u32 count)
+{
+ u32 i;
+ struct i40iw_puda_buf *buf;
+ struct i40iw_puda_buf *nextbuf;
+
+ for (i = 0; i < count; i++) {
+ buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
+ if (!buf) {
+ rsrc->stats_buf_alloc_fail++;
+ return I40IW_ERR_NO_MEMORY;
+ }
+ i40iw_puda_ret_bufpool(rsrc, buf);
+ rsrc->alloc_buf_count++;
+ if (!rsrc->alloclist) {
+ rsrc->alloclist = buf;
+ } else {
+ nextbuf = rsrc->alloclist;
+ rsrc->alloclist = buf;
+ buf->next = nextbuf;
+ }
+ }
+ rsrc->avail_buf_count = rsrc->alloc_buf_count;
+ return 0;
+}
+
+/**
+ * i40iw_puda_create_rsrc - create resource (ilq or ieq)
+ * @dev: iwarp device
+ * @info: resource information
+ */
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_rsrc_info *info)
+{
+ enum i40iw_status_code ret = 0;
+ struct i40iw_puda_rsrc *rsrc;
+ u32 pudasize;
+ u32 sqwridsize, rqwridsize;
+ struct i40iw_virt_mem *vmem;
+
+ info->count = 1;
+ pudasize = sizeof(struct i40iw_puda_rsrc);
+ sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
+ rqwridsize = info->rq_size * 8;
+ switch (info->type) {
+ case I40IW_PUDA_RSRC_TYPE_ILQ:
+ vmem = &dev->ilq_mem;
+ break;
+ case I40IW_PUDA_RSRC_TYPE_IEQ:
+ vmem = &dev->ieq_mem;
+ break;
+ default:
+ return I40IW_NOT_SUPPORTED;
+ }
+ ret = i40iw_allocate_virt_mem(dev->hw, vmem,
+       pudasize + sqwridsize + rqwridsize);
+ if (ret)
+ return ret;
+ rsrc = (struct i40iw_puda_rsrc *)vmem->va;
+ spin_lock_init(&rsrc->bufpool_lock);
+ if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
+ dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
+ dev->ilq_count = info->count;
+ rsrc->receive = info->receive;
+ rsrc->xmit_complete = info->xmit_complete;
+ } else {
+ vmem = &dev->ieq_mem;
+ dev->ieq_count = info->count;
+ dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
+ rsrc->receive = i40iw_ieq_receive;
+ rsrc->xmit_complete = i40iw_ieq_tx_compl;
+ }
+
+ rsrc->type = info->type;
+ rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
+ rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
+ rsrc->mss = info->mss;
+ /* Initialize all ieq lists */
+ INIT_LIST_HEAD(&rsrc->bufpool);
+ INIT_LIST_HEAD(&rsrc->txpend);
+
+ rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
+ dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
+ rsrc->qp_id = info->qp_id;
+ rsrc->cq_id = info->cq_id;
+ rsrc->sq_size = info->sq_size;
+ rsrc->rq_size = info->rq_size;
+ rsrc->cq_size = info->rq_size + info->sq_size;
+ rsrc->buf_size = info->buf_size;
+ rsrc->dev = dev;
+
+ ret = i40iw_puda_cq_create(rsrc);
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error cq_create\n", __func__);
+ goto error;
+ }
+ rsrc->completion = PUDA_CQ_CREATED;
+ ret = i40iw_puda_qp_create(rsrc);
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
+ goto error;
+ }
+ rsrc->completion = PUDA_QP_CREATED;
+
+ ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
+ if (ret) {
+ i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error allloc_buf\n", __func__);
+ goto error;
+ }
+
+ rsrc->rxq_invalid_cnt = info->rq_size;
+ ret = i40iw_puda_replenish_rq(rsrc, true);
+ if (ret)
+ goto error;
+
+ if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
+ if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
+ rsrc->check_crc = true;
+ rsrc->completion = PUDA_HASH_CRC_COMPLETE;
+ ret = 0;
+ }
+ }
+
+ dev->ccq_ops->ccq_arm(&rsrc->cq);
+ return ret;
+ error:
+ i40iw_puda_dele_resources(dev, info->type, false);
+
+ return ret;
+}
+
+/**
+ * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
+ * @qp: ilq's qp resource
+ * @wqe_idx: wqe index of completed rcvbuf
+ */
+static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
+{
+ u64 *wqe;
+ u64 offset24;
+
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ get_64bit_val(wqe, 24, &offset24);
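+ /* toggle the wqe valid bit so hw treats the recycled rq wqe as fresh */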
+ offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * i40iw_ieq_get_fpdu_length - given mpa length, return framed fpdu length
+ * @length: length of fpdu
+ */
+static u16 i40iw_ieq_get_fpdu_length(u16 length)
+{
+ u16 fpdu_len;
+
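+ /* framing adds the 2-byte mpa length header plus 4-byte crc; round up to a 4-byte boundary */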
+ fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
+ fpdu_len = (fpdu_len + 3) & 0xfffffffc;
+ return fpdu_len;
+}
+
+/**
+ * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
+ * @buf: rcv buffer with partial
+ * @txbuf: tx buffer for sending back
+ * @buf_offset: rcv buffer offset to copy from
+ * @txbuf_offset: at offset in tx buf to copy
+ * @length: length of data to copy
+ */
+static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
+ struct i40iw_puda_buf *txbuf,
+ u16 buf_offset, u32 txbuf_offset,
+ u32 length)
+{
+ void *mem1 = (u8 *)buf->mem.va + buf_offset;
+ void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
+
+ memcpy(mem2, mem1, length);
+}
+
+/**
+ * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
+ * @buf: receive buffer with partial
+ * @txbuf: buffer to prepare
+ */
+static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
+ struct i40iw_puda_buf *txbuf)
+{
+ txbuf->maclen = buf->maclen;
+ txbuf->tcphlen = buf->tcphlen;
+ txbuf->ipv4 = buf->ipv4;
+ txbuf->hdrlen = buf->hdrlen;
+ i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
+}
+
+/**
+ * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
+ * @buf: receive exception buffer
+ * @fps: first partial sequence number
+ */
+static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
+{
+ u32 offset;
+
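+ /* drop any leading bytes that precede the first partial sequence number (fps) */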
+ if (buf->seqnum < fps) {
+ offset = fps - buf->seqnum;
+ if (offset > buf->datalen)
+ return;
+ buf->data += offset;
+ buf->datalen -= (u16)offset;
+ buf->seqnum = fps;
+ }
+}
+
+/**
+ * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
+ * @ieq: ieq resource
+ * @rxlist: ieq's received buffer list
+ * @pbufl: temporary list for buffers for fpdu
+ * @txbuf: tx buffer for fpdu
+ * @fpdu_len: total length of fpdu
+ */
+static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct i40iw_puda_buf *txbuf,
+ u16 fpdu_len)
+{
+ struct i40iw_puda_buf *buf;
+ u32 nextseqnum;
+ u16 txoffset, bufoffset;
+
+ buf = i40iw_puda_get_listbuf(pbufl);
+ nextseqnum = buf->seqnum + fpdu_len;
+ txbuf->totallen = buf->hdrlen + fpdu_len;
+ txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
+ i40iw_ieq_setup_tx_buf(buf, txbuf);
+
+ txoffset = buf->hdrlen;
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+
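+ /* copy from successive partial buffers until the full fpdu is assembled in txbuf */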
+ do {
+ if (buf->datalen >= fpdu_len) {
+ /* copied full fpdu */
+ i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
+ buf->datalen -= fpdu_len;
+ buf->data += fpdu_len;
+ buf->seqnum = nextseqnum;
+ break;
+ }
+ /* copy partial fpdu */
+ i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
+ txoffset += buf->datalen;
+ fpdu_len -= buf->datalen;
+ i40iw_puda_ret_bufpool(ieq, buf);
+ buf = i40iw_puda_get_listbuf(pbufl);
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+ } while (1);
+
+ /* last buffer on the list */
+ if (buf->datalen)
+ list_add(&buf->list, rxlist);
+ else
+ i40iw_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * i40iw_ieq_create_pbufl - create buffer list for single fpdu
+ * @pfpdu: partial fpdu tracking information
+ * @rxlist: resource list for receive ieq buffers
+ * @pbufl: temporary list for buffers for fpdu
+ * @buf: first receive buffer
+ * @fpdu_len: total length of fpdu
+ */
+static enum i40iw_status_code i40iw_ieq_create_pbufl(
+ struct i40iw_pfpdu *pfpdu,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct i40iw_puda_buf *buf,
+ u16 fpdu_len)
+{
+ enum i40iw_status_code status = 0;
+ struct i40iw_puda_buf *nextbuf;
+ u32 nextseqnum;
+ u16 plen = fpdu_len - buf->datalen;
+ bool done = false;
+
+ nextseqnum = buf->seqnum + buf->datalen;
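+ /* gather in-sequence buffers from rxlist until the fpdu's remaining length (plen) is covered */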
+ do {
+ nextbuf = i40iw_puda_get_listbuf(rxlist);
+ if (!nextbuf) {
+ status = I40IW_ERR_list_empty;
+ break;
+ }
+ list_add_tail(&nextbuf->list, pbufl);
+ if (nextbuf->seqnum != nextseqnum) {
+ pfpdu->bad_seq_num++;
+ status = I40IW_ERR_SEQ_NUM;
+ break;
+ }
+ if (nextbuf->datalen >= plen) {
+ done = true;
+ } else {
+ plen -= nextbuf->datalen;
+ nextseqnum = nextbuf->seqnum + nextbuf->datalen;
+ }
+
+ } while (!done);
+
+ return status;
+}
+
+/**
+ * i40iw_ieq_handle_partial - process partial fpdu buffer
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ * @fpdu_len: fpdu len in the buffer
+ */
+static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
+ struct i40iw_pfpdu *pfpdu,
+ struct i40iw_puda_buf *buf,
+ u16 fpdu_len)
+{
+ enum i40iw_status_code status = 0;
+ u8 *crcptr;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ struct list_head pbufl; /* partial buffer list */
+ struct i40iw_puda_buf *txbuf = NULL;
+ struct list_head *rxlist = &pfpdu->rxlist;
+
+ INIT_LIST_HEAD(&pbufl);
+ list_add(&buf->list, &pbufl);
+
+ status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
+ if (status)
+ goto error;
+
+ txbuf = i40iw_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ status = I40IW_ERR_NO_TXBUFS;
+ goto error;
+ }
+
+ i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
+ i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
+ crcptr = txbuf->data + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc) {
+ status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
+ (fpdu_len - 4), mpacrc);
+ if (status) {
+ i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+ "%s: error bad crc\n", __func__);
+ goto error;
+ }
+ }
+
+ i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
+ txbuf->mem.va, txbuf->totallen);
+ i40iw_puda_send_buf(ieq, txbuf);
+ pfpdu->rcv_nxt = seqnum + fpdu_len;
+ return status;
+ error:
+ while (!list_empty(&pbufl)) {
+ buf = (struct i40iw_puda_buf *)(pbufl.prev);
+ list_del(&buf->list);
+ list_add(&buf->list, rxlist);
+ }
+ if (txbuf)
+ i40iw_puda_ret_bufpool(ieq, txbuf);
+ return status;
+}
+
+/**
+ * i40iw_ieq_process_buf - process buffer rcvd for ieq
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ */
+static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
+ struct i40iw_pfpdu *pfpdu,
+ struct i40iw_puda_buf *buf)
+{
+ u16 fpdu_len = 0;
+ u16 datalen = buf->datalen;
+ u8 *datap = buf->data;
+ u8 *crcptr;
+ u16 ioffset = 0;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ u16 length = 0;
+ u16 full = 0;
+ bool partial = false;
+ struct i40iw_puda_buf *txbuf;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ enum i40iw_status_code ret = 0;
+ enum i40iw_status_code status = 0;
+
+ ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
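+ /* consume as many complete fpdus from this buffer as possible */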
+ while (datalen) {
+ fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(u16 *)datap));
+ if (fpdu_len > pfpdu->max_fpdu_data) {
+ i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+ "%s: error bad fpdu_len\n", __func__);
+ status = I40IW_ERR_MPA_CRC;
+ list_add(&buf->list, rxlist);
+ return status;
+ }
+
+ if (datalen < fpdu_len) {
+ partial = true;
+ break;
+ }
+ crcptr = datap + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc)
+ ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
+ datap, fpdu_len - 4, mpacrc);
+ if (ret) {
+ status = I40IW_ERR_MPA_CRC;
+ list_add(&buf->list, rxlist);
+ return status;
+ }
+ full++;
+ pfpdu->fpdu_processed++;
+ datap += fpdu_len;
+ length += fpdu_len;
+ datalen -= fpdu_len;
+ }
+ if (full) {
+ /* copy full pdu's in the txbuf and send them out */
+ txbuf = i40iw_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ status = I40IW_ERR_NO_TXBUFS;
+ list_add(&buf->list, rxlist);
+ return status;
+ }
+ /* modify txbuf's buffer header */
+ i40iw_ieq_setup_tx_buf(buf, txbuf);
+ /* copy full fpdu's to new buffer */
+ i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
+ length);
+ txbuf->totallen = buf->hdrlen + length;
+
+ i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
+ i40iw_puda_send_buf(ieq, txbuf);
+
+ if (!datalen) {
+ pfpdu->rcv_nxt = buf->seqnum + length;
+ i40iw_puda_ret_bufpool(ieq, buf);
+ return status;
+ }
+ buf->data = datap;
+ buf->seqnum = seqnum + length;
+ buf->datalen = datalen;
+ pfpdu->rcv_nxt = buf->seqnum;
+ }
+ if (partial)
+ status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
+
+ return status;
+}
+
+/**
+ * i40iw_ieq_process_fpdus - process fpdu buffers on the qp's rxlist
+ * @qp: qp with pending partial fpdus
+ * @ieq: ieq resource
+ */
+static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
+ struct i40iw_puda_rsrc *ieq)
+{
+ struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct i40iw_puda_buf *buf;
+ enum i40iw_status_code status;
+
+ do {
+ if (list_empty(rxlist))
+ break;
+ buf = i40iw_puda_get_listbuf(rxlist);
+ if (!buf) {
+ i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+ "%s: error no buf\n", __func__);
+ break;
+ }
+ if (buf->seqnum != pfpdu->rcv_nxt) {
+ /* This could be out of order or missing packet */
+ pfpdu->out_of_order++;
+ list_add(&buf->list, rxlist);
+ break;
+ }
+ /* keep processing buffers from the head of the list */
+ status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
+ if (status == I40IW_ERR_MPA_CRC) {
+ pfpdu->mpa_crc_err = true;
+ while (!list_empty(rxlist)) {
+ buf = i40iw_puda_get_listbuf(rxlist);
+ i40iw_puda_ret_bufpool(ieq, buf);
+ pfpdu->crc_err++;
+ }
+ /* create CQP for AE */
+ i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
+ }
+ } while (!status);
+}
+
+/**
+ * i40iw_ieq_handle_exception - handle qp's exception
+ * @ieq: ieq resource
+ * @qp: qp receiving exception
+ * @buf: receive buffer
+ */
+static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
+ struct i40iw_sc_qp *qp,
+ struct i40iw_puda_buf *buf)
+{
+ struct i40iw_puda_buf *tmpbuf = NULL;
+ struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+ u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
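+ /* receive window is taken from word 23 of the qp's hw host context */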
+ u32 rcv_wnd = hw_host_ctx[23];
+ /* first partial seq # in q2 */
+ u32 fps = qp->q2_buf[16];
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct list_head *plist;
+
+ pfpdu->total_ieq_bufs++;
+
+ if (pfpdu->mpa_crc_err) {
+ pfpdu->crc_err++;
+ goto error;
+ }
+ if (pfpdu->mode && (fps != pfpdu->fps)) {
+ /* clean up qp as it is new partial sequence */
+ i40iw_ieq_cleanup_qp(ieq->dev, qp);
+ i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+ "%s: restarting new partial\n", __func__);
+ pfpdu->mode = false;
+ }
+
+ if (!pfpdu->mode) {
+ i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
+ /* First_Partial_Sequence_Number check */
+ pfpdu->rcv_nxt = fps;
+ pfpdu->fps = fps;
+ pfpdu->mode = true;
+ pfpdu->max_fpdu_data = ieq->mss;
+ pfpdu->pmode_count++;
+ INIT_LIST_HEAD(rxlist);
+ i40iw_ieq_check_first_buf(buf, fps);
+ }
+
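+ /* drop buffers that fall outside the advertised receive window */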
+ if (rcv_wnd < (buf->seqnum - pfpdu->rcv_nxt)) {
+ pfpdu->bad_seq_num++;
+ goto error;
+ }
+
+ if (!list_empty(rxlist)) {
+ tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
+ plist = &tmpbuf->list;
+ while ((struct list_head *)tmpbuf != rxlist) {
+ if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
+ break;
+ tmpbuf = (struct i40iw_puda_buf *)plist->next;
+ }
+ /* Insert buf before tmpbuf */
+ list_add_tail(&buf->list, &tmpbuf->list);
+ } else {
+ list_add_tail(&buf->list, rxlist);
+ }
+ i40iw_ieq_process_fpdus(qp, ieq);
+ return;
+ error:
+ i40iw_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * i40iw_ieq_receive - handle received exception buffer
+ * @dev: iwarp device
+ * @buf: exception buffer received
+ */
+static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf)
+{
+ struct i40iw_puda_rsrc *ieq = dev->ieq;
+ struct i40iw_sc_qp *qp = NULL;
+ u32 wqe_idx = ieq->compl_rxwqe_idx;
+
+ qp = i40iw_ieq_get_qp(dev, buf);
+ if (!qp) {
+ ieq->stats_bad_qp_id++;
+ i40iw_puda_ret_bufpool(ieq, buf);
+ } else {
+ i40iw_ieq_handle_exception(ieq, qp, buf);
+ }
+ /*
+ * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
+ * on which wqe_idx to start replenish rq
+ */
+ if (!ieq->rxq_invalid_cnt)
+ ieq->rx_wqe_idx = wqe_idx;
+ ieq->rxq_invalid_cnt++;
+}
+
+/**
+ * i40iw_ieq_tx_compl - put back after sending completed exception buffer
+ * @dev: iwarp device
+ * @sqwrid: pointer to puda buffer
+ */
+static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
+{
+ struct i40iw_puda_rsrc *ieq = dev->ieq;
+ struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
+
+ i40iw_puda_ret_bufpool(ieq, buf);
+ if (!list_empty(&ieq->txpend)) {
+ buf = i40iw_puda_get_listbuf(&ieq->txpend);
+ i40iw_puda_send_buf(ieq, buf);
+ }
+}
+
+/**
+ * i40iw_ieq_cleanup_qp - free pending fpdu buffers as qp is destroyed
+ * @dev: iwarp device
+ * @qp: qp whose pending fpdu buffers are freed
+ */
+void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_puda_buf *buf;
+ struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct i40iw_puda_rsrc *ieq = dev->ieq;
+
+ if (!pfpdu->mode)
+ return;
+ while (!list_empty(rxlist)) {
+ buf = i40iw_puda_get_listbuf(rxlist);
+ i40iw_puda_ret_bufpool(ieq, buf);
+ }
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
new file mode 100644
index 000000000000..52bf7826ce4e
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -0,0 +1,183 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_PUDA_H
+#define I40IW_PUDA_H
+
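+/* mpa framing: 2-byte length header + 4-byte crc */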
+#define I40IW_IEQ_MPA_FRAMING 6
+
+struct i40iw_sc_dev;
+struct i40iw_sc_qp;
+struct i40iw_sc_cq;
+
+enum puda_resource_type {
+ I40IW_PUDA_RSRC_TYPE_ILQ = 1,
+ I40IW_PUDA_RSRC_TYPE_IEQ
+};
+
+enum puda_rsrc_complete {
+ PUDA_CQ_CREATED = 1,
+ PUDA_QP_CREATED,
+ PUDA_TX_COMPLETE,
+ PUDA_RX_COMPLETE,
+ PUDA_HASH_CRC_COMPLETE
+};
+
+struct i40iw_puda_completion_info {
+ struct i40iw_qp_uk *qp;
+ u8 q_type;
+ u8 vlan_valid;
+ u8 l3proto;
+ u8 l4proto;
+ u16 payload_len;
+ u32 compl_error; /* No_err=0, else major and minor err code */
+ u32 qp_id;
+ u32 wqe_idx;
+};
+
+struct i40iw_puda_send_info {
+ u64 paddr; /* Physical address */
+ u32 len;
+ u8 tcplen;
+ u8 maclen;
+ bool ipv4;
+ bool doloopback;
+ void *scratch;
+};
+
+struct i40iw_puda_buf {
+ struct list_head list; /* MUST be first entry */
+ struct i40iw_dma_mem mem; /* DMA memory for the buffer */
+ struct i40iw_puda_buf *next; /* for alloclist in rsrc struct */
+ struct i40iw_virt_mem buf_mem; /* Buffer memory for this buffer */
+ void *scratch;
+ u8 *iph;
+ u8 *tcph;
+ u8 *data;
+ u16 datalen;
+ u16 vlan_id;
+ u8 tcphlen; /* tcp length in bytes */
+ u8 maclen; /* mac length in bytes */
+ u32 totallen; /* maclen + iphlen + tcphlen + datalen */
+ atomic_t refcount;
+ u8 hdrlen;
+ bool ipv4;
+ u32 seqnum;
+};
+
+struct i40iw_puda_rsrc_info {
+ enum puda_resource_type type; /* ILQ or IEQ */
+ u32 count;
+ u16 pd_id;
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u16 buf_size;
+ u16 mss;
+ u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
+ void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
+ void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+};
+
+struct i40iw_puda_rsrc {
+ struct i40iw_sc_cq cq;
+ struct i40iw_sc_qp qp;
+ struct i40iw_sc_pd sc_pd;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_dma_mem cqmem;
+ struct i40iw_dma_mem qpmem;
+ struct i40iw_virt_mem ilq_mem;
+ enum puda_rsrc_complete completion;
+ enum puda_resource_type type;
+ u16 buf_size; /* buffer must be max datalen + tcpip hdr + mac */
+ u16 mss;
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u32 cq_size;
+ struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 compl_rxwqe_idx;
+ u32 rx_wqe_idx;
+ u32 rxq_invalid_cnt;
+ u32 tx_wqe_avail_cnt;
+ bool check_crc;
+ struct shash_desc *hash_desc;
+ struct list_head txpend;
+ struct list_head bufpool; /* free buffers pool list for recv and xmit */
+ u32 alloc_buf_count;
+ u32 avail_buf_count; /* snapshot of currently available buffers */
+ spinlock_t bufpool_lock;
+ struct i40iw_puda_buf *alloclist;
+ void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
+ void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+ /* puda stats */
+ u64 stats_buf_alloc_fail;
+ u64 stats_pkt_rcvd;
+ u64 stats_pkt_sent;
+ u64 stats_rcvd_pkt_err;
+ u64 stats_sent_pkt_q;
+ u64 stats_bad_qp_id;
+};
+
+struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc);
+void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
+ struct i40iw_puda_buf *buf);
+void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
+ struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
+ struct i40iw_puda_send_info *info);
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_rsrc_info *info);
+void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+ enum puda_resource_type type,
+ bool reset);
+enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_cq *cq, u32 *compl_err);
+void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+ struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
+ void *addr, u32 length, u32 value);
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc);
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+void i40iw_free_hash_desc(struct shash_desc *desc);
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
+ u32 seqnum);
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_register.h b/drivers/infiniband/hw/i40iw/i40iw_register.h
new file mode 100644
index 000000000000..57768184e251
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_register.h
@@ -0,0 +1,1030 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_REGISTER_H
+#define I40IW_REGISTER_H
+
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
+
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK 0xFF
+
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK (0xFFFF << I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK (0x1 << I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK (0xFF << I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK (0xFF << I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK (0xFF << I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK (0x1 << I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK (0xFF << I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK (0xFF << I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK (0x1 << I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* I40IW_REGISTER_H */
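
Every counter register above is paired with a _SHIFT/_MASK define so a field can be isolated from a raw 32-bit read with the usual (value & MASK) >> SHIFT idiom. The sketch below is editorial and not part of the patch; rd32() is a hypothetical stand-in for whatever MMIO read accessor the driver actually uses:

	/* Illustrative only: isolate a register field using one of the
	 * _MASK/_SHIFT pairs defined above.
	 */
	static inline u32 i40iw_get_field(u32 reg_val, u32 mask, u32 shift)
	{
		return (reg_val & mask) >> shift;
	}

	/* Example: read the 16-bit high half of the PF IPv4 TX octet
	 * counter for function instance 0. rd32() is hypothetical.
	 */
	static u32 read_ip4_tx_octs_hi(struct i40iw_hw *hw)
	{
		u32 regval = rd32(hw, I40E_GLPES_PFIP4TXOCTSHI(0));

		return i40iw_get_field(regval,
				       I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK,
				       I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT);
	}
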
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
new file mode 100644
index 000000000000..b0110c15e044
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_STATUS_H
+#define I40IW_STATUS_H
+
+/* Error Codes */
+enum i40iw_status_code {
+ I40IW_SUCCESS = 0,
+ I40IW_ERR_NVM = -1,
+ I40IW_ERR_NVM_CHECKSUM = -2,
+ I40IW_ERR_CONFIG = -4,
+ I40IW_ERR_PARAM = -5,
+ I40IW_ERR_DEVICE_NOT_SUPPORTED = -6,
+ I40IW_ERR_RESET_FAILED = -7,
+ I40IW_ERR_SWFW_SYNC = -8,
+ I40IW_ERR_NO_MEMORY = -9,
+ I40IW_ERR_BAD_PTR = -10,
+ I40IW_ERR_INVALID_PD_ID = -11,
+ I40IW_ERR_INVALID_QP_ID = -12,
+ I40IW_ERR_INVALID_CQ_ID = -13,
+ I40IW_ERR_INVALID_CEQ_ID = -14,
+ I40IW_ERR_INVALID_AEQ_ID = -15,
+ I40IW_ERR_INVALID_SIZE = -16,
+ I40IW_ERR_INVALID_ARP_INDEX = -17,
+ I40IW_ERR_INVALID_FPM_FUNC_ID = -18,
+ I40IW_ERR_QP_INVALID_MSG_SIZE = -19,
+ I40IW_ERR_QP_TOOMANY_WRS_POSTED = -20,
+ I40IW_ERR_INVALID_FRAG_COUNT = -21,
+ I40IW_ERR_QUEUE_EMPTY = -22,
+ I40IW_ERR_INVALID_ALIGNMENT = -23,
+ I40IW_ERR_FLUSHED_QUEUE = -24,
+ I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
+ I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+ I40IW_ERR_TIMEOUT = -27,
+ I40IW_ERR_OPCODE_MISMATCH = -28,
+ I40IW_ERR_CQP_COMPL_ERROR = -29,
+ I40IW_ERR_INVALID_VF_ID = -30,
+ I40IW_ERR_INVALID_HMCFN_ID = -31,
+ I40IW_ERR_BACKING_PAGE_ERROR = -32,
+ I40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
+ I40IW_ERR_INVALID_PBLE_INDEX = -34,
+ I40IW_ERR_INVALID_SD_INDEX = -35,
+ I40IW_ERR_INVALID_PAGE_DESC_INDEX = -36,
+ I40IW_ERR_INVALID_SD_TYPE = -37,
+ I40IW_ERR_MEMCPY_FAILED = -38,
+ I40IW_ERR_INVALID_HMC_OBJ_INDEX = -39,
+ I40IW_ERR_INVALID_HMC_OBJ_COUNT = -40,
+ I40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41,
+ I40IW_ERR_SRQ_ENABLED = -42,
+ I40IW_ERR_BUF_TOO_SHORT = -43,
+ I40IW_ERR_BAD_IWARP_CQE = -44,
+ I40IW_ERR_NVM_BLANK_MODE = -45,
+ I40IW_ERR_NOT_IMPLEMENTED = -46,
+ I40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47,
+ I40IW_ERR_NOT_READY = -48,
+ I40IW_NOT_SUPPORTED = -49,
+ I40IW_ERR_FIRMWARE_API_VERSION = -50,
+ I40IW_ERR_RING_FULL = -51,
+ I40IW_ERR_MPA_CRC = -61,
+ I40IW_ERR_NO_TXBUFS = -62,
+ I40IW_ERR_SEQ_NUM = -63,
+ I40IW_ERR_list_empty = -64,
+ I40IW_ERR_INVALID_MAC_ADDR = -65,
+ I40IW_ERR_BAD_STAG = -66,
+ I40IW_ERR_CQ_COMPL_ERROR = -67,
+
+};
+#endif
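
These codes follow the kernel convention of zero for success and negative values for failures, so a returned enum i40iw_status_code can be tested directly against I40IW_SUCCESS. A hypothetical caller-side check — some_i40iw_operation() is a placeholder, not a function from this patch:

	static void example_status_check(void)
	{
		/* Driver functions return enum i40iw_status_code,
		 * with 0 (I40IW_SUCCESS) meaning OK.
		 */
		enum i40iw_status_code ret = some_i40iw_operation();

		if (ret != I40IW_SUCCESS)
			pr_err("i40iw: operation failed with status %d\n", ret);
	}
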
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
new file mode 100644
index 000000000000..edb3a8c8267a
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -0,0 +1,1312 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_TYPE_H
+#define I40IW_TYPE_H
+#include "i40iw_user.h"
+#include "i40iw_hmc.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+struct i40iw_cqp_sq_wqe {
+ u64 buf[I40IW_CQP_WQE_SIZE];
+};
+
+struct i40iw_sc_aeqe {
+ u64 buf[I40IW_AEQE_SIZE];
+};
+
+struct i40iw_ceqe {
+ u64 buf[I40IW_CEQE_SIZE];
+};
+
+struct i40iw_cqp_ctx {
+ u64 buf[I40IW_CQP_CTX_SIZE];
+};
+
+struct i40iw_cq_shadow_area {
+ u64 buf[I40IW_SHADOW_AREA_SIZE];
+};
+
+struct i40iw_sc_dev;
+struct i40iw_hmc_info;
+struct i40iw_dev_pestat;
+
+struct i40iw_cqp_ops;
+struct i40iw_ccq_ops;
+struct i40iw_ceq_ops;
+struct i40iw_aeq_ops;
+struct i40iw_mr_ops;
+struct i40iw_cqp_misc_ops;
+struct i40iw_pd_ops;
+struct i40iw_priv_qp_ops;
+struct i40iw_priv_cq_ops;
+struct i40iw_hmc_ops;
+
+enum i40iw_resource_indicator_type {
+ I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
+ I40IW_RSRC_INDICATOR_TYPE_CQ,
+ I40IW_RSRC_INDICATOR_TYPE_QP,
+ I40IW_RSRC_INDICATOR_TYPE_SRQ
+};
+
+enum i40iw_hdrct_flags {
+ DDP_LEN_FLAG = 0x80,
+ DDP_HDR_FLAG = 0x40,
+ RDMA_HDR_FLAG = 0x20
+};
+
+enum i40iw_term_layers {
+ LAYER_RDMA = 0,
+ LAYER_DDP = 1,
+ LAYER_MPA = 2
+};
+
+enum i40iw_term_error_types {
+ RDMAP_REMOTE_PROT = 1,
+ RDMAP_REMOTE_OP = 2,
+ DDP_CATASTROPHIC = 0,
+ DDP_TAGGED_BUFFER = 1,
+ DDP_UNTAGGED_BUFFER = 2,
+ DDP_LLP = 3
+};
+
+enum i40iw_term_rdma_errors {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_INV_BOUNDS = 0x01,
+ RDMAP_ACCESS = 0x02,
+ RDMAP_UNASSOC_STAG = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_RDMAP_VER = 0x05,
+ RDMAP_UNEXPECTED_OP = 0x06,
+ RDMAP_CATASTROPHIC_LOCAL = 0x07,
+ RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff
+};
+
+enum i40iw_term_ddp_errors {
+ DDP_CATASTROPHIC_LOCAL = 0x00,
+ DDP_TAGGED_INV_STAG = 0x00,
+ DDP_TAGGED_BOUNDS = 0x01,
+ DDP_TAGGED_UNASSOC_STAG = 0x02,
+ DDP_TAGGED_TO_WRAP = 0x03,
+ DDP_TAGGED_INV_DDP_VER = 0x04,
+ DDP_UNTAGGED_INV_QN = 0x01,
+ DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+ DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+ DDP_UNTAGGED_INV_MO = 0x04,
+ DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+ DDP_UNTAGGED_INV_DDP_VER = 0x06
+};
+
+enum i40iw_term_mpa_errors {
+ MPA_CLOSED = 0x01,
+ MPA_CRC = 0x02,
+ MPA_MARKER = 0x03,
+ MPA_REQ_RSP = 0x04,
+};
+
+enum i40iw_flush_opcode {
+ FLUSH_INVALID = 0,
+ FLUSH_PROT_ERR,
+ FLUSH_REM_ACCESS_ERR,
+ FLUSH_LOC_QP_OP_ERR,
+ FLUSH_REM_OP_ERR,
+ FLUSH_LOC_LEN_ERR,
+ FLUSH_GENERAL_ERR,
+ FLUSH_FATAL_ERR
+};
+
+enum i40iw_term_eventtypes {
+ TERM_EVENT_QP_FATAL,
+ TERM_EVENT_QP_ACCESS_ERR
+};
+
+struct i40iw_terminate_hdr {
+ u8 layer_etype;
+ u8 error_code;
+ u8 hdrct;
+ u8 rsvd;
+};
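
The layer_etype byte appears to follow the iWARP terminate-message layout (RFC 5040), which splits it into a 4-bit layer in the upper nibble and a 4-bit error type in the lower nibble. A hedged sketch of composing such a header under that assumption — the helper name is illustrative, not from this patch:

	/* Sketch, assuming a 4-bit layer / 4-bit error-type split:
	 * report a DDP untagged-buffer error with an out-of-range MSN.
	 */
	static void example_build_termhdr(struct i40iw_terminate_hdr *termhdr)
	{
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
		termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
		termhdr->hdrct = DDP_LEN_FLAG | DDP_HDR_FLAG;
	}
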
+
+enum i40iw_debug_flag {
+ I40IW_DEBUG_NONE = 0x00000000,
+ I40IW_DEBUG_ERR = 0x00000001,
+ I40IW_DEBUG_INIT = 0x00000002,
+ I40IW_DEBUG_DEV = 0x00000004,
+ I40IW_DEBUG_CM = 0x00000008,
+ I40IW_DEBUG_VERBS = 0x00000010,
+ I40IW_DEBUG_PUDA = 0x00000020,
+ I40IW_DEBUG_ILQ = 0x00000040,
+ I40IW_DEBUG_IEQ = 0x00000080,
+ I40IW_DEBUG_QP = 0x00000100,
+ I40IW_DEBUG_CQ = 0x00000200,
+ I40IW_DEBUG_MR = 0x00000400,
+ I40IW_DEBUG_PBLE = 0x00000800,
+ I40IW_DEBUG_WQE = 0x00001000,
+ I40IW_DEBUG_AEQ = 0x00002000,
+ I40IW_DEBUG_CQP = 0x00004000,
+ I40IW_DEBUG_HMC = 0x00008000,
+ I40IW_DEBUG_USER = 0x00010000,
+ I40IW_DEBUG_VIRT = 0x00020000,
+ I40IW_DEBUG_DCB = 0x00040000,
+ I40IW_DEBUG_CQE = 0x00800000,
+ I40IW_DEBUG_ALL = 0xFFFFFFFF
+};
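
Each debug flag occupies a distinct bit, so categories combine with bitwise OR and are tested with bitwise AND. A small illustration — dbg_mask is a local variable for the example, not a field added by this patch:

	static void example_debug_mask(void)
	{
		/* Trace connection management and QP activity together,
		 * then test whether a given category is enabled.
		 */
		u32 dbg_mask = I40IW_DEBUG_CM | I40IW_DEBUG_QP;

		if (dbg_mask & I40IW_DEBUG_QP)
			pr_info("i40iw: QP debug output enabled\n");
	}
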
+
+enum i40iw_hw_stat_index_32b {
+ I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
+ I40IW_HW_STAT_INDEX_IP4RXTRUNC,
+ I40IW_HW_STAT_INDEX_IP4TXNOROUTE,
+ I40IW_HW_STAT_INDEX_IP6RXDISCARD,
+ I40IW_HW_STAT_INDEX_IP6RXTRUNC,
+ I40IW_HW_STAT_INDEX_IP6TXNOROUTE,
+ I40IW_HW_STAT_INDEX_TCPRTXSEG,
+ I40IW_HW_STAT_INDEX_TCPRXOPTERR,
+ I40IW_HW_STAT_INDEX_TCPRXPROTOERR,
+ I40IW_HW_STAT_INDEX_MAX_32
+};
+
+enum i40iw_hw_stat_index_64b {
+ I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
+ I40IW_HW_STAT_INDEX_IP4RXPKTS,
+ I40IW_HW_STAT_INDEX_IP4RXFRAGS,
+ I40IW_HW_STAT_INDEX_IP4RXMCPKTS,
+ I40IW_HW_STAT_INDEX_IP4TXOCTS,
+ I40IW_HW_STAT_INDEX_IP4TXPKTS,
+ I40IW_HW_STAT_INDEX_IP4TXFRAGS,
+ I40IW_HW_STAT_INDEX_IP4TXMCPKTS,
+ I40IW_HW_STAT_INDEX_IP6RXOCTS,
+ I40IW_HW_STAT_INDEX_IP6RXPKTS,
+ I40IW_HW_STAT_INDEX_IP6RXFRAGS,
+ I40IW_HW_STAT_INDEX_IP6RXMCPKTS,
+ I40IW_HW_STAT_INDEX_IP6TXOCTS,
+ I40IW_HW_STAT_INDEX_IP6TXPKTS,
+ I40IW_HW_STAT_INDEX_IP6TXFRAGS,
+ I40IW_HW_STAT_INDEX_IP6TXMCPKTS,
+ I40IW_HW_STAT_INDEX_TCPRXSEGS,
+ I40IW_HW_STAT_INDEX_TCPTXSEG,
+ I40IW_HW_STAT_INDEX_RDMARXRDS,
+ I40IW_HW_STAT_INDEX_RDMARXSNDS,
+ I40IW_HW_STAT_INDEX_RDMARXWRS,
+ I40IW_HW_STAT_INDEX_RDMATXRDS,
+ I40IW_HW_STAT_INDEX_RDMATXSNDS,
+ I40IW_HW_STAT_INDEX_RDMATXWRS,
+ I40IW_HW_STAT_INDEX_RDMAVBND,
+ I40IW_HW_STAT_INDEX_RDMAVINV,
+ I40IW_HW_STAT_INDEX_MAX_64
+};
+
+struct i40iw_dev_hw_stat_offsets {
+ u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
+ u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
+};
+
+struct i40iw_dev_hw_stats {
+ u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32];
+ u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64];
+};
+
+struct i40iw_device_pestat_ops {
+ void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool);
+ void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *);
+ void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *);
+ void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *);
+ void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *);
+};
+
+struct i40iw_dev_pestat {
+ struct i40iw_hw *hw;
+ struct i40iw_device_pestat_ops ops;
+ struct i40iw_dev_hw_stats hw_stats;
+ struct i40iw_dev_hw_stats last_read_hw_stats;
+ struct i40iw_dev_hw_stat_offsets hw_stat_offsets;
+ struct timer_list stats_timer;
+ spinlock_t stats_lock; /* rdma stats lock */
+};
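+
+/*
+ * Usage sketch (assumed, not part of this header): counters are read
+ * through the embedded ops table, e.g. one 32-bit statistic:
+ *
+ *   u64 ip4_discards;
+ *
+ *   pestat->ops.iw_hw_stat_read_32(pestat,
+ *                                  I40IW_HW_STAT_INDEX_IP4RXDISCARD,
+ *                                  &ip4_discards);
+ */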
+
+struct i40iw_hw {
+ u8 __iomem *hw_addr;
+ void *dev_context;
+ struct i40iw_hmc_info hmc;
+};
+
+struct i40iw_pfpdu {
+ struct list_head rxlist;
+ u32 rcv_nxt;
+ u32 fps;
+ u32 max_fpdu_data;
+ bool mode;
+ bool mpa_crc_err;
+ u64 total_ieq_bufs;
+ u64 fpdu_processed;
+ u64 bad_seq_num;
+ u64 crc_err;
+ u64 no_tx_bufs;
+ u64 tx_err;
+ u64 out_of_order;
+ u64 pmode_count;
+};
+
+struct i40iw_sc_pd {
+ u32 size;
+ struct i40iw_sc_dev *dev;
+ u16 pd_id;
+};
+
+struct i40iw_cqp_quanta {
+ u64 elem[I40IW_CQP_WQE_SIZE];
+};
+
+struct i40iw_sc_cqp {
+ u32 size;
+ u64 sq_pa;
+ u64 host_ctx_pa;
+ void *back_cqp;
+ struct i40iw_sc_dev *dev;
+ enum i40iw_status_code (*process_cqp_sds)(struct i40iw_sc_dev *,
+ struct i40iw_update_sds_info *);
+ struct i40iw_dma_mem sdbuf;
+ struct i40iw_ring sq_ring;
+ struct i40iw_cqp_quanta *sq_base;
+ u64 *host_ctx;
+ u64 *scratch_array;
+ u32 cqp_id;
+ u32 sq_size;
+ u32 hw_sq_size;
+ u8 struct_ver;
+ u8 polarity;
+ bool en_datacenter_tcp;
+ u8 hmc_profile;
+ u8 enabled_vf_count;
+ u8 timeout_count;
+};
+
+struct i40iw_sc_aeq {
+ u32 size;
+ u64 aeq_elem_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_sc_aeqe *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ struct i40iw_ring aeq_ring;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ u8 polarity;
+};
+
+struct i40iw_sc_ceq {
+ u32 size;
+ u64 ceq_elem_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_ceqe *ceqe_base;
+ void *pbl_list;
+ u32 ceq_id;
+ u32 elem_cnt;
+ struct i40iw_ring ceq_ring;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ bool tph_en;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ u8 polarity;
+};
+
+struct i40iw_sc_cq {
+ struct i40iw_cq_uk cq_uk;
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct i40iw_sc_dev *dev;
+ void *pbl_list;
+ void *back_cq;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool ceqe_mask;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u8 cq_type;
+ bool ceq_id_valid;
+ bool tph_en;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ bool check_overflow;
+};
+
+struct i40iw_sc_qp {
+ struct i40iw_qp_uk qp_uk;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 hw_host_ctx_pa;
+ u64 shadow_area_pa;
+ u64 q2_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_sc_pd *pd;
+ u64 *hw_host_ctx;
+ void *llp_stream_handle;
+ void *back_qp;
+ struct i40iw_pfpdu pfpdu;
+ u8 *q2_buf;
+ u64 qp_compl_ctx;
+ u16 qs_handle;
+ u16 exception_lan_queue;
+ u16 push_idx;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ u8 qp_state;
+ u8 qp_type;
+ u8 hw_sq_size;
+ u8 hw_rq_size;
+ u8 src_mac_addr_idx;
+ bool sq_tph_en;
+ bool rq_tph_en;
+ bool rcv_tph_en;
+ bool xmit_tph_en;
+ bool virtual_map;
+ bool flush_sq;
+ bool flush_rq;
+ bool sq_flush;
+ enum i40iw_flush_opcode flush_code;
+ enum i40iw_term_eventtypes eventtype;
+ u8 term_flags;
+};
+
+struct i40iw_hmc_fpm_misc {
+ u32 max_ceqs;
+ u32 max_sds;
+ u32 xf_block_size;
+ u32 q1_block_size;
+ u32 ht_multiplier;
+ u32 timer_bucket;
+};
+
+struct i40iw_vchnl_if {
+ enum i40iw_status_code (*vchnl_recv)(struct i40iw_sc_dev *, u32, u8 *, u16);
+ enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *dev, u32, u8 *, u16);
+};
+
+#define I40IW_VCHNL_MAX_VF_MSG_SIZE 512
+
+struct i40iw_vchnl_vf_msg_buffer {
+ struct i40iw_virtchnl_op_buf vchnl_msg;
+ char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
+};
+
+struct i40iw_vfdev {
+ struct i40iw_sc_dev *pf_dev;
+ u8 *hmc_info_mem;
+ struct i40iw_dev_pestat dev_pestat;
+ struct i40iw_hmc_pble_info *pble_info;
+ struct i40iw_hmc_info hmc_info;
+ struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;
+ u64 fpm_query_buf_pa;
+ u64 *fpm_query_buf;
+ u32 vf_id;
+ u32 msg_count;
+ bool pf_hmc_initialized;
+ u16 pmf_index;
+ u16 iw_vf_idx; /* VF Device table index */
+ bool stats_initialized;
+};
+
+struct i40iw_sc_dev {
+ struct list_head cqp_cmd_head; /* head of the CQP command list */
+ spinlock_t cqp_lock; /* cqp list sync */
+ struct i40iw_dev_uk dev_uk;
+ struct i40iw_dev_pestat dev_pestat;
+ struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ u64 *fpm_query_buf;
+ u64 *fpm_commit_buf;
+ void *back_dev;
+ struct i40iw_hw *hw;
+ u8 __iomem *db_addr;
+ struct i40iw_hmc_info *hmc_info;
+ struct i40iw_hmc_pble_info *pble_info;
+ struct i40iw_vfdev *vf_dev[I40IW_MAX_PE_ENABLED_VF_COUNT];
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_sc_aeq *aeq;
+ struct i40iw_sc_ceq *ceq[I40IW_CEQ_MAX_COUNT];
+ struct i40iw_sc_cq *ccq;
+ struct i40iw_cqp_ops *cqp_ops;
+ struct i40iw_ccq_ops *ccq_ops;
+ struct i40iw_ceq_ops *ceq_ops;
+ struct i40iw_aeq_ops *aeq_ops;
+ struct i40iw_pd_ops *iw_pd_ops;
+ struct i40iw_priv_qp_ops *iw_priv_qp_ops;
+ struct i40iw_priv_cq_ops *iw_priv_cq_ops;
+ struct i40iw_mr_ops *mr_ops;
+ struct i40iw_cqp_misc_ops *cqp_misc_ops;
+ struct i40iw_hmc_ops *hmc_ops;
+ struct i40iw_vchnl_if vchnl_if;
+ u32 ilq_count;
+ struct i40iw_virt_mem ilq_mem;
+ struct i40iw_puda_rsrc *ilq;
+ u32 ieq_count;
+ struct i40iw_virt_mem ieq_mem;
+ struct i40iw_puda_rsrc *ieq;
+
+ struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
+
+ struct i40iw_hmc_fpm_misc hmc_fpm_misc;
+ u16 qs_handle;
+ u32 debug_mask;
+ u16 exception_lan_queue;
+ u8 hmc_fn_id;
+ bool is_pf;
+ bool vchnl_up;
+ u8 vf_id;
+ u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];
+ struct i40iw_vchnl_vf_msg_buffer vchnl_vf_msg_buf;
+ u8 hw_rev;
+};
+
+struct i40iw_modify_cq_info {
+ u64 cq_pa;
+ struct i40iw_cqe *cq_base;
+ void *pbl_list;
+ u32 ceq_id;
+ u32 cq_size;
+ u32 shadow_read_threshold;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ bool check_overflow;
+ bool cq_resize;
+ bool ceq_change;
+ bool check_overflow_change;
+ u32 first_pm_pbl_idx;
+ bool ceq_valid;
+};
+
+struct i40iw_create_qp_info {
+ u8 next_iwarp_state;
+ bool ord_valid;
+ bool tcp_ctx_valid;
+ bool cq_num_valid;
+ bool static_rsrc;
+ bool arp_cache_idx_valid;
+};
+
+struct i40iw_modify_qp_info {
+ u64 rx_win0;
+ u64 rx_win1;
+ u16 new_mss;
+ u8 next_iwarp_state;
+ u8 termlen;
+ bool ord_valid;
+ bool tcp_ctx_valid;
+ bool cq_num_valid;
+ bool static_rsrc;
+ bool arp_cache_idx_valid;
+ bool reset_tcp_conn;
+ bool remove_hash_idx;
+ bool dont_send_term;
+ bool dont_send_fin;
+ bool cached_var_valid;
+ bool mss_change;
+ bool force_loopback;
+};
+
+struct i40iw_ccq_cqe_info {
+ struct i40iw_sc_cqp *cqp;
+ u64 scratch;
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ u8 op_code;
+ bool error;
+};
+
+struct i40iw_l2params {
+ u16 qs_handle_list[I40IW_MAX_USER_PRIORITY];
+ u16 mss;
+};
+
+struct i40iw_device_init_info {
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ u64 *fpm_query_buf;
+ u64 *fpm_commit_buf;
+ struct i40iw_hw *hw;
+ void __iomem *bar0;
+ enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
+ u16 qs_handle;
+ u16 exception_lan_queue;
+ u8 hmc_fn_id;
+ bool is_pf;
+ u32 debug_mask;
+};
+
+enum i40iw_cqp_hmc_profile {
+ I40IW_HMC_PROFILE_DEFAULT = 1,
+ I40IW_HMC_PROFILE_FAVOR_VF = 2,
+ I40IW_HMC_PROFILE_EQUAL = 3,
+};
+
+struct i40iw_cqp_init_info {
+ u64 cqp_compl_ctx;
+ u64 host_ctx_pa;
+ u64 sq_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_cqp_quanta *sq;
+ u64 *host_ctx;
+ u64 *scratch_array;
+ u32 sq_size;
+ u8 struct_ver;
+ bool en_datacenter_tcp;
+ u8 hmc_profile;
+ u8 enabled_vf_count;
+};
+
+struct i40iw_ceq_init_info {
+ u64 ceqe_pa;
+ struct i40iw_sc_dev *dev;
+ u64 *ceqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ u32 ceq_id;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ bool tph_en;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+};
+
+struct i40iw_aeq_init_info {
+ u64 aeq_elem_pa;
+ struct i40iw_sc_dev *dev;
+ u32 *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+};
+
+struct i40iw_ccq_init_info {
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct i40iw_sc_dev *dev;
+ struct i40iw_cqe *cq_base;
+ u64 *shadow_area;
+ void *pbl_list;
+ u32 num_elem;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool ceqe_mask;
+ bool ceq_id_valid;
+ bool tph_en;
+ u8 tph_val;
+ bool avoid_mem_cflct;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+};
+
+struct i40iwarp_offload_info {
+ u16 rcv_mark_offset;
+ u16 snd_mark_offset;
+ u16 pd_id;
+ u8 ddp_ver;
+ u8 rdmap_ver;
+ u8 ord_size;
+ u8 ird_size;
+ bool wr_rdresp_en;
+ bool rd_enable;
+ bool snd_mark_en;
+ bool rcv_mark_en;
+ bool bind_en;
+ bool fast_reg_en;
+ bool priv_mode_en;
+ bool lsmm_present;
+ u8 iwarp_mode;
+ bool align_hdrs;
+ bool rcv_no_mpa_crc;
+
+ u8 last_byte_sent;
+};
+
+struct i40iw_tcp_offload_info {
+ bool ipv4;
+ bool no_nagle;
+ bool insert_vlan_tag;
+ bool time_stamp;
+ u8 cwnd_inc_limit;
+ bool drop_ooo_seg;
+ bool dup_ack_thresh;
+ u8 ttl;
+ u8 src_mac_addr_idx;
+ bool avoid_stretch_ack;
+ u8 tos;
+ u16 src_port;
+ u16 dst_port;
+ u32 dest_ip_addr0;
+ u32 dest_ip_addr1;
+ u32 dest_ip_addr2;
+ u32 dest_ip_addr3;
+ u32 snd_mss;
+ u16 vlan_tag;
+ u16 arp_idx;
+ u32 flow_label;
+ bool wscale;
+ u8 tcp_state;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+ u32 time_stamp_recent;
+ u32 time_stamp_age;
+ u32 snd_nxt;
+ u32 snd_wnd;
+ u32 rcv_nxt;
+ u32 rcv_wnd;
+ u32 snd_max;
+ u32 snd_una;
+ u32 srtt;
+ u32 rtt_var;
+ u32 ss_thresh;
+ u32 cwnd;
+ u32 snd_wl1;
+ u32 snd_wl2;
+ u32 max_snd_window;
+ u8 rexmit_thresh;
+ u32 local_ipaddr0;
+ u32 local_ipaddr1;
+ u32 local_ipaddr2;
+ u32 local_ipaddr3;
+ bool ignore_tcp_opt;
+ bool ignore_tcp_uns_opt;
+};
+
+struct i40iw_qp_host_ctx_info {
+ u64 qp_compl_ctx;
+ struct i40iw_tcp_offload_info *tcp_info;
+ struct i40iwarp_offload_info *iwarp_info;
+ u32 send_cq_num;
+ u32 rcv_cq_num;
+ u16 push_idx;
+ bool push_mode_en;
+ bool tcp_info_valid;
+ bool iwarp_info_valid;
+ bool err_rq_idx_valid;
+ u16 err_rq_idx;
+};
+
+struct i40iw_aeqe_info {
+ u64 compl_ctx;
+ u32 qp_cq_id;
+ u16 ae_id;
+ u16 wqe_idx;
+ u8 tcp_state;
+ u8 iwarp_state;
+ bool qp;
+ bool cq;
+ bool sq;
+ bool in_rdrsp_wr;
+ bool out_rdrsp;
+ u8 q2_data_written;
+ bool aeqe_overflow;
+};
+
+struct i40iw_allocate_stag_info {
+ u64 total_len;
+ u32 chunk_size;
+ u32 stag_idx;
+ u32 page_size;
+ u16 pd_id;
+ u16 access_rights;
+ bool remote_access;
+ bool use_hmc_fcn_index;
+ u8 hmc_fcn_index;
+ bool use_pf_rid;
+};
+
+struct i40iw_reg_ns_stag_info {
+ u64 reg_addr_pa;
+ u64 fbo;
+ void *va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum i40iw_addressing_type addr_type;
+ i40iw_stag_index stag_idx;
+ u16 access_rights;
+ u16 pd_id;
+ i40iw_stag_key stag_key;
+ bool use_hmc_fcn_index;
+ u8 hmc_fcn_index;
+ bool use_pf_rid;
+};
+
+struct i40iw_fast_reg_stag_info {
+ u64 wr_id;
+ u64 reg_addr_pa;
+ u64 fbo;
+ void *va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum i40iw_addressing_type addr_type;
+ i40iw_stag_index stag_idx;
+ u16 access_rights;
+ u16 pd_id;
+ i40iw_stag_key stag_key;
+ bool local_fence;
+ bool read_fence;
+ bool signaled;
+ bool use_hmc_fcn_index;
+ u8 hmc_fcn_index;
+ bool use_pf_rid;
+ bool defer_flag;
+};
+
+struct i40iw_dealloc_stag_info {
+ u32 stag_idx;
+ u16 pd_id;
+ bool mr;
+ bool dealloc_pbl;
+};
+
+struct i40iw_register_shared_stag {
+ void *va;
+ enum i40iw_addressing_type addr_type;
+ i40iw_stag_index new_stag_idx;
+ i40iw_stag_index parent_stag_idx;
+ u32 access_rights;
+ u16 pd_id;
+ i40iw_stag_key new_stag_key;
+};
+
+struct i40iw_qp_init_info {
+ struct i40iw_qp_uk_init_info qp_uk_init_info;
+ struct i40iw_sc_pd *pd;
+ u64 *host_ctx;
+ u8 *q2;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 host_ctx_pa;
+ u64 q2_pa;
+ u64 shadow_area_pa;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ u8 type;
+ bool sq_tph_en;
+ bool rq_tph_en;
+ bool rcv_tph_en;
+ bool xmit_tph_en;
+ bool virtual_map;
+};
+
+struct i40iw_cq_init_info {
+ struct i40iw_sc_dev *dev;
+ u64 cq_base_pa;
+ u64 shadow_area_pa;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool virtual_map;
+ bool ceqe_mask;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ bool ceq_id_valid;
+ bool tph_en;
+ u8 tph_val;
+ u8 type;
+ struct i40iw_cq_uk_init_info cq_uk_init_info;
+};
+
+struct i40iw_upload_context_info {
+ u64 buf_pa;
+ bool freeze_qp;
+ bool raw_format;
+ u32 qp_id;
+ u8 qp_type;
+};
+
+struct i40iw_add_arp_cache_entry_info {
+ u8 mac_addr[6];
+ u32 reach_max;
+ u16 arp_index;
+ bool permanent;
+};
+
+struct i40iw_apbvt_info {
+ u16 port;
+ bool add;
+};
+
+enum i40iw_quad_entry_type {
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED = 1,
+ I40IW_QHASH_TYPE_TCP_SYN,
+};
+
+enum i40iw_quad_hash_manage_type {
+ I40IW_QHASH_MANAGE_TYPE_DELETE = 0,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ I40IW_QHASH_MANAGE_TYPE_MODIFY
+};
+
+struct i40iw_qhash_table_info {
+ enum i40iw_quad_hash_manage_type manage;
+ enum i40iw_quad_entry_type entry_type;
+ bool vlan_valid;
+ bool ipv4_valid;
+ u8 mac_addr[6];
+ u16 vlan_id;
+ u16 qs_handle;
+ u32 qp_num;
+ u32 dest_ip[4];
+ u32 src_ip[4];
+ u32 dest_port;
+ u32 src_port;
+};
+
+struct i40iw_local_mac_ipaddr_entry_info {
+ u8 mac_addr[6];
+ u8 entry_idx;
+};
+
+struct i40iw_cqp_manage_push_page_info {
+ u32 push_idx;
+ u16 qs_handle;
+ u8 free_page;
+};
+
+struct i40iw_qp_flush_info {
+ u16 sq_minor_code;
+ u16 sq_major_code;
+ u16 rq_minor_code;
+ u16 rq_major_code;
+ u16 ae_code;
+ u8 ae_source;
+ bool sq;
+ bool rq;
+ bool userflushcode;
+ bool generate_ae;
+};
+
+struct i40iw_cqp_commit_fpm_values {
+ u64 qp_base;
+ u64 cq_base;
+ u32 hte_base;
+ u32 arp_base;
+ u32 apbvt_inuse_base;
+ u32 mr_base;
+ u32 xf_base;
+ u32 xffl_base;
+ u32 q1_base;
+ u32 q1fl_base;
+ u32 fsimc_base;
+ u32 fsiav_base;
+ u32 pbl_base;
+
+ u32 qp_cnt;
+ u32 cq_cnt;
+ u32 hte_cnt;
+ u32 arp_cnt;
+ u32 mr_cnt;
+ u32 xf_cnt;
+ u32 xffl_cnt;
+ u32 q1_cnt;
+ u32 q1fl_cnt;
+ u32 fsimc_cnt;
+ u32 fsiav_cnt;
+ u32 pbl_cnt;
+};
+
+struct i40iw_cqp_query_fpm_values {
+ u16 first_pe_sd_index;
+ u32 qp_objsize;
+ u32 cq_objsize;
+ u32 hte_objsize;
+ u32 arp_objsize;
+ u32 mr_objsize;
+ u32 xf_objsize;
+ u32 q1_objsize;
+ u32 fsimc_objsize;
+ u32 fsiav_objsize;
+
+ u32 qp_max;
+ u32 cq_max;
+ u32 hte_max;
+ u32 arp_max;
+ u32 mr_max;
+ u32 xf_max;
+ u32 xffl_max;
+ u32 q1_max;
+ u32 q1fl_max;
+ u32 fsimc_max;
+ u32 fsiav_max;
+ u32 pbl_max;
+};
+
+struct i40iw_cqp_ops {
+ enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
+ struct i40iw_cqp_init_info *);
+ enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *);
+ void (*cqp_post_sq)(struct i40iw_sc_cqp *);
+ u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
+ enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);
+ enum i40iw_status_code (*poll_for_cqp_op_done)(struct i40iw_sc_cqp *, u8,
+ struct i40iw_ccq_cqe_info *);
+};
+
+struct i40iw_ccq_ops {
+ enum i40iw_status_code (*ccq_init)(struct i40iw_sc_cq *,
+ struct i40iw_ccq_init_info *);
+ enum i40iw_status_code (*ccq_create)(struct i40iw_sc_cq *, u64, bool, bool);
+ enum i40iw_status_code (*ccq_destroy)(struct i40iw_sc_cq *, u64, bool);
+ enum i40iw_status_code (*ccq_create_done)(struct i40iw_sc_cq *);
+ enum i40iw_status_code (*ccq_get_cqe_info)(struct i40iw_sc_cq *,
+ struct i40iw_ccq_cqe_info *);
+ void (*ccq_arm)(struct i40iw_sc_cq *);
+};
+
+struct i40iw_ceq_ops {
+ enum i40iw_status_code (*ceq_init)(struct i40iw_sc_ceq *,
+ struct i40iw_ceq_init_info *);
+ enum i40iw_status_code (*ceq_create)(struct i40iw_sc_ceq *, u64, bool);
+ enum i40iw_status_code (*cceq_create_done)(struct i40iw_sc_ceq *);
+ enum i40iw_status_code (*cceq_destroy_done)(struct i40iw_sc_ceq *);
+ enum i40iw_status_code (*cceq_create)(struct i40iw_sc_ceq *, u64);
+ enum i40iw_status_code (*ceq_destroy)(struct i40iw_sc_ceq *, u64, bool);
+ void *(*process_ceq)(struct i40iw_sc_dev *, struct i40iw_sc_ceq *);
+};
+
+struct i40iw_aeq_ops {
+ enum i40iw_status_code (*aeq_init)(struct i40iw_sc_aeq *,
+ struct i40iw_aeq_init_info *);
+ enum i40iw_status_code (*aeq_create)(struct i40iw_sc_aeq *, u64, bool);
+ enum i40iw_status_code (*aeq_destroy)(struct i40iw_sc_aeq *, u64, bool);
+ enum i40iw_status_code (*get_next_aeqe)(struct i40iw_sc_aeq *,
+ struct i40iw_aeqe_info *);
+ enum i40iw_status_code (*repost_aeq_entries)(struct i40iw_sc_dev *, u32);
+ enum i40iw_status_code (*aeq_create_done)(struct i40iw_sc_aeq *);
+ enum i40iw_status_code (*aeq_destroy_done)(struct i40iw_sc_aeq *);
+};
+
+struct i40iw_pd_ops {
+ void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
+};
+
+struct i40iw_priv_qp_ops {
+ enum i40iw_status_code (*qp_init)(struct i40iw_sc_qp *, struct i40iw_qp_init_info *);
+ enum i40iw_status_code (*qp_create)(struct i40iw_sc_qp *,
+ struct i40iw_create_qp_info *, u64, bool);
+ enum i40iw_status_code (*qp_modify)(struct i40iw_sc_qp *,
+ struct i40iw_modify_qp_info *, u64, bool);
+ enum i40iw_status_code (*qp_destroy)(struct i40iw_sc_qp *, u64, bool, bool, bool);
+ enum i40iw_status_code (*qp_flush_wqes)(struct i40iw_sc_qp *,
+ struct i40iw_qp_flush_info *, u64, bool);
+ enum i40iw_status_code (*qp_upload_context)(struct i40iw_sc_dev *,
+ struct i40iw_upload_context_info *,
+ u64, bool);
+ enum i40iw_status_code (*qp_setctx)(struct i40iw_sc_qp *, u64 *,
+ struct i40iw_qp_host_ctx_info *);
+
+ void (*qp_send_lsmm)(struct i40iw_sc_qp *, void *, u32, i40iw_stag);
+ void (*qp_send_lsmm_nostag)(struct i40iw_sc_qp *, void *, u32);
+ void (*qp_send_rtt)(struct i40iw_sc_qp *, bool);
+ enum i40iw_status_code (*qp_post_wqe0)(struct i40iw_sc_qp *, u8);
+};
+
+struct i40iw_priv_cq_ops {
+ enum i40iw_status_code (*cq_init)(struct i40iw_sc_cq *, struct i40iw_cq_init_info *);
+ enum i40iw_status_code (*cq_create)(struct i40iw_sc_cq *, u64, bool, bool);
+ enum i40iw_status_code (*cq_destroy)(struct i40iw_sc_cq *, u64, bool);
+ enum i40iw_status_code (*cq_modify)(struct i40iw_sc_cq *,
+ struct i40iw_modify_cq_info *, u64, bool);
+};
+
+struct i40iw_mr_ops {
+ enum i40iw_status_code (*alloc_stag)(struct i40iw_sc_dev *,
+ struct i40iw_allocate_stag_info *, u64, bool);
+ enum i40iw_status_code (*mr_reg_non_shared)(struct i40iw_sc_dev *,
+ struct i40iw_reg_ns_stag_info *,
+ u64, bool);
+ enum i40iw_status_code (*mr_reg_shared)(struct i40iw_sc_dev *,
+ struct i40iw_register_shared_stag *,
+ u64, bool);
+ enum i40iw_status_code (*dealloc_stag)(struct i40iw_sc_dev *,
+ struct i40iw_dealloc_stag_info *,
+ u64, bool);
+ enum i40iw_status_code (*query_stag)(struct i40iw_sc_dev *, u64, u32, bool);
+ enum i40iw_status_code (*mw_alloc)(struct i40iw_sc_dev *, u64, u32, u16, bool);
+};
+
+struct i40iw_cqp_misc_ops {
+ enum i40iw_status_code (*manage_push_page)(struct i40iw_sc_cqp *,
+ struct i40iw_cqp_manage_push_page_info *,
+ u64, bool);
+ enum i40iw_status_code (*manage_hmc_pm_func_table)(struct i40iw_sc_cqp *,
+ u64, u8, bool, bool);
+ enum i40iw_status_code (*set_hmc_resource_profile)(struct i40iw_sc_cqp *,
+ u64, u8, u8, bool, bool);
+ enum i40iw_status_code (*commit_fpm_values)(struct i40iw_sc_cqp *, u64, u8,
+ struct i40iw_dma_mem *, bool, u8);
+ enum i40iw_status_code (*query_fpm_values)(struct i40iw_sc_cqp *, u64, u8,
+ struct i40iw_dma_mem *, bool, u8);
+ enum i40iw_status_code (*static_hmc_pages_allocated)(struct i40iw_sc_cqp *,
+ u64, u8, bool, bool);
+ enum i40iw_status_code (*add_arp_cache_entry)(struct i40iw_sc_cqp *,
+ struct i40iw_add_arp_cache_entry_info *,
+ u64, bool);
+ enum i40iw_status_code (*del_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);
+ enum i40iw_status_code (*query_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);
+ enum i40iw_status_code (*manage_apbvt_entry)(struct i40iw_sc_cqp *,
+ struct i40iw_apbvt_info *, u64, bool);
+ enum i40iw_status_code (*manage_qhash_table_entry)(struct i40iw_sc_cqp *,
+ struct i40iw_qhash_table_info *, u64, bool);
+ enum i40iw_status_code (*alloc_local_mac_ipaddr_table_entry)(struct i40iw_sc_cqp *, u64, bool);
+ enum i40iw_status_code (*add_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *,
+ struct i40iw_local_mac_ipaddr_entry_info *,
+ u64, bool);
+ enum i40iw_status_code (*del_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, u64, u8, u8, bool);
+ enum i40iw_status_code (*cqp_nop)(struct i40iw_sc_cqp *, u64, bool);
+ enum i40iw_status_code (*commit_fpm_values_done)(struct i40iw_sc_cqp *);
+ enum i40iw_status_code (*query_fpm_values_done)(struct i40iw_sc_cqp *);
+ enum i40iw_status_code (*manage_hmc_pm_func_table_done)(struct i40iw_sc_cqp *);
+ enum i40iw_status_code (*update_suspend_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);
+ enum i40iw_status_code (*update_resume_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);
+};
+
+struct i40iw_hmc_ops {
+ enum i40iw_status_code (*init_iw_hmc)(struct i40iw_sc_dev *, u8);
+ enum i40iw_status_code (*parse_fpm_query_buf)(u64 *, struct i40iw_hmc_info *,
+ struct i40iw_hmc_fpm_misc *);
+ enum i40iw_status_code (*configure_iw_fpm)(struct i40iw_sc_dev *, u8);
+ enum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *);
+ enum i40iw_status_code (*create_hmc_object)(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_create_obj_info *);
+ enum i40iw_status_code (*del_hmc_object)(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_del_obj_info *,
+ bool reset);
+ enum i40iw_status_code (*pf_init_vfhmc)(struct i40iw_sc_dev *, u8, u32 *);
+ enum i40iw_status_code (*vf_configure_vffpm)(struct i40iw_sc_dev *, u32 *);
+};
+
+struct cqp_info {
+ union {
+ struct {
+ struct i40iw_sc_qp *qp;
+ struct i40iw_create_qp_info info;
+ u64 scratch;
+ } qp_create;
+
+ struct {
+ struct i40iw_sc_qp *qp;
+ struct i40iw_modify_qp_info info;
+ u64 scratch;
+ } qp_modify;
+
+ struct {
+ struct i40iw_sc_qp *qp;
+ u64 scratch;
+ bool remove_hash_idx;
+ bool ignore_mw_bnd;
+ } qp_destroy;
+
+ struct {
+ struct i40iw_sc_cq *cq;
+ u64 scratch;
+ bool check_overflow;
+ } cq_create;
+
+ struct {
+ struct i40iw_sc_cq *cq;
+ u64 scratch;
+ } cq_destroy;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_allocate_stag_info info;
+ u64 scratch;
+ } alloc_stag;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ u64 scratch;
+ u32 mw_stag_index;
+ u16 pd_id;
+ } mw_alloc;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_reg_ns_stag_info info;
+ u64 scratch;
+ } mr_reg_non_shared;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_dealloc_stag_info info;
+ u64 scratch;
+ } dealloc_stag;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_local_mac_ipaddr_entry_info info;
+ u64 scratch;
+ } add_local_mac_ipaddr_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_add_arp_cache_entry_info info;
+ u64 scratch;
+ } add_arp_cache_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ u64 scratch;
+ u8 entry_idx;
+ u8 ignore_ref_count;
+ } del_local_mac_ipaddr_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ u64 scratch;
+ u16 arp_index;
+ } del_arp_cache_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_manage_vf_pble_info info;
+ u64 scratch;
+ } manage_vf_pble_bp;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_cqp_manage_push_page_info info;
+ u64 scratch;
+ } manage_push_page;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_upload_context_info info;
+ u64 scratch;
+ } qp_upload_context;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ u64 scratch;
+ } alloc_local_mac_ipaddr_entry;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_hmc_fcn_info info;
+ u64 scratch;
+ } manage_hmc_pm;
+
+ struct {
+ struct i40iw_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_create;
+
+ struct {
+ struct i40iw_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_destroy;
+
+ struct {
+ struct i40iw_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_create;
+
+ struct {
+ struct i40iw_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_destroy;
+
+ struct {
+ struct i40iw_sc_qp *qp;
+ struct i40iw_qp_flush_info info;
+ u64 scratch;
+ } qp_flush_wqes;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ void *fpm_values_va;
+ u64 fpm_values_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } query_fpm_values;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ void *fpm_values_va;
+ u64 fpm_values_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } commit_fpm_values;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_apbvt_info info;
+ u64 scratch;
+ } manage_apbvt_entry;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_qhash_table_info info;
+ u64 scratch;
+ } manage_qhash_table_entry;
+
+ struct {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_update_sds_info info;
+ u64 scratch;
+ } update_pe_sds;
+
+ struct {
+ struct i40iw_sc_cqp *cqp;
+ struct i40iw_sc_qp *qp;
+ u64 scratch;
+ } suspend_resume;
+ } u;
+};
+
+struct cqp_commands_info {
+ struct list_head cqp_cmd_entry;
+ u8 cqp_cmd;
+ u8 post_sq;
+ struct cqp_info in;
+};
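+
+/*
+ * Usage sketch (assumed): a deferred CQP command carries its opcode in
+ * cqp_cmd and its payload in the matching member of the union; e.g. for a
+ * qp create request queued on cqp_cmd_head (opcode value omitted here):
+ *
+ *   struct cqp_commands_info cmd = { .post_sq = 1 };
+ *
+ *   cmd.in.u.qp_create.qp = qp;
+ *   cmd.in.u.qp_create.scratch = (uintptr_t)qp;
+ */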
+
+struct i40iw_virtchnl_work_info {
+ void (*callback_fcn)(void *vf_dev);
+ void *worker_vf_dev;
+};
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
new file mode 100644
index 000000000000..12acd688def4
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef I40IW_USER_CONTEXT_H
+#define I40IW_USER_CONTEXT_H
+
+#include <linux/types.h>
+
+#define I40IW_ABI_USERSPACE_VER 4
+#define I40IW_ABI_KERNEL_VER 4
+struct i40iw_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct i40iw_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmapped area */
+ __u8 kernel_ver;
+ __u8 reserved[3];
+};
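+
+/*
+ * ABI negotiation sketch (an assumption drawn from the fields above):
+ * userspace reports its version in the request and the kernel returns its
+ * own in the response; incompatible versions are expected to fail the
+ * ucontext allocation.
+ *
+ *   struct i40iw_alloc_ucontext_req req = {
+ *           .userspace_ver = I40IW_ABI_USERSPACE_VER,
+ *   };
+ */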
+
+struct i40iw_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 reserved[4];
+};
+
+struct i40iw_create_cq_req {
+ __u64 user_cq_buffer;
+ __u64 user_shadow_area;
+};
+
+struct i40iw_create_qp_req {
+ __u64 user_wqe_buffers;
+ __u64 user_compl_ctx;
+
+ /* UDA QP PHB */
+ __u64 user_sq_phb; /* place for VA of the sq phb buff */
+ __u64 user_rq_phb; /* place for VA of the rq phb buff */
+};
+
+enum i40iw_memreg_type {
+ IW_MEMREG_TYPE_MEM = 0x0000,
+ IW_MEMREG_TYPE_QP = 0x0001,
+ IW_MEMREG_TYPE_CQ = 0x0002,
+};
+
+struct i40iw_mem_reg_req {
+ __u16 reg_type; /* Memory, QP or CQ */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct i40iw_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct i40iw_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 i40iw_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd2;
+};
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
new file mode 100644
index 000000000000..f78c3dc8bdb2
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -0,0 +1,1204 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_status.h"
+#include "i40iw_d.h"
+#include "i40iw_user.h"
+#include "i40iw_register.h"
+
+static u32 nop_signature = 0x55550000;
+
+/**
+ * i40iw_nop_1 - insert a nop wqe and move head (no doorbell post)
+ * @qp: hw qp ptr
+ */
+static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
+{
+ u64 header, *wqe;
+ u64 *wqe_0 = NULL;
+ u32 wqe_idx, peek_head;
+ bool signaled = false;
+
+ if (!qp->sq_ring.head)
+ return I40IW_ERR_PARAM;
+
+ wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ wqe = qp->sq_base[wqe_idx].elem;
+ peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
+ wqe_0 = qp->sq_base[peek_head].elem;
+ if (peek_head)
+ wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
+ else
+ wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+ LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;
+
+ wmb(); /* Memory barrier to ensure data is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+ return 0;
+}
+
+/**
+ * i40iw_qp_post_wr - post wr to hardware
+ * @qp: hw qp ptr
+ */
+void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
+{
+ u64 temp;
+ u32 hw_sq_tail;
+ u32 sw_sq_head;
+
+ mb(); /* valid bit is written and loads completed before reading shadow */
+
+ /* read the doorbell shadow area */
+ get_64bit_val(qp->shadow_area, 0, &temp);
+
+ hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
+ sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ if (sw_sq_head != hw_sq_tail) {
+ if (sw_sq_head > qp->initial_ring.head) {
+ if ((hw_sq_tail >= qp->initial_ring.head) &&
+ (hw_sq_tail < sw_sq_head)) {
+ writel(qp->qp_id, qp->wqe_alloc_reg);
+ }
+ } else if (sw_sq_head != qp->initial_ring.head) {
+ if ((hw_sq_tail >= qp->initial_ring.head) ||
+ (hw_sq_tail < sw_sq_head)) {
+ writel(qp->qp_id, qp->wqe_alloc_reg);
+ }
+ }
+ }
+
+ qp->initial_ring.head = qp->sq_ring.head;
+}
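+
+/*
+ * A note on the checks above: the doorbell is written only when the
+ * hardware tail lies in the half-open span of newly posted wqes,
+ * [initial head, software head), taken modulo the ring size; the two
+ * branches cover the non-wrapped and wrapped cases. Restated as a sketch:
+ *
+ *   in_span = wrapped ? (tail >= init_head || tail < sw_head)
+ *                     : (tail >= init_head && tail < sw_head);
+ */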
+
+/**
+ * i40iw_qp_ring_push_db - ring qp doorbell
+ * @qp: hw qp ptr
+ * @wqe_idx: wqe index
+ */
+static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
+{
+ set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
+ qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+}
+
+/**
+ * i40iw_qp_get_next_send_wqe - return next wqe ptr
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ * @wqe_size: size of sq wqe
+ */
+u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
+ u32 *wqe_idx,
+ u8 wqe_size)
+{
+ u64 *wqe = NULL;
+ u64 wqe_ptr;
+ u32 peek_head = 0;
+ u16 offset;
+ enum i40iw_status_code ret_code = 0;
+ u8 nop_wqe_cnt = 0, i;
+ u64 *wqe_0 = NULL;
+
+ *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
+ offset = (u16)(wqe_ptr) & 0x7F;
+ if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
+ nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
+ for (i = 0; i < nop_wqe_cnt; i++) {
+ i40iw_nop_1(qp);
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return NULL;
+ }
+
+ *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ }
+ for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return NULL;
+ }
+
+ wqe = qp->sq_base[*wqe_idx].elem;
+
+ peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ wqe_0 = qp->sq_base[peek_head].elem;
+ if (peek_head & 0x3)
+ wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
+ return wqe;
+}
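+
+/*
+ * A note on the padding above (illustrative reading of the code, assuming
+ * the 32/128-byte min/max wqe sizes implied by the arithmetic): the sq is
+ * carved into 32-byte quanta and a wqe may not straddle a 128-byte
+ * boundary (offset is taken mod 0x80), so the remainder up to the boundary
+ * is consumed with nop wqes first. E.g. a 64-byte wqe requested at offset
+ * 0x60 is preceded by one 32-byte nop and starts at the next boundary.
+ */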
+
+/**
+ * i40iw_set_fragment - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ */
+static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
+ set_64bit_val(wqe, (offset + 8),
+ (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
+ LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
+ }
+}
+
+/**
+ * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ */
+u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
+{
+ u64 *wqe = NULL;
+ enum i40iw_status_code ret_code;
+
+ if (I40IW_RING_FULL_ERR(qp->rq_ring))
+ return NULL;
+
+ I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+ if (!*wqe_idx)
+ qp->rwqe_polarity = !qp->rwqe_polarity;
+ /* rq_wqe_size_multiplier is the number of qwords in one rq wqe */
+ wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;
+
+ return wqe;
+}
+
+/**
+ * i40iw_rdma_write - rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool post_sq)
+{
+ u64 header;
+ u64 *wqe;
+ struct i40iw_rdma_write *op_info;
+ u32 i, wqe_idx;
+ u32 total_size = 0, byte_off;
+ enum i40iw_status_code ret_code;
+ bool read_fence = false;
+ u8 wqe_size;
+
+ op_info = &info->op.rdma_write;
+ if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].len;
+
+ if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
+ return I40IW_ERR_QP_INVALID_MSG_SIZE;
+
+ read_fence |= info->read_fence;
+
+ ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
+ set_64bit_val(wqe, 16,
+ LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
+ if (!op_info->rem_addr.stag)
+ return I40IW_ERR_BAD_STAG;
+
+ header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+ LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+ LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
+ LS_64(read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);
+
+ for (i = 1; i < op_info->num_lo_sges; i++) {
+ byte_off = 32 + (i - 1) * 16;
+ i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_rdma_read - rdma read command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @inv_stag: flag for inv_stag
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool inv_stag,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_rdma_read *op_info;
+ u64 header;
+ u32 wqe_idx;
+ enum i40iw_status_code ret_code;
+ u8 wqe_size;
+ bool local_fence = false;
+
+ op_info = &info->op.rdma_read;
+ ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
+ if (ret_code)
+ return ret_code;
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->lo_addr.len;
+ local_fence |= info->local_fence;
+
+ set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
+ header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+ LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
+ LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_set_fragment(wqe, 0, &op_info->lo_addr);
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_send - rdma send command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @stag_to_inv: stag_to_inv value
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ u32 stag_to_inv,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_post_send *op_info;
+ u64 header;
+ u32 i, wqe_idx, total_size = 0, byte_off;
+ enum i40iw_status_code ret_code;
+ bool read_fence = false;
+ u8 wqe_size;
+
+ op_info = &info->op.send;
+ if (qp->max_sq_frag_cnt < op_info->num_sges)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].len;
+ ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ read_fence |= info->read_fence;
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
+ set_64bit_val(wqe, 16, 0);
+ header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
+ LS_64(info->op_type, I40IWQPSQ_OPCODE) |
+ LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
+ I40IWQPSQ_ADDFRAGCNT) |
+ LS_64(read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_set_fragment(wqe, 0, op_info->sg_list);
+
+ for (i = 1; i < op_info->num_sges; i++) {
+ byte_off = 32 + (i - 1) * 16;
+ i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_inline_rdma_write - inline rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool post_sq)
+{
+ u64 *wqe;
+ u8 *dest, *src;
+ struct i40iw_inline_rdma_write *op_info;
+ u64 *push;
+ u64 header = 0;
+ u32 i, wqe_idx;
+ enum i40iw_status_code ret_code;
+ bool read_fence = false;
+ u8 wqe_size;
+
+ op_info = &info->op.inline_rdma_write;
+ if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
+ return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+ ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ read_fence |= info->read_fence;
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
+ set_64bit_val(wqe, 16,
+ LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
+
+ header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+ LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+ LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
+ LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
+ LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
+ LS_64(read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ dest = (u8 *)wqe;
+ src = (u8 *)(op_info->data);
+
+ if (op_info->len <= 16) {
+ for (i = 0; i < op_info->len; i++, src++, dest++)
+ *dest = *src;
+ } else {
+ for (i = 0; i < 16; i++, src++, dest++)
+ *dest = *src;
+ dest = (u8 *)wqe + 32;
+ for (; i < op_info->len; i++, src++, dest++)
+ *dest = *src;
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (qp->push_db) {
+ push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
+ memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
+ i40iw_qp_ring_push_db(qp, wqe_idx);
+ } else {
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_inline_send - inline send operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @stag_to_inv: remote stag
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ u32 stag_to_inv,
+ bool post_sq)
+{
+ u64 *wqe;
+ u8 *dest, *src;
+ struct i40iw_post_inline_send *op_info;
+ u64 header;
+ u32 wqe_idx, i;
+ enum i40iw_status_code ret_code;
+ bool read_fence = false;
+ u8 wqe_size;
+ u64 *push;
+
+ op_info = &info->op.inline_send;
+ if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
+ return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+ ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ read_fence |= info->read_fence;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
+ header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
+ LS_64(info->op_type, I40IWQPSQ_OPCODE) |
+ LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
+ LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
+ LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
+ LS_64(read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ dest = (u8 *)wqe;
+ src = (u8 *)(op_info->data);
+
+ if (op_info->len <= 16) {
+ for (i = 0; i < op_info->len; i++, src++, dest++)
+ *dest = *src;
+ } else {
+ for (i = 0; i < 16; i++, src++, dest++)
+ *dest = *src;
+ dest = (u8 *)wqe + 32;
+ for (; i < op_info->len; i++, src++, dest++)
+ *dest = *src;
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (qp->push_db) {
+ push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
+ memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
+ i40iw_qp_ring_push_db(qp, wqe_idx);
+ } else {
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_stag_local_invalidate - stag invalidate operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_inv_local_stag *op_info;
+ u64 header;
+ u32 wqe_idx;
+ bool local_fence = false;
+
+ op_info = &info->op.inv_local_stag;
+ local_fence = info->local_fence;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8,
+ LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
+ set_64bit_val(wqe, 16, 0);
+ header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
+ LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_mw_bind - Memory Window bind operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
+ struct i40iw_post_sq_info *info,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_bind_window *op_info;
+ u64 header;
+ u32 wqe_idx;
+ bool local_fence = false;
+
+ op_info = &info->op.bind_window;
+
+ local_fence |= info->local_fence;
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+ set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, 8,
+ LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
+ LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
+ set_64bit_val(wqe, 16, op_info->bind_length);
+ header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
+ LS_64(((op_info->enable_reads << 2) |
+ (op_info->enable_writes << 3)),
+ I40IWQPSQ_STAGRIGHTS) |
+ LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0),
+ I40IWQPSQ_VABASEDTO) |
+ LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_post_receive - post receive wqe
+ * @qp: hw qp ptr
+ * @info: post rq information
+ */
+static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
+ struct i40iw_post_rq_info *info)
+{
+ u64 *wqe;
+ u64 header;
+ u32 total_size = 0, wqe_idx, i, byte_off;
+
+ if (qp->max_rq_frag_cnt < info->num_sges)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ for (i = 0; i < info->num_sges; i++)
+ total_size += info->sg_list[i].len;
+ wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->rq_wrid_array[wqe_idx] = info->wr_id;
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
+ I40IWQPSQ_ADDFRAGCNT) |
+ LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_set_fragment(wqe, 0, info->sg_list);
+
+ for (i = 1; i < info->num_sges; i++) {
+ byte_off = 32 + (i - 1) * 16;
+ i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
+ }
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ return 0;
+}
+
+/**
+ * i40iw_cq_request_notification - cq notification request (doorbell)
+ * @cq: hw cq
+ * @cq_notify: notification type
+ */
+static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
+ enum i40iw_completion_notify cq_notify)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se = 0;
+ u8 arm_next = 0;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+ arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
+ arm_seq_num++;
+
+ sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
+ arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
+ arm_next_se |= 1;
+ if (cq_notify == IW_CQ_COMPL_EVENT)
+ arm_next = 1;
+ temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
+ LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
+ LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
+ LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+
+ wmb(); /* make sure shadow area is updated before arming the cq */
+
+ writel(cq->cq_id, cq->cqe_alloc_reg);
+}
+
+/**
+ * i40iw_cq_post_entries - update tail in shadow memory
+ * @cq: hw cq
+ * @count: # of entries processed
+ */
+static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
+ u8 count)
+{
+ I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+ return 0;
+}
+
+/**
+ * i40iw_cq_poll_completion - get cq completion info
+ * @cq: hw cq
+ * @info: cq poll information returned
+ * @post_cq: update cq tail
+ */
+static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
+ struct i40iw_cq_poll_info *info,
+ bool post_cq)
+{
+ u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
+ u64 *cqe, *sw_wqe;
+ struct i40iw_qp_uk *qp;
+ struct i40iw_ring *pring = NULL;
+ u32 wqe_idx, q_type, array_idx = 0;
+ enum i40iw_status_code ret_code = 0;
+ enum i40iw_status_code ret_code2 = 0;
+ bool move_cq_head = true;
+ u8 polarity;
+ u8 addl_frag_cnt, addl_wqes = 0;
+
+ if (cq->avoid_mem_cflct)
+ cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
+ else
+ cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);
+
+ if (polarity != cq->polarity)
+ return I40IW_ERR_QUEUE_EMPTY;
+
+ q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
+ info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
+ info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
+ if (info->error) {
+ info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
+ info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
+ info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
+ } else {
+ info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+
+ info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+
+ info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
+ info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);
+
+ qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
+ wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
+ info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;
+
+ if (q_type == I40IW_CQE_QTYPE_RQ) {
+ array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
+ if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
+ info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
+ array_idx = qp->rq_ring.tail;
+ } else {
+ info->wr_id = qp->rq_wrid_array[array_idx];
+ }
+
+ info->op_type = I40IW_OP_TYPE_REC;
+ if (qword3 & I40IWCQ_STAG_MASK) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
+ I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
+ pring = &qp->rq_ring;
+ } else {
+ if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
+ info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+
+ info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
+ sw_wqe = qp->sq_base[wqe_idx].elem;
+ get_64bit_val(sw_wqe, 24, &wqe_qword);
+ addl_frag_cnt =
+ (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
+ i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
+
+ addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
+ I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
+ } else {
+ do {
+ u8 op_type;
+ u32 tail;
+
+ tail = qp->sq_ring.tail;
+ sw_wqe = qp->sq_base[tail].elem;
+ get_64bit_val(sw_wqe, 24, &wqe_qword);
+ op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
+ info->op_type = op_type;
+ addl_frag_cnt = (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
+ i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
+ addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
+ I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
+ if (op_type != I40IWQP_OP_NOP) {
+ info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+ info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+ break;
+ }
+ } while (1);
+ }
+ pring = &qp->sq_ring;
+ }
+
+ ret_code = 0;
+
+ if (!ret_code &&
+ (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
+ if (pring && (I40IW_RING_MORE_WORK(*pring)))
+ move_cq_head = false;
+
+ if (move_cq_head) {
+ I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);
+
+ if (ret_code2 && !ret_code)
+ ret_code = ret_code2;
+
+ if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
+ cq->polarity ^= 1;
+
+ if (post_cq) {
+ I40IW_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+ }
+ } else {
+ if (info->is_srq)
+ return ret_code;
+ qword3 &= ~I40IW_CQ_WQEIDX_MASK;
+ qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
+ set_64bit_val(cqe, 24, qword3);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40iw_get_wqe_shift - get shift count for maximum wqe size
+ * @wqdepth: depth of wq required.
+ * @sge: maximum scatter-gather elements per wqe
+ * @shift: returns the shift needed based on sge
+ *
+ * Shift can be used to left-shift the wqe size based on sge.
+ * If sge == 1, shift = 0 (32-byte wqe); for sge = 2 or 3, shift = 1
+ * (64-byte wqes); otherwise shift = 2 (128-byte wqes).
+ */
+enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift)
+{
+ u32 size;
+
+ *shift = 0;
+ if (sge > 1)
+ *shift = (sge < 4) ? 1 : 2;
+
+ /* wqdepth must be a power of 2 and at least the sw minimum */
+
+ if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
+ return I40IW_ERR_INVALID_SIZE;
+
+ size = wqdepth << *shift; /* size in 32-byte quanta */
+ if (size > I40IWQP_SW_MAX_WQSIZE)
+ return I40IW_ERR_INVALID_SIZE;
+ return 0;
+}
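+
+/*
+ * Example, following the arithmetic above: a 128-entry sq with up to 3
+ * fragments per wqe yields shift = 1, i.e. 64-byte wqes occupying
+ * 128 << 1 = 256 quanta of 32 bytes:
+ *
+ *   u8 shift;
+ *
+ *   if (!i40iw_get_wqe_shift(128, 3, &shift))
+ *           ; /* shift == 1 here */
+ */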
+
+static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
+ i40iw_qp_post_wr,
+ i40iw_qp_ring_push_db,
+ i40iw_rdma_write,
+ i40iw_rdma_read,
+ i40iw_send,
+ i40iw_inline_rdma_write,
+ i40iw_inline_send,
+ i40iw_stag_local_invalidate,
+ i40iw_mw_bind,
+ i40iw_post_receive,
+ i40iw_nop
+};
+
+static struct i40iw_cq_ops iw_cq_ops = {
+ i40iw_cq_request_notification,
+ i40iw_cq_poll_completion,
+ i40iw_cq_post_entries,
+ i40iw_clean_cq
+};
+
+static struct i40iw_device_uk_ops iw_device_uk_ops = {
+ i40iw_cq_uk_init,
+ i40iw_qp_uk_init,
+};
+
+/**
+ * i40iw_qp_uk_init - initialize shared qp
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ *
+ * Initializes the vars used in both user and kernel mode.
+ * The wqe size depends on the maximum number of fragments
+ * allowed. The wqe size times the number of wqes should equal the
+ * amount of memory allocated for the sq and rq. If an srq is used,
+ * rq_base points to a single rq wqe only (not the whole
+ * array of wqes).
+ */
+enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
+ struct i40iw_qp_uk_init_info *info)
+{
+ enum i40iw_status_code ret_code = 0;
+ u32 sq_ring_size;
+ u8 sqshift, rqshift;
+
+ if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+
+ if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, &sqshift);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, &rqshift);
+ if (ret_code)
+ return ret_code;
+
+ qp->sq_base = info->sq;
+ qp->rq_base = info->rq;
+ qp->shadow_area = info->shadow_area;
+ qp->sq_wrtrk_array = info->sq_wrtrk_array;
+ qp->rq_wrid_array = info->rq_wrid_array;
+
+ qp->wqe_alloc_reg = info->wqe_alloc_reg;
+ qp->qp_id = info->qp_id;
+
+ qp->sq_size = info->sq_size;
+ qp->push_db = info->push_db;
+ qp->push_wqe = info->push_wqe;
+
+ qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+ sq_ring_size = qp->sq_size << sqshift;
+
+ I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
+ I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ I40IW_RING_MOVE_TAIL(qp->sq_ring);
+ I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
+ qp->swqe_polarity = 1;
+ qp->swqe_polarity_deferred = 1;
+ qp->rwqe_polarity = 0;
+
+ if (!qp->use_srq) {
+ qp->rq_size = info->rq_size;
+ qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+ qp->rq_wqe_size = rqshift;
+ I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
+ qp->rq_wqe_size_multiplier = 4 << rqshift;
+ }
+ qp->ops = iw_qp_uk_ops;
+
+ return ret_code;
+}
+
+/**
+ * i40iw_cq_uk_init - initialize shared cq (user and kernel)
+ * @cq: hw cq
+ * @info: hw cq initialization info
+ */
+enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
+ struct i40iw_cq_uk_init_info *info)
+{
+ if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
+ (info->cq_size > I40IW_MAX_CQ_SIZE))
+ return I40IW_ERR_INVALID_SIZE;
+ cq->cq_base = (struct i40iw_cqe *)info->cq_base;
+ cq->cq_id = info->cq_id;
+ cq->cq_size = info->cq_size;
+ cq->cqe_alloc_reg = info->cqe_alloc_reg;
+ cq->shadow_area = info->shadow_area;
+ cq->avoid_mem_cflct = info->avoid_mem_cflct;
+
+ I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+ cq->ops = iw_cq_ops;
+
+ return 0;
+}
+
+/**
+ * i40iw_device_init_uk - setup routines for iwarp shared device
+ * @dev: iwarp shared (user and kernel)
+ */
+void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
+{
+ dev->ops_uk = iw_device_uk_ops;
+}
+
+/**
+ * i40iw_clean_cq - clean cq entries
+ * @queue: completion context
+ * @cq: cq to clean
+ */
+void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
+{
+ u64 *cqe;
+ u64 qword3, comp_ctx;
+ u32 cq_head;
+ u8 polarity, temp;
+
+ cq_head = cq->cq_ring.head;
+ temp = cq->polarity;
+ do {
+ if (cq->avoid_mem_cflct)
+ cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
+ else
+ cqe = (u64 *)&cq->cq_base[cq_head];
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);
+
+ if (polarity != temp)
+ break;
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if ((void *)(unsigned long)comp_ctx == queue)
+ set_64bit_val(cqe, 8, 0);
+
+ cq_head = (cq_head + 1) % cq->cq_ring.size;
+ if (!cq_head)
+ temp ^= 1;
+ } while (true);
+}
+
+/**
+ * i40iw_nop - send a nop
+ * @qp: hw qp ptr
+ * @wr_id: work request id
+ * @signaled: flag if signaled for completion
+ * @post_sq: flag to post sq
+ */
+enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
+ u64 wr_id,
+ bool signaled,
+ bool post_sq)
+{
+ u64 header, *wqe;
+ u32 wqe_idx;
+
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ qp->sq_wrtrk_array[wqe_idx].wrid = wr_id;
+ qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+ LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+ if (post_sq)
+ i40iw_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size of sq wqe returned
+ */
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *wqe_size = I40IW_QP_WQE_MIN_SIZE;
+ break;
+ case 2:
+ case 3:
+ *wqe_size = 64;
+ break;
+ case 4:
+ case 5:
+ *wqe_size = 96;
+ break;
+ case 6:
+ case 7:
+ *wqe_size = 128;
+ break;
+ default:
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size of rq wqe returned
+ */
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *wqe_size = 32;
+ break;
+ case 2:
+ case 3:
+ *wqe_size = 64;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ *wqe_size = 128;
+ break;
+ default:
+ return I40IW_ERR_INVALID_FRAG_COUNT;
+ }
+
+ return 0;
+}
+
+/**
+ * i40iw_inline_data_size_to_wqesize - calculate wqe size based on inline data size
+ * @data_size: data size for inline
+ * @wqe_size: size of sq wqe returned
+ */
+enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
+ u8 *wqe_size)
+{
+ if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
+ return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+ if (data_size <= 16)
+ *wqe_size = I40IW_QP_WQE_MIN_SIZE;
+ else if (data_size <= 48)
+ *wqe_size = 64;
+ else if (data_size <= 80)
+ *wqe_size = 96;
+ else
+ *wqe_size = 128;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
new file mode 100644
index 000000000000..5cd971bb8cc7
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -0,0 +1,442 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_USER_H
+#define I40IW_USER_H
+
+enum i40iw_device_capabilities_const {
+ I40IW_WQE_SIZE = 4,
+ I40IW_CQP_WQE_SIZE = 8,
+ I40IW_CQE_SIZE = 4,
+ I40IW_EXTENDED_CQE_SIZE = 8,
+ I40IW_AEQE_SIZE = 2,
+ I40IW_CEQE_SIZE = 1,
+ I40IW_CQP_CTX_SIZE = 8,
+ I40IW_SHADOW_AREA_SIZE = 8,
+ I40IW_CEQ_MAX_COUNT = 256,
+ I40IW_QUERY_FPM_BUF_SIZE = 128,
+ I40IW_COMMIT_FPM_BUF_SIZE = 128,
+ I40IW_MIN_IW_QP_ID = 1,
+ I40IW_MAX_IW_QP_ID = 262143,
+ I40IW_MIN_CEQID = 0,
+ I40IW_MAX_CEQID = 256,
+ I40IW_MIN_CQID = 0,
+ I40IW_MAX_CQID = 131071,
+ I40IW_MIN_AEQ_ENTRIES = 1,
+ I40IW_MAX_AEQ_ENTRIES = 524287,
+ I40IW_MIN_CEQ_ENTRIES = 1,
+ I40IW_MAX_CEQ_ENTRIES = 131071,
+ I40IW_MIN_CQ_SIZE = 1,
+ I40IW_MAX_CQ_SIZE = 1048575,
+ I40IW_MAX_AEQ_ALLOCATE_COUNT = 255,
+ I40IW_DB_ID_ZERO = 0,
+ I40IW_MAX_WQ_FRAGMENT_COUNT = 6,
+ I40IW_MAX_SGE_RD = 1,
+ I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647,
+ I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647,
+ I40IW_MAX_PUSH_PAGE_COUNT = 4096,
+ I40IW_MAX_PE_ENABLED_VF_COUNT = 32,
+ I40IW_MAX_VF_FPM_ID = 47,
+ I40IW_MAX_VF_PER_PF = 127,
+ I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
+ I40IW_MAX_INLINE_DATA_SIZE = 112,
+ I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 112,
+ I40IW_MAX_IRD_SIZE = 32,
+ I40IW_QPCTX_ENCD_MAXIRD = 3,
+ I40IW_MAX_WQ_ENTRIES = 2048,
+ I40IW_MAX_ORD_SIZE = 32,
+ I40IW_Q2_BUFFER_SIZE = (248 + 100),
+ I40IW_QP_CTX_SIZE = 248
+};
+
+#define i40iw_handle void *
+#define i40iw_adapter_handle i40iw_handle
+#define i40iw_qp_handle i40iw_handle
+#define i40iw_cq_handle i40iw_handle
+#define i40iw_srq_handle i40iw_handle
+#define i40iw_pd_id i40iw_handle
+#define i40iw_stag_handle i40iw_handle
+#define i40iw_stag_index u32
+#define i40iw_stag u32
+#define i40iw_stag_key u8
+
+#define i40iw_tagged_offset u64
+#define i40iw_access_privileges u32
+#define i40iw_physical_fragment u64
+#define i40iw_address_list u64 *
+
+#define I40IW_CREATE_STAG(index, key) (((index) << 8) + (key))
+
+#define I40IW_STAG_KEY_FROM_STAG(stag) ((stag) & 0x000000FF)
+
+#define I40IW_STAG_INDEX_FROM_STAG(stag) (((stag) & 0xFFFFFF00) >> 8)
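
A quick round-trip check of the stag macros above (a hypothetical standalone sketch; the index and key values are arbitrary, and this header is assumed to be included):

	#include <assert.h>

	int main(void)
	{
		unsigned int index = 0x123456, key = 0xab;
		unsigned int stag = I40IW_CREATE_STAG(index, key);

		assert(I40IW_STAG_KEY_FROM_STAG(stag) == key);		/* 0xab */
		assert(I40IW_STAG_INDEX_FROM_STAG(stag) == index);	/* 0x123456 */
		return 0;
	}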
+
+struct i40iw_qp_uk;
+struct i40iw_cq_uk;
+struct i40iw_srq_uk;
+struct i40iw_qp_uk_init_info;
+struct i40iw_cq_uk_init_info;
+struct i40iw_srq_uk_init_info;
+
+struct i40iw_sge {
+ i40iw_tagged_offset tag_off;
+ u32 len;
+ i40iw_stag stag;
+};
+
+#define i40iw_sgl struct i40iw_sge *
+
+struct i40iw_ring {
+ u32 head;
+ u32 tail;
+ u32 size;
+};
+
+struct i40iw_cqe {
+ u64 buf[I40IW_CQE_SIZE];
+};
+
+struct i40iw_extended_cqe {
+ u64 buf[I40IW_EXTENDED_CQE_SIZE];
+};
+
+struct i40iw_wqe {
+ u64 buf[I40IW_WQE_SIZE];
+};
+
+struct i40iw_qp_uk_ops;
+
+enum i40iw_addressing_type {
+ I40IW_ADDR_TYPE_ZERO_BASED = 0,
+ I40IW_ADDR_TYPE_VA_BASED = 1,
+};
+
+#define I40IW_ACCESS_FLAGS_LOCALREAD 0x01
+#define I40IW_ACCESS_FLAGS_LOCALWRITE 0x02
+#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
+#define I40IW_ACCESS_FLAGS_REMOTEREAD 0x05
+#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
+#define I40IW_ACCESS_FLAGS_REMOTEWRITE 0x0a
+#define I40IW_ACCESS_FLAGS_BIND_WINDOW 0x10
+#define I40IW_ACCESS_FLAGS_ALL 0x1F
+
+#define I40IW_OP_TYPE_RDMA_WRITE 0
+#define I40IW_OP_TYPE_RDMA_READ 1
+#define I40IW_OP_TYPE_SEND 3
+#define I40IW_OP_TYPE_SEND_INV 4
+#define I40IW_OP_TYPE_SEND_SOL 5
+#define I40IW_OP_TYPE_SEND_SOL_INV 6
+#define I40IW_OP_TYPE_REC 7
+#define I40IW_OP_TYPE_BIND_MW 8
+#define I40IW_OP_TYPE_FAST_REG_NSMR 9
+#define I40IW_OP_TYPE_INV_STAG 10
+#define I40IW_OP_TYPE_RDMA_READ_INV_STAG 11
+#define I40IW_OP_TYPE_NOP 12
+
+enum i40iw_completion_status {
+ I40IW_COMPL_STATUS_SUCCESS = 0,
+ I40IW_COMPL_STATUS_FLUSHED,
+ I40IW_COMPL_STATUS_INVALID_WQE,
+ I40IW_COMPL_STATUS_QP_CATASTROPHIC,
+ I40IW_COMPL_STATUS_REMOTE_TERMINATION,
+ I40IW_COMPL_STATUS_INVALID_STAG,
+ I40IW_COMPL_STATUS_BASE_BOUND_VIOLATION,
+ I40IW_COMPL_STATUS_ACCESS_VIOLATION,
+ I40IW_COMPL_STATUS_INVALID_PD_ID,
+ I40IW_COMPL_STATUS_WRAP_ERROR,
+ I40IW_COMPL_STATUS_STAG_INVALID_PDID,
+ I40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD,
+ I40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED,
+ I40IW_COMPL_STATUS_STAG_NOT_INVALID,
+ I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE,
+ I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY,
+ I40IW_COMPL_STATUS_INVALID_FBO,
+ I40IW_COMPL_STATUS_INVALID_LENGTH,
+ I40IW_COMPL_STATUS_INVALID_ACCESS,
+ I40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG,
+ I40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS,
+ I40IW_COMPL_STATUS_INVALID_REGION,
+ I40IW_COMPL_STATUS_INVALID_WINDOW,
+ I40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH
+};
+
+enum i40iw_completion_notify {
+ IW_CQ_COMPL_EVENT = 0,
+ IW_CQ_COMPL_SOLICITED = 1
+};
+
+struct i40iw_post_send {
+ i40iw_sgl sg_list;
+ u8 num_sges;
+};
+
+struct i40iw_post_inline_send {
+ void *data;
+ u32 len;
+};
+
+struct i40iw_post_send_w_inv {
+ i40iw_sgl sg_list;
+ u32 num_sges;
+ i40iw_stag remote_stag_to_inv;
+};
+
+struct i40iw_post_inline_send_w_inv {
+ void *data;
+ u32 len;
+ i40iw_stag remote_stag_to_inv;
+};
+
+struct i40iw_rdma_write {
+ i40iw_sgl lo_sg_list;
+ u8 num_lo_sges;
+ struct i40iw_sge rem_addr;
+};
+
+struct i40iw_inline_rdma_write {
+ void *data;
+ u32 len;
+ struct i40iw_sge rem_addr;
+};
+
+struct i40iw_rdma_read {
+ struct i40iw_sge lo_addr;
+ struct i40iw_sge rem_addr;
+};
+
+struct i40iw_bind_window {
+ i40iw_stag mr_stag;
+ u64 bind_length;
+ void *va;
+ enum i40iw_addressing_type addressing_type;
+ bool enable_reads;
+ bool enable_writes;
+ i40iw_stag mw_stag;
+};
+
+struct i40iw_inv_local_stag {
+ i40iw_stag target_stag;
+};
+
+struct i40iw_post_sq_info {
+ u64 wr_id;
+ u8 op_type;
+ bool signaled;
+ bool read_fence;
+ bool local_fence;
+ bool inline_data;
+ bool defer_flag;
+ union {
+ struct i40iw_post_send send;
+ struct i40iw_post_send send_w_sol;
+ struct i40iw_post_send_w_inv send_w_inv;
+ struct i40iw_post_send_w_inv send_w_sol_inv;
+ struct i40iw_rdma_write rdma_write;
+ struct i40iw_rdma_read rdma_read;
+ struct i40iw_rdma_read rdma_read_inv;
+ struct i40iw_bind_window bind_window;
+ struct i40iw_inv_local_stag inv_local_stag;
+ struct i40iw_inline_rdma_write inline_rdma_write;
+ struct i40iw_post_inline_send inline_send;
+ struct i40iw_post_inline_send inline_send_w_sol;
+ struct i40iw_post_inline_send_w_inv inline_send_w_inv;
+ struct i40iw_post_inline_send_w_inv inline_send_w_sol_inv;
+ } op;
+};
+
+struct i40iw_post_rq_info {
+ u64 wr_id;
+ i40iw_sgl sg_list;
+ u32 num_sges;
+};
+
+struct i40iw_cq_poll_info {
+ u64 wr_id;
+ i40iw_qp_handle qp_handle;
+ u32 bytes_xfered;
+ u32 tcp_seq_num;
+ u32 qp_id;
+ i40iw_stag inv_stag;
+ enum i40iw_completion_status comp_status;
+ u16 major_err;
+ u16 minor_err;
+ u8 op_type;
+ bool stag_invalid_set;
+ bool push_dropped;
+ bool error;
+ bool is_srq;
+ bool solicited_event;
+};
+
+struct i40iw_qp_uk_ops {
+ void (*iw_qp_post_wr)(struct i40iw_qp_uk *);
+ void (*iw_qp_ring_push_db)(struct i40iw_qp_uk *, u32);
+ enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool);
+ enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool, bool);
+ enum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, u32, bool);
+ enum i40iw_status_code (*iw_inline_rdma_write)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool);
+ enum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, u32, bool);
+ enum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool);
+ enum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *,
+ struct i40iw_post_sq_info *, bool);
+ enum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *,
+ struct i40iw_post_rq_info *);
+ enum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool);
+};
+
+struct i40iw_cq_ops {
+ void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
+ enum i40iw_completion_notify);
+ enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
+ struct i40iw_cq_poll_info *, bool);
+ enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
+ void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
+};
+
+struct i40iw_dev_uk;
+
+struct i40iw_device_uk_ops {
+ enum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *,
+ struct i40iw_cq_uk_init_info *);
+ enum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *,
+ struct i40iw_qp_uk_init_info *);
+};
+
+struct i40iw_dev_uk {
+ struct i40iw_device_uk_ops ops_uk;
+};
+
+struct i40iw_sq_uk_wr_trk_info {
+ u64 wrid;
+ u64 wr_len;
+};
+
+struct i40iw_qp_quanta {
+ u64 elem[I40IW_WQE_SIZE];
+};
+
+struct i40iw_qp_uk {
+ struct i40iw_qp_quanta *sq_base;
+ struct i40iw_qp_quanta *rq_base;
+ u32 __iomem *wqe_alloc_reg;
+ struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u64 *shadow_area;
+ u32 *push_db;
+ u64 *push_wqe;
+ struct i40iw_ring sq_ring;
+ struct i40iw_ring rq_ring;
+ struct i40iw_ring initial_ring;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ struct i40iw_qp_uk_ops ops;
+ bool use_srq;
+ u8 swqe_polarity;
+ u8 swqe_polarity_deferred;
+ u8 rwqe_polarity;
+ u8 rq_wqe_size;
+ u8 rq_wqe_size_multiplier;
+ u8 max_sq_frag_cnt;
+ u8 max_rq_frag_cnt;
+ bool deferred_flag;
+};
+
+struct i40iw_cq_uk {
+ struct i40iw_cqe *cq_base;
+ u32 __iomem *cqe_alloc_reg;
+ u64 *shadow_area;
+ u32 cq_id;
+ u32 cq_size;
+ struct i40iw_ring cq_ring;
+ u8 polarity;
+ bool avoid_mem_cflct;
+
+ struct i40iw_cq_ops ops;
+};
+
+struct i40iw_qp_uk_init_info {
+ struct i40iw_qp_quanta *sq;
+ struct i40iw_qp_quanta *rq;
+ u32 __iomem *wqe_alloc_reg;
+ u64 *shadow_area;
+ struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 *push_db;
+ u64 *push_wqe;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u8 max_sq_frag_cnt;
+ u8 max_rq_frag_cnt;
+};
+
+struct i40iw_cq_uk_init_info {
+ u32 __iomem *cqe_alloc_reg;
+ struct i40iw_cqe *cq_base;
+ u64 *shadow_area;
+ u32 cq_size;
+ u32 cq_id;
+ bool avoid_mem_cflct;
+};
+
+void i40iw_device_init_uk(struct i40iw_dev_uk *dev);
+
+void i40iw_qp_post_wr(struct i40iw_qp_uk *qp);
+u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx,
+ u8 wqe_size);
+u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx);
+u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx);
+
+enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
+ struct i40iw_cq_uk_init_info *info);
+enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
+ struct i40iw_qp_uk_init_info *info);
+
+void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);
+enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
+ bool signaled, bool post_sq);
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size);
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size);
+enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
+ u8 *wqe_size);
+enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift);
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
new file mode 100644
index 000000000000..1ceec81bd8eb
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -0,0 +1,1270 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include "i40iw.h"
+
+/**
+ * i40iw_arp_table - manage arp table
+ * @iwdev: iwarp device
+ * @ip_addr: ip address for device
+ * @ipv4: flag indicating whether ip_addr is ipv4
+ * @mac_addr: mac address ptr
+ * @action: modify, delete or add
+ */
+int i40iw_arp_table(struct i40iw_device *iwdev,
+ __be32 *ip_addr,
+ bool ipv4,
+ u8 *mac_addr,
+ u32 action)
+{
+ int arp_index;
+ int err;
+ u32 ip[4];
+
+ if (ipv4) {
+ memset(ip, 0, sizeof(ip));
+ ip[0] = *ip_addr;
+ } else {
+ memcpy(ip, ip_addr, sizeof(ip));
+ }
+
+ for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
+ if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
+ break;
+ switch (action) {
+ case I40IW_ARP_ADD:
+ if (arp_index != iwdev->arp_table_size)
+ return -1;
+
+ arp_index = 0;
+ err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
+ iwdev->arp_table_size,
+ (u32 *)&arp_index,
+ &iwdev->next_arp_index);
+
+ if (err)
+ return err;
+
+ memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
+ ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
+ break;
+ case I40IW_ARP_RESOLVE:
+ if (arp_index == iwdev->arp_table_size)
+ return -1;
+ break;
+ case I40IW_ARP_DELETE:
+ if (arp_index == iwdev->arp_table_size)
+ return -1;
+ memset(iwdev->arp_table[arp_index].ip_addr, 0,
+ sizeof(iwdev->arp_table[arp_index].ip_addr));
+ eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
+ i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
+ break;
+ default:
+ return -1;
+ }
+ return arp_index;
+}
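
A condensed usage sketch of the three actions (kernel context; "iwdev" is assumed to be an initialized struct i40iw_device, and the address is arbitrary):

	__be32 ip = htonl(0xc0a80001);	/* 192.168.0.1 */
	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	int idx;

	idx = i40iw_arp_table(iwdev, &ip, true, mac, I40IW_ARP_ADD);
	if (idx >= 0) {
		/* mac_addr is only read for the ADD action */
		idx = i40iw_arp_table(iwdev, &ip, true, NULL, I40IW_ARP_RESOLVE);
		i40iw_arp_table(iwdev, &ip, true, NULL, I40IW_ARP_DELETE);
	}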
+
+/**
+ * i40iw_wr32 - write 32 bits to hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ * @value: value to write to register
+ */
+inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
+{
+ writel(value, hw->hw_addr + reg);
+}
+
+/**
+ * i40iw_rd32 - read a 32 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return value of register content
+ */
+inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+/**
+ * i40iw_inetaddr_event - system notifier for ipv4 inet address events
+ * @notifier: notifier block (not used)
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int i40iw_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+ struct net_device *netdev;
+ struct net_device *upper_dev;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+ __be32 local_ipaddr;
+
+ hdl = i40iw_find_netdev(event_netdev);
+ if (!hdl)
+ return NOTIFY_DONE;
+
+ iwdev = &hdl->device;
+ netdev = iwdev->ldev->netdev;
+ upper_dev = netdev_master_upper_dev_get(netdev);
+ if (netdev != event_netdev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ if (upper_dev)
+ local_ipaddr =
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+ else
+ local_ipaddr = ifa->ifa_address;
+ local_ipaddr = ntohl(local_ipaddr);
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ &local_ipaddr,
+ true,
+ I40IW_ARP_DELETE);
+ return NOTIFY_OK;
+ case NETDEV_UP:
+ if (upper_dev)
+ local_ipaddr =
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+ else
+ local_ipaddr = ifa->ifa_address;
+ local_ipaddr = ntohl(local_ipaddr);
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ &local_ipaddr,
+ true,
+ I40IW_ARP_ADD);
+ break;
+ case NETDEV_CHANGEADDR:
+ /* Add the address to the IP table */
+ if (upper_dev)
+ local_ipaddr =
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+ else
+ local_ipaddr = ifa->ifa_address;
+
+ local_ipaddr = ntohl(local_ipaddr);
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ &local_ipaddr,
+ true,
+ I40IW_ARP_ADD);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_inet6addr_event - system notifier for ipv6 inet address events
+ * @notifier: notifier block (not used)
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int i40iw_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr)
+{
+ struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+ struct net_device *event_netdev = ifa->idev->dev;
+ struct net_device *netdev;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+ __be32 local_ipaddr6[4];
+
+ hdl = i40iw_find_netdev(event_netdev);
+ if (!hdl)
+ return NOTIFY_DONE;
+
+ iwdev = &hdl->device;
+ netdev = iwdev->ldev->netdev;
+ if (netdev != event_netdev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ local_ipaddr6,
+ false,
+ I40IW_ARP_DELETE);
+ return NOTIFY_OK;
+ case NETDEV_UP:
+ /* Fall through */
+ case NETDEV_CHANGEADDR:
+ i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+ i40iw_manage_arp_cache(iwdev,
+ netdev->dev_addr,
+ local_ipaddr6,
+ false,
+ I40IW_ARP_ADD);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_net_event - system notifier for net events
+ * @notifier: notifier block (not used)
+ * @event: event for notifier
+ * @ptr: neighbor
+ */
+int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
+{
+ struct neighbour *neigh = ptr;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *iwhdl;
+ __be32 *p;
+ u32 local_ipaddr[4];
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);
+ if (!iwhdl)
+ return NOTIFY_DONE;
+ iwdev = &iwhdl->device;
+ p = (__be32 *)neigh->primary_key;
+ i40iw_copy_ip_ntohl(local_ipaddr, p);
+ if (neigh->nud_state & NUD_VALID) {
+ i40iw_manage_arp_cache(iwdev,
+ neigh->ha,
+ local_ipaddr,
+ false,
+ I40IW_ARP_ADD);
+
+ } else {
+ i40iw_manage_arp_cache(iwdev,
+ neigh->ha,
+ local_ipaddr,
+ false,
+ I40IW_ARP_DELETE);
+ }
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_get_cqp_request - get cqp struct
+ * @cqp: device cqp ptr
+ * @wait: cqp to be used in wait mode
+ */
+struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
+{
+ struct i40iw_cqp_request *cqp_request = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ if (!list_empty(&cqp->cqp_avail_reqs)) {
+ cqp_request = list_entry(cqp->cqp_avail_reqs.next,
+ struct i40iw_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ }
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ if (!cqp_request) {
+ cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
+ if (cqp_request) {
+ cqp_request->dynamic = true;
+ INIT_LIST_HEAD(&cqp_request->list);
+ init_waitqueue_head(&cqp_request->waitq);
+ }
+ }
+ if (!cqp_request) {
+ i40iw_pr_err("CQP Request Fail: No Memory");
+ return NULL;
+ }
+
+ if (wait) {
+ atomic_set(&cqp_request->refcount, 2);
+ cqp_request->waiting = true;
+ } else {
+ atomic_set(&cqp_request->refcount, 1);
+ }
+ return cqp_request;
+}
+
+/**
+ * i40iw_free_cqp_request - free cqp request
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
+{
+ unsigned long flags;
+
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ cqp_request->request_done = false;
+ cqp_request->callback_fcn = NULL;
+ cqp_request->waiting = false;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ }
+}
+
+/**
+ * i40iw_put_cqp_request - dec ref count and free if 0
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
+ struct i40iw_cqp_request *cqp_request)
+{
+ if (atomic_dec_and_test(&cqp_request->refcount))
+ i40iw_free_cqp_request(cqp, cqp_request);
+}
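
The refcounting above gives a waiting request two references: one dropped by the completion path and one dropped by i40iw_wait_event() via i40iw_put_cqp_request(). A condensed lifecycle sketch (kernel context; the command helpers later in this file, e.g. i40iw_cqp_sds_cmd(), follow this same shape):

	struct i40iw_cqp_request *req;
	enum i40iw_status_code status;

	req = i40iw_get_cqp_request(&iwdev->cqp, true);	/* refcount = 2 */
	if (!req)
		return I40IW_ERR_NO_MEMORY;
	req->info.cqp_cmd = OP_QP_DESTROY;	/* fill in command info ... */
	req->info.post_sq = 1;
	/* posts the command; for a waiting request this sleeps until the
	 * completion side sets request_done, then drops the waiter's ref */
	status = i40iw_handle_cqp_op(iwdev, req);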
+
+/**
+ * i40iw_free_qp - callback after destroy cqp completes
+ * @cqp_request: cqp request for destroy qp
+ * @num: not used
+ */
+static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
+{
+ struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+ struct i40iw_device *iwdev;
+ u32 qp_num = iwqp->ibqp.qp_num;
+
+ iwdev = iwqp->iwdev;
+
+ i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+ i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+}
+
+/**
+ * i40iw_wait_event - wait for completion
+ * @iwdev: iwarp device
+ * @cqp_request: cqp request to wait
+ */
+static int i40iw_wait_event(struct i40iw_device *iwdev,
+ struct i40iw_cqp_request *cqp_request)
+{
+ struct cqp_commands_info *info = &cqp_request->info;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ bool cqp_error = false;
+ int err_code = 0;
+ int timeout_ret = 0;
+
+ timeout_ret = wait_event_timeout(cqp_request->waitq,
+ cqp_request->request_done,
+ I40IW_EVENT_TIMEOUT);
+ if (!timeout_ret) {
+ i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
+ info->cqp_cmd, timeout_ret);
+ err_code = -ETIME;
+ i40iw_request_reset(iwdev);
+ goto done;
+ }
+ cqp_error = cqp_request->compl_info.error;
+ if (cqp_error) {
+ i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
+ info->cqp_cmd, cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+ err_code = -EPROTO;
+ goto done;
+ }
+done:
+ i40iw_put_cqp_request(iwcqp, cqp_request);
+ return err_code;
+}
+
+/**
+ * i40iw_handle_cqp_op - process cqp command
+ * @iwdev: iwarp device
+ * @cqp_request: cqp request to process
+ */
+enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+ struct i40iw_cqp_request
+ *cqp_request)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+ struct cqp_commands_info *info = &cqp_request->info;
+ int err_code = 0;
+
+ status = i40iw_process_cqp_cmd(dev, info);
+ if (status) {
+ i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
+ i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
+ return status;
+ }
+ if (cqp_request->waiting)
+ err_code = i40iw_wait_event(iwdev, cqp_request);
+ if (err_code)
+ status = I40IW_ERR_CQP_COMPL_ERROR;
+ return status;
+}
+
+/**
+ * i40iw_add_pdusecount - add pd refcount
+ * @iwpd: pd for refcount
+ */
+void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
+{
+ atomic_inc(&iwpd->usecount);
+}
+
+/**
+ * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
+ * @iwpd: pd for refcount
+ * @iwdev: iwarp device
+ */
+void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
+{
+ if (!atomic_dec_and_test(&iwpd->usecount))
+ return;
+ i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
+ kfree(iwpd);
+}
+
+/**
+ * i40iw_add_ref - add refcount for qp
+ * @ibqp: iqarp qp
+ */
+void i40iw_add_ref(struct ib_qp *ibqp)
+{
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
+
+ atomic_inc(&iwqp->refcount);
+}
+
+/**
+ * i40iw_rem_ref - rem refcount for qp and free if 0
+ * @ibqp: iqarp qp
+ */
+void i40iw_rem_ref(struct ib_qp *ibqp)
+{
+ struct i40iw_qp *iwqp;
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev;
+ u32 qp_num;
+
+ iwqp = to_iwqp(ibqp);
+ if (!atomic_dec_and_test(&iwqp->refcount))
+ return;
+
+ iwdev = iwqp->iwdev;
+ qp_num = iwqp->ibqp.qp_num;
+ iwdev->qp_table[qp_num] = NULL;
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_request->callback_fcn = i40iw_free_qp;
+ cqp_request->param = (void *)&iwqp->sc_qp;
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_QP_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Destroy QP fail");
+}
+
+/**
+ * i40iw_get_qp - get qp address
+ * @device: iwarp device
+ * @qpn: qp number
+ */
+struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
+{
+ struct i40iw_device *iwdev = to_iwdev(device);
+
+ if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
+ return NULL;
+
+ return &iwdev->qp_table[qpn]->ibqp;
+}
+
+/**
+ * i40iw_debug_buf - print debug msg and buffer if mask set
+ * @dev: hardware control device structure
+ * @mask: mask to compare for deciding whether to print
+ * @desc: description string printed with the buffer
+ * @buf: buffer address
+ * @size: size of buffer to print
+ */
+void i40iw_debug_buf(struct i40iw_sc_dev *dev,
+ enum i40iw_debug_flag mask,
+ char *desc,
+ u64 *buf,
+ u32 size)
+{
+ u32 i;
+
+ if (!(dev->debug_mask & mask))
+ return;
+ i40iw_debug(dev, mask, "%s\n", desc);
+ i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
+ (unsigned long long)virt_to_phys(buf));
+
+ for (i = 0; i < size; i += 8)
+ i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
+}
+
+/**
+ * i40iw_get_hw_addr - return hw addr
+ * @par: points to shared dev
+ */
+u8 __iomem *i40iw_get_hw_addr(void *par)
+{
+ struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;
+
+ return dev->hw->hw_addr;
+}
+
+/**
+ * i40iw_remove_head - return head entry and remove from list
+ * @list: list for entry
+ */
+void *i40iw_remove_head(struct list_head *list)
+{
+ struct list_head *entry;
+
+ if (list_empty(list))
+ return NULL;
+
+ entry = (void *)list->next;
+ list_del(entry);
+ return (void *)entry;
+}
+
+/**
+ * i40iw_allocate_dma_mem - Memory alloc helper fn
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ */
+enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
+ struct i40iw_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+
+ if (!mem)
+ return I40IW_ERR_PARAM;
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
+ (dma_addr_t *)&mem->pa, GFP_KERNEL);
+ if (!mem->va)
+ return I40IW_ERR_NO_MEMORY;
+ return 0;
+}
+
+/**
+ * i40iw_free_dma_mem - Memory free helper fn
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ */
+void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
+{
+ struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+
+ if (!mem || !mem->va)
+ return;
+
+ dma_free_coherent(&pcidev->dev, mem->size,
+ mem->va, (dma_addr_t)mem->pa);
+ mem->va = NULL;
+}
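
The two helpers pair naturally; a minimal sketch (kernel context; "hw" is an assumed initialized struct i40iw_hw):

	struct i40iw_dma_mem mem;

	/* 4 KB buffer; the size is rounded up to a multiple of 256 */
	if (!i40iw_allocate_dma_mem(hw, &mem, 4096, 256)) {
		/* mem.va (virtual) and mem.pa (bus address) are valid here */
		i40iw_free_dma_mem(hw, &mem);	/* resets mem.va to NULL */
	}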
+
+/**
+ * i40iw_allocate_virt_mem - virtual memory alloc helper fn
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ */
+enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
+ struct i40iw_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return I40IW_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (!mem->va)
+ return I40IW_ERR_NO_MEMORY;
+
+ return 0;
+}
+
+/**
+ * i40iw_free_virt_mem - virtual memory free helper fn
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ */
+enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
+ struct i40iw_virt_mem *mem)
+{
+ if (!mem)
+ return I40IW_ERR_PARAM;
+ kfree(mem->va);
+ mem->va = NULL;
+ return 0;
+}
+
+/**
+ * i40iw_cqp_sds_cmd - create cqp command for sd
+ * @dev: hardware control device structure
+ * @sdinfo: information for sd cqp
+ *
+ */
+enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_update_sds_info *sdinfo)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
+ sizeof(cqp_info->in.u.update_pe_sds.info));
+ cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.update_pe_sds.dev = dev;
+ cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Update SD's fail");
+ return status;
+}
+
+/**
+ * i40iw_term_modify_qp - modify qp for term message
+ * @qp: hardware control qp
+ * @next_state: qp's next state
+ * @term: terminate code
+ * @term_len: length
+ */
+void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
+{
+ struct i40iw_qp *iwqp;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
+}
+
+/**
+ * i40iw_terminate_done - after terminate is completed
+ * @qp: hardware control qp
+ * @timeout_occurred: indicates if terminate timer expired
+ */
+void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
+{
+ struct i40iw_qp *iwqp;
+ u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
+ u8 hte = 0;
+ bool first_time;
+ unsigned long flags;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->hte_added) {
+ iwqp->hte_added = 0;
+ hte = 1;
+ }
+ first_time = !(qp->term_flags & I40IW_TERM_DONE);
+ qp->term_flags |= I40IW_TERM_DONE;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (first_time) {
+ if (!timeout_occurred)
+ i40iw_terminate_del_timer(qp);
+ else
+ next_iwarp_state = I40IW_QP_STATE_CLOSING;
+
+ i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
+ i40iw_cm_disconn(iwqp);
+ }
+}
+
+/**
+ * i40iw_terminate_timeout - timeout handler for terminate
+ * @context: points to iwarp qp
+ */
+static void i40iw_terminate_timeout(unsigned long context)
+{
+ struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
+ struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
+
+ i40iw_terminate_done(qp, 1);
+}
+
+/**
+ * i40iw_terminate_start_timer - start terminate timeout
+ * @qp: hardware control qp
+ */
+void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
+{
+ struct i40iw_qp *iwqp;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ init_timer(&iwqp->terminate_timer);
+ iwqp->terminate_timer.function = i40iw_terminate_timeout;
+ iwqp->terminate_timer.expires = jiffies + HZ;
+ iwqp->terminate_timer.data = (unsigned long)iwqp;
+ add_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * i40iw_terminate_del_timer - delete terminate timeout
+ * @qp: hardware control qp
+ */
+void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
+{
+ struct i40iw_qp *iwqp;
+
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ del_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * i40iw_cqp_generic_worker - generic worker for cqp
+ * @work: work pointer
+ */
+static void i40iw_cqp_generic_worker(struct work_struct *work)
+{
+ struct i40iw_virtchnl_work_info *work_info =
+ &((struct virtchnl_work *)work)->work_info;
+
+ if (work_info->worker_vf_dev)
+ work_info->callback_fcn(work_info->worker_vf_dev);
+}
+
+/**
+ * i40iw_cqp_spawn_worker - spawn worker thread
+ * @dev: hardware control device structure
+ * @work_info: work request info
+ * @iw_vf_idx: virtual function index
+ */
+void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_work_info *work_info,
+ u32 iw_vf_idx)
+{
+ struct virtchnl_work *work;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ work = &iwdev->virtchnl_w[iw_vf_idx];
+ memcpy(&work->work_info, work_info, sizeof(*work_info));
+ INIT_WORK(&work->work, i40iw_cqp_generic_worker);
+ queue_work(iwdev->virtchnl_wq, &work->work);
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_worker - worker to invoke the hmc function callback after cqp completion
+ * @work: work pointer for hmc info
+ */
+static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
+{
+ struct i40iw_cqp_request *cqp_request =
+ ((struct virtchnl_work *)work)->cqp_request;
+ struct i40iw_ccq_cqe_info ccq_cqe_info;
+ struct i40iw_hmc_fcn_info *hmcfcninfo =
+ &cqp_request->info.in.u.manage_hmc_pm.info;
+ struct i40iw_device *iwdev =
+ (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;
+
+ ccq_cqe_info.cqp = NULL;
+ ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
+ ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
+ ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
+ ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
+ ccq_cqe_info.scratch = 0;
+ ccq_cqe_info.error = cqp_request->compl_info.error;
+ hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
+ hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
+ i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
+ * @cqp_request: cqp request info struct for hmc fun
+ * @unused: unused param of callback
+ */
+static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
+ u32 unused)
+{
+ struct virtchnl_work *work;
+ struct i40iw_hmc_fcn_info *hmcfcninfo =
+ &cqp_request->info.in.u.manage_hmc_pm.info;
+ struct i40iw_device *iwdev =
+ (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->
+ back_dev;
+
+ if (hmcfcninfo && hmcfcninfo->callback_fcn) {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
+ atomic_inc(&cqp_request->refcount);
+ work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
+ work->cqp_request = cqp_request;
+ INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
+ queue_work(iwdev->virtchnl_wq, &work->work);
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
+ } else {
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
+ }
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
+ * @dev: hardware control device structure
+ * @hmcfcninfo: info for hmc
+ */
+enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_fcn_info *hmcfcninfo)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
+ cqp_request->param = hmcfcninfo;
+ memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
+ sizeof(*hmcfcninfo));
+ cqp_info->in.u.manage_hmc_pm.dev = dev;
+ cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Manage HMC fail");
+ return status;
+}
+
+/**
+ * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
+ * @dev: hardware control device structure
+ * @values_mem: buffer for fpm
+ * @hmc_fn_id: function id for fpm
+ */
+enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *values_mem,
+ u8 hmc_fn_id)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ cqp_request->param = NULL;
+ cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
+ cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
+ cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
+ cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
+ cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Query FPM fail");
+ return status;
+}
+
+/**
+ * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
+ * @dev: hardware control device structure
+ * @values_mem: buffer with fpm values
+ * @hmc_fn_id: function id for fpm
+ */
+enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *values_mem,
+ u8 hmc_fn_id)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ cqp_request->param = NULL;
+ cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
+ cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
+ cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
+ cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
+ cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Commit FPM fail");
+ return status;
+}
+
+/**
+ * i40iw_vf_wait_vchnl_resp - wait for channel msg
+ * @dev: hardware control device structure
+ */
+enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_device *iwdev = dev->back_dev;
+ enum i40iw_status_code err_code = 0;
+ int timeout_ret;
+
+ i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
+ __func__, __LINE__, dev, iwdev);
+ atomic_add(2, &iwdev->vchnl_msgs);
+ timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
+ (atomic_read(&iwdev->vchnl_msgs) == 1),
+ I40IW_VCHNL_EVENT_TIMEOUT);
+ atomic_dec(&iwdev->vchnl_msgs);
+ if (!timeout_ret) {
+ i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
+ err_code = I40IW_ERR_TIMEOUT;
+ }
+ return err_code;
+}
+
+/**
+ * i40iw_ieq_mpa_crc_ae - generate AE for crc error
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_qp_flush_info info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
+ memset(&info, 0, sizeof(info));
+ info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
+ info.generate_ae = true;
+ info.ae_source = 0x3;
+ (void)i40iw_hw_flush_wqes(iwdev, qp, &info, false);
+}
+
+/**
+ * i40iw_init_hash_desc - initialize hash for crc calculation
+ * @desc: double pointer filled in with an allocated crc32c shash descriptor
+ */
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *tdesc;
+
+ tfm = crypto_alloc_shash("crc32c", 0, 0);
+ if (IS_ERR(tfm))
+ return I40IW_ERR_MPA_CRC;
+
+ tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!tdesc) {
+ crypto_free_shash(tfm);
+ return I40IW_ERR_MPA_CRC;
+ }
+ tdesc->tfm = tfm;
+ *desc = tdesc;
+
+ return 0;
+}
+
+/**
+ * i40iw_free_hash_desc - free hash desc
+ * @desc: to be freed
+ */
+void i40iw_free_hash_desc(struct shash_desc *desc)
+{
+ if (desc) {
+ crypto_free_shash(desc->tfm);
+ kfree(desc);
+ }
+}
+
+/**
+ * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
+ * @dev: hardware control device structure
+ * @mem: buffer ptr for fpm to be allocated
+ * @return: memory allocation status
+ */
+enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
+ struct i40iw_dma_mem *mem)
+{
+ enum i40iw_status_code status;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
+ I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
+ return status;
+}
+
+/**
+ * i40iw_ieq_check_mpacrc - check if mpa crc is OK
+ * @desc: desc for hash
+ * @addr: address of buffer for crc
+ * @length: length of buffer
+ * @value: value to be compared
+ */
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
+ void *addr,
+ u32 length,
+ u32 value)
+{
+ u32 crc = 0;
+ int ret;
+ enum i40iw_status_code ret_code = 0;
+
+ crypto_shash_init(desc);
+ ret = crypto_shash_update(desc, addr, length);
+ if (!ret)
+ crypto_shash_final(desc, (u8 *)&crc);
+ if (crc != value) {
+ i40iw_pr_err("mpa crc check fail\n");
+ ret_code = I40IW_ERR_MPA_CRC;
+ }
+ return ret_code;
+}
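
Putting the three crc helpers together (kernel context; "buf", "len" and "received_crc" stand in for an MPA segment and the crc carried on the wire):

	struct shash_desc *desc = NULL;

	if (!i40iw_init_hash_desc(&desc)) {
		if (i40iw_ieq_check_mpacrc(desc, buf, len, received_crc))
			pr_info("dropping segment with bad mpa crc\n");
		i40iw_free_hash_desc(desc);
	}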
+
+/**
+ * i40iw_ieq_get_qp - get qp based on the address quad (4-tuple) in a puda buffer
+ * @dev: hardware control device structure
+ * @buf: receive puda buffer on exception q
+ */
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
+ struct i40iw_puda_buf *buf)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_qp *iwqp;
+ struct i40iw_cm_node *cm_node;
+ u32 loc_addr[4], rem_addr[4];
+ u16 loc_port, rem_port;
+ struct ipv6hdr *ip6h;
+ struct iphdr *iph = (struct iphdr *)buf->iph;
+ struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+ if (iph->version == 4) {
+ memset(loc_addr, 0, sizeof(loc_addr));
+ loc_addr[0] = ntohl(iph->daddr);
+ memset(rem_addr, 0, sizeof(rem_addr));
+ rem_addr[0] = ntohl(iph->saddr);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
+ i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
+ }
+ loc_port = ntohs(tcph->dest);
+ rem_port = ntohs(tcph->source);
+
+ cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
+ loc_addr, false);
+ if (!cm_node)
+ return NULL;
+ iwqp = cm_node->iwqp;
+ return &iwqp->sc_qp;
+}
+
+/**
+ * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @length: length of buffer
+ * @seqnum: seq number for tcp
+ */
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
+{
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u16 iphlen;
+ u16 packetsize;
+ u8 *addr = (u8 *)buf->mem.va;
+
+ iphlen = (buf->ipv4) ? 20 : 40;
+ iph = (struct iphdr *)(addr + buf->maclen);
+ tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
+ packetsize = length + buf->tcphlen + iphlen;
+
+ iph->tot_len = htons(packetsize);
+ tcph->seq = htonl(seqnum);
+}
+
+/**
+ * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: puda completion information
+ * @buf: puda buffer
+ */
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+ struct i40iw_puda_buf *buf)
+{
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ u16 iphlen;
+ u16 pkt_len;
+ u8 *mem = (u8 *)buf->mem.va;
+ struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;
+
+ if (ethh->h_proto == htons(ETH_P_8021Q)) {
+ info->vlan_valid = true;
+ buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
+ }
+ buf->maclen = (info->vlan_valid) ? 18 : 14;
+ iphlen = (info->l3proto) ? 40 : 20;
+ buf->ipv4 = !info->l3proto;
+ buf->iph = mem + buf->maclen;
+ iph = (struct iphdr *)buf->iph;
+
+ buf->tcph = buf->iph + iphlen;
+ tcph = (struct tcphdr *)buf->tcph;
+
+ if (buf->ipv4) {
+ pkt_len = ntohs(iph->tot_len);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ pkt_len = ntohs(ip6h->payload_len) + iphlen;
+ }
+
+ buf->totallen = pkt_len + buf->maclen;
+
+ if (info->payload_len < buf->totallen - 4) {
+ i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n",
+ info->payload_len, buf->totallen);
+ return I40IW_ERR_INVALID_SIZE;
+ }
+
+ buf->tcphlen = (tcph->doff) << 2;
+ buf->datalen = pkt_len - iphlen - buf->tcphlen;
+ buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
+ buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
+ buf->seqnum = ntohl(tcph->seq);
+ return 0;
+}
+
+/**
+ * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
+ * @dev: hardware control device structure
+ */
+static void i40iw_hw_stats_timeout(unsigned long dev)
+{
+ struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev;
+ struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat;
+ struct i40iw_dev_pestat *vf_devstat = NULL;
+ u16 iw_vf_idx;
+ unsigned long flags;
+
+ /* PF */
+ pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats);
+ for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
+ spin_lock_irqsave(&pf_devstat->stats_lock, flags);
+ if (pf_dev->vf_dev[iw_vf_idx]) {
+ if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
+ vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat;
+ vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats);
+ }
+ }
+ spin_unlock_irqrestore(&pf_devstat->stats_lock, flags);
+ }
+
+ mod_timer(&pf_devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * i40iw_hw_stats_start_timer - Start periodic stats timer
+ * @dev: hardware control device structure
+ */
+void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+
+ init_timer(&devstat->stats_timer);
+ devstat->stats_timer.function = i40iw_hw_stats_timeout;
+ devstat->stats_timer.data = (unsigned long)dev;
+ mod_timer(&devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * i40iw_hw_stats_del_timer - Delete periodic stats timer
+ * @dev: hardware control device structure
+ */
+void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+
+ del_timer_sync(&devstat->stats_timer);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
new file mode 100644
index 000000000000..1fe3b84a06e4
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -0,0 +1,2437 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <asm/byteorder.h>
+#include <net/ip.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include "i40iw.h"
+
+/**
+ * i40iw_query_device - get device attributes
+ * @ibdev: device pointer from stack
+ * @props: returning device attributes
+ * @udata: user data
+ */
+static int i40iw_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+
+ if (udata->inlen || udata->outlen)
+ return -EINVAL;
+ memset(props, 0, sizeof(*props));
+ ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+ props->fw_ver = I40IW_FW_VERSION;
+ props->device_cap_flags = iwdev->device_cap_flags;
+ props->vendor_id = iwdev->vendor_id;
+ props->vendor_part_id = iwdev->vendor_part_id;
+ props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
+ props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
+ props->max_qp = iwdev->max_qp;
+ props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
+ props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ props->max_cq = iwdev->max_cq;
+ props->max_cqe = iwdev->max_cqe;
+ props->max_mr = iwdev->max_mr;
+ props->max_pd = iwdev->max_pd;
+ props->max_sge_rd = 1;
+ props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
+ props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->max_map_per_fmr = 1;
+ return 0;
+}
+
+/**
+ * i40iw_query_port - get port attributes
+ * @ibdev: device pointer from stack
+ * @port: port number for query
+ * @props: returning device attributes
+ */
+static int i40iw_query_port(struct ib_device *ibdev,
+ u8 port,
+ struct ib_port_attr *props)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct net_device *netdev = iwdev->netdev;
+
+ memset(props, 0, sizeof(*props));
+
+ props->max_mtu = IB_MTU_4096;
+ if (netdev->mtu >= 4096)
+ props->active_mtu = IB_MTU_4096;
+ else if (netdev->mtu >= 2048)
+ props->active_mtu = IB_MTU_2048;
+ else if (netdev->mtu >= 1024)
+ props->active_mtu = IB_MTU_1024;
+ else if (netdev->mtu >= 512)
+ props->active_mtu = IB_MTU_512;
+ else
+ props->active_mtu = IB_MTU_256;
+
+ props->lid = 1;
+ if (netif_carrier_ok(iwdev->netdev))
+ props->state = IB_PORT_ACTIVE;
+ else
+ props->state = IB_PORT_DOWN;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = 1;
+ props->max_msg_sz = 0x80000000;
+ return 0;
+}
+
+/**
+ * i40iw_alloc_ucontext - Allocate the user context data structure
+ * @ibdev: device pointer from stack
+ * @udata: user data
+ *
+ * This keeps track of all objects associated with a particular
+ * user-mode client.
+ */
+static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_alloc_ucontext_req req;
+ struct i40iw_alloc_ucontext_resp uresp;
+ struct i40iw_ucontext *ucontext;
+
+ if (ib_copy_from_udata(&req, udata, sizeof(req)))
+ return ERR_PTR(-EINVAL);
+
+ if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
+ i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
+ req.userspace_ver, I40IW_ABI_USERSPACE_VER);
+ return ERR_PTR(-EINVAL);
+ }
+
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.max_qps = iwdev->max_qp;
+ uresp.max_pds = iwdev->max_pd;
+ uresp.wq_size = iwdev->max_qp_wr * 2;
+ uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
+
+ ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
+ if (!ucontext)
+ return ERR_PTR(-ENOMEM);
+
+ ucontext->iwdev = iwdev;
+
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+ kfree(ucontext);
+ return ERR_PTR(-EFAULT);
+ }
+
+ INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
+ spin_lock_init(&ucontext->cq_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
+ spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+
+ return &ucontext->ibucontext;
+}
+
+/**
+ * i40iw_dealloc_ucontext - deallocate the user context data structure
+ * @context: user context created during alloc
+ */
+static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct i40iw_ucontext *ucontext = to_ucontext(context);
+ unsigned long flags;
+
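+	/* Refuse teardown while CQ or QP memory registrations are still outstanding. */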
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!list_empty(&ucontext->cq_reg_mem_list)) {
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ if (!list_empty(&ucontext->qp_reg_mem_list)) {
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ kfree(ucontext);
+ return 0;
+}
+
+/**
+ * i40iw_mmap - user memory map
+ * @context: context created during alloc
+ * @vma: kernel info for user memory map
+ */
+static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct i40iw_ucontext *ucontext;
+ u64 db_addr_offset;
+ u64 push_offset;
+
+ ucontext = to_ucontext(context);
+ if (ucontext->iwdev->sc_dev.is_pf) {
+ db_addr_offset = I40IW_DB_ADDR_OFFSET;
+ push_offset = I40IW_PUSH_OFFSET;
+ if (vma->vm_pgoff)
+ vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
+ } else {
+ db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
+ push_offset = I40IW_VF_PUSH_OFFSET;
+ if (vma->vm_pgoff)
+ vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
+ }
+
+ vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+
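+	/* The doorbell page is always mapped uncached; push pages alternate between uncached and write-combined mappings. */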
+ if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_private_data = ucontext;
+ } else {
+ if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
+ PAGE_SIZE, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+/**
+ * i40iw_alloc_push_page - allocate a push page for qp
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ */
+static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+
+ if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
+ return;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return;
+
+ atomic_inc(&cqp_request->refcount);
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+
+ cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 0;
+ cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (!status)
+ qp->push_idx = cqp_request->compl_info.op_ret_val;
+ else
+ i40iw_pr_err("CQP-OP Push page fail");
+ i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+}
+
+/**
+ * i40iw_dealloc_push_page - free a push page for qp
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ */
+static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ enum i40iw_status_code status;
+
+ if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
+ return;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+
+ cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
+ cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 1;
+ cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (!status)
+ qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+ else
+ i40iw_pr_err("CQP-OP Push page fail");
+}
+
+/**
+ * i40iw_alloc_pd - allocate protection domain
+ * @ibdev: device pointer from stack
+ * @context: user context created during alloc
+ * @udata: user data
+ */
+static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct i40iw_pd *iwpd;
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_alloc_pd_resp uresp;
+ struct i40iw_sc_pd *sc_pd;
+ u32 pd_id = 0;
+ int err;
+
+ err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
+ iwdev->max_pd, &pd_id, &iwdev->next_pd);
+ if (err) {
+ i40iw_pr_err("alloc resource failed\n");
+ return ERR_PTR(err);
+ }
+
+ iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
+ if (!iwpd) {
+ err = -ENOMEM;
+ goto free_res;
+ }
+
+ sc_pd = &iwpd->sc_pd;
+ dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
+
+ if (context) {
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.pd_id = pd_id;
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+ err = -EFAULT;
+ goto error;
+ }
+ }
+
+ i40iw_add_pdusecount(iwpd);
+ return &iwpd->ibpd;
+error:
+ kfree(iwpd);
+free_res:
+ i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
+ return ERR_PTR(err);
+}
+
+/**
+ * i40iw_dealloc_pd - deallocate pd
+ * @ibpd: ptr of pd to be deallocated
+ */
+static int i40iw_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct i40iw_pd *iwpd = to_iwpd(ibpd);
+ struct i40iw_device *iwdev = to_iwdev(ibpd->device);
+
+ i40iw_rem_pdusecount(iwpd, iwdev);
+ return 0;
+}
+
+/**
+ * i40iw_qp_roundup - round up qp ring size to the next power of two
+ * @wr_ring_size: ring size to round up
+ */
+static int i40iw_qp_roundup(u32 wr_ring_size)
+{
+ int scount = 1;
+
+ if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
+ wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
+
+ for (wr_ring_size--; scount <= 16; scount *= 2)
+ wr_ring_size |= wr_ring_size >> scount;
+ return ++wr_ring_size;
+}
+
+/**
+ * i40iw_get_pbl - Retrieve pbl from a list given a virtual
+ * address
+ * @va: user virtual address
+ * @pbl_list: pbl list to search in (QP's or CQ's)
+ */
+static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
+ struct list_head *pbl_list)
+{
+ struct i40iw_pbl *iwpbl;
+
+ list_for_each_entry(iwpbl, pbl_list, list) {
+ if (iwpbl->user_base == va) {
+ list_del(&iwpbl->list);
+ return iwpbl;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * i40iw_free_qp_resources - free up memory resources for qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @qp_num: qp number assigned
+ */
+void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ u32 qp_num)
+{
+ i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
+ if (qp_num)
+ i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
+ kfree(iwqp->kqp.wrid_mem);
+ iwqp->kqp.wrid_mem = NULL;
+ kfree(iwqp->allocated_buffer);
+ iwqp->allocated_buffer = NULL;
+}
+
+/**
+ * i40iw_clean_cqes - clean cq entries for qp
+ * @iwqp: qp ptr (user or kernel)
+ * @iwcq: cq ptr
+ */
+static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
+{
+ struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
+
+ ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
+}
+
+/**
+ * i40iw_destroy_qp - destroy qp
+ * @ibqp: qp's ib pointer also to get to device's qp address
+ */
+static int i40iw_destroy_qp(struct ib_qp *ibqp)
+{
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+
+ iwqp->destroyed = 1;
+
+ if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
+ i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
+
+ if (!iwqp->user_mode) {
+ if (iwqp->iwscq) {
+ i40iw_clean_cqes(iwqp, iwqp->iwscq);
+ if (iwqp->iwrcq != iwqp->iwscq)
+ i40iw_clean_cqes(iwqp, iwqp->iwrcq);
+ }
+ }
+
+ i40iw_rem_ref(&iwqp->ibqp);
+ return 0;
+}
+
+/**
+ * i40iw_setup_virt_qp - setup for allocation of virtual qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr
+ * @init_info: initialize info to return
+ */
+static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ struct i40iw_qp_init_info *init_info)
+{
+ struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+ struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
+
+ iwqp->page = qpmr->sq_page;
+ init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
+ if (iwpbl->pbl_allocated) {
+ init_info->virtual_map = true;
+ init_info->sq_pa = qpmr->sq_pbl.idx;
+ init_info->rq_pa = qpmr->rq_pbl.idx;
+ } else {
+ init_info->sq_pa = qpmr->sq_pbl.addr;
+ init_info->rq_pa = qpmr->rq_pbl.addr;
+ }
+ return 0;
+}
+
+/**
+ * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ */
+static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
+ struct i40iw_qp *iwqp,
+ struct i40iw_qp_init_info *info)
+{
+ struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
+ u32 sqdepth, rqdepth;
+ u32 sq_size, rq_size;
+ u8 sqshift, rqshift;
+ u32 size;
+ enum i40iw_status_code status;
+ struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+
+ ukinfo->max_sq_frag_cnt = I40IW_MAX_WQ_FRAGMENT_COUNT;
+
+ sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
+ rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
+
+ status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, &sqshift);
+ if (!status)
+ status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, &rqshift);
+
+ if (status)
+ return -ENOSYS;
+
+ sqdepth = sq_size << sqshift;
+ rqdepth = rq_size << rqshift;
+
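+	/* One SQ wrid tracking entry per SQ WQE plus one u64 wrid per RQ WQE. */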
+ size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
+ iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
+
+ ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
+ if (!ukinfo->sq_wrtrk_array)
+ return -ENOMEM;
+
+ ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
+
+ size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
+ size += (I40IW_SHADOW_AREA_SIZE << 3);
+
+ status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
+ if (status) {
+ kfree(ukinfo->sq_wrtrk_array);
+ ukinfo->sq_wrtrk_array = NULL;
+ return -ENOMEM;
+ }
+
+ ukinfo->sq = mem->va;
+ info->sq_pa = mem->pa;
+
+ ukinfo->rq = &ukinfo->sq[sqdepth];
+ info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
+
+ ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
+ info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
+
+ ukinfo->sq_size = sq_size;
+ ukinfo->rq_size = rq_size;
+ ukinfo->qp_id = iwqp->ibqp.qp_num;
+ return 0;
+}
+
+/**
+ * i40iw_create_qp - create qp
+ * @ibpd: ptr of pd
+ * @init_attr: attributes for qp
+ * @udata: user data for create qp
+ */
+static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct i40iw_pd *iwpd = to_iwpd(ibpd);
+ struct i40iw_device *iwdev = to_iwdev(ibpd->device);
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_qp *iwqp;
+ struct i40iw_ucontext *ucontext;
+ struct i40iw_create_qp_req req;
+ struct i40iw_create_qp_resp uresp;
+ u32 qp_num = 0;
+ void *mem;
+ enum i40iw_status_code ret;
+ int err_code;
+ int sq_size;
+ int rq_size;
+ struct i40iw_sc_qp *qp;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_qp_init_info init_info;
+ struct i40iw_create_qp_info *qp_info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ struct i40iw_qp_host_ctx_info *ctx_info;
+ struct i40iwarp_offload_info *iwarp_info;
+ unsigned long flags;
+
+ if (init_attr->create_flags)
+ return ERR_PTR(-EINVAL);
+ if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
+ init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+
+ memset(&init_info, 0, sizeof(init_info));
+
+ sq_size = init_attr->cap.max_send_wr;
+ rq_size = init_attr->cap.max_recv_wr;
+
+ init_info.qp_uk_init_info.sq_size = sq_size;
+ init_info.qp_uk_init_info.rq_size = rq_size;
+ init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
+ init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
+
+ mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
+
+ iwqp = (struct i40iw_qp *)mem;
+ qp = &iwqp->sc_qp;
+ qp->back_qp = (void *)iwqp;
+ qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+
+ iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
+
+ if (i40iw_allocate_dma_mem(dev->hw,
+ &iwqp->q2_ctx_mem,
+ I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
+ 256)) {
+ i40iw_pr_err("dma_mem failed\n");
+ err_code = -ENOMEM;
+ goto error;
+ }
+
+ init_info.q2 = iwqp->q2_ctx_mem.va;
+ init_info.q2_pa = iwqp->q2_ctx_mem.pa;
+
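+	/* The QP host context shares the DMA allocation, immediately after the Q2 buffer. */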
+ init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
+ init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
+
+ err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
+ &qp_num, &iwdev->next_qp);
+ if (err_code) {
+ i40iw_pr_err("qp resource\n");
+ goto error;
+ }
+
+ iwqp->allocated_buffer = mem;
+ iwqp->iwdev = iwdev;
+ iwqp->iwpd = iwpd;
+ iwqp->ibqp.qp_num = qp_num;
+ qp = &iwqp->sc_qp;
+ iwqp->iwscq = to_iwcq(init_attr->send_cq);
+ iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
+
+ iwqp->host_ctx.va = init_info.host_ctx;
+ iwqp->host_ctx.pa = init_info.host_ctx_pa;
+ iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
+
+ init_info.pd = &iwpd->sc_pd;
+ init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+ iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
+
+ if (init_attr->qp_type != IB_QPT_RC) {
+ err_code = -ENOSYS;
+ goto error;
+ }
+ if (iwdev->push_mode)
+ i40iw_alloc_push_page(iwdev, qp);
+ if (udata) {
+ err_code = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (err_code) {
+ i40iw_pr_err("ib_copy_from_data\n");
+ goto error;
+ }
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ if (ibpd->uobject && ibpd->uobject->context) {
+ iwqp->user_mode = 1;
+ ucontext = to_ucontext(ibpd->uobject->context);
+
+ if (req.user_wqe_buffers) {
+ spin_lock_irqsave(
+ &ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = i40iw_get_pbl(
+ (unsigned long)req.user_wqe_buffers,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(
+ &ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ err_code = -ENODATA;
+ i40iw_pr_err("no pbl info\n");
+ goto error;
+ }
+ }
+ }
+ err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
+ } else {
+ err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
+ }
+
+ if (err_code) {
+ i40iw_pr_err("setup qp failed\n");
+ goto error;
+ }
+
+ init_info.type = I40IW_QP_TYPE_IWARP;
+ ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
+ if (ret) {
+ err_code = -EPROTO;
+ i40iw_pr_err("qp_init fail\n");
+ goto error;
+ }
+ ctx_info = &iwqp->ctx_info;
+ iwarp_info = &iwqp->iwarp_info;
+ iwarp_info->rd_enable = true;
+ iwarp_info->wr_rdresp_en = true;
+ if (!iwqp->user_mode)
+ iwarp_info->priv_mode_en = true;
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->rdmap_ver = 1;
+
+ ctx_info->iwarp_info_valid = true;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
+ ctx_info->push_mode_en = false;
+ } else {
+ ctx_info->push_mode_en = true;
+ ctx_info->push_idx = qp->push_idx;
+ }
+
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ (u64 *)iwqp->host_ctx.va,
+ ctx_info);
+ ctx_info->iwarp_info_valid = false;
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto error;
+ }
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
+
+ cqp_info->cqp_cmd = OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ ret = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (ret) {
+ i40iw_pr_err("CQP-OP QP create fail");
+ err_code = -EACCES;
+ goto error;
+ }
+
+ i40iw_add_ref(&iwqp->ibqp);
+ spin_lock_init(&iwqp->lock);
+ iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ iwdev->qp_table[qp_num] = iwqp;
+ i40iw_add_pdusecount(iwqp->iwpd);
+ if (ibpd->uobject && udata) {
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.actual_sq_size = sq_size;
+ uresp.actual_rq_size = rq_size;
+ uresp.qp_id = qp_num;
+ uresp.push_idx = qp->push_idx;
+ err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (err_code) {
+ i40iw_pr_err("copy_to_udata failed\n");
+ i40iw_destroy_qp(&iwqp->ibqp);
+ /* let the completion of the qp destroy free the qp */
+ return ERR_PTR(err_code);
+ }
+ }
+
+ return &iwqp->ibqp;
+error:
+ i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+ kfree(mem);
+ return ERR_PTR(err_code);
+}
+
+/**
+ * i40iw_query_qp - query qp attributes
+ * @ibqp: qp pointer
+ * @attr: attributes pointer
+ * @attr_mask: Not used
+ * @init_attr: qp attributes to return
+ */
+static int i40iw_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
+ attr->qp_access_flags = 0;
+ attr->cap.max_send_wr = qp->qp_uk.sq_size;
+ attr->cap.max_recv_wr = qp->qp_uk.rq_size;
+ attr->cap.max_recv_sge = 1;
+ attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+ init_attr->event_handler = iwqp->ibqp.event_handler;
+ init_attr->qp_context = iwqp->ibqp.qp_context;
+ init_attr->send_cq = iwqp->ibqp.send_cq;
+ init_attr->recv_cq = iwqp->ibqp.recv_cq;
+ init_attr->srq = iwqp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ return 0;
+}
+
+/**
+ * i40iw_hw_modify_qp - setup cqp for modify qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: info for modify qp
+ * @wait: flag to wait or not for modify qp completion
+ */
+void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
+ struct i40iw_modify_qp_info *info, bool wait)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_modify_qp_info *m_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.qp_modify.info;
+ memcpy(m_info, info, sizeof(*m_info));
+ cqp_info->cqp_cmd = OP_QP_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Modify QP fail");
+}
+
+/**
+ * i40iw_modify_qp - modify qp request
+ * @ibqp: qp's pointer for modify
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct i40iw_device *iwdev = iwqp->iwdev;
+ struct i40iw_qp_host_ctx_info *ctx_info;
+ struct i40iwarp_offload_info *iwarp_info;
+ struct i40iw_modify_qp_info info;
+ u8 issue_modify_qp = 0;
+ u8 dont_wait = 0;
+	int err;
+ unsigned long flags;
+
+ memset(&info, 0, sizeof(info));
+ ctx_info = &iwqp->ctx_info;
+ iwarp_info = &iwqp->iwarp_info;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+
+ if (attr_mask & IB_QP_STATE) {
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ case IB_QPS_RTR:
+ if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
+ info.next_iwarp_state = I40IW_QP_STATE_IDLE;
+ issue_modify_qp = 1;
+ }
+ break;
+ case IB_QPS_RTS:
+ if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
+ (!iwqp->cm_id)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ issue_modify_qp = 1;
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+ iwqp->hte_added = 1;
+ info.next_iwarp_state = I40IW_QP_STATE_RTS;
+ info.tcp_ctx_valid = true;
+ info.ord_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ break;
+ case IB_QPS_SQD:
+ if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
+ err = 0;
+ goto exit;
+ }
+ if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
+ (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
+ err = 0;
+ goto exit;
+ }
+ if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
+ err = -EINVAL;
+ goto exit;
+ }
+ info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_SQE:
+ if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
+ err = -EINVAL;
+ goto exit;
+ }
+ info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (iwqp->sc_qp.term_flags)
+ del_timer(&iwqp->terminate_timer);
+ info.next_iwarp_state = I40IW_QP_STATE_ERROR;
+ if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
+ iwdev->iw_status &&
+ (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
+ info.reset_tcp_conn = true;
+ else
+ dont_wait = 1;
+ issue_modify_qp = 1;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ iwqp->ibqp_state = attr->qp_state;
+
+ if (issue_modify_qp)
+ iwqp->iwarp_state = info.next_iwarp_state;
+ else
+ info.next_iwarp_state = iwqp->iwarp_state;
+ }
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ ctx_info->iwarp_info_valid = true;
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+ iwarp_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ iwarp_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ iwarp_info->rd_enable = true;
+ if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
+ iwarp_info->bind_en = true;
+
+ if (iwqp->user_mode) {
+ iwarp_info->rd_enable = true;
+ iwarp_info->wr_rdresp_en = true;
+ iwarp_info->priv_mode_en = false;
+ }
+ }
+
+ if (ctx_info->iwarp_info_valid) {
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ int ret;
+
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ (u64 *)iwqp->host_ctx.va,
+ ctx_info);
+ if (ret) {
+ i40iw_pr_err("setting QP context\n");
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (issue_modify_qp)
+ i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
+
+ if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
+ if (dont_wait) {
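+			/* No hardware TCP reset is issued; mark the connection closed and record the reset AE locally. */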
+ if (iwqp->cm_id && iwqp->hw_tcp_state) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
+ iwqp->last_aeq = I40IW_AE_RESET_SENT;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ }
+ }
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return err;
+}
+
+/**
+ * cq_free_resources - free up resources for cq
+ * @iwdev: iwarp device
+ * @iwcq: cq ptr
+ */
+static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
+{
+ struct i40iw_sc_cq *cq = &iwcq->sc_cq;
+
+ if (!iwcq->user_mode)
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
+ i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
+}
+
+/**
+ * cq_wq_destroy - send cq destroy cqp
+ * @iwdev: iwarp device
+ * @cq: hardware control cq
+ */
+static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+
+ cqp_info->cqp_cmd = OP_CQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_destroy.cq = cq;
+ cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Destroy QP fail");
+}
+
+/**
+ * i40iw_destroy_cq - destroy cq
+ * @ib_cq: cq pointer
+ */
+static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct i40iw_cq *iwcq;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_cq *cq;
+
+ if (!ib_cq) {
+ i40iw_pr_err("ib_cq == NULL\n");
+ return 0;
+ }
+
+ iwcq = to_iwcq(ib_cq);
+ iwdev = to_iwdev(ib_cq->device);
+ cq = &iwcq->sc_cq;
+ cq_wq_destroy(iwdev, cq);
+ cq_free_resources(iwdev, iwcq);
+ kfree(iwcq);
+ return 0;
+}
+
+/**
+ * i40iw_create_cq - create cq
+ * @ibdev: device pointer from stack
+ * @attr: attributes for cq
+ * @context: user context created during alloc
+ * @udata: user data
+ */
+static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_cq *iwcq;
+ struct i40iw_pbl *iwpbl;
+ u32 cq_num = 0;
+ struct i40iw_sc_cq *cq;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cq_init_info info;
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
+ unsigned long flags;
+ int err_code;
+ int entries = attr->cqe;
+
+ if (entries > iwdev->max_cqe)
+ return ERR_PTR(-EINVAL);
+
+ iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
+ if (!iwcq)
+ return ERR_PTR(-ENOMEM);
+
+ memset(&info, 0, sizeof(info));
+
+ err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
+ iwdev->max_cq, &cq_num,
+ &iwdev->next_cq);
+ if (err_code)
+ goto error;
+
+ cq = &iwcq->sc_cq;
+ cq->back_cq = (void *)iwcq;
+ spin_lock_init(&iwcq->lock);
+
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+ iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
+ info.ceq_id = 0;
+ info.ceq_id_valid = true;
+ info.ceqe_mask = 1;
+ info.type = I40IW_CQ_TYPE_IWARP;
+ if (context) {
+ struct i40iw_ucontext *ucontext;
+ struct i40iw_create_cq_req req;
+ struct i40iw_cq_mr *cqmr;
+
+ memset(&req, 0, sizeof(req));
+ iwcq->user_mode = true;
+ ucontext = to_ucontext(context);
+		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+			err_code = -EFAULT;
+			goto cq_free_resources;
+		}
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!iwpbl) {
+ err_code = -EPROTO;
+ goto cq_free_resources;
+ }
+
+ iwcq->iwpbl = iwpbl;
+ iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+ info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
+ if (iwpbl->pbl_allocated) {
+ info.virtual_map = true;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
+ } else {
+ info.cq_base_pa = cqmr->cq_pbl.addr;
+ }
+ } else {
+ /* Kmode allocations */
+ int rsize;
+ int shadow;
+
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
+ rsize = round_up(rsize, 256);
+ shadow = I40IW_SHADOW_AREA_SIZE << 3;
+ status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
+ rsize + shadow, 256);
+ if (status) {
+ err_code = -ENOMEM;
+ goto cq_free_resources;
+ }
+ ukinfo->cq_base = iwcq->kmem.va;
+ info.cq_base_pa = iwcq->kmem.pa;
+ info.shadow_area_pa = info.cq_base_pa + rsize;
+ ukinfo->shadow_area = iwcq->kmem.va + rsize;
+ }
+
+ if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
+ i40iw_pr_err("init cq fail\n");
+ err_code = -EPROTO;
+ goto cq_free_resources;
+ }
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto cq_free_resources;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status) {
+ i40iw_pr_err("CQP-OP Create QP fail");
+ err_code = -EPROTO;
+ goto cq_free_resources;
+ }
+
+ if (context) {
+ struct i40iw_create_cq_resp resp;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.cq_id = info.cq_uk_init_info.cq_id;
+ resp.cq_size = info.cq_uk_init_info.cq_size;
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ i40iw_pr_err("copy to user data\n");
+ err_code = -EPROTO;
+ goto cq_destroy;
+ }
+ }
+
+ return (struct ib_cq *)iwcq;
+
+cq_destroy:
+ cq_wq_destroy(iwdev, cq);
+cq_free_resources:
+ cq_free_resources(iwdev, iwcq);
+error:
+ kfree(iwcq);
+ return ERR_PTR(err_code);
+}
+
+/**
+ * i40iw_get_user_access - get hw access from IB access
+ * @acc: IB access to return hw access
+ */
+static inline u16 i40iw_get_user_access(int acc)
+{
+ u16 access = 0;
+
+ access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
+ access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
+ access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
+ access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
+ return access;
+}
+
+/**
+ * i40iw_free_stag - free stag resource
+ * @iwdev: iwarp device
+ * @stag: stag to free
+ */
+static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
+{
+ u32 stag_idx;
+
+ stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
+ i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
+}
+
+/**
+ * i40iw_create_stag - create random stag
+ * @iwdev: iwarp device
+ */
+static u32 i40iw_create_stag(struct i40iw_device *iwdev)
+{
+ u32 stag = 0;
+ u32 stag_index = 0;
+ u32 next_stag_index;
+ u32 driver_key;
+ u32 random;
+ u8 consumer_key;
+ int ret;
+
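+	/* The STag is composed of a random consumer key in the low byte, the allocated index, and a random driver key in the remaining bits. */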
+ get_random_bytes(&random, sizeof(random));
+ consumer_key = (u8)random;
+
+ driver_key = random & ~iwdev->mr_stagmask;
+ next_stag_index = (random & iwdev->mr_stagmask) >> 8;
+ next_stag_index %= iwdev->max_mr;
+
+ ret = i40iw_alloc_resource(iwdev,
+ iwdev->allocated_mrs, iwdev->max_mr,
+ &stag_index, &next_stag_index);
+ if (!ret) {
+ stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
+ stag |= driver_key;
+ stag += (u32)consumer_key;
+ }
+ return stag;
+}
+
+/**
+ * i40iw_next_pbl_addr - Get next pbl address
+ * @palloc: Pointer to allocated pbles
+ * @pbl: pointer to a pble
+ * @pinfo: info pointer
+ * @idx: index
+ */
+static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
+ u64 *pbl,
+ struct i40iw_pble_info **pinfo,
+ u32 *idx)
+{
+ *idx += 1;
+ if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
+ return ++pbl;
+ *idx = 0;
+ (*pinfo)++;
+ return (u64 *)(*pinfo)->addr;
+}
+
+/**
+ * i40iw_copy_user_pgaddrs - copy user page addresses into pbles
+ * @iwmr: iwmr for IB's user page addresses
+ * @pbl: pble pointer to save 1 level or 0 level pble
+ * @level: indicated level 0, 1 or 2
+ */
+static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
+ u64 *pbl,
+ enum i40iw_pble_level level)
+{
+ struct ib_umem *region = iwmr->region;
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ int chunk_pages, entry, pg_shift, i;
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct i40iw_pble_info *pinfo;
+ struct scatterlist *sg;
+ u32 idx = 0;
+
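+	/* ffs() on the power-of-two page size yields log2(page_size) + 1, so pg_shift converts DMA lengths to page counts. */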
+ pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
+ pg_shift = ffs(region->page_size) - 1;
+ for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
+ chunk_pages = sg_dma_len(sg) >> pg_shift;
+ if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
+ !iwpbl->qp_mr.sq_page)
+ iwpbl->qp_mr.sq_page = sg_page(sg);
+ for (i = 0; i < chunk_pages; i++) {
+ *pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
+ pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
+ }
+ }
+}
+
+/**
+ * i40iw_setup_pbles - copy user page addresses to pbles
+ * @iwdev: iwarp device
+ * @iwmr: mr pointer for this memory registration
+ * @use_pbles: flag to use pbles (level 1) or the local page array (level 0)
+ */
+static int i40iw_setup_pbles(struct i40iw_device *iwdev,
+ struct i40iw_mr *iwmr,
+ bool use_pbles)
+{
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct i40iw_pble_info *pinfo;
+ u64 *pbl;
+ enum i40iw_status_code status;
+ enum i40iw_pble_level level = I40IW_LEVEL_1;
+
+ if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
+ return -ENOMEM;
+
+ if (use_pbles) {
+ mutex_lock(&iwdev->pbl_mutex);
+ status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
+ mutex_unlock(&iwdev->pbl_mutex);
+ if (status)
+ return -ENOMEM;
+
+ iwpbl->pbl_allocated = true;
+ level = palloc->level;
+ pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
+ pbl = (u64 *)pinfo->addr;
+ } else {
+ pbl = iwmr->pgaddrmem;
+ }
+
+ i40iw_copy_user_pgaddrs(iwmr, pbl, level);
+ return 0;
+}
+
+/**
+ * i40iw_handle_q_mem - handle memory for qp and cq
+ * @iwdev: iwarp device
+ * @req: information for q memory management
+ * @iwpbl: pble struct
+ * @use_pbles: flag to use pble
+ */
+static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
+ struct i40iw_mem_reg_req *req,
+ struct i40iw_pbl *iwpbl,
+ bool use_pbles)
+{
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct i40iw_mr *iwmr = iwpbl->iwmr;
+ struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
+ struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
+ struct i40iw_hmc_pble *hmc_p;
+ u64 *arr = iwmr->pgaddrmem;
+ int err;
+ int total;
+
+ total = req->sq_pages + req->rq_pages + req->cq_pages;
+
+ err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+ if (err)
+ return err;
+ if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ return -ENOMEM;
+ }
+
+ if (use_pbles)
+ arr = (u64 *)palloc->level1.addr;
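+	/* The shadow-area page address is stored right after the SQ, RQ and CQ page addresses. */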
+ if (req->reg_type == IW_MEMREG_TYPE_QP) {
+ hmc_p = &qpmr->sq_pbl;
+ qpmr->shadow = (dma_addr_t)arr[total];
+ if (use_pbles) {
+ hmc_p->idx = palloc->level1.idx;
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->idx = palloc->level1.idx + req->sq_pages;
+ } else {
+ hmc_p->addr = arr[0];
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->addr = arr[1];
+ }
+ } else { /* CQ */
+ hmc_p = &cqmr->cq_pbl;
+ cqmr->shadow = (dma_addr_t)arr[total];
+ if (use_pbles)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ }
+ return err;
+}
+
+/**
+ * i40iw_hwreg_mr - send cqp command for memory registration
+ * @iwdev: iwarp device
+ * @iwmr: iwarp mr pointer
+ * @access: access for MR
+ */
+static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
+ struct i40iw_mr *iwmr,
+ u16 access)
+{
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ struct i40iw_reg_ns_stag_info *stag_info;
+ struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ enum i40iw_status_code status;
+ int err = 0;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
+ memset(stag_info, 0, sizeof(*stag_info));
+ stag_info->va = (void *)(unsigned long)iwpbl->user_base;
+ stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
+ stag_info->stag_key = (u8)iwmr->stag;
+ stag_info->total_len = iwmr->length;
+ stag_info->access_rights = access;
+ stag_info->pd_id = iwpd->sc_pd.pd_id;
+ stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
+
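+	/* Multi-page MRs are described by a pble: level 1 programs the leaf index (chunk size 1), level 2 the root index (chunk size 3). */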
+ if (iwmr->page_cnt > 1) {
+ if (palloc->level == I40IW_LEVEL_1) {
+ stag_info->first_pm_pbl_index = palloc->level1.idx;
+ stag_info->chunk_size = 1;
+ } else {
+ stag_info->first_pm_pbl_index = palloc->level2.root.idx;
+ stag_info->chunk_size = 3;
+ }
+ } else {
+ stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
+ }
+
+ cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
+ cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
+
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status) {
+ err = -ENOMEM;
+ i40iw_pr_err("CQP-OP MR Reg fail");
+ }
+ return err;
+}
+
+/**
+ * i40iw_reg_user_mr - Register a user memory region
+ * @pd: ptr of pd
+ * @start: virtual start address
+ * @length: length of mr
+ * @virt: virtual address
+ * @acc: access of mr
+ * @udata: user data
+ */
+static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+ u64 start,
+ u64 length,
+ u64 virt,
+ int acc,
+ struct ib_udata *udata)
+{
+ struct i40iw_pd *iwpd = to_iwpd(pd);
+ struct i40iw_device *iwdev = to_iwdev(pd->device);
+ struct i40iw_ucontext *ucontext;
+ struct i40iw_pble_alloc *palloc;
+ struct i40iw_pbl *iwpbl;
+ struct i40iw_mr *iwmr;
+ struct ib_umem *region;
+ struct i40iw_mem_reg_req req;
+ u32 pbl_depth = 0;
+ u32 stag = 0;
+ u16 access;
+ u32 region_length;
+ bool use_pbles = false;
+ unsigned long flags;
+ int err = -ENOSYS;
+
+ region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+ if (IS_ERR(region))
+ return (struct ib_mr *)region;
+
+ if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+ ib_umem_release(region);
+ return ERR_PTR(-EFAULT);
+ }
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr) {
+ ib_umem_release(region);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->region = region;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ ucontext = to_ucontext(pd->uobject->context);
+ region_length = region->length + (start & 0xfff);
+ pbl_depth = region_length >> 12;
+ pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;
+ iwmr->length = region->length;
+
+ iwpbl->user_base = virt;
+ palloc = &iwpbl->pble_alloc;
+
+ iwmr->type = req.reg_type;
+ iwmr->page_cnt = pbl_depth;
+
+ switch (req.reg_type) {
+ case IW_MEMREG_TYPE_QP:
+ use_pbles = ((req.sq_pages + req.rq_pages) > 2);
+ err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ goto error;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ case IW_MEMREG_TYPE_CQ:
+ use_pbles = (req.cq_pages > 1);
+ err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ goto error;
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;
+ case IW_MEMREG_TYPE_MEM:
+ access = I40IW_ACCESS_FLAGS_LOCALREAD;
+
+ use_pbles = (iwmr->page_cnt != 1);
+ err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+ if (err)
+ goto error;
+
+ access |= i40iw_get_user_access(acc);
+ stag = i40iw_create_stag(iwdev);
+ if (!stag) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+
+ err = i40iw_hwreg_mr(iwdev, iwmr, access);
+ if (err) {
+ i40iw_free_stag(iwdev, stag);
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ iwmr->type = req.reg_type;
+ if (req.reg_type == IW_MEMREG_TYPE_MEM)
+ i40iw_add_pdusecount(iwpd);
+ return &iwmr->ibmr;
+
+error:
+ if (palloc->level != I40IW_LEVEL_0)
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ ib_umem_release(region);
+ kfree(iwmr);
+ return ERR_PTR(err);
+}
+
+/**
+ * i40iw_reg_phys_mr - register kernel physical memory
+ * @pd: ibpd pointer
+ * @addr: physical address of memory to register
+ * @size: size of memory to register
+ * @acc: Access rights
+ * @iova_start: start of virtual address for physical buffers
+ */
+struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
+ u64 addr,
+ u64 size,
+ int acc,
+ u64 *iova_start)
+{
+ struct i40iw_pd *iwpd = to_iwpd(pd);
+ struct i40iw_device *iwdev = to_iwdev(pd->device);
+ struct i40iw_pbl *iwpbl;
+ struct i40iw_mr *iwmr;
+ enum i40iw_status_code status;
+ u32 stag;
+ u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
+ int ret;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IW_MEMREG_TYPE_MEM;
+ iwpbl->user_base = *iova_start;
+ stag = i40iw_create_stag(iwdev);
+ if (!stag) {
+ ret = -EOVERFLOW;
+ goto err;
+ }
+ access |= i40iw_get_user_access(acc);
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->page_cnt = 1;
+ iwmr->pgaddrmem[0] = addr;
+ status = i40iw_hwreg_mr(iwdev, iwmr, access);
+ if (status) {
+ i40iw_free_stag(iwdev, stag);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ i40iw_add_pdusecount(iwpd);
+ return &iwmr->ibmr;
+ err:
+ kfree(iwmr);
+ return ERR_PTR(ret);
+}
+
+/**
+ * i40iw_get_dma_mr - register physical mem
+ * @pd: ptr of pd
+ * @acc: access for memory
+ */
+static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ u64 kva = 0;
+
+ return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva);
+}
+
+/**
+ * i40iw_del_memlist - delete pbl list entries for CQ/QP
+ * @iwmr: iwmr for IB's user page addresses
+ * @ucontext: ptr to user context
+ */
+static void i40iw_del_memlist(struct i40iw_mr *iwmr,
+ struct i40iw_ucontext *ucontext)
+{
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ unsigned long flags;
+
+ switch (iwmr->type) {
+ case IW_MEMREG_TYPE_CQ:
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!list_empty(&ucontext->cq_reg_mem_list))
+ list_del(&iwpbl->list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;
+ case IW_MEMREG_TYPE_QP:
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ if (!list_empty(&ucontext->qp_reg_mem_list))
+ list_del(&iwpbl->list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40iw_dereg_mr - deregister mr
+ * @ib_mr: mr ptr for dereg
+ */
+static int i40iw_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct ib_pd *ibpd = ib_mr->pd;
+ struct i40iw_pd *iwpd = to_iwpd(ibpd);
+ struct i40iw_mr *iwmr = to_iwmr(ib_mr);
+ struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
+ enum i40iw_status_code status;
+ struct i40iw_dealloc_stag_info *info;
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ u32 stag_idx;
+
+ if (iwmr->region)
+ ib_umem_release(iwmr->region);
+
+ if (iwmr->type != IW_MEMREG_TYPE_MEM) {
+ if (ibpd->uobject) {
+ struct i40iw_ucontext *ucontext;
+
+ ucontext = to_ucontext(ibpd->uobject->context);
+ i40iw_del_memlist(iwmr, ucontext);
+ }
+ if (iwpbl->pbl_allocated)
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ kfree(iwpbl->iwmr);
+ iwpbl->iwmr = NULL;
+ return 0;
+ }
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+
+ info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
+ info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
+ stag_idx = info->stag_idx;
+ info->mr = true;
+ if (iwpbl->pbl_allocated)
+ info->dealloc_pbl = true;
+
+ cqp_info->cqp_cmd = OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
+ i40iw_rem_pdusecount(iwpd, iwdev);
+ i40iw_free_stag(iwdev, iwmr->stag);
+ if (iwpbl->pbl_allocated)
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+ kfree(iwmr);
+ return 0;
+}
+
+/**
+ * i40iw_show_rev - show hardware revision
+ */
+static ssize_t i40iw_show_rev(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i40iw_ib_device *iwibdev = container_of(dev,
+ struct i40iw_ib_device,
+ ibdev.dev);
+ u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
+
+ return sprintf(buf, "%x\n", hw_rev);
+}
+
+/**
+ * i40iw_show_fw_ver - show firmware version
+ */
+static ssize_t i40iw_show_fw_ver(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 firmware_version = I40IW_FW_VERSION;
+
+ return sprintf(buf, "%u.%u\n", firmware_version,
+ (firmware_version & 0x000000ff));
+}
+
+/**
+ * i40iw_show_hca - show HCA type
+ */
+static ssize_t i40iw_show_hca(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "I40IW\n");
+}
+
+/**
+ * i40iw_show_board - show board ID
+ */
+static ssize_t i40iw_show_board(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
+
+static struct device_attribute *i40iw_dev_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_fw_ver,
+ &dev_attr_hca_type,
+ &dev_attr_board_id
+};
+
+/**
+ * i40iw_copy_sg_list - copy sg list for qp
+ * @sg_list: destination sg list
+ * @sgl: source sg list to copy from
+ * @num_sges: count of sg entries
+ */
+static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
+{
+ unsigned int i;
+
+ for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
+ sg_list[i].tag_off = sgl[i].addr;
+ sg_list[i].len = sgl[i].length;
+ sg_list[i].stag = sgl[i].lkey;
+ }
+}
+
+/**
+ * i40iw_post_send - kernel application wr
+ * @ibqp: qp ptr for wr
+ * @ib_wr: work request ptr
+ * @bad_wr: return of bad wr if err
+ */
+static int i40iw_post_send(struct ib_qp *ibqp,
+ struct ib_send_wr *ib_wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct i40iw_qp *iwqp;
+ struct i40iw_qp_uk *ukqp;
+ struct i40iw_post_sq_info info;
+ enum i40iw_status_code ret;
+ int err = 0;
+ unsigned long flags;
+
+ iwqp = (struct i40iw_qp *)ibqp;
+ ukqp = &iwqp->sc_qp.qp_uk;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ while (ib_wr) {
+ memset(&info, 0, sizeof(info));
+ info.wr_id = (u64)(ib_wr->wr_id);
+ if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
+ info.signaled = true;
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ info.read_fence = true;
+
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND:
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = I40IW_OP_TYPE_SEND_SOL;
+ else
+ info.op_type = I40IW_OP_TYPE_SEND;
+
+ if (ib_wr->send_flags & IB_SEND_INLINE) {
+ info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
+ info.op.inline_send.len = ib_wr->sg_list[0].length;
+ ret = ukqp->ops.iw_inline_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false);
+ } else {
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
+ ret = ukqp->ops.iw_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false);
+ }
+
+ if (ret)
+ err = -EIO;
+ break;
+ case IB_WR_RDMA_WRITE:
+ info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
+
+ if (ib_wr->send_flags & IB_SEND_INLINE) {
+ info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
+ info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
+ info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
+ ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
+ } else {
+ info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
+ ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
+ }
+
+ if (ret)
+ err = -EIO;
+ break;
+ case IB_WR_RDMA_READ:
+ info.op_type = I40IW_OP_TYPE_RDMA_READ;
+ info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
+ info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
+ info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
+ info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
+ ret = ukqp->ops.iw_rdma_read(ukqp, &info, false, false);
+ if (ret)
+ err = -EIO;
+ break;
+ default:
+ err = -EINVAL;
+ i40iw_pr_err(" upost_send bad opcode = 0x%x\n",
+ ib_wr->opcode);
+ break;
+ }
+
+ if (err)
+ break;
+ ib_wr = ib_wr->next;
+ }
+
+ if (err)
+ *bad_wr = ib_wr;
+ else
+ ukqp->ops.iw_qp_post_wr(ukqp);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ return err;
+}
+
+/**
+ * i40iw_post_recv - post receive wr for kernel application
+ * @ibqp: ib qp pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr caused an error
+ */
+static int i40iw_post_recv(struct ib_qp *ibqp,
+ struct ib_recv_wr *ib_wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct i40iw_qp *iwqp;
+ struct i40iw_qp_uk *ukqp;
+ struct i40iw_post_rq_info post_recv;
+ struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
+ enum i40iw_status_code ret = 0;
+ unsigned long flags;
+
+ iwqp = (struct i40iw_qp *)ibqp;
+ ukqp = &iwqp->sc_qp.qp_uk;
+
+ memset(&post_recv, 0, sizeof(post_recv));
+ spin_lock_irqsave(&iwqp->lock, flags);
+ while (ib_wr) {
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
+ post_recv.sg_list = sg_list;
+ ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
+ if (ret) {
+ i40iw_pr_err(" post_recv err %d\n", ret);
+ *bad_wr = ib_wr;
+ goto out;
+ }
+ ib_wr = ib_wr->next;
+ }
+ out:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (ret)
+ return -ENOSYS;
+ return 0;
+}
+
+/**
+ * i40iw_poll_cq - poll cq for completion (kernel apps)
+ * @ibcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: array of completion entries to fill in
+ */
+static int i40iw_poll_cq(struct ib_cq *ibcq,
+ int num_entries,
+ struct ib_wc *entry)
+{
+ struct i40iw_cq *iwcq;
+ int cqe_count = 0;
+ struct i40iw_cq_poll_info cq_poll_info;
+ enum i40iw_status_code ret;
+ struct i40iw_cq_uk *ukcq;
+ struct i40iw_sc_qp *qp;
+ unsigned long flags;
+
+ iwcq = (struct i40iw_cq *)ibcq;
+ ukcq = &iwcq->sc_cq.cq_uk;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ while (cqe_count < num_entries) {
+ ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
+ if (ret == I40IW_ERR_QUEUE_EMPTY) {
+ break;
+ } else if (ret) {
+ if (!cqe_count)
+ cqe_count = -1;
+ break;
+ }
+ entry->wc_flags = 0;
+ entry->wr_id = cq_poll_info.wr_id;
+ if (!cq_poll_info.error)
+ entry->status = IB_WC_SUCCESS;
+ else
+ entry->status = IB_WC_WR_FLUSH_ERR;
+
+ switch (cq_poll_info.op_type) {
+ case I40IW_OP_TYPE_RDMA_WRITE:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
+ case I40IW_OP_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case I40IW_OP_TYPE_SEND_SOL:
+ case I40IW_OP_TYPE_SEND_SOL_INV:
+ case I40IW_OP_TYPE_SEND_INV:
+ case I40IW_OP_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case I40IW_OP_TYPE_REC:
+ entry->opcode = IB_WC_RECV;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ break;
+ }
+
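+		/* Pack the hardware major/minor completion error codes into vendor_err. */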
+ entry->vendor_err =
+ cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
+ entry->ex.imm_data = 0;
+ qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
+ entry->qp = (struct ib_qp *)qp->back_qp;
+ entry->src_qp = cq_poll_info.qp_id;
+ entry->byte_len = cq_poll_info.bytes_xfered;
+ entry++;
+ cqe_count++;
+ }
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+ return cqe_count;
+}
+
+/**
+ * i40iw_req_notify_cq - arm cq kernel application
+ * @ibcq: cq to arm
+ * @notify_flags: notification flags
+ */
+static int i40iw_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags)
+{
+ struct i40iw_cq *iwcq;
+ struct i40iw_cq_uk *ukcq;
+ enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+
+ iwcq = (struct i40iw_cq *)ibcq;
+ ukcq = &iwcq->sc_cq.cq_uk;
+ if (notify_flags == IB_CQ_NEXT_COMP)
+ cq_notify = IW_CQ_COMPL_EVENT;
+ ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+ return 0;
+}
+
+/**
+ * i40iw_port_immutable - return port's immutable data
+ * @ibdev: ib dev struct
+ * @port_num: port number
+ * @immutable: immutable data for the port return
+ */
+static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = i40iw_query_port(ibdev, port_num, &attr);
+
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+ return 0;
+}
+
+/**
+ * i40iw_get_protocol_stats - populate the rdma_protocol_stats structure
+ * @ibdev: ib dev struct
+ * @stats: iw protocol stats struct
+ */
+static int i40iw_get_protocol_stats(struct ib_device *ibdev,
+ union rdma_protocol_stats *stats)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+ struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+ struct timespec curr_time;
+ static struct timespec last_rd_time = {0, 0};
+ enum i40iw_status_code status = 0;
+ unsigned long flags;
+
+ curr_time = current_kernel_time();
+ memset(stats, 0, sizeof(*stats));
+
+ if (dev->is_pf) {
+ spin_lock_irqsave(&devstat->stats_lock, flags);
+ devstat->ops.iw_hw_stat_read_all(devstat,
+ &devstat->hw_stats);
+ spin_unlock_irqrestore(&devstat->stats_lock, flags);
+ } else {
+ if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1)
+ status = i40iw_vchnl_vf_get_pe_stats(dev,
+ &devstat->hw_stats);
+
+ if (status)
+ return -ENOSYS;
+ }
+
+ stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS];
+ stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] +
+ hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC];
+ stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] +
+ hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD];
+ stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] +
+ hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE];
+ stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS];
+ stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS];
+ stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS];
+ stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] +
+ hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS];
+ stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG];
+ stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS];
+ stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG];
+
+ last_rd_time = curr_time;
+ return 0;
+}
+
+/**
+ * i40iw_query_gid - Query port GID
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: Entry index
+ * @gid: Global ID
+ */
+static int i40iw_query_gid(struct ib_device *ibdev,
+ u8 port,
+ int index,
+ union ib_gid *gid)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+
+ memset(gid->raw, 0, sizeof(gid->raw));
+ ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
+ return 0;
+}
+
+/**
+ * i40iw_modify_port - modify port properties
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @port_modify_mask: mask for port modifications
+ * @props: port properties
+ */
+static int i40iw_modify_port(struct ib_device *ibdev,
+ u8 port,
+ int port_modify_mask,
+ struct ib_port_modify *props)
+{
+ return 0;
+}
+
+/**
+ * i40iw_query_pkey - Query partition key
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: index of pkey
+ * @pkey: pointer to store the pkey
+ */
+static int i40iw_query_pkey(struct ib_device *ibdev,
+ u8 port,
+ u16 index,
+ u16 *pkey)
+{
+ *pkey = 0;
+ return 0;
+}
+
+/**
+ * i40iw_create_ah - create address handle
+ * @ibpd: pointer to protection domain
+ * @attr: address handle attributes
+ */
+static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
+ struct ib_ah_attr *attr)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+/**
+ * i40iw_destroy_ah - Destroy address handle
+ * @ah: pointer to address handle
+ */
+static int i40iw_destroy_ah(struct ib_ah *ah)
+{
+ return -ENOSYS;
+}
+
+/**
+ * i40iw_init_rdma_device - initialization of iwarp device
+ * @iwdev: iwarp device
+ */
+static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
+{
+ struct i40iw_ib_device *iwibdev;
+ struct net_device *netdev = iwdev->netdev;
+ struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
+
+ iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
+ if (!iwibdev) {
+ i40iw_pr_err("iwdev == NULL\n");
+ return NULL;
+ }
+ strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
+ iwibdev->ibdev.owner = THIS_MODULE;
+ iwdev->iwibdev = iwibdev;
+ iwibdev->iwdev = iwdev;
+
+ iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
+ ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
+
+ iwibdev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND);
+ iwibdev->ibdev.phys_port_cnt = 1;
+ iwibdev->ibdev.num_comp_vectors = 1;
+ iwibdev->ibdev.dma_device = &pcidev->dev;
+ iwibdev->ibdev.dev.parent = &pcidev->dev;
+ iwibdev->ibdev.query_port = i40iw_query_port;
+ iwibdev->ibdev.modify_port = i40iw_modify_port;
+ iwibdev->ibdev.query_pkey = i40iw_query_pkey;
+ iwibdev->ibdev.query_gid = i40iw_query_gid;
+ iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
+ iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
+ iwibdev->ibdev.mmap = i40iw_mmap;
+ iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
+ iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
+ iwibdev->ibdev.create_qp = i40iw_create_qp;
+ iwibdev->ibdev.modify_qp = i40iw_modify_qp;
+ iwibdev->ibdev.query_qp = i40iw_query_qp;
+ iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
+ iwibdev->ibdev.create_cq = i40iw_create_cq;
+ iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
+ iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
+ iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
+ iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
+ iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats;
+ iwibdev->ibdev.query_device = i40iw_query_device;
+ iwibdev->ibdev.create_ah = i40iw_create_ah;
+ iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
+ iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
+ if (!iwibdev->ibdev.iwcm) {
+ ib_dealloc_device(&iwibdev->ibdev);
+ i40iw_pr_err("iwcm == NULL\n");
+ return NULL;
+ }
+
+ iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
+ iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
+ iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
+ iwibdev->ibdev.iwcm->connect = i40iw_connect;
+ iwibdev->ibdev.iwcm->accept = i40iw_accept;
+ iwibdev->ibdev.iwcm->reject = i40iw_reject;
+ iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
+ iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
+ memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
+ sizeof(iwibdev->ibdev.iwcm->ifname));
+ iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
+ iwibdev->ibdev.poll_cq = i40iw_poll_cq;
+ iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
+ iwibdev->ibdev.post_send = i40iw_post_send;
+ iwibdev->ibdev.post_recv = i40iw_post_recv;
+
+ return iwibdev;
+}
+
+/**
+ * i40iw_port_ibevent - indicate port event
+ * @iwdev: iwarp device
+ */
+void i40iw_port_ibevent(struct i40iw_device *iwdev)
+{
+ struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
+ struct ib_event event;
+
+ event.device = &iwibdev->ibdev;
+ event.element.port_num = 1;
+ event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ ib_dispatch_event(&event);
+}
+
+/**
+ * i40iw_unregister_rdma_device - unregister iwarp device from IB core
+ * @iwibdev: rdma device ptr
+ */
+static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
+ device_remove_file(&iwibdev->ibdev.dev,
+ i40iw_dev_attributes[i]);
+ ib_unregister_device(&iwibdev->ibdev);
+}
+
+/**
+ * i40iw_destroy_rdma_device - destroy rdma device and free resources
+ * @iwibdev: IB device ptr
+ */
+void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
+{
+ if (!iwibdev)
+ return;
+
+ i40iw_unregister_rdma_device(iwibdev);
+ kfree(iwibdev->ibdev.iwcm);
+ iwibdev->ibdev.iwcm = NULL;
+ ib_dealloc_device(&iwibdev->ibdev);
+}
+
+/**
+ * i40iw_register_rdma_device - register iwarp device to IB
+ * @iwdev: iwarp device
+ */
+int i40iw_register_rdma_device(struct i40iw_device *iwdev)
+{
+ int i, ret;
+ struct i40iw_ib_device *iwibdev;
+
+ iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
+ if (!iwdev->iwibdev)
+ return -ENOMEM;
+ iwibdev = iwdev->iwibdev;
+
+ ret = ib_register_device(&iwibdev->ibdev, NULL);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
+ ret =
+ device_create_file(&iwibdev->ibdev.dev,
+ i40iw_dev_attributes[i]);
+ if (ret) {
+ while (i > 0) {
+ i--;
+ device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
+ }
+ ib_unregister_device(&iwibdev->ibdev);
+ goto error;
+ }
+ }
+ return 0;
+error:
+ kfree(iwdev->iwibdev->ibdev.iwcm);
+ iwdev->iwibdev->ibdev.iwcm = NULL;
+ ib_dealloc_device(&iwdev->iwibdev->ibdev);
+ return ret;
+}
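
For context, a sketch of how a caller might pair the registration entry points above (hypothetical caller, not part of this patch):

static int example_bringup(struct i40iw_device *iwdev)
{
	int err;

	err = i40iw_register_rdma_device(iwdev);
	if (err)
		return err;
	i40iw_port_ibevent(iwdev);	/* report initial link state */
	return 0;
}

static void example_teardown(struct i40iw_device *iwdev)
{
	i40iw_destroy_rdma_device(iwdev->iwibdev);
	iwdev->iwibdev = NULL;
}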
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
new file mode 100644
index 000000000000..1101f77080e6
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -0,0 +1,173 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VERBS_H
+#define I40IW_VERBS_H
+
+struct i40iw_ucontext {
+ struct ib_ucontext ibucontext;
+ struct i40iw_device *iwdev;
+ struct list_head cq_reg_mem_list;
+ spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
+ struct list_head qp_reg_mem_list;
+ spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
+};
+
+struct i40iw_pd {
+ struct ib_pd ibpd;
+ struct i40iw_sc_pd sc_pd;
+ atomic_t usecount;
+};
+
+struct i40iw_hmc_pble {
+ union {
+ u32 idx;
+ dma_addr_t addr;
+ };
+};
+
+struct i40iw_cq_mr {
+ struct i40iw_hmc_pble cq_pbl;
+ dma_addr_t shadow;
+};
+
+struct i40iw_qp_mr {
+ struct i40iw_hmc_pble sq_pbl;
+ struct i40iw_hmc_pble rq_pbl;
+ dma_addr_t shadow;
+ struct page *sq_page;
+};
+
+struct i40iw_pbl {
+ struct list_head list;
+ union {
+ struct i40iw_qp_mr qp_mr;
+ struct i40iw_cq_mr cq_mr;
+ };
+
+ bool pbl_allocated;
+ u64 user_base;
+ struct i40iw_pble_alloc pble_alloc;
+ struct i40iw_mr *iwmr;
+};
+
+#define MAX_SAVE_PAGE_ADDRS 4
+struct i40iw_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ struct ib_fmr ibfmr;
+ };
+ struct ib_umem *region;
+ u16 type;
+ u32 page_cnt;
+ u32 stag;
+ u64 length;
+ u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];
+ struct i40iw_pbl iwpbl;
+};
+
+struct i40iw_cq {
+ struct ib_cq ibcq;
+ struct i40iw_sc_cq sc_cq;
+ u16 cq_head;
+ u16 cq_size;
+ u16 cq_number;
+ bool user_mode;
+ u32 polled_completions;
+ u32 cq_mem_size;
+ struct i40iw_dma_mem kmem;
+ spinlock_t lock; /* for poll cq */
+ struct i40iw_pbl *iwpbl;
+};
+
+struct disconn_work {
+ struct work_struct work;
+ struct i40iw_qp *iwqp;
+};
+
+struct iw_cm_id;
+struct ietf_mpa_frame;
+struct i40iw_ud_file;
+
+struct i40iw_qp_kmode {
+ struct i40iw_dma_mem dma_mem;
+ u64 *wrid_mem;
+};
+
+struct i40iw_qp {
+ struct ib_qp ibqp;
+ struct i40iw_sc_qp sc_qp;
+ struct i40iw_device *iwdev;
+ struct i40iw_cq *iwscq;
+ struct i40iw_cq *iwrcq;
+ struct i40iw_pd *iwpd;
+ struct i40iw_qp_host_ctx_info ctx_info;
+ struct i40iwarp_offload_info iwarp_info;
+ void *allocated_buffer;
+ atomic_t refcount;
+ struct iw_cm_id *cm_id;
+ void *cm_node;
+ struct ib_mr *lsmm_mr;
+ struct work_struct work;
+ enum ib_qp_state ibqp_state;
+ u32 iwarp_state;
+ u32 qp_mem_size;
+ u32 last_aeq;
+ atomic_t close_timer_started;
+ spinlock_t lock; /* for post work requests */
+ struct i40iw_qp_context *iwqp_context;
+ void *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ u8 active_conn:1;
+ u8 user_mode:1;
+ u8 hte_added:1;
+ u8 flush_issued:1;
+ u8 destroyed:1;
+ u8 sig_all:1;
+ u8 pau_mode:1;
+ u8 rsvd:1;
+ u16 term_sq_flush_code;
+ u16 term_rq_flush_code;
+ u8 hw_iwarp_state;
+ u8 hw_tcp_state;
+ struct i40iw_qp_kmode kqp;
+ struct i40iw_dma_mem host_ctx;
+ struct timer_list terminate_timer;
+ struct i40iw_pbl *iwpbl;
+ struct i40iw_dma_mem q2_ctx_mem;
+ struct i40iw_dma_mem ietf_mem;
+};
+#endif
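
The verbs callbacks in i40iw_verbs.c recover these driver structures by casting the embedded ib_* object, which works only because it is the first member; the more robust container_of() idiom looks like this (sketch; accessor names are hypothetical):

static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct i40iw_cq, ibcq);
}

static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct i40iw_qp, ibqp);
}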
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.c b/drivers/infiniband/hw/i40iw/i40iw_vf.c
new file mode 100644
index 000000000000..cb0f18340e14
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_vf.c
@@ -0,0 +1,85 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+
+/**
+ * i40iw_manage_vf_pble_bp - manage vf pble
+ * @cqp: cqp structure whose send queue receives the WQE
+ * @info: pble info
+ * @scratch: u64 value returned with the cqp completion
+ * @post_sq: flag to post the WQE and ring the sq doorbell
+ */
+enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_manage_vf_pble_info *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ u64 temp, header, pd_pl_pba = 0;
+
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+
+ temp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) |
+ LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) |
+ LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX);
+ set_64bit_val(wqe, 16, temp);
+
+ header = LS_64((info->inv_pd_ent ? 1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) |
+ LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+ set_64bit_val(wqe, 24, header);
+
+ pd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA);
+ set_64bit_val(wqe, 32, pd_pl_pba);
+
+ i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
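
The LS_64() calls above shift values into named bit-fields of the 64-byte CQP WQE; a sketch of the shift/mask macro pair that convention presumes (names here are illustrative, not the driver's):

#define EX_FIELD_SHIFT	32
#define EX_FIELD_MASK	(0xffffULL << EX_FIELD_SHIFT)
#define EX_LS_64(val)	(((u64)(val) << EX_FIELD_SHIFT) & EX_FIELD_MASK)
#define EX_RS_64(reg)	(((reg) & EX_FIELD_MASK) >> EX_FIELD_SHIFT)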
+
+struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {
+ i40iw_manage_vf_pble_bp
+};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.h b/drivers/infiniband/hw/i40iw/i40iw_vf.h
new file mode 100644
index 000000000000..f649f3a62e13
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_vf.h
@@ -0,0 +1,62 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VF_H
+#define I40IW_VF_H
+
+struct i40iw_sc_cqp;
+
+struct i40iw_manage_vf_pble_info {
+ u32 sd_index;
+ u16 first_pd_index;
+ u16 pd_entry_cnt;
+ u8 inv_pd_ent;
+ u64 pd_pl_pba;
+};
+
+struct i40iw_vf_cqp_ops {
+ enum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *,
+ struct i40iw_manage_vf_pble_info *,
+ u64,
+ bool);
+};
+
+enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
+ struct i40iw_manage_vf_pble_info *info,
+ u64 scratch,
+ bool post_sq);
+
+extern struct i40iw_vf_cqp_ops iw_vf_cqp_ops;
+
+#endif
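
A caller reaches the VF PBLE operation through the exported ops table; a minimal sketch (hypothetical wrapper, not part of this patch):

static enum i40iw_status_code example_vf_pble(struct i40iw_sc_cqp *cqp,
					      struct i40iw_manage_vf_pble_info *info)
{
	/* scratch 0: no completion cookie; true: post and ring now */
	return iw_vf_cqp_ops.manage_vf_pble_bp(cqp, info, 0, true);
}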
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
new file mode 100644
index 000000000000..6b68f7890b76
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -0,0 +1,748 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * vchnl_vf_send_get_ver_req - Request Channel version
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_vf_send_get_pe_stats_req - Request PE stats from VF
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1;
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_vf_send_add_hmc_objs_req - Add HMC objects
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ * @rsrc_type: HMC resource type to add
+ * @start_index: starting index of the objects to add
+ * @rsrc_count: number of objects to add
+ */
+static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+ struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0;
+ add_hmc_obj->obj_type = (u16)rsrc_type;
+ add_hmc_obj->start_index = start_index;
+ add_hmc_obj->obj_count = rsrc_count;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_vf_send_del_hmc_objs_req - del HMC objects
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ * @rsrc_type: HMC resource type to delete
+ * @start_index: starting index of the objects to delete
+ * @rsrc_count: number of objects to delete
+ */
+static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
+ struct i40iw_virtchnl_req *vchnl_req,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count)
+{
+ enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+ struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
+
+ if (!dev->vchnl_up)
+ return ret_code;
+
+ add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+ memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+ memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
+ vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
+ vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;
+ vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;
+ add_hmc_obj->obj_type = (u16)rsrc_type;
+ add_hmc_obj->start_index = start_index;
+ add_hmc_obj->obj_count = rsrc_count;
+ ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+ return ret_code;
+}
+
+/**
+ * vchnl_pf_send_get_ver_resp - Send channel version to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ */
+static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ struct i40iw_virtchnl_op_buf *vchnl_msg)
+{
+ enum i40iw_status_code ret_code;
+ u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+ memset(resp_buffer, 0, sizeof(resp_buffer));
+ vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+ vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+ vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+ *((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;
+ ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ * @hmc_fcn: HMC function index to return to the VF
+ */
+static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ struct i40iw_virtchnl_op_buf *vchnl_msg,
+ u16 hmc_fcn)
+{
+ enum i40iw_status_code ret_code;
+ u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1];
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+ memset(resp_buffer, 0, sizeof(resp_buffer));
+ vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+ vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+ vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+ *((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn;
+ ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ * @hw_stats: HW Stats struct
+ */
+static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ struct i40iw_virtchnl_op_buf *vchnl_msg,
+ struct i40iw_dev_hw_stats hw_stats)
+{
+ enum i40iw_status_code ret_code;
+ u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+ memset(resp_buffer, 0, sizeof(resp_buffer));
+ vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+ vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+ vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+ *((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = hw_stats;
+ ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_error_resp - Send an error response to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ * @op_ret_code: error code to return to the VF
+ */
+static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
+ struct i40iw_virtchnl_op_buf *vchnl_msg,
+ u16 op_ret_code)
+{
+ enum i40iw_status_code ret_code;
+ u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+ memset(resp_buffer, 0, sizeof(resp_buffer));
+ vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+ vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+ vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code;
+ ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
+ * @dev: IWARP device pointer
+ * @callback_param: pointer to the VF device
+ * @cqe_info: CQP completion information
+ */
+static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
+ struct i40iw_ccq_cqe_info *cqe_info)
+{
+ struct i40iw_vfdev *vf_dev = callback_param;
+ struct i40iw_virt_mem vf_dev_mem;
+
+ if (cqe_info->error) {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "CQP Completion Error on Get HMC Function. Maj = 0x%04x, Minor = 0x%04x\n",
+ cqe_info->maj_err_code, cqe_info->min_err_code);
+ dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
+ vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
+ (u16)I40IW_ERR_CQP_COMPL_ERROR);
+ vf_dev_mem.va = vf_dev;
+ vf_dev_mem.size = sizeof(*vf_dev);
+ i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "CQP Completion Operation Return information = 0x%08x\n",
+ cqe_info->op_ret_val);
+ vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
+ vf_dev->msg_count--;
+ vchnl_pf_send_get_hmc_fcn_resp(dev,
+ vf_dev->vf_id,
+ &vf_dev->vf_msg_buffer.vchnl_msg,
+ vf_dev->pmf_index);
+ }
+}
+
+/**
+ * pf_add_hmc_obj_callback - Callback for Add HMC Object
+ * @work_vf_dev: pointer to the VF Device
+ */
+static void pf_add_hmc_obj_callback(void *work_vf_dev)
+{
+ struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
+ struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
+ struct i40iw_hmc_create_obj_info info;
+ struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
+ enum i40iw_status_code ret_code;
+
+ if (!vf_dev->pf_hmc_initialized) {
+ ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);
+ if (ret_code)
+ goto add_out;
+ vf_dev->pf_hmc_initialized = true;
+ }
+
+ add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = hmc_info;
+ info.is_pf = false;
+ info.rsrc_type = (u32)add_hmc_obj->obj_type;
+ info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;
+ info.start_idx = add_hmc_obj->start_index;
+ info.count = add_hmc_obj->obj_count;
+ i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
+ "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\n",
+ info.count, info.rsrc_type);
+ ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);
+ if (!ret_code)
+ vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;
+add_out:
+ vf_dev->msg_count--;
+ vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
+}
+
+/**
+ * pf_del_hmc_obj_callback - Callback for delete HMC Object
+ * @work_vf_dev: pointer to the VF Device
+ */
+static void pf_del_hmc_obj_callback(void *work_vf_dev)
+{
+ struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
+ struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
+ struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
+ struct i40iw_hmc_del_obj_info info;
+ struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
+ enum i40iw_status_code ret_code = I40IW_SUCCESS;
+
+ if (!vf_dev->pf_hmc_initialized)
+ goto del_out;
+
+ del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = hmc_info;
+ info.is_pf = false;
+ info.rsrc_type = (u32)del_hmc_obj->obj_type;
+ info.start_idx = del_hmc_obj->start_index;
+ info.count = del_hmc_obj->obj_count;
+ i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
+ "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. Delete %u type %u objects\n",
+ info.count, info.rsrc_type);
+ ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);
+del_out:
+ vf_dev->msg_count--;
+ vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
+}
+
+/**
+ * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @msg: Virtual channel message buffer pointer
+ * @len: Length of the virtual channel message
+ */
+enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len)
+{
+ struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
+ struct i40iw_vfdev *vf_dev = NULL;
+ struct i40iw_hmc_fcn_info hmc_fcn_info;
+ u16 iw_vf_idx;
+ u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
+ struct i40iw_virt_mem vf_dev_mem;
+ struct i40iw_virtchnl_work_info work_info;
+ struct i40iw_dev_pestat *devstat;
+ enum i40iw_status_code ret_code;
+ unsigned long flags;
+
+ if (!dev || !msg || !len)
+ return I40IW_ERR_PARAM;
+
+ if (!dev->vchnl_up)
+ return I40IW_ERR_NOT_READY;
+ if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
+ /* only version V0 is defined; reply with it unconditionally */
+ vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+ return I40IW_SUCCESS;
+ }
+ for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT;
+ iw_vf_idx++) {
+ if (!dev->vf_dev[iw_vf_idx]) {
+ if (first_avail_iw_vf ==
+ I40IW_MAX_PE_ENABLED_VF_COUNT)
+ first_avail_iw_vf = iw_vf_idx;
+ continue;
+ }
+ if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
+ vf_dev = dev->vf_dev[iw_vf_idx];
+ break;
+ }
+ }
+ if (vf_dev) {
+ if (!vf_dev->msg_count) {
+ vf_dev->msg_count++;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "VF%u already has a channel message in progress.\n",
+ vf_id);
+ return I40IW_SUCCESS;
+ }
+ }
+ switch (vchnl_msg->iw_op_code) {
+ case I40IW_VCHNL_OP_GET_HMC_FCN:
+ if (!vf_dev &&
+ (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
+ ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
+ (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
+ if (!ret_code) {
+ vf_dev = vf_dev_mem.va;
+ vf_dev->stats_initialized = false;
+ vf_dev->pf_dev = dev;
+ vf_dev->msg_count = 1;
+ vf_dev->vf_id = vf_id;
+ vf_dev->iw_vf_idx = first_avail_iw_vf;
+ vf_dev->pf_hmc_initialized = false;
+ vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "vf_dev %p, hmc_info %p, hmc_obj %p\n",
+ vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
+ dev->vf_dev[first_avail_iw_vf] = vf_dev;
+ iw_vf_idx = first_avail_iw_vf;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "VF%u Unable to allocate a VF device structure.\n",
+ vf_id);
+ vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
+ return I40IW_SUCCESS;
+ }
+ memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+ hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
+ hmc_fcn_info.vf_id = vf_id;
+ hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
+ hmc_fcn_info.cqp_callback_param = vf_dev;
+ hmc_fcn_info.free_fcn = false;
+ ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "VF%u error CQP HMC Function operation.\n",
+ vf_id);
+ ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
+ if (ret_code)
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "VF%u - i40iw_device_init_pestat failed\n",
+ vf_id);
+ vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
+ (u8)vf_dev->pmf_index,
+ dev->hw, false);
+ vf_dev->stats_initialized = true;
+ } else {
+ if (vf_dev) {
+ vf_dev->msg_count--;
+ vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
+ } else {
+ vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
+ (u16)I40IW_ERR_NO_MEMORY);
+ }
+ }
+ break;
+ case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
+ if (!vf_dev)
+ return I40IW_ERR_BAD_PTR;
+ work_info.worker_vf_dev = vf_dev;
+ work_info.callback_fcn = pf_add_hmc_obj_callback;
+ memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+ i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
+ break;
+ case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
+ if (!vf_dev)
+ return I40IW_ERR_BAD_PTR;
+ work_info.worker_vf_dev = vf_dev;
+ work_info.callback_fcn = pf_del_hmc_obj_callback;
+ memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+ i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
+ break;
+ case I40IW_VCHNL_OP_GET_STATS:
+ if (!vf_dev)
+ return I40IW_ERR_BAD_PTR;
+ devstat = &vf_dev->dev_pestat;
+ spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+ devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
+ spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+ vf_dev->msg_count--;
+ vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, devstat->hw_stats);
+ break;
+ default:
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n",
+ vchnl_msg->iw_op_code);
+ vchnl_pf_send_error_resp(dev, vf_id,
+ vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
+ }
+ return I40IW_SUCCESS;
+}
+
+/**
+ * i40iw_vchnl_recv_vf - Receive VF virtual channel messages
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @msg: Virtual channel message buffer pointer
+ * @len: Length of the virtual channel message
+ */
+enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len)
+{
+ struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;
+ struct i40iw_virtchnl_req *vchnl_req;
+
+ vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
+ vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;
+ if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {
+ if (vchnl_req->parm_len && vchnl_req->parm)
+ memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: Got response, data size %u\n", __func__,
+ vchnl_req->parm_len);
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s: error length on response, Got %u, expected %u\n", __func__,
+ len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));
+ }
+
+ return I40IW_SUCCESS;
+}
+
+/**
+ * i40iw_vchnl_vf_get_ver - Request Channel version
+ * @dev: IWARP device pointer
+ * @vchnl_ver: Virtual channel message version pointer
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
+ u32 *vchnl_ver)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.parm = vchnl_ver;
+ vchnl_req.parm_len = sizeof(*vchnl_ver);
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function
+ * @dev: IWARP device pointer
+ * @hmc_fcn: HMC function index pointer
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
+ u16 *hmc_fcn)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.parm = hmc_fcn;
+ vchnl_req.parm_len = sizeof(*hmc_fcn);
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_add_hmc_objs - Add HMC Object
+ * @dev: IWARP device pointer
+ * @rsrc_type: HMC Resource type
+ * @start_index: Starting index of the objects to be added
+ * @rsrc_count: Number of resources to be added
+ */
+enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_add_hmc_objs_req(dev,
+ &vchnl_req,
+ rsrc_type,
+ start_index,
+ rsrc_count);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_del_hmc_obj - del HMC obj
+ * @dev: IWARP device pointer
+ * @rsrc_type: HMC Resource type
+ * @start_index: Starting index of the object to delete
+ * @rsrc_count: Number of resources to be deleted
+ */
+enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_del_hmc_objs_req(dev,
+ &vchnl_req,
+ rsrc_type,
+ start_index,
+ rsrc_count);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_get_pe_stats - Get PE stats
+ * @dev: IWARP device pointer
+ * @hw_stats: HW stats struct
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
+ struct i40iw_dev_hw_stats *hw_stats)
+{
+ struct i40iw_virtchnl_req vchnl_req;
+ enum i40iw_status_code ret_code;
+
+ memset(&vchnl_req, 0, sizeof(vchnl_req));
+ vchnl_req.dev = dev;
+ vchnl_req.parm = hw_stats;
+ vchnl_req.parm_len = sizeof(*hw_stats);
+ vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+ ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
+ if (!ret_code) {
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (!ret_code)
+ ret_code = vchnl_req.ret_code;
+ else
+ dev->vchnl_up = false;
+ } else {
+ i40iw_debug(dev, I40IW_DEBUG_VIRT,
+ "%s Send message failed 0x%0x\n", __func__, ret_code);
+ }
+ return ret_code;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h
new file mode 100644
index 000000000000..24886ef08293
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h
@@ -0,0 +1,124 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VIRTCHNL_H
+#define I40IW_VIRTCHNL_H
+
+#include "i40iw_hmc.h"
+
+#pragma pack(push, 1)
+
+struct i40iw_virtchnl_op_buf {
+ u16 iw_op_code;
+ u16 iw_op_ver;
+ u16 iw_chnl_buf_len;
+ u16 rsvd;
+ u64 iw_chnl_op_ctx;
+ /* Member alignment MUST be maintained above this location */
+ u8 iw_chnl_buf[1];
+};
+
+struct i40iw_virtchnl_resp_buf {
+ u64 iw_chnl_op_ctx;
+ u16 iw_chnl_buf_len;
+ s16 iw_op_ret_code;
+ /* Member alignment MUST be maintained above this location */
+ u16 rsvd[2];
+ u8 iw_chnl_buf[1];
+};
+
+enum i40iw_virtchnl_ops {
+ I40IW_VCHNL_OP_GET_VER = 0,
+ I40IW_VCHNL_OP_GET_HMC_FCN,
+ I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE,
+ I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE,
+ I40IW_VCHNL_OP_GET_STATS
+};
+
+#define I40IW_VCHNL_OP_GET_VER_V0 0
+#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0
+#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0
+#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0
+#define I40IW_VCHNL_OP_GET_STATS_V0 0
+#define I40IW_VCHNL_CHNL_VER_V0 0
+
+struct i40iw_dev_hw_stats;
+
+struct i40iw_virtchnl_hmc_obj_range {
+ u16 obj_type;
+ u16 rsvd;
+ u32 start_index;
+ u32 obj_count;
+};
+
+enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len);
+
+enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
+ u32 vf_id,
+ u8 *msg,
+ u16 len);
+
+struct i40iw_virtchnl_req {
+ struct i40iw_sc_dev *dev;
+ struct i40iw_virtchnl_op_buf *vchnl_msg;
+ void *parm;
+ u32 vf_id;
+ u16 parm_len;
+ s16 ret_code;
+};
+
+#pragma pack(pop)
+
+enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
+ u32 *vchnl_ver);
+
+enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
+ u16 *hmc_fcn);
+
+enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count);
+
+enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type rsrc_type,
+ u32 start_index,
+ u32 rsrc_count);
+
+enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
+ struct i40iw_dev_hw_stats *hw_stats);
+#endif
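
Both message structs end in a one-byte iw_chnl_buf[1] placeholder for a variable payload, which is why every sender in i40iw_virtchnl.c computes lengths as sizeof(header) + payload - 1; a sketch of that idiom (helper name hypothetical):

static inline u16 example_vchnl_buf_len(u16 payload_len)
{
	/* the trailing iw_chnl_buf[1] already contributes one byte */
	return sizeof(struct i40iw_virtchnl_op_buf) + payload_len - 1;
}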
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index fc01deac1d3c..db4aa13ebae0 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,7 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
+ depends on MAY_USE_DEVLINK
select NET_VENDOR_MELLANOX
select MLX4_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 21cb41a60fe8..c74ef2620b85 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -310,7 +310,7 @@ static void aliasguid_query_handler(int status,
if (status) {
pr_debug("(port: %d) failed: status = %d\n",
cb_ctx->port, status);
- rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
+ rec->time_to_run = ktime_get_boot_ns() + 1 * NSEC_PER_SEC;
goto out;
}
@@ -416,7 +416,7 @@ next_entry:
be64_to_cpu((__force __be64)rec->guid_indexes),
be64_to_cpu((__force __be64)applied_guid_indexes),
be64_to_cpu((__force __be64)declined_guid_indexes));
- rec->time_to_run = ktime_get_real_ns() +
+ rec->time_to_run = ktime_get_boot_ns() +
resched_delay_sec * NSEC_PER_SEC;
} else {
rec->status = MLX4_GUID_INFO_STATUS_SET;
@@ -708,7 +708,7 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
}
}
if (resched_delay_sec) {
- u64 curr_time = ktime_get_real_ns();
+ u64 curr_time = ktime_get_boot_ns();
*resched_delay_sec = (low_record_time < curr_time) ? 0 :
div_u64((low_record_time - curr_time), NSEC_PER_SEC);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1c7ab6cabbb8..f014eaf5969b 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -41,6 +41,7 @@
#include <linux/if_vlan.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/devlink.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -1643,6 +1644,56 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
return err;
}
+static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
+ struct ib_flow_attr *flow_attr,
+ enum mlx4_net_trans_promisc_mode *type)
+{
+ int err = 0;
+
+ if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
+ (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
+ (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_attr->num_of_specs == 0) {
+ type[0] = MLX4_FS_MC_SNIFFER;
+ type[1] = MLX4_FS_UC_SNIFFER;
+ } else {
+ union ib_flow_spec *ib_spec;
+
+ ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+ if (ib_spec->type != IB_FLOW_SPEC_ETH)
+ return -EINVAL;
+
+ /* if the mask is all zeros, sniff both MC and UC */
+ if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
+ type[0] = MLX4_FS_MC_SNIFFER;
+ type[1] = MLX4_FS_UC_SNIFFER;
+ } else {
+ u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
+ ib_spec->eth.mask.dst_mac[1],
+ ib_spec->eth.mask.dst_mac[2],
+ ib_spec->eth.mask.dst_mac[3],
+ ib_spec->eth.mask.dst_mac[4],
+ ib_spec->eth.mask.dst_mac[5]};
+
+ /* The XOR above flipped only the multicast bit; a non-empty mask
+ * is valid only if that bit was set and the rest are zero.
+ */
+ if (!is_zero_ether_addr(&mac[0]))
+ return -EINVAL;
+
+ if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
+ type[0] = MLX4_FS_MC_SNIFFER;
+ else
+ type[0] = MLX4_FS_UC_SNIFFER;
+ }
+ }
+
+ return err;
+}
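
For context, a sketch of the kind of flow attribute the helper above is meant to classify: a zero-spec don't-trap rule that sniffs both MC and UC traffic (hypothetical caller; standard ib_create_flow() verb):

static struct ib_flow *example_dont_trap_flow(struct ib_qp *qp)
{
	struct ib_flow_attr attr = {
		.type = IB_FLOW_ATTR_NORMAL,
		.size = sizeof(attr),
		.flags = IB_FLOW_ATTR_FLAGS_DONT_TRAP,
		.num_of_specs = 0,
		.port = 1,
	};

	return ib_create_flow(qp, &attr, IB_FLOW_DOMAIN_USER);
}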
+
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
@@ -1653,6 +1704,10 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
+ if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+ (flow_attr->type != IB_FLOW_ATTR_NORMAL))
+ return ERR_PTR(-EOPNOTSUPP);
+
memset(type, 0, sizeof(type));
mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
@@ -1663,7 +1718,19 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
switch (flow_attr->type) {
case IB_FLOW_ATTR_NORMAL:
- type[0] = MLX4_FS_REGULAR;
+ /* If the don't-trap flag (continue match) is set, traffic is,
+ * under specific conditions, replicated to the given qp
+ * without being stolen from other rules.
+ */
+ if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
+ err = mlx4_ib_add_dont_trap_rule(dev,
+ flow_attr,
+ type);
+ if (err)
+ goto err_free;
+ } else {
+ type[0] = MLX4_FS_REGULAR;
+ }
break;
case IB_FLOW_ATTR_ALL_DEFAULT:
@@ -1675,8 +1742,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
break;
case IB_FLOW_ATTR_SNIFFER:
- type[0] = MLX4_FS_UC_SNIFFER;
- type[1] = MLX4_FS_MC_SNIFFER;
+ type[0] = MLX4_FS_MIRROR_RX_PORT;
+ type[1] = MLX4_FS_MIRROR_SX_PORT;
break;
default:
@@ -2519,6 +2586,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
ibdev->ib_active = true;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
+ &ibdev->ib_dev);
if (mlx4_is_mfunc(ibdev->dev))
init_pkeys(ibdev);
@@ -2643,7 +2713,10 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
struct mlx4_ib_dev *ibdev = ibdev_ptr;
int p;
+ int i;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
ibdev->ib_active = false;
flush_workqueue(wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 52ce7b000044..1eca01cebe51 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -711,7 +711,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 242b94ec105b..ce0b5aa8eb9b 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -32,6 +32,7 @@
*/
#include <linux/slab.h>
+#include <rdma/ib_user_verbs.h>
#include "mlx4_ib.h"
@@ -334,7 +335,8 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
return 0;
}
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
struct mlx4_ib_mw *mw;
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 27a70159e2ea..7493a83acd28 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
+mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index fd1de31e0611..a00ba4418de9 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -207,7 +207,10 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
+ wc->wc_flags = IB_WC_IP_CSUM_OK;
+ if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))))
+ wc->wc_flags = 0;
break;
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
@@ -431,7 +434,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_core_qp *mqp;
struct mlx5_ib_wq *wq;
struct mlx5_sig_err_cqe *sig_err_cqe;
- struct mlx5_core_mr *mmr;
+ struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
uint8_t opcode;
uint32_t qpn;
@@ -536,17 +539,17 @@ repoll:
case MLX5_CQE_SIG_ERR:
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
- read_lock(&dev->mdev->priv.mr_table.lock);
- mmr = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- if (unlikely(!mmr)) {
- read_unlock(&dev->mdev->priv.mr_table.lock);
+ read_lock(&dev->mdev->priv.mkey_table.lock);
+ mmkey = __mlx5_mr_lookup(dev->mdev,
+ mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+ if (unlikely(!mmkey)) {
+ read_unlock(&dev->mdev->priv.mkey_table.lock);
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
return -EINVAL;
}
- mr = to_mibmr(mmr);
+ mr = to_mibmr(mmkey);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true;
mr->sig->sigerr_count++;
@@ -558,25 +561,51 @@ repoll:
mr->sig->err_item.expected,
mr->sig->err_item.actual);
- read_unlock(&dev->mdev->priv.mr_table.lock);
+ read_unlock(&dev->mdev->priv.mkey_table.lock);
goto repoll;
}
return 0;
}
+static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_ib_wc *soft_wc, *next;
+ int npolled = 0;
+
+ list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
+ if (npolled >= num_entries)
+ break;
+
+ mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
+ cq->mcq.cqn);
+
+ wc[npolled++] = soft_wc->wc;
+ list_del(&soft_wc->list);
+ kfree(soft_wc);
+ }
+
+ return npolled;
+}
+
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_ib_qp *cur_qp = NULL;
unsigned long flags;
+ int soft_polled = 0;
int npolled;
int err = 0;
spin_lock_irqsave(&cq->lock, flags);
- for (npolled = 0; npolled < num_entries; npolled++) {
- err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
+ if (unlikely(!list_empty(&cq->wc_list)))
+ soft_polled = poll_soft_wc(cq, num_entries, wc);
+
+ for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
+ err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
if (err)
break;
}
@@ -587,7 +616,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN)
- return npolled;
+ return soft_polled + npolled;
else
return err;
}
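
The ordering above matters: software-generated completions are drained into the head of the caller's array before any hardware CQEs, so consumers see one coherent batch. A minimal caller sketch, where process_wc() is a hypothetical per-completion handler:

	struct ib_wc wc[16];
	int i, n;

	n = ib_poll_cq(ibcq, 16, wc);	/* soft WCs first, then hardware CQEs */
	for (i = 0; i < n; i++)
		process_wc(&wc[i]);
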
@@ -595,16 +624,27 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+ unsigned long irq_flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, irq_flags);
+ if (cq->notify_flags != IB_CQ_NEXT_COMP)
+ cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
- mlx5_cq_arm(&to_mcq(ibcq)->mcq,
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
+ ret = 1;
+ spin_unlock_irqrestore(&cq->lock, irq_flags);
+
+ mlx5_cq_arm(&cq->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
uar_page,
MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
to_mcq(ibcq)->mcq.cons_index);
- return 0;
+ return ret;
}
static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
@@ -757,6 +797,14 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
mlx5_db_free(dev->mdev, &cq->db);
}
+static void notify_soft_wc_handler(struct work_struct *work)
+{
+ struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
+ notify_work);
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -807,6 +855,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
&index, &inlen);
if (err)
goto err_create;
+
+ INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
cq->cqe_size = cqe_size;
@@ -832,6 +882,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->mcq.comp = mlx5_ib_cq_comp;
cq->mcq.event = mlx5_ib_cq_event;
+ INIT_LIST_HEAD(&cq->wc_list);
+
if (context)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
err = -EFAULT;
@@ -1219,3 +1271,27 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
cq = to_mcq(ibcq);
return cq->cqe_size;
}
+
+/* Called from atomic context */
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
+{
+ struct mlx5_ib_wc *soft_wc;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ unsigned long flags;
+
+ soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
+ if (!soft_wc)
+ return -ENOMEM;
+
+ soft_wc->wc = *wc;
+ spin_lock_irqsave(&cq->lock, flags);
+ list_add_tail(&soft_wc->list, &cq->wc_list);
+ if (cq->notify_flags == IB_CQ_NEXT_COMP ||
+ wc->status != IB_WC_SUCCESS) {
+ cq->notify_flags = 0;
+ schedule_work(&cq->notify_work);
+ }
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return 0;
+}
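
mlx5_ib_generate_wc() allocates with GFP_ATOMIC precisely because it may be called from a completion path. A sketch of a producer, assuming the work completion fields are filled in by the caller as the GSI code in gsi.c below does:

	struct ib_wc wc = {
		.wr_id  = my_wr_id,		/* hypothetical cookie */
		.status = IB_WC_SUCCESS,
		.opcode = IB_WC_SEND,
		.qp     = ibqp,
	};

	WARN_ON_ONCE(mlx5_ib_generate_wc(ibcq, &wc));
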
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
new file mode 100644
index 000000000000..53e03c8ede79
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx5_ib.h"
+
+struct mlx5_ib_gsi_wr {
+ struct ib_cqe cqe;
+ struct ib_wc wc;
+ int send_flags;
+ bool completed:1;
+};
+
+struct mlx5_ib_gsi_qp {
+ struct ib_qp ibqp;
+ struct ib_qp *rx_qp;
+ u8 port_num;
+ struct ib_qp_cap cap;
+ enum ib_sig_type sq_sig_type;
+ /* Serialize qp state modifications */
+ struct mutex mutex;
+ struct ib_cq *cq;
+ struct mlx5_ib_gsi_wr *outstanding_wrs;
+ u32 outstanding_pi, outstanding_ci;
+ int num_qps;
+ /* Protects access to the tx_qps. Post send operations synchronize
+ * with tx_qp creation in setup_qp(). Also protects the
+ * outstanding_wrs array and indices.
+ */
+ spinlock_t lock;
+ struct ib_qp **tx_qps;
+};
+
+static struct mlx5_ib_gsi_qp *gsi_qp(struct ib_qp *qp)
+{
+ return container_of(qp, struct mlx5_ib_gsi_qp, ibqp);
+}
+
+static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
+}
+
+static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index)
+{
+ return ++index % gsi->cap.max_send_wr;
+}
+
+#define for_each_outstanding_wr(gsi, index) \
+ for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
+ index = next_outstanding(gsi, index))
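
next_outstanding() advances the producer/consumer indices around a ring of cap.max_send_wr entries; a quick worked example with max_send_wr == 4:

	u32 idx = 2;

	idx = next_outstanding(gsi, idx);	/* (2 + 1) % 4 == 3 */
	idx = next_outstanding(gsi, idx);	/* (3 + 1) % 4 == 0, wraps */
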
+
+/* Call with gsi->lock locked */
+static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
+{
+ struct ib_cq *gsi_cq = gsi->ibqp.send_cq;
+ struct mlx5_ib_gsi_wr *wr;
+ u32 index;
+
+ for_each_outstanding_wr(gsi, index) {
+ wr = &gsi->outstanding_wrs[index];
+
+ if (!wr->completed)
+ break;
+
+ if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR ||
+ wr->send_flags & IB_SEND_SIGNALED)
+ WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
+
+ wr->completed = false;
+ }
+
+ gsi->outstanding_ci = index;
+}
+
+static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
+ struct mlx5_ib_gsi_wr *wr =
+ container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
+ u64 wr_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ wr->completed = true;
+ wr_id = wr->wc.wr_id;
+ wr->wc = *wc;
+ wr->wc.wr_id = wr_id;
+ wr->wc.qp = &gsi->ibqp;
+
+ generate_completions(gsi);
+ spin_unlock_irqrestore(&gsi->lock, flags);
+}
+
+struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_gsi_qp *gsi;
+ struct ib_qp_init_attr hw_init_attr = *init_attr;
+ const u8 port_num = init_attr->port_num;
+ const int num_pkeys = pd->device->attrs.max_pkeys;
+ const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
+ int ret;
+
+ mlx5_ib_dbg(dev, "creating GSI QP\n");
+
+ if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
+ mlx5_ib_warn(dev,
+ "invalid port number %d during GSI QP creation\n",
+ port_num);
+ return ERR_PTR(-EINVAL);
+ }
+
+ gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
+ if (!gsi)
+ return ERR_PTR(-ENOMEM);
+
+ gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
+ if (!gsi->tx_qps) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
+ sizeof(*gsi->outstanding_wrs),
+ GFP_KERNEL);
+ if (!gsi->outstanding_wrs) {
+ ret = -ENOMEM;
+ goto err_free_tx;
+ }
+
+ mutex_init(&gsi->mutex);
+
+ mutex_lock(&dev->devr.mutex);
+
+ if (dev->devr.ports[port_num - 1].gsi) {
+ mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
+ port_num);
+ ret = -EBUSY;
+ goto err_free_wrs;
+ }
+ gsi->num_qps = num_qps;
+ spin_lock_init(&gsi->lock);
+
+ gsi->cap = init_attr->cap;
+ gsi->sq_sig_type = init_attr->sq_sig_type;
+ gsi->ibqp.qp_num = 1;
+ gsi->port_num = port_num;
+
+ gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
+ IB_POLL_SOFTIRQ);
+ if (IS_ERR(gsi->cq)) {
+ mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
+ PTR_ERR(gsi->cq));
+ ret = PTR_ERR(gsi->cq);
+ goto err_free_wrs;
+ }
+
+ hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
+ hw_init_attr.send_cq = gsi->cq;
+ if (num_qps) {
+ hw_init_attr.cap.max_send_wr = 0;
+ hw_init_attr.cap.max_send_sge = 0;
+ hw_init_attr.cap.max_inline_data = 0;
+ }
+ gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
+ if (IS_ERR(gsi->rx_qp)) {
+ mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
+ PTR_ERR(gsi->rx_qp));
+ ret = PTR_ERR(gsi->rx_qp);
+ goto err_destroy_cq;
+ }
+
+ dev->devr.ports[init_attr->port_num - 1].gsi = gsi;
+
+ mutex_unlock(&dev->devr.mutex);
+
+ return &gsi->ibqp;
+
+err_destroy_cq:
+ ib_free_cq(gsi->cq);
+err_free_wrs:
+ mutex_unlock(&dev->devr.mutex);
+ kfree(gsi->outstanding_wrs);
+err_free_tx:
+ kfree(gsi->tx_qps);
+err_free:
+ kfree(gsi);
+ return ERR_PTR(ret);
+}
+
+int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ const int port_num = gsi->port_num;
+ int qp_index;
+ int ret;
+
+ mlx5_ib_dbg(dev, "destroying GSI QP\n");
+
+ mutex_lock(&dev->devr.mutex);
+ ret = ib_destroy_qp(gsi->rx_qp);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
+ ret);
+ mutex_unlock(&dev->devr.mutex);
+ return ret;
+ }
+ dev->devr.ports[port_num - 1].gsi = NULL;
+ mutex_unlock(&dev->devr.mutex);
+ gsi->rx_qp = NULL;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
+ if (!gsi->tx_qps[qp_index])
+ continue;
+ WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
+ gsi->tx_qps[qp_index] = NULL;
+ }
+
+ ib_free_cq(gsi->cq);
+
+ kfree(gsi->outstanding_wrs);
+ kfree(gsi->tx_qps);
+ kfree(gsi);
+
+ return 0;
+}
+
+static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
+{
+ struct ib_pd *pd = gsi->rx_qp->pd;
+ struct ib_qp_init_attr init_attr = {
+ .event_handler = gsi->rx_qp->event_handler,
+ .qp_context = gsi->rx_qp->qp_context,
+ .send_cq = gsi->cq,
+ .recv_cq = gsi->rx_qp->recv_cq,
+ .cap = {
+ .max_send_wr = gsi->cap.max_send_wr,
+ .max_send_sge = gsi->cap.max_send_sge,
+ .max_inline_data = gsi->cap.max_inline_data,
+ },
+ .sq_sig_type = gsi->sq_sig_type,
+ .qp_type = IB_QPT_UD,
+ .create_flags = mlx5_ib_create_qp_sqpn_qp1(),
+ };
+
+ return ib_create_qp(pd, &init_attr);
+}
+
+static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
+ u16 qp_index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct ib_qp_attr attr;
+ int mask;
+ int ret;
+
+ mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
+ attr.qp_state = IB_QPS_INIT;
+ attr.pkey_index = qp_index;
+ attr.qkey = IB_QP1_QKEY;
+ attr.port_num = gsi->port_num;
+ ret = ib_modify_qp(qp, &attr, mask);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ attr.qp_state = IB_QPS_RTS;
+ attr.sq_psn = 0;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
+{
+ struct ib_device *device = gsi->rx_qp->device;
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct ib_qp *qp;
+ unsigned long flags;
+ u16 pkey;
+ int ret;
+
+ ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ if (!pkey) {
+ mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d. Skipping.\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ qp = gsi->tx_qps[qp_index];
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ if (qp) {
+ mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ qp = create_gsi_ud_qp(gsi);
+ if (IS_ERR(qp)) {
+ mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
+ PTR_ERR(qp));
+ return;
+ }
+
+ ret = modify_to_rts(gsi, qp, qp_index);
+ if (ret)
+ goto err_destroy_qp;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ WARN_ON_ONCE(gsi->tx_qps[qp_index]);
+ gsi->tx_qps[qp_index] = qp;
+ spin_unlock_irqrestore(&gsi->lock, flags);
+
+ return;
+
+err_destroy_qp:
+ WARN_ON_ONCE(qp);
+}
+
+static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
+{
+ u16 qp_index;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+ setup_qp(gsi, qp_index);
+}
+
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ int ret;
+
+ mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
+
+ mutex_lock(&gsi->mutex);
+ ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
+ goto unlock;
+ }
+
+ if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
+ setup_qps(gsi);
+
+unlock:
+ mutex_unlock(&gsi->mutex);
+
+ return ret;
+}
+
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ int ret;
+
+ mutex_lock(&gsi->mutex);
+ ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
+ qp_init_attr->cap = gsi->cap;
+ mutex_unlock(&gsi->mutex);
+
+ return ret;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
+ struct ib_ud_wr *wr, struct ib_wc *wc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_gsi_wr *gsi_wr;
+
+ if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
+ mlx5_ib_warn(dev, "no available GSI work request.\n");
+ return -ENOMEM;
+ }
+
+ gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi];
+ gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi);
+
+ if (!wc) {
+ memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
+ gsi_wr->wc.pkey_index = wr->pkey_index;
+ gsi_wr->wc.wr_id = wr->wr.wr_id;
+ } else {
+ gsi_wr->wc = *wc;
+ gsi_wr->completed = true;
+ }
+
+ gsi_wr->cqe.done = &handle_single_completion;
+ wr->wr.wr_cqe = &gsi_wr->cqe;
+
+ return 0;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
+ struct ib_ud_wr *wr)
+{
+ struct ib_wc wc = {
+ { .wr_id = wr->wr.wr_id },
+ .status = IB_WC_SUCCESS,
+ .opcode = IB_WC_SEND,
+ .qp = &gsi->ibqp,
+ };
+ int ret;
+
+ ret = mlx5_ib_add_outstanding_wr(gsi, wr, &wc);
+ if (ret)
+ return ret;
+
+ generate_completions(gsi);
+
+ return 0;
+}
+
+/* Call with gsi->lock locked */
+static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ int qp_index = wr->pkey_index;
+
+ if (!mlx5_ib_deth_sqpn_cap(dev))
+ return gsi->rx_qp;
+
+ if (qp_index >= gsi->num_qps)
+ return NULL;
+
+ return gsi->tx_qps[qp_index];
+}
+
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+ struct ib_qp *tx_qp;
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct ib_ud_wr cur_wr = *ud_wr(wr);
+
+ cur_wr.wr.next = NULL;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ tx_qp = get_tx_qp(gsi, &cur_wr);
+ if (!tx_qp) {
+ ret = mlx5_ib_gsi_silent_drop(gsi, &cur_wr);
+ if (ret)
+ goto err;
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ continue;
+ }
+
+ ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL);
+ if (ret)
+ goto err;
+
+ ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
+ if (ret) {
+ /* Undo the effect of adding the outstanding wr */
+ gsi->outstanding_pi = (gsi->outstanding_pi - 1) %
+ gsi->cap.max_send_wr;
+ goto err;
+ }
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ }
+
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ *bad_wr = wr;
+ return ret;
+}
+
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+
+ return ib_post_recv(gsi->rx_qp, wr, bad_wr);
+}
+
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
+{
+ if (!gsi)
+ return;
+
+ mutex_lock(&gsi->mutex);
+ setup_qps(gsi);
+ mutex_unlock(&gsi->mutex);
+}
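
Taken together, the GSI wrapper keeps one hardware QP for receives and, when the device can set the DETH source QP number, one UD QP per P_Key for sends. A hedged sketch of how a MAD-layer send maps onto it, with ah and pkey_idx assumed to exist:

	struct ib_ud_wr wr = {
		.wr = {
			.opcode     = IB_WR_SEND,
			.send_flags = IB_SEND_SIGNALED,
			.wr_id      = my_wr_id,		/* hypothetical */
		},
		.ah          = ah,
		.remote_qpn  = 1,			/* peer GSI QP */
		.remote_qkey = IB_QP1_QKEY,
		.pkey_index  = pkey_idx,		/* selects tx_qps[pkey_idx] */
	};
	struct ib_send_wr *bad;

	ret = mlx5_ib_gsi_post_send(gsi_qp, &wr.wr, &bad);
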
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
new file mode 100644
index 000000000000..c1b9de800fe5
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_ib.h"
+
+static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
+{
+ switch (mlx_policy) {
+ case MLX5_POLICY_DOWN:
+ return IFLA_VF_LINK_STATE_DISABLE;
+ case MLX5_POLICY_UP:
+ return IFLA_VF_LINK_STATE_ENABLE;
+ case MLX5_POLICY_FOLLOW:
+ return IFLA_VF_LINK_STATE_AUTO;
+ default:
+ return __IFLA_VF_LINK_STATE_MAX;
+ }
+}
+
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_info *info)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
+ vf, err);
+ goto free;
+ }
+ memset(info, 0, sizeof(*info));
+ info->linkstate = mlx_to_net_policy(rep->policy);
+ if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
+ err = -EINVAL;
+
+free:
+ kfree(rep);
+ return err;
+}
+
+static inline enum port_state_policy net_to_mlx_policy(int policy)
+{
+ switch (policy) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ return MLX5_POLICY_DOWN;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ return MLX5_POLICY_UP;
+ case IFLA_VF_LINK_STATE_AUTO:
+ return MLX5_POLICY_FOLLOW;
+ default:
+ return MLX5_POLICY_INVALID;
+ }
+}
+
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u8 port, int state)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->policy = net_to_mlx_policy(state);
+ if (in->policy == MLX5_POLICY_INVALID) {
+ err = -EINVAL;
+ goto out;
+ }
+ in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+
+out:
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_stats *stats)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *dev;
+ void *out;
+ int err;
+
+ dev = to_mdev(device);
+ mdev = dev->mdev;
+
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
+ if (err)
+ goto ex;
+
+ stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
+ stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
+ stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
+ stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
+ stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);
+
+ex:
+ kfree(out);
+ return err;
+}
+
+static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
+ in->node_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ kfree(in);
+ return err;
+}
+
+static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
+ in->port_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+ u64 guid, int type)
+{
+ if (type == IFLA_VF_IB_NODE_GUID)
+ return set_vf_node_guid(device, vf, port, guid);
+ else if (type == IFLA_VF_IB_PORT_GUID)
+ return set_vf_port_guid(device, vf, port, guid);
+
+ return -EINVAL;
+}
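
These callbacks are reached through the new VF fields on struct ib_device (wired up in main.c below), which the core invokes on the PF. A minimal sketch of a core-side caller, under that assumption:

	struct ifla_vf_info info;

	if (device->get_vf_config &&
	    !device->get_vf_config(device, vf, port, &info))
		pr_info("vf %d link-state policy %u\n", vf, info.linkstate);
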
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index b84d13a487cc..1534af113058 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -31,8 +31,10 @@
*/
#include <linux/mlx5/cmd.h>
+#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
enum {
@@ -57,20 +59,12 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in, size_t in_mad_size,
- struct ib_mad_hdr *out, size_t *out_mad_size,
- u16 *out_mad_pkey_index)
+static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
u16 slid;
int err;
- const struct ib_mad *in_mad = (const struct ib_mad *)in;
- struct ib_mad *out_mad = (struct ib_mad *)out;
-
- if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
- *out_mad_size != sizeof(*out_mad)))
- return IB_MAD_RESULT_FAILURE;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
@@ -117,6 +111,156 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
+static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
+ void *out)
+{
+#define MLX5_SUM_CNT(p, cntr1, cntr2) \
+ (MLX5_GET64(query_vport_counter_out, p, cntr1) + \
+ MLX5_GET64(query_vport_counter_out, p, cntr2))
+
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
+ transmitted_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
+ received_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_xmit_packets =
+ cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
+ transmitted_ib_multicast.packets));
+ pma_cnt_ext->port_rcv_packets =
+ cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
+ received_ib_multicast.packets));
+ pma_cnt_ext->port_unicast_xmit_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, transmitted_ib_unicast.packets);
+ pma_cnt_ext->port_unicast_rcv_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, received_ib_unicast.packets);
+ pma_cnt_ext->port_multicast_xmit_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, transmitted_ib_multicast.packets);
+ pma_cnt_ext->port_multicast_rcv_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, received_ib_multicast.packets);
+}
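
The >> 2 above converts octets to the PMA's units: PortXmitData and PortRcvData count 32-bit words per the IB specification, while the firmware counters report octets. For example:

	u64 octets = 4096;		/* reported by query_vport_counter */
	u64 words  = octets >> 2;	/* 1024 32-bit words seen by the PMA */
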
+
+static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
+ void *out)
+{
+ /* Traffic counters will be reported in
+ * their 64-bit form via ib_pma_portcounters_ext by default.
+ */
+ void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set);
+
+#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) { \
+ counter_var = MLX5_GET_BE(typeof(counter_var), \
+ ib_port_cntrs_grp_data_layout, \
+ out_pma, counter_name); \
+ }
+
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
+ symbol_error_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
+ link_error_recovery_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
+ link_downed_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
+ port_rcv_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
+ port_rcv_remote_physical_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
+ port_rcv_switch_relay_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
+ port_xmit_discards);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
+ port_xmit_constraint_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
+ port_rcv_constraint_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
+ link_overrun_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
+ vl_15_dropped);
+}
+
+static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ int err;
+ void *out_cnt;
+
+ /* Declaring support of extended counters */
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+ struct ib_class_port_info cpi = {};
+
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ }
+
+ if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
+ struct ib_pma_portcounters_ext *pma_cnt_ext =
+ (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+
+ out_cnt = mlx5_vzalloc(sz);
+ if (!out_cnt)
+ return IB_MAD_RESULT_FAILURE;
+
+ err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
+ port_num, out_cnt, sz);
+ if (!err)
+ pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
+ } else {
+ struct ib_pma_portcounters *pma_cnt =
+ (struct ib_pma_portcounters *)(out_mad->data + 40);
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ out_cnt = mlx5_vzalloc(sz);
+ if (!out_cnt)
+ return IB_MAD_RESULT_FAILURE;
+
+ err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
+ out_cnt, sz);
+ if (!err)
+ pma_cnt_assign(pma_cnt, out_cnt);
+ }
+
+ kvfree(out_cnt);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ const struct ib_mad *in_mad = (const struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
+
+ memset(out_mad->data, 0, sizeof(out_mad->data));
+
+ if (MLX5_CAP_GEN(mdev, vport_counters) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
+ return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
+ } else {
+ return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in_mad, out_mad);
+ }
+}
+
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_smp *in_mad = NULL;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 03c418ccbc98..5acf346e048e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -42,6 +42,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
+#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
@@ -283,7 +284,7 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
- return !dev->mdev->issi;
+ return !MLX5_CAP_GEN(dev->mdev, ib_virt);
}
enum {
@@ -487,6 +488,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
if (MLX5_CAP_GEN(mdev, xrc))
props->device_cap_flags |= IB_DEVICE_XRC;
+ if (MLX5_CAP_GEN(mdev, imaicl)) {
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_WINDOW_TYPE_2B;
+ props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ /* We support 'Gappy' memory registration too */
+ props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
+ }
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (MLX5_CAP_GEN(mdev, sho)) {
props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
@@ -504,6 +512,11 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
(MLX5_CAP_ETH(dev->mdev, csum_cap)))
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
+ if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+ props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+ props->device_cap_flags |= IB_DEVICE_UD_TSO;
+ }
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -529,7 +542,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
props->max_srq_sge = max_rq_sg - 1;
- props->max_fast_reg_page_list_len = (unsigned int)-1;
+ props->max_fast_reg_page_list_len =
+ 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
get_atomic_caps(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
@@ -549,6 +563,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, cd))
props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
+ if (!mlx5_core_is_pf(mdev))
+ props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
+
return 0;
}
@@ -686,6 +703,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
props->qkey_viol_cntr = rep->qkey_violation_counter;
props->subnet_timeout = rep->subnet_timeout;
props->init_type_reply = rep->init_type_reply;
+ props->grh_required = rep->grh_required;
err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
if (err)
@@ -1369,11 +1387,20 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
return 0;
}
+static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
+{
+ priority *= 2;
+ if (!dont_trap)
+ priority++;
+ return priority;
+}
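
ib_prio_to_core_prio() doubles the user priority space so that a don't-trap rule always sits one level above the regular rule of the same user priority (lower core values are matched first). A quick worked example:

	ib_prio_to_core_prio(0, true);	/* == 0: don't-trap at user prio 0 */
	ib_prio_to_core_prio(0, false);	/* == 1: regular rule at user prio 0 */
	ib_prio_to_core_prio(1, true);	/* == 2: don't-trap at user prio 1 */
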
+
#define MLX5_FS_MAX_TYPES 10
#define MLX5_FS_MAX_ENTRIES 32000UL
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
struct ib_flow_attr *flow_attr)
{
+ bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_table *ft;
@@ -1383,10 +1410,12 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
int err = 0;
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- if (flow_is_multicast_only(flow_attr))
+ if (flow_is_multicast_only(flow_attr) &&
+ !dont_trap)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
- priority = flow_attr->priority;
+ priority = ib_prio_to_core_prio(flow_attr->priority,
+ dont_trap);
ns = mlx5_get_flow_namespace(dev->mdev,
MLX5_FLOW_NAMESPACE_BYPASS);
num_entries = MLX5_FS_MAX_ENTRIES;
@@ -1434,6 +1463,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
unsigned int spec_index;
u32 *match_c;
u32 *match_v;
+ u32 action;
int err = 0;
if (!is_valid_attr(flow_attr))
@@ -1459,9 +1489,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
/* Outer header support only */
match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+ action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
match_c, match_v,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ action,
MLX5_FS_DEFAULT_FLOW_TAG,
dst);
@@ -1481,6 +1513,29 @@ free:
return err ? ERR_PTR(err) : handler;
}
+static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_dst = NULL;
+ struct mlx5_ib_flow_handler *handler = NULL;
+
+ handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
+ if (!IS_ERR(handler)) {
+ handler_dst = create_flow_rule(dev, ft_prio,
+ flow_attr, dst);
+ if (IS_ERR(handler_dst)) {
+ mlx5_del_flow_rule(handler->rule);
+ kfree(handler);
+ handler = handler_dst;
+ } else {
+ list_add(&handler_dst->list, &handler->list);
+ }
+ }
+
+ return handler;
+}
enum {
LEFTOVERS_MC,
LEFTOVERS_UC,
@@ -1558,7 +1613,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
if (domain != IB_FLOW_DOMAIN_USER ||
flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
- flow_attr->flags)
+ (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
return ERR_PTR(-EINVAL);
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
@@ -1577,8 +1632,13 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- handler = create_flow_rule(dev, ft_prio, flow_attr,
- dst);
+ if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
+ handler = create_dont_trap_rule(dev, ft_prio,
+ flow_attr, dst);
+ } else {
+ handler = create_flow_rule(dev, ft_prio, flow_attr,
+ dst);
+ }
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
handler = create_leftovers_rule(dev, ft_prio, flow_attr,
@@ -1716,6 +1776,17 @@ static struct device_attribute *mlx5_class_attributes[] = {
&dev_attr_reg_pages,
};
+static void pkey_change_handler(struct work_struct *work)
+{
+ struct mlx5_ib_port_resources *ports =
+ container_of(work, struct mlx5_ib_port_resources,
+ pkey_change_work);
+
+ mutex_lock(&ports->devr->mutex);
+ mlx5_ib_gsi_pkey_change(ports->gsi);
+ mutex_unlock(&ports->devr->mutex);
+}
+
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param)
{
@@ -1752,6 +1823,8 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
port = (u8)param;
+
+ schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
@@ -1838,7 +1911,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
mlx5_ib_warn(dev, "mr cache cleanup failed\n");
mlx5_ib_destroy_qp(dev->umrc.qp);
- ib_destroy_cq(dev->umrc.cq);
+ ib_free_cq(dev->umrc.cq);
ib_dealloc_pd(dev->umrc.pd);
}
@@ -1853,7 +1926,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
- struct ib_cq_init_attr cq_attr = {};
int ret;
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
@@ -1870,15 +1942,12 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
goto error_0;
}
- cq_attr.cqe = 128;
- cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
- &cq_attr);
+ cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
if (IS_ERR(cq)) {
mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
ret = PTR_ERR(cq);
goto error_2;
}
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
init_attr->send_cq = cq;
init_attr->recv_cq = cq;
@@ -1945,7 +2014,7 @@ error_4:
mlx5_ib_destroy_qp(qp);
error_3:
- ib_destroy_cq(cq);
+ ib_free_cq(cq);
error_2:
ib_dealloc_pd(pd);
@@ -1961,10 +2030,13 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
struct ib_srq_init_attr attr;
struct mlx5_ib_dev *dev;
struct ib_cq_init_attr cq_attr = {.cqe = 1};
+ int port;
int ret = 0;
dev = container_of(devr, struct mlx5_ib_dev, devr);
+ mutex_init(&devr->mutex);
+
devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
if (IS_ERR(devr->p0)) {
ret = PTR_ERR(devr->p0);
@@ -2052,6 +2124,12 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s0->usecnt, 0);
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
+ INIT_WORK(&devr->ports[port].pkey_change_work,
+ pkey_change_handler);
+ devr->ports[port].devr = devr;
+ }
+
return 0;
error5:
@@ -2070,12 +2148,20 @@ error0:
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
+ struct mlx5_ib_dev *dev =
+ container_of(devr, struct mlx5_ib_dev, devr);
+ int port;
+
mlx5_ib_destroy_srq(devr->s1);
mlx5_ib_destroy_srq(devr->s0);
mlx5_ib_dealloc_xrcd(devr->x0);
mlx5_ib_dealloc_xrcd(devr->x1);
mlx5_ib_destroy_cq(devr->c0);
mlx5_ib_dealloc_pd(devr->p0);
+
+ /* Make sure no change P_Key work items are still executing */
+ for (port = 0; port < dev->num_ports; ++port)
+ cancel_work_sync(&devr->ports[port].pkey_change_work);
}
static u32 get_core_cap_flags(struct ib_device *ibdev)
@@ -2198,6 +2284,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_REREG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
@@ -2258,6 +2345,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
+ dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
@@ -2266,9 +2354,23 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
+ if (mlx5_core_is_pf(mdev)) {
+ dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
+ dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
+ dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
+ dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
+ }
mlx5_ib_internal_fill_odp_caps(dev);
+ if (MLX5_CAP_GEN(mdev, imaicl)) {
+ dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
+ dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
+ dev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+ }
+
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d2b9737baa36..b46c25542a7c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -43,6 +43,7 @@
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
+#include <rdma/ib_user_verbs.h>
#define mlx5_ib_dbg(dev, format, arg...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
@@ -126,7 +127,7 @@ struct mlx5_ib_pd {
};
#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
-#define MLX5_IB_FLOW_LAST_PRIO (MLX5_IB_FLOW_MCAST_PRIO - 1)
+#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
@@ -162,9 +163,31 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
+
+#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 3)
+#define MLX5_IB_SEND_UMR_UPDATE_PD (IB_SEND_RESERVED_START << 4)
+#define MLX5_IB_SEND_UMR_UPDATE_ACCESS IB_SEND_RESERVED_END
+
#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
+/*
+ * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
+ * creates the actual hardware QP.
+ */
+#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
+/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
+ *
+ * These flags are intended for internal use by the mlx5_ib driver, and they
+ * rely on the range reserved for that use in the ib_qp_create_flags enum.
+ */
+
+/* Create a UD QP whose source QP number is 1 */
+static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
+{
+ return IB_QP_CREATE_RESERVED_START;
+}
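
The helper hides the reserved create flag behind a function so other code never hardcodes the value. A sketch of the expected check on the QP-creation path, assuming the mlx5 create_qp code tests it roughly like this:

	if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
		/* UD QP that transmits with source QP number 1 */
		qp->flags |= MLX5_IB_QP_SQPN_QP1;
	}
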
+
struct wr_list {
u16 opcode;
u16 next;
@@ -325,11 +348,14 @@ struct mlx5_ib_cq_buf {
};
enum mlx5_ib_qp_flags {
- MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0,
- MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
- MLX5_IB_QP_CROSS_CHANNEL = 1 << 2,
- MLX5_IB_QP_MANAGED_SEND = 1 << 3,
- MLX5_IB_QP_MANAGED_RECV = 1 << 4,
+ MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
+ MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL,
+ MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND,
+ MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV,
+ MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
+ /* QP uses 1 as its source QP number */
+ MLX5_IB_QP_SQPN_QP1 = 1 << 6,
};
struct mlx5_umr_wr {
@@ -373,6 +399,14 @@ struct mlx5_ib_cq {
struct ib_umem *resize_umem;
int cqe_size;
u32 create_flags;
+ struct list_head wc_list;
+ enum ib_cq_notify_flags notify_flags;
+ struct work_struct notify_work;
+};
+
+struct mlx5_ib_wc {
+ struct ib_wc wc;
+ struct list_head list;
};
struct mlx5_ib_srq {
@@ -413,7 +447,8 @@ struct mlx5_ib_mr {
int ndescs;
int max_descs;
int desc_size;
- struct mlx5_core_mr mmr;
+ int access_mode;
+ struct mlx5_core_mkey mmkey;
struct ib_umem *umem;
struct mlx5_shared_mr_info *smr_info;
struct list_head list;
@@ -425,19 +460,20 @@ struct mlx5_ib_mr {
struct mlx5_core_sig_ctx *sig;
int live;
void *descs_alloc;
+ int access_flags; /* Needed for rereg MR */
+};
+
+struct mlx5_ib_mw {
+ struct ib_mw ibmw;
+ struct mlx5_core_mkey mmkey;
};
struct mlx5_ib_umr_context {
+ struct ib_cqe cqe;
enum ib_wc_status status;
struct completion done;
};
-static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
-{
- context->status = -1;
- init_completion(&context->done);
-}
-
struct umr_common {
struct ib_pd *pd;
struct ib_cq *cq;
@@ -487,6 +523,14 @@ struct mlx5_mr_cache {
unsigned long last_add;
};
+struct mlx5_ib_gsi_qp;
+
+struct mlx5_ib_port_resources {
+ struct mlx5_ib_resources *devr;
+ struct mlx5_ib_gsi_qp *gsi;
+ struct work_struct pkey_change_work;
+};
+
struct mlx5_ib_resources {
struct ib_cq *c0;
struct ib_xrcd *x0;
@@ -494,6 +538,9 @@ struct mlx5_ib_resources {
struct ib_pd *p0;
struct ib_srq *s0;
struct ib_srq *s1;
+ struct mlx5_ib_port_resources ports[2];
+ /* Protects changes to the port resources */
+ struct mutex mutex;
};
struct mlx5_roce {
@@ -558,9 +605,9 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}
-static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
+static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
- return container_of(mmr, struct mlx5_ib_mr, mmr);
+ return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}
static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
@@ -588,6 +635,11 @@ static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}
+static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct mlx5_ib_mw, ibmw);
+}
+
struct mlx5_ib_ah {
struct ib_ah ibah;
struct mlx5_av av;
@@ -648,8 +700,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata);
+int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
int npages, int zap);
+int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 virt_addr, int access_flags,
+ struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
@@ -700,7 +758,6 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
@@ -719,7 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
unsigned long end);
-
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
@@ -736,9 +792,35 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_info *info);
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u8 port, int state);
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+ u64 guid, int type);
+
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int index);
+/* GSI QP helper functions */
+struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr);
+int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask);
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
+
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -758,7 +840,7 @@ static inline u8 convert_access(int acc)
static inline int is_qp1(enum ib_qp_type qp_type)
{
- return qp_type == IB_QPT_GSI;
+ return qp_type == MLX5_IB_QPT_HW_GSI;
}
#define MLX5_MAX_UMR_SHIFT 16
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 6000f7aeede9..4d5bff151cdf 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -40,6 +40,7 @@
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
+#include "user.h"
enum {
MAX_PENDING_REG_MR = 8,
@@ -57,7 +58,7 @@ static int clean_mr(struct mlx5_ib_mr *mr);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
- int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Wait until all page fault handlers using the mr complete. */
@@ -77,6 +78,40 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
return order - cache->ent[0].order;
}
+static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
+{
+ return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
+ length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
+}
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static void update_odp_mr(struct mlx5_ib_mr *mr)
+{
+ if (mr->umem->odp_data) {
+ /*
+ * This barrier prevents the compiler from moving the
+ * setting of umem->odp_data->private to point to our
+ * MR before reg_umr has finished, ensuring that MR
+ * initialization has completed before we start
+ * handling invalidations.
+ */
+ smp_wmb();
+ mr->umem->odp_data->private = mr;
+ /*
+ * Make sure we will see the new
+ * umem->odp_data->private value in the invalidation
+ * routines, before we can get page faults on the
+ * MR. Page faults can happen once we put the MR in
+ * the tree, below this line. Without the barrier,
+ * there can be a fault handling and an invalidation
+ * before umem->odp_data->private == mr is visible to
+ * the invalidation handler.
+ */
+ smp_wmb();
+ }
+}
+#endif
+
static void reg_mr_callback(int status, void *context)
{
struct mlx5_ib_mr *mr = context;
@@ -86,7 +121,7 @@ static void reg_mr_callback(int status, void *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
+ struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
int err;
spin_lock_irqsave(&ent->lock, flags);
@@ -113,7 +148,7 @@ static void reg_mr_callback(int status, void *context)
spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
key = dev->mdev->priv.mkey_key++;
spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
- mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+ mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
cache->last_add = jiffies;
@@ -124,10 +159,10 @@ static void reg_mr_callback(int status, void *context)
spin_unlock_irqrestore(&ent->lock, flags);
write_lock_irqsave(&table->lock, flags);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
- &mr->mmr);
+ err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey);
if (err)
- pr_err("Error inserting to mr tree. 0x%x\n", -err);
+ pr_err("Error inserting to mkey tree. 0x%x\n", -err);
write_unlock_irqrestore(&table->lock, flags);
}
@@ -168,7 +203,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
sizeof(*in), reg_mr_callback,
mr, &mr->out);
if (err) {
@@ -657,14 +692,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
seg->start_addr = 0;
- err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+ err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
NULL);
if (err)
goto err_in;
kfree(in);
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
mr->umem = NULL;
return &mr->ibmr;
@@ -693,10 +728,40 @@ static int use_umr(int order)
return order <= MLX5_MAX_UMR_SHIFT;
}
-static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
- struct ib_sge *sg, u64 dma, int n, u32 key,
- int page_shift, u64 virt_addr, u64 len,
- int access_flags)
+static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int npages, int page_shift, int *size,
+ __be64 **mr_pas, dma_addr_t *dma)
+{
+ __be64 *pas;
+ struct device *ddev = dev->ib_dev.dma_device;
+
+ /*
+ * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the pas array, we allocate
+ * a little more.
+ */
+ *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
+ *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+ if (!(*mr_pas))
+ return -ENOMEM;
+
+ pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
+ mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
+ /* Clear padding after the actual pages. */
+ memset(pas + npages, 0, *size - npages * sizeof(u64));
+
+ *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, *dma)) {
+ kfree(*mr_pas);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
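
A quick worked example of the sizing above, assuming MLX5_UMR_MTT_ALIGNMENT is 64 bytes: for npages == 5 the raw pas array needs 40 bytes, *size rounds up to 64, and the memset() zeroes the three trailing entries so UMR never copies garbage.

	int size = ALIGN(sizeof(u64) * 5, 64);	/* 40 -> 64 */
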
+
+static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
+ struct ib_sge *sg, u64 dma, int n, u32 key,
+ int page_shift)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_umr_wr *umrwr = umr_wr(wr);
@@ -706,7 +771,6 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
sg->lkey = dev->umrc.pd->local_dma_lkey;
wr->next = NULL;
- wr->send_flags = 0;
wr->sg_list = sg;
if (n)
wr->num_sge = 1;
@@ -718,6 +782,19 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
umrwr->npages = n;
umrwr->page_shift = page_shift;
umrwr->mkey = key;
+}
+
+static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
+ struct ib_sge *sg, u64 dma, int n, u32 key,
+ int page_shift, u64 virt_addr, u64 len,
+ int access_flags)
+{
+ struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+ prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);
+
+ wr->send_flags = 0;
+
umrwr->target.virt_addr = virt_addr;
umrwr->length = len;
umrwr->access_flags = access_flags;
@@ -734,26 +811,45 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
umrwr->mkey = key;
}
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
+static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+ int access_flags, int *npages,
+ int *page_shift, int *ncont, int *order)
{
- struct mlx5_ib_umr_context *context;
- struct ib_wc wc;
- int err;
-
- while (1) {
- err = ib_poll_cq(cq, 1, &wc);
- if (err < 0) {
- pr_warn("poll cq error %d\n", err);
- return;
- }
- if (err == 0)
- break;
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
+ access_flags, 0);
+ if (IS_ERR(umem)) {
+ mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+ return (void *)umem;
+ }
- context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
- context->status = wc.status;
- complete(&context->done);
+ mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
+ if (!*npages) {
+ mlx5_ib_warn(dev, "avoid zero region\n");
+ ib_umem_release(umem);
+ return ERR_PTR(-EINVAL);
}
- ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+ mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
+ *npages, *ncont, *order, *page_shift);
+
+ return umem;
+}
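
mr_umem_get() reports failure through the kernel's ERR_PTR convention: the errno rides inside the pointer value itself, so callers test IS_ERR() rather than a separate status. A user-space re-declaration of the three helpers (the real ones live in <linux/err.h>):

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(intptr_t)(err))
    #define PTR_ERR(ptr)  ((long)(intptr_t)(ptr))
    #define IS_ERR(ptr)   ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

    static int fake_umem;                   /* stand-in for struct ib_umem */

    static void *get_umem(int npages)
    {
        if (!npages)
            return ERR_PTR(-EINVAL);        /* the "avoid zero region" case */
        return &fake_umem;
    }

    int main(void)
    {
        void *u = get_umem(0);

        if (IS_ERR(u))
            printf("umem get failed (%ld)\n", PTR_ERR(u));
        return 0;
    }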
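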
+
+static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_umr_context *context =
+ container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
+
+ context->status = wc->status;
+ complete(&context->done);
+}
+
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+ context->cqe.done = mlx5_ib_umr_done;
+ context->status = -1;
+ init_completion(&context->done);
}
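
This is the new-style CQE completion interface: instead of stuffing a context pointer into wr_id, the work request carries a CQE whose done() callback uses container_of() to recover the embedding context. A tiny standalone demo of that recovery step (local container_of definition; struct names here are illustrative):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cqe { void (*done)(struct cqe *cqe); };

    struct umr_context {
        struct cqe cqe;             /* embedded, as in mlx5_ib_umr_context */
        int status;
    };

    static void umr_done(struct cqe *cqe)
    {
        /* Recover the surrounding context from the embedded member. */
        struct umr_context *ctx = container_of(cqe, struct umr_context, cqe);

        ctx->status = 0;            /* stand-in for wc->status */
        printf("completed context %p\n", (void *)ctx);
    }

    int main(void)
    {
        struct umr_context ctx = { .cqe = { .done = umr_done }, .status = -1 };

        ctx.cqe.done(&ctx.cqe);     /* what the CQ layer would do */
        return 0;
    }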
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
@@ -764,13 +860,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
struct device *ddev = dev->ib_dev.dma_device;
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
- struct mlx5_umr_wr umrwr;
+ struct mlx5_umr_wr umrwr = {};
struct ib_send_wr *bad;
struct mlx5_ib_mr *mr;
struct ib_sge sg;
int size;
__be64 *mr_pas;
- __be64 *pas;
dma_addr_t dma;
int err = 0;
int i;
@@ -790,33 +885,17 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
if (!mr)
return ERR_PTR(-EAGAIN);
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the pas array, we allocate
- * a little more. */
- size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
- mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
- if (!mr_pas) {
- err = -ENOMEM;
+ err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
+ &dma);
+ if (err)
goto free_mr;
- }
- pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
- mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
- /* Clear padding after the actual pages. */
- memset(pas + npages, 0, size - npages * sizeof(u64));
-
- dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
- if (dma_mapping_error(ddev, dma)) {
- err = -ENOMEM;
- goto free_pas;
- }
+ mlx5_ib_init_umr_context(&umr_context);
- memset(&umrwr, 0, sizeof(umrwr));
- umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+ umrwr.wr.wr_cqe = &umr_context.cqe;
+ prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
page_shift, virt_addr, len, access_flags);
- mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
if (err) {
@@ -830,9 +909,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
}
}
- mr->mmr.iova = virt_addr;
- mr->mmr.size = len;
- mr->mmr.pd = to_mpd(pd)->pdn;
+ mr->mmkey.iova = virt_addr;
+ mr->mmkey.size = len;
+ mr->mmkey.pd = to_mpd(pd)->pdn;
mr->live = 1;
@@ -840,7 +919,6 @@ unmap_dma:
up(&umrc->sem);
dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
-free_pas:
kfree(mr_pas);
free_mr:
@@ -929,8 +1007,10 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
+ mlx5_ib_init_umr_context(&umr_context);
+
memset(&wr, 0, sizeof(wr));
- wr.wr.wr_id = (u64)(unsigned long)&umr_context;
+ wr.wr.wr_cqe = &umr_context.cqe;
sg.addr = dma;
sg.length = ALIGN(npages * sizeof(u64),
@@ -944,10 +1024,9 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
wr.wr.opcode = MLX5_IB_WR_UMR;
wr.npages = sg.length / sizeof(u64);
wr.page_shift = PAGE_SHIFT;
- wr.mkey = mr->mmr.key;
+ wr.mkey = mr->mmkey.key;
wr.target.offset = start_page_index;
- mlx5_ib_init_umr_context(&umr_context);
down(&umrc->sem);
err = ib_post_send(umrc->qp, &wr.wr, &bad);
if (err) {
@@ -974,10 +1053,14 @@ free_pas:
}
#endif
-static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
- u64 length, struct ib_umem *umem,
- int npages, int page_shift,
- int access_flags)
+/*
+ * If ibmr is NULL, reg_create() allocates a new MR.
+ * Otherwise, the given ibmr is reused.
+ */
+static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
+ u64 virt_addr, u64 length,
+ struct ib_umem *umem, int npages,
+ int page_shift, int access_flags)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_create_mkey_mbox_in *in;
@@ -986,7 +1069,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
int err;
bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -1013,7 +1096,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
1 << page_shift));
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
NULL, NULL);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
@@ -1024,7 +1107,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
mr->live = 1;
kvfree(in);
- mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
+ mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
return mr;
@@ -1032,11 +1115,23 @@ err_2:
kvfree(in);
err_1:
- kfree(mr);
+ if (!ibmr)
+ kfree(mr);
return ERR_PTR(err);
}
+static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
+ int npages, u64 length, int access_flags)
+{
+ mr->npages = npages;
+ atomic_add(npages, &dev->mdev->priv.reg_pages);
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+ mr->ibmr.length = length;
+ mr->access_flags = access_flags;
+}
+
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -1052,22 +1147,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
- umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
- 0);
- if (IS_ERR(umem)) {
- mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
- return (void *)umem;
- }
+ umem = mr_umem_get(pd, start, length, access_flags, &npages,
+ &page_shift, &ncont, &order);
- mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
- if (!npages) {
- mlx5_ib_warn(dev, "avoid zero region\n");
- err = -EINVAL;
- goto error;
- }
-
- mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
- npages, ncont, order, page_shift);
+ if (IS_ERR(umem))
+ return (void *)umem;
if (use_umr(order)) {
mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@@ -1083,45 +1167,21 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
if (!mr)
- mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
- access_flags);
+ mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
+ page_shift, access_flags);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto error;
}
- mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
mr->umem = umem;
- mr->npages = npages;
- atomic_add(npages, &dev->mdev->priv.reg_pages);
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
+ set_mr_fields(dev, mr, npages, length, access_flags);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (umem->odp_data) {
- /*
- * This barrier prevents the compiler from moving the
- * setting of umem->odp_data->private to point to our
- * MR, before reg_umr finished, to ensure that the MR
- * initialization have finished before starting to
- * handle invalidations.
- */
- smp_wmb();
- mr->umem->odp_data->private = mr;
- /*
- * Make sure we will see the new
- * umem->odp_data->private value in the invalidation
- * routines, before we can get page faults on the
- * MR. Page faults can happen once we put the MR in
- * the tree, below this line. Without the barrier,
- * there can be a fault handling and an invalidation
- * before umem->odp_data->private == mr is visible to
- * the invalidation handler.
- */
- smp_wmb();
- }
+ update_odp_mr(mr);
#endif
return &mr->ibmr;
@@ -1135,15 +1195,15 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context;
- struct mlx5_umr_wr umrwr;
+ struct mlx5_umr_wr umrwr = {};
struct ib_send_wr *bad;
int err;
- memset(&umrwr.wr, 0, sizeof(umrwr));
- umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
- prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
-
mlx5_ib_init_umr_context(&umr_context);
+
+ umrwr.wr.wr_cqe = &umr_context.cqe;
+ prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
+
down(&umrc->sem);
err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
if (err) {
@@ -1165,6 +1225,167 @@ error:
return err;
}
+static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
+ u64 length, int npages, int page_shift, int order,
+ int access_flags, int flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct device *ddev = dev->ib_dev.dma_device;
+ struct mlx5_ib_umr_context umr_context;
+ struct ib_send_wr *bad;
+ struct mlx5_umr_wr umrwr = {};
+ struct ib_sge sg;
+ struct umr_common *umrc = &dev->umrc;
+ dma_addr_t dma = 0;
+ __be64 *mr_pas = NULL;
+ int size;
+ int err;
+
+ mlx5_ib_init_umr_context(&umr_context);
+
+ umrwr.wr.wr_cqe = &umr_context.cqe;
+ umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
+ &mr_pas, &dma);
+ if (err)
+ return err;
+
+ umrwr.target.virt_addr = virt_addr;
+ umrwr.length = length;
+ umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+ }
+
+ prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
+ page_shift);
+
+ if (flags & IB_MR_REREG_PD) {
+ umrwr.pd = pd;
+ umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
+ umrwr.access_flags = access_flags;
+ umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
+ }
+
+ /* post send request to UMR QP */
+ down(&umrc->sem);
+ err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
+
+ if (err) {
+ mlx5_ib_warn(dev, "post send failed, err %d\n", err);
+ } else {
+ wait_for_completion(&umr_context.done);
+ if (umr_context.status != IB_WC_SUCCESS) {
+ mlx5_ib_warn(dev, "reg umr failed (%u)\n",
+ umr_context.status);
+ err = -EFAULT;
+ }
+ }
+
+ up(&umrc->sem);
+ if (flags & IB_MR_REREG_TRANS) {
+ dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
+ kfree(mr_pas);
+ }
+ return err;
+}
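
rereg_umr() starts from MLX5_IB_SEND_UMR_FAIL_IF_FREE and OR-accumulates one UMR update flag per requested rereg operation. A compact sketch of just that flag translation, with stand-in constant values (the real ones come from the IB core and mlx5 headers):

    #include <stdio.h>

    /* Stand-in flag values; the real ones are kernel-defined. */
    enum { REREG_TRANS = 1, REREG_PD = 2, REREG_ACCESS = 4 };
    enum { UMR_FAIL_IF_FREE = 1 << 0, UMR_UPD_TRANS = 1 << 1,
           UMR_UPD_PD = 1 << 2, UMR_UPD_ACCESS = 1 << 3 };

    static unsigned rereg_to_umr_flags(int flags)
    {
        unsigned send_flags = UMR_FAIL_IF_FREE;   /* always set */

        if (flags & REREG_TRANS)
            send_flags |= UMR_UPD_TRANS;
        if (flags & REREG_PD)
            send_flags |= UMR_UPD_PD;
        if (flags & REREG_ACCESS)
            send_flags |= UMR_UPD_ACCESS;
        return send_flags;
    }

    int main(void)
    {
        printf("0x%x\n", rereg_to_umr_flags(REREG_TRANS | REREG_ACCESS));
        return 0;
    }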
+
+int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 virt_addr, int new_access_flags,
+ struct ib_pd *new_pd, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ib_mr);
+ struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
+ int access_flags = flags & IB_MR_REREG_ACCESS ?
+ new_access_flags :
+ mr->access_flags;
+ u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
+ u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
+ int page_shift = 0;
+ int npages = 0;
+ int ncont = 0;
+ int order = 0;
+ int err;
+
+ mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, virt_addr, length, access_flags);
+
+ if (flags != IB_MR_REREG_PD) {
+ /*
+ * Replace umem. This needs to be done whether or not UMR is
+ * used.
+ */
+ flags |= IB_MR_REREG_TRANS;
+ ib_umem_release(mr->umem);
+ mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
+ &page_shift, &ncont, &order);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ mr->umem = NULL;
+ return err;
+ }
+ }
+
+ if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+ /*
+ * UMR can't be used; the MKey needs to be replaced.
+ */
+ if (mr->umred) {
+ err = unreg_umr(dev, mr);
+ if (err)
+ mlx5_ib_warn(dev, "Failed to unregister MR\n");
+ } else {
+ err = destroy_mkey(dev, mr);
+ if (err)
+ mlx5_ib_warn(dev, "Failed to destroy MKey\n");
+ }
+ if (err)
+ return err;
+
+ mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
+ page_shift, access_flags);
+
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+
+ mr->umred = 0;
+ } else {
+ /*
+ * Send a UMR WQE
+ */
+ err = rereg_umr(pd, mr, addr, len, npages, page_shift,
+ order, access_flags, flags);
+ if (err) {
+ mlx5_ib_warn(dev, "Failed to rereg UMR\n");
+ return err;
+ }
+ }
+
+ if (flags & IB_MR_REREG_PD) {
+ ib_mr->pd = pd;
+ mr->mmkey.pd = to_mpd(pd)->pdn;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS)
+ mr->access_flags = access_flags;
+
+ if (flags & IB_MR_REREG_TRANS) {
+ atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+ set_mr_fields(dev, mr, npages, len, access_flags);
+ mr->mmkey.iova = addr;
+ mr->mmkey.size = len;
+ }
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ update_odp_mr(mr);
+#endif
+
+ return 0;
+}
+
static int
mlx5_alloc_priv_descs(struct ib_device *device,
struct mlx5_ib_mr *mr,
@@ -1236,7 +1457,7 @@ static int clean_mr(struct mlx5_ib_mr *mr)
err = destroy_mkey(dev, mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
- mr->mmr.key, err);
+ mr->mmkey.key, err);
return err;
}
} else {
@@ -1300,8 +1521,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_ib_mr *mr;
- int access_mode, err;
- int ndescs = roundup(max_num_sg, 4);
+ int ndescs = ALIGN(max_num_sg, 4);
+ int err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -1319,7 +1540,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
if (mr_type == IB_MR_TYPE_MEM_REG) {
- access_mode = MLX5_ACCESS_MODE_MTT;
+ mr->access_mode = MLX5_ACCESS_MODE_MTT;
in->seg.log2_page_size = PAGE_SHIFT;
err = mlx5_alloc_priv_descs(pd->device, mr,
@@ -1329,6 +1550,15 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
mr->desc_size = sizeof(u64);
mr->max_descs = ndescs;
+ } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
+ mr->access_mode = MLX5_ACCESS_MODE_KLM;
+
+ err = mlx5_alloc_priv_descs(pd->device, mr,
+ ndescs, sizeof(struct mlx5_klm));
+ if (err)
+ goto err_free_in;
+ mr->desc_size = sizeof(struct mlx5_klm);
+ mr->max_descs = ndescs;
} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
u32 psv_index[2];
@@ -1347,7 +1577,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
if (err)
goto err_free_sig;
- access_mode = MLX5_ACCESS_MODE_KLM;
+ mr->access_mode = MLX5_ACCESS_MODE_KLM;
mr->sig->psv_memory.psv_idx = psv_index[0];
mr->sig->psv_wire.psv_idx = psv_index[1];
@@ -1361,14 +1591,14 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
goto err_free_in;
}
- in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
+ in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
NULL, NULL, NULL);
if (err)
goto err_destroy_psv;
- mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.rkey = mr->mmr.key;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
mr->umem = NULL;
kfree(in);
@@ -1395,6 +1625,88 @@ err_free:
return ERR_PTR(err);
}
+struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_create_mkey_mbox_in *in = NULL;
+ struct mlx5_ib_mw *mw = NULL;
+ int ndescs;
+ int err;
+ struct mlx5_ib_alloc_mw req = {};
+ struct {
+ __u32 comp_mask;
+ __u32 response_length;
+ } resp = {};
+
+ err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
+ if (err)
+ return ERR_PTR(err);
+
+ if (req.comp_mask || req.reserved1 || req.reserved2)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (udata->inlen > sizeof(req) &&
+ !ib_is_udata_cleared(udata, sizeof(req),
+ udata->inlen - sizeof(req)))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
+
+ mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!mw || !in) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
+ in->seg.xlt_oct_size = cpu_to_be32(ndescs);
+ in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+ in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
+ MLX5_PERM_LOCAL_READ;
+ if (type == IB_MW_TYPE_2)
+ in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
+ NULL, NULL, NULL);
+ if (err)
+ goto free;
+
+ mw->ibmw.rkey = mw->mmkey.key;
+
+ resp.response_length = min(offsetof(typeof(resp), response_length) +
+ sizeof(resp.response_length), udata->outlen);
+ if (resp.response_length) {
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err) {
+ mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
+ goto free;
+ }
+ }
+
+ kfree(in);
+ return &mw->ibmw;
+
+free:
+ kfree(mw);
+ kfree(in);
+ return ERR_PTR(err);
+}
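
The response copy above follows the usual extensible-ABI pattern: copy back min(what the kernel has, what user space asked for), measured up to and including response_length. A standalone illustration of that offsetof()-based arithmetic, mirroring the anonymous resp struct above:

    #include <stdio.h>
    #include <stddef.h>

    struct resp {
        unsigned int comp_mask;
        unsigned int response_length;
    };

    static size_t resp_len(size_t outlen)
    {
        /* Bytes up to and including the response_length field. */
        size_t full = offsetof(struct resp, response_length) +
                      sizeof(((struct resp *)0)->response_length);
        return outlen < full ? outlen : full;
    }

    int main(void)
    {
        /* Old user space with a short buffer vs. current user space. */
        printf("outlen=4  -> copy %zu bytes\n", resp_len(4));
        printf("outlen=16 -> copy %zu bytes\n", resp_len(16));
        return 0;
    }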
+
+int mlx5_ib_dealloc_mw(struct ib_mw *mw)
+{
+ struct mlx5_ib_mw *mmw = to_mmw(mw);
+ int err;
+
+ err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
+ &mmw->mmkey);
+ if (!err)
+ kfree(mmw);
+ return err;
+}
+
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status)
{
@@ -1436,6 +1748,32 @@ done:
return ret;
}
+static int
+mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
+ struct scatterlist *sgl,
+ unsigned short sg_nents)
+{
+ struct scatterlist *sg = sgl;
+ struct mlx5_klm *klms = mr->descs;
+ u32 lkey = mr->ibmr.pd->local_dma_lkey;
+ int i;
+
+ mr->ibmr.iova = sg_dma_address(sg);
+ mr->ibmr.length = 0;
+ mr->ndescs = sg_nents;
+
+ for_each_sg(sgl, sg, sg_nents, i) {
+ if (unlikely(i > mr->max_descs))
+ break;
+ klms[i].va = cpu_to_be64(sg_dma_address(sg));
+ klms[i].bcount = cpu_to_be32(sg_dma_len(sg));
+ klms[i].key = cpu_to_be32(lkey);
+ mr->ibmr.length += sg_dma_len(sg);
+ }
+
+ return i;
+}
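
Each DMA-mapped SG entry becomes one {va, byte count, key} KLM triple, which is why IB_MR_TYPE_SG_GAPS registrations can tolerate arbitrary element sizes and offsets where the MTT path cannot. A user-space sketch of the same loop over a fake SG list (byte-order conversion omitted for brevity):

    #include <stdio.h>
    #include <stdint.h>

    struct sg  { uint64_t dma_addr; uint32_t dma_len; };
    struct klm { uint64_t va; uint32_t bcount; uint32_t key; };

    int main(void)
    {
        struct sg sgl[] = { { 0x1000, 512 }, { 0x9000, 100 }, { 0x20000, 4096 } };
        struct klm klms[3];
        uint32_t lkey = 0x1234;
        uint64_t total = 0;
        int i, n = 3;

        for (i = 0; i < n; i++) {
            klms[i].va = sgl[i].dma_addr;    /* gaps and odd sizes are fine */
            klms[i].bcount = sgl[i].dma_len;
            klms[i].key = lkey;
            total += sgl[i].dma_len;
        }
        printf("mapped %d entries, length %llu\n", n,
               (unsigned long long)total);
        return 0;
    }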
+
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1463,7 +1801,10 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
mr->desc_size * mr->max_descs,
DMA_TO_DEVICE);
- n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+ if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ n = mlx5_ib_sg_to_klms(mr, sg, sg_nents);
+ else
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index b8d76361a48d..34e79e709c67 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -142,13 +142,13 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
u32 key)
{
u32 base_key = mlx5_base_mkey(key);
- struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);
- struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);
+ struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
+ struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
- if (!mmr || mmr->key != key || !mr->live)
+ if (!mmkey || mmkey->key != key || !mr->live)
return NULL;
- return container_of(mmr, struct mlx5_ib_mr, mmr);
+ return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}
static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
@@ -232,7 +232,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
io_virt += pfault->mpfault.bytes_committed;
bcnt -= pfault->mpfault.bytes_committed;
- start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
+ start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;
if (mr->umem->writable)
access_mask |= ODP_WRITE_ALLOWED_BIT;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 34cb8e87c7b8..8dee8bc1e0fe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -58,6 +58,7 @@ enum {
static const u32 mlx5_ib_opcode[] = {
[IB_WR_SEND] = MLX5_OPCODE_SEND,
+ [IB_WR_LSO] = MLX5_OPCODE_LSO,
[IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
[IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
[IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
@@ -72,6 +73,9 @@ static const u32 mlx5_ib_opcode[] = {
[MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
};
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
static int is_qp0(enum ib_qp_type qp_type)
{
@@ -260,11 +264,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
return 0;
}
-static int sq_overhead(enum ib_qp_type qp_type)
+static int sq_overhead(struct ib_qp_init_attr *attr)
{
int size = 0;
- switch (qp_type) {
+ switch (attr->qp_type) {
case IB_QPT_XRC_INI:
size += sizeof(struct mlx5_wqe_xrc_seg);
/* fall through */
@@ -287,8 +291,12 @@ static int sq_overhead(enum ib_qp_type qp_type)
break;
case IB_QPT_UD:
+ if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ size += sizeof(struct mlx5_wqe_eth_pad) +
+ sizeof(struct mlx5_wqe_eth_seg);
+ /* fall through */
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_datagram_seg);
break;
@@ -311,7 +319,7 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
int inl_size = 0;
int size;
- size = sq_overhead(attr->qp_type);
+ size = sq_overhead(attr);
if (size < 0)
return size;
@@ -348,8 +356,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
return -EINVAL;
}
- qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
- sizeof(struct mlx5_wqe_inline_seg);
+ qp->max_inline_data = wqe_size - sq_overhead(attr) -
+ sizeof(struct mlx5_wqe_inline_seg);
attr->cap.max_inline_data = qp->max_inline_data;
if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
@@ -590,7 +598,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_XRC_INI:
case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
- case IB_QPT_GSI: return MLX5_QP_ST_QP1;
+ case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
case IB_QPT_RAW_PACKET:
case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
@@ -783,7 +791,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev->priv.uuari;
- if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
+ if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
+ IB_QP_CREATE_IPOIB_UD_LSO |
+ mlx5_ib_create_qp_sqpn_qp1()))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
@@ -828,6 +839,11 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
+ if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
+ (*in)->ctx.deth_sqpn = cpu_to_be32(1);
+ qp->flags |= MLX5_IB_QP_SQPN_QP1;
+ }
+
mlx5_fill_page_array(&qp->buf, (*in)->pas);
err = mlx5_db_alloc(dev->mdev, &qp->db);
@@ -1228,6 +1244,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
qp->flags |= MLX5_IB_QP_MANAGED_RECV;
}
+
+ if (init_attr->qp_type == IB_QPT_UD &&
+ (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
+ if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+ mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -1271,6 +1295,11 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
ucmd.sq_wqe_count, max_wqes);
return -EINVAL;
}
+ if (init_attr->create_flags &
+ mlx5_ib_create_qp_sqpn_qp1()) {
+ mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
+ return -EINVAL;
+ }
err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
&resp, &inlen, base);
if (err)
@@ -1385,6 +1414,13 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
/* 0xffffff means we ask to work with cqe version 0 */
MLX5_SET(qpc, qpc, user_index, uidx);
}
+ /* We use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB UD QP. */
+ if (init_attr->qp_type == IB_QPT_UD &&
+ (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+ qp->flags |= MLX5_IB_QP_LSO;
+ }
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
@@ -1494,7 +1530,7 @@ static void get_cqs(struct mlx5_ib_qp *qp,
break;
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
@@ -1657,7 +1693,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
case IB_QPT_UC:
case IB_QPT_UD:
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
case MLX5_IB_QPT_REG_UMR:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
@@ -1686,6 +1722,9 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
break;
+ case IB_QPT_GSI:
+ return mlx5_ib_gsi_create_qp(pd, init_attr);
+
case IB_QPT_RAW_IPV6:
case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_MAX:
@@ -1704,6 +1743,9 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
+ if (unlikely(qp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_destroy_qp(qp);
+
destroy_qp_common(dev, mqp);
kfree(mqp);
@@ -2161,8 +2203,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
context = &in->ctx;
err = to_mlx5_st(ibqp->qp_type);
- if (err < 0)
+ if (err < 0) {
+ mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
goto out;
+ }
context->flags = cpu_to_be32(err << 16);
@@ -2182,7 +2226,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
+ if (is_sqp(ibqp->qp_type)) {
context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
} else if (ibqp->qp_type == IB_QPT_UD ||
ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
@@ -2284,6 +2328,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
context->sq_crq_size |= cpu_to_be16(1 << 4);
+ if (qp->flags & MLX5_IB_QP_SQPN_QP1)
+ context->deth_sqpn = cpu_to_be32(1);
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
@@ -2363,11 +2409,18 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ enum ib_qp_type qp_type;
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
int port;
enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
+
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
+ IB_QPT_GSI : ibqp->qp_type;
+
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -2378,32 +2431,46 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
}
- if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- ll))
+ if (qp_type != MLX5_IB_QPT_REG_UMR &&
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
+ }
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
+ attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
goto out;
+ }
if (attr_mask & IB_QP_PKEY_INDEX) {
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
if (attr->pkey_index >=
- dev->mdev->port_caps[port - 1].pkey_table_len)
+ dev->mdev->port_caps[port - 1].pkey_table_len) {
+ mlx5_ib_dbg(dev, "invalid pkey index %d\n",
+ attr->pkey_index);
goto out;
+ }
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
+ mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+ attr->max_rd_atomic);
goto out;
+ }
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic >
- (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
+ mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+ attr->max_dest_rd_atomic);
goto out;
+ }
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
err = 0;
@@ -2442,6 +2509,59 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
rseg->reserved = 0;
}
+static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
+ struct ib_send_wr *wr, void *qend,
+ struct mlx5_ib_qp *qp, int *size)
+{
+ void *seg = eseg;
+
+ memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
+
+ if (wr->send_flags & IB_SEND_IP_CSUM)
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+
+ seg += sizeof(struct mlx5_wqe_eth_seg);
+ *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
+
+ if (wr->opcode == IB_WR_LSO) {
+ struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
+ int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
+ u64 left, leftlen, copysz;
+ void *pdata = ud_wr->header;
+
+ left = ud_wr->hlen;
+ eseg->mss = cpu_to_be16(ud_wr->mss);
+ eseg->inline_hdr_sz = cpu_to_be16(left);
+
+ /*
+ * Check whether there is room up to the end of the queue. If so,
+ * copy the whole header in one shot; otherwise copy up to the end
+ * of the queue, wrap to the start, and then copy the rest.
+ */
+ leftlen = qend - (void *)eseg->inline_hdr_start;
+ copysz = min_t(u64, leftlen, left);
+
+ memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
+
+ if (likely(copysz > size_of_inl_hdr_start)) {
+ seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
+ *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
+ }
+
+ if (unlikely(copysz < left)) { /* the last wqe in the queue */
+ seg = mlx5_get_send_wqe(qp, 0);
+ left -= copysz;
+ pdata += copysz;
+ memcpy(seg, pdata, left);
+ seg += ALIGN(left, 16);
+ *size += ALIGN(left, 16) / 16;
+ }
+ }
+
+ return seg;
+}
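
The LSO path copies the inline packet header into the WQE, possibly split across the wrap point of the send queue, exactly as the comment above describes. A ring-buffer sketch of that two-part copy (buffer and header sizes are arbitrary; MIN is a local helper):

    #include <stdio.h>
    #include <string.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        char queue[32];                            /* stand-in for the SQ */
        const char *hdr = "0123456789ABCDEFGHIJ";  /* 20-byte header */
        size_t left = strlen(hdr);
        char *seg = queue + 24;                    /* 8 bytes to queue end */
        size_t leftlen = (queue + sizeof(queue)) - seg;
        size_t copysz = MIN(leftlen, left);

        memcpy(seg, hdr, copysz);       /* copy up to the end of the queue */
        if (copysz < left)              /* wrap to the start, copy the rest */
            memcpy(queue, hdr + copysz, left - copysz);

        printf("first chunk %zu bytes, wrapped %zu bytes\n",
               copysz, left - copysz);
        return 0;
    }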
+
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
struct ib_send_wr *wr)
{
@@ -2509,6 +2629,11 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
int ndescs = mr->ndescs;
memset(umr, 0, sizeof(*umr));
+
+ if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
umr->flags = MLX5_UMR_CHECK_NOT_FREE;
umr->klm_octowords = get_klm_octo(ndescs);
umr->mkey_mask = frwr_mkey_mask();
@@ -2558,6 +2683,44 @@ static __be64 get_umr_update_mtt_mask(void)
return cpu_to_be64(result);
}
+static __be64 get_umr_update_translation_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_access_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_A |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_pd_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_PD |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
struct ib_send_wr *wr)
{
@@ -2576,9 +2739,15 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
umr->mkey_mask = get_umr_update_mtt_mask();
umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
- } else {
- umr->mkey_mask = get_umr_reg_mr_mask();
}
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
+ umr->mkey_mask |= get_umr_update_translation_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS)
+ umr->mkey_mask |= get_umr_update_access_mask();
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
+ umr->mkey_mask |= get_umr_update_pd_mask();
+ if (!umr->mkey_mask)
+ umr->mkey_mask = get_umr_reg_mr_mask();
} else {
umr->mkey_mask = get_umr_unreg_mr_mask();
}
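
After this hunk, set_reg_umr_segment() builds the mkey mask incrementally, one mask per selected update, and only falls back to the full registration mask when no update flag was set. A standalone sketch of that accumulation (mask values are placeholders for the MLX5_MKEY_MASK_* fields):

    #include <stdio.h>
    #include <stdint.h>

    enum { UPD_TRANSLATION = 1, UPD_ACCESS = 2, UPD_PD = 4 };

    /* Placeholder masks; the real ones are MLX5_MKEY_MASK_* bit fields. */
    #define MASK_TRANSLATION 0x00f
    #define MASK_ACCESS      0x0f0
    #define MASK_PD          0x300
    #define MASK_FULL_REG    0xfff

    static uint64_t build_mkey_mask(int send_flags)
    {
        uint64_t mask = 0;

        if (send_flags & UPD_TRANSLATION)
            mask |= MASK_TRANSLATION;
        if (send_flags & UPD_ACCESS)
            mask |= MASK_ACCESS;
        if (send_flags & UPD_PD)
            mask |= MASK_PD;
        if (!mask)                     /* nothing selected: plain reg */
            mask = MASK_FULL_REG;
        return mask;
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)build_mkey_mask(UPD_PD));
        printf("0x%llx\n", (unsigned long long)build_mkey_mask(0));
        return 0;
    }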
@@ -2603,13 +2772,19 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
int ndescs = ALIGN(mr->ndescs, 8) >> 1;
memset(seg, 0, sizeof(*seg));
- seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
+
+ if (mr->access_mode == MLX5_ACCESS_MODE_MTT)
+ seg->log2_page_size = ilog2(mr->ibmr.page_size);
+ else if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
+ seg->flags = get_umr_flags(access) | mr->access_mode;
seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
seg->start_addr = cpu_to_be64(mr->ibmr.iova);
seg->len = cpu_to_be64(mr->ibmr.length);
seg->xlt_oct_size = cpu_to_be32(ndescs);
- seg->log2_page_size = ilog2(mr->ibmr.page_size);
}
static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
@@ -2630,7 +2805,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
seg->flags = convert_access(umrwr->access_flags);
if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
- seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+ if (umrwr->pd)
+ seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
}
seg->len = cpu_to_be64(umrwr->length);
@@ -3196,13 +3372,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_qp *qp;
struct mlx5_ib_mr *mr;
struct mlx5_wqe_data_seg *dpseg;
struct mlx5_wqe_xrc_seg *xrc;
- struct mlx5_bf *bf = qp->bf;
+ struct mlx5_bf *bf;
int uninitialized_var(size);
- void *qend = qp->sq.qend;
+ void *qend;
unsigned long flags;
unsigned idx;
int err = 0;
@@ -3214,6 +3390,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
u8 next_fence = 0;
u8 fence;
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
+
+ qp = to_mqp(ibqp);
+ bf = qp->bf;
+ qend = qp->sq.qend;
+
spin_lock_irqsave(&qp->sq.lock, flags);
for (nreq = 0; wr; nreq++, wr = wr->next) {
@@ -3373,16 +3556,37 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
break;
- case IB_QPT_UD:
case IB_QPT_SMI:
- case IB_QPT_GSI:
+ case MLX5_IB_QPT_HW_GSI:
set_datagram_seg(seg, wr);
seg += sizeof(struct mlx5_wqe_datagram_seg);
size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
if (unlikely((seg == qend)))
seg = mlx5_get_send_wqe(qp, 0);
break;
+ case IB_QPT_UD:
+ set_datagram_seg(seg, wr);
+ seg += sizeof(struct mlx5_wqe_datagram_seg);
+ size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+
+ if (unlikely((seg == qend)))
+ seg = mlx5_get_send_wqe(qp, 0);
+
+ /* Handle QPs that support UD offload. */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ struct mlx5_wqe_eth_pad *pad;
+
+ pad = seg;
+ memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
+ seg += sizeof(struct mlx5_wqe_eth_pad);
+ size += sizeof(struct mlx5_wqe_eth_pad) / 16;
+ seg = set_eth_seg(seg, wr, qend, qp, &size);
+
+ if (unlikely((seg == qend)))
+ seg = mlx5_get_send_wqe(qp, 0);
+ }
+ break;
case MLX5_IB_QPT_REG_UMR:
if (wr->opcode != MLX5_IB_WR_UMR) {
err = -EINVAL;
@@ -3502,6 +3706,9 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
int ind;
int i;
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
+
spin_lock_irqsave(&qp->rq.lock, flags);
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
@@ -3822,6 +4029,10 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int err = 0;
u8 raw_packet_qp_state;
+ if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+ return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
+ qp_init_attr);
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/*
* Wait for any outstanding page faults, in case the user frees memory
@@ -3874,6 +4085,8 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
+ if (qp->flags & MLX5_IB_QP_SQPN_QP1)
+ qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index b94a55404a59..61bc308bb802 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -152,6 +152,13 @@ struct mlx5_ib_create_qp_resp {
__u32 uuar_index;
};
+struct mlx5_ib_alloc_mw {
+ __u32 comp_mask;
+ __u8 num_klms;
+ __u8 reserved1;
+ __u16 reserved2;
+};
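
The new user/kernel ABI struct spells out its reserved bytes so its size and layout are identical on every architecture, with no implicit padding. A quick compile-time check of that assumption, using stdint types in place of __u32/__u8/__u16:

    #include <stdio.h>
    #include <stdint.h>

    struct mlx5_ib_alloc_mw {
        uint32_t comp_mask;
        uint8_t  num_klms;
        uint8_t  reserved1;
        uint16_t reserved2;
    };

    _Static_assert(sizeof(struct mlx5_ib_alloc_mw) == 8,
                   "ABI struct must be exactly 8 bytes, no hidden padding");

    int main(void)
    {
        printf("sizeof = %zu\n", sizeof(struct mlx5_ib_alloc_mw));
        return 0;
    }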
+
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
struct mlx5_ib_create_qp *ucmd,
int inlen,
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 7d2e42dd6926..6c00d04b8b28 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,8 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
- pages, NULL);
+ ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
index 846dc97cf260..7964eba8e7ed 100644
--- a/drivers/infiniband/hw/nes/Kconfig
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -2,7 +2,6 @@ config INFINIBAND_NES
tristate "NetEffect RNIC Driver"
depends on PCI && INET && INFINIBAND
select LIBCRC32C
- select INET_LRO
---help---
This is the RDMA Network Interface Card (RNIC) driver for
NetEffect Ethernet Cluster Server Adapters.
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 9f9d5c563a61..35cbb17bec12 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -111,17 +111,6 @@ static struct pci_device_id nes_pci_table[] = {
MODULE_DEVICE_TABLE(pci, nes_pci_table);
-/* registered nes netlink callbacks */
-static struct ibnl_client_cbs nes_nl_cb_table[] = {
- [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
- [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
- [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
- [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
- [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
- [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
- [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
-};
-
static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
static int nes_net_event(struct notifier_block *, unsigned long, void *);
static int nes_notifiers_registered;
@@ -682,17 +671,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
}
nes_notifiers_registered++;
- if (ibnl_add_client(RDMA_NL_NES, RDMA_NL_IWPM_NUM_OPS, nes_nl_cb_table))
- printk(KERN_ERR PFX "%s[%u]: Failed to add netlink callback\n",
- __func__, __LINE__);
-
- ret = iwpm_init(RDMA_NL_NES);
- if (ret) {
- printk(KERN_ERR PFX "%s: port mapper initialization failed\n",
- pci_name(pcidev));
- goto bail7;
- }
-
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
@@ -731,7 +709,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
nesdev->netdev_count, nesdev->nesadapter->netdev_count);
- ibnl_remove_client(RDMA_NL_NES);
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
@@ -795,8 +772,6 @@ static void nes_remove(struct pci_dev *pcidev)
nesdev->nesadapter->netdev_count--;
}
}
- ibnl_remove_client(RDMA_NL_NES);
- iwpm_exit(RDMA_NL_NES);
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index cb9f0f27308d..7f0aa23aef9d 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -482,11 +482,11 @@ static void form_cm_frame(struct sk_buff *skb,
iph->ttl = 0x40;
iph->protocol = 0x06; /* IPPROTO_TCP */
- iph->saddr = htonl(cm_node->mapped_loc_addr);
- iph->daddr = htonl(cm_node->mapped_rem_addr);
+ iph->saddr = htonl(cm_node->loc_addr);
+ iph->daddr = htonl(cm_node->rem_addr);
- tcph->source = htons(cm_node->mapped_loc_port);
- tcph->dest = htons(cm_node->mapped_rem_port);
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
if (flags & SET_ACK) {
@@ -525,125 +525,6 @@ static void form_cm_frame(struct sk_buff *skb,
cm_packets_created++;
}
-/*
- * nes_create_sockaddr - Record ip addr and tcp port in a sockaddr struct
- */
-static void nes_create_sockaddr(__be32 ip_addr, __be16 port,
- struct sockaddr_storage *addr)
-{
- struct sockaddr_in *nes_sockaddr = (struct sockaddr_in *)addr;
- nes_sockaddr->sin_family = AF_INET;
- memcpy(&nes_sockaddr->sin_addr.s_addr, &ip_addr, sizeof(__be32));
- nes_sockaddr->sin_port = port;
-}
-
-/*
- * nes_create_mapinfo - Create a mapinfo object in the port mapper data base
- */
-static int nes_create_mapinfo(struct nes_cm_info *cm_info)
-{
- struct sockaddr_storage local_sockaddr;
- struct sockaddr_storage mapped_sockaddr;
-
- nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
- &local_sockaddr);
- nes_create_sockaddr(htonl(cm_info->mapped_loc_addr),
- htons(cm_info->mapped_loc_port), &mapped_sockaddr);
-
- return iwpm_create_mapinfo(&local_sockaddr,
- &mapped_sockaddr, RDMA_NL_NES);
-}
-
-/*
- * nes_remove_mapinfo - Remove a mapinfo object from the port mapper data base
- * and send a remove mapping op message to
- * the userspace port mapper
- */
-static int nes_remove_mapinfo(u32 loc_addr, u16 loc_port,
- u32 mapped_loc_addr, u16 mapped_loc_port)
-{
- struct sockaddr_storage local_sockaddr;
- struct sockaddr_storage mapped_sockaddr;
-
- nes_create_sockaddr(htonl(loc_addr), htons(loc_port), &local_sockaddr);
- nes_create_sockaddr(htonl(mapped_loc_addr), htons(mapped_loc_port),
- &mapped_sockaddr);
-
- iwpm_remove_mapinfo(&local_sockaddr, &mapped_sockaddr);
- return iwpm_remove_mapping(&local_sockaddr, RDMA_NL_NES);
-}
-
-/*
- * nes_form_pm_msg - Form a port mapper message with mapping info
- */
-static void nes_form_pm_msg(struct nes_cm_info *cm_info,
- struct iwpm_sa_data *pm_msg)
-{
- nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
- &pm_msg->loc_addr);
- nes_create_sockaddr(htonl(cm_info->rem_addr), htons(cm_info->rem_port),
- &pm_msg->rem_addr);
-}
-
-/*
- * nes_form_reg_msg - Form a port mapper message with dev info
- */
-static void nes_form_reg_msg(struct nes_vnic *nesvnic,
- struct iwpm_dev_data *pm_msg)
-{
- memcpy(pm_msg->dev_name, nesvnic->nesibdev->ibdev.name,
- IWPM_DEVNAME_SIZE);
- memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
-}
-
-static void record_sockaddr_info(struct sockaddr_storage *addr_info,
- nes_addr_t *ip_addr, u16 *port_num)
-{
- struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
-
- if (in_addr->sin_family == AF_INET) {
- *ip_addr = ntohl(in_addr->sin_addr.s_addr);
- *port_num = ntohs(in_addr->sin_port);
- }
-}
-
-/*
- * nes_record_pm_msg - Save the received mapping info
- */
-static void nes_record_pm_msg(struct nes_cm_info *cm_info,
- struct iwpm_sa_data *pm_msg)
-{
- record_sockaddr_info(&pm_msg->mapped_loc_addr,
- &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
-
- record_sockaddr_info(&pm_msg->mapped_rem_addr,
- &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
-}
-
-/*
- * nes_get_reminfo - Get the address info of the remote connecting peer
- */
-static int nes_get_remote_addr(struct nes_cm_node *cm_node)
-{
- struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
- struct sockaddr_storage remote_addr;
- int ret;
-
- nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
- htons(cm_node->mapped_loc_port), &mapped_loc_addr);
- nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
- htons(cm_node->mapped_rem_port), &mapped_rem_addr);
-
- ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
- &remote_addr, RDMA_NL_NES);
- if (ret)
- nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
- else
- record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
- &cm_node->rem_port);
- return ret;
-}
-
/**
* print_core - dump a cm core
*/
@@ -1266,11 +1147,10 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
loc_addr, loc_port,
cm_node->rem_addr, cm_node->rem_port,
rem_addr, rem_port);
- if ((cm_node->mapped_loc_addr == loc_addr) &&
- (cm_node->mapped_loc_port == loc_port) &&
- (cm_node->mapped_rem_addr == rem_addr) &&
- (cm_node->mapped_rem_port == rem_port)) {
-
+ if ((cm_node->loc_addr == loc_addr) &&
+ (cm_node->loc_port == loc_port) &&
+ (cm_node->rem_addr == rem_addr) &&
+ (cm_node->rem_port == rem_port)) {
add_ref_cm_node(cm_node);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
return cm_node;
@@ -1287,8 +1167,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
* find_listener - find a cm node listening on this addr-port pair
*/
static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
- nes_addr_t dst_addr, u16 dst_port,
- enum nes_cm_listener_state listener_state, int local)
+ nes_addr_t dst_addr, u16 dst_port,
+ enum nes_cm_listener_state listener_state)
{
unsigned long flags;
struct nes_cm_listener *listen_node;
@@ -1298,13 +1178,9 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
- if (local) {
- listen_addr = listen_node->loc_addr;
- listen_port = listen_node->loc_port;
- } else {
- listen_addr = listen_node->mapped_loc_addr;
- listen_port = listen_node->mapped_loc_port;
- }
+ listen_addr = listen_node->loc_addr;
+ listen_port = listen_node->loc_port;
+
/* compare node pair, return node handle if a match */
if (((listen_addr == dst_addr) ||
listen_addr == 0x00000000) &&
@@ -1443,17 +1319,13 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
if (listener->nesvnic) {
nes_manage_apbvt(listener->nesvnic,
- listener->mapped_loc_port,
+ listener->loc_port,
PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
- nes_remove_mapinfo(listener->loc_addr,
- listener->loc_port,
- listener->mapped_loc_addr,
- listener->mapped_loc_port);
nes_debug(NES_DBG_NLMSG,
- "Delete APBVT mapped_loc_port = %04X\n",
- listener->mapped_loc_port);
+ "Delete APBVT loc_port = %04X\n",
+ listener->loc_port);
}
nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
@@ -1602,11 +1474,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->rem_addr = cm_info->rem_addr;
cm_node->rem_port = cm_info->rem_port;
- cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
- cm_node->mapped_rem_addr = cm_info->mapped_rem_addr;
- cm_node->mapped_loc_port = cm_info->mapped_loc_port;
- cm_node->mapped_rem_port = cm_info->mapped_rem_port;
-
cm_node->mpa_frame_rev = mpa_version;
cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
cm_node->mpav2_ird_ord = 0;
@@ -1655,10 +1522,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->loopbackpartner = NULL;
/* get the mac addr for the remote node */
- oldarpindex = nes_arp_table(nesdev, cm_node->mapped_rem_addr,
- NULL, NES_ARP_RESOLVE);
- arpindex = nes_addr_resolve_neigh(nesvnic,
- cm_node->mapped_rem_addr, oldarpindex);
+ oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr,
+ NULL, NES_ARP_RESOLVE);
+ arpindex = nes_addr_resolve_neigh(nesvnic, cm_node->rem_addr,
+ oldarpindex);
if (arpindex < 0) {
kfree(cm_node);
return NULL;
@@ -1720,14 +1587,12 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
} else {
if (cm_node->apbvt_set && cm_node->nesvnic) {
- nes_manage_apbvt(cm_node->nesvnic, cm_node->mapped_loc_port,
+ nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
}
- nes_debug(NES_DBG_NLMSG, "Delete APBVT mapped_loc_port = %04X\n",
- cm_node->mapped_loc_port);
- nes_remove_mapinfo(cm_node->loc_addr, cm_node->loc_port,
- cm_node->mapped_loc_addr, cm_node->mapped_loc_port);
+ nes_debug(NES_DBG_NLMSG, "Delete APBVT loc_port = %04X\n",
+ cm_node->loc_port);
}
atomic_dec(&cm_core->node_cnt);
@@ -2184,7 +2049,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
cm_node->state = NES_CM_STATE_ESTABLISHED;
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
- nes_get_remote_addr(cm_node);
handle_rcv_mpa(cm_node, skb);
} else { /* rcvd ACK only */
dev_kfree_skb_any(skb);
@@ -2399,17 +2263,14 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
{
struct nes_cm_listener *listener;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
unsigned long flags;
- int iwpm_err = 0;
nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
cm_info->loc_addr, cm_info->loc_port);
/* cannot have multiple matching listeners */
listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port,
- NES_CM_LISTENER_EITHER_STATE, 1);
+ NES_CM_LISTENER_EITHER_STATE);
if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
/* find automatically incs ref count ??? */
@@ -2419,22 +2280,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
}
if (!listener) {
- nes_form_reg_msg(nesvnic, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
- if (iwpm_err) {
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- nes_form_pm_msg(cm_info, &pm_msg);
- iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_NES);
- if (iwpm_err)
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper query fail (err = %d).\n", iwpm_err);
- else
- nes_record_pm_msg(cm_info, &pm_msg);
- }
-
/* create a CM listen node (1/2 node to compare incoming traffic to) */
listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
if (!listener) {
@@ -2444,8 +2289,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
listener->loc_addr = cm_info->loc_addr;
listener->loc_port = cm_info->loc_port;
- listener->mapped_loc_addr = cm_info->mapped_loc_addr;
- listener->mapped_loc_port = cm_info->mapped_loc_port;
listener->reused_node = 0;
atomic_set(&listener->ref_count, 1);
@@ -2507,18 +2350,18 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
if (cm_info->loc_addr == cm_info->rem_addr) {
loopbackremotelistener = find_listener(cm_core,
- cm_node->mapped_loc_addr, cm_node->mapped_rem_port,
- NES_CM_LISTENER_ACTIVE_STATE, 0);
+ cm_node->loc_addr, cm_node->rem_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
if (loopbackremotelistener == NULL) {
create_event(cm_node, NES_CM_EVENT_ABORTED);
} else {
loopback_cm_info = *cm_info;
loopback_cm_info.loc_port = cm_info->rem_port;
loopback_cm_info.rem_port = cm_info->loc_port;
- loopback_cm_info.mapped_loc_port =
- cm_info->mapped_rem_port;
- loopback_cm_info.mapped_rem_port =
- cm_info->mapped_loc_port;
+ loopback_cm_info.loc_port =
+ cm_info->rem_port;
+ loopback_cm_info.rem_port =
+ cm_info->loc_port;
loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
loopbackremotenode = make_cm_node(cm_core, nesvnic,
&loopback_cm_info, loopbackremotelistener);
@@ -2747,12 +2590,6 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
nfo.rem_addr = ntohl(iph->saddr);
nfo.rem_port = ntohs(tcph->source);
- /* If port mapper is available these should be mapped address info */
- nfo.mapped_loc_addr = ntohl(iph->daddr);
- nfo.mapped_loc_port = ntohs(tcph->dest);
- nfo.mapped_rem_addr = ntohl(iph->saddr);
- nfo.mapped_rem_port = ntohs(tcph->source);
-
tmp_daddr = cpu_to_be32(iph->daddr);
tmp_saddr = cpu_to_be32(iph->saddr);
@@ -2761,8 +2598,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
do {
cm_node = find_node(cm_core,
- nfo.mapped_rem_port, nfo.mapped_rem_addr,
- nfo.mapped_loc_port, nfo.mapped_loc_addr);
+ nfo.rem_port, nfo.rem_addr,
+ nfo.loc_port, nfo.loc_addr);
if (!cm_node) {
/* Only type of packet accepted are for */
@@ -2771,9 +2608,9 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
skb_handled = 0;
break;
}
- listener = find_listener(cm_core, nfo.mapped_loc_addr,
- nfo.mapped_loc_port,
- NES_CM_LISTENER_ACTIVE_STATE, 0);
+ listener = find_listener(cm_core, nfo.loc_addr,
+ nfo.loc_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
if (!listener) {
nfo.cm_id = NULL;
nfo.conn_type = 0;
@@ -2856,12 +2693,22 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
cm_core->event_wq = create_singlethread_workqueue("nesewq");
+ if (!cm_core->event_wq)
+ goto out_free_cmcore;
cm_core->post_event = nes_cm_post_event;
nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
+ if (!cm_core->disconn_wq)
+ goto out_free_wq;
print_core(cm_core);
return cm_core;
+
+out_free_wq:
+ destroy_workqueue(cm_core->event_wq);
+out_free_cmcore:
+ kfree(cm_core);
+ return NULL;
}
@@ -3121,8 +2968,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
atomic_inc(&cm_disconnects);
cm_event.event = IW_CM_EVENT_DISCONNECT;
cm_event.status = disconn_status;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
@@ -3148,8 +2995,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = 0;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
@@ -3240,8 +3087,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
u8 *start_ptr = &start_addr;
u8 **start_buff = &start_ptr;
u16 buff_len = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -3378,11 +3225,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nes_cm_init_tsa_conn(nesqp, cm_node);
nesqp->nesqp_context->tcpPorts[0] =
- cpu_to_le16(cm_node->mapped_loc_port);
+ cpu_to_le16(cm_node->loc_port);
nesqp->nesqp_context->tcpPorts[1] =
- cpu_to_le16(cm_node->mapped_rem_port);
+ cpu_to_le16(cm_node->rem_port);
- nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
+ nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->rem_addr);
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3406,9 +3253,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
memset(&nes_quad, 0, sizeof(nes_quad));
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
- nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
- nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
+ nes_quad.SrcIpadr = htonl(cm_node->rem_addr);
+ nes_quad.TcpPorts[0] = htons(cm_node->rem_port);
+ nes_quad.TcpPorts[1] = htons(cm_node->loc_port);
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
@@ -3437,8 +3284,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_event.event = IW_CM_EVENT_ESTABLISHED;
cm_event.status = 0;
cm_event.provider_data = (void *)nesqp;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
cm_event.ird = cm_node->ird_size;
@@ -3508,11 +3355,8 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct nes_cm_node *cm_node;
struct nes_cm_info cm_info;
int apbvt_set = 0;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
- struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- struct iwpm_dev_data pm_reg_msg;
- struct iwpm_sa_data pm_msg;
- int iwpm_err = 0;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
if (cm_id->remote_addr.ss_family != AF_INET)
return -ENOSYS;
@@ -3558,37 +3402,13 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_info.cm_id = cm_id;
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
- /* No port mapper available, go with the specified peer information */
- cm_info.mapped_loc_addr = cm_info.loc_addr;
- cm_info.mapped_loc_port = cm_info.loc_port;
- cm_info.mapped_rem_addr = cm_info.rem_addr;
- cm_info.mapped_rem_port = cm_info.rem_port;
-
- nes_form_reg_msg(nesvnic, &pm_reg_msg);
- iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
- if (iwpm_err) {
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
- }
- if (iwpm_valid_pid() && !iwpm_err) {
- nes_form_pm_msg(&cm_info, &pm_msg);
- iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES);
- if (iwpm_err)
- nes_debug(NES_DBG_NLMSG,
- "Port Mapper query fail (err = %d).\n", iwpm_err);
- else
- nes_record_pm_msg(&cm_info, &pm_msg);
- }
-
if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) {
- nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
- PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+ nes_manage_apbvt(nesvnic, cm_info.loc_port,
+ PCI_FUNC(nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_ADD);
apbvt_set = 1;
}
- if (nes_create_mapinfo(&cm_info))
- return -ENOMEM;
-
cm_id->add_ref(cm_id);
/* create a connect CM node connection */
@@ -3597,14 +3417,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
&cm_info);
if (!cm_node) {
if (apbvt_set)
- nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
+ nes_manage_apbvt(nesvnic, cm_info.loc_port,
PCI_FUNC(nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
- nes_debug(NES_DBG_NLMSG, "Delete mapped_loc_port = %04X\n",
- cm_info.mapped_loc_port);
- nes_remove_mapinfo(cm_info.loc_addr, cm_info.loc_port,
- cm_info.mapped_loc_addr, cm_info.mapped_loc_port);
+ nes_debug(NES_DBG_NLMSG, "Delete loc_port = %04X\n",
+ cm_info.loc_port);
cm_id->rem_ref(cm_id);
return -ENOMEM;
}
@@ -3633,12 +3451,12 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
struct nes_cm_listener *cm_node;
struct nes_cm_info cm_info;
int err;
- struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
cm_id, ntohs(laddr->sin_port));
- if (cm_id->local_addr.ss_family != AF_INET)
+ if (cm_id->m_local_addr.ss_family != AF_INET)
return -ENOSYS;
nesvnic = to_nesvnic(cm_id->device);
if (!nesvnic)
@@ -3658,10 +3476,6 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
- /* No port mapper available, go with the specified info */
- cm_info.mapped_loc_addr = cm_info.loc_addr;
- cm_info.mapped_loc_port = cm_info.loc_port;
-
cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
if (!cm_node) {
printk(KERN_ERR "%s[%u] Error returned from listen API call\n",
@@ -3673,10 +3487,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_node->tos = cm_id->tos;
if (!cm_node->reused_node) {
- if (nes_create_mapinfo(&cm_info))
- return -ENOMEM;
-
- err = nes_manage_apbvt(nesvnic, cm_node->mapped_loc_port,
+ err = nes_manage_apbvt(nesvnic, cm_node->loc_port,
PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_ADD);
if (err) {
@@ -3786,8 +3597,8 @@ static void cm_event_connected(struct nes_cm_event *event)
nesvnic = to_nesvnic(nesqp->ibqp.device);
nesdev = nesvnic->nesdev;
nesadapter = nesdev->nesadapter;
- laddr = (struct sockaddr_in *)&cm_id->local_addr;
- raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
cm_event_laddr = (struct sockaddr_in *)&cm_event.local_addr;
if (nesqp->destroyed)
@@ -3802,10 +3613,10 @@ static void cm_event_connected(struct nes_cm_event *event)
/* set the QP tsa context */
nesqp->nesqp_context->tcpPorts[0] =
- cpu_to_le16(cm_node->mapped_loc_port);
+ cpu_to_le16(cm_node->loc_port);
nesqp->nesqp_context->tcpPorts[1] =
- cpu_to_le16(cm_node->mapped_rem_port);
- nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
+ cpu_to_le16(cm_node->rem_port);
+ nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->rem_addr);
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3835,9 +3646,9 @@ static void cm_event_connected(struct nes_cm_event *event)
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
- nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
- nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
+ nes_quad.SrcIpadr = htonl(cm_node->rem_addr);
+ nes_quad.TcpPorts[0] = htons(cm_node->rem_port);
+ nes_quad.TcpPorts[1] = htons(cm_node->loc_port);
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
@@ -3858,14 +3669,14 @@ static void cm_event_connected(struct nes_cm_event *event)
cm_event.provider_data = cm_id->provider_data;
cm_event_laddr->sin_family = AF_INET;
cm_event_laddr->sin_port = laddr->sin_port;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
cm_event.private_data_len = (u8)event->cm_node->mpa_frame_size;
cm_event.ird = cm_node->ird_size;
cm_event.ord = cm_node->ord_size;
- cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+ cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
@@ -3913,8 +3724,8 @@ static void cm_event_connect_error(struct nes_cm_event *event)
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
cm_event.status = -ECONNRESET;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
@@ -3970,8 +3781,8 @@ static void cm_event_reset(struct nes_cm_event *event)
cm_event.event = IW_CM_EVENT_DISCONNECT;
cm_event.status = -ECONNRESET;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
@@ -3981,8 +3792,8 @@ static void cm_event_reset(struct nes_cm_event *event)
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = 0;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr = cm_id->local_addr;
- cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.local_addr = cm_id->m_local_addr;
+ cm_event.remote_addr = cm_id->m_remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
nes_debug(NES_DBG_CM, "NODE %p Generating CLOSE\n", event->cm_node);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 147c2c884227..d827d03e3941 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -293,8 +293,8 @@ struct nes_cm_listener {
struct list_head list;
struct nes_cm_core *cm_core;
u8 loc_mac[ETH_ALEN];
- nes_addr_t loc_addr, mapped_loc_addr;
- u16 loc_port, mapped_loc_port;
+ nes_addr_t loc_addr;
+ u16 loc_port;
struct iw_cm_id *cm_id;
enum nes_cm_conn_type conn_type;
atomic_t ref_count;
@@ -309,9 +309,7 @@ struct nes_cm_listener {
/* per connection node and node state information */
struct nes_cm_node {
nes_addr_t loc_addr, rem_addr;
- nes_addr_t mapped_loc_addr, mapped_rem_addr;
u16 loc_port, rem_port;
- u16 mapped_loc_port, mapped_rem_port;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
@@ -368,11 +366,6 @@ struct nes_cm_info {
u16 rem_port;
nes_addr_t loc_addr;
nes_addr_t rem_addr;
- u16 mapped_loc_port;
- u16 mapped_rem_port;
- nes_addr_t mapped_loc_addr;
- nes_addr_t mapped_rem_addr;
-
enum nes_cm_conn_type conn_type;
int backlog;
};
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 4713dd7ed764..a1c6481d8038 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -35,18 +35,11 @@
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
#include <linux/if_vlan.h>
-#include <linux/inet_lro.h>
#include <linux/slab.h>
#include "nes.h"
-static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
-module_param(nes_lro_max_aggr, uint, 0444);
-MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
-
static int wide_ppm_offset;
module_param(wide_ppm_offset, int, 0644);
MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
@@ -1642,25 +1635,6 @@ static void nes_rq_wqes_timeout(unsigned long parm)
}
-static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *priv)
-{
- unsigned int ip_len;
- struct iphdr *iph;
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
- return 0;
-}
-
-
/**
* nes_init_nic_qp
*/
@@ -1895,14 +1869,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
return -ENOMEM;
}
- nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr;
- nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS;
- nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc;
- nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
- nesvnic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- nesvnic->lro_mgr.dev = netdev;
- nesvnic->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
return 0;
}
@@ -2809,13 +2775,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
u16 pkt_type;
u16 rqes_processed = 0;
u8 sq_cqes = 0;
- u8 nes_use_lro = 0;
head = cq->cq_head;
cq_size = cq->cq_size;
cq->cqes_pending = 1;
- if (nesvnic->netdev->features & NETIF_F_LRO)
- nes_use_lro = 1;
do {
if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
NES_NIC_CQE_VALID) {
@@ -2950,10 +2913,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
__vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
}
- if (nes_use_lro)
- lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
- else
- netif_receive_skb(rx_skb);
+ napi_gro_receive(&nesvnic->napi, rx_skb);
skip_rx_indicate0:
;
@@ -2984,8 +2944,6 @@ skip_rx_indicate0:
} while (1);
- if (nes_use_lro)
- lro_flush_all(&nesvnic->lro_mgr);
if (sq_cqes) {
barrier();
/* restart the queue if it had been stopped */
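
The nes_hw.c hunks above retire the driver-private inet_lro engine in favor of the core GRO path: once every completed receive skb is handed to napi_gro_receive(), the stack performs the aggregation, so the per-driver lro_mgr setup, the lro_flush_all() call, and the LRO ethtool stats all become dead code. A hedged sketch of the post-conversion receive step, assuming a driver struct with an embedded napi_struct as nes_vnic has (demo_rx_one is illustrative, not from the driver):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hand a completed skb to GRO; the core aggregates TCP flows for us. */
static void demo_rx_one(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	napi_gro_receive(napi, skb); /* replaces lro_receive_skb()/netif_receive_skb() */
}
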
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index c9080208aad2..1b66ef1e9937 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -33,8 +33,6 @@
#ifndef __NES_HW_H
#define __NES_HW_H
-#include <linux/inet_lro.h>
-
#define NES_PHY_TYPE_CX4 1
#define NES_PHY_TYPE_1G 2
#define NES_PHY_TYPE_ARGUS 4
@@ -1049,8 +1047,6 @@ struct nes_hw_tune_timer {
#define NES_TIMER_ENABLE_LIMIT 4
#define NES_MAX_LINK_INTERRUPTS 128
#define NES_MAX_LINK_CHECK 200
-#define NES_MAX_LRO_DESCRIPTORS 32
-#define NES_LRO_MAX_AGGR 64
struct nes_adapter {
u64 fw_ver;
@@ -1263,9 +1259,6 @@ struct nes_vnic {
u8 next_qp_nic_index;
u8 of_device_registered;
u8 rdma_enabled;
- u32 lro_max_aggr;
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
struct timer_list event_timer;
enum ib_event_type delayed_event;
enum ib_event_type last_dispatched_event;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 6a0bdfa0ce2e..3ea9e055fdd3 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1085,9 +1085,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Free 4Kpbls",
"Free 256pbls",
"Timer Inits",
- "LRO aggregated",
- "LRO flushed",
- "LRO no_desc",
"PAU CreateQPs",
"PAU DestroyQPs",
};
@@ -1302,9 +1299,6 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = nesadapter->free_4kpbl;
target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
- target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
target_stat_values[++index] = atomic_read(&pau_qps_created);
target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
}
@@ -1709,7 +1703,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netdev->hw_features |= NETIF_F_TSO;
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
- netdev->hw_features |= NETIF_F_LRO;
nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
" nic_index = %d, logical_port = %d, mac_index = %d.\n",
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8c4daf7f22ec..fba69a39a7eb 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -56,7 +56,8 @@ static int nes_dereg_mr(struct ib_mr *ib_mr);
/**
* nes_alloc_mw
*/
-static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type)
+static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type,
+ struct ib_udata *udata)
{
struct nes_pd *nespd = to_nespd(ibpd);
struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
@@ -3768,6 +3769,8 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
nesibdev->ibdev.get_port_immutable = nes_port_immutable;
+ memcpy(nesibdev->ibdev.iwcm->ifname, netdev->name,
+ sizeof(nesibdev->ibdev.iwcm->ifname));
return nesibdev;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 12503f15fbd6..45bdfa0e3b2b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -114,6 +114,7 @@ struct ocrdma_dev_attr {
u8 local_ca_ack_delay;
u8 ird;
u8 num_ird_pages;
+ u8 udp_encap;
};
struct ocrdma_dma_mem {
@@ -356,6 +357,7 @@ struct ocrdma_ah {
struct ocrdma_av *av;
u16 sgid_index;
u32 id;
+ u8 hdr_type;
};
struct ocrdma_qp_hwq_info {
@@ -598,4 +600,10 @@ static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
}
+static inline bool ocrdma_is_udp_encap_supported(struct ocrdma_dev *dev)
+{
+ return (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV4) ||
+ (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV6);
+}
+
#endif
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 3790771f2baa..797362a297b2 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -55,18 +55,46 @@
#define OCRDMA_VID_PCP_SHIFT 0xD
+static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
+{
+ switch (hdr_type) {
+ case OCRDMA_L3_TYPE_IB_GRH:
+ return (u16)0x8915;
+ case OCRDMA_L3_TYPE_IPV4:
+ return (u16)0x0800;
+ case OCRDMA_L3_TYPE_IPV6:
+ return (u16)0x86dd;
+ default:
+ pr_err("ocrdma%d: Invalid network header\n", devid);
+ return 0;
+ }
+}
+
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, union ib_gid *sgid,
int pdid, bool *isvlan, u16 vlan_tag)
{
- int status = 0;
+ int status;
struct ocrdma_eth_vlan eth;
struct ocrdma_grh grh;
int eth_sz;
+ u16 proto_num = 0;
+ u8 nxthdr = 0x11;
+ struct iphdr ipv4;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
memset(&eth, 0, sizeof(eth));
memset(&grh, 0, sizeof(grh));
+ /* Protocol Number */
+ proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
+ if (!proto_num)
+ return -EINVAL;
+ nxthdr = (proto_num == 0x8915) ? 0x1b : 0x11;
/* VLAN */
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
@@ -78,13 +106,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
dev->id);
}
eth.eth_type = cpu_to_be16(0x8100);
- eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+ eth.roce_eth_type = cpu_to_be16(proto_num);
vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
eth.vlan_tag = cpu_to_be16(vlan_tag);
eth_sz = sizeof(struct ocrdma_eth_vlan);
*isvlan = true;
} else {
- eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+ eth.eth_type = cpu_to_be16(proto_num);
eth_sz = sizeof(struct ocrdma_eth_basic);
}
/* MAC */
@@ -93,18 +121,33 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
if (status)
return status;
ah->sgid_index = attr->grh.sgid_index;
- memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
- memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
-
- grh.tclass_flow = cpu_to_be32((6 << 28) |
- (attr->grh.traffic_class << 24) |
- attr->grh.flow_label);
- /* 0x1b is next header value in GRH */
- grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
- (0x1b << 8) | attr->grh.hop_limit);
/* Eth HDR */
memcpy(&ah->av->eth_hdr, &eth, eth_sz);
- memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+ if (ah->hdr_type == RDMA_NETWORK_IPV4) {
+ *((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
+ attr->grh.traffic_class);
+ ipv4.id = cpu_to_be16(pdid);
+ ipv4.frag_off = htons(IP_DF);
+ ipv4.tot_len = htons(0);
+ ipv4.ttl = attr->grh.hop_limit;
+ ipv4.protocol = nxthdr;
+ rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
+ rdma_gid2ip(&dgid_addr._sockaddr, &attr->grh.dgid);
+ ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
+ memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
+ } else {
+ memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+ grh.tclass_flow = cpu_to_be32((6 << 28) |
+ (attr->grh.traffic_class << 24) |
+ attr->grh.flow_label);
+ memcpy(&grh.dgid[0], attr->grh.dgid.raw,
+ sizeof(attr->grh.dgid.raw));
+ grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
+ (nxthdr << 8) |
+ attr->grh.hop_limit);
+ memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+ }
if (*isvlan)
ah->av->valid |= OCRDMA_AV_VLAN_VALID;
ah->av->valid = cpu_to_le32(ah->av->valid);
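
For orientation on the hunk above: 0x8915 is the RoCE (IB GRH) ethertype, 0x0800 IPv4, and 0x86dd IPv6, so set_av_attr() now stamps the ethertype from the GID's network type and builds either a GRH (RoCEv1) or an IPv4 header with UDP next-header 0x11 (RoCEv2) behind the Ethernet header. The version/IHL/TOS packing is the least obvious line; a minimal sketch of just that step, mirroring the code above (demo_fill_v4_prefix is an illustrative name):

#include <linux/in.h>
#include <linux/ip.h>

/* 4 = IPv4, 5 = header length in 32-bit words (20 bytes); the GRH
 * traffic class is reused as the IP TOS byte. */
static void demo_fill_v4_prefix(struct iphdr *ip, u8 tos)
{
	*(__be16 *)ip = htons((4 << 12) | (5 << 8) | tos);
}
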
@@ -128,6 +171,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
if (atomic_cmpxchg(&dev->update_sl, 1, 0))
ocrdma_init_service_level(dev);
+
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
@@ -148,6 +192,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
dev_put(sgid_attr.ndev);
}
+ /* Get network header type for this GID */
+ ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
if ((pd->uctx) &&
(!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
@@ -172,6 +218,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
*ahid_addr = 0;
*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+ if (ocrdma_is_udp_encap_supported(dev)) {
+ *ahid_addr |= ((u32)ah->hdr_type &
+ OCRDMA_AH_L3_TYPE_MASK) <<
+ OCRDMA_AH_L3_TYPE_SHIFT;
+ }
if (isvlan)
*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
OCRDMA_AH_VLAN_VALID_SHIFT);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 04a30ae67473..3856dd4c7e3d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -46,9 +46,10 @@
enum {
OCRDMA_AH_ID_MASK = 0x3FF,
OCRDMA_AH_VLAN_VALID_MASK = 0x01,
- OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F
+ OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F,
+ OCRDMA_AH_L3_TYPE_MASK = 0x03,
+ OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* bit 29 */
};
-
struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
int ocrdma_destroy_ah(struct ib_ah *);
int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
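
The masks added above imply a packed 32-bit ahid handed to user space: bits 0-9 carry the AH id, bits 29-30 the L3 header type (when UDP encap is supported), and bit 31 the VLAN-valid flag, matching the ORs in ocrdma_create_ah(). A sketch of that layout under those assumptions (demo_pack_ahid is hypothetical, not a driver helper):

#include <linux/types.h>

static u32 demo_pack_ahid(u32 id, u8 l3_type, bool vlan)
{
	u32 ahid = id & 0x3FF;		/* bits 0-9: AH id */

	ahid |= ((u32)l3_type & 0x03) << 29;	/* bits 29-30: L3 type */
	if (vlan)
		ahid |= 0x01U << 31;		/* bit 31: VLAN valid */
	return ahid;
}
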
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 283ca842ff74..16740dcb876b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1113,7 +1113,7 @@ mbx_err:
static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
void *payload_va)
{
- int status = 0;
+ int status;
struct ocrdma_mbx_rsp *rsp = payload_va;
if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
@@ -1144,6 +1144,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_pd =
(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+ attr->udp_encap = (rsp->max_pd_ca_ack_delay &
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
attr->max_dpp_pds =
(rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
@@ -2138,7 +2141,6 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
enum ib_qp_state *old_ib_state)
{
unsigned long flags;
- int status = 0;
enum ocrdma_qp_state new_state;
new_state = get_ocrdma_qp_state(new_ib_state);
@@ -2163,7 +2165,7 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
qp->state = new_state;
spin_unlock_irqrestore(&qp->q_lock, flags);
- return status;
+ return 0;
}
static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
@@ -2501,7 +2503,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
union ib_gid sgid, zgid;
struct ib_gid_attr sgid_attr;
u32 vlan_id = 0xFFFF;
- u8 mac_addr[6];
+ u8 mac_addr[6], hdr_type;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
@@ -2516,6 +2523,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->params.hop_lmt_rq_psn |=
(ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
+
+ /* GIDs */
memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
sizeof(cmd->params.dgid));
@@ -2538,6 +2547,16 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
return status;
cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
(mac_addr[2] << 16) | (mac_addr[3] << 24);
+
+ hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ if (hdr_type == RDMA_NETWORK_IPV4) {
+ rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
+ rdma_gid2ip(&dgid_addr._sockaddr, &ah_attr->grh.dgid);
+ memcpy(&cmd->params.dgid[0],
+ &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ memcpy(&cmd->params.sgid[0],
+ &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ }
/* convert them to LE format. */
ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
@@ -2558,7 +2577,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->params.rnt_rc_sl_fl |=
(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
}
-
+ cmd->params.max_sge_recv_flags |= ((hdr_type <<
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
return 0;
}
@@ -2871,7 +2892,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
struct ocrdma_dcbx_cfg *dcbxcfg)
{
- int status = 0;
+ int status;
dma_addr_t pa;
struct ocrdma_mqe cmd;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index f38743018cb4..3d75f65ce87e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -89,8 +89,10 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable)
{
struct ib_port_attr attr;
+ struct ocrdma_dev *dev;
int err;
+ dev = get_ocrdma_dev(ibdev);
err = ocrdma_query_port(ibdev, port_num, &attr);
if (err)
return err;
@@ -98,6 +100,8 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (ocrdma_is_udp_encap_supported(dev))
+ immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 99dd6fdf06d7..0efc9662c6d8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -140,7 +140,11 @@ enum {
OCRDMA_DB_RQ_SHIFT = 24
};
-#define OCRDMA_ROUDP_FLAGS_SHIFT 0x03
+enum {
+ OCRDMA_L3_TYPE_IB_GRH = 0x00,
+ OCRDMA_L3_TYPE_IPV4 = 0x01,
+ OCRDMA_L3_TYPE_IPV6 = 0x02
+};
#define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
#define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */
@@ -546,7 +550,8 @@ enum {
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT = 8,
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK = 0xFF <<
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT,
-
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT = 3,
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
@@ -1107,6 +1112,8 @@ enum {
OCRDMA_QP_PARAMS_STATE_MASK = BIT(5) | BIT(6) | BIT(7),
OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = BIT(8),
OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = BIT(9),
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT = 11,
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK = BIT(11) | BIT(12) | BIT(13),
OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16,
OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF <<
OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT,
@@ -1735,8 +1742,11 @@ enum {
/* w1 */
OCRDMA_CQE_UD_XFER_LEN_SHIFT = 16,
+ OCRDMA_CQE_UD_XFER_LEN_MASK = 0x1FFF,
OCRDMA_CQE_PKEY_SHIFT = 0,
OCRDMA_CQE_PKEY_MASK = 0xFFFF,
+ OCRDMA_CQE_UD_L3TYPE_SHIFT = 29,
+ OCRDMA_CQE_UD_L3TYPE_MASK = 0x07,
/* w2 */
OCRDMA_CQE_QPN_SHIFT = 0,
@@ -1861,7 +1871,7 @@ struct ocrdma_ewqe_ud_hdr {
u32 rsvd_dest_qpn;
u32 qkey;
u32 rsvd_ahid;
- u32 rsvd;
+ u32 hdr_type;
};
/* extended wqe followed by hdr_wqe for Fast Memory register */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 255f774080a4..8bef09a8c49f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -610,7 +610,7 @@ static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
ulong now = jiffies, secs;
- int status = 0;
+ int status;
struct ocrdma_rdma_stats_resp *rdma_stats =
(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
@@ -641,7 +641,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
{
char tmp_str[32];
long reset;
- int status = 0;
+ int status;
struct ocrdma_stats *pstats = filp->private_data;
struct ocrdma_dev *dev = pstats->dev;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 12420e4ecf3d..a8496a18e20d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -419,7 +419,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
struct ib_udata *udata)
{
struct ocrdma_pd *pd = NULL;
- int status = 0;
+ int status;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
@@ -468,7 +468,7 @@ static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
struct ocrdma_pd *pd)
{
- int status = 0;
+ int status;
if (dev->pd_mgr->pd_prealloc_valid)
status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
@@ -596,7 +596,7 @@ map_err:
int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
- int status = 0;
+ int status;
struct ocrdma_mm *mm, *tmp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
@@ -623,7 +623,7 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
unsigned long len = (vma->vm_end - vma->vm_start);
- int status = 0;
+ int status;
bool found;
if (vma->vm_start & (PAGE_SIZE - 1))
@@ -1285,7 +1285,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
struct ib_udata *udata, int dpp_offset,
int dpp_credit_lmt, int srq)
{
- int status = 0;
+ int status;
u64 usr_db;
struct ocrdma_create_qp_uresp uresp;
struct ocrdma_pd *pd = qp->pd;
@@ -1494,9 +1494,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*/
if (status < 0)
return status;
- status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
-
- return status;
+ return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}
int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1949,7 +1947,7 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata)
{
- int status = 0;
+ int status;
struct ocrdma_srq *srq;
srq = get_ocrdma_srq(ibsrq);
@@ -2005,6 +2003,7 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
else
ud_hdr->qkey = ud_wr(wr)->remote_qkey;
ud_hdr->rsvd_ahid = ah->id;
+ ud_hdr->hdr_type = ah->hdr_type;
if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}
@@ -2717,9 +2716,11 @@ static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
return expand;
}
-static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
+static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
+ struct ocrdma_cqe *cqe)
{
int status;
+ u16 hdr_type = 0;
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
@@ -2728,7 +2729,17 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
ibwc->pkey_index = 0;
ibwc->wc_flags = IB_WC_GRH;
ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
- OCRDMA_CQE_UD_XFER_LEN_SHIFT);
+ OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
+ OCRDMA_CQE_UD_XFER_LEN_MASK;
+
+ if (ocrdma_is_udp_encap_supported(dev)) {
+ hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
+ OCRDMA_CQE_UD_L3TYPE_SHIFT) &
+ OCRDMA_CQE_UD_L3TYPE_MASK;
+ ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ ibwc->network_hdr_type = hdr_type;
+ }
+
return status;
}
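
Per the masks in the hunk above, the UD transfer length is now confined to 13 bits of the rxlen_pkey CQE word, and when UDP encap is supported the L3 type is pulled from the top three bits of the same word and surfaced through IB_WC_WITH_NETWORK_HDR_TYPE. A sketch of that decode, assuming the bit layout the masks imply (demo_decode_rxlen is illustrative):

#include <linux/types.h>

/* bits 16-28: UD transfer length; bits 29-31: L3 header type */
static void demo_decode_rxlen(u32 rxlen_pkey, u32 *len, u16 *l3_type)
{
	*len     = (rxlen_pkey >> 16) & 0x1FFF;
	*l3_type = (rxlen_pkey >> 29) & 0x07;
}
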
@@ -2791,12 +2802,15 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
+ struct ocrdma_dev *dev;
+
+ dev = get_ocrdma_dev(qp->ibqp.device);
ibwc->opcode = IB_WC_RECV;
ibwc->qp = &qp->ibqp;
ibwc->status = IB_WC_SUCCESS;
if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
- ocrdma_update_ud_rcqe(ibwc, cqe);
+ ocrdma_update_ud_rcqe(dev, ibwc, cqe);
else
ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
index 495be09781b1..e0fdb9201423 100644
--- a/drivers/infiniband/hw/qib/Kconfig
+++ b/drivers/infiniband/hw/qib/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_QIB
tristate "Intel PCIe HCA support"
- depends on 64BIT
+ depends on 64BIT && INFINIBAND_RDMAVT
---help---
This is a low-level driver for Intel PCIe QLE InfiniBand host
channel adapters. This driver does not support the Intel
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
index 57f8103e51f8..79ebd79e8405 100644
--- a/drivers/infiniband/hw/qib/Makefile
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -1,11 +1,11 @@
obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
-ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \
- qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \
- qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \
- qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
+ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \
+ qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \
+ qib_mad.o qib_pcie.o qib_pio_copy.o \
+ qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o \
qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
- qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \
+ qib_user_pages.o qib_user_sdma.o qib_iba7220.o \
qib_sd7220.o qib_iba7322.o qib_verbs.o
# 6120 has no fallback if no MSI interrupts, others can do INTx
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 7df16f74bb45..bbf0a163aeab 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -52,6 +52,7 @@
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/kthread.h>
+#include <rdma/rdma_vt.h>
#include "qib_common.h"
#include "qib_verbs.h"
@@ -229,9 +230,6 @@ struct qib_ctxtdata {
u8 redirect_seq_cnt;
/* ctxt rcvhdrq head offset */
u32 head;
- /* lookaside fields */
- struct qib_qp *lookaside_qp;
- u32 lookaside_qpn;
/* QPs waiting for context processing */
struct list_head qp_wait_list;
#ifdef CONFIG_DEBUG_FS
@@ -240,7 +238,7 @@ struct qib_ctxtdata {
#endif
};
-struct qib_sge_state;
+struct rvt_sge_state;
struct qib_sdma_txreq {
int flags;
@@ -258,14 +256,14 @@ struct qib_sdma_desc {
struct qib_verbs_txreq {
struct qib_sdma_txreq txreq;
- struct qib_qp *qp;
- struct qib_swqe *wqe;
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
u32 dwords;
u16 hdr_dwords;
u16 hdr_inx;
struct qib_pio_header *align_buf;
- struct qib_mregion *mr;
- struct qib_sge_state *ss;
+ struct rvt_mregion *mr;
+ struct rvt_sge_state *ss;
};
#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
@@ -1096,8 +1094,6 @@ struct qib_devdata {
u16 psxmitwait_check_rate;
/* high volume overflow errors deferred to tasklet */
struct tasklet_struct error_tasklet;
- /* per device cq worker */
- struct kthread_worker *worker;
int assigned_node_id; /* NUMA node closest to HCA */
};
@@ -1135,8 +1131,9 @@ extern spinlock_t qib_devs_lock;
extern struct qib_devdata *qib_lookup(int unit);
extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
-
+extern u16 qpt_mask;
extern unsigned qib_cc_table_size;
+
int qib_init(struct qib_devdata *, int);
int init_chip_wc_pat(struct qib_devdata *dd, u32);
int qib_enable_wc(struct qib_devdata *dd);
@@ -1323,7 +1320,7 @@ void __qib_sdma_intr(struct qib_pportdata *);
void qib_sdma_intr(struct qib_pportdata *);
void qib_user_sdma_send_desc(struct qib_pportdata *dd,
struct list_head *pktlist);
-int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
+int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
u32, struct qib_verbs_txreq *);
/* ppd->sdma_lock should be locked before calling this. */
int qib_sdma_make_progress(struct qib_pportdata *dd);
@@ -1454,6 +1451,8 @@ u64 qib_sps_ints(void);
dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
size_t, int);
const char *qib_get_unit_name(int unit);
+const char *qib_get_card_name(struct rvt_dev_info *rdi);
+struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
* Flush write combining store buffers (if present) and perform a write
@@ -1540,4 +1539,14 @@ struct qib_hwerror_msgs {
void qib_format_hwerrors(u64 hwerrs,
const struct qib_hwerror_msgs *hwerrmsgs,
size_t nhwerrmsgs, char *msg, size_t lmsg);
+
+void qib_stop_send_queue(struct rvt_qp *qp);
+void qib_quiesce_qp(struct rvt_qp *qp);
+void qib_flush_qp_waiters(struct rvt_qp *qp);
+int qib_mtu_to_path_mtu(u32 mtu);
+u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
+void qib_notify_error_qp(struct rvt_qp *qp);
+int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr);
+
#endif /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 4fb78abd8ba1..1d6e63eb1146 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -742,14 +742,11 @@ struct qib_tid_session_member {
#define SIZE_OF_CRC 1
#define QIB_DEFAULT_P_KEY 0xFFFF
-#define QIB_PERMISSIVE_LID 0xFFFF
#define QIB_AETH_CREDIT_SHIFT 24
#define QIB_AETH_CREDIT_MASK 0x1F
#define QIB_AETH_CREDIT_INVAL 0x1F
#define QIB_PSN_MASK 0xFFFFFF
#define QIB_MSN_MASK 0xFFFFFF
-#define QIB_QPN_MASK 0xFFFFFF
-#define QIB_MULTICAST_LID_BASE 0xC000
#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
#define QIB_MULTICAST_QPN 0xFFFFFF
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
deleted file mode 100644
index 2b45d0b02300..000000000000
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/kthread.h>
-
-#include "qib_verbs.h"
-#include "qib.h"
-
-/**
- * qib_cq_enter - add a new entry to the completion queue
- * @cq: completion queue
- * @entry: work completion entry to add
- * @solicited: true if @entry is a solicited entry
- *
- * This may be called with qp->s_lock held.
- */
-void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
-{
- struct qib_cq_wc *wc;
- unsigned long flags;
- u32 head;
- u32 next;
-
- spin_lock_irqsave(&cq->lock, flags);
-
- /*
- * Note that the head pointer might be writable by user processes.
- * Take care to verify it is a sane value.
- */
- wc = cq->queue;
- head = wc->head;
- if (head >= (unsigned) cq->ibcq.cqe) {
- head = cq->ibcq.cqe;
- next = 0;
- } else
- next = head + 1;
- if (unlikely(next == wc->tail)) {
- spin_unlock_irqrestore(&cq->lock, flags);
- if (cq->ibcq.event_handler) {
- struct ib_event ev;
-
- ev.device = cq->ibcq.device;
- ev.element.cq = &cq->ibcq;
- ev.event = IB_EVENT_CQ_ERR;
- cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
- }
- return;
- }
- if (cq->ip) {
- wc->uqueue[head].wr_id = entry->wr_id;
- wc->uqueue[head].status = entry->status;
- wc->uqueue[head].opcode = entry->opcode;
- wc->uqueue[head].vendor_err = entry->vendor_err;
- wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data =
- (__u32 __force)entry->ex.imm_data;
- wc->uqueue[head].qp_num = entry->qp->qp_num;
- wc->uqueue[head].src_qp = entry->src_qp;
- wc->uqueue[head].wc_flags = entry->wc_flags;
- wc->uqueue[head].pkey_index = entry->pkey_index;
- wc->uqueue[head].slid = entry->slid;
- wc->uqueue[head].sl = entry->sl;
- wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
- wc->uqueue[head].port_num = entry->port_num;
- /* Make sure entry is written before the head index. */
- smp_wmb();
- } else
- wc->kqueue[head] = *entry;
- wc->head = next;
-
- if (cq->notify == IB_CQ_NEXT_COMP ||
- (cq->notify == IB_CQ_SOLICITED &&
- (solicited || entry->status != IB_WC_SUCCESS))) {
- struct kthread_worker *worker;
- /*
- * This will cause send_complete() to be called in
- * another thread.
- */
- smp_rmb();
- worker = cq->dd->worker;
- if (likely(worker)) {
- cq->notify = IB_CQ_NONE;
- cq->triggered++;
- queue_kthread_work(worker, &cq->comptask);
- }
- }
-
- spin_unlock_irqrestore(&cq->lock, flags);
-}
-
-/**
- * qib_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context. Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- struct qib_cq *cq = to_icq(ibcq);
- struct qib_cq_wc *wc;
- unsigned long flags;
- int npolled;
- u32 tail;
-
- /* The kernel can only poll a kernel completion queue */
- if (cq->ip) {
- npolled = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&cq->lock, flags);
-
- wc = cq->queue;
- tail = wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
- if (tail == wc->head)
- break;
- /* The kernel doesn't need a RMB since it has the lock. */
- *entry = wc->kqueue[tail];
- if (tail >= cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- wc->tail = tail;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
- return npolled;
-}
-
-static void send_complete(struct kthread_work *work)
-{
- struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
-
- /*
- * The completion handler will most likely rearm the notification
- * and poll for all pending entries. If a new completion entry
- * is added while we are in this routine, queue_work()
- * won't call us again until we return so we check triggered to
- * see if we need to call the handler again.
- */
- for (;;) {
- u8 triggered = cq->triggered;
-
- /*
- * IPoIB connected mode assumes the callback is from a
- * soft IRQ. We simulate this by blocking "bottom halves".
- * See the implementation for ipoib_cm_handle_tx_wc(),
- * netif_tx_lock_bh() and netif_tx_lock().
- */
- local_bh_disable();
- cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
- local_bh_enable();
-
- if (cq->triggered == triggered)
- return;
- }
-}
-
-/**
- * qib_create_cq - create a completion queue
- * @ibdev: the device this completion queue is attached to
- * @attr: creation attributes
- * @context: unused by the QLogic_IB driver
- * @udata: user data for libibverbs.so
- *
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
- * Called by ib_create_cq() in the generic verbs code.
- */
-struct ib_cq *qib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- int entries = attr->cqe;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_cq *cq;
- struct qib_cq_wc *wc;
- struct ib_cq *ret;
- u32 sz;
-
- if (attr->flags)
- return ERR_PTR(-EINVAL);
-
- if (entries < 1 || entries > ib_qib_max_cqes) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- /* Allocate the completion queue structure. */
- cq = kmalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
- /*
- * Allocate the completion queue entries and head/tail pointers.
- * This is allocated separately so that it can be resized and
- * also mapped into user space.
- * We need to use vmalloc() in order to support mmap and large
- * numbers of entries.
- */
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
- else
- sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
- if (!wc) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_cq;
- }
-
- /*
- * Return the address of the WC as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
-
- cq->ip = qib_create_mmap_info(dev, sz, context, wc);
- if (!cq->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wc;
- }
-
- err = ib_copy_to_udata(udata, &cq->ip->offset,
- sizeof(cq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else
- cq->ip = NULL;
-
- spin_lock(&dev->n_cqs_lock);
- if (dev->n_cqs_allocated == ib_qib_max_cqs) {
- spin_unlock(&dev->n_cqs_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_cqs_allocated++;
- spin_unlock(&dev->n_cqs_lock);
-
- if (cq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- /*
- * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
- * The number of entries should be >= the number requested or return
- * an error.
- */
- cq->dd = dd_from_dev(dev);
- cq->ibcq.cqe = entries;
- cq->notify = IB_CQ_NONE;
- cq->triggered = 0;
- spin_lock_init(&cq->lock);
- init_kthread_work(&cq->comptask, send_complete);
- wc->head = 0;
- wc->tail = 0;
- cq->queue = wc;
-
- ret = &cq->ibcq;
-
- goto done;
-
-bail_ip:
- kfree(cq->ip);
-bail_wc:
- vfree(wc);
-bail_cq:
- kfree(cq);
-done:
- return ret;
-}
-
-/**
- * qib_destroy_cq - destroy a completion queue
- * @ibcq: the completion queue to destroy.
- *
- * Returns 0 for success.
- *
- * Called by ib_destroy_cq() in the generic verbs code.
- */
-int qib_destroy_cq(struct ib_cq *ibcq)
-{
- struct qib_ibdev *dev = to_idev(ibcq->device);
- struct qib_cq *cq = to_icq(ibcq);
-
- flush_kthread_work(&cq->comptask);
- spin_lock(&dev->n_cqs_lock);
- dev->n_cqs_allocated--;
- spin_unlock(&dev->n_cqs_lock);
- if (cq->ip)
- kref_put(&cq->ip->ref, qib_release_mmap_info);
- else
- vfree(cq->queue);
- kfree(cq);
-
- return 0;
-}
-
-/**
- * qib_req_notify_cq - change the notification type for a completion queue
- * @ibcq: the completion queue
- * @notify_flags: the type of notification to request
- *
- * Returns 0 for success.
- *
- * This may be called from interrupt context. Also called by
- * ib_req_notify_cq() in the generic verbs code.
- */
-int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
- struct qib_cq *cq = to_icq(ibcq);
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&cq->lock, flags);
- /*
- * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
- * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
- */
- if (cq->notify != IB_CQ_NEXT_COMP)
- cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
-
- if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- cq->queue->head != cq->queue->tail)
- ret = 1;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
- return ret;
-}
-
-/**
- * qib_resize_cq - change the size of the CQ
- * @ibcq: the completion queue
- *
- * Returns 0 for success.
- */
-int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
-{
- struct qib_cq *cq = to_icq(ibcq);
- struct qib_cq_wc *old_wc;
- struct qib_cq_wc *wc;
- u32 head, tail, n;
- int ret;
- u32 sz;
-
- if (cqe < 1 || cqe > ib_qib_max_cqes) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Need to use vmalloc() if we want to support large #s of entries.
- */
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
- else
- sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
- if (!wc) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /* Check that we can write the offset to mmap. */
- if (udata && udata->outlen >= sizeof(__u64)) {
- __u64 offset = 0;
-
- ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
- if (ret)
- goto bail_free;
- }
-
- spin_lock_irq(&cq->lock);
- /*
- * Make sure head and tail are sane since they
- * might be user writable.
- */
- old_wc = cq->queue;
- head = old_wc->head;
- if (head > (u32) cq->ibcq.cqe)
- head = (u32) cq->ibcq.cqe;
- tail = old_wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- if (head < tail)
- n = cq->ibcq.cqe + 1 + head - tail;
- else
- n = head - tail;
- if (unlikely((u32)cqe < n)) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- for (n = 0; tail != head; n++) {
- if (cq->ip)
- wc->uqueue[n] = old_wc->uqueue[tail];
- else
- wc->kqueue[n] = old_wc->kqueue[tail];
- if (tail == (u32) cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- cq->ibcq.cqe = cqe;
- wc->head = n;
- wc->tail = 0;
- cq->queue = wc;
- spin_unlock_irq(&cq->lock);
-
- vfree(old_wc);
-
- if (cq->ip) {
- struct qib_ibdev *dev = to_idev(ibcq->device);
- struct qib_mmap_info *ip = cq->ip;
-
- qib_update_mmap_info(dev, ip, sz, wc);
-
- /*
- * Return the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- ret = ib_copy_to_udata(udata, &ip->offset,
- sizeof(ip->offset));
- if (ret)
- goto bail;
- }
-
- spin_lock_irq(&dev->pending_lock);
- if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = 0;
- goto bail;
-
-bail_unlock:
- spin_unlock_irq(&cq->lock);
-bail_free:
- vfree(wc);
-bail:
- return ret;
-}
-
-int qib_cq_init(struct qib_devdata *dd)
-{
- int ret = 0;
- int cpu;
- struct task_struct *task;
-
- if (dd->worker)
- return 0;
- dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
- if (!dd->worker)
- return -ENOMEM;
- init_kthread_worker(dd->worker);
- task = kthread_create_on_node(
- kthread_worker_fn,
- dd->worker,
- dd->assigned_node_id,
- "qib_cq%d", dd->unit);
- if (IS_ERR(task))
- goto task_fail;
- cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
- kthread_bind(task, cpu);
- wake_up_process(task);
-out:
- return ret;
-task_fail:
- ret = PTR_ERR(task);
- kfree(dd->worker);
- dd->worker = NULL;
- goto out;
-}
-
-void qib_cq_exit(struct qib_devdata *dd)
-{
- struct kthread_worker *worker;
-
- worker = dd->worker;
- if (!worker)
- return;
- /* blocks future queuing from send_complete() */
- dd->worker = NULL;
- smp_wmb();
- flush_kthread_worker(worker);
- kthread_stop(worker->task);
- kfree(worker);
-}
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index f58fdc3d25a2..67ee6438cf59 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -90,6 +90,22 @@ const char *qib_get_unit_name(int unit)
return iname;
}
+const char *qib_get_card_name(struct rvt_dev_info *rdi)
+{
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(ibdev,
+ struct qib_devdata, verbs_dev);
+ return qib_get_unit_name(dd->unit);
+}
+
+struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
+{
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(ibdev,
+ struct qib_devdata, verbs_dev);
+ return dd->pcidev;
+}
+
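+/*
+ * The two helpers above walk from the rdmavt device back to the qib
+ * device with nested container_of() steps. A toy sketch of the idiom
+ * under assumed types (inner/middle/outer are hypothetical):
+ *
+ *	struct inner  { int x; };
+ *	struct middle { struct inner in; };
+ *	struct outer  { struct middle mid; };
+ *
+ *	static struct outer *outer_from_inner(struct inner *p)
+ *	{
+ *		struct middle *m = container_of(p, struct middle, in);
+ *
+ *		return container_of(m, struct outer, mid);
+ *	}
+ */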
/*
* Return count of units with at least one port ACTIVE.
*/
@@ -306,7 +322,9 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
struct qib_other_headers *ohdr = NULL;
struct qib_ibport *ibp = &ppd->ibport_data;
- struct qib_qp *qp = NULL;
+ struct qib_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ struct rvt_qp *qp = NULL;
u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
u16 lid = be16_to_cpu(hdr->lrh[1]);
int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
@@ -319,7 +337,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
if (tlen < 24)
goto drop;
- if (lid < QIB_MULTICAST_LID_BASE) {
+ if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
lid &= ~((1 << ppd->lmc) - 1);
if (unlikely(lid != ppd->lid))
goto drop;
@@ -346,13 +364,16 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
psn = be32_to_cpu(ohdr->bth[2]);
/* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
if (qp_num != QIB_MULTICAST_QPN) {
int ruc_res;
- qp = qib_lookup_qpn(ibp, qp_num);
- if (!qp)
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!qp) {
+ rcu_read_unlock();
goto drop;
+ }
/*
* Handle only RC QPs - for other QP types drop error
@@ -361,9 +382,9 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
spin_lock(&qp->r_lock);
/* Check for valid receive state. */
- if (!(ib_qib_state_ops[qp->state] &
- QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
+ if (!(ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
goto unlock;
}
@@ -383,7 +404,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
diff = qib_cmp24(psn, qp->r_psn);
if (!qp->r_nak_state && diff >= 0) {
- ibp->n_rc_seqnak++;
+ ibp->rvp.n_rc_seqnak++;
qp->r_nak_state =
IB_NAK_PSN_ERROR;
/* Use the expected PSN. */
@@ -398,7 +419,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
*/
if (list_empty(&qp->rspwait)) {
qp->r_flags |=
- QIB_R_RSP_NAK;
+ RVT_R_RSP_NAK;
atomic_inc(
&qp->refcount);
list_add_tail(
@@ -419,12 +440,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
unlock:
spin_unlock(&qp->r_lock);
- /*
- * Notify qib_destroy_qp() if it is waiting
- * for us to finish.
- */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rcu_read_unlock();
} /* Unicast QP */
} /* Valid packet with TIDErr */
@@ -456,7 +472,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
int last;
u64 lval;
- struct qib_qp *qp, *nqp;
+ struct rvt_qp *qp, *nqp;
l = rcd->head;
rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
@@ -549,15 +565,6 @@ move_along:
updegr = 0;
}
}
- /*
- * Notify qib_destroy_qp() if it is waiting
- * for lookaside_qp to finish.
- */
- if (rcd->lookaside_qp) {
- if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
- wake_up(&rcd->lookaside_qp->wait);
- rcd->lookaside_qp = NULL;
- }
rcd->head = l;
@@ -567,17 +574,17 @@ move_along:
*/
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
list_del_init(&qp->rspwait);
- if (qp->r_flags & QIB_R_RSP_NAK) {
- qp->r_flags &= ~QIB_R_RSP_NAK;
+ if (qp->r_flags & RVT_R_RSP_NAK) {
+ qp->r_flags &= ~RVT_R_RSP_NAK;
qib_send_rc_ack(qp);
}
- if (qp->r_flags & QIB_R_RSP_SEND) {
+ if (qp->r_flags & RVT_R_RSP_SEND) {
unsigned long flags;
- qp->r_flags &= ~QIB_R_RSP_SEND;
+ qp->r_flags &= ~RVT_R_RSP_SEND;
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] &
- QIB_PROCESS_OR_FLUSH_SEND)
+ if (ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_OR_FLUSH_SEND)
qib_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
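
The refcount juggling removed from this file is replaced by RCU scoping: rvt_lookup_qpn() returns an RCU-protected pointer, so callers bracket the use of the QP with rcu_read_lock()/rcu_read_unlock() instead of taking and dropping a per-QP reference as qib_lookup_qpn() did. A minimal sketch of the resulting pattern (the helper itself is illustrative, not from the patch):

    static void example_qp_peek(struct rvt_dev_info *rdi,
                                struct qib_ibport *ibp, u32 qp_num)
    {
            struct rvt_qp *qp;

            rcu_read_lock();
            qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
            if (qp) {
                    spin_lock(&qp->r_lock);
                    /* inspect or update receive state under r_lock */
                    spin_unlock(&qp->r_lock);
            }
            rcu_read_unlock();      /* lookup result is invalid past here */
    }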
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 4b927809d1a1..a3733f25280f 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2956,13 +2956,13 @@ static void pma_6120_timer(unsigned long data)
struct qib_ibport *ibp = &ppd->ibport_data;
unsigned long flags;
- spin_lock_irqsave(&ibp->lock, flags);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
&cs->spkts, &cs->rpkts, &cs->xmit_wait);
mod_timer(&cs->pma_timer,
- jiffies + usecs_to_jiffies(ibp->pma_sample_interval));
+ jiffies + usecs_to_jiffies(ibp->rvp.pma_sample_interval));
} else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
u64 ta, tb, tc, td, te;
@@ -2975,11 +2975,11 @@ static void pma_6120_timer(unsigned long data)
cs->rpkts = td - cs->rpkts;
cs->xmit_wait = te - cs->xmit_wait;
}
- spin_unlock_irqrestore(&ibp->lock, flags);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
}
/*
- * Note that the caller has the ibp->lock held.
+ * Note that the caller has the ibp->rvp.lock held.
*/
static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
u32 start)
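
The ibp->X to ibp->rvp.X renames in this and the following files all reflect one structural move: the verbs-generic per-port state now lives in an embedded rdmavt structure. A sketch of the implied layout, with the field list abbreviated and inferred from the accesses above rather than quoted from a header:

    struct qib_ibport {
            struct rvt_ibport rvp;  /* lock, sm_lid, sm_ah, pma_* sampling
                                     * state, mkey/trap fields, counters */
            /* qib-specific per-port fields remain below */
    };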
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 6c8ff10101c0..82d7c4bf5970 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2910,8 +2910,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data);
}
- if (dd->pport[i].ibport_data.smi_ah)
- ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah);
}
}
@@ -5497,7 +5495,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
unsigned delay;
int ret;
- agent = ibp->send_agent;
+ agent = ibp->rvp.send_agent;
if (!agent)
goto retry;
@@ -5515,7 +5513,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
ret = PTR_ERR(ah);
else {
send_buf->ah = ah;
- ibp->smi_ah = to_iah(ah);
+ ibp->smi_ah = ibah_to_rvtah(ah);
ret = 0;
}
} else {
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 4ff340fe904f..3f062f0dd9d8 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -42,6 +42,7 @@
#ifdef CONFIG_INFINIBAND_QIB_DCA
#include <linux/dca.h>
#endif
+#include <rdma/rdma_vt.h>
#include "qib.h"
#include "qib_common.h"
@@ -244,6 +245,13 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
alloc_percpu(struct qib_pma_counters);
if (!ppd->ibport_data.pmastats)
return -ENOMEM;
+ ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
+ ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
+ ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
+ if (!(ppd->ibport_data.rvp.rc_acks) ||
+ !(ppd->ibport_data.rvp.rc_qacks) ||
+ !(ppd->ibport_data.rvp.rc_delayed_comp))
+ return -ENOMEM;
if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
goto bail;
@@ -449,8 +457,6 @@ static int loadtime_init(struct qib_devdata *dd)
init_timer(&dd->intrchk_timer);
dd->intrchk_timer.function = verify_interrupt;
dd->intrchk_timer.data = (unsigned long) dd;
-
- ret = qib_cq_init(dd);
done:
return ret;
}
@@ -631,6 +637,9 @@ wq_error:
static void qib_free_pportdata(struct qib_pportdata *ppd)
{
free_percpu(ppd->ibport_data.pmastats);
+ free_percpu(ppd->ibport_data.rvp.rc_acks);
+ free_percpu(ppd->ibport_data.rvp.rc_qacks);
+ free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
ppd->ibport_data.pmastats = NULL;
}
@@ -1081,7 +1090,7 @@ void qib_free_devdata(struct qib_devdata *dd)
qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
free_percpu(dd->int_counter);
- ib_dealloc_device(&dd->verbs_dev.ibdev);
+ ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
}
u64 qib_int_counter(struct qib_devdata *dd)
@@ -1120,9 +1129,12 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
unsigned long flags;
struct qib_devdata *dd;
- int ret;
+ int ret, nports;
- dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
+ /* extra is sizeof(struct qib_pportdata) * number of ports */
+ nports = extra / sizeof(struct qib_pportdata);
+ dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
+ nports);
if (!dd)
return ERR_PTR(-ENOMEM);
@@ -1171,7 +1183,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
bail:
if (!list_empty(&dd->list))
list_del_init(&dd->list);
- ib_dealloc_device(&dd->verbs_dev.ibdev);
+ ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
return ERR_PTR(ret);
}
@@ -1421,7 +1433,6 @@ static void cleanup_device_data(struct qib_devdata *dd)
}
kfree(tmp);
kfree(dd->boardname);
- qib_cq_exit(dd);
}
/*
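
The three counters added to qib_init_pportdata() above are per-CPU, paired with the free_percpu() calls in qib_free_pportdata(). A hypothetical usage sketch (the summing helper is illustrative, not from the patch), assuming rvp.rc_acks was allocated with alloc_percpu(u64) as in the hunk:

    static u64 example_sum_rc_acks(struct qib_ibport *ibp)
    {
            u64 total = 0;
            int cpu;

            /* writers do lock-free per-CPU increments, e.g.: */
            this_cpu_inc(*ibp->rvp.rc_acks);

            /* readers sum the per-CPU slots */
            for_each_possible_cpu(cpu)
                    total += *per_cpu_ptr(ibp->rvp.rc_acks, cpu);
            return total;
    }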
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 086616d071b9..a014fd4cd076 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -74,7 +74,7 @@ static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
struct ib_event event;
struct qib_devdata *dd = ppd->dd;
- event.device = &dd->verbs_dev.ibdev;
+ event.device = &dd->verbs_dev.rdi.ibdev;
event.element.port_num = ppd->port;
event.event = ev;
ib_dispatch_event(&event);
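
The new event.device expression is worth unpacking once: the ib_device now lives inside the rdmavt device, which lives inside the qib device, so the pointer chain runs dd->verbs_dev.rdi.ibdev and can be walked back with container_of(), exactly as the helpers earlier in this patch do:

    struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
    struct qib_devdata *dd = container_of(ibdev, struct qib_devdata,
                                          verbs_dev);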
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index d725c565518d..2c3c93572c17 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -46,20 +46,20 @@
*
*/
-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
unsigned long flags;
u32 r;
u32 n;
int ret = 0;
struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct qib_lkey_table *rkt = &dev->lk_table;
+ struct rvt_lkey_table *rkt = &dev->lk_table;
spin_lock_irqsave(&rkt->lock, flags);
/* special case for dma_mr lkey == 0 */
if (dma_region) {
- struct qib_mregion *tmr;
+ struct rvt_mregion *tmr;
tmr = rcu_access_pointer(dev->dma_mr);
if (!tmr) {
@@ -90,8 +90,8 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
* bits are capped in qib_verbs.c to ensure enough bits
* for generation number
*/
- mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
- ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
+ ((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
<< 8);
if (mr->lkey == 0) {
mr->lkey |= 1 << 8;
@@ -114,13 +114,13 @@ bail:
* qib_free_lkey - free an lkey
* @mr: mr to free from tables
*/
-void qib_free_lkey(struct qib_mregion *mr)
+void qib_free_lkey(struct rvt_mregion *mr)
{
unsigned long flags;
u32 lkey = mr->lkey;
u32 r;
struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct qib_lkey_table *rkt = &dev->lk_table;
+ struct rvt_lkey_table *rkt = &dev->lk_table;
spin_lock_irqsave(&rkt->lock, flags);
if (!mr->lkey_published)
@@ -128,7 +128,7 @@ void qib_free_lkey(struct qib_mregion *mr)
if (lkey == 0)
RCU_INIT_POINTER(dev->dma_mr, NULL);
else {
- r = lkey >> (32 - ib_qib_lkey_table_size);
+ r = lkey >> (32 - ib_rvt_lkey_table_size);
RCU_INIT_POINTER(rkt->table[r], NULL);
}
qib_put_mr(mr);
@@ -138,105 +138,6 @@ out:
}
/**
- * qib_lkey_ok - check IB SGE for validity and initialize
- * @rkt: table containing lkey to check SGE against
- * @pd: protection domain
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * increments the reference count upon success
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
- struct qib_sge *isge, struct ib_sge *sge, int acc)
-{
- struct qib_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /*
- * We use LKEY == zero for kernel virtual addresses
- * (see qib_get_dma_mr and qib_dma.c).
- */
- rcu_read_lock();
- if (sge->lkey == 0) {
- struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- isge->mr = mr;
- isge->vaddr = (void *) sge->addr;
- isge->length = sge->length;
- isge->sge_length = sge->length;
- isge->m = 0;
- isge->n = 0;
- goto ok;
- }
- mr = rcu_dereference(
- rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
- goto bail;
-
- off = sge->addr - mr->user_base;
- if (unlikely(sge->addr < mr->user_base ||
- off + sge->length > mr->length ||
- (mr->access_flags & acc) != acc))
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- page sizes are uniform power of 2 so no loop is necessary
- entries_spanned_by_off is the number of times the loop below
- would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off/QIB_SEGSZ;
- n = entries_spanned_by_off%QIB_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- isge->mr = mr;
- isge->vaddr = mr->map[m]->segs[n].vaddr + off;
- isge->length = mr->map[m]->segs[n].length - off;
- isge->sge_length = sge->length;
- isge->m = m;
- isge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
-
-/**
* qib_rkey_ok - check the IB virtual address, length, and RKEY
* @qp: qp for validation
* @sge: SGE state
@@ -249,11 +150,11 @@ bail:
*
* increments the reference count upon success
*/
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
+int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc)
{
- struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct qib_mregion *mr;
+ struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+ struct rvt_mregion *mr;
unsigned n, m;
size_t off;
@@ -263,7 +164,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
*/
rcu_read_lock();
if (rkey == 0) {
- struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+ struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
struct qib_ibdev *dev = to_idev(pd->ibpd.device);
if (pd->user)
@@ -285,7 +186,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
}
mr = rcu_dereference(
- rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
+ rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;
@@ -308,15 +209,15 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
entries_spanned_by_off = off >> mr->page_shift;
off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off/QIB_SEGSZ;
- n = entries_spanned_by_off%QIB_SEGSZ;
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
} else {
m = 0;
n = 0;
while (off >= mr->map[m]->segs[n].length) {
off -= mr->map[m]->segs[n].length;
n++;
- if (n >= QIB_SEGSZ) {
+ if (n >= RVT_SEGSZ) {
m++;
n = 0;
}
@@ -335,58 +236,3 @@ bail:
return 0;
}
-/*
- * Initialize the memory region specified by the work request.
- */
-int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr)
-{
- struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct qib_pd *pd = to_ipd(qp->ibqp.pd);
- struct qib_mr *mr = to_imr(wr->mr);
- struct qib_mregion *mrg;
- u32 key = wr->key;
- unsigned i, n, m;
- int ret = -EINVAL;
- unsigned long flags;
- u64 *page_list;
- size_t ps;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (pd->user || key == 0)
- goto bail;
-
- mrg = rcu_dereference_protected(
- rkt->table[(key >> (32 - ib_qib_lkey_table_size))],
- lockdep_is_held(&rkt->lock));
- if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd))
- goto bail;
-
- if (mr->npages > mrg->max_segs)
- goto bail;
-
- ps = mr->ibmr.page_size;
- if (mr->ibmr.length > ps * mr->npages)
- goto bail;
-
- mrg->user_base = mr->ibmr.iova;
- mrg->iova = mr->ibmr.iova;
- mrg->lkey = key;
- mrg->length = mr->ibmr.length;
- mrg->access_flags = wr->access;
- page_list = mr->pages;
- m = 0;
- n = 0;
- for (i = 0; i < mr->npages; i++) {
- mrg->map[m]->segs[n].vaddr = (void *) page_list[i];
- mrg->map[m]->segs[n].length = ps;
- if (++n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
-
- ret = 0;
-bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 9625e7c438e5..0bd18375d7df 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -70,7 +70,7 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
unsigned long flags;
unsigned long timeout;
- agent = ibp->send_agent;
+ agent = ibp->rvp.send_agent;
if (!agent)
return;
@@ -79,7 +79,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
return;
/* o14-2 */
- if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+ if (ibp->rvp.trap_timeout &&
+ time_before(jiffies, ibp->rvp.trap_timeout))
return;
send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
@@ -93,42 +94,42 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
smp->class_version = 1;
smp->method = IB_MGMT_METHOD_TRAP;
- ibp->tid++;
- smp->tid = cpu_to_be64(ibp->tid);
+ ibp->rvp.tid++;
+ smp->tid = cpu_to_be64(ibp->rvp.tid);
smp->attr_id = IB_SMP_ATTR_NOTICE;
/* o14-1: smp->mkey = 0; */
memcpy(smp->data, data, len);
- spin_lock_irqsave(&ibp->lock, flags);
- if (!ibp->sm_ah) {
- if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ if (!ibp->rvp.sm_ah) {
+ if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
struct ib_ah *ah;
- ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
+ ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
if (IS_ERR(ah))
ret = PTR_ERR(ah);
else {
send_buf->ah = ah;
- ibp->sm_ah = to_iah(ah);
+ ibp->rvp.sm_ah = ibah_to_rvtah(ah);
ret = 0;
}
} else
ret = -EINVAL;
} else {
- send_buf->ah = &ibp->sm_ah->ibah;
+ send_buf->ah = &ibp->rvp.sm_ah->ibah;
ret = 0;
}
- spin_unlock_irqrestore(&ibp->lock, flags);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
if (!ret)
ret = ib_post_send_mad(send_buf, NULL);
if (!ret) {
/* 4.096 usec. */
- timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
- ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+ timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
+ ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
} else {
ib_free_send_mad(send_buf);
- ibp->trap_timeout = 0;
+ ibp->rvp.trap_timeout = 0;
}
}
@@ -141,10 +142,10 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
struct ib_mad_notice_attr data;
if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
- ibp->pkey_violations++;
+ ibp->rvp.pkey_violations++;
else
- ibp->qkey_violations++;
- ibp->n_pkt_drops++;
+ ibp->rvp.qkey_violations++;
+ ibp->rvp.n_pkt_drops++;
/* Send violation trap */
data.generic_type = IB_NOTICE_TYPE_SECURITY;
@@ -205,8 +206,11 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
/*
* Send a Port Capability Mask Changed trap (ch. 14.3.11).
*/
-void qib_cap_mask_chg(struct qib_ibport *ibp)
+void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = dd_from_dev(ibdev);
+ struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
struct ib_mad_notice_attr data;
data.generic_type = IB_NOTICE_TYPE_INFO;
@@ -217,8 +221,8 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
data.toggle_count = 0;
memset(&data.details, 0, sizeof(data.details));
data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
-
+ data.details.ntc_144.new_cap_mask =
+ cpu_to_be32(ibp->rvp.port_cap_flags);
qib_send_trap(ibp, &data, sizeof(data));
}
@@ -409,37 +413,38 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
int ret = 0;
/* Is the mkey in the process of expiring? */
- if (ibp->mkey_lease_timeout &&
- time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+ if (ibp->rvp.mkey_lease_timeout &&
+ time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
/* Clear timeout and mkey protection field. */
- ibp->mkey_lease_timeout = 0;
- ibp->mkeyprot = 0;
+ ibp->rvp.mkey_lease_timeout = 0;
+ ibp->rvp.mkeyprot = 0;
}
- if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
- ibp->mkey == smp->mkey)
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
+ ibp->rvp.mkey == smp->mkey)
valid_mkey = 1;
/* Unset lease timeout on any valid Get/Set/TrapRepress */
- if (valid_mkey && ibp->mkey_lease_timeout &&
+ if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
(smp->method == IB_MGMT_METHOD_GET ||
smp->method == IB_MGMT_METHOD_SET ||
smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
- ibp->mkey_lease_timeout = 0;
+ ibp->rvp.mkey_lease_timeout = 0;
if (!valid_mkey) {
switch (smp->method) {
case IB_MGMT_METHOD_GET:
/* Bad mkey not a violation below level 2 */
- if (ibp->mkeyprot < 2)
+ if (ibp->rvp.mkeyprot < 2)
break;
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
- if (ibp->mkey_violations != 0xFFFF)
- ++ibp->mkey_violations;
- if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
- ibp->mkey_lease_timeout = jiffies +
- ibp->mkey_lease_period * HZ;
+ if (ibp->rvp.mkey_violations != 0xFFFF)
+ ++ibp->rvp.mkey_violations;
+ if (!ibp->rvp.mkey_lease_timeout &&
+ ibp->rvp.mkey_lease_period)
+ ibp->rvp.mkey_lease_timeout = jiffies +
+ ibp->rvp.mkey_lease_period * HZ;
/* Generate a trap notice. */
qib_bad_mkey(ibp, smp);
ret = 1;
@@ -489,15 +494,15 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
/* Only return the mkey if the protection field allows it. */
if (!(smp->method == IB_MGMT_METHOD_GET &&
- ibp->mkey != smp->mkey &&
- ibp->mkeyprot == 1))
- pip->mkey = ibp->mkey;
- pip->gid_prefix = ibp->gid_prefix;
+ ibp->rvp.mkey != smp->mkey &&
+ ibp->rvp.mkeyprot == 1))
+ pip->mkey = ibp->rvp.mkey;
+ pip->gid_prefix = ibp->rvp.gid_prefix;
pip->lid = cpu_to_be16(ppd->lid);
- pip->sm_lid = cpu_to_be16(ibp->sm_lid);
- pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
+ pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
+ pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
/* pip->diag_code; */
- pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+ pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
pip->local_port_num = port;
pip->link_width_enabled = ppd->link_width_enabled;
pip->link_width_supported = ppd->link_width_supported;
@@ -508,7 +513,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
pip->portphysstate_linkdown =
(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
(get_linkdowndefaultstate(ppd) ? 1 : 2);
- pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+ pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
ppd->link_speed_enabled;
switch (ppd->ibmtu) {
@@ -529,9 +534,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
mtu = IB_MTU_256;
break;
}
- pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
+ pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
- pip->vl_high_limit = ibp->vl_high_limit;
+ pip->vl_high_limit = ibp->rvp.vl_high_limit;
pip->vl_arb_high_cap =
dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
pip->vl_arb_low_cap =
@@ -542,20 +547,20 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
/* pip->vlstallcnt_hoqlife; */
pip->operationalvl_pei_peo_fpi_fpo =
dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
- pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+ pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
/* P_KeyViolations are counted by hardware. */
- pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
- pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+ pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
+ pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
/* Only the hardware GUID is supported for now */
pip->guid_cap = QIB_GUIDS_PER_PORT;
- pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
+ pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
/* 32.768 usec. response time (guessing) */
pip->resv_resptimevalue = 3;
pip->localphyerrors_overrunerrors =
(get_phyerrthreshold(ppd) << 4) |
get_overrunthreshold(ppd);
/* pip->max_credit_hint; */
- if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
u32 v;
v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
@@ -685,13 +690,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
event.device = ibdev;
event.element.port_num = port;
- ibp->mkey = pip->mkey;
- ibp->gid_prefix = pip->gid_prefix;
- ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
+ ibp->rvp.mkey = pip->mkey;
+ ibp->rvp.gid_prefix = pip->gid_prefix;
+ ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
lid = be16_to_cpu(pip->lid);
/* Must be a valid unicast LID address. */
- if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
+ if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
smp->status |= IB_SMP_INVALID_FIELD;
else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
if (ppd->lid != lid)
@@ -706,21 +711,21 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
smlid = be16_to_cpu(pip->sm_lid);
msl = pip->neighbormtu_mastersmsl & 0xF;
/* Must be a valid unicast LID address. */
- if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
+ if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
smp->status |= IB_SMP_INVALID_FIELD;
- else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
- spin_lock_irqsave(&ibp->lock, flags);
- if (ibp->sm_ah) {
- if (smlid != ibp->sm_lid)
- ibp->sm_ah->attr.dlid = smlid;
- if (msl != ibp->sm_sl)
- ibp->sm_ah->attr.sl = msl;
+ else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ if (ibp->rvp.sm_ah) {
+ if (smlid != ibp->rvp.sm_lid)
+ ibp->rvp.sm_ah->attr.dlid = smlid;
+ if (msl != ibp->rvp.sm_sl)
+ ibp->rvp.sm_ah->attr.sl = msl;
}
- spin_unlock_irqrestore(&ibp->lock, flags);
- if (smlid != ibp->sm_lid)
- ibp->sm_lid = smlid;
- if (msl != ibp->sm_sl)
- ibp->sm_sl = msl;
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ if (smlid != ibp->rvp.sm_lid)
+ ibp->rvp.sm_lid = smlid;
+ if (msl != ibp->rvp.sm_sl)
+ ibp->rvp.sm_sl = msl;
event.event = IB_EVENT_SM_CHANGE;
ib_dispatch_event(&event);
}
@@ -768,10 +773,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
smp->status |= IB_SMP_INVALID_FIELD;
}
- ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
- ibp->vl_high_limit = pip->vl_high_limit;
+ ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
+ ibp->rvp.vl_high_limit = pip->vl_high_limit;
(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
- ibp->vl_high_limit);
+ ibp->rvp.vl_high_limit);
mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
if (mtu == -1)
@@ -789,13 +794,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
}
if (pip->mkey_violations == 0)
- ibp->mkey_violations = 0;
+ ibp->rvp.mkey_violations = 0;
if (pip->pkey_violations == 0)
- ibp->pkey_violations = 0;
+ ibp->rvp.pkey_violations = 0;
if (pip->qkey_violations == 0)
- ibp->qkey_violations = 0;
+ ibp->rvp.qkey_violations = 0;
ore = pip->localphyerrors_overrunerrors;
if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
@@ -804,7 +809,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (set_overrunthreshold(ppd, (ore & 0xF)))
smp->status |= IB_SMP_INVALID_FIELD;
- ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
+ ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
/*
* Do the port state change now that the other link parameters
@@ -1028,7 +1033,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.ibdev;
+ event.device = &dd->verbs_dev.rdi.ibdev;
event.element.port_num = port;
ib_dispatch_event(&event);
}
@@ -1062,7 +1067,7 @@ static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
memset(smp->data, 0, sizeof(smp->data));
- if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP))
+ if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
smp->status |= IB_SMP_UNSUP_METHOD;
else
for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
@@ -1078,7 +1083,7 @@ static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
u8 *p = (u8 *) smp->data;
unsigned i;
- if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
+ if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
smp->status |= IB_SMP_UNSUP_METHOD;
return reply(smp);
}
@@ -1195,20 +1200,20 @@ static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
goto bail;
}
- spin_lock_irqsave(&ibp->lock, flags);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
p->counter_width = 4; /* 32 bit counters */
p->counter_mask0_9 = COUNTER_MASK0_9;
- p->sample_start = cpu_to_be32(ibp->pma_sample_start);
- p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
- p->tag = cpu_to_be16(ibp->pma_tag);
- p->counter_select[0] = ibp->pma_counter_select[0];
- p->counter_select[1] = ibp->pma_counter_select[1];
- p->counter_select[2] = ibp->pma_counter_select[2];
- p->counter_select[3] = ibp->pma_counter_select[3];
- p->counter_select[4] = ibp->pma_counter_select[4];
- spin_unlock_irqrestore(&ibp->lock, flags);
+ p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
+ p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
+ p->tag = cpu_to_be16(ibp->rvp.pma_tag);
+ p->counter_select[0] = ibp->rvp.pma_counter_select[0];
+ p->counter_select[1] = ibp->rvp.pma_counter_select[1];
+ p->counter_select[2] = ibp->rvp.pma_counter_select[2];
+ p->counter_select[3] = ibp->rvp.pma_counter_select[3];
+ p->counter_select[4] = ibp->rvp.pma_counter_select[4];
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
bail:
return reply((struct ib_smp *) pmp);
@@ -1233,7 +1238,7 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
goto bail;
}
- spin_lock_irqsave(&ibp->lock, flags);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
/* Port Sampling code owns the PS* HW counters */
xmit_flags = ppd->cong_stats.flags;
@@ -1242,18 +1247,18 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
if (status == IB_PMA_SAMPLE_STATUS_DONE ||
(status == IB_PMA_SAMPLE_STATUS_RUNNING &&
xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
- ibp->pma_sample_start = be32_to_cpu(p->sample_start);
- ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
- ibp->pma_tag = be16_to_cpu(p->tag);
- ibp->pma_counter_select[0] = p->counter_select[0];
- ibp->pma_counter_select[1] = p->counter_select[1];
- ibp->pma_counter_select[2] = p->counter_select[2];
- ibp->pma_counter_select[3] = p->counter_select[3];
- ibp->pma_counter_select[4] = p->counter_select[4];
- dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
- ibp->pma_sample_start);
+ ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
+ ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
+ ibp->rvp.pma_tag = be16_to_cpu(p->tag);
+ ibp->rvp.pma_counter_select[0] = p->counter_select[0];
+ ibp->rvp.pma_counter_select[1] = p->counter_select[1];
+ ibp->rvp.pma_counter_select[2] = p->counter_select[2];
+ ibp->rvp.pma_counter_select[3] = p->counter_select[3];
+ ibp->rvp.pma_counter_select[4] = p->counter_select[4];
+ dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
+ ibp->rvp.pma_sample_start);
}
- spin_unlock_irqrestore(&ibp->lock, flags);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
ret = pma_get_portsamplescontrol(pmp, ibdev, port);
@@ -1357,8 +1362,8 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
int i;
memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->lock, flags);
- p->tag = cpu_to_be16(ibp->pma_tag);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ p->tag = cpu_to_be16(ibp->rvp.pma_tag);
if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
else {
@@ -1373,11 +1378,11 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
}
}
- for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
p->counter[i] = cpu_to_be32(
get_cache_hw_sample_counters(
- ppd, ibp->pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->lock, flags);
+ ppd, ibp->rvp.pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
return reply((struct ib_smp *) pmp);
}
@@ -1397,8 +1402,8 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
/* Port Sampling code owns the PS* HW counters */
memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->lock, flags);
- p->tag = cpu_to_be16(ibp->pma_tag);
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ p->tag = cpu_to_be16(ibp->rvp.pma_tag);
if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
else {
@@ -1415,11 +1420,11 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
}
}
- for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
+ for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
p->counter[i] = cpu_to_be64(
get_cache_hw_sample_counters(
- ppd, ibp->pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->lock, flags);
+ ppd, ibp->rvp.pma_counter_select[i]));
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
return reply((struct ib_smp *) pmp);
}
@@ -1453,7 +1458,7 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp,
cntrs.excessive_buffer_overrun_errors -=
ibp->z_excessive_buffer_overrun_errors;
cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->n_vl15_dropped;
+ cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
memset(pmp->data, 0, sizeof(pmp->data));
@@ -1546,9 +1551,9 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
qib_get_counters(ppd, &cntrs);
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
xmit_wait_counter = xmit_wait_get_value_delta(ppd);
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
/* Adjust counters for any resets done. */
cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
@@ -1564,7 +1569,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
cntrs.excessive_buffer_overrun_errors -=
ibp->z_excessive_buffer_overrun_errors;
cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->n_vl15_dropped;
+ cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
cntrs.port_xmit_data -= ibp->z_port_xmit_data;
cntrs.port_rcv_data -= ibp->z_port_rcv_data;
cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
@@ -1743,7 +1748,7 @@ static int pma_set_portcounters(struct ib_pma_mad *pmp,
cntrs.excessive_buffer_overrun_errors;
if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
- ibp->n_vl15_dropped = 0;
+ ibp->rvp.n_vl15_dropped = 0;
ibp->z_vl15_dropped = cntrs.vl15_dropped;
}
@@ -1778,11 +1783,11 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
ret = pma_get_portcounters_cong(pmp, ibdev, port);
if (counter_select & IB_PMA_SEL_CONG_XMIT) {
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
ppd->cong_stats.counter = 0;
dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
0x0);
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
}
if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
ibp->z_port_xmit_data = cntrs.port_xmit_data;
@@ -1806,7 +1811,7 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
cntrs.local_link_integrity_errors;
ibp->z_excessive_buffer_overrun_errors =
cntrs.excessive_buffer_overrun_errors;
- ibp->n_vl15_dropped = 0;
+ ibp->rvp.n_vl15_dropped = 0;
ibp->z_vl15_dropped = cntrs.vl15_dropped;
}
@@ -1916,12 +1921,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = subn_get_vl_arb(smp, ibdev, port);
goto bail;
case IB_SMP_ATTR_SM_INFO:
- if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
ret = IB_MAD_RESULT_SUCCESS |
IB_MAD_RESULT_CONSUMED;
goto bail;
}
- if (ibp->port_cap_flags & IB_PORT_SM) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
@@ -1950,12 +1955,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = subn_set_vl_arb(smp, ibdev, port);
goto bail;
case IB_SMP_ATTR_SM_INFO:
- if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
ret = IB_MAD_RESULT_SUCCESS |
IB_MAD_RESULT_CONSUMED;
goto bail;
}
- if (ibp->port_cap_flags & IB_PORT_SM) {
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
}
@@ -2443,12 +2448,6 @@ bail:
return ret;
}
-static void send_handler(struct ib_mad_agent *agent,
- struct ib_mad_send_wc *mad_send_wc)
-{
- ib_free_send_mad(mad_send_wc->send_buf);
-}
-
static void xmit_wait_timer_func(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
@@ -2456,7 +2455,7 @@ static void xmit_wait_timer_func(unsigned long opaque)
unsigned long flags;
u8 status;
- spin_lock_irqsave(&ppd->ibport_data.lock, flags);
+ spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
if (status == IB_PMA_SAMPLE_STATUS_DONE) {
@@ -2469,74 +2468,35 @@ static void xmit_wait_timer_func(unsigned long opaque)
ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
- spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
+ spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}
-int qib_create_agents(struct qib_ibdev *dev)
+void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
- struct qib_devdata *dd = dd_from_dev(dev);
- struct ib_mad_agent *agent;
- struct qib_ibport *ibp;
- int p;
- int ret;
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(ibdev,
+ struct qib_devdata, verbs_dev);
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
- NULL, 0, send_handler,
- NULL, NULL, 0);
- if (IS_ERR(agent)) {
- ret = PTR_ERR(agent);
- goto err;
- }
-
- /* Initialize xmit_wait structure */
- dd->pport[p].cong_stats.counter = 0;
- init_timer(&dd->pport[p].cong_stats.timer);
- dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
- dd->pport[p].cong_stats.timer.data =
- (unsigned long)(&dd->pport[p]);
- dd->pport[p].cong_stats.timer.expires = 0;
- add_timer(&dd->pport[p].cong_stats.timer);
-
- ibp->send_agent = agent;
- }
-
- return 0;
-
-err:
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- if (ibp->send_agent) {
- agent = ibp->send_agent;
- ibp->send_agent = NULL;
- ib_unregister_mad_agent(agent);
- }
- }
-
- return ret;
+ /* Initialize xmit_wait structure */
+ dd->pport[port_idx].cong_stats.counter = 0;
+ init_timer(&dd->pport[port_idx].cong_stats.timer);
+ dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func;
+ dd->pport[port_idx].cong_stats.timer.data =
+ (unsigned long)(&dd->pport[port_idx]);
+ dd->pport[port_idx].cong_stats.timer.expires = 0;
+ add_timer(&dd->pport[port_idx].cong_stats.timer);
}
-void qib_free_agents(struct qib_ibdev *dev)
+void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
{
- struct qib_devdata *dd = dd_from_dev(dev);
- struct ib_mad_agent *agent;
- struct qib_ibport *ibp;
- int p;
-
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- if (ibp->send_agent) {
- agent = ibp->send_agent;
- ibp->send_agent = NULL;
- ib_unregister_mad_agent(agent);
- }
- if (ibp->sm_ah) {
- ib_destroy_ah(&ibp->sm_ah->ibah);
- ibp->sm_ah = NULL;
- }
- if (dd->pport[p].cong_stats.timer.data)
- del_timer_sync(&dd->pport[p].cong_stats.timer);
- }
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(ibdev,
+ struct qib_devdata, verbs_dev);
+
+ if (dd->pport[port_idx].cong_stats.timer.data)
+ del_timer_sync(&dd->pport[port_idx].cong_stats.timer);
+
+ if (dd->pport[port_idx].ibport_data.smi_ah)
+ ib_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah);
}
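
With send_handler and the register/unregister loops gone, MAD agent ownership moves into rdmavt, and the driver hooks above only manage the congestion-stats timer and the cached SMI AH. A hypothetical caller-side sketch of the flow (rdmavt internals assumed from its driver hooks, not quoted):

    /* inside rdmavt's agent setup, per port: */
    for (p = 0; p < rdi->dparms.nports; p++) {
            /* ib_register_mad_agent(...) happens in rdmavt core */
            if (rdi->driver_f.notify_create_mad_agent)
                    rdi->driver_f.notify_create_mad_agent(rdi, p);
    }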
diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c
deleted file mode 100644
index 34927b700b0e..000000000000
--- a/drivers/infiniband/hw/qib/qib_mmap.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <asm/pgtable.h>
-
-#include "qib_verbs.h"
-
-/**
- * qib_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct qib_mmap_info
- */
-void qib_release_mmap_info(struct kref *ref)
-{
- struct qib_mmap_info *ip =
- container_of(ref, struct qib_mmap_info, ref);
- struct qib_ibdev *dev = to_idev(ip->context->device);
-
- spin_lock_irq(&dev->pending_lock);
- list_del(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
-
- vfree(ip->obj);
- kfree(ip);
-}
-
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void qib_vma_open(struct vm_area_struct *vma)
-{
- struct qib_mmap_info *ip = vma->vm_private_data;
-
- kref_get(&ip->ref);
-}
-
-static void qib_vma_close(struct vm_area_struct *vma)
-{
- struct qib_mmap_info *ip = vma->vm_private_data;
-
- kref_put(&ip->ref, qib_release_mmap_info);
-}
-
-static const struct vm_operations_struct qib_vm_ops = {
- .open = qib_vma_open,
- .close = qib_vma_close,
-};
-
-/**
- * qib_mmap - create a new mmap region
- * @context: the IB user context of the process making the mmap() call
- * @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
- */
-int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
- struct qib_ibdev *dev = to_idev(context->device);
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long size = vma->vm_end - vma->vm_start;
- struct qib_mmap_info *ip, *pp;
- int ret = -EINVAL;
-
- /*
- * Search the device's list of objects waiting for a mmap call.
- * Normally, this list is very short since a call to create a
- * CQ, QP, or SRQ is soon followed by a call to mmap().
- */
- spin_lock_irq(&dev->pending_lock);
- list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
- pending_mmaps) {
- /* Only the creator is allowed to mmap the object */
- if (context != ip->context || (__u64) offset != ip->offset)
- continue;
- /* Don't allow a mmap larger than the object. */
- if (size > ip->size)
- break;
-
- list_del_init(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
-
- ret = remap_vmalloc_range(vma, ip->obj, 0);
- if (ret)
- goto done;
- vma->vm_ops = &qib_vm_ops;
- vma->vm_private_data = ip;
- qib_vma_open(vma);
- goto done;
- }
- spin_unlock_irq(&dev->pending_lock);
-done:
- return ret;
-}
-
-/*
- * Allocate information for qib_mmap
- */
-struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
- u32 size,
- struct ib_ucontext *context,
- void *obj) {
- struct qib_mmap_info *ip;
-
- ip = kmalloc(sizeof(*ip), GFP_KERNEL);
- if (!ip)
- goto bail;
-
- size = PAGE_ALIGN(size);
-
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
-
- INIT_LIST_HEAD(&ip->pending_mmaps);
- ip->size = size;
- ip->context = context;
- ip->obj = obj;
- kref_init(&ip->ref);
-
-bail:
- return ip;
-}
-
-void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
- u32 size, void *obj)
-{
- size = PAGE_ALIGN(size);
-
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
-
- ip->size = size;
- ip->obj = obj;
-}
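
For reference when reading the rdmavt replacement, the deleted file's offset-token scheme fits in one fragment: each object awaiting mmap() gets a unique, page-aligned token that userspace later passes back via the mmap pgoff. Names below are as in the deleted qib_mmap.c:

    spin_lock_irq(&dev->mmap_offset_lock);
    if (dev->mmap_offset == 0)
            dev->mmap_offset = PAGE_SIZE;   /* pgoff 0 stays unused */
    ip->offset = dev->mmap_offset;          /* token handed to userspace */
    dev->mmap_offset += size;               /* size is PAGE_ALIGN()ed */
    spin_unlock_irq(&dev->mmap_offset_lock);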
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
deleted file mode 100644
index 5f53304e8a9b..000000000000
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ /dev/null
@@ -1,490 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_umem.h>
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-
-/* Fast memory region */
-struct qib_fmr {
- struct ib_fmr ibfmr;
- struct qib_mregion mr; /* must be last */
-};
-
-static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct qib_fmr, ibfmr);
-}
-
-static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
- int count)
-{
- int m, i = 0;
- int rval = 0;
-
- m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
- for (; i < m; i++) {
- mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
- if (!mr->map[i])
- goto bail;
- }
- mr->mapsz = m;
- init_completion(&mr->comp);
- /* count returning the ptr to user */
- atomic_set(&mr->refcount, 1);
- mr->pd = pd;
- mr->max_segs = count;
-out:
- return rval;
-bail:
- while (i)
- kfree(mr->map[--i]);
- rval = -ENOMEM;
- goto out;
-}
-
-static void deinit_qib_mregion(struct qib_mregion *mr)
-{
- int i = mr->mapsz;
-
- mr->mapsz = 0;
- while (i)
- kfree(mr->map[--i]);
-}
-
-
-/**
- * qib_get_dma_mr - get a DMA memory region
- * @pd: protection domain for this memory region
- * @acc: access flags
- *
- * Returns the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the
- * struct ib_dma_mapping_ops functions (see qib_dma.c).
- */
-struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct qib_mr *mr = NULL;
- struct ib_mr *ret;
- int rval;
-
- if (to_ipd(pd)->user) {
- ret = ERR_PTR(-EPERM);
- goto bail;
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- rval = init_qib_mregion(&mr->mr, pd, 0);
- if (rval) {
- ret = ERR_PTR(rval);
- goto bail;
- }
-
-
- rval = qib_alloc_lkey(&mr->mr, 1);
- if (rval) {
- ret = ERR_PTR(rval);
- goto bail_mregion;
- }
-
- mr->mr.access_flags = acc;
- ret = &mr->ibmr;
-done:
- return ret;
-
-bail_mregion:
- deinit_qib_mregion(&mr->mr);
-bail:
- kfree(mr);
- goto done;
-}
-
-static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
-{
- struct qib_mr *mr;
- int rval = -ENOMEM;
- int m;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
- mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
- if (!mr)
- goto bail;
-
- rval = init_qib_mregion(&mr->mr, pd, count);
- if (rval)
- goto bail;
-
- rval = qib_alloc_lkey(&mr->mr, 0);
- if (rval)
- goto bail_mregion;
- mr->ibmr.lkey = mr->mr.lkey;
- mr->ibmr.rkey = mr->mr.lkey;
-done:
- return mr;
-
-bail_mregion:
- deinit_qib_mregion(&mr->mr);
-bail:
- kfree(mr);
- mr = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * qib_reg_user_mr - register a userspace memory region
- * @pd: protection domain for this memory region
- * @start: starting userspace address
- * @length: length of region to register
- * @mr_access_flags: access flags for this memory region
- * @udata: unused by the QLogic_IB driver
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata)
-{
- struct qib_mr *mr;
- struct ib_umem *umem;
- struct scatterlist *sg;
- int n, m, entry;
- struct ib_mr *ret;
-
- if (length == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
- if (IS_ERR(umem))
- return (void *) umem;
-
- n = umem->nmap;
-
- mr = alloc_mr(n, pd);
- if (IS_ERR(mr)) {
- ret = (struct ib_mr *)mr;
- ib_umem_release(umem);
- goto bail;
- }
-
- mr->mr.user_base = start;
- mr->mr.iova = virt_addr;
- mr->mr.length = length;
- mr->mr.offset = ib_umem_offset(umem);
- mr->mr.access_flags = mr_access_flags;
- mr->umem = umem;
-
- if (is_power_of_2(umem->page_size))
- mr->mr.page_shift = ilog2(umem->page_size);
- m = 0;
- n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- void *vaddr;
-
- vaddr = page_address(sg_page(sg));
- if (!vaddr) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = umem->page_size;
- n++;
- if (n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-/**
- * qib_dereg_mr - unregister and free a memory region
- * @ibmr: the memory region to free
- *
- * Returns 0 on success.
- *
- * Note that this is called to free MRs created by qib_get_dma_mr()
- * or qib_reg_user_mr().
- */
-int qib_dereg_mr(struct ib_mr *ibmr)
-{
- struct qib_mr *mr = to_imr(ibmr);
- int ret = 0;
- unsigned long timeout;
-
- kfree(mr->pages);
- qib_free_lkey(&mr->mr);
-
- qib_put_mr(&mr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&mr->mr.comp,
- 5 * HZ);
- if (!timeout) {
- qib_get_mr(&mr->mr);
- ret = -EBUSY;
- goto out;
- }
- deinit_qib_mregion(&mr->mr);
- if (mr->umem)
- ib_umem_release(mr->umem);
- kfree(mr);
-out:
- return ret;
-}
-
-/*
- * Allocate a memory region usable with the
- * IB_WR_REG_MR send work request.
- *
- * Return the memory region on success, otherwise return an errno.
- */
-struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
-{
- struct qib_mr *mr;
-
- if (mr_type != IB_MR_TYPE_MEM_REG)
- return ERR_PTR(-EINVAL);
-
- mr = alloc_mr(max_num_sg, pd);
- if (IS_ERR(mr))
- return (struct ib_mr *)mr;
-
- mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
- if (!mr->pages)
- goto err;
-
- return &mr->ibmr;
-
-err:
- qib_dereg_mr(&mr->ibmr);
- return ERR_PTR(-ENOMEM);
-}
-
-static int qib_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct qib_mr *mr = to_imr(ibmr);
-
- if (unlikely(mr->npages == mr->mr.max_segs))
- return -ENOMEM;
-
- mr->pages[mr->npages++] = addr;
-
- return 0;
-}
-
-int qib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
-{
- struct qib_mr *mr = to_imr(ibmr);
-
- mr->npages = 0;
-
- return ib_sg_to_pages(ibmr, sg, sg_nents, qib_set_page);
-}
-
-/**
- * qib_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct qib_fmr *fmr;
- int m;
- struct ib_fmr *ret;
- int rval = -ENOMEM;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
- fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
- if (rval)
- goto bail;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- rval = qib_alloc_lkey(&fmr->mr, 0);
- if (rval)
- goto bail_mregion;
- fmr->ibfmr.rkey = fmr->mr.lkey;
- fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->mr.page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
-done:
- return ret;
-
-bail_mregion:
- deinit_qib_mregion(&fmr->mr);
-bail:
- kfree(fmr);
- ret = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * qib_map_phys_fmr - set up a fast memory region
- * @ibmfr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- */
-
-int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct qib_fmr *fmr = to_ifmr(ibfmr);
- struct qib_lkey_table *rkt;
- unsigned long flags;
- int m, n, i;
- u32 ps;
- int ret;
-
- i = atomic_read(&fmr->mr.refcount);
- if (i > 2)
- return -EBUSY;
-
- if (list_len > fmr->mr.max_segs) {
- ret = -EINVAL;
- goto bail;
- }
- rkt = &to_idev(ibfmr->device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->mr.page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- if (++n == QIB_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Returns 0 on success.
- */
-int qib_unmap_fmr(struct list_head *fmr_list)
-{
- struct qib_fmr *fmr;
- struct qib_lkey_table *rkt;
- unsigned long flags;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rkt = &to_idev(fmr->ibfmr.device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * qib_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Returns 0 on success.
- */
-int qib_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct qib_fmr *fmr = to_ifmr(ibfmr);
- int ret = 0;
- unsigned long timeout;
-
- qib_free_lkey(&fmr->mr);
- qib_put_mr(&fmr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&fmr->mr.comp,
- 5 * HZ);
- if (!timeout) {
- qib_get_mr(&fmr->mr);
- ret = -EBUSY;
- goto out;
- }
- deinit_qib_mregion(&fmr->mr);
- kfree(fmr);
-out:
- return ret;
-}
-
-void mr_rcu_callback(struct rcu_head *list)
-{
- struct qib_mregion *mr = container_of(list, struct qib_mregion, list);
-
- complete(&mr->comp);
-}
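
The deleted dereg paths shared one teardown handshake that is useful to keep in mind when reading the rdmavt replacement: the last qib_put_mr() fires mr_rcu_callback() via RCU, which completes mr->comp and unblocks the waiter. A condensed sketch using the names from the deleted file:

    qib_put_mr(&mr->mr);            /* last put schedules mr_rcu_callback() */
    timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
    if (!timeout) {
            qib_get_mr(&mr->mr);    /* still referenced: back out, report busy */
            return -EBUSY;
    }
    deinit_qib_mregion(&mr->mr);    /* safe: no users remain */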
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3eff35c2d453..575b737d9ef3 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -34,32 +34,38 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
-#include <linux/jhash.h>
+#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#include "qib.h"
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+/*
+ * The mask field, which was present in the now-deleted qib_qpn_table,
+ * has no counterpart in rvt_qpn_table. Define the equivalent here as
+ * qpt_mask instead of adding a mask field to rvt_qpn_table.
+ */
+u16 qpt_mask;
-static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
- struct qpn_map *map, unsigned off)
+static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off)
{
- return (map - qpt->map) * BITS_PER_PAGE + off;
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
-static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
- struct qpn_map *map, unsigned off,
+static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off,
unsigned n)
{
- if (qpt->mask) {
+ if (qpt_mask) {
off++;
- if (((off & qpt->mask) >> 1) >= n)
- off = (off | qpt->mask) + 2;
- } else
- off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+ if (((off & qpt_mask) >> 1) >= n)
+ off = (off | qpt_mask) + 2;
+ } else {
+ off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
+ }
return off;
}
@@ -100,7 +106,7 @@ static u32 credit_table[31] = {
32768 /* 1E */
};
-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
gfp_t gfp)
{
unsigned long page = get_zeroed_page(gfp);
@@ -121,12 +127,15 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
* Allocate the next available QPN or
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
-static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
- enum ib_qp_type type, u8 port, gfp_t gfp)
+int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+ enum ib_qp_type type, u8 port, gfp_t gfp)
{
u32 i, offset, max_scan, qpn;
- struct qpn_map *map;
+ struct rvt_qpn_map *map;
u32 ret;
+ struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
+ verbs_dev);
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
unsigned n;
@@ -143,12 +152,12 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
}
qpn = qpt->last + 2;
- if (qpn >= QPN_MAX)
+ if (qpn >= RVT_QPN_MAX)
qpn = 2;
- if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
- qpn = (qpn | qpt->mask) + 2;
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
+ if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
+ qpn = (qpn | qpt_mask) + 2;
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
@@ -173,14 +182,14 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
* We just need to be sure we don't loop
* forever.
*/
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
/*
* In order to keep the number of pages allocated to a
* minimum, we scan all the existing pages before increasing
* the size of the bitmap table.
*/
if (++i > max_scan) {
- if (qpt->nmaps == QPNMAP_ENTRIES)
+ if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
break;
map = &qpt->map[qpt->nmaps++];
offset = 0;
@@ -200,706 +209,113 @@ bail:
return ret;
}
-static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
-{
- struct qpn_map *map;
-
- map = qpt->map + qpn / BITS_PER_PAGE;
- if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
-}
-
-static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
-{
- return jhash_1word(qpn, dev->qp_rnd) &
- (dev->qp_table_size - 1);
-}
-
-
-/*
- * Put the QP into the hash table.
- * The hash table holds a reference to the QP.
- */
-static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned long flags;
- unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
-
- atomic_inc(&qp->refcount);
- spin_lock_irqsave(&dev->qpt_lock, flags);
-
- if (qp->ibqp.qp_num == 0)
- rcu_assign_pointer(ibp->qp0, qp);
- else if (qp->ibqp.qp_num == 1)
- rcu_assign_pointer(ibp->qp1, qp);
- else {
- qp->next = dev->qp_table[n];
- rcu_assign_pointer(dev->qp_table[n], qp);
- }
-
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
-}
-
-/*
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
- unsigned long flags;
- int removed = 1;
-
- spin_lock_irqsave(&dev->qpt_lock, flags);
-
- if (rcu_dereference_protected(ibp->qp0,
- lockdep_is_held(&dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp0, NULL);
- } else if (rcu_dereference_protected(ibp->qp1,
- lockdep_is_held(&dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp1, NULL);
- } else {
- struct qib_qp *q;
- struct qib_qp __rcu **qpp;
-
- removed = 0;
- qpp = &dev->qp_table[n];
- for (; (q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock))) != NULL;
- qpp = &q->next)
- if (q == qp) {
- RCU_INIT_POINTER(*qpp,
- rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qpt_lock)));
- removed = 1;
- break;
- }
- }
-
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
- if (removed) {
- synchronize_rcu();
- atomic_dec(&qp->refcount);
- }
-}
-
/**
* qib_free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
- *
- * There should not be any QPs still in use.
- * Free memory for table.
*/
-unsigned qib_free_all_qps(struct qib_devdata *dd)
+unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
- struct qib_ibdev *dev = &dd->verbs_dev;
- unsigned long flags;
- struct qib_qp *qp;
+ struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
+ verbs_dev);
unsigned n, qp_inuse = 0;
for (n = 0; n < dd->num_pports; n++) {
struct qib_ibport *ibp = &dd->pport[n].ibport_data;
- if (!qib_mcast_tree_empty(ibp))
- qp_inuse++;
rcu_read_lock();
- if (rcu_dereference(ibp->qp0))
+ if (rcu_dereference(ibp->rvp.qp[0]))
qp_inuse++;
- if (rcu_dereference(ibp->qp1))
+ if (rcu_dereference(ibp->rvp.qp[1]))
qp_inuse++;
rcu_read_unlock();
}
-
- spin_lock_irqsave(&dev->qpt_lock, flags);
- for (n = 0; n < dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(dev->qp_table[n],
- lockdep_is_held(&dev->qpt_lock));
- RCU_INIT_POINTER(dev->qp_table[n], NULL);
-
- for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qpt_lock)))
- qp_inuse++;
- }
- spin_unlock_irqrestore(&dev->qpt_lock, flags);
- synchronize_rcu();
-
return qp_inuse;
}
-/**
- * qib_lookup_qpn - return the QP with the given QPN
- * @qpt: the QP table
- * @qpn: the QP number to look up
- *
- * The caller is responsible for decrementing the QP reference count
- * when done.
- */
-struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
+void qib_notify_qp_reset(struct rvt_qp *qp)
{
- struct qib_qp *qp = NULL;
-
- rcu_read_lock();
- if (unlikely(qpn <= 1)) {
- if (qpn == 0)
- qp = rcu_dereference(ibp->qp0);
- else
- qp = rcu_dereference(ibp->qp1);
- if (qp)
- atomic_inc(&qp->refcount);
- } else {
- struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
- unsigned n = qpn_hash(dev, qpn);
-
- for (qp = rcu_dereference(dev->qp_table[n]); qp;
- qp = rcu_dereference(qp->next))
- if (qp->ibqp.qp_num == qpn) {
- atomic_inc(&qp->refcount);
- break;
- }
- }
- rcu_read_unlock();
- return qp;
-}
-
-/**
- * qib_reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
- * @type: the QP type
- */
-static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
-{
- qp->remote_qpn = 0;
- qp->qkey = 0;
- qp->qp_access_flags = 0;
- atomic_set(&qp->s_dma_busy, 0);
- qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
- qp->s_hdrwords = 0;
- qp->s_wqe = NULL;
- qp->s_draining = 0;
- qp->s_next_psn = 0;
- qp->s_last_psn = 0;
- qp->s_sending_psn = 0;
- qp->s_sending_hpsn = 0;
- qp->s_psn = 0;
- qp->r_psn = 0;
- qp->r_msn = 0;
- if (type == IB_QPT_RC) {
- qp->s_state = IB_OPCODE_RC_SEND_LAST;
- qp->r_state = IB_OPCODE_RC_SEND_LAST;
- } else {
- qp->s_state = IB_OPCODE_UC_SEND_LAST;
- qp->r_state = IB_OPCODE_UC_SEND_LAST;
- }
- qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- qp->r_nak_state = 0;
- qp->r_aflags = 0;
- qp->r_flags = 0;
- qp->s_head = 0;
- qp->s_tail = 0;
- qp->s_cur = 0;
- qp->s_acked = 0;
- qp->s_last = 0;
- qp->s_ssn = 1;
- qp->s_lsn = 0;
- qp->s_mig_state = IB_MIG_MIGRATED;
- memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
- qp->r_head_ack_queue = 0;
- qp->s_tail_ack_queue = 0;
- qp->s_num_rd_atomic = 0;
- if (qp->r_rq.wq) {
- qp->r_rq.wq->head = 0;
- qp->r_rq.wq->tail = 0;
- }
- qp->r_sge.num_sge = 0;
-}
-
-static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
-{
- unsigned n;
-
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
- qib_put_ss(&qp->s_rdma_read_sge);
-
- qib_put_ss(&qp->r_sge);
-
- if (clr_sends) {
- while (qp->s_last != qp->s_head) {
- struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
- unsigned i;
-
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
-
- qib_put_mr(sge->mr);
- }
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- }
- if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- }
-
- if (qp->ibqp.qp_type != IB_QPT_RC)
- return;
+ struct qib_qp_priv *priv = qp->priv;
- for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
- struct qib_ack_entry *e = &qp->s_ack_queue[n];
-
- if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
- e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- }
+ atomic_set(&priv->s_dma_busy, 0);
}
-/**
- * qib_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
+void qib_notify_error_qp(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_wc wc;
- int ret = 0;
-
- if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
- goto bail;
-
- qp->state = IB_QPS_ERR;
-
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
-
- if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
- qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
- qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
- list_del_init(&qp->iowait);
+ spin_lock(&dev->rdi.pending_lock);
+ if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
+ qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ list_del_init(&priv->iowait);
}
- spin_unlock(&dev->pending_lock);
+ spin_unlock(&dev->rdi.pending_lock);
- if (!(qp->s_flags & QIB_S_BUSY)) {
+ if (!(qp->s_flags & RVT_S_BUSY)) {
qp->s_hdrwords = 0;
if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
+ rvt_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
+ if (priv->s_tx) {
+ qib_put_txreq(priv->s_tx);
+ priv->s_tx = NULL;
}
}
-
- /* Schedule the sending tasklet to drain the send work queue. */
- if (qp->s_last != qp->s_head)
- qib_schedule_send(qp);
-
- clear_mr_refs(qp, 0);
-
- memset(&wc, 0, sizeof(wc));
- wc.qp = &qp->ibqp;
- wc.opcode = IB_WC_RECV;
-
- if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
- wc.wr_id = qp->r_wr_id;
- wc.status = err;
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wc.status = IB_WC_WR_FLUSH_ERR;
-
- if (qp->r_rq.wq) {
- struct qib_rwq *wq;
- u32 head;
- u32 tail;
-
- spin_lock(&qp->r_rq.lock);
-
- /* sanity check pointers before trusting them */
- wq = qp->r_rq.wq;
- head = wq->head;
- if (head >= qp->r_rq.size)
- head = 0;
- tail = wq->tail;
- if (tail >= qp->r_rq.size)
- tail = 0;
- while (tail != head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
- if (++tail >= qp->r_rq.size)
- tail = 0;
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wq->tail = tail;
-
- spin_unlock(&qp->r_rq.lock);
- } else if (qp->ibqp.event_handler)
- ret = 1;
-
-bail:
- return ret;
}
-/**
- * qib_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair who's attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for libibverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
+static int mtu_to_enum(u32 mtu)
{
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_qp *qp = to_iqp(ibqp);
- enum ib_qp_state cur_state, new_state;
- struct ib_event ev;
- int lastwqe = 0;
- int mig = 0;
- int ret;
- u32 pmtu = 0; /* for gcc warning only */
-
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
-
- cur_state = attr_mask & IB_QP_CUR_STATE ?
- attr->cur_qp_state : qp->state;
- new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_UNSPECIFIED))
- goto inval;
-
- if (attr_mask & IB_QP_AV) {
- if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
- goto inval;
- if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
- goto inval;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
- goto inval;
- if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
- goto inval;
- if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
- goto inval;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
- goto inval;
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- if (attr->min_rnr_timer > 31)
- goto inval;
-
- if (attr_mask & IB_QP_PORT)
- if (qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI ||
- attr->port_num == 0 ||
- attr->port_num > ibqp->device->phys_port_cnt)
- goto inval;
-
- if (attr_mask & IB_QP_DEST_QPN)
- if (attr->dest_qp_num > QIB_QPN_MASK)
- goto inval;
-
- if (attr_mask & IB_QP_RETRY_CNT)
- if (attr->retry_cnt > 7)
- goto inval;
-
- if (attr_mask & IB_QP_RNR_RETRY)
- if (attr->rnr_retry > 7)
- goto inval;
-
- /*
- * Don't allow invalid path_mtu values. OK to set greater
- * than the active mtu (or even the max_cap, if we have tuned
- * that to a small mtu. We'll set qp->path_mtu
- * to the lesser of requested attribute mtu and active,
- * for packetizing messages.
- * Note that the QP port has to be set in INIT and MTU in RTR.
- */
- if (attr_mask & IB_QP_PATH_MTU) {
- struct qib_devdata *dd = dd_from_dev(dev);
- int mtu, pidx = qp->port_num - 1;
-
- mtu = ib_mtu_enum_to_int(attr->path_mtu);
- if (mtu == -1)
- goto inval;
- if (mtu > dd->pport[pidx].ibmtu) {
- switch (dd->pport[pidx].ibmtu) {
- case 4096:
- pmtu = IB_MTU_4096;
- break;
- case 2048:
- pmtu = IB_MTU_2048;
- break;
- case 1024:
- pmtu = IB_MTU_1024;
- break;
- case 512:
- pmtu = IB_MTU_512;
- break;
- case 256:
- pmtu = IB_MTU_256;
- break;
- default:
- pmtu = IB_MTU_2048;
- }
- } else
- pmtu = attr->path_mtu;
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- if (attr->path_mig_state == IB_MIG_REARM) {
- if (qp->s_mig_state == IB_MIG_ARMED)
- goto inval;
- if (new_state != IB_QPS_RTS)
- goto inval;
- } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
- if (qp->s_mig_state == IB_MIG_REARM)
- goto inval;
- if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
- goto inval;
- if (qp->s_mig_state == IB_MIG_ARMED)
- mig = 1;
- } else
- goto inval;
- }
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
- goto inval;
+ int enum_mtu;
- switch (new_state) {
- case IB_QPS_RESET:
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait))
- list_del_init(&qp->iowait);
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- /* Stop the sending work queue and retry timer */
- cancel_work_sync(&qp->s_work);
- del_timer_sync(&qp->s_timer);
- wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
- }
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
- clear_mr_refs(qp, 1);
- qib_reset_qp(qp, ibqp->qp_type);
- }
+ switch (mtu) {
+ case 4096:
+ enum_mtu = IB_MTU_4096;
break;
-
- case IB_QPS_RTR:
- /* Allow event to retrigger if QP set to RTR more than once */
- qp->r_flags &= ~QIB_R_COMM_EST;
- qp->state = new_state;
+ case 2048:
+ enum_mtu = IB_MTU_2048;
break;
-
- case IB_QPS_SQD:
- qp->s_draining = qp->s_last != qp->s_cur;
- qp->state = new_state;
+ case 1024:
+ enum_mtu = IB_MTU_1024;
break;
-
- case IB_QPS_SQE:
- if (qp->ibqp.qp_type == IB_QPT_RC)
- goto inval;
- qp->state = new_state;
+ case 512:
+ enum_mtu = IB_MTU_512;
break;
-
- case IB_QPS_ERR:
- lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ case 256:
+ enum_mtu = IB_MTU_256;
break;
-
default:
- qp->state = new_state;
- break;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- qp->s_pkey_index = attr->pkey_index;
-
- if (attr_mask & IB_QP_PORT)
- qp->port_num = attr->port_num;
-
- if (attr_mask & IB_QP_DEST_QPN)
- qp->remote_qpn = attr->dest_qp_num;
-
- if (attr_mask & IB_QP_SQ_PSN) {
- qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
- qp->s_psn = qp->s_next_psn;
- qp->s_sending_psn = qp->s_next_psn;
- qp->s_last_psn = qp->s_next_psn - 1;
- qp->s_sending_hpsn = qp->s_last_psn;
- }
-
- if (attr_mask & IB_QP_RQ_PSN)
- qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
-
- if (attr_mask & IB_QP_ACCESS_FLAGS)
- qp->qp_access_flags = attr->qp_access_flags;
-
- if (attr_mask & IB_QP_AV) {
- qp->remote_ah_attr = attr->ah_attr;
- qp->s_srate = attr->ah_attr.static_rate;
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- qp->alt_ah_attr = attr->alt_ah_attr;
- qp->s_alt_pkey_index = attr->alt_pkey_index;
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- qp->s_mig_state = attr->path_mig_state;
- if (mig) {
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
- qp->s_pkey_index = qp->s_alt_pkey_index;
- }
- }
-
- if (attr_mask & IB_QP_PATH_MTU) {
- qp->path_mtu = pmtu;
- qp->pmtu = ib_mtu_enum_to_int(pmtu);
- }
-
- if (attr_mask & IB_QP_RETRY_CNT) {
- qp->s_retry_cnt = attr->retry_cnt;
- qp->s_retry = attr->retry_cnt;
- }
-
- if (attr_mask & IB_QP_RNR_RETRY) {
- qp->s_rnr_retry_cnt = attr->rnr_retry;
- qp->s_rnr_retry = attr->rnr_retry;
+ enum_mtu = IB_MTU_2048;
}
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- qp->r_min_rnr_timer = attr->min_rnr_timer;
-
- if (attr_mask & IB_QP_TIMEOUT) {
- qp->timeout = attr->timeout;
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
- }
-
- if (attr_mask & IB_QP_QKEY)
- qp->qkey = attr->qkey;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
-
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
- qp->s_max_rd_atomic = attr->max_rd_atomic;
-
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
-
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- insert_qp(dev, qp);
-
- if (lastwqe) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- if (mig) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- ret = 0;
- goto bail;
-
-inval:
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- ret = -EINVAL;
-
-bail:
- return ret;
+ return enum_mtu;
}
-int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr)
+int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr)
{
- struct qib_qp *qp = to_iqp(ibqp);
+ int mtu, pmtu, pidx = qp->port_num - 1;
+ struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
+ verbs_dev);
+ mtu = ib_mtu_enum_to_int(attr->path_mtu);
+ if (mtu == -1)
+ return -EINVAL;
+
+ if (mtu > dd->pport[pidx].ibmtu)
+ pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
+ else
+ pmtu = attr->path_mtu;
+ return pmtu;
+}
- attr->qp_state = qp->state;
- attr->cur_qp_state = attr->qp_state;
- attr->path_mtu = qp->path_mtu;
- attr->path_mig_state = qp->s_mig_state;
- attr->qkey = qp->qkey;
- attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
- attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
- attr->dest_qp_num = qp->remote_qpn;
- attr->qp_access_flags = qp->qp_access_flags;
- attr->cap.max_send_wr = qp->s_size - 1;
- attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
- attr->cap.max_send_sge = qp->s_max_sge;
- attr->cap.max_recv_sge = qp->r_rq.max_sge;
- attr->cap.max_inline_data = 0;
- attr->ah_attr = qp->remote_ah_attr;
- attr->alt_ah_attr = qp->alt_ah_attr;
- attr->pkey_index = qp->s_pkey_index;
- attr->alt_pkey_index = qp->s_alt_pkey_index;
- attr->en_sqd_async_notify = 0;
- attr->sq_draining = qp->s_draining;
- attr->max_rd_atomic = qp->s_max_rd_atomic;
- attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
- attr->min_rnr_timer = qp->r_min_rnr_timer;
- attr->port_num = qp->port_num;
- attr->timeout = qp->timeout;
- attr->retry_cnt = qp->s_retry_cnt;
- attr->rnr_retry = qp->s_rnr_retry_cnt;
- attr->alt_port_num = qp->alt_ah_attr.port_num;
- attr->alt_timeout = qp->alt_timeout;
+int qib_mtu_to_path_mtu(u32 mtu)
+{
+ return mtu_to_enum(mtu);
+}
- init_attr->event_handler = qp->ibqp.event_handler;
- init_attr->qp_context = qp->ibqp.qp_context;
- init_attr->send_cq = qp->ibqp.send_cq;
- init_attr->recv_cq = qp->ibqp.recv_cq;
- init_attr->srq = qp->ibqp.srq;
- init_attr->cap = attr->cap;
- if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
- init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- else
- init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr->qp_type = qp->ibqp.qp_type;
- init_attr->port_num = qp->port_num;
- return 0;
+u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
+{
+ return ib_mtu_enum_to_int(pmtu);
}
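mtu_to_enum() and qib_get_pmtu_from_attr() clamp the requested path MTU to what the port can actually carry. A userspace sketch of that clamp follows; the IB_MTU_* numeric values follow the IB verbs enum, and port_ibmtu stands in for dd->pport[pidx].ibmtu (both assumptions of the model, not the driver's definitions).

	/* Userspace model of the pmtu clamp in qib_get_pmtu_from_attr(). */
	#include <stdio.h>

	enum { IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096 };

	static int mtu_enum_to_int(int e) { return 128 << e; }

	static int mtu_to_enum(int mtu)
	{
		switch (mtu) {
		case 4096: return IB_MTU_4096;
		case 2048: return IB_MTU_2048;
		case 1024: return IB_MTU_1024;
		case 512:  return IB_MTU_512;
		case 256:  return IB_MTU_256;
		default:   return IB_MTU_2048;
		}
	}

	int main(void)
	{
		int port_ibmtu = 2048;			/* port capability */
		int requested = IB_MTU_4096;		/* attr->path_mtu */
		int mtu = mtu_enum_to_int(requested);
		int pmtu = mtu > port_ibmtu ? mtu_to_enum(port_ibmtu) : requested;

		printf("requested %d bytes, clamped pmtu enum %d (%d bytes)\n",
		       mtu, pmtu, mtu_enum_to_int(pmtu));
		return 0;
	}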
/**
@@ -908,7 +324,7 @@ int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*
* Returns the AETH.
*/
-__be32 qib_compute_aeth(struct qib_qp *qp)
+__be32 qib_compute_aeth(struct rvt_qp *qp)
{
u32 aeth = qp->r_msn & QIB_MSN_MASK;
@@ -921,7 +337,7 @@ __be32 qib_compute_aeth(struct qib_qp *qp)
} else {
u32 min, max, x;
u32 credits;
- struct qib_rwq *wq = qp->r_rq.wq;
+ struct rvt_rwq *wq = qp->r_rq.wq;
u32 head;
u32 tail;
@@ -962,315 +378,63 @@ __be32 qib_compute_aeth(struct qib_qp *qp)
return cpu_to_be32(aeth);
}
-/**
- * qib_create_qp - create a queue pair for a device
- * @ibpd: the protection domain who's device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: user data for libibverbs.so
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
- struct qib_qp *qp;
- int err;
- struct qib_swqe *swq = NULL;
- struct qib_ibdev *dev;
- struct qib_devdata *dd;
- size_t sz;
- size_t sg_list_sz;
- struct ib_qp *ret;
- gfp_t gfp;
+ struct qib_qp_priv *priv;
+ priv = kzalloc(sizeof(*priv), gfp);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ priv->owner = qp;
- if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
- init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
- init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
- return ERR_PTR(-EINVAL);
-
- /* GFP_NOIO is applicable in RC QPs only */
- if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
- init_attr->qp_type != IB_QPT_RC)
- return ERR_PTR(-EINVAL);
-
- gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
- GFP_NOIO : GFP_KERNEL;
-
- /* Check receive queue parameters if no SRQ is specified. */
- if (!init_attr->srq) {
- if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
- init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- if (init_attr->cap.max_send_sge +
- init_attr->cap.max_send_wr +
- init_attr->cap.max_recv_sge +
- init_attr->cap.max_recv_wr == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
+ priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+ if (!priv->s_hdr) {
+ kfree(priv);
+ return ERR_PTR(-ENOMEM);
}
+ init_waitqueue_head(&priv->wait_dma);
+ INIT_WORK(&priv->s_work, _qib_do_send);
+ INIT_LIST_HEAD(&priv->iowait);
- switch (init_attr->qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- if (init_attr->port_num == 0 ||
- init_attr->port_num > ibpd->device->phys_port_cnt) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- case IB_QPT_UC:
- case IB_QPT_RC:
- case IB_QPT_UD:
- sz = sizeof(struct qib_sge) *
- init_attr->cap.max_send_sge +
- sizeof(struct qib_swqe);
- swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
- gfp, PAGE_KERNEL);
- if (swq == NULL) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
- sz = sizeof(*qp);
- sg_list_sz = 0;
- if (init_attr->srq) {
- struct qib_srq *srq = to_isrq(init_attr->srq);
-
- if (srq->rq.max_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (srq->rq.max_sge - 1);
- } else if (init_attr->cap.max_recv_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (init_attr->cap.max_recv_sge - 1);
- qp = kzalloc(sz + sg_list_sz, gfp);
- if (!qp) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_swq;
- }
- RCU_INIT_POINTER(qp->next, NULL);
- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
- if (!qp->s_hdr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
- if (init_attr->srq)
- sz = 0;
- else {
- qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
- qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
- sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
- sizeof(struct qib_rwqe);
- if (gfp != GFP_NOIO)
- qp->r_rq.wq = vmalloc_user(
- sizeof(struct qib_rwq) +
- qp->r_rq.size * sz);
- else
- qp->r_rq.wq = __vmalloc(
- sizeof(struct qib_rwq) +
- qp->r_rq.size * sz,
- gfp, PAGE_KERNEL);
-
- if (!qp->r_rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- }
-
- /*
- * ib_create_qp() will initialize qp->ibqp
- * except for qp->ibqp.qp_num.
- */
- spin_lock_init(&qp->r_lock);
- spin_lock_init(&qp->s_lock);
- spin_lock_init(&qp->r_rq.lock);
- atomic_set(&qp->refcount, 0);
- init_waitqueue_head(&qp->wait);
- init_waitqueue_head(&qp->wait_dma);
- init_timer(&qp->s_timer);
- qp->s_timer.data = (unsigned long)qp;
- INIT_WORK(&qp->s_work, qib_do_send);
- INIT_LIST_HEAD(&qp->iowait);
- INIT_LIST_HEAD(&qp->rspwait);
- qp->state = IB_QPS_RESET;
- qp->s_wq = swq;
- qp->s_size = init_attr->cap.max_send_wr + 1;
- qp->s_max_sge = init_attr->cap.max_send_sge;
- if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
- qp->s_flags = QIB_S_SIGNAL_REQ_WR;
- dev = to_idev(ibpd->device);
- dd = dd_from_dev(dev);
- err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
- init_attr->port_num, gfp);
- if (err < 0) {
- ret = ERR_PTR(err);
- vfree(qp->r_rq.wq);
- goto bail_qp;
- }
- qp->ibqp.qp_num = err;
- qp->port_num = init_attr->port_num;
- qib_reset_qp(qp, init_attr->qp_type);
- break;
-
- default:
- /* Don't support raw QPs */
- ret = ERR_PTR(-ENOSYS);
- goto bail;
- }
-
- init_attr->cap.max_inline_data = 0;
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- if (!qp->r_rq.wq) {
- __u64 offset = 0;
-
- err = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else {
- u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
-
- qp->ip = qib_create_mmap_info(dev, s,
- ibpd->uobject->context,
- qp->r_rq.wq);
- if (!qp->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- err = ib_copy_to_udata(udata, &(qp->ip->offset),
- sizeof(qp->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- }
- }
-
- spin_lock(&dev->n_qps_lock);
- if (dev->n_qps_allocated == ib_qib_max_qps) {
- spin_unlock(&dev->n_qps_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_qps_allocated++;
- spin_unlock(&dev->n_qps_lock);
-
- if (qp->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &qp->ibqp;
- goto bail;
-
-bail_ip:
- if (qp->ip)
- kref_put(&qp->ip->ref, qib_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
-bail_qp:
- kfree(qp->s_hdr);
- kfree(qp);
-bail_swq:
- vfree(swq);
-bail:
- return ret;
+ return priv;
}
-/**
- * qib_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int qib_destroy_qp(struct ib_qp *ibqp)
+void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
+ struct qib_qp_priv *priv = qp->priv;
- /* Make sure HW and driver activity is stopped. */
- spin_lock_irq(&qp->s_lock);
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->pending_lock);
- if (!list_empty(&qp->iowait))
- list_del_init(&qp->iowait);
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
- spin_unlock_irq(&qp->s_lock);
- cancel_work_sync(&qp->s_work);
- del_timer_sync(&qp->s_timer);
- wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
- if (qp->s_tx) {
- qib_put_txreq(qp->s_tx);
- qp->s_tx = NULL;
- }
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- clear_mr_refs(qp, 1);
- } else
- spin_unlock_irq(&qp->s_lock);
+ kfree(priv->s_hdr);
+ kfree(priv);
+}
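qib_qp_priv_alloc()/qib_qp_priv_free() hang the driver-specific send-side state (s_hdr, iowait, s_work, wait_dma) off the rdmavt-owned QP via qp->priv, with priv->owner pointing back at the QP for the work handlers. A standalone model of that ownership pattern, assuming nothing beyond what the two functions above show:

	/* Model of the qp->priv lifecycle: the core owns the QP, the
	 * driver hangs its private state off qp->priv and keeps a back
	 * pointer (priv->owner), mirroring qib_qp_priv_alloc()/_free(). */
	#include <stdlib.h>
	#include <stdio.h>

	struct qp;
	struct qp_priv { struct qp *owner; void *s_hdr; };
	struct qp { void *priv; };

	static struct qp_priv *priv_alloc(struct qp *qp)
	{
		struct qp_priv *p = calloc(1, sizeof(*p));

		if (!p)
			return NULL;
		p->owner = qp;			/* back pointer for work items */
		p->s_hdr = calloc(1, 64);	/* stand-in for the send header */
		if (!p->s_hdr) {
			free(p);
			return NULL;
		}
		return p;
	}

	static void priv_free(struct qp *qp)
	{
		struct qp_priv *p = qp->priv;

		free(p->s_hdr);
		free(p);
	}

	int main(void)
	{
		struct qp qp = { 0 };

		qp.priv = priv_alloc(&qp);
		printf("owner back pointer ok: %d\n",
		       ((struct qp_priv *)qp.priv)->owner == &qp);
		priv_free(&qp);
		return 0;
	}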
- /* all user's cleaned up, mark it available */
- free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
- spin_lock(&dev->n_qps_lock);
- dev->n_qps_allocated--;
- spin_unlock(&dev->n_qps_lock);
+void qib_stop_send_queue(struct rvt_qp *qp)
+{
+ struct qib_qp_priv *priv = qp->priv;
- if (qp->ip)
- kref_put(&qp->ip->ref, qib_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- vfree(qp->s_wq);
- kfree(qp->s_hdr);
- kfree(qp);
- return 0;
+ cancel_work_sync(&priv->s_work);
+ del_timer_sync(&qp->s_timer);
}
-/**
- * qib_init_qpn_table - initialize the QP number table for a device
- * @qpt: the QPN table
- */
-void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
+void qib_quiesce_qp(struct rvt_qp *qp)
{
- spin_lock_init(&qpt->lock);
- qpt->last = 1; /* start with QPN 2 */
- qpt->nmaps = 1;
- qpt->mask = dd->qpn_mask;
+ struct qib_qp_priv *priv = qp->priv;
+
+ wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
+ if (priv->s_tx) {
+ qib_put_txreq(priv->s_tx);
+ priv->s_tx = NULL;
+ }
}
-/**
- * qib_free_qpn_table - free the QP number table for a device
- * @qpt: the QPN table
- */
-void qib_free_qpn_table(struct qib_qpn_table *qpt)
+void qib_flush_qp_waiters(struct rvt_qp *qp)
{
- int i;
+ struct qib_qp_priv *priv = qp->priv;
+ struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
- if (qpt->map[i].page)
- free_page((unsigned long) qpt->map[i].page);
+ spin_lock(&dev->rdi.pending_lock);
+ if (!list_empty(&priv->iowait))
+ list_del_init(&priv->iowait);
+ spin_unlock(&dev->rdi.pending_lock);
}
/**
@@ -1280,7 +444,7 @@ void qib_free_qpn_table(struct qib_qpn_table *qpt)
*
* The QP s_lock should be held.
*/
-void qib_get_credit(struct qib_qp *qp, u32 aeth)
+void qib_get_credit(struct rvt_qp *qp, u32 aeth)
{
u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
@@ -1290,31 +454,70 @@ void qib_get_credit(struct qib_qp *qp, u32 aeth)
* honor the credit field.
*/
if (credit == QIB_AETH_CREDIT_INVAL) {
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
- qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
- if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
qib_schedule_send(qp);
}
}
- } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
+ } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
if (qib_cmp24(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit;
- if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
qib_schedule_send(qp);
}
}
}
}
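qib_get_credit() decodes the 5-bit credit field from the AETH, maps it through the roughly logarithmic credit_table seen earlier in this file, and advances the limit sequence number (LSN = MSN + credits). A standalone model of that arithmetic; the shift/mask constants follow the usual IB AETH layout and are assumptions of the sketch, not taken from the driver headers.

	/* Standalone model of the credit path in qib_get_credit(). */
	#include <stdio.h>

	#define AETH_CREDIT_SHIFT 24
	#define AETH_CREDIT_MASK  0x1F
	#define MSN_MASK          0xFFFFFF

	/* first entries of the roughly logarithmic credit table */
	static unsigned credit_table[] = { 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32 };

	int main(void)
	{
		unsigned aeth = (7u << AETH_CREDIT_SHIFT) | 0x001000; /* credit=7 */
		unsigned credit = (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK;
		unsigned lsn = (aeth + credit_table[credit]) & MSN_MASK;

		printf("MSN %u + %u credits -> LSN %u\n",
		       aeth & MSN_MASK, credit_table[credit], lsn);
		return 0;
	}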
+/**
+ * qib_check_send_wqe - validate wr/wqe
+ * @qp: the QP
+ * @wqe: the built wqe
+ *
+ * Validate the wr/wqe. This is called after the wqe has been
+ * set up but prior to inserting it into the ring.
+ *
+ * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
+ */
+int qib_check_send_wqe(struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ struct rvt_ah *ah;
+ int ret = 0;
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ if (wqe->length > 0x80000000U)
+ return -EINVAL;
+ break;
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ ah = ibah_to_rvtah(wqe->ud_wr.ah);
+ if (wqe->length > (1 << ah->log_pmtu))
+ return -EINVAL;
+ /* progress hint */
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
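qib_check_send_wqe() caps RC/UC payloads at 2 GB and datagram payloads at the AH's path MTU, returning 1 for datagram QPs as a hint to drive progress directly. A standalone model of those checks; the two-value enum and the log_pmtu parameter are stand-ins for the ib_qp_type and rvt_ah fields used by the driver.

	/* Model of the length checks in qib_check_send_wqe(). */
	#include <stdio.h>

	enum qp_type { QPT_RC, QPT_UD };

	static int check_send_wqe(enum qp_type t, unsigned length, unsigned log_pmtu)
	{
		switch (t) {
		case QPT_RC:
			return length > 0x80000000U ? -1 : 0;
		case QPT_UD:
			if (length > (1u << log_pmtu))
				return -1;
			return 1;	/* hint: drive progress directly */
		}
		return 0;
	}

	int main(void)
	{
		printf("UD 4096B on pMTU 2048: %d\n", check_send_wqe(QPT_UD, 4096, 11));
		printf("RC 1 MB: %d\n", check_send_wqe(QPT_RC, 1 << 20, 0));
		return 0;
	}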
#ifdef CONFIG_DEBUG_FS
struct qib_qp_iter {
struct qib_ibdev *dev;
- struct qib_qp *qp;
+ struct rvt_qp *qp;
int n;
};
@@ -1340,14 +543,14 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
struct qib_ibdev *dev = iter->dev;
int n = iter->n;
int ret = 1;
- struct qib_qp *pqp = iter->qp;
- struct qib_qp *qp;
+ struct rvt_qp *pqp = iter->qp;
+ struct rvt_qp *qp;
- for (; n < dev->qp_table_size; n++) {
+ for (; n < dev->rdi.qp_dev->qp_table_size; n++) {
if (pqp)
qp = rcu_dereference(pqp->next);
else
- qp = rcu_dereference(dev->qp_table[n]);
+ qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]);
pqp = qp;
if (qp) {
iter->qp = qp;
@@ -1364,10 +567,11 @@ static const char * const qp_type_str[] = {
void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
- struct qib_swqe *wqe;
- struct qib_qp *qp = iter->qp;
+ struct rvt_swqe *wqe;
+ struct rvt_qp *qp = iter->qp;
+ struct qib_qp_priv *priv = qp->priv;
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
seq_printf(s,
"N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
iter->n,
@@ -1377,8 +581,8 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
wqe->wr.opcode,
qp->s_hdrwords,
qp->s_flags,
- atomic_read(&qp->s_dma_busy),
- !list_empty(&qp->iowait),
+ atomic_read(&priv->s_dma_busy),
+ !list_empty(&priv->iowait),
qp->timeout,
wqe->ssn,
qp->s_lsn,
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e6b7556d5221..9088e26d3ac8 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -40,7 +40,7 @@
static void rc_timeout(unsigned long arg);
-static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
+static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 psn, u32 pmtu)
{
u32 len;
@@ -54,9 +54,9 @@ static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
return wqe->length - len;
}
-static void start_timer(struct qib_qp *qp)
+static void start_timer(struct rvt_qp *qp)
{
- qp->s_flags |= QIB_S_TIMER;
+ qp->s_flags |= RVT_S_TIMER;
qp->s_timer.function = rc_timeout;
/* 4.096 usec. * (1 << qp->timeout) */
qp->s_timer.expires = jiffies + qp->timeout_jiffies;
@@ -74,17 +74,17 @@ static void start_timer(struct qib_qp *qp)
* Note that we are in the responder's side of the QP context.
* Note the QP s_lock must be held.
*/
-static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
+static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
struct qib_other_headers *ohdr, u32 pmtu)
{
- struct qib_ack_entry *e;
+ struct rvt_ack_entry *e;
u32 hwords;
u32 len;
u32 bth0;
u32 bth2;
/* Don't send an ACK if we aren't supposed to. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto bail;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
@@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
case OP(RDMA_READ_RESPONSE_ONLY):
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
if (e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
/* FALLTHROUGH */
@@ -112,7 +112,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
case OP(ACKNOWLEDGE):
/* Check for no next entry in the queue. */
if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & QIB_S_ACK_PENDING)
+ if (qp->s_flags & RVT_S_ACK_PENDING)
goto normal;
goto bail;
}
@@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
/* Copy SGE state in case we need to resend */
qp->s_rdma_mr = e->rdma_sge.mr;
if (qp->s_rdma_mr)
- qib_get_mr(qp->s_rdma_mr);
+ rvt_get_mr(qp->s_rdma_mr);
qp->s_ack_rdma_sge.sge = e->rdma_sge;
qp->s_ack_rdma_sge.num_sge = 1;
qp->s_cur_sge = &qp->s_ack_rdma_sge;
@@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
qp->s_cur_sge = &qp->s_ack_rdma_sge;
qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
if (qp->s_rdma_mr)
- qib_get_mr(qp->s_rdma_mr);
+ rvt_get_mr(qp->s_rdma_mr);
len = qp->s_ack_rdma_sge.sge.sge_length;
if (len > pmtu)
len = pmtu;
@@ -196,7 +196,7 @@ normal:
* (see above).
*/
qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~QIB_S_ACK_PENDING;
+ qp->s_flags &= ~RVT_S_ACK_PENDING;
qp->s_cur_sge = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
@@ -218,7 +218,7 @@ normal:
bail:
qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
+ qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
return 0;
}
@@ -226,63 +226,60 @@ bail:
* qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
*
+ * Assumes the s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_rc_req(struct qib_qp *qp)
+int qib_make_rc_req(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
struct qib_other_headers *ohdr;
- struct qib_sge_state *ss;
- struct qib_swqe *wqe;
+ struct rvt_sge_state *ss;
+ struct rvt_swqe *wqe;
u32 hwords;
u32 len;
u32 bth0;
u32 bth2;
u32 pmtu = qp->pmtu;
char newreq;
- unsigned long flags;
int ret = 0;
int delta;
- ohdr = &qp->s_hdr->u.oth;
+ ohdr = &priv->s_hdr->u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr->u.l.oth;
-
- /*
- * The lock is needed to synchronize between the sending tasklet,
- * the receive interrupt handler, and timeout resends.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
+ ohdr = &priv->s_hdr->u.l.oth;
/* Sending responses has higher priority over sending requests. */
- if ((qp->s_flags & QIB_S_RESP_PENDING) &&
+ if ((qp->s_flags & RVT_S_RESP_PENDING) &&
qib_make_rc_ack(dev, qp, ohdr, pmtu))
goto done;
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (atomic_read(&priv->s_dma_busy)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done;
}
- if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
+ if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
goto bail;
if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
- qp->s_flags |= QIB_S_WAIT_PSN;
+ qp->s_flags |= RVT_S_WAIT_PSN;
goto bail;
}
qp->s_sending_psn = qp->s_psn;
@@ -294,10 +291,10 @@ int qib_make_rc_req(struct qib_qp *qp)
bth0 = 0;
/* Send a request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
switch (qp->s_state) {
default:
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/*
* Resend an old request or start a new one.
@@ -317,11 +314,11 @@ int qib_make_rc_req(struct qib_qp *qp)
*/
if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
qp->s_num_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_FENCE;
+ qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
- wqe->psn = qp->s_next_psn;
newreq = 1;
+ qp->s_psn = wqe->psn;
}
/*
* Note that we have to be careful not to modify the
@@ -335,14 +332,12 @@ int qib_make_rc_req(struct qib_qp *qp)
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
/* If no credit, return. */
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
- wqe->lpsn = wqe->psn;
if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
qp->s_state = OP(SEND_FIRST);
len = pmtu;
break;
@@ -363,14 +358,14 @@ int qib_make_rc_req(struct qib_qp *qp)
break;
case IB_WR_RDMA_WRITE:
- if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
@@ -380,9 +375,7 @@ int qib_make_rc_req(struct qib_qp *qp)
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
hwords += sizeof(struct ib_reth) / sizeof(u32);
- wqe->lpsn = wqe->psn;
if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
qp->s_state = OP(RDMA_WRITE_FIRST);
len = pmtu;
break;
@@ -411,19 +404,12 @@ int qib_make_rc_req(struct qib_qp *qp)
if (newreq) {
if (qp->s_num_rd_atomic >=
qp->s_max_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_RDMAR;
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
goto bail;
}
qp->s_num_rd_atomic++;
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- /*
- * Adjust s_next_psn to count the
- * expected number of responses.
- */
- if (len > pmtu)
- qp->s_next_psn += (len - 1) / pmtu;
- wqe->lpsn = qp->s_next_psn++;
}
ohdr->u.rc.reth.vaddr =
@@ -449,13 +435,12 @@ int qib_make_rc_req(struct qib_qp *qp)
if (newreq) {
if (qp->s_num_rd_atomic >=
qp->s_max_rd_atomic) {
- qp->s_flags |= QIB_S_WAIT_RDMAR;
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
goto bail;
}
qp->s_num_rd_atomic++;
- if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- wqe->lpsn = wqe->psn;
}
if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
qp->s_state = OP(COMPARE_SWAP);
@@ -498,11 +483,8 @@ int qib_make_rc_req(struct qib_qp *qp)
}
if (wqe->wr.opcode == IB_WR_RDMA_READ)
qp->s_psn = wqe->lpsn + 1;
- else {
+ else
qp->s_psn++;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- }
break;
case OP(RDMA_READ_RESPONSE_FIRST):
@@ -522,8 +504,6 @@ int qib_make_rc_req(struct qib_qp *qp)
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
bth2 = qp->s_psn++ & QIB_PSN_MASK;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
len = qp->s_len;
if (len > pmtu) {
@@ -563,8 +543,6 @@ int qib_make_rc_req(struct qib_qp *qp)
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
bth2 = qp->s_psn++ & QIB_PSN_MASK;
- if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
len = qp->s_len;
if (len > pmtu) {
@@ -618,9 +596,9 @@ int qib_make_rc_req(struct qib_qp *qp)
delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
if (delta && delta % QIB_PSN_CREDIT == 0)
bth2 |= IB_BTH_REQ_ACK;
- if (qp->s_flags & QIB_S_SEND_ONE) {
- qp->s_flags &= ~QIB_S_SEND_ONE;
- qp->s_flags |= QIB_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_SEND_ONE) {
+ qp->s_flags &= ~RVT_S_SEND_ONE;
+ qp->s_flags |= RVT_S_WAIT_ACK;
bth2 |= IB_BTH_REQ_ACK;
}
qp->s_len -= len;
@@ -629,13 +607,9 @@ int qib_make_rc_req(struct qib_qp *qp)
qp->s_cur_size = len;
qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->s_flags &= ~RVT_S_BUSY;
return ret;
}
@@ -647,7 +621,7 @@ unlock:
* Note that RDMA reads and atomics are handled in the
* send side QP state and tasklet.
*/
-void qib_send_rc_ack(struct qib_qp *qp)
+void qib_send_rc_ack(struct rvt_qp *qp)
{
struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -665,11 +639,11 @@ void qib_send_rc_ack(struct qib_qp *qp)
spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto unlock;
/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
- if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
+ if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
goto queue_ack;
/* Construct the header with s_lock held so APM doesn't change it. */
@@ -758,9 +732,9 @@ void qib_send_rc_ack(struct qib_qp *qp)
goto done;
queue_ack:
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- ibp->n_rc_qacks++;
- qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ this_cpu_inc(*ibp->rvp.rc_qacks);
+ qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
qp->s_nak_state = qp->r_nak_state;
qp->s_ack_psn = qp->r_ack_psn;
@@ -782,10 +756,10 @@ done:
* for the given QP.
* Called at interrupt level with the QP s_lock held.
*/
-static void reset_psn(struct qib_qp *qp, u32 psn)
+static void reset_psn(struct rvt_qp *qp, u32 psn)
{
u32 n = qp->s_acked;
- struct qib_swqe *wqe = get_swqe_ptr(qp, n);
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
u32 opcode;
qp->s_cur = n;
@@ -808,7 +782,7 @@ static void reset_psn(struct qib_qp *qp, u32 psn)
n = 0;
if (n == qp->s_tail)
break;
- wqe = get_swqe_ptr(qp, n);
+ wqe = rvt_get_swqe_ptr(qp, n);
diff = qib_cmp24(psn, wqe->psn);
if (diff < 0)
break;
@@ -854,22 +828,22 @@ static void reset_psn(struct qib_qp *qp, u32 psn)
done:
qp->s_psn = psn;
/*
- * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
+ * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
* asynchronously before the send tasklet can get scheduled.
* Doing it in qib_make_rc_req() is too late.
*/
if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
(qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
- qp->s_flags |= QIB_S_WAIT_PSN;
+ qp->s_flags |= RVT_S_WAIT_PSN;
}
/*
* Back up requester to resend the last un-ACKed request.
* The QP r_lock and s_lock should be held and interrupts disabled.
*/
-static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
+static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
- struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
struct qib_ibport *ibp;
if (qp->s_retry == 0) {
@@ -878,7 +852,7 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
} else /* XXX need to handle delayed completion */
return;
@@ -887,15 +861,15 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
ibp = to_iport(qp->ibqp.device, qp->port_num);
if (wqe->wr.opcode == IB_WR_RDMA_READ)
- ibp->n_rc_resends++;
+ ibp->rvp.n_rc_resends++;
else
- ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+ ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
- qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
- QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
- QIB_S_WAIT_ACK);
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
+ RVT_S_WAIT_ACK);
if (wait)
- qp->s_flags |= QIB_S_SEND_ONE;
+ qp->s_flags |= RVT_S_SEND_ONE;
reset_psn(qp, psn);
}
@@ -904,16 +878,16 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
*/
static void rc_timeout(unsigned long arg)
{
- struct qib_qp *qp = (struct qib_qp *)arg;
+ struct rvt_qp *qp = (struct rvt_qp *)arg;
struct qib_ibport *ibp;
unsigned long flags;
spin_lock_irqsave(&qp->r_lock, flags);
spin_lock(&qp->s_lock);
- if (qp->s_flags & QIB_S_TIMER) {
+ if (qp->s_flags & RVT_S_TIMER) {
ibp = to_iport(qp->ibqp.device, qp->port_num);
- ibp->n_rc_timeouts++;
- qp->s_flags &= ~QIB_S_TIMER;
+ ibp->rvp.n_rc_timeouts++;
+ qp->s_flags &= ~RVT_S_TIMER;
del_timer(&qp->s_timer);
qib_restart_rc(qp, qp->s_last_psn + 1, 1);
qib_schedule_send(qp);
@@ -927,12 +901,12 @@ static void rc_timeout(unsigned long arg)
*/
void qib_rc_rnr_retry(unsigned long arg)
{
- struct qib_qp *qp = (struct qib_qp *)arg;
+ struct rvt_qp *qp = (struct rvt_qp *)arg;
unsigned long flags;
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_RNR) {
- qp->s_flags &= ~QIB_S_WAIT_RNR;
+ if (qp->s_flags & RVT_S_WAIT_RNR) {
+ qp->s_flags &= ~RVT_S_WAIT_RNR;
del_timer(&qp->s_timer);
qib_schedule_send(qp);
}
@@ -943,14 +917,14 @@ void qib_rc_rnr_retry(unsigned long arg)
* Set qp->s_sending_psn to the next PSN after the given one.
* This would be psn+1 except when RDMA reads are present.
*/
-static void reset_sending_psn(struct qib_qp *qp, u32 psn)
+static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
u32 n = qp->s_last;
/* Find the work request corresponding to the given PSN. */
for (;;) {
- wqe = get_swqe_ptr(qp, n);
+ wqe = rvt_get_swqe_ptr(qp, n);
if (qib_cmp24(psn, wqe->lpsn) <= 0) {
if (wqe->wr.opcode == IB_WR_RDMA_READ)
qp->s_sending_psn = wqe->lpsn + 1;
@@ -968,16 +942,16 @@ static void reset_sending_psn(struct qib_qp *qp, u32 psn)
/*
* This should be called with the QP s_lock held and interrupts disabled.
*/
-void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
+void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
{
struct qib_other_headers *ohdr;
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
struct ib_wc wc;
unsigned i;
u32 opcode;
u32 psn;
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
/* Find out where the BTH is */
@@ -1002,22 +976,30 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
* there are still requests that haven't been acked.
*/
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
- (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
+ (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
start_timer(qp);
while (qp->s_last != qp->s_acked) {
- wqe = get_swqe_ptr(qp, qp->s_last);
+ u32 s_last;
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
+ s_last = qp->s_last;
+ if (++s_last >= qp->s_size)
+ s_last = 0;
+ qp->s_last = s_last;
+ /* see post_send() */
+ barrier();
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
/* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
@@ -1025,25 +1007,23 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
}
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
}
/*
* If we were waiting for sends to complete before resending,
* and they are now complete, restart sending.
*/
- if (qp->s_flags & QIB_S_WAIT_PSN &&
+ if (qp->s_flags & RVT_S_WAIT_PSN &&
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- qp->s_flags &= ~QIB_S_WAIT_PSN;
+ qp->s_flags &= ~RVT_S_WAIT_PSN;
qp->s_sending_psn = qp->s_psn;
qp->s_sending_hpsn = qp->s_psn - 1;
qib_schedule_send(qp);
}
}
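The completion loops above now advance qp->s_last and issue barrier() before the WQE's memory-region references are dropped, pairing with the ordering on the submit side (per the "/* see post_send() */" comments). A standalone model of just that publish-then-release ordering, using a GCC-style compiler barrier; the release step is a placeholder comment where rvt_put_mr() and the completion entry would go.

	/* Model of the s_last update ordering in the completion path. */
	#include <stdio.h>

	#define barrier() __asm__ __volatile__("" ::: "memory")

	static volatile unsigned s_last;
	static unsigned s_size = 16;

	static void complete_one(void)
	{
		unsigned n = s_last;

		if (++n >= s_size)
			n = 0;
		s_last = n;	/* publish the new index first... */
		barrier();	/* ...then release the WQE's resources */
		/* rvt_put_mr(...) and the send completion would go here */
	}

	int main(void)
	{
		complete_one();
		printf("s_last advanced to %u\n", s_last);
		return 0;
	}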
-static inline void update_last_psn(struct qib_qp *qp, u32 psn)
+static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
qp->s_last_psn = psn;
}
@@ -1053,8 +1033,8 @@ static inline void update_last_psn(struct qib_qp *qp, u32 psn)
* This is similar to qib_send_complete but has to check to be sure
* that the SGEs are not being referenced if the SWQE is being resent.
*/
-static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
- struct qib_swqe *wqe,
+static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
struct qib_ibport *ibp)
{
struct ib_wc wc;
@@ -1067,13 +1047,21 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
*/
if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ u32 s_last;
+
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
+ s_last = qp->s_last;
+ if (++s_last >= qp->s_size)
+ s_last = 0;
+ qp->s_last = s_last;
+ /* see post_send() */
+ barrier();
/* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
@@ -1081,12 +1069,10 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
}
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
} else
- ibp->n_rc_delayed_comp++;
+ this_cpu_inc(*ibp->rvp.rc_delayed_comp);
qp->s_retry = qp->s_retry_cnt;
update_last_psn(qp, wqe->lpsn);
@@ -1100,7 +1086,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
if (++qp->s_cur >= qp->s_size)
qp->s_cur = 0;
qp->s_acked = qp->s_cur;
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
if (qp->s_acked != qp->s_tail) {
qp->s_state = OP(SEND_LAST);
qp->s_psn = wqe->psn;
@@ -1110,7 +1096,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
qp->s_acked = 0;
if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
qp->s_draining = 0;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
}
return wqe;
}
@@ -1126,19 +1112,19 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
* Called at interrupt level with the QP s_lock held.
* Returns 1 if OK, 0 if current operation should be aborted (NAK).
*/
-static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
+static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
u64 val, struct qib_ctxtdata *rcd)
{
struct qib_ibport *ibp;
enum ib_wc_status status;
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
int ret = 0;
u32 ack_psn;
int diff;
/* Remove QP from retry timer */
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
@@ -1151,7 +1137,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
ack_psn = psn;
if (aeth >> 29)
ack_psn--;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
ibp = to_iport(qp->ibqp.device, qp->port_num);
/*
@@ -1186,11 +1172,11 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
(opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
/* Retry this request. */
- if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
- qp->r_flags |= QIB_R_RDMAR_SEQ;
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
qib_restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_SEND;
+ qp->r_flags |= RVT_R_RSP_SEND;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait,
&rcd->qp_wait_list);
@@ -1213,14 +1199,14 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
qp->s_num_rd_atomic--;
/* Restart sending task if fence is complete */
- if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
+ if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
!qp->s_num_rd_atomic) {
- qp->s_flags &= ~(QIB_S_WAIT_FENCE |
- QIB_S_WAIT_ACK);
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE |
+ RVT_S_WAIT_ACK);
qib_schedule_send(qp);
- } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
- qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
- QIB_S_WAIT_ACK);
+ } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_ACK);
qib_schedule_send(qp);
}
}
@@ -1231,7 +1217,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
switch (aeth >> 29) {
case 0: /* ACK */
- ibp->n_rc_acks++;
+ this_cpu_inc(*ibp->rvp.rc_acks);
if (qp->s_acked != qp->s_tail) {
/*
* We are expecting more ACKs so
@@ -1248,8 +1234,8 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
qp->s_state = OP(SEND_LAST);
qp->s_psn = psn + 1;
}
- if (qp->s_flags & QIB_S_WAIT_ACK) {
- qp->s_flags &= ~QIB_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
qib_schedule_send(qp);
}
qib_get_credit(qp, aeth);
@@ -1260,10 +1246,10 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
goto bail;
case 1: /* RNR NAK */
- ibp->n_rnr_naks++;
+ ibp->rvp.n_rnr_naks++;
if (qp->s_acked == qp->s_tail)
goto bail;
- if (qp->s_flags & QIB_S_WAIT_RNR)
+ if (qp->s_flags & RVT_S_WAIT_RNR)
goto bail;
if (qp->s_rnr_retry == 0) {
status = IB_WC_RNR_RETRY_EXC_ERR;
@@ -1275,12 +1261,12 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
/* The last valid PSN is the previous PSN. */
update_last_psn(qp, psn - 1);
- ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
+ ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
reset_psn(qp, psn);
- qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
- qp->s_flags |= QIB_S_WAIT_RNR;
+ qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
+ qp->s_flags |= RVT_S_WAIT_RNR;
qp->s_timer.function = qib_rc_rnr_retry;
qp->s_timer.expires = jiffies + usecs_to_jiffies(
ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
@@ -1296,7 +1282,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
QIB_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
- ibp->n_seq_naks++;
+ ibp->rvp.n_seq_naks++;
/*
* Back up to the responder's expected PSN.
* Note that we might get a NAK in the middle of an
@@ -1309,21 +1295,21 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
case 1: /* Invalid Request */
status = IB_WC_REM_INV_REQ_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
goto class_b;
case 2: /* Remote Access Error */
status = IB_WC_REM_ACCESS_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
goto class_b;
case 3: /* Remote Operation Error */
status = IB_WC_REM_OP_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
qib_send_complete(qp, wqe, status);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1349,18 +1335,18 @@ bail:
* We have seen an out of sequence RDMA read middle or last packet.
* This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
*/
-static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
+static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
struct qib_ctxtdata *rcd)
{
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
/* Remove QP from retry timer */
- if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
- qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
del_timer(&qp->s_timer);
}
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
while (qib_cmp24(psn, wqe->lpsn) > 0) {
if (wqe->wr.opcode == IB_WR_RDMA_READ ||
@@ -1370,11 +1356,11 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
wqe = do_rc_completion(qp, wqe, ibp);
}
- ibp->n_rdma_seq++;
- qp->r_flags |= QIB_R_RDMAR_SEQ;
+ ibp->rvp.n_rdma_seq++;
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
qib_restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_SEND;
+ qp->r_flags |= RVT_R_RSP_SEND;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
@@ -1399,12 +1385,12 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
struct qib_other_headers *ohdr,
void *data, u32 tlen,
- struct qib_qp *qp,
+ struct rvt_qp *qp,
u32 opcode,
u32 psn, u32 hdrsize, u32 pmtu,
struct qib_ctxtdata *rcd)
{
- struct qib_swqe *wqe;
+ struct rvt_swqe *wqe;
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
enum ib_wc_status status;
unsigned long flags;
@@ -1425,7 +1411,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 * If the send tasklet is not running, attempt to progress the
 * SDMA queue.
*/
- if (!(qp->s_flags & QIB_S_BUSY)) {
+ if (!(qp->s_flags & RVT_S_BUSY)) {
/* Acquire SDMA Lock */
spin_lock_irqsave(&ppd->sdma_lock, flags);
/* Invoke sdma make progress */
@@ -1437,11 +1423,12 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
}
spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto ack_done;
/* Ignore invalid responses. */
- if (qib_cmp24(psn, qp->s_next_psn) >= 0)
+ smp_read_barrier_depends(); /* see post_one_send */
+ if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
/* Ignore duplicate responses. */
@@ -1460,15 +1447,15 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
* Skip everything other than the PSN we expect, if we are waiting
* for a reply to a restarted RDMA read or atomic op.
*/
- if (qp->r_flags & QIB_R_RDMAR_SEQ) {
+ if (qp->r_flags & RVT_R_RDMAR_SEQ) {
if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
goto ack_done;
- qp->r_flags &= ~QIB_R_RDMAR_SEQ;
+ qp->r_flags &= ~RVT_R_RDMAR_SEQ;
}
if (unlikely(qp->s_acked == qp->s_tail))
goto ack_done;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
status = IB_WC_SUCCESS;
switch (opcode) {
@@ -1487,7 +1474,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
opcode != OP(RDMA_READ_RESPONSE_FIRST))
goto ack_done;
hdrsize += 4;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
goto ack_op_err;
/*
@@ -1515,10 +1502,10 @@ read_middle:
* We got a response so update the timeout.
* 4.096 usec. * (1 << qp->timeout)
*/
- qp->s_flags |= QIB_S_TIMER;
+ qp->s_flags |= RVT_S_TIMER;
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
- if (qp->s_flags & QIB_S_WAIT_ACK) {
- qp->s_flags &= ~QIB_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
qib_schedule_send(qp);
}
@@ -1553,7 +1540,7 @@ read_middle:
* have to be careful to copy the data to the right
* location.
*/
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
wqe, psn, pmtu);
goto read_last;
@@ -1598,7 +1585,7 @@ ack_len_err:
ack_err:
if (qp->s_last == qp->s_acked) {
qib_send_complete(qp, wqe, status);
- qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1623,14 +1610,14 @@ bail:
*/
static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
void *data,
- struct qib_qp *qp,
+ struct rvt_qp *qp,
u32 opcode,
u32 psn,
int diff,
struct qib_ctxtdata *rcd)
{
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_ack_entry *e;
+ struct rvt_ack_entry *e;
unsigned long flags;
u8 i, prev;
int old_req;
@@ -1642,7 +1629,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
* Don't queue the NAK if we already sent one.
*/
if (!qp->r_nak_state) {
- ibp->n_rc_seqnak++;
+ ibp->rvp.n_rc_seqnak++;
qp->r_nak_state = IB_NAK_PSN_ERROR;
/* Use the expected PSN. */
qp->r_ack_psn = qp->r_psn;
@@ -1652,7 +1639,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
* Otherwise, we end up propagating congestion.
*/
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
@@ -1678,7 +1665,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
*/
e = NULL;
old_req = 1;
- ibp->n_rc_dupreq++;
+ ibp->rvp.n_rc_dupreq++;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1732,7 +1719,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done;
if (e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
if (len != 0) {
@@ -1740,7 +1727,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
u64 vaddr = be64_to_cpu(reth->vaddr);
int ok;
- ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
IB_ACCESS_REMOTE_READ);
if (unlikely(!ok))
goto unlock_done;
@@ -1791,7 +1778,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
 * which doesn't accept an RDMA read response or atomic
* response as an ACK for earlier SENDs or RDMA writes.
*/
- if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
+ if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
spin_unlock_irqrestore(&qp->s_lock, flags);
qp->r_nak_state = 0;
qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
@@ -1805,7 +1792,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
break;
}
qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
qp->r_nak_state = 0;
qib_schedule_send(qp);
@@ -1818,13 +1805,13 @@ send_ack:
return 0;
}
-void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
+void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
unsigned long flags;
int lastwqe;
spin_lock_irqsave(&qp->s_lock, flags);
- lastwqe = qib_error_qp(qp, err);
+ lastwqe = rvt_error_qp(qp, err);
spin_unlock_irqrestore(&qp->s_lock, flags);
if (lastwqe) {
@@ -1837,7 +1824,7 @@ void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
}
}
-static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
+static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{
unsigned next;
@@ -1862,7 +1849,7 @@ static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
* Called at interrupt level.
*/
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
struct qib_other_headers *ohdr;
@@ -1948,8 +1935,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
break;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
- qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
+ qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -2026,9 +2013,9 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
qp->r_msn++;
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
break;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -2047,7 +2034,7 @@ send_last:
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
break;
@@ -2069,7 +2056,7 @@ send_last:
int ok;
/* Check rkey & NAK */
- ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto nack_acc;
@@ -2096,7 +2083,7 @@ send_last:
goto send_last;
case OP(RDMA_READ_REQUEST): {
- struct qib_ack_entry *e;
+ struct rvt_ack_entry *e;
u32 len;
u8 next;
@@ -2114,7 +2101,7 @@ send_last:
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
reth = &ohdr->u.rc.reth;
@@ -2125,7 +2112,7 @@ send_last:
int ok;
/* Check rkey & NAK */
- ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
rkey, IB_ACCESS_REMOTE_READ);
if (unlikely(!ok))
goto nack_acc_unlck;
@@ -2157,7 +2144,7 @@ send_last:
qp->r_head_ack_queue = next;
/* Schedule the send tasklet. */
- qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
qib_schedule_send(qp);
goto sunlock;
@@ -2166,7 +2153,7 @@ send_last:
case OP(COMPARE_SWAP):
case OP(FETCH_ADD): {
struct ib_atomic_eth *ateth;
- struct qib_ack_entry *e;
+ struct rvt_ack_entry *e;
u64 vaddr;
atomic64_t *maddr;
u64 sdata;
@@ -2186,7 +2173,7 @@ send_last:
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- qib_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
ateth = &ohdr->u.atomic_eth;
@@ -2196,7 +2183,7 @@ send_last:
goto nack_inv_unlck;
rkey = be32_to_cpu(ateth->rkey);
/* Check rkey & NAK */
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
vaddr, rkey,
IB_ACCESS_REMOTE_ATOMIC)))
goto nack_acc_unlck;
@@ -2208,7 +2195,7 @@ send_last:
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
be64_to_cpu(ateth->compare_data),
sdata);
- qib_put_mr(qp->r_sge.sge.mr);
+ rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
e->opcode = opcode;
e->sent = 0;
@@ -2221,7 +2208,7 @@ send_last:
qp->r_head_ack_queue = next;
/* Schedule the send tasklet. */
- qp->s_flags |= QIB_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
qib_schedule_send(qp);
goto sunlock;
@@ -2245,7 +2232,7 @@ rnr_nak:
qp->r_ack_psn = qp->r_psn;
/* Queue RNR NAK for later */
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
@@ -2257,7 +2244,7 @@ nack_op_err:
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
@@ -2271,7 +2258,7 @@ nack_inv:
qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= QIB_R_RSP_NAK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index b1aa21bdd484..a5f07a64b228 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -79,16 +79,16 @@ const u32 ib_qib_rnr_table[32] = {
* Validate a RWQE and fill in the SGE state.
* Return 1 if OK.
*/
-static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
+static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
int i, j, ret;
struct ib_wc wc;
- struct qib_lkey_table *rkt;
- struct qib_pd *pd;
- struct qib_sge_state *ss;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_sge_state *ss;
- rkt = &to_idev(qp->ibqp.device)->lk_table;
- pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
ss = &qp->r_sge;
ss->sg_list = qp->r_sg_list;
qp->r_len = 0;
@@ -96,7 +96,7 @@ static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
- if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
&wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
goto bad_lkey;
qp->r_len += wqe->sg_list[i].length;
@@ -109,9 +109,9 @@ static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
bad_lkey:
while (j) {
- struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+ struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
ss->num_sge = 0;
memset(&wc, 0, sizeof(wc));
@@ -120,7 +120,7 @@ bad_lkey:
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
/* Signal solicited completion event. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
ret = 0;
bail:
return ret;
@@ -136,19 +136,19 @@ bail:
*
* Can be called from interrupt level.
*/
-int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
+int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
unsigned long flags;
- struct qib_rq *rq;
- struct qib_rwq *wq;
- struct qib_srq *srq;
- struct qib_rwqe *wqe;
+ struct rvt_rq *rq;
+ struct rvt_rwq *wq;
+ struct rvt_srq *srq;
+ struct rvt_rwqe *wqe;
void (*handler)(struct ib_event *, void *);
u32 tail;
int ret;
if (qp->ibqp.srq) {
- srq = to_isrq(qp->ibqp.srq);
+ srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
handler = srq->ibsrq.event_handler;
rq = &srq->rq;
} else {
@@ -158,7 +158,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
}
spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
ret = 0;
goto unlock;
}
@@ -174,7 +174,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
}
/* Make sure entry is read after head index is read. */
smp_rmb();
- wqe = get_rwqe_ptr(rq, tail);
+ wqe = rvt_get_rwqe_ptr(rq, tail);
/*
* Even though we update the tail index in memory, the verbs
* consumer is not supposed to post more entries until a
@@ -190,7 +190,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
qp->r_wr_id = wqe->wr_id;
ret = 1;
- set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
+ set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
if (handler) {
u32 n;
@@ -227,7 +227,7 @@ bail:
* Switch to alternate path.
* The QP s_lock should be held and interrupts disabled.
*/
-void qib_migrate_qp(struct qib_qp *qp)
+void qib_migrate_qp(struct rvt_qp *qp)
{
struct ib_event ev;
@@ -266,7 +266,7 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
* The s_lock will be acquired around the qib_migrate_qp() call.
*/
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, struct qib_qp *qp, u32 bth0)
+ int has_grh, struct rvt_qp *qp, u32 bth0)
{
__be64 guid;
unsigned long flags;
@@ -279,7 +279,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
goto err;
guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ if (!gid_ok(&hdr->u.l.grh.dgid,
+ ibp->rvp.gid_prefix, guid))
goto err;
if (!gid_ok(&hdr->u.l.grh.sgid,
qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
@@ -311,7 +312,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
goto err;
guid = get_sguid(ibp,
qp->remote_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ if (!gid_ok(&hdr->u.l.grh.dgid,
+ ibp->rvp.gid_prefix, guid))
goto err;
if (!gid_ok(&hdr->u.l.grh.sgid,
qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
@@ -353,12 +355,15 @@ err:
* receive interrupts since this is a connected protocol and all packets
* will pass through here.
*/
-static void qib_ruc_loopback(struct qib_qp *sqp)
+static void qib_ruc_loopback(struct rvt_qp *sqp)
{
struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_qp *qp;
- struct qib_swqe *wqe;
- struct qib_sge *sge;
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_sge *sge;
unsigned long flags;
struct ib_wc wc;
u64 sdata;
@@ -367,29 +372,33 @@ static void qib_ruc_loopback(struct qib_qp *sqp)
int release;
int ret;
+ rcu_read_lock();
/*
* Note that we check the responder QP state after
* checking the requester's state.
*/
- qp = qib_lookup_qpn(ibp, sqp->remote_qpn);
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
+ if (!qp)
+ goto done;
spin_lock_irqsave(&sqp->s_lock, flags);
/* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
- !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
goto unlock;
- sqp->s_flags |= QIB_S_BUSY;
+ sqp->s_flags |= RVT_S_BUSY;
again:
- if (sqp->s_last == sqp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
goto clr_busy;
- wqe = get_swqe_ptr(sqp, sqp->s_last);
+ wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
/* Return if it is not OK to start a new work request. */
- if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
goto clr_busy;
/* We are in the error state, flush the work request. */
send_status = IB_WC_WR_FLUSH_ERR;
@@ -407,9 +416,9 @@ again:
}
spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
+ if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
/*
* For RC, the requester would timeout and retry so
* shortcut the timeouts and just signal too many retries.
@@ -458,7 +467,7 @@ again:
goto inv_err;
if (wqe->length == 0)
break;
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_WRITE)))
@@ -471,7 +480,7 @@ again:
case IB_WR_RDMA_READ:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
goto inv_err;
- if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
wqe->rdma_wr.remote_addr,
wqe->rdma_wr.rkey,
IB_ACCESS_REMOTE_READ)))
@@ -489,7 +498,7 @@ again:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
- if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
IB_ACCESS_REMOTE_ATOMIC)))
@@ -502,7 +511,7 @@ again:
(u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->atomic_wr.swap);
- qib_put_mr(qp->r_sge.sge.mr);
+ rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
goto send_comp;
@@ -526,11 +535,11 @@ again:
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (!release)
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--sqp->s_sge.num_sge)
*sge = *sqp->s_sge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -543,9 +552,9 @@ again:
sqp->s_len -= len;
}
if (release)
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto send_comp;
if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
@@ -561,12 +570,12 @@ again:
wc.sl = qp->remote_ah_attr.sl;
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
send_comp:
spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->n_loop_pkts++;
+ ibp->rvp.n_loop_pkts++;
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
qib_send_complete(sqp, wqe, send_status);
@@ -576,7 +585,7 @@ rnr_nak:
/* Handle RNR NAK */
if (qp->ibqp.qp_type == IB_QPT_UC)
goto send_comp;
- ibp->n_rnr_naks++;
+ ibp->rvp.n_rnr_naks++;
/*
* Note: we don't need the s_lock held since the BUSY flag
* makes this single threaded.
@@ -588,9 +597,9 @@ rnr_nak:
if (sqp->s_rnr_retry_cnt < 7)
sqp->s_rnr_retry--;
spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
goto clr_busy;
- sqp->s_flags |= QIB_S_WAIT_RNR;
+ sqp->s_flags |= RVT_S_WAIT_RNR;
sqp->s_timer.function = qib_rc_rnr_retry;
sqp->s_timer.expires = jiffies +
usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
@@ -618,9 +627,9 @@ serr:
spin_lock_irqsave(&sqp->s_lock, flags);
qib_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
- sqp->s_flags &= ~QIB_S_BUSY;
+ sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);
if (lastwqe) {
struct ib_event ev;
@@ -633,12 +642,11 @@ serr:
goto done;
}
clr_busy:
- sqp->s_flags &= ~QIB_S_BUSY;
+ sqp->s_flags &= ~RVT_S_BUSY;
unlock:
spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
- if (qp && atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rcu_read_unlock();
}
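/*
 * A sketch, not part of the patch: the loopback path above replaces a
 * reference-counted qib_lookup_qpn() (paired with atomic_dec_and_test()
 * and wake_up() at "done:") with an RCU read-side critical section
 * around rvt_lookup_qpn().  The general idiom looks like this;
 * my_table, my_obj and my_consume() are hypothetical, not driver APIs.
 */
#include <linux/types.h>
#include <linux/rcupdate.h>

struct my_obj {
	u32 key;
	struct my_obj __rcu *next;
};

static struct my_obj __rcu *my_table;

static void my_consume(struct my_obj *p);	/* uses p, must not sleep */

static void my_lookup_and_use(u32 key)
{
	struct my_obj *p;

	rcu_read_lock();			/* pins the object lifetime */
	for (p = rcu_dereference(my_table); p;
	     p = rcu_dereference(p->next))
		if (p->key == key)
			break;
	if (p)
		my_consume(p);	/* only valid inside the read section */
	rcu_read_unlock();
}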
/**
@@ -663,7 +671,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
hdr->next_hdr = IB_GRH_NEXT_HDR;
hdr->hop_limit = grh->hop_limit;
/* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+ hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
hdr->sgid.global.interface_id = grh->sgid_index ?
ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
hdr->dgid = grh->dgid;
@@ -672,9 +680,10 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
return sizeof(struct ib_grh) / sizeof(u32);
}
-void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
u32 bth0, u32 bth2)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
u16 lrh0;
u32 nwords;
@@ -685,17 +694,18 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
nwords = (qp->s_cur_size + extra_bytes) >> 2;
lrh0 = QIB_LRH_BTH;
if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
+ qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
&qp->remote_ah_attr.grh,
qp->s_hdrwords, nwords);
lrh0 = QIB_LRH_GRH;
}
lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
qp->remote_ah_attr.sl << 4;
- qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
+ priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ priv->s_hdr->lrh[2] =
+ cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
qp->remote_ah_attr.src_path_bits);
bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
@@ -707,20 +717,29 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}
+void _qib_do_send(struct work_struct *work)
+{
+ struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
+ s_work);
+ struct rvt_qp *qp = priv->owner;
+
+ qib_do_send(qp);
+}
+
/**
* qib_do_send - perform a send on a QP
- * @work: contains a pointer to the QP
+ * @qp: pointer to the QP
*
* Process entries in the send work queue until credit or queue is
* exhausted. Only allow one CPU to send a packet per QP (tasklet).
* Otherwise, two threads could send packets out of order.
*/
-void qib_do_send(struct work_struct *work)
+void qib_do_send(struct rvt_qp *qp)
{
- struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int (*make_req)(struct qib_qp *qp);
+ int (*make_req)(struct rvt_qp *qp);
unsigned long flags;
if ((qp->ibqp.qp_type == IB_QPT_RC ||
@@ -745,50 +764,59 @@ void qib_do_send(struct work_struct *work)
return;
}
- qp->s_flags |= QIB_S_BUSY;
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->s_flags |= RVT_S_BUSY;
do {
/* Check for a constructed packet to be sent. */
if (qp->s_hdrwords != 0) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
/*
* If the packet cannot be sent now, return and
* the send tasklet will be woken up later.
*/
- if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords,
+ if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
qp->s_cur_sge, qp->s_cur_size))
- break;
+ return;
/* Record that s_hdr is empty. */
qp->s_hdrwords = 0;
+ spin_lock_irqsave(&qp->s_lock, flags);
}
} while (make_req(qp));
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
}
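/*
 * A sketch, not part of the patch: the hunk above changes qib_do_send()
 * to keep the QP's s_lock across the make_req() state machine and drop
 * it only around the hardware submit.  The shape of that loop, with
 * hypothetical build_one()/submit_one() standing in for make_req() and
 * qib_verbs_send():
 */
#include <linux/spinlock.h>

static int build_one(void)  { return 0; }	/* stands in for make_req() */
static int submit_one(void) { return 0; }	/* stands in for qib_verbs_send() */

static void send_loop(spinlock_t *lock)
{
	unsigned long flags;
	int more;

	spin_lock_irqsave(lock, flags);
	do {
		more = build_one();		/* queue state: under lock */
		if (more) {
			spin_unlock_irqrestore(lock, flags);
			if (submit_one())
				return;		/* retried from a later wakeup */
			spin_lock_irqsave(lock, flags);
		}
	} while (more);
	spin_unlock_irqrestore(lock, flags);
}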
/*
* This should be called with s_lock held.
*/
-void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
unsigned i;
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
+ last = qp->s_last;
+ old_last = last;
+ if (++last >= qp->s_size)
+ last = 0;
+ qp->s_last = last;
+ /* See post_send() */
+ barrier();
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct qib_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
/* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
status != IB_WC_SUCCESS) {
struct ib_wc wc;
@@ -800,15 +828,10 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
wc.qp = &qp->ibqp;
if (status == IB_WC_SUCCESS)
wc.byte_len = wqe->length;
- qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
status != IB_WC_SUCCESS);
}
- last = qp->s_last;
- old_last = last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
if (qp->s_acked == old_last)
qp->s_acked = last;
if (qp->s_cur == old_last)
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index c6d6a54d2e19..891873b38a1e 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -513,7 +513,9 @@ int qib_sdma_running(struct qib_pportdata *ppd)
static void complete_sdma_err_req(struct qib_pportdata *ppd,
struct qib_verbs_txreq *tx)
{
- atomic_inc(&tx->qp->s_dma_busy);
+ struct qib_qp_priv *priv = tx->qp->priv;
+
+ atomic_inc(&priv->s_dma_busy);
/* no sdma descriptors, so no unmap_desc */
tx->txreq.start_idx = 0;
tx->txreq.next_descq_idx = 0;
@@ -531,18 +533,19 @@ static void complete_sdma_err_req(struct qib_pportdata *ppd,
* 3) The SGE addresses are suitable for passing to dma_map_single().
*/
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
- struct qib_sge_state *ss, u32 dwords,
+ struct rvt_sge_state *ss, u32 dwords,
struct qib_verbs_txreq *tx)
{
unsigned long flags;
- struct qib_sge *sge;
- struct qib_qp *qp;
+ struct rvt_sge *sge;
+ struct rvt_qp *qp;
int ret = 0;
u16 tail;
__le64 *descqp;
u64 sdmadesc[2];
u32 dwoffset;
dma_addr_t addr;
+ struct qib_qp_priv *priv;
spin_lock_irqsave(&ppd->sdma_lock, flags);
@@ -621,7 +624,7 @@ retry:
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -644,8 +647,8 @@ retry:
descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
-
- atomic_inc(&tx->qp->s_dma_busy);
+ priv = tx->qp->priv;
+ atomic_inc(&priv->s_dma_busy);
tx->txreq.next_descq_idx = tail;
ppd->dd->f_sdma_update_tail(ppd, tail);
ppd->sdma_descq_added += tx->txreq.sg_count;
@@ -663,13 +666,14 @@ unmap:
unmap_desc(ppd, tail);
}
qp = tx->qp;
+ priv = qp->priv;
qib_put_txreq(tx);
spin_lock(&qp->r_lock);
spin_lock(&qp->s_lock);
if (qp->ibqp.qp_type == IB_QPT_RC) {
/* XXX what about error sending RDMA read responses? */
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
- qib_error_qp(qp, IB_WC_GENERAL_ERR);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
+ rvt_error_qp(qp, IB_WC_GENERAL_ERR);
} else if (qp->s_wqe)
qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
@@ -679,8 +683,9 @@ unmap:
busy:
qp = tx->qp;
+ priv = qp->priv;
spin_lock(&qp->s_lock);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
struct qib_ibdev *dev;
/*
@@ -690,19 +695,19 @@ busy:
*/
tx->ss = ss;
tx->dwords = dwords;
- qp->s_tx = tx;
+ priv->s_tx = tx;
dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
+ spin_lock(&dev->rdi.pending_lock);
+ if (list_empty(&priv->iowait)) {
struct qib_ibport *ibp;
ibp = &ppd->ibport_data;
- ibp->n_dmawait++;
- qp->s_flags |= QIB_S_WAIT_DMA_DESC;
- list_add_tail(&qp->iowait, &dev->dmawait);
+ ibp->rvp.n_dmawait++;
+ qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+ list_add_tail(&priv->iowait, &dev->dmawait);
}
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&dev->rdi.pending_lock);
+ qp->s_flags &= ~RVT_S_BUSY;
spin_unlock(&qp->s_lock);
ret = -EBUSY;
} else {
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
deleted file mode 100644
index d6235931a1ba..000000000000
--- a/drivers/infiniband/hw/qib/qib_srq.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "qib_verbs.h"
-
-/**
- * qib_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: A pointer to the first WR to cause a problem is put here
- *
- * This may be called from interrupt context.
- */
-int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_rwq *wq;
- unsigned long flags;
- int ret;
-
- for (; wr; wr = wr->next) {
- struct qib_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > srq->rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&srq->rq.lock, flags);
- wq = srq->rq.wq;
- next = wq->head + 1;
- if (next >= srq->rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&srq->rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
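/*
 * A sketch, not part of the patch: the deleted post routine above pairs
 * with qib_get_rwqe() - the poster fills the WQE, issues smp_wmb(),
 * then publishes wq->head; the reader loads the head, issues smp_rmb(),
 * then reads the WQE.  A compact model (struct wq here is illustrative):
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/barrier.h>

#define WQ_SIZE 32

struct wq {
	u32 head;		/* written by the posting side */
	u32 tail;		/* written by the receiving side */
	u64 wr_id[WQ_SIZE];
};

static int wq_post(struct wq *q, u64 wr_id)
{
	u32 next = (q->head + 1) % WQ_SIZE;

	if (next == q->tail)
		return -ENOMEM;		/* ring full */
	q->wr_id[q->head] = wr_id;	/* write the entry first */
	smp_wmb();			/* entry before head index */
	q->head = next;
	return 0;
}

static int wq_get(struct wq *q, u64 *wr_id)
{
	if (q->tail == q->head)
		return 0;		/* ring empty */
	smp_rmb();			/* head index before entry */
	*wr_id = q->wr_id[q->tail];
	q->tail = (q->tail + 1) % WQ_SIZE;
	return 1;
}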
-
-/**
- * qib_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
- * @srq_init_attr: the attributes of the SRQ
- * @udata: data from libibverbs when creating a user SRQ
- */
-struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
-{
- struct qib_ibdev *dev = to_idev(ibpd->device);
- struct qib_srq *srq;
- u32 sz;
- struct ib_srq *ret;
-
- if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- ret = ERR_PTR(-ENOSYS);
- goto done;
- }
-
- if (srq_init_attr->attr.max_sge == 0 ||
- srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
- srq_init_attr->attr.max_wr == 0 ||
- srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
-
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
-
- /*
- * Need to use vmalloc() if we want to support large #s of entries.
- */
- srq->rq.size = srq_init_attr->attr.max_wr + 1;
- srq->rq.max_sge = srq_init_attr->attr.max_sge;
- sz = sizeof(struct ib_sge) * srq->rq.max_sge +
- sizeof(struct qib_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
- if (!srq->rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_srq;
- }
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- int err;
- u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;
-
- srq->ip =
- qib_create_mmap_info(dev, s, ibpd->uobject->context,
- srq->rq.wq);
- if (!srq->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_wq;
- }
-
- err = ib_copy_to_udata(udata, &srq->ip->offset,
- sizeof(srq->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else
- srq->ip = NULL;
-
- /*
- * ib_create_srq() will initialize srq->ibsrq.
- */
- spin_lock_init(&srq->rq.lock);
- srq->rq.wq->head = 0;
- srq->rq.wq->tail = 0;
- srq->limit = srq_init_attr->attr.srq_limit;
-
- spin_lock(&dev->n_srqs_lock);
- if (dev->n_srqs_allocated == ib_qib_max_srqs) {
- spin_unlock(&dev->n_srqs_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_srqs_allocated++;
- spin_unlock(&dev->n_srqs_lock);
-
- if (srq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &srq->ibsrq;
- goto done;
-
-bail_ip:
- kfree(srq->ip);
-bail_wq:
- vfree(srq->rq.wq);
-bail_srq:
- kfree(srq);
-done:
- return ret;
-}
-
-/**
- * qib_modify_srq - modify a shared receive queue
- * @ibsrq: the SRQ to modify
- * @attr: the new attributes of the SRQ
- * @attr_mask: indicates which attributes to modify
- * @udata: user data for libibverbs.so
- */
-int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_rwq *wq;
- int ret = 0;
-
- if (attr_mask & IB_SRQ_MAX_WR) {
- struct qib_rwq *owq;
- struct qib_rwqe *p;
- u32 sz, size, n, head, tail;
-
- /* Check that the requested sizes are below the limits. */
- if ((attr->max_wr > ib_qib_max_srq_wrs) ||
- ((attr_mask & IB_SRQ_LIMIT) ?
- attr->srq_limit : srq->limit) > attr->max_wr) {
- ret = -EINVAL;
- goto bail;
- }
-
- sz = sizeof(struct qib_rwqe) +
- srq->rq.max_sge * sizeof(struct ib_sge);
- size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
- if (!wq) {
- ret = -ENOMEM;
- goto bail;
- }
-
- /* Check that we can write the offset to mmap. */
- if (udata && udata->inlen >= sizeof(__u64)) {
- __u64 offset_addr;
- __u64 offset = 0;
-
- ret = ib_copy_from_udata(&offset_addr, udata,
- sizeof(offset_addr));
- if (ret)
- goto bail_free;
- udata->outbuf =
- (void __user *) (unsigned long) offset_addr;
- ret = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (ret)
- goto bail_free;
- }
-
- spin_lock_irq(&srq->rq.lock);
- /*
- * validate head and tail pointer values and compute
- * the number of remaining WQEs.
- */
- owq = srq->rq.wq;
- head = owq->head;
- tail = owq->tail;
- if (head >= srq->rq.size || tail >= srq->rq.size) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- n = head;
- if (n < tail)
- n += srq->rq.size - tail;
- else
- n -= tail;
- if (size <= n) {
- ret = -EINVAL;
- goto bail_unlock;
- }
- n = 0;
- p = wq->wq;
- while (tail != head) {
- struct qib_rwqe *wqe;
- int i;
-
- wqe = get_rwqe_ptr(&srq->rq, tail);
- p->wr_id = wqe->wr_id;
- p->num_sge = wqe->num_sge;
- for (i = 0; i < wqe->num_sge; i++)
- p->sg_list[i] = wqe->sg_list[i];
- n++;
- p = (struct qib_rwqe *)((char *) p + sz);
- if (++tail >= srq->rq.size)
- tail = 0;
- }
- srq->rq.wq = wq;
- srq->rq.size = size;
- wq->head = n;
- wq->tail = 0;
- if (attr_mask & IB_SRQ_LIMIT)
- srq->limit = attr->srq_limit;
- spin_unlock_irq(&srq->rq.lock);
-
- vfree(owq);
-
- if (srq->ip) {
- struct qib_mmap_info *ip = srq->ip;
- struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
- u32 s = sizeof(struct qib_rwq) + size * sz;
-
- qib_update_mmap_info(dev, ip, s, wq);
-
- /*
- * Return the offset to mmap.
- * See qib_mmap() for details.
- */
- if (udata && udata->inlen >= sizeof(__u64)) {
- ret = ib_copy_to_udata(udata, &ip->offset,
- sizeof(ip->offset));
- if (ret)
- goto bail;
- }
-
- /*
- * Put user mapping info onto the pending list
- * unless it already is on the list.
- */
- spin_lock_irq(&dev->pending_lock);
- if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps,
- &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
- } else if (attr_mask & IB_SRQ_LIMIT) {
- spin_lock_irq(&srq->rq.lock);
- if (attr->srq_limit >= srq->rq.size)
- ret = -EINVAL;
- else
- srq->limit = attr->srq_limit;
- spin_unlock_irq(&srq->rq.lock);
- }
- goto bail;
-
-bail_unlock:
- spin_unlock_irq(&srq->rq.lock);
-bail_free:
- vfree(wq);
-bail:
- return ret;
-}
-
-int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
-
- attr->max_wr = srq->rq.size - 1;
- attr->max_sge = srq->rq.max_sge;
- attr->srq_limit = srq->limit;
- return 0;
-}
-
-/**
- * qib_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
- */
-int qib_destroy_srq(struct ib_srq *ibsrq)
-{
- struct qib_srq *srq = to_isrq(ibsrq);
- struct qib_ibdev *dev = to_idev(ibsrq->device);
-
- spin_lock(&dev->n_srqs_lock);
- dev->n_srqs_allocated--;
- spin_unlock(&dev->n_srqs_lock);
- if (srq->ip)
- kref_put(&srq->ip->ref, qib_release_mmap_info);
- else
- vfree(srq->rq.wq);
- kfree(srq);
-
- return 0;
-}
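/*
 * A sketch, not part of the patch: the deleted qib_modify_srq() above
 * computes how many WQEs remain in the old ring before copying them
 * into the resized one (n = head; if (n < tail) n += size - tail; else
 * n -= tail).  The same occupancy arithmetic, runnable stand-alone:
 */
#include <assert.h>

/* Entries outstanding in a ring of 'size' slots with head/tail indices. */
static unsigned int ring_count(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	return head >= tail ? head - tail : head + size - tail;
}

int main(void)
{
	assert(ring_count(5, 2, 8) == 3);	/* no wrap */
	assert(ring_count(1, 6, 8) == 3);	/* wrapped */
	return 0;
}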
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 81f56cdff2bc..fe4cf5e4acec 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -406,7 +406,13 @@ static struct kobj_type qib_sl2vl_ktype = {
#define QIB_DIAGC_ATTR(N) \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = { .name = __stringify(N), .mode = 0664 }, \
- .counter = offsetof(struct qib_ibport, n_##N) \
+ .counter = offsetof(struct qib_ibport, rvp.n_##N) \
+ }
+
+#define QIB_DIAGC_ATTR_PER_CPU(N) \
+ static struct qib_diagc_attr qib_diagc_attr_##N = { \
+ .attr = { .name = __stringify(N), .mode = 0664 }, \
+ .counter = offsetof(struct qib_ibport, rvp.z_##N) \
}
struct qib_diagc_attr {
@@ -414,10 +420,11 @@ struct qib_diagc_attr {
size_t counter;
};
+QIB_DIAGC_ATTR_PER_CPU(rc_acks);
+QIB_DIAGC_ATTR_PER_CPU(rc_qacks);
+QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp);
+
QIB_DIAGC_ATTR(rc_resends);
-QIB_DIAGC_ATTR(rc_acks);
-QIB_DIAGC_ATTR(rc_qacks);
-QIB_DIAGC_ATTR(rc_delayed_comp);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
@@ -449,6 +456,35 @@ static struct attribute *diagc_default_attributes[] = {
NULL
};
+static u64 get_all_cpu_total(u64 __percpu *cntr)
+{
+ int cpu;
+ u64 counter = 0;
+
+ for_each_possible_cpu(cpu)
+ counter += *per_cpu_ptr(cntr, cpu);
+ return counter;
+}
+
+#define def_write_per_cpu(cntr) \
+static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data) \
+{ \
+ struct qib_devdata *dd = ppd->dd; \
+ struct qib_ibport *qibp = &ppd->ibport_data; \
+ /* A write can only zero the counter */ \
+ if (data == 0) \
+ qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \
+ else \
+ qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); \
+}
+
+def_write_per_cpu(rc_acks)
+def_write_per_cpu(rc_qacks)
+def_write_per_cpu(rc_delayed_comp)
+
+#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \
+ qibp->rvp.z_##cntr)
+
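/*
 * A sketch, not part of the patch: the per-CPU counter scheme above
 * increments with this_cpu_inc(), reads by summing every CPU's slot,
 * and "zeroes" by recording the current total in a z_ baseline rather
 * than touching the hot counters.  A kernel-style model with a
 * hypothetical struct stats:
 */
#include <linux/types.h>
#include <linux/percpu.h>

struct stats {
	u64 __percpu *hits;	/* hot-path counter */
	u64 z_hits;		/* baseline captured at the last zeroing */
};

static void stats_hit(struct stats *s)
{
	this_cpu_inc(*s->hits);		/* no shared-cacheline bounce */
}

static u64 stats_total(struct stats *s)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(s->hits, cpu);
	return sum;
}

static u64 stats_read(struct stats *s)
{
	return stats_total(s) - s->z_hits;
}

/* A "write of 0" just moves the baseline, as in diagc_attr_store(). */
static void stats_zero(struct stats *s)
{
	s->z_hits = stats_total(s);
}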
static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
@@ -458,7 +494,16 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
container_of(kobj, struct qib_pportdata, diagc_kobj);
struct qib_ibport *qibp = &ppd->ibport_data;
- return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
+ if (!strncmp(dattr->attr.name, "rc_acks", 7))
+ return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks));
+ else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
+ return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks));
+ else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
+ return sprintf(buf, "%llu\n",
+ READ_PER_CPU_CNTR(rc_delayed_comp));
+ else
+ return sprintf(buf, "%u\n",
+ *(u32 *)((char *)qibp + dattr->counter));
}
static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
@@ -475,7 +520,15 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
- *(u32 *)((char *) qibp + dattr->counter) = val;
+
+ if (!strncmp(dattr->attr.name, "rc_acks", 7))
+ write_per_cpu_rc_acks(ppd, val);
+ else if (!strncmp(dattr->attr.name, "rc_qacks", 8))
+ write_per_cpu_rc_qacks(ppd, val);
+ else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15))
+ write_per_cpu_rc_delayed_comp(ppd, val);
+ else
+ *(u32 *)((char *)qibp + dattr->counter) = val;
return size;
}
@@ -502,7 +555,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
@@ -511,7 +564,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
@@ -533,7 +586,7 @@ static ssize_t show_boardversion(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
@@ -545,7 +598,7 @@ static ssize_t show_localbus_info(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
@@ -557,7 +610,7 @@ static ssize_t show_nctxts(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
/* Return the number of user ports (contexts) available. */
@@ -572,7 +625,7 @@ static ssize_t show_nfreectxts(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
/* Return the number of free user ports (contexts) available. */
@@ -583,7 +636,7 @@ static ssize_t show_serial(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
buf[sizeof(dd->serial)] = '\0';
@@ -597,7 +650,7 @@ static ssize_t store_chip_reset(struct device *device,
size_t count)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
@@ -618,7 +671,7 @@ static ssize_t show_tempsense(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
+ container_of(device, struct qib_ibdev, rdi.ibdev.dev);
struct qib_devdata *dd = dd_from_dev(dev);
int ret;
int idx;
@@ -778,7 +831,7 @@ bail:
*/
int qib_verbs_register_sysfs(struct qib_devdata *dd)
{
- struct ib_device *dev = &dd->verbs_dev.ibdev;
+ struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
int i, ret;
for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 06a564589c35..7bdbc79ceaa3 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -41,61 +41,62 @@
* qib_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
*
+ * Assumes the s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_uc_req(struct qib_qp *qp)
+int qib_make_uc_req(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_other_headers *ohdr;
- struct qib_swqe *wqe;
- unsigned long flags;
+ struct rvt_swqe *wqe;
u32 hwords;
u32 bth0;
u32 len;
u32 pmtu = qp->pmtu;
int ret = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (atomic_read(&priv->s_dma_busy)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
- ohdr = &qp->s_hdr->u.oth;
+ ohdr = &priv->s_hdr->u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr->u.l.oth;
+ ohdr = &priv->s_hdr->u.l.oth;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
bth0 = 0;
/* Get the next send request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
qp->s_wqe = NULL;
switch (qp->s_state) {
default:
- if (!(ib_qib_state_ops[qp->state] &
- QIB_PROCESS_NEXT_SEND_OK))
+ if (!(ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- if (qp->s_cur == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head))
goto bail;
/*
* Start a new request.
*/
- wqe->psn = qp->s_next_psn;
- qp->s_psn = qp->s_next_psn;
+ qp->s_psn = wqe->psn;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
@@ -214,15 +215,11 @@ int qib_make_uc_req(struct qib_qp *qp)
qp->s_cur_sge = &qp->s_sge;
qp->s_cur_size = len;
qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- qp->s_next_psn++ & QIB_PSN_MASK);
+ qp->s_psn++ & QIB_PSN_MASK);
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->s_flags &= ~RVT_S_BUSY;
return ret;
}
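/*
 * A sketch, not part of the patch: qib_make_uc_req() now documents
 * "Assumes the s_lock is held" and no longer takes the lock itself.
 * One common way to make such a calling convention self-checking is
 * lockdep_assert_held(); touch_send_state() below is hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/lockdep.h>

static void touch_send_state(spinlock_t *s_lock)
{
	lockdep_assert_held(s_lock);	/* splats under CONFIG_PROVE_LOCKING */
	/* ... mutate queue state that s_lock protects ... */
}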
@@ -240,7 +237,7 @@ unlock:
* Called at interrupt level.
*/
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_other_headers *ohdr;
u32 opcode;
@@ -278,10 +275,10 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
inv:
if (qp->r_state == OP(SEND_FIRST) ||
qp->r_state == OP(SEND_MIDDLE)) {
- set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
} else
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
qp->r_state = OP(SEND_LAST);
switch (opcode) {
case OP(SEND_FIRST):
@@ -328,8 +325,8 @@ inv:
goto inv;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
- qp->r_flags |= QIB_R_COMM_EST;
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) {
+ qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -346,7 +343,7 @@ inv:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
qp->r_sge = qp->s_rdma_read_sge;
else {
ret = qib_get_rwqe(qp, 0);
@@ -400,7 +397,7 @@ send_last:
goto rewind;
wc.opcode = IB_WC_RECV;
qib_copy_sge(&qp->r_sge, data, tlen, 0);
- qib_put_ss(&qp->s_rdma_read_sge);
+ rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -414,7 +411,7 @@ last_imm:
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
break;
@@ -438,7 +435,7 @@ rdma_first:
int ok;
/* Check rkey */
- ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto drop;
@@ -483,8 +480,8 @@ rdma_last_imm:
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
- qib_put_ss(&qp->s_rdma_read_sge);
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
+ rvt_put_ss(&qp->s_rdma_read_sge);
else {
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
@@ -495,7 +492,7 @@ rdma_last_imm:
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
goto last_imm;
case OP(RDMA_WRITE_LAST):
@@ -511,7 +508,7 @@ rdma_last:
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
- qib_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
break;
default:
@@ -523,10 +520,10 @@ rdma_last:
return;
rewind:
- set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
return;
op_err:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 59193f67ea78..d9502137de62 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -32,6 +32,7 @@
*/
#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
#include "qib.h"
#include "qib_mad.h"
@@ -46,22 +47,26 @@
* Note that the receive interrupt handler may be calling qib_ud_rcv()
* while this is being called.
*/
-static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
+static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_pportdata *ppd;
- struct qib_qp *qp;
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+ struct rvt_qp *qp;
struct ib_ah_attr *ah_attr;
unsigned long flags;
- struct qib_sge_state ssge;
- struct qib_sge *sge;
+ struct rvt_sge_state ssge;
+ struct rvt_sge *sge;
struct ib_wc wc;
u32 length;
enum ib_qp_type sqptype, dqptype;
- qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn);
if (!qp) {
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
+ rcu_read_unlock();
return;
}
@@ -71,12 +76,12 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
IB_QPT_UD : qp->ibqp.qp_type;
if (dqptype != sqptype ||
- !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
+ !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
goto drop;
}
- ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
+ ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
ppd = ppd_from_ibp(ibp);
if (qp->ibqp.qp_num > 1) {
@@ -140,8 +145,8 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_flags & QIB_R_REUSE_SGE)
- qp->r_flags &= ~QIB_R_REUSE_SGE;
+ if (qp->r_flags & RVT_R_REUSE_SGE)
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
else {
int ret;
@@ -152,14 +157,14 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
- ibp->n_vl15_dropped++;
+ ibp->rvp.n_vl15_dropped++;
goto bail_unlock;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= QIB_R_REUSE_SGE;
- ibp->n_pkt_drops++;
+ qp->r_flags |= RVT_R_REUSE_SGE;
+ ibp->rvp.n_pkt_drops++;
goto bail_unlock;
}
@@ -189,7 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
if (--ssge.num_sge)
*sge = *ssge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -201,8 +206,8 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
}
length -= len;
}
- qib_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ rvt_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -216,30 +221,31 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
swqe->wr.send_flags & IB_SEND_SOLICITED);
- ibp->n_loop_pkts++;
+ ibp->rvp.n_loop_pkts++;
bail_unlock:
spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
+ rcu_read_unlock();
}
/**
* qib_make_ud_req - construct a UD request packet
* @qp: the QP
*
+ * Assumes the s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_ud_req(struct qib_qp *qp)
+int qib_make_ud_req(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_other_headers *ohdr;
struct ib_ah_attr *ah_attr;
struct qib_pportdata *ppd;
struct qib_ibport *ibp;
- struct qib_swqe *wqe;
- unsigned long flags;
+ struct rvt_swqe *wqe;
u32 nwords;
u32 extra_bytes;
u32 bth0;
@@ -248,28 +254,29 @@ int qib_make_ud_req(struct qib_qp *qp)
int ret = 0;
int next_cur;
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (atomic_read(&priv->s_dma_busy)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
- if (qp->s_cur == qp->s_head)
+ /* see post_one_send() */
+ smp_read_barrier_depends();
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head))
goto bail;
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
next_cur = qp->s_cur + 1;
if (next_cur >= qp->s_size)
next_cur = 0;
@@ -277,9 +284,9 @@ int qib_make_ud_req(struct qib_qp *qp)
/* Construct the header. */
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
- ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
- if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
- if (ah_attr->dlid != QIB_PERMISSIVE_LID)
+ ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+ if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ if (ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE))
this_cpu_inc(ibp->pmastats->n_multicast_xmit);
else
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
@@ -287,6 +294,7 @@ int qib_make_ud_req(struct qib_qp *qp)
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
if (unlikely(lid == ppd->lid)) {
+ unsigned long flags;
/*
* If DMAs are in progress, we can't generate
* a completion for the loopback packet since
@@ -294,11 +302,12 @@ int qib_make_ud_req(struct qib_qp *qp)
* XXX Instead of waiting, we could queue a
* zero length descriptor so we get a callback.
*/
- if (atomic_read(&qp->s_dma_busy)) {
- qp->s_flags |= QIB_S_WAIT_DMA;
+ if (atomic_read(&priv->s_dma_busy)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
qp->s_cur = next_cur;
+ local_irq_save(flags);
spin_unlock_irqrestore(&qp->s_lock, flags);
qib_ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, flags);
@@ -324,11 +333,11 @@ int qib_make_ud_req(struct qib_qp *qp)
if (ah_attr->ah_flags & IB_AH_GRH) {
/* Header size in 32-bit words. */
- qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh,
+ qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
&ah_attr->grh,
qp->s_hdrwords, nwords);
lrh0 = QIB_LRH_GRH;
- ohdr = &qp->s_hdr->u.l.oth;
+ ohdr = &priv->s_hdr->u.l.oth;
/*
* Don't worry about sending to locally attached multicast
* QPs. It is unspecified by the spec. what happens.
@@ -336,7 +345,7 @@ int qib_make_ud_req(struct qib_qp *qp)
} else {
/* Header size in 32-bit words. */
lrh0 = QIB_LRH_BTH;
- ohdr = &qp->s_hdr->u.oth;
+ ohdr = &priv->s_hdr->u.oth;
}
if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_hdrwords++;
@@ -349,15 +358,16 @@ int qib_make_ud_req(struct qib_qp *qp)
lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
else
lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
- qp->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
- qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
+ priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
+ priv->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
+ priv->s_hdr->lrh[2] =
+ cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
lid = ppd->lid;
if (lid) {
lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
- qp->s_hdr->lrh[3] = cpu_to_be16(lid);
+ priv->s_hdr->lrh[3] = cpu_to_be16(lid);
} else
- qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
+ priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
bth0 |= extra_bytes << 20;
@@ -368,11 +378,11 @@ int qib_make_ud_req(struct qib_qp *qp)
/*
* Use the multicast QP if the destination LID is a multicast LID.
*/
- ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
- ah_attr->dlid != QIB_PERMISSIVE_LID ?
+ ohdr->bth[1] = ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
+ ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) ?
cpu_to_be32(QIB_MULTICAST_QPN) :
cpu_to_be32(wqe->ud_wr.remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
+ ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
/*
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the WR (see 10.2.5).
@@ -382,13 +392,9 @@ int qib_make_ud_req(struct qib_qp *qp)
ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
- qp->s_flags &= ~QIB_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ qp->s_flags &= ~RVT_S_BUSY;
return ret;
}
@@ -426,7 +432,7 @@ static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
* Called at interrupt level.
*/
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_other_headers *ohdr;
int opcode;
@@ -446,7 +452,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
}
qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
/*
* Get the number of bytes the message was padded by
@@ -531,8 +537,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_flags & QIB_R_REUSE_SGE)
- qp->r_flags &= ~QIB_R_REUSE_SGE;
+ if (qp->r_flags & RVT_R_REUSE_SGE)
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
else {
int ret;
@@ -543,13 +549,13 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
- ibp->n_vl15_dropped++;
+ ibp->rvp.n_vl15_dropped++;
return;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= QIB_R_REUSE_SGE;
+ qp->r_flags |= RVT_R_REUSE_SGE;
goto drop;
}
if (has_grh) {
@@ -559,8 +565,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
} else
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
- qib_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
+ rvt_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -576,15 +582,15 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
*/
- wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
+ wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
- qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
(ohdr->bth[0] &
cpu_to_be32(IB_BTH_SOLICITED)) != 0);
return;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
}
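
With the conversion above, the UD paths resolve the destination QP through rvt_lookup_qpn() inside an RCU read-side critical section instead of taking a per-lookup reference via qib_lookup_qpn() and dropping it with atomic_dec_and_test()/wake_up(). A sketch of the lookup pattern as this patch uses it; lookup_sketch() and qib_handle_pkt() are hypothetical names standing in for the real receive work:

	static void lookup_sketch(struct rvt_dev_info *rdi, struct qib_ibport *ibp,
				  u32 qp_num, void *data)
	{
		struct rvt_qp *qp;

		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
		if (qp)
			qib_handle_pkt(qp, data);	/* hypothetical consumer */
		rcu_read_unlock();	/* qp must not be dereferenced past here */
	}
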
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 74f90b2619f6..2d2b94fd3633 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -66,8 +66,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
}
for (got = 0; got < num_pages; got += ret) {
- ret = get_user_pages(current, current->mm,
- start_page + got * PAGE_SIZE,
+ ret = get_user_pages(start_page + got * PAGE_SIZE,
num_pages - got, 1, 1,
p + got, NULL);
if (ret < 0)
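
The qib_user_pages.c hunk tracks a core mm API change rather than the rdmavt work: get_user_pages() dropped its explicit task/mm arguments and now always operates on current->mm. Condensed from the diff itself:

	/* old calling convention: task and mm passed explicitly */
	ret = get_user_pages(current, current->mm, start, nr_pages,
			     1, 1, pages, NULL);

	/* new calling convention: current->mm is implied */
	ret = get_user_pages(start, nr_pages, 1, 1, pages, NULL);
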
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index baf1e42b6896..cbf6200e6afc 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -41,6 +41,7 @@
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <rdma/rdma_vt.h>
#include "qib.h"
#include "qib_common.h"
@@ -49,8 +50,8 @@ static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
-unsigned int ib_qib_lkey_table_size = 16;
-module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
+static unsigned int qib_lkey_table_size = 16;
+module_param_named(lkey_table_size, qib_lkey_table_size, uint,
S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
"LKEY table size in bits (2^n, 1 <= n <= 23)");
@@ -113,36 +114,6 @@ module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
/*
- * Note that it is OK to post send work requests in the SQE and ERR
- * states; qib_do_send() will process them and generate error
- * completions as per IB 1.2 C10-96.
- */
-const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = 0,
- [IB_QPS_INIT] = QIB_POST_RECV_OK,
- [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
- [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
- QIB_PROCESS_NEXT_SEND_OK,
- [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
- [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
- QIB_POST_SEND_OK | QIB_FLUSH_SEND,
- [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
- QIB_POST_SEND_OK | QIB_FLUSH_SEND,
-};
-
-struct qib_ucontext {
- struct ib_ucontext ibucontext;
-};
-
-static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
- *ibucontext)
-{
- return container_of(ibucontext, struct qib_ucontext, ibucontext);
-}
-
-/*
* Translate ib_wr_opcode into ib_wc_opcode.
*/
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
@@ -166,9 +137,9 @@ __be64 ib_qib_sys_image_guid;
* @data: the data to copy
* @length: the length of the data
*/
-void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
+void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
- struct qib_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
while (length) {
u32 len = sge->length;
@@ -184,11 +155,11 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (release)
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -208,9 +179,9 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
* @ss: the SGE state
* @length: the number of bytes to skip
*/
-void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
+void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
- struct qib_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
while (length) {
u32 len = sge->length;
@@ -225,11 +196,11 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (release)
- qib_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -248,10 +219,10 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
* Don't modify the qib_sge_state to get the count.
* Return zero if any of the segments is not aligned.
*/
-static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
+static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
{
- struct qib_sge *sg_list = ss->sg_list;
- struct qib_sge sge = ss->sge;
+ struct rvt_sge *sg_list = ss->sg_list;
+ struct rvt_sge sge = ss->sge;
u8 num_sge = ss->num_sge;
u32 ndesc = 1; /* count the header */
@@ -276,7 +247,7 @@ static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
if (--num_sge)
sge = *sg_list++;
} else if (sge.length == 0 && sge.mr->lkey) {
- if (++sge.n >= QIB_SEGSZ) {
+ if (++sge.n >= RVT_SEGSZ) {
if (++sge.m >= sge.mr->mapsz)
break;
sge.n = 0;
@@ -294,9 +265,9 @@ static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
/*
* Copy from the SGEs to the data buffer.
*/
-static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
+static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
{
- struct qib_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
while (length) {
u32 len = sge->length;
@@ -314,7 +285,7 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -330,242 +301,6 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
}
/**
- * qib_post_one_send - post one RC, UC, or UD send work request
- * @qp: the QP to post on
- * @wr: the work request to send
- */
-static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
- int *scheduled)
-{
- struct qib_swqe *wqe;
- u32 next;
- int i;
- int j;
- int acc;
- int ret;
- unsigned long flags;
- struct qib_lkey_table *rkt;
- struct qib_pd *pd;
- int avoid_schedule = 0;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Check that state is OK to post send. */
- if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
- goto bail_inval;
-
- /* IB spec says that num_sge == 0 is OK. */
- if (wr->num_sge > qp->s_max_sge)
- goto bail_inval;
-
- /*
- * Don't allow RDMA reads or atomic operations on UC or
- * undefined operations.
- * Make sure buffer is large enough to hold the result for atomics.
- */
- if (wr->opcode == IB_WR_REG_MR) {
- if (qib_reg_mr(qp, reg_wr(wr)))
- goto bail_inval;
- } else if (qp->ibqp.qp_type == IB_QPT_UC) {
- if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
- goto bail_inval;
- } else if (qp->ibqp.qp_type != IB_QPT_RC) {
- /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
- if (wr->opcode != IB_WR_SEND &&
- wr->opcode != IB_WR_SEND_WITH_IMM)
- goto bail_inval;
- /* Check UD destination address PD */
- if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
- goto bail_inval;
- } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
- goto bail_inval;
- else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
- (wr->num_sge == 0 ||
- wr->sg_list[0].length < sizeof(u64) ||
- wr->sg_list[0].addr & (sizeof(u64) - 1)))
- goto bail_inval;
- else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
- goto bail_inval;
-
- next = qp->s_head + 1;
- if (next >= qp->s_size)
- next = 0;
- if (next == qp->s_last) {
- ret = -ENOMEM;
- goto bail;
- }
-
- rkt = &to_idev(qp->ibqp.device)->lk_table;
- pd = to_ipd(qp->ibqp.pd);
- wqe = get_swqe_ptr(qp, qp->s_head);
-
- if (qp->ibqp.qp_type != IB_QPT_UC &&
- qp->ibqp.qp_type != IB_QPT_RC)
- memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
- else if (wr->opcode == IB_WR_REG_MR)
- memcpy(&wqe->reg_wr, reg_wr(wr),
- sizeof(wqe->reg_wr));
- else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE ||
- wr->opcode == IB_WR_RDMA_READ)
- memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
- else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
- else
- memcpy(&wqe->wr, wr, sizeof(wqe->wr));
-
- wqe->length = 0;
- j = 0;
- if (wr->num_sge) {
- acc = wr->opcode >= IB_WR_RDMA_READ ?
- IB_ACCESS_LOCAL_WRITE : 0;
- for (i = 0; i < wr->num_sge; i++) {
- u32 length = wr->sg_list[i].length;
- int ok;
-
- if (length == 0)
- continue;
- ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
- &wr->sg_list[i], acc);
- if (!ok)
- goto bail_inval_free;
- wqe->length += length;
- j++;
- }
- wqe->wr.num_sge = j;
- }
- if (qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_RC) {
- if (wqe->length > 0x80000000U)
- goto bail_inval_free;
- if (wqe->length <= qp->pmtu)
- avoid_schedule = 1;
- } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
- qp->port_num - 1)->ibmtu) {
- goto bail_inval_free;
- } else {
- atomic_inc(&to_iah(ud_wr(wr)->ah)->refcount);
- avoid_schedule = 1;
- }
- wqe->ssn = qp->s_ssn++;
- qp->s_head = next;
-
- ret = 0;
- goto bail;
-
-bail_inval_free:
- while (j) {
- struct qib_sge *sge = &wqe->sg_list[--j];
-
- qib_put_mr(sge->mr);
- }
-bail_inval:
- ret = -EINVAL;
-bail:
- if (!ret && !wr->next && !avoid_schedule &&
- !qib_sdma_empty(
- dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
- qib_schedule_send(qp);
- *scheduled = 1;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-/**
- * qib_post_send - post a send on a QP
- * @ibqp: the QP to post the send on
- * @wr: the list of work requests to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- int err = 0;
- int scheduled = 0;
-
- for (; wr; wr = wr->next) {
- err = qib_post_one_send(qp, wr, &scheduled);
- if (err) {
- *bad_wr = wr;
- goto bail;
- }
- }
-
- /* Try to do the send work in the caller's context. */
- if (!scheduled)
- qib_do_send(&qp->s_work);
-
-bail:
- return err;
-}
-
-/**
- * qib_post_receive - post a receive on a QP
- * @ibqp: the QP to post the receive on
- * @wr: the WR to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_rwq *wq = qp->r_rq.wq;
- unsigned long flags;
- int ret;
-
- /* Check that state is OK to post receive. */
- if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- for (; wr; wr = wr->next) {
- struct qib_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- next = wq->head + 1;
- if (next >= qp->r_rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
* qib_qp_rcv - processing an incoming packet on a QP
* @rcd: the context pointer
* @hdr: the packet header
@@ -579,15 +314,15 @@ bail:
* Called at interrupt level.
*/
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp)
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
struct qib_ibport *ibp = &rcd->ppd->ibport_data;
spin_lock(&qp->r_lock);
/* Check for valid receive state. */
- if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
goto unlock;
}
@@ -632,8 +367,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
struct qib_pportdata *ppd = rcd->ppd;
struct qib_ibport *ibp = &ppd->ibport_data;
struct qib_ib_header *hdr = rhdr;
+ struct qib_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
struct qib_other_headers *ohdr;
- struct qib_qp *qp;
+ struct rvt_qp *qp;
u32 qp_num;
int lnh;
u8 opcode;
@@ -645,7 +382,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
/* Check for a valid destination LID (see ch. 7.11.1). */
lid = be16_to_cpu(hdr->lrh[1]);
- if (lid < QIB_MULTICAST_LID_BASE) {
+ if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
lid &= ~((1 << ppd->lmc) - 1);
if (unlikely(lid != ppd->lid))
goto drop;
@@ -674,50 +411,40 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
#endif
/* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+ qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
if (qp_num == QIB_MULTICAST_QPN) {
- struct qib_mcast *mcast;
- struct qib_mcast_qp *p;
+ struct rvt_mcast *mcast;
+ struct rvt_mcast_qp *p;
if (lnh != QIB_LRH_GRH)
goto drop;
- mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
+ mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
if (mcast == NULL)
goto drop;
this_cpu_inc(ibp->pmastats->n_multicast_rcv);
list_for_each_entry_rcu(p, &mcast->qp_list, list)
qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
/*
- * Notify qib_multicast_detach() if it is waiting for us
+ * Notify rvt_multicast_detach() if it is waiting for us
* to finish.
*/
if (atomic_dec_return(&mcast->refcount) <= 1)
wake_up(&mcast->wait);
} else {
- if (rcd->lookaside_qp) {
- if (rcd->lookaside_qpn != qp_num) {
- if (atomic_dec_and_test(
- &rcd->lookaside_qp->refcount))
- wake_up(
- &rcd->lookaside_qp->wait);
- rcd->lookaside_qp = NULL;
- }
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
+ if (!qp) {
+ rcu_read_unlock();
+ goto drop;
}
- if (!rcd->lookaside_qp) {
- qp = qib_lookup_qpn(ibp, qp_num);
- if (!qp)
- goto drop;
- rcd->lookaside_qp = qp;
- rcd->lookaside_qpn = qp_num;
- } else
- qp = rcd->lookaside_qp;
this_cpu_inc(ibp->pmastats->n_unicast_rcv);
qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
+ rcu_read_unlock();
}
return;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
}
/*
@@ -728,23 +455,25 @@ static void mem_timer(unsigned long data)
{
struct qib_ibdev *dev = (struct qib_ibdev *) data;
struct list_head *list = &dev->memwait;
- struct qib_qp *qp = NULL;
+ struct rvt_qp *qp = NULL;
+ struct qib_qp_priv *priv = NULL;
unsigned long flags;
- spin_lock_irqsave(&dev->pending_lock, flags);
+ spin_lock_irqsave(&dev->rdi.pending_lock, flags);
if (!list_empty(list)) {
- qp = list_entry(list->next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
+ priv = list_entry(list->next, struct qib_qp_priv, iowait);
+ qp = priv->owner;
+ list_del_init(&priv->iowait);
atomic_inc(&qp->refcount);
if (!list_empty(list))
mod_timer(&dev->mem_timer, jiffies + 1);
}
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
if (qp) {
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_KMEM) {
- qp->s_flags &= ~QIB_S_WAIT_KMEM;
+ if (qp->s_flags & RVT_S_WAIT_KMEM) {
+ qp->s_flags &= ~RVT_S_WAIT_KMEM;
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
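
mem_timer() shows the pattern this conversion applies throughout: wait-list membership (iowait) now lives in the driver-private qib_qp_priv, which carries an owner back-pointer to the shared rvt_qp. A sketch of the resolution step, assuming owner is filled in when the QP's private data is allocated:

	struct qib_qp_priv *priv;
	struct rvt_qp *qp;

	priv = list_entry(list->next, struct qib_qp_priv, iowait);
	qp = priv->owner;		/* back-pointer to the shared rvt_qp */
	list_del_init(&priv->iowait);
	atomic_inc(&qp->refcount);	/* keep the QP pinned across the unlock */
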
@@ -753,9 +482,9 @@ static void mem_timer(unsigned long data)
}
}
-static void update_sge(struct qib_sge_state *ss, u32 length)
+static void update_sge(struct rvt_sge_state *ss, u32 length)
{
- struct qib_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
sge->vaddr += length;
sge->length -= length;
@@ -764,7 +493,7 @@ static void update_sge(struct qib_sge_state *ss, u32 length)
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= QIB_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
return;
sge->n = 0;
@@ -810,7 +539,7 @@ static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
}
#endif
-static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
+static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
u32 length, unsigned flush_wc)
{
u32 extra = 0;
@@ -947,30 +676,31 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
}
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp)
+ struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_verbs_txreq *tx;
unsigned long flags;
spin_lock_irqsave(&qp->s_lock, flags);
- spin_lock(&dev->pending_lock);
+ spin_lock(&dev->rdi.pending_lock);
if (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
list_del(l);
- spin_unlock(&dev->pending_lock);
+ spin_unlock(&dev->rdi.pending_lock);
spin_unlock_irqrestore(&qp->s_lock, flags);
tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
} else {
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
- list_empty(&qp->iowait)) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
+ list_empty(&priv->iowait)) {
dev->n_txwait++;
- qp->s_flags |= QIB_S_WAIT_TX;
- list_add_tail(&qp->iowait, &dev->txwait);
+ qp->s_flags |= RVT_S_WAIT_TX;
+ list_add_tail(&priv->iowait, &dev->txwait);
}
- qp->s_flags &= ~QIB_S_BUSY;
- spin_unlock(&dev->pending_lock);
+ qp->s_flags &= ~RVT_S_BUSY;
+ spin_unlock(&dev->rdi.pending_lock);
spin_unlock_irqrestore(&qp->s_lock, flags);
tx = ERR_PTR(-EBUSY);
}
@@ -978,22 +708,22 @@ static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
}
static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp)
+ struct rvt_qp *qp)
{
struct qib_verbs_txreq *tx;
unsigned long flags;
- spin_lock_irqsave(&dev->pending_lock, flags);
+ spin_lock_irqsave(&dev->rdi.pending_lock, flags);
/* assume the list is non-empty */
if (likely(!list_empty(&dev->txreq_free))) {
struct list_head *l = dev->txreq_free.next;
list_del(l);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
} else {
/* call slow path to get the extra lock */
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
tx = __get_txreq(dev, qp);
}
return tx;
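
get_txreq()/__get_txreq() keep their two-path shape while the list lock moves into rdmavt as dev->rdi.pending_lock: the common case pops a free descriptor under pending_lock alone, and only the empty-list slow path retakes the QP's s_lock so the QP can be parked on dev->txwait with RVT_S_WAIT_TX. A condensed sketch of the split, using the names from the hunks above:

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);	/* fast path: no s_lock needed */
		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
		return list_entry(l, struct qib_verbs_txreq, txreq.list);
	}
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
	return __get_txreq(dev, qp);	/* slow path: s_lock, then pending_lock */
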
@@ -1002,16 +732,15 @@ static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
struct qib_ibdev *dev;
- struct qib_qp *qp;
+ struct rvt_qp *qp;
+ struct qib_qp_priv *priv;
unsigned long flags;
qp = tx->qp;
dev = to_idev(qp->ibqp.device);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
if (tx->mr) {
- qib_put_mr(tx->mr);
+ rvt_put_mr(tx->mr);
tx->mr = NULL;
}
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
@@ -1022,21 +751,23 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
kfree(tx->align_buf);
}
- spin_lock_irqsave(&dev->pending_lock, flags);
+ spin_lock_irqsave(&dev->rdi.pending_lock, flags);
/* Put struct back on free list */
list_add(&tx->txreq.list, &dev->txreq_free);
if (!list_empty(&dev->txwait)) {
/* Wake up first QP wanting a free struct */
- qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
+ priv = list_entry(dev->txwait.next, struct qib_qp_priv,
+ iowait);
+ qp = priv->owner;
+ list_del_init(&priv->iowait);
atomic_inc(&qp->refcount);
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_TX) {
- qp->s_flags &= ~QIB_S_WAIT_TX;
+ if (qp->s_flags & RVT_S_WAIT_TX) {
+ qp->s_flags &= ~RVT_S_WAIT_TX;
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1044,7 +775,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
} else
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}
/*
@@ -1055,36 +786,39 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
*/
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
- struct qib_qp *qp, *nqp;
- struct qib_qp *qps[20];
+ struct rvt_qp *qp, *nqp;
+ struct qib_qp_priv *qpp, *nqpp;
+ struct rvt_qp *qps[20];
struct qib_ibdev *dev;
unsigned i, n;
n = 0;
dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->pending_lock);
+ spin_lock(&dev->rdi.pending_lock);
/* Search wait list for first QP wanting DMA descriptors. */
- list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
+ list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
+ qp = qpp->owner;
+ nqp = nqpp->owner;
if (qp->port_num != ppd->port)
continue;
if (n == ARRAY_SIZE(qps))
break;
- if (qp->s_tx->txreq.sg_count > avail)
+ if (qpp->s_tx->txreq.sg_count > avail)
break;
- avail -= qp->s_tx->txreq.sg_count;
- list_del_init(&qp->iowait);
+ avail -= qpp->s_tx->txreq.sg_count;
+ list_del_init(&qpp->iowait);
atomic_inc(&qp->refcount);
qps[n++] = qp;
}
- spin_unlock(&dev->pending_lock);
+ spin_unlock(&dev->rdi.pending_lock);
for (i = 0; i < n; i++) {
qp = qps[i];
spin_lock(&qp->s_lock);
- if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
- qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
+ if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
+ qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
qib_schedule_send(qp);
}
spin_unlock(&qp->s_lock);
@@ -1100,7 +834,8 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
struct qib_verbs_txreq *tx =
container_of(cookie, struct qib_verbs_txreq, txreq);
- struct qib_qp *qp = tx->qp;
+ struct rvt_qp *qp = tx->qp;
+ struct qib_qp_priv *priv = qp->priv;
spin_lock(&qp->s_lock);
if (tx->wqe)
@@ -1117,11 +852,11 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
}
qib_rc_send_complete(qp, hdr);
}
- if (atomic_dec_and_test(&qp->s_dma_busy)) {
+ if (atomic_dec_and_test(&priv->s_dma_busy)) {
if (qp->state == IB_QPS_RESET)
- wake_up(&qp->wait_dma);
- else if (qp->s_flags & QIB_S_WAIT_DMA) {
- qp->s_flags &= ~QIB_S_WAIT_DMA;
+ wake_up(&priv->wait_dma);
+ else if (qp->s_flags & RVT_S_WAIT_DMA) {
+ qp->s_flags &= ~RVT_S_WAIT_DMA;
qib_schedule_send(qp);
}
}
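
The DMA-busy count also moves into the private struct: rdmavt knows nothing of qib's SDMA engine, so priv->s_dma_busy pairs a driver-owned counter with the shared RVT_S_WAIT_DMA flag and the priv->wait_dma queue. Annotating the completion logic from the hunk above:

	if (atomic_dec_and_test(&priv->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&priv->wait_dma);	/* reset path drains DMA */
		else if (qp->s_flags & RVT_S_WAIT_DMA) {
			qp->s_flags &= ~RVT_S_WAIT_DMA;
			qib_schedule_send(qp);		/* resume the send engine */
		}
	}
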
@@ -1130,22 +865,23 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
qib_put_txreq(tx);
}
-static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
+static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ spin_lock(&dev->rdi.pending_lock);
+ if (list_empty(&priv->iowait)) {
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= QIB_S_WAIT_KMEM;
- list_add_tail(&qp->iowait, &dev->memwait);
+ qp->s_flags |= RVT_S_WAIT_KMEM;
+ list_add_tail(&priv->iowait, &dev->memwait);
}
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&dev->rdi.pending_lock);
+ qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1153,10 +889,11 @@ static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
return ret;
}
-static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len,
+static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct rvt_sge_state *ss, u32 len,
u32 plen, u32 dwords)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
struct qib_devdata *dd = dd_from_dev(dev);
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -1167,9 +904,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
u32 ndesc;
int ret;
- tx = qp->s_tx;
+ tx = priv->s_tx;
if (tx) {
- qp->s_tx = NULL;
+ priv->s_tx = NULL;
/* resend previously constructed packet */
ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
goto bail;
@@ -1182,7 +919,6 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
be16_to_cpu(hdr->lrh[0]) >> 12);
tx->qp = qp;
- atomic_inc(&qp->refcount);
tx->wqe = qp->s_wqe;
tx->mr = qp->s_rdma_mr;
if (qp->s_rdma_mr)
@@ -1245,7 +981,7 @@ err_tx:
qib_put_txreq(tx);
ret = wait_kmem(dev, qp);
unaligned:
- ibp->n_unaligned++;
+ ibp->rvp.n_unaligned++;
bail:
return ret;
bail_tx:
@@ -1257,8 +993,9 @@ bail_tx:
* If we are now in the error state, return zero to flush the
* send work request.
*/
-static int no_bufs_available(struct qib_qp *qp)
+static int no_bufs_available(struct rvt_qp *qp)
{
+ struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
struct qib_devdata *dd;
unsigned long flags;
@@ -1271,25 +1008,25 @@ static int no_bufs_available(struct qib_qp *qp)
* enabling the PIO avail interrupt.
*/
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
- spin_lock(&dev->pending_lock);
- if (list_empty(&qp->iowait)) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ spin_lock(&dev->rdi.pending_lock);
+ if (list_empty(&priv->iowait)) {
dev->n_piowait++;
- qp->s_flags |= QIB_S_WAIT_PIO;
- list_add_tail(&qp->iowait, &dev->piowait);
+ qp->s_flags |= RVT_S_WAIT_PIO;
+ list_add_tail(&priv->iowait, &dev->piowait);
dd = dd_from_dev(dev);
dd->f_wantpiobuf_intr(dd, 1);
}
- spin_unlock(&dev->pending_lock);
- qp->s_flags &= ~QIB_S_BUSY;
+ spin_unlock(&dev->rdi.pending_lock);
+ qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
-static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len,
+static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
+ u32 hdrwords, struct rvt_sge_state *ss, u32 len,
u32 plen, u32 dwords)
{
struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
@@ -1370,7 +1107,7 @@ done:
}
qib_sendbuf_done(dd, pbufn);
if (qp->s_rdma_mr) {
- qib_put_mr(qp->s_rdma_mr);
+ rvt_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
if (qp->s_wqe) {
@@ -1394,10 +1131,10 @@ done:
* @len: the length of the packet in bytes
*
* Return zero if packet is sent or queued OK.
- * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
+ * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
*/
-int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len)
+int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct rvt_sge_state *ss, u32 len)
{
struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
u32 plen;
@@ -1529,10 +1266,11 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
{
struct qib_ibdev *dev = &dd->verbs_dev;
struct list_head *list;
- struct qib_qp *qps[5];
- struct qib_qp *qp;
+ struct rvt_qp *qps[5];
+ struct rvt_qp *qp;
unsigned long flags;
unsigned i, n;
+ struct qib_qp_priv *priv;
list = &dev->piowait;
n = 0;
@@ -1543,25 +1281,26 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
* could end up with QPs on the wait list with the interrupt
* disabled.
*/
- spin_lock_irqsave(&dev->pending_lock, flags);
+ spin_lock_irqsave(&dev->rdi.pending_lock, flags);
while (!list_empty(list)) {
if (n == ARRAY_SIZE(qps))
goto full;
- qp = list_entry(list->next, struct qib_qp, iowait);
- list_del_init(&qp->iowait);
+ priv = list_entry(list->next, struct qib_qp_priv, iowait);
+ qp = priv->owner;
+ list_del_init(&priv->iowait);
atomic_inc(&qp->refcount);
qps[n++] = qp;
}
dd->f_wantpiobuf_intr(dd, 0);
full:
- spin_unlock_irqrestore(&dev->pending_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
for (i = 0; i < n; i++) {
qp = qps[i];
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & QIB_S_WAIT_PIO) {
- qp->s_flags &= ~QIB_S_WAIT_PIO;
+ if (qp->s_flags & RVT_S_WAIT_PIO) {
+ qp->s_flags &= ~RVT_S_WAIT_PIO;
qib_schedule_send(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1572,82 +1311,24 @@ full:
}
}
-static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
- struct ib_udata *uhw)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibdev *dev = to_idev(ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
- memset(props, 0, sizeof(*props));
-
- props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
- IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
- props->page_size_cap = PAGE_SIZE;
- props->vendor_id =
- QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
- props->vendor_part_id = dd->deviceid;
- props->hw_ver = dd->minrev;
- props->sys_image_guid = ib_qib_sys_image_guid;
- props->max_mr_size = ~0ULL;
- props->max_qp = ib_qib_max_qps;
- props->max_qp_wr = ib_qib_max_qp_wrs;
- props->max_sge = ib_qib_max_sges;
- props->max_sge_rd = ib_qib_max_sges;
- props->max_cq = ib_qib_max_cqs;
- props->max_ah = ib_qib_max_ahs;
- props->max_cqe = ib_qib_max_cqes;
- props->max_mr = dev->lk_table.max;
- props->max_fmr = dev->lk_table.max;
- props->max_map_per_fmr = 32767;
- props->max_pd = ib_qib_max_pds;
- props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
- props->max_qp_init_rd_atom = 255;
- /* props->max_res_rd_atom */
- props->max_srq = ib_qib_max_srqs;
- props->max_srq_wr = ib_qib_max_srq_wrs;
- props->max_srq_sge = ib_qib_max_srq_sges;
- /* props->local_ca_ack_delay */
- props->atomic_cap = IB_ATOMIC_GLOB;
- props->max_pkeys = qib_get_npkeys(dd);
- props->max_mcast_grp = ib_qib_max_mcast_grps;
- props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
- props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
- props->max_mcast_grp;
-
- return 0;
-}
-
-static int qib_query_port(struct ib_device *ibdev, u8 port,
+static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num,
struct ib_port_attr *props)
{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = dd_from_dev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
enum ib_mtu mtu;
u16 lid = ppd->lid;
- memset(props, 0, sizeof(*props));
props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
props->lmc = ppd->lmc;
- props->sm_lid = ibp->sm_lid;
- props->sm_sl = ibp->sm_sl;
props->state = dd->f_iblink_state(ppd->lastibcstat);
props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
- props->port_cap_flags = ibp->port_cap_flags;
props->gid_tbl_len = QIB_GUIDS_PER_PORT;
- props->max_msg_sz = 0x80000000;
- props->pkey_tbl_len = qib_get_npkeys(dd);
- props->bad_pkey_cntr = ibp->pkey_violations;
- props->qkey_viol_cntr = ibp->qkey_violations;
props->active_width = ppd->link_width_active;
/* See rate_show() */
props->active_speed = ppd->link_speed_active;
props->max_vl_num = qib_num_vls(ppd->vls_supported);
- props->init_type_reply = 0;
props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
switch (ppd->ibmtu) {
@@ -1670,7 +1351,6 @@ static int qib_query_port(struct ib_device *ibdev, u8 port,
mtu = IB_MTU_2048;
}
props->active_mtu = mtu;
- props->subnet_timeout = ibp->subnet_timeout;
return 0;
}
@@ -1714,185 +1394,70 @@ bail:
return ret;
}
-static int qib_modify_port(struct ib_device *ibdev, u8 port,
- int port_modify_mask, struct ib_port_modify *props)
+static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- ibp->port_cap_flags |= props->set_port_cap_mask;
- ibp->port_cap_flags &= ~props->clr_port_cap_mask;
- if (props->set_port_cap_mask || props->clr_port_cap_mask)
- qib_cap_mask_chg(ibp);
- if (port_modify_mask & IB_PORT_SHUTDOWN)
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
- if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
- ibp->qkey_violations = 0;
- return 0;
-}
-
-static int qib_query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- int ret = 0;
-
- if (!port || port > dd->num_pports)
- ret = -EINVAL;
- else {
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- gid->global.subnet_prefix = ibp->gid_prefix;
- if (index == 0)
- gid->global.interface_id = ppd->guid;
- else if (index < QIB_GUIDS_PER_PORT)
- gid->global.interface_id = ibp->guids[index - 1];
- else
- ret = -EINVAL;
- }
-
- return ret;
-}
+ struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
+ struct qib_devdata *dd = dd_from_dev(ibdev);
+ struct qib_pportdata *ppd = &dd->pport[port_num - 1];
-static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_pd *pd;
- struct ib_pd *ret;
+ qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
- /*
- * This is actually totally arbitrary. Some correctness tests
- * assume there's a maximum number of PDs that can be allocated.
- * We don't actually have this limit, but we fail the test if
- * we allow allocations of more than we report for this value.
- */
-
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock(&dev->n_pds_lock);
- if (dev->n_pds_allocated == ib_qib_max_pds) {
- spin_unlock(&dev->n_pds_lock);
- kfree(pd);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_pds_allocated++;
- spin_unlock(&dev->n_pds_lock);
-
- /* ib_alloc_pd() will initialize pd->ibpd. */
- pd->user = udata != NULL;
-
- ret = &pd->ibpd;
-
-bail:
- return ret;
+ return 0;
}
-static int qib_dealloc_pd(struct ib_pd *ibpd)
+static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
+ int guid_index, __be64 *guid)
{
- struct qib_pd *pd = to_ipd(ibpd);
- struct qib_ibdev *dev = to_idev(ibpd->device);
-
- spin_lock(&dev->n_pds_lock);
- dev->n_pds_allocated--;
- spin_unlock(&dev->n_pds_lock);
+ struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- kfree(pd);
+ if (guid_index == 0)
+ *guid = ppd->guid;
+ else if (guid_index < QIB_GUIDS_PER_PORT)
+ *guid = ibp->guids[guid_index - 1];
+ else
+ return -EINVAL;
return 0;
}
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
- /* A multicast address requires a GRH (see ch. 8.4.1). */
- if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
- ah_attr->dlid != QIB_PERMISSIVE_LID &&
- !(ah_attr->ah_flags & IB_AH_GRH))
- goto bail;
- if ((ah_attr->ah_flags & IB_AH_GRH) &&
- ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
- goto bail;
- if (ah_attr->dlid == 0)
- goto bail;
- if (ah_attr->port_num < 1 ||
- ah_attr->port_num > ibdev->phys_port_cnt)
- goto bail;
- if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
- ib_rate_to_mult(ah_attr->static_rate) < 0)
- goto bail;
if (ah_attr->sl > 15)
- goto bail;
+ return -EINVAL;
+
return 0;
-bail:
- return -EINVAL;
}
-/**
- * qib_create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- *
- * This may be called from interrupt context.
- */
-static struct ib_ah *qib_create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+static void qib_notify_new_ah(struct ib_device *ibdev,
+ struct ib_ah_attr *ah_attr,
+ struct rvt_ah *ah)
{
- struct qib_ah *ah;
- struct ib_ah *ret;
- struct qib_ibdev *dev = to_idev(pd->device);
- unsigned long flags;
+ struct qib_ibport *ibp;
+ struct qib_pportdata *ppd;
- if (qib_check_ah(pd->device, ah_attr)) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- if (dev->n_ahs_allocated == ib_qib_max_ahs) {
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- kfree(ah);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_ahs_allocated++;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- /* ib_create_ah() will initialize ah->ibah. */
- ah->attr = *ah_attr;
- atomic_set(&ah->refcount, 0);
-
- ret = &ah->ibah;
+ /*
+ * Do not trust reading anything from rvt_ah at this point as it is not
+ * done being set up. We can, however, modify the fields we need to set.
+ */
-bail:
- return ret;
+ ibp = to_iport(ibdev, ah_attr->port_num);
+ ppd = ppd_from_ibp(ibp);
+ ah->vl = ibp->sl_to_vl[ah->attr.sl];
+ ah->log_pmtu = ilog2(ppd->ibmtu);
}
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
struct ib_ah_attr attr;
struct ib_ah *ah = ERR_PTR(-EINVAL);
- struct qib_qp *qp0;
+ struct rvt_qp *qp0;
memset(&attr, 0, sizeof(attr));
attr.dlid = dlid;
attr.port_num = ppd_from_ibp(ibp)->port;
rcu_read_lock();
- qp0 = rcu_dereference(ibp->qp0);
+ qp0 = rcu_dereference(ibp->rvp.qp[0]);
if (qp0)
ah = ib_create_ah(qp0->ibqp.pd, &attr);
rcu_read_unlock();
@@ -1900,51 +1465,6 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
}
/**
- * qib_destroy_ah - destroy an address handle
- * @ibah: the AH to destroy
- *
- * This may be called from interrupt context.
- */
-static int qib_destroy_ah(struct ib_ah *ibah)
-{
- struct qib_ibdev *dev = to_idev(ibah->device);
- struct qib_ah *ah = to_iah(ibah);
- unsigned long flags;
-
- if (atomic_read(&ah->refcount) != 0)
- return -EBUSY;
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- dev->n_ahs_allocated--;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- kfree(ah);
-
- return 0;
-}
-
-static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct qib_ah *ah = to_iah(ibah);
-
- if (qib_check_ah(ibah->device, ah_attr))
- return -EINVAL;
-
- ah->attr = *ah_attr;
-
- return 0;
-}
-
-static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct qib_ah *ah = to_iah(ibah);
-
- *ah_attr = ah->attr;
-
- return 0;
-}
-
-/**
* qib_get_npkeys - return the size of the PKEY table for context 0
* @dd: the qlogic_ib device
*/
@@ -1973,75 +1493,27 @@ unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
return ret;
}
-static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- int ret;
-
- if (index >= qib_get_npkeys(dd)) {
- ret = -EINVAL;
- goto bail;
- }
-
- *pkey = qib_get_pkey(to_iport(ibdev, port), index);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_alloc_ucontext - allocate a ucontest
- * @ibdev: the infiniband device
- * @udata: not used by the QLogic_IB driver
- */
-
-static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct qib_ucontext *context;
- struct ib_ucontext *ret;
-
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- ret = &context->ibucontext;
-
-bail:
- return ret;
-}
-
-static int qib_dealloc_ucontext(struct ib_ucontext *context)
-{
- kfree(to_iucontext(context));
- return 0;
-}
-
static void init_ibport(struct qib_pportdata *ppd)
{
struct qib_verbs_counters cntrs;
struct qib_ibport *ibp = &ppd->ibport_data;
- spin_lock_init(&ibp->lock);
+ spin_lock_init(&ibp->rvp.lock);
/* Set the prefix to the default value (see ch. 4.1.1) */
- ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
- ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
- ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
+ ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
+ ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
+ ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
IB_PORT_OTHER_LOCAL_CHANGES_SUP;
if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
- ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
- ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
- ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
- ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
- ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+ ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
+ ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
/* Snapshot current HW counters to "clear" them. */
qib_get_counters(ppd, &cntrs);
@@ -2061,26 +1533,55 @@ static void init_ibport(struct qib_pportdata *ppd)
ibp->z_excessive_buffer_overrun_errors =
cntrs.excessive_buffer_overrun_errors;
ibp->z_vl15_dropped = cntrs.vl15_dropped;
- RCU_INIT_POINTER(ibp->qp0, NULL);
- RCU_INIT_POINTER(ibp->qp1, NULL);
+ RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+ RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
-static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
+/**
+ * qib_fill_device_attr - Fill in rvt dev info device attributes.
+ * @dd: the device data structure
+ */
+static void qib_fill_device_attr(struct qib_devdata *dd)
{
- struct ib_port_attr attr;
- int err;
-
- err = qib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
- return 0;
+ rdi->dparms.props.max_pd = ib_qib_max_pds;
+ rdi->dparms.props.max_ah = ib_qib_max_ahs;
+ rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+ rdi->dparms.props.page_size_cap = PAGE_SIZE;
+ rdi->dparms.props.vendor_id =
+ QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
+ rdi->dparms.props.vendor_part_id = dd->deviceid;
+ rdi->dparms.props.hw_ver = dd->minrev;
+ rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
+ rdi->dparms.props.max_mr_size = ~0ULL;
+ rdi->dparms.props.max_qp = ib_qib_max_qps;
+ rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
+ rdi->dparms.props.max_sge = ib_qib_max_sges;
+ rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
+ rdi->dparms.props.max_cq = ib_qib_max_cqs;
+ rdi->dparms.props.max_cqe = ib_qib_max_cqes;
+ rdi->dparms.props.max_ah = ib_qib_max_ahs;
+ rdi->dparms.props.max_mr = rdi->lkey_table.max;
+ rdi->dparms.props.max_fmr = rdi->lkey_table.max;
+ rdi->dparms.props.max_map_per_fmr = 32767;
+ rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
+ rdi->dparms.props.max_qp_init_rd_atom = 255;
+ rdi->dparms.props.max_srq = ib_qib_max_srqs;
+ rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
+ rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
+ rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
+ rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
+ rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
+ rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
+ rdi->dparms.props.max_total_mcast_qp_attach =
+ rdi->dparms.props.max_mcast_qp_attach *
+ rdi->dparms.props.max_mcast_grp;
}
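
qib_fill_device_attr() replaces the removed qib_query_device(): the same limits now seed rdi->dparms.props once at registration, and rdmavt answers ib_query_device() from that cache without a driver callback. A sketch of the handoff; fill_attr_sketch is an illustrative name, and only fields set in the hunk above are used:

	static void fill_attr_sketch(struct qib_devdata *dd)
	{
		struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

		memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
		rdi->dparms.props.max_qp = ib_qib_max_qps;
		rdi->dparms.props.max_mr = rdi->lkey_table.max;
		/* ... remaining limits exactly as in qib_fill_device_attr() ... */
	}
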
/**
@@ -2091,68 +1592,20 @@ static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
int qib_register_ib_device(struct qib_devdata *dd)
{
struct qib_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->ibdev;
+ struct ib_device *ibdev = &dev->rdi.ibdev;
struct qib_pportdata *ppd = dd->pport;
- unsigned i, lk_tab_size;
+ unsigned i, ctxt;
int ret;
- dev->qp_table_size = ib_qib_qp_table_size;
get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
- dev->qp_table = kmalloc_array(
- dev->qp_table_size,
- sizeof(*dev->qp_table),
- GFP_KERNEL);
- if (!dev->qp_table) {
- ret = -ENOMEM;
- goto err_qpt;
- }
- for (i = 0; i < dev->qp_table_size; i++)
- RCU_INIT_POINTER(dev->qp_table[i], NULL);
-
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
/* Only need to initialize non-zero fields. */
- spin_lock_init(&dev->qpt_lock);
- spin_lock_init(&dev->n_pds_lock);
- spin_lock_init(&dev->n_ahs_lock);
- spin_lock_init(&dev->n_cqs_lock);
- spin_lock_init(&dev->n_qps_lock);
- spin_lock_init(&dev->n_srqs_lock);
- spin_lock_init(&dev->n_mcast_grps_lock);
- init_timer(&dev->mem_timer);
- dev->mem_timer.function = mem_timer;
- dev->mem_timer.data = (unsigned long) dev;
-
- qib_init_qpn_table(dd, &dev->qpn_table);
+ setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+
+ qpt_mask = dd->qpn_mask;
- /*
- * The top ib_qib_lkey_table_size bits are used to index the
- * table. The lower 8 bits can be owned by the user (copied from
- * the LKEY). The remaining bits act as a generation number or tag.
- */
- spin_lock_init(&dev->lk_table.lock);
- /* insure generation is at least 4 bits see keys.c */
- if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
- qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
- ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
- ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
- }
- dev->lk_table.max = 1 << ib_qib_lkey_table_size;
- lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- dev->lk_table.table = (struct qib_mregion __rcu **)
- vmalloc(lk_tab_size);
- if (dev->lk_table.table == NULL) {
- ret = -ENOMEM;
- goto err_lk;
- }
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- for (i = 0; i < dev->lk_table.max; i++)
- RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
- INIT_LIST_HEAD(&dev->pending_mmaps);
- spin_lock_init(&dev->pending_lock);
- dev->mmap_offset = PAGE_SIZE;
- spin_lock_init(&dev->mmap_offset_lock);
INIT_LIST_HEAD(&dev->piowait);
INIT_LIST_HEAD(&dev->dmawait);
INIT_LIST_HEAD(&dev->txwait);
@@ -2194,110 +1647,91 @@ int qib_register_ib_device(struct qib_devdata *dd)
strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
ibdev->owner = THIS_MODULE;
ibdev->node_guid = ppd->guid;
- ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
- ibdev->uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
- (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
- ibdev->node_type = RDMA_NODE_IB_CA;
ibdev->phys_port_cnt = dd->num_pports;
- ibdev->num_comp_vectors = 1;
ibdev->dma_device = &dd->pcidev->dev;
- ibdev->query_device = qib_query_device;
ibdev->modify_device = qib_modify_device;
- ibdev->query_port = qib_query_port;
- ibdev->modify_port = qib_modify_port;
- ibdev->query_pkey = qib_query_pkey;
- ibdev->query_gid = qib_query_gid;
- ibdev->alloc_ucontext = qib_alloc_ucontext;
- ibdev->dealloc_ucontext = qib_dealloc_ucontext;
- ibdev->alloc_pd = qib_alloc_pd;
- ibdev->dealloc_pd = qib_dealloc_pd;
- ibdev->create_ah = qib_create_ah;
- ibdev->destroy_ah = qib_destroy_ah;
- ibdev->modify_ah = qib_modify_ah;
- ibdev->query_ah = qib_query_ah;
- ibdev->create_srq = qib_create_srq;
- ibdev->modify_srq = qib_modify_srq;
- ibdev->query_srq = qib_query_srq;
- ibdev->destroy_srq = qib_destroy_srq;
- ibdev->create_qp = qib_create_qp;
- ibdev->modify_qp = qib_modify_qp;
- ibdev->query_qp = qib_query_qp;
- ibdev->destroy_qp = qib_destroy_qp;
- ibdev->post_send = qib_post_send;
- ibdev->post_recv = qib_post_receive;
- ibdev->post_srq_recv = qib_post_srq_receive;
- ibdev->create_cq = qib_create_cq;
- ibdev->destroy_cq = qib_destroy_cq;
- ibdev->resize_cq = qib_resize_cq;
- ibdev->poll_cq = qib_poll_cq;
- ibdev->req_notify_cq = qib_req_notify_cq;
- ibdev->get_dma_mr = qib_get_dma_mr;
- ibdev->reg_user_mr = qib_reg_user_mr;
- ibdev->dereg_mr = qib_dereg_mr;
- ibdev->alloc_mr = qib_alloc_mr;
- ibdev->map_mr_sg = qib_map_mr_sg;
- ibdev->alloc_fmr = qib_alloc_fmr;
- ibdev->map_phys_fmr = qib_map_phys_fmr;
- ibdev->unmap_fmr = qib_unmap_fmr;
- ibdev->dealloc_fmr = qib_dealloc_fmr;
- ibdev->attach_mcast = qib_multicast_attach;
- ibdev->detach_mcast = qib_multicast_detach;
ibdev->process_mad = qib_process_mad;
- ibdev->mmap = qib_mmap;
- ibdev->dma_ops = &qib_dma_mapping_ops;
- ibdev->get_port_immutable = qib_port_immutable;
snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
"Intel Infiniband HCA %s", init_utsname()->nodename);
- ret = ib_register_device(ibdev, qib_create_port_files);
- if (ret)
- goto err_reg;
+ /*
+ * Fill in rvt info object.
+ */
+ dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
+ dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
+ dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
+ dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
+ dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
+ dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
+ dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
+ dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
+ dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
+ dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
+ dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
+ dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
+ dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
+ dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
+ dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
+ dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
+ dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
+ dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
+ dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
+ dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
+ dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
+ dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
+ dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
+ dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
+ qib_notify_create_mad_agent;
+ dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
+ qib_notify_free_mad_agent;
+
+ dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
+ dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
+ dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
+ dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
+ dd->verbs_dev.rdi.dparms.qpn_start = 1;
+ dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
+ dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
+ dd->verbs_dev.rdi.dparms.qpn_inc = 1;
+ dd->verbs_dev.rdi.dparms.qos_shift = 1;
+ dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
+ dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
+ dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
+ dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
+ dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
+ dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
+ dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
+ dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
+
+ snprintf(dd->verbs_dev.rdi.dparms.cq_name,
+ sizeof(dd->verbs_dev.rdi.dparms.cq_name),
+ "qib_cq%d", dd->unit);
+
+ qib_fill_device_attr(dd);
+
+ ppd = dd->pport;
+ for (i = 0; i < dd->num_pports; i++, ppd++) {
+ ctxt = ppd->hw_pidx;
+ rvt_init_port(&dd->verbs_dev.rdi,
+ &ppd->ibport_data.rvp,
+ i,
+ dd->rcd[ctxt]->pkeys);
+ }
- ret = qib_create_agents(dev);
+ ret = rvt_register_device(&dd->verbs_dev.rdi);
if (ret)
- goto err_agents;
+ goto err_tx;
ret = qib_verbs_register_sysfs(dd);
if (ret)
goto err_class;
- goto bail;
+ return ret;
err_class:
- qib_free_agents(dev);
-err_agents:
- ib_unregister_device(ibdev);
-err_reg:
+ rvt_unregister_device(&dd->verbs_dev.rdi);
err_tx:
while (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
@@ -2313,27 +1747,17 @@ err_tx:
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
- vfree(dev->lk_table.table);
-err_lk:
- kfree(dev->qp_table);
-err_qpt:
qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
-bail:
return ret;
}
void qib_unregister_ib_device(struct qib_devdata *dd)
{
struct qib_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->ibdev;
- u32 qps_inuse;
- unsigned lk_tab_size;
qib_verbs_unregister_sysfs(dd);
- qib_free_agents(dev);
-
- ib_unregister_device(ibdev);
+ rvt_unregister_device(&dd->verbs_dev.rdi);
if (!list_empty(&dev->piowait))
qib_dev_err(dd, "piowait list not empty!\n");
@@ -2343,16 +1767,8 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
qib_dev_err(dd, "txwait list not empty!\n");
if (!list_empty(&dev->memwait))
qib_dev_err(dd, "memwait list not empty!\n");
- if (dev->dma_mr)
- qib_dev_err(dd, "DMA MR not NULL!\n");
-
- qps_inuse = qib_free_all_qps(dd);
- if (qps_inuse)
- qib_dev_err(dd, "QP memory leak! %u still in use\n",
- qps_inuse);
del_timer_sync(&dev->mem_timer);
- qib_free_qpn_table(&dev->qpn_table);
while (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
struct qib_verbs_txreq *tx;
@@ -2366,21 +1782,36 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
dd->pport->sdma_descq_cnt *
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
- lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- vfree(dev->lk_table.table);
- kfree(dev->qp_table);
}
-/*
- * This must be called with s_lock held.
+/**
+ * _qib_schedule_send - schedule progress
+ * @qp: the qp
+ *
+ * This schedules progress without regard to the s_flags.
+ *
+ * It is only used in the post-send path, which does not hold
+ * the s_lock.
*/
-void qib_schedule_send(struct qib_qp *qp)
+void _qib_schedule_send(struct rvt_qp *qp)
{
- if (qib_send_ok(qp)) {
- struct qib_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct qib_qp_priv *priv = qp->priv;
- queue_work(ppd->qib_wq, &qp->s_work);
- }
+ queue_work(ppd->qib_wq, &priv->s_work);
+}
+
+/**
+ * qib_schedule_send - schedule progress
+ * @qp: the qp
+ *
+ * This schedules QP progress; the caller must hold
+ * the s_lock.
+ */
+void qib_schedule_send(struct rvt_qp *qp)
+{
+ if (qib_send_ok(qp))
+ _qib_schedule_send(qp);
}
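The two entry points differ only in their locking contract: _qib_schedule_send() serves the post-send path, while qib_schedule_send() gates on qib_send_ok() and expects the s_lock. A minimal caller sketch, assuming the s_lock spinlock that struct rvt_qp carries in this series (the helper name is hypothetical):

static void example_kick_qp(struct rvt_qp *qp)
{
	unsigned long flags;

	/* qib_schedule_send() must see a stable s_flags, so take s_lock */
	spin_lock_irqsave(&qp->s_lock, flags);
	qib_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}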
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 6c5e77753d85..4b76a8d59337 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -45,6 +45,8 @@
#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_cq.h>
struct qib_ctxtdata;
struct qib_pportdata;
@@ -53,9 +55,7 @@ struct qib_verbs_txreq;
#define QIB_MAX_RDMA_ATOMIC 16
#define QIB_GUIDS_PER_PORT 5
-
-#define QPN_MAX (1 << 24)
-#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
+#define QIB_PSN_SHIFT 8
/*
* Increment this value if any changes that break userspace ABI
@@ -63,12 +63,6 @@ struct qib_verbs_txreq;
*/
#define QIB_UVERBS_ABI_VERSION 2
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
-
#define IB_SEQ_NAK (3 << 29)
/* AETH NAK opcode values */
@@ -79,17 +73,6 @@ struct qib_verbs_txreq;
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST 0x64
-/* Flags for checking QP state (see ib_qib_state_ops[]) */
-#define QIB_POST_SEND_OK 0x01
-#define QIB_POST_RECV_OK 0x02
-#define QIB_PROCESS_RECV_OK 0x04
-#define QIB_PROCESS_SEND_OK 0x08
-#define QIB_PROCESS_NEXT_SEND_OK 0x10
-#define QIB_FLUSH_SEND 0x20
-#define QIB_FLUSH_RECV 0x40
-#define QIB_PROCESS_OR_FLUSH_SEND \
- (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)
-
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
@@ -203,468 +186,21 @@ struct qib_pio_header {
} __packed;
/*
- * There is one struct qib_mcast for each multicast GID.
- * All attached QPs are then stored as a list of
- * struct qib_mcast_qp.
+ * qib-specific data structure that will be hidden from rvt after the queue
+ * pair is made common.
*/
-struct qib_mcast_qp {
- struct list_head list;
- struct qib_qp *qp;
-};
-
-struct qib_mcast {
- struct rb_node rb_node;
- union ib_gid mgid;
- struct list_head qp_list;
- wait_queue_head_t wait;
- atomic_t refcount;
- int n_attached;
-};
-
-/* Protection domain */
-struct qib_pd {
- struct ib_pd ibpd;
- int user; /* non-zero if created from user space */
-};
-
-/* Address Handle */
-struct qib_ah {
- struct ib_ah ibah;
- struct ib_ah_attr attr;
- atomic_t refcount;
-};
-
-/*
- * This structure is used by qib_mmap() to validate an offset
- * when an mmap() request is made. The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct qib_mmap_info {
- struct list_head pending_mmaps;
- struct ib_ucontext *context;
- void *obj;
- __u64 offset;
- struct kref ref;
- unsigned size;
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct qib_cq_wc {
- u32 head; /* index of next entry to fill */
- u32 tail; /* index of next ib_poll_cq() entry */
- union {
- /* these are actually size ibcq.cqe + 1 */
- struct ib_uverbs_wc uqueue[0];
- struct ib_wc kqueue[0];
- };
-};
-
-/*
- * The completion queue structure.
- */
-struct qib_cq {
- struct ib_cq ibcq;
- struct kthread_work comptask;
- struct qib_devdata *dd;
- spinlock_t lock; /* protect changes in this struct */
- u8 notify;
- u8 triggered;
- struct qib_cq_wc *queue;
- struct qib_mmap_info *ip;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct qib_seg {
- void *vaddr;
- size_t length;
-};
-
-/* The number of qib_segs that fit in a page. */
-#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg))
-
-struct qib_segarray {
- struct qib_seg segs[QIB_SEGSZ];
-};
-
-struct qib_mregion {
- struct ib_pd *pd; /* shares refcnt of ibmr.pd */
- u64 user_base; /* User's address for this region */
- u64 iova; /* IB start address of this region */
- size_t length;
- u32 lkey;
- u32 offset; /* offset (bytes) to start of region */
- int access_flags;
- u32 max_segs; /* number of qib_segs in all the arrays */
- u32 mapsz; /* size of the map array */
- u8 page_shift; /* 0 - non unform/non powerof2 sizes */
- u8 lkey_published; /* in global table */
- struct completion comp; /* complete when refcount goes to zero */
- struct rcu_head list;
- atomic_t refcount;
- struct qib_segarray *map[0]; /* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct qib_sge {
- struct qib_mregion *mr;
- void *vaddr; /* kernel virtual address of segment */
- u32 sge_length; /* length of the SGE */
- u32 length; /* remaining length of the segment */
- u16 m; /* current index: mr->map[m] */
- u16 n; /* current index: mr->map[m]->segs[n] */
-};
-
-/* Memory region */
-struct qib_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- u64 *pages;
- u32 npages;
- struct qib_mregion mr; /* must be last */
-};
-
-/*
- * Send work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->s_max_sge.
- */
-struct qib_swqe {
- union {
- struct ib_send_wr wr; /* don't use wr.sg_list */
- struct ib_ud_wr ud_wr;
- struct ib_reg_wr reg_wr;
- struct ib_rdma_wr rdma_wr;
- struct ib_atomic_wr atomic_wr;
- };
- u32 psn; /* first packet sequence number */
- u32 lpsn; /* last packet sequence number */
- u32 ssn; /* send sequence number */
- u32 length; /* total length of data in sg_list */
- struct qib_sge sg_list[0];
-};
-
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct qib_rwqe {
- u64 wr_id;
- u8 num_sge;
- struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct qib_rwq {
- u32 head; /* new work requests posted to the head */
- u32 tail; /* receives pull requests from here. */
- struct qib_rwqe wq[0];
-};
-
-struct qib_rq {
- struct qib_rwq *wq;
- u32 size; /* size of RWQE array */
- u8 max_sge;
- spinlock_t lock /* protect changes in this struct */
- ____cacheline_aligned_in_smp;
-};
-
-struct qib_srq {
- struct ib_srq ibsrq;
- struct qib_rq rq;
- struct qib_mmap_info *ip;
- /* send signal when number of RWQEs < limit */
- u32 limit;
-};
-
-struct qib_sge_state {
- struct qib_sge *sg_list; /* next SGE to be used if any */
- struct qib_sge sge; /* progress state for the current SGE */
- u32 total_len;
- u8 num_sge;
-};
-
-/*
- * This structure holds the information that the send tasklet needs
- * to send a RDMA read response or atomic operation.
- */
-struct qib_ack_entry {
- u8 opcode;
- u8 sent;
- u32 psn;
- u32 lpsn;
- union {
- struct qib_sge rdma_sge;
- u64 atomic_data;
- };
-};
-
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
- *
- * Common variables are protected by both r_rq.lock and s_lock in that order
- * which only happens in modify_qp() or changing the QP 'state'.
- */
-struct qib_qp {
- struct ib_qp ibqp;
- /* read mostly fields above and below */
- struct ib_ah_attr remote_ah_attr;
- struct ib_ah_attr alt_ah_attr;
- struct qib_qp __rcu *next; /* link list for QPN hash table */
- struct qib_swqe *s_wq; /* send work queue */
- struct qib_mmap_info *ip;
- struct qib_ib_header *s_hdr; /* next packet header to send */
- unsigned long timeout_jiffies; /* computed from timeout */
-
- enum ib_mtu path_mtu;
- u32 remote_qpn;
- u32 pmtu; /* decoded from path_mtu */
- u32 qkey; /* QKEY for this QP (for UD or RD) */
- u32 s_size; /* send work queue size */
- u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
-
- u8 state; /* QP state */
- u8 qp_access_flags;
- u8 alt_timeout; /* Alternate path timeout for this QP */
- u8 timeout; /* Timeout for this QP */
- u8 s_srate;
- u8 s_mig_state;
- u8 port_num;
- u8 s_pkey_index; /* PKEY index to use */
- u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
- u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
- u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
- u8 s_retry_cnt; /* number of times to retry */
- u8 s_rnr_retry_cnt;
- u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
- u8 s_max_sge; /* size of s_wq->sg_list */
- u8 s_draining;
-
- /* start of read/write fields */
-
- atomic_t refcount ____cacheline_aligned_in_smp;
- wait_queue_head_t wait;
-
-
- struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]
- ____cacheline_aligned_in_smp;
- struct qib_sge_state s_rdma_read_sge;
-
- spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
- unsigned long r_aflags;
- u64 r_wr_id; /* ID for current receive WQE */
- u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
- u32 r_len; /* total length of r_sge */
- u32 r_rcv_len; /* receive data len processed */
- u32 r_psn; /* expected rcv packet sequence number */
- u32 r_msn; /* message sequence number */
-
- u8 r_state; /* opcode of last packet received */
- u8 r_flags;
- u8 r_head_ack_queue; /* index into s_ack_queue[] */
-
- struct list_head rspwait; /* link for waititing to respond */
-
- struct qib_sge_state r_sge; /* current receive data */
- struct qib_rq r_rq; /* receive work queue */
-
- spinlock_t s_lock ____cacheline_aligned_in_smp;
- struct qib_sge_state *s_cur_sge;
- u32 s_flags;
- struct qib_verbs_txreq *s_tx;
- struct qib_swqe *s_wqe;
- struct qib_sge_state s_sge; /* current send request data */
- struct qib_mregion *s_rdma_mr;
- atomic_t s_dma_busy;
- u32 s_cur_size; /* size of send packet in bytes */
- u32 s_len; /* total length of s_sge */
- u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
- u32 s_next_psn; /* PSN for next request */
- u32 s_last_psn; /* last response PSN processed */
- u32 s_sending_psn; /* lowest PSN that is being sent */
- u32 s_sending_hpsn; /* highest PSN that is being sent */
- u32 s_psn; /* current packet sequence number */
- u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
- u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
- u32 s_head; /* new entries added here */
- u32 s_tail; /* next entry to process */
- u32 s_cur; /* current work queue entry */
- u32 s_acked; /* last un-ACK'ed entry */
- u32 s_last; /* last completed entry */
- u32 s_ssn; /* SSN of tail entry */
- u32 s_lsn; /* limit sequence number (credit) */
- u16 s_hdrwords; /* size of s_hdr in 32 bit words */
- u16 s_rdma_ack_cnt;
- u8 s_state; /* opcode of last packet sent */
- u8 s_ack_state; /* opcode of packet to ACK */
- u8 s_nak_state; /* non-zero if NAK is pending */
- u8 r_nak_state; /* non-zero if NAK is pending */
- u8 s_retry; /* requester retry counter */
- u8 s_rnr_retry; /* requester RNR retry counter */
- u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
- u8 s_tail_ack_queue; /* index into s_ack_queue[] */
-
- struct qib_sge_state s_ack_rdma_sge;
- struct timer_list s_timer;
+struct qib_qp_priv {
+ struct qib_ib_header *s_hdr; /* next packet header to send */
struct list_head iowait; /* link for wait PIO buf */
-
+ atomic_t s_dma_busy;
+ struct qib_verbs_txreq *s_tx;
struct work_struct s_work;
-
wait_queue_head_t wait_dma;
-
- struct qib_sge r_sg_list[0] /* verified SGEs */
- ____cacheline_aligned_in_smp;
+ struct rvt_qp *owner;
};
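The owner back-pointer is what lets qib walk from its private data back to the shared QP once rdmavt owns struct rvt_qp. A hedged sketch of the allocation contract registered earlier in this patch as driver_f.qp_priv_alloc (the real body lands elsewhere in this series; this is illustrative only):

void *example_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			    gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;	/* back-pointer into the common rvt_qp */
	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	return priv;
}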
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define QIB_R_WRID_VALID 0
-#define QIB_R_REWIND_SGE 1
-
-/*
- * Bit definitions for r_flags.
- */
-#define QIB_R_REUSE_SGE 0x01
-#define QIB_R_RDMAR_SEQ 0x02
-#define QIB_R_RSP_NAK 0x04
-#define QIB_R_RSP_SEND 0x08
-#define QIB_R_COMM_EST 0x10
-
-/*
- * Bit definitions for s_flags.
- *
- * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
- * QIB_S_BUSY - send tasklet is processing the QP
- * QIB_S_TIMER - the RC retry timer is active
- * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
- * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
- * before processing the next SWQE
- * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
- * before processing the next SWQE
- * QIB_S_WAIT_RNR - waiting for RNR timeout
- * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- * next send completion entry not via send DMA
- * QIB_S_WAIT_PIO - waiting for a send buffer to be available
- * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available
- * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
- * QIB_S_WAIT_KMEM - waiting for kernel memory to be available
- * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
- * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests
- * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK
- */
-#define QIB_S_SIGNAL_REQ_WR 0x0001
-#define QIB_S_BUSY 0x0002
-#define QIB_S_TIMER 0x0004
-#define QIB_S_RESP_PENDING 0x0008
-#define QIB_S_ACK_PENDING 0x0010
-#define QIB_S_WAIT_FENCE 0x0020
-#define QIB_S_WAIT_RDMAR 0x0040
-#define QIB_S_WAIT_RNR 0x0080
-#define QIB_S_WAIT_SSN_CREDIT 0x0100
-#define QIB_S_WAIT_DMA 0x0200
-#define QIB_S_WAIT_PIO 0x0400
-#define QIB_S_WAIT_TX 0x0800
-#define QIB_S_WAIT_DMA_DESC 0x1000
-#define QIB_S_WAIT_KMEM 0x2000
-#define QIB_S_WAIT_PSN 0x4000
-#define QIB_S_WAIT_ACK 0x8000
-#define QIB_S_SEND_ONE 0x10000
-#define QIB_S_UNLIMITED_CREDIT 0x20000
-
-/*
- * Wait flags that would prevent any packet type from being sent.
- */
-#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \
- QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM)
-
-/*
- * Wait flags that would prevent send work requests from making progress.
- */
-#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \
- QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \
- QIB_S_WAIT_PSN | QIB_S_WAIT_ACK)
-
-#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND)
-
#define QIB_PSN_CREDIT 16
-/*
- * Since struct qib_swqe is not a fixed size, we can't simply index into
- * struct qib_qp.s_wq. This function does the array index computation.
- */
-static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp,
- unsigned n)
-{
- return (struct qib_swqe *)((char *)qp->s_wq +
- (sizeof(struct qib_swqe) +
- qp->s_max_sge *
- sizeof(struct qib_sge)) * n);
-}
-
-/*
- * Since struct qib_rwqe is not a fixed size, we can't simply index into
- * struct qib_rwq.wq. This function does the array index computation.
- */
-static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n)
-{
- return (struct qib_rwqe *)
- ((char *) rq->wq->wq +
- (sizeof(struct qib_rwqe) +
- rq->max_sge * sizeof(struct ib_sge)) * n);
-}
-
-/*
- * QPN-map pages start out as NULL, they get allocated upon
- * first use and are never deallocated. This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
- void *page;
-};
-
-struct qib_qpn_table {
- spinlock_t lock; /* protect changes in this struct */
- unsigned flags; /* flags for QP0/1 allocated for each port */
- u32 last; /* last QP number allocated */
- u32 nmaps; /* size of the map table */
- u16 limit;
- u16 mask;
- /* bit map of free QP numbers other than 0/1 */
- struct qpn_map map[QPNMAP_ENTRIES];
-};
-
-#define MAX_LKEY_TABLE_BITS 23
-
-struct qib_lkey_table {
- spinlock_t lock; /* protect changes in this struct */
- u32 next; /* next unused index (speeds search) */
- u32 gen; /* generation count */
- u32 max; /* size of the table */
- struct qib_mregion __rcu **table;
-};
-
struct qib_opcode_stats {
u64 n_packets; /* number of packets */
u64 n_bytes; /* total number of bytes */
@@ -682,21 +218,9 @@ struct qib_pma_counters {
};
struct qib_ibport {
- struct qib_qp __rcu *qp0;
- struct qib_qp __rcu *qp1;
- struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
- struct qib_ah *sm_ah;
- struct qib_ah *smi_ah;
- struct rb_root mcast_tree;
- spinlock_t lock; /* protect changes in this struct */
-
- /* non-zero when timer is set */
- unsigned long mkey_lease_timeout;
- unsigned long trap_timeout;
- __be64 gid_prefix; /* in network order */
- __be64 mkey;
+ struct rvt_ibport rvp;
+ struct rvt_ah *smi_ah;
__be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
- u64 tid; /* TID for traps */
struct qib_pma_counters __percpu *pmastats;
u64 z_unicast_xmit; /* starting count for PMA */
u64 z_unicast_rcv; /* starting count for PMA */
@@ -715,82 +239,25 @@ struct qib_ibport {
u32 z_local_link_integrity_errors; /* starting count for PMA */
u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
u32 z_vl15_dropped; /* starting count for PMA */
- u32 n_rc_resends;
- u32 n_rc_acks;
- u32 n_rc_qacks;
- u32 n_rc_delayed_comp;
- u32 n_seq_naks;
- u32 n_rdma_seq;
- u32 n_rnr_naks;
- u32 n_other_naks;
- u32 n_loop_pkts;
- u32 n_pkt_drops;
- u32 n_vl15_dropped;
- u32 n_rc_timeouts;
- u32 n_dmawait;
- u32 n_unaligned;
- u32 n_rc_dupreq;
- u32 n_rc_seqnak;
- u32 port_cap_flags;
- u32 pma_sample_start;
- u32 pma_sample_interval;
- __be16 pma_counter_select[5];
- u16 pma_tag;
- u16 pkey_violations;
- u16 qkey_violations;
- u16 mkey_violations;
- u16 mkey_lease_period;
- u16 sm_lid;
- u16 repress_traps;
- u8 sm_sl;
- u8 mkeyprot;
- u8 subnet_timeout;
- u8 vl_high_limit;
u8 sl_to_vl[16];
-
};
-
struct qib_ibdev {
- struct ib_device ibdev;
- struct list_head pending_mmaps;
- spinlock_t mmap_offset_lock; /* protect mmap_offset */
- u32 mmap_offset;
- struct qib_mregion __rcu *dma_mr;
-
- /* QP numbers are shared by all IB ports */
- struct qib_qpn_table qpn_table;
- struct qib_lkey_table lk_table;
+ struct rvt_dev_info rdi;
+
struct list_head piowait; /* list for wait PIO buf */
struct list_head dmawait; /* list for wait DMA */
struct list_head txwait; /* list for wait qib_verbs_txreq */
struct list_head memwait; /* list for wait kernel memory */
struct list_head txreq_free;
struct timer_list mem_timer;
- struct qib_qp __rcu **qp_table;
struct qib_pio_header *pio_hdrs;
dma_addr_t pio_hdrs_phys;
- /* list of QPs waiting for RNR timer */
- spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
- u32 qp_table_size; /* size of the hash table */
u32 qp_rnd; /* random bytes for hash */
- spinlock_t qpt_lock;
u32 n_piowait;
u32 n_txwait;
- u32 n_pds_allocated; /* number of PDs allocated for device */
- spinlock_t n_pds_lock;
- u32 n_ahs_allocated; /* number of AHs allocated for device */
- spinlock_t n_ahs_lock;
- u32 n_cqs_allocated; /* number of CQs allocated for device */
- spinlock_t n_cqs_lock;
- u32 n_qps_allocated; /* number of QPs allocated for device */
- spinlock_t n_qps_lock;
- u32 n_srqs_allocated; /* number of SRQs allocated for device */
- spinlock_t n_srqs_lock;
- u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
- spinlock_t n_mcast_grps_lock;
#ifdef CONFIG_DEBUG_FS
/* per HCA debugfs */
struct dentry *qib_ibdev_dbg;
@@ -813,56 +280,27 @@ struct qib_verbs_counters {
u32 vl15_dropped;
};
-static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct qib_mr, ibmr);
-}
-
-static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct qib_pd, ibpd);
-}
-
-static inline struct qib_ah *to_iah(struct ib_ah *ibah)
-{
- return container_of(ibah, struct qib_ah, ibah);
-}
-
-static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
-{
- return container_of(ibcq, struct qib_cq, ibcq);
-}
-
-static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
-{
- return container_of(ibsrq, struct qib_srq, ibsrq);
-}
-
-static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct qib_qp, ibqp);
-}
-
static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
- return container_of(ibdev, struct qib_ibdev, ibdev);
+ struct rvt_dev_info *rdi;
+
+ rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
+ return container_of(rdi, struct qib_ibdev, rdi);
}
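Because ib_device is now embedded two levels deep, to_idev() chains two container_of() calls. The same pattern extends one more level when the device data itself is needed; a hypothetical helper, relying on verbs_dev being the qib_ibdev member of qib_devdata as used in qib_register_ib_device() above:

static inline struct qib_devdata *example_dd_from_ibdev(struct ib_device *ibdev)
{
	struct qib_ibdev *dev = to_idev(ibdev);

	return container_of(dev, struct qib_devdata, verbs_dev);
}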
/*
* Send if not busy or waiting for I/O and either
* a RC response is pending or we can process send work requests.
*/
-static inline int qib_send_ok(struct qib_qp *qp)
+static inline int qib_send_ok(struct rvt_qp *qp)
{
- return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
- (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
- !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
+ return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
+ (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
}
-/*
- * This must be called with s_lock held.
- */
-void qib_schedule_send(struct qib_qp *qp);
+void _qib_schedule_send(struct rvt_qp *qp);
+void qib_schedule_send(struct rvt_qp *qp);
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
@@ -878,7 +316,7 @@ static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
-void qib_cap_mask_chg(struct qib_ibport *ibp);
+void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -886,8 +324,8 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_mad_hdr *in, size_t in_mad_size,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
-int qib_create_agents(struct qib_ibdev *dev);
-void qib_free_agents(struct qib_ibdev *dev);
+void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
+void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);
/*
* Compare the lower 24 bits of the two values.
@@ -898,8 +336,6 @@ static inline int qib_cmp24(u32 a, u32 b)
return (((int) a) - ((int) b)) << 8;
}
-struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid);
-
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
u64 *rwords, u64 *spkts, u64 *rpkts,
u64 *xmit_wait);
@@ -907,35 +343,17 @@ int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
int qib_get_counters(struct qib_pportdata *ppd,
struct qib_verbs_counters *cntrs);
-int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int qib_mcast_tree_empty(struct qib_ibport *ibp);
-
-__be32 qib_compute_aeth(struct qib_qp *qp);
-
-struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
-
-struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata);
-
-int qib_destroy_qp(struct ib_qp *ibqp);
-
-int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err);
-
-int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata);
-
-int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr);
-
-unsigned qib_free_all_qps(struct qib_devdata *dd);
+__be32 qib_compute_aeth(struct rvt_qp *qp);
-void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt);
-
-void qib_free_qpn_table(struct qib_qpn_table *qpt);
+/*
+ * Functions provided by the qib driver for rdmavt to use
+ */
+unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
+void qib_notify_qp_reset(struct rvt_qp *qp);
+int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+ enum ib_qp_type type, u8 port, gfp_t gfp);
#ifdef CONFIG_DEBUG_FS
@@ -949,7 +367,7 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter);
#endif
-void qib_get_credit(struct qib_qp *qp, u32 aeth);
+void qib_get_credit(struct rvt_qp *qp, u32 aeth);
unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
@@ -957,166 +375,66 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
void qib_put_txreq(struct qib_verbs_txreq *tx);
-int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
- u32 hdrwords, struct qib_sge_state *ss, u32 len);
+int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
+ u32 hdrwords, struct rvt_sge_state *ss, u32 len);
-void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length,
+void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
int release);
-void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release);
+void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release);
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
void qib_rc_rnr_retry(unsigned long arg);
-void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
+void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr);
-void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err);
+void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
-int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
+int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct qib_qp *qp);
-
-int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
-
-void qib_free_lkey(struct qib_mregion *mr);
-
-int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
- struct qib_sge *isge, struct ib_sge *sge, int acc);
-
-int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc);
-
-int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-
-struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
-
-int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata);
-
-int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-
-int qib_destroy_srq(struct ib_srq *ibsrq);
-
-int qib_cq_init(struct qib_devdata *dd);
-
-void qib_cq_exit(struct qib_devdata *dd);
-
-void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
-
-int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *qib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int qib_destroy_cq(struct ib_cq *ibcq);
-
-int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-
-int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
-struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);
-
-struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata);
-
-int qib_dereg_mr(struct ib_mr *ibmr);
-
-struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_entries);
-
-int qib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents);
-
-int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr);
-
-struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
-
-int qib_unmap_fmr(struct list_head *fmr_list);
-
-int qib_dealloc_fmr(struct ib_fmr *ibfmr);
-
-static inline void qib_get_mr(struct qib_mregion *mr)
-{
- atomic_inc(&mr->refcount);
-}
+ int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
void mr_rcu_callback(struct rcu_head *list);
-static inline void qib_put_mr(struct qib_mregion *mr)
-{
- if (unlikely(atomic_dec_and_test(&mr->refcount)))
- call_rcu(&mr->list, mr_rcu_callback);
-}
-
-static inline void qib_put_ss(struct qib_sge_state *ss)
-{
- while (ss->num_sge) {
- qib_put_mr(ss->sge.mr);
- if (--ss->num_sge)
- ss->sge = *ss->sg_list++;
- }
-}
-
-
-void qib_release_mmap_info(struct kref *ref);
+int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
- struct ib_ucontext *context,
- void *obj);
-
-void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
- u32 size, void *obj);
-
-int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);
-
-void qib_migrate_qp(struct qib_qp *qp);
+void qib_migrate_qp(struct rvt_qp *qp);
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
- int has_grh, struct qib_qp *qp, u32 bth0);
+ int has_grh, struct rvt_qp *qp, u32 bth0);
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
struct ib_global_route *grh, u32 hwords, u32 nwords);
-void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
+void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr,
u32 bth0, u32 bth2);
-void qib_do_send(struct work_struct *work);
+void _qib_do_send(struct work_struct *work);
+
+void qib_do_send(struct rvt_qp *qp);
-void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
+void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status);
-void qib_send_rc_ack(struct qib_qp *qp);
+void qib_send_rc_ack(struct rvt_qp *qp);
-int qib_make_rc_req(struct qib_qp *qp);
+int qib_make_rc_req(struct rvt_qp *qp);
-int qib_make_uc_req(struct qib_qp *qp);
+int qib_make_uc_req(struct rvt_qp *qp);
-int qib_make_ud_req(struct qib_qp *qp);
+int qib_make_ud_req(struct rvt_qp *qp);
int qib_register_ib_device(struct qib_devdata *);
@@ -1150,11 +468,11 @@ extern const enum ib_wc_opcode ib_qib_wc_opcode[];
#define IB_PHYSPORTSTATE_CFG_ENH 0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
-extern const int ib_qib_state_ops[];
+extern const int ib_rvt_state_ops[];
extern __be64 ib_qib_sys_image_guid; /* in network order */
-extern unsigned int ib_qib_lkey_table_size;
+extern unsigned int ib_rvt_lkey_table_size;
extern unsigned int ib_qib_max_cqes;
@@ -1178,6 +496,4 @@ extern unsigned int ib_qib_max_srq_wrs;
extern const u32 ib_qib_rnr_table[];
-extern struct ib_dma_mapping_ops qib_dma_mapping_ops;
-
#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
deleted file mode 100644
index b2fb5286dbd9..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/rculist.h>
-
-#include "qib.h"
-
-/**
- * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
- * @qp: the QP to link
- */
-static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
-{
- struct qib_mcast_qp *mqp;
-
- mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
- if (!mqp)
- goto bail;
-
- mqp->qp = qp;
- atomic_inc(&qp->refcount);
-
-bail:
- return mqp;
-}
-
-static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
-{
- struct qib_qp *qp = mqp->qp;
-
- /* Notify qib_destroy_qp() if it is waiting. */
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
-
- kfree(mqp);
-}
-
-/**
- * qib_mcast_alloc - allocate the multicast GID structure
- * @mgid: the multicast GID
- *
- * A list of QPs will be attached to this structure.
- */
-static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
-{
- struct qib_mcast *mcast;
-
- mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
- if (!mcast)
- goto bail;
-
- mcast->mgid = *mgid;
- INIT_LIST_HEAD(&mcast->qp_list);
- init_waitqueue_head(&mcast->wait);
- atomic_set(&mcast->refcount, 0);
- mcast->n_attached = 0;
-
-bail:
- return mcast;
-}
-
-static void qib_mcast_free(struct qib_mcast *mcast)
-{
- struct qib_mcast_qp *p, *tmp;
-
- list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
- qib_mcast_qp_free(p);
-
- kfree(mcast);
-}
-
-/**
- * qib_mcast_find - search the global table for the given multicast GID
- * @ibp: the IB port structure
- * @mgid: the multicast GID to search for
- *
- * Returns NULL if not found.
- *
- * The caller is responsible for decrementing the reference count if found.
- */
-struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
-{
- struct rb_node *n;
- unsigned long flags;
- struct qib_mcast *mcast;
-
- spin_lock_irqsave(&ibp->lock, flags);
- n = ibp->mcast_tree.rb_node;
- while (n) {
- int ret;
-
- mcast = rb_entry(n, struct qib_mcast, rb_node);
-
- ret = memcmp(mgid->raw, mcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0)
- n = n->rb_left;
- else if (ret > 0)
- n = n->rb_right;
- else {
- atomic_inc(&mcast->refcount);
- spin_unlock_irqrestore(&ibp->lock, flags);
- goto bail;
- }
- }
- spin_unlock_irqrestore(&ibp->lock, flags);
-
- mcast = NULL;
-
-bail:
- return mcast;
-}
-
-/**
- * qib_mcast_add - insert mcast GID into table and attach QP struct
- * @mcast: the mcast GID table
- * @mqp: the QP to attach
- *
- * Return zero if both were added. Return EEXIST if the GID was already in
- * the table but the QP was added. Return ESRCH if the QP was already
- * attached and neither structure was added.
- */
-static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
- struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
-{
- struct rb_node **n = &ibp->mcast_tree.rb_node;
- struct rb_node *pn = NULL;
- int ret;
-
- spin_lock_irq(&ibp->lock);
-
- while (*n) {
- struct qib_mcast *tmcast;
- struct qib_mcast_qp *p;
-
- pn = *n;
- tmcast = rb_entry(pn, struct qib_mcast, rb_node);
-
- ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0) {
- n = &pn->rb_left;
- continue;
- }
- if (ret > 0) {
- n = &pn->rb_right;
- continue;
- }
-
- /* Search the QP list to see if this is already there. */
- list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
- if (p->qp == mqp->qp) {
- ret = ESRCH;
- goto bail;
- }
- }
- if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
- ret = ENOMEM;
- goto bail;
- }
-
- tmcast->n_attached++;
-
- list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
- ret = EEXIST;
- goto bail;
- }
-
- spin_lock(&dev->n_mcast_grps_lock);
- if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
- spin_unlock(&dev->n_mcast_grps_lock);
- ret = ENOMEM;
- goto bail;
- }
-
- dev->n_mcast_grps_allocated++;
- spin_unlock(&dev->n_mcast_grps_lock);
-
- mcast->n_attached++;
-
- list_add_tail_rcu(&mqp->list, &mcast->qp_list);
-
- atomic_inc(&mcast->refcount);
- rb_link_node(&mcast->rb_node, pn, n);
- rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
-
- ret = 0;
-
-bail:
- spin_unlock_irq(&ibp->lock);
-
- return ret;
-}
-
-int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_ibport *ibp;
- struct qib_mcast *mcast;
- struct qib_mcast_qp *mqp;
- int ret;
-
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Allocate data structures since its better to do this outside of
- * spin locks and it will most likely be needed.
- */
- mcast = qib_mcast_alloc(gid);
- if (mcast == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
- mqp = qib_mcast_qp_alloc(qp);
- if (mqp == NULL) {
- qib_mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
- }
- ibp = to_iport(ibqp->device, qp->port_num);
- switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
- case ESRCH:
- /* Neither was used: OK to attach the same QP twice. */
- qib_mcast_qp_free(mqp);
- qib_mcast_free(mcast);
- break;
-
- case EEXIST: /* The mcast wasn't used */
- qib_mcast_free(mcast);
- break;
-
- case ENOMEM:
- /* Exceeded the maximum number of mcast groups. */
- qib_mcast_qp_free(mqp);
- qib_mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
-
- default:
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
- struct qib_qp *qp = to_iqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
- struct qib_mcast *mcast = NULL;
- struct qib_mcast_qp *p, *tmp, *delp = NULL;
- struct rb_node *n;
- int last = 0;
- int ret;
-
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
- return -EINVAL;
-
- spin_lock_irq(&ibp->lock);
-
- /* Find the GID in the mcast table. */
- n = ibp->mcast_tree.rb_node;
- while (1) {
- if (n == NULL) {
- spin_unlock_irq(&ibp->lock);
- return -EINVAL;
- }
-
- mcast = rb_entry(n, struct qib_mcast, rb_node);
- ret = memcmp(gid->raw, mcast->mgid.raw,
- sizeof(union ib_gid));
- if (ret < 0)
- n = n->rb_left;
- else if (ret > 0)
- n = n->rb_right;
- else
- break;
- }
-
- /* Search the QP list. */
- list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
- if (p->qp != qp)
- continue;
- /*
- * We found it, so remove it, but don't poison the forward
- * link until we are sure there are no list walkers.
- */
- list_del_rcu(&p->list);
- mcast->n_attached--;
- delp = p;
-
- /* If this was the last attached QP, remove the GID too. */
- if (list_empty(&mcast->qp_list)) {
- rb_erase(&mcast->rb_node, &ibp->mcast_tree);
- last = 1;
- }
- break;
- }
-
- spin_unlock_irq(&ibp->lock);
- /* QP not attached */
- if (!delp)
- return -EINVAL;
- /*
- * Wait for any list walkers to finish before freeing the
- * list element.
- */
- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
- qib_mcast_qp_free(delp);
-
- if (last) {
- atomic_dec(&mcast->refcount);
- wait_event(mcast->wait, !atomic_read(&mcast->refcount));
- qib_mcast_free(mcast);
- spin_lock_irq(&dev->n_mcast_grps_lock);
- dev->n_mcast_grps_allocated--;
- spin_unlock_irq(&dev->n_mcast_grps_lock);
- }
- return 0;
-}
-
-int qib_mcast_tree_empty(struct qib_ibport *ibp)
-{
- return ibp->mcast_tree.rb_node == NULL;
-}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 5f44b66ccb86..5b0248adf4ce 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -64,7 +64,7 @@ const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
case IB_QPS_ERR:
return "ERR";
default:
- return "UNKOWN STATE";
+ return "UNKNOWN STATE";
}
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 6cdb4d23f78f..a5bfbba6bbac 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -269,7 +269,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
union ib_gid gid;
struct ethtool_drvinfo info;
- struct ethtool_cmd cmd;
int qp_per_vf;
usnic_dbg("\n");
@@ -278,7 +277,6 @@ int usnic_ib_query_device(struct ib_device *ibdev,
mutex_lock(&us_ibdev->usdev_lock);
us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
- us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
memset(props, 0, sizeof(*props));
usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
&gid.raw[0]);
@@ -326,12 +324,12 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
- struct ethtool_cmd cmd;
+ struct ethtool_link_ksettings cmd;
usnic_dbg("\n");
mutex_lock(&us_ibdev->usdev_lock);
- us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
+ __ethtool_get_link_ksettings(us_ibdev->netdev, &cmd);
memset(props, 0, sizeof(*props));
props->lid = 0;
@@ -355,8 +353,8 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0;
- eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
- &props->active_width);
+ eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed,
+ &props->active_width);
props->max_mtu = IB_MTU_4096;
props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
/* Userspace will adjust for hdrs */
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 645a5f6e6c88..7209fbc03ccb 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -144,7 +144,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = 0;
while (npages) {
- ret = get_user_pages(current, current->mm, cur_base,
+ ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
1, !writable, page_list, NULL);
diff --git a/drivers/infiniband/sw/Makefile b/drivers/infiniband/sw/Makefile
new file mode 100644
index 000000000000..988b6a0101a4
--- /dev/null
+++ b/drivers/infiniband/sw/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt/
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
new file mode 100644
index 000000000000..11aa6a34bd71
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -0,0 +1,6 @@
+config INFINIBAND_RDMAVT
+ tristate "RDMA verbs transport library"
+ depends on 64BIT
+ default m
+ ---help---
+ This is a common software verbs provider for RDMA networks.
diff --git a/drivers/infiniband/sw/rdmavt/Makefile b/drivers/infiniband/sw/rdmavt/Makefile
new file mode 100644
index 000000000000..ccaa7992ac97
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/Makefile
@@ -0,0 +1,13 @@
+#
+# rdmavt driver
+#
+#
+#
+# Called from the kernel module build system.
+#
+obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt.o
+
+rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o srq.o \
+ trace.o
+
+CFLAGS_trace.o = -I$(src)
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
new file mode 100644
index 000000000000..16c446142c2a
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/slab.h>
+#include "ah.h"
+#include "vt.h" /* for prints */
+
+/**
+ * rvt_check_ah - validate the attributes of AH
+ * @ibdev: the ib device
+ * @ah_attr: the attributes of the AH
+ *
+ * If the driver supports a more detailed check_ah callback, defer to it;
+ * otherwise just check the basics.
+ *
+ * Return: 0 on success
+ */
+int rvt_check_ah(struct ib_device *ibdev,
+ struct ib_ah_attr *ah_attr)
+{
+ int err;
+ struct ib_port_attr port_attr;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ enum rdma_link_layer link = rdma_port_get_link_layer(ibdev,
+ ah_attr->port_num);
+
+ err = ib_query_port(ibdev, ah_attr->port_num, &port_attr);
+ if (err)
+ return -EINVAL;
+ if (ah_attr->port_num < 1 ||
+ ah_attr->port_num > ibdev->phys_port_cnt)
+ return -EINVAL;
+ if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
+ ib_rate_to_mbps(ah_attr->static_rate) < 0)
+ return -EINVAL;
+ if ((ah_attr->ah_flags & IB_AH_GRH) &&
+ ah_attr->grh.sgid_index >= port_attr.gid_tbl_len)
+ return -EINVAL;
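+ /*
+  * Non-Ethernet (IB/OPA) links need a unicast DLID; a DLID in the
+  * multicast range (other than the permissive LID) is only valid
+  * when a GRH is present.
+  */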
+ if (link != IB_LINK_LAYER_ETHERNET) {
+ if (ah_attr->dlid == 0)
+ return -EINVAL;
+ if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
+ ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) &&
+ !(ah_attr->ah_flags & IB_AH_GRH))
+ return -EINVAL;
+ }
+ if (rdi->driver_f.check_ah)
+ return rdi->driver_f.check_ah(ibdev, ah_attr);
+ return 0;
+}
+EXPORT_SYMBOL(rvt_check_ah);
+
+/**
+ * rvt_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: newly allocated ah
+ */
+struct ib_ah *rvt_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr)
+{
+ struct rvt_ah *ah;
+ struct rvt_dev_info *dev = ib_to_rvt(pd->device);
+ unsigned long flags;
+
+ if (rvt_check_ah(pd->device, ah_attr))
+ return ERR_PTR(-EINVAL);
+
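+ /* May be called from interrupt context, so this allocation must not sleep. */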
+ ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ if (dev->n_ahs_allocated == dev->dparms.props.max_ah) {
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+ kfree(ah);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev->n_ahs_allocated++;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ ah->attr = *ah_attr;
+ atomic_set(&ah->refcount, 0);
+
+ if (dev->driver_f.notify_new_ah)
+ dev->driver_f.notify_new_ah(pd->device, ah_attr, ah);
+
+ return &ah->ibah;
+}
+
+/**
+ * rvt_destroy_ah - Destroy an address handle
+ * @ibah: address handle
+ *
+ * Return: 0 on success
+ */
+int rvt_destroy_ah(struct ib_ah *ibah)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(ibah->device);
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+ unsigned long flags;
+
+ if (atomic_read(&ah->refcount) != 0)
+ return -EBUSY;
+
+ spin_lock_irqsave(&dev->n_ahs_lock, flags);
+ dev->n_ahs_allocated--;
+ spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
+
+ kfree(ah);
+
+ return 0;
+}
+
+/**
+ * rvt_modify_ah - modify an ah with given attrs
+ * @ibah: address handle to modify
+ * @ah_attr: attrs to apply
+ *
+ * Return: 0 on success
+ */
+int rvt_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+
+ if (rvt_check_ah(ibah->device, ah_attr))
+ return -EINVAL;
+
+ ah->attr = *ah_attr;
+
+ return 0;
+}
+
+/**
+ * rvt_query_ah - return attrs for ah
+ * @ibah: address handle to query
+ * @ah_attr: return info in this
+ *
+ * Return: always 0
+ */
+int rvt_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct rvt_ah *ah = ibah_to_rvtah(ibah);
+
+ *ah_attr = ah->attr;
+
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
new file mode 100644
index 000000000000..e9c36be87d79
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -0,0 +1,59 @@
+#ifndef DEF_RVTAH_H
+#define DEF_RVTAH_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+struct ib_ah *rvt_create_ah(struct ib_pd *pd,
+ struct ib_ah_attr *ah_attr);
+int rvt_destroy_ah(struct ib_ah *ibah);
+int rvt_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
+int rvt_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
+
+#endif /* DEF_RVTAH_H */
diff --git a/drivers/staging/rdma/hfi1/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 4f046ffe7e60..b1ffc8b4a6c0 100644
--- a/drivers/staging/rdma/hfi1/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -48,25 +45,23 @@
*
*/
-#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
-
-#include "verbs.h"
-#include "hfi.h"
+#include "cq.h"
+#include "vt.h"
/**
- * hfi1_cq_enter - add a new entry to the completion queue
+ * rvt_cq_enter - add a new entry to the completion queue
* @cq: completion queue
* @entry: work completion entry to add
- * @sig: true if @entry is a solicited entry
+ * @solicited: true if @entry is solicited
*
* This may be called with qp->s_lock held.
*/
-void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
+void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
- struct hfi1_cq_wc *wc;
+ struct rvt_cq_wc *wc;
unsigned long flags;
u32 head;
u32 next;
@@ -79,11 +74,13 @@ void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
*/
wc = cq->queue;
head = wc->head;
- if (head >= (unsigned) cq->ibcq.cqe) {
+ if (head >= (unsigned)cq->ibcq.cqe) {
head = cq->ibcq.cqe;
next = 0;
- } else
+ } else {
next = head + 1;
+ }
+
if (unlikely(next == wc->tail)) {
spin_unlock_irqrestore(&cq->lock, flags);
if (cq->ibcq.event_handler) {
@@ -114,8 +111,9 @@ void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
wc->uqueue[head].port_num = entry->port_num;
/* Make sure entry is written before the head index. */
smp_wmb();
- } else
+ } else {
wc->kqueue[head] = *entry;
+ }
wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -126,10 +124,10 @@ void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
* This will cause send_complete() to be called in
* another thread.
*/
- smp_read_barrier_depends(); /* see hfi1_cq_exit */
- worker = cq->dd->worker;
+ smp_read_barrier_depends(); /* see rvt_cq_exit */
+ worker = cq->rdi->worker;
if (likely(worker)) {
- cq->notify = IB_CQ_NONE;
+ cq->notify = RVT_CQ_NONE;
cq->triggered++;
queue_kthread_work(worker, &cq->comptask);
}
@@ -137,59 +135,11 @@ void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited)
spin_unlock_irqrestore(&cq->lock, flags);
}
-
-/**
- * hfi1_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context. Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
- struct hfi1_cq *cq = to_icq(ibcq);
- struct hfi1_cq_wc *wc;
- unsigned long flags;
- int npolled;
- u32 tail;
-
- /* The kernel can only poll a kernel completion queue */
- if (cq->ip) {
- npolled = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&cq->lock, flags);
-
- wc = cq->queue;
- tail = wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
- for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
- if (tail == wc->head)
- break;
- /* The kernel doesn't need a RMB since it has the lock. */
- *entry = wc->kqueue[tail];
- if (tail >= cq->ibcq.cqe)
- tail = 0;
- else
- tail++;
- }
- wc->tail = tail;
-
- spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
- return npolled;
-}
+EXPORT_SYMBOL(rvt_cq_enter);
static void send_complete(struct kthread_work *work)
{
- struct hfi1_cq *cq = container_of(work, struct hfi1_cq, comptask);
+ struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
/*
* The completion handler will most likely rearm the notification
@@ -217,26 +167,25 @@ static void send_complete(struct kthread_work *work)
}
/**
- * hfi1_create_cq - create a completion queue
+ * rvt_create_cq - create a completion queue
* @ibdev: the device this completion queue is attached to
* @attr: creation attributes
- * @context: unused by the driver
+ * @context: unused by the rdmavt driver
* @udata: user data for libibverbs.so
*
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
* Called by ib_create_cq() in the generic verbs code.
+ *
+ * Return: pointer to the completion queue or negative errno values
+ * for failure.
*/
-struct ib_cq *hfi1_create_cq(
- struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
+struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
{
- struct hfi1_ibdev *dev = to_idev(ibdev);
- struct hfi1_cq *cq;
- struct hfi1_cq_wc *wc;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_cq *cq;
+ struct rvt_cq_wc *wc;
struct ib_cq *ret;
u32 sz;
unsigned int entries = attr->cqe;
@@ -244,11 +193,11 @@ struct ib_cq *hfi1_create_cq(
if (attr->flags)
return ERR_PTR(-EINVAL);
- if (entries < 1 || entries > hfi1_max_cqes)
+ if (entries < 1 || entries > rdi->dparms.props.max_cqe)
return ERR_PTR(-EINVAL);
/* Allocate the completion queue structure. */
- cq = kmalloc(sizeof(*cq), GFP_KERNEL);
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
if (!cq)
return ERR_PTR(-ENOMEM);
@@ -272,12 +221,12 @@ struct ib_cq *hfi1_create_cq(
/*
* Return the address of the WC as the offset to mmap.
- * See hfi1_mmap() for details.
+ * See rvt_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
int err;
- cq->ip = hfi1_create_mmap_info(dev, sz, context, wc);
+ cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
if (!cq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
@@ -289,23 +238,22 @@ struct ib_cq *hfi1_create_cq(
ret = ERR_PTR(err);
goto bail_ip;
}
- } else
- cq->ip = NULL;
+ }
- spin_lock(&dev->n_cqs_lock);
- if (dev->n_cqs_allocated == hfi1_max_cqs) {
- spin_unlock(&dev->n_cqs_lock);
+ spin_lock(&rdi->n_cqs_lock);
+ if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
+ spin_unlock(&rdi->n_cqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_ip;
}
- dev->n_cqs_allocated++;
- spin_unlock(&dev->n_cqs_lock);
+ rdi->n_cqs_allocated++;
+ spin_unlock(&rdi->n_cqs_lock);
if (cq->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ spin_lock_irq(&rdi->pending_lock);
+ list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
}
/*
@@ -313,14 +261,11 @@ struct ib_cq *hfi1_create_cq(
* The number of entries should be >= the number requested or return
* an error.
*/
- cq->dd = dd_from_dev(dev);
+ cq->rdi = rdi;
cq->ibcq.cqe = entries;
- cq->notify = IB_CQ_NONE;
- cq->triggered = 0;
+ cq->notify = RVT_CQ_NONE;
spin_lock_init(&cq->lock);
init_kthread_work(&cq->comptask, send_complete);
- wc->head = 0;
- wc->tail = 0;
cq->queue = wc;
ret = &cq->ibcq;
@@ -338,24 +283,24 @@ done:
}
/**
- * hfi1_destroy_cq - destroy a completion queue
+ * rvt_destroy_cq - destroy a completion queue
* @ibcq: the completion queue to destroy.
*
- * Returns 0 for success.
- *
* Called by ib_destroy_cq() in the generic verbs code.
+ *
+ * Return: always 0
*/
-int hfi1_destroy_cq(struct ib_cq *ibcq)
+int rvt_destroy_cq(struct ib_cq *ibcq)
{
- struct hfi1_ibdev *dev = to_idev(ibcq->device);
- struct hfi1_cq *cq = to_icq(ibcq);
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_dev_info *rdi = cq->rdi;
flush_kthread_work(&cq->comptask);
- spin_lock(&dev->n_cqs_lock);
- dev->n_cqs_allocated--;
- spin_unlock(&dev->n_cqs_lock);
+ spin_lock(&rdi->n_cqs_lock);
+ rdi->n_cqs_allocated--;
+ spin_unlock(&rdi->n_cqs_lock);
if (cq->ip)
- kref_put(&cq->ip->ref, hfi1_release_mmap_info);
+ kref_put(&cq->ip->ref, rvt_release_mmap_info);
else
vfree(cq->queue);
kfree(cq);
@@ -364,18 +309,18 @@ int hfi1_destroy_cq(struct ib_cq *ibcq)
}
/**
- * hfi1_req_notify_cq - change the notification type for a completion queue
+ * rvt_req_notify_cq - change the notification type for a completion queue
* @ibcq: the completion queue
* @notify_flags: the type of notification to request
*
- * Returns 0 for success.
- *
* This may be called from interrupt context. Also called by
* ib_req_notify_cq() in the generic verbs code.
+ *
+ * Return: 0 for success.
*/
-int hfi1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
- struct hfi1_cq *cq = to_icq(ibcq);
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
unsigned long flags;
int ret = 0;
@@ -397,24 +342,23 @@ int hfi1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
}
/**
- * hfi1_resize_cq - change the size of the CQ
+ * rvt_resize_cq - change the size of the CQ
* @ibcq: the completion queue
*
- * Returns 0 for success.
+ * Return: 0 for success.
*/
-int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
+int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
- struct hfi1_cq *cq = to_icq(ibcq);
- struct hfi1_cq_wc *old_wc;
- struct hfi1_cq_wc *wc;
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_cq_wc *old_wc;
+ struct rvt_cq_wc *wc;
u32 head, tail, n;
int ret;
u32 sz;
+ struct rvt_dev_info *rdi = cq->rdi;
- if (cqe < 1 || cqe > hfi1_max_cqes) {
- ret = -EINVAL;
- goto bail;
- }
+ if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
+ return -EINVAL;
/*
* Need to use vmalloc() if we want to support large #s of entries.
@@ -425,10 +369,8 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
else
sz += sizeof(struct ib_wc) * (cqe + 1);
wc = vmalloc_user(sz);
- if (!wc) {
- ret = -ENOMEM;
- goto bail;
- }
+ if (!wc)
+ return -ENOMEM;
/* Check that we can write the offset to mmap. */
if (udata && udata->outlen >= sizeof(__u64)) {
@@ -446,11 +388,11 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
*/
old_wc = cq->queue;
head = old_wc->head;
- if (head > (u32) cq->ibcq.cqe)
- head = (u32) cq->ibcq.cqe;
+ if (head > (u32)cq->ibcq.cqe)
+ head = (u32)cq->ibcq.cqe;
tail = old_wc->tail;
- if (tail > (u32) cq->ibcq.cqe)
- tail = (u32) cq->ibcq.cqe;
+ if (tail > (u32)cq->ibcq.cqe)
+ tail = (u32)cq->ibcq.cqe;
if (head < tail)
n = cq->ibcq.cqe + 1 + head - tail;
else
@@ -464,7 +406,7 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
wc->uqueue[n] = old_wc->uqueue[tail];
else
wc->kqueue[n] = old_wc->kqueue[tail];
- if (tail == (u32) cq->ibcq.cqe)
+ if (tail == (u32)cq->ibcq.cqe)
tail = 0;
else
tail++;
@@ -478,80 +420,131 @@ int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
vfree(old_wc);
if (cq->ip) {
- struct hfi1_ibdev *dev = to_idev(ibcq->device);
- struct hfi1_mmap_info *ip = cq->ip;
+ struct rvt_mmap_info *ip = cq->ip;
- hfi1_update_mmap_info(dev, ip, sz, wc);
+ rvt_update_mmap_info(rdi, ip, sz, wc);
/*
* Return the offset to mmap.
- * See hfi1_mmap() for details.
+ * See rvt_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
ret = ib_copy_to_udata(udata, &ip->offset,
sizeof(ip->offset));
if (ret)
- goto bail;
+ return ret;
}
- spin_lock_irq(&dev->pending_lock);
+ spin_lock_irq(&rdi->pending_lock);
if (list_empty(&ip->pending_mmaps))
- list_add(&ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
}
- ret = 0;
- goto bail;
+ return 0;
bail_unlock:
spin_unlock_irq(&cq->lock);
bail_free:
vfree(wc);
-bail:
return ret;
}
-int hfi1_cq_init(struct hfi1_devdata *dd)
+/**
+ * rvt_poll_cq - poll for work completion entries
+ * @ibcq: the completion queue to poll
+ * @num_entries: the maximum number of entries to return
+ * @entry: pointer to array where work completions are placed
+ *
+ * This may be called from interrupt context. Also called by ib_poll_cq()
+ * in the generic verbs code.
+ *
+ * Return: the number of completion entries polled.
+ */
+int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
+ struct rvt_cq_wc *wc;
+ unsigned long flags;
+ int npolled;
+ u32 tail;
+
+ /* The kernel can only poll a kernel completion queue */
+ if (cq->ip)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cq->lock, flags);
+
+ wc = cq->queue;
+ tail = wc->tail;
+ if (tail > (u32)cq->ibcq.cqe)
+ tail = (u32)cq->ibcq.cqe;
+ for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+ if (tail == wc->head)
+ break;
+ /* The kernel doesn't need a RMB since it has the lock. */
+ *entry = wc->kqueue[tail];
+ if (tail >= cq->ibcq.cqe)
+ tail = 0;
+ else
+ tail++;
+ }
+ wc->tail = tail;
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return npolled;
+}
+
+/**
+ * rvt_driver_cq_init - Init cq resources on behalf of driver
+ * @rdi: rvt dev structure
+ *
+ * Return: 0 on success
+ */
+int rvt_driver_cq_init(struct rvt_dev_info *rdi)
{
int ret = 0;
int cpu;
struct task_struct *task;
- if (dd->worker)
+ if (rdi->worker)
return 0;
- dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
- if (!dd->worker)
+ rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
+ if (!rdi->worker)
return -ENOMEM;
- init_kthread_worker(dd->worker);
+ init_kthread_worker(rdi->worker);
task = kthread_create_on_node(
kthread_worker_fn,
- dd->worker,
- dd->assigned_node_id,
- "hfi1_cq%d", dd->unit);
- if (IS_ERR(task))
- goto task_fail;
- cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
+ rdi->worker,
+ rdi->dparms.node,
+ "%s", rdi->dparms.cq_name);
+ if (IS_ERR(task)) {
+ kfree(rdi->worker);
+ rdi->worker = NULL;
+ return PTR_ERR(task);
+ }
+
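+ /*
+  * Pin the completion worker to a CPU on the device's local NUMA
+  * node so completions are processed close to the hardware.
+  */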
+ cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
kthread_bind(task, cpu);
wake_up_process(task);
-out:
return ret;
-task_fail:
- ret = PTR_ERR(task);
- kfree(dd->worker);
- dd->worker = NULL;
- goto out;
}
-void hfi1_cq_exit(struct hfi1_devdata *dd)
+/**
+ * rvt_cq_exit - tear down cq resources
+ * @rdi: rvt dev structure
+ */
+void rvt_cq_exit(struct rvt_dev_info *rdi)
{
struct kthread_worker *worker;
- worker = dd->worker;
+ worker = rdi->worker;
if (!worker)
return;
/* blocks future queuing from send_complete() */
- dd->worker = NULL;
- smp_wmb(); /* See hfi1_cq_enter */
+ rdi->worker = NULL;
+ smp_wmb(); /* see rvt_cq_enter() */
flush_kthread_worker(worker);
kthread_stop(worker->task);
kfree(worker);
diff --git a/drivers/infiniband/sw/rdmavt/cq.h b/drivers/infiniband/sw/rdmavt/cq.h
new file mode 100644
index 000000000000..6182c29eff66
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/cq.h
@@ -0,0 +1,64 @@
+#ifndef DEF_RVTCQ_H
+#define DEF_RVTCQ_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_cq.h>
+
+struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int rvt_destroy_cq(struct ib_cq *ibcq);
+int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
+int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
+int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
+int rvt_driver_cq_init(struct rvt_dev_info *rdi);
+void rvt_cq_exit(struct rvt_dev_info *rdi);
+#endif /* DEF_RVTCQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
new file mode 100644
index 000000000000..33076a5eee2f
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <rdma/ib_verbs.h>
+
+#include "dma.h"
+
+#define BAD_DMA_ADDRESS ((u64)0)
+
+/*
+ * The following functions implement driver specific replacements
+ * for the ib_dma_*() functions.
+ *
+ * These functions return kernel virtual addresses instead of
+ * device bus addresses since the driver uses the CPU to copy
+ * data instead of using hardware DMA.
+ */
+
+static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+ return dma_addr == BAD_DMA_ADDRESS;
+}
+
+static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
+ return (u64)cpu_addr;
+}
+
+static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ u64 addr;
+
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return BAD_DMA_ADDRESS;
+
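+ /* A single-page "mapping" must not run past the end of the page. */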
+ if (offset + size > PAGE_SIZE)
+ return BAD_DMA_ADDRESS;
+
+ addr = (u64)page_address(page);
+ if (addr)
+ addr += offset;
+
+ return addr;
+}
+
+static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ u64 addr;
+ int i;
+ int ret = nents;
+
+ if (WARN_ON(!valid_dma_direction(direction)))
+ return 0;
+
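+ /*
+  * page_address() returns NULL for highmem pages, which have no
+  * permanent kernel mapping to copy through; fail the whole map.
+  */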
+ for_each_sg(sgl, sg, nents, i) {
+ addr = (u64)page_address(sg_page(sg));
+ if (!addr) {
+ ret = 0;
+ break;
+ }
+ sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
+ }
+ return ret;
+}
+
+static void rvt_unmap_sg(struct ib_device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ /* This is a stub, nothing to be done here */
+}
+
+static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size,
+ u64 *dma_handle, gfp_t flag)
+{
+ struct page *p;
+ void *addr = NULL;
+
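+ /*
+  * No real DMA happens here; the kernel virtual address doubles as
+  * the "DMA handle" for the CPU-copy model used in this file.
+  */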
+ p = alloc_pages(flag, get_order(size));
+ if (p)
+ addr = page_address(p);
+ if (dma_handle)
+ *dma_handle = (u64)addr;
+ return addr;
+}
+
+static void rvt_dma_free_coherent(struct ib_device *dev, size_t size,
+ void *cpu_addr, u64 dma_handle)
+{
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
+ .mapping_error = rvt_mapping_error,
+ .map_single = rvt_dma_map_single,
+ .unmap_single = rvt_dma_unmap_single,
+ .map_page = rvt_dma_map_page,
+ .unmap_page = rvt_dma_unmap_page,
+ .map_sg = rvt_map_sg,
+ .unmap_sg = rvt_unmap_sg,
+ .sync_single_for_cpu = rvt_sync_single_for_cpu,
+ .sync_single_for_device = rvt_sync_single_for_device,
+ .alloc_coherent = rvt_dma_alloc_coherent,
+ .free_coherent = rvt_dma_free_coherent
+};
diff --git a/drivers/infiniband/sw/rdmavt/dma.h b/drivers/infiniband/sw/rdmavt/dma.h
new file mode 100644
index 000000000000..979f07e09195
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/dma.h
@@ -0,0 +1,53 @@
+#ifndef DEF_RDMAVTDMA_H
+#define DEF_RDMAVTDMA_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+extern struct ib_dma_mapping_ops rvt_default_dma_mapping_ops;
+
+#endif /* DEF_RDMAVTDMA_H */
diff --git a/drivers/infiniband/sw/rdmavt/mad.c b/drivers/infiniband/sw/rdmavt/mad.c
new file mode 100644
index 000000000000..f6e99778d7ca
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mad.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/ib_mad.h>
+#include "mad.h"
+#include "vt.h"
+
+/**
+ * rvt_process_mad - process an incoming MAD packet
+ * @ibdev: the infiniband device this packet came in on
+ * @mad_flags: MAD flags
+ * @port_num: the port number this packet came in on, 1-based from the ib core
+ * @in_wc: the work completion entry for this packet
+ * @in_grh: the global route header for this packet
+ * @in_mad: the incoming MAD
+ * @out_mad: any outgoing MAD reply
+ *
+ * Note that the verbs framework has already done the MAD sanity checks,
+ * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
+ * MADs.
+ *
+ * This is called by the ib_mad module.
+ *
+ * Return: IB_MAD_RESULT_SUCCESS or error
+ */
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ /*
+ * MAD processing is quite different between hfi1 and qib. Therefore this
+ * is expected to be provided by the driver. Other drivers in the future
+ * may choose to implement this, but it should not be made into a
+ * requirement.
+ */
+ if (ibport_num_to_idx(ibdev, port_num) < 0)
+ return -EINVAL;
+
+ return IB_MAD_RESULT_FAILURE;
+}
+
+static void rvt_send_mad_handler(struct ib_mad_agent *agent,
+ struct ib_mad_send_wc *mad_send_wc)
+{
+ ib_free_send_mad(mad_send_wc->send_buf);
+}
+
+/**
+ * rvt_create_mad_agents - create mad agents
+ * @rdi: rvt dev struct
+ *
+ * If the driver needs to be notified of MAD agent creation, call back to it.
+ *
+ * Return: 0 on success
+ */
+int rvt_create_mad_agents(struct rvt_dev_info *rdi)
+{
+ struct ib_mad_agent *agent;
+ struct rvt_ibport *rvp;
+ int p;
+ int ret;
+
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
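+ /*
+  * Register a send-only agent on the SMI QP: no receive handler or
+  * registration request is given, so this agent only sends MADs
+  * (e.g. traps) and frees them on send completion.
+  */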
+ agent = ib_register_mad_agent(&rdi->ibdev, p + 1,
+ IB_QPT_SMI,
+ NULL, 0, rvt_send_mad_handler,
+ NULL, NULL, 0);
+ if (IS_ERR(agent)) {
+ ret = PTR_ERR(agent);
+ goto err;
+ }
+
+ rvp->send_agent = agent;
+
+ if (rdi->driver_f.notify_create_mad_agent)
+ rdi->driver_f.notify_create_mad_agent(rdi, p);
+ }
+
+ return 0;
+
+err:
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
+ if (rvp->send_agent) {
+ agent = rvp->send_agent;
+ rvp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ if (rdi->driver_f.notify_free_mad_agent)
+ rdi->driver_f.notify_free_mad_agent(rdi, p);
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * rvt_free_mad_agents - free up mad agents
+ * @rdi: rvt dev struct
+ *
+ * If the driver needs notification of MAD agent removal, make the callback.
+ */
+void rvt_free_mad_agents(struct rvt_dev_info *rdi)
+{
+ struct ib_mad_agent *agent;
+ struct rvt_ibport *rvp;
+ int p;
+
+ for (p = 0; p < rdi->dparms.nports; p++) {
+ rvp = rdi->ports[p];
+ if (rvp->send_agent) {
+ agent = rvp->send_agent;
+ rvp->send_agent = NULL;
+ ib_unregister_mad_agent(agent);
+ }
+ if (rvp->sm_ah) {
+ ib_destroy_ah(&rvp->sm_ah->ibah);
+ rvp->sm_ah = NULL;
+ }
+
+ if (rdi->driver_f.notify_free_mad_agent)
+ rdi->driver_f.notify_free_mad_agent(rdi, p);
+ }
+}
+
diff --git a/drivers/infiniband/sw/rdmavt/mad.h b/drivers/infiniband/sw/rdmavt/mad.h
new file mode 100644
index 000000000000..a9d6eecc3723
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mad.h
@@ -0,0 +1,60 @@
+#ifndef DEF_RVTMAD_H
+#define DEF_RVTMAD_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+int rvt_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
+int rvt_create_mad_agents(struct rvt_dev_info *rdi);
+void rvt_free_mad_agents(struct rvt_dev_info *rdi);
+#endif /* DEF_RVTMAD_H */
diff --git a/drivers/staging/rdma/hfi1/verbs_mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index afc6b4c61a1d..983d319ac976 100644
--- a/drivers/staging/rdma/hfi1/verbs_mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -48,17 +45,36 @@
*
*/
+#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/rculist.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
-#include "hfi.h"
+#include "mcast.h"
+
+/**
+ * rvt_driver_mcast_init - init resources for multicast
+ * @rdi: rvt dev struct
+ *
+ * This is called once per device that registers with rdmavt.
+ */
+void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
+{
+ /*
+ * Anything that needs setup for multicast on a per driver or per rdi
+ * basis should be done in here.
+ */
+ spin_lock_init(&rdi->n_mcast_grps_lock);
+}
/**
* mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
* @qp: the QP to link
*/
-static struct hfi1_mcast_qp *mcast_qp_alloc(struct hfi1_qp *qp)
+static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
- struct hfi1_mcast_qp *mqp;
+ struct rvt_mcast_qp *mqp;
mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
if (!mqp)
@@ -71,9 +87,9 @@ bail:
return mqp;
}
-static void mcast_qp_free(struct hfi1_mcast_qp *mqp)
+static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
- struct hfi1_qp *qp = mqp->qp;
+ struct rvt_qp *qp = mqp->qp;
/* Notify hfi1_destroy_qp() if it is waiting. */
if (atomic_dec_and_test(&qp->refcount))
@@ -88,11 +104,11 @@ static void mcast_qp_free(struct hfi1_mcast_qp *mqp)
*
* A list of QPs will be attached to this structure.
*/
-static struct hfi1_mcast *mcast_alloc(union ib_gid *mgid)
+static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid)
{
- struct hfi1_mcast *mcast;
+ struct rvt_mcast *mcast;
- mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
+ mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
if (!mcast)
goto bail;
@@ -100,75 +116,72 @@ static struct hfi1_mcast *mcast_alloc(union ib_gid *mgid)
INIT_LIST_HEAD(&mcast->qp_list);
init_waitqueue_head(&mcast->wait);
atomic_set(&mcast->refcount, 0);
- mcast->n_attached = 0;
bail:
return mcast;
}
-static void mcast_free(struct hfi1_mcast *mcast)
+static void rvt_mcast_free(struct rvt_mcast *mcast)
{
- struct hfi1_mcast_qp *p, *tmp;
+ struct rvt_mcast_qp *p, *tmp;
list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
- mcast_qp_free(p);
+ rvt_mcast_qp_free(p);
kfree(mcast);
}
/**
- * hfi1_mcast_find - search the global table for the given multicast GID
+ * rvt_mcast_find - search the global table for the given multicast GID
* @ibp: the IB port structure
* @mgid: the multicast GID to search for
*
- * Returns NULL if not found.
- *
* The caller is responsible for decrementing the reference count if found.
+ *
+ * Return: NULL if not found.
*/
-struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid)
+struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid)
{
struct rb_node *n;
unsigned long flags;
- struct hfi1_mcast *mcast;
+ struct rvt_mcast *found = NULL;
spin_lock_irqsave(&ibp->lock, flags);
n = ibp->mcast_tree.rb_node;
while (n) {
int ret;
+ struct rvt_mcast *mcast;
- mcast = rb_entry(n, struct hfi1_mcast, rb_node);
+ mcast = rb_entry(n, struct rvt_mcast, rb_node);
ret = memcmp(mgid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
- if (ret < 0)
+ if (ret < 0) {
n = n->rb_left;
- else if (ret > 0)
+ } else if (ret > 0) {
n = n->rb_right;
- else {
+ } else {
atomic_inc(&mcast->refcount);
- spin_unlock_irqrestore(&ibp->lock, flags);
- goto bail;
+ found = mcast;
+ break;
}
}
spin_unlock_irqrestore(&ibp->lock, flags);
-
- mcast = NULL;
-
-bail:
- return mcast;
+ return found;
}
+EXPORT_SYMBOL(rvt_mcast_find);
/**
* mcast_add - insert mcast GID into table and attach QP struct
* @mcast: the mcast GID table
* @mqp: the QP to attach
*
- * Return zero if both were added. Return EEXIST if the GID was already in
+ * Return: zero if both were added. Return EEXIST if the GID was already in
* the table but the QP was added. Return ESRCH if the QP was already
* attached and neither structure was added.
*/
-static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
- struct hfi1_mcast *mcast, struct hfi1_mcast_qp *mqp)
+static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
+ struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
struct rb_node **n = &ibp->mcast_tree.rb_node;
struct rb_node *pn = NULL;
@@ -177,11 +190,11 @@ static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
spin_lock_irq(&ibp->lock);
while (*n) {
- struct hfi1_mcast *tmcast;
- struct hfi1_mcast_qp *p;
+ struct rvt_mcast *tmcast;
+ struct rvt_mcast_qp *p;
pn = *n;
- tmcast = rb_entry(pn, struct hfi1_mcast, rb_node);
+ tmcast = rb_entry(pn, struct rvt_mcast, rb_node);
ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
sizeof(union ib_gid));
@@ -201,7 +214,8 @@ static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
goto bail;
}
}
- if (tmcast->n_attached == hfi1_max_mcast_qp_attached) {
+ if (tmcast->n_attached ==
+ rdi->dparms.props.max_mcast_qp_attach) {
ret = ENOMEM;
goto bail;
}
@@ -213,15 +227,15 @@ static int mcast_add(struct hfi1_ibdev *dev, struct hfi1_ibport *ibp,
goto bail;
}
- spin_lock(&dev->n_mcast_grps_lock);
- if (dev->n_mcast_grps_allocated == hfi1_max_mcast_grps) {
- spin_unlock(&dev->n_mcast_grps_lock);
+ spin_lock(&rdi->n_mcast_grps_lock);
+ if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
+ spin_unlock(&rdi->n_mcast_grps_lock);
ret = ENOMEM;
goto bail;
}
- dev->n_mcast_grps_allocated++;
- spin_unlock(&dev->n_mcast_grps_lock);
+ rdi->n_mcast_grps_allocated++;
+ spin_unlock(&rdi->n_mcast_grps_lock);
mcast->n_attached++;
@@ -239,92 +253,98 @@ bail:
return ret;
}
-int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+/**
+ * rvt_attach_mcast - attach a qp to a multicast group
+ * @ibqp: Infiniband qp
+ * @gid: multicast GID
+ * @lid: multicast lid
+ *
+ * Return: 0 on success
+ */
+int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- struct hfi1_qp *qp = to_iqp(ibqp);
- struct hfi1_ibdev *dev = to_idev(ibqp->device);
- struct hfi1_ibport *ibp;
- struct hfi1_mcast *mcast;
- struct hfi1_mcast_qp *mqp;
- int ret;
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
+ struct rvt_mcast *mcast;
+ struct rvt_mcast_qp *mqp;
+ int ret = -ENOMEM;
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+ return -EINVAL;
/*
* Allocate data structures since its better to do this outside of
* spin locks and it will most likely be needed.
*/
- mcast = mcast_alloc(gid);
- if (mcast == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
- mqp = mcast_qp_alloc(qp);
- if (mqp == NULL) {
- mcast_free(mcast);
- ret = -ENOMEM;
- goto bail;
- }
- ibp = to_iport(ibqp->device, qp->port_num);
- switch (mcast_add(dev, ibp, mcast, mqp)) {
- case ESRCH:
- /* Neither was used: OK to attach the same QP twice. */
- mcast_qp_free(mqp);
- mcast_free(mcast);
- break;
+ mcast = rvt_mcast_alloc(gid);
+ if (!mcast)
+ return -ENOMEM;
- case EEXIST: /* The mcast wasn't used */
- mcast_free(mcast);
- break;
+ mqp = rvt_mcast_qp_alloc(qp);
+ if (!mqp)
+ goto bail_mcast;
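+
+ /*
+  * rvt_mcast_add() reports "already attached" cases with positive
+  * errno values so they can be told apart from real failures.
+  */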
+ switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
+ case ESRCH:
+ /* Neither was used: OK to attach the same QP twice. */
+ ret = 0;
+ goto bail_mqp;
+ case EEXIST: /* The mcast wasn't used */
+ ret = 0;
+ goto bail_mcast;
case ENOMEM:
/* Exceeded the maximum number of mcast groups. */
- mcast_qp_free(mqp);
- mcast_free(mcast);
ret = -ENOMEM;
- goto bail;
-
+ goto bail_mqp;
default:
break;
}
- ret = 0;
+ return 0;
+
+bail_mqp:
+ rvt_mcast_qp_free(mqp);
+
+bail_mcast:
+ rvt_mcast_free(mcast);
-bail:
return ret;
}
-int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+/**
+ * rvt_detach_mcast - remove a qp from a multicast group
+ * @ibqp: Infiniband qp
+ * @gid: multicast GID
+ * @lid: multicast lid
+ *
+ * Return: 0 on success
+ */
+int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
- struct hfi1_qp *qp = to_iqp(ibqp);
- struct hfi1_ibdev *dev = to_idev(ibqp->device);
- struct hfi1_ibport *ibp = to_iport(ibqp->device, qp->port_num);
- struct hfi1_mcast *mcast = NULL;
- struct hfi1_mcast_qp *p, *tmp;
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
+ struct rvt_mcast *mcast = NULL;
+ struct rvt_mcast_qp *p, *tmp, *delp = NULL;
struct rb_node *n;
int last = 0;
- int ret;
+ int ret = 0;
- if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
- ret = -EINVAL;
- goto bail;
- }
+ if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
+ return -EINVAL;
spin_lock_irq(&ibp->lock);
/* Find the GID in the mcast table. */
n = ibp->mcast_tree.rb_node;
while (1) {
- if (n == NULL) {
+ if (!n) {
spin_unlock_irq(&ibp->lock);
- ret = -EINVAL;
- goto bail;
+ return -EINVAL;
}
- mcast = rb_entry(n, struct hfi1_mcast, rb_node);
+ mcast = rb_entry(n, struct rvt_mcast, rb_node);
ret = memcmp(gid->raw, mcast->mgid.raw,
sizeof(union ib_gid));
if (ret < 0)
@@ -345,6 +365,7 @@ int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
*/
list_del_rcu(&p->list);
mcast->n_attached--;
+ delp = p;
/* If this was the last attached QP, remove the GID too. */
if (list_empty(&mcast->qp_list)) {
@@ -355,31 +376,42 @@ int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
spin_unlock_irq(&ibp->lock);
+ /* QP not attached */
+ if (!delp)
+ return -EINVAL;
+
+ /*
+ * Wait for any list walkers to finish before freeing the
+ * list element.
+ */
+ wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+ rvt_mcast_qp_free(delp);
- if (p) {
- /*
- * Wait for any list walkers to finish before freeing the
- * list element.
- */
- wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
- mcast_qp_free(p);
- }
if (last) {
atomic_dec(&mcast->refcount);
wait_event(mcast->wait, !atomic_read(&mcast->refcount));
- mcast_free(mcast);
- spin_lock_irq(&dev->n_mcast_grps_lock);
- dev->n_mcast_grps_allocated--;
- spin_unlock_irq(&dev->n_mcast_grps_lock);
+ rvt_mcast_free(mcast);
+ spin_lock_irq(&rdi->n_mcast_grps_lock);
+ rdi->n_mcast_grps_allocated--;
+ spin_unlock_irq(&rdi->n_mcast_grps_lock);
}
- ret = 0;
-
-bail:
- return ret;
+ return 0;
}
-int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp)
+/**
+ * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
+ * @rdi: rvt dev struct
+ *
+ * Return: in use count
+ */
+int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
- return ibp->mcast_tree.rb_node == NULL;
+ int i;
+ int in_use = 0;
+
+ for (i = 0; i < rdi->dparms.nports; i++)
+ if (rdi->ports[i]->mcast_tree.rb_node)
+ in_use++;
+ return in_use;
}
diff --git a/drivers/infiniband/sw/rdmavt/mcast.h b/drivers/infiniband/sw/rdmavt/mcast.h
new file mode 100644
index 000000000000..29f579267608
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mcast.h
@@ -0,0 +1,58 @@
+#ifndef DEF_RVTMCAST_H
+#define DEF_RVTMCAST_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+void rvt_driver_mcast_init(struct rvt_dev_info *rdi);
+int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+int rvt_mcast_tree_empty(struct rvt_dev_info *rdi);
+
+#endif /* DEF_RVTMCAST_H */
diff --git a/drivers/staging/rdma/hfi1/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c
index 5173b1c60b3d..e202b8142759 100644
--- a/drivers/staging/rdma/hfi1/mmap.c
+++ b/drivers/infiniband/sw/rdmavt/mmap.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -48,68 +45,74 @@
*
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
-#include <linux/errno.h>
#include <asm/pgtable.h>
+#include "mmap.h"
-#include "verbs.h"
+/**
+ * rvt_mmap_init - init link list and lock for mem map
+ * @rdi: rvt dev struct
+ */
+void rvt_mmap_init(struct rvt_dev_info *rdi)
+{
+ INIT_LIST_HEAD(&rdi->pending_mmaps);
+ spin_lock_init(&rdi->pending_lock);
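+ /* Start at PAGE_SIZE so no mapping token is ever zero. */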
+ rdi->mmap_offset = PAGE_SIZE;
+ spin_lock_init(&rdi->mmap_offset_lock);
+}
/**
- * hfi1_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct hfi1_mmap_info
+ * rvt_release_mmap_info - free mmap info structure
+ * @ref: a pointer to the kref within struct rvt_mmap_info
*/
-void hfi1_release_mmap_info(struct kref *ref)
+void rvt_release_mmap_info(struct kref *ref)
{
- struct hfi1_mmap_info *ip =
- container_of(ref, struct hfi1_mmap_info, ref);
- struct hfi1_ibdev *dev = to_idev(ip->context->device);
+ struct rvt_mmap_info *ip =
+ container_of(ref, struct rvt_mmap_info, ref);
+ struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);
- spin_lock_irq(&dev->pending_lock);
+ spin_lock_irq(&rdi->pending_lock);
list_del(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ spin_unlock_irq(&rdi->pending_lock);
vfree(ip->obj);
kfree(ip);
}
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void hfi1_vma_open(struct vm_area_struct *vma)
+static void rvt_vma_open(struct vm_area_struct *vma)
{
- struct hfi1_mmap_info *ip = vma->vm_private_data;
+ struct rvt_mmap_info *ip = vma->vm_private_data;
kref_get(&ip->ref);
}
-static void hfi1_vma_close(struct vm_area_struct *vma)
+static void rvt_vma_close(struct vm_area_struct *vma)
{
- struct hfi1_mmap_info *ip = vma->vm_private_data;
+ struct rvt_mmap_info *ip = vma->vm_private_data;
- kref_put(&ip->ref, hfi1_release_mmap_info);
+ kref_put(&ip->ref, rvt_release_mmap_info);
}
-static struct vm_operations_struct hfi1_vm_ops = {
- .open = hfi1_vma_open,
- .close = hfi1_vma_close,
+static const struct vm_operations_struct rvt_vm_ops = {
+ .open = rvt_vma_open,
+ .close = rvt_vma_close,
};
/**
- * hfi1_mmap - create a new mmap region
+ * rvt_mmap - create a new mmap region
* @context: the IB user context of the process making the mmap() call
* @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
+ *
+ * Return: zero if the mmap is OK. Otherwise, return an errno.
*/
-int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
- struct hfi1_ibdev *dev = to_idev(context->device);
+ struct rvt_dev_info *rdi = ib_to_rvt(context->device);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long size = vma->vm_end - vma->vm_start;
- struct hfi1_mmap_info *ip, *pp;
+ struct rvt_mmap_info *ip, *pp;
int ret = -EINVAL;
/*
@@ -117,53 +120,60 @@ int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
* Normally, this list is very short since a call to create a
* CQ, QP, or SRQ is soon followed by a call to mmap().
*/
- spin_lock_irq(&dev->pending_lock);
- list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+ spin_lock_irq(&rdi->pending_lock);
+ list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
pending_mmaps) {
/* Only the creator is allowed to mmap the object */
- if (context != ip->context || (__u64) offset != ip->offset)
+ if (context != ip->context || (__u64)offset != ip->offset)
continue;
/* Don't allow a mmap larger than the object. */
if (size > ip->size)
break;
list_del_init(&ip->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
+ spin_unlock_irq(&rdi->pending_lock);
ret = remap_vmalloc_range(vma, ip->obj, 0);
if (ret)
goto done;
- vma->vm_ops = &hfi1_vm_ops;
+ vma->vm_ops = &rvt_vm_ops;
vma->vm_private_data = ip;
- hfi1_vma_open(vma);
+ rvt_vma_open(vma);
goto done;
}
- spin_unlock_irq(&dev->pending_lock);
+ spin_unlock_irq(&rdi->pending_lock);
done:
return ret;
}
-/*
- * Allocate information for hfi1_mmap
+/**
+ * rvt_create_mmap_info - allocate information for rvt_mmap
+ * @rdi: rvt dev struct
+ * @size: size in bytes to map
+ * @context: user context
+ * @obj: opaque pointer to a cq, wq etc
+ *
+ * Return: rvt_mmap struct on success
*/
-struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev,
- u32 size,
- struct ib_ucontext *context,
- void *obj) {
- struct hfi1_mmap_info *ip;
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj)
+{
+ struct rvt_mmap_info *ip;
- ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
if (!ip)
- goto bail;
+ return ip;
size = PAGE_ALIGN(size);
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
+ spin_lock_irq(&rdi->mmap_offset_lock);
+ if (rdi->mmap_offset == 0)
+ rdi->mmap_offset = PAGE_SIZE;
+ ip->offset = rdi->mmap_offset;
+ rdi->mmap_offset += size;
+ spin_unlock_irq(&rdi->mmap_offset_lock);
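+	/*
+	 * Each object gets a unique, page-aligned offset; rvt_mmap() later
+	 * matches on this offset (via vm_pgoff) to find the object.
+	 */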
INIT_LIST_HEAD(&ip->pending_mmaps);
ip->size = size;
@@ -171,21 +181,27 @@ struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev,
ip->obj = obj;
kref_init(&ip->ref);
-bail:
return ip;
}
-void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip,
- u32 size, void *obj)
+/**
+ * rvt_update_mmap_info - update a mem map
+ * @rdi: rvt dev struct
+ * @ip: mmap info pointer
+ * @size: size to grow by
+ * @obj: opaque pointer to cq, wq, etc.
+ */
+void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
+ u32 size, void *obj)
{
size = PAGE_ALIGN(size);
- spin_lock_irq(&dev->mmap_offset_lock);
- if (dev->mmap_offset == 0)
- dev->mmap_offset = PAGE_SIZE;
- ip->offset = dev->mmap_offset;
- dev->mmap_offset += size;
- spin_unlock_irq(&dev->mmap_offset_lock);
+ spin_lock_irq(&rdi->mmap_offset_lock);
+ if (rdi->mmap_offset == 0)
+ rdi->mmap_offset = PAGE_SIZE;
+ ip->offset = rdi->mmap_offset;
+ rdi->mmap_offset += size;
+ spin_unlock_irq(&rdi->mmap_offset_lock);
ip->size = size;
ip->obj = obj;
diff --git a/drivers/infiniband/sw/rdmavt/mmap.h b/drivers/infiniband/sw/rdmavt/mmap.h
new file mode 100644
index 000000000000..fab0e7b1daf9
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mmap.h
@@ -0,0 +1,63 @@
+#ifndef DEF_RDMAVTMMAP_H
+#define DEF_RDMAVTMMAP_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+void rvt_mmap_init(struct rvt_dev_info *rdi);
+void rvt_release_mmap_info(struct kref *ref);
+int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
+ u32 size,
+ struct ib_ucontext *context,
+ void *obj);
+void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
+ u32 size, void *obj);
+
+#endif /* DEF_RDMAVTMMAP_H */
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
new file mode 100644
index 000000000000..0ff765bfd619
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <rdma/ib_umem.h>
+#include <rdma/rdma_vt.h>
+#include "vt.h"
+#include "mr.h"
+
+/**
+ * rvt_driver_mr_init - Init MR resources per driver
+ * @rdi: rvt dev struct
+ *
+ * Do any initialization needed when a driver registers with rdmavt.
+ *
+ * Return: 0 on success or errno on failure
+ */
+int rvt_driver_mr_init(struct rvt_dev_info *rdi)
+{
+ unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
+ unsigned lk_tab_size;
+ int i;
+
+ /*
+	 * The top lkey_table_size bits are used to index the
+ * table. The lower 8 bits can be owned by the user (copied from
+ * the LKEY). The remaining bits act as a generation number or tag.
+ */
+ if (!lkey_table_size)
+ return -EINVAL;
+
+ spin_lock_init(&rdi->lkey_table.lock);
+
+ /* ensure generation is at least 4 bits */
+ if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
+ rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
+ lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
+ rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
+ lkey_table_size = rdi->dparms.lkey_table_size;
+ }
+ rdi->lkey_table.max = 1 << lkey_table_size;
+ lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
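+	/* e.g. lkey_table_size = 16 yields a 64K-entry table of RCU-protected pointers */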
+ rdi->lkey_table.table = (struct rvt_mregion __rcu **)
+ vmalloc_node(lk_tab_size, rdi->dparms.node);
+ if (!rdi->lkey_table.table)
+ return -ENOMEM;
+
+ RCU_INIT_POINTER(rdi->dma_mr, NULL);
+ for (i = 0; i < rdi->lkey_table.max; i++)
+ RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
+
+ return 0;
+}
+
+/**
+ * rvt_mr_exit - clean up MR
+ * @rdi: rvt dev structure
+ *
+ * Called when drivers have unregistered or perhaps failed to register with us.
+ */
+void rvt_mr_exit(struct rvt_dev_info *rdi)
+{
+ if (rdi->dma_mr)
+ rvt_pr_err(rdi, "DMA MR not null!\n");
+
+ vfree(rdi->lkey_table.table);
+}
+
+static void rvt_deinit_mregion(struct rvt_mregion *mr)
+{
+ int i = mr->mapsz;
+
+ mr->mapsz = 0;
+ while (i)
+ kfree(mr->map[--i]);
+}
+
+static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
+ int count)
+{
+ int m, i = 0;
+
+ mr->mapsz = 0;
+ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
+ for (; i < m; i++) {
+ mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
+ if (!mr->map[i]) {
+ rvt_deinit_mregion(mr);
+ return -ENOMEM;
+ }
+ mr->mapsz++;
+ }
+ init_completion(&mr->comp);
+	/* one initial reference for the pointer returned to the caller */
+ atomic_set(&mr->refcount, 1);
+ mr->pd = pd;
+ mr->max_segs = count;
+ return 0;
+}
+
+/**
+ * rvt_alloc_lkey - allocate an lkey
+ * @mr: memory region that this lkey protects
+ * @dma_region: 0->normal key, 1->restricted DMA key
+ *
+ * Return: 0 if successful, otherwise -errno.
+ *
+ * Increments mr reference count as required.
+ *
+ * Sets the lkey field of mr for non-DMA regions.
+ *
+ */
+static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
+{
+ unsigned long flags;
+ u32 r;
+ u32 n;
+ int ret = 0;
+ struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+
+ rvt_get_mr(mr);
+ spin_lock_irqsave(&rkt->lock, flags);
+
+ /* special case for dma_mr lkey == 0 */
+ if (dma_region) {
+ struct rvt_mregion *tmr;
+
+ tmr = rcu_access_pointer(dev->dma_mr);
+ if (!tmr) {
+ rcu_assign_pointer(dev->dma_mr, mr);
+ mr->lkey_published = 1;
+ } else {
+ rvt_put_mr(mr);
+ }
+ goto success;
+ }
+
+ /* Find the next available LKEY */
+ r = rkt->next;
+ n = r;
+ for (;;) {
+ if (!rcu_access_pointer(rkt->table[r]))
+ break;
+ r = (r + 1) & (rkt->max - 1);
+ if (r == n)
+ goto bail;
+ }
+ rkt->next = (r + 1) & (rkt->max - 1);
+ /*
+ * Make sure lkey is never zero which is reserved to indicate an
+ * unrestricted LKEY.
+ */
+ rkt->gen++;
+ /*
+ * bits are capped to ensure enough bits for generation number
+ */
+ mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
+ ((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
+ << 8);
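+	/*
+	 * Resulting layout (e.g. with lkey_table_size = 16): table index in
+	 * bits 31:16, generation tag in bits 15:8, bits 7:0 left to the user.
+	 */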
+ if (mr->lkey == 0) {
+ mr->lkey |= 1 << 8;
+ rkt->gen++;
+ }
+ rcu_assign_pointer(rkt->table[r], mr);
+ mr->lkey_published = 1;
+success:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+out:
+ return ret;
+bail:
+ rvt_put_mr(mr);
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = -ENOMEM;
+ goto out;
+}
+
+/**
+ * rvt_free_lkey - free an lkey
+ * @mr: mr to free from tables
+ */
+static void rvt_free_lkey(struct rvt_mregion *mr)
+{
+ unsigned long flags;
+ u32 lkey = mr->lkey;
+ u32 r;
+ struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ int freed = 0;
+
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (!mr->lkey_published)
+ goto out;
+ if (lkey == 0) {
+ RCU_INIT_POINTER(dev->dma_mr, NULL);
+ } else {
+ r = lkey >> (32 - dev->dparms.lkey_table_size);
+ RCU_INIT_POINTER(rkt->table[r], NULL);
+ }
+ mr->lkey_published = 0;
+ freed++;
+out:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ if (freed) {
+ synchronize_rcu();
+ rvt_put_mr(mr);
+ }
+}
+
+static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
+{
+ struct rvt_mr *mr;
+ int rval = -ENOMEM;
+ int m;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
+ mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
+ if (!mr)
+ goto bail;
+
+ rval = rvt_init_mregion(&mr->mr, pd, count);
+ if (rval)
+ goto bail;
+ /*
+ * ib_reg_phys_mr() will initialize mr->ibmr except for
+ * lkey and rkey.
+ */
+ rval = rvt_alloc_lkey(&mr->mr, 0);
+ if (rval)
+ goto bail_mregion;
+ mr->ibmr.lkey = mr->mr.lkey;
+ mr->ibmr.rkey = mr->mr.lkey;
+done:
+ return mr;
+
+bail_mregion:
+ rvt_deinit_mregion(&mr->mr);
+bail:
+ kfree(mr);
+ mr = ERR_PTR(rval);
+ goto done;
+}
+
+static void __rvt_free_mr(struct rvt_mr *mr)
+{
+ rvt_deinit_mregion(&mr->mr);
+ rvt_free_lkey(&mr->mr);
+ vfree(mr);
+}
+
+/**
+ * rvt_get_dma_mr - get a DMA memory region
+ * @pd: protection domain for this memory region
+ * @acc: access flags
+ *
+ * Return: the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see dma.c).
+ */
+struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct rvt_mr *mr;
+ struct ib_mr *ret;
+ int rval;
+
+ if (ibpd_to_rvtpd(pd)->user)
+ return ERR_PTR(-EPERM);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ rval = rvt_init_mregion(&mr->mr, pd, 0);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail;
+ }
+
+ rval = rvt_alloc_lkey(&mr->mr, 1);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail_mregion;
+ }
+
+ mr->mr.access_flags = acc;
+ ret = &mr->ibmr;
+done:
+ return ret;
+
+bail_mregion:
+ rvt_deinit_mregion(&mr->mr);
+bail:
+ kfree(mr);
+ goto done;
+}
+
+/**
+ * rvt_reg_user_mr - register a userspace memory region
+ * @pd: protection domain for this memory region
+ * @start: starting userspace address
+ * @length: length of region to register
+ * @virt_addr: associated virtual address (iova) for the region
+ * @mr_access_flags: access flags for this memory region
+ * @udata: unused by the driver
+ *
+ * Return: the memory region on success, otherwise returns an errno.
+ */
+struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct rvt_mr *mr;
+ struct ib_umem *umem;
+ struct scatterlist *sg;
+ int n, m, entry;
+ struct ib_mr *ret;
+
+ if (length == 0)
+ return ERR_PTR(-EINVAL);
+
+ umem = ib_umem_get(pd->uobject->context, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(umem))
+ return (void *)umem;
+
+ n = umem->nmap;
+
+ mr = __rvt_alloc_mr(n, pd);
+ if (IS_ERR(mr)) {
+ ret = (struct ib_mr *)mr;
+ goto bail_umem;
+ }
+
+ mr->mr.user_base = start;
+ mr->mr.iova = virt_addr;
+ mr->mr.length = length;
+ mr->mr.offset = ib_umem_offset(umem);
+ mr->mr.access_flags = mr_access_flags;
+ mr->umem = umem;
+
+ if (is_power_of_2(umem->page_size))
+ mr->mr.page_shift = ilog2(umem->page_size);
+ m = 0;
+ n = 0;
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ void *vaddr;
+
+ vaddr = page_address(sg_page(sg));
+ if (!vaddr) {
+ ret = ERR_PTR(-EINVAL);
+ goto bail_inval;
+ }
+ mr->mr.map[m]->segs[n].vaddr = vaddr;
+ mr->mr.map[m]->segs[n].length = umem->page_size;
+ n++;
+ if (n == RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ return &mr->ibmr;
+
+bail_inval:
+ __rvt_free_mr(mr);
+
+bail_umem:
+ ib_umem_release(umem);
+
+ return ret;
+}
+
+/**
+ * rvt_dereg_mr - unregister and free a memory region
+ * @ibmr: the memory region to free
+ *
+ * Note that this is called to free MRs created by rvt_get_dma_mr()
+ * or rvt_reg_user_mr().
+ *
+ * Return: 0 on success.
+ */
+int rvt_dereg_mr(struct ib_mr *ibmr)
+{
+ struct rvt_mr *mr = to_imr(ibmr);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibmr->pd->device);
+ int ret = 0;
+ unsigned long timeout;
+
+ rvt_free_lkey(&mr->mr);
+
+ rvt_put_mr(&mr->mr); /* will set completion if last */
+ timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
+ if (!timeout) {
+ rvt_pr_err(rdi,
+ "rvt_dereg_mr timeout mr %p pd %p refcount %u\n",
+ mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
+ rvt_get_mr(&mr->mr);
+ ret = -EBUSY;
+ goto out;
+ }
+ rvt_deinit_mregion(&mr->mr);
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+ kfree(mr);
+out:
+ return ret;
+}
+
+/**
+ * rvt_alloc_mr - Allocate a memory region usable with the IB_WR_REG_MR send work request
+ * @pd: protection domain for this memory region
+ * @mr_type: mem region type
+ * @max_num_sg: Max number of segments allowed
+ *
+ * Return: the memory region on success, otherwise return an errno.
+ */
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct rvt_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __rvt_alloc_mr(max_num_sg, pd);
+ if (IS_ERR(mr))
+ return (struct ib_mr *)mr;
+
+ return &mr->ibmr;
+}
+
+/**
+ * rvt_alloc_fmr - allocate a fast memory region
+ * @pd: the protection domain for this memory region
+ * @mr_access_flags: access flags for this memory region
+ * @fmr_attr: fast memory region attributes
+ *
+ * Return: the memory region on success, otherwise returns an errno.
+ */
+struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr)
+{
+ struct rvt_fmr *fmr;
+ int m;
+ struct ib_fmr *ret;
+ int rval = -ENOMEM;
+
+ /* Allocate struct plus pointers to first level page tables. */
+ m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
+ fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
+ if (!fmr)
+ goto bail;
+
+ rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
+ if (rval)
+ goto bail;
+
+ /*
+ * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
+ * rkey.
+ */
+ rval = rvt_alloc_lkey(&fmr->mr, 0);
+ if (rval)
+ goto bail_mregion;
+ fmr->ibfmr.rkey = fmr->mr.lkey;
+ fmr->ibfmr.lkey = fmr->mr.lkey;
+ /*
+ * Resources are allocated but no valid mapping (RKEY can't be
+ * used).
+ */
+ fmr->mr.access_flags = mr_access_flags;
+ fmr->mr.max_segs = fmr_attr->max_pages;
+ fmr->mr.page_shift = fmr_attr->page_shift;
+
+ ret = &fmr->ibfmr;
+done:
+ return ret;
+
+bail_mregion:
+ rvt_deinit_mregion(&fmr->mr);
+bail:
+ kfree(fmr);
+ ret = ERR_PTR(rval);
+ goto done;
+}
+
+/**
+ * rvt_map_phys_fmr - set up a fast memory region
+ * @ibfmr: the fast memory region to set up
+ * @page_list: the list of pages to associate with the fast memory region
+ * @list_len: the number of pages to associate with the fast memory region
+ * @iova: the virtual address of the start of the fast memory region
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success
+ */
+int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ struct rvt_fmr *fmr = to_ifmr(ibfmr);
+ struct rvt_lkey_table *rkt;
+ unsigned long flags;
+ int m, n, i;
+ u32 ps;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);
+
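+	/*
+	 * A published FMR idles at a refcount of 2 (one from init, one from
+	 * the lkey); a higher count means an SGE still references it.
+	 */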
+ i = atomic_read(&fmr->mr.refcount);
+ if (i > 2)
+ return -EBUSY;
+
+ if (list_len > fmr->mr.max_segs)
+ return -EINVAL;
+
+ rkt = &rdi->lkey_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = iova;
+ fmr->mr.iova = iova;
+ ps = 1 << fmr->mr.page_shift;
+ fmr->mr.length = list_len * ps;
+ m = 0;
+ n = 0;
+ for (i = 0; i < list_len; i++) {
+ fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
+ fmr->mr.map[m]->segs[n].length = ps;
+ if (++n == RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ return 0;
+}
+
+/**
+ * rvt_unmap_fmr - unmap fast memory regions
+ * @fmr_list: the list of fast memory regions to unmap
+ *
+ * Return: 0 on success.
+ */
+int rvt_unmap_fmr(struct list_head *fmr_list)
+{
+ struct rvt_fmr *fmr;
+ struct rvt_lkey_table *rkt;
+ unsigned long flags;
+ struct rvt_dev_info *rdi;
+
+ list_for_each_entry(fmr, fmr_list, ibfmr.list) {
+ rdi = ib_to_rvt(fmr->ibfmr.device);
+ rkt = &rdi->lkey_table;
+ spin_lock_irqsave(&rkt->lock, flags);
+ fmr->mr.user_base = 0;
+ fmr->mr.iova = 0;
+ fmr->mr.length = 0;
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ }
+ return 0;
+}
+
+/**
+ * rvt_dealloc_fmr - deallocate a fast memory region
+ * @ibfmr: the fast memory region to deallocate
+ *
+ * Return: 0 on success.
+ */
+int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct rvt_fmr *fmr = to_ifmr(ibfmr);
+ int ret = 0;
+ unsigned long timeout;
+
+ rvt_free_lkey(&fmr->mr);
+ rvt_put_mr(&fmr->mr); /* will set completion if last */
+ timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
+ if (!timeout) {
+ rvt_get_mr(&fmr->mr);
+ ret = -EBUSY;
+ goto out;
+ }
+ rvt_deinit_mregion(&fmr->mr);
+ kfree(fmr);
+out:
+ return ret;
+}
+
+/**
+ * rvt_lkey_ok - check IB SGE for validity and initialize
+ * @rkt: table containing lkey to check SGE against
+ * @pd: protection domain
+ * @isge: outgoing internal SGE
+ * @sge: SGE to check
+ * @acc: access flags
+ *
+ * Check the IB SGE for validity and initialize our internal version
+ * of it.
+ *
+ * Return: 1 if valid and successful, otherwise returns 0.
+ *
+ * Increments the reference count upon success.
+ */
+int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
+ struct rvt_sge *isge, struct ib_sge *sge, int acc)
+{
+ struct rvt_mregion *mr;
+ unsigned n, m;
+ size_t off;
+ struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
+
+ /*
+ * We use LKEY == zero for kernel virtual addresses
+ * (see rvt_get_dma_mr and dma.c).
+ */
+ rcu_read_lock();
+ if (sge->lkey == 0) {
+ if (pd->user)
+ goto bail;
+ mr = rcu_dereference(dev->dma_mr);
+ if (!mr)
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ isge->mr = mr;
+ isge->vaddr = (void *)sge->addr;
+ isge->length = sge->length;
+ isge->sge_length = sge->length;
+ isge->m = 0;
+ isge->n = 0;
+ goto ok;
+ }
+ mr = rcu_dereference(
+ rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
+ goto bail;
+
+ off = sge->addr - mr->user_base;
+ if (unlikely(sge->addr < mr->user_base ||
+ off + sge->length > mr->length ||
+ (mr->access_flags & acc) != acc))
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ off += mr->offset;
+ if (mr->page_shift) {
+ /*
+ * page sizes are uniform power of 2 so no loop is necessary
+ * entries_spanned_by_off is the number of times the loop below
+ * would have executed.
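+		 * e.g. page_shift = 12 and off = 0x6000 spans 6 entries,
+		 * leaving off = 0 within the target segment.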
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ isge->mr = mr;
+ isge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ isge->length = mr->map[m]->segs[n].length - off;
+ isge->sge_length = sge->length;
+ isge->m = m;
+ isge->n = n;
+ok:
+ return 1;
+bail:
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(rvt_lkey_ok);
+
+/**
+ * rvt_rkey_ok - check the IB virtual address, length, and RKEY
+ * @qp: qp for validation
+ * @sge: SGE state
+ * @len: length of data
+ * @vaddr: virtual address to place data
+ * @rkey: rkey to check
+ * @acc: access flags
+ *
+ * Return: 1 if successful, otherwise 0.
+ *
+ * Increments the reference count upon success.
+ */
+int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
+ u32 len, u64 vaddr, u32 rkey, int acc)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
+ struct rvt_lkey_table *rkt = &dev->lkey_table;
+ struct rvt_mregion *mr;
+ unsigned n, m;
+ size_t off;
+
+ /*
+ * We use RKEY == zero for kernel virtual addresses
+ * (see rvt_get_dma_mr and dma.c).
+ */
+ rcu_read_lock();
+ if (rkey == 0) {
+ struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
+ struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);
+
+ if (pd->user)
+ goto bail;
+ mr = rcu_dereference(rdi->dma_mr);
+ if (!mr)
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ sge->mr = mr;
+ sge->vaddr = (void *)vaddr;
+ sge->length = len;
+ sge->sge_length = len;
+ sge->m = 0;
+ sge->n = 0;
+ goto ok;
+ }
+
+ mr = rcu_dereference(
+ rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ goto bail;
+
+ off = vaddr - mr->iova;
+ if (unlikely(vaddr < mr->iova || off + len > mr->length ||
+ (mr->access_flags & acc) == 0))
+ goto bail;
+ atomic_inc(&mr->refcount);
+ rcu_read_unlock();
+
+ off += mr->offset;
+ if (mr->page_shift) {
+ /*
+ * page sizes are uniform power of 2 so no loop is necessary
+ * entries_spanned_by_off is the number of times the loop below
+ * would have executed.
+ */
+ size_t entries_spanned_by_off;
+
+ entries_spanned_by_off = off >> mr->page_shift;
+ off -= (entries_spanned_by_off << mr->page_shift);
+ m = entries_spanned_by_off / RVT_SEGSZ;
+ n = entries_spanned_by_off % RVT_SEGSZ;
+ } else {
+ m = 0;
+ n = 0;
+ while (off >= mr->map[m]->segs[n].length) {
+ off -= mr->map[m]->segs[n].length;
+ n++;
+ if (n >= RVT_SEGSZ) {
+ m++;
+ n = 0;
+ }
+ }
+ }
+ sge->mr = mr;
+ sge->vaddr = mr->map[m]->segs[n].vaddr + off;
+ sge->length = mr->map[m]->segs[n].length - off;
+ sge->sge_length = len;
+ sge->m = m;
+ sge->n = n;
+ok:
+ return 1;
+bail:
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(rvt_rkey_ok);
diff --git a/drivers/infiniband/sw/rdmavt/mr.h b/drivers/infiniband/sw/rdmavt/mr.h
new file mode 100644
index 000000000000..69380512c6d1
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/mr.h
@@ -0,0 +1,92 @@
+#ifndef DEF_RVTMR_H
+#define DEF_RVTMR_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+struct rvt_fmr {
+ struct ib_fmr ibfmr;
+ struct rvt_mregion mr; /* must be last */
+};
+
+struct rvt_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct rvt_mregion mr; /* must be last */
+};
+
+static inline struct rvt_fmr *to_ifmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct rvt_fmr, ibfmr);
+}
+
+static inline struct rvt_mr *to_imr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct rvt_mr, ibmr);
+}
+
+int rvt_driver_mr_init(struct rvt_dev_info *rdi);
+void rvt_mr_exit(struct rvt_dev_info *rdi);
+
+/* Mem Regions */
+struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata);
+int rvt_dereg_mr(struct ib_mr *ibmr);
+struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg);
+struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
+ struct ib_fmr_attr *fmr_attr);
+int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova);
+int rvt_unmap_fmr(struct list_head *fmr_list);
+int rvt_dealloc_fmr(struct ib_fmr *ibfmr);
+
+#endif /* DEF_RVTMR_H */
diff --git a/drivers/infiniband/sw/rdmavt/pd.c b/drivers/infiniband/sw/rdmavt/pd.c
new file mode 100644
index 000000000000..d1292f324c67
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/pd.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/slab.h>
+#include "pd.h"
+
+/**
+ * rvt_alloc_pd - allocate a protection domain
+ * @ibdev: ib device
+ * @context: optional user context
+ * @udata: optional user data
+ *
+ * Allocate and keep track of a PD.
+ *
+ * Return: newly allocated PD on success, otherwise an errno.
+ */
+struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct rvt_dev_info *dev = ib_to_rvt(ibdev);
+ struct rvt_pd *pd;
+ struct ib_pd *ret;
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+ /*
+	 * While we could continue allocating protection domains, constrained
+	 * only by system resources, the IBTA spec defines a max_pd limit that
+	 * can be set, and we need to check for that.
+ */
+
+ spin_lock(&dev->n_pds_lock);
+ if (dev->n_pds_allocated == dev->dparms.props.max_pd) {
+ spin_unlock(&dev->n_pds_lock);
+ kfree(pd);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail;
+ }
+
+ dev->n_pds_allocated++;
+ spin_unlock(&dev->n_pds_lock);
+
+ /* ib_alloc_pd() will initialize pd->ibpd. */
+ pd->user = udata ? 1 : 0;
+
+ ret = &pd->ibpd;
+
+bail:
+ return ret;
+}
+
+/**
+ * rvt_dealloc_pd - Free PD
+ * @ibpd: PD to free
+ *
+ * Return: always 0
+ */
+int rvt_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
+ struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
+
+ spin_lock(&dev->n_pds_lock);
+ dev->n_pds_allocated--;
+ spin_unlock(&dev->n_pds_lock);
+
+ kfree(pd);
+
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rdmavt/pd.h b/drivers/infiniband/sw/rdmavt/pd.h
new file mode 100644
index 000000000000..1892ca4a9746
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/pd.h
@@ -0,0 +1,58 @@
+#ifndef DEF_RDMAVTPD_H
+#define DEF_RDMAVTPD_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+struct ib_pd *rvt_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int rvt_dealloc_pd(struct ib_pd *ibpd);
+
+#endif /* DEF_RDMAVTPD_H */
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
new file mode 100644
index 000000000000..bd82a6948dc8
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -0,0 +1,1696 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/hash.h>
+#include <linux/bitops.h>
+#include <linux/lockdep.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <rdma/ib_verbs.h>
+#include "qp.h"
+#include "vt.h"
+#include "trace.h"
+
+/*
+ * Note that it is OK to post send work requests in the SQE and ERR
+ * states; rvt_do_send() will process them and generate error
+ * completions as per IB 1.2 C10-96.
+ */
+const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
+ [IB_QPS_RESET] = 0,
+ [IB_QPS_INIT] = RVT_POST_RECV_OK,
+ [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
+ [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
+ RVT_PROCESS_NEXT_SEND_OK,
+ [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
+ [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
+ RVT_POST_SEND_OK | RVT_FLUSH_SEND,
+ [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
+ RVT_POST_SEND_OK | RVT_FLUSH_SEND,
+};
+EXPORT_SYMBOL(ib_rvt_state_ops);
+
+static void get_map_page(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map,
+ gfp_t gfp)
+{
+ unsigned long page = get_zeroed_page(gfp);
+
+ /*
+ * Free the page if someone raced with us installing it.
+ */
+
+ spin_lock(&qpt->lock);
+ if (map->page)
+ free_page(page);
+ else
+ map->page = (void *)page;
+ spin_unlock(&qpt->lock);
+}
+
+/**
+ * init_qpn_table - initialize the QP number table for a device
+ * @qpt: the QPN table
+ */
+static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
+{
+ u32 offset, i;
+ struct rvt_qpn_map *map;
+ int ret = 0;
+
+	if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
+ return -EINVAL;
+
+ spin_lock_init(&qpt->lock);
+
+ qpt->last = rdi->dparms.qpn_start;
+ qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
+
+ /*
+	 * Drivers may want some QPs beyond what we need for verbs; let them
+	 * use our qpn table rather than keeping a second one. Go ahead and
+	 * mark the bitmaps for those. The reserved range must be *after* the
+	 * range which verbs will pick from.
+ */
+
+ /* Figure out number of bit maps needed before reserved range */
+ qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
+
+ /* This should always be zero */
+	/* This should always be zero; qpn_res_start is expected to be bitmap-page aligned */
+
+ /* Starting with the first reserved bit map */
+ map = &qpt->map[qpt->nmaps];
+
+ rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
+ rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
+ for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
+ if (!map->page) {
+ get_map_page(qpt, map, GFP_KERNEL);
+ if (!map->page) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ set_bit(offset, map->page);
+ offset++;
+ if (offset == RVT_BITS_PER_PAGE) {
+ /* next page */
+ qpt->nmaps++;
+ map++;
+ offset = 0;
+ }
+ }
+ return ret;
+}
+
+/**
+ * free_qpn_table - free the QP number table for a device
+ * @qpt: the QPN table
+ */
+static void free_qpn_table(struct rvt_qpn_table *qpt)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
+ free_page((unsigned long)qpt->map[i].page);
+}
+
+/**
+ * rvt_driver_qp_init - Init driver qp resources
+ * @rdi: rvt dev structure
+ *
+ * Return: 0 on success
+ */
+int rvt_driver_qp_init(struct rvt_dev_info *rdi)
+{
+ int i;
+ int ret = -ENOMEM;
+
+ if (!rdi->dparms.qp_table_size)
+ return -EINVAL;
+
+ /*
+	 * If the driver is not doing any QP allocation then make sure it is
+ * providing the necessary QP functions.
+ */
+ if (!rdi->driver_f.free_all_qps ||
+ !rdi->driver_f.qp_priv_alloc ||
+ !rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset)
+ return -EINVAL;
+
+ /* allocate parent object */
+ rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
+ rdi->dparms.node);
+ if (!rdi->qp_dev)
+ return -ENOMEM;
+
+ /* allocate hash table */
+ rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
+ rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
+ rdi->qp_dev->qp_table =
+ kmalloc_node(rdi->qp_dev->qp_table_size *
+ sizeof(*rdi->qp_dev->qp_table),
+ GFP_KERNEL, rdi->dparms.node);
+ if (!rdi->qp_dev->qp_table)
+ goto no_qp_table;
+
+ for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
+ RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
+
+ spin_lock_init(&rdi->qp_dev->qpt_lock);
+
+ /* initialize qpn map */
+ if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
+ goto fail_table;
+
+ spin_lock_init(&rdi->n_qps_lock);
+
+ return 0;
+
+fail_table:
+ kfree(rdi->qp_dev->qp_table);
+ free_qpn_table(&rdi->qp_dev->qpn_table);
+
+no_qp_table:
+ kfree(rdi->qp_dev);
+
+ return ret;
+}
+
+/**
+ * rvt_free_all_qps - check for QPs still in use
+ * @rdi: rvt device info structure
+ *
+ * There should not be any QPs still in use.
+ * Clear the QP hash table and count the QPs still in use.
+ */
+static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
+{
+ unsigned long flags;
+ struct rvt_qp *qp;
+ unsigned n, qp_inuse = 0;
+ spinlock_t *ql; /* work around too long line below */
+
+ if (rdi->driver_f.free_all_qps)
+ qp_inuse = rdi->driver_f.free_all_qps(rdi);
+
+ qp_inuse += rvt_mcast_tree_empty(rdi);
+
+ if (!rdi->qp_dev)
+ return qp_inuse;
+
+ ql = &rdi->qp_dev->qpt_lock;
+ spin_lock_irqsave(ql, flags);
+ for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
+ qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
+ lockdep_is_held(ql));
+ RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
+
+ for (; qp; qp = rcu_dereference_protected(qp->next,
+ lockdep_is_held(ql)))
+ qp_inuse++;
+ }
+ spin_unlock_irqrestore(ql, flags);
+ synchronize_rcu();
+ return qp_inuse;
+}
+
+/**
+ * rvt_qp_exit - clean up qps on device exit
+ * @rdi: rvt dev structure
+ *
+ * Check for qp leaks and free resources.
+ */
+void rvt_qp_exit(struct rvt_dev_info *rdi)
+{
+ u32 qps_inuse = rvt_free_all_qps(rdi);
+
+ if (qps_inuse)
+ rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
+ qps_inuse);
+ if (!rdi->qp_dev)
+ return;
+
+ kfree(rdi->qp_dev->qp_table);
+ free_qpn_table(&rdi->qp_dev->qpn_table);
+ kfree(rdi->qp_dev);
+}
+
+static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off)
+{
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
+}
+
+/**
+ * alloc_qpn - Allocate the next available qpn or zero/one for QP type
+ * IB_QPT_SMI/IB_QPT_GSI
+ * @rdi: rvt device info structure
+ * @qpt: queue pair number table pointer
+ * @type: the QP type
+ * @port_num: IB port number, 1 based, comes from core
+ * @gfp: allocation flags
+ *
+ * Return: The queue pair number, or an errno on failure
+ */
+static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
+ enum ib_qp_type type, u8 port_num, gfp_t gfp)
+{
+ u32 i, offset, max_scan, qpn;
+ struct rvt_qpn_map *map;
+ u32 ret;
+
+ if (rdi->driver_f.alloc_qpn)
+ return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp);
+
+ if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
+ unsigned n;
+
+ ret = type == IB_QPT_GSI;
+ n = 1 << (ret + 2 * (port_num - 1));
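+		/* two flag bits per port: bit 0 of the pair is SMI (QP0), bit 1 is GSI (QP1) */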
+ spin_lock(&qpt->lock);
+ if (qpt->flags & n)
+ ret = -EINVAL;
+ else
+ qpt->flags |= n;
+ spin_unlock(&qpt->lock);
+ goto bail;
+ }
+
+ qpn = qpt->last + qpt->incr;
+ if (qpn >= RVT_QPN_MAX)
+ qpn = qpt->incr | ((qpt->last & 1) ^ 1);
+ /* offset carries bit 0 */
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
+ max_scan = qpt->nmaps - !offset;
+ for (i = 0;;) {
+ if (unlikely(!map->page)) {
+ get_map_page(qpt, map, gfp);
+ if (unlikely(!map->page))
+ break;
+ }
+ do {
+ if (!test_and_set_bit(offset, map->page)) {
+ qpt->last = qpn;
+ ret = qpn;
+ goto bail;
+ }
+ offset += qpt->incr;
+ /*
+ * This qpn might be bogus if offset >= BITS_PER_PAGE.
+			 * That is OK. It gets re-assigned below.
+ */
+ qpn = mk_qpn(qpt, map, offset);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
+ /*
+ * In order to keep the number of pages allocated to a
+		 * minimum, we scan all existing pages before increasing
+ * the size of the bitmap table.
+ */
+ if (++i > max_scan) {
+ if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
+ break;
+ map = &qpt->map[qpt->nmaps++];
+ /* start at incr with current bit 0 */
+ offset = qpt->incr | (offset & 1);
+ } else if (map < &qpt->map[qpt->nmaps]) {
+ ++map;
+ /* start at incr with current bit 0 */
+ offset = qpt->incr | (offset & 1);
+ } else {
+ map = &qpt->map[0];
+ /* wrap to first map page, invert bit 0 */
+ offset = qpt->incr | ((offset & 1) ^ 1);
+ }
+ /* there can be no bits at shift and below */
+ WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+ qpn = mk_qpn(qpt, map, offset);
+ }
+
+ ret = -ENOMEM;
+
+bail:
+ return ret;
+}
+
+static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
+{
+ struct rvt_qpn_map *map;
+
+ map = qpt->map + qpn / RVT_BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
+}
+
+/**
+ * rvt_clear_mr_refs - Drop held mr refs
+ * @qp: rvt qp data structure
+ * @clr_sends: Whether to clear the send side or not
+ */
+static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
+{
+ unsigned n;
+
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
+ rvt_put_ss(&qp->s_rdma_read_sge);
+
+ rvt_put_ss(&qp->r_sge);
+
+ if (clr_sends) {
+ while (qp->s_last != qp->s_head) {
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ unsigned i;
+
+ for (i = 0; i < wqe->wr.num_sge; i++) {
+ struct rvt_sge *sge = &wqe->sg_list[i];
+
+ rvt_put_mr(sge->mr);
+ }
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&ibah_to_rvtah(
+ wqe->ud_wr.ah)->refcount);
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ smp_wmb(); /* see qp_set_savail */
+ }
+ if (qp->s_rdma_mr) {
+ rvt_put_mr(qp->s_rdma_mr);
+ qp->s_rdma_mr = NULL;
+ }
+ }
+
+ if (qp->ibqp.qp_type != IB_QPT_RC)
+ return;
+
+ for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ struct rvt_ack_entry *e = &qp->s_ack_queue[n];
+
+ if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
+ e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+ }
+}
+
+/**
+ * rvt_remove_qp - remove qp from table
+ * @rdi: rvt dev struct
+ * @qp: qp to remove
+ *
+ * Remove the QP from the table so it can't be found asynchronously by
+ * the receive routine.
+ */
+static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
+ u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
+ unsigned long flags;
+ int removed = 1;
+
+ spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
+
+ if (rcu_dereference_protected(rvp->qp[0],
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(rvp->qp[0], NULL);
+ } else if (rcu_dereference_protected(rvp->qp[1],
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
+ RCU_INIT_POINTER(rvp->qp[1], NULL);
+ } else {
+ struct rvt_qp *q;
+ struct rvt_qp __rcu **qpp;
+
+ removed = 0;
+ qpp = &rdi->qp_dev->qp_table[n];
+ for (; (q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
+ qpp = &q->next) {
+ if (q == qp) {
+ RCU_INIT_POINTER(*qpp,
+ rcu_dereference_protected(qp->next,
+ lockdep_is_held(&rdi->qp_dev->qpt_lock)));
+ removed = 1;
+ trace_rvt_qpremove(qp, n);
+ break;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
+ if (removed) {
+ synchronize_rcu();
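+		/* no RCU reader can still see the qp; drop the table's reference */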
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+}
+
+/**
+ * rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: rvt dev struct
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * The r_lock, s_hlock, and s_lock are required to be held by the caller.
+ */
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+{
+ if (qp->state != IB_QPS_RESET) {
+ qp->state = IB_QPS_RESET;
+
+ /* Let drivers flush their waitlist */
+ rdi->driver_f.flush_qp_waiters(qp);
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ /* Stop the send queue and the retry timer */
+ rdi->driver_f.stop_send_queue(qp);
+
+ /* Wait for things to stop */
+ rdi->driver_f.quiesce_qp(qp);
+
+		/* take the qp out of the hash table and wait for it to be unused */
+ rvt_remove_qp(rdi, qp);
+ wait_event(qp->wait, !atomic_read(&qp->refcount));
+
+ /* grab the lock b/c it was locked at call time */
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ rvt_clear_mr_refs(qp, 1);
+ }
+
+ /*
+ * Let the driver do any tear down it needs to for a qp
+ * that has been reset
+ */
+ rdi->driver_f.notify_qp_reset(qp);
+
+ qp->remote_qpn = 0;
+ qp->qkey = 0;
+ qp->qp_access_flags = 0;
+ qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
+ qp->s_hdrwords = 0;
+ qp->s_wqe = NULL;
+ qp->s_draining = 0;
+ qp->s_next_psn = 0;
+ qp->s_last_psn = 0;
+ qp->s_sending_psn = 0;
+ qp->s_sending_hpsn = 0;
+ qp->s_psn = 0;
+ qp->r_psn = 0;
+ qp->r_msn = 0;
+ if (type == IB_QPT_RC) {
+ qp->s_state = IB_OPCODE_RC_SEND_LAST;
+ qp->r_state = IB_OPCODE_RC_SEND_LAST;
+ } else {
+ qp->s_state = IB_OPCODE_UC_SEND_LAST;
+ qp->r_state = IB_OPCODE_UC_SEND_LAST;
+ }
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+ qp->r_nak_state = 0;
+ qp->r_aflags = 0;
+ qp->r_flags = 0;
+ qp->s_head = 0;
+ qp->s_tail = 0;
+ qp->s_cur = 0;
+ qp->s_acked = 0;
+ qp->s_last = 0;
+ qp->s_ssn = 1;
+ qp->s_lsn = 0;
+ qp->s_mig_state = IB_MIG_MIGRATED;
+ memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+ qp->r_head_ack_queue = 0;
+ qp->s_tail_ack_queue = 0;
+ qp->s_num_rd_atomic = 0;
+ if (qp->r_rq.wq) {
+ qp->r_rq.wq->head = 0;
+ qp->r_rq.wq->tail = 0;
+ }
+ qp->r_sge.num_sge = 0;
+}
+
+/**
+ * rvt_create_qp - create a queue pair for a device
+ * @ibpd: the protection domain whose device we create the queue pair for
+ * @init_attr: the attributes of the queue pair
+ * @udata: user data for libibverbs.so
+ *
+ * Queue pair creation is mostly an rvt issue. However, drivers have their own
+ * unique idea of what queue pair numbers mean. For instance, there is a reserved
+ * range for PSM.
+ *
+ * Return: the queue pair on success, otherwise returns an errno.
+ *
+ * Called by the ib_create_qp() core verbs function.
+ */
+struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct rvt_qp *qp;
+ int err;
+ struct rvt_swqe *swq = NULL;
+ size_t sz;
+ size_t sg_list_sz;
+ struct ib_qp *ret = ERR_PTR(-ENOMEM);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
+ void *priv = NULL;
+ gfp_t gfp;
+
+ if (!rdi)
+ return ERR_PTR(-EINVAL);
+
+ if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
+ init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
+ init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
+ return ERR_PTR(-EINVAL);
+
+ /* GFP_NOIO is applicable to RC QP's only */
+
+ if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
+ init_attr->qp_type != IB_QPT_RC)
+ return ERR_PTR(-EINVAL);
+
+ gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
+ GFP_NOIO : GFP_KERNEL;
+
+ /* Check receive queue parameters if no SRQ is specified. */
+ if (!init_attr->srq) {
+ if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
+ init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
+ return ERR_PTR(-EINVAL);
+
+ if (init_attr->cap.max_send_sge +
+ init_attr->cap.max_send_wr +
+ init_attr->cap.max_recv_sge +
+ init_attr->cap.max_recv_wr == 0)
+ return ERR_PTR(-EINVAL);
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ if (init_attr->port_num == 0 ||
+ init_attr->port_num > ibpd->device->phys_port_cnt)
+ return ERR_PTR(-EINVAL);
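+ /* fall through - SMI/GSI share the allocation path below */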
+ case IB_QPT_UC:
+ case IB_QPT_RC:
+ case IB_QPT_UD:
+ sz = sizeof(struct rvt_sge) *
+ init_attr->cap.max_send_sge +
+ sizeof(struct rvt_swqe);
+ if (gfp == GFP_NOIO)
+ swq = __vmalloc(
+ (init_attr->cap.max_send_wr + 1) * sz,
+ gfp, PAGE_KERNEL);
+ else
+ swq = vmalloc_node(
+ (init_attr->cap.max_send_wr + 1) * sz,
+ rdi->dparms.node);
+ if (!swq)
+ return ERR_PTR(-ENOMEM);
+
+ sz = sizeof(*qp);
+ sg_list_sz = 0;
+ if (init_attr->srq) {
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
+
+ if (srq->rq.max_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (srq->rq.max_sge - 1);
+ } else if (init_attr->cap.max_recv_sge > 1)
+ sg_list_sz = sizeof(*qp->r_sg_list) *
+ (init_attr->cap.max_recv_sge - 1);
+ qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node);
+ if (!qp)
+ goto bail_swq;
+
+ RCU_INIT_POINTER(qp->next, NULL);
+
+ /*
+ * Driver needs to set up its private QP structure and do any
+ * initialization that is needed.
+ */
+ priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
+ if (!priv)
+ goto bail_qp;
+ qp->priv = priv;
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
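+ /*
+ * The expression above encodes the IBTA local ACK timeout,
+ * 4.096 usec * 2^timeout: e.g. timeout == 14 would give
+ * 4096 ns * 16384, roughly 67 ms, before a retry fires.
+ */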
+ if (init_attr->srq) {
+ sz = 0;
+ } else {
+ qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+ qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ sizeof(struct rvt_rwqe);
+ if (udata)
+ qp->r_rq.wq = vmalloc_user(
+ sizeof(struct rvt_rwq) +
+ qp->r_rq.size * sz);
+ else if (gfp == GFP_NOIO)
+ qp->r_rq.wq = __vmalloc(
+ sizeof(struct rvt_rwq) +
+ qp->r_rq.size * sz,
+ gfp, PAGE_KERNEL);
+ else
+ qp->r_rq.wq = vmalloc_node(
+ sizeof(struct rvt_rwq) +
+ qp->r_rq.size * sz,
+ rdi->dparms.node);
+ if (!qp->r_rq.wq)
+ goto bail_driver_priv;
+ }
+
+ /*
+ * ib_create_qp() will initialize qp->ibqp
+ * except for qp->ibqp.qp_num.
+ */
+ spin_lock_init(&qp->r_lock);
+ spin_lock_init(&qp->s_hlock);
+ spin_lock_init(&qp->s_lock);
+ spin_lock_init(&qp->r_rq.lock);
+ atomic_set(&qp->refcount, 0);
+ init_waitqueue_head(&qp->wait);
+ init_timer(&qp->s_timer);
+ qp->s_timer.data = (unsigned long)qp;
+ INIT_LIST_HEAD(&qp->rspwait);
+ qp->state = IB_QPS_RESET;
+ qp->s_wq = swq;
+ qp->s_size = init_attr->cap.max_send_wr + 1;
+ qp->s_avail = init_attr->cap.max_send_wr;
+ qp->s_max_sge = init_attr->cap.max_send_sge;
+ if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+ qp->s_flags = RVT_S_SIGNAL_REQ_WR;
+
+ err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
+ init_attr->qp_type,
+ init_attr->port_num, gfp);
+ if (err < 0) {
+ ret = ERR_PTR(err);
+ goto bail_rq_wq;
+ }
+ qp->ibqp.qp_num = err;
+ qp->port_num = init_attr->port_num;
+ rvt_reset_qp(rdi, qp, init_attr->qp_type);
+ break;
+
+ default:
+ /* Don't support raw QPs */
+ return ERR_PTR(-EINVAL);
+ }
+
+ init_attr->cap.max_inline_data = 0;
+
+ /*
+ * Return the address of the RWQ as the offset to mmap.
+ * See rvt_mmap() for details.
+ */
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ if (!qp->r_rq.wq) {
+ __u64 offset = 0;
+
+ err = ib_copy_to_udata(udata, &offset,
+ sizeof(offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_qpn;
+ }
+ } else {
+ u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
+
+ qp->ip = rvt_create_mmap_info(rdi, s,
+ ibpd->uobject->context,
+ qp->r_rq.wq);
+ if (!qp->ip) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_qpn;
+ }
+
+ err = ib_copy_to_udata(udata, &qp->ip->offset,
+ sizeof(qp->ip->offset));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto bail_ip;
+ }
+ }
+ qp->pid = current->pid;
+ }
+
+ spin_lock(&rdi->n_qps_lock);
+ if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
+ spin_unlock(&rdi->n_qps_lock);
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_ip;
+ }
+
+ rdi->n_qps_allocated++;
+ /*
+ * Maintain a busy_jiffies variable that will be added to the timeout
+ * period in mod_retry_timer and add_retry_timer. This busy jiffies
+ * is scaled by the number of rc qps created for the device to reduce
+ * the number of timeouts occurring when there is a large number of
+ * qps. busy_jiffies is incremented every rc qp scaling interval.
+ * The scaling interval is selected based on extensive performance
+ * evaluation of targeted workloads.
+ */
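+ /*
+ * As an illustration only (the actual RC_QP_SCALING_INTERVAL
+ * is defined elsewhere; 50 is an assumed value): with an
+ * interval of 50, creating the 150th RC QP moves busy_jiffies
+ * from 2 to 3, adding one more jiffy to every retry timeout
+ * on the device.
+ */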
+ if (init_attr->qp_type == IB_QPT_RC) {
+ rdi->n_rc_qps++;
+ rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
+ }
+ spin_unlock(&rdi->n_qps_lock);
+
+ if (qp->ip) {
+ spin_lock_irq(&rdi->pending_lock);
+ list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
+ spin_unlock_irq(&rdi->pending_lock);
+ }
+
+ ret = &qp->ibqp;
+
+ /*
+ * We have our QP and it's good; now keep track of what types of opcodes
+ * can be processed on this QP. We do this by keeping track of what the
+ * 3 high order bits of the opcode are.
+ */
+ switch (init_attr->qp_type) {
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ break;
+ case IB_QPT_RC:
+ qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ break;
+ case IB_QPT_UC:
+ qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ break;
+ default:
+ ret = ERR_PTR(-EINVAL);
+ goto bail_ip;
+ }
+
+ return ret;
+
+bail_ip:
+ if (qp->ip)
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
+
+bail_qpn:
+ free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
+
+bail_rq_wq:
+ vfree(qp->r_rq.wq);
+
+bail_driver_priv:
+ rdi->driver_f.qp_priv_free(rdi, qp);
+
+bail_qp:
+ kfree(qp);
+
+bail_swq:
+ vfree(swq);
+
+ return ret;
+}
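+
+/*
+ * Illustrative caller-side usage through the core verbs API (standard
+ * ib_create_qp(); names such as pd, scq and rcq are assumed):
+ *
+ *	struct ib_qp_init_attr attr = {
+ *		.send_cq = scq,
+ *		.recv_cq = rcq,
+ *		.cap = { .max_send_wr = 128, .max_recv_wr = 128,
+ *			 .max_send_sge = 1, .max_recv_sge = 1 },
+ *		.sq_sig_type = IB_SIGNAL_REQ_WR,
+ *		.qp_type = IB_QPT_RC,
+ *	};
+ *	struct ib_qp *qp = ib_create_qp(pd, &attr);
+ *
+ *	if (IS_ERR(qp))
+ *		return PTR_ERR(qp);
+ */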
+
+/**
+ * rvt_error_qp - put a QP into the error state
+ * @qp: the QP to put into the error state
+ * @err: the receive completion error to signal if a RWQE is active
+ *
+ * Flushes both send and receive work queues.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
+ * If we are already in error state, just return.
+ *
+ * Return: true if last WQE event should be generated.
+ */
+int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
+{
+ struct ib_wc wc;
+ int ret = 0;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
+ goto bail;
+
+ qp->state = IB_QPS_ERR;
+
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ }
+
+ if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
+ qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
+
+ rdi->driver_f.notify_error_qp(qp);
+
+ /* Schedule the sending tasklet to drain the send work queue. */
+ if (ACCESS_ONCE(qp->s_last) != qp->s_head)
+ rdi->driver_f.schedule_send(qp);
+
+ rvt_clear_mr_refs(qp, 0);
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+
+ if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
+ wc.wr_id = qp->r_wr_id;
+ wc.status = err;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wc.status = IB_WC_WR_FLUSH_ERR;
+
+ if (qp->r_rq.wq) {
+ struct rvt_rwq *wq;
+ u32 head;
+ u32 tail;
+
+ spin_lock(&qp->r_rq.lock);
+
+ /* sanity check pointers before trusting them */
+ wq = qp->r_rq.wq;
+ head = wq->head;
+ if (head >= qp->r_rq.size)
+ head = 0;
+ tail = wq->tail;
+ if (tail >= qp->r_rq.size)
+ tail = 0;
+ while (tail != head) {
+ wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
+ if (++tail >= qp->r_rq.size)
+ tail = 0;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ }
+ wq->tail = tail;
+
+ spin_unlock(&qp->r_rq.lock);
+ } else if (qp->ibqp.event_handler) {
+ ret = 1;
+ }
+
+bail:
+ return ret;
+}
+EXPORT_SYMBOL(rvt_error_qp);
+
+/*
+ * Put the QP into the hash table.
+ * The hash table holds a reference to the QP.
+ */
+static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
+ unsigned long flags;
+
+ atomic_inc(&qp->refcount);
+ spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
+
+ if (qp->ibqp.qp_num <= 1) {
+ rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
+ } else {
+ u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
+
+ qp->next = rdi->qp_dev->qp_table[n];
+ rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
+ trace_rvt_qpinsert(qp, n);
+ }
+
+ spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
+}
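+
+/*
+ * The readers this insert supports walk a bucket under RCU, roughly
+ * as below (an illustrative sketch; the real lookup helper lives
+ * elsewhere in rdmavt):
+ *
+ *	rcu_read_lock();
+ *	for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
+ *	     qp = rcu_dereference(qp->next))
+ *		if (qp->ibqp.qp_num == qpn)
+ *			break;
+ *	rcu_read_unlock();
+ */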
+
+/**
+ * rvt_modify_qp - modify the attributes of a queue pair
+ * @ibqp: the queue pair whose attributes we're modifying
+ * @attr: the new attributes
+ * @attr_mask: the mask of attributes to modify
+ * @udata: user data for libibverbs.so
+ *
+ * Return: 0 on success, otherwise returns an errno.
+ */
+int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ struct ib_event ev;
+ int lastwqe = 0;
+ int mig = 0;
+ int pmtu = 0; /* for gcc warning only */
+ enum rdma_link_layer link;
+
+ link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ?
+ attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask, link))
+ goto inval;
+
+ if (rdi->driver_f.check_modify_qp &&
+ rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
+ goto inval;
+
+ if (attr_mask & IB_QP_AV) {
+ if (attr->ah_attr.dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto inval;
+ if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ if (attr->alt_ah_attr.dlid >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE))
+ goto inval;
+ if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
+ goto inval;
+ if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ if (attr->pkey_index >= rvt_get_npkeys(rdi))
+ goto inval;
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ if (attr->min_rnr_timer > 31)
+ goto inval;
+
+ if (attr_mask & IB_QP_PORT)
+ if (qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI ||
+ attr->port_num == 0 ||
+ attr->port_num > ibqp->device->phys_port_cnt)
+ goto inval;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ if (attr->dest_qp_num > RVT_QPN_MASK)
+ goto inval;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ if (attr->retry_cnt > 7)
+ goto inval;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ if (attr->rnr_retry > 7)
+ goto inval;
+
+ /*
+ * Don't allow invalid path_mtu values. OK to set greater
+ * than the active mtu (or even the max_cap, if we have tuned
+ * that to a small mtu). We'll set qp->path_mtu
+ * to the lesser of requested attribute mtu and active,
+ * for packetizing messages.
+ * Note that the QP port has to be set in INIT and MTU in RTR.
+ */
+ if (attr_mask & IB_QP_PATH_MTU) {
+ pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
+ if (pmtu < 0)
+ goto inval;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ if (attr->path_mig_state == IB_MIG_REARM) {
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ goto inval;
+ if (new_state != IB_QPS_RTS)
+ goto inval;
+ } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
+ if (qp->s_mig_state == IB_MIG_REARM)
+ goto inval;
+ if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
+ goto inval;
+ if (qp->s_mig_state == IB_MIG_ARMED)
+ mig = 1;
+ } else {
+ goto inval;
+ }
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
+ goto inval;
+
+ switch (new_state) {
+ case IB_QPS_RESET:
+ if (qp->state != IB_QPS_RESET)
+ rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ break;
+
+ case IB_QPS_RTR:
+ /* Allow event to re-trigger if QP set to RTR more than once */
+ qp->r_flags &= ~RVT_R_COMM_EST;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQD:
+ qp->s_draining = qp->s_last != qp->s_cur;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_SQE:
+ if (qp->ibqp.qp_type == IB_QPT_RC)
+ goto inval;
+ qp->state = new_state;
+ break;
+
+ case IB_QPS_ERR:
+ lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ break;
+
+ default:
+ qp->state = new_state;
+ break;
+ }
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ qp->s_pkey_index = attr->pkey_index;
+
+ if (attr_mask & IB_QP_PORT)
+ qp->port_num = attr->port_num;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ qp->remote_qpn = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
+ qp->s_psn = qp->s_next_psn;
+ qp->s_sending_psn = qp->s_next_psn;
+ qp->s_last_psn = qp->s_next_psn - 1;
+ qp->s_sending_hpsn = qp->s_last_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->qp_access_flags = attr->qp_access_flags;
+
+ if (attr_mask & IB_QP_AV) {
+ qp->remote_ah_attr = attr->ah_attr;
+ qp->s_srate = attr->ah_attr.static_rate;
+ qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
+ }
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ qp->alt_ah_attr = attr->alt_ah_attr;
+ qp->s_alt_pkey_index = attr->alt_pkey_index;
+ }
+
+ if (attr_mask & IB_QP_PATH_MIG_STATE) {
+ qp->s_mig_state = attr->path_mig_state;
+ if (mig) {
+ qp->remote_ah_attr = qp->alt_ah_attr;
+ qp->port_num = qp->alt_ah_attr.port_num;
+ qp->s_pkey_index = qp->s_alt_pkey_index;
+ }
+ }
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
+ qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
+ qp->log_pmtu = ilog2(qp->pmtu);
+ }
+
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ qp->s_retry_cnt = attr->retry_cnt;
+ qp->s_retry = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ qp->s_rnr_retry_cnt = attr->rnr_retry;
+ qp->s_rnr_retry = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp->r_min_rnr_timer = attr->min_rnr_timer;
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ qp->timeout = attr->timeout;
+ qp->timeout_jiffies =
+ usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
+ 1000UL);
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ qp->s_max_rd_atomic = attr->max_rd_atomic;
+
+ if (rdi->driver_f.modify_qp)
+ rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
+
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ rvt_insert_qp(rdi, qp);
+
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ if (mig) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event = IB_EVENT_PATH_MIG;
+ qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
+ }
+ return 0;
+
+inval:
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+ return -EINVAL;
+}
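+
+/*
+ * Illustrative caller-side transition through the standard core verbs
+ * call (not part of this patch): moving a QP from RESET to INIT.
+ *
+ *	struct ib_qp_attr attr = {
+ *		.qp_state = IB_QPS_INIT,
+ *		.pkey_index = 0,
+ *		.port_num = 1,
+ *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
+ *	};
+ *
+ *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
+ *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
+ */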
+
+/**
+ * rvt_free_qpn - free a qpn from the bitmap
+ * @qpt: QP table
+ * @qpn: queue pair number to free
+ */
+static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
+{
+ struct rvt_qpn_map *map;
+
+ map = qpt->map + qpn / RVT_BITS_PER_PAGE;
+ if (map->page)
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
+}
+
+/**
+ * rvt_destroy_qp - destroy a queue pair
+ * @ibqp: the queue pair to destroy
+ *
+ * Note that this can be called while the QP is actively sending or
+ * receiving!
+ *
+ * Return: 0 on success.
+ */
+int rvt_destroy_qp(struct ib_qp *ibqp)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+
+ /* qpn is now available for use again */
+ rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
+
+ spin_lock(&rdi->n_qps_lock);
+ rdi->n_qps_allocated--;
+ if (qp->ibqp.qp_type == IB_QPT_RC) {
+ rdi->n_rc_qps--;
+ rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
+ }
+ spin_unlock(&rdi->n_qps_lock);
+
+ if (qp->ip)
+ kref_put(&qp->ip->ref, rvt_release_mmap_info);
+ else
+ vfree(qp->r_rq.wq);
+ vfree(qp->s_wq);
+ rdi->driver_f.qp_priv_free(rdi, qp);
+ kfree(qp);
+ return 0;
+}
+
+/**
+ * rvt_query_qp - query an ibqp
+ * @ibqp: IB qp to query
+ * @attr: attr struct to fill in
+ * @attr_mask: attr mask ignored
+ * @init_attr: struct to fill in
+ *
+ * Return: always 0
+ */
+int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ attr->qp_state = qp->state;
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = qp->path_mtu;
+ attr->path_mig_state = qp->s_mig_state;
+ attr->qkey = qp->qkey;
+ attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
+ attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
+ attr->dest_qp_num = qp->remote_qpn;
+ attr->qp_access_flags = qp->qp_access_flags;
+ attr->cap.max_send_wr = qp->s_size - 1;
+ attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
+ attr->cap.max_send_sge = qp->s_max_sge;
+ attr->cap.max_recv_sge = qp->r_rq.max_sge;
+ attr->cap.max_inline_data = 0;
+ attr->ah_attr = qp->remote_ah_attr;
+ attr->alt_ah_attr = qp->alt_ah_attr;
+ attr->pkey_index = qp->s_pkey_index;
+ attr->alt_pkey_index = qp->s_alt_pkey_index;
+ attr->en_sqd_async_notify = 0;
+ attr->sq_draining = qp->s_draining;
+ attr->max_rd_atomic = qp->s_max_rd_atomic;
+ attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
+ attr->min_rnr_timer = qp->r_min_rnr_timer;
+ attr->port_num = qp->port_num;
+ attr->timeout = qp->timeout;
+ attr->retry_cnt = qp->s_retry_cnt;
+ attr->rnr_retry = qp->s_rnr_retry_cnt;
+ attr->alt_port_num = qp->alt_ah_attr.port_num;
+ attr->alt_timeout = qp->alt_timeout;
+
+ init_attr->event_handler = qp->ibqp.event_handler;
+ init_attr->qp_context = qp->ibqp.qp_context;
+ init_attr->send_cq = qp->ibqp.send_cq;
+ init_attr->recv_cq = qp->ibqp.recv_cq;
+ init_attr->srq = qp->ibqp.srq;
+ init_attr->cap = attr->cap;
+ if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
+ init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ else
+ init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr->qp_type = qp->ibqp.qp_type;
+ init_attr->port_num = qp->port_num;
+ return 0;
+}
+
+/**
+ * rvt_post_recv - post a receive on a QP
+ * @ibqp: the QP to post the receive on
+ * @wr: the WR to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success otherwise errno
+ */
+int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_rwq *wq = qp->r_rq.wq;
+ unsigned long flags;
+ int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
+ !qp->ibqp.srq;
+
+ /* Check that state is OK to post receive. */
+ if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ for (; wr; wr = wr->next) {
+ struct rvt_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->r_rq.lock, flags);
+ next = wq->head + 1;
+ if (next >= qp->r_rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+ if (unlikely(qp_err_flush)) {
+ struct ib_wc wc;
+
+ memset(&wc, 0, sizeof(wc));
+ wc.qp = &qp->ibqp;
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = wr->wr_id;
+ wc.status = IB_WC_WR_FLUSH_ERR;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
+ } else {
+ wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /*
+ * Make sure queue entry is written
+ * before the head index.
+ */
+ smp_wmb();
+ wq->head = next;
+ }
+ spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+ }
+ return 0;
+}
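+
+/*
+ * Illustrative consumer usage through the standard verbs entry point
+ * (dma_addr, len and mr are assumed to come from the caller):
+ *
+ *	struct ib_sge sge = {
+ *		.addr = dma_addr, .length = len, .lkey = mr->lkey,
+ *	};
+ *	struct ib_recv_wr wr = {
+ *		.wr_id = 1, .sg_list = &sge, .num_sge = 1,
+ *	};
+ *	struct ib_recv_wr *bad_wr;
+ *
+ *	ret = ib_post_recv(qp, &wr, &bad_wr);
+ */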
+
+/**
+ * qp_get_savail - return the number of available send entries
+ * @qp: the qp
+ *
+ * This assumes the s_hlock is held but the s_last
+ * qp variable is uncontrolled.
+ */
+static inline u32 qp_get_savail(struct rvt_qp *qp)
+{
+ u32 slast;
+ u32 ret;
+
+ smp_read_barrier_depends(); /* see rc.c */
+ slast = ACCESS_ONCE(qp->s_last);
+ if (qp->s_head >= slast)
+ ret = qp->s_size - (qp->s_head - slast);
+ else
+ ret = slast - qp->s_head;
+ return ret - 1;
+}
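+
+/*
+ * Worked example for qp_get_savail() (numbers are illustrative):
+ * with s_size == 8, s_head == 5 and s_last == 2 the ring still has
+ * 8 - (5 - 2) = 5 free slots; the final "- 1" keeps one slot unused
+ * so a full ring can be told apart from an empty one, leaving 4
+ * postable entries.
+ */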
+
+/**
+ * rvt_post_one_wr - post one RC, UC, or UD send work request
+ * @qp: the QP to post on
+ * @wr: the work request to send
+ */
+static int rvt_post_one_wr(struct rvt_qp *qp,
+ struct ib_send_wr *wr,
+ int *call_send)
+{
+ struct rvt_swqe *wqe;
+ u32 next;
+ int i;
+ int j;
+ int acc;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ u8 log_pmtu;
+ int ret;
+
+ /* IB spec says that num_sge == 0 is OK. */
+ if (unlikely(wr->num_sge > qp->s_max_sge))
+ return -EINVAL;
+
+ /*
+ * Don't allow RDMA reads or atomic operations on UC or
+ * undefined operations.
+ * Make sure buffer is large enough to hold the result for atomics.
+ */
+ if (qp->ibqp.qp_type == IB_QPT_UC) {
+ if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
+ return -EINVAL;
+ } else if (qp->ibqp.qp_type != IB_QPT_RC) {
+ /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
+ if (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)
+ return -EINVAL;
+ /* Check UD destination address PD */
+ if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
+ return -EINVAL;
+ } else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
+ return -EINVAL;
+ } else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
+ (wr->num_sge == 0 ||
+ wr->sg_list[0].length < sizeof(u64) ||
+ wr->sg_list[0].addr & (sizeof(u64) - 1))) {
+ return -EINVAL;
+ } else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
+ return -EINVAL;
+ }
+ /* check for avail */
+ if (unlikely(!qp->s_avail)) {
+ qp->s_avail = qp_get_savail(qp);
+ if (WARN_ON(qp->s_avail > (qp->s_size - 1)))
+ rvt_pr_err(rdi,
+ "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
+ qp->ibqp.qp_num, qp->s_size, qp->s_avail,
+ qp->s_head, qp->s_tail, qp->s_cur,
+ qp->s_acked, qp->s_last);
+ if (!qp->s_avail)
+ return -ENOMEM;
+ }
+ next = qp->s_head + 1;
+ if (next >= qp->s_size)
+ next = 0;
+
+ rkt = &rdi->lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.pd);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_head);
+
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC)
+ memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
+ else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wr->opcode == IB_WR_RDMA_WRITE ||
+ wr->opcode == IB_WR_RDMA_READ)
+ memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
+ else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
+ memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
+ else
+ memcpy(&wqe->wr, wr, sizeof(wqe->wr));
+
+ wqe->length = 0;
+ j = 0;
+ if (wr->num_sge) {
+ acc = wr->opcode >= IB_WR_RDMA_READ ?
+ IB_ACCESS_LOCAL_WRITE : 0;
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 length = wr->sg_list[i].length;
+ int ok;
+
+ if (length == 0)
+ continue;
+ ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
+ &wr->sg_list[i], acc);
+ if (!ok) {
+ ret = -EINVAL;
+ goto bail_inval_free;
+ }
+ wqe->length += length;
+ j++;
+ }
+ wqe->wr.num_sge = j;
+ }
+
+ /* general part of wqe valid - allow for driver checks */
+ if (rdi->driver_f.check_send_wqe) {
+ ret = rdi->driver_f.check_send_wqe(qp, wqe);
+ if (ret < 0)
+ goto bail_inval_free;
+ if (ret)
+ *call_send = ret;
+ }
+
+ log_pmtu = qp->log_pmtu;
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC) {
+ struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
+
+ log_pmtu = ah->log_pmtu;
+ atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
+ }
+
+ wqe->ssn = qp->s_ssn++;
+ wqe->psn = qp->s_next_psn;
+ wqe->lpsn = wqe->psn +
+ (wqe->length ? ((wqe->length - 1) >> log_pmtu) : 0);
+ qp->s_next_psn = wqe->lpsn + 1;
+ trace_rvt_post_one_wr(qp, wqe);
+ smp_wmb(); /* see request builders */
+ qp->s_avail--;
+ qp->s_head = next;
+
+ return 0;
+
+bail_inval_free:
+ /* release mr holds */
+ while (j) {
+ struct rvt_sge *sge = &wqe->sg_list[--j];
+
+ rvt_put_mr(sge->mr);
+ }
+ return ret;
+}
+
+/**
+ * rvt_post_send - post a send on a QP
+ * @ibqp: the QP to post the send on
+ * @wr: the list of work requests to post
+ * @bad_wr: the first bad WR is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success else errno
+ */
+int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+ unsigned long flags = 0;
+ int call_send;
+ unsigned nreq = 0;
+ int err = 0;
+
+ spin_lock_irqsave(&qp->s_hlock, flags);
+
+ /*
+ * Ensure QP state is such that we can send. If not bail out early,
+ * there is no need to do this every time we post a send.
+ */
+ if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
+ spin_unlock_irqrestore(&qp->s_hlock, flags);
+ return -EINVAL;
+ }
+
+ /*
+ * If the send queue is empty, and we only have a single WR then just go
+ * ahead and kick the send engine into gear. Otherwise we will always
+ * just schedule the send to happen later.
+ */
+ call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
+
+ for (; wr; wr = wr->next) {
+ err = rvt_post_one_wr(qp, wr, &call_send);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto bail;
+ }
+ nreq++;
+ }
+bail:
+ spin_unlock_irqrestore(&qp->s_hlock, flags);
+ if (nreq) {
+ if (call_send)
+ rdi->driver_f.do_send(qp);
+ else
+ rdi->driver_f.schedule_send_no_lock(qp);
+ }
+ return err;
+}
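+
+/*
+ * Illustrative caller usage through the standard verbs entry point
+ * (dma_addr, len and mr are assumed to come from the caller):
+ *
+ *	struct ib_sge sge = {
+ *		.addr = dma_addr, .length = len, .lkey = mr->lkey,
+ *	};
+ *	struct ib_send_wr wr = {
+ *		.wr_id = 2, .sg_list = &sge, .num_sge = 1,
+ *		.opcode = IB_WR_SEND, .send_flags = IB_SEND_SIGNALED,
+ *	};
+ *	struct ib_send_wr *bad_wr;
+ *
+ *	ret = ib_post_send(qp, &wr, &bad_wr);
+ */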
+
+/**
+ * rvt_post_srq_recv - post a receive on a shared receive queue
+ * @ibsrq: the SRQ to post the receive on
+ * @wr: the list of work requests to post
+ * @bad_wr: A pointer to the first WR to cause a problem is put here
+ *
+ * This may be called from interrupt context.
+ *
+ * Return: 0 on success else errno
+ */
+int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_rwq *wq;
+ unsigned long flags;
+
+ for (; wr; wr = wr->next) {
+ struct rvt_rwqe *wqe;
+ u32 next;
+ int i;
+
+ if ((unsigned)wr->num_sge > srq->rq.max_sge) {
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&srq->rq.lock, flags);
+ wq = srq->rq.wq;
+ next = wq->head + 1;
+ if (next >= srq->rq.size)
+ next = 0;
+ if (next == wq->tail) {
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+
+ wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
+ wqe->wr_id = wr->wr_id;
+ wqe->num_sge = wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++)
+ wqe->sg_list[i] = wr->sg_list[i];
+ /* Make sure queue entry is written before the head index. */
+ smp_wmb();
+ wq->head = next;
+ spin_unlock_irqrestore(&srq->rq.lock, flags);
+ }
+ return 0;
+}
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
new file mode 100644
index 000000000000..8409f80d5f25
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -0,0 +1,69 @@
+#ifndef DEF_RVTQP_H
+#define DEF_RVTQP_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+
+int rvt_driver_qp_init(struct rvt_dev_info *rdi);
+void rvt_qp_exit(struct rvt_dev_info *rdi);
+struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int rvt_destroy_qp(struct ib_qp *ibqp);
+int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr);
+int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+#endif /* DEF_RVTQP_H */
diff --git a/drivers/staging/rdma/hfi1/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index 67786d417493..f7c48e9023de 100644
--- a/drivers/staging/rdma/hfi1/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -52,96 +49,50 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include "verbs.h"
+#include "srq.h"
+#include "vt.h"
/**
- * hfi1_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: A pointer to the first WR to cause a problem is put here
+ * rvt_driver_srq_init - init srq resources on a per driver basis
+ * @rdi: rvt dev structure
*
- * This may be called from interrupt context.
+ * Do any initialization needed when a driver registers with rdmavt.
*/
-int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
+void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
- struct hfi1_srq *srq = to_isrq(ibsrq);
- struct hfi1_rwq *wq;
- unsigned long flags;
- int ret;
-
- for (; wr; wr = wr->next) {
- struct hfi1_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > srq->rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&srq->rq.lock, flags);
- wq = srq->rq.wq;
- next = wq->head + 1;
- if (next >= srq->rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&srq->rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&srq->rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
+ spin_lock_init(&rdi->n_srqs_lock);
+ rdi->n_srqs_allocated = 0;
}
/**
- * hfi1_create_srq - create a shared receive queue
+ * rvt_create_srq - create a shared receive queue
* @ibpd: the protection domain of the SRQ to create
* @srq_init_attr: the attributes of the SRQ
* @udata: data from libibverbs when creating a user SRQ
+ *
+ * Return: Allocated srq object
*/
-struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata)
+struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
{
- struct hfi1_ibdev *dev = to_idev(ibpd->device);
- struct hfi1_srq *srq;
+ struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
+ struct rvt_srq *srq;
u32 sz;
struct ib_srq *ret;
- if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
- ret = ERR_PTR(-ENOSYS);
- goto done;
- }
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC)
+ return ERR_PTR(-ENOSYS);
if (srq_init_attr->attr.max_sge == 0 ||
- srq_init_attr->attr.max_sge > hfi1_max_srq_sges ||
+ srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
srq_init_attr->attr.max_wr == 0 ||
- srq_init_attr->attr.max_wr > hfi1_max_srq_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto done;
- }
+ srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
+ return ERR_PTR(-EINVAL);
srq = kmalloc(sizeof(*srq), GFP_KERNEL);
- if (!srq) {
- ret = ERR_PTR(-ENOMEM);
- goto done;
- }
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
/*
* Need to use vmalloc() if we want to support large #s of entries.
@@ -149,8 +100,8 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
srq->rq.size = srq_init_attr->attr.max_wr + 1;
srq->rq.max_sge = srq_init_attr->attr.max_sge;
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
- sizeof(struct hfi1_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + srq->rq.size * sz);
+ sizeof(struct rvt_rwqe);
+ srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
if (!srq->rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_srq;
@@ -158,15 +109,15 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
/*
* Return the address of the RWQ as the offset to mmap.
- * See hfi1_mmap() for details.
+ * See rvt_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
int err;
- u32 s = sizeof(struct hfi1_rwq) + srq->rq.size * sz;
+ u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
srq->ip =
- hfi1_create_mmap_info(dev, s, ibpd->uobject->context,
- srq->rq.wq);
+ rvt_create_mmap_info(dev, s, ibpd->uobject->context,
+ srq->rq.wq);
if (!srq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wq;
@@ -178,8 +129,9 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
ret = ERR_PTR(err);
goto bail_ip;
}
- } else
+ } else {
srq->ip = NULL;
+ }
/*
* ib_create_srq() will initialize srq->ibsrq.
@@ -190,7 +142,7 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
srq->limit = srq_init_attr->attr.srq_limit;
spin_lock(&dev->n_srqs_lock);
- if (dev->n_srqs_allocated == hfi1_max_srqs) {
+ if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
spin_unlock(&dev->n_srqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_ip;
@@ -205,8 +157,7 @@ struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
spin_unlock_irq(&dev->pending_lock);
}
- ret = &srq->ibsrq;
- goto done;
+ return &srq->ibsrq;
bail_ip:
kfree(srq->ip);
@@ -214,46 +165,44 @@ bail_wq:
vfree(srq->rq.wq);
bail_srq:
kfree(srq);
-done:
return ret;
}
/**
- * hfi1_modify_srq - modify a shared receive queue
+ * rvt_modify_srq - modify a shared receive queue
* @ibsrq: the SRQ to modify
* @attr: the new attributes of the SRQ
* @attr_mask: indicates which attributes to modify
* @udata: user data for libibverbs.so
+ *
+ * Return: 0 on success
*/
-int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata)
+int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
{
- struct hfi1_srq *srq = to_isrq(ibsrq);
- struct hfi1_rwq *wq;
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
+ struct rvt_rwq *wq;
int ret = 0;
if (attr_mask & IB_SRQ_MAX_WR) {
- struct hfi1_rwq *owq;
- struct hfi1_rwqe *p;
+ struct rvt_rwq *owq;
+ struct rvt_rwqe *p;
u32 sz, size, n, head, tail;
/* Check that the requested sizes are below the limits. */
- if ((attr->max_wr > hfi1_max_srq_wrs) ||
+ if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
((attr_mask & IB_SRQ_LIMIT) ?
- attr->srq_limit : srq->limit) > attr->max_wr) {
- ret = -EINVAL;
- goto bail;
- }
+ attr->srq_limit : srq->limit) > attr->max_wr)
+ return -EINVAL;
- sz = sizeof(struct hfi1_rwqe) +
+ sz = sizeof(struct rvt_rwqe) +
srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct hfi1_rwq) + size * sz);
- if (!wq) {
- ret = -ENOMEM;
- goto bail;
- }
+ wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
+ if (!wq)
+ return -ENOMEM;
/* Check that we can write the offset to mmap. */
if (udata && udata->inlen >= sizeof(__u64)) {
@@ -264,8 +213,8 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
sizeof(offset_addr));
if (ret)
goto bail_free;
- udata->outbuf =
- (void __user *) (unsigned long) offset_addr;
+ udata->outbuf = (void __user *)
+ (unsigned long)offset_addr;
ret = ib_copy_to_udata(udata, &offset,
sizeof(offset));
if (ret)
@@ -296,16 +245,16 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
n = 0;
p = wq->wq;
while (tail != head) {
- struct hfi1_rwqe *wqe;
+ struct rvt_rwqe *wqe;
int i;
- wqe = get_rwqe_ptr(&srq->rq, tail);
+ wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
p->wr_id = wqe->wr_id;
p->num_sge = wqe->num_sge;
for (i = 0; i < wqe->num_sge; i++)
p->sg_list[i] = wqe->sg_list[i];
n++;
- p = (struct hfi1_rwqe *)((char *)p + sz);
+ p = (struct rvt_rwqe *)((char *)p + sz);
if (++tail >= srq->rq.size)
tail = 0;
}
@@ -320,21 +269,21 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
vfree(owq);
if (srq->ip) {
- struct hfi1_mmap_info *ip = srq->ip;
- struct hfi1_ibdev *dev = to_idev(srq->ibsrq.device);
- u32 s = sizeof(struct hfi1_rwq) + size * sz;
+ struct rvt_mmap_info *ip = srq->ip;
+ struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
+ u32 s = sizeof(struct rvt_rwq) + size * sz;
- hfi1_update_mmap_info(dev, ip, s, wq);
+ rvt_update_mmap_info(dev, ip, s, wq);
/*
* Return the offset to mmap.
- * See hfi1_mmap() for details.
+ * See rvt_mmap() for details.
*/
if (udata && udata->inlen >= sizeof(__u64)) {
ret = ib_copy_to_udata(udata, &ip->offset,
sizeof(ip->offset));
if (ret)
- goto bail;
+ return ret;
}
/*
@@ -355,19 +304,24 @@ int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
srq->limit = attr->srq_limit;
spin_unlock_irq(&srq->rq.lock);
}
- goto bail;
+ return ret;
bail_unlock:
spin_unlock_irq(&srq->rq.lock);
bail_free:
vfree(wq);
-bail:
return ret;
}
-int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+/**
+ * rvt_query_srq - query srq data
+ * @ibsrq: srq to query
+ * @attr: return info in attr
+ *
+ * Return: always 0
+ */
+int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
- struct hfi1_srq *srq = to_isrq(ibsrq);
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
attr->max_wr = srq->rq.size - 1;
attr->max_sge = srq->rq.max_sge;
@@ -376,19 +330,21 @@ int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
}
/**
- * hfi1_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
+ * rvt_destroy_srq - destroy an srq
+ * @ibsrq: srq object to destroy
+ *
+ * Return: always 0
*/
-int hfi1_destroy_srq(struct ib_srq *ibsrq)
+int rvt_destroy_srq(struct ib_srq *ibsrq)
{
- struct hfi1_srq *srq = to_isrq(ibsrq);
- struct hfi1_ibdev *dev = to_idev(ibsrq->device);
+ struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
+ struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
spin_lock(&dev->n_srqs_lock);
dev->n_srqs_allocated--;
spin_unlock(&dev->n_srqs_lock);
if (srq->ip)
- kref_put(&srq->ip->ref, hfi1_release_mmap_info);
+ kref_put(&srq->ip->ref, rvt_release_mmap_info);
else
vfree(srq->rq.wq);
kfree(srq);
diff --git a/drivers/infiniband/sw/rdmavt/srq.h b/drivers/infiniband/sw/rdmavt/srq.h
new file mode 100644
index 000000000000..bf0eaaf56465
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/srq.h
@@ -0,0 +1,62 @@
+#ifndef DEF_RVTSRQ_H
+#define DEF_RVTSRQ_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+void rvt_driver_srq_init(struct rvt_dev_info *rdi);
+struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata);
+int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
+int rvt_destroy_srq(struct ib_srq *ibsrq);
+
+#endif /* DEF_RVTSRQ_H */
diff --git a/drivers/infiniband/sw/rdmavt/trace.c b/drivers/infiniband/sw/rdmavt/trace.c
new file mode 100644
index 000000000000..d593285a349c
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
new file mode 100644
index 000000000000..6c0457db5499
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR rdmavt
+
+#if !defined(__RDMAVT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RDMAVT_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#define RDI_DEV_ENTRY(rdi) __string(dev, rdi->driver_f.get_card_name(rdi))
+#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rdi->driver_f.get_card_name(rdi))
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdmavt
+
+TRACE_EVENT(rvt_dbg,
+ TP_PROTO(struct rvt_dev_info *rdi,
+ const char *msg),
+ TP_ARGS(rdi, msg),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(rdi)
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(rdi);
+ __assign_str(msg, msg);
+ ),
+ TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
+);
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_qphash
+DECLARE_EVENT_CLASS(rvt_qphash_template,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, bucket)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->bucket = bucket;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x bucket %u",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->bucket
+ )
+);
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
+ TP_PROTO(struct rvt_qp *qp, u32 bucket),
+ TP_ARGS(qp, bucket));
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_tx
+
+#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode }
+#define show_wr_opcode(opcode) \
+__print_symbolic(opcode, \
+ wr_opcode_name(RDMA_WRITE), \
+ wr_opcode_name(RDMA_WRITE_WITH_IMM), \
+ wr_opcode_name(SEND), \
+ wr_opcode_name(SEND_WITH_IMM), \
+ wr_opcode_name(RDMA_READ), \
+ wr_opcode_name(ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(ATOMIC_FETCH_AND_ADD), \
+ wr_opcode_name(LSO), \
+ wr_opcode_name(SEND_WITH_INV), \
+ wr_opcode_name(RDMA_READ_WITH_INV), \
+ wr_opcode_name(LOCAL_INV), \
+ wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP), \
+ wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD))
+
+#define POS_PRN \
+"[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u"
+
+TRACE_EVENT(
+ rvt_post_one_wr,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
+ TP_ARGS(qp, wqe),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, psn)
+ __field(u32, lpsn)
+ __field(u32, length)
+ __field(u32, opcode)
+ __field(u32, size)
+ __field(u32, avail)
+ __field(u32, head)
+ __field(u32, last)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->psn = wqe->psn;
+ __entry->lpsn = wqe->lpsn;
+ __entry->length = wqe->length;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->size = qp->s_size;
+ __entry->avail = qp->s_avail;
+ __entry->head = qp->s_head;
+ __entry->last = qp->s_last;
+ ),
+ TP_printk(
+ POS_PRN,
+ __get_str(dev),
+ __entry->wr_id,
+ __entry->qpn,
+ __entry->psn,
+ __entry->lpsn,
+ __entry->length,
+ __entry->opcode, show_wr_opcode(__entry->opcode),
+ __entry->size,
+ __entry->avail,
+ __entry->head,
+ __entry->last
+ )
+);
+
+#endif /* __RDMAVT_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
new file mode 100644
index 000000000000..6caf5272ba1f
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -0,0 +1,873 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "vt.h"
+#include "trace.h"
+
+#define RVT_UVERBS_ABI_VERSION 2
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("RDMA Verbs Transport Library");
+
+static int rvt_init(void)
+{
+ /*
+ * rdmavt does not need to do anything special when it starts up. All it
+ * needs to do is sit and wait until a driver attempts registration.
+ */
+ return 0;
+}
+module_init(rvt_init);
+
+static void rvt_cleanup(void)
+{
+ /*
+ * Nothing to do at exit time either. The module won't be able to be
+ * removed until all drivers are gone which means all the dev structs
+ * are gone so there is really nothing to do.
+ */
+}
+module_exit(rvt_cleanup);
+
+/**
+ * rvt_alloc_device - allocate rdi
+ * @size: how big of a structure to allocate
+ * @nports: number of ports to allocate array slots for
+ *
+ * Use IB core device alloc to allocate space for the rdi which is assumed to be
+ * inside of the ib_device. Any extra space that drivers require should be
+ * included in size.
+ *
+ * We also allocate a port array based on the number of ports.
+ *
+ * Return: pointer to allocated rdi
+ */
+struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
+{
+ struct rvt_dev_info *rdi;
+
+ rdi = (struct rvt_dev_info *)ib_alloc_device(size);
+ if (!rdi)
+ return rdi;
+
+ rdi->ports = kcalloc(nports,
+ sizeof(struct rvt_ibport *),
+ GFP_KERNEL);
+ if (!rdi->ports) {
+ ib_dealloc_device(&rdi->ibdev);
+ return NULL;
+ }
+
+ return rdi;
+}
+EXPORT_SYMBOL(rvt_alloc_device);
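+
+/*
+ * Typical driver-side usage (a sketch; my_devdata and nports are
+ * assumed driver-specific names):
+ *
+ *	struct my_devdata *dd;
+ *
+ *	dd = (struct my_devdata *)rvt_alloc_device(sizeof(*dd), nports);
+ *	if (!dd)
+ *		return -ENOMEM;
+ */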
+
+static int rvt_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+ /*
+ * Return rvt_dev_info.dparms.props contents
+ */
+ *props = rdi->dparms.props;
+ return 0;
+}
+
+static int rvt_modify_device(struct ib_device *device,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ /*
+ * There is currently no need to supply this, based on qib and hfi1.
+ * Future drivers may need to implement it, though.
+ */
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * rvt_query_port - Passes the query port call to the driver
+ * @ibdev: Verbs IB dev
+ * @port_num: port number, 1 based from ib core
+ * @props: structure to hold returned properties
+ *
+ * Return: 0 on success
+ */
+static int rvt_query_port(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_attr *props)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_ibport *rvp;
+ int port_index = ibport_num_to_idx(ibdev, port_num);
+
+ if (port_index < 0)
+ return -EINVAL;
+
+ rvp = rdi->ports[port_index];
+ memset(props, 0, sizeof(*props));
+ props->sm_lid = rvp->sm_lid;
+ props->sm_sl = rvp->sm_sl;
+ props->port_cap_flags = rvp->port_cap_flags;
+ props->max_msg_sz = 0x80000000;
+ props->pkey_tbl_len = rvt_get_npkeys(rdi);
+ props->bad_pkey_cntr = rvp->pkey_violations;
+ props->qkey_viol_cntr = rvp->qkey_violations;
+ props->subnet_timeout = rvp->subnet_timeout;
+ props->init_type_reply = 0;
+
+ /* Populate the remaining ib_port_attr elements */
+ return rdi->driver_f.query_port_state(rdi, port_num, props);
+}
+
+/**
+ * rvt_modify_port - Modify port properties
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @port_modify_mask: How to change the port
+ * @props: Structure holding the port modifications
+ *
+ * Return: 0 on success
+ */
+static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct rvt_ibport *rvp;
+ int ret = 0;
+ int port_index = ibport_num_to_idx(ibdev, port_num);
+
+ if (port_index < 0)
+ return -EINVAL;
+
+ rvp = rdi->ports[port_index];
+ rvp->port_cap_flags |= props->set_port_cap_mask;
+ rvp->port_cap_flags &= ~props->clr_port_cap_mask;
+
+ if (props->set_port_cap_mask || props->clr_port_cap_mask)
+ rdi->driver_f.cap_mask_chg(rdi, port_num);
+ if (port_modify_mask & IB_PORT_SHUTDOWN)
+ ret = rdi->driver_f.shut_down_port(rdi, port_num);
+ if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
+ rvp->qkey_violations = 0;
+
+ return ret;
+}
+
+/**
+ * rvt_query_pkey - Return a pkey from the table at a given index
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @index: Index into pkey table
+ * @pkey: Filled in with the pkey at the given index
+ *
+ * Return: 0 on success, -EINVAL on an invalid port or index
+ */
+static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
+ u16 *pkey)
+{
+ /*
+ * The driver is responsible for keeping rvt_dev_info.pkey_table up to
+ * date; this function just returns the stored value. There is no need
+ * to lock: if a stale value is read and sent to the user, so be it,
+ * since there is no way to protect against that anyway.
+ */
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ int port_index;
+
+ port_index = ibport_num_to_idx(ibdev, port_num);
+ if (port_index < 0)
+ return -EINVAL;
+
+ if (index >= rvt_get_npkeys(rdi))
+ return -EINVAL;
+
+ *pkey = rvt_get_pkey(rdi, port_index, index);
+ return 0;
+}
+
+/**
+ * rvt_query_gid - Return a gid from the table
+ * @ibdev: Verbs IB dev
+ * @port_num: Port number, 1 based from ib core
+ * @guid_index: Index in table
+ * @gid: Gid to return
+ *
+ * Return: 0 on success
+ */
+static int rvt_query_gid(struct ib_device *ibdev, u8 port_num,
+ int guid_index, union ib_gid *gid)
+{
+ struct rvt_dev_info *rdi;
+ struct rvt_ibport *rvp;
+ int port_index;
+
+ /*
+ * The driver is responsible for updating the guid table, which is then
+ * used to craft the return value. This works similarly to how
+ * query_pkey() is handled.
+ */
+ port_index = ibport_num_to_idx(ibdev, port_num);
+ if (port_index < 0)
+ return -EINVAL;
+
+ rdi = ib_to_rvt(ibdev);
+ rvp = rdi->ports[port_index];
+
+ gid->global.subnet_prefix = rvp->gid_prefix;
+
+ return rdi->driver_f.get_guid_be(rdi, rvp, guid_index,
+ &gid->global.interface_id);
+}
+
+struct rvt_ucontext {
+ struct ib_ucontext ibucontext;
+};
+
+static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct rvt_ucontext, ibucontext);
+}
+
+/**
+ * rvt_alloc_ucontext - Allocate a user context
+ * @ibdev: Verbs IB dev
+ * @udata: User data allocated
+ */
+static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct rvt_ucontext *context;
+
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+ return &context->ibucontext;
+}
+
+/**
+ * rvt_dealloc_ucontext - Free a user context
+ * @context: User context to free
+ */
+static int rvt_dealloc_ucontext(struct ib_ucontext *context)
+{
+ kfree(to_iucontext(context));
+ return 0;
+}
+
+static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ struct ib_port_attr attr;
+ int err, port_index;
+
+ port_index = ibport_num_to_idx(ibdev, port_num);
+ if (port_index < 0)
+ return -EINVAL;
+
+ err = rvt_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = rdi->dparms.core_cap_flags;
+ immutable->max_mad_size = rdi->dparms.max_mad_size;
+
+ return 0;
+}
+
+enum {
+ MISC,
+ QUERY_DEVICE,
+ MODIFY_DEVICE,
+ QUERY_PORT,
+ MODIFY_PORT,
+ QUERY_PKEY,
+ QUERY_GID,
+ ALLOC_UCONTEXT,
+ DEALLOC_UCONTEXT,
+ GET_PORT_IMMUTABLE,
+ CREATE_QP,
+ MODIFY_QP,
+ DESTROY_QP,
+ QUERY_QP,
+ POST_SEND,
+ POST_RECV,
+ POST_SRQ_RECV,
+ CREATE_AH,
+ DESTROY_AH,
+ MODIFY_AH,
+ QUERY_AH,
+ CREATE_SRQ,
+ MODIFY_SRQ,
+ DESTROY_SRQ,
+ QUERY_SRQ,
+ ATTACH_MCAST,
+ DETACH_MCAST,
+ GET_DMA_MR,
+ REG_USER_MR,
+ DEREG_MR,
+ ALLOC_MR,
+ ALLOC_FMR,
+ MAP_PHYS_FMR,
+ UNMAP_FMR,
+ DEALLOC_FMR,
+ MMAP,
+ CREATE_CQ,
+ DESTROY_CQ,
+ POLL_CQ,
+ REQ_NOTIFY_CQ,
+ RESIZE_CQ,
+ ALLOC_PD,
+ DEALLOC_PD,
+ _VERB_IDX_MAX /* Must always be last! */
+};
+
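+/*
+ * If the driver has not supplied its own callback at byte offset @offset
+ * inside rdi->ibdev, install @func (the rdmavt default) there and return
+ * 0. Return 1 when the driver already provided an override.
+ */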
+static inline int check_driver_override(struct rvt_dev_info *rdi,
+ size_t offset, void *func)
+{
+ if (!*(void **)((void *)&rdi->ibdev + offset)) {
+ *(void **)((void *)&rdi->ibdev + offset) = func;
+ return 0;
+ }
+
+ return 1;
+}
+
+static noinline int check_support(struct rvt_dev_info *rdi, int verb)
+{
+ switch (verb) {
+ case MISC:
+ /*
+ * These functions are not part of verbs specifically but are
+ * required for rdmavt to function.
+ */
+ if ((!rdi->driver_f.port_callback) ||
+ (!rdi->driver_f.get_card_name) ||
+ (!rdi->driver_f.get_pci_dev))
+ return -EINVAL;
+ break;
+
+ case QUERY_DEVICE:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_device),
+ rvt_query_device);
+ break;
+
+ case MODIFY_DEVICE:
+ /*
+ * rdmavt does not currently support modify device; drivers must
+ * provide their own implementation.
+ */
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ modify_device),
+ rvt_modify_device))
+ return -EOPNOTSUPP;
+ break;
+
+ case QUERY_PORT:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ query_port),
+ rvt_query_port))
+ if (!rdi->driver_f.query_port_state)
+ return -EINVAL;
+ break;
+
+ case MODIFY_PORT:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ modify_port),
+ rvt_modify_port))
+ if (!rdi->driver_f.cap_mask_chg ||
+ !rdi->driver_f.shut_down_port)
+ return -EINVAL;
+ break;
+
+ case QUERY_PKEY:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_pkey),
+ rvt_query_pkey);
+ break;
+
+ case QUERY_GID:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ query_gid),
+ rvt_query_gid))
+ if (!rdi->driver_f.get_guid_be)
+ return -EINVAL;
+ break;
+
+ case ALLOC_UCONTEXT:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ alloc_ucontext),
+ rvt_alloc_ucontext);
+ break;
+
+ case DEALLOC_UCONTEXT:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ dealloc_ucontext),
+ rvt_dealloc_ucontext);
+ break;
+
+ case GET_PORT_IMMUTABLE:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ get_port_immutable),
+ rvt_get_port_immutable);
+ break;
+
+ case CREATE_QP:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ create_qp),
+ rvt_create_qp))
+ if (!rdi->driver_f.qp_priv_alloc ||
+ !rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp)
+ return -EINVAL;
+ break;
+
+ case MODIFY_QP:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ modify_qp),
+ rvt_modify_qp))
+ if (!rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.schedule_send ||
+ !rdi->driver_f.get_pmtu_from_attr ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp ||
+ !rdi->driver_f.notify_error_qp ||
+ !rdi->driver_f.mtu_from_qp ||
+ !rdi->driver_f.mtu_to_path_mtu ||
+ !rdi->driver_f.shut_down_port ||
+ !rdi->driver_f.cap_mask_chg)
+ return -EINVAL;
+ break;
+
+ case DESTROY_QP:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ destroy_qp),
+ rvt_destroy_qp))
+ if (!rdi->driver_f.qp_priv_free ||
+ !rdi->driver_f.notify_qp_reset ||
+ !rdi->driver_f.flush_qp_waiters ||
+ !rdi->driver_f.stop_send_queue ||
+ !rdi->driver_f.quiesce_qp)
+ return -EINVAL;
+ break;
+
+ case QUERY_QP:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_qp),
+ rvt_query_qp);
+ break;
+
+ case POST_SEND:
+ if (!check_driver_override(rdi, offsetof(struct ib_device,
+ post_send),
+ rvt_post_send))
+ if (!rdi->driver_f.schedule_send ||
+ !rdi->driver_f.do_send)
+ return -EINVAL;
+ break;
+
+ case POST_RECV:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ post_recv),
+ rvt_post_recv);
+ break;
+ case POST_SRQ_RECV:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ post_srq_recv),
+ rvt_post_srq_recv);
+ break;
+
+ case CREATE_AH:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ create_ah),
+ rvt_create_ah);
+ break;
+
+ case DESTROY_AH:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ destroy_ah),
+ rvt_destroy_ah);
+ break;
+
+ case MODIFY_AH:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ modify_ah),
+ rvt_modify_ah);
+ break;
+
+ case QUERY_AH:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_ah),
+ rvt_query_ah);
+ break;
+
+ case CREATE_SRQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ create_srq),
+ rvt_create_srq);
+ break;
+
+ case MODIFY_SRQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ modify_srq),
+ rvt_modify_srq);
+ break;
+
+ case DESTROY_SRQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ destroy_srq),
+ rvt_destroy_srq);
+ break;
+
+ case QUERY_SRQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ query_srq),
+ rvt_query_srq);
+ break;
+
+ case ATTACH_MCAST:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ attach_mcast),
+ rvt_attach_mcast);
+ break;
+
+ case DETACH_MCAST:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ detach_mcast),
+ rvt_detach_mcast);
+ break;
+
+ case GET_DMA_MR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ get_dma_mr),
+ rvt_get_dma_mr);
+ break;
+
+ case REG_USER_MR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ reg_user_mr),
+ rvt_reg_user_mr);
+ break;
+
+ case DEREG_MR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ dereg_mr),
+ rvt_dereg_mr);
+ break;
+
+ case ALLOC_FMR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ alloc_fmr),
+ rvt_alloc_fmr);
+ break;
+
+ case ALLOC_MR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ alloc_mr),
+ rvt_alloc_mr);
+ break;
+
+ case MAP_PHYS_FMR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ map_phys_fmr),
+ rvt_map_phys_fmr);
+ break;
+
+ case UNMAP_FMR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ unmap_fmr),
+ rvt_unmap_fmr);
+ break;
+
+ case DEALLOC_FMR:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ dealloc_fmr),
+ rvt_dealloc_fmr);
+ break;
+
+ case MMAP:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ mmap),
+ rvt_mmap);
+ break;
+
+ case CREATE_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ create_cq),
+ rvt_create_cq);
+ break;
+
+ case DESTROY_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ destroy_cq),
+ rvt_destroy_cq);
+ break;
+
+ case POLL_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ poll_cq),
+ rvt_poll_cq);
+ break;
+
+ case REQ_NOTIFY_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ req_notify_cq),
+ rvt_req_notify_cq);
+ break;
+
+ case RESIZE_CQ:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ resize_cq),
+ rvt_resize_cq);
+ break;
+
+ case ALLOC_PD:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ alloc_pd),
+ rvt_alloc_pd);
+ break;
+
+ case DEALLOC_PD:
+ check_driver_override(rdi, offsetof(struct ib_device,
+ dealloc_pd),
+ rvt_dealloc_pd);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * rvt_register_device - register a driver
+ * @rdi: main dev structure for all of rdmavt operations
+ *
+ * It is up to drivers to allocate the rdi and fill in the appropriate
+ * information.
+ *
+ * Return: 0 on success otherwise an errno.
+ */
+int rvt_register_device(struct rvt_dev_info *rdi)
+{
+ int ret = 0, i;
+
+ if (!rdi)
+ return -EINVAL;
+
+ /*
+ * Check to ensure drivers have set up the required helpers for the
+ * verbs they want rdmavt to handle.
+ */
+ for (i = 0; i < _VERB_IDX_MAX; i++) {
+ if (check_support(rdi, i)) {
+ pr_err("Driver support requirement not met at verb %d\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Once we get past here we can use rvt_pr macros and tracepoints */
+ trace_rvt_dbg(rdi, "Driver attempting registration");
+ rvt_mmap_init(rdi);
+
+ /* Queue Pairs */
+ ret = rvt_driver_qp_init(rdi);
+ if (ret) {
+ pr_err("Error in driver QP init.\n");
+ return -EINVAL;
+ }
+
+ /* Address Handle */
+ spin_lock_init(&rdi->n_ahs_lock);
+ rdi->n_ahs_allocated = 0;
+
+ /* Shared Receive Queue */
+ rvt_driver_srq_init(rdi);
+
+ /* Multicast */
+ rvt_driver_mcast_init(rdi);
+
+ /* Mem Region */
+ ret = rvt_driver_mr_init(rdi);
+ if (ret) {
+ pr_err("Error in driver MR init.\n");
+ goto bail_no_mr;
+ }
+
+ /* Completion queues */
+ ret = rvt_driver_cq_init(rdi);
+ if (ret) {
+ pr_err("Error in driver CQ init.\n");
+ goto bail_mr;
+ }
+
+ /* DMA Operations */
+ rdi->ibdev.dma_ops =
+ rdi->ibdev.dma_ops ? : &rvt_default_dma_mapping_ops;
+
+ /* Protection Domain */
+ spin_lock_init(&rdi->n_pds_lock);
+ rdi->n_pds_allocated = 0;
+
+ /*
+ * Some fields could be set by the underlying drivers but really should
+ * be set by rdmavt. For instance, drivers can't know exactly which
+ * functions rdmavt supports, nor do they know the ABI version, so all
+ * of that is set up here.
+ */
+ rdi->ibdev.uverbs_abi_ver = RVT_UVERBS_ABI_VERSION;
+ rdi->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
+ rdi->ibdev.node_type = RDMA_NODE_IB_CA;
+ rdi->ibdev.num_comp_vectors = 1;
+
+ /* We are now good to announce we exist */
+ ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
+ if (ret) {
+ rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
+ goto bail_cq;
+ }
+
+ rvt_create_mad_agents(rdi);
+
+ rvt_pr_info(rdi, "Registration with rdmavt done.\n");
+ return ret;
+
+bail_cq:
+ rvt_cq_exit(rdi);
+
+bail_mr:
+ rvt_mr_exit(rdi);
+
+bail_no_mr:
+ rvt_qp_exit(rdi);
+
+ return ret;
+}
+EXPORT_SYMBOL(rvt_register_device);
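A heavily elided sketch of the sequence a driver might follow, with hypothetical my_* names throughout; check_support() will additionally demand the QP and post helpers (qp_priv_alloc, schedule_send, do_send, and so on) unless the driver overrides those ib_device verbs itself:

	rdi = rvt_alloc_device(sizeof(struct my_drv), nports);
	if (!rdi)
		return -ENOMEM;

	/* the unconditional MISC helpers rdmavt can never supply */
	rdi->driver_f.port_callback = my_port_callback;
	rdi->driver_f.get_card_name = my_get_card_name;
	rdi->driver_f.get_pci_dev = my_get_pci_dev;

	rdi->dparms.nports = nports;
	for (i = 0; i < nports; i++)
		rvt_init_port(rdi, &my_ports[i], i, my_pkey_tables[i]);

	ret = rvt_register_device(rdi);
	if (ret)
		ib_dealloc_device(&rdi->ibdev);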
+
+/**
+ * rvt_unregister_device - remove a driver
+ * @rdi: rvt dev struct
+ */
+void rvt_unregister_device(struct rvt_dev_info *rdi)
+{
+ if (!rdi)
+ return;
+
+ trace_rvt_dbg(rdi, "Driver is unregistering.");
+
+ rvt_free_mad_agents(rdi);
+
+ ib_unregister_device(&rdi->ibdev);
+ rvt_cq_exit(rdi);
+ rvt_mr_exit(rdi);
+ rvt_qp_exit(rdi);
+}
+EXPORT_SYMBOL(rvt_unregister_device);
+
+/**
+ * rvt_init_port - init internal data for driver port
+ * @rdi: rvt dev struct
+ * @port: rvt port
+ * @port_index: 0 based index of ports, different from IB core port num
+ * @pkey_table: pkey_table to register for this port
+ *
+ * Keep track of a list of ports. No need to have a detach port.
+ * They persist until the driver goes away.
+ *
+ * Return: always 0
+ */
+int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
+ int port_index, u16 *pkey_table)
+{
+ rdi->ports[port_index] = port;
+ rdi->ports[port_index]->pkey_table = pkey_table;
+
+ return 0;
+}
+EXPORT_SYMBOL(rvt_init_port);
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
new file mode 100644
index 000000000000..6b01eaa4461b
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -0,0 +1,104 @@
+#ifndef DEF_RDMAVT_H
+#define DEF_RDMAVT_H
+
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rdma/rdma_vt.h>
+#include <linux/pci.h>
+#include "dma.h"
+#include "pd.h"
+#include "qp.h"
+#include "ah.h"
+#include "mr.h"
+#include "srq.h"
+#include "mcast.h"
+#include "mmap.h"
+#include "cq.h"
+#include "mad.h"
+#include "mmap.h"
+
+#define rvt_pr_info(rdi, fmt, ...) \
+ __rvt_pr_info(rdi->driver_f.get_pci_dev(rdi), \
+ rdi->driver_f.get_card_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define rvt_pr_warn(rdi, fmt, ...) \
+ __rvt_pr_warn(rdi->driver_f.get_pci_dev(rdi), \
+ rdi->driver_f.get_card_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define rvt_pr_err(rdi, fmt, ...) \
+ __rvt_pr_err(rdi->driver_f.get_pci_dev(rdi), \
+ rdi->driver_f.get_card_name(rdi), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define __rvt_pr_info(pdev, name, fmt, ...) \
+ dev_info(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+#define __rvt_pr_warn(pdev, name, fmt, ...) \
+ dev_warn(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+#define __rvt_pr_err(pdev, name, fmt, ...) \
+ dev_err(&pdev->dev, "%s: " fmt, name, ##__VA_ARGS__)
+
+static inline int ibport_num_to_idx(struct ib_device *ibdev, u8 port_num)
+{
+ struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
+ int port_index;
+
+ port_index = port_num - 1; /* IB ports start at 1; our arrays start at 0 */
+ if ((port_index < 0) || (port_index >= rdi->dparms.nports))
+ return -EINVAL;
+
+ return port_index;
+}
+
+#endif /* DEF_RDMAVT_H */
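For illustration, what a call through these layers expands to; pdev and name stand for the values returned by the driver's get_pci_dev and get_card_name callbacks:

	rvt_pr_err(rdi, "init failed: %d\n", ret);

	/* after macro expansion, roughly: */
	__rvt_pr_err(rdi->driver_f.get_pci_dev(rdi),
		     rdi->driver_f.get_card_name(rdi),
		     "init failed: %d\n", ret);

	/* i.e. dev_err(&pdev->dev, "%s: init failed: %d\n", name, ret) */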
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index a6f3eab0f350..caec8e9c4666 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -244,6 +244,7 @@ struct ipoib_cm_tx {
unsigned tx_tail;
unsigned long flags;
u32 mtu;
+ unsigned max_send_sge;
};
struct ipoib_cm_rx_buf {
@@ -387,9 +388,10 @@ struct ipoib_dev_priv {
struct dentry *mcg_dentry;
struct dentry *path_dentry;
#endif
- int hca_caps;
+ u64 hca_caps;
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
+ unsigned max_send_sge;
};
struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 917e46ea3bf6..c8ed53562c9b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -710,6 +710,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_tx_buf *tx_req;
int rc;
+ unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -719,7 +720,23 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
-
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* skb_linearize() can succeed without reducing nr_frags enough */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
tx->tx_head, skb->len, tx->qp->qp_num);
@@ -1031,7 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
- attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ attr.cap.max_send_sge =
+ min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
if (PTR_ERR(tx_qp) == -EINVAL) {
@@ -1040,6 +1058,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
tx_qp = ib_create_qp(priv->pd, &attr);
}
+ tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
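Providers write the actual QP capabilities back into attr.cap when ib_create_qp() succeeds, so the stored tx->max_send_sge records what the HCA granted rather than what was requested. A worked example, assuming MAX_SKB_FRAGS is 17 and the HCA reports attrs.max_sge = 8:

	/* request min_t(u32, 8, 17 + 1) == 8 SGEs instead of 18 */
	attr.cap.max_send_sge =
		min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
	tx_qp = ib_create_qp(priv->pd, &attr);
	/* attr.cap.max_send_sge now holds the granted count */
	tx->max_send_sge = attr.cap.max_send_sge;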
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index fa9c42ff1fb0..f0e55e47eb54 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -180,6 +180,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct sk_buff *skb;
u64 mapping[IPOIB_UD_RX_SG];
union ib_gid *dgid;
+ union ib_gid *sgid;
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
@@ -203,13 +204,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
return;
}
- /*
- * Drop packets that this interface sent, ie multicast packets
- * that the HCA has replicated.
- */
- if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
- goto repost;
-
memcpy(mapping, priv->rx_ring[wr_id].mapping,
IPOIB_UD_RX_SG * sizeof *mapping);
@@ -239,6 +233,25 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
else
skb->pkt_type = PACKET_MULTICAST;
+ sgid = &((struct ib_grh *)skb->data)->sgid;
+
+ /*
+ * Drop packets that this interface sent, ie multicast packets
+ * that the HCA has replicated.
+ */
+ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
+ int need_repost = 1;
+
+ if ((wc->wc_flags & IB_WC_GRH) &&
+ sgid->global.interface_id != priv->local_gid.global.interface_id)
+ need_repost = 0;
+
+ if (need_repost) {
+ dev_kfree_skb_any(skb);
+ goto repost;
+ }
+ }
+
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -538,6 +551,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_tx_buf *tx_req;
int hlen, rc;
void *phead;
+ unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -561,6 +575,23 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
phead = NULL;
hlen = 0;
}
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ if (skb_linearize(skb) < 0) {
+ ipoib_warn(priv, "skb could not be linearized\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ /* skb_linearize() can succeed without reducing nr_frags enough */
+ if (skb_shinfo(skb)->nr_frags > usable_sge) {
+ ipoib_warn(priv, "too many frags after skb linearize\n");
+ ++dev->stats.tx_dropped;
+ ++dev->stats.tx_errors;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
skb->len, address, qpn);
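The arithmetic behind usable_sge: the linear head of the skb consumes one SGE whenever skb_headlen() is non-zero, leaving only the remainder for page fragments. A short sketch with illustrative numbers (handle_overflow is a hypothetical stand-in for the linearize-or-drop block above):

	unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	/*
	 * With max_send_sge == 8 and a non-empty linear head,
	 * usable_sge == 7: an skb carrying more than 7 fragments
	 * must be linearized before posting, and is dropped if
	 * skb_linearize() cannot bring nr_frags down far enough.
	 */
	if (skb_shinfo(skb)->nr_frags > usable_sge)
		handle_overflow(skb);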
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 25509bbd4a05..80807d6e5c4c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -51,6 +51,7 @@
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
+#include <linux/pci.h>
#define DRV_VERSION "1.0.0"
@@ -1590,11 +1591,67 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->tx_ring = NULL;
}
+static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
+}
+
+static int ipoib_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
+ if (err)
+ return err;
+
+ ivf->vf = vf;
+
+ return 0;
+}
+
+static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
+ return -EINVAL;
+
+ return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
+}
+
+static int ipoib_get_vf_stats(struct net_device *dev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
+}
+
static const struct header_ops ipoib_header_ops = {
.create = ipoib_hard_header,
};
-static const struct net_device_ops ipoib_netdev_ops = {
+static const struct net_device_ops ipoib_netdev_ops_pf = {
+ .ndo_uninit = ipoib_uninit,
+ .ndo_open = ipoib_open,
+ .ndo_stop = ipoib_stop,
+ .ndo_change_mtu = ipoib_change_mtu,
+ .ndo_fix_features = ipoib_fix_features,
+ .ndo_start_xmit = ipoib_start_xmit,
+ .ndo_tx_timeout = ipoib_timeout,
+ .ndo_set_rx_mode = ipoib_set_mcast_list,
+ .ndo_get_iflink = ipoib_get_iflink,
+ .ndo_set_vf_link_state = ipoib_set_vf_link_state,
+ .ndo_get_vf_config = ipoib_get_vf_config,
+ .ndo_get_vf_stats = ipoib_get_vf_stats,
+ .ndo_set_vf_guid = ipoib_set_vf_guid,
+};
+
+static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_uninit = ipoib_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
@@ -1610,7 +1667,11 @@ void ipoib_setup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- dev->netdev_ops = &ipoib_netdev_ops;
+ if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
+ dev->netdev_ops = &ipoib_netdev_ops_vf;
+ else
+ dev->netdev_ops = &ipoib_netdev_ops_pf;
+
dev->header_ops = &ipoib_header_ops;
ipoib_set_ethtool_ops(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index d48c5bae7877..b809c373e40e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -206,7 +206,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
if (dev->features & NETIF_F_SG)
- init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+ init_attr.cap.max_send_sge =
+ min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
@@ -233,6 +234,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
+ priv->max_send_sge = init_attr.cap.max_send_sge;
+
return 0;
out_free_send_cq:
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index c827c93f46c5..80b6bedc172f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -969,7 +969,16 @@ static umode_t iser_attr_is_visible(int param_type, int param)
static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
{
- blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
+ struct iscsi_session *session;
+ struct iser_conn *iser_conn;
+ struct ib_device *ib_dev;
+
+ session = starget_to_session(scsi_target(sdev))->dd_data;
+ iser_conn = session->leadconn->dd_data;
+ ib_dev = iser_conn->ib_conn.device->ib_device;
+
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
return 0;
}
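The capability test in words: a device advertising IB_DEVICE_SG_GAPS_REG can register scatter/gather lists containing gaps, so the 4K virtual-boundary restriction on the request queue is only needed when the flag is absent. This pairs with the iser_verbs.c hunk further down, which selects the MR type the same way; a condensed sketch:

	enum ib_mr_type mr_type;

	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;   /* gaps allowed in SG list */
	else
		mr_type = IB_MR_TYPE_MEM_REG;   /* keep ~MASK_4K boundary */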
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 95f0a64e076b..0351059783b1 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -458,9 +458,6 @@ struct iser_fr_pool {
* @comp: iser completion context
* @fr_pool: connection fast registration poool
* @pi_support: Indicate device T10-PI support
- * @last: last send wr to signal all flush errors were drained
- * @last_cqe: cqe handler for last wr
- * @last_comp: completes when all connection completions consumed
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
@@ -472,10 +469,7 @@ struct ib_conn {
struct iser_comp *comp;
struct iser_fr_pool fr_pool;
bool pi_support;
- struct ib_send_wr last;
- struct ib_cqe last_cqe;
struct ib_cqe reg_cqe;
- struct completion last_comp;
};
/**
@@ -617,7 +611,6 @@ void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
-void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc);
void iser_task_rdma_init(struct iscsi_iser_task *task);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index ed54b388e7ad..81ae2e30dd12 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -729,13 +729,6 @@ void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
kmem_cache_free(ig.desc_cache, desc);
}
-void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct ib_conn *ib_conn = wc->qp->qp_context;
-
- complete(&ib_conn->last_comp);
-}
-
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 40c0f4978e2f..1b4945367e4f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -252,14 +252,21 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn)
}
static int
-iser_alloc_reg_res(struct ib_device *ib_device,
+iser_alloc_reg_res(struct iser_device *device,
struct ib_pd *pd,
struct iser_reg_resources *res,
unsigned int size)
{
+ struct ib_device *ib_dev = device->ib_device;
+ enum ib_mr_type mr_type;
int ret;
- res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
+ if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
+ res->mr = ib_alloc_mr(pd, mr_type, size);
if (IS_ERR(res->mr)) {
ret = PTR_ERR(res->mr);
iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
@@ -277,7 +284,7 @@ iser_free_reg_res(struct iser_reg_resources *rsc)
}
static int
-iser_alloc_pi_ctx(struct ib_device *ib_device,
+iser_alloc_pi_ctx(struct iser_device *device,
struct ib_pd *pd,
struct iser_fr_desc *desc,
unsigned int size)
@@ -291,7 +298,7 @@ iser_alloc_pi_ctx(struct ib_device *ib_device,
pi_ctx = desc->pi_ctx;
- ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size);
+ ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
if (ret) {
iser_err("failed to allocate reg_resources\n");
goto alloc_reg_res_err;
@@ -324,7 +331,7 @@ iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
}
static struct iser_fr_desc *
-iser_create_fastreg_desc(struct ib_device *ib_device,
+iser_create_fastreg_desc(struct iser_device *device,
struct ib_pd *pd,
bool pi_enable,
unsigned int size)
@@ -336,12 +343,12 @@ iser_create_fastreg_desc(struct ib_device *ib_device,
if (!desc)
return ERR_PTR(-ENOMEM);
- ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size);
+ ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
if (ret)
goto reg_res_alloc_failure;
if (pi_enable) {
- ret = iser_alloc_pi_ctx(ib_device, pd, desc, size);
+ ret = iser_alloc_pi_ctx(device, pd, desc, size);
if (ret)
goto pi_ctx_alloc_failure;
}
@@ -374,7 +381,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
spin_lock_init(&fr_pool->lock);
fr_pool->size = 0;
for (i = 0; i < cmds_max; i++) {
- desc = iser_create_fastreg_desc(device->ib_device, device->pd,
+ desc = iser_create_fastreg_desc(device, device->pd,
ib_conn->pi_support, size);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
@@ -663,7 +670,6 @@ void iser_conn_release(struct iser_conn *iser_conn)
int iser_conn_terminate(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct ib_send_wr *bad_wr;
int err = 0;
/* terminate the iser conn only if the conn state is UP */
@@ -688,14 +694,8 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
iser_err("Failed to disconnect, conn: 0x%p err %d\n",
iser_conn, err);
- /* post an indication that all flush errors were consumed */
- err = ib_post_send(ib_conn->qp, &ib_conn->last, &bad_wr);
- if (err) {
- iser_err("conn %p failed to post last wr", ib_conn);
- return 1;
- }
-
- wait_for_completion(&ib_conn->last_comp);
+ /* block until all flush errors are consumed */
+ ib_drain_sq(ib_conn->qp);
}
return 1;
@@ -954,10 +954,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
ib_conn->post_recv_buf_count = 0;
ib_conn->reg_cqe.done = iser_reg_comp;
- ib_conn->last_cqe.done = iser_last_comp;
- ib_conn->last.wr_cqe = &ib_conn->last_cqe;
- ib_conn->last.opcode = IB_WR_SEND;
- init_completion(&ib_conn->last_comp);
}
/**
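ib_drain_sq() encapsulates the pattern being deleted here: it posts a marker work request on the send queue and blocks until its completion is reaped, making the hand-rolled last/last_cqe/last_comp machinery unnecessary. A side-by-side sketch, where the "old" half mirrors the code removed above:

	/* old: hand-rolled drain */
	err = ib_post_send(ib_conn->qp, &ib_conn->last, &bad_wr);
	if (!err)
		wait_for_completion(&ib_conn->last_comp);

	/* new: one core helper call */
	ib_drain_sq(ib_conn->qp);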
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f121e6129339..411e4464ca23 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -49,22 +49,24 @@ static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
+isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
+isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn);
+isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
+static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
@@ -138,7 +140,7 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.qp_context = isert_conn;
attr.send_cq = comp->cq;
attr.recv_cq = comp->cq;
- attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
+ attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
@@ -177,12 +179,6 @@ err:
return ret;
}
-static void
-isert_cq_event_callback(struct ib_event *e, void *context)
-{
- isert_dbg("event: %d\n", e->event);
-}
-
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
@@ -212,6 +208,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->pd->local_dma_lkey;
+ rx_desc->rx_cqe.done = isert_recv_done;
}
return 0;
@@ -250,9 +247,6 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->rx_descs = NULL;
}
-static void isert_cq_work(struct work_struct *);
-static void isert_cq_callback(struct ib_cq *, void *);
-
static void
isert_free_comps(struct isert_device *device)
{
@@ -261,10 +255,8 @@ isert_free_comps(struct isert_device *device)
for (i = 0; i < device->comps_used; i++) {
struct isert_comp *comp = &device->comps[i];
- if (comp->cq) {
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- }
+ if (comp->cq)
+ ib_free_cq(comp->cq);
}
kfree(device->comps);
}
@@ -293,28 +285,17 @@ isert_alloc_comps(struct isert_device *device)
max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
for (i = 0; i < device->comps_used; i++) {
- struct ib_cq_init_attr cq_attr = {};
struct isert_comp *comp = &device->comps[i];
comp->device = device;
- INIT_WORK(&comp->work, isert_cq_work);
- cq_attr.cqe = max_cqe;
- cq_attr.comp_vector = i;
- comp->cq = ib_create_cq(device->ib_device,
- isert_cq_callback,
- isert_cq_event_callback,
- (void *)comp,
- &cq_attr);
+ comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
+ IB_POLL_WORKQUEUE);
if (IS_ERR(comp->cq)) {
isert_err("Unable to allocate cq\n");
ret = PTR_ERR(comp->cq);
comp->cq = NULL;
goto out_cq;
}
-
- ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
- if (ret)
- goto out_cq;
}
return 0;
@@ -582,7 +563,6 @@ isert_init_conn(struct isert_conn *isert_conn)
INIT_LIST_HEAD(&isert_conn->node);
init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
- init_completion(&isert_conn->wait);
kref_init(&isert_conn->kref);
mutex_init(&isert_conn->mutex);
spin_lock_init(&isert_conn->pool_lock);
@@ -596,11 +576,13 @@ isert_free_login_buf(struct isert_conn *isert_conn)
struct ib_device *ib_dev = isert_conn->device->ib_device;
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+ ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
+ kfree(isert_conn->login_rsp_buf);
+
ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN,
+ ISER_RX_PAYLOAD_SIZE,
DMA_FROM_DEVICE);
- kfree(isert_conn->login_buf);
+ kfree(isert_conn->login_req_buf);
}
static int
@@ -609,50 +591,48 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
{
int ret;
- isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
- ISER_RX_LOGIN_SIZE, GFP_KERNEL);
- if (!isert_conn->login_buf) {
+ isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
+ GFP_KERNEL);
+ if (!isert_conn->login_req_buf) {
isert_err("Unable to allocate isert_conn->login_buf\n");
return -ENOMEM;
}
- isert_conn->login_req_buf = isert_conn->login_buf;
- isert_conn->login_rsp_buf = isert_conn->login_buf +
- ISCSI_DEF_MAX_RECV_SEG_LEN;
-
- isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
- isert_conn->login_buf, isert_conn->login_req_buf,
- isert_conn->login_rsp_buf);
-
isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
- (void *)isert_conn->login_req_buf,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-
+ isert_conn->login_req_buf,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
if (ret) {
isert_err("login_req_dma mapping error: %d\n", ret);
isert_conn->login_req_dma = 0;
- goto out_login_buf;
+ goto out_free_login_req_buf;
}
- isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
- (void *)isert_conn->login_rsp_buf,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+ isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!isert_conn->login_rsp_buf) {
+ isert_err("Unable to allocate isert_conn->login_rspbuf\n");
+ goto out_unmap_login_req_buf;
+ }
+ isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
+ isert_conn->login_rsp_buf,
+ ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
isert_err("login_rsp_dma mapping error: %d\n", ret);
isert_conn->login_rsp_dma = 0;
- goto out_req_dma_map;
+ goto out_free_login_rsp_buf;
}
return 0;
-out_req_dma_map:
+out_free_login_rsp_buf:
+ kfree(isert_conn->login_rsp_buf);
+out_unmap_login_req_buf:
ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-out_login_buf:
- kfree(isert_conn->login_buf);
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+out_free_login_req_buf:
+ kfree(isert_conn->login_req_buf);
return ret;
}
@@ -726,7 +706,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
if (ret)
goto out_conn_dev;
- ret = isert_rdma_post_recvl(isert_conn);
+ ret = isert_login_post_recv(isert_conn);
if (ret)
goto out_conn_dev;
@@ -773,7 +753,7 @@ isert_connect_release(struct isert_conn *isert_conn)
ib_destroy_qp(isert_conn->qp);
}
- if (isert_conn->login_buf)
+ if (isert_conn->login_req_buf)
isert_free_login_buf(isert_conn);
isert_device_put(device);
@@ -820,12 +800,30 @@ isert_put_conn(struct isert_conn *isert_conn)
kref_put(&isert_conn->kref, isert_release_kref);
}
+static void
+isert_handle_unbound_conn(struct isert_conn *isert_conn)
+{
+ struct isert_np *isert_np = isert_conn->cm_id->context;
+
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ /*
+ * This means iscsi doesn't know this connection
+ * so schedule a cleanup ourselves
+ */
+ list_del_init(&isert_conn->node);
+ isert_put_conn(isert_conn);
+ queue_work(isert_release_wq, &isert_conn->release_work);
+ }
+ mutex_unlock(&isert_np->mutex);
+}
+
/**
* isert_conn_terminate() - Initiate connection termination
* @isert_conn: isert connection struct
*
* Notes:
- * In case the connection state is FULL_FEATURE, move state
+ * In case the connection state is BOUND, move state
* to TERMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
@@ -837,23 +835,16 @@ isert_conn_terminate(struct isert_conn *isert_conn)
{
int err;
- switch (isert_conn->state) {
- case ISER_CONN_TERMINATING:
- break;
- case ISER_CONN_UP:
- case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
- isert_info("Terminating conn %p state %d\n",
- isert_conn, isert_conn->state);
- isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->cm_id);
- if (err)
- isert_warn("Failed rdma_disconnect isert_conn %p\n",
- isert_conn);
- break;
- default:
- isert_warn("conn %p teminating in state %d\n",
- isert_conn, isert_conn->state);
- }
+ if (isert_conn->state >= ISER_CONN_TERMINATING)
+ return;
+
+ isert_info("Terminating conn %p state %d\n",
+ isert_conn, isert_conn->state);
+ isert_conn->state = ISER_CONN_TERMINATING;
+ err = rdma_disconnect(isert_conn->cm_id);
+ if (err)
+ isert_warn("Failed rdma_disconnect isert_conn %p\n",
+ isert_conn);
}
static int
@@ -887,35 +878,27 @@ static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
- struct isert_np *isert_np = cma_id->context;
- struct isert_conn *isert_conn;
- bool terminating = false;
-
- if (isert_np->cm_id == cma_id)
- return isert_np_cma_handler(cma_id->context, event);
-
- isert_conn = cma_id->qp->qp_context;
+ struct isert_conn *isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
- terminating = (isert_conn->state == ISER_CONN_TERMINATING);
- isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->mutex);
-
- isert_info("conn %p completing wait\n", isert_conn);
- complete(&isert_conn->wait);
-
- if (terminating)
- goto out;
-
- mutex_lock(&isert_np->mutex);
- if (!list_empty(&isert_conn->node)) {
- list_del_init(&isert_conn->node);
- isert_put_conn(isert_conn);
- queue_work(isert_release_wq, &isert_conn->release_work);
+ switch (isert_conn->state) {
+ case ISER_CONN_TERMINATING:
+ break;
+ case ISER_CONN_UP:
+ isert_conn_terminate(isert_conn);
+ ib_drain_qp(isert_conn->qp);
+ isert_handle_unbound_conn(isert_conn);
+ break;
+ case ISER_CONN_BOUND:
+ case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ break;
+ default:
+ isert_warn("conn %p teminating in state %d\n",
+ isert_conn, isert_conn->state);
}
- mutex_unlock(&isert_np->mutex);
+ mutex_unlock(&isert_conn->mutex);
-out:
return 0;
}
@@ -934,12 +917,16 @@ isert_connect_error(struct rdma_cm_id *cma_id)
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
+ struct isert_np *isert_np = cma_id->context;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id, cma_id->context);
+ if (isert_np->cm_id == cma_id)
+ return isert_np_cma_handler(cma_id->context, event->event);
+
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
@@ -977,7 +964,8 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &isert_conn->rx_descs[i];
- rx_wr->wr_id = (uintptr_t)rx_desc;
+
+ rx_wr->wr_cqe = &rx_desc->rx_cqe;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
@@ -985,13 +973,10 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
- isert_conn->post_recv_buf_count += count;
ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count -= count;
- }
return ret;
}
@@ -1002,23 +987,20 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
struct ib_recv_wr *rx_wr_failed, rx_wr;
int ret;
- rx_wr.wr_id = (uintptr_t)rx_desc;
+ rx_wr.wr_cqe = &rx_desc->rx_cqe;
rx_wr.sg_list = &rx_desc->rx_sg;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
static int
-isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
+isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct ib_send_wr send_wr, *send_wr_failed;
@@ -1027,8 +1009,10 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ tx_desc->tx_cqe.done = isert_login_send_done;
+
send_wr.next = NULL;
- send_wr.wr_id = (uintptr_t)tx_desc;
+ send_wr.wr_cqe = &tx_desc->tx_cqe;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
@@ -1056,7 +1040,6 @@ isert_create_send_desc(struct isert_conn *isert_conn,
tx_desc->iser_header.flags = ISCSI_CTRL;
tx_desc->num_sge = 1;
- tx_desc->isert_cmd = isert_cmd;
if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
@@ -1097,8 +1080,9 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
- isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
- send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
+ isert_cmd->iser_ib_op = ISER_IB_SEND;
+ tx_desc->tx_cqe.done = isert_send_done;
+ send_wr->wr_cqe = &tx_desc->tx_cqe;
if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
send_wr->opcode = IB_WR_SEND_WITH_INV;
@@ -1113,7 +1097,7 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
}
static int
-isert_rdma_post_recvl(struct isert_conn *isert_conn)
+isert_login_post_recv(struct isert_conn *isert_conn)
{
struct ib_recv_wr rx_wr, *rx_wr_fail;
struct ib_sge sge;
@@ -1121,23 +1105,22 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
memset(&sge, 0, sizeof(struct ib_sge));
sge.addr = isert_conn->login_req_dma;
- sge.length = ISER_RX_LOGIN_SIZE;
+ sge.length = ISER_RX_PAYLOAD_SIZE;
sge.lkey = isert_conn->device->pd->local_dma_lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
+ isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
+
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
- rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
+ rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
- isert_conn->post_recv_buf_count++;
ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
- if (ret) {
+ if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
- isert_conn->post_recv_buf_count--;
- }
return ret;
}
@@ -1203,12 +1186,12 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
goto post_send;
}
- ret = isert_rdma_post_recvl(isert_conn);
+ ret = isert_login_post_recv(isert_conn);
if (ret)
return ret;
}
post_send:
- ret = isert_post_send(isert_conn, tx_desc);
+ ret = isert_login_post_send(isert_conn, tx_desc);
if (ret)
return ret;
@@ -1218,7 +1201,7 @@ post_send:
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
- struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+ struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
int rx_buflen = isert_conn->login_req_len;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
@@ -1551,12 +1534,42 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
}
static void
-isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
+isert_print_wc(struct ib_wc *wc, const char *type)
{
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ isert_err("%s failure: %s (%d) vend_err %x\n", type,
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->vendor_err);
+ else
+ isert_dbg("%s failure: %s (%d)\n", type,
+ ib_wc_status_msg(wc->status), wc->status);
+}
+
+static void
+isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
+ struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
uint64_t read_va = 0, write_va = 0;
uint32_t read_stag = 0, write_stag = 0;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "recv");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ return;
+ }
+
+ ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+
+ isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+ rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
+ (int)(wc->byte_len - ISER_HEADERS_LEN));
+
switch (iser_ctrl->flags & 0xF0) {
case ISCSI_CTRL:
if (iser_ctrl->flags & ISER_RSV) {
@@ -1584,56 +1597,40 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
isert_rx_opcode(isert_conn, rx_desc,
read_stag, read_va, write_stag, write_va);
+
+ ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
static void
-isert_rcv_completion(struct iser_rx_desc *desc,
- struct isert_conn *isert_conn,
- u32 xfer_len)
+isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct iscsi_hdr *hdr;
- u64 rx_dma;
- int rx_buflen;
-
- if ((char *)desc == isert_conn->login_req_buf) {
- rx_dma = isert_conn->login_req_dma;
- rx_buflen = ISER_RX_LOGIN_SIZE;
- isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
- rx_dma, rx_buflen);
- } else {
- rx_dma = desc->dma_addr;
- rx_buflen = ISER_RX_PAYLOAD_SIZE;
- isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
- rx_dma, rx_buflen);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "login recv");
+ return;
}
- ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
- hdr = &desc->iscsi_header;
- isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
- hdr->opcode, hdr->itt, hdr->flags,
- (int)(xfer_len - ISER_HEADERS_LEN));
+ isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
- if ((char *)desc == isert_conn->login_req_buf) {
- isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
- if (isert_conn->conn) {
- struct iscsi_login *login = isert_conn->conn->conn_login;
+ if (isert_conn->conn) {
+ struct iscsi_login *login = isert_conn->conn->conn_login;
- if (login && !login->first_request)
- isert_rx_login_req(isert_conn);
- }
- mutex_lock(&isert_conn->mutex);
- complete(&isert_conn->login_req_comp);
- mutex_unlock(&isert_conn->mutex);
- } else {
- isert_rx_do_work(desc, isert_conn);
+ if (login && !login->first_request)
+ isert_rx_login_req(isert_conn);
}
- ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
- DMA_FROM_DEVICE);
+ mutex_lock(&isert_conn->mutex);
+ complete(&isert_conn->login_req_comp);
+ mutex_unlock(&isert_conn->mutex);
- isert_conn->post_recv_buf_count--;
+ ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
static int
@@ -1683,54 +1680,50 @@ isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-
isert_dbg("Cmd %p\n", isert_cmd);
- if (wr->data.sg) {
+ if (isert_cmd->data.sg) {
isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &wr->data);
+ isert_unmap_data_buf(isert_conn, &isert_cmd->data);
}
- if (wr->rdma_wr) {
+ if (isert_cmd->rdma_wr) {
isert_dbg("Cmd %p free send_wr\n", isert_cmd);
- kfree(wr->rdma_wr);
- wr->rdma_wr = NULL;
+ kfree(isert_cmd->rdma_wr);
+ isert_cmd->rdma_wr = NULL;
}
- if (wr->ib_sge) {
+ if (isert_cmd->ib_sge) {
isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
- kfree(wr->ib_sge);
- wr->ib_sge = NULL;
+ kfree(isert_cmd->ib_sge);
+ isert_cmd->ib_sge = NULL;
}
}
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-
isert_dbg("Cmd %p\n", isert_cmd);
- if (wr->fr_desc) {
- isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
- if (wr->fr_desc->ind & ISERT_PROTECTED) {
- isert_unmap_data_buf(isert_conn, &wr->prot);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ if (isert_cmd->fr_desc) {
+ isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
+ if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
+ isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
+ isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
}
spin_lock_bh(&isert_conn->pool_lock);
- list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
+ list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
spin_unlock_bh(&isert_conn->pool_lock);
- wr->fr_desc = NULL;
+ isert_cmd->fr_desc = NULL;
}
- if (wr->data.sg) {
+ if (isert_cmd->data.sg) {
isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &wr->data);
+ isert_unmap_data_buf(isert_conn, &isert_cmd->data);
}
- wr->ib_sge = NULL;
- wr->rdma_wr = NULL;
+ isert_cmd->ib_sge = NULL;
+ isert_cmd->rdma_wr = NULL;
}
static void
@@ -1882,52 +1875,70 @@ fail_mr_status:
}
static void
-isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd)
+isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = isert_cmd->conn;
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct isert_device *device = isert_conn->device;
+ struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
+ struct se_cmd *cmd = &isert_cmd->iscsi_cmd->se_cmd;
int ret = 0;
- if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
- ret = isert_check_pi_status(se_cmd,
- wr->fr_desc->pi_ctx->sig_mr);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "rdma write");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(desc, isert_cmd, device->ib_device, true);
+ return;
+ }
+
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
+ ret = isert_check_pi_status(cmd,
+ isert_cmd->fr_desc->pi_ctx->sig_mr);
+ isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
}
device->unreg_rdma_mem(isert_cmd, isert_conn);
- wr->rdma_wr_num = 0;
+ isert_cmd->rdma_wr_num = 0;
if (ret)
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->pi_err, 0);
+ transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
else
- isert_put_response(isert_conn->conn, cmd);
+ isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
}
static void
-isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd)
+isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct isert_device *device = isert_conn->device;
+ struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = isert_cmd->conn;
- struct isert_device *device = isert_conn->device;
int ret = 0;
- if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "rdma read");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(desc, isert_cmd, device->ib_device, true);
+ return;
+ }
+
+ isert_dbg("Cmd %p\n", isert_cmd);
+
+ if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
ret = isert_check_pi_status(se_cmd,
- wr->fr_desc->pi_ctx->sig_mr);
- wr->fr_desc->ind &= ~ISERT_PROTECTED;
+ isert_cmd->fr_desc->pi_ctx->sig_mr);
+ isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
}
iscsit_stop_dataout_timer(cmd);
device->unreg_rdma_mem(isert_cmd, isert_conn);
- cmd->write_data_done = wr->data.len;
- wr->rdma_wr_num = 0;
+ cmd->write_data_done = isert_cmd->data.len;
+ isert_cmd->rdma_wr_num = 0;
isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -1975,170 +1986,56 @@ isert_do_control_comp(struct work_struct *work)
}
static void
-isert_response_completion(struct iser_tx_desc *tx_desc,
- struct isert_cmd *isert_cmd,
- struct isert_conn *isert_conn,
- struct ib_device *ib_dev)
+isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-
- if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
- cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
- cmd->i_state == ISTATE_SEND_REJECT ||
- cmd->i_state == ISTATE_SEND_TEXTRSP) {
- isert_unmap_tx_desc(tx_desc, ib_dev);
+ struct isert_conn *isert_conn = wc->qp->qp_context;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
- INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
- queue_work(isert_comp_wq, &isert_cmd->comp_work);
- return;
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "login send");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
- cmd->i_state = ISTATE_SENT_STATUS;
- isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
+ isert_unmap_tx_desc(tx_desc, ib_dev);
}
static void
-isert_snd_completion(struct iser_tx_desc *tx_desc,
- struct isert_conn *isert_conn)
+isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
+ struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
- struct isert_rdma_wr *wr;
+ struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
+ struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
- if (!isert_cmd) {
- isert_unmap_tx_desc(tx_desc, ib_dev);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ isert_print_wc(wc, "send");
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
return;
}
- wr = &isert_cmd->rdma_wr;
- isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+ isert_dbg("Cmd %p\n", isert_cmd);
- switch (wr->iser_ib_op) {
- case ISER_IB_SEND:
- isert_response_completion(tx_desc, isert_cmd,
- isert_conn, ib_dev);
- break;
- case ISER_IB_RDMA_WRITE:
- isert_completion_rdma_write(tx_desc, isert_cmd);
- break;
- case ISER_IB_RDMA_READ:
- isert_completion_rdma_read(tx_desc, isert_cmd);
- break;
+ switch (isert_cmd->iscsi_cmd->i_state) {
+ case ISTATE_SEND_TASKMGTRSP:
+ case ISTATE_SEND_LOGOUTRSP:
+ case ISTATE_SEND_REJECT:
+ case ISTATE_SEND_TEXTRSP:
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+
+ INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
+ queue_work(isert_comp_wq, &isert_cmd->comp_work);
+ return;
default:
- isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
- dump_stack();
+ isert_cmd->iscsi_cmd->i_state = ISTATE_SENT_STATUS;
+ isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
break;
}
}
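/*
 * With a distinct ->done callback per work request type, the old switch on
 * wr->iser_ib_op is gone: the dispatch is fixed when the WR is posted
 * rather than decoded at completion time. The RDMA paths below set this
 * explicitly, e.g.:
 *
 *	isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
 *	rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
 *
 * Plain SENDs presumably get isert_send_done assigned where the send WR is
 * initialized, which is outside this hunk.
 */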
-/**
- * is_isert_tx_desc() - Indicate if the completion wr_id
- * is a TX descriptor or not.
- * @isert_conn: iser connection
- * @wr_id: completion WR identifier
- *
- * Since we cannot rely on wc opcode in FLUSH errors
- * we must work around it by checking if the wr_id address
- * falls in the iser connection rx_descs buffer. If so
- * it is an RX descriptor, otherwise it is a TX.
- */
-static inline bool
-is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
-{
- void *start = isert_conn->rx_descs;
- int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
-
- if (wr_id >= start && wr_id < start + len)
- return false;
-
- return true;
-}
-
-static void
-isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
-{
- if (wc->wr_id == ISER_BEACON_WRID) {
- isert_info("conn %p completing wait_comp_err\n",
- isert_conn);
- complete(&isert_conn->wait_comp_err);
- } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
- struct ib_device *ib_dev = isert_conn->cm_id->device;
- struct isert_cmd *isert_cmd;
- struct iser_tx_desc *desc;
-
- desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_cmd = desc->isert_cmd;
- if (!isert_cmd)
- isert_unmap_tx_desc(desc, ib_dev);
- else
- isert_completion_put(desc, isert_cmd, ib_dev, true);
- } else {
- isert_conn->post_recv_buf_count--;
- if (!isert_conn->post_recv_buf_count)
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
- }
-}
-
-static void
-isert_handle_wc(struct ib_wc *wc)
-{
- struct isert_conn *isert_conn;
- struct iser_tx_desc *tx_desc;
- struct iser_rx_desc *rx_desc;
-
- isert_conn = wc->qp->qp_context;
- if (likely(wc->status == IB_WC_SUCCESS)) {
- if (wc->opcode == IB_WC_RECV) {
- rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
- isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
- } else {
- tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_snd_completion(tx_desc, isert_conn);
- }
- } else {
- if (wc->status != IB_WC_WR_FLUSH_ERR)
- isert_err("%s (%d): wr id %llx vend_err %x\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->wr_id, wc->vendor_err);
- else
- isert_dbg("%s (%d): wr id %llx\n",
- ib_wc_status_msg(wc->status), wc->status,
- wc->wr_id);
-
- if (wc->wr_id != ISER_FASTREG_LI_WRID)
- isert_cq_comp_err(isert_conn, wc);
- }
-}
-
-static void
-isert_cq_work(struct work_struct *work)
-{
- enum { isert_poll_budget = 65536 };
- struct isert_comp *comp = container_of(work, struct isert_comp,
- work);
- struct ib_wc *const wcs = comp->wcs;
- int i, n, completed = 0;
-
- while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
- for (i = 0; i < n; i++)
- isert_handle_wc(&wcs[i]);
-
- completed += n;
- if (completed >= isert_poll_budget)
- break;
- }
-
- ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_callback(struct ib_cq *cq, void *context)
-{
- struct isert_comp *comp = context;
-
- queue_work(isert_comp_wq, &comp->work);
-}
-
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
@@ -2395,7 +2292,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
page_off = offset % PAGE_SIZE;
rdma_wr->wr.sg_list = ib_sge;
- rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
+ rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
+
/*
* Perform mapping of TCM scatterlist memory ib_sge dma_addr.
*/
@@ -2428,24 +2326,23 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
}
static int
-isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr)
+isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
{
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct isert_data_buf *data = &wr->data;
+ struct isert_data_buf *data = &isert_cmd->data;
struct ib_rdma_wr *rdma_wr;
struct ib_sge *ib_sge;
u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
int ret = 0, i, ib_sge_cnt;
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
-
- offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+ offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
+ cmd->write_data_done : 0;
ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
se_cmd->t_data_nents, se_cmd->data_length,
- offset, wr->iser_ib_op, &wr->data);
+ offset, isert_cmd->iser_ib_op,
+ &isert_cmd->data);
if (ret)
return ret;
@@ -2458,41 +2355,44 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
ret = -ENOMEM;
goto unmap_cmd;
}
- wr->ib_sge = ib_sge;
+ isert_cmd->ib_sge = ib_sge;
- wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
- wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num,
- GFP_KERNEL);
- if (!wr->rdma_wr) {
- isert_dbg("Unable to allocate wr->rdma_wr\n");
+ isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
+ isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
+ isert_cmd->rdma_wr_num, GFP_KERNEL);
+ if (!isert_cmd->rdma_wr) {
+ isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
ret = -ENOMEM;
goto unmap_cmd;
}
- wr->isert_cmd = isert_cmd;
rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
- for (i = 0; i < wr->rdma_wr_num; i++) {
- rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i];
+ for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
+ rdma_wr = &isert_cmd->rdma_wr[i];
data_len = min(data_left, rdma_write_max);
rdma_wr->wr.send_flags = 0;
- if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+
rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
rdma_wr->remote_addr = isert_cmd->read_va + offset;
rdma_wr->rkey = isert_cmd->read_stag;
- if (i + 1 == wr->rdma_wr_num)
+ if (i + 1 == isert_cmd->rdma_wr_num)
rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
else
- rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
+ rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
} else {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+
rdma_wr->wr.opcode = IB_WR_RDMA_READ;
rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
rdma_wr->rkey = isert_cmd->write_stag;
- if (i + 1 == wr->rdma_wr_num)
+ if (i + 1 == isert_cmd->rdma_wr_num)
rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
else
- rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
+ rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
}
ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
@@ -2517,7 +2417,7 @@ isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
u32 rkey;
memset(inv_wr, 0, sizeof(*inv_wr));
- inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+ inv_wr->wr_cqe = NULL;
inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->ex.invalidate_rkey = mr->rkey;
@@ -2573,7 +2473,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
reg_wr.wr.next = NULL;
reg_wr.wr.opcode = IB_WR_REG_MR;
- reg_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
+ reg_wr.wr.wr_cqe = NULL;
reg_wr.wr.send_flags = 0;
reg_wr.wr.num_sge = 0;
reg_wr.mr = mr;
@@ -2660,10 +2560,10 @@ isert_set_prot_checks(u8 prot_checks)
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
- struct se_cmd *se_cmd,
- struct isert_rdma_wr *rdma_wr,
+ struct isert_cmd *isert_cmd,
struct fast_reg_descriptor *fr_desc)
{
+ struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
struct ib_sig_handover_wr sig_wr;
struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
struct pi_context *pi_ctx = fr_desc->pi_ctx;
@@ -2684,14 +2584,14 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
memset(&sig_wr, 0, sizeof(sig_wr));
sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
- sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA];
+ sig_wr.wr.wr_cqe = NULL;
+ sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
sig_wr.wr.num_sge = 1;
sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
sig_wr.sig_attrs = &sig_attrs;
sig_wr.sig_mr = pi_ctx->sig_mr;
if (se_cmd->t_prot_sg)
- sig_wr.prot = &rdma_wr->ib_sg[PROT];
+ sig_wr.prot = &isert_cmd->ib_sg[PROT];
if (!wr)
wr = &sig_wr.wr;
@@ -2705,35 +2605,34 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
}
fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
- rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
- rdma_wr->ib_sg[SIG].addr = 0;
- rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
+ isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
+ isert_cmd->ib_sg[SIG].addr = 0;
+ isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
/*
* We have protection guards on the wire
* so we need to set a larger transfer
*/
- rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
+ isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;
isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
- rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
- rdma_wr->ib_sg[SIG].lkey);
+ isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
+ isert_cmd->ib_sg[SIG].lkey);
err:
return ret;
}
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd,
- struct isert_rdma_wr *wr)
+ struct isert_cmd *isert_cmd)
{
struct isert_device *device = isert_conn->device;
struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
int ret;
- if (!wr->fr_desc->pi_ctx) {
- ret = isert_create_pi_ctx(wr->fr_desc,
+ if (!isert_cmd->fr_desc->pi_ctx) {
+ ret = isert_create_pi_ctx(isert_cmd->fr_desc,
device->ib_device,
device->pd);
if (ret) {
@@ -2748,16 +2647,20 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
se_cmd->t_prot_sg,
se_cmd->t_prot_nents,
se_cmd->prot_length,
- 0, wr->iser_ib_op, &wr->prot);
+ 0,
+ isert_cmd->iser_ib_op,
+ &isert_cmd->prot);
if (ret) {
isert_err("conn %p failed to map protection buffer\n",
isert_conn);
return ret;
}
- memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
- ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
- ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
+ memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
+ ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
+ &isert_cmd->prot,
+ ISERT_PROT_KEY_VALID,
+ &isert_cmd->ib_sg[PROT]);
if (ret) {
isert_err("conn %p failed to fast reg mr\n",
isert_conn);
@@ -2765,29 +2668,28 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
}
}
- ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
+ ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
if (ret) {
isert_err("conn %p failed to fast reg mr\n",
isert_conn);
goto unmap_prot_cmd;
}
- wr->fr_desc->ind |= ISERT_PROTECTED;
+ isert_cmd->fr_desc->ind |= ISERT_PROTECTED;
return 0;
unmap_prot_cmd:
if (se_cmd->t_prot_sg)
- isert_unmap_data_buf(isert_conn, &wr->prot);
+ isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
return ret;
}
static int
-isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr)
+isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
{
+ struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct fast_reg_descriptor *fr_desc = NULL;
struct ib_rdma_wr *rdma_wr;
@@ -2796,57 +2698,61 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
int ret = 0;
unsigned long flags;
- isert_cmd->tx_desc.isert_cmd = isert_cmd;
-
- offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+ offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
+ cmd->write_data_done : 0;
ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
se_cmd->t_data_nents, se_cmd->data_length,
- offset, wr->iser_ib_op, &wr->data);
+ offset, isert_cmd->iser_ib_op,
+ &isert_cmd->data);
if (ret)
return ret;
- if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
+ if (isert_cmd->data.dma_nents != 1 ||
+ isert_prot_cmd(isert_conn, se_cmd)) {
spin_lock_irqsave(&isert_conn->pool_lock, flags);
fr_desc = list_first_entry(&isert_conn->fr_pool,
struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
- wr->fr_desc = fr_desc;
+ isert_cmd->fr_desc = fr_desc;
}
- ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
- ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
+ ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
+ ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
if (ret)
goto unmap_cmd;
if (isert_prot_cmd(isert_conn, se_cmd)) {
- ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
+ ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
if (ret)
goto unmap_cmd;
- ib_sg = &wr->ib_sg[SIG];
+ ib_sg = &isert_cmd->ib_sg[SIG];
} else {
- ib_sg = &wr->ib_sg[DATA];
+ ib_sg = &isert_cmd->ib_sg[DATA];
}
- memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
- wr->ib_sge = &wr->s_ib_sge;
- wr->rdma_wr_num = 1;
- memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr));
- wr->rdma_wr = &wr->s_rdma_wr;
- wr->isert_cmd = isert_cmd;
+ memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
+ isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
+ isert_cmd->rdma_wr_num = 1;
+ memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
+ isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;
- rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr;
- rdma_wr->wr.sg_list = &wr->s_ib_sge;
+ rdma_wr = &isert_cmd->s_rdma_wr;
+ rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
rdma_wr->wr.num_sge = 1;
- rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
- if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
+ if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+
rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
rdma_wr->remote_addr = isert_cmd->read_va;
rdma_wr->rkey = isert_cmd->read_stag;
rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
0 : IB_SEND_SIGNALED;
} else {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+
rdma_wr->wr.opcode = IB_WR_RDMA_READ;
rdma_wr->remote_addr = isert_cmd->write_va;
rdma_wr->rkey = isert_cmd->write_stag;
@@ -2861,7 +2767,7 @@ unmap_cmd:
list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
}
- isert_unmap_data_buf(isert_conn, &wr->data);
+ isert_unmap_data_buf(isert_conn, &isert_cmd->data);
return ret;
}
@@ -2871,7 +2777,6 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
struct isert_conn *isert_conn = conn->context;
struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
@@ -2880,8 +2785,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
- wr->iser_ib_op = ISER_IB_RDMA_WRITE;
- rc = device->reg_rdma_mem(conn, cmd, wr);
+ isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
+ rc = device->reg_rdma_mem(isert_cmd, conn);
if (rc) {
isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
@@ -2898,8 +2803,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr);
- isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
- wr->rdma_wr_num += 1;
+ isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
+ isert_cmd->rdma_wr_num += 1;
rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
if (rc) {
@@ -2908,7 +2813,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
}
}
- rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
@@ -2927,7 +2832,6 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
struct isert_conn *isert_conn = conn->context;
struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
@@ -2935,14 +2839,14 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
isert_cmd, se_cmd->data_length, cmd->write_data_done);
- wr->iser_ib_op = ISER_IB_RDMA_READ;
- rc = device->reg_rdma_mem(conn, cmd, wr);
+ isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
+ rc = device->reg_rdma_mem(isert_cmd, conn);
if (rc) {
isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
return rc;
}
- rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
@@ -3214,6 +3118,7 @@ accept_wait:
conn->context = isert_conn;
isert_conn->conn = conn;
+ isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);
@@ -3274,8 +3179,6 @@ static void isert_release_work(struct work_struct *work)
isert_info("Starting release conn %p\n", isert_conn);
- wait_for_completion(&isert_conn->wait);
-
mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->mutex);
@@ -3309,24 +3212,6 @@ isert_wait4cmds(struct iscsi_conn *conn)
}
}
-static void
-isert_wait4flush(struct isert_conn *isert_conn)
-{
- struct ib_recv_wr *bad_wr;
-
- isert_info("conn %p\n", isert_conn);
-
- init_completion(&isert_conn->wait_comp_err);
- isert_conn->beacon.wr_id = ISER_BEACON_WRID;
- /* post an indication that all flush errors were consumed */
- if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
- isert_err("conn %p failed to post beacon", isert_conn);
- return;
- }
-
- wait_for_completion(&isert_conn->wait_comp_err);
-}
-
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicited dataout
@@ -3369,18 +3254,10 @@ static void isert_wait_conn(struct iscsi_conn *conn)
isert_info("Starting conn %p\n", isert_conn);
mutex_lock(&isert_conn->mutex);
- /*
- * Only wait for wait_comp_err if the isert_conn made it
- * into full feature phase..
- */
- if (isert_conn->state == ISER_CONN_INIT) {
- mutex_unlock(&isert_conn->mutex);
- return;
- }
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->mutex);
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
@@ -3392,7 +3269,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- isert_wait4flush(isert_conn);
+ ib_drain_qp(isert_conn->qp);
isert_put_conn(isert_conn);
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 8d50453eef66..147900cbb578 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -36,9 +36,7 @@
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + \
sizeof(struct iscsi_hdr))
-#define ISER_RECV_DATA_SEG_LEN 8192
-#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
-#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
@@ -62,12 +60,11 @@
ISERT_MAX_TX_MISC_PDUS + \
ISERT_MAX_RX_MISC_PDUS)
-#define ISER_RX_PAD_SIZE (ISER_RECV_DATA_SEG_LEN + 4096 - \
- (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
+#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
+ (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
+ sizeof(struct ib_cqe)))
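/*
 * The pad keeps the descriptor size invariant: expanding the terms,
 *
 *	sizeof(struct iser_rx_desc) =
 *		ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN +
 *		sizeof(u64) + sizeof(struct ib_sge) +
 *		sizeof(struct ib_cqe) + ISER_RX_PAD_SIZE
 *	  = ISCSI_DEF_MAX_RECV_SEG_LEN + 4096
 *
 * so the newly embedded struct ib_cqe must be subtracted out here,
 * exactly as the u64 dma_addr and the ib_sge already are.
 */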
#define ISCSI_ISER_SG_TABLESIZE 256
-#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
-#define ISER_BEACON_WRID 0xfffffffffffffffeULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -84,6 +81,7 @@ enum iser_ib_op_code {
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
+ ISER_CONN_BOUND,
ISER_CONN_FULL_FEATURE,
ISER_CONN_TERMINATING,
ISER_CONN_DOWN,
@@ -92,23 +90,35 @@ enum iser_conn_state {
struct iser_rx_desc {
struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
- char data[ISER_RECV_DATA_SEG_LEN];
+ char data[ISCSI_DEF_MAX_RECV_SEG_LEN];
u64 dma_addr;
struct ib_sge rx_sg;
+ struct ib_cqe rx_cqe;
char pad[ISER_RX_PAD_SIZE];
} __packed;
+static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_rx_desc, rx_cqe);
+}
+
struct iser_tx_desc {
struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
enum isert_desc_type type;
u64 dma_addr;
struct ib_sge tx_sg[2];
+ struct ib_cqe tx_cqe;
int num_sge;
- struct isert_cmd *isert_cmd;
struct ib_send_wr send_wr;
} __packed;
+static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
+{
+ return container_of(cqe, struct iser_tx_desc, tx_cqe);
+}
+
enum isert_indicator {
ISERT_PROTECTED = 1 << 0,
ISERT_DATA_KEY_VALID = 1 << 1,
@@ -144,20 +154,6 @@ enum {
SIG = 2,
};
-struct isert_rdma_wr {
- struct isert_cmd *isert_cmd;
- enum iser_ib_op_code iser_ib_op;
- struct ib_sge *ib_sge;
- struct ib_sge s_ib_sge;
- int rdma_wr_num;
- struct ib_rdma_wr *rdma_wr;
- struct ib_rdma_wr s_rdma_wr;
- struct ib_sge ib_sg[3];
- struct isert_data_buf data;
- struct isert_data_buf prot;
- struct fast_reg_descriptor *fr_desc;
-};
-
struct isert_cmd {
uint32_t read_stag;
uint32_t write_stag;
@@ -170,22 +166,34 @@ struct isert_cmd {
struct iscsi_cmd *iscsi_cmd;
struct iser_tx_desc tx_desc;
struct iser_rx_desc *rx_desc;
- struct isert_rdma_wr rdma_wr;
+ enum iser_ib_op_code iser_ib_op;
+ struct ib_sge *ib_sge;
+ struct ib_sge s_ib_sge;
+ int rdma_wr_num;
+ struct ib_rdma_wr *rdma_wr;
+ struct ib_rdma_wr s_rdma_wr;
+ struct ib_sge ib_sg[3];
+ struct isert_data_buf data;
+ struct isert_data_buf prot;
+ struct fast_reg_descriptor *fr_desc;
struct work_struct comp_work;
struct scatterlist sg;
};
+static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
+{
+ return container_of(desc, struct isert_cmd, tx_desc);
+}
+
struct isert_device;
struct isert_conn {
enum iser_conn_state state;
- int post_recv_buf_count;
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
u32 max_sge;
- char *login_buf;
- char *login_req_buf;
+ struct iser_rx_desc *login_req_buf;
char *login_rsp_buf;
u64 login_req_dma;
int login_req_len;
@@ -201,15 +209,12 @@ struct isert_conn {
struct ib_qp *qp;
struct isert_device *device;
struct mutex mutex;
- struct completion wait;
- struct completion wait_comp_err;
struct kref kref;
struct list_head fr_pool;
int fr_pool_size;
/* lock to protect fastreg pool */
spinlock_t pool_lock;
struct work_struct release_work;
- struct ib_recv_wr beacon;
bool logout_posted;
bool snd_w_inv;
};
@@ -221,17 +226,13 @@ struct isert_conn {
*
* @device: pointer to device handle
* @cq: completion queue
- * @wcs: work completion array
* @active_qps: Number of active QPs attached
* to completion context
- * @work: completion work handle
*/
struct isert_comp {
struct isert_device *device;
struct ib_cq *cq;
- struct ib_wc wcs[16];
int active_qps;
- struct work_struct work;
};
struct isert_device {
@@ -243,9 +244,8 @@ struct isert_device {
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
- int (*reg_rdma_mem)(struct iscsi_conn *conn,
- struct iscsi_cmd *cmd,
- struct isert_rdma_wr *wr);
+ int (*reg_rdma_mem)(struct isert_cmd *isert_cmd,
+ struct iscsi_conn *conn);
void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
struct isert_conn *isert_conn);
};
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 03022f6420d7..b6bf20496021 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -446,49 +446,17 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
dev->max_pages_per_mr);
}
-static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct srp_rdma_ch *ch = cq->cq_context;
-
- complete(&ch->done);
-}
-
-static struct ib_cqe srp_drain_cqe = {
- .done = srp_drain_done,
-};
-
/**
* srp_destroy_qp() - destroy an RDMA queue pair
* @ch: SRP RDMA channel.
*
- * Change a queue pair into the error state and wait until all receive
- * completions have been processed before destroying it. This avoids that
- * the receive completion handler can access the queue pair while it is
+ * Drain the QP before destroying it. This prevents the receive
+ * completion handler from accessing the queue pair while it is
+ * being destroyed.
*/
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
- static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
- static struct ib_recv_wr wr = { 0 };
- struct ib_recv_wr *bad_wr;
- int ret;
-
- wr.wr_cqe = &srp_drain_cqe;
- /* Destroying a QP and reusing ch->done is only safe if not connected */
- WARN_ON_ONCE(ch->connected);
-
- ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
- WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
- if (ret)
- goto out;
-
- init_completion(&ch->done);
- ret = ib_post_recv(ch->qp, &wr, &bad_wr);
- WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
- if (ret == 0)
- wait_for_completion(&ch->done);
-
-out:
+ ib_drain_rq(ch->qp);
ib_destroy_qp(ch->qp);
}
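/*
 * ib_drain_rq() packages the sequence the deleted code open-coded: move
 * the QP into the error state, post one marker receive WR carrying a
 * private ib_cqe, and block until its flush completion is seen, which by
 * completion ordering implies every earlier receive has completed as
 * well. Typical usage is simply:
 *
 *	ib_drain_rq(ch->qp);
 *	ib_destroy_qp(ch->qp);
 *
 * ib_drain_qp(), used by the isert changes above, drains both the send
 * and receive queues.
 */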
@@ -508,7 +476,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
if (!init_attr)
return -ENOMEM;
- /* queue_size + 1 for ib_drain_qp */
+ /* queue_size + 1 for ib_drain_rq() */
recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
ch->comp_vector, IB_POLL_SOFTIRQ);
if (IS_ERR(recv_cq)) {
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0c37fee363b1..8b42401d4795 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -91,76 +91,32 @@ MODULE_PARM_DESC(srpt_service_guid,
" instead of using the node_guid of the first HCA.");
static struct ib_client srpt_client;
-static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static void srpt_release_cmd(struct se_cmd *se_cmd);
+static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
-/**
- * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
- */
-static inline
-enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
- case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
- default: return dir;
- }
-}
-
-/**
- * srpt_sdev_name() - Return the name associated with the HCA.
- *
- * Examples are ib0, ib1, ...
- */
-static inline const char *srpt_sdev_name(struct srpt_device *sdev)
-{
- return sdev->device->name;
-}
-
-static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
-{
- unsigned long flags;
- enum rdma_ch_state state;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- state = ch->state;
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return state;
-}
-
-static enum rdma_ch_state
-srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
-{
- unsigned long flags;
- enum rdma_ch_state prev;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- prev = ch->state;
- ch->state = new_state;
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return prev;
-}
-
-/**
- * srpt_test_and_set_ch_state() - Test and set the channel state.
- *
- * Returns true if and only if the channel state has been set to the new state.
+/*
+ * The only allowed channel state changes are those that change the channel
+ * state into a state with a higher numerical value. Hence the new > prev test.
*/
-static bool
-srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
- enum rdma_ch_state new)
+static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
unsigned long flags;
enum rdma_ch_state prev;
+ bool changed = false;
spin_lock_irqsave(&ch->spinlock, flags);
prev = ch->state;
- if (prev == old)
+ if (new > prev) {
ch->state = new;
+ changed = true;
+ }
spin_unlock_irqrestore(&ch->spinlock, flags);
- return prev == old;
+
+ return changed;
}
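/*
 * Since the rdma_ch_state values are declared in lifecycle order
 * (CH_CONNECTING < CH_LIVE < CH_DISCONNECTING < CH_DRAINING <
 * CH_DISCONNECTED), the single new > prev test makes every transition
 * monotonic and turns the helper into a race-free "first caller wins"
 * primitive:
 *
 *	if (srpt_set_ch_state(ch, CH_DRAINING))
 *		... this caller closes the channel ...
 *	else
 *		... another thread already moved it to or past draining ...
 */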
/**
@@ -182,7 +138,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
return;
pr_debug("ASYNC event= %d on device= %s\n", event->event,
- srpt_sdev_name(sdev));
+ sdev->device->name);
switch (event->event) {
case IB_EVENT_PORT_ERR:
@@ -220,25 +176,39 @@ static void srpt_srq_event(struct ib_event *event, void *ctx)
pr_info("SRQ event %d\n", event->event);
}
+static const char *get_ch_state_name(enum rdma_ch_state s)
+{
+ switch (s) {
+ case CH_CONNECTING:
+ return "connecting";
+ case CH_LIVE:
+ return "live";
+ case CH_DISCONNECTING:
+ return "disconnecting";
+ case CH_DRAINING:
+ return "draining";
+ case CH_DISCONNECTED:
+ return "disconnected";
+ }
+ return "???";
+}
+
/**
* srpt_qp_event() - QP event callback function.
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
- event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
+ event->event, ch->cm_id, ch->sess_name, ch->state);
switch (event->event) {
case IB_EVENT_COMM_EST:
ib_cm_notify(ch->cm_id, event->event);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
- if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
- CH_RELEASING))
- srpt_release_channel(ch);
- else
- pr_debug("%s: state %d - ignored LAST_WQE.\n",
- ch->sess_name, srpt_get_ch_state(ch));
+ pr_debug("%s-%d, state %s: received Last WQE event.\n",
+ ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
break;
default:
pr_err("received unrecognized IB QP event %d\n", event->event);
@@ -281,7 +251,7 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
struct ib_class_port_info *cif;
cif = (struct ib_class_port_info *)mad->data;
- memset(cif, 0, sizeof *cif);
+ memset(cif, 0, sizeof(*cif));
cif->base_version = 1;
cif->class_version = 1;
cif->resp_time_value = 20;
@@ -340,7 +310,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
return;
}
- memset(iocp, 0, sizeof *iocp);
+ memset(iocp, 0, sizeof(*iocp));
strcpy(iocp->id_string, SRPT_ID_STRING);
iocp->guid = cpu_to_be64(srpt_service_guid);
iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
@@ -390,7 +360,7 @@ static void srpt_get_svc_entries(u64 ioc_guid,
}
svc_entries = (struct ib_dm_svc_entries *)mad->data;
- memset(svc_entries, 0, sizeof *svc_entries);
+ memset(svc_entries, 0, sizeof(*svc_entries));
svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
snprintf(svc_entries->service_entries[0].name,
sizeof(svc_entries->service_entries[0].name),
@@ -484,7 +454,7 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
rsp->ah = ah;
dm_mad = rsp->mad;
- memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
+ memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
dm_mad->mad_hdr.status = 0;
@@ -532,7 +502,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_port_attr port_attr;
int ret;
- memset(&port_modify, 0, sizeof port_modify);
+ memset(&port_modify, 0, sizeof(port_modify));
port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
port_modify.clr_port_cap_mask = 0;
@@ -553,7 +523,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
goto err_query_port;
if (!sport->mad_agent) {
- memset(&reg_req, 0, sizeof reg_req);
+ memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
@@ -841,6 +811,39 @@ out:
}
/**
+ * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ *
+ * A quote from the InfiniBand specification: C9-88: For an HCA responder
+ * using Reliable Connection service, for each zero-length RDMA READ or WRITE
+ * request, the R_Key shall not be validated, even if the request includes
+ * Immediate data.
+ */
+static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
+{
+ struct ib_send_wr wr, *bad_wr;
+
+ memset(&wr, 0, sizeof(wr));
+ wr.opcode = IB_WR_RDMA_WRITE;
+ wr.wr_cqe = &ch->zw_cqe;
+ wr.send_flags = IB_SEND_SIGNALED;
+ return ib_post_send(ch->qp, &wr, &bad_wr);
+}
+
+static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct srpt_rdma_ch *ch = cq->cq_context;
+
+ if (wc->status == IB_WC_SUCCESS) {
+ srpt_process_wait_list(ch);
+ } else {
+ if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+ schedule_work(&ch->release_work);
+ else
+ WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
+ }
+}
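/*
 * The zero-length write doubles as a completion fence: on a reliable
 * connection, send-queue completions are delivered in order, so a
 * successful completion here means everything posted earlier has
 * completed and the wait list can be serviced, while a flush error means
 * the QP has reached the error state and the channel can be released.
 * The one assumption is that ch->zw_cqe.done was pointed at this handler
 * when the channel was set up, which happens outside this hunk:
 *
 *	ch->zw_cqe.done = srpt_zerolength_write_done;
 */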
+
+/**
* srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
* @ioctx: Pointer to the I/O context associated with the request.
* @srp_cmd: Pointer to the SRP_CMD request data.
@@ -903,14 +906,14 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
db = (struct srp_direct_buf *)(srp_cmd->add_data
+ add_cdb_offset);
- memcpy(ioctx->rbufs, db, sizeof *db);
+ memcpy(ioctx->rbufs, db, sizeof(*db));
*data_len = be32_to_cpu(db->len);
} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+ add_cdb_offset);
- ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
+ ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);
if (ioctx->n_rbuf >
(srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
@@ -929,7 +932,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
ioctx->rbufs = &ioctx->single_rbuf;
else {
ioctx->rbufs =
- kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
+ kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
if (!ioctx->rbufs) {
ioctx->n_rbuf = 0;
ret = -ENOMEM;
@@ -938,7 +941,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
}
db = idb->desc_list;
- memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
+ memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
*data_len = be32_to_cpu(idb->len);
}
out:
@@ -956,7 +959,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
struct ib_qp_attr *attr;
int ret;
- attr = kzalloc(sizeof *attr, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
@@ -1070,7 +1073,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
dir = ioctx->cmd.data_direction;
BUG_ON(dir == DMA_NONE);
ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
- opposite_dma_dir(dir));
+ target_reverse_dma_direction(&ioctx->cmd));
ioctx->mapped_sg_count = 0;
}
}
@@ -1107,7 +1110,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
- opposite_dma_dir(dir));
+ target_reverse_dma_direction(cmd));
if (unlikely(!count))
return -EAGAIN;
@@ -1313,10 +1316,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
/*
* If the command is in a state where the target core is waiting for
- * the ib_srpt driver, change the state to the next state. Changing
- * the state of the command from SRPT_STATE_NEED_DATA to
- * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
- * function a second time.
+ * the ib_srpt driver, change the state to the next state.
*/
spin_lock_irqsave(&ioctx->spinlock, flags);
@@ -1325,25 +1325,17 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
case SRPT_STATE_NEED_DATA:
ioctx->state = SRPT_STATE_DATA_IN;
break;
- case SRPT_STATE_DATA_IN:
case SRPT_STATE_CMD_RSP_SENT:
case SRPT_STATE_MGMT_RSP_SENT:
ioctx->state = SRPT_STATE_DONE;
break;
default:
+ WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
+ __func__, state);
break;
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);
- if (state == SRPT_STATE_DONE) {
- struct srpt_rdma_ch *ch = ioctx->ch;
-
- BUG_ON(ch->sess == NULL);
-
- target_put_sess_cmd(&ioctx->cmd);
- goto out;
- }
-
pr_debug("Aborting cmd with state %d and tag %lld\n", state,
ioctx->cmd.tag);
@@ -1351,19 +1343,16 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
case SRPT_STATE_NEW:
case SRPT_STATE_DATA_IN:
case SRPT_STATE_MGMT:
+ case SRPT_STATE_DONE:
/*
* Do nothing - defer abort processing until
* srpt_queue_response() is invoked.
*/
- WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
break;
case SRPT_STATE_NEED_DATA:
- /* DMA_TO_DEVICE (write) - RDMA read error. */
-
- /* XXX(hch): this is a horrible layering violation.. */
- spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
- ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
- spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
+ pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
+ transport_generic_request_failure(&ioctx->cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
break;
case SRPT_STATE_CMD_RSP_SENT:
/*
@@ -1371,18 +1360,16 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* not been received in time.
*/
srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
- target_put_sess_cmd(&ioctx->cmd);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
break;
case SRPT_STATE_MGMT_RSP_SENT:
- srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- target_put_sess_cmd(&ioctx->cmd);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
break;
default:
WARN(1, "Unexpected command state (%d)", state);
break;
}
-out:
return state;
}
@@ -1422,9 +1409,14 @@ static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ /*
+ * Note: if an RDMA write error completion is received that
+ * means that a SEND also has been posted. Defer further
+ * processing of the associated command until the send error
+ * completion has been received.
+ */
pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
ioctx, wc->status);
- srpt_abort_cmd(ioctx);
}
}
@@ -1464,7 +1456,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
sense_data_len = ioctx->cmd.scsi_sense_length;
WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
- memset(srp_rsp, 0, sizeof *srp_rsp);
+ memset(srp_rsp, 0, sizeof(*srp_rsp));
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
@@ -1514,7 +1506,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
srp_rsp = ioctx->ioctx.buf;
BUG_ON(!srp_rsp);
- memset(srp_rsp, 0, sizeof *srp_rsp);
+ memset(srp_rsp, 0, sizeof(*srp_rsp));
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
@@ -1528,80 +1520,6 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
return resp_len;
}
-#define NO_SUCH_LUN ((uint64_t)-1LL)
-
-/*
- * SCSI LUN addressing method. See also SAM-2 and the section about
- * eight byte LUNs.
- */
-enum scsi_lun_addr_method {
- SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
- SCSI_LUN_ADDR_METHOD_FLAT = 1,
- SCSI_LUN_ADDR_METHOD_LUN = 2,
- SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
-};
-
-/*
- * srpt_unpack_lun() - Convert from network LUN to linear LUN.
- *
- * Convert an 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
- * order (big endian) to a linear LUN. Supports three LUN addressing methods:
- * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
- */
-static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
-{
- uint64_t res = NO_SUCH_LUN;
- int addressing_method;
-
- if (unlikely(len < 2)) {
- pr_err("Illegal LUN length %d, expected 2 bytes or more\n",
- len);
- goto out;
- }
-
- switch (len) {
- case 8:
- if ((*((__be64 *)lun) &
- cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
- goto out_err;
- break;
- case 4:
- if (*((__be16 *)&lun[2]) != 0)
- goto out_err;
- break;
- case 6:
- if (*((__be32 *)&lun[2]) != 0)
- goto out_err;
- break;
- case 2:
- break;
- default:
- goto out_err;
- }
-
- addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
- switch (addressing_method) {
- case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
- case SCSI_LUN_ADDR_METHOD_FLAT:
- case SCSI_LUN_ADDR_METHOD_LUN:
- res = *(lun + 1) | (((*lun) & 0x3f) << 8);
- break;
-
- case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
- default:
- pr_err("Unimplemented LUN addressing method %u\n",
- addressing_method);
- break;
- }
-
-out:
- return res;
-
-out_err:
- pr_err("Support for multi-level LUNs has not yet been implemented\n");
- goto out;
-}
-
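/*
 * The hand-rolled decoder removed above is replaced by the generic
 * scsilun_to_int() helper from <scsi/scsi_common.h>: srpt_handle_cmd()
 * and srpt_handle_tsk_mgmt() below now pass scsilun_to_int(&...->lun)
 * straight to target_submit_cmd() and target_submit_tmr().
 */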
static int srpt_check_stop_free(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx = container_of(cmd,
@@ -1613,16 +1531,14 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
/**
* srpt_handle_cmd() - Process SRP_CMD.
*/
-static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
+static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
{
struct se_cmd *cmd;
struct srp_cmd *srp_cmd;
- uint64_t unpacked_lun;
u64 data_len;
enum dma_data_direction dir;
- sense_reason_t ret;
int rc;
BUG_ON(!send_ioctx);
@@ -1650,65 +1566,23 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
pr_err("0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
- ret = TCM_INVALID_CDB_FIELD;
- goto send_sense;
+ goto release_ioctx;
}
- unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
- sizeof(srp_cmd->lun));
rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
- &send_ioctx->sense_data[0], unpacked_lun, data_len,
- TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ &send_ioctx->sense_data[0],
+ scsilun_to_int(&srp_cmd->lun), data_len,
+ TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
if (rc != 0) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- goto send_sense;
+ pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
+ srp_cmd->tag);
+ goto release_ioctx;
}
- return 0;
-
-send_sense:
- transport_send_check_condition_and_sense(cmd, ret, 0);
- return -1;
-}
-
-/**
- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
- * @ch: RDMA channel of the task management request.
- * @fn: Task management function to perform.
- * @req_tag: Tag of the SRP task management request.
- * @mgmt_ioctx: I/O context of the task management request.
- *
- * Returns zero if the target core will process the task management
- * request asynchronously.
- *
- * Note: It is assumed that the initiator serializes tag-based task management
- * requests.
- */
-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
-{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *target;
- int ret, i;
+ return;
- ret = -EINVAL;
- ch = ioctx->ch;
- BUG_ON(!ch);
- BUG_ON(!ch->sport);
- sdev = ch->sport->sdev;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- for (i = 0; i < ch->rq_size; ++i) {
- target = ch->ioctx_ring[i];
- if (target->cmd.se_lun == ioctx->cmd.se_lun &&
- target->cmd.tag == tag &&
- srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
- ret = 0;
- /* now let the target core abort &target->cmd; */
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
- return ret;
+release_ioctx:
+ send_ioctx->state = SRPT_STATE_DONE;
+ srpt_release_cmd(cmd);
}
static int srp_tmr_to_tcm(int fn)
@@ -1744,8 +1618,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
struct srp_tsk_mgmt *srp_tsk;
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
- uint64_t unpacked_lun;
- uint32_t tag = 0;
int tcm_tmr;
int rc;
@@ -1761,26 +1633,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
- if (tcm_tmr < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
- goto fail;
- }
- unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
- sizeof(srp_tsk->lun));
-
- if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
- rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
- if (rc < 0) {
- send_ioctx->cmd.se_tmr_req->response =
- TMR_TASK_DOES_NOT_EXIST;
- goto fail;
- }
- tag = srp_tsk->task_tag;
- }
- rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
- srp_tsk, tcm_tmr, GFP_KERNEL, tag,
- TARGET_SCF_ACK_KREF);
+ rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
+ scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
+ GFP_KERNEL, srp_tsk->task_tag,
+ TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
goto fail;
@@ -1800,7 +1656,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *send_ioctx)
{
struct srp_cmd *srp_cmd;
- enum rdma_ch_state ch_state;
BUG_ON(!ch);
BUG_ON(!recv_ioctx);
@@ -1809,13 +1664,12 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
recv_ioctx->ioctx.dma, srp_max_req_size,
DMA_FROM_DEVICE);
- ch_state = srpt_get_ch_state(ch);
- if (unlikely(ch_state == CH_CONNECTING)) {
+ if (unlikely(ch->state == CH_CONNECTING)) {
list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
goto out;
}
- if (unlikely(ch_state != CH_LIVE))
+ if (unlikely(ch->state != CH_LIVE))
goto out;
srp_cmd = recv_ioctx->ioctx.buf;
@@ -1878,6 +1732,28 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
}
}
+/*
+ * This function must be called from the context in which RDMA completions are
+ * processed because it accesses the wait list without protection against
+ * access from other threads.
+ */
+static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ while (!list_empty(&ch->cmd_wait_list) &&
+ ch->state >= CH_LIVE &&
+ (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
+ struct srpt_recv_ioctx *recv_ioctx;
+
+ recv_ioctx = list_first_entry(&ch->cmd_wait_list,
+ struct srpt_recv_ioctx,
+ wait_list);
+ list_del(&recv_ioctx->wait_list);
+ srpt_handle_new_iu(ch, recv_ioctx, ioctx);
+ }
+}
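/*
 * Both callers honour the requirement stated above:
 * srpt_process_wait_list() runs only from srpt_zerolength_write_done()
 * and srpt_send_done(), i.e. always on the completion context of this
 * channel's CQ, so the wait list needs no extra locking.
 */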
+
/**
* Note: Although this has not yet been observed during tests, at least in
* theory it is possible that the srpt_get_send_ioctx() call invoked by
@@ -1905,15 +1781,10 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
atomic_inc(&ch->sq_wr_avail);
- if (wc->status != IB_WC_SUCCESS) {
+ if (wc->status != IB_WC_SUCCESS)
pr_info("sending response for ioctx 0x%p failed"
" with status %d\n", ioctx, wc->status);
- atomic_dec(&ch->req_lim);
- srpt_abort_cmd(ioctx);
- goto out;
- }
-
if (state != SRPT_STATE_DONE) {
srpt_unmap_sg_to_ib_sge(ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
@@ -1922,18 +1793,7 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
" wr_id = %u.\n", ioctx->ioctx.index);
}
-out:
- while (!list_empty(&ch->cmd_wait_list) &&
- srpt_get_ch_state(ch) == CH_LIVE &&
- (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
- struct srpt_recv_ioctx *recv_ioctx;
-
- recv_ioctx = list_first_entry(&ch->cmd_wait_list,
- struct srpt_recv_ioctx,
- wait_list);
- list_del(&recv_ioctx->wait_list);
- srpt_handle_new_iu(ch, recv_ioctx, ioctx);
- }
+ srpt_process_wait_list(ch);
}
/**
@@ -1950,7 +1810,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
WARN_ON(ch->rq_size < 1);
ret = -ENOMEM;
- qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
+ qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
if (!qp_init)
goto out;
@@ -2017,168 +1877,102 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
}
/**
- * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ * srpt_close_ch() - Close an RDMA channel.
*
- * Reset the QP and make sure all resources associated with the channel will
- * be deallocated at an appropriate time.
+ * Make sure all resources associated with the channel will be deallocated at
+ * an appropriate time.
*
- * Note: The caller must hold ch->sport->sdev->spinlock.
+ * Returns true if and only if the channel state has been modified into
+ * CH_DRAINING.
*/
-static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
- enum rdma_ch_state prev_state;
- unsigned long flags;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- prev_state = ch->state;
- switch (prev_state) {
- case CH_CONNECTING:
- case CH_LIVE:
- ch->state = CH_DISCONNECTING;
- break;
- default:
- break;
- }
- spin_unlock_irqrestore(&ch->spinlock, flags);
+ int ret;
- switch (prev_state) {
- case CH_CONNECTING:
- ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
- NULL, 0);
- /* fall through */
- case CH_LIVE:
- if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
- pr_err("sending CM DREQ failed.\n");
- break;
- case CH_DISCONNECTING:
- break;
- case CH_DRAINING:
- case CH_RELEASING:
- break;
+ if (!srpt_set_ch_state(ch, CH_DRAINING)) {
+ pr_debug("%s-%d: already closed\n", ch->sess_name,
+ ch->qp->qp_num);
+ return false;
}
-}
-/**
- * srpt_close_ch() - Close an RDMA channel.
- */
-static void srpt_close_ch(struct srpt_rdma_ch *ch)
-{
- struct srpt_device *sdev;
+ kref_get(&ch->kref);
- sdev = ch->sport->sdev;
- spin_lock_irq(&sdev->spinlock);
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
-}
+ ret = srpt_ch_qp_err(ch);
+ if (ret < 0)
+ pr_err("%s-%d: changing queue pair into error state failed: %d\n",
+ ch->sess_name, ch->qp->qp_num, ret);
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
- struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
- unsigned long flags;
-
- spin_lock_irqsave(&ch->spinlock, flags);
- if (ch->in_shutdown) {
- spin_unlock_irqrestore(&ch->spinlock, flags);
- return true;
+ pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+ ch->qp->qp_num);
+ ret = srpt_zerolength_write(ch);
+ if (ret < 0) {
+ pr_err("%s-%d: queuing zero-length write failed: %d\n",
+ ch->sess_name, ch->qp->qp_num, ret);
+ if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+ schedule_work(&ch->release_work);
+ else
+ WARN_ON_ONCE(true);
}
- ch->in_shutdown = true;
- target_sess_cmd_list_set_waiting(se_sess);
- spin_unlock_irqrestore(&ch->spinlock, flags);
+ kref_put(&ch->kref, srpt_free_ch);
return true;
}
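
The new srpt_close_ch() pins the channel with kref_get() before queueing the asynchronous zero-length write and drops the reference once the queueing path is done, so the release function can only run after the last user is gone. For readers unfamiliar with the idiom, here is a minimal userspace sketch of the same pattern; chan_get/chan_put/chan_free are invented names standing in for kref_get/kref_put and the release callback:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct chan {
	atomic_int refcount;		/* plays the role of struct kref */
};

static void chan_free(struct chan *ch)	/* the kref release callback */
{
	printf("freeing channel\n");
	free(ch);
}

static void chan_get(struct chan *ch)
{
	atomic_fetch_add(&ch->refcount, 1);
}

static void chan_put(struct chan *ch)
{
	/* the last put runs the release function, like kref_put() */
	if (atomic_fetch_sub(&ch->refcount, 1) == 1)
		chan_free(ch);
}

int main(void)
{
	struct chan *ch = malloc(sizeof(*ch));

	if (!ch)
		return 1;
	atomic_init(&ch->refcount, 1);	/* kref_init() */
	chan_get(ch);		/* pin the object across the async op */
	/* ... queue work; its completion path ends in chan_put() ... */
	chan_put(ch);		/* completion drops the pin */
	chan_put(ch);		/* owner drops the initial reference */
	return 0;
}

The point of taking the extra reference before srpt_ch_qp_err() is that the zero-length write completion may fire, and release the channel, before srpt_close_ch() itself returns.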
-/**
- * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
- * @cm_id: Pointer to the CM ID of the channel to be drained.
- *
- * Note: Must be called from inside srpt_cm_handler to avoid a race between
- * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
- * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
- * waits until all target sessions for the associated IB device have been
- * unregistered and target session registration involves a call to
- * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
- * this function has finished).
+/*
+ * Change the channel state into CH_DISCONNECTING. If a channel has not yet
+ * reached the connected state, close it. If a channel is in the connected
+ * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
+ * the responsibility of the caller to ensure that this function is not
+ * invoked concurrently with the code that accepts a connection. This means
+ * that this function must be invoked either from inside a CM callback
+ * function or with the srpt_port.mutex held.
*/
-static void srpt_drain_channel(struct ib_cm_id *cm_id)
+static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
- struct srpt_device *sdev;
- struct srpt_rdma_ch *ch;
int ret;
- bool do_reset = false;
- WARN_ON_ONCE(irqs_disabled());
+ if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
+ return -ENOTCONN;
- sdev = cm_id->context;
- BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->cm_id == cm_id) {
- do_reset = srpt_test_and_set_ch_state(ch,
- CH_CONNECTING, CH_DRAINING) ||
- srpt_test_and_set_ch_state(ch,
- CH_LIVE, CH_DRAINING) ||
- srpt_test_and_set_ch_state(ch,
- CH_DISCONNECTING, CH_DRAINING);
- break;
- }
- }
- spin_unlock_irq(&sdev->spinlock);
+ ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
+ if (ret < 0)
+ ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
- if (do_reset) {
- if (ch->sess)
- srpt_shutdown_session(ch->sess);
+ if (ret < 0 && srpt_close_ch(ch))
+ ret = 0;
- ret = srpt_ch_qp_err(ch);
- if (ret < 0)
- pr_err("Setting queue pair in error state"
- " failed: %d\n", ret);
- }
+ return ret;
}
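
srpt_disconnect_ch() now funnels every shutdown path through a single atomic state transition: whichever caller wins the transition does the CM work, and everyone else gets -ENOTCONN. srpt_set_ch_state() itself is not visible in this hunk, so the sketch below only illustrates the general forward-only transition idiom in userspace C11; the helper body here is an assumption, not the driver's actual implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum ch_state { CH_CONNECTING, CH_LIVE, CH_DISCONNECTING,
		CH_DRAINING, CH_DISCONNECTED };

/* Advance the state machine, but never move backwards. */
static bool set_ch_state(atomic_int *state, enum ch_state new)
{
	int old = atomic_load(state);

	while (old < (int)new) {
		if (atomic_compare_exchange_weak(state, &old, new))
			return true;	/* we performed the transition */
	}
	return false;		/* someone else got there first */
}

int main(void)
{
	atomic_int st = CH_LIVE;

	printf("first close: %d\n", set_ch_state(&st, CH_DISCONNECTING));
	printf("second close: %d\n", set_ch_state(&st, CH_DISCONNECTING));
	return 0;
}

This is what lets the patch delete the old per-state switch statements: the "who closes the channel" race is decided once, at the transition.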
-/**
- * srpt_find_channel() - Look up an RDMA channel.
- * @cm_id: Pointer to the CM ID of the channel to be looked up.
- *
- * Return NULL if no matching RDMA channel has been found.
- */
-static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
- struct ib_cm_id *cm_id)
+static void __srpt_close_all_ch(struct srpt_device *sdev)
{
struct srpt_rdma_ch *ch;
- bool found;
- WARN_ON_ONCE(irqs_disabled());
- BUG_ON(!sdev);
+ lockdep_assert_held(&sdev->mutex);
- found = false;
- spin_lock_irq(&sdev->spinlock);
list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->cm_id == cm_id) {
- found = true;
- break;
- }
+ if (srpt_disconnect_ch(ch) >= 0)
+ pr_info("Closing channel %s-%d because target %s has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
+ sdev->device->name);
+ srpt_close_ch(ch);
}
- spin_unlock_irq(&sdev->spinlock);
-
- return found ? ch : NULL;
}
/**
- * srpt_release_channel() - Release channel resources.
- *
- * Schedules the actual release because:
- * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
- * trigger a deadlock.
- * - It is not safe to call TCM transport_* functions from interrupt context.
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
*/
-static void srpt_release_channel(struct srpt_rdma_ch *ch)
+static int srpt_shutdown_session(struct se_session *se_sess)
{
- schedule_work(&ch->release_work);
+ return 1;
+}
+
+static void srpt_free_ch(struct kref *kref)
+{
+ struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+
+ kfree(ch);
}
static void srpt_release_channel_work(struct work_struct *w)
@@ -2188,8 +1982,8 @@ static void srpt_release_channel_work(struct work_struct *w)
struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
- pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
- ch->release_done);
+ pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
+ ch->qp->qp_num, ch->release_done);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
@@ -2197,6 +1991,7 @@ static void srpt_release_channel_work(struct work_struct *w)
se_sess = ch->sess;
BUG_ON(!se_sess);
+ target_sess_cmd_list_set_waiting(se_sess);
target_wait_for_sess_cmds(se_sess);
transport_deregister_session_configfs(se_sess);
@@ -2211,16 +2006,15 @@ static void srpt_release_channel_work(struct work_struct *w)
ch->sport->sdev, ch->rq_size,
ch->rsp_size, DMA_TO_DEVICE);
- spin_lock_irq(&sdev->spinlock);
- list_del(&ch->list);
- spin_unlock_irq(&sdev->spinlock);
-
+ mutex_lock(&sdev->mutex);
+ list_del_init(&ch->list);
if (ch->release_done)
complete(ch->release_done);
+ mutex_unlock(&sdev->mutex);
wake_up(&sdev->ch_releaseQ);
- kfree(ch);
+ kref_put(&ch->kref, srpt_free_ch);
}
/**
@@ -2240,7 +2034,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
struct srp_login_rej *rej;
struct ib_cm_rep_param *rep_param;
struct srpt_rdma_ch *ch, *tmp_ch;
- struct se_node_acl *se_acl;
u32 it_iu_len;
int i, ret = 0;
unsigned char *p;
@@ -2266,9 +2059,9 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
- rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
- rej = kzalloc(sizeof *rej, GFP_KERNEL);
- rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
+ rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+ rej = kzalloc(sizeof(*rej), GFP_KERNEL);
+ rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
if (!rsp || !rej || !rep_param) {
ret = -ENOMEM;
@@ -2297,7 +2090,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
- spin_lock_irq(&sdev->spinlock);
+ mutex_lock(&sdev->mutex);
list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
@@ -2305,26 +2098,16 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
&& param->port == ch->sport->port
&& param->listen_id == ch->sport->sdev->cm_id
&& ch->cm_id) {
- enum rdma_ch_state ch_state;
-
- ch_state = srpt_get_ch_state(ch);
- if (ch_state != CH_CONNECTING
- && ch_state != CH_LIVE)
+ if (srpt_disconnect_ch(ch) < 0)
continue;
-
- /* found an existing channel */
- pr_debug("Found existing channel %s"
- " cm_id= %p state= %d\n",
- ch->sess_name, ch->cm_id, ch_state);
-
- __srpt_close_ch(ch);
-
+ pr_info("Relogin - closed existing channel %s\n",
+ ch->sess_name);
rsp->rsp_flags =
SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
}
}
- spin_unlock_irq(&sdev->spinlock);
+ mutex_unlock(&sdev->mutex);
} else
rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
@@ -2340,7 +2123,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
goto reject;
}
- ch = kzalloc(sizeof *ch, GFP_KERNEL);
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch) {
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -2349,11 +2132,14 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
goto reject;
}
+ kref_init(&ch->kref);
+ ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
memcpy(ch->i_port_id, req->initiator_port_id, 16);
memcpy(ch->t_port_id, req->target_port_id, 16);
ch->sport = &sdev->port[param->port - 1];
ch->cm_id = cm_id;
+ cm_id->context = ch;
/*
* Avoid QUEUE_FULL conditions by limiting the number of buffers used
* for the SRP protocol to the command queue size.
@@ -2406,19 +2192,12 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
pr_debug("registering session %s\n", ch->sess_name);
p = &ch->sess_name[0];
- ch->sess = transport_init_session(TARGET_PROT_NORMAL);
- if (IS_ERR(ch->sess)) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_debug("Failed to create session\n");
- goto destroy_ib;
- }
-
try_again:
- se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
- if (!se_acl) {
+ ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
+ TARGET_PROT_NORMAL, p, ch, NULL);
+ if (IS_ERR(ch->sess)) {
pr_info("Rejected login because no ACL has been"
- " configured yet for initiator %s.\n", ch->sess_name);
+ " configured yet for initiator %s.\n", p);
/*
* XXX: Hack to retry of ch->i_port_id without leading '0x'
*/
@@ -2426,14 +2205,11 @@ try_again:
p += 2;
goto try_again;
}
- rej->reason = cpu_to_be32(
+ rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
- transport_free_session(ch->sess);
goto destroy_ib;
}
- ch->sess->se_node_acl = se_acl;
-
- transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
ch->sess_name, ch->cm_id);
@@ -2453,7 +2229,7 @@ try_again:
/* create cm reply */
rep_param->qp_num = ch->qp->qp_num;
rep_param->private_data = (void *)rsp;
- rep_param->private_data_len = sizeof *rsp;
+ rep_param->private_data_len = sizeof(*rsp);
rep_param->rnr_retry_count = 7;
rep_param->flow_control = 1;
rep_param->failover_accepted = 0;
@@ -2468,14 +2244,14 @@ try_again:
goto release_channel;
}
- spin_lock_irq(&sdev->spinlock);
+ mutex_lock(&sdev->mutex);
list_add_tail(&ch->list, &sdev->rch_list);
- spin_unlock_irq(&sdev->spinlock);
+ mutex_unlock(&sdev->mutex);
goto out;
release_channel:
- srpt_set_ch_state(ch, CH_RELEASING);
+ srpt_disconnect_ch(ch);
transport_deregister_session_configfs(ch->sess);
transport_deregister_session(ch->sess);
ch->sess = NULL;
@@ -2497,7 +2273,7 @@ reject:
| SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
- (void *)rej, sizeof *rej);
+ (void *)rej, sizeof(*rej));
out:
kfree(rep_param);
@@ -2507,10 +2283,23 @@ out:
return ret;
}
-static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
+ enum ib_cm_rej_reason reason,
+ const u8 *private_data,
+ u8 private_data_len)
{
- pr_info("Received IB REJ for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
+ char *priv = NULL;
+ int i;
+
+ if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
+ GFP_KERNEL))) {
+ for (i = 0; i < private_data_len; i++)
+ sprintf(priv + 3 * i, " %02x", private_data[i]);
+ }
+ pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
+ ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
+ "; private data" : "", priv ? priv : " (?)");
+ kfree(priv);
}
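
The new REJ handler renders the CM private data as a hex string; each byte expands to exactly three characters (" %02x"), which is where the private_data_len * 3 + 1 allocation comes from. A standalone version of the same formatting:

#include <stdio.h>
#include <stdlib.h>

static char *hex_dump(const unsigned char *data, size_t len)
{
	char *buf = malloc(len * 3 + 1);	/* " %02x" per byte + NUL */
	size_t i;

	if (!buf)
		return NULL;
	buf[0] = '\0';				/* valid even for len == 0 */
	for (i = 0; i < len; i++)
		sprintf(buf + 3 * i, " %02x", data[i]);
	return buf;
}

int main(void)
{
	const unsigned char priv[] = { 0xde, 0xad, 0xbe, 0xef };
	char *s = hex_dump(priv, sizeof(priv));

	printf("private data:%s\n", s ? s : " (?)");
	free(s);
	return 0;
}

Each sprintf() call overwrites the previous terminator, so the final NUL lands at offset 3 * len, inside the allocation.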
/**
@@ -2519,88 +2308,24 @@ static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
* An IB_CM_RTU_RECEIVED message indicates that the connection is established
* and that the recipient may begin transmitting (RTU = ready to use).
*/
-static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
+static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
- struct srpt_rdma_ch *ch;
int ret;
- ch = srpt_find_channel(cm_id->context, cm_id);
- BUG_ON(!ch);
-
- if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
- struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
-
+ if (srpt_set_ch_state(ch, CH_LIVE)) {
ret = srpt_ch_qp_rts(ch, ch->qp);
- list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
- wait_list) {
- list_del(&ioctx->wait_list);
- srpt_handle_new_iu(ch, ioctx, NULL);
- }
- if (ret)
+ if (ret == 0) {
+ /* Trigger wait list processing. */
+ ret = srpt_zerolength_write(ch);
+ WARN_ONCE(ret < 0, "%d\n", ret);
+ } else {
srpt_close_ch(ch);
- }
-}
-
-static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
-{
- pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
-{
- pr_info("Received IB REP error for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-/**
- * srpt_cm_dreq_recv() - Process reception of a DREQ message.
- */
-static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
-{
- struct srpt_rdma_ch *ch;
- unsigned long flags;
- bool send_drep = false;
-
- ch = srpt_find_channel(cm_id->context, cm_id);
- BUG_ON(!ch);
-
- pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
-
- spin_lock_irqsave(&ch->spinlock, flags);
- switch (ch->state) {
- case CH_CONNECTING:
- case CH_LIVE:
- send_drep = true;
- ch->state = CH_DISCONNECTING;
- break;
- case CH_DISCONNECTING:
- case CH_DRAINING:
- case CH_RELEASING:
- WARN(true, "unexpected channel state %d\n", ch->state);
- break;
- }
- spin_unlock_irqrestore(&ch->spinlock, flags);
-
- if (send_drep) {
- if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
- pr_err("Sending IB DREP failed.\n");
- pr_info("Received DREQ and sent DREP for session %s.\n",
- ch->sess_name);
+ }
}
}
/**
- * srpt_cm_drep_recv() - Process reception of a DREP message.
- */
-static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
-{
- pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
- srpt_drain_channel(cm_id);
-}
-
-/**
* srpt_cm_handler() - IB connection manager callback function.
*
* A non-zero return value will cause the caller destroy the CM ID.
@@ -2612,6 +2337,7 @@ static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
*/
static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
+ struct srpt_rdma_ch *ch = cm_id->context;
int ret;
ret = 0;
@@ -2621,32 +2347,39 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
event->private_data);
break;
case IB_CM_REJ_RECEIVED:
- srpt_cm_rej_recv(cm_id);
+ srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
+ event->private_data,
+ IB_CM_REJ_PRIVATE_DATA_SIZE);
break;
case IB_CM_RTU_RECEIVED:
case IB_CM_USER_ESTABLISHED:
- srpt_cm_rtu_recv(cm_id);
+ srpt_cm_rtu_recv(ch);
break;
case IB_CM_DREQ_RECEIVED:
- srpt_cm_dreq_recv(cm_id);
+ srpt_disconnect_ch(ch);
break;
case IB_CM_DREP_RECEIVED:
- srpt_cm_drep_recv(cm_id);
+ pr_info("Received CM DREP message for ch %s-%d.\n",
+ ch->sess_name, ch->qp->qp_num);
+ srpt_close_ch(ch);
break;
case IB_CM_TIMEWAIT_EXIT:
- srpt_cm_timewait_exit(cm_id);
+ pr_info("Received CM TimeWait exit for ch %s-%d.\n",
+ ch->sess_name, ch->qp->qp_num);
+ srpt_close_ch(ch);
break;
case IB_CM_REP_ERROR:
- srpt_cm_rep_error(cm_id);
+ pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
+ ch->qp->qp_num);
break;
case IB_CM_DREQ_ERROR:
- pr_info("Received IB DREQ ERROR event.\n");
+ pr_info("Received CM DREQ ERROR event.\n");
break;
case IB_CM_MRA_RECEIVED:
- pr_info("Received IB MRA event\n");
+ pr_info("Received CM MRA event\n");
break;
default:
- pr_err("received unrecognized IB CM event %d\n", event->event);
+ pr_err("received unrecognized CM event %d\n", event->event);
break;
}
@@ -2755,41 +2488,14 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd)
*/
static int srpt_write_pending(struct se_cmd *se_cmd)
{
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *ioctx;
+ struct srpt_send_ioctx *ioctx =
+ container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
enum srpt_command_state new_state;
- enum rdma_ch_state ch_state;
- int ret;
-
- ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
WARN_ON(new_state == SRPT_STATE_DONE);
-
- ch = ioctx->ch;
- BUG_ON(!ch);
-
- ch_state = srpt_get_ch_state(ch);
- switch (ch_state) {
- case CH_CONNECTING:
- WARN(true, "unexpected channel state %d\n", ch_state);
- ret = -EINVAL;
- goto out;
- case CH_LIVE:
- break;
- case CH_DISCONNECTING:
- case CH_DRAINING:
- case CH_RELEASING:
- pr_debug("cmd with tag %lld: channel disconnecting\n",
- ioctx->cmd.tag);
- srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
- ret = -EINVAL;
- goto out;
- }
- ret = srpt_xfer_data(ch, ioctx);
-
-out:
- return ret;
+ return srpt_xfer_data(ch, ioctx);
}
static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
@@ -2920,36 +2626,25 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
-static int srpt_ch_list_empty(struct srpt_device *sdev)
-{
- int res;
-
- spin_lock_irq(&sdev->spinlock);
- res = list_empty(&sdev->rch_list);
- spin_unlock_irq(&sdev->spinlock);
-
- return res;
-}
-
/**
* srpt_release_sdev() - Free the channel resources associated with a target.
*/
static int srpt_release_sdev(struct srpt_device *sdev)
{
- struct srpt_rdma_ch *ch, *tmp_ch;
- int res;
+ int i, res;
WARN_ON_ONCE(irqs_disabled());
BUG_ON(!sdev);
- spin_lock_irq(&sdev->spinlock);
- list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
+ mutex_lock(&sdev->mutex);
+ for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
+ sdev->port[i].enabled = false;
+ __srpt_close_all_ch(sdev);
+ mutex_unlock(&sdev->mutex);
res = wait_event_interruptible(sdev->ch_releaseQ,
- srpt_ch_list_empty(sdev));
+ list_empty_careful(&sdev->rch_list));
if (res)
pr_err("%s: interrupted.\n", __func__);
@@ -3003,14 +2698,14 @@ static void srpt_add_one(struct ib_device *device)
pr_debug("device = %p, device->dma_ops = %p\n", device,
device->dma_ops);
- sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev)
goto err;
sdev->device = device;
INIT_LIST_HEAD(&sdev->rch_list);
init_waitqueue_head(&sdev->ch_releaseQ);
- spin_lock_init(&sdev->spinlock);
+ mutex_init(&sdev->mutex);
sdev->pd = ib_alloc_pd(device);
if (IS_ERR(sdev->pd))
@@ -3082,7 +2777,7 @@ static void srpt_add_one(struct ib_device *device)
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
- srpt_sdev_name(sdev), i);
+ sdev->device->name, i);
goto err_ring;
}
snprintf(sport->port_guid, sizeof(sport->port_guid),
@@ -3231,24 +2926,26 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
static void srpt_close_session(struct se_session *se_sess)
{
DECLARE_COMPLETION_ONSTACK(release_done);
- struct srpt_rdma_ch *ch;
- struct srpt_device *sdev;
- unsigned long res;
-
- ch = se_sess->fabric_sess_ptr;
- WARN_ON(ch->sess != se_sess);
+ struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+ struct srpt_device *sdev = ch->sport->sdev;
+ bool wait;
- pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
+ pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
+ ch->state);
- sdev = ch->sport->sdev;
- spin_lock_irq(&sdev->spinlock);
+ mutex_lock(&sdev->mutex);
BUG_ON(ch->release_done);
ch->release_done = &release_done;
- __srpt_close_ch(ch);
- spin_unlock_irq(&sdev->spinlock);
+ wait = !list_empty(&ch->list);
+ srpt_disconnect_ch(ch);
+ mutex_unlock(&sdev->mutex);
- res = wait_for_completion_timeout(&release_done, 60 * HZ);
- WARN_ON(res == 0);
+ if (!wait)
+ return;
+
+ while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+ pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
+ ch->sess_name, ch->qp->qp_num, ch->state);
}
/**
@@ -3456,6 +3153,8 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
{
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ struct srpt_device *sdev = sport->sdev;
+ struct srpt_rdma_ch *ch;
unsigned long tmp;
int ret;
@@ -3469,11 +3168,24 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
return -EINVAL;
}
- if (tmp == 1)
- sport->enabled = true;
- else
- sport->enabled = false;
+ if (sport->enabled == tmp)
+ goto out;
+ sport->enabled = tmp;
+ if (sport->enabled)
+ goto out;
+
+ mutex_lock(&sdev->mutex);
+ list_for_each_entry(ch, &sdev->rch_list, list) {
+ if (ch->sport == sport) {
+ pr_debug("%s: ch %p %s-%d\n", __func__, ch,
+ ch->sess_name, ch->qp->qp_num);
+ srpt_disconnect_ch(ch);
+ srpt_close_ch(ch);
+ }
+ }
+ mutex_unlock(&sdev->mutex);
+out:
return count;
}
@@ -3565,7 +3277,6 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
static const struct target_core_fabric_ops srpt_template = {
.module = THIS_MODULE,
.name = "srpt",
- .node_acl_size = sizeof(struct srpt_node_acl),
.get_fabric_name = srpt_get_fabric_name,
.tpg_get_wwn = srpt_get_fabric_wwn,
.tpg_get_tag = srpt_get_tag,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 09037f2b0b51..af9b8b527340 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -218,20 +218,20 @@ struct srpt_send_ioctx {
/**
* enum rdma_ch_state - SRP channel state.
- * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
- * @CH_LIVE: QP is in RTS state.
- * @CH_DISCONNECTING: DREQ has been received; waiting for DREP
- * or DREQ has been send and waiting for DREP
- * or .
- * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
- * @CH_RELEASING: Last WQE event has been received; releasing resources.
+ * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
+ * @CH_LIVE: QP is in RTS state.
+ * @CH_DISCONNECTING: DREQ has been sent and we are waiting for DREP, or
+ *                    DREQ has been received.
+ * @CH_DRAINING:      DREP has been received, or waiting for DREP has timed
+ *                    out, and the last work request has been queued.
+ * @CH_DISCONNECTED: Last completion has been received.
*/
enum rdma_ch_state {
CH_CONNECTING,
CH_LIVE,
CH_DISCONNECTING,
CH_DRAINING,
- CH_RELEASING
+ CH_DISCONNECTED,
};
/**
@@ -267,6 +267,8 @@ struct srpt_rdma_ch {
struct ib_cm_id *cm_id;
struct ib_qp *qp;
struct ib_cq *cq;
+ struct ib_cqe zw_cqe;
+ struct kref kref;
int rq_size;
u32 rsp_size;
atomic_t sq_wr_avail;
@@ -286,7 +288,6 @@ struct srpt_rdma_ch {
u8 sess_name[36];
struct work_struct release_work;
struct completion *release_done;
- bool in_shutdown;
};
/**
@@ -343,7 +344,7 @@ struct srpt_port {
* @ioctx_ring: Per-HCA SRQ.
* @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
* @ch_releaseQ: Enables waiting for removal from rch_list.
- * @spinlock: Protects rch_list and tpg.
+ * @mutex: Protects rch_list.
* @port: Information about the ports owned by this HCA.
* @event_handler: Per-HCA asynchronous IB event handler.
* @list: Node in srpt_dev_list.
@@ -357,18 +358,10 @@ struct srpt_device {
struct srpt_recv_ioctx **ioctx_ring;
struct list_head rch_list;
wait_queue_head_t ch_releaseQ;
- spinlock_t spinlock;
+ struct mutex mutex;
struct srpt_port port[2];
struct ib_event_handler event_handler;
struct list_head list;
};
-/**
- * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
- * @nacl: Target core node ACL information.
- */
-struct srpt_node_acl {
- struct se_node_acl nacl;
-};
-
#endif /* IB_SRPT_H */
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index a35532ec00e4..6261874c07c9 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -201,6 +201,8 @@ source "drivers/input/touchscreen/Kconfig"
source "drivers/input/misc/Kconfig"
+source "drivers/input/rmi4/Kconfig"
+
endif
menu "Hardware I/O ports"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 0c9302ca9954..595820bbabe9 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -26,3 +26,5 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
+
+obj-$(CONFIG_RMI4_CORE) += rmi4/
diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c
index 64ca7113ff28..d84d20b9cec0 100644
--- a/drivers/input/input-compat.c
+++ b/drivers/input/input-compat.c
@@ -17,7 +17,7 @@
int input_event_from_user(const char __user *buffer,
struct input_event *event)
{
- if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
+ if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
if (copy_from_user(&compat_event, buffer,
@@ -41,7 +41,7 @@ int input_event_from_user(const char __user *buffer,
int input_event_to_user(char __user *buffer,
const struct input_event *event)
{
- if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
+ if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
compat_event.time.tv_sec = event->time.tv_sec;
@@ -65,7 +65,7 @@ int input_event_to_user(char __user *buffer,
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect)
{
- if (INPUT_COMPAT_TEST) {
+ if (in_compat_syscall()) {
struct ff_effect_compat *compat_effect;
if (size != sizeof(struct ff_effect_compat))
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 148f66fe3205..1563160a7af3 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -17,18 +17,6 @@
#ifdef CONFIG_COMPAT
-/* Note to the author of this code: did it ever occur to
- you why the ifdefs are needed? Think about it again. -AK */
-#if defined(CONFIG_X86_64) || defined(CONFIG_TILE)
-# define INPUT_COMPAT_TEST is_compat_task()
-#elif defined(CONFIG_S390)
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT)
-#elif defined(CONFIG_MIPS)
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT_ADDR)
-#else
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT)
-#endif
-
struct input_event_compat {
struct compat_timeval time;
__u16 type;
@@ -67,7 +55,7 @@ struct ff_effect_compat {
static inline size_t input_event_size(void)
{
- return (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) ?
+ return (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) ?
sizeof(struct input_event_compat) : sizeof(struct input_event);
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 880605959aa6..b87ffbd4547d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1015,7 +1015,7 @@ static int input_bits_to_string(char *buf, int buf_size,
{
int len = 0;
- if (INPUT_COMPAT_TEST) {
+ if (in_compat_syscall()) {
u32 dword = bits >> 32;
if (dword || !skip_empty)
len += snprintf(buf, buf_size, "%x ", dword);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index ddd8148d51d7..509608c95994 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -560,7 +560,7 @@ config KEYBOARD_SUNKBD
config KEYBOARD_SH_KEYSC
tristate "SuperH KEYSC keypad support"
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
Say Y here if you want to use a keypad attached to the KEYSC block
on SuperH processors such as sh7722 and sh7343.
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index 907e4e278fce..f6e643b589b6 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/acpi.h>
enum {
REG_READ = 0x00,
@@ -178,10 +179,26 @@ static int events_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id goldfish_events_of_match[] = {
+ { .compatible = "google,goldfish-events-keypad", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_events_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id goldfish_events_acpi_match[] = {
+ { "GFSH0002", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_events_acpi_match);
+#endif
+
static struct platform_driver events_driver = {
.probe = events_probe,
.driver = {
.name = "goldfish_events",
+ .of_match_table = goldfish_events_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_events_acpi_match),
},
};
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 9adf13a5864a..24a9f599082f 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -111,9 +111,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return -ENOMEM;
pdata->snvs = syscon_regmap_lookup_by_phandle(np, "regmap");
- if (!pdata->snvs) {
+ if (IS_ERR(pdata->snvs)) {
dev_err(&pdev->dev, "Can't get snvs syscon\n");
- return -ENODEV;
+ return PTR_ERR(pdata->snvs);
}
if (of_property_read_u32(np, "linux,keycode", &pdata->keycode)) {
@@ -180,7 +180,7 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return 0;
}
-static int imx_snvs_pwrkey_suspend(struct device *dev)
+static int __maybe_unused imx_snvs_pwrkey_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
@@ -191,7 +191,7 @@ static int imx_snvs_pwrkey_suspend(struct device *dev)
return 0;
}
-static int imx_snvs_pwrkey_resume(struct device *dev)
+static int __maybe_unused imx_snvs_pwrkey_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
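
The snvs_pwrkey fix above works because syscon_regmap_lookup_by_phandle() reports failure as an ERR_PTR()-encoded pointer, never NULL, so the old !pdata->snvs check could never fire. A userspace sketch of the encoding, with macro definitions modeled on (but simpler than) the kernel's include/linux/err.h:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *lookup(int fail)
{
	static int dummy;

	/* errnos live in the top page of the address space, never NULL */
	return fail ? ERR_PTR(-ENODEV) : (void *)&dummy;
}

int main(void)
{
	void *p = lookup(1);

	if (IS_ERR(p))			/* the correct check */
		printf("error %ld\n", PTR_ERR(p));
	if (!p)				/* the old, dead check */
		printf("never reached\n");
	return 0;
}

Hence the fix both switches to IS_ERR() and propagates PTR_ERR() instead of a hardcoded -ENODEV.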
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 623d451767e3..8083eaa0524a 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -288,8 +288,7 @@ static int spear_kbd_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int spear_kbd_suspend(struct device *dev)
+static int __maybe_unused spear_kbd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spear_kbd *kbd = platform_get_drvdata(pdev);
@@ -342,7 +341,7 @@ static int spear_kbd_suspend(struct device *dev)
return 0;
}
-static int spear_kbd_resume(struct device *dev)
+static int __maybe_unused spear_kbd_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spear_kbd *kbd = platform_get_drvdata(pdev);
@@ -368,7 +367,6 @@ static int spear_kbd_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(spear_kbd_pm_ops, spear_kbd_suspend, spear_kbd_resume);
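
The spear-keyboard and snvs_pwrkey hunks above swap "#ifdef CONFIG_PM" guards for __maybe_unused. The attribute keeps the suspend/resume functions compiling in every configuration (so they cannot bit-rot) while silencing the unused-function warning when SIMPLE_DEV_PM_OPS compiles them out. Outside the kernel, the same effect looks like this:

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/* referenced only in some configurations; no warning either way */
static int __maybe_unused sketch_suspend(void)
{
	return 0;
}

int main(void)
{
	printf("builds cleanly with -Wall -Wextra\n");
	return 0;
}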
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index cfd58e87da26..1c5914cae853 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
ar2->udev = udev;
+ /* Sanity check, first interface must have an endpoint */
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 0 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail1;
+ }
ar2->intf[0] = interface;
ar2->ep[0] = &alt->endpoint[0].desc;
+ /* Sanity check, the device must have two interfaces */
ar2->intf[1] = usb_ifnum_to_if(udev, 1);
+ if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
+ dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
+ __func__, udev->actconfig->desc.bNumInterfaces);
+ r = -ENODEV;
+ goto fail1;
+ }
+
r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
if (r)
goto fail1;
+
+ /* Sanity check, second interface must have an endpoint */
alt = ar2->intf[1]->cur_altsetting;
+ if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
+ dev_err(&interface->dev,
+ "%s(): interface 1 must have an endpoint\n", __func__);
+ r = -ENODEV;
+ goto fail2;
+ }
ar2->ep[1] = &alt->endpoint[0].desc;
r = ati_remote2_urb_init(ar2);
if (r)
- goto fail2;
+ goto fail3;
ar2->channel_mask = channel_mask;
ar2->mode_mask = mode_mask;
r = ati_remote2_setup(ar2, ar2->channel_mask);
if (r)
- goto fail2;
+ goto fail3;
usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
if (r)
- goto fail2;
+ goto fail3;
r = ati_remote2_input_init(ar2);
if (r)
- goto fail3;
+ goto fail4;
usb_set_intfdata(interface, ar2);
@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
return 0;
- fail3:
+ fail4:
sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
- fail2:
+ fail3:
ati_remote2_urb_cleanup(ar2);
+ fail2:
usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
fail1:
kfree(ar2);
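
Adding the endpoint sanity checks introduces new failure points, which is what renumbers the goto ladder (fail2 becomes fail3, and so on): each label must undo exactly the steps that succeeded before the jump, in reverse order. A self-contained sketch of the idiom with stand-in resources:

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto fail1;
	b = malloc(16);
	if (!b)
		goto fail2;
	c = malloc(16);		/* new failure point added later */
	if (!c)
		goto fail3;
	return 0;	/* on success the device owns a, b and c */

fail3:			/* c failed: undo b, then a */
	free(b);
fail2:			/* b failed: undo a */
	free(a);
fail1:
	return -1;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}

Forgetting to renumber when a step is inserted mid-ladder leaks or double-frees, which is exactly what the careful fail2/fail3/fail4 shuffle in this hunk avoids.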
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index ac1fa5f44580..9c0ea36913b4 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bMasterInterface0);
+ if (!pcu->ctrl_intf)
+ return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
pcu->ep_ctrl = &alt->endpoint[0].desc;
@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
pcu->data_intf = usb_ifnum_to_if(pcu->udev,
union_desc->bSlaveInterface0);
+ if (!pcu->data_intf)
+ return -EINVAL;
alt = pcu->data_intf->cur_altsetting;
if (alt->desc.bNumEndpoints != 2) {
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 63b539d3daba..84909a12ff36 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
int error = -ENOMEM;
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 1)
+ return -EINVAL;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -EIO;
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 8aee71986430..96c486de49e0 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -20,70 +20,78 @@
#include <linux/input.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/rotary_encoder.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/pm.h>
+#include <linux/property.h>
#define DRV_NAME "rotary-encoder"
struct rotary_encoder {
struct input_dev *input;
- const struct rotary_encoder_platform_data *pdata;
- unsigned int axis;
+ struct mutex access_mutex;
+
+ u32 steps;
+ u32 axis;
+ bool relative_axis;
+ bool rollover;
+
unsigned int pos;
- unsigned int irq_a;
- unsigned int irq_b;
+ struct gpio_descs *gpios;
+
+ unsigned int *irq;
bool armed;
- unsigned char dir; /* 0 - clockwise, 1 - CCW */
+ signed char dir; /* 1 - clockwise, -1 - CCW */
- char last_stable;
+ unsigned last_stable;
};
-static int rotary_encoder_get_state(const struct rotary_encoder_platform_data *pdata)
+static unsigned rotary_encoder_get_state(struct rotary_encoder *encoder)
{
- int a = !!gpio_get_value(pdata->gpio_a);
- int b = !!gpio_get_value(pdata->gpio_b);
+ int i;
+ unsigned ret = 0;
- a ^= pdata->inverted_a;
- b ^= pdata->inverted_b;
+ for (i = 0; i < encoder->gpios->ndescs; ++i) {
+ int val = gpiod_get_value_cansleep(encoder->gpios->desc[i]);
+ /* convert from gray encoding to normal */
+ if (ret & 1)
+ val = !val;
- return ((a << 1) | b);
+ ret = ret << 1 | val;
+ }
+
+ return ret & 3;
}
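
rotary_encoder_get_state() now decodes the GPIO lines as a Gray code, MSB first: each decoded bit is the raw bit, inverted whenever the previously decoded bit was 1. A standalone version of the same loop over an array of bits:

#include <stdio.h>

static unsigned gray_to_binary(const int *bits, int nbits)
{
	unsigned ret = 0;
	int i;

	for (i = 0; i < nbits; i++) {
		int val = bits[i];

		if (ret & 1)	/* previous decoded bit was 1: invert */
			val = !val;
		ret = ret << 1 | val;
	}
	return ret;
}

int main(void)
{
	/* Gray sequence 00, 01, 11, 10 decodes to binary 0, 1, 2, 3 */
	int seq[4][2] = { {0, 0}, {0, 1}, {1, 1}, {1, 0} };
	int i;

	for (i = 0; i < 4; i++)
		printf("gray %d%d -> %u\n", seq[i][0], seq[i][1],
		       gray_to_binary(seq[i], 2));
	return 0;
}

Decoding to plain binary is what lets the new IRQ handlers reason about direction with simple modular arithmetic instead of the old lookup tables.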
static void rotary_encoder_report_event(struct rotary_encoder *encoder)
{
- const struct rotary_encoder_platform_data *pdata = encoder->pdata;
-
- if (pdata->relative_axis) {
+ if (encoder->relative_axis) {
input_report_rel(encoder->input,
- pdata->axis, encoder->dir ? -1 : 1);
+ encoder->axis, encoder->dir);
} else {
unsigned int pos = encoder->pos;
- if (encoder->dir) {
+ if (encoder->dir < 0) {
/* turning counter-clockwise */
- if (pdata->rollover)
- pos += pdata->steps;
+ if (encoder->rollover)
+ pos += encoder->steps;
if (pos)
pos--;
} else {
/* turning clockwise */
- if (pdata->rollover || pos < pdata->steps)
+ if (encoder->rollover || pos < encoder->steps)
pos++;
}
- if (pdata->rollover)
- pos %= pdata->steps;
+ if (encoder->rollover)
+ pos %= encoder->steps;
encoder->pos = pos;
- input_report_abs(encoder->input, pdata->axis, encoder->pos);
+ input_report_abs(encoder->input, encoder->axis, encoder->pos);
}
input_sync(encoder->input);
@@ -92,9 +100,11 @@ static void rotary_encoder_report_event(struct rotary_encoder *encoder)
static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- int state;
+ unsigned state;
- state = rotary_encoder_get_state(encoder->pdata);
+ mutex_lock(&encoder->access_mutex);
+
+ state = rotary_encoder_get_state(encoder);
switch (state) {
case 0x0:
@@ -105,334 +115,227 @@ static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
break;
case 0x1:
- case 0x2:
+ case 0x3:
if (encoder->armed)
- encoder->dir = state - 1;
+ encoder->dir = 2 - state;
break;
- case 0x3:
+ case 0x2:
encoder->armed = true;
break;
}
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- int state;
+ unsigned int state;
- state = rotary_encoder_get_state(encoder->pdata);
+ mutex_lock(&encoder->access_mutex);
- switch (state) {
- case 0x00:
- case 0x03:
+ state = rotary_encoder_get_state(encoder);
+
+ if (state & 1) {
+ encoder->dir = ((encoder->last_stable - state + 1) % 4) - 1;
+ } else {
if (state != encoder->last_stable) {
rotary_encoder_report_event(encoder);
encoder->last_stable = state;
}
- break;
-
- case 0x01:
- case 0x02:
- encoder->dir = (encoder->last_stable + state) & 0x01;
- break;
}
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
static irqreturn_t rotary_encoder_quarter_period_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- unsigned char sum;
- int state;
-
- state = rotary_encoder_get_state(encoder->pdata);
-
- /*
- * We encode the previous and the current state using a byte.
- * The previous state in the MSB nibble, the current state in the LSB
- * nibble. Then use a table to decide the direction of the turn.
- */
- sum = (encoder->last_stable << 4) + state;
- switch (sum) {
- case 0x31:
- case 0x10:
- case 0x02:
- case 0x23:
- encoder->dir = 0; /* clockwise */
- break;
+ unsigned int state;
- case 0x13:
- case 0x01:
- case 0x20:
- case 0x32:
- encoder->dir = 1; /* counter-clockwise */
- break;
+ mutex_lock(&encoder->access_mutex);
- default:
- /*
- * Ignore all other values. This covers the case when the
- * state didn't change (a spurious interrupt) and the
- * cases where the state changed by two steps, making it
- * impossible to tell the direction.
- *
- * In either case, don't report any event and save the
- * state for later.
- */
+ state = rotary_encoder_get_state(encoder);
+
+ if ((encoder->last_stable + 1) % 4 == state)
+ encoder->dir = 1;
+ else if (encoder->last_stable == (state + 1) % 4)
+ encoder->dir = -1;
+ else
goto out;
- }
rotary_encoder_report_event(encoder);
out:
encoder->last_stable = state;
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
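
The rewritten quarter-period handler replaces the 16-entry transition table with modular arithmetic on the 2-bit decoded state: a step to (last + 1) mod 4 is clockwise, a step to (last - 1) mod 4 is counter-clockwise, and anything else (no change, or a two-step jump) is ignored. A standalone check of that logic:

#include <stdio.h>

static int detect_dir(unsigned last, unsigned state)
{
	if ((last + 1) % 4 == state)
		return 1;	/* clockwise */
	if (last == (state + 1) % 4)
		return -1;	/* counter-clockwise */
	return 0;		/* spurious IRQ or missed step: ignore */
}

int main(void)
{
	printf("%d %d %d\n",
	       detect_dir(0, 1),		/* expect  1 */
	       detect_dir(1, 0),		/* expect -1 */
	       detect_dir(0, 2));	/* expect  0 */
	return 0;
}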
-#ifdef CONFIG_OF
-static const struct of_device_id rotary_encoder_of_match[] = {
- { .compatible = "rotary-encoder", },
- { },
-};
-MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
-
-static struct rotary_encoder_platform_data *rotary_encoder_parse_dt(struct device *dev)
+static int rotary_encoder_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(rotary_encoder_of_match, dev);
- struct device_node *np = dev->of_node;
- struct rotary_encoder_platform_data *pdata;
- enum of_gpio_flags flags;
- int error;
-
- if (!of_id || !np)
- return NULL;
-
- pdata = kzalloc(sizeof(struct rotary_encoder_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- of_property_read_u32(np, "rotary-encoder,steps", &pdata->steps);
- of_property_read_u32(np, "linux,axis", &pdata->axis);
+ struct device *dev = &pdev->dev;
+ struct rotary_encoder *encoder;
+ struct input_dev *input;
+ irq_handler_t handler;
+ u32 steps_per_period;
+ unsigned int i;
+ int err;
- pdata->gpio_a = of_get_gpio_flags(np, 0, &flags);
- pdata->inverted_a = flags & OF_GPIO_ACTIVE_LOW;
+ encoder = devm_kzalloc(dev, sizeof(struct rotary_encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
- pdata->gpio_b = of_get_gpio_flags(np, 1, &flags);
- pdata->inverted_b = flags & OF_GPIO_ACTIVE_LOW;
+ mutex_init(&encoder->access_mutex);
- pdata->relative_axis =
- of_property_read_bool(np, "rotary-encoder,relative-axis");
- pdata->rollover = of_property_read_bool(np, "rotary-encoder,rollover");
+ device_property_read_u32(dev, "rotary-encoder,steps", &encoder->steps);
- error = of_property_read_u32(np, "rotary-encoder,steps-per-period",
- &pdata->steps_per_period);
- if (error) {
+ err = device_property_read_u32(dev, "rotary-encoder,steps-per-period",
+ &steps_per_period);
+ if (err) {
/*
- * The 'half-period' property has been deprecated, you must use
- * 'steps-per-period' and set an appropriate value, but we still
- * need to parse it to maintain compatibility.
+ * The 'half-period' property has been deprecated, you must
+ * use 'steps-per-period' and set an appropriate value, but
+ * we still need to parse it to maintain compatibility. If
+ * neither property is present we fall back to the one step
+ * per period behavior.
*/
- if (of_property_read_bool(np, "rotary-encoder,half-period")) {
- pdata->steps_per_period = 2;
- } else {
- /* Fallback to one step per period behavior */
- pdata->steps_per_period = 1;
- }
+ steps_per_period = device_property_read_bool(dev,
+ "rotary-encoder,half-period") ? 2 : 1;
}
- pdata->wakeup_source = of_property_read_bool(np, "wakeup-source");
+ encoder->rollover =
+ device_property_read_bool(dev, "rotary-encoder,rollover");
- return pdata;
-}
-#else
-static inline struct rotary_encoder_platform_data *
-rotary_encoder_parse_dt(struct device *dev)
-{
- return NULL;
-}
-#endif
-
-static int rotary_encoder_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- const struct rotary_encoder_platform_data *pdata = dev_get_platdata(dev);
- struct rotary_encoder *encoder;
- struct input_dev *input;
- irq_handler_t handler;
- int err;
-
- if (!pdata) {
- pdata = rotary_encoder_parse_dt(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ device_property_read_u32(dev, "linux,axis", &encoder->axis);
+ encoder->relative_axis =
+ device_property_read_bool(dev, "rotary-encoder,relative-axis");
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
+ encoder->gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
+ if (IS_ERR(encoder->gpios)) {
+ dev_err(dev, "unable to get gpios\n");
+ return PTR_ERR(encoder->gpios);
}
-
- encoder = kzalloc(sizeof(struct rotary_encoder), GFP_KERNEL);
- input = input_allocate_device();
- if (!encoder || !input) {
- err = -ENOMEM;
- goto exit_free_mem;
+ if (encoder->gpios->ndescs < 2) {
+ dev_err(dev, "not enough gpios found\n");
+ return -EINVAL;
}
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
encoder->input = input;
- encoder->pdata = pdata;
input->name = pdev->name;
input->id.bustype = BUS_HOST;
input->dev.parent = dev;
- if (pdata->relative_axis) {
- input->evbit[0] = BIT_MASK(EV_REL);
- input->relbit[0] = BIT_MASK(pdata->axis);
- } else {
- input->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(encoder->input,
- pdata->axis, 0, pdata->steps, 0, 1);
- }
-
- /* request the GPIOs */
- err = gpio_request_one(pdata->gpio_a, GPIOF_IN, dev_name(dev));
- if (err) {
- dev_err(dev, "unable to request GPIO %d\n", pdata->gpio_a);
- goto exit_free_mem;
- }
-
- err = gpio_request_one(pdata->gpio_b, GPIOF_IN, dev_name(dev));
- if (err) {
- dev_err(dev, "unable to request GPIO %d\n", pdata->gpio_b);
- goto exit_free_gpio_a;
- }
-
- encoder->irq_a = gpio_to_irq(pdata->gpio_a);
- encoder->irq_b = gpio_to_irq(pdata->gpio_b);
+ if (encoder->relative_axis)
+ input_set_capability(input, EV_REL, encoder->axis);
+ else
+ input_set_abs_params(input,
+ encoder->axis, 0, encoder->steps, 0, 1);
- switch (pdata->steps_per_period) {
+ switch (steps_per_period >> (encoder->gpios->ndescs - 2)) {
case 4:
handler = &rotary_encoder_quarter_period_irq;
- encoder->last_stable = rotary_encoder_get_state(pdata);
+ encoder->last_stable = rotary_encoder_get_state(encoder);
break;
case 2:
handler = &rotary_encoder_half_period_irq;
- encoder->last_stable = rotary_encoder_get_state(pdata);
+ encoder->last_stable = rotary_encoder_get_state(encoder);
break;
case 1:
handler = &rotary_encoder_irq;
break;
default:
dev_err(dev, "'%d' is not a valid steps-per-period value\n",
- pdata->steps_per_period);
- err = -EINVAL;
- goto exit_free_gpio_b;
+ steps_per_period);
+ return -EINVAL;
}
- err = request_irq(encoder->irq_a, handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- DRV_NAME, encoder);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", encoder->irq_a);
- goto exit_free_gpio_b;
- }
-
- err = request_irq(encoder->irq_b, handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- DRV_NAME, encoder);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", encoder->irq_b);
- goto exit_free_irq_a;
+ encoder->irq =
+ devm_kzalloc(dev,
+ sizeof(*encoder->irq) * encoder->gpios->ndescs,
+ GFP_KERNEL);
+ if (!encoder->irq)
+ return -ENOMEM;
+
+ for (i = 0; i < encoder->gpios->ndescs; ++i) {
+ encoder->irq[i] = gpiod_to_irq(encoder->gpios->desc[i]);
+
+ err = devm_request_threaded_irq(dev, encoder->irq[i],
+ NULL, handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ DRV_NAME, encoder);
+ if (err) {
+ dev_err(dev, "unable to request IRQ %d (gpio#%d)\n",
+ encoder->irq[i], i);
+ return err;
+ }
}
err = input_register_device(input);
if (err) {
dev_err(dev, "failed to register input device\n");
- goto exit_free_irq_b;
+ return err;
}
- device_init_wakeup(&pdev->dev, pdata->wakeup_source);
+ device_init_wakeup(dev,
+ device_property_read_bool(dev, "wakeup-source"));
platform_set_drvdata(pdev, encoder);
return 0;
-
-exit_free_irq_b:
- free_irq(encoder->irq_b, encoder);
-exit_free_irq_a:
- free_irq(encoder->irq_a, encoder);
-exit_free_gpio_b:
- gpio_free(pdata->gpio_b);
-exit_free_gpio_a:
- gpio_free(pdata->gpio_a);
-exit_free_mem:
- input_free_device(input);
- kfree(encoder);
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
- return err;
}
-static int rotary_encoder_remove(struct platform_device *pdev)
-{
- struct rotary_encoder *encoder = platform_get_drvdata(pdev);
- const struct rotary_encoder_platform_data *pdata = encoder->pdata;
-
- device_init_wakeup(&pdev->dev, false);
-
- free_irq(encoder->irq_a, encoder);
- free_irq(encoder->irq_b, encoder);
- gpio_free(pdata->gpio_a);
- gpio_free(pdata->gpio_b);
-
- input_unregister_device(encoder->input);
- kfree(encoder);
-
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int rotary_encoder_suspend(struct device *dev)
+static int __maybe_unused rotary_encoder_suspend(struct device *dev)
{
struct rotary_encoder *encoder = dev_get_drvdata(dev);
+ unsigned int i;
if (device_may_wakeup(dev)) {
- enable_irq_wake(encoder->irq_a);
- enable_irq_wake(encoder->irq_b);
+ for (i = 0; i < encoder->gpios->ndescs; ++i)
+ enable_irq_wake(encoder->irq[i]);
}
return 0;
}
-static int rotary_encoder_resume(struct device *dev)
+static int __maybe_unused rotary_encoder_resume(struct device *dev)
{
struct rotary_encoder *encoder = dev_get_drvdata(dev);
+ unsigned int i;
if (device_may_wakeup(dev)) {
- disable_irq_wake(encoder->irq_a);
- disable_irq_wake(encoder->irq_b);
+ for (i = 0; i < encoder->gpios->ndescs; ++i)
+ disable_irq_wake(encoder->irq[i]);
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(rotary_encoder_pm_ops,
- rotary_encoder_suspend, rotary_encoder_resume);
+ rotary_encoder_suspend, rotary_encoder_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id rotary_encoder_of_match[] = {
+ { .compatible = "rotary-encoder", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
+#endif
static struct platform_driver rotary_encoder_driver = {
.probe = rotary_encoder_probe,
- .remove = rotary_encoder_remove,
.driver = {
.name = DRV_NAME,
.pm = &rotary_encoder_pm_ops,
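
With every resource converted to devm_*, the rotary-encoder driver no longer needs a .remove callback at all: each devm allocation registers its own teardown with the device, and the core runs the teardowns in reverse order on unbind. Below is a minimal userspace model of that cleanup stack; the structure and helper names are invented for illustration and far simpler than the kernel's real devres machinery:

#include <stdio.h>

#define MAX_ACTIONS 8

struct device {
	void (*undo[MAX_ACTIONS])(void);
	int nundo;
};

static void devm_register(struct device *dev, void (*undo)(void))
{
	if (dev->nundo < MAX_ACTIONS)
		dev->undo[dev->nundo++] = undo;
}

static void device_unbind(struct device *dev)
{
	while (dev->nundo > 0)		/* LIFO, mirroring devres order */
		dev->undo[--dev->nundo]();
}

static void free_irqs(void) { printf("free irqs\n"); }
static void put_gpios(void) { printf("put gpios\n"); }

int main(void)
{
	struct device dev = { .nundo = 0 };

	devm_register(&dev, put_gpios);	/* e.g. devm_gpiod_get_array() */
	devm_register(&dev, free_irqs);	/* e.g. devm_request_threaded_irq() */
	device_unbind(&dev);		/* prints teardowns in reverse */
	return 0;
}

Reverse-order teardown matters here: the IRQs must be freed before the GPIO descriptors they were derived from are released.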
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 4eb9e4d94f46..abe1a927b332 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -664,7 +664,7 @@ struct uinput_ff_upload_compat {
static int uinput_ff_upload_to_user(char __user *buffer,
const struct uinput_ff_upload *ff_up)
{
- if (INPUT_COMPAT_TEST) {
+ if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
ff_up_compat.request_id = ff_up->request_id;
@@ -695,7 +695,7 @@ static int uinput_ff_upload_to_user(char __user *buffer,
static int uinput_ff_upload_from_user(const char __user *buffer,
struct uinput_ff_upload *ff_up)
{
- if (INPUT_COMPAT_TEST) {
+ if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
if (copy_from_user(&ff_up_compat, buffer,
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 17f97e5e11e7..096abb4ad5cd 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -48,6 +48,16 @@ config MOUSE_PS2_ALPS
If unsure, say Y.
+config MOUSE_PS2_BYD
+ bool "BYD PS/2 mouse protocol extension" if EXPERT
+ default y
+ depends on MOUSE_PS2
+ help
+ Say Y here if you have a BYD PS/2 touchpad connected to
+ your system.
+
+ If unsure, say Y.
+
config MOUSE_PS2_LOGIPS2PP
bool "Logitech PS/2++ mouse protocol extension" if EXPERT
default y
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index ee6a6e9563d4..6168b134937b 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -28,6 +28,7 @@ cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o cyapa_gen6.o
psmouse-objs := psmouse-base.o synaptics.o focaltech.o
psmouse-$(CONFIG_MOUSE_PS2_ALPS) += alps.o
+psmouse-$(CONFIG_MOUSE_PS2_BYD) += byd.o
psmouse-$(CONFIG_MOUSE_PS2_ELANTECH) += elantech.o
psmouse-$(CONFIG_MOUSE_PS2_OLPC) += hgpk.o
psmouse-$(CONFIG_MOUSE_PS2_LOGIPS2PP) += logips2pp.o
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
new file mode 100644
index 000000000000..fdc243ca93ed
--- /dev/null
+++ b/drivers/input/mouse/byd.c
@@ -0,0 +1,508 @@
+/*
+ * BYD TouchPad PS/2 mouse driver
+ *
+ * Copyright (C) 2015 Chris Diamand <chris@diamand.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/libps2.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+
+#include "psmouse.h"
+#include "byd.h"
+
+/* PS2 Bits */
+#define PS2_Y_OVERFLOW BIT_MASK(7)
+#define PS2_X_OVERFLOW BIT_MASK(6)
+#define PS2_Y_SIGN BIT_MASK(5)
+#define PS2_X_SIGN BIT_MASK(4)
+#define PS2_ALWAYS_1 BIT_MASK(3)
+#define PS2_MIDDLE BIT_MASK(2)
+#define PS2_RIGHT BIT_MASK(1)
+#define PS2_LEFT BIT_MASK(0)
+
+/*
+ * BYD pad constants
+ */
+
+/*
+ * True device resolution is unknown; however, experiments show that it
+ * is about 111 units/mm.
+ * Absolute coordinate packets are in the range 0-255 for both X and Y.
+ * We pick ABS_X/ABS_Y dimensions which are multiples of 256 and in the
+ * right ballpark given the touchpad's physical dimensions and the
+ * resolution estimated from the spec sheet; the device's active area
+ * is 101.6 x 60.1 mm.
+ */
+#define BYD_PAD_WIDTH 11264
+#define BYD_PAD_HEIGHT 6656
+#define BYD_PAD_RESOLUTION 111
+
+/*
+ * Given the above dimensions, relative packet velocity is in multiples
+ * of 1 unit / 11 milliseconds. We use this dt to estimate the distance
+ * traveled.
+ */
+#define BYD_DT 11
+/* Time in jiffies used to time out various touch events (64 ms) */
+#define BYD_TOUCH_TIMEOUT msecs_to_jiffies(64)
+
+/* BYD commands reverse engineered from windows driver */
+
+/*
+ * Swipe gesture from off-pad to on-pad
+ * 0 : disable
+ * 1 : enable
+ */
+#define BYD_CMD_SET_OFFSCREEN_SWIPE 0x10cc
+/*
+ * Tap and drag delay time
+ * 0 : disable
+ * 1 - 8 : least to most delay
+ */
+#define BYD_CMD_SET_TAP_DRAG_DELAY_TIME 0x10cf
+/*
+ * Physical buttons function mapping
+ * 0 : enable
+ * 4 : normal
+ * 5 : left button custom command
+ * 6 : right button custom command
+ * 8 : disable
+ */
+#define BYD_CMD_SET_PHYSICAL_BUTTONS 0x10d0
+/*
+ * Absolute mode (1 byte X/Y resolution)
+ * 0 : disable
+ * 2 : enable
+ */
+#define BYD_CMD_SET_ABSOLUTE_MODE 0x10d1
+/*
+ * Two finger scrolling
+ * 1 : vertical
+ * 2 : horizontal
+ * 3 : vertical + horizontal
+ * 4 : disable
+ */
+#define BYD_CMD_SET_TWO_FINGER_SCROLL 0x10d2
+/*
+ * Handedness
+ * 1 : right handed
+ * 2 : left handed
+ */
+#define BYD_CMD_SET_HANDEDNESS 0x10d3
+/*
+ * Tap to click
+ * 1 : enable
+ * 2 : disable
+ */
+#define BYD_CMD_SET_TAP 0x10d4
+/*
+ * Tap and drag
+ * 1 : tap and hold to drag
+ * 2 : tap and hold to drag + lock
+ * 3 : disable
+ */
+#define BYD_CMD_SET_TAP_DRAG 0x10d5
+/*
+ * Touch sensitivity
+ * 1 - 7 : least to most sensitive
+ */
+#define BYD_CMD_SET_TOUCH_SENSITIVITY 0x10d6
+/*
+ * One finger scrolling
+ * 1 : vertical
+ * 2 : horizontal
+ * 3 : vertical + horizontal
+ * 4 : disable
+ */
+#define BYD_CMD_SET_ONE_FINGER_SCROLL 0x10d7
+/*
+ * One finger scrolling function
+ * 1 : free scrolling
+ * 2 : edge motion
+ * 3 : free scrolling + edge motion
+ * 4 : disable
+ */
+#define BYD_CMD_SET_ONE_FINGER_SCROLL_FUNC 0x10d8
+/*
+ * Sliding speed
+ * 1 - 5 : slowest to fastest
+ */
+#define BYD_CMD_SET_SLIDING_SPEED 0x10da
+/*
+ * Edge motion
+ * 1 : disable
+ * 2 : enable when dragging
+ * 3 : enable when dragging and pointing
+ */
+#define BYD_CMD_SET_EDGE_MOTION 0x10db
+/*
+ * Left edge region size
+ * 0 - 7 : smallest to largest width
+ */
+#define BYD_CMD_SET_LEFT_EDGE_REGION 0x10dc
+/*
+ * Top edge region size
+ * 0 - 9 : smallest to largest height
+ */
+#define BYD_CMD_SET_TOP_EDGE_REGION 0x10dd
+/*
+ * Disregard palm press as clicks
+ * 1 - 6 : smallest to largest
+ */
+#define BYD_CMD_SET_PALM_CHECK 0x10de
+/*
+ * Right edge region size
+ * 0 - 7 : smallest to largest width
+ */
+#define BYD_CMD_SET_RIGHT_EDGE_REGION 0x10df
+/*
+ * Bottom edge region size
+ * 0 - 9 : smallest to largest height
+ */
+#define BYD_CMD_SET_BOTTOM_EDGE_REGION 0x10e1
+/*
+ * Multitouch gestures
+ * 1 : enable
+ * 2 : disable
+ */
+#define BYD_CMD_SET_MULTITOUCH 0x10e3
+/*
+ * Edge motion speed
+ * 0 : control with finger pressure
+ * 1 - 9 : slowest to fastest
+ */
+#define BYD_CMD_SET_EDGE_MOTION_SPEED 0x10e4
+/*
+ * Two finger scrolling function
+ * 0 : free scrolling
+ * 1 : free scrolling (with momentum)
+ * 2 : edge motion
+ * 3 : free scrolling (with momentum) + edge motion
+ * 4 : disable
+ */
+#define BYD_CMD_SET_TWO_FINGER_SCROLL_FUNC 0x10e5
+
+/*
+ * The touchpad generates a mixture of absolute and relative packets, indicated
+ * by the last byte of each packet being set to one of the following:
+ */
+#define BYD_PACKET_ABSOLUTE 0xf8
+#define BYD_PACKET_RELATIVE 0x00
+/* Multitouch gesture packets */
+#define BYD_PACKET_PINCH_IN 0xd8
+#define BYD_PACKET_PINCH_OUT 0x28
+#define BYD_PACKET_ROTATE_CLOCKWISE 0x29
+#define BYD_PACKET_ROTATE_ANTICLOCKWISE 0xd7
+#define BYD_PACKET_TWO_FINGER_SCROLL_RIGHT 0x2a
+#define BYD_PACKET_TWO_FINGER_SCROLL_DOWN 0x2b
+#define BYD_PACKET_TWO_FINGER_SCROLL_UP 0xd5
+#define BYD_PACKET_TWO_FINGER_SCROLL_LEFT 0xd6
+#define BYD_PACKET_THREE_FINGER_SWIPE_RIGHT 0x2c
+#define BYD_PACKET_THREE_FINGER_SWIPE_DOWN 0x2d
+#define BYD_PACKET_THREE_FINGER_SWIPE_UP 0xd3
+#define BYD_PACKET_THREE_FINGER_SWIPE_LEFT 0xd4
+#define BYD_PACKET_FOUR_FINGER_DOWN 0x33
+#define BYD_PACKET_FOUR_FINGER_UP 0xcd
+#define BYD_PACKET_REGION_SCROLL_RIGHT 0x35
+#define BYD_PACKET_REGION_SCROLL_DOWN 0x36
+#define BYD_PACKET_REGION_SCROLL_UP 0xca
+#define BYD_PACKET_REGION_SCROLL_LEFT 0xcb
+#define BYD_PACKET_RIGHT_CORNER_CLICK 0xd2
+#define BYD_PACKET_LEFT_CORNER_CLICK 0x2e
+#define BYD_PACKET_LEFT_AND_RIGHT_CORNER_CLICK 0x2f
+#define BYD_PACKET_ONTO_PAD_SWIPE_RIGHT 0x37
+#define BYD_PACKET_ONTO_PAD_SWIPE_DOWN 0x30
+#define BYD_PACKET_ONTO_PAD_SWIPE_UP 0xd0
+#define BYD_PACKET_ONTO_PAD_SWIPE_LEFT 0xc9
+
+struct byd_data {
+ struct timer_list timer;
+ s32 abs_x;
+ s32 abs_y;
+ typeof(jiffies) last_touch_time;
+ bool btn_left;
+ bool btn_right;
+ bool touch;
+};
+
+static void byd_report_input(struct psmouse *psmouse)
+{
+ struct byd_data *priv = psmouse->private;
+ struct input_dev *dev = psmouse->dev;
+
+ input_report_key(dev, BTN_TOUCH, priv->touch);
+ input_report_key(dev, BTN_TOOL_FINGER, priv->touch);
+
+ input_report_abs(dev, ABS_X, priv->abs_x);
+ input_report_abs(dev, ABS_Y, priv->abs_y);
+ input_report_key(dev, BTN_LEFT, priv->btn_left);
+ input_report_key(dev, BTN_RIGHT, priv->btn_right);
+
+ input_sync(dev);
+}
+
+static void byd_clear_touch(unsigned long data)
+{
+ struct psmouse *psmouse = (struct psmouse *)data;
+ struct byd_data *priv = psmouse->private;
+
+ serio_pause_rx(psmouse->ps2dev.serio);
+ priv->touch = false;
+
+ byd_report_input(psmouse);
+
+ serio_continue_rx(psmouse->ps2dev.serio);
+
+ /*
+ * Move cursor back to center of pad when we lose touch - this
+ * specifically improves user experience when moving cursor with one
+ * finger, and pressing a button with another.
+ */
+ priv->abs_x = BYD_PAD_WIDTH / 2;
+ priv->abs_y = BYD_PAD_HEIGHT / 2;
+}
+
+static psmouse_ret_t byd_process_byte(struct psmouse *psmouse)
+{
+ struct byd_data *priv = psmouse->private;
+ u8 *pkt = psmouse->packet;
+
+ if (psmouse->pktcnt > 0 && !(pkt[0] & PS2_ALWAYS_1)) {
+ psmouse_warn(psmouse, "Always_1 bit not 1. pkt[0] = %02x\n",
+ pkt[0]);
+ return PSMOUSE_BAD_DATA;
+ }
+
+ if (psmouse->pktcnt < psmouse->pktsize)
+ return PSMOUSE_GOOD_DATA;
+
+ /* Otherwise, a full packet has been received */
+ switch (pkt[3]) {
+ case BYD_PACKET_ABSOLUTE:
+ /* Only use absolute packets for the start of movement. */
+ if (!priv->touch) {
+ /* needed to detect tap */
+ typeof(jiffies) tap_time =
+ priv->last_touch_time + BYD_TOUCH_TIMEOUT;
+ priv->touch = time_after(jiffies, tap_time);
+
+ /* init abs position */
+ priv->abs_x = pkt[1] * (BYD_PAD_WIDTH / 256);
+ priv->abs_y = (255 - pkt[2]) * (BYD_PAD_HEIGHT / 256);
+ }
+ break;
+ case BYD_PACKET_RELATIVE: {
+ /* Standard packet */
+ /* Sign-extend if a sign bit is set. */
+ u32 signx = pkt[0] & PS2_X_SIGN ? ~0xFF : 0;
+ u32 signy = pkt[0] & PS2_Y_SIGN ? ~0xFF : 0;
+ s32 dx = signx | (int) pkt[1];
+ s32 dy = signy | (int) pkt[2];
+
+ /* Update position based on velocity */
+ priv->abs_x += dx * BYD_DT;
+ priv->abs_y -= dy * BYD_DT;
+
+ priv->touch = true;
+ break;
+ }
+ default:
+ psmouse_warn(psmouse,
+ "Unrecognized Z: pkt = %02x %02x %02x %02x\n",
+ psmouse->packet[0], psmouse->packet[1],
+ psmouse->packet[2], psmouse->packet[3]);
+ return PSMOUSE_BAD_DATA;
+ }
+
+ priv->btn_left = pkt[0] & PS2_LEFT;
+ priv->btn_right = pkt[0] & PS2_RIGHT;
+
+ byd_report_input(psmouse);
+
+ /* Reset time since last touch. */
+ if (priv->touch) {
+ priv->last_touch_time = jiffies;
+ mod_timer(&priv->timer, jiffies + BYD_TOUCH_TIMEOUT);
+ }
+
+ return PSMOUSE_FULL_PACKET;
+}
+
+static int byd_reset_touchpad(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ u8 param[4];
+ size_t i;
+
+ const struct {
+ u16 command;
+ u8 arg;
+ } seq[] = {
+ /*
+ * Intellimouse initialization sequence, to get 4-byte instead
+ * of 3-byte packets.
+ */
+ { PSMOUSE_CMD_SETRATE, 0xC8 },
+ { PSMOUSE_CMD_SETRATE, 0x64 },
+ { PSMOUSE_CMD_SETRATE, 0x50 },
+ { PSMOUSE_CMD_GETID, 0 },
+ { PSMOUSE_CMD_ENABLE, 0 },
+ /*
+ * BYD-specific initialization, which enables absolute mode and
+		 * (if desired) the touchpad's built-in gesture detection.
+ */
+ { 0x10E2, 0x00 },
+ { 0x10E0, 0x02 },
+ /* The touchpad should reply with 4 seemingly-random bytes */
+ { 0x14E0, 0x01 },
+ /* Pairs of parameters and values. */
+ { BYD_CMD_SET_HANDEDNESS, 0x01 },
+ { BYD_CMD_SET_PHYSICAL_BUTTONS, 0x04 },
+ { BYD_CMD_SET_TAP, 0x02 },
+ { BYD_CMD_SET_ONE_FINGER_SCROLL, 0x04 },
+ { BYD_CMD_SET_ONE_FINGER_SCROLL_FUNC, 0x04 },
+ { BYD_CMD_SET_EDGE_MOTION, 0x01 },
+ { BYD_CMD_SET_PALM_CHECK, 0x00 },
+ { BYD_CMD_SET_MULTITOUCH, 0x02 },
+ { BYD_CMD_SET_TWO_FINGER_SCROLL, 0x04 },
+ { BYD_CMD_SET_TWO_FINGER_SCROLL_FUNC, 0x04 },
+ { BYD_CMD_SET_LEFT_EDGE_REGION, 0x00 },
+ { BYD_CMD_SET_TOP_EDGE_REGION, 0x00 },
+ { BYD_CMD_SET_RIGHT_EDGE_REGION, 0x00 },
+ { BYD_CMD_SET_BOTTOM_EDGE_REGION, 0x00 },
+ { BYD_CMD_SET_ABSOLUTE_MODE, 0x02 },
+ /* Finalize initialization. */
+ { 0x10E0, 0x00 },
+ { 0x10E2, 0x01 },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(seq); ++i) {
+ memset(param, 0, sizeof(param));
+ param[0] = seq[i].arg;
+ if (ps2_command(ps2dev, param, seq[i].command))
+ return -EIO;
+ }
+
+ psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
+ return 0;
+}
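+
+/*
+ * A note on the command words above: ps2_command() encodes, in bits
+ * 15..12, the number of parameter bytes to send and, in bits 11..8, the
+ * number of reply bytes to read; the low byte is the raw PS/2 command.
+ * So 0x10E2 sends one argument byte with command 0xE2, while 0x14E0
+ * sends one byte and reads the four "seemingly-random" bytes back into
+ * param. A minimal sketch (hypothetical helper) of one such query:
+ */
+static int byd_query_e0(struct psmouse *psmouse, u8 reply[4])
+{
+	u8 param[4] = { 0x01 };
+
+	/* Send 0x01 after command 0xE0, then read four bytes back. */
+	if (ps2_command(&psmouse->ps2dev, param, 0x14E0))
+		return -EIO;
+
+	memcpy(reply, param, sizeof(param));
+	return 0;
+}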
+
+static int byd_reconnect(struct psmouse *psmouse)
+{
+ int retry = 0, error = 0;
+
+ psmouse_dbg(psmouse, "Reconnect\n");
+ do {
+ psmouse_reset(psmouse);
+ if (retry)
+ ssleep(1);
+		error = byd_detect(psmouse, false);
+ } while (error && ++retry < 3);
+
+ if (error)
+ return error;
+
+ psmouse_dbg(psmouse, "Reconnected after %d attempts\n", retry);
+
+ error = byd_reset_touchpad(psmouse);
+ if (error) {
+ psmouse_err(psmouse, "Unable to initialize device\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static void byd_disconnect(struct psmouse *psmouse)
+{
+ struct byd_data *priv = psmouse->private;
+
+ if (priv) {
+ del_timer(&priv->timer);
+ kfree(psmouse->private);
+ psmouse->private = NULL;
+ }
+}
+
+int byd_detect(struct psmouse *psmouse, bool set_properties)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ u8 param[4] = {0x03, 0x00, 0x00, 0x00};
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -1;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -1;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -1;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+ return -1;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -1;
+
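+	/*
+	 * PSMOUSE_CMD_GETINFO (the PS/2 status request) returns three
+	 * bytes: status, resolution and sample rate. After the four
+	 * SETRES writes above, a BYD pad answers with resolution 0x03
+	 * and a sample rate of 0x64 (100 reports/s).
+	 */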
+ if (param[1] != 0x03 || param[2] != 0x64)
+ return -ENODEV;
+
+ psmouse_dbg(psmouse, "BYD touchpad detected\n");
+
+ if (set_properties) {
+ psmouse->vendor = "BYD";
+ psmouse->name = "TouchPad";
+ }
+
+ return 0;
+}
+
+int byd_init(struct psmouse *psmouse)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct byd_data *priv;
+
+ if (psmouse_reset(psmouse))
+ return -EIO;
+
+ if (byd_reset_touchpad(psmouse))
+ return -EIO;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ setup_timer(&priv->timer, byd_clear_touch, (unsigned long) psmouse);
+
+ psmouse->private = priv;
+ psmouse->disconnect = byd_disconnect;
+ psmouse->reconnect = byd_reconnect;
+ psmouse->protocol_handler = byd_process_byte;
+ psmouse->pktsize = 4;
+ psmouse->resync_time = 0;
+
+ __set_bit(INPUT_PROP_POINTER, dev->propbit);
+ /* Touchpad */
+ __set_bit(BTN_TOUCH, dev->keybit);
+ __set_bit(BTN_TOOL_FINGER, dev->keybit);
+ /* Buttons */
+ __set_bit(BTN_LEFT, dev->keybit);
+ __set_bit(BTN_RIGHT, dev->keybit);
+ __clear_bit(BTN_MIDDLE, dev->keybit);
+
+ /* Absolute position */
+ __set_bit(EV_ABS, dev->evbit);
+ input_set_abs_params(dev, ABS_X, 0, BYD_PAD_WIDTH, 0, 0);
+ input_set_abs_params(dev, ABS_Y, 0, BYD_PAD_HEIGHT, 0, 0);
+ input_abs_set_res(dev, ABS_X, BYD_PAD_RESOLUTION);
+ input_abs_set_res(dev, ABS_Y, BYD_PAD_RESOLUTION);
+ /* No relative support */
+ __clear_bit(EV_REL, dev->evbit);
+ __clear_bit(REL_X, dev->relbit);
+ __clear_bit(REL_Y, dev->relbit);
+
+ return 0;
+}
diff --git a/drivers/input/mouse/byd.h b/drivers/input/mouse/byd.h
new file mode 100644
index 000000000000..d6c120cf36cd
--- /dev/null
+++ b/drivers/input/mouse/byd.h
@@ -0,0 +1,18 @@
+#ifndef _BYD_H
+#define _BYD_H
+
+#ifdef CONFIG_MOUSE_PS2_BYD
+int byd_detect(struct psmouse *psmouse, bool set_properties);
+int byd_init(struct psmouse *psmouse);
+#else
+static inline int byd_detect(struct psmouse *psmouse, bool set_properties)
+{
+ return -ENOSYS;
+}
+static inline int byd_init(struct psmouse *psmouse)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_MOUSE_PS2_BYD */
+
+#endif /* _BYD_H */
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index eb76b61418f3..dc2394292088 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -383,7 +383,7 @@ static int cyapa_open(struct input_dev *input)
* when in operational mode.
*/
error = cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
if (error) {
dev_warn(dev, "set active power failed: %d\n", error);
goto out;
@@ -424,7 +424,8 @@ static void cyapa_close(struct input_dev *input)
pm_runtime_set_suspended(dev);
if (cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
+ cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_OFF, 0, CYAPA_PM_DEACTIVE);
mutex_unlock(&cyapa->state_sync_lock);
}
@@ -534,7 +535,7 @@ static void cyapa_enable_irq_for_cmd(struct cyapa *cyapa)
*/
if (!input || cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
/* Gen3 always using polling mode for command. */
if (cyapa->gen >= CYAPA_GEN5)
enable_irq(cyapa->client->irq);
@@ -550,7 +551,7 @@ static void cyapa_disable_irq_for_cmd(struct cyapa *cyapa)
disable_irq(cyapa->client->irq);
if (!input || cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_OFF, 0, false);
+ PWR_MODE_OFF, 0, CYAPA_PM_ACTIVE);
}
}
@@ -617,7 +618,8 @@ static int cyapa_initialize(struct cyapa *cyapa)
/* Power down the device until we need it. */
if (cyapa->operational)
- cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
+ cyapa->ops->set_power_mode(cyapa,
+ PWR_MODE_OFF, 0, CYAPA_PM_ACTIVE);
return 0;
}
@@ -634,7 +636,7 @@ static int cyapa_reinitialize(struct cyapa *cyapa)
/* Avoid command failures when TP was in OFF state. */
if (cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
error = cyapa_detect(cyapa);
if (error)
@@ -654,7 +656,7 @@ out:
/* Reset to power OFF state to save power when no user open. */
if (cyapa->operational)
cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_OFF, 0, false);
+ PWR_MODE_OFF, 0, CYAPA_PM_DEACTIVE);
} else if (!error && cyapa->operational) {
/*
* Make sure only enable runtime PM when device is
@@ -1392,7 +1394,7 @@ static int __maybe_unused cyapa_suspend(struct device *dev)
power_mode = device_may_wakeup(dev) ? cyapa->suspend_power_mode
: PWR_MODE_OFF;
error = cyapa->ops->set_power_mode(cyapa, power_mode,
- cyapa->suspend_sleep_time, true);
+ cyapa->suspend_sleep_time, CYAPA_PM_SUSPEND);
if (error)
dev_err(dev, "suspend set power mode failed: %d\n",
error);
@@ -1447,7 +1449,7 @@ static int __maybe_unused cyapa_runtime_suspend(struct device *dev)
error = cyapa->ops->set_power_mode(cyapa,
cyapa->runtime_suspend_power_mode,
cyapa->runtime_suspend_sleep_time,
- false);
+ CYAPA_PM_RUNTIME_SUSPEND);
if (error)
dev_warn(dev, "runtime suspend failed: %d\n", error);
@@ -1460,7 +1462,7 @@ static int __maybe_unused cyapa_runtime_resume(struct device *dev)
int error;
error = cyapa->ops->set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_RUNTIME_RESUME);
if (error)
dev_warn(dev, "runtime resume failed: %d\n", error);
diff --git a/drivers/input/mouse/cyapa.h b/drivers/input/mouse/cyapa.h
index b812bba8cdd7..ce951fe4516a 100644
--- a/drivers/input/mouse/cyapa.h
+++ b/drivers/input/mouse/cyapa.h
@@ -250,6 +250,15 @@ struct cyapa;
typedef bool (*cb_sort)(struct cyapa *, u8 *, int);
+enum cyapa_pm_stage {
+ CYAPA_PM_DEACTIVE,
+ CYAPA_PM_ACTIVE,
+ CYAPA_PM_SUSPEND,
+ CYAPA_PM_RESUME,
+ CYAPA_PM_RUNTIME_SUSPEND,
+ CYAPA_PM_RUNTIME_RESUME,
+};
+
struct cyapa_dev_ops {
int (*check_fw)(struct cyapa *, const struct firmware *);
int (*bl_enter)(struct cyapa *);
@@ -273,7 +282,7 @@ struct cyapa_dev_ops {
int (*sort_empty_output_data)(struct cyapa *,
u8 *, int *, cb_sort);
- int (*set_power_mode)(struct cyapa *, u8, u16, bool);
+ int (*set_power_mode)(struct cyapa *, u8, u16, enum cyapa_pm_stage);
int (*set_proximity)(struct cyapa *, bool);
};
@@ -289,6 +298,9 @@ struct cyapa_pip_cmd_states {
u8 *resp_data;
int *resp_len;
+ enum cyapa_pm_stage pm_stage;
+ struct mutex pm_stage_lock;
+
u8 irq_cmd_buf[CYAPA_REG_MAP_SIZE];
u8 empty_buf[CYAPA_REG_MAP_SIZE];
};
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 1a9d12ae7538..f9600753eca5 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -269,6 +269,7 @@ static const struct cyapa_cmd_len cyapa_smbus_cmds[] = {
{ CYAPA_SMBUS_MIN_BASELINE, 1 }, /* CYAPA_CMD_MIN_BASELINE */
};
+static int cyapa_gen3_try_poll_handler(struct cyapa *cyapa);
/*
* cyapa_smbus_read_block - perform smbus block read command
@@ -950,12 +951,14 @@ static u16 cyapa_get_wait_time_for_pwr_cmd(u8 pwr_mode)
* Device power mode can only be set when device is in operational mode.
*/
static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
- u16 always_unused, bool is_suspend_unused)
+ u16 always_unused, enum cyapa_pm_stage pm_stage)
{
- int ret;
+ struct input_dev *input = cyapa->input;
u8 power;
int tries;
- u16 sleep_time;
+ int sleep_time;
+ int interval;
+ int ret;
if (cyapa->state != CYAPA_STATE_OP)
return 0;
@@ -977,7 +980,7 @@ static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
if ((ret & PWR_MODE_MASK) == power_mode)
return 0;
- sleep_time = cyapa_get_wait_time_for_pwr_cmd(ret & PWR_MODE_MASK);
+ sleep_time = (int)cyapa_get_wait_time_for_pwr_cmd(ret & PWR_MODE_MASK);
power = ret;
power &= ~PWR_MODE_MASK;
power |= power_mode & PWR_MODE_MASK;
@@ -995,7 +998,23 @@ static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
* doing so before issuing the next command may result in errors
* depending on the command's content.
*/
- msleep(sleep_time);
+ if (cyapa->operational && input && input->users &&
+ (pm_stage == CYAPA_PM_RUNTIME_SUSPEND ||
+ pm_stage == CYAPA_PM_RUNTIME_RESUME)) {
+		/* Poll at 120 Hz; reads may fail, just ignore them. */
+ interval = 1000 / 120;
+ while (sleep_time > 0) {
+ if (sleep_time > interval)
+ msleep(interval);
+ else
+ msleep(sleep_time);
+ sleep_time -= interval;
+ cyapa_gen3_try_poll_handler(cyapa);
+ }
+ } else {
+ msleep(sleep_time);
+ }
+
return ret;
}
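The polling branch above amounts to slicing the wait into short naps:
1000 / 120 is 8 in integer arithmetic, so a wait of, say, 240 ms becomes
thirty 8 ms sleeps, each followed by an attempt to read and report any
pending touch data. This keeps the pointer responsive while a runtime
suspend or resume is in flight, instead of dropping everything the user
does during one long msleep().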
@@ -1112,7 +1131,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa)
* may cause problems, so we set the power mode first here.
*/
error = cyapa_gen3_set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
if (error)
dev_err(dev, "%s: set full power mode failed: %d\n",
__func__, error);
@@ -1168,32 +1187,16 @@ static bool cyapa_gen3_irq_cmd_handler(struct cyapa *cyapa)
return false;
}
-static int cyapa_gen3_irq_handler(struct cyapa *cyapa)
+static int cyapa_gen3_event_process(struct cyapa *cyapa,
+ struct cyapa_reg_data *data)
{
struct input_dev *input = cyapa->input;
- struct device *dev = &cyapa->client->dev;
- struct cyapa_reg_data data;
int num_fingers;
- int ret;
int i;
- ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
- if (ret != sizeof(data)) {
- dev_err(dev, "failed to read report data, (%d)\n", ret);
- return -EINVAL;
- }
-
- if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
- (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
- (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
- dev_err(dev, "invalid device state bytes, %02x %02x\n",
- data.device_status, data.finger_btn);
- return -EINVAL;
- }
-
- num_fingers = (data.finger_btn >> 4) & 0x0f;
+ num_fingers = (data->finger_btn >> 4) & 0x0f;
for (i = 0; i < num_fingers; i++) {
- const struct cyapa_touch *touch = &data.touches[i];
+ const struct cyapa_touch *touch = &data->touches[i];
/* Note: touch->id range is 1 to 15; slots are 0 to 14. */
int slot = touch->id - 1;
@@ -1210,18 +1213,65 @@ static int cyapa_gen3_irq_handler(struct cyapa *cyapa)
if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
input_report_key(input, BTN_LEFT,
- !!(data.finger_btn & OP_DATA_LEFT_BTN));
+ !!(data->finger_btn & OP_DATA_LEFT_BTN));
if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
input_report_key(input, BTN_MIDDLE,
- !!(data.finger_btn & OP_DATA_MIDDLE_BTN));
+ !!(data->finger_btn & OP_DATA_MIDDLE_BTN));
if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
input_report_key(input, BTN_RIGHT,
- !!(data.finger_btn & OP_DATA_RIGHT_BTN));
+ !!(data->finger_btn & OP_DATA_RIGHT_BTN));
input_sync(input);
return 0;
}
+static int cyapa_gen3_irq_handler(struct cyapa *cyapa)
+{
+ struct device *dev = &cyapa->client->dev;
+ struct cyapa_reg_data data;
+ int ret;
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
+ if (ret != sizeof(data)) {
+ dev_err(dev, "failed to read report data, (%d)\n", ret);
+ return -EINVAL;
+ }
+
+ if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
+ (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
+ (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
+ dev_err(dev, "invalid device state bytes: %02x %02x\n",
+ data.device_status, data.finger_btn);
+ return -EINVAL;
+ }
+
+ return cyapa_gen3_event_process(cyapa, &data);
+}
+
+/*
+ * This function is called from cyapa_gen3_set_power_mode(), where it is
+ * known that reads may fail for a while after the set power mode command
+ * has been sent. It exists to avoid emitting the known and unwanted I2C
+ * and data parse error messages in that situation.
+ */
+static int cyapa_gen3_try_poll_handler(struct cyapa *cyapa)
+{
+ struct cyapa_reg_data data;
+ int ret;
+
+ ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
+ if (ret != sizeof(data))
+ return -EINVAL;
+
+ if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
+ (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
+ (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID)
+ return -EINVAL;
+
+	return cyapa_gen3_event_process(cyapa, &data);
+}
+
static int cyapa_gen3_initialize(struct cyapa *cyapa) { return 0; }
static int cyapa_gen3_bl_initiate(struct cyapa *cyapa,
const struct firmware *fw) { return 0; }
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 118ba977181e..5775d40b3d53 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -342,6 +342,9 @@ u8 pip_bl_read_app_info[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
static u8 cyapa_pip_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
0xff, 0xfe, 0xfd, 0x5a };
+static int cyapa_pip_event_process(struct cyapa *cyapa,
+ struct cyapa_pip_report_data *report_data);
+
int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa)
{
struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
@@ -350,6 +353,9 @@ int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa)
atomic_set(&pip->cmd_issued, 0);
mutex_init(&pip->cmd_lock);
+ mutex_init(&pip->pm_stage_lock);
+ pip->pm_stage = CYAPA_PM_DEACTIVE;
+
pip->resp_sort_func = NULL;
pip->in_progress_cmd = PIP_INVALID_CMD;
pip->resp_data = NULL;
@@ -397,6 +403,38 @@ ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
return 0;
}
+static void cyapa_set_pip_pm_state(struct cyapa *cyapa,
+ enum cyapa_pm_stage pm_stage)
+{
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
+
+ mutex_lock(&pip->pm_stage_lock);
+ pip->pm_stage = pm_stage;
+ mutex_unlock(&pip->pm_stage_lock);
+}
+
+static void cyapa_reset_pip_pm_state(struct cyapa *cyapa)
+{
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
+
+	/* Indicate that pip->pm_stage is no longer valid. */
+ mutex_lock(&pip->pm_stage_lock);
+ pip->pm_stage = CYAPA_PM_DEACTIVE;
+ mutex_unlock(&pip->pm_stage_lock);
+}
+
+static enum cyapa_pm_stage cyapa_get_pip_pm_state(struct cyapa *cyapa)
+{
+ struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
+ enum cyapa_pm_stage pm_stage;
+
+ mutex_lock(&pip->pm_stage_lock);
+ pm_stage = pip->pm_stage;
+ mutex_unlock(&pip->pm_stage_lock);
+
+ return pm_stage;
+}
+
/**
* This function is aimed to dump all not read data in Gen5 trackpad
* before send any command, otherwise, the interrupt line will be blocked.
@@ -404,7 +442,9 @@ ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
int cyapa_empty_pip_output_data(struct cyapa *cyapa,
u8 *buf, int *len, cb_sort func)
{
+ struct input_dev *input = cyapa->input;
struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
+ enum cyapa_pm_stage pm_stage = cyapa_get_pip_pm_state(cyapa);
int length;
int report_count;
int empty_count;
@@ -478,6 +518,12 @@ int cyapa_empty_pip_output_data(struct cyapa *cyapa,
*len = length;
/* Response found, success. */
return 0;
+ } else if (cyapa->operational && input && input->users &&
+ (pm_stage == CYAPA_PM_RUNTIME_RESUME ||
+ pm_stage == CYAPA_PM_RUNTIME_SUSPEND)) {
+ /* Parse the data and report it if it's valid. */
+ cyapa_pip_event_process(cyapa,
+ (struct cyapa_pip_report_data *)pip->empty_buf);
}
error = -EINVAL;
@@ -1566,15 +1612,17 @@ int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state)
}
static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
- u8 power_mode, u16 sleep_time, bool is_suspend)
+ u8 power_mode, u16 sleep_time, enum cyapa_pm_stage pm_stage)
{
struct device *dev = &cyapa->client->dev;
u8 power_state;
- int error;
+ int error = 0;
if (cyapa->state != CYAPA_STATE_GEN5_APP)
return 0;
+ cyapa_set_pip_pm_state(cyapa, pm_stage);
+
if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
/*
* Assume TP in deep sleep mode when driver is loaded,
@@ -1597,7 +1645,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
power_mode == PWR_MODE_BTN_ONLY ||
PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
/* Has in correct power mode state, early return. */
- return 0;
+ goto out;
}
}
@@ -1605,11 +1653,11 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
if (error) {
dev_err(dev, "enter deep sleep fail: %d\n", error);
- return error;
+ goto out;
}
PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
- return 0;
+ goto out;
}
/*
@@ -1621,7 +1669,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
if (error) {
dev_err(dev, "deep sleep wake fail: %d\n", error);
- return error;
+ goto out;
}
}
@@ -1630,7 +1678,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
GEN5_POWER_STATE_ACTIVE);
if (error) {
dev_err(dev, "change to active fail: %d\n", error);
- return error;
+ goto out;
}
PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
@@ -1639,7 +1687,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
GEN5_POWER_STATE_BTN_ONLY);
if (error) {
dev_err(dev, "fail to button only mode: %d\n", error);
- return error;
+ goto out;
}
PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
@@ -1664,7 +1712,7 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
if (error) {
dev_err(dev, "set power state to 0x%02x failed: %d\n",
power_state, error);
- return error;
+ goto out;
}
/*
@@ -1677,14 +1725,16 @@ static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
* is suspending which may cause interrupt line unable to be
* asserted again.
*/
- if (is_suspend)
+ if (pm_stage == CYAPA_PM_SUSPEND)
cyapa_gen5_disable_pip_report(cyapa);
PIP_DEV_SET_PWR_STATE(cyapa,
cyapa_sleep_time_to_pwr_cmd(sleep_time));
}
- return 0;
+out:
+ cyapa_reset_pip_pm_state(cyapa);
+ return error;
}
int cyapa_pip_resume_scanning(struct cyapa *cyapa)
@@ -2513,7 +2563,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
* the device state is required.
*/
error = cyapa_gen5_set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
if (error)
dev_warn(dev, "%s: failed to set power active mode.\n",
__func__);
@@ -2715,7 +2765,6 @@ int cyapa_pip_irq_handler(struct cyapa *cyapa)
struct device *dev = &cyapa->client->dev;
struct cyapa_pip_report_data report_data;
unsigned int report_len;
- u8 report_id;
int ret;
if (!cyapa_is_pip_app_mode(cyapa)) {
@@ -2752,7 +2801,23 @@ int cyapa_pip_irq_handler(struct cyapa *cyapa)
return -EINVAL;
}
- report_id = report_data.report_head[PIP_RESP_REPORT_ID_OFFSET];
+ return cyapa_pip_event_process(cyapa, &report_data);
+}
+
+static int cyapa_pip_event_process(struct cyapa *cyapa,
+ struct cyapa_pip_report_data *report_data)
+{
+ struct device *dev = &cyapa->client->dev;
+ unsigned int report_len;
+ u8 report_id;
+
+ report_len = get_unaligned_le16(
+ &report_data->report_head[PIP_RESP_LENGTH_OFFSET]);
+ /* Idle, no data for report. */
+	/* Idle, no data to report. */
+ return 0;
+
+ report_id = report_data->report_head[PIP_RESP_REPORT_ID_OFFSET];
if (report_id == PIP_WAKEUP_EVENT_REPORT_ID &&
report_len == PIP_WAKEUP_EVENT_SIZE) {
/*
@@ -2805,11 +2870,11 @@ int cyapa_pip_irq_handler(struct cyapa *cyapa)
}
if (report_id == PIP_TOUCH_REPORT_ID)
- cyapa_pip_report_touches(cyapa, &report_data);
+ cyapa_pip_report_touches(cyapa, report_data);
else if (report_id == PIP_PROXIMITY_REPORT_ID)
- cyapa_pip_report_proximity(cyapa, &report_data);
+ cyapa_pip_report_proximity(cyapa, report_data);
else
- cyapa_pip_report_buttons(cyapa, &report_data);
+ cyapa_pip_report_buttons(cyapa, report_data);
return 0;
}
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index e4eb048d1bf6..016397850b1b 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -425,7 +425,7 @@ static int cyapa_gen6_deep_sleep(struct cyapa *cyapa, u8 state)
}
static int cyapa_gen6_set_power_mode(struct cyapa *cyapa,
- u8 power_mode, u16 sleep_time, bool is_suspend)
+ u8 power_mode, u16 sleep_time, enum cyapa_pm_stage pm_stage)
{
struct device *dev = &cyapa->client->dev;
struct gen6_interval_setting *interval_setting =
@@ -689,7 +689,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa)
* the device state is required.
*/
error = cyapa_gen6_set_power_mode(cyapa,
- PWR_MODE_FULL_ACTIVE, 0, false);
+ PWR_MODE_FULL_ACTIVE, 0, CYAPA_PM_ACTIVE);
if (error)
dev_warn(dev, "%s: failed to set power active mode.\n",
__func__);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index b9e4ee34c132..5784e20542a4 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -37,6 +37,7 @@
#include "cypress_ps2.h"
#include "focaltech.h"
#include "vmmouse.h"
+#include "byd.h"
#define DRIVER_DESC "PS/2 mouse driver"
@@ -842,6 +843,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.init = vmmouse_init,
},
#endif
+#ifdef CONFIG_MOUSE_PS2_BYD
+ {
+ .type = PSMOUSE_BYD,
+ .name = "BYDPS/2",
+ .alias = "byd",
+ .detect = byd_detect,
+ .init = byd_init,
+ },
+#endif
{
.type = PSMOUSE_AUTO,
.name = "auto",
@@ -1105,6 +1115,10 @@ static int psmouse_extensions(struct psmouse *psmouse,
if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
&max_proto, set_properties, true))
return PSMOUSE_TOUCHKIT_PS2;
+
+ if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
+ &max_proto, set_properties, true))
+ return PSMOUSE_BYD;
}
/*
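With the table entry above in place, the BYD protocol is tried during
automatic detection (as in the psmouse_extensions() hunk) and can also be
forced for testing via psmouse's existing "proto" module parameter, which
matches against the .alias field:

	modprobe psmouse proto=byd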
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index ad5a5a1ea872..e0ca6cda3d16 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -104,6 +104,7 @@ enum psmouse_type {
PSMOUSE_CYPRESS,
PSMOUSE_FOCALTECH,
PSMOUSE_VMMOUSE,
+ PSMOUSE_BYD,
PSMOUSE_AUTO /* This one should always be last */
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 6025eb430c0a..a41d8328c064 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
return;
- /* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
- if (SYN_ID_FULL(priv->identity) == 0x801 &&
+ /* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
+ if ((SYN_ID_FULL(priv->identity) == 0x801 ||
+ SYN_ID_FULL(priv->identity) == 0x802) &&
!((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
return;
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
new file mode 100644
index 000000000000..f73df2495fed
--- /dev/null
+++ b/drivers/input/rmi4/Kconfig
@@ -0,0 +1,63 @@
+#
+# RMI4 configuration
+#
+config RMI4_CORE
+ tristate "Synaptics RMI4 bus support"
+ help
+ Say Y here if you want to support the Synaptics RMI4 bus. This is
+ required for all RMI4 device support.
+
+ If unsure, say Y.
+
+config RMI4_I2C
+ tristate "RMI4 I2C Support"
+ depends on RMI4_CORE && I2C
+ help
+ Say Y here if you want to support RMI4 devices connected to an I2C
+ bus.
+
+ If unsure, say Y.
+
+config RMI4_SPI
+ tristate "RMI4 SPI Support"
+ depends on RMI4_CORE && SPI
+ help
+	  Say Y here if you want to support RMI4 devices connected to an SPI
+ bus.
+
+ If unsure, say N.
+
+config RMI4_2D_SENSOR
+ bool
+ depends on RMI4_CORE
+
+config RMI4_F11
+ bool "RMI4 Function 11 (2D pointing)"
+ select RMI4_2D_SENSOR
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 11.
+
+ Function 11 provides 2D multifinger pointing for touchscreens and
+ touchpads. For sensors that support relative pointing, F11 also
+ provides mouse input.
+
+config RMI4_F12
+ bool "RMI4 Function 12 (2D pointing)"
+ select RMI4_2D_SENSOR
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 12.
+
+ Function 12 provides 2D multifinger pointing for touchscreens and
+ touchpads. For sensors that support relative pointing, F12 also
+ provides mouse input.
+
+config RMI4_F30
+ bool "RMI4 Function 30 (GPIO LED)"
+ depends on RMI4_CORE
+ help
+ Say Y here if you want to add support for RMI4 function 30.
+
+ Function 30 provides GPIO and LED support for RMI4 devices. This
+ includes support for buttons on TouchPads and ClickPads.
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
new file mode 100644
index 000000000000..95c00a783992
--- /dev/null
+++ b/drivers/input/rmi4/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_RMI4_CORE) += rmi_core.o
+rmi_core-y := rmi_bus.o rmi_driver.o rmi_f01.o
+
+rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
+
+# Function drivers
+rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
+rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
+rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
+
+# Transports
+obj-$(CONFIG_RMI4_I2C) += rmi_i2c.o
+obj-$(CONFIG_RMI4_SPI) += rmi_spi.o
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
new file mode 100644
index 000000000000..e97bd7fabccc
--- /dev/null
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/rmi.h>
+#include "rmi_driver.h"
+#include "rmi_2d_sensor.h"
+
+#define RMI_2D_REL_POS_MIN -128
+#define RMI_2D_REL_POS_MAX 127
+
+/* maximum ABS_MT_POSITION displacement (in mm) */
+#define DMAX 10
+
+void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ int slot)
+{
+ struct rmi_2d_axis_alignment *axis_align = &sensor->axis_align;
+
+ /* we keep the previous values if the finger is released */
+ if (obj->type == RMI_2D_OBJECT_NONE)
+ return;
+
+ if (axis_align->swap_axes)
+ swap(obj->x, obj->y);
+
+ if (axis_align->flip_x)
+ obj->x = sensor->max_x - obj->x;
+
+ if (axis_align->flip_y)
+ obj->y = sensor->max_y - obj->y;
+
+	/*
+	 * Checking here whether the X or Y offset is specified would be
+	 * redundant; we simply add the offsets and then clip the values.
+	 *
+	 * Note: offsets need to be applied before clipping occurs, or we
+	 * could get funny values that are outside of the clipping
+	 * boundaries.
+	 */
+ obj->x += axis_align->offset_x;
+ obj->y += axis_align->offset_y;
+
+ obj->x = max(axis_align->clip_x_low, obj->x);
+ obj->y = max(axis_align->clip_y_low, obj->y);
+
+ if (axis_align->clip_x_high)
+ obj->x = min(sensor->max_x, obj->x);
+
+ if (axis_align->clip_y_high)
+ obj->y = min(sensor->max_y, obj->y);
+
+ sensor->tracking_pos[slot].x = obj->x;
+ sensor->tracking_pos[slot].y = obj->y;
+}
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_abs_process);
+
+void rmi_2d_sensor_abs_report(struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ int slot)
+{
+ struct rmi_2d_axis_alignment *axis_align = &sensor->axis_align;
+ struct input_dev *input = sensor->input;
+ int wide, major, minor;
+
+ if (sensor->kernel_tracking)
+ input_mt_slot(input, sensor->tracking_slots[slot]);
+ else
+ input_mt_slot(input, slot);
+
+ input_mt_report_slot_state(input, obj->mt_tool,
+ obj->type != RMI_2D_OBJECT_NONE);
+
+ if (obj->type != RMI_2D_OBJECT_NONE) {
+ obj->x = sensor->tracking_pos[slot].x;
+ obj->y = sensor->tracking_pos[slot].y;
+
+ if (axis_align->swap_axes)
+ swap(obj->wx, obj->wy);
+
+ wide = (obj->wx > obj->wy);
+ major = max(obj->wx, obj->wy);
+ minor = min(obj->wx, obj->wy);
+
+ if (obj->type == RMI_2D_OBJECT_STYLUS) {
+ major = max(1, major);
+ minor = max(1, minor);
+ }
+
+ input_event(sensor->input, EV_ABS, ABS_MT_POSITION_X, obj->x);
+ input_event(sensor->input, EV_ABS, ABS_MT_POSITION_Y, obj->y);
+ input_event(sensor->input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(sensor->input, EV_ABS, ABS_MT_PRESSURE, obj->z);
+ input_event(sensor->input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
+ input_event(sensor->input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
+
+ rmi_dbg(RMI_DEBUG_2D_SENSOR, &sensor->input->dev,
+ "%s: obj[%d]: type: 0x%02x X: %d Y: %d Z: %d WX: %d WY: %d\n",
+ __func__, slot, obj->type, obj->x, obj->y, obj->z,
+ obj->wx, obj->wy);
+ }
+}
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_abs_report);
+
+void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
+{
+ struct rmi_2d_axis_alignment *axis_align = &sensor->axis_align;
+
+ x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
+ y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
+
+ if (axis_align->swap_axes)
+ swap(x, y);
+
+ if (axis_align->flip_x)
+ x = min(RMI_2D_REL_POS_MAX, -x);
+
+ if (axis_align->flip_y)
+ y = min(RMI_2D_REL_POS_MAX, -y);
+
+ if (x || y) {
+ input_report_rel(sensor->input, REL_X, x);
+ input_report_rel(sensor->input, REL_Y, y);
+ }
+}
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_rel_report);
+
+static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
+{
+ struct input_dev *input = sensor->input;
+ int res_x;
+ int res_y;
+ int input_flags = 0;
+
+ if (sensor->report_abs) {
+ if (sensor->axis_align.swap_axes)
+ swap(sensor->max_x, sensor->max_y);
+
+ sensor->min_x = sensor->axis_align.clip_x_low;
+ if (sensor->axis_align.clip_x_high)
+ sensor->max_x = min(sensor->max_x,
+ sensor->axis_align.clip_x_high);
+
+ sensor->min_y = sensor->axis_align.clip_y_low;
+ if (sensor->axis_align.clip_y_high)
+ sensor->max_y = min(sensor->max_y,
+ sensor->axis_align.clip_y_high);
+
+ set_bit(EV_ABS, input->evbit);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
+ 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
+ 0, 0);
+
+ if (sensor->x_mm && sensor->y_mm) {
+ res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
+ res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+
+ input_abs_set_res(input, ABS_X, res_x);
+ input_abs_set_res(input, ABS_Y, res_y);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X, res_x);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, res_y);
+
+ if (!sensor->dmax)
+ sensor->dmax = DMAX * res_x;
+ }
+
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ if (sensor->sensor_type == rmi_sensor_touchpad)
+ input_flags = INPUT_MT_POINTER;
+ else
+ input_flags = INPUT_MT_DIRECT;
+
+ if (sensor->kernel_tracking)
+ input_flags |= INPUT_MT_TRACK;
+
+ input_mt_init_slots(input, sensor->nbr_fingers, input_flags);
+ }
+
+ if (sensor->report_rel) {
+ set_bit(EV_REL, input->evbit);
+ set_bit(REL_X, input->relbit);
+ set_bit(REL_Y, input->relbit);
+ }
+
+ if (sensor->topbuttonpad)
+ set_bit(INPUT_PROP_TOPBUTTONPAD, input->propbit);
+}
+
+int rmi_2d_sensor_configure_input(struct rmi_function *fn,
+ struct rmi_2d_sensor *sensor)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+
+ if (!drv_data->input)
+ return -ENODEV;
+
+ sensor->input = drv_data->input;
+ rmi_2d_sensor_set_input_params(sensor);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_configure_input);
+
+#ifdef CONFIG_OF
+int rmi_2d_sensor_of_probe(struct device *dev,
+ struct rmi_2d_sensor_platform_data *pdata)
+{
+ int retval;
+ u32 val;
+
+ pdata->axis_align.swap_axes = of_property_read_bool(dev->of_node,
+ "touchscreen-swapped-x-y");
+
+ pdata->axis_align.flip_x = of_property_read_bool(dev->of_node,
+ "touchscreen-inverted-x");
+
+ pdata->axis_align.flip_y = of_property_read_bool(dev->of_node,
+ "touchscreen-inverted-y");
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,clip-x-low", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.clip_x_low = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,clip-y-low", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.clip_y_low = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,clip-x-high", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.clip_x_high = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,clip-y-high", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.clip_y_high = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,offset-x", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.offset_x = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,offset-y", 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.offset_y = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,delta-x-threshold",
+ 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.delta_x_threshold = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,delta-y-threshold",
+ 1);
+ if (retval)
+ return retval;
+
+ pdata->axis_align.delta_y_threshold = val;
+
+ retval = rmi_of_property_read_u32(dev, (u32 *)&pdata->sensor_type,
+ "syna,sensor-type", 1);
+ if (retval)
+ return retval;
+
+ retval = rmi_of_property_read_u32(dev, &val, "touchscreen-x-mm", 1);
+ if (retval)
+ return retval;
+
+ pdata->x_mm = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "touchscreen-y-mm", 1);
+ if (retval)
+ return retval;
+
+ pdata->y_mm = val;
+
+ retval = rmi_of_property_read_u32(dev, &val,
+ "syna,disable-report-mask", 1);
+ if (retval)
+ return retval;
+
+ pdata->disable_report_mask = val;
+
+ retval = rmi_of_property_read_u32(dev, &val, "syna,rezero-wait-ms",
+ 1);
+ if (retval)
+ return retval;
+
+ pdata->rezero_wait = val;
+
+ return 0;
+}
+#else
+inline int rmi_2d_sensor_of_probe(struct device *dev,
+ struct rmi_2d_sensor_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+EXPORT_SYMBOL_GPL(rmi_2d_sensor_of_probe);
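For boards without device tree, the same knobs can be supplied as static
platform data instead. A minimal sketch, assuming the field names of
struct rmi_2d_sensor_platform_data and struct rmi_2d_axis_alignment from
<linux/rmi.h>; the values are invented for illustration:

	static const struct rmi_2d_sensor_platform_data example_2d_pdata = {
		.axis_align = {
			.flip_y		= true,
			.clip_x_high	= 1080,
		},
		.sensor_type	= rmi_sensor_touchpad,
		.x_mm		= 102,
		.y_mm		= 68,
	};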
diff --git a/drivers/input/rmi4/rmi_2d_sensor.h b/drivers/input/rmi4/rmi_2d_sensor.h
new file mode 100644
index 000000000000..77fcdfef003c
--- /dev/null
+++ b/drivers/input/rmi4/rmi_2d_sensor.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_2D_SENSOR_H
+#define _RMI_2D_SENSOR_H
+
+enum rmi_2d_sensor_object_type {
+ RMI_2D_OBJECT_NONE,
+ RMI_2D_OBJECT_FINGER,
+ RMI_2D_OBJECT_STYLUS,
+ RMI_2D_OBJECT_PALM,
+ RMI_2D_OBJECT_UNCLASSIFIED,
+};
+
+struct rmi_2d_sensor_abs_object {
+ enum rmi_2d_sensor_object_type type;
+ int mt_tool;
+ u16 x;
+ u16 y;
+ u8 z;
+ u8 wx;
+ u8 wy;
+};
+
+/**
+ * @axis_align - controls parameters that are useful in system prototyping
+ * and bring up.
+ * @max_x - The maximum X coordinate that will be reported by this sensor.
+ * @max_y - The maximum Y coordinate that will be reported by this sensor.
+ * @nbr_fingers - How many fingers can this sensor report?
+ * @data_pkt - buffer for data reported by this sensor.
+ * @pkt_size - number of bytes in that buffer.
+ * @attn_size - Size of the HID attention report (only contains abs data).
+ * @kernel_tracking - Some sensors cannot reliably report each finger's
+ * position when two fingers are on the device. When this is true, we
+ * assume we have one of those sensors and report events appropriately.
+ * @sensor_type - indicates whether we're touchscreen or touchpad.
+ * @input - input device for absolute pointing stream
+ * @input_phys - buffer for the absolute phys name for this sensor.
+ */
+struct rmi_2d_sensor {
+ struct rmi_2d_axis_alignment axis_align;
+ struct input_mt_pos *tracking_pos;
+ int *tracking_slots;
+ bool kernel_tracking;
+ struct rmi_2d_sensor_abs_object *objs;
+ int dmax;
+ u16 min_x;
+ u16 max_x;
+ u16 min_y;
+ u16 max_y;
+ u8 nbr_fingers;
+ u8 *data_pkt;
+ int pkt_size;
+ int attn_size;
+ bool topbuttonpad;
+ enum rmi_sensor_type sensor_type;
+ struct input_dev *input;
+ struct rmi_function *fn;
+ char input_phys[32];
+ u8 report_abs;
+ u8 report_rel;
+ u8 x_mm;
+ u8 y_mm;
+};
+
+int rmi_2d_sensor_of_probe(struct device *dev,
+ struct rmi_2d_sensor_platform_data *pdata);
+
+void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ int slot);
+
+void rmi_2d_sensor_abs_report(struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ int slot);
+
+void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y);
+
+int rmi_2d_sensor_configure_input(struct rmi_function *fn,
+ struct rmi_2d_sensor *sensor);
+#endif /* _RMI_2D_SENSOR_H */
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
new file mode 100644
index 000000000000..b368b0515c5a
--- /dev/null
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/pm.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include "rmi_bus.h"
+#include "rmi_driver.h"
+
+static int debug_flags;
+module_param(debug_flags, int, 0644);
+MODULE_PARM_DESC(debug_flags, "control debugging information");
+
+void rmi_dbg(int flags, struct device *dev, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (flags & debug_flags) {
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ dev_printk(KERN_DEBUG, dev, "%pV", &vaf);
+
+ va_end(args);
+ }
+}
+EXPORT_SYMBOL_GPL(rmi_dbg);
+
+/*
+ * RMI Physical devices
+ *
+ * A physical RMI device consists of several functions, each serving a
+ * particular purpose. For example, F11 is a 2D touch sensor while F01 is
+ * a generic function present in every RMI device.
+ */
+
+static void rmi_release_device(struct device *dev)
+{
+ struct rmi_device *rmi_dev = to_rmi_device(dev);
+
+ kfree(rmi_dev);
+}
+
+static struct device_type rmi_device_type = {
+ .name = "rmi4_sensor",
+ .release = rmi_release_device,
+};
+
+bool rmi_is_physical_device(struct device *dev)
+{
+ return dev->type == &rmi_device_type;
+}
+
+/**
+ * rmi_register_transport_device - register a transport device connection
+ * on the RMI bus. Transport drivers provide communication from the devices
+ * on a bus (such as SPI, I2C, and so on) to the RMI4 sensor.
+ *
+ * @xport: the transport device to register
+ */
+int rmi_register_transport_device(struct rmi_transport_dev *xport)
+{
+ static atomic_t transport_device_count = ATOMIC_INIT(0);
+ struct rmi_device *rmi_dev;
+ int error;
+
+ rmi_dev = kzalloc(sizeof(struct rmi_device), GFP_KERNEL);
+ if (!rmi_dev)
+ return -ENOMEM;
+
+ device_initialize(&rmi_dev->dev);
+
+ rmi_dev->xport = xport;
+ rmi_dev->number = atomic_inc_return(&transport_device_count) - 1;
+
+ dev_set_name(&rmi_dev->dev, "rmi4-%02d", rmi_dev->number);
+
+ rmi_dev->dev.bus = &rmi_bus_type;
+ rmi_dev->dev.type = &rmi_device_type;
+
+ xport->rmi_dev = rmi_dev;
+
+ error = device_add(&rmi_dev->dev);
+ if (error)
+ goto err_put_device;
+
+ rmi_dbg(RMI_DEBUG_CORE, xport->dev,
+ "%s: Registered %s as %s.\n", __func__,
+ dev_name(rmi_dev->xport->dev), dev_name(&rmi_dev->dev));
+
+ return 0;
+
+err_put_device:
+ put_device(&rmi_dev->dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(rmi_register_transport_device);
+
+/**
+ * rmi_unregister_transport_device - unregister a transport device connection
+ * @xport: the transport driver to unregister
+ *
+ */
+void rmi_unregister_transport_device(struct rmi_transport_dev *xport)
+{
+ struct rmi_device *rmi_dev = xport->rmi_dev;
+
+ device_del(&rmi_dev->dev);
+ put_device(&rmi_dev->dev);
+}
+EXPORT_SYMBOL(rmi_unregister_transport_device);
+
+
+/* Function specific stuff */
+
+static void rmi_release_function(struct device *dev)
+{
+ struct rmi_function *fn = to_rmi_function(dev);
+
+ kfree(fn);
+}
+
+static struct device_type rmi_function_type = {
+ .name = "rmi4_function",
+ .release = rmi_release_function,
+};
+
+bool rmi_is_function_device(struct device *dev)
+{
+ return dev->type == &rmi_function_type;
+}
+
+static int rmi_function_match(struct device *dev, struct device_driver *drv)
+{
+ struct rmi_function_handler *handler = to_rmi_function_handler(drv);
+ struct rmi_function *fn = to_rmi_function(dev);
+
+ return fn->fd.function_number == handler->func;
+}
+
+#ifdef CONFIG_OF
+static void rmi_function_of_probe(struct rmi_function *fn)
+{
+ char of_name[9];
+
+ snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
+ fn->fd.function_number);
+ fn->dev.of_node = of_find_node_by_name(
+ fn->rmi_dev->xport->dev->of_node, of_name);
+}
+#else
+static inline void rmi_function_of_probe(struct rmi_function *fn)
+{}
+#endif
+
+static int rmi_function_probe(struct device *dev)
+{
+ struct rmi_function *fn = to_rmi_function(dev);
+ struct rmi_function_handler *handler =
+ to_rmi_function_handler(dev->driver);
+ int error;
+
+ rmi_function_of_probe(fn);
+
+ if (handler->probe) {
+ error = handler->probe(fn);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_function_remove(struct device *dev)
+{
+ struct rmi_function *fn = to_rmi_function(dev);
+ struct rmi_function_handler *handler =
+ to_rmi_function_handler(dev->driver);
+
+ if (handler->remove)
+ handler->remove(fn);
+
+ return 0;
+}
+
+int rmi_register_function(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error;
+
+ device_initialize(&fn->dev);
+
+ dev_set_name(&fn->dev, "%s.fn%02x",
+ dev_name(&rmi_dev->dev), fn->fd.function_number);
+
+ fn->dev.parent = &rmi_dev->dev;
+ fn->dev.type = &rmi_function_type;
+ fn->dev.bus = &rmi_bus_type;
+
+ error = device_add(&fn->dev);
+ if (error) {
+ dev_err(&rmi_dev->dev,
+ "Failed device_register function device %s\n",
+ dev_name(&fn->dev));
+ goto err_put_device;
+ }
+
+ rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Registered F%02X.\n",
+ fn->fd.function_number);
+
+ return 0;
+
+err_put_device:
+ put_device(&fn->dev);
+ return error;
+}
+
+void rmi_unregister_function(struct rmi_function *fn)
+{
+ device_del(&fn->dev);
+
+ if (fn->dev.of_node)
+ of_node_put(fn->dev.of_node);
+
+ put_device(&fn->dev);
+}
+
+/**
+ * rmi_register_function_handler - register a handler for an RMI function
+ * @handler: RMI handler that should be registered.
+ * @owner: pointer to module that implements the handler
+ * @mod_name: name of the module implementing the handler
+ *
+ * This function performs additional setup of the RMI function handler and
+ * registers it with the RMI core so that it can be bound to
+ * RMI function devices.
+ */
+int __rmi_register_function_handler(struct rmi_function_handler *handler,
+ struct module *owner,
+ const char *mod_name)
+{
+ struct device_driver *driver = &handler->driver;
+ int error;
+
+ driver->bus = &rmi_bus_type;
+ driver->owner = owner;
+ driver->mod_name = mod_name;
+ driver->probe = rmi_function_probe;
+ driver->remove = rmi_function_remove;
+
+ error = driver_register(&handler->driver);
+ if (error) {
+ pr_err("driver_register() failed for %s, error: %d\n",
+ handler->driver.name, error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__rmi_register_function_handler);
+
+/**
+ * rmi_unregister_function_handler - unregister given RMI function handler
+ * @handler: RMI handler that should be unregistered.
+ *
+ * This function unregisters the given function handler from the RMI core,
+ * which causes it to be unbound from the function devices.
+ */
+void rmi_unregister_function_handler(struct rmi_function_handler *handler)
+{
+ driver_unregister(&handler->driver);
+}
+EXPORT_SYMBOL_GPL(rmi_unregister_function_handler);
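+
+/*
+ * A minimal sketch of the boilerplate a function driver needs in order
+ * to use the helpers above; F99 and all names here are hypothetical:
+ */
+static int rmi_f99_probe(struct rmi_function *fn)
+{
+	dev_info(&fn->dev, "found F%02X, query base 0x%04x\n",
+		 fn->fd.function_number, fn->fd.query_base_addr);
+	return 0;
+}
+
+static struct rmi_function_handler rmi_f99_handler = {
+	.driver = {
+		.name = "rmi4_f99",
+	},
+	.func = 0x99,
+	.probe = rmi_f99_probe,
+};
+
+/* registered from module init with rmi_register_function_handler() */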
+
+/* Bus specific stuff */
+
+static int rmi_bus_match(struct device *dev, struct device_driver *drv)
+{
+ bool physical = rmi_is_physical_device(dev);
+
+ /* First see if types are not compatible */
+ if (physical != rmi_is_physical_driver(drv))
+ return 0;
+
+ return physical || rmi_function_match(dev, drv);
+}
+
+struct bus_type rmi_bus_type = {
+ .match = rmi_bus_match,
+ .name = "rmi4",
+};
+
+static struct rmi_function_handler *fn_handlers[] = {
+ &rmi_f01_handler,
+#ifdef CONFIG_RMI4_F11
+ &rmi_f11_handler,
+#endif
+#ifdef CONFIG_RMI4_F12
+ &rmi_f12_handler,
+#endif
+#ifdef CONFIG_RMI4_F30
+ &rmi_f30_handler,
+#endif
+};
+
+static void __rmi_unregister_function_handlers(int start_idx)
+{
+ int i;
+
+ for (i = start_idx; i >= 0; i--)
+ rmi_unregister_function_handler(fn_handlers[i]);
+}
+
+static void rmi_unregister_function_handlers(void)
+{
+ __rmi_unregister_function_handlers(ARRAY_SIZE(fn_handlers) - 1);
+}
+
+static int rmi_register_function_handlers(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fn_handlers); i++) {
+ ret = rmi_register_function_handler(fn_handlers[i]);
+ if (ret) {
+ pr_err("%s: error registering the RMI F%02x handler: %d\n",
+ __func__, fn_handlers[i]->func, ret);
+ goto err_unregister_function_handlers;
+ }
+ }
+
+ return 0;
+
+err_unregister_function_handlers:
+ __rmi_unregister_function_handlers(i - 1);
+ return ret;
+}
+
+int rmi_of_property_read_u32(struct device *dev, u32 *result,
+ const char *prop, bool optional)
+{
+ int retval;
+ u32 val = 0;
+
+ retval = of_property_read_u32(dev->of_node, prop, &val);
+ if (retval && (!optional && retval == -EINVAL)) {
+ dev_err(dev, "Failed to get %s value: %d\n",
+ prop, retval);
+ return retval;
+ }
+ *result = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rmi_of_property_read_u32);
+
+static int __init rmi_bus_init(void)
+{
+ int error;
+
+ error = bus_register(&rmi_bus_type);
+ if (error) {
+ pr_err("%s: error registering the RMI bus: %d\n",
+ __func__, error);
+ return error;
+ }
+
+ error = rmi_register_function_handlers();
+ if (error)
+ goto err_unregister_bus;
+
+ error = rmi_register_physical_driver();
+ if (error) {
+ pr_err("%s: error registering the RMI physical driver: %d\n",
+ __func__, error);
+ goto err_unregister_bus;
+ }
+
+ return 0;
+
+err_unregister_bus:
+ bus_unregister(&rmi_bus_type);
+ return error;
+}
+module_init(rmi_bus_init);
+
+static void __exit rmi_bus_exit(void)
+{
+ /*
+ * We should only ever get here if all drivers are unloaded, so
+ * all we have to do at this point is unregister ourselves.
+ */
+
+ rmi_unregister_physical_driver();
+ rmi_unregister_function_handlers();
+ bus_unregister(&rmi_bus_type);
+}
+module_exit(rmi_bus_exit);
+
+MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com");
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com");
+MODULE_DESCRIPTION("RMI bus");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RMI_DRIVER_VERSION);
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
new file mode 100644
index 000000000000..899579830536
--- /dev/null
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_BUS_H
+#define _RMI_BUS_H
+
+#include <linux/rmi.h>
+
+struct rmi_device;
+
+/**
+ * struct rmi_function - represents the implementation of an RMI4
+ * function for a particular device (basically, a driver for that RMI4 function)
+ *
+ * @fd: The function descriptor of the RMI function
+ * @rmi_dev: Pointer to the RMI device associated with this function container
+ * @dev: The device associated with this particular function.
+ *
+ * @num_of_irqs: The number of irqs needed by this function
+ * @irq_pos: The position in the irq bitfield this function holds
+ * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
+ * interrupt handling.
+ *
+ * @node: entry in device's list of functions
+ */
+struct rmi_function {
+ struct rmi_function_descriptor fd;
+ struct rmi_device *rmi_dev;
+ struct device dev;
+ struct list_head node;
+
+ unsigned int num_of_irqs;
+ unsigned int irq_pos;
+ unsigned long irq_mask[];
+};
+
+#define to_rmi_function(d) container_of(d, struct rmi_function, dev)
+
+bool rmi_is_function_device(struct device *dev);
+
+int __must_check rmi_register_function(struct rmi_function *);
+void rmi_unregister_function(struct rmi_function *);
+
+/**
+ * struct rmi_function_handler - driver routines for a particular RMI function.
+ *
+ * @func: The RMI function number
+ * @reset: Called when a reset of the touch sensor is detected. The routine
+ * should perform any out-of-the-ordinary reset handling that might be
+ * necessary. Restoring of touch sensor configuration registers should be
+ * handled in the config() callback, below.
+ * @config: Called when the function container is first initialized, and
+ * after a reset is detected. This routine should write any necessary
+ * configuration settings to the device.
+ * @attention: Called when the IRQ(s) for the function are set by the touch
+ * sensor.
+ * @suspend: Should perform any required operations to suspend the particular
+ * function.
+ * @resume: Should perform any required operations to resume the particular
+ * function.
+ *
+ * All callbacks are expected to return 0 on success, error code on failure.
+ */
+struct rmi_function_handler {
+ struct device_driver driver;
+
+ u8 func;
+
+ int (*probe)(struct rmi_function *fn);
+ void (*remove)(struct rmi_function *fn);
+ int (*config)(struct rmi_function *fn);
+ int (*reset)(struct rmi_function *fn);
+ int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+ int (*suspend)(struct rmi_function *fn);
+ int (*resume)(struct rmi_function *fn);
+};
+
+#define to_rmi_function_handler(d) \
+ container_of(d, struct rmi_function_handler, driver)
+
+int __must_check __rmi_register_function_handler(struct rmi_function_handler *,
+ struct module *, const char *);
+#define rmi_register_function_handler(handler) \
+ __rmi_register_function_handler(handler, THIS_MODULE, KBUILD_MODNAME)
+
+void rmi_unregister_function_handler(struct rmi_function_handler *);
+
+#define to_rmi_driver(d) \
+ container_of(d, struct rmi_driver, driver)
+
+#define to_rmi_device(d) container_of(d, struct rmi_device, dev)
+
+static inline struct rmi_device_platform_data *
+rmi_get_platform_data(struct rmi_device *d)
+{
+ return &d->xport->pdata;
+}
+
+bool rmi_is_physical_device(struct device *dev);
+
+/**
+ * rmi_read - read a single byte
+ * @d: Pointer to an RMI device
+ * @addr: The address to read from
+ * @buf: The read buffer
+ *
+ * Reads a single byte of data using the underlying transport protocol
+ * into memory pointed to by @buf. It returns 0 on success or a negative
+ * error code.
+ */
+static inline int rmi_read(struct rmi_device *d, u16 addr, u8 *buf)
+{
+ return d->xport->ops->read_block(d->xport, addr, buf, 1);
+}
+
+/**
+ * rmi_read_block - read a block of bytes
+ * @d: Pointer to an RMI device
+ * @addr: The start address to read from
+ * @buf: The read buffer
+ * @len: Length of the read buffer
+ *
+ * Reads a block of byte data using the underlying transport protocol
+ * into memory pointed to by @buf. It returns 0 on success or a negative
+ * error code.
+ */
+static inline int rmi_read_block(struct rmi_device *d, u16 addr,
+ void *buf, size_t len)
+{
+ return d->xport->ops->read_block(d->xport, addr, buf, len);
+}
+
+/**
+ * rmi_write - write a single byte
+ * @d: Pointer to an RMI device
+ * @addr: The address to write to
+ * @data: The data to write
+ *
+ * Writes a single byte using the underlying transport protocol. It
+ * returns zero on success or a negative error code.
+ */
+static inline int rmi_write(struct rmi_device *d, u16 addr, u8 data)
+{
+ return d->xport->ops->write_block(d->xport, addr, &data, 1);
+}
+
+/**
+ * rmi_write_block - write a block of bytes
+ * @d: Pointer to an RMI device
+ * @addr: The start address to write to
+ * @buf: The write buffer
+ * @len: Length of the write buffer
+ *
+ * Writes a block of byte data from @buf using the underlying transport
+ * protocol. It returns 0 on success or a negative error code.
+ */
+static inline int rmi_write_block(struct rmi_device *d, u16 addr,
+ const void *buf, size_t len)
+{
+ return d->xport->ops->write_block(d->xport, addr, buf, len);
+}
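+
+/*
+ * Usage sketch (illustrative only): set a single bit in a hypothetical
+ * control register at @ctrl_addr via read-modify-write with the helpers
+ * above. Assumes BIT() from linux/bitops.h.
+ */
+#if 0
+static int example_set_ctrl_bit(struct rmi_device *d, u16 ctrl_addr, u8 bit)
+{
+	u8 val;
+	int error;
+
+	error = rmi_read(d, ctrl_addr, &val);
+	if (error)
+		return error;
+
+	return rmi_write(d, ctrl_addr, val | BIT(bit));
+}
+#endif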
+
+int rmi_for_each_dev(void *data, int (*func)(struct device *dev, void *data));
+
+extern struct bus_type rmi_bus_type;
+
+int rmi_of_property_read_u32(struct device *dev, u32 *result,
+ const char *prop, bool optional);
+
+#define RMI_DEBUG_CORE BIT(0)
+#define RMI_DEBUG_XPORT BIT(1)
+#define RMI_DEBUG_FN BIT(2)
+#define RMI_DEBUG_2D_SENSOR BIT(3)
+
+void rmi_dbg(int flags, struct device *dev, const char *fmt, ...);
+#endif
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
new file mode 100644
index 000000000000..faa295ec4f31
--- /dev/null
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -0,0 +1,1054 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This driver provides the core support for a single RMI4-based device.
+ *
+ * The RMI4 specification can be found here (URL split for line length):
+ *
+ * http://www.synaptics.com/sites/default/files/
+ * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/kconfig.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <uapi/linux/input.h>
+#include <linux/rmi.h>
+#include "rmi_bus.h"
+#include "rmi_driver.h"
+
+#define HAS_NONSTANDARD_PDT_MASK 0x40
+#define RMI4_MAX_PAGE 0xff
+#define RMI4_PAGE_SIZE 0x100
+#define RMI4_PAGE_MASK 0xFF00
+
+#define RMI_DEVICE_RESET_CMD 0x01
+#define DEFAULT_RESET_DELAY_MS 100
+
+static void rmi_free_function_list(struct rmi_device *rmi_dev)
+{
+ struct rmi_function *fn, *tmp;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+
+ data->f01_container = NULL;
+
+ /* Doing it in the reverse order so F01 will be removed last */
+ list_for_each_entry_safe_reverse(fn, tmp,
+ &data->function_list, node) {
+ list_del(&fn->node);
+ rmi_unregister_function(fn);
+ }
+}
+
+static int reset_one_function(struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+ int retval = 0;
+
+ if (!fn || !fn->dev.driver)
+ return 0;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->reset) {
+ retval = fh->reset(fn);
+ if (retval < 0)
+ dev_err(&fn->dev, "Reset failed with code %d.\n",
+ retval);
+ }
+
+ return retval;
+}
+
+static int configure_one_function(struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+ int retval = 0;
+
+ if (!fn || !fn->dev.driver)
+ return 0;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->config) {
+ retval = fh->config(fn);
+ if (retval < 0)
+ dev_err(&fn->dev, "Config failed with code %d.\n",
+ retval);
+ }
+
+ return retval;
+}
+
+static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_function *entry;
+ int retval;
+
+ list_for_each_entry(entry, &data->function_list, node) {
+ retval = reset_one_function(entry);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_function *entry;
+ int retval;
+
+ list_for_each_entry(entry, &data->function_list, node) {
+ retval = configure_one_function(entry);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static void process_one_interrupt(struct rmi_driver_data *data,
+ struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+
+ if (!fn || !fn->dev.driver)
+ return;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->attention) {
+ bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
+ data->irq_count);
+ if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
+ fh->attention(fn, data->fn_irq_bits);
+ }
+}
+
+int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct device *dev = &rmi_dev->dev;
+ struct rmi_function *entry;
+ int error;
+
+ if (!data)
+ return 0;
+
+ if (!rmi_dev->xport->attn_data) {
+ error = rmi_read_block(rmi_dev,
+ data->f01_container->fd.data_base_addr + 1,
+ data->irq_status, data->num_of_irq_regs);
+ if (error < 0) {
+ dev_err(dev, "Failed to read irqs, code=%d\n", error);
+ return error;
+ }
+ }
+
+ mutex_lock(&data->irq_mutex);
+ bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+ data->irq_count);
+ /*
+ * At this point, irq_status has all bits that are set in the
+ * interrupt status register and are enabled.
+ */
+ mutex_unlock(&data->irq_mutex);
+
+ /*
+ * It would be nice to be able to use irq_chip to handle these
+ * nested IRQs. Unfortunately, most of the current customers for
+ * this driver are using older kernels (3.0.x) that don't support
+ * the features required for that. Once they've shifted to more
+ * recent kernels (say, 3.3 and higher), this should be switched to
+ * use irq_chip.
+ */
+ list_for_each_entry(entry, &data->function_list, node)
+ process_one_interrupt(data, entry);
+
+ if (data->input)
+ input_sync(data->input);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rmi_process_interrupt_requests);
+
+static int suspend_one_function(struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+ int retval = 0;
+
+ if (!fn || !fn->dev.driver)
+ return 0;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->suspend) {
+ retval = fh->suspend(fn);
+ if (retval < 0)
+ dev_err(&fn->dev, "Suspend failed with code %d.\n",
+ retval);
+ }
+
+ return retval;
+}
+
+static int rmi_suspend_functions(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_function *entry;
+ int retval;
+
+ list_for_each_entry(entry, &data->function_list, node) {
+ retval = suspend_one_function(entry);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int resume_one_function(struct rmi_function *fn)
+{
+ struct rmi_function_handler *fh;
+ int retval = 0;
+
+ if (!fn || !fn->dev.driver)
+ return 0;
+
+ fh = to_rmi_function_handler(fn->dev.driver);
+ if (fh->resume) {
+ retval = fh->resume(fn);
+ if (retval < 0)
+ dev_err(&fn->dev, "Resume failed with code %d.\n",
+ retval);
+ }
+
+ return retval;
+}
+
+static int rmi_resume_functions(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_function *entry;
+ int retval;
+
+ list_for_each_entry(entry, &data->function_list, node) {
+ retval = resume_one_function(entry);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int enable_sensor(struct rmi_device *rmi_dev)
+{
+ int retval = 0;
+
+ retval = rmi_driver_process_config_requests(rmi_dev);
+ if (retval < 0)
+ return retval;
+
+ return rmi_process_interrupt_requests(rmi_dev);
+}
+
+/**
+ * rmi_driver_set_input_params - set input device id and other data.
+ *
+ * @rmi_dev: Pointer to an RMI device
+ * @input: Pointer to input device
+ */
+static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
+ struct input_dev *input)
+{
+ input->name = SYNAPTICS_INPUT_DEVICE_NAME;
+ input->id.vendor = SYNAPTICS_VENDOR_ID;
+ input->id.bustype = BUS_RMI;
+ return 0;
+}
+
+static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
+ struct input_dev *input)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ char *device_name = rmi_f01_get_product_ID(data->f01_container);
+ char *name;
+
+ name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
+ "Synaptics %s", device_name);
+ if (!name)
+ return;
+
+ input->name = name;
+}
+
+static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
+ unsigned long *mask)
+{
+ int error = 0;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct device *dev = &rmi_dev->dev;
+
+ mutex_lock(&data->irq_mutex);
+ bitmap_or(data->new_irq_mask,
+ data->current_irq_mask, mask, data->irq_count);
+
+ error = rmi_write_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->new_irq_mask, data->num_of_irq_regs);
+ if (error < 0) {
+		dev_err(dev, "%s: Failed to change enabled interrupts!\n",
+ __func__);
+ goto error_unlock;
+ }
+ bitmap_copy(data->current_irq_mask, data->new_irq_mask,
+ data->num_of_irq_regs);
+
+error_unlock:
+ mutex_unlock(&data->irq_mutex);
+ return error;
+}
+
+static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
+ unsigned long *mask)
+{
+ int error = 0;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct device *dev = &rmi_dev->dev;
+
+ mutex_lock(&data->irq_mutex);
+ bitmap_andnot(data->new_irq_mask,
+ data->current_irq_mask, mask, data->irq_count);
+
+ error = rmi_write_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->new_irq_mask, data->num_of_irq_regs);
+ if (error < 0) {
+		dev_err(dev, "%s: Failed to change enabled interrupts!\n",
+ __func__);
+ goto error_unlock;
+ }
+ bitmap_copy(data->current_irq_mask, data->new_irq_mask,
+ data->num_of_irq_regs);
+
+error_unlock:
+ mutex_unlock(&data->irq_mutex);
+ return error;
+}
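+
+/*
+ * Usage sketch (illustrative only): a function driver would typically
+ * enable its own interrupt sources from its config() callback through the
+ * driver ops, using the per-function irq_mask built during the PDT scan.
+ */
+#if 0
+static int example_fxx_config(struct rmi_function *fn)
+{
+	struct rmi_device *rmi_dev = fn->rmi_dev;
+
+	if (rmi_dev->driver->set_irq_bits)
+		return rmi_dev->driver->set_irq_bits(rmi_dev, fn->irq_mask);
+
+	return 0;
+}
+#endif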
+
+static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int error;
+
+ /*
+ * Can get called before the driver is fully ready to deal with
+ * this situation.
+ */
+ if (!data || !data->f01_container) {
+ dev_warn(&rmi_dev->dev,
+ "Not ready to handle reset yet!\n");
+ return 0;
+ }
+
+ error = rmi_read_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->current_irq_mask, data->num_of_irq_regs);
+ if (error < 0) {
+ dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
+ __func__);
+ return error;
+ }
+
+ error = rmi_driver_process_reset_requests(rmi_dev);
+ if (error < 0)
+ return error;
+
+ error = rmi_driver_process_config_requests(rmi_dev);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
+ u16 pdt_address)
+{
+ u8 buf[RMI_PDT_ENTRY_SIZE];
+ int error;
+
+ error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
+ if (error) {
+ dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
+ pdt_address, error);
+ return error;
+ }
+
+ entry->page_start = pdt_address & RMI4_PAGE_MASK;
+ entry->query_base_addr = buf[0];
+ entry->command_base_addr = buf[1];
+ entry->control_base_addr = buf[2];
+ entry->data_base_addr = buf[3];
+ entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
+ entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
+ entry->function_number = buf[5];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rmi_read_pdt_entry);
+
+static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
+ struct rmi_function_descriptor *fd)
+{
+ fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
+ fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
+ fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
+ fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
+ fd->function_number = pdt->function_number;
+ fd->interrupt_source_count = pdt->interrupt_source_count;
+ fd->function_version = pdt->function_version;
+}
+
+#define RMI_SCAN_CONTINUE 0
+#define RMI_SCAN_DONE 1
+
+static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
+ int page,
+ void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev,
+ void *ctx,
+ const struct pdt_entry *entry))
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ struct pdt_entry pdt_entry;
+ u16 page_start = RMI4_PAGE_SIZE * page;
+ u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
+ u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
+ u16 addr;
+ int error;
+ int retval;
+
+ for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
+ error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
+ if (error)
+ return error;
+
+ if (RMI4_END_OF_PDT(pdt_entry.function_number))
+ break;
+
+ retval = callback(rmi_dev, ctx, &pdt_entry);
+ if (retval != RMI_SCAN_CONTINUE)
+ return retval;
+ }
+
+ return (data->f01_bootloader_mode || addr == pdt_start) ?
+ RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
+}
+
+static int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+ int (*callback)(struct rmi_device *rmi_dev,
+ void *ctx,
+ const struct pdt_entry *entry))
+{
+ int page;
+ int retval = RMI_SCAN_DONE;
+
+ for (page = 0; page <= RMI4_MAX_PAGE; page++) {
+ retval = rmi_scan_pdt_page(rmi_dev, page, ctx, callback);
+ if (retval != RMI_SCAN_CONTINUE)
+ break;
+ }
+
+ return retval < 0 ? retval : 0;
+}
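+
+/*
+ * Illustrative sketch (not part of this patch): a PDT scan callback that
+ * counts discovered functions. Callbacks return RMI_SCAN_CONTINUE to keep
+ * scanning, RMI_SCAN_DONE to stop early, or a negative error code.
+ */
+#if 0
+static int example_count_functions(struct rmi_device *rmi_dev, void *ctx,
+				   const struct pdt_entry *entry)
+{
+	int *count = ctx;
+
+	(*count)++;
+	return RMI_SCAN_CONTINUE;
+}
+
+/* int count = 0; rmi_scan_pdt(rmi_dev, &count, example_count_functions); */
+#endif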
+
+int rmi_read_register_desc(struct rmi_device *d, u16 addr,
+ struct rmi_register_descriptor *rdesc)
+{
+ int ret;
+ u8 size_presence_reg;
+ u8 buf[35];
+ int presense_offset = 1;
+ u8 *struct_buf;
+ int reg;
+ int offset = 0;
+ int map_offset = 0;
+ int i;
+ int b;
+
+	/*
+	 * The first register of the register descriptor is the size of
+	 * the register descriptor's presence register.
+	 */
+ ret = rmi_read(d, addr, &size_presence_reg);
+ if (ret)
+ return ret;
+ ++addr;
+
+	if (size_presence_reg > 35)
+ return -EIO;
+
+ memset(buf, 0, sizeof(buf));
+
+	/*
+	 * The presence register contains the size of the register structure
+	 * and a bitmap which identifies which packet registers are present
+	 * for this particular register type (i.e. query, control, or data).
+	 */
+ ret = rmi_read_block(d, addr, buf, size_presence_reg);
+ if (ret)
+ return ret;
+ ++addr;
+
+ if (buf[0] == 0) {
+ presense_offset = 3;
+ rdesc->struct_size = buf[1] | (buf[2] << 8);
+ } else {
+ rdesc->struct_size = buf[0];
+ }
+
+ for (i = presense_offset; i < size_presence_reg; i++) {
+ for (b = 0; b < 8; b++) {
+ if (buf[i] & (0x1 << b))
+ bitmap_set(rdesc->presense_map, map_offset, 1);
+ ++map_offset;
+ }
+ }
+
+ rdesc->num_registers = bitmap_weight(rdesc->presense_map,
+ RMI_REG_DESC_PRESENSE_BITS);
+
+	rdesc->registers = devm_kcalloc(&d->dev, rdesc->num_registers,
+				sizeof(struct rmi_register_desc_item),
+				GFP_KERNEL);
+ if (!rdesc->registers)
+ return -ENOMEM;
+
+	/*
+	 * Allocate a temporary buffer to hold the register structure.
+	 * devm_kzalloc is not used here since the buffer does not need to
+	 * be retained after this function exits.
+	 */
+ struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
+ if (!struct_buf)
+ return -ENOMEM;
+
+ /*
+ * The register structure contains information about every packet
+ * register of this type. This includes the size of the packet
+ * register and a bitmap of all subpackets contained in the packet
+ * register.
+ */
+ ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
+ if (ret)
+ goto free_struct_buff;
+
+ reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
+ map_offset = 0;
+ for (i = 0; i < rdesc->num_registers; i++) {
+ struct rmi_register_desc_item *item = &rdesc->registers[i];
+ int reg_size = struct_buf[offset];
+
+ ++offset;
+ if (reg_size == 0) {
+ reg_size = struct_buf[offset] |
+ (struct_buf[offset + 1] << 8);
+ offset += 2;
+ }
+
+ if (reg_size == 0) {
+ reg_size = struct_buf[offset] |
+ (struct_buf[offset + 1] << 8) |
+ (struct_buf[offset + 2] << 16) |
+ (struct_buf[offset + 3] << 24);
+ offset += 4;
+ }
+
+ item->reg = reg;
+ item->reg_size = reg_size;
+
+ do {
+ for (b = 0; b < 7; b++) {
+ if (struct_buf[offset] & (0x1 << b))
+ bitmap_set(item->subpacket_map,
+ map_offset, 1);
+ ++map_offset;
+ }
+ } while (struct_buf[offset++] & 0x80);
+
+ item->num_subpackets = bitmap_weight(item->subpacket_map,
+ RMI_REG_DESC_SUBPACKET_BITS);
+
+ rmi_dbg(RMI_DEBUG_CORE, &d->dev,
+		"%s: reg: %d reg size: %lu subpackets: %d\n", __func__,
+ item->reg, item->reg_size, item->num_subpackets);
+
+ reg = find_next_bit(rdesc->presense_map,
+ RMI_REG_DESC_PRESENSE_BITS, reg + 1);
+ }
+
+free_struct_buff:
+ kfree(struct_buf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rmi_read_register_desc);
+
+const struct rmi_register_desc_item *rmi_get_register_desc_item(
+ struct rmi_register_descriptor *rdesc, u16 reg)
+{
+ const struct rmi_register_desc_item *item;
+ int i;
+
+ for (i = 0; i < rdesc->num_registers; i++) {
+ item = &rdesc->registers[i];
+ if (item->reg == reg)
+ return item;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(rmi_get_register_desc_item);
+
+size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
+{
+ const struct rmi_register_desc_item *item;
+ int i;
+ size_t size = 0;
+
+ for (i = 0; i < rdesc->num_registers; i++) {
+ item = &rdesc->registers[i];
+ size += item->reg_size;
+ }
+ return size;
+}
+EXPORT_SYMBOL_GPL(rmi_register_desc_calc_size);
+
+/* Compute the register offset relative to the base address */
+int rmi_register_desc_calc_reg_offset(
+ struct rmi_register_descriptor *rdesc, u16 reg)
+{
+ const struct rmi_register_desc_item *item;
+ int offset = 0;
+ int i;
+
+ for (i = 0; i < rdesc->num_registers; i++) {
+ item = &rdesc->registers[i];
+ if (item->reg == reg)
+ return offset;
+		offset += item->reg_size;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(rmi_register_desc_calc_reg_offset);
+
+bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
+ u8 subpacket)
+{
+	return find_next_bit(item->subpacket_map, RMI_REG_DESC_SUBPACKET_BITS,
+				subpacket) == subpacket;
+}
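+
+/*
+ * Usage sketch (illustrative only): read one packet register described by a
+ * descriptor previously filled in by rmi_read_register_desc(), computing
+ * its address from the function's control base address.
+ */
+#if 0
+static int example_read_packet_reg(struct rmi_function *fn,
+				   struct rmi_register_descriptor *rdesc,
+				   u16 reg, u8 *buf, size_t buf_size)
+{
+	const struct rmi_register_desc_item *item;
+	int offset;
+
+	item = rmi_get_register_desc_item(rdesc, reg);
+	if (!item || item->reg_size > buf_size)
+		return -EINVAL;
+
+	offset = rmi_register_desc_calc_reg_offset(rdesc, reg);
+	if (offset < 0)
+		return -ENODEV;
+
+	return rmi_read_block(fn->rmi_dev,
+			      fn->fd.control_base_addr + offset, buf,
+			      item->reg_size);
+}
+#endif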
+
+/* Indicates that flash programming is enabled (bootloader mode). */
+#define RMI_F01_STATUS_BOOTLOADER(status) (!!((status) & 0x40))
+
+/*
+ * Given the PDT entry for F01, read the device status register to determine
+ * whether we're stuck in bootloader mode.
+ */
+static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
+ const struct pdt_entry *pdt)
+{
+ int error;
+ u8 device_status;
+
+ error = rmi_read(rmi_dev, pdt->data_base_addr + pdt->page_start,
+ &device_status);
+ if (error) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read device status: %d.\n", error);
+ return error;
+ }
+
+ return RMI_F01_STATUS_BOOTLOADER(device_status);
+}
+
+static int rmi_count_irqs(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *pdt)
+{
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int *irq_count = ctx;
+
+ *irq_count += pdt->interrupt_source_count;
+ if (pdt->function_number == 0x01) {
+ data->f01_bootloader_mode =
+ rmi_check_bootloader_mode(rmi_dev, pdt);
+ if (data->f01_bootloader_mode)
+ dev_warn(&rmi_dev->dev,
+ "WARNING: RMI4 device is in bootloader mode!\n");
+ }
+
+ return RMI_SCAN_CONTINUE;
+}
+
+static int rmi_initial_reset(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *pdt)
+{
+ int error;
+
+ if (pdt->function_number == 0x01) {
+ u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
+ u8 cmd_buf = RMI_DEVICE_RESET_CMD;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(rmi_dev);
+
+ if (rmi_dev->xport->ops->reset) {
+ error = rmi_dev->xport->ops->reset(rmi_dev->xport,
+ cmd_addr);
+ if (error)
+ return error;
+
+ return RMI_SCAN_DONE;
+ }
+
+ error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
+ if (error) {
+ dev_err(&rmi_dev->dev,
+ "Initial reset failed. Code = %d.\n", error);
+ return error;
+ }
+
+ mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);
+
+ return RMI_SCAN_DONE;
+ }
+
+ /* F01 should always be on page 0. If we don't find it there, fail. */
+ return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
+}
+
+static int rmi_create_function(struct rmi_device *rmi_dev,
+ void *ctx, const struct pdt_entry *pdt)
+{
+ struct device *dev = &rmi_dev->dev;
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+ int *current_irq_count = ctx;
+ struct rmi_function *fn;
+ int i;
+ int error;
+
+ rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
+ pdt->function_number);
+
+ fn = kzalloc(sizeof(struct rmi_function) +
+ BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!fn) {
+ dev_err(dev, "Failed to allocate memory for F%02X\n",
+ pdt->function_number);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&fn->node);
+ rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);
+
+ fn->rmi_dev = rmi_dev;
+
+ fn->num_of_irqs = pdt->interrupt_source_count;
+ fn->irq_pos = *current_irq_count;
+ *current_irq_count += fn->num_of_irqs;
+
+ for (i = 0; i < fn->num_of_irqs; i++)
+ set_bit(fn->irq_pos + i, fn->irq_mask);
+
+ error = rmi_register_function(fn);
+ if (error)
+ goto err_put_fn;
+
+ if (pdt->function_number == 0x01)
+ data->f01_container = fn;
+
+ list_add_tail(&fn->node, &data->function_list);
+
+ return RMI_SCAN_CONTINUE;
+
+err_put_fn:
+ put_device(&fn->dev);
+ return error;
+}
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev)
+{
+ int retval = 0;
+
+ retval = rmi_suspend_functions(rmi_dev);
+ if (retval)
+ dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
+ retval);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(rmi_driver_suspend);
+
+int rmi_driver_resume(struct rmi_device *rmi_dev)
+{
+ int retval;
+
+ retval = rmi_resume_functions(rmi_dev);
+ if (retval)
+		dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
+ retval);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(rmi_driver_resume);
+
+static int rmi_driver_remove(struct device *dev)
+{
+ struct rmi_device *rmi_dev = to_rmi_device(dev);
+
+ rmi_free_function_list(rmi_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int rmi_driver_of_probe(struct device *dev,
+ struct rmi_device_platform_data *pdata)
+{
+ int retval;
+
+ retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
+ "syna,reset-delay-ms", 1);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+#else
+static inline int rmi_driver_of_probe(struct device *dev,
+ struct rmi_device_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int rmi_driver_probe(struct device *dev)
+{
+ struct rmi_driver *rmi_driver;
+ struct rmi_driver_data *data;
+ struct rmi_device_platform_data *pdata;
+ struct rmi_device *rmi_dev;
+ size_t size;
+ void *irq_memory;
+ int irq_count;
+ int retval;
+
+ rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
+ __func__);
+
+ if (!rmi_is_physical_device(dev)) {
+ rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
+ return -ENODEV;
+ }
+
+ rmi_dev = to_rmi_device(dev);
+ rmi_driver = to_rmi_driver(dev->driver);
+ rmi_dev->driver = rmi_driver;
+
+ pdata = rmi_get_platform_data(rmi_dev);
+
+ if (rmi_dev->xport->dev->of_node) {
+ retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
+ if (retval)
+ return retval;
+ }
+
+ data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&data->function_list);
+ data->rmi_dev = rmi_dev;
+ dev_set_drvdata(&rmi_dev->dev, data);
+
+ /*
+ * Right before a warm boot, the sensor might be in some unusual state,
+ * such as F54 diagnostics, or F34 bootloader mode after a firmware
+ * or configuration update. In order to clear the sensor to a known
+	 * state and/or apply any updates, we issue an initial reset to clear any
+ * previous settings and force it into normal operation.
+ *
+ * We have to do this before actually building the PDT because
+ * the reflash updates (if any) might cause various registers to move
+ * around.
+ *
+ * For a number of reasons, this initial reset may fail to return
+ * within the specified time, but we'll still be able to bring up the
+ * driver normally after that failure. This occurs most commonly in
+	 * a cold boot situation (where the firmware takes longer to come up
+ * than from a warm boot) and the reset_delay_ms in the platform data
+ * has been set too short to accommodate that. Since the sensor will
+ * eventually come up and be usable, we don't want to just fail here
+ * and leave the customer's device unusable. So we warn them, and
+ * continue processing.
+ */
+ retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
+ if (retval < 0)
+ dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");
+
+ retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
+ if (retval < 0) {
+ /*
+ * we'll print out a warning and continue since
+ * failure to get the PDT properties is not a cause to fail
+ */
+ dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
+ PDT_PROPERTIES_LOCATION, retval);
+ }
+
+ /*
+ * We need to count the IRQs and allocate their storage before scanning
+ * the PDT and creating the function entries, because adding a new
+ * function can trigger events that result in the IRQ related storage
+ * being accessed.
+ */
+ rmi_dbg(RMI_DEBUG_CORE, dev, "Counting IRQs.\n");
+ irq_count = 0;
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
+ if (retval < 0) {
+ dev_err(dev, "IRQ counting failed with code %d.\n", retval);
+ goto err;
+ }
+ data->irq_count = irq_count;
+ data->num_of_irq_regs = (data->irq_count + 7) / 8;
+
+ mutex_init(&data->irq_mutex);
+
+ size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
+ irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
+	if (!irq_memory) {
+		dev_err(dev, "Failed to allocate memory for irq masks.\n");
+		retval = -ENOMEM;
+		goto err;
+	}
+
+ data->irq_status = irq_memory + size * 0;
+ data->fn_irq_bits = irq_memory + size * 1;
+ data->current_irq_mask = irq_memory + size * 2;
+ data->new_irq_mask = irq_memory + size * 3;
+
+ if (rmi_dev->xport->input) {
+ /*
+ * The transport driver already has an input device.
+ * In some cases it is preferable to reuse the transport
+		 * device's input device instead of creating a new one here.
+		 * One example is HID touchpads that report "pass-through"
+		 * button events which are not reported by the RMI registers.
+ */
+ data->input = rmi_dev->xport->input;
+ } else {
+ data->input = devm_input_allocate_device(dev);
+ if (!data->input) {
+ dev_err(dev, "%s: Failed to allocate input device.\n",
+ __func__);
+ retval = -ENOMEM;
+ goto err_destroy_functions;
+ }
+ rmi_driver_set_input_params(rmi_dev, data->input);
+ data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
+ "%s/input0", dev_name(dev));
+ }
+
+ irq_count = 0;
+ rmi_dbg(RMI_DEBUG_CORE, dev, "Creating functions.");
+ retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
+ if (retval < 0) {
+ dev_err(dev, "Function creation failed with code %d.\n",
+ retval);
+ goto err_destroy_functions;
+ }
+
+ if (!data->f01_container) {
+ dev_err(dev, "Missing F01 container!\n");
+ retval = -EINVAL;
+ goto err_destroy_functions;
+ }
+
+ retval = rmi_read_block(rmi_dev,
+ data->f01_container->fd.control_base_addr + 1,
+ data->current_irq_mask, data->num_of_irq_regs);
+ if (retval < 0) {
+ dev_err(dev, "%s: Failed to read current IRQ mask.\n",
+ __func__);
+ goto err_destroy_functions;
+ }
+
+ if (data->input) {
+ rmi_driver_set_input_name(rmi_dev, data->input);
+		if (!rmi_dev->xport->input) {
+			retval = input_register_device(data->input);
+			if (retval) {
+				dev_err(dev, "%s: Failed to register input device.\n",
+					__func__);
+				goto err_destroy_functions;
+			}
+		}
+ }
+
+ if (data->f01_container->dev.driver)
+ /* Driver already bound, so enable ATTN now. */
+ return enable_sensor(rmi_dev);
+
+ return 0;
+
+err_destroy_functions:
+ rmi_free_function_list(rmi_dev);
+err:
+ return retval < 0 ? retval : 0;
+}
+
+static struct rmi_driver rmi_physical_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rmi4_physical",
+ .bus = &rmi_bus_type,
+ .probe = rmi_driver_probe,
+ .remove = rmi_driver_remove,
+ },
+ .reset_handler = rmi_driver_reset_handler,
+ .clear_irq_bits = rmi_driver_clear_irq_bits,
+ .set_irq_bits = rmi_driver_set_irq_bits,
+ .set_input_params = rmi_driver_set_input_params,
+};
+
+bool rmi_is_physical_driver(struct device_driver *drv)
+{
+ return drv == &rmi_physical_driver.driver;
+}
+
+int __init rmi_register_physical_driver(void)
+{
+ int error;
+
+ error = driver_register(&rmi_physical_driver.driver);
+ if (error) {
+ pr_err("%s: driver register failed, code=%d.\n", __func__,
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+void __exit rmi_unregister_physical_driver(void)
+{
+ driver_unregister(&rmi_physical_driver.driver);
+}
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
new file mode 100644
index 000000000000..6e140fa3cce1
--- /dev/null
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_DRIVER_H
+#define _RMI_DRIVER_H
+
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/input.h>
+#include "rmi_bus.h"
+
+#define RMI_DRIVER_VERSION "2.0"
+
+#define SYNAPTICS_INPUT_DEVICE_NAME "Synaptics RMI4 Touch Sensor"
+#define SYNAPTICS_VENDOR_ID 0x06cb
+
+#define GROUP(_attrs) { \
+ .attrs = _attrs, \
+}
+
+#define PDT_PROPERTIES_LOCATION 0x00EF
+#define BSR_LOCATION 0x00FE
+
+#define RMI_PDT_PROPS_HAS_BSR 0x02
+
+#define NAME_BUFFER_SIZE 256
+
+#define RMI_PDT_ENTRY_SIZE 6
+#define RMI_PDT_FUNCTION_VERSION_MASK 0x60
+#define RMI_PDT_INT_SOURCE_COUNT_MASK 0x07
+
+#define PDT_START_SCAN_LOCATION 0x00e9
+#define PDT_END_SCAN_LOCATION 0x0005
+#define RMI4_END_OF_PDT(id) ((id) == 0x00 || (id) == 0xff)
+
+struct pdt_entry {
+ u16 page_start;
+ u8 query_base_addr;
+ u8 command_base_addr;
+ u8 control_base_addr;
+ u8 data_base_addr;
+ u8 interrupt_source_count;
+ u8 function_version;
+ u8 function_number;
+};
+
+int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
+ u16 pdt_address);
+
+#define RMI_REG_DESC_PRESENSE_BITS (32 * BITS_PER_BYTE)
+#define RMI_REG_DESC_SUBPACKET_BITS (37 * BITS_PER_BYTE)
+
+/* describes a single packet register */
+struct rmi_register_desc_item {
+ u16 reg;
+ unsigned long reg_size;
+ u8 num_subpackets;
+ unsigned long subpacket_map[BITS_TO_LONGS(
+ RMI_REG_DESC_SUBPACKET_BITS)];
+};
+
+/*
+ * describes the packet registers for a particular type
+ * (ie query, control, data)
+ */
+struct rmi_register_descriptor {
+ unsigned long struct_size;
+ unsigned long presense_map[BITS_TO_LONGS(RMI_REG_DESC_PRESENSE_BITS)];
+ u8 num_registers;
+ struct rmi_register_desc_item *registers;
+};
+
+int rmi_read_register_desc(struct rmi_device *d, u16 addr,
+ struct rmi_register_descriptor *rdesc);
+const struct rmi_register_desc_item *rmi_get_register_desc_item(
+ struct rmi_register_descriptor *rdesc, u16 reg);
+
+/*
+ * Calculate the total size of all of the registers described in the
+ * descriptor.
+ */
+size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc);
+int rmi_register_desc_calc_reg_offset(
+ struct rmi_register_descriptor *rdesc, u16 reg);
+bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
+ u8 subpacket);
+
+bool rmi_is_physical_driver(struct device_driver *);
+int rmi_register_physical_driver(void);
+void rmi_unregister_physical_driver(void);
+
+char *rmi_f01_get_product_ID(struct rmi_function *fn);
+
+extern struct rmi_function_handler rmi_f01_handler;
+extern struct rmi_function_handler rmi_f11_handler;
+extern struct rmi_function_handler rmi_f12_handler;
+extern struct rmi_function_handler rmi_f30_handler;
+#endif
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
new file mode 100644
index 000000000000..eb362bc71a4c
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kconfig.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include "rmi_driver.h"
+
+#define RMI_PRODUCT_ID_LENGTH 10
+#define RMI_PRODUCT_INFO_LENGTH 2
+
+#define RMI_DATE_CODE_LENGTH 3
+
+#define PRODUCT_ID_OFFSET 0x10
+#define PRODUCT_INFO_OFFSET 0x1E
+
+
+/* Force a firmware reset of the sensor */
+#define RMI_F01_CMD_DEVICE_RESET 1
+
+/* Various F01_RMI_QueryX bits */
+
+#define RMI_F01_QRY1_CUSTOM_MAP BIT(0)
+#define RMI_F01_QRY1_NON_COMPLIANT BIT(1)
+#define RMI_F01_QRY1_HAS_LTS BIT(2)
+#define RMI_F01_QRY1_HAS_SENSOR_ID BIT(3)
+#define RMI_F01_QRY1_HAS_CHARGER_INP BIT(4)
+#define RMI_F01_QRY1_HAS_ADJ_DOZE BIT(5)
+#define RMI_F01_QRY1_HAS_ADJ_DOZE_HOFF BIT(6)
+#define RMI_F01_QRY1_HAS_QUERY42 BIT(7)
+
+#define RMI_F01_QRY5_YEAR_MASK 0x1f
+#define RMI_F01_QRY6_MONTH_MASK 0x0f
+#define RMI_F01_QRY7_DAY_MASK 0x1f
+
+#define RMI_F01_QRY2_PRODINFO_MASK 0x7f
+
+#define RMI_F01_BASIC_QUERY_LEN 21 /* From Query 00 through 20 */
+
+struct f01_basic_properties {
+ u8 manufacturer_id;
+ bool has_lts;
+ bool has_adjustable_doze;
+ bool has_adjustable_doze_holdoff;
+ char dom[11]; /* YYYY/MM/DD + '\0' */
+ u8 product_id[RMI_PRODUCT_ID_LENGTH + 1];
+ u16 productinfo;
+ u32 firmware_id;
+};
+
+/* F01 device status bits */
+
+/* Most recent device status event */
+#define RMI_F01_STATUS_CODE(status) ((status) & 0x0f)
+/* The device has lost its configuration for some reason. */
+#define RMI_F01_STATUS_UNCONFIGURED(status) (!!((status) & 0x80))
+
+/* Control register bits */
+
+/*
+ * Sleep mode controls power management on the device and affects all
+ * functions of the device.
+ */
+#define RMI_F01_CTRL0_SLEEP_MODE_MASK 0x03
+
+#define RMI_SLEEP_MODE_NORMAL 0x00
+#define RMI_SLEEP_MODE_SENSOR_SLEEP 0x01
+#define RMI_SLEEP_MODE_RESERVED0 0x02
+#define RMI_SLEEP_MODE_RESERVED1 0x03
+
+/*
+ * This bit disables whatever sleep mode may be selected by the sleep_mode
+ * field and forces the device to run at full power without sleeping.
+ */
+#define RMI_F01_CRTL0_NOSLEEP_BIT BIT(2)
+
+/*
+ * When this bit is set, the touch controller employs a noise-filtering
+ * algorithm designed for use with a connected battery charger.
+ */
+#define RMI_F01_CRTL0_CHARGER_BIT BIT(5)
+
+/*
+ * Sets the report rate for the device. The effect of this setting is
+ * highly product dependent. Check the spec sheet for your particular
+ * touch sensor.
+ */
+#define RMI_F01_CRTL0_REPORTRATE_BIT BIT(6)
+
+/*
+ * Written by the host as an indicator that the device has been
+ * successfully configured.
+ */
+#define RMI_F01_CRTL0_CONFIGURED_BIT BIT(7)
+
+/**
+ * struct f01_device_control - the F01 device control register set.
+ * @ctrl0 - see the bit definitions above.
+ * @doze_interval - controls the interval between checks for finger presence
+ * when the touch sensor is in doze mode, in units of 10ms.
+ * @wakeup_threshold - controls the capacitance threshold at which the touch
+ * sensor will decide to wake up from that low power state.
+ * @doze_holdoff - controls how long the touch sensor waits after the last
+ * finger lifts before entering the doze state, in units of 100ms.
+ */
+struct f01_device_control {
+ u8 ctrl0;
+ u8 doze_interval;
+ u8 wakeup_threshold;
+ u8 doze_holdoff;
+};
+
+struct f01_data {
+ struct f01_basic_properties properties;
+ struct f01_device_control device_control;
+
+ u16 doze_interval_addr;
+ u16 wakeup_threshold_addr;
+ u16 doze_holdoff_addr;
+
+ bool suspended;
+ bool old_nosleep;
+
+ unsigned int num_of_irq_regs;
+};
+
+static int rmi_f01_read_properties(struct rmi_device *rmi_dev,
+ u16 query_base_addr,
+ struct f01_basic_properties *props)
+{
+ u8 queries[RMI_F01_BASIC_QUERY_LEN];
+ int ret;
+ int query_offset = query_base_addr;
+ bool has_ds4_queries = false;
+ bool has_query42 = false;
+ bool has_sensor_id = false;
+ bool has_package_id_query = false;
+ bool has_build_id_query = false;
+ u16 prod_info_addr;
+ u8 ds4_query_len;
+
+ ret = rmi_read_block(rmi_dev, query_offset,
+ queries, RMI_F01_BASIC_QUERY_LEN);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read device query registers: %d\n", ret);
+ return ret;
+ }
+
+ prod_info_addr = query_offset + 17;
+ query_offset += RMI_F01_BASIC_QUERY_LEN;
+
+ /* Now parse what we got */
+ props->manufacturer_id = queries[0];
+
+ props->has_lts = queries[1] & RMI_F01_QRY1_HAS_LTS;
+ props->has_adjustable_doze =
+ queries[1] & RMI_F01_QRY1_HAS_ADJ_DOZE;
+ props->has_adjustable_doze_holdoff =
+ queries[1] & RMI_F01_QRY1_HAS_ADJ_DOZE_HOFF;
+ has_query42 = queries[1] & RMI_F01_QRY1_HAS_QUERY42;
+ has_sensor_id = queries[1] & RMI_F01_QRY1_HAS_SENSOR_ID;
+
+ snprintf(props->dom, sizeof(props->dom), "20%02d/%02d/%02d",
+ queries[5] & RMI_F01_QRY5_YEAR_MASK,
+ queries[6] & RMI_F01_QRY6_MONTH_MASK,
+ queries[7] & RMI_F01_QRY7_DAY_MASK);
+
+ memcpy(props->product_id, &queries[11],
+ RMI_PRODUCT_ID_LENGTH);
+ props->product_id[RMI_PRODUCT_ID_LENGTH] = '\0';
+
+ props->productinfo =
+ ((queries[2] & RMI_F01_QRY2_PRODINFO_MASK) << 7) |
+ (queries[3] & RMI_F01_QRY2_PRODINFO_MASK);
+
+ if (has_sensor_id)
+ query_offset++;
+
+ if (has_query42) {
+ ret = rmi_read(rmi_dev, query_offset, queries);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read query 42 register: %d\n", ret);
+ return ret;
+ }
+
+ has_ds4_queries = !!(queries[0] & BIT(0));
+ query_offset++;
+ }
+
+ if (has_ds4_queries) {
+ ret = rmi_read(rmi_dev, query_offset, &ds4_query_len);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read DS4 queries length: %d\n", ret);
+ return ret;
+ }
+ query_offset++;
+
+ if (ds4_query_len > 0) {
+ ret = rmi_read(rmi_dev, query_offset, queries);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read DS4 queries: %d\n",
+ ret);
+ return ret;
+ }
+
+ has_package_id_query = !!(queries[0] & BIT(0));
+ has_build_id_query = !!(queries[0] & BIT(1));
+ }
+
+ if (has_package_id_query)
+ prod_info_addr++;
+
+ if (has_build_id_query) {
+ ret = rmi_read_block(rmi_dev, prod_info_addr, queries,
+ 3);
+ if (ret) {
+ dev_err(&rmi_dev->dev,
+ "Failed to read product info: %d\n",
+ ret);
+ return ret;
+ }
+
+ props->firmware_id = queries[1] << 8 | queries[0];
+ props->firmware_id += queries[2] * 65536;
+ }
+ }
+
+ return 0;
+}
+
+char *rmi_f01_get_product_ID(struct rmi_function *fn)
+{
+ struct f01_data *f01 = dev_get_drvdata(&fn->dev);
+
+ return f01->properties.product_id;
+}
+
+#ifdef CONFIG_OF
+static int rmi_f01_of_probe(struct device *dev,
+ struct rmi_device_platform_data *pdata)
+{
+ int retval;
+ u32 val;
+
+ retval = rmi_of_property_read_u32(dev,
+ (u32 *)&pdata->power_management.nosleep,
+ "syna,nosleep-mode", 1);
+ if (retval)
+ return retval;
+
+ retval = rmi_of_property_read_u32(dev, &val,
+ "syna,wakeup-threshold", 1);
+ if (retval)
+ return retval;
+
+ pdata->power_management.wakeup_threshold = val;
+
+ retval = rmi_of_property_read_u32(dev, &val,
+ "syna,doze-holdoff-ms", 1);
+ if (retval)
+ return retval;
+
+ pdata->power_management.doze_holdoff = val * 100;
+
+ retval = rmi_of_property_read_u32(dev, &val,
+ "syna,doze-interval-ms", 1);
+ if (retval)
+ return retval;
+
+ pdata->power_management.doze_interval = val / 10;
+
+ return 0;
+}
+#else
+static inline int rmi_f01_of_probe(struct device *dev,
+ struct rmi_device_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int rmi_f01_probe(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *driver_data = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct f01_data *f01;
+ int error;
+ u16 ctrl_base_addr = fn->fd.control_base_addr;
+ u8 device_status;
+ u8 temp;
+
+ if (fn->dev.of_node) {
+ error = rmi_f01_of_probe(&fn->dev, pdata);
+ if (error)
+ return error;
+ }
+
+ f01 = devm_kzalloc(&fn->dev, sizeof(struct f01_data), GFP_KERNEL);
+ if (!f01)
+ return -ENOMEM;
+
+ f01->num_of_irq_regs = driver_data->num_of_irq_regs;
+
+ /*
+ * Set the configured bit and (optionally) other important stuff
+ * in the device control register.
+ */
+
+ error = rmi_read(rmi_dev, fn->fd.control_base_addr,
+ &f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev, "Failed to read F01 control: %d\n", error);
+ return error;
+ }
+
+ switch (pdata->power_management.nosleep) {
+ case RMI_F01_NOSLEEP_DEFAULT:
+ break;
+ case RMI_F01_NOSLEEP_OFF:
+ f01->device_control.ctrl0 &= ~RMI_F01_CRTL0_NOSLEEP_BIT;
+ break;
+ case RMI_F01_NOSLEEP_ON:
+ f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+ break;
+ }
+
+ /*
+ * Sleep mode might be set as a hangover from a system crash or
+ * reboot without power cycle. If so, clear it so the sensor
+ * is certain to function.
+ */
+ if ((f01->device_control.ctrl0 & RMI_F01_CTRL0_SLEEP_MODE_MASK) !=
+ RMI_SLEEP_MODE_NORMAL) {
+ dev_warn(&fn->dev,
+ "WARNING: Non-zero sleep mode found. Clearing...\n");
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
+ }
+
+ f01->device_control.ctrl0 |= RMI_F01_CRTL0_CONFIGURED_BIT;
+
+ error = rmi_write(rmi_dev, fn->fd.control_base_addr,
+ f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev, "Failed to write F01 control: %d\n", error);
+ return error;
+ }
+
+ /* Dummy read in order to clear irqs */
+ error = rmi_read(rmi_dev, fn->fd.data_base_addr + 1, &temp);
+ if (error < 0) {
+ dev_err(&fn->dev, "Failed to read Interrupt Status.\n");
+ return error;
+ }
+
+ error = rmi_f01_read_properties(rmi_dev, fn->fd.query_base_addr,
+ &f01->properties);
+ if (error < 0) {
+ dev_err(&fn->dev, "Failed to read F01 properties.\n");
+ return error;
+ }
+
+ dev_info(&fn->dev, "found RMI device, manufacturer: %s, product: %s, fw id: %d\n",
+ f01->properties.manufacturer_id == 1 ? "Synaptics" : "unknown",
+ f01->properties.product_id, f01->properties.firmware_id);
+
+ /* Advance to interrupt control registers, then skip over them. */
+ ctrl_base_addr++;
+ ctrl_base_addr += f01->num_of_irq_regs;
+
+ /* read control register */
+ if (f01->properties.has_adjustable_doze) {
+ f01->doze_interval_addr = ctrl_base_addr;
+ ctrl_base_addr++;
+
+ if (pdata->power_management.doze_interval) {
+ f01->device_control.doze_interval =
+ pdata->power_management.doze_interval;
+ error = rmi_write(rmi_dev, f01->doze_interval_addr,
+ f01->device_control.doze_interval);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to configure F01 doze interval register: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = rmi_read(rmi_dev, f01->doze_interval_addr,
+ &f01->device_control.doze_interval);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to read F01 doze interval register: %d\n",
+ error);
+ return error;
+ }
+ }
+
+ f01->wakeup_threshold_addr = ctrl_base_addr;
+ ctrl_base_addr++;
+
+ if (pdata->power_management.wakeup_threshold) {
+ f01->device_control.wakeup_threshold =
+ pdata->power_management.wakeup_threshold;
+ error = rmi_write(rmi_dev, f01->wakeup_threshold_addr,
+ f01->device_control.wakeup_threshold);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to configure F01 wakeup threshold register: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = rmi_read(rmi_dev, f01->wakeup_threshold_addr,
+ &f01->device_control.wakeup_threshold);
+ if (error < 0) {
+ dev_err(&fn->dev,
+ "Failed to read F01 wakeup threshold register: %d\n",
+ error);
+ return error;
+ }
+ }
+ }
+
+ if (f01->properties.has_lts)
+ ctrl_base_addr++;
+
+ if (f01->properties.has_adjustable_doze_holdoff) {
+ f01->doze_holdoff_addr = ctrl_base_addr;
+ ctrl_base_addr++;
+
+ if (pdata->power_management.doze_holdoff) {
+ f01->device_control.doze_holdoff =
+ pdata->power_management.doze_holdoff;
+ error = rmi_write(rmi_dev, f01->doze_holdoff_addr,
+ f01->device_control.doze_holdoff);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to configure F01 doze holdoff register: %d\n",
+ error);
+ return error;
+ }
+ } else {
+ error = rmi_read(rmi_dev, f01->doze_holdoff_addr,
+ &f01->device_control.doze_holdoff);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to read F01 doze holdoff register: %d\n",
+ error);
+ return error;
+ }
+ }
+ }
+
+ error = rmi_read(rmi_dev, fn->fd.data_base_addr, &device_status);
+ if (error < 0) {
+ dev_err(&fn->dev,
+ "Failed to read device status: %d\n", error);
+ return error;
+ }
+
+ if (RMI_F01_STATUS_UNCONFIGURED(device_status)) {
+ dev_err(&fn->dev,
+ "Device was reset during configuration process, status: %#02x!\n",
+ RMI_F01_STATUS_CODE(device_status));
+ return -EINVAL;
+ }
+
+ dev_set_drvdata(&fn->dev, f01);
+
+ return 0;
+}
+
+static int rmi_f01_config(struct rmi_function *fn)
+{
+ struct f01_data *f01 = dev_get_drvdata(&fn->dev);
+ int error;
+
+ error = rmi_write(fn->rmi_dev, fn->fd.control_base_addr,
+ f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to write device_control register: %d\n", error);
+ return error;
+ }
+
+ if (f01->properties.has_adjustable_doze) {
+ error = rmi_write(fn->rmi_dev, f01->doze_interval_addr,
+ f01->device_control.doze_interval);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to write doze interval: %d\n", error);
+ return error;
+ }
+
+ error = rmi_write_block(fn->rmi_dev,
+ f01->wakeup_threshold_addr,
+ &f01->device_control.wakeup_threshold,
+ sizeof(u8));
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to write wakeup threshold: %d\n",
+ error);
+ return error;
+ }
+ }
+
+ if (f01->properties.has_adjustable_doze_holdoff) {
+ error = rmi_write(fn->rmi_dev, f01->doze_holdoff_addr,
+ f01->device_control.doze_holdoff);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to write doze holdoff: %d\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f01_suspend(struct rmi_function *fn)
+{
+ struct f01_data *f01 = dev_get_drvdata(&fn->dev);
+ int error;
+
+ f01->old_nosleep =
+ f01->device_control.ctrl0 & RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 &= ~RMI_F01_CRTL0_NOSLEEP_BIT;
+
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
+ if (device_may_wakeup(fn->rmi_dev->xport->dev))
+ f01->device_control.ctrl0 |= RMI_SLEEP_MODE_RESERVED1;
+ else
+ f01->device_control.ctrl0 |= RMI_SLEEP_MODE_SENSOR_SLEEP;
+
+ error = rmi_write(fn->rmi_dev, fn->fd.control_base_addr,
+ f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev, "Failed to write sleep mode: %d.\n", error);
+ if (f01->old_nosleep)
+ f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
+ f01->device_control.ctrl0 |= RMI_SLEEP_MODE_NORMAL;
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f01_resume(struct rmi_function *fn)
+{
+ struct f01_data *f01 = dev_get_drvdata(&fn->dev);
+ int error;
+
+ if (f01->old_nosleep)
+ f01->device_control.ctrl0 |= RMI_F01_CRTL0_NOSLEEP_BIT;
+
+ f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_SLEEP_MODE_MASK;
+ f01->device_control.ctrl0 |= RMI_SLEEP_MODE_NORMAL;
+
+ error = rmi_write(fn->rmi_dev, fn->fd.control_base_addr,
+ f01->device_control.ctrl0);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to restore normal operation: %d.\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f01_attention(struct rmi_function *fn,
+ unsigned long *irq_bits)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error;
+ u8 device_status;
+
+ error = rmi_read(rmi_dev, fn->fd.data_base_addr, &device_status);
+ if (error) {
+ dev_err(&fn->dev,
+ "Failed to read device status: %d.\n", error);
+ return error;
+ }
+
+ if (RMI_F01_STATUS_UNCONFIGURED(device_status)) {
+ dev_warn(&fn->dev, "Device reset detected.\n");
+ error = rmi_dev->driver->reset_handler(rmi_dev);
+ if (error) {
+ dev_err(&fn->dev, "Device reset failed: %d\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f01_handler = {
+ .driver = {
+ .name = "rmi4_f01",
+		/*
+		 * Do not allow user unbinding of F01 as it is a critical
+		 * function.
+		 */
+ .suppress_bind_attrs = true,
+ },
+ .func = 0x01,
+ .probe = rmi_f01_probe,
+ .config = rmi_f01_config,
+ .attention = rmi_f01_attention,
+ .suspend = rmi_f01_suspend,
+ .resume = rmi_f01_resume,
+};
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
new file mode 100644
index 000000000000..ec8a10d53288
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -0,0 +1,1317 @@
+/*
+ * Copyright (c) 2011-2015 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/kconfig.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include "rmi_driver.h"
+#include "rmi_2d_sensor.h"
+
+#define F11_MAX_NUM_OF_FINGERS 10
+#define F11_MAX_NUM_OF_TOUCH_SHAPES 16
+
+#define FINGER_STATE_MASK 0x03
+
+#define F11_CTRL_SENSOR_MAX_X_POS_OFFSET 6
+#define F11_CTRL_SENSOR_MAX_Y_POS_OFFSET 8
+
+#define DEFAULT_XY_MAX 9999
+#define DEFAULT_MAX_ABS_MT_PRESSURE 255
+#define DEFAULT_MAX_ABS_MT_TOUCH 15
+#define DEFAULT_MAX_ABS_MT_ORIENTATION 1
+#define DEFAULT_MIN_ABS_MT_TRACKING_ID 1
+#define DEFAULT_MAX_ABS_MT_TRACKING_ID 10
+
+/**
+ * A note about RMI4 F11 register structure.
+ *
+ * The properties for a given sensor are described by its query registers.
+ * The number of query registers and the layout of their contents are
+ * described by the F11 device queries as well as the sensor query
+ * information.
+ *
+ * Similarly, each sensor has control registers that govern its behavior. The
+ * size and layout of the control registers for a given sensor can be determined
+ * by parsing that sensor's query registers.
+ *
+ * Likewise, each sensor has data registers where it reports its touch data
+ * and other interesting stuff. The size and layout of a sensor's data
+ * registers must be determined by parsing its query registers.
+ *
+ * The short story is that we need to read and parse a lot of query
+ * registers in order to determine the attributes of a sensor. Then
+ * we need to use that data to compute the size of the control and data
+ * registers for the sensor.
+ *
+ * The end result is that we have a number of structs that aren't used to
+ * directly generate the input events, but their size, location and contents
+ * are critical to determining where the data we are interested in lives.
+ *
+ * At this time, the driver does not yet comprehend all possible F11
+ * configuration options, but it should be sufficient to cover 99% of RMI4 F11
+ * devices currently in the field.
+ */
+
+/* maximum ABS_MT_POSITION displacement (in mm) */
+#define DMAX 10
+
+/**
+ * @rezero - writing this to the F11 command register will cause the sensor to
+ * calibrate to the current capacitive state.
+ */
+#define RMI_F11_REZERO 0x01
+
+#define RMI_F11_HAS_QUERY9 (1 << 3)
+#define RMI_F11_HAS_QUERY11 (1 << 4)
+#define RMI_F11_HAS_QUERY12 (1 << 5)
+#define RMI_F11_HAS_QUERY27 (1 << 6)
+#define RMI_F11_HAS_QUERY28 (1 << 7)
+
+/** Defs for Query 1 */
+
+#define RMI_F11_NR_FINGERS_MASK 0x07
+#define RMI_F11_HAS_REL (1 << 3)
+#define RMI_F11_HAS_ABS (1 << 4)
+#define RMI_F11_HAS_GESTURES (1 << 5)
+#define RMI_F11_HAS_SENSITIVITY_ADJ (1 << 6)
+#define RMI_F11_CONFIGURABLE (1 << 7)
+
+/** Defs for Query 2, 3, and 4. */
+#define RMI_F11_NR_ELECTRODES_MASK 0x7F
+
+/** Defs for Query 5 */
+
+#define RMI_F11_ABS_DATA_SIZE_MASK 0x03
+#define RMI_F11_HAS_ANCHORED_FINGER (1 << 2)
+#define RMI_F11_HAS_ADJ_HYST (1 << 3)
+#define RMI_F11_HAS_DRIBBLE (1 << 4)
+#define RMI_F11_HAS_BENDING_CORRECTION (1 << 5)
+#define RMI_F11_HAS_LARGE_OBJECT_SUPPRESSION (1 << 6)
+#define RMI_F11_HAS_JITTER_FILTER (1 << 7)
+
+/** Defs for Query 7 */
+#define RMI_F11_HAS_SINGLE_TAP (1 << 0)
+#define RMI_F11_HAS_TAP_AND_HOLD (1 << 1)
+#define RMI_F11_HAS_DOUBLE_TAP (1 << 2)
+#define RMI_F11_HAS_EARLY_TAP (1 << 3)
+#define RMI_F11_HAS_FLICK (1 << 4)
+#define RMI_F11_HAS_PRESS (1 << 5)
+#define RMI_F11_HAS_PINCH (1 << 6)
+#define RMI_F11_HAS_CHIRAL (1 << 7)
+
+/** Defs for Query 8 */
+#define RMI_F11_HAS_PALM_DET (1 << 0)
+#define RMI_F11_HAS_ROTATE (1 << 1)
+#define RMI_F11_HAS_TOUCH_SHAPES (1 << 2)
+#define RMI_F11_HAS_SCROLL_ZONES (1 << 3)
+#define RMI_F11_HAS_INDIVIDUAL_SCROLL_ZONES (1 << 4)
+#define RMI_F11_HAS_MF_SCROLL (1 << 5)
+#define RMI_F11_HAS_MF_EDGE_MOTION (1 << 6)
+#define RMI_F11_HAS_MF_SCROLL_INERTIA (1 << 7)
+
+/** Defs for Query 9. */
+#define RMI_F11_HAS_PEN (1 << 0)
+#define RMI_F11_HAS_PROXIMITY (1 << 1)
+#define RMI_F11_HAS_PALM_DET_SENSITIVITY (1 << 2)
+#define RMI_F11_HAS_SUPPRESS_ON_PALM_DETECT (1 << 3)
+#define RMI_F11_HAS_TWO_PEN_THRESHOLDS (1 << 4)
+#define RMI_F11_HAS_CONTACT_GEOMETRY (1 << 5)
+#define RMI_F11_HAS_PEN_HOVER_DISCRIMINATION (1 << 6)
+#define RMI_F11_HAS_PEN_FILTERS (1 << 7)
+
+/** Defs for Query 10. */
+#define RMI_F11_NR_TOUCH_SHAPES_MASK 0x1F
+
+/** Defs for Query 11 */
+
+#define RMI_F11_HAS_Z_TUNING (1 << 0)
+#define RMI_F11_HAS_ALGORITHM_SELECTION (1 << 1)
+#define RMI_F11_HAS_W_TUNING (1 << 2)
+#define RMI_F11_HAS_PITCH_INFO (1 << 3)
+#define RMI_F11_HAS_FINGER_SIZE (1 << 4)
+#define RMI_F11_HAS_SEGMENTATION_AGGRESSIVENESS (1 << 5)
+#define RMI_F11_HAS_XY_CLIP (1 << 6)
+#define RMI_F11_HAS_DRUMMING_FILTER (1 << 7)
+
+/** Defs for Query 12. */
+
+#define RMI_F11_HAS_GAPLESS_FINGER (1 << 0)
+#define RMI_F11_HAS_GAPLESS_FINGER_TUNING (1 << 1)
+#define RMI_F11_HAS_8BIT_W (1 << 2)
+#define RMI_F11_HAS_ADJUSTABLE_MAPPING (1 << 3)
+#define RMI_F11_HAS_INFO2 (1 << 4)
+#define RMI_F11_HAS_PHYSICAL_PROPS (1 << 5)
+#define RMI_F11_HAS_FINGER_LIMIT (1 << 6)
+#define RMI_F11_HAS_LINEAR_COEFF (1 << 7)
+
+/** Defs for Query 13. */
+
+#define RMI_F11_JITTER_WINDOW_MASK 0x1F
+#define RMI_F11_JITTER_FILTER_MASK 0x60
+#define RMI_F11_JITTER_FILTER_SHIFT 5
+
+/** Defs for Query 14. */
+#define RMI_F11_LIGHT_CONTROL_MASK 0x03
+#define RMI_F11_IS_CLEAR (1 << 2)
+#define RMI_F11_CLICKPAD_PROPS_MASK 0x18
+#define RMI_F11_CLICKPAD_PROPS_SHIFT 3
+#define RMI_F11_MOUSE_BUTTONS_MASK 0x60
+#define RMI_F11_MOUSE_BUTTONS_SHIFT 5
+#define RMI_F11_HAS_ADVANCED_GESTURES (1 << 7)
+
+#define RMI_F11_QUERY_SIZE 4
+#define RMI_F11_QUERY_GESTURE_SIZE 2
+
+#define F11_LIGHT_CTL_NONE 0x00
+#define F11_LUXPAD 0x01
+#define F11_DUAL_MODE 0x02
+
+#define F11_NOT_CLICKPAD 0x00
+#define F11_HINGED_CLICKPAD 0x01
+#define F11_UNIFORM_CLICKPAD 0x02
+
+/**
+ * Query registers 1 through 4 are always present.
+ *
+ * @nr_fingers - describes the maximum number of fingers the 2-D sensor
+ * supports.
+ * @has_rel - the sensor supports relative motion reporting.
+ * @has_abs - the sensor supports absolute position reporting.
+ * @has_gestures - the sensor supports gesture reporting.
+ * @has_sensitivity_adjust - the sensor supports a global sensitivity
+ * adjustment.
+ * @configurable - the sensor supports various configuration options.
+ * @num_of_x_electrodes - the maximum number of electrodes the 2-D sensor
+ * supports on the X axis.
+ * @num_of_y_electrodes - the maximum number of electrodes the 2-D sensor
+ * supports on the Y axis.
+ * @max_electrodes - the total number of X and Y electrodes that may be
+ * configured.
+ *
+ * Query 5 is present if the has_abs bit is set.
+ *
+ * @abs_data_size - describes the format of data reported by the absolute
+ * data source. Only one format (the kind used here) is supported at this
+ * time.
+ * @has_anchored_finger - the sensor supports the high-precision second
+ * finger tracking provided by the manual tracking and motion sensitivity
+ * options.
+ * @has_adjust_hyst - the difference between the finger release threshold and
+ * the touch threshold.
+ * @has_dribble - the sensor supports the generation of dribble interrupts,
+ * which may be enabled or disabled with the dribble control bit.
+ * @has_bending_correction - Bending related data registers 28 and 36, and
+ * control register 52..57 are present.
+ * @has_large_object_suppression - control register 58 and data register 28
+ * exist.
+ * @has_jitter_filter - query 13 and control 73..76 exist.
+ *
+ * Gesture information queries 7 and 8 are present if has_gestures bit is set.
+ *
+ * @has_single_tap - a basic single-tap gesture is supported.
+ * @has_tap_n_hold - tap-and-hold gesture is supported.
+ * @has_double_tap - double-tap gesture is supported.
+ * @has_early_tap - early tap is supported and reported as soon as the finger
+ * lifts for any tap event that could be interpreted as either a single tap
+ * or as the first tap of a double-tap or tap-and-hold gesture.
+ * @has_flick - flick detection is supported.
+ * @has_press - press gesture reporting is supported.
+ * @has_pinch - pinch gesture detection is supported.
+ * @has_palm_det - the 2-D sensor notifies the host whenever a large conductive
+ * object such as a palm or a cheek touches the 2-D sensor.
+ * @has_rotate - rotation gesture detection is supported.
+ * @has_touch_shapes - TouchShapes are supported. A TouchShape is a fixed
+ * rectangular area on the sensor that behaves like a capacitive button.
+ * @has_scroll_zones - scrolling areas near the sensor edges are supported.
+ * @has_individual_scroll_zones - if 1, then 4 scroll zones are supported;
+ * if 0, then only two are supported.
+ * @has_mf_scroll - the multifinger_scrolling bit will be set when
+ * more than one finger is involved in a scrolling action.
+ *
+ * Convenience for checking bytes in the gesture info registers. This is done
+ * often enough that we put it here to declutter the conditionals.
+ *
+ * @query7_nonzero - true if any of the query 7 bits are set
+ * @query8_nonzero - true if any of the query 8 bits are set
+ *
+ * Query 9 is present if the has_query9 bit is set in query 0.
+ *
+ * @has_pen - detection of a stylus is supported and registers F11_2D_Ctrl20
+ * and F11_2D_Ctrl21 exist.
+ * @has_proximity - detection of fingers near the sensor is supported and
+ * registers F11_2D_Ctrl22 through F11_2D_Ctrl26 exist.
+ * @has_palm_det_sensitivity - the sensor supports the palm detect sensitivity
+ * feature and register F11_2D_Ctrl27 exists.
+ * @has_two_pen_thresholds - if has_pen is also set, then F11_2D_Ctrl35 exists.
+ * @has_contact_geometry - the sensor supports the use of contact geometry to
+ * map absolute X and Y target positions and registers F11_2D_Data18
+ * through F11_2D_Data27 exist.
+ *
+ * Touch shape info (query 10) is present if has_touch_shapes is set.
+ *
+ * @nr_touch_shapes - the total number of touch shapes supported.
+ *
+ * Query 11 is present if the has_query11 bit is set in query 0.
+ *
+ * @has_z_tuning - if set, the sensor supports Z tuning and registers
+ * F11_2D_Ctrl29 through F11_2D_Ctrl33 exist.
+ * @has_algorithm_selection - controls choice of noise suppression algorithm
+ * @has_w_tuning - the sensor supports Wx and Wy scaling and registers
+ * F11_2D_Ctrl36 through F11_2D_Ctrl39 exist.
+ * @has_pitch_info - the X and Y pitches of the sensor electrodes can be
+ * configured and registers F11_2D_Ctrl40 and F11_2D_Ctrl41 exist.
+ * @has_finger_size - the default finger width settings for the
+ * sensor can be configured and registers F11_2D_Ctrl42 through F11_2D_Ctrl44
+ * exist.
+ * @has_segmentation_aggressiveness - the sensor's ability to distinguish
+ * multiple objects close together can be configured and register F11_2D_Ctrl45
+ * exists.
+ * @has_XY_clip - the inactive outside borders of the sensor can be
+ * configured and registers F11_2D_Ctrl46 through F11_2D_Ctrl49 exist.
+ * @has_drumming_filter - the sensor can be configured to distinguish
+ * between a fast flick and a quick drumming movement and registers
+ * F11_2D_Ctrl50 and F11_2D_Ctrl51 exist.
+ *
+ * Query 12 is present if the has_query12 bit is set.
+ *
+ * @has_gapless_finger - control registers relating to gapless finger are
+ * present.
+ * @has_gapless_finger_tuning - additional control and data registers relating
+ * to gapless finger are present.
+ * @has_8bit_w - larger W value reporting is supported.
+ * @has_adjustable_mapping - TBD
+ * @has_info2 - the general info query14 is present
+ * @has_physical_props - additional queries describing the physical properties
+ * of the sensor are present.
+ * @has_finger_limit - indicates that F11 Ctrl 80 exists.
+ * @has_linear_coeff_2 - indicates that F11 Ctrl 81 exists.
+ *
+ * Query 13 is present if Query 5's has_jitter_filter bit is set.
+ * @jitter_window_size - used by Design Studio 4.
+ * @jitter_filter_type - used by Design Studio 4.
+ *
+ * Query 14 is present if query 12's has_info2 flag is set.
+ *
+ * @light_control - Indicates what light/led control features are present, if
+ * any.
+ * @is_clear - if set, this is a clear sensor (indicating direct pointing
+ * application), otherwise it's opaque (indicating indirect pointing).
+ * @clickpad_props - specifies if this is a clickpad, and if so what sort of
+ * mechanism it uses.
+ * @mouse_buttons - specifies the number of mouse buttons present (if any).
+ * @has_advanced_gestures - advanced driver gestures are supported.
+ */
+struct f11_2d_sensor_queries {
+ /* query1 */
+ u8 nr_fingers;
+ bool has_rel;
+ bool has_abs;
+ bool has_gestures;
+ bool has_sensitivity_adjust;
+ bool configurable;
+
+ /* query2 */
+ u8 nr_x_electrodes;
+
+ /* query3 */
+ u8 nr_y_electrodes;
+
+ /* query4 */
+ u8 max_electrodes;
+
+ /* query5 */
+ u8 abs_data_size;
+ bool has_anchored_finger;
+ bool has_adj_hyst;
+ bool has_dribble;
+ bool has_bending_correction;
+ bool has_large_object_suppression;
+ bool has_jitter_filter;
+
+ u8 f11_2d_query6;
+
+ /* query 7 */
+ bool has_single_tap;
+ bool has_tap_n_hold;
+ bool has_double_tap;
+ bool has_early_tap;
+ bool has_flick;
+ bool has_press;
+ bool has_pinch;
+ bool has_chiral;
+
+ bool query7_nonzero;
+
+ /* query 8 */
+ bool has_palm_det;
+ bool has_rotate;
+ bool has_touch_shapes;
+ bool has_scroll_zones;
+ bool has_individual_scroll_zones;
+ bool has_mf_scroll;
+ bool has_mf_edge_motion;
+ bool has_mf_scroll_inertia;
+
+ bool query8_nonzero;
+
+ /* Query 9 */
+ bool has_pen;
+ bool has_proximity;
+ bool has_palm_det_sensitivity;
+ bool has_suppress_on_palm_detect;
+ bool has_two_pen_thresholds;
+ bool has_contact_geometry;
+ bool has_pen_hover_discrimination;
+ bool has_pen_filters;
+
+ /* Query 10 */
+ u8 nr_touch_shapes;
+
+ /* Query 11. */
+ bool has_z_tuning;
+ bool has_algorithm_selection;
+ bool has_w_tuning;
+ bool has_pitch_info;
+ bool has_finger_size;
+ bool has_segmentation_aggressiveness;
+ bool has_XY_clip;
+ bool has_drumming_filter;
+
+ /* Query 12 */
+ bool has_gapless_finger;
+ bool has_gapless_finger_tuning;
+ bool has_8bit_w;
+ bool has_adjustable_mapping;
+ bool has_info2;
+ bool has_physical_props;
+ bool has_finger_limit;
+ bool has_linear_coeff_2;
+
+ /* Query 13 */
+ u8 jitter_window_size;
+ u8 jitter_filter_type;
+
+ /* Query 14 */
+ u8 light_control;
+ bool is_clear;
+ u8 clickpad_props;
+ u8 mouse_buttons;
+ bool has_advanced_gestures;
+
+ /* Query 15 - 18 */
+ u16 x_sensor_size_mm;
+ u16 y_sensor_size_mm;
+};
+
+/* Defs for Ctrl0. */
+#define RMI_F11_REPORT_MODE_MASK 0x07
+#define RMI_F11_ABS_POS_FILT (1 << 3)
+#define RMI_F11_REL_POS_FILT (1 << 4)
+#define RMI_F11_REL_BALLISTICS (1 << 5)
+#define RMI_F11_DRIBBLE (1 << 6)
+#define RMI_F11_REPORT_BEYOND_CLIP (1 << 7)
+
+/* Defs for Ctrl1. */
+#define RMI_F11_PALM_DETECT_THRESH_MASK 0x0F
+#define RMI_F11_MOTION_SENSITIVITY_MASK 0x30
+#define RMI_F11_MANUAL_TRACKING (1 << 6)
+#define RMI_F11_MANUAL_TRACKED_FINGER (1 << 7)
+
+#define RMI_F11_DELTA_X_THRESHOLD 2
+#define RMI_F11_DELTA_Y_THRESHOLD 3
+
+#define RMI_F11_CTRL_REG_COUNT 12
+
+struct f11_2d_ctrl {
+ u8 ctrl0_11[RMI_F11_CTRL_REG_COUNT];
+ u16 ctrl0_11_address;
+};
+
+#define RMI_F11_ABS_BYTES 5
+#define RMI_F11_REL_BYTES 2
+
+/* Defs for Data 8 */
+
+#define RMI_F11_SINGLE_TAP (1 << 0)
+#define RMI_F11_TAP_AND_HOLD (1 << 1)
+#define RMI_F11_DOUBLE_TAP (1 << 2)
+#define RMI_F11_EARLY_TAP (1 << 3)
+#define RMI_F11_FLICK (1 << 4)
+#define RMI_F11_PRESS (1 << 5)
+#define RMI_F11_PINCH (1 << 6)
+
+/* Defs for Data 9 */
+
+#define RMI_F11_PALM_DETECT (1 << 0)
+#define RMI_F11_ROTATE (1 << 1)
+#define RMI_F11_SHAPE (1 << 2)
+#define RMI_F11_SCROLLZONE (1 << 3)
+#define RMI_F11_GESTURE_FINGER_COUNT_MASK 0x70
+
+/** Handy pointers into our data buffer.
+ *
+ * @f_state - start of finger state registers.
+ * @abs_pos - start of absolute position registers (if present).
+ * @rel_pos - start of relative data registers (if present).
+ * @gest_1 - gesture flags (if present).
+ * @gest_2 - gesture flags & finger count (if present).
+ * @pinch - pinch motion register (if present).
+ * @flick - flick distance X & Y, flick time (if present).
+ * @rotate - rotate motion and finger separation.
+ * @multi_scroll - chiral deltas for X and Y (if present).
+ * @scroll_zones - scroll deltas for 4 regions (if present).
+ */
+struct f11_2d_data {
+ u8 *f_state;
+ u8 *abs_pos;
+ s8 *rel_pos;
+ u8 *gest_1;
+ u8 *gest_2;
+ s8 *pinch;
+ u8 *flick;
+ u8 *rotate;
+ u8 *shapes;
+ s8 *multi_scroll;
+ s8 *scroll_zones;
+};
+
+/** Data pertaining to F11 in general. For per-sensor data, see struct
+ * f11_2d_sensor.
+ *
+ * @has_query9..has_query28 - optional F11 query registers present on the device.
+ * @dev_controls - F11 device specific control registers.
+ * @dev_controls_mutex - lock for the control registers.
+ * @rezero_wait_ms - if nonzero, upon resume we will wait this many
+ * milliseconds before rezeroing the sensor(s). This is useful in systems with
+ * poor electrical behavior on resume, where the initial calibration of the
+ * sensor(s) coming out of sleep state may be bogus.
+ * @sensor - the 2-D sensor attached to this function.
+ */
+struct f11_data {
+ bool has_query9;
+ bool has_query11;
+ bool has_query12;
+ bool has_query27;
+ bool has_query28;
+ bool has_acm;
+ struct f11_2d_ctrl dev_controls;
+ struct mutex dev_controls_mutex;
+ u16 rezero_wait_ms;
+ struct rmi_2d_sensor sensor;
+ struct f11_2d_sensor_queries sens_query;
+ struct f11_2d_data data;
+ struct rmi_2d_sensor_platform_data sensor_pdata;
+ unsigned long *abs_mask;
+ unsigned long *rel_mask;
+ unsigned long *result_bits;
+};
+
+enum f11_finger_state {
+ F11_NO_FINGER = 0x00,
+ F11_PRESENT = 0x01,
+ F11_INACCURATE = 0x02,
+ F11_RESERVED = 0x03
+};
+
+static void rmi_f11_rel_pos_report(struct f11_data *f11, u8 n_finger)
+{
+ struct rmi_2d_sensor *sensor = &f11->sensor;
+ struct f11_2d_data *data = &f11->data;
+ s8 x, y;
+
+ x = data->rel_pos[n_finger * 2];
+ y = data->rel_pos[n_finger * 2 + 1];
+
+ rmi_2d_sensor_rel_report(sensor, x, y);
+}
+
+static void rmi_f11_abs_pos_process(struct f11_data *f11,
+ struct rmi_2d_sensor *sensor,
+ struct rmi_2d_sensor_abs_object *obj,
+ enum f11_finger_state finger_state,
+ u8 n_finger)
+{
+ struct f11_2d_data *data = &f11->data;
+ u8 *pos_data = &data->abs_pos[n_finger * RMI_F11_ABS_BYTES];
+ int tool_type = MT_TOOL_FINGER;
+
+ switch (finger_state) {
+ case F11_PRESENT:
+ obj->type = RMI_2D_OBJECT_FINGER;
+ break;
+ default:
+ obj->type = RMI_2D_OBJECT_NONE;
+ }
+
+ obj->mt_tool = tool_type;
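+	/*
+	 * The five data bytes per finger pack two 12-bit coordinates:
+	 * bytes 0 and 1 carry the high 8 bits of X and Y, byte 2 carries
+	 * both low nibbles, byte 3 holds Wx/Wy, and byte 4 is Z.
+	 */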
+ obj->x = (pos_data[0] << 4) | (pos_data[2] & 0x0F);
+ obj->y = (pos_data[1] << 4) | (pos_data[2] >> 4);
+ obj->z = pos_data[4];
+ obj->wx = pos_data[3] & 0x0f;
+ obj->wy = pos_data[3] >> 4;
+
+ rmi_2d_sensor_abs_process(sensor, obj, n_finger);
+}
+
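+/*
+ * Each f_state register packs four 2-bit finger states; shift down to the
+ * requested finger's slot and mask off two bits.
+ */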
+static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
+{
+ return (f_state[n_finger / 4] >> (2 * (n_finger % 4))) &
+ FINGER_STATE_MASK;
+}
+
+static void rmi_f11_finger_handler(struct f11_data *f11,
+ struct rmi_2d_sensor *sensor,
+ unsigned long *irq_bits, int num_irq_regs)
+{
+ const u8 *f_state = f11->data.f_state;
+ u8 finger_state;
+ u8 i;
+
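+	/*
+	 * bitmap_and() returns nonzero when the destination is non-empty,
+	 * i.e. when this interrupt covers absolute and/or relative data.
+	 */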
+ int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
+ num_irq_regs * 8);
+ int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
+ num_irq_regs * 8);
+
+ for (i = 0; i < sensor->nbr_fingers; i++) {
+		/* Up to four finger states are packed into each f_state register */
+ finger_state = rmi_f11_parse_finger_state(f_state, i);
+ if (finger_state == F11_RESERVED) {
+			pr_err("Invalid finger state[%d]: 0x%02x\n", i,
+			       finger_state);
+ continue;
+ }
+
+ if (abs_bits)
+ rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
+ finger_state, i);
+
+ if (rel_bits)
+ rmi_f11_rel_pos_report(f11, i);
+ }
+
+ if (abs_bits) {
+		/*
+		 * Absolute reporting is done in two passes so that in-kernel
+		 * finger tracking can run on the complete frame first.
+		 */
+ if (sensor->kernel_tracking)
+ input_mt_assign_slots(sensor->input,
+ sensor->tracking_slots,
+ sensor->tracking_pos,
+ sensor->nbr_fingers,
+ sensor->dmax);
+
+ for (i = 0; i < sensor->nbr_fingers; i++) {
+ finger_state = rmi_f11_parse_finger_state(f_state, i);
+ if (finger_state == F11_RESERVED)
+				/* no need to report the error twice */
+ continue;
+
+ rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
+ }
+
+ input_mt_sync_frame(sensor->input);
+ }
+}
+
+static int f11_2d_construct_data(struct f11_data *f11)
+{
+ struct rmi_2d_sensor *sensor = &f11->sensor;
+ struct f11_2d_sensor_queries *query = &f11->sens_query;
+ struct f11_2d_data *data = &f11->data;
+ int i;
+
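+	/* The raw field encodes fingers-minus-one, with 5 meaning ten fingers. */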
+ sensor->nbr_fingers = (query->nr_fingers == 5 ? 10 :
+ query->nr_fingers + 1);
+
+ sensor->pkt_size = DIV_ROUND_UP(sensor->nbr_fingers, 4);
+
+ if (query->has_abs) {
+ sensor->pkt_size += (sensor->nbr_fingers * 5);
+ sensor->attn_size = sensor->pkt_size;
+ }
+
+ if (query->has_rel)
+ sensor->pkt_size += (sensor->nbr_fingers * 2);
+
+ /* Check if F11_2D_Query7 is non-zero */
+ if (query->query7_nonzero)
+ sensor->pkt_size += sizeof(u8);
+
+ /* Check if F11_2D_Query7 or F11_2D_Query8 is non-zero */
+ if (query->query7_nonzero || query->query8_nonzero)
+ sensor->pkt_size += sizeof(u8);
+
+ if (query->has_pinch || query->has_flick || query->has_rotate) {
+ sensor->pkt_size += 3;
+ if (!query->has_flick)
+ sensor->pkt_size--;
+ if (!query->has_rotate)
+ sensor->pkt_size--;
+ }
+
+ if (query->has_touch_shapes)
+ sensor->pkt_size +=
+ DIV_ROUND_UP(query->nr_touch_shapes + 1, 8);
+
+ sensor->data_pkt = devm_kzalloc(&sensor->fn->dev, sensor->pkt_size,
+ GFP_KERNEL);
+ if (!sensor->data_pkt)
+ return -ENOMEM;
+
+ data->f_state = sensor->data_pkt;
+ i = DIV_ROUND_UP(sensor->nbr_fingers, 4);
+
+ if (query->has_abs) {
+ data->abs_pos = &sensor->data_pkt[i];
+ i += (sensor->nbr_fingers * RMI_F11_ABS_BYTES);
+ }
+
+ if (query->has_rel) {
+ data->rel_pos = &sensor->data_pkt[i];
+ i += (sensor->nbr_fingers * RMI_F11_REL_BYTES);
+ }
+
+ if (query->query7_nonzero) {
+ data->gest_1 = &sensor->data_pkt[i];
+ i++;
+ }
+
+ if (query->query7_nonzero || query->query8_nonzero) {
+ data->gest_2 = &sensor->data_pkt[i];
+ i++;
+ }
+
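+	/*
+	 * Pinch, flick and rotate share data bytes: flick starts on the
+	 * pinch byte when both are present, and rotate starts one byte
+	 * into flick.
+	 */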
+ if (query->has_pinch) {
+ data->pinch = &sensor->data_pkt[i];
+ i++;
+ }
+
+ if (query->has_flick) {
+ if (query->has_pinch) {
+ data->flick = data->pinch;
+ i += 2;
+ } else {
+ data->flick = &sensor->data_pkt[i];
+ i += 3;
+ }
+ }
+
+ if (query->has_rotate) {
+ if (query->has_flick) {
+ data->rotate = data->flick + 1;
+ } else {
+ data->rotate = &sensor->data_pkt[i];
+ i += 2;
+ }
+ }
+
+ if (query->has_touch_shapes)
+ data->shapes = &sensor->data_pkt[i];
+
+ return 0;
+}
+
+static int f11_read_control_regs(struct rmi_function *fn,
+				 struct f11_2d_ctrl *ctrl, u16 ctrl_base_addr)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error = 0;
+
+ ctrl->ctrl0_11_address = ctrl_base_addr;
+ error = rmi_read_block(rmi_dev, ctrl_base_addr, ctrl->ctrl0_11,
+ RMI_F11_CTRL_REG_COUNT);
+ if (error < 0) {
+ dev_err(&fn->dev, "Failed to read ctrl0, code: %d.\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int f11_write_control_regs(struct rmi_function *fn,
+ struct f11_2d_sensor_queries *query,
+ struct f11_2d_ctrl *ctrl,
+ u16 ctrl_base_addr)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error;
+
+ error = rmi_write_block(rmi_dev, ctrl_base_addr, ctrl->ctrl0_11,
+ RMI_F11_CTRL_REG_COUNT);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
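+/*
+ * Reads the chain of optional F11 query registers. Returns the number of
+ * query registers consumed (or a negative error), so the caller can locate
+ * whatever follows the query block.
+ */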
+static int rmi_f11_get_query_parameters(struct rmi_device *rmi_dev,
+ struct f11_data *f11,
+ struct f11_2d_sensor_queries *sensor_query,
+ u16 query_base_addr)
+{
+ int query_size;
+ int rc;
+ u8 query_buf[RMI_F11_QUERY_SIZE];
+ bool has_query36 = false;
+
+ rc = rmi_read_block(rmi_dev, query_base_addr, query_buf,
+ RMI_F11_QUERY_SIZE);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->nr_fingers = query_buf[0] & RMI_F11_NR_FINGERS_MASK;
+ sensor_query->has_rel = !!(query_buf[0] & RMI_F11_HAS_REL);
+ sensor_query->has_abs = !!(query_buf[0] & RMI_F11_HAS_ABS);
+ sensor_query->has_gestures = !!(query_buf[0] & RMI_F11_HAS_GESTURES);
+ sensor_query->has_sensitivity_adjust =
+ !!(query_buf[0] & RMI_F11_HAS_SENSITIVITY_ADJ);
+ sensor_query->configurable = !!(query_buf[0] & RMI_F11_CONFIGURABLE);
+
+ sensor_query->nr_x_electrodes =
+ query_buf[1] & RMI_F11_NR_ELECTRODES_MASK;
+ sensor_query->nr_y_electrodes =
+ query_buf[2] & RMI_F11_NR_ELECTRODES_MASK;
+ sensor_query->max_electrodes =
+ query_buf[3] & RMI_F11_NR_ELECTRODES_MASK;
+
+ query_size = RMI_F11_QUERY_SIZE;
+
+ if (sensor_query->has_abs) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->abs_data_size =
+ query_buf[0] & RMI_F11_ABS_DATA_SIZE_MASK;
+ sensor_query->has_anchored_finger =
+ !!(query_buf[0] & RMI_F11_HAS_ANCHORED_FINGER);
+ sensor_query->has_adj_hyst =
+ !!(query_buf[0] & RMI_F11_HAS_ADJ_HYST);
+ sensor_query->has_dribble =
+ !!(query_buf[0] & RMI_F11_HAS_DRIBBLE);
+ sensor_query->has_bending_correction =
+ !!(query_buf[0] & RMI_F11_HAS_BENDING_CORRECTION);
+ sensor_query->has_large_object_suppression =
+ !!(query_buf[0] & RMI_F11_HAS_LARGE_OBJECT_SUPPRESSION);
+ sensor_query->has_jitter_filter =
+ !!(query_buf[0] & RMI_F11_HAS_JITTER_FILTER);
+ query_size++;
+ }
+
+ if (sensor_query->has_rel) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size,
+ &sensor_query->f11_2d_query6);
+ if (rc < 0)
+ return rc;
+ query_size++;
+ }
+
+ if (sensor_query->has_gestures) {
+ rc = rmi_read_block(rmi_dev, query_base_addr + query_size,
+ query_buf, RMI_F11_QUERY_GESTURE_SIZE);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->has_single_tap =
+ !!(query_buf[0] & RMI_F11_HAS_SINGLE_TAP);
+ sensor_query->has_tap_n_hold =
+ !!(query_buf[0] & RMI_F11_HAS_TAP_AND_HOLD);
+ sensor_query->has_double_tap =
+ !!(query_buf[0] & RMI_F11_HAS_DOUBLE_TAP);
+ sensor_query->has_early_tap =
+ !!(query_buf[0] & RMI_F11_HAS_EARLY_TAP);
+ sensor_query->has_flick =
+ !!(query_buf[0] & RMI_F11_HAS_FLICK);
+ sensor_query->has_press =
+ !!(query_buf[0] & RMI_F11_HAS_PRESS);
+ sensor_query->has_pinch =
+ !!(query_buf[0] & RMI_F11_HAS_PINCH);
+ sensor_query->has_chiral =
+ !!(query_buf[0] & RMI_F11_HAS_CHIRAL);
+
+ /* query 8 */
+ sensor_query->has_palm_det =
+ !!(query_buf[1] & RMI_F11_HAS_PALM_DET);
+ sensor_query->has_rotate =
+ !!(query_buf[1] & RMI_F11_HAS_ROTATE);
+ sensor_query->has_touch_shapes =
+ !!(query_buf[1] & RMI_F11_HAS_TOUCH_SHAPES);
+ sensor_query->has_scroll_zones =
+ !!(query_buf[1] & RMI_F11_HAS_SCROLL_ZONES);
+ sensor_query->has_individual_scroll_zones =
+ !!(query_buf[1] & RMI_F11_HAS_INDIVIDUAL_SCROLL_ZONES);
+ sensor_query->has_mf_scroll =
+ !!(query_buf[1] & RMI_F11_HAS_MF_SCROLL);
+ sensor_query->has_mf_edge_motion =
+ !!(query_buf[1] & RMI_F11_HAS_MF_EDGE_MOTION);
+ sensor_query->has_mf_scroll_inertia =
+ !!(query_buf[1] & RMI_F11_HAS_MF_SCROLL_INERTIA);
+
+ sensor_query->query7_nonzero = !!(query_buf[0]);
+ sensor_query->query8_nonzero = !!(query_buf[1]);
+
+ query_size += 2;
+ }
+
+ if (f11->has_query9) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->has_pen =
+ !!(query_buf[0] & RMI_F11_HAS_PEN);
+ sensor_query->has_proximity =
+ !!(query_buf[0] & RMI_F11_HAS_PROXIMITY);
+ sensor_query->has_palm_det_sensitivity =
+ !!(query_buf[0] & RMI_F11_HAS_PALM_DET_SENSITIVITY);
+ sensor_query->has_suppress_on_palm_detect =
+ !!(query_buf[0] & RMI_F11_HAS_SUPPRESS_ON_PALM_DETECT);
+ sensor_query->has_two_pen_thresholds =
+ !!(query_buf[0] & RMI_F11_HAS_TWO_PEN_THRESHOLDS);
+ sensor_query->has_contact_geometry =
+ !!(query_buf[0] & RMI_F11_HAS_CONTACT_GEOMETRY);
+ sensor_query->has_pen_hover_discrimination =
+ !!(query_buf[0] & RMI_F11_HAS_PEN_HOVER_DISCRIMINATION);
+ sensor_query->has_pen_filters =
+ !!(query_buf[0] & RMI_F11_HAS_PEN_FILTERS);
+
+ query_size++;
+ }
+
+ if (sensor_query->has_touch_shapes) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->nr_touch_shapes = query_buf[0] &
+ RMI_F11_NR_TOUCH_SHAPES_MASK;
+
+ query_size++;
+ }
+
+ if (f11->has_query11) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->has_z_tuning =
+ !!(query_buf[0] & RMI_F11_HAS_Z_TUNING);
+ sensor_query->has_algorithm_selection =
+ !!(query_buf[0] & RMI_F11_HAS_ALGORITHM_SELECTION);
+ sensor_query->has_w_tuning =
+ !!(query_buf[0] & RMI_F11_HAS_W_TUNING);
+ sensor_query->has_pitch_info =
+ !!(query_buf[0] & RMI_F11_HAS_PITCH_INFO);
+ sensor_query->has_finger_size =
+ !!(query_buf[0] & RMI_F11_HAS_FINGER_SIZE);
+ sensor_query->has_segmentation_aggressiveness =
+ !!(query_buf[0] &
+ RMI_F11_HAS_SEGMENTATION_AGGRESSIVENESS);
+ sensor_query->has_XY_clip =
+ !!(query_buf[0] & RMI_F11_HAS_XY_CLIP);
+ sensor_query->has_drumming_filter =
+ !!(query_buf[0] & RMI_F11_HAS_DRUMMING_FILTER);
+
+ query_size++;
+ }
+
+ if (f11->has_query12) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->has_gapless_finger =
+ !!(query_buf[0] & RMI_F11_HAS_GAPLESS_FINGER);
+ sensor_query->has_gapless_finger_tuning =
+ !!(query_buf[0] & RMI_F11_HAS_GAPLESS_FINGER_TUNING);
+ sensor_query->has_8bit_w =
+ !!(query_buf[0] & RMI_F11_HAS_8BIT_W);
+ sensor_query->has_adjustable_mapping =
+ !!(query_buf[0] & RMI_F11_HAS_ADJUSTABLE_MAPPING);
+ sensor_query->has_info2 =
+ !!(query_buf[0] & RMI_F11_HAS_INFO2);
+ sensor_query->has_physical_props =
+ !!(query_buf[0] & RMI_F11_HAS_PHYSICAL_PROPS);
+ sensor_query->has_finger_limit =
+ !!(query_buf[0] & RMI_F11_HAS_FINGER_LIMIT);
+ sensor_query->has_linear_coeff_2 =
+ !!(query_buf[0] & RMI_F11_HAS_LINEAR_COEFF);
+
+ query_size++;
+ }
+
+ if (sensor_query->has_jitter_filter) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->jitter_window_size = query_buf[0] &
+ RMI_F11_JITTER_WINDOW_MASK;
+ sensor_query->jitter_filter_type = (query_buf[0] &
+ RMI_F11_JITTER_FILTER_MASK) >>
+ RMI_F11_JITTER_FILTER_SHIFT;
+
+ query_size++;
+ }
+
+ if (sensor_query->has_info2) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size, query_buf);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->light_control =
+ query_buf[0] & RMI_F11_LIGHT_CONTROL_MASK;
+ sensor_query->is_clear =
+ !!(query_buf[0] & RMI_F11_IS_CLEAR);
+ sensor_query->clickpad_props =
+ (query_buf[0] & RMI_F11_CLICKPAD_PROPS_MASK) >>
+ RMI_F11_CLICKPAD_PROPS_SHIFT;
+ sensor_query->mouse_buttons =
+ (query_buf[0] & RMI_F11_MOUSE_BUTTONS_MASK) >>
+ RMI_F11_MOUSE_BUTTONS_SHIFT;
+ sensor_query->has_advanced_gestures =
+ !!(query_buf[0] & RMI_F11_HAS_ADVANCED_GESTURES);
+
+ query_size++;
+ }
+
+ if (sensor_query->has_physical_props) {
+ rc = rmi_read_block(rmi_dev, query_base_addr
+ + query_size, query_buf, 4);
+ if (rc < 0)
+ return rc;
+
+ sensor_query->x_sensor_size_mm =
+ (query_buf[0] | (query_buf[1] << 8)) / 10;
+ sensor_query->y_sensor_size_mm =
+ (query_buf[2] | (query_buf[3] << 8)) / 10;
+
+ /*
+ * query 15 - 18 contain the size of the sensor
+ * and query 19 - 26 contain bezel dimensions
+ */
+ query_size += 12;
+ }
+
+ if (f11->has_query27)
+ ++query_size;
+
+ if (f11->has_query28) {
+ rc = rmi_read(rmi_dev, query_base_addr + query_size,
+ query_buf);
+ if (rc < 0)
+ return rc;
+
+ has_query36 = !!(query_buf[0] & BIT(6));
+ }
+
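+	/*
+	 * Query 28, bit 6 advertises query 36, which sits two registers
+	 * further on; bit 5 of query 36 signals two extra ACM data bytes
+	 * per finger.
+	 */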
+ if (has_query36) {
+ query_size += 2;
+ rc = rmi_read(rmi_dev, query_base_addr + query_size,
+ query_buf);
+ if (rc < 0)
+ return rc;
+
+ if (!!(query_buf[0] & BIT(5)))
+ f11->has_acm = true;
+ }
+
+ return query_size;
+}
+
+static int rmi_f11_initialize(struct rmi_function *fn)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct f11_data *f11;
+ struct f11_2d_ctrl *ctrl;
+	u16 query_offset;
+ u16 query_base_addr;
+ u16 control_base_addr;
+ u16 max_x_pos, max_y_pos;
+ int rc;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(rmi_dev);
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct rmi_2d_sensor *sensor;
+ u8 buf;
+ int mask_size;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Initializing F11 values.\n");
+
+ mask_size = BITS_TO_LONGS(drvdata->irq_count) * sizeof(unsigned long);
+
+	/* Allocate and initialize the per-function instance data. */
+ f11 = devm_kzalloc(&fn->dev, sizeof(struct f11_data) + mask_size * 3,
+ GFP_KERNEL);
+ if (!f11)
+ return -ENOMEM;
+
+ if (fn->dev.of_node) {
+ rc = rmi_2d_sensor_of_probe(&fn->dev, &f11->sensor_pdata);
+ if (rc)
+ return rc;
+ } else if (pdata->sensor_pdata) {
+ f11->sensor_pdata = *pdata->sensor_pdata;
+ }
+
+ f11->rezero_wait_ms = f11->sensor_pdata.rezero_wait;
+
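+	/*
+	 * The three IRQ bitmaps live in the same allocation as the f11_data
+	 * struct, laid out back to back after it.
+	 */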
+ f11->abs_mask = (unsigned long *)((char *)f11
+ + sizeof(struct f11_data));
+ f11->rel_mask = (unsigned long *)((char *)f11
+ + sizeof(struct f11_data) + mask_size);
+ f11->result_bits = (unsigned long *)((char *)f11
+ + sizeof(struct f11_data) + mask_size * 2);
+
+ set_bit(fn->irq_pos, f11->abs_mask);
+ set_bit(fn->irq_pos + 1, f11->rel_mask);
+
+ query_base_addr = fn->fd.query_base_addr;
+ control_base_addr = fn->fd.control_base_addr;
+
+ rc = rmi_read(rmi_dev, query_base_addr, &buf);
+ if (rc < 0)
+ return rc;
+
+ f11->has_query9 = !!(buf & RMI_F11_HAS_QUERY9);
+ f11->has_query11 = !!(buf & RMI_F11_HAS_QUERY11);
+ f11->has_query12 = !!(buf & RMI_F11_HAS_QUERY12);
+ f11->has_query27 = !!(buf & RMI_F11_HAS_QUERY27);
+ f11->has_query28 = !!(buf & RMI_F11_HAS_QUERY28);
+
+ query_offset = (query_base_addr + 1);
+ sensor = &f11->sensor;
+ sensor->fn = fn;
+
+ rc = rmi_f11_get_query_parameters(rmi_dev, f11,
+ &f11->sens_query, query_offset);
+ if (rc < 0)
+ return rc;
+ query_offset += rc;
+
+ rc = f11_read_control_regs(fn, &f11->dev_controls,
+ control_base_addr);
+ if (rc < 0) {
+ dev_err(&fn->dev,
+ "Failed to read F11 control params.\n");
+ return rc;
+ }
+
+ if (f11->sens_query.has_info2) {
+ if (f11->sens_query.is_clear)
+ f11->sensor.sensor_type = rmi_sensor_touchscreen;
+ else
+ f11->sensor.sensor_type = rmi_sensor_touchpad;
+ }
+
+ sensor->report_abs = f11->sens_query.has_abs;
+
+ sensor->axis_align =
+ f11->sensor_pdata.axis_align;
+
+ sensor->topbuttonpad = f11->sensor_pdata.topbuttonpad;
+ sensor->kernel_tracking = f11->sensor_pdata.kernel_tracking;
+ sensor->dmax = f11->sensor_pdata.dmax;
+
+ if (f11->sens_query.has_physical_props) {
+ sensor->x_mm = f11->sens_query.x_sensor_size_mm;
+ sensor->y_mm = f11->sens_query.y_sensor_size_mm;
+ } else {
+ sensor->x_mm = f11->sensor_pdata.x_mm;
+ sensor->y_mm = f11->sensor_pdata.y_mm;
+ }
+
+ if (sensor->sensor_type == rmi_sensor_default)
+ sensor->sensor_type =
+ f11->sensor_pdata.sensor_type;
+
+ sensor->report_abs = sensor->report_abs
+ && !(f11->sensor_pdata.disable_report_mask
+ & RMI_F11_DISABLE_ABS_REPORT);
+
+ if (!sensor->report_abs)
+		/*
+		 * If the device doesn't support abs, or abs reporting has
+		 * been disabled, fall back to reporting rel data.
+		 */
+ sensor->report_rel = f11->sens_query.has_rel;
+
+ rc = rmi_read_block(rmi_dev,
+ control_base_addr + F11_CTRL_SENSOR_MAX_X_POS_OFFSET,
+ (u8 *)&max_x_pos, sizeof(max_x_pos));
+ if (rc < 0)
+ return rc;
+
+ rc = rmi_read_block(rmi_dev,
+ control_base_addr + F11_CTRL_SENSOR_MAX_Y_POS_OFFSET,
+ (u8 *)&max_y_pos, sizeof(max_y_pos));
+ if (rc < 0)
+ return rc;
+
+ sensor->max_x = max_x_pos;
+ sensor->max_y = max_y_pos;
+
+ rc = f11_2d_construct_data(f11);
+ if (rc < 0)
+ return rc;
+
+ if (f11->has_acm)
+ f11->sensor.attn_size += f11->sensor.nbr_fingers * 2;
+
+ /* allocate the in-kernel tracking buffers */
+ sensor->tracking_pos = devm_kzalloc(&fn->dev,
+ sizeof(struct input_mt_pos) * sensor->nbr_fingers,
+ GFP_KERNEL);
+ sensor->tracking_slots = devm_kzalloc(&fn->dev,
+ sizeof(int) * sensor->nbr_fingers, GFP_KERNEL);
+ sensor->objs = devm_kzalloc(&fn->dev,
+ sizeof(struct rmi_2d_sensor_abs_object)
+ * sensor->nbr_fingers, GFP_KERNEL);
+ if (!sensor->tracking_pos || !sensor->tracking_slots || !sensor->objs)
+ return -ENOMEM;
+
+ ctrl = &f11->dev_controls;
+ if (sensor->axis_align.delta_x_threshold)
+ ctrl->ctrl0_11[RMI_F11_DELTA_X_THRESHOLD] =
+ sensor->axis_align.delta_x_threshold;
+
+ if (sensor->axis_align.delta_y_threshold)
+ ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
+ sensor->axis_align.delta_y_threshold;
+
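+	/*
+	 * Clear the dribble enable (ctrl0, bit 6) and the palm-detect
+	 * enable (ctrl11, bit 0) so that neither is reported.
+	 */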
+ if (f11->sens_query.has_dribble)
+ ctrl->ctrl0_11[0] = ctrl->ctrl0_11[0] & ~BIT(6);
+
+ if (f11->sens_query.has_palm_det)
+ ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
+
+ rc = f11_write_control_regs(fn, &f11->sens_query,
+			&f11->dev_controls, fn->fd.control_base_addr);
+ if (rc)
+ dev_warn(&fn->dev, "Failed to write control registers\n");
+
+ mutex_init(&f11->dev_controls_mutex);
+
+ dev_set_drvdata(&fn->dev, f11);
+
+ return 0;
+}
+
+static int rmi_f11_config(struct rmi_function *fn)
+{
+ struct f11_data *f11 = dev_get_drvdata(&fn->dev);
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+ struct rmi_2d_sensor *sensor = &f11->sensor;
+ int rc;
+
+ if (!sensor->report_abs)
+ drv->clear_irq_bits(fn->rmi_dev, f11->abs_mask);
+ else
+ drv->set_irq_bits(fn->rmi_dev, f11->abs_mask);
+
+ if (!sensor->report_rel)
+ drv->clear_irq_bits(fn->rmi_dev, f11->rel_mask);
+ else
+ drv->set_irq_bits(fn->rmi_dev, f11->rel_mask);
+
+ rc = f11_write_control_regs(fn, &f11->sens_query,
+			&f11->dev_controls, fn->fd.control_base_addr);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+ struct f11_data *f11 = dev_get_drvdata(&fn->dev);
+ u16 data_base_addr = fn->fd.data_base_addr;
+ u16 data_base_addr_offset = 0;
+ int error;
+
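+	/*
+	 * If the transport already delivered the data with the attention
+	 * report (e.g. HID), consume it from that buffer instead of
+	 * reading it over the bus.
+	 */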
+ if (rmi_dev->xport->attn_data) {
+ memcpy(f11->sensor.data_pkt, rmi_dev->xport->attn_data,
+ f11->sensor.attn_size);
+ rmi_dev->xport->attn_data += f11->sensor.attn_size;
+ rmi_dev->xport->attn_size -= f11->sensor.attn_size;
+ } else {
+ error = rmi_read_block(rmi_dev,
+ data_base_addr + data_base_addr_offset,
+ f11->sensor.data_pkt,
+ f11->sensor.pkt_size);
+ if (error < 0)
+ return error;
+ }
+
+ rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
+ drvdata->num_of_irq_regs);
+ data_base_addr_offset += f11->sensor.pkt_size;
+
+ return 0;
+}
+
+static int rmi_f11_resume(struct rmi_function *fn)
+{
+ struct f11_data *f11 = dev_get_drvdata(&fn->dev);
+ int error;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Resuming...\n");
+ if (!f11->rezero_wait_ms)
+ return 0;
+
+ mdelay(f11->rezero_wait_ms);
+
+ error = rmi_write(fn->rmi_dev, fn->fd.command_base_addr,
+ RMI_F11_REZERO);
+ if (error) {
+ dev_err(&fn->dev,
+ "%s: failed to issue rezero command, error = %d.",
+ __func__, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f11_probe(struct rmi_function *fn)
+{
+ int error;
+ struct f11_data *f11;
+
+ error = rmi_f11_initialize(fn);
+ if (error)
+ return error;
+
+ f11 = dev_get_drvdata(&fn->dev);
+ error = rmi_2d_sensor_configure_input(fn, &f11->sensor);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f11_handler = {
+ .driver = {
+ .name = "rmi4_f11",
+ },
+ .func = 0x11,
+ .probe = rmi_f11_probe,
+ .config = rmi_f11_config,
+ .attention = rmi_f11_attention,
+ .resume = rmi_f11_resume,
+};
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
new file mode 100644
index 000000000000..8dd3fb5e1f94
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2012-2016 Synaptics Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/rmi.h>
+#include "rmi_driver.h"
+#include "rmi_2d_sensor.h"
+
+enum rmi_f12_object_type {
+ RMI_F12_OBJECT_NONE = 0x00,
+ RMI_F12_OBJECT_FINGER = 0x01,
+ RMI_F12_OBJECT_STYLUS = 0x02,
+ RMI_F12_OBJECT_PALM = 0x03,
+ RMI_F12_OBJECT_UNCLASSIFIED = 0x04,
+ RMI_F12_OBJECT_GLOVED_FINGER = 0x06,
+ RMI_F12_OBJECT_NARROW_OBJECT = 0x07,
+ RMI_F12_OBJECT_HAND_EDGE = 0x08,
+ RMI_F12_OBJECT_COVER = 0x0A,
+ RMI_F12_OBJECT_STYLUS_2 = 0x0B,
+ RMI_F12_OBJECT_ERASER = 0x0C,
+ RMI_F12_OBJECT_SMALL_OBJECT = 0x0D,
+};
+
+struct f12_data {
+ struct rmi_function *fn;
+ struct rmi_2d_sensor sensor;
+ struct rmi_2d_sensor_platform_data sensor_pdata;
+
+ u16 data_addr;
+
+ struct rmi_register_descriptor query_reg_desc;
+ struct rmi_register_descriptor control_reg_desc;
+ struct rmi_register_descriptor data_reg_desc;
+
+ /* F12 Data1 describes sensed objects */
+ const struct rmi_register_desc_item *data1;
+ u16 data1_offset;
+
+ /* F12 Data5 describes finger ACM */
+ const struct rmi_register_desc_item *data5;
+ u16 data5_offset;
+
+	/* F12 Data6 describes Pen */
+ const struct rmi_register_desc_item *data6;
+ u16 data6_offset;
+
+ /* F12 Data9 reports relative data */
+ const struct rmi_register_desc_item *data9;
+ u16 data9_offset;
+
+ const struct rmi_register_desc_item *data15;
+ u16 data15_offset;
+};
+
+static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
+{
+ const struct rmi_register_desc_item *item;
+ struct rmi_2d_sensor *sensor = &f12->sensor;
+ struct rmi_function *fn = sensor->fn;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int ret;
+ int offset;
+ u8 buf[14];
+ int pitch_x = 0;
+ int pitch_y = 0;
+ int rx_receivers = 0;
+ int tx_receivers = 0;
+ int sensor_flags = 0;
+
+ item = rmi_get_register_desc_item(&f12->control_reg_desc, 8);
+ if (!item) {
+ dev_err(&fn->dev,
+ "F12 does not have the sensor tuning control register\n");
+ return -ENODEV;
+ }
+
+ offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
+
+ if (item->reg_size > 14) {
+		dev_err(&fn->dev, "F12 control8 should be no more than 14 bytes, not: %ld\n",
+ item->reg_size);
+ return -ENODEV;
+ }
+
+ ret = rmi_read_block(rmi_dev, fn->fd.control_base_addr + offset, buf,
+ item->reg_size);
+ if (ret)
+ return ret;
+
+ offset = 0;
+ if (rmi_register_desc_has_subpacket(item, 0)) {
+ sensor->max_x = (buf[offset + 1] << 8) | buf[offset];
+ sensor->max_y = (buf[offset + 3] << 8) | buf[offset + 2];
+ offset += 4;
+ }
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: max_x: %d max_y: %d\n", __func__,
+ sensor->max_x, sensor->max_y);
+
+ if (rmi_register_desc_has_subpacket(item, 1)) {
+ pitch_x = (buf[offset + 1] << 8) | buf[offset];
+ pitch_y = (buf[offset + 3] << 8) | buf[offset + 2];
+ offset += 4;
+ }
+
+ if (rmi_register_desc_has_subpacket(item, 2)) {
+ sensor->axis_align.clip_x_low = buf[offset];
+ sensor->axis_align.clip_x_high = sensor->max_x
+ - buf[offset + 1];
+ sensor->axis_align.clip_y_low = buf[offset + 2];
+ sensor->axis_align.clip_y_high = sensor->max_y
+ - buf[offset + 3];
+ offset += 4;
+ }
+
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x low: %d x high: %d y low: %d y high: %d\n",
+		__func__, sensor->axis_align.clip_x_low,
+		sensor->axis_align.clip_x_high,
+		sensor->axis_align.clip_y_low,
+		sensor->axis_align.clip_y_high);
+
+ if (rmi_register_desc_has_subpacket(item, 3)) {
+ rx_receivers = buf[offset];
+ tx_receivers = buf[offset + 1];
+ offset += 2;
+ }
+
+ if (rmi_register_desc_has_subpacket(item, 4)) {
+ sensor_flags = buf[offset];
+ offset += 1;
+ }
+
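+	/*
+	 * The pitch values appear to be fixed point with 12 fractional
+	 * bits (units of 1/4096 mm), so the products are shifted down by
+	 * 12 to give whole millimeters.
+	 */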
+ sensor->x_mm = (pitch_x * rx_receivers) >> 12;
+ sensor->y_mm = (pitch_y * tx_receivers) >> 12;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x_mm: %d y_mm: %d\n", __func__,
+ sensor->x_mm, sensor->y_mm);
+
+ return 0;
+}
+
+static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
+{
+ int i;
+ struct rmi_2d_sensor *sensor = &f12->sensor;
+
+ for (i = 0; i < f12->data1->num_subpackets; i++) {
+ struct rmi_2d_sensor_abs_object *obj = &sensor->objs[i];
+
+ obj->type = RMI_2D_OBJECT_NONE;
+ obj->mt_tool = MT_TOOL_FINGER;
+
+ switch (data1[0]) {
+ case RMI_F12_OBJECT_FINGER:
+ obj->type = RMI_2D_OBJECT_FINGER;
+ break;
+ case RMI_F12_OBJECT_STYLUS:
+ obj->type = RMI_2D_OBJECT_STYLUS;
+ obj->mt_tool = MT_TOOL_PEN;
+ break;
+ case RMI_F12_OBJECT_PALM:
+ obj->type = RMI_2D_OBJECT_PALM;
+ obj->mt_tool = MT_TOOL_PALM;
+ break;
+ case RMI_F12_OBJECT_UNCLASSIFIED:
+ obj->type = RMI_2D_OBJECT_UNCLASSIFIED;
+ break;
+ }
+
+ obj->x = (data1[2] << 8) | data1[1];
+ obj->y = (data1[4] << 8) | data1[3];
+ obj->z = data1[5];
+ obj->wx = data1[6];
+ obj->wy = data1[7];
+
+ rmi_2d_sensor_abs_process(sensor, obj, i);
+
+ data1 += 8;
+ }
+
+ if (sensor->kernel_tracking)
+ input_mt_assign_slots(sensor->input,
+ sensor->tracking_slots,
+ sensor->tracking_pos,
+ sensor->nbr_fingers,
+ sensor->dmax);
+
+ for (i = 0; i < sensor->nbr_fingers; i++)
+ rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
+}
+
+static int rmi_f12_attention(struct rmi_function *fn,
+ unsigned long *irq_nr_regs)
+{
+ int retval;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+ struct rmi_2d_sensor *sensor = &f12->sensor;
+
+ if (rmi_dev->xport->attn_data) {
+ memcpy(sensor->data_pkt, rmi_dev->xport->attn_data,
+ sensor->attn_size);
+ rmi_dev->xport->attn_data += sensor->attn_size;
+ rmi_dev->xport->attn_size -= sensor->attn_size;
+ } else {
+ retval = rmi_read_block(rmi_dev, f12->data_addr,
+ sensor->data_pkt, sensor->pkt_size);
+ if (retval < 0) {
+ dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
+ retval);
+ return retval;
+ }
+ }
+
+ if (f12->data1)
+ rmi_f12_process_objects(f12,
+ &sensor->data_pkt[f12->data1_offset]);
+
+ input_mt_sync_frame(sensor->input);
+
+ return 0;
+}
+
+static int rmi_f12_config(struct rmi_function *fn)
+{
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+ return 0;
+}
+
+static int rmi_f12_probe(struct rmi_function *fn)
+{
+ struct f12_data *f12;
+ int ret;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+	u8 buf;
+ u16 query_addr = fn->fd.query_base_addr;
+ const struct rmi_register_desc_item *item;
+ struct rmi_2d_sensor *sensor;
+ struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+ struct rmi_transport_dev *xport = rmi_dev->xport;
+ u16 data_offset = 0;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
+
+ ret = rmi_read(fn->rmi_dev, query_addr, &buf);
+ if (ret < 0) {
+ dev_err(&fn->dev, "Failed to read general info register: %d\n",
+ ret);
+ return -ENODEV;
+ }
+ ++query_addr;
+
+ if (!(buf & 0x1)) {
+ dev_err(&fn->dev,
+ "Behavior of F12 without register descriptors is undefined.\n");
+ return -ENODEV;
+ }
+
+ f12 = devm_kzalloc(&fn->dev, sizeof(struct f12_data), GFP_KERNEL);
+ if (!f12)
+ return -ENOMEM;
+
+ if (fn->dev.of_node) {
+ ret = rmi_2d_sensor_of_probe(&fn->dev, &f12->sensor_pdata);
+ if (ret)
+ return ret;
+ } else if (pdata->sensor_pdata) {
+ f12->sensor_pdata = *pdata->sensor_pdata;
+ }
+
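+	/*
+	 * Each of the three register descriptors (query, control, data)
+	 * occupies three query registers, hence the query_addr += 3 steps
+	 * below.
+	 */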
+ ret = rmi_read_register_desc(rmi_dev, query_addr,
+ &f12->query_reg_desc);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to read the Query Register Descriptor: %d\n",
+ ret);
+ return ret;
+ }
+ query_addr += 3;
+
+ ret = rmi_read_register_desc(rmi_dev, query_addr,
+ &f12->control_reg_desc);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to read the Control Register Descriptor: %d\n",
+ ret);
+ return ret;
+ }
+ query_addr += 3;
+
+ ret = rmi_read_register_desc(rmi_dev, query_addr,
+ &f12->data_reg_desc);
+ if (ret) {
+ dev_err(&fn->dev,
+ "Failed to read the Data Register Descriptor: %d\n",
+ ret);
+ return ret;
+ }
+ query_addr += 3;
+
+ sensor = &f12->sensor;
+ sensor->fn = fn;
+ f12->data_addr = fn->fd.data_base_addr;
+ sensor->pkt_size = rmi_register_desc_calc_size(&f12->data_reg_desc);
+
+ sensor->axis_align =
+ f12->sensor_pdata.axis_align;
+
+ sensor->x_mm = f12->sensor_pdata.x_mm;
+ sensor->y_mm = f12->sensor_pdata.y_mm;
+
+ if (sensor->sensor_type == rmi_sensor_default)
+ sensor->sensor_type =
+ f12->sensor_pdata.sensor_type;
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: data packet size: %d\n", __func__,
+ sensor->pkt_size);
+ sensor->data_pkt = devm_kzalloc(&fn->dev, sensor->pkt_size, GFP_KERNEL);
+ if (!sensor->data_pkt)
+ return -ENOMEM;
+
+ dev_set_drvdata(&fn->dev, f12);
+
+ ret = rmi_f12_read_sensor_tuning(f12);
+ if (ret)
+ return ret;
+
+	/*
+	 * Figure out what data is contained in the data registers. HID
+	 * devices may have registers defined, but their data is not
+	 * reported in the HID attention report. Registers which are not
+	 * part of the HID attention report are skipped here when the
+	 * device delivers its data through HID attention reports.
+	 */
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 0);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 1);
+ if (item) {
+ f12->data1 = item;
+ f12->data1_offset = data_offset;
+ data_offset += item->reg_size;
+ sensor->nbr_fingers = item->num_subpackets;
+ sensor->report_abs = 1;
+ sensor->attn_size += item->reg_size;
+ }
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 2);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 3);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 4);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 5);
+ if (item) {
+ f12->data5 = item;
+ f12->data5_offset = data_offset;
+ data_offset += item->reg_size;
+ sensor->attn_size += item->reg_size;
+ }
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 6);
+ if (item && !xport->attn_data) {
+ f12->data6 = item;
+ f12->data6_offset = data_offset;
+ data_offset += item->reg_size;
+ }
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 7);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 8);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 9);
+ if (item && !xport->attn_data) {
+ f12->data9 = item;
+ f12->data9_offset = data_offset;
+ data_offset += item->reg_size;
+ if (!sensor->report_abs)
+ sensor->report_rel = 1;
+ }
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 10);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 11);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 12);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 13);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 14);
+ if (item && !xport->attn_data)
+ data_offset += item->reg_size;
+
+ item = rmi_get_register_desc_item(&f12->data_reg_desc, 15);
+ if (item && !xport->attn_data) {
+ f12->data15 = item;
+ f12->data15_offset = data_offset;
+ data_offset += item->reg_size;
+ }
+
+ /* allocate the in-kernel tracking buffers */
+ sensor->tracking_pos = devm_kzalloc(&fn->dev,
+ sizeof(struct input_mt_pos) * sensor->nbr_fingers,
+ GFP_KERNEL);
+ sensor->tracking_slots = devm_kzalloc(&fn->dev,
+ sizeof(int) * sensor->nbr_fingers, GFP_KERNEL);
+ sensor->objs = devm_kzalloc(&fn->dev,
+ sizeof(struct rmi_2d_sensor_abs_object)
+ * sensor->nbr_fingers, GFP_KERNEL);
+ if (!sensor->tracking_pos || !sensor->tracking_slots || !sensor->objs)
+ return -ENOMEM;
+
+ ret = rmi_2d_sensor_configure_input(fn, sensor);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct rmi_function_handler rmi_f12_handler = {
+ .driver = {
+ .name = "rmi4_f12",
+ },
+ .func = 0x12,
+ .probe = rmi_f12_probe,
+ .config = rmi_f12_config,
+ .attention = rmi_f12_attention,
+};
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
new file mode 100644
index 000000000000..760aff1bc420
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2012-2016 Synaptics Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define RMI_F30_QUERY_SIZE 2
+
+/* Defs for Query 0 */
+#define RMI_F30_EXTENDED_PATTERNS 0x01
+#define RMI_F30_HAS_MAPPABLE_BUTTONS (1 << 1)
+#define RMI_F30_HAS_LED (1 << 2)
+#define RMI_F30_HAS_GPIO (1 << 3)
+#define RMI_F30_HAS_HAPTIC (1 << 4)
+#define RMI_F30_HAS_GPIO_DRV_CTL (1 << 5)
+#define RMI_F30_HAS_MECH_MOUSE_BTNS (1 << 6)
+
+/* Defs for Query 1 */
+#define RMI_F30_GPIO_LED_COUNT 0x1F
+
+/* Defs for Control Registers */
+#define RMI_F30_CTRL_1_GPIO_DEBOUNCE 0x01
+#define RMI_F30_CTRL_1_HALT (1 << 4)
+#define RMI_F30_CTRL_1_HALTED (1 << 5)
+#define RMI_F30_CTRL_10_NUM_MECH_MOUSE_BTNS 0x03
+
+struct rmi_f30_ctrl_data {
+ int address;
+ int length;
+ u8 *regs;
+};
+
+#define RMI_F30_CTRL_MAX_REGS 32
+#define RMI_F30_CTRL_MAX_BYTES ((RMI_F30_CTRL_MAX_REGS + 7) >> 3)
+#define RMI_F30_CTRL_MAX_REG_BLOCKS 11
+
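+/* Worst-case total size of control registers 0 through 10, in order. */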
+#define RMI_F30_CTRL_REGS_MAX_SIZE (RMI_F30_CTRL_MAX_BYTES \
+ + 1 \
+ + RMI_F30_CTRL_MAX_BYTES \
+ + RMI_F30_CTRL_MAX_BYTES \
+ + RMI_F30_CTRL_MAX_BYTES \
+ + 6 \
+ + RMI_F30_CTRL_MAX_REGS \
+ + RMI_F30_CTRL_MAX_REGS \
+ + RMI_F30_CTRL_MAX_BYTES \
+ + 1 \
+ + 1)
+
+struct f30_data {
+ /* Query Data */
+ bool has_extended_pattern;
+ bool has_mappable_buttons;
+ bool has_led;
+ bool has_gpio;
+ bool has_haptic;
+ bool has_gpio_driver_control;
+ bool has_mech_mouse_btns;
+ u8 gpioled_count;
+
+ u8 register_count;
+
+ /* Control Register Data */
+ struct rmi_f30_ctrl_data ctrl[RMI_F30_CTRL_MAX_REG_BLOCKS];
+ u8 ctrl_regs[RMI_F30_CTRL_REGS_MAX_SIZE];
+ u32 ctrl_regs_size;
+
+ u8 data_regs[RMI_F30_CTRL_MAX_BYTES];
+ u16 *gpioled_key_map;
+
+ struct input_dev *input;
+};
+
+static int rmi_f30_read_control_parameters(struct rmi_function *fn,
+ struct f30_data *f30)
+{
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int error = 0;
+
+ error = rmi_read_block(rmi_dev, fn->fd.control_base_addr,
+ f30->ctrl_regs, f30->ctrl_regs_size);
+ if (error) {
+		dev_err(&rmi_dev->dev, "%s: Could not read control registers at 0x%x error (%d)\n",
+ __func__, fn->fd.control_base_addr, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+ struct f30_data *f30 = dev_get_drvdata(&fn->dev);
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ int retval;
+ int gpiled = 0;
+ int value = 0;
+ int i;
+ int reg_num;
+
+ if (!f30->input)
+ return 0;
+
+	/* Read the GPIO/LED data. */
+ if (rmi_dev->xport->attn_data) {
+ memcpy(f30->data_regs, rmi_dev->xport->attn_data,
+ f30->register_count);
+ rmi_dev->xport->attn_data += f30->register_count;
+ rmi_dev->xport->attn_size -= f30->register_count;
+ } else {
+ retval = rmi_read_block(rmi_dev, fn->fd.data_base_addr,
+ f30->data_regs, f30->register_count);
+
+ if (retval) {
+ dev_err(&fn->dev, "%s: Failed to read F30 data registers.\n",
+ __func__);
+ return retval;
+ }
+ }
+
+ for (reg_num = 0; reg_num < f30->register_count; ++reg_num) {
+ for (i = 0; gpiled < f30->gpioled_count && i < 8; ++i,
+ ++gpiled) {
+ if (f30->gpioled_key_map[gpiled] != 0) {
+ /* buttons have pull up resistors */
+ value = (((f30->data_regs[reg_num] >> i) & 0x01)
+ == 0);
+
+ rmi_dbg(RMI_DEBUG_FN, &fn->dev,
+ "%s: call input report key (0x%04x) value (0x%02x)",
+ __func__,
+ f30->gpioled_key_map[gpiled], value);
+ input_report_key(f30->input,
+ f30->gpioled_key_map[gpiled],
+ value);
+ }
+
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f30_register_device(struct rmi_function *fn)
+{
+ int i;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+ struct f30_data *f30 = dev_get_drvdata(&fn->dev);
+ struct input_dev *input_dev;
+ int button_count = 0;
+
+ input_dev = drv_data->input;
+ if (!input_dev) {
+ dev_info(&fn->dev, "F30: no input device found, ignoring.\n");
+ return -EINVAL;
+ }
+
+ f30->input = input_dev;
+
+ set_bit(EV_KEY, input_dev->evbit);
+
+ input_dev->keycode = f30->gpioled_key_map;
+ input_dev->keycodesize = sizeof(u16);
+ input_dev->keycodemax = f30->gpioled_count;
+
+ for (i = 0; i < f30->gpioled_count; i++) {
+ if (f30->gpioled_key_map[i] != 0) {
+ input_set_capability(input_dev, EV_KEY,
+ f30->gpioled_key_map[i]);
+ button_count++;
+ }
+ }
+
+ if (button_count == 1)
+ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+ return 0;
+}
+
+static int rmi_f30_config(struct rmi_function *fn)
+{
+ struct f30_data *f30 = dev_get_drvdata(&fn->dev);
+ struct rmi_driver *drv = fn->rmi_dev->driver;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(fn->rmi_dev);
+ int error;
+
+ if (pdata->f30_data && pdata->f30_data->disable) {
+ drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
+ } else {
+ /* Write Control Register values back to device */
+ error = rmi_write_block(fn->rmi_dev, fn->fd.control_base_addr,
+ f30->ctrl_regs, f30->ctrl_regs_size);
+ if (error) {
+ dev_err(&fn->rmi_dev->dev,
+				"%s: Could not write control registers at 0x%x error (%d)\n",
+ __func__, fn->fd.control_base_addr, error);
+ return error;
+ }
+
+ drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+ }
+ return 0;
+}
+
+static inline void rmi_f30_set_ctrl_data(struct rmi_f30_ctrl_data *ctrl,
+ int *ctrl_addr, int len, u8 **reg)
+{
+ ctrl->address = *ctrl_addr;
+ ctrl->length = len;
+ ctrl->regs = *reg;
+ *ctrl_addr += len;
+ *reg += len;
+}
+
+static inline bool rmi_f30_is_valid_button(int button,
+ struct rmi_f30_ctrl_data *ctrl)
+{
+ int byte_position = button >> 3;
+ int bit_position = button & 0x07;
+
+ /*
+ * ctrl2 -> dir == 0 -> input mode
+ * ctrl3 -> data == 1 -> actual button
+ */
+ return !(ctrl[2].regs[byte_position] & BIT(bit_position)) &&
+ (ctrl[3].regs[byte_position] & BIT(bit_position));
+}
+
+static inline int rmi_f30_initialize(struct rmi_function *fn)
+{
+ struct f30_data *f30;
+ struct rmi_device *rmi_dev = fn->rmi_dev;
+ const struct rmi_device_platform_data *pdata;
+ int retval = 0;
+ int control_address;
+ int i;
+ int button;
+ u8 buf[RMI_F30_QUERY_SIZE];
+ u8 *ctrl_reg;
+ u8 *map_memory;
+
+ f30 = devm_kzalloc(&fn->dev, sizeof(struct f30_data),
+ GFP_KERNEL);
+ if (!f30)
+ return -ENOMEM;
+
+ dev_set_drvdata(&fn->dev, f30);
+
+ retval = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr, buf,
+ RMI_F30_QUERY_SIZE);
+
+ if (retval) {
+ dev_err(&fn->dev, "Failed to read query register.\n");
+ return retval;
+ }
+
+ f30->has_extended_pattern = buf[0] & RMI_F30_EXTENDED_PATTERNS;
+ f30->has_mappable_buttons = buf[0] & RMI_F30_HAS_MAPPABLE_BUTTONS;
+ f30->has_led = buf[0] & RMI_F30_HAS_LED;
+ f30->has_gpio = buf[0] & RMI_F30_HAS_GPIO;
+ f30->has_haptic = buf[0] & RMI_F30_HAS_HAPTIC;
+ f30->has_gpio_driver_control = buf[0] & RMI_F30_HAS_GPIO_DRV_CTL;
+ f30->has_mech_mouse_btns = buf[0] & RMI_F30_HAS_MECH_MOUSE_BTNS;
+ f30->gpioled_count = buf[1] & RMI_F30_GPIO_LED_COUNT;
+
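+	/* One status bit per GPIO/LED, rounded up to whole registers. */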
+ f30->register_count = (f30->gpioled_count + 7) >> 3;
+
+ control_address = fn->fd.control_base_addr;
+ ctrl_reg = f30->ctrl_regs;
+
+ if (f30->has_gpio && f30->has_led)
+ rmi_f30_set_ctrl_data(&f30->ctrl[0], &control_address,
+ f30->register_count, &ctrl_reg);
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[1], &control_address, sizeof(u8),
+ &ctrl_reg);
+
+ if (f30->has_gpio) {
+ rmi_f30_set_ctrl_data(&f30->ctrl[2], &control_address,
+ f30->register_count, &ctrl_reg);
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[3], &control_address,
+ f30->register_count, &ctrl_reg);
+ }
+
+ if (f30->has_led) {
+ int ctrl5_len;
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[4], &control_address,
+ f30->register_count, &ctrl_reg);
+
+ if (f30->has_extended_pattern)
+ ctrl5_len = 6;
+ else
+ ctrl5_len = 2;
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[5], &control_address,
+ ctrl5_len, &ctrl_reg);
+ }
+
+ if (f30->has_led || f30->has_gpio_driver_control) {
+ /* control 6 uses a byte per gpio/led */
+ rmi_f30_set_ctrl_data(&f30->ctrl[6], &control_address,
+ f30->gpioled_count, &ctrl_reg);
+ }
+
+ if (f30->has_mappable_buttons) {
+ /* control 7 uses a byte per gpio/led */
+ rmi_f30_set_ctrl_data(&f30->ctrl[7], &control_address,
+ f30->gpioled_count, &ctrl_reg);
+ }
+
+ if (f30->has_haptic) {
+ rmi_f30_set_ctrl_data(&f30->ctrl[8], &control_address,
+ f30->register_count, &ctrl_reg);
+
+ rmi_f30_set_ctrl_data(&f30->ctrl[9], &control_address,
+ sizeof(u8), &ctrl_reg);
+ }
+
+ if (f30->has_mech_mouse_btns)
+ rmi_f30_set_ctrl_data(&f30->ctrl[10], &control_address,
+ sizeof(u8), &ctrl_reg);
+
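+	/* Fall back to the worst-case size if no control blocks were mapped. */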
+ f30->ctrl_regs_size = ctrl_reg - f30->ctrl_regs
+ ?: RMI_F30_CTRL_REGS_MAX_SIZE;
+
+ retval = rmi_f30_read_control_parameters(fn, f30);
+ if (retval < 0) {
+ dev_err(&fn->dev,
+			"Failed to initialize F30 control params.\n");
+ return retval;
+ }
+
+ map_memory = devm_kzalloc(&fn->dev,
+ (f30->gpioled_count * (sizeof(u16))),
+ GFP_KERNEL);
+ if (!map_memory) {
+ dev_err(&fn->dev, "Failed to allocate gpioled map memory.\n");
+ return -ENOMEM;
+ }
+
+ f30->gpioled_key_map = (u16 *)map_memory;
+
+ pdata = rmi_get_platform_data(rmi_dev);
+ if (pdata && f30->has_gpio) {
+ button = BTN_LEFT;
+ for (i = 0; i < f30->gpioled_count; i++) {
+ if (rmi_f30_is_valid_button(i, f30->ctrl)) {
+ f30->gpioled_key_map[i] = button++;
+
+				/*
+				 * A buttonpad might also be indicated by
+				 * f30->has_mech_mouse_btns, but that is
+				 * not certain, so rely only on the pdata
+				 * info.
+				 */
+ if (pdata->f30_data &&
+ pdata->f30_data->buttonpad)
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int rmi_f30_probe(struct rmi_function *fn)
+{
+ int rc;
+ const struct rmi_device_platform_data *pdata =
+ rmi_get_platform_data(fn->rmi_dev);
+
+ if (pdata->f30_data && pdata->f30_data->disable)
+ return 0;
+
+	rc = rmi_f30_initialize(fn);
+	if (rc < 0)
+		return rc;
+
+	rc = rmi_f30_register_device(fn);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+struct rmi_function_handler rmi_f30_handler = {
+ .driver = {
+ .name = "rmi4_f30",
+ },
+ .func = 0x30,
+ .probe = rmi_f30_probe,
+ .config = rmi_f30_config,
+ .attention = rmi_f30_attention,
+};
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
new file mode 100644
index 000000000000..a96a326b53bd
--- /dev/null
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/rmi.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include "rmi_driver.h"
+
+#define BUFFER_SIZE_INCREMENT 32
+
+/**
+ * struct rmi_i2c_xport - stores information for i2c communication
+ *
+ * @xport: The transport interface structure
+ * @client: The I2C client backing this transport
+ *
+ * @page_mutex: Locks current page to avoid changing pages in unexpected ways.
+ * @page: Keeps track of the current virtual page
+ *
+ * @irq: The IRQ number used by the attention line
+ *
+ * @tx_buf: Buffer used for transmitting data to the sensor over i2c.
+ * @tx_buf_size: Size of the buffer
+ */
+struct rmi_i2c_xport {
+ struct rmi_transport_dev xport;
+ struct i2c_client *client;
+
+ struct mutex page_mutex;
+ int page;
+
+ int irq;
+
+ u8 *tx_buf;
+ size_t tx_buf_size;
+};
+
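+/* The upper byte of a 16-bit RMI register address selects the page. */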
+#define RMI_PAGE_SELECT_REGISTER 0xff
+#define RMI_I2C_PAGE(addr) (((addr) >> 8) & 0xff)
+
+/*
+ * rmi_set_page - Set RMI page
+ * @xport: The pointer to the rmi_transport_dev struct
+ * @page: The new page address.
+ *
+ * RMI devices have 16-bit addressing, but some of the transport
+ * implementations (like SMBus) only have 8-bit addressing. So RMI implements
+ * a page select register at 0xff of every page, so we can reliably address
+ * any register by first selecting its 256-register page.
+ *
+ * The page_mutex lock must be held when this function is entered.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int rmi_set_page(struct rmi_i2c_xport *rmi_i2c, u8 page)
+{
+ struct i2c_client *client = rmi_i2c->client;
+ u8 txbuf[2] = {RMI_PAGE_SELECT_REGISTER, page};
+ int retval;
+
+ retval = i2c_master_send(client, txbuf, sizeof(txbuf));
+ if (retval != sizeof(txbuf)) {
+ dev_err(&client->dev,
+ "%s: set page failed: %d.", __func__, retval);
+ return (retval < 0) ? retval : -EIO;
+ }
+
+ rmi_i2c->page = page;
+ return 0;
+}
+
+static int rmi_i2c_write_block(struct rmi_transport_dev *xport, u16 addr,
+ const void *buf, size_t len)
+{
+ struct rmi_i2c_xport *rmi_i2c =
+ container_of(xport, struct rmi_i2c_xport, xport);
+ struct i2c_client *client = rmi_i2c->client;
+ size_t tx_size = len + 1;
+ int retval;
+
+ mutex_lock(&rmi_i2c->page_mutex);
+
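+	/* Grow the device-managed TX buffer on demand. */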
+ if (!rmi_i2c->tx_buf || rmi_i2c->tx_buf_size < tx_size) {
+ if (rmi_i2c->tx_buf)
+ devm_kfree(&client->dev, rmi_i2c->tx_buf);
+ rmi_i2c->tx_buf_size = tx_size + BUFFER_SIZE_INCREMENT;
+ rmi_i2c->tx_buf = devm_kzalloc(&client->dev,
+ rmi_i2c->tx_buf_size,
+ GFP_KERNEL);
+ if (!rmi_i2c->tx_buf) {
+ rmi_i2c->tx_buf_size = 0;
+ retval = -ENOMEM;
+ goto exit;
+ }
+ }
+
+ rmi_i2c->tx_buf[0] = addr & 0xff;
+ memcpy(rmi_i2c->tx_buf + 1, buf, len);
+
+ if (RMI_I2C_PAGE(addr) != rmi_i2c->page) {
+ retval = rmi_set_page(rmi_i2c, RMI_I2C_PAGE(addr));
+ if (retval)
+ goto exit;
+ }
+
+ retval = i2c_master_send(client, rmi_i2c->tx_buf, tx_size);
+ if (retval == tx_size)
+ retval = 0;
+ else if (retval >= 0)
+ retval = -EIO;
+
+exit:
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev,
+ "write %zd bytes at %#06x: %d (%*ph)\n",
+ len, addr, retval, (int)len, buf);
+
+ mutex_unlock(&rmi_i2c->page_mutex);
+ return retval;
+}
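
The write path above frames every transfer as a one-byte register offset followed by the payload, sent in a single i2c_master_send(); a standalone sketch of that framing (function name hypothetical):

#include <stdint.h>
#include <string.h>

/* Frame an RMI write as the driver does: [8-bit offset][payload]. */
static size_t rmi_frame_write(uint8_t *tx, uint16_t addr,
			      const uint8_t *buf, size_t len)
{
	tx[0] = addr & 0xff;      /* register offset within the current page */
	memcpy(tx + 1, buf, len); /* payload follows immediately */
	return len + 1;           /* number of bytes to put on the wire */
}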
+
+static int rmi_i2c_read_block(struct rmi_transport_dev *xport, u16 addr,
+ void *buf, size_t len)
+{
+ struct rmi_i2c_xport *rmi_i2c =
+ container_of(xport, struct rmi_i2c_xport, xport);
+ struct i2c_client *client = rmi_i2c->client;
+ u8 addr_offset = addr & 0xff;
+ int retval;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .len = sizeof(addr_offset),
+ .buf = &addr_offset,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ },
+ };
+
+ mutex_lock(&rmi_i2c->page_mutex);
+
+ if (RMI_I2C_PAGE(addr) != rmi_i2c->page) {
+ retval = rmi_set_page(rmi_i2c, RMI_I2C_PAGE(addr));
+ if (retval)
+ goto exit;
+ }
+
+ retval = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (retval == ARRAY_SIZE(msgs))
+ retval = 0; /* success */
+ else if (retval >= 0)
+ retval = -EIO;
+
+exit:
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev,
+ "read %zd bytes at %#06x: %d (%*ph)\n",
+ len, addr, retval, (int)len, buf);
+
+ mutex_unlock(&rmi_i2c->page_mutex);
+ return retval;
+}
+
+static const struct rmi_transport_ops rmi_i2c_ops = {
+ .write_block = rmi_i2c_write_block,
+ .read_block = rmi_i2c_read_block,
+};
+
+static irqreturn_t rmi_i2c_irq(int irq, void *dev_id)
+{
+ struct rmi_i2c_xport *rmi_i2c = dev_id;
+ struct rmi_device *rmi_dev = rmi_i2c->xport.rmi_dev;
+ int ret;
+
+ ret = rmi_process_interrupt_requests(rmi_dev);
+ if (ret)
+ rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
+ "Failed to process interrupt request: %d\n", ret);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_i2c_init_irq(struct i2c_client *client)
+{
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_i2c->irq));
+ int ret;
+
+ if (!irq_flags)
+ irq_flags = IRQF_TRIGGER_LOW;
+
+ ret = devm_request_threaded_irq(&client->dev, rmi_i2c->irq, NULL,
+ rmi_i2c_irq, irq_flags | IRQF_ONESHOT, client->name,
+ rmi_i2c);
+ if (ret < 0) {
+ dev_warn(&client->dev, "Failed to register interrupt %d\n",
+ rmi_i2c->irq);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rmi_i2c_of_match[] = {
+ { .compatible = "syna,rmi4-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rmi_i2c_of_match);
+#endif
+
+static int rmi_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct rmi_device_platform_data *pdata;
+ struct rmi_device_platform_data *client_pdata =
+ dev_get_platdata(&client->dev);
+ struct rmi_i2c_xport *rmi_i2c;
+ int retval;
+
+ rmi_i2c = devm_kzalloc(&client->dev, sizeof(struct rmi_i2c_xport),
+ GFP_KERNEL);
+ if (!rmi_i2c)
+ return -ENOMEM;
+
+ pdata = &rmi_i2c->xport.pdata;
+
+ if (!client->dev.of_node && client_pdata)
+ *pdata = *client_pdata;
+
+ if (client->irq > 0)
+ rmi_i2c->irq = client->irq;
+
+ rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Probing %s.\n",
+ dev_name(&client->dev));
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev,
+ "adapter does not support required functionality.\n");
+ return -ENODEV;
+ }
+
+ rmi_i2c->client = client;
+ mutex_init(&rmi_i2c->page_mutex);
+
+ rmi_i2c->xport.dev = &client->dev;
+ rmi_i2c->xport.proto_name = "i2c";
+ rmi_i2c->xport.ops = &rmi_i2c_ops;
+
+ i2c_set_clientdata(client, rmi_i2c);
+
+ /*
+ * Setting the page to zero will (a) make sure the PSR is in a
+ * known state, and (b) make sure we can talk to the device.
+ */
+ retval = rmi_set_page(rmi_i2c, 0);
+ if (retval) {
+ dev_err(&client->dev, "Failed to set page select to 0.\n");
+ return retval;
+ }
+
+ retval = rmi_register_transport_device(&rmi_i2c->xport);
+ if (retval) {
+ dev_err(&client->dev, "Failed to register transport driver at 0x%.2X.\n",
+ client->addr);
+ return retval;
+ }
+
+ retval = rmi_i2c_init_irq(client);
+ if (retval < 0)
+ return retval;
+
+ dev_info(&client->dev, "registered rmi i2c driver at %#04x.\n",
+ client->addr);
+ return 0;
+}
+
+static int rmi_i2c_remove(struct i2c_client *client)
+{
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+
+ rmi_unregister_transport_device(&rmi_i2c->xport);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rmi_i2c_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to suspend device: %d\n", ret);
+
+ disable_irq(rmi_i2c->irq);
+ if (device_may_wakeup(&client->dev)) {
+ ret = enable_irq_wake(rmi_i2c->irq);
+ if (ret)
+ dev_warn(dev, "Failed to enable irq for wake: %d\n",
+ ret);
+ }
+ return ret;
+}
+
+static int rmi_i2c_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int ret;
+
+ enable_irq(rmi_i2c->irq);
+ if (device_may_wakeup(&client->dev)) {
+ ret = disable_irq_wake(rmi_i2c->irq);
+ if (ret)
+ dev_warn(dev, "Failed to disable irq for wake: %d\n",
+ ret);
+ }
+
+ ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int rmi_i2c_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ disable_irq(rmi_i2c->irq);
+
+ return 0;
+}
+
+static int rmi_i2c_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
+ int ret;
+
+ enable_irq(rmi_i2c->irq);
+
+ ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rmi_i2c_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(rmi_i2c_suspend, rmi_i2c_resume)
+ SET_RUNTIME_PM_OPS(rmi_i2c_runtime_suspend, rmi_i2c_runtime_resume,
+ NULL)
+};
+
+static const struct i2c_device_id rmi_id[] = {
+ { "rmi4_i2c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rmi_id);
+
+static struct i2c_driver rmi_i2c_driver = {
+ .driver = {
+ .name = "rmi4_i2c",
+ .pm = &rmi_i2c_pm,
+ .of_match_table = of_match_ptr(rmi_i2c_of_match),
+ },
+ .id_table = rmi_id,
+ .probe = rmi_i2c_probe,
+ .remove = rmi_i2c_remove,
+};
+
+module_i2c_driver(rmi_i2c_driver);
+
+MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
+MODULE_DESCRIPTION("RMI I2C driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RMI_DRIVER_VERSION);
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
new file mode 100644
index 000000000000..55bd1b34970c
--- /dev/null
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright (c) 2011-2016 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include "rmi_driver.h"
+
+#define RMI_SPI_DEFAULT_XFER_BUF_SIZE 64
+
+#define RMI_PAGE_SELECT_REGISTER 0x00FF
+#define RMI_SPI_PAGE(addr) (((addr) >> 8) & 0x80)
+#define RMI_SPI_XFER_SIZE_LIMIT 255
+
+#define BUFFER_SIZE_INCREMENT 32
+
+enum rmi_spi_op {
+ RMI_SPI_WRITE = 0,
+ RMI_SPI_READ,
+ RMI_SPI_V2_READ_UNIFIED,
+ RMI_SPI_V2_READ_SPLIT,
+ RMI_SPI_V2_WRITE,
+};
+
+struct rmi_spi_cmd {
+ enum rmi_spi_op op;
+ u16 addr;
+};
+
+struct rmi_spi_xport {
+ struct rmi_transport_dev xport;
+ struct spi_device *spi;
+
+ struct mutex page_mutex;
+ int page;
+
+ int irq;
+
+ u8 *rx_buf;
+ u8 *tx_buf;
+ int xfer_buf_size;
+
+ struct spi_transfer *rx_xfers;
+ struct spi_transfer *tx_xfers;
+ int rx_xfer_count;
+ int tx_xfer_count;
+};
+
+static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
+{
+ struct spi_device *spi = rmi_spi->spi;
+ int buf_size = rmi_spi->xfer_buf_size
+ ? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
+ struct spi_transfer *xfer_buf;
+ void *buf;
+ void *tmp;
+
+ while (buf_size < len)
+ buf_size *= 2;
+
+ if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
+ buf_size = RMI_SPI_XFER_SIZE_LIMIT;
+
+ tmp = rmi_spi->rx_buf;
+ buf = devm_kzalloc(&spi->dev, buf_size * 2,
+ GFP_KERNEL | GFP_DMA);
+ if (!buf)
+ return -ENOMEM;
+
+ rmi_spi->rx_buf = buf;
+ rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
+ rmi_spi->xfer_buf_size = buf_size;
+
+ if (tmp)
+ devm_kfree(&spi->dev, tmp);
+
+ if (rmi_spi->xport.pdata.spi_data.read_delay_us)
+ rmi_spi->rx_xfer_count = buf_size;
+ else
+ rmi_spi->rx_xfer_count = 1;
+
+ if (rmi_spi->xport.pdata.spi_data.write_delay_us)
+ rmi_spi->tx_xfer_count = buf_size;
+ else
+ rmi_spi->tx_xfer_count = 1;
+
+ /*
+ * Allocate a pool of spi_transfer buffers for devices which need
+ * per byte delays.
+ */
+ tmp = rmi_spi->rx_xfers;
+ xfer_buf = devm_kzalloc(&spi->dev,
+ (rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
+ * sizeof(struct spi_transfer), GFP_KERNEL);
+ if (!xfer_buf)
+ return -ENOMEM;
+
+ rmi_spi->rx_xfers = xfer_buf;
+ rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];
+
+ if (tmp)
+ devm_kfree(&spi->dev, tmp);
+
+ return 0;
+}
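
The sizing policy above starts at the 64-byte default, doubles until the requested length fits, and clamps at the 255-byte hardware transfer limit; a standalone sketch with two worked cases:

#include <stdio.h>

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE 64
#define RMI_SPI_XFER_SIZE_LIMIT 255

static int next_buf_size(int current, int len)
{
	int buf_size = current ? current : RMI_SPI_DEFAULT_XFER_BUF_SIZE;

	while (buf_size < len)
		buf_size *= 2;          /* grow geometrically */

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;  /* hardware cap */

	return buf_size;
}

int main(void)
{
	printf("%d\n", next_buf_size(0, 100));   /* -> 128 */
	printf("%d\n", next_buf_size(128, 200)); /* -> 255 (capped) */
	return 0;
}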
+
+static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
+ const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
+ int tx_len, u8 *rx_buf, int rx_len)
+{
+ struct spi_device *spi = rmi_spi->spi;
+ struct rmi_device_platform_data_spi *spi_data =
+ &rmi_spi->xport.pdata.spi_data;
+ struct spi_message msg;
+ struct spi_transfer *xfer;
+ int ret = 0;
+ int len;
+ int cmd_len = 0;
+ int total_tx_len;
+ int i;
+ u16 addr = cmd->addr;
+
+ spi_message_init(&msg);
+
+ switch (cmd->op) {
+ case RMI_SPI_WRITE:
+ case RMI_SPI_READ:
+ cmd_len += 2;
+ break;
+ case RMI_SPI_V2_READ_UNIFIED:
+ case RMI_SPI_V2_READ_SPLIT:
+ case RMI_SPI_V2_WRITE:
+ cmd_len += 4;
+ break;
+ }
+
+ total_tx_len = cmd_len + tx_len;
+ len = max(total_tx_len, rx_len);
+
+ if (len > RMI_SPI_XFER_SIZE_LIMIT)
+ return -EINVAL;
+
+ if (rmi_spi->xfer_buf_size < len) {
+ ret = rmi_spi_manage_pools(rmi_spi, len);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (addr == 0)
+ /*
+ * SPI needs an address. Use 0x7FF if we want to keep
+ * reading from the last position of the register pointer.
+ */
+ addr = 0x7FF;
+
+ switch (cmd->op) {
+ case RMI_SPI_WRITE:
+ rmi_spi->tx_buf[0] = (addr >> 8);
+ rmi_spi->tx_buf[1] = addr & 0xFF;
+ break;
+ case RMI_SPI_READ:
+ rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
+ rmi_spi->tx_buf[1] = addr & 0xFF;
+ break;
+ case RMI_SPI_V2_READ_UNIFIED:
+ break;
+ case RMI_SPI_V2_READ_SPLIT:
+ break;
+ case RMI_SPI_V2_WRITE:
+ rmi_spi->tx_buf[0] = 0x40;
+ rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
+ rmi_spi->tx_buf[2] = addr & 0xFF;
+ rmi_spi->tx_buf[3] = tx_len;
+ break;
+ }
+
+ if (tx_buf)
+ memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);
+
+ if (rmi_spi->tx_xfer_count > 1) {
+ for (i = 0; i < total_tx_len; i++) {
+ xfer = &rmi_spi->tx_xfers[i];
+ memset(xfer, 0, sizeof(struct spi_transfer));
+ xfer->tx_buf = &rmi_spi->tx_buf[i];
+ xfer->len = 1;
+ xfer->delay_usecs = spi_data->write_delay_us;
+ spi_message_add_tail(xfer, &msg);
+ }
+ } else {
+ xfer = rmi_spi->tx_xfers;
+ memset(xfer, 0, sizeof(struct spi_transfer));
+ xfer->tx_buf = rmi_spi->tx_buf;
+ xfer->len = total_tx_len;
+ spi_message_add_tail(xfer, &msg);
+ }
+
+ rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
+ __func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
+ total_tx_len, total_tx_len, rmi_spi->tx_buf);
+
+ if (rx_buf) {
+ if (rmi_spi->rx_xfer_count > 1) {
+ for (i = 0; i < rx_len; i++) {
+ xfer = &rmi_spi->rx_xfers[i];
+ memset(xfer, 0, sizeof(struct spi_transfer));
+ xfer->rx_buf = &rmi_spi->rx_buf[i];
+ xfer->len = 1;
+ xfer->delay_usecs = spi_data->read_delay_us;
+ spi_message_add_tail(xfer, &msg);
+ }
+ } else {
+ xfer = rmi_spi->rx_xfers;
+ memset(xfer, 0, sizeof(struct spi_transfer));
+ xfer->rx_buf = rmi_spi->rx_buf;
+ xfer->len = rx_len;
+ spi_message_add_tail(xfer, &msg);
+ }
+ }
+
+ ret = spi_sync(spi, &msg);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
+ return ret;
+ }
+
+ if (rx_buf) {
+ memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
+ rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
+ __func__, rx_len, rx_len, rx_buf);
+ }
+
+ return 0;
+}
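
For the v1 protocol handled in the switch above, the header is two bytes: the high address bits with the top bit marking a read, then the low address byte. A standalone sketch (function name hypothetical):

#include <stdint.h>

/* Build the 2-byte RMI SPI v1 command header, as rmi_spi_xfer() does. */
static void rmi_spi_v1_header(uint8_t hdr[2], uint16_t addr, int is_read)
{
	hdr[0] = addr >> 8;        /* high address bits */
	if (is_read)
		hdr[0] |= 0x80;    /* read flag in the top bit */
	hdr[1] = addr & 0xff;      /* low address byte */
}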
+
+/*
+ * rmi_set_page - Set RMI page
+ * @rmi_spi: The pointer to the rmi_spi_xport struct
+ * @page: The new page address.
+ *
+ * RMI devices have 16-bit addressing, but some of the transport
+ * implementations (like SMBus) only have 8-bit addressing. So RMI implements
+ * a page-select register at 0xff of every page, which lets us reliably
+ * address registers in 256-register pages.
+ *
+ * The page_mutex lock must be held when this function is entered.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
+{
+ struct rmi_spi_cmd cmd;
+ int ret;
+
+ cmd.op = RMI_SPI_WRITE;
+ cmd.addr = RMI_PAGE_SELECT_REGISTER;
+
+ ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);
+
+ if (!ret)
+ rmi_spi->page = page;
+
+ return ret;
+}
+
+static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
+ const void *buf, size_t len)
+{
+ struct rmi_spi_xport *rmi_spi =
+ container_of(xport, struct rmi_spi_xport, xport);
+ struct rmi_spi_cmd cmd;
+ int ret;
+
+ mutex_lock(&rmi_spi->page_mutex);
+
+ if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
+ ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
+ if (ret)
+ goto exit;
+ }
+
+ cmd.op = RMI_SPI_WRITE;
+ cmd.addr = addr;
+
+ ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);
+
+exit:
+ mutex_unlock(&rmi_spi->page_mutex);
+ return ret;
+}
+
+static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
+ void *buf, size_t len)
+{
+ struct rmi_spi_xport *rmi_spi =
+ container_of(xport, struct rmi_spi_xport, xport);
+ struct rmi_spi_cmd cmd;
+ int ret;
+
+ mutex_lock(&rmi_spi->page_mutex);
+
+ if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
+ ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
+ if (ret)
+ goto exit;
+ }
+
+ cmd.op = RMI_SPI_READ;
+ cmd.addr = addr;
+
+ ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);
+
+exit:
+ mutex_unlock(&rmi_spi->page_mutex);
+ return ret;
+}
+
+static const struct rmi_transport_ops rmi_spi_ops = {
+ .write_block = rmi_spi_write_block,
+ .read_block = rmi_spi_read_block,
+};
+
+static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
+{
+ struct rmi_spi_xport *rmi_spi = dev_id;
+ struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
+ int ret;
+
+ ret = rmi_process_interrupt_requests(rmi_dev);
+ if (ret)
+ rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
+ "Failed to process interrupt request: %d\n", ret);
+
+ return IRQ_HANDLED;
+}
+
+static int rmi_spi_init_irq(struct spi_device *spi)
+{
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
+ int ret;
+
+ if (!irq_flags)
+ irq_flags = IRQF_TRIGGER_LOW;
+
+ ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
+ rmi_spi_irq, irq_flags | IRQF_ONESHOT,
+ dev_name(&spi->dev), rmi_spi);
+ if (ret < 0) {
+ dev_warn(&spi->dev, "Failed to register interrupt %d\n",
+ rmi_spi->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int rmi_spi_of_probe(struct spi_device *spi,
+ struct rmi_device_platform_data *pdata)
+{
+ struct device *dev = &spi->dev;
+ int retval;
+
+ retval = rmi_of_property_read_u32(dev,
+ &pdata->spi_data.read_delay_us,
+ "spi-rx-delay-us", 1);
+ if (retval)
+ return retval;
+
+ retval = rmi_of_property_read_u32(dev,
+ &pdata->spi_data.write_delay_us,
+ "spi-tx-delay-us", 1);
+ if (retval)
+ return retval;
+
+ return 0;
+}
+
+static const struct of_device_id rmi_spi_of_match[] = {
+ { .compatible = "syna,rmi4-spi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
+#else
+static inline int rmi_spi_of_probe(struct spi_device *spi,
+ struct rmi_device_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
+static int rmi_spi_probe(struct spi_device *spi)
+{
+ struct rmi_spi_xport *rmi_spi;
+ struct rmi_device_platform_data *pdata;
+ struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
+ int retval;
+
+ if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
+ return -EINVAL;
+
+ rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
+ GFP_KERNEL);
+ if (!rmi_spi)
+ return -ENOMEM;
+
+ pdata = &rmi_spi->xport.pdata;
+
+ if (spi->dev.of_node) {
+ retval = rmi_spi_of_probe(spi, pdata);
+ if (retval)
+ return retval;
+ } else if (spi_pdata) {
+ *pdata = *spi_pdata;
+ }
+
+ if (pdata->spi_data.bits_per_word)
+ spi->bits_per_word = pdata->spi_data.bits_per_word;
+
+ if (pdata->spi_data.mode)
+ spi->mode = pdata->spi_data.mode;
+
+ retval = spi_setup(spi);
+ if (retval < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return retval;
+ }
+
+ if (spi->irq > 0)
+ rmi_spi->irq = spi->irq;
+
+ rmi_spi->spi = spi;
+ mutex_init(&rmi_spi->page_mutex);
+
+ rmi_spi->xport.dev = &spi->dev;
+ rmi_spi->xport.proto_name = "spi";
+ rmi_spi->xport.ops = &rmi_spi_ops;
+
+ spi_set_drvdata(spi, rmi_spi);
+
+ retval = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
+ if (retval)
+ return retval;
+
+ /*
+ * Setting the page to zero will (a) make sure the PSR is in a
+ * known state, and (b) make sure we can talk to the device.
+ */
+ retval = rmi_set_page(rmi_spi, 0);
+ if (retval) {
+ dev_err(&spi->dev, "Failed to set page select to 0.\n");
+ return retval;
+ }
+
+ retval = rmi_register_transport_device(&rmi_spi->xport);
+ if (retval) {
+ dev_err(&spi->dev, "failed to register transport.\n");
+ return retval;
+ }
+
+ retval = rmi_spi_init_irq(spi);
+ if (retval < 0)
+ return retval;
+
+ dev_info(&spi->dev, "registered RMI SPI driver\n");
+ return 0;
+}
+
+static int rmi_spi_remove(struct spi_device *spi)
+{
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+
+ rmi_unregister_transport_device(&rmi_spi->xport);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rmi_spi_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to suspend device: %d\n", ret);
+
+ disable_irq(rmi_spi->irq);
+ if (device_may_wakeup(&spi->dev)) {
+ ret = enable_irq_wake(rmi_spi->irq);
+ if (ret)
+ dev_warn(dev, "Failed to enable irq for wake: %d\n",
+ ret);
+ }
+ return ret;
+}
+
+static int rmi_spi_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int ret;
+
+ enable_irq(rmi_spi->irq);
+ if (device_may_wakeup(&spi->dev)) {
+ ret = disable_irq_wake(rmi_spi->irq);
+ if (ret)
+ dev_warn(dev, "Failed to disable irq for wake: %d\n",
+ ret);
+ }
+
+ ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int rmi_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int ret;
+
+ ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ disable_irq(rmi_spi->irq);
+
+ return 0;
+}
+
+static int rmi_spi_runtime_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
+ int ret;
+
+ enable_irq(rmi_spi->irq);
+
+ ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+ if (ret)
+ dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rmi_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
+ SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
+ NULL)
+};
+
+static const struct spi_device_id rmi_id[] = {
+ { "rmi4_spi", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, rmi_id);
+
+static struct spi_driver rmi_spi_driver = {
+ .driver = {
+ .name = "rmi4_spi",
+ .pm = &rmi_spi_pm,
+ .of_match_table = of_match_ptr(rmi_spi_of_match),
+ },
+ .id_table = rmi_id,
+ .probe = rmi_spi_probe,
+ .remove = rmi_spi_remove,
+};
+
+module_spi_driver(rmi_spi_driver);
+
+MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
+MODULE_DESCRIPTION("RMI SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RMI_DRIVER_VERSION);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 66c62641b59a..8ecdc38fd489 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -334,7 +334,7 @@ config TOUCHSCREEN_FUJITSU
config TOUCHSCREEN_GOODIX
tristate "Goodix I2C touchscreen"
depends on I2C
- depends on GPIOLIB
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have the Goodix touchscreen (such as one
installed in Onda v975w tablets) connected to your
@@ -491,6 +491,17 @@ config TOUCHSCREEN_MMS114
To compile this driver as a module, choose M here: the
module will be called mms114.
+config TOUCHSCREEN_MELFAS_MIP4
+ tristate "MELFAS MIP4 Touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a MELFAS MIP4 Touchscreen device.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here:
+ the module will be called melfas_mip4.
+
config TOUCHSCREEN_MTOUCH
tristate "MicroTouch serial touchscreens"
select SERIO
@@ -822,6 +833,15 @@ config TOUCHSCREEN_USB_COMPOSITE
To compile this driver as a module, choose M here: the
module will be called usbtouchscreen.
+config TOUCHSCREEN_MX25
+ tristate "Freescale i.MX25 touchscreen input driver"
+ depends on MFD_MX25_TSADC
+ help
+ Enable support for a touchscreen connected to your i.MX25.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl-imx25-tcq.
+
config TOUCHSCREEN_MC13783
tristate "Freescale MC13783 touchscreen input driver"
depends on MFD_MC13XXX
@@ -941,6 +961,7 @@ config TOUCHSCREEN_TOUCHIT213
config TOUCHSCREEN_TS4800
tristate "TS-4800 touchscreen"
depends on HAS_IOMEM && OF
+ depends on SOC_IMX51 || COMPILE_TEST
select MFD_SYSCON
select INPUT_POLLDEV
help
@@ -1112,7 +1133,8 @@ config TOUCHSCREEN_ZFORCE
config TOUCHSCREEN_COLIBRI_VF50
tristate "Toradex Colibri on board touchscreen driver"
- depends on GPIOLIB && IIO && VF610_ADC
+ depends on IIO && VF610_ADC
+ depends on GPIOLIB || COMPILE_TEST
help
Say Y here if you have a Colibri VF50 and plan to use
the on-board provided 4-wire touchscreen driver.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 968ff12e3132..f42975e719e0 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -46,8 +46,10 @@ obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
obj-$(CONFIG_TOUCHSCREEN_IPROC) += bcm_iproc_tsc.o
obj-$(CONFIG_TOUCHSCREEN_LPC32XX) += lpc32xx_ts.o
obj-$(CONFIG_TOUCHSCREEN_MAX11801) += max11801_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MX25) += fsl-imx25-tcq.o
obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MELFAS_MIP4) += melfas_mip4.o
obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
obj-$(CONFIG_TOUCHSCREEN_MMS114) += mms114.o
obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index d66962c5b1c2..58f72e0246ab 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/of.h>
#include <linux/pm.h>
#include "ad7879.h"
@@ -91,10 +92,19 @@ static const struct i2c_device_id ad7879_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ad7879_id);
+#ifdef CONFIG_OF
+static const struct of_device_id ad7879_i2c_dt_ids[] = {
+ { .compatible = "adi,ad7879-1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7879_i2c_dt_ids);
+#endif
+
static struct i2c_driver ad7879_i2c_driver = {
.driver = {
.name = "ad7879",
.pm = &ad7879_pm_ops,
+ .of_match_table = of_match_ptr(ad7879_i2c_dt_ids),
},
.probe = ad7879_i2c_probe,
.remove = ad7879_i2c_remove,
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index 48033c2689ab..d42b6b9af191 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -10,6 +10,7 @@
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
+#include <linux/of.h>
#include "ad7879.h"
@@ -146,10 +147,19 @@ static int ad7879_spi_remove(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id ad7879_spi_dt_ids[] = {
+ { .compatible = "adi,ad7879", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7879_spi_dt_ids);
+#endif
+
static struct spi_driver ad7879_spi_driver = {
.driver = {
.name = "ad7879",
.pm = &ad7879_pm_ops,
+ .of_match_table = of_match_ptr(ad7879_spi_dt_ids),
},
.probe = ad7879_spi_probe,
.remove = ad7879_spi_remove,
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 16b5cc2196f2..69d299d5dd00 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -31,7 +31,8 @@
#include <linux/i2c.h>
#include <linux/gpio.h>
-#include <linux/spi/ad7879.h>
+#include <linux/input/touchscreen.h>
+#include <linux/platform_data/ad7879.h>
#include <linux/module.h>
#include "ad7879.h"
@@ -94,8 +95,8 @@
#define AD7879_TEMP_BIT (1<<1)
enum {
- AD7879_SEQ_XPOS = 0,
- AD7879_SEQ_YPOS = 1,
+ AD7879_SEQ_YPOS = 0,
+ AD7879_SEQ_XPOS = 1,
AD7879_SEQ_Z1 = 2,
AD7879_SEQ_Z2 = 3,
AD7879_NR_SENSE = 4,
@@ -126,7 +127,6 @@ struct ad7879 {
u8 pen_down_acc_interval;
u8 median;
u16 x_plate_ohms;
- u16 pressure_max;
u16 cmd_crtl1;
u16 cmd_crtl2;
u16 cmd_crtl3;
@@ -170,10 +170,10 @@ static int ad7879_report(struct ad7879 *ts)
* filter. The combination of these two techniques provides a robust
* solution, discarding the spurious noise in the signal and keeping
* only the data of interest. The size of both filters is
- * programmable. (dev.platform_data, see linux/spi/ad7879.h) Other
- * user-programmable conversion controls include variable acquisition
- * time, and first conversion delay. Up to 16 averages can be taken
- * per conversion.
+ * programmable. (dev.platform_data, see linux/platform_data/ad7879.h)
+ * Other user-programmable conversion controls include variable
+ * acquisition time, and first conversion delay. Up to 16 averages can
+ * be taken per conversion.
*/
if (likely(x && z1)) {
@@ -186,7 +186,7 @@ static int ad7879_report(struct ad7879 *ts)
* Sample found inconsistent, pressure is beyond
* the maximum. Don't report it to user space.
*/
- if (Rt > ts->pressure_max)
+ if (Rt > input_abs_get_max(input_dev, ABS_PRESSURE))
return -EINVAL;
/*
@@ -469,7 +469,7 @@ static void ad7879_gpio_remove(struct ad7879 *ts)
{
const struct ad7879_platform_data *pdata = dev_get_platdata(ts->dev);
- if (pdata->gpio_export)
+ if (pdata && pdata->gpio_export)
gpiochip_remove(&ts->gc);
}
@@ -485,6 +485,32 @@ static inline void ad7879_gpio_remove(struct ad7879 *ts)
}
#endif
+static int ad7879_parse_dt(struct device *dev, struct ad7879 *ts)
+{
+ int err;
+ u32 tmp;
+
+ err = device_property_read_u32(dev, "adi,resistance-plate-x", &tmp);
+ if (err) {
+ dev_err(dev, "failed to get resistance-plate-x property\n");
+ return err;
+ }
+ ts->x_plate_ohms = (u16)tmp;
+
+ device_property_read_u8(dev, "adi,first-conversion-delay",
+ &ts->first_conversion_delay);
+ device_property_read_u8(dev, "adi,acquisition-time",
+ &ts->acquisition_time);
+ device_property_read_u8(dev, "adi,median-filter-size", &ts->median);
+ device_property_read_u8(dev, "adi,averaging", &ts->averaging);
+ device_property_read_u8(dev, "adi,conversion-interval",
+ &ts->pen_down_acc_interval);
+
+ ts->swap_xy = device_property_read_bool(dev, "touchscreen-swapped-x-y");
+
+ return 0;
+}
+
struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
const struct ad7879_bus_ops *bops)
{
@@ -495,41 +521,44 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
u16 revid;
if (!irq) {
- dev_err(dev, "no IRQ?\n");
- err = -EINVAL;
- goto err_out;
+ dev_err(dev, "No IRQ specified\n");
+ return ERR_PTR(-EINVAL);
}
- if (!pdata) {
- dev_err(dev, "no platform data?\n");
- err = -EINVAL;
- goto err_out;
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return ERR_PTR(-ENOMEM);
+
+ if (pdata) {
+ /* Platform data uses swapped axes (backward compatibility) */
+ ts->swap_xy = !pdata->swap_xy;
+
+ ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
+
+ ts->first_conversion_delay = pdata->first_conversion_delay;
+ ts->acquisition_time = pdata->acquisition_time;
+ ts->averaging = pdata->averaging;
+ ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
+ ts->median = pdata->median;
+ } else if (dev->of_node) {
+ ad7879_parse_dt(dev, ts);
+ } else {
+ dev_err(dev, "No platform data\n");
+ return ERR_PTR(-EINVAL);
}
- ts = kzalloc(sizeof(*ts), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- err = -ENOMEM;
- goto err_free_mem;
+ input_dev = devm_input_allocate_device(dev);
+ if (!input_dev) {
+ dev_err(dev, "Failed to allocate input device\n");
+ return ERR_PTR(-ENOMEM);
}
ts->bops = bops;
ts->dev = dev;
ts->input = input_dev;
ts->irq = irq;
- ts->swap_xy = pdata->swap_xy;
setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts);
-
- ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
- ts->pressure_max = pdata->pressure_max ? : ~0;
-
- ts->first_conversion_delay = pdata->first_conversion_delay;
- ts->acquisition_time = pdata->acquisition_time;
- ts->averaging = pdata->averaging;
- ts->pen_down_acc_interval = pdata->pen_down_acc_interval;
- ts->median = pdata->median;
-
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
input_dev->name = "AD7879 Touchscreen";
@@ -550,21 +579,33 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_X,
- pdata->x_min ? : 0,
- pdata->x_max ? : MAX_12BIT,
- 0, 0);
- input_set_abs_params(input_dev, ABS_Y,
- pdata->y_min ? : 0,
- pdata->y_max ? : MAX_12BIT,
- 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE,
- pdata->pressure_min, pdata->pressure_max, 0, 0);
+ if (pdata) {
+ input_set_abs_params(input_dev, ABS_X,
+ pdata->x_min ? : 0,
+ pdata->x_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ pdata->y_min ? : 0,
+ pdata->y_max ? : MAX_12BIT,
+ 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ pdata->pressure_min,
+ pdata->pressure_max ? : ~0,
+ 0, 0);
+ } else {
+ input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+ touchscreen_parse_properties(input_dev, false);
+ if (!input_abs_get_max(input_dev, ABS_PRESSURE)) {
+ dev_err(dev, "Touchscreen pressure is not specified\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
err = ad7879_write(ts, AD7879_REG_CTRL2, AD7879_RESET);
if (err < 0) {
dev_err(dev, "Failed to write %s\n", input_dev->name);
- goto err_free_mem;
+ return ERR_PTR(err);
}
revid = ad7879_read(ts, AD7879_REG_REVID);
@@ -573,8 +614,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
if (input_dev->id.product != devid) {
dev_err(dev, "Failed to probe %s (%x vs %x)\n",
input_dev->name, devid, revid);
- err = -ENODEV;
- goto err_free_mem;
+ return ERR_PTR(-ENODEV);
}
ts->cmd_crtl3 = AD7879_YPLUS_BIT |
@@ -594,23 +634,25 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
AD7879_ACQ(ts->acquisition_time) |
AD7879_TMR(ts->pen_down_acc_interval);
- err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), ts);
+ err = devm_request_threaded_irq(dev, ts->irq, NULL, ad7879_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), ts);
if (err) {
- dev_err(dev, "irq %d busy?\n", ts->irq);
- goto err_free_mem;
+ dev_err(dev, "Failed to request IRQ: %d\n", err);
+ return ERR_PTR(err);
}
__ad7879_disable(ts);
err = sysfs_create_group(&dev->kobj, &ad7879_attr_group);
if (err)
- goto err_free_irq;
+ goto err_out;
- err = ad7879_gpio_add(ts, pdata);
- if (err)
- goto err_remove_attr;
+ if (pdata) {
+ err = ad7879_gpio_add(ts, pdata);
+ if (err)
+ goto err_remove_attr;
+ }
err = input_register_device(input_dev);
if (err)
@@ -622,11 +664,6 @@ err_remove_gpio:
ad7879_gpio_remove(ts);
err_remove_attr:
sysfs_remove_group(&dev->kobj, &ad7879_attr_group);
-err_free_irq:
- free_irq(ts->irq, ts);
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts);
err_out:
return ERR_PTR(err);
}
@@ -636,9 +673,6 @@ void ad7879_remove(struct ad7879 *ts)
{
ad7879_gpio_remove(ts);
sysfs_remove_group(&ts->dev->kobj, &ad7879_attr_group);
- free_irq(ts->irq, ts);
- input_unregister_device(ts->input);
- kfree(ts);
}
EXPORT_SYMBOL(ad7879_remove);
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 5b74e8b84e79..91cda8f8119d 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -30,9 +30,12 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/property.h>
+#include <linux/gpio/consumer.h>
#include "cyttsp_core.h"
@@ -57,6 +60,7 @@
#define CY_DELAY_DFLT 20 /* ms */
#define CY_DELAY_MAX 500
#define CY_ACT_DIST_DFLT 0xF8
+#define CY_ACT_DIST_MASK 0x0F
#define CY_HNDSHK_BIT 0x80
/* device mode bits */
#define CY_OPERATE_MODE 0x00
@@ -120,7 +124,7 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
static int cyttsp_handshake(struct cyttsp *ts)
{
- if (ts->pdata->use_hndshk)
+ if (ts->use_hndshk)
return ttsp_send_command(ts,
ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
@@ -142,9 +146,9 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
u8 bl_cmd[sizeof(bl_command)];
memcpy(bl_cmd, bl_command, sizeof(bl_command));
- if (ts->pdata->bl_keys)
+ if (ts->bl_keys)
memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
- ts->pdata->bl_keys, CY_NUM_BL_KEYS);
+ ts->bl_keys, CY_NUM_BL_KEYS);
error = ttsp_write_block_data(ts, CY_REG_BASE,
sizeof(bl_cmd), bl_cmd);
@@ -217,14 +221,14 @@ static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
{
int retval = 0;
- if (ts->pdata->act_intrvl != CY_ACT_INTRVL_DFLT ||
- ts->pdata->tch_tmout != CY_TCH_TMOUT_DFLT ||
- ts->pdata->lp_intrvl != CY_LP_INTRVL_DFLT) {
+ if (ts->act_intrvl != CY_ACT_INTRVL_DFLT ||
+ ts->tch_tmout != CY_TCH_TMOUT_DFLT ||
+ ts->lp_intrvl != CY_LP_INTRVL_DFLT) {
u8 intrvl_ray[] = {
- ts->pdata->act_intrvl,
- ts->pdata->tch_tmout,
- ts->pdata->lp_intrvl
+ ts->act_intrvl,
+ ts->tch_tmout,
+ ts->lp_intrvl
};
/* set intrvl registers */
@@ -236,6 +240,16 @@ static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
return retval;
}
+static void cyttsp_hard_reset(struct cyttsp *ts)
+{
+ if (ts->reset_gpio) {
+ gpiod_set_value_cansleep(ts->reset_gpio, 1);
+ msleep(CY_DELAY_DFLT);
+ gpiod_set_value_cansleep(ts->reset_gpio, 0);
+ msleep(CY_DELAY_DFLT);
+ }
+}
+
static int cyttsp_soft_reset(struct cyttsp *ts)
{
unsigned long timeout;
@@ -263,7 +277,7 @@ out:
static int cyttsp_act_dist_setup(struct cyttsp *ts)
{
- u8 act_dist_setup = ts->pdata->act_dist;
+ u8 act_dist_setup = ts->act_dist;
/* Init gesture; active distance setup */
return ttsp_write_block_data(ts, CY_REG_ACT_DIST,
@@ -528,45 +542,110 @@ static void cyttsp_close(struct input_dev *dev)
cyttsp_disable(ts);
}
+static int cyttsp_parse_properties(struct cyttsp *ts)
+{
+ struct device *dev = ts->dev;
+ u32 dt_value;
+ int ret;
+
+ ts->bl_keys = devm_kzalloc(dev, CY_NUM_BL_KEYS, GFP_KERNEL);
+ if (!ts->bl_keys)
+ return -ENOMEM;
+
+ /* Set some default values */
+ ts->use_hndshk = false;
+ ts->act_dist = CY_ACT_DIST_DFLT;
+ ts->act_intrvl = CY_ACT_INTRVL_DFLT;
+ ts->tch_tmout = CY_TCH_TMOUT_DFLT;
+ ts->lp_intrvl = CY_LP_INTRVL_DFLT;
+
+ ret = device_property_read_u8_array(dev, "bootloader-key",
+ ts->bl_keys, CY_NUM_BL_KEYS);
+ if (ret) {
+ dev_err(dev,
+ "bootloader-key property could not be retrieved\n");
+ return ret;
+ }
+
+ ts->use_hndshk = device_property_present(dev, "use-handshake");
+
+ if (!device_property_read_u32(dev, "active-distance", &dt_value)) {
+ if (dt_value > 15) {
+ dev_err(dev, "active-distance (%u) must be [0-15]\n",
+ dt_value);
+ return -EINVAL;
+ }
+ ts->act_dist &= ~CY_ACT_DIST_MASK;
+ ts->act_dist |= dt_value;
+ }
+
+ if (!device_property_read_u32(dev, "active-interval-ms", &dt_value)) {
+ if (dt_value > 255) {
+ dev_err(dev, "active-interval-ms (%u) must be [0-255]\n",
+ dt_value);
+ return -EINVAL;
+ }
+ ts->act_intrvl = dt_value;
+ }
+
+ if (!device_property_read_u32(dev, "lowpower-interval-ms", &dt_value)) {
+ if (dt_value > 2550) {
+ dev_err(dev, "lowpower-interval-ms (%u) must be [0-2550]\n",
+ dt_value);
+ return -EINVAL;
+ }
+ /* Register value is expressed in 0.01s / bit */
+ ts->lp_intrvl = dt_value / 10;
+ }
+
+ if (!device_property_read_u32(dev, "touch-timeout-ms", &dt_value)) {
+ if (dt_value > 2550) {
+ dev_err(dev, "touch-timeout-ms (%u) must be [0-2550]\n",
+ dt_value);
+ return -EINVAL;
+ }
+ /* Register value is expressed in 0.01s / bit */
+ ts->tch_tmout = dt_value / 10;
+ }
+
+ return 0;
+}
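
The interval properties above arrive in milliseconds, but the hardware registers count in 0.01 s per bit, hence the range checks against 2550 and the divide-by-ten. A standalone sketch of that conversion (function name hypothetical):

#include <stdint.h>
#include <stdio.h>

/* ms -> 0.01 s/bit register units, range-checked as the driver does. */
static int interval_ms_to_reg(uint32_t ms, uint8_t *reg)
{
	if (ms > 2550)          /* 255 * 10 ms is the register maximum */
		return -1;
	*reg = ms / 10;
	return 0;
}

int main(void)
{
	uint8_t reg;

	if (!interval_ms_to_reg(300, &reg))
		printf("300 ms -> register value %u\n", reg);  /* -> 30 */
	return 0;
}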
+
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size)
{
- const struct cyttsp_platform_data *pdata = dev_get_platdata(dev);
struct cyttsp *ts;
struct input_dev *input_dev;
int error;
- if (!pdata || !pdata->name || irq <= 0) {
- error = -EINVAL;
- goto err_out;
- }
+ ts = devm_kzalloc(dev, sizeof(*ts) + xfer_buf_size, GFP_KERNEL);
+ if (!ts)
+ return ERR_PTR(-ENOMEM);
- ts = kzalloc(sizeof(*ts) + xfer_buf_size, GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ts || !input_dev) {
- error = -ENOMEM;
- goto err_free_mem;
- }
+ input_dev = devm_input_allocate_device(dev);
+ if (!input_dev)
+ return ERR_PTR(-ENOMEM);
ts->dev = dev;
ts->input = input_dev;
- ts->pdata = dev_get_platdata(dev);
ts->bus_ops = bus_ops;
ts->irq = irq;
+ ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ts->reset_gpio)) {
+ error = PTR_ERR(ts->reset_gpio);
+ dev_err(dev, "Failed to request reset gpio, error %d\n", error);
+ return ERR_PTR(error);
+ }
+
+ error = cyttsp_parse_properties(ts);
+ if (error)
+ return ERR_PTR(error);
+
init_completion(&ts->bl_ready);
snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
- if (pdata->init) {
- error = pdata->init();
- if (error) {
- dev_err(ts->dev, "platform init failed, err: %d\n",
- error);
- goto err_free_mem;
- }
- }
-
- input_dev->name = pdata->name;
+ input_dev->name = "Cypress TTSP TouchScreen";
input_dev->phys = ts->phys;
input_dev->id.bustype = bus_ops->bustype;
input_dev->dev.parent = ts->dev;
@@ -576,63 +655,44 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
input_set_drvdata(input_dev, ts);
- __set_bit(EV_ABS, input_dev->evbit);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, pdata->maxx, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, pdata->maxy, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
- 0, CY_MAXZ, 0, 0);
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ touchscreen_parse_properties(input_dev, true);
- input_mt_init_slots(input_dev, CY_MAX_ID, 0);
+ error = input_mt_init_slots(input_dev, CY_MAX_ID, 0);
+ if (error) {
+ dev_err(dev, "Unable to init MT slots.\n");
+ return ERR_PTR(error);
+ }
- error = request_threaded_irq(ts->irq, NULL, cyttsp_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- pdata->name, ts);
+ error = devm_request_threaded_irq(dev, ts->irq, NULL, cyttsp_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "cyttsp", ts);
if (error) {
dev_err(ts->dev, "failed to request IRQ %d, err: %d\n",
ts->irq, error);
- goto err_platform_exit;
+ return ERR_PTR(error);
}
disable_irq(ts->irq);
+ cyttsp_hard_reset(ts);
+
error = cyttsp_power_on(ts);
if (error)
- goto err_free_irq;
+ return ERR_PTR(error);
error = input_register_device(input_dev);
if (error) {
dev_err(ts->dev, "failed to register input device: %d\n",
error);
- goto err_free_irq;
+ return ERR_PTR(error);
}
return ts;
-
-err_free_irq:
- free_irq(ts->irq, ts);
-err_platform_exit:
- if (pdata->exit)
- pdata->exit();
-err_free_mem:
- input_free_device(input_dev);
- kfree(ts);
-err_out:
- return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(cyttsp_probe);
-void cyttsp_remove(struct cyttsp *ts)
-{
- free_irq(ts->irq, ts);
- input_unregister_device(ts->input);
- if (ts->pdata->exit)
- ts->pdata->exit();
- kfree(ts);
-}
-EXPORT_SYMBOL_GPL(cyttsp_remove);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver core");
MODULE_AUTHOR("Cypress");
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 07074110a902..7835e2bacf5a 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -129,7 +129,6 @@ struct cyttsp {
int irq;
struct input_dev *input;
char phys[32];
- const struct cyttsp_platform_data *pdata;
const struct cyttsp_bus_ops *bus_ops;
struct cyttsp_bootloader_data bl_data;
struct cyttsp_sysinfo_data sysinfo_data;
@@ -138,12 +137,19 @@ struct cyttsp {
enum cyttsp_state state;
bool suspended;
+ struct gpio_desc *reset_gpio;
+ bool use_hndshk;
+ u8 act_dist;
+ u8 act_intrvl;
+ u8 tch_tmout;
+ u8 lp_intrvl;
+ u8 *bl_keys;
+
u8 xfer_buf[] ____cacheline_aligned;
};
struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
struct device *dev, int irq, size_t xfer_buf_size);
-void cyttsp_remove(struct cyttsp *ts);
int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
u8 length, const void *values);
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index eee51b3f2e3f..1edfdba96ede 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -56,15 +56,6 @@ static int cyttsp_i2c_probe(struct i2c_client *client,
return 0;
}
-static int cyttsp_i2c_remove(struct i2c_client *client)
-{
- struct cyttsp *ts = i2c_get_clientdata(client);
-
- cyttsp_remove(ts);
-
- return 0;
-}
-
static const struct i2c_device_id cyttsp_i2c_id[] = {
{ CY_I2C_NAME, 0 },
{ }
@@ -77,7 +68,6 @@ static struct i2c_driver cyttsp_i2c_driver = {
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_i2c_probe,
- .remove = cyttsp_i2c_remove,
.id_table = cyttsp_i2c_id,
};
diff --git a/drivers/input/touchscreen/cyttsp_spi.c b/drivers/input/touchscreen/cyttsp_spi.c
index bbeeb2488b57..3c9d18b1b6ef 100644
--- a/drivers/input/touchscreen/cyttsp_spi.c
+++ b/drivers/input/touchscreen/cyttsp_spi.c
@@ -170,22 +170,12 @@ static int cyttsp_spi_probe(struct spi_device *spi)
return 0;
}
-static int cyttsp_spi_remove(struct spi_device *spi)
-{
- struct cyttsp *ts = spi_get_drvdata(spi);
-
- cyttsp_remove(ts);
-
- return 0;
-}
-
static struct spi_driver cyttsp_spi_driver = {
.driver = {
.name = CY_SPI_NAME,
.pm = &cyttsp_pm_ops,
},
.probe = cyttsp_spi_probe,
- .remove = cyttsp_spi_remove,
};
module_spi_driver(cyttsp_spi_driver);
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
new file mode 100644
index 000000000000..fe9877a6af9e
--- /dev/null
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -0,0 +1,596 @@
+/*
+ * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * Based on driver from 2011:
+ * Juergen Beisert, Pengutronix <kernel@pengutronix.de>
+ *
+ * This is the driver for the imx25 TCQ (Touchscreen Conversion Queue)
+ * connected to the imx25 ADC.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/imx25-tsadc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static const char mx25_tcq_name[] = "mx25-tcq";
+
+enum mx25_tcq_mode {
+ MX25_TS_4WIRE,
+};
+
+struct mx25_tcq_priv {
+ struct regmap *regs;
+ struct regmap *core_regs;
+ struct input_dev *idev;
+ enum mx25_tcq_mode mode;
+ unsigned int pen_threshold;
+ unsigned int sample_count;
+ unsigned int expected_samples;
+ unsigned int pen_debounce;
+ unsigned int settling_time;
+ struct clk *clk;
+ int irq;
+ struct device *dev;
+};
+
+static struct regmap_config mx25_tcq_regconfig = {
+ .fast_io = true,
+ .max_register = 0x5c,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static const struct of_device_id mx25_tcq_ids[] = {
+ { .compatible = "fsl,imx25-tcq", },
+ { /* Sentinel */ }
+};
+
+#define TSC_4WIRE_PRE_INDEX 0
+#define TSC_4WIRE_X_INDEX 1
+#define TSC_4WIRE_Y_INDEX 2
+#define TSC_4WIRE_POST_INDEX 3
+#define TSC_4WIRE_LEAVE 4
+
+#define MX25_TSC_DEF_THRESHOLD 80
+#define TSC_MAX_SAMPLES 16
+
+#define MX25_TSC_REPEAT_WAIT 14
+
+enum mx25_adc_configurations {
+ MX25_CFG_PRECHARGE = 0,
+ MX25_CFG_TOUCH_DETECT,
+ MX25_CFG_X_MEASUREMENT,
+ MX25_CFG_Y_MEASUREMENT,
+};
+
+#define MX25_PRECHARGE_VALUE (\
+ MX25_ADCQ_CFG_YPLL_OFF | \
+ MX25_ADCQ_CFG_XNUR_OFF | \
+ MX25_ADCQ_CFG_XPUL_HIGH | \
+ MX25_ADCQ_CFG_REFP_INT | \
+ MX25_ADCQ_CFG_IN_XP | \
+ MX25_ADCQ_CFG_REFN_NGND2 | \
+ MX25_ADCQ_CFG_IGS)
+
+#define MX25_TOUCH_DETECT_VALUE (\
+ MX25_ADCQ_CFG_YNLR | \
+ MX25_ADCQ_CFG_YPLL_OFF | \
+ MX25_ADCQ_CFG_XNUR_OFF | \
+ MX25_ADCQ_CFG_XPUL_OFF | \
+ MX25_ADCQ_CFG_REFP_INT | \
+ MX25_ADCQ_CFG_IN_XP | \
+ MX25_ADCQ_CFG_REFN_NGND2 | \
+ MX25_ADCQ_CFG_PENIACK)
+
+static void imx25_setup_queue_cfgs(struct mx25_tcq_priv *priv,
+ unsigned int settling_cnt)
+{
+ u32 precharge_cfg =
+ MX25_PRECHARGE_VALUE |
+ MX25_ADCQ_CFG_SETTLING_TIME(settling_cnt);
+ u32 touch_detect_cfg =
+ MX25_TOUCH_DETECT_VALUE |
+ MX25_ADCQ_CFG_NOS(1) |
+ MX25_ADCQ_CFG_SETTLING_TIME(settling_cnt);
+
+ regmap_write(priv->core_regs, MX25_TSC_TICR, precharge_cfg);
+
+ /* PRECHARGE */
+ regmap_write(priv->regs, MX25_ADCQ_CFG(MX25_CFG_PRECHARGE),
+ precharge_cfg);
+
+ /* TOUCH_DETECT */
+ regmap_write(priv->regs, MX25_ADCQ_CFG(MX25_CFG_TOUCH_DETECT),
+ touch_detect_cfg);
+
+ /* X Measurement */
+ regmap_write(priv->regs, MX25_ADCQ_CFG(MX25_CFG_X_MEASUREMENT),
+ MX25_ADCQ_CFG_YPLL_OFF |
+ MX25_ADCQ_CFG_XNUR_LOW |
+ MX25_ADCQ_CFG_XPUL_HIGH |
+ MX25_ADCQ_CFG_REFP_XP |
+ MX25_ADCQ_CFG_IN_YP |
+ MX25_ADCQ_CFG_REFN_XN |
+ MX25_ADCQ_CFG_NOS(priv->sample_count) |
+ MX25_ADCQ_CFG_SETTLING_TIME(settling_cnt));
+
+ /* Y Measurement */
+ regmap_write(priv->regs, MX25_ADCQ_CFG(MX25_CFG_Y_MEASUREMENT),
+ MX25_ADCQ_CFG_YNLR |
+ MX25_ADCQ_CFG_YPLL_HIGH |
+ MX25_ADCQ_CFG_XNUR_OFF |
+ MX25_ADCQ_CFG_XPUL_OFF |
+ MX25_ADCQ_CFG_REFP_YP |
+ MX25_ADCQ_CFG_IN_XP |
+ MX25_ADCQ_CFG_REFN_YN |
+ MX25_ADCQ_CFG_NOS(priv->sample_count) |
+ MX25_ADCQ_CFG_SETTLING_TIME(settling_cnt));
+
+ /* Enable the touch detection right now */
+ regmap_write(priv->core_regs, MX25_TSC_TICR, touch_detect_cfg |
+ MX25_ADCQ_CFG_IGS);
+}
+
+static int imx25_setup_queue_4wire(struct mx25_tcq_priv *priv,
+ unsigned settling_cnt, int *items)
+{
+ imx25_setup_queue_cfgs(priv, settling_cnt);
+
+ /* Setup the conversion queue */
+ regmap_write(priv->regs, MX25_ADCQ_ITEM_7_0,
+ MX25_ADCQ_ITEM(0, MX25_CFG_PRECHARGE) |
+ MX25_ADCQ_ITEM(1, MX25_CFG_TOUCH_DETECT) |
+ MX25_ADCQ_ITEM(2, MX25_CFG_X_MEASUREMENT) |
+ MX25_ADCQ_ITEM(3, MX25_CFG_Y_MEASUREMENT) |
+ MX25_ADCQ_ITEM(4, MX25_CFG_PRECHARGE) |
+ MX25_ADCQ_ITEM(5, MX25_CFG_TOUCH_DETECT));
+
+ /*
+ * We measure X/Y with 'sample_count' number of samples and execute a
+ * touch detection twice, with 1 sample each
+ */
+ priv->expected_samples = priv->sample_count * 2 + 2;
+ *items = 6;
+
+ return 0;
+}
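
With the queue laid out above, one pass through the FIFO yields sample_count entries for X, sample_count for Y, and one entry for each of the two touch-detect items; a trivial standalone check of that count using the driver's default sample_count of 3:

#include <stdio.h>

int main(void)
{
	unsigned int sample_count = 3;  /* driver default from DT parsing */
	unsigned int expected = sample_count * 2 + 2;  /* X + Y + 2 detects */

	printf("expected FIFO samples per touch event: %u\n", expected); /* 8 */
	return 0;
}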
+
+static void mx25_tcq_disable_touch_irq(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_PDMSK,
+ MX25_ADCQ_CR_PDMSK);
+}
+
+static void mx25_tcq_enable_touch_irq(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_PDMSK, 0);
+}
+
+static void mx25_tcq_disable_fifo_irq(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_FDRY_IRQ,
+ MX25_ADCQ_MR_FDRY_IRQ);
+}
+
+static void mx25_tcq_enable_fifo_irq(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_FDRY_IRQ, 0);
+}
+
+static void mx25_tcq_force_queue_start(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_FQS,
+ MX25_ADCQ_CR_FQS);
+}
+
+static void mx25_tcq_force_queue_stop(struct mx25_tcq_priv *priv)
+{
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_FQS, 0);
+}
+
+static void mx25_tcq_fifo_reset(struct mx25_tcq_priv *priv)
+{
+ u32 tcqcr;
+
+ regmap_read(priv->regs, MX25_ADCQ_CR, &tcqcr);
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FRST,
+ MX25_ADCQ_CR_FRST);
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_FRST, 0);
+ regmap_write(priv->regs, MX25_ADCQ_CR, tcqcr);
+}
+
+static void mx25_tcq_re_enable_touch_detection(struct mx25_tcq_priv *priv)
+{
+ /* stop the queue from looping */
+ mx25_tcq_force_queue_stop(priv);
+
+ /* for a clean touch detection, preload the X plane */
+ regmap_write(priv->core_regs, MX25_TSC_TICR, MX25_PRECHARGE_VALUE);
+
+ /* waste some time now to pre-load the X plate to high voltage */
+ mx25_tcq_fifo_reset(priv);
+
+ /* re-enable the detection right now */
+ regmap_write(priv->core_regs, MX25_TSC_TICR,
+ MX25_TOUCH_DETECT_VALUE | MX25_ADCQ_CFG_IGS);
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_SR, MX25_ADCQ_SR_PD,
+ MX25_ADCQ_SR_PD);
+
+ /* enable the pen down event to be a source for the interrupt */
+ regmap_update_bits(priv->regs, MX25_ADCQ_MR, MX25_ADCQ_MR_PD_IRQ, 0);
+
+ /* let the next IRQ fire when someone touches the touchscreen */
+ mx25_tcq_enable_touch_irq(priv);
+}
+
+static void mx25_tcq_create_event_for_4wire(struct mx25_tcq_priv *priv,
+ u32 *sample_buf,
+ unsigned int samples)
+{
+ unsigned int x_pos = 0;
+ unsigned int y_pos = 0;
+ unsigned int touch_pre = 0;
+ unsigned int touch_post = 0;
+ unsigned int i;
+
+ for (i = 0; i < samples; i++) {
+ unsigned int index = MX25_ADCQ_FIFO_ID(sample_buf[i]);
+ unsigned int val = MX25_ADCQ_FIFO_DATA(sample_buf[i]);
+
+ switch (index) {
+ case 1:
+ touch_pre = val;
+ break;
+ case 2:
+ x_pos += val; /* accumulate; averaged below */
+ break;
+ case 3:
+ y_pos += val; /* accumulate; averaged below */
+ break;
+ case 5:
+ touch_post = val;
+ break;
+ default:
+ dev_dbg(priv->dev, "Dropped samples because of invalid index %d\n",
+ index);
+ return;
+ }
+ }
+
+ if (samples != 0) {
+ /*
+ * only if both touch measures are below a threshold,
+ * the position is valid
+ */
+ if (touch_pre < priv->pen_threshold &&
+ touch_post < priv->pen_threshold) {
+ /* valid samples, generate a report */
+ x_pos /= priv->sample_count;
+ y_pos /= priv->sample_count;
+ input_report_abs(priv->idev, ABS_X, x_pos);
+ input_report_abs(priv->idev, ABS_Y, y_pos);
+ input_report_key(priv->idev, BTN_TOUCH, 1);
+ input_sync(priv->idev);
+
+ /* get next sample */
+ mx25_tcq_enable_fifo_irq(priv);
+ } else if (touch_pre >= priv->pen_threshold &&
+ touch_post >= priv->pen_threshold) {
+ /*
+ * if both samples are invalid,
+ * generate a release report
+ */
+ input_report_key(priv->idev, BTN_TOUCH, 0);
+ input_sync(priv->idev);
+ mx25_tcq_re_enable_touch_detection(priv);
+ } else {
+ /*
+ * if only one of both touch measurements are
+ * below the threshold, still some bouncing
+ * happens. Take additional samples in this
+ * case to be sure
+ */
+ mx25_tcq_enable_fifo_irq(priv);
+ }
+ }
+}
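
The decision logic above averages the accumulated per-axis samples and compares the pre- and post-measurement touch readings against the pen threshold: both below means a valid position, both above means a release, and a disagreement means bouncing, so more samples are taken. A standalone sketch of that classification (names and values hypothetical):

#include <stdio.h>

enum event { EVENT_REPORT, EVENT_RELEASE, EVENT_RESAMPLE };

static enum event classify(unsigned int pre, unsigned int post,
			   unsigned int threshold)
{
	if (pre < threshold && post < threshold)
		return EVENT_REPORT;    /* both below: valid position */
	if (pre >= threshold && post >= threshold)
		return EVENT_RELEASE;   /* both above: pen lifted */
	return EVENT_RESAMPLE;          /* disagreement: still bouncing */
}

int main(void)
{
	unsigned int x_acc = 3000, sample_count = 3;

	printf("avg x: %u\n", x_acc / sample_count);    /* -> 1000 */
	printf("event: %d\n", classify(100, 600, 500)); /* -> EVENT_RESAMPLE */
	return 0;
}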
+
+static irqreturn_t mx25_tcq_irq_thread(int irq, void *dev_id)
+{
+ struct mx25_tcq_priv *priv = dev_id;
+ u32 sample_buf[TSC_MAX_SAMPLES];
+ unsigned int samples;
+ u32 stats;
+ unsigned int i;
+
+ /*
+ * Check how many samples are available. We always have to read exactly
+ * sample_count samples from the fifo, or a multiple of sample_count.
+ * Otherwise we would mix up samples from different touch events.
+ */
+ regmap_read(priv->regs, MX25_ADCQ_SR, &stats);
+ samples = MX25_ADCQ_SR_FDN(stats);
+ samples -= samples % priv->sample_count;
+
+ if (!samples)
+ return IRQ_HANDLED;
+
+ for (i = 0; i != samples; ++i)
+ regmap_read(priv->regs, MX25_ADCQ_FIFO, &sample_buf[i]);
+
+ mx25_tcq_create_event_for_4wire(priv, sample_buf, samples);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mx25_tcq_irq(int irq, void *dev_id)
+{
+ struct mx25_tcq_priv *priv = dev_id;
+ u32 stat;
+ int ret = IRQ_HANDLED;
+
+ regmap_read(priv->regs, MX25_ADCQ_SR, &stat);
+
+ if (stat & (MX25_ADCQ_SR_FRR | MX25_ADCQ_SR_FUR | MX25_ADCQ_SR_FOR))
+ mx25_tcq_re_enable_touch_detection(priv);
+
+ if (stat & MX25_ADCQ_SR_PD) {
+ mx25_tcq_disable_touch_irq(priv);
+ mx25_tcq_force_queue_start(priv);
+ mx25_tcq_enable_fifo_irq(priv);
+ }
+
+ if (stat & MX25_ADCQ_SR_FDRY) {
+ mx25_tcq_disable_fifo_irq(priv);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_SR, MX25_ADCQ_SR_FRR |
+ MX25_ADCQ_SR_FUR | MX25_ADCQ_SR_FOR |
+ MX25_ADCQ_SR_PD,
+ MX25_ADCQ_SR_FRR | MX25_ADCQ_SR_FUR |
+ MX25_ADCQ_SR_FOR | MX25_ADCQ_SR_PD);
+
+ return ret;
+}
+
+/* configure the state machine for a 4-wire touchscreen */
+static int mx25_tcq_init(struct mx25_tcq_priv *priv)
+{
+ u32 tgcr;
+ unsigned int ipg_div;
+ unsigned int adc_period;
+ unsigned int debounce_cnt;
+ unsigned int settling_cnt;
+ int itemct;
+ int error;
+
+ regmap_read(priv->core_regs, MX25_TSC_TGCR, &tgcr);
+ ipg_div = max_t(unsigned int, 4, MX25_TGCR_GET_ADCCLK(tgcr));
+ adc_period = USEC_PER_SEC * ipg_div * 2 + 2;
+ adc_period /= clk_get_rate(priv->clk) / 1000 + 1;
+ debounce_cnt = DIV_ROUND_UP(priv->pen_debounce, adc_period * 8) - 1;
+ settling_cnt = DIV_ROUND_UP(priv->settling_time, adc_period * 8) - 1;
+
+ /* Reset */
+ regmap_write(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_QRST | MX25_ADCQ_CR_FRST);
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_QRST | MX25_ADCQ_CR_FRST, 0);
+
+ /* up to 128 * 8 ADC clocks are possible */
+ if (debounce_cnt > 127)
+ debounce_cnt = 127;
+
+ /* up to 255 * 8 ADC clocks are possible */
+ if (settling_cnt > 255)
+ settling_cnt = 255;
+
+ error = imx25_setup_queue_4wire(priv, settling_cnt, &itemct);
+ if (error)
+ return error;
+
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_LITEMID_MASK | MX25_ADCQ_CR_WMRK_MASK,
+ MX25_ADCQ_CR_LITEMID(itemct - 1) |
+ MX25_ADCQ_CR_WMRK(priv->expected_samples - 1));
+
+ /* setup debounce count */
+ regmap_update_bits(priv->core_regs, MX25_TSC_TGCR,
+ MX25_TGCR_PDBTIME_MASK,
+ MX25_TGCR_PDBTIME(debounce_cnt));
+
+ /* enable debounce */
+ regmap_update_bits(priv->core_regs, MX25_TSC_TGCR, MX25_TGCR_PDBEN,
+ MX25_TGCR_PDBEN);
+ regmap_update_bits(priv->core_regs, MX25_TSC_TGCR, MX25_TGCR_PDEN,
+ MX25_TGCR_PDEN);
+
+ /* enable the engine on demand */
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR, MX25_ADCQ_CR_QSM_MASK,
+ MX25_ADCQ_CR_QSM_FQS);
+
+ /* Enable repeat and repeat wait */
+ regmap_update_bits(priv->regs, MX25_ADCQ_CR,
+ MX25_ADCQ_CR_RPT | MX25_ADCQ_CR_RWAIT_MASK,
+ MX25_ADCQ_CR_RPT |
+ MX25_ADCQ_CR_RWAIT(MX25_TSC_REPEAT_WAIT));
+
+ return 0;
+}
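
The debounce and settling counts programmed above are expressed in units of eight ADC clocks, rounded up and stored as count minus one; a standalone sketch with an illustrative ADC period (values hypothetical):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int adc_period = 1000;       /* ns per ADC clock, illustrative */
	unsigned int pen_debounce = 1000000;  /* ns, the driver's default */

	/* One count covers 8 ADC clocks, and the register holds count - 1. */
	unsigned int debounce_cnt =
		DIV_ROUND_UP(pen_debounce, adc_period * 8) - 1;

	if (debounce_cnt > 127)  /* PDBTIME is a 7-bit field */
		debounce_cnt = 127;

	printf("debounce count: %u\n", debounce_cnt);  /* -> 124 */
	return 0;
}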
+
+static int mx25_tcq_parse_dt(struct platform_device *pdev,
+ struct mx25_tcq_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 wires;
+ int error;
+
+ /* Setup defaults */
+ priv->pen_threshold = 500;
+ priv->sample_count = 3;
+ priv->pen_debounce = 1000000;
+ priv->settling_time = 250000;
+
+ error = of_property_read_u32(np, "fsl,wires", &wires);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to find fsl,wires properties\n");
+ return error;
+ }
+
+ if (wires == 4) {
+ priv->mode = MX25_TS_4WIRE;
+ } else {
+ dev_err(&pdev->dev, "%u-wire mode not supported\n", wires);
+ return -EINVAL;
+ }
+
+ /* These are optional, we don't care about the return values */
+ of_property_read_u32(np, "fsl,pen-threshold", &priv->pen_threshold);
+ of_property_read_u32(np, "fsl,settling-time-ns", &priv->settling_time);
+ of_property_read_u32(np, "fsl,pen-debounce-ns", &priv->pen_debounce);
+
+ return 0;
+}
+
+static int mx25_tcq_open(struct input_dev *idev)
+{
+ struct device *dev = &idev->dev;
+ struct mx25_tcq_priv *priv = dev_get_drvdata(dev);
+ int error;
+
+ error = clk_prepare_enable(priv->clk);
+ if (error) {
+ dev_err(dev, "Failed to enable ipg clock\n");
+ return error;
+ }
+
+ error = mx25_tcq_init(priv);
+ if (error) {
+ dev_err(dev, "Failed to init tcq\n");
+ clk_disable_unprepare(priv->clk);
+ return error;
+ }
+
+ mx25_tcq_re_enable_touch_detection(priv);
+
+ return 0;
+}
+
+static void mx25_tcq_close(struct input_dev *idev)
+{
+ struct mx25_tcq_priv *priv = input_get_drvdata(idev);
+
+ mx25_tcq_force_queue_stop(priv);
+ mx25_tcq_disable_touch_irq(priv);
+ mx25_tcq_disable_fifo_irq(priv);
+ clk_disable_unprepare(priv->clk);
+}
+
+static int mx25_tcq_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct input_dev *idev;
+ struct mx25_tcq_priv *priv;
+ struct mx25_tsadc *tsadc = dev_get_drvdata(pdev->dev.parent);
+ struct resource *res;
+ void __iomem *mem;
+ int error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ error = mx25_tcq_parse_dt(pdev, priv);
+ if (error)
+ return error;
+
+ priv->regs = devm_regmap_init_mmio(dev, mem, &mx25_tcq_regconfig);
+ if (IS_ERR(priv->regs)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ return PTR_ERR(priv->regs);
+ }
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq <= 0) {
+ dev_err(dev, "Failed to get IRQ\n");
+ return priv->irq ? priv->irq : -ENXIO;
+ }
+
+ idev = devm_input_allocate_device(dev);
+ if (!idev) {
+ dev_err(dev, "Failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ idev->name = mx25_tcq_name;
+ input_set_capability(idev, EV_KEY, BTN_TOUCH);
+ input_set_abs_params(idev, ABS_X, 0, 0xfff, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, 0xfff, 0, 0);
+
+ idev->id.bustype = BUS_HOST;
+ idev->open = mx25_tcq_open;
+ idev->close = mx25_tcq_close;
+
+ priv->idev = idev;
+ input_set_drvdata(idev, priv);
+
+ priv->core_regs = tsadc->regs;
+ if (!priv->core_regs)
+ return -EINVAL;
+
+ priv->clk = tsadc->clk;
+ if (!priv->clk)
+ return -EINVAL;
+
+ platform_set_drvdata(pdev, priv);
+
+ error = devm_request_threaded_irq(dev, priv->irq, mx25_tcq_irq,
+ mx25_tcq_irq_thread, 0, pdev->name,
+ priv);
+ if (error) {
+ dev_err(dev, "Failed requesting IRQ\n");
+ return error;
+ }
+
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(dev, "Failed to register input device\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mx25_tcq_driver = {
+ .driver = {
+ .name = "mx25-tcq",
+ .of_match_table = mx25_tcq_ids,
+ },
+ .probe = mx25_tcq_probe,
+};
+module_platform_driver(mx25_tcq_driver);
+
+MODULE_DESCRIPTION("TS input driver for Freescale mx25");
+MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
new file mode 100644
index 000000000000..fb5fb9140ca9
--- /dev/null
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -0,0 +1,1543 @@
+/*
+ * MELFAS MIP4 Touchscreen
+ *
+ * Copyright (C) 2016 MELFAS Inc.
+ *
+ * Author : Sangwon Jee <jeesw@melfas.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define MIP4_DEVICE_NAME "mip4_ts"
+
+/*****************************************************************
+ * Protocol
+ * Version : MIP 4.0 Rev 4.6
+ *****************************************************************/
+
+/* Address */
+#define MIP4_R0_BOOT 0x00
+#define MIP4_R1_BOOT_MODE 0x01
+#define MIP4_R1_BOOT_BUF_ADDR 0x10
+#define MIP4_R1_BOOT_STATUS 0x20
+#define MIP4_R1_BOOT_CMD 0x30
+#define MIP4_R1_BOOT_TARGET_ADDR 0x40
+#define MIP4_R1_BOOT_SIZE 0x44
+
+#define MIP4_R0_INFO 0x01
+#define MIP4_R1_INFO_PRODUCT_NAME 0x00
+#define MIP4_R1_INFO_RESOLUTION_X 0x10
+#define MIP4_R1_INFO_RESOLUTION_Y 0x12
+#define MIP4_R1_INFO_NODE_NUM_X 0x14
+#define MIP4_R1_INFO_NODE_NUM_Y 0x15
+#define MIP4_R1_INFO_KEY_NUM 0x16
+#define MIP4_R1_INFO_PRESSURE_NUM 0x17
+#define MIP4_R1_INFO_LENGTH_X 0x18
+#define MIP4_R1_INFO_LENGTH_Y 0x1A
+#define MIP4_R1_INFO_PPM_X 0x1C
+#define MIP4_R1_INFO_PPM_Y 0x1D
+#define MIP4_R1_INFO_VERSION_BOOT 0x20
+#define MIP4_R1_INFO_VERSION_CORE 0x22
+#define MIP4_R1_INFO_VERSION_APP 0x24
+#define MIP4_R1_INFO_VERSION_PARAM 0x26
+#define MIP4_R1_INFO_SECT_BOOT_START 0x30
+#define MIP4_R1_INFO_SECT_BOOT_END 0x31
+#define MIP4_R1_INFO_SECT_CORE_START 0x32
+#define MIP4_R1_INFO_SECT_CORE_END 0x33
+#define MIP4_R1_INFO_SECT_APP_START 0x34
+#define MIP4_R1_INFO_SECT_APP_END 0x35
+#define MIP4_R1_INFO_SECT_PARAM_START 0x36
+#define MIP4_R1_INFO_SECT_PARAM_END 0x37
+#define MIP4_R1_INFO_BUILD_DATE 0x40
+#define MIP4_R1_INFO_BUILD_TIME 0x44
+#define MIP4_R1_INFO_CHECKSUM_PRECALC 0x48
+#define MIP4_R1_INFO_CHECKSUM_REALTIME 0x4A
+#define MIP4_R1_INFO_PROTOCOL_NAME 0x50
+#define MIP4_R1_INFO_PROTOCOL_VERSION 0x58
+#define MIP4_R1_INFO_IC_ID 0x70
+#define MIP4_R1_INFO_IC_NAME 0x71
+#define MIP4_R1_INFO_IC_VENDOR_ID 0x75
+#define MIP4_R1_INFO_IC_HW_CATEGORY 0x77
+#define MIP4_R1_INFO_CONTACT_THD_SCR 0x78
+#define MIP4_R1_INFO_CONTACT_THD_KEY 0x7A
+
+#define MIP4_R0_EVENT 0x02
+#define MIP4_R1_EVENT_SUPPORTED_FUNC 0x00
+#define MIP4_R1_EVENT_FORMAT 0x04
+#define MIP4_R1_EVENT_SIZE 0x06
+#define MIP4_R1_EVENT_PACKET_INFO 0x10
+#define MIP4_R1_EVENT_PACKET_DATA 0x11
+
+#define MIP4_R0_CTRL 0x06
+#define MIP4_R1_CTRL_READY_STATUS 0x00
+#define MIP4_R1_CTRL_EVENT_READY 0x01
+#define MIP4_R1_CTRL_MODE 0x10
+#define MIP4_R1_CTRL_EVENT_TRIGGER_TYPE 0x11
+#define MIP4_R1_CTRL_RECALIBRATE 0x12
+#define MIP4_R1_CTRL_POWER_STATE 0x13
+#define MIP4_R1_CTRL_GESTURE_TYPE 0x14
+#define MIP4_R1_CTRL_DISABLE_ESD_ALERT 0x18
+#define MIP4_R1_CTRL_CHARGER_MODE 0x19
+#define MIP4_R1_CTRL_HIGH_SENS_MODE 0x1A
+#define MIP4_R1_CTRL_WINDOW_MODE 0x1B
+#define MIP4_R1_CTRL_PALM_REJECTION 0x1C
+#define MIP4_R1_CTRL_EDGE_CORRECTION 0x1D
+#define MIP4_R1_CTRL_ENTER_GLOVE_MODE 0x1E
+#define MIP4_R1_CTRL_I2C_ON_LPM 0x1F
+#define MIP4_R1_CTRL_GESTURE_DEBUG 0x20
+#define MIP4_R1_CTRL_PALM_EVENT 0x22
+#define MIP4_R1_CTRL_PROXIMITY_SENSING 0x23
+
+/* Value */
+#define MIP4_BOOT_MODE_BOOT 0x01
+#define MIP4_BOOT_MODE_APP 0x02
+
+#define MIP4_BOOT_STATUS_BUSY 0x05
+#define MIP4_BOOT_STATUS_ERROR 0x0E
+#define MIP4_BOOT_STATUS_DONE 0xA0
+
+#define MIP4_BOOT_CMD_MASS_ERASE 0x15
+#define MIP4_BOOT_CMD_PROGRAM 0x54
+#define MIP4_BOOT_CMD_ERASE 0x8F
+#define MIP4_BOOT_CMD_WRITE 0xA5
+#define MIP4_BOOT_CMD_READ 0xC2
+
+#define MIP4_EVENT_INPUT_TYPE_KEY 0
+#define MIP4_EVENT_INPUT_TYPE_SCREEN 1
+#define MIP4_EVENT_INPUT_TYPE_PROXIMITY 2
+
+#define I2C_RETRY_COUNT 3 /* at least 2 */
+
+#define MIP4_BUF_SIZE 128
+#define MIP4_MAX_FINGERS 10
+#define MIP4_MAX_KEYS 4
+
+#define MIP4_TOUCH_MAJOR_MIN 0
+#define MIP4_TOUCH_MAJOR_MAX 255
+#define MIP4_TOUCH_MINOR_MIN 0
+#define MIP4_TOUCH_MINOR_MAX 255
+#define MIP4_PRESSURE_MIN 0
+#define MIP4_PRESSURE_MAX 255
+
+#define MIP4_FW_NAME "melfas_mip4.fw"
+#define MIP4_FW_UPDATE_DEBUG 0 /* 0 (default) or 1 */
+
+struct mip4_fw_version {
+ u16 boot;
+ u16 core;
+ u16 app;
+ u16 param;
+};
+
+struct mip4_ts {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct gpio_desc *gpio_ce;
+
+ char phys[32];
+ char product_name[16];
+
+ unsigned int max_x;
+ unsigned int max_y;
+ u8 node_x;
+ u8 node_y;
+ u8 node_key;
+ unsigned int ppm_x;
+ unsigned int ppm_y;
+
+ struct mip4_fw_version fw_version;
+
+ unsigned int event_size;
+ unsigned int event_format;
+
+ unsigned int key_num;
+ unsigned short key_code[MIP4_MAX_KEYS];
+
+ bool wake_irq_enabled;
+
+ u8 buf[MIP4_BUF_SIZE];
+};
+
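+/*
+ * Every MIP4 read follows the same pattern: a two-byte register
+ * address (R0, R1) is written and the payload is read back in one
+ * combined I2C transfer, retried up to I2C_RETRY_COUNT times.
+ */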
+static int mip4_i2c_xfer(struct mip4_ts *ts,
+ char *write_buf, unsigned int write_len,
+ char *read_buf, unsigned int read_len)
+{
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = write_buf,
+ .len = write_len,
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = read_buf,
+ .len = read_len,
+ },
+ };
+ int retry = I2C_RETRY_COUNT;
+ int res;
+ int error;
+
+ do {
+ res = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (res == ARRAY_SIZE(msg))
+ return 0;
+
+ error = res < 0 ? res : -EIO;
+ dev_err(&ts->client->dev,
+ "%s - i2c_transfer failed: %d (%d)\n",
+ __func__, error, res);
+ } while (--retry);
+
+ return error;
+}
+
+static void mip4_parse_fw_version(const u8 *buf, struct mip4_fw_version *v)
+{
+ v->boot = get_unaligned_le16(buf + 0);
+ v->core = get_unaligned_le16(buf + 2);
+ v->app = get_unaligned_le16(buf + 4);
+ v->param = get_unaligned_le16(buf + 6);
+}
+
+/*
+ * Read chip firmware version
+ */
+static int mip4_get_fw_version(struct mip4_ts *ts)
+{
+ u8 cmd[] = { MIP4_R0_INFO, MIP4_R1_INFO_VERSION_BOOT };
+ u8 buf[sizeof(ts->fw_version)];
+ int error;
+
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, sizeof(buf));
+ if (error) {
+ memset(&ts->fw_version, 0xff, sizeof(ts->fw_version));
+ return error;
+ }
+
+ mip4_parse_fw_version(buf, &ts->fw_version);
+
+ return 0;
+}
+
+/*
+ * Fetch device characteristics
+ */
+static int mip4_query_device(struct mip4_ts *ts)
+{
+ int error;
+ u8 cmd[2];
+ u8 buf[14];
+
+ /* Product name */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_PRODUCT_NAME;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd),
+ ts->product_name, sizeof(ts->product_name));
+ if (error)
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve product name: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "product name: %.*s\n",
+ (int)sizeof(ts->product_name), ts->product_name);
+
+ /* Firmware version */
+ error = mip4_get_fw_version(ts);
+ if (error)
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve FW version: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "F/W Version: %04X %04X %04X %04X\n",
+ ts->fw_version.boot, ts->fw_version.core,
+ ts->fw_version.app, ts->fw_version.param);
+
+ /* Resolution */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_RESOLUTION_X;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, 14);
+ if (error) {
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve touchscreen parameters: %d\n",
+ error);
+ } else {
+ ts->max_x = get_unaligned_le16(&buf[0]);
+ ts->max_y = get_unaligned_le16(&buf[2]);
+ dev_dbg(&ts->client->dev, "max_x: %d, max_y: %d\n",
+ ts->max_x, ts->max_y);
+
+ ts->node_x = buf[4];
+ ts->node_y = buf[5];
+ ts->node_key = buf[6];
+ dev_dbg(&ts->client->dev,
+ "node_x: %d, node_y: %d, node_key: %d\n",
+ ts->node_x, ts->node_y, ts->node_key);
+
+ ts->ppm_x = buf[12];
+ ts->ppm_y = buf[13];
+ dev_dbg(&ts->client->dev, "ppm_x: %d, ppm_y: %d\n",
+ ts->ppm_x, ts->ppm_y);
+
+ /* Touch keys */
+ if (ts->node_key > 0)
+ ts->key_num = ts->node_key;
+ }
+
+ /* Protocol */
+ cmd[0] = MIP4_R0_EVENT;
+ cmd[1] = MIP4_R1_EVENT_SUPPORTED_FUNC;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, 7);
+ if (error) {
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve device type: %d\n", error);
+ ts->event_format = 0xff;
+ } else {
+ ts->event_format = get_unaligned_le16(&buf[4]);
+ ts->event_size = buf[6];
+ dev_dbg(&ts->client->dev, "event_format: %d, event_size: %d\n",
+ ts->event_format, ts->event_size);
+
+ if (ts->event_format == 2 || ts->event_format > 3)
+ dev_warn(&ts->client->dev,
+ "Unknown event format %d\n", ts->event_format);
+ }
+
+ return 0;
+}
+
+static int mip4_power_on(struct mip4_ts *ts)
+{
+ if (ts->gpio_ce) {
+ gpiod_set_value_cansleep(ts->gpio_ce, 1);
+
+ /* Booting delay: 200~300 ms */
+ usleep_range(200 * 1000, 300 * 1000);
+ }
+
+ return 0;
+}
+
+static void mip4_power_off(struct mip4_ts *ts)
+{
+ if (ts->gpio_ce)
+ gpiod_set_value_cansleep(ts->gpio_ce, 0);
+}
+
+/*
+ * Clear touch input event status
+ */
+static void mip4_clear_input(struct mip4_ts *ts)
+{
+ int i;
+
+ /* Screen */
+ for (i = 0; i < MIP4_MAX_FINGERS; i++) {
+ input_mt_slot(ts->input, i);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+ }
+
+ /* Keys */
+ for (i = 0; i < ts->key_num; i++)
+ input_report_key(ts->input, ts->key_code[i], 0);
+
+ input_sync(ts->input);
+}
+
+static int mip4_enable(struct mip4_ts *ts)
+{
+ int error;
+
+ error = mip4_power_on(ts);
+ if (error)
+ return error;
+
+ enable_irq(ts->client->irq);
+
+ return 0;
+}
+
+static void mip4_disable(struct mip4_ts *ts)
+{
+ disable_irq(ts->client->irq);
+
+ mip4_power_off(ts);
+
+ mip4_clear_input(ts);
+}
+
+/*****************************************************************
+ * Input handling
+ *****************************************************************/
+
+static void mip4_report_keys(struct mip4_ts *ts, u8 *packet)
+{
+ u8 key;
+ bool down;
+
+ switch (ts->event_format) {
+ case 0:
+ case 1:
+ key = packet[0] & 0x0F;
+ down = packet[0] & 0x80;
+ break;
+
+ case 3:
+ default:
+ key = packet[0] & 0x0F;
+ down = packet[1] & 0x01;
+ break;
+ }
+
+ /* Report key event */
+ if (key >= 1 && key <= ts->key_num) {
+ unsigned short keycode = ts->key_code[key - 1];
+
+ dev_dbg(&ts->client->dev,
+ "Key - ID: %d, keycode: %d, state: %d\n",
+ key, keycode, down);
+
+ input_event(ts->input, EV_MSC, MSC_SCAN, keycode);
+ input_report_key(ts->input, keycode, down);
+
+ } else {
+ dev_err(&ts->client->dev, "Unknown key: %d\n", key);
+ }
+}
+
+static void mip4_report_touch(struct mip4_ts *ts, u8 *packet)
+{
+ int id;
+ bool hover;
+ bool palm;
+ bool state;
+ u16 x, y;
+ u8 pressure_stage = 0;
+ u8 pressure;
+ u8 size;
+ u8 touch_major;
+ u8 touch_minor;
+
+ switch (ts->event_format) {
+ case 0:
+ case 1:
+ /* Touch only */
+ state = packet[0] & BIT(7);
+ hover = packet[0] & BIT(5);
+ palm = packet[0] & BIT(4);
+ id = (packet[0] & 0x0F) - 1;
+ x = ((packet[1] & 0x0F) << 8) | packet[2];
+ y = (((packet[1] >> 4) & 0x0F) << 8) |
+ packet[3];
+ pressure = packet[4];
+ size = packet[5];
+ if (ts->event_format == 0) {
+ touch_major = packet[5];
+ touch_minor = packet[5];
+ } else {
+ touch_major = packet[6];
+ touch_minor = packet[7];
+ }
+ break;
+
+ case 3:
+ default:
+ /* Touch + Force(Pressure) */
+ id = (packet[0] & 0x0F) - 1;
+ hover = packet[1] & BIT(2);
+ palm = packet[1] & BIT(1);
+ state = packet[1] & BIT(0);
+ x = ((packet[2] & 0x0F) << 8) | packet[3];
+ y = (((packet[2] >> 4) & 0x0F) << 8) |
+ packet[4];
+ size = packet[6];
+ pressure_stage = (packet[7] & 0xF0) >> 4;
+ pressure = ((packet[7] & 0x0F) << 8) |
+ packet[8];
+ touch_major = packet[9];
+ touch_minor = packet[10];
+ break;
+ }
+
+ dev_dbg(&ts->client->dev,
+ "Screen - Slot: %d State: %d X: %04d Y: %04d Z: %d\n",
+ id, state, x, y, pressure);
+
+ if (unlikely(id < 0 || id >= MIP4_MAX_FINGERS)) {
+ dev_err(&ts->client->dev, "Screen - invalid slot ID: %d\n", id);
+ } else if (state) {
+ /* Press or Move event */
+ input_mt_slot(ts->input, id);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, true);
+ input_report_abs(ts->input, ABS_MT_POSITION_X, x);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y, y);
+ input_report_abs(ts->input, ABS_MT_PRESSURE, pressure);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR, touch_major);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MINOR, touch_minor);
+ } else {
+ /* Release event */
+ input_mt_slot(ts->input, id);
+ input_mt_report_slot_state(ts->input, MT_TOOL_FINGER, 0);
+ }
+
+ input_mt_sync_frame(ts->input);
+}
+
+static int mip4_handle_packet(struct mip4_ts *ts, u8 *packet)
+{
+ u8 type;
+
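+ /*
+ * The input-type field sits in different bits of the first packet
+ * byte depending on the firmware's event format.
+ */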
+ switch (ts->event_format) {
+ case 0:
+ case 1:
+ type = (packet[0] & 0x40) >> 6;
+ break;
+
+ case 3:
+ type = (packet[0] & 0xF0) >> 4;
+ break;
+
+ default:
+ /* Should not happen unless we have corrupted firmware */
+ return -EINVAL;
+ }
+
+ dev_dbg(&ts->client->dev, "Type: %d\n", type);
+
+ /* Report input event */
+ switch (type) {
+ case MIP4_EVENT_INPUT_TYPE_KEY:
+ mip4_report_keys(ts, packet);
+ break;
+
+ case MIP4_EVENT_INPUT_TYPE_SCREEN:
+ mip4_report_touch(ts, packet);
+ break;
+
+ default:
+ dev_err(&ts->client->dev, "Unknown event type: %d\n", type);
+ break;
+ }
+
+ return 0;
+}
+
+static irqreturn_t mip4_interrupt(int irq, void *dev_id)
+{
+ struct mip4_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+ unsigned int i;
+ int error;
+ u8 cmd[2];
+ u8 size;
+ bool alert;
+
+ /* Read packet info */
+ cmd[0] = MIP4_R0_EVENT;
+ cmd[1] = MIP4_R1_EVENT_PACKET_INFO;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), ts->buf, 1);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read packet info: %d\n", error);
+ goto out;
+ }
+
+ size = ts->buf[0] & 0x7F;
+ alert = ts->buf[0] & BIT(7);
+ dev_dbg(&client->dev, "packet size: %d, alert: %d\n", size, alert);
+
+ /* Check size */
+ if (!size) {
+ dev_err(&client->dev, "Empty packet\n");
+ goto out;
+ }
+
+ /* Read packet data */
+ cmd[0] = MIP4_R0_EVENT;
+ cmd[1] = MIP4_R1_EVENT_PACKET_DATA;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), ts->buf, size);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to read packet data: %d\n", error);
+ goto out;
+ }
+
+ if (alert) {
+ dev_dbg(&client->dev, "Alert: %d\n", ts->buf[0]);
+ } else {
+ for (i = 0; i < size; i += ts->event_size) {
+ error = mip4_handle_packet(ts, &ts->buf[i]);
+ if (error)
+ break;
+ }
+
+ input_sync(ts->input);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int mip4_input_open(struct input_dev *dev)
+{
+ struct mip4_ts *ts = input_get_drvdata(dev);
+
+ return mip4_enable(ts);
+}
+
+static void mip4_input_close(struct input_dev *dev)
+{
+ struct mip4_ts *ts = input_get_drvdata(dev);
+
+ mip4_disable(ts);
+}
+
+/*****************************************************************
+ * Firmware update
+ *****************************************************************/
+
+/* Firmware Info */
+#define MIP4_BL_PAGE_SIZE 512 /* 512 */
+#define MIP4_BL_PACKET_SIZE 512 /* 512, 256, 128, 64, ... */
+
+/*
+ * Firmware binary tail info
+ */
+
+struct mip4_bin_tail {
+ u8 tail_mark[4];
+ u8 chip_name[4];
+
+ __le32 bin_start_addr;
+ __le32 bin_length;
+
+ __le16 ver_boot;
+ __le16 ver_core;
+ __le16 ver_app;
+ __le16 ver_param;
+
+ u8 boot_start;
+ u8 boot_end;
+ u8 core_start;
+ u8 core_end;
+ u8 app_start;
+ u8 app_end;
+ u8 param_start;
+ u8 param_end;
+
+ u8 checksum_type;
+ u8 hw_category;
+
+ __le16 param_id;
+ __le32 param_length;
+ __le32 build_date;
+ __le32 build_time;
+
+ __le32 reserved1;
+ __le32 reserved2;
+ __le16 reserved3;
+ __le16 tail_size;
+ __le32 crc;
+} __packed;
+
+#define MIP4_BIN_TAIL_MARK "MBT\001"
+#define MIP4_BIN_TAIL_SIZE (sizeof(struct mip4_bin_tail))
+
+/*
+ * Bootloader - Read status
+ */
+static int mip4_bl_read_status(struct mip4_ts *ts)
+{
+ u8 cmd[] = { MIP4_R0_BOOT, MIP4_R1_BOOT_STATUS };
+ u8 result;
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = cmd,
+ .len = sizeof(cmd),
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = &result,
+ .len = sizeof(result),
+ },
+ };
+ int ret;
+ int error;
+ int retry = 1000;
+
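+ /* Poll the status register for up to ~1-2 s (1000 * 1-2 ms) */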
+ do {
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to read bootloader status: %d\n",
+ error);
+ return error;
+ }
+
+ switch (result) {
+ case MIP4_BOOT_STATUS_DONE:
+ dev_dbg(&ts->client->dev, "%s - done\n", __func__);
+ return 0;
+
+ case MIP4_BOOT_STATUS_ERROR:
+ dev_err(&ts->client->dev, "Bootloader failure\n");
+ return -EIO;
+
+ case MIP4_BOOT_STATUS_BUSY:
+ dev_dbg(&ts->client->dev, "%s - Busy\n", __func__);
+ error = -EBUSY;
+ break;
+
+ default:
+ dev_err(&ts->client->dev,
+ "Unexpected bootloader status: %#02x\n",
+ result);
+ error = -EINVAL;
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ } while (--retry);
+
+ return error;
+}
+
+/*
+ * Bootloader - Change mode
+ */
+static int mip4_bl_change_mode(struct mip4_ts *ts, u8 mode)
+{
+ u8 mode_chg_cmd[] = { MIP4_R0_BOOT, MIP4_R1_BOOT_MODE, mode };
+ u8 mode_read_cmd[] = { MIP4_R0_BOOT, MIP4_R1_BOOT_MODE };
+ u8 result;
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = mode_read_cmd,
+ .len = sizeof(mode_read_cmd),
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = &result,
+ .len = sizeof(result),
+ },
+ };
+ int retry = 10;
+ int ret;
+ int error;
+
+ do {
+ /* Send mode change command */
+ ret = i2c_master_send(ts->client,
+ mode_chg_cmd, sizeof(mode_chg_cmd));
+ if (ret != sizeof(mode_chg_cmd)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send %d mode change: %d (%d)\n",
+ mode, error, ret);
+ return error;
+ }
+
+ dev_dbg(&ts->client->dev,
+ "Sent mode change request (mode: %d)\n", mode);
+
+ /* Wait */
+ msleep(1000);
+
+ /* Verify target mode */
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to read device mode: %d\n", error);
+ return error;
+ }
+
+ dev_dbg(&ts->client->dev,
+ "Current device mode: %d, want: %d\n", result, mode);
+
+ if (result == mode)
+ return 0;
+
+ } while (--retry);
+
+ return -EIO;
+}
+
+/*
+ * Bootloader - Start bootloader mode
+ */
+static int mip4_bl_enter(struct mip4_ts *ts)
+{
+ return mip4_bl_change_mode(ts, MIP4_BOOT_MODE_BOOT);
+}
+
+/*
+ * Bootloader - Exit bootloader mode
+ */
+static int mip4_bl_exit(struct mip4_ts *ts)
+{
+ return mip4_bl_change_mode(ts, MIP4_BOOT_MODE_APP);
+}
+
+static int mip4_bl_get_address(struct mip4_ts *ts, u16 *buf_addr)
+{
+ u8 cmd[] = { MIP4_R0_BOOT, MIP4_R1_BOOT_BUF_ADDR };
+ u8 result[sizeof(u16)];
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = cmd,
+ .len = sizeof(cmd),
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .buf = result,
+ .len = sizeof(result),
+ },
+ };
+ int ret;
+ int error;
+
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to retrieve bootloader buffer address: %d\n",
+ error);
+ return error;
+ }
+
+ *buf_addr = get_unaligned_le16(result);
+ dev_dbg(&ts->client->dev,
+ "Bootloader buffer address %#04x\n", *buf_addr);
+
+ return 0;
+}
+
+static int mip4_bl_program_page(struct mip4_ts *ts, int offset,
+ const u8 *data, int length, u16 buf_addr)
+{
+ u8 cmd[6];
+ u8 *data_buf;
+ u16 buf_offset;
+ int ret;
+ int error;
+
+ dev_dbg(&ts->client->dev, "Writing page @%#06x (%d)\n",
+ offset, length);
+
+ if (length > MIP4_BL_PAGE_SIZE || length % MIP4_BL_PACKET_SIZE) {
+ dev_err(&ts->client->dev,
+ "Invalid page length: %d\n", length);
+ return -EINVAL;
+ }
+
+ data_buf = kmalloc(2 + MIP4_BL_PACKET_SIZE, GFP_KERNEL);
+ if (!data_buf)
+ return -ENOMEM;
+
+ /* Addr */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_TARGET_ADDR;
+ put_unaligned_le32(offset, &cmd[2]);
+ ret = i2c_master_send(ts->client, cmd, 6);
+ if (ret != 6) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send write page address: %d\n", error);
+ goto out;
+ }
+
+ /* Size */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_SIZE;
+ put_unaligned_le32(length, &cmd[2]);
+ ret = i2c_master_send(ts->client, cmd, 6);
+ if (ret != 6) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send write page size: %d\n", error);
+ goto out;
+ }
+
+ /* Data */
+ for (buf_offset = 0;
+ buf_offset < length;
+ buf_offset += MIP4_BL_PACKET_SIZE) {
+ dev_dbg(&ts->client->dev,
+ "writing chunk at %#04x (size %d)\n",
+ buf_offset, MIP4_BL_PACKET_SIZE);
+ put_unaligned_be16(buf_addr + buf_offset, data_buf);
+ memcpy(&data_buf[2], &data[buf_offset], MIP4_BL_PACKET_SIZE);
+ ret = i2c_master_send(ts->client,
+ data_buf, 2 + MIP4_BL_PACKET_SIZE);
+ if (ret != 2 + MIP4_BL_PACKET_SIZE) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to read chunk at %#04x (size %d): %d\n",
+ buf_offset, MIP4_BL_PACKET_SIZE, error);
+ goto out;
+ }
+ }
+
+ /* Command */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_CMD;
+ cmd[2] = MIP4_BOOT_CMD_PROGRAM;
+ ret = i2c_master_send(ts->client, cmd, 3);
+ if (ret != 3) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send 'write' command: %d\n", error);
+ goto out;
+ }
+
+ /* Status */
+ error = mip4_bl_read_status(ts);
+
+out:
+ kfree(data_buf);
+ return error ? error : 0;
+}
+
+static int mip4_bl_verify_page(struct mip4_ts *ts, int offset,
+ const u8 *data, int length, int buf_addr)
+{
+ u8 cmd[8];
+ u8 *read_buf;
+ int buf_offset;
+ struct i2c_msg msg[] = {
+ {
+ .addr = ts->client->addr,
+ .flags = 0,
+ .buf = cmd,
+ .len = 2,
+ }, {
+ .addr = ts->client->addr,
+ .flags = I2C_M_RD,
+ .len = MIP4_BL_PACKET_SIZE,
+ },
+ };
+ int ret;
+ int error;
+
+ dev_dbg(&ts->client->dev, "Validating page @%#06x (%d)\n",
+ offset, length);
+
+ /* Addr */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_TARGET_ADDR;
+ put_unaligned_le32(offset, &cmd[2]);
+ ret = i2c_master_send(ts->client, cmd, 6);
+ if (ret != 6) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send read page address: %d\n", error);
+ return error;
+ }
+
+ /* Size */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_SIZE;
+ put_unaligned_le32(length, &cmd[2]);
+ ret = i2c_master_send(ts->client, cmd, 6);
+ if (ret != 6) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send read page size: %d\n", error);
+ return error;
+ }
+
+ /* Command */
+ cmd[0] = MIP4_R0_BOOT;
+ cmd[1] = MIP4_R1_BOOT_CMD;
+ cmd[2] = MIP4_BOOT_CMD_READ;
+ ret = i2c_master_send(ts->client, cmd, 3);
+ if (ret != 3) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to send 'read' command: %d\n", error);
+ return error;
+ }
+
+ /* Status */
+ error = mip4_bl_read_status(ts);
+ if (error)
+ return error;
+
+ /* Read */
+ msg[1].buf = read_buf = kmalloc(MIP4_BL_PACKET_SIZE, GFP_KERNEL);
+ if (!read_buf)
+ return -ENOMEM;
+
+ for (buf_offset = 0;
+ buf_offset < length;
+ buf_offset += MIP4_BL_PACKET_SIZE) {
+ dev_dbg(&ts->client->dev,
+ "reading chunk at %#04x (size %d)\n",
+ buf_offset, MIP4_BL_PACKET_SIZE);
+ put_unaligned_be16(buf_addr + buf_offset, cmd);
+ ret = i2c_transfer(ts->client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&ts->client->dev,
+ "Failed to read chunk at %#04x (size %d): %d\n",
+ buf_offset, MIP4_BL_PACKET_SIZE, error);
+ break;
+ }
+
+ if (memcmp(&data[buf_offset], read_buf, MIP4_BL_PACKET_SIZE)) {
+ dev_err(&ts->client->dev,
+ "Failed to validate chunk at %#04x (size %d)\n",
+ buf_offset, MIP4_BL_PACKET_SIZE);
+#if MIP4_FW_UPDATE_DEBUG
+ print_hex_dump(KERN_DEBUG,
+ MIP4_DEVICE_NAME " F/W File: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ data + offset, MIP4_BL_PACKET_SIZE,
+ false);
+ print_hex_dump(KERN_DEBUG,
+ MIP4_DEVICE_NAME " F/W Chip: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ read_buf, MIP4_BL_PACKET_SIZE, false);
+#endif
+ error = -EINVAL;
+ break;
+ }
+ }
+
+ kfree(read_buf);
+ return error ? error : 0;
+}
+
+/*
+ * Flash chip firmware
+ */
+static int mip4_flash_fw(struct mip4_ts *ts,
+ const u8 *fw_data, u32 fw_size, u32 fw_offset)
+{
+ struct i2c_client *client = ts->client;
+ int offset;
+ u16 buf_addr;
+ int error, error2;
+
+ /* Enter bootloader mode */
+ dev_dbg(&client->dev, "Entering bootloader mode\n");
+
+ error = mip4_bl_enter(ts);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to enter bootloader mode: %d\n",
+ error);
+ return error;
+ }
+
+ /* Read info */
+ error = mip4_bl_get_address(ts, &buf_addr);
+ if (error)
+ goto exit_bl;
+
+ /* Program & Verify */
+ dev_dbg(&client->dev,
+ "Program & Verify, page size: %d, packet size: %d\n",
+ MIP4_BL_PAGE_SIZE, MIP4_BL_PACKET_SIZE);
+
+ for (offset = fw_offset;
+ offset < fw_offset + fw_size;
+ offset += MIP4_BL_PAGE_SIZE) {
+ /* Program */
+ error = mip4_bl_program_page(ts, offset, fw_data + offset,
+ MIP4_BL_PAGE_SIZE, buf_addr);
+ if (error)
+ break;
+
+ /* Verify */
+ error = mip4_bl_verify_page(ts, offset, fw_data + offset,
+ MIP4_BL_PAGE_SIZE, buf_addr);
+ if (error)
+ break;
+ }
+
+exit_bl:
+ /* Exit bootloader mode */
+ dev_dbg(&client->dev, "Exiting bootloader mode\n");
+
+ error2 = mip4_bl_exit(ts);
+ if (error2) {
+ dev_err(&client->dev,
+ "Failed to exit bootloader mode: %d\n", error2);
+ if (!error)
+ error = error2;
+ }
+
+ /* Reset chip */
+ mip4_power_off(ts);
+ mip4_power_on(ts);
+
+ mip4_query_device(ts);
+
+ /* Refresh device parameters */
+ input_set_abs_params(ts->input, ABS_MT_POSITION_X, 0, ts->max_x, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->max_y, 0, 0);
+ input_set_abs_params(ts->input, ABS_X, 0, ts->max_x, 0, 0);
+ input_set_abs_params(ts->input, ABS_Y, 0, ts->max_y, 0, 0);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->ppm_x);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->ppm_y);
+ input_abs_set_res(ts->input, ABS_X, ts->ppm_x);
+ input_abs_set_res(ts->input, ABS_Y, ts->ppm_y);
+
+ return error ? error : 0;
+}
+
+static int mip4_parse_firmware(struct mip4_ts *ts, const struct firmware *fw,
+ u32 *fw_offset_start, u32 *fw_size,
+ const struct mip4_bin_tail **pfw_info)
+{
+ const struct mip4_bin_tail *fw_info;
+ struct mip4_fw_version fw_version;
+ u16 tail_size;
+
+ if (fw->size < MIP4_BIN_TAIL_SIZE) {
+ dev_err(&ts->client->dev,
+ "Invalid firmware, size mismatch (tail %zd vs %zd)\n",
+ MIP4_BIN_TAIL_SIZE, fw->size);
+ return -EINVAL;
+ }
+
+ fw_info = (const void *)&fw->data[fw->size - MIP4_BIN_TAIL_SIZE];
+
+#if MIP4_FW_UPDATE_DEBUG
+ print_hex_dump(KERN_ERR, MIP4_DEVICE_NAME " Bin Info: ",
+ DUMP_PREFIX_OFFSET, 16, 1, fw_info,
+ MIP4_BIN_TAIL_SIZE, false);
+#endif
+
+ tail_size = get_unaligned_le16(&fw_info->tail_size);
+ if (tail_size != MIP4_BIN_TAIL_SIZE) {
+ dev_err(&ts->client->dev,
+ "wrong tail size: %d (expected %zd)\n",
+ tail_size, MIP4_BIN_TAIL_SIZE);
+ return -EINVAL;
+ }
+
+ /* Check bin format */
+ if (memcmp(fw_info->tail_mark, MIP4_BIN_TAIL_MARK,
+ sizeof(fw_info->tail_mark))) {
+ dev_err(&ts->client->dev,
+ "unable to locate tail marker (%*ph vs %*ph)\n",
+ (int)sizeof(fw_info->tail_mark), fw_info->tail_mark,
+ (int)sizeof(fw_info->tail_mark), MIP4_BIN_TAIL_MARK);
+ return -EINVAL;
+ }
+
+ *fw_offset_start = get_unaligned_le32(&fw_info->bin_start_addr);
+ *fw_size = get_unaligned_le32(&fw_info->bin_length);
+
+ dev_dbg(&ts->client->dev,
+ "F/W Data offset: %#08x, size: %d\n",
+ *fw_offset_start, *fw_size);
+
+ if (*fw_size % MIP4_BL_PAGE_SIZE) {
+ dev_err(&ts->client->dev,
+ "encoded fw length %d is not multiple of pages (%d)\n",
+ *fw_size, MIP4_BL_PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (fw->size != *fw_offset_start + *fw_size) {
+ dev_err(&ts->client->dev,
+ "Wrong firmware size, expected %d bytes, got %zd\n",
+ *fw_offset_start + *fw_size, fw->size);
+ return -EINVAL;
+ }
+
+ mip4_parse_fw_version((const u8 *)&fw_info->ver_boot, &fw_version);
+
+ dev_dbg(&ts->client->dev,
+ "F/W file version %04X %04X %04X %04X\n",
+ fw_version.boot, fw_version.core,
+ fw_version.app, fw_version.param);
+
+ dev_dbg(&ts->client->dev, "F/W chip version: %04X %04X %04X %04X\n",
+ ts->fw_version.boot, ts->fw_version.core,
+ ts->fw_version.app, ts->fw_version.param);
+
+ /*
+ * Check the F/W type: a section version of 0xEEEE marks a section
+ * that is absent from this image (0xFFFF presumably being the
+ * erased-flash value). A bootloader image carries only the boot
+ * section; a main image carries everything except it.
+ */
+ if (fw_version.boot != 0xEEEE && fw_version.boot != 0xFFFF &&
+ fw_version.core == 0xEEEE &&
+ fw_version.app == 0xEEEE &&
+ fw_version.param == 0xEEEE) {
+ dev_dbg(&ts->client->dev, "F/W type: Bootloader\n");
+ } else if (fw_version.boot == 0xEEEE &&
+ fw_version.core != 0xEEEE && fw_version.core != 0xFFFF &&
+ fw_version.app != 0xEEEE && fw_version.app != 0xFFFF &&
+ fw_version.param != 0xEEEE && fw_version.param != 0xFFFF) {
+ dev_dbg(&ts->client->dev, "F/W type: Main\n");
+ } else {
+ dev_err(&ts->client->dev, "Wrong firmware type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mip4_execute_fw_update(struct mip4_ts *ts, const struct firmware *fw)
+{
+ const struct mip4_bin_tail *fw_info;
+ u32 fw_start_offset;
+ u32 fw_size;
+ int retries = 3;
+ int error;
+
+ error = mip4_parse_firmware(ts, fw,
+ &fw_start_offset, &fw_size, &fw_info);
+ if (error)
+ return error;
+
+ if (ts->input->users) {
+ disable_irq(ts->client->irq);
+ } else {
+ error = mip4_power_on(ts);
+ if (error)
+ return error;
+ }
+
+ /* Update firmware */
+ do {
+ error = mip4_flash_fw(ts, fw->data, fw_size, fw_start_offset);
+ if (!error)
+ break;
+ } while (--retries);
+
+ if (error)
+ dev_err(&ts->client->dev,
+ "Failed to flash firmware: %d\n", error);
+
+ /* Enable IRQ */
+ if (ts->input->users)
+ enable_irq(ts->client->irq);
+ else
+ mip4_power_off(ts);
+
+ return error ? error : 0;
+}
+
+static ssize_t mip4_sysfs_fw_update(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ const struct firmware *fw;
+ int error;
+
+ error = request_firmware(&fw, MIP4_FW_NAME, dev);
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Failed to retrieve firmware %s: %d\n",
+ MIP4_FW_NAME, error);
+ return error;
+ }
+
+ /*
+ * Take the input mutex to avoid racing with concurrent updates,
+ * with userspace opening or closing the device, and with
+ * suspend/resume transitions.
+ */
+ mutex_lock(&ts->input->mutex);
+
+ error = mip4_execute_fw_update(ts, fw);
+
+ mutex_unlock(&ts->input->mutex);
+
+ release_firmware(fw);
+
+ if (error) {
+ dev_err(&ts->client->dev,
+ "Firmware update failed: %d\n", error);
+ return error;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(update_fw, S_IWUSR, NULL, mip4_sysfs_fw_update);
+
+static ssize_t mip4_sysfs_read_fw_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ /* Take lock to prevent racing with firmware update */
+ mutex_lock(&ts->input->mutex);
+
+ count = snprintf(buf, PAGE_SIZE, "%04X %04X %04X %04X\n",
+ ts->fw_version.boot, ts->fw_version.core,
+ ts->fw_version.app, ts->fw_version.param);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(fw_version, S_IRUGO, mip4_sysfs_read_fw_version, NULL);
+
+static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ /* Take lock to prevent racing with firmware update */
+ mutex_lock(&ts->input->mutex);
+
+ /*
+ * product_name shows the name or version of the hardware
+ * paired with current firmware in the chip.
+ */
+ count = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)sizeof(ts->product_name), ts->product_name);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
+
+static struct attribute *mip4_attrs[] = {
+ &dev_attr_fw_version.attr,
+ &dev_attr_hw_version.attr,
+ &dev_attr_update_fw.attr,
+ NULL,
+};
+
+static const struct attribute_group mip4_attr_group = {
+ .attrs = mip4_attrs,
+};
+
+static void mip4_sysfs_remove(void *_data)
+{
+ struct mip4_ts *ts = _data;
+
+ sysfs_remove_group(&ts->client->dev.kobj, &mip4_attr_group);
+}
+
+static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct mip4_ts *ts;
+ struct input_dev *input;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "Not supported I2C adapter\n");
+ return -ENXIO;
+ }
+
+ ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
+
+ ts->client = client;
+ ts->input = input;
+
+ snprintf(ts->phys, sizeof(ts->phys),
+ "%s/input0", dev_name(&client->dev));
+
+ ts->gpio_ce = devm_gpiod_get_optional(&client->dev,
+ "ce", GPIOD_OUT_LOW);
+ if (IS_ERR(ts->gpio_ce)) {
+ error = PTR_ERR(ts->gpio_ce);
+ if (error != -EPROBE_DEFER)
+ dev_err(&client->dev,
+ "Failed to get gpio: %d\n", error);
+ return error;
+ }
+
+ error = mip4_power_on(ts);
+ if (error)
+ return error;
+ error = mip4_query_device(ts);
+ mip4_power_off(ts);
+ if (error)
+ return error;
+
+ input->name = "MELFAS MIP4 Touchscreen";
+ input->phys = ts->phys;
+
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x13c5;
+
+ input->open = mip4_input_open;
+ input->close = mip4_input_close;
+
+ input_set_drvdata(input, ts);
+
+ input->keycode = ts->key_code;
+ input->keycodesize = sizeof(*ts->key_code);
+ input->keycodemax = ts->key_num;
+
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, ts->max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, ts->max_y, 0, 0);
+ input_set_abs_params(input, ABS_MT_PRESSURE,
+ MIP4_PRESSURE_MIN, MIP4_PRESSURE_MAX, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,
+ MIP4_TOUCH_MAJOR_MIN, MIP4_TOUCH_MAJOR_MAX, 0, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR,
+ MIP4_TOUCH_MINOR_MIN, MIP4_TOUCH_MINOR_MAX, 0, 0);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->ppm_x);
+ input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->ppm_y);
+
+ error = input_mt_init_slots(input, MIP4_MAX_FINGERS, INPUT_MT_DIRECT);
+ if (error)
+ return error;
+
+ i2c_set_clientdata(client, ts);
+
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, mip4_interrupt,
+ IRQF_ONESHOT, MIP4_DEVICE_NAME, ts);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to request interrupt %d: %d\n",
+ client->irq, error);
+ return error;
+ }
+
+ disable_irq(client->irq);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ error = sysfs_create_group(&client->dev.kobj, &mip4_attr_group);
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to create sysfs attribute group: %d\n", error);
+ return error;
+ }
+
+ error = devm_add_action(&client->dev, mip4_sysfs_remove, ts);
+ if (error) {
+ mip4_sysfs_remove(ts);
+ dev_err(&client->dev,
+ "Failed to install sysfs remoce action: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mip4_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+
+ mutex_lock(&input->mutex);
+
+ if (device_may_wakeup(dev))
+ ts->wake_irq_enabled = enable_irq_wake(client->irq) == 0;
+ else if (input->users)
+ mip4_disable(ts);
+
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused mip4_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+
+ mutex_lock(&input->mutex);
+
+ if (ts->wake_irq_enabled)
+ disable_irq_wake(client->irq);
+ else if (input->users)
+ mip4_enable(ts);
+
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mip4_pm_ops, mip4_suspend, mip4_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id mip4_of_match[] = {
+ { .compatible = "melfas,"MIP4_DEVICE_NAME, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mip4_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id mip4_acpi_match[] = {
+ { "MLFS0000", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, mip4_acpi_match);
+#endif
+
+static const struct i2c_device_id mip4_i2c_ids[] = {
+ { MIP4_DEVICE_NAME, 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mip4_i2c_ids);
+
+static struct i2c_driver mip4_driver = {
+ .id_table = mip4_i2c_ids,
+ .probe = mip4_probe,
+ .driver = {
+ .name = MIP4_DEVICE_NAME,
+ .of_match_table = of_match_ptr(mip4_of_match),
+ .acpi_match_table = ACPI_PTR(mip4_acpi_match),
+ .pm = &mip4_pm_ops,
+ },
+};
+module_i2c_driver(mip4_driver);
+
+MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
+MODULE_VERSION("2016.03.12");
+MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index e414d43e5159..2a78e27b4495 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -63,6 +63,37 @@
#define STMPE_TS_NAME "stmpe-ts"
#define XY_MASK 0xfff
+/**
+ * struct stmpe_touch - stmpe811 touch screen controller state
+ * @stmpe: pointer back to STMPE MFD container
+ * @idev: registered input device
+ * @work: a work item used to scan the device
+ * @dev: a pointer back to the MFD cell struct device*
+ * @sample_time: ADC conversion time in number of clocks.
+ * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks,
+ * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks),
+ * recommended is 4.
+ * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC)
+ * @ref_sel: ADC reference source
+ * (0 -> internal reference, 1 -> external reference)
+ * @adc_freq: ADC Clock speed
+ * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz)
+ * @ave_ctrl: Sample average control
+ * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
+ * @touch_det_delay: Touch detect interrupt delay
+ * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us,
+ * 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms)
+ * recommended is 3
+ * @settling: Panel driver settling time
+ * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms,
+ * 4 -> 5 ms, 5 -> 10 ms, 6 -> 50 ms, 7 -> 100 ms)
+ * recommended is 2
+ * @fraction_z: Length of the fractional part in z
+ * (fraction_z [0..7] = number of fractional bits)
+ * recommended is 7
+ * @i_drive: current limit value of the touchscreen drivers
+ * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max)
+ */
struct stmpe_touch {
struct stmpe *stmpe;
struct input_dev *idev;
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index b6c4d03de340..880c40b23f66 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -197,28 +197,34 @@ static int sur40_command(struct sur40_state *dev,
static int sur40_init(struct sur40_state *dev)
{
int result;
- u8 buffer[24];
+ u8 *buffer;
+
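+ /*
+ * The command buffer has to live on the heap: sur40_command()
+ * feeds it to USB control transfers, which require DMA-capable
+ * memory that on-stack buffers do not guarantee.
+ */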
+ buffer = kmalloc(24, GFP_KERNEL);
+ if (!buffer) {
+ result = -ENOMEM;
+ goto error;
+ }
/* stupidly replay the original MS driver init sequence */
result = sur40_command(dev, SUR40_GET_VERSION, 0x00, buffer, 12);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_GET_VERSION, 0x01, buffer, 12);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_GET_VERSION, 0x02, buffer, 12);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5);
if (result < 0)
- return result;
+ goto error;
result = sur40_command(dev, SUR40_GET_VERSION, 0x03, buffer, 12);
@@ -226,7 +232,8 @@ static int sur40_init(struct sur40_state *dev)
* Discard the result buffer - no known data inside except
* some version strings, maybe extract these sometime...
*/
-
+error:
+ kfree(buffer);
return result;
}
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 515c20a6e10f..73861ad22df4 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -848,7 +848,7 @@ static int wdt87xx_do_update_firmware(struct i2c_client *client,
error = wdt87xx_get_sysparam(client, &wdt->param);
if (error)
dev_err(&client->dev,
- "failed to refresh system paramaters: %d\n", error);
+ "failed to refresh system parameters: %d\n", error);
out:
enable_irq(client->irq);
mutex_unlock(&wdt->fw_mutex);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index a1e75cba18e0..dd1dc39f84ff 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -39,6 +39,25 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
If unsure, say N here.
+config IOMMU_IO_PGTABLE_ARMV7S
+ bool "ARMv7/v8 Short Descriptor Format"
+ select IOMMU_IO_PGTABLE
+ depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
+ help
+ Enable support for the ARM Short-descriptor pagetable format.
+ This supports 32-bit virtual and physical addresses mapped using
+ 2-level tables with 4KB pages/1MB sections, and contiguous entries
+ for 64KB pages/16MB supersections if indicated by the IOMMU driver.
+
+config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
+ bool "ARMv7s selftests"
+ depends on IOMMU_IO_PGTABLE_ARMV7S
+ help
+ Enable self-tests for ARMv7s page table allocator. This performs
+ a series of page-table consistency checks during boot.
+
+ If unsure, say N here.
+
endmenu
config IOMMU_IOVA
@@ -51,9 +70,9 @@ config OF_IOMMU
# IOMMU-agnostic DMA-mapping layer
config IOMMU_DMA
bool
- depends on NEED_SG_DMA_LENGTH
select IOMMU_API
select IOMMU_IOVA
+ select NEED_SG_DMA_LENGTH
config FSL_PAMU
bool "Freescale IOMMU support"
@@ -243,7 +262,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && ARM && MMU
+ depends on ARCH_EXYNOS && MMU
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
@@ -266,7 +285,7 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
depends on ARM_LPAE
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
@@ -318,4 +337,21 @@ config S390_IOMMU
help
Support for the IOMMU API for s390 PCI devices.
+config MTK_IOMMU
+ bool "MTK IOMMU Support"
+ depends on ARM || ARM64
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select ARM_DMA_USE_IOMMU
+ select IOMMU_API
+ select IOMMU_DMA
+ select IOMMU_IO_PGTABLE_ARMV7S
+ select MEMORY
+ select MTK_SMI
+ help
+ Support for the M4U on certain MediaTek SoCs. The M4U (MultiMedia
+ Memory Management Unit) remaps DMA memory accesses for the
+ multimedia subsystem.
+
+ If unsure, say N here.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 42fc0c25cf1a..c6edb31bf8c6 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
@@ -16,6 +17,7 @@ obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
+obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index c865737326e1..56999d2fac07 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -526,6 +526,7 @@ static void do_fault(struct work_struct *work)
flags |= FAULT_FLAG_USER;
if (fault->flags & PPR_FAULT_WRITE)
flags |= FAULT_FLAG_WRITE;
+ flags |= FAULT_FLAG_REMOTE;
down_read(&mm->mmap_sem);
vma = find_extend_vma(mm, address);
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 20875341c865..4ff73ff64e49 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -21,6 +21,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
@@ -1396,7 +1397,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
/*
@@ -1408,6 +1409,12 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (!smmu_domain)
return NULL;
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&smmu_domain->domain)) {
+ kfree(smmu_domain);
+ return NULL;
+ }
+
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->pgtbl_lock);
return &smmu_domain->domain;
@@ -1436,6 +1443,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ iommu_put_dma_cookie(domain);
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
/* Free the CD and ASID, if we allocated them */
@@ -1630,6 +1638,17 @@ static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
return 0;
}
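+/*
+ * Detach is now performed implicitly on re-attach: the device's STE
+ * is reverted to bypass before it is moved to the new domain.
+ */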
+static void arm_smmu_detach_dev(struct device *dev)
+{
+ struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
+
+ smmu_group->ste.bypass = true;
+ if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
+ dev_warn(dev, "failed to install bypass STE\n");
+
+ smmu_group->domain = NULL;
+}
+
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -1642,7 +1661,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Already attached to a different domain? */
if (smmu_group->domain && smmu_group->domain != smmu_domain)
- return -EEXIST;
+ arm_smmu_detach_dev(dev);
smmu = smmu_group->smmu;
mutex_lock(&smmu_domain->init_mutex);
@@ -1668,7 +1687,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
smmu_group->domain = smmu_domain;
- smmu_group->ste.bypass = false;
+
+ /*
+ * FIXME: This should always be "false" once we have IOMMU-backed
+ * DMA ops for all devices behind the SMMU.
+ */
+ smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA;
ret = arm_smmu_install_ste_for_group(smmu_group);
if (IS_ERR_VALUE(ret))
@@ -1679,25 +1703,6 @@ out_unlock:
return ret;
}
-static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
-{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
-
- BUG_ON(!smmu_domain);
- BUG_ON(!smmu_group);
-
- mutex_lock(&smmu_domain->init_mutex);
- BUG_ON(smmu_group->domain != smmu_domain);
-
- smmu_group->ste.bypass = true;
- if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
- dev_warn(dev, "failed to install bypass STE\n");
-
- smmu_group->domain = NULL;
- mutex_unlock(&smmu_domain->init_mutex);
-}
-
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
@@ -1935,7 +1940,6 @@ static struct iommu_ops arm_smmu_ops = {
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
- .detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.iova_to_phys = arm_smmu_iova_to_phys,
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 59ee4b8a3236..2409e3bd3df2 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -29,6 +29,7 @@
#define pr_fmt(fmt) "arm-smmu: " fmt
#include <linux/delay.h>
+#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -167,6 +168,9 @@
#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
+#define S2CR_PRIVCFG_SHIFT 24
+#define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
+
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
@@ -257,9 +261,13 @@
#define FSYNR0_WNR (1 << 4)
static int force_stage;
-module_param_named(force_stage, force_stage, int, S_IRUGO);
+module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
+static bool disable_bypass;
+module_param(disable_bypass, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_bypass,
+ "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum arm_smmu_arch_version {
ARM_SMMU_V1 = 1,
@@ -963,7 +971,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED)
+ if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
return NULL;
/*
* Allocate the domain and initialise some of its data structures.
@@ -974,6 +982,12 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
if (!smmu_domain)
return NULL;
+ if (type == IOMMU_DOMAIN_DMA &&
+ iommu_get_dma_cookie(&smmu_domain->domain)) {
+ kfree(smmu_domain);
+ return NULL;
+ }
+
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->pgtbl_lock);
@@ -988,6 +1002,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
* Free the domain resources. We assume that all devices have
* already been detached.
*/
+ iommu_put_dma_cookie(domain);
arm_smmu_destroy_domain_context(domain);
kfree(smmu_domain);
}
@@ -1079,11 +1094,18 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
if (ret)
return ret == -EEXIST ? 0 : ret;
+ /*
+ * FIXME: This won't be needed once we have IOMMU-backed DMA ops
+ * for all devices behind the SMMU.
+ */
+ if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
+ return 0;
+
for (i = 0; i < cfg->num_streamids; ++i) {
u32 idx, s2cr;
idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
- s2cr = S2CR_TYPE_TRANS |
+ s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
(smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
@@ -1108,14 +1130,24 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
*/
for (i = 0; i < cfg->num_streamids; ++i) {
u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+ u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
- writel_relaxed(S2CR_TYPE_BYPASS,
- gr0_base + ARM_SMMU_GR0_S2CR(idx));
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
arm_smmu_master_free_smrs(smmu, cfg);
}
+static void arm_smmu_detach_dev(struct device *dev,
+ struct arm_smmu_master_cfg *cfg)
+{
+ struct iommu_domain *domain = dev->archdata.iommu;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ dev->archdata.iommu = NULL;
+ arm_smmu_domain_remove_master(smmu_domain, cfg);
+}
+
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
@@ -1129,11 +1161,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -ENXIO;
}
- if (dev->archdata.iommu) {
- dev_err(dev, "already attached to IOMMU domain\n");
- return -EEXIST;
- }
-
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
if (IS_ERR_VALUE(ret))
@@ -1155,25 +1182,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (!cfg)
return -ENODEV;
+ /* Detach the dev from its current domain */
+ if (dev->archdata.iommu)
+ arm_smmu_detach_dev(dev, cfg);
+
ret = arm_smmu_domain_add_master(smmu_domain, cfg);
if (!ret)
dev->archdata.iommu = domain;
return ret;
}
-static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
-{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_master_cfg *cfg;
-
- cfg = find_smmu_master_cfg(dev);
- if (!cfg)
- return;
-
- dev->archdata.iommu = NULL;
- arm_smmu_domain_remove_master(smmu_domain, cfg);
-}
-
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
@@ -1449,7 +1467,6 @@ static struct iommu_ops arm_smmu_ops = {
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
- .detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = default_iommu_map_sg,
@@ -1473,11 +1490,11 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
- /* Mark all SMRn as invalid and all S2CRn as bypass */
+ /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
+ reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
for (i = 0; i < smmu->num_mapping_groups; ++i) {
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
- writel_relaxed(S2CR_TYPE_BYPASS,
- gr0_base + ARM_SMMU_GR0_S2CR(i));
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
}
/* Make sure all context banks are disabled and clear CB_FSR */
@@ -1499,8 +1516,12 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Disable TLB broadcasting. */
reg |= (sCR0_VMIDPNE | sCR0_PTM);
- /* Enable client access, but bypass when no mapping is found */
- reg &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+ /* Enable client access, handling unmatched streams as appropriate */
+ reg &= ~sCR0_CLIENTPD;
+ if (disable_bypass)
+ reg |= sCR0_USFCFG;
+ else
+ reg &= ~sCR0_USFCFG;
/* Disable forced broadcasting */
reg &= ~sCR0_FB;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 72d6182666cb..58f2fe687a24 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int s_length = sg_dma_len(s);
unsigned int s_dma_len = s->length;
- s->offset = s_offset;
+ s->offset += s_offset;
s->length = s_length;
sg_dma_address(s) = dma_addr + s_offset;
dma_addr += s_dma_len;
@@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
for_each_sg(sg, s, nents, i) {
if (sg_dma_address(s) != DMA_ERROR_CODE)
- s->offset = sg_dma_address(s);
+ s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
sg_dma_address(s) = DMA_ERROR_CODE;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 97c41b8ab5d9..5ecc86cb74c8 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1,6 +1,5 @@
-/* linux/drivers/iommu/exynos_iommu.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
@@ -25,10 +24,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-
-#include <asm/cacheflush.h>
-#include <asm/dma-iommu.h>
-#include <asm/pgtable.h>
+#include <linux/dma-iommu.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
@@ -58,17 +54,25 @@ typedef u32 sysmmu_pte_t;
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
-static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
-{
- return iova & (size - 1);
-}
-
-#define section_phys(sent) (*(sent) & SECT_MASK)
-#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
-#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
-#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
-#define spage_phys(pent) (*(pent) & SPAGE_MASK)
-#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)
+/*
+ * v1.x - v3.x SYSMMU supports 32-bit physical and 32-bit virtual address
+ * spaces. v5.0 introduced support for a 36-bit physical address space by
+ * shifting all page entry values right by 4 bits.
+ * All SYSMMU controllers in the system support address spaces of the same
+ * size, so PG_ENT_SHIFT can be initialized at the first SYSMMU probe to the
+ * proper value (0 or 4).
+ */
+static short PG_ENT_SHIFT = -1;
+#define SYSMMU_PG_ENT_SHIFT 0
+#define SYSMMU_V5_PG_ENT_SHIFT 4
+
+#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
+#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
+#define section_offs(iova) (iova & (SECT_SIZE - 1))
+#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
+#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
+#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
+#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
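To make the 4-bit shift concrete, here is a minimal standalone sketch (not part of the patch; the constants are assumed to mirror the macros above) showing a 36-bit section address round-tripping through a 32-bit page table entry:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const int pg_ent_shift = 4;		/* SYSMMU_V5_PG_ENT_SHIFT */
		uint64_t paddr = 0x8C0000000ULL;	/* 36-bit section address */
		/* mk_lv1ent_sect(): shift the PA down, tag as section (type 2) */
		uint32_t sent = (uint32_t)(paddr >> pg_ent_shift) | 2;
		/* sect_to_phys() plus SECT_MASK recovers the original address */
		uint64_t back = ((uint64_t)sent << pg_ent_shift) & ~0xFFFFFULL;
		assert(back == paddr);
		printf("entry %#x -> paddr %#llx\n", sent, (unsigned long long)back);
		return 0;
	}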
#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
@@ -83,16 +87,16 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
+#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
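For reference, the resulting table footprint, as a back-of-the-envelope sketch assuming the usual 1 MiB sections and 4 KiB small pages:

	#include <stdio.h>

	int main(void)
	{
		/* 4096 L1 entries and 1M/4K = 256 L2 entries, 4 bytes each:
		 * a 16 KiB level-1 table and 1 KiB per level-2 table */
		printf("lv1 = %u bytes, lv2 = %u bytes\n",
		       4096 * 4, ((1 << 20) / (1 << 12)) * 4);
		return 0;
	}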
+#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
-#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
-
-#define mk_lv1ent_sect(pa) ((pa) | 2)
-#define mk_lv1ent_page(pa) ((pa) | 1)
-#define mk_lv2ent_lpage(pa) ((pa) | 1)
-#define mk_lv2ent_spage(pa) ((pa) | 2)
+#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
+#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
+#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)
#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
@@ -100,14 +104,23 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define CFG_LRU 0x1
#define CFG_QOS(n) ((n & 0xF) << 7)
-#define CFG_MASK 0x0150FFFF /* Selecting bit 0-15, 20, 22 and 24 */
#define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */
+/* common registers */
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
+#define REG_MMU_VERSION 0x034
+
+#define MMU_MAJ_VER(val) ((val) >> 7)
+#define MMU_MIN_VER(val) ((val) & 0x7F)
+#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
+
+#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
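A worked example of the version packing (the register value below is assumed for illustration, not taken from real hardware):

	#include <stdint.h>
	#include <stdio.h>

	#define MMU_MAJ_VER(val)	((val) >> 7)
	#define MMU_MIN_VER(val)	((val) & 0x7F)
	#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1))

	int main(void)
	{
		uint32_t reg = 0x40A00000;	/* assumed REG_MMU_VERSION readout */
		uint32_t ver = MMU_RAW_VER(reg);	/* bits [31:21] -> 0x205 */
		printf("SYSMMU v%u.%u\n", MMU_MAJ_VER(ver), MMU_MIN_VER(ver));	/* v4.5 */
		return 0;
	}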
+
+/* v1.x - v3.x registers */
#define REG_MMU_FLUSH 0x00C
#define REG_MMU_FLUSH_ENTRY 0x010
#define REG_PT_BASE_ADDR 0x014
@@ -119,21 +132,18 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
#define REG_AR_FAULT_ADDR 0x02C
#define REG_DEFAULT_SLAVE_ADDR 0x030
-#define REG_MMU_VERSION 0x034
-
-#define MMU_MAJ_VER(val) ((val) >> 7)
-#define MMU_MIN_VER(val) ((val) & 0x7F)
-#define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
-
-#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F))
-
-#define REG_PB0_SADDR 0x04C
-#define REG_PB0_EADDR 0x050
-#define REG_PB1_SADDR 0x054
-#define REG_PB1_EADDR 0x058
+/* v5.x registers */
+#define REG_V5_PT_BASE_PFN 0x00C
+#define REG_V5_MMU_FLUSH_ALL 0x010
+#define REG_V5_MMU_FLUSH_ENTRY 0x014
+#define REG_V5_INT_STATUS 0x060
+#define REG_V5_INT_CLEAR 0x064
+#define REG_V5_FAULT_AR_VA 0x070
+#define REG_V5_FAULT_AW_VA 0x080
#define has_sysmmu(dev) (dev->archdata.iommu != NULL)
+static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
@@ -149,40 +159,38 @@ static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
lv2table_base(sent)) + lv2ent_offset(iova);
}
-enum exynos_sysmmu_inttype {
- SYSMMU_PAGEFAULT,
- SYSMMU_AR_MULTIHIT,
- SYSMMU_AW_MULTIHIT,
- SYSMMU_BUSERROR,
- SYSMMU_AR_SECURITY,
- SYSMMU_AR_ACCESS,
- SYSMMU_AW_SECURITY,
- SYSMMU_AW_PROTECTION, /* 7 */
- SYSMMU_FAULT_UNKNOWN,
- SYSMMU_FAULTS_NUM
+/*
+ * IOMMU fault information register
+ */
+struct sysmmu_fault_info {
+ unsigned int bit; /* bit number in STATUS register */
+ unsigned short addr_reg; /* register to read VA fault address */
+ const char *name; /* human readable fault name */
+ unsigned int type; /* fault type for report_iommu_fault */
};
-static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
- REG_PAGE_FAULT_ADDR,
- REG_AR_FAULT_ADDR,
- REG_AW_FAULT_ADDR,
- REG_DEFAULT_SLAVE_ADDR,
- REG_AR_FAULT_ADDR,
- REG_AR_FAULT_ADDR,
- REG_AW_FAULT_ADDR,
- REG_AW_FAULT_ADDR
+static const struct sysmmu_fault_info sysmmu_faults[] = {
+ { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
+ { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
+ { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
+ { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
+ { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
+ { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
+ { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
+ { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};
-static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
- "PAGE FAULT",
- "AR MULTI-HIT FAULT",
- "AW MULTI-HIT FAULT",
- "BUS ERROR",
- "AR SECURITY PROTECTION FAULT",
- "AR ACCESS PROTECTION FAULT",
- "AW SECURITY PROTECTION FAULT",
- "AW ACCESS PROTECTION FAULT",
- "UNKNOWN FAULT"
+static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
+ { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
+ { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
+ { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
+ { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
+ { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
+ { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
+ { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
+ { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
+ { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
+ { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};
/*
@@ -193,6 +201,7 @@ static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
*/
struct exynos_iommu_owner {
struct list_head controllers; /* list of sysmmu_drvdata.owner_node */
+ struct iommu_domain *domain; /* domain this device is attached to */
};
/*
@@ -221,6 +230,8 @@ struct sysmmu_drvdata {
struct device *master; /* master device (owner) */
void __iomem *sfrbase; /* our registers */
struct clk *clk; /* SYSMMU's clock */
+ struct clk *aclk; /* SYSMMU's aclk clock */
+ struct clk *pclk; /* SYSMMU's pclk clock */
struct clk *clk_master; /* master's device clock */
int activations; /* number of calls to sysmmu_enable */
spinlock_t lock; /* lock for modifying state */
@@ -255,70 +266,101 @@ static bool is_sysmmu_active(struct sysmmu_drvdata *data)
return data->activations > 0;
}
-static void sysmmu_unblock(void __iomem *sfrbase)
+static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
- __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+ writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}
-static bool sysmmu_block(void __iomem *sfrbase)
+static bool sysmmu_block(struct sysmmu_drvdata *data)
{
int i = 120;
- __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
- while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+ writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
+ while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
--i;
- if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
- sysmmu_unblock(sfrbase);
+ if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
+ sysmmu_unblock(data);
return false;
}
return true;
}
-static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
+static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
- __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
+ if (MMU_MAJ_VER(data->version) < 5)
+ writel(0x1, data->sfrbase + REG_MMU_FLUSH);
+ else
+ writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}
-static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
+static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
sysmmu_iova_t iova, unsigned int num_inv)
{
unsigned int i;
for (i = 0; i < num_inv; i++) {
- __raw_writel((iova & SPAGE_MASK) | 1,
- sfrbase + REG_MMU_FLUSH_ENTRY);
+ if (MMU_MAJ_VER(data->version) < 5)
+ writel((iova & SPAGE_MASK) | 1,
+ data->sfrbase + REG_MMU_FLUSH_ENTRY);
+ else
+ writel((iova & SPAGE_MASK) | 1,
+ data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
iova += SPAGE_SIZE;
}
}
-static void __sysmmu_set_ptbase(void __iomem *sfrbase,
- phys_addr_t pgd)
+static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
- __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
+ if (MMU_MAJ_VER(data->version) < 5)
+ writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
+ else
+ writel(pgd >> PAGE_SHIFT,
+ data->sfrbase + REG_V5_PT_BASE_PFN);
- __sysmmu_tlb_invalidate(sfrbase);
+ __sysmmu_tlb_invalidate(data);
}
-static void show_fault_information(const char *name,
- enum exynos_sysmmu_inttype itype,
- phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
+static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
- sysmmu_pte_t *ent;
+ u32 ver;
+
+ clk_enable(data->clk_master);
+ clk_enable(data->clk);
+ clk_enable(data->pclk);
+ clk_enable(data->aclk);
+
+ ver = readl(data->sfrbase + REG_MMU_VERSION);
- if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
- itype = SYSMMU_FAULT_UNKNOWN;
+ /* controllers on some SoCs don't report a proper version */
+ if (ver == 0x80000001u)
+ data->version = MAKE_MMU_VER(1, 0);
+ else
+ data->version = MMU_RAW_VER(ver);
+
+ dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
+ MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
- pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
- sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);
+ clk_disable(data->aclk);
+ clk_disable(data->pclk);
+ clk_disable(data->clk);
+ clk_disable(data->clk_master);
+}
- ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
- pr_err("\tLv1 entry: %#x\n", *ent);
+static void show_fault_information(struct sysmmu_drvdata *data,
+ const struct sysmmu_fault_info *finfo,
+ sysmmu_iova_t fault_addr)
+{
+ sysmmu_pte_t *ent;
+ dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
+ finfo->name, fault_addr, &data->pgtable);
+ ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
+ dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
if (lv1ent_page(ent)) {
ent = page_entry(ent, fault_addr);
- pr_err("\t Lv2 entry: %#x\n", *ent);
+ dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
}
}
@@ -326,49 +368,52 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
/* SYSMMU is in blocked state when an interrupt occurs. */
struct sysmmu_drvdata *data = dev_id;
- enum exynos_sysmmu_inttype itype;
- sysmmu_iova_t addr = -1;
+ const struct sysmmu_fault_info *finfo;
+ unsigned int i, n, itype;
+ sysmmu_iova_t fault_addr = -1;
+ unsigned short reg_status, reg_clear;
int ret = -ENOSYS;
WARN_ON(!is_sysmmu_active(data));
+ if (MMU_MAJ_VER(data->version) < 5) {
+ reg_status = REG_INT_STATUS;
+ reg_clear = REG_INT_CLEAR;
+ finfo = sysmmu_faults;
+ n = ARRAY_SIZE(sysmmu_faults);
+ } else {
+ reg_status = REG_V5_INT_STATUS;
+ reg_clear = REG_V5_INT_CLEAR;
+ finfo = sysmmu_v5_faults;
+ n = ARRAY_SIZE(sysmmu_v5_faults);
+ }
+
spin_lock(&data->lock);
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
- itype = (enum exynos_sysmmu_inttype)
- __ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
- if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
- itype = SYSMMU_FAULT_UNKNOWN;
- else
- addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
+ itype = __ffs(readl(data->sfrbase + reg_status));
+ for (i = 0; i < n; i++, finfo++)
+ if (finfo->bit == itype)
+ break;
+ /* unknown/unsupported fault */
+ BUG_ON(i == n);
- if (itype == SYSMMU_FAULT_UNKNOWN) {
- pr_err("%s: Fault is not occurred by System MMU '%s'!\n",
- __func__, dev_name(data->sysmmu));
- pr_err("%s: Please check if IRQ is correctly configured.\n",
- __func__);
- BUG();
- } else {
- unsigned int base =
- __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
- show_fault_information(dev_name(data->sysmmu),
- itype, base, addr);
- if (data->domain)
- ret = report_iommu_fault(&data->domain->domain,
- data->master, addr, itype);
- }
+ /* print debug message */
+ fault_addr = readl(data->sfrbase + finfo->addr_reg);
+ show_fault_information(data, finfo, fault_addr);
+ if (data->domain)
+ ret = report_iommu_fault(&data->domain->domain,
+ data->master, fault_addr, finfo->type);
/* fault is not recovered by fault handler */
BUG_ON(ret != 0);
- __raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
+ writel(1 << itype, data->sfrbase + reg_clear);
- sysmmu_unblock(data->sfrbase);
+ sysmmu_unblock(data);
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
spin_unlock(&data->lock);
@@ -377,15 +422,15 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
- __raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
- __raw_writel(0, data->sfrbase + REG_MMU_CFG);
+ writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
+ writel(0, data->sfrbase + REG_MMU_CFG);
+ clk_disable(data->aclk);
+ clk_disable(data->pclk);
clk_disable(data->clk);
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
}
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
@@ -416,42 +461,34 @@ static bool __sysmmu_disable(struct sysmmu_drvdata *data)
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
- unsigned int cfg = CFG_LRU | CFG_QOS(15);
- unsigned int ver;
-
- ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
- if (MMU_MAJ_VER(ver) == 3) {
- if (MMU_MIN_VER(ver) >= 2) {
- cfg |= CFG_FLPDCACHE;
- if (MMU_MIN_VER(ver) == 3) {
- cfg |= CFG_ACGEN;
- cfg &= ~CFG_LRU;
- } else {
- cfg |= CFG_SYSSEL;
- }
- }
- }
+ unsigned int cfg;
- __raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
- data->version = ver;
+ if (data->version <= MAKE_MMU_VER(3, 1))
+ cfg = CFG_LRU | CFG_QOS(15);
+ else if (data->version <= MAKE_MMU_VER(3, 2))
+ cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
+ else
+ cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
+
+ writel(cfg, data->sfrbase + REG_MMU_CFG);
}
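The <= comparisons above work because MAKE_MMU_VER() packs major.minor into a single monotonically ordered word; a quick standalone sketch:

	#include <stdio.h>

	#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

	int main(void)
	{
		/* 0x181 < 0x182 < 0x280, so 3.1 < 3.2 < 5.0 as plain integers */
		printf("%#x %#x %#x\n", MAKE_MMU_VER(3, 1), MAKE_MMU_VER(3, 2),
		       MAKE_MMU_VER(5, 0));
		return 0;
	}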
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
clk_enable(data->clk);
+ clk_enable(data->pclk);
+ clk_enable(data->aclk);
- __raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
+ writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
__sysmmu_init_config(data);
- __sysmmu_set_ptbase(data->sfrbase, data->pgtable);
+ __sysmmu_set_ptbase(data, data->pgtable);
- __raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+ writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
}
static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
@@ -482,28 +519,21 @@ static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
return ret;
}
-static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
- sysmmu_iova_t iova)
-{
- if (data->version == MAKE_MMU_VER(3, 3))
- __raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
-}
-
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
sysmmu_iova_t iova)
{
unsigned long flags;
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
spin_lock_irqsave(&data->lock, flags);
- if (is_sysmmu_active(data))
- __sysmmu_tlb_invalidate_flpdcache(data, iova);
+ if (is_sysmmu_active(data)) {
+ if (data->version >= MAKE_MMU_VER(3, 3))
+ __sysmmu_tlb_invalidate_entry(data, iova, 1);
+ }
spin_unlock_irqrestore(&data->lock, flags);
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -515,8 +545,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
if (is_sysmmu_active(data)) {
unsigned int num_inv = 1;
- if (!IS_ERR(data->clk_master))
- clk_enable(data->clk_master);
+ clk_enable(data->clk_master);
/*
* L2TLB invalidation required
@@ -531,13 +560,11 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
if (MMU_MAJ_VER(data->version) == 2)
num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
- if (sysmmu_block(data->sfrbase)) {
- __sysmmu_tlb_invalidate_entry(
- data->sfrbase, iova, num_inv);
- sysmmu_unblock(data->sfrbase);
+ if (sysmmu_block(data)) {
+ __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
+ sysmmu_unblock(data);
}
- if (!IS_ERR(data->clk_master))
- clk_disable(data->clk_master);
+ clk_disable(data->clk_master);
} else {
dev_dbg(data->master,
"disabled. Skipping TLB invalidation @ %#x\n", iova);
@@ -575,25 +602,52 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
}
data->clk = devm_clk_get(dev, "sysmmu");
- if (IS_ERR(data->clk)) {
- dev_err(dev, "Failed to get clock!\n");
- return PTR_ERR(data->clk);
- } else {
+ if (!IS_ERR(data->clk)) {
ret = clk_prepare(data->clk);
if (ret) {
dev_err(dev, "Failed to prepare clk\n");
return ret;
}
+ } else {
+ data->clk = NULL;
+ }
+
+ data->aclk = devm_clk_get(dev, "aclk");
+ if (!IS_ERR(data->aclk)) {
+ ret = clk_prepare(data->aclk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare aclk\n");
+ return ret;
+ }
+ } else {
+ data->aclk = NULL;
+ }
+
+ data->pclk = devm_clk_get(dev, "pclk");
+ if (!IS_ERR(data->pclk)) {
+ ret = clk_prepare(data->pclk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare pclk\n");
+ return ret;
+ }
+ } else {
+ data->pclk = NULL;
+ }
+
+ if (!data->clk && (!data->aclk || !data->pclk)) {
+ dev_err(dev, "Failed to get device clock(s)!\n");
+ return -ENOSYS;
}
data->clk_master = devm_clk_get(dev, "master");
if (!IS_ERR(data->clk_master)) {
ret = clk_prepare(data->clk_master);
if (ret) {
- clk_unprepare(data->clk);
dev_err(dev, "Failed to prepare master's clk\n");
return ret;
}
+ } else {
+ data->clk_master = NULL;
}
data->sysmmu = dev;
@@ -601,6 +655,14 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ __sysmmu_get_version(data);
+ if (PG_ENT_SHIFT < 0) {
+ if (MMU_MAJ_VER(data->version) < 5)
+ PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
+ else
+ PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
+ }
+
pm_runtime_enable(dev);
return 0;
@@ -650,28 +712,38 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
}
};
-static inline void pgtable_flush(void *vastart, void *vaend)
+static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
- dmac_flush_range(vastart, vaend);
- outer_flush_range(virt_to_phys(vastart),
- virt_to_phys(vaend));
+ dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
+ DMA_TO_DEVICE);
+ *ent = val;
+ dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
+ DMA_TO_DEVICE);
}
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
struct exynos_iommu_domain *domain;
+ dma_addr_t handle;
int i;
- if (type != IOMMU_DOMAIN_UNMANAGED)
- return NULL;
+ /* Check if correct PTE offsets are initialized */
+ BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
+ if (type == IOMMU_DOMAIN_DMA) {
+ if (iommu_get_dma_cookie(&domain->domain) != 0)
+ goto err_pgtable;
+ } else if (type != IOMMU_DOMAIN_UNMANAGED) {
+ goto err_pgtable;
+ }
+
domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
if (!domain->pgtable)
- goto err_pgtable;
+ goto err_dma_cookie;
domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!domain->lv2entcnt)
@@ -689,7 +761,10 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
domain->pgtable[i + 7] = ZERO_LV2LINK;
}
- pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);
+ handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
+ DMA_TO_DEVICE);
+ /* For mapping page table entries we rely on dma == phys */
+ BUG_ON(handle != virt_to_phys(domain->pgtable));
spin_lock_init(&domain->lock);
spin_lock_init(&domain->pgtablelock);
@@ -703,6 +778,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
err_counter:
free_pages((unsigned long)domain->pgtable, 2);
+err_dma_cookie:
+ if (type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(&domain->domain);
err_pgtable:
kfree(domain);
return NULL;
@@ -727,16 +805,62 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
spin_unlock_irqrestore(&domain->lock, flags);
+ if (iommu_domain->type == IOMMU_DOMAIN_DMA)
+ iommu_put_dma_cookie(iommu_domain);
+
+ dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
+ DMA_TO_DEVICE);
+
for (i = 0; i < NUM_LV1ENTRIES; i++)
- if (lv1ent_page(domain->pgtable + i))
+ if (lv1ent_page(domain->pgtable + i)) {
+ phys_addr_t base = lv2table_base(domain->pgtable + i);
+
+ dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
+ DMA_TO_DEVICE);
kmem_cache_free(lv2table_kmem_cache,
- phys_to_virt(lv2table_base(domain->pgtable + i)));
+ phys_to_virt(base));
+ }
free_pages((unsigned long)domain->pgtable, 2);
free_pages((unsigned long)domain->lv2entcnt, 1);
kfree(domain);
}
+static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
+ struct device *dev)
+{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+ struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+ phys_addr_t pagetable = virt_to_phys(domain->pgtable);
+ struct sysmmu_drvdata *data, *next;
+ unsigned long flags;
+ bool found = false;
+
+ if (!has_sysmmu(dev) || owner->domain != iommu_domain)
+ return;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
+ if (data->master == dev) {
+ if (__sysmmu_disable(data)) {
+ data->master = NULL;
+ list_del_init(&data->domain_node);
+ }
+ pm_runtime_put(data->sysmmu);
+ found = true;
+ }
+ }
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ owner->domain = NULL;
+
+ if (found)
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
+ __func__, &pagetable);
+ else
+ dev_err(dev, "%s: No IOMMU is attached\n", __func__);
+}
+
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
struct device *dev)
{
@@ -750,6 +874,9 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
if (!has_sysmmu(dev))
return -ENODEV;
+ if (owner->domain)
+ exynos_iommu_detach_device(owner->domain, dev);
+
list_for_each_entry(data, &owner->controllers, owner_node) {
pm_runtime_get_sync(data->sysmmu);
ret = __sysmmu_enable(data, pagetable, domain);
@@ -768,44 +895,13 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
return ret;
}
+ owner->domain = iommu_domain;
dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
__func__, &pagetable, (ret == 0) ? "" : ", again");
return ret;
}
-static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
- struct device *dev)
-{
- struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
- phys_addr_t pagetable = virt_to_phys(domain->pgtable);
- struct sysmmu_drvdata *data, *next;
- unsigned long flags;
- bool found = false;
-
- if (!has_sysmmu(dev))
- return;
-
- spin_lock_irqsave(&domain->lock, flags);
- list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
- if (data->master == dev) {
- if (__sysmmu_disable(data)) {
- data->master = NULL;
- list_del_init(&data->domain_node);
- }
- pm_runtime_put(data->sysmmu);
- found = true;
- }
- }
- spin_unlock_irqrestore(&domain->lock, flags);
-
- if (found)
- dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
- __func__, &pagetable);
- else
- dev_err(dev, "%s: No IOMMU is attached\n", __func__);
-}
-
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
@@ -819,15 +915,14 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
bool need_flush_flpd_cache = lv1ent_zero(sent);
pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
- BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
+ BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
if (!pent)
return ERR_PTR(-ENOMEM);
- *sent = mk_lv1ent_page(virt_to_phys(pent));
+ update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES;
- pgtable_flush(pent, pent + NUM_LV2ENTRIES);
- pgtable_flush(sent, sent + 1);
+ dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
/*
* If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
@@ -880,9 +975,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
*pgcnt = 0;
}
- *sent = mk_lv1ent_sect(paddr);
-
- pgtable_flush(sent, sent + 1);
+ update_pte(sent, mk_lv1ent_sect(paddr));
spin_lock(&domain->lock);
if (lv1ent_page_zero(sent)) {
@@ -906,12 +999,15 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
if (WARN_ON(!lv2ent_fault(pent)))
return -EADDRINUSE;
- *pent = mk_lv2ent_spage(paddr);
- pgtable_flush(pent, pent + 1);
+ update_pte(pent, mk_lv2ent_spage(paddr));
*pgcnt -= 1;
} else { /* size == LPAGE_SIZE */
int i;
+ dma_addr_t pent_base = virt_to_phys(pent);
+ dma_sync_single_for_cpu(dma_dev, pent_base,
+ sizeof(*pent) * SPAGES_PER_LPAGE,
+ DMA_TO_DEVICE);
for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
if (WARN_ON(!lv2ent_fault(pent))) {
if (i > 0)
@@ -921,7 +1017,9 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*pent = mk_lv2ent_lpage(paddr);
}
- pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+ dma_sync_single_for_device(dma_dev, pent_base,
+ sizeof(*pent) * SPAGES_PER_LPAGE,
+ DMA_TO_DEVICE);
*pgcnt -= SPAGES_PER_LPAGE;
}
@@ -1031,8 +1129,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
/* workaround for h/w bug in System MMU v3.3 */
- *ent = ZERO_LV2LINK;
- pgtable_flush(ent, ent + 1);
+ update_pte(ent, ZERO_LV2LINK);
size = SECT_SIZE;
goto done;
}
@@ -1053,9 +1150,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
}
if (lv2ent_small(ent)) {
- *ent = 0;
+ update_pte(ent, 0);
size = SPAGE_SIZE;
- pgtable_flush(ent, ent + 1);
domain->lv2entcnt[lv1ent_offset(iova)] += 1;
goto done;
}
@@ -1066,9 +1162,13 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
goto err;
}
+ dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
+ sizeof(*ent) * SPAGES_PER_LPAGE,
+ DMA_TO_DEVICE);
memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
- pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
-
+ dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
+ sizeof(*ent) * SPAGES_PER_LPAGE,
+ DMA_TO_DEVICE);
size = LPAGE_SIZE;
domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
@@ -1114,28 +1214,32 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
return phys;
}
+static struct iommu_group *get_device_iommu_group(struct device *dev)
+{
+ struct iommu_group *group;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ group = iommu_group_alloc();
+
+ return group;
+}
+
static int exynos_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
- int ret;
if (!has_sysmmu(dev))
return -ENODEV;
- group = iommu_group_get(dev);
+ group = iommu_group_get_for_dev(dev);
- if (!group) {
- group = iommu_group_alloc();
- if (IS_ERR(group)) {
- dev_err(dev, "Failed to allocate IOMMU group\n");
- return PTR_ERR(group);
- }
- }
+ if (IS_ERR(group))
+ return PTR_ERR(group);
- ret = iommu_group_add_device(group, dev);
iommu_group_put(group);
- return ret;
+ return 0;
}
static void exynos_iommu_remove_device(struct device *dev)
@@ -1182,6 +1286,7 @@ static struct iommu_ops exynos_iommu_ops = {
.unmap = exynos_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = exynos_iommu_iova_to_phys,
+ .device_group = get_device_iommu_group,
.add_device = exynos_iommu_add_device,
.remove_device = exynos_iommu_remove_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
@@ -1245,6 +1350,13 @@ static int __init exynos_iommu_of_setup(struct device_node *np)
if (IS_ERR(pdev))
return PTR_ERR(pdev);
+ /*
+ * Use the first registered sysmmu device for performing
+ * DMA mapping operations on IOMMU page tables (CPU cache flush).
+ */
+ if (!dma_dev)
+ dma_dev = &pdev->dev;
+
of_iommu_set_ops(np, &exynos_iommu_ops);
return 0;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a2e1b7f14df2..e1852e845d21 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2458,7 +2458,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
}
/* register PCI DMA alias device */
- if (req_id != dma_alias && dev_is_pci(dev)) {
+ if (dev_is_pci(dev) && req_id != dma_alias) {
tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
dma_alias & 0xff, NULL, domain);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
new file mode 100644
index 000000000000..9488e3c97bcb
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -0,0 +1,846 @@
+/*
+ * CPU-agnostic ARM page table allocator.
+ *
+ * ARMv7 Short-descriptor format, supporting
+ * - Basic memory attributes
+ * - Simplified access permissions (AP[2:1] model)
+ * - Backwards-compatible TEX remap
+ * - Large pages/supersections (if indicated by the caller)
+ *
+ * Not supporting:
+ * - Legacy access permissions (AP[2:0] model)
+ *
+ * Almost certainly never supporting:
+ * - PXN
+ * - Domains
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014-2015 ARM Limited
+ * Copyright (c) 2014-2015 MediaTek Inc.
+ */
+
+#define pr_fmt(fmt) "arm-v7s io-pgtable: " fmt
+
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+
+#include "io-pgtable.h"
+
+/* Struct accessors */
+#define io_pgtable_to_data(x) \
+ container_of((x), struct arm_v7s_io_pgtable, iop)
+
+#define io_pgtable_ops_to_data(x) \
+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
+
+/*
+ * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
+ * and 12 bits in a page. With some carefully-chosen coefficients we can
+ * hide the ugly inconsistencies behind these macros and at least let the
+ * rest of the code pretend to be somewhat sane.
+ */
+#define ARM_V7S_ADDR_BITS 32
+#define _ARM_V7S_LVL_BITS(lvl) (16 - (lvl) * 4)
+#define ARM_V7S_LVL_SHIFT(lvl) (ARM_V7S_ADDR_BITS - (4 + 8 * (lvl)))
+#define ARM_V7S_TABLE_SHIFT 10
+
+#define ARM_V7S_PTES_PER_LVL(lvl) (1 << _ARM_V7S_LVL_BITS(lvl))
+#define ARM_V7S_TABLE_SIZE(lvl) \
+ (ARM_V7S_PTES_PER_LVL(lvl) * sizeof(arm_v7s_iopte))
+
+#define ARM_V7S_BLOCK_SIZE(lvl) (1UL << ARM_V7S_LVL_SHIFT(lvl))
+#define ARM_V7S_LVL_MASK(lvl) ((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
+#define ARM_V7S_TABLE_MASK ((u32)(~0U << ARM_V7S_TABLE_SHIFT))
+#define _ARM_V7S_IDX_MASK(lvl) (ARM_V7S_PTES_PER_LVL(lvl) - 1)
+#define ARM_V7S_LVL_IDX(addr, lvl) ({ \
+ int _l = lvl; \
+ ((u32)(addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l); \
+})
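A standalone sketch of what those coefficients produce (not part of the patch): 12 index bits and 1 MiB blocks at level 1, 8 index bits and 4 KiB pages at level 2:

	#include <stdint.h>
	#include <stdio.h>

	#define LVL_BITS(lvl)	(16 - (lvl) * 4)	/* 12, then 8 */
	#define LVL_SHIFT(lvl)	(32 - (4 + 8 * (lvl)))	/* 20, then 12 */
	#define LVL_IDX(a, l)	(((uint32_t)(a) >> LVL_SHIFT(l)) & \
				 ((1 << LVL_BITS(l)) - 1))

	int main(void)
	{
		uint32_t iova = 0x12345678;
		/* top 12 bits index L1, next 8 bits index L2, 12 bits of offset */
		printf("l1 idx %#x, l2 idx %#x, offset %#x\n",
		       LVL_IDX(iova, 1), LVL_IDX(iova, 2), iova & 0xFFF);
		return 0;
	}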
+
+/*
+ * Large page/supersection entries are effectively a block of 16 page/section
+ * entries, along the lines of the LPAE contiguous hint, but all with the
+ * same output address. For want of a better common name we'll call them
+ * "contiguous" versions of their respective page/section entries here, but
+ * noting the distinction (WRT TLB maintenance) that they represent *one*
+ * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
+ */
+#define ARM_V7S_CONT_PAGES 16
+
+/* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
+#define ARM_V7S_PTE_TYPE_TABLE 0x1
+#define ARM_V7S_PTE_TYPE_PAGE 0x2
+#define ARM_V7S_PTE_TYPE_CONT_PAGE 0x1
+
+#define ARM_V7S_PTE_IS_VALID(pte) (((pte) & 0x3) != 0)
+#define ARM_V7S_PTE_IS_TABLE(pte, lvl) (lvl == 1 && ((pte) & ARM_V7S_PTE_TYPE_TABLE))
+
+/* Page table bits */
+#define ARM_V7S_ATTR_XN(lvl) BIT(4 * (2 - (lvl)))
+#define ARM_V7S_ATTR_B BIT(2)
+#define ARM_V7S_ATTR_C BIT(3)
+#define ARM_V7S_ATTR_NS_TABLE BIT(3)
+#define ARM_V7S_ATTR_NS_SECTION BIT(19)
+
+#define ARM_V7S_CONT_SECTION BIT(18)
+#define ARM_V7S_CONT_PAGE_XN_SHIFT 15
+
+/*
+ * The attribute bits are consistently ordered*, but occupy bits [17:10] of
+ * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
+ * fields relative to that 8-bit block, plus a total shift relative to the PTE.
+ */
+#define ARM_V7S_ATTR_SHIFT(lvl) (16 - (lvl) * 6)
+
+#define ARM_V7S_ATTR_MASK 0xff
+#define ARM_V7S_ATTR_AP0 BIT(0)
+#define ARM_V7S_ATTR_AP1 BIT(1)
+#define ARM_V7S_ATTR_AP2 BIT(5)
+#define ARM_V7S_ATTR_S BIT(6)
+#define ARM_V7S_ATTR_NG BIT(7)
+#define ARM_V7S_TEX_SHIFT 2
+#define ARM_V7S_TEX_MASK 0x7
+#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
+
+/* *well, except for TEX on level 2 large pages, of course :( */
+#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
+#define ARM_V7S_CONT_PAGE_TEX_MASK (ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)
+
+/* Simplified access permissions */
+#define ARM_V7S_PTE_AF ARM_V7S_ATTR_AP0
+#define ARM_V7S_PTE_AP_UNPRIV ARM_V7S_ATTR_AP1
+#define ARM_V7S_PTE_AP_RDONLY ARM_V7S_ATTR_AP2
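Sanity check, as a sketch rather than part of the patch: AP2 sits at bit 5 of the 8-bit attribute block, so after ARM_V7S_ATTR_SHIFT() it lands at PTE bit 15 for sections and bit 9 for small pages, matching the ARMv7 short-descriptor layout:

	#include <stdio.h>

	#define ATTR_SHIFT(lvl)	(16 - (lvl) * 6)	/* 10 at L1, 4 at L2 */
	#define ATTR_AP2	(1 << 5)

	int main(void)
	{
		printf("section AP2 at bit %d, small-page AP2 at bit %d\n",
		       __builtin_ctz(ATTR_AP2 << ATTR_SHIFT(1)),
		       __builtin_ctz(ATTR_AP2 << ATTR_SHIFT(2)));
		return 0;
	}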
+
+/* Register bits */
+#define ARM_V7S_RGN_NC 0
+#define ARM_V7S_RGN_WBWA 1
+#define ARM_V7S_RGN_WT 2
+#define ARM_V7S_RGN_WB 3
+
+#define ARM_V7S_PRRR_TYPE_DEVICE 1
+#define ARM_V7S_PRRR_TYPE_NORMAL 2
+#define ARM_V7S_PRRR_TR(n, type) (((type) & 0x3) << ((n) * 2))
+#define ARM_V7S_PRRR_DS0 BIT(16)
+#define ARM_V7S_PRRR_DS1 BIT(17)
+#define ARM_V7S_PRRR_NS0 BIT(18)
+#define ARM_V7S_PRRR_NS1 BIT(19)
+#define ARM_V7S_PRRR_NOS(n) BIT((n) + 24)
+
+#define ARM_V7S_NMRR_IR(n, attr) (((attr) & 0x3) << ((n) * 2))
+#define ARM_V7S_NMRR_OR(n, attr) (((attr) & 0x3) << ((n) * 2 + 16))
+
+#define ARM_V7S_TTBR_S BIT(1)
+#define ARM_V7S_TTBR_NOS BIT(5)
+#define ARM_V7S_TTBR_ORGN_ATTR(attr) (((attr) & 0x3) << 3)
+#define ARM_V7S_TTBR_IRGN_ATTR(attr) \
+ ((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))
+
+#define ARM_V7S_TCR_PD1 BIT(5)
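The IRGN helper looks odd because IRGN is a split field in the TTBR (IRGN[1] at bit 0, IRGN[0] at bit 6); a sketch of the write-back write-allocate encoding:

	#include <stdio.h>

	#define RGN_WBWA	1
	#define TTBR_ORGN(a)	(((a) & 0x3) << 3)
	#define TTBR_IRGN(a)	((((a) & 0x1) << 6) | (((a) & 0x2) >> 1))

	int main(void)
	{
		/* ORGN = 0b01 at bits [4:3], IRGN[0] = 1 at bit 6 -> 0x48 */
		printf("ttbr cacheability bits = %#x\n",
		       TTBR_ORGN(RGN_WBWA) | TTBR_IRGN(RGN_WBWA));
		return 0;
	}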
+
+typedef u32 arm_v7s_iopte;
+
+static bool selftest_running;
+
+struct arm_v7s_io_pgtable {
+ struct io_pgtable iop;
+
+ arm_v7s_iopte *pgd;
+ struct kmem_cache *l2_tables;
+};
+
+static dma_addr_t __arm_v7s_dma_addr(void *pages)
+{
+ return (dma_addr_t)virt_to_phys(pages);
+}
+
+static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
+{
+ if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
+ pte &= ARM_V7S_TABLE_MASK;
+ else
+ pte &= ARM_V7S_LVL_MASK(lvl);
+ return phys_to_virt(pte);
+}
+
+static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
+ struct arm_v7s_io_pgtable *data)
+{
+ struct device *dev = data->iop.cfg.iommu_dev;
+ dma_addr_t dma;
+ size_t size = ARM_V7S_TABLE_SIZE(lvl);
+ void *table = NULL;
+
+ if (lvl == 1)
+ table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+ else if (lvl == 2)
+ table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+ if (table && !selftest_running) {
+ dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto out_free;
+ /*
+ * We depend on the IOMMU being able to work with any physical
+ * address directly, so if the DMA layer suggests otherwise by
+ * translating or truncating them, that bodes very badly...
+ */
+ if (dma != virt_to_phys(table))
+ goto out_unmap;
+ }
+ kmemleak_ignore(table);
+ return table;
+
+out_unmap:
+ dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+ dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+ if (lvl == 1)
+ free_pages((unsigned long)table, get_order(size));
+ else
+ kmem_cache_free(data->l2_tables, table);
+ return NULL;
+}
+
+static void __arm_v7s_free_table(void *table, int lvl,
+ struct arm_v7s_io_pgtable *data)
+{
+ struct device *dev = data->iop.cfg.iommu_dev;
+ size_t size = ARM_V7S_TABLE_SIZE(lvl);
+
+ if (!selftest_running)
+ dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
+ DMA_TO_DEVICE);
+ if (lvl == 1)
+ free_pages((unsigned long)table, get_order(size));
+ else
+ kmem_cache_free(data->l2_tables, table);
+}
+
+static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
+ struct io_pgtable_cfg *cfg)
+{
+ if (selftest_running)
+ return;
+
+ dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
+ num_entries * sizeof(*ptep), DMA_TO_DEVICE);
+}
+static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte,
+ int num_entries, struct io_pgtable_cfg *cfg)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++)
+ ptep[i] = pte;
+
+ __arm_v7s_pte_sync(ptep, num_entries, cfg);
+}
+
+static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
+ struct io_pgtable_cfg *cfg)
+{
+ bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
+ arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S |
+ ARM_V7S_ATTR_TEX(1);
+
+ if (ap) {
+ pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
+ if (!(prot & IOMMU_WRITE))
+ pte |= ARM_V7S_PTE_AP_RDONLY;
+ }
+ pte <<= ARM_V7S_ATTR_SHIFT(lvl);
+
+ if ((prot & IOMMU_NOEXEC) && ap)
+ pte |= ARM_V7S_ATTR_XN(lvl);
+ if (prot & IOMMU_CACHE)
+ pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;
+
+ return pte;
+}
+
+static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
+{
+ int prot = IOMMU_READ;
+
+ /* AP[2] set means read-only, so write permission is its inverse */
+ if (!(pte & (ARM_V7S_PTE_AP_RDONLY << ARM_V7S_ATTR_SHIFT(lvl))))
+ prot |= IOMMU_WRITE;
+ if (pte & ARM_V7S_ATTR_C)
+ prot |= IOMMU_CACHE;
+
+ return prot;
+}
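A minimal round-trip sketch of the two helpers above (standalone, with the kernel flags stubbed out as assumptions) confirming that a read-only mapping decodes back without write permission:

	#include <assert.h>

	#define AP_RDONLY	(1 << 5)	/* ARM_V7S_PTE_AP_RDONLY */
	#define ATTR_SHIFT(l)	(16 - (l) * 6)
	#define PROT_READ	1		/* stand-in for IOMMU_READ */
	#define PROT_WRITE	2		/* stand-in for IOMMU_WRITE */

	static unsigned prot_to_pte(int prot, int lvl)
	{
		unsigned pte = 0;

		if (!(prot & PROT_WRITE))
			pte |= AP_RDONLY;
		return pte << ATTR_SHIFT(lvl);
	}

	static int pte_to_prot(unsigned pte, int lvl)
	{
		int prot = PROT_READ;

		/* AP[2] set means read-only: write is its inverse */
		if (!(pte & (AP_RDONLY << ATTR_SHIFT(lvl))))
			prot |= PROT_WRITE;
		return prot;
	}

	int main(void)
	{
		assert(pte_to_prot(prot_to_pte(PROT_READ, 2), 2) == PROT_READ);
		assert(pte_to_prot(prot_to_pte(PROT_READ | PROT_WRITE, 1), 1) ==
		       (PROT_READ | PROT_WRITE));
		return 0;
	}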
+
+static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
+{
+ if (lvl == 1) {
+ pte |= ARM_V7S_CONT_SECTION;
+ } else if (lvl == 2) {
+ arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
+ arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;
+
+ pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
+ pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
+ (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
+ ARM_V7S_PTE_TYPE_CONT_PAGE;
+ }
+ return pte;
+}
+
+static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
+{
+ if (lvl == 1) {
+ pte &= ~ARM_V7S_CONT_SECTION;
+ } else if (lvl == 2) {
+ arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
+ arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
+ ARM_V7S_CONT_PAGE_TEX_SHIFT);
+
+ pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
+ pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
+ (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
+ ARM_V7S_PTE_TYPE_PAGE;
+ }
+ return pte;
+}
+
+static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
+{
+ if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
+ return pte & ARM_V7S_CONT_SECTION;
+ else if (lvl == 2)
+ return !(pte & ARM_V7S_PTE_TYPE_PAGE);
+ return false;
+}
+
+static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *, unsigned long,
+ size_t, int, arm_v7s_iopte *);
+
+static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, phys_addr_t paddr, int prot,
+ int lvl, int num_entries, arm_v7s_iopte *ptep)
+{
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ arm_v7s_iopte pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
+ int i;
+
+ for (i = 0; i < num_entries; i++)
+ if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
+ /*
+ * We need to unmap and free the old table before
+ * overwriting it with a block entry.
+ */
+ arm_v7s_iopte *tblp;
+ size_t sz = ARM_V7S_BLOCK_SIZE(lvl);
+
+ tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
+ if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,
+ sz, lvl, tblp) != sz))
+ return -EINVAL;
+ } else if (ptep[i]) {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
+ }
+
+ pte |= ARM_V7S_PTE_TYPE_PAGE;
+ if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
+ pte |= ARM_V7S_ATTR_NS_SECTION;
+
+ if (num_entries > 1)
+ pte = arm_v7s_pte_to_cont(pte, lvl);
+
+ pte |= paddr & ARM_V7S_LVL_MASK(lvl);
+
+ __arm_v7s_set_pte(ptep, pte, num_entries, cfg);
+ return 0;
+}
+
+static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot,
+ int lvl, arm_v7s_iopte *ptep)
+{
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+ arm_v7s_iopte pte, *cptep;
+ int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
+
+ /* Find our entry at the current level */
+ ptep += ARM_V7S_LVL_IDX(iova, lvl);
+
+ /* If we can install a leaf entry at this level, then do so */
+ if (num_entries)
+ return arm_v7s_init_pte(data, iova, paddr, prot,
+ lvl, num_entries, ptep);
+
+ /* We can't allocate tables at the final level */
+ if (WARN_ON(lvl == 2))
+ return -EINVAL;
+
+ /* Grab a pointer to the next level */
+ pte = *ptep;
+ if (!pte) {
+ cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
+ if (!cptep)
+ return -ENOMEM;
+
+ pte = virt_to_phys(cptep) | ARM_V7S_PTE_TYPE_TABLE;
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
+ pte |= ARM_V7S_ATTR_NS_TABLE;
+
+ __arm_v7s_set_pte(ptep, pte, 1, cfg);
+ } else {
+ cptep = iopte_deref(pte, lvl);
+ }
+
+ /* Rinse, repeat */
+ return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+}
+
+static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ struct io_pgtable *iop = &data->iop;
+ int ret;
+
+ /* If no access, then nothing to do */
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+ return 0;
+
+ ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
+ /*
+ * Synchronise all PTE updates for the new mapping before there's
+ * a chance for anything to kick off a table walk for the new iova.
+ */
+ if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
+ io_pgtable_tlb_add_flush(iop, iova, size,
+ ARM_V7S_BLOCK_SIZE(2), false);
+ io_pgtable_tlb_sync(iop);
+ } else {
+ wmb();
+ }
+
+ return ret;
+}
+
+static void arm_v7s_free_pgtable(struct io_pgtable *iop)
+{
+ struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
+ int i;
+
+ for (i = 0; i < ARM_V7S_PTES_PER_LVL(1); i++) {
+ arm_v7s_iopte pte = data->pgd[i];
+
+ if (ARM_V7S_PTE_IS_TABLE(pte, 1))
+ __arm_v7s_free_table(iopte_deref(pte, 1), 2, data);
+ }
+ __arm_v7s_free_table(data->pgd, 1, data);
+ kmem_cache_destroy(data->l2_tables);
+ kfree(data);
+}
+
+static void arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, int idx, int lvl,
+ arm_v7s_iopte *ptep)
+{
+ struct io_pgtable *iop = &data->iop;
+ arm_v7s_iopte pte;
+ size_t size = ARM_V7S_BLOCK_SIZE(lvl);
+ int i;
+
+ ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
+ pte = arm_v7s_cont_to_pte(*ptep, lvl);
+ for (i = 0; i < ARM_V7S_CONT_PAGES; i++) {
+ ptep[i] = pte;
+ pte += size;
+ }
+
+ __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
+
+ size *= ARM_V7S_CONT_PAGES;
+ io_pgtable_tlb_add_flush(iop, iova, size, size, true);
+ io_pgtable_tlb_sync(iop);
+}
+
+static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, size_t size,
+ arm_v7s_iopte *ptep)
+{
+ unsigned long blk_start, blk_end, blk_size;
+ phys_addr_t blk_paddr;
+ arm_v7s_iopte table = 0;
+ int prot = arm_v7s_pte_to_prot(*ptep, 1);
+
+ blk_size = ARM_V7S_BLOCK_SIZE(1);
+ blk_start = iova & ARM_V7S_LVL_MASK(1);
+ blk_end = blk_start + ARM_V7S_BLOCK_SIZE(1);
+ blk_paddr = *ptep & ARM_V7S_LVL_MASK(1);
+
+ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
+ arm_v7s_iopte *tablep;
+
+ /* Unmap! */
+ if (blk_start == iova)
+ continue;
+
+ /* __arm_v7s_map expects a pointer to the start of the table */
+ tablep = &table - ARM_V7S_LVL_IDX(blk_start, 1);
+ if (__arm_v7s_map(data, blk_start, blk_paddr, size, prot, 1,
+ tablep) < 0) {
+ if (table) {
+ /* Free the table we allocated */
+ tablep = iopte_deref(table, 1);
+ __arm_v7s_free_table(tablep, 2, data);
+ }
+ return 0; /* Bytes unmapped */
+ }
+ }
+
+ __arm_v7s_set_pte(ptep, table, 1, &data->iop.cfg);
+ iova &= ~(blk_size - 1);
+ io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
+ return size;
+}
+
+static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
+ unsigned long iova, size_t size, int lvl,
+ arm_v7s_iopte *ptep)
+{
+ arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
+ struct io_pgtable *iop = &data->iop;
+ int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
+
+ /* Something went horribly wrong and we ran out of page table */
+ if (WARN_ON(lvl > 2))
+ return 0;
+
+ idx = ARM_V7S_LVL_IDX(iova, lvl);
+ ptep += idx;
+ do {
+ if (WARN_ON(!ARM_V7S_PTE_IS_VALID(ptep[i])))
+ return 0;
+ pte[i] = ptep[i];
+ } while (++i < num_entries);
+
+ /*
+ * If we've hit a contiguous 'large page' entry at this level, it
+ * needs splitting first, unless we're unmapping the whole lot.
+ */
+ if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl))
+ arm_v7s_split_cont(data, iova, idx, lvl, ptep);
+
+ /* If the size matches this level, we're in the right place */
+ if (num_entries) {
+ size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);
+
+ __arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);
+
+ for (i = 0; i < num_entries; i++) {
+ if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
+ /* Also flush any partial walks */
+ io_pgtable_tlb_add_flush(iop, iova, blk_size,
+ ARM_V7S_BLOCK_SIZE(lvl + 1), false);
+ io_pgtable_tlb_sync(iop);
+ ptep = iopte_deref(pte[i], lvl);
+ __arm_v7s_free_table(ptep, lvl + 1, data);
+ } else {
+ io_pgtable_tlb_add_flush(iop, iova, blk_size,
+ blk_size, true);
+ }
+ iova += blk_size;
+ }
+ return size;
+ } else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
+ /*
+ * Insert a table at the next level to map the old region,
+ * minus the part we want to unmap
+ */
+ return arm_v7s_split_blk_unmap(data, iova, size, ptep);
+ }
+
+ /* Keep on walkin' */
+ ptep = iopte_deref(pte[0], lvl);
+ return __arm_v7s_unmap(data, iova, size, lvl + 1, ptep);
+}
+
+static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t size)
+{
+ struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ size_t unmapped;
+
+ unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
+ if (unmapped)
+ io_pgtable_tlb_sync(&data->iop);
+
+ return unmapped;
+}
+
+static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
+ unsigned long iova)
+{
+ struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
+ arm_v7s_iopte *ptep = data->pgd, pte;
+ int lvl = 0;
+ u32 mask;
+
+ do {
+ pte = ptep[ARM_V7S_LVL_IDX(iova, ++lvl)];
+ ptep = iopte_deref(pte, lvl);
+ } while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
+
+ if (!ARM_V7S_PTE_IS_VALID(pte))
+ return 0;
+
+ mask = ARM_V7S_LVL_MASK(lvl);
+ if (arm_v7s_pte_is_cont(pte, lvl))
+ mask *= ARM_V7S_CONT_PAGES;
+ return (pte & mask) | (iova & ~mask);
+}
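The mask *= ARM_V7S_CONT_PAGES step relies on deliberate u32 truncation to widen the block mask; a sketch for a 16 MiB supersection (the PTE value below is invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t mask = ~0U << 20;	/* 1 MiB section mask: 0xfff00000 */
		mask *= 16;			/* truncates to 0xff000000 (16 MiB) */
		uint32_t pte = 0x4B000000 | (1 << 18) | 2;	/* cont. section */
		uint32_t iova = 0x00345678;
		/* the low 24 bits of the IOVA now pass straight through */
		printf("phys = %#x\n", (pte & mask) | (iova & ~mask));	/* 0x4b345678 */
		return 0;
	}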
+
+static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
+ void *cookie)
+{
+ struct arm_v7s_io_pgtable *data;
+
+ if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS)
+ return NULL;
+
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
+ IO_PGTABLE_QUIRK_NO_PERMS |
+ IO_PGTABLE_QUIRK_TLBI_ON_MAP))
+ return NULL;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
+ ARM_V7S_TABLE_SIZE(2),
+ ARM_V7S_TABLE_SIZE(2),
+ SLAB_CACHE_DMA, NULL);
+ if (!data->l2_tables)
+ goto out_free_data;
+
+ data->iop.ops = (struct io_pgtable_ops) {
+ .map = arm_v7s_map,
+ .unmap = arm_v7s_unmap,
+ .iova_to_phys = arm_v7s_iova_to_phys,
+ };
+
+ /* We have to do this early for __arm_v7s_alloc_table to work... */
+ data->iop.cfg = *cfg;
+
+ /*
+ * Unless the IOMMU driver indicates supersection support by
+ * having SZ_16M set in the initial bitmap, they won't be used.
+ */
+ cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
+
+ /* TCR: T0SZ=0, disable TTBR1 */
+ cfg->arm_v7s_cfg.tcr = ARM_V7S_TCR_PD1;
+
+ /*
+ * TEX remap: the indices used map to the closest equivalent types
+ * under the non-TEX-remap interpretation of those attribute bits,
+ * excepting various implementation-defined aspects of shareability.
+ */
+ cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
+ ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
+ ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
+ ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
+ ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
+ cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
+ ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);
+
+ /* Looking good; allocate a pgd */
+ data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
+ if (!data->pgd)
+ goto out_free_data;
+
+ /* Ensure the empty pgd is visible before any actual TTBR write */
+ wmb();
+
+ /* TTBRs */
+ cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
+ ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
+ ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
+ cfg->arm_v7s_cfg.ttbr[1] = 0;
+ return &data->iop;
+
+out_free_data:
+ kmem_cache_destroy(data->l2_tables);
+ kfree(data);
+ return NULL;
+}
+
+struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
+ .alloc = arm_v7s_alloc_pgtable,
+ .free = arm_v7s_free_pgtable,
+};
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops = {
+ .tlb_flush_all = dummy_tlb_flush_all,
+ .tlb_add_flush = dummy_tlb_add_flush,
+ .tlb_sync = dummy_tlb_sync,
+};
+
+#define __FAIL(ops) ({ \
+ WARN(1, "selftest: test failed\n"); \
+ selftest_running = false; \
+ -EFAULT; \
+})
+
+static int __init arm_v7s_do_selftests(void)
+{
+ struct io_pgtable_ops *ops;
+ struct io_pgtable_cfg cfg = {
+ .tlb = &dummy_tlb_ops,
+ .oas = 32,
+ .ias = 32,
+ .quirks = IO_PGTABLE_QUIRK_ARM_NS,
+ .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+ };
+ unsigned int iova, size, iova_start;
+ unsigned int i, loopnr = 0;
+
+ selftest_running = true;
+
+ cfg_cookie = &cfg;
+
+ ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
+ if (!ops) {
+ pr_err("selftest: failed to allocate io pgtable ops\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initial sanity checks.
+ * Empty page tables shouldn't provide any translations.
+ */
+ if (ops->iova_to_phys(ops, 42))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, SZ_1G + 42))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, SZ_2G + 42))
+ return __FAIL(ops);
+
+ /*
+ * Distinct mappings of different granule sizes.
+ */
+ iova = 0;
+ i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
+ while (i != BITS_PER_LONG) {
+ size = 1UL << i;
+ if (ops->map(ops, iova, iova, size, IOMMU_READ |
+ IOMMU_WRITE |
+ IOMMU_NOEXEC |
+ IOMMU_CACHE))
+ return __FAIL(ops);
+
+ /* Overlapping mappings */
+ if (!ops->map(ops, iova, iova + size, size,
+ IOMMU_READ | IOMMU_NOEXEC))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops);
+
+ iova += SZ_16M;
+ i++;
+ i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
+ loopnr++;
+ }
+
+ /* Partial unmap */
+ i = 1;
+ size = 1UL << __ffs(cfg.pgsize_bitmap);
+ while (i < loopnr) {
+ iova_start = i * SZ_16M;
+ if (ops->unmap(ops, iova_start + size, size) != size)
+ return __FAIL(ops);
+
+ /* Remap of partial unmap */
+ if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, iova_start + size + 42)
+ != (size + 42))
+ return __FAIL(ops);
+ i++;
+ }
+
+ /* Full unmap */
+ iova = 0;
+ i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
+ while (i != BITS_PER_LONG) {
+ size = 1UL << i;
+
+ if (ops->unmap(ops, iova, size) != size)
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, iova + 42))
+ return __FAIL(ops);
+
+ /* Remap full block */
+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
+ return __FAIL(ops);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(ops);
+
+ iova += SZ_16M;
+ i++;
+ i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
+ }
+
+ free_io_pgtable_ops(ops);
+
+ selftest_running = false;
+
+ pr_info("self test ok\n");
+ return 0;
+}
+subsys_initcall(arm_v7s_do_selftests);
+#endif
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 381ca5a37a7b..f433b516098a 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -446,7 +446,6 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
unsigned long blk_start, blk_end;
phys_addr_t blk_paddr;
arm_lpae_iopte table = 0;
- struct io_pgtable_cfg *cfg = &data->iop.cfg;
blk_start = iova & ~(blk_size - 1);
blk_end = blk_start + blk_size;
@@ -472,9 +471,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
}
}
- __arm_lpae_set_pte(ptep, table, cfg);
+ __arm_lpae_set_pte(ptep, table, &data->iop.cfg);
iova &= ~(blk_size - 1);
- cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
+ io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
return size;
}
@@ -483,8 +482,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte;
- const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
- void *cookie = data->iop.cookie;
+ struct io_pgtable *iop = &data->iop;
size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
/* Something went horribly wrong and we ran out of page table */
@@ -498,17 +496,17 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
/* If the size matches this level, we're in the right place */
if (size == blk_size) {
- __arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
+ __arm_lpae_set_pte(ptep, 0, &iop->cfg);
if (!iopte_leaf(pte, lvl)) {
/* Also flush any partial walks */
- tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
- false, cookie);
- tlb->tlb_sync(cookie);
+ io_pgtable_tlb_add_flush(iop, iova, size,
+ ARM_LPAE_GRANULE(data), false);
+ io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
} else {
- tlb->tlb_add_flush(iova, size, size, true, cookie);
+ io_pgtable_tlb_add_flush(iop, iova, size, size, true);
}
return size;
@@ -532,13 +530,12 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
{
size_t unmapped;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- struct io_pgtable *iop = &data->iop;
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
if (unmapped)
- iop->cfg.tlb->tlb_sync(iop->cookie);
+ io_pgtable_tlb_sync(&data->iop);
return unmapped;
}
@@ -662,8 +659,12 @@ static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
u64 reg;
- struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+ struct arm_lpae_io_pgtable *data;
+ if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
+ return NULL;
+
+ data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
@@ -746,8 +747,13 @@ static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
u64 reg, sl;
- struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
+ struct arm_lpae_io_pgtable *data;
+
+ /* The NS quirk doesn't apply at stage 2 */
+ if (cfg->quirks)
+ return NULL;
+ data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 6f2e319d4f04..876f6a76d288 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -33,6 +33,9 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
#endif
+#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
+ [ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
+#endif
};
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
@@ -72,6 +75,6 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
return;
iop = container_of(ops, struct io_pgtable, ops);
- iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ io_pgtable_tlb_flush_all(iop);
io_pgtable_init_table[iop->fmt]->free(iop);
}
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 36673c83de58..d4f502742e3b 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -1,5 +1,6 @@
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
+#include <linux/bitops.h>
/*
* Public API for use by IOMMU drivers
@@ -9,6 +10,7 @@ enum io_pgtable_fmt {
ARM_32_LPAE_S2,
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
+ ARM_V7S,
IO_PGTABLE_NUM_FMTS,
};
@@ -45,8 +47,24 @@ struct iommu_gather_ops {
* page table walker.
*/
struct io_pgtable_cfg {
- #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
- int quirks;
+ /*
+ * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
+ * stage 1 PTEs, for hardware which insists on validating them
+ * even in non-secure state where they should normally be ignored.
+ *
+ * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
+ * IOMMU_NOEXEC flags and map everything with full access, for
+ * hardware which does not implement the permissions of a given
+ * format, and/or requires some format-specific default value.
+ *
+ * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
+ * (unmapped) entries but the hardware might do so anyway, perform
+ * TLB maintenance when mapping as well as when unmapping.
+ */
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
+ unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
unsigned int oas;
@@ -65,6 +83,13 @@ struct io_pgtable_cfg {
u64 vttbr;
u64 vtcr;
} arm_lpae_s2_cfg;
+
+ struct {
+ u32 ttbr[2];
+ u32 tcr;
+ u32 nmrr;
+ u32 prrr;
+ } arm_v7s_cfg;
};
};
@@ -121,18 +146,41 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
* @fmt: The page table format.
* @cookie: An opaque token provided by the IOMMU driver and passed back to
* any callback routines.
+ * @tlb_sync_pending: Private flag for optimising out redundant syncs.
* @cfg: A copy of the page table configuration.
* @ops: The page table operations in use for this set of page tables.
*/
struct io_pgtable {
enum io_pgtable_fmt fmt;
void *cookie;
+ bool tlb_sync_pending;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops ops;
};
#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
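+/*
+ * TLB maintenance helpers: thin wrappers around the driver-supplied gather
+ * ops which track whether a flush is outstanding, allowing
+ * io_pgtable_tlb_sync() to elide redundant sync callbacks.
+ */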
+static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
+{
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
+ iop->tlb_sync_pending = true;
+}
+
+static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
+ unsigned long iova, size_t size, size_t granule, bool leaf)
+{
+ iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+ iop->tlb_sync_pending = true;
+}
+
+static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
+{
+ if (iop->tlb_sync_pending) {
+ iop->cfg.tlb->tlb_sync(iop->cookie);
+ iop->tlb_sync_pending = false;
+ }
+}
+
/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
@@ -149,5 +197,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
#endif /* __IO_PGTABLE_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 0e3b0092ec92..b9df1411c894 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (!group->default_domain) {
group->default_domain = __iommu_domain_alloc(dev->bus,
IOMMU_DOMAIN_DMA);
- group->domain = group->default_domain;
+ if (!group->domain)
+ group->domain = group->default_domain;
}
ret = iommu_group_add_device(group, dev);
@@ -1314,6 +1315,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
unsigned long orig_iova = iova;
unsigned int min_pagesz;
size_t orig_size = size;
+ phys_addr_t orig_paddr = paddr;
int ret = 0;
if (unlikely(domain->ops->map == NULL ||
@@ -1358,7 +1360,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
else
- trace_map(orig_iova, paddr, orig_size);
+ trace_map(orig_iova, orig_paddr, orig_size);
return ret;
}
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
new file mode 100644
index 000000000000..929a66a81b2b
--- /dev/null
+++ b/drivers/iommu/mtk_iommu.c
@@ -0,0 +1,736 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-iommu.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/of_address.h>
+#include <linux/of_iommu.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <asm/barrier.h>
+#include <dt-bindings/memory/mt8173-larb-port.h>
+#include <soc/mediatek/smi.h>
+
+#include "io-pgtable.h"
+
+#define REG_MMU_PT_BASE_ADDR 0x000
+
+#define REG_MMU_INVALIDATE 0x020
+#define F_ALL_INVLD 0x2
+#define F_MMU_INV_RANGE 0x1
+
+#define REG_MMU_INVLD_START_A 0x024
+#define REG_MMU_INVLD_END_A 0x028
+
+#define REG_MMU_INV_SEL 0x038
+#define F_INVLD_EN0 BIT(0)
+#define F_INVLD_EN1 BIT(1)
+
+#define REG_MMU_STANDARD_AXI_MODE 0x048
+#define REG_MMU_DCM_DIS 0x050
+
+#define REG_MMU_CTRL_REG 0x110
+#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
+#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5)
+
+#define REG_MMU_IVRP_PADDR 0x114
+#define F_MMU_IVRP_PA_SET(pa) ((pa) >> 1)
+
+#define REG_MMU_INT_CONTROL0 0x120
+#define F_L2_MULIT_HIT_EN BIT(0)
+#define F_TABLE_WALK_FAULT_INT_EN BIT(1)
+#define F_PREETCH_FIFO_OVERFLOW_INT_EN BIT(2)
+#define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3)
+#define F_PREFETCH_FIFO_ERR_INT_EN BIT(5)
+#define F_MISS_FIFO_ERR_INT_EN BIT(6)
+#define F_INT_CLR_BIT BIT(12)
+
+#define REG_MMU_INT_MAIN_CONTROL 0x124
+#define F_INT_TRANSLATION_FAULT BIT(0)
+#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1)
+#define F_INT_INVALID_PA_FAULT BIT(2)
+#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3)
+#define F_INT_TLB_MISS_FAULT BIT(4)
+#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5)
+#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6)
+
+#define REG_MMU_CPE_DONE 0x12C
+
+#define REG_MMU_FAULT_ST1 0x134
+
+#define REG_MMU_FAULT_VA 0x13c
+#define F_MMU_FAULT_VA_MSK 0xfffff000
+#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
+#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)
+
+#define REG_MMU_INVLD_PA 0x140
+#define REG_MMU_INT_ID 0x150
+#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
+#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
+
+#define MTK_PROTECT_PA_ALIGN 128
+
+struct mtk_iommu_suspend_reg {
+ u32 standard_axi_mode;
+ u32 dcm_dis;
+ u32 ctrl_reg;
+ u32 int_control0;
+ u32 int_main_control;
+};
+
+struct mtk_iommu_client_priv {
+ struct list_head client;
+ unsigned int mtk_m4u_id;
+ struct device *m4udev;
+};
+
+struct mtk_iommu_domain {
+ spinlock_t pgtlock; /* lock for page table */
+
+ struct io_pgtable_cfg cfg;
+ struct io_pgtable_ops *iop;
+
+ struct iommu_domain domain;
+};
+
+struct mtk_iommu_data {
+ void __iomem *base;
+ int irq;
+ struct device *dev;
+ struct clk *bclk;
+ phys_addr_t protect_base; /* protect memory base */
+ struct mtk_iommu_suspend_reg reg;
+ struct mtk_iommu_domain *m4u_dom;
+ struct iommu_group *m4u_group;
+ struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
+};
+
+static struct iommu_ops mtk_iommu_ops;
+
+static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct mtk_iommu_domain, domain);
+}
+
+static void mtk_iommu_tlb_flush_all(void *cookie)
+{
+ struct mtk_iommu_data *data = cookie;
+
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
+ writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+ wmb(); /* Make sure the TLB flush has completed */
+}
+
+static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf,
+ void *cookie)
+{
+ struct mtk_iommu_data *data = cookie;
+
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
+
+ writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
+ writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
+ writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+}
+
+static void mtk_iommu_tlb_sync(void *cookie)
+{
+ struct mtk_iommu_data *data = cookie;
+ int ret;
+ u32 tmp;
+
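+ /* Wait for the range invalidation issued by tlb_add_flush to complete */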
+ ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
+ tmp != 0, 10, 100000);
+ if (ret) {
+ dev_warn(data->dev,
+ "Partial TLB flush timed out, falling back to full flush\n");
+ mtk_iommu_tlb_flush_all(cookie);
+ }
+ /* Clear the CPE status */
+ writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+}
+
+static const struct iommu_gather_ops mtk_iommu_gather_ops = {
+ .tlb_flush_all = mtk_iommu_tlb_flush_all,
+ .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
+ .tlb_sync = mtk_iommu_tlb_sync,
+};
+
+static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
+{
+ struct mtk_iommu_data *data = dev_id;
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+ u32 int_state, regval, fault_iova, fault_pa;
+ unsigned int fault_larb, fault_port;
+ bool layer, write;
+
+ /* Read error info from registers */
+ int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
+ fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
+ layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
+ write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
+ fault_iova &= F_MMU_FAULT_VA_MSK;
+ fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
+ regval = readl_relaxed(data->base + REG_MMU_INT_ID);
+ fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
+ fault_port = F_MMU0_INT_ID_PORT_ID(regval);
+
+ if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
+ write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
+ dev_err_ratelimited(
+ data->dev,
+ "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
+ int_state, fault_iova, fault_pa, fault_larb, fault_port,
+ layer, write ? "write" : "read");
+ }
+
+ /* Interrupt clear */
+ regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
+ regval |= F_INT_CLR_BIT;
+ writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
+
+ mtk_iommu_tlb_flush_all(data);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_iommu_config(struct mtk_iommu_data *data,
+ struct device *dev, bool enable)
+{
+ struct mtk_iommu_client_priv *head, *cur, *next;
+ struct mtk_smi_larb_iommu *larb_mmu;
+ unsigned int larbid, portid;
+
+ head = dev->archdata.iommu;
+ list_for_each_entry_safe(cur, next, &head->client, client) {
+ larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
+ portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
+ larb_mmu = &data->smi_imu.larb_imu[larbid];
+
+ dev_dbg(dev, "%s iommu port: %d\n",
+ enable ? "enable" : "disable", portid);
+
+ if (enable)
+ larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
+ else
+ larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
+ }
+}
+
+static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
+{
+ struct mtk_iommu_domain *dom = data->m4u_dom;
+
+ spin_lock_init(&dom->pgtlock);
+
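+ /*
+ * The M4U has no per-page access permissions, validates the NS bits,
+ * and may cache invalid entries, hence the three quirks below (see
+ * their descriptions in io-pgtable.h).
+ */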
+ dom->cfg = (struct io_pgtable_cfg) {
+ .quirks = IO_PGTABLE_QUIRK_ARM_NS |
+ IO_PGTABLE_QUIRK_NO_PERMS |
+ IO_PGTABLE_QUIRK_TLBI_ON_MAP,
+ .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
+ .ias = 32,
+ .oas = 32,
+ .tlb = &mtk_iommu_gather_ops,
+ .iommu_dev = data->dev,
+ };
+
+ dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
+ if (!dom->iop) {
+ dev_err(data->dev, "Failed to alloc io pgtable\n");
+ return -EINVAL;
+ }
+
+ /* Update our supported page sizes bitmap */
+ mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+
+ writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
+ data->base + REG_MMU_PT_BASE_ADDR);
+ return 0;
+}
+
+static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+{
+ struct mtk_iommu_domain *dom;
+
+ if (type != IOMMU_DOMAIN_DMA)
+ return NULL;
+
+ dom = kzalloc(sizeof(*dom), GFP_KERNEL);
+ if (!dom)
+ return NULL;
+
+ if (iommu_get_dma_cookie(&dom->domain)) {
+ kfree(dom);
+ return NULL;
+ }
+
+ dom->domain.geometry.aperture_start = 0;
+ dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ dom->domain.geometry.force_aperture = true;
+
+ return &dom->domain;
+}
+
+static void mtk_iommu_domain_free(struct iommu_domain *domain)
+{
+ iommu_put_dma_cookie(domain);
+ kfree(to_mtk_domain(domain));
+}
+
+static int mtk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
+ struct mtk_iommu_data *data;
+ int ret;
+
+ if (!priv)
+ return -ENODEV;
+
+ data = dev_get_drvdata(priv->m4udev);
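+ /* The first device to attach sets up the single, shared m4u domain */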
+ if (!data->m4u_dom) {
+ data->m4u_dom = dom;
+ ret = mtk_iommu_domain_finalise(data);
+ if (ret) {
+ data->m4u_dom = NULL;
+ return ret;
+ }
+ } else if (data->m4u_dom != dom) {
+ /* All the client devices should be in the same m4u domain */
+ dev_err(dev, "try to attach into the error iommu domain\n");
+ return -EPERM;
+ }
+
+ mtk_iommu_config(data, dev, true);
+ return 0;
+}
+
+static void mtk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
+ struct mtk_iommu_data *data;
+
+ if (!priv)
+ return;
+
+ data = dev_get_drvdata(priv->m4udev);
+ mtk_iommu_config(data, dev, false);
+}
+
+static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return ret;
+}
+
+static size_t mtk_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ size_t unmapsz;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ unmapsz = dom->iop->unmap(dom->iop, iova, size);
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return unmapsz;
+}
+
+static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct mtk_iommu_domain *dom = to_mtk_domain(domain);
+ unsigned long flags;
+ phys_addr_t pa;
+
+ spin_lock_irqsave(&dom->pgtlock, flags);
+ pa = dom->iop->iova_to_phys(dom->iop, iova);
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+ return pa;
+}
+
+static int mtk_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+
+ if (!dev->archdata.iommu) /* Not an iommu client device */
+ return -ENODEV;
+
+ group = iommu_group_get_for_dev(dev);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ iommu_group_put(group);
+ return 0;
+}
+
+static void mtk_iommu_remove_device(struct device *dev)
+{
+ struct mtk_iommu_client_priv *head, *cur, *next;
+
+ head = dev->archdata.iommu;
+ if (!head)
+ return;
+
+ list_for_each_entry_safe(cur, next, &head->client, client) {
+ list_del(&cur->client);
+ kfree(cur);
+ }
+ kfree(head);
+ dev->archdata.iommu = NULL;
+
+ iommu_group_remove_device(dev);
+}
+
+static struct iommu_group *mtk_iommu_device_group(struct device *dev)
+{
+ struct mtk_iommu_data *data;
+ struct mtk_iommu_client_priv *priv;
+
+ priv = dev->archdata.iommu;
+ if (!priv)
+ return ERR_PTR(-ENODEV);
+
+ /* All the client devices are in the same m4u iommu-group */
+ data = dev_get_drvdata(priv->m4udev);
+ if (!data->m4u_group) {
+ data->m4u_group = iommu_group_alloc();
+ if (IS_ERR(data->m4u_group))
+ dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ }
+ return data->m4u_group;
+}
+
+static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct mtk_iommu_client_priv *head, *priv, *next;
+ struct platform_device *m4updev;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
+ args->args_count);
+ return -EINVAL;
+ }
+
+ if (!dev->archdata.iommu) {
+ /* Get the m4u device */
+ m4updev = of_find_device_by_node(args->np);
+ of_node_put(args->np);
+ if (WARN_ON(!m4updev))
+ return -EINVAL;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ dev->archdata.iommu = head;
+ INIT_LIST_HEAD(&head->client);
+ head->m4udev = &m4updev->dev;
+ } else {
+ head = dev->archdata.iommu;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto err_free_mem;
+
+ priv->mtk_m4u_id = args->args[0];
+ list_add_tail(&priv->client, &head->client);
+
+ return 0;
+
+err_free_mem:
+ list_for_each_entry_safe(priv, next, &head->client, client)
+ kfree(priv);
+ kfree(head);
+ dev->archdata.iommu = NULL;
+ return -ENOMEM;
+}
+
+static struct iommu_ops mtk_iommu_ops = {
+ .domain_alloc = mtk_iommu_domain_alloc,
+ .domain_free = mtk_iommu_domain_free,
+ .attach_dev = mtk_iommu_attach_device,
+ .detach_dev = mtk_iommu_detach_device,
+ .map = mtk_iommu_map,
+ .unmap = mtk_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = mtk_iommu_iova_to_phys,
+ .add_device = mtk_iommu_add_device,
+ .remove_device = mtk_iommu_remove_device,
+ .device_group = mtk_iommu_device_group,
+ .of_xlate = mtk_iommu_of_xlate,
+ .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+};
+
+static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+{
+ u32 regval;
+ int ret;
+
+ ret = clk_prepare_enable(data->bclk);
+ if (ret) {
+ dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
+ return ret;
+ }
+
+ regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
+ F_MMU_TF_PROTECT_SEL(2);
+ writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
+
+ regval = F_L2_MULIT_HIT_EN |
+ F_TABLE_WALK_FAULT_INT_EN |
+ F_PREETCH_FIFO_OVERFLOW_INT_EN |
+ F_MISS_FIFO_OVERFLOW_INT_EN |
+ F_PREFETCH_FIFO_ERR_INT_EN |
+ F_MISS_FIFO_ERR_INT_EN;
+ writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
+
+ regval = F_INT_TRANSLATION_FAULT |
+ F_INT_MAIN_MULTI_HIT_FAULT |
+ F_INT_INVALID_PA_FAULT |
+ F_INT_ENTRY_REPLACEMENT_FAULT |
+ F_INT_TLB_MISS_FAULT |
+ F_INT_MISS_TRANSACTION_FIFO_FAULT |
+ F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
+ writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
+
+ writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+ data->base + REG_MMU_IVRP_PADDR);
+
+ writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
+ writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
+
+ if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
+ dev_name(data->dev), (void *)data)) {
+ writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
+ clk_disable_unprepare(data->bclk);
+ dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int mtk_iommu_bind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ return component_bind_all(dev, &data->smi_imu);
+}
+
+static void mtk_iommu_unbind(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+
+ component_unbind_all(dev, &data->smi_imu);
+}
+
+static const struct component_master_ops mtk_iommu_com_ops = {
+ .bind = mtk_iommu_bind,
+ .unbind = mtk_iommu_unbind,
+};
+
+static int mtk_iommu_probe(struct platform_device *pdev)
+{
+ struct mtk_iommu_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct component_match *match = NULL;
+ void *protect;
+ int i, larb_nr, ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->dev = dev;
+
+ /* Protect memory. HW will access it on a translation fault. */
+ protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
+ if (!protect)
+ return -ENOMEM;
+ data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq < 0)
+ return data->irq;
+
+ data->bclk = devm_clk_get(dev, "bclk");
+ if (IS_ERR(data->bclk))
+ return PTR_ERR(data->bclk);
+
+ larb_nr = of_count_phandle_with_args(dev->of_node,
+ "mediatek,larbs", NULL);
+ if (larb_nr < 0)
+ return larb_nr;
+ data->smi_imu.larb_nr = larb_nr;
+
+ for (i = 0; i < larb_nr; i++) {
+ struct device_node *larbnode;
+ struct platform_device *plarbdev;
+
+ larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
+ if (!larbnode)
+ return -EINVAL;
+
+ if (!of_device_is_available(larbnode))
+ continue;
+
+ plarbdev = of_find_device_by_node(larbnode);
+ of_node_put(larbnode);
+ if (!plarbdev) {
+ plarbdev = of_platform_device_create(
+ larbnode, NULL,
+ platform_bus_type.dev_root);
+ if (!plarbdev)
+ return -EPROBE_DEFER;
+ }
+ data->smi_imu.larb_imu[i].dev = &plarbdev->dev;
+
+ component_match_add(dev, &match, compare_of, larbnode);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ ret = mtk_iommu_hw_init(data);
+ if (ret)
+ return ret;
+
+ if (!iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
+
+ return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
+}
+
+static int mtk_iommu_remove(struct platform_device *pdev)
+{
+ struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+
+ if (iommu_present(&platform_bus_type))
+ bus_set_iommu(&platform_bus_type, NULL);
+
+ if (data->m4u_dom)
+ free_io_pgtable_ops(data->m4u_dom->iop);
+ clk_disable_unprepare(data->bclk);
+ devm_free_irq(&pdev->dev, data->irq, data);
+ component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+ return 0;
+}
+
+static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_suspend_reg *reg = &data->reg;
+ void __iomem *base = data->base;
+
+ reg->standard_axi_mode = readl_relaxed(base +
+ REG_MMU_STANDARD_AXI_MODE);
+ reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
+ reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
+ reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
+ reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
+ return 0;
+}
+
+static int __maybe_unused mtk_iommu_resume(struct device *dev)
+{
+ struct mtk_iommu_data *data = dev_get_drvdata(dev);
+ struct mtk_iommu_suspend_reg *reg = &data->reg;
+ void __iomem *base = data->base;
+
+ writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
+ base + REG_MMU_PT_BASE_ADDR);
+ writel_relaxed(reg->standard_axi_mode,
+ base + REG_MMU_STANDARD_AXI_MODE);
+ writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
+ writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
+ writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
+ writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
+ writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+ base + REG_MMU_IVRP_PADDR);
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_iommu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
+};
+
+static const struct of_device_id mtk_iommu_of_ids[] = {
+ { .compatible = "mediatek,mt8173-m4u", },
+ {}
+};
+
+static struct platform_driver mtk_iommu_driver = {
+ .probe = mtk_iommu_probe,
+ .remove = mtk_iommu_remove,
+ .driver = {
+ .name = "mtk-iommu",
+ .of_match_table = mtk_iommu_of_ids,
+ .pm = &mtk_iommu_pm_ops,
+ }
+};
+
+static int mtk_iommu_init_fn(struct device_node *np)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
+ if (!pdev)
+ return -ENOMEM;
+
+ ret = platform_driver_register(&mtk_iommu_driver);
+ if (ret) {
+ pr_err("%s: Failed to register driver\n", __func__);
+ return ret;
+ }
+
+ of_iommu_set_ops(np, &mtk_iommu_ops);
+ return 0;
+}
+
+IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 60ba238090d9..5fea665af99d 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -110,6 +110,7 @@ void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
if (WARN_ON(!iommu))
return;
+ of_node_get(np);
INIT_LIST_HEAD(&iommu->list);
iommu->np = np;
iommu->ops = ops;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index ebf0adb8e7ea..5710a06c3049 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -86,7 +86,8 @@ struct rk_iommu_domain {
struct rk_iommu {
struct device *dev;
- void __iomem *base;
+ void __iomem **bases;
+ int num_mmu;
int irq;
struct list_head node; /* entry in rk_iommu_domain.iommus */
struct iommu_domain *domain; /* domain to which iommu is attached */
@@ -271,47 +272,70 @@ static u32 rk_iova_page_offset(dma_addr_t iova)
return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
-static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
- return readl(iommu->base + offset);
+ return readl(base + offset);
}
-static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
- writel(value, iommu->base + offset);
+ writel(value, base + offset);
}
static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
- writel(command, iommu->base + RK_MMU_COMMAND);
+ int i;
+
+ for (i = 0; i < iommu->num_mmu; i++)
+ writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}
+static void rk_iommu_base_command(void __iomem *base, u32 command)
+{
+ writel(command, base + RK_MMU_COMMAND);
+}
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
size_t size)
{
+ int i;
+
dma_addr_t iova_end = iova + size;
/*
* TODO(djkurtz): Figure out when it is more efficient to shootdown the
* entire iotlb rather than iterate over individual iovas.
*/
- for (; iova < iova_end; iova += SPAGE_SIZE)
- rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ dma_addr_t cur;
+
+ /* Restart from the original iova for each MMU instance */
+ for (cur = iova; cur < iova_end; cur += SPAGE_SIZE)
+ rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, cur);
+ }
}
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
- return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+ bool active = true;
+ int i;
+
+ for (i = 0; i < iommu->num_mmu; i++)
+ active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+ RK_MMU_STATUS_STALL_ACTIVE);
+
+ return active;
}
static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
- return rk_iommu_read(iommu, RK_MMU_STATUS) &
- RK_MMU_STATUS_PAGING_ENABLED;
+ bool enable = true;
+ int i;
+
+ for (i = 0; i < iommu->num_mmu; i++)
+ enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+ RK_MMU_STATUS_PAGING_ENABLED);
+
+ return enable;
}
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
if (rk_iommu_is_stall_active(iommu))
return 0;
@@ -324,15 +348,16 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
if (ret)
- dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
- rk_iommu_read(iommu, RK_MMU_STATUS));
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
return ret;
}
static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
if (!rk_iommu_is_stall_active(iommu))
return 0;
@@ -341,15 +366,16 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
if (ret)
- dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
- rk_iommu_read(iommu, RK_MMU_STATUS));
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
return ret;
}
static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
if (rk_iommu_is_paging_enabled(iommu))
return 0;
@@ -358,15 +384,16 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
if (ret)
- dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
- rk_iommu_read(iommu, RK_MMU_STATUS));
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
return ret;
}
static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
if (!rk_iommu_is_paging_enabled(iommu))
return 0;
@@ -375,41 +402,49 @@ static int rk_iommu_disable_paging(struct rk_iommu *iommu)
ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
if (ret)
- dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
- rk_iommu_read(iommu, RK_MMU_STATUS));
+ for (i = 0; i < iommu->num_mmu; i++)
+ dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
return ret;
}
static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
- int ret;
+ int ret, i;
u32 dte_addr;
/*
* Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
* and verifying that the upper 5 nybbles are read back.
*/
- rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
- dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
- if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
- dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
- return -EFAULT;
+ dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
+ if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+ return -EFAULT;
+ }
}
rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
- ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
- FORCE_RESET_TIMEOUT);
- if (ret)
- dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+ for (i = 0; i < iommu->num_mmu; i++) {
+ ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
+ FORCE_RESET_TIMEOUT);
+ if (ret) {
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+ return ret;
+ }
+ }
- return ret;
+ return 0;
}
-static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
+ void __iomem *base = iommu->bases[index];
u32 dte_index, pte_index, page_offset;
u32 mmu_dte_addr;
phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
@@ -425,7 +460,7 @@ static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
pte_index = rk_iova_pte_index(iova);
page_offset = rk_iova_page_offset(iova);
- mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
@@ -460,51 +495,56 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
u32 status;
u32 int_status;
dma_addr_t iova;
+ irqreturn_t ret = IRQ_NONE;
+ int i;
- int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
- if (int_status == 0)
- return IRQ_NONE;
+ for (i = 0; i < iommu->num_mmu; i++) {
+ int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
+ if (int_status == 0)
+ continue;
- iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+ ret = IRQ_HANDLED;
+ iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
- if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
- int flags;
+ if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+ int flags;
- status = rk_iommu_read(iommu, RK_MMU_STATUS);
- flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
- IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+ status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
+ flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+ IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
- dev_err(iommu->dev, "Page fault at %pad of type %s\n",
- &iova,
- (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+ dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+ &iova,
+ (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
- log_iova(iommu, iova);
+ log_iova(iommu, i, iova);
- /*
- * Report page fault to any installed handlers.
- * Ignore the return code, though, since we always zap cache
- * and clear the page fault anyway.
- */
- if (iommu->domain)
- report_iommu_fault(iommu->domain, iommu->dev, iova,
- flags);
- else
- dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+ /*
+ * Report page fault to any installed handlers.
+ * Ignore the return code, though, since we always zap cache
+ * and clear the page fault anyway.
+ */
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova,
+ flags);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
- rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
- rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
- }
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
+ }
- if (int_status & RK_MMU_IRQ_BUS_ERROR)
- dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+ if (int_status & RK_MMU_IRQ_BUS_ERROR)
+ dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
- if (int_status & ~RK_MMU_IRQ_MASK)
- dev_err(iommu->dev, "unexpected int_status: %#08x\n",
- int_status);
+ if (int_status & ~RK_MMU_IRQ_MASK)
+ dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+ int_status);
- rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
+ }
- return IRQ_HANDLED;
+ return ret;
}
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -746,7 +786,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
- int ret;
+ int ret, i;
phys_addr_t dte_addr;
/*
@@ -773,9 +813,11 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
return ret;
dte_addr = virt_to_phys(rk_domain->dt);
- rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
- rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
- rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
+ rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+ }
ret = rk_iommu_enable_paging(iommu);
if (ret)
@@ -798,6 +840,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
+ int i;
/* Allow 'virtual devices' (eg drm) to detach from domain */
iommu = rk_iommu_from_dev(dev);
@@ -811,8 +854,10 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
/* Ignore error while disabling, just keep going */
rk_iommu_enable_stall(iommu);
rk_iommu_disable_paging(iommu);
- rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
- rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+ for (i = 0; i < iommu->num_mmu; i++) {
+ rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+ }
rk_iommu_disable_stall(iommu);
devm_free_irq(dev, iommu->irq, iommu);
@@ -988,6 +1033,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rk_iommu *iommu;
struct resource *res;
+ int i;
iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -995,11 +1041,21 @@ static int rk_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iommu);
iommu->dev = dev;
+ iommu->num_mmu = 0;
+ /* Size the array by resource count; num_mmu counts successful ioremaps */
+ iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * pdev->num_resources,
+ GFP_KERNEL);
+ if (!iommu->bases)
+ return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iommu->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(iommu->base))
- return PTR_ERR(iommu->base);
+ for (i = 0; i < pdev->num_resources; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iommu->bases[i]))
+ continue;
+ iommu->num_mmu++;
+ }
+ if (iommu->num_mmu == 0)
+ return PTR_ERR(iommu->bases[0]);
iommu->irq = platform_get_irq(pdev, 0);
if (iommu->irq < 0) {
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index fb50911b3940..3e124793e224 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -32,14 +32,6 @@ config ARM_GIC_V3_ITS
bool
select PCI_MSI_IRQ_DOMAIN
-config HISILICON_IRQ_MBIGEN
- bool "Support mbigen interrupt controller"
- default n
- depends on ARM_GIC_V3 && ARM_GIC_V3_ITS && GENERIC_MSI_IRQ_DOMAIN
- help
- Enable the mbigen interrupt controller used on
- Hisilicon platform.
-
config ARM_NVIC
bool
select IRQ_DOMAIN
@@ -60,6 +52,17 @@ config ARM_VIC_NR
The maximum number of VICs available in the system, for
power management.
+config ARMADA_370_XP_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+
+config ALPINE_MSI
+ bool
+ depends on PCI && PCI_MSI
+ select GENERIC_IRQ_CHIP
+ select PCI_MSI_IRQ_DOMAIN
+
config ATMEL_AIC_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -78,6 +81,11 @@ config I8259
bool
select IRQ_DOMAIN
+config BCM6345_L1_IRQ
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config BCM7038_L1_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -98,6 +106,12 @@ config DW_APB_ICTL
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config HISILICON_IRQ_MBIGEN
+ bool
+ select ARM_GIC_V3
+ select ARM_GIC_V3_ITS
+ select GENERIC_MSI_IRQ_DOMAIN
+
config IMGPDC_IRQ
bool
select GENERIC_IRQ_CHIP
@@ -151,6 +165,11 @@ config ST_IRQCHIP
help
Enables SysCfg Controlled IRQs on STi based platforms.
+config TANGO_IRQ
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+
config TB10X_IRQC
bool
select IRQ_DOMAIN
@@ -160,6 +179,7 @@ config TS4800_IRQ
tristate "TS-4800 IRQ controller"
select IRQ_DOMAIN
depends on HAS_IOMEM
+ depends on SOC_IMX51 || COMPILE_TEST
help
Support for the TS-4800 FPGA IRQ controller
@@ -193,6 +213,8 @@ config KEYSTONE_IRQ
config MIPS_GIC
bool
+ select GENERIC_IRQ_IPI
+ select IRQ_DOMAIN_HIERARCHY
select MIPS_CM
config INGENIC_IRQ
@@ -218,3 +240,7 @@ config IRQ_MXS
def_bool y if MACH_ASM9260 || ARCH_MXS
select IRQ_DOMAIN
select STMP_DEVICE
+
+config MVEBU_ODMI
+ bool
+ select GENERIC_MSI_IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 18caacb60d58..b03cfcbbac6b 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,11 +1,13 @@
obj-$(CONFIG_IRQCHIP) += irqchip.o
+obj-$(CONFIG_ALPINE_MSI) += irq-alpine-msi.o
+obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
+obj-$(CONFIG_ATH79) += irq-ath79-misc.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
-obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
@@ -28,6 +30,7 @@ obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-g
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
+obj-$(CONFIG_ARMADA_370_XP_IRQ) += irq-armada-370-xp.o
obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
obj-$(CONFIG_I8259) += irq-i8259.o
@@ -40,12 +43,14 @@ obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_ST_IRQCHIP) += irq-st.o
+obj-$(CONFIG_TANGO_IRQ) += irq-tango.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o
obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o
obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o
@@ -59,3 +64,4 @@ obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o
obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
+obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
new file mode 100644
index 000000000000..25384255b30f
--- /dev/null
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -0,0 +1,293 @@
+/*
+ * Annapurna Labs MSIX support services
+ *
+ * Copyright (C) 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <asm/irq.h>
+#include <asm-generic/msi.h>
+
+/* MSIX message address format: local GIC target */
+#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16)
+
+struct alpine_msix_data {
+ spinlock_t msi_map_lock;
+ phys_addr_t addr;
+ u32 spi_first; /* The SPI number at which MSIs start */
+ u32 num_spis; /* The number of SPIs reserved for MSIs */
+ unsigned long *msi_map;
+};
+
+static void alpine_msix_mask_msi_irq(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void alpine_msix_unmask_msi_irq(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip alpine_msix_irq_chip = {
+ .name = "MSIx",
+ .irq_mask = alpine_msix_mask_msi_irq,
+ .irq_unmask = alpine_msix_unmask_msi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static int alpine_msix_allocate_sgi(struct alpine_msix_data *priv, int num_req)
+{
+ int first;
+
+ spin_lock(&priv->msi_map_lock);
+
+ first = bitmap_find_next_zero_area(priv->msi_map, priv->num_spis, 0,
+ num_req, 0);
+ if (first >= priv->num_spis) {
+ spin_unlock(&priv->msi_map_lock);
+ return -ENOSPC;
+ }
+
+ bitmap_set(priv->msi_map, first, num_req);
+
+ spin_unlock(&priv->msi_map_lock);
+
+ return priv->spi_first + first;
+}
+
+static void alpine_msix_free_sgi(struct alpine_msix_data *priv, unsigned sgi,
+ int num_req)
+{
+ int first = sgi - priv->spi_first;
+
+ spin_lock(&priv->msi_map_lock);
+
+ bitmap_clear(priv->msi_map, first, num_req);
+
+ spin_unlock(&priv->msi_map_lock);
+}
+
+static void alpine_msix_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct alpine_msix_data *priv = irq_data_get_irq_chip_data(data);
+ phys_addr_t msg_addr = priv->addr;
+
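+ /* Each MSI gets its own doorbell address, spaced 8 bytes apart (hwirq << 3) */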
+ msg_addr |= (data->hwirq << 3);
+
+ msg->address_hi = upper_32_bits(msg_addr);
+ msg->address_lo = lower_32_bits(msg_addr);
+ msg->data = 0;
+}
+
+static struct msi_domain_info alpine_msix_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX,
+ .chip = &alpine_msix_irq_chip,
+};
+
+static struct irq_chip middle_irq_chip = {
+ .name = "alpine_msix_middle",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = alpine_msix_compose_msi_msg,
+};
+
+static int alpine_msix_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, int sgi)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ if (!is_of_node(domain->parent->fwnode))
+ return -EINVAL;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = sgi;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+ return 0;
+}
+
+static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct alpine_msix_data *priv = domain->host_data;
+ int sgi, err, i;
+
+ sgi = alpine_msix_allocate_sgi(priv, nr_irqs);
+ if (sgi < 0)
+ return sgi;
+
+ for (i = 0; i < nr_irqs; i++) {
+ err = alpine_msix_gic_domain_alloc(domain, virq + i, sgi + i);
+ if (err)
+ goto err_sgi;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, sgi + i,
+ &middle_irq_chip, priv);
+ }
+
+ return 0;
+
+err_sgi:
+ /* Free the i parent interrupts that were already allocated */
+ irq_domain_free_irqs_parent(domain, virq, i);
+ alpine_msix_free_sgi(priv, sgi, nr_irqs);
+ return err;
+}
+
+static void alpine_msix_middle_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct alpine_msix_data *priv = irq_data_get_irq_chip_data(d);
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ alpine_msix_free_sgi(priv, d->hwirq, nr_irqs);
+}
+
+static const struct irq_domain_ops alpine_msix_middle_domain_ops = {
+ .alloc = alpine_msix_middle_domain_alloc,
+ .free = alpine_msix_middle_domain_free,
+};
+
+static int alpine_msix_init_domains(struct alpine_msix_data *priv,
+ struct device_node *node)
+{
+ struct irq_domain *middle_domain, *msi_domain, *gic_domain;
+ struct device_node *gic_node;
+
+ gic_node = of_irq_find_parent(node);
+ if (!gic_node) {
+ pr_err("Failed to find the GIC node\n");
+ return -ENODEV;
+ }
+
+ gic_domain = irq_find_host(gic_node);
+ if (!gic_domain) {
+ pr_err("Failed to find the GIC domain\n");
+ return -ENXIO;
+ }
+
+ middle_domain = irq_domain_add_tree(NULL,
+ &alpine_msix_middle_domain_ops,
+ priv);
+ if (!middle_domain) {
+ pr_err("Failed to create the MSIX middle domain\n");
+ return -ENOMEM;
+ }
+
+ middle_domain->parent = gic_domain;
+
+ msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &alpine_msix_domain_info,
+ middle_domain);
+ if (!msi_domain) {
+ pr_err("Failed to create MSI domain\n");
+ irq_domain_remove(middle_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int alpine_msix_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct alpine_msix_data *priv;
+ struct resource res;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->msi_map_lock);
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("Failed to allocate resource\n");
+ goto err_priv;
+ }
+
+ /*
+ * The 20 least significant bits of addr provide direct information
+ * regarding the interrupt destination.
+ *
+ * To select the primary GIC as the target GIC, bits [18:17] must be set
+ * to 0x0. In this case, bit 16 (SPI_TARGET_CLUSTER0) must be set.
+ */
+ priv->addr = res.start & GENMASK_ULL(63, 20);
+ priv->addr |= ALPINE_MSIX_SPI_TARGET_CLUSTER0;
+
+ if (of_property_read_u32(node, "al,msi-base-spi", &priv->spi_first)) {
+ pr_err("Unable to parse MSI base\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ if (of_property_read_u32(node, "al,msi-num-spis", &priv->num_spis)) {
+ pr_err("Unable to parse MSI numbers\n");
+ ret = -EINVAL;
+ goto err_priv;
+ }
+
+ priv->msi_map = kzalloc(sizeof(*priv->msi_map) * BITS_TO_LONGS(priv->num_spis),
+ GFP_KERNEL);
+ if (!priv->msi_map) {
+ ret = -ENOMEM;
+ goto err_priv;
+ }
+
+ pr_debug("Registering %d msixs, starting at %d\n",
+ priv->num_spis, priv->spi_first);
+
+ ret = alpine_msix_init_domains(priv, node);
+ if (ret)
+ goto err_map;
+
+ return 0;
+
+err_map:
+ kfree(priv->msi_map);
+err_priv:
+ kfree(priv);
+ return ret;
+}
+IRQCHIP_DECLARE(alpine_msix, "al,alpine-msix", alpine_msix_init);
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 3f3a8c3d2175..e7dc6cbda2a1 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -71,6 +71,7 @@ static u32 doorbell_mask_reg;
static int parent_irq;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
+static struct irq_domain *armada_370_xp_msi_inner_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
@@ -115,127 +116,102 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
#ifdef CONFIG_PCI_MSI
-static int armada_370_xp_alloc_msi(void)
-{
- int hwirq;
+static struct irq_chip armada_370_xp_msi_irq_chip = {
+ .name = "MPIC MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
- mutex_lock(&msi_used_lock);
- hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
- if (hwirq >= PCI_MSI_DOORBELL_NR)
- hwirq = -ENOSPC;
- else
- set_bit(hwirq, msi_used);
- mutex_unlock(&msi_used_lock);
+static struct msi_domain_info armada_370_xp_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &armada_370_xp_msi_irq_chip,
+};
- return hwirq;
+static void armada_370_xp_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ msg->address_lo = lower_32_bits(msi_doorbell_addr);
+ msg->address_hi = upper_32_bits(msi_doorbell_addr);
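+ /* The doorbell payload carries the MSI number offset by the doorbell base */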
+ msg->data = 0xf00 | (data->hwirq + PCI_MSI_DOORBELL_START);
}
-static void armada_370_xp_free_msi(int hwirq)
+static int armada_370_xp_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
{
- mutex_lock(&msi_used_lock);
- if (!test_bit(hwirq, msi_used))
- pr_err("trying to free unused MSI#%d\n", hwirq);
- else
- clear_bit(hwirq, msi_used);
- mutex_unlock(&msi_used_lock);
+ return -EINVAL;
}
-static int armada_370_xp_setup_msi_irq(struct msi_controller *chip,
- struct pci_dev *pdev,
- struct msi_desc *desc)
-{
- struct msi_msg msg;
- int virq, hwirq;
+static struct irq_chip armada_370_xp_msi_bottom_irq_chip = {
+ .name = "MPIC MSI",
+ .irq_compose_msi_msg = armada_370_xp_compose_msi_msg,
+ .irq_set_affinity = armada_370_xp_msi_set_affinity,
+};
- /* We support MSI, but not MSI-X */
- if (desc->msi_attrib.is_msix)
- return -EINVAL;
+static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ int hwirq, i;
- hwirq = armada_370_xp_alloc_msi();
- if (hwirq < 0)
- return hwirq;
+ mutex_lock(&msi_used_lock);
- virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
- if (!virq) {
- armada_370_xp_free_msi(hwirq);
- return -EINVAL;
+ hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
+ 0, nr_irqs, 0);
+ if (hwirq >= PCI_MSI_DOORBELL_NR) {
+ mutex_unlock(&msi_used_lock);
+ return -ENOSPC;
}
- irq_set_msi_desc(virq, desc);
-
- msg.address_lo = msi_doorbell_addr;
- msg.address_hi = 0;
- msg.data = 0xf00 | (hwirq + 16);
-
- pci_write_msi_msg(virq, &msg);
- return 0;
-}
+ bitmap_set(msi_used, hwirq, nr_irqs);
+ mutex_unlock(&msi_used_lock);
-static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip,
- unsigned int irq)
-{
- struct irq_data *d = irq_get_irq_data(irq);
- unsigned long hwirq = d->hwirq;
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &armada_370_xp_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
- irq_dispose_mapping(irq);
- armada_370_xp_free_msi(hwirq);
+ return hwirq;
}
-static struct irq_chip armada_370_xp_msi_irq_chip = {
- .name = "armada_370_xp_msi_irq",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hw)
+static void armada_370_xp_msi_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
- handle_simple_irq);
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- return 0;
+ mutex_lock(&msi_used_lock);
+ bitmap_clear(msi_used, d->hwirq, nr_irqs);
+ mutex_unlock(&msi_used_lock);
}
-static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
- .map = armada_370_xp_msi_map,
+static const struct irq_domain_ops armada_370_xp_msi_domain_ops = {
+ .alloc = armada_370_xp_msi_alloc,
+ .free = armada_370_xp_msi_free,
};
static int armada_370_xp_msi_init(struct device_node *node,
phys_addr_t main_int_phys_base)
{
- struct msi_controller *msi_chip;
u32 reg;
- int ret;
msi_doorbell_addr = main_int_phys_base +
ARMADA_370_XP_SW_TRIG_INT_OFFS;
- msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
- if (!msi_chip)
+ armada_370_xp_msi_inner_domain =
+ irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+ &armada_370_xp_msi_domain_ops, NULL);
+ if (!armada_370_xp_msi_inner_domain)
return -ENOMEM;
- msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
- msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
- msi_chip->of_node = node;
-
armada_370_xp_msi_domain =
- irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
- &armada_370_xp_msi_irq_ops,
- NULL);
+ pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ &armada_370_xp_msi_domain_info,
+ armada_370_xp_msi_inner_domain);
if (!armada_370_xp_msi_domain) {
- kfree(msi_chip);
+ irq_domain_remove(armada_370_xp_msi_inner_domain);
return -ENOMEM;
}
- ret = of_pci_msi_chip_add(msi_chip);
- if (ret < 0) {
- irq_domain_remove(armada_370_xp_msi_domain);
- kfree(msi_chip);
- return ret;
- }
-
reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
| PCI_MSI_DOORBELL_MASK;
@@ -280,7 +256,7 @@ static int armada_xp_set_affinity(struct irq_data *d,
#endif
static struct irq_chip armada_370_xp_irq_chip = {
- .name = "armada_370_xp_irq",
+ .name = "MPIC",
.irq_mask = armada_370_xp_irq_mask,
.irq_mask_ack = armada_370_xp_irq_mask,
.irq_unmask = armada_370_xp_irq_unmask,
@@ -427,12 +403,12 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
continue;
if (is_chained) {
- irq = irq_find_mapping(armada_370_xp_msi_domain,
- msinr - 16);
+ irq = irq_find_mapping(armada_370_xp_msi_inner_domain,
+ msinr - PCI_MSI_DOORBELL_START);
generic_handle_irq(irq);
} else {
- irq = msinr - 16;
- handle_domain_irq(armada_370_xp_msi_domain,
+ irq = msinr - PCI_MSI_DOORBELL_START;
+ handle_domain_irq(armada_370_xp_msi_inner_domain,
irq, regs);
}
}
@@ -604,8 +580,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
armada_370_xp_mpic_domain =
irq_domain_add_linear(node, nr_irqs,
&armada_370_xp_mpic_irq_ops, NULL);
-
BUG_ON(!armada_370_xp_mpic_domain);
+ armada_370_xp_mpic_domain->bus_token = DOMAIN_BUS_WIRED;
/* Setup for the boot CPU */
armada_xp_mpic_perf_init();
diff --git a/drivers/irqchip/irq-ath79-cpu.c b/drivers/irqchip/irq-ath79-cpu.c
new file mode 100644
index 000000000000..befe93c5a51a
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-cpu.c
@@ -0,0 +1,97 @@
+/*
+ * Atheros AR71xx/AR724x/AR913x specific interrupt handling
+ *
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mach-ath79/ath79.h>
+
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory; however, the
+ * DMA controller may still have some unsynchronized data in its FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ *
+ * This array maps the interrupt lines to the DDR write buffer channels.
+ */
+
+static unsigned irq_wb_chan[8] = {
+ -1, -1, -1, -1, -1, -1, -1, -1,
+};
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long pending;
+ int irq;
+
+ pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
+
+ pending >>= CAUSEB_IP;
+ while (pending) {
+ irq = fls(pending) - 1;
+ if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1)
+ ath79_ddr_wb_flush(irq_wb_chan[irq]);
+ do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+ pending &= ~BIT(irq);
+ }
+}
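+
+/*
+ * Worked example (illustrative): with IP2 and IP5 both raised,
+ * pending = BIT(5) | BIT(2) after the CAUSEB_IP shift; fls() - 1
+ * yields 5, so the higher line IP5 is dispatched before IP2 on each
+ * loop iteration.
+ */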
+
+static int __init ar79_cpu_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ int err, i, count;
+
+ /* Fill the irq_wb_chan table */
+ count = of_count_phandle_with_args(
+ node, "qca,ddr-wb-channels", "#qca,ddr-wb-channel-cells");
+
+ for (i = 0; i < count; i++) {
+ struct of_phandle_args args;
+ u32 irq = i;
+
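+ /* entry i defaults to IP line i; qca,ddr-wb-channel-interrupts may remap it */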
+ of_property_read_u32_index(
+ node, "qca,ddr-wb-channel-interrupts", i, &irq);
+ if (irq >= ARRAY_SIZE(irq_wb_chan))
+ continue;
+
+ err = of_parse_phandle_with_args(
+ node, "qca,ddr-wb-channels",
+ "#qca,ddr-wb-channel-cells",
+ i, &args);
+ if (err)
+ return err;
+
+ irq_wb_chan[irq] = args.args[0];
+ }
+
+ return mips_cpu_irq_of_init(node, parent);
+}
+IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
+ ar79_cpu_intc_of_init);
+
+void __init ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3)
+{
+ irq_wb_chan[2] = irq_wb_chan2;
+ irq_wb_chan[3] = irq_wb_chan3;
+ mips_cpu_irq_init();
+}
diff --git a/drivers/irqchip/irq-ath79-misc.c b/drivers/irqchip/irq-ath79-misc.c
new file mode 100644
index 000000000000..aa7290784636
--- /dev/null
+++ b/drivers/irqchip/irq-ath79-misc.c
@@ -0,0 +1,189 @@
+/*
+ * Atheros AR71xx/AR724x/AR913x MISC interrupt controller
+ *
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define AR71XX_RESET_REG_MISC_INT_STATUS 0
+#define AR71XX_RESET_REG_MISC_INT_ENABLE 4
+
+#define ATH79_MISC_IRQ_COUNT 32
+
+static void ath79_misc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ void __iomem *base = domain->host_data;
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+
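+ /* dispatch only sources that are both latched and currently enabled */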
+ pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ if (!pending) {
+ spurious_interrupt();
+ chained_irq_exit(chip, desc);
+ return;
+ }
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_irq(irq_linear_revmap(domain, bit));
+ pending &= ~BIT(bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ar71xx_misc_irq_unmask(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(t | BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar71xx_misc_irq_mask(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+}
+
+static void ar724x_misc_irq_ack(struct irq_data *d)
+{
+ void __iomem *base = irq_data_get_irq_chip_data(d);
+ unsigned int irq = d->hwirq;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+ __raw_writel(t & ~BIT(irq), base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
+}
+
+static struct irq_chip ath79_misc_irq_chip = {
+ .name = "MISC",
+ .irq_unmask = ar71xx_misc_irq_unmask,
+ .irq_mask = ar71xx_misc_irq_mask,
+};
+
+static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+static const struct irq_domain_ops misc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = misc_map,
+};
+
+static void __init ath79_misc_intc_domain_init(
+ struct irq_domain *domain, int irq)
+{
+ void __iomem *base = domain->host_data;
+
+ /* Disable and clear all interrupts */
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
+ __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
+
+ irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
+}
+
+static int __init ath79_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ void __iomem *base;
+ int irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ pr_err("Failed to get MISC IRQ\n");
+ return -EINVAL;
+ }
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("Failed to get MISC IRQ registers\n");
+ return -ENOMEM;
+ }
+
+ domain = irq_domain_add_linear(node, ATH79_MISC_IRQ_COUNT,
+ &misc_irq_domain_ops, base);
+ if (!domain) {
+ pr_err("Failed to add MISC irqdomain\n");
+ return -EINVAL;
+ }
+
+ ath79_misc_intc_domain_init(domain, irq);
+ return 0;
+}
+
+static int __init ar7100_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+ return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+ ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+ struct device_node *node, struct device_node *parent)
+{
+ ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+ return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+ ar7240_misc_intc_of_init);
+
+void __init ath79_misc_irq_init(void __iomem *regs, int irq,
+ int irq_base, bool is_ar71xx)
+{
+ struct irq_domain *domain;
+
+ if (is_ar71xx)
+ ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+ else
+ ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+
+ domain = irq_domain_add_legacy(NULL, ATH79_MISC_IRQ_COUNT,
+ irq_base, 0, &misc_irq_domain_ops, regs);
+ if (!domain)
+ panic("Failed to create MISC irqdomain");
+
+ ath79_misc_intc_domain_init(domain, irq);
+}
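+
+/*
+ * Hedged note: irq_domain_add_legacy() above pre-assigns hwirqs
+ * 0..ATH79_MISC_IRQ_COUNT-1 to the fixed range irq_base..irq_base + 31,
+ * so non-DT ath79 boards keep their historical Linux IRQ numbers, while
+ * DT boards use the linear domain set up in ath79_misc_intc_of_init().
+ */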
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 37199b9b2cfa..28b26c80f4cf 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -80,16 +80,10 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
return 0;
}
-int aic_common_set_priority(int priority, unsigned *val)
+void aic_common_set_priority(int priority, unsigned *val)
{
- if (priority < AT91_AIC_IRQ_MIN_PRIORITY ||
- priority > AT91_AIC_IRQ_MAX_PRIORITY)
- return -EINVAL;
-
*val &= ~AT91_AIC_PRIOR;
*val |= priority;
-
- return 0;
}
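+
+/*
+ * The range check has not been lost: per this series the priority is
+ * validated before this helper runs. A hedged sketch of the caller-side
+ * check, assuming it lives in the common xlate path:
+ *
+ *	if (intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY ||
+ *	    intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)
+ *		return -EINVAL;
+ *	aic_common_set_priority(intspec[2], &smr);
+ */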
int aic_common_irq_domain_xlate(struct irq_domain *d,
@@ -193,7 +187,7 @@ void __init aic_common_rtt_irq_fixup(struct device_node *root)
}
}
-void __init aic_common_irq_fixup(const struct of_device_id *matches)
+static void __init aic_common_irq_fixup(const struct of_device_id *matches)
{
struct device_node *root = of_find_node_by_path("/");
const struct of_device_id *match;
@@ -214,7 +208,8 @@ void __init aic_common_irq_fixup(const struct of_device_id *matches)
struct irq_domain *__init aic_common_of_init(struct device_node *node,
const struct irq_domain_ops *ops,
- const char *name, int nirqs)
+ const char *name, int nirqs,
+ const struct of_device_id *matches)
{
struct irq_chip_generic *gc;
struct irq_domain *domain;
@@ -264,6 +259,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
}
aic_common_ext_irq_of_init(domain);
+ aic_common_irq_fixup(matches);
return domain;
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
index 603f0a9d5411..af60376d50de 100644
--- a/drivers/irqchip/irq-atmel-aic-common.h
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -19,7 +19,7 @@
int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
-int aic_common_set_priority(int priority, unsigned *val);
+void aic_common_set_priority(int priority, unsigned *val);
int aic_common_irq_domain_xlate(struct irq_domain *d,
struct device_node *ctrlr,
@@ -30,12 +30,11 @@ int aic_common_irq_domain_xlate(struct irq_domain *d,
struct irq_domain *__init aic_common_of_init(struct device_node *node,
const struct irq_domain_ops *ops,
- const char *name, int nirqs);
+ const char *name, int nirqs,
+ const struct of_device_id *matches);
void __init aic_common_rtc_irq_fixup(struct device_node *root);
void __init aic_common_rtt_irq_fixup(struct device_node *root);
-void __init aic_common_irq_fixup(const struct of_device_id *matches);
-
#endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 8a0c7f288198..112e17c2768b 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -196,9 +196,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
irq_gc_lock(gc);
smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
- ret = aic_common_set_priority(intspec[2], &smr);
- if (!ret)
- irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
+ aic_common_set_priority(intspec[2], &smr);
+ irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
irq_gc_unlock(gc);
return ret;
@@ -248,12 +247,10 @@ static int __init aic_of_init(struct device_node *node,
return -EEXIST;
domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
- NR_AIC_IRQS);
+ NR_AIC_IRQS, aic_irq_fixups);
if (IS_ERR(domain))
return PTR_ERR(domain);
- aic_common_irq_fixup(aic_irq_fixups);
-
aic_domain = domain;
gc = irq_get_domain_generic_chip(domain, 0);
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 62bb840c613f..4f0d068e1abe 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -272,9 +272,8 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
irq_gc_lock(bgc);
irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
- ret = aic_common_set_priority(intspec[2], &smr);
- if (!ret)
- irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR);
+ aic_common_set_priority(intspec[2], &smr);
+ irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
irq_gc_unlock(bgc);
return ret;
@@ -312,12 +311,10 @@ static int __init aic5_of_init(struct device_node *node,
return -EEXIST;
domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
- nirqs);
+ nirqs, aic5_irq_fixups);
if (IS_ERR(domain))
return PTR_ERR(domain);
- aic_common_irq_fixup(aic5_irq_fixups);
-
aic5_domain = domain;
nchips = aic5_domain->revmap_size / 32;
for (i = 0; i < nchips; i++) {
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 963065a0d774..b6e950d4782a 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -229,7 +229,6 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu,
unsigned long secondary_startup_phys =
(unsigned long)virt_to_phys((void *)secondary_startup);
- dsb();
writel(secondary_startup_phys,
intc.base + LOCAL_MAILBOX3_SET0 + 16 * cpu);
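+/*
+ * Hedged note on the dsb() removal: on ARM, writel() already issues a
+ * write barrier before the store (roughly __iowmb(); writel_relaxed()),
+ * so the explicit dsb() duplicated what the accessor provides.
+ */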
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
new file mode 100644
index 000000000000..b844c89a9506
--- /dev/null
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -0,0 +1,364 @@
+/*
+ * Broadcom BCM6345 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Copyright 2015 Simon Arlott
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is based on the BCM7038 (which supports SMP) but with a single
+ * enable register instead of separate mask/set/clear registers.
+ *
+ * The BCM3380 has a similar mask/status register layout, but each pair
+ * of words is at separate locations (and SMP is not supported).
+ *
+ * ENABLE/STATUS words are packed next to each other for each CPU:
+ *
+ * BCM6368:
+ * 0x1000_0020: CPU0_W0_ENABLE
+ * 0x1000_0024: CPU0_W1_ENABLE
+ * 0x1000_0028: CPU0_W0_STATUS IRQs 32-63
+ * 0x1000_002c: CPU0_W1_STATUS IRQs 0-31
+ * 0x1000_0030: CPU1_W0_ENABLE
+ * 0x1000_0034: CPU1_W1_ENABLE
+ * 0x1000_0038: CPU1_W0_STATUS IRQs 32-63
+ * 0x1000_003c: CPU1_W1_STATUS IRQs 0-31
+ *
+ * BCM63168:
+ * 0x1000_0020: CPU0_W0_ENABLE
+ * 0x1000_0024: CPU0_W1_ENABLE
+ * 0x1000_0028: CPU0_W2_ENABLE
+ * 0x1000_002c: CPU0_W3_ENABLE
+ * 0x1000_0030: CPU0_W0_STATUS IRQs 96-127
+ * 0x1000_0034: CPU0_W1_STATUS IRQs 64-95
+ * 0x1000_0038: CPU0_W2_STATUS IRQs 32-63
+ * 0x1000_003c: CPU0_W3_STATUS IRQs 0-31
+ * 0x1000_0040: CPU1_W0_ENABLE
+ * 0x1000_0044: CPU1_W1_ENABLE
+ * 0x1000_0048: CPU1_W2_ENABLE
+ * 0x1000_004c: CPU1_W3_ENABLE
+ * 0x1000_0050: CPU1_W0_STATUS IRQs 96-127
+ * 0x1000_0054: CPU1_W1_STATUS IRQs 64-95
+ * 0x1000_0058: CPU1_W2_STATUS IRQs 32-63
+ * 0x1000_005c: CPU1_W3_STATUS IRQs 0-31
+ *
+ * IRQs are numbered in CPU native endian order
+ * (which is big-endian in these examples)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQS_PER_WORD 32
+#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2)
+
+struct bcm6345_l1_cpu;
+
+struct bcm6345_l1_chip {
+ raw_spinlock_t lock;
+ unsigned int n_words;
+ struct irq_domain *domain;
+ struct cpumask cpumask;
+ struct bcm6345_l1_cpu *cpus[NR_CPUS];
+};
+
+struct bcm6345_l1_cpu {
+ void __iomem *map_base;
+ unsigned int parent_irq;
+ u32 enable_cache[];
+};
+
+static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
+ unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+ return (1 * intc->n_words - word - 1) * sizeof(u32);
+#else
+ return (0 * intc->n_words + word) * sizeof(u32);
+#endif
+}
+
+static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
+ unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+ return (2 * intc->n_words - word - 1) * sizeof(u32);
+#else
+ return (1 * intc->n_words + word) * sizeof(u32);
+#endif
+}
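+
+/*
+ * Worked example (BCM6368, n_words = 2, big-endian): logical word 0
+ * covers IRQs 0-31, so reg_enable(intc, 0) = (2 - 0 - 1) * 4 = 0x04,
+ * i.e. CPUx_W1_ENABLE, and reg_status(intc, 0) = (4 - 0 - 1) * 4 = 0x0c,
+ * i.e. CPUx_W1_STATUS -- matching the layout above, where the W1
+ * registers hold IRQs 0-31.
+ */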
+
+static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
+ struct irq_data *d)
+{
+ return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
+}
+
+static void bcm6345_l1_irq_handle(struct irq_desc *desc)
+{
+ struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
+ struct bcm6345_l1_cpu *cpu;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int idx;
+
+#ifdef CONFIG_SMP
+ cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+ cpu = intc->cpus[0];
+#endif
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < intc->n_words; idx++) {
+ int base = idx * IRQS_PER_WORD;
+ unsigned long pending;
+ irq_hw_number_t hwirq;
+ unsigned int irq;
+
+ pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
+ pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
+
+ for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+ irq = irq_linear_revmap(intc->domain, base + hwirq);
+ if (irq)
+ do_IRQ(irq);
+ else
+ spurious_interrupt();
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static inline void __bcm6345_l1_unmask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+ intc->cpus[cpu_idx]->enable_cache[word] |= mask;
+ __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+ intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static inline void __bcm6345_l1_mask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+ intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
+ __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+ intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static void bcm6345_l1_unmask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm6345_l1_unmask(d);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm6345_l1_mask(struct irq_data *d)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ __bcm6345_l1_mask(d);
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static int bcm6345_l1_set_affinity(struct irq_data *d,
+ const struct cpumask *dest,
+ bool force)
+{
+ struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+ u32 word = d->hwirq / IRQS_PER_WORD;
+ u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+ unsigned int old_cpu = cpu_for_irq(intc, d);
+ unsigned int new_cpu;
+ struct cpumask valid;
+ unsigned long flags;
+ bool enabled;
+
+ if (!cpumask_and(&valid, &intc->cpumask, dest))
+ return -EINVAL;
+
+ new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+ if (new_cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ dest = cpumask_of(new_cpu);
+
+ raw_spin_lock_irqsave(&intc->lock, flags);
+ if (old_cpu != new_cpu) {
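+		/* an enabled IRQ must be masked on the old CPU and re-enabled on the new one */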
+ enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
+ if (enabled)
+ __bcm6345_l1_mask(d);
+ cpumask_copy(irq_data_get_affinity_mask(d), dest);
+ if (enabled)
+ __bcm6345_l1_unmask(d);
+ } else {
+ cpumask_copy(irq_data_get_affinity_mask(d), dest);
+ }
+ raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+ return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static int __init bcm6345_l1_init_one(struct device_node *dn,
+ unsigned int idx,
+ struct bcm6345_l1_chip *intc)
+{
+ struct resource res;
+ resource_size_t sz;
+ struct bcm6345_l1_cpu *cpu;
+ unsigned int i, n_words;
+
+ if (of_address_to_resource(dn, idx, &res))
+ return -EINVAL;
+ sz = resource_size(&res);
+ n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+ if (!intc->n_words)
+ intc->n_words = n_words;
+ else if (intc->n_words != n_words)
+ return -EINVAL;
+
+ cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ GFP_KERNEL);
+ if (!cpu)
+ return -ENOMEM;
+
+ cpu->map_base = ioremap(res.start, sz);
+ if (!cpu->map_base)
+ return -ENOMEM;
+
+ for (i = 0; i < n_words; i++) {
+ cpu->enable_cache[i] = 0;
+ __raw_writel(0, cpu->map_base + reg_enable(intc, i));
+ }
+
+ cpu->parent_irq = irq_of_parse_and_map(dn, idx);
+ if (!cpu->parent_irq) {
+ pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
+ return -EINVAL;
+ }
+ irq_set_chained_handler_and_data(cpu->parent_irq,
+ bcm6345_l1_irq_handle, intc);
+
+ return 0;
+}
+
+static struct irq_chip bcm6345_l1_irq_chip = {
+ .name = "bcm6345-l1",
+ .irq_mask = bcm6345_l1_mask,
+ .irq_unmask = bcm6345_l1_unmask,
+ .irq_set_affinity = bcm6345_l1_set_affinity,
+};
+
+static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(virq,
+ &bcm6345_l1_irq_chip, handle_percpu_irq);
+ irq_set_chip_data(virq, d->host_data);
+ return 0;
+}
+
+static const struct irq_domain_ops bcm6345_l1_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = bcm6345_l1_map,
+};
+
+static int __init bcm6345_l1_of_init(struct device_node *dn,
+ struct device_node *parent)
+{
+ struct bcm6345_l1_chip *intc;
+ unsigned int idx;
+ int ret;
+
+ intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+ if (!intc)
+ return -ENOMEM;
+
+ for_each_possible_cpu(idx) {
+ ret = bcm6345_l1_init_one(dn, idx, intc);
+ if (ret)
+ pr_err("failed to init intc L1 for cpu %d: %d\n",
+ idx, ret);
+ else
+ cpumask_set_cpu(idx, &intc->cpumask);
+ }
+
+ if (!cpumask_weight(&intc->cpumask)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ raw_spin_lock_init(&intc->lock);
+
+ intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+ &bcm6345_l1_domain_ops,
+ intc);
+ if (!intc->domain) {
+ ret = -ENOMEM;
+ goto out_unmap;
+ }
+
+ pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
+ IRQS_PER_WORD * intc->n_words);
+ for_each_cpu(idx, &intc->cpumask) {
+ struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+ pr_info(" CPU%u at MMIO 0x%p (irq = %d)\n", idx,
+ cpu->map_base, cpu->parent_irq);
+ }
+
+ return 0;
+
+out_unmap:
+ for_each_possible_cpu(idx) {
+ struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+ if (cpu) {
+ if (cpu->map_base)
+ iounmap(cpu->map_base);
+ kfree(cpu);
+ }
+ }
+out_free:
+ kfree(intc);
+ return ret;
+}
+
+IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index aa46eb280a7f..54c296401525 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -10,7 +10,8 @@
#include <linux/irqchip/arm-gic.h>
#define REALVIEW_SYS_LOCK_OFFSET 0x20
-#define REALVIEW_PB11MP_SYS_PLD_CTRL1 0x74
+#define REALVIEW_SYS_PLD_CTRL1 0x74
+#define REALVIEW_EB_REVB_SYS_PLD_CTRL1 0xD8
#define VERSATILE_LOCK_VAL 0xA05F
#define PLD_INTMODE_MASK BIT(22)|BIT(23)|BIT(24)
#define PLD_INTMODE_LEGACY 0x0
@@ -18,26 +19,57 @@
#define PLD_INTMODE_NEW_NO_DCC BIT(23)
#define PLD_INTMODE_FIQ_ENABLE BIT(24)
+/* For some reason RealView EB Rev B moved this register */
+static const struct of_device_id syscon_pldset_of_match[] = {
+ {
+ .compatible = "arm,realview-eb11mp-revb-syscon",
+ .data = (void *)REALVIEW_EB_REVB_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-eb11mp-revc-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-eb-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {
+ .compatible = "arm,realview-pb11mp-syscon",
+ .data = (void *)REALVIEW_SYS_PLD_CTRL1,
+ },
+ {},
+};
+
static int __init
realview_gic_of_init(struct device_node *node, struct device_node *parent)
{
static struct regmap *map;
+ struct device_node *np;
+ const struct of_device_id *gic_id;
+ u32 pld1_ctrl;
+
+ np = of_find_matching_node_and_match(NULL, syscon_pldset_of_match,
+ &gic_id);
+ if (!np)
+ return -ENODEV;
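+	/* the match data holds this board's PLD_CTRL1 register offset */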
+ pld1_ctrl = (u32)gic_id->data;
/* The PB11MPCore GIC needs to be configured in the syscon */
- map = syscon_regmap_lookup_by_compatible("arm,realview-pb11mp-syscon");
+ map = syscon_node_to_regmap(np);
if (!IS_ERR(map)) {
/* new irq mode with no DCC */
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET,
VERSATILE_LOCK_VAL);
- regmap_update_bits(map, REALVIEW_PB11MP_SYS_PLD_CTRL1,
+ regmap_update_bits(map, pld1_ctrl,
PLD_INTMODE_NEW_NO_DCC,
PLD_INTMODE_MASK);
regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000);
- pr_info("TC11MP GIC: set up interrupt controller to NEW mode, no DCC\n");
+ pr_info("RealView GIC: set up interrupt controller to NEW mode, no DCC\n");
} else {
- pr_err("TC11MP GIC setup: could not find syscon\n");
- return -ENXIO;
+ pr_err("RealView GIC setup: could not find syscon\n");
+ return -ENODEV;
}
return gic_of_init(node, parent);
}
IRQCHIP_DECLARE(armtc11mp_gic, "arm,tc11mp-gic", realview_gic_of_init);
+IRQCHIP_DECLARE(armeb11mp_gic, "arm,eb11mp-gic", realview_gic_of_init);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index c779f83e511d..28f047c61baa 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -92,18 +92,6 @@ static struct msi_domain_info gicv2m_msi_domain_info = {
.chip = &gicv2m_msi_irq_chip,
};
-static int gicv2m_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- int ret;
-
- ret = irq_chip_set_affinity_parent(irq_data, mask, force);
- if (ret == IRQ_SET_MASK_OK)
- ret = IRQ_SET_MASK_OK_DONE;
-
- return ret;
-}
-
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
@@ -122,7 +110,7 @@ static struct irq_chip gicv2m_irq_chip = {
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
- .irq_set_affinity = gicv2m_set_affinity,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
.irq_compose_msi_msg = gicv2m_compose_msi_msg,
};
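+
+/*
+ * Hedged note: the wrapper removed above is redundant because the GIC
+ * set_affinity callbacks themselves now return IRQ_SET_MASK_OK_DONE
+ * (see the irq-gic.c and irq-gic-v3.c hunks in this series), and
+ * irq_chip_set_affinity_parent() forwards that value unchanged.
+ */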
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 43dfd15c1dd2..39261798c59f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -103,7 +103,6 @@ struct its_device {
static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
-static struct device_node *gic_root_node;
static struct rdists *gic_rdists;
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
@@ -671,7 +670,7 @@ static int its_chunk_to_lpi(int chunk)
return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}
-static int its_lpi_init(u32 id_bits)
+static int __init its_lpi_init(u32 id_bits)
{
lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
@@ -1430,7 +1429,8 @@ static void its_enable_quirks(struct its_node *its)
gic_enable_quirks(iidr, its_quirks, its);
}
-static int its_probe(struct device_node *node, struct irq_domain *parent)
+static int __init its_probe(struct device_node *node,
+ struct irq_domain *parent)
{
struct resource res;
struct its_node *its;
@@ -1591,7 +1591,7 @@ static struct of_device_id its_device_id[] = {
{},
};
-int its_init(struct device_node *node, struct rdists *rdists,
+int __init its_init(struct device_node *node, struct rdists *rdists,
struct irq_domain *parent_domain)
{
struct device_node *np;
@@ -1607,8 +1607,6 @@ int its_init(struct device_node *node, struct rdists *rdists,
}
gic_rdists = rdists;
- gic_root_node = node;
-
its_alloc_lpi_tables();
its_lpi_init(rdists->id_bits);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d7be6ddc34f6..5b7d3c2129d8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -15,10 +15,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -38,6 +40,7 @@
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
+ bool single_redist;
};
struct gic_chip_data {
@@ -434,6 +437,9 @@ static int gic_populate_rdist(void)
return 0;
}
+ if (gic_data.redist_regions[i].single_redist)
+ break;
+
if (gic_data.redist_stride) {
ptr += gic_data.redist_stride;
} else {
@@ -634,7 +640,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
else
gic_dist_wait_for_rwp();
- return IRQ_SET_MASK_OK;
+ return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity NULL
@@ -764,6 +770,15 @@ static int gic_irq_domain_translate(struct irq_domain *d,
return 0;
}
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+		if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
return -EINVAL;
}
@@ -811,17 +826,88 @@ static void gicv3_enable_quirks(void)
#endif
}
+static int __init gic_init_bases(void __iomem *dist_base,
+ struct redist_region *rdist_regs,
+ u32 nr_redist_regions,
+ u64 redist_stride,
+ struct fwnode_handle *handle)
+{
+ struct device_node *node;
+ u32 typer;
+ int gic_irqs;
+ int err;
+
+ if (!is_hyp_mode_available())
+ static_key_slow_dec(&supports_deactivate);
+
+ if (static_key_true(&supports_deactivate))
+ pr_info("GIC: Using split EOI/Deactivate mode\n");
+
+ gic_data.dist_base = dist_base;
+ gic_data.redist_regions = rdist_regs;
+ gic_data.nr_redist_regions = nr_redist_regions;
+ gic_data.redist_stride = redist_stride;
+
+ gicv3_enable_quirks();
+
+ /*
+ * Find out how many interrupts are supported.
+ * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+ */
+ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
+ gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+ gic_irqs = GICD_TYPER_IRQS(typer);
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ gic_data.irq_nr = gic_irqs;
+
+ gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
+ &gic_data);
+ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+
+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ set_handle_irq(gic_handle_irq);
+
+ node = to_of_node(handle);
+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+ node) /* Temp hack to prevent ITS init for ACPI */
+ its_init(node, &gic_data.rdists, gic_data.domain);
+
+ gic_smp_init();
+ gic_dist_init();
+ gic_cpu_init();
+ gic_cpu_pm_init();
+
+ return 0;
+
+out_free:
+ if (gic_data.domain)
+ irq_domain_remove(gic_data.domain);
+ free_percpu(gic_data.rdists.rdist);
+ return err;
+}
+
+static int __init gic_validate_dist_version(void __iomem *dist_base)
+{
+ u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+
+ if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
+ return -ENODEV;
+
+ return 0;
+}
+
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
struct redist_region *rdist_regs;
u64 redist_stride;
u32 nr_redist_regions;
- u32 typer;
- u32 reg;
- int gic_irqs;
- int err;
- int i;
+ int err, i;
dist_base = of_iomap(node, 0);
if (!dist_base) {
@@ -830,11 +916,10 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
return -ENXIO;
}
- reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
- if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
+ err = gic_validate_dist_version(dist_base);
+ if (err) {
pr_err("%s: no distributor detected, giving up\n",
node->full_name);
- err = -ENODEV;
goto out_unmap_dist;
}
@@ -865,63 +950,229 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
redist_stride = 0;
- if (!is_hyp_mode_available())
- static_key_slow_dec(&supports_deactivate);
+ err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
+ redist_stride, &node->fwnode);
+ if (!err)
+ return 0;
- if (static_key_true(&supports_deactivate))
- pr_info("GIC: Using split EOI/Deactivate mode\n");
+out_unmap_rdist:
+ for (i = 0; i < nr_redist_regions; i++)
+ if (rdist_regs[i].redist_base)
+ iounmap(rdist_regs[i].redist_base);
+ kfree(rdist_regs);
+out_unmap_dist:
+ iounmap(dist_base);
+ return err;
+}
- gic_data.dist_base = dist_base;
- gic_data.redist_regions = rdist_regs;
- gic_data.nr_redist_regions = nr_redist_regions;
- gic_data.redist_stride = redist_stride;
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
- gicv3_enable_quirks();
+#ifdef CONFIG_ACPI
+static void __iomem *dist_base;
+static struct redist_region *redist_regs __initdata;
+static u32 nr_redist_regions __initdata;
+static bool single_redist;
+
+static void __init
+gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
+{
+	static int count;
+
+ redist_regs[count].phys_base = phys_base;
+ redist_regs[count].redist_base = redist_base;
+ redist_regs[count].single_redist = single_redist;
+ count++;
+}
+
+static int __init
+gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_redistributor *redist =
+ (struct acpi_madt_generic_redistributor *)header;
+ void __iomem *redist_base;
+
+ redist_base = ioremap(redist->base_address, redist->length);
+ if (!redist_base) {
+ pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
+ return -ENOMEM;
+ }
+
+ gic_acpi_register_redist(redist->base_address, redist_base);
+ return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+ u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+ void __iomem *redist_base;
+
+ redist_base = ioremap(gicc->gicr_base_address, size);
+ if (!redist_base)
+ return -ENOMEM;
+
+ gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
+ return 0;
+}
+
+static int __init gic_acpi_collect_gicr_base(void)
+{
+ acpi_tbl_entry_handler redist_parser;
+ enum acpi_madt_type type;
+
+ if (single_redist) {
+ type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
+ redist_parser = gic_acpi_parse_madt_gicc;
+ } else {
+ type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
+ redist_parser = gic_acpi_parse_madt_redist;
+ }
+
+ /* Collect redistributor base addresses in GICR entries */
+ if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
+ return 0;
+
+ pr_info("No valid GICR entries exist\n");
+ return -ENODEV;
+}
+
+static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+	/* Subtable presence alone means that a redistributor exists */
+ return 0;
+}
+
+static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
/*
- * Find out how many interrupts are supported.
- * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+	 * If a GICC entry is enabled and has a valid GICR base address, then
+	 * the GICR base is provided via the GICC subtable.
*/
- typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
- gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
- gic_irqs = GICD_TYPER_IRQS(typer);
- if (gic_irqs > 1020)
- gic_irqs = 1020;
- gic_data.irq_nr = gic_irqs;
+ if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
+ return 0;
- gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
- &gic_data);
- gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
+ return -ENODEV;
+}
- if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
+static int __init gic_acpi_count_gicr_regions(void)
+{
+ int count;
+
+ /*
+	 * Count how many redistributor regions we have. Mixing
+	 * redistributor descriptions is not allowed: the GICR and GICC
+	 * subtables are mutually exclusive.
+ */
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
+ gic_acpi_match_gicr, 0);
+ if (count > 0) {
+ single_redist = false;
+ return count;
+ }
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ gic_acpi_match_gicc, 0);
+ if (count > 0)
+ single_redist = true;
+
+ return count;
+}
+
+static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
+ struct acpi_probe_entry *ape)
+{
+ struct acpi_madt_generic_distributor *dist;
+ int count;
+
+ dist = (struct acpi_madt_generic_distributor *)header;
+ if (dist->version != ape->driver_data)
+ return false;
+
+	/* We need to count the GICR regions anyway, and the sooner the better */
+ count = gic_acpi_count_gicr_regions();
+ if (count <= 0)
+ return false;
+
+ nr_redist_regions = count;
+ return true;
+}
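+
+/*
+ * The same gic_acpi_init() entry is registered below for MADT GIC
+ * versions V3, V4 and NONE; this validator only accepts the table whose
+ * distributor version matches the driver_data of the probing entry, so
+ * exactly one of the three declarations fires.
+ */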
+
+#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
+
+static int __init
+gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
+{
+ struct acpi_madt_generic_distributor *dist;
+ struct fwnode_handle *domain_handle;
+ int i, err;
+
+ /* Get distributor base address */
+ dist = (struct acpi_madt_generic_distributor *)header;
+ dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE);
+ if (!dist_base) {
+ pr_err("Unable to map GICD registers\n");
+ return -ENOMEM;
+ }
+
+ err = gic_validate_dist_version(dist_base);
+ if (err) {
+ pr_err("No distributor detected at @%p, giving up", dist_base);
+ goto out_dist_unmap;
+ }
+
+ redist_regs = kzalloc(sizeof(*redist_regs) * nr_redist_regions,
+ GFP_KERNEL);
+ if (!redist_regs) {
err = -ENOMEM;
- goto out_free;
+ goto out_dist_unmap;
}
- set_handle_irq(gic_handle_irq);
+ err = gic_acpi_collect_gicr_base();
+ if (err)
+ goto out_redist_unmap;
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
- its_init(node, &gic_data.rdists, gic_data.domain);
+ domain_handle = irq_domain_alloc_fwnode(dist_base);
+ if (!domain_handle) {
+ err = -ENOMEM;
+ goto out_redist_unmap;
+ }
- gic_smp_init();
- gic_dist_init();
- gic_cpu_init();
- gic_cpu_pm_init();
+ err = gic_init_bases(dist_base, redist_regs, nr_redist_regions, 0,
+ domain_handle);
+ if (err)
+ goto out_fwhandle_free;
+ acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
return 0;
-out_free:
- if (gic_data.domain)
- irq_domain_remove(gic_data.domain);
- free_percpu(gic_data.rdists.rdist);
-out_unmap_rdist:
+out_fwhandle_free:
+ irq_domain_free_fwnode(domain_handle);
+out_redist_unmap:
for (i = 0; i < nr_redist_regions; i++)
- if (rdist_regs[i].redist_base)
- iounmap(rdist_regs[i].redist_base);
- kfree(rdist_regs);
-out_unmap_dist:
+ if (redist_regs[i].redist_base)
+ iounmap(redist_regs[i].redist_base);
+ kfree(redist_regs);
+out_dist_unmap:
iounmap(dist_base);
return err;
}
-
-IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
+IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
+ gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
+ gic_acpi_init);
+IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
+ acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
+ gic_acpi_init);
+#endif
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8f9ebf714e2b..282344b95ec2 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -319,7 +319,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
writel_relaxed(val | bit, reg);
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
- return IRQ_SET_MASK_OK;
+ return IRQ_SET_MASK_OK_DONE;
}
#endif
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 4dd3eb8a40b3..d67baa231c13 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -239,8 +239,11 @@ static struct irq_domain_ops mbigen_domain_ops = {
static int mbigen_device_probe(struct platform_device *pdev)
{
struct mbigen_device *mgn_chip;
- struct resource *res;
+ struct platform_device *child;
struct irq_domain *domain;
+ struct device_node *np;
+ struct device *parent;
+ struct resource *res;
u32 num_pins;
mgn_chip = devm_kzalloc(&pdev->dev, sizeof(*mgn_chip), GFP_KERNEL);
@@ -254,23 +257,30 @@ static int mbigen_device_probe(struct platform_device *pdev)
if (IS_ERR(mgn_chip->base))
return PTR_ERR(mgn_chip->base);
- if (of_property_read_u32(pdev->dev.of_node, "num-pins", &num_pins) < 0) {
- dev_err(&pdev->dev, "No num-pins property\n");
- return -EINVAL;
- }
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_property_read_bool(np, "interrupt-controller"))
+ continue;
- domain = platform_msi_create_device_domain(&pdev->dev, num_pins,
- mbigen_write_msg,
- &mbigen_domain_ops,
- mgn_chip);
+ parent = platform_bus_type.dev_root;
+ child = of_platform_device_create(np, NULL, parent);
+		if (!child)
+			return -ENOMEM;
- if (!domain)
- return -ENOMEM;
+ if (of_property_read_u32(child->dev.of_node, "num-pins",
+ &num_pins) < 0) {
+ dev_err(&pdev->dev, "No num-pins property\n");
+ return -EINVAL;
+ }
+
+ domain = platform_msi_create_device_domain(&child->dev, num_pins,
+ mbigen_write_msg,
+ &mbigen_domain_ops,
+ mgn_chip);
+ if (!domain)
+ return -ENOMEM;
+ }
platform_set_drvdata(pdev, mgn_chip);
-
- dev_info(&pdev->dev, "Allocated %d MSIs\n", num_pins);
-
return 0;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef27a183..94a30da0cfac 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -29,16 +29,32 @@ struct gic_pcpu_mask {
DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};
+struct gic_irq_spec {
+ enum {
+ GIC_DEVICE,
+ GIC_IPI
+ } type;
+
+ union {
+ struct cpumask *ipimask;
+ unsigned int hwirq;
+ };
+};
+
static unsigned long __gic_base_addr;
+
static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
+static struct irq_domain *gic_dev_domain;
+static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
+DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static void __gic_irq_dispatch(void);
@@ -264,9 +280,11 @@ static void gic_bind_eic_interrupt(int irq, int set)
GIC_VPE_EIC_SS(irq), set);
}
-void gic_send_ipi(unsigned int intr)
+static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
- gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
+ irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));
+
+ gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}
int gic_get_c0_compare_int(void)
@@ -449,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
/* Update the pcpu_masks */
- for (i = 0; i < NR_CPUS; i++)
+ for (i = 0; i < gic_vpes; i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
@@ -479,6 +497,7 @@ static struct irq_chip gic_edge_irq_controller = {
#ifdef CONFIG_SMP
.irq_set_affinity = gic_set_affinity,
#endif
+ .ipi_send_single = gic_send_ipi,
};
static void gic_handle_local_int(bool chained)
@@ -572,83 +591,6 @@ static void gic_irq_dispatch(struct irq_desc *desc)
gic_handle_shared_int(true);
}
-#ifdef CONFIG_MIPS_GIC_IPI
-static int gic_resched_int_base;
-static int gic_call_int_base;
-
-unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
-{
- return gic_resched_int_base + cpu;
-}
-
-unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
-{
- return gic_call_int_base + cpu;
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
- scheduler_ipi();
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
- generic_smp_call_function_interrupt();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI call"
-};
-
-static __init void gic_ipi_init_one(unsigned int intr, int cpu,
- struct irqaction *action)
-{
- int virq = irq_create_mapping(gic_irq_domain,
- GIC_SHARED_TO_HWIRQ(intr));
- int i;
-
- gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
- for (i = 0; i < NR_CPUS; i++)
- clear_bit(intr, pcpu_masks[i].pcpu_mask);
- set_bit(intr, pcpu_masks[cpu].pcpu_mask);
-
- irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
-
- irq_set_handler(virq, handle_percpu_irq);
- setup_irq(virq, action);
-}
-
-static __init void gic_ipi_init(void)
-{
- int i;
-
- /* Use last 2 * NR_CPUS interrupts as IPIs */
- gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
- gic_call_int_base = gic_resched_int_base - nr_cpu_ids;
-
- for (i = 0; i < nr_cpu_ids; i++) {
- gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
- gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
- }
-}
-#else
-static inline void gic_ipi_init(void)
-{
-}
-#endif
-
static void __init gic_basic_init(void)
{
unsigned int i;
@@ -753,19 +695,21 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
}
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
+ irq_hw_number_t hw, unsigned int vpe)
{
int intr = GIC_HWIRQ_TO_SHARED(hw);
unsigned long flags;
+ int i;
irq_set_chip_and_handler(virq, &gic_level_irq_controller,
handle_level_irq);
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
- /* Map to VPE 0 by default */
- gic_map_to_vpe(intr, 0);
- set_bit(intr, pcpu_masks[0].pcpu_mask);
+ gic_map_to_vpe(intr, vpe);
+ for (i = 0; i < gic_vpes; i++)
+ clear_bit(intr, pcpu_masks[i].pcpu_mask);
+ set_bit(intr, pcpu_masks[vpe].pcpu_mask);
spin_unlock_irqrestore(&gic_lock, flags);
return 0;
@@ -776,10 +720,93 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
{
if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
return gic_local_irq_domain_map(d, virq, hw);
- return gic_shared_irq_domain_map(d, virq, hw);
+ return gic_shared_irq_domain_map(d, virq, hw, 0);
}
-static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct gic_irq_spec *spec = arg;
+ irq_hw_number_t hwirq, base_hwirq;
+ int cpu, ret, i;
+
+ if (spec->type == GIC_DEVICE) {
+ /* verify that it doesn't conflict with an IPI irq */
+ if (test_bit(spec->hwirq, ipi_resrv))
+ return -EBUSY;
+ } else {
+ base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
+ if (base_hwirq == gic_shared_intrs) {
+ return -ENOMEM;
+ }
+
+ /* check that we have enough space */
+		for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
+ if (!test_bit(i, ipi_resrv))
+ return -EBUSY;
+ }
+ bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);
+
+ /* map the hwirq for each cpu consecutively */
+ i = 0;
+ for_each_cpu(cpu, spec->ipimask) {
+ hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);
+
+ ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
+ &gic_edge_irq_controller,
+ NULL);
+ if (ret)
+ goto error;
+
+ ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
+ if (ret)
+ goto error;
+
+ i++;
+ }
+
+ /*
+ * tell the parent about the base hwirq we allocated so it can
+ * set its own domain data
+ */
+ spec->hwirq = base_hwirq;
+ }
+
+ return 0;
+error:
+ bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
+ return ret;
+}
+
+static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ irq_hw_number_t base_hwirq;
+ struct irq_data *data;
+
+ data = irq_get_irq_data(virq);
+ if (!data)
+ return;
+
+ base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
+ bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
+}
+
+static int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+	/* this domain shouldn't be accessed directly */
+ return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+ .map = gic_irq_domain_map,
+ .alloc = gic_irq_domain_alloc,
+ .free = gic_irq_domain_free,
+ .match = gic_irq_domain_match,
+};
+
+static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_type)
@@ -798,9 +825,130 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
return 0;
}
-static const struct irq_domain_ops gic_irq_domain_ops = {
- .map = gic_irq_domain_map,
- .xlate = gic_irq_domain_xlate,
+static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_fwspec *fwspec = arg;
+ struct gic_irq_spec spec = {
+ .type = GIC_DEVICE,
+ .hwirq = fwspec->param[1],
+ };
+ int i, ret;
+ bool is_shared = fwspec->param[0] == GIC_SHARED;
+
+ if (is_shared) {
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_hw_number_t hwirq;
+
+ if (is_shared)
+ hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
+ else
+ hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+
+ ret = irq_domain_set_hwirq_and_chip(d, virq + i,
+ hwirq,
+ &gic_level_irq_controller,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ /* no real allocation is done for dev irqs, so no need to free anything */
+ return;
+}
+
+static struct irq_domain_ops gic_dev_domain_ops = {
+ .xlate = gic_dev_domain_xlate,
+ .alloc = gic_dev_domain_alloc,
+ .free = gic_dev_domain_free,
+};
+
+static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ /*
+ * There's nothing to translate here. hwirq is dynamically allocated and
+ * the irq type is always edge triggered.
+	 */
+ *out_hwirq = 0;
+ *out_type = IRQ_TYPE_EDGE_RISING;
+
+ return 0;
+}
+
+static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct cpumask *ipimask = arg;
+ struct gic_irq_spec spec = {
+ .type = GIC_IPI,
+ .ipimask = ipimask
+ };
+ int ret, i;
+
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+ if (ret)
+ return ret;
+
+ /* the parent should have set spec.hwirq to the base_hwirq it allocated */
+ for (i = 0; i < nr_irqs; i++) {
+ ret = irq_domain_set_hwirq_and_chip(d, virq + i,
+ GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
+ &gic_edge_irq_controller,
+ NULL);
+ if (ret)
+ goto error;
+
+ ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+error:
+ irq_domain_free_irqs_parent(d, virq, nr_irqs);
+ return ret;
+}
+
+static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ irq_domain_free_irqs_parent(d, virq, nr_irqs);
+}
+
+static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
+{
+ bool is_ipi;
+
+ switch (bus_token) {
+ case DOMAIN_BUS_IPI:
+ is_ipi = d->bus_token == bus_token;
+ return to_of_node(d->fwnode) == node && is_ipi;
+ break;
+ default:
+ return 0;
+ }
+}
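+
+/*
+ * Hedged usage sketch: consumers find this domain by bus token instead
+ * of walking the hierarchy, e.g.
+ *
+ *	struct irq_domain *d =
+ *		irq_find_matching_host(node, DOMAIN_BUS_IPI);
+ */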
+
+static struct irq_domain_ops gic_ipi_domain_ops = {
+ .xlate = gic_ipi_domain_xlate,
+ .alloc = gic_ipi_domain_alloc,
+ .free = gic_ipi_domain_free,
+ .match = gic_ipi_domain_match,
};
static void __init __gic_init(unsigned long gic_base_addr,
@@ -809,6 +957,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
struct device_node *node)
{
unsigned int gicconfig;
+ unsigned int v[2];
__gic_base_addr = gic_base_addr;
@@ -864,9 +1013,32 @@ static void __init __gic_init(unsigned long gic_base_addr,
if (!gic_irq_domain)
panic("Failed to add GIC IRQ domain");
- gic_basic_init();
+ gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ node, &gic_dev_domain_ops, NULL);
+ if (!gic_dev_domain)
+ panic("Failed to add GIC DEV domain");
+
+ gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU,
+ GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
+ node, &gic_ipi_domain_ops, NULL);
+ if (!gic_ipi_domain)
+ panic("Failed to add GIC IPI domain");
- gic_ipi_init();
+ gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;
+
+ if (node &&
+ !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
+ bitmap_set(ipi_resrv, v[0], v[1]);
+ } else {
+ /* Make the last 2 * gic_vpes available for IPIs */
+ bitmap_set(ipi_resrv,
+ gic_shared_intrs - 2 * gic_vpes,
+ 2 * gic_vpes);
+ }
+
+ gic_basic_init();
}
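+
+/*
+ * Resulting domain stack (sketch): gic_irq_domain is the root and is
+ * never matched directly (its .match callback always returns 0);
+ * gic_dev_domain and gic_ipi_domain sit on top of it, the latter
+ * tagged DOMAIN_BUS_IPI so it is only found via bus-token lookups.
+ */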
void __init gic_init(unsigned long gic_base_addr,
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
new file mode 100644
index 000000000000..b4d367868dbb
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "GIC-ODMI: " fmt
+
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define GICP_ODMIN_SET 0x40
+#define GICP_ODMI_INT_NUM_SHIFT 12
+#define GICP_ODMIN_GM_EP_R0 0x110
+#define GICP_ODMIN_GM_EP_R1 0x114
+#define GICP_ODMIN_GM_EA_R0 0x108
+#define GICP_ODMIN_GM_EA_R1 0x118
+
+/*
+ * We don't support the group events, so we simply have 8 interrupts
+ * per frame.
+ */
+#define NODMIS_SHIFT 3
+#define NODMIS_PER_FRAME (1 << NODMIS_SHIFT)
+#define NODMIS_MASK (NODMIS_PER_FRAME - 1)
+
+struct odmi_data {
+ struct resource res;
+ void __iomem *base;
+ unsigned int spi_base;
+};
+
+static struct odmi_data *odmis;
+static unsigned long *odmis_bm;
+static unsigned int odmis_count;
+
+/* Protects odmis_bm */
+static DEFINE_SPINLOCK(odmis_bm_lock);
+
+static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct odmi_data *odmi;
+ phys_addr_t addr;
+ unsigned int odmin;
+
+ if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
+ return;
+
+ odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
+ odmin = d->hwirq & NODMIS_MASK;
+
+ addr = odmi->res.start + GICP_ODMIN_SET;
+
+ msg->address_hi = upper_32_bits(addr);
+ msg->address_lo = lower_32_bits(addr);
+ msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
+}
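+
+/*
+ * Worked example (illustrative): hwirq 9 with NODMIS_PER_FRAME = 8
+ * selects frame 1 (9 >> NODMIS_SHIFT) and odmin 1 (9 & NODMIS_MASK);
+ * the endpoint then writes 1 << GICP_ODMI_INT_NUM_SHIFT to that
+ * frame's GICP_ODMIN_SET register, raising interrupt spi_base + 1 on
+ * the parent GIC.
+ */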
+
+static struct irq_chip odmi_irq_chip = {
+ .name = "ODMI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = odmi_compose_msi_msg,
+};
+
+static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct odmi_data *odmi = NULL;
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ unsigned int hwirq, odmin;
+ int ret;
+
+ spin_lock(&odmis_bm_lock);
+ hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
+ if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
+ spin_unlock(&odmis_bm_lock);
+ return -ENOSPC;
+ }
+
+ __set_bit(hwirq, odmis_bm);
+ spin_unlock(&odmis_bm_lock);
+
+ odmi = &odmis[hwirq >> NODMIS_SHIFT];
+ odmin = hwirq & NODMIS_MASK;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = GIC_SPI;
+ fwspec.param[1] = odmi->spi_base - 32 + odmin;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret) {
+ pr_err("Cannot allocate parent IRQ\n");
+ spin_lock(&odmis_bm_lock);
+ __clear_bit(hwirq, odmis_bm);
+ spin_unlock(&odmis_bm_lock);
+ return ret;
+ }
+
+ /* Configure the interrupt line to be edge */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &odmi_irq_chip, NULL);
+
+ return 0;
+}
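One worked instance of the parent fwspec numbering used in the allocator above, assuming a hypothetical frame with marvell,spi-base = 160: doorbell slot 3 yields param[1] = 160 - 32 + 3 = 131, i.e. SPI 163 once the GIC binding's 32-interrupt offset is re-added. A minimal helper expressing that:

static inline u32 demo_spi_cell(u32 spi_base, u32 odmin)
{
	/* GIC_SPI cells count from 0, hardware SPIs from 32 */
	return spi_base - 32 + odmin;
}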
+
+static void odmi_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
+ pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
+ return;
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+
+ /* Actually free the MSI */
+ spin_lock(&odmis_bm_lock);
+ __clear_bit(d->hwirq, odmis_bm);
+ spin_unlock(&odmis_bm_lock);
+}
+
+static const struct irq_domain_ops odmi_domain_ops = {
+ .alloc = odmi_irq_domain_alloc,
+ .free = odmi_irq_domain_free,
+};
+
+static struct irq_chip odmi_msi_irq_chip = {
+ .name = "ODMI",
+};
+
+static struct msi_domain_ops odmi_msi_ops = {
+};
+
+static struct msi_domain_info odmi_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &odmi_msi_ops,
+ .chip = &odmi_msi_irq_chip,
+};
+
+static int __init mvebu_odmi_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *inner_domain, *plat_domain;
+ int ret, i;
+
+ if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
+ return -EINVAL;
+
+ odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
+ if (!odmis)
+ return -ENOMEM;
+
+ odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
+ sizeof(long), GFP_KERNEL);
+ if (!odmis_bm) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (i = 0; i < odmis_count; i++) {
+ struct odmi_data *odmi = &odmis[i];
+
+ ret = of_address_to_resource(node, i, &odmi->res);
+ if (ret)
+ goto err_unmap;
+
+ odmi->base = of_io_request_and_map(node, i, "odmi");
+ if (IS_ERR(odmi->base)) {
+ ret = PTR_ERR(odmi->base);
+ goto err_unmap;
+ }
+
+ if (of_property_read_u32_index(node, "marvell,spi-base",
+ i, &odmi->spi_base)) {
+ ret = -EINVAL;
+ goto err_unmap;
+ }
+ }
+
+ inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ odmis_count * NODMIS_PER_FRAME,
+ &odmi_domain_ops, NULL);
+ if (!inner_domain) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ inner_domain->parent = irq_find_host(parent);
+ if (!inner_domain->parent) {
+ ret = -ENXIO;
+ goto err_remove_inner;
+ }
+
+ plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+ &odmi_msi_domain_info,
+ inner_domain);
+ if (!plat_domain) {
+ ret = -ENOMEM;
+ goto err_remove_inner;
+ }
+
+ return 0;
+
+err_remove_inner:
+ irq_domain_remove(inner_domain);
+err_unmap:
+ for (i = 0; i < odmis_count; i++) {
+ struct odmi_data *odmi = &odmis[i];
+
+ if (odmi->base && !IS_ERR(odmi->base))
+ iounmap(odmis[i].base);
+ }
+ kfree(odmis_bm);
+err_alloc:
+ kfree(odmis);
+ return ret;
+}
+
+IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index efe50845939d..17304705f2cf 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
void __iomem *icoll_base;
icoll_base = of_io_request_and_map(np, 0, np->name);
- if (!icoll_base)
+ if (IS_ERR(icoll_base))
panic("%s: unable to map resource", np->full_name);
return icoll_base;
}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 0820f67cc9a7..668730c5cb66 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -160,9 +160,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
gc = irq_get_domain_generic_chip(domain, 0);
gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (!gc->reg_base) {
+ if (IS_ERR(gc->reg_base)) {
pr_err("unable to map resource\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(gc->reg_base);
goto fail_irqd_remove;
}
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
new file mode 100644
index 000000000000..bdbb5c0ff7fe
--- /dev/null
+++ b/drivers/irqchip/irq-tango.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2014 Mans Rullgard <mans@mansr.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+
+#define IRQ0_CTL_BASE 0x0000
+#define IRQ1_CTL_BASE 0x0100
+#define EDGE_CTL_BASE 0x0200
+#define IRQ2_CTL_BASE 0x0300
+
+#define IRQ_CTL_HI 0x18
+#define EDGE_CTL_HI 0x20
+
+#define IRQ_STATUS 0x00
+#define IRQ_RAWSTAT 0x04
+#define IRQ_EN_SET 0x08
+#define IRQ_EN_CLR 0x0c
+#define IRQ_SOFT_SET 0x10
+#define IRQ_SOFT_CLR 0x14
+
+#define EDGE_STATUS 0x00
+#define EDGE_RAWSTAT 0x04
+#define EDGE_CFG_RISE 0x08
+#define EDGE_CFG_FALL 0x0c
+#define EDGE_CFG_RISE_SET 0x10
+#define EDGE_CFG_RISE_CLR 0x14
+#define EDGE_CFG_FALL_SET 0x18
+#define EDGE_CFG_FALL_CLR 0x1c
+
+struct tangox_irq_chip {
+ void __iomem *base;
+ unsigned long ctl;
+};
+
+static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg)
+{
+ return readl_relaxed(chip->base + reg);
+}
+
+static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val)
+{
+ writel_relaxed(val, chip->base + reg);
+}
+
+static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status,
+ int base)
+{
+ unsigned int hwirq;
+ unsigned int virq;
+
+ while (status) {
+ hwirq = __ffs(status);
+ virq = irq_find_mapping(dom, base + hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ status &= ~BIT(hwirq);
+ }
+}
+
+static void tangox_irq_handler(struct irq_desc *desc)
+{
+ struct irq_domain *dom = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ struct tangox_irq_chip *chip = dom->host_data;
+ unsigned int status_lo, status_hi;
+
+ chained_irq_enter(host_chip, desc);
+
+ status_lo = intc_readl(chip, chip->ctl + IRQ_STATUS);
+ status_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS);
+
+ tangox_dispatch_irqs(dom, status_lo, 0);
+ tangox_dispatch_irqs(dom, status_hi, 32);
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct tangox_irq_chip *chip = gc->domain->host_data;
+ struct irq_chip_regs *regs = &gc->chip_types[0].regs;
+
+ switch (flow_type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
+ intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
+ intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ intc_writel(chip, regs->type + EDGE_CFG_RISE_CLR, d->mask);
+ intc_writel(chip, regs->type + EDGE_CFG_FALL_CLR, d->mask);
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ intc_writel(chip, regs->type + EDGE_CFG_RISE_SET, d->mask);
+ intc_writel(chip, regs->type + EDGE_CFG_FALL_SET, d->mask);
+ break;
+
+ default:
+ pr_err("Invalid trigger mode %x for IRQ %d\n",
+ flow_type, d->irq);
+ return -EINVAL;
+ }
+
+ return irq_setup_alt_chip(d, flow_type);
+}
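The four cases above reduce to independently setting or clearing the RISE and FALL configuration bits for the masked line: rising edge is set/clear, falling edge is clear/set, level-high is clear/clear and level-low is set/set. A compact sketch of the same programming, assuming the register layout defined earlier:

static void demo_tangox_cfg(struct tangox_irq_chip *chip,
			    struct irq_chip_regs *regs, u32 mask,
			    bool rise, bool fall)
{
	intc_writel(chip, regs->type +
		    (rise ? EDGE_CFG_RISE_SET : EDGE_CFG_RISE_CLR), mask);
	intc_writel(chip, regs->type +
		    (fall ? EDGE_CFG_FALL_SET : EDGE_CFG_FALL_CLR), mask);
}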
+
+static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
+ unsigned long ctl_offs,
+ unsigned long edge_offs)
+{
+ struct tangox_irq_chip *chip = gc->domain->host_data;
+ struct irq_chip_type *ct = gc->chip_types;
+ unsigned long ctl_base = chip->ctl + ctl_offs;
+ unsigned long edge_base = EDGE_CTL_BASE + edge_offs;
+ int i;
+
+ gc->reg_base = chip->base;
+ gc->unused = 0;
+
+ for (i = 0; i < 2; i++) {
+ ct[i].chip.irq_ack = irq_gc_ack_set_bit;
+ ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
+ ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
+ ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct[i].chip.irq_set_type = tangox_irq_set_type;
+ ct[i].chip.name = gc->domain->name;
+
+ ct[i].regs.enable = ctl_base + IRQ_EN_SET;
+ ct[i].regs.disable = ctl_base + IRQ_EN_CLR;
+ ct[i].regs.ack = edge_base + EDGE_RAWSTAT;
+ ct[i].regs.type = edge_base;
+ }
+
+ ct[0].type = IRQ_TYPE_LEVEL_MASK;
+ ct[0].handler = handle_level_irq;
+
+ ct[1].type = IRQ_TYPE_EDGE_BOTH;
+ ct[1].handler = handle_edge_irq;
+
+ intc_writel(chip, ct->regs.disable, 0xffffffff);
+ intc_writel(chip, ct->regs.ack, 0xffffffff);
+}
+
+static void __init tangox_irq_domain_init(struct irq_domain *dom)
+{
+ struct irq_chip_generic *gc;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ gc = irq_get_domain_generic_chip(dom, i * 32);
+ tangox_irq_init_chip(gc, i * IRQ_CTL_HI, i * EDGE_CTL_HI);
+ }
+}
+
+static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
+ struct device_node *node)
+{
+ struct tangox_irq_chip *chip;
+ struct irq_domain *dom;
+ struct resource res;
+ int irq;
+ int err;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq)
+ panic("%s: failed to get IRQ", node->name);
+
+ err = of_address_to_resource(node, 0, &res);
+ if (err)
+ panic("%s: failed to get address", node->name);
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ panic("%s: failed to allocate chip data", node->name);
+ chip->ctl = res.start - baseres->start;
+ chip->base = base;
+
+ dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip);
+ if (!dom)
+ panic("%s: failed to create irqdomain", node->name);
+
+ err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name,
+ handle_level_irq, 0, 0, 0);
+ if (err)
+ panic("%s: failed to allocate irqchip", node->name);
+
+ tangox_irq_domain_init(dom);
+
+ irq_set_chained_handler(irq, tangox_irq_handler);
+ irq_set_handler_data(irq, dom);
+
+ return 0;
+}
+
+static int __init tangox_of_irq_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct device_node *c;
+ struct resource res;
+ void __iomem *base;
+
+ base = of_iomap(node, 0);
+ if (!base)
+ panic("%s: of_iomap failed", node->name);
+
+ of_address_to_resource(node, 0, &res);
+
+ for_each_child_of_node(node, c)
+ tangox_irq_init(base, &res, c);
+
+ return 0;
+}
+IRQCHIP_DECLARE(tangox_intc, "sigma,smp8642-intc", tangox_of_irq_init);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 121ec301372e..50be9639e27e 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -275,22 +275,10 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
&parent_fwspec);
}
-static void tegra_ictlr_domain_free(struct irq_domain *domain,
- unsigned int virq,
- unsigned int nr_irqs)
-{
- unsigned int i;
-
- for (i = 0; i < nr_irqs; i++) {
- struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
- irq_domain_reset_irq_data(d);
- }
-}
-
static const struct irq_domain_ops tegra_ictlr_domain_ops = {
.translate = tegra_ictlr_domain_translate,
.alloc = tegra_ictlr_domain_alloc,
- .free = tegra_ictlr_domain_free,
+ .free = irq_domain_free_irqs_common,
};
static int __init tegra_ictlr_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 4192bdcd2734..2325fb3c482b 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -59,7 +59,7 @@ static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-struct irq_domain_ops ts4800_ic_ops = {
+static const struct irq_domain_ops ts4800_ic_ops = {
.map = ts4800_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 91c81965e7ca..c32e45826c2c 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -8,9 +8,6 @@ obj-$(CONFIG_MISDN) += mISDN/
obj-$(CONFIG_ISDN) += hardware/
obj-$(CONFIG_ISDN_DIVERSION) += divert/
obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/
-obj-$(CONFIG_ISDN_DRV_ICN) += icn/
-obj-$(CONFIG_ISDN_DRV_PCBIT) += pcbit/
obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/
-obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/
obj-$(CONFIG_HYSDN) += hysdn/
obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
index b5226af6ddec..576b7b4a3278 100644
--- a/drivers/isdn/hardware/eicon/debug.c
+++ b/drivers/isdn/hardware/eicon/debug.c
@@ -192,8 +192,6 @@ static diva_os_spin_lock_t dbg_q_lock;
static diva_os_spin_lock_t dbg_adapter_lock;
static int dbg_q_busy;
static volatile dword dbg_sequence;
-static dword start_sec;
-static dword start_usec;
/*
INTERFACE:
@@ -215,8 +213,6 @@ int diva_maint_init(byte *base, unsigned long length, int do_init) {
dbg_base = base;
- diva_os_get_time(&start_sec, &start_usec);
-
*(dword *)base = (dword)DBG_MAGIC; /* Store Magic */
base += sizeof(dword);
length -= sizeof(dword);
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index 48db08d0bb3d..0de29b7b712f 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -45,7 +45,6 @@ char *DRIVERRELEASE_MNT = "2.0";
static wait_queue_head_t msgwaitq;
static unsigned long opened;
-static struct timeval start_time;
extern int mntfunc_init(int *, void **, unsigned long);
extern void mntfunc_finit(void);
@@ -88,28 +87,12 @@ int diva_os_copy_from_user(void *os_handle, void *dst, const void __user *src,
*/
void diva_os_get_time(dword *sec, dword *usec)
{
- struct timeval tv;
-
- do_gettimeofday(&tv);
-
- if (tv.tv_sec > start_time.tv_sec) {
- if (start_time.tv_usec > tv.tv_usec) {
- tv.tv_sec--;
- tv.tv_usec += 1000000;
- }
- *sec = (dword) (tv.tv_sec - start_time.tv_sec);
- *usec = (dword) (tv.tv_usec - start_time.tv_usec);
- } else if (tv.tv_sec == start_time.tv_sec) {
- *sec = 0;
- if (start_time.tv_usec < tv.tv_usec) {
- *usec = (dword) (tv.tv_usec - start_time.tv_usec);
- } else {
- *usec = 0;
- }
- } else {
- *sec = (dword) tv.tv_sec;
- *usec = (dword) tv.tv_usec;
- }
+ struct timespec64 time;
+
+ ktime_get_ts64(&time);
+
+ *sec = (dword) time.tv_sec;
+ *usec = (dword) (time.tv_nsec / NSEC_PER_USEC);
}
/*
@@ -213,7 +196,6 @@ static int __init maint_init(void)
int ret = 0;
void *buffer = NULL;
- do_gettimeofday(&start_time);
init_waitqueue_head(&msgwaitq);
printk(KERN_INFO "%s\n", DRIVERNAME);
diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h
index 8121e046b739..720ee72aab6a 100644
--- a/drivers/isdn/hardware/mISDN/ipac.h
+++ b/drivers/isdn/hardware/mISDN/ipac.h
@@ -99,32 +99,32 @@ struct ipac_hw {
/* All registers original Siemens Spec */
/* IPAC/ISAC registers */
-#define ISAC_MASK 0x20
#define ISAC_ISTA 0x20
-#define ISAC_STAR 0x21
+#define ISAC_MASK 0x20
#define ISAC_CMDR 0x21
+#define ISAC_STAR 0x21
+#define ISAC_MODE 0x22
+#define ISAC_TIMR 0x23
#define ISAC_EXIR 0x24
-#define ISAC_ADF2 0x39
+#define ISAC_RBCL 0x25
+#define ISAC_RSTA 0x27
+#define ISAC_RBCH 0x2A
#define ISAC_SPCR 0x30
-#define ISAC_ADF1 0x38
#define ISAC_CIR0 0x31
#define ISAC_CIX0 0x31
-#define ISAC_CIR1 0x33
-#define ISAC_CIX1 0x33
-#define ISAC_STCR 0x37
-#define ISAC_MODE 0x22
-#define ISAC_RSTA 0x27
-#define ISAC_RBCL 0x25
-#define ISAC_RBCH 0x2A
-#define ISAC_TIMR 0x23
-#define ISAC_SQXR 0x3b
-#define ISAC_SQRR 0x3b
-#define ISAC_MOSR 0x3a
-#define ISAC_MOCR 0x3a
#define ISAC_MOR0 0x32
#define ISAC_MOX0 0x32
+#define ISAC_CIR1 0x33
+#define ISAC_CIX1 0x33
#define ISAC_MOR1 0x34
#define ISAC_MOX1 0x34
+#define ISAC_STCR 0x37
+#define ISAC_ADF1 0x38
+#define ISAC_ADF2 0x39
+#define ISAC_MOCR 0x3a
+#define ISAC_MOSR 0x3a
+#define ISAC_SQRR 0x3b
+#define ISAC_SQXR 0x3b
#define ISAC_RBCH_XAC 0x80
@@ -212,13 +212,14 @@ struct ipac_hw {
#define ISAC_CMD_DUI 0xF
/* ISAC/ISACX/IPAC/IPACX L1 indications */
-#define ISAC_IND_RS 0x1
-#define ISAC_IND_PU 0x7
#define ISAC_IND_DR 0x0
+#define ISAC_IND_RS 0x1
#define ISAC_IND_SD 0x2
#define ISAC_IND_DIS 0x3
-#define ISAC_IND_EI 0x6
#define ISAC_IND_RSY 0x4
+#define ISAC_IND_DR6 0x5
+#define ISAC_IND_EI 0x6
+#define ISAC_IND_PU 0x7
#define ISAC_IND_ARD 0x8
#define ISAC_IND_TI 0xA
#define ISAC_IND_ATI 0xB
@@ -339,9 +340,9 @@ struct ipac_hw {
#define ISACX__AUX 0x08
#define ISACX__CIC 0x10
#define ISACX__ST 0x20
+#define IPACX__ON 0x2C
#define IPACX__ICB 0x40
#define IPACX__ICA 0x80
-#define IPACX__ON 0x2C
/* ISACX/IPACX _CMDRD (W) */
#define ISACX_CMDRD_XRES 0x01
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index cb428b9ee441..aa9b6c3cadc1 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -80,6 +80,7 @@ isac_ph_state_bh(struct dchannel *dch)
l1_event(dch->l1, HW_DEACT_CNF);
break;
case ISAC_IND_DR:
+ case ISAC_IND_DR6:
dch->state = 3;
l1_event(dch->l1, HW_DEACT_IND);
break;
@@ -660,6 +661,7 @@ isac_l1cmd(struct dchannel *dch, u32 cmd)
spin_lock_irqsave(isac->hwlock, flags);
if ((isac->state == ISAC_IND_EI) ||
(isac->state == ISAC_IND_DR) ||
+ (isac->state == ISAC_IND_DR6) ||
(isac->state == ISAC_IND_RS))
ph_command(isac, ISAC_CMD_TIM);
else
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c
index 7fdf78f46433..df7e05ca8f9c 100644
--- a/drivers/isdn/hisax/isac.c
+++ b/drivers/isdn/hisax/isac.c
@@ -215,9 +215,11 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
if (count == 0)
count = 32;
isac_empty_fifo(cs, count);
- if ((count = cs->rcvidx) > 0) {
+ count = cs->rcvidx;
+ if (count > 0) {
cs->rcvidx = 0;
- if (!(skb = alloc_skb(count, GFP_ATOMIC)))
+ skb = alloc_skb(count, GFP_ATOMIC);
+ if (!skb)
printk(KERN_WARNING "HiSax: D receive out of memory\n");
else {
memcpy(skb_put(skb, count), cs->rcvbuf, count);
@@ -251,7 +253,8 @@ isac_interrupt(struct IsdnCardState *cs, u_char val)
cs->tx_skb = NULL;
}
}
- if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
+ cs->tx_skb = skb_dequeue(&cs->sq);
+ if (cs->tx_skb) {
cs->tx_cnt = 0;
isac_fill_fifo(cs);
} else
@@ -313,7 +316,8 @@ afterXPR:
#if ARCOFI_USE
if (v1 & 0x08) {
if (!cs->dc.isac.mon_rx) {
- if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+ cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+ if (!cs->dc.isac.mon_rx) {
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "ISAC MON RX out of memory!");
cs->dc.isac.mocr &= 0xf0;
@@ -343,7 +347,8 @@ afterXPR:
afterMONR0:
if (v1 & 0x80) {
if (!cs->dc.isac.mon_rx) {
- if (!(cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC))) {
+ cs->dc.isac.mon_rx = kmalloc(MAX_MON_FRAME, GFP_ATOMIC);
+ if (!cs->dc.isac.mon_rx) {
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "ISAC MON RX out of memory!");
cs->dc.isac.mocr &= 0x0f;
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index f5b714cd7618..68e54d9f2f53 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -123,16 +123,6 @@ comment "ISDN4Linux hardware drivers"
source "drivers/isdn/hisax/Kconfig"
-
-menu "Active cards"
-
-source "drivers/isdn/icn/Kconfig"
-
-source "drivers/isdn/pcbit/Kconfig"
-
-source "drivers/isdn/act2000/Kconfig"
-
-endmenu
# end ISDN_I4L
endif
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 2175225af742..947d5c978b8f 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1572,7 +1572,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
#endif
return;
}
- port->flags |= ASYNC_CLOSING;
+ info->closing = 1;
tty->closing = 1;
/*
@@ -1603,6 +1603,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
info->ncarrier = 0;
tty_port_close_end(port, tty);
+ info->closing = 0;
#ifdef ISDN_DEBUG_MODEM_OPEN
printk(KERN_DEBUG "isdn_tty_close normal exit\n");
#endif
@@ -2236,7 +2237,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
l = strlen(msg);
spin_lock_irqsave(&info->readlock, flags);
- if (port->flags & ASYNC_CLOSING) {
+ if (info->closing) {
spin_unlock_irqrestore(&info->readlock, flags);
return;
}
@@ -2386,13 +2387,12 @@ isdn_tty_modem_result(int code, modem_info *info)
case RESULT_NO_CARRIER:
#ifdef ISDN_DEBUG_MODEM_HUP
printk(KERN_DEBUG "modem_result: NO CARRIER %d %d\n",
- (info->port.flags & ASYNC_CLOSING),
- (!info->port.tty));
+ info->closing, !info->port.tty);
#endif
m->mdmreg[REG_RINGCNT] = 0;
del_timer(&info->nc_timer);
info->ncarrier = 0;
- if ((info->port.flags & ASYNC_CLOSING) || (!info->port.tty))
+ if (info->closing || !info->port.tty)
return;
#ifdef CONFIG_ISDN_AUDIO
@@ -2525,7 +2525,7 @@ isdn_tty_modem_result(int code, modem_info *info)
}
}
if (code == RESULT_NO_CARRIER) {
- if ((info->port.flags & ASYNC_CLOSING) || (!info->port.tty))
+ if (info->closing || !info->port.tty)
return;
if (info->port.flags & ASYNC_CHECK_CD)
diff --git a/drivers/isdn/mISDN/clock.c b/drivers/isdn/mISDN/clock.c
index 693fb7c9b59a..f8f659f1ce1b 100644
--- a/drivers/isdn/mISDN/clock.c
+++ b/drivers/isdn/mISDN/clock.c
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
+#include <linux/ktime.h>
#include <linux/mISDNif.h>
#include <linux/export.h>
#include "core.h"
@@ -45,15 +46,15 @@ static u_int *debug;
static LIST_HEAD(iclock_list);
static DEFINE_RWLOCK(iclock_lock);
static u16 iclock_count; /* counter of last clock */
-static struct timeval iclock_tv; /* time stamp of last clock */
-static int iclock_tv_valid; /* already received one timestamp */
+static ktime_t iclock_timestamp; /* time stamp of last clock */
+static int iclock_timestamp_valid; /* already received one timestamp */
static struct mISDNclock *iclock_current;
void
mISDN_init_clock(u_int *dp)
{
debug = dp;
- do_gettimeofday(&iclock_tv);
+ iclock_timestamp = ktime_get();
}
static void
@@ -86,7 +87,7 @@ select_iclock(void)
}
if (bestclock != iclock_current) {
/* no clock received yet */
- iclock_tv_valid = 0;
+ iclock_timestamp_valid = 0;
}
iclock_current = bestclock;
}
@@ -139,12 +140,11 @@ mISDN_unregister_clock(struct mISDNclock *iclock)
EXPORT_SYMBOL(mISDN_unregister_clock);
void
-mISDN_clock_update(struct mISDNclock *iclock, int samples, struct timeval *tv)
+mISDN_clock_update(struct mISDNclock *iclock, int samples, ktime_t *timestamp)
{
u_long flags;
- struct timeval tv_now;
- time_t elapsed_sec;
- int elapsed_8000th;
+ ktime_t timestamp_now;
+ u16 delta;
write_lock_irqsave(&iclock_lock, flags);
if (iclock_current != iclock) {
@@ -156,33 +156,27 @@ mISDN_clock_update(struct mISDNclock *iclock, int samples, struct timeval *tv)
write_unlock_irqrestore(&iclock_lock, flags);
return;
}
- if (iclock_tv_valid) {
+ if (iclock_timestamp_valid) {
/* increment sample counter by given samples */
iclock_count += samples;
- if (tv) { /* tv must be set, if function call is delayed */
- iclock_tv.tv_sec = tv->tv_sec;
- iclock_tv.tv_usec = tv->tv_usec;
- } else
- do_gettimeofday(&iclock_tv);
+ if (timestamp) { /* timestamp must be set if the call is delayed */
+ iclock_timestamp = *timestamp;
+ } else {
+ iclock_timestamp = ktime_get();
+ }
} else {
/* calc elapsed time by system clock */
- if (tv) { /* tv must be set, if function call is delayed */
- tv_now.tv_sec = tv->tv_sec;
- tv_now.tv_usec = tv->tv_usec;
- } else
- do_gettimeofday(&tv_now);
- elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec;
- elapsed_8000th = (tv_now.tv_usec / 125)
- - (iclock_tv.tv_usec / 125);
- if (elapsed_8000th < 0) {
- elapsed_sec -= 1;
- elapsed_8000th += 8000;
+ if (timestamp) { /* timestamp must be set if the call is delayed */
+ timestamp_now = *timestamp;
+ } else {
+ timestamp_now = ktime_get();
}
+ delta = ktime_divns(ktime_sub(timestamp_now, iclock_timestamp),
+ (NSEC_PER_SEC / 8000));
/* add elapsed time to counter and set new timestamp */
- iclock_count += elapsed_sec * 8000 + elapsed_8000th;
- iclock_tv.tv_sec = tv_now.tv_sec;
- iclock_tv.tv_usec = tv_now.tv_usec;
- iclock_tv_valid = 1;
+ iclock_count += delta;
+ iclock_timestamp = timestamp_now;
+ iclock_timestamp_valid = 1;
if (*debug & DEBUG_CLOCK)
printk("Received first clock from source '%s'.\n",
iclock_current ? iclock_current->name : "nothing");
@@ -195,22 +189,17 @@ unsigned short
mISDN_clock_get(void)
{
u_long flags;
- struct timeval tv_now;
- time_t elapsed_sec;
- int elapsed_8000th;
+ ktime_t timestamp_now;
+ u16 delta;
u16 count;
read_lock_irqsave(&iclock_lock, flags);
/* calc elapsed time by system clock */
- do_gettimeofday(&tv_now);
- elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec;
- elapsed_8000th = (tv_now.tv_usec / 125) - (iclock_tv.tv_usec / 125);
- if (elapsed_8000th < 0) {
- elapsed_sec -= 1;
- elapsed_8000th += 8000;
- }
+ timestamp_now = ktime_get();
+ delta = ktime_divns(ktime_sub(timestamp_now, iclock_timestamp),
+ (NSEC_PER_SEC / 8000));
/* add elapsed time to counter */
- count = iclock_count + elapsed_sec * 8000 + elapsed_8000th;
+ count = iclock_count + delta;
read_unlock_irqrestore(&iclock_lock, flags);
return count;
}
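Both conversions above rest on the same arithmetic: the mISDN clock runs at 8000 samples per second, so one sample lasts NSEC_PER_SEC / 8000 = 125000 ns, and 2.5 ms of elapsed monotonic time is 2500000 / 125000 = 20 samples. A minimal sketch of that step in isolation (the u16 truncation mirrors the driver's own wrapping counter):

#include <linux/ktime.h>

static u16 demo_elapsed_samples(ktime_t then, ktime_t now)
{
	/* 125 us per sample at 8 kHz */
	return ktime_divns(ktime_sub(now, then), NSEC_PER_SEC / 8000);
}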
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (!maddr || maddr->family != AF_ISDN)
return -EINVAL;
+ if (addr_len < sizeof(struct sockaddr_mISDN))
+ return -EINVAL;
+
lock_sock(sk);
if (_pms(sk)->dev) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7f940c24a16b..225147863e02 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -443,6 +443,7 @@ config LEDS_DELL_NETBOOKS
tristate "External LED on Dell Business Netbooks"
depends on LEDS_CLASS
depends on X86 && ACPI_WMI
+ depends on DELL_SMBIOS
help
This adds support for the Latitude 2100 and similar
notebooks that have an external LED.
@@ -568,6 +569,14 @@ config LEDS_SEAD3
This driver can also be built as a module. If so the module
will be called leds-sead3.
+config LEDS_IS31FL32XX
+ tristate "LED support for ISSI IS31FL32XX I2C LED controller family"
+ depends on LEDS_CLASS && I2C && OF
+ help
+ Say Y here to include support for ISSI IS31FL32XX and Si-En SN32xx
+ LED controllers. They are I2C devices with multiple constant-current
+ channels, each with independent 256-level PWM control.
+
comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
config LEDS_BLINKM
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e9d53092765d..cb2013df52d9 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_SEAD3) += leds-sead3.o
+obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/dell-led.c b/drivers/leds/dell-led.c
index c36acaf566a6..b3d6e9c15cf9 100644
--- a/drivers/leds/dell-led.c
+++ b/drivers/leds/dell-led.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/dell-led.h>
+#include "../platform/x86/dell-smbios.h"
MODULE_AUTHOR("Louis Davis/Jim Dailey");
MODULE_DESCRIPTION("Dell LED Control Driver");
@@ -42,120 +43,32 @@ MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID);
#define CMD_LED_OFF 17
#define CMD_LED_BLINK 18
-struct app_wmi_args {
- u16 class;
- u16 selector;
- u32 arg1;
- u32 arg2;
- u32 arg3;
- u32 arg4;
- u32 res1;
- u32 res2;
- u32 res3;
- u32 res4;
- char dummy[92];
-};
-
#define GLOBAL_MIC_MUTE_ENABLE 0x364
#define GLOBAL_MIC_MUTE_DISABLE 0x365
-struct dell_bios_data_token {
- u16 tokenid;
- u16 location;
- u16 value;
-};
-
-struct __attribute__ ((__packed__)) dell_bios_calling_interface {
- struct dmi_header header;
- u16 cmd_io_addr;
- u8 cmd_io_code;
- u32 supported_cmds;
- struct dell_bios_data_token damap[];
-};
-
-static struct dell_bios_data_token dell_mic_tokens[2];
-
-static int dell_wmi_perform_query(struct app_wmi_args *args)
-{
- struct app_wmi_args *bios_return;
- union acpi_object *obj;
- struct acpi_buffer input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_status status;
- u32 rc = -EINVAL;
-
- input.length = 128;
- input.pointer = args;
-
- status = wmi_evaluate_method(DELL_APP_GUID, 0, 1, &input, &output);
- if (!ACPI_SUCCESS(status))
- goto err_out0;
-
- obj = output.pointer;
- if (!obj)
- goto err_out0;
-
- if (obj->type != ACPI_TYPE_BUFFER)
- goto err_out1;
-
- bios_return = (struct app_wmi_args *)obj->buffer.pointer;
- rc = bios_return->res1;
- if (rc)
- goto err_out1;
-
- memcpy(args, bios_return, sizeof(struct app_wmi_args));
- rc = 0;
-
- err_out1:
- kfree(obj);
- err_out0:
- return rc;
-}
-
-static void __init find_micmute_tokens(const struct dmi_header *dm, void *dummy)
-{
- struct dell_bios_calling_interface *calling_interface;
- struct dell_bios_data_token *token;
- int token_size = sizeof(struct dell_bios_data_token);
- int i = 0;
-
- if (dm->type == 0xda && dm->length > 17) {
- calling_interface = container_of(dm,
- struct dell_bios_calling_interface, header);
-
- token = &calling_interface->damap[i];
- while (token->tokenid != 0xffff) {
- if (token->tokenid == GLOBAL_MIC_MUTE_DISABLE)
- memcpy(&dell_mic_tokens[0], token, token_size);
- else if (token->tokenid == GLOBAL_MIC_MUTE_ENABLE)
- memcpy(&dell_mic_tokens[1], token, token_size);
-
- i++;
- token = &calling_interface->damap[i];
- }
- }
-}
-
static int dell_micmute_led_set(int state)
{
- struct app_wmi_args args;
- struct dell_bios_data_token *token;
+ struct calling_interface_buffer *buffer;
+ struct calling_interface_token *token;
if (!wmi_has_guid(DELL_APP_GUID))
return -ENODEV;
- if (state == 0 || state == 1)
- token = &dell_mic_tokens[state];
+ if (state == 0)
+ token = dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE);
+ else if (state == 1)
+ token = dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE);
else
return -EINVAL;
- memset(&args, 0, sizeof(struct app_wmi_args));
-
- args.class = 1;
- args.arg1 = token->location;
- args.arg2 = token->value;
+ if (!token)
+ return -ENODEV;
- dell_wmi_perform_query(&args);
+ buffer = dell_smbios_get_buffer();
+ buffer->input[0] = token->location;
+ buffer->input[1] = token->value;
+ dell_smbios_send_request(1, 0);
+ dell_smbios_release_buffer();
return state;
}
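The calling convention above comes from the shared dell-smbios helpers: look up a token, fill the singleton buffer, issue the request, release the buffer. A standalone sketch of the same sequence, using only the helpers the patch itself calls (the token ID is hypothetical):

static int demo_set_token(u16 tokenid)
{
	struct calling_interface_token *token;
	struct calling_interface_buffer *buffer;

	token = dell_smbios_find_token(tokenid);
	if (!token)
		return -ENODEV;

	buffer = dell_smbios_get_buffer();	/* serializes buffer access */
	buffer->input[0] = token->location;
	buffer->input[1] = token->value;
	dell_smbios_send_request(1, 0);		/* class 1, select 0 */
	dell_smbios_release_buffer();

	return 0;
}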
@@ -177,14 +90,6 @@ int dell_app_wmi_led_set(int whichled, int on)
}
EXPORT_SYMBOL_GPL(dell_app_wmi_led_set);
-static int __init dell_micmute_led_init(void)
-{
- memset(dell_mic_tokens, 0, sizeof(struct dell_bios_data_token) * 2);
- dmi_walk(find_micmute_tokens, NULL);
-
- return 0;
-}
-
struct bios_args {
unsigned char length;
unsigned char result_code;
@@ -330,9 +235,6 @@ static int __init dell_led_init(void)
if (!wmi_has_guid(DELL_LED_BIOS_GUID) && !wmi_has_guid(DELL_APP_GUID))
return -ENODEV;
- if (wmi_has_guid(DELL_APP_GUID))
- error = dell_micmute_led_init();
-
if (wmi_has_guid(DELL_LED_BIOS_GUID)) {
error = led_off();
if (error != 0)
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 14139c337312..aa84e5b37593 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -245,6 +245,8 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
up_write(&led_cdev->trigger_lock);
#endif
+ led_cdev->flags |= LED_UNREGISTERING;
+
/* Stop blinking */
led_stop_software_blink(led_cdev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 19e1e60dfaa3..3495d5d6547f 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -25,6 +25,26 @@ EXPORT_SYMBOL_GPL(leds_list_lock);
LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);
+static int __led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (!led_cdev->brightness_set)
+ return -ENOTSUPP;
+
+ led_cdev->brightness_set(led_cdev, value);
+
+ return 0;
+}
+
+static int __led_set_brightness_blocking(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (!led_cdev->brightness_set_blocking)
+ return -ENOTSUPP;
+
+ return led_cdev->brightness_set_blocking(led_cdev, value);
+}
+
static void led_timer_function(unsigned long data)
{
struct led_classdev *led_cdev = (void *)data;
@@ -91,14 +111,14 @@ static void set_brightness_delayed(struct work_struct *ws)
led_cdev->flags &= ~LED_BLINK_DISABLE;
}
- if (led_cdev->brightness_set)
- led_cdev->brightness_set(led_cdev, led_cdev->delayed_set_value);
- else if (led_cdev->brightness_set_blocking)
- ret = led_cdev->brightness_set_blocking(led_cdev,
- led_cdev->delayed_set_value);
- else
- ret = -ENOTSUPP;
- if (ret < 0)
+ ret = __led_set_brightness(led_cdev, led_cdev->delayed_set_value);
+ if (ret == -ENOTSUPP)
+ ret = __led_set_brightness_blocking(led_cdev,
+ led_cdev->delayed_set_value);
+ if (ret < 0 &&
+ /* LED HW might have been unplugged, therefore don't warn */
+ !(ret == -ENODEV && (led_cdev->flags & LED_UNREGISTERING) &&
+ (led_cdev->flags & LED_HW_PLUGGABLE)))
dev_err(led_cdev->dev,
"Setting an LED's brightness failed (%d)\n", ret);
}
@@ -233,10 +253,8 @@ void led_set_brightness_nopm(struct led_classdev *led_cdev,
enum led_brightness value)
{
/* Use brightness_set op if available, it is guaranteed not to sleep */
- if (led_cdev->brightness_set) {
- led_cdev->brightness_set(led_cdev, value);
+ if (!__led_set_brightness(led_cdev, value))
return;
- }
/* If brightness setting can sleep, delegate it to a work queue task */
led_cdev->delayed_set_value = value;
@@ -267,10 +285,7 @@ int led_set_brightness_sync(struct led_classdev *led_cdev,
if (led_cdev->flags & LED_SUSPENDED)
return 0;
- if (led_cdev->brightness_set_blocking)
- return led_cdev->brightness_set_blocking(led_cdev,
- led_cdev->brightness);
- return -ENOTSUPP;
+ return __led_set_brightness_blocking(led_cdev, led_cdev->brightness);
}
EXPORT_SYMBOL_GPL(led_set_brightness_sync);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index e1e933424ac9..2181581795d3 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -34,9 +34,7 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- char trigger_name[TRIG_NAME_MAX];
struct led_trigger *trig;
- size_t len;
int ret = count;
mutex_lock(&led_cdev->led_access);
@@ -46,21 +44,14 @@ ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
goto unlock;
}
- trigger_name[sizeof(trigger_name) - 1] = '\0';
- strncpy(trigger_name, buf, sizeof(trigger_name) - 1);
- len = strlen(trigger_name);
-
- if (len && trigger_name[len - 1] == '\n')
- trigger_name[len - 1] = '\0';
-
- if (!strcmp(trigger_name, "none")) {
+ if (sysfs_streq(buf, "none")) {
led_trigger_remove(led_cdev);
goto unlock;
}
down_read(&triggers_list_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
- if (!strcmp(trigger_name, trig->name)) {
+ if (sysfs_streq(buf, trig->name)) {
down_write(&led_cdev->trigger_lock);
led_trigger_set(led_cdev, trig);
up_write(&led_cdev->trigger_lock);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 1ad4d03a0a3c..77a104d2b124 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -195,7 +195,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
sprintf(data->name, "led1-blue");
break;
}
- platform_set_drvdata(pdev, data);
data->chip = chip;
data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
data->port = pdev->id;
@@ -208,7 +207,7 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->cdev.brightness_set_blocking = pm860x_led_set;
mutex_init(&data->lock);
- ret = led_classdev_register(chip->dev, &data->cdev);
+ ret = devm_led_classdev_register(chip->dev, &data->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
return ret;
@@ -217,21 +216,12 @@ static int pm860x_led_probe(struct platform_device *pdev)
return 0;
}
-static int pm860x_led_remove(struct platform_device *pdev)
-{
- struct pm860x_led *data = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&data->cdev);
-
- return 0;
-}
static struct platform_driver pm860x_led_driver = {
.driver = {
.name = "88pm860x-led",
},
.probe = pm860x_led_probe,
- .remove = pm860x_led_remove,
};
module_platform_driver(pm860x_led_driver);
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index 4752a2b6ba2b..5ff7d72f73aa 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -113,21 +113,12 @@ static int da903x_led_probe(struct platform_device *pdev)
led->flags = pdata->flags;
led->master = pdev->dev.parent;
- ret = led_classdev_register(led->master, &led->cdev);
+ ret = devm_led_classdev_register(led->master, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", id);
return ret;
}
- platform_set_drvdata(pdev, led);
- return 0;
-}
-
-static int da903x_led_remove(struct platform_device *pdev)
-{
- struct da903x_led *led = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&led->cdev);
return 0;
}
@@ -136,7 +127,6 @@ static struct platform_driver da903x_led_driver = {
.name = "da903x-led",
},
.probe = da903x_led_probe,
- .remove = da903x_led_remove,
};
module_platform_driver(da903x_led_driver);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 7bc53280dbfd..61143f55597e 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -86,7 +86,7 @@ static int create_gpio_led(const struct gpio_led *template,
* still uses GPIO numbers. Ultimately we would like to get
* rid of this block completely.
*/
- unsigned long flags = 0;
+ unsigned long flags = GPIOF_OUT_INIT_LOW;
/* skip leds that aren't available */
if (!gpio_is_valid(template->gpio)) {
@@ -104,8 +104,8 @@ static int create_gpio_led(const struct gpio_led *template,
return ret;
led_dat->gpiod = gpio_to_desc(template->gpio);
- if (IS_ERR(led_dat->gpiod))
- return PTR_ERR(led_dat->gpiod);
+ if (!led_dat->gpiod)
+ return -EINVAL;
}
led_dat->cdev.name = template->name;
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
new file mode 100644
index 000000000000..c901d132d80c
--- /dev/null
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -0,0 +1,508 @@
+/*
+ * Driver for ISSI IS31FL32xx family of I2C LED controllers
+ *
+ * Copyright 2015 Allworx Corp.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Datasheets:
+ * http://www.issi.com/US/product-analog-fxled-driver.shtml
+ * http://www.si-en.com/product.asp?parentid=890
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+/* Used to indicate a device has no such register */
+#define IS31FL32XX_REG_NONE 0xFF
+
+/* Software Shutdown bit in Shutdown Register */
+#define IS31FL32XX_SHUTDOWN_SSD_ENABLE 0
+#define IS31FL32XX_SHUTDOWN_SSD_DISABLE BIT(0)
+
+/* IS31FL3216 has a number of unique registers */
+#define IS31FL3216_CONFIG_REG 0x00
+#define IS31FL3216_LIGHTING_EFFECT_REG 0x03
+#define IS31FL3216_CHANNEL_CONFIG_REG 0x04
+
+/* Software Shutdown bit in 3216 Config Register */
+#define IS31FL3216_CONFIG_SSD_ENABLE BIT(7)
+#define IS31FL3216_CONFIG_SSD_DISABLE 0
+
+struct is31fl32xx_priv;
+struct is31fl32xx_led_data {
+ struct led_classdev cdev;
+ u8 channel; /* 1-based, max priv->cdef->channels */
+ struct is31fl32xx_priv *priv;
+};
+
+struct is31fl32xx_priv {
+ const struct is31fl32xx_chipdef *cdef;
+ struct i2c_client *client;
+ unsigned int num_leds;
+ struct is31fl32xx_led_data leds[0];
+};
+
+/**
+ * struct is31fl32xx_chipdef - chip-specific attributes
+ * @channels : Number of LED channels
+ * @shutdown_reg : address of Shutdown register (optional)
+ * @pwm_update_reg : address of PWM Update register
+ * @global_control_reg : address of Global Control register (optional)
+ * @reset_reg : address of Reset register (optional)
+ * @pwm_register_base : address of first PWM register
+ * @pwm_registers_reversed : true if PWM registers count down instead of up
+ * @led_control_register_base : address of first LED control register (optional)
+ * @enable_bits_per_led_control_register : number of LED enable bits in
+ * each LED control register
+ * @reset_func : pointer to reset function
+ * @sw_shutdown_func : pointer to software shutdown function
+ *
+ * For all optional register addresses, the sentinel value %IS31FL32XX_REG_NONE
+ * indicates that this chip has no such register.
+ *
+ * If non-NULL, @reset_func will be called during probing to set all
+ * necessary registers to a known initialization state. This is needed
+ * for chips that do not have a @reset_reg.
+ *
+ * @enable_bits_per_led_control_register must be >=1 if
+ * @led_control_register_base != %IS31FL32XX_REG_NONE.
+ */
+struct is31fl32xx_chipdef {
+ u8 channels;
+ u8 shutdown_reg;
+ u8 pwm_update_reg;
+ u8 global_control_reg;
+ u8 reset_reg;
+ u8 pwm_register_base;
+ bool pwm_registers_reversed;
+ u8 led_control_register_base;
+ u8 enable_bits_per_led_control_register;
+ int (*reset_func)(struct is31fl32xx_priv *priv);
+ int (*sw_shutdown_func)(struct is31fl32xx_priv *priv, bool enable);
+};
+
+static const struct is31fl32xx_chipdef is31fl3236_cdef = {
+ .channels = 36,
+ .shutdown_reg = 0x00,
+ .pwm_update_reg = 0x25,
+ .global_control_reg = 0x4a,
+ .reset_reg = 0x4f,
+ .pwm_register_base = 0x01,
+ .led_control_register_base = 0x26,
+ .enable_bits_per_led_control_register = 1,
+};
+
+static const struct is31fl32xx_chipdef is31fl3235_cdef = {
+ .channels = 28,
+ .shutdown_reg = 0x00,
+ .pwm_update_reg = 0x25,
+ .global_control_reg = 0x4a,
+ .reset_reg = 0x4f,
+ .pwm_register_base = 0x05,
+ .led_control_register_base = 0x2a,
+ .enable_bits_per_led_control_register = 1,
+};
+
+static const struct is31fl32xx_chipdef is31fl3218_cdef = {
+ .channels = 18,
+ .shutdown_reg = 0x00,
+ .pwm_update_reg = 0x16,
+ .global_control_reg = IS31FL32XX_REG_NONE,
+ .reset_reg = 0x17,
+ .pwm_register_base = 0x01,
+ .led_control_register_base = 0x13,
+ .enable_bits_per_led_control_register = 6,
+};
+
+static int is31fl3216_reset(struct is31fl32xx_priv *priv);
+static int is31fl3216_software_shutdown(struct is31fl32xx_priv *priv,
+ bool enable);
+static const struct is31fl32xx_chipdef is31fl3216_cdef = {
+ .channels = 16,
+ .shutdown_reg = IS31FL32XX_REG_NONE,
+ .pwm_update_reg = 0xB0,
+ .global_control_reg = IS31FL32XX_REG_NONE,
+ .reset_reg = IS31FL32XX_REG_NONE,
+ .pwm_register_base = 0x10,
+ .pwm_registers_reversed = true,
+ .led_control_register_base = 0x01,
+ .enable_bits_per_led_control_register = 8,
+ .reset_func = is31fl3216_reset,
+ .sw_shutdown_func = is31fl3216_software_shutdown,
+};
+
+static int is31fl32xx_write(struct is31fl32xx_priv *priv, u8 reg, u8 val)
+{
+ int ret;
+
+ dev_dbg(&priv->client->dev, "writing register 0x%02X=0x%02X\n", reg, val);
+
+ ret = i2c_smbus_write_byte_data(priv->client, reg, val);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "register write to 0x%02X failed (error %d)",
+ reg, ret);
+ }
+ return ret;
+}
+
+/*
+ * Custom reset function for IS31FL3216 because it does not have a RESET
+ * register the way that the other IS31FL32xx chips do. We don't bother
+ * writing the GPIO and animation registers, because the registers we
+ * do write ensure those will have no effect.
+ */
+static int is31fl3216_reset(struct is31fl32xx_priv *priv)
+{
+ unsigned int i;
+ int ret;
+
+ ret = is31fl32xx_write(priv, IS31FL3216_CONFIG_REG,
+ IS31FL3216_CONFIG_SSD_ENABLE);
+ if (ret)
+ return ret;
+ for (i = 0; i < priv->cdef->channels; i++) {
+ ret = is31fl32xx_write(priv, priv->cdef->pwm_register_base+i,
+ 0x00);
+ if (ret)
+ return ret;
+ }
+ ret = is31fl32xx_write(priv, priv->cdef->pwm_update_reg, 0);
+ if (ret)
+ return ret;
+ ret = is31fl32xx_write(priv, IS31FL3216_LIGHTING_EFFECT_REG, 0x00);
+ if (ret)
+ return ret;
+ ret = is31fl32xx_write(priv, IS31FL3216_CHANNEL_CONFIG_REG, 0x00);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Custom Software-Shutdown function for IS31FL3216 because it does not have
+ * a SHUTDOWN register the way that the other IS31FL32xx chips do.
+ * We don't bother doing a read/modify/write on the CONFIG register because
+ * we only ever use a value of '0' for the other fields in that register.
+ */
+static int is31fl3216_software_shutdown(struct is31fl32xx_priv *priv,
+ bool enable)
+{
+ u8 value = enable ? IS31FL3216_CONFIG_SSD_ENABLE :
+ IS31FL3216_CONFIG_SSD_DISABLE;
+
+ return is31fl32xx_write(priv, IS31FL3216_CONFIG_REG, value);
+}
+
+/*
+ * NOTE: A mutex is not needed in this function because:
+ * - All referenced data is read-only after probe()
+ * - The I2C core holds a per-adapter mutex to protect the bus
+ * - There are no read/modify/write operations
+ * - Intervening operations between the write of the PWM register
+ * and the Update register are harmless.
+ *
+ * Example:
+ * PWM_REG_1 write 16
+ * UPDATE_REG write 0
+ * PWM_REG_2 write 128
+ * UPDATE_REG write 0
+ * vs:
+ * PWM_REG_1 write 16
+ * PWM_REG_2 write 128
+ * UPDATE_REG write 0
+ * UPDATE_REG write 0
+ * are equivalent. Poking the Update register merely applies all PWM
+ * register writes up to that point.
+ */
+static int is31fl32xx_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ const struct is31fl32xx_led_data *led_data =
+ container_of(led_cdev, struct is31fl32xx_led_data, cdev);
+ const struct is31fl32xx_chipdef *cdef = led_data->priv->cdef;
+ u8 pwm_register_offset;
+ int ret;
+
+ dev_dbg(led_cdev->dev, "%s: %d\n", __func__, brightness);
+
+ /* NOTE: led_data->channel is 1-based */
+ if (cdef->pwm_registers_reversed)
+ pwm_register_offset = cdef->channels - led_data->channel;
+ else
+ pwm_register_offset = led_data->channel - 1;
+
+ ret = is31fl32xx_write(led_data->priv,
+ cdef->pwm_register_base + pwm_register_offset,
+ brightness);
+ if (ret)
+ return ret;
+
+ return is31fl32xx_write(led_data->priv, cdef->pwm_update_reg, 0);
+}
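For chips whose PWM registers count down (the IS31FL3216 sets pwm_registers_reversed above), the 1-based channel maps to offset channels - channel, so channel 1 lands on offset 15 of a 16-channel part and channel 16 on offset 0; forward chips simply use channel - 1. A sketch of that mapping in isolation:

static u8 demo_pwm_offset(const struct is31fl32xx_chipdef *cdef, u8 channel)
{
	/* channel is 1-based, as in is31fl32xx_led_data */
	return cdef->pwm_registers_reversed ?
	       cdef->channels - channel : channel - 1;
}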
+
+static int is31fl32xx_reset_regs(struct is31fl32xx_priv *priv)
+{
+ const struct is31fl32xx_chipdef *cdef = priv->cdef;
+ int ret;
+
+ if (cdef->reset_reg != IS31FL32XX_REG_NONE) {
+ ret = is31fl32xx_write(priv, cdef->reset_reg, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (cdef->reset_func)
+ return cdef->reset_func(priv);
+
+ return 0;
+}
+
+static int is31fl32xx_software_shutdown(struct is31fl32xx_priv *priv,
+ bool enable)
+{
+ const struct is31fl32xx_chipdef *cdef = priv->cdef;
+ int ret;
+
+ if (cdef->shutdown_reg != IS31FL32XX_REG_NONE) {
+ u8 value = enable ? IS31FL32XX_SHUTDOWN_SSD_ENABLE :
+ IS31FL32XX_SHUTDOWN_SSD_DISABLE;
+ ret = is31fl32xx_write(priv, cdef->shutdown_reg, value);
+ if (ret)
+ return ret;
+ }
+
+ if (cdef->sw_shutdown_func)
+ return cdef->sw_shutdown_func(priv, enable);
+
+ return 0;
+}
+
+static int is31fl32xx_init_regs(struct is31fl32xx_priv *priv)
+{
+ const struct is31fl32xx_chipdef *cdef = priv->cdef;
+ int ret;
+
+ ret = is31fl32xx_reset_regs(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * Set enable bit for all channels.
+ * We will control state with PWM registers alone.
+ */
+ if (cdef->led_control_register_base != IS31FL32XX_REG_NONE) {
+ u8 value =
+ GENMASK(cdef->enable_bits_per_led_control_register-1, 0);
+ u8 num_regs = cdef->channels /
+ cdef->enable_bits_per_led_control_register;
+ int i;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = is31fl32xx_write(priv,
+ cdef->led_control_register_base+i,
+ value);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = is31fl32xx_software_shutdown(priv, false);
+ if (ret)
+ return ret;
+
+ if (cdef->global_control_reg != IS31FL32XX_REG_NONE) {
+ ret = is31fl32xx_write(priv, cdef->global_control_reg, 0x00);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
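As a concrete instance of the enable-bit loop above, the IS31FL3218 declares 18 channels with 6 enable bits per control register, so value = GENMASK(5, 0) = 0x3f is written to 18 / 6 = 3 consecutive registers. A standalone sketch of the same loop:

static int demo_enable_all(struct is31fl32xx_priv *priv)
{
	const struct is31fl32xx_chipdef *cdef = priv->cdef;
	u8 value = GENMASK(cdef->enable_bits_per_led_control_register - 1, 0);
	u8 num_regs = cdef->channels /
		      cdef->enable_bits_per_led_control_register;
	int i, ret;

	for (i = 0; i < num_regs; i++) {
		ret = is31fl32xx_write(priv,
				       cdef->led_control_register_base + i,
				       value);
		if (ret)
			return ret;
	}
	return 0;
}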
+
+static inline size_t sizeof_is31fl32xx_priv(int num_leds)
+{
+ return sizeof(struct is31fl32xx_priv) +
+ (sizeof(struct is31fl32xx_led_data) * num_leds);
+}
+
+static int is31fl32xx_parse_child_dt(const struct device *dev,
+ const struct device_node *child,
+ struct is31fl32xx_led_data *led_data)
+{
+ struct led_classdev *cdev = &led_data->cdev;
+ int ret = 0;
+ u32 reg;
+
+ if (of_property_read_string(child, "label", &cdev->name))
+ cdev->name = child->name;
+
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret || reg < 1 || reg > led_data->priv->cdef->channels) {
+ dev_err(dev,
+ "Child node %s does not have a valid reg property\n",
+ child->full_name);
+ return -EINVAL;
+ }
+ led_data->channel = reg;
+
+ of_property_read_string(child, "linux,default-trigger",
+ &cdev->default_trigger);
+
+ cdev->brightness_set_blocking = is31fl32xx_brightness_set;
+
+ return 0;
+}
+
+static struct is31fl32xx_led_data *is31fl32xx_find_led_data(
+ struct is31fl32xx_priv *priv,
+ u8 channel)
+{
+ size_t i;
+
+ for (i = 0; i < priv->num_leds; i++) {
+ if (priv->leds[i].channel == channel)
+ return &priv->leds[i];
+ }
+
+ return NULL;
+}
+
+static int is31fl32xx_parse_dt(struct device *dev,
+ struct is31fl32xx_priv *priv)
+{
+ struct device_node *child;
+ int ret = 0;
+
+ for_each_child_of_node(dev->of_node, child) {
+ struct is31fl32xx_led_data *led_data =
+ &priv->leds[priv->num_leds];
+ const struct is31fl32xx_led_data *other_led_data;
+
+ led_data->priv = priv;
+
+ ret = is31fl32xx_parse_child_dt(dev, child, led_data);
+ if (ret)
+ goto err;
+
+ /* Detect if channel is already in use by another child */
+ other_led_data = is31fl32xx_find_led_data(priv,
+ led_data->channel);
+ if (other_led_data) {
+ dev_err(dev,
+ "%s and %s both attempting to use channel %d\n",
+ led_data->cdev.name,
+ other_led_data->cdev.name,
+ led_data->channel);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = devm_led_classdev_register(dev, &led_data->cdev);
+ if (ret) {
+ dev_err(dev, "failed to register PWM led for %s: %d\n",
+ led_data->cdev.name, ret);
+ goto err;
+ }
+
+ priv->num_leds++;
+ }
+
+ return 0;
+
+err:
+ of_node_put(child);
+ return ret;
+}
+
+static const struct of_device_id of_is31fl31xx_match[] = {
+ { .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, },
+ { .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, },
+ { .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, },
+ { .compatible = "si-en,sn3218", .data = &is31fl3218_cdef, },
+ { .compatible = "issi,is31fl3216", .data = &is31fl3216_cdef, },
+ { .compatible = "si-en,sn3216", .data = &is31fl3216_cdef, },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, of_is31fl31xx_match);
+
+static int is31fl32xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct is31fl32xx_chipdef *cdef;
+ const struct of_device_id *of_dev_id;
+ struct device *dev = &client->dev;
+ struct is31fl32xx_priv *priv;
+ int count;
+ int ret = 0;
+
+ of_dev_id = of_match_device(of_is31fl31xx_match, dev);
+ if (!of_dev_id)
+ return -EINVAL;
+
+ cdef = of_dev_id->data;
+
+ count = of_get_child_count(dev->of_node);
+ if (!count)
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof_is31fl32xx_priv(count),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ priv->cdef = cdef;
+ i2c_set_clientdata(client, priv);
+
+ ret = is31fl32xx_init_regs(priv);
+ if (ret)
+ return ret;
+
+ ret = is31fl32xx_parse_dt(dev, priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int is31fl32xx_remove(struct i2c_client *client)
+{
+ struct is31fl32xx_priv *priv = i2c_get_clientdata(client);
+
+ return is31fl32xx_reset_regs(priv);
+}
+
+/*
+ * i2c-core requires that id_table be non-NULL, even though
+ * it is not used for DeviceTree based instantiation.
+ */
+static const struct i2c_device_id is31fl31xx_id[] = {
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, is31fl31xx_id);
+
+static struct i2c_driver is31fl32xx_driver = {
+ .driver = {
+ .name = "is31fl32xx",
+ .of_match_table = of_is31fl31xx_match,
+ },
+ .probe = is31fl32xx_probe,
+ .remove = is31fl32xx_remove,
+ .id_table = is31fl31xx_id,
+};
+
+module_i2c_driver(is31fl32xx_driver);
+
+MODULE_AUTHOR("David Rivshin <drivshin@allworx.com>");
+MODULE_DESCRIPTION("ISSI IS31FL32xx LED driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index 196dcb5e6004..5b529dc013d2 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -698,7 +698,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, led);
- ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+ ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
return ret;
@@ -708,18 +708,13 @@ static int lm3533_led_probe(struct platform_device *pdev)
ret = lm3533_led_setup(led, pdata);
if (ret)
- goto err_unregister;
+ return ret;
ret = lm3533_ctrlbank_enable(&led->cb);
if (ret)
- goto err_unregister;
+ return ret;
return 0;
-
-err_unregister:
- led_classdev_unregister(&led->cdev);
-
- return ret;
}
static int lm3533_led_remove(struct platform_device *pdev)
@@ -729,7 +724,6 @@ static int lm3533_led_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s\n", __func__);
lm3533_ctrlbank_disable(&led->cb);
- led_classdev_unregister(&led->cdev);
return 0;
}
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 6c758aea1bbd..be60c181222a 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -199,8 +199,11 @@ static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
if (status > LP3944_LED_STATUS_DIM1)
return -EINVAL;
- /* invert only 0 and 1, leave unchanged the other values,
- * remember we are abusing status to set blink patterns
+ /*
+ * Invert status only when it's < 2 (i.e. 0 or 1) which means it's
+ * controlling the on/off state directly.
+ * When, instead, status is >= 2 don't invert it because it would mean
+ * to mess with the hardware blinking mode.
*/
if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
status = 1 - status;
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 0eee38fc0565..38c253a43700 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -146,15 +146,13 @@ static int lp8788_led_probe(struct platform_device *pdev)
mutex_init(&led->lock);
- platform_set_drvdata(pdev, led);
-
ret = lp8788_led_init_device(led, led_pdata);
if (ret) {
dev_err(dev, "led init device err: %d\n", ret);
return ret;
}
- ret = led_classdev_register(dev, &led->led_dev);
+ ret = devm_led_classdev_register(dev, &led->led_dev);
if (ret) {
dev_err(dev, "led register err: %d\n", ret);
return ret;
@@ -163,18 +161,8 @@ static int lp8788_led_probe(struct platform_device *pdev)
return 0;
}
-static int lp8788_led_remove(struct platform_device *pdev)
-{
- struct lp8788_led *led = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&led->led_dev);
-
- return 0;
-}
-
static struct platform_driver lp8788_led_driver = {
.probe = lp8788_led_probe,
- .remove = lp8788_led_remove,
.driver = {
.name = LP8788_DEV_KEYLED,
},
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index 01b459069358..4edf74f1d6d4 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -281,30 +281,18 @@ static int max8997_led_probe(struct platform_device *pdev)
mutex_init(&led->mutex);
- platform_set_drvdata(pdev, led);
-
- ret = led_classdev_register(&pdev->dev, &led->cdev);
+ ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
return ret;
return 0;
}
-static int max8997_led_remove(struct platform_device *pdev)
-{
- struct max8997_led *led = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&led->cdev);
-
- return 0;
-}
-
static struct platform_driver max8997_led_driver = {
.driver = {
.name = "max8997-led",
},
.probe = max8997_led_probe,
- .remove = max8997_led_remove,
};
module_platform_driver(max8997_led_driver);
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 83641a7b299a..404da451cb88 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -29,11 +29,6 @@ struct s3c24xx_gpio_led {
struct s3c24xx_led_platdata *pdata;
};
-static inline struct s3c24xx_gpio_led *pdev_to_gpio(struct platform_device *dev)
-{
- return platform_get_drvdata(dev);
-}
-
static inline struct s3c24xx_gpio_led *to_gpio(struct led_classdev *led_cdev)
{
return container_of(led_cdev, struct s3c24xx_gpio_led, cdev);
@@ -59,15 +54,6 @@ static void s3c24xx_led_set(struct led_classdev *led_cdev,
}
}
-static int s3c24xx_led_remove(struct platform_device *dev)
-{
- struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
-
- led_classdev_unregister(&led->cdev);
-
- return 0;
-}
-
static int s3c24xx_led_probe(struct platform_device *dev)
{
struct s3c24xx_led_platdata *pdata = dev_get_platdata(&dev->dev);
@@ -79,8 +65,6 @@ static int s3c24xx_led_probe(struct platform_device *dev)
if (!led)
return -ENOMEM;
- platform_set_drvdata(dev, led);
-
led->cdev.brightness_set = s3c24xx_led_set;
led->cdev.default_trigger = pdata->def_trigger;
led->cdev.name = pdata->name;
@@ -104,7 +88,7 @@ static int s3c24xx_led_probe(struct platform_device *dev)
/* register our new led device */
- ret = led_classdev_register(&dev->dev, &led->cdev);
+ ret = devm_led_classdev_register(&dev->dev, &led->cdev);
if (ret < 0)
dev_err(&dev->dev, "led_classdev_register failed\n");
@@ -113,7 +97,6 @@ static int s3c24xx_led_probe(struct platform_device *dev)
static struct platform_driver s3c24xx_led_driver = {
.probe = s3c24xx_led_probe,
- .remove = s3c24xx_led_remove,
.driver = {
.name = "s3c24xx_led",
},
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index 64a22263e7fc..be93b20e792a 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -239,7 +239,6 @@ static int wm831x_status_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- platform_set_drvdata(pdev, drvdata);
drvdata->wm831x = wm831x;
drvdata->reg = res->start;
@@ -284,7 +283,7 @@ static int wm831x_status_probe(struct platform_device *pdev)
drvdata->cdev.blink_set = wm831x_status_blink_set;
drvdata->cdev.groups = wm831x_status_groups;
- ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
+ ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
return ret;
@@ -293,21 +292,11 @@ static int wm831x_status_probe(struct platform_device *pdev)
return 0;
}
-static int wm831x_status_remove(struct platform_device *pdev)
-{
- struct wm831x_status *drvdata = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&drvdata->cdev);
-
- return 0;
-}
-
static struct platform_driver wm831x_status_driver = {
.driver = {
.name = "wm831x-status",
},
.probe = wm831x_status_probe,
- .remove = wm831x_status_remove,
};
module_platform_driver(wm831x_status_driver);
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index eb934b0242e0..67392b6ab845 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -331,7 +331,7 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
* Actually now I think of it, it's possible that Ron *is* half the Plan 9
* userbase. Oh well.
*/
-static bool could_be_syscall(unsigned int num)
+bool could_be_syscall(unsigned int num)
{
/* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
@@ -416,6 +416,10 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
*
* This routine indicates if a particular trap number could be delivered
* directly.
+ *
+ * Unfortunately, Linux 4.6 started using an interrupt gate instead of a
+ * trap gate for syscalls, so this trick is ineffective. See Mastery for
+ * how we could do this anyway...
*/
static bool direct_trap(unsigned int num)
{
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index ac8ad0461e80..69b3814afd2f 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -167,6 +167,7 @@ void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta);
bool send_notify_to_eventfd(struct lg_cpu *cpu);
void init_clockdev(struct lg_cpu *cpu);
bool check_syscall_vector(struct lguest *lg);
+bool could_be_syscall(unsigned int num);
int init_interrupts(void);
void free_interrupts(void);
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6a4cd771a2be..adc162c7040d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -429,8 +429,12 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
return;
break;
case 32 ... 255:
+ /* This might be a syscall. */
+ if (could_be_syscall(cpu->regs->trapnum))
+ break;
+
/*
- * These values mean a real interrupt occurred, in which case
+ * Other values mean a real interrupt occurred, in which case
* the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
* return to run the Guest again.
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 9f6acd5d1d2e..0dc9a80adb94 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -250,7 +250,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
return 0;
}
- plane_cnt = (1 << dev->plane_mode);
+ plane_cnt = dev->plane_mode;
rqd->nr_pages = plane_cnt * nr_ppas;
if (dev->ops->max_phys_sect < rqd->nr_pages)
@@ -463,13 +463,14 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
- dev->total_blocks = dev->nr_planes *
- dev->blks_per_lun *
- dev->luns_per_chnl *
- dev->nr_chnls;
- dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
+ dev->total_secs = dev->nr_luns * dev->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!dev->lun_map)
+ return -ENOMEM;
INIT_LIST_HEAD(&dev->online_targets);
mutex_init(&dev->mlock);
+ spin_lock_init(&dev->lock);
return 0;
}
@@ -589,6 +590,7 @@ int nvm_register(struct request_queue *q, char *disk_name,
return 0;
err_init:
+ kfree(dev->lun_map);
kfree(dev);
return ret;
}
@@ -611,6 +613,7 @@ void nvm_unregister(char *disk_name)
up_write(&nvm_lock);
nvm_exit(dev);
+ kfree(dev->lun_map);
kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -872,20 +875,19 @@ static int nvm_configure_by_str_event(const char *val,
static int nvm_configure_get(char *buf, const struct kernel_param *kp)
{
- int sz = 0;
- char *buf_start = buf;
+ int sz;
struct nvm_dev *dev;
- buf += sprintf(buf, "available devices:\n");
+ sz = sprintf(buf, "available devices:\n");
down_write(&nvm_lock);
list_for_each_entry(dev, &nvm_devices, devices) {
- if (sz > 4095 - DISK_NAME_LEN)
+ if (sz > 4095 - DISK_NAME_LEN - 2)
break;
- buf += sprintf(buf, " %32s\n", dev->name);
+ sz += sprintf(buf + sz, " %32s\n", dev->name);
}
up_write(&nvm_lock);
- return buf - buf_start - 1;
+ return sz;
}
static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
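
The nvm_configure_get() fix above replaces pointer arithmetic on buf (whose final "buf - buf_start - 1" return undercounted the output by one byte) with an explicit sz offset, so the returned length is exactly what was written. A standalone, non-kernel illustration of the accumulating-offset idiom:

#include <stdio.h>

int main(void)
{
	char buf[4096];
	int sz = sprintf(buf, "available devices:\n");

	/* Each call appends at buf + sz and grows sz by what it wrote. */
	sz += sprintf(buf + sz, "  %32s\n", "nvme0n1");
	sz += sprintf(buf + sz, "  %32s\n", "nvme1n1");
	printf("%s(%d bytes)\n", buf, sz);
	return 0;
}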
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 7fb725b16148..72e124a3927d 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,68 @@
#include "gennvm.h"
+static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area, *prev, *next;
+ sector_t begin = 0;
+ sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+
+ if (len > max_sectors)
+ return -EINVAL;
+
+ area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+ if (!area)
+ return -ENOMEM;
+
+ prev = NULL;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(next, &gn->area_list, list) {
+ if (begin + len > next->begin) {
+ begin = next->end;
+ prev = next;
+ continue;
+ }
+ break;
+ }
+
+ if ((begin + len) > max_sectors) {
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return -EINVAL;
+ }
+
+ area->begin = *lba = begin;
+ area->end = begin + len;
+
+ if (prev) /* insert into sorted order */
+ list_add(&area->list, &prev->list);
+ else
+ list_add(&area->list, &gn->area_list);
+ spin_unlock(&dev->lock);
+
+ return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+ struct gen_nvm *gn = dev->mp;
+ struct gennvm_area *area;
+
+ spin_lock(&dev->lock);
+ list_for_each_entry(area, &gn->area_list, list) {
+ if (area->begin != begin)
+ continue;
+
+ list_del(&area->list);
+ spin_unlock(&dev->lock);
+ kfree(area);
+ return;
+ }
+ spin_unlock(&dev->lock);
+}
+
static void gennvm_blocks_free(struct nvm_dev *dev)
{
struct gen_nvm *gn = dev->mp;
@@ -100,14 +162,13 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
struct nvm_dev *dev = private;
struct gen_nvm *gn = dev->mp;
- sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
u64 elba = slba + nlb;
struct gen_lun *lun;
struct nvm_block *blk;
u64 i;
int lun_id;
- if (unlikely(elba > dev->total_pages)) {
+ if (unlikely(elba > dev->total_secs)) {
pr_err("gennvm: L2P data from device is out of bounds!\n");
return -EINVAL;
}
@@ -115,7 +176,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
for (i = 0; i < nlb; i++) {
u64 pba = le64_to_cpu(entries[i]);
- if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+ if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("gennvm: L2P data entry is out of bounds!\n");
return -EINVAL;
}
@@ -196,8 +257,8 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
}
}
- if (dev->ops->get_l2p_tbl) {
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
+ if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
+ ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
gennvm_block_map, dev);
if (ret) {
pr_err("gennvm: could not read L2P table.\n");
@@ -230,6 +291,7 @@ static int gennvm_register(struct nvm_dev *dev)
gn->dev = dev;
gn->nr_luns = dev->nr_luns;
+ INIT_LIST_HEAD(&gn->area_list);
dev->mp = gn;
ret = gennvm_luns_init(dev, gn);
@@ -420,10 +482,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
return nvm_erase_ppa(dev, &addr, 1);
}
+static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+{
+ return test_and_set_bit(lunid, dev->lun_map);
+}
+
+static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+{
+ WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+}
+
static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
struct gen_nvm *gn = dev->mp;
+ if (unlikely(lunid >= dev->nr_luns))
+ return NULL;
+
return &gn->luns[lunid].vlun;
}
@@ -465,7 +540,13 @@ static struct nvmm_type gennvm = {
.erase_blk = gennvm_erase_blk,
.get_lun = gennvm_get_lun,
+ .reserve_lun = gennvm_reserve_lun,
+ .release_lun = gennvm_release_lun,
.lun_info_print = gennvm_lun_info_print,
+
+ .get_area = gennvm_get_area,
+ .put_area = gennvm_put_area,
+
};
static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b32dac..04d7c23cfc61 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@ struct gen_nvm {
int nr_luns;
struct gen_lun *luns;
+ struct list_head area_list;
};
+struct gennvm_area {
+ struct list_head list;
+ sector_t begin;
+ sector_t end; /* end is excluded */
+};
#define gennvm_for_each_lun(bm, lun, i) \
for ((i) = 0, lun = &(bm)->luns[0]; \
(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
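
gennvm_get_area() above implements a first-fit scan over the sorted area_list: the candidate start is bumped past every existing [begin, end) area it would overlap, and the walk stops at the first gap wide enough for the request. A standalone sketch with made-up sample areas:

#include <stdio.h>

struct area { unsigned long begin, end; };	/* end is exclusive */

int main(void)
{
	struct area areas[] = { { 0, 100 }, { 150, 200 } };	/* sorted */
	unsigned long begin = 0, len = 50;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		if (begin + len > areas[i].begin) {
			begin = areas[i].end;	/* would overlap: skip past */
			continue;
		}
		break;	/* the gap before areas[i] is wide enough */
	}
	printf("allocated [%lu, %lu)\n", begin, begin + len);	/* [100, 150) */
	return 0;
}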
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 307db1ea22de..3ab6495c3fd8 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -38,7 +38,7 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
spin_lock(&rblk->lock);
- div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+ div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
rblk->nr_invalid_pages++;
@@ -113,14 +113,24 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
- return (rblk->next_page == rrpc->dev->pgs_per_blk);
+ return (rblk->next_page == rrpc->dev->sec_per_blk);
}
+/* Calculate relative addr for the given block, considering instantiated LUNs */
+static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+ struct nvm_block *blk = rblk->parent;
+ int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
+
+ return lun_blk * rrpc->dev->sec_per_blk;
+}
+
+/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
struct nvm_block *blk = rblk->parent;
- return blk->id * rrpc->dev->pgs_per_blk;
+ return blk->id * rrpc->dev->sec_per_blk;
}
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
@@ -136,7 +146,7 @@ static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
l.g.sec = secs;
sector_div(ppa, dev->sec_per_pg);
- div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+ div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
l.g.pg = pgs;
sector_div(ppa, dev->pgs_per_blk);
@@ -191,12 +201,12 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
return NULL;
}
- rblk = &rlun->blocks[blk->id];
+ rblk = rrpc_get_rblk(rlun, blk->id);
list_add_tail(&rblk->list, &rlun->open_list);
spin_unlock(&lun->lock);
blk->priv = rblk;
- bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
+ bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
rblk->next_page = 0;
rblk->nr_invalid_pages = 0;
atomic_set(&rblk->data_cmnt_size, 0);
@@ -286,11 +296,11 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
struct bio *bio;
struct page *page;
int slot;
- int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
+ int nr_sec_per_blk = rrpc->dev->sec_per_blk;
u64 phys_addr;
DECLARE_COMPLETION_ONSTACK(wait);
- if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
+ if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
return 0;
bio = bio_alloc(GFP_NOIO, 1);
@@ -306,10 +316,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
}
while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_pgs_per_blk)) < nr_pgs_per_blk) {
+ nr_sec_per_blk)) < nr_sec_per_blk) {
/* Lock laddr */
- phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+ phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
try:
spin_lock(&rrpc->rev_lock);
@@ -381,7 +391,7 @@ finished:
mempool_free(page, rrpc->page_pool);
bio_put(bio);
- if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
+ if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
pr_err("nvm: failed to garbage collect block\n");
return -EIO;
}
@@ -499,12 +509,21 @@ static void rrpc_gc_queue(struct work_struct *work)
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
struct nvm_lun *lun = rblk->parent->lun;
+ struct nvm_block *blk = rblk->parent;
struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
spin_unlock(&rlun->lock);
+ spin_lock(&lun->lock);
+ lun->nr_open_blocks--;
+ lun->nr_closed_blocks++;
+ blk->state &= ~NVM_BLK_ST_OPEN;
+ blk->state |= NVM_BLK_ST_CLOSED;
+ list_move_tail(&rblk->list, &rlun->closed_list);
+ spin_unlock(&lun->lock);
+
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
rblk->parent->id);
@@ -545,7 +564,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
struct rrpc_addr *gp;
struct rrpc_rev_addr *rev;
- BUG_ON(laddr >= rrpc->nr_pages);
+ BUG_ON(laddr >= rrpc->nr_sects);
gp = &rrpc->trans_map[laddr];
spin_lock(&rrpc->rev_lock);
@@ -668,20 +687,8 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
lun = rblk->parent->lun;
cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk)) {
- struct nvm_block *blk = rblk->parent;
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&lun->lock);
- lun->nr_open_blocks--;
- lun->nr_closed_blocks++;
- blk->state &= ~NVM_BLK_ST_OPEN;
- blk->state |= NVM_BLK_ST_CLOSED;
- list_move_tail(&rblk->list, &rlun->closed_list);
- spin_unlock(&lun->lock);
-
+ if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
rrpc_run_gc(rrpc, rblk);
- }
}
}
@@ -726,7 +733,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
for (i = 0; i < npages; i++) {
/* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
+ BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
gp = &rrpc->trans_map[laddr + i];
if (gp->rblk) {
@@ -757,7 +764,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
return NVM_IO_REQUEUE;
- BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
+ BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
gp = &rrpc->trans_map[laddr];
if (gp->rblk) {
@@ -958,25 +965,11 @@ static void rrpc_requeue(struct work_struct *work)
static void rrpc_gc_free(struct rrpc *rrpc)
{
- struct rrpc_lun *rlun;
- int i;
-
if (rrpc->krqd_wq)
destroy_workqueue(rrpc->krqd_wq);
if (rrpc->kgc_wq)
destroy_workqueue(rrpc->kgc_wq);
-
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- if (!rlun->blocks)
- break;
- vfree(rlun->blocks);
- }
}
static int rrpc_gc_init(struct rrpc *rrpc)
@@ -1007,21 +1000,21 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
struct nvm_dev *dev = rrpc->dev;
struct rrpc_addr *addr = rrpc->trans_map + slba;
struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
u64 elba = slba + nlb;
u64 i;
- if (unlikely(elba > dev->total_pages)) {
+ if (unlikely(elba > dev->total_secs)) {
pr_err("nvm: L2P data from device is out of bounds!\n");
return -EINVAL;
}
for (i = 0; i < nlb; i++) {
u64 pba = le64_to_cpu(entries[i]);
+ unsigned int mod;
/* LNVM treats address-spaces as silos, LBA and PBA are
* equally large and zero-indexed.
*/
- if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+ if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
pr_err("nvm: L2P data entry is out of bounds!\n");
return -EINVAL;
}
@@ -1033,8 +1026,10 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
if (!pba)
continue;
+ div_u64_rem(pba, rrpc->nr_sects, &mod);
+
addr[i].addr = pba;
- raddr[pba].addr = slba + i;
+ raddr[mod].addr = slba + i;
}
return 0;
@@ -1044,18 +1039,21 @@ static int rrpc_map_init(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
sector_t i;
+ u64 slba;
int ret;
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
+ slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
+
+ rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
if (!rrpc->trans_map)
return -ENOMEM;
rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_pages);
+ * rrpc->nr_sects);
if (!rrpc->rev_trans_map)
return -ENOMEM;
- for (i = 0; i < rrpc->nr_pages; i++) {
+ for (i = 0; i < rrpc->nr_sects; i++) {
struct rrpc_addr *p = &rrpc->trans_map[i];
struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
@@ -1067,8 +1065,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
- rrpc_l2p_update, rrpc);
+ ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
+ rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1077,7 +1075,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
}
-
/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
@@ -1132,6 +1129,23 @@ static void rrpc_core_free(struct rrpc *rrpc)
static void rrpc_luns_free(struct rrpc *rrpc)
{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvm_lun *lun;
+ struct rrpc_lun *rlun;
+ int i;
+
+ if (!rrpc->luns)
+ return;
+
+ for (i = 0; i < rrpc->nr_luns; i++) {
+ rlun = &rrpc->luns[i];
+ lun = rlun->parent;
+ if (!lun)
+ break;
+ dev->mt->release_lun(dev, lun->id);
+ vfree(rlun->blocks);
+ }
+
kfree(rrpc->luns);
}
@@ -1139,9 +1153,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
struct nvm_dev *dev = rrpc->dev;
struct rrpc_lun *rlun;
- int i, j;
+ int i, j, ret = -EINVAL;
- if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+ if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
pr_err("rrpc: number of pages per block too high.");
return -EINVAL;
}
@@ -1155,25 +1169,26 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
/* 1:1 mapping */
for (i = 0; i < rrpc->nr_luns; i++) {
- struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+ int lunid = lun_begin + i;
+ struct nvm_lun *lun;
- rlun = &rrpc->luns[i];
- rlun->rrpc = rrpc;
- rlun->parent = lun;
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->open_list);
- INIT_LIST_HEAD(&rlun->closed_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
+ if (dev->mt->reserve_lun(dev, lunid)) {
+ pr_err("rrpc: lun %u is already allocated\n", lunid);
+ goto err;
+ }
- rrpc->total_blocks += dev->blks_per_lun;
- rrpc->nr_pages += dev->sec_per_lun;
+ lun = dev->mt->get_lun(dev, lunid);
+ if (!lun)
+ goto err;
+ rlun = &rrpc->luns[i];
+ rlun->parent = lun;
rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
rrpc->dev->blks_per_lun);
- if (!rlun->blocks)
+ if (!rlun->blocks) {
+ ret = -ENOMEM;
goto err;
+ }
for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
struct rrpc_block *rblk = &rlun->blocks[j];
@@ -1184,11 +1199,43 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
INIT_LIST_HEAD(&rblk->prio);
spin_lock_init(&rblk->lock);
}
+
+ rlun->rrpc = rrpc;
+ INIT_LIST_HEAD(&rlun->prio_list);
+ INIT_LIST_HEAD(&rlun->open_list);
+ INIT_LIST_HEAD(&rlun->closed_list);
+
+ INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+ spin_lock_init(&rlun->lock);
+
+ rrpc->total_blocks += dev->blks_per_lun;
+ rrpc->nr_sects += dev->sec_per_lun;
+
}
return 0;
err:
- return -ENOMEM;
+ return ret;
+}
+
+/* returns 0 on success and stores the beginning address in *begin */
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+ sector_t size = rrpc->nr_sects * dev->sec_size;
+
+ size >>= 9;
+
+ return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+ struct nvm_dev *dev = rrpc->dev;
+ struct nvmm_type *mt = dev->mt;
+
+ mt->put_area(dev, rrpc->soffset);
}
static void rrpc_free(struct rrpc *rrpc)
@@ -1197,6 +1244,7 @@ static void rrpc_free(struct rrpc *rrpc)
rrpc_map_free(rrpc);
rrpc_core_free(rrpc);
rrpc_luns_free(rrpc);
+ rrpc_area_free(rrpc);
kfree(rrpc);
}
@@ -1221,9 +1269,9 @@ static sector_t rrpc_capacity(void *private)
/* cur, gc, and two emergency blocks for each lun */
reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
- provisioned = rrpc->nr_pages - reserved;
+ provisioned = rrpc->nr_sects - reserved;
- if (reserved > rrpc->nr_pages) {
+ if (reserved > rrpc->nr_sects) {
pr_err("rrpc: not enough space available to expose storage.\n");
return 0;
}
@@ -1242,10 +1290,11 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
struct nvm_dev *dev = rrpc->dev;
int offset;
struct rrpc_addr *laddr;
- u64 paddr, pladdr;
+ u64 bpaddr, paddr, pladdr;
- for (offset = 0; offset < dev->pgs_per_blk; offset++) {
- paddr = block_to_addr(rrpc, rblk) + offset;
+ bpaddr = block_to_rel_addr(rrpc, rblk);
+ for (offset = 0; offset < dev->sec_per_blk; offset++) {
+ paddr = bpaddr + offset;
pladdr = rrpc->rev_trans_map[paddr].addr;
if (pladdr == ADDR_EMPTY)
@@ -1317,6 +1366,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
struct request_queue *bqueue = dev->q;
struct request_queue *tqueue = tdisk->queue;
struct rrpc *rrpc;
+ sector_t soffset;
int ret;
if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1342,6 +1392,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
+ ret = rrpc_area_init(rrpc, &soffset);
+ if (ret < 0) {
+ pr_err("nvm: rrpc: could not initialize area\n");
+ return ERR_PTR(ret);
+ }
+ rrpc->soffset = soffset;
+
ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
if (ret) {
pr_err("nvm: rrpc: could not initialize luns\n");
@@ -1386,7 +1443,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
+ rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index f7b37336353f..2653484a3b40 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@ struct rrpc {
struct nvm_dev *dev;
struct gendisk *disk;
+ sector_t soffset; /* logical sector offset */
u64 poffset; /* physical page offset */
int lun_offset;
@@ -104,7 +105,7 @@ struct rrpc {
struct rrpc_lun *luns;
/* calculated values */
- unsigned long long nr_pages;
+ unsigned long long nr_sects;
unsigned long total_blocks;
/* Write strategy variables. Move these into each for structure for each
@@ -156,6 +157,15 @@ struct rrpc_rev_addr {
u64 addr;
};
+static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
+ int blk_id)
+{
+ struct rrpc *rrpc = rlun->rrpc;
+ int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+
+ return &rlun->blocks[lun_blk];
+}
+
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
@@ -206,7 +216,7 @@ static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
unsigned pages,
struct rrpc_inflight_rq *r)
{
- BUG_ON((laddr + pages) > rrpc->nr_pages);
+ BUG_ON((laddr + pages) > rrpc->nr_sects);
return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}
@@ -243,7 +253,7 @@ static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
uint8_t pages = rqd->nr_pages;
- BUG_ON((r->l_start + pages) > rrpc->nr_pages);
+ BUG_ON((r->l_start + pages) > rrpc->nr_sects);
rrpc_unlock_laddr(rrpc, r);
}
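
The new rrpc_get_rblk() helper and block_to_rel_addr() both rely on modulo arithmetic to turn a device-global block id into an instance-relative index, which is what lets a target sit on a LUN subrange. A worked example with made-up geometry:

#include <stdio.h>

int main(void)
{
	unsigned long blks_per_lun = 1024, sec_per_blk = 256, nr_luns = 2;
	/* block 2 of device LUN 4, in an rrpc instance covering LUNs 4-5 */
	unsigned long blk_id = 4 * blks_per_lun + 2;

	/* index into the per-LUN blocks[] array (as in rrpc_get_rblk) */
	printf("lun-local block: %lu\n", blk_id % blks_per_lun);	/* 2 */
	/* first sector relative to the instance (as in block_to_rel_addr) */
	printf("relative addr:   %lu\n",
	       (blk_id % (blks_per_lun * nr_luns)) * sec_per_blk);	/* 512 */
	return 0;
}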
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 4f12c6f01fe7..b6819f0fc608 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -31,7 +31,6 @@
#include <asm/macio.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#undef DEBUG
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index b2bbe8659bed..5305923752d2 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -43,6 +43,15 @@ config OMAP_MBOX_KFIFO_SIZE
This can also be changed at runtime (via the mbox_kfifo_size
module parameter).
+config ROCKCHIP_MBOX
+ bool "Rockchip Soc Intergrated Mailbox Support"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ help
+ This driver provides support for inter-processor communication
+ between the CPU cores and the MCU on some Rockchip SoCs.
+ Please check that the SoC you use has mailbox hardware.
+ Say Y here if you want to use the Rockchip Mailbox support.
+
config PCC
bool "Platform Communication Channel Driver"
depends on ACPI
@@ -78,6 +87,25 @@ config STI_MBOX
Mailbox implementation for STMicroelectonics family chips with
hardware for interprocessor communication.
+config TI_MESSAGE_MANAGER
+ tristate "Texas Instruments Message Manager Driver"
+ depends on ARCH_KEYSTONE
+ help
+ An implementation of Message Manager slave driver for Keystone
+ architecture SoCs from Texas Instruments. Message Manager is a
+ communication entity found on a few of Texas Instruments' Keystone
+ architecture SoCs. These may be used for communication between
+ multiple processors within the SoC. Select this driver if your
+ platform has support for the hardware block.
+
+config HI6220_MBOX
+ tristate "Hi6220 Mailbox"
+ depends on ARCH_HISI
+ help
+ An implementation of the hi6220 mailbox. It is used to send messages
+ between the application processors and the MCU. Say Y here if you
+ want to build the Hi6220 mailbox controller driver.
+
config MAILBOX_TEST
tristate "Mailbox Test Client"
depends on OF
@@ -86,4 +114,13 @@ config MAILBOX_TEST
Test client to help with testing new Controller driver
implementations.
+config XGENE_SLIMPRO_MBOX
+ tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
+ depends on ARCH_XGENE
+ help
+ An implementation of the APM X-Gene Interprocessor Communication
+ Mailbox (IPCM) between the ARM 64-bit cores and SLIMpro controller.
+ It is used to send short messages between the ARM 64-bit cores and
+ the SLIMpro Management Engine, primarily for PM. Say Y here if you
+ want to use the APM X-Gene SLIMpro IPCM support.
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 92435ef11f26..0be3e742bb7d 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
+obj-$(CONFIG_ROCKCHIP_MBOX) += rockchip-mailbox.o
+
obj-$(CONFIG_PCC) += pcc.o
obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
@@ -17,3 +19,9 @@ obj-$(CONFIG_ALTERA_MBOX) += mailbox-altera.o
obj-$(CONFIG_BCM2835_MBOX) += bcm2835-mailbox.o
obj-$(CONFIG_STI_MBOX) += mailbox-sti.o
+
+obj-$(CONFIG_TI_MESSAGE_MANAGER) += ti-msgmgr.o
+
+obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
+
+obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
new file mode 100644
index 000000000000..613722db5daf
--- /dev/null
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -0,0 +1,395 @@
+/*
+ * Hisilicon's Hi6220 mailbox driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MBOX_CHAN_MAX 32
+
+#define MBOX_TX 0x1
+
+/* Mailbox message length: 8 words */
+#define MBOX_MSG_LEN 8
+
+/* Mailbox Registers */
+#define MBOX_OFF(m) (0x40 * (m))
+#define MBOX_MODE_REG(m) (MBOX_OFF(m) + 0x0)
+#define MBOX_DATA_REG(m) (MBOX_OFF(m) + 0x4)
+
+#define MBOX_STATE_MASK (0xF << 4)
+#define MBOX_STATE_IDLE (0x1 << 4)
+#define MBOX_STATE_TX (0x2 << 4)
+#define MBOX_STATE_RX (0x4 << 4)
+#define MBOX_STATE_ACK (0x8 << 4)
+#define MBOX_ACK_CONFIG_MASK (0x1 << 0)
+#define MBOX_ACK_AUTOMATIC (0x1 << 0)
+#define MBOX_ACK_IRQ (0x0 << 0)
+
+/* IPC registers */
+#define ACK_INT_RAW_REG(i) ((i) + 0x400)
+#define ACK_INT_MSK_REG(i) ((i) + 0x404)
+#define ACK_INT_STAT_REG(i) ((i) + 0x408)
+#define ACK_INT_CLR_REG(i) ((i) + 0x40c)
+#define ACK_INT_ENA_REG(i) ((i) + 0x500)
+#define ACK_INT_DIS_REG(i) ((i) + 0x504)
+#define DST_INT_RAW_REG(i) ((i) + 0x420)
+
+
+struct hi6220_mbox_chan {
+
+ /*
+ * Description for channel's hardware info:
+ * - direction: tx or rx
+ * - dst irq: peer core's irq number
+ * - ack irq: local irq number
+ * - slot number
+ */
+ unsigned int dir, dst_irq, ack_irq;
+ unsigned int slot;
+
+ struct hi6220_mbox *parent;
+};
+
+struct hi6220_mbox {
+ struct device *dev;
+
+ int irq;
+
+ /* flag of enabling tx's irq mode */
+ bool tx_irq_mode;
+
+ /* region for ipc event */
+ void __iomem *ipc;
+
+ /* region for mailbox */
+ void __iomem *base;
+
+ unsigned int chan_num;
+ struct hi6220_mbox_chan *mchan;
+
+ void *irq_map_chan[MBOX_CHAN_MAX];
+ struct mbox_chan *chan;
+ struct mbox_controller controller;
+};
+
+static void mbox_set_state(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 status;
+
+ status = readl(mbox->base + MBOX_MODE_REG(slot));
+ status = (status & ~MBOX_STATE_MASK) | val;
+ writel(status, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static void mbox_set_mode(struct hi6220_mbox *mbox,
+ unsigned int slot, u32 val)
+{
+ u32 mode;
+
+ mode = readl(mbox->base + MBOX_MODE_REG(slot));
+ mode = (mode & ~MBOX_ACK_CONFIG_MASK) | val;
+ writel(mode, mbox->base + MBOX_MODE_REG(slot));
+}
+
+static bool hi6220_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ u32 state;
+
+ /* Only set idle state for polling mode */
+ BUG_ON(mbox->tx_irq_mode);
+
+ state = readl(mbox->base + MBOX_MODE_REG(mchan->slot));
+ return ((state & MBOX_STATE_MASK) == MBOX_STATE_IDLE);
+}
+
+static int hi6220_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+ unsigned int slot = mchan->slot;
+ u32 *buf = msg;
+ int i;
+
+ /* indicate as a TX channel */
+ mchan->dir = MBOX_TX;
+
+ mbox_set_state(mbox, slot, MBOX_STATE_TX);
+
+ if (mbox->tx_irq_mode)
+ mbox_set_mode(mbox, slot, MBOX_ACK_IRQ);
+ else
+ mbox_set_mode(mbox, slot, MBOX_ACK_AUTOMATIC);
+
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ writel(buf[i], mbox->base + MBOX_DATA_REG(slot) + i * 4);
+
+ /* trigger remote request */
+ writel(BIT(mchan->dst_irq), DST_INT_RAW_REG(mbox->ipc));
+ return 0;
+}
+
+static irqreturn_t hi6220_mbox_interrupt(int irq, void *p)
+{
+ struct hi6220_mbox *mbox = p;
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int state, intr_bit, i;
+ u32 msg[MBOX_MSG_LEN];
+
+ state = readl(ACK_INT_STAT_REG(mbox->ipc));
+ if (!state) {
+ dev_warn(mbox->dev, "%s: spurious interrupt\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ while (state) {
+ intr_bit = __ffs(state);
+ state &= (state - 1);
+
+ chan = mbox->irq_map_chan[intr_bit];
+ if (!chan) {
+ dev_warn(mbox->dev, "%s: unexpected irq vector %d\n",
+ __func__, intr_bit);
+ continue;
+ }
+
+ mchan = chan->con_priv;
+ if (mchan->dir == MBOX_TX)
+ mbox_chan_txdone(chan, 0);
+ else {
+ for (i = 0; i < MBOX_MSG_LEN; i++)
+ msg[i] = readl(mbox->base +
+ MBOX_DATA_REG(mchan->slot) + i * 4);
+
+ mbox_chan_received_data(chan, (void *)msg);
+ }
+
+ /* clear IRQ source */
+ writel(BIT(mchan->ack_irq), ACK_INT_CLR_REG(mbox->ipc));
+ mbox_set_state(mbox, mchan->slot, MBOX_STATE_IDLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hi6220_mbox_startup(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ mchan->dir = 0;
+
+ /* enable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_ENA_REG(mbox->ipc));
+ return 0;
+}
+
+static void hi6220_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct hi6220_mbox_chan *mchan = chan->con_priv;
+ struct hi6220_mbox *mbox = mchan->parent;
+
+ /* disable interrupt */
+ writel(BIT(mchan->ack_irq), ACK_INT_DIS_REG(mbox->ipc));
+ mbox->irq_map_chan[mchan->ack_irq] = NULL;
+}
+
+static struct mbox_chan_ops hi6220_mbox_ops = {
+ .send_data = hi6220_mbox_send_data,
+ .startup = hi6220_mbox_startup,
+ .shutdown = hi6220_mbox_shutdown,
+ .last_tx_done = hi6220_mbox_last_tx_done,
+};
+
+static struct mbox_chan *hi6220_mbox_xlate(struct mbox_controller *controller,
+ const struct of_phandle_args *spec)
+{
+ struct hi6220_mbox *mbox = dev_get_drvdata(controller->dev);
+ struct hi6220_mbox_chan *mchan;
+ struct mbox_chan *chan;
+ unsigned int i = spec->args[0];
+ unsigned int dst_irq = spec->args[1];
+ unsigned int ack_irq = spec->args[2];
+
+ /* Bounds checking */
+ if (i >= mbox->chan_num || dst_irq >= mbox->chan_num ||
+ ack_irq >= mbox->chan_num) {
+ dev_err(mbox->dev,
+ "Invalid channel idx %d dst_irq %d ack_irq %d\n",
+ i, dst_irq, ack_irq);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Is requested channel free? */
+ chan = &mbox->chan[i];
+ if (mbox->irq_map_chan[ack_irq] == (void *)chan) {
+ dev_err(mbox->dev, "Channel in use\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ mchan = chan->con_priv;
+ mchan->dst_irq = dst_irq;
+ mchan->ack_irq = ack_irq;
+
+ mbox->irq_map_chan[ack_irq] = (void *)chan;
+ return chan;
+}
+
+static const struct of_device_id hi6220_mbox_of_match[] = {
+ { .compatible = "hisilicon,hi6220-mbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi6220_mbox_of_match);
+
+static int hi6220_mbox_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct hi6220_mbox *mbox;
+ struct resource *res;
+ int i, err;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->dev = dev;
+ mbox->chan_num = MBOX_CHAN_MAX;
+ mbox->mchan = devm_kzalloc(dev,
+ mbox->chan_num * sizeof(*mbox->mchan), GFP_KERNEL);
+ if (!mbox->mchan)
+ return -ENOMEM;
+
+ mbox->chan = devm_kzalloc(dev,
+ mbox->chan_num * sizeof(*mbox->chan), GFP_KERNEL);
+ if (!mbox->chan)
+ return -ENOMEM;
+
+ mbox->irq = platform_get_irq(pdev, 0);
+ if (mbox->irq < 0)
+ return mbox->irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mbox->ipc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->ipc)) {
+ dev_err(dev, "ioremap ipc failed\n");
+ return PTR_ERR(mbox->ipc);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mbox->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mbox->base)) {
+ dev_err(dev, "ioremap buffer failed\n");
+ return PTR_ERR(mbox->base);
+ }
+
+ err = devm_request_irq(dev, mbox->irq, hi6220_mbox_interrupt, 0,
+ dev_name(dev), mbox);
+ if (err) {
+ dev_err(dev, "Failed to register a mailbox IRQ handler: %d\n",
+ err);
+ return -ENODEV;
+ }
+
+ mbox->controller.dev = dev;
+ mbox->controller.chans = &mbox->chan[0];
+ mbox->controller.num_chans = mbox->chan_num;
+ mbox->controller.ops = &hi6220_mbox_ops;
+ mbox->controller.of_xlate = hi6220_mbox_xlate;
+
+ for (i = 0; i < mbox->chan_num; i++) {
+ mbox->chan[i].con_priv = &mbox->mchan[i];
+ mbox->irq_map_chan[i] = NULL;
+
+ mbox->mchan[i].parent = mbox;
+ mbox->mchan[i].slot = i;
+ }
+
+ /* mask and clear all interrupt vectors */
+ writel(0x0, ACK_INT_MSK_REG(mbox->ipc));
+ writel(~0x0, ACK_INT_CLR_REG(mbox->ipc));
+
+ /* use interrupt for tx's ack */
+ if (of_find_property(node, "hi6220,mbox-tx-noirq", NULL))
+ mbox->tx_irq_mode = false;
+ else
+ mbox->tx_irq_mode = true;
+
+ if (mbox->tx_irq_mode)
+ mbox->controller.txdone_irq = true;
+ else {
+ mbox->controller.txdone_poll = true;
+ mbox->controller.txpoll_period = 5;
+ }
+
+ err = mbox_controller_register(&mbox->controller);
+ if (err) {
+ dev_err(dev, "Failed to register mailbox %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, mbox);
+ dev_info(dev, "Mailbox enabled\n");
+ return 0;
+}
+
+static int hi6220_mbox_remove(struct platform_device *pdev)
+{
+ struct hi6220_mbox *mbox = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&mbox->controller);
+ return 0;
+}
+
+static struct platform_driver hi6220_mbox_driver = {
+ .driver = {
+ .name = "hi6220-mbox",
+ .owner = THIS_MODULE,
+ .of_match_table = hi6220_mbox_of_match,
+ },
+ .probe = hi6220_mbox_probe,
+ .remove = hi6220_mbox_remove,
+};
+
+static int __init hi6220_mbox_init(void)
+{
+ return platform_driver_register(&hi6220_mbox_driver);
+}
+core_initcall(hi6220_mbox_init);
+
+static void __exit hi6220_mbox_exit(void)
+{
+ platform_driver_unregister(&hi6220_mbox_driver);
+}
+module_exit(hi6220_mbox_exit);
+
+MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
+MODULE_DESCRIPTION("Hi6220 mailbox driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 684ae17dcf39..58d04726cdd7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -31,7 +31,8 @@ static struct dentry *root_debugfs_dir;
struct mbox_test_device {
struct device *dev;
- void __iomem *mmio;
+ void __iomem *tx_mmio;
+ void __iomem *rx_mmio;
struct mbox_chan *tx_channel;
struct mbox_chan *rx_channel;
char *rx_buffer;
@@ -45,7 +46,6 @@ static ssize_t mbox_test_signal_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct mbox_test_device *tdev = filp->private_data;
- int ret;
if (!tdev->tx_channel) {
dev_err(tdev->dev, "Channel cannot do Tx\n");
@@ -59,17 +59,20 @@ static ssize_t mbox_test_signal_write(struct file *filp,
return -EINVAL;
}
- tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
- if (!tdev->signal)
- return -ENOMEM;
+ /* Only allocate memory if we need to */
+ if (!tdev->signal) {
+ tdev->signal = kzalloc(MBOX_MAX_SIG_LEN, GFP_KERNEL);
+ if (!tdev->signal)
+ return -ENOMEM;
+ }
- ret = copy_from_user(tdev->signal, userbuf, count);
- if (ret) {
+ if (copy_from_user(tdev->signal, userbuf, count)) {
kfree(tdev->signal);
+ tdev->signal = NULL;
return -EFAULT;
}
- return ret < 0 ? ret : count;
+ return count;
}
static const struct file_operations mbox_test_signal_ops = {
@@ -112,16 +115,16 @@ static ssize_t mbox_test_message_write(struct file *filp,
* A separate signal is only of use if there is
* MMIO to subsequently pass the message through
*/
- if (tdev->mmio && tdev->signal) {
- print_hex_dump(KERN_INFO, "Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
- MBOX_BYTES_PER_LINE, 1, tdev->signal, MBOX_MAX_SIG_LEN, true);
+ if (tdev->tx_mmio && tdev->signal) {
+ print_hex_dump_bytes("Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
+ tdev->signal, MBOX_MAX_SIG_LEN);
data = tdev->signal;
} else
data = tdev->message;
- print_hex_dump(KERN_INFO, "Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
- MBOX_BYTES_PER_LINE, 1, tdev->message, MBOX_MAX_MSG_LEN, true);
+ print_hex_dump_bytes("Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
+ tdev->message, MBOX_MAX_MSG_LEN);
ret = mbox_send_message(tdev->tx_channel, data);
if (ret < 0)
@@ -220,15 +223,13 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
unsigned long flags;
spin_lock_irqsave(&tdev->lock, flags);
- if (tdev->mmio) {
- memcpy_fromio(tdev->rx_buffer, tdev->mmio, MBOX_MAX_MSG_LEN);
- print_hex_dump(KERN_INFO, "Client: Received [MMIO]: ",
- DUMP_PREFIX_ADDRESS, MBOX_BYTES_PER_LINE, 1,
- tdev->rx_buffer, MBOX_MAX_MSG_LEN, true);
+ if (tdev->rx_mmio) {
+ memcpy_fromio(tdev->rx_buffer, tdev->rx_mmio, MBOX_MAX_MSG_LEN);
+ print_hex_dump_bytes("Client: Received [MMIO]: ", DUMP_PREFIX_ADDRESS,
+ tdev->rx_buffer, MBOX_MAX_MSG_LEN);
} else if (message) {
- print_hex_dump(KERN_INFO, "Client: Received [API]: ",
- DUMP_PREFIX_ADDRESS, MBOX_BYTES_PER_LINE, 1,
- message, MBOX_MAX_MSG_LEN, true);
+ print_hex_dump_bytes("Client: Received [API]: ", DUMP_PREFIX_ADDRESS,
+ message, MBOX_MAX_MSG_LEN);
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
}
spin_unlock_irqrestore(&tdev->lock, flags);
@@ -238,11 +239,11 @@ static void mbox_test_prepare_message(struct mbox_client *client, void *message)
{
struct mbox_test_device *tdev = dev_get_drvdata(client->dev);
- if (tdev->mmio) {
+ if (tdev->tx_mmio) {
if (tdev->signal)
- memcpy_toio(tdev->mmio, tdev->message, MBOX_MAX_MSG_LEN);
+ memcpy_toio(tdev->tx_mmio, tdev->message, MBOX_MAX_MSG_LEN);
else
- memcpy_toio(tdev->mmio, message, MBOX_MAX_MSG_LEN);
+ memcpy_toio(tdev->tx_mmio, message, MBOX_MAX_MSG_LEN);
}
}
@@ -296,9 +297,15 @@ static int mbox_test_probe(struct platform_device *pdev)
/* It's okay for MMIO to be NULL */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdev->mmio = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tdev->mmio))
- tdev->mmio = NULL;
+ tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tdev->tx_mmio))
+ tdev->tx_mmio = NULL;
+
+ /* If specified, second reg entry is Rx MMIO */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tdev->rx_mmio))
+ tdev->rx_mmio = tdev->tx_mmio;
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
@@ -306,6 +313,10 @@ static int mbox_test_probe(struct platform_device *pdev)
if (!tdev->tx_channel && !tdev->rx_channel)
return -EPROBE_DEFER;
+ /* If no Rx channel is given but a separate Rx MMIO exists, use Tx for Rx too */
+ if (!tdev->rx_channel && (tdev->rx_mmio != tdev->tx_mmio))
+ tdev->rx_channel = tdev->tx_channel;
+
tdev->dev = &pdev->dev;
platform_set_drvdata(pdev, tdev);
@@ -342,13 +353,13 @@ static int mbox_test_remove(struct platform_device *pdev)
}
static const struct of_device_id mbox_test_match[] = {
- { .compatible = "mailbox_test" },
+ { .compatible = "mailbox-test" },
{},
};
static struct platform_driver mbox_test_driver = {
.driver = {
- .name = "mailbox_sti_test",
+ .name = "mailbox_test",
.of_match_table = mbox_test_match,
},
.probe = mbox_test_probe,
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
new file mode 100644
index 000000000000..dd2afbca51c9
--- /dev/null
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -0,0 +1,284 @@
+/*
+ * APM X-Gene SLIMpro MailBox Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Feng Kan fkan@apm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#define MBOX_CON_NAME "slimpro-mbox"
+#define MBOX_REG_SET_OFFSET 0x1000
+#define MBOX_CNT 8
+#define MBOX_STATUS_AVAIL_MASK BIT(16)
+#define MBOX_STATUS_ACK_MASK BIT(0)
+
+/* Configuration and Status Registers */
+#define REG_DB_IN 0x00
+#define REG_DB_DIN0 0x04
+#define REG_DB_DIN1 0x08
+#define REG_DB_OUT 0x10
+#define REG_DB_DOUT0 0x14
+#define REG_DB_DOUT1 0x18
+#define REG_DB_STAT 0x20
+#define REG_DB_STATMASK 0x24
+
+/**
+ * X-Gene SlimPRO mailbox channel information
+ *
+ * @dev: Device to which it is attached
+ * @chan: Pointer to mailbox communication channel
+ * @reg: Base address to access channel registers
+ * @irq: Interrupt number of the channel
+ * @rx_msg: Received message storage
+ */
+struct slimpro_mbox_chan {
+ struct device *dev;
+ struct mbox_chan *chan;
+ void __iomem *reg;
+ int irq;
+ u32 rx_msg[3];
+};
+
+/**
+ * X-Gene SlimPRO Mailbox controller data
+ *
+ * X-Gene SlimPRO Mailbox controller has 8 communication channels.
+ * Each channel has a separate IRQ number assigned to it.
+ *
+ * @mb_ctrl: Representation of the communication channel controller
+ * @mc: Array of SlimPRO mailbox channels of the controller
+ * @chans: Array of mailbox communication channels
+ *
+ */
+struct slimpro_mbox {
+ struct mbox_controller mb_ctrl;
+ struct slimpro_mbox_chan mc[MBOX_CNT];
+ struct mbox_chan chans[MBOX_CNT];
+};
+
+static void mb_chan_send_msg(struct slimpro_mbox_chan *mb_chan, u32 *msg)
+{
+ writel(msg[1], mb_chan->reg + REG_DB_DOUT0);
+ writel(msg[2], mb_chan->reg + REG_DB_DOUT1);
+ writel(msg[0], mb_chan->reg + REG_DB_OUT);
+}
+
+static void mb_chan_recv_msg(struct slimpro_mbox_chan *mb_chan)
+{
+ mb_chan->rx_msg[1] = readl(mb_chan->reg + REG_DB_DIN0);
+ mb_chan->rx_msg[2] = readl(mb_chan->reg + REG_DB_DIN1);
+ mb_chan->rx_msg[0] = readl(mb_chan->reg + REG_DB_IN);
+}
+
+static int mb_chan_status_ack(struct slimpro_mbox_chan *mb_chan)
+{
+ u32 val = readl(mb_chan->reg + REG_DB_STAT);
+
+ if (val & MBOX_STATUS_ACK_MASK) {
+ writel(MBOX_STATUS_ACK_MASK, mb_chan->reg + REG_DB_STAT);
+ return 1;
+ }
+ return 0;
+}
+
+static int mb_chan_status_avail(struct slimpro_mbox_chan *mb_chan)
+{
+ u32 val = readl(mb_chan->reg + REG_DB_STAT);
+
+ if (val & MBOX_STATUS_AVAIL_MASK) {
+ mb_chan_recv_msg(mb_chan);
+ writel(MBOX_STATUS_AVAIL_MASK, mb_chan->reg + REG_DB_STAT);
+ return 1;
+ }
+ return 0;
+}
+
+static irqreturn_t slimpro_mbox_irq(int irq, void *id)
+{
+ struct slimpro_mbox_chan *mb_chan = id;
+
+ if (mb_chan_status_ack(mb_chan))
+ mbox_chan_txdone(mb_chan->chan, 0);
+
+ if (mb_chan_status_avail(mb_chan))
+ mbox_chan_received_data(mb_chan->chan, mb_chan->rx_msg);
+
+ return IRQ_HANDLED;
+}
+
+static int slimpro_mbox_send_data(struct mbox_chan *chan, void *msg)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+
+ mb_chan_send_msg(mb_chan, msg);
+ return 0;
+}
+
+static int slimpro_mbox_startup(struct mbox_chan *chan)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+ int rc;
+ u32 val;
+
+ rc = devm_request_irq(mb_chan->dev, mb_chan->irq, slimpro_mbox_irq, 0,
+ MBOX_CON_NAME, mb_chan);
+ if (unlikely(rc)) {
+ dev_err(mb_chan->dev, "failed to register mailbox interrupt %d\n",
+ mb_chan->irq);
+ return rc;
+ }
+
+ /* Enable HW interrupt */
+ writel(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK,
+ mb_chan->reg + REG_DB_STAT);
+ /* Unmask doorbell status interrupt */
+ val = readl(mb_chan->reg + REG_DB_STATMASK);
+ val &= ~(MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
+ writel(val, mb_chan->reg + REG_DB_STATMASK);
+
+ return 0;
+}
+
+static void slimpro_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct slimpro_mbox_chan *mb_chan = chan->con_priv;
+ u32 val;
+
+ /* Mask doorbell status interrupt */
+ val = readl(mb_chan->reg + REG_DB_STATMASK);
+ val |= (MBOX_STATUS_ACK_MASK | MBOX_STATUS_AVAIL_MASK);
+ writel(val, mb_chan->reg + REG_DB_STATMASK);
+
+ devm_free_irq(mb_chan->dev, mb_chan->irq, mb_chan);
+}
+
+static struct mbox_chan_ops slimpro_mbox_ops = {
+ .send_data = slimpro_mbox_send_data,
+ .startup = slimpro_mbox_startup,
+ .shutdown = slimpro_mbox_shutdown,
+};
+
+static int slimpro_mbox_probe(struct platform_device *pdev)
+{
+ struct slimpro_mbox *ctx;
+ struct resource *regs;
+ void __iomem *mb_base;
+ int rc;
+ int i;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(struct slimpro_mbox), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ctx);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+ if (!mb_base)
+ return -ENOMEM;
+
+ /* Setup mailbox links */
+ for (i = 0; i < MBOX_CNT; i++) {
+ ctx->mc[i].irq = platform_get_irq(pdev, i);
+ if (ctx->mc[i].irq < 0) {
+ if (i == 0) {
+ dev_err(&pdev->dev, "no available IRQ\n");
+ return -EINVAL;
+ }
+ dev_info(&pdev->dev, "no IRQ for channel %d\n", i);
+ break;
+ }
+
+ ctx->mc[i].dev = &pdev->dev;
+ ctx->mc[i].reg = mb_base + i * MBOX_REG_SET_OFFSET;
+ ctx->mc[i].chan = &ctx->chans[i];
+ ctx->chans[i].con_priv = &ctx->mc[i];
+ }
+
+ /* Setup mailbox controller */
+ ctx->mb_ctrl.dev = &pdev->dev;
+ ctx->mb_ctrl.chans = ctx->chans;
+ ctx->mb_ctrl.txdone_irq = true;
+ ctx->mb_ctrl.ops = &slimpro_mbox_ops;
+ ctx->mb_ctrl.num_chans = i;
+
+ rc = mbox_controller_register(&ctx->mb_ctrl);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "APM X-Gene SLIMpro MailBox register failed:%d\n", rc);
+ return rc;
+ }
+
+ dev_info(&pdev->dev, "APM X-Gene SLIMpro MailBox registered\n");
+ return 0;
+}
+
+static int slimpro_mbox_remove(struct platform_device *pdev)
+{
+ struct slimpro_mbox *smb = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&smb->mb_ctrl);
+ return 0;
+}
+
+static const struct of_device_id slimpro_of_match[] = {
+ {.compatible = "apm,xgene-slimpro-mbox" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, slimpro_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id slimpro_acpi_ids[] = {
+ {"APMC0D01", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, slimpro_acpi_ids);
+#endif
+
+static struct platform_driver slimpro_mbox_driver = {
+ .probe = slimpro_mbox_probe,
+ .remove = slimpro_mbox_remove,
+ .driver = {
+ .name = "xgene-slimpro-mbox",
+ .of_match_table = of_match_ptr(slimpro_of_match),
+ .acpi_match_table = ACPI_PTR(slimpro_acpi_ids)
+ },
+};
+
+static int __init slimpro_mbox_init(void)
+{
+ return platform_driver_register(&slimpro_mbox_driver);
+}
+
+static void __exit slimpro_mbox_exit(void)
+{
+ platform_driver_unregister(&slimpro_mbox_driver);
+}
+
+subsys_initcall(slimpro_mbox_init);
+module_exit(slimpro_mbox_exit);
+
+MODULE_DESCRIPTION("APM X-Gene SLIMpro Mailbox Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f85705..4a36632c236f 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -375,13 +375,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
if (!of_get_property(np, "mbox-names", NULL)) {
dev_err(cl->dev,
"%s() requires an \"mbox-names\" property\n", __func__);
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EINVAL);
}
of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 8f779a1ec99c..043828d541f7 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -63,6 +63,7 @@
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "mailbox.h"
@@ -70,6 +71,9 @@
static struct mbox_chan *pcc_mbox_channels;
+/* Array of cached virtual address for doorbell registers */
+static void __iomem **pcc_doorbell_vaddr;
+
static struct mbox_controller pcc_mbox_ctrl = {};
/**
* get_pcc_channel - Given a PCC subspace idx, get
@@ -160,6 +164,66 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+/*
+ * PCC can be used with perf-critical drivers such as CPPC,
+ * so it makes sense to locally cache the virtual address and
+ * use it to read/write PCC registers such as the doorbell register.
+ *
+ * The read_register and write_register helpers below are used to
+ * access perf-critical registers such as the PCC doorbell register.
+ */
+static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+{
+ int ret_val = 0;
+
+ switch (bit_width) {
+ case 8:
+ *val = readb(vaddr);
+ break;
+ case 16:
+ *val = readw(vaddr);
+ break;
+ case 32:
+ *val = readl(vaddr);
+ break;
+ case 64:
+ *val = readq(vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot read register of %u bit width",
+ bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ return ret_val;
+}
+
+static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+{
+ int ret_val = 0;
+
+ switch (bit_width) {
+ case 8:
+ writeb(val, vaddr);
+ break;
+ case 16:
+ writew(val, vaddr);
+ break;
+ case 32:
+ writel(val, vaddr);
+ break;
+ case 64:
+ writeq(val, vaddr);
+ break;
+ default:
+ pr_debug("Error: Cannot write register of %u bit width",
+ bit_width);
+ ret_val = -EFAULT;
+ break;
+ }
+ return ret_val;
+}
+
/**
* pcc_send_data - Called from Mailbox Controller code. Used
* here only to ring the channel doorbell. The PCC client
@@ -175,21 +239,39 @@ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_generic_address doorbell;
+ struct acpi_generic_address *doorbell;
u64 doorbell_preserve;
u64 doorbell_val;
u64 doorbell_write;
+ u32 id = chan - pcc_mbox_channels;
+ int ret = 0;
+
+ if (id >= pcc_mbox_ctrl.num_chans) {
+ pr_debug("pcc_send_data: Invalid mbox_chan passed\n");
+ return -ENOENT;
+ }
- doorbell = pcct_ss->doorbell_register;
+ doorbell = &pcct_ss->doorbell_register;
doorbell_preserve = pcct_ss->preserve_mask;
doorbell_write = pcct_ss->write_mask;
/* Sync notification from OS to Platform. */
- acpi_read(&doorbell_val, &doorbell);
- acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
- &doorbell);
-
- return 0;
+ if (pcc_doorbell_vaddr[id]) {
+ ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val,
+ doorbell->bit_width);
+ if (ret)
+ return ret;
+ ret = write_register(pcc_doorbell_vaddr[id],
+ (doorbell_val & doorbell_preserve) | doorbell_write,
+ doorbell->bit_width);
+ } else {
+ ret = acpi_read(&doorbell_val, doorbell);
+ if (ret)
+ return ret;
+ ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
+ doorbell);
+ }
+ return ret;
}
static const struct mbox_chan_ops pcc_chan_ops = {
@@ -265,12 +347,27 @@ static int __init acpi_pcc_probe(void)
return -ENOMEM;
}
+ pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
+ if (!pcc_doorbell_vaddr) {
+ kfree(pcc_mbox_channels);
+ return -ENOMEM;
+ }
+
/* Point to the first PCC subspace entry */
pcct_entry = (struct acpi_subtable_header *) (
(unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));
for (i = 0; i < count; i++) {
+ struct acpi_generic_address *db_reg;
+ struct acpi_pcct_hw_reduced *pcct_ss;
pcc_mbox_channels[i].con_priv = pcct_entry;
+
+ /* If the doorbell is in system memory, cache the virtual address */
+ pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+ db_reg = &pcct_ss->doorbell_register;
+ if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
+ db_reg->bit_width/8);
pcct_entry = (struct acpi_subtable_header *)
((unsigned long) pcct_entry + pcct_entry->length);
}
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
new file mode 100644
index 000000000000..d702a204f5c1
--- /dev/null
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define MAILBOX_A2B_INTEN 0x00
+#define MAILBOX_A2B_STATUS 0x04
+#define MAILBOX_A2B_CMD(x) (0x08 + (x) * 8)
+#define MAILBOX_A2B_DAT(x) (0x0c + (x) * 8)
+
+#define MAILBOX_B2A_INTEN 0x28
+#define MAILBOX_B2A_STATUS 0x2C
+#define MAILBOX_B2A_CMD(x) (0x30 + (x) * 8)
+#define MAILBOX_B2A_DAT(x) (0x34 + (x) * 8)
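As a hedged sanity check of the register layout these macros encode (derived purely from the arithmetic above; the check function itself is illustrative): each channel owns an 8-byte CMD/DAT pair per direction, so consecutive channels sit 8 bytes apart.

static void __maybe_unused rockchip_mbox_layout_check(void)
{
        /* chan 0: A2B 0x08/0x0c, chan 1: 0x10/0x14 -> 8-byte stride */
        BUILD_BUG_ON(MAILBOX_A2B_CMD(1) - MAILBOX_A2B_CMD(0) != 8);
        BUILD_BUG_ON(MAILBOX_A2B_DAT(0) != 0x0c);
        BUILD_BUG_ON(MAILBOX_B2A_CMD(1) != 0x38);
}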
+
+struct rockchip_mbox_msg {
+ u32 cmd;
+ int rx_size;
+};
+
+struct rockchip_mbox_data {
+ int num_chans;
+};
+
+struct rockchip_mbox_chan {
+ int idx;
+ int irq;
+ struct rockchip_mbox_msg *msg;
+ struct rockchip_mbox *mb;
+};
+
+struct rockchip_mbox {
+ struct mbox_controller mbox;
+ struct clk *pclk;
+ void __iomem *mbox_base;
+
+ /* The maximum size of buf for each channel */
+ u32 buf_size;
+
+ struct rockchip_mbox_chan *chans;
+};
+
+static int rockchip_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ struct rockchip_mbox_msg *msg = data;
+ struct rockchip_mbox_chan *chans = mb->chans;
+
+ if (!msg)
+ return -EINVAL;
+
+ if (msg->rx_size > mb->buf_size) {
+ dev_err(mb->mbox.dev, "Transmit size exceeds buf size (%d)\n",
+ mb->buf_size);
+ return -EINVAL;
+ }
+
+ dev_dbg(mb->mbox.dev, "Chan[%d]: A2B message, cmd 0x%08x\n",
+ chans->idx, msg->cmd);
+
+ mb->chans[chans->idx].msg = msg;
+
+ writel_relaxed(msg->cmd, mb->mbox_base + MAILBOX_A2B_CMD(chans->idx));
+ writel_relaxed(msg->rx_size, mb->mbox_base +
+ MAILBOX_A2B_DAT(chans->idx));
+
+ return 0;
+}
+
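A hedged client-side sketch (assumes <linux/mailbox_client.h>; the command value is made up): a client fills a rockchip_mbox_msg and hands it to the framework, which lands in rockchip_mbox_send_data() above.

static int rockchip_mbox_client_send_sketch(struct mbox_chan *chan)
{
        /* assumed command value; rx_size = 0 means no reply payload */
        struct rockchip_mbox_msg msg = {
                .cmd = 0x0000f00d,
                .rx_size = 0,
        };

        return mbox_send_message(chan, &msg);
}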
+static int rockchip_mbox_startup(struct mbox_chan *chan)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+
+ /* Enable all B2A interrupts */
+ writel_relaxed((1 << mb->mbox.num_chans) - 1,
+ mb->mbox_base + MAILBOX_B2A_INTEN);
+
+ return 0;
+}
+
+static void rockchip_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct rockchip_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ struct rockchip_mbox_chan *chans = mb->chans;
+
+ /* Disable all B2A interrupts */
+ writel_relaxed(0, mb->mbox_base + MAILBOX_B2A_INTEN);
+
+ mb->chans[chans->idx].msg = NULL;
+}
+
+static const struct mbox_chan_ops rockchip_mbox_chan_ops = {
+ .send_data = rockchip_mbox_send_data,
+ .startup = rockchip_mbox_startup,
+ .shutdown = rockchip_mbox_shutdown,
+};
+
+static irqreturn_t rockchip_mbox_irq(int irq, void *dev_id)
+{
+ int idx;
+ struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
+ u32 status = readl_relaxed(mb->mbox_base + MAILBOX_B2A_STATUS);
+
+ for (idx = 0; idx < mb->mbox.num_chans; idx++) {
+ if ((status & (1 << idx)) && (irq == mb->chans[idx].irq)) {
+ /* Clear mbox interrupt */
+ writel_relaxed(1 << idx,
+ mb->mbox_base + MAILBOX_B2A_STATUS);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t rockchip_mbox_isr(int irq, void *dev_id)
+{
+ int idx;
+ struct rockchip_mbox_msg *msg = NULL;
+ struct rockchip_mbox *mb = (struct rockchip_mbox *)dev_id;
+
+ for (idx = 0; idx < mb->mbox.num_chans; idx++) {
+ if (irq != mb->chans[idx].irq)
+ continue;
+
+ msg = mb->chans[idx].msg;
+ if (!msg) {
+ dev_err(mb->mbox.dev,
+ "Chan[%d]: B2A message is NULL\n", idx);
+ break; /* spurious */
+ }
+
+ mbox_chan_received_data(&mb->mbox.chans[idx], msg);
+ mb->chans[idx].msg = NULL;
+
+ dev_dbg(mb->mbox.dev, "Chan[%d]: B2A message, cmd 0x%08x\n",
+ idx, msg->cmd);
+
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct rockchip_mbox_data rk3368_drv_data = {
+ .num_chans = 4,
+};
+
+static const struct of_device_id rockchip_mbox_of_match[] = {
+ { .compatible = "rockchip,rk3368-mailbox", .data = &rk3368_drv_data},
+ { },
+};
+MODULE_DEVICE_TABLE(of, rockchip_mbox_of_match);
+
+static int rockchip_mbox_probe(struct platform_device *pdev)
+{
+ struct rockchip_mbox *mb;
+ const struct of_device_id *match;
+ const struct rockchip_mbox_data *drv_data;
+ struct resource *res;
+ int ret, irq, i;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ match = of_match_node(rockchip_mbox_of_match, pdev->dev.of_node);
+ drv_data = (const struct rockchip_mbox_data *)match->data;
+
+ mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
+ sizeof(*mb->chans), GFP_KERNEL);
+ if (!mb->chans)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, drv_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->mbox.dev = &pdev->dev;
+ mb->mbox.num_chans = drv_data->num_chans;
+ mb->mbox.ops = &rockchip_mbox_chan_ops;
+ mb->mbox.txdone_irq = true;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ mb->mbox_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mb->mbox_base))
+ return PTR_ERR(mb->mbox_base);
+
+ /* Each channel has two buffers for A2B and B2A */
+ mb->buf_size = (size_t)resource_size(res) / (drv_data->num_chans * 2);
+
+ mb->pclk = devm_clk_get(&pdev->dev, "pclk_mailbox");
+ if (IS_ERR(mb->pclk)) {
+ ret = PTR_ERR(mb->pclk);
+ dev_err(&pdev->dev, "failed to get pclk_mailbox clock: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mb->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable pclk: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < mb->mbox.num_chans; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ rockchip_mbox_irq,
+ rockchip_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (ret < 0)
+ return ret;
+
+ mb->chans[i].idx = i;
+ mb->chans[i].irq = irq;
+ mb->chans[i].mb = mb;
+ mb->chans[i].msg = NULL;
+ }
+
+ ret = mbox_controller_register(&mb->mbox);
+ if (ret < 0)
+ dev_err(&pdev->dev, "Failed to register mailbox: %d\n", ret);
+
+ return ret;
+}
+
+static int rockchip_mbox_remove(struct platform_device *pdev)
+{
+ struct rockchip_mbox *mb = platform_get_drvdata(pdev);
+
+ if (!mb)
+ return -EINVAL;
+
+ mbox_controller_unregister(&mb->mbox);
+
+ return 0;
+}
+
+static struct platform_driver rockchip_mbox_driver = {
+ .probe = rockchip_mbox_probe,
+ .remove = rockchip_mbox_remove,
+ .driver = {
+ .name = "rockchip-mailbox",
+ .of_match_table = of_match_ptr(rockchip_mbox_of_match),
+ },
+};
+
+module_platform_driver(rockchip_mbox_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Rockchip mailbox: communicate between CPU cores and MCU");
+MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
+MODULE_AUTHOR("Caesar Wang <wxt@rock-chips.com>");
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
new file mode 100644
index 000000000000..54b9e4cb4cfa
--- /dev/null
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -0,0 +1,639 @@
+/*
+ * Texas Instruments' Message Manager Driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+
+#define Q_DATA_OFFSET(proxy, queue, reg) \
+ ((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
+#define Q_STATE_OFFSET(queue) ((queue) * 0x4)
+#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)
+
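A worked example of the address arithmetic (the values follow directly from the macros above; the check function is illustrative): proxy 0, queue 5, data register 16 sits at 0x80 * 5 + 16 * 4 = 0x2c0 in the proxy region, and queue 5's state word at 5 * 4 = 0x14 in the debug region.

static void __maybe_unused ti_msgmgr_offset_check(void)
{
        BUILD_BUG_ON(Q_DATA_OFFSET(0, 5, 16) != 0x2c0);
        BUILD_BUG_ON(Q_STATE_OFFSET(5) != 0x14);
}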
+/**
+ * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
+ * @queue_id: Queue Number for this path
+ * @proxy_id: Proxy ID representing the processor in SoC
+ * @is_tx: Is this a transmit path?
+ */
+struct ti_msgmgr_valid_queue_desc {
+ u8 queue_id;
+ u8 proxy_id;
+ bool is_tx;
+};
+
+/**
+ * struct ti_msgmgr_desc - Description of message manager integration
+ * @queue_count: Number of Queues
+ * @max_message_size: Maximum message size in bytes
+ * @max_messages: Maximum number of messages
+ * @q_slices: Number of queue engines
+ * @q_proxies: Number of queue proxies per page
+ * @data_first_reg: First data register for proxy data region
+ * @data_last_reg: Last data register for proxy data region
+ * @tx_polled: Do I need to use polled mechanism for tx
+ * @tx_poll_timeout_ms: Timeout in ms if polled
+ * @valid_queues: List of Valid queues that the processor can access
+ * @num_valid_queues: Number of valid queues
+ *
+ * This structure is used in of match data to describe how integration
+ * for a specific compatible SoC is done.
+ */
+struct ti_msgmgr_desc {
+ u8 queue_count;
+ u8 max_message_size;
+ u8 max_messages;
+ u8 q_slices;
+ u8 q_proxies;
+ u8 data_first_reg;
+ u8 data_last_reg;
+ bool tx_polled;
+ int tx_poll_timeout_ms;
+ const struct ti_msgmgr_valid_queue_desc *valid_queues;
+ int num_valid_queues;
+};
+
+/**
+ * struct ti_queue_inst - Description of a queue instance
+ * @name: Queue Name
+ * @queue_id: Queue Identifier as mapped on SoC
+ * @proxy_id: Proxy Identifier as mapped on SoC
+ * @irq: IRQ for Rx Queue
+ * @is_tx: 'true' if transmit queue, else 'false'
+ * @queue_buff_start: First register of Data Buffer
+ * @queue_buff_end: Last (or confirmation) register of Data buffer
+ * @queue_state: Queue status register
+ * @chan: Mailbox channel
+ * @rx_buff: Receive buffer pointer allocated at probe, max_message_size
+ */
+struct ti_queue_inst {
+ char name[30];
+ u8 queue_id;
+ u8 proxy_id;
+ int irq;
+ bool is_tx;
+ void __iomem *queue_buff_start;
+ void __iomem *queue_buff_end;
+ void __iomem *queue_state;
+ struct mbox_chan *chan;
+ u32 *rx_buff;
+};
+
+/**
+ * struct ti_msgmgr_inst - Description of a Message Manager Instance
+ * @dev: device pointer corresponding to the Message Manager instance
+ * @desc: Description of the SoC integration
+ * @queue_proxy_region: Queue proxy region where queue buffers are located
+ * @queue_state_debug_region: Queue status register regions
+ * @num_valid_queues: Number of valid queues defined for the processor
+ * Note: other queues are probably reserved for other processors
+ * in the SoC.
+ * @qinsts: Array of valid Queue Instances for the Processor
+ * @mbox: Mailbox Controller
+ * @chans: Array for channels corresponding to the Queue Instances.
+ */
+struct ti_msgmgr_inst {
+ struct device *dev;
+ const struct ti_msgmgr_desc *desc;
+ void __iomem *queue_proxy_region;
+ void __iomem *queue_state_debug_region;
+ u8 num_valid_queues;
+ struct ti_queue_inst *qinsts;
+ struct mbox_controller mbox;
+ struct mbox_chan *chans;
+};
+
+/**
+ * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
+ * @qinst: Queue instance for which we check the number of pending messages
+ *
+ * Return: number of messages pending in the queue (0 == no pending messages)
+ */
+static inline int ti_msgmgr_queue_get_num_messages(struct ti_queue_inst *qinst)
+{
+ u32 val;
+
+ /*
+ * We cannot use a relaxed operation here - the hardware may
+ * update the queue state in real time.
+ */
+ val = readl(qinst->queue_state) & Q_STATE_ENTRY_COUNT_MASK;
+ val >>= __ffs(Q_STATE_ENTRY_COUNT_MASK);
+
+ return val;
+}
+
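The decode above is a plain mask-and-shift; as an illustrative sketch, a raw state word of 0x00003000 yields (0x00003000 & 0xFFF000) >> 12 = 3 pending messages.

static inline u32 q_state_to_count_sketch(u32 raw)
{
        /* e.g. raw 0x00003000 -> 3 pending messages */
        return (raw & Q_STATE_ENTRY_COUNT_MASK) >>
               __ffs(Q_STATE_ENTRY_COUNT_MASK);
}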
+/**
+ * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
+ * @irq: Interrupt number
+ * @p: Channel Pointer
+ *
+ * Return: -EINVAL if there is no instance
+ * IRQ_NONE if the interrupt is not ours.
+ * IRQ_HANDLED if the rx interrupt was successfully handled.
+ */
+static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
+{
+ struct mbox_chan *chan = p;
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ struct ti_queue_inst *qinst = chan->con_priv;
+ const struct ti_msgmgr_desc *desc;
+ int msg_count, num_words;
+ struct ti_msgmgr_message message;
+ void __iomem *data_reg;
+ u32 *word_data;
+
+ if (WARN_ON(!inst)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+
+ /* Do I have an invalid interrupt source? */
+ if (qinst->is_tx) {
+ dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n",
+ qinst->name);
+ return IRQ_NONE;
+ }
+
+ /* Do I actually have messages to read? */
+ msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+ if (!msg_count) {
+ /* Shared IRQ? */
+ dev_dbg(dev, "Spurious event - 0 pending data!\n");
+ return IRQ_NONE;
+ }
+
+ /*
+ * I have no idea about the protocol being used to communicate with the
+ * remote producer - 0 could be valid data, so I won't make a judgement
+ * of how many bytes I should be reading. Let the client figure this
+ * out; I just read the full message and pass it on.
+ */
+ desc = inst->desc;
+ message.len = desc->max_message_size;
+ message.buf = (u8 *)qinst->rx_buff;
+
+ /*
+ * NOTE about register access involved here:
+ * the hardware block is implemented with 32bit access operations and no
+ * support for data splitting. We don't want the hardware to misbehave
+ * with sub 32bit access - For example: if the last register read is
+ * split into byte wise access, it can result in the queue getting
+ * stuck or indeterminate behavior. An out of order read operation may
+ * result in weird data results as well.
+ * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead
+ * we depend on readl for the purpose.
+ *
+ * Also note that the final register read automatically marks the
+ * queue message as read.
+ */
+ for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff,
+ num_words = (desc->max_message_size / sizeof(u32));
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ *word_data = readl(data_reg);
+
+ /*
+ * Last register read automatically clears the IRQ if only 1 message
+ * is pending - so send the data up the stack.
+ * NOTE: Client is expected to be as optimal as possible, since
+ * we invoke the handler in IRQ context.
+ */
+ mbox_chan_received_data(chan, (void *)&message);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages.
+ * @chan: Channel Pointer
+ *
+ * Return: 'true' if there is pending rx data, 'false' if there is none.
+ */
+static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ int msg_count;
+
+ if (qinst->is_tx)
+ return false;
+
+ msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+
+ return msg_count ? true : false;
+}
+
+/**
+ * ti_msgmgr_last_tx_done() - See if all the tx messages are sent
+ * @chan: Channel pointer
+ *
+ * Return: 'true' if no pending tx data, 'false' if there are any.
+ */
+static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ int msg_count;
+
+ if (!qinst->is_tx)
+ return false;
+
+ msg_count = ti_msgmgr_queue_get_num_messages(qinst);
+
+ /* if we have any messages pending.. */
+ return msg_count ? false : true;
+}
+
+/**
+ * ti_msgmgr_send_data() - Send data
+ * @chan: Channel Pointer
+ * @data: ti_msgmgr_message * Message Pointer
+ *
+ * Return: 0 if all goes well, else an appropriate error code.
+ */
+static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
+{
+ struct device *dev = chan->mbox->dev;
+ struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
+ const struct ti_msgmgr_desc *desc;
+ struct ti_queue_inst *qinst = chan->con_priv;
+ int num_words, trail_bytes;
+ struct ti_msgmgr_message *message = data;
+ void __iomem *data_reg;
+ u32 *word_data;
+
+ if (WARN_ON(!inst)) {
+ dev_err(dev, "no platform drv data??\n");
+ return -EINVAL;
+ }
+ desc = inst->desc;
+
+ if (desc->max_message_size < message->len) {
+ dev_err(dev, "Queue %s message length %d > max %d\n",
+ qinst->name, message->len, desc->max_message_size);
+ return -EINVAL;
+ }
+
+ /* NOTE: Constraints similar to rx path exists here as well */
+ for (data_reg = qinst->queue_buff_start,
+ num_words = message->len / sizeof(u32),
+ word_data = (u32 *)message->buf;
+ num_words; num_words--, data_reg += sizeof(u32), word_data++)
+ writel(*word_data, data_reg);
+
+ trail_bytes = message->len % sizeof(u32);
+ if (trail_bytes) {
+ u32 data_trail = *word_data;
+
+ /* Ensure all unused data is 0 */
+ data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes));
+ writel(data_trail, data_reg);
+ data_reg++;
+ }
+ /*
+ * 'data_reg' indicates the next register to write. If we have not
+ * already written the tx complete (last) register, do so now to
+ * trigger the transmit.
+ */
+ if (data_reg <= qinst->queue_buff_end)
+ writel(0, qinst->queue_buff_end);
+
+ return 0;
+}
+
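A worked example of the trailing-byte masking above (the message length is made up): a 6-byte message writes one full word, leaves trail_bytes = 2, and the mask 0xFFFFFFFF >> (8 * (4 - 2)) = 0x0000FFFF zeroes the unused upper half of the last word before it is written.

static void __maybe_unused ti_msgmgr_trail_mask_check(void)
{
        /* len = 6 -> 2 trailing bytes -> keep only the low 16 bits */
        BUILD_BUG_ON((0xFFFFFFFF >> (8 * (sizeof(u32) - 2))) != 0x0000FFFF);
}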
+/**
+ * ti_msgmgr_queue_startup() - Startup queue
+ * @chan: Channel pointer
+ *
+ * Return: 0 if all goes well, else the corresponding error code
+ */
+static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+ struct device *dev = chan->mbox->dev;
+ int ret;
+
+ if (!qinst->is_tx) {
+ /*
+ * The IRQ may be shared with other devices in the SoC, hence IRQF_SHARED.
+ */
+ ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
+ IRQF_SHARED, qinst->name, chan);
+ if (ret) {
+ dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
+ qinst->irq, qinst->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ti_msgmgr_queue_shutdown() - Shutdown the queue
+ * @chan: Channel pointer
+ */
+static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
+{
+ struct ti_queue_inst *qinst = chan->con_priv;
+
+ if (!qinst->is_tx)
+ free_irq(qinst->irq, chan);
+}
+
+/**
+ * ti_msgmgr_of_xlate() - Translation of phandle to queue
+ * @mbox: Mailbox controller
+ * @p: phandle pointer
+ *
+ * Return: Mailbox channel corresponding to the queue, else return error
+ * pointer.
+ */
+static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *p)
+{
+ struct ti_msgmgr_inst *inst;
+ int req_qid, req_pid;
+ struct ti_queue_inst *qinst;
+ int i;
+
+ inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
+ if (WARN_ON(!inst))
+ return ERR_PTR(-EINVAL);
+
+ /* #mbox-cells is 2 */
+ if (p->args_count != 2) {
+ dev_err(inst->dev, "Invalid arguments in dt[%d] instead of 2\n",
+ p->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+ req_qid = p->args[0];
+ req_pid = p->args[1];
+
+ for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
+ i++, qinst++) {
+ if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id)
+ return qinst->chan;
+ }
+
+ dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n",
+ req_qid, req_pid, p->np->name);
+ return ERR_PTR(-ENOENT);
+}
+
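A hedged consumer sketch for the two-cell xlate above (assumes <linux/mailbox_client.h>; node and function names are made up). Queue 5 / proxy 2 is a valid k2g rx pairing per the table below.

/* Hypothetical DT consumer (queue ID 5, proxy ID 2 on k2g):
 *
 *         mboxes = <&msg_mgr 5 2>;
 *
 * which a client driver then requests by index:
 */
static struct mbox_chan *ti_msgmgr_client_get_sketch(struct mbox_client *cl)
{
        return mbox_request_channel(cl, 0);
}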
+/**
+ * ti_msgmgr_queue_setup() - Setup data structures for each queue instance
+ * @idx: index of the queue
+ * @dev: pointer to the message manager device
+ * @np: pointer to the of node
+ * @inst: Message Manager instance pointer
+ * @d: Message Manager instance description data
+ * @qd: Queue description data
+ * @qinst: Queue instance pointer
+ * @chan: pointer to mailbox channel
+ *
+ * Return: 0 if all went well, else return corresponding error
+ */
+static int ti_msgmgr_queue_setup(int idx, struct device *dev,
+ struct device_node *np,
+ struct ti_msgmgr_inst *inst,
+ const struct ti_msgmgr_desc *d,
+ const struct ti_msgmgr_valid_queue_desc *qd,
+ struct ti_queue_inst *qinst,
+ struct mbox_chan *chan)
+{
+ qinst->proxy_id = qd->proxy_id;
+ qinst->queue_id = qd->queue_id;
+
+ if (qinst->queue_id > d->queue_count) {
+ dev_err(dev, "Queue Data [idx=%d] queuid %d > %d\n",
+ idx, qinst->queue_id, d->queue_count);
+ return -ERANGE;
+ }
+
+ qinst->is_tx = qd->is_tx;
+ snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
+ dev_name(dev), qinst->is_tx ? "tx" : "rx", qinst->queue_id,
+ qinst->proxy_id);
+
+ if (!qinst->is_tx) {
+ char of_rx_irq_name[7];
+
+ snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
+ "rx_%03d", qinst->queue_id);
+
+ qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
+ if (qinst->irq < 0) {
+ dev_crit(dev,
+ "[%d]QID %d PID %d:No IRQ[%s]: %d\n",
+ idx, qinst->queue_id, qinst->proxy_id,
+ of_rx_irq_name, qinst->irq);
+ return qinst->irq;
+ }
+ /* Allocate usage buffer for rx */
+ qinst->rx_buff = devm_kzalloc(dev,
+ d->max_message_size, GFP_KERNEL);
+ if (!qinst->rx_buff)
+ return -ENOMEM;
+ }
+
+ qinst->queue_buff_start = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_first_reg);
+ qinst->queue_buff_end = inst->queue_proxy_region +
+ Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_last_reg);
+ qinst->queue_state = inst->queue_state_debug_region +
+ Q_STATE_OFFSET(qinst->queue_id);
+ qinst->chan = chan;
+
+ chan->con_priv = qinst;
+
+ dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
+ idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
+ qinst->queue_buff_start, qinst->queue_buff_end);
+ return 0;
+}
+
+/* Queue operations */
+static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
+ .startup = ti_msgmgr_queue_startup,
+ .shutdown = ti_msgmgr_queue_shutdown,
+ .peek_data = ti_msgmgr_queue_peek_data,
+ .last_tx_done = ti_msgmgr_last_tx_done,
+ .send_data = ti_msgmgr_send_data,
+};
+
+/* Keystone K2G SoC integration details */
+static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
+ {.queue_id = 0, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 1, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 2, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 3, .proxy_id = 0, .is_tx = true,},
+ {.queue_id = 5, .proxy_id = 2, .is_tx = false,},
+ {.queue_id = 56, .proxy_id = 1, .is_tx = true,},
+ {.queue_id = 57, .proxy_id = 2, .is_tx = false,},
+ {.queue_id = 58, .proxy_id = 3, .is_tx = true,},
+ {.queue_id = 59, .proxy_id = 4, .is_tx = true,},
+ {.queue_id = 60, .proxy_id = 5, .is_tx = true,},
+ {.queue_id = 61, .proxy_id = 6, .is_tx = true,},
+};
+
+static const struct ti_msgmgr_desc k2g_desc = {
+ .queue_count = 64,
+ .max_message_size = 64,
+ .max_messages = 128,
+ .q_slices = 1,
+ .q_proxies = 1,
+ .data_first_reg = 16,
+ .data_last_reg = 31,
+ .tx_polled = false,
+ .valid_queues = k2g_valid_queues,
+ .num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
+};
+
+static const struct of_device_id ti_msgmgr_of_match[] = {
+ {.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
+
+static int ti_msgmgr_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct device_node *np;
+ struct resource *res;
+ const struct ti_msgmgr_desc *desc;
+ struct ti_msgmgr_inst *inst;
+ struct ti_queue_inst *qinst;
+ struct mbox_controller *mbox;
+ struct mbox_chan *chans;
+ int queue_count;
+ int i;
+ int ret = -EINVAL;
+ const struct ti_msgmgr_valid_queue_desc *queue_desc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "no OF information\n");
+ return -EINVAL;
+ }
+ np = dev->of_node;
+
+ of_id = of_match_device(ti_msgmgr_of_match, dev);
+ if (!of_id) {
+ dev_err(dev, "OF data missing\n");
+ return -EINVAL;
+ }
+ desc = of_id->data;
+
+ inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->dev = dev;
+ inst->desc = desc;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "queue_proxy_region");
+ inst->queue_proxy_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_proxy_region))
+ return PTR_ERR(inst->queue_proxy_region);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "queue_state_debug_region");
+ inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inst->queue_state_debug_region))
+ return PTR_ERR(inst->queue_state_debug_region);
+
+ dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
+ inst->queue_proxy_region, inst->queue_state_debug_region);
+
+ queue_count = desc->num_valid_queues;
+ if (!queue_count || queue_count > desc->queue_count) {
+ dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
+ queue_count, desc->queue_count);
+ return -ERANGE;
+ }
+ inst->num_valid_queues = queue_count;
+
+ qinst = devm_kzalloc(dev, sizeof(*qinst) * queue_count, GFP_KERNEL);
+ if (!qinst)
+ return -ENOMEM;
+ inst->qinsts = qinst;
+
+ chans = devm_kzalloc(dev, sizeof(*chans) * queue_count, GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+ inst->chans = chans;
+
+ for (i = 0, queue_desc = desc->valid_queues;
+ i < queue_count; i++, qinst++, chans++, queue_desc++) {
+ ret = ti_msgmgr_queue_setup(i, dev, np, inst,
+ desc, queue_desc, qinst, chans);
+ if (ret)
+ return ret;
+ }
+
+ mbox = &inst->mbox;
+ mbox->dev = dev;
+ mbox->ops = &ti_msgmgr_chan_ops;
+ mbox->chans = inst->chans;
+ mbox->num_chans = inst->num_valid_queues;
+ mbox->txdone_irq = false;
+ mbox->txdone_poll = desc->tx_polled;
+ if (desc->tx_polled)
+ mbox->txpoll_period = desc->tx_poll_timeout_ms;
+ mbox->of_xlate = ti_msgmgr_of_xlate;
+
+ platform_set_drvdata(pdev, inst);
+ ret = mbox_controller_register(mbox);
+ if (ret)
+ dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);
+
+ return ret;
+}
+
+static int ti_msgmgr_remove(struct platform_device *pdev)
+{
+ struct ti_msgmgr_inst *inst;
+
+ inst = platform_get_drvdata(pdev);
+ mbox_controller_unregister(&inst->mbox);
+
+ return 0;
+}
+
+static struct platform_driver ti_msgmgr_driver = {
+ .probe = ti_msgmgr_probe,
+ .remove = ti_msgmgr_remove,
+ .driver = {
+ .name = "ti-msgmgr",
+ .of_match_table = of_match_ptr(ti_msgmgr_of_match),
+ },
+};
+module_platform_driver(ti_msgmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI message manager driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-msgmgr");
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 0a2e7273db9e..02a5345a44a6 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -249,6 +249,7 @@ config DM_DEBUG_BLOCK_STACK_TRACING
block manager locking used by thin provisioning and caching.
If unsure, say N.
+
config DM_BIO_PRISON
tristate
depends on BLK_DEV_DM
@@ -304,16 +305,6 @@ config DM_CACHE
algorithms used to select which blocks are promoted, demoted,
cleaned etc. It supports writeback and writethrough modes.
-config DM_CACHE_MQ
- tristate "MQ Cache Policy (EXPERIMENTAL)"
- depends on DM_CACHE
- default y
- ---help---
- A cache policy that uses a multiqueue ordered by recent hit
- count to select which blocks should be promoted and demoted.
- This is meant to be a general purpose policy. It prioritises
- reads over writes.
-
config DM_CACHE_SMQ
tristate "Stochastic MQ Cache Policy (EXPERIMENTAL)"
depends on DM_CACHE
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 62a65764e8e0..52ba8dd82821 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -12,7 +12,6 @@ dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
-dm-cache-mq-y += dm-cache-policy-mq.o
dm-cache-smq-y += dm-cache-policy-smq.o
dm-cache-cleaner-y += dm-cache-policy-cleaner.o
dm-era-y += dm-era-target.o
@@ -55,7 +54,6 @@ obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
-obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
obj-$(CONFIG_DM_ERA) += dm-era.o
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 8d0ead98eb6e..a296425a7270 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1015,8 +1015,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
*/
atomic_set(&dc->count, 1);
- if (bch_cached_dev_writeback_start(dc))
+ /* Spawn the writeback thread, but keep it blocked until attach completes */
+ down_write(&dc->writeback_lock);
+ if (bch_cached_dev_writeback_start(dc)) {
+ up_write(&dc->writeback_lock);
return -ENOMEM;
+ }
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(dc);
@@ -1028,6 +1032,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
+ /* Allow the writeback thread to proceed */
+ up_write(&dc->writeback_lock);
+
pr_info("Caching %s as %s on set %pU",
bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
dc->disk.c->sb.set_uuid);
@@ -1366,6 +1373,9 @@ static void cache_set_flush(struct closure *cl)
struct btree *b;
unsigned i;
+ if (!c)
+ closure_return(cl);
+
bch_cache_accounting_destroy(&c->accounting);
kobject_put(&c->internal);
@@ -1828,11 +1838,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
return 0;
}
-static void register_cache(struct cache_sb *sb, struct page *sb_page,
+static int register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
- const char *err = "cannot allocate memory";
+ const char *err = NULL;
+ int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
@@ -1847,27 +1858,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
- if (cache_alloc(sb, ca) != 0)
+ ret = cache_alloc(sb, ca);
+ if (ret != 0)
goto err;
- err = "error creating kobject";
- if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
- goto err;
+ if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
+ err = "error calling kobject_add";
+ ret = -ENOMEM;
+ goto out;
+ }
mutex_lock(&bch_register_lock);
err = register_cache_set(ca);
mutex_unlock(&bch_register_lock);
- if (err)
- goto err;
+ if (err) {
+ ret = -ENODEV;
+ goto out;
+ }
pr_info("registered cache device %s", bdevname(bdev, name));
+
out:
kobject_put(&ca->kobj);
- return;
+
err:
- pr_notice("error opening %s: %s", bdevname(bdev, name), err);
- goto out;
+ if (err)
+ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+
+ return ret;
}
/* Global interfaces/init */
@@ -1965,7 +1984,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!ca)
goto err_close;
- register_cache(sb, sb_page, bdev, ca);
+ if (register_cache(sb, sb_page, bdev, ca) != 0)
+ goto err_close;
}
out:
if (sb_page)
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index d80cce499a56..3fe86b54d50b 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -98,7 +98,6 @@ __acquires(bitmap->lock)
bitmap->bp[page].hijacked) {
/* somebody beat us to getting the page */
kfree(mappage);
- return 0;
} else {
/* no page was in place and we have one, so install it */
@@ -323,7 +322,7 @@ __clear_page_buffers(struct page *page)
{
ClearPagePrivate(page);
set_page_private(page, 0);
- page_cache_release(page);
+ put_page(page);
}
static void free_buffers(struct page *page)
{
@@ -510,8 +509,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
sb->chunksize = cpu_to_le32(chunksize);
daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
- if (!daemon_sleep ||
- (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
+ if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
daemon_sleep = 5 * HZ;
}
@@ -1675,6 +1673,9 @@ static void bitmap_free(struct bitmap *bitmap)
if (!bitmap) /* there was no bitmap */
return;
+ if (bitmap->sysfs_can_clear)
+ sysfs_put(bitmap->sysfs_can_clear);
+
if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
md_cluster_stop(bitmap->mddev);
@@ -1714,15 +1715,13 @@ void bitmap_destroy(struct mddev *mddev)
if (mddev->thread)
mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
- if (bitmap->sysfs_can_clear)
- sysfs_put(bitmap->sysfs_can_clear);
-
bitmap_free(bitmap);
}
/*
* initialize the bitmap structure
* if this returns an error, bitmap_destroy must be called to do clean up
+ * once mddev->bitmap is set
*/
struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
@@ -1867,8 +1866,10 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
struct bitmap_counts *counts;
struct bitmap *bitmap = bitmap_create(mddev, slot);
- if (IS_ERR(bitmap))
+ if (IS_ERR(bitmap)) {
+ bitmap_free(bitmap);
return PTR_ERR(bitmap);
+ }
rv = bitmap_init_from_disk(bitmap, 0);
if (rv)
@@ -2172,14 +2173,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
else {
mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
- if (rv) {
- bitmap_destroy(mddev);
+ if (rv)
mddev->bitmap_info.offset = 0;
- }
}
mddev->pers->quiesce(mddev, 0);
- if (rv)
+ if (rv) {
+ bitmap_destroy(mddev);
return rv;
+ }
}
}
}
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 7d5c3a610ca5..5e3fcd6ecf77 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -49,8 +49,8 @@
* When we set a bit, or in the counter (to start a write), if the fields is
* 0, we first set the disk bit and set the counter to 1.
*
- * If the counter is 0, the on-disk bit is clear and the stipe is clean
- * Anything that dirties the stipe pushes the counter to 2 (at least)
+ * If the counter is 0, the on-disk bit is clear and the stripe is clean
+ * Anything that dirties the stripe pushes the counter to 2 (at least)
* and sets the on-disk bit (lazily).
* If a periodic sweep find the counter at 2, it is decremented to 1.
* If the sweep find the counter at 1, the on-disk bit is cleared and the
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index f6543f3a970f..3970cda10080 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -867,18 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
return 0;
}
-#define WRITE_LOCK(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
- return -EINVAL; \
- down_write(&cmd->root_lock)
+static bool cmd_write_lock(struct dm_cache_metadata *cmd)
+{
+ down_write(&cmd->root_lock);
+ if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
+ up_write(&cmd->root_lock);
+ return false;
+ }
+ return true;
+}
+
+#define WRITE_LOCK(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return -EINVAL; \
+ } while (0)
-#define WRITE_LOCK_VOID(cmd) \
- if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) \
- return; \
- down_write(&cmd->root_lock)
+#define WRITE_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_write_lock((cmd))) \
+ return; \
+ } while (0)
#define WRITE_UNLOCK(cmd) \
- up_write(&cmd->root_lock)
+ up_write(&(cmd)->root_lock)
+
+static bool cmd_read_lock(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ if (cmd->fail_io) {
+ up_read(&cmd->root_lock);
+ return false;
+ }
+ return true;
+}
+
+#define READ_LOCK(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return -EINVAL; \
+ } while (0)
+
+#define READ_LOCK_VOID(cmd) \
+ do { \
+ if (!cmd_read_lock((cmd))) \
+ return; \
+ } while (0)
+
+#define READ_UNLOCK(cmd) \
+ up_read(&(cmd)->root_lock)
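The helpers above fix an ordering bug: the old macros tested fail_io before taking root_lock, an unlocked read that could race with writers, and their unguarded multi-statement bodies were unsafe under a bare if. A minimal usage sketch of the new pattern (the accessor is illustrative):

static int example_read_accessor(struct dm_cache_metadata *cmd, bool *result)
{
        READ_LOCK(cmd);         /* returns -EINVAL if fail_io is set */
        *result = !!cmd->changed;
        READ_UNLOCK(cmd);       /* lock is never left held on failure */

        return 0;
}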
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
@@ -1015,22 +1052,20 @@ int dm_cache_load_discards(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_discards(cmd, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
{
- dm_cblock_t r;
-
- down_read(&cmd->root_lock);
- r = cmd->cache_blocks;
- up_read(&cmd->root_lock);
+ READ_LOCK(cmd);
+ *result = cmd->cache_blocks;
+ READ_UNLOCK(cmd);
- return r;
+ return 0;
}
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
@@ -1188,9 +1223,9 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = __load_mappings(cmd, policy, fn, context);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1215,18 +1250,18 @@ static int __dump_mappings(struct dm_cache_metadata *cmd)
void dm_cache_dump(struct dm_cache_metadata *cmd)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
__dump_mappings(cmd);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
int r;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = cmd->changed;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1276,9 +1311,9 @@ int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
struct dm_cache_statistics *stats)
{
- down_read(&cmd->root_lock);
+ READ_LOCK_VOID(cmd);
*stats = cmd->stats;
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
}
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
@@ -1312,9 +1347,9 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_free(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1324,9 +1359,9 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
{
int r = -EINVAL;
- down_read(&cmd->root_lock);
+ READ_LOCK(cmd);
r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
- up_read(&cmd->root_lock);
+ READ_UNLOCK(cmd);
return r;
}
@@ -1417,7 +1452,13 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
- return blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ int r;
+
+ READ_LOCK(cmd);
+ r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
+ READ_UNLOCK(cmd);
+
+ return r;
}
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
@@ -1440,10 +1481,7 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
- /*
- * We ignore fail_io for this function.
- */
- down_write(&cmd->root_lock);
+ WRITE_LOCK(cmd);
set_bit(NEEDS_CHECK, &cmd->flags);
r = superblock_lock(cmd, &sblock);
@@ -1458,19 +1496,17 @@ int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
dm_bm_unlock(sblock);
out:
- up_write(&cmd->root_lock);
+ WRITE_UNLOCK(cmd);
return r;
}
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd)
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
{
- bool needs_check;
+ READ_LOCK(cmd);
+ *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
+ READ_UNLOCK(cmd);
- down_read(&cmd->root_lock);
- needs_check = !!test_bit(NEEDS_CHECK, &cmd->flags);
- up_read(&cmd->root_lock);
-
- return needs_check;
+ return 0;
}
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 2ffee21f318d..8528744195e5 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -66,7 +66,7 @@ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
* origin blocks to map to.
*/
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
-dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
sector_t discard_block_size,
@@ -137,7 +137,7 @@ int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *
*/
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
-bool dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd);
+int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
deleted file mode 100644
index ddb26980cd66..000000000000
--- a/drivers/md/dm-cache-policy-mq.c
+++ /dev/null
@@ -1,1473 +0,0 @@
-/*
- * Copyright (C) 2012 Red Hat. All rights reserved.
- *
- * This file is released under the GPL.
- */
-
-#include "dm-cache-policy.h"
-#include "dm.h"
-
-#include <linux/hash.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#define DM_MSG_PREFIX "cache-policy-mq"
-
-static struct kmem_cache *mq_entry_cache;
-
-/*----------------------------------------------------------------*/
-
-static unsigned next_power(unsigned n, unsigned min)
-{
- return roundup_pow_of_two(max(n, min));
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Large, sequential ios are probably better left on the origin device since
- * spindles tend to have good bandwidth.
- *
- * The io_tracker tries to spot when the io is in one of these sequential
- * modes.
- *
- * Two thresholds to switch between random and sequential io mode are defaulting
- * as follows and can be adjusted via the constructor and message interfaces.
- */
-#define RANDOM_THRESHOLD_DEFAULT 4
-#define SEQUENTIAL_THRESHOLD_DEFAULT 512
-
-enum io_pattern {
- PATTERN_SEQUENTIAL,
- PATTERN_RANDOM
-};
-
-struct io_tracker {
- enum io_pattern pattern;
-
- unsigned nr_seq_samples;
- unsigned nr_rand_samples;
- unsigned thresholds[2];
-
- dm_oblock_t last_end_oblock;
-};
-
-static void iot_init(struct io_tracker *t,
- int sequential_threshold, int random_threshold)
-{
- t->pattern = PATTERN_RANDOM;
- t->nr_seq_samples = 0;
- t->nr_rand_samples = 0;
- t->last_end_oblock = 0;
- t->thresholds[PATTERN_RANDOM] = random_threshold;
- t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
-}
-
-static enum io_pattern iot_pattern(struct io_tracker *t)
-{
- return t->pattern;
-}
-
-static void iot_update_stats(struct io_tracker *t, struct bio *bio)
-{
- if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
- t->nr_seq_samples++;
- else {
- /*
- * Just one non-sequential IO is enough to reset the
- * counters.
- */
- if (t->nr_seq_samples) {
- t->nr_seq_samples = 0;
- t->nr_rand_samples = 0;
- }
-
- t->nr_rand_samples++;
- }
-
- t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
-}
-
-static void iot_check_for_pattern_switch(struct io_tracker *t)
-{
- switch (t->pattern) {
- case PATTERN_SEQUENTIAL:
- if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
- t->pattern = PATTERN_RANDOM;
- t->nr_seq_samples = t->nr_rand_samples = 0;
- }
- break;
-
- case PATTERN_RANDOM:
- if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
- t->pattern = PATTERN_SEQUENTIAL;
- t->nr_seq_samples = t->nr_rand_samples = 0;
- }
- break;
- }
-}
-
-static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
-{
- iot_update_stats(t, bio);
- iot_check_for_pattern_switch(t);
-}
-
-/*----------------------------------------------------------------*/
-
-
-/*
- * This queue is divided up into different levels. Allowing us to push
- * entries to the back of any of the levels. Think of it as a partially
- * sorted queue.
- */
-#define NR_QUEUE_LEVELS 16u
-#define NR_SENTINELS NR_QUEUE_LEVELS * 3
-
-#define WRITEBACK_PERIOD HZ
-
-struct queue {
- unsigned nr_elts;
- bool current_writeback_sentinels;
- unsigned long next_writeback;
- struct list_head qs[NR_QUEUE_LEVELS];
- struct list_head sentinels[NR_SENTINELS];
-};
-
-static void queue_init(struct queue *q)
-{
- unsigned i;
-
- q->nr_elts = 0;
- q->current_writeback_sentinels = false;
- q->next_writeback = 0;
- for (i = 0; i < NR_QUEUE_LEVELS; i++) {
- INIT_LIST_HEAD(q->qs + i);
- INIT_LIST_HEAD(q->sentinels + i);
- INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
- INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
- }
-}
-
-static unsigned queue_size(struct queue *q)
-{
- return q->nr_elts;
-}
-
-static bool queue_empty(struct queue *q)
-{
- return q->nr_elts == 0;
-}
-
-/*
- * Insert an entry to the back of the given level.
- */
-static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
-{
- q->nr_elts++;
- list_add_tail(elt, q->qs + level);
-}
-
-static void queue_remove(struct queue *q, struct list_head *elt)
-{
- q->nr_elts--;
- list_del(elt);
-}
-
-static bool is_sentinel(struct queue *q, struct list_head *h)
-{
- return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
-}
-
-/*
- * Gives us the oldest entry of the lowest popoulated level. If the first
- * level is emptied then we shift down one level.
- */
-static struct list_head *queue_peek(struct queue *q)
-{
- unsigned level;
- struct list_head *h;
-
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each(h, q->qs + level)
- if (!is_sentinel(q, h))
- return h;
-
- return NULL;
-}
-
-static struct list_head *queue_pop(struct queue *q)
-{
- struct list_head *r = queue_peek(q);
-
- if (r) {
- q->nr_elts--;
- list_del(r);
- }
-
- return r;
-}
-
-/*
- * Pops an entry from a level that is not past a sentinel.
- */
-static struct list_head *queue_pop_old(struct queue *q)
-{
- unsigned level;
- struct list_head *h;
-
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each(h, q->qs + level) {
- if (is_sentinel(q, h))
- break;
-
- q->nr_elts--;
- list_del(h);
- return h;
- }
-
- return NULL;
-}
-
-static struct list_head *list_pop(struct list_head *lh)
-{
- struct list_head *r = lh->next;
-
- BUG_ON(!r);
- list_del_init(r);
-
- return r;
-}
-
-static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
-{
- if (q->current_writeback_sentinels)
- return q->sentinels + NR_QUEUE_LEVELS + level;
- else
- return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
-}
-
-static void queue_update_writeback_sentinels(struct queue *q)
-{
- unsigned i;
- struct list_head *h;
-
- if (time_after(jiffies, q->next_writeback)) {
- for (i = 0; i < NR_QUEUE_LEVELS; i++) {
- h = writeback_sentinel(q, i);
- list_del(h);
- list_add_tail(h, q->qs + i);
- }
-
- q->next_writeback = jiffies + WRITEBACK_PERIOD;
- q->current_writeback_sentinels = !q->current_writeback_sentinels;
- }
-}
-
-/*
- * Sometimes we want to iterate through entries that have been pushed since
- * a certain event. We use sentinel entries on the queues to delimit these
- * 'tick' events.
- */
-static void queue_tick(struct queue *q)
-{
- unsigned i;
-
- for (i = 0; i < NR_QUEUE_LEVELS; i++) {
- list_del(q->sentinels + i);
- list_add_tail(q->sentinels + i, q->qs + i);
- }
-}
-
-typedef void (*iter_fn)(struct list_head *, void *);
-static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
-{
- unsigned i;
- struct list_head *h;
-
- for (i = 0; i < NR_QUEUE_LEVELS; i++) {
- list_for_each_prev(h, q->qs + i) {
- if (is_sentinel(q, h))
- break;
-
- fn(h, context);
- }
- }
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Describes a cache entry. Used in both the cache and the pre_cache.
- */
-struct entry {
- struct hlist_node hlist;
- struct list_head list;
- dm_oblock_t oblock;
-
- /*
- * FIXME: pack these better
- */
- bool dirty:1;
- unsigned hit_count;
-};
-
-/*
- * Rather than storing the cblock in an entry, we allocate all entries in
- * an array, and infer the cblock from the entry position.
- *
- * Free entries are linked together into a list.
- */
-struct entry_pool {
- struct entry *entries, *entries_end;
- struct list_head free;
- unsigned nr_allocated;
-};
-
-static int epool_init(struct entry_pool *ep, unsigned nr_entries)
-{
- unsigned i;
-
- ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
- if (!ep->entries)
- return -ENOMEM;
-
- ep->entries_end = ep->entries + nr_entries;
-
- INIT_LIST_HEAD(&ep->free);
- for (i = 0; i < nr_entries; i++)
- list_add(&ep->entries[i].list, &ep->free);
-
- ep->nr_allocated = 0;
-
- return 0;
-}
-
-static void epool_exit(struct entry_pool *ep)
-{
- vfree(ep->entries);
-}
-
-static struct entry *alloc_entry(struct entry_pool *ep)
-{
- struct entry *e;
-
- if (list_empty(&ep->free))
- return NULL;
-
- e = list_entry(list_pop(&ep->free), struct entry, list);
- INIT_LIST_HEAD(&e->list);
- INIT_HLIST_NODE(&e->hlist);
- ep->nr_allocated++;
-
- return e;
-}
-
-/*
- * This assumes the cblock hasn't already been allocated.
- */
-static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
-{
- struct entry *e = ep->entries + from_cblock(cblock);
-
- list_del_init(&e->list);
- INIT_HLIST_NODE(&e->hlist);
- ep->nr_allocated++;
-
- return e;
-}
-
-static void free_entry(struct entry_pool *ep, struct entry *e)
-{
- BUG_ON(!ep->nr_allocated);
- ep->nr_allocated--;
- INIT_HLIST_NODE(&e->hlist);
- list_add(&e->list, &ep->free);
-}
-
-/*
- * Returns NULL if the entry is free.
- */
-static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
-{
- struct entry *e = ep->entries + from_cblock(cblock);
- return !hlist_unhashed(&e->hlist) ? e : NULL;
-}
-
-static bool epool_empty(struct entry_pool *ep)
-{
- return list_empty(&ep->free);
-}
-
-static bool in_pool(struct entry_pool *ep, struct entry *e)
-{
- return e >= ep->entries && e < ep->entries_end;
-}
-
-static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
-{
- return to_cblock(e - ep->entries);
-}
-
-/*----------------------------------------------------------------*/
-
-struct mq_policy {
- struct dm_cache_policy policy;
-
- /* protects everything */
- struct mutex lock;
- dm_cblock_t cache_size;
- struct io_tracker tracker;
-
- /*
- * Entries come from two pools, one of pre-cache entries, and one
- * for the cache proper.
- */
- struct entry_pool pre_cache_pool;
- struct entry_pool cache_pool;
-
- /*
- * We maintain three queues of entries. The cache proper,
- * consisting of a clean and dirty queue, contains the currently
- * active mappings. Whereas the pre_cache tracks blocks that
- * are being hit frequently and potential candidates for promotion
- * to the cache.
- */
- struct queue pre_cache;
- struct queue cache_clean;
- struct queue cache_dirty;
-
- /*
- * Keeps track of time, incremented by the core. We use this to
- * avoid attributing multiple hits within the same tick.
- *
- * Access to tick_protected should be done with the spin lock held.
- * It's copied to tick at the start of the map function (within the
- * mutex).
- */
- spinlock_t tick_lock;
- unsigned tick_protected;
- unsigned tick;
-
- /*
- * A count of the number of times the map function has been called
- * and found an entry in the pre_cache or cache. Currently used to
- * calculate the generation.
- */
- unsigned hit_count;
-
- /*
- * A generation is a longish period that is used to trigger some
- * book keeping effects. eg, decrementing hit counts on entries.
- * This is needed to allow the cache to evolve as io patterns
- * change.
- */
- unsigned generation;
- unsigned generation_period; /* in lookups (will probably change) */
-
- unsigned discard_promote_adjustment;
- unsigned read_promote_adjustment;
- unsigned write_promote_adjustment;
-
- /*
- * The hash table allows us to quickly find an entry by origin
- * block. Both pre_cache and cache entries are in here.
- */
- unsigned nr_buckets;
- dm_block_t hash_bits;
- struct hlist_head *table;
-};
-
-#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
-#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
-#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
-#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128
-
-/*----------------------------------------------------------------*/
-
-/*
- * Simple hash table implementation. Should replace with the standard hash
- * table that's making its way upstream.
- */
-static void hash_insert(struct mq_policy *mq, struct entry *e)
-{
- unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);
-
- hlist_add_head(&e->hlist, mq->table + h);
-}
-
-static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
-{
- unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
- struct hlist_head *bucket = mq->table + h;
- struct entry *e;
-
- hlist_for_each_entry(e, bucket, hlist)
- if (e->oblock == oblock) {
- hlist_del(&e->hlist);
- hlist_add_head(&e->hlist, bucket);
- return e;
- }
-
- return NULL;
-}
-
-static void hash_remove(struct entry *e)
-{
- hlist_del(&e->hlist);
-}
-
-/*----------------------------------------------------------------*/
-
-static bool any_free_cblocks(struct mq_policy *mq)
-{
- return !epool_empty(&mq->cache_pool);
-}
-
-static bool any_clean_cblocks(struct mq_policy *mq)
-{
- return !queue_empty(&mq->cache_clean);
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Now we get to the meat of the policy. This section deals with deciding
- * when to to add entries to the pre_cache and cache, and move between
- * them.
- */
-
-/*
- * The queue level is based on the log2 of the hit count.
- */
-static unsigned queue_level(struct entry *e)
-{
- return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
-}
-
-static bool in_cache(struct mq_policy *mq, struct entry *e)
-{
- return in_pool(&mq->cache_pool, e);
-}
-
-/*
- * Inserts the entry into the pre_cache or the cache. Ensures the cache
- * block is marked as allocated if necc. Inserts into the hash table.
- * Sets the tick which records when the entry was last moved about.
- */
-static void push(struct mq_policy *mq, struct entry *e)
-{
- hash_insert(mq, e);
-
- if (in_cache(mq, e))
- queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
- queue_level(e), &e->list);
- else
- queue_push(&mq->pre_cache, queue_level(e), &e->list);
-}
-
-/*
- * Removes an entry from pre_cache or cache. Removes from the hash table.
- */
-static void del(struct mq_policy *mq, struct entry *e)
-{
- if (in_cache(mq, e))
- queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
- else
- queue_remove(&mq->pre_cache, &e->list);
-
- hash_remove(e);
-}
-
-/*
- * Like del, except it removes the first entry in the queue (ie. the least
- * recently used).
- */
-static struct entry *pop(struct mq_policy *mq, struct queue *q)
-{
- struct entry *e;
- struct list_head *h = queue_pop(q);
-
- if (!h)
- return NULL;
-
- e = container_of(h, struct entry, list);
- hash_remove(e);
-
- return e;
-}
-
-static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
-{
- struct entry *e;
- struct list_head *h = queue_pop_old(q);
-
- if (!h)
- return NULL;
-
- e = container_of(h, struct entry, list);
- hash_remove(e);
-
- return e;
-}
-
-static struct entry *peek(struct queue *q)
-{
- struct list_head *h = queue_peek(q);
- return h ? container_of(h, struct entry, list) : NULL;
-}
-
-/*
- * The promotion threshold is adjusted every generation. As are the counts
- * of the entries.
- *
- * At the moment the threshold is taken by averaging the hit counts of some
- * of the entries in the cache (the first 20 entries across all levels in
- * ascending order, giving preference to the clean entries at each level).
- *
- * We can be much cleverer than this though. For example, each promotion
- * could bump up the threshold helping to prevent churn. Much more to do
- * here.
- */
-
-#define MAX_TO_AVERAGE 20
-
-static void check_generation(struct mq_policy *mq)
-{
- unsigned total = 0, nr = 0, count = 0, level;
- struct list_head *head;
- struct entry *e;
-
- if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
- mq->hit_count = 0;
- mq->generation++;
-
- for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
- head = mq->cache_clean.qs + level;
- list_for_each_entry(e, head, list) {
- nr++;
- total += e->hit_count;
-
- if (++count >= MAX_TO_AVERAGE)
- break;
- }
-
- head = mq->cache_dirty.qs + level;
- list_for_each_entry(e, head, list) {
- nr++;
- total += e->hit_count;
-
- if (++count >= MAX_TO_AVERAGE)
- break;
- }
- }
- }
-}
-
-/*
- * Whenever we use an entry we bump up its hit counter, and push it to
- * the back of its current level.
- */
-static void requeue(struct mq_policy *mq, struct entry *e)
-{
- check_generation(mq);
- del(mq, e);
- push(mq, e);
-}
-
-/*
- * Demote the least recently used entry from the cache to the pre_cache.
- * Returns the new cache entry to use, and the old origin block it was
- * mapped to.
- *
- * We drop the hit count on the demoted entry back to 1 to stop it bouncing
- * straight back into the cache if it's subsequently hit. There are
- * various options here, and more experimentation would be good:
- *
- * - just forget about the demoted entry completely (ie. don't insert it
- *   into the pre_cache).
- * - divide the hit count rather than setting it to some hard coded value.
- * - set the hit count to a hard coded value other than 1, eg, is it better
- * if it goes in at level 2?
- */
-static int demote_cblock(struct mq_policy *mq,
- struct policy_locker *locker, dm_oblock_t *oblock)
-{
- struct entry *demoted = peek(&mq->cache_clean);
-
- if (!demoted)
- /*
- * We could get a block from mq->cache_dirty, but that
- * would add extra latency to the triggering bio as it
- * waits for the writeback. Better to not promote this
- * time and hope there's a clean block next time this block
- * is hit.
- */
- return -ENOSPC;
-
- if (locker->fn(locker, demoted->oblock))
- /*
- * We couldn't lock the demoted block.
- */
- return -EBUSY;
-
- del(mq, demoted);
- *oblock = demoted->oblock;
- free_entry(&mq->cache_pool, demoted);
-
- /*
- * We used to put the demoted block into the pre-cache, but I think
-	 * it's simpler to just let it work its way up from zero again.
- * Stops blocks flickering in and out of the cache.
- */
-
- return 0;
-}
-
-/*
- * Entries in the pre_cache whose hit count passes the promotion
- * threshold move to the cache proper. Working out the correct
- * value for the promotion_threshold is crucial to this policy.
- */
-static unsigned promote_threshold(struct mq_policy *mq)
-{
- struct entry *e;
-
- if (any_free_cblocks(mq))
- return 0;
-
- e = peek(&mq->cache_clean);
- if (e)
- return e->hit_count;
-
- e = peek(&mq->cache_dirty);
- if (e)
- return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;
-
- /* This should never happen */
- return 0;
-}
-
-/*
- * We modify the basic promotion_threshold depending on the specific io.
- *
- * If the origin block has been discarded then there's no cost to copy it
- * to the cache.
- *
- * We bias towards reads, since they can be demoted at no cost if they
- * haven't been dirtied.
- */
-static unsigned adjusted_promote_threshold(struct mq_policy *mq,
- bool discarded_oblock, int data_dir)
-{
- if (data_dir == READ)
- return promote_threshold(mq) + mq->read_promote_adjustment;
-
- if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
- /*
- * We don't need to do any copying at all, so give this a
- * very low threshold.
- */
- return mq->discard_promote_adjustment;
- }
-
- return promote_threshold(mq) + mq->write_promote_adjustment;
-}
-
-static bool should_promote(struct mq_policy *mq, struct entry *e,
- bool discarded_oblock, int data_dir)
-{
- return e->hit_count >=
- adjusted_promote_threshold(mq, discarded_oblock, data_dir);
-}
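
[Editor's note: putting promote_threshold() and the adjustments together, with
the defaults visible at the top of this file (read +4, write +8) and a base of,
say, 10 clean-queue hits: a read promotes after 14 hits and a write after 18,
while a discarded block with clean or free space promotes almost immediately.
A standalone sketch under those assumptions; the discard adjustment default is
not shown in this hunk, so 1 is assumed.]

#include <stdbool.h>
#include <stdio.h>

enum { READ_DIR, WRITE_DIR };

static unsigned adjusted_threshold(unsigned base, bool discarded,
				   bool have_clean_or_free, int dir)
{
	if (dir == READ_DIR)
		return base + 4;	/* read bias: demotion of clean blocks is free */
	if (discarded && have_clean_or_free)
		return 1;		/* assumed discard adjustment: no copy needed */
	return base + 8;		/* write penalty */
}

int main(void)
{
	/* base threshold 10: a read promotes at 14 hits, a write at 18 */
	printf("read: %u, write: %u, discarded write: %u\n",
	       adjusted_threshold(10, false, true, READ_DIR),
	       adjusted_threshold(10, false, true, WRITE_DIR),
	       adjusted_threshold(10, true, true, WRITE_DIR));
	return 0;
}
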
-
-static int cache_entry_found(struct mq_policy *mq,
- struct entry *e,
- struct policy_result *result)
-{
- requeue(mq, e);
-
- if (in_cache(mq, e)) {
- result->op = POLICY_HIT;
- result->cblock = infer_cblock(&mq->cache_pool, e);
- }
-
- return 0;
-}
-
-/*
- * Moves an entry from the pre_cache to the cache. The main work is
- * finding which cache block to use.
- */
-static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
- struct policy_locker *locker,
- struct policy_result *result)
-{
- int r;
- struct entry *new_e;
-
- /* Ensure there's a free cblock in the cache */
- if (epool_empty(&mq->cache_pool)) {
- result->op = POLICY_REPLACE;
- r = demote_cblock(mq, locker, &result->old_oblock);
- if (r) {
- result->op = POLICY_MISS;
- return 0;
- }
-
- } else
- result->op = POLICY_NEW;
-
- new_e = alloc_entry(&mq->cache_pool);
- BUG_ON(!new_e);
-
- new_e->oblock = e->oblock;
- new_e->dirty = false;
- new_e->hit_count = e->hit_count;
-
- del(mq, e);
- free_entry(&mq->pre_cache_pool, e);
- push(mq, new_e);
-
- result->cblock = infer_cblock(&mq->cache_pool, new_e);
-
- return 0;
-}
-
-static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
- bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_locker *locker,
- struct policy_result *result)
-{
- int r = 0;
-
- if (!should_promote(mq, e, discarded_oblock, data_dir)) {
- requeue(mq, e);
- result->op = POLICY_MISS;
-
- } else if (!can_migrate)
- r = -EWOULDBLOCK;
-
- else {
- requeue(mq, e);
- r = pre_cache_to_cache(mq, e, locker, result);
- }
-
- return r;
-}
-
-static void insert_in_pre_cache(struct mq_policy *mq,
- dm_oblock_t oblock)
-{
- struct entry *e = alloc_entry(&mq->pre_cache_pool);
-
- if (!e)
- /*
-		 * There's no spare entry structure, so we grab the least
-		 * recently used one from the pre_cache.
- */
- e = pop(mq, &mq->pre_cache);
-
- if (unlikely(!e)) {
- DMWARN("couldn't pop from pre cache");
- return;
- }
-
- e->dirty = false;
- e->oblock = oblock;
- e->hit_count = 1;
- push(mq, e);
-}
-
-static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
- struct policy_locker *locker,
- struct policy_result *result)
-{
- int r;
- struct entry *e;
-
- if (epool_empty(&mq->cache_pool)) {
- result->op = POLICY_REPLACE;
- r = demote_cblock(mq, locker, &result->old_oblock);
- if (unlikely(r)) {
- result->op = POLICY_MISS;
- insert_in_pre_cache(mq, oblock);
- return;
- }
-
- /*
- * This will always succeed, since we've just demoted.
- */
- e = alloc_entry(&mq->cache_pool);
- BUG_ON(!e);
-
- } else {
- e = alloc_entry(&mq->cache_pool);
- result->op = POLICY_NEW;
- }
-
- e->oblock = oblock;
- e->dirty = false;
- e->hit_count = 1;
- push(mq, e);
-
- result->cblock = infer_cblock(&mq->cache_pool, e);
-}
-
-static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
- bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_locker *locker,
- struct policy_result *result)
-{
- if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
- if (can_migrate)
- insert_in_cache(mq, oblock, locker, result);
- else
- return -EWOULDBLOCK;
- } else {
- insert_in_pre_cache(mq, oblock);
- result->op = POLICY_MISS;
- }
-
- return 0;
-}
-
-/*
- * Looks the oblock up in the hash table, then decides whether to put it
- * in the pre_cache, the cache, etc.
- */
-static int map(struct mq_policy *mq, dm_oblock_t oblock,
- bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_locker *locker,
- struct policy_result *result)
-{
- int r = 0;
- struct entry *e = hash_lookup(mq, oblock);
-
- if (e && in_cache(mq, e))
- r = cache_entry_found(mq, e, result);
-
- else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
- iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
- result->op = POLICY_MISS;
-
- else if (e)
- r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
- data_dir, locker, result);
-
- else
- r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
- data_dir, locker, result);
-
- if (r == -EWOULDBLOCK)
- result->op = POLICY_MISS;
-
- return r;
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * Public interface, via the policy struct. See dm-cache-policy.h for a
- * description of these.
- */
-
-static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
-{
- return container_of(p, struct mq_policy, policy);
-}
-
-static void mq_destroy(struct dm_cache_policy *p)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- vfree(mq->table);
- epool_exit(&mq->cache_pool);
- epool_exit(&mq->pre_cache_pool);
- kfree(mq);
-}
-
-static void update_pre_cache_hits(struct list_head *h, void *context)
-{
- struct entry *e = container_of(h, struct entry, list);
- e->hit_count++;
-}
-
-static void update_cache_hits(struct list_head *h, void *context)
-{
- struct mq_policy *mq = context;
- struct entry *e = container_of(h, struct entry, list);
- e->hit_count++;
- mq->hit_count++;
-}
-
-static void copy_tick(struct mq_policy *mq)
-{
- unsigned long flags, tick;
-
- spin_lock_irqsave(&mq->tick_lock, flags);
- tick = mq->tick_protected;
- if (tick != mq->tick) {
- queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
- queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
- queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
- mq->tick = tick;
- }
-
- queue_tick(&mq->pre_cache);
- queue_tick(&mq->cache_dirty);
- queue_tick(&mq->cache_clean);
- queue_update_writeback_sentinels(&mq->cache_dirty);
- spin_unlock_irqrestore(&mq->tick_lock, flags);
-}
-
-static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
- bool can_block, bool can_migrate, bool discarded_oblock,
- struct bio *bio, struct policy_locker *locker,
- struct policy_result *result)
-{
- int r;
- struct mq_policy *mq = to_mq_policy(p);
-
- result->op = POLICY_MISS;
-
- if (can_block)
- mutex_lock(&mq->lock);
- else if (!mutex_trylock(&mq->lock))
- return -EWOULDBLOCK;
-
- copy_tick(mq);
-
- iot_examine_bio(&mq->tracker, bio);
- r = map(mq, oblock, can_migrate, discarded_oblock,
- bio_data_dir(bio), locker, result);
-
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
-{
- int r;
- struct mq_policy *mq = to_mq_policy(p);
- struct entry *e;
-
- if (!mutex_trylock(&mq->lock))
- return -EWOULDBLOCK;
-
- e = hash_lookup(mq, oblock);
- if (e && in_cache(mq, e)) {
- *cblock = infer_cblock(&mq->cache_pool, e);
- r = 0;
- } else
- r = -ENOENT;
-
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
-{
- struct entry *e;
-
- e = hash_lookup(mq, oblock);
- BUG_ON(!e || !in_cache(mq, e));
-
- del(mq, e);
- e->dirty = set;
- push(mq, e);
-}
-
-static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- __mq_set_clear_dirty(mq, oblock, true);
- mutex_unlock(&mq->lock);
-}
-
-static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- __mq_set_clear_dirty(mq, oblock, false);
- mutex_unlock(&mq->lock);
-}
-
-static int mq_load_mapping(struct dm_cache_policy *p,
- dm_oblock_t oblock, dm_cblock_t cblock,
- uint32_t hint, bool hint_valid)
-{
- struct mq_policy *mq = to_mq_policy(p);
- struct entry *e;
-
- e = alloc_particular_entry(&mq->cache_pool, cblock);
- e->oblock = oblock;
- e->dirty = false; /* this gets corrected in a minute */
- e->hit_count = hint_valid ? hint : 1;
- push(mq, e);
-
- return 0;
-}
-
-static int mq_save_hints(struct mq_policy *mq, struct queue *q,
- policy_walk_fn fn, void *context)
-{
- int r;
- unsigned level;
- struct list_head *h;
- struct entry *e;
-
- for (level = 0; level < NR_QUEUE_LEVELS; level++)
- list_for_each(h, q->qs + level) {
- if (is_sentinel(q, h))
- continue;
-
- e = container_of(h, struct entry, list);
- r = fn(context, infer_cblock(&mq->cache_pool, e),
- e->oblock, e->hit_count);
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
- void *context)
-{
- struct mq_policy *mq = to_mq_policy(p);
- int r = 0;
-
- mutex_lock(&mq->lock);
-
- r = mq_save_hints(mq, &mq->cache_clean, fn, context);
- if (!r)
- r = mq_save_hints(mq, &mq->cache_dirty, fn, context);
-
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
-{
- struct entry *e;
-
- e = hash_lookup(mq, oblock);
- BUG_ON(!e || !in_cache(mq, e));
-
- del(mq, e);
- free_entry(&mq->cache_pool, e);
-}
-
-static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- __remove_mapping(mq, oblock);
- mutex_unlock(&mq->lock);
-}
-
-static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
-{
- struct entry *e = epool_find(&mq->cache_pool, cblock);
-
- if (!e)
- return -ENODATA;
-
- del(mq, e);
- free_entry(&mq->cache_pool, e);
-
- return 0;
-}
-
-static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
-{
- int r;
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- r = __remove_cblock(mq, cblock);
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-#define CLEAN_TARGET_PERCENTAGE 25
-
-static bool clean_target_met(struct mq_policy *mq)
-{
- /*
-	 * Cache entries may not be populated, so we cannot rely on the
-	 * size of the clean queue.
- */
- unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
- unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;
-
- return nr_clean >= target;
-}
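
[Editor's note: the arithmetic in clean_target_met() is worth a quick sanity
check: with CLEAN_TARGET_PERCENTAGE of 25 and a 10000-block cache, writeback
keeps being offered work until at least 2500 blocks are clean. A trivial
standalone version with assumed numbers:]

#include <stdio.h>

int main(void)
{
	unsigned cache_size = 10000, nr_dirty = 8000;	/* assumed figures */
	unsigned nr_clean = cache_size - nr_dirty;
	unsigned target = cache_size * 25 / 100;	/* CLEAN_TARGET_PERCENTAGE */

	printf("clean=%u target=%u met=%s\n", nr_clean, target,
	       nr_clean >= target ? "yes" : "no");	/* clean=2000 target=2500 met=no */
	return 0;
}
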
-
-static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
- dm_cblock_t *cblock)
-{
- struct entry *e = pop_old(mq, &mq->cache_dirty);
-
- if (!e && !clean_target_met(mq))
- e = pop(mq, &mq->cache_dirty);
-
- if (!e)
- return -ENODATA;
-
- *oblock = e->oblock;
- *cblock = infer_cblock(&mq->cache_pool, e);
- e->dirty = false;
- push(mq, e);
-
- return 0;
-}
-
-static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
- dm_cblock_t *cblock, bool critical_only)
-{
- int r;
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- r = __mq_writeback_work(mq, oblock, cblock);
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static void __force_mapping(struct mq_policy *mq,
- dm_oblock_t current_oblock, dm_oblock_t new_oblock)
-{
- struct entry *e = hash_lookup(mq, current_oblock);
-
- if (e && in_cache(mq, e)) {
- del(mq, e);
- e->oblock = new_oblock;
- e->dirty = true;
- push(mq, e);
- }
-}
-
-static void mq_force_mapping(struct dm_cache_policy *p,
- dm_oblock_t current_oblock, dm_oblock_t new_oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- __force_mapping(mq, current_oblock, new_oblock);
- mutex_unlock(&mq->lock);
-}
-
-static dm_cblock_t mq_residency(struct dm_cache_policy *p)
-{
- dm_cblock_t r;
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- r = to_cblock(mq->cache_pool.nr_allocated);
- mutex_unlock(&mq->lock);
-
- return r;
-}
-
-static void mq_tick(struct dm_cache_policy *p, bool can_block)
-{
- struct mq_policy *mq = to_mq_policy(p);
- unsigned long flags;
-
- spin_lock_irqsave(&mq->tick_lock, flags);
- mq->tick_protected++;
- spin_unlock_irqrestore(&mq->tick_lock, flags);
-
- if (can_block) {
- mutex_lock(&mq->lock);
- copy_tick(mq);
- mutex_unlock(&mq->lock);
- }
-}
-
-static int mq_set_config_value(struct dm_cache_policy *p,
- const char *key, const char *value)
-{
- struct mq_policy *mq = to_mq_policy(p);
- unsigned long tmp;
-
- if (kstrtoul(value, 10, &tmp))
- return -EINVAL;
-
- if (!strcasecmp(key, "random_threshold")) {
- mq->tracker.thresholds[PATTERN_RANDOM] = tmp;
-
- } else if (!strcasecmp(key, "sequential_threshold")) {
- mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;
-
- } else if (!strcasecmp(key, "discard_promote_adjustment"))
- mq->discard_promote_adjustment = tmp;
-
- else if (!strcasecmp(key, "read_promote_adjustment"))
- mq->read_promote_adjustment = tmp;
-
- else if (!strcasecmp(key, "write_promote_adjustment"))
- mq->write_promote_adjustment = tmp;
-
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
-{
- ssize_t sz = *sz_ptr;
- struct mq_policy *mq = to_mq_policy(p);
-
- DMEMIT("10 random_threshold %u "
- "sequential_threshold %u "
- "discard_promote_adjustment %u "
- "read_promote_adjustment %u "
- "write_promote_adjustment %u ",
- mq->tracker.thresholds[PATTERN_RANDOM],
- mq->tracker.thresholds[PATTERN_SEQUENTIAL],
- mq->discard_promote_adjustment,
- mq->read_promote_adjustment,
- mq->write_promote_adjustment);
-
- *sz_ptr = sz;
- return 0;
-}
-
-/* Init the policy plugin interface function pointers. */
-static void init_policy_functions(struct mq_policy *mq)
-{
- mq->policy.destroy = mq_destroy;
- mq->policy.map = mq_map;
- mq->policy.lookup = mq_lookup;
- mq->policy.set_dirty = mq_set_dirty;
- mq->policy.clear_dirty = mq_clear_dirty;
- mq->policy.load_mapping = mq_load_mapping;
- mq->policy.walk_mappings = mq_walk_mappings;
- mq->policy.remove_mapping = mq_remove_mapping;
- mq->policy.remove_cblock = mq_remove_cblock;
- mq->policy.writeback_work = mq_writeback_work;
- mq->policy.force_mapping = mq_force_mapping;
- mq->policy.residency = mq_residency;
- mq->policy.tick = mq_tick;
- mq->policy.emit_config_values = mq_emit_config_values;
- mq->policy.set_config_value = mq_set_config_value;
-}
-
-static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
- sector_t origin_size,
- sector_t cache_block_size)
-{
- struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
-
- if (!mq)
- return NULL;
-
- init_policy_functions(mq);
- iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
- mq->cache_size = cache_size;
-
- if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
- DMERR("couldn't initialize pool of pre-cache entries");
- goto bad_pre_cache_init;
- }
-
- if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
- DMERR("couldn't initialize pool of cache entries");
- goto bad_cache_init;
- }
-
- mq->tick_protected = 0;
- mq->tick = 0;
- mq->hit_count = 0;
- mq->generation = 0;
- mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
- mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
- mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
- mutex_init(&mq->lock);
- spin_lock_init(&mq->tick_lock);
-
- queue_init(&mq->pre_cache);
- queue_init(&mq->cache_clean);
- queue_init(&mq->cache_dirty);
-
- mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
-
- mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
- mq->hash_bits = __ffs(mq->nr_buckets);
- mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
- if (!mq->table)
- goto bad_alloc_table;
-
- return &mq->policy;
-
-bad_alloc_table:
- epool_exit(&mq->cache_pool);
-bad_cache_init:
- epool_exit(&mq->pre_cache_pool);
-bad_pre_cache_init:
- kfree(mq);
-
- return NULL;
-}
-
-/*----------------------------------------------------------------*/
-
-static struct dm_cache_policy_type mq_policy_type = {
- .name = "mq",
- .version = {1, 4, 0},
- .hint_size = 4,
- .owner = THIS_MODULE,
- .create = mq_create
-};
-
-static int __init mq_init(void)
-{
- int r;
-
- mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
- sizeof(struct entry),
- __alignof__(struct entry),
- 0, NULL);
- if (!mq_entry_cache)
- return -ENOMEM;
-
- r = dm_cache_policy_register(&mq_policy_type);
- if (r) {
- DMERR("register failed %d", r);
- kmem_cache_destroy(mq_entry_cache);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void __exit mq_exit(void)
-{
- dm_cache_policy_unregister(&mq_policy_type);
-
- kmem_cache_destroy(mq_entry_cache);
-}
-
-module_init(mq_init);
-module_exit(mq_exit);
-
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("mq cache policy");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 28d4586748d0..cf48a617a3a4 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1567,8 +1567,48 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
spin_unlock_irqrestore(&mq->lock, flags);
}
+/*
+ * smq has no config values, but the old mq policy did. To avoid breaking
+ * software we continue to accept these configurables for the mq policy,
+ * but they have no effect.
+ */
+static int mq_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ unsigned long tmp;
+
+ if (kstrtoul(value, 10, &tmp))
+ return -EINVAL;
+
+ if (!strcasecmp(key, "random_threshold") ||
+ !strcasecmp(key, "sequential_threshold") ||
+ !strcasecmp(key, "discard_promote_adjustment") ||
+ !strcasecmp(key, "read_promote_adjustment") ||
+ !strcasecmp(key, "write_promote_adjustment")) {
+ DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
+ unsigned maxlen, ssize_t *sz_ptr)
+{
+ ssize_t sz = *sz_ptr;
+
+ DMEMIT("10 random_threshold 0 "
+ "sequential_threshold 0 "
+ "discard_promote_adjustment 0 "
+ "read_promote_adjustment 0 "
+ "write_promote_adjustment 0 ");
+
+ *sz_ptr = sz;
+ return 0;
+}
+
/* Init the policy plugin interface function pointers. */
-static void init_policy_functions(struct smq_policy *mq)
+static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{
mq->policy.destroy = smq_destroy;
mq->policy.map = smq_map;
@@ -1583,6 +1623,11 @@ static void init_policy_functions(struct smq_policy *mq)
mq->policy.force_mapping = smq_force_mapping;
mq->policy.residency = smq_residency;
mq->policy.tick = smq_tick;
+
+ if (mimic_mq) {
+ mq->policy.set_config_value = mq_set_config_value;
+ mq->policy.emit_config_values = mq_emit_config_values;
+ }
}
static bool too_many_hotspot_blocks(sector_t origin_size,
@@ -1606,9 +1651,10 @@ static void calc_hotspot_params(sector_t origin_size,
*hotspot_block_size /= 2u;
}
-static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
- sector_t origin_size,
- sector_t cache_block_size)
+static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size,
+ bool mimic_mq)
{
unsigned i;
unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1618,7 +1664,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
if (!mq)
return NULL;
- init_policy_functions(mq);
+ init_policy_functions(mq, mimic_mq);
mq->cache_size = cache_size;
mq->cache_block_size = cache_block_size;
@@ -1706,19 +1752,41 @@ bad_pool_init:
return NULL;
}
+static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ return __smq_create(cache_size, origin_size, cache_block_size, false);
+}
+
+static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ return __smq_create(cache_size, origin_size, cache_block_size, true);
+}
+
/*----------------------------------------------------------------*/
static struct dm_cache_policy_type smq_policy_type = {
.name = "smq",
- .version = {1, 0, 0},
+ .version = {1, 5, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = smq_create
};
+static struct dm_cache_policy_type mq_policy_type = {
+ .name = "mq",
+ .version = {1, 5, 0},
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = mq_create,
+};
+
static struct dm_cache_policy_type default_policy_type = {
.name = "default",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.hint_size = 4,
.owner = THIS_MODULE,
.create = smq_create,
@@ -1735,9 +1803,17 @@ static int __init smq_init(void)
return -ENOMEM;
}
+ r = dm_cache_policy_register(&mq_policy_type);
+ if (r) {
+ DMERR("register failed (as mq) %d", r);
+ dm_cache_policy_unregister(&smq_policy_type);
+ return -ENOMEM;
+ }
+
r = dm_cache_policy_register(&default_policy_type);
if (r) {
DMERR("register failed (as default) %d", r);
+ dm_cache_policy_unregister(&mq_policy_type);
dm_cache_policy_unregister(&smq_policy_type);
return -ENOMEM;
}
@@ -1748,6 +1824,7 @@ static int __init smq_init(void)
static void __exit smq_exit(void)
{
dm_cache_policy_unregister(&smq_policy_type);
+ dm_cache_policy_unregister(&mq_policy_type);
dm_cache_policy_unregister(&default_policy_type);
}
@@ -1759,3 +1836,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");
MODULE_ALIAS("dm-cache-default");
+MODULE_ALIAS("dm-cache-mq");
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5780accffa30..ee0510f9a85e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -984,9 +984,14 @@ static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mod
static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
{
- bool needs_check = dm_cache_metadata_needs_check(cache->cmd);
+ bool needs_check;
enum cache_metadata_mode old_mode = get_cache_mode(cache);
+ if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
+ DMERR("unable to read needs_check flag, setting failure mode");
+ new_mode = CM_FAIL;
+ }
+
if (new_mode == CM_WRITE && needs_check) {
DMERR("%s: unable to switch cache to write mode until repaired.",
cache_device_name(cache));
@@ -2771,7 +2776,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->split_discard_bios = false;
cache->features = ca->features;
- ti->per_bio_data_size = get_per_bio_data_size(cache);
+ ti->per_io_data_size = get_per_bio_data_size(cache);
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -3510,6 +3515,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
char buf[BDEVNAME_SIZE];
struct cache *cache = ti->private;
dm_cblock_t residency;
+ bool needs_check;
switch (type) {
case STATUSTYPE_INFO:
@@ -3583,7 +3589,9 @@ static void cache_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("rw ");
- if (dm_cache_metadata_needs_check(cache->cmd))
+ r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
+
+ if (r || needs_check)
DMEMIT("needs_check ");
else
DMEMIT("- ");
@@ -3806,7 +3814,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {1, 8, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3147c8d09ea8..4f3cb3554944 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -28,6 +28,7 @@
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
+#include <crypto/skcipher.h>
#include <linux/device-mapper.h>
@@ -44,7 +45,7 @@ struct convert_context {
struct bvec_iter iter_out;
sector_t cc_sector;
atomic_t cc_pending;
- struct ablkcipher_request *req;
+ struct skcipher_request *req;
};
/*
@@ -86,7 +87,7 @@ struct crypt_iv_operations {
};
struct iv_essiv_private {
- struct crypto_hash *hash_tfm;
+ struct crypto_ahash *hash_tfm;
u8 *salt;
};
@@ -153,13 +154,13 @@ struct crypt_config {
/* ESSIV: struct crypto_cipher *essiv_tfm */
void *iv_private;
- struct crypto_ablkcipher **tfms;
+ struct crypto_skcipher **tfms;
unsigned tfms_count;
/*
* Layout of each crypto request:
*
- * struct ablkcipher_request
+ * struct skcipher_request
* context
* padding
* struct dm_crypt_request
@@ -189,7 +190,7 @@ static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
/*
* Use this to access cipher attributes that are the same for each CPU.
*/
-static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
{
return cc->tfms[0];
}
@@ -263,23 +264,25 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
struct scatterlist sg;
struct crypto_cipher *essiv_tfm;
int err;
sg_init_one(&sg, cc->key, cc->key_size);
- desc.tfm = essiv->hash_tfm;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ ahash_request_set_tfm(req, essiv->hash_tfm);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
- err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+ err = crypto_ahash_digest(req);
+ ahash_request_zero(req);
if (err)
return err;
essiv_tfm = cc->iv_private;
err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
- crypto_hash_digestsize(essiv->hash_tfm));
+ crypto_ahash_digestsize(essiv->hash_tfm));
if (err)
return err;
@@ -290,7 +293,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+ unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
struct crypto_cipher *essiv_tfm;
int r, err = 0;
@@ -320,7 +323,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
}
if (crypto_cipher_blocksize(essiv_tfm) !=
- crypto_ablkcipher_ivsize(any_tfm(cc))) {
+ crypto_skcipher_ivsize(any_tfm(cc))) {
ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher";
crypto_free_cipher(essiv_tfm);
@@ -342,7 +345,7 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
struct crypto_cipher *essiv_tfm;
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
- crypto_free_hash(essiv->hash_tfm);
+ crypto_free_ahash(essiv->hash_tfm);
essiv->hash_tfm = NULL;
kzfree(essiv->salt);
@@ -360,7 +363,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
struct crypto_cipher *essiv_tfm = NULL;
- struct crypto_hash *hash_tfm = NULL;
+ struct crypto_ahash *hash_tfm = NULL;
u8 *salt = NULL;
int err;
@@ -370,14 +373,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
}
/* Allocate hash algorithm */
- hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+ hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hash_tfm)) {
ti->error = "Error initializing ESSIV hash";
err = PTR_ERR(hash_tfm);
goto bad;
}
- salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+ salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
if (!salt) {
ti->error = "Error kmallocing salt storage in ESSIV";
err = -ENOMEM;
@@ -388,7 +391,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
essiv_tfm = setup_essiv_cpu(cc, ti, salt,
- crypto_hash_digestsize(hash_tfm));
+ crypto_ahash_digestsize(hash_tfm));
if (IS_ERR(essiv_tfm)) {
crypt_iv_essiv_dtr(cc);
return PTR_ERR(essiv_tfm);
@@ -399,7 +402,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
bad:
if (hash_tfm && !IS_ERR(hash_tfm))
- crypto_free_hash(hash_tfm);
+ crypto_free_ahash(hash_tfm);
kfree(salt);
return err;
}
@@ -419,7 +422,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
+ unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
int log = ilog2(bs);
/* we need to calculate how far we must shift the sector count
@@ -816,27 +819,27 @@ static void crypt_convert_init(struct crypt_config *cc,
}
static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}
-static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+static struct skcipher_request *req_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
- return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+ return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start);
}
static u8 *iv_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
return (u8 *)ALIGN((unsigned long)(dmreq + 1),
- crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+ crypto_skcipher_alignmask(any_tfm(cc)) + 1);
}
static int crypt_convert_block(struct crypt_config *cc,
struct convert_context *ctx,
- struct ablkcipher_request *req)
+ struct skcipher_request *req)
{
struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
@@ -866,13 +869,13 @@ static int crypt_convert_block(struct crypt_config *cc,
return r;
}
- ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
- 1 << SECTOR_SHIFT, iv);
+ skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+ 1 << SECTOR_SHIFT, iv);
if (bio_data_dir(ctx->bio_in) == WRITE)
- r = crypto_ablkcipher_encrypt(req);
+ r = crypto_skcipher_encrypt(req);
else
- r = crypto_ablkcipher_decrypt(req);
+ r = crypto_skcipher_decrypt(req);
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
r = cc->iv_gen_ops->post(cc, iv, dmreq);
@@ -891,23 +894,23 @@ static void crypt_alloc_req(struct crypt_config *cc,
if (!ctx->req)
ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
- ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+ skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
/*
* Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
* requests if driver request queue is full.
*/
- ablkcipher_request_set_callback(ctx->req,
+ skcipher_request_set_callback(ctx->req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}
static void crypt_free_req(struct crypt_config *cc,
- struct ablkcipher_request *req, struct bio *base_bio)
+ struct skcipher_request *req, struct bio *base_bio)
{
struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
- if ((struct ablkcipher_request *)(io + 1) != req)
+ if ((struct skcipher_request *)(io + 1) != req)
mempool_free(req, cc->req_pool);
}
@@ -1437,7 +1440,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
for (i = 0; i < cc->tfms_count; i++)
if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
- crypto_free_ablkcipher(cc->tfms[i]);
+ crypto_free_skcipher(cc->tfms[i]);
cc->tfms[i] = NULL;
}
@@ -1450,13 +1453,13 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
unsigned i;
int err;
- cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+ cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
GFP_KERNEL);
if (!cc->tfms)
return -ENOMEM;
for (i = 0; i < cc->tfms_count; i++) {
- cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+ cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
if (IS_ERR(cc->tfms[i])) {
err = PTR_ERR(cc->tfms[i]);
crypt_free_tfms(cc);
@@ -1476,9 +1479,9 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
for (i = 0; i < cc->tfms_count; i++) {
- r = crypto_ablkcipher_setkey(cc->tfms[i],
- cc->key + (i * subkey_size),
- subkey_size);
+ r = crypto_skcipher_setkey(cc->tfms[i],
+ cc->key + (i * subkey_size),
+ subkey_size);
if (r)
err = r;
}
@@ -1645,7 +1648,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
}
/* Initialize IV */
- cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
+ cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
cc->iv_size = max(cc->iv_size,
@@ -1763,21 +1766,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (ret < 0)
goto bad;
- cc->dmreq_start = sizeof(struct ablkcipher_request);
- cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+ cc->dmreq_start = sizeof(struct skcipher_request);
+ cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
- if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+ if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
/* Allocate the padding exactly */
iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
- & crypto_ablkcipher_alignmask(any_tfm(cc));
+ & crypto_skcipher_alignmask(any_tfm(cc));
} else {
/*
* If the cipher requires greater alignment than kmalloc
* alignment, we don't know the exact position of the
* initialization vector. We must assume worst case.
*/
- iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+ iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
}
ret = -ENOMEM;
@@ -1788,7 +1791,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- cc->per_bio_data_size = ti->per_bio_data_size =
+ cc->per_bio_data_size = ti->per_io_data_size =
ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
ARCH_KMALLOC_MINALIGN);
@@ -1922,7 +1925,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
io = dm_per_bio_data(bio, cc->per_bio_data_size);
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
- io->ctx.req = (struct ablkcipher_request *)(io + 1);
+ io->ctx.req = (struct skcipher_request *)(io + 1);
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index b4c356a21123..cc70871a6d29 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -204,7 +204,7 @@ out:
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->per_bio_data_size = sizeof(struct dm_delay_info);
+ ti->per_io_data_size = sizeof(struct dm_delay_info);
ti->private = dc;
return 0;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 09e2afcafd2d..b7341de87015 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -220,7 +220,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->per_io_data_size = sizeof(struct per_bio_data);
ti->private = fc;
return 0;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 80a439543259..2adf81d81fca 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1291,7 +1291,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
immutable_target_type = dm_get_immutable_target_type(md);
if (immutable_target_type &&
- (immutable_target_type != dm_table_get_immutable_target_type(t))) {
+ (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
+ !dm_table_get_wildcard_target(t)) {
DMWARN("can't replace immutable target type %s",
immutable_target_type->name);
r = -EINVAL;
@@ -1303,7 +1304,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
dm_set_md_type(md, dm_table_get_type(t));
/* setup md->queue to reflect md's type (may block) */
- r = dm_setup_md_queue(md);
+ r = dm_setup_md_queue(md, t);
if (r) {
DMWARN("unable to set up device queue for new table.");
goto err_unlock_md_type;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 624589d51c2c..608302e222af 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -475,7 +475,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->flush_supported = true;
ti->num_discard_bios = 1;
ti->discards_supported = true;
- ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->per_io_data_size = sizeof(struct per_bio_data);
ti->private = lc;
return 0;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index cfa29f574c2a..677ba223e2ae 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
+#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
@@ -33,11 +34,12 @@ struct pgpath {
struct list_head list;
struct priority_group *pg; /* Owning PG */
- unsigned is_active; /* Path status */
unsigned fail_count; /* Cumulative failure count */
struct dm_path path;
struct delayed_work activate_path;
+
+ bool is_active:1; /* Path status */
};
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -53,10 +55,10 @@ struct priority_group {
struct path_selector ps;
unsigned pg_num; /* Reference number */
- unsigned bypassed; /* Temporarily bypass this PG? */
-
unsigned nr_pgpaths; /* Number of paths in PG */
struct list_head pgpaths;
+
+ bool bypassed:1; /* Temporarily bypass this PG? */
};
/* Multipath context */
@@ -74,21 +76,20 @@ struct multipath {
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
- unsigned pg_init_required; /* pg_init needs calling? */
unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
- unsigned pg_init_delay_retry; /* Delay pg_init retry? */
unsigned nr_valid_paths; /* Total number of usable paths */
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
- unsigned repeat_count; /* I/Os left before calling PS again */
- unsigned queue_io:1; /* Must we queue all I/O? */
- unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
- unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
- unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
- unsigned pg_init_disabled:1; /* pg_init is not currently allowed */
+ bool queue_io:1; /* Must we queue all I/O? */
+ bool queue_if_no_path:1; /* Queue I/O if last path fails? */
+ bool saved_queue_if_no_path:1; /* Saved state during suspension */
+ bool retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
+ bool pg_init_disabled:1; /* pg_init is not currently allowed */
+ bool pg_init_required:1; /* pg_init needs calling? */
+ bool pg_init_delay_retry:1; /* Delay pg_init retry? */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
@@ -120,7 +121,6 @@ static struct kmem_cache *_mpio_cache;
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
-static int __pgpath_busy(struct pgpath *pgpath);
/*-----------------------------------------------
@@ -132,7 +132,7 @@ static struct pgpath *alloc_pgpath(void)
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
if (pgpath) {
- pgpath->is_active = 1;
+ pgpath->is_active = true;
INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
}
@@ -181,25 +181,31 @@ static void free_priority_group(struct priority_group *pg,
kfree(pg);
}
-static struct multipath *alloc_multipath(struct dm_target *ti)
+static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
{
struct multipath *m;
- unsigned min_ios = dm_get_reserved_rq_based_ios();
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
- m->queue_io = 1;
+ m->queue_io = true;
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->trigger_event, trigger_event);
init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
- m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
- if (!m->mpio_pool) {
- kfree(m);
- return NULL;
+
+ m->mpio_pool = NULL;
+ if (!use_blk_mq) {
+ unsigned min_ios = dm_get_reserved_rq_based_ios();
+
+ m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
+ if (!m->mpio_pool) {
+ kfree(m);
+ return NULL;
+ }
}
+
m->ti = ti;
ti->private = m;
}
@@ -222,26 +228,41 @@ static void free_multipath(struct multipath *m)
kfree(m);
}
-static int set_mapinfo(struct multipath *m, union map_info *info)
+static struct dm_mpath_io *get_mpio(union map_info *info)
+{
+ return info->ptr;
+}
+
+static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
{
struct dm_mpath_io *mpio;
+ if (!m->mpio_pool) {
+ /* Use blk-mq pdu memory requested via per_io_data_size */
+ mpio = get_mpio(info);
+ memset(mpio, 0, sizeof(*mpio));
+ return mpio;
+ }
+
mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
if (!mpio)
- return -ENOMEM;
+ return NULL;
memset(mpio, 0, sizeof(*mpio));
info->ptr = mpio;
- return 0;
+ return mpio;
}
-static void clear_mapinfo(struct multipath *m, union map_info *info)
+static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
{
- struct dm_mpath_io *mpio = info->ptr;
+ /* Only needed for non blk-mq (.request_fn) multipath */
+ if (m->mpio_pool) {
+ struct dm_mpath_io *mpio = info->ptr;
- info->ptr = NULL;
- mempool_free(mpio, m->mpio_pool);
+ info->ptr = NULL;
+ mempool_free(mpio, m->mpio_pool);
+ }
}
/*-----------------------------------------------
@@ -257,7 +278,7 @@ static int __pg_init_all_paths(struct multipath *m)
return 0;
m->pg_init_count++;
- m->pg_init_required = 0;
+ m->pg_init_required = false;
/* Check here to reset pg_init_required */
if (!m->current_pg)
@@ -283,11 +304,11 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
- m->pg_init_required = 1;
- m->queue_io = 1;
+ m->pg_init_required = true;
+ m->queue_io = true;
} else {
- m->pg_init_required = 0;
- m->queue_io = 0;
+ m->pg_init_required = false;
+ m->queue_io = false;
}
m->pg_init_count = 0;
@@ -298,7 +319,7 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
{
struct dm_path *path;
- path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
+ path = pg->ps.type->select_path(&pg->ps, nr_bytes);
if (!path)
return -ENXIO;
@@ -313,10 +334,10 @@ static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
struct priority_group *pg;
- unsigned bypassed = 1;
+ bool bypassed = true;
if (!m->nr_valid_paths) {
- m->queue_io = 0;
+ m->queue_io = false;
goto failed;
}
@@ -344,7 +365,7 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
continue;
if (!__choose_path_in_pg(m, pg, nr_bytes)) {
if (!bypassed)
- m->pg_init_delay_retry = 1;
+ m->pg_init_delay_retry = true;
return;
}
}
@@ -380,7 +401,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
union map_info *map_context,
struct request *rq, struct request **__clone)
{
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
int r = DM_MAPIO_REQUEUE;
size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
struct pgpath *pgpath;
@@ -390,8 +411,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
spin_lock_irq(&m->lock);
/* Do we need to select a new pgpath? */
- if (!m->current_pgpath ||
- (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
+ if (!m->current_pgpath || !m->queue_io)
__choose_pgpath(m, nr_bytes);
pgpath = m->current_pgpath;
@@ -405,11 +425,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
goto out_unlock;
}
- if (set_mapinfo(m, map_context) < 0)
+ mpio = set_mpio(m, map_context);
+ if (!mpio)
/* ENOMEM, requeue */
goto out_unlock;
- mpio = map_context->ptr;
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
@@ -418,17 +438,24 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
spin_unlock_irq(&m->lock);
if (clone) {
- /* Old request-based interface: allocated clone is passed in */
+ /*
+ * Old request-based interface: allocated clone is passed in.
+ * Used by: .request_fn stacked on .request_fn path(s).
+ */
clone->q = bdev_get_queue(bdev);
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
} else {
- /* blk-mq request-based interface */
- *__clone = blk_get_request(bdev_get_queue(bdev),
- rq_data_dir(rq), GFP_ATOMIC);
+ /*
+ * blk-mq request-based interface; used by both:
+ * .request_fn stacked on blk-mq path(s) and
+ * blk-mq stacked on blk-mq path(s).
+ */
+ *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
+ rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
if (IS_ERR(*__clone)) {
/* ENOMEM, requeue */
- clear_mapinfo(m, map_context);
+ clear_request_fn_mpio(m, map_context);
return r;
}
(*__clone)->bio = (*__clone)->biotail = NULL;
@@ -463,14 +490,14 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
static void multipath_release_clone(struct request *clone)
{
- blk_put_request(clone);
+ blk_mq_free_request(clone);
}
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
-static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
- unsigned save_old_value)
+static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
+ bool save_old_value)
{
unsigned long flags;
@@ -776,12 +803,12 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
argc--;
if (!strcasecmp(arg_name, "queue_if_no_path")) {
- r = queue_if_no_path(m, 1, 0);
+ r = queue_if_no_path(m, true, false);
continue;
}
if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
- m->retain_attached_hw_handler = 1;
+ m->retain_attached_hw_handler = true;
continue;
}
@@ -820,11 +847,12 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
+ bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
as.argc = argc;
as.argv = argv;
- m = alloc_multipath(ti);
+ m = alloc_multipath(ti, use_blk_mq);
if (!m) {
ti->error = "can't allocate multipath";
return -EINVAL;
@@ -880,6 +908,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
+ if (use_blk_mq)
+ ti->per_io_data_size = sizeof(struct dm_mpath_io);
return 0;
@@ -917,7 +947,7 @@ static void flush_multipath_work(struct multipath *m)
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
- m->pg_init_disabled = 1;
+ m->pg_init_disabled = true;
spin_unlock_irqrestore(&m->lock, flags);
flush_workqueue(kmpath_handlerd);
@@ -926,7 +956,7 @@ static void flush_multipath_work(struct multipath *m)
flush_work(&m->trigger_event);
spin_lock_irqsave(&m->lock, flags);
- m->pg_init_disabled = 0;
+ m->pg_init_disabled = false;
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -954,7 +984,7 @@ static int fail_path(struct pgpath *pgpath)
DMWARN("Failing path %s.", pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
- pgpath->is_active = 0;
+ pgpath->is_active = false;
pgpath->fail_count++;
m->nr_valid_paths--;
@@ -987,18 +1017,13 @@ static int reinstate_path(struct pgpath *pgpath)
if (pgpath->is_active)
goto out;
- if (!pgpath->pg->ps.type->reinstate_path) {
- DMWARN("Reinstate path not supported by path selector %s",
- pgpath->pg->ps.type->name);
- r = -EINVAL;
- goto out;
- }
+ DMWARN("Reinstating path %s.", pgpath->path.dev->name);
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
if (r)
goto out;
- pgpath->is_active = 1;
+ pgpath->is_active = true;
if (!m->nr_valid_paths++) {
m->current_pgpath = NULL;
@@ -1045,7 +1070,7 @@ static int action_dev(struct multipath *m, struct dm_dev *dev,
* Temporarily try to avoid having to use the specified PG
*/
static void bypass_pg(struct multipath *m, struct priority_group *pg,
- int bypassed)
+ bool bypassed)
{
unsigned long flags;
@@ -1078,7 +1103,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
spin_lock_irqsave(&m->lock, flags);
list_for_each_entry(pg, &m->priority_groups, list) {
- pg->bypassed = 0;
+ pg->bypassed = false;
if (--pgnum)
continue;
@@ -1096,7 +1121,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
* Set/clear bypassed status of a PG.
* PGs are numbered upwards from 1 in the order they were declared.
*/
-static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
+static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
struct priority_group *pg;
unsigned pgnum;
@@ -1120,17 +1145,17 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
/*
* Should we retry pg_init immediately?
*/
-static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
+static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
unsigned long flags;
- int limit_reached = 0;
+ bool limit_reached = false;
spin_lock_irqsave(&m->lock, flags);
if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
- m->pg_init_required = 1;
+ m->pg_init_required = true;
else
- limit_reached = 1;
+ limit_reached = true;
spin_unlock_irqrestore(&m->lock, flags);
@@ -1143,7 +1168,7 @@ static void pg_init_done(void *data, int errors)
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
- unsigned delay_retry = 0;
+ bool delay_retry = false;
/* device or driver problems */
switch (errors) {
@@ -1166,7 +1191,7 @@ static void pg_init_done(void *data, int errors)
* Probably doing something like FW upgrade on the
* controller so try the other pg.
*/
- bypass_pg(m, pg, 1);
+ bypass_pg(m, pg, true);
break;
case SCSI_DH_RETRY:
/* Wait before retrying. */
@@ -1177,6 +1202,7 @@ static void pg_init_done(void *data, int errors)
fail_path(pgpath);
errors = 0;
break;
+ case SCSI_DH_DEV_OFFLINED:
default:
/*
* We probably do not want to fail the path for a device
@@ -1194,7 +1220,7 @@ static void pg_init_done(void *data, int errors)
m->current_pg = NULL;
}
} else if (!m->pg_init_required)
- pg->bypassed = 0;
+ pg->bypassed = false;
if (--m->pg_init_in_progress)
/* Activations of other paths are still on going */
@@ -1205,7 +1231,7 @@ static void pg_init_done(void *data, int errors)
if (__pg_init_all_paths(m))
goto out;
}
- m->queue_io = 0;
+ m->queue_io = false;
/*
* Wake up any thread waiting to suspend.
@@ -1291,21 +1317,21 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
int error, union map_info *map_context)
{
struct multipath *m = ti->private;
- struct dm_mpath_io *mpio = map_context->ptr;
+ struct dm_mpath_io *mpio = get_mpio(map_context);
struct pgpath *pgpath;
struct path_selector *ps;
int r;
BUG_ON(!mpio);
- r = do_end_io(m, clone, error, mpio);
+ r = do_end_io(m, clone, error, mpio);
pgpath = mpio->pgpath;
if (pgpath) {
ps = &pgpath->pg->ps;
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
}
- clear_mapinfo(m, map_context);
+ clear_request_fn_mpio(m, map_context);
return r;
}
@@ -1318,9 +1344,9 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
*/
static void multipath_presuspend(struct dm_target *ti)
{
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
- queue_if_no_path(m, 0, 1);
+ queue_if_no_path(m, false, true);
}
static void multipath_postsuspend(struct dm_target *ti)
@@ -1337,7 +1363,7 @@ static void multipath_postsuspend(struct dm_target *ti)
*/
static void multipath_resume(struct dm_target *ti)
{
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
@@ -1366,7 +1392,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
{
int sz = 0;
unsigned long flags;
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
unsigned pg_num;
@@ -1474,7 +1500,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
int r = -EINVAL;
struct dm_dev *dev;
- struct multipath *m = (struct multipath *) ti->private;
+ struct multipath *m = ti->private;
action_fn action;
mutex_lock(&m->work_mutex);
@@ -1486,10 +1512,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
if (argc == 1) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
- r = queue_if_no_path(m, 1, 0);
+ r = queue_if_no_path(m, true, false);
goto out;
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
- r = queue_if_no_path(m, 0, 0);
+ r = queue_if_no_path(m, false, false);
goto out;
}
}
@@ -1500,10 +1526,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
}
if (!strcasecmp(argv[0], "disable_group")) {
- r = bypass_pg_num(m, argv[1], 1);
+ r = bypass_pg_num(m, argv[1], true);
goto out;
} else if (!strcasecmp(argv[0], "enable_group")) {
- r = bypass_pg_num(m, argv[1], 0);
+ r = bypass_pg_num(m, argv[1], false);
goto out;
} else if (!strcasecmp(argv[0], "switch_group")) {
r = switch_pg_num(m, argv[1]);
@@ -1604,7 +1630,7 @@ out:
return ret;
}
-static int __pgpath_busy(struct pgpath *pgpath)
+static int pgpath_busy(struct pgpath *pgpath)
{
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
@@ -1621,7 +1647,7 @@ static int __pgpath_busy(struct pgpath *pgpath)
*/
static int multipath_busy(struct dm_target *ti)
{
- int busy = 0, has_active = 0;
+ bool busy = false, has_active = false;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *pgpath;
@@ -1632,7 +1658,7 @@ static int multipath_busy(struct dm_target *ti)
/* pg_init in progress or no paths available */
if (m->pg_init_in_progress ||
(!m->nr_valid_paths && m->queue_if_no_path)) {
- busy = 1;
+ busy = true;
goto out;
}
/* Guess which priority_group will be used at next mapping time */
@@ -1654,13 +1680,12 @@ static int multipath_busy(struct dm_target *ti)
* If there is at least one non-busy active path, the path selector
* will be able to select it, so we consider such a pg as not busy.
*/
- busy = 1;
+ busy = true;
list_for_each_entry(pgpath, &pg->pgpaths, list)
if (pgpath->is_active) {
- has_active = 1;
-
- if (!__pgpath_busy(pgpath)) {
- busy = 0;
+ has_active = true;
+ if (!pgpath_busy(pgpath)) {
+ busy = false;
break;
}
}
@@ -1671,7 +1696,7 @@ static int multipath_busy(struct dm_target *ti)
* the current_pg will be changed at next mapping time.
* We need to try mapping to determine it.
*/
- busy = 0;
+ busy = false;
out:
spin_unlock_irqrestore(&m->lock, flags);
@@ -1684,7 +1709,8 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
+ .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
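
For reference, the save/restore convention that the queue_if_no_path(m, false, true) call in multipath_presuspend relies on, reduced to a standalone sketch. struct mpath_flags and set_queue_if_no_path() are hypothetical stand-ins that mirror the dm-mpath internals; this is not code from the patch:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct mpath_flags {                /* hypothetical, mirrors struct multipath */
            spinlock_t lock;
            bool queue_if_no_path;
            bool saved_queue_if_no_path;
    };

    static int set_queue_if_no_path(struct mpath_flags *m, bool queue_if_no_path,
                                    bool save_old_value)
    {
            unsigned long flags;

            spin_lock_irqsave(&m->lock, flags);
            /* presuspend passes save_old_value=true so resume can restore it */
            if (save_old_value)
                    m->saved_queue_if_no_path = m->queue_if_no_path;
            else
                    m->saved_queue_if_no_path = queue_if_no_path;
            m->queue_if_no_path = queue_if_no_path;
            spin_unlock_irqrestore(&m->lock, flags);

            return 0;
    }

With bool parameters the call sites above (true/false pairs) read unambiguously, which is the point of the int-to-bool conversion throughout this file.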
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index e7d1fa8b0459..b6eb5365b1a4 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -50,13 +50,8 @@ struct path_selector_type {
/*
* Chooses a path for this io, if no paths are available then
* NULL will be returned.
- *
- * repeat_count is the number of times to use the path before
- * calling the function again. 0 means don't call it again unless
- * the path fails.
*/
struct dm_path *(*select_path) (struct path_selector *ps,
- unsigned *repeat_count,
size_t nr_bytes);
/*
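
A minimal selector under the new contract might look like the sketch below. fp_selector and fp_path_info are hypothetical types (each in-tree selector defines its own equivalents); only the ->select_path() signature is taken from this header change. With repeat_count gone, the selector is simply consulted on every call:

    #include <linux/list.h>
    #include "dm-path-selector.h"

    struct fp_path_info {               /* hypothetical per-path bookkeeping */
            struct list_head list;
            struct dm_path *path;
    };

    struct fp_selector {                /* hypothetical ps->context payload */
            struct list_head valid_paths;
    };

    /* Trivial ->select_path() under the new contract: a path or NULL, every call */
    static struct dm_path *fp_select_path(struct path_selector *ps,
                                          size_t nr_bytes)
    {
            struct fp_selector *s = ps->context;

            if (list_empty(&s->valid_paths))
                    return NULL;

            return list_first_entry(&s->valid_paths,
                                    struct fp_path_info, list)->path;
    }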
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 3941fae0de9f..23f178641794 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -23,12 +23,13 @@
#include <linux/atomic.h>
#define DM_MSG_PREFIX "multipath queue-length"
-#define QL_MIN_IO 128
-#define QL_VERSION "0.1.0"
+#define QL_MIN_IO 1
+#define QL_VERSION "0.2.0"
struct selector {
struct list_head valid_paths;
struct list_head failed_paths;
+ spinlock_t lock;
};
struct path_info {
@@ -45,6 +46,7 @@ static struct selector *alloc_selector(void)
if (s) {
INIT_LIST_HEAD(&s->valid_paths);
INIT_LIST_HEAD(&s->failed_paths);
+ spin_lock_init(&s->lock);
}
return s;
@@ -113,6 +115,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
struct path_info *pi;
unsigned repeat_count = QL_MIN_IO;
char dummy;
+ unsigned long flags;
/*
* Arguments: [<repeat_count>]
@@ -129,6 +132,11 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
return -EINVAL;
}
+ if (repeat_count > 1) {
+ DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
+ repeat_count = 1;
+ }
+
/* Allocate the path information structure */
pi = kmalloc(sizeof(*pi), GFP_KERNEL);
if (!pi) {
@@ -142,7 +150,9 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
path->pscontext = pi;
+ spin_lock_irqsave(&s->lock, flags);
list_add_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -151,16 +161,22 @@ static void ql_fail_path(struct path_selector *ps, struct dm_path *path)
{
struct selector *s = ps->context;
struct path_info *pi = path->pscontext;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
list_move(&pi->list, &s->failed_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
{
struct selector *s = ps->context;
struct path_info *pi = path->pscontext;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
list_move_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -168,14 +184,16 @@ static int ql_reinstate_path(struct path_selector *ps, struct dm_path *path)
/*
* Select a path having the minimum number of in-flight I/Os
*/
-static struct dm_path *ql_select_path(struct path_selector *ps,
- unsigned *repeat_count, size_t nr_bytes)
+static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
+ struct dm_path *ret = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
if (list_empty(&s->valid_paths))
- return NULL;
+ goto out;
/* Change preferred (first in list) path to evenly balance. */
list_move_tail(s->valid_paths.next, &s->valid_paths);
@@ -190,11 +208,12 @@ static struct dm_path *ql_select_path(struct path_selector *ps,
}
if (!best)
- return NULL;
-
- *repeat_count = best->repeat_count;
+ goto out;
- return best->path;
+ ret = best->path;
+out:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return ret;
}
static int ql_start_io(struct path_selector *ps, struct dm_path *path,
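
The locking added above follows one idiom across ql_add_path/ql_fail_path/ql_reinstate_path/ql_select_path: every list splice or traversal happens under s->lock with interrupts saved, and selection rotates the list head to the tail before scanning so repeated calls fan out across equally loaded paths. A standalone sketch of that idiom, with a hypothetical qpath type in place of dm-queue-length's path_info:

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct qpath {                      /* hypothetical, stands in for path_info */
            struct list_head list;
            atomic_t qlen;              /* in-flight I/O count for this path */
    };

    static struct qpath *pick_least_loaded(spinlock_t *lock,
                                           struct list_head *valid_paths)
    {
            struct qpath *p, *best = NULL;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            if (!list_empty(valid_paths)) {
                    /* Change preferred (first in list) path to evenly balance. */
                    list_move_tail(valid_paths->next, valid_paths);

                    list_for_each_entry(p, valid_paths, list)
                            if (!best ||
                                atomic_read(&p->qlen) < atomic_read(&best->qlen))
                                    best = p;
            }
            spin_unlock_irqrestore(lock, flags);

            return best;
    }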
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index f2a363a89629..b3ccf1e0d4f2 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1121,7 +1121,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
- ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
+ ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 6ab1192cdd5f..4ace1da17db8 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -17,6 +17,8 @@
#include <linux/module.h>
#define DM_MSG_PREFIX "multipath round-robin"
+#define RR_MIN_IO 1000
+#define RR_VERSION "1.1.0"
/*-----------------------------------------------------------------
* Path-handling code, paths are held in lists
@@ -41,23 +43,48 @@ static void free_paths(struct list_head *paths)
* Round-robin selector
*---------------------------------------------------------------*/
-#define RR_MIN_IO 1000
-
struct selector {
struct list_head valid_paths;
struct list_head invalid_paths;
+ spinlock_t lock;
+ struct dm_path * __percpu *current_path;
+ struct percpu_counter repeat_count;
};
+static void set_percpu_current_path(struct selector *s, struct dm_path *path)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(s->current_path, cpu) = path;
+}
+
static struct selector *alloc_selector(void)
{
struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s) {
- INIT_LIST_HEAD(&s->valid_paths);
- INIT_LIST_HEAD(&s->invalid_paths);
- }
+ if (!s)
+ return NULL;
+
+ INIT_LIST_HEAD(&s->valid_paths);
+ INIT_LIST_HEAD(&s->invalid_paths);
+ spin_lock_init(&s->lock);
+
+ s->current_path = alloc_percpu(struct dm_path *);
+ if (!s->current_path)
+ goto out_current_path;
+ set_percpu_current_path(s, NULL);
+
+ if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL))
+ goto out_repeat_count;
return s;
+
+out_repeat_count:
+ free_percpu(s->current_path);
+out_current_path:
+ kfree(s);
+ return NULL;
}
static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
@@ -74,10 +101,12 @@ static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
static void rr_destroy(struct path_selector *ps)
{
- struct selector *s = (struct selector *) ps->context;
+ struct selector *s = ps->context;
free_paths(&s->valid_paths);
free_paths(&s->invalid_paths);
+ free_percpu(s->current_path);
+ percpu_counter_destroy(&s->repeat_count);
kfree(s);
ps->context = NULL;
}
@@ -111,10 +140,11 @@ static int rr_status(struct path_selector *ps, struct dm_path *path,
static int rr_add_path(struct path_selector *ps, struct dm_path *path,
int argc, char **argv, char **error)
{
- struct selector *s = (struct selector *) ps->context;
+ struct selector *s = ps->context;
struct path_info *pi;
unsigned repeat_count = RR_MIN_IO;
char dummy;
+ unsigned long flags;
if (argc > 1) {
*error = "round-robin ps: incorrect number of arguments";
@@ -139,42 +169,65 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
path->pscontext = pi;
+ spin_lock_irqsave(&s->lock, flags);
list_add_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
{
- struct selector *s = (struct selector *) ps->context;
+ unsigned long flags;
+ struct selector *s = ps->context;
struct path_info *pi = p->pscontext;
+ spin_lock_irqsave(&s->lock, flags);
+ if (p == *this_cpu_ptr(s->current_path))
+ set_percpu_current_path(s, NULL);
+
list_move(&pi->list, &s->invalid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int rr_reinstate_path(struct path_selector *ps, struct dm_path *p)
{
- struct selector *s = (struct selector *) ps->context;
+ unsigned long flags;
+ struct selector *s = ps->context;
struct path_info *pi = p->pscontext;
+ spin_lock_irqsave(&s->lock, flags);
list_move(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
-static struct dm_path *rr_select_path(struct path_selector *ps,
- unsigned *repeat_count, size_t nr_bytes)
+static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
{
- struct selector *s = (struct selector *) ps->context;
+ unsigned long flags;
+ struct selector *s = ps->context;
struct path_info *pi = NULL;
+ struct dm_path *current_path = NULL;
+
+ current_path = *this_cpu_ptr(s->current_path);
+ if (current_path) {
+ percpu_counter_dec(&s->repeat_count);
+ if (percpu_counter_read_positive(&s->repeat_count) > 0)
+ return current_path;
+ }
+ spin_lock_irqsave(&s->lock, flags);
if (!list_empty(&s->valid_paths)) {
pi = list_entry(s->valid_paths.next, struct path_info, list);
list_move_tail(&pi->list, &s->valid_paths);
- *repeat_count = pi->repeat_count;
+ percpu_counter_set(&s->repeat_count, pi->repeat_count);
+ set_percpu_current_path(s, pi->path);
+ current_path = pi->path;
}
+ spin_unlock_irqrestore(&s->lock, flags);
- return pi ? pi->path : NULL;
+ return current_path;
}
static struct path_selector_type rr_ps = {
@@ -198,7 +251,7 @@ static int __init dm_rr_init(void)
if (r < 0)
DMERR("register failed %d", r);
- DMINFO("version 1.0.0 loaded");
+ DMINFO("version " RR_VERSION " loaded");
return r;
}
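
The percpu machinery introduced above is a common fast-path pattern: drain a percpu_counter without taking any lock, and only acquire the spinlock to re-select and refill once the budget runs out (the per-cpu cached current_path works the same way via alloc_percpu()/free_percpu()). A self-contained sketch of the counter half, under those assumptions; rr_state is a hypothetical type, not dm code:

    #include <linux/gfp.h>
    #include <linux/percpu_counter.h>
    #include <linux/spinlock.h>

    struct rr_state {
            spinlock_t lock;
            struct percpu_counter budget;
    };

    static int rr_state_init(struct rr_state *st)
    {
            spin_lock_init(&st->lock);
            return percpu_counter_init(&st->budget, 0, GFP_KERNEL);
    }

    /* Returns true when the caller may keep reusing its cached choice. */
    static bool rr_consume(struct rr_state *st, s64 refill)
    {
            percpu_counter_dec(&st->budget);
            if (percpu_counter_read_positive(&st->budget) > 0)
                    return true;        /* fast path: no lock taken */

            spin_lock(&st->lock);
            percpu_counter_set(&st->budget, refill);
            spin_unlock(&st->lock);
            return false;               /* caller must re-select under its lock */
    }

    static void rr_state_destroy(struct rr_state *st)
    {
            percpu_counter_destroy(&st->budget);
    }

percpu_counter_read_positive() is a cheap, possibly stale read; that is acceptable here because an occasional early re-selection only costs one extra lock acquisition.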
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index 9df8f6bd6418..7b8642045c55 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -19,11 +19,12 @@
#define ST_MAX_RELATIVE_THROUGHPUT 100
#define ST_MAX_RELATIVE_THROUGHPUT_SHIFT 7
#define ST_MAX_INFLIGHT_SIZE ((size_t)-1 >> ST_MAX_RELATIVE_THROUGHPUT_SHIFT)
-#define ST_VERSION "0.2.0"
+#define ST_VERSION "0.3.0"
struct selector {
struct list_head valid_paths;
struct list_head failed_paths;
+ spinlock_t lock;
};
struct path_info {
@@ -41,6 +42,7 @@ static struct selector *alloc_selector(void)
if (s) {
INIT_LIST_HEAD(&s->valid_paths);
INIT_LIST_HEAD(&s->failed_paths);
+ spin_lock_init(&s->lock);
}
return s;
@@ -111,6 +113,7 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
unsigned repeat_count = ST_MIN_IO;
unsigned relative_throughput = 1;
char dummy;
+ unsigned long flags;
/*
* Arguments: [<repeat_count> [<relative_throughput>]]
@@ -134,6 +137,11 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
return -EINVAL;
}
+ if (repeat_count > 1) {
+ DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
+ repeat_count = 1;
+ }
+
if ((argc == 2) &&
(sscanf(argv[1], "%u%c", &relative_throughput, &dummy) != 1 ||
relative_throughput > ST_MAX_RELATIVE_THROUGHPUT)) {
@@ -155,7 +163,9 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
path->pscontext = pi;
+ spin_lock_irqsave(&s->lock, flags);
list_add_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -164,16 +174,22 @@ static void st_fail_path(struct path_selector *ps, struct dm_path *path)
{
struct selector *s = ps->context;
struct path_info *pi = path->pscontext;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
list_move(&pi->list, &s->failed_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
}
static int st_reinstate_path(struct path_selector *ps, struct dm_path *path)
{
struct selector *s = ps->context;
struct path_info *pi = path->pscontext;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
list_move_tail(&pi->list, &s->valid_paths);
+ spin_unlock_irqrestore(&s->lock, flags);
return 0;
}
@@ -255,14 +271,16 @@ static int st_compare_load(struct path_info *pi1, struct path_info *pi2,
return pi2->relative_throughput - pi1->relative_throughput;
}
-static struct dm_path *st_select_path(struct path_selector *ps,
- unsigned *repeat_count, size_t nr_bytes)
+static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
+ struct dm_path *ret = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&s->lock, flags);
if (list_empty(&s->valid_paths))
- return NULL;
+ goto out;
/* Change preferred (first in list) path to evenly balance. */
list_move_tail(s->valid_paths.next, &s->valid_paths);
@@ -272,11 +290,12 @@ static struct dm_path *st_select_path(struct path_selector *ps,
best = pi;
if (!best)
- return NULL;
-
- *repeat_count = best->repeat_count;
+ goto out;
- return best->path;
+ ret = best->path;
+out:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return ret;
}
static int st_start_io(struct path_selector *ps, struct dm_path *path,
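
For context on what this selector computes (unchanged by the patch, summarized from the surrounding code): a path's service time is estimated as in-flight bytes divided by relative throughput, and st_compare_load() compares cross-products so no division is needed. Roughly, in a simplified sketch; the in-tree version also special-cases zero throughput and shifts sizes down by ST_MAX_RELATIVE_THROUGHPUT_SHIFT to avoid overflow:

    #include <stddef.h>

    /*
     * st1 < st2  <=>  sz1 / tp1 < sz2 / tp2  <=>  sz1 * tp2 < sz2 * tp1
     * Returns <0 when path 1 should be preferred, >0 for path 2.
     */
    static int compare_service_time(size_t sz1, unsigned tp1,
                                    size_t sz2, unsigned tp2)
    {
            if (sz1 * tp2 < sz2 * tp1)
                    return -1;
            if (sz1 * tp2 > sz2 * tp1)
                    return 1;
            return 0;
    }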
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 3766386080a4..70bb0e8b62ce 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1105,6 +1105,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
+ dev_t origin_dev, cow_dev;
unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
@@ -1135,11 +1136,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Cannot get origin device";
goto bad_origin;
}
+ origin_dev = s->origin->bdev->bd_dev;
cow_path = argv[0];
argv++;
argc--;
+ cow_dev = dm_get_dev_t(cow_path);
+ if (cow_dev && cow_dev == origin_dev) {
+ ti->error = "COW device cannot be the same as origin device";
+ r = -EINVAL;
+ goto bad_cow;
+ }
+
r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
if (r) {
ti->error = "Cannot get COW device";
@@ -1201,7 +1210,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = s;
ti->num_flush_bios = num_flush_bios;
- ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
+ ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 061152a43730..f9e8f0bef332 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
}
/*
+ * Convert the path to a device
+ */
+dev_t dm_get_dev_t(const char *path)
+{
+ dev_t uninitialized_var(dev);
+ struct block_device *bdev;
+
+ bdev = lookup_bdev(path);
+ if (IS_ERR(bdev))
+ dev = name_to_dev_t(path);
+ else {
+ dev = bdev->bd_dev;
+ bdput(bdev);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(dm_get_dev_t);
+
+/*
* Add a device to the list, or just increment the usage count if
* it's already present.
*/
@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
struct dm_dev **result)
{
int r;
- dev_t uninitialized_var(dev);
+ dev_t dev;
struct dm_dev_internal *dd;
struct dm_table *t = ti->table;
- struct block_device *bdev;
BUG_ON(!t);
- /* convert the path to a device */
- bdev = lookup_bdev(path);
- if (IS_ERR(bdev)) {
- dev = name_to_dev_t(path);
- if (!dev)
- return -ENODEV;
- } else {
- dev = bdev->bd_dev;
- bdput(bdev);
- }
+ dev = dm_get_dev_t(path);
+ if (!dev)
+ return -ENODEV;
dd = find_device(&t->devices, dev);
if (!dd) {
@@ -920,6 +932,30 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
return t->immutable_target_type;
}
+struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
+{
+ /* Immutable target is implicitly a singleton */
+ if (t->num_targets > 1 ||
+ !dm_target_is_immutable(t->targets[0].type))
+ return NULL;
+
+ return t->targets;
+}
+
+struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
+{
+ struct dm_target *uninitialized_var(ti);
+ unsigned i = 0;
+
+ while (i < dm_table_get_num_targets(t)) {
+ ti = dm_table_get_target(t, i++);
+ if (dm_target_is_wildcard(ti->type))
+ return ti;
+ }
+
+ return NULL;
+}
+
bool dm_table_request_based(struct dm_table *t)
{
return __table_type_request_based(dm_table_get_type(t));
@@ -933,7 +969,7 @@ bool dm_table_mq_request_based(struct dm_table *t)
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
unsigned type = dm_table_get_type(t);
- unsigned per_bio_data_size = 0;
+ unsigned per_io_data_size = 0;
struct dm_target *tgt;
unsigned i;
@@ -945,10 +981,10 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
if (type == DM_TYPE_BIO_BASED)
for (i = 0; i < t->num_targets; i++) {
tgt = t->targets + i;
- per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
+ per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
}
- t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
+ t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
if (!t->mempools)
return -ENOMEM;
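
One intended use of the newly exported dm_get_dev_t() (see the dm-snap hunk earlier) is resolving table arguments to dev_t before taking any device references, so a constructor can reject aliases of one underlying device early. A hypothetical caller, assuming only the helper added here and that its prototype lands in linux/device-mapper.h:

    #include <linux/device-mapper.h>    /* assumed home of dm_get_dev_t() */
    #include <linux/errno.h>

    static int check_distinct_devices(const char *path_a, const char *path_b)
    {
            dev_t a = dm_get_dev_t(path_a);
            dev_t b = dm_get_dev_t(path_b);

            if (!a || !b)
                    return -ENODEV;     /* path did not resolve to a device */
            if (a == b)
                    return -EINVAL;     /* same underlying device */
            return 0;
    }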
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 925ec1b15e75..a317dd884ba6 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -150,7 +150,8 @@ static void io_err_release_clone_rq(struct request *clone)
static struct target_type error_target = {
.name = "error",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
+ .features = DM_TARGET_WILDCARD,
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index f962d6453afd..43824d73366d 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -344,7 +344,7 @@ static void subtree_dec(void *context, const void *value)
memcpy(&root_le, value, sizeof(root_le));
root = le64_to_cpu(root_le);
if (dm_btree_del(info, root))
- DMERR("btree delete failed\n");
+ DMERR("btree delete failed");
}
static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
@@ -1981,5 +1981,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
{
- dm_tm_issue_prefetches(pmd->tm);
+ down_read(&pmd->root_lock);
+ if (!pmd->fail_io)
+ dm_tm_issue_prefetches(pmd->tm);
+ up_read(&pmd->root_lock);
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 72d91f477683..92237b6fa8cd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -235,6 +235,7 @@ struct pool {
struct pool_features pf;
bool low_water_triggered:1; /* A dm event has been sent */
bool suspended:1;
+ bool out_of_data_space:1;
struct dm_bio_prison *prison;
struct dm_kcopyd_client *copier;
@@ -461,9 +462,16 @@ static void cell_error_with_code(struct pool *pool,
dm_bio_prison_free_cell(pool->prison, cell);
}
+static int get_pool_io_error_code(struct pool *pool)
+{
+ return pool->out_of_data_space ? -ENOSPC : -EIO;
+}
+
static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
- cell_error_with_code(pool, cell, -EIO);
+ int error = get_pool_io_error_code(pool);
+
+ cell_error_with_code(pool, cell, error);
}
static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -622,7 +630,9 @@ static void error_retry_list_with_code(struct pool *pool, int error)
static void error_retry_list(struct pool *pool)
{
- return error_retry_list_with_code(pool, -EIO);
+ int error = get_pool_io_error_code(pool);
+
+ return error_retry_list_with_code(pool, error);
}
/*
@@ -2419,6 +2429,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
*/
if (old_mode != new_mode)
notify_of_pool_mode_change_to_oods(pool);
+ pool->out_of_data_space = true;
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard_bio;
pool->process_cell = process_cell_read_only;
@@ -2432,6 +2443,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
case PM_WRITE:
if (old_mode != new_mode)
notify_of_pool_mode_change(pool, "write");
+ pool->out_of_data_space = false;
pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
@@ -2832,6 +2844,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
INIT_LIST_HEAD(&pool->active_thins);
pool->low_water_triggered = false;
pool->suspended = true;
+ pool->out_of_data_space = false;
pool->shared_read_ds = dm_deferred_set_create();
if (!pool->shared_read_ds) {
@@ -3886,7 +3899,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 17, 0},
+ .version = {1, 18, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4037,7 +4050,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_flush_bios = 1;
ti->flush_supported = true;
- ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
+ ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
ti->discard_zeroes_data_unsupported = true;
@@ -4260,7 +4273,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 17, 0},
+ .version = {1, 18, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
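
The practical effect of the new out_of_data_space flag is visible to upper layers: bios failed while the pool is out of data space now complete with -ENOSPC instead of -EIO, which callers can treat as a transient, retryable condition. A hypothetical endio sketch, assuming this kernel's bio->bi_error field; requeue_bio() and handle_hard_error() are invented names:

    #include <linux/bio.h>
    #include <linux/errno.h>

    void requeue_bio(struct bio *bio);          /* hypothetical helper */
    void handle_hard_error(struct bio *bio);    /* hypothetical helper */

    static void upper_layer_end_io(struct bio *bio)
    {
            if (bio->bi_error == -ENOSPC) {
                    /* transient: the pool may be resized, retry later */
                    requeue_bio(bio);
                    return;
            }
            if (bio->bi_error)
                    handle_hard_error(bio);
    }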
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 1cc10c4de701..459a9f8905ed 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -812,7 +812,7 @@ int verity_fec_ctr(struct dm_verity *v)
}
/* Reserve space for our per-bio data */
- ti->per_bio_data_size += sizeof(struct dm_verity_fec_io);
+ ti->per_io_data_size += sizeof(struct dm_verity_fec_io);
return 0;
}
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 5c5d30cb6ec5..0aba34a7b3b3 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -354,7 +354,7 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
size_t len))
{
unsigned todo = 1 << v->data_dev_block_bits;
- struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
do {
int r;
@@ -460,7 +460,7 @@ static int verity_verify_io(struct dm_verity_io *io)
static void verity_finish_io(struct dm_verity_io *io, int error)
{
struct dm_verity *v = io->v;
- struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_error = error;
@@ -574,7 +574,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE)
return -EIO;
- io = dm_per_bio_data(bio, ti->per_bio_data_size);
+ io = dm_per_bio_data(bio, ti->per_io_data_size);
io->v = v;
io->orig_bi_end_io = bio->bi_end_io;
io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
@@ -1036,15 +1036,15 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- ti->per_bio_data_size = sizeof(struct dm_verity_io) +
+ ti->per_io_data_size = sizeof(struct dm_verity_io) +
v->shash_descsize + v->digest_size * 2;
r = verity_fec_ctr(v);
if (r)
goto bad;
- ti->per_bio_data_size = roundup(ti->per_bio_data_size,
- __alignof__(struct dm_verity_io));
+ ti->per_io_data_size = roundup(ti->per_io_data_size,
+ __alignof__(struct dm_verity_io));
return 0;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dd834927bc66..3d3ac13287a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -106,14 +106,6 @@ struct dm_rq_clone_bio_info {
struct bio clone;
};
-union map_info *dm_get_rq_mapinfo(struct request *rq)
-{
- if (rq && rq->end_io_data)
- return &((struct dm_rq_target_io *)rq->end_io_data)->info;
- return NULL;
-}
-EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
-
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -129,28 +121,18 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define DMF_SUSPENDED_INTERNALLY 7
/*
- * A dummy definition to make RCU happy.
- * struct dm_table should never be dereferenced in this file.
- */
-struct dm_table {
- int undefined__;
-};
-
-/*
* Work processed by per-device workqueue.
*/
struct mapped_device {
struct srcu_struct io_barrier;
struct mutex suspend_lock;
- atomic_t holders;
- atomic_t open_count;
/*
- * The current mapping.
+ * The current mapping (struct dm_table *).
* Use dm_get_live_table{_fast} or take suspend_lock for
* dereference.
*/
- struct dm_table __rcu *map;
+ void __rcu *map;
struct list_head table_devices;
struct mutex table_devices_lock;
@@ -158,10 +140,16 @@ struct mapped_device {
unsigned long flags;
struct request_queue *queue;
+ int numa_node_id;
+
unsigned type;
/* Protect queue and type against concurrent access. */
struct mutex type_lock;
+ atomic_t holders;
+ atomic_t open_count;
+
+ struct dm_target *immutable_target;
struct target_type *immutable_target_type;
struct gendisk *disk;
@@ -175,8 +163,20 @@ struct mapped_device {
atomic_t pending[2];
wait_queue_head_t wait;
struct work_struct work;
- struct bio_list deferred;
spinlock_t deferred_lock;
+ struct bio_list deferred;
+
+ /*
+ * Event handling.
+ */
+ wait_queue_head_t eventq;
+ atomic_t event_nr;
+ atomic_t uevent_seq;
+ struct list_head uevent_list;
+ spinlock_t uevent_lock; /* Protect access to uevent_list */
+
+ /* the number of internal suspends */
+ unsigned internal_suspend_count;
/*
* Processing queue (flush)
@@ -192,32 +192,21 @@ struct mapped_device {
struct bio_set *bs;
/*
- * Event handling.
- */
- atomic_t event_nr;
- wait_queue_head_t eventq;
- atomic_t uevent_seq;
- struct list_head uevent_list;
- spinlock_t uevent_lock; /* Protect access to uevent_list */
-
- /*
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
- struct block_device *bdev;
/* forced geometry settings */
struct hd_geometry geometry;
+ struct block_device *bdev;
+
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
- /* the number of internal suspends */
- unsigned internal_suspend_count;
-
struct dm_stats stats;
struct kthread_worker kworker;
@@ -230,8 +219,9 @@ struct mapped_device {
ktime_t last_rq_start_time;
/* for blk-mq request-based DM support */
- struct blk_mq_tag_set tag_set;
- bool use_blk_mq;
+ struct blk_mq_tag_set *tag_set;
+ bool use_blk_mq:1;
+ bool init_tio_pdu:1;
};
#ifdef CONFIG_DM_MQ_DEFAULT
@@ -240,10 +230,19 @@ static bool use_blk_mq = true;
static bool use_blk_mq = false;
#endif
+#define DM_MQ_NR_HW_QUEUES 1
+#define DM_MQ_QUEUE_DEPTH 2048
+#define DM_NUMA_NODE NUMA_NO_NODE
+
+static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+static int dm_numa_node = DM_NUMA_NODE;
+
bool dm_use_blk_mq(struct mapped_device *md)
{
return md->use_blk_mq;
}
+EXPORT_SYMBOL_GPL(dm_use_blk_mq);
/*
* For mempools pre-allocation at the table loading time.
@@ -277,6 +276,27 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
*/
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+static int __dm_get_module_param_int(int *module_param, int min, int max)
+{
+ int param = ACCESS_ONCE(*module_param);
+ int modified_param = 0;
+ bool modified = true;
+
+ if (param < min)
+ modified_param = min;
+ else if (param > max)
+ modified_param = max;
+ else
+ modified = false;
+
+ if (modified) {
+ (void)cmpxchg(module_param, param, modified_param);
+ param = modified_param;
+ }
+
+ return param;
+}
+
static unsigned __dm_get_module_param(unsigned *module_param,
unsigned def, unsigned max)
{
@@ -310,6 +330,23 @@ unsigned dm_get_reserved_rq_based_ios(void)
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
+static unsigned dm_get_blk_mq_nr_hw_queues(void)
+{
+ return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
+}
+
+static unsigned dm_get_blk_mq_queue_depth(void)
+{
+ return __dm_get_module_param(&dm_mq_queue_depth,
+ DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
+}
+
+static unsigned dm_get_numa_node(void)
+{
+ return __dm_get_module_param_int(&dm_numa_node,
+ DM_NUMA_NODE, num_online_nodes() - 1);
+}
+
static int __init local_init(void)
{
int r = -ENOMEM;
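
The new __dm_get_module_param_int() reads the parameter locklessly (ACCESS_ONCE) and, when a sysfs write has pushed it out of range, claws it back into range with a single cmpxchg() on first use; a racing writer at worst sees its value corrected. A sketch of an additional parameter using the same helper; dm_example_param is hypothetical:

    static int dm_example_param = -1;   /* hypothetical, -1 meaning "auto" */

    static int dm_get_example_param(void)
    {
            /* clamp to [-1, 7]; out-of-range sysfs writes get corrected */
            return __dm_get_module_param_int(&dm_example_param, -1, 7);
    }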
@@ -323,7 +360,7 @@ static int __init local_init(void)
if (!_rq_tio_cache)
goto out_free_io_cache;
- _rq_cache = kmem_cache_create("dm_clone_request", sizeof(struct request),
+ _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
__alignof__(struct request), 0, NULL);
if (!_rq_cache)
goto out_free_rq_tio_cache;
@@ -556,16 +593,17 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
-static int dm_get_live_table_for_ioctl(struct mapped_device *md,
- struct dm_target **tgt, struct block_device **bdev,
- fmode_t *mode, int *srcu_idx)
+static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
+ struct block_device **bdev,
+ fmode_t *mode)
{
+ struct dm_target *tgt;
struct dm_table *map;
- int r;
+ int srcu_idx, r;
retry:
r = -ENOTTY;
- map = dm_get_live_table(md, srcu_idx);
+ map = dm_get_live_table(md, &srcu_idx);
if (!map || !dm_table_get_size(map))
goto out;
@@ -573,9 +611,8 @@ retry:
if (dm_table_get_num_targets(map) != 1)
goto out;
- *tgt = dm_table_get_target(map, 0);
-
- if (!(*tgt)->type->prepare_ioctl)
+ tgt = dm_table_get_target(map, 0);
+ if (!tgt->type->prepare_ioctl)
goto out;
if (dm_suspended_md(md)) {
@@ -583,14 +620,16 @@ retry:
goto out;
}
- r = (*tgt)->type->prepare_ioctl(*tgt, bdev, mode);
+ r = tgt->type->prepare_ioctl(tgt, bdev, mode);
if (r < 0)
goto out;
+ bdgrab(*bdev);
+ dm_put_live_table(md, srcu_idx);
return r;
out:
- dm_put_live_table(md, *srcu_idx);
+ dm_put_live_table(md, srcu_idx);
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
msleep(10);
goto retry;
@@ -602,11 +641,9 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct mapped_device *md = bdev->bd_disk->private_data;
- struct dm_target *tgt;
- struct block_device *tgt_bdev = NULL;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -621,9 +658,9 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
goto out;
}
- r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
+ r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
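
The ioctl path above trades a held SRCU read lock for a bdev reference: the live table is pinned only long enough for prepare_ioctl() to resolve the bdev, then bdgrab() keeps that bdev alive and the table lock is dropped before the (possibly long-running) ioctl is issued. A sketch of the resulting calling pattern, assuming dm_grab_bdev_for_ioctl() as defined above; with_pinned_bdev() is a hypothetical wrapper:

    static int with_pinned_bdev(struct mapped_device *md,
                                struct block_device **bdev, fmode_t *mode,
                                int (*fn)(struct block_device *))
    {
            int r = dm_grab_bdev_for_ioctl(md, bdev, mode);

            if (r < 0)
                    return r;

            /* safe without the table lock: we hold our own bdev reference */
            r = fn(*bdev);
            bdput(*bdev);
            return r;
    }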
@@ -642,24 +679,24 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
bio_put(&tio->clone);
}
-static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
- gfp_t gfp_mask)
+static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
+ gfp_t gfp_mask)
{
return mempool_alloc(md->io_pool, gfp_mask);
}
-static void free_rq_tio(struct dm_rq_target_io *tio)
+static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
mempool_free(tio, tio->md->io_pool);
}
-static struct request *alloc_clone_request(struct mapped_device *md,
- gfp_t gfp_mask)
+static struct request *alloc_old_clone_request(struct mapped_device *md,
+ gfp_t gfp_mask)
{
return mempool_alloc(md->rq_pool, gfp_mask);
}
-static void free_clone_request(struct mapped_device *md, struct request *rq)
+static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
mempool_free(rq, md->rq_pool);
}
@@ -827,7 +864,7 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
mutex_lock(&md->table_devices_lock);
td = find_table_device(&md->table_devices, dev, mode);
if (!td) {
- td = kmalloc(sizeof(*td), GFP_KERNEL);
+ td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
if (!td) {
mutex_unlock(&md->table_devices_lock);
return -ENOMEM;
@@ -1109,12 +1146,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
*/
- if (run_queue) {
- if (md->queue->mq_ops)
- blk_mq_run_hw_queues(md->queue, true);
- else
- blk_run_queue_async(md->queue);
- }
+ if (!md->queue->mq_ops && run_queue)
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
@@ -1134,15 +1167,10 @@ static void free_rq_clone(struct request *clone)
tio->ti->type->release_clone_rq(clone);
else if (!md->queue->mq_ops)
/* request_fn queue stacked on request_fn queue(s) */
- free_clone_request(md, clone);
- /*
- * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
- * no need to call free_clone_request() because we leverage blk-mq by
- * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
- */
+ free_old_clone_request(md, clone);
if (!md->queue->mq_ops)
- free_rq_tio(tio);
+ free_old_rq_tio(tio);
}
/*
@@ -1192,13 +1220,13 @@ static void dm_unprep_request(struct request *rq)
if (clone)
free_rq_clone(clone);
else if (!tio->md->queue->mq_ops)
- free_rq_tio(tio);
+ free_old_rq_tio(tio);
}
/*
* Requeue the original request of a clone.
*/
-static void old_requeue_request(struct request *rq)
+static void dm_old_requeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
unsigned long flags;
@@ -1209,45 +1237,57 @@ static void old_requeue_request(struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags);
}
+static void dm_mq_requeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+ unsigned long flags;
+
+ blk_mq_requeue_request(rq);
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!blk_queue_stopped(q))
+ blk_mq_kick_requeue_list(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
static void dm_requeue_original_request(struct mapped_device *md,
struct request *rq)
{
int rw = rq_data_dir(rq);
+ rq_end_stats(md, rq);
dm_unprep_request(rq);
- rq_end_stats(md, rq);
if (!rq->q->mq_ops)
- old_requeue_request(rq);
- else {
- blk_mq_requeue_request(rq);
- blk_mq_kick_requeue_list(rq->q);
- }
+ dm_old_requeue_request(rq);
+ else
+ dm_mq_requeue_request(rq);
rq_completed(md, rw, false);
}
-static void old_stop_queue(struct request_queue *q)
+static void dm_old_stop_queue(struct request_queue *q)
{
unsigned long flags;
- if (blk_queue_stopped(q))
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_queue_stopped(q)) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
return;
+ }
- spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void stop_queue(struct request_queue *q)
+static void dm_stop_queue(struct request_queue *q)
{
if (!q->mq_ops)
- old_stop_queue(q);
+ dm_old_stop_queue(q);
else
blk_mq_stop_hw_queues(q);
}
-static void old_start_queue(struct request_queue *q)
+static void dm_old_start_queue(struct request_queue *q)
{
unsigned long flags;
@@ -1257,12 +1297,14 @@ static void old_start_queue(struct request_queue *q)
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static void start_queue(struct request_queue *q)
+static void dm_start_queue(struct request_queue *q)
{
if (!q->mq_ops)
- old_start_queue(q);
- else
+ dm_old_start_queue(q);
+ else {
blk_mq_start_stopped_hw_queues(q, true);
+ blk_mq_kick_requeue_list(q);
+ }
}
static void dm_done(struct request *clone, int error, bool mapped)
@@ -1313,7 +1355,7 @@ static void dm_softirq_done(struct request *rq)
if (!rq->q->mq_ops) {
blk_end_request_all(rq, tio->error);
rq_completed(tio->md, rw, false);
- free_rq_tio(tio);
+ free_old_rq_tio(tio);
} else {
blk_mq_end_request(rq, tio->error);
rq_completed(tio->md, rw, false);
@@ -1336,7 +1378,10 @@ static void dm_complete_request(struct request *rq, int error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- blk_complete_request(rq);
+ if (!rq->q->mq_ops)
+ blk_complete_request(rq);
+ else
+ blk_mq_complete_request(rq, error);
}
/*
@@ -1352,7 +1397,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error)
}
/*
- * Called with the clone's queue lock held (for non-blk-mq)
+ * Called with the clone's queue lock held (in the case of .request_fn)
*/
static void end_clone_request(struct request *clone, int error)
{
@@ -1522,21 +1567,26 @@ static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
/*
* Creates a bio that consists of range of complete bvecs.
*/
-static void clone_bio(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned len)
+static int clone_bio(struct dm_target_io *tio, struct bio *bio,
+ sector_t sector, unsigned len)
{
struct bio *clone = &tio->clone;
__bio_clone_fast(clone, bio);
- if (bio_integrity(bio))
- bio_integrity_clone(clone, bio, GFP_NOIO);
+ if (bio_integrity(bio)) {
+ int r = bio_integrity_clone(clone, bio, GFP_NOIO);
+ if (r < 0)
+ return r;
+ }
bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
if (bio_integrity(bio))
bio_integrity_trim(clone, 0, len);
+
+ return 0;
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1593,13 +1643,14 @@ static int __send_empty_flush(struct clone_info *ci)
return 0;
}
-static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
sector_t sector, unsigned *len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
unsigned target_bio_nr;
unsigned num_target_bios = 1;
+ int r = 0;
/*
* Does the target want to receive duplicate copies of the bio?
@@ -1610,9 +1661,15 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
tio = alloc_tio(ci, ti, target_bio_nr);
tio->len_ptr = len;
- clone_bio(tio, bio, sector, *len);
+ r = clone_bio(tio, bio, sector, *len);
+ if (r < 0) {
+ free_tio(ci->md, tio);
+ break;
+ }
__map_bio(tio);
}
+
+ return r;
}
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
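
clone_bio() failing is new in this patch (bio_integrity_clone() can fail its GFP_NOIO allocation), so every caller now unwinds the tio it allocated and propagates the error up through __split_and_process_non_flush(). The calling shape, reduced to a sketch built on the functions above; map_one() is a hypothetical name and alloc_tio() is assumed to take a target_bio_nr as its third argument:

    static int map_one(struct clone_info *ci, struct dm_target *ti,
                       sector_t sector, unsigned *len)
    {
            /* target_bio_nr 0: the single-bio case */
            struct dm_target_io *tio = alloc_tio(ci, ti, 0);
            int r = clone_bio(tio, ci->bio, sector, *len);

            if (r < 0) {
                    free_tio(ci->md, tio);      /* undo allocation on failure */
                    return r;
            }
            __map_bio(tio);
            return 0;
    }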
@@ -1689,6 +1746,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
struct bio *bio = ci->bio;
struct dm_target *ti;
unsigned len;
+ int r;
if (unlikely(bio->bi_rw & REQ_DISCARD))
return __send_discard(ci);
@@ -1701,7 +1759,9 @@ static int __split_and_process_non_flush(struct clone_info *ci)
len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
- __clone_and_map_data_bio(ci, ti, ci->sector, &len);
+ r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
+ if (r < 0)
+ return r;
ci->sector += len;
ci->sector_count -= len;
@@ -1839,28 +1899,22 @@ static int setup_clone(struct request *clone, struct request *rq,
return 0;
}
-static struct request *clone_rq(struct request *rq, struct mapped_device *md,
- struct dm_rq_target_io *tio, gfp_t gfp_mask)
+static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
+ struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
/*
- * Do not allocate a clone if tio->clone was already set
- * (see: dm_mq_queue_rq).
+ * Create clone for use with .request_fn request_queue
*/
- bool alloc_clone = !tio->clone;
struct request *clone;
- if (alloc_clone) {
- clone = alloc_clone_request(md, gfp_mask);
- if (!clone)
- return NULL;
- } else
- clone = tio->clone;
+ clone = alloc_old_clone_request(md, gfp_mask);
+ if (!clone)
+ return NULL;
blk_rq_init(NULL, clone);
if (setup_clone(clone, rq, tio, gfp_mask)) {
/* -ENOMEM */
- if (alloc_clone)
- free_clone_request(md, clone);
+ free_old_clone_request(md, clone);
return NULL;
}
@@ -1877,29 +1931,40 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
tio->clone = NULL;
tio->orig = rq;
tio->error = 0;
- memset(&tio->info, 0, sizeof(tio->info));
+ /*
+ * Avoid initializing info for blk-mq; it passes
+ * target-specific data through info.ptr
+ * (see: dm_mq_init_request)
+ */
+ if (!md->init_tio_pdu)
+ memset(&tio->info, 0, sizeof(tio->info));
if (md->kworker_task)
init_kthread_work(&tio->work, map_tio_request);
}
-static struct dm_rq_target_io *prep_tio(struct request *rq,
- struct mapped_device *md, gfp_t gfp_mask)
+static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
+ struct mapped_device *md,
+ gfp_t gfp_mask)
{
struct dm_rq_target_io *tio;
int srcu_idx;
struct dm_table *table;
- tio = alloc_rq_tio(md, gfp_mask);
+ tio = alloc_old_rq_tio(md, gfp_mask);
if (!tio)
return NULL;
init_tio(tio, rq, md);
table = dm_get_live_table(md, &srcu_idx);
+ /*
+ * Must clone a request if this .request_fn DM device
+ * is stacked on .request_fn device(s).
+ */
if (!dm_table_mq_request_based(table)) {
- if (!clone_rq(rq, md, tio, gfp_mask)) {
+ if (!clone_old_rq(rq, md, tio, gfp_mask)) {
dm_put_live_table(md, srcu_idx);
- free_rq_tio(tio);
+ free_old_rq_tio(tio);
return NULL;
}
}
@@ -1911,7 +1976,7 @@ static struct dm_rq_target_io *prep_tio(struct request *rq,
/*
* Called with the queue lock held.
*/
-static int dm_prep_fn(struct request_queue *q, struct request *rq)
+static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
struct mapped_device *md = q->queuedata;
struct dm_rq_target_io *tio;
@@ -1921,7 +1986,7 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_KILL;
}
- tio = prep_tio(rq, md, GFP_ATOMIC);
+ tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
if (!tio)
return BLKPREP_DEFER;
@@ -2079,12 +2144,18 @@ static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
static void dm_request_fn(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
- struct dm_target *ti;
+ struct dm_target *ti = md->immutable_target;
struct request *rq;
struct dm_rq_target_io *tio;
- sector_t pos;
+ sector_t pos = 0;
+
+ if (unlikely(!ti)) {
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+ ti = dm_table_find_target(map, pos);
+ dm_put_live_table(md, srcu_idx);
+ }
/*
* For suspend, check blk_queue_stopped() and increment
@@ -2095,33 +2166,21 @@ static void dm_request_fn(struct request_queue *q)
while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
- goto out;
+ return;
/* always use block 0 to find the target for flushes for now */
pos = 0;
if (!(rq->cmd_flags & REQ_FLUSH))
pos = blk_rq_pos(rq);
- ti = dm_table_find_target(map, pos);
- if (!dm_target_is_valid(ti)) {
- /*
- * Must perform setup, that rq_completed() requires,
- * before calling dm_kill_unmapped_request
- */
- DMERR_LIMIT("request attempted access beyond the end of device");
- dm_start_request(md, rq);
- dm_kill_unmapped_request(rq, -EIO);
- continue;
+ if ((dm_request_peeked_before_merge_deadline(md) &&
+ md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+ md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
+ (ti->type->busy && ti->type->busy(ti))) {
+ blk_delay_queue(q, HZ / 100);
+ return;
}
- if (dm_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
- md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
- goto delay_and_out;
-
- if (ti->type->busy && ti->type->busy(ti))
- goto delay_and_out;
-
dm_start_request(md, rq);
tio = tio_from_request(rq);
@@ -2130,13 +2189,6 @@ static void dm_request_fn(struct request_queue *q)
queue_kthread_work(&md->kworker, &tio->work);
BUG_ON(!irqs_disabled());
}
-
- goto out;
-
-delay_and_out:
- blk_delay_queue(q, HZ / 100);
-out:
- dm_put_live_table(md, srcu_idx);
}
static int dm_any_congested(void *congested_data, int bdi_bits)
@@ -2146,19 +2198,18 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
struct dm_table *map;
if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
- map = dm_get_live_table_fast(md);
- if (map) {
+ if (dm_request_based(md)) {
/*
- * Request-based dm cares about only own queue for
- * the query about congestion status of request_queue
+ * With request-based DM we only need to check the
+ * top-level queue for congestion.
*/
- if (dm_request_based(md))
- r = md->queue->backing_dev_info.wb.state &
- bdi_bits;
- else
+ r = md->queue->backing_dev_info.wb.state & bdi_bits;
+ } else {
+ map = dm_get_live_table_fast(md);
+ if (map)
r = dm_table_any_congested(map, bdi_bits);
+ dm_put_live_table_fast(md);
}
- dm_put_live_table_fast(md);
}
return r;
@@ -2238,7 +2289,7 @@ static void dm_init_md_queue(struct mapped_device *md)
md->queue->backing_dev_info.congested_data = md;
}
-static void dm_init_old_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
{
md->use_blk_mq = false;
dm_init_md_queue(md);
@@ -2285,10 +2336,11 @@ static void cleanup_mapped_device(struct mapped_device *md)
*/
static struct mapped_device *alloc_dev(int minor)
{
- int r;
- struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
+ int r, numa_node_id = dm_get_numa_node();
+ struct mapped_device *md;
void *old_md;
+ md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
if (!md) {
DMWARN("unable to allocate device, out of memory.");
return NULL;
@@ -2309,7 +2361,9 @@ static struct mapped_device *alloc_dev(int minor)
if (r < 0)
goto bad_io_barrier;
+ md->numa_node_id = numa_node_id;
md->use_blk_mq = use_blk_mq;
+ md->init_tio_pdu = false;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
mutex_init(&md->type_lock);
@@ -2323,13 +2377,13 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->table_devices);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_alloc_queue(GFP_KERNEL);
+ md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
if (!md->queue)
goto bad;
dm_init_md_queue(md);
- md->disk = alloc_disk(1);
+ md->disk = alloc_disk_node(1, numa_node_id);
if (!md->disk)
goto bad;
@@ -2393,8 +2447,10 @@ static void free_dev(struct mapped_device *md)
unlock_fs(md);
cleanup_mapped_device(md);
- if (md->use_blk_mq)
- blk_mq_free_tag_set(&md->tag_set);
+ if (md->tag_set) {
+ blk_mq_free_tag_set(md->tag_set);
+ kfree(md->tag_set);
+ }
free_table_devices(&md->table_devices);
dm_stats_cleanup(&md->stats);
@@ -2502,13 +2558,20 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* This must be done before setting the queue restrictions,
* because request-based dm may be run just after the setting.
*/
- if (dm_table_request_based(t))
- stop_queue(q);
+ if (dm_table_request_based(t)) {
+ dm_stop_queue(q);
+ /*
+ * Leverage the fact that request-based DM targets are
+ * immutable singletons and establish md->immutable_target
+ * - used to optimize both dm_request_fn and dm_mq_queue_rq
+ */
+ md->immutable_target = dm_table_get_immutable_target(t);
+ }
__bind_mempools(md, t);
old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
- rcu_assign_pointer(md->map, t);
+ rcu_assign_pointer(md->map, (void *)t);
md->immutable_target_type = dm_table_get_immutable_target_type(t);
dm_table_set_restrictions(t, q, limits);
@@ -2574,7 +2637,6 @@ void dm_set_md_type(struct mapped_device *md, unsigned type)
unsigned dm_get_md_type(struct mapped_device *md)
{
- BUG_ON(!mutex_is_locked(&md->type_lock));
return md->type;
}
@@ -2594,7 +2656,7 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-static void init_rq_based_worker_thread(struct mapped_device *md)
+static void dm_old_init_rq_based_worker_thread(struct mapped_device *md)
{
/* Initialize the request-based DM worker thread */
init_kthread_worker(&md->kworker);
@@ -2603,26 +2665,22 @@ static void init_rq_based_worker_thread(struct mapped_device *md)
}
/*
- * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ * Fully initialize a .request_fn request-based queue.
*/
-static int dm_init_request_based_queue(struct mapped_device *md)
+static int dm_old_init_request_queue(struct mapped_device *md)
{
- struct request_queue *q = NULL;
-
/* Fully initialize the queue */
- q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
- if (!q)
+ if (!blk_init_allocated_queue(md->queue, dm_request_fn, NULL))
return -EINVAL;
/* disable dm_request_fn's merge heuristic by default */
md->seq_rq_merge_deadline_usecs = 0;
- md->queue = q;
- dm_init_old_md_queue(md);
+ dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
- blk_queue_prep_rq(md->queue, dm_prep_fn);
+ blk_queue_prep_rq(md->queue, dm_old_prep_fn);
- init_rq_based_worker_thread(md);
+ dm_old_init_rq_based_worker_thread(md);
elv_register_queue(md->queue);
@@ -2642,6 +2700,11 @@ static int dm_mq_init_request(void *data, struct request *rq,
*/
tio->md = md;
+ if (md->init_tio_pdu) {
+ /* target-specific per-io data is immediately after the tio */
+ tio->info.ptr = tio + 1;
+ }
+
return 0;
}
@@ -2651,28 +2714,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *rq = bd->rq;
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
struct mapped_device *md = tio->md;
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
- struct dm_target *ti;
- sector_t pos;
+ struct dm_target *ti = md->immutable_target;
- /* always use block 0 to find the target for flushes for now */
- pos = 0;
- if (!(rq->cmd_flags & REQ_FLUSH))
- pos = blk_rq_pos(rq);
+ if (unlikely(!ti)) {
+ int srcu_idx;
+ struct dm_table *map = dm_get_live_table(md, &srcu_idx);
- ti = dm_table_find_target(map, pos);
- if (!dm_target_is_valid(ti)) {
+ ti = dm_table_find_target(map, 0);
dm_put_live_table(md, srcu_idx);
- DMERR_LIMIT("request attempted access beyond the end of device");
- /*
- * Must perform setup, that rq_completed() requires,
- * before returning BLK_MQ_RQ_QUEUE_ERROR
- */
- dm_start_request(md, rq);
- return BLK_MQ_RQ_QUEUE_ERROR;
}
- dm_put_live_table(md, srcu_idx);
if (ti->type->busy && ti->type->busy(ti))
return BLK_MQ_RQ_QUEUE_BUSY;
@@ -2688,20 +2738,12 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
*/
tio->ti = ti;
- /* Clone the request if underlying devices aren't blk-mq */
- if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
- /* clone request is allocated at the end of the pdu */
- tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
- (void) clone_rq(rq, md, tio, GFP_ATOMIC);
- queue_kthread_work(&md->kworker, &tio->work);
- } else {
- /* Direct call is fine since .queue_rq allows allocations */
- if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
- /* Undo dm_start_request() before requeuing */
- rq_end_stats(md, rq);
- rq_completed(md, rq_data_dir(rq), false);
- return BLK_MQ_RQ_QUEUE_BUSY;
- }
+ /* Direct call is fine since .queue_rq allows allocations */
+ if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+ /* Undo dm_start_request() before requeuing */
+ rq_end_stats(md, rq);
+ rq_completed(md, rq_data_dir(rq), false);
+ return BLK_MQ_RQ_QUEUE_BUSY;
}
return BLK_MQ_RQ_QUEUE_OK;
@@ -2714,47 +2756,56 @@ static struct blk_mq_ops dm_mq_ops = {
.init_request = dm_mq_init_request,
};
-static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
+static int dm_mq_init_request_queue(struct mapped_device *md,
+ struct dm_target *immutable_tgt)
{
- unsigned md_type = dm_get_md_type(md);
struct request_queue *q;
int err;
- memset(&md->tag_set, 0, sizeof(md->tag_set));
- md->tag_set.ops = &dm_mq_ops;
- md->tag_set.queue_depth = BLKDEV_MAX_RQ;
- md->tag_set.numa_node = NUMA_NO_NODE;
- md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
- md->tag_set.nr_hw_queues = 1;
- if (md_type == DM_TYPE_REQUEST_BASED) {
- /* make the memory for non-blk-mq clone part of the pdu */
- md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
- } else
- md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
- md->tag_set.driver_data = md;
-
- err = blk_mq_alloc_tag_set(&md->tag_set);
+ if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+ DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
+ return -EINVAL;
+ }
+
+ md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
+ if (!md->tag_set)
+ return -ENOMEM;
+
+ md->tag_set->ops = &dm_mq_ops;
+ md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
+ md->tag_set->numa_node = md->numa_node_id;
+ md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
+ md->tag_set->driver_data = md;
+
+ md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
+ if (immutable_tgt && immutable_tgt->per_io_data_size) {
+ /* any target-specific per-io data is immediately after the tio */
+ md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
+ md->init_tio_pdu = true;
+ }
+
+ err = blk_mq_alloc_tag_set(md->tag_set);
if (err)
- return err;
+ goto out_kfree_tag_set;
- q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
+ q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto out_tag_set;
}
- md->queue = q;
dm_init_md_queue(md);
/* backfill 'mq' sysfs registration normally done in blk_register_queue */
blk_mq_register_disk(md->disk);
- if (md_type == DM_TYPE_REQUEST_BASED)
- init_rq_based_worker_thread(md);
-
return 0;
out_tag_set:
- blk_mq_free_tag_set(&md->tag_set);
+ blk_mq_free_tag_set(md->tag_set);
+out_kfree_tag_set:
+ kfree(md->tag_set);
+
return err;
}
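
The tag_set sizing above fixes a pdu layout behind every blk-mq request: struct dm_rq_target_io comes first, and when an immutable target declared per_io_data_size, that target's per-io data follows immediately after the tio, which dm_mq_init_request() exposes via tio->info.ptr = tio + 1. A sketch of recovering both pieces from a request, assuming that layout:

    #include <linux/blk-mq.h>

    static struct dm_rq_target_io *rq_to_tio(struct request *rq)
    {
            return blk_mq_rq_to_pdu(rq);        /* first part of the pdu */
    }

    static void *rq_to_target_data(struct request *rq)
    {
            /* target-specific per-io data is immediately after the tio */
            return rq_to_tio(rq) + 1;
    }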
@@ -2769,28 +2820,28 @@ static unsigned filter_md_type(unsigned type, struct mapped_device *md)
/*
* Setup the DM device's queue based on md's type
*/
-int dm_setup_md_queue(struct mapped_device *md)
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
unsigned md_type = filter_md_type(dm_get_md_type(md), md);
switch (md_type) {
case DM_TYPE_REQUEST_BASED:
- r = dm_init_request_based_queue(md);
+ r = dm_old_init_request_queue(md);
if (r) {
- DMWARN("Cannot initialize queue for request-based mapped device");
+ DMERR("Cannot initialize queue for request-based mapped device");
return r;
}
break;
case DM_TYPE_MQ_REQUEST_BASED:
- r = dm_init_request_based_blk_mq_queue(md);
+ r = dm_mq_init_request_queue(md, dm_table_get_immutable_target(t));
if (r) {
- DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
+ DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r;
}
break;
case DM_TYPE_BIO_BASED:
- dm_init_old_md_queue(md);
+ dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
/*
* DM handles splitting bios as needed. Free the bio_split bioset
@@ -3133,7 +3184,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* dm defers requests to md->wq from md->queue.
*/
if (dm_request_based(md)) {
- stop_queue(md->queue);
+ dm_stop_queue(md->queue);
if (md->kworker_task)
flush_kthread_worker(&md->kworker);
}
@@ -3157,7 +3208,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
dm_queue_flush(md);
if (dm_request_based(md))
- start_queue(md->queue);
+ dm_start_queue(md->queue);
unlock_fs(md);
dm_table_presuspend_undo_targets(map);
@@ -3236,7 +3287,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
* Request-based dm is queueing the deferred I/Os in its request_queue.
*/
if (dm_request_based(md))
- start_queue(md->queue);
+ dm_start_queue(md->queue);
unlock_fs(md);
@@ -3482,9 +3533,9 @@ int dm_noflush_suspending(struct dm_target *ti)
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
- unsigned integrity, unsigned per_bio_data_size)
+ unsigned integrity, unsigned per_io_data_size)
{
- struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+ struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
struct kmem_cache *cachep = NULL;
unsigned int pool_size = 0;
unsigned int front_pad;
@@ -3498,7 +3549,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
case DM_TYPE_BIO_BASED:
cachep = _io_cache;
pool_size = dm_get_reserved_bio_based_ios();
- front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+ front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
break;
case DM_TYPE_REQUEST_BASED:
cachep = _rq_tio_cache;
@@ -3511,8 +3562,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
if (!pool_size)
pool_size = dm_get_reserved_rq_based_ios();
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
- /* per_bio_data_size is not used. See __bind_mempools(). */
- WARN_ON(per_bio_data_size != 0);
+ /* per_io_data_size is used for blk-mq pdu at queue allocation */
break;
default:
BUG();
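
For bio-based devices, the per-io data rides in the bioset front pad: the target's per_io_data_size is rounded up to the alignment of struct dm_target_io, and the offset of the embedded clone bio is added on top, so the payload sits first and the tio's clone bio lines up with the bio the bioset hands out. A runnable sketch of that arithmetic, using a dummy stand-in struct rather than the real dm_target_io layout:

#include <stdio.h>
#include <stddef.h>

/* dummy stand-in for struct dm_target_io; the real layout differs */
struct tio { void *ti; unsigned len; long clone; };

#define ROUNDUP(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	size_t per_io_data_size = 24;	/* example target payload size */
	size_t front_pad = ROUNDUP(per_io_data_size, __alignof__(struct tio))
			 + offsetof(struct tio, clone);

	/* pad = aligned per-io payload, then the tio up to its clone bio */
	printf("front_pad = %zu\n", front_pad);
	return 0;
}
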
@@ -3554,15 +3604,14 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
}
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
- u32 flags)
+ u32 flags)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3572,20 +3621,19 @@ static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
- u32 flags)
+ u32 flags)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3595,7 +3643,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
@@ -3603,11 +3651,10 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3617,20 +3664,19 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
- enum pr_type type, bool abort)
+ enum pr_type type, bool abort)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3640,7 +3686,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
@@ -3648,11 +3694,10 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
{
struct mapped_device *md = bdev->bd_disk->private_data;
const struct pr_ops *ops;
- struct dm_target *tgt;
fmode_t mode;
- int srcu_idx, r;
+ int r;
- r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+ r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
if (r < 0)
return r;
@@ -3662,7 +3707,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
else
r = -EOPNOTSUPP;
- dm_put_live_table(md, srcu_idx);
+ bdput(bdev);
return r;
}
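
All five persistent-reservation handlers now share one shape: pin the block device, dispatch to the matching pr_ops member if the underlying device provides it, otherwise return -EOPNOTSUPP, and drop the reference on every path. A condensed sketch of that pattern (the names are placeholders, not the dm API):

#include <stdio.h>
#include <errno.h>

struct pr_ops { int (*pr_clear)(void *bdev, unsigned long long key); };
struct bdev { const struct pr_ops *ops; };

static int grab_bdev(struct bdev *b) { return b ? 0 : -ENOTTY; } /* dm_grab_bdev_for_ioctl() analogue */
static void put_bdev(struct bdev *b) { (void)b; }                /* bdput() analogue */

static int pr_clear(struct bdev *bdev, unsigned long long key)
{
	int r = grab_bdev(bdev);
	if (r < 0)
		return r;
	if (bdev->ops && bdev->ops->pr_clear)
		r = bdev->ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;
	put_bdev(bdev);		/* reference dropped after every successful grab */
	return r;
}

int main(void) { struct bdev b = { 0 }; printf("%d\n", pr_clear(&b, 1)); return 0; }
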
@@ -3701,6 +3746,15 @@ MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
+module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
+
+module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
+
+module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
+
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 7edcf97dfa5a..13a758ec0f88 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -73,6 +73,8 @@ int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
unsigned dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
+struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
+struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_mq_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
@@ -84,7 +86,7 @@ void dm_set_md_type(struct mapped_device *md, unsigned type);
unsigned dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
-int dm_setup_md_queue(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
* To check the return value from dm_table_find_target().
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e55e6cf9ec17..194580fba7fd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -305,6 +305,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
*/
void mddev_suspend(struct mddev *mddev)
{
+ WARN_ON_ONCE(current == mddev->thread->tsk);
if (mddev->suspended++)
return;
synchronize_rcu();
@@ -717,6 +718,7 @@ static void super_written(struct bio *bio)
if (atomic_dec_and_test(&mddev->pending_writes))
wake_up(&mddev->sb_wait);
+ rdev_dec_pending(rdev, mddev);
bio_put(bio);
}
@@ -731,6 +733,8 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
*/
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ atomic_inc(&rdev->nr_pending);
+
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0);
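
The two md.c hunks above are a pair: md_super_write() now takes a reference on the rdev before issuing the superblock write, and super_written() drops it in the completion path, so the rdev cannot be torn down while the bio is in flight. A userspace analogue of that take-before-submit, drop-in-completion discipline:

#include <stdio.h>

struct rdev { int nr_pending; };

/* in this sketch the "I/O" completes inline; real completion is asynchronous */
static void submit(struct rdev *r, void (*done)(struct rdev *)) { done(r); }

static void super_written(struct rdev *rdev)
{
	/* ... wake up sb_wait waiters ... */
	rdev->nr_pending--;	/* rdev_dec_pending() analogue */
}

static void super_write(struct rdev *rdev)
{
	rdev->nr_pending++;	/* pin the rdev before the bio is submitted */
	submit(rdev, super_written);
}

int main(void)
{
	struct rdev r = { 0 };
	super_write(&r);
	printf("nr_pending=%d\n", r.nr_pending);	/* back to 0: balanced */
	return 0;
}
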
@@ -5671,7 +5675,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
export_array(mddev);
md_clean(mddev);
- kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
@@ -6883,7 +6886,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
case ADD_NEW_DISK:
/* We can support ADD_NEW_DISK on read-only arrays
- * on if we are re-adding a preexisting device.
+ * only if we are re-adding a preexisting device.
* So require mddev->pers and MD_DISK_SYNC.
*/
if (mddev->pers) {
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 0a72ab6e6c20..dd483bb2e111 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
}
multipath = conf->multipaths + mp_bh->path;
- mp_bh->bio = *bio;
+ bio_init(&mp_bh->bio);
+ __bio_clone_fast(&mp_bh->bio, bio);
+
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
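
The multipath hunk replaces a raw struct copy of the incoming bio with bio_init() plus __bio_clone_fast(). A struct assignment would also duplicate fields that must stay private to each bio (completion callback, flags, reference count), whereas a clone re-initializes those and shares only the payload. A toy illustration of the difference, with a hypothetical bio-like struct:

#include <stdio.h>

struct toybio {
	int refcount;		/* must be private to each bio */
	const char *data;	/* payload may be shared with a clone */
};

static void toybio_init(struct toybio *b) { b->refcount = 1; b->data = 0; }
static void toybio_clone(struct toybio *dst, const struct toybio *src) { dst->data = src->data; }

int main(void)
{
	struct toybio orig = { .refcount = 3, .data = "payload" };

	struct toybio copied = orig;	/* bug analogue: inherits refcount 3 */

	struct toybio cloned;
	toybio_init(&cloned);		/* bio_init() analogue: fresh private state */
	toybio_clone(&cloned, &orig);	/* __bio_clone_fast() analogue: share payload */

	printf("copied.refcount=%d cloned.refcount=%d\n",
	       copied.refcount, cloned.refcount);
	return 0;
}
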
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4e3843f7d245..a7f2b9c9f8a0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
- if (first_bad < this_sector)
+ if (first_bad <= this_sector)
/* Cannot use this */
continue;
best_good_sectors = first_bad - this_sector;
@@ -2274,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2391,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
@@ -2695,7 +2698,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
- BUG_ON(sync_blocks < (PAGE_SIZE>>9));
if ((len >> 9) > sync_blocks)
len = sync_blocks<<9;
}
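
The raid1 hunks keep conf->nr_queued in step with bio_end_io_list: the counter is bumped when a failed write is parked on the list and decremented once per bio as raid1d drains it. The old splice via list_add(&tmp, ...) plus list_del_init() emptied the source list without touching the counter, and freeze_array() waits for nr_queued to balance, so the mismatch could hang it. A minimal counted-drain sketch:

#include <stdio.h>

#define QMAX 8

static int queue[QMAX], head, tail, nr_queued;

static void park(int bio)			/* handle_write_finished() analogue */
{
	queue[tail++ % QMAX] = bio;
	nr_queued++;				/* accounted at enqueue time */
}

static void drain(void)				/* raid1d() retry-list analogue */
{
	while (head != tail) {
		int bio = queue[head++ % QMAX];
		nr_queued--;			/* one decrement per drained entry */
		printf("retrying bio %d\n", bio);
	}
}

int main(void)
{
	park(1); park(2);
	drain();
	printf("nr_queued=%d\n", nr_queued);	/* 0 again: freeze can proceed */
	return 0;
}
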
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 1c1447dd3417..e3fd725d5c4d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2664,6 +2664,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
if (fail) {
spin_lock_irq(&conf->device_lock);
list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
+ conf->nr_queued++;
spin_unlock_irq(&conf->device_lock);
md_wakeup_thread(conf->mddev->thread);
} else {
@@ -2691,8 +2692,10 @@ static void raid10d(struct md_thread *thread)
LIST_HEAD(tmp);
spin_lock_irqsave(&conf->device_lock, flags);
if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
- list_add(&tmp, &conf->bio_end_io_list);
- list_del_init(&conf->bio_end_io_list);
+ while (!list_empty(&conf->bio_end_io_list)) {
+ list_move(conf->bio_end_io_list.prev, &tmp);
+ conf->nr_queued--;
+ }
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b4f02c9959f2..8ab8b65e1741 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -340,8 +340,7 @@ static void release_inactive_stripe_list(struct r5conf *conf,
int hash)
{
int size;
- unsigned long do_wakeup = 0;
- int i = 0;
+ bool do_wakeup = false;
unsigned long flags;
if (hash == NR_STRIPE_HASH_LOCKS) {
@@ -362,19 +361,15 @@ static void release_inactive_stripe_list(struct r5conf *conf,
!list_empty(list))
atomic_dec(&conf->empty_inactive_list_nr);
list_splice_tail_init(list, conf->inactive_list + hash);
- do_wakeup |= 1 << hash;
+ do_wakeup = true;
spin_unlock_irqrestore(conf->hash_locks + hash, flags);
}
size--;
hash--;
}
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- if (do_wakeup & (1 << i))
- wake_up(&conf->wait_for_stripe[i]);
- }
-
if (do_wakeup) {
+ wake_up(&conf->wait_for_stripe);
if (atomic_read(&conf->active_stripes) == 0)
wake_up(&conf->wait_for_quiescent);
if (conf->retry_read_aligned)
@@ -687,15 +682,14 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
if (!sh) {
set_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
- wait_event_exclusive_cmd(
- conf->wait_for_stripe[hash],
+ wait_event_lock_irq(
+ conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
|| !test_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state)),
- spin_unlock_irq(conf->hash_locks + hash),
- spin_lock_irq(conf->hash_locks + hash));
+ *(conf->hash_locks + hash));
clear_bit(R5_INACTIVE_BLOCKED,
&conf->cache_state);
} else {
@@ -720,9 +714,6 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
}
} while (sh == NULL);
- if (!list_empty(conf->inactive_list + hash))
- wake_up(&conf->wait_for_stripe[hash]);
-
spin_unlock_irq(conf->hash_locks + hash);
return sh;
}
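
raid5 collapses the per-hash wait_for_stripe queues into a single one and switches to wait_event_lock_irq(), which drops the hash lock while sleeping and retakes it before re-checking the condition. In userspace the same contract is a condition-variable wait; a pthread sketch of the shape (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_for_stripe = PTHREAD_COND_INITIALIZER;
static int inactive_stripes;

static void get_stripe(void)
{
	pthread_mutex_lock(&hash_lock);
	while (inactive_stripes == 0)	/* condition re-checked under the lock */
		pthread_cond_wait(&wait_for_stripe, &hash_lock); /* lock dropped while asleep */
	inactive_stripes--;
	pthread_mutex_unlock(&hash_lock);
}

static void *release_stripe(void *arg)
{
	pthread_mutex_lock(&hash_lock);
	inactive_stripes++;
	pthread_cond_signal(&wait_for_stripe);	/* wake_up() analogue */
	pthread_mutex_unlock(&hash_lock);
	return arg;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, release_stripe, NULL);
	get_stripe();
	pthread_join(t, NULL);
	puts("got stripe");
	return 0;
}
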
@@ -2089,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
unsigned long cpu;
int err = 0;
+ /*
+ * Never shrink. And mddev_suspend() could deadlock if this is called
+ * from raid5d. In that case, scribble_disks and scribble_sectors
+ * should equal to new_disks and new_sectors
+ */
+ if (conf->scribble_disks >= new_disks &&
+ conf->scribble_sectors >= new_sectors)
+ return 0;
mddev_suspend(conf->mddev);
get_online_cpus();
for_each_present_cpu(cpu) {
@@ -2110,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
}
put_online_cpus();
mddev_resume(conf->mddev);
+ if (!err) {
+ conf->scribble_disks = new_disks;
+ conf->scribble_sectors = new_sectors;
+ }
return err;
}
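
resize_chunks() can now be reached from raid5d, where mddev_suspend() would deadlock (the new WARN_ON_ONCE in mddev_suspend() catches exactly that case), so the early return makes the call a no-op whenever the scribble buffers are already large enough, and the recorded sizes are committed only after every per-CPU allocation succeeded. A grow-only resize in miniature:

#include <stdlib.h>
#include <stdio.h>

static char *scribble;
static size_t scribble_size;

static int resize_scribble(size_t new_size)
{
	char *p;

	if (scribble_size >= new_size)	/* never shrink; idempotent re-entry */
		return 0;

	p = realloc(scribble, new_size);
	if (!p)
		return -1;		/* recorded size untouched on failure */

	scribble = p;
	scribble_size = new_size;	/* commit only after success */
	return 0;
}

int main(void)
{
	resize_scribble(64);
	resize_scribble(32);		/* no-op: already big enough */
	printf("size=%zu\n", scribble_size);
	free(scribble);
	return 0;
}
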
@@ -2190,7 +2193,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
cnt = 0;
list_for_each_entry(nsh, &newstripes, lru) {
lock_device_hash_lock(conf, hash);
- wait_event_exclusive_cmd(conf->wait_for_stripe[hash],
+ wait_event_cmd(conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash),
unlock_device_hash_lock(conf, hash),
lock_device_hash_lock(conf, hash));
@@ -4233,10 +4236,9 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
list_del_init(&sh->batch_list);
- WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+ WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
(1 << STRIPE_SYNCING) |
(1 << STRIPE_REPLACED) |
- (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DELAYED) |
(1 << STRIPE_BIT_DELAY) |
(1 << STRIPE_FULL_WRITE) |
@@ -4246,11 +4248,14 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
(1 << STRIPE_DISCARD) |
(1 << STRIPE_BATCH_READY) |
(1 << STRIPE_BATCH_ERR) |
- (1 << STRIPE_BITMAP_PENDING)));
- WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
- (1 << STRIPE_REPLACED)));
+ (1 << STRIPE_BITMAP_PENDING)),
+ "stripe state: %lx\n", sh->state);
+ WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+ (1 << STRIPE_REPLACED)),
+ "head stripe state: %lx\n", head_sh->state);
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ (1 << STRIPE_PREREAD_ACTIVE) |
(1 << STRIPE_DEGRADED)),
head_sh->state & (1 << STRIPE_INSYNC));
@@ -6376,6 +6381,8 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
@@ -6413,6 +6420,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
}
put_online_cpus();
+ if (!err) {
+ conf->scribble_disks = max(conf->raid_disks,
+ conf->previous_raid_disks);
+ conf->scribble_sectors = max(conf->chunk_sectors,
+ conf->prev_chunk_sectors);
+ }
return err;
}
@@ -6503,9 +6516,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
seqcount_init(&conf->gen_lock);
mutex_init(&conf->cache_size_mutex);
init_waitqueue_head(&conf->wait_for_quiescent);
- for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
- init_waitqueue_head(&conf->wait_for_stripe[i]);
- }
+ init_waitqueue_head(&conf->wait_for_stripe);
init_waitqueue_head(&conf->wait_for_overlap);
INIT_LIST_HEAD(&conf->handle_list);
INIT_LIST_HEAD(&conf->hold_list);
@@ -7014,8 +7025,8 @@ static int raid5_run(struct mddev *mddev)
}
if (discard_supported &&
- mddev->queue->limits.max_discard_sectors >= stripe &&
- mddev->queue->limits.discard_granularity >= stripe)
+ mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
+ mddev->queue->limits.discard_granularity >= stripe)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
mddev->queue);
else
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index a415e1cd39b8..517d4b68a1be 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -510,6 +510,8 @@ struct r5conf {
* conversions
*/
} __percpu *percpu;
+ int scribble_disks;
+ int scribble_sectors;
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
@@ -522,7 +524,7 @@ struct r5conf {
atomic_t empty_inactive_list_nr;
struct llist_head released_stripes;
wait_queue_head_t wait_for_quiescent;
- wait_queue_head_t wait_for_stripe[NR_STRIPE_HASH_LOCKS];
+ wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
unsigned long cache_state;
#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index dbdbb84294c5..0afad395ef97 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -242,6 +242,7 @@
#define USB_PID_AVERMEDIA_1867 0x1867
#define USB_PID_AVERMEDIA_A867 0xa867
#define USB_PID_AVERMEDIA_H335 0x0335
+#define USB_PID_AVERMEDIA_TD110 0xa110
#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 4c35eb47472b..c0142614c408 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -134,7 +134,6 @@ struct dvb_frontend_private {
#if defined(CONFIG_MEDIA_CONTROLLER_DVB)
struct media_pipeline pipe;
- struct media_entity *pipe_start_entity;
#endif
};
@@ -600,94 +599,6 @@ static void dvb_frontend_wakeup(struct dvb_frontend *fe)
wake_up_interruptible(&fepriv->wait_queue);
}
-/**
- * dvb_enable_media_tuner() - tries to enable the DVB tuner
- *
- * @fe: struct dvb_frontend pointer
- *
- * This function ensures that just one media tuner is enabled for a given
- * frontend. It has two different behaviors:
- * - For trivial devices with just one tuner:
- * it just enables the existing tuner->fe link
- * - For devices with more than one tuner:
- * It is up to the driver to implement the logic that will enable one tuner
- * and disable the other ones. However, if more than one tuner is enabled for
- * the same frontend, it will print an error message and return -EINVAL.
- *
- * At return, it will return the error code returned by media_entity_setup_link,
- * or 0 if everything is OK, if no tuner is linked to the frontend or if the
- * mdev is NULL.
- */
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
-static int dvb_enable_media_tuner(struct dvb_frontend *fe)
-{
- struct dvb_frontend_private *fepriv = fe->frontend_priv;
- struct dvb_adapter *adapter = fe->dvb;
- struct media_device *mdev = adapter->mdev;
- struct media_entity *entity, *source;
- struct media_link *link, *found_link = NULL;
- int ret, n_links = 0, active_links = 0;
-
- fepriv->pipe_start_entity = NULL;
-
- if (!mdev)
- return 0;
-
- entity = fepriv->dvbdev->entity;
- fepriv->pipe_start_entity = entity;
-
- list_for_each_entry(link, &entity->links, list) {
- if (link->sink->entity == entity) {
- found_link = link;
- n_links++;
- if (link->flags & MEDIA_LNK_FL_ENABLED)
- active_links++;
- }
- }
-
- if (!n_links || active_links == 1 || !found_link)
- return 0;
-
- /*
- * If a frontend has more than one tuner linked, it is up to the driver
- * to select with one will be the active one, as the frontend core can't
- * guess. If the driver doesn't do that, it is a bug.
- */
- if (n_links > 1 && active_links != 1) {
- dev_err(fe->dvb->device,
- "WARNING: there are %d active links among %d tuners. This is a driver's bug!\n",
- active_links, n_links);
- return -EINVAL;
- }
-
- source = found_link->source->entity;
- fepriv->pipe_start_entity = source;
- list_for_each_entry(link, &source->links, list) {
- struct media_entity *sink;
- int flags = 0;
-
- sink = link->sink->entity;
- if (sink == entity)
- flags = MEDIA_LNK_FL_ENABLED;
-
- ret = media_entity_setup_link(link, flags);
- if (ret) {
- dev_err(fe->dvb->device,
- "Couldn't change link %s->%s to %s. Error %d\n",
- source->name, sink->name,
- flags ? "enabled" : "disabled",
- ret);
- return ret;
- } else
- dev_dbg(fe->dvb->device,
- "link %s->%s was %s\n",
- source->name, sink->name,
- flags ? "ENABLED" : "disabled");
- }
- return 0;
-}
-#endif
-
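
The removed helper is superseded by the media_device enable_source/disable_source hooks used further down: the frontend core no longer walks tuner links itself, it asks the owning media device to claim the source when the device node is opened and to release it on failure or final close. The open/error pairing, reduced to its control flow (placeholder names, not the V4L2 API):

#include <stdio.h>
#include <errno.h>

struct mdev {
	int (*enable_source)(void);
	void (*disable_source)(void);
};

static int busy;
static int claim(void)   { return busy++ ? -EBUSY : 0; }
static void unclaim(void) { busy--; }
static int start_fe(void) { return -1; }	/* pretend dvb_frontend_start() fails */

static int fe_open(struct mdev *m)
{
	int ret;

	if (m && m->enable_source) {
		ret = m->enable_source();
		if (ret)
			return ret;		/* tuner already claimed elsewhere */
	}
	ret = start_fe();
	if (ret)
		goto err;			/* undo the claim on failure */
	return 0;
err:
	if (m && m->disable_source)
		m->disable_source();
	return ret;
}

int main(void)
{
	struct mdev m = { claim, unclaim };
	printf("open=%d busy=%d\n", fe_open(&m), busy);	/* claim undone */
	return 0;
}
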
static int dvb_frontend_thread(void *data)
{
struct dvb_frontend *fe = data;
@@ -695,10 +606,6 @@ static int dvb_frontend_thread(void *data)
struct dvb_frontend_private *fepriv = fe->frontend_priv;
enum fe_status s;
enum dvbfe_algo algo;
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- int ret;
-#endif
-
bool re_tune = false;
bool semheld = false;
@@ -711,20 +618,6 @@ static int dvb_frontend_thread(void *data)
fepriv->wakeup = 0;
fepriv->reinitialise = 0;
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- ret = dvb_enable_media_tuner(fe);
- if (ret) {
- /* FIXME: return an error if it fails */
- dev_info(fe->dvb->device,
- "proceeding with FE task\n");
- } else if (fepriv->pipe_start_entity) {
- ret = media_entity_pipeline_start(fepriv->pipe_start_entity,
- &fepriv->pipe);
- if (ret)
- return ret;
- }
-#endif
-
dvb_frontend_init(fe);
set_freezable();
@@ -834,12 +727,6 @@ restart:
}
}
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- if (fepriv->pipe_start_entity)
- media_entity_pipeline_stop(fepriv->pipe_start_entity);
- fepriv->pipe_start_entity = NULL;
-#endif
-
if (dvb_powerdown_on_sleep) {
if (fe->ops.set_voltage)
fe->ops.set_voltage(fe, SEC_VOLTAGE_OFF);
@@ -2637,9 +2524,20 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
fepriv->tone = -1;
fepriv->voltage = -1;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (fe->dvb->mdev && fe->dvb->mdev->enable_source) {
+ ret = fe->dvb->mdev->enable_source(dvbdev->entity,
+ &fepriv->pipe);
+ if (ret) {
+ dev_err(fe->dvb->device,
+ "Tuner is busy. Error %d\n", ret);
+ goto err2;
+ }
+ }
+#endif
ret = dvb_frontend_start (fe);
if (ret)
- goto err2;
+ goto err3;
/* empty event queue */
fepriv->events.eventr = fepriv->events.eventw = 0;
@@ -2649,7 +2547,12 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
mutex_unlock (&adapter->mfe_lock);
return ret;
+err3:
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (fe->dvb->mdev && fe->dvb->mdev->disable_source)
+ fe->dvb->mdev->disable_source(dvbdev->entity);
err2:
+#endif
dvb_generic_release(inode, file);
err1:
if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl)
@@ -2678,6 +2581,10 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
if (dvbdev->users == -1) {
wake_up(&fepriv->wait_queue);
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (fe->dvb->mdev && fe->dvb->mdev->disable_source)
+ fe->dvb->mdev->disable_source(dvbdev->entity);
+#endif
if (fe->exit != DVB_FE_NO_EXIT)
wake_up(&dvbdev->wait_queue);
if (fe->ops.ts_bus_ctrl)
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 1b9732ee0a4f..e1684c570e2f 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -681,7 +681,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
if (demux && ca) {
ret = media_create_pad_link(demux, 1, ca,
0, MEDIA_LNK_FL_ENABLED);
- if (!ret)
+ if (ret)
return -ENOMEM;
}
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 310e4b8beae8..a82f77c49bd5 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -73,6 +73,14 @@ config DVB_SI2165
Say Y when you want to support this frontend.
+config DVB_MN88473
+ tristate "Panasonic MN88473"
+ depends on DVB_CORE && I2C
+ select REGMAP_I2C
+ default m if !MEDIA_SUBDRV_AUTOSELECT
+ help
+ Say Y when you want to support this frontend.
+
comment "DVB-S (satellite) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb-frontends/Makefile b/drivers/media/dvb-frontends/Makefile
index 37ef17b5b995..eb7191f4219d 100644
--- a/drivers/media/dvb-frontends/Makefile
+++ b/drivers/media/dvb-frontends/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_DVB_STV0900) += stv0900.o
obj-$(CONFIG_DVB_STV090x) += stv090x.o
obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
obj-$(CONFIG_DVB_M88DS3103) += m88ds3103.o
+obj-$(CONFIG_DVB_MN88473) += mn88473.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_HD29L2) += hd29l2.o
diff --git a/drivers/media/dvb-frontends/au8522.h b/drivers/media/dvb-frontends/au8522.h
index dde61582c158..78bf3f73e58d 100644
--- a/drivers/media/dvb-frontends/au8522.h
+++ b/drivers/media/dvb-frontends/au8522.h
@@ -89,5 +89,4 @@ enum au8522_audio_input {
AU8522_AUDIO_NONE,
AU8522_AUDIO_SIF,
};
-
#endif /* __AU8522_H__ */
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index 73612c5353d1..add246382806 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -763,9 +763,10 @@ static int au8522_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(sd, client, &au8522_ops);
#if defined(CONFIG_MEDIA_CONTROLLER)
- state->pads[AU8522_PAD_INPUT].flags = MEDIA_PAD_FL_SINK;
- state->pads[AU8522_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
- state->pads[AU8522_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+ state->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[DEMOD_PAD_AUDIO_OUT].flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(state->pads),
diff --git a/drivers/media/dvb-frontends/au8522_priv.h b/drivers/media/dvb-frontends/au8522_priv.h
index 404a0cb0ed8d..f5a9438f6ce5 100644
--- a/drivers/media/dvb-frontends/au8522_priv.h
+++ b/drivers/media/dvb-frontends/au8522_priv.h
@@ -30,6 +30,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mc.h>
#include <linux/i2c.h>
#include "dvb_frontend.h"
#include "au8522.h"
@@ -39,14 +40,6 @@
#define AU8522_DIGITAL_MODE 1
#define AU8522_SUSPEND_MODE 2
-enum au8522_media_pads {
- AU8522_PAD_INPUT,
- AU8522_PAD_VID_OUT,
- AU8522_PAD_VBI_OUT,
-
- AU8522_NUM_PADS
-};
-
struct au8522_state {
struct i2c_client *c;
struct i2c_adapter *i2c;
@@ -78,7 +71,7 @@ struct au8522_state {
struct v4l2_ctrl_handler hdl;
#ifdef CONFIG_MEDIA_CONTROLLER
- struct media_pad pads[AU8522_NUM_PADS];
+ struct media_pad pads[DEMOD_NUM_PADS];
#endif
};
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 6ccbd86c9490..066ee387bf25 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -1507,11 +1507,13 @@ static int cx24120_get_frontend(struct dvb_frontend *fe,
{
struct cx24120_state *state = fe->demodulator_priv;
u8 freq1, freq2, freq3;
+ int status;
dev_dbg(&state->i2c->dev, "\n");
/* don't return empty data if we're not tuned in */
- if ((state->fe_status & FE_HAS_LOCK) == 0)
+ status = cx24120_readreg(state, CX24120_REG_STATUS);
+ if (!(status & CX24120_HAS_LOCK))
return 0;
/* Get frequency */
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 976ee034a430..dc2d41e144fd 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1115,9 +1115,15 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
dib0090_set_bbramp_pwm(state, bb_ramp);
/* activate the ramp generator using PWM control */
- dprintk("ramp RF gain = %d BAND = %s version = %d", state->rf_ramp[0], (state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND", state->identity.version & 0x1f);
-
- if ((state->rf_ramp[0] == 0) || (state->current_band == BAND_CBAND && (state->identity.version & 0x1f) <= P1D_E_F)) {
+ if (state->rf_ramp)
+ dprintk("ramp RF gain = %d BAND = %s version = %d",
+ state->rf_ramp[0],
+ (state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND",
+ state->identity.version & 0x1f);
+
+ if (rf_ramp && ((state->rf_ramp[0] == 0) ||
+ (state->current_band == BAND_CBAND &&
+ (state->identity.version & 0x1f) <= P1D_E_F))) {
dprintk("DE-Engage mux for direct gain reg control");
en_pwm_rf_mux = 0;
} else
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index bab8c5a980a2..5897977d2d00 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -225,7 +225,7 @@ static u16 to_fw_output_mode(u16 mode)
}
}
-static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32 len, u16 attribute)
+static int dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 *b, u32 len, u16 attribute)
{
u32 chunk_size = 126;
u32 l;
@@ -309,7 +309,7 @@ static inline u16 dib9000_read_word_attr(struct dib9000_state *state, u16 reg, u
#define dib9000_read16_noinc_attr(state, reg, b, len, attribute) dib9000_read16_attr(state, reg, b, len, (attribute) | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)
-static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 * buf, u32 len, u16 attribute)
+static int dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 *buf, u32 len, u16 attribute)
{
u32 chunk_size = 126;
u32 l;
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index b28b5787b39a..e48b741d439e 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -4131,7 +4131,7 @@ int drxj_dap_scu_atomic_read_write_block(struct i2c_device_addr *dev_addr, u32 a
{
struct drxjscu_cmd scu_cmd;
int rc;
- u16 set_param_parameters[15];
+ u16 set_param_parameters[18];
u16 cmd_result[15];
/* Parameter check */
@@ -9597,12 +9597,13 @@ ctrl_get_qam_sig_quality(struct drx_demod_instance *demod)
Precision errors still possible.
*/
- e = post_bit_err_rs * 742686;
- m = fec_oc_period * 100;
- if (fec_oc_period == 0)
+ if (!fec_oc_period) {
qam_post_rs_ber = 0xFFFFFFFF;
- else
+ } else {
+ e = post_bit_err_rs * 742686;
+ m = fec_oc_period * 100;
qam_post_rs_ber = e / m;
+ }
/* fill signal quality data structure */
p->pre_bit_count.stat[0].scale = FE_SCALE_COUNTER;
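
The drxj hunk reorders the post-Reed-Solomon BER computation so the divide-by-zero guard comes first; the value itself is unchanged: ber = post_bit_err_rs * 742686 / (fec_oc_period * 100). Worked in plain C with sample numbers:

#include <stdio.h>
#include <stdint.h>

static uint32_t qam_post_rs_ber(uint32_t post_bit_err_rs, uint32_t fec_oc_period)
{
	uint64_t e, m;

	if (!fec_oc_period)
		return 0xFFFFFFFF;	/* no measurement window: saturate */

	e = (uint64_t)post_bit_err_rs * 742686;	/* scale factor from the driver */
	m = (uint64_t)fec_oc_period * 100;
	return (uint32_t)(e / m);
}

int main(void)
{
	printf("%u\n", qam_post_rs_ber(12, 5000));	/* 12*742686/500000 = 17 */
	printf("%u\n", qam_post_rs_ber(12, 0));		/* guard path */
	return 0;
}
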
diff --git a/drivers/staging/media/mn88473/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index a222e99935d2..6c5d592161d4 100644
--- a/drivers/staging/media/mn88473/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -29,21 +29,17 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
struct mn88473_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
+ unsigned int uitmp;
u32 if_frequency;
- u64 tmp;
- u8 delivery_system_val, if_val[3], bw_val[7];
+ u8 delivery_system_val, if_val[3], *conf_val_ptr;
+ u8 reg_bank2_2d_val, reg_bank0_d2_val;
dev_dbg(&client->dev,
"delivery_system=%u modulation=%u frequency=%u bandwidth_hz=%u symbol_rate=%u inversion=%d stream_id=%d\n",
- c->delivery_system,
- c->modulation,
- c->frequency,
- c->bandwidth_hz,
- c->symbol_rate,
- c->inversion,
- c->stream_id);
-
- if (!dev->warm) {
+ c->delivery_system, c->modulation, c->frequency,
+ c->bandwidth_hz, c->symbol_rate, c->inversion, c->stream_id);
+
+ if (!dev->active) {
ret = -EAGAIN;
goto err;
}
@@ -51,30 +47,50 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_DVBT:
delivery_system_val = 0x02;
+ reg_bank2_2d_val = 0x23;
+ reg_bank0_d2_val = 0x2a;
break;
case SYS_DVBT2:
delivery_system_val = 0x03;
+ reg_bank2_2d_val = 0x3b;
+ reg_bank0_d2_val = 0x29;
break;
case SYS_DVBC_ANNEX_A:
delivery_system_val = 0x04;
+ reg_bank2_2d_val = 0x3b;
+ reg_bank0_d2_val = 0x29;
break;
default:
ret = -EINVAL;
goto err;
}
- if (c->bandwidth_hz <= 6000000) {
- memcpy(bw_val, "\xe9\x55\x55\x1c\x29\x1c\x29", 7);
- } else if (c->bandwidth_hz <= 7000000) {
- memcpy(bw_val, "\xc8\x00\x00\x17\x0a\x17\x0a", 7);
- } else if (c->bandwidth_hz <= 8000000) {
- memcpy(bw_val, "\xaf\x00\x00\x11\xec\x11\xec", 7);
- } else {
- ret = -EINVAL;
- goto err;
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ switch (c->bandwidth_hz) {
+ case 6000000:
+ conf_val_ptr = "\xe9\x55\x55\x1c\x29\x1c\x29";
+ break;
+ case 7000000:
+ conf_val_ptr = "\xc8\x00\x00\x17\x0a\x17\x0a";
+ break;
+ case 8000000:
+ conf_val_ptr = "\xaf\x00\x00\x11\xec\x11\xec";
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ conf_val_ptr = "\x10\xab\x0d\xae\x1d\x9d";
+ break;
+ default:
+ break;
}
- /* program tuner */
+ /* Program tuner */
if (fe->ops.tuner_ops.set_params) {
ret = fe->ops.tuner_ops.set_params(fe);
if (ret)
@@ -86,27 +102,45 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
if (ret)
goto err;
- dev_dbg(&client->dev, "get_if_frequency=%d\n", if_frequency);
+ dev_dbg(&client->dev, "get_if_frequency=%u\n", if_frequency);
} else {
- if_frequency = 0;
+ ret = -EINVAL;
+ goto err;
}
- /* Calculate IF registers ( (1<<24)*IF / Xtal ) */
- tmp = div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
- dev->xtal);
- if_val[0] = ((tmp >> 16) & 0xff);
- if_val[1] = ((tmp >> 8) & 0xff);
- if_val[2] = ((tmp >> 0) & 0xff);
+ /* Calculate IF registers */
+ uitmp = DIV_ROUND_CLOSEST_ULL((u64) if_frequency * 0x1000000, dev->clk);
+ if_val[0] = (uitmp >> 16) & 0xff;
+ if_val[1] = (uitmp >> 8) & 0xff;
+ if_val[2] = (uitmp >> 0) & 0xff;
ret = regmap_write(dev->regmap[2], 0x05, 0x00);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0xef, 0x13);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0xf9, 0x13);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x00, 0x18);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x01, 0x01);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x02, 0x21);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x03, delivery_system_val);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x0b, 0x00);
+ if (ret)
+ goto err;
for (i = 0; i < sizeof(if_val); i++) {
ret = regmap_write(dev->regmap[2], 0x10 + i, if_val[i]);
@@ -114,52 +148,85 @@ static int mn88473_set_frontend(struct dvb_frontend *fe)
goto err;
}
- for (i = 0; i < sizeof(bw_val); i++) {
- ret = regmap_write(dev->regmap[2], 0x13 + i, bw_val[i]);
+ switch (c->delivery_system) {
+ case SYS_DVBT:
+ case SYS_DVBT2:
+ for (i = 0; i < 7; i++) {
+ ret = regmap_write(dev->regmap[2], 0x13 + i,
+ conf_val_ptr[i]);
+ if (ret)
+ goto err;
+ }
+ break;
+ case SYS_DVBC_ANNEX_A:
+ ret = regmap_bulk_write(dev->regmap[1], 0x10, conf_val_ptr, 6);
if (ret)
goto err;
+ break;
+ default:
+ break;
}
- ret = regmap_write(dev->regmap[2], 0x2d, 0x3b);
+ ret = regmap_write(dev->regmap[2], 0x2d, reg_bank2_2d_val);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x2e, 0x00);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[2], 0x56, 0x0d);
- ret = regmap_write(dev->regmap[0], 0x01, 0xba);
- ret = regmap_write(dev->regmap[0], 0x02, 0x13);
- ret = regmap_write(dev->regmap[0], 0x03, 0x80);
- ret = regmap_write(dev->regmap[0], 0x04, 0xba);
- ret = regmap_write(dev->regmap[0], 0x05, 0x91);
- ret = regmap_write(dev->regmap[0], 0x07, 0xe7);
- ret = regmap_write(dev->regmap[0], 0x08, 0x28);
+ if (ret)
+ goto err;
+ ret = regmap_bulk_write(dev->regmap[0], 0x01,
+ "\xba\x13\x80\xba\x91\xdd\xe7\x28", 8);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x0a, 0x1a);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x13, 0x1f);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x19, 0x03);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x1d, 0xb0);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x2a, 0x72);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x2d, 0x00);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x3c, 0x00);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0x3f, 0xf8);
- ret = regmap_write(dev->regmap[0], 0x40, 0xf4);
- ret = regmap_write(dev->regmap[0], 0x41, 0x08);
- ret = regmap_write(dev->regmap[0], 0xd2, 0x29);
+ if (ret)
+ goto err;
+ ret = regmap_bulk_write(dev->regmap[0], 0x40, "\xf4\x08", 2);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[0], 0xd2, reg_bank0_d2_val);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0xd4, 0x55);
- ret = regmap_write(dev->regmap[1], 0x10, 0x10);
- ret = regmap_write(dev->regmap[1], 0x11, 0xab);
- ret = regmap_write(dev->regmap[1], 0x12, 0x0d);
- ret = regmap_write(dev->regmap[1], 0x13, 0xae);
- ret = regmap_write(dev->regmap[1], 0x14, 0x1d);
- ret = regmap_write(dev->regmap[1], 0x15, 0x9d);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[1], 0xbe, 0x08);
- ret = regmap_write(dev->regmap[2], 0x09, 0x08);
- ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0xb2, 0x37);
+ if (ret)
+ goto err;
ret = regmap_write(dev->regmap[0], 0xd7, 0x04);
- ret = regmap_write(dev->regmap[2], 0x32, 0x80);
- ret = regmap_write(dev->regmap[2], 0x36, 0x00);
- ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
if (ret)
goto err;
- dev->delivery_system = c->delivery_system;
+ /* Reset FSM */
+ ret = regmap_write(dev->regmap[2], 0xf8, 0x9f);
+ if (ret)
+ goto err;
return 0;
err:
@@ -173,51 +240,61 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
struct mn88473_dev *dev = i2c_get_clientdata(client);
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
- unsigned int utmp;
- int lock = 0;
-
- *status = 0;
+ unsigned int uitmp;
- if (!dev->warm) {
+ if (!dev->active) {
ret = -EAGAIN;
goto err;
}
+ *status = 0;
+
switch (c->delivery_system) {
case SYS_DVBT:
- ret = regmap_read(dev->regmap[0], 0x62, &utmp);
+ ret = regmap_read(dev->regmap[0], 0x62, &uitmp);
if (ret)
goto err;
- if (!(utmp & 0xA0)) {
- if ((utmp & 0xF) >= 0x03)
- *status |= FE_HAS_SIGNAL;
- if ((utmp & 0xF) >= 0x09)
- lock = 1;
+
+ if (!(uitmp & 0xa0)) {
+ if ((uitmp & 0x0f) >= 0x09)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ else if ((uitmp & 0x0f) >= 0x03)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
}
break;
case SYS_DVBT2:
- ret = regmap_read(dev->regmap[2], 0x8B, &utmp);
+ ret = regmap_read(dev->regmap[2], 0x8b, &uitmp);
if (ret)
goto err;
- if (!(utmp & 0x40)) {
- if ((utmp & 0xF) >= 0x07)
- *status |= FE_HAS_SIGNAL;
- if ((utmp & 0xF) >= 0x0a)
- *status |= FE_HAS_CARRIER;
- if ((utmp & 0xF) >= 0x0d)
- *status |= FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+
+ if (!(uitmp & 0x40)) {
+ if ((uitmp & 0x0f) >= 0x0d)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
+ else if ((uitmp & 0x0f) >= 0x0a)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI;
+ else if ((uitmp & 0x0f) >= 0x07)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
}
break;
case SYS_DVBC_ANNEX_A:
- ret = regmap_read(dev->regmap[1], 0x85, &utmp);
+ ret = regmap_read(dev->regmap[1], 0x85, &uitmp);
if (ret)
goto err;
- if (!(utmp & 0x40)) {
- ret = regmap_read(dev->regmap[1], 0x89, &utmp);
+
+ if (!(uitmp & 0x40)) {
+ ret = regmap_read(dev->regmap[1], 0x89, &uitmp);
if (ret)
goto err;
- if (utmp & 0x01)
- lock = 1;
+
+ if (uitmp & 0x01)
+ *status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC |
+ FE_HAS_LOCK;
}
break;
default:
@@ -225,10 +302,6 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
goto err;
}
- if (lock)
- *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
- FE_HAS_SYNC | FE_HAS_LOCK;
-
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
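
The rewritten read_status reads one lock-state register per delivery system and maps threshold bands onto cumulative fe_status bit sets, instead of collecting partial flags plus a separate lock variable. Reduced to the DVB-T2 case, with the thresholds from the hunk above and everything else simplified:

#include <stdio.h>

enum {
	FE_HAS_SIGNAL  = 0x01,
	FE_HAS_CARRIER = 0x02,
	FE_HAS_VITERBI = 0x04,
	FE_HAS_SYNC    = 0x08,
	FE_HAS_LOCK    = 0x10,
};

static unsigned dvbt2_status(unsigned reg_8b)
{
	unsigned status = 0;

	if (reg_8b & 0x40)		/* demod reports no valid state */
		return 0;

	if ((reg_8b & 0x0f) >= 0x0d)
		status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
			 FE_HAS_SYNC | FE_HAS_LOCK;
	else if ((reg_8b & 0x0f) >= 0x0a)
		status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI;
	else if ((reg_8b & 0x0f) >= 0x07)
		status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
	return status;
}

int main(void)
{
	printf("0x%02x\n", dvbt2_status(0x0e));	/* full lock */
	printf("0x%02x\n", dvbt2_status(0x08));	/* signal + carrier only */
	return 0;
}
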
@@ -239,85 +312,76 @@ static int mn88473_init(struct dvb_frontend *fe)
{
struct i2c_client *client = fe->demodulator_priv;
struct mn88473_dev *dev = i2c_get_clientdata(client);
- int ret, len, remaining;
- const struct firmware *fw = NULL;
- u8 *fw_file = MN88473_FIRMWARE;
- unsigned int tmp;
+ int ret, len, remain;
+ unsigned int uitmp;
+ const struct firmware *fw;
+ const char *name = MN88473_FIRMWARE;
dev_dbg(&client->dev, "\n");
- /* set cold state by default */
- dev->warm = false;
-
- /* check if firmware is already running */
- ret = regmap_read(dev->regmap[0], 0xf5, &tmp);
+ /* Check if firmware is already running */
+ ret = regmap_read(dev->regmap[0], 0xf5, &uitmp);
if (ret)
goto err;
- if (!(tmp & 0x1)) {
- dev_info(&client->dev, "firmware already running\n");
- dev->warm = true;
- return 0;
- }
+ if (!(uitmp & 0x01))
+ goto warm;
- /* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, &client->dev);
+ /* Request the firmware, this will block and timeout */
+ ret = request_firmware(&fw, name, &client->dev);
if (ret) {
- dev_err(&client->dev, "firmare file '%s' not found\n", fw_file);
- goto err_request_firmware;
+ dev_err(&client->dev, "firmare file '%s' not found\n", name);
+ goto err;
}
- dev_info(&client->dev, "downloading firmware from file '%s'\n",
- fw_file);
+ dev_info(&client->dev, "downloading firmware from file '%s'\n", name);
ret = regmap_write(dev->regmap[0], 0xf5, 0x03);
if (ret)
- goto err;
-
- for (remaining = fw->size; remaining > 0;
- remaining -= (dev->i2c_wr_max - 1)) {
- len = remaining;
- if (len > (dev->i2c_wr_max - 1))
- len = dev->i2c_wr_max - 1;
+ goto err_release_firmware;
+ for (remain = fw->size; remain > 0; remain -= (dev->i2c_wr_max - 1)) {
+ len = min(dev->i2c_wr_max - 1, remain);
ret = regmap_bulk_write(dev->regmap[0], 0xf6,
- &fw->data[fw->size - remaining], len);
+ &fw->data[fw->size - remain], len);
if (ret) {
- dev_err(&client->dev, "firmware download failed=%d\n",
+ dev_err(&client->dev, "firmware download failed %d\n",
ret);
- goto err;
+ goto err_release_firmware;
}
}
- /* parity check of firmware */
- ret = regmap_read(dev->regmap[0], 0xf8, &tmp);
- if (ret) {
- dev_err(&client->dev,
- "parity reg read failed=%d\n", ret);
+ release_firmware(fw);
+
+ /* Parity check of firmware */
+ ret = regmap_read(dev->regmap[0], 0xf8, &uitmp);
+ if (ret)
goto err;
- }
- if (tmp & 0x10) {
- dev_err(&client->dev,
- "firmware parity check failed=0x%x\n", tmp);
+
+ if (uitmp & 0x10) {
+ dev_err(&client->dev, "firmware parity check failed\n");
+ ret = -EINVAL;
goto err;
}
- dev_err(&client->dev, "firmware parity check succeeded=0x%x\n", tmp);
ret = regmap_write(dev->regmap[0], 0xf5, 0x00);
if (ret)
goto err;
+warm:
+ /* TS config */
+ ret = regmap_write(dev->regmap[2], 0x09, 0x08);
+ if (ret)
+ goto err;
+ ret = regmap_write(dev->regmap[2], 0x08, 0x1d);
+ if (ret)
+ goto err;
- release_firmware(fw);
- fw = NULL;
-
- /* warm state */
- dev->warm = true;
+ dev->active = true;
return 0;
-
-err:
+err_release_firmware:
release_firmware(fw);
-err_request_firmware:
+err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
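
The firmware loop now computes each chunk with min(): at most i2c_wr_max - 1 payload bytes per transfer, because the register address consumes one byte of the adapter's write limit. A standalone sketch of the same walk over a buffer:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	const unsigned char fw[10] = { 0 };	/* pretend firmware image */
	int size = (int)sizeof(fw);
	int i2c_wr_max = 4;			/* adapter limit incl. register byte */
	int remain, len;

	for (remain = size; remain > 0; remain -= (i2c_wr_max - 1)) {
		len = MIN(i2c_wr_max - 1, remain);
		/* regmap_bulk_write(regmap, 0xf6, &fw[size - remain], len) here */
		printf("write %d bytes at offset %d\n", len, size - remain);
	}
	return 0;
}
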
@@ -330,20 +394,20 @@ static int mn88473_sleep(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
+ dev->active = false;
+
ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
if (ret)
goto err;
- dev->delivery_system = SYS_UNDEFINED;
-
return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-static struct dvb_frontend_ops mn88473_ops = {
- .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_AC},
+static const struct dvb_frontend_ops mn88473_ops = {
+ .delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
.info = {
.name = "Panasonic MN88473",
.symbol_rate_min = 1000000,
@@ -365,8 +429,7 @@ static struct dvb_frontend_ops mn88473_ops = {
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_HIERARCHY_AUTO |
FE_CAN_MUTE_TS |
- FE_CAN_2G_MODULATION |
- FE_CAN_MULTISTREAM
+ FE_CAN_2G_MODULATION
},
.get_tune_settings = mn88473_get_tune_settings,
@@ -385,7 +448,7 @@ static int mn88473_probe(struct i2c_client *client,
struct mn88473_config *config = client->dev.platform_data;
struct mn88473_dev *dev;
int ret;
- unsigned int utmp;
+ unsigned int uitmp;
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -393,7 +456,7 @@ static int mn88473_probe(struct i2c_client *client,
dev_dbg(&client->dev, "\n");
- /* Caller really need to provide pointer for frontend we create. */
+ /* Caller really needs to provide a pointer for the frontend we create */
if (config->fe == NULL) {
dev_err(&client->dev, "frontend pointer not defined\n");
ret = -EINVAL;
@@ -406,11 +469,15 @@ static int mn88473_probe(struct i2c_client *client,
goto err;
}
- dev->i2c_wr_max = config->i2c_wr_max;
- if (!config->xtal)
- dev->xtal = 25000000;
+ if (config->i2c_wr_max)
+ dev->i2c_wr_max = config->i2c_wr_max;
else
- dev->xtal = config->xtal;
+ dev->i2c_wr_max = ~0;
+
+ if (config->xtal)
+ dev->clk = config->xtal;
+ else
+ dev->clk = 25000000;
dev->client[0] = client;
dev->regmap[0] = regmap_init_i2c(dev->client[0], &regmap_config);
if (IS_ERR(dev->regmap[0])) {
@@ -418,15 +485,25 @@ static int mn88473_probe(struct i2c_client *client,
goto err_kfree;
}
- /* check demod answers to I2C */
- ret = regmap_read(dev->regmap[0], 0x00, &utmp);
+ /* Check demod answers with correct chip id */
+ ret = regmap_read(dev->regmap[0], 0xff, &uitmp);
if (ret)
goto err_regmap_0_regmap_exit;
+ dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
+
+ if (uitmp != 0x03) {
+ ret = -ENODEV;
+ goto err_regmap_0_regmap_exit;
+ }
+
/*
- * Chip has three I2C addresses for different register pages. Used
+ * Chip has three I2C addresses for different register banks. Used
* addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
- * 0x1a and 0x1c, in order to get own I2C client for each register page.
+ * 0x1a and 0x1c, in order to get own I2C client for each register bank.
+ *
+ * Also, register bank 2 does not support sequential I/O. Only single
+ * register writes or reads are allowed to that bank.
*/
dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
if (dev->client[1] == NULL) {
@@ -456,13 +533,19 @@ static int mn88473_probe(struct i2c_client *client,
}
i2c_set_clientdata(dev->client[2], dev);
- /* create dvb_frontend */
- memcpy(&dev->fe.ops, &mn88473_ops, sizeof(struct dvb_frontend_ops));
- dev->fe.demodulator_priv = client;
- *config->fe = &dev->fe;
+ /* Sleep because chip is active by default */
+ ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
+ if (ret)
+ goto err_client_2_i2c_unregister_device;
+
+ /* Create dvb frontend */
+ memcpy(&dev->frontend.ops, &mn88473_ops, sizeof(dev->frontend.ops));
+ dev->frontend.demodulator_priv = client;
+ *config->fe = &dev->frontend;
i2c_set_clientdata(client, dev);
- dev_info(&dev->client[0]->dev, "Panasonic MN88473 successfully attached\n");
+ dev_info(&client->dev, "Panasonic MN88473 successfully identified\n");
+
return 0;
err_client_2_i2c_unregister_device:
@@ -507,7 +590,8 @@ MODULE_DEVICE_TABLE(i2c, mn88473_id_table);
static struct i2c_driver mn88473_driver = {
.driver = {
- .name = "mn88473",
+ .name = "mn88473",
+ .suppress_bind_attrs = true,
},
.probe = mn88473_probe,
.remove = mn88473_remove,
diff --git a/drivers/media/dvb-frontends/mn88473.h b/drivers/media/dvb-frontends/mn88473.h
index c717ebed0e03..2aa5181f3033 100644
--- a/drivers/media/dvb-frontends/mn88473.h
+++ b/drivers/media/dvb-frontends/mn88473.h
@@ -22,10 +22,16 @@
struct mn88473_config {
/*
* Max num of bytes given I2C adapter could write at once.
- * Default: none
+ * Default: unlimited
*/
u16 i2c_wr_max;
+ /*
+ * Xtal frequency Hz.
+ * Default: 25000000
+ */
+ u32 xtal;
+
/* Everything after that is returned by the driver. */
@@ -33,12 +39,6 @@ struct mn88473_config {
* DVB frontend.
*/
struct dvb_frontend **fe;
-
- /*
- * Xtal frequency.
- * Hz
- */
- u32 xtal;
};
#endif
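
Both config fields are now optional, with the probe hunk above supplying the defaults: a zero i2c_wr_max becomes "unlimited" (~0) and a zero xtal falls back to 25 MHz. A runnable mirror of that default resolution, using a look-alike struct for illustration:

#include <stdio.h>

struct mn88473_config_like {	/* mirrors the fields above, for illustration only */
	unsigned short i2c_wr_max;
	unsigned int xtal;
};

int main(void)
{
	struct mn88473_config_like cfg = { 0 };	/* bridge left both fields unset */

	unsigned short wr_max = cfg.i2c_wr_max ? cfg.i2c_wr_max : (unsigned short)~0;
	unsigned int clk = cfg.xtal ? cfg.xtal : 25000000;	/* default 25 MHz */

	printf("i2c_wr_max=%u clk=%u\n", wr_max, clk);
	return 0;
}
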
diff --git a/drivers/staging/media/mn88473/mn88473_priv.h b/drivers/media/dvb-frontends/mn88473_priv.h
index 54beb4241ccf..e6c65893e451 100644
--- a/drivers/staging/media/mn88473/mn88473_priv.h
+++ b/drivers/media/dvb-frontends/mn88473_priv.h
@@ -27,11 +27,10 @@
struct mn88473_dev {
struct i2c_client *client[3];
struct regmap *regmap[3];
- struct dvb_frontend fe;
+ struct dvb_frontend frontend;
u16 i2c_wr_max;
- enum fe_delivery_system delivery_system;
- bool warm; /* FW running */
- u32 xtal;
+ bool active;
+ u32 clk;
};
#endif
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index c2469fb42f12..7c96f7679669 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -347,6 +347,10 @@ static int rtl2832_init(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
+ if (ret)
+ goto err;
+
for (i = 0; i < ARRAY_SIZE(rtl2832_initial_regs); i++) {
ret = rtl2832_wr_demod_reg(dev, rtl2832_initial_regs[i].reg,
rtl2832_initial_regs[i].value);
@@ -404,8 +408,6 @@ static int rtl2832_init(struct dvb_frontend *fe)
c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
c->post_bit_count.len = 1;
c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
- /* start statistics polling */
- schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
dev->sleeping = false;
return 0;
@@ -423,8 +425,6 @@ static int rtl2832_sleep(struct dvb_frontend *fe)
dev_dbg(&client->dev, "\n");
dev->sleeping = true;
- /* stop statistics polling */
- cancel_delayed_work_sync(&dev->stat_work);
dev->fe_status = 0;
ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
@@ -491,11 +491,6 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
- /* PIP mode related */
- ret = rtl2832_bulk_write(client, 0x192, "\x00\x0f\xff", 3);
- if (ret)
- goto err;
-
/* If the frontend has get_if_frequency(), use it */
if (fe->ops.tuner_ops.get_if_frequency) {
u32 if_freq;
@@ -692,8 +687,11 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct rtl2832_dev *dev = fe->demodulator_priv;
struct i2c_client *client = dev->client;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
u32 uninitialized_var(tmp);
+ u8 u8tmp, buf[2];
+ u16 u16tmp;
dev_dbg(&client->dev, "\n");
@@ -714,45 +712,6 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
}
dev->fe_status = *status;
- return 0;
-err:
- dev_dbg(&client->dev, "failed=%d\n", ret);
- return ret;
-}
-
-static int rtl2832_read_snr(struct dvb_frontend *fe, u16 *snr)
-{
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-
- /* report SNR in resolution of 0.1 dB */
- if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
- *snr = div_s64(c->cnr.stat[0].svalue, 100);
- else
- *snr = 0;
-
- return 0;
-}
-
-static int rtl2832_read_ber(struct dvb_frontend *fe, u32 *ber)
-{
- struct rtl2832_dev *dev = fe->demodulator_priv;
-
- *ber = (dev->post_bit_error - dev->post_bit_error_prev);
- dev->post_bit_error_prev = dev->post_bit_error;
-
- return 0;
-}
-
-static void rtl2832_stat_work(struct work_struct *work)
-{
- struct rtl2832_dev *dev = container_of(work, struct rtl2832_dev, stat_work.work);
- struct i2c_client *client = dev->client;
- struct dtv_frontend_properties *c = &dev->fe.dtv_property_cache;
- int ret, tmp;
- u8 u8tmp, buf[2];
- u16 u16tmp;
-
- dev_dbg(&client->dev, "\n");
/* signal strength */
if (dev->fe_status & FE_HAS_SIGNAL) {
@@ -789,11 +748,11 @@ static void rtl2832_stat_work(struct work_struct *work)
constellation = (u8tmp >> 2) & 0x03; /* [3:2] */
if (constellation > CONSTELLATION_NUM - 1)
- goto err_schedule_delayed_work;
+ goto err;
hierarchy = (u8tmp >> 4) & 0x07; /* [6:4] */
if (hierarchy > HIERARCHY_NUM - 1)
- goto err_schedule_delayed_work;
+ goto err;
ret = rtl2832_bulk_read(client, 0x40c, buf, 2);
if (ret)
@@ -835,11 +794,33 @@ static void rtl2832_stat_work(struct work_struct *work)
c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
}
-err_schedule_delayed_work:
- schedule_delayed_work(&dev->stat_work, msecs_to_jiffies(2000));
- return;
+ return 0;
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
+ return ret;
+}
+
+static int rtl2832_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+
+ /* report SNR in resolution of 0.1 dB */
+ if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
+ *snr = div_s64(c->cnr.stat[0].svalue, 100);
+ else
+ *snr = 0;
+
+ return 0;
+}
+
+static int rtl2832_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ struct rtl2832_dev *dev = fe->demodulator_priv;
+
+ *ber = (dev->post_bit_error - dev->post_bit_error_prev);
+ dev->post_bit_error_prev = dev->post_bit_error;
+
+ return 0;
}
/*
@@ -1081,37 +1062,46 @@ static struct i2c_adapter *rtl2832_get_i2c_adapter(struct i2c_client *client)
return dev->i2c_adapter_tuner;
}
-static int rtl2832_enable_slave_ts(struct i2c_client *client)
+static int rtl2832_slave_ts_ctrl(struct i2c_client *client, bool enable)
{
struct rtl2832_dev *dev = i2c_get_clientdata(client);
int ret;
- dev_dbg(&client->dev, "\n");
-
- ret = rtl2832_bulk_write(client, 0x10c, "\x5f\xff", 2);
- if (ret)
- goto err;
-
- ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x1);
- if (ret)
- goto err;
-
- ret = rtl2832_bulk_write(client, 0x0bc, "\x18", 1);
- if (ret)
- goto err;
-
- ret = rtl2832_bulk_write(client, 0x192, "\x7f\xf7\xff", 3);
- if (ret)
- goto err;
-
- /* soft reset */
- ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
- if (ret)
- goto err;
+ dev_dbg(&client->dev, "enable=%d\n", enable);
- ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
- if (ret)
- goto err;
+ if (enable) {
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x10c, "\x5f\xff", 2);
+ if (ret)
+ goto err;
+ ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x1);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x0bc, "\x18", 1);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x192, "\x7f\xf7\xff", 3);
+ if (ret)
+ goto err;
+ } else {
+ ret = rtl2832_bulk_write(client, 0x192, "\x00\x0f\xff", 3);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x0bc, "\x08", 1);
+ if (ret)
+ goto err;
+ ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x0);
+ if (ret)
+ goto err;
+ ret = rtl2832_bulk_write(client, 0x10c, "\x00\x00", 2);
+ if (ret)
+ goto err;
+ ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
+ if (ret)
+ goto err;
+ }
return 0;
err:
@@ -1227,7 +1217,6 @@ static int rtl2832_probe(struct i2c_client *client,
dev->pdata = client->dev.platform_data;
dev->sleeping = true;
INIT_DELAYED_WORK(&dev->i2c_gate_work, rtl2832_i2c_gate_work);
- INIT_DELAYED_WORK(&dev->stat_work, rtl2832_stat_work);
/* create regmap */
mutex_init(&dev->regmap_mutex);
dev->regmap_config.reg_bits = 8,
@@ -1267,7 +1256,7 @@ static int rtl2832_probe(struct i2c_client *client,
/* setup callbacks */
pdata->get_dvb_frontend = rtl2832_get_dvb_frontend;
pdata->get_i2c_adapter = rtl2832_get_i2c_adapter;
- pdata->enable_slave_ts = rtl2832_enable_slave_ts;
+ pdata->slave_ts_ctrl = rtl2832_slave_ts_ctrl;
pdata->pid_filter = rtl2832_pid_filter;
pdata->pid_filter_ctrl = rtl2832_pid_filter_ctrl;
pdata->bulk_read = rtl2832_bulk_read;
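
enable_slave_ts becomes slave_ts_ctrl(client, bool): one callback the bridge driver can use on both paths, with the disable branch undoing the enable writes in reverse order. A sketch of how a hypothetical bridge might drive it around slave demod use (the callback body here is a stub, not the rtl2832 implementation):

#include <stdio.h>
#include <stdbool.h>

/* stand-in for the rtl2832 platform-data callback; hypothetical plumbing */
static int slave_ts_ctrl(void *client, bool enable)
{
	(void)client;
	printf("slave TS %s\n", enable ? "enabled" : "disabled");
	return 0;
}

static int bridge_stream_slave(void *client)
{
	int ret = slave_ts_ctrl(client, true);
	if (ret)
		return ret;
	/* ... stream from the slave demod ... */
	return slave_ts_ctrl(client, false);	/* symmetric teardown */
}

int main(void) { return bridge_stream_slave(0); }
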
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index c29a4c2bf71a..6390af64cf45 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -31,7 +31,7 @@
* @tuner: Used tuner model.
* @get_dvb_frontend: Get DVB frontend.
* @get_i2c_adapter: Get I2C adapter.
- * @enable_slave_ts: Enable slave TS IF.
+ * @slave_ts_ctrl: Control slave TS interface.
* @pid_filter: Set PID to PID filter.
* @pid_filter_ctrl: Control PID filter.
*/
@@ -53,7 +53,7 @@ struct rtl2832_platform_data {
struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
struct i2c_adapter* (*get_i2c_adapter)(struct i2c_client *);
- int (*enable_slave_ts)(struct i2c_client *);
+ int (*slave_ts_ctrl)(struct i2c_client *, bool);
int (*pid_filter)(struct dvb_frontend *, u8, u16, int);
int (*pid_filter_ctrl)(struct dvb_frontend *, int);
/* private: Register access for SDR module use only */
diff --git a/drivers/media/dvb-frontends/rtl2832_priv.h b/drivers/media/dvb-frontends/rtl2832_priv.h
index 5dcd3a41d23f..6b875f462f8b 100644
--- a/drivers/media/dvb-frontends/rtl2832_priv.h
+++ b/drivers/media/dvb-frontends/rtl2832_priv.h
@@ -38,7 +38,6 @@ struct rtl2832_dev {
struct regmap *regmap;
struct i2c_adapter *i2c_adapter_tuner;
struct dvb_frontend fe;
- struct delayed_work stat_work;
enum fe_status fe_status;
u64 post_bit_error_prev; /* for old DVBv3 read_ber() calculation */
u64 post_bit_error;
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index 28239b1fd954..f667005a6661 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1087,7 +1087,7 @@ u8 stv0900_get_optim_carr_loop(s32 srate, enum fe_stv0900_modcode modcode,
s32 pilot, u8 chip_id)
{
u8 aclc_value = 0x29;
- s32 i;
+ s32 i, cllas2_size;
const struct stv0900_car_loop_optim *cls2, *cllqs2, *cllas2;
dprintk("%s\n", __func__);
@@ -1096,14 +1096,17 @@ u8 stv0900_get_optim_carr_loop(s32 srate, enum fe_stv0900_modcode modcode,
cls2 = FE_STV0900_S2CarLoop;
cllqs2 = FE_STV0900_S2LowQPCarLoopCut30;
cllas2 = FE_STV0900_S2APSKCarLoopCut30;
+ cllas2_size = ARRAY_SIZE(FE_STV0900_S2APSKCarLoopCut30);
} else if (chip_id == 0x20) {
cls2 = FE_STV0900_S2CarLoopCut20;
cllqs2 = FE_STV0900_S2LowQPCarLoopCut20;
cllas2 = FE_STV0900_S2APSKCarLoopCut20;
+ cllas2_size = ARRAY_SIZE(FE_STV0900_S2APSKCarLoopCut20);
} else {
cls2 = FE_STV0900_S2CarLoopCut30;
cllqs2 = FE_STV0900_S2LowQPCarLoopCut30;
cllas2 = FE_STV0900_S2APSKCarLoopCut30;
+ cllas2_size = ARRAY_SIZE(FE_STV0900_S2APSKCarLoopCut30);
}
if (modcode < STV0900_QPSK_12) {
@@ -1178,7 +1181,7 @@ u8 stv0900_get_optim_carr_loop(s32 srate, enum fe_stv0900_modcode modcode,
aclc_value = cls2[i].car_loop_pilots_off_30;
}
- } else {
+ } else if (i < cllas2_size) {
if (srate <= 3000000)
aclc_value = cllas2[i].car_loop_pilots_on_2;
else if (srate <= 7000000)
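The stv0900 hunk pairs each table pointer with its ARRAY_SIZE at selection time, so the later "i < cllas2_size" test cannot index past the shortest table. A minimal, self-contained sketch of the pattern; the tables and values are illustrative:

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int table_cut20[] = { 1, 2, 3 };
    static const int table_cut30[] = { 4, 5 };

    static int bounded_lookup(int use_cut20, int i)
    {
            const int *tbl = use_cut20 ? table_cut20 : table_cut30;
            int tbl_size = use_cut20 ? (int)ARRAY_SIZE(table_cut20)
                                     : (int)ARRAY_SIZE(table_cut30);

            if (i < 0 || i >= tbl_size)
                    return -1;      /* out of range instead of reading past the end */
            return tbl[i];
    }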
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index befb07df036d..41a1bfc5eaa7 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -2095,7 +2095,8 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
rep_write(sd, 0x76, spa_loc & 0xff);
rep_write_clr_set(sd, 0x77, 0x40, (spa_loc & 0x100) >> 2);
} else {
- /* FIXME: Where is the SPA location LSB register ? */
+ /* ADV7612 Software Manual Rev. A, p. 15 */
+ rep_write(sd, 0x70, spa_loc & 0xff);
rep_write_clr_set(sd, 0x71, 0x01, (spa_loc & 0x100) >> 8);
}
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index da7469bc6e56..972e0d47259d 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1198,21 +1198,21 @@ static int tc358743_log_status(struct v4l2_subdev *sd)
#ifdef CONFIG_VIDEO_ADV_DEBUG
static void tc358743_print_register_map(struct v4l2_subdev *sd)
{
- v4l2_info(sd, "0x0000–0x00FF: Global Control Register\n");
- v4l2_info(sd, "0x0100–0x01FF: CSI2-TX PHY Register\n");
- v4l2_info(sd, "0x0200–0x03FF: CSI2-TX PPI Register\n");
- v4l2_info(sd, "0x0400–0x05FF: Reserved\n");
- v4l2_info(sd, "0x0600–0x06FF: CEC Register\n");
- v4l2_info(sd, "0x0700–0x84FF: Reserved\n");
- v4l2_info(sd, "0x8500–0x85FF: HDMIRX System Control Register\n");
- v4l2_info(sd, "0x8600–0x86FF: HDMIRX Audio Control Register\n");
- v4l2_info(sd, "0x8700–0x87FF: HDMIRX InfoFrame packet data Register\n");
- v4l2_info(sd, "0x8800–0x88FF: HDMIRX HDCP Port Register\n");
- v4l2_info(sd, "0x8900–0x89FF: HDMIRX Video Output Port & 3D Register\n");
- v4l2_info(sd, "0x8A00–0x8BFF: Reserved\n");
- v4l2_info(sd, "0x8C00–0x8FFF: HDMIRX EDID-RAM (1024bytes)\n");
- v4l2_info(sd, "0x9000–0x90FF: HDMIRX GBD Extraction Control\n");
- v4l2_info(sd, "0x9100–0x92FF: HDMIRX GBD RAM read\n");
+ v4l2_info(sd, "0x0000-0x00FF: Global Control Register\n");
+ v4l2_info(sd, "0x0100-0x01FF: CSI2-TX PHY Register\n");
+ v4l2_info(sd, "0x0200-0x03FF: CSI2-TX PPI Register\n");
+ v4l2_info(sd, "0x0400-0x05FF: Reserved\n");
+ v4l2_info(sd, "0x0600-0x06FF: CEC Register\n");
+ v4l2_info(sd, "0x0700-0x84FF: Reserved\n");
+ v4l2_info(sd, "0x8500-0x85FF: HDMIRX System Control Register\n");
+ v4l2_info(sd, "0x8600-0x86FF: HDMIRX Audio Control Register\n");
+ v4l2_info(sd, "0x8700-0x87FF: HDMIRX InfoFrame packet data Register\n");
+ v4l2_info(sd, "0x8800-0x88FF: HDMIRX HDCP Port Register\n");
+ v4l2_info(sd, "0x8900-0x89FF: HDMIRX Video Output Port & 3D Register\n");
+ v4l2_info(sd, "0x8A00-0x8BFF: Reserved\n");
+ v4l2_info(sd, "0x8C00-0x8FFF: HDMIRX EDID-RAM (1024bytes)\n");
+ v4l2_info(sd, "0x9000-0x90FF: HDMIRX GBD Extraction Control\n");
+ v4l2_info(sd, "0x9100-0x92FF: HDMIRX GBD RAM read\n");
v4l2_info(sd, "0x9300- : Reserved\n");
}
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index ef393f5daf2a..ff18444e19e4 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1386,7 +1386,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
goto err_connector;
}
- if (input_type > TVP5150_INPUT_NUM) {
+ if (input_type >= TVP5150_INPUT_NUM) {
ret = -EINVAL;
goto err_connector;
}
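The tvp5150 fix is the classic count-versus-index off-by-one: with N entries the valid indices are 0..N-1, so rejection must use ">=". A minimal sketch; the macro value here is illustrative, not taken from the driver:

    #define INPUT_NUM 2     /* illustrative count */

    static int validate_input(unsigned int input_type)
    {
            if (input_type >= INPUT_NUM)    /* input_type == INPUT_NUM is already invalid */
                    return -1;
            return 0;
    }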
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 711c3674a5d9..6e43c95629ea 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -30,6 +30,8 @@
#include <linux/media.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
#include <media/media-device.h>
#include <media/media-devnode.h>
@@ -41,6 +43,11 @@
* Userspace API
*/
+static inline void __user *media_get_uptr(__u64 arg)
+{
+ return (void __user *)(uintptr_t)arg;
+}
+
static int media_device_open(struct file *filp)
{
return 0;
@@ -268,10 +275,10 @@ static long __media_device_get_topology(struct media_device *mdev,
struct media_interface *intf;
struct media_pad *pad;
struct media_link *link;
- struct media_v2_entity kentity, *uentity;
- struct media_v2_interface kintf, *uintf;
- struct media_v2_pad kpad, *upad;
- struct media_v2_link klink, *ulink;
+ struct media_v2_entity kentity, __user *uentity;
+ struct media_v2_interface kintf, __user *uintf;
+ struct media_v2_pad kpad, __user *upad;
+ struct media_v2_link klink, __user *ulink;
unsigned int i;
int ret = 0;
@@ -563,6 +570,7 @@ static void media_device_release(struct media_devnode *mdev)
int __must_check media_device_register_entity(struct media_device *mdev,
struct media_entity *entity)
{
+ struct media_entity_notify *notify, *next;
unsigned int i;
int ret;
@@ -602,8 +610,33 @@ int __must_check media_device_register_entity(struct media_device *mdev,
media_gobj_create(mdev, MEDIA_GRAPH_PAD,
&entity->pads[i].graph_obj);
+ /* invoke entity_notify callbacks */
+ list_for_each_entry_safe(notify, next, &mdev->entity_notify, list) {
+ notify->notify(entity, notify->notify_data);
+ }
+
spin_unlock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
+ if (mdev->entity_internal_idx_max
+ >= mdev->pm_count_walk.ent_enum.idx_max) {
+ struct media_entity_graph new = { .top = 0 };
+
+ /*
+ * Initialise the new graph walk before cleaning up
+ * the old one in order not to spoil the graph walk
+ * object of the media device if graph walk init fails.
+ */
+ ret = media_entity_graph_walk_init(&new, mdev);
+ if (ret) {
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+ media_entity_graph_walk_cleanup(&mdev->pm_count_walk);
+ mdev->pm_count_walk = new;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+
return 0;
}
EXPORT_SYMBOL_GPL(media_device_register_entity);
@@ -635,6 +668,8 @@ static void __media_device_unregister_entity(struct media_entity *entity)
/* Remove the entity */
media_gobj_destroy(&entity->graph_obj);
+ /* TODO: invoke entity_notify callbacks to handle entity removal */
+
entity->graph_obj.mdev = NULL;
}
@@ -667,6 +702,7 @@ void media_device_init(struct media_device *mdev)
INIT_LIST_HEAD(&mdev->interfaces);
INIT_LIST_HEAD(&mdev->pads);
INIT_LIST_HEAD(&mdev->links);
+ INIT_LIST_HEAD(&mdev->entity_notify);
spin_lock_init(&mdev->lock);
mutex_init(&mdev->graph_mutex);
ida_init(&mdev->entity_internal_idx);
@@ -679,6 +715,7 @@ void media_device_cleanup(struct media_device *mdev)
{
ida_destroy(&mdev->entity_internal_idx);
mdev->entity_internal_idx_max = 0;
+ media_entity_graph_walk_cleanup(&mdev->pm_count_walk);
mutex_destroy(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_device_cleanup);
@@ -712,11 +749,40 @@ int __must_check __media_device_register(struct media_device *mdev,
}
EXPORT_SYMBOL_GPL(__media_device_register);
+int __must_check media_device_register_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ spin_lock(&mdev->lock);
+ list_add_tail(&nptr->list, &mdev->entity_notify);
+ spin_unlock(&mdev->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(media_device_register_entity_notify);
+
+/*
+ * Note: Should be called with mdev->lock held.
+ */
+static void __media_device_unregister_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ list_del(&nptr->list);
+}
+
+void media_device_unregister_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ spin_lock(&mdev->lock);
+ __media_device_unregister_entity_notify(mdev, nptr);
+ spin_unlock(&mdev->lock);
+}
+EXPORT_SYMBOL_GPL(media_device_unregister_entity_notify);
+
void media_device_unregister(struct media_device *mdev)
{
struct media_entity *entity;
struct media_entity *next;
struct media_interface *intf, *tmp_intf;
+ struct media_entity_notify *notify, *nextp;
if (mdev == NULL)
return;
@@ -733,6 +799,10 @@ void media_device_unregister(struct media_device *mdev)
list_for_each_entry_safe(entity, next, &mdev->entities, graph_obj.list)
__media_device_unregister_entity(entity);
+ /* Remove all entity_notify callbacks from the media device */
+ list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list)
+ __media_device_unregister_entity_notify(mdev, notify);
+
/* Remove all interfaces from the media device */
list_for_each_entry_safe(intf, tmp_intf, &mdev->interfaces,
graph_obj.list) {
@@ -776,4 +846,58 @@ struct media_device *media_device_find_devres(struct device *dev)
}
EXPORT_SYMBOL_GPL(media_device_find_devres);
+void media_device_pci_init(struct media_device *mdev,
+ struct pci_dev *pci_dev,
+ const char *name)
+{
+#ifdef CONFIG_PCI
+ mdev->dev = &pci_dev->dev;
+
+ if (name)
+ strlcpy(mdev->model, name, sizeof(mdev->model));
+ else
+ strlcpy(mdev->model, pci_name(pci_dev), sizeof(mdev->model));
+
+ sprintf(mdev->bus_info, "PCI:%s", pci_name(pci_dev));
+
+ mdev->hw_revision = (pci_dev->subsystem_vendor << 16)
+ | pci_dev->subsystem_device;
+
+ mdev->driver_version = LINUX_VERSION_CODE;
+
+ media_device_init(mdev);
+#endif
+}
+EXPORT_SYMBOL_GPL(media_device_pci_init);
+
+void __media_device_usb_init(struct media_device *mdev,
+ struct usb_device *udev,
+ const char *board_name,
+ const char *driver_name)
+{
+#ifdef CONFIG_USB
+ mdev->dev = &udev->dev;
+
+ if (driver_name)
+ strlcpy(mdev->driver_name, driver_name,
+ sizeof(mdev->driver_name));
+
+ if (board_name)
+ strlcpy(mdev->model, board_name, sizeof(mdev->model));
+ else if (udev->product)
+ strlcpy(mdev->model, udev->product, sizeof(mdev->model));
+ else
+ strlcpy(mdev->model, "unknown model", sizeof(mdev->model));
+ if (udev->serial)
+ strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+ usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info));
+ mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+ mdev->driver_version = LINUX_VERSION_CODE;
+
+ media_device_init(mdev);
+#endif
+}
+EXPORT_SYMBOL_GPL(__media_device_usb_init);
+
+
#endif /* CONFIG_MEDIA_CONTROLLER */
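A minimal sketch of how a consumer might use the entity_notify hooks added above. Per the registration code, the callback fires under spin_lock(&mdev->lock), so it must not sleep; the driver names are illustrative:

    static void my_entity_notify(struct media_entity *entity, void *notify_data)
    {
            /* called for every entity registered after us; atomic context */
            pr_info("new entity: %s\n", entity->name);
    }

    static struct media_entity_notify my_notify = {
            .notify      = my_entity_notify,
            .notify_data = NULL,
    };

    static int my_probe_hook(struct media_device *mdev)
    {
            return media_device_register_entity_notify(mdev, &my_notify);
    }

    static void my_remove_hook(struct media_device *mdev)
    {
            media_device_unregister_entity_notify(mdev, &my_notify);
    }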
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index f2e43603d6d2..e95070b3a3d4 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -46,25 +46,41 @@ static inline const char *intf_type(struct media_interface *intf)
{
switch (intf->type) {
case MEDIA_INTF_T_DVB_FE:
- return "frontend";
+ return "dvb-frontend";
case MEDIA_INTF_T_DVB_DEMUX:
- return "demux";
+ return "dvb-demux";
case MEDIA_INTF_T_DVB_DVR:
- return "DVR";
+ return "dvb-dvr";
case MEDIA_INTF_T_DVB_CA:
- return "CA";
+ return "dvb-ca";
case MEDIA_INTF_T_DVB_NET:
- return "dvbnet";
+ return "dvb-net";
case MEDIA_INTF_T_V4L_VIDEO:
- return "video";
+ return "v4l-video";
case MEDIA_INTF_T_V4L_VBI:
- return "vbi";
+ return "v4l-vbi";
case MEDIA_INTF_T_V4L_RADIO:
- return "radio";
+ return "v4l-radio";
case MEDIA_INTF_T_V4L_SUBDEV:
- return "v4l2-subdev";
+ return "v4l-subdev";
case MEDIA_INTF_T_V4L_SWRADIO:
- return "swradio";
+ return "v4l-swradio";
+ case MEDIA_INTF_T_ALSA_PCM_CAPTURE:
+ return "alsa-pcm-capture";
+ case MEDIA_INTF_T_ALSA_PCM_PLAYBACK:
+ return "alsa-pcm-playback";
+ case MEDIA_INTF_T_ALSA_CONTROL:
+ return "alsa-control";
+ case MEDIA_INTF_T_ALSA_COMPRESS:
+ return "alsa-compress";
+ case MEDIA_INTF_T_ALSA_RAWMIDI:
+ return "alsa-rawmidi";
+ case MEDIA_INTF_T_ALSA_HWDEP:
+ return "alsa-hwdep";
+ case MEDIA_INTF_T_ALSA_SEQUENCER:
+ return "alsa-sequencer";
+ case MEDIA_INTF_T_ALSA_TIMER:
+ return "alsa-timer";
default:
return "unknown-intf";
}
@@ -73,8 +89,9 @@ static inline const char *intf_type(struct media_interface *intf)
__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum,
int idx_max)
{
- ent_enum->bmap = kcalloc(DIV_ROUND_UP(idx_max, BITS_PER_LONG),
- sizeof(long), GFP_KERNEL);
+ idx_max = ALIGN(idx_max, BITS_PER_LONG);
+ ent_enum->bmap = kcalloc(idx_max / BITS_PER_LONG, sizeof(long),
+ GFP_KERNEL);
if (!ent_enum->bmap)
return -ENOMEM;
@@ -349,8 +366,8 @@ EXPORT_SYMBOL_GPL(media_entity_graph_walk_next);
* Pipeline management
*/
-__must_check int media_entity_pipeline_start(struct media_entity *entity,
- struct media_pipeline *pipe)
+__must_check int __media_entity_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe)
{
struct media_device *mdev = entity->graph_obj.mdev;
struct media_entity_graph *graph = &pipe->graph;
@@ -358,8 +375,6 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
struct media_link *link;
int ret;
- mutex_lock(&mdev->graph_mutex);
-
if (!pipe->streaming_count++) {
ret = media_entity_graph_walk_init(&pipe->graph, mdev);
if (ret)
@@ -440,8 +455,6 @@ __must_check int media_entity_pipeline_start(struct media_entity *entity,
}
}
- mutex_unlock(&mdev->graph_mutex);
-
return 0;
error:
@@ -471,19 +484,28 @@ error_graph_walk_start:
if (!--pipe->streaming_count)
media_entity_graph_walk_cleanup(graph);
- mutex_unlock(&mdev->graph_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__media_entity_pipeline_start);
+__must_check int media_entity_pipeline_start(struct media_entity *entity,
+ struct media_pipeline *pipe)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+ ret = __media_entity_pipeline_start(entity, pipe);
+ mutex_unlock(&mdev->graph_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(media_entity_pipeline_start);
-void media_entity_pipeline_stop(struct media_entity *entity)
+void __media_entity_pipeline_stop(struct media_entity *entity)
{
- struct media_device *mdev = entity->graph_obj.mdev;
struct media_entity_graph *graph = &entity->pipe->graph;
struct media_pipeline *pipe = entity->pipe;
- mutex_lock(&mdev->graph_mutex);
WARN_ON(!pipe->streaming_count);
media_entity_graph_walk_start(graph, entity);
@@ -500,6 +522,15 @@ void media_entity_pipeline_stop(struct media_entity *entity)
if (!--pipe->streaming_count)
media_entity_graph_walk_cleanup(graph);
+}
+EXPORT_SYMBOL_GPL(__media_entity_pipeline_stop);
+
+void media_entity_pipeline_stop(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+
+ mutex_lock(&mdev->graph_mutex);
+ __media_entity_pipeline_stop(entity);
mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_entity_pipeline_stop);
@@ -789,6 +820,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
return ret;
}
+EXPORT_SYMBOL_GPL(__media_entity_setup_link);
int media_entity_setup_link(struct media_link *link, u32 flags)
{
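The split into locked media_entity_pipeline_start()/stop() wrappers and unlocked __-prefixed cores lets a caller that already holds graph_mutex compose several graph operations atomically. A minimal sketch under that assumption, with error handling simplified:

    static int start_two_pipelines(struct media_device *mdev,
                                   struct media_entity *a, struct media_pipeline *pa,
                                   struct media_entity *b, struct media_pipeline *pb)
    {
            int ret;

            mutex_lock(&mdev->graph_mutex);
            ret = __media_entity_pipeline_start(a, pa);
            if (!ret) {
                    ret = __media_entity_pipeline_start(b, pb);
                    if (ret)
                            __media_entity_pipeline_stop(a);
            }
            mutex_unlock(&mdev->graph_mutex);
            return ret;
    }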
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 2c412377507b..df54e17ef864 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2321,6 +2321,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
return 0;
}
+static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
+ unsigned int *width_mask,
+ unsigned int *width_bias)
+{
+ if (fmt->flags & FORMAT_FLAGS_PLANAR) {
+ *width_mask = ~15; /* width must be a multiple of 16 pixels */
+ *width_bias = 8; /* nearest */
+ } else {
+ *width_mask = ~3; /* width must be a multiple of 4 pixels */
+ *width_bias = 2; /* nearest */
+ }
+}
+
static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -2330,6 +2343,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
enum v4l2_field field;
__s32 width, height;
__s32 height2;
+ unsigned int width_mask, width_bias;
int rc;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
@@ -2362,9 +2376,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
width = f->fmt.pix.width;
height = f->fmt.pix.height;
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
rc = limit_scaled_size_lock(fh, &width, &height, field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 0);
if (0 != rc)
@@ -2397,6 +2411,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
__s32 width, height;
+ unsigned int width_mask, width_bias;
enum v4l2_field field;
retval = bttv_switch_type(fh, f->type);
@@ -2411,9 +2426,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
height = f->fmt.pix.height;
field = f->fmt.pix.field;
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
- /* width_mask: 4 pixels */ ~3,
- /* width_bias: nearest */ 2,
+ width_mask, width_bias,
/* adjust_size */ 1,
/* adjust_crop */ 1);
if (0 != retval)
@@ -2421,8 +2437,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = field;
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-
/* update our state information */
fh->fmt = fmt;
fh->cap.field = f->fmt.pix.field;
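How the mask/bias pair rounds a requested width: adding the bias and then masking rounds to the nearest allowed multiple (4 pixels for packed formats, 16 for planar). A tiny self-contained sketch:

    static unsigned int round_width(unsigned int width,
                                    unsigned int width_mask,
                                    unsigned int width_bias)
    {
            return (width + width_bias) & width_mask;
    }

    /* round_width(21, ~3u, 2)  == 20  -- nearest multiple of 4  */
    /* round_width(23, ~15u, 8) == 16, round_width(25, ~15u, 8) == 32 */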
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 5131c9f555fb..f041b6931ba8 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1139,7 +1139,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
u8 eeprom[256]; /* 24C02 i2c eeprom */
struct sp2_config sp2_config;
struct i2c_board_info info;
- struct cx23885_i2c *i2c_bus2 = &dev->i2c_bus[1];
+ struct cx23885_i2c *i2c_bus = &dev->i2c_bus[0];
/* attach CI */
memset(&sp2_config, 0, sizeof(sp2_config));
@@ -1151,7 +1151,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
info.addr = 0x40;
info.platform_data = &sp2_config;
request_module(info.type);
- client_ci = i2c_new_device(&i2c_bus2->i2c_adap, &info);
+ client_ci = i2c_new_device(&i2c_bus->i2c_adap, &info);
if (client_ci == NULL || client_ci->dev.driver == NULL)
return -ENODEV;
if (!try_module_get(client_ci->dev.driver->owner)) {
@@ -1988,8 +1988,8 @@ static int dvb_register(struct cx23885_tsport *port)
break;
case CX23885_BOARD_DVBSKY_T980C:
case CX23885_BOARD_TT_CT2_4500_CI:
- i2c_bus = &dev->i2c_bus[1];
- i2c_bus2 = &dev->i2c_bus[0];
+ i2c_bus = &dev->i2c_bus[0];
+ i2c_bus2 = &dev->i2c_bus[1];
/* attach frontend */
memset(&si2168_config, 0, sizeof(si2168_config));
@@ -2001,7 +2001,7 @@ static int dvb_register(struct cx23885_tsport *port)
info.addr = 0x64;
info.platform_data = &si2168_config;
request_module(info.type);
- client_demod = i2c_new_device(&i2c_bus->i2c_adap, &info);
+ client_demod = i2c_new_device(&i2c_bus2->i2c_adap, &info);
if (client_demod == NULL || client_demod->dev.driver == NULL)
goto frontend_detach;
if (!try_module_get(client_demod->dev.driver->owner)) {
@@ -2030,13 +2030,13 @@ static int dvb_register(struct cx23885_tsport *port)
port->i2c_client_tuner = client_tuner;
break;
case CX23885_BOARD_DVBSKY_S950C:
- i2c_bus = &dev->i2c_bus[1];
- i2c_bus2 = &dev->i2c_bus[0];
+ i2c_bus = &dev->i2c_bus[0];
+ i2c_bus2 = &dev->i2c_bus[1];
/* attach frontend */
fe0->dvb.frontend = dvb_attach(m88ds3103_attach,
&dvbsky_s950c_m88ds3103_config,
- &i2c_bus->i2c_adap, &adapter);
+ &i2c_bus2->i2c_adap, &adapter);
if (fe0->dvb.frontend == NULL)
break;
diff --git a/drivers/media/pci/ivtv/ivtv-queue.c b/drivers/media/pci/ivtv/ivtv-queue.c
index 7fde36e6d227..2128c2a8d7fd 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.c
+++ b/drivers/media/pci/ivtv/ivtv-queue.c
@@ -141,7 +141,7 @@ int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_
spin_unlock_irqrestore(&s->qlock, flags);
return -ENOMEM;
}
- while (bytes_available < needed_bytes) {
+ while (steal && bytes_available < needed_bytes) {
struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
u16 dma_xfer_cnt = buf->dma_xfer_cnt;
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 24152accc66c..4769469fe842 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Get user pages for DMA Xfer */
- err = get_user_pages_unlocked(current, current->mm,
- user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);
+ err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
+ 1, dma->map);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
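The hunks in this file only adapt to the get_user_pages_unlocked() signature change: the task and mm arguments were dropped because the helper now always acts on current->mm. A minimal wrapper matching the five-argument form used above:

    static long map_user_pages(unsigned long uaddr, unsigned long nr_pages,
                               struct page **pages)
    {
            /* write = 0, force = 1, mirroring the driver's flags */
            return get_user_pages_unlocked(uaddr, nr_pages, 0, 1, pages);
    }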
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 2b8e7b2f2b86..b094054cda6e 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -75,14 +75,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
/* Get user pages for DMA Xfer */
- y_pages = get_user_pages_unlocked(current, current->mm,
- y_dma.uaddr, y_dma.page_count, 0, 1,
- &dma->map[0]);
+ y_pages = get_user_pages_unlocked(y_dma.uaddr,
+ y_dma.page_count, 0, 1, &dma->map[0]);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
- uv_pages = get_user_pages_unlocked(current, current->mm,
- uv_dma.uaddr, uv_dma.page_count, 0, 1,
- &dma->map[y_pages]);
+ uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
+ uv_dma.page_count, 0, 1, &dma->map[y_pages]);
}
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
diff --git a/drivers/media/pci/pt3/pt3.c b/drivers/media/pci/pt3/pt3.c
index 0d2e2b217121..eff5e9f51ace 100644
--- a/drivers/media/pci/pt3/pt3.c
+++ b/drivers/media/pci/pt3/pt3.c
@@ -395,7 +395,8 @@ static int pt3_attach_fe(struct pt3_board *pt3, int i)
if (!try_module_get(cl->dev.driver->owner))
goto err_demod_i2c_unregister_device;
- if (!strncmp(cl->name, TC90522_I2C_DEV_SAT, sizeof(cl->name))) {
+ if (!strncmp(cl->name, TC90522_I2C_DEV_SAT,
+ strlen(TC90522_I2C_DEV_SAT))) {
struct qm1d1c0042_config tcfg;
tcfg = adap_conf[i].tuner_cfg.qm1d1c0042;
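The pt3 fix narrows strncmp() from the whole name buffer to the prefix length, turning a full-buffer comparison (which fails if anything follows the prefix) into an explicit prefix match. A self-contained userspace sketch of the corrected idiom:

    #include <string.h>

    static int has_prefix(const char *name, const char *prefix)
    {
            return strncmp(name, prefix, strlen(prefix)) == 0;
    }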
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index 9a2fdc78eb85..c480a7e87593 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5733,7 +5733,36 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x08,
},
},
-
+ [SAA7134_BOARD_SNAZIO_TVPVR_PRO] = {
+ .name = "SnaZio* TVPVR PRO",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .gpiomask = 1 << 21,
+ .inputs = { {
+ .type = SAA7134_INPUT_TV,
+ .vmux = 1,
+ .amux = TV,
+ .gpio = 0x0000000,
+ }, {
+ .type = SAA7134_INPUT_COMPOSITE1,
+ .vmux = 3,
+ .amux = LINE2,
+ .gpio = 0x0000000,
+ }, {
+ .type = SAA7134_INPUT_SVIDEO,
+ .vmux = 8,
+ .amux = LINE2,
+ .gpio = 0x0000000,
+ } },
+ .radio = {
+ .type = SAA7134_INPUT_RADIO,
+ .amux = TV,
+ .gpio = 0x0200000,
+ },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -7004,6 +7033,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x6f3a,
.driver_data = SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x1779, /* V One Multimedia PTE Ltd */
+ .subdevice = 0x13cf,
+ .driver_data = SAA7134_BOARD_SNAZIO_TVPVR_PRO,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -7534,6 +7569,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
case SAA7134_BOARD_BEHOLD_H7:
case SAA7134_BOARD_BEHOLD_A7:
case SAA7134_BOARD_KWORLD_PC150U:
+ case SAA7134_BOARD_SNAZIO_TVPVR_PRO:
dev->has_remote = SAA7134_REMOTE_I2C;
break;
case SAA7134_BOARD_AVERMEDIA_A169_B:
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 42bc4172febd..c0e1780ec831 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -829,18 +829,19 @@ static void saa7134_media_release(struct saa7134_dev *dev)
#endif
}
+#if defined(CONFIG_MEDIA_CONTROLLER)
static void saa7134_create_entities(struct saa7134_dev *dev)
{
-#if defined(CONFIG_MEDIA_CONTROLLER)
int ret, i;
struct media_entity *entity;
struct media_entity *decoder = NULL;
/* Check if it is using an external analog TV demod */
media_device_for_each_entity(entity, dev->media_dev) {
- if (entity->function == MEDIA_ENT_F_ATV_DECODER)
+ if (entity->function == MEDIA_ENT_F_ATV_DECODER) {
decoder = entity;
break;
+ }
}
/*
@@ -950,8 +951,8 @@ static void saa7134_create_entities(struct saa7134_dev *dev)
if (ret < 0)
pr_err("failed to register input entity %d!\n", i);
}
-#endif
}
+#endif
static struct video_device *vdev_init(struct saa7134_dev *dev,
struct video_device *template,
@@ -1042,11 +1043,12 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
sprintf(dev->name, "saa%x[%d]", pci_dev->device, dev->nr);
#ifdef CONFIG_MEDIA_CONTROLLER
- dev->media_dev = v4l2_mc_pci_media_device_init(pci_dev, dev->name);
+ dev->media_dev = kzalloc(sizeof(*dev->media_dev), GFP_KERNEL);
if (!dev->media_dev) {
err = -ENOMEM;
goto fail0;
}
+ media_device_pci_init(dev->media_dev, pci_dev, dev->name);
dev->v4l2_dev.mdev = dev->media_dev;
#endif
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 69d32d3fa32c..c8042c3888cd 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -975,6 +975,27 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
msg_msi.addr, dev->i2c_adap.name,
(1 == rc) ? "yes" : "no");
break;
+ case SAA7134_BOARD_SNAZIO_TVPVR_PRO:
+ dev->init_data.name = "SnaZio* TVPVR PRO";
+ dev->init_data.get_key = get_key_msi_tvanywhere_plus;
+ dev->init_data.ir_codes = RC_MAP_MSI_TVANYWHERE_PLUS;
+ /*
+ * MSI TV@nywhere Plus requires more frequent polling
+ * otherwise it will miss some keypresses
+ */
+ dev->init_data.polling_interval = 50;
+ info.addr = 0x30;
+ /*
+ * MSI TV@nywhere Plus controller doesn't seem to
+ * respond to probes unless we read something from
+ * an existing device. Weird...
+ * REVISIT: might no longer be needed
+ */
+ rc = i2c_transfer(&dev->i2c_adap, &msg_msi, 1);
+ input_dbg("probe 0x%02x @ %s: %s\n",
+ msg_msi.addr, dev->i2c_adap.name,
+ (rc == 1) ? "yes" : "no");
+ break;
case SAA7134_BOARD_KWORLD_PC150U:
/* copied and modified from MSI TV@nywhere Plus */
dev->init_data.name = "Kworld PC150-U";
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 8936568fab94..69a9bbf22d4d 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -343,6 +343,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_WIS_VOYAGER 193
#define SAA7134_BOARD_AVERMEDIA_505 194
#define SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM 195
+#define SAA7134_BOARD_SNAZIO_TVPVR_PRO 196
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 18d229fa65cf..382caf200ba1 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -2198,13 +2198,18 @@ static int frontend_init(struct av7110 *av7110)
break;
case 0x0001: // Hauppauge/TT Nexus-T premium rev1.X
+ {
+ struct dvb_frontend *fe;
+
// try ALPS TDLB7 first, then Grundig 29504-401
- av7110->fe = dvb_attach(sp8870_attach, &alps_tdlb7_config, &av7110->i2c_adap);
- if (av7110->fe) {
- av7110->fe->ops.tuner_ops.set_params = alps_tdlb7_tuner_set_params;
+ fe = dvb_attach(sp8870_attach, &alps_tdlb7_config, &av7110->i2c_adap);
+ if (fe) {
+ fe->ops.tuner_ops.set_params = alps_tdlb7_tuner_set_params;
+ av7110->fe = fe;
break;
}
- /* fall-thru */
+ }
+ /* fall-thru */
case 0x0008: // Hauppauge/TT DVB-T
// Grundig 29504-401
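The av7110 change probes into a local pointer and publishes it only after the tuner op has been wired up, so av7110->fe never points at a half-initialised frontend. A minimal sketch of the publish-last pattern; the types and helpers here are hypothetical:

    struct demod {
            struct dvb_frontend *fe;
    };

    static void attach_frontend(struct demod *d)
    {
            struct dvb_frontend *fe = try_attach_frontend();    /* hypothetical; may return NULL */

            if (!fe)
                    return;                 /* d->fe stays untouched */
            fe->ops.tuner_ops.set_params = my_tuner_set_params; /* hypothetical op */
            d->fe = fe;                     /* publish only when fully set up */
    }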
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index 9f48100227f1..fb8ede5a1531 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -615,33 +615,47 @@ static void frontend_init(struct budget *budget)
break;
case 0x1016: // Hauppauge/TT Nova-S SE (samsung s5h1420/????(tda8260))
- budget->dvb_frontend = dvb_attach(s5h1420_attach, &s5h1420_config, &budget->i2c_adap);
- if (budget->dvb_frontend) {
- budget->dvb_frontend->ops.tuner_ops.set_params = s5h1420_tuner_set_params;
- if (dvb_attach(lnbp21_attach, budget->dvb_frontend, &budget->i2c_adap, 0, 0) == NULL) {
+ {
+ struct dvb_frontend *fe;
+
+ fe = dvb_attach(s5h1420_attach, &s5h1420_config, &budget->i2c_adap);
+ if (fe) {
+ fe->ops.tuner_ops.set_params = s5h1420_tuner_set_params;
+ budget->dvb_frontend = fe;
+ if (dvb_attach(lnbp21_attach, fe, &budget->i2c_adap,
+ 0, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
goto error_out;
}
break;
}
-
+ }
+ /* fall through */
case 0x1018: // TT Budget-S-1401 (philips tda10086/philips tda8262)
+ {
+ struct dvb_frontend *fe;
+
// gpio2 is connected to CLB - reset it + leave it high
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
msleep(1);
saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
msleep(1);
- budget->dvb_frontend = dvb_attach(tda10086_attach, &tda10086_config, &budget->i2c_adap);
- if (budget->dvb_frontend) {
- if (dvb_attach(tda826x_attach, budget->dvb_frontend, 0x60, &budget->i2c_adap, 0) == NULL)
+ fe = dvb_attach(tda10086_attach, &tda10086_config, &budget->i2c_adap);
+ if (fe) {
+ budget->dvb_frontend = fe;
+ if (dvb_attach(tda826x_attach, fe, 0x60,
+ &budget->i2c_adap, 0) == NULL)
printk("%s: No tda826x found!\n", __func__);
- if (dvb_attach(lnbp21_attach, budget->dvb_frontend, &budget->i2c_adap, 0, 0) == NULL) {
+ if (dvb_attach(lnbp21_attach, fe,
+ &budget->i2c_adap, 0, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
goto error_out;
}
break;
}
+ }
+ /* fall through */
case 0x101c: { /* TT S2-1600 */
const struct stv6110x_devctl *ctl;
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 6efe9d002961..b6625047250d 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -1455,9 +1455,9 @@ static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx,
return 0;
ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
- ctx->bitstream.vaddr = dma_alloc_writecombine(
- &ctx->dev->plat_dev->dev, ctx->bitstream.size,
- &ctx->bitstream.paddr, GFP_KERNEL);
+ ctx->bitstream.vaddr = dma_alloc_wc(&ctx->dev->plat_dev->dev,
+ ctx->bitstream.size,
+ &ctx->bitstream.paddr, GFP_KERNEL);
if (!ctx->bitstream.vaddr) {
v4l2_err(&ctx->dev->v4l2_dev,
"failed to allocate bitstream ringbuffer");
@@ -1474,8 +1474,8 @@ static void coda_free_bitstream_buffer(struct coda_ctx *ctx)
if (ctx->bitstream.vaddr == NULL)
return;
- dma_free_writecombine(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
- ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ dma_free_wc(&ctx->dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
ctx->bitstream.vaddr = NULL;
kfifo_init(&ctx->bitstream_fifo, NULL, 0);
}
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 2d782ce94a67..133ab9f70f85 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1950,16 +1950,76 @@ static int coda_register_device(struct coda_dev *dev, int i)
return video_register_device(vfd, VFL_TYPE_GRABBER, 0);
}
+static void coda_copy_firmware(struct coda_dev *dev, const u8 * const buf,
+ size_t size)
+{
+ u32 *src = (u32 *)buf;
+
+ /* If the firmware has a 16-byte Freescale header, skip it */
+ if (buf[0] == 'M' && buf[1] == 'X')
+ src += 4;
+ /*
+ * Check whether the firmware is in native order or pre-reordered for
+ * memory access. The first instruction opcode always is 0xe40e.
+ */
+ if (__le16_to_cpup((__le16 *)src) == 0xe40e) {
+ u32 *dst = dev->codebuf.vaddr;
+ int i;
+
+ /* Firmware in native order, reorder while copying */
+ if (dev->devtype->product == CODA_DX6) {
+ for (i = 0; i < (size - 16) / 4; i++)
+ dst[i] = (src[i] << 16) | (src[i] >> 16);
+ } else {
+ for (i = 0; i < (size - 16) / 4; i += 2) {
+ dst[i] = (src[i + 1] << 16) | (src[i + 1] >> 16);
+ dst[i + 1] = (src[i] << 16) | (src[i] >> 16);
+ }
+ }
+ } else {
+ /* Copy the already reordered firmware image */
+ memcpy(dev->codebuf.vaddr, src, size);
+ }
+}
+
+static void coda_fw_callback(const struct firmware *fw, void *context);
+
+static int coda_firmware_request(struct coda_dev *dev)
+{
+ char *fw = dev->devtype->firmware[dev->firmware];
+
+ dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
+ coda_product_name(dev->devtype->product));
+
+ return request_firmware_nowait(THIS_MODULE, true, fw,
+ &dev->plat_dev->dev, GFP_KERNEL, dev,
+ coda_fw_callback);
+}
+
static void coda_fw_callback(const struct firmware *fw, void *context)
{
struct coda_dev *dev = context;
struct platform_device *pdev = dev->plat_dev;
int i, ret;
- if (!fw) {
+ if (!fw && dev->firmware == 1) {
v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
goto put_pm;
}
+ if (!fw) {
+ dev->firmware = 1;
+ coda_firmware_request(dev);
+ return;
+ }
+ if (dev->firmware == 1) {
+ /*
+ * Since we can't suppress warnings for failed asynchronous
+ * firmware requests, report that the fallback firmware was
+ * found.
+ */
+ dev_info(&pdev->dev, "Using fallback firmware %s\n",
+ dev->devtype->firmware[dev->firmware]);
+ }
/* allocate auxiliary per-device code buffer for the BIT processor */
ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size, "codebuf",
@@ -1967,8 +2027,7 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
if (ret < 0)
goto put_pm;
- /* Copy the whole firmware image to the code buffer */
- memcpy(dev->codebuf.vaddr, fw->data, fw->size);
+ coda_copy_firmware(dev, fw->data, fw->size);
release_firmware(fw);
ret = coda_hw_init(dev);
@@ -2019,17 +2078,6 @@ put_pm:
pm_runtime_put_sync(&pdev->dev);
}
-static int coda_firmware_request(struct coda_dev *dev)
-{
- char *fw = dev->devtype->firmware;
-
- dev_dbg(&dev->plat_dev->dev, "requesting firmware '%s' for %s\n", fw,
- coda_product_name(dev->devtype->product));
-
- return request_firmware_nowait(THIS_MODULE, true,
- fw, &dev->plat_dev->dev, GFP_KERNEL, dev, coda_fw_callback);
-}
-
enum coda_platform {
CODA_IMX27,
CODA_IMX53,
@@ -2039,7 +2087,10 @@ enum coda_platform {
static const struct coda_devtype coda_devdata[] = {
[CODA_IMX27] = {
- .firmware = "v4l-codadx6-imx27.bin",
+ .firmware = {
+ "vpu_fw_imx27_TO2.bin",
+ "v4l-codadx6-imx27.bin"
+ },
.product = CODA_DX6,
.codecs = codadx6_codecs,
.num_codecs = ARRAY_SIZE(codadx6_codecs),
@@ -2049,7 +2100,10 @@ static const struct coda_devtype coda_devdata[] = {
.iram_size = 0xb000,
},
[CODA_IMX53] = {
- .firmware = "v4l-coda7541-imx53.bin",
+ .firmware = {
+ "vpu_fw_imx53.bin",
+ "v4l-coda7541-imx53.bin"
+ },
.product = CODA_7541,
.codecs = coda7_codecs,
.num_codecs = ARRAY_SIZE(coda7_codecs),
@@ -2060,7 +2114,10 @@ static const struct coda_devtype coda_devdata[] = {
.iram_size = 0x14000,
},
[CODA_IMX6Q] = {
- .firmware = "v4l-coda960-imx6q.bin",
+ .firmware = {
+ "vpu_fw_imx6q.bin",
+ "v4l-coda960-imx6q.bin"
+ },
.product = CODA_960,
.codecs = coda9_codecs,
.num_codecs = ARRAY_SIZE(coda9_codecs),
@@ -2071,7 +2128,10 @@ static const struct coda_devtype coda_devdata[] = {
.iram_size = 0x21000,
},
[CODA_IMX6DL] = {
- .firmware = "v4l-coda960-imx6dl.bin",
+ .firmware = {
+ "vpu_fw_imx6d.bin",
+ "v4l-coda960-imx6dl.bin"
+ },
.product = CODA_960,
.codecs = coda9_codecs,
.num_codecs = ARRAY_SIZE(coda9_codecs),
@@ -2118,14 +2178,12 @@ static int coda_probe(struct platform_device *pdev)
pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
- if (of_id) {
+ if (of_id)
dev->devtype = of_id->data;
- } else if (pdev_id) {
+ else if (pdev_id)
dev->devtype = &coda_devdata[pdev_id->driver_data];
- } else {
- ret = -EINVAL;
- goto err_v4l2_register;
- }
+ else
+ return -EINVAL;
spin_lock_init(&dev->irqlock);
INIT_LIST_HEAD(&dev->instances);
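A distilled sketch of the two-name firmware fallback wired up above: the async callback re-arms the request with the next name on failure, using the dev->firmware index as a cursor into devtype->firmware[]. This restates the patch's own flow with the control path written out:

    static void fw_callback_sketch(const struct firmware *fw, void *context)
    {
            struct coda_dev *dev = context;

            if (!fw && dev->firmware == 0) {
                    dev->firmware = 1;              /* try the legacy name next */
                    coda_firmware_request(dev);
                    return;
            }
            if (!fw)
                    return;                         /* both names failed */
            /* ... copy fw->data, then release_firmware(fw) ... */
    }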
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index d08e9843e9f2..8f2c71e06966 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -50,7 +50,7 @@ enum coda_product {
struct coda_video_device;
struct coda_devtype {
- char *firmware;
+ char *firmware[2];
enum coda_product product;
const struct coda_codec *codecs;
unsigned int num_codecs;
@@ -74,6 +74,7 @@ struct coda_dev {
struct video_device vfd[5];
struct platform_device *plat_dev;
const struct coda_devtype *devtype;
+ int firmware;
void __iomem *regs_base;
struct clk *clk_per;
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index e85649147dc8..dc1b929f7a33 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -992,10 +992,6 @@ static int fimc_lite_link_setup(struct media_entity *entity,
switch (local->index) {
case FLITE_SD_PAD_SINK:
- if (!is_media_entity_v4l2_subdev(remote->entity)) {
- ret = -EINVAL;
- break;
- }
if (flags & MEDIA_LNK_FL_ENABLED) {
if (fimc->source_subdev_grp_id == 0)
fimc->source_subdev_grp_id = sd->grp_id;
@@ -1010,19 +1006,15 @@ static int fimc_lite_link_setup(struct media_entity *entity,
case FLITE_SD_PAD_SOURCE_DMA:
if (!(flags & MEDIA_LNK_FL_ENABLED))
atomic_set(&fimc->out_path, FIMC_IO_NONE);
- else if (is_media_entity_v4l2_io(remote->entity))
- atomic_set(&fimc->out_path, FIMC_IO_DMA);
else
- ret = -EINVAL;
+ atomic_set(&fimc->out_path, FIMC_IO_DMA);
break;
case FLITE_SD_PAD_SOURCE_ISP:
if (!(flags & MEDIA_LNK_FL_ENABLED))
atomic_set(&fimc->out_path, FIMC_IO_NONE);
- else if (is_media_entity_v4l2_subdev(remote->entity))
- atomic_set(&fimc->out_path, FIMC_IO_ISP);
else
- ret = -EINVAL;
+ atomic_set(&fimc->out_path, FIMC_IO_ISP);
break;
default:
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index f9e5245f26ac..5d54e2c6c16b 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -64,6 +64,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-mc.h>
#include <media/v4l2-of.h>
#include "isp.h"
@@ -657,216 +658,6 @@ static irqreturn_t isp_isr(int irq, void *_isp)
}
/* -----------------------------------------------------------------------------
- * Pipeline power management
- *
- * Entities must be powered up when part of a pipeline that contains at least
- * one open video device node.
- *
- * To achieve this use the entity use_count field to track the number of users.
- * For entities corresponding to video device nodes the use_count field stores
- * the users count of the node. For entities corresponding to subdevs the
- * use_count field stores the total number of users of all video device nodes
- * in the pipeline.
- *
- * The omap3isp_pipeline_pm_use() function must be called in the open() and
- * close() handlers of video device nodes. It increments or decrements the use
- * count of all subdev entities in the pipeline.
- *
- * To react to link management on powered pipelines, the link setup notification
- * callback updates the use count of all entities in the source and sink sides
- * of the link.
- */
-
-/*
- * isp_pipeline_pm_use_count - Count the number of users of a pipeline
- * @entity: The entity
- *
- * Return the total number of users of all video device nodes in the pipeline.
- */
-static int isp_pipeline_pm_use_count(struct media_entity *entity,
- struct media_entity_graph *graph)
-{
- int use = 0;
-
- media_entity_graph_walk_start(graph, entity);
-
- while ((entity = media_entity_graph_walk_next(graph))) {
- if (is_media_entity_v4l2_io(entity))
- use += entity->use_count;
- }
-
- return use;
-}
-
-/*
- * isp_pipeline_pm_power_one - Apply power change to an entity
- * @entity: The entity
- * @change: Use count change
- *
- * Change the entity use count by @change. If the entity is a subdev update its
- * power state by calling the core::s_power operation when the use count goes
- * from 0 to != 0 or from != 0 to 0.
- *
- * Return 0 on success or a negative error code on failure.
- */
-static int isp_pipeline_pm_power_one(struct media_entity *entity, int change)
-{
- struct v4l2_subdev *subdev;
- int ret;
-
- subdev = is_media_entity_v4l2_subdev(entity)
- ? media_entity_to_v4l2_subdev(entity) : NULL;
-
- if (entity->use_count == 0 && change > 0 && subdev != NULL) {
- ret = v4l2_subdev_call(subdev, core, s_power, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- entity->use_count += change;
- WARN_ON(entity->use_count < 0);
-
- if (entity->use_count == 0 && change < 0 && subdev != NULL)
- v4l2_subdev_call(subdev, core, s_power, 0);
-
- return 0;
-}
-
-/*
- * isp_pipeline_pm_power - Apply power change to all entities in a pipeline
- * @entity: The entity
- * @change: Use count change
- *
- * Walk the pipeline to update the use count and the power state of all non-node
- * entities.
- *
- * Return 0 on success or a negative error code on failure.
- */
-static int isp_pipeline_pm_power(struct media_entity *entity, int change,
- struct media_entity_graph *graph)
-{
- struct media_entity *first = entity;
- int ret = 0;
-
- if (!change)
- return 0;
-
- media_entity_graph_walk_start(graph, entity);
-
- while (!ret && (entity = media_entity_graph_walk_next(graph)))
- if (is_media_entity_v4l2_subdev(entity))
- ret = isp_pipeline_pm_power_one(entity, change);
-
- if (!ret)
- return ret;
-
- media_entity_graph_walk_start(graph, first);
-
- while ((first = media_entity_graph_walk_next(graph))
- && first != entity)
- if (is_media_entity_v4l2_subdev(first))
- isp_pipeline_pm_power_one(first, -change);
-
- return ret;
-}
-
-/*
- * omap3isp_pipeline_pm_use - Update the use count of an entity
- * @entity: The entity
- * @use: Use (1) or stop using (0) the entity
- *
- * Update the use count of all entities in the pipeline and power entities on or
- * off accordingly.
- *
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. No failure can occur when the use parameter is
- * set to 0.
- */
-int omap3isp_pipeline_pm_use(struct media_entity *entity, int use,
- struct media_entity_graph *graph)
-{
- int change = use ? 1 : -1;
- int ret;
-
- mutex_lock(&entity->graph_obj.mdev->graph_mutex);
-
- /* Apply use count to node. */
- entity->use_count += change;
- WARN_ON(entity->use_count < 0);
-
- /* Apply power change to connected non-nodes. */
- ret = isp_pipeline_pm_power(entity, change, graph);
- if (ret < 0)
- entity->use_count -= change;
-
- mutex_unlock(&entity->graph_obj.mdev->graph_mutex);
-
- return ret;
-}
-
-/*
- * isp_pipeline_link_notify - Link management notification callback
- * @link: The link
- * @flags: New link flags that will be applied
- * @notification: The link's state change notification type (MEDIA_DEV_NOTIFY_*)
- *
- * React to link management on powered pipelines by updating the use count of
- * all entities in the source and sink sides of the link. Entities are powered
- * on or off accordingly.
- *
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. This function will not fail for disconnection
- * events.
- */
-static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct media_entity_graph *graph =
- &container_of(link->graph_obj.mdev, struct isp_device,
- media_dev)->pm_count_graph;
- struct media_entity *source = link->source->entity;
- struct media_entity *sink = link->sink->entity;
- int source_use;
- int sink_use;
- int ret = 0;
-
- if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) {
- ret = media_entity_graph_walk_init(graph,
- link->graph_obj.mdev);
- if (ret)
- return ret;
- }
-
- source_use = isp_pipeline_pm_use_count(source, graph);
- sink_use = isp_pipeline_pm_use_count(sink, graph);
-
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
- !(flags & MEDIA_LNK_FL_ENABLED)) {
- /* Powering off entities is assumed to never fail. */
- isp_pipeline_pm_power(source, -sink_use, graph);
- isp_pipeline_pm_power(sink, -source_use, graph);
- return 0;
- }
-
- if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
- (flags & MEDIA_LNK_FL_ENABLED)) {
-
- ret = isp_pipeline_pm_power(source, sink_use, graph);
- if (ret < 0)
- return ret;
-
- ret = isp_pipeline_pm_power(sink, source_use, graph);
- if (ret < 0)
- isp_pipeline_pm_power(source, -sink_use, graph);
- }
-
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH)
- media_entity_graph_walk_cleanup(graph);
-
- return ret;
-}
-
-/* -----------------------------------------------------------------------------
* Pipeline stream management
*/
@@ -1889,7 +1680,7 @@ static int isp_register_entities(struct isp_device *isp)
strlcpy(isp->media_dev.model, "TI OMAP3 ISP",
sizeof(isp->media_dev.model));
isp->media_dev.hw_revision = isp->revision;
- isp->media_dev.link_notify = isp_pipeline_link_notify;
+ isp->media_dev.link_notify = v4l2_pipeline_link_notify;
media_device_init(&isp->media_dev);
isp->v4l2_dev.mdev = &isp->media_dev;
diff --git a/drivers/media/platform/omap3isp/isp.h b/drivers/media/platform/omap3isp/isp.h
index 49b7f71ac968..7e6f6638433b 100644
--- a/drivers/media/platform/omap3isp/isp.h
+++ b/drivers/media/platform/omap3isp/isp.h
@@ -177,7 +177,6 @@ struct isp_device {
struct v4l2_device v4l2_dev;
struct v4l2_async_notifier notifier;
struct media_device media_dev;
- struct media_entity_graph pm_count_graph;
struct device *dev;
u32 revision;
@@ -267,9 +266,6 @@ void omap3isp_subclk_enable(struct isp_device *isp,
void omap3isp_subclk_disable(struct isp_device *isp,
enum isp_subclk_resource res);
-int omap3isp_pipeline_pm_use(struct media_entity *entity, int use,
- struct media_entity_graph *graph);
-
int omap3isp_register_entities(struct platform_device *pdev,
struct v4l2_device *v4l2_dev);
void omap3isp_unregister_entities(struct platform_device *pdev);
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 2aff755ff77c..ac76d2901501 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -22,8 +22,10 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>
#include "ispvideo.h"
@@ -1292,12 +1294,7 @@ static int isp_video_open(struct file *file)
goto done;
}
- ret = media_entity_graph_walk_init(&handle->graph,
- &video->isp->media_dev);
- if (ret)
- goto done;
-
- ret = omap3isp_pipeline_pm_use(&video->video.entity, 1, &handle->graph);
+ ret = v4l2_pipeline_pm_use(&video->video.entity, 1);
if (ret < 0) {
omap3isp_put(video->isp);
goto done;
@@ -1328,7 +1325,6 @@ static int isp_video_open(struct file *file)
done:
if (ret < 0) {
v4l2_fh_del(&handle->vfh);
- media_entity_graph_walk_cleanup(&handle->graph);
kfree(handle);
}
@@ -1348,8 +1344,7 @@ static int isp_video_release(struct file *file)
vb2_queue_release(&handle->queue);
mutex_unlock(&video->queue_lock);
- omap3isp_pipeline_pm_use(&video->video.entity, 0, &handle->graph);
- media_entity_graph_walk_cleanup(&handle->graph);
+ v4l2_pipeline_pm_use(&video->video.entity, 0);
/* Release the file handle. */
v4l2_fh_del(vfh);
diff --git a/drivers/media/platform/omap3isp/ispvideo.h b/drivers/media/platform/omap3isp/ispvideo.h
index 156429878d64..6a48d5879c56 100644
--- a/drivers/media/platform/omap3isp/ispvideo.h
+++ b/drivers/media/platform/omap3isp/ispvideo.h
@@ -189,7 +189,6 @@ struct isp_video_fh {
struct vb2_queue queue;
struct v4l2_format format;
struct v4l2_fract timeperframe;
- struct media_entity_graph graph;
};
#define to_isp_video_fh(fh) container_of(fh, struct isp_video_fh, vfh)
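With the pipeline PM walker moved into the v4l2 core, a video node's open/close handlers shrink to a pair of v4l2_pipeline_pm_use() calls. A minimal sketch; per the removed omap3isp documentation, powering off with use == 0 is assumed never to fail:

    static int my_video_open(struct file *file)
    {
            struct video_device *vdev = video_devdata(file);
            int ret;

            ret = v4l2_pipeline_pm_use(&vdev->entity, 1);   /* power pipeline up */
            if (ret < 0)
                    return ret;
            /* ... */
            return 0;
    }

    static int my_video_release(struct file *file)
    {
            struct video_device *vdev = video_devdata(file);

            v4l2_pipeline_pm_use(&vdev->entity, 0);         /* cannot fail */
            return 0;
    }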
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index f2776cd415ca..355298989dd8 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -17,19 +17,11 @@ config SOC_CAMERA_PLATFORM
help
This is a generic SoC camera platform driver, useful for testing
-config VIDEO_MX3
- tristate "i.MX3x Camera Sensor Interface driver"
- depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
- depends on MX3_IPU || COMPILE_TEST
- depends on HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- ---help---
- This is a v4l2 driver for the i.MX3x Camera Sensor Interface
-
config VIDEO_PXA27x
tristate "PXA27x Quick Capture Interface driver"
depends on VIDEO_DEV && PXA27x && SOC_CAMERA
select VIDEOBUF_DMA_SG
+ select SG_SPLIT
---help---
This is a v4l2 driver for the PXA27x Quick Capture Interface
@@ -60,25 +52,6 @@ config VIDEO_SH_MOBILE_CEU
---help---
This is a v4l2 driver for the SuperH Mobile CEU Interface
-config VIDEO_OMAP1
- tristate "OMAP1 Camera Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA
- depends on ARCH_OMAP1
- depends on HAS_DMA
- select VIDEOBUF_DMA_CONTIG
- select VIDEOBUF_DMA_SG
- ---help---
- This is a v4l2 driver for the TI OMAP1 camera interface
-
-config VIDEO_MX2
- tristate "i.MX27 Camera Sensor Interface driver"
- depends on VIDEO_DEV && SOC_CAMERA
- depends on SOC_IMX27 || COMPILE_TEST
- depends on HAS_DMA
- select VIDEOBUF2_DMA_CONTIG
- ---help---
- This is a v4l2 driver for the i.MX27 Camera Sensor Interface
-
config VIDEO_ATMEL_ISI
tristate "ATMEL Image Sensor Interface (ISI) support"
depends on VIDEO_DEV && SOC_CAMERA
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
index 2826382dc9f8..7ee71ae231c7 100644
--- a/drivers/media/platform/soc_camera/Makefile
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -7,9 +7,6 @@ obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o
# soc-camera host drivers have to be linked after camera drivers
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel-isi.o
-obj-$(CONFIG_VIDEO_MX2) += mx2_camera.o
-obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
-obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 1af779ee3c74..ab2d9b9b1f5d 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -1026,7 +1026,7 @@ static int atmel_isi_parse_dt(struct atmel_isi *isi,
static int atmel_isi_probe(struct platform_device *pdev)
{
- unsigned int irq;
+ int irq;
struct atmel_isi *isi;
struct resource *regs;
int ret, i;
@@ -1086,7 +1086,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
isi->width_flags |= 1 << 9;
irq = platform_get_irq(pdev, 0);
- if (IS_ERR_VALUE(irq)) {
+ if (irq < 0) {
ret = irq;
goto err_req_irq;
}
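Why the irq variable becomes a signed int: platform_get_irq() reports failure as a negative errno, which an unsigned variable would wrap into a large "valid-looking" number, and on 64-bit builds an unsigned int never matches IS_ERR_VALUE() after zero extension. A minimal sketch of the corrected check:

    static int get_isi_irq(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);    /* int, not unsigned int */

            if (irq < 0)
                    return irq;                     /* propagate the errno */
            return irq;
    }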
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 415f3bda60bf..2aaf4a8f71a0 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -28,6 +28,9 @@
#include <linux/clk.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
@@ -38,7 +41,6 @@
#include <linux/videodev2.h>
-#include <mach/dma.h>
#include <linux/platform_data/media/camera-pxa.h>
#define PXA_CAM_VERSION "0.0.6"
@@ -175,21 +177,16 @@ enum pxa_camera_active_dma {
DMA_V = 0x4,
};
-/* descriptor needed for the PXA DMA engine */
-struct pxa_cam_dma {
- dma_addr_t sg_dma;
- struct pxa_dma_desc *sg_cpu;
- size_t sg_size;
- int sglen;
-};
-
/* buffer for one video frame */
struct pxa_buffer {
/* common v4l buffer stuff -- must be first */
struct videobuf_buffer vb;
u32 code;
/* our descriptor lists for Y, U and V channels */
- struct pxa_cam_dma dmas[3];
+ struct dma_async_tx_descriptor *descs[3];
+ dma_cookie_t cookie[3];
+ struct scatterlist *sg[3];
+ int sg_len[3];
int inwork;
enum pxa_camera_active_dma active_dma;
};
@@ -207,7 +204,7 @@ struct pxa_camera_dev {
void __iomem *base;
int channels;
- unsigned int dma_chans[3];
+ struct dma_chan *dma_chans[3];
struct pxacamera_platform_data *pdata;
struct resource *res;
@@ -222,7 +219,7 @@ struct pxa_camera_dev {
spinlock_t lock;
struct pxa_buffer *active;
- struct pxa_dma_desc *sg_tail[3];
+ struct tasklet_struct task_eof;
u32 save_cicr[5];
};
@@ -258,7 +255,6 @@ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count,
static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
{
struct soc_camera_device *icd = vq->priv_data;
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
int i;
@@ -272,42 +268,45 @@ static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf)
* longer in STATE_QUEUED or STATE_ACTIVE
*/
videobuf_waiton(vq, &buf->vb, 0, 0);
- videobuf_dma_unmap(vq->dev, dma);
- videobuf_dma_free(dma);
- for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) {
- if (buf->dmas[i].sg_cpu)
- dma_free_coherent(ici->v4l2_dev.dev,
- buf->dmas[i].sg_size,
- buf->dmas[i].sg_cpu,
- buf->dmas[i].sg_dma);
- buf->dmas[i].sg_cpu = NULL;
+ for (i = 0; i < 3 && buf->descs[i]; i++) {
+ dmaengine_desc_free(buf->descs[i]);
+ kfree(buf->sg[i]);
+ buf->descs[i] = NULL;
+ buf->sg[i] = NULL;
+ buf->sg_len[i] = 0;
}
+ videobuf_dma_unmap(vq->dev, dma);
+ videobuf_dma_free(dma);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
+
+ dev_dbg(icd->parent, "%s end (vb=0x%p) 0x%08lx %d\n", __func__,
+ &buf->vb, buf->vb.baddr, buf->vb.bsize);
}
-static int calculate_dma_sglen(struct scatterlist *sglist, int sglen,
- int sg_first_ofs, int size)
+static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
+ enum pxa_camera_active_dma act_dma);
+
+static void pxa_camera_dma_irq_y(void *data)
{
- int i, offset, dma_len, xfer_len;
- struct scatterlist *sg;
+ struct pxa_camera_dev *pcdev = data;
- offset = sg_first_ofs;
- for_each_sg(sglist, sg, sglen, i) {
- dma_len = sg_dma_len(sg);
+ pxa_camera_dma_irq(pcdev, DMA_Y);
+}
- /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */
- xfer_len = roundup(min(dma_len - offset, size), 8);
+static void pxa_camera_dma_irq_u(void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
- size = max(0, size - xfer_len);
- offset = 0;
- if (size == 0)
- break;
- }
+ pxa_camera_dma_irq(pcdev, DMA_U);
+}
+
+static void pxa_camera_dma_irq_v(void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
- BUG_ON(size != 0);
- return i + 1;
+ pxa_camera_dma_irq(pcdev, DMA_V);
}
/**
@@ -318,93 +317,53 @@ static int calculate_dma_sglen(struct scatterlist *sglist, int sglen,
* @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V')
* @cibr: camera Receive Buffer Register
* @size: bytes to transfer
- * @sg_first: first element of sg_list
- * @sg_first_ofs: offset in first element of sg_list
+ * @offset: offset in videobuffer of the first byte to transfer
*
* Prepares the pxa dma descriptors to transfer one camera channel.
- * Beware sg_first and sg_first_ofs are both input and output parameters.
*
- * Returns 0 or -ENOMEM if no coherent memory is available
+ * Returns 0 on success or -ENOMEM if no memory is available
*/
static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev,
struct pxa_buffer *buf,
struct videobuf_dmabuf *dma, int channel,
- int cibr, int size,
- struct scatterlist **sg_first, int *sg_first_ofs)
+ int cibr, int size, int offset)
{
- struct pxa_cam_dma *pxa_dma = &buf->dmas[channel];
- struct device *dev = pcdev->soc_host.v4l2_dev.dev;
- struct scatterlist *sg;
- int i, offset, sglen;
- int dma_len = 0, xfer_len = 0;
-
- if (pxa_dma->sg_cpu)
- dma_free_coherent(dev, pxa_dma->sg_size,
- pxa_dma->sg_cpu, pxa_dma->sg_dma);
-
- sglen = calculate_dma_sglen(*sg_first, dma->sglen,
- *sg_first_ofs, size);
-
- pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc);
- pxa_dma->sg_cpu = dma_alloc_coherent(dev, pxa_dma->sg_size,
- &pxa_dma->sg_dma, GFP_KERNEL);
- if (!pxa_dma->sg_cpu)
- return -ENOMEM;
-
- pxa_dma->sglen = sglen;
- offset = *sg_first_ofs;
-
- dev_dbg(dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n",
- *sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma);
-
-
- for_each_sg(*sg_first, sg, sglen, i) {
- dma_len = sg_dma_len(sg);
-
- /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */
- xfer_len = roundup(min(dma_len - offset, size), 8);
-
- size = max(0, size - xfer_len);
-
- pxa_dma->sg_cpu[i].dsadr = pcdev->res->start + cibr;
- pxa_dma->sg_cpu[i].dtadr = sg_dma_address(sg) + offset;
- pxa_dma->sg_cpu[i].dcmd =
- DCMD_FLOWSRC | DCMD_BURST8 | DCMD_INCTRGADDR | xfer_len;
-#ifdef DEBUG
- if (!i)
- pxa_dma->sg_cpu[i].dcmd |= DCMD_STARTIRQEN;
-#endif
- pxa_dma->sg_cpu[i].ddadr =
- pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc);
-
- dev_vdbg(dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n",
- pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc),
- sg_dma_address(sg) + offset, xfer_len);
- offset = 0;
-
- if (size == 0)
- break;
+ struct dma_chan *dma_chan = pcdev->dma_chans[channel];
+ struct scatterlist *sg = buf->sg[channel];
+ int sglen = buf->sg_len[channel];
+ struct dma_async_tx_descriptor *tx;
+
+ tx = dmaengine_prep_slave_sg(dma_chan, sg, sglen, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
+ if (!tx) {
+ dev_err(pcdev->soc_host.v4l2_dev.dev,
+ "dmaengine_prep_slave_sg failed\n");
+ goto fail;
}
- pxa_dma->sg_cpu[sglen].ddadr = DDADR_STOP;
- pxa_dma->sg_cpu[sglen].dcmd = DCMD_FLOWSRC | DCMD_BURST8 | DCMD_ENDIRQEN;
-
- /*
- * Handle 1 special case :
- * - in 3 planes (YUV422P format), we might finish with xfer_len equal
- * to dma_len (end on PAGE boundary). In this case, the sg element
- * for next plane should be the next after the last used to store the
- * last scatter gather RAM page
- */
- if (xfer_len >= dma_len) {
- *sg_first_ofs = xfer_len - dma_len;
- *sg_first = sg_next(sg);
- } else {
- *sg_first_ofs = xfer_len;
- *sg_first = sg;
+ tx->callback_param = pcdev;
+ switch (channel) {
+ case 0:
+ tx->callback = pxa_camera_dma_irq_y;
+ break;
+ case 1:
+ tx->callback = pxa_camera_dma_irq_u;
+ break;
+ case 2:
+ tx->callback = pxa_camera_dma_irq_v;
+ break;
}
+ buf->descs[channel] = tx;
return 0;
+fail:
+ kfree(sg);
+
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "%s (vb=0x%p) dma_tx=%p\n",
+ __func__, &buf->vb, tx);
+
+ return -ENOMEM;
}
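The rewritten pxa_init_dma_channel() above follows the standard dmaengine slave sequence: prepare a descriptor, attach a completion callback, submit, then issue. A minimal sketch of that sequence, under assumed names (queue_capture() and its parameters are illustrative, not part of this patch):

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Sketch only: "chan" is assumed to be a slave channel already set up
 * with dmaengine_slave_config(); "sgl"/"sglen" describe a DMA-mapped
 * buffer. */
static int queue_capture(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sglen, dma_async_tx_callback done,
			 void *ctx)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Device-to-memory transfer; DMA_CTRL_REUSE lets the same
	 * descriptor be resubmitted for every frame. */
	tx = dmaengine_prep_slave_sg(chan, sgl, sglen, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
	if (!tx)
		return -ENOMEM;

	tx->callback = done;			/* completion handler */
	tx->callback_param = ctx;

	cookie = dmaengine_submit(tx);		/* queue on the channel */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* start the engine */
	return 0;
}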
static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
@@ -431,6 +390,7 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb);
int ret;
int size_y, size_u = 0, size_v = 0;
+ size_t sizes[3];
dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
vb, vb->baddr, vb->bsize);
@@ -473,13 +433,11 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
if (vb->state == VIDEOBUF_NEEDS_INIT) {
int size = vb->size;
- int next_ofs = 0;
struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
- struct scatterlist *sg;
ret = videobuf_iolock(vq, vb, NULL);
if (ret)
- goto fail;
+ goto out;
if (pcdev->channels == 3) {
size_y = size / 2;
@@ -488,11 +446,19 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
size_y = size;
}
- sg = dma->sglist;
+ sizes[0] = size_y;
+ sizes[1] = size_u;
+ sizes[2] = size_v;
+ ret = sg_split(dma->sglist, dma->sglen, 0, pcdev->channels,
+ sizes, buf->sg, buf->sg_len, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(dev, "sg_split failed: %d\n", ret);
+ goto fail;
+ }
/* init DMA for Y channel */
- ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y,
- &sg, &next_ofs);
+ ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0,
+ size_y, 0);
if (ret) {
dev_err(dev, "DMA initialization for Y/RGB failed\n");
goto fail;
@@ -501,19 +467,19 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
/* init DMA for U channel */
if (size_u)
ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1,
- size_u, &sg, &next_ofs);
+ size_u, size_y);
if (ret) {
dev_err(dev, "DMA initialization for U failed\n");
- goto fail_u;
+ goto fail;
}
/* init DMA for V channel */
if (size_v)
ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2,
- size_v, &sg, &next_ofs);
+ size_v, size_y + size_u);
if (ret) {
dev_err(dev, "DMA initialization for V failed\n");
- goto fail_v;
+ goto fail;
}
vb->state = VIDEOBUF_PREPARED;
@@ -524,12 +490,6 @@ static int pxa_videobuf_prepare(struct videobuf_queue *vq,
return 0;
-fail_v:
- dma_free_coherent(dev, buf->dmas[1].sg_size,
- buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma);
-fail_u:
- dma_free_coherent(dev, buf->dmas[0].sg_size,
- buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma);
fail:
free_buffer(vq, buf);
out:
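The sg_split() call above replaces the hand-rolled descriptor walk: one mapped scatterlist is cut into per-plane sub-lists. A hedged sketch with illustrative names (split_planes() is not part of the patch):

#include <linux/scatterlist.h>

/* Sketch only: cut one mapped scatterlist into Y/U/V sub-lists, the
 * way pxa_videobuf_prepare() does above. */
static int split_planes(struct scatterlist *sglist, int sglen,
			size_t y, size_t u, size_t v,
			struct scatterlist *out[3], int out_nents[3])
{
	size_t sizes[3] = { y, u, v };

	/* sg_split() allocates the three output tables; each out[i]
	 * covers sizes[i] consecutive bytes of the input list and must
	 * later be kfree()d, as free_buffer() does. */
	return sg_split(sglist, sglen, 0, 3, sizes, out, out_nents,
			GFP_KERNEL);
}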
@@ -553,10 +513,8 @@ static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev)
for (i = 0; i < pcdev->channels; i++) {
dev_dbg(pcdev->soc_host.v4l2_dev.dev,
- "%s (channel=%d) ddadr=%08x\n", __func__,
- i, active->dmas[i].sg_dma);
- DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma;
- DCSR(pcdev->dma_chans[i]) = DCSR_RUN;
+ "%s (channel=%d)\n", __func__, i);
+ dma_async_issue_pending(pcdev->dma_chans[i]);
}
}
@@ -567,7 +525,7 @@ static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev)
for (i = 0; i < pcdev->channels; i++) {
dev_dbg(pcdev->soc_host.v4l2_dev.dev,
"%s (channel=%d)\n", __func__, i);
- DCSR(pcdev->dma_chans[i]) = 0;
+ dmaengine_terminate_all(pcdev->dma_chans[i]);
}
}
@@ -575,18 +533,12 @@ static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev,
struct pxa_buffer *buf)
{
int i;
- struct pxa_dma_desc *buf_last_desc;
for (i = 0; i < pcdev->channels; i++) {
- buf_last_desc = buf->dmas[i].sg_cpu + buf->dmas[i].sglen;
- buf_last_desc->ddadr = DDADR_STOP;
-
- if (pcdev->sg_tail[i])
- /* Link the new buffer to the old tail */
- pcdev->sg_tail[i]->ddadr = buf->dmas[i].sg_dma;
-
- /* Update the channel tail */
- pcdev->sg_tail[i] = buf_last_desc;
+ buf->cookie[i] = dmaengine_submit(buf->descs[i]);
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "%s (channel=%d) : submit vb=%p cookie=%d\n",
+ __func__, i, buf, buf->descs[i]->cookie);
}
}
@@ -603,6 +555,7 @@ static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev)
unsigned long cicr0;
dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__);
+ __raw_writel(__raw_readl(pcdev->base + CISR), pcdev->base + CISR);
/* Enable End-Of-Frame Interrupt */
cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB;
cicr0 &= ~CICR0_EOFM;
@@ -677,8 +630,6 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
struct videobuf_buffer *vb,
struct pxa_buffer *buf)
{
- int i;
-
/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
list_del_init(&vb->queue);
vb->state = VIDEOBUF_DONE;
@@ -690,8 +641,6 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
if (list_empty(&pcdev->capture)) {
pxa_camera_stop_capture(pcdev);
- for (i = 0; i < pcdev->channels; i++)
- pcdev->sg_tail[i] = NULL;
return;
}
@@ -715,50 +664,41 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
*
* Context: should only be called within the dma irq handler
*/
-static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev)
+static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev,
+ dma_cookie_t last_submitted,
+ dma_cookie_t last_issued)
{
- int i, is_dma_stopped = 1;
+ bool is_dma_stopped = last_submitted != last_issued;
- for (i = 0; i < pcdev->channels; i++)
- if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP)
- is_dma_stopped = 0;
dev_dbg(pcdev->soc_host.v4l2_dev.dev,
- "%s : top queued buffer=%p, dma_stopped=%d\n",
+ "%s : top queued buffer=%p, is_dma_stopped=%d\n",
__func__, pcdev->active, is_dma_stopped);
+
if (pcdev->active && is_dma_stopped)
pxa_camera_start_capture(pcdev);
}
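The cookie pair handed to the function above comes from dma_async_is_tx_complete(). A sketch of that bookkeeping, with an illustrative helper name:

#include <linux/dmaengine.h>

/* Sketch only: the third argument returns the last completed cookie
 * (unused here), the fourth the last cookie issued to hardware. */
static bool dma_chan_stalled(struct dma_chan *chan,
			     dma_cookie_t last_submitted)
{
	dma_cookie_t last_issued;

	dma_async_is_tx_complete(chan, last_submitted, NULL, &last_issued);

	/* Descriptors were submitted but never issued: the channel
	 * stopped early and capture has to be restarted. */
	return last_submitted != last_issued;
}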
-static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
+static void pxa_camera_dma_irq(struct pxa_camera_dev *pcdev,
enum pxa_camera_active_dma act_dma)
{
struct device *dev = pcdev->soc_host.v4l2_dev.dev;
- struct pxa_buffer *buf;
+ struct pxa_buffer *buf, *last_buf;
unsigned long flags;
- u32 status, camera_status, overrun;
+ u32 camera_status, overrun;
+ int chan;
struct videobuf_buffer *vb;
+ enum dma_status last_status;
+ dma_cookie_t last_issued;
spin_lock_irqsave(&pcdev->lock, flags);
- status = DCSR(channel);
- DCSR(channel) = status;
-
camera_status = __raw_readl(pcdev->base + CISR);
+ dev_dbg(dev, "camera dma irq, cisr=0x%x dma=%d\n",
+ camera_status, act_dma);
overrun = CISR_IFO_0;
if (pcdev->channels == 3)
overrun |= CISR_IFO_1 | CISR_IFO_2;
- if (status & DCSR_BUSERR) {
- dev_err(dev, "DMA Bus Error IRQ!\n");
- goto out;
- }
-
- if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) {
- dev_err(dev, "Unknown DMA IRQ source, status: 0x%08x\n",
- status);
- goto out;
- }
-
/*
* pcdev->active should not be NULL in DMA irq handler.
*
@@ -778,52 +718,47 @@ static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev,
buf = container_of(vb, struct pxa_buffer, vb);
WARN_ON(buf->inwork || list_empty(&vb->queue));
- dev_dbg(dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n",
- __func__, channel, status & DCSR_STARTINTR ? "SOF " : "",
- status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel));
-
- if (status & DCSR_ENDINTR) {
- /*
- * It's normal if the last frame creates an overrun, as there
- * are no more DMA descriptors to fetch from QCI fifos
- */
- if (camera_status & overrun &&
- !list_is_last(pcdev->capture.next, &pcdev->capture)) {
- dev_dbg(dev, "FIFO overrun! CISR: %x\n",
- camera_status);
- pxa_camera_stop_capture(pcdev);
- pxa_camera_start_capture(pcdev);
- goto out;
- }
- buf->active_dma &= ~act_dma;
- if (!buf->active_dma) {
- pxa_camera_wakeup(pcdev, vb, buf);
- pxa_camera_check_link_miss(pcdev);
- }
+ /*
+ * It's normal if the last frame creates an overrun, as there
+ * are no more DMA descriptors to fetch from QCI fifos
+ */
+ switch (act_dma) {
+ case DMA_U:
+ chan = 1;
+ break;
+ case DMA_V:
+ chan = 2;
+ break;
+ default:
+ chan = 0;
+ break;
+ }
+ last_buf = list_entry(pcdev->capture.prev,
+ struct pxa_buffer, vb.queue);
+ last_status = dma_async_is_tx_complete(pcdev->dma_chans[chan],
+ last_buf->cookie[chan],
+ NULL, &last_issued);
+ if (camera_status & overrun &&
+ last_status != DMA_COMPLETE) {
+ dev_dbg(dev, "FIFO overrun! CISR: %x\n",
+ camera_status);
+ pxa_camera_stop_capture(pcdev);
+ list_for_each_entry(buf, &pcdev->capture, vb.queue)
+ pxa_dma_add_tail_buf(pcdev, buf);
+ pxa_camera_start_capture(pcdev);
+ goto out;
+ }
+ buf->active_dma &= ~act_dma;
+ if (!buf->active_dma) {
+ pxa_camera_wakeup(pcdev, vb, buf);
+ pxa_camera_check_link_miss(pcdev, last_buf->cookie[chan],
+ last_issued);
}
out:
spin_unlock_irqrestore(&pcdev->lock, flags);
}
-static void pxa_camera_dma_irq_y(int channel, void *data)
-{
- struct pxa_camera_dev *pcdev = data;
- pxa_camera_dma_irq(channel, pcdev, DMA_Y);
-}
-
-static void pxa_camera_dma_irq_u(int channel, void *data)
-{
- struct pxa_camera_dev *pcdev = data;
- pxa_camera_dma_irq(channel, pcdev, DMA_U);
-}
-
-static void pxa_camera_dma_irq_v(int channel, void *data)
-{
- struct pxa_camera_dev *pcdev = data;
- pxa_camera_dma_irq(channel, pcdev, DMA_V);
-}
-
static struct videobuf_queue_ops pxa_videobuf_ops = {
.buf_setup = pxa_videobuf_setup,
.buf_prepare = pxa_videobuf_prepare,
@@ -920,13 +855,35 @@ static void pxa_camera_deactivate(struct pxa_camera_dev *pcdev)
clk_disable_unprepare(pcdev->clk);
}
-static irqreturn_t pxa_camera_irq(int irq, void *data)
+static void pxa_camera_eof(unsigned long arg)
{
- struct pxa_camera_dev *pcdev = data;
- unsigned long status, cifr, cicr0;
+ struct pxa_camera_dev *pcdev = (struct pxa_camera_dev *)arg;
+ unsigned long cifr;
struct pxa_buffer *buf;
struct videobuf_buffer *vb;
+ dev_dbg(pcdev->soc_host.v4l2_dev.dev,
+ "Camera interrupt status 0x%x\n",
+ __raw_readl(pcdev->base + CISR));
+
+ /* Reset the FIFOs */
+ cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
+ __raw_writel(cifr, pcdev->base + CIFR);
+
+ pcdev->active = list_first_entry(&pcdev->capture,
+ struct pxa_buffer, vb.queue);
+ vb = &pcdev->active->vb;
+ buf = container_of(vb, struct pxa_buffer, vb);
+ pxa_videobuf_set_actdma(pcdev, buf);
+
+ pxa_dma_start_channels(pcdev);
+}
+
+static irqreturn_t pxa_camera_irq(int irq, void *data)
+{
+ struct pxa_camera_dev *pcdev = data;
+ unsigned long status, cicr0;
+
status = __raw_readl(pcdev->base + CISR);
dev_dbg(pcdev->soc_host.v4l2_dev.dev,
"Camera interrupt status 0x%lx\n", status);
@@ -937,20 +894,9 @@ static irqreturn_t pxa_camera_irq(int irq, void *data)
__raw_writel(status, pcdev->base + CISR);
if (status & CISR_EOF) {
- /* Reset the FIFOs */
- cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F;
- __raw_writel(cifr, pcdev->base + CIFR);
-
- pcdev->active = list_first_entry(&pcdev->capture,
- struct pxa_buffer, vb.queue);
- vb = &pcdev->active->vb;
- buf = container_of(vb, struct pxa_buffer, vb);
- pxa_videobuf_set_actdma(pcdev, buf);
-
- pxa_dma_start_channels(pcdev);
-
cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM;
__raw_writel(cicr0, pcdev->base + CICR0);
+ tasklet_schedule(&pcdev->task_eof);
}
return IRQ_HANDLED;
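The EOF handling is now deferred from hard-irq to softirq context through a tasklet. A minimal sketch of the split, with hypothetical demo_* names:

#include <linux/interrupt.h>

struct demo_dev {
	struct tasklet_struct task_eof;
};

/* Runs later in softirq context: a safe place for the FIFO reset and
 * DMA restart that used to live in the hard handler. */
static void demo_eof_tasklet(unsigned long arg)
{
	struct demo_dev *dev = (struct demo_dev *)arg;

	(void)dev;	/* ... reset FIFOs, restart DMA ... */
}

static irqreturn_t demo_irq(int irq, void *data)
{
	struct demo_dev *dev = data;

	/* Acknowledge/mask in the hard handler, defer the heavy work. */
	tasklet_schedule(&dev->task_eof);
	return IRQ_HANDLED;
}

/* At probe time:
 *	tasklet_init(&dev->task_eof, demo_eof_tasklet, (unsigned long)dev);
 */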
@@ -993,10 +939,7 @@ static void pxa_camera_clock_stop(struct soc_camera_host *ici)
__raw_writel(0x3ff, pcdev->base + CICR0);
/* Stop DMA engine */
- DCSR(pcdev->dma_chans[0]) = 0;
- DCSR(pcdev->dma_chans[1]) = 0;
- DCSR(pcdev->dma_chans[2]) = 0;
-
+ pxa_dma_stop_channels(pcdev);
pxa_camera_deactivate(pcdev);
}
@@ -1623,10 +1566,6 @@ static int pxa_camera_resume(struct device *dev)
struct pxa_camera_dev *pcdev = ici->priv;
int i = 0, ret = 0;
- DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
- DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
- DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
-
__raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0);
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1);
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2);
@@ -1732,8 +1671,15 @@ static int pxa_camera_probe(struct platform_device *pdev)
struct pxa_camera_dev *pcdev;
struct resource *res;
void __iomem *base;
+ struct dma_slave_config config = {
+ .src_addr_width = 0,
+ .src_maxburst = 8,
+ .direction = DMA_DEV_TO_MEM,
+ };
+ dma_cap_mask_t mask;
+ struct pxad_param params;
int irq;
- int err = 0;
+ int err = 0, i;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
@@ -1801,36 +1747,47 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->base = base;
/* request dma */
- err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH,
- pxa_camera_dma_irq_y, pcdev);
- if (err < 0) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_PRIVATE, mask);
+
+ params.prio = 0;
+ params.drcmr = 68;
+ pcdev->dma_chans[0] =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &params, &pdev->dev, "CI_Y");
+ if (!pcdev->dma_chans[0]) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
- return err;
+ return -ENODEV;
}
- pcdev->dma_chans[0] = err;
- dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]);
- err = pxa_request_dma("CI_U", DMA_PRIO_HIGH,
- pxa_camera_dma_irq_u, pcdev);
- if (err < 0) {
- dev_err(&pdev->dev, "Can't request DMA for U\n");
+ params.drcmr = 69;
+ pcdev->dma_chans[1] =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &params, &pdev->dev, "CI_U");
+ if (!pcdev->dma_chans[1]) {
+ dev_err(&pdev->dev, "Can't request DMA for U\n");
goto exit_free_dma_y;
}
- pcdev->dma_chans[1] = err;
- dev_dbg(&pdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]);
- err = pxa_request_dma("CI_V", DMA_PRIO_HIGH,
- pxa_camera_dma_irq_v, pcdev);
- if (err < 0) {
+ params.drcmr = 70;
+ pcdev->dma_chans[2] =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &params, &pdev->dev, "CI_V");
+ if (!pcdev->dma_chans[2]) {
dev_err(&pdev->dev, "Can't request DMA for V\n");
goto exit_free_dma_u;
}
- pcdev->dma_chans[2] = err;
- dev_dbg(&pdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]);
- DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD;
- DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD;
- DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD;
+ for (i = 0; i < 3; i++) {
+ config.src_addr = pcdev->res->start + CIBR0 + i * 8;
+ err = dmaengine_slave_config(pcdev->dma_chans[i], &config);
+ if (err < 0) {
+ dev_err(&pdev->dev, "dma slave config failed: %d\n",
+ err);
+ goto exit_free_dma;
+ }
+ }
/* request irq */
err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
@@ -1845,6 +1802,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->soc_host.priv = pcdev;
pcdev->soc_host.v4l2_dev.dev = &pdev->dev;
pcdev->soc_host.nr = pdev->id;
+ tasklet_init(&pcdev->task_eof, pxa_camera_eof, (unsigned long)pcdev);
err = soc_camera_host_register(&pcdev->soc_host);
if (err)
@@ -1853,11 +1811,11 @@ static int pxa_camera_probe(struct platform_device *pdev)
return 0;
exit_free_dma:
- pxa_free_dma(pcdev->dma_chans[2]);
+ dma_release_channel(pcdev->dma_chans[2]);
exit_free_dma_u:
- pxa_free_dma(pcdev->dma_chans[1]);
+ dma_release_channel(pcdev->dma_chans[1]);
exit_free_dma_y:
- pxa_free_dma(pcdev->dma_chans[0]);
+ dma_release_channel(pcdev->dma_chans[0]);
return err;
}
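Putting the probe-time pieces together, requesting and configuring one QCI channel looks roughly like this; demo_get_chan() is illustrative, while the "CI_Y" name and DRCMR line 68 mirror the patch above:

#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>

/* Sketch only: prefer a DT-provided channel, fall back to the pxad
 * filter with an explicit DRCMR line, then point the slave config at
 * the QCI receive FIFO. */
static struct dma_chan *demo_get_chan(struct device *dev, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo,
		.src_maxburst	= 8,
		.direction	= DMA_DEV_TO_MEM,
	};
	struct pxad_param param = { .prio = 0, .drcmr = 68 };
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						&param, dev, "CI_Y");
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}
	return chan;
}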
@@ -1867,9 +1825,9 @@ static int pxa_camera_remove(struct platform_device *pdev)
struct pxa_camera_dev *pcdev = container_of(soc_host,
struct pxa_camera_dev, soc_host);
- pxa_free_dma(pcdev->dma_chans[0]);
- pxa_free_dma(pcdev->dma_chans[1]);
- pxa_free_dma(pcdev->dma_chans[2]);
+ dma_release_channel(pcdev->dma_chans[0]);
+ dma_release_channel(pcdev->dma_chans[1]);
+ dma_release_channel(pcdev->dma_chans[2]);
soc_camera_host_unregister(soc_host);
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index dc75a80794fb..3b8edf458964 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -124,7 +124,7 @@
#define VNDMR_EXRGB (1 << 8)
#define VNDMR_BPSM (1 << 4)
#define VNDMR_DTMD_YCSEP (1 << 1)
-#define VNDMR_DTMD_ARGB1555 (1 << 0)
+#define VNDMR_DTMD_ARGB (1 << 0)
/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30)
@@ -643,21 +643,26 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
output_is_yuv = true;
break;
case V4L2_PIX_FMT_RGB555X:
- dmr = VNDMR_DTMD_ARGB1555;
+ dmr = VNDMR_DTMD_ARGB;
break;
case V4L2_PIX_FMT_RGB565:
dmr = 0;
break;
case V4L2_PIX_FMT_RGB32:
- if (priv->chip == RCAR_GEN2 || priv->chip == RCAR_H1 ||
- priv->chip == RCAR_E1) {
- dmr = VNDMR_EXRGB;
- break;
- }
+ if (priv->chip != RCAR_GEN2 && priv->chip != RCAR_H1 &&
+ priv->chip != RCAR_E1)
+ goto e_format;
+
+ dmr = VNDMR_EXRGB;
+ break;
+ case V4L2_PIX_FMT_ARGB32:
+ if (priv->chip != RCAR_GEN3)
+ goto e_format;
+
+ dmr = VNDMR_EXRGB | VNDMR_DTMD_ARGB;
+ break;
default:
- dev_warn(icd->parent, "Invalid fourcc format (0x%x)\n",
- icd->current_fmt->host_fmt->fourcc);
- return -EINVAL;
+ goto e_format;
}
/* Always update on field change */
@@ -679,6 +684,11 @@ static int rcar_vin_setup(struct rcar_vin_priv *priv)
iowrite32(vnmc | VNMC_ME, priv->base + VNMC_REG);
return 0;
+
+e_format:
+ dev_warn(icd->parent, "Invalid fourcc format (0x%x)\n",
+ icd->current_fmt->host_fmt->fourcc);
+ return -EINVAL;
}
static void rcar_vin_capture(struct rcar_vin_priv *priv)
@@ -1304,6 +1314,14 @@ static const struct soc_mbus_pixelfmt rcar_vin_formats[] = {
.order = SOC_MBUS_ORDER_LE,
.layout = SOC_MBUS_LAYOUT_PACKED,
},
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB32,
+ .name = "ARGB8888",
+ .bits_per_sample = 32,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
};
static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
@@ -1611,6 +1629,7 @@ static int rcar_vin_set_fmt(struct soc_camera_device *icd,
case V4L2_PIX_FMT_RGB32:
can_scale = priv->chip != RCAR_E1;
break;
+ case V4L2_PIX_FMT_ARGB32:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_RGB565:
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 35fa1071c5b2..82001e6b5553 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -804,6 +804,9 @@ static int cal_get_external_info(struct cal_ctx *ctx)
{
struct v4l2_ctrl *ctrl;
+ if (!ctx->sensor)
+ return -ENODEV;
+
ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
if (!ctrl) {
ctx_err(ctx, "no pixel rate control in subdev: %s\n",
@@ -950,9 +953,6 @@ static int __subdev_get_format(struct cal_ctx *ctx,
struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- if (!ctx->sensor)
- return -EINVAL;
-
sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sd_fmt.pad = 0;
@@ -975,9 +975,6 @@ static int __subdev_set_format(struct cal_ctx *ctx,
struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
int ret;
- if (!ctx->sensor)
- return -EINVAL;
-
sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sd_fmt.pad = 0;
*mbus_fmt = *fmt;
@@ -1152,7 +1149,7 @@ static int cal_enum_framesizes(struct file *file, void *fh,
ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
if (ret)
- return -EINVAL;
+ return ret;
ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
__func__, fse.index, fse.code, fse.min_width, fse.max_width,
@@ -1201,42 +1198,25 @@ static int cal_enum_frameintervals(struct file *file, void *priv,
{
struct cal_ctx *ctx = video_drvdata(file);
const struct cal_fmt *fmt;
- struct v4l2_subdev_frame_size_enum fse;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
int ret;
- if (fival->index)
- return -EINVAL;
-
fmt = find_format_by_pix(ctx, fival->pixel_format);
if (!fmt)
return -EINVAL;
- /* check for valid width/height */
- ret = 0;
- fse.pad = 0;
- fse.code = fmt->code;
- fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- for (fse.index = 0; ; fse.index++) {
- ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
- NULL, &fse);
- if (ret)
- return -EINVAL;
-
- if ((fival->width == fse.max_width) &&
- (fival->height == fse.max_height))
- break;
- else if ((fival->width >= fse.min_width) &&
- (fival->width <= fse.max_width) &&
- (fival->height >= fse.min_height) &&
- (fival->height <= fse.max_height))
- break;
-
- return -EINVAL;
- }
-
+ fie.code = fmt->code;
+ ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_interval,
+ NULL, &fie);
+ if (ret)
+ return ret;
fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
- fival->discrete.numerator = 1;
- fival->discrete.denominator = 30;
+ fival->discrete = fie.interval;
return 0;
}
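For comparison, querying a subdevice for a discrete frame interval with the pad operation used above can be sketched as follows (demo_get_interval() is illustrative):

#include <media/v4l2-subdev.h>

/* Sketch only: ask the sensor which interval it supports at a given
 * media-bus code and frame size, mirroring cal_enum_frameintervals(). */
static int demo_get_interval(struct v4l2_subdev *sd, u32 code,
			     u32 width, u32 height,
			     struct v4l2_fract *ival)
{
	struct v4l2_subdev_frame_interval_enum fie = {
		.index	= 0,			/* first interval */
		.code	= code,
		.width	= width,
		.height	= height,
		.which	= V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	ret = v4l2_subdev_call(sd, pad, enum_frame_interval, NULL, &fie);
	if (ret)
		return ret;

	*ival = fie.interval;			/* e.g. 1/30 for 30 fps */
	return 0;
}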
@@ -1347,13 +1327,11 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
cal_wr_dma_addr(ctx, addr);
csi2_ppi_enable(ctx);
- if (ctx->sensor) {
- if (v4l2_subdev_call(ctx->sensor, video, s_stream, 1)) {
- ctx_err(ctx, "stream on failed in subdev\n");
- cal_runtime_put(ctx->dev);
- ret = -EINVAL;
- goto err;
- }
+ ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
+ if (ret) {
+ ctx_err(ctx, "stream on failed in subdev\n");
+ cal_runtime_put(ctx->dev);
+ goto err;
}
if (debug >= 4)
@@ -1376,10 +1354,8 @@ static void cal_stop_streaming(struct vb2_queue *vq)
struct cal_buffer *buf, *tmp;
unsigned long flags;
- if (ctx->sensor) {
- if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
- ctx_err(ctx, "stream off failed in subdev\n");
- }
+ if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
+ ctx_err(ctx, "stream off failed in subdev\n");
csi2_ppi_disable(ctx);
disable_irqs(ctx);
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index 14256141f905..da862bb2e5f8 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -251,6 +251,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->planes = 3;
tpg->is_yuv = true;
break;
+ case V4L2_PIX_FMT_YUV422M:
+ case V4L2_PIX_FMT_YVU422M:
+ tpg->buffers = 3;
+ /* fall through */
case V4L2_PIX_FMT_YUV422P:
tpg->vdownsampling[1] = 1;
tpg->vdownsampling[2] = 1;
@@ -283,6 +287,16 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->planes = 2;
tpg->is_yuv = true;
break;
+ case V4L2_PIX_FMT_YUV444M:
+ case V4L2_PIX_FMT_YVU444M:
+ tpg->buffers = 3;
+ tpg->planes = 3;
+ tpg->vdownsampling[1] = 1;
+ tpg->vdownsampling[2] = 1;
+ tpg->hdownsampling[1] = 1;
+ tpg->hdownsampling[2] = 1;
+ tpg->is_yuv = true;
+ break;
case V4L2_PIX_FMT_NV24:
case V4L2_PIX_FMT_NV42:
tpg->vdownsampling[1] = 1;
@@ -368,6 +382,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
tpg->twopixelsize[0] = 4;
tpg->twopixelsize[1] = 4;
break;
+ case V4L2_PIX_FMT_YUV444M:
+ case V4L2_PIX_FMT_YVU444M:
+ case V4L2_PIX_FMT_YUV422M:
+ case V4L2_PIX_FMT_YVU422M:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
@@ -933,6 +951,7 @@ static void gen_twopix(struct tpg_data *tpg,
buf[0][offset] = r_y;
buf[0][offset+1] = r_y == 0xff ? r_y : 0;
break;
+ case V4L2_PIX_FMT_YUV422M:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV420M:
@@ -947,6 +966,7 @@ static void gen_twopix(struct tpg_data *tpg,
buf[1][0] = g_u;
buf[2][0] = b_v;
break;
+ case V4L2_PIX_FMT_YVU422M:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YVU420M:
buf[0][offset] = r_y;
@@ -988,6 +1008,18 @@ static void gen_twopix(struct tpg_data *tpg,
buf[1][1] = g_u;
break;
+ case V4L2_PIX_FMT_YUV444M:
+ buf[0][offset] = r_y;
+ buf[1][offset] = g_u;
+ buf[2][offset] = b_v;
+ break;
+
+ case V4L2_PIX_FMT_YVU444M:
+ buf[0][offset] = r_y;
+ buf[1][offset] = b_v;
+ buf[2][offset] = g_u;
+ break;
+
case V4L2_PIX_FMT_NV24:
buf[0][offset] = r_y;
buf[1][2 * offset] = g_u;
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 1678b730dba2..b0d4e3a0acf0 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -445,6 +445,9 @@ struct vivid_fmt vivid_formats[] = {
.planes = 1,
.buffers = 1,
},
+
+ /* Multiplanar formats */
+
{
.fourcc = V4L2_PIX_FMT_NV16M,
.vdownsampling = { 1, 1 },
@@ -495,10 +498,42 @@ struct vivid_fmt vivid_formats[] = {
.planes = 2,
.buffers = 2,
},
+ {
+ .fourcc = V4L2_PIX_FMT_YUV422M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU422M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 4, 4 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUV444M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 8, 8 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YVU444M,
+ .vdownsampling = { 1, 1, 1 },
+ .bit_depth = { 8, 8, 8 },
+ .is_yuv = true,
+ .planes = 3,
+ .buffers = 3,
+ },
};
-/* There are 6 multiplanar formats in the list */
-#define VIVID_MPLANAR_FORMATS 6
+/* There are this many multiplanar formats in the list */
+#define VIVID_MPLANAR_FORMATS 10
const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat)
{
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 7dc27ac6bd02..1a9a58588f84 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -278,7 +278,7 @@ struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1)
dl->vsp1 = vsp1;
dl->size = VSP1_DL_BODY_SIZE * ARRAY_SIZE(dl->lists.all);
- dl->mem = dma_alloc_writecombine(vsp1->dev, dl->size, &dl->dma,
+ dl->mem = dma_alloc_wc(vsp1->dev, dl->size, &dl->dma,
GFP_KERNEL);
if (!dl->mem) {
kfree(dl);
@@ -300,6 +300,6 @@ struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1)
void vsp1_dl_destroy(struct vsp1_dl *dl)
{
- dma_free_writecombine(dl->vsp1->dev, dl->size, dl->mem, dl->dma);
+ dma_free_wc(dl->vsp1->dev, dl->size, dl->mem, dl->dma);
kfree(dl);
}
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 61ee0f92c1e5..72cc7d3729f8 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -289,7 +289,7 @@ static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
struct vsp1_rwpf *rwpf;
struct vsp1_entity *e;
- if (is_media_entity_v4l2_io(entity))
+ if (!is_media_entity_v4l2_subdev(entity))
continue;
subdev = media_entity_to_v4l2_subdev(entity);
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index a35631891cc0..3f61d77d4147 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -443,6 +443,21 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd,
return retval;
}
+struct accel_times {
+ const char value;
+ unsigned int msecs;
+};
+
+static const struct accel_times accel[] = {
+ { 1, 125 },
+ { 2, 250 },
+ { 4, 500 },
+ { 6, 1000 },
+ { 9, 1500 },
+ { 13, 2000 },
+ { 20, 0 },
+};
+
/*
* ati_remote_compute_accel
*
@@ -454,30 +469,22 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd,
*/
static int ati_remote_compute_accel(struct ati_remote *ati_remote)
{
- static const char accel[] = { 1, 2, 4, 6, 9, 13, 20 };
- unsigned long now = jiffies;
- int acc;
+ unsigned long now = jiffies, reset_time;
+ int i;
+
+ reset_time = msecs_to_jiffies(250);
- if (time_after(now, ati_remote->old_jiffies + msecs_to_jiffies(250))) {
- acc = 1;
+ if (time_after(now, ati_remote->old_jiffies + reset_time)) {
ati_remote->acc_jiffies = now;
+ return 1;
}
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(125)))
- acc = accel[0];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(250)))
- acc = accel[1];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(500)))
- acc = accel[2];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(1000)))
- acc = accel[3];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(1500)))
- acc = accel[4];
- else if (time_before(now, ati_remote->acc_jiffies + msecs_to_jiffies(2000)))
- acc = accel[5];
- else
- acc = accel[6];
+ for (i = 0; i < ARRAY_SIZE(accel) - 1; i++) {
+ unsigned long timeout = msecs_to_jiffies(accel[i].msecs);
- return acc;
+ if (time_before(now, ati_remote->acc_jiffies + timeout))
+ return accel[i].value;
+ }
+ return accel[i].value;
}
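The table-driven lookup above replaces the if/else ladder; reusing the driver's accel_times type, the walk reduces to the following (demo_accel() is illustrative):

#include <linux/jiffies.h>

/* Sketch only: map the time elapsed since "start" to an acceleration
 * value; the final table entry (msecs == 0) is the catch-all. */
static int demo_accel(unsigned long start,
		      const struct accel_times *tbl, int n)
{
	unsigned long now = jiffies;
	int i;

	for (i = 0; i < n - 1; i++)
		if (time_before(now, start + msecs_to_jiffies(tbl[i].msecs)))
			return tbl[i].value;

	return tbl[n - 1].value;	/* slowest tier, max acceleration */
}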
/*
diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c
index b36e51576f8e..e0c531fa01da 100644
--- a/drivers/media/rc/igorplugusb.c
+++ b/drivers/media/rc/igorplugusb.c
@@ -152,7 +152,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
struct usb_endpoint_descriptor *ep;
struct igorplugusb *ir;
struct rc_dev *rc;
- int ret;
+ int ret = -ENOMEM;
udev = interface_to_usbdev(intf);
idesc = intf->cur_altsetting;
@@ -182,7 +182,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
ir->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ir->urb)
- return -ENOMEM;
+ goto fail;
usb_fill_control_urb(ir->urb, udev,
usb_rcvctrlpipe(udev, 0), (uint8_t *)&ir->request,
@@ -191,6 +191,9 @@ static int igorplugusb_probe(struct usb_interface *intf,
usb_make_path(udev, ir->phys, sizeof(ir->phys));
rc = rc_allocate_device();
+ if (!rc)
+ goto fail;
+
rc->input_name = DRIVER_DESC;
rc->input_phys = ir->phys;
usb_to_input_id(udev, &rc->input_id);
@@ -214,9 +217,7 @@ static int igorplugusb_probe(struct usb_interface *intf,
ret = rc_register_device(rc);
if (ret) {
dev_err(&intf->dev, "failed to register rc device: %d", ret);
- rc_free_device(rc);
- usb_free_urb(ir->urb);
- return ret;
+ goto fail;
}
usb_set_intfdata(intf, ir);
@@ -224,6 +225,12 @@ static int igorplugusb_probe(struct usb_interface *intf,
igorplugusb_cmd(ir, SET_INFRABUFFER_EMPTY);
return 0;
+fail:
+ rc_free_device(ir->rc);
+ usb_free_urb(ir->urb);
+ del_timer(&ir->timer);
+
+ return ret;
}
static void igorplugusb_disconnect(struct usb_interface *intf)
diff --git a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
index 8344bcc595be..2583400ca1b4 100644
--- a/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
+++ b/drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
@@ -23,35 +23,35 @@
/* Initial keytable is from Jose Alberto Reguero <jareguero@telefonica.net>
and Felipe Morales Moreno <felipe.morales.moreno@gmail.com> */
-/* FIXME: mappings are not 100% correct? */
+/* Keytable fixed by Philippe Valembois <lephilousophe@users.sourceforge.net> */
static struct rc_map_table avermedia_rm_ks[] = {
- { 0x0501, KEY_POWER2 },
- { 0x0502, KEY_CHANNELUP },
- { 0x0503, KEY_CHANNELDOWN },
- { 0x0504, KEY_VOLUMEUP },
- { 0x0505, KEY_VOLUMEDOWN },
- { 0x0506, KEY_MUTE },
- { 0x0507, KEY_RIGHT },
- { 0x0508, KEY_RED },
- { 0x0509, KEY_1 },
- { 0x050a, KEY_2 },
- { 0x050b, KEY_3 },
- { 0x050c, KEY_4 },
- { 0x050d, KEY_5 },
- { 0x050e, KEY_6 },
- { 0x050f, KEY_7 },
- { 0x0510, KEY_8 },
- { 0x0511, KEY_9 },
- { 0x0512, KEY_0 },
- { 0x0513, KEY_AUDIO },
- { 0x0515, KEY_EPG },
- { 0x0516, KEY_PLAY },
- { 0x0517, KEY_RECORD },
- { 0x0518, KEY_STOP },
- { 0x051c, KEY_BACK },
- { 0x051d, KEY_FORWARD },
- { 0x054d, KEY_LEFT },
- { 0x0556, KEY_ZOOM },
+ { 0x0501, KEY_POWER2 }, /* Power (RED POWER BUTTON) */
+ { 0x0502, KEY_CHANNELUP }, /* Channel+ */
+ { 0x0503, KEY_CHANNELDOWN }, /* Channel- */
+ { 0x0504, KEY_VOLUMEUP }, /* Volume+ */
+ { 0x0505, KEY_VOLUMEDOWN }, /* Volume- */
+ { 0x0506, KEY_MUTE }, /* Mute */
+ { 0x0507, KEY_AGAIN }, /* Recall */
+ { 0x0508, KEY_VIDEO }, /* Source */
+ { 0x0509, KEY_1 }, /* 1 */
+ { 0x050a, KEY_2 }, /* 2 */
+ { 0x050b, KEY_3 }, /* 3 */
+ { 0x050c, KEY_4 }, /* 4 */
+ { 0x050d, KEY_5 }, /* 5 */
+ { 0x050e, KEY_6 }, /* 6 */
+ { 0x050f, KEY_7 }, /* 7 */
+ { 0x0510, KEY_8 }, /* 8 */
+ { 0x0511, KEY_9 }, /* 9 */
+ { 0x0512, KEY_0 }, /* 0 */
+ { 0x0513, KEY_AUDIO }, /* Audio */
+ { 0x0515, KEY_EPG }, /* EPG */
+ { 0x0516, KEY_PLAYPAUSE }, /* Play/Pause */
+ { 0x0517, KEY_RECORD }, /* Record */
+ { 0x0518, KEY_STOP }, /* Stop */
+ { 0x051c, KEY_BACK }, /* << */
+ { 0x051d, KEY_FORWARD }, /* >> */
+ { 0x054d, KEY_INFO }, /* Display information */
+ { 0x0556, KEY_ZOOM }, /* Fullscreen */
};
static struct rc_map_list avermedia_rm_ks_map = {
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 4de0e85af805..92ae1903c010 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -506,6 +506,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
struct irctl *ir = irctls[iminor(inode)];
struct cdev *cdev;
+ int ret;
if (!ir) {
printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
@@ -516,7 +517,8 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
- WARN_ON(mutex_lock_killable(&lirc_dev_lock));
+ ret = mutex_lock_killable(&lirc_dev_lock);
+ WARN_ON(ret);
rc_close(ir->d.rdev);
@@ -532,7 +534,8 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
kfree(ir);
}
- mutex_unlock(&lirc_dev_lock);
+ if (!ret)
+ mutex_unlock(&lirc_dev_lock);
return 0;
}
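The lirc_dev change encodes a general rule: mutex_lock_killable() leaves the mutex unowned when it fails, so the unlock must be conditional. A minimal sketch:

#include <linux/bug.h>
#include <linux/mutex.h>

/* Sketch only: mutex_lock_killable() returns -EINTR when a fatal
 * signal arrives, and in that case the lock is NOT held. */
static void demo_locked_cleanup(struct mutex *lock)
{
	int ret = mutex_lock_killable(lock);

	WARN_ON(ret);		/* unexpected, but must not unlock blindly */

	/* ... cleanup that is safe even without the lock ... */

	if (!ret)
		mutex_unlock(lock);	/* only drop a lock we took */
}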
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 2cdb740cde48..35155ae500c7 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -587,9 +587,8 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
if (len == 2)
dev_dbg(dev, "Get hw/sw rev?");
else
- dev_dbg(dev, "hw/sw rev 0x%02x 0x%02x 0x%02x 0x%02x",
- data1, data2,
- buf[start + 4], buf[start + 5]);
+ dev_dbg(dev, "hw/sw rev %*ph",
+ 4, &buf[start + 2]);
break;
case MCE_CMD_RESUME:
dev_dbg(dev, "Device resume requested");
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index c96da3aaf00b..99b303b702ac 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -39,6 +39,8 @@
#include "nuvoton-cir.h"
+static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt);
+
static const struct nvt_chip nvt_chips[] = {
{ "w83667hg", NVT_W83667HG },
{ "NCT6775F", NVT_6775F },
@@ -177,6 +179,104 @@ static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
}
}
+static ssize_t wakeup_data_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rc_dev *rc_dev = to_rc_dev(dev);
+ struct nvt_dev *nvt = rc_dev->priv;
+ int fifo_len, duration;
+ unsigned long flags;
+ ssize_t buf_len = 0;
+ int i;
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
+ fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);
+
+ /* go to first element to be read */
+ while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX))
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
+
+ for (i = 0; i < fifo_len; i++) {
+ duration = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
+ duration = (duration & BUF_LEN_MASK) * SAMPLE_PERIOD;
+ buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len,
+ "%d ", duration);
+ }
+ buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ return buf_len;
+}
+
+static ssize_t wakeup_data_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct rc_dev *rc_dev = to_rc_dev(dev);
+ struct nvt_dev *nvt = rc_dev->priv;
+ unsigned long flags;
+ u8 tolerance, config, wake_buf[WAKEUP_MAX_SIZE];
+ char **argv;
+ int i, count;
+ unsigned int val;
+ ssize_t ret;
+
+ argv = argv_split(GFP_KERNEL, buf, &count);
+ if (!argv)
+ return -ENOMEM;
+ if (!count || count > WAKEUP_MAX_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = kstrtouint(argv[i], 10, &val);
+ if (ret)
+ goto out;
+ val = DIV_ROUND_CLOSEST(val, SAMPLE_PERIOD);
+ if (!val || val > 0x7f) {
+ ret = -EINVAL;
+ goto out;
+ }
+ wake_buf[i] = val;
+ /* sequence must start with a pulse */
+ if (i % 2 == 0)
+ wake_buf[i] |= BUF_PULSE_BIT;
+ }
+
+ /* hardcode the tolerance to 10% */
+ tolerance = DIV_ROUND_UP(count, 10);
+
+ spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+ nvt_clear_cir_wake_fifo(nvt);
+ nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
+ nvt_cir_wake_reg_write(nvt, tolerance, CIR_WAKE_FIFO_CMP_TOL);
+
+ config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
+
+ /* enable writes to wake fifo */
+ nvt_cir_wake_reg_write(nvt, config | CIR_WAKE_IRCON_MODE1,
+ CIR_WAKE_IRCON);
+
+ for (i = 0; i < count; i++)
+ nvt_cir_wake_reg_write(nvt, wake_buf[i], CIR_WAKE_WR_FIFO_DATA);
+
+ nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
+
+ spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
+ ret = len;
+out:
+ argv_free(argv);
+ return ret;
+}
+static DEVICE_ATTR_RW(wakeup_data);
+
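DEVICE_ATTR_RW(wakeup_data) above expects the wakeup_data_show()/wakeup_data_store() pair and creates a 0644 sysfs file. A stripped-down sketch of the convention, with a hypothetical "demo" attribute; the sysfs path in the closing comment is likewise an assumption:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t demo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t demo_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t len)
{
	return len;	/* accept and discard */
}
static DEVICE_ATTR_RW(demo);

/* Exposed with device_create_file(dev, &dev_attr_demo) and driven from
 * user space much like the wakeup sequence above, e.g.:
 *	echo "500 500 1000" > /sys/class/rc/rc0/wakeup_data
 */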
/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
@@ -1133,6 +1233,10 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
NVT_DRIVER_NAME "-wake", (void *)nvt))
goto exit_unregister_device;
+ ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
+ if (ret)
+ goto exit_unregister_device;
+
device_init_wakeup(&pdev->dev, true);
dev_notice(&pdev->dev, "driver has been successfully loaded\n");
@@ -1156,6 +1260,8 @@ static void nvt_remove(struct pnp_dev *pdev)
{
struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+ device_remove_file(&nvt->rdev->dev, &dev_attr_wakeup_data);
+
nvt_disable_cir(nvt);
/* enable CIR Wake (for IR power-on) */
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 4a5650dffa29..c9c98ebb19ee 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -417,3 +417,6 @@ struct nvt_dev {
/* as VISTA MCE definition, valid carrier value */
#define MAX_CARRIER 60000
#define MIN_CARRIER 30000
+
+/* max wakeup sequence length */
+#define WAKEUP_MAX_SIZE 65
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index dcf20d9cbe09..4e9bbe735ae9 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -13,6 +13,7 @@
*/
#include <media/rc-core.h>
+#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/input.h>
@@ -723,10 +724,6 @@ int rc_open(struct rc_dev *rdev)
return -EINVAL;
mutex_lock(&rdev->lock);
- if (!rdev->initialized) {
- rval = -EINVAL;
- goto unlock;
- }
if (!rdev->users++ && rdev->open != NULL)
rval = rdev->open(rdev);
@@ -734,7 +731,6 @@ int rc_open(struct rc_dev *rdev)
if (rval)
rdev->users--;
-unlock:
mutex_unlock(&rdev->lock);
return rval;
@@ -879,11 +875,10 @@ static ssize_t show_protocols(struct device *device,
if (!dev)
return -EINVAL;
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
mutex_lock(&dev->lock);
- if (!dev->initialized) {
- mutex_unlock(&dev->lock);
- return -EINVAL;
- }
if (fattr->type == RC_FILTER_NORMAL) {
enabled = dev->enabled_protocols;
@@ -1064,6 +1059,9 @@ static ssize_t store_protocols(struct device *device,
if (!dev)
return -EINVAL;
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
if (fattr->type == RC_FILTER_NORMAL) {
IR_dprintk(1, "Normal protocol change requested\n");
current_protocols = &dev->enabled_protocols;
@@ -1084,10 +1082,6 @@ static ssize_t store_protocols(struct device *device,
}
mutex_lock(&dev->lock);
- if (!dev->initialized) {
- rc = -EINVAL;
- goto out;
- }
old_protocols = *current_protocols;
new_protocols = old_protocols;
@@ -1168,11 +1162,10 @@ static ssize_t show_filter(struct device *device,
if (!dev)
return -EINVAL;
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
mutex_lock(&dev->lock);
- if (!dev->initialized) {
- mutex_unlock(&dev->lock);
- return -EINVAL;
- }
if (fattr->type == RC_FILTER_NORMAL)
filter = &dev->scancode_filter;
@@ -1223,6 +1216,9 @@ static ssize_t store_filter(struct device *device,
if (!dev)
return -EINVAL;
+ if (!atomic_read(&dev->initialized))
+ return -ERESTARTSYS;
+
ret = kstrtoul(buf, 0, &val);
if (ret < 0)
return ret;
@@ -1241,10 +1237,6 @@ static ssize_t store_filter(struct device *device,
return -EINVAL;
mutex_lock(&dev->lock);
- if (!dev->initialized) {
- ret = -EINVAL;
- goto unlock;
- }
new_filter = *filter;
if (fattr->mask)
@@ -1431,6 +1423,7 @@ int rc_register_device(struct rc_dev *dev)
dev->minor = minor;
dev_set_name(&dev->dev, "rc%u", dev->minor);
dev_set_drvdata(&dev->dev, dev);
+ atomic_set(&dev->initialized, 0);
dev->dev.groups = dev->sysfs_groups;
dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
@@ -1455,10 +1448,6 @@ int rc_register_device(struct rc_dev *dev)
dev->input_dev->phys = dev->input_phys;
dev->input_dev->name = dev->input_name;
- rc = input_register_device(dev->input_dev);
- if (rc)
- goto out_table;
-
/*
* Default delay of 250ms is too short for some protocols, especially
* since the timeout is currently set to 250ms. Increase it to 500ms,
@@ -1474,6 +1463,11 @@ int rc_register_device(struct rc_dev *dev)
*/
dev->input_dev->rep[REP_PERIOD] = 125;
+ /* rc_open will be called here */
+ rc = input_register_device(dev->input_dev);
+ if (rc)
+ goto out_table;
+
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
dev_info(&dev->dev, "%s as %s\n",
dev->input_name ?: "Unspecified device", path ?: "N/A");
@@ -1497,8 +1491,9 @@ int rc_register_device(struct rc_dev *dev)
dev->enabled_protocols = rc_type;
}
+ /* Allow the RC sysfs nodes to be accessible */
mutex_lock(&dev->lock);
- dev->initialized = true;
+ atomic_set(&dev->initialized, 1);
mutex_unlock(&dev->lock);
IR_dprintk(1, "Registered rc%u (driver: %s, remote: %s, mode %s)\n",
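The atomic_t conversion above lets the sysfs handlers bail out lock-free while registration is still in progress. The pattern in isolation (demo_* names are illustrative):

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t initialized = ATOMIC_INIT(0);

/* Sketch only: a sysfs handler backs off until the device is fully
 * registered, instead of taking dev->lock and poking at it early. */
static int demo_sysfs_access(void)
{
	if (!atomic_read(&initialized))
		return -ERESTARTSYS;	/* not ready, restart the syscall */

	/* ... safe to lock and touch the device ... */
	return 0;
}

static void demo_registration_done(void)
{
	atomic_set(&initialized, 1);	/* publish only after full setup */
}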
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 40f77685cc4a..eaadc081760a 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -326,6 +326,7 @@ static const struct of_device_id sunxi_ir_match[] = {
{ .compatible = "allwinner,sun5i-a13-ir", },
{},
};
+MODULE_DEVICE_TABLE(of, sunxi_ir_match);
static struct platform_driver sunxi_ir_driver = {
.probe = sunxi_ir_probe,
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 219ebafae70f..d95c7e082ccf 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1508,7 +1508,7 @@ static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
if (value >= 0x2000) {
value = 0;
} else {
- value = ~value << 3;
+ value = (~value << 3) & 0xffff;
}
goto ret;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 0d4ac5947f3a..87c12930416f 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -104,9 +104,8 @@ struct airspy_frame_buf {
};
struct airspy {
-#define POWER_ON (1 << 1)
-#define URB_BUF (1 << 2)
-#define USB_STATE_URB_BUF (1 << 3)
+#define POWER_ON 1
+#define USB_STATE_URB_BUF 2
unsigned long flags;
struct device *dev;
@@ -359,7 +358,7 @@ static int airspy_submit_urbs(struct airspy *s)
static int airspy_free_stream_bufs(struct airspy *s)
{
- if (s->flags & USB_STATE_URB_BUF) {
+ if (test_bit(USB_STATE_URB_BUF, &s->flags)) {
while (s->buf_num) {
s->buf_num--;
dev_dbg(s->dev, "free buf=%d\n", s->buf_num);
@@ -368,7 +367,7 @@ static int airspy_free_stream_bufs(struct airspy *s)
s->dma_addr[s->buf_num]);
}
}
- s->flags &= ~USB_STATE_URB_BUF;
+ clear_bit(USB_STATE_URB_BUF, &s->flags);
return 0;
}
@@ -394,7 +393,7 @@ static int airspy_alloc_stream_bufs(struct airspy *s)
dev_dbg(s->dev, "alloc buf=%d %p (dma %llu)\n", s->buf_num,
s->buf_list[s->buf_num],
(long long)s->dma_addr[s->buf_num]);
- s->flags |= USB_STATE_URB_BUF;
+ set_bit(USB_STATE_URB_BUF, &s->flags);
}
return 0;
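Note that the airspy flag values changed meaning along the way: set_bit()/test_bit() take a bit number, not a mask. A sketch of the converted pattern (DEMO_* names are illustrative):

#include <linux/bitops.h>

#define DEMO_POWER_ON		1	/* bit numbers, not masks */
#define DEMO_URB_BUF		2

static unsigned long demo_flags;

static void demo_bufs_allocated(void)
{
	set_bit(DEMO_URB_BUF, &demo_flags);	/* atomic on demo_flags */
}

static bool demo_bufs_present(void)
{
	return test_bit(DEMO_URB_BUF, &demo_flags);
}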
diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c
index ca861aea68a5..6b469e8c4c6e 100644
--- a/drivers/media/usb/au0828/au0828-cards.c
+++ b/drivers/media/usb/au0828/au0828-cards.c
@@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev)
"au8522", 0x8e >> 1, NULL);
if (sd == NULL)
pr_err("analog subdev registration failed\n");
-#ifdef CONFIG_MEDIA_CONTROLLER
- if (sd)
- dev->decoder = &sd->entity;
-#endif
}
/* Setup tuners */
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 7cafe4dd5fd1..cc22b32776ad 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -20,6 +20,7 @@
*/
#include "au0828.h"
+#include "au8522.h"
#include <linux/module.h>
#include <linux/slab.h>
@@ -134,7 +135,13 @@ static void au0828_unregister_media_device(struct au0828_dev *dev)
{
#ifdef CONFIG_MEDIA_CONTROLLER
- if (dev->media_dev) {
+ if (dev->media_dev &&
+ media_devnode_is_registered(&dev->media_dev->devnode)) {
+ /* clear enable_source, disable_source */
+ dev->media_dev->source_priv = NULL;
+ dev->media_dev->enable_source = NULL;
+ dev->media_dev->disable_source = NULL;
+
media_device_unregister(dev->media_dev);
media_device_cleanup(dev->media_dev);
kfree(dev->media_dev);
@@ -165,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
Set the status so poll routines can check and avoid
access after disconnect.
*/
- dev->dev_state = DEV_DISCONNECTED;
+ set_bit(DEV_DISCONNECTED, &dev->dev_state);
au0828_rc_unregister(dev);
/* Digital TV */
@@ -191,18 +198,349 @@ static int au0828_media_device_init(struct au0828_dev *dev,
#ifdef CONFIG_MEDIA_CONTROLLER
struct media_device *mdev;
- if (!dev->board.name)
- mdev = v4l2_mc_usb_media_device_init(udev, "unknown au0828");
- else
- mdev = v4l2_mc_usb_media_device_init(udev, dev->board.name);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
+ /* check if media device is already initialized */
+ if (!mdev->dev)
+ media_device_usb_init(mdev, udev, udev->product);
+
dev->media_dev = mdev;
#endif
return 0;
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+static void au0828_media_graph_notify(struct media_entity *new,
+ void *notify_data)
+{
+ struct au0828_dev *dev = (struct au0828_dev *) notify_data;
+ int ret;
+ struct media_entity *entity, *mixer = NULL, *decoder = NULL;
+
+ if (!new) {
+ /*
+ * Called during au0828 probe time to connect
+ * entities that were created prior to registering
+ * the notify handler. Find mixer and decoder.
+ */
+ media_device_for_each_entity(entity, dev->media_dev) {
+ if (entity->function == MEDIA_ENT_F_AUDIO_MIXER)
+ mixer = entity;
+ else if (entity->function == MEDIA_ENT_F_ATV_DECODER)
+ decoder = entity;
+ }
+ goto create_link;
+ }
+
+ switch (new->function) {
+ case MEDIA_ENT_F_AUDIO_MIXER:
+ mixer = new;
+ if (dev->decoder)
+ decoder = dev->decoder;
+ break;
+ case MEDIA_ENT_F_ATV_DECODER:
+ /* In case, Mixer is added first, find mixer and create link */
+ media_device_for_each_entity(entity, dev->media_dev) {
+ if (entity->function == MEDIA_ENT_F_AUDIO_MIXER)
+ mixer = entity;
+ }
+ decoder = new;
+ break;
+ default:
+ break;
+ }
+
+create_link:
+ if (decoder && mixer) {
+ ret = media_create_pad_link(decoder,
+ DEMOD_PAD_AUDIO_OUT,
+ mixer, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ dev_err(&dev->usbdev->dev,
+ "Mixer Pad Link Create Error: %d\n", ret);
+ }
+}
+
+static int au0828_enable_source(struct media_entity *entity,
+ struct media_pipeline *pipe)
+{
+ struct media_entity *source, *find_source;
+ struct media_entity *sink;
+ struct media_link *link, *found_link = NULL;
+ int ret = 0;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct au0828_dev *dev;
+
+ if (!mdev)
+ return -ENODEV;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ dev = mdev->source_priv;
+
+ /*
+ * For audio and V4L2 entities, find the link for which the
+ * decoder is the sink. Look for an active link between decoder
+ * and source (tuner/s-video/composite); if one exists, there is
+ * nothing to do. If not, look for any active links between the
+ * source and any other entity. If one exists, the source is
+ * busy; if the source is free, set up the link and start the
+ * pipeline from the source. For the DVB FE entity, the source
+ * for the link is the tuner. Check if the tuner is available,
+ * then set up the link and start the pipeline.
+ */
+ if (entity->function == MEDIA_ENT_F_DTV_DEMOD) {
+ sink = entity;
+ find_source = dev->tuner;
+ } else {
+ /* Analog isn't configured or register failed */
+ if (!dev->decoder) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ sink = dev->decoder;
+
+ /*
+ * Default input is tuner and default input_type
+ * is AU0828_VMUX_TELEVISION.
+ * FIXME:
+ * There is a problem when s_input is called to
+ * change the default input. s_input will try to
+ * enable_source before attempting to change the
+ * input on the device, and will end up enabling
+ * the default source, which is the tuner.
+ *
+ * Additional logic is necessary in au0828
+ * to detect that the input has changed and
+ * enable the right source.
+ */
+
+ if (dev->input_type == AU0828_VMUX_TELEVISION)
+ find_source = dev->tuner;
+ else if (dev->input_type == AU0828_VMUX_SVIDEO ||
+ dev->input_type == AU0828_VMUX_COMPOSITE)
+ find_source = &dev->input_ent[dev->input_type];
+ else {
+ /* unknown input - let user select input */
+ ret = 0;
+ goto end;
+ }
+ }
+
+ /* Is there an active link between sink and source? */
+ if (dev->active_link) {
+ /*
+ * If DVB is using the tuner and calling entity is
+ * audio/video, the following check will be false,
+ * since sink is different. Result is Busy.
+ */
+ if (dev->active_link->sink->entity == sink &&
+ dev->active_link->source->entity == find_source) {
+ /*
+ * Either ALSA or video owns the tuner; the sink
+ * is the same for both. Prevent video stepping
+ * on ALSA when ALSA owns the source.
+ */
+ if (dev->active_link_owner != entity &&
+ dev->active_link_owner->function ==
+ MEDIA_ENT_F_AUDIO_CAPTURE) {
+ pr_debug("ALSA has the tuner\n");
+ ret = -EBUSY;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+ } else {
+ ret = -EBUSY;
+ goto end;
+ }
+ }
+
+ list_for_each_entry(link, &sink->links, list) {
+ /* Check sink, and source */
+ if (link->sink->entity == sink &&
+ link->source->entity == find_source) {
+ found_link = link;
+ break;
+ }
+ }
+
+ if (!found_link) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ /* activate link between source and sink and start pipeline */
+ source = found_link->source->entity;
+ ret = __media_entity_setup_link(found_link, MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ pr_err("Activate tuner link %s->%s. Error %d\n",
+ source->name, sink->name, ret);
+ goto end;
+ }
+
+ ret = __media_entity_pipeline_start(entity, pipe);
+ if (ret) {
+ pr_err("Start Pipeline: %s->%s Error %d\n",
+ source->name, entity->name, ret);
+ ret = __media_entity_setup_link(found_link, 0);
+ pr_err("Deactivate link Error %d\n", ret);
+ goto end;
+ }
+ /*
+ * save the active link and its owner so that audio does not
+ * deactivate a video-owned link from disable_source, and
+ * vice versa
+ */
+ dev->active_link = found_link;
+ dev->active_link_owner = entity;
+ dev->active_source = source;
+ dev->active_sink = sink;
+
+ pr_debug("Enabled Source: %s->%s->%s Ret %d\n",
+ dev->active_source->name, dev->active_sink->name,
+ dev->active_link_owner->name, ret);
+end:
+ mutex_unlock(&mdev->graph_mutex);
+ pr_debug("au0828_enable_source() end %s %d %d\n",
+ entity->name, entity->function, ret);
+ return ret;
+}
+
+static void au0828_disable_source(struct media_entity *entity)
+{
+ int ret = 0;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct au0828_dev *dev;
+
+ if (!mdev)
+ return;
+
+ mutex_lock(&mdev->graph_mutex);
+ dev = mdev->source_priv;
+
+ if (!dev->active_link) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ /* link is active - stop pipeline from source (tuner) */
+ if (dev->active_link->sink->entity == dev->active_sink &&
+ dev->active_link->source->entity == dev->active_source) {
+ /*
+ * prevent video from deactivating the link when audio
+ * has an active pipeline
+ */
+ if (dev->active_link_owner != entity)
+ goto end;
+ __media_entity_pipeline_stop(entity);
+ ret = __media_entity_setup_link(dev->active_link, 0);
+ if (ret)
+ pr_err("Deactivate link Error %d\n", ret);
+
+ pr_debug("Disabled Source: %s->%s->%s Ret %d\n",
+ dev->active_source->name, dev->active_sink->name,
+ dev->active_link_owner->name, ret);
+
+ dev->active_link = NULL;
+ dev->active_link_owner = NULL;
+ dev->active_source = NULL;
+ dev->active_sink = NULL;
+ }
+
+end:
+ mutex_unlock(&mdev->graph_mutex);
+}
+#endif
+
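au0828_enable_source() above boils down to two media-controller calls made under graph_mutex: activate the link, then start the pipeline, rolling back on failure. A condensed sketch (demo_enable() is illustrative):

#include <media/media-entity.h>

/* Sketch only: the caller holds mdev->graph_mutex, hence the __
 * variants of the media-controller helpers. */
static int demo_enable(struct media_link *link,
		       struct media_entity *owner,
		       struct media_pipeline *pipe)
{
	int ret;

	ret = __media_entity_setup_link(link, MEDIA_LNK_FL_ENABLED);
	if (ret)
		return ret;

	ret = __media_entity_pipeline_start(owner, pipe);
	if (ret)
		__media_entity_setup_link(link, 0);	/* roll back */

	return ret;
}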
+static int au0828_media_device_register(struct au0828_dev *dev,
+ struct usb_device *udev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+ int ret;
+ struct media_entity *entity, *demod = NULL;
+ struct media_link *link;
+
+ if (!dev->media_dev)
+ return 0;
+
+ if (!media_devnode_is_registered(&dev->media_dev->devnode)) {
+
+ /* register media device */
+ ret = media_device_register(dev->media_dev);
+ if (ret) {
+ dev_err(&udev->dev,
+ "Media Device Register Error: %d\n", ret);
+ return ret;
+ }
+ } else {
+ /*
+ * Call au0828_media_graph_notify() to connect
+ * the audio graph to our graph. In this case, the audio
+ * driver registered the device and there is no
+ * entity_notify to be called when new entities
+ * are added. Invoke it now.
+ */
+ au0828_media_graph_notify(NULL, (void *) dev);
+ }
+
+ /*
+ * Find tuner, decoder and demod.
+ *
+ * The tuner and decoder should be cached, as they'll be used by
+ * au0828_enable_source.
+ *
+ * We also need to disable the link between tuner and
+ * decoder/demod, to avoid a disable step when the tuner is
+ * requested by video or audio. Note that this step can't be
+ * done until the dvb graph is created during dvb register.
+ */
+ media_device_for_each_entity(entity, dev->media_dev) {
+ switch (entity->function) {
+ case MEDIA_ENT_F_TUNER:
+ dev->tuner = entity;
+ break;
+ case MEDIA_ENT_F_ATV_DECODER:
+ dev->decoder = entity;
+ break;
+ case MEDIA_ENT_F_DTV_DEMOD:
+ demod = entity;
+ break;
+ }
+ }
+
+ /* Disable link between tuner->demod and/or tuner->decoder */
+ if (dev->tuner) {
+ list_for_each_entry(link, &dev->tuner->links, list) {
+ if (demod && link->sink->entity == demod)
+ media_entity_setup_link(link, 0);
+ if (dev->decoder && link->sink->entity == dev->decoder)
+ media_entity_setup_link(link, 0);
+ }
+ }
+
+ /* register entity_notify callback */
+ dev->entity_notify.notify_data = (void *) dev;
+ dev->entity_notify.notify = (void *) au0828_media_graph_notify;
+ ret = media_device_register_entity_notify(dev->media_dev,
+ &dev->entity_notify);
+ if (ret) {
+ dev_err(&udev->dev,
+ "Media Device register entity_notify Error: %d\n",
+ ret);
+ return ret;
+ }
+ /* set enable_source */
+ dev->media_dev->source_priv = (void *) dev;
+ dev->media_dev->enable_source = au0828_enable_source;
+ dev->media_dev->disable_source = au0828_disable_source;
+#endif
+ return 0;
+}
static int au0828_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
@@ -306,9 +644,7 @@ static int au0828_usb_probe(struct usb_interface *interface,
mutex_unlock(&dev->lock);
-#ifdef CONFIG_MEDIA_CONTROLLER
- retval = media_device_register(dev->media_dev);
-#endif
+ retval = au0828_media_device_register(dev, usbdev);
done:
if (retval < 0)
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index b0f067971979..3d6687f0407d 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
bool first = true;
/* do nothing if device is disconnected */
- if (ir->dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
return 0;
/* Check IR int */
@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
/* do nothing if device is disconnected */
- if (ir->dev->dev_state != DEV_DISCONNECTED) {
+ if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
/* Disable IR */
au8522_rc_clear(ir, 0xe0, 1 << 4);
}
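The dev_state conversion in this series replaces a plain enum bitmask with atomic bitops on an unsigned long, so the IR poll worker, URB completion and ioctl paths can all update the flags without taking a lock. Note that the enum members become bit numbers, not masks (see the au0828.h hunk below). A minimal, driver-independent sketch of the idiom:

	#include <linux/bitops.h>

	enum { STATE_INIT = 0, STATE_GONE = 1 };	/* bit numbers, not masks */

	static unsigned long state;	/* bitops require an unsigned long */

	static void example(void)
	{
		set_bit(STATE_INIT, &state);		/* atomic set */
		if (test_bit(STATE_GONE, &state))	/* atomic test */
			return;
		clear_bit(STATE_INIT, &state);		/* atomic clear */
	}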
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 2fc2b29d2dd9..32d7db96479c 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -28,12 +28,14 @@
*/
#include "au0828.h"
+#include "au8522.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-mc.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/tuner.h>
@@ -104,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev,
static int check_dev(struct au0828_dev *dev)
{
- if (dev->dev_state & DEV_DISCONNECTED) {
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
pr_info("v4l2 ioctl: device not present\n");
return -ENODEV;
}
- if (dev->dev_state & DEV_MISCONFIGURED) {
- pr_info("v4l2 ioctl: device is misconfigured; "
- "close and open it again\n");
+ if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
+ pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
return -EIO;
}
return 0;
@@ -519,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
if (!dev)
return 0;
- if ((dev->dev_state & DEV_DISCONNECTED) ||
- (dev->dev_state & DEV_MISCONFIGURED))
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
+ test_bit(DEV_MISCONFIGURED, &dev->dev_state))
return 0;
if (urb->status < 0) {
@@ -651,81 +652,6 @@ void au0828_usb_v4l2_media_release(struct au0828_dev *dev)
#endif
}
-static int au0828_create_media_graph(struct au0828_dev *dev)
-{
-#ifdef CONFIG_MEDIA_CONTROLLER
- struct media_device *mdev = dev->media_dev;
- struct media_entity *entity;
- struct media_entity *tuner = NULL, *decoder = NULL;
- int i, ret;
-
- if (!mdev)
- return 0;
-
- media_device_for_each_entity(entity, mdev) {
- switch (entity->function) {
- case MEDIA_ENT_F_TUNER:
- tuner = entity;
- break;
- case MEDIA_ENT_F_ATV_DECODER:
- decoder = entity;
- break;
- }
- }
-
- /* Analog setup, using tuner as a link */
-
- /* Something bad happened! */
- if (!decoder)
- return -EINVAL;
-
- if (tuner) {
- ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
- decoder, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
- return ret;
- }
- ret = media_create_pad_link(decoder, 1, &dev->vdev.entity, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
- return ret;
- ret = media_create_pad_link(decoder, 2, &dev->vbi_dev.entity, 0,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
- return ret;
-
- for (i = 0; i < AU0828_MAX_INPUT; i++) {
- struct media_entity *ent = &dev->input_ent[i];
-
- switch (AUVI_INPUT(i).type) {
- case AU0828_VMUX_UNDEFINED:
- break;
- case AU0828_VMUX_CABLE:
- case AU0828_VMUX_TELEVISION:
- case AU0828_VMUX_DVB:
- if (!tuner)
- break;
-
- ret = media_create_pad_link(ent, 0, tuner,
- TUNER_PAD_RF_INPUT,
- MEDIA_LNK_FL_ENABLED);
- if (ret)
- return ret;
- break;
- case AU0828_VMUX_COMPOSITE:
- case AU0828_VMUX_SVIDEO:
- /* FIXME: fix the decoder PAD */
- ret = media_create_pad_link(ent, 0, decoder, 0, 0);
- if (ret)
- return ret;
- break;
- }
- }
-#endif
- return 0;
-}
-
static void au0828_usb_v4l2_release(struct v4l2_device *v4l2_dev)
{
struct au0828_dev *dev =
@@ -774,64 +700,6 @@ int au0828_v4l2_device_register(struct usb_interface *interface,
return 0;
}
-static int au0828_enable_analog_tuner(struct au0828_dev *dev)
-{
-#ifdef CONFIG_MEDIA_CONTROLLER
- struct media_device *mdev = dev->media_dev;
- struct media_entity *source;
- struct media_link *link, *found_link = NULL;
- int ret, active_links = 0;
-
- if (!mdev || !dev->decoder)
- return 0;
-
- /*
- * This will find the tuner that is connected into the decoder.
- * Technically, this is not 100% correct, as the device may be
- * using an analog input instead of the tuner. However, as we can't
- * do DVB streaming while the DMA engine is being used for V4L2,
- * this should be enough for the actual needs.
- */
- list_for_each_entry(link, &dev->decoder->links, list) {
- if (link->sink->entity == dev->decoder) {
- found_link = link;
- if (link->flags & MEDIA_LNK_FL_ENABLED)
- active_links++;
- break;
- }
- }
-
- if (active_links == 1 || !found_link)
- return 0;
-
- source = found_link->source->entity;
- list_for_each_entry(link, &source->links, list) {
- struct media_entity *sink;
- int flags = 0;
-
- sink = link->sink->entity;
-
- if (sink == dev->decoder)
- flags = MEDIA_LNK_FL_ENABLED;
-
- ret = media_entity_setup_link(link, flags);
- if (ret) {
- pr_err(
- "Couldn't change link %s->%s to %s. Error %d\n",
- source->name, sink->name,
- flags ? "enabled" : "disabled",
- ret);
- return ret;
- } else
- au0828_isocdbg(
- "link %s->%s was %s\n",
- source->name, sink->name,
- flags ? "ENABLED" : "disabled");
- }
-#endif
- return 0;
-}
-
static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
@@ -843,9 +711,6 @@ static int queue_setup(struct vb2_queue *vq,
return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
sizes[0] = size;
-
- au0828_enable_analog_tuner(dev);
-
return 0;
}
@@ -958,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
int ret = 0;
dev->stream_state = STREAM_INTERRUPT;
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
return -ENODEV;
else if (ret) {
- dev->dev_state = DEV_MISCONFIGURED;
+ set_bit(DEV_MISCONFIGURED, &dev->dev_state);
dprintk(1, "%s device is misconfigured!\n", __func__);
return ret;
}
@@ -1160,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp)
int ret;
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -1179,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp)
au0828_analog_stream_enable(dev);
au0828_analog_stream_reset(dev);
dev->stream_state = STREAM_OFF;
- dev->dev_state |= DEV_INITIALIZED;
+ set_bit(DEV_INITIALIZED, &dev->dev_state);
}
dev->users++;
mutex_unlock(&dev->lock);
@@ -1193,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp)
struct video_device *vdev = video_devdata(filp);
dprintk(1,
- "%s called std_set %d dev_state %d stream users %d users %d\n",
+ "%s called std_set %d dev_state %ld stream users %d users %d\n",
__func__, dev->std_set_in_tuner_core, dev->dev_state,
dev->streaming_users, dev->users);
@@ -1209,12 +1074,43 @@ static int au0828_v4l2_close(struct file *filp)
del_timer_sync(&dev->vbi_timeout);
}
- if (dev->dev_state == DEV_DISCONNECTED)
+ if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
goto end;
if (dev->users == 1) {
- /* Save some power by putting tuner to sleep */
- v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_power, 0);
+ /*
+ * Avoid putting the tuner to sleep if DVB or ALSA are
+ * streaming.
+ *
+ * On most USB devices like au0828 the tuner can
+ * be safely put in sleep state here if ALSA isn't
+ * streaming. Exceptions are some very old USB tuner
+ * models such as em28xx-based WinTV USB2 which have
+ * a separate audio output jack. The devices that have
+ * a separate audio output jack have analog tuners,
+ * like Philips FM1236. Those devices are always on,
+ * so the s_power callback is silently ignored.
+ * So, the current logic here does the following:
+ * Disable (put tuner to sleep) when
+ * - ALSA and DVB aren't streaming;
+ * - the last V4L2 file handle is closed.
+ *
+ * FIXME:
+ *
+ * Additionally, this logic could be improved to
+ * disable the media source if the above conditions
+ * are met and if the device:
+ * - doesn't have a separate audio out plug, or
+ * - doesn't use a silicon tuner like xc2028/3028/4000/5000.
+ *
+ * Once this additional logic is in place, a callback
+ * is needed to enable the media source and power on
+ * the tuner, for radio to work.
+ */
+ ret = v4l_enable_media_source(vdev);
+ if (ret == 0)
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core,
+ s_power, 0);
dev->std_set_in_tuner_core = 0;
/* When close the device, set the usb intf0 into alt0 to free
@@ -1238,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
.type = V4L2_TUNER_ANALOG_TV,
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (dev->std_set_in_tuner_core)
@@ -1310,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strlcpy(cap->driver, "au0828", sizeof(cap->driver));
@@ -1353,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
f->fmt.pix.width = dev->width;
@@ -1372,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
@@ -1384,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct au0828_dev *dev = video_drvdata(file);
int rc;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
rc = check_dev(dev);
@@ -1406,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
if (norm == dev->std)
@@ -1438,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*norm = dev->std;
@@ -1460,7 +1356,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
[AU0828_VMUX_DVB] = "DVB",
};
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
tmp = input->index;
@@ -1490,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
*i = dev->ctrl_input;
@@ -1501,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
{
int i;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
switch (AUVI_INPUT(index).type) {
@@ -1520,9 +1416,11 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
default:
dprintk(1, "unknown input type set [%d]\n",
AUVI_INPUT(index).type);
- break;
+ return;
}
+ dev->ctrl_input = index;
+
v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_routing,
AUVI_INPUT(index).vmux, 0, 0);
@@ -1554,6 +1452,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
{
struct au0828_dev *dev = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
dprintk(1, "VIDIOC_S_INPUT in function %s, input=%d\n", __func__,
index);
@@ -1561,9 +1460,19 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
return -EINVAL;
if (AUVI_INPUT(index).type == 0)
return -EINVAL;
- dev->ctrl_input = index;
+
+ if (dev->ctrl_input == index)
+ return 0;
+
au0828_s_input(dev, index);
- return 0;
+
+ /*
+ * The input has been changed. Disable the media source
+ * associated with the old input and enable the source
+ * for the newly selected input.
+ */
+ v4l_disable_media_source(vfd);
+ return v4l_enable_media_source(vfd);
}
static int vidioc_enumaudio(struct file *file, void *priv, struct v4l2_audio *a)
@@ -1586,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
a->index = dev->ctrl_ainput;
@@ -1606,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
if (a->index != dev->ctrl_ainput)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return 0;
}
@@ -1614,11 +1523,17 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
struct au0828_dev *dev = video_drvdata(file);
+ struct video_device *vfd = video_devdata(file);
+ int ret;
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
+
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
strcpy(t->name, "Auvitek tuner");
@@ -1638,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
if (t->index != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1660,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
freq->frequency = dev->ctrl_freq;
return 0;
@@ -1675,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
if (freq->tuner != 0)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
au0828_init_tuner(dev);
@@ -1701,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
format->fmt.vbi.samples_per_line = dev->vbi_width;
@@ -1727,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
cc->bounds.left = 0;
@@ -1749,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
reg->val = au0828_read(dev, reg->reg);
@@ -1762,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv,
{
struct au0828_dev *dev = video_drvdata(file);
- dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
+ dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
return au0828_writereg(dev, reg->reg, reg->val);
@@ -2071,6 +1986,7 @@ int au0828_analog_register(struct au0828_dev *dev,
dev->ctrl_ainput = 0;
dev->ctrl_freq = 960;
dev->std = V4L2_STD_NTSC_M;
+ /* Default input is TV Tuner */
au0828_s_input(dev, 0);
mutex_init(&dev->vb_queue_lock);
@@ -2122,14 +2038,16 @@ int au0828_analog_register(struct au0828_dev *dev,
ret = -ENODEV;
goto err_reg_vbi_dev;
}
- retval = au0828_create_media_graph(dev);
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+ retval = v4l2_mc_create_media_graph(dev->media_dev);
if (retval) {
pr_err("%s() au0282_dev_register failed to create graph\n",
__func__);
ret = -ENODEV;
goto err_reg_vbi_dev;
}
-
+#endif
dprintk(1, "%s completed!\n", __func__);
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 23f869cf11da..87f32846f1c0 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -121,9 +122,9 @@ enum au0828_stream_state {
/* device state */
enum au0828_dev_state {
- DEV_INITIALIZED = 0x01,
- DEV_DISCONNECTED = 0x02,
- DEV_MISCONFIGURED = 0x04
+ DEV_INITIALIZED = 0,
+ DEV_DISCONNECTED = 1,
+ DEV_MISCONFIGURED = 2
};
struct au0828_dev;
@@ -247,7 +248,7 @@ struct au0828_dev {
int input_type;
int std_set_in_tuner_core;
unsigned int ctrl_input;
- enum au0828_dev_state dev_state;
+ unsigned long dev_state; /* bits defined by enum au0828_dev_state */
enum au0828_stream_state stream_state;
wait_queue_head_t open;
@@ -282,6 +283,12 @@ struct au0828_dev {
struct media_entity *decoder;
struct media_entity input_ent[AU0828_MAX_INPUT];
struct media_pad input_pad[AU0828_MAX_INPUT];
+ struct media_entity_notify entity_notify;
+ struct media_entity *tuner;
+ struct media_link *active_link;
+ struct media_entity *active_link_owner;
+ struct media_entity *active_source;
+ struct media_entity *active_sink;
#endif
};
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 9e3a5d2038c2..c63248a18823 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1212,10 +1212,12 @@ static int cx231xx_media_device_init(struct cx231xx *dev,
#ifdef CONFIG_MEDIA_CONTROLLER
struct media_device *mdev;
- mdev = v4l2_mc_usb_media_device_init(udev, dev->board.name);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
+ media_device_usb_init(mdev, udev, dev->board.name);
+
dev->media_dev = mdev;
#endif
return 0;
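This hunk is the first of several below (dvb-usb-v2, dvb-usb, em28xx, siano) applying the same conversion: the removed v4l2_mc_usb_media_device_init() helper, which allocated and initialized in one call, is replaced by an explicit kzalloc() plus media_device_usb_init(). A hedged sketch of the new two-step pattern:

	#include <linux/slab.h>
	#include <linux/usb.h>
	#include <media/media-device.h>

	/* Sketch only; udev and name come from the caller. */
	static struct media_device *mydrv_mdev_alloc(struct usb_device *udev,
						     const char *name)
	{
		struct media_device *mdev;

		mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
		if (!mdev)
			return NULL;

		/* Fills dev/model/serial/bus_info, then media_device_init() */
		media_device_usb_init(mdev, udev, name);
		return mdev;
	}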
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index b3c09fe54d9b..2638e3251f2a 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -2053,6 +2053,8 @@ static const struct usb_device_id af9035_id_table[] = {
&af9035_props, "Avermedia A835B(3835)", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_4835,
&af9035_props, "Avermedia A835B(4835)", RC_MAP_IT913X_V2) },
+ { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD110,
+ &af9035_props, "Avermedia AverTV Volar HD 2 (TD110)", RC_MAP_AVERMEDIA_RM_KS) },
{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335,
&af9035_props, "Avermedia H335", RC_MAP_IT913X_V2) },
{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB499_2T_T09,
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 4a8769781cea..3fbb2cd19f5e 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -20,7 +20,7 @@
*/
#include "dvb_usb_common.h"
-#include <media/v4l2-mc.h>
+#include <media/media-device.h>
static int dvb_usbv2_disable_rc_polling;
module_param_named(disable_rc_polling, dvb_usbv2_disable_rc_polling, int, 0644);
@@ -408,10 +408,12 @@ static int dvb_usbv2_media_device_init(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d = adap_to_d(adap);
struct usb_device *udev = d->udev;
- mdev = v4l2_mc_usb_media_device_init(udev, d->name);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
+ media_device_usb_init(mdev, udev, d->name);
+
dvb_register_media_controller(&adap->dvb_adap, mdev);
dev_info(&d->udev->dev, "media controller created\n");
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index c4c6e92e8643..fa72642d41f3 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1571,19 +1571,19 @@ static int rtl28xxu_frontend_ctrl(struct dvb_frontend *fe, int onoff)
if (dev->chip_id == CHIP_ID_RTL2831U)
return 0;
- /* control internal demod ADC */
- if (fe->id == 0 && onoff)
- val = 0x48; /* enable ADC */
- else
- val = 0x00; /* disable ADC */
-
- ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL, val, 0x48);
- if (ret)
- goto err;
+ if (fe->id == 0) {
+ /* control internal demod ADC */
+ if (onoff)
+ val = 0x48; /* enable ADC */
+ else
+ val = 0x00; /* disable ADC */
- /* bypass slave demod TS through master demod */
- if (fe->id == 1 && onoff) {
- ret = pdata->enable_slave_ts(dev->i2c_client_demod);
+ ret = rtl28xxu_wr_reg_mask(d, SYS_DEMOD_CTL, val, 0x48);
+ if (ret)
+ goto err;
+ } else if (fe->id == 1) {
+ /* bypass slave demod TS through master demod */
+ ret = pdata->slave_ts_ctrl(dev->i2c_client_demod, onoff);
if (ret)
goto err;
}
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index 513b0c14e4f0..6477b04e95c7 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -7,7 +7,7 @@
* linux-dvb API.
*/
#include "dvb-usb-common.h"
-#include <media/v4l2-mc.h>
+#include <media/media-device.h>
/* does the complete input transfer handling */
static int dvb_usb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
@@ -103,7 +103,11 @@ static int dvb_usb_media_device_init(struct dvb_usb_adapter *adap)
struct dvb_usb_device *d = adap->dev;
struct usb_device *udev = d->udev;
- mdev = v4l2_mc_usb_media_device_init(udev, d->desc->name);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ media_device_usb_init(mdev, udev, d->desc->name);
dvb_register_media_controller(&adap->dvb_adap, mdev);
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index dd46d6c78c4e..6d0dd859d684 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1,9 +1,10 @@
/* DVB USB framework compliant Linux driver for the
* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
- * TeVii S600, S630, S650, S660, S480, S421, S632
+ * TeVii S421, S480, S482, S600, S630, S632, S650, S660, S662,
* Prof 1100, 7500,
* Geniatech SU3000, T220,
- * TechnoTrend S2-4600 Cards
+ * TechnoTrend S2-4600,
+ * Terratec Cinergy S2 cards
* Copyright (C) 2008-2012 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
@@ -33,7 +34,6 @@
#include "tda18271.h"
#include "cxd2820r.h"
#include "m88ds3103.h"
-#include "ts2020.h"
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
@@ -66,6 +66,10 @@
#define USB_PID_TEVII_S660 0xd660
#endif
+#ifndef USB_PID_TEVII_S662
+#define USB_PID_TEVII_S662 0xd662
+#endif
+
#ifndef USB_PID_TEVII_S480_1
#define USB_PID_TEVII_S480_1 0xd481
#endif
@@ -118,6 +122,7 @@
struct dw2102_state {
u8 initialized;
u8 last_lock;
+ struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
/* fe hook functions*/
@@ -1141,22 +1146,6 @@ static struct tda18271_config tda18271_config = {
.gate = TDA18271_GATE_DIGITAL,
};
-static const struct m88ds3103_config tt_s2_4600_m88ds3103_config = {
- .i2c_addr = 0x68,
- .clock = 27000000,
- .i2c_wr_max = 33,
- .ts_mode = M88DS3103_TS_CI,
- .ts_clk = 16000,
- .ts_clk_pol = 0,
- .spec_inv = 0,
- .agc_inv = 0,
- .clock_out = M88DS3103_CLOCK_OUT_ENABLED,
- .envelope_mode = 0,
- .agc = 0x99,
- .lnb_hv_pol = 1,
- .lnb_en_pol = 0,
-};
-
static u8 m88rs2000_inittab[] = {
DEMOD_WRITE, 0x9a, 0x30,
DEMOD_WRITE, 0x00, 0x01,
@@ -1509,7 +1498,8 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
u8 ibuf[] = { 0 };
struct i2c_adapter *i2c_adapter;
struct i2c_client *client;
- struct i2c_board_info info;
+ struct i2c_board_info board_info;
+ struct m88ds3103_platform_data m88ds3103_pdata = {};
struct ts2020_config ts2020_config = {};
if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
@@ -1542,22 +1532,44 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
err("command 0x51 transfer failed.");
- memset(&info, 0, sizeof(struct i2c_board_info));
-
- adap->fe_adap[0].fe = dvb_attach(m88ds3103_attach,
- &tt_s2_4600_m88ds3103_config,
- &d->i2c_adap,
- &i2c_adapter);
- if (adap->fe_adap[0].fe == NULL)
+ /* attach demod */
+ m88ds3103_pdata.clk = 27000000;
+ m88ds3103_pdata.i2c_wr_max = 33;
+ m88ds3103_pdata.ts_mode = M88DS3103_TS_CI;
+ m88ds3103_pdata.ts_clk = 16000;
+ m88ds3103_pdata.ts_clk_pol = 0;
+ m88ds3103_pdata.spec_inv = 0;
+ m88ds3103_pdata.agc = 0x99;
+ m88ds3103_pdata.agc_inv = 0;
+ m88ds3103_pdata.clk_out = M88DS3103_CLOCK_OUT_ENABLED;
+ m88ds3103_pdata.envelope_mode = 0;
+ m88ds3103_pdata.lnb_hv_pol = 1;
+ m88ds3103_pdata.lnb_en_pol = 0;
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "m88ds3103", I2C_NAME_SIZE);
+ board_info.addr = 0x68;
+ board_info.platform_data = &m88ds3103_pdata;
+ request_module("m88ds3103");
+ client = i2c_new_device(&d->i2c_adap, &board_info);
+ if (client == NULL || client->dev.driver == NULL)
return -ENODEV;
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ return -ENODEV;
+ }
+ adap->fe_adap[0].fe = m88ds3103_pdata.get_dvb_frontend(client);
+ i2c_adapter = m88ds3103_pdata.get_i2c_adapter(client);
+
+ state->i2c_client_demod = client;
/* attach tuner */
ts2020_config.fe = adap->fe_adap[0].fe;
- strlcpy(info.type, "ts2022", I2C_NAME_SIZE);
- info.addr = 0x60;
- info.platform_data = &ts2020_config;
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "ts2022", I2C_NAME_SIZE);
+ board_info.addr = 0x60;
+ board_info.platform_data = &ts2020_config;
request_module("ts2020");
- client = i2c_new_device(i2c_adapter, &info);
+ client = i2c_new_device(i2c_adapter, &board_info);
if (client == NULL || client->dev.driver == NULL) {
dvb_frontend_detach(adap->fe_adap[0].fe);
@@ -1689,6 +1701,7 @@ enum dw2102_table_entry {
TEVII_S482_1,
TEVII_S482_2,
TERRATEC_CINERGY_S2_BOX,
+ TEVII_S662
};
static struct usb_device_id dw2102_table[] = {
@@ -1717,6 +1730,7 @@ static struct usb_device_id dw2102_table[] = {
[TEVII_S482_1] = {USB_DEVICE(0x9022, 0xd483)},
[TEVII_S482_2] = {USB_DEVICE(0x9022, 0xd484)},
[TERRATEC_CINERGY_S2_BOX] = {USB_DEVICE(USB_VID_TERRATEC, 0x0105)},
+ [TEVII_S662] = {USB_DEVICE(0x9022, USB_PID_TEVII_S662)},
{ }
};
@@ -2234,7 +2248,7 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
} },
}
},
- .num_device_descs = 4,
+ .num_device_descs = 5,
.devices = {
{ "TechnoTrend TT-connect S2-4600",
{ &dw2102_table[TECHNOTREND_S2_4600], NULL },
@@ -2252,6 +2266,10 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
{ &dw2102_table[TERRATEC_CINERGY_S2_BOX], NULL },
{ NULL },
},
+ { "TeVii S662",
+ { &dw2102_table[TEVII_S662], NULL },
+ { NULL },
+ },
}
};
@@ -2350,6 +2368,13 @@ static void dw2102_disconnect(struct usb_interface *intf)
i2c_unregister_device(client);
}
+ /* remove I2C client for demodulator */
+ client = st->i2c_client_demod;
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+
dvb_usb_device_exit(intf);
}
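The demod handling above follows the standard I2C client binding dance used across the DVB drivers: fill an i2c_board_info with platform data, instantiate the client, pin the client driver's module, and undo both on disconnect. A condensed sketch of the two halves (error paths trimmed; pdata and the address are device-specific):

	/* Bind: */
	struct i2c_board_info bi = {};
	struct i2c_client *client;

	strlcpy(bi.type, "m88ds3103", I2C_NAME_SIZE);
	bi.addr = 0x68;
	bi.platform_data = &pdata;
	request_module("m88ds3103");		/* ensure the driver is loaded */
	client = i2c_new_device(adapter, &bi);
	if (!client || !client->dev.driver)
		return -ENODEV;
	if (!try_module_get(client->dev.driver->owner)) {
		i2c_unregister_device(client);
		return -ENODEV;
	}

	/* Unbind, in reverse order: */
	module_put(client->dev.driver->owner);
	i2c_unregister_device(client);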
@@ -2365,10 +2390,10 @@ module_usb_driver(dw2102_driver);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
" DVB-C 3101 USB2.0,"
- " TeVii S600, S630, S650, S660, S480, S421, S632"
- " Prof 1100, 7500 USB2.0,"
+ " TeVii S421, S480, S482, S600, S630, S632, S650,"
+ " TeVii S660, S662, Prof 1100, 7500 USB2.0,"
" Geniatech SU3000, T220,"
- " TechnoTrend S2-4600 devices");
+ " TechnoTrend S2-4600, Terratec Cinergy S2 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(DW2101_FIRMWARE);
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 51487d2f7764..d9f3262bf071 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -60,6 +60,8 @@ struct technisat_usb2_state {
u8 power_state;
u16 last_scan_code;
+
+ u8 buf[64];
};
/* debug print helpers */
@@ -220,19 +222,19 @@ enum technisat_usb2_led_state {
TECH_LED_UNDEFINED
};
-static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum technisat_usb2_led_state state)
+static int technisat_usb2_set_led(struct dvb_usb_device *d, int red,
+ enum technisat_usb2_led_state st)
{
+ struct technisat_usb2_state *state = d->priv;
+ u8 *led = state->buf;
int ret;
- u8 led[8] = {
- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
- 0
- };
+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
- if (disable_led_control && state != TECH_LED_OFF)
+ if (disable_led_control && st != TECH_LED_OFF)
return 0;
- switch (state) {
+ switch (st) {
case TECH_LED_ON:
led[1] = 0x82;
break;
@@ -263,7 +265,7 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
USB_TYPE_VENDOR | USB_DIR_OUT,
0, 0,
- led, sizeof(led), 500);
+ led, 8, 500);
mutex_unlock(&d->i2c_mutex);
return ret;
@@ -271,8 +273,11 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
{
+ struct technisat_usb2_state *state = d->priv;
+ u8 *b = state->buf;
int ret;
- u8 b = 0;
+
+ b[0] = 0;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
@@ -281,7 +286,7 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
USB_TYPE_VENDOR | USB_DIR_OUT,
(red << 8) | green, 0,
- &b, 1, 500);
+ b, 1, 500);
mutex_unlock(&d->i2c_mutex);
@@ -328,7 +333,11 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
struct dvb_usb_device_description **desc, int *cold)
{
int ret;
- u8 version[3];
+ u8 *version;
+
+ version = kmalloc(3, GFP_KERNEL);
+ if (!version)
+ return -ENOMEM;
/* first select the interface */
if (usb_set_interface(udev, 0, 1) != 0)
@@ -342,7 +351,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
GET_VERSION_INFO_VENDOR_REQUEST,
USB_TYPE_VENDOR | USB_DIR_IN,
0, 0,
- version, sizeof(version), 500);
+ version, 3, 500);
if (ret < 0)
*cold = 1;
@@ -351,6 +360,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
*cold = 0;
}
+ kfree(version);
+
return 0;
}
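The buffer rework in this driver (and the gspca usb_buf changes further down) enforces the same rule: buffers handed to usb_control_msg() must be DMA-capable, so on-stack arrays are replaced by kmalloc()ed or per-device state buffers. A minimal sketch of the safe pattern, with a placeholder vendor request:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/usb.h>

	static int read_version(struct usb_device *udev, u8 *out)
	{
		u8 *buf;
		int ret;

		buf = kmalloc(3, GFP_KERNEL);	/* heap, never the stack */
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      0x01 /* placeholder request */,
				      USB_TYPE_VENDOR | USB_DIR_IN,
				      0, 0, buf, 3, 500);
		if (ret >= 0)
			memcpy(out, buf, 3);

		kfree(buf);
		return ret < 0 ? ret : 0;
	}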
@@ -594,7 +605,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
static int technisat_usb2_get_ir(struct dvb_usb_device *d)
{
- u8 buf[62], *b;
+ struct technisat_usb2_state *state = d->priv;
+ u8 *buf = state->buf;
+ u8 *b;
int ret;
struct ir_raw_event ev;
@@ -620,7 +633,7 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
GET_IR_DATA_VENDOR_REQUEST,
USB_TYPE_VENDOR | USB_DIR_IN,
0x8080, 0,
- buf, sizeof(buf), 500);
+ buf, 62, 500);
unlock:
mutex_unlock(&d->i2c_mutex);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 389e95fb0211..930e3e3fc948 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -37,6 +37,7 @@
#include <media/i2c-addr.h>
#include <media/tveeprom.h>
#include <media/v4l2-common.h>
+#include <sound/ac97_codec.h>
#include "em28xx.h"
@@ -560,6 +561,16 @@ static struct em28xx_led pctv_80e_leds[] = {
{-1, 0, 0, 0},
};
+static struct em28xx_led terratec_grabby_leds[] = {
+ {
+ .role = EM28XX_LED_ANALOG_CAPTURING,
+ .gpio_reg = EM2820_R08_GPIO_CTRL,
+ .gpio_mask = EM_GPIO_3,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0},
+};
+
/*
* Board definitions
*/
@@ -2015,6 +2026,8 @@ struct em28xx_board em28xx_boards[] = {
.vmux = SAA7115_SVIDEO3,
.amux = EM28XX_AMUX_LINE_IN,
} },
+ .buttons = std_snapshot_button,
+ .leds = terratec_grabby_leds,
},
[EM2860_BOARD_TERRATEC_AV350] = {
.name = "Terratec AV350",
@@ -2551,6 +2564,36 @@ static inline void em28xx_set_model(struct em28xx *dev)
dev->def_i2c_bus = dev->board.def_i2c_bus;
}
+/* Wait until AC97_RESET reports the expected value reliably before proceeding.
+ * We also check that two unrelated register accesses don't return the same
+ * value, to avoid returning prematurely.
+ * This procedure helps ensure that AC97 register accesses are reliable.
+ */
+static int em28xx_wait_until_ac97_features_equals(struct em28xx *dev,
+ int expected_feat)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(2000);
+ int feat, powerdown;
+
+ while (time_is_after_jiffies(timeout)) {
+ feat = em28xx_read_ac97(dev, AC97_RESET);
+ if (feat < 0)
+ return feat;
+
+ powerdown = em28xx_read_ac97(dev, AC97_POWERDOWN);
+ if (powerdown < 0)
+ return powerdown;
+
+ if (feat == expected_feat && feat != powerdown)
+ return 0;
+
+ msleep(50);
+ }
+
+ em28xx_warn("AC97 registers access is not reliable !\n");
+ return -ETIMEDOUT;
+}
+
/* Since em28xx_pre_card_setup() requires a proper dev->model,
* this won't work for boards with generic PCI IDs
*/
@@ -2656,6 +2699,13 @@ static void em28xx_pre_card_setup(struct em28xx *dev)
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd);
msleep(70);
break;
+
+ case EM2860_BOARD_TERRATEC_GRABBY:
+ /* HACK?: Ensure AC97 register reading is reliable before
+ * proceeding. In practice, this will wait about 1.6 seconds.
+ */
+ em28xx_wait_until_ac97_features_equals(dev, 0x6a90);
+ break;
}
em28xx_gpio_set(dev, dev->board.tuner_gpio);
@@ -3019,17 +3069,17 @@ static int em28xx_media_device_init(struct em28xx *dev,
#ifdef CONFIG_MEDIA_CONTROLLER
struct media_device *mdev;
- if (udev->product) {
- mdev = v4l2_mc_usb_media_device_init(udev, udev->product);
- } else if (udev->manufacturer) {
- mdev = v4l2_mc_usb_media_device_init(udev, udev->manufacturer);
- } else {
- mdev = v4l2_mc_usb_media_device_init(udev, dev->name);
- }
-
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
+ if (udev->product)
+ media_device_usb_init(mdev, udev, udev->product);
+ else if (udev->manufacturer)
+ media_device_usb_init(mdev, udev, udev->manufacturer);
+ else
+ media_device_usb_init(mdev, udev, dev->name);
+
dev->media_dev = mdev;
#endif
return 0;
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index f772e2612608..44834b2eff55 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -2714,12 +2714,14 @@ static int em28xx_v4l2_init(struct em28xx *dev)
/* Init entities at the Media Controller */
em28xx_v4l2_create_entities(dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
ret = v4l2_mc_create_media_graph(dev->media_dev);
if (ret) {
em28xx_errdev("failed to create media graph\n");
em28xx_v4l2_media_release(dev);
goto unregister_dev;
}
+#endif
em28xx_info("V4L2 video device registered as %s\n",
video_device_node_name(&v4l2->vdev));
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index c95f32a0c02b..965372a5ff2f 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -360,40 +360,6 @@ static const struct v4l2_pix_format ov511_sif_mode[] = {
.priv = 0},
};
-static const struct v4l2_pix_format ovfx2_vga_mode[] = {
- {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 320,
- .sizeimage = 320 * 240,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 1},
- {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 640,
- .sizeimage = 640 * 480,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 0},
-};
-static const struct v4l2_pix_format ovfx2_cif_mode[] = {
- {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 160,
- .sizeimage = 160 * 120,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 3},
- {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 176,
- .sizeimage = 176 * 144,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 1},
- {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 320,
- .sizeimage = 320 * 240,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 2},
- {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
- .bytesperline = 352,
- .sizeimage = 352 * 288,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .priv = 0},
-};
static const struct v4l2_pix_format ovfx2_ov2610_mode[] = {
{800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 800,
@@ -2042,6 +2008,9 @@ static void reg_w(struct sd *sd, u16 index, u16 value)
if (sd->gspca_dev.usb_err < 0)
return;
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
+
switch (sd->bridge) {
case BRIDGE_OV511:
case BRIDGE_OV511PLUS:
@@ -2103,6 +2072,8 @@ static int reg_r(struct sd *sd, u16 index)
req = 1;
}
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
ret = usb_control_msg(sd->gspca_dev.dev,
usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
req,
@@ -2131,6 +2102,8 @@ static int reg_r8(struct sd *sd,
if (sd->gspca_dev.usb_err < 0)
return -1;
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
ret = usb_control_msg(sd->gspca_dev.dev,
usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
1, /* REQ_IO */
@@ -2187,6 +2160,8 @@ static void ov518_reg_w32(struct sd *sd, u16 index, u32 value, int n)
*((__le32 *) sd->gspca_dev.usb_buf) = __cpu_to_le32(value);
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
ret = usb_control_msg(sd->gspca_dev.dev,
usb_sndctrlpipe(sd->gspca_dev.dev, 0),
1 /* REG_IO */,
diff --git a/drivers/media/usb/gspca/touptek.c b/drivers/media/usb/gspca/touptek.c
index 7bac6bc96063..b8af4370d27c 100644
--- a/drivers/media/usb/gspca/touptek.c
+++ b/drivers/media/usb/gspca/touptek.c
@@ -203,7 +203,7 @@ static int val_reply(struct gspca_dev *gspca_dev, const char *reply, int rc)
return -EIO;
}
if (reply[0] != 0x08) {
- PERR("Bad reply 0x%02X", reply[0]);
+ PERR("Bad reply 0x%02x", (int)reply[0]);
return -EIO;
}
return 0;
@@ -211,7 +211,7 @@ static int val_reply(struct gspca_dev *gspca_dev, const char *reply, int rc)
static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
{
- char buff[1];
+ char *buff = gspca_dev->usb_buf;
int rc;
PDEBUG(D_USBO,
@@ -219,7 +219,7 @@ static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
value, index);
rc = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0),
0x0B, 0xC0, value, index, buff, 1, 500);
- PDEBUG(D_USBO, "rc=%d, ret={0x%02X}", rc, buff[0]);
+ PDEBUG(D_USBO, "rc=%d, ret={0x%02x}", rc, (int)buff[0]);
if (rc < 0) {
PERR("Failed reg_w(0x0B, 0xC0, 0x%04X, 0x%04X) w/ rc %d\n",
value, index, rc);
@@ -438,7 +438,7 @@ static void configure_encrypted(struct gspca_dev *gspca_dev)
static int configure(struct gspca_dev *gspca_dev)
{
int rc;
- uint8_t buff[4];
+ char *buff = gspca_dev->usb_buf;
PDEBUG(D_STREAM, "configure()\n");
diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
index fb9fe2ef3a6f..896f1b2b9179 100644
--- a/drivers/media/usb/gspca/w996Xcf.c
+++ b/drivers/media/usb/gspca/w996Xcf.c
@@ -79,6 +79,8 @@ static void w9968cf_write_fsb(struct sd *sd, u16* data)
value = *data++;
memcpy(sd->gspca_dev.usb_buf, data, 6);
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
value, 0x06, sd->gspca_dev.usb_buf, 6, 500);
@@ -99,6 +101,9 @@ static void w9968cf_write_sb(struct sd *sd, u16 value)
if (sd->gspca_dev.usb_err < 0)
return;
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
+
/* We don't use reg_w here, as that would cause all writes when
bitbanging i2c to be logged, making the logs impossible to read */
ret = usb_control_msg(sd->gspca_dev.dev,
@@ -126,6 +131,9 @@ static int w9968cf_read_sb(struct sd *sd)
if (sd->gspca_dev.usb_err < 0)
return -1;
+ /* Avoid things going too fast for the bridge with an xHCI host */
+ udelay(150);
+
/* We don't use reg_r here, as the w9968cf is special and has 16
bit registers instead of 8 bit */
ret = usb_control_msg(sd->gspca_dev.dev,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 0533ef20decf..1a093e5953fd 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -4903,6 +4903,9 @@ static void pvr2_hdw_state_log_state(struct pvr2_hdw *hdw)
printk(KERN_INFO "%s %.*s\n",hdw->name,ccnt,buf);
}
ccnt = pvr2_hdw_report_clients(hdw, buf, sizeof(buf));
+ if (ccnt >= sizeof(buf))
+ ccnt = sizeof(buf);
+
ucnt = 0;
while (ucnt < ccnt) {
lcnt = 0;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.c b/drivers/media/usb/pvrusb2/pvrusb2-io.c
index d860344de84e..e68ce24f27e3 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.c
@@ -473,7 +473,7 @@ static void buffer_complete(struct urb *urb)
}
spin_unlock_irqrestore(&sp->list_lock,irq_flags);
pvr2_buffer_set_ready(bp);
- if (sp && sp->callback_func) {
+ if (sp->callback_func) {
sp->callback_func(sp->callback_data);
}
}
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 4dac499ed28e..c2e25876e93b 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -27,7 +27,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <media/v4l2-mc.h>
+#include <media/media-device.h>
#include "sms-cards.h"
#include "smsendian.h"
@@ -367,10 +367,12 @@ static void *siano_media_device_register(struct smsusb_device_t *dev,
struct sms_board *board = sms_get_board(board_id);
int ret;
- mdev = v4l2_mc_usb_media_device_init(udev, board->name);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return NULL;
+ media_device_usb_init(mdev, udev, board->name);
+
ret = media_device_register(mdev);
if (ret) {
media_device_cleanup(mdev);
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 4e7148815a78..451e84e962e2 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -148,6 +148,26 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_H264,
.fcc = V4L2_PIX_FMT_H264,
},
+ {
+ .name = "Greyscale 8 L/R (Y8I)",
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .name = "Greyscale 12 L/R (Y12I)",
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .name = "Depth data 16-bit (Z16)",
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .name = "Bayer 10-bit (SRGGB10P)",
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
};
/* ------------------------------------------------------------------------
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index f0f2391e1b43..7e4d3eea371b 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -119,6 +119,18 @@
#define UVC_GUID_FORMAT_H264 \
{ 'H', '2', '6', '4', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y8I \
+ { 'Y', '8', 'I', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Y12I \
+ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_Z16 \
+ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RW10 \
+ { 'R', 'W', '1', '0', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
/* ------------------------------------------------------------------------
* Driver specific constants.
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index c97067a25bd2..c183f0996fa1 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -29,6 +29,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
{
@@ -92,6 +93,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
{
if (fh->vdev == NULL)
return;
+ v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
fh->vdev = NULL;
}
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 14843090fd61..170dd68d27f4 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -27,6 +27,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mc.h>
#include <trace/events/v4l2.h>
@@ -1041,6 +1042,12 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
+ int ret;
+
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
}
@@ -1165,7 +1172,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YVYU: descr = "YVYU 4:2:2"; break;
case V4L2_PIX_FMT_UYVY: descr = "UYVY 4:2:2"; break;
case V4L2_PIX_FMT_VYUY: descr = "VYUY 4:2:2"; break;
- case V4L2_PIX_FMT_YUV422P: descr = "Planar YVU 4:2:2"; break;
+ case V4L2_PIX_FMT_YUV422P: descr = "Planar YUV 4:2:2"; break;
case V4L2_PIX_FMT_YUV411P: descr = "Planar YUV 4:1:1"; break;
case V4L2_PIX_FMT_Y41P: descr = "YUV 4:1:1 (Packed)"; break;
case V4L2_PIX_FMT_YUV444: descr = "16-bit A/XYUV 4-4-4-4"; break;
@@ -1452,6 +1459,9 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
bool is_tx = vfd->vfl_dir != VFL_DIR_RX;
int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
v4l_sanitize_format(p);
switch (p->type) {
@@ -1641,7 +1651,11 @@ static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
{
struct video_device *vfd = video_devdata(file);
struct v4l2_tuner *p = arg;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
return ops->vidioc_s_tuner(file, fh, p);
@@ -1695,7 +1709,11 @@ static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
const struct v4l2_frequency *p = arg;
enum v4l2_tuner_type type;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
if (vfd->vfl_type == VFL_TYPE_SDR) {
if (p->type != V4L2_TUNER_SDR && p->type != V4L2_TUNER_RF)
return -EINVAL;
@@ -1750,7 +1768,11 @@ static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id id = *(v4l2_std_id *)arg, norm;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
norm = id & vfd->tvnorms;
if (vfd->tvnorms && !norm) /* Check if std is supported */
return -EINVAL;
@@ -1764,7 +1786,11 @@ static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
{
struct video_device *vfd = video_devdata(file);
v4l2_std_id *p = arg;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
/*
* If no signal is detected, then the driver should return
* V4L2_STD_UNKNOWN. Otherwise it should return tvnorms with
@@ -1783,7 +1809,11 @@ static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
struct video_device *vfd = video_devdata(file);
struct v4l2_hw_freq_seek *p = arg;
enum v4l2_tuner_type type;
+ int ret;
+ ret = v4l_enable_media_source(vfd);
+ if (ret)
+ return ret;
/* s_hw_freq_seek is not supported for SDR for now */
if (vfd->vfl_type == VFL_TYPE_SDR)
return -EINVAL;
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index a7f41b323522..2228cd3a846e 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -1,7 +1,10 @@
/*
* Media Controller ancillary functions
*
- * (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
+ * Copyright (C) 2006-2010 Nokia Corporation
+ * Copyright (c) 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,80 +20,14 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/usb.h>
+#include <media/media-device.h>
#include <media/media-entity.h>
+#include <media/v4l2-fh.h>
#include <media/v4l2-mc.h>
-
-
-struct media_device *v4l2_mc_pci_media_device_init(struct pci_dev *pci_dev,
- const char *name)
-{
-#ifdef CONFIG_PCI
- struct media_device *mdev;
-
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev)
- return NULL;
-
- mdev->dev = &pci_dev->dev;
-
- if (name)
- strlcpy(mdev->model, name, sizeof(mdev->model));
- else
- strlcpy(mdev->model, pci_name(pci_dev), sizeof(mdev->model));
-
- sprintf(mdev->bus_info, "PCI:%s", pci_name(pci_dev));
-
- mdev->hw_revision = pci_dev->subsystem_vendor << 16
- || pci_dev->subsystem_device;
-
- mdev->driver_version = LINUX_VERSION_CODE;
-
- media_device_init(mdev);
-
- return mdev;
-#else
- return NULL;
-#endif
-}
-EXPORT_SYMBOL_GPL(v4l2_mc_pci_media_device_init);
-
-struct media_device *__v4l2_mc_usb_media_device_init(struct usb_device *udev,
- const char *board_name,
- const char *driver_name)
-{
-#ifdef CONFIG_USB
- struct media_device *mdev;
-
- mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
- if (!mdev)
- return NULL;
-
- mdev->dev = &udev->dev;
-
- if (driver_name)
- strlcpy(mdev->driver_name, driver_name,
- sizeof(mdev->driver_name));
-
- if (board_name)
- strlcpy(mdev->model, board_name, sizeof(mdev->model));
- else if (udev->product)
- strlcpy(mdev->model, udev->product, sizeof(mdev->model));
- else
- strlcpy(mdev->model, "unknown model", sizeof(mdev->model));
- if (udev->serial)
- strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
- usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info));
- mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
- mdev->driver_version = LINUX_VERSION_CODE;
-
- media_device_init(mdev);
-
- return mdev;
-#else
- return NULL;
-#endif
-}
-EXPORT_SYMBOL_GPL(__v4l2_mc_usb_media_device_init);
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
int v4l2_mc_create_media_graph(struct media_device *mdev)
@@ -253,6 +190,214 @@ int v4l2_mc_create_media_graph(struct media_device *mdev)
flags = 0;
}
+
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_mc_create_media_graph);
+
+int v4l_enable_media_source(struct video_device *vdev)
+{
+ struct media_device *mdev = vdev->entity.graph_obj.mdev;
+ int ret;
+
+ if (!mdev || !mdev->enable_source)
+ return 0;
+ ret = mdev->enable_source(&vdev->entity, &vdev->pipe);
+ if (ret)
+ return -EBUSY;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l_enable_media_source);
+
+void v4l_disable_media_source(struct video_device *vdev)
+{
+ struct media_device *mdev = vdev->entity.graph_obj.mdev;
+
+ if (mdev && mdev->disable_source)
+ mdev->disable_source(&vdev->entity);
+}
+EXPORT_SYMBOL_GPL(v4l_disable_media_source);
+
+int v4l_vb2q_enable_media_source(struct vb2_queue *q)
+{
+ struct v4l2_fh *fh = q->owner;
+
+ if (fh && fh->vdev)
+ return v4l_enable_media_source(fh->vdev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l_vb2q_enable_media_source);
+
+/* -----------------------------------------------------------------------------
+ * Pipeline power management
+ *
+ * Entities must be powered up when part of a pipeline that contains at least
+ * one open video device node.
+ *
+ * To achieve this, use the entity use_count field to track the number of users.
+ * For entities corresponding to video device nodes the use_count field stores
+ * the users count of the node. For entities corresponding to subdevs the
+ * use_count field stores the total number of users of all video device nodes
+ * in the pipeline.
+ *
+ * The v4l2_pipeline_pm_use() function must be called in the open() and
+ * close() handlers of video device nodes. It increments or decrements the use
+ * count of all subdev entities in the pipeline.
+ *
+ * To react to link management on powered pipelines, the link setup notification
+ * callback updates the use count of all entities in the source and sink sides
+ * of the link.
+ */
+
+/*
+ * pipeline_pm_use_count - Count the number of users of a pipeline
+ * @entity: The entity
+ *
+ * Return the total number of users of all video device nodes in the pipeline.
+ */
+static int pipeline_pm_use_count(struct media_entity *entity,
+ struct media_entity_graph *graph)
+{
+ int use = 0;
+
+ media_entity_graph_walk_start(graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(graph))) {
+ if (is_media_entity_v4l2_io(entity))
+ use += entity->use_count;
+ }
+
+ return use;
+}
+
+/*
+ * pipeline_pm_power_one - Apply power change to an entity
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Change the entity use count by @change. If the entity is a subdev update its
+ * power state by calling the core::s_power operation when the use count goes
+ * from 0 to != 0 or from != 0 to 0.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int pipeline_pm_power_one(struct media_entity *entity, int change)
+{
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = is_media_entity_v4l2_subdev(entity)
+ ? media_entity_to_v4l2_subdev(entity) : NULL;
+
+ if (entity->use_count == 0 && change > 0 && subdev != NULL) {
+ ret = v4l2_subdev_call(subdev, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ if (entity->use_count == 0 && change < 0 && subdev != NULL)
+ v4l2_subdev_call(subdev, core, s_power, 0);
+
+ return 0;
+}
+
+/*
+ * pipeline_pm_power - Apply power change to all entities in a pipeline
+ * @entity: The entity
+ * @change: Use count change
+ *
+ * Walk the pipeline to update the use count and the power state of all non-node
+ * entities.
+ *
+ * Return 0 on success or a negative error code on failure.
+ */
+static int pipeline_pm_power(struct media_entity *entity, int change,
+ struct media_entity_graph *graph)
+{
+ struct media_entity *first = entity;
+ int ret = 0;
+
+ if (!change)
+ return 0;
+
+ media_entity_graph_walk_start(graph, entity);
+
+ while (!ret && (entity = media_entity_graph_walk_next(graph)))
+ if (is_media_entity_v4l2_subdev(entity))
+ ret = pipeline_pm_power_one(entity, change);
+
+ if (!ret)
+ return ret;
+
+ media_entity_graph_walk_start(graph, first);
+
+ while ((first = media_entity_graph_walk_next(graph))
+ && first != entity)
+ if (is_media_entity_v4l2_subdev(first))
+ pipeline_pm_power_one(first, -change);
+
+ return ret;
+}
+
+int v4l2_pipeline_pm_use(struct media_entity *entity, int use)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ int change = use ? 1 : -1;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Apply use count to node. */
+ entity->use_count += change;
+ WARN_ON(entity->use_count < 0);
+
+ /* Apply power change to connected non-nodes. */
+ ret = pipeline_pm_power(entity, change, &mdev->pm_count_walk);
+ if (ret < 0)
+ entity->use_count -= change;
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_pm_use);
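Per the comment block above, v4l2_pipeline_pm_use() is intended to be called from a video node's open() and close() handlers. A hedged sketch of the expected wiring, with hypothetical mydrv_* names:

	static int mydrv_open(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);

		/* Power up every subdev in the pipeline (use count 0 -> 1). */
		return v4l2_pipeline_pm_use(&vdev->entity, 1);
	}

	static int mydrv_release(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);

		/* Power down again once the last user is gone. */
		v4l2_pipeline_pm_use(&vdev->entity, 0);
		return 0;
	}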
+
+int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_entity_graph *graph = &link->graph_obj.mdev->pm_count_walk;
+ struct media_entity *source = link->source->entity;
+ struct media_entity *sink = link->sink->entity;
+ int source_use;
+ int sink_use;
+ int ret = 0;
+
+ source_use = pipeline_pm_use_count(source, graph);
+ sink_use = pipeline_pm_use_count(sink, graph);
+
+ if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ /* Powering off entities is assumed to never fail. */
+ pipeline_pm_power(source, -sink_use, graph);
+ pipeline_pm_power(sink, -source_use, graph);
+ return 0;
+ }
+
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ (flags & MEDIA_LNK_FL_ENABLED)) {
+
+ ret = pipeline_pm_power(source, sink_use, graph);
+ if (ret < 0)
+ return ret;
+
+ ret = pipeline_pm_power(sink, source_use, graph);
+ if (ret < 0)
+ pipeline_pm_power(source, -sink_use, graph);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_pipeline_link_notify);
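For reference, a minimal sketch (not part of this patch) of how a bridge driver might drive these helpers from its file operations; the "mycam" names and the embedded struct are hypothetical. On the first open the node's use count goes from 0 to 1, so pipeline_pm_power_one() sends s_power(1) to every subdev reachable from the node; the last release drives the count back to 0 and powers the subdevs down again.

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mc.h>

struct mycam_video {
	struct video_device vdev;	/* hypothetical bridge video node */
};

static int mycam_open(struct file *file)
{
	struct mycam_video *cam = video_drvdata(file);
	int ret;

	ret = v4l2_fh_open(file);
	if (ret < 0)
		return ret;

	/* 0 -> 1 transition powers the connected subdevs via s_power(1). */
	ret = v4l2_pipeline_pm_use(&cam->vdev.entity, 1);
	if (ret < 0)
		v4l2_fh_release(file);

	return ret;
}

static int mycam_release(struct file *file)
{
	struct mycam_video *cam = video_drvdata(file);

	/* Last user gone: the 1 -> 0 transition sends s_power(0), which is
	 * assumed never to fail, so the return value can be ignored here. */
	v4l2_pipeline_pm_use(&cam->vdev.entity, 0);

	return v4l2_fh_release(file);
}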
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index 6c02989ee33f..def84753c4c3 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -75,7 +75,8 @@ struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
-static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int state_neither_active_nor_queued(struct videobuf_queue *q,
+ struct videobuf_buffer *vb)
{
unsigned long flags;
bool rc;
@@ -95,7 +96,7 @@ int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
if (non_blocking) {
- if (is_state_active_or_queued(q, vb))
+ if (state_neither_active_nor_queued(q, vb))
return 0;
return -EAGAIN;
}
@@ -107,9 +108,10 @@ int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
if (is_ext_locked)
mutex_unlock(q->ext_lock);
if (intr)
- ret = wait_event_interruptible(vb->done, is_state_active_or_queued(q, vb));
+ ret = wait_event_interruptible(vb->done,
+ state_neither_active_nor_queued(q, vb));
else
- wait_event(vb->done, is_state_active_or_queued(q, vb));
+ wait_event(vb->done, state_neither_active_nor_queued(q, vb));
/* Relock */
if (is_ext_locked)
mutex_lock(q->ext_lock);
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f669cedca8bd..f300f060b3f3 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -181,8 +181,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages(current, current->mm,
- data & PAGE_MASK, dma->nr_pages,
+ err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
rw == READ, 1, /* force */
dma->pages, NULL);
@@ -350,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
if (dma->pages) {
for (i = 0; i < dma->nr_pages; i++)
- page_cache_release(dma->pages[i]);
+ put_page(dma->pages[i]);
kfree(dma->pages);
dma->pages = NULL;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index dab94080ec3a..5d016f496e0e 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <media/videobuf2-core.h>
+#include <media/v4l2-mc.h>
#include <trace/events/vb2.h>
@@ -1887,6 +1888,9 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
* are available.
*/
if (q->queued_count >= q->min_buffers_needed) {
+ ret = v4l_vb2q_enable_media_source(q);
+ if (ret)
+ return ret;
ret = vb2_start_streaming(q);
if (ret) {
__vb2_queue_cancel(q);
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index c33127284cfe..5361197f3e57 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -23,13 +23,16 @@
struct vb2_dc_conf {
struct device *dev;
+ struct dma_attrs attrs;
};
struct vb2_dc_buf {
struct device *dev;
void *vaddr;
unsigned long size;
+ void *cookie;
dma_addr_t dma_addr;
+ struct dma_attrs attrs;
enum dma_data_direction dma_dir;
struct sg_table *dma_sgt;
struct frame_vector *vec;
@@ -131,7 +134,8 @@ static void vb2_dc_put(void *buf_priv)
sg_free_table(buf->sgt_base);
kfree(buf->sgt_base);
}
- dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
+ dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
+ &buf->attrs);
put_device(buf->dev);
kfree(buf);
}
@@ -147,14 +151,18 @@ static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags);
- if (!buf->vaddr) {
+ buf->attrs = conf->attrs;
+ buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
+ GFP_KERNEL | gfp_flags, &buf->attrs);
+ if (!buf->cookie) {
dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
+ buf->vaddr = buf->cookie;
+
/* Prevent the device from being released while the buffer is used */
buf->dev = get_device(dev);
buf->size = size;
@@ -185,8 +193,8 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
*/
vma->vm_pgoff = 0;
- ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
- buf->dma_addr, buf->size);
+ ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
+ buf->dma_addr, buf->size, &buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
@@ -329,7 +337,7 @@ static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
struct vb2_dc_buf *buf = dbuf->priv;
- return buf->vaddr + pgnum * PAGE_SIZE;
+ return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}
static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
@@ -368,8 +376,8 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return NULL;
}
- ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
- buf->size);
+ ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
+ buf->size, &buf->attrs);
if (ret < 0) {
dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
kfree(sgt);
@@ -721,7 +729,8 @@ const struct vb2_mem_ops vb2_dma_contig_memops = {
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
-void *vb2_dma_contig_init_ctx(struct device *dev)
+void *vb2_dma_contig_init_ctx_attrs(struct device *dev,
+ struct dma_attrs *attrs)
{
struct vb2_dc_conf *conf;
@@ -730,10 +739,12 @@ void *vb2_dma_contig_init_ctx(struct device *dev)
return ERR_PTR(-ENOMEM);
conf->dev = dev;
+ if (attrs)
+ conf->attrs = *attrs;
return conf;
}
-EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
+EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx_attrs);
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
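Worth noting how the new entry point is meant to be consumed: a driver that wants buffers without a kernel mapping can now pass attributes at context creation time. A hedged fragment, assuming the era's <linux/dma-attrs.h> helpers and a hypothetical platform device "pdev":

	struct dma_attrs attrs;
	void *alloc_ctx;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/*
	 * buf->vaddr stays NULL for such buffers, so the dmabuf kmap/vmap
	 * ops above correctly return NULL instead of a bogus pointer.
	 */
	alloc_ctx = vb2_dma_contig_init_ctx_attrs(&pdev->dev, &attrs);
	if (IS_ERR(alloc_ctx))
		return PTR_ERR(alloc_ctx);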
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 6f3154613dc7..51d5cd20c26a 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -114,6 +114,14 @@ config JZ4780_NEMC
the Ingenic JZ4780. This controller is used to handle external
memory devices such as NAND and SRAM.
+config MTK_SMI
+ bool
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+	  This driver is for the Memory Controller module in MediaTek SoCs.
+	  It mainly helps to enable/disable the IOMMU and to control the
+	  power domain and clocks for each local arbiter.
+
source "drivers/memory/tegra/Kconfig"
endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 1c46af501610..890bdf402449 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -15,5 +15,6 @@ obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
+obj-$(CONFIG_MTK_SMI) += mtk-smi.o
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index acd1460cf787..2a691da8c1c7 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -260,7 +260,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
/* get the Controller level irq */
fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
- if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
+ if (fsl_ifc_ctrl_dev->irq == 0) {
dev_err(&dev->dev, "failed to get irq resource "
"for IFC\n");
ret = -ENODEV;
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
new file mode 100644
index 000000000000..089091f5f890
--- /dev/null
+++ b/drivers/memory/mtk-smi.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#define SMI_LARB_MMU_EN 0xf00
+
+struct mtk_smi {
+ struct device *dev;
+ struct clk *clk_apb, *clk_smi;
+};
+
+struct mtk_smi_larb { /* larb: local arbiter */
+ struct mtk_smi smi;
+ void __iomem *base;
+ struct device *smi_common_dev;
+ u32 *mmu;
+};
+
+static int mtk_smi_enable(const struct mtk_smi *smi)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(smi->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(smi->clk_apb);
+ if (ret)
+ goto err_put_pm;
+
+ ret = clk_prepare_enable(smi->clk_smi);
+ if (ret)
+ goto err_disable_apb;
+
+ return 0;
+
+err_disable_apb:
+ clk_disable_unprepare(smi->clk_apb);
+err_put_pm:
+ pm_runtime_put_sync(smi->dev);
+ return ret;
+}
+
+static void mtk_smi_disable(const struct mtk_smi *smi)
+{
+ clk_disable_unprepare(smi->clk_smi);
+ clk_disable_unprepare(smi->clk_apb);
+ pm_runtime_put_sync(smi->dev);
+}
+
+int mtk_smi_larb_get(struct device *larbdev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
+ struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
+ int ret;
+
+ /* Enable the smi-common's power and clocks */
+ ret = mtk_smi_enable(common);
+ if (ret)
+ return ret;
+
+ /* Enable the larb's power and clocks */
+ ret = mtk_smi_enable(&larb->smi);
+ if (ret) {
+ mtk_smi_disable(common);
+ return ret;
+ }
+
+ /* Configure the iommu info for this larb */
+ writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+
+ return 0;
+}
+
+void mtk_smi_larb_put(struct device *larbdev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(larbdev);
+ struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
+
+ /*
+ * Don't de-configure the iommu info for this larb since there may be
+ * several modules in this larb.
+ * The iommu info will be reset after power off.
+ */
+
+ mtk_smi_disable(&larb->smi);
+ mtk_smi_disable(common);
+}
+
+static int
+mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ struct mtk_smi_iommu *smi_iommu = data;
+ unsigned int i;
+
+ for (i = 0; i < smi_iommu->larb_nr; i++) {
+ if (dev == smi_iommu->larb_imu[i].dev) {
+ /* The 'mmu' may be updated in iommu-attach/detach. */
+ larb->mmu = &smi_iommu->larb_imu[i].mmu;
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static void
+mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
+{
+ /* Do nothing as the iommu is always enabled. */
+}
+
+static const struct component_ops mtk_smi_larb_component_ops = {
+ .bind = mtk_smi_larb_bind,
+ .unbind = mtk_smi_larb_unbind,
+};
+
+static int mtk_smi_larb_probe(struct platform_device *pdev)
+{
+ struct mtk_smi_larb *larb;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *smi_node;
+ struct platform_device *smi_pdev;
+
+ if (!dev->pm_domain)
+ return -EPROBE_DEFER;
+
+ larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
+ if (!larb)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ larb->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(larb->base))
+ return PTR_ERR(larb->base);
+
+ larb->smi.clk_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(larb->smi.clk_apb))
+ return PTR_ERR(larb->smi.clk_apb);
+
+ larb->smi.clk_smi = devm_clk_get(dev, "smi");
+ if (IS_ERR(larb->smi.clk_smi))
+ return PTR_ERR(larb->smi.clk_smi);
+ larb->smi.dev = dev;
+
+ smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
+ if (!smi_node)
+ return -EINVAL;
+
+ smi_pdev = of_find_device_by_node(smi_node);
+ of_node_put(smi_node);
+ if (smi_pdev) {
+ larb->smi_common_dev = &smi_pdev->dev;
+ } else {
+ dev_err(dev, "Failed to get the smi_common device\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, larb);
+ return component_add(dev, &mtk_smi_larb_component_ops);
+}
+
+static int mtk_smi_larb_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ component_del(&pdev->dev, &mtk_smi_larb_component_ops);
+ return 0;
+}
+
+static const struct of_device_id mtk_smi_larb_of_ids[] = {
+ { .compatible = "mediatek,mt8173-smi-larb",},
+ {}
+};
+
+static struct platform_driver mtk_smi_larb_driver = {
+ .probe = mtk_smi_larb_probe,
+ .remove = mtk_smi_larb_remove,
+ .driver = {
+ .name = "mtk-smi-larb",
+ .of_match_table = mtk_smi_larb_of_ids,
+ }
+};
+
+static int mtk_smi_common_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_smi *common;
+
+ if (!dev->pm_domain)
+ return -EPROBE_DEFER;
+
+ common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
+ if (!common)
+ return -ENOMEM;
+ common->dev = dev;
+
+ common->clk_apb = devm_clk_get(dev, "apb");
+ if (IS_ERR(common->clk_apb))
+ return PTR_ERR(common->clk_apb);
+
+ common->clk_smi = devm_clk_get(dev, "smi");
+ if (IS_ERR(common->clk_smi))
+ return PTR_ERR(common->clk_smi);
+
+ pm_runtime_enable(dev);
+ platform_set_drvdata(pdev, common);
+ return 0;
+}
+
+static int mtk_smi_common_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id mtk_smi_common_of_ids[] = {
+ { .compatible = "mediatek,mt8173-smi-common", },
+ {}
+};
+
+static struct platform_driver mtk_smi_common_driver = {
+ .probe = mtk_smi_common_probe,
+ .remove = mtk_smi_common_remove,
+ .driver = {
+ .name = "mtk-smi-common",
+ .of_match_table = mtk_smi_common_of_ids,
+ }
+};
+
+static int __init mtk_smi_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mtk_smi_common_driver);
+ if (ret != 0) {
+ pr_err("Failed to register SMI driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&mtk_smi_larb_driver);
+ if (ret != 0) {
+ pr_err("Failed to register SMI-LARB driver\n");
+ goto err_unreg_smi;
+ }
+ return ret;
+
+err_unreg_smi:
+ platform_driver_unregister(&mtk_smi_common_driver);
+ return ret;
+}
+subsys_initcall(mtk_smi_init);
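The intended calling convention for consumers is get-before-DMA, put-after-DMA, with smi-common powered before the larb and released after it. A hypothetical consumer sketch (the "mtk_foo" names are illustrative only and not part of this patch):

#include <soc/mediatek/smi.h>

struct mtk_foo {
	struct device *larb_dev;	/* the larb this block sits behind */
};

static int mtk_foo_start_dma(struct mtk_foo *foo)
{
	int ret;

	/* Powers smi-common, then the larb, then programs SMI_LARB_MMU_EN. */
	ret = mtk_smi_larb_get(foo->larb_dev);
	if (ret)
		return ret;

	/* ... kick the hardware here ... */
	return 0;
}

static void mtk_foo_stop_dma(struct mtk_foo *foo)
{
	/* ... halt the hardware here ... */

	/* Reverse order: the larb first, then smi-common. */
	mtk_smi_larb_put(foo->larb_dev);
}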
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 6515dfc2b805..21825ddce4a3 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -541,9 +541,20 @@ static void gpmc_cs_show_timings(int cs, const char *desc)
GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns");
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 4, 6, "adv-aad-mux-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 24, 26,
+ "adv-aad-mux-rd-off-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG3, 28, 30,
+ "adv-aad-mux-wr-off-ns");
+ }
GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns");
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 4, 6, "oe-aad-mux-on-ns");
+ GPMC_GET_TICKS(GPMC_CS_CONFIG4, 13, 15, "oe-aad-mux-off-ns");
+ }
GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns");
GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns");
@@ -734,9 +745,18 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_SET_ONE(GPMC_CS_CONFIG3, 4, 6, adv_aad_mux_on);
+ GPMC_SET_ONE(GPMC_CS_CONFIG3, 24, 26, adv_aad_mux_rd_off);
+ GPMC_SET_ONE(GPMC_CS_CONFIG3, 28, 30, adv_aad_mux_wr_off);
+ }
GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
+ if (gpmc_capability & GPMC_HAS_MUX_AAD) {
+ GPMC_SET_ONE(GPMC_CS_CONFIG4, 4, 6, oe_aad_mux_on);
+ GPMC_SET_ONE(GPMC_CS_CONFIG4, 13, 15, oe_aad_mux_off);
+ }
GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);
@@ -1722,6 +1742,12 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on);
of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off);
of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-on-ns",
+ &gpmc_t->adv_aad_mux_on);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-rd-off-ns",
+ &gpmc_t->adv_aad_mux_rd_off);
+ of_property_read_u32(np, "gpmc,adv-aad-mux-wr-off-ns",
+ &gpmc_t->adv_aad_mux_wr_off);
/* WE signal timings */
of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on);
@@ -1730,6 +1756,10 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
/* OE signal timings */
of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on);
of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off);
+ of_property_read_u32(np, "gpmc,oe-aad-mux-on-ns",
+ &gpmc_t->oe_aad_mux_on);
+ of_property_read_u32(np, "gpmc,oe-aad-mux-off-ns",
+ &gpmc_t->oe_aad_mux_off);
/* access and cycle timings */
of_property_read_u32(np, "gpmc,page-burst-access-ns",
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index ef09ba0289d7..d5cfb503b9d6 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -298,8 +298,7 @@ static int r592_transfer_fifo_dma(struct r592_device *dev)
sg_count = dma_map_sg(&dev->pci_dev->dev, &dev->req->sg, 1, is_write ?
PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
- if (sg_count != 1 ||
- (sg_dma_len(&dev->req->sg) < dev->req->sg.length)) {
+ if (sg_count != 1 || sg_dma_len(&dev->req->sg) < R592_LFIFO_SIZE) {
message("problem in dma_map_sg");
return -EIO;
}
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5dcc0313c38a..5537f8df8512 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1801,8 +1801,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->pcidev = pdev;
if (mpt_mapresources(ioc)) {
- kfree(ioc);
- return r;
+ goto out_free_ioc;
}
/*
@@ -1871,9 +1870,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ioc->reset_work_q) {
printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
ioc->name);
- pci_release_selected_regions(pdev, ioc->bars);
- kfree(ioc);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto out_unmap_resources;
}
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
@@ -1995,16 +1993,27 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ioc->fw_event_lock);
snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+ if (!ioc->fw_event_q) {
+ printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
+ ioc->name);
+ r = -ENOMEM;
+ goto out_remove_ioc;
+ }
if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
CAN_SLEEP)) != 0){
printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
ioc->name, r);
+ destroy_workqueue(ioc->fw_event_q);
+ ioc->fw_event_q = NULL;
+
list_del(&ioc->list);
if (ioc->alt_ioc)
ioc->alt_ioc->alt_ioc = NULL;
iounmap(ioc->memmap);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
if (r != -5)
pci_release_selected_regions(pdev, ioc->bars);
@@ -2012,7 +2021,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->reset_work_q = NULL;
kfree(ioc);
- pci_set_drvdata(pdev, NULL);
return r;
}
@@ -2040,6 +2048,24 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
msecs_to_jiffies(MPT_POLLING_INTERVAL));
return 0;
+
+out_remove_ioc:
+ list_del(&ioc->list);
+ if (ioc->alt_ioc)
+ ioc->alt_ioc->alt_ioc = NULL;
+
+ destroy_workqueue(ioc->reset_work_q);
+ ioc->reset_work_q = NULL;
+
+out_unmap_resources:
+ iounmap(ioc->memmap);
+ pci_disable_device(pdev);
+ pci_release_selected_regions(pdev, ioc->bars);
+
+out_free_ioc:
+ kfree(ioc);
+
+ return r;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -6229,7 +6255,7 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));
- out:
+out:
if (pbuf)
pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
@@ -6848,6 +6874,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
*size = y;
}
+#ifdef CONFIG_PROC_FS
static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
{
char expVer[32];
@@ -6879,6 +6906,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
seq_putc(m, '\n');
}
+#endif
/**
* mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 9ca66de0c1c1..eea61e349e26 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -18,6 +18,17 @@ config MFD_CS5535
This is the core driver for CS5535/CS5536 MFD functions. This is
necessary for using the board's GPIO and MFGPT functionality.
+config MFD_ACT8945A
+ tristate "Active-semi ACT8945A"
+ select MFD_CORE
+ select REGMAP_I2C
+ depends on I2C && OF
+ help
+ Support for the ACT8945A PMIC from Active-semi. This device
+ features three step-down DC/DC converters and four low-dropout
+ linear regulators, along with a complete ActivePath battery
+ charger.
+
config MFD_AS3711
bool "AMS AS3711"
select MFD_CORE
@@ -91,14 +102,29 @@ config MFD_BCM590XX
Support for the BCM590xx PMUs from Broadcom
config MFD_AXP20X
- bool "X-Powers AXP20X"
+ tristate
select MFD_CORE
- select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+
+config MFD_AXP20X_I2C
+ tristate "X-Powers AXP series PMICs with I2C"
+ select MFD_AXP20X
+ select REGMAP_I2C
+ depends on I2C
+ help
+ If you say Y here you get support for the X-Powers AXP series power
+ management ICs (PMICs) controlled with I2C.
+ This driver includes only the core APIs. You have to select individual
+ components like regulators or the PEK (Power Enable Key) under the
+ corresponding menus.
+
+config MFD_AXP20X_RSB
+ tristate "X-Powers AXP series PMICs with RSB"
+ select MFD_AXP20X
+ depends on SUNXI_RSB
help
- If you say Y here you get support for the X-Powers AXP202, AXP209 and
- AXP288 power management IC (PMIC).
+ If you say Y here you get support for the X-Powers AXP series power
+ management ICs (PMICs) controlled with RSB.
 This driver includes only the core APIs. You have to select individual
components like regulators or the PEK (Power Enable Key) under the
corresponding menus.
@@ -203,7 +229,7 @@ config MFD_DA9062
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+ depends on I2C
help
Say yes here for support for the Dialog Semiconductor DA9062 PMIC.
This includes the I2C driver and core APIs.
@@ -215,7 +241,7 @@ config MFD_DA9063
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
- depends on I2C=y
+ depends on I2C
help
Say yes here for support for the Dialog Semiconductor DA9063 PMIC.
This includes the I2C driver and core APIs.
@@ -224,7 +250,7 @@ config MFD_DA9063
config MFD_DA9150
tristate "Dialog Semiconductor DA9150 Charger Fuel-Gauge chip"
- depends on I2C=y
+ depends on I2C
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -271,6 +297,15 @@ config MFD_MC13XXX_I2C
help
Select this if your MC13xxx is connected via an I2C bus.
+config MFD_MX25_TSADC
+ tristate "Freescale i.MX25 integrated Touchscreen and ADC unit"
+ select REGMAP_MMIO
+ depends on (SOC_IMX25 && OF) || COMPILE_TEST
+ help
+	  Enable support for the integrated Touchscreen and ADC unit of the
+	  i.MX25 processors. It consists of a conversion queue for the
+	  general purpose ADC and a queue for touchscreens.
+
config MFD_HI6421_PMIC
tristate "HiSilicon Hi6421 PMU/Codec IC"
depends on OF
@@ -445,7 +480,7 @@ config MFD_KEMPLD
config MFD_88PM800
tristate "Marvell 88PM800"
- depends on I2C=y
+ depends on I2C
select REGMAP_I2C
select REGMAP_IRQ
select MFD_CORE
@@ -457,7 +492,7 @@ config MFD_88PM800
config MFD_88PM805
tristate "Marvell 88PM805"
- depends on I2C=y
+ depends on I2C
select REGMAP_I2C
select REGMAP_IRQ
select MFD_CORE
@@ -493,8 +528,8 @@ config MFD_MAX14577
of the device.
config MFD_MAX77686
- bool "Maxim Semiconductor MAX77686/802 PMIC Support"
- depends on I2C=y
+ tristate "Maxim Semiconductor MAX77686/802 PMIC Support"
+ depends on I2C
depends on OF
select MFD_CORE
select REGMAP_I2C
@@ -538,7 +573,7 @@ config MFD_MAX77843
config MFD_MAX8907
tristate "Maxim Semiconductor MAX8907 PMIC Support"
select MFD_CORE
- depends on I2C=y
+ depends on I2C
select REGMAP_I2C
select REGMAP_IRQ
help
@@ -743,7 +778,7 @@ config MFD_RTSX_PCI
config MFD_RT5033
tristate "Richtek RT5033 Power Management IC"
- depends on I2C=y
+ depends on I2C
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -1106,6 +1141,19 @@ config TPS6507X
This driver can also be built as a module. If so, the module
will be called tps6507x.
+config MFD_TPS65086
+ tristate "TI TPS65086 Power Management Integrated Chips (PMICs)"
+ select REGMAP
+ select REGMAP_IRQ
+ select REGMAP_I2C
+ depends on I2C
+ help
+ If you say yes here you get support for the TPS65086 series of
+ Power Management chips.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device.
+
config TPS65911_COMPARATOR
tristate
@@ -1181,27 +1229,25 @@ config MFD_TPS65910
Power Management chips.
config MFD_TPS65912
- bool "TI TPS65912 Power Management chip"
- depends on GPIOLIB
+ tristate
select MFD_CORE
- help
- If you say yes here you get support for the TPS65912 series of
- PM chips.
+ select REGMAP
+ select REGMAP_IRQ
config MFD_TPS65912_I2C
- bool "TI TPS65912 Power Management chip with I2C"
- select MFD_CORE
+ tristate "TI TPS65912 Power Management chip with I2C"
select MFD_TPS65912
- depends on I2C=y && GPIOLIB
+ select REGMAP_I2C
+ depends on I2C
help
If you say yes here you get support for the TPS65912 series of
PM chips with I2C interface.
config MFD_TPS65912_SPI
- bool "TI TPS65912 Power Management chip with SPI"
- select MFD_CORE
+ tristate "TI TPS65912 Power Management chip with SPI"
select MFD_TPS65912
- depends on SPI_MASTER && GPIOLIB
+ select REGMAP_SPI
+ depends on SPI_MASTER
help
If you say yes here you get support for the TPS65912 series of
PM chips with SPI interface.
@@ -1372,7 +1418,6 @@ config MFD_ARIZONA
config MFD_ARIZONA_I2C
tristate "Cirrus Logic/Wolfson Microelectronics Arizona platform with I2C"
select MFD_ARIZONA
- select MFD_CORE
select REGMAP_I2C
depends on I2C
help
@@ -1382,12 +1427,11 @@ config MFD_ARIZONA_I2C
config MFD_ARIZONA_SPI
tristate "Cirrus Logic/Wolfson Microelectronics Arizona platform with SPI"
select MFD_ARIZONA
- select MFD_CORE
select REGMAP_SPI
depends on SPI_MASTER
help
Support for the Cirrus Logic/Wolfson Microelectronics Arizona platform
- audio SoC core functionality controlled via I2C.
+ audio SoC core functionality controlled via SPI.
config MFD_CS47L24
bool "Cirrus Logic CS47L24 and WM1831"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 0f230a6103f8..5eaa6465d0a6 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_88PM800) += 88pm800.o 88pm80x.o
obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
+obj-$(CONFIG_MFD_ACT8945A) += act8945a.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
@@ -70,11 +71,11 @@ obj-$(CONFIG_MFD_WM8994) += wm8994.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
obj-$(CONFIG_TPS6507X) += tps6507x.o
+obj-$(CONFIG_MFD_TPS65086) += tps65086.o
obj-$(CONFIG_MFD_TPS65217) += tps65217.o
obj-$(CONFIG_MFD_TPS65218) += tps65218.o
obj-$(CONFIG_MFD_TPS65910) += tps65910.o
-tps65912-objs := tps65912-core.o tps65912-irq.o
-obj-$(CONFIG_MFD_TPS65912) += tps65912.o
+obj-$(CONFIG_MFD_TPS65912) += tps65912-core.o
obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
obj-$(CONFIG_MFD_TPS65912_SPI) += tps65912-spi.o
obj-$(CONFIG_MFD_TPS80031) += tps80031.o
@@ -85,6 +86,8 @@ obj-$(CONFIG_TWL4030_POWER) += twl4030-power.o
obj-$(CONFIG_MFD_TWL4030_AUDIO) += twl4030-audio.o
obj-$(CONFIG_TWL6040_CORE) += twl6040.o
+obj-$(CONFIG_MFD_MX25_TSADC) += fsl-imx25-tsadc.o
+
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
@@ -111,6 +114,8 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
obj-$(CONFIG_MFD_AXP20X) += axp20x.o
+obj-$(CONFIG_MFD_AXP20X_I2C) += axp20x-i2c.o
+obj-$(CONFIG_MFD_AXP20X_RSB) += axp20x-rsb.o
obj-$(CONFIG_MFD_LP3943) += lp3943.o
obj-$(CONFIG_MFD_LP8788) += lp8788.o lp8788-irq.o
diff --git a/drivers/mfd/act8945a.c b/drivers/mfd/act8945a.c
new file mode 100644
index 000000000000..525b546ba42f
--- /dev/null
+++ b/drivers/mfd/act8945a.c
@@ -0,0 +1,102 @@
+/*
+ * MFD driver for Active-semi ACT8945a PMIC
+ *
+ * Copyright (C) 2015 Atmel Corporation.
+ *
+ * Author: Wenyou Yang <wenyou.yang@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+static const struct mfd_cell act8945a_devs[] = {
+ {
+ .name = "act8945a-regulator",
+ },
+ {
+ .name = "act8945a-charger",
+ },
+};
+
+static const struct regmap_config act8945a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int act8945a_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &act8945a_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, regmap);
+
+ ret = mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE, act8945a_devs,
+ ARRAY_SIZE(act8945a_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to add sub devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int act8945a_i2c_remove(struct i2c_client *i2c)
+{
+ mfd_remove_devices(&i2c->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id act8945a_i2c_id[] = {
+ { "act8945a", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, act8945a_i2c_id);
+
+static const struct of_device_id act8945a_of_match[] = {
+ { .compatible = "active-semi,act8945a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, act8945a_of_match);
+
+static struct i2c_driver act8945a_i2c_driver = {
+ .driver = {
+ .name = "act8945a",
+ .of_match_table = of_match_ptr(act8945a_of_match),
+ },
+ .probe = act8945a_i2c_probe,
+ .remove = act8945a_i2c_remove,
+ .id_table = act8945a_i2c_id,
+};
+
+static int __init act8945a_i2c_init(void)
+{
+ return i2c_add_driver(&act8945a_i2c_driver);
+}
+subsys_initcall(act8945a_i2c_init);
+
+static void __exit act8945a_i2c_exit(void)
+{
+ i2c_del_driver(&act8945a_i2c_driver);
+}
+module_exit(act8945a_i2c_exit);
+
+MODULE_DESCRIPTION("ACT8945A PMIC multi-function driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL");
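Because the core registers the regmap on the I2C client via devm_regmap_init_i2c(), the two cells can reach it through the generic lookup on their parent device. A hedged sketch of what a child such as act8945a-charger might do (the probe body is abbreviated and the function name is assumed, not taken from this patch):

static int act8945a_charger_probe(struct platform_device *pdev)
{
	struct regmap *regmap;

	/* The MFD core parented this cell to the I2C client's device. */
	regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!regmap)
		return -ENODEV;

	/* ... register the power-supply consumer using regmap ... */
	return 0;
}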
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index 94d67a6e1eb7..09e1483b99bc 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -108,8 +108,8 @@ static const struct regmap_config as3711_regmap_config = {
.volatile_reg = as3711_volatile_reg,
.readable_reg = as3711_readable_reg,
.precious_reg = as3711_precious_reg,
- .max_register = AS3711_MAX_REGS,
- .num_reg_defaults_raw = AS3711_MAX_REGS,
+ .max_register = AS3711_MAX_REG,
+ .num_reg_defaults_raw = AS3711_NUM_REGS,
.cache_type = REGCACHE_RBTREE,
};
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c
new file mode 100644
index 000000000000..b1b865822c07
--- /dev/null
+++ b/drivers/mfd/axp20x-i2c.c
@@ -0,0 +1,104 @@
+/*
+ * I2C driver for the X-Powers' Power Management ICs
+ *
+ * AXP20x typically comprises an adaptive USB-Compatible PWM charger, BUCK DC-DC
+ * converters, LDOs, multiple 12-bit ADCs of voltage, current and temperature
+ * as well as configurable GPIOs.
+ *
+ * This driver supports the I2C variants.
+ *
+ * Copyright (C) 2014 Carlo Caione
+ *
+ * Author: Carlo Caione <carlo@caione.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static int axp20x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct axp20x_dev *axp20x;
+ int ret;
+
+ axp20x = devm_kzalloc(&i2c->dev, sizeof(*axp20x), GFP_KERNEL);
+ if (!axp20x)
+ return -ENOMEM;
+
+ axp20x->dev = &i2c->dev;
+ axp20x->irq = i2c->irq;
+ dev_set_drvdata(axp20x->dev, axp20x);
+
+ ret = axp20x_match_device(axp20x);
+ if (ret)
+ return ret;
+
+ axp20x->regmap = devm_regmap_init_i2c(i2c, axp20x->regmap_cfg);
+ if (IS_ERR(axp20x->regmap)) {
+ ret = PTR_ERR(axp20x->regmap);
+ dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ return axp20x_device_probe(axp20x);
+}
+
+static int axp20x_i2c_remove(struct i2c_client *i2c)
+{
+ struct axp20x_dev *axp20x = i2c_get_clientdata(i2c);
+
+ return axp20x_device_remove(axp20x);
+}
+
+static const struct of_device_id axp20x_i2c_of_match[] = {
+ { .compatible = "x-powers,axp152", .data = (void *)AXP152_ID },
+ { .compatible = "x-powers,axp202", .data = (void *)AXP202_ID },
+ { .compatible = "x-powers,axp209", .data = (void *)AXP209_ID },
+ { .compatible = "x-powers,axp221", .data = (void *)AXP221_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(of, axp20x_i2c_of_match);
+
+/*
+ * This is useless for OF-enabled devices, but it is needed by the I2C subsystem
+ */
+static const struct i2c_device_id axp20x_i2c_id[] = {
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
+
+static const struct acpi_device_id axp20x_i2c_acpi_match[] = {
+ {
+ .id = "INT33F4",
+ .driver_data = AXP288_ID,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, axp20x_i2c_acpi_match);
+
+static struct i2c_driver axp20x_i2c_driver = {
+ .driver = {
+ .name = "axp20x-i2c",
+ .of_match_table = of_match_ptr(axp20x_i2c_of_match),
+ .acpi_match_table = ACPI_PTR(axp20x_i2c_acpi_match),
+ },
+ .probe = axp20x_i2c_probe,
+ .remove = axp20x_i2c_remove,
+ .id_table = axp20x_i2c_id,
+};
+
+module_i2c_driver(axp20x_i2c_driver);
+
+MODULE_DESCRIPTION("PMIC MFD I2C driver for AXP20X");
+MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/axp20x-rsb.c b/drivers/mfd/axp20x-rsb.c
new file mode 100644
index 000000000000..28c20247c112
--- /dev/null
+++ b/drivers/mfd/axp20x-rsb.c
@@ -0,0 +1,80 @@
+/*
+ * RSB driver for the X-Powers' Power Management ICs
+ *
+ * AXP20x typically comprises an adaptive USB-Compatible PWM charger, BUCK DC-DC
+ * converters, LDOs, multiple 12-bit ADCs of voltage, current and temperature
+ * as well as configurable GPIOs.
+ *
+ * This driver supports the RSB variants.
+ *
+ * Copyright (C) 2015 Chen-Yu Tsai
+ *
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/err.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sunxi-rsb.h>
+
+static int axp20x_rsb_probe(struct sunxi_rsb_device *rdev)
+{
+ struct axp20x_dev *axp20x;
+ int ret;
+
+ axp20x = devm_kzalloc(&rdev->dev, sizeof(*axp20x), GFP_KERNEL);
+ if (!axp20x)
+ return -ENOMEM;
+
+ axp20x->dev = &rdev->dev;
+ axp20x->irq = rdev->irq;
+ dev_set_drvdata(&rdev->dev, axp20x);
+
+ ret = axp20x_match_device(axp20x);
+ if (ret)
+ return ret;
+
+ axp20x->regmap = devm_regmap_init_sunxi_rsb(rdev, axp20x->regmap_cfg);
+ if (IS_ERR(axp20x->regmap)) {
+ ret = PTR_ERR(axp20x->regmap);
+ dev_err(&rdev->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ return axp20x_device_probe(axp20x);
+}
+
+static int axp20x_rsb_remove(struct sunxi_rsb_device *rdev)
+{
+ struct axp20x_dev *axp20x = sunxi_rsb_device_get_drvdata(rdev);
+
+ return axp20x_device_remove(axp20x);
+}
+
+static const struct of_device_id axp20x_rsb_of_match[] = {
+ { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
+ { },
+};
+MODULE_DEVICE_TABLE(of, axp20x_rsb_of_match);
+
+static struct sunxi_rsb_driver axp20x_rsb_driver = {
+ .driver = {
+ .name = "axp20x-rsb",
+ .of_match_table = of_match_ptr(axp20x_rsb_of_match),
+ },
+ .probe = axp20x_rsb_probe,
+ .remove = axp20x_rsb_remove,
+};
+module_sunxi_rsb_driver(axp20x_rsb_driver);
+
+MODULE_DESCRIPTION("PMIC MFD sunXi RSB driver for AXP20X");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 9842199e2e6c..a57d6e940610 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -1,10 +1,14 @@
/*
- * axp20x.c - MFD core driver for the X-Powers' Power Management ICs
+ * MFD core driver for the X-Powers' Power Management ICs
*
* AXP20x typically comprises an adaptive USB-Compatible PWM charger, BUCK DC-DC
* converters, LDOs, multiple 12-bit ADCs of voltage, current and temperature
* as well as configurable GPIOs.
*
+ * This file contains the interface independent core functions.
+ *
+ * Copyright (C) 2014 Carlo Caione
+ *
* Author: Carlo Caione <carlo@caione.org>
*
* This program is free software; you can redistribute it and/or modify
@@ -13,18 +17,15 @@
*/
#include <linux/err.h>
-#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/mfd/axp20x.h>
#include <linux/mfd/core.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/acpi.h>
#define AXP20X_OFF 0x80
@@ -34,6 +35,7 @@ static const char * const axp20x_model_names[] = {
"AXP202",
"AXP209",
"AXP221",
+ "AXP223",
"AXP288",
};
@@ -376,32 +378,6 @@ static const struct regmap_irq axp288_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP288, BC_USB_CHNG, 5, 1),
};
-static const struct of_device_id axp20x_of_match[] = {
- { .compatible = "x-powers,axp152", .data = (void *) AXP152_ID },
- { .compatible = "x-powers,axp202", .data = (void *) AXP202_ID },
- { .compatible = "x-powers,axp209", .data = (void *) AXP209_ID },
- { .compatible = "x-powers,axp221", .data = (void *) AXP221_ID },
- { },
-};
-MODULE_DEVICE_TABLE(of, axp20x_of_match);
-
-/*
- * This is useless for OF-enabled devices, but it is needed by I2C subsystem
- */
-static const struct i2c_device_id axp20x_i2c_id[] = {
- { },
-};
-MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id);
-
-static const struct acpi_device_id axp20x_acpi_match[] = {
- {
- .id = "INT33F4",
- .driver_data = AXP288_ID,
- },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, axp20x_acpi_match);
-
static const struct regmap_irq_chip axp152_regmap_irq_chip = {
.name = "axp152_irq_chip",
.status_base = AXP152_IRQ1_STATE,
@@ -606,25 +582,26 @@ static void axp20x_power_off(void)
AXP20X_OFF);
}
-static int axp20x_match_device(struct axp20x_dev *axp20x, struct device *dev)
+int axp20x_match_device(struct axp20x_dev *axp20x)
{
+ struct device *dev = axp20x->dev;
const struct acpi_device_id *acpi_id;
const struct of_device_id *of_id;
if (dev->of_node) {
- of_id = of_match_device(axp20x_of_match, dev);
+ of_id = of_match_device(dev->driver->of_match_table, dev);
if (!of_id) {
dev_err(dev, "Unable to match OF ID\n");
return -ENODEV;
}
- axp20x->variant = (long) of_id->data;
+ axp20x->variant = (long)of_id->data;
} else {
acpi_id = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!acpi_id || !acpi_id->driver_data) {
dev_err(dev, "Unable to match ACPI ID and data\n");
return -ENODEV;
}
- axp20x->variant = (long) acpi_id->driver_data;
+ axp20x->variant = (long)acpi_id->driver_data;
}
switch (axp20x->variant) {
@@ -642,6 +619,7 @@ static int axp20x_match_device(struct axp20x_dev *axp20x, struct device *dev)
axp20x->regmap_irq_chip = &axp20x_regmap_irq_chip;
break;
case AXP221_ID:
+ case AXP223_ID:
axp20x->nr_cells = ARRAY_SIZE(axp22x_cells);
axp20x->cells = axp22x_cells;
axp20x->regmap_cfg = &axp22x_regmap_config;
@@ -658,51 +636,31 @@ static int axp20x_match_device(struct axp20x_dev *axp20x, struct device *dev)
return -EINVAL;
}
dev_info(dev, "AXP20x variant %s found\n",
- axp20x_model_names[axp20x->variant]);
+ axp20x_model_names[axp20x->variant]);
return 0;
}
+EXPORT_SYMBOL(axp20x_match_device);
-static int axp20x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+int axp20x_device_probe(struct axp20x_dev *axp20x)
{
- struct axp20x_dev *axp20x;
int ret;
- axp20x = devm_kzalloc(&i2c->dev, sizeof(*axp20x), GFP_KERNEL);
- if (!axp20x)
- return -ENOMEM;
-
- ret = axp20x_match_device(axp20x, &i2c->dev);
- if (ret)
- return ret;
-
- axp20x->i2c_client = i2c;
- axp20x->dev = &i2c->dev;
- dev_set_drvdata(axp20x->dev, axp20x);
-
- axp20x->regmap = devm_regmap_init_i2c(i2c, axp20x->regmap_cfg);
- if (IS_ERR(axp20x->regmap)) {
- ret = PTR_ERR(axp20x->regmap);
- dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- ret = regmap_add_irq_chip(axp20x->regmap, i2c->irq,
+ ret = regmap_add_irq_chip(axp20x->regmap, axp20x->irq,
IRQF_ONESHOT | IRQF_SHARED, -1,
axp20x->regmap_irq_chip,
&axp20x->regmap_irqc);
if (ret) {
- dev_err(&i2c->dev, "failed to add irq chip: %d\n", ret);
+ dev_err(axp20x->dev, "failed to add irq chip: %d\n", ret);
return ret;
}
ret = mfd_add_devices(axp20x->dev, -1, axp20x->cells,
- axp20x->nr_cells, NULL, 0, NULL);
+ axp20x->nr_cells, NULL, 0, NULL);
if (ret) {
- dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret);
- regmap_del_irq_chip(i2c->irq, axp20x->regmap_irqc);
+ dev_err(axp20x->dev, "failed to add MFD devices: %d\n", ret);
+ regmap_del_irq_chip(axp20x->irq, axp20x->regmap_irqc);
return ret;
}
@@ -711,38 +669,25 @@ static int axp20x_i2c_probe(struct i2c_client *i2c,
pm_power_off = axp20x_power_off;
}
- dev_info(&i2c->dev, "AXP20X driver loaded\n");
+ dev_info(axp20x->dev, "AXP20X driver loaded\n");
return 0;
}
+EXPORT_SYMBOL(axp20x_device_probe);
-static int axp20x_i2c_remove(struct i2c_client *i2c)
+int axp20x_device_remove(struct axp20x_dev *axp20x)
{
- struct axp20x_dev *axp20x = i2c_get_clientdata(i2c);
-
if (axp20x == axp20x_pm_power_off) {
axp20x_pm_power_off = NULL;
pm_power_off = NULL;
}
mfd_remove_devices(axp20x->dev);
- regmap_del_irq_chip(axp20x->i2c_client->irq, axp20x->regmap_irqc);
+ regmap_del_irq_chip(axp20x->irq, axp20x->regmap_irqc);
return 0;
}
-
-static struct i2c_driver axp20x_i2c_driver = {
- .driver = {
- .name = "axp20x",
- .of_match_table = of_match_ptr(axp20x_of_match),
- .acpi_match_table = ACPI_PTR(axp20x_acpi_match),
- },
- .probe = axp20x_i2c_probe,
- .remove = axp20x_i2c_remove,
- .id_table = axp20x_i2c_id,
-};
-
-module_i2c_driver(axp20x_i2c_driver);
+EXPORT_SYMBOL(axp20x_device_remove);
MODULE_DESCRIPTION("PMIC MFD core driver for AXP20X");
MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
diff --git a/drivers/mfd/cs47l24-tables.c b/drivers/mfd/cs47l24-tables.c
index 870800657594..f6b78aafdb55 100644
--- a/drivers/mfd/cs47l24-tables.c
+++ b/drivers/mfd/cs47l24-tables.c
@@ -227,8 +227,6 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0006 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
- { 0x00000177, 0x0281 }, /* R375 - FLL1 Loop Filter Test 1 */
- { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000179, 0x0000 }, /* R376 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -245,8 +243,6 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x000C }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
- { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
- { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x00000199, 0x0000 }, /* R408 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -678,7 +674,7 @@ static const struct reg_default cs47l24_reg_default[] = {
{ 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C20, 0x0002 }, /* R3104 - Misc Pad Ctrl 1 */
- { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C21, 0x0000 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
@@ -858,8 +854,6 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_CONTROL_7:
- case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -876,8 +870,6 @@ static bool cs47l24_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_CONTROL_7:
- case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index a9ad024ec6b0..8f873866ea60 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -388,11 +388,32 @@ static const struct regmap_range da9062_aa_volatile_ranges[] = {
.range_min = DA9062AA_STATUS_D,
.range_max = DA9062AA_EVENT_C,
}, {
- .range_min = DA9062AA_CONTROL_F,
+ .range_min = DA9062AA_CONTROL_A,
+ .range_max = DA9062AA_CONTROL_B,
+ }, {
+ .range_min = DA9062AA_CONTROL_E,
.range_max = DA9062AA_CONTROL_F,
}, {
+ .range_min = DA9062AA_BUCK2_CONT,
+ .range_max = DA9062AA_BUCK4_CONT,
+ }, {
+ .range_min = DA9062AA_BUCK3_CONT,
+ .range_max = DA9062AA_BUCK3_CONT,
+ }, {
+ .range_min = DA9062AA_LDO1_CONT,
+ .range_max = DA9062AA_LDO4_CONT,
+ }, {
+ .range_min = DA9062AA_DVC_1,
+ .range_max = DA9062AA_DVC_1,
+ }, {
.range_min = DA9062AA_COUNT_S,
.range_max = DA9062AA_SECOND_D,
+ }, {
+ .range_min = DA9062AA_SEQ,
+ .range_max = DA9062AA_SEQ,
+ }, {
+ .range_min = DA9062AA_EN_32K,
+ .range_max = DA9062AA_EN_32K,
},
};
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 2d4e3e0f4e94..73901084945f 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -74,18 +74,30 @@ static const struct regmap_range da9063_ad_writeable_ranges[] = {
static const struct regmap_range da9063_ad_volatile_ranges[] = {
{
- .range_min = DA9063_REG_STATUS_A,
+ .range_min = DA9063_REG_PAGE_CON,
.range_max = DA9063_REG_EVENT_D,
}, {
- .range_min = DA9063_REG_CONTROL_F,
+ .range_min = DA9063_REG_CONTROL_A,
+ .range_max = DA9063_REG_CONTROL_B,
+ }, {
+ .range_min = DA9063_REG_CONTROL_E,
.range_max = DA9063_REG_CONTROL_F,
}, {
- .range_min = DA9063_REG_ADC_MAN,
+ .range_min = DA9063_REG_BCORE2_CONT,
+ .range_max = DA9063_REG_LDO11_CONT,
+ }, {
+ .range_min = DA9063_REG_DVC_1,
.range_max = DA9063_REG_ADC_MAN,
}, {
.range_min = DA9063_REG_ADC_RES_L,
.range_max = DA9063_AD_REG_SECOND_D,
}, {
+ .range_min = DA9063_REG_SEQ,
+ .range_max = DA9063_REG_SEQ,
+ }, {
+ .range_min = DA9063_REG_EN_32K,
+ .range_max = DA9063_REG_EN_32K,
+ }, {
.range_min = DA9063_AD_REG_MON_REG_5,
.range_max = DA9063_AD_REG_MON_REG_6,
},
@@ -152,18 +164,30 @@ static const struct regmap_range da9063_bb_writeable_ranges[] = {
static const struct regmap_range da9063_bb_volatile_ranges[] = {
{
- .range_min = DA9063_REG_STATUS_A,
+ .range_min = DA9063_REG_PAGE_CON,
.range_max = DA9063_REG_EVENT_D,
}, {
- .range_min = DA9063_REG_CONTROL_F,
+ .range_min = DA9063_REG_CONTROL_A,
+ .range_max = DA9063_REG_CONTROL_B,
+ }, {
+ .range_min = DA9063_REG_CONTROL_E,
.range_max = DA9063_REG_CONTROL_F,
}, {
- .range_min = DA9063_REG_ADC_MAN,
+ .range_min = DA9063_REG_BCORE2_CONT,
+ .range_max = DA9063_REG_LDO11_CONT,
+ }, {
+ .range_min = DA9063_REG_DVC_1,
.range_max = DA9063_REG_ADC_MAN,
}, {
.range_min = DA9063_REG_ADC_RES_L,
.range_max = DA9063_BB_REG_SECOND_D,
}, {
+ .range_min = DA9063_REG_SEQ,
+ .range_max = DA9063_REG_SEQ,
+ }, {
+ .range_min = DA9063_REG_EN_32K,
+ .range_max = DA9063_REG_EN_32K,
+ }, {
.range_min = DA9063_BB_REG_MON_REG_5,
.range_max = DA9063_BB_REG_MON_REG_6,
},
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 12099b09a9a7..c0a86aeb1733 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -739,20 +739,17 @@ int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
if (!div && !requests[clkout])
return -EINVAL;
- switch (clkout) {
- case 0:
+ if (clkout == 0) {
div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
(div << PRCM_CLKOCR_CLKODIV0_SHIFT));
- break;
- case 1:
+ } else {
div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
PRCM_CLKOCR_CLK1TYPE);
bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
(div << PRCM_CLKOCR_CLKODIV1_SHIFT));
- break;
}
bits &= mask;
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
new file mode 100644
index 000000000000..77b2675cf8f5
--- /dev/null
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/mfd/imx25-tsadc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct regmap_config mx25_tsadc_regmap_config = {
+ .fast_io = true,
+ .max_register = 8,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static void mx25_tsadc_irq_handler(struct irq_desc *desc)
+{
+ struct mx25_tsadc *tsadc = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+
+ regmap_read(tsadc->regs, MX25_TSC_TGSR, &status);
+
+ if (status & MX25_TGSR_GCQ_INT)
+ generic_handle_irq(irq_find_mapping(tsadc->domain, 1));
+
+ if (status & MX25_TGSR_TCQ_INT)
+ generic_handle_irq(irq_find_mapping(tsadc->domain, 0));
+
+ chained_irq_exit(chip, desc);
+}
+
+static int mx25_tsadc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct mx25_tsadc *tsadc = d->host_data;
+
+ irq_set_chip_data(irq, tsadc);
+ irq_set_chip_and_handler(irq, &dummy_irq_chip,
+ handle_level_irq);
+ irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops mx25_tsadc_domain_ops = {
+ .map = mx25_tsadc_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int mx25_tsadc_setup_irq(struct platform_device *pdev,
+ struct mx25_tsadc *tsadc)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Failed to get irq\n");
+ return irq;
+ }
+
+ tsadc->domain = irq_domain_add_simple(np, 2, 0, &mx25_tsadc_domain_ops,
+ tsadc);
+ if (!tsadc->domain) {
+ dev_err(dev, "Failed to add irq domain\n");
+ return -ENOMEM;
+ }
+
+ irq_set_chained_handler(irq, mx25_tsadc_irq_handler);
+ irq_set_handler_data(irq, tsadc);
+
+ return 0;
+}
+
+static void mx25_tsadc_setup_clk(struct platform_device *pdev,
+ struct mx25_tsadc *tsadc)
+{
+ unsigned clk_div;
+
+ /*
+	 * According to the datasheet the ADC clock should never
+	 * exceed 1.75 MHz. The base clock is the IPG, and the ADC unit uses
+	 * a funny clock divider. To keep the ADC conversion time constant,
+	 * adapt the ADC internal clock divider to the IPG clock rate.
+ */
+
+ dev_dbg(&pdev->dev, "Found master clock at %lu Hz\n",
+ clk_get_rate(tsadc->clk));
+
+ clk_div = DIV_ROUND_UP(clk_get_rate(tsadc->clk), 1750000);
+ dev_dbg(&pdev->dev, "Setting up ADC clock divider to %u\n", clk_div);
+
+ /* adc clock = IPG clock / (2 * div + 2) */
+ clk_div -= 2;
+ clk_div /= 2;
+
+ /*
+ * the ADC clock divider changes its behaviour when values below 4
+ * are used: it is fixed to "/ 10" in this case
+ */
+ clk_div = max_t(unsigned, 4, clk_div);
+
+ dev_dbg(&pdev->dev, "Resulting ADC conversion clock at %lu Hz\n",
+ clk_get_rate(tsadc->clk) / (2 * clk_div + 2));
+
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR,
+ MX25_TGCR_ADCCLKCFG(0x1f),
+ MX25_TGCR_ADCCLKCFG(clk_div));
+}
+
+static int mx25_tsadc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mx25_tsadc *tsadc;
+ struct resource *res;
+ int ret;
+ void __iomem *iomem;
+
+ tsadc = devm_kzalloc(dev, sizeof(*tsadc), GFP_KERNEL);
+ if (!tsadc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iomem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(iomem))
+ return PTR_ERR(iomem);
+
+ tsadc->regs = devm_regmap_init_mmio(dev, iomem,
+ &mx25_tsadc_regmap_config);
+ if (IS_ERR(tsadc->regs)) {
+ dev_err(dev, "Failed to initialize regmap\n");
+ return PTR_ERR(tsadc->regs);
+ }
+
+ tsadc->clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(tsadc->clk)) {
+ dev_err(dev, "Failed to get ipg clock\n");
+ return PTR_ERR(tsadc->clk);
+ }
+
+ /* setup clock according to the datasheet */
+ mx25_tsadc_setup_clk(pdev, tsadc);
+
+ /* Enable clock and reset the component */
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR, MX25_TGCR_CLK_EN,
+ MX25_TGCR_CLK_EN);
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR, MX25_TGCR_TSC_RST,
+ MX25_TGCR_TSC_RST);
+
+	/* Set up power-saving mode, but enable the internal reference voltage */
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR, MX25_TGCR_POWERMODE_MASK,
+ MX25_TGCR_POWERMODE_SAVE);
+ regmap_update_bits(tsadc->regs, MX25_TSC_TGCR, MX25_TGCR_INTREFEN,
+ MX25_TGCR_INTREFEN);
+
+ ret = mx25_tsadc_setup_irq(pdev, tsadc);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, tsadc);
+
+ of_platform_populate(np, NULL, NULL, dev);
+
+ return 0;
+}
+
+static const struct of_device_id mx25_tsadc_ids[] = {
+ { .compatible = "fsl,imx25-tsadc" },
+ { /* Sentinel */ }
+};
+
+static struct platform_driver mx25_tsadc_driver = {
+ .driver = {
+ .name = "mx25-tsadc",
+ .of_match_table = of_match_ptr(mx25_tsadc_ids),
+ },
+ .probe = mx25_tsadc_probe,
+};
+module_platform_driver(mx25_tsadc_driver);
+
+MODULE_DESCRIPTION("MFD for ADC/TSC for Freescale mx25");
+MODULE_AUTHOR("Markus Pargmann <mpa@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mx25-tsadc");
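To make the divider comment in mx25_tsadc_setup_clk() concrete, assume an IPG clock of 66.5 MHz (a plausible i.MX25 rate, chosen here purely for illustration):

	clk_div = DIV_ROUND_UP(66500000, 1750000);	/* = 38 */
	clk_div = (38 - 2) / 2;				/* = 18, the programmed value */
	/* f_adc = 66500000 / (2 * 18 + 2) = 1.75 MHz, right at the limit */

Since 18 >= 4, the "fixed to / 10" fallback guarded by the max_t() clamp is not taken in this case.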
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 06f00d60be46..5a8d9c766633 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -44,8 +44,20 @@ static const struct intel_lpss_platform_info bxt_info = {
.clk_rate = 100000000,
};
+static struct property_entry bxt_i2c_properties[] = {
+ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 42),
+ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
+ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 208),
+ { },
+};
+
+static struct property_set bxt_i2c_pset = {
+ .properties = bxt_i2c_properties,
+};
+
static const struct intel_lpss_platform_info bxt_i2c_info = {
.clk_rate = 133000000,
+ .pset = &bxt_i2c_pset,
};
static const struct acpi_device_id intel_lpss_acpi_ids[] = {
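
These entries attach the Broxton I2C timing parameters as built-in device properties, so a child I2C driver can pick them up through the generic device-property API. A hedged sketch of the consumer side (not part of this patch; the property name is the one defined above):

#include <linux/device.h>
#include <linux/property.h>

/* Illustrative consumer: reads back the SDA hold time attached via
 * the pset above; returns 0 if the property is absent.
 */
static u32 bxt_i2c_sda_hold_ns(struct device *dev)
{
	u32 ns = 0;

	device_property_read_u32(dev, "i2c-sda-hold-time-ns", &ns);
	return ns;
}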
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index a7136c7ae9fb..a19e57118641 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -107,12 +107,24 @@ static const struct intel_lpss_platform_info bxt_uart_info = {
.pset = &uart_pset,
};
+static struct property_entry bxt_i2c_properties[] = {
+ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 42),
+ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
+ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 208),
+ { },
+};
+
+static struct property_set bxt_i2c_pset = {
+ .properties = bxt_i2c_properties,
+};
+
static const struct intel_lpss_platform_info bxt_i2c_info = {
.clk_rate = 133000000,
+ .pset = &bxt_i2c_pset,
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
- /* BXT */
+ /* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x0ab0), (kernel_ulong_t)&bxt_i2c_info },
@@ -128,6 +140,23 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0ac4), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x0ac6), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x0aee), (kernel_ulong_t)&bxt_uart_info },
+ /* BXT B-Step */
+ { PCI_VDEVICE(INTEL, 0x1aac), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1aae), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab0), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab2), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab4), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab6), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1ab8), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1aba), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x1abc), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x1abe), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x1ac0), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x1ac2), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
+
/* APL */
{ PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&bxt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&bxt_i2c_info },
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 1743788f1595..1bbbe877ba7e 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -453,6 +453,7 @@ int intel_lpss_probe(struct device *dev,
err_remove_ltr:
intel_lpss_debugfs_remove(lpss);
intel_lpss_ltr_hide(lpss);
+ intel_lpss_unregister_clock(lpss);
err_clk_register:
ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
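
The added intel_lpss_unregister_clock() call restores the usual probe-unwind invariant: each error label undoes, in reverse order, exactly the steps that succeeded before the jump. A generic sketch of that pattern, with hypothetical register_clock()/register_ltr() helpers standing in for the driver's internals:

#include <linux/device.h>
#include <linux/errno.h>

/* hypothetical helpers standing in for the driver's real steps */
static int register_clock(struct device *dev) { return 0; }
static void unregister_clock(struct device *dev) { }
static int register_ltr(struct device *dev) { return -ENODEV; }

static int probe_sketch(struct device *dev)
{
	int ret;

	ret = register_clock(dev);
	if (ret)
		return ret;

	ret = register_ltr(dev);
	if (ret)
		goto err_unregister_clock;

	return 0;

err_unregister_clock:
	unregister_clock(dev);	/* the step the old code forgot */
	return ret;
}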
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 042137465300..bdc5e27222c0 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -52,8 +52,6 @@
/* The Quark I2C controller source clock */
#define INTEL_QUARK_I2C_CLK_HZ 33000000
-#define INTEL_QUARK_I2C_NCLK 1
-
struct intel_quark_mfd {
struct pci_dev *pdev;
struct clk *i2c_clk;
@@ -128,30 +126,24 @@ MODULE_DEVICE_TABLE(pci, intel_quark_mfd_ids);
static int intel_quark_register_i2c_clk(struct intel_quark_mfd *quark_mfd)
{
struct pci_dev *pdev = quark_mfd->pdev;
- struct clk_lookup *i2c_clk_lookup;
struct clk *i2c_clk;
- int ret;
-
- i2c_clk_lookup = devm_kcalloc(&pdev->dev, INTEL_QUARK_I2C_NCLK,
- sizeof(*i2c_clk_lookup), GFP_KERNEL);
- if (!i2c_clk_lookup)
- return -ENOMEM;
-
- i2c_clk_lookup[0].dev_id = INTEL_QUARK_I2C_CONTROLLER_CLK;
i2c_clk = clk_register_fixed_rate(&pdev->dev,
INTEL_QUARK_I2C_CONTROLLER_CLK, NULL,
CLK_IS_ROOT, INTEL_QUARK_I2C_CLK_HZ);
+ if (IS_ERR(i2c_clk))
+ return PTR_ERR(i2c_clk);
- quark_mfd->i2c_clk_lookup = i2c_clk_lookup;
quark_mfd->i2c_clk = i2c_clk;
+ quark_mfd->i2c_clk_lookup = clkdev_create(i2c_clk, NULL,
+ INTEL_QUARK_I2C_CONTROLLER_CLK);
- ret = clk_register_clkdevs(i2c_clk, i2c_clk_lookup,
- INTEL_QUARK_I2C_NCLK);
- if (ret)
- dev_err(&pdev->dev, "Fixed clk register failed: %d\n", ret);
+ if (!quark_mfd->i2c_clk_lookup) {
+ dev_err(&pdev->dev, "Fixed clk register failed\n");
+ return -ENOMEM;
+ }
- return ret;
+ return 0;
}
static void intel_quark_unregister_i2c_clk(struct pci_dev *pdev)
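
clkdev_create() allocates and registers the clock lookup in a single call, which is what lets the devm_kcalloc'd lookup array and the INTEL_QUARK_I2C_NCLK constant go away. The pattern in isolation, with an illustrative clock name, consumer id and rate:

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/err.h>

static int register_fixed_clk(struct device *dev)
{
	struct clk *clk;

	/* illustrative name and rate */
	clk = clk_register_fixed_rate(dev, "sketch-clk", NULL, 0, 33000000);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* allocate and register the lookup in one call */
	if (!clkdev_create(clk, NULL, "sketch-consumer")) {
		clk_unregister_fixed_rate(clk);
		return -ENOMEM;
	}

	return 0;
}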
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index a41859c55bda..df16fd1df68b 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -376,7 +376,7 @@ static const struct mfd_cell micro_cells[] = {
{ .name = "ipaq-micro-leds", },
};
-static int micro_resume(struct device *dev)
+static int __maybe_unused micro_resume(struct device *dev)
{
struct ipaq_micro *micro = dev_get_drvdata(dev);
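
Tagging the handler __maybe_unused keeps it compiling when CONFIG_PM_SLEEP is off (SIMPLE_DEV_PM_OPS then drops the reference) without wrapping it in #ifdef. The shape of the idiom, names illustrative:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused sketch_resume(struct device *dev)
{
	/* restore device state here */
	return 0;
}

/* resume-only, no suspend handler -- same as ipaq-micro above */
static SIMPLE_DEV_PM_OPS(sketch_pm_ops, NULL, sketch_resume);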
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index d959ebbb2194..c1aff46e89d9 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -35,8 +35,6 @@
#include <linux/err.h>
#include <linux/of.h>
-#define I2C_ADDR_RTC (0x0C >> 1)
-
static const struct mfd_cell max77686_devs[] = {
{ .name = "max77686-pmic", },
{ .name = "max77686-rtc", },
@@ -116,11 +114,6 @@ static const struct regmap_config max77686_regmap_config = {
.val_bits = 8,
};
-static const struct regmap_config max77686_rtc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
static const struct regmap_config max77802_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -156,25 +149,6 @@ static const struct regmap_irq_chip max77686_irq_chip = {
.num_irqs = ARRAY_SIZE(max77686_irqs),
};
-static const struct regmap_irq max77686_rtc_irqs[] = {
- /* RTC interrupts */
- { .reg_offset = 0, .mask = MAX77686_RTCINT_RTC60S_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA1_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA2_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_SMPL_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_RTC1S_MSK, },
- { .reg_offset = 0, .mask = MAX77686_RTCINT_WTSR_MSK, },
-};
-
-static const struct regmap_irq_chip max77686_rtc_irq_chip = {
- .name = "max77686-rtc",
- .status_base = MAX77686_RTC_INT,
- .mask_base = MAX77686_RTC_INTM,
- .num_regs = 1,
- .irqs = max77686_rtc_irqs,
- .num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
-};
-
static const struct regmap_irq_chip max77802_irq_chip = {
.name = "max77802-pmic",
.status_base = MAX77802_REG_INT1,
@@ -184,15 +158,6 @@ static const struct regmap_irq_chip max77802_irq_chip = {
.num_irqs = ARRAY_SIZE(max77686_irqs),
};
-static const struct regmap_irq_chip max77802_rtc_irq_chip = {
- .name = "max77802-rtc",
- .status_base = MAX77802_RTC_INT,
- .mask_base = MAX77802_RTC_INTM,
- .num_regs = 1,
- .irqs = max77686_rtc_irqs, /* same masks as 77686 */
- .num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
-};
-
static const struct of_device_id max77686_pmic_dt_match[] = {
{
.compatible = "maxim,max77686",
@@ -204,6 +169,7 @@ static const struct of_device_id max77686_pmic_dt_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, max77686_pmic_dt_match);
static int max77686_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
@@ -214,8 +180,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
int ret = 0;
const struct regmap_config *config;
const struct regmap_irq_chip *irq_chip;
- const struct regmap_irq_chip *rtc_irq_chip;
- struct regmap **rtc_regmap;
const struct mfd_cell *cells;
int n_devs;
@@ -242,15 +206,11 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
if (max77686->type == TYPE_MAX77686) {
config = &max77686_regmap_config;
irq_chip = &max77686_irq_chip;
- rtc_irq_chip = &max77686_rtc_irq_chip;
- rtc_regmap = &max77686->rtc_regmap;
cells = max77686_devs;
n_devs = ARRAY_SIZE(max77686_devs);
} else {
config = &max77802_regmap_config;
irq_chip = &max77802_irq_chip;
- rtc_irq_chip = &max77802_rtc_irq_chip;
- rtc_regmap = &max77686->regmap;
cells = max77802_devs;
n_devs = ARRAY_SIZE(max77802_devs);
}
@@ -270,60 +230,25 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
return -ENODEV;
}
- if (max77686->type == TYPE_MAX77686) {
- max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
- if (!max77686->rtc) {
- dev_err(max77686->dev,
- "Failed to allocate I2C device for RTC\n");
- return -ENODEV;
- }
- i2c_set_clientdata(max77686->rtc, max77686);
-
- max77686->rtc_regmap =
- devm_regmap_init_i2c(max77686->rtc,
- &max77686_rtc_regmap_config);
- if (IS_ERR(max77686->rtc_regmap)) {
- ret = PTR_ERR(max77686->rtc_regmap);
- dev_err(max77686->dev,
- "failed to allocate RTC regmap: %d\n",
- ret);
- goto err_unregister_i2c;
- }
- }
-
ret = regmap_add_irq_chip(max77686->regmap, max77686->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
IRQF_SHARED, 0, irq_chip,
&max77686->irq_data);
- if (ret) {
+ if (ret < 0) {
dev_err(&i2c->dev, "failed to add PMIC irq chip: %d\n", ret);
- goto err_unregister_i2c;
- }
-
- ret = regmap_add_irq_chip(*rtc_regmap, max77686->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
- IRQF_SHARED, 0, rtc_irq_chip,
- &max77686->rtc_irq_data);
- if (ret) {
- dev_err(&i2c->dev, "failed to add RTC irq chip: %d\n", ret);
- goto err_del_irqc;
+ return ret;
}
ret = mfd_add_devices(max77686->dev, -1, cells, n_devs, NULL, 0, NULL);
if (ret < 0) {
dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret);
- goto err_del_rtc_irqc;
+ goto err_del_irqc;
}
return 0;
-err_del_rtc_irqc:
- regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data);
err_del_irqc:
regmap_del_irq_chip(max77686->irq, max77686->irq_data);
-err_unregister_i2c:
- if (max77686->type == TYPE_MAX77686)
- i2c_unregister_device(max77686->rtc);
return ret;
}
@@ -334,17 +259,14 @@ static int max77686_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(max77686->dev);
- regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data);
regmap_del_irq_chip(max77686->irq, max77686->irq_data);
- if (max77686->type == TYPE_MAX77686)
- i2c_unregister_device(max77686->rtc);
-
return 0;
}
static const struct i2c_device_id max77686_i2c_id[] = {
{ "max77686", TYPE_MAX77686 },
+ { "max77802", TYPE_MAX77802 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max77686_i2c_id);
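
Exporting the OF match table generates the module alias that lets udev autoload the driver from the devicetree compatible; the new "max77802" entry does the same for I2C-id based probing. The general shape, with illustrative names:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id sketch_dt_match[] = {
	{ .compatible = "vendor,sketch-pmic" },
	{ },
};
/* emits the alias udev matches against the DT compatible */
MODULE_DEVICE_TABLE(of, sketch_dt_match);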
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 3ac36f5ccd3e..a4a8f1ec3fb6 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -42,10 +42,10 @@
#include <linux/bcd.h>
#include <linux/slab.h>
#include <linux/mfd/menelaus.h>
+#include <linux/gpio.h>
#include <asm/mach/irq.h>
-#include <asm/gpio.h>
#define DRIVER_NAME "menelaus"
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 1749c1c9f405..8e8d93249c09 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -19,11 +19,17 @@
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6323/core.h>
#include <linux/mfd/mt6397/registers.h>
+#include <linux/mfd/mt6323/registers.h>
#define MT6397_RTC_BASE 0xe000
#define MT6397_RTC_SIZE 0x3e
+#define MT6323_CID_CODE 0x23
+#define MT6391_CID_CODE 0x91
+#define MT6397_CID_CODE 0x97
+
static const struct resource mt6397_rtc_resources[] = {
{
.start = MT6397_RTC_BASE,
@@ -37,6 +43,13 @@ static const struct resource mt6397_rtc_resources[] = {
},
};
+static const struct mfd_cell mt6323_devs[] = {
+ {
+ .name = "mt6323-regulator",
+ .of_compatible = "mediatek,mt6323-regulator"
+ },
+};
+
static const struct mfd_cell mt6397_devs[] = {
{
.name = "mt6397-rtc",
@@ -69,8 +82,10 @@ static void mt6397_irq_sync_unlock(struct irq_data *data)
{
struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
- regmap_write(mt6397->regmap, MT6397_INT_CON0, mt6397->irq_masks_cur[0]);
- regmap_write(mt6397->regmap, MT6397_INT_CON1, mt6397->irq_masks_cur[1]);
+ regmap_write(mt6397->regmap, mt6397->int_con[0],
+ mt6397->irq_masks_cur[0]);
+ regmap_write(mt6397->regmap, mt6397->int_con[1],
+ mt6397->irq_masks_cur[1]);
mutex_unlock(&mt6397->irqlock);
}
@@ -147,8 +162,8 @@ static irqreturn_t mt6397_irq_thread(int irq, void *data)
{
struct mt6397_chip *mt6397 = data;
- mt6397_irq_handle_reg(mt6397, MT6397_INT_STATUS0, 0);
- mt6397_irq_handle_reg(mt6397, MT6397_INT_STATUS1, 16);
+ mt6397_irq_handle_reg(mt6397, mt6397->int_status[0], 0);
+ mt6397_irq_handle_reg(mt6397, mt6397->int_status[1], 16);
return IRQ_HANDLED;
}
@@ -177,8 +192,8 @@ static int mt6397_irq_init(struct mt6397_chip *mt6397)
mutex_init(&mt6397->irqlock);
/* Mask all interrupt sources */
- regmap_write(mt6397->regmap, MT6397_INT_CON0, 0x0);
- regmap_write(mt6397->regmap, MT6397_INT_CON1, 0x0);
+ regmap_write(mt6397->regmap, mt6397->int_con[0], 0x0);
+ regmap_write(mt6397->regmap, mt6397->int_con[1], 0x0);
mt6397->irq_domain = irq_domain_add_linear(mt6397->dev->of_node,
MT6397_IRQ_NR, &mt6397_irq_domain_ops, mt6397);
@@ -203,8 +218,8 @@ static int mt6397_irq_suspend(struct device *dev)
{
struct mt6397_chip *chip = dev_get_drvdata(dev);
- regmap_write(chip->regmap, MT6397_INT_CON0, chip->wake_mask[0]);
- regmap_write(chip->regmap, MT6397_INT_CON1, chip->wake_mask[1]);
+ regmap_write(chip->regmap, chip->int_con[0], chip->wake_mask[0]);
+ regmap_write(chip->regmap, chip->int_con[1], chip->wake_mask[1]);
enable_irq_wake(chip->irq);
@@ -215,8 +230,8 @@ static int mt6397_irq_resume(struct device *dev)
{
struct mt6397_chip *chip = dev_get_drvdata(dev);
- regmap_write(chip->regmap, MT6397_INT_CON0, chip->irq_masks_cur[0]);
- regmap_write(chip->regmap, MT6397_INT_CON1, chip->irq_masks_cur[1]);
+ regmap_write(chip->regmap, chip->int_con[0], chip->irq_masks_cur[0]);
+ regmap_write(chip->regmap, chip->int_con[1], chip->irq_masks_cur[1]);
disable_irq_wake(chip->irq);
@@ -230,34 +245,69 @@ static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
static int mt6397_probe(struct platform_device *pdev)
{
int ret;
- struct mt6397_chip *mt6397;
+ unsigned int id;
+ struct mt6397_chip *pmic;
- mt6397 = devm_kzalloc(&pdev->dev, sizeof(*mt6397), GFP_KERNEL);
- if (!mt6397)
+ pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
return -ENOMEM;
- mt6397->dev = &pdev->dev;
+ pmic->dev = &pdev->dev;
+
/*
* The mt6397 MFD is a child device of the SoC PMIC wrapper.
* Its regmap is inherited from that parent.
*/
- mt6397->regmap = dev_get_regmap(pdev->dev.parent, NULL);
- if (!mt6397->regmap)
+ pmic->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pmic->regmap)
return -ENODEV;
- platform_set_drvdata(pdev, mt6397);
+ platform_set_drvdata(pdev, pmic);
- mt6397->irq = platform_get_irq(pdev, 0);
- if (mt6397->irq > 0) {
- ret = mt6397_irq_init(mt6397);
+ ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+ if (ret) {
+ dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+ goto fail_irq;
+ }
+
+ switch (id & 0xff) {
+ case MT6323_CID_CODE:
+ pmic->int_con[0] = MT6323_INT_CON0;
+ pmic->int_con[1] = MT6323_INT_CON1;
+ pmic->int_status[0] = MT6323_INT_STATUS0;
+ pmic->int_status[1] = MT6323_INT_STATUS1;
+ ret = mfd_add_devices(&pdev->dev, -1, mt6323_devs,
+ ARRAY_SIZE(mt6323_devs), NULL, 0, NULL);
+ break;
+
+ case MT6397_CID_CODE:
+ case MT6391_CID_CODE:
+ pmic->int_con[0] = MT6397_INT_CON0;
+ pmic->int_con[1] = MT6397_INT_CON1;
+ pmic->int_status[0] = MT6397_INT_STATUS0;
+ pmic->int_status[1] = MT6397_INT_STATUS1;
+ ret = mfd_add_devices(&pdev->dev, -1, mt6397_devs,
+ ARRAY_SIZE(mt6397_devs), NULL, 0, NULL);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+ ret = -ENODEV;
+ break;
+ }
+
+ pmic->irq = platform_get_irq(pdev, 0);
+ if (pmic->irq > 0) {
+ ret = mt6397_irq_init(pmic);
if (ret)
return ret;
}
- ret = mfd_add_devices(&pdev->dev, -1, mt6397_devs,
- ARRAY_SIZE(mt6397_devs), NULL, 0, NULL);
- if (ret)
+fail_irq:
+ if (ret) {
+ irq_domain_remove(pmic->irq_domain);
dev_err(&pdev->dev, "failed to add child devices: %d\n", ret);
+ }
return ret;
}
@@ -271,10 +321,17 @@ static int mt6397_remove(struct platform_device *pdev)
static const struct of_device_id mt6397_of_match[] = {
{ .compatible = "mediatek,mt6397" },
+ { .compatible = "mediatek,mt6323" },
{ }
};
MODULE_DEVICE_TABLE(of, mt6397_of_match);
+static const struct platform_device_id mt6397_id[] = {
+ { "mt6397", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, mt6397_id);
+
static struct platform_driver mt6397_driver = {
.probe = mt6397_probe,
.remove = mt6397_remove,
@@ -283,6 +340,7 @@ static struct platform_driver mt6397_driver = {
.of_match_table = of_match_ptr(mt6397_of_match),
.pm = &mt6397_pm_ops,
},
+ .id_table = mt6397_id,
};
module_platform_driver(mt6397_driver);
@@ -290,4 +348,3 @@ module_platform_driver(mt6397_driver);
MODULE_AUTHOR("Flora Fu, MediaTek");
MODULE_DESCRIPTION("Driver for MediaTek MT6397 PMIC");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:mt6397");
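
The reworked probe reads the chip-id register first and only then fills in the per-chip interrupt register offsets and the child-cell list, so the shared IRQ helpers above stay layout-agnostic. A reduced sketch of the dispatch; the offsets below are placeholders, not the real MT6323/MT6397 values:

#include <linux/errno.h>

#define SKETCH_MT6323_CID	0x23
#define SKETCH_MT6397_CID	0x97

/* fill int_con[] according to the chip behind the regmap */
static int sketch_pick_layout(unsigned int id, unsigned int int_con[2])
{
	switch (id & 0xff) {
	case SKETCH_MT6323_CID:
		int_con[0] = 0x100;	/* placeholder offsets */
		int_con[1] = 0x102;
		return 0;
	case SKETCH_MT6397_CID:
		int_con[0] = 0x200;
		int_con[1] = 0x202;
		return 0;
	default:
		return -ENODEV;
	}
}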
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index e10f02f5d551..fc2b2d93f354 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -241,8 +241,8 @@ static const struct regmap_config rc5t583_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_reg = volatile_reg,
- .max_register = RC5T583_MAX_REGS,
- .num_reg_defaults_raw = RC5T583_MAX_REGS,
+ .max_register = RC5T583_MAX_REG,
+ .num_reg_defaults_raw = RC5T583_NUM_REGS,
.cache_type = REGCACHE_RBTREE,
};
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 8222e374e4b1..fb8f9e8b75df 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -334,6 +334,31 @@ static const struct mfd_cell stmpe_keypad_cell = {
};
/*
+ * PWM (1601, 2401, 2403)
+ */
+static struct resource stmpe_pwm_resources[] = {
+ {
+ .name = "PWM0",
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "PWM1",
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "PWM2",
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static const struct mfd_cell stmpe_pwm_cell = {
+ .name = "stmpe-pwm",
+ .of_compatible = "st,stmpe-pwm",
+ .resources = stmpe_pwm_resources,
+ .num_resources = ARRAY_SIZE(stmpe_pwm_resources),
+};
+
+/*
* STMPE801
*/
static const u8 stmpe801_regs[] = {
@@ -537,6 +562,11 @@ static struct stmpe_variant_block stmpe1601_blocks[] = {
.irq = STMPE1601_IRQ_KEYPAD,
.block = STMPE_BLOCK_KEYPAD,
},
+ {
+ .cell = &stmpe_pwm_cell,
+ .irq = STMPE1601_IRQ_PWM0,
+ .block = STMPE_BLOCK_PWM,
+ },
};
/* supported autosleep timeouts (in msecs) */
@@ -771,6 +801,11 @@ static struct stmpe_variant_block stmpe24xx_blocks[] = {
.irq = STMPE24XX_IRQ_KEYPAD,
.block = STMPE_BLOCK_KEYPAD,
},
+ {
+ .cell = &stmpe_pwm_cell,
+ .irq = STMPE24XX_IRQ_PWM0,
+ .block = STMPE_BLOCK_PWM,
+ },
};
static int stmpe24xx_enable(struct stmpe *stmpe, unsigned int blocks,
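
The shared stmpe_pwm_cell is referenced from both the 1601 and 24xx variant tables; only the triggering IRQ differs, and the MFD core rewrites the named IORESOURCE_IRQ entries relative to the parent when the cell is registered. A minimal cell/resource pairing, names illustrative:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>

static struct resource sketch_resources[] = {
	{ .name = "EVENT", .flags = IORESOURCE_IRQ },
};

static const struct mfd_cell sketch_cell = {
	.name = "sketch-subdev",
	.resources = sketch_resources,
	.num_resources = ARRAY_SIZE(sketch_resources),
};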
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index b7aabeefab07..2f2225e845ef 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -36,7 +36,7 @@ struct syscon {
struct list_head list;
};
-static struct regmap_config syscon_regmap_config = {
+static const struct regmap_config syscon_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
@@ -50,6 +50,7 @@ static struct syscon *of_syscon_register(struct device_node *np)
u32 reg_io_width;
int ret;
struct regmap_config syscon_config = syscon_regmap_config;
+ struct resource res;
if (!of_device_is_compatible(np, "syscon"))
return ERR_PTR(-EINVAL);
@@ -58,7 +59,12 @@ static struct syscon *of_syscon_register(struct device_node *np)
if (!syscon)
return ERR_PTR(-ENOMEM);
- base = of_iomap(np, 0);
+ if (of_address_to_resource(np, 0, &res)) {
+ ret = -ENOMEM;
+ goto err_map;
+ }
+
+ base = ioremap(res.start, resource_size(&res));
if (!base) {
ret = -ENOMEM;
goto err_map;
@@ -81,6 +87,7 @@ static struct syscon *of_syscon_register(struct device_node *np)
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
+ syscon_config.max_register = resource_size(&res) - reg_io_width;
regmap = regmap_init_mmio(NULL, base, &syscon_config);
if (IS_ERR(regmap)) {
@@ -192,6 +199,7 @@ static int syscon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct syscon_platform_data *pdata = dev_get_platdata(dev);
struct syscon *syscon;
+ struct regmap_config syscon_config = syscon_regmap_config;
struct resource *res;
void __iomem *base;
@@ -207,11 +215,10 @@ static int syscon_probe(struct platform_device *pdev)
if (!base)
return -ENOMEM;
- syscon_regmap_config.max_register = res->end - res->start - 3;
+ syscon_config.max_register = res->end - res->start - 3;
if (pdata)
- syscon_regmap_config.name = pdata->label;
- syscon->regmap = devm_regmap_init_mmio(dev, base,
- &syscon_regmap_config);
+ syscon_config.name = pdata->label;
+ syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_config);
if (IS_ERR(syscon->regmap)) {
dev_err(dev, "regmap init failed\n");
return PTR_ERR(syscon->regmap);
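
Both registration paths now copy the const template into a local struct regmap_config before customizing it, so concurrent syscon registrations no longer mutate shared state, and the OF path derives max_register from the resource size rather than leaving it unset. The copy-then-customize idiom in isolation (32-bit registers, stride 4, as above):

#include <linux/regmap.h>
#include <linux/types.h>

static const struct regmap_config sketch_template = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

/* per-instance copy; the shared template stays const and untouched */
static void sketch_fill_config(struct regmap_config *cfg,
			       resource_size_t size)
{
	*cfg = sketch_template;
	cfg->max_register = size - 4;	/* last addressable register */
}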
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 83e615ed100a..495e4518fc29 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -1059,26 +1059,7 @@ EXPORT_SYMBOL(tps65013_set_low_pwr);
static int __init tps_init(void)
{
- u32 tries = 3;
- int status = -ENODEV;
-
- printk(KERN_INFO "%s: version %s\n", DRIVER_NAME, DRIVER_VERSION);
-
- /* some boards have startup glitches */
- while (tries--) {
- status = i2c_add_driver(&tps65010_driver);
- if (the_tps)
- break;
- i2c_del_driver(&tps65010_driver);
- if (!tries) {
- printk(KERN_ERR "%s: no chip?\n", DRIVER_NAME);
- return -ENODEV;
- }
- pr_debug("%s: re-probe ...\n", DRIVER_NAME);
- msleep(10);
- }
-
- return status;
+ return i2c_add_driver(&tps65010_driver);
}
/* NOTE: this MUST be initialized before the other parts of the system
* that rely on it ... but after the i2c bus on which this relies.
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
new file mode 100644
index 000000000000..43119a6867fe
--- /dev/null
+++ b/drivers/mfd/tps65086.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65912 driver
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+
+#include <linux/mfd/tps65086.h>
+
+static const struct mfd_cell tps65086_cells[] = {
+ { .name = "tps65086-regulator", },
+ { .name = "tps65086-gpio", },
+};
+
+static const struct regmap_range tps65086_yes_ranges[] = {
+ regmap_reg_range(TPS65086_IRQ, TPS65086_IRQ),
+ regmap_reg_range(TPS65086_PMICSTAT, TPS65086_SHUTDNSRC),
+ regmap_reg_range(TPS65086_GPOCTRL, TPS65086_GPOCTRL),
+ regmap_reg_range(TPS65086_PG_STATUS1, TPS65086_OC_STATUS),
+};
+
+static const struct regmap_access_table tps65086_volatile_table = {
+ .yes_ranges = tps65086_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65086_yes_ranges),
+};
+
+static const struct regmap_config tps65086_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65086_volatile_table,
+};
+
+static const struct regmap_irq tps65086_irqs[] = {
+ REGMAP_IRQ_REG(TPS65086_IRQ_DIETEMP, 0, TPS65086_IRQ_DIETEMP_MASK),
+ REGMAP_IRQ_REG(TPS65086_IRQ_SHUTDN, 0, TPS65086_IRQ_SHUTDN_MASK),
+ REGMAP_IRQ_REG(TPS65086_IRQ_FAULT, 0, TPS65086_IRQ_FAULT_MASK),
+};
+
+static struct regmap_irq_chip tps65086_irq_chip = {
+ .name = "tps65086",
+ .status_base = TPS65086_IRQ,
+ .mask_base = TPS65086_IRQ_MASK,
+ .ack_base = TPS65086_IRQ,
+ .init_ack_masked = true,
+ .num_regs = 1,
+ .irqs = tps65086_irqs,
+ .num_irqs = ARRAY_SIZE(tps65086_irqs),
+};
+
+static const struct of_device_id tps65086_of_match_table[] = {
+ { .compatible = "ti,tps65086", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tps65086_of_match_table);
+
+static int tps65086_probe(struct i2c_client *client,
+ const struct i2c_device_id *ids)
+{
+ struct tps65086 *tps;
+ unsigned int version;
+ int ret;
+
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, tps);
+ tps->dev = &client->dev;
+ tps->irq = client->irq;
+
+ tps->regmap = devm_regmap_init_i2c(client, &tps65086_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ dev_err(tps->dev, "Failed to initialize register map\n");
+ return PTR_ERR(tps->regmap);
+ }
+
+ ret = regmap_read(tps->regmap, TPS65086_DEVICEID, &version);
+ if (ret) {
+ dev_err(tps->dev, "Failed to read revision register\n");
+ return ret;
+ }
+
+ dev_info(tps->dev, "Device: TPS65086%01X, OTP: %c, Rev: %u\n",
+ (version & TPS65086_DEVICEID_PART_MASK),
+ (char)((version & TPS65086_DEVICEID_OTP_MASK) >> 4) + 'A',
+ (version & TPS65086_DEVICEID_REV_MASK) >> 6);
+
+ ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
+ &tps65086_irq_chip, &tps->irq_data);
+ if (ret) {
+ dev_err(tps->dev, "Failed to register IRQ chip\n");
+ return ret;
+ }
+
+ ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65086_cells,
+ ARRAY_SIZE(tps65086_cells), NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+ if (ret) {
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tps65086_remove(struct i2c_client *client)
+{
+ struct tps65086 *tps = i2c_get_clientdata(client);
+
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
+
+ return 0;
+}
+
+static const struct i2c_device_id tps65086_id_table[] = {
+ { "tps65086", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, tps65086_id_table);
+
+static struct i2c_driver tps65086_driver = {
+ .driver = {
+ .name = "tps65086",
+ .of_match_table = tps65086_of_match_table,
+ },
+ .probe = tps65086_probe,
+ .remove = tps65086_remove,
+ .id_table = tps65086_id_table,
+};
+module_i2c_driver(tps65086_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65086 PMIC Driver");
+MODULE_LICENSE("GPL v2");
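
Because mfd_add_devices() is called with regmap_irq_get_domain(), each child cell's interrupts resolve inside the PMIC's own IRQ domain, so a sub-driver can fetch its line with plain platform_get_irq(). A sketch of that consumer side; the child driver itself is not part of this patch:

#include <linux/platform_device.h>

static int sketch_child_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* request_threaded_irq(irq, ...) against the PMIC's domain */
	return 0;
}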
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index f88085ad9772..d7ec318c40c3 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -30,7 +30,6 @@
#include <linux/err.h>
#define NUM_INT_REG 2
-#define TOTAL_NUM_REG 0x18
#define TPS65090_INT1_MASK_VAC_STATUS_CHANGE 1
#define TPS65090_INT1_MASK_VSYS_STATUS_CHANGE 2
@@ -161,8 +160,8 @@ static bool is_volatile_reg(struct device *dev, unsigned int reg)
static const struct regmap_config tps65090_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = TOTAL_NUM_REG,
- .num_reg_defaults_raw = TOTAL_NUM_REG,
+ .max_register = TPS65090_MAX_REG,
+ .num_reg_defaults_raw = TPS65090_NUM_REGS,
.cache_type = REGCACHE_RBTREE,
.volatile_reg = is_volatile_reg,
};
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 1f82d60b1d0f..a88cfa80dbc4 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -1,175 +1,111 @@
/*
- * tps65912-core.c -- TI TPS65912x
+ * Core functions for TI TPS65912x PMICs
*
- * Copyright 2011 Texas Instruments Inc.
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
*
- * This driver is based on wm8350 implementation.
+ * Based on the TPS65218 driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/interrupt.h>
#include <linux/mfd/core.h>
+#include <linux/module.h>
+
#include <linux/mfd/tps65912.h>
-static const struct mfd_cell tps65912s[] = {
- {
- .name = "tps65912-pmic",
- },
+static const struct mfd_cell tps65912_cells[] = {
+ { .name = "tps65912-regulator", },
+ { .name = "tps65912-gpio", },
};
-int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
-{
- u8 data;
- int err;
-
- mutex_lock(&tps65912->io_mutex);
-
- err = tps65912->read(tps65912, reg, 1, &data);
- if (err) {
- dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
- goto out;
- }
-
- data |= mask;
- err = tps65912->write(tps65912, reg, 1, &data);
- if (err)
- dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
+static const struct regmap_irq tps65912_irqs[] = {
+ /* INT_STS IRQs */
+ REGMAP_IRQ_REG(TPS65912_IRQ_PWRHOLD_F, 0, TPS65912_INT_STS_PWRHOLD_F),
+ REGMAP_IRQ_REG(TPS65912_IRQ_VMON, 0, TPS65912_INT_STS_VMON),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PWRON, 0, TPS65912_INT_STS_PWRON),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PWRON_LP, 0, TPS65912_INT_STS_PWRON_LP),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PWRHOLD_R, 0, TPS65912_INT_STS_PWRHOLD_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_HOTDIE, 0, TPS65912_INT_STS_HOTDIE),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO1_R, 0, TPS65912_INT_STS_GPIO1_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO1_F, 0, TPS65912_INT_STS_GPIO1_F),
+ /* INT_STS2 IRQs */
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO2_R, 1, TPS65912_INT_STS2_GPIO2_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO2_F, 1, TPS65912_INT_STS2_GPIO2_F),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO3_R, 1, TPS65912_INT_STS2_GPIO3_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO3_F, 1, TPS65912_INT_STS2_GPIO3_F),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO4_R, 1, TPS65912_INT_STS2_GPIO4_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO4_F, 1, TPS65912_INT_STS2_GPIO4_F),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO5_R, 1, TPS65912_INT_STS2_GPIO5_R),
+ REGMAP_IRQ_REG(TPS65912_IRQ_GPIO5_F, 1, TPS65912_INT_STS2_GPIO5_F),
+ /* INT_STS3 IRQs */
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_DCDC1, 2, TPS65912_INT_STS3_PGOOD_DCDC1),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_DCDC2, 2, TPS65912_INT_STS3_PGOOD_DCDC2),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_DCDC3, 2, TPS65912_INT_STS3_PGOOD_DCDC3),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_DCDC4, 2, TPS65912_INT_STS3_PGOOD_DCDC4),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO1, 2, TPS65912_INT_STS3_PGOOD_LDO1),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO2, 2, TPS65912_INT_STS3_PGOOD_LDO2),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO3, 2, TPS65912_INT_STS3_PGOOD_LDO3),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO4, 2, TPS65912_INT_STS3_PGOOD_LDO4),
+ /* INT_STS4 IRQs */
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO5, 3, TPS65912_INT_STS4_PGOOD_LDO5),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO6, 3, TPS65912_INT_STS4_PGOOD_LDO6),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO7, 3, TPS65912_INT_STS4_PGOOD_LDO7),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO8, 3, TPS65912_INT_STS4_PGOOD_LDO8),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO9, 3, TPS65912_INT_STS4_PGOOD_LDO9),
+ REGMAP_IRQ_REG(TPS65912_IRQ_PGOOD_LDO10, 3, TPS65912_INT_STS4_PGOOD_LDO10),
+};
-out:
- mutex_unlock(&tps65912->io_mutex);
- return err;
-}
-EXPORT_SYMBOL_GPL(tps65912_set_bits);
+static struct regmap_irq_chip tps65912_irq_chip = {
+ .name = "tps65912",
+ .irqs = tps65912_irqs,
+ .num_irqs = ARRAY_SIZE(tps65912_irqs),
+ .num_regs = 4,
+ .irq_reg_stride = 2,
+ .mask_base = TPS65912_INT_MSK,
+ .status_base = TPS65912_INT_STS,
+ .ack_base = TPS65912_INT_STS,
+ .init_ack_masked = true,
+};
-int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask)
+int tps65912_device_init(struct tps65912 *tps)
{
- u8 data;
- int err;
-
- mutex_lock(&tps65912->io_mutex);
- err = tps65912->read(tps65912, reg, 1, &data);
- if (err) {
- dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
- goto out;
+ int ret;
+
+ ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
+ &tps65912_irq_chip, &tps->irq_data);
+ if (ret)
+ return ret;
+
+ ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65912_cells,
+ ARRAY_SIZE(tps65912_cells), NULL, 0,
+ regmap_irq_get_domain(tps->irq_data));
+ if (ret) {
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
+ return ret;
}
- data &= ~mask;
- err = tps65912->write(tps65912, reg, 1, &data);
- if (err)
- dev_err(tps65912->dev, "Write to reg 0x%x failed\n", reg);
-
-out:
- mutex_unlock(&tps65912->io_mutex);
- return err;
+ return 0;
}
-EXPORT_SYMBOL_GPL(tps65912_clear_bits);
+EXPORT_SYMBOL_GPL(tps65912_device_init);
-static inline int tps65912_read(struct tps65912 *tps65912, u8 reg)
+int tps65912_device_exit(struct tps65912 *tps)
{
- u8 val;
- int err;
-
- err = tps65912->read(tps65912, reg, 1, &val);
- if (err < 0)
- return err;
-
- return val;
-}
-
-static inline int tps65912_write(struct tps65912 *tps65912, u8 reg, u8 val)
-{
- return tps65912->write(tps65912, reg, 1, &val);
-}
-
-int tps65912_reg_read(struct tps65912 *tps65912, u8 reg)
-{
- int data;
-
- mutex_lock(&tps65912->io_mutex);
+ regmap_del_irq_chip(tps->irq, tps->irq_data);
- data = tps65912_read(tps65912, reg);
- if (data < 0)
- dev_err(tps65912->dev, "Read from reg 0x%x failed\n", reg);
-
- mutex_unlock(&tps65912->io_mutex);
- return data;
-}
-EXPORT_SYMBOL_GPL(tps65912_reg_read);
-
-int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val)
-{
- int err;
-
- mutex_lock(&tps65912->io_mutex);
-
- err = tps65912_write(tps65912, reg, val);
- if (err < 0)
- dev_err(tps65912->dev, "Write for reg 0x%x failed\n", reg);
-
- mutex_unlock(&tps65912->io_mutex);
- return err;
-}
-EXPORT_SYMBOL_GPL(tps65912_reg_write);
-
-int tps65912_device_init(struct tps65912 *tps65912)
-{
- struct tps65912_board *pmic_plat_data = dev_get_platdata(tps65912->dev);
- struct tps65912_platform_data *init_data;
- int ret, dcdc_avs, value;
-
- init_data = kzalloc(sizeof(struct tps65912_platform_data), GFP_KERNEL);
- if (init_data == NULL)
- return -ENOMEM;
-
- mutex_init(&tps65912->io_mutex);
- dev_set_drvdata(tps65912->dev, tps65912);
-
- dcdc_avs = (pmic_plat_data->is_dcdc1_avs << 0 |
- pmic_plat_data->is_dcdc2_avs << 1 |
- pmic_plat_data->is_dcdc3_avs << 2 |
- pmic_plat_data->is_dcdc4_avs << 3);
- if (dcdc_avs) {
- tps65912->read(tps65912, TPS65912_I2C_SPI_CFG, 1, &value);
- dcdc_avs |= value;
- tps65912->write(tps65912, TPS65912_I2C_SPI_CFG, 1, &dcdc_avs);
- }
-
- ret = mfd_add_devices(tps65912->dev, -1,
- tps65912s, ARRAY_SIZE(tps65912s),
- NULL, 0, NULL);
- if (ret < 0)
- goto err;
-
- init_data->irq = pmic_plat_data->irq;
- init_data->irq_base = pmic_plat_data->irq_base;
- ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
- if (ret < 0)
- goto err;
-
- kfree(init_data);
- return ret;
-
-err:
- kfree(init_data);
- mfd_remove_devices(tps65912->dev);
- return ret;
-}
-
-void tps65912_device_exit(struct tps65912 *tps65912)
-{
- mfd_remove_devices(tps65912->dev);
- tps65912_irq_exit(tps65912);
+ return 0;
}
+EXPORT_SYMBOL_GPL(tps65912_device_exit);
-MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
-MODULE_DESCRIPTION("TPS65912x chip family multi-function driver");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65912x MFD Driver");
+MODULE_LICENSE("GPL v2");
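
The hand-rolled read/mask/ack machinery of the old driver (deleted below in tps65912-irq.c) collapses into this declarative table: regmap-irq now performs the status reads, masking, acking and nested dispatch itself. A minimal chip description in the same style, with placeholder register addresses:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

enum { SKETCH_IRQ_ALERT };

static const struct regmap_irq sketch_irqs[] = {
	REGMAP_IRQ_REG(SKETCH_IRQ_ALERT, 0, BIT(0)),
};

static const struct regmap_irq_chip sketch_irq_chip = {
	.name = "sketch",
	.status_base = 0x10,	/* placeholder status register */
	.mask_base = 0x11,	/* placeholder mask register */
	.ack_base = 0x10,	/* ack by writing status back */
	.init_ack_masked = true,
	.num_regs = 1,
	.irqs = sketch_irqs,
	.num_irqs = ARRAY_SIZE(sketch_irqs),
};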
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 7e55640b3ed5..45871403f995 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -1,139 +1,79 @@
/*
- * tps65912-i2c.c -- I2C access for TI TPS65912x PMIC
+ * I2C access driver for TI TPS65912x PMICs
*
- * Copyright 2011 Texas Instruments Inc.
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
*
- * This driver is based on wm8350 implementation.
+ * Based on the TPS65218 driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tps65912.h>
-
-static int tps65912_i2c_read(struct tps65912 *tps65912, u8 reg,
- int bytes, void *dest)
-{
- struct i2c_client *i2c = tps65912->control_data;
- struct i2c_msg xfer[2];
- int ret;
-
- /* Write register */
- xfer[0].addr = i2c->addr;
- xfer[0].flags = 0;
- xfer[0].len = 1;
- xfer[0].buf = &reg;
-
- /* Read data */
- xfer[1].addr = i2c->addr;
- xfer[1].flags = I2C_M_RD;
- xfer[1].len = bytes;
- xfer[1].buf = dest;
-
- ret = i2c_transfer(i2c->adapter, xfer, 2);
- if (ret == 2)
- ret = 0;
- else if (ret >= 0)
- ret = -EIO;
- return ret;
-}
-
-static int tps65912_i2c_write(struct tps65912 *tps65912, u8 reg,
- int bytes, void *src)
-{
- struct i2c_client *i2c = tps65912->control_data;
- /* we add 1 byte for device register */
- u8 msg[TPS6591X_MAX_REGISTER + 1];
- int ret;
-
- if (bytes > TPS6591X_MAX_REGISTER)
- return -EINVAL;
-
- msg[0] = reg;
- memcpy(&msg[1], src, bytes);
+#include <linux/module.h>
+#include <linux/regmap.h>
- ret = i2c_master_send(i2c, msg, bytes + 1);
- if (ret < 0)
- return ret;
- if (ret != bytes + 1)
- return -EIO;
+#include <linux/mfd/tps65912.h>
- return 0;
-}
+static const struct of_device_id tps65912_i2c_of_match_table[] = {
+ { .compatible = "ti,tps65912", },
+ { /* sentinel */ }
+};
-static int tps65912_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int tps65912_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *ids)
{
- struct tps65912 *tps65912;
+ struct tps65912 *tps;
- tps65912 = devm_kzalloc(&i2c->dev,
- sizeof(struct tps65912), GFP_KERNEL);
- if (tps65912 == NULL)
+ tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
return -ENOMEM;
- i2c_set_clientdata(i2c, tps65912);
- tps65912->dev = &i2c->dev;
- tps65912->control_data = i2c;
- tps65912->read = tps65912_i2c_read;
- tps65912->write = tps65912_i2c_write;
+ i2c_set_clientdata(client, tps);
+ tps->dev = &client->dev;
+ tps->irq = client->irq;
+
+ tps->regmap = devm_regmap_init_i2c(client, &tps65912_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ dev_err(tps->dev, "Failed to initialize register map\n");
+ return PTR_ERR(tps->regmap);
+ }
- return tps65912_device_init(tps65912);
+ return tps65912_device_init(tps);
}
-static int tps65912_i2c_remove(struct i2c_client *i2c)
+static int tps65912_i2c_remove(struct i2c_client *client)
{
- struct tps65912 *tps65912 = i2c_get_clientdata(i2c);
+ struct tps65912 *tps = i2c_get_clientdata(client);
- tps65912_device_exit(tps65912);
-
- return 0;
+ return tps65912_device_exit(tps);
}
-static const struct i2c_device_id tps65912_i2c_id[] = {
- {"tps65912", 0 },
- { }
+static const struct i2c_device_id tps65912_i2c_id_table[] = {
+ { "tps65912", 0 },
+ { /* sentinel */ }
};
-MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id);
+MODULE_DEVICE_TABLE(i2c, tps65912_i2c_id_table);
static struct i2c_driver tps65912_i2c_driver = {
- .driver = {
- .name = "tps65912",
+ .driver = {
+ .name = "tps65912",
+ .of_match_table = tps65912_i2c_of_match_table,
},
- .probe = tps65912_i2c_probe,
- .remove = tps65912_i2c_remove,
- .id_table = tps65912_i2c_id,
+ .probe = tps65912_i2c_probe,
+ .remove = tps65912_i2c_remove,
+ .id_table = tps65912_i2c_id_table,
};
+module_i2c_driver(tps65912_i2c_driver);
-static int __init tps65912_i2c_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&tps65912_i2c_driver);
- if (ret != 0)
- pr_err("Failed to register TPS65912 I2C driver: %d\n", ret);
-
- return ret;
-}
-/* init early so consumer devices can complete system boot */
-subsys_initcall(tps65912_i2c_init);
-
-static void __exit tps65912_i2c_exit(void)
-{
- i2c_del_driver(&tps65912_i2c_driver);
-}
-module_exit(tps65912_i2c_exit);
-
-MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
-MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65912x I2C Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
deleted file mode 100644
index db2c29cb709b..000000000000
--- a/drivers/mfd/tps65912-irq.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * tps65912-irq.c -- TI TPS6591x
- *
- * Copyright 2011 Texas Instruments Inc.
- *
- * Author: Margarita Olaya <magi@slimlogic.co.uk>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This driver is based on wm8350 implementation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/bug.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/mfd/tps65912.h>
-
-static inline int irq_to_tps65912_irq(struct tps65912 *tps65912,
- int irq)
-{
- return irq - tps65912->irq_base;
-}
-
-/*
- * This is a threaded IRQ handler so can access I2C/SPI. Since the
- * IRQ handler explicitly clears the IRQ it handles the IRQ line
- * will be reasserted and the physical IRQ will be handled again if
- * another interrupt is asserted while we run - in the normal course
- * of events this is a rare occurrence so we save I2C/SPI reads. We're
- * also assuming that it's rare to get lots of interrupts firing
- * simultaneously so try to minimise I/O.
- */
-static irqreturn_t tps65912_irq(int irq, void *irq_data)
-{
- struct tps65912 *tps65912 = irq_data;
- u32 irq_sts;
- u32 irq_mask;
- u8 reg;
- int i;
-
-
- tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
- irq_sts = reg;
- tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
- irq_sts |= reg << 8;
- tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
- irq_sts |= reg << 16;
- tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
- irq_sts |= reg << 24;
-
- tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
- irq_mask = reg;
- tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
- irq_mask |= reg << 8;
- tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
- irq_mask |= reg << 16;
- tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
- irq_mask |= reg << 24;
-
- irq_sts &= ~irq_mask;
- if (!irq_sts)
- return IRQ_NONE;
-
- for (i = 0; i < tps65912->irq_num; i++) {
- if (!(irq_sts & (1 << i)))
- continue;
-
- handle_nested_irq(tps65912->irq_base + i);
- }
-
- /* Write the STS register back to clear IRQs we handled */
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- if (reg)
- tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- if (reg)
- tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
- reg = irq_sts & 0xFF;
- irq_sts >>= 8;
- if (reg)
- tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
- reg = irq_sts & 0xFF;
- if (reg)
- tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
-
- return IRQ_HANDLED;
-}
-
-static void tps65912_irq_lock(struct irq_data *data)
-{
- struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&tps65912->irq_lock);
-}
-
-static void tps65912_irq_sync_unlock(struct irq_data *data)
-{
- struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
- u32 reg_mask;
- u8 reg;
-
- tps65912->read(tps65912, TPS65912_INT_MSK, 1, &reg);
- reg_mask = reg;
- tps65912->read(tps65912, TPS65912_INT_MSK2, 1, &reg);
- reg_mask |= reg << 8;
- tps65912->read(tps65912, TPS65912_INT_MSK3, 1, &reg);
- reg_mask |= reg << 16;
- tps65912->read(tps65912, TPS65912_INT_MSK4, 1, &reg);
- reg_mask |= reg << 24;
-
- if (tps65912->irq_mask != reg_mask) {
- reg = tps65912->irq_mask & 0xFF;
- tps65912->write(tps65912, TPS65912_INT_MSK, 1, &reg);
- reg = tps65912->irq_mask >> 8 & 0xFF;
- tps65912->write(tps65912, TPS65912_INT_MSK2, 1, &reg);
- reg = tps65912->irq_mask >> 16 & 0xFF;
- tps65912->write(tps65912, TPS65912_INT_MSK3, 1, &reg);
- reg = tps65912->irq_mask >> 24 & 0xFF;
- tps65912->write(tps65912, TPS65912_INT_MSK4, 1, &reg);
- }
-
- mutex_unlock(&tps65912->irq_lock);
-}
-
-static void tps65912_irq_enable(struct irq_data *data)
-{
- struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
-
- tps65912->irq_mask &= ~(1 << irq_to_tps65912_irq(tps65912, data->irq));
-}
-
-static void tps65912_irq_disable(struct irq_data *data)
-{
- struct tps65912 *tps65912 = irq_data_get_irq_chip_data(data);
-
- tps65912->irq_mask |= (1 << irq_to_tps65912_irq(tps65912, data->irq));
-}
-
-static struct irq_chip tps65912_irq_chip = {
- .name = "tps65912",
- .irq_bus_lock = tps65912_irq_lock,
- .irq_bus_sync_unlock = tps65912_irq_sync_unlock,
- .irq_disable = tps65912_irq_disable,
- .irq_enable = tps65912_irq_enable,
-};
-
-int tps65912_irq_init(struct tps65912 *tps65912, int irq,
- struct tps65912_platform_data *pdata)
-{
- int ret, cur_irq;
- int flags = IRQF_ONESHOT;
- u8 reg;
-
- if (!irq) {
- dev_warn(tps65912->dev, "No interrupt support, no core IRQ\n");
- return 0;
- }
-
- if (!pdata || !pdata->irq_base) {
- dev_warn(tps65912->dev, "No interrupt support, no IRQ base\n");
- return 0;
- }
-
- /* Clear unattended interrupts */
- tps65912->read(tps65912, TPS65912_INT_STS, 1, &reg);
- tps65912->write(tps65912, TPS65912_INT_STS, 1, &reg);
- tps65912->read(tps65912, TPS65912_INT_STS2, 1, &reg);
- tps65912->write(tps65912, TPS65912_INT_STS2, 1, &reg);
- tps65912->read(tps65912, TPS65912_INT_STS3, 1, &reg);
- tps65912->write(tps65912, TPS65912_INT_STS3, 1, &reg);
- tps65912->read(tps65912, TPS65912_INT_STS4, 1, &reg);
- tps65912->write(tps65912, TPS65912_INT_STS4, 1, &reg);
-
- /* Mask top level interrupts */
- tps65912->irq_mask = 0xFFFFFFFF;
-
- mutex_init(&tps65912->irq_lock);
- tps65912->chip_irq = irq;
- tps65912->irq_base = pdata->irq_base;
-
- tps65912->irq_num = TPS65912_NUM_IRQ;
-
- /* Register with genirq */
- for (cur_irq = tps65912->irq_base;
- cur_irq < tps65912->irq_num + tps65912->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, tps65912);
- irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
- irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-
- ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
- "tps65912", tps65912);
-
- irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
- if (ret != 0)
- dev_err(tps65912->dev, "Failed to request IRQ: %d\n", ret);
-
- return ret;
-}
-
-int tps65912_irq_exit(struct tps65912 *tps65912)
-{
- free_irq(tps65912->chip_irq, tps65912);
- return 0;
-}
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index d59aa55b1495..4aeba9b6942a 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -1,140 +1,78 @@
/*
- * tps65912-spi.c -- SPI access for TI TPS65912x PMIC
+ * SPI access driver for TI TPS65912x PMICs
*
- * Copyright 2011 Texas Instruments Inc.
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
*
- * This driver is based on wm8350 implementation.
+ * Based on the TPS65218 driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tps65912.h>
-
-static int tps65912_spi_write(struct tps65912 *tps65912, u8 addr,
- int bytes, void *src)
-{
- struct spi_device *spi = tps65912->control_data;
- u8 *data = (u8 *) src;
- int ret;
- /* bit 23 is the read/write bit */
- unsigned long spi_data = 1 << 23 | addr << 15 | *data;
- struct spi_transfer xfer;
- struct spi_message msg;
- u32 tx_buf;
-
- tx_buf = spi_data;
-
- xfer.tx_buf = &tx_buf;
- xfer.rx_buf = NULL;
- xfer.len = sizeof(unsigned long);
- xfer.bits_per_word = 24;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- ret = spi_sync(spi, &msg);
- return ret;
-}
-
-static int tps65912_spi_read(struct tps65912 *tps65912, u8 addr,
- int bytes, void *dest)
-{
- struct spi_device *spi = tps65912->control_data;
- /* bit 23 is the read/write bit */
- unsigned long spi_data = 0 << 23 | addr << 15;
- struct spi_transfer xfer;
- struct spi_message msg;
- int ret;
- u8 *data = (u8 *) dest;
- u32 tx_buf, rx_buf;
-
- tx_buf = spi_data;
- rx_buf = 0;
- xfer.tx_buf = &tx_buf;
- xfer.rx_buf = &rx_buf;
- xfer.len = sizeof(unsigned long);
- xfer.bits_per_word = 24;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- if (spi == NULL)
- return 0;
+#include <linux/mfd/tps65912.h>
- ret = spi_sync(spi, &msg);
- if (ret == 0)
- *data = (u8) (rx_buf & 0xFF);
- return ret;
-}
+static const struct of_device_id tps65912_spi_of_match_table[] = {
+ { .compatible = "ti,tps65912", },
+ { /* sentinel */ }
+};
static int tps65912_spi_probe(struct spi_device *spi)
{
- struct tps65912 *tps65912;
+ struct tps65912 *tps;
- tps65912 = devm_kzalloc(&spi->dev,
- sizeof(struct tps65912), GFP_KERNEL);
- if (tps65912 == NULL)
+ tps = devm_kzalloc(&spi->dev, sizeof(*tps), GFP_KERNEL);
+ if (!tps)
return -ENOMEM;
- tps65912->dev = &spi->dev;
- tps65912->control_data = spi;
- tps65912->read = tps65912_spi_read;
- tps65912->write = tps65912_spi_write;
+ spi_set_drvdata(spi, tps);
+ tps->dev = &spi->dev;
+ tps->irq = spi->irq;
- spi_set_drvdata(spi, tps65912);
+ tps->regmap = devm_regmap_init_spi(spi, &tps65912_regmap_config);
+ if (IS_ERR(tps->regmap)) {
+ dev_err(tps->dev, "Failed to initialize register map\n");
+ return PTR_ERR(tps->regmap);
+ }
- return tps65912_device_init(tps65912);
+ return tps65912_device_init(tps);
}
-static int tps65912_spi_remove(struct spi_device *spi)
+static int tps65912_spi_remove(struct spi_device *client)
{
- struct tps65912 *tps65912 = spi_get_drvdata(spi);
+ struct tps65912 *tps = spi_get_drvdata(client);
- tps65912_device_exit(tps65912);
-
- return 0;
+ return tps65912_device_exit(tps);
}
+static const struct spi_device_id tps65912_spi_id_table[] = {
+ { "tps65912", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, tps65912_spi_id_table);
+
static struct spi_driver tps65912_spi_driver = {
- .driver = {
- .name = "tps65912",
+ .driver = {
+ .name = "tps65912",
+ .of_match_table = tps65912_spi_of_match_table,
},
- .probe = tps65912_spi_probe,
- .remove = tps65912_spi_remove,
+ .probe = tps65912_spi_probe,
+ .remove = tps65912_spi_remove,
+ .id_table = tps65912_spi_id_table,
};
+module_spi_driver(tps65912_spi_driver);
-static int __init tps65912_spi_init(void)
-{
- int ret;
-
- ret = spi_register_driver(&tps65912_spi_driver);
- if (ret != 0)
- pr_err("Failed to register TPS65912 SPI driver: %d\n", ret);
-
- return 0;
-}
-/* init early so consumer devices can complete system boot */
-subsys_initcall(tps65912_spi_init);
-
-static void __exit tps65912_spi_exit(void)
-{
- spi_unregister_driver(&tps65912_spi_driver);
-}
-module_exit(tps65912_spi_exit);
-
-MODULE_AUTHOR("Margarita Olaya <magi@slimlogic.co.uk>");
-MODULE_DESCRIPTION("SPI support for TPS65912 chip family mfd");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65912x SPI Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 0386eaf6be32..ab8b23b5bd22 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -297,7 +297,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
- { 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -314,7 +313,6 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
- { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -338,7 +336,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
- { 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */
+ { 0x000002A7, 0x2C37 }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
{ 0x000002A9, 0x030A }, /* R681 - Mic Detect Level 4 */
{ 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
@@ -402,7 +400,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
{ 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
- { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
+ { 0x00000440, 0x0FFF }, /* R1088 - DRE Enable */
{ 0x00000442, 0x3F0A }, /* R1090 - DRE Control 2 */
{ 0x00000443, 0xDC1F }, /* R1090 - DRE Control 3 */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
@@ -863,7 +861,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
- { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C21, 0x0001 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
@@ -984,7 +982,7 @@ static const struct reg_default wm5102_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
- { 0x00000EE3, 0x0400 }, /* R3811 - ASRC_RATE2 */
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -1062,8 +1060,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_4:
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
- case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_CONTROL_7:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
@@ -1080,8 +1076,6 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_4:
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
- case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_CONTROL_7:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
@@ -1849,8 +1843,6 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_HAPTICS_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
- case ARIZONA_FLL1_NCO_TEST_0:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_DAC_COMP_1:
case ARIZONA_DAC_COMP_2:
case ARIZONA_DAC_COMP_3:
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index c18e11f42b3f..8e74e71507e7 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -676,8 +676,8 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
{ 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
{ 0x0000000A, 0x0001 }, /* R10 - Ctrl IF I2C2 CFG 1 */
- { 0x0000000B, 0x0036 }, /* R11 - Ctrl IF I2C1 CFG 2 */
- { 0x0000000C, 0x0036 }, /* R12 - Ctrl IF I2C2 CFG 2 */
+ { 0x0000000B, 0x001A }, /* R11 - Ctrl IF I2C1 CFG 2 */
+ { 0x0000000C, 0x001A }, /* R12 - Ctrl IF I2C2 CFG 2 */
{ 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
{ 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
{ 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
@@ -723,14 +723,12 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
{ 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
- { 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
+ { 0x00000171, 0x0002 }, /* R369 - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0006 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
- { 0x00000177, 0x0281 }, /* R375 - FLL1 Loop Filter Test 1 */
- { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -740,15 +738,13 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
{ 0x00000187, 0x0001 }, /* R391 - FLL1 Synchroniser 7 */
{ 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
- { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
- { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
+ { 0x0000018A, 0x000C }, /* R394 - FLL1 GPIO Clock */
+ { 0x00000191, 0x0002 }, /* R401 - FLL2 Control 1 */
{ 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
{ 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x000C }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
- { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
- { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -758,7 +754,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
{ 0x000001A7, 0x0001 }, /* R423 - FLL2 Synchroniser 7 */
{ 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
- { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
+ { 0x000001AA, 0x000C }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
{ 0x00000210, 0x0184 }, /* R528 - LDO1 Control 1 */
{ 0x00000213, 0x03E4 }, /* R531 - LDO2 Control 1 */
@@ -771,9 +767,9 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
- { 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */
+ { 0x000002A7, 0x2C37 }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
- { 0x000002A9, 0x300A }, /* R681 - Mic Detect Level 4 */
+ { 0x000002A9, 0x030A }, /* R681 - Mic Detect Level 4 */
{ 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
{ 0x000002CB, 0x0000 }, /* R715 - Isolation control */
{ 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
@@ -810,53 +806,53 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
{ 0x00000410, 0x0080 }, /* R1040 - Output Path Config 1L */
{ 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
- { 0x00000412, 0x0080 }, /* R1042 - DAC Volume Limit 1L */
+ { 0x00000412, 0x0081 }, /* R1042 - DAC Volume Limit 1L */
{ 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
{ 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
{ 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
- { 0x00000416, 0x0080 }, /* R1046 - DAC Volume Limit 1R */
+ { 0x00000416, 0x0081 }, /* R1046 - DAC Volume Limit 1R */
{ 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
{ 0x00000418, 0x0080 }, /* R1048 - Output Path Config 2L */
{ 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
- { 0x0000041A, 0x0080 }, /* R1050 - DAC Volume Limit 2L */
+ { 0x0000041A, 0x0081 }, /* R1050 - DAC Volume Limit 2L */
{ 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
{ 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
{ 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
- { 0x0000041E, 0x0080 }, /* R1054 - DAC Volume Limit 2R */
+ { 0x0000041E, 0x0081 }, /* R1054 - DAC Volume Limit 2R */
{ 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
{ 0x00000420, 0x0080 }, /* R1056 - Output Path Config 3L */
{ 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
- { 0x00000422, 0x0080 }, /* R1058 - DAC Volume Limit 3L */
+ { 0x00000422, 0x0081 }, /* R1058 - DAC Volume Limit 3L */
{ 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
{ 0x00000424, 0x0080 }, /* R1060 - Output Path Config 3R */
{ 0x00000425, 0x0180 }, /* R1061 - DAC Digital Volume 3R */
- { 0x00000426, 0x0080 }, /* R1062 - DAC Volume Limit 3R */
+ { 0x00000426, 0x0081 }, /* R1062 - DAC Volume Limit 3R */
{ 0x00000427, 0x0020 }, /* R1063 - Noise Gate Select 3R */
{ 0x00000428, 0x0000 }, /* R1064 - Output Path Config 4L */
{ 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
- { 0x0000042A, 0x0080 }, /* R1066 - Out Volume 4L */
+ { 0x0000042A, 0x0081 }, /* R1066 - Out Volume 4L */
{ 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
{ 0x0000042C, 0x0000 }, /* R1068 - Output Path Config 4R */
{ 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
- { 0x0000042E, 0x0080 }, /* R1070 - Out Volume 4R */
+ { 0x0000042E, 0x0081 }, /* R1070 - Out Volume 4R */
{ 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
{ 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
{ 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
- { 0x00000432, 0x0080 }, /* R1074 - DAC Volume Limit 5L */
+ { 0x00000432, 0x0081 }, /* R1074 - DAC Volume Limit 5L */
{ 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
{ 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
- { 0x00000436, 0x0080 }, /* R1078 - DAC Volume Limit 5R */
+ { 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
{ 0x00000438, 0x0000 }, /* R1080 - Output Path Config 6L */
{ 0x00000439, 0x0180 }, /* R1081 - DAC Digital Volume 6L */
- { 0x0000043A, 0x0080 }, /* R1082 - DAC Volume Limit 6L */
+ { 0x0000043A, 0x0081 }, /* R1082 - DAC Volume Limit 6L */
{ 0x0000043B, 0x0400 }, /* R1083 - Noise Gate Select 6L */
{ 0x0000043C, 0x0000 }, /* R1084 - Output Path Config 6R */
{ 0x0000043D, 0x0180 }, /* R1085 - DAC Digital Volume 6R */
- { 0x0000043E, 0x0080 }, /* R1086 - DAC Volume Limit 6R */
+ { 0x0000043E, 0x0081 }, /* R1086 - DAC Volume Limit 6R */
{ 0x0000043F, 0x0800 }, /* R1087 - Noise Gate Select 6R */
- { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
+ { 0x00000440, 0x003F }, /* R1088 - DRE Enable */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x0000 }, /* R1112 - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
@@ -864,8 +860,8 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000492, 0x0069 }, /* R1170 - PDM SPK2 CTRL 1 */
{ 0x00000493, 0x0000 }, /* R1171 - PDM SPK2 CTRL 2 */
{ 0x000004A0, 0x3480 }, /* R1184 - HP1 Short Circuit Ctrl */
- { 0x000004A1, 0x3480 }, /* R1185 - HP2 Short Circuit Ctrl */
- { 0x000004A2, 0x3480 }, /* R1186 - HP3 Short Circuit Ctrl */
+ { 0x000004A1, 0x3400 }, /* R1185 - HP2 Short Circuit Ctrl */
+ { 0x000004A2, 0x3400 }, /* R1186 - HP3 Short Circuit Ctrl */
{ 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
{ 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
{ 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
@@ -1483,23 +1479,23 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C18, 0x0000 }, /* R3096 - GP Switch 1 */
{ 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
- { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C21, 0x0001 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
{ 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
- { 0x00000C30, 0x8282 }, /* R3120 - Misc Pad Ctrl 7 */
- { 0x00000C31, 0x0082 }, /* R3121 - Misc Pad Ctrl 8 */
- { 0x00000C32, 0x8282 }, /* R3122 - Misc Pad Ctrl 9 */
- { 0x00000C33, 0x8282 }, /* R3123 - Misc Pad Ctrl 10 */
- { 0x00000C34, 0x8282 }, /* R3124 - Misc Pad Ctrl 11 */
- { 0x00000C35, 0x8282 }, /* R3125 - Misc Pad Ctrl 12 */
- { 0x00000C36, 0x8282 }, /* R3126 - Misc Pad Ctrl 13 */
- { 0x00000C37, 0x8282 }, /* R3127 - Misc Pad Ctrl 14 */
- { 0x00000C38, 0x8282 }, /* R3128 - Misc Pad Ctrl 15 */
- { 0x00000C39, 0x8282 }, /* R3129 - Misc Pad Ctrl 16 */
- { 0x00000C3A, 0x8282 }, /* R3130 - Misc Pad Ctrl 17 */
- { 0x00000C3B, 0x8282 }, /* R3131 - Misc Pad Ctrl 18 */
+ { 0x00000C30, 0x0404 }, /* R3120 - Misc Pad Ctrl 7 */
+ { 0x00000C31, 0x0004 }, /* R3121 - Misc Pad Ctrl 8 */
+ { 0x00000C32, 0x0404 }, /* R3122 - Misc Pad Ctrl 9 */
+ { 0x00000C33, 0x0404 }, /* R3123 - Misc Pad Ctrl 10 */
+ { 0x00000C34, 0x0404 }, /* R3124 - Misc Pad Ctrl 11 */
+ { 0x00000C35, 0x0404 }, /* R3125 - Misc Pad Ctrl 12 */
+ { 0x00000C36, 0x0404 }, /* R3126 - Misc Pad Ctrl 13 */
+ { 0x00000C37, 0x0404 }, /* R3127 - Misc Pad Ctrl 14 */
+ { 0x00000C38, 0x0004 }, /* R3128 - Misc Pad Ctrl 15 */
+ { 0x00000C39, 0x0404 }, /* R3129 - Misc Pad Ctrl 16 */
+ { 0x00000C3A, 0x0404 }, /* R3130 - Misc Pad Ctrl 17 */
+ { 0x00000C3B, 0x0404 }, /* R3131 - Misc Pad Ctrl 18 */
{ 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
{ 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
{ 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
@@ -1641,7 +1637,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000F0D, 0x0000 }, /* R3853 - ANC Coefficient */
{ 0x00000F0E, 0x0000 }, /* R3854 - ANC Coefficient */
{ 0x00000F0F, 0x0000 }, /* R3855 - ANC Coefficient */
- { 0x00000F10, 0x0000 }, /* R3856 - ANC Coefficient */
+ { 0x00000F10, 0x0001 }, /* R3856 - ANC Coefficient */
{ 0x00000F11, 0x0000 }, /* R3857 - ANC Coefficient */
{ 0x00000F12, 0x0000 }, /* R3858 - ANC Coefficient */
{ 0x00000F15, 0x0000 }, /* R3861 - FCL Filter Control */
@@ -1947,8 +1943,6 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_CONTROL_7:
- case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -1965,8 +1959,6 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_CONTROL_7:
- case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
index 4c2dce77cdfc..a0de3002cdad 100644
--- a/drivers/mfd/wm8998-tables.c
+++ b/drivers/mfd/wm8998-tables.c
@@ -229,8 +229,6 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
- { 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
- { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
@@ -247,8 +245,6 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
- { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
- { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
@@ -320,7 +316,7 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
- { 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
+ { 0x00000440, 0x002F }, /* R1088 - DRE Enable */
{ 0x00000441, 0xC759 }, /* R1089 - DRE Control 1 */
{ 0x00000442, 0x2A08 }, /* R1090 - DRE Control 2 */
{ 0x00000443, 0x5CFA }, /* R1091 - DRE Control 3 */
@@ -686,7 +682,7 @@ static const struct reg_default wm8998_reg_default[] = {
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C18, 0x0000 }, /* R3096 - GP Switch 1 */
{ 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
- { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C21, 0x0001 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
@@ -888,8 +884,6 @@ static bool wm8998_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_CONTROL_7:
- case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -906,8 +900,6 @@ static bool wm8998_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_CONTROL_7:
- case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
- case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 054fc10cb3b6..a216b4667742 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -440,7 +440,7 @@ config ARM_CHARLCD
still useful.
config BMP085
- bool
+ tristate
depends on SYSFS
config BMP085_I2C
@@ -470,7 +470,7 @@ config BMP085_SPI
config PCH_PHUB
tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
select GENERIC_NET_UTILS
- depends on PCI && (X86_32 || COMPILE_TEST)
+ depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
help
This driver is for the PCH (Platform Controller Hub) PHUB (Packet Hub) of
Intel Topcliff, which is an IOH (Input/Output Hub) for x86 embedded
@@ -525,6 +525,284 @@ config VEXPRESS_SYSCFG
ARM Ltd. Versatile Express uses specialised platform configuration
bus. System Configuration interface is one of the possible means
of generating transactions on this bus.
+config PANEL
+ tristate "Parallel port LCD/Keypad Panel support"
+ depends on PARPORT
+ ---help---
+ Say Y here if you have an HD44780 or KS-0074 LCD connected to your
+ parallel port. This driver also supports 4- and 6-key keypads. The LCD
+ is accessible through the /dev/lcd char device (10, 156), and the
+ keypad through /dev/keypad (10, 185). Both require misc device support
+ to be enabled. This code can either be compiled as a module, or linked into
+ the kernel and started at boot. If you don't understand what all this
+ is about, say N.
+
+config PANEL_PARPORT
+ int "Default parallel port number (0=LPT1)"
+ depends on PANEL
+ range 0 255
+ default "0"
+ ---help---
+ This is the index of the parallel port the panel is connected to. One
+ driver instance only supports one parallel port, so if your keypad
+ and LCD are connected to two separate ports, you have to start two
+ modules with different arguments. Numbering starts with '0' for LPT1,
+ and so on.
+
+config PANEL_PROFILE
+ int "Default panel profile (0-5, 0=custom)"
+ depends on PANEL
+ range 0 5
+ default "5"
+ ---help---
+ To ease configuration, the driver supports different configuration
+ profiles for past and recent wirings. These profiles can also be
+ used to define an approximate configuration, completed by a few
+ other options. Here are the profiles:
+
+ 0 = custom (see further)
+ 1 = 2x16 parallel LCD, old keypad
+ 2 = 2x16 serial LCD (KS-0074), new keypad
+ 3 = 2x16 parallel LCD (Hantronix), no keypad
+ 4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad
+ 5 = 2x40 parallel LCD (old one), with old keypad
+
+ Custom configurations allow you to define how your display is
+ wired to the parallel port, and how it works. This is only intended
+ for experts.
+
+config PANEL_KEYPAD
+ depends on PANEL && PANEL_PROFILE="0"
+ int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
+ range 0 3
+ default 0
+ ---help---
+ This enables and configures a keypad connected to the parallel port.
+ The keys will be read from character device 10,185. Valid values are:
+
+ 0 : do not enable this driver
+  1 : old 6-key keypad
+  2 : new 6-key keypad, as used on the server at www.ant-computing.com
+  3 : Nexcom NSA1045's 4-key keypad
+
+ New profiles can be described in the driver source. The driver also
+ supports simultaneous key presses when the keypad supports them.
+
+config PANEL_LCD
+ depends on PANEL && PANEL_PROFILE="0"
+ int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
+ range 0 5
+ default 0
+ ---help---
+ This enables and configures an LCD connected to the parallel port.
+ The driver includes an interpreter for escape codes starting with
+ '\e[L' which are specific to the LCD, and a few ANSI codes. The
+ driver will be registered as character device 10,156, usually
+ under the name '/dev/lcd'. There are a total of 6 supported types:
+
+ 0 : do not enable the driver
+ 1 : custom configuration and wiring (see further)
+ 2 : 2x16 & 2x40 parallel LCD (old wiring)
+ 3 : 2x16 serial LCD (KS-0074 based)
+ 4 : 2x16 parallel LCD (Hantronix wiring)
+ 5 : 2x16 parallel LCD (Nexcom wiring)
+
+ When type '1' is specified, other options will appear to configure
+ more precise aspects (wiring, dimensions, protocol, ...). Please note
+ that those values changed from the 2.4 driver for better consistency.
+
+config PANEL_LCD_HEIGHT
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Number of lines on the LCD (1-2)"
+ range 1 2
+ default 2
+ ---help---
+ This is the number of visible character lines on the LCD in the custom
+ profile. It can either be 1 or 2.
+
+config PANEL_LCD_WIDTH
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Number of characters per line on the LCD (1-40)"
+ range 1 40
+ default 40
+ ---help---
+ This is the number of characters per line on the LCD in the custom
+ profile. Common values are 16, 20, 24 and 40.
+
+config PANEL_LCD_BWIDTH
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Internal LCD line width (1-40, 40 by default)"
+ range 1 40
+ default 40
+ ---help---
+ Most LCDs use a standard controller which supports hardware lines of 40
+ characters, although sometimes only 16, 20 or 24 of them are really wired
+ to the terminal. This results in some non-visible but addressable characters,
+ and is the case for most parallel LCDs. Other LCDs, and some serial ones,
+ however, use the same line width internally as what is visible. The KS0074,
+ for example, uses 16 characters per line for 16 visible characters per line.
+
+ This option lets you configure the value used by your LCD in the 'custom'
+ profile. If you don't know, put '40' here.
+
+config PANEL_LCD_HWIDTH
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Hardware LCD line width (1-64, 64 by default)"
+ range 1 64
+ default 64
+ ---help---
+ Most LCDs use a single address bit to differentiate line 0 and line 1. Since
+ some of them need to be able to address 40 chars with the lower bits, they
+ often use the next power of 2, which is 64, to address the next line.
+
+ If you don't know what your LCD uses, use 16 here for a 2x16, and
+ 64 here for a 2x40.
+
+config PANEL_LCD_CHARSET
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "LCD character set (0=normal, 1=KS0074)"
+ range 0 1
+ default 0
+ ---help---
+ Some controllers such as the KS0074 use a somewhat strange character set
+ where many symbols are at unusual places. The driver knows how to map
+ 'standard' ASCII characters to the character sets used by these controllers.
+ Valid values are:
+
+ 0 : normal (untranslated) character set
+ 1 : KS0074 character set
+
+ If you don't know, use the normal one (0).
+
+config PANEL_LCD_PROTO
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "LCD communication mode (0=parallel 8 bits, 1=serial)"
+ range 0 1
+ default 0
+ ---help---
+ This driver now supports any serial or parallel LCD wired to a parallel
+ port. But before assigning signals, the driver needs to know if it will
+ be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires
+ (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals
+ (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for an 8-bit
+ parallel LCD, and 1 for a serial LCD.
+
+config PANEL_LCD_PIN_E
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
+ range -17 17
+ default 14
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'E'
+ signal has been connected. It can be:
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'E' pin in custom profile is '14' (AUTOFEED).
+
+config PANEL_LCD_PIN_RS
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
+ range -17 17
+ default 17
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'RS'
+ signal has been connected. It can be:
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'RS' pin in custom profile is '17' (SELECT IN).
+
+config PANEL_LCD_PIN_RW
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
+ int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
+ range -17 17
+ default 16
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'RW'
+ signal has been connected. It can be:
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'RW' pin in custom profile is '16' (INIT).
+
+config PANEL_LCD_PIN_SCL
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+ int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
+ range -17 17
+ default 1
+ ---help---
+ This describes the number of the parallel port pin to which the serial
+ LCD 'SCL' signal has been connected. It can be:
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'SCL' pin in custom profile is '1' (STROBE).
+
+config PANEL_LCD_PIN_SDA
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
+ int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
+ range -17 17
+ default 2
+ ---help---
+ This describes the number of the parallel port pin to which the serial
+ LCD 'SDA' signal has been connected. It can be:
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'SDA' pin in custom profile is '2' (D0).
+
+config PANEL_LCD_PIN_BL
+ depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
+ int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
+ range -17 17
+ default 0
+ ---help---
+ This describes the number of the parallel port pin to which the LCD 'BL' signal
+ has been connected. It can be:
+
+ 0 : no connection (eg: connected to ground)
+ 1..17 : directly connected to any of these pins on the DB25 plug
+ -1..-17 : connected to the same pin through an inverter (eg: transistor).
+
+ Default for the 'BL' pin in custom profile is '0' (uncontrolled).
+
+config PANEL_CHANGE_MESSAGE
+ depends on PANEL
+ bool "Change LCD initialization message?"
+ default n
+ ---help---
+ This allows you to replace the boot message indicating the kernel version
+ and the driver version with a custom message. This is useful on appliances
+ where a simple 'Starting system' message can be enough to stop a customer
+ from worrying.
+
+ If you say 'Y' here, you'll be able to choose a message yourself. Otherwise,
+ say 'N' and keep the default message with the version.
+
+config PANEL_BOOT_MESSAGE
+ depends on PANEL && PANEL_CHANGE_MESSAGE="y"
+ string "New initialization message"
+ default ""
+ ---help---
+ This allows you to replace the boot message indicating the kernel version
+ and the driver version with a custom message. This is useful on appliances
+ where a simple 'Starting system' message can be enough to stop a customer
+ from worrying.
+
+ An empty message will only clear the display at driver init time. Any other
+ printf()-formatted message is valid, including newlines and escape codes.
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 537d7f3b78da..b2fb6dbffcef 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -56,3 +56,4 @@ obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
+obj-$(CONFIG_PANEL) += panel.o
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 15e88078ba1e..fe1672747bc1 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
*/
value = swab16(value);
- if (dpot->uid == DPOT_UID(AD5271_ID))
+ if (dpot->uid == DPOT_UID(AD5274_ID))
value = value >> 2;
return value;
default:
@@ -452,7 +452,7 @@ static ssize_t sysfs_set_reg(struct device *dev,
int err;
if (reg & DPOT_ADDR_OTP_EN) {
- if (!strncmp(buf, "enabled", sizeof("enabled")))
+ if (sysfs_streq(buf, "enabled"))
set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
else
clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
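The sysfs_streq() conversion above fixes a real corner case: sysfs store callbacks typically receive the token with a trailing newline (as written by 'echo enabled'), which the old strncmp() over sizeof("enabled") bytes, NUL included, could never match. A userspace sketch of the semantics sysfs_streq() provides (an illustrative reimplementation, not the kernel's code):

#include <stdio.h>
#include <string.h>

/* Match the token exactly, optionally followed by one trailing '\n'. */
static int streq_ignoring_newline(const char *buf, const char *token)
{
	size_t n = strlen(token);

	if (strncmp(buf, token, n))
		return 0;
	return buf[n] == '\0' || (buf[n] == '\n' && buf[n + 1] == '\0');
}

int main(void)
{
	printf("%d %d %d\n",
	       streq_ignoring_newline("enabled", "enabled"),	/* 1 */
	       streq_ignoring_newline("enabled\n", "enabled"),	/* 1 */
	       streq_ignoring_newline("enabledX", "enabled"));	/* 0 */
	return 0;
}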
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index a3e789b85cc8..dfb72ecfa604 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1215,7 +1215,7 @@ static int apds990x_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int apds990x_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_off(chip);
@@ -1224,7 +1224,7 @@ static int apds990x_suspend(struct device *dev)
static int apds990x_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
/*
@@ -1240,7 +1240,7 @@ static int apds990x_resume(struct device *dev)
#ifdef CONFIG_PM
static int apds990x_runtime_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_off(chip);
@@ -1249,7 +1249,7 @@ static int apds990x_runtime_suspend(struct device *dev)
static int apds990x_runtime_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_on(chip);
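The four hunks above are one mechanical conversion: to_i2c_client(dev) is the dedicated wrapper for the container_of() that was previously open-coded, and the kobj_to_dev() conversion in c2port further down is the same pattern. A self-contained illustration of the underlying container_of() arithmetic, with generic userspace names:

#include <stddef.h>
#include <stdio.h>

/* Same pointer arithmetic as the kernel macro: step back from a
 * member pointer to the enclosing structure.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };
struct i2c_client { char name[16]; struct device dev; };

int main(void)
{
	struct i2c_client c = { "apds990x", { 42 } };
	struct device *dev = &c.dev;	/* what the PM callback receives */
	struct i2c_client *back = container_of(dev, struct i2c_client, dev);

	printf("%s %d\n", back->name, back->dev.id);	/* apds990x 42 */
	return 0;
}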
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index c65b5ea5d5ef..b3176ee92b90 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -8,7 +8,6 @@
* Author: Linus Walleij <triad@df.lth.se>
*/
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -328,20 +327,6 @@ out_no_resource:
return ret;
}
-static int __exit charlcd_remove(struct platform_device *pdev)
-{
- struct charlcd *lcd = platform_get_drvdata(pdev);
-
- if (lcd) {
- free_irq(lcd->irq, lcd);
- iounmap(lcd->virtbase);
- release_mem_region(lcd->phybase, lcd->physize);
- kfree(lcd);
- }
-
- return 0;
-}
-
static int charlcd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -376,13 +361,8 @@ static struct platform_driver charlcd_driver = {
.driver = {
.name = DRIVERNAME,
.pm = &charlcd_pm_ops,
+ .suppress_bind_attrs = true,
.of_match_table = of_match_ptr(charlcd_match),
},
- .remove = __exit_p(charlcd_remove),
};
-
-module_platform_driver_probe(charlcd_driver, charlcd_probe);
-
-MODULE_AUTHOR("Linus Walleij <triad@df.lth.se>");
-MODULE_DESCRIPTION("ARM Character LCD Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(charlcd_driver, charlcd_probe);
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index e11a0bd6c66e..0516ecda54d3 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -34,6 +34,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
if (ssc->pdev->dev.of_node) {
if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
== ssc_num) {
+ ssc->pdev->id = ssc_num;
ssc_valid = 1;
break;
}
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 753d7ecdadaa..845466e45b95 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1323,7 +1323,7 @@ static int bh1770_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int bh1770_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_off(chip);
@@ -1333,7 +1333,7 @@ static int bh1770_suspend(struct device *dev)
static int bh1770_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
int ret = 0;
@@ -1361,7 +1361,7 @@ static int bh1770_resume(struct device *dev)
#ifdef CONFIG_PM
static int bh1770_runtime_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_off(chip);
@@ -1371,7 +1371,7 @@ static int bh1770_runtime_suspend(struct device *dev)
static int bh1770_runtime_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_on(chip);
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index cc8645b5369d..1922cb8f6b88 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -721,9 +721,7 @@ static ssize_t c2port_read_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
- struct c2port_device *c2dev =
- dev_get_drvdata(container_of(kobj,
- struct device, kobj));
+ struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
ssize_t ret;
/* Check the device and flash access status */
@@ -838,9 +836,7 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buffer, loff_t offset, size_t count)
{
- struct c2port_device *c2dev =
- dev_get_drvdata(container_of(kobj,
- struct device, kobj));
+ struct c2port_device *c2dev = dev_get_drvdata(kobj_to_dev(kobj));
int ret;
/* Check the device access status */
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index be2ac5ce349f..8a55c1aa11aa 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -4,6 +4,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
cxl-y += vphb.o api.o
+cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
obj-$(CONFIG_CXL) += cxl.o
obj-$(CONFIG_CXL_BASE) += base.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index ea3eeb7011e1..2107c948406d 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -51,8 +51,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
if (rc)
goto err_mapping;
- cxl_assign_psn_space(ctx);
-
return ctx;
err_mapping:
@@ -78,7 +76,6 @@ struct device *cxl_get_phys_dev(struct pci_dev *dev)
return afu->adapter->dev.parent;
}
-EXPORT_SYMBOL_GPL(cxl_get_phys_dev);
int cxl_release_context(struct cxl_context *ctx)
{
@@ -91,28 +88,11 @@ int cxl_release_context(struct cxl_context *ctx)
}
EXPORT_SYMBOL_GPL(cxl_release_context);
-int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
-{
- if (num == 0)
- num = ctx->afu->pp_irqs;
- return afu_allocate_irqs(ctx, num);
-}
-EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
-
-void cxl_free_afu_irqs(struct cxl_context *ctx)
-{
- afu_irq_name_free(ctx);
- cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
-}
-EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
-
static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
{
__u16 range;
int r;
- WARN_ON(num == 0);
-
for (r = 0; r < CXL_IRQ_RANGES; r++) {
range = ctx->irqs.range[r];
if (num < range) {
@@ -123,6 +103,44 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
return 0;
}
+int cxl_allocate_afu_irqs(struct cxl_context *ctx, int num)
+{
+ int res;
+ irq_hw_number_t hwirq;
+
+ if (num == 0)
+ num = ctx->afu->pp_irqs;
+ res = afu_allocate_irqs(ctx, num);
+ if (!res && !cpu_has_feature(CPU_FTR_HVMODE)) {
+ /* In a guest, the PSL interrupt is not multiplexed. It was
+ * allocated above, and we need to set its handler
+ */
+ hwirq = cxl_find_afu_irq(ctx, 0);
+ if (hwirq)
+ cxl_map_irq(ctx->afu->adapter, hwirq, cxl_ops->psl_interrupt, ctx, "psl");
+ }
+ return res;
+}
+EXPORT_SYMBOL_GPL(cxl_allocate_afu_irqs);
+
+void cxl_free_afu_irqs(struct cxl_context *ctx)
+{
+ irq_hw_number_t hwirq;
+ unsigned int virq;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE)) {
+ hwirq = cxl_find_afu_irq(ctx, 0);
+ if (hwirq) {
+ virq = irq_find_mapping(NULL, hwirq);
+ if (virq)
+ cxl_unmap_irq(virq, ctx);
+ }
+ }
+ afu_irq_name_free(ctx);
+ cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+}
+EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
+
int cxl_map_afu_irq(struct cxl_context *ctx, int num,
irq_handler_t handler, void *cookie, char *name)
{
@@ -178,7 +196,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
cxl_ctx_get();
- if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) {
+ if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
put_pid(ctx->pid);
cxl_ctx_put();
goto out;
@@ -193,7 +211,7 @@ EXPORT_SYMBOL_GPL(cxl_start_context);
int cxl_process_element(struct cxl_context *ctx)
{
- return ctx->pe;
+ return ctx->external_pe;
}
EXPORT_SYMBOL_GPL(cxl_process_element);
@@ -207,7 +225,6 @@ EXPORT_SYMBOL_GPL(cxl_stop_context);
void cxl_set_master(struct cxl_context *ctx)
{
ctx->master = true;
- cxl_assign_psn_space(ctx);
}
EXPORT_SYMBOL_GPL(cxl_set_master);
@@ -325,15 +342,11 @@ EXPORT_SYMBOL_GPL(cxl_start_work);
void __iomem *cxl_psa_map(struct cxl_context *ctx)
{
- struct cxl_afu *afu = ctx->afu;
- int rc;
-
- rc = cxl_afu_check_and_enable(afu);
- if (rc)
+ if (ctx->status != STARTED)
return NULL;
pr_devel("%s: psn_phys%llx size:%llx\n",
- __func__, afu->psn_phys, afu->adapter->ps_size);
+ __func__, ctx->psn_phys, ctx->psn_size);
return ioremap(ctx->psn_phys, ctx->psn_size);
}
EXPORT_SYMBOL_GPL(cxl_psa_map);
@@ -349,11 +362,11 @@ int cxl_afu_reset(struct cxl_context *ctx)
struct cxl_afu *afu = ctx->afu;
int rc;
- rc = __cxl_afu_reset(afu);
+ rc = cxl_ops->afu_reset(afu);
if (rc)
return rc;
- return cxl_afu_check_and_enable(afu);
+ return cxl_ops->afu_check_and_enable(afu);
}
EXPORT_SYMBOL_GPL(cxl_afu_reset);
@@ -363,3 +376,11 @@ void cxl_perst_reloads_same_image(struct cxl_afu *afu,
afu->adapter->perst_same_image = perst_reloads_same_image;
}
EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
+
+ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count)
+{
+ struct cxl_afu *afu = cxl_pci_to_afu(dev);
+
+ return cxl_ops->read_adapter_vpd(afu->adapter, buf, count);
+}
+EXPORT_SYMBOL_GPL(cxl_read_adapter_vpd);
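The recurring change in api.c is the move from direct calls (cxl_attach_process(), __cxl_afu_reset() and friends) to indirect calls through the global cxl_ops table, so the same common code can drive either the bare-metal or the guest backend; the table type, struct cxl_backend_ops, appears in cxl.h below. A small userspace analogue of that dispatch, with stand-in names and bodies:

#include <stdio.h>

/* Ops-table dispatch: one pointer chosen at init, callers stay
 * backend-agnostic. Function names only mirror the real struct.
 */
struct backend_ops {
	int (*attach_process)(int pe, unsigned long wed);
	int (*detach_process)(int pe);
};

static int native_attach(int pe, unsigned long wed)
{ printf("native attach pe=%d wed=%#lx\n", pe, wed); return 0; }

static int native_detach(int pe)
{ printf("native detach pe=%d\n", pe); return 0; }

static const struct backend_ops native_ops = {
	.attach_process = native_attach,
	.detach_process = native_detach,
};

static const struct backend_ops *ops = &native_ops;	/* picked at probe */

int main(void)
{
	ops->attach_process(3, 0x1000);	/* common code never names the backend */
	return ops->detach_process(3);
}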
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
index a9f0dd3255a2..9b90ec6c07cd 100644
--- a/drivers/misc/cxl/base.c
+++ b/drivers/misc/cxl/base.c
@@ -11,6 +11,7 @@
#include <linux/rcupdate.h>
#include <asm/errno.h>
#include <misc/cxl-base.h>
+#include <linux/of_platform.h>
#include "cxl.h"
/* protected by rcu */
@@ -84,3 +85,34 @@ void unregister_cxl_calls(struct cxl_calls *calls)
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(unregister_cxl_calls);
+
+int cxl_update_properties(struct device_node *dn,
+ struct property *new_prop)
+{
+ return of_update_property(dn, new_prop);
+}
+EXPORT_SYMBOL_GPL(cxl_update_properties);
+
+static int __init cxl_base_init(void)
+{
+ struct device_node *np = NULL;
+ struct platform_device *dev;
+ int count = 0;
+
+ /*
+ * Scan for compatible devices in guest only
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+
+ while ((np = of_find_compatible_node(np, NULL,
+ "ibm,coherent-platform-facility"))) {
+ dev = of_platform_device_create(np, NULL, NULL);
+ if (dev)
+ count++;
+ }
+ pr_devel("Found %d cxl device(s)\n", count);
+ return 0;
+}
+
+module_init(cxl_base_init);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 262b88eac414..10370f280500 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -95,7 +95,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return i;
ctx->pe = i;
- ctx->elem = &ctx->afu->spa[i];
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ ctx->elem = &ctx->afu->native->spa[i];
+ ctx->external_pe = ctx->pe;
+ } else {
+ ctx->external_pe = -1; /* assigned when attaching */
+ }
ctx->pe_inserted = false;
/*
@@ -214,8 +219,8 @@ int __detach_context(struct cxl_context *ctx)
/* Only warn if we detached while the link was OK.
* If detach fails when hw is down, we don't care.
*/
- WARN_ON(cxl_detach_process(ctx) &&
- cxl_adapter_link_ok(ctx->afu->adapter));
+ WARN_ON(cxl_ops->detach_process(ctx) &&
+ cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
/* release the reference to the group leader and mm handling pid */
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a521bc72cec2..38e21cf7806e 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -324,6 +324,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_MODE_TIME_SLICED 0x4
#define CXL_SUPPORTED_MODES (CXL_MODE_DEDICATED | CXL_MODE_DIRECTED)
+#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
+#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
+#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
+
enum cxl_context_status {
CLOSED,
OPENED,
@@ -336,6 +340,12 @@ enum prefault_modes {
CXL_PREFAULT_ALL,
};
+enum cxl_attrs {
+ CXL_ADAPTER_ATTRS,
+ CXL_AFU_MASTER_ATTRS,
+ CXL_AFU_ATTRS,
+};
+
struct cxl_sste {
__be64 esid_data;
__be64 vsid_data;
@@ -344,18 +354,46 @@ struct cxl_sste {
#define to_cxl_adapter(d) container_of(d, struct cxl, dev)
#define to_cxl_afu(d) container_of(d, struct cxl_afu, dev)
-struct cxl_afu {
+struct cxl_afu_native {
+ void __iomem *p1n_mmio;
+ void __iomem *afu_desc_mmio;
irq_hw_number_t psl_hwirq;
+ unsigned int psl_virq;
+ struct mutex spa_mutex;
+ /*
+ * Only the first part of the SPA is used for the process element
+ * linked list. The only other part that software needs to worry about
+ * is sw_command_status, which we store a separate pointer to.
+ * Everything else in the SPA is only used by hardware
+ */
+ struct cxl_process_element *spa;
+ __be64 *sw_command_status;
+ unsigned int spa_size;
+ int spa_order;
+ int spa_max_procs;
+ u64 pp_offset;
+};
+
+struct cxl_afu_guest {
+ u64 handle;
+ phys_addr_t p2n_phys;
+ u64 p2n_size;
+ int max_ints;
+ struct mutex recovery_lock;
+ int previous_state;
+};
+
+struct cxl_afu {
+ struct cxl_afu_native *native;
+ struct cxl_afu_guest *guest;
irq_hw_number_t serr_hwirq;
- char *err_irq_name;
- char *psl_irq_name;
unsigned int serr_virq;
- void __iomem *p1n_mmio;
+ char *psl_irq_name;
+ char *err_irq_name;
void __iomem *p2n_mmio;
phys_addr_t psn_phys;
- u64 pp_offset;
u64 pp_size;
- void __iomem *afu_desc_mmio;
+
struct cxl *adapter;
struct device dev;
struct cdev afu_cdev_s, afu_cdev_m, afu_cdev_d;
@@ -363,26 +401,12 @@ struct cxl_afu {
struct idr contexts_idr;
struct dentry *debugfs;
struct mutex contexts_lock;
- struct mutex spa_mutex;
spinlock_t afu_cntl_lock;
/* AFU error buffer fields and bin attribute for sysfs */
u64 eb_len, eb_offset;
struct bin_attribute attr_eb;
- /*
- * Only the first part of the SPA is used for the process element
- * linked list. The only other part that software needs to worry about
- * is sw_command_status, which we store a separate pointer to.
- * Everything else in the SPA is only used by hardware
- */
- struct cxl_process_element *spa;
- __be64 *sw_command_status;
- unsigned int spa_size;
- int spa_order;
- int spa_max_procs;
- unsigned int psl_virq;
-
/* pointer to the vphb */
struct pci_controller *phb;
@@ -421,6 +445,12 @@ struct cxl_irq_name {
char *name;
};
+struct irq_avail {
+ irq_hw_number_t offset;
+ irq_hw_number_t range;
+ unsigned long *bitmap;
+};
+
/*
* This is a cxl context. If the PSL is in dedicated mode, there will be one
* of these per AFU. If in AFU directed there can be lots of these.
@@ -476,7 +506,19 @@ struct cxl_context {
struct cxl_process_element *elem;
- int pe; /* process element handle */
+ /*
+ * pe is the process element handle, assigned by this driver when the
+ * context is initialized.
+ *
+ * external_pe is the PE shown outside of cxl.
+ * On bare-metal, pe=external_pe, because we decide what the handle is.
+ * In a guest, we only find out about the pe used by pHyp when the
+ * context is attached, and that's the value we want to report outside
+ * of cxl.
+ */
+ int pe;
+ int external_pe;
+
u32 irq_count;
bool pe_inserted;
bool master;
@@ -488,11 +530,34 @@ struct cxl_context {
struct rcu_head rcu;
};
-struct cxl {
+struct cxl_native {
+ u64 afu_desc_off;
+ u64 afu_desc_size;
void __iomem *p1_mmio;
void __iomem *p2_mmio;
irq_hw_number_t err_hwirq;
unsigned int err_virq;
+ u64 ps_off;
+};
+
+struct cxl_guest {
+ struct platform_device *pdev;
+ int irq_nranges;
+ struct cdev cdev;
+ irq_hw_number_t irq_base_offset;
+ struct irq_avail *irq_avail;
+ spinlock_t irq_alloc_lock;
+ u64 handle;
+ char *status;
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem;
+};
+
+struct cxl {
+ struct cxl_native *native;
+ struct cxl_guest *guest;
spinlock_t afu_list_lock;
struct cxl_afu *afu[CXL_MAX_SLICES];
struct device dev;
@@ -503,9 +568,6 @@ struct cxl {
struct bin_attribute cxl_attr;
int adapter_num;
int user_irqs;
- u64 afu_desc_off;
- u64 afu_desc_size;
- u64 ps_off;
u64 ps_size;
u16 psl_rev;
u16 base_image;
@@ -519,13 +581,15 @@ struct cxl {
bool perst_same_image;
};
-int cxl_alloc_one_irq(struct cxl *adapter);
-void cxl_release_one_irq(struct cxl *adapter, int hwirq);
-int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
-void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
-int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
+int cxl_pci_alloc_one_irq(struct cxl *adapter);
+void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq);
+int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num);
+void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter);
+int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq, unsigned int virq);
int cxl_update_image_control(struct cxl *adapter);
-int cxl_reset(struct cxl *adapter);
+int cxl_pci_reset(struct cxl *adapter);
+void cxl_pci_release_afu(struct device *dev);
+ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
/* common == phyp + powernv */
struct cxl_process_element_common {
@@ -555,29 +619,32 @@ struct cxl_process_element {
__be32 software_state;
} __packed;
-static inline bool cxl_adapter_link_ok(struct cxl *cxl)
+static inline bool cxl_adapter_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
struct pci_dev *pdev;
- pdev = to_pci_dev(cxl->dev.parent);
- return !pci_channel_offline(pdev);
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ pdev = to_pci_dev(cxl->dev.parent);
+ return !pci_channel_offline(pdev);
+ }
+ return true;
}
static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
{
WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
- return cxl->p1_mmio + cxl_reg_off(reg);
+ return cxl->native->p1_mmio + cxl_reg_off(reg);
}
static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
{
- if (likely(cxl_adapter_link_ok(cxl)))
+ if (likely(cxl_adapter_link_ok(cxl, NULL)))
out_be64(_cxl_p1_addr(cxl, reg), val);
}
static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
{
- if (likely(cxl_adapter_link_ok(cxl)))
+ if (likely(cxl_adapter_link_ok(cxl, NULL)))
return in_be64(_cxl_p1_addr(cxl, reg));
else
return ~0ULL;
@@ -586,18 +653,18 @@ static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
{
WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
- return afu->p1n_mmio + cxl_reg_off(reg);
+ return afu->native->p1n_mmio + cxl_reg_off(reg);
}
static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
+ if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
out_be64(_cxl_p1n_addr(afu, reg), val);
}
static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
+ if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
return in_be64(_cxl_p1n_addr(afu, reg));
else
return ~0ULL;
@@ -610,39 +677,19 @@ static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg
static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
+ if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
out_be64(_cxl_p2n_addr(afu, reg), val);
}
static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
+ if (likely(cxl_adapter_link_ok(afu->adapter, afu)))
return in_be64(_cxl_p2n_addr(afu, reg));
else
return ~0ULL;
}
-static inline u64 cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
- return in_le64((afu)->afu_desc_mmio + (afu)->crs_offset +
- ((cr) * (afu)->crs_len) + (off));
- else
- return ~0ULL;
-}
-
-static inline u32 cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off)
-{
- if (likely(cxl_adapter_link_ok(afu->adapter)))
- return in_le32((afu)->afu_desc_mmio + (afu)->crs_offset +
- ((cr) * (afu)->crs_len) + (off));
- else
- return 0xffffffff;
-}
-u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
-u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
-
-ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count);
@@ -652,13 +699,14 @@ struct cxl_calls {
};
int register_cxl_calls(struct cxl_calls *calls);
void unregister_cxl_calls(struct cxl_calls *calls);
+int cxl_update_properties(struct device_node *dn, struct property *new_prop);
-int cxl_alloc_adapter_nr(struct cxl *adapter);
void cxl_remove_adapter_nr(struct cxl *adapter);
int cxl_alloc_spa(struct cxl_afu *afu);
void cxl_release_spa(struct cxl_afu *afu);
+dev_t cxl_get_dev(void);
int cxl_file_init(void);
void cxl_file_exit(void);
int cxl_register_adapter(struct cxl *adapter);
@@ -679,21 +727,19 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu);
int cxl_sysfs_afu_m_add(struct cxl_afu *afu);
void cxl_sysfs_afu_m_remove(struct cxl_afu *afu);
-int cxl_afu_activate_mode(struct cxl_afu *afu, int mode);
-int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode);
-int cxl_afu_deactivate_mode(struct cxl_afu *afu);
+struct cxl *cxl_alloc_adapter(void);
+struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice);
int cxl_afu_select_best_mode(struct cxl_afu *afu);
-int cxl_register_psl_irq(struct cxl_afu *afu);
-void cxl_release_psl_irq(struct cxl_afu *afu);
-int cxl_register_psl_err_irq(struct cxl *adapter);
-void cxl_release_psl_err_irq(struct cxl *adapter);
-int cxl_register_serr_irq(struct cxl_afu *afu);
-void cxl_release_serr_irq(struct cxl_afu *afu);
+int cxl_native_register_psl_irq(struct cxl_afu *afu);
+void cxl_native_release_psl_irq(struct cxl_afu *afu);
+int cxl_native_register_psl_err_irq(struct cxl *adapter);
+void cxl_native_release_psl_err_irq(struct cxl *adapter);
+int cxl_native_register_serr_irq(struct cxl_afu *afu);
+void cxl_native_release_serr_irq(struct cxl_afu *afu);
int afu_register_irqs(struct cxl_context *ctx, u32 count);
void afu_release_irqs(struct cxl_context *ctx, void *cookie);
void afu_irq_name_free(struct cxl_context *ctx);
-irqreturn_t cxl_slice_irq_err(int irq, void *data);
int cxl_debugfs_init(void);
void cxl_debugfs_exit(void);
@@ -707,6 +753,7 @@ void cxl_prefault(struct cxl_context *ctx, u64 wed);
struct cxl *get_cxl_adapter(int num);
int cxl_alloc_sst(struct cxl_context *ctx);
+void cxl_dump_debug_buffer(void *addr, size_t size);
void init_cxl_native(void);
@@ -720,40 +767,54 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
void cxl_unmap_irq(unsigned int virq, void *cookie);
int __detach_context(struct cxl_context *ctx);
-/* This matches the layout of the H_COLLECT_CA_INT_INFO retbuf */
+/*
+ * This must match the layout of the H_COLLECT_CA_INT_INFO retbuf defined
+ * in PAPR.
+ * A word about endianness: a pointer to this structure is passed when
+ * calling the hcall. However, it is not a block of memory filled up by
+ * calling the hcall. However, it is not a block of memory filled in by
+ * one by one when returning from the hcall. See the end of the call to
+ * plpar_hcall9() in hvCall.S
+ * As a consequence:
+ * - we don't need to do any endianness conversion
+ * - the pid and tid are an exception. They are 32-bit values returned in
+ * the same 64-bit register. So we do need to worry about byte ordering.
+ */
struct cxl_irq_info {
u64 dsisr;
u64 dar;
u64 dsr;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
u32 pid;
u32 tid;
+#else
+ u32 tid;
+ u32 pid;
+#endif
u64 afu_err;
u64 errstat;
- u64 padding[3]; /* to match the expected retbuf size for plpar_hcall9 */
+ u64 proc_handle;
+ u64 padding[2]; /* to match the expected retbuf size for plpar_hcall9 */
};
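The pid/tid comment can be made concrete: both 32-bit values come back packed in a single 64-bit register, so which struct member lands on which half depends only on host byte order. A minimal userspace demonstration (illustration only):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t reg = ((uint64_t)1234 << 32) | 5678;	/* pid=1234, tid=5678 */
	uint32_t half[2];

	memcpy(half, &reg, sizeof(reg));
	/* big-endian: half[0] == pid; little-endian: half[0] == tid */
	printf("first half = %u, second half = %u\n", half[0], half[1]);
	return 0;
}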
void cxl_assign_psn_space(struct cxl_context *ctx);
-int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed,
- u64 amr);
-int cxl_detach_process(struct cxl_context *ctx);
-
-int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info);
-int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info);
+int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler,
+ void *cookie, irq_hw_number_t *dest_hwirq,
+ unsigned int *dest_virq, const char *name);
int cxl_check_error(struct cxl_afu *afu);
int cxl_afu_slbia(struct cxl_afu *afu);
int cxl_tlb_slb_invalidate(struct cxl *adapter);
int cxl_afu_disable(struct cxl_afu *afu);
-int __cxl_afu_reset(struct cxl_afu *afu);
-int cxl_afu_check_and_enable(struct cxl_afu *afu);
int cxl_psl_purge(struct cxl_afu *afu);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
-void cxl_pci_vphb_reconfigure(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
extern struct pci_driver cxl_pci_driver;
+extern struct platform_driver cxl_of_driver;
int afu_allocate_irqs(struct cxl_context *ctx, u32 count);
int afu_open(struct inode *inode, struct file *file);
@@ -764,4 +825,61 @@ unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
extern const struct file_operations afu_fops;
+struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *dev);
+void cxl_guest_remove_adapter(struct cxl *adapter);
+int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np);
+int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np);
+ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len);
+ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len);
+int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np);
+void cxl_guest_remove_afu(struct cxl_afu *afu);
+int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np);
+int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *afu_np);
+int cxl_guest_add_chardev(struct cxl *adapter);
+void cxl_guest_remove_chardev(struct cxl *adapter);
+void cxl_guest_reload_module(struct cxl *adapter);
+int cxl_of_probe(struct platform_device *pdev);
+
+struct cxl_backend_ops {
+ struct module *module;
+ int (*adapter_reset)(struct cxl *adapter);
+ int (*alloc_one_irq)(struct cxl *adapter);
+ void (*release_one_irq)(struct cxl *adapter, int hwirq);
+ int (*alloc_irq_ranges)(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter, unsigned int num);
+ void (*release_irq_ranges)(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter);
+ int (*setup_irq)(struct cxl *adapter, unsigned int hwirq,
+ unsigned int virq);
+ irqreturn_t (*handle_psl_slice_error)(struct cxl_context *ctx,
+ u64 dsisr, u64 errstat);
+ irqreturn_t (*psl_interrupt)(int irq, void *data);
+ int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+ int (*attach_process)(struct cxl_context *ctx, bool kernel,
+ u64 wed, u64 amr);
+ int (*detach_process)(struct cxl_context *ctx);
+ bool (*support_attributes)(const char *attr_name, enum cxl_attrs type);
+ bool (*link_ok)(struct cxl *cxl, struct cxl_afu *afu);
+ void (*release_afu)(struct device *dev);
+ ssize_t (*afu_read_err_buffer)(struct cxl_afu *afu, char *buf,
+ loff_t off, size_t count);
+ int (*afu_check_and_enable)(struct cxl_afu *afu);
+ int (*afu_activate_mode)(struct cxl_afu *afu, int mode);
+ int (*afu_deactivate_mode)(struct cxl_afu *afu, int mode);
+ int (*afu_reset)(struct cxl_afu *afu);
+ int (*afu_cr_read8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 *val);
+ int (*afu_cr_read16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 *val);
+ int (*afu_cr_read32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 *val);
+ int (*afu_cr_read64)(struct cxl_afu *afu, int cr_idx, u64 offset, u64 *val);
+ int (*afu_cr_write8)(struct cxl_afu *afu, int cr_idx, u64 offset, u8 val);
+ int (*afu_cr_write16)(struct cxl_afu *afu, int cr_idx, u64 offset, u16 val);
+ int (*afu_cr_write32)(struct cxl_afu *afu, int cr_idx, u64 offset, u32 val);
+ ssize_t (*read_adapter_vpd)(struct cxl *adapter, void *buf, size_t count);
+};
+extern const struct cxl_backend_ops cxl_native_ops;
+extern const struct cxl_backend_ops cxl_guest_ops;
+extern const struct cxl_backend_ops *cxl_ops;
+
+/* check if the given pci_dev is on the cxl vphb bus */
+bool cxl_pci_is_vphb_device(struct pci_dev *dev);
#endif
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 18df6f44af2a..5751899e0c17 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -118,6 +118,10 @@ void cxl_debugfs_afu_remove(struct cxl_afu *afu)
int __init cxl_debugfs_init(void)
{
struct dentry *ent;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+
ent = debugfs_create_dir("cxl", NULL);
if (IS_ERR(ent))
return PTR_ERR(ent);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 81c3f75b7330..9a8650bcb042 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -101,7 +101,7 @@ static void cxl_ack_ae(struct cxl_context *ctx)
{
unsigned long flags;
- cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);
spin_lock_irqsave(&ctx->lock, flags);
ctx->pending_fault = true;
@@ -125,7 +125,7 @@ static int cxl_handle_segment_miss(struct cxl_context *ctx,
else {
mb(); /* Order seg table write to TFC MMIO write */
- cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
return IRQ_HANDLED;
@@ -163,7 +163,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
local_irq_restore(flags);
pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
- cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}
/*
@@ -254,14 +254,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
u64 dar = ctx->dar;
struct mm_struct *mm = NULL;
- if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
- cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
- cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
- /* Most likely explanation is harmless - a dedicated process
- * has detached and these were cleared by the PSL purge, but
- * warn about it just in case */
- dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
- return;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
+ cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
+ cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
+ /* Most likely explanation is harmless - a dedicated
+ * process has detached and these were cleared by the
+ * PSL purge, but warn about it just in case
+ */
+ dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
+ return;
+ }
}
/* Early return if the context is being / has been detached */
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 783337d22f36..eec468f1612f 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -26,9 +26,7 @@
#include "trace.h"
#define CXL_NUM_MINORS 256 /* Total to reserve */
-#define CXL_DEV_MINORS 13 /* 1 control + 4 AFUs * 3 (dedicated/master/shared) */
-#define CXL_CARD_MINOR(adapter) (adapter->adapter_num * CXL_DEV_MINORS)
#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
@@ -36,7 +34,6 @@
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))
-#define CXL_DEVT_ADAPTER(dev) (MINOR(dev) / CXL_DEV_MINORS)
#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)
#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
@@ -79,7 +76,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
if (!afu->current_mode)
goto err_put_afu;
- if (!cxl_adapter_link_ok(adapter)) {
+ if (!cxl_ops->link_ok(adapter, afu)) {
rc = -EIO;
goto err_put_afu;
}
@@ -210,8 +207,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
- if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
- amr))) {
+ if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
+ amr))) {
afu_release_irqs(ctx, ctx);
goto out;
}
@@ -222,12 +219,13 @@ out:
mutex_unlock(&ctx->status_mutex);
return rc;
}
+
static long afu_ioctl_process_element(struct cxl_context *ctx,
int __user *upe)
{
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
- if (copy_to_user(upe, &ctx->pe, sizeof(__u32)))
+ if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
return -EFAULT;
return 0;
@@ -259,7 +257,7 @@ long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (ctx->status == CLOSED)
return -EIO;
- if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
pr_devel("afu_ioctl\n");
@@ -289,7 +287,7 @@ int afu_mmap(struct file *file, struct vm_area_struct *vm)
if (ctx->status != STARTED)
return -EIO;
- if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
return cxl_context_iomap(ctx, vm);
@@ -336,7 +334,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
int rc;
DEFINE_WAIT(wait);
- if (!cxl_adapter_link_ok(ctx->afu->adapter))
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
return -EIO;
if (count < CXL_READ_MIN_SIZE)
@@ -349,7 +347,7 @@ ssize_t afu_read(struct file *file, char __user *buf, size_t count,
if (ctx_event_pending(ctx))
break;
- if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
rc = -EIO;
goto out;
}
@@ -445,7 +443,8 @@ static const struct file_operations afu_master_fops = {
static char *cxl_devnode(struct device *dev, umode_t *mode)
{
- if (CXL_DEVT_IS_CARD(dev->devt)) {
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ CXL_DEVT_IS_CARD(dev->devt)) {
/*
* These minor numbers will eventually be used to program the
* PSL and AFUs once we have dynamic reprogramming support
@@ -546,6 +545,11 @@ int cxl_register_adapter(struct cxl *adapter)
return device_register(&adapter->dev);
}
+dev_t cxl_get_dev(void)
+{
+ return cxl_dev;
+}
+
int __init cxl_file_init(void)
{
int rc;
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
new file mode 100644
index 000000000000..68dd0b7da471
--- /dev/null
+++ b/drivers/misc/cxl/flash.c
@@ -0,0 +1,538 @@
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <asm/rtas.h>
+
+#include "cxl.h"
+#include "hcalls.h"
+
+#define DOWNLOAD_IMAGE 1
+#define VALIDATE_IMAGE 2
+
+struct ai_header {
+ u16 version;
+ u8 reserved0[6];
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem;
+ u64 image_offset;
+ u64 image_length;
+ u8 reserved1[96];
+};
+
+static struct semaphore sem;
+static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
+static struct sg_list *le;
+static u64 continue_token;
+static unsigned int transfer;
+
+struct update_props_workarea {
+ __be32 phandle;
+ __be32 state;
+ __be64 reserved;
+ __be32 nprops;
+} __packed;
+
+struct update_nodes_workarea {
+ __be32 state;
+ __be64 unit_address;
+ __be32 reserved;
+} __packed;
+
+#define DEVICE_SCOPE 3
+#define NODE_ACTION_MASK 0xff000000
+#define NODE_COUNT_MASK 0x00ffffff
+#define OPCODE_DELETE 0x01000000
+#define OPCODE_UPDATE 0x02000000
+#define OPCODE_ADD 0x03000000
+
+static int rcall(int token, char *buf, s32 scope)
+{
+ int rc;
+
+ spin_lock(&rtas_data_buf_lock);
+
+ memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
+ rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
+ memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
+
+ spin_unlock(&rtas_data_buf_lock);
+ return rc;
+}
+
+static int update_property(struct device_node *dn, const char *name,
+ u32 vd, char *value)
+{
+ struct property *new_prop;
+ u32 *val;
+ int rc;
+
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ return -ENOMEM;
+
+ new_prop->name = kstrdup(name, GFP_KERNEL);
+ if (!new_prop->name) {
+ kfree(new_prop);
+ return -ENOMEM;
+ }
+
+ new_prop->length = vd;
+ new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
+ if (!new_prop->value) {
+ kfree(new_prop->name);
+ kfree(new_prop);
+ return -ENOMEM;
+ }
+ memcpy(new_prop->value, value, vd);
+
+ val = (u32 *)new_prop->value;
+ rc = cxl_update_properties(dn, new_prop);
+ pr_devel("%s: update property (%s, length: %i, value: %#x)\n",
+ dn->name, name, vd, be32_to_cpu(*val));
+
+ if (rc) {
+ kfree(new_prop->name);
+ kfree(new_prop->value);
+ kfree(new_prop);
+ }
+ return rc;
+}
+
+static int update_node(__be32 phandle, s32 scope)
+{
+ struct update_props_workarea *upwa;
+ struct device_node *dn;
+ int i, rc, ret;
+ char *prop_data;
+ char *buf;
+ int token;
+ u32 nprops;
+ u32 vd;
+
+ token = rtas_token("ibm,update-properties");
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -EINVAL;
+
+ buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dn = of_find_node_by_phandle(be32_to_cpu(phandle));
+ if (!dn) {
+ kfree(buf);
+ return -ENOENT;
+ }
+
+ upwa = (struct update_props_workarea *)&buf[0];
+ upwa->phandle = phandle;
+ do {
+ rc = rcall(token, buf, scope);
+ if (rc < 0)
+ break;
+
+ prop_data = buf + sizeof(*upwa);
+ nprops = be32_to_cpu(upwa->nprops);
+
+ if (*prop_data == 0) {
+ prop_data++;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
+ prop_data += vd + sizeof(vd);
+ nprops--;
+ }
+
+ for (i = 0; i < nprops; i++) {
+ char *prop_name;
+
+ prop_name = prop_data;
+ prop_data += strlen(prop_name) + 1;
+ vd = be32_to_cpu(*(__be32 *)prop_data);
+ prop_data += sizeof(vd);
+
+ if ((vd != 0x00000000) && (vd != 0x80000000)) {
+ ret = update_property(dn, prop_name, vd,
+ prop_data);
+ if (ret)
+ pr_err("cxl: Could not update property %s - %i\n",
+ prop_name, ret);
+
+ prop_data += vd;
+ }
+ }
+ } while (rc == 1);
+
+ of_node_put(dn);
+ kfree(buf);
+ return rc;
+}
+
+static int update_devicetree(struct cxl *adapter, s32 scope)
+{
+ struct update_nodes_workarea *unwa;
+ u32 action, node_count;
+ int token, rc, i;
+ __be32 *data, drc_index, phandle;
+ char *buf;
+
+ token = rtas_token("ibm,update-nodes");
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -EINVAL;
+
+ buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ unwa = (struct update_nodes_workarea *)&buf[0];
+ unwa->unit_address = cpu_to_be64(adapter->guest->handle);
+ do {
+ rc = rcall(token, buf, scope);
+ if (rc && rc != 1)
+ break;
+
+ data = (__be32 *)buf + 4;
+ while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
+ action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+ node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
+ pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
+ action, node_count);
+ data++;
+
+ for (i = 0; i < node_count; i++) {
+ phandle = *data++;
+
+ switch (action) {
+ case OPCODE_DELETE:
+ /* nothing to do */
+ break;
+ case OPCODE_UPDATE:
+ update_node(phandle, scope);
+ break;
+ case OPCODE_ADD:
+ /* nothing to do, just move pointer */
+ drc_index = *data++;
+ break;
+ }
+ }
+ }
+ } while (rc == 1);
+
+ kfree(buf);
+ return 0;
+}
+
+static int handle_image(struct cxl *adapter, int operation,
+ long (*fct)(u64, u64, u64, u64 *),
+ struct cxl_adapter_image *ai)
+{
+ size_t mod, s_copy, len_chunk = 0;
+ struct ai_header *header = NULL;
+ unsigned int entries = 0, i;
+ void *dest, *from;
+ int rc = 0, need_header;
+
+ /* base adapter image header */
+ need_header = (ai->flags & CXL_AI_NEED_HEADER);
+ if (need_header) {
+ header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
+ if (!header)
+ return -ENOMEM;
+ header->version = cpu_to_be16(1);
+ header->vendor = cpu_to_be16(adapter->guest->vendor);
+ header->device = cpu_to_be16(adapter->guest->device);
+ header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
+ header->subsystem = cpu_to_be16(adapter->guest->subsystem);
+ header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
+ header->image_length = cpu_to_be64(ai->len_image);
+ }
+
+ /* number of entries in the list */
+ len_chunk = ai->len_data;
+ if (need_header)
+ len_chunk += CXL_AI_HEADER_SIZE;
+
+ entries = len_chunk / CXL_AI_BUFFER_SIZE;
+ mod = len_chunk % CXL_AI_BUFFER_SIZE;
+ if (mod)
+ entries++;
+
+ if (entries > CXL_AI_MAX_ENTRIES) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
+ * chunk 0 ----------------------------------------------------
+ * | header | data |
+ * ----------------------------------------------------
+ * chunk 1 ----------------------------------------------------
+ * | data |
+ * ----------------------------------------------------
+ * ....
+ * chunk n ----------------------------------------------------
+ * | data |
+ * ----------------------------------------------------
+ */
+ from = (void *) ai->data;
+ for (i = 0; i < entries; i++) {
+ dest = buffer[i];
+ s_copy = CXL_AI_BUFFER_SIZE;
+
+ if ((need_header) && (i == 0)) {
+ /* add adapter image header */
+ memcpy(buffer[i], header, sizeof(struct ai_header));
+ s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
+ dest += CXL_AI_HEADER_SIZE; /* image offset */
+ }
+ if ((i == (entries - 1)) && mod)
+ s_copy = mod;
+
+ /* copy data */
+ if (copy_from_user(dest, from, s_copy)) {
+ rc = -EFAULT;
+ goto err;
+ }
+
+ /* fill in the list */
+ le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
+ le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
+ if ((i == (entries - 1)) && mod)
+ le[i].len = cpu_to_be64(mod);
+ from += s_copy;
+ }
+ pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
+ __func__, operation, need_header, entries, continue_token);
+
+ /*
+ * download/validate the adapter image to the coherent
+ * platform facility
+ */
+ rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
+ &continue_token);
+ if (rc == 0) /* success of download/validation operation */
+ continue_token = 0;
+
+err:
+ kfree(header);
+
+ return rc;
+}
+
+static int transfer_image(struct cxl *adapter, int operation,
+ struct cxl_adapter_image *ai)
+{
+ int rc = 0;
+ int afu;
+
+ switch (operation) {
+ case DOWNLOAD_IMAGE:
+ rc = handle_image(adapter, operation,
+ &cxl_h_download_adapter_image, ai);
+ if (rc < 0) {
+ pr_devel("resetting adapter\n");
+ cxl_h_reset_adapter(adapter->guest->handle);
+ }
+ return rc;
+
+ case VALIDATE_IMAGE:
+ rc = handle_image(adapter, operation,
+ &cxl_h_validate_adapter_image, ai);
+ if (rc < 0) {
+ pr_devel("resetting adapter\n");
+ cxl_h_reset_adapter(adapter->guest->handle);
+ return rc;
+ }
+ if (rc == 0) {
+ pr_devel("remove curent afu\n");
+ for (afu = 0; afu < adapter->slices; afu++)
+ cxl_guest_remove_afu(adapter->afu[afu]);
+
+ pr_devel("resetting adapter\n");
+ cxl_h_reset_adapter(adapter->guest->handle);
+
+ /* The entire image has now been
+ * downloaded and the validation has
+ * been successfully performed.
+ * After that, the partition should call
+ * ibm,update-nodes and
+ * ibm,update-properties to receive the
+ * current configuration
+ */
+ rc = update_devicetree(adapter, DEVICE_SCOPE);
+ transfer = 1;
+ }
+ return rc;
+ }
+
+ return -EINVAL;
+}
+
+static long ioctl_transfer_image(struct cxl *adapter, int operation,
+ struct cxl_adapter_image __user *uai)
+{
+ struct cxl_adapter_image ai;
+
+ pr_devel("%s\n", __func__);
+
+ if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
+ return -EFAULT;
+
+ /*
+ * Make sure reserved fields and bits are set to 0
+ */
+ if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
+ (ai.flags & ~CXL_AI_ALL))
+ return -EINVAL;
+
+ return transfer_image(adapter, operation, &ai);
+}
+
+static int device_open(struct inode *inode, struct file *file)
+{
+ int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
+ struct cxl *adapter;
+ int rc = 0, i;
+
+ pr_devel("in %s\n", __func__);
+
+ BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);
+
+ /* Allow only one process at a time to open the device */
+ if (down_interruptible(&sem) != 0)
+ return -EINTR;
+
+ if (!(adapter = get_cxl_adapter(adapter_num))) {
+ up(&sem);
+ return -ENODEV;
+ }
+
+ file->private_data = adapter;
+ continue_token = 0;
+ transfer = 0;
+
+ for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
+ buffer[i] = NULL;
+
+ /* aligned buffer containing list entries which describes up to
+ * 1 megabyte of data (256 entries of 4096 bytes each)
+ * Logical real address of buffer 0 - Buffer 0 length in bytes
+ * Logical real address of buffer 1 - Buffer 1 length in bytes
+ * Logical real address of buffer 2 - Buffer 2 length in bytes
+ * ....
+ * ....
+ * Logical real address of buffer N - Buffer N length in bytes
+ */
+ le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
+ if (!le) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
+ buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (!buffer[i]) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+ }
+
+ return 0;
+
+err1:
+ for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
+ if (buffer[i])
+ free_page((unsigned long) buffer[i]);
+ }
+
+ if (le)
+ free_page((unsigned long) le);
+err:
+ up(&sem);
+ put_device(&adapter->dev);
+
+ return rc;
+}
+
+static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct cxl *adapter = file->private_data;
+
+ pr_devel("in %s\n", __func__);
+
+ if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
+ return ioctl_transfer_image(adapter,
+ DOWNLOAD_IMAGE,
+ (struct cxl_adapter_image __user *)arg);
+ else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
+ return ioctl_transfer_image(adapter,
+ VALIDATE_IMAGE,
+ (struct cxl_adapter_image __user *)arg);
+ else
+ return -EINVAL;
+}
+
+static long device_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return device_ioctl(file, cmd, arg);
+}
+
+static int device_close(struct inode *inode, struct file *file)
+{
+ struct cxl *adapter = file->private_data;
+ int i;
+
+ pr_devel("in %s\n", __func__);
+
+ for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
+ if (buffer[i])
+ free_page((unsigned long) buffer[i]);
+ }
+
+ if (le)
+ free_page((unsigned long) le);
+
+ up(&sem);
+ put_device(&adapter->dev);
+ continue_token = 0;
+
+ /* reload the module */
+ if (transfer)
+ cxl_guest_reload_module(adapter);
+ else {
+ pr_devel("resetting adapter\n");
+ cxl_h_reset_adapter(adapter->guest->handle);
+ }
+
+ transfer = 0;
+ return 0;
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = device_open,
+ .unlocked_ioctl = device_ioctl,
+ .compat_ioctl = device_compat_ioctl,
+ .release = device_close,
+};
+
+void cxl_guest_remove_chardev(struct cxl *adapter)
+{
+ cdev_del(&adapter->guest->cdev);
+}
+
+int cxl_guest_add_chardev(struct cxl *adapter)
+{
+ dev_t devt;
+ int rc;
+
+ devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
+ sema_init(&sem, 1);
+ cdev_init(&adapter->guest->cdev, &fops);
+ if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
+ dev_err(&adapter->dev,
+ "Unable to add chardev on adapter (card%i): %i\n",
+ adapter->adapter_num, rc);
+ return rc;
+ }
+ adapter->dev.devt = devt;
+ return 0;
+}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
new file mode 100644
index 000000000000..8213372de2b7
--- /dev/null
+++ b/drivers/misc/cxl/guest.c
@@ -0,0 +1,1177 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+
+#include "cxl.h"
+#include "hcalls.h"
+#include "trace.h"
+
+#define CXL_ERROR_DETECTED_EVENT 1
+#define CXL_SLOT_RESET_EVENT 2
+#define CXL_RESUME_EVENT 3
+
+static void pci_error_handlers(struct cxl_afu *afu,
+ int bus_error_event,
+ pci_channel_state_t state)
+{
+ struct pci_dev *afu_dev;
+
+ if (afu->phb == NULL)
+ return;
+
+ list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+ if (!afu_dev->driver)
+ continue;
+
+ switch (bus_error_event) {
+ case CXL_ERROR_DETECTED_EVENT:
+ afu_dev->error_state = state;
+
+ if (afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->error_detected)
+ afu_dev->driver->err_handler->error_detected(afu_dev, state);
+ break;
+ case CXL_SLOT_RESET_EVENT:
+ afu_dev->error_state = state;
+
+ if (afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->slot_reset)
+ afu_dev->driver->err_handler->slot_reset(afu_dev);
+ break;
+ case CXL_RESUME_EVENT:
+ if (afu_dev->driver->err_handler &&
+ afu_dev->driver->err_handler->resume)
+ afu_dev->driver->err_handler->resume(afu_dev);
+ break;
+ }
+ }
+}
+
+static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
+ u64 errstat)
+{
+ pr_devel("in %s\n", __func__);
+ dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
+
+ return cxl_ops->ack_irq(ctx, 0, errstat);
+}
+
+static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
+ void *buf, size_t len)
+{
+ unsigned int entries, mod;
+ unsigned long **vpd_buf = NULL;
+ struct sg_list *le;
+ int rc = 0, i, tocopy;
+ u64 out = 0;
+
+ if (buf == NULL)
+ return -EINVAL;
+
+ /* number of entries in the list */
+ entries = len / SG_BUFFER_SIZE;
+ mod = len % SG_BUFFER_SIZE;
+ if (mod)
+ entries++;
+
+ if (entries > SG_MAX_ENTRIES) {
+ entries = SG_MAX_ENTRIES;
+ len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
+ mod = 0;
+ }
+
+ vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL);
+ if (!vpd_buf)
+ return -ENOMEM;
+
+ le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
+ if (!le) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ for (i = 0; i < entries; i++) {
+ vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (!vpd_buf[i]) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+ le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
+ le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
+ if ((i == (entries - 1)) && mod)
+ le[i].len = cpu_to_be64(mod);
+ }
+
+ if (adapter)
+ rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
+ virt_to_phys(le), entries, &out);
+ else
+ rc = cxl_h_collect_vpd(afu->guest->handle, 0,
+ virt_to_phys(le), entries, &out);
+ pr_devel("length of available (entries: %i), vpd: %#llx\n",
+ entries, out);
+
+ if (!rc) {
+ /*
+ * hcall returns in 'out' the size of available VPDs.
+ * It fills the buffer with as much data as possible.
+ */
+ if (out < len)
+ len = out;
+ rc = len;
+ if (out) {
+ for (i = 0; i < entries; i++) {
+ if (len < SG_BUFFER_SIZE)
+ tocopy = len;
+ else
+ tocopy = SG_BUFFER_SIZE;
+ memcpy(buf, vpd_buf[i], tocopy);
+ buf += tocopy;
+ len -= tocopy;
+ }
+ }
+ }
+err2:
+ for (i = 0; i < entries; i++) {
+ if (vpd_buf[i])
+ free_page((unsigned long) vpd_buf[i]);
+ }
+ free_page((unsigned long) le);
+err1:
+ kfree(vpd_buf);
+ return rc;
+}
+
+static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
+{
+ return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
+}
+
+static irqreturn_t guest_psl_irq(int irq, void *data)
+{
+ struct cxl_context *ctx = data;
+ struct cxl_irq_info irq_info;
+ int rc;
+
+ pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
+ rc = guest_get_irq_info(ctx, &irq_info);
+ if (rc) {
+ WARN(1, "Unable to get IRQ info: %i\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ rc = cxl_irq(irq, ctx, &irq_info);
+ return rc;
+}
+
+static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
+{
+ u64 state;
+ int rc = 0;
+
+ rc = cxl_h_read_error_state(afu->guest->handle, &state);
+ if (!rc) {
+ WARN_ON(state != H_STATE_NORMAL &&
+ state != H_STATE_DISABLE &&
+ state != H_STATE_TEMP_UNAVAILABLE &&
+ state != H_STATE_PERM_UNAVAILABLE);
+ *state_out = state & 0xffffffff;
+ }
+ return rc;
+}
+
+static irqreturn_t guest_slice_irq_err(int irq, void *data)
+{
+ struct cxl_afu *afu = data;
+ int rc;
+ u64 serr;
+
+ WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+ rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
+ if (rc) {
+ dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
+ return IRQ_HANDLED;
+ }
+ dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
+
+ rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
+ if (rc)
+ dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
+ rc);
+
+ return IRQ_HANDLED;
+}
+
+static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
+{
+ int i, n;
+ struct irq_avail *cur;
+
+ for (i = 0; i < adapter->guest->irq_nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
+ 0, len, 0);
+ if (n < cur->range) {
+ bitmap_set(cur->bitmap, n, len);
+ *irq = cur->offset + n;
+ pr_devel("guest: allocate IRQs %#x->%#x\n",
+ *irq, *irq + len - 1);
+
+ return 0;
+ }
+ }
+ return -ENOSPC;
+}
+
+static int irq_free_range(struct cxl *adapter, int irq, int len)
+{
+ int i, n;
+ struct irq_avail *cur;
+
+ if (len == 0)
+ return -ENOENT;
+
+ for (i = 0; i < adapter->guest->irq_nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ if (irq >= cur->offset &&
+ (irq + len) <= (cur->offset + cur->range)) {
+ n = irq - cur->offset;
+ bitmap_clear(cur->bitmap, n, len);
+ pr_devel("guest: release IRQs %#x->%#x\n",
+ irq, irq + len - 1);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int guest_reset(struct cxl *adapter)
+{
+ struct cxl_afu *afu = NULL;
+ int i, rc;
+
+ pr_devel("Adapter reset request\n");
+ for (i = 0; i < adapter->slices; i++) {
+ if ((afu = adapter->afu[i])) {
+ pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
+ pci_channel_io_frozen);
+ cxl_context_detach_all(afu);
+ }
+ }
+
+ rc = cxl_h_reset_adapter(adapter->guest->handle);
+ for (i = 0; i < adapter->slices; i++) {
+ if (!rc && (afu = adapter->afu[i])) {
+ pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
+ pci_channel_io_normal);
+ pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
+ }
+ }
+ return rc;
+}
+
+static int guest_alloc_one_irq(struct cxl *adapter)
+{
+ int irq;
+
+ spin_lock(&adapter->guest->irq_alloc_lock);
+ if (irq_alloc_range(adapter, 1, &irq))
+ irq = -ENOSPC;
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+ return irq;
+}
+
+static void guest_release_one_irq(struct cxl *adapter, int irq)
+{
+ spin_lock(&adapter->guest->irq_alloc_lock);
+ irq_free_range(adapter, irq, 1);
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+}
+
+static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter, unsigned int num)
+{
+ int i, try, irq;
+
+ memset(irqs, 0, sizeof(struct cxl_irq_ranges));
+
+ spin_lock(&adapter->guest->irq_alloc_lock);
+ for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
+ try = num;
+ while (try) {
+ if (irq_alloc_range(adapter, try, &irq) == 0)
+ break;
+ try /= 2;
+ }
+ if (!try)
+ goto error;
+ irqs->offset[i] = irq;
+ irqs->range[i] = try;
+ num -= try;
+ }
+ if (num)
+ goto error;
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+ return 0;
+
+error:
+ for (i = 0; i < CXL_IRQ_RANGES; i++)
+ irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+ return -ENOSPC;
+}
+
+static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter)
+{
+ int i;
+
+ spin_lock(&adapter->guest->irq_alloc_lock);
+ for (i = 0; i < CXL_IRQ_RANGES; i++)
+ irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
+ spin_unlock(&adapter->guest->irq_alloc_lock);
+}
+
+static int guest_register_serr_irq(struct cxl_afu *afu)
+{
+ afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+ dev_name(&afu->dev));
+ if (!afu->err_irq_name)
+ return -ENOMEM;
+
+ if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
+ guest_slice_irq_err, afu, afu->err_irq_name))) {
+ kfree(afu->err_irq_name);
+ afu->err_irq_name = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void guest_release_serr_irq(struct cxl_afu *afu)
+{
+ cxl_unmap_irq(afu->serr_virq, afu);
+ cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
+ kfree(afu->err_irq_name);
+}
+
+static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
+{
+ return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
+ tfc >> 32, (psl_reset_mask != 0));
+}
+
+static void disable_afu_irqs(struct cxl_context *ctx)
+{
+ irq_hw_number_t hwirq;
+ unsigned int virq;
+ int r, i;
+
+ pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ hwirq = ctx->irqs.offset[r];
+ for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+ virq = irq_find_mapping(NULL, hwirq);
+ disable_irq(virq);
+ }
+ }
+}
+
+static void enable_afu_irqs(struct cxl_context *ctx)
+{
+ irq_hw_number_t hwirq;
+ unsigned int virq;
+ int r, i;
+
+ pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ hwirq = ctx->irqs.offset[r];
+ for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
+ virq = irq_find_mapping(NULL, hwirq);
+ enable_irq(virq);
+ }
+ }
+}
+
+static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
+ u64 offset, u64 *val)
+{
+ unsigned long cr;
+ char c;
+ int rc = 0;
+
+ if (afu->crs_len < sz)
+ return -ENOENT;
+
+ if (unlikely(offset >= afu->crs_len))
+ return -ERANGE;
+
+ cr = get_zeroed_page(GFP_KERNEL);
+ if (!cr)
+ return -ENOMEM;
+
+ rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
+ virt_to_phys((void *)cr), sz);
+ if (rc)
+ goto err;
+
+ switch (sz) {
+ case 1:
+ c = *((char *) cr);
+ *val = c;
+ break;
+ case 2:
+ *val = in_le16((u16 *)cr);
+ break;
+ case 4:
+ *val = in_le32((unsigned *)cr);
+ break;
+ case 8:
+ *val = in_le64((u64 *)cr);
+ break;
+ default:
+ WARN_ON(1);
+ }
+err:
+ free_page(cr);
+ return rc;
+}
+
+static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
+ u32 *out)
+{
+ int rc;
+ u64 val;
+
+ rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
+ if (!rc)
+ *out = (u32) val;
+ return rc;
+}
+
+static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
+ u16 *out)
+{
+ int rc;
+ u64 val;
+
+ rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
+ if (!rc)
+ *out = (u16) val;
+ return rc;
+}
+
+static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
+ u8 *out)
+{
+ int rc;
+ u64 val;
+
+ rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
+ if (!rc)
+ *out = (u8) val;
+ return rc;
+}
+
+static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
+ u64 *out)
+{
+ return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
+}
+
+static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
+{
+ /* config record is not writable from guest */
+ return -EPERM;
+}
+
+static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
+{
+ /* config record is not writable from guest */
+ return -EPERM;
+}
+
+static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
+{
+ /* config record is not writable from guest */
+ return -EPERM;
+}
+
+static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
+{
+ struct cxl_process_element_hcall *elem;
+ struct cxl *adapter = ctx->afu->adapter;
+ const struct cred *cred;
+ u32 pid, idx;
+ int rc, r, i;
+ u64 mmio_addr, mmio_size;
+ __be64 flags = 0;
+
+ /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
+ if (!(elem = (struct cxl_process_element_hcall *)
+ get_zeroed_page(GFP_KERNEL)))
+ return -ENOMEM;
+
+ elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
+ if (ctx->kernel) {
+ pid = 0;
+ flags |= CXL_PE_TRANSLATION_ENABLED;
+ flags |= CXL_PE_PRIVILEGED_PROCESS;
+ if (mfmsr() & MSR_SF)
+ flags |= CXL_PE_64_BIT;
+ } else {
+ pid = current->pid;
+ flags |= CXL_PE_PROBLEM_STATE;
+ flags |= CXL_PE_TRANSLATION_ENABLED;
+ if (!test_tsk_thread_flag(current, TIF_32BIT))
+ flags |= CXL_PE_64_BIT;
+ cred = get_current_cred();
+ if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
+ flags |= CXL_PE_PRIVILEGED_PROCESS;
+ put_cred(cred);
+ }
+ elem->flags = cpu_to_be64(flags);
+ elem->common.tid = cpu_to_be32(0); /* Unused */
+ elem->common.pid = cpu_to_be32(pid);
+ elem->common.csrp = cpu_to_be64(0); /* disable */
+ elem->common.aurp0 = cpu_to_be64(0); /* disable */
+ elem->common.aurp1 = cpu_to_be64(0); /* disable */
+
+ cxl_prefault(ctx, wed);
+
+ elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
+ elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
+ for (i = 0; i < ctx->irqs.range[r]; i++) {
+ if (r == 0 && i == 0) {
+ elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
+ } else {
+ idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
+ elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
+ }
+ }
+ }
+ elem->common.amr = cpu_to_be64(amr);
+ elem->common.wed = cpu_to_be64(wed);
+
+ disable_afu_irqs(ctx);
+
+ rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
+ &ctx->process_token, &mmio_addr, &mmio_size);
+ if (rc == H_SUCCESS) {
+ if (ctx->master || !ctx->afu->pp_psa) {
+ ctx->psn_phys = ctx->afu->psn_phys;
+ ctx->psn_size = ctx->afu->adapter->ps_size;
+ } else {
+ ctx->psn_phys = mmio_addr;
+ ctx->psn_size = mmio_size;
+ }
+ if (ctx->afu->pp_psa && mmio_size &&
+ ctx->afu->pp_size == 0) {
+ /*
+ * There's no property in the device tree to read the
+ * pp_size. We only find out at the 1st attach.
+ * Compared to bare-metal, it is too late and we
+ * should really lock here. However, on powerVM,
+ * pp_size is really only used to display in /sys.
+ * Being discussed with pHyp for their next release.
+ */
+ ctx->afu->pp_size = mmio_size;
+ }
+ /* from PAPR: process element is bytes 4-7 of process token */
+ ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
+ pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
+ ctx->pe, ctx->external_pe, ctx->psn_size);
+ ctx->pe_inserted = true;
+ enable_afu_irqs(ctx);
+ }
+
+ free_page((u64)elem);
+ return rc;
+}
+
+static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
+{
+ pr_devel("in %s\n", __func__);
+
+ ctx->kernel = kernel;
+ if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+ return attach_afu_directed(ctx, wed, amr);
+
+ /* dedicated mode not supported on FW840 */
+
+ return -EINVAL;
+}
+
+static int detach_afu_directed(struct cxl_context *ctx)
+{
+ if (!ctx->pe_inserted)
+ return 0;
+ if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
+ return -1;
+ return 0;
+}
+
+static int guest_detach_process(struct cxl_context *ctx)
+{
+ pr_devel("in %s\n", __func__);
+ trace_cxl_detach(ctx);
+
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
+ return -EIO;
+
+ if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+ return detach_afu_directed(ctx);
+
+ return -EINVAL;
+}
+
+static void guest_release_afu(struct device *dev)
+{
+ struct cxl_afu *afu = to_cxl_afu(dev);
+
+ pr_devel("%s\n", __func__);
+
+ idr_destroy(&afu->contexts_idr);
+
+ kfree(afu->guest);
+ kfree(afu);
+}
+
+ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
+{
+ return guest_collect_vpd(NULL, afu, buf, len);
+}
+
+#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
+static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+ loff_t off, size_t count)
+{
+ void *tbuf = NULL;
+ int rc = 0;
+
+ tbuf = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ rc = cxl_h_get_afu_err(afu->guest->handle,
+ off & 0x7,
+ virt_to_phys(tbuf),
+ count);
+ if (rc)
+ goto err;
+
+ if (count > ERR_BUFF_MAX_COPY_SIZE)
+ count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
+ memcpy(buf, tbuf, count);
+ rc = count; /* bytes copied */
+err:
+ free_page((u64)tbuf);
+
+ return rc;
+}
+
+static int guest_afu_check_and_enable(struct cxl_afu *afu)
+{
+ return 0;
+}
+
+static bool guest_support_attributes(const char *attr_name,
+ enum cxl_attrs type)
+{
+ switch (type) {
+ case CXL_ADAPTER_ATTRS:
+ if ((strcmp(attr_name, "base_image") == 0) ||
+ (strcmp(attr_name, "load_image_on_perst") == 0) ||
+ (strcmp(attr_name, "perst_reloads_same_image") == 0) ||
+ (strcmp(attr_name, "image_loaded") == 0))
+ return false;
+ break;
+ case CXL_AFU_MASTER_ATTRS:
+ if ((strcmp(attr_name, "pp_mmio_off") == 0))
+ return false;
+ break;
+ case CXL_AFU_ATTRS:
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static int activate_afu_directed(struct cxl_afu *afu)
+{
+ int rc;
+
+ dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);
+
+ afu->current_mode = CXL_MODE_DIRECTED;
+
+ afu->num_procs = afu->max_procs_virtualised;
+
+ if ((rc = cxl_chardev_m_afu_add(afu)))
+ return rc;
+
+ if ((rc = cxl_sysfs_afu_m_add(afu)))
+ goto err;
+
+ if ((rc = cxl_chardev_s_afu_add(afu)))
+ goto err1;
+
+ return 0;
+err1:
+ cxl_sysfs_afu_m_remove(afu);
+err:
+ cxl_chardev_afu_remove(afu);
+ return rc;
+}
+
+static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
+{
+ if (!mode)
+ return 0;
+ if (!(mode & afu->modes_supported))
+ return -EINVAL;
+
+ if (mode == CXL_MODE_DIRECTED)
+ return activate_afu_directed(afu);
+
+ if (mode == CXL_MODE_DEDICATED)
+ dev_err(&afu->dev, "Dedicated mode not supported\n");
+
+ return -EINVAL;
+}
+
+static int deactivate_afu_directed(struct cxl_afu *afu)
+{
+ dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);
+
+ afu->current_mode = 0;
+ afu->num_procs = 0;
+
+ cxl_sysfs_afu_m_remove(afu);
+ cxl_chardev_afu_remove(afu);
+
+ cxl_ops->afu_reset(afu);
+
+ return 0;
+}
+
+static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
+{
+ if (!mode)
+ return 0;
+ if (!(mode & afu->modes_supported))
+ return -EINVAL;
+
+ if (mode == CXL_MODE_DIRECTED)
+ return deactivate_afu_directed(afu);
+ return 0;
+}
+
+static int guest_afu_reset(struct cxl_afu *afu)
+{
+ pr_devel("AFU(%d) reset request\n", afu->slice);
+ return cxl_h_reset_afu(afu->guest->handle);
+}
+
+static int guest_map_slice_regs(struct cxl_afu *afu)
+{
+ if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
+ dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
+ afu->slice);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void guest_unmap_slice_regs(struct cxl_afu *afu)
+{
+ if (afu->p2n_mmio)
+ iounmap(afu->p2n_mmio);
+}
+
+static int afu_update_state(struct cxl_afu *afu)
+{
+ int rc, cur_state;
+
+ rc = afu_read_error_state(afu, &cur_state);
+ if (rc)
+ return rc;
+
+ if (afu->guest->previous_state == cur_state)
+ return 0;
+
+ pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);
+
+ switch (cur_state) {
+ case H_STATE_NORMAL:
+ afu->guest->previous_state = cur_state;
+ rc = 1;
+ break;
+
+ case H_STATE_DISABLE:
+ pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
+ pci_channel_io_frozen);
+
+ cxl_context_detach_all(afu);
+ if ((rc = cxl_ops->afu_reset(afu)))
+ pr_devel("reset hcall failed %d\n", rc);
+
+ rc = afu_read_error_state(afu, &cur_state);
+ if (!rc && cur_state == H_STATE_NORMAL) {
+ pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
+ pci_channel_io_normal);
+ pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
+ rc = 1;
+ }
+ afu->guest->previous_state = 0;
+ break;
+
+ case H_STATE_TEMP_UNAVAILABLE:
+ afu->guest->previous_state = cur_state;
+ break;
+
+ case H_STATE_PERM_UNAVAILABLE:
+ dev_err(&afu->dev, "AFU is in permanent error state\n");
+ pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
+ pci_channel_io_perm_failure);
+ afu->guest->previous_state = cur_state;
+ break;
+
+ default:
+ pr_err("Unexpected AFU(%d) error state: %#x\n",
+ afu->slice, cur_state);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int afu_do_recovery(struct cxl_afu *afu)
+{
+ int rc;
+
+ /* many threads can arrive here, in case of detach_all for example.
+ * Only one needs to drive the recovery
+ */
+ if (mutex_trylock(&afu->guest->recovery_lock)) {
+ rc = afu_update_state(afu);
+ mutex_unlock(&afu->guest->recovery_lock);
+ return rc;
+ }
+ return 0;
+}
+
+static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
+{
+ int state;
+
+ if (afu) {
+ if (afu_read_error_state(afu, &state) ||
+ state != H_STATE_NORMAL) {
+ if (afu_do_recovery(afu) > 0) {
+ /* check again in case we've just fixed it */
+ if (!afu_read_error_state(afu, &state) &&
+ state == H_STATE_NORMAL)
+ return true;
+ }
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int afu_properties_look_ok(struct cxl_afu *afu)
+{
+ if (afu->pp_irqs < 0) {
+ dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
+ return -EINVAL;
+ }
+
+ if (afu->max_procs_virtualised < 1) {
+ dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
+ return -EINVAL;
+ }
+
+ if (afu->crs_len < 0) {
+ dev_err(&afu->dev, "Unexpected configuration record size value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
+{
+ struct cxl_afu *afu;
+ bool free = true;
+ int rc;
+
+ pr_devel("in %s - AFU(%d)\n", __func__, slice);
+ if (!(afu = cxl_alloc_afu(adapter, slice)))
+ return -ENOMEM;
+
+ if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
+ kfree(afu);
+ return -ENOMEM;
+ }
+
+ mutex_init(&afu->guest->recovery_lock);
+
+ if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
+ adapter->adapter_num,
+ slice)))
+ goto err1;
+
+ adapter->slices++;
+
+ if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
+ goto err1;
+
+ if ((rc = cxl_ops->afu_reset(afu)))
+ goto err1;
+
+ if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
+ goto err1;
+
+ if ((rc = afu_properties_look_ok(afu)))
+ goto err1;
+
+ if ((rc = guest_map_slice_regs(afu)))
+ goto err1;
+
+ if ((rc = guest_register_serr_irq(afu)))
+ goto err2;
+
+ /*
+ * After we call this function we must not free the afu directly, even
+ * if it returns an error!
+ */
+ if ((rc = cxl_register_afu(afu)))
+ goto err_put1;
+
+ if ((rc = cxl_sysfs_afu_add(afu)))
+ goto err_put1;
+
+ /*
+ * pHyp doesn't expose the programming models supported by the
+ * AFU. pHyp currently only supports directed mode. If it adds
+ * dedicated mode later, this version of cxl has no way to
+ * detect it. So we'll initialize the driver, but the first
+ * attach will fail.
+ * Being discussed with pHyp to do better (likely new property)
+ */
+ if (afu->max_procs_virtualised == 1)
+ afu->modes_supported = CXL_MODE_DEDICATED;
+ else
+ afu->modes_supported = CXL_MODE_DIRECTED;
+
+ if ((rc = cxl_afu_select_best_mode(afu)))
+ goto err_put2;
+
+ adapter->afu[afu->slice] = afu;
+
+ afu->enabled = true;
+
+ if ((rc = cxl_pci_vphb_add(afu)))
+ dev_info(&afu->dev, "Can't register vPHB\n");
+
+ return 0;
+
+err_put2:
+ cxl_sysfs_afu_remove(afu);
+err_put1:
+ device_unregister(&afu->dev);
+ free = false;
+ guest_release_serr_irq(afu);
+err2:
+ guest_unmap_slice_regs(afu);
+err1:
+ if (free) {
+ kfree(afu->guest);
+ kfree(afu);
+ }
+ return rc;
+}
+
+void cxl_guest_remove_afu(struct cxl_afu *afu)
+{
+ pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
+
+ if (!afu)
+ return;
+
+ cxl_pci_vphb_remove(afu);
+ cxl_sysfs_afu_remove(afu);
+
+ spin_lock(&afu->adapter->afu_list_lock);
+ afu->adapter->afu[afu->slice] = NULL;
+ spin_unlock(&afu->adapter->afu_list_lock);
+
+ cxl_context_detach_all(afu);
+ cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
+ guest_release_serr_irq(afu);
+ guest_unmap_slice_regs(afu);
+
+ device_unregister(&afu->dev);
+}
+
+static void free_adapter(struct cxl *adapter)
+{
+ struct irq_avail *cur;
+ int i;
+
+ if (adapter->guest->irq_avail) {
+ for (i = 0; i < adapter->guest->irq_nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ kfree(cur->bitmap);
+ }
+ kfree(adapter->guest->irq_avail);
+ }
+ kfree(adapter->guest->status);
+ cxl_remove_adapter_nr(adapter);
+ kfree(adapter->guest);
+ kfree(adapter);
+}
+
+static int properties_look_ok(struct cxl *adapter)
+{
+ /* The absence of this property means that the operational
+ * status is unknown or okay
+ */
+ if (strlen(adapter->guest->status) &&
+ strcmp(adapter->guest->status, "okay")) {
+ pr_err("ABORTING:Bad operational status of the device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
+{
+ return guest_collect_vpd(adapter, NULL, buf, len);
+}
+
+void cxl_guest_remove_adapter(struct cxl *adapter)
+{
+ pr_devel("in %s\n", __func__);
+
+ cxl_sysfs_adapter_remove(adapter);
+
+ cxl_guest_remove_chardev(adapter);
+ device_unregister(&adapter->dev);
+}
+
+static void release_adapter(struct device *dev)
+{
+ free_adapter(to_cxl_adapter(dev));
+}
+
+struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
+{
+ struct cxl *adapter;
+ bool free = true;
+ int rc;
+
+ if (!(adapter = cxl_alloc_adapter()))
+ return ERR_PTR(-ENOMEM);
+
+ if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
+ free_adapter(adapter);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ adapter->slices = 0;
+ adapter->guest->pdev = pdev;
+ adapter->dev.parent = &pdev->dev;
+ adapter->dev.release = release_adapter;
+ dev_set_drvdata(&pdev->dev, adapter);
+
+ if ((rc = cxl_of_read_adapter_handle(adapter, np)))
+ goto err1;
+
+ if ((rc = cxl_of_read_adapter_properties(adapter, np)))
+ goto err1;
+
+ if ((rc = properties_look_ok(adapter)))
+ goto err1;
+
+ if ((rc = cxl_guest_add_chardev(adapter)))
+ goto err1;
+
+ /*
+ * After we call this function we must not free the adapter directly,
+ * even if it returns an error!
+ */
+ if ((rc = cxl_register_adapter(adapter)))
+ goto err_put1;
+
+ if ((rc = cxl_sysfs_adapter_add(adapter)))
+ goto err_put1;
+
+ return adapter;
+
+err_put1:
+ device_unregister(&adapter->dev);
+ free = false;
+ cxl_guest_remove_chardev(adapter);
+err1:
+ if (free)
+ free_adapter(adapter);
+ return ERR_PTR(rc);
+}
+
+void cxl_guest_reload_module(struct cxl *adapter)
+{
+ struct platform_device *pdev;
+
+ pdev = adapter->guest->pdev;
+ cxl_guest_remove_adapter(adapter);
+
+ cxl_of_probe(pdev);
+}
+
+const struct cxl_backend_ops cxl_guest_ops = {
+ .module = THIS_MODULE,
+ .adapter_reset = guest_reset,
+ .alloc_one_irq = guest_alloc_one_irq,
+ .release_one_irq = guest_release_one_irq,
+ .alloc_irq_ranges = guest_alloc_irq_ranges,
+ .release_irq_ranges = guest_release_irq_ranges,
+ .setup_irq = NULL,
+ .handle_psl_slice_error = guest_handle_psl_slice_error,
+ .psl_interrupt = guest_psl_irq,
+ .ack_irq = guest_ack_irq,
+ .attach_process = guest_attach_process,
+ .detach_process = guest_detach_process,
+ .support_attributes = guest_support_attributes,
+ .link_ok = guest_link_ok,
+ .release_afu = guest_release_afu,
+ .afu_read_err_buffer = guest_afu_read_err_buffer,
+ .afu_check_and_enable = guest_afu_check_and_enable,
+ .afu_activate_mode = guest_afu_activate_mode,
+ .afu_deactivate_mode = guest_afu_deactivate_mode,
+ .afu_reset = guest_afu_reset,
+ .afu_cr_read8 = guest_afu_cr_read8,
+ .afu_cr_read16 = guest_afu_cr_read16,
+ .afu_cr_read32 = guest_afu_cr_read32,
+ .afu_cr_read64 = guest_afu_cr_read64,
+ .afu_cr_write8 = guest_afu_cr_write8,
+ .afu_cr_write16 = guest_afu_cr_write16,
+ .afu_cr_write32 = guest_afu_cr_write32,
+ .read_adapter_vpd = cxl_guest_read_adapter_vpd,
+};
diff --git a/drivers/misc/cxl/hcalls.c b/drivers/misc/cxl/hcalls.c
new file mode 100644
index 000000000000..d6d11f4056d7
--- /dev/null
+++ b/drivers/misc/cxl/hcalls.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include "hcalls.h"
+#include "trace.h"
+
+#define CXL_HCALL_TIMEOUT 60000
+#define CXL_HCALL_TIMEOUT_DOWNLOAD 120000
+
+#define H_ATTACH_CA_PROCESS 0x344
+#define H_CONTROL_CA_FUNCTION 0x348
+#define H_DETACH_CA_PROCESS 0x34C
+#define H_COLLECT_CA_INT_INFO 0x350
+#define H_CONTROL_CA_FAULTS 0x354
+#define H_DOWNLOAD_CA_FUNCTION 0x35C
+#define H_DOWNLOAD_CA_FACILITY 0x364
+#define H_CONTROL_CA_FACILITY 0x368
+
+#define H_CONTROL_CA_FUNCTION_RESET 1 /* perform a reset */
+#define H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS 2 /* suspend a process from being executed */
+#define H_CONTROL_CA_FUNCTION_RESUME_PROCESS 3 /* resume a process to be executed */
+#define H_CONTROL_CA_FUNCTION_READ_ERR_STATE 4 /* read the error state */
+#define H_CONTROL_CA_FUNCTION_GET_AFU_ERR 5 /* collect the AFU error buffer */
+#define H_CONTROL_CA_FUNCTION_GET_CONFIG 6 /* collect configuration record */
+#define H_CONTROL_CA_FUNCTION_GET_DOWNLOAD_STATE 7 /* query to return download status */
+#define H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS 8 /* terminate the process before completion */
+#define H_CONTROL_CA_FUNCTION_COLLECT_VPD 9 /* collect VPD */
+#define H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT 11 /* read the function-wide error data based on an interrupt */
+#define H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT 12 /* acknowledge function-wide error data based on an interrupt */
+#define H_CONTROL_CA_FUNCTION_GET_ERROR_LOG 13 /* retrieve the Platform Log ID (PLID) of an error log */
+
+#define H_CONTROL_CA_FAULTS_RESPOND_PSL 1
+#define H_CONTROL_CA_FAULTS_RESPOND_AFU 2
+
+#define H_CONTROL_CA_FACILITY_RESET 1 /* perform a reset */
+#define H_CONTROL_CA_FACILITY_COLLECT_VPD 2 /* collect VPD */
+
+#define H_DOWNLOAD_CA_FACILITY_DOWNLOAD 1 /* download adapter image */
+#define H_DOWNLOAD_CA_FACILITY_VALIDATE 2 /* validate adapter image */
+
+#define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...) \
+ { \
+ unsigned int delay, total_delay = 0; \
+ u64 token = 0; \
+ \
+ memset(retbuf, 0, sizeof(retbuf)); \
+ while (1) { \
+ rc = call(fn, retbuf, __VA_ARGS__, token); \
+ token = retbuf[0]; \
+ if (rc != H_BUSY && !H_IS_LONG_BUSY(rc)) \
+ break; \
+ \
+ if (rc == H_BUSY) \
+ delay = 10; \
+ else \
+ delay = get_longbusy_msecs(rc); \
+ \
+ total_delay += delay; \
+ if (total_delay > CXL_HCALL_TIMEOUT) { \
+ WARN(1, "Warning: Giving up waiting for CXL hcall " \
+ "%#x after %u msec\n", fn, total_delay); \
+ rc = H_BUSY; \
+ break; \
+ } \
+ msleep(delay); \
+ } \
+ }
+#define CXL_H_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall, __VA_ARGS__)
+#define CXL_H9_WAIT_UNTIL_DONE(...) _CXL_LOOP_HCALL(plpar_hcall9, __VA_ARGS__)
+
+#define _PRINT_MSG(rc, format, ...) \
+ { \
+ if ((rc != H_SUCCESS) && (rc != H_CONTINUE)) \
+ pr_err(format, __VA_ARGS__); \
+ else \
+ pr_devel(format, __VA_ARGS__); \
+ }
+
+static char *afu_op_names[] = {
+ "UNKNOWN_OP", /* 0 undefined */
+ "RESET", /* 1 */
+ "SUSPEND_PROCESS", /* 2 */
+ "RESUME_PROCESS", /* 3 */
+ "READ_ERR_STATE", /* 4 */
+ "GET_AFU_ERR", /* 5 */
+ "GET_CONFIG", /* 6 */
+ "GET_DOWNLOAD_STATE", /* 7 */
+ "TERMINATE_PROCESS", /* 8 */
+ "COLLECT_VPD", /* 9 */
+ "UNKNOWN_OP", /* 10 undefined */
+ "GET_FUNCTION_ERR_INT", /* 11 */
+ "ACK_FUNCTION_ERR_INT", /* 12 */
+ "GET_ERROR_LOG", /* 13 */
+};
+
+static char *control_adapter_op_names[] = {
+ "UNKNOWN_OP", /* 0 undefined */
+ "RESET", /* 1 */
+ "COLLECT_VPD", /* 2 */
+};
+
+static char *download_op_names[] = {
+ "UNKNOWN_OP", /* 0 undefined */
+ "DOWNLOAD", /* 1 */
+ "VALIDATE", /* 2 */
+};
+
+static char *op_str(unsigned int op, char *name_array[], int array_len)
+{
+ if (op >= array_len)
+ return "UNKNOWN_OP";
+ return name_array[op];
+}
+
+#define OP_STR(op, name_array) op_str(op, name_array, ARRAY_SIZE(name_array))
+
+#define OP_STR_AFU(op) OP_STR(op, afu_op_names)
+#define OP_STR_CONTROL_ADAPTER(op) OP_STR(op, control_adapter_op_names)
+#define OP_STR_DOWNLOAD_ADAPTER(op) OP_STR(op, download_op_names)
+
+long cxl_h_attach_process(u64 unit_address,
+ struct cxl_process_element_hcall *element,
+ u64 *process_token, u64 *mmio_addr, u64 *mmio_size)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_ATTACH_CA_PROCESS, unit_address, virt_to_phys(element));
+ _PRINT_MSG(rc, "cxl_h_attach_process(%#.16llx, %#.16lx): %li\n",
+ unit_address, virt_to_phys(element), rc);
+ trace_cxl_hcall_attach(unit_address, virt_to_phys(element), retbuf[0], retbuf[1], retbuf[2], rc);
+
+ pr_devel("token: 0x%.8lx mmio_addr: 0x%lx mmio_size: 0x%lx\nProcess Element Structure:\n",
+ retbuf[0], retbuf[1], retbuf[2]);
+ cxl_dump_debug_buffer(element, sizeof(*element));
+
+ switch (rc) {
+ case H_SUCCESS: /* The process info is attached to the coherent platform function */
+ *process_token = retbuf[0];
+ if (mmio_addr)
+ *mmio_addr = retbuf[1];
+ if (mmio_size)
+ *mmio_size = retbuf[2];
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ case H_FUNCTION: /* The function is not supported. */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The coherent platform function does not have enough additional resource to attach the process */
+ case H_HARDWARE: /* A hardware event prevented the attach operation */
+ case H_STATE: /* The coherent platform function is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_detach_process - Detach a process element from a coherent
+ * platform function.
+ */
+long cxl_h_detach_process(u64 unit_address, u64 process_token)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ CXL_H_WAIT_UNTIL_DONE(rc, retbuf, H_DETACH_CA_PROCESS, unit_address, process_token);
+ _PRINT_MSG(rc, "cxl_h_detach_process(%#.16llx, 0x%.8llx): %li\n", unit_address, process_token, rc);
+ trace_cxl_hcall_detach(unit_address, process_token, rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The process was detached from the coherent platform function */
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The function has page table mappings for MMIO */
+ case H_HARDWARE: /* A hardware event prevented the detach operation */
+ case H_STATE: /* The coherent platform function is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_control_function - This H_CONTROL_CA_FUNCTION hypervisor call allows
+ * the partition to manipulate or query
+ * certain coherent platform function behaviors.
+ */
+static long cxl_h_control_function(u64 unit_address, u64 op,
+ u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ long rc;
+
+ CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FUNCTION, unit_address, op, p1, p2, p3, p4);
+ _PRINT_MSG(rc, "cxl_h_control_function(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
+ unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
+ trace_cxl_hcall_control_function(unit_address, OP_STR_AFU(op), p1, p2, p3, p4, retbuf[0], rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The operation is completed for the coherent platform function */
+ if ((op == H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT ||
+ op == H_CONTROL_CA_FUNCTION_READ_ERR_STATE ||
+ op == H_CONTROL_CA_FUNCTION_COLLECT_VPD))
+ *out = retbuf[0];
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ case H_FUNCTION: /* The function is not supported. */
+ case H_NOT_FOUND: /* The operation supplied was not valid */
+ case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
+ case H_SG_LIST: /* A block list entry was invalid */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The function has page table mappings for MMIO */
+ case H_HARDWARE: /* A hardware event prevented the attach operation */
+ case H_STATE: /* The coherent platform function is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_reset_afu - Perform a reset to the coherent platform function.
+ */
+long cxl_h_reset_afu(u64 unit_address)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_RESET,
+ 0, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_suspend_process - Suspend a process from being executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_suspend_process(u64 unit_address, u64 process_token)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_SUSPEND_PROCESS,
+ process_token, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_resume_process - Resume a process to be executed
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_resume_process(u64 unit_address, u64 process_token)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_RESUME_PROCESS,
+ process_token, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_read_error_state - Reads the error state of the coherent
+ * platform function.
+ * R4 contains the error state
+ */
+long cxl_h_read_error_state(u64 unit_address, u64 *state)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_READ_ERR_STATE,
+ 0, 0, 0, 0,
+ state);
+}
+
+/**
+ * cxl_h_get_afu_err - collect the AFU error buffer
+ * Parameter1 = byte offset into error buffer to retrieve, valid values
+ * are between 0 and (ibm,error-buffer-size - 1)
+ * Parameter2 = 4K aligned real address of error buffer, to be filled in
+ * Parameter3 = length of error buffer, valid values are 4K or less
+ */
+long cxl_h_get_afu_err(u64 unit_address, u64 offset,
+ u64 buf_address, u64 len)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_GET_AFU_ERR,
+ offset, buf_address, len, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_get_config - collect configuration record for the
+ * coherent platform function
+ * Parameter1 = # of configuration record to retrieve, valid values are
+ * between 0 and (ibm,#config-records - 1)
+ * Parameter2 = byte offset into configuration record to retrieve,
+ * valid values are between 0 and (ibm,config-record-size - 1)
+ * Parameter3 = 4K aligned real address of configuration record buffer,
+ * to be filled in
+ * Parameter4 = length of configuration buffer, valid values are 4K or less
+ */
+long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
+ u64 buf_address, u64 len)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_GET_CONFIG,
+ cr_num, offset, buf_address, len,
+ NULL);
+}
+
+/**
+ * cxl_h_terminate_process - Terminate the process before completion
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_terminate_process(u64 unit_address, u64 process_token)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_TERMINATE_PROCESS,
+ process_token, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_collect_vpd - Collect VPD for the coherent platform function.
+ * Parameter1 = # of VPD record to retrieve, valid values are between 0
+ * and (ibm,#config-records - 1).
+ * Parameter2 = 4K naturally aligned real buffer containing block
+ * list entries
+ * Parameter3 = number of block list entries in the block list, valid
+ * values are between 0 and 256
+ */
+long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
+ u64 num, u64 *out)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_COLLECT_VPD,
+ record, list_address, num, 0,
+ out);
+}
+
+/**
+ * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
+ */
+long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_GET_FUNCTION_ERR_INT,
+ 0, 0, 0, 0, reg);
+}
+
+/**
+ * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
+ * based on an interrupt
+ * Parameter1 = value to write to the function-wide error interrupt register
+ */
+long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_ACK_FUNCTION_ERR_INT,
+ value, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
+ * an error log
+ */
+long cxl_h_get_error_log(u64 unit_address, u64 value)
+{
+ return cxl_h_control_function(unit_address,
+ H_CONTROL_CA_FUNCTION_GET_ERROR_LOG,
+ 0, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_collect_int_info - Collect interrupt info about a coherent
+ * platform function after an interrupt occurred.
+ */
+long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
+ struct cxl_irq_info *info)
+{
+ long rc;
+
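+	/*
+	 * The hypervisor returns the interrupt data directly in the nine
+	 * hcall return registers, so struct cxl_irq_info must match the
+	 * size and layout of the plpar_hcall9() return buffer exactly.
+	 */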
+ BUG_ON(sizeof(*info) != sizeof(unsigned long[PLPAR_HCALL9_BUFSIZE]));
+
+ rc = plpar_hcall9(H_COLLECT_CA_INT_INFO, (unsigned long *) info,
+ unit_address, process_token);
+ _PRINT_MSG(rc, "cxl_h_collect_int_info(%#.16llx, 0x%llx): %li\n",
+ unit_address, process_token, rc);
+ trace_cxl_hcall_collect_int_info(unit_address, process_token, rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The interrupt info is returned in return registers. */
+ pr_devel("dsisr:%#llx, dar:%#llx, dsr:%#llx, pid:%u, tid:%u, afu_err:%#llx, errstat:%#llx\n",
+ info->dsisr, info->dar, info->dsr, info->pid,
+ info->tid, info->afu_err, info->errstat);
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall. */
+ case H_HARDWARE: /* A hardware event prevented the collection of the interrupt info.*/
+ case H_STATE: /* The coherent platform function is not in a valid state to collect interrupt info. */
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_control_faults - Control the operation of a coherent platform
+ * function after a fault occurs.
+ *
+ * Parameters
+ * control-mask: value to control the faults
+ * looks like PSL_TFC_An shifted >> 32
+ * reset-mask: mask to control reset of function faults
+ * Set reset_mask = 1 to reset PSL errors
+ */
+long cxl_h_control_faults(u64 unit_address, u64 process_token,
+ u64 control_mask, u64 reset_mask)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ memset(retbuf, 0, sizeof(retbuf));
+
+ rc = plpar_hcall(H_CONTROL_CA_FAULTS, retbuf, unit_address,
+ H_CONTROL_CA_FAULTS_RESPOND_PSL, process_token,
+ control_mask, reset_mask);
+ _PRINT_MSG(rc, "cxl_h_control_faults(%#.16llx, 0x%llx, %#llx, %#llx): %li (%#lx)\n",
+ unit_address, process_token, control_mask, reset_mask,
+ rc, retbuf[0]);
+ trace_cxl_hcall_control_faults(unit_address, process_token,
+ control_mask, reset_mask, retbuf[0], rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* Faults were successfully controlled for the function. */
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ return -EINVAL;
+ case H_HARDWARE: /* A hardware event prevented the control of faults. */
+ case H_STATE: /* The function was in an invalid state. */
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall; the coherent platform facilities may need to be licensed. */
+ return -EBUSY;
+ case H_FUNCTION: /* The function is not supported */
+ case H_NOT_FOUND: /* The operation supplied was not valid */
+ return -EINVAL;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
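+
+/*
+ * Example (illustrative sketch, not part of this patch): restarting a
+ * context after a translation fault has been resolved. Using the restart
+ * bit of PSL_TFC_An here is an assumption about the caller's policy.
+ *
+ *	rc = cxl_h_control_faults(unit_address, process_token,
+ *				  CXL_PSL_TFC_An_R >> 32, 0);
+ */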
+
+/**
+ * cxl_h_control_facility - This H_CONTROL_CA_FACILITY hypervisor call
+ * allows the partition to manipulate or query
+ * certain coherent platform facility behaviors.
+ */
+static long cxl_h_control_facility(u64 unit_address, u64 op,
+ u64 p1, u64 p2, u64 p3, u64 p4, u64 *out)
+{
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ long rc;
+
+ CXL_H9_WAIT_UNTIL_DONE(rc, retbuf, H_CONTROL_CA_FACILITY, unit_address, op, p1, p2, p3, p4);
+ _PRINT_MSG(rc, "cxl_h_control_facility(%#.16llx, %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li\n",
+ unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
+ trace_cxl_hcall_control_facility(unit_address, OP_STR_CONTROL_ADAPTER(op), p1, p2, p3, p4, retbuf[0], rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The operation is completed for the coherent platform facility */
+ if (op == H_CONTROL_CA_FACILITY_COLLECT_VPD)
+ *out = retbuf[0];
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied. */
+ case H_FUNCTION: /* The function is not supported. */
+ case H_NOT_FOUND: /* The operation supplied was not valid */
+ case H_NOT_AVAILABLE: /* The operation cannot be performed because the AFU has not been downloaded */
+	case H_SG_LIST: /* A block list entry was invalid */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the operation */
+ case H_STATE: /* The coherent platform facility is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_reset_adapter - Perform a reset of the coherent platform facility.
+ */
+long cxl_h_reset_adapter(u64 unit_address)
+{
+ return cxl_h_control_facility(unit_address,
+ H_CONTROL_CA_FACILITY_RESET,
+ 0, 0, 0, 0,
+ NULL);
+}
+
+/**
+ * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
+ * Parameter1 = 4K naturally aligned real buffer containing block
+ * list entries
+ * Parameter2 = number of block list entries in the block list, valid
+ * values are between 0 and 256
+ */
+long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
+ u64 num, u64 *out)
+{
+ return cxl_h_control_facility(unit_address,
+ H_CONTROL_CA_FACILITY_COLLECT_VPD,
+ list_address, num, 0, 0,
+ out);
+}
+
+/**
+ * cxl_h_download_facility - This H_DOWNLOAD_CA_FACILITY
+ * hypervisor call provides platform support for
+ * downloading a base adapter image to the coherent
+ * platform facility, and for validating the entire
+ * image after the download.
+ * Parameters
+ * op: operation to perform on the coherent platform facility
+ * Download: operation = 1, the base image in the coherent platform
+ * facility is first erased, and then
+ * programmed using the image supplied
+ * in the scatter/gather list.
+ * Validate: operation = 2, the base image in the coherent platform
+ * facility is compared with the image
+ * supplied in the scatter/gather list.
+ * list_address: 4K naturally aligned real buffer containing
+ * scatter/gather list entries.
+ * num: number of block list entries in the scatter/gather list.
+ */
+static long cxl_h_download_facility(u64 unit_address, u64 op,
+ u64 list_address, u64 num,
+ u64 *out)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned int delay, total_delay = 0;
+ u64 token = 0;
+ long rc;
+
+ if (*out != 0)
+ token = *out;
+
+ memset(retbuf, 0, sizeof(retbuf));
+ while (1) {
+ rc = plpar_hcall(H_DOWNLOAD_CA_FACILITY, retbuf,
+ unit_address, op, list_address, num,
+ token);
+ token = retbuf[0];
+ if (rc != H_BUSY && !H_IS_LONG_BUSY(rc))
+ break;
+
+ if (rc != H_BUSY) {
+ delay = get_longbusy_msecs(rc);
+ total_delay += delay;
+ if (total_delay > CXL_HCALL_TIMEOUT_DOWNLOAD) {
+ WARN(1, "Warning: Giving up waiting for CXL hcall "
+ "%#x after %u msec\n",
+ H_DOWNLOAD_CA_FACILITY, total_delay);
+ rc = H_BUSY;
+ break;
+ }
+ msleep(delay);
+ }
+ }
+ _PRINT_MSG(rc, "cxl_h_download_facility(%#.16llx, %s(%#llx, %#llx), %#lx): %li\n",
+ unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
+ trace_cxl_hcall_download_facility(unit_address, OP_STR_DOWNLOAD_ADAPTER(op), list_address, num, retbuf[0], rc);
+
+ switch (rc) {
+ case H_SUCCESS: /* The operation is completed for the coherent platform facility */
+ return 0;
+ case H_PARAMETER: /* An incorrect parameter was supplied */
+ case H_FUNCTION: /* The function is not supported. */
+	case H_SG_LIST: /* A block list entry was invalid */
+ case H_BAD_DATA: /* Image verification failed */
+ return -EINVAL;
+ case H_AUTHORITY: /* The partition does not have authority to perform this hcall */
+ case H_RESOURCE: /* The function has page table mappings for MMIO */
+	case H_HARDWARE: /* A hardware event prevented the operation */
+ case H_STATE: /* The coherent platform facility is not in a valid state */
+ case H_BUSY:
+ return -EBUSY;
+ case H_CONTINUE:
+ *out = retbuf[0];
+ return 1; /* More data is needed for the complete image */
+ default:
+ WARN(1, "Unexpected return code: %lx", rc);
+ return -EINVAL;
+ }
+}
+
+/**
+ * cxl_h_download_adapter_image - Download the base image to the coherent
+ * platform facility.
+ */
+long cxl_h_download_adapter_image(u64 unit_address,
+ u64 list_address, u64 num,
+ u64 *out)
+{
+ return cxl_h_download_facility(unit_address,
+ H_DOWNLOAD_CA_FACILITY_DOWNLOAD,
+ list_address, num, out);
+}
+
+/**
+ * cxl_h_validate_adapter_image - Validate the base image in the coherent
+ * platform facility.
+ */
+long cxl_h_validate_adapter_image(u64 unit_address,
+ u64 list_address, u64 num,
+ u64 *out)
+{
+ return cxl_h_download_facility(unit_address,
+ H_DOWNLOAD_CA_FACILITY_VALIDATE,
+ list_address, num, out);
+}
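+
+/*
+ * Example (illustrative sketch, not part of this patch): driving a
+ * download in chunks. A return value of 1 maps H_CONTINUE, with the
+ * continuation token saved in *out for the next call; refilling the
+ * block list between iterations is assumed to be done by the caller.
+ *
+ *	u64 token = 0;
+ *	long rc;
+ *
+ *	do {
+ *		rc = cxl_h_download_adapter_image(unit_address,
+ *						  list_address, num, &token);
+ *	} while (rc == 1);
+ */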
diff --git a/drivers/misc/cxl/hcalls.h b/drivers/misc/cxl/hcalls.h
new file mode 100644
index 000000000000..3e25522a5df6
--- /dev/null
+++ b/drivers/misc/cxl/hcalls.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _HCALLS_H
+#define _HCALLS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/hvcall.h>
+#include "cxl.h"
+
+#define SG_BUFFER_SIZE 4096
+#define SG_MAX_ENTRIES 256
+
+struct sg_list {
+ u64 phys_addr;
+ u64 len;
+};
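+
+/*
+ * A block list is one 4K page of sg_list entries: SG_BUFFER_SIZE (4096)
+ * divided by the 16-byte entry size gives the 256 of SG_MAX_ENTRIES.
+ */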
+
+/*
+ * This is straight out of PAPR, but with some of the compound fields
+ * replaced by a single field where they were identical to the register
+ * layout.
+ *
+ * The 'flags' parameter groups together the various bit-fields.
+ */
+#define CXL_PE_CSRP_VALID (1ULL << 63)
+#define CXL_PE_PROBLEM_STATE (1ULL << 62)
+#define CXL_PE_SECONDARY_SEGMENT_TBL_SRCH (1ULL << 61)
+#define CXL_PE_TAGS_ACTIVE (1ULL << 60)
+#define CXL_PE_USER_STATE (1ULL << 59)
+#define CXL_PE_TRANSLATION_ENABLED (1ULL << 58)
+#define CXL_PE_64_BIT (1ULL << 57)
+#define CXL_PE_PRIVILEGED_PROCESS (1ULL << 56)
+
+#define CXL_PROCESS_ELEMENT_VERSION 1
+struct cxl_process_element_hcall {
+ __be64 version;
+ __be64 flags;
+ u8 reserved0[12];
+ __be32 pslVirtualIsn;
+ u8 applicationVirtualIsnBitmap[256];
+ u8 reserved1[144];
+ struct cxl_process_element_common common;
+ u8 reserved4[12];
+} __packed;
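+
+/*
+ * Example (illustrative sketch, not part of this patch): flags for a
+ * 64-bit problem-state process with translation enabled. The exact
+ * combination is an assumption; the attach path derives it from the
+ * context state.
+ *
+ *	elem->flags = cpu_to_be64(CXL_PE_PROBLEM_STATE |
+ *				  CXL_PE_TRANSLATION_ENABLED |
+ *				  CXL_PE_64_BIT);
+ */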
+
+#define H_STATE_NORMAL 1
+#define H_STATE_DISABLE 2
+#define H_STATE_TEMP_UNAVAILABLE 3
+#define H_STATE_PERM_UNAVAILABLE 4
+
+/* NOTE: element must be a logical real address, and must be pinned */
+long cxl_h_attach_process(u64 unit_address, struct cxl_process_element_hcall *element,
+ u64 *process_token, u64 *mmio_addr, u64 *mmio_size);
+
+/**
+ * cxl_h_detach_process - Detach a process element from a coherent
+ * platform function.
+ */
+long cxl_h_detach_process(u64 unit_address, u64 process_token);
+
+/**
+ * cxl_h_reset_afu - Perform a reset of the coherent platform function.
+ */
+long cxl_h_reset_afu(u64 unit_address);
+
+/**
+ * cxl_h_suspend_process - Suspend execution of a process
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_suspend_process(u64 unit_address, u64 process_token);
+
+/**
+ * cxl_h_resume_process - Resume execution of a process
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_resume_process(u64 unit_address, u64 process_token);
+
+/**
+ * cxl_h_read_error_state - Reads the error state of the coherent
+ * platform function.
+ * R4 contains the error state
+ */
+long cxl_h_read_error_state(u64 unit_address, u64 *state);
+
+/**
+ * cxl_h_get_afu_err - collect the AFU error buffer
+ * Parameter1 = byte offset into error buffer to retrieve, valid values
+ * are between 0 and (ibm,error-buffer-size - 1)
+ * Parameter2 = 4K aligned real address of error buffer, to be filled in
+ * Parameter3 = length of error buffer, valid values are 4K or less
+ */
+long cxl_h_get_afu_err(u64 unit_address, u64 offset, u64 buf_address, u64 len);
+
+/**
+ * cxl_h_get_config - collect configuration record for the
+ * coherent platform function
+ * Parameter1 = # of configuration record to retrieve, valid values are
+ * between 0 and (ibm,#config-records - 1)
+ * Parameter2 = byte offset into configuration record to retrieve,
+ * valid values are between 0 and (ibm,config-record-size - 1)
+ * Parameter3 = 4K aligned real address of configuration record buffer,
+ * to be filled in
+ * Parameter4 = length of configuration buffer, valid values are 4K or less
+ */
+long cxl_h_get_config(u64 unit_address, u64 cr_num, u64 offset,
+ u64 buf_address, u64 len);
+
+/**
+ * cxl_h_terminate_process - Terminate the process before completion
+ * Parameter1 = process-token as returned from H_ATTACH_CA_PROCESS when
+ * process was attached.
+ */
+long cxl_h_terminate_process(u64 unit_address, u64 process_token);
+
+/**
+ * cxl_h_collect_vpd - Collect VPD for the coherent platform function.
+ * Parameter1 = # of VPD record to retrieve, valid values are between 0
+ * and (ibm,#config-records - 1).
+ * Parameter2 = 4K naturally aligned real buffer containing block
+ * list entries
+ * Parameter3 = number of block list entries in the block list, valid
+ * values are between 0 and 256
+ */
+long cxl_h_collect_vpd(u64 unit_address, u64 record, u64 list_address,
+ u64 num, u64 *out);
+
+/**
+ * cxl_h_get_fn_error_interrupt - Read the function-wide error data based on an interrupt
+ */
+long cxl_h_get_fn_error_interrupt(u64 unit_address, u64 *reg);
+
+/**
+ * cxl_h_ack_fn_error_interrupt - Acknowledge function-wide error data
+ * based on an interrupt
+ * Parameter1 = value to write to the function-wide error interrupt register
+ */
+long cxl_h_ack_fn_error_interrupt(u64 unit_address, u64 value);
+
+/**
+ * cxl_h_get_error_log - Retrieve the Platform Log ID (PLID) of
+ * an error log
+ */
+long cxl_h_get_error_log(u64 unit_address, u64 value);
+
+/**
+ * cxl_h_collect_int_info - Collect interrupt info about a coherent
+ * platform function after an interrupt occurred.
+ */
+long cxl_h_collect_int_info(u64 unit_address, u64 process_token,
+ struct cxl_irq_info *info);
+
+/**
+ * cxl_h_control_faults - Control the operation of a coherent platform
+ * function after a fault occurs.
+ *
+ * Parameters
+ * control-mask: value to control the faults
+ * looks like PSL_TFC_An shifted >> 32
+ * reset-mask: mask to control reset of function faults
+ * Set reset_mask = 1 to reset PSL errors
+ */
+long cxl_h_control_faults(u64 unit_address, u64 process_token,
+ u64 control_mask, u64 reset_mask);
+
+/**
+ * cxl_h_reset_adapter - Perform a reset of the coherent platform facility.
+ */
+long cxl_h_reset_adapter(u64 unit_address);
+
+/**
+ * cxl_h_collect_vpd_adapter - Collect VPD for the coherent platform facility.
+ * Parameter1 = 4K naturally aligned real buffer containing block
+ * list entries
+ * Parameter2 = number of block list entries in the block list, valid
+ * values are between 0 and 256
+ */
+long cxl_h_collect_vpd_adapter(u64 unit_address, u64 list_address,
+ u64 num, u64 *out);
+
+/**
+ * cxl_h_download_adapter_image - Download the base image to the coherent
+ * platform facility.
+ */
+long cxl_h_download_adapter_image(u64 unit_address,
+ u64 list_address, u64 num,
+ u64 *out);
+
+/**
+ * cxl_h_validate_adapter_image - Validate the base image in the coherent
+ * platform facility.
+ */
+long cxl_h_validate_adapter_image(u64 unit_address,
+ u64 list_address, u64 num,
+ u64 *out);
+#endif /* _HCALLS_H */
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 09a406058c46..be646dc41a2c 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -19,70 +19,11 @@
#include "cxl.h"
#include "trace.h"
-/* XXX: This is implementation specific */
-static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
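+/*
+ * First IRQ range that holds AFU interrupts: on bare-metal, range 0
+ * contains only the multiplexed PSL interrupt, so AFU ranges start at 1;
+ * in a guest, range 0 also carries AFU interrupts.
+ */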
+static int afu_irq_range_start(void)
{
- u64 fir1, fir2, fir_slice, serr, afu_debug;
-
- fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
- fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
- serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
- afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
-
- dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
- dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
- dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
- dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
- dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
- dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
-
- dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
- cxl_stop_trace(ctx->afu->adapter);
-
- return cxl_ack_irq(ctx, 0, errstat);
-}
-
-irqreturn_t cxl_slice_irq_err(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- u64 fir_slice, errstat, serr, afu_debug;
-
- WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
-
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
- errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
- afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
- dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
- dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
- dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
- dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
-
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cxl_irq_err(int irq, void *data)
-{
- struct cxl *adapter = data;
- u64 fir1, fir2, err_ivte;
-
- WARN(1, "CXL ERROR interrupt %i\n", irq);
-
- err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
- dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
-
- dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
- cxl_stop_trace(adapter);
-
- fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
- fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
-
- dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
-
- return IRQ_HANDLED;
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ return 1;
+ return 0;
}
static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
@@ -93,9 +34,8 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da
return IRQ_HANDLED;
}
-static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
+irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_info)
{
- struct cxl_context *ctx = data;
u64 dsisr, dar;
dsisr = irq_info->dsisr;
@@ -145,7 +85,8 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
if (dsisr & CXL_PSL_DSISR_An_UR)
pr_devel("CXL interrupt: AURP PTE not found\n");
if (dsisr & CXL_PSL_DSISR_An_PE)
- return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
+ return cxl_ops->handle_psl_slice_error(ctx, dsisr,
+ irq_info->errstat);
if (dsisr & CXL_PSL_DSISR_An_AE) {
pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
@@ -169,7 +110,7 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
wake_up_all(&ctx->wq);
}
- cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
return IRQ_HANDLED;
}
if (dsisr & CXL_PSL_DSISR_An_OC)
@@ -179,54 +120,27 @@ static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
return IRQ_HANDLED;
}
-static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
-{
- if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
- else
- cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
-{
- struct cxl_afu *afu = data;
- struct cxl_context *ctx;
- struct cxl_irq_info irq_info;
- int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
- int ret;
-
- if ((ret = cxl_get_irq(afu, &irq_info))) {
- WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
- return fail_psl_irq(afu, &irq_info);
- }
-
- rcu_read_lock();
- ctx = idr_find(&afu->contexts_idr, ph);
- if (ctx) {
- ret = cxl_irq(irq, ctx, &irq_info);
- rcu_read_unlock();
- return ret;
- }
- rcu_read_unlock();
-
- WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
- " %016llx\n(Possible AFU HW issue - was a term/remove acked"
- " with outstanding transactions?)\n", ph, irq_info.dsisr,
- irq_info.dar);
- return fail_psl_irq(afu, &irq_info);
-}
-
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
struct cxl_context *ctx = data;
irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
- int irq_off, afu_irq = 1;
+ int irq_off, afu_irq = 0;
__u16 range;
int r;
- for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ /*
+ * Look for the interrupt number.
+ * On bare-metal, we know range 0 only contains the PSL
+ * interrupt so we could start counting at range 1 and initialize
+ * afu_irq at 1.
+ * In a guest, range 0 also contains AFU interrupts, so it must
+	 * be accounted for. Therefore we initialize afu_irq at 0 to take
+	 * the PSL interrupt into account.
+	 *
+	 * For code readability, it is simpler to iterate over all the
+	 * ranges on both bare-metal and guest; the end result is the same.
+ */
+ for (r = 0; r < CXL_IRQ_RANGES; r++) {
irq_off = hwirq - ctx->irqs.offset[r];
range = ctx->irqs.range[r];
if (irq_off >= 0 && irq_off < range) {
@@ -236,7 +150,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
afu_irq += range;
}
if (unlikely(r >= CXL_IRQ_RANGES)) {
- WARN(1, "Recieved AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
+ WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
ctx->pe, irq, hwirq);
return IRQ_HANDLED;
}
@@ -246,7 +160,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
afu_irq, ctx->pe, irq, hwirq);
if (unlikely(!ctx->irq_bitmap)) {
- WARN(1, "Recieved AFU IRQ for context with no IRQ bitmap\n");
+ WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
return IRQ_HANDLED;
}
spin_lock(&ctx->lock);
@@ -272,7 +186,8 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
return 0;
}
- cxl_setup_irq(adapter, hwirq, virq);
+ if (cxl_ops->setup_irq)
+ cxl_ops->setup_irq(adapter, hwirq, virq);
pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
@@ -291,16 +206,16 @@ void cxl_unmap_irq(unsigned int virq, void *cookie)
irq_dispose_mapping(virq);
}
-static int cxl_register_one_irq(struct cxl *adapter,
- irq_handler_t handler,
- void *cookie,
- irq_hw_number_t *dest_hwirq,
- unsigned int *dest_virq,
- const char *name)
+int cxl_register_one_irq(struct cxl *adapter,
+ irq_handler_t handler,
+ void *cookie,
+ irq_hw_number_t *dest_hwirq,
+ unsigned int *dest_virq,
+ const char *name)
{
int hwirq, virq;
- if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
+ if ((hwirq = cxl_ops->alloc_one_irq(adapter)) < 0)
return hwirq;
if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
@@ -312,108 +227,10 @@ static int cxl_register_one_irq(struct cxl *adapter,
return 0;
err:
- cxl_release_one_irq(adapter, hwirq);
+ cxl_ops->release_one_irq(adapter, hwirq);
return -ENOMEM;
}
-int cxl_register_psl_err_irq(struct cxl *adapter)
-{
- int rc;
-
- adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&adapter->dev));
- if (!adapter->irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
- &adapter->err_hwirq,
- &adapter->err_virq,
- adapter->irq_name))) {
- kfree(adapter->irq_name);
- adapter->irq_name = NULL;
- return rc;
- }
-
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
-
- return 0;
-}
-
-void cxl_release_psl_err_irq(struct cxl *adapter)
-{
- if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
- return;
-
- cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
- cxl_unmap_irq(adapter->err_virq, adapter);
- cxl_release_one_irq(adapter, adapter->err_hwirq);
- kfree(adapter->irq_name);
-}
-
-int cxl_register_serr_irq(struct cxl_afu *afu)
-{
- u64 serr;
- int rc;
-
- afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
- dev_name(&afu->dev));
- if (!afu->err_irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
- &afu->serr_hwirq,
- &afu->serr_virq, afu->err_irq_name))) {
- kfree(afu->err_irq_name);
- afu->err_irq_name = NULL;
- return rc;
- }
-
- serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
- serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
- cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
-
- return 0;
-}
-
-void cxl_release_serr_irq(struct cxl_afu *afu)
-{
- if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
- return;
-
- cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
- cxl_unmap_irq(afu->serr_virq, afu);
- cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
- kfree(afu->err_irq_name);
-}
-
-int cxl_register_psl_irq(struct cxl_afu *afu)
-{
- int rc;
-
- afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
- dev_name(&afu->dev));
- if (!afu->psl_irq_name)
- return -ENOMEM;
-
- if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
- &afu->psl_hwirq, &afu->psl_virq,
- afu->psl_irq_name))) {
- kfree(afu->psl_irq_name);
- afu->psl_irq_name = NULL;
- }
- return rc;
-}
-
-void cxl_release_psl_irq(struct cxl_afu *afu)
-{
- if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
- return;
-
- cxl_unmap_irq(afu->psl_virq, afu);
- cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
- kfree(afu->psl_irq_name);
-}
-
void afu_irq_name_free(struct cxl_context *ctx)
{
struct cxl_irq_name *irq_name, *tmp;
@@ -429,16 +246,33 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
{
int rc, r, i, j = 1;
struct cxl_irq_name *irq_name;
+ int alloc_count;
+
+ /*
+ * In native mode, range 0 is reserved for the multiplexed
+	 * PSL interrupt, which was allocated when the AFU was initialized.
+	 *
+	 * In a guest, the PSL interrupt is not multiplexed, but per-context,
+ * and is the first interrupt from range 0. It still needs to be
+ * allocated, so bump the count by one.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ alloc_count = count;
+ else
+ alloc_count = count + 1;
/* Initialize the list head to hold irq names */
INIT_LIST_HEAD(&ctx->irq_names);
- if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
+ if ((rc = cxl_ops->alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter,
+ alloc_count)))
return rc;
- /* Multiplexed PSL Interrupt */
- ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
- ctx->irqs.range[0] = 1;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ /* Multiplexed PSL Interrupt */
+ ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
+ ctx->irqs.range[0] = 1;
+ }
ctx->irq_count = count;
ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
@@ -450,7 +284,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
* Allocate names first. If any fail, bail out before allocating
* actual hardware IRQs.
*/
- for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
for (i = 0; i < ctx->irqs.range[r]; i++) {
irq_name = kmalloc(sizeof(struct cxl_irq_name),
GFP_KERNEL);
@@ -471,7 +305,7 @@ int afu_allocate_irqs(struct cxl_context *ctx, u32 count)
return 0;
out:
- cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+ cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
afu_irq_name_free(ctx);
return -ENOMEM;
}
@@ -480,15 +314,30 @@ static void afu_register_hwirqs(struct cxl_context *ctx)
{
irq_hw_number_t hwirq;
struct cxl_irq_name *irq_name;
- int r,i;
+ int r, i;
+ irqreturn_t (*handler)(int irq, void *data);
/* We've allocated all memory now, so let's do the irq allocations */
irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
- for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
- cxl_map_irq(ctx->afu->adapter, hwirq,
- cxl_irq_afu, ctx, irq_name->name);
+ if (r == 0 && i == 0)
+ /*
+ * The very first interrupt of range 0 is
+ * always the PSL interrupt, but we only
+ * need to connect a handler for guests,
+ * because there's one PSL interrupt per
+ * context.
+ * On bare-metal, the PSL interrupt is
+				 * multiplexed and was set up when the AFU
+ * was configured.
+ */
+ handler = cxl_ops->psl_interrupt;
+ else
+ handler = cxl_irq_afu;
+ cxl_map_irq(ctx->afu->adapter, hwirq, handler, ctx,
+ irq_name->name);
irq_name = list_next_entry(irq_name, list);
}
}
@@ -504,7 +353,7 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
afu_register_hwirqs(ctx);
return 0;
- }
+}
void afu_release_irqs(struct cxl_context *ctx, void *cookie)
{
@@ -512,7 +361,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
unsigned int virq;
int r, i;
- for (r = 1; r < CXL_IRQ_RANGES; r++) {
+ for (r = afu_irq_range_start(); r < CXL_IRQ_RANGES; r++) {
hwirq = ctx->irqs.offset[r];
for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
virq = irq_find_mapping(NULL, hwirq);
@@ -522,7 +371,7 @@ void afu_release_irqs(struct cxl_context *ctx, void *cookie)
}
afu_irq_name_free(ctx);
- cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+ cxl_ops->release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
ctx->irq_count = 0;
}
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 9fde75ed4fac..ae68c3201156 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -32,6 +32,29 @@ uint cxl_verbose;
module_param_named(verbose, cxl_verbose, uint, 0600);
MODULE_PARM_DESC(verbose, "Enable verbose dmesg output");
+const struct cxl_backend_ops *cxl_ops;
+
+int cxl_afu_slbia(struct cxl_afu *afu)
+{
+ unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+
+ pr_devel("cxl_afu_slbia issuing SLBIA command\n");
+ cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
+ while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
+ if (time_after_eq(jiffies, timeout)) {
+ dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
+ return -EBUSY;
+ }
+ /* If the adapter has gone down, we can assume that we
+ * will PERST it and that will invalidate everything.
+ */
+ if (!cxl_ops->link_ok(afu->adapter, afu))
+ return -EIO;
+ cpu_relax();
+ }
+ return 0;
+}
+
static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm)
{
struct task_struct *task;
@@ -139,6 +162,32 @@ int cxl_alloc_sst(struct cxl_context *ctx)
return 0;
}
+/* print buffer content as integers when debugging */
+void cxl_dump_debug_buffer(void *buf, size_t buf_len)
+{
+#ifdef DEBUG
+ int i, *ptr;
+
+ /*
+	 * We want to group up to 4 integers per line, which means they
+ * need to be in the same pr_devel() statement
+ */
+ ptr = (int *) buf;
+ for (i = 0; i * 4 < buf_len; i += 4) {
+ if ((i + 3) * 4 < buf_len)
+ pr_devel("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
+ ptr[i + 2], ptr[i + 3]);
+ else if ((i + 2) * 4 < buf_len)
+ pr_devel("%.8x %.8x %.8x\n", ptr[i], ptr[i + 1],
+ ptr[i + 2]);
+ else if ((i + 1) * 4 < buf_len)
+ pr_devel("%.8x %.8x\n", ptr[i], ptr[i + 1]);
+ else
+ pr_devel("%.8x\n", ptr[i]);
+ }
+#endif /* DEBUG */
+}
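+
+/*
+ * Example (illustrative sketch, not part of this patch; assumes the
+ * hcall's R4 output is the number of valid bytes in the buffer):
+ *
+ *	cxl_h_collect_vpd_adapter(unit_address, virt_to_phys(list), 1, &out);
+ *	cxl_dump_debug_buffer(vpd_buf, out);
+ */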
+
/* Find a CXL adapter by its number and increase its refcount */
struct cxl *get_cxl_adapter(int num)
{
@@ -152,7 +201,7 @@ struct cxl *get_cxl_adapter(int num)
return adapter;
}
-int cxl_alloc_adapter_nr(struct cxl *adapter)
+static int cxl_alloc_adapter_nr(struct cxl *adapter)
{
int i;
@@ -174,13 +223,58 @@ void cxl_remove_adapter_nr(struct cxl *adapter)
idr_remove(&cxl_adapter_idr, adapter->adapter_num);
}
+struct cxl *cxl_alloc_adapter(void)
+{
+ struct cxl *adapter;
+
+ if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
+ return NULL;
+
+ spin_lock_init(&adapter->afu_list_lock);
+
+ if (cxl_alloc_adapter_nr(adapter))
+ goto err1;
+
+ if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
+ goto err2;
+
+ return adapter;
+
+err2:
+ cxl_remove_adapter_nr(adapter);
+err1:
+ kfree(adapter);
+ return NULL;
+}
+
+struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
+{
+ struct cxl_afu *afu;
+
+ if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
+ return NULL;
+
+ afu->adapter = adapter;
+ afu->dev.parent = &adapter->dev;
+ afu->dev.release = cxl_ops->release_afu;
+ afu->slice = slice;
+ idr_init(&afu->contexts_idr);
+ mutex_init(&afu->contexts_lock);
+ spin_lock_init(&afu->afu_cntl_lock);
+
+ afu->prefault_mode = CXL_PREFAULT_NONE;
+ afu->irqs_max = afu->adapter->user_irqs;
+
+ return afu;
+}
+
int cxl_afu_select_best_mode(struct cxl_afu *afu)
{
if (afu->modes_supported & CXL_MODE_DIRECTED)
- return cxl_afu_activate_mode(afu, CXL_MODE_DIRECTED);
+ return cxl_ops->afu_activate_mode(afu, CXL_MODE_DIRECTED);
if (afu->modes_supported & CXL_MODE_DEDICATED)
- return cxl_afu_activate_mode(afu, CXL_MODE_DEDICATED);
+ return cxl_ops->afu_activate_mode(afu, CXL_MODE_DEDICATED);
dev_warn(&afu->dev, "No supported programming modes available\n");
/* We don't fail this so the user can inspect sysfs */
@@ -191,9 +285,6 @@ static int __init init_cxl(void)
{
int rc = 0;
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EPERM;
-
if ((rc = cxl_file_init()))
return rc;
@@ -202,7 +293,17 @@ static int __init init_cxl(void)
if ((rc = register_cxl_calls(&cxl_calls)))
goto err;
- if ((rc = pci_register_driver(&cxl_pci_driver)))
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ cxl_ops = &cxl_native_ops;
+ rc = pci_register_driver(&cxl_pci_driver);
+ }
+#ifdef CONFIG_PPC_PSERIES
+ else {
+ cxl_ops = &cxl_guest_ops;
+ rc = platform_driver_register(&cxl_of_driver);
+ }
+#endif
+ if (rc)
goto err1;
return 0;
@@ -217,7 +318,12 @@ err:
static void exit_cxl(void)
{
- pci_unregister_driver(&cxl_pci_driver);
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ pci_unregister_driver(&cxl_pci_driver);
+#ifdef CONFIG_PPC_PSERIES
+ else
+ platform_driver_unregister(&cxl_of_driver);
+#endif
cxl_debugfs_exit();
cxl_file_exit();
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index f40909793490..387fcbdf9793 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -42,7 +42,7 @@ static int afu_control(struct cxl_afu *afu, u64 command,
goto out;
}
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
afu->enabled = enabled;
rc = -EIO;
goto out;
@@ -80,7 +80,7 @@ int cxl_afu_disable(struct cxl_afu *afu)
}
/* This will disable as well as reset */
-int __cxl_afu_reset(struct cxl_afu *afu)
+static int native_afu_reset(struct cxl_afu *afu)
{
pr_devel("AFU reset request\n");
@@ -90,9 +90,9 @@ int __cxl_afu_reset(struct cxl_afu *afu)
false);
}
-int cxl_afu_check_and_enable(struct cxl_afu *afu)
+static int native_afu_check_and_enable(struct cxl_afu *afu)
{
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
WARN(1, "Refusing to enable afu while link down!\n");
return -EIO;
}
@@ -114,7 +114,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
pr_devel("PSL purge request\n");
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
rc = -EIO;
goto out;
@@ -136,7 +136,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
rc = -EBUSY;
goto out;
}
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
rc = -EIO;
goto out;
}
@@ -186,22 +186,22 @@ static int spa_max_procs(int spa_size)
int cxl_alloc_spa(struct cxl_afu *afu)
{
/* Work out how many pages to allocate */
- afu->spa_order = 0;
+ afu->native->spa_order = 0;
do {
- afu->spa_order++;
- afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
- afu->spa_max_procs = spa_max_procs(afu->spa_size);
- } while (afu->spa_max_procs < afu->num_procs);
+ afu->native->spa_order++;
+ afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+ afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
+ } while (afu->native->spa_max_procs < afu->num_procs);
- WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */
+ WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */
- if (!(afu->spa = (struct cxl_process_element *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
+ if (!(afu->native->spa = (struct cxl_process_element *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
return -ENOMEM;
}
pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
- 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
+ 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
return 0;
}
@@ -210,13 +210,15 @@ static void attach_spa(struct cxl_afu *afu)
{
u64 spap;
- afu->sw_command_status = (__be64 *)((char *)afu->spa +
- ((afu->spa_max_procs + 3) * 128));
+ afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
+ ((afu->native->spa_max_procs + 3) * 128));
- spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
- spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
+ spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
+ spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
spap |= CXL_PSL_SPAP_V;
- pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
+ pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
+ afu->native->spa, afu->native->spa_max_procs,
+ afu->native->sw_command_status, spap);
cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
@@ -227,9 +229,10 @@ static inline void detach_spa(struct cxl_afu *afu)
void cxl_release_spa(struct cxl_afu *afu)
{
- if (afu->spa) {
- free_pages((unsigned long) afu->spa, afu->spa_order);
- afu->spa = NULL;
+ if (afu->native->spa) {
+ free_pages((unsigned long) afu->native->spa,
+ afu->native->spa_order);
+ afu->native->spa = NULL;
}
}
@@ -247,7 +250,7 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter)
dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
return -EBUSY;
}
- if (!cxl_adapter_link_ok(adapter))
+ if (!cxl_ops->link_ok(adapter, NULL))
return -EIO;
cpu_relax();
}
@@ -258,28 +261,7 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter)
dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
return -EBUSY;
}
- if (!cxl_adapter_link_ok(adapter))
- return -EIO;
- cpu_relax();
- }
- return 0;
-}
-
-int cxl_afu_slbia(struct cxl_afu *afu)
-{
- unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
-
- pr_devel("cxl_afu_slbia issuing SLBIA command\n");
- cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
- while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
- if (time_after_eq(jiffies, timeout)) {
- dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
- return -EBUSY;
- }
- /* If the adapter has gone down, we can assume that we
- * will PERST it and that will invalidate everything.
- */
- if (!cxl_adapter_link_ok(afu->adapter))
+ if (!cxl_ops->link_ok(adapter, NULL))
return -EIO;
cpu_relax();
}
@@ -312,7 +294,7 @@ static void slb_invalid(struct cxl_context *ctx)
struct cxl *adapter = ctx->afu->adapter;
u64 slbia;
- WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));
+ WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
cxl_p1_write(adapter, CXL_PSL_LBISEL,
((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
@@ -320,7 +302,7 @@ static void slb_invalid(struct cxl_context *ctx)
cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
while (1) {
- if (!cxl_adapter_link_ok(adapter))
+ if (!cxl_ops->link_ok(adapter, NULL))
break;
slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
if (!(slbia & CXL_TLB_SLB_P))
@@ -342,7 +324,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
ctx->elem->software_state = cpu_to_be32(pe_state);
smp_wmb();
- *(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
+ *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
smp_mb();
cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
while (1) {
@@ -351,12 +333,12 @@ static int do_process_element_cmd(struct cxl_context *ctx,
rc = -EBUSY;
goto out;
}
- if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
rc = -EIO;
goto out;
}
- state = be64_to_cpup(ctx->afu->sw_command_status);
+ state = be64_to_cpup(ctx->afu->native->sw_command_status);
if (state == ~0ULL) {
pr_err("cxl: Error adding process element to AFU\n");
rc = -1;
@@ -384,12 +366,12 @@ static int add_process_element(struct cxl_context *ctx)
{
int rc = 0;
- mutex_lock(&ctx->afu->spa_mutex);
+ mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
ctx->pe_inserted = true;
pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->spa_mutex);
+ mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
@@ -401,18 +383,18 @@ static int terminate_process_element(struct cxl_context *ctx)
if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
return rc;
- mutex_lock(&ctx->afu->spa_mutex);
+ mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
/* We could be asked to terminate when the hw is down. That
* should always succeed: it's not running if the hw has gone
* away and is being reset.
*/
- if (cxl_adapter_link_ok(ctx->afu->adapter))
+ if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
ctx->elem->software_state = 0; /* Remove Valid bit */
pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->spa_mutex);
+ mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
@@ -420,20 +402,20 @@ static int remove_process_element(struct cxl_context *ctx)
{
int rc = 0;
- mutex_lock(&ctx->afu->spa_mutex);
+ mutex_lock(&ctx->afu->native->spa_mutex);
pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
/* We could be asked to remove when the hw is down. Again, if
* the hw is down, the PE is gone, so we succeed.
*/
- if (cxl_adapter_link_ok(ctx->afu->adapter))
+ if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
if (!rc)
ctx->pe_inserted = false;
slb_invalid(ctx);
pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
- mutex_unlock(&ctx->afu->spa_mutex);
+ mutex_unlock(&ctx->afu->native->spa_mutex);
return rc;
}
@@ -446,7 +428,7 @@ void cxl_assign_psn_space(struct cxl_context *ctx)
ctx->psn_size = ctx->afu->adapter->ps_size;
} else {
ctx->psn_phys = ctx->afu->psn_phys +
- (ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
+ (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
ctx->psn_size = ctx->afu->pp_size;
}
}
@@ -458,7 +440,7 @@ static int activate_afu_directed(struct cxl_afu *afu)
dev_info(&afu->dev, "Activating AFU directed mode\n");
afu->num_procs = afu->max_procs_virtualised;
- if (afu->spa == NULL) {
+ if (afu->native->spa == NULL) {
if (cxl_alloc_spa(afu))
return -ENOMEM;
}
@@ -552,7 +534,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
ctx->elem->common.wed = cpu_to_be64(wed);
/* first guy needs to enable */
- if ((result = cxl_afu_check_and_enable(ctx->afu)))
+ if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
return result;
return add_process_element(ctx);
@@ -568,7 +550,7 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
cxl_sysfs_afu_m_remove(afu);
cxl_chardev_afu_remove(afu);
- __cxl_afu_reset(afu);
+ cxl_ops->afu_reset(afu);
cxl_afu_disable(afu);
cxl_psl_purge(afu);
@@ -632,7 +614,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
/* master only context for dedicated */
cxl_assign_psn_space(ctx);
- if ((rc = __cxl_afu_reset(afu)))
+ if ((rc = cxl_ops->afu_reset(afu)))
return rc;
cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
@@ -652,7 +634,7 @@ static int deactivate_dedicated_process(struct cxl_afu *afu)
return 0;
}
-int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
+static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
if (mode == CXL_MODE_DIRECTED)
return deactivate_afu_directed(afu);
@@ -661,19 +643,14 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
return 0;
}
-int cxl_afu_deactivate_mode(struct cxl_afu *afu)
-{
- return _cxl_afu_deactivate_mode(afu, afu->current_mode);
-}
-
-int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
+static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
if (!mode)
return 0;
if (!(mode & afu->modes_supported))
return -EINVAL;
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
WARN(1, "Device link is down, refusing to activate!\n");
return -EIO;
}
@@ -686,9 +663,10 @@ int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
return -EINVAL;
}
-int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
+static int native_attach_process(struct cxl_context *ctx, bool kernel,
+ u64 wed, u64 amr)
{
- if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+ if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
WARN(1, "Device link is down, refusing to attach process!\n");
return -EIO;
}
@@ -705,7 +683,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
- __cxl_afu_reset(ctx->afu);
+ cxl_ops->afu_reset(ctx->afu);
cxl_afu_disable(ctx->afu);
cxl_psl_purge(ctx->afu);
return 0;
@@ -723,7 +701,7 @@ static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
return 0;
}
-int cxl_detach_process(struct cxl_context *ctx)
+static int native_detach_process(struct cxl_context *ctx)
{
trace_cxl_detach(ctx);
@@ -733,14 +711,14 @@ int cxl_detach_process(struct cxl_context *ctx)
return detach_process_native_afu_directed(ctx);
}
-int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
+static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
u64 pidtid;
/* If the adapter has gone away, we can't get any meaningful
* information.
*/
- if (!cxl_adapter_link_ok(afu->adapter))
+ if (!cxl_ops->link_ok(afu->adapter, afu))
return -EIO;
info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
@@ -751,10 +729,214 @@ int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
info->tid = pidtid & 0xffffffff;
info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
+ info->proc_handle = 0;
+
+ return 0;
+}
+
+static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
+ u64 dsisr, u64 errstat)
+{
+ u64 fir1, fir2, fir_slice, serr, afu_debug;
+
+ fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
+ fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
+ fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
+ serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
+ afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
+
+ dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
+ dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
+ dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
+ dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+ dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+
+ dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
+ cxl_stop_trace(ctx->afu->adapter);
+
+ return cxl_ops->ack_irq(ctx, 0, errstat);
+}
+
+static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
+{
+ if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+ else
+ cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t native_irq_multiplexed(int irq, void *data)
+{
+ struct cxl_afu *afu = data;
+ struct cxl_context *ctx;
+ struct cxl_irq_info irq_info;
+ int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
+ int ret;
+
+ if ((ret = native_get_irq_info(afu, &irq_info))) {
+ WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
+ return fail_psl_irq(afu, &irq_info);
+ }
+
+ rcu_read_lock();
+ ctx = idr_find(&afu->contexts_idr, ph);
+ if (ctx) {
+ ret = cxl_irq(irq, ctx, &irq_info);
+ rcu_read_unlock();
+ return ret;
+ }
+ rcu_read_unlock();
+
+ WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
+ " %016llx\n(Possible AFU HW issue - was a term/remove acked"
+ " with outstanding transactions?)\n", ph, irq_info.dsisr,
+ irq_info.dar);
+ return fail_psl_irq(afu, &irq_info);
+}
+
+static irqreturn_t native_slice_irq_err(int irq, void *data)
+{
+ struct cxl_afu *afu = data;
+ u64 fir_slice, errstat, serr, afu_debug;
+
+ WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+
+ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
+ errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
+ afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
+ dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+ dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+ dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
+ dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t native_irq_err(int irq, void *data)
+{
+ struct cxl *adapter = data;
+ u64 fir1, fir2, err_ivte;
+
+ WARN(1, "CXL ERROR interrupt %i\n", irq);
+
+ err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
+ dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
+
+ dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
+ cxl_stop_trace(adapter);
+
+ fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
+ fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+
+ dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+
+ return IRQ_HANDLED;
+}
+
+int cxl_native_register_psl_err_irq(struct cxl *adapter)
+{
+ int rc;
+
+ adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+ dev_name(&adapter->dev));
+ if (!adapter->irq_name)
+ return -ENOMEM;
+
+ if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
+ &adapter->native->err_hwirq,
+ &adapter->native->err_virq,
+ adapter->irq_name))) {
+ kfree(adapter->irq_name);
+ adapter->irq_name = NULL;
+ return rc;
+ }
+
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
+
+ return 0;
+}
+
+void cxl_native_release_psl_err_irq(struct cxl *adapter)
+{
+ if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+ return;
+
+ cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+ cxl_unmap_irq(adapter->native->err_virq, adapter);
+ cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
+ kfree(adapter->irq_name);
+}
+
+int cxl_native_register_serr_irq(struct cxl_afu *afu)
+{
+ u64 serr;
+ int rc;
+
+ afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
+ dev_name(&afu->dev));
+ if (!afu->err_irq_name)
+ return -ENOMEM;
+
+ if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
+ &afu->serr_hwirq,
+ &afu->serr_virq, afu->err_irq_name))) {
+ kfree(afu->err_irq_name);
+ afu->err_irq_name = NULL;
+ return rc;
+ }
+
+ serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+ serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
return 0;
}
+void cxl_native_release_serr_irq(struct cxl_afu *afu)
+{
+ if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+ return;
+
+ cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
+ cxl_unmap_irq(afu->serr_virq, afu);
+ cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
+ kfree(afu->err_irq_name);
+}
+
+int cxl_native_register_psl_irq(struct cxl_afu *afu)
+{
+ int rc;
+
+ afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
+ dev_name(&afu->dev));
+ if (!afu->psl_irq_name)
+ return -ENOMEM;
+
+ if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
+ afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
+ afu->psl_irq_name))) {
+ kfree(afu->psl_irq_name);
+ afu->psl_irq_name = NULL;
+ }
+ return rc;
+}
+
+void cxl_native_release_psl_irq(struct cxl_afu *afu)
+{
+ if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+ return;
+
+ cxl_unmap_irq(afu->native->psl_virq, afu);
+ cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
+ kfree(afu->psl_irq_name);
+}
+
static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
u64 dsisr;
@@ -769,7 +951,7 @@ static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}
-int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
+static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
trace_cxl_psl_irq_ack(ctx, tfc);
if (tfc)
@@ -784,3 +966,132 @@ int cxl_check_error(struct cxl_afu *afu)
{
return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}
+
+static bool native_support_attributes(const char *attr_name,
+ enum cxl_attrs type)
+{
+ return true;
+}
+
+static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
+{
+ if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
+ return -EIO;
+ if (unlikely(off >= afu->crs_len))
+ return -ERANGE;
+ *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
+ (cr * afu->crs_len) + off);
+ return 0;
+}
+
+static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
+{
+ if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
+ return -EIO;
+ if (unlikely(off >= afu->crs_len))
+ return -ERANGE;
+ *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
+ (cr * afu->crs_len) + off);
+ return 0;
+}
+
+static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val;
+ int rc;
+
+ rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
+ if (!rc)
+ *out = (val >> ((off & 0x3) * 8)) & 0xffff;
+ return rc;
+}
+
+static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val;
+ int rc;
+
+ rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
+ if (!rc)
+ *out = (val >> ((off & 0x3) * 8)) & 0xff;
+ return rc;
+}
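
(Editor's note, illustrative: a 16-bit read at off = 0x6 is widened to a
32-bit read at aligned_off = 0x4. Since in_le32() returns the word with the
byte at offset 4 in its low byte, shift = (0x6 & 0x3) * 8 = 16, and for
val = 0xddccbbaa:

	*out = (0xddccbbaa >> 16) & 0xffff == 0xddcc

i.e. the bytes at config-record offsets 6 and 7.)
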
+
+static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
+{
+ if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
+ return -EIO;
+ if (unlikely(off >= afu->crs_len))
+ return -ERANGE;
+ out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
+ (cr * afu->crs_len) + off, in);
+ return 0;
+}
+
+static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val32, mask, shift;
+ int rc;
+
+ rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
+ if (rc)
+ return rc;
+ shift = (off & 0x3) * 8;
+ WARN_ON(shift == 24);
+ mask = 0xffff << shift;
+ val32 = (val32 & ~mask) | (in << shift);
+
+ rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
+ return rc;
+}
+
+static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
+{
+ u64 aligned_off = off & ~0x3L;
+ u32 val32, mask, shift;
+ int rc;
+
+ rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
+ if (rc)
+ return rc;
+ shift = (off & 0x3) * 8;
+ mask = 0xff << shift;
+ val32 = (val32 & ~mask) | (in << shift);
+
+ rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
+ return rc;
+}
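
(Editor's note, illustrative: the sub-word writes are read-modify-write. An
8-bit write of in = 0xab at off = 0x5 reads the word at aligned_off = 0x4,
computes shift = 8 and mask = 0xff00, and for val32 = 0x11223344:

	(0x11223344 & ~0xff00) | (0xab << 8) == 0x1122ab44

which is written back via native_afu_cr_write32(). The WARN_ON in the 16-bit
variant catches off & 0x3 == 3, where the value would straddle the 32-bit
word.)
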
+
+const struct cxl_backend_ops cxl_native_ops = {
+ .module = THIS_MODULE,
+ .adapter_reset = cxl_pci_reset,
+ .alloc_one_irq = cxl_pci_alloc_one_irq,
+ .release_one_irq = cxl_pci_release_one_irq,
+ .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
+ .release_irq_ranges = cxl_pci_release_irq_ranges,
+ .setup_irq = cxl_pci_setup_irq,
+ .handle_psl_slice_error = native_handle_psl_slice_error,
+ .psl_interrupt = NULL,
+ .ack_irq = native_ack_irq,
+ .attach_process = native_attach_process,
+ .detach_process = native_detach_process,
+ .support_attributes = native_support_attributes,
+ .link_ok = cxl_adapter_link_ok,
+ .release_afu = cxl_pci_release_afu,
+ .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
+ .afu_check_and_enable = native_afu_check_and_enable,
+ .afu_activate_mode = native_afu_activate_mode,
+ .afu_deactivate_mode = native_afu_deactivate_mode,
+ .afu_reset = native_afu_reset,
+ .afu_cr_read8 = native_afu_cr_read8,
+ .afu_cr_read16 = native_afu_cr_read16,
+ .afu_cr_read32 = native_afu_cr_read32,
+ .afu_cr_read64 = native_afu_cr_read64,
+ .afu_cr_write8 = native_afu_cr_write8,
+ .afu_cr_write16 = native_afu_cr_write16,
+ .afu_cr_write32 = native_afu_cr_write32,
+ .read_adapter_vpd = cxl_pci_read_adapter_vpd,
+};
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
new file mode 100644
index 000000000000..edc458395f68
--- /dev/null
+++ b/drivers/misc/cxl/of.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include "cxl.h"
+
+
+static const __be32 *read_prop_string(const struct device_node *np,
+ const char *prop_name)
+{
+ const __be32 *prop;
+
+ prop = of_get_property(np, prop_name, NULL);
+ if (cxl_verbose && prop)
+ pr_info("%s: %s\n", prop_name, (char *) prop);
+ return prop;
+}
+
+static const __be32 *read_prop_dword(const struct device_node *np,
+ const char *prop_name, u32 *val)
+{
+ const __be32 *prop;
+
+ prop = of_get_property(np, prop_name, NULL);
+ if (prop)
+ *val = be32_to_cpu(prop[0]);
+ if (cxl_verbose && prop)
+ pr_info("%s: %#x (%u)\n", prop_name, *val, *val);
+ return prop;
+}
+
+static const __be64 *read_prop64_dword(const struct device_node *np,
+ const char *prop_name, u64 *val)
+{
+ const __be64 *prop;
+
+ prop = of_get_property(np, prop_name, NULL);
+ if (prop)
+ *val = be64_to_cpu(prop[0]);
+ if (cxl_verbose && prop)
+ pr_info("%s: %#llx (%llu)\n", prop_name, *val, *val);
+ return prop;
+}
+
+
+static int read_handle(struct device_node *np, u64 *handle)
+{
+ const __be32 *prop;
+ u64 size;
+
+ /* Get address and size of the node */
+ prop = of_get_address(np, 0, &size, NULL);
+ if (!prop || size)
+ return -EINVAL;
+
+ /* Helper to read a big number; size is in cells (not bytes) */
+ *handle = of_read_number(prop, of_n_addr_cells(np));
+ return 0;
+}
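
(Editor's note, illustrative: the handle is the node's unit address, taken
from a "reg" entry that must have zero size. Assuming #address-cells = 2, a
property such as reg = <0x00000002 0x0fff0000> yields

	handle = ((u64)0x00000002 << 32) | 0x0fff0000 == 0x20fff0000

since of_read_number() concatenates the cells big-endian.)
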
+
+static int read_phys_addr(struct device_node *np, char *prop_name,
+ struct cxl_afu *afu)
+{
+ int i, len, entry_size, naddr, nsize, type;
+ u64 addr, size;
+ const __be32 *prop;
+
+ naddr = of_n_addr_cells(np);
+ nsize = of_n_size_cells(np);
+
+ prop = of_get_property(np, prop_name, &len);
+ if (prop) {
+ entry_size = naddr + nsize;
+ for (i = 0; i < (len / 4); i += entry_size, prop += entry_size) {
+ type = be32_to_cpu(prop[0]);
+ addr = of_read_number(prop, naddr);
+ size = of_read_number(&prop[naddr], nsize);
+ switch (type) {
+ case 0: /* unit address */
+ afu->guest->handle = addr;
+ break;
+ case 1: /* p2 area */
+ afu->guest->p2n_phys += addr;
+ afu->guest->p2n_size = size;
+ break;
+ case 2: /* problem state area */
+ afu->psn_phys += addr;
+ afu->adapter->ps_size = size;
+ break;
+ default:
+ pr_err("Invalid address type %d found in %s property of AFU\n",
+ type, prop_name);
+ return -EINVAL;
+ }
+ if (cxl_verbose)
+ pr_info("%s: %#x %#llx (size %#llx)\n",
+ prop_name, type, addr, size);
+ }
+ }
+ return 0;
+}
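
(Editor's note, illustrative: each "reg"/"assigned-addresses" entry is naddr
address cells followed by nsize size cells, and the first address cell
doubles as the entry type. With naddr = nsize = 2, an entry like

	<0x00000001 0x20000000  0x00000000 0x00010000>

parses as type = 1 (p2 area), addr = of_read_number() over both address
cells, size = 0x10000; the address is added to p2n_phys and the size
recorded.)
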
+
+static int read_vpd(struct cxl *adapter, struct cxl_afu *afu)
+{
+ char vpd[256];
+ int rc;
+ size_t len = sizeof(vpd);
+
+ memset(vpd, 0, len);
+
+ if (adapter)
+ rc = cxl_guest_read_adapter_vpd(adapter, vpd, len);
+ else
+ rc = cxl_guest_read_afu_vpd(afu, vpd, len);
+
+ if (rc > 0) {
+ cxl_dump_debug_buffer(vpd, rc);
+ rc = 0;
+ }
+ return rc;
+}
+
+int cxl_of_read_afu_handle(struct cxl_afu *afu, struct device_node *afu_np)
+{
+ if (read_handle(afu_np, &afu->guest->handle))
+ return -EINVAL;
+ pr_devel("AFU handle: 0x%.16llx\n", afu->guest->handle);
+
+ return 0;
+}
+
+int cxl_of_read_afu_properties(struct cxl_afu *afu, struct device_node *np)
+{
+ int i, len, rc;
+ char *p;
+ const __be32 *prop;
+ u16 device_id, vendor_id;
+ u32 val = 0, class_code;
+
+ /* Properties are read in the same order as listed in PAPR */
+
+ if (cxl_verbose) {
+ pr_info("Dump of the 'ibm,coherent-platform-function' node properties:\n");
+
+ prop = of_get_property(np, "compatible", &len);
+ i = 0;
+ while (prop && i < len) {
+ p = (char *) prop + i;
+ pr_info("compatible: %s\n", p);
+ i += strlen(p) + 1;
+ }
+ read_prop_string(np, "name");
+ }
+
+ rc = read_phys_addr(np, "reg", afu);
+ if (rc)
+ return rc;
+
+ rc = read_phys_addr(np, "assigned-addresses", afu);
+ if (rc)
+ return rc;
+
+ if (afu->psn_phys == 0)
+ afu->psa = false;
+ else
+ afu->psa = true;
+
+ if (cxl_verbose) {
+ read_prop_string(np, "ibm,loc-code");
+ read_prop_string(np, "device_type");
+ }
+
+ read_prop_dword(np, "ibm,#processes", &afu->max_procs_virtualised);
+
+ if (cxl_verbose) {
+ read_prop_dword(np, "ibm,scratchpad-size", &val);
+ read_prop_dword(np, "ibm,programmable", &val);
+ read_prop_string(np, "ibm,phandle");
+ read_vpd(NULL, afu);
+ }
+
+ read_prop_dword(np, "ibm,max-ints-per-process", &afu->guest->max_ints);
+ afu->irqs_max = afu->guest->max_ints;
+
+ prop = read_prop_dword(np, "ibm,min-ints-per-process", &afu->pp_irqs);
+ if (prop) {
+ /* One extra interrupt for the PSL interrupt is already
+ * included. Remove it now to keep only AFU interrupts and
+ * match the native case.
+ */
+ afu->pp_irqs--;
+ }
+
+ if (cxl_verbose) {
+ read_prop_dword(np, "ibm,max-ints", &val);
+ read_prop_dword(np, "ibm,vpd-size", &val);
+ }
+
+ read_prop64_dword(np, "ibm,error-buffer-size", &afu->eb_len);
+ afu->eb_offset = 0;
+
+ if (cxl_verbose)
+ read_prop_dword(np, "ibm,config-record-type", &val);
+
+ read_prop64_dword(np, "ibm,config-record-size", &afu->crs_len);
+ afu->crs_offset = 0;
+
+ read_prop_dword(np, "ibm,#config-records", &afu->crs_num);
+
+ if (cxl_verbose) {
+ for (i = 0; i < afu->crs_num; i++) {
+ rc = cxl_ops->afu_cr_read16(afu, i, PCI_DEVICE_ID,
+ &device_id);
+ if (!rc)
+ pr_info("record %d - device-id: %#x\n",
+ i, device_id);
+ rc = cxl_ops->afu_cr_read16(afu, i, PCI_VENDOR_ID,
+ &vendor_id);
+ if (!rc)
+ pr_info("record %d - vendor-id: %#x\n",
+ i, vendor_id);
+ rc = cxl_ops->afu_cr_read32(afu, i, PCI_CLASS_REVISION,
+ &class_code);
+ if (!rc) {
+ class_code >>= 8;
+ pr_info("record %d - class-code: %#x\n",
+ i, class_code);
+ }
+ }
+
+ read_prop_dword(np, "ibm,function-number", &val);
+ read_prop_dword(np, "ibm,privileged-function", &val);
+ read_prop_dword(np, "vendor-id", &val);
+ read_prop_dword(np, "device-id", &val);
+ read_prop_dword(np, "revision-id", &val);
+ read_prop_dword(np, "class-code", &val);
+ read_prop_dword(np, "subsystem-vendor-id", &val);
+ read_prop_dword(np, "subsystem-id", &val);
+ }
+ /*
+ * if "ibm,process-mmio" doesn't exist then per-process mmio is
+ * not supported
+ */
+ val = 0;
+ prop = read_prop_dword(np, "ibm,process-mmio", &val);
+ if (prop && val == 1)
+ afu->pp_psa = true;
+ else
+ afu->pp_psa = false;
+
+ if (cxl_verbose) {
+ read_prop_dword(np, "ibm,supports-aur", &val);
+ read_prop_dword(np, "ibm,supports-csrp", &val);
+ read_prop_dword(np, "ibm,supports-prr", &val);
+ }
+
+ prop = read_prop_dword(np, "ibm,function-error-interrupt", &val);
+ if (prop)
+ afu->serr_hwirq = val;
+
+ pr_devel("AFU handle: %#llx\n", afu->guest->handle);
+ pr_devel("p2n_phys: %#llx (size %#llx)\n",
+ afu->guest->p2n_phys, afu->guest->p2n_size);
+ pr_devel("psn_phys: %#llx (size %#llx)\n",
+ afu->psn_phys, afu->adapter->ps_size);
+ pr_devel("Max number of processes virtualised=%i\n",
+ afu->max_procs_virtualised);
+ pr_devel("Per-process irqs min=%i, max=%i\n", afu->pp_irqs,
+ afu->irqs_max);
+ pr_devel("Slice error interrupt=%#lx\n", afu->serr_hwirq);
+
+ return 0;
+}
+
+static int read_adapter_irq_config(struct cxl *adapter, struct device_node *np)
+{
+ const __be32 *ranges;
+ int len, nranges, i;
+ struct irq_avail *cur;
+
+ ranges = of_get_property(np, "interrupt-ranges", &len);
+ if (ranges == NULL || len < (2 * sizeof(int)))
+ return -EINVAL;
+
+ /*
+ * encoded array of two cells per entry, each cell encoded as
+ * with encode-int
+ */
+ nranges = len / (2 * sizeof(int));
+ if (nranges == 0 || (nranges * 2 * sizeof(int)) != len)
+ return -EINVAL;
+
+ adapter->guest->irq_avail = kzalloc(nranges * sizeof(struct irq_avail),
+ GFP_KERNEL);
+ if (adapter->guest->irq_avail == NULL)
+ return -ENOMEM;
+
+ adapter->guest->irq_base_offset = be32_to_cpu(ranges[0]);
+ for (i = 0; i < nranges; i++) {
+ cur = &adapter->guest->irq_avail[i];
+ cur->offset = be32_to_cpu(ranges[i * 2]);
+ cur->range = be32_to_cpu(ranges[i * 2 + 1]);
+ cur->bitmap = kcalloc(BITS_TO_LONGS(cur->range),
+ sizeof(*cur->bitmap), GFP_KERNEL);
+ if (cur->bitmap == NULL)
+ goto err;
+ if (cur->offset < adapter->guest->irq_base_offset)
+ adapter->guest->irq_base_offset = cur->offset;
+ if (cxl_verbose)
+ pr_info("available IRQ range: %#lx-%#lx (%lu)\n",
+ cur->offset, cur->offset + cur->range - 1,
+ cur->range);
+ }
+ adapter->guest->irq_nranges = nranges;
+ spin_lock_init(&adapter->guest->irq_alloc_lock);
+
+ return 0;
+err:
+ for (i--; i >= 0; i--) {
+ cur = &adapter->guest->irq_avail[i];
+ kfree(cur->bitmap);
+ }
+ kfree(adapter->guest->irq_avail);
+ adapter->guest->irq_avail = NULL;
+ return -ENOMEM;
+}
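
(Editor's note, illustrative: "interrupt-ranges" is a flat list of
<offset range> cell pairs. For example,

	interrupt-ranges = <0x100 0x10  0x200 0x8>;

describes two ranges, hwirqs 0x100-0x10f and 0x200-0x207; each range gets
its own allocation bitmap, and irq_base_offset ends up as the smallest
offset, here 0x100.)
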
+
+int cxl_of_read_adapter_handle(struct cxl *adapter, struct device_node *np)
+{
+ if (read_handle(np, &adapter->guest->handle))
+ return -EINVAL;
+ pr_devel("Adapter handle: 0x%.16llx\n", adapter->guest->handle);
+
+ return 0;
+}
+
+int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
+{
+ int rc, len, naddr, i;
+ char *p;
+ const __be32 *prop;
+ u32 val = 0;
+
+ /* Properties are read in the same order as listed in PAPR */
+
+ naddr = of_n_addr_cells(np);
+
+ if (cxl_verbose) {
+ pr_info("Dump of the 'ibm,coherent-platform-facility' node properties:\n");
+
+ read_prop_dword(np, "#address-cells", &val);
+ read_prop_dword(np, "#size-cells", &val);
+
+ prop = of_get_property(np, "compatible", &len);
+ i = 0;
+ while (prop && i < len) {
+ p = (char *) prop + i;
+ pr_info("compatible: %s\n", p);
+ i += strlen(p) + 1;
+ }
+ read_prop_string(np, "name");
+ read_prop_string(np, "model");
+
+ prop = of_get_property(np, "reg", NULL);
+ if (prop) {
+ pr_info("reg: addr:%#llx size:%#x\n",
+ of_read_number(prop, naddr),
+ be32_to_cpu(prop[naddr]));
+ }
+
+ read_prop_string(np, "ibm,loc-code");
+ }
+
+ if ((rc = read_adapter_irq_config(adapter, np)))
+ return rc;
+
+ if (cxl_verbose) {
+ read_prop_string(np, "device_type");
+ read_prop_string(np, "ibm,phandle");
+ }
+
+ prop = read_prop_dword(np, "ibm,caia-version", &val);
+ if (prop) {
+ adapter->caia_major = (val & 0xFF00) >> 8;
+ adapter->caia_minor = val & 0xFF;
+ }
+
+ prop = read_prop_dword(np, "ibm,psl-revision", &val);
+ if (prop)
+ adapter->psl_rev = val;
+
+ prop = read_prop_string(np, "status");
+ if (prop) {
+ adapter->guest->status = kasprintf(GFP_KERNEL, "%s", (char *) prop);
+ if (adapter->guest->status == NULL)
+ return -ENOMEM;
+ }
+
+ prop = read_prop_dword(np, "vendor-id", &val);
+ if (prop)
+ adapter->guest->vendor = val;
+
+ prop = read_prop_dword(np, "device-id", &val);
+ if (prop)
+ adapter->guest->device = val;
+
+ if (cxl_verbose) {
+ read_prop_dword(np, "ibm,privileged-facility", &val);
+ read_prop_dword(np, "revision-id", &val);
+ read_prop_dword(np, "class-code", &val);
+ }
+
+ prop = read_prop_dword(np, "subsystem-vendor-id", &val);
+ if (prop)
+ adapter->guest->subsystem_vendor = val;
+
+ prop = read_prop_dword(np, "subsystem-id", &val);
+ if (prop)
+ adapter->guest->subsystem = val;
+
+ if (cxl_verbose)
+ read_vpd(adapter, NULL);
+
+ return 0;
+}
+
+static int cxl_of_remove(struct platform_device *pdev)
+{
+ struct cxl *adapter;
+ int afu;
+
+ adapter = dev_get_drvdata(&pdev->dev);
+ for (afu = 0; afu < adapter->slices; afu++)
+ cxl_guest_remove_afu(adapter->afu[afu]);
+
+ cxl_guest_remove_adapter(adapter);
+ return 0;
+}
+
+static void cxl_of_shutdown(struct platform_device *pdev)
+{
+ cxl_of_remove(pdev);
+}
+
+int cxl_of_probe(struct platform_device *pdev)
+{
+ struct device_node *np = NULL;
+ struct device_node *afu_np = NULL;
+ struct cxl *adapter = NULL;
+ int ret;
+ int slice, slice_ok;
+
+ pr_devel("in %s\n", __func__);
+
+ np = pdev->dev.of_node;
+ if (np == NULL)
+ return -ENODEV;
+
+ /* init adapter */
+ adapter = cxl_guest_init_adapter(np, pdev);
+ if (IS_ERR(adapter)) {
+ dev_err(&pdev->dev, "guest_init_adapter failed: %li\n", PTR_ERR(adapter));
+ return PTR_ERR(adapter);
+ }
+
+ /* init afu */
+ slice_ok = 0;
+ for (afu_np = NULL, slice = 0; (afu_np = of_get_next_child(np, afu_np)); slice++) {
+ if ((ret = cxl_guest_init_afu(adapter, slice, afu_np)))
+ dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n",
+ slice, ret);
+ else
+ slice_ok++;
+ }
+
+ if (slice_ok == 0) {
+ dev_info(&pdev->dev, "No active AFU\n");
+ adapter->slices = 0;
+ }
+
+ if (afu_np)
+ of_node_put(afu_np);
+ return 0;
+}
+
+static const struct of_device_id cxl_of_match[] = {
+ { .compatible = "ibm,coherent-platform-facility",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, cxl_of_match);
+
+struct platform_driver cxl_of_driver = {
+ .driver = {
+ .name = "cxl_of",
+ .of_match_table = cxl_of_match,
+ .owner = THIS_MODULE
+ },
+ .probe = cxl_of_probe,
+ .remove = cxl_of_remove,
+ .shutdown = cxl_of_shutdown,
+};
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 0c6c17a1c59e..2844e975bf79 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -19,7 +19,6 @@
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
-#include <asm/pci-bridge.h> /* for struct pci_controller */
#include <asm/pnv-pci.h>
#include <asm/io.h>
@@ -90,8 +89,8 @@
 /* This works a little differently from the p1/p2 register accesses, to make
 * it easier to pull out individual fields */
-#define AFUD_READ(afu, off) in_be64(afu->afu_desc_mmio + off)
-#define AFUD_READ_LE(afu, off) in_le64(afu->afu_desc_mmio + off)
+#define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
+#define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
@@ -116,24 +115,6 @@
#define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
-u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val;
-
- val = cxl_afu_cr_read32(afu, cr, aligned_off);
- return (val >> ((off & 0x2) * 8)) & 0xffff;
-}
-
-u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
-{
- u64 aligned_off = off & ~0x3L;
- u32 val;
-
- val = cxl_afu_cr_read32(afu, cr, aligned_off);
- return (val >> ((off & 0x3) * 8)) & 0xff;
-}
-
static const struct pci_device_id cxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
@@ -433,8 +414,8 @@ static int init_implementation_afu_regs(struct cxl_afu *afu)
return 0;
}
-int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
- unsigned int virq)
+int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
+ unsigned int virq)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
@@ -476,28 +457,30 @@ int cxl_update_image_control(struct cxl *adapter)
return 0;
}
-int cxl_alloc_one_irq(struct cxl *adapter)
+int cxl_pci_alloc_one_irq(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_alloc_hwirqs(dev, 1);
}
-void cxl_release_one_irq(struct cxl *adapter, int hwirq)
+void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}
-int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num)
+int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter, unsigned int num)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}
-void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
+void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
+ struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
@@ -558,7 +541,7 @@ static int switch_card_to_cxl(struct pci_dev *dev)
return 0;
}
-static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
+static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
u64 p1n_base, p2n_base, afu_desc;
const u64 p1n_size = 0x100;
@@ -566,15 +549,15 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p
p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
p2n_base = p2_base(dev) + (afu->slice * p2n_size);
- afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
- afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);
+ afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
+ afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);
- if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
+ if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
goto err;
if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
goto err1;
if (afu_desc) {
- if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
+ if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
goto err2;
}
@@ -582,62 +565,41 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p
err2:
iounmap(afu->p2n_mmio);
err1:
- iounmap(afu->p1n_mmio);
+ iounmap(afu->native->p1n_mmio);
err:
dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
return -ENOMEM;
}
-static void cxl_unmap_slice_regs(struct cxl_afu *afu)
+static void pci_unmap_slice_regs(struct cxl_afu *afu)
{
if (afu->p2n_mmio) {
iounmap(afu->p2n_mmio);
afu->p2n_mmio = NULL;
}
- if (afu->p1n_mmio) {
- iounmap(afu->p1n_mmio);
- afu->p1n_mmio = NULL;
+ if (afu->native->p1n_mmio) {
+ iounmap(afu->native->p1n_mmio);
+ afu->native->p1n_mmio = NULL;
}
- if (afu->afu_desc_mmio) {
- iounmap(afu->afu_desc_mmio);
- afu->afu_desc_mmio = NULL;
+ if (afu->native->afu_desc_mmio) {
+ iounmap(afu->native->afu_desc_mmio);
+ afu->native->afu_desc_mmio = NULL;
}
}
-static void cxl_release_afu(struct device *dev)
+void cxl_pci_release_afu(struct device *dev)
{
struct cxl_afu *afu = to_cxl_afu(dev);
- pr_devel("cxl_release_afu\n");
+ pr_devel("%s\n", __func__);
idr_destroy(&afu->contexts_idr);
cxl_release_spa(afu);
+ kfree(afu->native);
kfree(afu);
}
-static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
-{
- struct cxl_afu *afu;
-
- if (!(afu = kzalloc(sizeof(struct cxl_afu), GFP_KERNEL)))
- return NULL;
-
- afu->adapter = adapter;
- afu->dev.parent = &adapter->dev;
- afu->dev.release = cxl_release_afu;
- afu->slice = slice;
- idr_init(&afu->contexts_idr);
- mutex_init(&afu->contexts_lock);
- spin_lock_init(&afu->afu_cntl_lock);
- mutex_init(&afu->spa_mutex);
-
- afu->prefault_mode = CXL_PREFAULT_NONE;
- afu->irqs_max = afu->adapter->user_irqs;
-
- return afu;
-}
-
/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
@@ -659,7 +621,7 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
afu->psa = AFUD_PPPSA_PSA(val);
if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
- afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);
+ afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);
val = AFUD_READ_CR(afu);
afu->crs_len = AFUD_CR_LEN(val) * 256;
@@ -686,10 +648,11 @@ static int cxl_read_afu_descriptor(struct cxl_afu *afu)
static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
- int i;
+ int i, rc;
+ u32 val;
if (afu->psa && afu->adapter->ps_size <
- (afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
+ (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
return -ENODEV;
}
@@ -698,7 +661,8 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!");
for (i = 0; i < afu->crs_num; i++) {
- if ((cxl_afu_cr_read32(afu, i, 0) == 0)) {
+ rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
+ if (rc || val == 0) {
dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
return -EINVAL;
}
@@ -719,7 +683,7 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
- if (__cxl_afu_reset(afu))
+ if (cxl_ops->afu_reset(afu))
return -EIO;
if (cxl_afu_disable(afu))
return -EIO;
@@ -767,13 +731,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
 * 4/8 byte aligned access. So in case the requested offset/count aren't 8 byte
 * aligned, the function uses a bounce buffer which can be at most PAGE_SIZE.
*/
-ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
+ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
loff_t off, size_t count)
{
loff_t aligned_start, aligned_end;
size_t aligned_length;
void *tbuf;
- const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;
+ const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;
if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
return 0;
@@ -804,18 +768,18 @@ ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
return count;
}
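
(Editor's note, illustrative, assuming the elided body rounds the start down
and the end up to 8 bytes: a request of off = 5, count = 7 becomes
aligned_start = 0, aligned_end = 16, aligned_length = 16, so 16 bytes are
copied through the bounce buffer and the caller's 7 bytes are taken from
tbuf + 5.)
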
-static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
+static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
int rc;
- if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
+ if ((rc = pci_map_slice_regs(afu, adapter, dev)))
return rc;
if ((rc = sanitise_afu_regs(afu)))
goto err1;
/* We need to reset the AFU before we can read the AFU descriptor */
- if ((rc = __cxl_afu_reset(afu)))
+ if ((rc = cxl_ops->afu_reset(afu)))
goto err1;
if (cxl_verbose)
@@ -830,44 +794,50 @@ static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
if ((rc = init_implementation_afu_regs(afu)))
goto err1;
- if ((rc = cxl_register_serr_irq(afu)))
+ if ((rc = cxl_native_register_serr_irq(afu)))
goto err1;
- if ((rc = cxl_register_psl_irq(afu)))
+ if ((rc = cxl_native_register_psl_irq(afu)))
goto err2;
return 0;
err2:
- cxl_release_serr_irq(afu);
+ cxl_native_release_serr_irq(afu);
err1:
- cxl_unmap_slice_regs(afu);
+ pci_unmap_slice_regs(afu);
return rc;
}
-static void cxl_deconfigure_afu(struct cxl_afu *afu)
+static void pci_deconfigure_afu(struct cxl_afu *afu)
{
- cxl_release_psl_irq(afu);
- cxl_release_serr_irq(afu);
- cxl_unmap_slice_regs(afu);
+ cxl_native_release_psl_irq(afu);
+ cxl_native_release_serr_irq(afu);
+ pci_unmap_slice_regs(afu);
}
-static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
struct cxl_afu *afu;
- int rc;
+ int rc = -ENOMEM;
afu = cxl_alloc_afu(adapter, slice);
if (!afu)
return -ENOMEM;
+ afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
+ if (!afu->native)
+ goto err_free_afu;
+
+ mutex_init(&afu->native->spa_mutex);
+
rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
if (rc)
- goto err_free;
+ goto err_free_native;
- rc = cxl_configure_afu(afu, adapter, dev);
+ rc = pci_configure_afu(afu, adapter, dev);
if (rc)
- goto err_free;
+ goto err_free_native;
/* Don't care if this fails */
cxl_debugfs_afu_add(afu);
@@ -890,24 +860,27 @@ static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
return 0;
err_put1:
- cxl_deconfigure_afu(afu);
+ pci_deconfigure_afu(afu);
cxl_debugfs_afu_remove(afu);
device_unregister(&afu->dev);
return rc;
-err_free:
+err_free_native:
+ kfree(afu->native);
+err_free_afu:
kfree(afu);
return rc;
}
-static void cxl_remove_afu(struct cxl_afu *afu)
+static void cxl_pci_remove_afu(struct cxl_afu *afu)
{
- pr_devel("cxl_remove_afu\n");
+ pr_devel("%s\n", __func__);
if (!afu)
return;
+ cxl_pci_vphb_remove(afu);
cxl_sysfs_afu_remove(afu);
cxl_debugfs_afu_remove(afu);
@@ -916,13 +889,13 @@ static void cxl_remove_afu(struct cxl_afu *afu)
spin_unlock(&afu->adapter->afu_list_lock);
cxl_context_detach_all(afu);
- cxl_afu_deactivate_mode(afu);
+ cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
- cxl_deconfigure_afu(afu);
+ pci_deconfigure_afu(afu);
device_unregister(&afu->dev);
}
-int cxl_reset(struct cxl *adapter)
+int cxl_pci_reset(struct cxl *adapter)
{
struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
int rc;
@@ -956,17 +929,17 @@ static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
- if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
+ if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
goto err3;
- if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
+ if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
goto err4;
return 0;
err4:
- iounmap(adapter->p1_mmio);
- adapter->p1_mmio = NULL;
+ iounmap(adapter->native->p1_mmio);
+ adapter->native->p1_mmio = NULL;
err3:
pci_release_region(dev, 0);
err2:
@@ -977,14 +950,14 @@ err1:
static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
- if (adapter->p1_mmio) {
- iounmap(adapter->p1_mmio);
- adapter->p1_mmio = NULL;
+ if (adapter->native->p1_mmio) {
+ iounmap(adapter->native->p1_mmio);
+ adapter->native->p1_mmio = NULL;
pci_release_region(to_pci_dev(adapter->dev.parent), 2);
}
- if (adapter->p2_mmio) {
- iounmap(adapter->p2_mmio);
- adapter->p2_mmio = NULL;
+ if (adapter->native->p2_mmio) {
+ iounmap(adapter->native->p2_mmio);
+ adapter->native->p2_mmio = NULL;
pci_release_region(to_pci_dev(adapter->dev.parent), 0);
}
}
@@ -1025,10 +998,10 @@ static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
/* Convert everything to bytes, because there is NO WAY I'd look at the
* code a month later and forget what units these are in ;-) */
- adapter->ps_off = ps_off * 64 * 1024;
+ adapter->native->ps_off = ps_off * 64 * 1024;
adapter->ps_size = ps_size * 64 * 1024;
- adapter->afu_desc_off = afu_desc_off * 64 * 1024;
- adapter->afu_desc_size = afu_desc_size *64 * 1024;
+ adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
+ adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;
/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
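
(Editor's note, illustrative: e.g. a card reporting 2037 hwirqs with 2
slices leaves user_irqs = 2037 - 1 - 2*2 = 2032 for userspace contexts.)
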
@@ -1079,21 +1052,26 @@ static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
return -EINVAL;
}
- if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
+ if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
return -EINVAL;
}
- if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
+ if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
dev_err(&dev->dev, "ABORTING: Problem state size larger than "
"available in BAR2: 0x%llx > 0x%llx\n",
- adapter->ps_size, p2_size(dev) - adapter->ps_off);
+ adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
return -EINVAL;
}
return 0;
}
+ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
+{
+ return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
+}
+
static void cxl_release_adapter(struct device *dev)
{
struct cxl *adapter = to_cxl_adapter(dev);
@@ -1102,33 +1080,10 @@ static void cxl_release_adapter(struct device *dev)
cxl_remove_adapter_nr(adapter);
+ kfree(adapter->native);
kfree(adapter);
}
-static struct cxl *cxl_alloc_adapter(void)
-{
- struct cxl *adapter;
-
- if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
- return NULL;
-
- spin_lock_init(&adapter->afu_list_lock);
-
- if (cxl_alloc_adapter_nr(adapter))
- goto err1;
-
- if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
- goto err2;
-
- return adapter;
-
-err2:
- cxl_remove_adapter_nr(adapter);
-err1:
- kfree(adapter);
- return NULL;
-}
-
#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
static int sanitise_adapter_regs(struct cxl *adapter)
@@ -1192,7 +1147,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = cxl_setup_psl_timebase(adapter, dev)))
goto err;
- if ((rc = cxl_register_psl_err_irq(adapter)))
+ if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
return 0;
@@ -1207,13 +1162,13 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
{
struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
- cxl_release_psl_err_irq(adapter);
+ cxl_native_release_psl_err_irq(adapter);
cxl_unmap_adapter_regs(adapter);
pci_disable_device(pdev);
}
-static struct cxl *cxl_init_adapter(struct pci_dev *dev)
+static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
struct cxl *adapter;
int rc;
@@ -1222,6 +1177,12 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
if (!adapter)
return ERR_PTR(-ENOMEM);
+ adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
+ if (!adapter->native) {
+ rc = -ENOMEM;
+ goto err_release;
+ }
+
/* Set defaults for parameters which need to persist over
* configure/reconfigure
*/
@@ -1231,8 +1192,7 @@ static struct cxl *cxl_init_adapter(struct pci_dev *dev)
rc = cxl_configure_adapter(adapter, dev);
if (rc) {
pci_disable_device(dev);
- cxl_release_adapter(&adapter->dev);
- return ERR_PTR(rc);
+ goto err_release;
}
/* Don't care if this one fails: */
@@ -1258,9 +1218,13 @@ err_put1:
cxl_deconfigure_adapter(adapter);
device_unregister(&adapter->dev);
return ERR_PTR(rc);
+
+err_release:
+ cxl_release_adapter(&adapter->dev);
+ return ERR_PTR(rc);
}
-static void cxl_remove_adapter(struct cxl *adapter)
+static void cxl_pci_remove_adapter(struct cxl *adapter)
{
pr_devel("cxl_remove_adapter\n");
@@ -1278,17 +1242,22 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
int slice;
int rc;
+ if (cxl_pci_is_vphb_device(dev)) {
+ dev_dbg(&dev->dev, "cxl_probe: ignoring cxl vphb device\n");
+ return -ENODEV;
+ }
+
if (cxl_verbose)
dump_cxl_config_space(dev);
- adapter = cxl_init_adapter(dev);
+ adapter = cxl_pci_init_adapter(dev);
if (IS_ERR(adapter)) {
dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
return PTR_ERR(adapter);
}
for (slice = 0; slice < adapter->slices; slice++) {
- if ((rc = cxl_init_afu(adapter, slice, dev))) {
+ if ((rc = pci_init_afu(adapter, slice, dev))) {
dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
continue;
}
@@ -1313,10 +1282,9 @@ static void cxl_remove(struct pci_dev *dev)
*/
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
- cxl_pci_vphb_remove(afu);
- cxl_remove_afu(afu);
+ cxl_pci_remove_afu(afu);
}
- cxl_remove_adapter(adapter);
+ cxl_pci_remove_adapter(adapter);
}
static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
@@ -1462,8 +1430,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
return result;
cxl_context_detach_all(afu);
- cxl_afu_deactivate_mode(afu);
- cxl_deconfigure_afu(afu);
+ cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
+ pci_deconfigure_afu(afu);
}
cxl_deconfigure_adapter(adapter);
@@ -1486,14 +1454,12 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
for (i = 0; i < adapter->slices; i++) {
afu = adapter->afu[i];
- if (cxl_configure_afu(afu, adapter, pdev))
+ if (pci_configure_afu(afu, adapter, pdev))
goto err;
if (cxl_afu_select_best_mode(afu))
goto err;
- cxl_pci_vphb_reconfigure(afu);
-
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
/* Reset the device context.
* TODO: make this less disruptive
@@ -1509,7 +1475,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
afu_dev->dev.archdata.cxl_ctx = ctx;
- if (cxl_afu_check_and_enable(afu))
+ if (cxl_ops->afu_check_and_enable(afu))
goto err;
afu_dev->error_state = pci_channel_io_normal;
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 02006f7109a8..25913c08794c 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -69,7 +69,7 @@ static ssize_t reset_adapter_store(struct device *device,
if ((rc != 1) || (val != 1))
return -EINVAL;
- if ((rc = cxl_reset(adapter)))
+ if ((rc = cxl_ops->adapter_reset(adapter)))
return rc;
return count;
}
@@ -165,7 +165,7 @@ static ssize_t pp_mmio_off_show(struct device *device,
{
struct cxl_afu *afu = to_afu_chardev_m(device);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->pp_offset);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", afu->native->pp_offset);
}
static ssize_t pp_mmio_len_show(struct device *device,
@@ -211,7 +211,7 @@ static ssize_t reset_store_afu(struct device *device,
goto err;
}
- if ((rc = __cxl_afu_reset(afu)))
+ if ((rc = cxl_ops->afu_reset(afu)))
goto err;
rc = count;
@@ -253,8 +253,14 @@ static ssize_t irqs_max_store(struct device *device,
if (irqs_max < afu->pp_irqs)
return -EINVAL;
- if (irqs_max > afu->adapter->user_irqs)
- return -EINVAL;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (irqs_max > afu->adapter->user_irqs)
+ return -EINVAL;
+ } else {
+ /* pHyp sets a per-AFU limit */
+ if (irqs_max > afu->guest->max_ints)
+ return -EINVAL;
+ }
afu->irqs_max = irqs_max;
return count;
@@ -348,7 +354,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
}
/*
- * cxl_afu_deactivate_mode needs to be done outside the lock, prevent
+ * afu_deactivate_mode needs to be done outside the lock, prevent
* other contexts coming in before we are ready:
*/
old_mode = afu->current_mode;
@@ -357,9 +363,9 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
mutex_unlock(&afu->contexts_lock);
- if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
+ if ((rc = cxl_ops->afu_deactivate_mode(afu, old_mode)))
return rc;
- if ((rc = cxl_afu_activate_mode(afu, mode)))
+ if ((rc = cxl_ops->afu_activate_mode(afu, mode)))
return rc;
return count;
@@ -386,10 +392,9 @@ static ssize_t afu_eb_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
- struct cxl_afu *afu = to_cxl_afu(container_of(kobj,
- struct device, kobj));
+ struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj));
- return cxl_afu_read_err_buffer(afu, buf, off, count);
+ return cxl_ops->afu_read_err_buffer(afu, buf, off, count);
}
static struct device_attribute afu_attrs[] = {
@@ -406,24 +411,39 @@ static struct device_attribute afu_attrs[] = {
int cxl_sysfs_adapter_add(struct cxl *adapter)
{
+ struct device_attribute *dev_attr;
int i, rc;
for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
- if ((rc = device_create_file(&adapter->dev, &adapter_attrs[i])))
- goto err;
+ dev_attr = &adapter_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_ADAPTER_ATTRS)) {
+ if ((rc = device_create_file(&adapter->dev, dev_attr)))
+ goto err;
+ }
}
return 0;
err:
- for (i--; i >= 0; i--)
- device_remove_file(&adapter->dev, &adapter_attrs[i]);
+ for (i--; i >= 0; i--) {
+ dev_attr = &adapter_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_ADAPTER_ATTRS))
+ device_remove_file(&adapter->dev, dev_attr);
+ }
return rc;
}
+
void cxl_sysfs_adapter_remove(struct cxl *adapter)
{
+ struct device_attribute *dev_attr;
int i;
- for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++)
- device_remove_file(&adapter->dev, &adapter_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(adapter_attrs); i++) {
+ dev_attr = &adapter_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_ADAPTER_ATTRS))
+ device_remove_file(&adapter->dev, dev_attr);
+ }
}
struct afu_config_record {
@@ -467,12 +487,14 @@ static ssize_t afu_read_config(struct file *filp, struct kobject *kobj,
loff_t off, size_t count)
{
struct afu_config_record *cr = to_cr(kobj);
- struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
+ struct cxl_afu *afu = to_cxl_afu(kobj_to_dev(kobj->parent));
- u64 i, j, val;
+ u64 i, j, val;
+ int rc;
for (i = 0; i < count;) {
- val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);
+ rc = cxl_ops->afu_cr_read64(afu, cr->cr, off & ~0x7, &val);
+ if (rc)
+ val = ~0ULL;
for (j = off & 0x7; j < 8 && i < count; i++, j++, off++)
buf[i] = (val >> (j * 8)) & 0xff;
}
@@ -517,14 +539,22 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c
return ERR_PTR(-ENOMEM);
cr->cr = cr_idx;
- cr->device = cxl_afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID);
- cr->vendor = cxl_afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID);
- cr->class = cxl_afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION) >> 8;
+
+ rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_DEVICE_ID, &cr->device);
+ if (rc)
+ goto err;
+ rc = cxl_ops->afu_cr_read16(afu, cr_idx, PCI_VENDOR_ID, &cr->vendor);
+ if (rc)
+ goto err;
+ rc = cxl_ops->afu_cr_read32(afu, cr_idx, PCI_CLASS_REVISION, &cr->class);
+ if (rc)
+ goto err;
+ cr->class >>= 8;
/*
* Export raw AFU PCIe like config record. For now this is read only by
* root - we can expand that later to be readable by non-root and maybe
- * even writable provided we have a good use-case. Once we suport
+ * even writable provided we have a good use-case. Once we support
* exposing AFUs through a virtual PHB they will get that for free from
* Linux' PCI infrastructure, but until then it's not clear that we
* need it for anything since the main use case is just identifying
@@ -562,6 +592,7 @@ err:
void cxl_sysfs_afu_remove(struct cxl_afu *afu)
{
+ struct device_attribute *dev_attr;
struct afu_config_record *cr, *tmp;
int i;
@@ -569,8 +600,12 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu)
if (afu->eb_len)
device_remove_bin_file(&afu->dev, &afu->attr_eb);
- for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
- device_remove_file(&afu->dev, &afu_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
+ dev_attr = &afu_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_ATTRS))
+ device_remove_file(&afu->dev, dev_attr);
+ }
list_for_each_entry_safe(cr, tmp, &afu->crs, list) {
sysfs_remove_bin_file(&cr->kobj, &cr->config_attr);
@@ -580,14 +615,19 @@ void cxl_sysfs_afu_remove(struct cxl_afu *afu)
int cxl_sysfs_afu_add(struct cxl_afu *afu)
{
+ struct device_attribute *dev_attr;
struct afu_config_record *cr;
int i, rc;
INIT_LIST_HEAD(&afu->crs);
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
- if ((rc = device_create_file(&afu->dev, &afu_attrs[i])))
- goto err;
+ dev_attr = &afu_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_ATTRS)) {
+ if ((rc = device_create_file(&afu->dev, dev_attr)))
+ goto err;
+ }
}
 /* conditionally create the binary file for the error info buffer */
@@ -626,32 +666,50 @@ err:
 /* reset the eb_len as we haven't created the bin attr */
afu->eb_len = 0;
- for (i--; i >= 0; i--)
+ for (i--; i >= 0; i--) {
+ dev_attr = &afu_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_ATTRS))
device_remove_file(&afu->dev, &afu_attrs[i]);
+ }
return rc;
}
int cxl_sysfs_afu_m_add(struct cxl_afu *afu)
{
+ struct device_attribute *dev_attr;
int i, rc;
for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
- if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
- goto err;
+ dev_attr = &afu_master_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_MASTER_ATTRS)) {
+ if ((rc = device_create_file(afu->chardev_m, &afu_master_attrs[i])))
+ goto err;
+ }
}
return 0;
err:
- for (i--; i >= 0; i--)
- device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ for (i--; i >= 0; i--) {
+ dev_attr = &afu_master_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_MASTER_ATTRS))
+ device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ }
return rc;
}
void cxl_sysfs_afu_m_remove(struct cxl_afu *afu)
{
+ struct device_attribute *dev_attr;
int i;
- for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++)
- device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(afu_master_attrs); i++) {
+ dev_attr = &afu_master_attrs[i];
+ if (cxl_ops->support_attributes(dev_attr->attr.name,
+ CXL_AFU_MASTER_ATTRS))
+ device_remove_file(afu->chardev_m, &afu_master_attrs[i]);
+ }
}
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index 6e1e2adfba8e..751d6119683e 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -450,6 +450,199 @@ DEFINE_EVENT(cxl_pe_class, cxl_slbia,
TP_ARGS(ctx)
);
+TRACE_EVENT(cxl_hcall,
+ TP_PROTO(u64 unit_address, u64 process_token, long rc),
+
+ TP_ARGS(unit_address, process_token, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(u64, process_token)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->process_token = process_token;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("unit_address=0x%016llx process_token=0x%016llx rc=%li",
+ __entry->unit_address,
+ __entry->process_token,
+ __entry->rc
+ )
+);
+
+TRACE_EVENT(cxl_hcall_control,
+ TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+ u64 p4, unsigned long r4, long rc),
+
+ TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(char *, fct)
+ __field(u64, p1)
+ __field(u64, p2)
+ __field(u64, p3)
+ __field(u64, p4)
+ __field(unsigned long, r4)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->fct = fct;
+ __entry->p1 = p1;
+ __entry->p2 = p2;
+ __entry->p3 = p3;
+ __entry->p4 = p4;
+ __entry->r4 = r4;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("unit_address=%#.16llx %s(%#llx, %#llx, %#llx, %#llx, R4: %#lx)): %li",
+ __entry->unit_address,
+ __entry->fct,
+ __entry->p1,
+ __entry->p2,
+ __entry->p3,
+ __entry->p4,
+ __entry->r4,
+ __entry->rc
+ )
+);
+
+TRACE_EVENT(cxl_hcall_attach,
+ TP_PROTO(u64 unit_address, u64 phys_addr, unsigned long process_token,
+ unsigned long mmio_addr, unsigned long mmio_size, long rc),
+
+ TP_ARGS(unit_address, phys_addr, process_token,
+ mmio_addr, mmio_size, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(u64, phys_addr)
+ __field(unsigned long, process_token)
+ __field(unsigned long, mmio_addr)
+ __field(unsigned long, mmio_size)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->phys_addr = phys_addr;
+ __entry->process_token = process_token;
+ __entry->mmio_addr = mmio_addr;
+ __entry->mmio_size = mmio_size;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("unit_address=0x%016llx phys_addr=0x%016llx "
+ "token=0x%.8lx mmio_addr=0x%lx mmio_size=0x%lx rc=%li",
+ __entry->unit_address,
+ __entry->phys_addr,
+ __entry->process_token,
+ __entry->mmio_addr,
+ __entry->mmio_size,
+ __entry->rc
+ )
+);
+
+DEFINE_EVENT(cxl_hcall, cxl_hcall_detach,
+ TP_PROTO(u64 unit_address, u64 process_token, long rc),
+ TP_ARGS(unit_address, process_token, rc)
+);
+
+DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_function,
+ TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+ u64 p4, unsigned long r4, long rc),
+ TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
+);
+
+DEFINE_EVENT(cxl_hcall, cxl_hcall_collect_int_info,
+ TP_PROTO(u64 unit_address, u64 process_token, long rc),
+ TP_ARGS(unit_address, process_token, rc)
+);
+
+TRACE_EVENT(cxl_hcall_control_faults,
+ TP_PROTO(u64 unit_address, u64 process_token,
+ u64 control_mask, u64 reset_mask, unsigned long r4,
+ long rc),
+
+ TP_ARGS(unit_address, process_token,
+ control_mask, reset_mask, r4, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(u64, process_token)
+ __field(u64, control_mask)
+ __field(u64, reset_mask)
+ __field(unsigned long, r4)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->process_token = process_token;
+ __entry->control_mask = control_mask;
+ __entry->reset_mask = reset_mask;
+ __entry->r4 = r4;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("unit_address=0x%016llx process_token=0x%llx "
+ "control_mask=%#llx reset_mask=%#llx r4=%#lx rc=%li",
+ __entry->unit_address,
+ __entry->process_token,
+ __entry->control_mask,
+ __entry->reset_mask,
+ __entry->r4,
+ __entry->rc
+ )
+);
+
+DEFINE_EVENT(cxl_hcall_control, cxl_hcall_control_facility,
+ TP_PROTO(u64 unit_address, char *fct, u64 p1, u64 p2, u64 p3,
+ u64 p4, unsigned long r4, long rc),
+ TP_ARGS(unit_address, fct, p1, p2, p3, p4, r4, rc)
+);
+
+TRACE_EVENT(cxl_hcall_download_facility,
+ TP_PROTO(u64 unit_address, char *fct, u64 list_address, u64 num,
+ unsigned long r4, long rc),
+
+ TP_ARGS(unit_address, fct, list_address, num, r4, rc),
+
+ TP_STRUCT__entry(
+ __field(u64, unit_address)
+ __field(char *, fct)
+ __field(u64, list_address)
+ __field(u64, num)
+ __field(unsigned long, r4)
+ __field(long, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->unit_address = unit_address;
+ __entry->fct = fct;
+ __entry->list_address = list_address;
+ __entry->num = num;
+ __entry->r4 = r4;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("%#.16llx, %s(%#llx, %#llx), %#lx): %li",
+ __entry->unit_address,
+ __entry->fct,
+ __entry->list_address,
+ __entry->num,
+ __entry->r4,
+ __entry->rc
+ )
+);
+
#endif /* _CXL_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index cbd4331fb45c..cdc7723b845d 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -49,7 +49,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
phb = pci_bus_to_host(dev->bus);
afu = (struct cxl_afu *)phb->private_data;
- if (!cxl_adapter_link_ok(afu->adapter)) {
+ if (!cxl_ops->link_ok(afu->adapter, afu)) {
dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
return false;
}
@@ -66,7 +66,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
return false;
dev->dev.archdata.cxl_ctx = ctx;
- return (cxl_afu_check_and_enable(afu) == 0);
+ return (cxl_ops->afu_check_and_enable(afu) == 0);
}
static void cxl_pci_disable_device(struct pci_dev *dev)
@@ -99,113 +99,90 @@ static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
return (bus << 8) + devfn;
}
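
(Editor's note, illustrative: e.g. bus 0x01, devfn 0x08 maps to
configuration record (0x01 << 8) + 0x08 = 0x108.)
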
-static unsigned long cxl_pcie_cfg_addr(struct pci_controller* phb,
- u8 bus, u8 devfn, int offset)
-{
- int record = cxl_pcie_cfg_record(bus, devfn);
-
- return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
-}
-
-
static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
- int offset, int len,
- volatile void __iomem **ioaddr,
- u32 *mask, int *shift)
+ struct cxl_afu **_afu, int *_record)
{
struct pci_controller *phb;
struct cxl_afu *afu;
- unsigned long addr;
+ int record;
phb = pci_bus_to_host(bus);
if (phb == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
- afu = (struct cxl_afu *)phb->private_data;
- if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
+ afu = (struct cxl_afu *)phb->private_data;
+ record = cxl_pcie_cfg_record(bus->number, devfn);
+ if (record > afu->crs_num)
return PCIBIOS_DEVICE_NOT_FOUND;
- if (offset >= (unsigned long)phb->cfg_data)
- return PCIBIOS_BAD_REGISTER_NUMBER;
- addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);
- *ioaddr = (void *)(addr & ~0x3ULL);
- *shift = ((addr & 0x3) * 8);
- switch (len) {
- case 1:
- *mask = 0xff;
- break;
- case 2:
- *mask = 0xffff;
- break;
- default:
- *mask = 0xffffffff;
- break;
- }
+ *_afu = afu;
+ *_record = record;
return 0;
}
-
-static inline bool cxl_config_link_ok(struct pci_bus *bus)
-{
- struct pci_controller *phb;
- struct cxl_afu *afu;
-
- /* Config space IO is based on phb->cfg_addr, which is based on
- * afu_desc_mmio. This isn't safe to read/write when the link
- * goes down, as EEH tears down MMIO space.
- *
- * Check if the link is OK before proceeding.
- */
-
- phb = pci_bus_to_host(bus);
- if (phb == NULL)
- return false;
- afu = (struct cxl_afu *)phb->private_data;
- return cxl_adapter_link_ok(afu->adapter);
-}
-
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
- volatile void __iomem *ioaddr;
- int shift, rc;
- u32 mask;
+ int rc, record;
+ struct cxl_afu *afu;
+ u8 val8;
+ u16 val16;
+ u32 val32;
- rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
- &mask, &shift);
+ rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
if (rc)
return rc;
- if (!cxl_config_link_ok(bus))
+ switch (len) {
+ case 1:
+ rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
+ *val = val8;
+ break;
+ case 2:
+ rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
+ *val = val16;
+ break;
+ case 4:
+ rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
+ *val = val32;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ if (rc)
return PCIBIOS_DEVICE_NOT_FOUND;
- /* Can only read 32 bits */
- *val = (in_le32(ioaddr) >> shift) & mask;
return PCIBIOS_SUCCESSFUL;
}
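
(Editor's note, illustrative: a 2-byte config read at PCI_DEVICE_ID of a
record now dispatches through cxl_ops->afu_cr_read16() instead of doing
pointer arithmetic on phb->cfg_addr, so the same vPHB code can serve both
the native MMIO backend and a guest backend.)
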
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
- volatile void __iomem *ioaddr;
- u32 v, mask;
- int shift, rc;
+ int rc, record;
+ struct cxl_afu *afu;
- rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
- &mask, &shift);
+ rc = cxl_pcie_config_info(bus, devfn, &afu, &record);
if (rc)
return rc;
- if (!cxl_config_link_ok(bus))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /* Can only write 32 bits so do read-modify-write */
- mask <<= shift;
- val <<= shift;
+ switch (len) {
+ case 1:
+ rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
+ break;
+ case 2:
+ rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
+ break;
+ case 4:
+ rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
+ break;
+ default:
+ WARN_ON(1);
+ }
- v = (in_le32(ioaddr) & ~mask) | (val & mask);
+ if (rc)
+ return PCIBIOS_SET_FAILED;
- out_le32(ioaddr, v);
return PCIBIOS_SUCCESSFUL;
}
@@ -233,23 +210,31 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
{
struct pci_dev *phys_dev;
struct pci_controller *phb, *phys_phb;
-
- phys_dev = to_pci_dev(afu->adapter->dev.parent);
- phys_phb = pci_bus_to_host(phys_dev->bus);
+ struct device_node *vphb_dn;
+ struct device *parent;
+
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ phys_dev = to_pci_dev(afu->adapter->dev.parent);
+ phys_phb = pci_bus_to_host(phys_dev->bus);
+ vphb_dn = phys_phb->dn;
+ parent = &phys_dev->dev;
+ } else {
+ vphb_dn = afu->adapter->dev.parent->of_node;
+ parent = afu->adapter->dev.parent;
+ }
/* Alloc and setup PHB data structure */
- phb = pcibios_alloc_controller(phys_phb->dn);
-
+ phb = pcibios_alloc_controller(vphb_dn);
if (!phb)
return -ENODEV;
/* Setup parent in sysfs */
- phb->parent = &phys_dev->dev;
+ phb->parent = parent;
/* Setup the PHB using arch provided callback */
phb->ops = &cxl_pcie_pci_ops;
- phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
- phb->cfg_data = (void *)(u64)afu->crs_len;
+ phb->cfg_addr = NULL;
+ phb->cfg_data = 0;
phb->private_data = afu;
phb->controller_ops = cxl_pci_controller_ops;
@@ -272,15 +257,6 @@ int cxl_pci_vphb_add(struct cxl_afu *afu)
return 0;
}
-void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
-{
- /* When we are reconfigured, the AFU's MMIO space is unmapped
- * and remapped. We need to reflect this in the PHB's view of
- * the world.
- */
- afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
-}
-
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
struct pci_controller *phb;
@@ -296,6 +272,15 @@ void cxl_pci_vphb_remove(struct cxl_afu *afu)
pcibios_free_controller(phb);
}
+bool cxl_pci_is_vphb_device(struct pci_dev *dev)
+{
+ struct pci_controller *phb;
+
+ phb = pci_bus_to_host(dev->bus);
+
+ return (phb->ops == &cxl_pcie_pci_ops);
+}
+
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
struct pci_controller *phb;
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 04f2e1fa9dd1..cfc493c2e30a 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -3,6 +3,8 @@ menu "EEPROM support"
config EEPROM_AT24
tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
depends on I2C && SYSFS
+ select REGMAP
+ select NVMEM
help
Enable this driver to get read/write support to most I2C EEPROMs
and compatible devices like FRAMs, SRAMs, ROMs etc. After you
@@ -30,6 +32,8 @@ config EEPROM_AT24
config EEPROM_AT25
tristate "SPI EEPROMs from most vendors"
depends on SPI && SYSFS
+ select REGMAP
+ select NVMEM
help
Enable this driver to get read/write support to most SPI EEPROMs,
after you configure the board init code to know about each eeprom
@@ -74,6 +78,8 @@ config EEPROM_93CX6
config EEPROM_93XX46
tristate "Microwire EEPROM 93XX46 support"
depends on SPI && SYSFS
+ select REGMAP
+ select NVMEM
help
Driver for the microwire EEPROM chipsets 93xx46x. The driver
supports both read and write commands and also the command to
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 5d7c0900fa1b..089d6943f68a 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
-#include <linux/sysfs.h>
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/bitops.h>
@@ -23,6 +22,8 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/i2c.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
#include <linux/platform_data/at24.h>
/*
@@ -55,7 +56,6 @@
struct at24_data {
struct at24_platform_data chip;
- struct memory_accessor macc;
int use_smbus;
int use_smbus_write;
@@ -64,12 +64,15 @@ struct at24_data {
* but not from changes by other I2C masters.
*/
struct mutex lock;
- struct bin_attribute bin;
u8 *writebuf;
unsigned write_max;
unsigned num_addresses;
+ struct regmap_config regmap_config;
+ struct nvmem_config nvmem_config;
+ struct nvmem_device *nvmem;
+
/*
* Some chips tie up multiple I2C addresses; dummy devices reserve
* them for us, and we'll use them with SMBus calls.
@@ -283,17 +286,6 @@ static ssize_t at24_read(struct at24_data *at24,
return retval;
}
-static ssize_t at24_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct at24_data *at24;
-
- at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
- return at24_read(at24, buf, off, count);
-}
-
-
/*
* Note that if the hardware write-protect pin is pulled high, the whole
* chip is normally write protected. But there are plenty of product
@@ -414,40 +406,49 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
return retval;
}
-static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- struct at24_data *at24;
-
- at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
- return at24_write(at24, buf, off, count);
-}
-
/*-------------------------------------------------------------------------*/
/*
- * This lets other kernel code access the eeprom data. For example, it
- * might hold a board's Ethernet address, or board-specific calibration
- * data generated on the manufacturing floor.
- */
-
-static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf,
- off_t offset, size_t count)
+ * Provide a regmap interface, which is registered with the NVMEM
+ * framework.
+ */
+static int at24_regmap_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
- struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+ struct at24_data *at24 = context;
+ off_t offset = *(u32 *)reg;
+ int err;
- return at24_read(at24, buf, offset, count);
+ err = at24_read(at24, val, offset, val_size);
+ if (err < 0)
+ return err;
+ return 0;
}
-static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf,
- off_t offset, size_t count)
+static int at24_regmap_write(void *context, const void *data, size_t count)
{
- struct at24_data *at24 = container_of(macc, struct at24_data, macc);
+ struct at24_data *at24 = context;
+ const char *buf;
+ u32 offset;
+ size_t len;
+ int err;
- return at24_write(at24, buf, offset, count);
+ memcpy(&offset, data, sizeof(offset));
+ buf = (const char *)data + sizeof(offset);
+ len = count - sizeof(offset);
+
+ err = at24_write(at24, buf, offset, len);
+ if (err < 0)
+ return err;
+ return 0;
}
+static const struct regmap_bus at24_regmap_bus = {
+ .read = at24_regmap_read,
+ .write = at24_regmap_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_OF
@@ -481,6 +482,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct at24_data *at24;
int err;
unsigned i, num_addresses;
+ struct regmap *regmap;
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
@@ -573,29 +575,12 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->chip = chip;
at24->num_addresses = num_addresses;
- /*
- * Export the EEPROM bytes through sysfs, since that's convenient.
- * By default, only root should see the data (maybe passwords etc)
- */
- sysfs_bin_attr_init(&at24->bin);
- at24->bin.attr.name = "eeprom";
- at24->bin.attr.mode = chip.flags & AT24_FLAG_IRUGO ? S_IRUGO : S_IRUSR;
- at24->bin.read = at24_bin_read;
- at24->bin.size = chip.byte_len;
-
- at24->macc.read = at24_macc_read;
-
writable = !(chip.flags & AT24_FLAG_READONLY);
if (writable) {
if (!use_smbus || use_smbus_write) {
unsigned write_max = chip.page_size;
- at24->macc.write = at24_macc_write;
-
- at24->bin.write = at24_bin_write;
- at24->bin.attr.mode |= S_IWUSR;
-
if (write_max > io_limit)
write_max = io_limit;
if (use_smbus && write_max > I2C_SMBUS_BLOCK_MAX)
@@ -627,14 +612,38 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
- err = sysfs_create_bin_file(&client->dev.kobj, &at24->bin);
- if (err)
+ at24->regmap_config.reg_bits = 32;
+ at24->regmap_config.val_bits = 8;
+ at24->regmap_config.reg_stride = 1;
+ at24->regmap_config.max_register = chip.byte_len - 1;
+
+ regmap = devm_regmap_init(&client->dev, &at24_regmap_bus, at24,
+ &at24->regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "regmap init failed\n");
+ err = PTR_ERR(regmap);
+ goto err_clients;
+ }
+
+ at24->nvmem_config.name = dev_name(&client->dev);
+ at24->nvmem_config.dev = &client->dev;
+ at24->nvmem_config.read_only = !writable;
+ at24->nvmem_config.root_only = true;
+ at24->nvmem_config.owner = THIS_MODULE;
+ at24->nvmem_config.compat = true;
+ at24->nvmem_config.base_dev = &client->dev;
+
+ at24->nvmem = nvmem_register(&at24->nvmem_config);
+
+ if (IS_ERR(at24->nvmem)) {
+ err = PTR_ERR(at24->nvmem);
goto err_clients;
+ }
i2c_set_clientdata(client, at24);
- dev_info(&client->dev, "%zu byte %s EEPROM, %s, %u bytes/write\n",
- at24->bin.size, client->name,
+ dev_info(&client->dev, "%u byte %s EEPROM, %s, %u bytes/write\n",
+ chip.byte_len, client->name,
writable ? "writable" : "read-only", at24->write_max);
if (use_smbus == I2C_SMBUS_WORD_DATA ||
use_smbus == I2C_SMBUS_BYTE_DATA) {
@@ -645,7 +654,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* export data to kernel code */
if (chip.setup)
- chip.setup(&at24->macc, chip.context);
+ chip.setup(at24->nvmem, chip.context);
return 0;
@@ -663,7 +672,8 @@ static int at24_remove(struct i2c_client *client)
int i;
at24 = i2c_get_clientdata(client);
- sysfs_remove_bin_file(&client->dev.kobj, &at24->bin);
+
+ nvmem_unregister(at24->nvmem);
for (i = 1; i < at24->num_addresses; i++)
i2c_unregister_device(at24->client[i]);
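
With the private bin_attribute gone, in-kernel users no longer get a memory_accessor from chip.setup(); they receive the nvmem_device instead, and unrelated code can reach the EEPROM through the NVMEM consumer API. A sketch, assuming a hypothetical lookup name and offset:

	#include <linux/nvmem-consumer.h>

	/* Read 6 bytes (say, a MAC address) via the NVMEM consumer API.
	 * "mac-eeprom" and offset 0x40 are made-up example values. */
	static int read_mac(struct device *dev, u8 mac[6])
	{
		struct nvmem_device *nvmem = nvmem_device_get(dev, "mac-eeprom");
		int ret;

		if (IS_ERR(nvmem))
			return PTR_ERR(nvmem);

		ret = nvmem_device_read(nvmem, 0x40, 6, mac);
		nvmem_device_put(nvmem);

		return ret < 0 ? ret : 0;
	}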
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index f850ef556bcc..fa36a6e37084 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -16,6 +16,8 @@
#include <linux/device.h>
#include <linux/sched.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <linux/property.h>
@@ -29,11 +31,12 @@
struct at25_data {
struct spi_device *spi;
- struct memory_accessor mem;
struct mutex lock;
struct spi_eeprom chip;
- struct bin_attribute bin;
unsigned addrlen;
+ struct regmap_config regmap_config;
+ struct nvmem_config nvmem_config;
+ struct nvmem_device *nvmem;
};
#define AT25_WREN 0x06 /* latch the write enable */
@@ -77,10 +80,10 @@ at25_ee_read(
struct spi_message m;
u8 instr;
- if (unlikely(offset >= at25->bin.size))
+ if (unlikely(offset >= at25->chip.byte_len))
return 0;
- if ((offset + count) > at25->bin.size)
- count = at25->bin.size - offset;
+ if ((offset + count) > at25->chip.byte_len)
+ count = at25->chip.byte_len - offset;
if (unlikely(!count))
return count;
@@ -131,21 +134,19 @@ at25_ee_read(
return status ? status : count;
}
-static ssize_t
-at25_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static int at25_regmap_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
- struct device *dev;
- struct at25_data *at25;
+ struct at25_data *at25 = context;
+ off_t offset = *(u32 *)reg;
+ int err;
- dev = container_of(kobj, struct device, kobj);
- at25 = dev_get_drvdata(dev);
-
- return at25_ee_read(at25, buf, off, count);
+ err = at25_ee_read(at25, val, offset, val_size);
+ if (err < 0)
+ return err;
+ return 0;
}
-
static ssize_t
at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
size_t count)
@@ -155,10 +156,10 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
unsigned buf_size;
u8 *bounce;
- if (unlikely(off >= at25->bin.size))
+ if (unlikely(off >= at25->chip.byte_len))
return -EFBIG;
- if ((off + count) > at25->bin.size)
- count = at25->bin.size - off;
+ if ((off + count) > at25->chip.byte_len)
+ count = at25->chip.byte_len - off;
if (unlikely(!count))
return count;
@@ -265,39 +266,29 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
return written ? written : status;
}
-static ssize_t
-at25_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static int at25_regmap_write(void *context, const void *data, size_t count)
{
- struct device *dev;
- struct at25_data *at25;
-
- dev = container_of(kobj, struct device, kobj);
- at25 = dev_get_drvdata(dev);
-
- return at25_ee_write(at25, buf, off, count);
-}
+ struct at25_data *at25 = context;
+ const char *buf;
+ u32 offset;
+ size_t len;
+ int err;
-/*-------------------------------------------------------------------------*/
-
-/* Let in-kernel code access the eeprom data. */
-
-static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf,
- off_t offset, size_t count)
-{
- struct at25_data *at25 = container_of(mem, struct at25_data, mem);
+ memcpy(&offset, data, sizeof(offset));
+ buf = (const char *)data + sizeof(offset);
+ len = count - sizeof(offset);
- return at25_ee_read(at25, buf, offset, count);
+ err = at25_ee_write(at25, buf, offset, len);
+ if (err < 0)
+ return err;
+ return 0;
}
-static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf,
- off_t offset, size_t count)
-{
- struct at25_data *at25 = container_of(mem, struct at25_data, mem);
-
- return at25_ee_write(at25, buf, offset, count);
-}
+static const struct regmap_bus at25_regmap_bus = {
+ .read = at25_regmap_read,
+ .write = at25_regmap_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
/*-------------------------------------------------------------------------*/
@@ -358,6 +349,7 @@ static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
struct spi_eeprom chip;
+ struct regmap *regmap;
int err;
int sr;
int addrlen;
@@ -402,40 +394,35 @@ static int at25_probe(struct spi_device *spi)
spi_set_drvdata(spi, at25);
at25->addrlen = addrlen;
- /* Export the EEPROM bytes through sysfs, since that's convenient.
- * And maybe to other kernel code; it might hold a board's Ethernet
- * address, or board-specific calibration data generated on the
- * manufacturing floor.
- *
- * Default to root-only access to the data; EEPROMs often hold data
- * that's sensitive for read and/or write, like ethernet addresses,
- * security codes, board-specific manufacturing calibrations, etc.
- */
- sysfs_bin_attr_init(&at25->bin);
- at25->bin.attr.name = "eeprom";
- at25->bin.attr.mode = S_IRUSR;
- at25->bin.read = at25_bin_read;
- at25->mem.read = at25_mem_read;
-
- at25->bin.size = at25->chip.byte_len;
- if (!(chip.flags & EE_READONLY)) {
- at25->bin.write = at25_bin_write;
- at25->bin.attr.mode |= S_IWUSR;
- at25->mem.write = at25_mem_write;
- }
+ at25->regmap_config.reg_bits = 32;
+ at25->regmap_config.val_bits = 8;
+ at25->regmap_config.reg_stride = 1;
+ at25->regmap_config.max_register = chip.byte_len - 1;
- err = sysfs_create_bin_file(&spi->dev.kobj, &at25->bin);
- if (err)
- return err;
-
- if (chip.setup)
- chip.setup(&at25->mem, chip.context);
+ regmap = devm_regmap_init(&spi->dev, &at25_regmap_bus, at25,
+ &at25->regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "regmap init failed\n");
+ return PTR_ERR(regmap);
+ }
- dev_info(&spi->dev, "%Zd %s %s eeprom%s, pagesize %u\n",
- (at25->bin.size < 1024)
- ? at25->bin.size
- : (at25->bin.size / 1024),
- (at25->bin.size < 1024) ? "Byte" : "KByte",
+ at25->nvmem_config.name = dev_name(&spi->dev);
+ at25->nvmem_config.dev = &spi->dev;
+ at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+ at25->nvmem_config.root_only = true;
+ at25->nvmem_config.owner = THIS_MODULE;
+ at25->nvmem_config.compat = true;
+ at25->nvmem_config.base_dev = &spi->dev;
+
+ at25->nvmem = nvmem_register(&at25->nvmem_config);
+ if (IS_ERR(at25->nvmem))
+ return PTR_ERR(at25->nvmem);
+
+ dev_info(&spi->dev, "%d %s %s eeprom%s, pagesize %u\n",
+ (chip.byte_len < 1024)
+ ? chip.byte_len
+ : (chip.byte_len / 1024),
+ (chip.byte_len < 1024) ? "Byte" : "KByte",
at25->chip.name,
(chip.flags & EE_READONLY) ? " (readonly)" : "",
at25->chip.page_size);
@@ -447,7 +434,8 @@ static int at25_remove(struct spi_device *spi)
struct at25_data *at25;
at25 = spi_get_drvdata(spi);
- sysfs_remove_bin_file(&spi->dev.kobj, &at25->bin);
+ nvmem_unregister(at25->nvmem);
+
return 0;
}
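
at24 and at25 (and eeprom_93xx46 below) share one regmap_bus contract: .read gets the register — the EEPROM byte offset, reg_bits = 32 — in a separate reg buffer, while .write gets a single buffer in which regmap has prepended the native-endian offset to the payload, which is why every write handler peels off sizeof(u32) bytes first. The layout, restated as a sketch (my_ee_write() stands in for a driver's byte-level writer):

	/* regmap_bus .write buffer layout: [ u32 offset | payload ... ] */
	static int my_ee_write(void *priv, const char *buf, u32 off, size_t len);

	static int my_regmap_write(void *context, const void *data, size_t count)
	{
		const char *buf = (const char *)data + sizeof(u32);
		size_t len = count - sizeof(u32);
		u32 offset;

		memcpy(&offset, data, sizeof(offset));	/* native endian */
		return my_ee_write(context, buf, offset, len);
	}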
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 7342fd637031..3d1d55157e5f 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -84,7 +84,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct i2c_client *client = to_i2c_client(container_of(kobj, struct device, kobj));
+ struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
struct eeprom_data *data = i2c_get_clientdata(client);
u8 slice;
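
kobj_to_dev(), from <linux/device.h>, is exactly the container_of() it replaces; the conversion is purely cosmetic:

	static inline struct device *kobj_to_dev(struct kobject *kobj)
	{
		return container_of(kobj, struct device, kobj);
	}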
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index ff63f05edc76..426fe2fd5238 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -10,12 +10,17 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
+#include <linux/nvmem-provider.h>
+#include <linux/regmap.h>
#include <linux/eeprom_93xx46.h>
#define OP_START 0x4
@@ -25,73 +30,111 @@
#define ADDR_ERAL 0x20
#define ADDR_EWEN 0x30
+struct eeprom_93xx46_devtype_data {
+ unsigned int quirks;
+};
+
+static const struct eeprom_93xx46_devtype_data atmel_at93c46d_data = {
+ .quirks = EEPROM_93XX46_QUIRK_SINGLE_WORD_READ |
+ EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH,
+};
+
struct eeprom_93xx46_dev {
struct spi_device *spi;
struct eeprom_93xx46_platform_data *pdata;
- struct bin_attribute bin;
struct mutex lock;
+ struct regmap_config regmap_config;
+ struct nvmem_config nvmem_config;
+ struct nvmem_device *nvmem;
int addrlen;
+ int size;
};
+static inline bool has_quirk_single_word_read(struct eeprom_93xx46_dev *edev)
+{
+ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_SINGLE_WORD_READ;
+}
+
+static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
+{
+ return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
+}
+
static ssize_t
-eeprom_93xx46_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
+ unsigned off, size_t count)
{
- struct eeprom_93xx46_dev *edev;
- struct device *dev;
- struct spi_message m;
- struct spi_transfer t[2];
- int bits, ret;
- u16 cmd_addr;
+ ssize_t ret = 0;
- dev = container_of(kobj, struct device, kobj);
- edev = dev_get_drvdata(dev);
+ if (unlikely(off >= edev->size))
+ return 0;
+ if ((off + count) > edev->size)
+ count = edev->size - off;
+ if (unlikely(!count))
+ return count;
- cmd_addr = OP_READ << edev->addrlen;
+ mutex_lock(&edev->lock);
- if (edev->addrlen == 7) {
- cmd_addr |= off & 0x7f;
- bits = 10;
- } else {
- cmd_addr |= off & 0x3f;
- bits = 9;
- }
+ if (edev->pdata->prepare)
+ edev->pdata->prepare(edev);
- dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
- cmd_addr, edev->spi->max_speed_hz);
+ while (count) {
+ struct spi_message m;
+ struct spi_transfer t[2] = { { 0 } };
+ u16 cmd_addr = OP_READ << edev->addrlen;
+ size_t nbytes = count;
+ int bits;
+ int err;
+
+ if (edev->addrlen == 7) {
+ cmd_addr |= off & 0x7f;
+ bits = 10;
+ if (has_quirk_single_word_read(edev))
+ nbytes = 1;
+ } else {
+ cmd_addr |= (off >> 1) & 0x3f;
+ bits = 9;
+ if (has_quirk_single_word_read(edev))
+ nbytes = 2;
+ }
- spi_message_init(&m);
- memset(t, 0, sizeof(t));
+ dev_dbg(&edev->spi->dev, "read cmd 0x%x, %d Hz\n",
+ cmd_addr, edev->spi->max_speed_hz);
- t[0].tx_buf = (char *)&cmd_addr;
- t[0].len = 2;
- t[0].bits_per_word = bits;
- spi_message_add_tail(&t[0], &m);
+ spi_message_init(&m);
- t[1].rx_buf = buf;
- t[1].len = count;
- t[1].bits_per_word = 8;
- spi_message_add_tail(&t[1], &m);
+ t[0].tx_buf = (char *)&cmd_addr;
+ t[0].len = 2;
+ t[0].bits_per_word = bits;
+ spi_message_add_tail(&t[0], &m);
- mutex_lock(&edev->lock);
+ t[1].rx_buf = buf;
+ t[1].len = nbytes;
+ t[1].bits_per_word = 8;
+ spi_message_add_tail(&t[1], &m);
- if (edev->pdata->prepare)
- edev->pdata->prepare(edev);
+ err = spi_sync(edev->spi, &m);
+ /* have to wait at least Tcsl ns */
+ ndelay(250);
- ret = spi_sync(edev->spi, &m);
- /* have to wait at least Tcsl ns */
- ndelay(250);
- if (ret) {
- dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
- count, (int)off, ret);
+ if (err) {
+ dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
+ nbytes, (int)off, err);
+ ret = err;
+ break;
+ }
+
+ buf += nbytes;
+ off += nbytes;
+ count -= nbytes;
+ ret += nbytes;
}
if (edev->pdata->finish)
edev->pdata->finish(edev);
mutex_unlock(&edev->lock);
- return ret ? : count;
+ return ret;
}
static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
@@ -110,7 +153,13 @@ static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
bits = 9;
}
- dev_dbg(&edev->spi->dev, "ew cmd 0x%04x\n", cmd_addr);
+ if (has_quirk_instruction_length(edev)) {
+ cmd_addr <<= 2;
+ bits += 2;
+ }
+
+ dev_dbg(&edev->spi->dev, "ew%s cmd 0x%04x, %d bits\n",
+ is_on ? "en" : "ds", cmd_addr, bits);
spi_message_init(&m);
memset(&t, 0, sizeof(t));
@@ -155,7 +204,7 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
bits = 10;
data_len = 1;
} else {
- cmd_addr |= off & 0x3f;
+ cmd_addr |= (off >> 1) & 0x3f;
bits = 9;
data_len = 2;
}
@@ -182,16 +231,17 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
}
static ssize_t
-eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf,
+ loff_t off, size_t count)
{
- struct eeprom_93xx46_dev *edev;
- struct device *dev;
int i, ret, step = 1;
- dev = container_of(kobj, struct device, kobj);
- edev = dev_get_drvdata(dev);
+ if (unlikely(off >= edev->size))
+ return -EFBIG;
+ if ((off + count) > edev->size)
+ count = edev->size - off;
+ if (unlikely(!count))
+ return count;
/* only write even number of bytes on 16-bit devices */
if (edev->addrlen == 6) {
@@ -228,6 +278,49 @@ eeprom_93xx46_bin_write(struct file *filp, struct kobject *kobj,
return ret ? : count;
}
+/*
+ * Provide a regmap interface, which is registered with the NVMEM
+ * framework.
+ */
+static int eeprom_93xx46_regmap_read(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct eeprom_93xx46_dev *eeprom_93xx46 = context;
+ off_t offset = *(u32 *)reg;
+ int err;
+
+ err = eeprom_93xx46_read(eeprom_93xx46, val, offset, val_size);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static int eeprom_93xx46_regmap_write(void *context, const void *data,
+ size_t count)
+{
+ struct eeprom_93xx46_dev *eeprom_93xx46 = context;
+ const char *buf;
+ u32 offset;
+ size_t len;
+ int err;
+
+ memcpy(&offset, data, sizeof(offset));
+ buf = (const char *)data + sizeof(offset);
+ len = count - sizeof(offset);
+
+ err = eeprom_93xx46_write(eeprom_93xx46, buf, offset, len);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static const struct regmap_bus eeprom_93xx46_regmap_bus = {
+ .read = eeprom_93xx46_regmap_read,
+ .write = eeprom_93xx46_regmap_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
{
struct eeprom_93xx46_platform_data *pd = edev->pdata;
@@ -245,6 +338,13 @@ static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
bits = 9;
}
+ if (has_quirk_instruction_length(edev)) {
+ cmd_addr <<= 2;
+ bits += 2;
+ }
+
+ dev_dbg(&edev->spi->dev, "eral cmd 0x%04x, %d bits\n", cmd_addr, bits);
+
spi_message_init(&m);
memset(&t, 0, sizeof(t));
@@ -294,12 +394,101 @@ static ssize_t eeprom_93xx46_store_erase(struct device *dev,
}
static DEVICE_ATTR(erase, S_IWUSR, NULL, eeprom_93xx46_store_erase);
+static void select_assert(void *context)
+{
+ struct eeprom_93xx46_dev *edev = context;
+
+ gpiod_set_value_cansleep(edev->pdata->select, 1);
+}
+
+static void select_deassert(void *context)
+{
+ struct eeprom_93xx46_dev *edev = context;
+
+ gpiod_set_value_cansleep(edev->pdata->select, 0);
+}
+
+static const struct of_device_id eeprom_93xx46_of_table[] = {
+ { .compatible = "eeprom-93xx46", },
+ { .compatible = "atmel,at93c46d", .data = &atmel_at93c46d_data, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
+
+static int eeprom_93xx46_probe_dt(struct spi_device *spi)
+{
+ const struct of_device_id *of_id =
+ of_match_device(eeprom_93xx46_of_table, &spi->dev);
+ struct device_node *np = spi->dev.of_node;
+ struct eeprom_93xx46_platform_data *pd;
+ u32 tmp;
+ int gpio;
+ enum of_gpio_flags of_flags;
+ int ret;
+
+ pd = devm_kzalloc(&spi->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(np, "data-size", &tmp);
+ if (ret < 0) {
+ dev_err(&spi->dev, "data-size property not found\n");
+ return ret;
+ }
+
+ if (tmp == 8) {
+ pd->flags |= EE_ADDR8;
+ } else if (tmp == 16) {
+ pd->flags |= EE_ADDR16;
+ } else {
+ dev_err(&spi->dev, "invalid data-size (%d)\n", tmp);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(np, "read-only"))
+ pd->flags |= EE_READONLY;
+
+ gpio = of_get_named_gpio_flags(np, "select-gpios", 0, &of_flags);
+ if (gpio_is_valid(gpio)) {
+ unsigned long flags =
+ of_flags == OF_GPIO_ACTIVE_LOW ? GPIOF_ACTIVE_LOW : 0;
+
+ ret = devm_gpio_request_one(&spi->dev, gpio, flags,
+ "eeprom_93xx46_select");
+ if (ret)
+ return ret;
+
+ pd->select = gpio_to_desc(gpio);
+ pd->prepare = select_assert;
+ pd->finish = select_deassert;
+
+ gpiod_direction_output(pd->select, 0);
+ }
+
+ if (of_id->data) {
+ const struct eeprom_93xx46_devtype_data *data = of_id->data;
+
+ pd->quirks = data->quirks;
+ }
+
+ spi->dev.platform_data = pd;
+
+ return 0;
+}
+
static int eeprom_93xx46_probe(struct spi_device *spi)
{
struct eeprom_93xx46_platform_data *pd;
struct eeprom_93xx46_dev *edev;
+ struct regmap *regmap;
int err;
+ if (spi->dev.of_node) {
+ err = eeprom_93xx46_probe_dt(spi);
+ if (err < 0)
+ return err;
+ }
+
pd = spi->dev.platform_data;
if (!pd) {
dev_err(&spi->dev, "missing platform data\n");
@@ -325,19 +514,34 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->spi = spi_dev_get(spi);
edev->pdata = pd;
- sysfs_bin_attr_init(&edev->bin);
- edev->bin.attr.name = "eeprom";
- edev->bin.attr.mode = S_IRUSR;
- edev->bin.read = eeprom_93xx46_bin_read;
- edev->bin.size = 128;
- if (!(pd->flags & EE_READONLY)) {
- edev->bin.write = eeprom_93xx46_bin_write;
- edev->bin.attr.mode |= S_IWUSR;
+ edev->size = 128;
+
+ edev->regmap_config.reg_bits = 32;
+ edev->regmap_config.val_bits = 8;
+ edev->regmap_config.reg_stride = 1;
+ edev->regmap_config.max_register = edev->size - 1;
+
+ regmap = devm_regmap_init(&spi->dev, &eeprom_93xx46_regmap_bus, edev,
+ &edev->regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "regmap init failed\n");
+ err = PTR_ERR(regmap);
+ goto fail;
}
- err = sysfs_create_bin_file(&spi->dev.kobj, &edev->bin);
- if (err)
+ edev->nvmem_config.name = dev_name(&spi->dev);
+ edev->nvmem_config.dev = &spi->dev;
+ edev->nvmem_config.read_only = pd->flags & EE_READONLY;
+ edev->nvmem_config.root_only = true;
+ edev->nvmem_config.owner = THIS_MODULE;
+ edev->nvmem_config.compat = true;
+ edev->nvmem_config.base_dev = &spi->dev;
+
+ edev->nvmem = nvmem_register(&edev->nvmem_config);
+ if (IS_ERR(edev->nvmem)) {
+ err = PTR_ERR(edev->nvmem);
goto fail;
+ }
dev_info(&spi->dev, "%d-bit eeprom %s\n",
(pd->flags & EE_ADDR8) ? 8 : 16,
@@ -359,10 +563,11 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
+ nvmem_unregister(edev->nvmem);
+
if (!(edev->pdata->flags & EE_READONLY))
device_remove_file(&spi->dev, &dev_attr_erase);
- sysfs_remove_bin_file(&spi->dev.kobj, &edev->bin);
kfree(edev);
return 0;
}
@@ -370,6 +575,7 @@ static int eeprom_93xx46_remove(struct spi_device *spi)
static struct spi_driver eeprom_93xx46_driver = {
.driver = {
.name = "93xx46",
+ .of_match_table = of_match_ptr(eeprom_93xx46_of_table),
},
.probe = eeprom_93xx46_probe,
.remove = eeprom_93xx46_remove,
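
The `(off >> 1) & 0x3f` changes matter because a 93xx46 in 16-bit organization (addrlen == 6) addresses 16-bit words, so the byte offset coming from the upper layers must be halved before it is merged into the command word; in 8-bit organization (addrlen == 7) the offset is used as-is. Sketch of the command assembly under that reading (OP_READ as defined in this driver):

	/* 93xx46 READ command word for x8 vs. x16 organization. */
	static u16 ee93xx46_read_cmd(unsigned int byte_off, int addrlen)
	{
		u16 cmd = OP_READ << addrlen;

		if (addrlen == 7)			/* x8: byte address */
			return cmd | (byte_off & 0x7f);
		return cmd | ((byte_off >> 1) & 0x3f);	/* x16: word address */
	}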
diff --git a/drivers/misc/genwqe/card_sysfs.c b/drivers/misc/genwqe/card_sysfs.c
index 6ab31eff0536..c24c9b7c1dd3 100644
--- a/drivers/misc/genwqe/card_sysfs.c
+++ b/drivers/misc/genwqe/card_sysfs.c
@@ -278,7 +278,7 @@ static umode_t genwqe_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
unsigned int j;
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct genwqe_dev *cd = dev_get_drvdata(dev);
umode_t mode = attr->mode;
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 5bd127727d8e..9fea49d2e15b 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -34,6 +34,7 @@
#include <linux/kref.h>
#include <linux/device.h>
#include <linux/input.h>
+#include <linux/time64.h>
/* Driver identification */
#define DRIVER_NAME "ibmasm"
@@ -53,9 +54,11 @@ extern int ibmasm_debug;
static inline char *get_timestamp(char *buf)
{
- struct timeval now;
- do_gettimeofday(&now);
- sprintf(buf, "%lu.%lu", now.tv_sec, now.tv_usec);
+ struct timespec64 now;
+
+ ktime_get_real_ts64(&now);
+ sprintf(buf, "%llu.%.08lu", (long long)now.tv_sec,
+ now.tv_nsec / NSEC_PER_USEC);
return buf;
}
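
struct timeval and do_gettimeofday() overflow in 2038 on 32-bit; ktime_get_real_ts64() fills a timespec64 whose tv_nsec field holds nanoseconds, so the old microsecond fraction is recovered by dividing by NSEC_PER_USEC. The replacement, as a standalone sketch (the buffer is assumed large enough, as in the driver):

	#include <linux/time64.h>
	#include <linux/timekeeping.h>

	static char *stamp(char *buf)
	{
		struct timespec64 now;

		ktime_get_real_ts64(&now);
		sprintf(buf, "%llu.%06lu", (unsigned long long)now.tv_sec,
			now.tv_nsec / NSEC_PER_USEC);
		return buf;
	}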
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0d..9c677f3f3c26 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
{
struct inode *root;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = IBMASMFS_MAGIC;
sb->s_op = &ibmasmfs_s_ops;
sb->s_time_gran = 1;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index 0c3bb7e3ee80..14b7d539fed6 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -209,7 +209,7 @@ static int lis3lv02d_i2c_remove(struct i2c_client *client)
#ifdef CONFIG_PM_SLEEP
static int lis3lv02d_i2c_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -219,7 +219,7 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
static int lis3lv02d_i2c_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
/*
@@ -238,7 +238,7 @@ static int lis3lv02d_i2c_resume(struct device *dev)
#ifdef CONFIG_PM
static int lis3_i2c_runtime_suspend(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
lis3lv02d_poweroff(lis3);
@@ -247,7 +247,7 @@ static int lis3_i2c_runtime_suspend(struct device *dev)
static int lis3_i2c_runtime_resume(struct device *dev)
{
- struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
lis3lv02d_poweron(lis3);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 11fdadc68e53..0a5cbbe12452 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -92,6 +92,9 @@ enum ctype {
CT_UNALIGNED_LOAD_STORE_WRITE,
CT_OVERWRITE_ALLOCATION,
CT_WRITE_AFTER_FREE,
+ CT_READ_AFTER_FREE,
+ CT_WRITE_BUDDY_AFTER_FREE,
+ CT_READ_BUDDY_AFTER_FREE,
CT_SOFTLOCKUP,
CT_HARDLOCKUP,
CT_SPINLOCKUP,
@@ -103,7 +106,9 @@ enum ctype {
CT_EXEC_USERSPACE,
CT_ACCESS_USERSPACE,
CT_WRITE_RO,
+ CT_WRITE_RO_AFTER_INIT,
CT_WRITE_KERN,
+ CT_WRAP_ATOMIC,
};
static char* cp_name[] = {
@@ -129,6 +134,9 @@ static char* cp_type[] = {
"UNALIGNED_LOAD_STORE_WRITE",
"OVERWRITE_ALLOCATION",
"WRITE_AFTER_FREE",
+ "READ_AFTER_FREE",
+ "WRITE_BUDDY_AFTER_FREE",
+ "READ_BUDDY_AFTER_FREE",
"SOFTLOCKUP",
"HARDLOCKUP",
"SPINLOCKUP",
@@ -140,7 +148,9 @@ static char* cp_type[] = {
"EXEC_USERSPACE",
"ACCESS_USERSPACE",
"WRITE_RO",
+ "WRITE_RO_AFTER_INIT",
"WRITE_KERN",
+ "WRAP_ATOMIC"
};
static struct jprobe lkdtm;
@@ -162,6 +172,7 @@ static DEFINE_SPINLOCK(lock_me_up);
static u8 data_area[EXEC_SIZE];
static const unsigned long rodata = 0xAA55AA55;
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
@@ -335,7 +346,7 @@ static noinline void corrupt_stack(void)
memset((void *)data, 0, 64);
}
-static void execute_location(void *dst)
+static noinline void execute_location(void *dst)
{
void (*func)(void) = dst;
@@ -409,12 +420,114 @@ static void lkdtm_do_action(enum ctype which)
break;
}
case CT_WRITE_AFTER_FREE: {
+ int *base, *again;
size_t len = 1024;
- u32 *data = kmalloc(len, GFP_KERNEL);
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist.
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+ pr_info("Attempting bad write to freed memory at %p\n",
+ &base[offset]);
+ kfree(base);
+ base[offset] = 0x0abcdef0;
+ /* Attempt to notice the overwrite. */
+ again = kmalloc(len, GFP_KERNEL);
+ kfree(again);
+ if (again != base)
+ pr_info("Hmm, didn't get the same memory range.\n");
- kfree(data);
+ break;
+ }
+ case CT_READ_AFTER_FREE: {
+ int *base, *val, saw;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist.
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base)
+ break;
+
+ val = kmalloc(len, GFP_KERNEL);
+ if (!val) {
+ kfree(base);
+ break;
+ }
+
+ *val = 0x12345678;
+ base[offset] = *val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Memory was not poisoned\n");
+
+ kfree(val);
+ break;
+ }
+ case CT_WRITE_BUDDY_AFTER_FREE: {
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ if (!p)
+ break;
+ pr_info("Writing to the buddy page before free\n");
+ memset((void *)p, 0x3, PAGE_SIZE);
+ free_page(p);
schedule();
- memset(data, 0x78, len);
+ pr_info("Attempting bad write to the buddy page after free\n");
+ memset((void *)p, 0x78, PAGE_SIZE);
+ /* Attempt to notice the overwrite. */
+ p = __get_free_page(GFP_KERNEL);
+ free_page(p);
+ schedule();
+
+ break;
+ }
+ case CT_READ_BUDDY_AFTER_FREE: {
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ int saw, *val;
+ int *base;
+
+ if (!p)
+ break;
+
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ free_page(p);
+ break;
+ }
+
+ base = (int *)p;
+
+ *val = 0x12345678;
+ base[0] = *val;
+ pr_info("Value in memory before free: %x\n", base[0]);
+ free_page(p);
+ pr_info("Attempting to read from freed memory\n");
+ saw = base[0];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Buddy page was not poisoned\n");
+
+ kfree(val);
break;
}
case CT_SOFTLOCKUP:
@@ -503,11 +616,28 @@ static void lkdtm_do_action(enum ctype which)
break;
}
case CT_WRITE_RO: {
- unsigned long *ptr;
+ /* Explicitly cast away "const" for the test. */
+ unsigned long *ptr = (unsigned long *)&rodata;
- ptr = (unsigned long *)&rodata;
+ pr_info("attempting bad rodata write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
- pr_info("attempting bad write at %p\n", ptr);
+ break;
+ }
+ case CT_WRITE_RO_AFTER_INIT: {
+ unsigned long *ptr = &ro_after_init;
+
+ /*
+ * Verify we were written to during init. Since an Oops
+ * is considered a "success", a failure is to just skip the
+ * real test.
+ */
+ if ((*ptr & 0xAA) != 0xAA) {
+ pr_info("%p was NOT written during init!?\n", ptr);
+ break;
+ }
+
+ pr_info("attempting bad ro_after_init write at %p\n", ptr);
*ptr ^= 0xabcd1234;
break;
@@ -528,6 +658,17 @@ static void lkdtm_do_action(enum ctype which)
do_overwritten();
break;
}
+ case CT_WRAP_ATOMIC: {
+ atomic_t under = ATOMIC_INIT(INT_MIN);
+ atomic_t over = ATOMIC_INIT(INT_MAX);
+
+ pr_info("attempting atomic underflow\n");
+ atomic_dec(&under);
+ pr_info("attempting atomic overflow\n");
+ atomic_inc(&over);
+
+ return;
+ }
case CT_NONE:
default:
break;
@@ -817,6 +958,9 @@ static int __init lkdtm_module_init(void)
int n_debugfs_entries = 1; /* Assume only the direct entry */
int i;
+ /* Make sure we can write to __ro_after_init values during __init */
+ ro_after_init |= 0xAA;
+
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
if (!lkdtm_debugfs_root) {
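
__ro_after_init data is ordinary writable data while __init code runs and is remapped read-only once boot (or module init) finishes; that is why lkdtm_module_init() can plant the 0xAA marker that CT_WRITE_RO_AFTER_INIT later checks before provoking the fault. The lifecycle, as a minimal sketch:

	/* Writable during __init, read-only (writes fault) afterwards. */
	static unsigned long marker __ro_after_init = 0x55AA5500;

	static int __init marker_init(void)
	{
		marker |= 0xAA;	/* allowed: protections not applied yet */
		return 0;
	}

	/* Post-init, `marker |= 1;` would Oops on arches enforcing it. */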
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index d23384dde73b..c49e1d2269af 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,6 +1,6 @@
config INTEL_MEI
tristate "Intel Management Engine Interface"
- depends on X86 && PCI && WATCHDOG_CORE
+ depends on X86 && PCI
help
The Intel Management Engine (Intel ME) provides Manageability,
Security and Media services for systems containing Intel chipsets.
@@ -12,7 +12,7 @@ config INTEL_MEI
config INTEL_MEI_ME
tristate "ME Enabled Intel Chipsets"
select INTEL_MEI
- depends on X86 && PCI && WATCHDOG_CORE
+ depends on X86 && PCI
help
MEI support for ME Enabled Intel chipsets.
@@ -37,7 +37,7 @@ config INTEL_MEI_ME
config INTEL_MEI_TXE
tristate "Intel Trusted Execution Environment with ME Interface"
select INTEL_MEI
- depends on X86 && PCI && WATCHDOG_CORE
+ depends on X86 && PCI
help
MEI Support for Trusted Execution Environment device on Intel SoCs
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 01447ca21c26..59e6b0aede34 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -9,7 +9,6 @@ mei-objs += interrupt.o
mei-objs += client.o
mei-objs += main.o
mei-objs += amthif.o
-mei-objs += wd.o
mei-objs += bus.o
mei-objs += bus-fixup.o
mei-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index cd0403f09267..194360a5f782 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -50,7 +50,6 @@ void mei_amthif_reset_params(struct mei_device *dev)
dev->iamthif_current_cb = NULL;
dev->iamthif_canceled = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_timer = 0;
dev->iamthif_stall_timer = 0;
dev->iamthif_open_count = 0;
}
@@ -68,11 +67,14 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
struct mei_cl *cl = &dev->iamthif_cl;
int ret;
+ if (mei_cl_is_connected(cl))
+ return 0;
+
dev->iamthif_state = MEI_IAMTHIF_IDLE;
mei_cl_init(cl, dev);
- ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+ ret = mei_cl_link(cl);
if (ret < 0) {
dev_err(dev->dev, "amthif: failed cl_link %d\n", ret);
return ret;
@@ -80,32 +82,10 @@ int mei_amthif_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
ret = mei_cl_connect(cl, me_cl, NULL);
- dev->iamthif_state = MEI_IAMTHIF_IDLE;
-
return ret;
}
/**
- * mei_amthif_find_read_list_entry - finds a amthilist entry for current file
- *
- * @dev: the device structure
- * @file: pointer to file object
- *
- * Return: returned a list entry on success, NULL on failure.
- */
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
- struct file *file)
-{
- struct mei_cl_cb *cb;
-
- list_for_each_entry(cb, &dev->amthif_rd_complete_list.list, list)
- if (cb->file_object == file)
- return cb;
- return NULL;
-}
-
-
-/**
* mei_amthif_read - read data from AMTHIF client
*
* @dev: the device structure
@@ -126,18 +106,11 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *cb;
- unsigned long timeout;
int rets;
int wait_ret;
- /* Only possible if we are in timeout */
- if (!cl) {
- dev_err(dev->dev, "bad file ext.\n");
- return -ETIME;
- }
-
dev_dbg(dev->dev, "checking amthif data\n");
- cb = mei_amthif_find_read_list_entry(dev, file);
+ cb = mei_cl_read_cb(cl, file);
/* Check if we can block or not */
if (cb == NULL && file->f_flags & O_NONBLOCK)
@@ -149,8 +122,9 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
/* unlock the Mutex */
mutex_unlock(&dev->device_lock);
- wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
- (cb = mei_amthif_find_read_list_entry(dev, file)));
+ wait_ret = wait_event_interruptible(cl->rx_wait,
+ !list_empty(&cl->rd_completed) ||
+ !mei_cl_is_connected(cl));
/* Locking again the Mutex */
mutex_lock(&dev->device_lock);
@@ -158,7 +132,12 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
if (wait_ret)
return -ERESTARTSYS;
- dev_dbg(dev->dev, "woke up from sleep\n");
+ if (!mei_cl_is_connected(cl)) {
+ rets = -EBUSY;
+ goto out;
+ }
+
+ cb = mei_cl_read_cb(cl, file);
}
if (cb->status) {
@@ -168,24 +147,10 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
}
dev_dbg(dev->dev, "Got amthif data\n");
- dev->iamthif_timer = 0;
-
- timeout = cb->read_time +
- mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
- dev_dbg(dev->dev, "amthif timeout = %lud\n",
- timeout);
-
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev->dev, "amthif Time out\n");
- /* 15 sec for the message has expired */
- list_del_init(&cb->list);
- rets = -ETIME;
- goto free;
- }
/* if the whole message will fit remove it from the list */
if (cb->buf_idx >= *offset && length >= (cb->buf_idx - *offset))
list_del_init(&cb->list);
- else if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
+ else if (cb->buf_idx <= *offset) {
/* end of the message has been reached */
list_del_init(&cb->list);
rets = 0;
@@ -195,9 +160,8 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
* remove message from deletion list
*/
- dev_dbg(dev->dev, "amthif cb->buf size - %d\n",
- cb->buf.size);
- dev_dbg(dev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
+ dev_dbg(dev->dev, "amthif cb->buf.size - %zu cb->buf_idx - %zu\n",
+ cb->buf.size, cb->buf_idx);
/* length is being truncated to PAGE_SIZE, however,
* the buf_idx may point beyond */
@@ -229,7 +193,7 @@ out:
*
* Return: 0 on success, <0 on failure.
*/
-static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
+static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
@@ -248,7 +212,7 @@ static int mei_amthif_read_start(struct mei_cl *cl, struct file *file)
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
dev->iamthif_state = MEI_IAMTHIF_READING;
- dev->iamthif_file_object = cb->file_object;
+ dev->iamthif_fp = cb->fp;
dev->iamthif_current_cb = cb;
return 0;
@@ -277,7 +241,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
dev->iamthif_state = MEI_IAMTHIF_WRITING;
dev->iamthif_current_cb = cb;
- dev->iamthif_file_object = cb->file_object;
+ dev->iamthif_fp = cb->fp;
dev->iamthif_canceled = false;
ret = mei_cl_write(cl, cb, false);
@@ -285,7 +249,7 @@ static int mei_amthif_send_cmd(struct mei_cl *cl, struct mei_cl_cb *cb)
return ret;
if (cb->completed)
- cb->status = mei_amthif_read_start(cl, cb->file_object);
+ cb->status = mei_amthif_read_start(cl, cb->fp);
return 0;
}
@@ -304,8 +268,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
dev->iamthif_canceled = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_timer = 0;
- dev->iamthif_file_object = NULL;
+ dev->iamthif_fp = NULL;
dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
@@ -329,17 +292,17 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
- struct mei_device *dev;
-
- if (WARN_ON(!cl || !cl->dev))
- return -ENODEV;
+ struct mei_device *dev = cl->dev;
- if (WARN_ON(!cb))
- return -EINVAL;
+ list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
- dev = cl->dev;
+ /*
+ * The previous request is still being processed; queue this one.
+ */
+ if (dev->iamthif_state > MEI_IAMTHIF_IDLE &&
+ dev->iamthif_state < MEI_IAMTHIF_READ_COMPLETE)
+ return 0;
- list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
return mei_amthif_run_next_cmd(dev);
}
@@ -360,10 +323,10 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
{
unsigned int mask = 0;
- poll_wait(file, &dev->iamthif_cl.wait, wait);
+ poll_wait(file, &dev->iamthif_cl.rx_wait, wait);
if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
- dev->iamthif_file_object == file) {
+ dev->iamthif_fp == file) {
mask |= POLLIN | POLLRDNORM;
mei_amthif_run_next_cmd(dev);
@@ -393,7 +356,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
if (cb->completed)
- cb->status = mei_amthif_read_start(cl, cb->file_object);
+ cb->status = mei_amthif_read_start(cl, cb->fp);
return 0;
}
@@ -437,11 +400,12 @@ int mei_amthif_irq_read_msg(struct mei_cl *cl,
/**
* mei_amthif_complete - complete amthif callback.
*
- * @dev: the device structure.
+ * @cl: host client
* @cb: callback block.
*/
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
+ struct mei_device *dev = cl->dev;
if (cb->fop_type == MEI_FOP_WRITE) {
if (!cb->status) {
@@ -453,25 +417,22 @@ void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb)
* in case of error enqueue the write cb to complete read list
* so it can be propagated to the reader
*/
- list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
- wake_up_interruptible(&dev->iamthif_cl.wait);
+ list_add_tail(&cb->list, &cl->rd_completed);
+ wake_up_interruptible(&cl->rx_wait);
return;
}
if (!dev->iamthif_canceled) {
dev->iamthif_state = MEI_IAMTHIF_READ_COMPLETE;
dev->iamthif_stall_timer = 0;
- list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
+ list_add_tail(&cb->list, &cl->rd_completed);
dev_dbg(dev->dev, "amthif read completed\n");
- dev->iamthif_timer = jiffies;
- dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
- dev->iamthif_timer);
} else {
mei_amthif_run_next_cmd(dev);
}
dev_dbg(dev->dev, "completing amthif call back.\n");
- wake_up_interruptible(&dev->iamthif_cl.wait);
+ wake_up_interruptible(&cl->rx_wait);
}
/**
@@ -497,7 +458,7 @@ static bool mei_clear_list(struct mei_device *dev,
/* list all list member */
list_for_each_entry_safe(cb, next, mei_cb_list, list) {
/* check if list member associated with a file */
- if (file == cb->file_object) {
+ if (file == cb->fp) {
/* check if cb equal to current iamthif cb */
if (dev->iamthif_current_cb == cb) {
dev->iamthif_current_cb = NULL;
@@ -523,13 +484,14 @@ static bool mei_clear_list(struct mei_device *dev,
*
* Return: true if callback removed from the list, false otherwise
*/
-static bool mei_clear_lists(struct mei_device *dev, struct file *file)
+static bool mei_clear_lists(struct mei_device *dev, const struct file *file)
{
bool removed = false;
+ struct mei_cl *cl = &dev->iamthif_cl;
/* remove callbacks associated with a file */
mei_clear_list(dev, file, &dev->amthif_cmd_list.list);
- if (mei_clear_list(dev, file, &dev->amthif_rd_complete_list.list))
+ if (mei_clear_list(dev, file, &cl->rd_completed))
removed = true;
mei_clear_list(dev, file, &dev->ctrl_rd_list.list);
@@ -546,7 +508,7 @@ static bool mei_clear_lists(struct mei_device *dev, struct file *file)
/* check if iamthif_current_cb not NULL */
if (dev->iamthif_current_cb && !removed) {
/* check file and iamthif current cb association */
- if (dev->iamthif_current_cb->file_object == file) {
+ if (dev->iamthif_current_cb->fp == file) {
/* remove cb */
mei_io_cb_free(dev->iamthif_current_cb);
dev->iamthif_current_cb = NULL;
@@ -569,7 +531,7 @@ int mei_amthif_release(struct mei_device *dev, struct file *file)
if (dev->iamthif_open_count > 0)
dev->iamthif_open_count--;
- if (dev->iamthif_file_object == file &&
+ if (dev->iamthif_fp == file &&
dev->iamthif_state != MEI_IAMTHIF_IDLE) {
dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 020de5919c21..e9e6ea3ab73c 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -35,6 +35,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+ 0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
#define MEI_UUID_ANY NULL_UUID_LE
/**
@@ -48,8 +51,7 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
*/
static void number_of_connections(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
- __func__, mei_me_cl_uuid(cldev->me_cl));
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
if (cldev->me_cl->props.max_number_of_connections > 1)
cldev->do_match = 0;
@@ -62,11 +64,36 @@ static void number_of_connections(struct mei_cl_device *cldev)
*/
static void blacklist(struct mei_cl_device *cldev)
{
- dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
- __func__, mei_me_cl_uuid(cldev->me_cl));
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+
cldev->do_match = 0;
}
+/**
+ * mei_wd - wd client on the bus, change protocol version
+ * as the API has changed.
+ *
+ * @cldev: me clients device
+ */
+#if IS_ENABLED(CONFIG_INTEL_MEI_ME)
+#include <linux/pci.h>
+#include "hw-me-regs.h"
+static void mei_wd(struct mei_cl_device *cldev)
+{
+ struct pci_dev *pdev = to_pci_dev(cldev->dev.parent);
+
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
+ if (pdev->device == MEI_DEV_ID_WPT_LP ||
+ pdev->device == MEI_DEV_ID_SPT ||
+ pdev->device == MEI_DEV_ID_SPT_H)
+ cldev->me_cl->props.protocol_version = 0x2;
+
+ cldev->do_match = 1;
+}
+#else
+static inline void mei_wd(struct mei_cl_device *cldev) {}
+#endif /* CONFIG_INTEL_MEI_ME */
+
struct mei_nfc_cmd {
u8 command;
u8 status;
@@ -208,12 +235,11 @@ static void mei_nfc(struct mei_cl_device *cldev)
bus = cldev->bus;
- dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
- __func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
+ dev_dbg(&cldev->dev, "running hook %s\n", __func__);
mutex_lock(&bus->device_lock);
/* we need to connect to INFO GUID */
- cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+ cl = mei_cl_alloc_linked(bus);
if (IS_ERR(cl)) {
ret = PTR_ERR(cl);
cl = NULL;
@@ -282,6 +308,7 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+ MEI_FIXUP(MEI_UUID_WD, mei_wd),
};
/**
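
Fixups run once per discovered ME client and are matched by UUID — MEI_UUID_ANY entries run for every client — and a hook decides bus exposure through cldev->do_match, optionally patching client properties the way mei_wd() patches the protocol version. A hypothetical extra entry would look like this (MEI_UUID_FOO and foo_blacklist are made up):

	static void foo_blacklist(struct mei_cl_device *cldev)
	{
		cldev->do_match = 0;	/* keep this client off the bus */
	}

	/* ...and in the fixup table: MEI_FIXUP(MEI_UUID_FOO, foo_blacklist), */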
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 0b05aa938799..5d5996e39a67 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -44,7 +44,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking)
{
struct mei_device *bus;
- struct mei_cl_cb *cb = NULL;
+ struct mei_cl_cb *cb;
ssize_t rets;
if (WARN_ON(!cl || !cl->dev))
@@ -53,6 +53,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
+
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
@@ -81,8 +86,6 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
out:
mutex_unlock(&bus->device_lock);
- if (rets < 0)
- mei_io_cb_free(cb);
return rets;
}
@@ -109,6 +112,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
bus = cl->dev;
mutex_lock(&bus->device_lock);
+ if (bus->dev_state != MEI_DEV_ENABLED) {
+ rets = -ENODEV;
+ goto out;
+ }
cb = mei_cl_read_cb(cl, NULL);
if (cb)
@@ -230,45 +237,55 @@ static void mei_cl_bus_event_work(struct work_struct *work)
* mei_cl_bus_notify_event - schedule notify cb on bus client
*
* @cl: host client
+ *
+ * Return: true if event was scheduled
+ * false if the client is not waiting for event
*/
-void mei_cl_bus_notify_event(struct mei_cl *cl)
+bool mei_cl_bus_notify_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
if (!cldev || !cldev->event_cb)
- return;
+ return false;
if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
- return;
+ return false;
if (!cl->notify_ev)
- return;
+ return false;
set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
schedule_work(&cldev->event_work);
cl->notify_ev = false;
+
+ return true;
}
/**
- * mei_cl_bus_rx_event - schedule rx evenet
+ * mei_cl_bus_rx_event - schedule rx event
*
* @cl: host client
+ *
+ * Return: true if event was scheduled
+ * false if the client is not waiting for event
*/
-void mei_cl_bus_rx_event(struct mei_cl *cl)
+bool mei_cl_bus_rx_event(struct mei_cl *cl)
{
struct mei_cl_device *cldev = cl->cldev;
if (!cldev || !cldev->event_cb)
- return;
+ return false;
if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
- return;
+ return false;
set_bit(MEI_CL_EVENT_RX, &cldev->events);
schedule_work(&cldev->event_work);
+
+ return true;
}
/**
@@ -398,7 +415,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
if (!cl) {
mutex_lock(&bus->device_lock);
- cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+ cl = mei_cl_alloc_linked(bus);
mutex_unlock(&bus->device_lock);
if (IS_ERR(cl))
return PTR_ERR(cl);
@@ -958,6 +975,22 @@ void mei_cl_bus_rescan(struct mei_device *bus)
dev_dbg(bus->dev, "rescan end");
}
+void mei_cl_bus_rescan_work(struct work_struct *work)
+{
+ struct mei_device *bus =
+ container_of(work, struct mei_device, bus_rescan_work);
+ struct mei_me_client *me_cl;
+
+ mutex_lock(&bus->device_lock);
+ me_cl = mei_me_cl_by_uuid(bus, &mei_amthif_guid);
+ if (me_cl)
+ mei_amthif_host_init(bus, me_cl);
+ mei_me_cl_put(me_cl);
+ mutex_unlock(&bus->device_lock);
+
+ mei_cl_bus_rescan(bus);
+}
+
int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
struct module *owner)
{
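
Moving the rescan into bus_rescan_work lets the init path flip dev_state and return quickly; the handler later recovers its mei_device from the embedded work_struct. The general shape of that deferral:

	/* Deferred work recovering its owner via container_of(). */
	static void my_rescan_work(struct work_struct *work)
	{
		struct mei_device *bus =
			container_of(work, struct mei_device, bus_rescan_work);

		mei_cl_bus_rescan(bus);
	}

	/* set up once:  INIT_WORK(&bus->bus_rescan_work, my_rescan_work);
	 * triggered by: schedule_work(&bus->bus_rescan_work); */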
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index a6c87c713193..bab17e4197b6 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -359,7 +359,7 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
* Return: mei_cl_cb pointer or NULL;
*/
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
- struct file *fp)
+ const struct file *fp)
{
struct mei_cl_cb *cb;
@@ -368,7 +368,7 @@ struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
return NULL;
INIT_LIST_HEAD(&cb->list);
- cb->file_object = fp;
+ cb->fp = fp;
cb->cl = cl;
cb->buf_idx = 0;
cb->fop_type = type;
@@ -455,7 +455,8 @@ int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length)
* Return: cb on success and NULL on failure
*/
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
- enum mei_cb_file_ops type, struct file *fp)
+ enum mei_cb_file_ops type,
+ const struct file *fp)
{
struct mei_cl_cb *cb;
@@ -485,7 +486,7 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
struct mei_cl_cb *cb;
list_for_each_entry(cb, &cl->rd_completed, list)
- if (!fp || fp == cb->file_object)
+ if (!fp || fp == cb->fp)
return cb;
return NULL;
@@ -503,12 +504,12 @@ void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
struct mei_cl_cb *cb, *next;
list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
- if (!fp || fp == cb->file_object)
+ if (!fp || fp == cb->fp)
mei_io_cb_free(cb);
list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
- if (!fp || fp == cb->file_object)
+ if (!fp || fp == cb->fp)
mei_io_cb_free(cb);
}
@@ -535,7 +536,6 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
- mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
mei_cl_read_cb_flush(cl, fp);
@@ -587,27 +587,23 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
* mei_cl_link - allocate host id in the host map
*
* @cl: host client
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
*
* Return: 0 on success
* -EINVAL on incorrect values
* -EMFILE if open count exceeded.
*/
-int mei_cl_link(struct mei_cl *cl, int id)
+int mei_cl_link(struct mei_cl *cl)
{
struct mei_device *dev;
long open_handle_count;
+ int id;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
dev = cl->dev;
- /* If Id is not assigned get one*/
- if (id == MEI_HOST_CLIENT_ID_ANY)
- id = find_first_zero_bit(dev->host_clients_map,
- MEI_CLIENTS_MAX);
-
+ id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
return -EMFILE;
@@ -648,7 +644,7 @@ int mei_cl_unlink(struct mei_cl *cl)
if (!cl)
return 0;
- /* wd and amthif might not be initialized */
+ /* amthif might not be initialized */
if (!cl->dev)
return 0;
@@ -670,31 +666,12 @@ int mei_cl_unlink(struct mei_cl *cl)
return 0;
}
-
-void mei_host_client_init(struct work_struct *work)
+void mei_host_client_init(struct mei_device *dev)
{
- struct mei_device *dev =
- container_of(work, struct mei_device, init_work);
- struct mei_me_client *me_cl;
-
- mutex_lock(&dev->device_lock);
-
-
- me_cl = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
- if (me_cl)
- mei_amthif_host_init(dev, me_cl);
- mei_me_cl_put(me_cl);
-
- me_cl = mei_me_cl_by_uuid(dev, &mei_wd_guid);
- if (me_cl)
- mei_wd_host_init(dev, me_cl);
- mei_me_cl_put(me_cl);
-
dev->dev_state = MEI_DEV_ENABLED;
dev->reset_count = 0;
- mutex_unlock(&dev->device_lock);
- mei_cl_bus_rescan(dev);
+ schedule_work(&dev->bus_rescan_work);
pm_runtime_mark_last_busy(dev->dev);
dev_dbg(dev->dev, "rpm: autosuspend\n");
@@ -726,6 +703,33 @@ bool mei_hbuf_acquire(struct mei_device *dev)
}
/**
+ * mei_cl_wake_all - wake up readers, writers and event waiters so
+ * they can be interrupted
+ *
+ * @cl: host client
+ */
+static void mei_cl_wake_all(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+
+ /* synchronized under device mutex */
+ if (waitqueue_active(&cl->rx_wait)) {
+ cl_dbg(dev, cl, "Waking up reading client!\n");
+ wake_up_interruptible(&cl->rx_wait);
+ }
+ /* synchronized under device mutex */
+ if (waitqueue_active(&cl->tx_wait)) {
+ cl_dbg(dev, cl, "Waking up writing client!\n");
+ wake_up_interruptible(&cl->tx_wait);
+ }
+ /* synchronized under device mutex */
+ if (waitqueue_active(&cl->ev_wait)) {
+ cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
+ wake_up_interruptible(&cl->ev_wait);
+ }
+}
+
+/**
* mei_cl_set_disconnected - set disconnected state and clear
* associated states and resources
*
@@ -740,8 +744,11 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
return;
cl->state = MEI_FILE_DISCONNECTED;
+ mei_io_list_free(&dev->write_list, cl);
+ mei_io_list_free(&dev->write_waiting_list, cl);
mei_io_list_flush(&dev->ctrl_rd_list, cl);
mei_io_list_flush(&dev->ctrl_wr_list, cl);
+ mei_cl_wake_all(cl);
cl->mei_flow_ctrl_creds = 0;
cl->timer_count = 0;
@@ -1034,7 +1041,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
* Return: 0 on success, <0 on failure.
*/
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
- struct file *file)
+ const struct file *file)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -1119,11 +1126,10 @@ nortpm:
* mei_cl_alloc_linked - allocate and link host client
*
* @dev: the device structure
- * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generic one
*
* Return: cl on success ERR_PTR on failure
*/
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
struct mei_cl *cl;
int ret;
@@ -1134,7 +1140,7 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id)
goto err;
}
- ret = mei_cl_link(cl, id);
+ ret = mei_cl_link(cl);
if (ret)
goto err;
@@ -1149,11 +1155,12 @@ err:
/**
* mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
*
- * @cl: private data of the file object
+ * @cl: host client
+ * @fp: the file pointer associated with the client
*
* Return: 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
*/
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_creds(struct mei_cl *cl, const struct file *fp)
{
int rets;
@@ -1164,7 +1171,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
return 1;
if (mei_cl_is_fixed_address(cl)) {
- rets = mei_cl_read_start(cl, mei_cl_mtu(cl), NULL);
+ rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
if (rets && rets != -EBUSY)
return rets;
return 1;
@@ -1186,7 +1193,7 @@ int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
* 0 on success
* -EINVAL when ctrl credits are <= 0
*/
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+static int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
if (WARN_ON(!cl || !cl->me_cl))
return -EINVAL;
@@ -1283,7 +1290,8 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
*
 * Return: 0 on success and error otherwise.
*/
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
+int mei_cl_notify_request(struct mei_cl *cl,
+ const struct file *file, u8 request)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -1368,12 +1376,12 @@ void mei_cl_notify(struct mei_cl *cl)
cl_dbg(dev, cl, "notify event");
cl->notify_ev = true;
- wake_up_interruptible_all(&cl->ev_wait);
+ if (!mei_cl_bus_notify_event(cl))
+ wake_up_interruptible(&cl->ev_wait);
if (cl->ev_async)
kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
- mei_cl_bus_notify_event(cl);
}
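mei_cl_bus_notify_event() now reports whether a bus client consumed the event, so user-space waiters on ev_wait are only woken when nobody on the bus claimed it. An illustrative sketch of that contract (the field and work-item names are assumptions; the real body lives in bus.c):

	/* Illustrative only: return true when a bound bus device registered
	 * a notification callback and the event was handed to it; the
	 * caller then skips wake_up_interruptible(&cl->ev_wait).
	 */
	bool mei_cl_bus_notify_event(struct mei_cl *cl)
	{
		struct mei_cl_device *cldev = cl->cldev; /* bus device, if bound */

		if (!cldev || !cldev->notif_cb)		/* assumed field */
			return false;

		schedule_work(&cldev->notif_work);	/* assumed work item */
		return true;
	}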
/**
@@ -1422,6 +1430,25 @@ out:
}
/**
+ * mei_cl_is_read_fc_cb - check if read cb is waiting for flow control
+ * for given host client
+ *
+ * @cl: host client
+ *
+ * Return: true if at least one pending read cb was found.
+ */
+static bool mei_cl_is_read_fc_cb(struct mei_cl *cl)
+{
+ struct mei_device *dev = cl->dev;
+ struct mei_cl_cb *cb;
+
+ list_for_each_entry(cb, &dev->ctrl_wr_list.list, list)
+ if (cb->fop_type == MEI_FOP_READ && cb->cl == cl)
+ return true;
+ return false;
+}
+
+/**
* mei_cl_read_start - the start read client message function.
*
* @cl: host client
@@ -1430,7 +1457,7 @@ out:
*
* Return: 0 on success, <0 on failure.
*/
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
struct mei_device *dev;
struct mei_cl_cb *cb;
@@ -1445,7 +1472,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp)
return -ENODEV;
/* HW currently supports only one pending read */
- if (!list_empty(&cl->rd_pending))
+ if (!list_empty(&cl->rd_pending) || mei_cl_is_read_fc_cb(cl))
return -EBUSY;
if (!mei_me_cl_is_active(cl->me_cl)) {
@@ -1524,7 +1551,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
first_chunk = cb->buf_idx == 0;
- rets = first_chunk ? mei_cl_flow_ctrl_creds(cl) : 1;
+ rets = first_chunk ? mei_cl_flow_ctrl_creds(cl, cb->fp) : 1;
if (rets < 0)
return rets;
@@ -1556,7 +1583,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
- cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
+ cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
cb->buf.size, cb->buf_idx);
rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
@@ -1618,7 +1645,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
- return rets;
+ goto free;
}
cb->buf_idx = 0;
@@ -1630,7 +1657,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
mei_hdr.msg_complete = 0;
mei_hdr.internal = cb->internal;
- rets = mei_cl_flow_ctrl_creds(cl);
+ rets = mei_cl_flow_ctrl_creds(cl, cb->fp);
if (rets < 0)
goto err;
@@ -1677,7 +1704,8 @@ out:
mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible(cl->tx_wait,
- cl->writing_state == MEI_WRITE_COMPLETE);
+ cl->writing_state == MEI_WRITE_COMPLETE ||
+ (!mei_cl_is_connected(cl)));
mutex_lock(&dev->device_lock);
/* wait_event_interruptible returns -ERESTARTSYS */
if (rets) {
@@ -1685,6 +1713,10 @@ out:
rets = -EINTR;
goto err;
}
+ if (cl->writing_state != MEI_WRITE_COMPLETE) {
+ rets = -EFAULT;
+ goto err;
+ }
}
rets = size;
@@ -1692,6 +1724,8 @@ err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
+free:
+ mei_io_cb_free(cb);
return rets;
}
@@ -1721,10 +1755,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
case MEI_FOP_READ:
list_add_tail(&cb->list, &cl->rd_completed);
- if (waitqueue_active(&cl->rx_wait))
- wake_up_interruptible_all(&cl->rx_wait);
- else
- mei_cl_bus_rx_event(cl);
+ if (!mei_cl_bus_rx_event(cl))
+ wake_up_interruptible(&cl->rx_wait);
break;
case MEI_FOP_CONNECT:
@@ -1753,44 +1785,3 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry(cl, &dev->file_list, link)
mei_cl_set_disconnected(cl);
}
-
-
-/**
- * mei_cl_all_wakeup - wake up all readers and writers they can be interrupted
- *
- * @dev: mei device
- */
-void mei_cl_all_wakeup(struct mei_device *dev)
-{
- struct mei_cl *cl;
-
- list_for_each_entry(cl, &dev->file_list, link) {
- if (waitqueue_active(&cl->rx_wait)) {
- cl_dbg(dev, cl, "Waking up reading client!\n");
- wake_up_interruptible(&cl->rx_wait);
- }
- if (waitqueue_active(&cl->tx_wait)) {
- cl_dbg(dev, cl, "Waking up writing client!\n");
- wake_up_interruptible(&cl->tx_wait);
- }
-
- /* synchronized under device mutex */
- if (waitqueue_active(&cl->ev_wait)) {
- cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
- wake_up_interruptible(&cl->ev_wait);
- }
- }
-}
-
-/**
- * mei_cl_all_write_clear - clear all pending writes
- *
- * @dev: mei device
- */
-void mei_cl_all_write_clear(struct mei_device *dev)
-{
- mei_io_list_free(&dev->write_list, NULL);
- mei_io_list_free(&dev->write_waiting_list, NULL);
-}
-
-
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 04e1aa39243f..0d7a3a1fef78 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -18,7 +18,6 @@
#define _MEI_CLIENT_H_
#include <linux/types.h>
-#include <linux/watchdog.h>
#include <linux/poll.h>
#include <linux/mei.h>
@@ -84,7 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
* MEI IO Functions
*/
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
- struct file *fp);
+ const struct file *fp);
void mei_io_cb_free(struct mei_cl_cb *priv_cb);
int mei_io_cb_alloc_buf(struct mei_cl_cb *cb, size_t length);
@@ -108,21 +107,19 @@ struct mei_cl *mei_cl_allocate(struct mei_device *dev);
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
-int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_link(struct mei_cl *cl);
int mei_cl_unlink(struct mei_cl *cl);
-struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev, int id);
+struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
const struct file *fp);
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
- enum mei_cb_file_ops type, struct file *fp);
+ enum mei_cb_file_ops type,
+ const struct file *fp);
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
-int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
-
-int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
/*
* MEI input output function prototype
*/
@@ -217,10 +214,10 @@ void mei_cl_set_disconnected(struct mei_cl *cl);
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
- struct file *file);
+ const struct file *file);
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
-int mei_cl_read_start(struct mei_cl *cl, size_t length, struct file *fp);
+int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
struct mei_cl_cb *cmpl_list);
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
@@ -229,19 +226,18 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
-void mei_host_client_init(struct work_struct *work);
+void mei_host_client_init(struct mei_device *dev);
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
-int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
+int mei_cl_notify_request(struct mei_cl *cl,
+ const struct file *file, u8 request);
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
void mei_cl_notify(struct mei_cl *cl);
void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_wakeup(struct mei_device *dev);
-void mei_cl_all_write_clear(struct mei_device *dev);
#define MEI_CL_FMT "cl:host=%02d me=%02d "
#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
@@ -249,6 +245,9 @@ void mei_cl_all_write_clear(struct mei_device *dev);
#define cl_dbg(dev, cl, format, arg...) \
dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+#define cl_warn(dev, cl, format, arg...) \
+ dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+
#define cl_err(dev, cl, format, arg...) \
dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index a138d8a27ab5..c6c051b52f55 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -50,6 +50,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
}
pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
@@ -90,23 +91,37 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
{
struct mei_device *dev = fp->private_data;
struct mei_cl *cl;
- const size_t bufsz = 1024;
+ size_t bufsz = 1;
char *buf;
int i = 0;
int pos = 0;
int ret;
+#define HDR " |me|host|state|rd|wr|\n"
+
if (!dev)
return -ENODEV;
+ mutex_lock(&dev->device_lock);
+
+ /*
+ * if the driver is not enabled the list won't be consistent,
+ * so we output an empty table
+ */
+ if (dev->dev_state == MEI_DEV_ENABLED)
+ list_for_each_entry(cl, &dev->file_list, link)
+ bufsz++;
+
+ bufsz *= sizeof(HDR) + 1;
+
buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
+ if (!buf) {
+ mutex_unlock(&dev->device_lock);
return -ENOMEM;
+ }
- pos += scnprintf(buf + pos, bufsz - pos,
- " |me|host|state|rd|wr|\n");
-
- mutex_lock(&dev->device_lock);
+ pos += scnprintf(buf + pos, bufsz - pos, HDR);
+#undef HDR
/* if the driver is not enabled the list won't be consistent */
if (dev->dev_state != MEI_DEV_ENABLED)
@@ -115,7 +130,7 @@ static ssize_t mei_dbgfs_read_active(struct file *fp, char __user *ubuf,
list_for_each_entry(cl, &dev->file_list, link) {
pos += scnprintf(buf + pos, bufsz - pos,
- "%2d|%2d|%4d|%5d|%2d|%2d|\n",
+ "%3d|%2d|%4d|%5d|%2d|%2d|\n",
i, mei_cl_me_id(cl), cl->host_client_id, cl->state,
!list_empty(&cl->rd_completed), cl->writing_state);
i++;
@@ -150,16 +165,21 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
pos += scnprintf(buf + pos, bufsz - pos, "hbm: %s\n",
mei_hbm_state_str(dev->hbm_state));
- if (dev->hbm_state == MEI_HBM_STARTED) {
+ if (dev->hbm_state >= MEI_HBM_ENUM_CLIENTS &&
+ dev->hbm_state <= MEI_HBM_STARTED) {
pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
dev->hbm_f_pg_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
dev->hbm_f_dc_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tIE: %01d\n",
+ dev->hbm_f_ie_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
dev->hbm_f_dot_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
dev->hbm_f_ev_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
+ dev->hbm_f_fa_supported);
}
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
@@ -175,6 +195,30 @@ static const struct file_operations mei_dbgfs_fops_devstate = {
.llseek = generic_file_llseek,
};
+static ssize_t mei_dbgfs_write_allow_fa(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_device *dev;
+ int ret;
+
+ dev = container_of(file->private_data,
+ struct mei_device, allow_fixed_address);
+
+ ret = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (ret < 0)
+ return ret;
+ dev->override_fixed_address = true;
+ return ret;
+}
+
+static const struct file_operations mei_dbgfs_fops_allow_fa = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = mei_dbgfs_write_allow_fa,
+ .llseek = generic_file_llseek,
+};
+
/**
* mei_dbgfs_deregister - Remove the debugfs files and directories
*
@@ -224,8 +268,9 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
dev_err(dev->dev, "devstate: registration failed\n");
goto err;
}
- f = debugfs_create_bool("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
- &dev->allow_fixed_address);
+ f = debugfs_create_file("allow_fixed_address", S_IRUSR | S_IWUSR, dir,
+ &dev->allow_fixed_address,
+ &mei_dbgfs_fops_allow_fa);
if (!f) {
dev_err(dev->dev, "allow_fixed_address: registration failed\n");
goto err;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index e7b7aad0999b..5e305d2605f3 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -301,7 +301,10 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
memset(enum_req, 0, len);
enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
- enum_req->allow_add = dev->hbm_f_dc_supported;
+ enum_req->flags |= dev->hbm_f_dc_supported ?
+ MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+ enum_req->flags |= dev->hbm_f_ie_supported ?
+ MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
if (ret) {
@@ -401,6 +404,9 @@ static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
if (ret)
status = !MEI_HBMS_SUCCESS;
+ if (dev->dev_state == MEI_DEV_ENABLED)
+ schedule_work(&dev->bus_rescan_work);
+
return mei_hbm_add_cl_resp(dev, req->me_addr, status);
}
@@ -543,7 +549,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
/* We got all client properties */
if (next_client_index == MEI_CLIENTS_MAX) {
dev->hbm_state = MEI_HBM_STARTED;
- schedule_work(&dev->init_work);
+ mei_host_client_init(dev);
return 0;
}
@@ -789,8 +795,11 @@ static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
cl->state = MEI_FILE_CONNECTED;
else {
cl->state = MEI_FILE_DISCONNECT_REPLY;
- if (rs->status == MEI_CL_CONN_NOT_FOUND)
+ if (rs->status == MEI_CL_CONN_NOT_FOUND) {
mei_me_cl_del(dev, cl->me_cl);
+ if (dev->dev_state == MEI_DEV_ENABLED)
+ schedule_work(&dev->bus_rescan_work);
+ }
}
cl->status = mei_cl_conn_status_to_errno(rs->status);
}
@@ -866,7 +875,7 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
cl = mei_hbm_cl_find_by_cmd(dev, disconnect_req);
if (cl) {
- cl_dbg(dev, cl, "fw disconnect request received\n");
+ cl_warn(dev, cl, "fw disconnect request received\n");
cl->state = MEI_FILE_DISCONNECTING;
cl->timer_count = 0;
@@ -972,6 +981,9 @@ static void mei_hbm_config_features(struct mei_device *dev)
if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
dev->hbm_f_dc_supported = 1;
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_IE)
+ dev->hbm_f_ie_supported = 1;
+
/* disconnect on connect timeout instead of link reset */
if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
dev->hbm_f_dot_supported = 1;
@@ -979,6 +991,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
/* Notification Event Support */
if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
dev->hbm_f_ev_supported = 1;
+
+ /* Fixed Address Client Support */
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
+ dev->hbm_f_fa_supported = 1;
}
/**
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index a8a68acd3267..0dcb854b4bfc 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -121,6 +121,10 @@
#define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */
#define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
#define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
+
+#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
+#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 25b1997a62cb..e2fb44cc5c37 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -189,8 +189,11 @@ static int mei_me_fw_status(struct mei_device *dev,
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
- ret = pci_read_config_dword(pdev,
- fw_src->status[i], &fw_status->status[i]);
+ ret = pci_read_config_dword(pdev, fw_src->status[i],
+ &fw_status->status[i]);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ fw_src->status[i],
+ fw_status->status[i]);
if (ret)
return ret;
}
@@ -215,6 +218,7 @@ static void mei_me_hw_config(struct mei_device *dev)
reg = 0;
pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
@@ -1248,6 +1252,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
u32 reg;
pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
+ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_2", PCI_CFG_HFS_2, reg);
/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
return (reg & 0x600) == 0x200;
}
@@ -1260,6 +1265,7 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
u32 reg;
/* Read ME FW Status check for SPS Firmware */
pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
/* if bits [19:16] = 15, running SPS Firmware */
return (reg & 0xf0000) == 0xf0000;
}
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index bae680c648ff..4a6c1b85f11e 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -28,6 +28,9 @@
#include "client.h"
#include "hbm.h"
+#include "mei-trace.h"
+
+
/**
* mei_txe_reg_read - Reads 32bit data from the txe device
*
@@ -640,8 +643,11 @@ static int mei_txe_fw_status(struct mei_device *dev,
fw_status->count = fw_src->count;
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
- ret = pci_read_config_dword(pdev,
- fw_src->status[i], &fw_status->status[i]);
+ ret = pci_read_config_dword(pdev, fw_src->status[i],
+ &fw_status->status[i]);
+ trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ fw_src->status[i],
+ fw_status->status[i]);
if (ret)
return ret;
}
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 4cebde85924f..9daf3f9aed25 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -29,7 +29,6 @@
#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */
#define MEI_IAMTHIF_STALL_TIMER 12 /* HPS */
-#define MEI_IAMTHIF_READ_TIMER 10 /* HPS */
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
@@ -54,6 +53,12 @@
#define HBM_MAJOR_VERSION_DC 2
/*
+ * MEI version with immediate reply to enum request support
+ */
+#define HBM_MINOR_VERSION_IE 0
+#define HBM_MAJOR_VERSION_IE 2
+
+/*
* MEI version with disconnect on connection timeout support
*/
#define HBM_MINOR_VERSION_DOT 0
@@ -65,6 +70,12 @@
#define HBM_MINOR_VERSION_EV 0
#define HBM_MAJOR_VERSION_EV 2
+/*
+ * MEI version with fixed address client support
+ */
+#define HBM_MINOR_VERSION_FA 0
+#define HBM_MAJOR_VERSION_FA 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
@@ -241,15 +252,26 @@ struct hbm_me_stop_request {
} __packed;
/**
- * struct hbm_host_enum_request - enumeration request from host to fw
+ * enum hbm_host_enum_flags - enumeration request flags (HBM version >= 2.0)
*
- * @hbm_cmd: bus message command header
- * @allow_add: allow dynamic clients add HBM version >= 2.0
+ * @MEI_HBM_ENUM_F_ALLOW_ADD: allow addition of dynamic clients
+ * @MEI_HBM_ENUM_F_IMMEDIATE_ENUM: allow FW to send the answer immediately
+ */
+enum hbm_host_enum_flags {
+ MEI_HBM_ENUM_F_ALLOW_ADD = BIT(0),
+ MEI_HBM_ENUM_F_IMMEDIATE_ENUM = BIT(1),
+};
+
+/**
+ * struct hbm_host_enum_request - enumeration request from host to fw
+ *
+ * @hbm_cmd : bus message command header
+ * @flags : request flags
* @reserved: reserved
*/
struct hbm_host_enum_request {
u8 hbm_cmd;
- u8 allow_add;
+ u8 flags;
u8 reserved[2];
} __packed;
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 3edafc8d3ad4..f7c8dfdb6a12 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -91,8 +91,8 @@ EXPORT_SYMBOL_GPL(mei_fw_status2str);
*/
void mei_cancel_work(struct mei_device *dev)
{
- cancel_work_sync(&dev->init_work);
cancel_work_sync(&dev->reset_work);
+ cancel_work_sync(&dev->bus_rescan_work);
cancel_delayed_work(&dev->timer_work);
}
@@ -148,16 +148,10 @@ int mei_reset(struct mei_device *dev)
state != MEI_DEV_POWER_UP) {
/* remove all waiting requests */
- mei_cl_all_write_clear(dev);
-
mei_cl_all_disconnect(dev);
- /* wake up all readers and writers so they can be interrupted */
- mei_cl_all_wakeup(dev);
-
/* remove entry if already in list */
- dev_dbg(dev->dev, "remove iamthif and wd from the file list.\n");
- mei_cl_unlink(&dev->wd_cl);
+ dev_dbg(dev->dev, "remove iamthif from the file list.\n");
mei_cl_unlink(&dev->iamthif_cl);
mei_amthif_reset_params(dev);
}
@@ -165,7 +159,6 @@ int mei_reset(struct mei_device *dev)
mei_hbm_reset(dev);
dev->rd_msg_hdr = 0;
- dev->wd_pending = false;
if (ret) {
dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
@@ -335,16 +328,12 @@ void mei_stop(struct mei_device *dev)
mutex_lock(&dev->device_lock);
- mei_wd_stop(dev);
-
dev->dev_state = MEI_DEV_POWER_DOWN;
mei_reset(dev);
/* move device to disabled state unconditionally */
dev->dev_state = MEI_DEV_DISABLED;
mutex_unlock(&dev->device_lock);
-
- mei_watchdog_unregister(dev);
}
EXPORT_SYMBOL_GPL(mei_stop);
@@ -394,7 +383,6 @@ void mei_device_init(struct mei_device *dev,
init_waitqueue_head(&dev->wait_hw_ready);
init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_hbm_start);
- init_waitqueue_head(&dev->wait_stop_wd);
dev->dev_state = MEI_DEV_INITIALIZING;
dev->reset_count = 0;
@@ -404,13 +392,11 @@ void mei_device_init(struct mei_device *dev,
mei_io_list_init(&dev->ctrl_rd_list);
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- INIT_WORK(&dev->init_work, mei_host_client_init);
INIT_WORK(&dev->reset_work, mei_reset_work);
+ INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);
- INIT_LIST_HEAD(&dev->wd_cl.link);
INIT_LIST_HEAD(&dev->iamthif_cl.link);
mei_io_list_init(&dev->amthif_cmd_list);
- mei_io_list_init(&dev->amthif_rd_complete_list);
bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 64b568a0268d..1e5cb1f704f8 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -48,7 +48,7 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
dev_dbg(dev->dev, "completing call back.\n");
if (cl == &dev->iamthif_cl)
- mei_amthif_complete(dev, cb);
+ mei_amthif_complete(cl, cb);
else
mei_cl_complete(cl, cb);
}
@@ -104,6 +104,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
unsigned char *buffer = NULL;
+ size_t buf_sz;
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
if (!cb) {
@@ -124,11 +125,21 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
goto out;
}
- if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
- cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
+ buf_sz = mei_hdr->length + cb->buf_idx;
+ /* catch for integer overflow */
+ if (buf_sz < cb->buf_idx) {
+ cl_err(dev, cl, "message is too big len %d idx %zu\n",
+ mei_hdr->length, cb->buf_idx);
+
+ list_move_tail(&cb->list, &complete_list->list);
+ cb->status = -EMSGSIZE;
+ goto out;
+ }
+
+ if (cb->buf.size < buf_sz) {
+ cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
cb->buf.size, mei_hdr->length, cb->buf_idx);
- buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
- GFP_KERNEL);
+ buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL);
if (!buffer) {
cb->status = -ENOMEM;
@@ -136,7 +147,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
goto out;
}
cb->buf.data = buffer;
- cb->buf.size = mei_hdr->length + cb->buf_idx;
+ cb->buf.size = buf_sz;
}
buffer = cb->buf.data + cb->buf_idx;
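The new buf_sz test above relies on the standard unsigned wrap-around idiom before the buffer is grown with krealloc(). A self-contained illustration of that check (helper name hypothetical):

	#include <linux/types.h>

	/* For unsigned types, a + b wraps exactly when the result is
	 * smaller than either operand, so a single comparison catches it.
	 */
	static bool add_would_overflow(size_t a, size_t b, size_t *sum)
	{
		*sum = a + b;
		return *sum < a;	/* wrapped past SIZE_MAX */
	}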
@@ -145,8 +156,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
cb->buf_idx += mei_hdr->length;
if (mei_hdr->msg_complete) {
- cb->read_time = jiffies;
- cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
+ cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
list_move_tail(&cb->list, &complete_list->list);
} else {
pm_runtime_mark_last_busy(dev->dev);
@@ -229,6 +239,16 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return 0;
}
+static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
+{
+ return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
+}
+
+static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
+{
+ return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
+}
+
/**
* mei_irq_read_handler - bottom half read routine after ISR to
* handle the read processing.
@@ -270,7 +290,7 @@ int mei_irq_read_handler(struct mei_device *dev,
}
/* HBM message */
- if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
+ if (hdr_is_hbm(mei_hdr)) {
ret = mei_hbm_dispatch(dev, mei_hdr);
if (ret) {
dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
@@ -290,6 +310,14 @@ int mei_irq_read_handler(struct mei_device *dev,
/* if no recipient cl was found we assume corrupted header */
if (&cl->link == &dev->file_list) {
+ /* A message for a not-connected fixed address client
+ * should be silently discarded
+ */
+ if (hdr_is_fixed(mei_hdr)) {
+ mei_irq_discard_msg(dev, mei_hdr);
+ ret = 0;
+ goto reset_slots;
+ }
dev_err(dev->dev, "no destination client found 0x%08X\n",
dev->rd_msg_hdr);
ret = -EBADMSG;
@@ -360,21 +388,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
list_move_tail(&cb->list, &cmpl_list->list);
}
- if (dev->wd_state == MEI_WD_STOPPING) {
- dev->wd_state = MEI_WD_IDLE;
- wake_up(&dev->wait_stop_wd);
- }
-
- if (mei_cl_is_connected(&dev->wd_cl)) {
- if (dev->wd_pending &&
- mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
- ret = mei_wd_send(dev);
- if (ret)
- return ret;
- dev->wd_pending = false;
- }
- }
-
/* complete control write list CB */
dev_dbg(dev->dev, "complete control write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
@@ -462,7 +475,6 @@ static void mei_connect_timeout(struct mei_cl *cl)
*/
void mei_timer(struct work_struct *work)
{
- unsigned long timeout;
struct mei_cl *cl;
struct mei_device *dev = container_of(work,
@@ -508,45 +520,15 @@ void mei_timer(struct work_struct *work)
mei_reset(dev);
dev->iamthif_canceled = false;
dev->iamthif_state = MEI_IAMTHIF_IDLE;
- dev->iamthif_timer = 0;
mei_io_cb_free(dev->iamthif_current_cb);
dev->iamthif_current_cb = NULL;
- dev->iamthif_file_object = NULL;
+ dev->iamthif_fp = NULL;
mei_amthif_run_next_cmd(dev);
}
}
- if (dev->iamthif_timer) {
-
- timeout = dev->iamthif_timer +
- mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-
- dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
- dev->iamthif_timer);
- dev_dbg(dev->dev, "timeout = %ld\n", timeout);
- dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
- if (time_after(jiffies, timeout)) {
- /*
- * User didn't read the AMTHI data on time (15sec)
- * freeing AMTHI for other requests
- */
-
- dev_dbg(dev->dev, "freeing AMTHI for other requests\n");
-
- mei_io_list_flush(&dev->amthif_rd_complete_list,
- &dev->iamthif_cl);
- mei_io_cb_free(dev->iamthif_current_cb);
- dev->iamthif_current_cb = NULL;
-
- dev->iamthif_file_object->private_data = NULL;
- dev->iamthif_file_object = NULL;
- dev->iamthif_timer = 0;
- mei_amthif_run_next_cmd(dev);
-
- }
- }
out:
if (dev->dev_state != MEI_DEV_DISABLED)
schedule_delayed_work(&dev->timer_work, 2 * HZ);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 80f9afcb1382..52635b063873 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -65,7 +65,7 @@ static int mei_open(struct inode *inode, struct file *file)
goto err_unlock;
}
- cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
+ cl = mei_cl_alloc_linked(dev);
if (IS_ERR(cl)) {
err = PTR_ERR(cl);
goto err_unlock;
@@ -159,27 +159,22 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}
+ if (ubuf == NULL) {
+ rets = -EMSGSIZE;
+ goto out;
+ }
+
if (cl == &dev->iamthif_cl) {
rets = mei_amthif_read(dev, file, ubuf, length, offset);
goto out;
}
cb = mei_cl_read_cb(cl, file);
- if (cb) {
- /* read what left */
- if (cb->buf_idx > *offset)
- goto copy_buffer;
- /* offset is beyond buf_idx we have no more data return 0 */
- if (cb->buf_idx > 0 && cb->buf_idx <= *offset) {
- rets = 0;
- goto free;
- }
- /* Offset needs to be cleaned for contiguous reads*/
- if (cb->buf_idx == 0 && *offset > 0)
- *offset = 0;
- } else if (*offset > 0) {
+ if (cb)
+ goto copy_buffer;
+
+ if (*offset > 0)
*offset = 0;
- }
err = mei_cl_read_start(cl, length, file);
if (err && err != -EBUSY) {
@@ -214,11 +209,6 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
cb = mei_cl_read_cb(cl, file);
if (!cb) {
- if (mei_cl_is_fixed_address(cl) && dev->allow_fixed_address) {
- cb = mei_cl_read_cb(cl, NULL);
- if (cb)
- goto copy_buffer;
- }
rets = 0;
goto out;
}
@@ -231,10 +221,10 @@ copy_buffer:
goto free;
}
- cl_dbg(dev, cl, "buf.size = %d buf.idx = %ld\n",
- cb->buf.size, cb->buf_idx);
- if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
- rets = -EMSGSIZE;
+ cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
+ cb->buf.size, cb->buf_idx, *offset);
+ if (*offset >= cb->buf_idx) {
+ rets = 0;
goto free;
}
@@ -250,11 +240,13 @@ copy_buffer:
rets = length;
*offset += length;
- if ((unsigned long)*offset < cb->buf_idx)
+ /* not all data was read, keep the cb */
+ if (*offset < cb->buf_idx)
goto out;
free:
mei_io_cb_free(cb);
+ *offset = 0;
out:
cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
@@ -275,9 +267,8 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
- struct mei_cl_cb *write_cb = NULL;
+ struct mei_cl_cb *cb;
struct mei_device *dev;
- unsigned long timeout = 0;
int rets;
if (WARN_ON(!cl || !cl->dev))
@@ -313,52 +304,31 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out;
}
- if (cl == &dev->iamthif_cl) {
- write_cb = mei_amthif_find_read_list_entry(dev, file);
-
- if (write_cb) {
- timeout = write_cb->read_time +
- mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-
- if (time_after(jiffies, timeout)) {
- *offset = 0;
- mei_io_cb_free(write_cb);
- write_cb = NULL;
- }
- }
- }
-
*offset = 0;
- write_cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
- if (!write_cb) {
+ cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
+ if (!cb) {
rets = -ENOMEM;
goto out;
}
- rets = copy_from_user(write_cb->buf.data, ubuf, length);
+ rets = copy_from_user(cb->buf.data, ubuf, length);
if (rets) {
dev_dbg(dev->dev, "failed to copy data from userland\n");
rets = -EFAULT;
+ mei_io_cb_free(cb);
goto out;
}
if (cl == &dev->iamthif_cl) {
- rets = mei_amthif_write(cl, write_cb);
-
- if (rets) {
- dev_err(dev->dev,
- "amthif write failed with status = %d\n", rets);
- goto out;
- }
- mutex_unlock(&dev->device_lock);
- return length;
+ rets = mei_amthif_write(cl, cb);
+ if (!rets)
+ rets = length;
+ goto out;
}
- rets = mei_cl_write(cl, write_cb, false);
+ rets = mei_cl_write(cl, cb, false);
out:
mutex_unlock(&dev->device_lock);
- if (rets < 0)
- mei_io_cb_free(write_cb);
return rets;
}
@@ -393,12 +363,22 @@ static int mei_ioctl_connect_client(struct file *file,
/* find ME client we're trying to connect to */
me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
- if (!me_cl ||
- (me_cl->props.fixed_address && !dev->allow_fixed_address)) {
+ if (!me_cl) {
dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
&data->in_client_uuid);
- mei_me_cl_put(me_cl);
- return -ENOTTY;
+ rets = -ENOTTY;
+ goto end;
+ }
+
+ if (me_cl->props.fixed_address) {
+ bool forbidden = dev->override_fixed_address ?
+ !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
+ if (forbidden) {
+ dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
+ &data->in_client_uuid);
+ rets = -ENOTTY;
+ goto end;
+ }
}
dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
@@ -454,7 +434,7 @@ end:
*
* Return: 0 on success , <0 on error
*/
-static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
struct mei_cl *cl = file->private_data;
@@ -473,7 +453,7 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
*
* Return: 0 on success , <0 on error
*/
-static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get)
+static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
struct mei_cl *cl = file->private_data;
bool notify_ev;
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
index 388efb519138..e19e6acb191b 100644
--- a/drivers/misc/mei/mei-trace.c
+++ b/drivers/misc/mei/mei-trace.c
@@ -22,4 +22,6 @@
EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
+EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
+EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write);
#endif /* __CHECKER__ */
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index 47e1bc6551d4..7d2d5d4a1624 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -60,7 +60,45 @@ TRACE_EVENT(mei_reg_write,
__entry->offs = offs;
__entry->val = val;
),
- TP_printk("[%s] write %s[%#x] = %#x)",
+ TP_printk("[%s] write %s[%#x] = %#x",
+ __get_str(dev), __entry->reg, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_pci_cfg_read,
+ TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(const char *, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev))
+ __entry->reg = reg;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] pci cfg read %s:[%#x] = %#x",
+ __get_str(dev), __entry->reg, __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(mei_pci_cfg_write,
+ TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
+ TP_ARGS(dev, reg, offs, val),
+ TP_STRUCT__entry(
+ __string(dev, dev_name(dev))
+ __field(const char *, reg)
+ __field(u32, offs)
+ __field(u32, val)
+ ),
+ TP_fast_assign(
+ __assign_str(dev, dev_name(dev))
+ __entry->reg = reg;
+ __entry->offs = offs;
+ __entry->val = val;
+ ),
+ TP_printk("[%s] pci cfg write %s[%#x] = %#x",
__get_str(dev), __entry->reg, __entry->offs, __entry->val)
);
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 4250555d5e72..db78e6d99456 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -18,7 +18,7 @@
#define _MEI_DEV_H_
#include <linux/types.h>
-#include <linux/watchdog.h>
+#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
@@ -26,33 +26,13 @@
#include "hw.h"
#include "hbm.h"
-/*
- * watch dog definition
- */
-#define MEI_WD_HDR_SIZE 4
-#define MEI_WD_STOP_MSG_SIZE MEI_WD_HDR_SIZE
-#define MEI_WD_START_MSG_SIZE (MEI_WD_HDR_SIZE + 16)
-
-#define MEI_WD_DEFAULT_TIMEOUT 120 /* seconds */
-#define MEI_WD_MIN_TIMEOUT 120 /* seconds */
-#define MEI_WD_MAX_TIMEOUT 65535 /* seconds */
-
-#define MEI_WD_STOP_TIMEOUT 10 /* msecs */
-
-#define MEI_WD_STATE_INDEPENDENCE_MSG_SENT (1 << 0)
-
-#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
-
/*
* AMTHI Client UUID
*/
extern const uuid_le mei_amthif_guid;
-/*
- * Watchdog Client UUID
- */
-extern const uuid_le mei_wd_guid;
+#define MEI_RD_MSG_BUF_SIZE (128 * sizeof(u32))
/*
* Number of Maximum MEI Clients
@@ -73,15 +53,6 @@ extern const uuid_le mei_wd_guid;
*/
#define MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
-/*
- * Internal Clients Number
- */
-#define MEI_HOST_CLIENT_ID_ANY (-1)
-#define MEI_HBM_HOST_CLIENT_ID 0 /* not used, just for documentation */
-#define MEI_WD_HOST_CLIENT_ID 1
-#define MEI_IAMTHIF_HOST_CLIENT_ID 2
-
-
/* File state */
enum file_state {
MEI_FILE_INITIALIZING = 0,
@@ -123,12 +94,6 @@ enum mei_file_transaction_states {
MEI_READ_COMPLETE
};
-enum mei_wd_states {
- MEI_WD_IDLE,
- MEI_WD_RUNNING,
- MEI_WD_STOPPING,
-};
-
/**
* enum mei_cb_file_ops - file operation associated with the callback
* @MEI_FOP_READ: read
@@ -153,7 +118,7 @@ enum mei_cb_file_ops {
* Intel MEI message data struct
*/
struct mei_msg_data {
- u32 size;
+ size_t size;
unsigned char *data;
};
@@ -206,8 +171,7 @@ struct mei_cl;
* @fop_type: file operation type
* @buf: buffer for data associated with the callback
* @buf_idx: last read index
- * @read_time: last read operation time stamp (iamthif)
- * @file_object: pointer to file structure
+ * @fp: pointer to file structure
* @status: io status of the cb
* @internal: communication between driver and FW flag
* @completed: the transfer or reception has completed
@@ -217,9 +181,8 @@ struct mei_cl_cb {
struct mei_cl *cl;
enum mei_cb_file_ops fop_type;
struct mei_msg_data buf;
- unsigned long buf_idx;
- unsigned long read_time;
- struct file *file_object;
+ size_t buf_idx;
+ const struct file *fp;
int status;
u32 internal:1;
u32 completed:1;
@@ -341,12 +304,13 @@ struct mei_hw_ops {
/* MEI bus API*/
void mei_cl_bus_rescan(struct mei_device *bus);
+void mei_cl_bus_rescan_work(struct work_struct *work);
void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
bool blocking);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
-void mei_cl_bus_rx_event(struct mei_cl *cl);
-void mei_cl_bus_notify_event(struct mei_cl *cl);
+bool mei_cl_bus_rx_event(struct mei_cl *cl);
+bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);
@@ -404,7 +368,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @wait_hw_ready : wait queue for receive HW ready message form FW
* @wait_pg : wait queue for receive PG message from FW
* @wait_hbm_start : wait queue for receive HBM start message from FW
- * @wait_stop_wd : wait queue for receive WD stop message from FW
*
* @reset_count : number of consecutive resets
* @dev_state : device state
@@ -426,6 +389,8 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @hbm_f_dc_supported : hbm feature dynamic clients
* @hbm_f_dot_supported : hbm feature disconnect on timeout
* @hbm_f_ev_supported : hbm feature event notification
+ * @hbm_f_fa_supported : hbm feature fixed address client
+ * @hbm_f_ie_supported : hbm feature immediate reply to enum request
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -434,26 +399,19 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @me_client_index : last FW client index in enumeration
*
* @allow_fixed_address: allow user space to connect a fixed client
- *
- * @wd_cl : watchdog client
- * @wd_state : watchdog client state
- * @wd_pending : watchdog command is pending
- * @wd_timeout : watchdog expiration timeout
- * @wd_data : watchdog message buffer
+ * @override_fixed_address: force the allow_fixed_address setting to take effect
*
* @amthif_cmd_list : amthif list for cmd waiting
- * @amthif_rd_complete_list : amthif list for reading completed cmd data
- * @iamthif_file_object : file for current amthif operation
+ * @iamthif_fp : file for current amthif operation
* @iamthif_cl : amthif host client
* @iamthif_current_cb : amthif current operation callback
* @iamthif_open_count : number of opened amthif connections
- * @iamthif_timer : time stamp of current amthif command completion
* @iamthif_stall_timer : timer to detect amthif hang
* @iamthif_state : amthif processor state
* @iamthif_canceled : current amthif command is canceled
*
- * @init_work : work item for the device init
* @reset_work : work item for the device reset
+ * @bus_rescan_work : work item for the bus rescan
*
* @device_list : mei client bus list
* @cl_bus_lock : client bus list lock
@@ -486,7 +444,6 @@ struct mei_device {
wait_queue_head_t wait_hw_ready;
wait_queue_head_t wait_pg;
wait_queue_head_t wait_hbm_start;
- wait_queue_head_t wait_stop_wd;
/*
* mei device states
@@ -522,6 +479,8 @@ struct mei_device {
unsigned int hbm_f_dc_supported:1;
unsigned int hbm_f_dot_supported:1;
unsigned int hbm_f_ev_supported:1;
+ unsigned int hbm_f_fa_supported:1;
+ unsigned int hbm_f_ie_supported:1;
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
@@ -530,29 +489,21 @@ struct mei_device {
unsigned long me_client_index;
bool allow_fixed_address;
-
- struct mei_cl wd_cl;
- enum mei_wd_states wd_state;
- bool wd_pending;
- u16 wd_timeout;
- unsigned char wd_data[MEI_WD_START_MSG_SIZE];
-
+ bool override_fixed_address;
/* amthif list for cmd waiting */
struct mei_cl_cb amthif_cmd_list;
/* driver managed amthif list for reading completed amthif cmd data */
- struct mei_cl_cb amthif_rd_complete_list;
- struct file *iamthif_file_object;
+ const struct file *iamthif_fp;
struct mei_cl iamthif_cl;
struct mei_cl_cb *iamthif_current_cb;
long iamthif_open_count;
- unsigned long iamthif_timer;
u32 iamthif_stall_timer;
enum iamthif_states iamthif_state;
bool iamthif_canceled;
- struct work_struct init_work;
struct work_struct reset_work;
+ struct work_struct bus_rescan_work;
/* List of bus devices */
struct list_head device_list;
@@ -635,47 +586,18 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
int mei_amthif_release(struct mei_device *dev, struct file *file);
-struct mei_cl_cb *mei_amthif_find_read_list_entry(struct mei_device *dev,
- struct file *file);
-
int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_amthif_run_next_cmd(struct mei_device *dev);
int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
-void mei_amthif_complete(struct mei_device *dev, struct mei_cl_cb *cb);
+void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_amthif_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
struct mei_cl_cb *complete_list);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
/*
- * NFC functions
- */
-int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-void mei_nfc_host_exit(struct mei_device *dev);
-
-/*
- * NFC Client UUID
- */
-extern const uuid_le mei_nfc_guid;
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl);
-/*
- * mei_watchdog_register - Registering watchdog interface
- * once we got connection to the WD Client
- * @dev: mei device
- */
-int mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister - Unregistering watchdog interface
- * @dev: mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
-/*
* Register Access Function
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 75fc9c688df8..64e64da6da44 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -88,6 +88,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+
/* required last entry */
{0, }
};
@@ -210,7 +213,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = mei_register(dev, &pdev->dev);
if (err)
- goto release_irq;
+ goto stop;
pci_set_drvdata(pdev, dev);
@@ -231,6 +234,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+stop:
+ mei_stop(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 71f8a7475717..30cc30683c07 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -154,7 +154,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = mei_register(dev, &pdev->dev);
if (err)
- goto release_irq;
+ goto stop;
pci_set_drvdata(pdev, dev);
@@ -170,6 +170,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+stop:
+ mei_stop(dev);
release_irq:
mei_cancel_work(dev);
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
deleted file mode 100644
index b346638833b0..000000000000
--- a/drivers/misc/mei/wd.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/sched.h>
-#include <linux/watchdog.h>
-
-#include <linux/mei.h>
-
-#include "mei_dev.h"
-#include "hbm.h"
-#include "client.h"
-
-static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
-static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
-
-/*
- * AMT Watchdog Device
- */
-#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
-
-/* UUIDs for AMT F/W clients */
-const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89,
- 0x9D, 0xA9, 0x15, 0x14, 0xCB,
- 0x32, 0xAB);
-
-static void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
-{
- dev_dbg(dev->dev, "wd: set timeout=%d.\n", timeout);
- memcpy(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE);
- memcpy(dev->wd_data + MEI_WD_HDR_SIZE, &timeout, sizeof(u16));
-}
-
-/**
- * mei_wd_host_init - connect to the watchdog client
- *
- * @dev: the device structure
- * @me_cl: me client
- *
- * Return: -ENOTTY if wd client cannot be found
- * -EIO if write has failed
- * 0 on success
- */
-int mei_wd_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
-{
- struct mei_cl *cl = &dev->wd_cl;
- int ret;
-
- mei_cl_init(cl, dev);
-
- dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
- dev->wd_state = MEI_WD_IDLE;
-
- ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
- if (ret < 0) {
- dev_info(dev->dev, "wd: failed link client\n");
- return ret;
- }
-
- ret = mei_cl_connect(cl, me_cl, NULL);
- if (ret) {
- dev_err(dev->dev, "wd: failed to connect = %d\n", ret);
- mei_cl_unlink(cl);
- return ret;
- }
-
- ret = mei_watchdog_register(dev);
- if (ret) {
- mei_cl_disconnect(cl);
- mei_cl_unlink(cl);
- }
- return ret;
-}
-
-/**
- * mei_wd_send - sends watch dog message to fw.
- *
- * @dev: the device structure
- *
- * Return: 0 if success,
- * -EIO when message send fails
- * -EINVAL when invalid message is to be sent
- * -ENODEV on flow control failure
- */
-int mei_wd_send(struct mei_device *dev)
-{
- struct mei_cl *cl = &dev->wd_cl;
- struct mei_msg_hdr hdr;
- int ret;
-
- hdr.host_addr = cl->host_client_id;
- hdr.me_addr = mei_cl_me_id(cl);
- hdr.msg_complete = 1;
- hdr.reserved = 0;
- hdr.internal = 0;
-
- if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
- hdr.length = MEI_WD_START_MSG_SIZE;
- else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
- hdr.length = MEI_WD_STOP_MSG_SIZE;
- else {
- dev_err(dev->dev, "wd: invalid message is to be sent, aborting\n");
- return -EINVAL;
- }
-
- ret = mei_write_message(dev, &hdr, dev->wd_data);
- if (ret) {
- dev_err(dev->dev, "wd: write message failed\n");
- return ret;
- }
-
- ret = mei_cl_flow_ctrl_reduce(cl);
- if (ret) {
- dev_err(dev->dev, "wd: flow_ctrl_reduce failed.\n");
- return ret;
- }
-
- return 0;
-}
-
-/**
- * mei_wd_stop - sends watchdog stop message to fw.
- *
- * @dev: the device structure
- *
- * Return: 0 if success
- * on error:
- * -EIO when message send fails
- * -EINVAL when invalid message is to be sent
- * -ETIME on message timeout
- */
-int mei_wd_stop(struct mei_device *dev)
-{
- struct mei_cl *cl = &dev->wd_cl;
- int ret;
-
- if (!mei_cl_is_connected(cl) ||
- dev->wd_state != MEI_WD_RUNNING)
- return 0;
-
- memcpy(dev->wd_data, mei_stop_wd_params, MEI_WD_STOP_MSG_SIZE);
-
- dev->wd_state = MEI_WD_STOPPING;
-
- ret = mei_cl_flow_ctrl_creds(cl);
- if (ret < 0)
- goto err;
-
- if (ret && mei_hbuf_acquire(dev)) {
- ret = mei_wd_send(dev);
- if (ret)
- goto err;
- dev->wd_pending = false;
- } else {
- dev->wd_pending = true;
- }
-
- mutex_unlock(&dev->device_lock);
-
- ret = wait_event_timeout(dev->wait_stop_wd,
- dev->wd_state == MEI_WD_IDLE,
- msecs_to_jiffies(MEI_WD_STOP_TIMEOUT));
- mutex_lock(&dev->device_lock);
- if (dev->wd_state != MEI_WD_IDLE) {
- /* timeout */
- ret = -ETIME;
- dev_warn(dev->dev, "wd: stop failed to complete ret=%d\n", ret);
- goto err;
- }
- dev_dbg(dev->dev, "wd: stop completed after %u msec\n",
- MEI_WD_STOP_TIMEOUT - jiffies_to_msecs(ret));
- return 0;
-err:
- return ret;
-}
-
-/**
- * mei_wd_ops_start - wd start command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_start(struct watchdog_device *wd_dev)
-{
- struct mei_device *dev;
- struct mei_cl *cl;
- int err = -ENODEV;
-
- dev = watchdog_get_drvdata(wd_dev);
- if (!dev)
- return -ENODEV;
-
- cl = &dev->wd_cl;
-
- mutex_lock(&dev->device_lock);
-
- if (dev->dev_state != MEI_DEV_ENABLED) {
- dev_dbg(dev->dev, "wd: dev_state != MEI_DEV_ENABLED dev_state = %s\n",
- mei_dev_state_str(dev->dev_state));
- goto end_unlock;
- }
-
- if (!mei_cl_is_connected(cl)) {
- cl_dbg(dev, cl, "MEI Driver is not connected to Watchdog Client\n");
- goto end_unlock;
- }
-
- mei_wd_set_start_timeout(dev, dev->wd_timeout);
-
- err = 0;
-end_unlock:
- mutex_unlock(&dev->device_lock);
- return err;
-}
-
-/**
- * mei_wd_ops_stop - wd stop command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
-{
- struct mei_device *dev;
-
- dev = watchdog_get_drvdata(wd_dev);
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&dev->device_lock);
- mei_wd_stop(dev);
- mutex_unlock(&dev->device_lock);
-
- return 0;
-}
-
-/**
- * mei_wd_ops_ping - wd ping command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
-{
- struct mei_device *dev;
- struct mei_cl *cl;
- int ret;
-
- dev = watchdog_get_drvdata(wd_dev);
- if (!dev)
- return -ENODEV;
-
- cl = &dev->wd_cl;
-
- mutex_lock(&dev->device_lock);
-
- if (!mei_cl_is_connected(cl)) {
- cl_err(dev, cl, "wd: not connected.\n");
- ret = -ENODEV;
- goto end;
- }
-
- dev->wd_state = MEI_WD_RUNNING;
-
- ret = mei_cl_flow_ctrl_creds(cl);
- if (ret < 0)
- goto end;
-
- /* Check if we can send the ping to HW*/
- if (ret && mei_hbuf_acquire(dev)) {
- dev_dbg(dev->dev, "wd: sending ping\n");
-
- ret = mei_wd_send(dev);
- if (ret)
- goto end;
- dev->wd_pending = false;
- } else {
- dev->wd_pending = true;
- }
-
-end:
- mutex_unlock(&dev->device_lock);
- return ret;
-}
-
-/**
- * mei_wd_ops_set_timeout - wd set timeout command from the watchdog core.
- *
- * @wd_dev: watchdog device struct
- * @timeout: timeout value to set
- *
- * Return: 0 if success, negative errno code for failure
- */
-static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev,
- unsigned int timeout)
-{
- struct mei_device *dev;
-
- dev = watchdog_get_drvdata(wd_dev);
- if (!dev)
- return -ENODEV;
-
- /* Check Timeout value */
- if (timeout < MEI_WD_MIN_TIMEOUT || timeout > MEI_WD_MAX_TIMEOUT)
- return -EINVAL;
-
- mutex_lock(&dev->device_lock);
-
- dev->wd_timeout = timeout;
- wd_dev->timeout = timeout;
- mei_wd_set_start_timeout(dev, dev->wd_timeout);
-
- mutex_unlock(&dev->device_lock);
-
- return 0;
-}
-
-/*
- * Watchdog Device structs
- */
-static const struct watchdog_ops wd_ops = {
- .owner = THIS_MODULE,
- .start = mei_wd_ops_start,
- .stop = mei_wd_ops_stop,
- .ping = mei_wd_ops_ping,
- .set_timeout = mei_wd_ops_set_timeout,
-};
-static const struct watchdog_info wd_info = {
- .identity = INTEL_AMT_WATCHDOG_ID,
- .options = WDIOF_KEEPALIVEPING |
- WDIOF_SETTIMEOUT |
- WDIOF_ALARMONLY,
-};
-
-static struct watchdog_device amt_wd_dev = {
- .info = &wd_info,
- .ops = &wd_ops,
- .timeout = MEI_WD_DEFAULT_TIMEOUT,
- .min_timeout = MEI_WD_MIN_TIMEOUT,
- .max_timeout = MEI_WD_MAX_TIMEOUT,
-};
-
-
-int mei_watchdog_register(struct mei_device *dev)
-{
-
- int ret;
-
- amt_wd_dev.parent = dev->dev;
- /* unlock to perserve correct locking order */
- mutex_unlock(&dev->device_lock);
- ret = watchdog_register_device(&amt_wd_dev);
- mutex_lock(&dev->device_lock);
- if (ret) {
- dev_err(dev->dev, "wd: unable to register watchdog device = %d.\n",
- ret);
- return ret;
- }
-
- dev_dbg(dev->dev, "wd: successfully register watchdog interface.\n");
- watchdog_set_drvdata(&amt_wd_dev, dev);
- return 0;
-}
-
-void mei_watchdog_unregister(struct mei_device *dev)
-{
- if (watchdog_get_drvdata(&amt_wd_dev) == NULL)
- return;
-
- watchdog_set_drvdata(&amt_wd_dev, NULL);
- watchdog_unregister_device(&amt_wd_dev);
-}
-
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 40677df7f996..2e4f3ba75c8e 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -32,12 +32,29 @@ config SCIF_BUS
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
+comment "VOP Bus Driver"
+
+config VOP_BUS
+ tristate "VOP Bus Driver"
+ depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS
+ help
+ This option is selected by any driver which registers a
+ device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
+ and CONFIG_INTEL_MIC_CARD.
+
+ If you are building a host/card kernel with an Intel MIC device
+ then say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
+
comment "Intel MIC Host Driver"
config INTEL_MIC_HOST
tristate "Intel MIC Host Driver"
- depends on 64BIT && PCI && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
- select VHOST_RING
+ depends on 64BIT && PCI && X86
+ depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
help
This enables Host Driver support for the Intel Many Integrated
Core (MIC) family of PCIe form factor coprocessor devices that
@@ -56,7 +73,8 @@ comment "Intel MIC Card Driver"
config INTEL_MIC_CARD
tristate "Intel MIC Card Driver"
- depends on 64BIT && X86 && INTEL_MIC_BUS && SCIF_BUS && MIC_COSM
+ depends on 64BIT && X86
+ depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
select VIRTIO
help
This enables card driver support for the Intel Many Integrated
@@ -107,3 +125,23 @@ config MIC_COSM
More information about the Intel MIC family as well as the Linux
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
+
+comment "VOP Driver"
+
+config VOP
+ tristate "VOP Driver"
+ depends on 64BIT && PCI && X86 && VOP_BUS
+ select VHOST_RING
+ help
+ This enables VOP (Virtio over PCIe) Driver support for the Intel
+ Many Integrated Core (MIC) family of PCIe form factor coprocessor
+ devices. The VOP driver allows virtio drivers, e.g. net, console
+ and block drivers, on the card to connect to user space virtio
+ devices on the host.
+
+ If you are building a host kernel with an Intel MIC device then
+ say M (recommended) or Y, else say N. If unsure say N.
+
+ More information about the Intel MIC family as well as the Linux
+ OS and tools for MIC to use with this driver are available from
+ <http://software.intel.com/en-us/mic-developer>.
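Taken together, these Kconfig changes make the new bus a hard dependency. A hypothetical host-side .config fragment (an illustration assembled from the options above, not shipped in this patch) would now carry:

	CONFIG_VOP_BUS=m
	CONFIG_VOP=m
	CONFIG_INTEL_MIC_HOST=m

With VOP_BUS unset, INTEL_MIC_HOST, INTEL_MIC_CARD and VOP all become unselectable, since each now lists VOP_BUS in its depends on clause.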
diff --git a/drivers/misc/mic/Makefile b/drivers/misc/mic/Makefile
index e288a1106738..f2b1323ff96c 100644
--- a/drivers/misc/mic/Makefile
+++ b/drivers/misc/mic/Makefile
@@ -8,3 +8,4 @@ obj-y += bus/
obj-$(CONFIG_SCIF) += scif/
obj-$(CONFIG_MIC_COSM) += cosm/
obj-$(CONFIG_MIC_COSM) += cosm_client/
+obj-$(CONFIG_VOP) += vop/
diff --git a/drivers/misc/mic/bus/Makefile b/drivers/misc/mic/bus/Makefile
index 761842b0d0bb..8758a7daa52c 100644
--- a/drivers/misc/mic/bus/Makefile
+++ b/drivers/misc/mic/bus/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_INTEL_MIC_BUS) += mic_bus.o
obj-$(CONFIG_SCIF_BUS) += scif_bus.o
obj-$(CONFIG_MIC_COSM) += cosm_bus.o
+obj-$(CONFIG_VOP_BUS) += vop_bus.o
diff --git a/drivers/misc/mic/bus/cosm_bus.h b/drivers/misc/mic/bus/cosm_bus.h
index f7c57f266916..8b6341855dc3 100644
--- a/drivers/misc/mic/bus/cosm_bus.h
+++ b/drivers/misc/mic/bus/cosm_bus.h
@@ -30,6 +30,7 @@
* @attr_group: Pointer to list of sysfs attribute groups.
* @sdev: Device for sysfs entries.
* @state: MIC state.
+ * @prev_state: MIC state saved before entering MIC_RESETTING.
* @shutdown_status: MIC status reported by card for shutdown/crashes.
* @shutdown_status_int: Internal shutdown status maintained by the driver
* @cosm_mutex: Mutex for synchronizing access to data structures.
@@ -55,6 +56,7 @@ struct cosm_device {
const struct attribute_group **attr_group;
struct device *sdev;
u8 state;
+ u8 prev_state;
u8 shutdown_status;
u8 shutdown_status_int;
struct mutex cosm_mutex;
diff --git a/drivers/misc/mic/bus/vop_bus.c b/drivers/misc/mic/bus/vop_bus.c
new file mode 100644
index 000000000000..303da222f5b6
--- /dev/null
+++ b/drivers/misc/mic/bus/vop_bus.c
@@ -0,0 +1,203 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) Bus driver.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/dma-mapping.h>
+
+#include "vop_bus.h"
+
+static ssize_t device_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct vop_device *dev = dev_to_vop(d);
+
+ return sprintf(buf, "0x%04x\n", dev->id.device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t vendor_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct vop_device *dev = dev_to_vop(d);
+
+ return sprintf(buf, "0x%04x\n", dev->id.vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t modalias_show(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct vop_device *dev = dev_to_vop(d);
+
+ return sprintf(buf, "vop:d%08Xv%08X\n",
+ dev->id.device, dev->id.vendor);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *vop_dev_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vop_dev);
+
+static inline int vop_id_match(const struct vop_device *dev,
+ const struct vop_device_id *id)
+{
+ if (id->device != dev->id.device && id->device != VOP_DEV_ANY_ID)
+ return 0;
+
+ return id->vendor == VOP_DEV_ANY_ID || id->vendor == dev->id.vendor;
+}
+
+/*
+ * This looks through all the IDs a driver claims to support. If any of them
+ * match, we return 1 and the kernel will call vop_dev_probe().
+ */
+static int vop_dev_match(struct device *dv, struct device_driver *dr)
+{
+ unsigned int i;
+ struct vop_device *dev = dev_to_vop(dv);
+ const struct vop_device_id *ids;
+
+ ids = drv_to_vop(dr)->id_table;
+ for (i = 0; ids[i].device; i++)
+ if (vop_id_match(dev, &ids[i]))
+ return 1;
+ return 0;
+}
+
+static int vop_uevent(struct device *dv, struct kobj_uevent_env *env)
+{
+ struct vop_device *dev = dev_to_vop(dv);
+
+ return add_uevent_var(env, "MODALIAS=vop:d%08Xv%08X",
+ dev->id.device, dev->id.vendor);
+}
+
+static int vop_dev_probe(struct device *d)
+{
+ struct vop_device *dev = dev_to_vop(d);
+ struct vop_driver *drv = drv_to_vop(dev->dev.driver);
+
+ return drv->probe(dev);
+}
+
+static int vop_dev_remove(struct device *d)
+{
+ struct vop_device *dev = dev_to_vop(d);
+ struct vop_driver *drv = drv_to_vop(dev->dev.driver);
+
+ drv->remove(dev);
+ return 0;
+}
+
+static struct bus_type vop_bus = {
+ .name = "vop_bus",
+ .match = vop_dev_match,
+ .dev_groups = vop_dev_groups,
+ .uevent = vop_uevent,
+ .probe = vop_dev_probe,
+ .remove = vop_dev_remove,
+};
+
+int vop_register_driver(struct vop_driver *driver)
+{
+ driver->driver.bus = &vop_bus;
+ return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(vop_register_driver);
+
+void vop_unregister_driver(struct vop_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(vop_unregister_driver);
+
+static void vop_release_dev(struct device *d)
+{
+ put_device(d);
+}
+
+struct vop_device *
+vop_register_device(struct device *pdev, int id,
+ const struct dma_map_ops *dma_ops,
+ struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
+ struct dma_chan *chan)
+{
+ int ret;
+ struct vop_device *vdev;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return ERR_PTR(-ENOMEM);
+
+ vdev->dev.parent = pdev;
+ vdev->id.device = id;
+ vdev->id.vendor = VOP_DEV_ANY_ID;
+ vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops;
+ vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
+ dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
+ vdev->dev.release = vop_release_dev;
+ vdev->hw_ops = hw_ops;
+ vdev->dev.bus = &vop_bus;
+ vdev->dnode = dnode;
+ vdev->aper = aper;
+ vdev->dma_ch = chan;
+ vdev->index = dnode - 1;
+ dev_set_name(&vdev->dev, "vop-dev%u", vdev->index);
+ /*
+ * device_register() causes the bus infrastructure to look for a
+ * matching driver.
+ */
+ ret = device_register(&vdev->dev);
+ if (ret)
+ goto free_vdev;
+ return vdev;
+free_vdev:
+ kfree(vdev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vop_register_device);
+
+void vop_unregister_device(struct vop_device *dev)
+{
+ device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(vop_unregister_device);
+
+static int __init vop_init(void)
+{
+ return bus_register(&vop_bus);
+}
+
+static void __exit vop_exit(void)
+{
+ bus_unregister(&vop_bus);
+}
+
+core_initcall(vop_init);
+module_exit(vop_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) VOP Bus driver");
+MODULE_LICENSE("GPL v2");
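Note that modalias_show() and vop_uevent() above share a single format string, so the transport device created later in this series (id VOP_DEV_TRNSP == 1, vendor defaulted to VOP_DEV_ANY_ID by vop_register_device()) reports, working the format out by hand rather than from a captured log:

	MODALIAS=vop:d00000001vFFFFFFFF

which is the alias a driver's vop_device_id table has to match for automatic module loading.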
diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h
new file mode 100644
index 000000000000..fff7a865d721
--- /dev/null
+++ b/drivers/misc/mic/bus/vop_bus.h
@@ -0,0 +1,140 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio over PCIe Bus driver.
+ */
+#ifndef _VOP_BUS_H_
+#define _VOP_BUS_H_
+/*
+ * Everything a vop driver needs to work with any particular vop
+ * implementation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "../common/mic_dev.h"
+
+struct vop_device_id {
+ u32 device;
+ u32 vendor;
+};
+
+#define VOP_DEV_TRNSP 1
+#define VOP_DEV_ANY_ID 0xffffffff
+/*
+ * Size of the internal buffer used during DMAs as an intermediate buffer
+ * for copy to/from user. Must be an integral number of pages.
+ */
+#define VOP_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
+
+/**
+ * vop_device - representation of a device using vop
+ * @hw_ops: the hardware ops supported by this device.
+ * @id: the device type identification (used to match it with a driver).
+ * @dev: underlying device.
+ * @dnode: The destination node which this device will communicate with.
+ * @aper: Aperture memory window
+ * @dma_ch: DMA channel
+ * @index: unique position on the vop bus
+ */
+struct vop_device {
+ struct vop_hw_ops *hw_ops;
+ struct vop_device_id id;
+ struct device dev;
+ u8 dnode;
+ struct mic_mw *aper;
+ struct dma_chan *dma_ch;
+ int index;
+};
+
+/**
+ * vop_driver - operations for a vop I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ */
+struct vop_driver {
+ struct device_driver driver;
+ const struct vop_device_id *id_table;
+ int (*probe)(struct vop_device *dev);
+ void (*remove)(struct vop_device *dev);
+};
+
+/**
+ * vop_hw_ops - Hardware operations for accessing a VOP device on the VOP bus.
+ *
+ * @next_db: Obtain the next available doorbell.
+ * @request_irq: Request an interrupt on a particular doorbell.
+ * @free_irq: Free an interrupt requested previously.
+ * @ack_interrupt: Acknowledge an interrupt in the ISR.
+ * @get_remote_dp: Get access to the virtio device page used by the remote
+ * node to add/remove/configure virtio devices.
+ * @get_dp: Get access to the virtio device page used by the self
+ * node to add/remove/configure virtio devices.
+ * @send_intr: Send an interrupt to the peer node on a specified doorbell.
+ * @ioremap: Map a buffer with the specified DMA address and length.
+ * @iounmap: Unmap a buffer previously mapped.
+ * @dma_filter: The DMA filter function to use for obtaining access to
+ * a DMA channel on the peer node.
+ */
+struct vop_hw_ops {
+ int (*next_db)(struct vop_device *vpdev);
+ struct mic_irq *(*request_irq)(struct vop_device *vpdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data,
+ int intr_src);
+ void (*free_irq)(struct vop_device *vpdev,
+ struct mic_irq *cookie, void *data);
+ void (*ack_interrupt)(struct vop_device *vpdev, int num);
+ void __iomem * (*get_remote_dp)(struct vop_device *vpdev);
+ void * (*get_dp)(struct vop_device *vpdev);
+ void (*send_intr)(struct vop_device *vpdev, int db);
+ void __iomem * (*ioremap)(struct vop_device *vpdev,
+ dma_addr_t pa, size_t len);
+ void (*iounmap)(struct vop_device *vpdev, void __iomem *va);
+};
+
+struct vop_device *
+vop_register_device(struct device *pdev, int id,
+ const struct dma_map_ops *dma_ops,
+ struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper,
+ struct dma_chan *chan);
+void vop_unregister_device(struct vop_device *dev);
+int vop_register_driver(struct vop_driver *drv);
+void vop_unregister_driver(struct vop_driver *drv);
+
+/*
+ * module_vop_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_vop_driver(__vop_driver) \
+ module_driver(__vop_driver, vop_register_driver, \
+ vop_unregister_driver)
+
+static inline struct vop_device *dev_to_vop(struct device *dev)
+{
+ return container_of(dev, struct vop_device, dev);
+}
+
+static inline struct vop_driver *drv_to_vop(struct device_driver *drv)
+{
+ return container_of(drv, struct vop_driver, driver);
+}
+#endif /* _VOP_BUS_H_ */
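The header above is the whole client-facing API: a bus driver fills in struct vop_driver and lets module_vop_driver() generate the module init/exit glue. A minimal sketch follows (a hypothetical "sample" driver built only from the declarations in vop_bus.h; the real consumer added by this series lives under drivers/misc/mic/vop/ and is not shown here):

	#include <linux/device.h>
	#include <linux/module.h>
	#include "vop_bus.h"

	/*
	 * Match the virtio transport device; VOP_DEV_ANY_ID wildcards the
	 * vendor, mirroring how vop_register_device() fills in id.vendor.
	 * The zeroed sentinel terminates the table for vop_dev_match().
	 */
	static const struct vop_device_id sample_id_table[] = {
		{ .device = VOP_DEV_TRNSP, .vendor = VOP_DEV_ANY_ID },
		{ 0 },
	};

	static int sample_probe(struct vop_device *vpdev)
	{
		/*
		 * All hardware access goes through the hw_ops supplied by
		 * whoever registered the device (host or card side).
		 */
		int db = vpdev->hw_ops->next_db(vpdev);

		dev_info(&vpdev->dev, "probed, next free doorbell %d\n", db);
		return 0;
	}

	static void sample_remove(struct vop_device *vpdev)
	{
		dev_info(&vpdev->dev, "removed\n");
	}

	static struct vop_driver sample_driver = {
		.driver.name	= KBUILD_MODNAME,
		.driver.owner	= THIS_MODULE,
		.id_table	= sample_id_table,
		.probe		= sample_probe,
		.remove		= sample_remove,
	};

	module_vop_driver(sample_driver);

	MODULE_DESCRIPTION("Sample VOP bus driver (illustration only)");
	MODULE_LICENSE("GPL v2");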
diff --git a/drivers/misc/mic/card/Makefile b/drivers/misc/mic/card/Makefile
index 69d58bef92ce..6e9675e12a09 100644
--- a/drivers/misc/mic/card/Makefile
+++ b/drivers/misc/mic/card/Makefile
@@ -8,4 +8,3 @@ obj-$(CONFIG_INTEL_MIC_CARD) += mic_card.o
mic_card-y += mic_x100.o
mic_card-y += mic_device.o
mic_card-y += mic_debugfs.o
-mic_card-y += mic_virtio.o
diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c
index d0edaf7e0cd5..e749af48f736 100644
--- a/drivers/misc/mic/card/mic_device.c
+++ b/drivers/misc/mic/card/mic_device.c
@@ -34,7 +34,6 @@
#include <linux/mic_common.h>
#include "../common/mic_dev.h"
#include "mic_device.h"
-#include "mic_virtio.h"
static struct mic_driver *g_drv;
@@ -250,12 +249,82 @@ static struct scif_hw_ops scif_hw_ops = {
.iounmap = ___mic_iounmap,
};
+static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev)
+{
+ return dev_get_drvdata(vpdev->dev.parent);
+}
+
+static struct mic_irq *
+__mic_request_irq(struct vop_device *vpdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data, int intr_src)
+{
+ return mic_request_card_irq(func, NULL, name, data, intr_src);
+}
+
+static void __mic_free_irq(struct vop_device *vpdev,
+ struct mic_irq *cookie, void *data)
+{
+ return mic_free_card_irq(cookie, data);
+}
+
+static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ mic_ack_interrupt(&mdrv->mdev);
+}
+
+static int __mic_next_db(struct vop_device *vpdev)
+{
+ return mic_next_card_db();
+}
+
+static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ return mdrv->dp;
+}
+
+static void __mic_send_intr(struct vop_device *vpdev, int db)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ mic_send_intr(&mdrv->mdev, db);
+}
+
+static void __iomem *__mic_ioremap(struct vop_device *vpdev,
+ dma_addr_t pa, size_t len)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ return mic_card_map(&mdrv->mdev, pa, len);
+}
+
+static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
+{
+ struct mic_driver *mdrv = vpdev_to_mdrv(vpdev);
+
+ mic_card_unmap(&mdrv->mdev, va);
+}
+
+static struct vop_hw_ops vop_hw_ops = {
+ .request_irq = __mic_request_irq,
+ .free_irq = __mic_free_irq,
+ .ack_interrupt = __mic_ack_interrupt,
+ .next_db = __mic_next_db,
+ .get_remote_dp = __mic_get_remote_dp,
+ .send_intr = __mic_send_intr,
+ .ioremap = __mic_ioremap,
+ .iounmap = __mic_iounmap,
+};
+
static int mic_request_dma_chans(struct mic_driver *mdrv)
{
dma_cap_mask_t mask;
struct dma_chan *chan;
- request_module("mic_x100_dma");
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -309,9 +378,13 @@ int __init mic_driver_init(struct mic_driver *mdrv)
rc = -ENODEV;
goto irq_uninit;
}
- rc = mic_devices_init(mdrv);
- if (rc)
+ mdrv->vpdev = vop_register_device(mdrv->dev, VOP_DEV_TRNSP,
+ NULL, &vop_hw_ops, 0,
+ NULL, mdrv->dma_ch[0]);
+ if (IS_ERR(mdrv->vpdev)) {
+ rc = PTR_ERR(mdrv->vpdev);
goto dma_free;
+ }
bootparam = mdrv->dp;
node_id = ioread8(&bootparam->node_id);
mdrv->scdev = scif_register_device(mdrv->dev, MIC_SCIF_DEV,
@@ -321,13 +394,13 @@ int __init mic_driver_init(struct mic_driver *mdrv)
mdrv->num_dma_ch, true);
if (IS_ERR(mdrv->scdev)) {
rc = PTR_ERR(mdrv->scdev);
- goto device_uninit;
+ goto vop_remove;
}
mic_create_card_debug_dir(mdrv);
done:
return rc;
-device_uninit:
- mic_devices_uninit(mdrv);
+vop_remove:
+ vop_unregister_device(mdrv->vpdev);
dma_free:
mic_free_dma_chans(mdrv);
irq_uninit:
@@ -348,7 +421,7 @@ void mic_driver_uninit(struct mic_driver *mdrv)
{
mic_delete_card_debug_dir(mdrv);
scif_unregister_device(mdrv->scdev);
- mic_devices_uninit(mdrv);
+ vop_unregister_device(mdrv->vpdev);
mic_free_dma_chans(mdrv);
mic_uninit_irq();
mic_dp_uninit();
diff --git a/drivers/misc/mic/card/mic_device.h b/drivers/misc/mic/card/mic_device.h
index 1dbf83c41289..333dbed972f6 100644
--- a/drivers/misc/mic/card/mic_device.h
+++ b/drivers/misc/mic/card/mic_device.h
@@ -32,6 +32,7 @@
#include <linux/interrupt.h>
#include <linux/mic_bus.h>
#include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
/**
* struct mic_intr_info - Contains h/w specific interrupt sources info
@@ -76,6 +77,7 @@ struct mic_device {
* @dma_ch - Array of DMA channels
* @num_dma_ch - Number of DMA channels available
* @scdev: SCIF device on the SCIF virtual bus.
+ * @vpdev: Virtio over PCIe device on the VOP virtual bus.
*/
struct mic_driver {
char name[20];
@@ -90,6 +92,7 @@ struct mic_driver {
struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
int num_dma_ch;
struct scif_hw_dev *scdev;
+ struct vop_device *vpdev;
};
/**
diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c
deleted file mode 100644
index f6ed57d3125c..000000000000
--- a/drivers/misc/mic/card/mic_virtio.c
+++ /dev/null
@@ -1,634 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Adapted from:
- *
- * virtio for kvm on s390
- *
- * Copyright IBM Corp. 2008
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
- * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * Intel MIC Card driver.
- *
- */
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/virtio_config.h>
-
-#include "../common/mic_dev.h"
-#include "mic_virtio.h"
-
-#define VIRTIO_SUBCODE_64 0x0D00
-
-#define MIC_MAX_VRINGS 4
-struct mic_vdev {
- struct virtio_device vdev;
- struct mic_device_desc __iomem *desc;
- struct mic_device_ctrl __iomem *dc;
- struct mic_device *mdev;
- void __iomem *vr[MIC_MAX_VRINGS];
- int used_size[MIC_MAX_VRINGS];
- struct completion reset_done;
- struct mic_irq *virtio_cookie;
- int c2h_vdev_db;
-};
-
-static struct mic_irq *virtio_config_cookie;
-#define to_micvdev(vd) container_of(vd, struct mic_vdev, vdev)
-
-/* Helper API to obtain the parent of the virtio device */
-static inline struct device *mic_dev(struct mic_vdev *mvdev)
-{
- return mvdev->vdev.dev.parent;
-}
-
-/* This gets the device's feature bits. */
-static u64 mic_get_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- u32 features = 0;
- struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
- u8 __iomem *in_features = mic_vq_features(desc);
- int feature_len = ioread8(&desc->feature_len);
-
- bits = min_t(unsigned, feature_len, sizeof(features)) * 8;
- for (i = 0; i < bits; i++)
- if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
- features |= BIT(i);
-
- return features;
-}
-
-static int mic_finalize_features(struct virtio_device *vdev)
-{
- unsigned int i, bits;
- struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
- u8 feature_len = ioread8(&desc->feature_len);
- /* Second half of bitmap is features we accept. */
- u8 __iomem *out_features =
- mic_vq_features(desc) + feature_len;
-
- /* Give virtio_ring a chance to accept features. */
- vring_transport_features(vdev);
-
- /* Make sure we don't have any features > 32 bits! */
- BUG_ON((u32)vdev->features != vdev->features);
-
- memset_io(out_features, 0, feature_len);
- bits = min_t(unsigned, feature_len,
- sizeof(vdev->features)) * 8;
- for (i = 0; i < bits; i++) {
- if (__virtio_test_bit(vdev, i))
- iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
- &out_features[i / 8]);
- }
-
- return 0;
-}
-
-/*
- * Reading and writing elements in config space
- */
-static void mic_get(struct virtio_device *vdev, unsigned int offset,
- void *buf, unsigned len)
-{
- struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-
- if (offset + len > ioread8(&desc->config_len))
- return;
- memcpy_fromio(buf, mic_vq_configspace(desc) + offset, len);
-}
-
-static void mic_set(struct virtio_device *vdev, unsigned int offset,
- const void *buf, unsigned len)
-{
- struct mic_device_desc __iomem *desc = to_micvdev(vdev)->desc;
-
- if (offset + len > ioread8(&desc->config_len))
- return;
- memcpy_toio(mic_vq_configspace(desc) + offset, buf, len);
-}
-
-/*
- * The operations to get and set the status word just access the status
- * field of the device descriptor. set_status also interrupts the host
- * to tell about status changes.
- */
-static u8 mic_get_status(struct virtio_device *vdev)
-{
- return ioread8(&to_micvdev(vdev)->desc->status);
-}
-
-static void mic_set_status(struct virtio_device *vdev, u8 status)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- if (!status)
- return;
- iowrite8(status, &mvdev->desc->status);
- mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-}
-
-/* Inform host on a virtio device reset and wait for ack from host */
-static void mic_reset_inform_host(struct virtio_device *vdev)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- struct mic_device_ctrl __iomem *dc = mvdev->dc;
- int retry;
-
- iowrite8(0, &dc->host_ack);
- iowrite8(1, &dc->vdev_reset);
- mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
-
- /* Wait till host completes all card accesses and acks the reset */
- for (retry = 100; retry--;) {
- if (ioread8(&dc->host_ack))
- break;
- msleep(100);
- };
-
- dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
-
- /* Reset status to 0 in case we timed out */
- iowrite8(0, &mvdev->desc->status);
-}
-
-static void mic_reset(struct virtio_device *vdev)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
-
- dev_dbg(mic_dev(mvdev), "%s: virtio id %d\n",
- __func__, vdev->id.device);
-
- mic_reset_inform_host(vdev);
- complete_all(&mvdev->reset_done);
-}
-
-/*
- * The virtio_ring code calls this API when it wants to notify the Host.
- */
-static bool mic_notify(struct virtqueue *vq)
-{
- struct mic_vdev *mvdev = vq->priv;
-
- mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
- return true;
-}
-
-static void mic_del_vq(struct virtqueue *vq, int n)
-{
- struct mic_vdev *mvdev = to_micvdev(vq->vdev);
- struct vring *vr = (struct vring *)(vq + 1);
-
- free_pages((unsigned long) vr->used, get_order(mvdev->used_size[n]));
- vring_del_virtqueue(vq);
- mic_card_unmap(mvdev->mdev, mvdev->vr[n]);
- mvdev->vr[n] = NULL;
-}
-
-static void mic_del_vqs(struct virtio_device *vdev)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- struct virtqueue *vq, *n;
- int idx = 0;
-
- dev_dbg(mic_dev(mvdev), "%s\n", __func__);
-
- list_for_each_entry_safe(vq, n, &vdev->vqs, list)
- mic_del_vq(vq, idx++);
-}
-
-/*
- * This routine will assign vring's allocated in host/io memory. Code in
- * virtio_ring.c however continues to access this io memory as if it were local
- * memory without io accessors.
- */
-static struct virtqueue *mic_find_vq(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *vq),
- const char *name)
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- struct mic_vqconfig __iomem *vqconfig;
- struct mic_vqconfig config;
- struct virtqueue *vq;
- void __iomem *va;
- struct _mic_vring_info __iomem *info;
- void *used;
- int vr_size, _vr_size, err, magic;
- struct vring *vr;
- u8 type = ioread8(&mvdev->desc->type);
-
- if (index >= ioread8(&mvdev->desc->num_vq))
- return ERR_PTR(-ENOENT);
-
- if (!name)
- return ERR_PTR(-ENOENT);
-
- /* First assign the vring's allocated in host memory */
- vqconfig = mic_vq_config(mvdev->desc) + index;
- memcpy_fromio(&config, vqconfig, sizeof(config));
- _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
- vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
- va = mic_card_map(mvdev->mdev, le64_to_cpu(config.address), vr_size);
- if (!va)
- return ERR_PTR(-ENOMEM);
- mvdev->vr[index] = va;
- memset_io(va, 0x0, _vr_size);
- vq = vring_new_virtqueue(index, le16_to_cpu(config.num),
- MIC_VIRTIO_RING_ALIGN, vdev, false,
- (void __force *)va, mic_notify, callback,
- name);
- if (!vq) {
- err = -ENOMEM;
- goto unmap;
- }
- info = va + _vr_size;
- magic = ioread32(&info->magic);
-
- if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
- err = -EIO;
- goto unmap;
- }
-
- /* Allocate and reassign used ring now */
- mvdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
- sizeof(struct vring_used_elem) *
- le16_to_cpu(config.num));
- used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(mvdev->used_size[index]));
- if (!used) {
- err = -ENOMEM;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto del_vq;
- }
- iowrite64(virt_to_phys(used), &vqconfig->used_address);
-
- /*
- * To reassign the used ring here we are directly accessing
- * struct vring_virtqueue which is a private data structure
- * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
- * vring_new_virtqueue() would ensure that
- * (&vq->vring == (struct vring *) (&vq->vq + 1));
- */
- vr = (struct vring *)(vq + 1);
- vr->used = used;
-
- vq->priv = mvdev;
- return vq;
-del_vq:
- vring_del_virtqueue(vq);
-unmap:
- mic_card_unmap(mvdev->mdev, mvdev->vr[index]);
- return ERR_PTR(err);
-}
-
-static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[])
-{
- struct mic_vdev *mvdev = to_micvdev(vdev);
- struct mic_device_ctrl __iomem *dc = mvdev->dc;
- int i, err, retry;
-
- /* We must have this many virtqueues. */
- if (nvqs > ioread8(&mvdev->desc->num_vq))
- return -ENOENT;
-
- for (i = 0; i < nvqs; ++i) {
- dev_dbg(mic_dev(mvdev), "%s: %d: %s\n",
- __func__, i, names[i]);
- vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]);
- if (IS_ERR(vqs[i])) {
- err = PTR_ERR(vqs[i]);
- goto error;
- }
- }
-
- iowrite8(1, &dc->used_address_updated);
- /*
- * Send an interrupt to the host to inform it that used
- * rings have been re-assigned.
- */
- mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);
- for (retry = 100; retry--;) {
- if (!ioread8(&dc->used_address_updated))
- break;
- msleep(100);
- };
-
- dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);
- if (!retry) {
- err = -ENODEV;
- goto error;
- }
-
- return 0;
-error:
- mic_del_vqs(vdev);
- return err;
-}
-
-/*
- * The config ops structure as defined by virtio config
- */
-static struct virtio_config_ops mic_vq_config_ops = {
- .get_features = mic_get_features,
- .finalize_features = mic_finalize_features,
- .get = mic_get,
- .set = mic_set,
- .get_status = mic_get_status,
- .set_status = mic_set_status,
- .reset = mic_reset,
- .find_vqs = mic_find_vqs,
- .del_vqs = mic_del_vqs,
-};
-
-static irqreturn_t
-mic_virtio_intr_handler(int irq, void *data)
-{
- struct mic_vdev *mvdev = data;
- struct virtqueue *vq;
-
- mic_ack_interrupt(mvdev->mdev);
- list_for_each_entry(vq, &mvdev->vdev.vqs, list)
- vring_interrupt(0, vq);
-
- return IRQ_HANDLED;
-}
-
-static void mic_virtio_release_dev(struct device *_d)
-{
- /*
- * No need for a release method similar to virtio PCI.
- * Provide an empty one to avoid getting a warning from core.
- */
-}
-
-/*
- * adds a new device and register it with virtio
- * appropriate drivers are loaded by the device model
- */
-static int mic_add_device(struct mic_device_desc __iomem *d,
- unsigned int offset, struct mic_driver *mdrv)
-{
- struct mic_vdev *mvdev;
- int ret;
- int virtio_db;
- u8 type = ioread8(&d->type);
-
- mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
- if (!mvdev) {
- dev_err(mdrv->dev, "Cannot allocate mic dev %u type %u\n",
- offset, type);
- return -ENOMEM;
- }
-
- mvdev->mdev = &mdrv->mdev;
- mvdev->vdev.dev.parent = mdrv->dev;
- mvdev->vdev.dev.release = mic_virtio_release_dev;
- mvdev->vdev.id.device = type;
- mvdev->vdev.config = &mic_vq_config_ops;
- mvdev->desc = d;
- mvdev->dc = (void __iomem *)d + mic_aligned_desc_size(d);
- init_completion(&mvdev->reset_done);
-
- virtio_db = mic_next_card_db();
- mvdev->virtio_cookie = mic_request_card_irq(mic_virtio_intr_handler,
- NULL, "virtio intr", mvdev, virtio_db);
- if (IS_ERR(mvdev->virtio_cookie)) {
- ret = PTR_ERR(mvdev->virtio_cookie);
- goto kfree;
- }
- iowrite8((u8)virtio_db, &mvdev->dc->h2c_vdev_db);
- mvdev->c2h_vdev_db = ioread8(&mvdev->dc->c2h_vdev_db);
-
- ret = register_virtio_device(&mvdev->vdev);
- if (ret) {
- dev_err(mic_dev(mvdev),
- "Failed to register mic device %u type %u\n",
- offset, type);
- goto free_irq;
- }
- iowrite64((u64)mvdev, &mvdev->dc->vdev);
- dev_dbg(mic_dev(mvdev), "%s: registered mic device %u type %u mvdev %p\n",
- __func__, offset, type, mvdev);
-
- return 0;
-
-free_irq:
- mic_free_card_irq(mvdev->virtio_cookie, mvdev);
-kfree:
- kfree(mvdev);
- return ret;
-}
-
-/*
- * match for a mic device with a specific desc pointer
- */
-static int mic_match_desc(struct device *dev, void *data)
-{
- struct virtio_device *vdev = dev_to_virtio(dev);
- struct mic_vdev *mvdev = to_micvdev(vdev);
-
- return mvdev->desc == (void __iomem *)data;
-}
-
-static void mic_handle_config_change(struct mic_device_desc __iomem *d,
- unsigned int offset, struct mic_driver *mdrv)
-{
- struct mic_device_ctrl __iomem *dc
- = (void __iomem *)d + mic_aligned_desc_size(d);
- struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
-
- if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
- return;
-
- dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__);
- virtio_config_changed(&mvdev->vdev);
- iowrite8(1, &dc->guest_ack);
-}
-
-/*
- * removes a virtio device if a hot remove event has been
- * requested by the host.
- */
-static int mic_remove_device(struct mic_device_desc __iomem *d,
- unsigned int offset, struct mic_driver *mdrv)
-{
- struct mic_device_ctrl __iomem *dc
- = (void __iomem *)d + mic_aligned_desc_size(d);
- struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
- u8 status;
- int ret = -1;
-
- if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
- dev_dbg(mdrv->dev,
- "%s %d config_change %d type %d mvdev %p\n",
- __func__, __LINE__,
- ioread8(&dc->config_change), ioread8(&d->type), mvdev);
-
- status = ioread8(&d->status);
- reinit_completion(&mvdev->reset_done);
- unregister_virtio_device(&mvdev->vdev);
- mic_free_card_irq(mvdev->virtio_cookie, mvdev);
- if (status & VIRTIO_CONFIG_S_DRIVER_OK)
- wait_for_completion(&mvdev->reset_done);
- kfree(mvdev);
- iowrite8(1, &dc->guest_ack);
- dev_dbg(mdrv->dev, "%s %d guest_ack %d\n",
- __func__, __LINE__, ioread8(&dc->guest_ack));
- ret = 0;
- }
-
- return ret;
-}
-
-#define REMOVE_DEVICES true
-
-static void mic_scan_devices(struct mic_driver *mdrv, bool remove)
-{
- s8 type;
- unsigned int i;
- struct mic_device_desc __iomem *d;
- struct mic_device_ctrl __iomem *dc;
- struct device *dev;
- int ret;
-
- for (i = sizeof(struct mic_bootparam); i < MIC_DP_SIZE;
- i += mic_total_desc_size(d)) {
- d = mdrv->dp + i;
- dc = (void __iomem *)d + mic_aligned_desc_size(d);
- /*
- * This read barrier is paired with the corresponding write
- * barrier on the host which is inserted before adding or
- * removing a virtio device descriptor, by updating the type.
- */
- rmb();
- type = ioread8(&d->type);
-
- /* end of list */
- if (type == 0)
- break;
-
- if (type == -1)
- continue;
-
- /* device already exists */
- dev = device_find_child(mdrv->dev, (void __force *)d,
- mic_match_desc);
- if (dev) {
- if (remove)
- iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
- &dc->config_change);
- put_device(dev);
- mic_handle_config_change(d, i, mdrv);
- ret = mic_remove_device(d, i, mdrv);
- if (!ret && !remove)
- iowrite8(-1, &d->type);
- if (remove) {
- iowrite8(0, &dc->config_change);
- iowrite8(0, &dc->guest_ack);
- }
- continue;
- }
-
- /* new device */
- dev_dbg(mdrv->dev, "%s %d Adding new virtio device %p\n",
- __func__, __LINE__, d);
- if (!remove)
- mic_add_device(d, i, mdrv);
- }
-}
-
-/*
- * mic_hotplug_device tries to find changes in the device page.
- */
-static void mic_hotplug_devices(struct work_struct *work)
-{
- struct mic_driver *mdrv = container_of(work,
- struct mic_driver, hotplug_work);
-
- mic_scan_devices(mdrv, !REMOVE_DEVICES);
-}
-
-/*
- * Interrupt handler for hot plug/config changes etc.
- */
-static irqreturn_t
-mic_extint_handler(int irq, void *data)
-{
- struct mic_driver *mdrv = (struct mic_driver *)data;
-
- dev_dbg(mdrv->dev, "%s %d hotplug work\n",
- __func__, __LINE__);
- mic_ack_interrupt(&mdrv->mdev);
- schedule_work(&mdrv->hotplug_work);
- return IRQ_HANDLED;
-}
-
-/*
- * Init function for virtio
- */
-int mic_devices_init(struct mic_driver *mdrv)
-{
- int rc;
- struct mic_bootparam __iomem *bootparam;
- int config_db;
-
- INIT_WORK(&mdrv->hotplug_work, mic_hotplug_devices);
- mic_scan_devices(mdrv, !REMOVE_DEVICES);
-
- config_db = mic_next_card_db();
- virtio_config_cookie = mic_request_card_irq(mic_extint_handler, NULL,
- "virtio_config_intr", mdrv,
- config_db);
- if (IS_ERR(virtio_config_cookie)) {
- rc = PTR_ERR(virtio_config_cookie);
- goto exit;
- }
-
- bootparam = mdrv->dp;
- iowrite8(config_db, &bootparam->h2c_config_db);
- return 0;
-exit:
- return rc;
-}
-
-/*
- * Uninit function for virtio
- */
-void mic_devices_uninit(struct mic_driver *mdrv)
-{
- struct mic_bootparam __iomem *bootparam = mdrv->dp;
- iowrite8(-1, &bootparam->h2c_config_db);
- mic_free_card_irq(virtio_config_cookie, mdrv);
- flush_work(&mdrv->hotplug_work);
- mic_scan_devices(mdrv, REMOVE_DEVICES);
-}
diff --git a/drivers/misc/mic/card/mic_virtio.h b/drivers/misc/mic/card/mic_virtio.h
deleted file mode 100644
index d0407ba53bb7..000000000000
--- a/drivers/misc/mic/card/mic_virtio.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Disclaimer: The codes contained in these modules may be specific to
- * the Intel Software Development Platform codenamed: Knights Ferry, and
- * the Intel product codenamed: Knights Corner, and are not backward
- * compatible with other Intel products. Additionally, Intel will NOT
- * support the codes or instruction set in future products.
- *
- * Intel MIC Card driver.
- *
- */
-#ifndef __MIC_CARD_VIRTIO_H
-#define __MIC_CARD_VIRTIO_H
-
-#include <linux/mic_common.h>
-#include "mic_device.h"
-
-/*
- * 64 bit I/O access
- */
-#ifndef ioread64
-#define ioread64 readq
-#endif
-#ifndef iowrite64
-#define iowrite64 writeq
-#endif
-
-static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc)
-{
- return sizeof(*desc)
- + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
- + ioread8(&desc->feature_len) * 2
- + ioread8(&desc->config_len);
-}
-
-static inline struct mic_vqconfig __iomem *
-mic_vq_config(struct mic_device_desc __iomem *desc)
-{
- return (struct mic_vqconfig __iomem *)(desc + 1);
-}
-
-static inline __u8 __iomem *
-mic_vq_features(struct mic_device_desc __iomem *desc)
-{
- return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq));
-}
-
-static inline __u8 __iomem *
-mic_vq_configspace(struct mic_device_desc __iomem *desc)
-{
- return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2;
-}
-static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc)
-{
- return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
-}
-
-int mic_devices_init(struct mic_driver *mdrv);
-void mic_devices_uninit(struct mic_driver *mdrv);
-
-#endif
diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
index b2958ce2368c..b9f0710ffa6b 100644
--- a/drivers/misc/mic/card/mic_x100.c
+++ b/drivers/misc/mic/card/mic_x100.c
@@ -326,6 +326,7 @@ static int __init mic_init(void)
goto done;
}
+ request_module("mic_x100_dma");
mic_init_card_debugfs();
ret = platform_device_register(&mic_platform_dev);
if (ret) {
diff --git a/drivers/misc/mic/cosm/cosm_main.c b/drivers/misc/mic/cosm/cosm_main.c
index 4b4b356c797d..7005cb1e01d2 100644
--- a/drivers/misc/mic/cosm/cosm_main.c
+++ b/drivers/misc/mic/cosm/cosm_main.c
@@ -153,8 +153,10 @@ void cosm_stop(struct cosm_device *cdev, bool force)
* stop(..) calls device_unregister and will crash the system if
* called multiple times.
*/
- bool call_hw_ops = cdev->state != MIC_RESET_FAILED &&
- cdev->state != MIC_READY;
+ u8 state = cdev->state == MIC_RESETTING ?
+ cdev->prev_state : cdev->state;
+ bool call_hw_ops = state != MIC_RESET_FAILED &&
+ state != MIC_READY;
if (cdev->state != MIC_RESETTING)
cosm_set_state(cdev, MIC_RESETTING);
@@ -195,8 +197,11 @@ int cosm_reset(struct cosm_device *cdev)
mutex_lock(&cdev->cosm_mutex);
if (cdev->state != MIC_READY) {
- cosm_set_state(cdev, MIC_RESETTING);
- schedule_work(&cdev->reset_trigger_work);
+ if (cdev->state != MIC_RESETTING) {
+ cdev->prev_state = cdev->state;
+ cosm_set_state(cdev, MIC_RESETTING);
+ schedule_work(&cdev->reset_trigger_work);
+ }
} else {
dev_err(&cdev->dev, "%s %d MIC is READY\n", __func__, __LINE__);
rc = -EINVAL;
diff --git a/drivers/misc/mic/host/Makefile b/drivers/misc/mic/host/Makefile
index 004d3db0f990..f3b502333ded 100644
--- a/drivers/misc/mic/host/Makefile
+++ b/drivers/misc/mic/host/Makefile
@@ -9,5 +9,3 @@ mic_host-objs += mic_smpt.o
mic_host-objs += mic_intr.o
mic_host-objs += mic_boot.o
mic_host-objs += mic_debugfs.o
-mic_host-objs += mic_fops.o
-mic_host-objs += mic_virtio.o
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 7845564dff64..8c91c9950b54 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -25,10 +25,117 @@
#include <linux/mic_common.h>
#include <linux/mic_bus.h>
#include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
-#include "mic_virtio.h"
+
+static inline struct mic_device *vpdev_to_mdev(struct device *dev)
+{
+ return dev_get_drvdata(dev->parent);
+}
+
+static dma_addr_t
+_mic_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ void *va = phys_to_virt(page_to_phys(page)) + offset;
+ struct mic_device *mdev = vpdev_to_mdev(dev);
+
+ return mic_map_single(mdev, va, size);
+}
+
+static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct mic_device *mdev = vpdev_to_mdev(dev);
+
+ mic_unmap_single(mdev, dma_addr, size);
+}
+
+static const struct dma_map_ops _mic_dma_ops = {
+ .map_page = _mic_dma_map_page,
+ .unmap_page = _mic_dma_unmap_page,
+};
+
+static struct mic_irq *
+__mic_request_irq(struct vop_device *vpdev,
+ irqreturn_t (*func)(int irq, void *data),
+ const char *name, void *data, int intr_src)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mic_request_threaded_irq(mdev, func, NULL, name, data,
+ intr_src, MIC_INTR_DB);
+}
+
+static void __mic_free_irq(struct vop_device *vpdev,
+ struct mic_irq *cookie, void *data)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mic_free_irq(mdev, cookie, data);
+}
+
+static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ mdev->ops->intr_workarounds(mdev);
+}
+
+static int __mic_next_db(struct vop_device *vpdev)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mic_next_db(mdev);
+}
+
+static void *__mic_get_dp(struct vop_device *vpdev)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mdev->dp;
+}
+
+static void __iomem *__mic_get_remote_dp(struct vop_device *vpdev)
+{
+ return NULL;
+}
+
+static void __mic_send_intr(struct vop_device *vpdev, int db)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ mdev->ops->send_intr(mdev, db);
+}
+
+static void __iomem *__mic_ioremap(struct vop_device *vpdev,
+ dma_addr_t pa, size_t len)
+{
+ struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
+
+ return mdev->aper.va + pa;
+}
+
+static void __mic_iounmap(struct vop_device *vpdev, void __iomem *va)
+{
+ /* nothing to do */
+}
+
+static struct vop_hw_ops vop_hw_ops = {
+ .request_irq = __mic_request_irq,
+ .free_irq = __mic_free_irq,
+ .ack_interrupt = __mic_ack_interrupt,
+ .next_db = __mic_next_db,
+ .get_dp = __mic_get_dp,
+ .get_remote_dp = __mic_get_remote_dp,
+ .send_intr = __mic_send_intr,
+ .ioremap = __mic_ioremap,
+ .iounmap = __mic_iounmap,
+};
static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev)
{
@@ -315,7 +422,6 @@ static int mic_request_dma_chans(struct mic_device *mdev)
dma_cap_mask_t mask;
struct dma_chan *chan;
- request_module("mic_x100_dma");
dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
@@ -387,9 +493,18 @@ static int _mic_start(struct cosm_device *cdev, int id)
goto dma_free;
}
+ mdev->vpdev = vop_register_device(&mdev->pdev->dev,
+ VOP_DEV_TRNSP, &_mic_dma_ops,
+ &vop_hw_ops, id + 1, &mdev->aper,
+ mdev->dma_ch[0]);
+ if (IS_ERR(mdev->vpdev)) {
+ rc = PTR_ERR(mdev->vpdev);
+ goto scif_remove;
+ }
+
rc = mdev->ops->load_mic_fw(mdev, NULL);
if (rc)
- goto scif_remove;
+ goto vop_remove;
mic_smpt_restore(mdev);
mic_intr_restore(mdev);
mdev->intr_ops->enable_interrupts(mdev);
@@ -397,6 +512,8 @@ static int _mic_start(struct cosm_device *cdev, int id)
mdev->ops->write_spad(mdev, MIC_DPHI_SPAD, mdev->dp_dma_addr >> 32);
mdev->ops->send_firmware_intr(mdev);
goto unlock_ret;
+vop_remove:
+ vop_unregister_device(mdev->vpdev);
scif_remove:
scif_unregister_device(mdev->scdev);
dma_free:
@@ -423,7 +540,7 @@ static void _mic_stop(struct cosm_device *cdev, bool force)
* will be the first to be registered and the last to be
* unregistered.
*/
- mic_virtio_reset_devices(mdev);
+ vop_unregister_device(mdev->vpdev);
scif_unregister_device(mdev->scdev);
mic_free_dma_chans(mdev);
mbus_unregister_device(mdev->dma_mbdev);
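The _mic_dma_ops installed above is what routes generic DMA API calls on the new VOP device through the MIC SMPT: vop_register_device() stores the ops in dev.archdata.dma_ops, so a mapping made against &vpdev->dev lands in _mic_dma_map_page() and then mic_map_single(). A hypothetical consumer-side helper (sketch only; the name is not part of this patch) would be:

	#include <linux/dma-mapping.h>
	#include "../bus/vop_bus.h"

	/*
	 * Map one page for the coprocessor to access. dma_map_page()
	 * picks up &_mic_dma_ops from dev.archdata and ends up in
	 * _mic_dma_map_page() -> mic_map_single() above.
	 */
	static dma_addr_t sample_map_page(struct vop_device *vpdev,
					  struct page *page)
	{
		return dma_map_page(&vpdev->dev, page, 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);
	}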
diff --git a/drivers/misc/mic/host/mic_debugfs.c b/drivers/misc/mic/host/mic_debugfs.c
index 10581600777a..0a9daba8bb5d 100644
--- a/drivers/misc/mic/host/mic_debugfs.c
+++ b/drivers/misc/mic/host/mic_debugfs.c
@@ -26,7 +26,6 @@
#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
-#include "mic_virtio.h"
/* Debugfs parent dir */
static struct dentry *mic_dbg;
@@ -100,190 +99,6 @@ static const struct file_operations post_code_ops = {
.release = mic_post_code_debug_release
};
-static int mic_dp_show(struct seq_file *s, void *pos)
-{
- struct mic_device *mdev = s->private;
- struct mic_device_desc *d;
- struct mic_device_ctrl *dc;
- struct mic_vqconfig *vqconfig;
- __u32 *features;
- __u8 *config;
- struct mic_bootparam *bootparam = mdev->dp;
- int i, j;
-
- seq_printf(s, "Bootparam: magic 0x%x\n",
- bootparam->magic);
- seq_printf(s, "Bootparam: h2c_config_db %d\n",
- bootparam->h2c_config_db);
- seq_printf(s, "Bootparam: node_id %d\n",
- bootparam->node_id);
- seq_printf(s, "Bootparam: c2h_scif_db %d\n",
- bootparam->c2h_scif_db);
- seq_printf(s, "Bootparam: h2c_scif_db %d\n",
- bootparam->h2c_scif_db);
- seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
- bootparam->scif_host_dma_addr);
- seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
- bootparam->scif_card_dma_addr);
-
-
- for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
- i += mic_total_desc_size(d)) {
- d = mdev->dp + i;
- dc = (void *)d + mic_aligned_desc_size(d);
-
- /* end of list */
- if (d->type == 0)
- break;
-
- if (d->type == -1)
- continue;
-
- seq_printf(s, "Type %d ", d->type);
- seq_printf(s, "Num VQ %d ", d->num_vq);
- seq_printf(s, "Feature Len %d\n", d->feature_len);
- seq_printf(s, "Config Len %d ", d->config_len);
- seq_printf(s, "Shutdown Status %d\n", d->status);
-
- for (j = 0; j < d->num_vq; j++) {
- vqconfig = mic_vq_config(d) + j;
- seq_printf(s, "vqconfig[%d]: ", j);
- seq_printf(s, "address 0x%llx ", vqconfig->address);
- seq_printf(s, "num %d ", vqconfig->num);
- seq_printf(s, "used address 0x%llx\n",
- vqconfig->used_address);
- }
-
- features = (__u32 *)mic_vq_features(d);
- seq_printf(s, "Features: Host 0x%x ", features[0]);
- seq_printf(s, "Guest 0x%x\n", features[1]);
-
- config = mic_vq_configspace(d);
- for (j = 0; j < d->config_len; j++)
- seq_printf(s, "config[%d]=%d\n", j, config[j]);
-
- seq_puts(s, "Device control:\n");
- seq_printf(s, "Config Change %d ", dc->config_change);
- seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
- seq_printf(s, "Guest Ack %d ", dc->guest_ack);
- seq_printf(s, "Host ack %d\n", dc->host_ack);
- seq_printf(s, "Used address updated %d ",
- dc->used_address_updated);
- seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
- seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
- seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
- }
-
- return 0;
-}
-
-static int mic_dp_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mic_dp_show, inode->i_private);
-}
-
-static int mic_dp_debug_release(struct inode *inode, struct file *file)
-{
- return single_release(inode, file);
-}
-
-static const struct file_operations dp_ops = {
- .owner = THIS_MODULE,
- .open = mic_dp_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = mic_dp_debug_release
-};
-
-static int mic_vdev_info_show(struct seq_file *s, void *unused)
-{
- struct mic_device *mdev = s->private;
- struct list_head *pos, *tmp;
- struct mic_vdev *mvdev;
- int i, j;
-
- mutex_lock(&mdev->mic_mutex);
- list_for_each_safe(pos, tmp, &mdev->vdev_list) {
- mvdev = list_entry(pos, struct mic_vdev, list);
- seq_printf(s, "VDEV type %d state %s in %ld out %ld\n",
- mvdev->virtio_id,
- mic_vdevup(mvdev) ? "UP" : "DOWN",
- mvdev->in_bytes,
- mvdev->out_bytes);
- for (i = 0; i < MIC_MAX_VRINGS; i++) {
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
- struct mic_vringh *mvr = &mvdev->mvr[i];
- struct vringh *vrh = &mvr->vrh;
- int num = vrh->vring.num;
- if (!num)
- continue;
- desc = vrh->vring.desc;
- seq_printf(s, "vring i %d avail_idx %d",
- i, mvr->vring.info->avail_idx & (num - 1));
- seq_printf(s, " vring i %d avail_idx %d\n",
- i, mvr->vring.info->avail_idx);
- seq_printf(s, "vrh i %d weak_barriers %d",
- i, vrh->weak_barriers);
- seq_printf(s, " last_avail_idx %d last_used_idx %d",
- vrh->last_avail_idx, vrh->last_used_idx);
- seq_printf(s, " completed %d\n", vrh->completed);
- for (j = 0; j < num; j++) {
- seq_printf(s, "desc[%d] addr 0x%llx len %d",
- j, desc->addr, desc->len);
- seq_printf(s, " flags 0x%x next %d\n",
- desc->flags, desc->next);
- desc++;
- }
- avail = vrh->vring.avail;
- seq_printf(s, "avail flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, avail->flags),
- vringh16_to_cpu(vrh, avail->idx) & (num - 1));
- seq_printf(s, "avail flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, avail->flags),
- vringh16_to_cpu(vrh, avail->idx));
- for (j = 0; j < num; j++)
- seq_printf(s, "avail ring[%d] %d\n",
- j, avail->ring[j]);
- used = vrh->vring.used;
- seq_printf(s, "used flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, used->flags),
- vringh16_to_cpu(vrh, used->idx) & (num - 1));
- seq_printf(s, "used flags 0x%x idx %d\n",
- vringh16_to_cpu(vrh, used->flags),
- vringh16_to_cpu(vrh, used->idx));
- for (j = 0; j < num; j++)
- seq_printf(s, "used ring[%d] id %d len %d\n",
- j, vringh32_to_cpu(vrh,
- used->ring[j].id),
- vringh32_to_cpu(vrh,
- used->ring[j].len));
- }
- }
- mutex_unlock(&mdev->mic_mutex);
-
- return 0;
-}
-
-static int mic_vdev_info_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mic_vdev_info_show, inode->i_private);
-}
-
-static int mic_vdev_info_debug_release(struct inode *inode, struct file *file)
-{
- return single_release(inode, file);
-}
-
-static const struct file_operations vdev_info_ops = {
- .owner = THIS_MODULE,
- .open = mic_vdev_info_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = mic_vdev_info_debug_release
-};
-
static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
{
struct mic_device *mdev = s->private;
@@ -367,11 +182,6 @@ void mic_create_debug_dir(struct mic_device *mdev)
debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
&post_code_ops);
- debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops);
-
- debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev,
- &vdev_info_ops);
-
debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
&msi_irq_info_ops);
}
diff --git a/drivers/misc/mic/host/mic_device.h b/drivers/misc/mic/host/mic_device.h
index 461184a12fbb..52b12b22f4ae 100644
--- a/drivers/misc/mic/host/mic_device.h
+++ b/drivers/misc/mic/host/mic_device.h
@@ -29,6 +29,7 @@
#include <linux/miscdevice.h>
#include <linux/mic_bus.h>
#include "../bus/scif_bus.h"
+#include "../bus/vop_bus.h"
#include "../bus/cosm_bus.h"
#include "mic_intr.h"
@@ -64,13 +65,11 @@ extern struct cosm_hw_ops cosm_hw_ops;
* @bootaddr: MIC boot address.
* @dp: virtio device page
* @dp_dma_addr: virtio device page DMA address.
- * @name: name for the misc char device
- * @miscdev: registered misc char device
- * @vdev_list: list of virtio devices.
* @dma_mbdev: MIC BUS DMA device.
* @dma_ch - Array of DMA channels
* @num_dma_ch - Number of DMA channels available
* @scdev: SCIF device on the SCIF virtual bus.
+ * @vpdev: Virtio over PCIe device on the VOP virtual bus.
* @cosm_dev: COSM device
*/
struct mic_device {
@@ -91,13 +90,11 @@ struct mic_device {
u32 bootaddr;
void *dp;
dma_addr_t dp_dma_addr;
- char name[16];
- struct miscdevice miscdev;
- struct list_head vdev_list;
struct mbus_device *dma_mbdev;
struct dma_chan *dma_ch[MIC_MAX_DMA_CHAN];
int num_dma_ch;
struct scif_hw_dev *scdev;
+ struct vop_device *vpdev;
struct cosm_device *cosm_dev;
};
diff --git a/drivers/misc/mic/host/mic_fops.c b/drivers/misc/mic/host/mic_fops.c
deleted file mode 100644
index 8cc1d90cd949..000000000000
--- a/drivers/misc/mic/host/mic_fops.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/poll.h>
-#include <linux/pci.h>
-
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_fops.h"
-#include "mic_virtio.h"
-
-int mic_open(struct inode *inode, struct file *f)
-{
- struct mic_vdev *mvdev;
- struct mic_device *mdev = container_of(f->private_data,
- struct mic_device, miscdev);
-
- mvdev = kzalloc(sizeof(*mvdev), GFP_KERNEL);
- if (!mvdev)
- return -ENOMEM;
-
- init_waitqueue_head(&mvdev->waitq);
- INIT_LIST_HEAD(&mvdev->list);
- mvdev->mdev = mdev;
- mvdev->virtio_id = -1;
-
- f->private_data = mvdev;
- return 0;
-}
-
-int mic_release(struct inode *inode, struct file *f)
-{
- struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
-
- if (-1 != mvdev->virtio_id)
- mic_virtio_del_device(mvdev);
- f->private_data = NULL;
- kfree(mvdev);
- return 0;
-}
-
-long mic_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
- struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
- void __user *argp = (void __user *)arg;
- int ret;
-
- switch (cmd) {
- case MIC_VIRTIO_ADD_DEVICE:
- {
- ret = mic_virtio_add_device(mvdev, argp);
- if (ret < 0) {
- dev_err(mic_dev(mvdev),
- "%s %d errno ret %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
- break;
- }
- case MIC_VIRTIO_COPY_DESC:
- {
- struct mic_copy_desc copy;
-
- ret = mic_vdev_inited(mvdev);
- if (ret)
- return ret;
-
- if (copy_from_user(&copy, argp, sizeof(copy)))
- return -EFAULT;
-
- dev_dbg(mic_dev(mvdev),
- "%s %d === iovcnt 0x%x vr_idx 0x%x update_used %d\n",
- __func__, __LINE__, copy.iovcnt, copy.vr_idx,
- copy.update_used);
-
- ret = mic_virtio_copy_desc(mvdev, &copy);
- if (ret < 0) {
- dev_err(mic_dev(mvdev),
- "%s %d errno ret %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
- if (copy_to_user(
- &((struct mic_copy_desc __user *)argp)->out_len,
- &copy.out_len, sizeof(copy.out_len))) {
- dev_err(mic_dev(mvdev), "%s %d errno ret %d\n",
- __func__, __LINE__, -EFAULT);
- return -EFAULT;
- }
- break;
- }
- case MIC_VIRTIO_CONFIG_CHANGE:
- {
- ret = mic_vdev_inited(mvdev);
- if (ret)
- return ret;
-
- ret = mic_virtio_config_change(mvdev, argp);
- if (ret < 0) {
- dev_err(mic_dev(mvdev),
- "%s %d errno ret %d\n",
- __func__, __LINE__, ret);
- return ret;
- }
- break;
- }
- default:
- return -ENOIOCTLCMD;
- };
- return 0;
-}
-
-/*
- * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
- * not when previously enqueued buffers may be available. This means that
- * in the card->host (TX) path, when userspace is unblocked by poll it
- * must drain all available descriptors or it can stall.
- */
-unsigned int mic_poll(struct file *f, poll_table *wait)
-{
- struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
- int mask = 0;
-
- poll_wait(f, &mvdev->waitq, wait);
-
- if (mic_vdev_inited(mvdev)) {
- mask = POLLERR;
- } else if (mvdev->poll_wake) {
- mvdev->poll_wake = 0;
- mask = POLLIN | POLLOUT;
- }
-
- return mask;
-}
-
-static inline int
-mic_query_offset(struct mic_vdev *mvdev, unsigned long offset,
- unsigned long *size, unsigned long *pa)
-{
- struct mic_device *mdev = mvdev->mdev;
- unsigned long start = MIC_DP_SIZE;
- int i;
-
- /*
- * MMAP interface is as follows:
- * offset region
- * 0x0 virtio device_page
- * 0x1000 first vring
- * 0x1000 + size of 1st vring second vring
- * ....
- */
- if (!offset) {
- *pa = virt_to_phys(mdev->dp);
- *size = MIC_DP_SIZE;
- return 0;
- }
-
- for (i = 0; i < mvdev->dd->num_vq; i++) {
- struct mic_vringh *mvr = &mvdev->mvr[i];
- if (offset == start) {
- *pa = virt_to_phys(mvr->vring.va);
- *size = mvr->vring.len;
- return 0;
- }
- start += mvr->vring.len;
- }
- return -1;
-}
-
-/*
- * Maps the device page and virtio rings to user space for readonly access.
- */
-int
-mic_mmap(struct file *f, struct vm_area_struct *vma)
-{
- struct mic_vdev *mvdev = (struct mic_vdev *)f->private_data;
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
- int i, err;
-
- err = mic_vdev_inited(mvdev);
- if (err)
- return err;
-
- if (vma->vm_flags & VM_WRITE)
- return -EACCES;
-
- while (size_rem) {
- i = mic_query_offset(mvdev, offset, &size, &pa);
- if (i < 0)
- return -EINVAL;
- err = remap_pfn_range(vma, vma->vm_start + offset,
- pa >> PAGE_SHIFT, size, vma->vm_page_prot);
- if (err)
- return err;
- dev_dbg(mic_dev(mvdev),
- "%s %d type %d size 0x%lx off 0x%lx pa 0x%lx vma 0x%lx\n",
- __func__, __LINE__, mvdev->virtio_id, size, offset,
- pa, vma->vm_start + offset);
- size_rem -= size;
- offset += size;
- }
- return 0;
-}
diff --git a/drivers/misc/mic/host/mic_fops.h b/drivers/misc/mic/host/mic_fops.h
deleted file mode 100644
index dc3893dff667..000000000000
--- a/drivers/misc/mic/host/mic_fops.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#ifndef _MIC_FOPS_H_
-#define _MIC_FOPS_H_
-
-int mic_open(struct inode *inode, struct file *filp);
-int mic_release(struct inode *inode, struct file *filp);
-ssize_t mic_read(struct file *filp, char __user *buf,
- size_t count, loff_t *pos);
-long mic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-int mic_mmap(struct file *f, struct vm_area_struct *vma);
-unsigned int mic_poll(struct file *f, poll_table *wait);
-
-#endif
diff --git a/drivers/misc/mic/host/mic_main.c b/drivers/misc/mic/host/mic_main.c
index 153894e7ed5b..035be3e9ceba 100644
--- a/drivers/misc/mic/host/mic_main.c
+++ b/drivers/misc/mic/host/mic_main.c
@@ -27,8 +27,6 @@
#include "mic_device.h"
#include "mic_x100.h"
#include "mic_smpt.h"
-#include "mic_fops.h"
-#include "mic_virtio.h"
static const char mic_driver_name[] = "mic";
@@ -57,17 +55,6 @@ MODULE_DEVICE_TABLE(pci, mic_pci_tbl);
/* ID allocator for MIC devices */
static struct ida g_mic_ida;
-/* Base device node number for MIC devices */
-static dev_t g_mic_devno;
-
-static const struct file_operations mic_fops = {
- .open = mic_open,
- .release = mic_release,
- .unlocked_ioctl = mic_ioctl,
- .poll = mic_poll,
- .mmap = mic_mmap,
- .owner = THIS_MODULE,
-};
/* Initialize the device page */
static int mic_dp_init(struct mic_device *mdev)
@@ -169,7 +156,6 @@ mic_device_init(struct mic_device *mdev, struct pci_dev *pdev)
mic_ops_init(mdev);
mutex_init(&mdev->mic_mutex);
mdev->irq_info.next_avail_src = 0;
- INIT_LIST_HEAD(&mdev->vdev_list);
}
/**
@@ -259,30 +245,15 @@ static int mic_probe(struct pci_dev *pdev,
goto smpt_uninit;
}
mic_bootparam_init(mdev);
-
mic_create_debug_dir(mdev);
- mdev->miscdev.minor = MISC_DYNAMIC_MINOR;
- snprintf(mdev->name, sizeof(mdev->name), "mic%d", mdev->id);
- mdev->miscdev.name = mdev->name;
- mdev->miscdev.fops = &mic_fops;
- mdev->miscdev.parent = &mdev->pdev->dev;
- rc = misc_register(&mdev->miscdev);
- if (rc) {
- dev_err(&pdev->dev, "misc_register err id %d rc %d\n",
- mdev->id, rc);
- goto cleanup_debug_dir;
- }
-
mdev->cosm_dev = cosm_register_device(&mdev->pdev->dev, &cosm_hw_ops);
if (IS_ERR(mdev->cosm_dev)) {
rc = PTR_ERR(mdev->cosm_dev);
dev_err(&pdev->dev, "cosm_add_device failed rc %d\n", rc);
- goto misc_dereg;
+ goto cleanup_debug_dir;
}
return 0;
-misc_dereg:
- misc_deregister(&mdev->miscdev);
cleanup_debug_dir:
mic_delete_debug_dir(mdev);
mic_dp_uninit(mdev);
@@ -323,7 +294,6 @@ static void mic_remove(struct pci_dev *pdev)
return;
cosm_unregister_device(mdev->cosm_dev);
- misc_deregister(&mdev->miscdev);
mic_delete_debug_dir(mdev);
mic_dp_uninit(mdev);
mic_smpt_uninit(mdev);
@@ -347,26 +317,18 @@ static int __init mic_init(void)
{
int ret;
- ret = alloc_chrdev_region(&g_mic_devno, 0,
- MIC_MAX_NUM_DEVS, mic_driver_name);
- if (ret) {
- pr_err("alloc_chrdev_region failed ret %d\n", ret);
- goto error;
- }
-
+ request_module("mic_x100_dma");
mic_init_debugfs();
ida_init(&g_mic_ida);
ret = pci_register_driver(&mic_driver);
if (ret) {
pr_err("pci_register_driver failed ret %d\n", ret);
- goto cleanup_chrdev;
+ goto cleanup_debugfs;
}
- return ret;
-cleanup_chrdev:
+ return 0;
+cleanup_debugfs:
ida_destroy(&g_mic_ida);
mic_exit_debugfs();
- unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
-error:
return ret;
}
@@ -375,7 +337,6 @@ static void __exit mic_exit(void)
pci_unregister_driver(&mic_driver);
ida_destroy(&g_mic_ida);
mic_exit_debugfs();
- unregister_chrdev_region(g_mic_devno, MIC_MAX_NUM_DEVS);
}
module_init(mic_init);
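
With the misc device gone, mic_probe()'s unwind chain loses one link; the idiom it follows, each acquisition torn down in exact reverse order on failure, as a minimal sketch (the acquire/release helpers are hypothetical stand-ins for the real setup steps):

	#include <linux/pci.h>

	int acquire_a(struct pci_dev *pdev);	/* hypothetical setup steps */
	int acquire_b(struct pci_dev *pdev);
	int acquire_c(struct pci_dev *pdev);
	void release_a(struct pci_dev *pdev);
	void release_b(struct pci_dev *pdev);

	static int demo_probe(struct pci_dev *pdev)
	{
		int rc;

		rc = acquire_a(pdev);
		if (rc)
			return rc;
		rc = acquire_b(pdev);
		if (rc)
			goto err_a;
		rc = acquire_c(pdev);
		if (rc)
			goto err_b;
		return 0;		/* success: everything stays acquired */
	err_b:
		release_b(pdev);	/* unwind in exact reverse order */
	err_a:
		release_a(pdev);
		return rc;
	}
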
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
deleted file mode 100644
index 58b107a24a8b..000000000000
--- a/drivers/misc/mic/host/mic_virtio.c
+++ /dev/null
@@ -1,811 +0,0 @@
-/*
- * Intel MIC Platform Software Stack (MPSS)
- *
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Intel MIC Host driver.
- *
- */
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/uaccess.h>
-#include <linux/dmaengine.h>
-#include <linux/mic_common.h>
-#include "../common/mic_dev.h"
-#include "mic_device.h"
-#include "mic_smpt.h"
-#include "mic_virtio.h"
-
-/*
- * Size of the internal buffer used during DMAs as an intermediate buffer
- * for copy to/from user.
- */
-#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
-
-static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
- dma_addr_t src, size_t len)
-{
- int err = 0;
- struct dma_async_tx_descriptor *tx;
- struct dma_chan *mic_ch = mdev->dma_ch[0];
-
- if (!mic_ch) {
- err = -EBUSY;
- goto error;
- }
-
- tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
- DMA_PREP_FENCE);
- if (!tx) {
- err = -ENOMEM;
- goto error;
- } else {
- dma_cookie_t cookie = tx->tx_submit(tx);
-
- err = dma_submit_error(cookie);
- if (err)
- goto error;
- err = dma_sync_wait(mic_ch, cookie);
- }
-error:
- if (err)
- dev_err(&mdev->pdev->dev, "%s %d err %d\n",
- __func__, __LINE__, err);
- return err;
-}
-
-/*
- * Initiates the copies across the PCIe bus from card memory to a user
- * space buffer. When transfers are done using DMA, source/destination
- * addresses and transfer length must follow the alignment requirements of
- * the MIC DMA engine.
- */
-static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
- size_t len, u64 daddr, size_t dlen,
- int vr_idx)
-{
- struct mic_device *mdev = mvdev->mdev;
- void __iomem *dbuf = mdev->aper.va + daddr;
- struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
- size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
- size_t dma_offset;
- size_t partlen;
- int err;
-
- dma_offset = daddr - round_down(daddr, dma_alignment);
- daddr -= dma_offset;
- len += dma_offset;
-
- while (len) {
- partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
-
- err = mic_sync_dma(mdev, mvr->buf_da, daddr,
- ALIGN(partlen, dma_alignment));
- if (err)
- goto err;
-
- if (copy_to_user(ubuf, mvr->buf + dma_offset,
- partlen - dma_offset)) {
- err = -EFAULT;
- goto err;
- }
- daddr += partlen;
- ubuf += partlen;
- dbuf += partlen;
- mvdev->in_bytes_dma += partlen;
- mvdev->in_bytes += partlen;
- len -= partlen;
- dma_offset = 0;
- }
- return 0;
-err:
- dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-
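
The alignment bookkeeping above is easy to misread; the same arithmetic as a standalone sketch (not the driver's code): round the card address down to the engine's alignment, pad the DMA length, and skip the pad bytes when copying out of the bounce buffer.

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Sketch of mic_virtio_copy_to_user()'s offset math. align is a
	 * power of two (1 << copy_align). Returns how many bytes of the
	 * bounce buffer to skip before the caller's data begins. */
	static size_t mic_dma_span(u64 daddr, size_t len, size_t align,
				   u64 *dma_src, size_t *dma_len)
	{
		size_t off = daddr & (align - 1);	/* daddr - round_down(daddr, align) */

		*dma_src = daddr - off;			/* aligned source address */
		*dma_len = ALIGN(len + off, align);	/* padded transfer length */
		return off;
	}
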
-/*
- * Initiates copies across the PCIe bus from a user space buffer to card
- * memory. When transfers are done using DMA, source/destination addresses
- * and transfer length must follow the alignment requirements of the MIC
- * DMA engine.
- */
-static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
- size_t len, u64 daddr, size_t dlen,
- int vr_idx)
-{
- struct mic_device *mdev = mvdev->mdev;
- void __iomem *dbuf = mdev->aper.va + daddr;
- struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
- size_t dma_alignment = 1 << mdev->dma_ch[0]->device->copy_align;
- size_t partlen;
- int err;
-
- if (daddr & (dma_alignment - 1)) {
- mvdev->tx_dst_unaligned += len;
- goto memcpy;
- } else if (ALIGN(len, dma_alignment) > dlen) {
- mvdev->tx_len_unaligned += len;
- goto memcpy;
- }
-
- while (len) {
- partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);
-
- if (copy_from_user(mvr->buf, ubuf, partlen)) {
- err = -EFAULT;
- goto err;
- }
- err = mic_sync_dma(mdev, daddr, mvr->buf_da,
- ALIGN(partlen, dma_alignment));
- if (err)
- goto err;
- daddr += partlen;
- ubuf += partlen;
- dbuf += partlen;
- mvdev->out_bytes_dma += partlen;
- mvdev->out_bytes += partlen;
- len -= partlen;
- }
-memcpy:
- /*
- * We are copying to IO below and should ideally use something
- * like copy_from_user_toio(..) if it existed.
- */
- if (copy_from_user((void __force *)dbuf, ubuf, len)) {
- err = -EFAULT;
- goto err;
- }
- mvdev->out_bytes += len;
- return 0;
-err:
- dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
- return err;
-}
-
-#define MIC_VRINGH_READ true
-
-/* The function to call to notify the card about added buffers */
-static void mic_notify(struct vringh *vrh)
-{
- struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
- struct mic_vdev *mvdev = mvrh->mvdev;
- s8 db = mvdev->dc->h2c_vdev_db;
-
- if (db != -1)
- mvdev->mdev->ops->send_intr(mvdev->mdev, db);
-}
-
-/* Determine the total number of bytes consumed in a VRINGH KIOV */
-static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
-{
- int i;
- u32 total = iov->consumed;
-
- for (i = 0; i < iov->i; i++)
- total += iov->iov[i].iov_len;
- return total;
-}
-
-/*
- * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
- * This API is heavily based on the vringh_iov_xfer(..) implementation
- * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
- * and vringh_iov_push_kern(..) directly is that there is no
- * way to override the VRINGH xfer(..) routines as of v3.10.
- */
-static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
- void __user *ubuf, size_t len, bool read, int vr_idx,
- size_t *out_len)
-{
- int ret = 0;
- size_t partlen, tot_len = 0;
-
- while (len && iov->i < iov->used) {
- partlen = min(iov->iov[iov->i].iov_len, len);
- if (read)
- ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
- (u64)iov->iov[iov->i].iov_base,
- iov->iov[iov->i].iov_len,
- vr_idx);
- else
- ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
- (u64)iov->iov[iov->i].iov_base,
- iov->iov[iov->i].iov_len,
- vr_idx);
- if (ret) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= partlen;
- ubuf += partlen;
- tot_len += partlen;
- iov->consumed += partlen;
- iov->iov[iov->i].iov_len -= partlen;
- iov->iov[iov->i].iov_base += partlen;
- if (!iov->iov[iov->i].iov_len) {
- /* Fix up old iov element then increment. */
- iov->iov[iov->i].iov_len = iov->consumed;
- iov->iov[iov->i].iov_base -= iov->consumed;
-
- iov->consumed = 0;
- iov->i++;
- }
- }
- *out_len = tot_len;
- return ret;
-}
-
-/*
- * Use the standard VRINGH infrastructure in the kernel to fetch new
- * descriptors, initiate the copies and update the used ring.
- */
-static int _mic_virtio_copy(struct mic_vdev *mvdev,
- struct mic_copy_desc *copy)
-{
- int ret = 0;
- u32 iovcnt = copy->iovcnt;
- struct iovec iov;
- struct iovec __user *u_iov = copy->iov;
- void __user *ubuf = NULL;
- struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
- struct vringh_kiov *riov = &mvr->riov;
- struct vringh_kiov *wiov = &mvr->wiov;
- struct vringh *vrh = &mvr->vrh;
- u16 *head = &mvr->head;
- struct mic_vring *vr = &mvr->vring;
- size_t len = 0, out_len;
-
- copy->out_len = 0;
- /* Fetch a new IOVEC if all previous elements have been processed */
- if (riov->i == riov->used && wiov->i == wiov->used) {
- ret = vringh_getdesc_kern(vrh, riov, wiov,
- head, GFP_KERNEL);
- /* Check if there are available descriptors */
- if (ret <= 0)
- return ret;
- }
- while (iovcnt) {
- if (!len) {
- /* Copy over a new iovec from user space. */
- ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
- if (ret) {
- ret = -EINVAL;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len = iov.iov_len;
- ubuf = iov.iov_base;
- }
- /* Issue all the read descriptors first */
- ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
- copy->vr_idx, &out_len);
- if (ret) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= out_len;
- ubuf += out_len;
- copy->out_len += out_len;
- /* Issue the write descriptors next */
- ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
- copy->vr_idx, &out_len);
- if (ret) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- break;
- }
- len -= out_len;
- ubuf += out_len;
- copy->out_len += out_len;
- if (!len) {
- /* One user space iovec is now completed */
- iovcnt--;
- u_iov++;
- }
- /* Exit loop if all elements in KIOVs have been processed. */
- if (riov->i == riov->used && wiov->i == wiov->used)
- break;
- }
- /*
- * Update the used ring if a descriptor was available and some data was
- * copied in/out and the user asked for a used ring update.
- */
- if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
- u32 total = 0;
-
- /* Determine the total data consumed */
- total += mic_vringh_iov_consumed(riov);
- total += mic_vringh_iov_consumed(wiov);
- vringh_complete_kern(vrh, *head, total);
- *head = USHRT_MAX;
- if (vringh_need_notify_kern(vrh) > 0)
- vringh_notify(vrh);
- vringh_kiov_cleanup(riov);
- vringh_kiov_cleanup(wiov);
- /* Update avail idx for user space */
- vr->info->avail_idx = vrh->last_avail_idx;
- }
- return ret;
-}
-
-static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
- struct mic_copy_desc *copy)
-{
- if (copy->vr_idx >= mvdev->dd->num_vq) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EINVAL);
- return -EINVAL;
- }
- return 0;
-}
-
-/* Copy a specified number of virtio descriptors in a chain */
-int mic_virtio_copy_desc(struct mic_vdev *mvdev,
- struct mic_copy_desc *copy)
-{
- int err;
- struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
-
- err = mic_verify_copy_args(mvdev, copy);
- if (err)
- return err;
-
- mutex_lock(&mvr->vr_mutex);
- if (!mic_vdevup(mvdev)) {
- err = -ENODEV;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- goto err;
- }
- err = _mic_virtio_copy(mvdev, copy);
- if (err) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, err);
- }
-err:
- mutex_unlock(&mvr->vr_mutex);
- return err;
-}
-
-static void mic_virtio_init_post(struct mic_vdev *mvdev)
-{
- struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
- int i;
-
- for (i = 0; i < mvdev->dd->num_vq; i++) {
- if (!le64_to_cpu(vqconfig[i].used_address)) {
- dev_warn(mic_dev(mvdev), "used_address zero??\n");
- continue;
- }
- mvdev->mvr[i].vrh.vring.used =
- (void __force *)mvdev->mdev->aper.va +
- le64_to_cpu(vqconfig[i].used_address);
- }
-
- mvdev->dc->used_address_updated = 0;
-
- dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
- __func__, mvdev->virtio_id);
-}
-
-static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
-{
- int i;
-
- dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
- __func__, mvdev->dd->status, mvdev->virtio_id);
-
- for (i = 0; i < mvdev->dd->num_vq; i++)
- /*
- * Avoid lockdep false positive. The + 1 is for the mic
- * mutex which is held in the reset devices code path.
- */
- mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
-
- /* 0 status means "reset" */
- mvdev->dd->status = 0;
- mvdev->dc->vdev_reset = 0;
- mvdev->dc->host_ack = 1;
-
- for (i = 0; i < mvdev->dd->num_vq; i++) {
- struct vringh *vrh = &mvdev->mvr[i].vrh;
- mvdev->mvr[i].vring.info->avail_idx = 0;
- vrh->completed = 0;
- vrh->last_avail_idx = 0;
- vrh->last_used_idx = 0;
- }
-
- for (i = 0; i < mvdev->dd->num_vq; i++)
- mutex_unlock(&mvdev->mvr[i].vr_mutex);
-}
-
-void mic_virtio_reset_devices(struct mic_device *mdev)
-{
- struct list_head *pos, *tmp;
- struct mic_vdev *mvdev;
-
- dev_dbg(&mdev->pdev->dev, "%s\n", __func__);
-
- list_for_each_safe(pos, tmp, &mdev->vdev_list) {
- mvdev = list_entry(pos, struct mic_vdev, list);
- mic_virtio_device_reset(mvdev);
- mvdev->poll_wake = 1;
- wake_up(&mvdev->waitq);
- }
-}
-
-void mic_bh_handler(struct work_struct *work)
-{
- struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
- virtio_bh_work);
-
- if (mvdev->dc->used_address_updated)
- mic_virtio_init_post(mvdev);
-
- if (mvdev->dc->vdev_reset)
- mic_virtio_device_reset(mvdev);
-
- mvdev->poll_wake = 1;
- wake_up(&mvdev->waitq);
-}
-
-static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
-{
- struct mic_vdev *mvdev = data;
- struct mic_device *mdev = mvdev->mdev;
-
- mdev->ops->intr_workarounds(mdev);
- schedule_work(&mvdev->virtio_bh_work);
- return IRQ_HANDLED;
-}
-
-int mic_virtio_config_change(struct mic_vdev *mvdev,
- void __user *argp)
-{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
- int ret = 0, retry, i;
- struct mic_bootparam *bootparam = mvdev->mdev->dp;
- s8 db = bootparam->h2c_config_db;
-
- mutex_lock(&mvdev->mdev->mic_mutex);
- for (i = 0; i < mvdev->dd->num_vq; i++)
- mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
-
- if (db == -1 || mvdev->dd->type == -1) {
- ret = -EIO;
- goto exit;
- }
-
- if (copy_from_user(mic_vq_configspace(mvdev->dd),
- argp, mvdev->dd->config_len)) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EFAULT);
- ret = -EFAULT;
- goto exit;
- }
- mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
- mvdev->mdev->ops->send_intr(mvdev->mdev, db);
-
- for (retry = 100; retry--;) {
- ret = wait_event_timeout(wake,
- mvdev->dc->guest_ack, msecs_to_jiffies(100));
- if (ret)
- break;
- }
-
- dev_dbg(mic_dev(mvdev),
- "%s %d retry: %d\n", __func__, __LINE__, retry);
- mvdev->dc->config_change = 0;
- mvdev->dc->guest_ack = 0;
-exit:
- for (i = 0; i < mvdev->dd->num_vq; i++)
- mutex_unlock(&mvdev->mvr[i].vr_mutex);
- mutex_unlock(&mvdev->mdev->mic_mutex);
- return ret;
-}
-
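
The handshake above is doorbell-plus-polled-ack: since nothing ever wakes the on-stack wait queue, wait_event_timeout() effectively sleeps 100 ms per iteration and then rechecks guest_ack, for up to ~10 s total. Reduced to a sketch using the driver's own types:

	#include <linux/compiler.h>
	#include <linux/delay.h>

	/* Sketch: ring the card, then poll its ack flag (100 x 100 ms). */
	static bool ring_and_wait_ack(struct mic_vdev *mvdev, s8 db)
	{
		int retry;

		mvdev->mdev->ops->send_intr(mvdev->mdev, db);	/* doorbell */
		for (retry = 100; retry--;) {
			if (READ_ONCE(mvdev->dc->guest_ack))
				return true;
			msleep(100);
		}
		return false;					/* card never acked */
	}
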
-static int mic_copy_dp_entry(struct mic_vdev *mvdev,
- void __user *argp,
- __u8 *type,
- struct mic_device_desc **devpage)
-{
- struct mic_device *mdev = mvdev->mdev;
- struct mic_device_desc dd, *dd_config, *devp;
- struct mic_vqconfig *vqconfig;
- int ret = 0, i;
- bool slot_found = false;
-
- if (copy_from_user(&dd, argp, sizeof(dd))) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EFAULT);
- return -EFAULT;
- }
-
- if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
- dd.num_vq > MIC_MAX_VRINGS) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EINVAL);
- return -EINVAL;
- }
-
- dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
- if (dd_config == NULL) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -ENOMEM);
- return -ENOMEM;
- }
- if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
- ret = -EFAULT;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
-
- vqconfig = mic_vq_config(dd_config);
- for (i = 0; i < dd.num_vq; i++) {
- if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
- ret = -EINVAL;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
- }
-
- /* Find the first free device page entry */
- for (i = sizeof(struct mic_bootparam);
- i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
- i += mic_total_desc_size(devp)) {
- devp = mdev->dp + i;
- if (devp->type == 0 || devp->type == -1) {
- slot_found = true;
- break;
- }
- }
- if (!slot_found) {
- ret = -EINVAL;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto exit;
- }
- /*
-	 * Save off the type before doing the memcpy. The type will be set at
-	 * the end, after completing all initialization for the new device.
- */
- *type = dd_config->type;
- dd_config->type = 0;
- memcpy(devp, dd_config, mic_desc_size(dd_config));
-
- *devpage = devp;
-exit:
- kfree(dd_config);
- return ret;
-}
-
-static void mic_init_device_ctrl(struct mic_vdev *mvdev,
- struct mic_device_desc *devpage)
-{
- struct mic_device_ctrl *dc;
-
- dc = (void *)devpage + mic_aligned_desc_size(devpage);
-
- dc->config_change = 0;
- dc->guest_ack = 0;
- dc->vdev_reset = 0;
- dc->host_ack = 0;
- dc->used_address_updated = 0;
- dc->c2h_vdev_db = -1;
- dc->h2c_vdev_db = -1;
- mvdev->dc = dc;
-}
-
-int mic_virtio_add_device(struct mic_vdev *mvdev,
- void __user *argp)
-{
- struct mic_device *mdev = mvdev->mdev;
- struct mic_device_desc *dd = NULL;
- struct mic_vqconfig *vqconfig;
- int vr_size, i, j, ret;
- u8 type = 0;
- s8 db;
- char irqname[10];
- struct mic_bootparam *bootparam = mdev->dp;
- u16 num;
- dma_addr_t vr_addr;
-
- mutex_lock(&mdev->mic_mutex);
-
- ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
- if (ret) {
- mutex_unlock(&mdev->mic_mutex);
- return ret;
- }
-
- mic_init_device_ctrl(mvdev, dd);
-
- mvdev->dd = dd;
- mvdev->virtio_id = type;
- vqconfig = mic_vq_config(dd);
- INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);
-
- for (i = 0; i < dd->num_vq; i++) {
- struct mic_vringh *mvr = &mvdev->mvr[i];
- struct mic_vring *vr = &mvdev->mvr[i].vring;
- num = le16_to_cpu(vqconfig[i].num);
- mutex_init(&mvr->vr_mutex);
- vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
- sizeof(struct _mic_vring_info));
- vr->va = (void *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(vr_size));
- if (!vr->va) {
- ret = -ENOMEM;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vr->len = vr_size;
- vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
- vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
- vr_addr = mic_map_single(mdev, vr->va, vr_size);
- if (mic_map_error(vr_addr)) {
- free_pages((unsigned long)vr->va, get_order(vr_size));
- ret = -ENOMEM;
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vqconfig[i].address = cpu_to_le64(vr_addr);
-
- vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
- ret = vringh_init_kern(&mvr->vrh,
- *(u32 *)mic_vq_features(mvdev->dd), num, false,
- vr->vr.desc, vr->vr.avail, vr->vr.used);
- if (ret) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, ret);
- goto err;
- }
- vringh_kiov_init(&mvr->riov, NULL, 0);
- vringh_kiov_init(&mvr->wiov, NULL, 0);
- mvr->head = USHRT_MAX;
- mvr->mvdev = mvdev;
- mvr->vrh.notify = mic_notify;
- dev_dbg(&mdev->pdev->dev,
- "%s %d index %d va %p info %p vr_size 0x%x\n",
- __func__, __LINE__, i, vr->va, vr->info, vr_size);
- mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
- get_order(MIC_INT_DMA_BUF_SIZE));
- mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
- MIC_INT_DMA_BUF_SIZE);
- }
-
- snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
- mvdev->virtio_id);
- mvdev->virtio_db = mic_next_db(mdev);
- mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
- mic_virtio_intr_handler,
- NULL, irqname, mvdev,
- mvdev->virtio_db, MIC_INTR_DB);
- if (IS_ERR(mvdev->virtio_cookie)) {
- ret = PTR_ERR(mvdev->virtio_cookie);
- dev_dbg(&mdev->pdev->dev, "request irq failed\n");
- goto err;
- }
-
- mvdev->dc->c2h_vdev_db = mvdev->virtio_db;
-
- list_add_tail(&mvdev->list, &mdev->vdev_list);
- /*
- * Order the type update with previous stores. This write barrier
- * is paired with the corresponding read barrier before the uncached
- * system memory read of the type, on the card while scanning the
- * device page.
- */
- smp_wmb();
- dd->type = type;
-
- dev_dbg(&mdev->pdev->dev, "Added virtio device id %d\n", dd->type);
-
- db = bootparam->h2c_config_db;
- if (db != -1)
- mdev->ops->send_intr(mdev, db);
- mutex_unlock(&mdev->mic_mutex);
- return 0;
-err:
- vqconfig = mic_vq_config(dd);
- for (j = 0; j < i; j++) {
- struct mic_vringh *mvr = &mvdev->mvr[j];
- mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
- mvr->vring.len);
- free_pages((unsigned long)mvr->vring.va,
- get_order(mvr->vring.len));
- }
- mutex_unlock(&mdev->mic_mutex);
- return ret;
-}
-
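
The publish/consume ordering the comment above describes, reduced to its skeleton; the consumer side is sketched here for illustration (the actual reader lives in the card-side driver):

	#include <linux/string.h>
	#include <linux/types.h>
	#include <linux/mic_common.h>
	#include <asm/barrier.h>

	/* Host (publisher), mirroring mic_virtio_add_device() above: */
	static void publish_desc(struct mic_device_desc *devp,
				 const struct mic_device_desc *src,
				 size_t size, __u8 type)
	{
		memcpy(devp, src, size);	/* fills, with src->type still 0 */
		smp_wmb();			/* order the fills before the publish */
		devp->type = type;		/* card scans the page on type */
	}

	/* Card (consumer), a sketch of the paired read side: */
	static bool desc_ready(struct mic_device_desc *devp)
	{
		if (devp->type <= 0)		/* 0: end of list, -1: removed */
			return false;
		rmb();				/* pairs with the smp_wmb() above */
		return true;			/* rest of descriptor safe to read */
	}
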
-void mic_virtio_del_device(struct mic_vdev *mvdev)
-{
- struct list_head *pos, *tmp;
- struct mic_vdev *tmp_mvdev;
- struct mic_device *mdev = mvdev->mdev;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
- int i, ret, retry;
- struct mic_vqconfig *vqconfig;
- struct mic_bootparam *bootparam = mdev->dp;
- s8 db;
-
- mutex_lock(&mdev->mic_mutex);
- db = bootparam->h2c_config_db;
- if (db == -1)
- goto skip_hot_remove;
- dev_dbg(&mdev->pdev->dev,
- "Requesting hot remove id %d\n", mvdev->virtio_id);
- mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
- mdev->ops->send_intr(mdev, db);
- for (retry = 100; retry--;) {
- ret = wait_event_timeout(wake,
- mvdev->dc->guest_ack, msecs_to_jiffies(100));
- if (ret)
- break;
- }
- dev_dbg(&mdev->pdev->dev,
- "Device id %d config_change %d guest_ack %d retry %d\n",
- mvdev->virtio_id, mvdev->dc->config_change,
- mvdev->dc->guest_ack, retry);
- mvdev->dc->config_change = 0;
- mvdev->dc->guest_ack = 0;
-skip_hot_remove:
- mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
- flush_work(&mvdev->virtio_bh_work);
- vqconfig = mic_vq_config(mvdev->dd);
- for (i = 0; i < mvdev->dd->num_vq; i++) {
- struct mic_vringh *mvr = &mvdev->mvr[i];
-
- mic_unmap_single(mvdev->mdev, mvr->buf_da,
- MIC_INT_DMA_BUF_SIZE);
- free_pages((unsigned long)mvr->buf,
- get_order(MIC_INT_DMA_BUF_SIZE));
- vringh_kiov_cleanup(&mvr->riov);
- vringh_kiov_cleanup(&mvr->wiov);
- mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
- mvr->vring.len);
- free_pages((unsigned long)mvr->vring.va,
- get_order(mvr->vring.len));
- }
-
- list_for_each_safe(pos, tmp, &mdev->vdev_list) {
- tmp_mvdev = list_entry(pos, struct mic_vdev, list);
- if (tmp_mvdev == mvdev) {
- list_del(pos);
- dev_dbg(&mdev->pdev->dev,
- "Removing virtio device id %d\n",
- mvdev->virtio_id);
- break;
- }
- }
- /*
- * Order the type update with previous stores. This write barrier
- * is paired with the corresponding read barrier before the uncached
- * system memory read of the type, on the card while scanning the
- * device page.
- */
- smp_wmb();
- mvdev->dd->type = -1;
- mutex_unlock(&mdev->mic_mutex);
-}
diff --git a/drivers/misc/mic/host/mic_x100.c b/drivers/misc/mic/host/mic_x100.c
index 8118ac48c764..82a973c85b5d 100644
--- a/drivers/misc/mic/host/mic_x100.c
+++ b/drivers/misc/mic/host/mic_x100.c
@@ -450,26 +450,29 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
rc = mic_x100_get_boot_addr(mdev);
if (rc)
- goto error;
+ return rc;
/* load OS */
rc = request_firmware(&fw, mdev->cosm_dev->firmware, &mdev->pdev->dev);
if (rc < 0) {
dev_err(&mdev->pdev->dev,
"ramdisk request_firmware failed: %d %s\n",
rc, mdev->cosm_dev->firmware);
- goto error;
+ return rc;
}
if (mdev->bootaddr > mdev->aper.len - fw->size) {
rc = -EINVAL;
dev_err(&mdev->pdev->dev, "%s %d rc %d bootaddr 0x%x\n",
__func__, __LINE__, rc, mdev->bootaddr);
- release_firmware(fw);
goto error;
}
memcpy_toio(mdev->aper.va + mdev->bootaddr, fw->data, fw->size);
mdev->ops->write_spad(mdev, MIC_X100_FW_SIZE, fw->size);
- if (!strcmp(mdev->cosm_dev->bootmode, "flash"))
- goto done;
+ if (!strcmp(mdev->cosm_dev->bootmode, "flash")) {
+ rc = -EINVAL;
+ dev_err(&mdev->pdev->dev, "%s %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto error;
+ }
/* load command line */
rc = mic_x100_load_command_line(mdev, fw);
if (rc) {
@@ -481,9 +484,11 @@ mic_x100_load_firmware(struct mic_device *mdev, const char *buf)
/* load ramdisk */
if (mdev->cosm_dev->ramdisk)
rc = mic_x100_load_ramdisk(mdev);
+
+ return rc;
+
error:
- dev_dbg(&mdev->pdev->dev, "%s %d rc %d\n", __func__, __LINE__, rc);
-done:
+ release_firmware(fw);
return rc;
}
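
The hunks above converge every post-request_firmware() failure on one label so the image is always released; the resulting shape, as a minimal self-contained sketch:

	#include <linux/device.h>
	#include <linux/firmware.h>
	#include <linux/io.h>

	static int demo_load_fw(struct device *dev, const char *name,
				void __iomem *dst, size_t dst_len)
	{
		const struct firmware *fw;
		int rc;

		rc = request_firmware(&fw, name, dev);
		if (rc < 0)
			return rc;		/* nothing acquired yet */
		if (fw->size > dst_len) {
			rc = -EINVAL;
			goto out;		/* all later failures release fw */
		}
		memcpy_toio(dst, fw->data, fw->size);
	out:
		release_firmware(fw);
		return rc;
	}
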
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 95a13c629a8e..cd01a0efda6b 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -74,11 +74,6 @@ struct scif_copy_work {
bool ordered;
};
-#ifndef list_entry_next
-#define list_entry_next(pos, member) \
- list_entry(pos->member.next, typeof(*pos), member)
-#endif
-
/**
* scif_reserve_dma_chan:
* @ep: Endpoint Descriptor.
@@ -276,13 +271,10 @@ static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
{
struct scif_mmu_notif *mmn;
- struct list_head *item;
- list_for_each(item, &rma->mmn_list) {
- mmn = list_entry(item, struct scif_mmu_notif, list);
+ list_for_each_entry(mmn, &rma->mmn_list, list)
if (mmn->mm == mm)
return mmn;
- }
return NULL;
}
@@ -293,13 +285,12 @@ scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
= kzalloc(sizeof(*mmn), GFP_KERNEL);
if (!mmn)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
scif_init_mmu_notifier(mmn, current->mm, ep);
- if (mmu_notifier_register(&mmn->ep_mmu_notifier,
- current->mm)) {
+ if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) {
kfree(mmn);
- return ERR_PTR(EBUSY);
+ return ERR_PTR(-EBUSY);
}
list_add(&mmn->list, &ep->rma_info.mmn_list);
return mmn;
@@ -851,7 +842,7 @@ static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
(window->nr_pages << PAGE_SHIFT);
while (rem_len) {
if (offset == end_offset) {
- window = list_entry_next(window, list);
+ window = list_next_entry(window, list);
end_offset = window->offset +
(window->nr_pages << PAGE_SHIFT);
}
@@ -957,7 +948,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
remaining_len -= tail_len;
while (remaining_len) {
if (offset == end_offset) {
- window = list_entry_next(window, list);
+ window = list_next_entry(window, list);
end_offset = window->offset +
(window->nr_pages << PAGE_SHIFT);
}
@@ -1064,7 +1055,7 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
}
if (tail_len) {
if (offset == end_offset) {
- window = list_entry_next(window, list);
+ window = list_next_entry(window, list);
end_offset = window->offset +
(window->nr_pages << PAGE_SHIFT);
}
@@ -1147,13 +1138,13 @@ static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
(dst_window->nr_pages << PAGE_SHIFT);
while (remaining_len) {
if (src_offset == end_src_offset) {
- src_window = list_entry_next(src_window, list);
+ src_window = list_next_entry(src_window, list);
end_src_offset = src_window->offset +
(src_window->nr_pages << PAGE_SHIFT);
scif_init_window_iter(src_window, &src_win_iter);
}
if (dst_offset == end_dst_offset) {
- dst_window = list_entry_next(dst_window, list);
+ dst_window = list_next_entry(dst_window, list);
end_dst_offset = dst_window->offset +
(dst_window->nr_pages << PAGE_SHIFT);
scif_init_window_iter(dst_window, &dst_win_iter);
@@ -1314,13 +1305,13 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
remaining_len -= tail_len;
while (remaining_len) {
if (src_offset == end_src_offset) {
- src_window = list_entry_next(src_window, list);
+ src_window = list_next_entry(src_window, list);
end_src_offset = src_window->offset +
(src_window->nr_pages << PAGE_SHIFT);
scif_init_window_iter(src_window, &src_win_iter);
}
if (dst_offset == end_dst_offset) {
- dst_window = list_entry_next(dst_window, list);
+ dst_window = list_next_entry(dst_window, list);
end_dst_offset = dst_window->offset +
(dst_window->nr_pages << PAGE_SHIFT);
scif_init_window_iter(dst_window, &dst_win_iter);
@@ -1405,9 +1396,9 @@ static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
if (remaining_len) {
loop_len = remaining_len;
if (src_offset == end_src_offset)
- src_window = list_entry_next(src_window, list);
+ src_window = list_next_entry(src_window, list);
if (dst_offset == end_dst_offset)
- dst_window = list_entry_next(dst_window, list);
+ dst_window = list_next_entry(dst_window, list);
src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
@@ -1550,12 +1541,12 @@ static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
end_dst_offset = dst_window->offset +
(dst_window->nr_pages << PAGE_SHIFT);
if (src_offset == end_src_offset) {
- src_window = list_entry_next(src_window, list);
+ src_window = list_next_entry(src_window, list);
scif_init_window_iter(src_window,
&src_win_iter);
}
if (dst_offset == end_dst_offset) {
- dst_window = list_entry_next(dst_window, list);
+ dst_window = list_next_entry(dst_window, list);
scif_init_window_iter(dst_window,
&dst_win_iter);
}
@@ -1730,7 +1721,7 @@ static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr,
mutex_lock(&ep->rma_info.mmn_lock);
mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info);
if (!mmn)
- scif_add_mmu_notifier(current->mm, ep);
+ mmn = scif_add_mmu_notifier(current->mm, ep);
mutex_unlock(&ep->rma_info.mmn_lock);
if (IS_ERR(mmn)) {
scif_put_peer_dev(spdev);
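
list_next_entry() is the mainline helper that made the driver-local list_entry_next() redundant; its upstream definition (include/linux/list.h) and the usage pattern from the hunks above:

	#define list_next_entry(pos, member) \
		list_entry((pos)->member.next, typeof(*(pos)), member)

	/* Step to the next window once the offset crosses the current end: */
	if (offset == end_offset)
		window = list_next_entry(window, list);
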
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 8310b4dbff06..e0203b1a20fd 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1394,8 +1394,6 @@ retry:
}
pinned_pages->nr_pages = get_user_pages(
- current,
- mm,
(u64)addr,
nr_pages,
!!(prot & SCIF_PROT_WRITE),
@@ -1511,7 +1509,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
if ((map_flags & SCIF_MAP_FIXED) &&
((ALIGN(offset, PAGE_SIZE) != offset) ||
(offset < 0) ||
- (offset + (off_t)len < offset)))
+ (len > LONG_MAX - offset)))
return -EINVAL;
might_sleep();
@@ -1614,7 +1612,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
if ((map_flags & SCIF_MAP_FIXED) &&
((ALIGN(offset, PAGE_SIZE) != offset) ||
(offset < 0) ||
- (offset + (off_t)len < offset)))
+ (len > LONG_MAX - offset)))
return -EINVAL;
/* Unsupported protection requested */
@@ -1732,7 +1730,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
/* Offset is not page aligned or offset+len wraps around */
if ((ALIGN(offset, PAGE_SIZE) != offset) ||
- (offset + (off_t)len < offset))
+ (offset < 0) ||
+ (len > LONG_MAX - offset))
return -EINVAL;
err = scif_verify_epd(ep);
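
The replaced test, offset + (off_t)len < offset, relies on signed overflow, which is undefined behavior in C; the new form bounds len without ever overflowing. A sketch of the check, valid under the assumption that offset has already been verified non-negative, as the surrounding conditions ensure:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* True if offset + len cannot be represented in an off_t (a long).
	 * LONG_MAX - offset never underflows because offset >= 0 here. */
	static bool range_overflows(off_t offset, size_t len)
	{
		return len > (size_t)(LONG_MAX - offset);
	}
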
diff --git a/drivers/misc/mic/vop/Makefile b/drivers/misc/mic/vop/Makefile
new file mode 100644
index 000000000000..78819c8999f1
--- /dev/null
+++ b/drivers/misc/mic/vop/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile - Intel MIC Linux driver.
+# Copyright(c) 2016, Intel Corporation.
+#
+obj-m := vop.o
+
+vop-objs += vop_main.o
+vop-objs += vop_debugfs.o
+vop-objs += vop_vringh.o
diff --git a/drivers/misc/mic/vop/vop_debugfs.c b/drivers/misc/mic/vop/vop_debugfs.c
new file mode 100644
index 000000000000..ab43884e5cd7
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_debugfs.c
@@ -0,0 +1,232 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "vop_main.h"
+
+static int vop_dp_show(struct seq_file *s, void *pos)
+{
+ struct mic_device_desc *d;
+ struct mic_device_ctrl *dc;
+ struct mic_vqconfig *vqconfig;
+ __u32 *features;
+ __u8 *config;
+ struct vop_info *vi = s->private;
+ struct vop_device *vpdev = vi->vpdev;
+ struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+ int j, k;
+
+ seq_printf(s, "Bootparam: magic 0x%x\n",
+ bootparam->magic);
+ seq_printf(s, "Bootparam: h2c_config_db %d\n",
+ bootparam->h2c_config_db);
+ seq_printf(s, "Bootparam: node_id %d\n",
+ bootparam->node_id);
+ seq_printf(s, "Bootparam: c2h_scif_db %d\n",
+ bootparam->c2h_scif_db);
+ seq_printf(s, "Bootparam: h2c_scif_db %d\n",
+ bootparam->h2c_scif_db);
+ seq_printf(s, "Bootparam: scif_host_dma_addr 0x%llx\n",
+ bootparam->scif_host_dma_addr);
+ seq_printf(s, "Bootparam: scif_card_dma_addr 0x%llx\n",
+ bootparam->scif_card_dma_addr);
+
+ for (j = sizeof(*bootparam);
+ j < MIC_DP_SIZE; j += mic_total_desc_size(d)) {
+ d = (void *)bootparam + j;
+ dc = (void *)d + mic_aligned_desc_size(d);
+
+ /* end of list */
+ if (d->type == 0)
+ break;
+
+ if (d->type == -1)
+ continue;
+
+ seq_printf(s, "Type %d ", d->type);
+ seq_printf(s, "Num VQ %d ", d->num_vq);
+ seq_printf(s, "Feature Len %d\n", d->feature_len);
+ seq_printf(s, "Config Len %d ", d->config_len);
+ seq_printf(s, "Shutdown Status %d\n", d->status);
+
+ for (k = 0; k < d->num_vq; k++) {
+ vqconfig = mic_vq_config(d) + k;
+ seq_printf(s, "vqconfig[%d]: ", k);
+ seq_printf(s, "address 0x%llx ",
+ vqconfig->address);
+ seq_printf(s, "num %d ", vqconfig->num);
+ seq_printf(s, "used address 0x%llx\n",
+ vqconfig->used_address);
+ }
+
+ features = (__u32 *)mic_vq_features(d);
+ seq_printf(s, "Features: Host 0x%x ", features[0]);
+ seq_printf(s, "Guest 0x%x\n", features[1]);
+
+ config = mic_vq_configspace(d);
+ for (k = 0; k < d->config_len; k++)
+ seq_printf(s, "config[%d]=%d\n", k, config[k]);
+
+ seq_puts(s, "Device control:\n");
+ seq_printf(s, "Config Change %d ", dc->config_change);
+ seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
+ seq_printf(s, "Guest Ack %d ", dc->guest_ack);
+ seq_printf(s, "Host ack %d\n", dc->host_ack);
+ seq_printf(s, "Used address updated %d ",
+ dc->used_address_updated);
+ seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
+ seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
+ seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
+ }
+ schedule_work(&vi->hotplug_work);
+ return 0;
+}
+
+static int vop_dp_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vop_dp_show, inode->i_private);
+}
+
+static int vop_dp_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations dp_ops = {
+ .owner = THIS_MODULE,
+ .open = vop_dp_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = vop_dp_debug_release
+};
+
+static int vop_vdev_info_show(struct seq_file *s, void *unused)
+{
+ struct vop_info *vi = s->private;
+ struct list_head *pos, *tmp;
+ struct vop_vdev *vdev;
+ int i, j;
+
+ mutex_lock(&vi->vop_mutex);
+ list_for_each_safe(pos, tmp, &vi->vdev_list) {
+ vdev = list_entry(pos, struct vop_vdev, list);
+ seq_printf(s, "VDEV type %d state %s in %ld out %ld in_dma %ld out_dma %ld\n",
+ vdev->virtio_id,
+ vop_vdevup(vdev) ? "UP" : "DOWN",
+ vdev->in_bytes,
+ vdev->out_bytes,
+ vdev->in_bytes_dma,
+ vdev->out_bytes_dma);
+ for (i = 0; i < MIC_MAX_VRINGS; i++) {
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ struct vop_vringh *vvr = &vdev->vvr[i];
+ struct vringh *vrh = &vvr->vrh;
+ int num = vrh->vring.num;
+
+ if (!num)
+ continue;
+ desc = vrh->vring.desc;
+ seq_printf(s, "vring i %d avail_idx %d",
+ i, vvr->vring.info->avail_idx & (num - 1));
+ seq_printf(s, " vring i %d avail_idx %d\n",
+ i, vvr->vring.info->avail_idx);
+ seq_printf(s, "vrh i %d weak_barriers %d",
+ i, vrh->weak_barriers);
+ seq_printf(s, " last_avail_idx %d last_used_idx %d",
+ vrh->last_avail_idx, vrh->last_used_idx);
+ seq_printf(s, " completed %d\n", vrh->completed);
+ for (j = 0; j < num; j++) {
+ seq_printf(s, "desc[%d] addr 0x%llx len %d",
+ j, desc->addr, desc->len);
+ seq_printf(s, " flags 0x%x next %d\n",
+ desc->flags, desc->next);
+ desc++;
+ }
+ avail = vrh->vring.avail;
+ seq_printf(s, "avail flags 0x%x idx %d\n",
+ vringh16_to_cpu(vrh, avail->flags),
+ vringh16_to_cpu(vrh,
+ avail->idx) & (num - 1));
+ seq_printf(s, "avail flags 0x%x idx %d\n",
+ vringh16_to_cpu(vrh, avail->flags),
+ vringh16_to_cpu(vrh, avail->idx));
+ for (j = 0; j < num; j++)
+ seq_printf(s, "avail ring[%d] %d\n",
+ j, avail->ring[j]);
+ used = vrh->vring.used;
+ seq_printf(s, "used flags 0x%x idx %d\n",
+ vringh16_to_cpu(vrh, used->flags),
+ vringh16_to_cpu(vrh, used->idx) & (num - 1));
+ seq_printf(s, "used flags 0x%x idx %d\n",
+ vringh16_to_cpu(vrh, used->flags),
+ vringh16_to_cpu(vrh, used->idx));
+ for (j = 0; j < num; j++)
+ seq_printf(s, "used ring[%d] id %d len %d\n",
+ j, vringh32_to_cpu(vrh,
+ used->ring[j].id),
+ vringh32_to_cpu(vrh,
+ used->ring[j].len));
+ }
+ }
+ mutex_unlock(&vi->vop_mutex);
+
+ return 0;
+}
+
+static int vop_vdev_info_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vop_vdev_info_show, inode->i_private);
+}
+
+static int vop_vdev_info_debug_release(struct inode *inode, struct file *file)
+{
+ return single_release(inode, file);
+}
+
+static const struct file_operations vdev_info_ops = {
+ .owner = THIS_MODULE,
+ .open = vop_vdev_info_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = vop_vdev_info_debug_release
+};
+
+void vop_init_debugfs(struct vop_info *vi)
+{
+ char name[16];
+
+ snprintf(name, sizeof(name), "%s%d", KBUILD_MODNAME, vi->vpdev->dnode);
+ vi->dbg = debugfs_create_dir(name, NULL);
+ if (!vi->dbg) {
+ pr_err("can't create debugfs dir vop\n");
+ return;
+ }
+ debugfs_create_file("dp", 0444, vi->dbg, vi, &dp_ops);
+ debugfs_create_file("vdev_info", 0444, vi->dbg, vi, &vdev_info_ops);
+}
+
+void vop_exit_debugfs(struct vop_info *vi)
+{
+ debugfs_remove_recursive(vi->dbg);
+}
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
new file mode 100644
index 000000000000..1a2b67f3183d
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -0,0 +1,755 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Adapted from:
+ *
+ * virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include "vop_main.h"
+
+#define VOP_MAX_VRINGS 4
+
+/*
+ * _vop_vdev - Allocated per virtio device instance injected by the peer.
+ *
+ * @vdev: Virtio device
+ * @desc: Virtio device page descriptor
+ * @dc: Virtio device control
+ * @vpdev: VOP device which is the parent for this virtio device
+ * @vr: Buffer for accessing the VRING
+ * @used: DMA address of the used ring, per vring
+ * @used_size: Size of the used buffer
+ * @reset_done: Track whether VOP reset is complete
+ * @virtio_cookie: Cookie returned upon requesting an interrupt
+ * @c2h_vdev_db: The doorbell used by the guest to interrupt the host
+ * @h2c_vdev_db: The doorbell used by the host to interrupt the guest
+ * @dnode: The destination node
+ */
+struct _vop_vdev {
+ struct virtio_device vdev;
+ struct mic_device_desc __iomem *desc;
+ struct mic_device_ctrl __iomem *dc;
+ struct vop_device *vpdev;
+ void __iomem *vr[VOP_MAX_VRINGS];
+ dma_addr_t used[VOP_MAX_VRINGS];
+ int used_size[VOP_MAX_VRINGS];
+ struct completion reset_done;
+ struct mic_irq *virtio_cookie;
+ int c2h_vdev_db;
+ int h2c_vdev_db;
+ int dnode;
+};
+
+#define to_vopvdev(vd) container_of(vd, struct _vop_vdev, vdev)
+
+#define _vop_aligned_desc_size(d) __mic_align(_vop_desc_size(d), 8)
+
+/* Helper API to obtain the parent of the virtio device */
+static inline struct device *_vop_dev(struct _vop_vdev *vdev)
+{
+ return vdev->vdev.dev.parent;
+}
+
+static inline unsigned _vop_desc_size(struct mic_device_desc __iomem *desc)
+{
+ return sizeof(*desc)
+ + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig)
+ + ioread8(&desc->feature_len) * 2
+ + ioread8(&desc->config_len);
+}
+
+static inline struct mic_vqconfig __iomem *
+_vop_vq_config(struct mic_device_desc __iomem *desc)
+{
+ return (struct mic_vqconfig __iomem *)(desc + 1);
+}
+
+static inline u8 __iomem *
+_vop_vq_features(struct mic_device_desc __iomem *desc)
+{
+ return (u8 __iomem *)(_vop_vq_config(desc) + ioread8(&desc->num_vq));
+}
+
+static inline u8 __iomem *
+_vop_vq_configspace(struct mic_device_desc __iomem *desc)
+{
+ return _vop_vq_features(desc) + ioread8(&desc->feature_len) * 2;
+}
+
+static inline unsigned
+_vop_total_desc_size(struct mic_device_desc __iomem *desc)
+{
+ return _vop_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl);
+}
+
+/* This gets the device's feature bits. */
+static u64 vop_get_features(struct virtio_device *vdev)
+{
+ unsigned int i, bits;
+ u32 features = 0;
+ struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+ u8 __iomem *in_features = _vop_vq_features(desc);
+ int feature_len = ioread8(&desc->feature_len);
+
+ bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8;
+ for (i = 0; i < bits; i++)
+ if (ioread8(&in_features[i / 8]) & (BIT(i % 8)))
+ features |= BIT(i);
+
+ return features;
+}
+
+static int vop_finalize_features(struct virtio_device *vdev)
+{
+ unsigned int i, bits;
+ struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+ u8 feature_len = ioread8(&desc->feature_len);
+ /* Second half of bitmap is features we accept. */
+ u8 __iomem *out_features =
+ _vop_vq_features(desc) + feature_len;
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ memset_io(out_features, 0, feature_len);
+ bits = min_t(unsigned, feature_len,
+ sizeof(vdev->features)) * 8;
+ for (i = 0; i < bits; i++) {
+ if (__virtio_test_bit(vdev, i))
+ iowrite8(ioread8(&out_features[i / 8]) | (1 << (i % 8)),
+ &out_features[i / 8]);
+ }
+ return 0;
+}
+
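
The feature area is two feature_len-byte halves: the first carries what the host offers (walked by vop_get_features() above), the second what the guest accepts (filled by vop_finalize_features()). The byte-bitmap convention itself, as a non-__iomem sketch:

	#include <linux/bitops.h>
	#include <linux/types.h>

	/* Test and set bit i in a byte-granular bitmap, matching the layout
	 * used for both halves of the device-page feature area. */
	static inline bool feat_test(const u8 *map, unsigned int i)
	{
		return map[i / 8] & BIT(i % 8);
	}

	static inline void feat_set(u8 *map, unsigned int i)
	{
		map[i / 8] |= BIT(i % 8);
	}
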
+/*
+ * Reading and writing elements in config space
+ */
+static void vop_get(struct virtio_device *vdev, unsigned int offset,
+ void *buf, unsigned len)
+{
+ struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+
+ if (offset + len > ioread8(&desc->config_len))
+ return;
+ memcpy_fromio(buf, _vop_vq_configspace(desc) + offset, len);
+}
+
+static void vop_set(struct virtio_device *vdev, unsigned int offset,
+ const void *buf, unsigned len)
+{
+ struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc;
+
+ if (offset + len > ioread8(&desc->config_len))
+ return;
+ memcpy_toio(_vop_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access the status
+ * field of the device descriptor. set_status also interrupts the host
+ * to tell about status changes.
+ */
+static u8 vop_get_status(struct virtio_device *vdev)
+{
+ return ioread8(&to_vopvdev(vdev)->desc->status);
+}
+
+static void vop_set_status(struct virtio_device *dev, u8 status)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct vop_device *vpdev = vdev->vpdev;
+
+ if (!status)
+ return;
+ iowrite8(status, &vdev->desc->status);
+ vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+}
+
+/* Inform host on a virtio device reset and wait for ack from host */
+static void vop_reset_inform_host(struct virtio_device *dev)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct mic_device_ctrl __iomem *dc = vdev->dc;
+ struct vop_device *vpdev = vdev->vpdev;
+ int retry;
+
+ iowrite8(0, &dc->host_ack);
+ iowrite8(1, &dc->vdev_reset);
+ vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+
+ /* Wait till host completes all card accesses and acks the reset */
+ for (retry = 100; retry--;) {
+ if (ioread8(&dc->host_ack))
+ break;
+ msleep(100);
+	}
+
+ dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
+
+ /* Reset status to 0 in case we timed out */
+ iowrite8(0, &vdev->desc->status);
+}
+
+static void vop_reset(struct virtio_device *dev)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+
+ dev_dbg(_vop_dev(vdev), "%s: virtio id %d\n",
+ __func__, dev->id.device);
+
+ vop_reset_inform_host(dev);
+ complete_all(&vdev->reset_done);
+}
+
+/*
+ * The virtio_ring code calls this API when it wants to notify the Host.
+ */
+static bool vop_notify(struct virtqueue *vq)
+{
+ struct _vop_vdev *vdev = vq->priv;
+ struct vop_device *vpdev = vdev->vpdev;
+
+ vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+ return true;
+}
+
+static void vop_del_vq(struct virtqueue *vq, int n)
+{
+ struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
+ struct vring *vr = (struct vring *)(vq + 1);
+ struct vop_device *vpdev = vdev->vpdev;
+
+ dma_unmap_single(&vpdev->dev, vdev->used[n],
+ vdev->used_size[n], DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vr->used, get_order(vdev->used_size[n]));
+ vring_del_virtqueue(vq);
+ vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
+ vdev->vr[n] = NULL;
+}
+
+static void vop_del_vqs(struct virtio_device *dev)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct virtqueue *vq, *n;
+ int idx = 0;
+
+ dev_dbg(_vop_dev(vdev), "%s\n", __func__);
+
+ list_for_each_entry_safe(vq, n, &dev->vqs, list)
+ vop_del_vq(vq, idx++);
+}
+
+/*
+ * This routine assigns vrings allocated in host/IO memory. Code in
+ * virtio_ring.c, however, continues to access this IO memory as if it
+ * were local memory, without IO accessors.
+ */
+static struct virtqueue *vop_find_vq(struct virtio_device *dev,
+ unsigned index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct vop_device *vpdev = vdev->vpdev;
+ struct mic_vqconfig __iomem *vqconfig;
+ struct mic_vqconfig config;
+ struct virtqueue *vq;
+ void __iomem *va;
+ struct _mic_vring_info __iomem *info;
+ void *used;
+ int vr_size, _vr_size, err, magic;
+ struct vring *vr;
+ u8 type = ioread8(&vdev->desc->type);
+
+ if (index >= ioread8(&vdev->desc->num_vq))
+ return ERR_PTR(-ENOENT);
+
+ if (!name)
+ return ERR_PTR(-ENOENT);
+
+	/* First assign the vrings allocated in host memory */
+ vqconfig = _vop_vq_config(vdev->desc) + index;
+ memcpy_fromio(&config, vqconfig, sizeof(config));
+ _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN);
+ vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info));
+ va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address),
+ vr_size);
+ if (!va)
+ return ERR_PTR(-ENOMEM);
+ vdev->vr[index] = va;
+ memset_io(va, 0x0, _vr_size);
+ vq = vring_new_virtqueue(
+ index,
+ le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
+ dev,
+ false,
+ (void __force *)va, vop_notify, callback, name);
+ if (!vq) {
+ err = -ENOMEM;
+ goto unmap;
+ }
+ info = va + _vr_size;
+ magic = ioread32(&info->magic);
+
+ if (WARN(magic != MIC_MAGIC + type + index, "magic mismatch")) {
+ err = -EIO;
+ goto unmap;
+ }
+
+ /* Allocate and reassign used ring now */
+ vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
+ sizeof(struct vring_used_elem) *
+ le16_to_cpu(config.num));
+ used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(vdev->used_size[index]));
+ if (!used) {
+ err = -ENOMEM;
+ dev_err(_vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto del_vq;
+ }
+ vdev->used[index] = dma_map_single(&vpdev->dev, used,
+ vdev->used_size[index],
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&vpdev->dev, vdev->used[index])) {
+ err = -ENOMEM;
+ dev_err(_vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto free_used;
+ }
+ writeq(vdev->used[index], &vqconfig->used_address);
+ /*
+ * To reassign the used ring here we are directly accessing
+ * struct vring_virtqueue which is a private data structure
+ * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
+ * vring_new_virtqueue() would ensure that
+ * (&vq->vring == (struct vring *) (&vq->vq + 1));
+ */
+ vr = (struct vring *)(vq + 1);
+ vr->used = used;
+
+ vq->priv = vdev;
+ return vq;
+free_used:
+ free_pages((unsigned long)used,
+ get_order(vdev->used_size[index]));
+del_vq:
+ vring_del_virtqueue(vq);
+unmap:
+ vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
+ return ERR_PTR(err);
+}
+
+static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[])
+{
+ struct _vop_vdev *vdev = to_vopvdev(dev);
+ struct vop_device *vpdev = vdev->vpdev;
+ struct mic_device_ctrl __iomem *dc = vdev->dc;
+ int i, err, retry;
+
+ /* We must have this many virtqueues. */
+ if (nvqs > ioread8(&vdev->desc->num_vq))
+ return -ENOENT;
+
+ for (i = 0; i < nvqs; ++i) {
+ dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
+ __func__, i, names[i]);
+ vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i]);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+
+ iowrite8(1, &dc->used_address_updated);
+ /*
+ * Send an interrupt to the host to inform it that used
+ * rings have been re-assigned.
+ */
+ vpdev->hw_ops->send_intr(vpdev, vdev->c2h_vdev_db);
+ for (retry = 100; --retry;) {
+ if (!ioread8(&dc->used_address_updated))
+ break;
+ msleep(100);
+	}
+
+ dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry);
+ if (!retry) {
+ err = -ENODEV;
+ goto error;
+ }
+
+ return 0;
+error:
+ vop_del_vqs(dev);
+ return err;
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops vop_vq_config_ops = {
+ .get_features = vop_get_features,
+ .finalize_features = vop_finalize_features,
+ .get = vop_get,
+ .set = vop_set,
+ .get_status = vop_get_status,
+ .set_status = vop_set_status,
+ .reset = vop_reset,
+ .find_vqs = vop_find_vqs,
+ .del_vqs = vop_del_vqs,
+};
+
+static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
+{
+ struct _vop_vdev *vdev = data;
+ struct vop_device *vpdev = vdev->vpdev;
+ struct virtqueue *vq;
+
+ vpdev->hw_ops->ack_interrupt(vpdev, vdev->h2c_vdev_db);
+ list_for_each_entry(vq, &vdev->vdev.vqs, list)
+ vring_interrupt(0, vq);
+
+ return IRQ_HANDLED;
+}
+
+static void vop_virtio_release_dev(struct device *_d)
+{
+ /*
+ * No need for a release method similar to virtio PCI.
+ * Provide an empty one to avoid getting a warning from core.
+ */
+}
+
+/*
+ * Adds a new device and registers it with virtio;
+ * appropriate drivers are loaded by the device model.
+ */
+static int _vop_add_device(struct mic_device_desc __iomem *d,
+ unsigned int offset, struct vop_device *vpdev,
+ int dnode)
+{
+ struct _vop_vdev *vdev;
+ int ret;
+ u8 type = ioread8(&d->type);
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return -ENOMEM;
+
+ vdev->vpdev = vpdev;
+ vdev->vdev.dev.parent = &vpdev->dev;
+ vdev->vdev.dev.release = vop_virtio_release_dev;
+ vdev->vdev.id.device = type;
+ vdev->vdev.config = &vop_vq_config_ops;
+ vdev->desc = d;
+ vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d);
+ vdev->dnode = dnode;
+ vdev->vdev.priv = (void *)(u64)dnode;
+ init_completion(&vdev->reset_done);
+
+ vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev);
+ vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
+ vop_virtio_intr_handler, "virtio intr",
+ vdev, vdev->h2c_vdev_db);
+ if (IS_ERR(vdev->virtio_cookie)) {
+ ret = PTR_ERR(vdev->virtio_cookie);
+ goto kfree;
+ }
+ iowrite8((u8)vdev->h2c_vdev_db, &vdev->dc->h2c_vdev_db);
+ vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);
+
+ ret = register_virtio_device(&vdev->vdev);
+ if (ret) {
+ dev_err(_vop_dev(vdev),
+ "Failed to register vop device %u type %u\n",
+ offset, type);
+ goto free_irq;
+ }
+ writeq((u64)vdev, &vdev->dc->vdev);
+ dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n",
+ __func__, offset, type, vdev);
+
+ return 0;
+
+free_irq:
+ vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+kfree:
+ kfree(vdev);
+ return ret;
+}
+
+/*
+ * Match a vop device against a specific desc pointer.
+ */
+static int vop_match_desc(struct device *dev, void *data)
+{
+ struct virtio_device *_dev = dev_to_virtio(dev);
+ struct _vop_vdev *vdev = to_vopvdev(_dev);
+
+ return vdev->desc == (void __iomem *)data;
+}
+
+static void _vop_handle_config_change(struct mic_device_desc __iomem *d,
+ unsigned int offset,
+ struct vop_device *vpdev)
+{
+ struct mic_device_ctrl __iomem *dc
+ = (void __iomem *)d + _vop_aligned_desc_size(d);
+ struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+
+ if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
+ return;
+
+ dev_dbg(&vpdev->dev, "%s %d\n", __func__, __LINE__);
+ virtio_config_changed(&vdev->vdev);
+ iowrite8(1, &dc->guest_ack);
+}
+
+/*
+ * removes a virtio device if a hot remove event has been
+ * requested by the host.
+ */
+static int _vop_remove_device(struct mic_device_desc __iomem *d,
+ unsigned int offset, struct vop_device *vpdev)
+{
+ struct mic_device_ctrl __iomem *dc
+ = (void __iomem *)d + _vop_aligned_desc_size(d);
+ struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev);
+ u8 status;
+ int ret = -1;
+
+ if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+ dev_dbg(&vpdev->dev,
+ "%s %d config_change %d type %d vdev %p\n",
+ __func__, __LINE__,
+ ioread8(&dc->config_change), ioread8(&d->type), vdev);
+ status = ioread8(&d->status);
+ reinit_completion(&vdev->reset_done);
+ unregister_virtio_device(&vdev->vdev);
+ vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+ iowrite8(-1, &dc->h2c_vdev_db);
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK)
+ wait_for_completion(&vdev->reset_done);
+ kfree(vdev);
+ iowrite8(1, &dc->guest_ack);
+ dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
+ __func__, __LINE__, ioread8(&dc->guest_ack));
+ iowrite8(-1, &d->type);
+ ret = 0;
+ }
+ return ret;
+}
+
+#define REMOVE_DEVICES true
+
+static void _vop_scan_devices(void __iomem *dp, struct vop_device *vpdev,
+ bool remove, int dnode)
+{
+ s8 type;
+ unsigned int i;
+ struct mic_device_desc __iomem *d;
+ struct mic_device_ctrl __iomem *dc;
+ struct device *dev;
+ int ret;
+
+ for (i = sizeof(struct mic_bootparam);
+ i < MIC_DP_SIZE; i += _vop_total_desc_size(d)) {
+ d = dp + i;
+ dc = (void __iomem *)d + _vop_aligned_desc_size(d);
+ /*
+ * This read barrier is paired with the corresponding write
+ * barrier on the host which is inserted before adding or
+ * removing a virtio device descriptor, by updating the type.
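+ * On this side it is the rmb() below; on the host side the matching
+ * smp_wmb() is issued in vop_virtio_add_device() and
+ * vop_virtio_del_device() in vop_vringh.c, just before d->type is
+ * updated there.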
+ */
+ rmb();
+ type = ioread8(&d->type);
+
+ /* end of list */
+ if (type == 0)
+ break;
+
+ if (type == -1)
+ continue;
+
+ /* device already exists */
+ dev = device_find_child(&vpdev->dev, (void __force *)d,
+ vop_match_desc);
+ if (dev) {
+ if (remove)
+ iowrite8(MIC_VIRTIO_PARAM_DEV_REMOVE,
+ &dc->config_change);
+ put_device(dev);
+ _vop_handle_config_change(d, i, vpdev);
+ ret = _vop_remove_device(d, i, vpdev);
+ if (remove) {
+ iowrite8(0, &dc->config_change);
+ iowrite8(0, &dc->guest_ack);
+ }
+ continue;
+ }
+
+ /* new device */
+ dev_dbg(&vpdev->dev, "%s %d Adding new virtio device %p\n",
+ __func__, __LINE__, d);
+ if (!remove)
+ _vop_add_device(d, i, vpdev, dnode);
+ }
+}
+
+static void vop_scan_devices(struct vop_info *vi,
+ struct vop_device *vpdev, bool remove)
+{
+ void __iomem *dp = vpdev->hw_ops->get_remote_dp(vpdev);
+
+ if (!dp)
+ return;
+ mutex_lock(&vi->vop_mutex);
+ _vop_scan_devices(dp, vpdev, remove, vpdev->dnode);
+ mutex_unlock(&vi->vop_mutex);
+}
+
+/*
+ * vop_hotplug_devices tries to find changes in the device page.
+ */
+static void vop_hotplug_devices(struct work_struct *work)
+{
+ struct vop_info *vi = container_of(work, struct vop_info,
+ hotplug_work);
+
+ vop_scan_devices(vi, vi->vpdev, !REMOVE_DEVICES);
+}
+
+/*
+ * Interrupt handler for hot plug/config changes etc.
+ */
+static irqreturn_t vop_extint_handler(int irq, void *data)
+{
+ struct vop_info *vi = data;
+ struct mic_bootparam __iomem *bp;
+ struct vop_device *vpdev = vi->vpdev;
+
+ bp = vpdev->hw_ops->get_remote_dp(vpdev);
+ dev_dbg(&vpdev->dev, "%s %d hotplug work\n",
+ __func__, __LINE__);
+ vpdev->hw_ops->ack_interrupt(vpdev, ioread8(&bp->h2c_config_db));
+ schedule_work(&vi->hotplug_work);
+ return IRQ_HANDLED;
+}
+
+static int vop_driver_probe(struct vop_device *vpdev)
+{
+ struct vop_info *vi;
+ int rc;
+
+ vi = kzalloc(sizeof(*vi), GFP_KERNEL);
+ if (!vi) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ dev_set_drvdata(&vpdev->dev, vi);
+ vi->vpdev = vpdev;
+
+ mutex_init(&vi->vop_mutex);
+ INIT_WORK(&vi->hotplug_work, vop_hotplug_devices);
+ if (vpdev->dnode) {
+ rc = vop_host_init(vi);
+ if (rc < 0)
+ goto free;
+ } else {
+ struct mic_bootparam __iomem *bootparam;
+
+ vop_scan_devices(vi, vpdev, !REMOVE_DEVICES);
+
+ vi->h2c_config_db = vpdev->hw_ops->next_db(vpdev);
+ vi->cookie = vpdev->hw_ops->request_irq(vpdev,
+ vop_extint_handler,
+ "virtio_config_intr",
+ vi, vi->h2c_config_db);
+ if (IS_ERR(vi->cookie)) {
+ rc = PTR_ERR(vi->cookie);
+ goto free;
+ }
+ bootparam = vpdev->hw_ops->get_remote_dp(vpdev);
+ iowrite8(vi->h2c_config_db, &bootparam->h2c_config_db);
+ }
+ vop_init_debugfs(vi);
+ return 0;
+free:
+ kfree(vi);
+exit:
+ return rc;
+}
+
+static void vop_driver_remove(struct vop_device *vpdev)
+{
+ struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+
+ if (vpdev->dnode) {
+ vop_host_uninit(vi);
+ } else {
+ struct mic_bootparam __iomem *bootparam =
+ vpdev->hw_ops->get_remote_dp(vpdev);
+ if (bootparam)
+ iowrite8(-1, &bootparam->h2c_config_db);
+ vpdev->hw_ops->free_irq(vpdev, vi->cookie, vi);
+ flush_work(&vi->hotplug_work);
+ vop_scan_devices(vi, vpdev, REMOVE_DEVICES);
+ }
+ vop_exit_debugfs(vi);
+ kfree(vi);
+}
+
+static struct vop_device_id id_table[] = {
+ { VOP_DEV_TRNSP, VOP_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct vop_driver vop_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .id_table = id_table,
+ .probe = vop_driver_probe,
+ .remove = vop_driver_remove,
+};
+
+module_vop_driver(vop_driver);
+
+MODULE_DEVICE_TABLE(mbus, id_table);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Virtio Over PCIe (VOP) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/host/mic_virtio.h b/drivers/misc/mic/vop/vop_main.h
index a80631f2790d..ba47ec7a6386 100644
--- a/drivers/misc/mic/host/mic_virtio.h
+++ b/drivers/misc/mic/vop/vop_main.h
@@ -1,7 +1,7 @@
/*
* Intel MIC Platform Software Stack (MPSS)
*
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
@@ -15,14 +15,21 @@
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
- * Intel MIC Host driver.
+ * Intel Virtio Over PCIe (VOP) driver.
*
*/
-#ifndef MIC_VIRTIO_H
-#define MIC_VIRTIO_H
+#ifndef _VOP_MAIN_H_
+#define _VOP_MAIN_H_
+#include <linux/vringh.h>
#include <linux/virtio_config.h>
-#include <linux/mic_ioctl.h>
+#include <linux/virtio.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+#include "../bus/vop_bus.h"
/*
* Note on endianness.
@@ -39,38 +46,68 @@
* in guest endianness.
*/
+/*
+ * vop_info - Allocated per invocation of VOP probe
+ *
+ * @vpdev: VOP device
+ * @hotplug_work: Handle virtio device creation, deletion and configuration
+ * @cookie: Cookie received upon requesting a virtio configuration interrupt
+ * @h2c_config_db: The doorbell used by the peer to indicate a config change
+ * @vdev_list: List of "active" virtio devices injected in the peer node
+ * @vop_mutex: Synchronize access to the device page as well as serialize
+ * creation/deletion of virtio devices on the peer node
+ * @dp: Peer device page information
+ * @dbg: Debugfs entry
+ * @dma_ch: The DMA channel used by this transport for data transfers.
+ * @name: Name for this transport used in misc device creation.
+ * @miscdev: The misc device registered.
+ */
+struct vop_info {
+ struct vop_device *vpdev;
+ struct work_struct hotplug_work;
+ struct mic_irq *cookie;
+ int h2c_config_db;
+ struct list_head vdev_list;
+ struct mutex vop_mutex;
+ void __iomem *dp;
+ struct dentry *dbg;
+ struct dma_chan *dma_ch;
+ char name[16];
+ struct miscdevice miscdev;
+};
+
/**
- * struct mic_vringh - Virtio ring host information.
+ * struct vop_vringh - Virtio ring host information.
*
- * @vring: The MIC vring used for setting up user space mappings.
+ * @vring: The VOP vring used for setting up user space mappings.
* @vrh: The host VRINGH used for accessing the card vrings.
* @riov: The VRINGH read kernel IOV.
* @wiov: The VRINGH write kernel IOV.
+ * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
* @vr_mutex: Mutex for synchronizing access to the VRING.
* @buf: Temporary kernel buffer used to copy in/out data
* from/to the card via DMA.
* @buf_da: dma address of buf.
- * @mvdev: Back pointer to MIC virtio device for vringh_notify(..).
- * @head: The VRINGH head index address passed to vringh_getdesc_kern(..).
+ * @vdev: Back pointer to VOP virtio device for vringh_notify(..).
*/
-struct mic_vringh {
+struct vop_vringh {
struct mic_vring vring;
struct vringh vrh;
struct vringh_kiov riov;
struct vringh_kiov wiov;
+ u16 head;
struct mutex vr_mutex;
void *buf;
dma_addr_t buf_da;
- struct mic_vdev *mvdev;
- u16 head;
+ struct vop_vdev *vdev;
};
/**
- * struct mic_vdev - Host information for a card Virtio device.
+ * struct vop_vdev - Host information for a card Virtio device.
*
* @virtio_id - Virtio device id.
* @waitq - Waitqueue to allow ring3 apps to poll.
- * @mdev - Back pointer to host MIC device.
+ * @vpdev - pointer to VOP bus device.
* @poll_wake - Used for waking up threads blocked in poll.
* @out_bytes - Debug stats for number of bytes copied from host to card.
* @in_bytes - Debug stats for number of bytes copied from card to host.
@@ -82,18 +119,23 @@ struct mic_vringh {
* the transfer length did not have the required DMA alignment.
* @tx_dst_unaligned - Debug stats for number of bytes copied where the
* destination address on the card did not have the required DMA alignment.
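+ * @rx_dst_unaligned - Debug stats for number of bytes copied where the
+ * receive-side destination address did not have the required DMA
+ * alignment (receive counterpart of @tx_dst_unaligned).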
- * @mvr - Store per VRING data structures.
+ * @vvr - Store per VRING data structures.
* @virtio_bh_work - Work struct used to schedule virtio bottom half handling.
* @dd - Virtio device descriptor.
* @dc - Virtio device control fields.
* @list - List of Virtio devices.
* @virtio_db - The doorbell used by the card to interrupt the host.
* @virtio_cookie - The cookie returned while requesting interrupts.
+ * @vi: Transport information.
+ * @vdev_mutex: Mutex synchronizing virtio device injection,
+ * removal and data transfers.
+ * @destroy: Track if a virtio device is being destroyed.
+ * @deleted: The virtio device has been deleted.
*/
-struct mic_vdev {
+struct vop_vdev {
int virtio_id;
wait_queue_head_t waitq;
- struct mic_device *mdev;
+ struct vop_device *vpdev;
int poll_wake;
unsigned long out_bytes;
unsigned long in_bytes;
@@ -101,55 +143,28 @@ struct mic_vdev {
unsigned long in_bytes_dma;
unsigned long tx_len_unaligned;
unsigned long tx_dst_unaligned;
- struct mic_vringh mvr[MIC_MAX_VRINGS];
+ unsigned long rx_dst_unaligned;
+ struct vop_vringh vvr[MIC_MAX_VRINGS];
struct work_struct virtio_bh_work;
struct mic_device_desc *dd;
struct mic_device_ctrl *dc;
struct list_head list;
int virtio_db;
struct mic_irq *virtio_cookie;
+ struct vop_info *vi;
+ struct mutex vdev_mutex;
+ struct completion destroy;
+ bool deleted;
};
-void mic_virtio_uninit(struct mic_device *mdev);
-int mic_virtio_add_device(struct mic_vdev *mvdev,
- void __user *argp);
-void mic_virtio_del_device(struct mic_vdev *mvdev);
-int mic_virtio_config_change(struct mic_vdev *mvdev,
- void __user *argp);
-int mic_virtio_copy_desc(struct mic_vdev *mvdev,
- struct mic_copy_desc *request);
-void mic_virtio_reset_devices(struct mic_device *mdev);
-void mic_bh_handler(struct work_struct *work);
-
-/* Helper API to obtain the MIC PCIe device */
-static inline struct device *mic_dev(struct mic_vdev *mvdev)
-{
- return &mvdev->mdev->pdev->dev;
-}
-
-/* Helper API to check if a virtio device is initialized */
-static inline int mic_vdev_inited(struct mic_vdev *mvdev)
-{
- /* Device has not been created yet */
- if (!mvdev->dd || !mvdev->dd->type) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -EINVAL);
- return -EINVAL;
- }
-
- /* Device has been removed/deleted */
- if (mvdev->dd->type == -1) {
- dev_err(mic_dev(mvdev), "%s %d err %d\n",
- __func__, __LINE__, -ENODEV);
- return -ENODEV;
- }
-
- return 0;
-}
-
/* Helper API to check if a virtio device is running */
-static inline bool mic_vdevup(struct mic_vdev *mvdev)
+static inline bool vop_vdevup(struct vop_vdev *vdev)
{
- return !!mvdev->dd->status;
+ return !!vdev->dd->status;
}
+
+void vop_init_debugfs(struct vop_info *vi);
+void vop_exit_debugfs(struct vop_info *vi);
+int vop_host_init(struct vop_info *vi);
+void vop_host_uninit(struct vop_info *vi);
#endif
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
new file mode 100644
index 000000000000..e94c7fb6712a
--- /dev/null
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -0,0 +1,1165 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel Virtio Over PCIe (VOP) driver.
+ *
+ */
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mic_common.h>
+#include "../common/mic_dev.h"
+
+#include <linux/mic_ioctl.h>
+#include "vop_main.h"
+
+/* Helper API to obtain the VOP PCIe device */
+static inline struct device *vop_dev(struct vop_vdev *vdev)
+{
+ return vdev->vpdev->dev.parent;
+}
+
+/* Helper API to check if a virtio device is initialized */
+static inline int vop_vdev_inited(struct vop_vdev *vdev)
+{
+ if (!vdev)
+ return -EINVAL;
+ /* Device has not been created yet */
+ if (!vdev->dd || !vdev->dd->type) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, -EINVAL);
+ return -EINVAL;
+ }
+ /* Device has been removed/deleted */
+ if (vdev->dd->type == -1) {
+ dev_dbg(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, -ENODEV);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void _vop_notify(struct vringh *vrh)
+{
+ struct vop_vringh *vvrh = container_of(vrh, struct vop_vringh, vrh);
+ struct vop_vdev *vdev = vvrh->vdev;
+ struct vop_device *vpdev = vdev->vpdev;
+ s8 db = vdev->dc->h2c_vdev_db;
+
+ if (db != -1)
+ vpdev->hw_ops->send_intr(vpdev, db);
+}
+
+static void vop_virtio_init_post(struct vop_vdev *vdev)
+{
+ struct mic_vqconfig *vqconfig = mic_vq_config(vdev->dd);
+ struct vop_device *vpdev = vdev->vpdev;
+ int i, used_size;
+
+ for (i = 0; i < vdev->dd->num_vq; i++) {
+ used_size = PAGE_ALIGN(sizeof(u16) * 3 +
+ sizeof(struct vring_used_elem) *
+ le16_to_cpu(vqconfig[i].num));
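+ /*
+ * For example, a hypothetical 256-entry ring needs
+ * 3 * sizeof(u16) + 256 * sizeof(struct vring_used_elem) =
+ * 6 + 2048 = 2054 bytes, which PAGE_ALIGN rounds up to 4096
+ * (assuming 4K pages and the standard 8-byte used element).
+ */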
+ if (!le64_to_cpu(vqconfig[i].used_address)) {
+ dev_warn(vop_dev(vdev), "used_address zero??\n");
+ continue;
+ }
+ vdev->vvr[i].vrh.vring.used =
+ (void __force *)vpdev->hw_ops->ioremap(
+ vpdev,
+ le64_to_cpu(vqconfig[i].used_address),
+ used_size);
+ }
+
+ vdev->dc->used_address_updated = 0;
+
+ dev_info(vop_dev(vdev), "%s: device type %d LINKUP\n",
+ __func__, vdev->virtio_id);
+}
+
+static inline void vop_virtio_device_reset(struct vop_vdev *vdev)
+{
+ int i;
+
+ dev_dbg(vop_dev(vdev), "%s: status %d device type %d RESET\n",
+ __func__, vdev->dd->status, vdev->virtio_id);
+
+ for (i = 0; i < vdev->dd->num_vq; i++)
+ /*
+ * Avoid lockdep false positive. The + 1 is for the vop
+ * mutex which is held in the reset devices code path.
+ */
+ mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
+
+ /* 0 status means "reset" */
+ vdev->dd->status = 0;
+ vdev->dc->vdev_reset = 0;
+ vdev->dc->host_ack = 1;
+
+ for (i = 0; i < vdev->dd->num_vq; i++) {
+ struct vringh *vrh = &vdev->vvr[i].vrh;
+
+ vdev->vvr[i].vring.info->avail_idx = 0;
+ vrh->completed = 0;
+ vrh->last_avail_idx = 0;
+ vrh->last_used_idx = 0;
+ }
+
+ for (i = 0; i < vdev->dd->num_vq; i++)
+ mutex_unlock(&vdev->vvr[i].vr_mutex);
+}
+
+static void vop_virtio_reset_devices(struct vop_info *vi)
+{
+ struct list_head *pos, *tmp;
+ struct vop_vdev *vdev;
+
+ list_for_each_safe(pos, tmp, &vi->vdev_list) {
+ vdev = list_entry(pos, struct vop_vdev, list);
+ vop_virtio_device_reset(vdev);
+ vdev->poll_wake = 1;
+ wake_up(&vdev->waitq);
+ }
+}
+
+static void vop_bh_handler(struct work_struct *work)
+{
+ struct vop_vdev *vdev = container_of(work, struct vop_vdev,
+ virtio_bh_work);
+
+ if (vdev->dc->used_address_updated)
+ vop_virtio_init_post(vdev);
+
+ if (vdev->dc->vdev_reset)
+ vop_virtio_device_reset(vdev);
+
+ vdev->poll_wake = 1;
+ wake_up(&vdev->waitq);
+}
+
+static irqreturn_t _vop_virtio_intr_handler(int irq, void *data)
+{
+ struct vop_vdev *vdev = data;
+ struct vop_device *vpdev = vdev->vpdev;
+
+ vpdev->hw_ops->ack_interrupt(vpdev, vdev->virtio_db);
+ schedule_work(&vdev->virtio_bh_work);
+ return IRQ_HANDLED;
+}
+
+static int vop_virtio_config_change(struct vop_vdev *vdev, void *argp)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+ int ret = 0, retry, i;
+ struct vop_device *vpdev = vdev->vpdev;
+ struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+ struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+ s8 db = bootparam->h2c_config_db;
+
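+ /*
+ * Nothing ever wakes the on-stack waitqueue declared above; the
+ * wait_event_timeout() below is in effect a 100 ms poll of
+ * vdev->dc->guest_ack.
+ */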
+ mutex_lock(&vi->vop_mutex);
+ for (i = 0; i < vdev->dd->num_vq; i++)
+ mutex_lock_nested(&vdev->vvr[i].vr_mutex, i + 1);
+
+ if (db == -1 || vdev->dd->type == -1) {
+ ret = -EIO;
+ goto exit;
+ }
+
+ memcpy(mic_vq_configspace(vdev->dd), argp, vdev->dd->config_len);
+ vdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
+ vpdev->hw_ops->send_intr(vpdev, db);
+
+ for (retry = 100; retry--;) {
+ ret = wait_event_timeout(wake, vdev->dc->guest_ack,
+ msecs_to_jiffies(100));
+ if (ret)
+ break;
+ }
+
+ dev_dbg(vop_dev(vdev),
+ "%s %d retry: %d\n", __func__, __LINE__, retry);
+ vdev->dc->config_change = 0;
+ vdev->dc->guest_ack = 0;
+exit:
+ for (i = 0; i < vdev->dd->num_vq; i++)
+ mutex_unlock(&vdev->vvr[i].vr_mutex);
+ mutex_unlock(&vi->vop_mutex);
+ return ret;
+}
+
+static int vop_copy_dp_entry(struct vop_vdev *vdev,
+ struct mic_device_desc *argp, __u8 *type,
+ struct mic_device_desc **devpage)
+{
+ struct vop_device *vpdev = vdev->vpdev;
+ struct mic_device_desc *devp;
+ struct mic_vqconfig *vqconfig;
+ int ret = 0, i;
+ bool slot_found = false;
+
+ vqconfig = mic_vq_config(argp);
+ for (i = 0; i < argp->num_vq; i++) {
+ if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
+ ret = -EINVAL;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto exit;
+ }
+ }
+
+ /* Find the first free device page entry */
+ for (i = sizeof(struct mic_bootparam);
+ i < MIC_DP_SIZE - mic_total_desc_size(argp);
+ i += mic_total_desc_size(devp)) {
+ devp = vpdev->hw_ops->get_dp(vpdev) + i;
+ if (devp->type == 0 || devp->type == -1) {
+ slot_found = true;
+ break;
+ }
+ }
+ if (!slot_found) {
+ ret = -EINVAL;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto exit;
+ }
+ /*
+ * Save off the type before doing the memcpy. Type will be set at the
+ * end after completing all initialization for the new device.
+ */
+ *type = argp->type;
+ argp->type = 0;
+ memcpy(devp, argp, mic_desc_size(argp));
+
+ *devpage = devp;
+exit:
+ return ret;
+}
+
+static void vop_init_device_ctrl(struct vop_vdev *vdev,
+ struct mic_device_desc *devpage)
+{
+ struct mic_device_ctrl *dc;
+
+ dc = (void *)devpage + mic_aligned_desc_size(devpage);
+
+ dc->config_change = 0;
+ dc->guest_ack = 0;
+ dc->vdev_reset = 0;
+ dc->host_ack = 0;
+ dc->used_address_updated = 0;
+ dc->c2h_vdev_db = -1;
+ dc->h2c_vdev_db = -1;
+ vdev->dc = dc;
+}
+
+static int vop_virtio_add_device(struct vop_vdev *vdev,
+ struct mic_device_desc *argp)
+{
+ struct vop_info *vi = vdev->vi;
+ struct vop_device *vpdev = vi->vpdev;
+ struct mic_device_desc *dd = NULL;
+ struct mic_vqconfig *vqconfig;
+ int vr_size, i, j, ret;
+ u8 type = 0;
+ s8 db = -1;
+ char irqname[16];
+ struct mic_bootparam *bootparam;
+ u16 num;
+ dma_addr_t vr_addr;
+
+ bootparam = vpdev->hw_ops->get_dp(vpdev);
+ init_waitqueue_head(&vdev->waitq);
+ INIT_LIST_HEAD(&vdev->list);
+ vdev->vpdev = vpdev;
+
+ ret = vop_copy_dp_entry(vdev, argp, &type, &dd);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ kfree(vdev);
+ return ret;
+ }
+
+ vop_init_device_ctrl(vdev, dd);
+
+ vdev->dd = dd;
+ vdev->virtio_id = type;
+ vqconfig = mic_vq_config(dd);
+ INIT_WORK(&vdev->virtio_bh_work, vop_bh_handler);
+
+ for (i = 0; i < dd->num_vq; i++) {
+ struct vop_vringh *vvr = &vdev->vvr[i];
+ struct mic_vring *vr = &vdev->vvr[i].vring;
+
+ num = le16_to_cpu(vqconfig[i].num);
+ mutex_init(&vvr->vr_mutex);
+ vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
+ sizeof(struct _mic_vring_info));
+ vr->va = (void *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(vr_size));
+ if (!vr->va) {
+ ret = -ENOMEM;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vr->len = vr_size;
+ vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
+ vr->info->magic = cpu_to_le32(MIC_MAGIC + vdev->virtio_id + i);
+ vr_addr = dma_map_single(&vpdev->dev, vr->va, vr_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&vpdev->dev, vr_addr)) {
+ free_pages((unsigned long)vr->va, get_order(vr_size));
+ ret = -ENOMEM;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vqconfig[i].address = cpu_to_le64(vr_addr);
+
+ vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
+ ret = vringh_init_kern(&vvr->vrh,
+ *(u32 *)mic_vq_features(vdev->dd),
+ num, false, vr->vr.desc, vr->vr.avail,
+ vr->vr.used);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ goto err;
+ }
+ vringh_kiov_init(&vvr->riov, NULL, 0);
+ vringh_kiov_init(&vvr->wiov, NULL, 0);
+ vvr->head = USHRT_MAX;
+ vvr->vdev = vdev;
+ vvr->vrh.notify = _vop_notify;
+ dev_dbg(&vpdev->dev,
+ "%s %d index %d va %p info %p vr_size 0x%x\n",
+ __func__, __LINE__, i, vr->va, vr->info, vr_size);
+ vvr->buf = (void *)__get_free_pages(GFP_KERNEL,
+ get_order(VOP_INT_DMA_BUF_SIZE));
+ vvr->buf_da = dma_map_single(&vpdev->dev,
+ vvr->buf, VOP_INT_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ snprintf(irqname, sizeof(irqname), "vop%dvirtio%d", vpdev->index,
+ vdev->virtio_id);
+ vdev->virtio_db = vpdev->hw_ops->next_db(vpdev);
+ vdev->virtio_cookie = vpdev->hw_ops->request_irq(vpdev,
+ _vop_virtio_intr_handler, irqname, vdev,
+ vdev->virtio_db);
+ if (IS_ERR(vdev->virtio_cookie)) {
+ ret = PTR_ERR(vdev->virtio_cookie);
+ dev_dbg(&vpdev->dev, "request irq failed\n");
+ goto err;
+ }
+
+ vdev->dc->c2h_vdev_db = vdev->virtio_db;
+
+ /*
+ * Order the type update with previous stores. This write barrier
+ * is paired with the corresponding read barrier before the uncached
+ * system memory read of the type, on the card while scanning the
+ * device page.
+ */
+ smp_wmb();
+ dd->type = type;
+ argp->type = type;
+
+ if (bootparam) {
+ db = bootparam->h2c_config_db;
+ if (db != -1)
+ vpdev->hw_ops->send_intr(vpdev, db);
+ }
+ dev_dbg(&vpdev->dev, "Added virtio id %d db %d\n", dd->type, db);
+ return 0;
+err:
+ vqconfig = mic_vq_config(dd);
+ for (j = 0; j < i; j++) {
+ struct vop_vringh *vvr = &vdev->vvr[j];
+
+ dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[j].address),
+ vvr->vring.len, DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vvr->vring.va,
+ get_order(vvr->vring.len));
+ }
+ return ret;
+}
+
+static void vop_dev_remove(struct vop_info *pvi, struct mic_device_ctrl *devp,
+ struct vop_device *vpdev)
+{
+ struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+ s8 db;
+ int ret, retry;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+
+ devp->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
+ db = bootparam->h2c_config_db;
+ if (db != -1)
+ vpdev->hw_ops->send_intr(vpdev, db);
+ else
+ goto done;
+ for (retry = 15; retry--;) {
+ ret = wait_event_timeout(wake, devp->guest_ack,
+ msecs_to_jiffies(1000));
+ if (ret)
+ break;
+ }
+done:
+ devp->config_change = 0;
+ devp->guest_ack = 0;
+}
+
+static void vop_virtio_del_device(struct vop_vdev *vdev)
+{
+ struct vop_info *vi = vdev->vi;
+ struct vop_device *vpdev = vdev->vpdev;
+ int i;
+ struct mic_vqconfig *vqconfig;
+ struct mic_bootparam *bootparam = vpdev->hw_ops->get_dp(vpdev);
+
+ if (!bootparam)
+ goto skip_hot_remove;
+ vop_dev_remove(vi, vdev->dc, vpdev);
+skip_hot_remove:
+ vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
+ flush_work(&vdev->virtio_bh_work);
+ vqconfig = mic_vq_config(vdev->dd);
+ for (i = 0; i < vdev->dd->num_vq; i++) {
+ struct vop_vringh *vvr = &vdev->vvr[i];
+
+ dma_unmap_single(&vpdev->dev,
+ vvr->buf_da, VOP_INT_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vvr->buf,
+ get_order(VOP_INT_DMA_BUF_SIZE));
+ vringh_kiov_cleanup(&vvr->riov);
+ vringh_kiov_cleanup(&vvr->wiov);
+ dma_unmap_single(&vpdev->dev, le64_to_cpu(vqconfig[i].address),
+ vvr->vring.len, DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vvr->vring.va,
+ get_order(vvr->vring.len));
+ }
+ /*
+ * Order the type update with previous stores. This write barrier
+ * is paired with the corresponding read barrier before the uncached
+ * system memory read of the type, on the card while scanning the
+ * device page.
+ */
+ smp_wmb();
+ vdev->dd->type = -1;
+}
+
+/*
+ * vop_sync_dma - Wrapper for synchronous DMAs.
+ *
+ * @vdev - The VOP virtio device on whose behalf the DMA is done.
+ * @dst - destination DMA address.
+ * @src - source DMA address.
+ * @len - size of the transfer.
+ *
+ * Return 0 on success, a negative error code on failure.
+ */
+static int vop_sync_dma(struct vop_vdev *vdev, dma_addr_t dst, dma_addr_t src,
+ size_t len)
+{
+ int err = 0;
+ struct dma_device *ddev;
+ struct dma_async_tx_descriptor *tx;
+ struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
+ struct dma_chan *vop_ch = vi->dma_ch;
+
+ if (!vop_ch) {
+ err = -EBUSY;
+ goto error;
+ }
+ ddev = vop_ch->device;
+ tx = ddev->device_prep_dma_memcpy(vop_ch, dst, src, len,
+ DMA_PREP_FENCE);
+ if (!tx) {
+ err = -ENOMEM;
+ goto error;
+ } else {
+ dma_cookie_t cookie;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ err = -ENOMEM;
+ goto error;
+ }
+ dma_async_issue_pending(vop_ch);
+ err = dma_sync_wait(vop_ch, cookie);
+ }
+error:
+ if (err)
+ dev_err(&vi->vpdev->dev, "%s %d err %d\n",
+ __func__, __LINE__, err);
+ return err;
+}
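+
+/*
+ * A minimal usage sketch, mirroring the copy helpers below (both
+ * addresses must already be valid for the DMA engine):
+ *
+ *   err = vop_sync_dma(vdev, vvr->buf_da, daddr,
+ *                      ALIGN(partlen, dma_alignment));
+ *   if (err)
+ *           return err;
+ */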
+
+#define VOP_USE_DMA true
+
+/*
+ * Initiates the copies across the PCIe bus from card memory to a user
+ * space buffer. When transfers are done using DMA, source/destination
+ * addresses and transfer length must follow the alignment requirements of
+ * the MIC DMA engine.
+ */
+static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf,
+ size_t len, u64 daddr, size_t dlen,
+ int vr_idx)
+{
+ struct vop_device *vpdev = vdev->vpdev;
+ void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+ struct vop_vringh *vvr = &vdev->vvr[vr_idx];
+ struct vop_info *vi = dev_get_drvdata(&vpdev->dev);
+ size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t dma_offset, partlen;
+ int err;
+
+ if (!VOP_USE_DMA) {
+ if (copy_to_user(ubuf, (void __force *)dbuf, len)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ vdev->in_bytes += len;
+ err = 0;
+ goto err;
+ }
+
+ dma_offset = daddr - round_down(daddr, dma_alignment);
+ daddr -= dma_offset;
+ len += dma_offset;
+ /*
+ * X100 uses DMA addresses as seen by the card so adding
+ * the aperture base is not required for DMA. However x200
+ * requires DMA addresses to be an offset into the bar so
+ * add the aperture base for x200.
+ */
+ if (x200)
+ daddr += vpdev->aper->pa;
+ while (len) {
+ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+ err = vop_sync_dma(vdev, vvr->buf_da, daddr,
+ ALIGN(partlen, dma_alignment));
+ if (err) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ if (copy_to_user(ubuf, vvr->buf + dma_offset,
+ partlen - dma_offset)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ daddr += partlen;
+ ubuf += partlen;
+ dbuf += partlen;
+ vdev->in_bytes_dma += partlen;
+ vdev->in_bytes += partlen;
+ len -= partlen;
+ dma_offset = 0;
+ }
+ err = 0;
+err:
+ vpdev->hw_ops->iounmap(vpdev, dbuf);
+ dev_dbg(vop_dev(vdev),
+ "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
+ __func__, ubuf, dbuf, len, vr_idx);
+ return err;
+}
+
+/*
+ * Initiates copies across the PCIe bus from a user space buffer to card
+ * memory. When transfers are done using DMA, source/destination addresses
+ * and transfer length must follow the alignment requirements of the MIC
+ * DMA engine.
+ */
+static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf,
+ size_t len, u64 daddr, size_t dlen,
+ int vr_idx)
+{
+ struct vop_device *vpdev = vdev->vpdev;
+ void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len);
+ struct vop_vringh *vvr = &vdev->vvr[vr_idx];
+ struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev);
+ size_t dma_alignment = 1 << vi->dma_ch->device->copy_align;
+ bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1);
+ size_t partlen;
+ bool dma = VOP_USE_DMA;
+ int err = 0;
+
+ if (daddr & (dma_alignment - 1)) {
+ vdev->tx_dst_unaligned += len;
+ dma = false;
+ } else if (ALIGN(len, dma_alignment) > dlen) {
+ vdev->tx_len_unaligned += len;
+ dma = false;
+ }
+
+ if (!dma)
+ goto memcpy;
+
+ /*
+ * X100 uses DMA addresses as seen by the card so adding
+ * the aperture base is not required for DMA. However x200
+ * requires DMA addresses to be an offset into the bar so
+ * add the aperture base for x200.
+ */
+ if (x200)
+ daddr += vpdev->aper->pa;
+ while (len) {
+ partlen = min_t(size_t, len, VOP_INT_DMA_BUF_SIZE);
+
+ if (copy_from_user(vvr->buf, ubuf, partlen)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ err = vop_sync_dma(vdev, daddr, vvr->buf_da,
+ ALIGN(partlen, dma_alignment));
+ if (err) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ daddr += partlen;
+ ubuf += partlen;
+ dbuf += partlen;
+ vdev->out_bytes_dma += partlen;
+ vdev->out_bytes += partlen;
+ len -= partlen;
+ }
+memcpy:
+ /*
+ * We are copying to IO below and should ideally use something
+ * like copy_from_user_toio(..) if it existed.
+ */
+ if (copy_from_user((void __force *)dbuf, ubuf, len)) {
+ err = -EFAULT;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ vdev->out_bytes += len;
+ err = 0;
+err:
+ vpdev->hw_ops->iounmap(vpdev, dbuf);
+ dev_dbg(vop_dev(vdev),
+ "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n",
+ __func__, ubuf, dbuf, len, vr_idx);
+ return err;
+}
+
+#define MIC_VRINGH_READ true
+
+/* Determine the total number of bytes consumed in a VRINGH KIOV */
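+/*
+ * Example with hypothetical sizes: two fully-consumed elements of
+ * 512 bytes each (iov->i == 2, each iov_len restored to its full
+ * length by the fix-up in vop_vringh_copy()) plus 100 bytes consumed
+ * of the current element gives 512 + 512 + 100 = 1124 bytes.
+ */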
+static inline u32 vop_vringh_iov_consumed(struct vringh_kiov *iov)
+{
+ int i;
+ u32 total = iov->consumed;
+
+ for (i = 0; i < iov->i; i++)
+ total += iov->iov[i].iov_len;
+ return total;
+}
+
+/*
+ * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
+ * This API is heavily based on the vringh_iov_xfer(..) implementation
+ * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
+ * and vringh_iov_push_kern(..) directly is that there is no
+ * way to override the VRINGH xfer(..) routines as of v3.10.
+ */
+static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov,
+ void __user *ubuf, size_t len, bool read, int vr_idx,
+ size_t *out_len)
+{
+ int ret = 0;
+ size_t partlen, tot_len = 0;
+
+ while (len && iov->i < iov->used) {
+ struct kvec *kiov = &iov->iov[iov->i];
+
+ partlen = min(kiov->iov_len, len);
+ if (read)
+ ret = vop_virtio_copy_to_user(vdev, ubuf, partlen,
+ (u64)kiov->iov_base,
+ kiov->iov_len,
+ vr_idx);
+ else
+ ret = vop_virtio_copy_from_user(vdev, ubuf, partlen,
+ (u64)kiov->iov_base,
+ kiov->iov_len,
+ vr_idx);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= partlen;
+ ubuf += partlen;
+ tot_len += partlen;
+ iov->consumed += partlen;
+ kiov->iov_len -= partlen;
+ kiov->iov_base += partlen;
+ if (!kiov->iov_len) {
+ /* Fix up old iov element then increment. */
+ kiov->iov_len = iov->consumed;
+ kiov->iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
+ }
+ *out_len = tot_len;
+ return ret;
+}
+
+/*
+ * Use the standard VRINGH infrastructure in the kernel to fetch new
+ * descriptors, initiate the copies and update the used ring.
+ */
+static int _vop_virtio_copy(struct vop_vdev *vdev, struct mic_copy_desc *copy)
+{
+ int ret = 0;
+ u32 iovcnt = copy->iovcnt;
+ struct iovec iov;
+ struct iovec __user *u_iov = copy->iov;
+ void __user *ubuf = NULL;
+ struct vop_vringh *vvr = &vdev->vvr[copy->vr_idx];
+ struct vringh_kiov *riov = &vvr->riov;
+ struct vringh_kiov *wiov = &vvr->wiov;
+ struct vringh *vrh = &vvr->vrh;
+ u16 *head = &vvr->head;
+ struct mic_vring *vr = &vvr->vring;
+ size_t len = 0, out_len;
+
+ copy->out_len = 0;
+ /* Fetch a new IOVEC if all previous elements have been processed */
+ if (riov->i == riov->used && wiov->i == wiov->used) {
+ ret = vringh_getdesc_kern(vrh, riov, wiov,
+ head, GFP_KERNEL);
+ /* Check if there are available descriptors */
+ if (ret <= 0)
+ return ret;
+ }
+ while (iovcnt) {
+ if (!len) {
+ /* Copy over a new iovec from user space. */
+ ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
+ if (ret) {
+ ret = -EINVAL;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len = iov.iov_len;
+ ubuf = iov.iov_base;
+ }
+ /* Issue all the read descriptors first */
+ ret = vop_vringh_copy(vdev, riov, ubuf, len,
+ MIC_VRINGH_READ, copy->vr_idx, &out_len);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= out_len;
+ ubuf += out_len;
+ copy->out_len += out_len;
+ /* Issue the write descriptors next */
+ ret = vop_vringh_copy(vdev, wiov, ubuf, len,
+ !MIC_VRINGH_READ, copy->vr_idx, &out_len);
+ if (ret) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, ret);
+ break;
+ }
+ len -= out_len;
+ ubuf += out_len;
+ copy->out_len += out_len;
+ if (!len) {
+ /* One user space iovec is now completed */
+ iovcnt--;
+ u_iov++;
+ }
+ /* Exit loop if all elements in KIOVs have been processed. */
+ if (riov->i == riov->used && wiov->i == wiov->used)
+ break;
+ }
+ /*
+ * Update the used ring if a descriptor was available and some data was
+ * copied in/out and the user asked for a used ring update.
+ */
+ if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
+ u32 total = 0;
+
+ /* Determine the total data consumed */
+ total += vop_vringh_iov_consumed(riov);
+ total += vop_vringh_iov_consumed(wiov);
+ vringh_complete_kern(vrh, *head, total);
+ *head = USHRT_MAX;
+ if (vringh_need_notify_kern(vrh) > 0)
+ vringh_notify(vrh);
+ vringh_kiov_cleanup(riov);
+ vringh_kiov_cleanup(wiov);
+ /* Update avail idx for user space */
+ vr->info->avail_idx = vrh->last_avail_idx;
+ }
+ return ret;
+}
+
+static inline int vop_verify_copy_args(struct vop_vdev *vdev,
+ struct mic_copy_desc *copy)
+{
+ if (!vdev || copy->vr_idx >= vdev->dd->num_vq)
+ return -EINVAL;
+ return 0;
+}
+
+/* Copy a specified number of virtio descriptors in a chain */
+static int vop_virtio_copy_desc(struct vop_vdev *vdev,
+ struct mic_copy_desc *copy)
+{
+ int err;
+ struct vop_vringh *vvr;
+
+ err = vop_verify_copy_args(vdev, copy);
+ if (err)
+ return err;
+
+ vvr = &vdev->vvr[copy->vr_idx];
+ mutex_lock(&vvr->vr_mutex);
+ if (!vop_vdevup(vdev)) {
+ err = -ENODEV;
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ goto err;
+ }
+ err = _vop_virtio_copy(vdev, copy);
+ if (err) {
+ dev_err(vop_dev(vdev), "%s %d err %d\n",
+ __func__, __LINE__, err);
+ }
+err:
+ mutex_unlock(&vvr->vr_mutex);
+ return err;
+}
+
+static int vop_open(struct inode *inode, struct file *f)
+{
+ struct vop_vdev *vdev;
+ struct vop_info *vi = container_of(f->private_data,
+ struct vop_info, miscdev);
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev)
+ return -ENOMEM;
+ vdev->vi = vi;
+ mutex_init(&vdev->vdev_mutex);
+ f->private_data = vdev;
+ init_completion(&vdev->destroy);
+ complete(&vdev->destroy);
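+ /*
+ * Start out completed so vop_release() will not block for fds
+ * that never added a device; vop_host_uninit() re-arms this via
+ * reinit_completion() before force-deleting an active device.
+ */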
+ return 0;
+}
+
+static int vop_release(struct inode *inode, struct file *f)
+{
+ struct vop_vdev *vdev = f->private_data, *vdev_tmp;
+ struct vop_info *vi = vdev->vi;
+ struct list_head *pos, *tmp;
+ bool found = false;
+
+ mutex_lock(&vdev->vdev_mutex);
+ if (vdev->deleted)
+ goto unlock;
+ mutex_lock(&vi->vop_mutex);
+ list_for_each_safe(pos, tmp, &vi->vdev_list) {
+ vdev_tmp = list_entry(pos, struct vop_vdev, list);
+ if (vdev == vdev_tmp) {
+ vop_virtio_del_device(vdev);
+ list_del(pos);
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&vi->vop_mutex);
+unlock:
+ mutex_unlock(&vdev->vdev_mutex);
+ if (!found)
+ wait_for_completion(&vdev->destroy);
+ f->private_data = NULL;
+ kfree(vdev);
+ return 0;
+}
+
+static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct vop_vdev *vdev = f->private_data;
+ struct vop_info *vi = vdev->vi;
+ void __user *argp = (void __user *)arg;
+ int ret;
+
+ switch (cmd) {
+ case MIC_VIRTIO_ADD_DEVICE:
+ {
+ struct mic_device_desc dd, *dd_config;
+
+ if (copy_from_user(&dd, argp, sizeof(dd)))
+ return -EFAULT;
+
+ if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
+ dd.num_vq > MIC_MAX_VRINGS)
+ return -EINVAL;
+
+ dd_config = kzalloc(mic_desc_size(&dd), GFP_KERNEL);
+ if (!dd_config)
+ return -ENOMEM;
+ if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
+ ret = -EFAULT;
+ goto free_ret;
+ }
+ mutex_lock(&vdev->vdev_mutex);
+ mutex_lock(&vi->vop_mutex);
+ ret = vop_virtio_add_device(vdev, dd_config);
+ if (ret)
+ goto unlock_ret;
+ list_add_tail(&vdev->list, &vi->vdev_list);
+unlock_ret:
+ mutex_unlock(&vi->vop_mutex);
+ mutex_unlock(&vdev->vdev_mutex);
+free_ret:
+ kfree(dd_config);
+ return ret;
+ }
+ case MIC_VIRTIO_COPY_DESC:
+ {
+ struct mic_copy_desc copy;
+
+ mutex_lock(&vdev->vdev_mutex);
+ ret = vop_vdev_inited(vdev);
+ if (ret)
+ goto _unlock_ret;
+
+ if (copy_from_user(&copy, argp, sizeof(copy))) {
+ ret = -EFAULT;
+ goto _unlock_ret;
+ }
+
+ ret = vop_virtio_copy_desc(vdev, &copy);
+ if (ret < 0)
+ goto _unlock_ret;
+ if (copy_to_user(
+ &((struct mic_copy_desc __user *)argp)->out_len,
+ &copy.out_len, sizeof(copy.out_len)))
+ ret = -EFAULT;
+_unlock_ret:
+ mutex_unlock(&vdev->vdev_mutex);
+ return ret;
+ }
+ case MIC_VIRTIO_CONFIG_CHANGE:
+ {
+ void *buf;
+
+ mutex_lock(&vdev->vdev_mutex);
+ ret = vop_vdev_inited(vdev);
+ if (ret)
+ goto __unlock_ret;
+ buf = kzalloc(vdev->dd->config_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto __unlock_ret;
+ }
+ if (copy_from_user(buf, argp, vdev->dd->config_len)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ ret = vop_virtio_config_change(vdev, buf);
+done:
+ kfree(buf);
+__unlock_ret:
+ mutex_unlock(&vdev->vdev_mutex);
+ return ret;
+ }
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+/*
+ * We return POLLIN | POLLOUT from poll when new buffers are enqueued, and
+ * not when previously enqueued buffers may be available. This means that
+ * in the card->host (TX) path, when userspace is unblocked by poll it
+ * must drain all available descriptors or it can stall.
+ */
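+/*
+ * A hypothetical userspace sketch of that drain loop:
+ *
+ *   while (poll(&pfd, 1, -1) > 0) {
+ *           do {
+ *                   ret = ioctl(fd, MIC_VIRTIO_COPY_DESC, &copy);
+ *           } while (!ret && copy.out_len);
+ *   }
+ */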
+static unsigned int vop_poll(struct file *f, poll_table *wait)
+{
+ struct vop_vdev *vdev = f->private_data;
+ int mask = 0;
+
+ mutex_lock(&vdev->vdev_mutex);
+ if (vop_vdev_inited(vdev)) {
+ mask = POLLERR;
+ goto done;
+ }
+ poll_wait(f, &vdev->waitq, wait);
+ if (vop_vdev_inited(vdev)) {
+ mask = POLLERR;
+ } else if (vdev->poll_wake) {
+ vdev->poll_wake = 0;
+ mask = POLLIN | POLLOUT;
+ }
+done:
+ mutex_unlock(&vdev->vdev_mutex);
+ return mask;
+}
+
+static inline int
+vop_query_offset(struct vop_vdev *vdev, unsigned long offset,
+ unsigned long *size, unsigned long *pa)
+{
+ struct vop_device *vpdev = vdev->vpdev;
+ unsigned long start = MIC_DP_SIZE;
+ int i;
+
+ /*
+ * MMAP interface is as follows:
+ * offset region
+ * 0x0 virtio device_page
+ * 0x1000 first vring
+ * 0x1000 + size of 1st vring second vring
+ * ....
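+ *
+ * For example, assuming a 4K device page and two 16K vrings:
+ * offset 0x0 maps the device page, 0x1000 the first vring and
+ * 0x5000 the second.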
+ */
+ if (!offset) {
+ *pa = virt_to_phys(vpdev->hw_ops->get_dp(vpdev));
+ *size = MIC_DP_SIZE;
+ return 0;
+ }
+
+ for (i = 0; i < vdev->dd->num_vq; i++) {
+ struct vop_vringh *vvr = &vdev->vvr[i];
+
+ if (offset == start) {
+ *pa = virt_to_phys(vvr->vring.va);
+ *size = vvr->vring.len;
+ return 0;
+ }
+ start += vvr->vring.len;
+ }
+ return -1;
+}
+
+/*
+ * Maps the device page and virtio rings to user space for read-only access.
+ */
+static int vop_mmap(struct file *f, struct vm_area_struct *vma)
+{
+ struct vop_vdev *vdev = f->private_data;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long pa, size = vma->vm_end - vma->vm_start, size_rem = size;
+ int i, err;
+
+ err = vop_vdev_inited(vdev);
+ if (err)
+ goto ret;
+ if (vma->vm_flags & VM_WRITE) {
+ err = -EACCES;
+ goto ret;
+ }
+ while (size_rem) {
+ i = vop_query_offset(vdev, offset, &size, &pa);
+ if (i < 0) {
+ err = -EINVAL;
+ goto ret;
+ }
+ err = remap_pfn_range(vma, vma->vm_start + offset,
+ pa >> PAGE_SHIFT, size,
+ vma->vm_page_prot);
+ if (err)
+ goto ret;
+ size_rem -= size;
+ offset += size;
+ }
+ret:
+ return err;
+}
+
+static const struct file_operations vop_fops = {
+ .open = vop_open,
+ .release = vop_release,
+ .unlocked_ioctl = vop_ioctl,
+ .poll = vop_poll,
+ .mmap = vop_mmap,
+ .owner = THIS_MODULE,
+};
+
+int vop_host_init(struct vop_info *vi)
+{
+ int rc;
+ struct miscdevice *mdev;
+ struct vop_device *vpdev = vi->vpdev;
+
+ INIT_LIST_HEAD(&vi->vdev_list);
+ vi->dma_ch = vpdev->dma_ch;
+ mdev = &vi->miscdev;
+ mdev->minor = MISC_DYNAMIC_MINOR;
+ snprintf(vi->name, sizeof(vi->name), "vop_virtio%d", vpdev->index);
+ mdev->name = vi->name;
+ mdev->fops = &vop_fops;
+ mdev->parent = &vpdev->dev;
+
+ rc = misc_register(mdev);
+ if (rc)
+ dev_err(&vpdev->dev, "%s failed rc %d\n", __func__, rc);
+ return rc;
+}
+
+void vop_host_uninit(struct vop_info *vi)
+{
+ struct list_head *pos, *tmp;
+ struct vop_vdev *vdev;
+
+ mutex_lock(&vi->vop_mutex);
+ vop_virtio_reset_devices(vi);
+ list_for_each_safe(pos, tmp, &vi->vdev_list) {
+ vdev = list_entry(pos, struct vop_vdev, list);
+ list_del(pos);
+ reinit_completion(&vdev->destroy);
+ mutex_unlock(&vi->vop_mutex);
+ mutex_lock(&vdev->vdev_mutex);
+ vop_virtio_del_device(vdev);
+ vdev->deleted = true;
+ mutex_unlock(&vdev->vdev_mutex);
+ complete(&vdev->destroy);
+ mutex_lock(&vi->vop_mutex);
+ }
+ mutex_unlock(&vi->vop_mutex);
+ misc_deregister(&vi->miscdev);
+}
diff --git a/drivers/staging/panel/panel.c b/drivers/misc/panel.c
index 70b8f4fabfad..6030ac5b8c63 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/misc/panel.c
@@ -172,8 +172,6 @@ static __u8 scan_mask_o;
/* logical or of the input bits involved in the scan matrix */
static __u8 scan_mask_i;
-typedef __u64 pmask_t;
-
enum input_type {
INPUT_TYPE_STD,
INPUT_TYPE_KBD,
@@ -188,8 +186,8 @@ enum input_state {
struct logical_input {
struct list_head list;
- pmask_t mask;
- pmask_t value;
+ __u64 mask;
+ __u64 value;
enum input_type type;
enum input_state state;
__u8 rise_time, fall_time;
@@ -219,19 +217,19 @@ static LIST_HEAD(logical_inputs); /* list of all defined logical inputs */
* corresponds to the ground.
* Within each group, bits are stored in the same order as read on the port :
* BAPSE (busy=4, ack=3, paper empty=2, select=1, error=0).
- * So, each __u64 (or pmask_t) is represented like this :
+ * So, each __u64 is represented like this:
* 0000000000000000000BAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSEBAPSE
* <-----unused------><gnd><d07><d06><d05><d04><d03><d02><d01><d00>
*/
/* what has just been read from the I/O ports */
-static pmask_t phys_read;
+static __u64 phys_read;
/* previous phys_read */
-static pmask_t phys_read_prev;
+static __u64 phys_read_prev;
/* stabilized phys_read (phys_read|phys_read_prev) */
-static pmask_t phys_curr;
+static __u64 phys_curr;
/* previous phys_curr */
-static pmask_t phys_prev;
+static __u64 phys_prev;
/* 0 means that at least one logical signal needs be computed */
static char inputs_stable;
@@ -650,34 +648,28 @@ static const char nexcom_keypad_profile[][4][9] = {
static const char (*keypad_profile)[4][9] = old_keypad_profile;
-/* FIXME: this should be converted to a bit array containing signals states */
-static struct {
- unsigned char e; /* parallel LCD E (data latch on falling edge) */
- unsigned char rs; /* parallel LCD RS (0 = cmd, 1 = data) */
- unsigned char rw; /* parallel LCD R/W (0 = W, 1 = R) */
- unsigned char bl; /* parallel LCD backlight (0 = off, 1 = on) */
- unsigned char cl; /* serial LCD clock (latch on rising edge) */
- unsigned char da; /* serial LCD data */
-} bits;
+static DECLARE_BITMAP(bits, LCD_BITS);
+
+static void lcd_get_bits(unsigned int port, int *val)
+{
+ unsigned int bit, state;
+
+ for (bit = 0; bit < LCD_BITS; bit++) {
+ state = test_bit(bit, bits) ? BIT_SET : BIT_CLR;
+ *val &= lcd_bits[port][bit][BIT_MSK];
+ *val |= lcd_bits[port][bit][state];
+ }
+}
static void init_scan_timer(void);
/* sets data port bits according to current signals values */
static int set_data_bits(void)
{
- int val, bit;
+ int val;
val = r_dtr(pprt);
- for (bit = 0; bit < LCD_BITS; bit++)
- val &= lcd_bits[LCD_PORT_D][bit][BIT_MSK];
-
- val |= lcd_bits[LCD_PORT_D][LCD_BIT_E][bits.e]
- | lcd_bits[LCD_PORT_D][LCD_BIT_RS][bits.rs]
- | lcd_bits[LCD_PORT_D][LCD_BIT_RW][bits.rw]
- | lcd_bits[LCD_PORT_D][LCD_BIT_BL][bits.bl]
- | lcd_bits[LCD_PORT_D][LCD_BIT_CL][bits.cl]
- | lcd_bits[LCD_PORT_D][LCD_BIT_DA][bits.da];
-
+ lcd_get_bits(LCD_PORT_D, &val);
w_dtr(pprt, val);
return val;
}
@@ -685,19 +677,10 @@ static int set_data_bits(void)
/* sets ctrl port bits according to current signals values */
static int set_ctrl_bits(void)
{
- int val, bit;
+ int val;
val = r_ctr(pprt);
- for (bit = 0; bit < LCD_BITS; bit++)
- val &= lcd_bits[LCD_PORT_C][bit][BIT_MSK];
-
- val |= lcd_bits[LCD_PORT_C][LCD_BIT_E][bits.e]
- | lcd_bits[LCD_PORT_C][LCD_BIT_RS][bits.rs]
- | lcd_bits[LCD_PORT_C][LCD_BIT_RW][bits.rw]
- | lcd_bits[LCD_PORT_C][LCD_BIT_BL][bits.bl]
- | lcd_bits[LCD_PORT_C][LCD_BIT_CL][bits.cl]
- | lcd_bits[LCD_PORT_C][LCD_BIT_DA][bits.da];
-
+ lcd_get_bits(LCD_PORT_C, &val);
w_ctr(pprt, val);
return val;
}
@@ -793,12 +776,17 @@ static void lcd_send_serial(int byte)
* LCD reads D0 on STROBE's rising edge.
*/
for (bit = 0; bit < 8; bit++) {
- bits.cl = BIT_CLR; /* CLK low */
+ clear_bit(LCD_BIT_CL, bits); /* CLK low */
panel_set_bits();
- bits.da = byte & 1;
+ if (byte & 1) {
+ set_bit(LCD_BIT_DA, bits);
+ } else {
+ clear_bit(LCD_BIT_DA, bits);
+ }
+
panel_set_bits();
udelay(2); /* maintain the data during 2 us before CLK up */
- bits.cl = BIT_SET; /* CLK high */
+ set_bit(LCD_BIT_CL, bits); /* CLK high */
panel_set_bits();
udelay(1); /* maintain the strobe during 1 us */
byte >>= 1;
@@ -813,7 +801,10 @@ static void lcd_backlight(int on)
/* The backlight is activated by setting the AUTOFEED line to +5V */
spin_lock_irq(&pprt_lock);
- bits.bl = on;
+ if (on)
+ set_bit(LCD_BIT_BL, bits);
+ else
+ clear_bit(LCD_BIT_BL, bits);
panel_set_bits();
spin_unlock_irq(&pprt_lock);
}
@@ -848,14 +839,14 @@ static void lcd_write_cmd_p8(int cmd)
w_dtr(pprt, cmd);
udelay(20); /* maintain the data during 20 us before the strobe */
- bits.e = BIT_SET;
- bits.rs = BIT_CLR;
- bits.rw = BIT_CLR;
+ set_bit(LCD_BIT_E, bits);
+ clear_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
set_ctrl_bits();
udelay(40); /* maintain the strobe during 40 us */
- bits.e = BIT_CLR;
+ clear_bit(LCD_BIT_E, bits);
set_ctrl_bits();
udelay(120); /* the shortest command takes at least 120 us */
@@ -870,14 +861,14 @@ static void lcd_write_data_p8(int data)
w_dtr(pprt, data);
udelay(20); /* maintain the data during 20 us before the strobe */
- bits.e = BIT_SET;
- bits.rs = BIT_SET;
- bits.rw = BIT_CLR;
+ set_bit(LCD_BIT_E, bits);
+ set_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
set_ctrl_bits();
udelay(40); /* maintain the strobe during 40 us */
- bits.e = BIT_CLR;
+ clear_bit(LCD_BIT_E, bits);
set_ctrl_bits();
udelay(45); /* the shortest data takes at least 45 us */
@@ -943,7 +934,8 @@ static void lcd_clear_fast_s(void)
lcd_send_serial(0x5F); /* R/W=W, RS=1 */
lcd_send_serial(' ' & 0x0F);
lcd_send_serial((' ' >> 4) & 0x0F);
- udelay(40); /* the shortest data takes at least 40 us */
+ /* the shortest data takes at least 40 us */
+ udelay(40);
}
spin_unlock_irq(&pprt_lock);
@@ -969,15 +961,15 @@ static void lcd_clear_fast_p8(void)
/* maintain the data during 20 us before the strobe */
udelay(20);
- bits.e = BIT_SET;
- bits.rs = BIT_SET;
- bits.rw = BIT_CLR;
+ set_bit(LCD_BIT_E, bits);
+ set_bit(LCD_BIT_RS, bits);
+ clear_bit(LCD_BIT_RW, bits);
set_ctrl_bits();
/* maintain the strobe during 40 us */
udelay(40);
- bits.e = BIT_CLR;
+ clear_bit(LCD_BIT_E, bits);
set_ctrl_bits();
/* the shortest data takes at least 45 us */
@@ -1784,7 +1776,7 @@ static void phys_scan_contacts(void)
gndmask = PNL_PINPUT(r_str(pprt)) & scan_mask_i;
/* grounded inputs are signals 40-44 */
- phys_read |= (pmask_t) gndmask << 40;
+ phys_read |= (__u64)gndmask << 40;
if (bitmask != gndmask) {
/*
@@ -1800,7 +1792,7 @@ static void phys_scan_contacts(void)
w_dtr(pprt, oldval & ~bitval); /* enable this output */
bitmask = PNL_PINPUT(r_str(pprt)) & ~gndmask;
- phys_read |= (pmask_t) bitmask << (5 * bit);
+ phys_read |= (__u64)bitmask << (5 * bit);
}
w_dtr(pprt, oldval); /* disable all outputs */
}
@@ -2037,32 +2029,32 @@ static void init_scan_timer(void)
* corresponding to out and in bits respectively.
* returns 1 if ok, 0 if error (in which case, nothing is written).
*/
-static int input_name2mask(const char *name, pmask_t *mask, pmask_t *value,
- char *imask, char *omask)
+static u8 input_name2mask(const char *name, __u64 *mask, __u64 *value,
+ u8 *imask, u8 *omask)
{
- static char sigtab[10] = "EeSsPpAaBb";
- char im, om;
- pmask_t m, v;
+ const char sigtab[] = "EeSsPpAaBb";
+ u8 im, om;
+ __u64 m, v;
- om = 0ULL;
- im = 0ULL;
+ om = 0;
+ im = 0;
m = 0ULL;
v = 0ULL;
while (*name) {
int in, out, bit, neg;
+ const char *idx;
- for (in = 0; (in < sizeof(sigtab)) && (sigtab[in] != *name);
- in++)
- ;
-
- if (in >= sizeof(sigtab))
+ idx = strchr(sigtab, *name);
+ if (!idx)
return 0; /* input name not found */
+
+ in = idx - sigtab;
neg = (in & 1); /* odd (lower) names are negated */
in >>= 1;
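+ /*
+ * e.g. 'e' sits at index 1 in "EeSsPpAaBb": neg = 1 and in = 0,
+ * i.e. the error input, taken in its negated (active-low) sense.
+ */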
im |= BIT(in);
name++;
- if (isdigit(*name)) {
+ if (*name >= '0' && *name <= '7') {
out = *name - '0';
om |= BIT(out);
} else if (*name == '-') {
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 9a17a9bab8d6..4810e039bbec 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -503,8 +503,7 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
int err;
ssize_t rom_size;
- struct pch_phub_reg *chip =
- dev_get_drvdata(container_of(kobj, struct device, kobj));
+ struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
ret = mutex_lock_interruptible(&pch_phub_mutex);
if (ret) {
@@ -514,8 +513,10 @@ static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
/* Get Rom signature */
chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
- if (!chip->pch_phub_extrom_base_address)
+ if (!chip->pch_phub_extrom_base_address) {
+ err = -ENODATA;
goto exrom_map_err;
+ }
pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
(unsigned char *)&rom_signature);
@@ -567,8 +568,7 @@ static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
unsigned int addr_offset;
int ret;
ssize_t rom_size;
- struct pch_phub_reg *chip =
- dev_get_drvdata(container_of(kobj, struct device, kobj));
+ struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
ret = mutex_lock_interruptible(&pch_phub_mutex);
if (ret)
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index f74fc0ca2ef9..a2d97b9b17e3 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,8 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
#else
*pageshift = PAGE_SHIFT;
#endif
- if (get_user_pages
- (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
+ if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 736dae715dbf..69cdabea9c03 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -360,7 +360,10 @@ static int sram_probe(struct platform_device *pdev)
return -EBUSY;
}
- sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
+ if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
+ sram->virt_base = devm_ioremap(sram->dev, res->start, size);
+ else
+ sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
if (IS_ERR(sram->virt_base))
return PTR_ERR(sram->virt_base);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 6e3af8b42cdd..dcdbd58672cc 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -632,7 +632,6 @@ long st_register(struct st_proto_s *new_proto)
spin_unlock_irqrestore(&st_gdata->lock, flags);
return err;
}
- pr_debug("done %s(%d) ", __func__, new_proto->chnl_id);
}
EXPORT_SYMBOL_GPL(st_register);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index b823f9a6e464..896be150e28f 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.3.0-k");
+MODULE_VERSION("1.1.4.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f42d9c4e4561..f84a4275ca29 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
if (dirty)
set_page_dirty(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
pages[i] = NULL;
}
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index fe207e542032..8a0147dfed27 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -86,7 +86,6 @@ static int max_devices;
/* TODO: Replace these with struct ida */
static DECLARE_BITMAP(dev_use, MAX_DEVICES);
-static DECLARE_BITMAP(name_use, MAX_DEVICES);
/*
* There is one mmc_blk_data per slot.
@@ -105,7 +104,6 @@ struct mmc_blk_data {
unsigned int usage;
unsigned int read_only;
unsigned int part_type;
- unsigned int name_idx;
unsigned int reset_done;
#define MMC_BLK_READ BIT(0)
#define MMC_BLK_WRITE BIT(1)
@@ -589,6 +587,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_card *card;
int err = 0, ioc_err = 0;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
if (IS_ERR(idata))
return PTR_ERR(idata);
@@ -631,6 +637,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
int i, err = 0, ioc_err = 0;
__u64 num_of_cmds;
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
sizeof(num_of_cmds)))
return -EFAULT;
@@ -688,14 +702,6 @@ cmd_err:
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- /*
- * The caller must have CAP_SYS_RAWIO, and must be calling this on the
- * whole block device, not on a partition. This prevents overspray
- * between sibling partitions.
- */
- if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
- return -EPERM;
-
switch (cmd) {
case MMC_IOC_CMD:
return mmc_blk_ioctl_cmd(bdev,
@@ -1362,8 +1368,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
if (brq->data.error) {
if (need_retune && !brq->retune_retry_done) {
- pr_info("%s: retrying because a re-tune was needed\n",
- req->rq_disk->disk_name);
+ pr_debug("%s: retrying because a re-tune was needed\n",
+ req->rq_disk->disk_name);
brq->retune_retry_done = 1;
return MMC_BLK_RETRY;
}
@@ -1524,13 +1530,13 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
}
if (rq_data_dir(req) == READ) {
brq->cmd.opcode = readcmd;
- brq->data.flags |= MMC_DATA_READ;
+ brq->data.flags = MMC_DATA_READ;
if (brq->mrq.stop)
brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
MMC_CMD_AC;
} else {
brq->cmd.opcode = writecmd;
- brq->data.flags |= MMC_DATA_WRITE;
+ brq->data.flags = MMC_DATA_WRITE;
if (brq->mrq.stop)
brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
MMC_CMD_AC;
@@ -1799,7 +1805,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = packed->blocks + hdr_blocks;
- brq->data.flags |= MMC_DATA_WRITE;
+ brq->data.flags = MMC_DATA_WRITE;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -2194,19 +2200,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
goto out;
}
- /*
- * !subname implies we are creating main mmc_blk_data that will be
- * associated with mmc_card with dev_set_drvdata. Due to device
- * partitions, devidx will not coincide with a per-physical card
- * index anymore so we keep track of a name index.
- */
- if (!subname) {
- md->name_idx = find_first_zero_bit(name_use, max_devices);
- __set_bit(md->name_idx, name_use);
- } else
- md->name_idx = ((struct mmc_blk_data *)
- dev_to_disk(parent)->private_data)->name_idx;
-
md->area_type = area_type;
/*
@@ -2256,7 +2249,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%u%s", md->name_idx, subname ? subname : "");
+ "mmcblk%u%s", card->host->index, subname ? subname : "");
if (mmc_card_mmc(card))
blk_queue_logical_block_size(md->queue.queue,
@@ -2410,7 +2403,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
- __clear_bit(md->name_idx, name_use);
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
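
The CAP_SYS_RAWIO / whole-device check moves out of mmc_blk_ioctl() and
into both command handlers, presumably so that ioctls later routed
through that switch are not all forced through the same restriction;
the guard itself is unchanged:

    /* Raw MMC commands: whole device only, privileged callers only. */
    if (!capable(CAP_SYS_RAWIO) || bdev != bdev->bd_contains)
        return -EPERM;
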
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 7fc9174d4619..c032eef45762 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2829,6 +2829,7 @@ static int mtf_testlist_show(struct seq_file *sf, void *data)
mutex_lock(&mmc_test_lock);
+ seq_printf(sf, "0:\tRun all tests\n");
for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
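
With the extra testlist line above, the interface now documents the
long-standing behavior that writing 0 to the card's debugfs "test"
file (which sits next to "testlist" under the card's debugfs
directory) runs every test case, not just one.
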
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index d2de5925b73e..5415056f9aa5 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -493,7 +493,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
if (status & UART_MSR_DCTS) {
port->icount.cts++;
tty = tty_port_tty_get(&port->port);
- if (tty && (tty->termios.c_cflag & CRTSCTS)) {
+ if (tty && C_CRTSCTS(tty)) {
int cts = (status & UART_MSR_CTS);
if (tty->hw_stopped) {
if (cts) {
@@ -648,10 +648,10 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
sdio_uart_change_speed(port, &tty->termios, NULL);
- if (tty->termios.c_cflag & CBAUD)
+ if (C_BAUD(tty))
sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
tty->hw_stopped = 1;
@@ -833,7 +833,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))
+ if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
return;
if (sdio_uart_claim_func(port) != 0)
@@ -844,7 +844,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)
sdio_uart_start_tx(port);
}
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
sdio_uart_clear_mctrl(port, TIOCM_RTS);
sdio_uart_irq(port->func);
@@ -855,7 +855,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))
+ if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
return;
if (sdio_uart_claim_func(port) != 0)
@@ -870,7 +870,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
}
}
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
sdio_uart_set_mctrl(port, TIOCM_RTS);
sdio_uart_irq(port->func);
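
The open-coded termios tests are converted to the flag helpers from
<linux/tty.h>. These expand, through a small _C_FLAG()/_I_FLAG()
helper layer, to the same bit tests, so the change is purely cosmetic:

    /* simplified expansion, for reference */
    #define C_BAUD(tty)     ((tty)->termios.c_cflag & CBAUD)
    #define C_CRTSCTS(tty)  ((tty)->termios.c_cflag & CRTSCTS)
    #define I_IXOFF(tty)    ((tty)->termios.c_iflag & IXOFF)
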
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index f95d41ffc766..41b1e761965f 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1033,7 +1033,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
"width %u timing %u\n",
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
- ios->bus_width, ios->timing);
+ 1 << ios->bus_width, ios->timing);
host->ops->set_ios(host, ios);
}
@@ -1079,7 +1079,8 @@ int mmc_execute_tuning(struct mmc_card *card)
err = host->ops->execute_tuning(host, opcode);
if (err)
- pr_err("%s: tuning execution failed\n", mmc_hostname(host));
+ pr_err("%s: tuning execution failed: %d\n",
+ mmc_hostname(host), err);
else
mmc_retune_enable(host);
@@ -1204,8 +1205,9 @@ EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
 * @np: The device node to be parsed.
* @mask: mask of voltages available for MMC/SD/SDIO
*
- * 1. Return zero on success.
- * 2. Return negative errno: voltage-range is invalid.
+ * Parse the "voltage-ranges" DT property, returning zero if it is not
+ * found, negative errno if the voltage-range specification is invalid,
+ * or one if the voltage-range is specified and successfully parsed.
*/
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
@@ -1214,8 +1216,12 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
- if (!voltage_ranges || !num_ranges) {
- pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+ if (!voltage_ranges) {
+ pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
+ return 0;
+ }
+ if (!num_ranges) {
+ pr_err("%s: voltage-ranges empty\n", np->full_name);
return -EINVAL;
}
@@ -1234,7 +1240,7 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
*mask |= ocr_mask;
}
- return 0;
+ return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);
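
The reworked return contract makes "voltage-ranges" genuinely optional
while still rejecting a malformed property. A sketch of the caller side
under the new contract (use_default_ocr() is a hypothetical helper):

    ret = mmc_of_parse_voltage(np, &ocr_mask);
    if (ret < 0)
        return ret;          /* property present but invalid */
    else if (ret == 0)
        use_default_ocr();   /* property absent: fall back */
    /* else ret == 1: ocr_mask is valid */
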
@@ -2532,7 +2538,7 @@ int mmc_detect_card_removed(struct mmc_host *host)
if (!card)
return 1;
- if (host->caps & MMC_CAP_NONREMOVABLE)
+ if (!mmc_card_is_removable(host))
return 0;
ret = mmc_card_removed(card);
@@ -2570,7 +2576,7 @@ void mmc_rescan(struct work_struct *work)
return;
/* If there is a non-removable card registered, only scan once */
- if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
+ if (!mmc_card_is_removable(host) && host->rescan_entered)
return;
host->rescan_entered = 1;
@@ -2587,8 +2593,7 @@ void mmc_rescan(struct work_struct *work)
* if there is a _removable_ card registered, check whether it is
* still present
*/
- if (host->bus_ops && !host->bus_dead
- && !(host->caps & MMC_CAP_NONREMOVABLE))
+ if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
host->bus_ops->detect(host);
host->detect_change = 0;
@@ -2613,7 +2618,7 @@ void mmc_rescan(struct work_struct *work)
mmc_bus_put(host);
mmc_claim_host(host);
- if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
+ if (mmc_card_is_removable(host) && host->ops->get_cd &&
host->ops->get_cd(host) == 0) {
mmc_power_off(host);
mmc_release_host(host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 65cc0ac9b82d..9382a57a5aa4 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -220,7 +220,7 @@ static int mmc_clock_opt_set(void *data, u64 val)
struct mmc_host *host = data;
/* We need this check because the input value is a u64 */
- if (val > host->f_max)
+ if (val != 0 && (val > host->f_max || val < host->f_min))
return -EINVAL;
mmc_claim_host(host);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0aecd5c00b86..6e4c55a4aab5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -339,6 +339,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->class_dev.parent = dev;
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);
+ device_enable_async_suspend(&host->class_dev);
if (mmc_gpio_alloc(host)) {
put_device(&host->class_dev);
@@ -355,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
* They have to set these according to their abilities.
*/
host->max_segs = 1;
- host->max_seg_size = PAGE_CACHE_SIZE;
+ host->max_seg_size = PAGE_SIZE;
- host->max_req_size = PAGE_CACHE_SIZE;
+ host->max_req_size = PAGE_SIZE;
host->max_blk_size = 512;
- host->max_blk_count = PAGE_CACHE_SIZE / 512;
+ host->max_blk_count = PAGE_SIZE / 512;
return host;
}
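
PAGE_CACHE_SIZE had been defined as PAGE_SIZE since its introduction
(#define PAGE_CACHE_SIZE PAGE_SIZE in <linux/pagemap.h>), so these
defaults are numerically identical; only the obsolete alias goes away.
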
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bf49e44571f2..4dbe3df8024b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -501,7 +501,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
if (!card->ext_csd.man_bkops_en)
- pr_info("%s: MAN_BKOPS_EN bit is not set\n",
+ pr_debug("%s: MAN_BKOPS_EN bit is not set\n",
mmc_hostname(card->host));
}
@@ -945,7 +945,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
break;
} else {
pr_warn("%s: switch to bus width %d failed\n",
- mmc_hostname(host), ext_csd_bits[idx]);
+ mmc_hostname(host), 1 << bus_width);
}
}
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 2c90635c89af..62355bda608f 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -90,7 +90,6 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
- int err;
struct mmc_command cmd = {0};
BUG_ON(!host);
@@ -105,11 +104,7 @@ static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
}
- err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- return 0;
+ return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
int mmc_select_card(struct mmc_card *card)
@@ -244,7 +239,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
int mmc_set_relative_addr(struct mmc_card *card)
{
- int err;
struct mmc_command cmd = {0};
BUG_ON(!card);
@@ -254,11 +248,7 @@ int mmc_set_relative_addr(struct mmc_card *card)
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- return 0;
+ return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}
static int
@@ -743,7 +733,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
- int err, width;
+ int width;
if (bus_width == MMC_BUS_WIDTH_8)
width = 8;
@@ -759,8 +749,7 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
* is a problem. This improves chances that the test will work.
*/
mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
- err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
- return err;
+ return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index aba786daebca..bc173e18b71c 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/mmc/host.h>
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index bb39a29b2db6..b95bd24d92f4 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -74,8 +74,6 @@ void mmc_decode_cid(struct mmc_card *card)
{
u32 *resp = card->raw_cid;
- memset(&card->cid, 0, sizeof(struct mmc_cid));
-
/*
* SD doesn't currently have a version field so we will
* have to assume we can parse this.
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index 48d0c93ba25a..16b774c18e75 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
int mmc_app_set_bus_width(struct mmc_card *card, int width)
{
- int err;
struct mmc_command cmd = {0};
BUG_ON(!card);
@@ -140,11 +139,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
return -EINVAL;
}
- err = mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
- if (err)
- return err;
-
- return 0;
+ return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
}
int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 467b3cf80c44..bd44ba8116d1 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -106,8 +106,6 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
unsigned char data;
unsigned char speed;
- memset(&card->cccr, 0, sizeof(struct sdio_cccr));
-
ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_CCCR, 0, &data);
if (ret)
goto out;
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 62508b457c4f..34f6e8015306 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -217,7 +217,6 @@ int sdio_reset(struct mmc_host *host)
else
abort |= 0x08;
- ret = mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
- return ret;
+ return mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 1526b8a10b09..04feea8354cb 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -318,15 +318,15 @@ config MMC_SDHCI_F_SDH30
If unsure, say N.
config MMC_SDHCI_IPROC
- tristate "SDHCI platform support for the iProc SD/MMC Controller"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
+ tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
+ depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
depends on MMC_SDHCI_PLTFM
default ARCH_BCM_IPROC
select MMC_SDHCI_IO_ACCESSORS
help
This selects the iProc SD/MMC controller.
- If you have an IPROC platform with SD or MMC devices,
+ If you have a BCM2835 or IPROC platform with SD or MMC devices,
say Y or M here.
If unsure, say N.
@@ -560,8 +560,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "SH-Mobile SDHI SD/SDIO controller support"
- depends on SUPERH || ARM
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARM || ARM64
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select MMC_TMIO_CORE
help
This provides support for the SDHI SD/SDIO controller found in
@@ -673,8 +673,8 @@ config MMC_DW_ROCKCHIP
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
- depends on MMC_BLOCK && HAS_DMA
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_DMA
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
help
This selects the MMC Host Interface controller (MMCIF).
@@ -786,3 +786,14 @@ config MMC_MTK
If you have a machine with an integrated SD/MMC card reader, say Y or M here.
This is needed if support for any SD/SDIO/MMC devices is required.
If unsure, say N.
+
+config MMC_SDHCI_MICROCHIP_PIC32
+ tristate "Microchip PIC32MZDA SDHCI support"
+ depends on MMC_SDHCI && PIC32MZDA && MMC_SDHCI_PLTFM
+ help
+ This selects the Secure Digital Host Controller Interface (SDHCI)
+ for the PIC32MZDA platform.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3595f83e89dd..af918d261ff9 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
+obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 851ccd9ac868..9268c41a8561 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -848,9 +848,7 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
if (cmd->opcode == SD_IO_RW_EXTENDED) {
cmdr |= ATMCI_CMDR_SDIO_BLOCK;
} else {
- if (data->flags & MMC_DATA_STREAM)
- cmdr |= ATMCI_CMDR_STREAM;
- else if (data->blocks > 1)
+ if (data->blocks > 1)
cmdr |= ATMCI_CMDR_MULTI_BLOCK;
else
cmdr |= ATMCI_CMDR_BLOCK;
@@ -1371,10 +1369,7 @@ static void atmci_start_request(struct atmel_mci *host,
host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
if (!(data->flags & MMC_DATA_WRITE))
host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
- if (data->flags & MMC_DATA_STREAM)
- host->stop_cmdr |= ATMCI_CMDR_STREAM;
- else
- host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
+ host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
}
/*
@@ -2443,7 +2438,7 @@ static int atmci_configure_dma(struct atmel_mci *host)
struct mci_platform_data *pdata = host->pdev->dev.platform_data;
dma_cap_mask_t mask;
- if (!pdata->dma_filter)
+ if (!pdata || !pdata->dma_filter)
return -ENODEV;
dma_cap_zero(mask);
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 2b7f37e82ca9..526231e38583 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -126,9 +126,6 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
length = data->blksz * data->blocks;
bfin_write_SDH_DATA_LGTH(length);
- if (data->flags & MMC_DATA_STREAM)
- data_ctl |= DTX_MODE;
-
if (data->flags & MMC_DATA_READ)
data_ctl |= DTX_DIR;
/* Only supports power-of-2 block size */
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index ea2a2ebc6b91..693144e7427b 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -346,10 +346,6 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
if (cmd->data)
cmd_reg |= MMCCMD_WDATX;
- /* Setting whether stream or block transfer */
- if (cmd->flags & MMC_DATA_STREAM)
- cmd_reg |= MMCCMD_STRMTP;
-
/* Setting whether data read or write */
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
cmd_reg |= MMCCMD_DTRW;
@@ -568,8 +564,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
return;
}
- dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
- (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
+ dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n",
(data->flags & MMC_DATA_WRITE) ? "write" : "read",
data->blocks, data->blksz);
dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
@@ -584,22 +579,18 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
writel(data->blksz, host->base + DAVINCI_MMCBLEN);
/* Configure the FIFO */
- switch (data->flags & MMC_DATA_WRITE) {
- case MMC_DATA_WRITE:
+ if (data->flags & MMC_DATA_WRITE) {
host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
host->base + DAVINCI_MMCFIFOCTL);
- break;
-
- default:
+ } else {
host->data_dir = DAVINCI_MMC_DATADIR_READ;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
host->base + DAVINCI_MMCFIFOCTL);
- break;
}
host->buffer = NULL;
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 3a7e835a0033..8790f2afc057 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -145,6 +145,16 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
mci_writel(host, CLKSEL64, clksel);
else
mci_writel(host, CLKSEL, clksel);
+
+ /*
+ * Exynos4412 and Exynos5250 extend the use of the CMD register with
+ * bit 29 (which is reserved on standard MSHC controllers) to
+ * optionally bypass the HOLD register for command and data. The
+ * HOLD register should be bypassed when no phase shift is applied
+ * to the CMD/DATA sent to the card.
+ */
+ if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel))
+ set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
}
#ifdef CONFIG_PM_SLEEP
@@ -202,26 +212,6 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
#define dw_mci_exynos_resume_noirq NULL
#endif /* CONFIG_PM_SLEEP */
-static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
- struct dw_mci_exynos_priv_data *priv = host->priv;
- /*
- * Exynos4412 and Exynos5250 extends the use of CMD register with the
- * use of bit 29 (which is reserved on standard MSHC controllers) for
- * optionally bypassing the HOLD register for command and data. The
- * HOLD register should be bypassed in case there is no phase shift
- * applied on CMD/DATA that is sent to the card.
- */
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
- if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL64)))
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
- } else {
- if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL)))
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
- }
-}
-
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
@@ -500,7 +490,6 @@ static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
.init = dw_mci_exynos_priv_init,
.setup_clock = dw_mci_exynos_setup_clock,
- .prepare_command = dw_mci_exynos_prepare_command,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
.execute_tuning = dw_mci_exynos_execute_tuning,
diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
index 81bdeeb05a4d..c0bb0c793e84 100644
--- a/drivers/mmc/host/dw_mmc-pltfm.c
+++ b/drivers/mmc/host/dw_mmc-pltfm.c
@@ -26,19 +26,6 @@
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
-static void dw_mci_pltfm_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-}
-
-static const struct dw_mci_drv_data socfpga_drv_data = {
- .prepare_command = dw_mci_pltfm_prepare_command,
-};
-
-static const struct dw_mci_drv_data pistachio_drv_data = {
- .prepare_command = dw_mci_pltfm_prepare_command,
-};
-
int dw_mci_pltfm_register(struct platform_device *pdev,
const struct dw_mci_drv_data *drv_data)
{
@@ -94,10 +81,8 @@ EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
static const struct of_device_id dw_mci_pltfm_match[] = {
{ .compatible = "snps,dw-mshc", },
- { .compatible = "altr,socfpga-dw-mshc",
- .data = &socfpga_drv_data },
- { .compatible = "img,pistachio-dw-mshc",
- .data = &pistachio_drv_data },
+ { .compatible = "altr,socfpga-dw-mshc", },
+ { .compatible = "img,pistachio-dw-mshc", },
{},
};
MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index d9c92f31da64..84e50f3a64b6 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -26,11 +26,6 @@ struct dw_mci_rockchip_priv_data {
int default_sample_phase;
};
-static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
- *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-}
-
static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
{
host->bus_hz /= RK3288_CLKGEN_DIV;
@@ -240,12 +235,10 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
}
static const struct dw_mci_drv_data rk2928_drv_data = {
- .prepare_command = dw_mci_rockchip_prepare_command,
.init = dw_mci_rockchip_init,
};
static const struct dw_mci_drv_data rk3288_drv_data = {
- .prepare_command = dw_mci_rockchip_prepare_command,
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
.parse_dt = dw_mci_rk3288_parse_dt,
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 712835177e8b..242f9a0769bd 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -234,7 +234,6 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
struct mmc_data *data;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
- const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
u32 cmdr;
cmd->error = -EINPROGRESS;
@@ -290,14 +289,12 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
data = cmd->data;
if (data) {
cmdr |= SDMMC_CMD_DAT_EXP;
- if (data->flags & MMC_DATA_STREAM)
- cmdr |= SDMMC_CMD_STRM_MODE;
if (data->flags & MMC_DATA_WRITE)
cmdr |= SDMMC_CMD_DAT_WR;
}
- if (drv_data && drv_data->prepare_command)
- drv_data->prepare_command(slot->host, &cmdr);
+ if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
+ cmdr |= SDMMC_CMD_USE_HOLD_REG;
return cmdr;
}
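
With the prepare_command() hook gone, SDMMC_CMD_USE_HOLD_REG becomes
the default, and SoC glue now opts out per slot instead. The two
halves of the new mechanism, as seen in this series:

    /* dw_mmc-exynos.c: bypass HOLD when no phase shift is applied */
    if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel))
        set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);

    /* dw_mmc.c core, dw_mci_prepare_command(): */
    if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
        cmdr |= SDMMC_CMD_USE_HOLD_REG;
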
@@ -1450,12 +1447,11 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
{
int present;
struct dw_mci_slot *slot = mmc_priv(mmc);
- struct dw_mci_board *brd = slot->host->pdata;
struct dw_mci *host = slot->host;
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
- if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
(mmc->caps & MMC_CAP_NONREMOVABLE))
present = 1;
else if (!IS_ERR_VALUE(gpio_cd))
@@ -1477,6 +1473,34 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
return present;
}
+static void dw_mci_hw_reset(struct mmc_host *mmc)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ int reset;
+
+ if (host->use_dma == TRANS_MODE_IDMAC)
+ dw_mci_idmac_reset(host);
+
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
+ SDMMC_CTRL_FIFO_RESET))
+ return;
+
+ /*
+ * According to the eMMC spec, the card reset procedure is:
+ * tRstW >= 1us: RST_n pulse width
+ * tRSCA >= 200us: RST_n to Command time
+ * tRSTH >= 1us: RST_n high period
+ */
+ reset = mci_readl(host, RST_N);
+ reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
+ mci_writel(host, RST_N, reset);
+ usleep_range(1, 2);
+ reset |= SDMMC_RST_HWACTIVE << slot->id;
+ mci_writel(host, RST_N, reset);
+ usleep_range(200, 300);
+}
+
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -1563,6 +1587,7 @@ static const struct mmc_host_ops dw_mci_ops = {
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
+ .hw_reset = dw_mci_hw_reset,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
.execute_tuning = dw_mci_execute_tuning,
.card_busy = dw_mci_card_busy,
@@ -2840,23 +2865,13 @@ static void dw_mci_dto_timer(unsigned long arg)
}
#ifdef CONFIG_OF
-static struct dw_mci_of_quirks {
- char *quirk;
- int id;
-} of_quirks[] = {
- {
- .quirk = "broken-cd",
- .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
- },
-};
-
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
struct dw_mci_board *pdata;
struct device *dev = host->dev;
struct device_node *np = dev->of_node;
const struct dw_mci_drv_data *drv_data = host->drv_data;
- int idx, ret;
+ int ret;
u32 clock_frequency;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -2864,17 +2879,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
return ERR_PTR(-ENOMEM);
/* find out number of slots supported */
- if (of_property_read_u32(dev->of_node, "num-slots",
- &pdata->num_slots)) {
- dev_info(dev,
- "num-slots property not found, assuming 1 slot is available\n");
- pdata->num_slots = 1;
- }
-
- /* get quirks */
- for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
- if (of_get_property(np, of_quirks[idx].quirk, NULL))
- pdata->quirks |= of_quirks[idx].id;
+ of_property_read_u32(np, "num-slots", &pdata->num_slots);
if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
dev_info(dev,
@@ -2908,18 +2913,19 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
static void dw_mci_enable_cd(struct dw_mci *host)
{
- struct dw_mci_board *brd = host->pdata;
unsigned long irqflags;
u32 temp;
int i;
+ struct dw_mci_slot *slot;
- /* No need for CD if broken card detection */
- if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
- return;
-
- /* No need for CD if all slots have a non-error GPIO */
+ /*
+ * No need for a CD interrupt if broken card detection (polling)
+ * is in use, or if every slot has a usable (non-error) CD GPIO.
+ */
for (i = 0; i < host->num_slots; i++) {
- struct dw_mci_slot *slot = host->slot[i];
+ slot = host->slot[i];
+ if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
+ return;
if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
break;
@@ -2949,12 +2955,6 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (host->pdata->num_slots < 1) {
- dev_err(host->dev,
- "Platform data must supply num_slots.\n");
- return -ENODEV;
- }
-
host->biu_clk = devm_clk_get(host->dev, "biu");
if (IS_ERR(host->biu_clk)) {
dev_dbg(host->dev, "biu clock not available\n");
@@ -3052,8 +3052,10 @@ int dw_mci_probe(struct dw_mci *host)
}
/* Reset all blocks */
- if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
- return -ENODEV;
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
+ ret = -ENODEV;
+ goto err_clk_ciu;
+ }
host->dma_ops = host->pdata->dma_ops;
dw_mci_init_dma(host);
@@ -3111,13 +3113,20 @@ int dw_mci_probe(struct dw_mci *host)
if (host->pdata->num_slots)
host->num_slots = host->pdata->num_slots;
else
- host->num_slots = SDMMC_GET_SLOT_NUM(mci_readl(host, HCON));
+ host->num_slots = 1;
+
+ if (host->num_slots < 1 ||
+ host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
+ dev_err(host->dev,
+ "Platform data must supply correct num_slots.\n");
+ ret = -ENODEV;
+ goto err_clk_ciu;
+ }
/*
* Enable interrupts for command done, data over, data empty,
* receive ready and error such as transmit, receive timeout, crc error
*/
- mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS);
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index f695b58f0613..68d5da2dfd19 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -46,6 +46,7 @@
#define SDMMC_VERID 0x06c
#define SDMMC_HCON 0x070
#define SDMMC_UHS_REG 0x074
+#define SDMMC_RST_N 0x078
#define SDMMC_BMOD 0x080
#define SDMMC_PLDMND 0x084
#define SDMMC_DBADDR 0x088
@@ -169,6 +170,8 @@
#define SDMMC_IDMAC_ENABLE BIT(7)
#define SDMMC_IDMAC_FB BIT(1)
#define SDMMC_IDMAC_SWRESET BIT(0)
+/* H/W reset */
+#define SDMMC_RST_HWACTIVE 0x1
/* Version ID register define */
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
/* Card read threshold */
@@ -265,6 +268,7 @@ struct dw_mci_slot {
#define DW_MMC_CARD_PRESENT 0
#define DW_MMC_CARD_NEED_INIT 1
#define DW_MMC_CARD_NO_LOW_PWR 2
+#define DW_MMC_CARD_NO_USE_HOLD 3
int id;
int sdio_id;
};
@@ -274,7 +278,6 @@ struct dw_mci_slot {
* @caps: mmc subsystem specified capabilities of the controller(s).
* @init: early implementation specific initialization.
* @setup_clock: implementation specific clock configuration.
- * @prepare_command: handle CMD register extensions.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
* @execute_tuning: implementation specific tuning procedure.
@@ -287,7 +290,6 @@ struct dw_mci_drv_data {
unsigned long *caps;
int (*init)(struct dw_mci *host);
int (*setup_clock)(struct dw_mci *host);
- void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 76e8bce6f46e..03ddf0ecf402 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -660,8 +660,6 @@ static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
cmdat |= JZ_MMC_CMDAT_DATA_EN;
if (cmd->data->flags & MMC_DATA_WRITE)
cmdat |= JZ_MMC_CMDAT_WRITE;
- if (cmd->data->flags & MMC_DATA_STREAM)
- cmdat |= JZ_MMC_CMDAT_STREAM;
if (host->use_dma)
cmdat |= JZ_MMC_CMDAT_DMA_EN;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 3446097a43c0..e77d79c8cd9f 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1442,6 +1442,12 @@ static int mmc_spi_probe(struct spi_device *spi)
host->pdata->cd_debounce);
if (status != 0)
goto fail_add_host;
+
+ /* The platform has a CD GPIO signal that may support
+ * interrupts, so let mmc_gpiod_request_cd_irq() decide
+ * if polling is needed or not.
+ */
+ mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
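
Clearing MMC_CAP_NEEDS_POLL first matters because the slot-gpio core
only falls back to polling when the CD GPIO cannot deliver an
interrupt, roughly (simplified from drivers/mmc/core/slot-gpio.c):

    irq = gpiod_to_irq(ctx->cd_gpio);
    if (irq >= 0)
        devm_request_threaded_irq(...);    /* IRQ-driven card detect */
    if (irq < 0)
        host->caps |= MMC_CAP_NEEDS_POLL;  /* no IRQ: poll instead */

Leaving the polling cap set would have kept the slow path even on
IRQ-capable GPIOs.
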
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 0d6ca4116f3d..2e6c96845c9a 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -40,7 +40,6 @@
#include <asm/div64.h>
#include <asm/io.h>
-#include <asm/sizes.h>
#include "mmci.h"
#include "mmci_qcom_dml.h"
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 82a97ac4e956..b17f30da97da 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -35,6 +35,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/slot-gpio.h>
#define MAX_BD_NUM 1024
@@ -1020,26 +1021,19 @@ static void msdc_set_buswidth(struct msdc_host *host, u32 width)
static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
- int min_uv, max_uv;
int ret = 0;
if (!IS_ERR(mmc->supply.vqmmc)) {
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
- min_uv = 3300000;
- max_uv = 3300000;
- } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
- min_uv = 1800000;
- max_uv = 1800000;
- } else {
+ if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 &&
+ ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
dev_err(host->dev, "Unsupported signal voltage!\n");
return -EINVAL;
}
- ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
if (ret) {
- dev_err(host->dev,
- "Regulator set error %d: %d - %d\n",
- ret, min_uv, max_uv);
+ dev_dbg(host->dev, "Regulator set error %d (%d)\n",
+ ret, ios->signal_voltage);
} else {
/* Apply different pinctrl settings for different signal voltage */
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
@@ -1452,6 +1446,7 @@ static struct mmc_host_ops mt_msdc_ops = {
.pre_req = msdc_pre_req,
.request = msdc_ops_request,
.set_ios = msdc_ops_set_ios,
+ .get_ro = mmc_gpio_get_ro,
.start_signal_voltage_switch = msdc_ops_switch_volt,
.card_busy = msdc_card_busy,
.execute_tuning = msdc_execute_tuning,
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index d110f9e98c4b..3d1ea5e0e549 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -307,9 +307,6 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
enum dma_transfer_direction slave_dirn;
int i, nents;
- if (data->flags & MMC_DATA_STREAM)
- nob = 0xffff;
-
host->data = data;
data->bytes_xfered = 0;
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index 660170cd04d9..85bbebfde02e 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -74,7 +74,6 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
const u32 *voltage_ranges;
int num_ranges;
int i;
- int ret = -EINVAL;
if (dev->platform_data || !np)
return dev->platform_data;
@@ -97,7 +96,6 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
be32_to_cpu(voltage_ranges[j + 1]));
if (!mask) {
- ret = -EINVAL;
dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
goto err_ocr;
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index f6e4d9718035..f9ac3bb5d617 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -503,8 +503,11 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
host->pbias = devm_regulator_get_optional(host->dev, "pbias");
if (IS_ERR(host->pbias)) {
ret = PTR_ERR(host->pbias);
- if ((ret != -ENODEV) && host->dev->of_node)
+ if ((ret != -ENODEV) && host->dev->of_node) {
+ dev_err(host->dev,
+ "SD card detect fail? enable CONFIG_REGULATOR_PBIAS\n");
return ret;
+ }
dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
PTR_ERR(host->pbias));
host->pbias = NULL;
@@ -2159,7 +2162,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
&rx_req, &pdev->dev, "rx");
if (!host->rx_chan) {
- dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+ dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel\n");
ret = -ENXIO;
goto err_irq;
}
@@ -2169,7 +2172,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
&tx_req, &pdev->dev, "tx");
if (!host->tx_chan) {
- dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+ dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel\n");
ret = -ENXIO;
goto err_irq;
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index da824772bbb4..86fac3e86833 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -191,9 +191,6 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
host->data = data;
- if (data->flags & MMC_DATA_STREAM)
- nob = 0xffff;
-
writel(nob, host->base + MMC_NOB);
writel(data->blksz, host->base + MMC_BLKLEN);
@@ -443,9 +440,6 @@ static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
if (mrq->data->flags & MMC_DATA_WRITE)
cmdat |= CMDAT_WRITE;
-
- if (mrq->data->flags & MMC_DATA_STREAM)
- cmdat |= CMDAT_STREAM;
}
pxamci_start_cmd(host, mrq->cmd, cmdat);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 6291d5042ef2..39814f3dc96f 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1014,8 +1014,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
if (host->bus_width == MMC_BUS_WIDTH_4)
dcon |= S3C2410_SDIDCON_WIDEBUS;
- if (!(data->flags & MMC_DATA_STREAM))
- dcon |= S3C2410_SDIDCON_BLOCKMODE;
+ dcon |= S3C2410_SDIDCON_BLOCKMODE;
if (data->flags & MMC_DATA_WRITE) {
dcon |= S3C2410_SDIDCON_TXAFTERRESP;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index a5cda926d38e..6839e41c6d58 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -75,7 +75,6 @@ struct sdhci_acpi_host {
const struct sdhci_acpi_slot *slot;
struct platform_device *pdev;
bool use_runtime_pm;
- bool dma_setup;
};
static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
@@ -83,33 +82,6 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
return c->slot && (c->slot->flags & flag);
}
-static int sdhci_acpi_enable_dma(struct sdhci_host *host)
-{
- struct sdhci_acpi_host *c = sdhci_priv(host);
- struct device *dev = &c->pdev->dev;
- int err = -1;
-
- if (c->dma_setup)
- return 0;
-
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
- if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) {
- host->flags &= ~SDHCI_USE_64_BIT_DMA;
- } else {
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err)
- dev_warn(dev, "Failed to set 64-bit DMA mask\n");
- }
- }
-
- if (err)
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
- c->dma_setup = !err;
-
- return err;
-}
-
static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
{
u8 reg;
@@ -127,7 +99,6 @@ static void sdhci_acpi_int_hw_reset(struct sdhci_host *host)
static const struct sdhci_ops sdhci_acpi_ops_dflt = {
.set_clock = sdhci_set_clock,
- .enable_dma = sdhci_acpi_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -135,7 +106,6 @@ static const struct sdhci_ops sdhci_acpi_ops_dflt = {
static const struct sdhci_ops sdhci_acpi_ops_int = {
.set_clock = sdhci_set_clock,
- .enable_dma = sdhci_acpi_enable_dma,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -264,6 +234,17 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.probe_slot = sdhci_acpi_sd_probe_slot,
};
+static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V,
+ .caps = MMC_CAP_NONREMOVABLE,
+};
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ .caps = MMC_CAP_NONREMOVABLE,
+};
+
struct sdhci_acpi_uid_slot {
const char *hid;
const char *uid;
@@ -284,6 +265,8 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0FFF" , "3" , &sdhci_acpi_slot_int_sd },
{ "PNP0D40" },
+ { "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
+ { "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
{ },
};
@@ -298,6 +281,8 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "INT3436" },
{ "INT344D" },
{ "PNP0D40" },
+ { "QCOM8051" },
+ { "QCOM8052" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
@@ -418,6 +403,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
}
+ device_enable_async_suspend(dev);
+
return 0;
err_free:
diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c
index 1c65d4690e70..4a6a1d1386cb 100644
--- a/drivers/mmc/host/sdhci-bcm2835.c
+++ b/drivers/mmc/host/sdhci-bcm2835.c
@@ -74,7 +74,7 @@ static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg)
static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv;
+ struct bcm2835_sdhci *bcm2835_host = sdhci_pltfm_priv(pltfm_host);
u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow :
bcm2835_sdhci_readl(host, reg & ~3);
u32 word_num = (reg >> 1) & 1;
@@ -152,20 +152,12 @@ static int bcm2835_sdhci_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
int ret;
- host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata,
+ sizeof(*bcm2835_host));
if (IS_ERR(host))
return PTR_ERR(host);
- bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host),
- GFP_KERNEL);
- if (!bcm2835_host) {
- dev_err(mmc_dev(host->mmc),
- "failed to allocate bcm2835_sdhci\n");
- return -ENOMEM;
- }
-
pltfm_host = sdhci_priv(host);
- pltfm_host->priv = bcm2835_host;
pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pltfm_host->clk)) {
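
This is the recurring conversion in these sdhci patches: the driver's
private struct is allocated by sdhci_pltfm_init() itself, tailing the
platform host, instead of a separate devm_kzalloc() hung off
pltfm_host->priv. The idiom (struct my_priv / my_pdata are
placeholders):

    host = sdhci_pltfm_init(pdev, &my_pdata, sizeof(struct my_priv));
    if (IS_ERR(host))
        return PTR_ERR(host);

    pltfm_host = sdhci_priv(host);
    priv = sdhci_pltfm_priv(pltfm_host);

One allocation, one lifetime: the private data is released together
with the host by sdhci_pltfm_free().
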
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f25f29253595..2d300d87cda8 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -260,7 +260,7 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 val = readl(host->ioaddr + reg);
if (unlikely(reg == SDHCI_PRESENT_STATE)) {
@@ -338,7 +338,7 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 data;
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
@@ -388,7 +388,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u16 ret = 0;
u32 val;
@@ -448,7 +448,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 new_val = 0;
switch (reg) {
@@ -556,7 +556,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 new_val;
u32 mask;
@@ -633,7 +633,7 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
unsigned int host_clock = pltfm_host->clock;
int pre_div = 2;
int div = 1;
@@ -692,7 +692,7 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
switch (boarddata->wp_type) {
@@ -794,7 +794,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host,
unsigned int uhs)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct pinctrl_state *pinctrl;
dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);
@@ -864,7 +864,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
u32 m;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct esdhc_platform_data *boarddata = &imx_data->boarddata;
/* disable ddr mode and disable HS400 mode */
@@ -917,7 +917,7 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
return esdhc_is_usdhc(imx_data) ? 1 << 28 : 1 << 27;
}
@@ -925,7 +925,7 @@ static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
/* use maximum timeout counter */
sdhci_writeb(host, esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
@@ -1100,21 +1100,17 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
int err;
struct pltfm_imx_data *imx_data;
- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata,
+ sizeof(*imx_data));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- imx_data = devm_kzalloc(&pdev->dev, sizeof(*imx_data), GFP_KERNEL);
- if (!imx_data) {
- err = -ENOMEM;
- goto free_sdhci;
- }
+ imx_data = sdhci_pltfm_priv(pltfm_host);
imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
pdev->id_entry->driver_data;
- pltfm_host->priv = imx_data;
imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx_data->clk_ipg)) {
@@ -1241,7 +1237,7 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
pm_runtime_get_sync(&pdev->dev);
@@ -1264,7 +1260,7 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_runtime_suspend_host(host);
@@ -1282,7 +1278,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
if (!sdhci_sdio_irq_enabled(host)) {
clk_prepare_enable(imx_data->clk_per);
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 3b423b0ad8e7..1110f73b08aa 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -26,6 +26,7 @@ struct sdhci_iproc_data {
const struct sdhci_pltfm_data *pdata;
u32 caps;
u32 caps1;
+ u32 mmc_caps;
};
struct sdhci_iproc_host {
@@ -165,9 +166,25 @@ static const struct sdhci_iproc_data iproc_data = {
.pdata = &sdhci_iproc_pltfm_data,
.caps = 0x05E90000,
.caps1 = 0x00000064,
+ .mmc_caps = MMC_CAP_1_8V_DDR,
+};
+
+static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_MISSING_CAPS,
+ .ops = &sdhci_iproc_ops,
+};
+
+static const struct sdhci_iproc_data bcm2835_data = {
+ .pdata = &sdhci_bcm2835_pltfm_data,
+ .caps = SDHCI_CAN_VDD_330,
+ .caps1 = 0x00000000,
+ .mmc_caps = 0x00000000,
};
static const struct of_device_id sdhci_iproc_of_match[] = {
+ { .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
{ .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_data },
{ }
};
@@ -199,32 +216,37 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
mmc_of_parse(host->mmc);
sdhci_get_of_property(pdev);
- /* Enable EMMC 1/8V DDR capable */
- host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ host->mmc->caps |= iproc_host->data->mmc_caps;
pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
goto err;
}
+ ret = clk_prepare_enable(pltfm_host->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable host clk\n");
+ goto err;
+ }
if (iproc_host->data->pdata->quirks & SDHCI_QUIRK_MISSING_CAPS) {
host->caps = iproc_host->data->caps;
host->caps1 = iproc_host->data->caps1;
}
- return sdhci_add_host(host);
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto err_clk;
+
+ return 0;
+err_clk:
+ clk_disable_unprepare(pltfm_host->clk);
err:
sdhci_pltfm_free(pdev);
return ret;
}
-static int sdhci_iproc_remove(struct platform_device *pdev)
-{
- return sdhci_pltfm_unregister(pdev);
-}
-
static struct platform_driver sdhci_iproc_driver = {
.driver = {
.name = "sdhci-iproc",
@@ -232,7 +254,7 @@ static struct platform_driver sdhci_iproc_driver = {
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_iproc_probe,
- .remove = sdhci_iproc_remove,
+ .remove = sdhci_pltfm_unregister,
};
module_platform_driver(sdhci_iproc_driver);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4695bee203ea..0653fe730150 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -60,7 +60,6 @@ struct sdhci_msm_host {
struct clk *pclk; /* SDHC peripheral bus clock */
struct clk *bus_clk; /* SDHC bus voter clock */
struct mmc_host *mmc;
- struct sdhci_pltfm_data sdhci_msm_pdata;
};
/* Platform specific tuning */
@@ -418,7 +417,7 @@ static const struct of_device_id sdhci_msm_dt_match[] = {
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
-static struct sdhci_ops sdhci_msm_ops = {
+static const struct sdhci_ops sdhci_msm_ops = {
.platform_execute_tuning = sdhci_msm_execute_tuning,
.reset = sdhci_reset,
.set_clock = sdhci_set_clock,
@@ -426,6 +425,12 @@ static struct sdhci_ops sdhci_msm_ops = {
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
+static const struct sdhci_pltfm_data sdhci_msm_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE,
+ .ops = &sdhci_msm_ops,
+};
+
static int sdhci_msm_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
@@ -437,17 +442,12 @@ static int sdhci_msm_probe(struct platform_device *pdev)
u32 core_version, caps;
u8 core_major;
- msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
- if (!msm_host)
- return -ENOMEM;
-
- msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
- host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- pltfm_host->priv = msm_host;
+ msm_host = sdhci_pltfm_priv(pltfm_host);
msm_host->mmc = host->mmc;
msm_host->pdev = pdev;
@@ -522,9 +522,6 @@ static int sdhci_msm_probe(struct platform_device *pdev)
/* Set HC_MODE_EN bit in HC_MODE register */
writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
- host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
- host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
-
host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
@@ -570,16 +567,16 @@ static int sdhci_msm_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
0xffffffff);
sdhci_remove_host(host, dead);
- sdhci_pltfm_free(pdev);
clk_disable_unprepare(msm_host->clk);
clk_disable_unprepare(msm_host->pclk);
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
+ sdhci_pltfm_free(pdev);
return 0;
}
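
Note the reordering in remove(): now that msm_host lives inside the
platform-host allocation, sdhci_pltfm_free() must run after the clock
teardown that still dereferences it:

    clk_disable_unprepare(msm_host->clk);   /* msm_host still valid... */
    clk_disable_unprepare(msm_host->pclk);
    if (!IS_ERR(msm_host->bus_clk))
        clk_disable_unprepare(msm_host->bus_clk);
    sdhci_pltfm_free(pdev);                 /* ...until freed here */
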
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 75379cb0fb35..2e482b13d25e 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/phy/phy.h>
#include "sdhci-pltfm.h"
#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c
@@ -32,9 +33,11 @@
/**
* struct sdhci_arasan_data
* @clk_ahb: Pointer to the AHB clock
+ * @phy: Pointer to the generic phy
*/
struct sdhci_arasan_data {
struct clk *clk_ahb;
+ struct phy *phy;
};
static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
@@ -81,13 +84,22 @@ static int sdhci_arasan_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_suspend_host(host);
if (ret)
return ret;
+ if (!IS_ERR(sdhci_arasan->phy)) {
+ ret = phy_power_off(sdhci_arasan->phy);
+ if (ret) {
+ dev_err(dev, "Cannot power off phy.\n");
+ sdhci_resume_host(host);
+ return ret;
+ }
+ }
+
clk_disable(pltfm_host->clk);
clk_disable(sdhci_arasan->clk_ahb);
@@ -106,7 +118,7 @@ static int sdhci_arasan_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = clk_enable(sdhci_arasan->clk_ahb);
@@ -118,10 +130,17 @@ static int sdhci_arasan_resume(struct device *dev)
ret = clk_enable(pltfm_host->clk);
if (ret) {
dev_err(dev, "Cannot enable SD clock.\n");
- clk_disable(sdhci_arasan->clk_ahb);
return ret;
}
+ if (!IS_ERR(sdhci_arasan->phy)) {
+ ret = phy_power_on(sdhci_arasan->phy);
+ if (ret) {
+ dev_err(dev, "Cannot power on phy.\n");
+ return ret;
+ }
+ }
+
return sdhci_resume_host(host);
}
#endif /* ! CONFIG_PM_SLEEP */
@@ -137,27 +156,32 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
- sdhci_arasan = devm_kzalloc(&pdev->dev, sizeof(*sdhci_arasan),
- GFP_KERNEL);
- if (!sdhci_arasan)
- return -ENOMEM;
+ host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
+ sizeof(*sdhci_arasan));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb");
if (IS_ERR(sdhci_arasan->clk_ahb)) {
dev_err(&pdev->dev, "clk_ahb clock not found.\n");
- return PTR_ERR(sdhci_arasan->clk_ahb);
+ ret = PTR_ERR(sdhci_arasan->clk_ahb);
+ goto err_pltfm_free;
}
clk_xin = devm_clk_get(&pdev->dev, "clk_xin");
if (IS_ERR(clk_xin)) {
dev_err(&pdev->dev, "clk_xin clock not found.\n");
- return PTR_ERR(clk_xin);
+ ret = PTR_ERR(clk_xin);
+ goto err_pltfm_free;
}
ret = clk_prepare_enable(sdhci_arasan->clk_ahb);
if (ret) {
dev_err(&pdev->dev, "Unable to enable AHB clock.\n");
- return ret;
+ goto err_pltfm_free;
}
ret = clk_prepare_enable(clk_xin);
@@ -166,20 +190,7 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_dis_ahb;
}
- host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0);
- if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- goto clk_disable_all;
- }
-
- if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-4.9a")) {
- host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
- }
-
sdhci_get_of_property(pdev);
- pltfm_host = sdhci_priv(host);
- pltfm_host->priv = sdhci_arasan;
pltfm_host->clk = clk_xin;
ret = mmc_of_parse(host->mmc);
@@ -188,31 +199,69 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
goto clk_disable_all;
}
+ sdhci_arasan->phy = ERR_PTR(-ENODEV);
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "arasan,sdhci-5.1")) {
+ sdhci_arasan->phy = devm_phy_get(&pdev->dev,
+ "phy_arasan");
+ if (IS_ERR(sdhci_arasan->phy)) {
+ ret = PTR_ERR(sdhci_arasan->phy);
+ dev_err(&pdev->dev, "No phy for arasan,sdhci-5.1.\n");
+ goto clk_disable_all;
+ }
+
+ ret = phy_init(sdhci_arasan->phy);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_init err.\n");
+ goto clk_disable_all;
+ }
+
+ ret = phy_power_on(sdhci_arasan->phy);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "phy_power_on err.\n");
+ goto err_phy_power;
+ }
+ }
+
ret = sdhci_add_host(host);
if (ret)
- goto err_pltfm_free;
+ goto err_add_host;
return 0;
-err_pltfm_free:
- sdhci_pltfm_free(pdev);
+err_add_host:
+ if (!IS_ERR(sdhci_arasan->phy))
+ phy_power_off(sdhci_arasan->phy);
+err_phy_power:
+ if (!IS_ERR(sdhci_arasan->phy))
+ phy_exit(sdhci_arasan->phy);
clk_disable_all:
clk_disable_unprepare(clk_xin);
clk_dis_ahb:
clk_disable_unprepare(sdhci_arasan->clk_ahb);
-
+err_pltfm_free:
+ sdhci_pltfm_free(pdev);
return ret;
}
static int sdhci_arasan_remove(struct platform_device *pdev)
{
+ int ret;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv;
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ struct clk *clk_ahb = sdhci_arasan->clk_ahb;
- clk_disable_unprepare(sdhci_arasan->clk_ahb);
+ if (!IS_ERR(sdhci_arasan->phy)) {
+ phy_power_off(sdhci_arasan->phy);
+ phy_exit(sdhci_arasan->phy);
+ }
- return sdhci_pltfm_unregister(pdev);
+ ret = sdhci_pltfm_unregister(pdev);
+
+ clk_disable_unprepare(clk_ahb);
+
+ return ret;
}
static const struct of_device_id sdhci_arasan_of_match[] = {
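sdhci_pltfm_unregister() ends in sdhci_pltfm_free(), which releases sdhci_arasan along with the host, so the clock pointer is copied to a local first, roughly:

	struct clk *clk_ahb = sdhci_arasan->clk_ahb;	/* copy before free */

	ret = sdhci_pltfm_unregister(pdev);	/* frees sdhci_arasan */
	clk_disable_unprepare(clk_ahb);		/* safe: uses the copy */

The at91 and st remove paths below cache their clock and reset handles for the same reason.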
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 9cb86fb25976..2703aa90d018 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -18,6 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -58,7 +59,7 @@ static int sdhci_at91_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_at91_priv *priv = pltfm_host->priv;
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_runtime_suspend_host(host);
@@ -74,7 +75,7 @@ static int sdhci_at91_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_at91_priv *priv = pltfm_host->priv;
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = clk_prepare_enable(priv->mainck);
@@ -124,11 +125,12 @@ static int sdhci_at91_probe(struct platform_device *pdev)
return -EINVAL;
soc_data = match->data;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&pdev->dev, "unable to allocate private data\n");
- return -ENOMEM;
- }
+ host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
priv->mainck = devm_clk_get(&pdev->dev, "baseclk");
if (IS_ERR(priv->mainck)) {
@@ -148,10 +150,6 @@ static int sdhci_at91_probe(struct platform_device *pdev)
return PTR_ERR(priv->gck);
}
- host = sdhci_pltfm_init(pdev, soc_data, 0);
- if (IS_ERR(host))
- return PTR_ERR(host);
-
/*
* The mult clock is provided by as a generated clock by the PMC
* controller. In order to set the rate of gck, we have to get the
@@ -191,9 +189,6 @@ static int sdhci_at91_probe(struct platform_device *pdev)
clk_prepare_enable(priv->mainck);
clk_prepare_enable(priv->gck);
- pltfm_host = sdhci_priv(host);
- pltfm_host->priv = priv;
-
ret = mmc_of_parse(host->mmc);
if (ret)
goto clocks_disable_unprepare;
@@ -210,6 +205,25 @@ static int sdhci_at91_probe(struct platform_device *pdev)
if (ret)
goto pm_runtime_disable;
+ /*
+ * When sdhci_runtime_suspend_host() is called, the sdhci layer
+ * assumes that all of the controller's clocks are disabled, so no
+ * irq can be raised while it is runtime suspended. Waking up on a
+ * card detect irq from the controller is therefore not supported.
+ * To combine runtime PM with wake-up on card insertion, we have to
+ * use a GPIO for card detection or fall back to polling. Be aware
+ * that polling will resume/suspend the controller around each
+ * attempt.
+ * Clear SDHCI_QUIRK_BROKEN_CARD_DETECTION to be sure nobody tries
+ * to enable polling via the broken-cd device-tree property.
+ */
+ if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
+ IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) {
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
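With MMC_CAP_NEEDS_POLL set, card detection is driven by the core rescan loop instead of a controller irq. As a sketch of the core behaviour this relies on (from the end of mmc_rescan() in drivers/mmc/core/core.c of this era):

	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);

i.e. the controller gets resumed roughly once a second to poll for a card, which is exactly the resume/suspend cost the comment above warns about.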
@@ -231,7 +245,10 @@ static int sdhci_at91_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_at91_priv *priv = pltfm_host->priv;
+ struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ struct clk *gck = priv->gck;
+ struct clk *hclock = priv->hclock;
+ struct clk *mainck = priv->mainck;
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -239,9 +256,9 @@ static int sdhci_at91_remove(struct platform_device *pdev)
sdhci_pltfm_unregister(pdev);
- clk_disable_unprepare(priv->gck);
- clk_disable_unprepare(priv->hclock);
- clk_disable_unprepare(priv->mainck);
+ clk_disable_unprepare(gck);
+ clk_disable_unprepare(hclock);
+ clk_disable_unprepare(mainck);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 83b1226471c1..3f34d354f1fc 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -49,7 +49,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
int spec_reg, u32 value)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 ret;
/*
@@ -354,7 +354,7 @@ static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
bool applicable;
dma_addr_t dmastart;
dma_addr_t dmanow;
@@ -404,7 +404,7 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_esdhc *esdhc = pltfm_host->priv;
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
int pre_div = 1;
int div = 1;
u32 temp;
@@ -569,15 +569,12 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
u16 host_ver;
pltfm_host = sdhci_priv(host);
- esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc),
- GFP_KERNEL);
+ esdhc = sdhci_pltfm_priv(pltfm_host);
host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
SDHCI_VENDOR_VER_SHIFT;
esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
-
- pltfm_host->priv = esdhc;
}
static int sdhci_esdhc_probe(struct platform_device *pdev)
@@ -591,9 +588,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
np = pdev->dev.of_node;
if (of_get_property(np, "little-endian", NULL))
- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
+ sizeof(struct sdhci_esdhc));
else
- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
+ sizeof(struct sdhci_esdhc));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -603,7 +602,7 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
sdhci_get_of_property(pdev);
pltfm_host = sdhci_priv(host);
- esdhc = pltfm_host->priv;
+ esdhc = sdhci_pltfm_priv(pltfm_host);
if (esdhc->vendor_ver == VENDOR_V_22)
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index df3b8eced8c4..79e19017343e 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_idx = 0;
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
@@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BXTM_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_APL_EMMC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
@@ -1302,7 +1327,6 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
struct pci_dev *pdev;
- int ret = -1;
slot = sdhci_priv(host);
pdev = slot->chip->pdev;
@@ -1314,20 +1338,6 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
"doesn't fully claim to support it.\n");
}
- if (host->flags & SDHCI_USE_64_BIT_DMA) {
- if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) {
- host->flags &= ~SDHCI_USE_64_BIT_DMA;
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret)
- dev_warn(&pdev->dev, "Failed to set 64-bit DMA mask\n");
- }
- }
- if (ret)
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
pci_set_master(pdev);
return 0;
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d1a0b4db60db..89e7151684a1 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -28,6 +28,9 @@
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
+#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca
+#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc
+#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
new file mode 100644
index 000000000000..059df707a2fe
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -0,0 +1,257 @@
+/*
+ * Support for SDHCI platform devices on Microchip PIC32.
+ *
+ * Copyright (C) 2015 Microchip
+ * Andrei Pistirica, Paul Thacker
+ *
+ * Inspired by sdhci-pltfm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+#include <linux/platform_data/sdhci-pic32.h>
+
+#define SDH_SHARED_BUS_CTRL 0x000000E0
+#define SDH_SHARED_BUS_NR_CLK_PINS_MASK 0x7
+#define SDH_SHARED_BUS_NR_IRQ_PINS_MASK 0x30
+#define SDH_SHARED_BUS_CLK_PINS 0x10
+#define SDH_SHARED_BUS_IRQ_PINS 0x14
+#define SDH_CAPS_SDH_SLOT_TYPE_MASK 0xC0000000
+#define SDH_SLOT_TYPE_REMOVABLE 0x0
+#define SDH_SLOT_TYPE_EMBEDDED 0x1
+#define SDH_SLOT_TYPE_SHARED_BUS 0x2
+#define SDHCI_CTRL_CDSSEL 0x80
+#define SDHCI_CTRL_CDTLVL 0x40
+
+#define ADMA_FIFO_RD_THSHLD 512
+#define ADMA_FIFO_WR_THSHLD 512
+
+struct pic32_sdhci_priv {
+ struct platform_device *pdev;
+ struct clk *sys_clk;
+ struct clk *base_clk;
+};
+
+static unsigned int pic32_sdhci_get_max_clock(struct sdhci_host *host)
+{
+ struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
+
+ return clk_get_rate(sdhci_pdata->base_clk);
+}
+
+static void pic32_sdhci_set_bus_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ if (width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ } else {
+ if (host->version >= SDHCI_SPEC_300)
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+
+ /* CD select and test bits must be set for errata workaround. */
+ ctrl &= ~SDHCI_CTRL_CDTLVL;
+ ctrl |= SDHCI_CTRL_CDSSEL;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static unsigned int pic32_sdhci_get_ro(struct sdhci_host *host)
+{
+ /*
+ * The SDHCI_WRITE_PROTECT bit is unstable on current hardware so we
+ * can't depend on its value in any way.
+ */
+ return 0;
+}
+
+static const struct sdhci_ops pic32_sdhci_ops = {
+ .get_max_clock = pic32_sdhci_get_max_clock,
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = pic32_sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .get_ro = pic32_sdhci_get_ro,
+};
+
+static struct sdhci_pltfm_data sdhci_pic32_pdata = {
+ .ops = &pic32_sdhci_ops,
+ .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
+ .quirks2 = SDHCI_QUIRK2_NO_1_8_V,
+};
+
+static void pic32_sdhci_shared_bus(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ u32 bus = readl(host->ioaddr + SDH_SHARED_BUS_CTRL);
+ u32 clk_pins = (bus & SDH_SHARED_BUS_NR_CLK_PINS_MASK) >> 0;
+ u32 irq_pins = (bus & SDH_SHARED_BUS_NR_IRQ_PINS_MASK) >> 4;
+
+ /* select first clock */
+ if (clk_pins & 1)
+ bus |= (1 << SDH_SHARED_BUS_CLK_PINS);
+
+ /* select first interrupt */
+ if (irq_pins & 1)
+ bus |= (1 << SDH_SHARED_BUS_IRQ_PINS);
+
+ writel(bus, host->ioaddr + SDH_SHARED_BUS_CTRL);
+}
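A worked decode of the shared-bus register, assuming a read-back value of 0x13:

	clk_pins = 0x13 & 0x7;			/* = 3 clock pins present */
	irq_pins = (0x13 & 0x30) >> 4;		/* = 1 irq pin present */
	/* low bit set in both, so the first clk and irq pins get selected: */
	bus |= (1 << 16) | (1 << 20);		/* SDH_SHARED_BUS_{CLK,IRQ}_PINS */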
+
+static int pic32_sdhci_probe_platform(struct platform_device *pdev,
+ struct pic32_sdhci_priv *pdata)
+{
+ int ret = 0;
+ u32 caps_slot_type;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+
+ /* Check card slot connected on shared bus. */
+ host->caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
+ caps_slot_type = (host->caps & SDH_CAPS_SDH_SLOT_TYPE_MASK) >> 30;
+ if (caps_slot_type == SDH_SLOT_TYPE_SHARED_BUS)
+ pic32_sdhci_shared_bus(pdev);
+
+ return ret;
+}
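The slot type sits in bits [31:30] of the capabilities register, hence the mask and shift above; e.g. caps = 0x80000000 decodes as (caps & 0xC0000000) >> 30 == 0x2, i.e. SDH_SLOT_TYPE_SHARED_BUS, the only case needing extra setup.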
+
+static int pic32_sdhci_probe(struct platform_device *pdev)
+{
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct pic32_sdhci_priv *sdhci_pdata;
+ struct pic32_sdhci_platform_data *plat_data;
+ int ret;
+
+ host = sdhci_pltfm_init(pdev, &sdhci_pic32_pdata,
+ sizeof(struct pic32_sdhci_priv));
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto err;
+ }
+
+ pltfm_host = sdhci_priv(host);
+ sdhci_pdata = sdhci_pltfm_priv(pltfm_host);
+
+ plat_data = pdev->dev.platform_data;
+ if (plat_data && plat_data->setup_dma) {
+ ret = plat_data->setup_dma(ADMA_FIFO_RD_THSHLD,
+ ADMA_FIFO_WR_THSHLD);
+ if (ret)
+ goto err_host;
+ }
+
+ sdhci_pdata->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(sdhci_pdata->sys_clk)) {
+ ret = PTR_ERR(sdhci_pdata->sys_clk);
+ dev_err(&pdev->dev, "Error getting clock\n");
+ goto err_host;
+ }
+
+ ret = clk_prepare_enable(sdhci_pdata->sys_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Error enabling clock\n");
+ goto err_host;
+ }
+
+ sdhci_pdata->base_clk = devm_clk_get(&pdev->dev, "base_clk");
+ if (IS_ERR(sdhci_pdata->base_clk)) {
+ ret = PTR_ERR(sdhci_pdata->base_clk);
+ dev_err(&pdev->dev, "Error getting clock\n");
+ goto err_sys_clk;
+ }
+
+ ret = clk_prepare_enable(sdhci_pdata->base_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Error enabling clock\n");
+ goto err_base_clk;
+ }
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err_base_clk;
+
+ ret = pic32_sdhci_probe_platform(pdev, sdhci_pdata);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to probe platform!\n");
+ goto err_base_clk;
+ }
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding host\n");
+ goto err_base_clk;
+ }
+
+ dev_info(&pdev->dev, "Successfully added sdhci host\n");
+ return 0;
+
+err_base_clk:
+ clk_disable_unprepare(sdhci_pdata->base_clk);
+err_sys_clk:
+ clk_disable_unprepare(sdhci_pdata->sys_clk);
+err_host:
+ sdhci_pltfm_free(pdev);
+err:
+ dev_err(&pdev->dev, "pic32-sdhci probe failed: %d\n", ret);
+ return ret;
+}
+
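The error labels unwind in reverse order of acquisition, each undoing only what already succeeded:

	err_base_clk:	clk_disable_unprepare(base_clk);	/* base_clk was enabled */
	err_sys_clk:	clk_disable_unprepare(sys_clk);		/* sys_clk was enabled */
	err_host:	sdhci_pltfm_free(pdev);			/* host was allocated */

which is why a failed clk_prepare_enable() of base_clk jumps to err_sys_clk rather than err_base_clk.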
+static int pic32_sdhci_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pic32_sdhci_priv *sdhci_pdata = sdhci_pltfm_priv(pltfm_host);
+ u32 scratch;
+
+ scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+ sdhci_remove_host(host, scratch == (u32)~0);
+ clk_disable_unprepare(sdhci_pdata->base_clk);
+ clk_disable_unprepare(sdhci_pdata->sys_clk);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_sdhci_id_table[] = {
+ { .compatible = "microchip,pic32mzda-sdhci" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pic32_sdhci_id_table);
+
+static struct platform_driver pic32_sdhci_driver = {
+ .driver = {
+ .name = "pic32-sdhci",
+ .of_match_table = of_match_ptr(pic32_sdhci_id_table),
+ },
+ .probe = pic32_sdhci_probe,
+ .remove = pic32_sdhci_remove,
+};
+
+module_platform_driver(pic32_sdhci_driver);
+
+MODULE_DESCRIPTION("Microchip PIC32 SDHCI driver");
+MODULE_AUTHOR("Pistirica Sorin Andrei & Sandeep Sheriker");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 04bc2481e5c3..d38053bf9e4d 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -23,7 +23,6 @@ struct sdhci_pltfm_data {
struct sdhci_pltfm_host {
struct clk *clk;
- void *priv; /* to handle quirks across io-accessor calls */
/* migrate from sdhci_of_host */
unsigned int clock;
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index beffd8615489..1d8dd3540636 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -177,7 +177,6 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- pltfm_host->priv = NULL;
clk = clk_get(dev, "PXA-SDHCLK");
if (IS_ERR(clk)) {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index f5edf9d3a18a..30132500aa1c 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -132,11 +132,15 @@ static int armada_38x_quirks(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
struct resource *res;
host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"conf-sdio3");
if (res) {
@@ -150,7 +154,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
* Configuration register, if the adjustment is not done,
* remove them from the capabilities.
*/
- host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
@@ -161,7 +164,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
* controller has different capabilities than the ones shown
* in its registers
*/
- host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
if (of_property_read_bool(np, "no-1-8-v")) {
host->caps &= ~SDHCI_CAN_VDD_180;
host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
@@ -201,7 +203,7 @@ static void pxav3_reset(struct sdhci_host *host, u8 mask)
static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
u16 tmp;
int count;
@@ -250,7 +252,7 @@ static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
u16 ctrl_2;
/*
@@ -307,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
__func__, uhs, ctrl_2);
}
+static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+ u8 pwr = host->pwr;
+
+ sdhci_set_power(host, mode, vdd);
+
+ if (host->pwr == pwr)
+ return;
+
+ if (host->pwr == 0)
+ vdd = 0;
+
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+ }
+}
+
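mmc_regulator_set_ocr() may sleep, while host->lock is a spinlock held across the ios path, hence the unlock/relock window around the regulator call above.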
static const struct sdhci_ops pxav3_sdhci_ops = {
.set_clock = sdhci_set_clock,
+ .set_power = pxav3_set_power,
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -370,16 +394,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
const struct of_device_id *match;
int ret;
- pxa = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_pxa), GFP_KERNEL);
- if (!pxa)
- return -ENOMEM;
-
- host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, sizeof(*pxa));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- pltfm_host->priv = pxa;
+ pxa = sdhci_pltfm_priv(pltfm_host);
pxa->clk_io = devm_clk_get(dev, "io");
if (IS_ERR(pxa->clk_io))
@@ -486,7 +506,7 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -535,7 +555,7 @@ static int sdhci_pxav3_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
int ret;
ret = sdhci_runtime_suspend_host(host);
@@ -553,7 +573,7 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_pxa *pxa = pltfm_host->priv;
+ struct sdhci_pxa *pxa = sdhci_pltfm_priv(pltfm_host);
clk_prepare_enable(pxa->clk_io);
if (!IS_ERR(pxa->clk_core))
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index 969c2b0d57fd..320e1c2f8853 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -251,7 +251,7 @@ static int sdhci_st_set_dll_for_clock(struct sdhci_host *host)
{
int ret = 0;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
if (host->clock > CLK_TO_CHECK_DLL_LOCK) {
st_mmcss_set_dll(pdata->top_ioaddr);
@@ -265,7 +265,7 @@ static void sdhci_st_set_uhs_signaling(struct sdhci_host *host,
unsigned int uhs)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
u16 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
int ret = 0;
@@ -357,10 +357,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
int ret = 0;
u16 host_version;
struct resource *res;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
+ struct reset_control *rstc;
clk = devm_clk_get(&pdev->dev, "mmc");
if (IS_ERR(clk)) {
@@ -368,19 +365,23 @@ static int sdhci_st_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
- pdata->rstc = devm_reset_control_get(&pdev->dev, NULL);
- if (IS_ERR(pdata->rstc))
- pdata->rstc = NULL;
+ rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rstc))
+ rstc = NULL;
else
- reset_control_deassert(pdata->rstc);
+ reset_control_deassert(rstc);
- host = sdhci_pltfm_init(pdev, &sdhci_st_pdata, 0);
+ host = sdhci_pltfm_init(pdev, &sdhci_st_pdata, sizeof(*pdata));
if (IS_ERR(host)) {
dev_err(&pdev->dev, "Failed sdhci_pltfm_init\n");
ret = PTR_ERR(host);
goto err_pltfm_init;
}
+ pltfm_host = sdhci_priv(host);
+ pdata = sdhci_pltfm_priv(pltfm_host);
+ pdata->rstc = rstc;
+
ret = mmc_of_parse(host->mmc);
if (ret) {
dev_err(&pdev->dev, "Failed mmc_of_parse\n");
@@ -398,8 +399,6 @@ static int sdhci_st_probe(struct platform_device *pdev)
pdata->top_ioaddr = NULL;
}
- pltfm_host = sdhci_priv(host);
- pltfm_host->priv = pdata;
pltfm_host->clk = clk;
/* Configure the Arasan HC inside the flashSS */
@@ -427,8 +426,8 @@ err_out:
err_of:
sdhci_pltfm_free(pdev);
err_pltfm_init:
- if (pdata->rstc)
- reset_control_assert(pdata->rstc);
+ if (rstc)
+ reset_control_assert(rstc);
return ret;
}
@@ -437,13 +436,14 @@ static int sdhci_st_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
+ struct reset_control *rstc = pdata->rstc;
int ret;
ret = sdhci_pltfm_unregister(pdev);
- if (pdata->rstc)
- reset_control_assert(pdata->rstc);
+ if (rstc)
+ reset_control_assert(rstc);
return ret;
}
@@ -453,7 +453,7 @@ static int sdhci_st_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
int ret = sdhci_suspend_host(host);
if (ret)
@@ -471,7 +471,7 @@ static int sdhci_st_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct st_mmc_platform_data *pdata = pltfm_host->priv;
+ struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
struct device_node *np = dev->of_node;
clk_prepare_enable(pltfm_host->clk);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 83c4bf7bc16c..bcc0de47fe7e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -12,6 +12,7 @@
*
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -42,12 +43,17 @@
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200
+#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
+#define SDHCI_AUTO_CAL_START BIT(31)
+#define SDHCI_AUTO_CAL_ENABLE BIT(29)
+
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
#define NVQUIRK_ENABLE_SDR50 BIT(3)
#define NVQUIRK_ENABLE_SDR104 BIT(4)
#define NVQUIRK_ENABLE_DDR50 BIT(5)
+#define NVQUIRK_HAS_PADCALIB BIT(6)
struct sdhci_tegra_soc_data {
const struct sdhci_pltfm_data *pdata;
@@ -58,12 +64,13 @@ struct sdhci_tegra {
const struct sdhci_tegra_soc_data *soc_data;
struct gpio_desc *power_gpio;
bool ddr_signaling;
+ bool pad_calib_required;
};
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
@@ -99,7 +106,7 @@ static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
/* Seems like we're getting spurious timeout and crc errors, so
@@ -131,7 +138,7 @@ static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
u32 misc_ctrl, clk_ctrl;
@@ -147,10 +154,16 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
/* Advertise UHS modes as supported by host */
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50;
if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
+ else
+ misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104;
sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
@@ -159,6 +172,9 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
+ if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
+ tegra_host->pad_calib_required = true;
+
tegra_host->ddr_signaling = false;
}
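Together with tegra_sdhci_voltage_switch() added further down, this yields the pad re-calibration flow, roughly:

	tegra_sdhci_reset()		/* pad_calib_required = true */
	tegra_sdhci_voltage_switch()	/* pad_calib_required = true */
	tegra_sdhci_set_clock()		/* clock != 0: run autocalib once,
					   then clear the flag */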
@@ -181,27 +197,43 @@ static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
+static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
+{
+ u32 val;
+
+ mdelay(1);
+
+ val = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+ val |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
+ sdhci_writel(host, val, SDHCI_TEGRA_AUTO_CAL_CONFIG);
+}
+
static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
unsigned long host_clk;
if (!clock)
- return;
+ return sdhci_set_clock(host, clock);
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
clk_set_rate(pltfm_host->clk, host_clk);
host->max_clk = clk_get_rate(pltfm_host->clk);
- return sdhci_set_clock(host, clock);
+ sdhci_set_clock(host, clock);
+
+ if (tegra_host->pad_calib_required) {
+ tegra_sdhci_pad_autocalib(host);
+ tegra_host->pad_calib_required = false;
+ }
}
static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
unsigned timing)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
if (timing == MMC_TIMING_UHS_DDR50)
tegra_host->ddr_signaling = true;
@@ -264,6 +296,16 @@ static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
return mmc_send_tuning(host->mmc, opcode, NULL);
}
+static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+ if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
+ tegra_host->pad_calib_required = true;
+}
+
static const struct sdhci_ops tegra_sdhci_ops = {
.get_ro = tegra_sdhci_get_ro,
.read_w = tegra_sdhci_readw,
@@ -273,6 +315,7 @@ static const struct sdhci_ops tegra_sdhci_ops = {
.reset = tegra_sdhci_reset,
.platform_execute_tuning = tegra_sdhci_execute_tuning,
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+ .voltage_switch = tegra_sdhci_voltage_switch,
.get_max_clock = tegra_sdhci_get_max_clock,
};
@@ -306,7 +349,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_SDR104,
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_PADCALIB,
};
static const struct sdhci_ops tegra114_sdhci_ops = {
@@ -319,6 +363,7 @@ static const struct sdhci_ops tegra114_sdhci_ops = {
.reset = tegra_sdhci_reset,
.platform_execute_tuning = tegra_sdhci_execute_tuning,
.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
+ .voltage_switch = tegra_sdhci_voltage_switch,
.get_max_clock = tegra_sdhci_get_max_clock,
};
@@ -335,9 +380,6 @@ static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
.pdata = &sdhci_tegra114_pdata,
- .nvquirks = NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_DDR50 |
- NVQUIRK_ENABLE_SDR104,
};
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
@@ -380,20 +422,15 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
return -EINVAL;
soc_data = match->data;
- host = sdhci_pltfm_init(pdev, soc_data->pdata, 0);
+ host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
if (IS_ERR(host))
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
- if (!tegra_host) {
- dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
- rc = -ENOMEM;
- goto err_alloc_tegra_host;
- }
+ tegra_host = sdhci_pltfm_priv(pltfm_host);
tegra_host->ddr_signaling = false;
+ tegra_host->pad_calib_required = false;
tegra_host->soc_data = soc_data;
- pltfm_host->priv = tegra_host;
rc = mmc_of_parse(host->mmc);
if (rc)
@@ -429,7 +466,6 @@ err_add_host:
err_clk_get:
err_power_req:
err_parse_dt:
-err_alloc_tegra_host:
sdhci_pltfm_free(pdev);
return rc;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index add9fdfd1d8f..6bd3d1794966 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -53,8 +53,6 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-static int sdhci_pre_dma_transfer(struct sdhci_host *host,
- struct mmc_data *data);
static int sdhci_do_get_cd(struct sdhci_host *host);
#ifdef CONFIG_PM
@@ -428,6 +426,31 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
DBG("PIO transfer complete.\n");
}
+static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+ struct mmc_data *data, int cookie)
+{
+ int sg_count;
+
+ /*
+ * If the data buffers are already mapped, return the previous
+ * dma_map_sg() result.
+ */
+ if (data->host_cookie == COOKIE_PRE_MAPPED)
+ return data->sg_count;
+
+ sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (sg_count == 0)
+ return -ENOSPC;
+
+ data->sg_count = sg_count;
+ data->host_cookie = cookie;
+
+ return sg_count;
+}
+
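The cookie argument records who created the mapping so that setup and teardown stay symmetric; over one request the intended flow, using names from this file, is roughly:

	sdhci_pre_req()		/* optional: maps as COOKIE_PRE_MAPPED */
	sdhci_prepare_data()	/* reuses that, or maps as COOKIE_MAPPED */
	sdhci_tasklet_finish()	/* unmaps only COOKIE_MAPPED */
	sdhci_post_req()	/* unmaps whatever is still mapped */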
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
local_irq_save(*flags);
@@ -462,41 +485,22 @@ static void sdhci_adma_mark_end(void *desc)
dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
-static int sdhci_adma_table_pre(struct sdhci_host *host,
- struct mmc_data *data)
+static void sdhci_adma_table_pre(struct sdhci_host *host,
+ struct mmc_data *data, int sg_count)
{
- int direction;
-
- void *desc;
- void *align;
- dma_addr_t addr;
- dma_addr_t align_addr;
- int len, offset;
-
struct scatterlist *sg;
- int i;
- char *buffer;
unsigned long flags;
+ dma_addr_t addr, align_addr;
+ void *desc, *align;
+ char *buffer;
+ int len, offset, i;
/*
* The spec does not specify endianness of descriptor table.
* We currently guess that it is LE.
*/
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
-
- host->align_addr = dma_map_single(mmc_dev(host->mmc),
- host->align_buffer, host->align_buffer_sz, direction);
- if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
- goto fail;
- BUG_ON(host->align_addr & SDHCI_ADMA2_MASK);
-
- host->sg_count = sdhci_pre_dma_transfer(host, data);
- if (host->sg_count < 0)
- goto unmap_align;
+ host->sg_count = sg_count;
desc = host->adma_table;
align = host->align_buffer;
@@ -508,10 +512,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
len = sg_dma_len(sg);
/*
- * The SDHCI specification states that ADMA
- * addresses must be 32-bit aligned. If they
- * aren't, then we use a bounce buffer for
- * the (up to three) bytes that screw up the
+ * The SDHCI specification states that ADMA addresses must
+ * be 32-bit aligned. If they aren't, then we use a bounce
+ * buffer for the (up to three) bytes that screw up the
* alignment.
*/
offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
@@ -555,92 +558,56 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
}
if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
- /*
- * Mark the last descriptor as the terminating descriptor
- */
+ /* Mark the last descriptor as the terminating descriptor */
if (desc != host->adma_table) {
desc -= host->desc_sz;
sdhci_adma_mark_end(desc);
}
} else {
- /*
- * Add a terminating entry.
- */
-
- /* nop, end, valid */
+ /* Add a terminating entry - nop, end, valid */
sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
}
-
- /*
- * Resync align buffer as we might have changed it.
- */
- if (data->flags & MMC_DATA_WRITE) {
- dma_sync_single_for_device(mmc_dev(host->mmc),
- host->align_addr, host->align_buffer_sz, direction);
- }
-
- return 0;
-
-unmap_align:
- dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
- host->align_buffer_sz, direction);
-fail:
- return -EINVAL;
}
static void sdhci_adma_table_post(struct sdhci_host *host,
struct mmc_data *data)
{
- int direction;
-
struct scatterlist *sg;
int i, size;
void *align;
char *buffer;
unsigned long flags;
- bool has_unaligned;
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
-
- dma_unmap_single(mmc_dev(host->mmc), host->align_addr,
- host->align_buffer_sz, direction);
+ if (data->flags & MMC_DATA_READ) {
+ bool has_unaligned = false;
- /* Do a quick scan of the SG list for any unaligned mappings */
- has_unaligned = false;
- for_each_sg(data->sg, sg, host->sg_count, i)
- if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
- has_unaligned = true;
- break;
- }
+ /* Do a quick scan of the SG list for any unaligned mappings */
+ for_each_sg(data->sg, sg, host->sg_count, i)
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+ has_unaligned = true;
+ break;
+ }
- if (has_unaligned && data->flags & MMC_DATA_READ) {
- dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
- data->sg_len, direction);
+ if (has_unaligned) {
+ dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
+ data->sg_len, DMA_FROM_DEVICE);
- align = host->align_buffer;
+ align = host->align_buffer;
- for_each_sg(data->sg, sg, host->sg_count, i) {
- if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
- size = SDHCI_ADMA2_ALIGN -
- (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
+ for_each_sg(data->sg, sg, host->sg_count, i) {
+ if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
+ size = SDHCI_ADMA2_ALIGN -
+ (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
- buffer = sdhci_kmap_atomic(sg, &flags);
- memcpy(buffer, align, size);
- sdhci_kunmap_atomic(buffer, &flags);
+ buffer = sdhci_kmap_atomic(sg, &flags);
+ memcpy(buffer, align, size);
+ sdhci_kunmap_atomic(buffer, &flags);
- align += SDHCI_ADMA2_ALIGN;
+ align += SDHCI_ADMA2_ALIGN;
+ }
}
}
}
-
- if (data->host_cookie == COOKIE_MAPPED) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, direction);
- data->host_cookie = COOKIE_UNMAPPED;
- }
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
@@ -666,9 +633,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
if (!data)
target_timeout = cmd->busy_timeout * 1000;
else {
- target_timeout = data->timeout_ns / 1000;
- if (host->clock)
- target_timeout += data->timeout_clks / host->clock;
+ target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
+ if (host->clock && data->timeout_clks) {
+ unsigned long long val;
+
+ /*
+ * data->timeout_clks is in units of clock cycles.
+ * host->clock is in Hz. target_timeout is in us.
+ * Hence, us = 1000000 * cycles / Hz. Round up.
+ */
+ val = 1000000 * data->timeout_clks;
+ if (do_div(val, host->clock))
+ target_timeout++;
+ target_timeout += val;
+ }
}
/*
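As a worked example of the rounding above, assume data->timeout_clks = 255 at host->clock = 400 kHz: val = 1000000 * 255 = 255000000, and do_div(val, 400000) yields 637 with a non-zero remainder, so target_timeout gains 637 + 1 = 638 us.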
@@ -729,7 +707,6 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
u8 ctrl;
struct mmc_data *data = cmd->data;
- int ret;
WARN_ON(host->data);
@@ -748,63 +725,48 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
host->data_early = 0;
host->data->bytes_xfered = 0;
- if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
- host->flags |= SDHCI_REQ_USE_DMA;
-
- /*
- * FIXME: This doesn't account for merging when mapping the
- * scatterlist.
- */
- if (host->flags & SDHCI_REQ_USE_DMA) {
- int broken, i;
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
struct scatterlist *sg;
+ unsigned int length_mask, offset_mask;
+ int i;
- broken = 0;
+ host->flags |= SDHCI_REQ_USE_DMA;
+
+ /*
+ * FIXME: This doesn't account for merging when mapping the
+ * scatterlist.
+ *
+ * The assumption here being that alignment and lengths are
+ * the same after DMA mapping to device address space.
+ */
+ length_mask = 0;
+ offset_mask = 0;
if (host->flags & SDHCI_USE_ADMA) {
- if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
- broken = 1;
+ if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
+ length_mask = 3;
+ /*
+ * As we use up to 3 byte chunks to work
+ * around alignment problems, we need to
+ * check the offset as well.
+ */
+ offset_mask = 3;
+ }
} else {
if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
- broken = 1;
+ length_mask = 3;
+ if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
+ offset_mask = 3;
}
- if (unlikely(broken)) {
+ if (unlikely(length_mask | offset_mask)) {
for_each_sg(data->sg, sg, data->sg_len, i) {
- if (sg->length & 0x3) {
+ if (sg->length & length_mask) {
DBG("Reverting to PIO because of transfer size (%d)\n",
- sg->length);
+ sg->length);
host->flags &= ~SDHCI_REQ_USE_DMA;
break;
}
- }
- }
- }
-
- /*
- * The assumption here being that alignment is the same after
- * translation to device address space.
- */
- if (host->flags & SDHCI_REQ_USE_DMA) {
- int broken, i;
- struct scatterlist *sg;
-
- broken = 0;
- if (host->flags & SDHCI_USE_ADMA) {
- /*
- * As we use 3 byte chunks to work around
- * alignment problems, we need to check this
- * quirk.
- */
- if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE)
- broken = 1;
- } else {
- if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
- broken = 1;
- }
-
- if (unlikely(broken)) {
- for_each_sg(data->sg, sg, data->sg_len, i) {
- if (sg->offset & 0x3) {
+ if (sg->offset & offset_mask) {
DBG("Reverting to PIO because of bad alignment\n");
host->flags &= ~SDHCI_REQ_USE_DMA;
break;
@@ -814,39 +776,27 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
}
if (host->flags & SDHCI_REQ_USE_DMA) {
- if (host->flags & SDHCI_USE_ADMA) {
- ret = sdhci_adma_table_pre(host, data);
- if (ret) {
- /*
- * This only happens when someone fed
- * us an invalid request.
- */
- WARN_ON(1);
- host->flags &= ~SDHCI_REQ_USE_DMA;
- } else {
- sdhci_writel(host, host->adma_addr,
- SDHCI_ADMA_ADDRESS);
- if (host->flags & SDHCI_USE_64_BIT_DMA)
- sdhci_writel(host,
- (u64)host->adma_addr >> 32,
- SDHCI_ADMA_ADDRESS_HI);
- }
- } else {
- int sg_cnt;
+ int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
- sg_cnt = sdhci_pre_dma_transfer(host, data);
- if (sg_cnt <= 0) {
- /*
- * This only happens when someone fed
- * us an invalid request.
- */
- WARN_ON(1);
- host->flags &= ~SDHCI_REQ_USE_DMA;
- } else {
- WARN_ON(sg_cnt != 1);
- sdhci_writel(host, sg_dma_address(data->sg),
- SDHCI_DMA_ADDRESS);
- }
+ if (sg_cnt <= 0) {
+ /*
+ * This only happens when someone fed
+ * us an invalid request.
+ */
+ WARN_ON(1);
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+ } else if (host->flags & SDHCI_USE_ADMA) {
+ sdhci_adma_table_pre(host, data, sg_cnt);
+
+ sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ sdhci_writel(host,
+ (u64)host->adma_addr >> 32,
+ SDHCI_ADMA_ADDRESS_HI);
+ } else {
+ WARN_ON(sg_cnt != 1);
+ sdhci_writel(host, sg_dma_address(data->sg),
+ SDHCI_DMA_ADDRESS);
}
}
@@ -946,19 +896,9 @@ static void sdhci_finish_data(struct sdhci_host *host)
data = host->data;
host->data = NULL;
- if (host->flags & SDHCI_REQ_USE_DMA) {
- if (host->flags & SDHCI_USE_ADMA)
- sdhci_adma_table_post(host, data);
- else {
- if (data->host_cookie == COOKIE_MAPPED) {
- dma_unmap_sg(mmc_dev(host->mmc),
- data->sg, data->sg_len,
- (data->flags & MMC_DATA_READ) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- data->host_cookie = COOKIE_UNMAPPED;
- }
- }
- }
+ if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
+ (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
+ sdhci_adma_table_post(host, data);
/*
* The specification states that the block count register must
@@ -1003,6 +943,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
WARN_ON(host->cmd);
+ /* Initially, a command has no error */
+ cmd->error = 0;
+
/* Wait max 10 ms */
timeout = 10;
@@ -1097,8 +1040,6 @@ static void sdhci_finish_command(struct sdhci_host *host)
}
}
- host->cmd->error = 0;
-
/* Finished CMD23, now send actual command. */
if (host->cmd == host->mrq->sbc) {
host->cmd = NULL;
@@ -1269,10 +1210,24 @@ clock_set:
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
-static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
+
+ spin_unlock_irq(&host->lock);
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ spin_lock_irq(&host->lock);
+
+ if (mode != MMC_POWER_OFF)
+ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ else
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+}
+
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
u8 pwr = 0;
if (mode != MMC_POWER_OFF) {
@@ -1304,7 +1259,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_off(host);
- vdd = 0;
} else {
/*
* Spec says that we should clear the power reg before setting
@@ -1335,12 +1289,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
mdelay(10);
}
+}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
- if (!IS_ERR(mmc->supply.vmmc)) {
- spin_unlock_irq(&host->lock);
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
- spin_lock_irq(&host->lock);
- }
+static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (host->ops->set_power)
+ host->ops->set_power(host, mode, vdd);
+ else if (!IS_ERR(mmc->supply.vmmc))
+ sdhci_set_power_reg(host, mode, vdd);
+ else
+ sdhci_set_power(host, mode, vdd);
}
/*****************************************************************************\
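The dispatch order in __sdhci_set_power() is: a vendor .set_power hook wins, otherwise a present vmmc regulator is driven via sdhci_set_power_reg(), otherwise the plain register path runs. Exporting sdhci_set_power() lets a hook reuse the register programming; a hypothetical override (foo_set_power is not in the tree) might look like:

	static void foo_set_power(struct sdhci_host *host, unsigned char mode,
				  unsigned short vdd)
	{
		sdhci_set_power(host, mode, vdd);	/* program SDHCI_POWER_CONTROL */
		/* vendor-specific regulator or pad handling goes here */
	}

pxav3_set_power() earlier in this series is the first in-tree user of the hook.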
@@ -1490,7 +1452,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
}
}
- sdhci_set_power(host, ios->power_mode, ios->vdd);
+ __sdhci_set_power(host, ios->power_mode, ios->vdd);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -2114,39 +2076,12 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct sdhci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- if (host->flags & SDHCI_REQ_USE_DMA) {
- if (data->host_cookie == COOKIE_GIVEN ||
- data->host_cookie == COOKIE_MAPPED)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- data->host_cookie = COOKIE_UNMAPPED;
- }
-}
-
-static int sdhci_pre_dma_transfer(struct sdhci_host *host,
- struct mmc_data *data)
-{
- int sg_count;
-
- if (data->host_cookie == COOKIE_MAPPED) {
- data->host_cookie = COOKIE_GIVEN;
- return data->sg_count;
- }
-
- WARN_ON(data->host_cookie == COOKIE_GIVEN);
-
- sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
- if (sg_count == 0)
- return -ENOSPC;
-
- data->sg_count = sg_count;
- data->host_cookie = COOKIE_MAPPED;
+ if (data->host_cookie != COOKIE_UNMAPPED)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
- return sg_count;
+ data->host_cookie = COOKIE_UNMAPPED;
}
static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -2157,7 +2092,7 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
mrq->data->host_cookie = COOKIE_UNMAPPED;
if (host->flags & SDHCI_REQ_USE_DMA)
- sdhci_pre_dma_transfer(host, mrq->data);
+ sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
static void sdhci_card_event(struct mmc_host *mmc)
@@ -2238,6 +2173,22 @@ static void sdhci_tasklet_finish(unsigned long param)
mrq = host->mrq;
/*
+ * Always unmap the data buffers if they were mapped by
+ * sdhci_prepare_data() whenever we finish with a request.
+ * This avoids leaking DMA mappings on error.
+ */
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+ struct mmc_data *data = mrq->data;
+
+ if (data && data->host_cookie == COOKIE_MAPPED) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ data->host_cookie = COOKIE_UNMAPPED;
+ }
+ }
+
+ /*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
@@ -2322,13 +2273,30 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- if (intmask & SDHCI_INT_TIMEOUT)
- host->cmd->error = -ETIMEDOUT;
- else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
- SDHCI_INT_INDEX))
- host->cmd->error = -EILSEQ;
+ if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
+ SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
+ if (intmask & SDHCI_INT_TIMEOUT)
+ host->cmd->error = -ETIMEDOUT;
+ else
+ host->cmd->error = -EILSEQ;
+
+ /*
+ * If this command initiates a data phase and a response
+ * CRC error is signalled, the card can start transferring
+ * data - the card may have received the command without
+ * error. We must not terminate the mmc_request early.
+ *
+ * If the card did not receive the command or returned an
+ * error which prevented it sending data, the data phase
+ * will time out.
+ */
+ if (host->cmd->data &&
+ (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+ SDHCI_INT_CRC) {
+ host->cmd = NULL;
+ return;
+ }
- if (host->cmd->error) {
tasklet_schedule(&host->finish_tasklet);
return;
}
@@ -2857,6 +2825,36 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
+static int sdhci_set_dma_mask(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+ struct device *dev = mmc_dev(mmc);
+ int ret = -EINVAL;
+
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
+ host->flags &= ~SDHCI_USE_64_BIT_DMA;
+
+ /* Try 64-bit mask if hardware is capable of it */
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ pr_warn("%s: Failed to set 64-bit DMA mask.\n",
+ mmc_hostname(mmc));
+ host->flags &= ~SDHCI_USE_64_BIT_DMA;
+ }
+ }
+
+ /* 32-bit mask as default & fallback */
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ pr_warn("%s: Failed to set 32-bit DMA mask.\n",
+ mmc_hostname(mmc));
+ }
+
+ return ret;
+}
+
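dma_set_mask_and_coherent() returns 0 on success, so ret doubles as the fall-back condition. For reference, DMA_BIT_MASK() in <linux/dma-mapping.h> is:

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

so DMA_BIT_MASK(32) is 0xffffffff, and the n == 64 special case avoids an undefined full-width shift.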
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
@@ -2928,17 +2926,21 @@ int sdhci_add_host(struct sdhci_host *host)
* SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
* implement.
*/
- if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
+ if (caps[0] & SDHCI_CAN_64BIT)
host->flags |= SDHCI_USE_64_BIT_DMA;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
- if (host->ops->enable_dma) {
- if (host->ops->enable_dma(host)) {
- pr_warn("%s: No suitable DMA available - falling back to PIO\n",
- mmc_hostname(mmc));
- host->flags &=
- ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
- }
+ ret = sdhci_set_dma_mask(host);
+
+ if (!ret && host->ops->enable_dma)
+ ret = host->ops->enable_dma(host);
+
+ if (ret) {
+ pr_warn("%s: No suitable DMA available - falling back to PIO\n",
+ mmc_hostname(mmc));
+ host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
+
+ ret = 0;
}
}
@@ -2947,6 +2949,9 @@ int sdhci_add_host(struct sdhci_host *host)
host->flags &= ~SDHCI_USE_SDMA;
if (host->flags & SDHCI_USE_ADMA) {
+ dma_addr_t dma;
+ void *buf;
+
/*
* The DMA descriptor table size is calculated as the maximum
* number of segments times 2, to allow for an alignment
@@ -2962,33 +2967,27 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_ADMA2_32_DESC_SZ;
host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
}
- host->adma_table = dma_alloc_coherent(mmc_dev(mmc),
- host->adma_table_sz,
- &host->adma_addr,
- GFP_KERNEL);
+
host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
- host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
- if (!host->adma_table || !host->align_buffer) {
- if (host->adma_table)
- dma_free_coherent(mmc_dev(mmc),
- host->adma_table_sz,
- host->adma_table,
- host->adma_addr);
- kfree(host->align_buffer);
+ buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, &dma, GFP_KERNEL);
+ if (!buf) {
pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
- host->adma_table = NULL;
- host->align_buffer = NULL;
- } else if (host->adma_addr & (SDHCI_ADMA2_DESC_ALIGN - 1)) {
+ } else if ((dma + host->align_buffer_sz) &
+ (SDHCI_ADMA2_DESC_ALIGN - 1)) {
pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
mmc_hostname(mmc));
host->flags &= ~SDHCI_USE_ADMA;
- dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- host->adma_table, host->adma_addr);
- kfree(host->align_buffer);
- host->adma_table = NULL;
- host->align_buffer = NULL;
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, buf, dma);
+ } else {
+ host->align_buffer = buf;
+ host->align_addr = dma;
+
+ host->adma_table = buf + host->align_buffer_sz;
+ host->adma_addr = dma + host->align_buffer_sz;
}
}
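Both ADMA buffers now come from a single dma_alloc_coherent() call, laid out as:

	buf/dma:  [ align_buffer | adma_table ]
	            ^align_addr    ^adma_addr = dma + align_buffer_sz

with align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN, which is why the descriptor-alignment check is applied at dma + host->align_buffer_sz rather than at dma itself.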
@@ -3072,14 +3071,14 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
+ if (override_timeout_clk)
+ host->timeout_clk = override_timeout_clk;
+
mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
host->ops->get_max_timeout_count(host) : 1 << 27;
mmc->max_busy_timeout /= host->timeout_clk;
}
- if (override_timeout_clk)
- host->timeout_clk = override_timeout_clk;
-
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
@@ -3449,10 +3448,10 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
if (!IS_ERR(mmc->supply.vqmmc))
regulator_disable(mmc->supply.vqmmc);
- if (host->adma_table)
- dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
- host->adma_table, host->adma_addr);
- kfree(host->align_buffer);
+ if (host->align_buffer)
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, host->align_buffer,
+ host->align_addr);
host->adma_table = NULL;
host->align_buffer = NULL;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0115e9907bf8..0f39f4f84d10 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -316,8 +316,8 @@ struct sdhci_adma2_64_desc {
enum sdhci_cookie {
COOKIE_UNMAPPED,
- COOKIE_MAPPED,
- COOKIE_GIVEN,
+ COOKIE_PRE_MAPPED, /* mapped by sdhci_pre_req() */
+ COOKIE_MAPPED, /* mapped by sdhci_prepare_data() */
};
struct sdhci_host {
@@ -529,6 +529,8 @@ struct sdhci_ops {
#endif
void (*set_clock)(struct sdhci_host *host, unsigned int clock);
+ void (*set_power)(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
int (*enable_dma)(struct sdhci_host *host);
unsigned int (*get_max_clock)(struct sdhci_host *host);
@@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
}
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index b7e305775314..5ff26ab81eb1 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -398,10 +398,10 @@ static struct mmc_host_ops sdricoh_ops = {
static int sdricoh_init_mmc(struct pci_dev *pci_dev,
struct pcmcia_device *pcmcia_dev)
{
- int result = 0;
- void __iomem *iobase = NULL;
- struct mmc_host *mmc = NULL;
- struct sdricoh_host *host = NULL;
+ int result;
+ void __iomem *iobase;
+ struct mmc_host *mmc;
+ struct sdricoh_host *host;
struct device *dev = &pcmcia_dev->dev;
/* map iomem */
if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) !=
@@ -419,7 +419,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
if (readl(iobase + R104_VERSION) != 0x4000) {
dev_dbg(dev, "no supported mmc controller found\n");
result = -ENODEV;
- goto err;
+ goto unmap_io;
}
/* allocate privdata */
mmc = pcmcia_dev->priv =
@@ -427,7 +427,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
if (!mmc) {
dev_err(dev, "mmc_alloc_host failed\n");
result = -ENOMEM;
- goto err;
+ goto unmap_io;
}
host = mmc_priv(mmc);
@@ -451,8 +451,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
if (sdricoh_reset(host)) {
dev_dbg(dev, "could not reset\n");
result = -EIO;
- goto err;
-
+ goto free_host;
}
result = mmc_add_host(mmc);
@@ -461,13 +460,10 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
dev_dbg(dev, "mmc host registered\n");
return 0;
}
-
-err:
- if (iobase)
- pci_iounmap(pci_dev, iobase);
- if (mmc)
- mmc_free_host(mmc);
-
+free_host:
+ mmc_free_host(mmc);
+unmap_io:
+ pci_iounmap(pci_dev, iobase);
return result;
}
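The reworked error path above follows the usual labelled-unwind shape; a self-contained sketch with two resources, where each failure jumps to the first label whose resource is already held and labels undo in reverse order of acquisition:

static int demo_init(void **pa, void **pb)
{
	int ret;

	*pa = kmalloc(64, GFP_KERNEL);
	if (!*pa)
		return -ENOMEM;

	*pb = kmalloc(64, GFP_KERNEL);
	if (!*pb) {
		ret = -ENOMEM;
		goto free_a;
	}

	return 0;	/* on success, both buffers belong to the caller */

free_a:
	kfree(*pa);
	return ret;
}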
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 6234eab38ff3..d9a655f47d41 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1395,7 +1395,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
static void sh_mmcif_timeout_work(struct work_struct *work)
{
- struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct delayed_work *d = to_delayed_work(work);
struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
struct mmc_request *mrq = host->mrq;
struct device *dev = sh_mmcif_host_to_dev(host);
@@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 354f4f335ed5..9aa147959276 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -1,6 +1,8 @@
/*
* SuperH Mobile SDHI
*
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
* Copyright (C) 2009 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
@@ -43,6 +45,7 @@ struct sh_mobile_sdhi_of_data {
unsigned long capabilities2;
enum dma_slave_buswidth dma_buswidth;
dma_addr_t dma_rx_offset;
+ unsigned bus_shift;
};
static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
@@ -59,12 +62,19 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
- TMIO_MMC_CLK_ACTUAL,
+ TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dma_rx_offset = 0x2000,
};
+static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
+ TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
+ .capabilities = MMC_CAP_SD_HIGHSPEED,
+ .bus_shift = 2,
+};
+
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-shmobile" },
{ .compatible = "renesas,sdhi-sh7372" },
@@ -78,6 +88,7 @@ static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, },
{ .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, },
+ { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, },
{},
};
MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
@@ -103,6 +114,15 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
case 0xCB0D:
val = (width == 32) ? 0x0000 : 0x0001;
break;
+ case 0xCC10: /* Gen3, SD only */
+ case 0xCD10: /* Gen3, SD + MMC */
+ if (width == 64)
+ val = 0x0000;
+ else if (width == 32)
+ val = 0x0101;
+ else
+ val = 0x0001;
+ break;
default:
/* nothing to do */
return;
@@ -163,6 +183,7 @@ static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
case CTL_SD_MEM_CARD_OPT:
case CTL_TRANSACTION_CTL:
case CTL_DMA_ENABLE:
+ case EXT_ACC:
return sh_mobile_sdhi_wait_idle(host);
}
@@ -213,10 +234,8 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
- if (priv == NULL) {
- dev_err(&pdev->dev, "kzalloc failed\n");
+ if (!priv)
return -ENOMEM;
- }
mmc_data = &priv->mmc_data;
dma_priv = &priv->dma_priv;
@@ -234,16 +253,26 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eprobe;
}
+ if (of_id && of_id->data) {
+ const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
+
+ mmc_data->flags |= of_data->tmio_flags;
+ mmc_data->capabilities |= of_data->capabilities;
+ mmc_data->capabilities2 |= of_data->capabilities2;
+ mmc_data->dma_rx_offset = of_data->dma_rx_offset;
+ dma_priv->dma_buswidth = of_data->dma_buswidth;
+ host->bus_shift = of_data->bus_shift;
+ }
+
host->dma = dma_priv;
host->write16_hook = sh_mobile_sdhi_write16_hook;
host->clk_enable = sh_mobile_sdhi_clk_enable;
host->clk_disable = sh_mobile_sdhi_clk_disable;
host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
- /* SD control register space size is 0x100, 0x200 for bus_shift=1 */
- if (resource_size(res) > 0x100)
+
+ /* Originally, registers were 16 bits apart; nowadays they can be 32 or 64 */
+ if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */
host->bus_shift = 1;
- else
- host->bus_shift = 0;
if (mmd)
*mmc_data = *mmd;
@@ -275,15 +304,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_QUIRK;
- if (of_id && of_id->data) {
- const struct sh_mobile_sdhi_of_data *of_data = of_id->data;
- mmc_data->flags |= of_data->tmio_flags;
- mmc_data->capabilities |= of_data->capabilities;
- mmc_data->capabilities2 |= of_data->capabilities2;
- mmc_data->dma_rx_offset = of_data->dma_rx_offset;
- dma_priv->dma_buswidth = of_data->dma_buswidth;
- }
-
ret = tmio_mmc_host_probe(host, mmc_data);
if (ret < 0)
goto efree;
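Note the of_data block now runs before tmio_mmc_host_probe(), so bus_shift is valid before the first register access. A hedged sketch of the same lookup via of_device_get_match_data(), assuming the of_match_table wiring shown above:

static void demo_apply_of_data(struct platform_device *pdev,
			       struct tmio_mmc_host *host)
{
	const struct sh_mobile_sdhi_of_data *of_data =
		of_device_get_match_data(&pdev->dev);

	/* NULL when the matched entry carries no .data pointer */
	if (of_data)
		host->bus_shift = of_data->bus_shift;
}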
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 83de82bceafc..8372a413848c 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -28,6 +28,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
@@ -214,6 +215,7 @@
#define SDXC_CLK_25M 1
#define SDXC_CLK_50M 2
#define SDXC_CLK_50M_DDR 3
+#define SDXC_CLK_50M_DDR_8BIT 4
struct sunxi_mmc_clk_delay {
u32 output;
@@ -256,6 +258,9 @@ struct sunxi_mmc_host {
struct mmc_request *mrq;
struct mmc_request *manual_stop_mrq;
int ferror;
+
+ /* vqmmc */
+ bool vqmmc_enabled;
};
static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host)
@@ -284,16 +289,28 @@ static int sunxi_mmc_init_host(struct mmc_host *mmc)
if (sunxi_mmc_reset_host(host))
return -EIO;
+ /*
+ * Burst 8 transfers, RX trigger level: 7, TX trigger level: 8
+ *
+ * TODO: sun9i has a larger FIFO and supports higher trigger values
+ */
mmc_writel(host, REG_FTRGL, 0x20070008);
+ /* Maximum timeout value */
mmc_writel(host, REG_TMOUT, 0xffffffff);
+ /* Unmask SDIO interrupt if needed */
mmc_writel(host, REG_IMASK, host->sdio_imask);
+ /* Clear all pending interrupts */
mmc_writel(host, REG_RINTR, 0xffffffff);
+ /* Debug register; undocumented */
mmc_writel(host, REG_DBGC, 0xdeb);
+ /* Enable CEATA support */
mmc_writel(host, REG_FUNS, SDXC_CEATA_ON);
+ /* Set DMA descriptor list base address */
mmc_writel(host, REG_DLBA, host->sg_dma);
rval = mmc_readl(host, REG_GCTRL);
rval |= SDXC_INTERRUPT_ENABLE_BIT;
+ /* Undocumented, but found in Allwinner code */
rval &= ~SDXC_ACCESS_DONE_DIRECT;
mmc_writel(host, REG_GCTRL, rval);
@@ -640,11 +657,17 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
struct mmc_ios *ios)
{
u32 rate, oclk_dly, rval, sclk_dly;
+ u32 clock = ios->clock;
int ret;
- rate = clk_round_rate(host->clk_mmc, ios->clock);
+ /* 8 bit DDR requires a higher module clock */
+ if (ios->timing == MMC_TIMING_MMC_DDR52 &&
+ ios->bus_width == MMC_BUS_WIDTH_8)
+ clock <<= 1;
+
+ rate = clk_round_rate(host->clk_mmc, clock);
dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n",
- ios->clock, rate);
+ clock, rate);
/* setting clock rate */
ret = clk_set_rate(host->clk_mmc, rate);
@@ -661,6 +684,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
/* clear internal divider */
rval = mmc_readl(host, REG_CLKCR);
rval &= ~0xff;
+ /* set internal divider for 8 bit eMMC DDR, so card clock is right */
+ if (ios->timing == MMC_TIMING_MMC_DDR52 &&
+ ios->bus_width == MMC_BUS_WIDTH_8) {
+ rval |= 1;
+ rate >>= 1;
+ }
mmc_writel(host, REG_CLKCR, rval);
/* determine delays */
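A worked example of the 8-bit DDR clocking above, with illustrative numbers:

/*
 * ios->clock   = 52 MHz      requested card clock (DDR52, 8-bit bus)
 * clock <<= 1  = 104 MHz     module clock actually requested
 * rate         ~ 104 MHz     after clk_round_rate()
 * rval |= 1                  internal divider halves the output
 * card clock   = rate / 2    ~ 52 MHz, as requested; rate >>= 1 keeps
 *                            the delay selection below consistent
 */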
@@ -670,13 +699,17 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
} else if (rate <= 25000000) {
oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
- } else if (rate <= 50000000) {
- if (ios->timing == MMC_TIMING_UHS_DDR50) {
- oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
- sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
- } else {
+ } else if (rate <= 52000000) {
+ if (ios->timing != MMC_TIMING_UHS_DDR50 &&
+ ios->timing != MMC_TIMING_MMC_DDR52) {
oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
+ } else if (ios->bus_width == MMC_BUS_WIDTH_8) {
+ oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].sample;
+ } else {
+ oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
}
} else {
return -EINVAL;
@@ -699,7 +732,20 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
case MMC_POWER_UP:
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ host->ferror = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->vdd);
+ if (host->ferror)
+ return;
+
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ host->ferror = regulator_enable(mmc->supply.vqmmc);
+ if (host->ferror) {
+ dev_err(mmc_dev(mmc),
+ "failed to enable vqmmc\n");
+ return;
+ }
+ host->vqmmc_enabled = true;
+ }
host->ferror = sunxi_mmc_init_host(mmc);
if (host->ferror)
@@ -712,6 +758,9 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_dbg(mmc_dev(mmc), "power off!\n");
sunxi_mmc_reset_host(host);
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled)
+ regulator_disable(mmc->supply.vqmmc);
+ host->vqmmc_enabled = false;
break;
}
@@ -730,7 +779,8 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* set ddr mode */
rval = mmc_readl(host, REG_GCTRL);
- if (ios->timing == MMC_TIMING_UHS_DDR50)
+ if (ios->timing == MMC_TIMING_UHS_DDR50 ||
+ ios->timing == MMC_TIMING_MMC_DDR52)
rval |= SDXC_DDR_MODE;
else
rval &= ~SDXC_DDR_MODE;
@@ -743,6 +793,19 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
+static int sunxi_mmc_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ /* vqmmc regulator is available */
+ if (!IS_ERR(mmc->supply.vqmmc))
+ return mmc_regulator_set_vqmmc(mmc, ios);
+
+ /* no vqmmc regulator, assume fixed regulator at 3/3.3V */
+ if (mmc->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ return 0;
+
+ return -EINVAL;
+}
+
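The fixed-regulator branch in context (an inference from the code above):

/*
 * With no vqmmc supply wired up in the DT, only the assumed-fixed
 * 3.3 V signalling is accepted; a request to switch to 1.8 V (as
 * UHS modes require) is refused with -EINVAL.
 */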
static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct sunxi_mmc_host *host = mmc_priv(mmc);
@@ -815,11 +878,6 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) {
cmd_val |= SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER;
- if (cmd->data->flags & MMC_DATA_STREAM) {
- imask |= SDXC_AUTO_COMMAND_DONE;
- cmd_val |= SDXC_SEQUENCE_MODE |
- SDXC_SEND_AUTO_STOP;
- }
if (cmd->data->stop) {
imask |= SDXC_AUTO_COMMAND_DONE;
@@ -894,6 +952,7 @@ static struct mmc_host_ops sunxi_mmc_ops = {
.get_ro = mmc_gpio_get_ro,
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
+ .start_signal_voltage_switch = sunxi_mmc_volt_switch,
.hw_reset = sunxi_mmc_hw_reset,
.card_busy = sunxi_mmc_card_busy,
};
@@ -903,6 +962,8 @@ static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
[SDXC_CLK_25M] = { .output = 180, .sample = 75 },
[SDXC_CLK_50M] = { .output = 90, .sample = 120 },
[SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 },
+ /* Value from A83T "new timing mode". Works but might not be right. */
+ [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 180 },
};
static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
@@ -910,6 +971,7 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
[SDXC_CLK_25M] = { .output = 180, .sample = 75 },
[SDXC_CLK_50M] = { .output = 150, .sample = 120 },
[SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
+ [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 },
};
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1060,10 +1122,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des);
mmc->max_seg_size = (1 << host->idma_des_size_bits);
mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
- /* 400kHz ~ 50MHz */
+ /* 400kHz ~ 52MHz */
mmc->f_min = 400000;
- mmc->f_max = 50000000;
+ mmc->f_max = 52000000;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+ MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 4a0d6b80eaa3..7fb0c034dcb6 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
}
}
- if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
@@ -94,10 +94,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
desc = NULL;
ret = cookie;
}
- dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, cookie, host->mrq);
}
-
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
@@ -115,9 +112,6 @@ pio:
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
}
-
- dev_dbg(&host->pdev->dev, "%s(): desc %p, sg[%d]\n", __func__,
- desc, host->sg_len);
}
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
@@ -139,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
}
}
- if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+ if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
@@ -174,10 +168,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
desc = NULL;
ret = cookie;
}
- dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
- __func__, host->sg_len, ret, cookie, host->mrq);
}
-
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
@@ -195,8 +186,6 @@ pio:
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
}
-
- dev_dbg(&host->pdev->dev, "%s(): desc %p\n", __func__, desc);
}
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index a10fde40b6c3..0521b4662748 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1,6 +1,8 @@
/*
* linux/drivers/mmc/host/tmio_mmc_pio.c
*
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
* Copyright (C) 2011 Guennadi Liakhovetski
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
@@ -159,42 +161,44 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
if (new_clock) {
for (clock = host->mmc->f_min, clk = 0x80000080;
- new_clock >= (clock<<1); clk >>= 1)
+ new_clock >= (clock << 1);
+ clk >>= 1)
clock <<= 1;
/* 1/1 clock is option */
if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
- ((clk >> 22) & 0x1))
+ ((clk >> 22) & 0x1))
clk |= 0xff;
}
if (host->set_clk_div)
- host->set_clk_div(host->pdev, (clk>>22) & 1);
+ host->set_clk_div(host->pdev, (clk >> 22) & 1);
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
- msleep(10);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+ if (!(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG))
+ msleep(10);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
- /* implicit BUG_ON(!res) */
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
}
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(10);
+ msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 5 : 10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(10);
+ msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 1 : 10);
- /* implicit BUG_ON(!res) */
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
@@ -205,7 +209,6 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
- /* implicit BUG_ON(!res) */
if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
msleep(10);
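The SCLKEN handling above as a single read-modify-write helper; a sketch reusing the driver's own sd_ctrl accessors and the CLK_CTL_SCLKEN bit:

static void demo_sclk_gate(struct tmio_mmc_host *host, bool on)
{
	u16 clk = sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL);

	if (on)
		clk |= CLK_CTL_SCLKEN;
	else
		clk &= ~CLK_CTL_SCLKEN;
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk);
}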
@@ -1122,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+ mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index b47122d3e8d8..807c06e203c3 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1630,7 +1630,7 @@ static irqreturn_t usdhi6_cd(int irq, void *dev_id)
*/
static void usdhi6_timeout_work(struct work_struct *work)
{
- struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct delayed_work *d = to_delayed_work(work);
struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = mrq ? mrq->data : NULL;
@@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
/* Set .max_segs to some random number. Feel free to adjust. */
mmc->max_segs = 32;
mmc->max_blk_size = 512;
- mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
+ mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
/*
* Setting .max_seg_size to 1 page would simplify our page-mapping code,
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 42cc953309f1..e83a279f1217 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -142,7 +142,7 @@ config MTD_AR7_PARTS
config MTD_BCM63XX_PARTS
tristate "BCM63XX CFE partitioning support"
- depends on BCM63XX
+ depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
select CRC32
help
This provides partition parsing for BCM63xx devices with CFE
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 8282f47bcf5d..845dd27d9f41 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
{
uint32_t buf;
size_t bytes_read;
+ int err;
- if (mtd_read(master, offset, sizeof(buf), &bytes_read,
- (uint8_t *)&buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
goto out_default;
}
@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
int trx_part = -1;
int last_trx_part = -1;
int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+ int err;
/*
* Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
- /* Nothing more in higher memory */
- if (offset >= 0x2000000)
+ /* Nothing more in higher memory on BCM47XX (MIPS) */
+ if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read beginning of the block */
- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -254,10 +258,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read middle of the block */
- if (mtd_read(master, offset + 0x8000, 0x4,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -277,10 +282,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
offset = master->size - possible_nvram_sizes[i];
- if (mtd_read(master, offset, 0x4, &bytes_read,
- (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while reading at offset 0x%X!\n",
- offset);
+ err = mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+ offset, err);
continue;
}
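The read pattern above in isolation: mtd_read() may return -EUCLEAN for corrected bitflips, which mtd_is_bitflip() recognises; the data is still usable, so only hard errors abort parsing:

static int demo_read(struct mtd_info *mtd, loff_t off, u32 *val)
{
	size_t retlen;
	int err;

	err = mtd_read(mtd, off, sizeof(*val), &retlen, (u_char *)val);
	if (err && !mtd_is_bitflip(err))
		return err;		/* hard read error */
	if (retlen != sizeof(*val))
		return -EIO;
	return 0;			/* ok, possibly with corrected flips */
}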
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index cec3188a170d..41d1d3149c61 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -24,6 +24,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bcm963xx_nvram.h>
#include <linux/bcm963xx_tag.h>
#include <linux/crc32.h>
#include <linux/module.h>
@@ -34,12 +35,15 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <asm/mach-bcm63xx/bcm63xx_nvram.h>
-#include <asm/mach-bcm63xx/board_bcm963xx.h>
+#define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
-#define BCM63XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */
+#define BCM963XX_CFE_MAGIC_OFFSET 0x4e0
+#define BCM963XX_CFE_VERSION_OFFSET 0x570
+#define BCM963XX_NVRAM_OFFSET 0x580
-#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+/* Ensure strings read from flash structs are null terminated */
+#define STR_NULL_TERMINATE(x) \
+ do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0)
static int bcm63xx_detect_cfe(struct mtd_info *master)
{
@@ -58,68 +62,130 @@ static int bcm63xx_detect_cfe(struct mtd_info *master)
return 0;
/* very old CFEs do not have the cfe-v string, so check for magic */
- ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+ ret = mtd_read(master, BCM963XX_CFE_MAGIC_OFFSET, 8, &retlen,
(void *)buf);
buf[retlen] = 0;
return strncmp("CFE1CFE1", buf, 8);
}
-static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
- const struct mtd_partition **pparts,
- struct mtd_part_parser_data *data)
+static int bcm63xx_read_nvram(struct mtd_info *master,
+ struct bcm963xx_nvram *nvram)
+{
+ u32 actual_crc, expected_crc;
+ size_t retlen;
+ int ret;
+
+ /* extract nvram data */
+ ret = mtd_read(master, BCM963XX_NVRAM_OFFSET, BCM963XX_NVRAM_V5_SIZE,
+ &retlen, (void *)nvram);
+ if (ret)
+ return ret;
+
+ ret = bcm963xx_nvram_checksum(nvram, &expected_crc, &actual_crc);
+ if (ret)
+ pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
+ expected_crc, actual_crc);
+
+ if (!nvram->psi_size)
+ nvram->psi_size = BCM963XX_DEFAULT_PSI_SIZE;
+
+ return 0;
+}
+
+static int bcm63xx_read_image_tag(struct mtd_info *master, const char *name,
+ loff_t tag_offset, struct bcm_tag *buf)
+{
+ int ret;
+ size_t retlen;
+ u32 computed_crc;
+
+ ret = mtd_read(master, tag_offset, sizeof(*buf), &retlen, (void *)buf);
+ if (ret)
+ return ret;
+
+ if (retlen != sizeof(*buf))
+ return -EIO;
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ STR_NULL_TERMINATE(buf->board_id);
+ STR_NULL_TERMINATE(buf->tag_version);
+
+ pr_info("%s: CFE image tag found at 0x%llx with version %s, board type %s\n",
+ name, tag_offset, buf->tag_version, buf->board_id);
+
+ return 0;
+ }
+
+ pr_warn("%s: CFE image tag at 0x%llx CRC invalid (expected %08x, actual %08x)\n",
+ name, tag_offset, buf->header_crc, computed_crc);
+ return 1;
+}
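The tag validation above reduced to its core: CRC everything up to, but not including, the stored header_crc field, then compare. A sketch:

static bool demo_tag_valid(const struct bcm_tag *tag)
{
	u32 crc = crc32_le(IMAGETAG_CRC_START, (const u8 *)tag,
			   offsetof(struct bcm_tag, header_crc));

	return crc == tag->header_crc;
}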
+
+static int bcm63xx_parse_cfe_nor_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts, struct bcm963xx_nvram *nvram)
{
/* CFE, NVRAM and global Linux are always present */
int nrparts = 3, curpart = 0;
- struct bcm_tag *buf;
+ struct bcm_tag *buf = NULL;
struct mtd_partition *parts;
int ret;
- size_t retlen;
unsigned int rootfsaddr, kerneladdr, spareaddr;
unsigned int rootfslen, kernellen, sparelen, totallen;
unsigned int cfelen, nvramlen;
unsigned int cfe_erasesize;
int i;
- u32 computed_crc;
bool rootfs_first = false;
- if (bcm63xx_detect_cfe(master))
- return -EINVAL;
-
cfe_erasesize = max_t(uint32_t, master->erasesize,
- BCM63XX_CFE_BLOCK_SIZE);
+ BCM963XX_CFE_BLOCK_SIZE);
cfelen = cfe_erasesize;
- nvramlen = bcm63xx_nvram_get_psi_size() * SZ_1K;
+ nvramlen = nvram->psi_size * SZ_1K;
nvramlen = roundup(nvramlen, cfe_erasesize);
- /* Allocate memory for buffer */
buf = vmalloc(sizeof(struct bcm_tag));
if (!buf)
return -ENOMEM;
/* Get the tag */
- ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
- (void *)buf);
-
- if (retlen != sizeof(struct bcm_tag)) {
- vfree(buf);
- return -EIO;
- }
+ ret = bcm63xx_read_image_tag(master, "rootfs", cfelen, buf);
+ if (!ret) {
+ STR_NULL_TERMINATE(buf->flash_image_start);
+ if (kstrtouint(buf->flash_image_start, 10, &rootfsaddr) ||
+ rootfsaddr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid rootfs address: %*ph\n",
+ (int)sizeof(buf->flash_image_start),
+ buf->flash_image_start);
+ goto invalid_tag;
+ }
- computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
- offsetof(struct bcm_tag, header_crc));
- if (computed_crc == buf->header_crc) {
- char *boardid = &(buf->board_id[0]);
- char *tagversion = &(buf->tag_version[0]);
+ STR_NULL_TERMINATE(buf->kernel_address);
+ if (kstrtouint(buf->kernel_address, 10, &kerneladdr) ||
+ kerneladdr < BCM963XX_EXTENDED_SIZE) {
+ pr_err("invalid kernel address: %*ph\n",
+ (int)sizeof(buf->kernel_address),
+ buf->kernel_address);
+ goto invalid_tag;
+ }
- sscanf(buf->flash_image_start, "%u", &rootfsaddr);
- sscanf(buf->kernel_address, "%u", &kerneladdr);
- sscanf(buf->kernel_length, "%u", &kernellen);
- sscanf(buf->total_length, "%u", &totallen);
+ STR_NULL_TERMINATE(buf->kernel_length);
+ if (kstrtouint(buf->kernel_length, 10, &kernellen)) {
+ pr_err("invalid kernel length: %*ph\n",
+ (int)sizeof(buf->kernel_length),
+ buf->kernel_length);
+ goto invalid_tag;
+ }
- pr_info("CFE boot tag found with version %s and board type %s\n",
- tagversion, boardid);
+ STR_NULL_TERMINATE(buf->total_length);
+ if (kstrtouint(buf->total_length, 10, &totallen)) {
+ pr_err("invalid total length: %*ph\n",
+ (int)sizeof(buf->total_length),
+ buf->total_length);
+ goto invalid_tag;
+ }
kerneladdr = kerneladdr - BCM963XX_EXTENDED_SIZE;
rootfsaddr = rootfsaddr - BCM963XX_EXTENDED_SIZE;
@@ -134,13 +200,14 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
rootfsaddr = kerneladdr + kernellen;
rootfslen = spareaddr - rootfsaddr;
}
- } else {
- pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
- buf->header_crc, computed_crc);
+ } else if (ret > 0) {
+invalid_tag:
kernellen = 0;
rootfslen = 0;
rootfsaddr = 0;
spareaddr = cfelen;
+ } else {
+ goto out;
}
sparelen = master->size - spareaddr - nvramlen;
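The strict parsing above in isolation: NUL-terminate the fixed-width flash field first, then let kstrtouint() reject non-numeric input that sscanf() would have partially matched. A sketch:

static int demo_parse_addr(struct bcm_tag *tag, unsigned int *addr)
{
	STR_NULL_TERMINATE(tag->flash_image_start);
	if (kstrtouint(tag->flash_image_start, 10, addr))
		return -EINVAL;	/* non-numeric field: reject the tag */
	return 0;
}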
@@ -151,11 +218,10 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
if (kernellen > 0)
nrparts++;
- /* Ask kernel for more memory */
parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
if (!parts) {
- vfree(buf);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
/* Start building partition list */
@@ -206,9 +272,43 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
sparelen);
*pparts = parts;
+ ret = 0;
+
+out:
vfree(buf);
+ if (ret)
+ return ret;
+
return nrparts;
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct bcm963xx_nvram *nvram = NULL;
+ int ret;
+
+ if (bcm63xx_detect_cfe(master))
+ return -EINVAL;
+
+ nvram = vzalloc(sizeof(*nvram));
+ if (!nvram)
+ return -ENOMEM;
+
+ ret = bcm63xx_read_nvram(master, nvram);
+ if (ret)
+ goto out;
+
+ if (!mtd_type_is_nand(master))
+ ret = bcm63xx_parse_cfe_nor_partitions(master, pparts, nvram);
+ else
+ ret = -EINVAL;
+
+out:
+ vfree(nvram);
+ return ret;
};
static struct mtd_part_parser bcm63xx_cfe_parser = {
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index e2c0057737e6..7c887f111a7d 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
break;
}
- page_cache_release(page);
+ put_page(page);
pages--;
index++;
}
@@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
return PTR_ERR(page);
memcpy(buf, page_address(page) + offset, cpylen);
- page_cache_release(page);
+ put_page(page);
if (retlen)
*retlen += cpylen;
@@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
unlock_page(page);
balance_dirty_pages_ratelimited(mapping);
}
- page_cache_release(page);
+ put_page(page);
if (retlen)
*retlen += cpylen;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index c3a2695a4420..e7b2e439696c 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -72,13 +72,11 @@ MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
* @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
* @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
* @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15)
- * @oobavail: 8 available bytes remaining after ECC toll
*/
static struct nand_ecclayout docg3_oobinfo = {
.eccbytes = 8,
.eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
.oobfree = {{0, 7}, {15, 1} },
- .oobavail = 8,
};
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
@@ -1438,7 +1436,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
oobdelta = mtd->oobsize;
break;
case MTD_OPS_AUTO_OOB:
- oobdelta = mtd->ecclayout->oobavail;
+ oobdelta = mtd->oobavail;
break;
default:
return -EINVAL;
@@ -1860,6 +1858,7 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
mtd->ecclayout = &docg3_oobinfo;
+ mtd->oobavail = 8;
mtd->ecc_strength = DOC_ECC_BCH_T;
return 0;
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 627a9bc37679..cbd8547d7aad 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -19,6 +19,7 @@
static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
+static unsigned long writebuf_size = 64;
#define MTDRAM_TOTAL_SIZE (total_size * 1024)
#define MTDRAM_ERASE_SIZE (erase_size * 1024)
@@ -27,6 +28,8 @@ module_param(total_size, ulong, 0);
MODULE_PARM_DESC(total_size, "Total device size in KiB");
module_param(erase_size, ulong, 0);
MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
+module_param(writebuf_size, ulong, 0);
+MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)");
#endif
// We could store these in the mtd structure, but we only support 1 device..
@@ -123,7 +126,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
mtd->flags = MTD_CAP_RAM;
mtd->size = size;
mtd->writesize = 1;
- mtd->writebufsize = 64; /* Mimic CFI NOR flashes */
+ mtd->writebufsize = writebuf_size;
mtd->erasesize = MTDRAM_ERASE_SIZE;
mtd->priv = mapped_address;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 10bf304027dd..08de4b2cf0f5 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -126,10 +126,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
if (ops->oobbuf) {
size_t len, pages;
- if (ops->mode == MTD_OPS_AUTO_OOB)
- len = mtd->oobavail;
- else
- len = mtd->oobsize;
+ len = mtd_oobavail(mtd, ops);
pages = mtd_div_by_ws(mtd->size, mtd);
pages -= mtd_div_by_ws(from, mtd);
if (ops->ooboffs + ops->ooblen > pages * len)
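A presumed shape of the mtd_oobavail() helper used above, inferred from the open-coded branch it replaces (an assumption, not the verified implementation):

static inline int demo_oobavail(struct mtd_info *mtd,
				struct mtd_oob_ops *ops)
{
	/* auto-placement sees only the free bytes; raw/place sees all */
	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}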
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fc8b3d16cce7..cb06bdd21a1b 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -346,7 +346,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
return MTDSWAP_SCANNED_BAD;
- ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
+ ops.ooblen = 2 * d->mtd->oobavail;
ops.oobbuf = d->oob_buf;
ops.ooboffs = 0;
ops.datbuf = NULL;
@@ -359,7 +359,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
data = (struct mtdswap_oobdata *)d->oob_buf;
data2 = (struct mtdswap_oobdata *)
- (d->oob_buf + d->mtd->ecclayout->oobavail);
+ (d->oob_buf + d->mtd->oobavail);
if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
eb->erase_count = le32_to_cpu(data->count);
@@ -933,7 +933,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = mtd->writesize;
- ops.ooblen = mtd->ecclayout->oobavail;
+ ops.ooblen = mtd->oobavail;
ops.ooboffs = 0;
ops.datbuf = d->page_buf;
ops.oobbuf = d->oob_buf;
@@ -945,7 +945,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
for (i = 0; i < mtd_pages; i++) {
patt = mtdswap_test_patt(test + i);
memset(d->page_buf, patt, mtd->writesize);
- memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
+ memset(d->oob_buf, patt, mtd->oobavail);
ret = mtd_write_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -964,7 +964,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
if (p1[j] != patt)
goto error;
- for (j = 0; j < mtd->ecclayout->oobavail; j++)
+ for (j = 0; j < mtd->oobavail; j++)
if (p2[j] != (unsigned char)patt)
goto error;
@@ -1387,7 +1387,7 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
if (!d->page_buf)
goto page_buf_fail;
- d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
+ d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL);
if (!d->oob_buf)
goto oob_buf_fail;
@@ -1417,7 +1417,6 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
unsigned long part;
unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
uint64_t swap_size, use_size, size_limit;
- struct nand_ecclayout *oinfo;
int ret;
parts = &partitions[0];
@@ -1447,17 +1446,10 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
}
- oinfo = mtd->ecclayout;
- if (!oinfo) {
- printk(KERN_ERR "%s: mtd%d does not have OOB\n",
- MTDSWAP_PREFIX, mtd->index);
- return;
- }
-
- if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
+ if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
printk(KERN_ERR "%s: Not enough free bytes in OOB, "
"%d available, %zu needed.\n",
- MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
+ MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
return;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 20f01b3ec23d..f05e0e9eb2f7 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
config MTD_NAND_GPIO
tristate "GPIO assisted NAND Flash driver"
depends on GPIOLIB || COMPILE_TEST
+ depends on HAS_IOMEM
help
This enables a NAND flash driver where control signals are
connected to GPIO pins, and commands and data are communicated
@@ -310,6 +311,7 @@ config MTD_NAND_CAFE
config MTD_NAND_CS553X
tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
depends on X86_32
+ depends on !UML && HAS_IOMEM
help
The CS553x companion chips for the AMD Geode processor
include NAND flash controllers with built-in hardware ECC
@@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC
config MTD_NAND_VF610_NFC
tristate "Support for Freescale NFC for VF610/MPC5125"
depends on (SOC_VF610 || COMPILE_TEST)
+ depends on HAS_IOMEM
help
Enables support for NAND Flash Controller on some Freescale
processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
@@ -553,4 +556,11 @@ config MTD_NAND_HISI504
help
Enables support for NAND controller on Hisilicon SoC Hip04.
+config MTD_NAND_QCOM
+ tristate "Support for NAND on QCOM SoCs"
+ depends on ARCH_QCOM
+ help
+ Enables support for NAND flash chips on SoCs containing the EBI2 NAND
+ controller. This controller is found on the IPQ806x SoC.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 9e3623308509..f55335373f7c 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -56,5 +56,6 @@ obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
+obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index bddcf83d6859..20cbaabb2959 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -65,6 +65,11 @@ module_param(on_flash_bbt, int, 0);
struct atmel_nand_caps {
bool pmecc_correct_erase_page;
+ uint8_t pmecc_max_correction;
+};
+
+struct atmel_nand_nfc_caps {
+ uint32_t rb_mask;
};
/* oob layout for large page size
@@ -111,6 +116,7 @@ struct atmel_nfc {
/* Points to the SRAM bank that holds data read in via the NFC */
void *data_in_sram;
bool will_write_sram;
+ const struct atmel_nand_nfc_caps *caps;
};
static struct atmel_nfc nand_nfc;
@@ -140,6 +146,7 @@ struct atmel_nand_host {
int pmecc_cw_len; /* Length of codeword */
void __iomem *pmerrloc_base;
+ void __iomem *pmerrloc_el_base;
void __iomem *pmecc_rom_base;
/* lookup table for alpha_to and index_of */
@@ -468,6 +475,7 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
* 8-bits 13-bytes 14-bytes
* 12-bits 20-bytes 21-bytes
* 24-bits 39-bytes 42-bytes
+ * 32-bits 52-bytes 56-bytes
*/
static int pmecc_get_ecc_bytes(int cap, int sector_size)
{
@@ -813,7 +821,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
sector_size = host->pmecc_sector_size;
while (err_nbr) {
- tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_base, i) - 1;
+ tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_el_base, i) - 1;
byte_pos = tmp / 8;
bit_pos = tmp % 8;
@@ -825,7 +833,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
*(buf + byte_pos) ^= (1 << bit_pos);
pos = sector_num * host->pmecc_sector_size + byte_pos;
- dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
pos, bit_pos, err_byte, *(buf + byte_pos));
} else {
/* Bit flip in OOB area */
@@ -835,7 +843,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
ecc[tmp] ^= (1 << bit_pos);
pos = tmp + nand_chip->ecc.layout->eccpos[0];
- dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
pos, bit_pos, err_byte, ecc[tmp]);
}
@@ -1017,6 +1025,9 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
case 24:
val = PMECC_CFG_BCH_ERR24;
break;
+ case 32:
+ val = PMECC_CFG_BCH_ERR32;
+ break;
}
if (host->pmecc_sector_size == 512)
@@ -1078,6 +1089,9 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
/* If device tree doesn't specify, use NAND's minimum ECC parameters */
if (host->pmecc_corr_cap == 0) {
+ if (*cap > host->caps->pmecc_max_correction)
+ return -EINVAL;
+
/* use the most suitable ECC strength (the nearest larger one) */
if (*cap <= 2)
host->pmecc_corr_cap = 2;
@@ -1089,6 +1103,8 @@ static int pmecc_choose_ecc(struct atmel_nand_host *host,
host->pmecc_corr_cap = 12;
else if (*cap <= 24)
host->pmecc_corr_cap = 24;
+ else if (*cap <= 32)
+ host->pmecc_corr_cap = 32;
else
return -EINVAL;
}
@@ -1205,6 +1221,8 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
err_no = PTR_ERR(host->pmerrloc_base);
goto err;
}
+ host->pmerrloc_el_base = host->pmerrloc_base + ATMEL_PMERRLOC_SIGMAx +
+ (host->caps->pmecc_max_correction + 1) * 4;
if (!host->has_no_lookup_table) {
regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
@@ -1486,8 +1504,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
}
-static const struct of_device_id atmel_nand_dt_ids[];
-
static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
@@ -1498,7 +1514,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
enum of_gpio_flags flags = 0;
host->caps = (struct atmel_nand_caps *)
- of_match_device(atmel_nand_dt_ids, host->dev)->data;
+ of_device_get_match_data(host->dev);
if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
if (val >= 32) {
@@ -1547,10 +1563,16 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
* them from NAND ONFI parameters.
*/
if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
- if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
- (val != 24)) {
+ if (val > host->caps->pmecc_max_correction) {
dev_err(host->dev,
- "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+ "Required ECC strength too high: %u max %u\n",
+ val, host->caps->pmecc_max_correction);
+ return -EINVAL;
+ }
+ if ((val != 2) && (val != 4) && (val != 8) &&
+ (val != 12) && (val != 24) && (val != 32)) {
+ dev_err(host->dev,
+ "Required ECC strength not supported: %u\n",
val);
return -EINVAL;
}
@@ -1560,7 +1582,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
if ((val != 512) && (val != 1024)) {
dev_err(host->dev,
- "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+ "Required ECC sector size not supported: %u\n",
val);
return -EINVAL;
}
@@ -1677,9 +1699,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
ret = IRQ_HANDLED;
}
- if (pending & NFC_SR_RB_EDGE) {
+ if (pending & host->nfc->caps->rb_mask) {
complete(&host->nfc->comp_ready);
- nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
+ nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask);
ret = IRQ_HANDLED;
}
if (pending & NFC_SR_CMD_DONE) {
@@ -1697,7 +1719,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
init_completion(&host->nfc->comp_xfer_done);
- if (flag & NFC_SR_RB_EDGE)
+ if (flag & host->nfc->caps->rb_mask)
init_completion(&host->nfc->comp_ready);
if (flag & NFC_SR_CMD_DONE)
@@ -1715,7 +1737,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
comp[index++] = &host->nfc->comp_xfer_done;
- if (flag & NFC_SR_RB_EDGE)
+ if (flag & host->nfc->caps->rb_mask)
comp[index++] = &host->nfc->comp_ready;
if (flag & NFC_SR_CMD_DONE)
@@ -1783,7 +1805,7 @@ static int nfc_device_ready(struct mtd_info *mtd)
dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
mask & status);
- return status & NFC_SR_RB_EDGE;
+ return status & host->nfc->caps->rb_mask;
}
static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1956,8 +1978,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
}
/* fall through */
default:
- nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
- nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
+ nfc_prepare_interrupt(host, host->nfc->caps->rb_mask);
+ nfc_wait_interrupt(host, host->nfc->caps->rb_mask);
}
}
@@ -2304,17 +2326,34 @@ static int atmel_nand_remove(struct platform_device *pdev)
return 0;
}
+/*
+ * AT91RM9200 does not have PMECC or PMECC Errloc peripherals for
+ * BCH ECC. Combined with the "atmel,has-pmecc" property, this entry is
+ * also used to describe devices from the SAM9 family that have them.
+ */
static const struct atmel_nand_caps at91rm9200_caps = {
.pmecc_correct_erase_page = false,
+ .pmecc_max_correction = 24,
};
static const struct atmel_nand_caps sama5d4_caps = {
.pmecc_correct_erase_page = true,
+ .pmecc_max_correction = 24,
+};
+
+/*
+ * The PMECC Errloc controller starting in SAMA5D2 is not compatible,
+ * as the increased correction strength requires more registers.
+ */
+static const struct atmel_nand_caps sama5d2_caps = {
+ .pmecc_correct_erase_page = true,
+ .pmecc_max_correction = 32,
};
static const struct of_device_id atmel_nand_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
{ .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
+ { .compatible = "atmel,sama5d2-nand", .data = &sama5d2_caps },
{ /* sentinel */ }
};
@@ -2354,6 +2393,11 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
}
}
+ nfc->caps = (const struct atmel_nand_nfc_caps *)
+ of_device_get_match_data(&pdev->dev);
+ if (!nfc->caps)
+ return -ENODEV;
+
nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
@@ -2382,8 +2426,17 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev)
return 0;
}
+static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = {
+ .rb_mask = NFC_SR_RB_EDGE0,
+};
+
+static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = {
+ .rb_mask = NFC_SR_RB_EDGE3,
+};
+
static const struct of_device_id atmel_nand_nfc_match[] = {
- { .compatible = "atmel,sama5d3-nfc" },
+ { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps },
+ { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index 668e7358f19b..834d694487bd 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -43,6 +43,7 @@
#define PMECC_CFG_BCH_ERR8 (2 << 0)
#define PMECC_CFG_BCH_ERR12 (3 << 0)
#define PMECC_CFG_BCH_ERR24 (4 << 0)
+#define PMECC_CFG_BCH_ERR32 (5 << 0)
#define PMECC_CFG_SECTOR512 (0 << 4)
#define PMECC_CFG_SECTOR1024 (1 << 4)
@@ -108,7 +109,11 @@
#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
#define PMERRLOC_CALC_DONE (1 << 0)
#define ATMEL_PMERRLOC_SIGMAx 0x028 /* Error location SIGMA x */
-#define ATMEL_PMERRLOC_ELx 0x08c /* Error location x */
+
+/*
+ * The ATMEL_PMERRLOC_ELx register location depends on the number of
+ * bits corrected by the PMECC controller. Do not use it.
+ */
/* Register access macros for PMECC */
#define pmecc_readl_relaxed(addr, reg) \
@@ -136,7 +141,7 @@
readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
#define pmerrloc_readl_el_relaxed(addr, n) \
- readl_relaxed((addr) + ATMEL_PMERRLOC_ELx + ((n) * 4))
+ readl_relaxed((addr) + ((n) * 4))
/* Galois field dimension */
#define PMECC_GF_DIMENSION_13 13
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 4d5d26221a7e..0bbc1fa97dba 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -42,7 +42,8 @@
#define NFC_SR_UNDEF (1 << 21)
#define NFC_SR_AWB (1 << 22)
#define NFC_SR_ASE (1 << 23)
-#define NFC_SR_RB_EDGE (1 << 24)
+#define NFC_SR_RB_EDGE0 (1 << 24)
+#define NFC_SR_RB_EDGE3 (1 << 27)
#define ATMEL_HSMC_NFC_IER 0x0c
#define ATMEL_HSMC_NFC_IDR 0x10
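The two ready/busy bits spelled out for reference; which edge fires depends on the SoC wiring, hence the per-compatible rb_mask in the NFC caps added to atmel_nand.c:

/*
 * NFC_SR_RB_EDGE0 = 1 << 24 = 0x01000000   (sama5d3)
 * NFC_SR_RB_EDGE3 = 1 << 27 = 0x08000000   (sama5d4)
 */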
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 844fc07d22cd..e0528397306a 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -311,6 +311,36 @@ static const u16 brcmnand_regs_v60[] = {
[BRCMNAND_FC_BASE] = 0x400,
};
+/* BRCMNAND v7.1 */
+static const u16 brcmnand_regs_v71[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
enum brcmnand_cs_reg {
BRCMNAND_CS_CFG_EXT = 0,
BRCMNAND_CS_CFG,
@@ -406,7 +436,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
}
/* Register offsets */
- if (ctrl->nand_version >= 0x0600)
+ if (ctrl->nand_version >= 0x0701)
+ ctrl->reg_offsets = brcmnand_regs_v71;
+ else if (ctrl->nand_version >= 0x0600)
ctrl->reg_offsets = brcmnand_regs_v60;
else if (ctrl->nand_version >= 0x0500)
ctrl->reg_offsets = brcmnand_regs_v50;
@@ -796,7 +828,8 @@ static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
break;
}
- goto out;
+
+ return layout;
}
/*
@@ -847,10 +880,7 @@ static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
break;
}
-out:
- /* Sum available OOB */
- for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE; i++)
- layout->oobavail += layout->oobfree[i].length;
+
return layout;
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index aa1a616b9fb6..e553aff68987 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -537,7 +537,7 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
return 0;
}
-static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
return 0;
}
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index f170f3c31b34..547c1002941d 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -794,7 +794,7 @@ static int doc200x_dev_ready(struct mtd_info *mtd)
}
}
-static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs)
{
/* This is our last resort if we couldn't find or create a BBT. Just
pretend all blocks are good. */
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index df4165b02c62..d86a60e1bbcb 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -225,7 +225,6 @@ struct docg4_priv {
static struct nand_ecclayout docg4_oobinfo = {
.eccbytes = 9,
.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
- .oobavail = 5,
.oobfree = { {.offset = 2, .length = 5} }
};
@@ -1121,7 +1120,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
return ret;
}
-static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
+static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs)
{
/* only called when module_param ignore_badblocks is set */
return 0;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 235ddcb58f39..8122c699ccf2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1,7 +1,7 @@
/*
* Freescale GPMI NAND Flash Driver
*
- * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
* Copyright (C) 2008 Embedded Alley Solutions, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -136,7 +136,7 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
*
* We may have available oob space in this case.
*/
-static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
struct nand_chip *chip = &this->nand;
@@ -145,7 +145,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
unsigned int block_mark_bit_offset;
if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
- return false;
+ return -EINVAL;
switch (chip->ecc_step_ds) {
case SZ_512:
@@ -158,19 +158,19 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
dev_err(this->dev,
"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
chip->ecc_strength_ds, chip->ecc_step_ds);
- return false;
+ return -EINVAL;
}
geo->ecc_chunk_size = chip->ecc_step_ds;
geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
if (!gpmi_check_ecc(this))
- return false;
+ return -EINVAL;
/* Keep the C >= O */
if (geo->ecc_chunk_size < mtd->oobsize) {
dev_err(this->dev,
"unsupported nand chip. ecc size: %d, oob size : %d\n",
chip->ecc_step_ds, mtd->oobsize);
- return false;
+ return -EINVAL;
}
/* The default value, see comment in the legacy_set_geometry(). */
@@ -242,7 +242,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+ ALIGN(geo->ecc_chunk_count, 4);
if (!this->swap_block_mark)
- return true;
+ return 0;
/* For bit swap. */
block_mark_bit_offset = mtd->writesize * 8 -
@@ -251,7 +251,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
geo->block_mark_byte_offset = block_mark_bit_offset / 8;
geo->block_mark_bit_offset = block_mark_bit_offset % 8;
- return true;
+ return 0;
}
static int legacy_set_geometry(struct gpmi_nand_data *this)
@@ -285,7 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
geo->ecc_strength = get_ecc_strength(this);
if (!gpmi_check_ecc(this)) {
dev_err(this->dev,
- "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n",
+ "ecc strength: %d cannot be supported by the controller (%d)\n"
+ "try to use minimum ecc strength that NAND chip required\n",
geo->ecc_strength,
this->devdata->bch_max_ecc_strength);
return -EINVAL;
@@ -366,10 +367,11 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
- if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
- && set_geometry_by_ecc_info(this))
- return 0;
- return legacy_set_geometry(this);
+ if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
+ || legacy_set_geometry(this))
+ return set_geometry_by_ecc_info(this);
+
+ return 0;
}
struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
@@ -2033,9 +2035,54 @@ static int gpmi_nand_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int gpmi_pm_suspend(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+ release_dma_channels(this);
+ return 0;
+}
+
+static int gpmi_pm_resume(struct device *dev)
+{
+ struct gpmi_nand_data *this = dev_get_drvdata(dev);
+ int ret;
+
+ ret = acquire_dma_channels(this);
+ if (ret < 0)
+ return ret;
+
+ /* re-init the GPMI registers */
+ this->flags &= ~GPMI_TIMING_INIT_OK;
+ ret = gpmi_init(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting GPMI : %d\n", ret);
+ return ret;
+ }
+
+ /* re-init the BCH registers */
+ ret = bch_set_geometry(this);
+ if (ret) {
+ dev_err(this->dev, "Error setting BCH : %d\n", ret);
+ return ret;
+ }
+
+ /* re-init others */
+ gpmi_extra_init(this);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops gpmi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+};
+
static struct platform_driver gpmi_nand_driver = {
.driver = {
.name = "gpmi-nand",
+ .pm = &gpmi_pm_ops,
.of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
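The suspend/resume pair added here follows the usual kernel PM pattern: the callbacks are compiled only under CONFIG_PM_SLEEP, while the dev_pm_ops structure is always defined, since SET_SYSTEM_SLEEP_PM_OPS() expands to nothing when sleep support is disabled. A minimal sketch of the same shape, with hypothetical names:

#ifdef CONFIG_PM_SLEEP
static int mydrv_suspend(struct device *dev)
{
	/* quiesce the hardware; resources are reacquired in resume */
	return 0;
}

static int mydrv_resume(struct device *dev)
{
	/* reprogram any registers lost across the sleep state */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops mydrv_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mydrv_suspend, mydrv_resume)
};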
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index f8d37f36a81c..96502b624cfb 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -632,7 +632,6 @@ static void hisi_nfc_host_init(struct hinfc_host *host)
}
static struct nand_ecclayout nand_ecc_2K_16bits = {
- .oobavail = 6,
.oobfree = { {2, 6} },
};
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index b19d2a9a5eb9..673ceb2a0b44 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -427,9 +427,6 @@ static int jz_nand_probe(struct platform_device *pdev)
chip->ecc.strength = 4;
chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
- if (pdata)
- chip->ecc.layout = pdata->ecc_layout;
-
chip->chip_delay = 50;
chip->cmd_ctrl = jz_nand_cmd_ctrl;
chip->select_chip = jz_nand_select_chip;
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 9bc435d72a86..d8c3e7afcc0b 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -750,7 +750,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
nand_chip->ecc.mode = NAND_ECC_HW;
- nand_chip->ecc.size = mtd->writesize;
+ nand_chip->ecc.size = 512;
nand_chip->ecc.layout = &lpc32xx_nand_oob;
host->mlcsubpages = mtd->writesize / 512;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 6b93e899d4e9..5d7843ffff6a 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
static int mpc5121_nfc_probe(struct platform_device *op)
{
- struct device_node *rootnode, *dn = op->dev.of_node;
+ struct device_node *dn = op->dev.of_node;
struct clk *clk;
struct device *dev = &op->dev;
struct mpc5121_nfc_prv *prv;
@@ -712,18 +712,15 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip->ecc.mode = NAND_ECC_SOFT;
/* Support external chip-select logic on ADS5121 board */
- rootnode = of_find_node_by_path("/");
- if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+ if (of_machine_is_compatible("fsl,mpc5121ads")) {
retval = ads5121_chipselect_init(mtd);
if (retval) {
dev_err(dev, "Chipselect init error!\n");
- of_node_put(rootnode);
return retval;
}
chip->select_chip = ads5121_select_chip;
}
- of_node_put(rootnode);
/* Enable NFC clock */
clk = devm_clk_get(dev, "ipg");
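of_machine_is_compatible() wraps exactly the root-node lookup and of_node_put() bookkeeping that this hunk removes; its in-tree definition is essentially:

int of_machine_is_compatible(const char *compat)
{
	struct device_node *root = of_find_node_by_path("/");
	int rc = 0;

	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}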
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index f2c8ff398d6c..557b8462f55e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -313,13 +313,12 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
* nand_block_bad - [DEFAULT] Read bad block marker from the chip
* @mtd: MTD device structure
* @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
*
* Check if the block is bad.
*/
-static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
- int page, chipnr, res = 0, i = 0;
+ int page, res = 0, i = 0;
struct nand_chip *chip = mtd_to_nand(mtd);
u16 bad;
@@ -328,15 +327,6 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
page = (int)(ofs >> chip->page_shift) & chip->pagemask;
- if (getchip) {
- chipnr = (int)(ofs >> chip->chip_shift);
-
- nand_get_device(mtd, FL_READING);
-
- /* Select the NAND device */
- chip->select_chip(mtd, chipnr);
- }
-
do {
if (chip->options & NAND_BUSWIDTH_16) {
chip->cmdfunc(mtd, NAND_CMD_READOOB,
@@ -361,11 +351,6 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
i++;
} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
- if (getchip) {
- chip->select_chip(mtd, -1);
- nand_release_device(mtd);
- }
-
return res;
}
@@ -503,19 +488,17 @@ static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
* @mtd: MTD device structure
* @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
* @allowbbt: 1, if it's allowed to access the bbt area
*
* Check if the block is bad, either by reading the bad block table or
* by calling the scan function.
*/
-static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
- int allowbbt)
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
{
struct nand_chip *chip = mtd_to_nand(mtd);
if (!chip->bbt)
- return chip->block_bad(mtd, ofs, getchip);
+ return chip->block_bad(mtd, ofs);
/* Return info from the table */
return nand_isbad_bbt(mtd, ofs, allowbbt);
@@ -566,8 +549,8 @@ void nand_wait_ready(struct mtd_info *mtd)
cond_resched();
} while (time_before(jiffies, timeo));
- pr_warn_ratelimited(
- "timeout while waiting for chip to become ready\n");
+ if (!chip->dev_ready(mtd))
+ pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
out:
led_trigger_event(nand_led_trigger, LED_OFF);
}
@@ -1723,8 +1706,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
- uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
- mtd->oobavail : mtd->oobsize;
+ uint32_t max_oobsize = mtd_oobavail(mtd, ops);
uint8_t *bufpoi, *oob, *buf;
int use_bufpoi;
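The hunks in this file that drop the MTD_OPS_AUTO_OOB ternary all switch to the new mtd_oobavail() helper; judging from the removed lines, it reduces to:

static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}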
@@ -2075,10 +2057,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
- if (ops->mode == MTD_OPS_AUTO_OOB)
- len = chip->ecc.layout->oobavail;
- else
- len = mtd->oobsize;
+ len = mtd_oobavail(mtd, ops);
if (unlikely(ops->ooboffs >= len)) {
pr_debug("%s: attempt to start read outside oob\n",
@@ -2575,8 +2554,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
uint32_t writelen = ops->len;
uint32_t oobwritelen = ops->ooblen;
- uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
- mtd->oobavail : mtd->oobsize;
+ uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
@@ -2766,10 +2744,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
pr_debug("%s: to = 0x%08x, len = %i\n",
__func__, (unsigned int)to, (int)ops->ooblen);
- if (ops->mode == MTD_OPS_AUTO_OOB)
- len = chip->ecc.layout->oobavail;
- else
- len = mtd->oobsize;
+ len = mtd_oobavail(mtd, ops);
/* Do not allow write past end of page */
if ((ops->ooboffs + ops->ooblen) > len) {
@@ -2957,7 +2932,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
while (len) {
/* Check if we have a bad block, we do not erase bad blocks! */
if (nand_block_checkbad(mtd, ((loff_t) page) <<
- chip->page_shift, 0, allowbbt)) {
+ chip->page_shift, allowbbt)) {
pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
__func__, page);
instr->state = MTD_ERASE_FAILED;
@@ -3044,7 +3019,20 @@ static void nand_sync(struct mtd_info *mtd)
*/
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
- return nand_block_checkbad(mtd, offs, 1, 0);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(offs >> chip->chip_shift);
+ int ret;
+
+ /* Select the NAND device */
+ nand_get_device(mtd, FL_READING);
+ chip->select_chip(mtd, chipnr);
+
+ ret = nand_block_checkbad(mtd, offs, 0);
+
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ return ret;
}
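With the getchip argument gone, locking and chip selection happen once here in nand_block_isbad() rather than inside ->block_bad(), so driver-specific implementations may assume the chip is already selected. A hypothetical driver callback now reduces to:

static int mydrv_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	/* called with the device lock held and the chip selected */
	return 0;	/* e.g. always-good, as cafe_nand and docg4 do */
}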
/**
@@ -4021,7 +4009,6 @@ static int nand_dt_init(struct nand_chip *chip)
* This is the first phase of the normal nand_scan() function. It reads the
* flash ID and sets up MTD fields accordingly.
*
- * The mtd->owner field must be set to the module of the caller.
*/
int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_flash_dev *table)
@@ -4287,10 +4274,8 @@ int nand_scan_tail(struct mtd_info *mtd)
}
/* See nand_bch_init() for details. */
- ecc->bytes = DIV_ROUND_UP(
- ecc->strength * fls(8 * ecc->size), 8);
- ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
- &ecc->layout);
+ ecc->bytes = 0;
+ ecc->priv = nand_bch_init(mtd);
if (!ecc->priv) {
pr_warn("BCH ECC initialization failed!\n");
BUG();
@@ -4325,11 +4310,11 @@ int nand_scan_tail(struct mtd_info *mtd)
* The number of bytes available for a client to place data into
* the out of band area.
*/
- ecc->layout->oobavail = 0;
- for (i = 0; ecc->layout->oobfree[i].length
- && i < ARRAY_SIZE(ecc->layout->oobfree); i++)
- ecc->layout->oobavail += ecc->layout->oobfree[i].length;
- mtd->oobavail = ecc->layout->oobavail;
+ mtd->oobavail = 0;
+ if (ecc->layout) {
+ for (i = 0; ecc->layout->oobfree[i].length; i++)
+ mtd->oobavail += ecc->layout->oobfree[i].length;
+ }
/* ECC sanity check: warn if it's too weak */
if (!nand_ecc_strength_good(mtd))
@@ -4443,19 +4428,12 @@ EXPORT_SYMBOL(nand_scan_tail);
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
*/
int nand_scan(struct mtd_info *mtd, int maxchips)
{
int ret;
- /* Many callers got this wrong, so check for it for a while... */
- if (!mtd->owner && caller_is_module()) {
- pr_crit("%s called with NULL mtd->owner!\n", __func__);
- BUG();
- }
-
ret = nand_scan_ident(mtd, maxchips, NULL);
if (!ret)
ret = nand_scan_tail(mtd);
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 4b6a7085b442..2fbb523df066 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
return ret;
}
-
-EXPORT_SYMBOL(nand_scan_bbt);
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index a87c1b628dfc..b585bae37929 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -107,9 +107,6 @@ EXPORT_SYMBOL(nand_bch_correct_data);
/**
* nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
* @mtd: MTD block structure
- * @eccsize: ecc block size in bytes
- * @eccbytes: ecc length in bytes
- * @ecclayout: output default layout
*
* Returns:
* a pointer to a new NAND BCH control structure, or NULL upon failure
@@ -123,14 +120,21 @@ EXPORT_SYMBOL(nand_bch_correct_data);
* @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
* @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
*/
-struct nand_bch_control *
-nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
- struct nand_ecclayout **ecclayout)
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
{
+ struct nand_chip *nand = mtd_to_nand(mtd);
unsigned int m, t, eccsteps, i;
- struct nand_ecclayout *layout;
+ struct nand_ecclayout *layout = nand->ecc.layout;
struct nand_bch_control *nbc = NULL;
unsigned char *erased_page;
+ unsigned int eccsize = nand->ecc.size;
+ unsigned int eccbytes = nand->ecc.bytes;
+ unsigned int eccstrength = nand->ecc.strength;
+
+ if (!eccbytes && eccstrength) {
+ eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
+ nand->ecc.bytes = eccbytes;
+ }
if (!eccsize || !eccbytes) {
printk(KERN_WARNING "ecc parameters not supplied\n");
@@ -158,7 +162,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
eccsteps = mtd->writesize/eccsize;
/* if no ecc placement scheme was provided, build one */
- if (!*ecclayout) {
+ if (!layout) {
/* handle large page devices only */
if (mtd->oobsize < 64) {
@@ -184,7 +188,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
layout->oobfree[0].offset = 2;
layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
- *ecclayout = layout;
+ nand->ecc.layout = layout;
}
/* sanity checks */
@@ -192,7 +196,7 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
goto fail;
}
- if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
+ if (layout->eccbytes != (eccsteps*eccbytes)) {
printk(KERN_WARNING "invalid ecc layout\n");
goto fail;
}
@@ -216,6 +220,9 @@ nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
for (i = 0; i < eccbytes; i++)
nbc->eccmask[i] ^= 0xff;
+ if (!eccstrength)
+ nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
+
return nbc;
fail:
nand_bch_free(nbc);
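With the new single-argument nand_bch_init(), everything is taken from the chip's ecc fields. A sketch of the calling convention implied by the hunks above, using a hypothetical setup function:

static int example_bch_setup(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	chip->ecc.mode = NAND_ECC_SOFT_BCH;
	chip->ecc.size = 512;
	chip->ecc.strength = 8;
	chip->ecc.bytes = 0;	/* derived internally as
				 * DIV_ROUND_UP(strength * fls(8 * size), 8) */
	chip->ecc.priv = nand_bch_init(mtd);

	return chip->ecc.priv ? 0 : -EINVAL;
}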
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index a8804a3da076..ccc05f5b2695 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] = {
SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
{"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
{ .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
- SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
- 4 },
+ SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+ NAND_ECC_INFO(40, SZ_1K), 4 },
LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1fd519503bb1..a58169a28741 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
int i;
for (i = 0; i < ns->held_cnt; i++)
- page_cache_release(ns->held_pages[i]);
+ put_page(ns->held_pages[i]);
}
/* Get page cache pages in advance to provide NOFS memory allocation */
@@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
struct page *page;
struct address_space *mapping = file->f_mapping;
- start_index = pos >> PAGE_CACHE_SHIFT;
- end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+ start_index = pos >> PAGE_SHIFT;
+ end_index = (pos + count - 1) >> PAGE_SHIFT;
if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
return -EINVAL;
ns->held_cnt = 0;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 220ddfcf29f5..dbc5b571c2bb 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -113,7 +113,7 @@ static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
- val = __raw_readl(REG_SMISR);
+ val = __raw_readl(nand->reg + REG_SMISR);
val &= READYBUSY;
spin_unlock(&nand->lock);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c553f78ab83f..0749ca1a1456 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1807,13 +1807,19 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
+ /*
+ * Bail out earlier to let NAND_ECC_SOFT code create its own
+ * ecclayout instead of using ours.
+ */
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ goto scan_tail;
+ }
+
/* populate MTD interface based on ECC scheme */
ecclayout = &info->oobinfo;
+ nand_chip->ecc.layout = ecclayout;
switch (info->ecc_opt) {
- case OMAP_ECC_HAM1_CODE_SW:
- nand_chip->ecc.mode = NAND_ECC_SOFT;
- break;
-
case OMAP_ECC_HAM1_CODE_HW:
pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
nand_chip->ecc.mode = NAND_ECC_HW;
@@ -1861,10 +1867,7 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->oobfree->offset = 1 +
ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd,
- nand_chip->ecc.size,
- nand_chip->ecc.bytes,
- &ecclayout);
+ nand_chip->ecc.priv = nand_bch_init(mtd);
if (!nand_chip->ecc.priv) {
dev_err(&info->pdev->dev, "unable to use BCH library\n");
err = -EINVAL;
@@ -1925,10 +1928,7 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->oobfree->offset = 1 +
ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
- nand_chip->ecc.priv = nand_bch_init(mtd,
- nand_chip->ecc.size,
- nand_chip->ecc.bytes,
- &ecclayout);
+ nand_chip->ecc.priv = nand_bch_init(mtd);
if (!nand_chip->ecc.priv) {
dev_err(&info->pdev->dev, "unable to use BCH library\n");
err = -EINVAL;
@@ -2002,9 +2002,6 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW)
- goto scan_tail;
-
/* all OOB bytes from oobfree->offset till the end of OOB are free */
ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
/* check if NAND device's OOB is enough to store ECC signatures */
@@ -2015,7 +2012,6 @@ static int omap_nand_probe(struct platform_device *pdev)
err = -EINVAL;
goto return_error;
}
- nand_chip->ecc.layout = ecclayout;
scan_tail:
/* second phase scan */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index a0e26dea1424..e4e50da30444 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -73,7 +73,6 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.bbt_options |= pdata->chip.bbt_options;
data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
- data->chip.ecc.layout = pdata->chip.ecclayout;
data->chip.ecc.mode = NAND_ECC_SOFT;
platform_set_drvdata(pdev, data);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 86fc245dc71a..d6508856da99 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -131,11 +131,23 @@
#define READ_ID_BYTES 7
/* macros for registers read/write */
-#define nand_writel(info, off, val) \
- writel_relaxed((val), (info)->mmio_base + (off))
-
-#define nand_readl(info, off) \
- readl_relaxed((info)->mmio_base + (off))
+#define nand_writel(info, off, val) \
+ do { \
+ dev_vdbg(&info->pdev->dev, \
+ "%s():%d nand_writel(0x%x, 0x%04x)\n", \
+ __func__, __LINE__, (val), (off)); \
+ writel_relaxed((val), (info)->mmio_base + (off)); \
+ } while (0)
+
+#define nand_readl(info, off) \
+ ({ \
+ unsigned int _v; \
+ _v = readl_relaxed((info)->mmio_base + (off)); \
+ dev_vdbg(&info->pdev->dev, \
+ "%s():%d nand_readl(0x%04x) = 0x%x\n", \
+ __func__, __LINE__, (off), _v); \
+ _v; \
+ })
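Two details worth noting: nand_readl() is a GNU statement expression, so it still yields the register value to its caller, and dev_vdbg() compiles to a no-op unless VERBOSE_DEBUG is defined, so the tracing costs nothing in normal builds. Usage is unchanged, e.g. with the driver's NDSR status register offset:

	u32 status = nand_readl(info, NDSR);	/* logs, then yields the value */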
/* error code and state */
enum {
@@ -199,7 +211,6 @@ struct pxa3xx_nand_info {
struct dma_chan *dma_chan;
dma_cookie_t dma_cookie;
int drcmr_dat;
- int drcmr_cmd;
unsigned char *data_buff;
unsigned char *oob_buff;
@@ -222,15 +233,44 @@ struct pxa3xx_nand_info {
int use_spare; /* use spare ? */
int need_wait;
- unsigned int data_size; /* data to be read from FIFO */
- unsigned int chunk_size; /* split commands chunk size */
- unsigned int oob_size;
+ /* Amount of real data per full chunk */
+ unsigned int chunk_size;
+
+ /* Amount of spare data per full chunk */
unsigned int spare_size;
+
+ /* Number of full chunks (i.e. chunks of chunk_size + spare_size bytes) */
+ unsigned int nfullchunks;
+
+ /*
+ * Total number of chunks. If equal to nfullchunks, then there
+ * are only full chunks. Otherwise, there is one last chunk of
+ * size (last_chunk_size + last_spare_size)
+ */
+ unsigned int ntotalchunks;
+
+ /* Amount of real data in the last chunk */
+ unsigned int last_chunk_size;
+
+ /* Amount of spare data in the last chunk */
+ unsigned int last_spare_size;
+
unsigned int ecc_size;
unsigned int ecc_err_cnt;
unsigned int max_bitflips;
int retcode;
+ /*
+ * Variables only valid during command
+ * execution. step_chunk_size and step_spare_size are the
+ * amount of real data and spare data in the current
+ * chunk. cur_chunk is the current chunk being
+ * read/programmed.
+ */
+ unsigned int step_chunk_size;
+ unsigned int step_spare_size;
+ unsigned int cur_chunk;
+
/* cached register value */
uint32_t reg_ndcr;
uint32_t ndtr0cs0;
@@ -526,25 +566,6 @@ static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
return 0;
}
-/*
- * Set the data and OOB size, depending on the selected
- * spare and ECC configuration.
- * Only applicable to READ0, READOOB and PAGEPROG commands.
- */
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
- struct mtd_info *mtd)
-{
- int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
-
- info->data_size = mtd->writesize;
- if (!oob_enable)
- return;
-
- info->oob_size = info->spare_size;
- if (!info->use_ecc)
- info->oob_size += info->ecc_size;
-}
-
/**
* NOTE: ND_RUN must be set first, then the command buffer
* written; otherwise, it does not work.
@@ -660,28 +681,28 @@ static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
- unsigned int do_bytes = min(info->data_size, info->chunk_size);
-
switch (info->state) {
case STATE_PIO_WRITING:
- writesl(info->mmio_base + NDDB,
- info->data_buff + info->data_buff_pos,
- DIV_ROUND_UP(do_bytes, 4));
+ if (info->step_chunk_size)
+ writesl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(info->step_chunk_size, 4));
- if (info->oob_size > 0)
+ if (info->step_spare_size)
writesl(info->mmio_base + NDDB,
info->oob_buff + info->oob_buff_pos,
- DIV_ROUND_UP(info->oob_size, 4));
+ DIV_ROUND_UP(info->step_spare_size, 4));
break;
case STATE_PIO_READING:
- drain_fifo(info,
- info->data_buff + info->data_buff_pos,
- DIV_ROUND_UP(do_bytes, 4));
+ if (info->step_chunk_size)
+ drain_fifo(info,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(info->step_chunk_size, 4));
- if (info->oob_size > 0)
+ if (info->step_spare_size)
drain_fifo(info,
info->oob_buff + info->oob_buff_pos,
- DIV_ROUND_UP(info->oob_size, 4));
+ DIV_ROUND_UP(info->step_spare_size, 4));
break;
default:
dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
@@ -690,9 +711,8 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
}
/* Update buffer pointers for multi-page read/write */
- info->data_buff_pos += do_bytes;
- info->oob_buff_pos += info->oob_size;
- info->data_size -= do_bytes;
+ info->data_buff_pos += info->step_chunk_size;
+ info->oob_buff_pos += info->step_spare_size;
}
static void pxa3xx_nand_data_dma_irq(void *data)
@@ -733,8 +753,9 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
info->state);
BUG();
}
- info->sg.length = info->data_size +
- (info->oob_size ? info->spare_size + info->ecc_size : 0);
+ info->sg.length = info->chunk_size;
+ if (info->use_spare)
+ info->sg.length += info->spare_size + info->ecc_size;
dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
@@ -895,9 +916,11 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
/* reset data and oob column point to handle data */
info->buf_start = 0;
info->buf_count = 0;
- info->oob_size = 0;
info->data_buff_pos = 0;
info->oob_buff_pos = 0;
+ info->step_chunk_size = 0;
+ info->step_spare_size = 0;
+ info->cur_chunk = 0;
info->use_ecc = 0;
info->use_spare = 1;
info->retcode = ERR_NONE;
@@ -909,8 +932,6 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
case NAND_CMD_READ0:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
- case NAND_CMD_READOOB:
- pxa3xx_set_datasize(info, mtd);
break;
case NAND_CMD_PARAM:
info->use_spare = 0;
@@ -969,6 +990,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
if (command == NAND_CMD_READOOB)
info->buf_start += mtd->writesize;
+ if (info->cur_chunk < info->nfullchunks) {
+ info->step_chunk_size = info->chunk_size;
+ info->step_spare_size = info->spare_size;
+ } else {
+ info->step_chunk_size = info->last_chunk_size;
+ info->step_spare_size = info->last_spare_size;
+ }
+
/*
* Multiple page read needs an 'extended command type' field,
* which is either naked-read or last-read according to the
@@ -980,8 +1009,8 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
| NDCB0_LEN_OVRD
| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
- info->ndcb3 = info->chunk_size +
- info->oob_size;
+ info->ndcb3 = info->step_chunk_size +
+ info->step_spare_size;
}
set_command_address(info, mtd->writesize, column, page_addr);
@@ -1001,8 +1030,6 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
| addr_cycle
| command;
- /* No data transfer in this case */
- info->data_size = 0;
exec_cmd = 1;
}
break;
@@ -1014,6 +1041,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
break;
}
+ if (info->cur_chunk < info->nfullchunks) {
+ info->step_chunk_size = info->chunk_size;
+ info->step_spare_size = info->spare_size;
+ } else {
+ info->step_chunk_size = info->last_chunk_size;
+ info->step_spare_size = info->last_spare_size;
+ }
+
/* Second command setting for large pages */
if (mtd->writesize > PAGE_CHUNK_SIZE) {
/*
@@ -1024,14 +1059,14 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
| NDCB0_LEN_OVRD
| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
- info->ndcb3 = info->chunk_size +
- info->oob_size;
+ info->ndcb3 = info->step_chunk_size +
+ info->step_spare_size;
/*
* This is the command dispatch that completes a chunked
* page program operation.
*/
- if (info->data_size == 0) {
+ if (info->cur_chunk == info->ntotalchunks) {
info->ndcb0 = NDCB0_CMD_TYPE(0x1)
| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
| command;
@@ -1058,7 +1093,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| command;
info->ndcb1 = (column & 0xFF);
info->ndcb3 = INIT_BUFFER_SIZE;
- info->data_size = INIT_BUFFER_SIZE;
+ info->step_chunk_size = INIT_BUFFER_SIZE;
break;
case NAND_CMD_READID:
@@ -1068,7 +1103,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| command;
info->ndcb1 = (column & 0xFF);
- info->data_size = 8;
+ info->step_chunk_size = 8;
break;
case NAND_CMD_STATUS:
info->buf_count = 1;
@@ -1076,7 +1111,7 @@ static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
| NDCB0_ADDR_CYC(1)
| command;
- info->data_size = 8;
+ info->step_chunk_size = 8;
break;
case NAND_CMD_ERASE1:
@@ -1217,6 +1252,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
init_completion(&info->dev_ready);
do {
info->state = STATE_PREPARED;
+
exec_cmd = prepare_set_command(info, command, ext_cmd_type,
column, page_addr);
if (!exec_cmd) {
@@ -1236,22 +1272,30 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
break;
}
+ /* Only a few commands need several steps */
+ if (command != NAND_CMD_PAGEPROG &&
+ command != NAND_CMD_READ0 &&
+ command != NAND_CMD_READOOB)
+ break;
+
+ info->cur_chunk++;
+
/* Check if the sequence is complete */
- if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+ if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
break;
/*
* After a split program command sequence has issued
* the command dispatch, the command sequence is complete.
*/
- if (info->data_size == 0 &&
+ if (info->cur_chunk == (info->ntotalchunks + 1) &&
command == NAND_CMD_PAGEPROG &&
ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
break;
if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
/* Last read: issue a 'last naked read' */
- if (info->data_size == info->chunk_size)
+ if (info->cur_chunk == info->ntotalchunks - 1)
ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
else
ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
@@ -1261,7 +1305,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
* the command dispatch must be issued to complete.
*/
} else if (command == NAND_CMD_PAGEPROG &&
- info->data_size == 0) {
+ info->cur_chunk == info->ntotalchunks) {
ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
}
} while (1);
@@ -1506,6 +1550,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
int strength, int ecc_stepsize, int page_size)
{
if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
info->chunk_size = 2048;
info->spare_size = 40;
info->ecc_size = 24;
@@ -1514,6 +1560,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
ecc->strength = 1;
} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
info->chunk_size = 512;
info->spare_size = 8;
info->ecc_size = 8;
@@ -1527,6 +1575,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
*/
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
info->ecc_bch = 1;
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
info->chunk_size = 2048;
info->spare_size = 32;
info->ecc_size = 32;
@@ -1537,6 +1587,8 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
info->ecc_bch = 1;
+ info->nfullchunks = 2;
+ info->ntotalchunks = 2;
info->chunk_size = 2048;
info->spare_size = 32;
info->ecc_size = 32;
@@ -1551,8 +1603,12 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
*/
} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
info->ecc_bch = 1;
+ info->nfullchunks = 4;
+ info->ntotalchunks = 5;
info->chunk_size = 1024;
info->spare_size = 0;
+ info->last_chunk_size = 0;
+ info->last_spare_size = 64;
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
@@ -1738,7 +1794,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
if (ret < 0)
return ret;
- if (use_dma) {
+ if (!np && use_dma) {
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (r == NULL) {
dev_err(&pdev->dev,
@@ -1747,15 +1803,6 @@ static int alloc_nand_resource(struct platform_device *pdev)
goto fail_disable_clk;
}
info->drcmr_dat = r->start;
-
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r == NULL) {
- dev_err(&pdev->dev,
- "no resource defined for cmd DMA\n");
- ret = -ENXIO;
- goto fail_disable_clk;
- }
- info->drcmr_cmd = r->start;
}
irq = platform_get_irq(pdev, 0);
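The new bookkeeping models a page as nfullchunks equal chunks plus, when ntotalchunks differs, one trailing partial chunk. For the BCH-8/4K layout above, that means four 1024+0 byte chunks followed by a fifth chunk carrying only the 64 spare bytes. A hypothetical helper, not part of the patch, makes the arithmetic explicit:

static unsigned int pxa3xx_total_xfer(const struct pxa3xx_nand_info *info)
{
	unsigned int full = info->nfullchunks *
			    (info->chunk_size + info->spare_size);
	unsigned int last = (info->ntotalchunks - info->nfullchunks) *
			    (info->last_chunk_size + info->last_spare_size);

	return full + last;	/* BCH-8/4K: 4 * 1024 + (0 + 64) = 4160 */
}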
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
new file mode 100644
index 000000000000..f550a57e6eea
--- /dev/null
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -0,0 +1,2223 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
+#include <linux/delay.h>
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_DEV1_ECC_CFG 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+
+/* dummy register offsets, used by write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE 6
+#define UD_SIZE_BYTES 9
+#define ECC_PARITY_SIZE_BYTES_RS 19
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES 27
+#define STATUS_BFR_READ 30
+#define SET_RD_MODE_AFTER_STATUS 31
+
+/* NAND_DEVn_CFG1 bits */
+#define DEV0_CFG1_ECC_DISABLE 0
+#define WIDE_FLASH 1
+#define NAND_RECOVERY_CYCLES 2
+#define CS_ACTIVE_BSY 5
+#define BAD_BLOCK_BYTE_NUM 6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP 17
+#define ENABLE_BCH_ECC 27
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE 0
+#define ECC_SW_RESET 1
+#define ECC_MODE 4
+#define ECC_PARITY_SIZE_BYTES_BCH 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR 0
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD 0
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS 0
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
+
+/* NAND OP_CMDs */
+#define PAGE_READ 0x2
+#define PAGE_READ_WITH_ECC 0x3
+#define PAGE_READ_WITH_ECC_SPARE 0x4
+#define PROGRAM_PAGE 0x6
+#define PAGE_PROGRAM_WITH_ECC 0x7
+#define PROGRAM_PAGE_SPARE 0x9
+#define BLOCK_ERASE 0xa
+#define FETCH_ID 0xb
+#define RESET_DEVICE 0xd
+
+/*
+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+ * the driver calls the chunks 'step' or 'codeword' interchangeably
+ */
+#define NANDC_STEP_SIZE 512
+
+/*
+ * the largest page size we support is 8K; this will have 16 steps/codewords
+ * of 512 bytes each
+ */
+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define ECC_NONE BIT(0)
+#define ECC_RS_4BIT BIT(1)
+#define ECC_BCH_4BIT BIT(2)
+#define ECC_BCH_8BIT BIT(3)
+
+struct desc_info {
+ struct list_head node;
+
+ enum dma_data_direction dir;
+ struct scatterlist sgl;
+ struct dma_async_tx_descriptor *dma_desc;
+};
+
+/*
+ * holds the current register values that we want to write. acts as a contiguous
+ * chunk of memory which we use to write the controller registers through DMA.
+ */
+struct nandc_regs {
+ __le32 cmd;
+ __le32 addr0;
+ __le32 addr1;
+ __le32 chip_sel;
+ __le32 exec;
+
+ __le32 cfg0;
+ __le32 cfg1;
+ __le32 ecc_bch_cfg;
+
+ __le32 clrflashstatus;
+ __le32 clrreadstatus;
+
+ __le32 cmd1;
+ __le32 vld;
+
+ __le32 orig_cmd1;
+ __le32 orig_vld;
+
+ __le32 ecc_buf_cfg;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @controller: base controller structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ * @dev: parent device
+ * @base: MMIO base
+ * @base_dma: physical base address of controller registers
+ * @core_clk: controller clock
+ * @aon_clk: another controller clock
+ *
+ * @chan: dma channel
+ * @cmd_crci: ADM DMA CRCI for command flow control
+ * @data_crci: ADM DMA CRCI for data flow control
+ * @desc_list: DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer: our local DMA buffer for page read/writes,
+ * used when we can't use the buffer provided
+ * by upper layers directly
+ * @buf_size/count/start: markers for chip->read_buf/write_buf functions
+ * @reg_read_buf: local buffer for reading back registers via DMA
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes. contains the register values to be
+ * written to controller
+ * @cmd1/vld: some fixed controller register values
+ * @ecc_modes: supported ECC modes by the current controller,
+ * initialized via DT match data
+ */
+struct qcom_nand_controller {
+ struct nand_hw_control controller;
+ struct list_head host_list;
+
+ struct device *dev;
+
+ void __iomem *base;
+ dma_addr_t base_dma;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ struct list_head desc_list;
+
+ u8 *data_buffer;
+ int buf_size;
+ int buf_count;
+ int buf_start;
+
+ __le32 *reg_read_buf;
+ int reg_read_pos;
+
+ struct nandc_regs *regs;
+
+ u32 cmd1, vld;
+ u32 ecc_modes;
+};
+
+/*
+ * NAND chip structure
+ *
+ * @chip: base NAND chip structure
+ * @node: list node to add itself to host_list in
+ * qcom_nand_controller
+ *
+ * @cs: chip select value for this chip
+ * @cw_size: the number of bytes in a single step/codeword
+ * of a page, consisting of all data, ecc, spare
+ * and reserved bytes
+ * @cw_data: the number of bytes within a codeword protected
+ * by ECC
+ * @use_ecc: request the controller to use ECC for the
+ * upcoming read/write
+ * @bch_enabled: flag to tell whether BCH ECC mode is used
+ * @ecc_bytes_hw: ECC bytes used by controller hardware for this
+ * chip
+ * @status: value to be returned if NAND_CMD_STATUS command
+ * is executed
+ * @last_command: keeps track of last command on this chip. used
+ * for reading correct status
+ *
+ * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
+ * ecc/non-ecc mode for the current nand flash
+ * device
+ */
+struct qcom_nand_host {
+ struct nand_chip chip;
+ struct list_head node;
+
+ int cs;
+ int cw_size;
+ int cw_data;
+ bool use_ecc;
+ bool bch_enabled;
+ int ecc_bytes_hw;
+ int spare_bytes;
+ int bbm_size;
+ u8 status;
+ int last_command;
+
+ u32 cfg0, cfg1;
+ u32 cfg0_raw, cfg1_raw;
+ u32 ecc_buf_cfg;
+ u32 ecc_bch_cfg;
+ u32 clrflashstatus;
+ u32 clrreadstatus;
+};
+
+static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+{
+ return container_of(chip, struct qcom_nand_host, chip);
+}
+
+static inline struct qcom_nand_controller *
+get_qcom_nand_controller(struct nand_chip *chip)
+{
+ return container_of(chip->controller, struct qcom_nand_controller,
+ controller);
+}
+
+static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+{
+ return ioread32(nandc->base + offset);
+}
+
+static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+ iowrite32(val, nandc->base + offset);
+}
+
+static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+{
+ switch (offset) {
+ case NAND_FLASH_CMD:
+ return &regs->cmd;
+ case NAND_ADDR0:
+ return &regs->addr0;
+ case NAND_ADDR1:
+ return &regs->addr1;
+ case NAND_FLASH_CHIP_SELECT:
+ return &regs->chip_sel;
+ case NAND_EXEC_CMD:
+ return &regs->exec;
+ case NAND_FLASH_STATUS:
+ return &regs->clrflashstatus;
+ case NAND_DEV0_CFG0:
+ return &regs->cfg0;
+ case NAND_DEV0_CFG1:
+ return &regs->cfg1;
+ case NAND_DEV0_ECC_CFG:
+ return &regs->ecc_bch_cfg;
+ case NAND_READ_STATUS:
+ return &regs->clrreadstatus;
+ case NAND_DEV_CMD1:
+ return &regs->cmd1;
+ case NAND_DEV_CMD1_RESTORE:
+ return &regs->orig_cmd1;
+ case NAND_DEV_CMD_VLD:
+ return &regs->vld;
+ case NAND_DEV_CMD_VLD_RESTORE:
+ return &regs->orig_vld;
+ case NAND_EBI2_ECC_BUF_CFG:
+ return &regs->ecc_buf_cfg;
+ default:
+ return NULL;
+ }
+}
+
+static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
+{
+ struct nandc_regs *regs = nandc->regs;
+ __le32 *reg;
+
+ reg = offset_to_nandc_reg(regs, offset);
+
+ if (reg)
+ *reg = cpu_to_le32(val);
+}
+
+/* helper to configure address register values */
+static void set_address(struct qcom_nand_host *host, u16 column, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+
+ nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
+ nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
+}
+
+/*
+ * update_rw_regs: set up read/write register values, these will be
+ * written to the NAND controller registers via DMA
+ *
+ * @num_cw: number of steps for the read/write operation
+ * @read: read or write operation
+ */
+static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+
+ if (read) {
+ if (host->use_ecc)
+ cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ else
+ cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
+ } else {
+ cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ }
+
+ if (host->use_ecc) {
+ cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+
+ cfg1 = host->cfg1;
+ ecc_bch_cfg = host->ecc_bch_cfg;
+ } else {
+ cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+ (num_cw - 1) << CW_PER_PAGE;
+
+ cfg1 = host->cfg1_raw;
+ ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ }
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
+ nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+ nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+ nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+ nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+}
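+/*
+ * worked example, for illustration: a 2K page has four 512 byte codewords,
+ * so a full-page read calls update_rw_regs(host, 4, true) and
+ * (num_cw - 1) == 3 is packed into the CW_PER_PAGE field of cfg0.
+ */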
+
+static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
+{
+ struct desc_info *desc;
+ struct dma_async_tx_descriptor *dma_desc;
+ struct scatterlist *sgl;
+ struct dma_slave_config slave_conf;
+ enum dma_transfer_direction dir_eng;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ sgl = &desc->sgl;
+
+ sg_init_one(sgl, vaddr, size);
+
+ if (read) {
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ } else {
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ }
+
+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+ if (ret == 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(&slave_conf, 0x00, sizeof(slave_conf));
+
+ slave_conf.device_fc = flow_control;
+ if (read) {
+ slave_conf.src_maxburst = 16;
+ slave_conf.src_addr = nandc->base_dma + reg_off;
+ slave_conf.slave_id = nandc->data_crci;
+ } else {
+ slave_conf.dst_maxburst = 16;
+ slave_conf.dst_addr = nandc->base_dma + reg_off;
+ slave_conf.slave_id = nandc->cmd_crci;
+ }
+
+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+ if (ret) {
+ dev_err(nandc->dev, "failed to configure dma channel\n");
+ goto err;
+ }
+
+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failed to prepare desc\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+err:
+ kfree(desc);
+
+ return ret;
+}
+
+/*
+ * read_reg_dma: prepares a descriptor to read a given number of
+ * contiguous registers to the reg_read_buf pointer
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ */
+static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs)
+{
+ bool flow_control = false;
+ void *vaddr;
+ int size;
+
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+ size = num_regs * sizeof(u32);
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
+
+ return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
+}
+
+/*
+ * write_reg_dma: prepares a descriptor to write a given number of
+ * contiguous registers
+ *
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ */
+static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs)
+{
+ bool flow_control = false;
+ struct nandc_regs *regs = nandc->regs;
+ void *vaddr;
+ int size;
+
+ vaddr = offset_to_nandc_reg(regs, first);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+ if (first == NAND_DEV_CMD1_RESTORE)
+ first = NAND_DEV_CMD1;
+
+ if (first == NAND_DEV_CMD_VLD_RESTORE)
+ first = NAND_DEV_CMD_VLD;
+
+ size = num_regs * sizeof(u32);
+
+ return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
+}
+
+/*
+ * read_data_dma: prepares a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'
+ *
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ */
+static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size)
+{
+ return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+
+/*
+ * write_data_dma: prepares a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer
+ *
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to read from
+ * @size: DMA transaction size in bytes
+ */
+static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size)
+{
+ return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+
+/*
+ * helper to prepare dma descriptors to configure registers needed for reading a
+ * codeword/step in a page
+ */
+static void config_cw_read(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
+
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
+ read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
+}
+
+/*
+ * helpers to prepare dma descriptors used to configure registers needed for
+ * writing a codeword/step in a page
+ */
+static void config_cw_write_pre(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
+ write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
+}
+
+static void config_cw_write_post(struct qcom_nand_controller *nandc)
+{
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1);
+}
+
+/*
+ * the following functions are used within chip->cmdfunc() to perform different
+ * NAND_CMD_* commands
+ */
+
+/* sets up descriptors for NAND_CMD_PARAM */
+static int nandc_param(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ /*
+ * NAND_CMD_PARAM is called before we know much about the FLASH chip
+ * in use. we configure the controller to perform a raw read of 512
+ * bytes to read onfi params
+ */
+ nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
+ nandc_set_reg(nandc, NAND_ADDR0, 0);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+ | 512 << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES);
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | 0 << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE);
+ nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing */
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
+ (nandc->vld & ~(1 << READ_START_VLD))
+ | 0 << READ_START_VLD);
+ nandc_set_reg(nandc, NAND_DEV_CMD1,
+ (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
+
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+ nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
+ write_reg_dma(nandc, NAND_DEV_CMD1, 1);
+
+ nandc->buf_count = 512;
+ memset(nandc->data_buffer, 0xff, nandc->buf_count);
+
+ config_cw_read(nandc);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count);
+
+ /* restore CMD1 and VLD regs */
+ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_ERASE1 */
+static int erase_block(struct qcom_nand_host *host, int page_addr)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD,
+ BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+ nandc_set_reg(nandc, NAND_ADDR0, page_addr);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_DEV0_CFG0,
+ host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+ nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+ nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+ write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+ write_reg_dma(nandc, NAND_READ_STATUS, 1);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_READID */
+static int read_id(struct qcom_nand_host *host, int column)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (column == -1)
+ return 0;
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
+ nandc_set_reg(nandc, NAND_ADDR0, column);
+ nandc_set_reg(nandc, NAND_ADDR1, 0);
+ nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 4);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_READ_ID, 1);
+
+ return 0;
+}
+
+/* sets up descriptors for NAND_CMD_RESET */
+static int reset(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
+ nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+ return 0;
+}
+
+/* helpers to submit/free our list of dma descriptors */
+static int submit_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc;
+ dma_cookie_t cookie = 0;
+
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void free_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc, *n;
+
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+ dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
+ kfree(desc);
+ }
+}
+
+/* reset the register read buffer for next NAND operation */
+static void clear_read_regs(struct qcom_nand_controller *nandc)
+{
+ nandc->reg_read_pos = 0;
+ memset(nandc->reg_read_buf, 0,
+ MAX_REG_RD * sizeof(*nandc->reg_read_buf));
+}
+
+static void pre_command(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+ host->last_command = command;
+
+ clear_read_regs(nandc);
+}
+
+/*
+ * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
+ * privately maintained status byte; this status byte can be read back
+ * once NAND_CMD_STATUS is issued
+ */
+static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int num_cw;
+ int i;
+
+ num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
+
+ for (i = 0; i < num_cw; i++) {
+ u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash_status & FS_MPU_ERR)
+ host->status &= ~NAND_STATUS_WP;
+
+ if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
+ (flash_status &
+ FS_DEVICE_STS_ERR)))
+ host->status |= NAND_STATUS_FAIL;
+ }
+}
+
+static void post_command(struct qcom_nand_host *host, int command)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ switch (command) {
+ case NAND_CMD_READID:
+ memcpy(nandc->data_buffer, nandc->reg_read_buf,
+ nandc->buf_count);
+ break;
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ parse_erase_write_errors(host, command);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Implements chip->cmdfunc. It's only used for a limited set of commands.
+ * The rest of the commands wouldn't be called by upper layers. For example,
+ * NAND_CMD_READOOB would never be called because we have our own versions
+ * of read_oob ops for nand_ecc_ctrl.
+ */
+static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ bool wait = false;
+ int ret = 0;
+
+ pre_command(host, command);
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ ret = reset(host);
+ wait = true;
+ break;
+
+ case NAND_CMD_READID:
+ nandc->buf_count = 4;
+ ret = read_id(host, column);
+ wait = true;
+ break;
+
+ case NAND_CMD_PARAM:
+ ret = nandc_param(host);
+ wait = true;
+ break;
+
+ case NAND_CMD_ERASE1:
+ ret = erase_block(host, page_addr);
+ wait = true;
+ break;
+
+ case NAND_CMD_READ0:
+ /* we read the entire page for now */
+ WARN_ON(column != 0);
+
+ host->use_ecc = true;
+ set_address(host, 0, page_addr);
+ update_rw_regs(host, ecc->steps, true);
+ break;
+
+ case NAND_CMD_SEQIN:
+ WARN_ON(column != 0);
+ set_address(host, 0, page_addr);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_NONE:
+ default:
+ break;
+ }
+
+ if (ret) {
+ dev_err(nandc->dev, "failure executing command %d\n",
+ command);
+ free_descs(nandc);
+ return;
+ }
+
+ if (wait) {
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev,
+ "failure submitting descs for command %d\n",
+ command);
+ }
+
+ free_descs(nandc);
+
+ post_command(host, command);
+}
+
+/*
+ * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+ * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+ *
+ * when using RS ECC, the HW reports the same errors when reading an erased CW,
+ * but it notifies that it is an erased CW by placing special characters at
+ * certain offsets in the buffer.
+ *
+ * verify if the page is erased or not, and fix up the page for RS ECC by
+ * replacing the special characters with 0xff.
+ */
+static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
+{
+ u8 empty1, empty2;
+
+ /*
+ * an erased page flags an error in NAND_FLASH_STATUS; check if the page
+ * is erased by looking for 0x54s at offsets 3 and 175 from the
+ * beginning of each codeword
+ */
+
+ empty1 = data_buf[3];
+ empty2 = data_buf[175];
+
+ /*
+ * if the erased codeword markers are present, overwrite them with
+ * 0xffs
+ */
+ if ((empty1 == 0x54 && empty2 == 0xff) ||
+ (empty1 == 0xff && empty2 == 0x54)) {
+ data_buf[3] = 0xff;
+ data_buf[175] = 0xff;
+ }
+
+ /*
+ * check if the entire chunk contains 0xffs or not. if it doesn't, then
+ * restore the original values at the special offsets
+ */
+ if (memchr_inv(data_buf, 0xff, data_len)) {
+ data_buf[3] = empty1;
+ data_buf[175] = empty2;
+
+ return false;
+ }
+
+ return true;
+}
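To see the helper's contract in isolation: on a fully erased RS chunk carrying one 0x54 marker, the marker is rewritten and the chunk reported as erased. A hypothetical, self-contained sketch (the 516-byte size and marker offsets are taken from the comment above):

	/* hypothetical illustration, not driver code */
	u8 chunk[516];

	memset(chunk, 0xff, sizeof(chunk));
	chunk[3] = 0x54;	/* one marker present, the other byte stays 0xff */

	/* returns true and rewrites offset 3 back to 0xff */
	WARN_ON(!erased_chunk_check_and_fixup(chunk, sizeof(chunk)));
	WARN_ON(chunk[3] != 0xff);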
+
+struct read_stats {
+ __le32 flash;
+ __le32 buffer;
+ __le32 erased_cw;
+};
+
+/*
+ * reads back status registers set by the controller to notify page read
+ * errors. this is equivalent to what 'ecc->correct()' would do.
+ */
+static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int max_bitflips = 0;
+ struct read_stats *buf;
+ int i;
+
+ buf = (struct read_stats *)nandc->reg_read_buf;
+
+ for (i = 0; i < ecc->steps; i++, buf++) {
+ u32 flash, buffer, erased_cw;
+ int data_len, oob_len;
+
+ if (i == (ecc->steps - 1)) {
+ data_len = ecc->size - ((ecc->steps - 1) << 2);
+ oob_len = ecc->steps << 2;
+ } else {
+ data_len = host->cw_data;
+ oob_len = 0;
+ }
+
+ flash = le32_to_cpu(buf->flash);
+ buffer = le32_to_cpu(buf->buffer);
+ erased_cw = le32_to_cpu(buf->erased_cw);
+
+ if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+ bool erased;
+
+ /* ignore erased codeword errors */
+ if (host->bch_enabled) {
+ erased = (erased_cw & ERASED_CW) == ERASED_CW ?
+ true : false;
+ } else {
+ erased = erased_chunk_check_and_fixup(data_buf,
+ data_len);
+ }
+
+ if (erased) {
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc->bytes;
+ continue;
+ }
+
+ if (buffer & BS_UNCORRECTABLE_BIT) {
+ int ret, ecclen, extraooblen;
+ void *eccbuf;
+
+ eccbuf = oob_buf ? oob_buf + oob_len : NULL;
+ ecclen = oob_buf ? host->ecc_bytes_hw : 0;
+ extraooblen = oob_buf ? oob_len : 0;
+
+ /*
+ * make sure it isn't an erased page reported
+ * as not-erased by HW because of a few bitflips
+ */
+ ret = nand_check_erased_ecc_chunk(data_buf,
+ data_len, eccbuf, ecclen, oob_buf,
+ extraooblen, ecc->strength);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips =
+ max_t(unsigned int, max_bitflips, ret);
+ }
+ }
+ } else {
+ unsigned int stat;
+
+ stat = buffer & BS_CORRECTABLE_ERR_MSK;
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max(max_bitflips, stat);
+ }
+
+ data_buf += data_len;
+ if (oob_buf)
+ oob_buf += oob_len + ecc->bytes;
+ }
+
+ return max_bitflips;
+}
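To put numbers on the per-codeword split above, take a 2K page with 512-byte steps, so ecc->steps is 4: the last codeword carries 500 data bytes plus all 16 bytes of free OOB, while each earlier codeword carries the full cw_data with no free OOB. A worked restatement (values assumed for illustration):

	/* worked example: 2K page, 512 byte steps => ecc->steps == 4 */
	int steps = 4, size = 512;
	int last_data = size - ((steps - 1) << 2);	/* 512 - 12 = 500 */
	int last_oob = steps << 2;			/* 16 */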
+
+/*
+ * helper to perform the actual page read operation, used by ecc->read_page(),
+ * ecc->read_oob()
+ */
+static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+ u8 *oob_buf)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, ret;
+
+ /* queue cmd descs for each codeword */
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ config_cw_read(nandc);
+
+ if (data_buf)
+ read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size);
+
+ /*
+ * when ecc is enabled, the controller doesn't read the real
+ * or dummy bad block markers in each chunk. To maintain a
+ * consistent layout across RAW and ECC reads, we just
+ * leave the real/dummy BBM offsets empty (i.e., filled with
+ * 0xffs)
+ */
+ if (oob_buf) {
+ int j;
+
+ for (j = 0; j < host->bbm_size; j++)
+ *oob_buf++ = 0xff;
+
+ read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size);
+ }
+
+ if (data_buf)
+ data_buf += data_size;
+ if (oob_buf)
+ oob_buf += oob_size;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to read page/oob\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/*
+ * a helper that copies the last step/codeword of a page (containing free oob)
+ * into our local buffer
+ */
+static int copy_last_cw(struct qcom_nand_host *host, int page)
+{
+ struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int size;
+ int ret;
+
+ clear_read_regs(nandc);
+
+ size = host->use_ecc ? host->cw_data : host->cw_size;
+
+ /* prepare a clean read buffer */
+ memset(nandc->data_buffer, 0xff, size);
+
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, true);
+
+ config_cw_read(nandc);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failed to copy last codeword\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/* implements ecc->read_page() */
+static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u8 *data_buf, *oob_buf = NULL;
+ int ret;
+
+ data_buf = buf;
+ oob_buf = oob_required ? chip->oob_poi : NULL;
+
+ ret = read_page_ecc(host, data_buf, oob_buf);
+ if (ret) {
+ dev_err(nandc->dev, "failure to read page\n");
+ return ret;
+ }
+
+ return parse_read_errors(host, data_buf, oob_buf);
+}
+
+/* implements ecc->read_page_raw() */
+static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u8 *data_buf, *oob_buf;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int i, ret;
+
+ data_buf = buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = false;
+ update_rw_regs(host, ecc->steps, true);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) << 2);
+ oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ config_cw_read(nandc);
+
+ read_data_dma(nandc, reg_off, data_buf, data_size1);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ read_data_dma(nandc, reg_off, oob_buf, oob_size1);
+ reg_off += oob_size1;
+ oob_buf += oob_size1;
+
+ read_data_dma(nandc, reg_off, data_buf, data_size2);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ read_data_dma(nandc, reg_off, oob_buf, oob_size2);
+ oob_buf += oob_size2;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to read raw page\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/* implements ecc->read_oob() */
+static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret;
+
+ clear_read_regs(nandc);
+
+ host->use_ecc = true;
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true);
+
+ ret = read_page_ecc(host, NULL, chip->oob_poi);
+ if (ret)
+ dev_err(nandc->dev, "failure to read oob\n");
+
+ return ret;
+}
+
+/* implements ecc->write_page() */
+static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ clear_read_regs(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = true;
+ update_rw_regs(host, ecc->steps, false);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size, oob_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size = host->cw_data;
+ oob_size = ecc->bytes;
+ }
+
+ config_cw_write_pre(nandc);
+
+ write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
+
+ /*
+ * when ECC is enabled, we don't really need to write anything
+ * to oob for the first n - 1 codewords since these oob regions
+ * just contain ECC bytes that are written by the controller
+ * itself. For the last codeword, we skip the bbm positions and
+ * write to the free oob area.
+ */
+ if (i == (ecc->steps - 1)) {
+ oob_buf += host->bbm_size;
+
+ write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size);
+ }
+
+ config_cw_write_post(nandc);
+
+ data_buf += data_size;
+ oob_buf += oob_size;
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to write page\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/* implements ecc->write_page_raw() */
+static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *data_buf, *oob_buf;
+ int i, ret;
+
+ clear_read_regs(nandc);
+
+ data_buf = (u8 *)buf;
+ oob_buf = chip->oob_poi;
+
+ host->use_ecc = false;
+ update_rw_regs(host, ecc->steps, false);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_size1, data_size2, oob_size1, oob_size2;
+ int reg_off = FLASH_BUF_ACC;
+
+ data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+ oob_size1 = host->bbm_size;
+
+ if (i == (ecc->steps - 1)) {
+ data_size2 = ecc->size - data_size1 -
+ ((ecc->steps - 1) << 2);
+ oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
+ host->spare_bytes;
+ } else {
+ data_size2 = host->cw_data - data_size1;
+ oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+ }
+
+ config_cw_write_pre(nandc);
+
+ write_data_dma(nandc, reg_off, data_buf, data_size1);
+ reg_off += data_size1;
+ data_buf += data_size1;
+
+ write_data_dma(nandc, reg_off, oob_buf, oob_size1);
+ reg_off += oob_size1;
+ oob_buf += oob_size1;
+
+ write_data_dma(nandc, reg_off, data_buf, data_size2);
+ reg_off += data_size2;
+ data_buf += data_size2;
+
+ write_data_dma(nandc, reg_off, oob_buf, oob_size2);
+ oob_buf += oob_size2;
+
+ config_cw_write_post(nandc);
+ }
+
+ ret = submit_descs(nandc);
+ if (ret)
+ dev_err(nandc->dev, "failure to write raw page\n");
+
+ free_descs(nandc);
+
+ return ret;
+}
+
+/*
+ * implements ecc->write_oob()
+ *
+ * the NAND controller cannot write only data or only oob within a codeword,
+ * since ecc is calculated for the combined codeword. we first copy the
+ * entire contents for the last codeword (data + oob), replace the old oob
+ * with the new one in chip->oob_poi, and then write the entire codeword.
+ * this read-copy-write operation results in a slight performance loss.
+ */
+static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ u8 *oob = chip->oob_poi;
+ int free_boff;
+ int data_size, oob_size;
+ int ret, status = 0;
+
+ host->use_ecc = true;
+
+ ret = copy_last_cw(host, page);
+ if (ret)
+ return ret;
+
+ clear_read_regs(nandc);
+
+ /* calculate the data and oob size for the last codeword/step */
+ data_size = ecc->size - ((ecc->steps - 1) << 2);
+ oob_size = ecc->steps << 2;
+
+ free_boff = ecc->layout->oobfree[0].offset;
+
+ /* copy the new oob content into the last codeword */
+ memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size);
+
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, false);
+
+ config_cw_write_pre(nandc);
+ write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ data_size + oob_size);
+ config_cw_write_post(nandc);
+
+ ret = submit_descs(nandc);
+
+ free_descs(nandc);
+
+ if (ret) {
+ dev_err(nandc->dev, "failure to write oob\n");
+ return -EIO;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret, bbpos, bad = 0;
+ u32 flash_status;
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ /*
+ * configure registers for a raw sub page read. the address is set to
+ * the beginning of the last codeword; we don't care about reading the ecc
+ * portion of oob, we just want the first few bytes from this codeword
+ * that contain the BBM
+ */
+ host->use_ecc = false;
+
+ ret = copy_last_cw(host, page);
+ if (ret)
+ goto err;
+
+ flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
+
+ if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+ dev_warn(nandc->dev, "error when trying to read BBM\n");
+ goto err;
+ }
+
+ bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
+
+ bad = nandc->data_buffer[bbpos] != 0xff;
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
+err:
+ return bad;
+}
+
+static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int page, ret, status = 0;
+
+ clear_read_regs(nandc);
+
+ /*
+ * to mark the block as bad, we flash the entire last codeword with 0s.
+ * we don't care about the rest of the content in the codeword since
+ * we aren't going to use this block again
+ */
+ memset(nandc->data_buffer, 0x00, host->cw_size);
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ /* prepare write */
+ host->use_ecc = false;
+ set_address(host, host->cw_size * (ecc->steps - 1), page);
+ update_rw_regs(host, 1, false);
+
+ config_cw_write_pre(nandc);
+ write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
+ config_cw_write_post(nandc);
+
+ ret = submit_descs(nandc);
+
+ free_descs(nandc);
+
+ if (ret) {
+ dev_err(nandc->dev, "failure to update BBM\n");
+ return -EIO;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * the three functions below implement chip->read_byte(), chip->read_buf()
+ * and chip->write_buf() respectively. these aren't used for
+ * reading/writing page data; they are used for smaller data like reading
+ * the id, status, etc.
+ */
+static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ u8 *buf = nandc->data_buffer;
+ u8 ret = 0x0;
+
+ if (host->last_command == NAND_CMD_STATUS) {
+ ret = host->status;
+
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ return ret;
+ }
+
+ if (nandc->buf_start < nandc->buf_count)
+ ret = buf[nandc->buf_start++];
+
+ return ret;
+}
+
+static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
+
+ memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
+ nandc->buf_start += real_len;
+}
+
+static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
+
+ memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
+
+ nandc->buf_start += real_len;
+}
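Tying these small-data ops back to qcom_nandc_command(), a NAND_CMD_READID round trip looks roughly as follows; the calls are made by nand_base and are shown here only to illustrate the path from reg_read_buf to the caller:

	/* illustrative flow, driven by nand_base rather than the driver */
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);	/* read_id() + DMA */
	manf_id = chip->read_byte(mtd);			/* data_buffer[0] */
	dev_id = chip->read_byte(mtd);			/* data_buffer[1] */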
+
+/* we support only one external chip for now */
+static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+ if (chipnr <= 0)
+ return;
+
+ dev_warn(nandc->dev, "invalid chip select\n");
+}
+
+/*
+ * NAND controller page layout info
+ *
+ * Layout with ECC enabled:
+ *
+ * |----------------------| |---------------------------------|
+ * | xx.......yy| | *********xx.......yy|
+ * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
+ * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
+ * | xx.......yy| | *********xx.......yy|
+ * |----------------------| |---------------------------------|
+ * codeword 1,2..n-1 codeword n
+ * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Reserved byte(s)
+ *
+ * 2K page: n = 4, spare = 16 bytes
+ * 4K page: n = 8, spare = 32 bytes
+ * 8K page: n = 16, spare = 64 bytes
+ *
+ * the qcom nand controller operates at a sub page/codeword level. each
+ * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
+ * the number of ECC bytes varies based on the ECC strength and the bus width.
+ *
+ * the first n - 1 codewords contain 516 bytes of user data; the remaining
+ * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
+ * both user data and spare (oobavail) bytes that sum up to 516 bytes.
+ *
+ * When we access a page with ECC enabled, the reserved byte(s) are not
+ * accessible at all. When reading, we fill up these unreadable positions
+ * with 0xffs. When writing, the controller skips writing the inaccessible
+ * bytes.
+ *
+ * Layout with ECC disabled:
+ *
+ * |------------------------------| |---------------------------------------|
+ * | yy xx.......| | bb *********xx.......|
+ * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
+ * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
+ * | yy xx.......| | bb *********xx.......|
+ * |------------------------------| |---------------------------------------|
+ * codeword 1,2..n-1 codeword n
+ * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Dummy Bad Block byte(s)
+ * b = Real Bad Block byte(s)
+ * size1/size2 = function of codeword size and 'n'
+ *
+ * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
+ * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
+ * Block Markers. In the last codeword, this position contains the real BBM
+ *
+ * In order to have a consistent layout between RAW and ECC modes, we assume
+ * the following OOB layout arrangement:
+ *
+ * |-----------| |--------------------|
+ * |yyxx.......| |bb*********xx.......|
+ * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
+ * |yyxx.......| |bb*********xx.......|
+ * |yyxx.......| |bb*********xx.......|
+ * |-----------| |--------------------|
+ * first n - 1 nth OOB region
+ * OOB regions
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = FREE OOB bytes
+ * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
+ * x = Unused byte(s)
+ * b = Real bad block byte(s) (inaccessible when ECC enabled)
+ *
+ * This layout is read as is when ECC is disabled. When ECC is enabled, the
+ * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
+ * and assumed to be 0xffs when we read a page/oob. The ECC, unused and
+ * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout (i.e.,
+ * ecc->bytes is the sum of the three).
+ */
+
+static struct nand_ecclayout *
+qcom_nand_create_layout(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_ecclayout *layout;
+ int i, j, steps, pos = 0, shift = 0;
+
+ layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL);
+ if (!layout)
+ return NULL;
+
+ steps = mtd->writesize / ecc->size;
+ layout->eccbytes = steps * ecc->bytes;
+
+ layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size;
+ layout->oobfree[0].length = steps << 2;
+
+ /*
+ * the oob bytes in the first n - 1 codewords are all grouped together
+ * in the format:
+ * DUMMY_BBM + UNUSED + ECC
+ */
+ for (i = 0; i < steps - 1; i++) {
+ for (j = 0; j < ecc->bytes; j++)
+ layout->eccpos[pos++] = i * ecc->bytes + j;
+ }
+
+ /*
+ * the oob bytes in the last codeword are grouped in the format:
+ * BBM + FREE OOB + UNUSED + ECC
+ */
+
+ /* fill up the bbm positions */
+ for (j = 0; j < host->bbm_size; j++)
+ layout->eccpos[pos++] = i * ecc->bytes + j;
+
+ /*
+ * fill up the ecc and reserved positions; their indices are offset
+ * by the free oob region
+ */
+ shift = layout->oobfree[0].length + host->bbm_size;
+
+ for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++)
+ layout->eccpos[pos++] = i * ecc->bytes + shift + j;
+
+ return layout;
+}
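As a sanity check of the layout math, take a 2K page with 4 bit RS ECC on an 8 bit bus: steps = 4 and ecc->bytes = 10 + 1 + 1 = 12, which fills the standard 64-byte OOB exactly (values assumed for illustration):

	/* worked example: 2K page, 4 bit RS ECC, 8 bit wide bus */
	int steps = 4, ecc_bytes = 12, bbm_size = 1;
	int eccbytes = steps * ecc_bytes;			/* 48 */
	int free_off = (steps - 1) * ecc_bytes + bbm_size;	/* 37 */
	int free_len = steps << 2;				/* 16 */
	/* 48 ecc bytes + 16 free bytes == 64 == oobsize of a 2K page */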
+
+static int qcom_nand_host_setup(struct qcom_nand_host *host)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ int cwperpage, bad_block_byte;
+ bool wide_bus;
+ int ecc_mode = 1;
+
+ /*
+ * the controller requires that each step consist of 512 bytes of data.
+ * bail out if DT has populated a wrong step size.
+ */
+ if (ecc->size != NANDC_STEP_SIZE) {
+ dev_err(nandc->dev, "invalid ecc size\n");
+ return -EINVAL;
+ }
+
+ wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
+
+ if (ecc->strength >= 8) {
+ /* 8 bit ECC defaults to BCH ECC on all platforms */
+ host->bch_enabled = true;
+ ecc_mode = 1;
+
+ if (wide_bus) {
+ host->ecc_bytes_hw = 14;
+ host->spare_bytes = 0;
+ host->bbm_size = 2;
+ } else {
+ host->ecc_bytes_hw = 13;
+ host->spare_bytes = 2;
+ host->bbm_size = 1;
+ }
+ } else {
+ /*
+ * if the controller supports BCH for 4 bit ECC, the controller
+ * uses fewer bytes for ECC. If RS is used, the ECC data is
+ * always 10 bytes
+ */
+ if (nandc->ecc_modes & ECC_BCH_4BIT) {
+ /* BCH */
+ host->bch_enabled = true;
+ ecc_mode = 0;
+
+ if (wide_bus) {
+ host->ecc_bytes_hw = 8;
+ host->spare_bytes = 2;
+ host->bbm_size = 2;
+ } else {
+ host->ecc_bytes_hw = 7;
+ host->spare_bytes = 4;
+ host->bbm_size = 1;
+ }
+ } else {
+ /* RS */
+ host->ecc_bytes_hw = 10;
+
+ if (wide_bus) {
+ host->spare_bytes = 0;
+ host->bbm_size = 2;
+ } else {
+ host->spare_bytes = 1;
+ host->bbm_size = 1;
+ }
+ }
+ }
+
+ /*
+ * we consider ecc->bytes as the sum of all the non-data content in a
+ * step. It gives us a clean representation of the oob area (even if
+ * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
+ * ECC and 12 bytes for 4 bit ECC
+ */
+ ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
+
+ ecc->read_page = qcom_nandc_read_page;
+ ecc->read_page_raw = qcom_nandc_read_page_raw;
+ ecc->read_oob = qcom_nandc_read_oob;
+ ecc->write_page = qcom_nandc_write_page;
+ ecc->write_page_raw = qcom_nandc_write_page_raw;
+ ecc->write_oob = qcom_nandc_write_oob;
+
+ ecc->mode = NAND_ECC_HW;
+
+ ecc->layout = qcom_nand_create_layout(host);
+ if (!ecc->layout)
+ return -ENOMEM;
+
+ cwperpage = mtd->writesize / ecc->size;
+
+ /*
+ * DATA_UD_BYTES varies based on whether the read/write command protects
+ * spare data with ECC too. We protect spare data by default, so we set
+ * it to main + spare data, which are 512 and 4 bytes respectively.
+ */
+ host->cw_data = 516;
+
+ /*
+ * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
+ * for 8 bit ECC
+ */
+ host->cw_size = host->cw_data + ecc->bytes;
+
+ if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
+ dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
+ return -EINVAL;
+ }
+
+ bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
+
+ host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+ | host->cw_data << UD_SIZE_BYTES
+ | 0 << DISABLE_STATUS_AFTER_WRITE
+ | 5 << NUM_ADDR_CYCLES
+ | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+ | 0 << STATUS_BFR_READ
+ | 1 << SET_RD_MODE_AFTER_STATUS
+ | host->spare_bytes << SPARE_SIZE_BYTES;
+
+ host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | bad_block_byte << BAD_BLOCK_BYTE_NUM
+ | 0 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | wide_bus << WIDE_FLASH
+ | host->bch_enabled << ENABLE_BCH_ECC;
+
+ host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+ | host->cw_size << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES;
+
+ host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | wide_bus << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE;
+
+ host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
+ | 0 << ECC_SW_RESET
+ | host->cw_data << ECC_NUM_DATA_BYTES
+ | 1 << ECC_FORCE_CLK_OPEN
+ | ecc_mode << ECC_MODE
+ | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+
+ host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+
+ host->clrflashstatus = FS_READY_BSY_N;
+ host->clrreadstatus = 0xc0;
+
+ dev_dbg(nandc->dev,
+ "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
+ host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
+ host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
+ cwperpage);
+
+ return 0;
+}
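Continuing the same 2K-page RS example, cw_data = 516 and cw_size = 528, so the raw bad block byte programmed into cfg1 lands just inside the last codeword (values assumed for illustration):

	/* worked example: 2K page, 4 bit RS ECC, 8 bit wide bus */
	int writesize = 2048, cwperpage = 4;
	int cw_size = 516 + 12;				/* 528 */
	int bad_block_byte = writesize - cw_size * (cwperpage - 1) + 1;
	/* 2048 - 528 * 3 + 1 = 465 */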
+
+static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+ int ret;
+
+ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(nandc->dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ /*
+ * we use the internal buffer for reading ONFI params, reading small
+ * data like ID and status, and performing read-copy-write operations
+ * when writing to a codeword partially. 532 is the maximum possible
+ * size of a codeword for our nand controller
+ */
+ nandc->buf_size = 532;
+
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
+ GFP_KERNEL);
+ if (!nandc->data_buffer)
+ return -ENOMEM;
+
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
+ GFP_KERNEL);
+ if (!nandc->regs)
+ return -ENOMEM;
+
+ nandc->reg_read_buf = devm_kzalloc(nandc->dev,
+ MAX_REG_RD * sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
+ if (!nandc->reg_read_buf)
+ return -ENOMEM;
+
+ nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
+ if (!nandc->chan) {
+ dev_err(nandc->dev, "failed to request slave channel\n");
+ return -ENODEV;
+ }
+
+ INIT_LIST_HEAD(&nandc->desc_list);
+ INIT_LIST_HEAD(&nandc->host_list);
+
+ spin_lock_init(&nandc->controller.lock);
+ init_waitqueue_head(&nandc->controller.wq);
+
+ return 0;
+}
+
+static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ dma_release_channel(nandc->chan);
+}
+
+/* one time setup of a few nand controller registers */
+static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+{
+ /* kill onenand */
+ nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+
+ /* enable ADM DMA */
+ nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+
+ /* save the original values of these registers */
+ nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
+ nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
+
+ return 0;
+}
+
+static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
+ struct qcom_nand_host *host,
+ struct device_node *dn)
+{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = nandc->dev;
+ int ret;
+
+ ret = of_property_read_u32(dn, "reg", &host->cs);
+ if (ret) {
+ dev_err(dev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+
+ nand_set_flash_node(chip, dn);
+ mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ chip->cmdfunc = qcom_nandc_command;
+ chip->select_chip = qcom_nandc_select_chip;
+ chip->read_byte = qcom_nandc_read_byte;
+ chip->read_buf = qcom_nandc_read_buf;
+ chip->write_buf = qcom_nandc_write_buf;
+
+ /*
+ * the bad block marker is readable only when we read the last codeword
+ * of a page with ECC disabled. currently, the nand_base and nand_bbt
+ * helpers don't allow us to read the BBM from a nand chip with ECC
+ * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
+ * and block_markbad helpers until we permanently switch to using
+ * MTD_OPS_RAW for all drivers (with the help of badblockbits)
+ */
+ chip->block_bad = qcom_nandc_block_bad;
+ chip->block_markbad = qcom_nandc_block_markbad;
+
+ chip->controller = &nandc->controller;
+ chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
+ NAND_SKIP_BBTSCAN;
+
+ /* set up initial status value */
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
+
+ ret = qcom_nand_host_setup(host);
+ if (ret)
+ return ret;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return ret;
+
+ return mtd_device_register(mtd, NULL, 0);
+}
+
+/* parse custom DT properties here */
+static int qcom_nandc_parse_dt(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+ struct device_node *np = nandc->dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
+ if (ret) {
+ dev_err(nandc->dev, "command CRCI unspecified\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
+ if (ret) {
+ dev_err(nandc->dev, "data CRCI unspecified\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_nandc_probe(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc;
+ struct qcom_nand_host *host;
+ const void *dev_data;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node, *child;
+ struct resource *res;
+ int ret;
+
+ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+ if (!nandc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, nandc);
+ nandc->dev = dev;
+
+ dev_data = of_device_get_match_data(dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "failed to get device data\n");
+ return -ENODEV;
+ }
+
+ nandc->ecc_modes = (unsigned long)dev_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nandc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nandc->base))
+ return PTR_ERR(nandc->base);
+
+ nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
+
+ nandc->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(nandc->core_clk))
+ return PTR_ERR(nandc->core_clk);
+
+ nandc->aon_clk = devm_clk_get(dev, "aon");
+ if (IS_ERR(nandc->aon_clk))
+ return PTR_ERR(nandc->aon_clk);
+
+ ret = qcom_nandc_parse_dt(pdev);
+ if (ret)
+ return ret;
+
+ ret = qcom_nandc_alloc(nandc);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(nandc->core_clk);
+ if (ret)
+ goto err_core_clk;
+
+ ret = clk_prepare_enable(nandc->aon_clk);
+ if (ret)
+ goto err_aon_clk;
+
+ ret = qcom_nandc_setup(nandc);
+ if (ret)
+ goto err_setup;
+
+ for_each_available_child_of_node(dn, child) {
+ if (of_device_is_compatible(child, "qcom,nandcs")) {
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ of_node_put(child);
+ ret = -ENOMEM;
+ goto err_cs_init;
+ }
+
+ ret = qcom_nand_host_init(nandc, host, child);
+ if (ret) {
+ devm_kfree(dev, host);
+ continue;
+ }
+
+ list_add_tail(&host->node, &nandc->host_list);
+ }
+ }
+
+ if (list_empty(&nandc->host_list)) {
+ ret = -ENODEV;
+ goto err_cs_init;
+ }
+
+ return 0;
+
+err_cs_init:
+ list_for_each_entry(host, &nandc->host_list, node)
+ nand_release(nand_to_mtd(&host->chip));
+err_setup:
+ clk_disable_unprepare(nandc->aon_clk);
+err_aon_clk:
+ clk_disable_unprepare(nandc->core_clk);
+err_core_clk:
+ qcom_nandc_unalloc(nandc);
+
+ return ret;
+}
+
+static int qcom_nandc_remove(struct platform_device *pdev)
+{
+ struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+ struct qcom_nand_host *host;
+
+ list_for_each_entry(host, &nandc->host_list, node)
+ nand_release(nand_to_mtd(&host->chip));
+
+ qcom_nandc_unalloc(nandc);
+
+ clk_disable_unprepare(nandc->aon_clk);
+ clk_disable_unprepare(nandc->core_clk);
+
+ return 0;
+}
+
+#define EBI2_NANDC_ECC_MODES (ECC_RS_4BIT | ECC_BCH_8BIT)
+
+/*
+ * data will hold a struct pointer containing more differences once we support
+ * more controller variants
+ */
+static const struct of_device_id qcom_nandc_of_match[] = {
+ { .compatible = "qcom,ipq806x-nand",
+ .data = (void *)EBI2_NANDC_ECC_MODES,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
+
+static struct platform_driver qcom_nandc_driver = {
+ .driver = {
+ .name = "qcom-nandc",
+ .of_match_table = qcom_nandc_of_match,
+ },
+ .probe = qcom_nandc_probe,
+ .remove = qcom_nandc_remove,
+};
+module_platform_driver(qcom_nandc_driver);
+
+MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 01ac74fa3b95..9c9397b54b2c 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -861,9 +861,6 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->ecc.mode = NAND_ECC_SOFT;
#endif
- if (set->ecc_layout != NULL)
- chip->ecc.layout = set->ecc_layout;
-
if (set->disable_ecc)
chip->ecc.mode = NAND_ECC_NONE;
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 51e10a35fe08..1c03eee44f3d 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -60,6 +60,7 @@
#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
#define NFC_REG_SPARE_AREA 0x00A0
+#define NFC_REG_PAT_ID 0x00A4
#define NFC_RAM0_BASE 0x0400
#define NFC_RAM1_BASE 0x0800
@@ -538,6 +539,174 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
}
+/* These seed values have been extracted from Allwinner's BSP */
+static const u16 sunxi_nfc_randomizer_page_seeds[] = {
+ 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+ 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+ 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+ 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+ 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+ 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+ 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+ 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+ 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+ 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+ 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+ 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+ 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+ 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+ 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+ 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+/*
+ * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
+ * have been generated using
+ * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
+ * the randomizer engine does internally before de/scrambling OOB data.
+ *
+ * Those tables are statically defined to avoid calculating randomizer state
+ * at runtime.
+ */
+static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
+ 0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
+ 0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
+ 0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
+ 0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
+ 0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
+ 0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
+ 0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
+ 0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
+ 0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
+ 0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
+ 0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
+ 0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
+ 0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
+ 0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
+ 0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
+ 0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
+};
+
+static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
+ 0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
+ 0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
+ 0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
+ 0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
+ 0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
+ 0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
+ 0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
+ 0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
+ 0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
+ 0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
+ 0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
+ 0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
+ 0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
+ 0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
+ 0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
+ 0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
+};
+
+static u16 sunxi_nfc_randomizer_step(u16 state, int count)
+{
+ state &= 0x7fff;
+
+ /*
+ * This loop is just a simple implementation of a Fibonacci LFSR using
+ * the x^16 + x^15 + 1 polynomial.
+ */
+ while (count--)
+ state = ((state >> 1) |
+ (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+ return state;
+}
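Since the LFSR is pure arithmetic, it can be exercised outside the kernel. A minimal host-side sketch; per the table comments above, stepping a page seed by 512 * 8 + 15 should reproduce the corresponding ecc512 seed:

	/* host-side sketch of the same Fibonacci LFSR (illustrative) */
	unsigned short state = 0x2b75;	/* sunxi_nfc_randomizer_page_seeds[0] */
	int count = 512 * 8 + 15;	/* what the ecc512 table encodes */

	while (count--)
		state = ((state >> 1) |
			 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
	/* expected: state == sunxi_nfc_randomizer_ecc512_seeds[0] */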
+
+static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
+{
+ const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
+ int mod = mtd_div_by_ws(mtd->erasesize, mtd);
+
+ if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
+ mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
+
+ if (ecc) {
+ if (mtd->ecc_step_size == 512)
+ seeds = sunxi_nfc_randomizer_ecc512_seeds;
+ else
+ seeds = sunxi_nfc_randomizer_ecc1024_seeds;
+ }
+
+ return seeds[page % mod];
+}
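The seed index wraps at the number of pages per eraseblock (capped at the table size), so pages at the same offset within a block share a seed; e.g., with hypothetical 256 KiB blocks and 4 KiB pages:

	/* hypothetical geometry: 256 KiB blocks, 4 KiB pages */
	int mod = (256 * 1024) / (4 * 1024);	/* 64 pages per block */
	u16 seed = sunxi_nfc_randomizer_page_seeds[page % mod];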
+
+static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
+ int page, bool ecc)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ u32 ecc_ctl;
+ u16 state;
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ state = sunxi_nfc_randomizer_state(mtd, page, ecc);
+ ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
+ writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
+{
+ u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
+
+ bbm[0] ^= state;
+ bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
+}
+
+static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len,
+ bool ecc, int page)
+{
+ sunxi_nfc_randomizer_config(mtd, page, ecc);
+ sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_write_buf(mtd, buf, len);
+ sunxi_nfc_randomizer_disable(mtd);
+}
+
+static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
+ int len, bool ecc, int page)
+{
+ sunxi_nfc_randomizer_config(mtd, page, ecc);
+ sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_read_buf(mtd, buf, len);
+ sunxi_nfc_randomizer_disable(mtd);
+}
+
static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
@@ -574,18 +743,20 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
u8 *data, int data_off,
u8 *oob, int oob_off,
int *cur_off,
- unsigned int *max_bitflips)
+ unsigned int *max_bitflips,
+ bool bbm, int page)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int raw_mode = 0;
u32 status;
int ret;
if (*cur_off != data_off)
nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
- sunxi_nfc_read_buf(mtd, NULL, ecc->size);
+ sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
@@ -594,25 +765,54 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
if (ret)
return ret;
+ sunxi_nfc_randomizer_enable(mtd);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ sunxi_nfc_randomizer_disable(mtd);
if (ret)
return ret;
+ *cur_off = oob_off + ecc->bytes + 4;
+
status = readl(nfc->regs + NFC_REG_ECC_ST);
+ if (status & NFC_ECC_PAT_FOUND(0)) {
+ u8 pattern = 0xff;
+
+ if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1)))
+ pattern = 0x0;
+
+ memset(data, pattern, ecc->size);
+ memset(oob, pattern, ecc->bytes + 4);
+
+ return 1;
+ }
+
ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0)));
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- sunxi_nfc_read_buf(mtd, oob, ecc->bytes + 4);
+ sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page);
if (status & NFC_ECC_ERR(0)) {
+ /*
+ * Re-read the data with the randomizer disabled to identify
+ * bitflips in erased pages.
+ */
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+ nand->read_buf(mtd, data, ecc->size);
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand->read_buf(mtd, oob, ecc->bytes + 4);
+ }
+
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
NULL, 0, ecc->strength);
+ if (ret >= 0)
+ raw_mode = 1;
} else {
/*
* The engine protects 4 bytes of OOB data per chunk.
@@ -620,6 +820,10 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
*/
sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)),
oob);
+
+ /* De-randomize the Bad Block Marker. */
+ if (bbm && nand->options & NAND_NEED_SCRAMBLING)
+ sunxi_nfc_randomize_bbm(mtd, page, oob);
}
if (ret < 0) {
@@ -629,13 +833,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
}
- *cur_off = oob_off + ecc->bytes + 4;
-
- return 0;
+ return raw_mode;
}
static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
- u8 *oob, int *cur_off)
+ u8 *oob, int *cur_off,
+ bool randomize, int page)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -649,7 +852,11 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
offset + mtd->writesize, -1);
- sunxi_nfc_read_buf(mtd, oob + offset, len);
+ if (!randomize)
+ sunxi_nfc_read_buf(mtd, oob + offset, len);
+ else
+ sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
+ false, page);
*cur_off = mtd->oobsize + mtd->writesize;
}
@@ -662,7 +869,8 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
const u8 *data, int data_off,
const u8 *oob, int oob_off,
- int *cur_off)
+ int *cur_off, bool bbm,
+ int page)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
@@ -672,11 +880,20 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
if (data_off != *cur_off)
nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
- sunxi_nfc_write_buf(mtd, data, ecc->size);
+ sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
/* Fill OOB data in */
- writel(sunxi_nfc_buf_to_user_data(oob),
- nfc->regs + NFC_REG_USER_DATA(0));
+ if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) {
+ u8 user_data[4];
+
+ memcpy(user_data, oob, 4);
+ sunxi_nfc_randomize_bbm(mtd, page, user_data);
+ writel(sunxi_nfc_buf_to_user_data(user_data),
+ nfc->regs + NFC_REG_USER_DATA(0));
+ } else {
+ writel(sunxi_nfc_buf_to_user_data(oob),
+ nfc->regs + NFC_REG_USER_DATA(0));
+ }
if (data_off + ecc->size != oob_off)
nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
@@ -685,11 +902,13 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
if (ret)
return ret;
+ sunxi_nfc_randomizer_enable(mtd);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
NFC_ACCESS_DIR | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ sunxi_nfc_randomizer_disable(mtd);
if (ret)
return ret;
@@ -699,7 +918,8 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
}
static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
- u8 *oob, int *cur_off)
+ u8 *oob, int *cur_off,
+ int page)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -713,7 +933,7 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
nand->cmdfunc(mtd, NAND_CMD_RNDIN,
offset + mtd->writesize, -1);
- sunxi_nfc_write_buf(mtd, oob + offset, len);
+ sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
*cur_off = mtd->oobsize + mtd->writesize;
}
@@ -725,6 +945,7 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
unsigned int max_bitflips = 0;
int ret, i, cur_off = 0;
+ bool raw_mode = false;
sunxi_nfc_hw_ecc_enable(mtd);
@@ -736,13 +957,17 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
oob_off + mtd->writesize,
- &cur_off, &max_bitflips);
- if (ret)
+ &cur_off, &max_bitflips,
+ !i, page);
+ if (ret < 0)
return ret;
+ else if (ret)
+ raw_mode = true;
}
if (oob_required)
- sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off);
+ sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+ !raw_mode, page);
sunxi_nfc_hw_ecc_disable(mtd);
@@ -767,13 +992,14 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
oob_off + mtd->writesize,
- &cur_off);
+ &cur_off, !i, page);
if (ret)
return ret;
}
- if (oob_required)
- sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off);
+ if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ &cur_off, page);
sunxi_nfc_hw_ecc_disable(mtd);
@@ -788,6 +1014,7 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
unsigned int max_bitflips = 0;
int ret, i, cur_off = 0;
+ bool raw_mode = false;
sunxi_nfc_hw_ecc_enable(mtd);
@@ -799,13 +1026,16 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
oob_off, &cur_off,
- &max_bitflips);
- if (ret)
+ &max_bitflips, !i, page);
+ if (ret < 0)
return ret;
+ else if (ret)
+ raw_mode = true;
}
if (oob_required)
- sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off);
+ sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+ !raw_mode, page);
sunxi_nfc_hw_ecc_disable(mtd);
@@ -829,13 +1059,15 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
- oob, oob_off, &cur_off);
+ oob, oob_off, &cur_off,
+ false, page);
if (ret)
return ret;
}
- if (oob_required)
- sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off);
+ if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ &cur_off, page);
sunxi_nfc_hw_ecc_disable(mtd);
@@ -1345,6 +1577,9 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
if (nand->bbt_options & NAND_BBT_USE_FLASH)
nand->bbt_options |= NAND_BBT_NO_OOB;
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
ret = sunxi_nand_chip_init_timings(chip, np);
if (ret) {
dev_err(dev, "could not configure chip timings: %d\n", ret);
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 034420f313d5..293feb19b0b1 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -795,8 +795,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
goto error;
}
- /* propagate ecc.layout to mtd_info */
- mtd->ecclayout = chip->ecc.layout;
chip->ecc.read_page = vf610_nfc_read_page;
chip->ecc.write_page = vf610_nfc_write_page;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 0aacf125938b..24a1388d3031 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -35,10 +35,10 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
#include <asm/mach/flash.h>
#include <linux/platform_data/mtd-onenand-omap2.h>
-#include <asm/gpio.h>
#include <linux/omap-dma.h>
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 43b3392ffee7..af28bb3ae7cf 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1124,11 +1124,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
(int)len);
- if (ops->mode == MTD_OPS_AUTO_OOB)
- oobsize = this->ecclayout->oobavail;
- else
- oobsize = mtd->oobsize;
-
+ oobsize = mtd_oobavail(mtd, ops);
oobcolumn = from & (mtd->oobsize - 1);
/* Do not allow reads past end of device */
@@ -1229,11 +1225,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
(int)len);
- if (ops->mode == MTD_OPS_AUTO_OOB)
- oobsize = this->ecclayout->oobavail;
- else
- oobsize = mtd->oobsize;
-
+ oobsize = mtd_oobavail(mtd, ops);
oobcolumn = from & (mtd->oobsize - 1);
/* Do not allow reads past end of device */
@@ -1365,7 +1357,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
ops->oobretlen = 0;
if (mode == MTD_OPS_AUTO_OOB)
- oobsize = this->ecclayout->oobavail;
+ oobsize = mtd->oobavail;
else
oobsize = mtd->oobsize;
@@ -1885,12 +1877,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
/* Check zero length */
if (!len)
return 0;
-
- if (ops->mode == MTD_OPS_AUTO_OOB)
- oobsize = this->ecclayout->oobavail;
- else
- oobsize = mtd->oobsize;
-
+ oobsize = mtd_oobavail(mtd, ops);
oobcolumn = to & (mtd->oobsize - 1);
column = to & (mtd->writesize - 1);
@@ -2063,7 +2050,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
ops->oobretlen = 0;
if (mode == MTD_OPS_AUTO_OOB)
- oobsize = this->ecclayout->oobavail;
+ oobsize = mtd->oobavail;
else
oobsize = mtd->oobsize;
@@ -2599,6 +2586,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
@@ -2610,7 +2598,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
onenand_get_device(mtd, FL_WRITING);
- ret = mtd_block_markbad(mtd, ofs);
+ ret = this->block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
@@ -4049,12 +4037,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
* The number of bytes available for a client to place data into
* the out of band area
*/
- this->ecclayout->oobavail = 0;
+ mtd->oobavail = 0;
for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES &&
this->ecclayout->oobfree[i].length; i++)
- this->ecclayout->oobavail +=
- this->ecclayout->oobfree[i].length;
- mtd->oobavail = this->ecclayout->oobavail;
+ mtd->oobavail += this->ecclayout->oobfree[i].length;
mtd->ecclayout = this->ecclayout;
mtd->ecc_strength = 1;
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 08d0085f3e93..680188a88130 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -179,7 +179,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
* by the onenand_release function.
*
*/
-int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
@@ -247,6 +247,3 @@ int onenand_default_bbt(struct mtd_info *mtd)
return onenand_scan_bbt(mtd, bbm->badblock_pattern);
}
-
-EXPORT_SYMBOL(onenand_scan_bbt);
-EXPORT_SYMBOL(onenand_default_bbt);
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 0dc927540b3d..d42c98e1f581 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -9,6 +9,7 @@ if MTD_SPI_NOR
config MTD_MT81xx_NOR
tristate "Mediatek MT81xx SPI NOR flash controller"
+ depends on HAS_IOMEM
help
This enables access to SPI NOR flash, using MT81xx SPI NOR flash
controller. This controller does not support generic SPI BUS, it only
@@ -30,7 +31,7 @@ config MTD_SPI_NOR_USE_4K_SECTORS
config SPI_FSL_QUADSPI
tristate "Freescale Quad SPI controller"
- depends on ARCH_MXC || COMPILE_TEST
+ depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
depends on HAS_IOMEM
help
This enables support for the Quad SPI controller in master mode.
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 54640f1eb3a1..9ab2b51d54b8 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -213,6 +213,7 @@ enum fsl_qspi_devtype {
FSL_QUADSPI_IMX6SX,
FSL_QUADSPI_IMX7D,
FSL_QUADSPI_IMX6UL,
+ FSL_QUADSPI_LS1021A,
};
struct fsl_qspi_devtype_data {
@@ -258,6 +259,14 @@ static struct fsl_qspi_devtype_data imx6ul_data = {
| QUADSPI_QUIRK_4X_INT_CLK,
};
+static struct fsl_qspi_devtype_data ls1021a_data = {
+ .devtype = FSL_QUADSPI_LS1021A,
+ .rxfifo = 128,
+ .txfifo = 64,
+ .ahb_buf_size = 1024,
+ .driver_data = 0,
+};
+
#define FSL_QSPI_MAX_CHIP 4
struct fsl_qspi {
struct spi_nor nor[FSL_QSPI_MAX_CHIP];
@@ -275,6 +284,7 @@ struct fsl_qspi {
u32 clk_rate;
unsigned int chip_base_addr; /* We may support two chips. */
bool has_second_chip;
+ bool big_endian;
struct mutex lock;
struct pm_qos_request pm_qos_req;
};
@@ -300,6 +310,28 @@ static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
}
/*
+ * R/W functions for big- or little-endian registers:
+ * The qSPI controller's endianness is independent of the CPU core's.
+ * So far the CPU cores are little-endian, but the qSPI controller comes
+ * in both big-endian and little-endian versions.
+ */
+static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
+{
+ if (q->big_endian)
+ iowrite32be(val, addr);
+ else
+ iowrite32(val, addr);
+}
+
+static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
+{
+ if (q->big_endian)
+ return ioread32be(addr);
+ else
+ return ioread32(addr);
+}
+
+/*
 * An IC bug makes us re-arrange the 32-bit data.
* The following chips, such as IMX6SLX, have fixed this bug.
*/
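
These helpers are selected at probe time from the "big-endian" devicetree
property (see the probe hunk further down). A typical read-modify-write
through them, as used throughout the rest of this patch, looks like:

    u32 reg;

    reg = qspi_readl(q, q->iobase + QUADSPI_MCR);    /* endian-aware read */
    qspi_writel(q, reg | QUADSPI_MCR_CLR_TXF_MASK,   /* endian-aware write */
                q->iobase + QUADSPI_MCR);
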
@@ -310,14 +342,14 @@ static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
{
- writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
- writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
}
static inline void fsl_qspi_lock_lut(struct fsl_qspi *q)
{
- writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
- writel(QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
}
static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
@@ -326,8 +358,8 @@ static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
u32 reg;
/* clear interrupt */
- reg = readl(q->iobase + QUADSPI_FR);
- writel(reg, q->iobase + QUADSPI_FR);
+ reg = qspi_readl(q, q->iobase + QUADSPI_FR);
+ qspi_writel(q, reg, q->iobase + QUADSPI_FR);
if (reg & QUADSPI_FR_TFF_MASK)
complete(&q->c);
@@ -348,7 +380,7 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
/* Clear all the LUT table */
for (i = 0; i < QUADSPI_LUT_NUM; i++)
- writel(0, base + QUADSPI_LUT_BASE + i * 4);
+ qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4);
/* Quad Read */
lut_base = SEQID_QUAD_READ * 4;
@@ -364,14 +396,15 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
dummy = 8;
}
- writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
- writel(LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
+ qspi_writel(q, LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
base + QUADSPI_LUT(lut_base + 1));
/* Write enable */
lut_base = SEQID_WREN * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_WREN), base + QUADSPI_LUT(lut_base));
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN),
+ base + QUADSPI_LUT(lut_base));
/* Page Program */
lut_base = SEQID_PP * 4;
@@ -385,13 +418,15 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
addrlen = ADDR32BIT;
}
- writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
- writel(LUT0(FSL_WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
+ qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0),
+ base + QUADSPI_LUT(lut_base + 1));
/* Read Status */
lut_base = SEQID_RDSR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(FSL_READ, PAD1, 0x1),
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) |
+ LUT1(FSL_READ, PAD1, 0x1),
base + QUADSPI_LUT(lut_base));
/* Erase a sector */
@@ -400,40 +435,46 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
cmd = q->nor[0].erase_opcode;
addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
- writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ qspi_writel(q, LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
base + QUADSPI_LUT(lut_base));
/* Erase the whole chip */
lut_base = SEQID_CHIP_ERASE * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE),
base + QUADSPI_LUT(lut_base));
/* READ ID */
lut_base = SEQID_RDID * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(FSL_READ, PAD1, 0x8),
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) |
+ LUT1(FSL_READ, PAD1, 0x8),
base + QUADSPI_LUT(lut_base));
/* Write Register */
lut_base = SEQID_WRSR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(FSL_WRITE, PAD1, 0x2),
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) |
+ LUT1(FSL_WRITE, PAD1, 0x2),
base + QUADSPI_LUT(lut_base));
/* Read Configuration Register */
lut_base = SEQID_RDCR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(FSL_READ, PAD1, 0x1),
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) |
+ LUT1(FSL_READ, PAD1, 0x1),
base + QUADSPI_LUT(lut_base));
/* Write disable */
lut_base = SEQID_WRDI * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_WRDI), base + QUADSPI_LUT(lut_base));
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI),
+ base + QUADSPI_LUT(lut_base));
/* Enter 4 Byte Mode (Micron) */
lut_base = SEQID_EN4B * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_EN4B), base + QUADSPI_LUT(lut_base));
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B),
+ base + QUADSPI_LUT(lut_base));
/* Enter 4 Byte Mode (Spansion) */
lut_base = SEQID_BRWR * 4;
- writel(LUT0(CMD, PAD1, SPINOR_OP_BRWR), base + QUADSPI_LUT(lut_base));
+ qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR),
+ base + QUADSPI_LUT(lut_base));
fsl_qspi_lock_lut(q);
}
@@ -488,15 +529,16 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
q->chip_base_addr, addr, len, cmd);
/* save the reg */
- reg = readl(base + QUADSPI_MCR);
+ reg = qspi_readl(q, base + QUADSPI_MCR);
- writel(q->memmap_phy + q->chip_base_addr + addr, base + QUADSPI_SFAR);
- writel(QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
+ qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr,
+ base + QUADSPI_SFAR);
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS,
base + QUADSPI_RBCT);
- writel(reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
+ qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR);
do {
- reg2 = readl(base + QUADSPI_SR);
+ reg2 = qspi_readl(q, base + QUADSPI_SR);
if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) {
udelay(1);
dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2);
@@ -507,21 +549,22 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
/* trigger the LUT now */
seqid = fsl_qspi_get_seqid(q, cmd);
- writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR);
+ qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
+ base + QUADSPI_IPCR);
/* Wait for the interrupt. */
if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) {
dev_err(q->dev,
"cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
- cmd, addr, readl(base + QUADSPI_FR),
- readl(base + QUADSPI_SR));
+ cmd, addr, qspi_readl(q, base + QUADSPI_FR),
+ qspi_readl(q, base + QUADSPI_SR));
err = -ETIMEDOUT;
} else {
err = 0;
}
/* restore the MCR */
- writel(reg, base + QUADSPI_MCR);
+ qspi_writel(q, reg, base + QUADSPI_MCR);
return err;
}
@@ -533,7 +576,7 @@ static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf)
int i = 0;
while (len > 0) {
- tmp = readl(q->iobase + QUADSPI_RBDR + i * 4);
+ tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4);
tmp = fsl_qspi_endian_xchg(q, tmp);
dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n",
q->chip_base_addr, tmp);
@@ -561,9 +604,9 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
{
u32 reg;
- reg = readl(q->iobase + QUADSPI_MCR);
+ reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
- writel(reg, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
/*
* The minimum delay : 1 AHB + 2 SFCK clocks.
@@ -572,7 +615,7 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q)
udelay(1);
reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
- writel(reg, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}
static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
@@ -586,20 +629,20 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor,
q->chip_base_addr, to, count);
/* clear the TX FIFO. */
- tmp = readl(q->iobase + QUADSPI_MCR);
- writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
+ tmp = qspi_readl(q, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
/* fill the TX data to the FIFO */
for (j = 0, i = ((count + 3) / 4); j < i; j++) {
tmp = fsl_qspi_endian_xchg(q, *txbuf);
- writel(tmp, q->iobase + QUADSPI_TBDR);
+ qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
txbuf++;
}
/* fill the TXFIFO upto 16 bytes for i.MX7d */
if (needs_fill_txfifo(q))
for (; i < 4; i++)
- writel(tmp, q->iobase + QUADSPI_TBDR);
+ qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR);
/* Trigger it */
ret = fsl_qspi_runcmd(q, opcode, to, count);
@@ -615,10 +658,10 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
int nor_size = q->nor_size;
void __iomem *base = q->iobase;
- writel(nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
- writel(nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
- writel(nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
- writel(nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
+ qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD);
+ qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD);
+ qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD);
+ qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD);
}
/*
@@ -640,24 +683,26 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q)
int seqid;
/* AHB configuration for access buffer 0/1/2 .*/
- writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
- writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
- writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR);
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR);
+ qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR);
/*
* Set ADATSZ with the maximum AHB buffer size to improve the
* read performance.
*/
- writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8)
- << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR);
+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
+ ((q->devtype_data->ahb_buf_size / 8)
+ << QUADSPI_BUF3CR_ADATSZ_SHIFT),
+ base + QUADSPI_BUF3CR);
/* We only use the buffer3 */
- writel(0, base + QUADSPI_BUF0IND);
- writel(0, base + QUADSPI_BUF1IND);
- writel(0, base + QUADSPI_BUF2IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF0IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF1IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF2IND);
/* Set the default lut sequence for AHB Read. */
seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
- writel(seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
+ qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
q->iobase + QUADSPI_BFGENCR);
}
@@ -713,7 +758,7 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
return ret;
/* Reset the module */
- writel(QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
base + QUADSPI_MCR);
udelay(1);
@@ -721,24 +766,24 @@ static int fsl_qspi_nor_setup(struct fsl_qspi *q)
fsl_qspi_init_lut(q);
/* Disable the module */
- writel(QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
base + QUADSPI_MCR);
- reg = readl(base + QUADSPI_SMPR);
- writel(reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ reg = qspi_readl(q, base + QUADSPI_SMPR);
+ qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
| QUADSPI_SMPR_FSPHS_MASK
| QUADSPI_SMPR_HSENA_MASK
| QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
/* Enable the module */
- writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
base + QUADSPI_MCR);
/* clear all interrupt status */
- writel(0xffffffff, q->iobase + QUADSPI_FR);
+ qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
/* enable the interrupt */
- writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
+ qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
return 0;
}
@@ -776,6 +821,7 @@ static const struct of_device_id fsl_qspi_dt_ids[] = {
{ .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
{ .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
{ .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
+ { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
@@ -954,6 +1000,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
if (IS_ERR(q->iobase))
return PTR_ERR(q->iobase);
+ q->big_endian = of_property_read_bool(np, "big-endian");
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
if (!devm_request_mem_region(dev, res->start, resource_size(res),
@@ -1101,8 +1148,8 @@ static int fsl_qspi_remove(struct platform_device *pdev)
}
/* disable the hardware */
- writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
- writel(0x0, q->iobase + QUADSPI_RSER);
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
+ qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
mutex_destroy(&q->lock);
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index d5f850d035bb..8bed1a4cb79c 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -371,8 +371,8 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
return ret;
}
-static int __init mtk_nor_init(struct mt8173_nor *mt8173_nor,
- struct device_node *flash_node)
+static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
+ struct device_node *flash_node)
{
int ret;
struct spi_nor *nor;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index ed0c19c558b5..157841dc3e99 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -61,14 +61,20 @@ struct flash_info {
u16 addr_width;
u16 flags;
-#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
-#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
-#define SST_WRITE 0x04 /* use SST byte programming */
-#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
-#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
-#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
-#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
-#define USE_FSR 0x80 /* use flag status register */
+#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
+#define SST_WRITE BIT(2) /* use SST byte programming */
+#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
+#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
+#define USE_FSR BIT(7) /* use flag status register */
+#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB BIT(9) /*
+ * Flash SR has Top/Bottom (TB) protect
+ * bit. Must be used with
+ * SPI_NOR_HAS_LOCK.
+ */
};
#define JEDEC_MFR(info) ((info)->id[0])
@@ -434,32 +440,58 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
} else {
pow = ((sr & mask) ^ mask) >> shift;
*len = mtd->size >> pow;
- *ofs = mtd->size - *len;
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
+ *ofs = 0;
+ else
+ *ofs = mtd->size - *len;
}
}
/*
- * Return 1 if the entire region is locked, 0 otherwise
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
*/
-static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
- u8 sr)
+static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr, bool locked)
{
loff_t lock_offs;
uint64_t lock_len;
+ if (!len)
+ return 1;
+
stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
- return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+ if (locked)
+ /* Requested range is a sub-range of locked range */
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+ else
+ /* Requested range does not overlap with locked range */
+ return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
+
+static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return stm_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return stm_check_lock_status_sr(nor, ofs, len, sr, false);
}
/*
* Lock a region of the flash. Compatible with ST Micro and similar flash.
- * Supports only the block protection bits BP{0,1,2} in the status register
+ * Supports the block protection bits BP{0,1,2} in the status register
* (SR). Does not support these features found in newer SR bitfields:
- * - TB: top/bottom protect - only handle TB=0 (top protect)
* - SEC: sector/block protect - only handle SEC=0 (block protect)
* - CMP: complement protect - only support CMP=0 (range is not complemented)
*
+ * Support for the following is provided conditionally for some flash:
+ * - TB: top/bottom protect
+ *
* Sample table portion for 8MB flash (Winbond w25q64fw):
*
* SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
@@ -472,6 +504,13 @@ static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
* 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
* 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
* X | X | 1 | 1 | 1 | 8 MB | ALL
+ * ------|-------|-------|-------|-------|---------------|-------------------
+ * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
+ * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
+ * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
+ * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
+ * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
+ * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
*
* Returns negative on errors, 0 on success.
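
As a worked example of the pow/val computation in the hunks below (hedged,
using the 8 MB Winbond part from the table): locking the upper 2 MB means
lock_len = 2 MB, so:

    u8 mask = SR_BP2 | SR_BP1 | SR_BP0;      /* 0b11100 = 0x1c */
    u8 shift = ffs(mask) - 1;                /* 2 */
    u8 pow = ilog2(SZ_8M) - ilog2(SZ_2M);    /* 23 - 21 = 2 */
    u8 val = mask - (pow << shift);          /* 0x1c - 0x08 = 0x14 = BP2|BP0 */

which matches the "0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4" row above.
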
*/
@@ -481,20 +520,39 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
int status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
int ret;
status_old = read_sr(nor);
if (status_old < 0)
return status_old;
- /* SPI NOR always locks to the end */
- if (ofs + len != mtd->size) {
- /* Does combined region extend to end? */
- if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
- status_old))
- return -EINVAL;
- len = mtd->size - ofs;
- }
+ /* If nothing in our range is unlocked, we don't need to do anything */
+ if (stm_is_locked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is unlocked, we can't use 'bottom' protection */
+ if (!stm_is_locked_sr(nor, 0, ofs, status_old))
+ can_be_bottom = false;
+
+ /* If anything above us is unlocked, we can't use 'top' protection */
+ if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_top = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should end up locked */
+ if (use_top)
+ lock_len = mtd->size - ofs;
+ else
+ lock_len = ofs + len;
/*
* Need smallest pow such that:
@@ -505,7 +563,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
*
* pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
*/
- pow = ilog2(mtd->size) - ilog2(len);
+ pow = ilog2(mtd->size) - ilog2(lock_len);
val = mask - (pow << shift);
if (val & ~mask)
return -EINVAL;
@@ -513,10 +571,20 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if (!(val & mask))
return -EINVAL;
- status_new = (status_old & ~mask) | val;
+ status_new = (status_old & ~mask & ~SR_TB) | val;
+
+ /* Disallow further writes if WP pin is asserted */
+ status_new |= SR_SRWD;
+
+ if (!use_top)
+ status_new |= SR_TB;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
/* Only modify protection if it will not unlock other areas */
- if ((status_new & mask) <= (status_old & mask))
+ if ((status_new & mask) < (status_old & mask))
return -EINVAL;
write_enable(nor);
@@ -537,17 +605,40 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
int status_old, status_new;
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
u8 shift = ffs(mask) - 1, pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
int ret;
status_old = read_sr(nor);
if (status_old < 0)
return status_old;
- /* Cannot unlock; would unlock larger region than requested */
- if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
- status_old))
+ /* If nothing in our range is locked, we don't need to do anything */
+ if (stm_is_unlocked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is locked, we can't use 'top' protection */
+ if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
+ can_be_top = false;
+
+ /* If anything above us is locked, we can't use 'bottom' protection */
+ if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_bottom = false;
+
+ if (!can_be_bottom && !can_be_top)
return -EINVAL;
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should remain locked */
+ if (use_top)
+ lock_len = mtd->size - (ofs + len);
+ else
+ lock_len = ofs;
+
/*
* Need largest pow such that:
*
@@ -557,8 +648,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
*
* pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
*/
- pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
- if (ofs + len == mtd->size) {
+ pow = ilog2(mtd->size) - order_base_2(lock_len);
+ if (lock_len == 0) {
val = 0; /* fully unlocked */
} else {
val = mask - (pow << shift);
@@ -567,10 +658,21 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
return -EINVAL;
}
- status_new = (status_old & ~mask) | val;
+ status_new = (status_old & ~mask & ~SR_TB) | val;
+
+ /* Don't protect status register if we're fully unlocked */
+ if (lock_len == mtd->size)
+ status_new &= ~SR_SRWD;
+
+ if (!use_top)
+ status_new |= SR_TB;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
/* Only modify protection if it will not lock other areas */
- if ((status_new & mask) >= (status_old & mask))
+ if ((status_new & mask) > (status_old & mask))
return -EINVAL;
write_enable(nor);
@@ -762,8 +864,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
- { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
- { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
+ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
@@ -797,6 +899,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
{ "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
@@ -860,11 +963,23 @@ static const struct flash_info spi_nor_ids[] = {
{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
- { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ {
+ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
- { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
- { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ {
+ "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
@@ -1100,45 +1215,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
return 0;
}
-static int micron_quad_enable(struct spi_nor *nor)
-{
- int ret;
- u8 val;
-
- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading EVCR\n", ret);
- return ret;
- }
-
- write_enable(nor);
-
- /* set EVCR, enable quad I/O */
- nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
- ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
- if (ret < 0) {
- dev_err(nor->dev, "error while writing EVCR register\n");
- return ret;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- /* read EVCR and check it */
- ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
- if (ret < 0) {
- dev_err(nor->dev, "error %d reading EVCR\n", ret);
- return ret;
- }
- if (val & EVCR_QUAD_EN_MICRON) {
- dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
int status;
@@ -1152,12 +1228,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
}
return status;
case SNOR_MFR_MICRON:
- status = micron_quad_enable(nor);
- if (status) {
- dev_err(nor->dev, "Micron quad-read not enabled\n");
- return -EINVAL;
- }
- return status;
+ return 0;
default:
status = spansion_quad_enable(nor);
if (status) {
@@ -1233,9 +1304,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
JEDEC_MFR(info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(info) == SNOR_MFR_SST) {
+ JEDEC_MFR(info) == SNOR_MFR_SST ||
+ info->flags & SPI_NOR_HAS_LOCK) {
write_enable(nor);
write_sr(nor, 0);
+ spi_nor_wait_till_ready(nor);
}
if (!mtd->name)
@@ -1249,7 +1322,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
mtd->_read = spi_nor_read;
/* NOR protection support for STmicro/Micron chips and similar */
- if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
+ if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
+ info->flags & SPI_NOR_HAS_LOCK) {
nor->flash_lock = stm_lock;
nor->flash_unlock = stm_unlock;
nor->flash_is_locked = stm_is_locked;
@@ -1269,6 +1343,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (info->flags & USE_FSR)
nor->flags |= SNOR_F_USE_FSR;
+ if (info->flags & SPI_NOR_HAS_TB)
+ nor->flags |= SNOR_F_HAS_SR_TB;
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
/* prefer "small sector" erase if possible */
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index 79316159eec6..88b6c81cebbe 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -187,7 +187,7 @@ static int double_bit_error_detect(void *error_data, void *error_ecc,
__nand_calculate_ecc(error_data, size, calc_ecc);
ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
- return (ret == -1) ? 0 : -EINVAL;
+ return (ret == -EBADMSG) ? 0 : -EINVAL;
}
static const struct nand_ecc_test nand_ecc_test[] = {
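
The fix tracks the updated return convention of __nand_correct_data(), which
presumably now returns the number of corrected bitflips on success and
-EBADMSG (instead of a bare -1) when the error is uncorrectable; callers are
expected to check it along these lines:

    ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
    if (ret == -EBADMSG)
            pr_info("uncorrectable double-bit error detected, as expected\n");
    else if (ret < 0)
            pr_err("unexpected error %d\n", ret);
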
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 31762120eb56..1cb3f7758fb6 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -215,19 +215,19 @@ static int verify_eraseblock(int ebnum)
pr_info("ignoring error as within bitflip_limit\n");
}
- if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
+ if (use_offset != 0 || use_len < mtd->oobavail) {
int k;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
- ops.ooblen = mtd->ecclayout->oobavail;
+ ops.ooblen = mtd->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
- if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
+ if (err || ops.oobretlen != mtd->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
errcnt += 1;
@@ -244,7 +244,7 @@ static int verify_eraseblock(int ebnum)
/* verify post-(use_offset + use_len) area for 0xff */
k = use_offset + use_len;
bitflips += memffshow(addr, k, readbuf + k,
- mtd->ecclayout->oobavail - k);
+ mtd->oobavail - k);
if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
@@ -269,8 +269,8 @@ static int verify_eraseblock_in_one_go(int ebnum)
struct mtd_oob_ops ops;
int err = 0;
loff_t addr = (loff_t)ebnum * mtd->erasesize;
- size_t len = mtd->ecclayout->oobavail * pgcnt;
- size_t oobavail = mtd->ecclayout->oobavail;
+ size_t len = mtd->oobavail * pgcnt;
+ size_t oobavail = mtd->oobavail;
size_t bitflips;
int i;
@@ -394,8 +394,8 @@ static int __init mtd_oobtest_init(void)
goto out;
use_offset = 0;
- use_len = mtd->ecclayout->oobavail;
- use_len_max = mtd->ecclayout->oobavail;
+ use_len = mtd->oobavail;
+ use_len_max = mtd->oobavail;
vary_offset = 0;
/* First test: write all OOB, read it back and verify */
@@ -460,8 +460,8 @@ static int __init mtd_oobtest_init(void)
/* Write all eraseblocks */
use_offset = 0;
- use_len = mtd->ecclayout->oobavail;
- use_len_max = mtd->ecclayout->oobavail;
+ use_len = mtd->oobavail;
+ use_len_max = mtd->oobavail;
vary_offset = 1;
prandom_seed_state(&rnd_state, 5);
@@ -471,8 +471,8 @@ static int __init mtd_oobtest_init(void)
/* Check all eraseblocks */
use_offset = 0;
- use_len = mtd->ecclayout->oobavail;
- use_len_max = mtd->ecclayout->oobavail;
+ use_len = mtd->oobavail;
+ use_len_max = mtd->oobavail;
vary_offset = 1;
prandom_seed_state(&rnd_state, 5);
err = verify_all_eraseblocks();
@@ -480,8 +480,8 @@ static int __init mtd_oobtest_init(void)
goto out;
use_offset = 0;
- use_len = mtd->ecclayout->oobavail;
- use_len_max = mtd->ecclayout->oobavail;
+ use_len = mtd->oobavail;
+ use_len_max = mtd->oobavail;
vary_offset = 0;
/* Fourth test: try to write off end of device */
@@ -501,7 +501,7 @@ static int __init mtd_oobtest_init(void)
ops.retlen = 0;
ops.ooblen = 1;
ops.oobretlen = 0;
- ops.ooboffs = mtd->ecclayout->oobavail;
+ ops.ooboffs = mtd->oobavail;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
pr_info("attempting to start write past end of OOB\n");
@@ -521,7 +521,7 @@ static int __init mtd_oobtest_init(void)
ops.retlen = 0;
ops.ooblen = 1;
ops.oobretlen = 0;
- ops.ooboffs = mtd->ecclayout->oobavail;
+ ops.ooboffs = mtd->oobavail;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
pr_info("attempting to start read past end of OOB\n");
@@ -543,7 +543,7 @@ static int __init mtd_oobtest_init(void)
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
- ops.ooblen = mtd->ecclayout->oobavail + 1;
+ ops.ooblen = mtd->oobavail + 1;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
@@ -563,7 +563,7 @@ static int __init mtd_oobtest_init(void)
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
- ops.ooblen = mtd->ecclayout->oobavail + 1;
+ ops.ooblen = mtd->oobavail + 1;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
@@ -587,7 +587,7 @@ static int __init mtd_oobtest_init(void)
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
- ops.ooblen = mtd->ecclayout->oobavail;
+ ops.ooblen = mtd->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 1;
ops.datbuf = NULL;
@@ -607,7 +607,7 @@ static int __init mtd_oobtest_init(void)
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
- ops.ooblen = mtd->ecclayout->oobavail;
+ ops.ooblen = mtd->oobavail;
ops.oobretlen = 0;
ops.ooboffs = 1;
ops.datbuf = NULL;
@@ -638,7 +638,7 @@ static int __init mtd_oobtest_init(void)
for (i = 0; i < ebcnt - 1; ++i) {
int cnt = 2;
int pg;
- size_t sz = mtd->ecclayout->oobavail;
+ size_t sz = mtd->oobavail;
if (bbt[i] || bbt[i + 1])
continue;
addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
@@ -673,13 +673,12 @@ static int __init mtd_oobtest_init(void)
for (i = 0; i < ebcnt - 1; ++i) {
if (bbt[i] || bbt[i + 1])
continue;
- prandom_bytes_state(&rnd_state, writebuf,
- mtd->ecclayout->oobavail * 2);
+ prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
ops.mode = MTD_OPS_AUTO_OOB;
ops.len = 0;
ops.retlen = 0;
- ops.ooblen = mtd->ecclayout->oobavail * 2;
+ ops.ooblen = mtd->oobavail * 2;
ops.oobretlen = 0;
ops.ooboffs = 0;
ops.datbuf = NULL;
@@ -688,7 +687,7 @@ static int __init mtd_oobtest_init(void)
if (err)
goto out;
if (memcmpshow(addr, readbuf, writebuf,
- mtd->ecclayout->oobavail * 2)) {
+ mtd->oobavail * 2)) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
errcnt += 1;
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 2a45ac210b16..989036c681b8 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -153,3 +153,52 @@ int ubi_check_pattern(const void *buf, uint8_t patt, int size)
return 0;
return 1;
}
+
+/* Normal UBI messages */
+void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_notice(UBI_NAME_STR "%d: %pV\n", ubi->ubi_num, &vaf);
+
+ va_end(args);
+}
+
+/* UBI warning messages */
+void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_warn(UBI_NAME_STR "%d warning: %ps: %pV\n",
+ ubi->ubi_num, __builtin_return_address(0), &vaf);
+
+ va_end(args);
+}
+
+/* UBI error messages */
+void ubi_err(const struct ubi_device *ubi, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_err(UBI_NAME_STR "%d error: %ps: %pV\n",
+ ubi->ubi_num, __builtin_return_address(0), &vaf);
+ va_end(args);
+}
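
Since the printout now happens inside a shared function, __func__ would
always expand to "ubi_warn" or "ubi_err"; the caller is therefore resolved
via __builtin_return_address(0) and printed with the %ps specifier. The
pattern in isolation:

    pr_warn("called from %ps\n", __builtin_return_address(0));
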
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 2974b67f6c6c..dadc6a9d5755 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -49,15 +49,19 @@
/* UBI name used for character devices, sysfs, etc */
#define UBI_NAME_STR "ubi"
+struct ubi_device;
+
/* Normal UBI messages */
-#define ubi_msg(ubi, fmt, ...) pr_notice(UBI_NAME_STR "%d: " fmt "\n", \
- ubi->ubi_num, ##__VA_ARGS__)
+__printf(2, 3)
+void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...);
+
/* UBI warning messages */
-#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
- ubi->ubi_num, __func__, ##__VA_ARGS__)
+__printf(2, 3)
+void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...);
+
/* UBI error messages */
-#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
- ubi->ubi_num, __func__, ##__VA_ARGS__)
+__printf(2, 3)
+void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
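
Making these real functions keeps gcc's format-string checking thanks to the
__printf(2, 3) annotations, so a mismatched call such as this hypothetical
one is still diagnosed at compile time:

    /* warning: format '%d' expects 'int', but argument 3 has type 'char *' */
    ubi_err(ubi, "bad PEB %d", "not-a-number");
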
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f184fb5bd110..a24c18eee598 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -193,6 +193,14 @@ config GENEVE
To compile this driver as a module, choose M here: the module
will be called geneve.
+config MACSEC
+ tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
+ ---help---
+ MACsec is an encryption standard for Ethernet.
+
config NETCONSOLE
tristate "Network console logging support"
---help---
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 900b0c5320bb..1aa7cb845663 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
+obj-$(CONFIG_MACSEC) += macsec.o
obj-$(CONFIG_MACVLAN) += macvlan.o
obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_MII) += mii.o
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 4cbb8b27a891..b9304a295f86 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -357,6 +357,14 @@ static u8 __get_duplex(struct port *port)
return retval;
}
+static void __ad_actor_update_port(struct port *port)
+{
+ const struct bonding *bond = bond_get_bond_by_slave(port->slave);
+
+ port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+ port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+}
+
/* Conversions */
/**
@@ -1963,9 +1971,7 @@ void bond_3ad_bind_slave(struct slave *slave)
port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
ad_update_actor_keys(port, false);
/* actor system is the bond's system */
- port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
- port->actor_system_priority =
- BOND_AD_INFO(bond).system.sys_priority;
+ __ad_actor_update_port(port);
/* tx timer(to verify that no more than MAX_TX_IN_SECOND
* lacpdu's are sent in one second)
*/
@@ -2148,6 +2154,38 @@ out:
}
/**
+ * bond_3ad_update_ad_actor_settings - reflect change of actor settings to ports
+ * @bond: bonding struct to work on
+ *
+ * If an ad_actor setting gets changed we need to update the individual port
+ * settings so that the bond device uses the new values when it is brought up.
+ */
+void bond_3ad_update_ad_actor_settings(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *slave;
+
+ ASSERT_RTNL();
+
+ BOND_AD_INFO(bond).system.sys_priority = bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
+
+ spin_lock_bh(&bond->mode_lock);
+ bond_for_each_slave(bond, slave, iter) {
+ struct port *port = &(SLAVE_AD_INFO(slave))->port;
+
+ __ad_actor_update_port(port);
+ port->ntt = true;
+ }
+ spin_unlock_bh(&bond->mode_lock);
+}
+
+/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @bond: bonding struct to work on
*
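
The new helper is wired into the ad_actor_sys_prio and ad_actor_system
option setters in the bond_options.c hunks below, which is what allows their
BOND_OPTFLAG_IFDOWN restriction to be dropped; a sketch of the resulting
setter flow (names from this patch):

    bond->params.ad_actor_sys_prio = newval->value;
    bond_3ad_update_ad_actor_settings(bond);   /* refresh every port, set ntt */
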
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index bb9e9fc45e1b..c5ac160a8ae9 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -159,7 +159,7 @@ static int tlb_initialize(struct bonding *bond)
new_hashtbl = kzalloc(size, GFP_KERNEL);
if (!new_hashtbl)
- return -1;
+ return -ENOMEM;
spin_lock_bh(&bond->mode_lock);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b7f1a9919033..941ec99cd3b6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -376,22 +376,20 @@ down:
static void bond_update_speed_duplex(struct slave *slave)
{
struct net_device *slave_dev = slave->dev;
- struct ethtool_cmd ecmd;
- u32 slave_speed;
+ struct ethtool_link_ksettings ecmd;
int res;
slave->speed = SPEED_UNKNOWN;
slave->duplex = DUPLEX_UNKNOWN;
- res = __ethtool_get_settings(slave_dev, &ecmd);
+ res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
if (res < 0)
return;
- slave_speed = ethtool_cmd_speed(&ecmd);
- if (slave_speed == 0 || slave_speed == ((__u32) -1))
+ if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
return;
- switch (ecmd.duplex) {
+ switch (ecmd.base.duplex) {
case DUPLEX_FULL:
case DUPLEX_HALF:
break;
@@ -399,8 +397,8 @@ static void bond_update_speed_duplex(struct slave *slave)
return;
}
- slave->speed = slave_speed;
- slave->duplex = ecmd.duplex;
+ slave->speed = ecmd.base.speed;
+ slave->duplex = ecmd.base.duplex;
return;
}
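
The legacy struct ethtool_cmd split the link speed across its speed and
speed_hi fields, which had to be recombined with ethtool_cmd_speed(); in the
ksettings API the speed is presumably a plain u32 that can be read directly:

    struct ethtool_link_ksettings ecmd;

    if (__ethtool_get_link_ksettings(slave_dev, &ecmd) >= 0 &&
        ecmd.base.speed != (u32)SPEED_UNKNOWN)
            pr_debug("%s: %u Mb/s\n", slave_dev->name, ecmd.base.speed);
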
@@ -620,8 +618,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
static void bond_set_dev_addr(struct net_device *bond_dev,
struct net_device *slave_dev)
{
- netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
- bond_dev, slave_dev, slave_dev->addr_len);
+ netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
+ bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
bond_dev->addr_assign_type = NET_ADDR_STOLEN;
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
@@ -930,11 +928,10 @@ void bond_select_active_slave(struct bonding *bond)
if (!rv)
return;
- if (netif_carrier_ok(bond->dev)) {
+ if (netif_carrier_ok(bond->dev))
netdev_info(bond->dev, "first active interface up!\n");
- } else {
+ else
netdev_info(bond->dev, "now running without any active interface!\n");
- }
}
}
@@ -1180,9 +1177,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
- if (bond_should_deliver_exact_match(skb, slave, bond)) {
+ if (bond_should_deliver_exact_match(skb, slave, bond))
return RX_HANDLER_EXACT;
- }
skb->dev = bond->dev;
@@ -1243,7 +1239,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
{
struct slave *slave = NULL;
- slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
if (!slave)
return NULL;
@@ -1383,8 +1379,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->flags & IFF_UP) {
netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
slave_dev->name);
- res = -EPERM;
- goto err_undo_flags;
+ return -EPERM;
}
/* set bonding device ether type by slave - bonding netdevices are
@@ -1404,8 +1399,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = notifier_to_errno(res);
if (res) {
netdev_err(bond_dev, "refused to change device type\n");
- res = -EBUSY;
- goto err_undo_flags;
+ return -EBUSY;
}
/* Flush unicast and multicast addresses */
@@ -1425,8 +1419,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
} else if (bond_dev->type != slave_dev->type) {
netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
slave_dev->name, slave_dev->type, bond_dev->type);
- res = -EINVAL;
- goto err_undo_flags;
+ return -EINVAL;
}
if (slave_ops->ndo_set_mac_address == NULL) {
@@ -3308,6 +3301,30 @@ static int bond_close(struct net_device *bond_dev)
return 0;
}
+/* Fold stats, assuming all rtnl_link_stats64 fields are u64, while
+ * allowing that some drivers provide only 32-bit values.
+ */
+static void bond_fold_stats(struct rtnl_link_stats64 *_res,
+ const struct rtnl_link_stats64 *_new,
+ const struct rtnl_link_stats64 *_old)
+{
+ const u64 *new = (const u64 *)_new;
+ const u64 *old = (const u64 *)_old;
+ u64 *res = (u64 *)_res;
+ int i;
+
+ for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
+ u64 nv = new[i];
+ u64 ov = old[i];
+
+ /* detects if this particular field is 32bit only */
+ if (((nv | ov) >> 32) == 0)
+ res[i] += (u32)nv - (u32)ov;
+ else
+ res[i] += nv - ov;
+ }
+}
+
static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -3316,43 +3333,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct list_head *iter;
struct slave *slave;
+ spin_lock(&bond->stats_lock);
memcpy(stats, &bond->bond_stats, sizeof(*stats));
- bond_for_each_slave(bond, slave, iter) {
- const struct rtnl_link_stats64 *sstats =
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ const struct rtnl_link_stats64 *new =
dev_get_stats(slave->dev, &temp);
- struct rtnl_link_stats64 *pstats = &slave->slave_stats;
-
- stats->rx_packets += sstats->rx_packets - pstats->rx_packets;
- stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
- stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
- stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
-
- stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
- stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
- stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
- stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
-
- stats->multicast += sstats->multicast - pstats->multicast;
- stats->collisions += sstats->collisions - pstats->collisions;
-
- stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
- stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
- stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
- stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
- stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
- stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
-
- stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
- stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
- stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
- stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
- stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
+
+ bond_fold_stats(stats, new, &slave->slave_stats);
/* save off the slave stats for the next run */
- memcpy(pstats, sstats, sizeof(*sstats));
+ memcpy(&slave->slave_stats, new, sizeof(*new));
}
+ rcu_read_unlock();
+
memcpy(&bond->bond_stats, stats, sizeof(*stats));
+ spin_unlock(&bond->stats_lock);
return stats;
}
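
A quick worked example of the folding logic above: if a driver exposes a
32-bit counter that wrapped from 0xfffffff0 to 0x00000010 between two calls,
the raw u64 subtraction would underflow, while the u32 path yields the
correct delta:

    u64 res = 0, nv = 0x00000010, ov = 0xfffffff0;

    if (((nv | ov) >> 32) == 0)              /* both fit in 32 bits */
            res += (u32)nv - (u32)ov;        /* = 0x20, not a ~2^64 value */
    else
            res += nv - ov;
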
@@ -4166,6 +4163,7 @@ void bond_setup(struct net_device *bond_dev)
struct bonding *bond = netdev_priv(bond_dev);
spin_lock_init(&bond->mode_lock);
+ spin_lock_init(&bond->stats_lock);
bond->params = bonding_defaults;
/* Initialize pointers */
@@ -4181,7 +4179,7 @@ void bond_setup(struct net_device *bond_dev)
SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
/* Initialize the device options */
- bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
+ bond_dev->flags |= IFF_MASTER;
bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 55e93b6b6d21..577e57cad1dc 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -402,7 +402,6 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_AD_ACTOR_SYS_PRIO,
.name = "ad_actor_sys_prio",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
- .flags = BOND_OPTFLAG_IFDOWN,
.values = bond_ad_actor_sys_prio_tbl,
.set = bond_option_ad_actor_sys_prio_set,
},
@@ -410,7 +409,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_AD_ACTOR_SYSTEM,
.name = "ad_actor_system",
.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
- .flags = BOND_OPTFLAG_RAWVAL | BOND_OPTFLAG_IFDOWN,
+ .flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_ad_actor_system_set,
},
[BOND_OPT_AD_USER_PORT_KEY] = {
@@ -1392,6 +1391,8 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
newval->value);
bond->params.ad_actor_sys_prio = newval->value;
+ bond_3ad_update_ad_actor_settings(bond);
+
return 0;
}
@@ -1418,6 +1419,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
ether_addr_copy(bond->params.ad_actor_system, mac);
+ bond_3ad_update_ad_actor_settings(bond);
+
return 0;
err:
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 6d04183ed955..0d40aef928e2 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -70,13 +70,6 @@ config CAN_AT91
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
-config CAN_TI_HECC
- depends on ARM
- tristate "TI High End CAN Controller"
- ---help---
- Driver for TI HECC (High End CAN Controller) module found on many
- TI devices. The device specifications are available from www.ti.com
-
config CAN_BFIN
depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
tristate "Analog Devices Blackfin on-chip CAN"
@@ -86,30 +79,12 @@ config CAN_BFIN
To compile this driver as a module, choose M here: the
module will be called bfin_can.
-config CAN_JANZ_ICAN3
- tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
- depends on MFD_JANZ_CMODIO
- ---help---
- Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
- connects to a MODULbus carrier board.
-
- This driver can also be built as a module. If so, the module will be
- called janz-ican3.ko.
-
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on ARM || PPC
---help---
 Say Y here if you want support for Freescale FlexCAN.
-config PCH_CAN
- tristate "Intel EG20T PCH CAN controller"
- depends on PCI && (X86_32 || COMPILE_TEST)
- ---help---
- This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
- is an IOH for x86 embedded processor (Intel Atom E6xx series).
- This driver can access CAN bus.
-
config CAN_GRCAN
tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
depends on OF && HAS_DMA
@@ -119,9 +94,19 @@ config CAN_GRCAN
endian syntheses of the cores would need some modifications on
the hardware level to work.
+config CAN_JANZ_ICAN3
+ tristate "Janz VMOD-ICAN3 Intelligent CAN controller"
+ depends on MFD_JANZ_CMODIO
+ ---help---
+ Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which
+ connects to a MODULbus carrier board.
+
+ This driver can also be built as a module. If so, the module will be
+ called janz-ican3.ko.
+
config CAN_RCAR
tristate "Renesas R-Car CAN controller"
- depends on ARM
+ depends on ARCH_RENESAS || ARM
---help---
Say Y here if you want to use CAN controller found on Renesas R-Car
SoCs.
@@ -139,6 +124,13 @@ config CAN_SUN4I
To compile this driver as a module, choose M here: the module will
be called sun4i_can.
+config CAN_TI_HECC
+ depends on ARM
+ tristate "TI High End CAN Controller"
+ ---help---
+ Driver for TI HECC (High End CAN Controller) module found on many
+ TI devices. The device specifications are available from www.ti.com
+
config CAN_XILINXCAN
tristate "Xilinx CAN"
depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST
@@ -147,22 +139,24 @@ config CAN_XILINXCAN
Xilinx CAN driver. This driver supports both soft AXI CAN IP and
Zynq CANPS IP.
-source "drivers/net/can/mscan/Kconfig"
-
-source "drivers/net/can/sja1000/Kconfig"
+config PCH_CAN
+ tristate "Intel EG20T PCH CAN controller"
+ depends on PCI && (X86_32 || COMPILE_TEST)
+ ---help---
+ This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
+ is an IOH for x86 embedded processor (Intel Atom E6xx series).
+ This driver can access CAN bus.
source "drivers/net/can/c_can/Kconfig"
-
-source "drivers/net/can/m_can/Kconfig"
-
source "drivers/net/can/cc770/Kconfig"
-
+source "drivers/net/can/ifi_canfd/Kconfig"
+source "drivers/net/can/m_can/Kconfig"
+source "drivers/net/can/mscan/Kconfig"
+source "drivers/net/can/sja1000/Kconfig"
+source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
-
source "drivers/net/can/usb/Kconfig"
-source "drivers/net/can/softing/Kconfig"
-
endif
config CAN_DEBUG_DEVICES
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 1f21cef1d458..e3db0c807f55 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -14,21 +14,22 @@ obj-y += spi/
obj-y += usb/
obj-y += softing/
-obj-$(CONFIG_CAN_SJA1000) += sja1000/
-obj-$(CONFIG_CAN_MSCAN) += mscan/
-obj-$(CONFIG_CAN_C_CAN) += c_can/
-obj-$(CONFIG_CAN_M_CAN) += m_can/
-obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
-obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
-obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
+obj-$(CONFIG_CAN_CC770) += cc770/
+obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
-obj-$(CONFIG_PCH_CAN) += pch_can.o
obj-$(CONFIG_CAN_GRCAN) += grcan.o
+obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
+obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
+obj-$(CONFIG_CAN_MSCAN) += mscan/
+obj-$(CONFIG_CAN_M_CAN) += m_can/
obj-$(CONFIG_CAN_RCAR) += rcar_can.o
+obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o
+obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
+obj-$(CONFIG_PCH_CAN) += pch_can.o
subdir-ccflags-y += -D__CHECK_ENDIAN__
subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/ifi_canfd/Kconfig b/drivers/net/can/ifi_canfd/Kconfig
new file mode 100644
index 000000000000..9e8934ff63a7
--- /dev/null
+++ b/drivers/net/can/ifi_canfd/Kconfig
@@ -0,0 +1,8 @@
+config CAN_IFI_CANFD
+ depends on HAS_IOMEM
+ tristate "IFI CAN_FD IP"
+ ---help---
+ This driver adds support for the I/F/I CAN_FD soft IP block
+ connected to the "platform bus" (Linux abstraction for directly
+ to the processor attached devices). The CAN_FD is most often
+ synthesised into an FPGA or CPLD.
diff --git a/drivers/net/can/ifi_canfd/Makefile b/drivers/net/can/ifi_canfd/Makefile
new file mode 100644
index 000000000000..b229960cdf39
--- /dev/null
+++ b/drivers/net/can/ifi_canfd/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the IFI CANFD controller driver.
+#
+
+obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd.o
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
new file mode 100644
index 000000000000..a1bd54ffd31e
--- /dev/null
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -0,0 +1,944 @@
+/*
+ * CAN bus driver for IFI CANFD controller
+ *
+ * Copyright (C) 2016 Marek Vasut <marex@denx.de>
+ *
+ * Details about this controller can be found at
+ * http://www.ifi-pld.de/IP/CANFD/canfd.html
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <linux/can/dev.h>
+
+#define IFI_CANFD_STCMD 0x0
+#define IFI_CANFD_STCMD_HARDRESET 0xDEADCAFD
+#define IFI_CANFD_STCMD_ENABLE BIT(0)
+#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2)
+#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3)
+#define IFI_CANFD_STCMD_BUSOFF BIT(4)
+#define IFI_CANFD_STCMD_BUSMONITOR BIT(16)
+#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
+#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
+#define IFI_CANFD_STCMD_ENABLE_ISO BIT(25)
+#define IFI_CANFD_STCMD_NORMAL_MODE ((u32)BIT(31))
+
+#define IFI_CANFD_RXSTCMD 0x4
+#define IFI_CANFD_RXSTCMD_REMOVE_MSG BIT(0)
+#define IFI_CANFD_RXSTCMD_RESET BIT(7)
+#define IFI_CANFD_RXSTCMD_EMPTY BIT(8)
+#define IFI_CANFD_RXSTCMD_OVERFLOW BIT(13)
+
+#define IFI_CANFD_TXSTCMD 0x8
+#define IFI_CANFD_TXSTCMD_ADD_MSG BIT(0)
+#define IFI_CANFD_TXSTCMD_HIGH_PRIO BIT(1)
+#define IFI_CANFD_TXSTCMD_RESET BIT(7)
+#define IFI_CANFD_TXSTCMD_EMPTY BIT(8)
+#define IFI_CANFD_TXSTCMD_FULL BIT(12)
+#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
+
+#define IFI_CANFD_INTERRUPT 0xc
+#define IFI_CANFD_INTERRUPT_ERROR_WARNING ((u32)BIT(1))
+#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
+#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
+#define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY BIT(24)
+#define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER BIT(25)
+#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31))
+
+#define IFI_CANFD_IRQMASK 0x10
+#define IFI_CANFD_IRQMASK_SET_ERR BIT(7)
+#define IFI_CANFD_IRQMASK_SET_TS BIT(15)
+#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16)
+#define IFI_CANFD_IRQMASK_SET_TX BIT(23)
+#define IFI_CANFD_IRQMASK_RXFIFO_NEMPTY BIT(24)
+#define IFI_CANFD_IRQMASK_SET_RX ((u32)BIT(31))
+
+#define IFI_CANFD_TIME 0x14
+#define IFI_CANFD_FTIME 0x18
+#define IFI_CANFD_TIME_TIMEB_OFF 0
+#define IFI_CANFD_TIME_TIMEA_OFF 8
+#define IFI_CANFD_TIME_PRESCALE_OFF 16
+#define IFI_CANFD_TIME_SJW_OFF_ISO 25
+#define IFI_CANFD_TIME_SJW_OFF_BOSCH 28
+#define IFI_CANFD_TIME_SET_SJW_BOSCH BIT(6)
+#define IFI_CANFD_TIME_SET_TIMEB_BOSCH BIT(7)
+#define IFI_CANFD_TIME_SET_PRESC_BOSCH BIT(14)
+#define IFI_CANFD_TIME_SET_TIMEA_BOSCH BIT(15)
+
+#define IFI_CANFD_TDELAY 0x1c
+
+#define IFI_CANFD_ERROR 0x20
+#define IFI_CANFD_ERROR_TX_OFFSET 0
+#define IFI_CANFD_ERROR_TX_MASK 0xff
+#define IFI_CANFD_ERROR_RX_OFFSET 16
+#define IFI_CANFD_ERROR_RX_MASK 0xff
+
+#define IFI_CANFD_ERRCNT 0x24
+
+#define IFI_CANFD_SUSPEND 0x28
+
+#define IFI_CANFD_REPEAT 0x2c
+
+#define IFI_CANFD_TRAFFIC 0x30
+
+#define IFI_CANFD_TSCONTROL 0x34
+
+#define IFI_CANFD_TSC 0x38
+
+#define IFI_CANFD_TST 0x3c
+
+#define IFI_CANFD_RES1 0x40
+
+#define IFI_CANFD_RES2 0x44
+
+#define IFI_CANFD_PAR 0x48
+
+#define IFI_CANFD_CANCLOCK 0x4c
+
+#define IFI_CANFD_SYSCLOCK 0x50
+
+#define IFI_CANFD_VER 0x54
+
+#define IFI_CANFD_IP_ID 0x58
+#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD
+
+#define IFI_CANFD_TEST 0x5c
+
+#define IFI_CANFD_RXFIFO_TS_63_32 0x60
+
+#define IFI_CANFD_RXFIFO_TS_31_0 0x64
+
+#define IFI_CANFD_RXFIFO_DLC 0x68
+#define IFI_CANFD_RXFIFO_DLC_DLC_OFFSET 0
+#define IFI_CANFD_RXFIFO_DLC_DLC_MASK 0xf
+#define IFI_CANFD_RXFIFO_DLC_RTR BIT(4)
+#define IFI_CANFD_RXFIFO_DLC_EDL BIT(5)
+#define IFI_CANFD_RXFIFO_DLC_BRS BIT(6)
+#define IFI_CANFD_RXFIFO_DLC_ESI BIT(7)
+#define IFI_CANFD_RXFIFO_DLC_OBJ_OFFSET 8
+#define IFI_CANFD_RXFIFO_DLC_OBJ_MASK 0x1ff
+#define IFI_CANFD_RXFIFO_DLC_FNR_OFFSET 24
+#define IFI_CANFD_RXFIFO_DLC_FNR_MASK 0xff
+
+#define IFI_CANFD_RXFIFO_ID 0x6c
+#define IFI_CANFD_RXFIFO_ID_ID_OFFSET 0
+#define IFI_CANFD_RXFIFO_ID_ID_STD_MASK CAN_SFF_MASK
+#define IFI_CANFD_RXFIFO_ID_ID_STD_OFFSET 0
+#define IFI_CANFD_RXFIFO_ID_ID_STD_WIDTH 10
+#define IFI_CANFD_RXFIFO_ID_ID_XTD_MASK CAN_EFF_MASK
+#define IFI_CANFD_RXFIFO_ID_ID_XTD_OFFSET 11
+#define IFI_CANFD_RXFIFO_ID_ID_XTD_WIDTH 18
+#define IFI_CANFD_RXFIFO_ID_IDE BIT(29)
+
+#define IFI_CANFD_RXFIFO_DATA 0x70 /* 0x70..0xac */
+
+#define IFI_CANFD_TXFIFO_SUSPEND_US 0xb0
+
+#define IFI_CANFD_TXFIFO_REPEATCOUNT 0xb4
+
+#define IFI_CANFD_TXFIFO_DLC 0xb8
+#define IFI_CANFD_TXFIFO_DLC_DLC_OFFSET 0
+#define IFI_CANFD_TXFIFO_DLC_DLC_MASK 0xf
+#define IFI_CANFD_TXFIFO_DLC_RTR BIT(4)
+#define IFI_CANFD_TXFIFO_DLC_EDL BIT(5)
+#define IFI_CANFD_TXFIFO_DLC_BRS BIT(6)
+#define IFI_CANFD_TXFIFO_DLC_FNR_OFFSET 24
+#define IFI_CANFD_TXFIFO_DLC_FNR_MASK 0xff
+
+#define IFI_CANFD_TXFIFO_ID 0xbc
+#define IFI_CANFD_TXFIFO_ID_ID_OFFSET 0
+#define IFI_CANFD_TXFIFO_ID_ID_STD_MASK CAN_SFF_MASK
+#define IFI_CANFD_TXFIFO_ID_ID_STD_OFFSET 0
+#define IFI_CANFD_TXFIFO_ID_ID_STD_WIDTH 10
+#define IFI_CANFD_TXFIFO_ID_ID_XTD_MASK CAN_EFF_MASK
+#define IFI_CANFD_TXFIFO_ID_ID_XTD_OFFSET 11
+#define IFI_CANFD_TXFIFO_ID_ID_XTD_WIDTH 18
+#define IFI_CANFD_TXFIFO_ID_IDE BIT(29)
+
+#define IFI_CANFD_TXFIFO_DATA 0xc0 /* 0xc0..0xfc */
+
+#define IFI_CANFD_FILTER_MASK(n) (0x800 + ((n) * 8) + 0)
+#define IFI_CANFD_FILTER_MASK_EXT BIT(29)
+#define IFI_CANFD_FILTER_MASK_EDL BIT(30)
+#define IFI_CANFD_FILTER_MASK_VALID ((u32)BIT(31))
+
+#define IFI_CANFD_FILTER_IDENT(n) (0x800 + ((n) * 8) + 4)
+#define IFI_CANFD_FILTER_IDENT_IDE BIT(29)
+#define IFI_CANFD_FILTER_IDENT_CANFD BIT(30)
+#define IFI_CANFD_FILTER_IDENT_VALID ((u32)BIT(31))
+
+/* IFI CANFD private data structure */
+struct ifi_canfd_priv {
+ struct can_priv can; /* must be the first member */
+ struct napi_struct napi;
+ struct net_device *ndev;
+ void __iomem *base;
+};
+
+static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ u32 enirq = 0;
+
+ if (enable) {
+ enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
+ IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+ }
+
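+ /* The SET_* bits appear to act as write strobes: only the mask
+ * fields whose corresponding SET_* bit is set in this write are
+ * updated (inferred from the IFI_CANFD_IRQMASK layout above).
+ */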
+ writel(IFI_CANFD_IRQMASK_SET_ERR |
+ IFI_CANFD_IRQMASK_SET_TS |
+ IFI_CANFD_IRQMASK_SET_TX |
+ IFI_CANFD_IRQMASK_SET_RX | enirq,
+ priv->base + IFI_CANFD_IRQMASK);
+}
+
+static void ifi_canfd_read_fifo(struct net_device *ndev)
+{
+ struct net_device_stats *stats = &ndev->stats;
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
+ IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER;
+ u32 rxdlc, rxid;
+ u32 dlc, id;
+ int i;
+
+ rxdlc = readl(priv->base + IFI_CANFD_RXFIFO_DLC);
+ if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL)
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ dlc = (rxdlc >> IFI_CANFD_RXFIFO_DLC_DLC_OFFSET) &
+ IFI_CANFD_RXFIFO_DLC_DLC_MASK;
+ if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL)
+ cf->len = can_dlc2len(dlc);
+ else
+ cf->len = get_can_dlc(dlc);
+
+ rxid = readl(priv->base + IFI_CANFD_RXFIFO_ID);
+ id = (rxid >> IFI_CANFD_RXFIFO_ID_ID_OFFSET);
+ if (id & IFI_CANFD_RXFIFO_ID_IDE) {
+ id &= IFI_CANFD_RXFIFO_ID_ID_XTD_MASK;
+ /*
+ * In case the Extended ID frame is received, the standard
+ * and extended part of the ID are swapped in the register,
+ * so swap them back to obtain the correct ID.
+ */
+ id = (id >> IFI_CANFD_RXFIFO_ID_ID_XTD_OFFSET) |
+ ((id & IFI_CANFD_RXFIFO_ID_ID_STD_MASK) <<
+ IFI_CANFD_RXFIFO_ID_ID_XTD_WIDTH);
+ id |= CAN_EFF_FLAG;
+ } else {
+ id &= IFI_CANFD_RXFIFO_ID_ID_STD_MASK;
+ }
+ cf->can_id = id;
+
+ if (rxdlc & IFI_CANFD_RXFIFO_DLC_ESI) {
+ cf->flags |= CANFD_ESI;
+ netdev_dbg(ndev, "ESI Error\n");
+ }
+
+ if (!(rxdlc & IFI_CANFD_RXFIFO_DLC_EDL) &&
+ (rxdlc & IFI_CANFD_RXFIFO_DLC_RTR)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ if (rxdlc & IFI_CANFD_RXFIFO_DLC_BRS)
+ cf->flags |= CANFD_BRS;
+
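+ /* The RX FIFO exposes the payload as a linear register window
+ * (0x70..0xac), so copy it out one 32-bit word at a time.
+ */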
+ for (i = 0; i < cf->len; i += 4) {
+ *(u32 *)(cf->data + i) =
+ readl(priv->base + IFI_CANFD_RXFIFO_DATA + i);
+ }
+ }
+
+ /* Remove the packet from FIFO */
+ writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD);
+ writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->len;
+
+ netif_receive_skb(skb);
+}
+
+static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ u32 pkts = 0;
+ u32 rxst;
+
+ rxst = readl(priv->base + IFI_CANFD_RXSTCMD);
+ if (rxst & IFI_CANFD_RXSTCMD_EMPTY) {
+ netdev_dbg(ndev, "No messages in RX FIFO\n");
+ return 0;
+ }
+
+ for (;;) {
+ if (rxst & IFI_CANFD_RXSTCMD_EMPTY)
+ break;
+ if (quota <= 0)
+ break;
+
+ ifi_canfd_read_fifo(ndev);
+ quota--;
+ pkts++;
+ rxst = readl(priv->base + IFI_CANFD_RXSTCMD);
+ }
+
+ if (pkts)
+ can_led_event(ndev, CAN_LED_EVENT_RX);
+
+ return pkts;
+}
+
+static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
+{
+ struct net_device_stats *stats = &ndev->stats;
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ netdev_err(ndev, "RX FIFO overflow, message(s) lost.\n");
+
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
+ skb = alloc_can_err_skb(ndev, &frame);
+ if (unlikely(!skb))
+ return 0;
+
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+static int ifi_canfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ u32 err;
+
+ err = readl(priv->base + IFI_CANFD_ERROR);
+ bec->rxerr = (err >> IFI_CANFD_ERROR_RX_OFFSET) &
+ IFI_CANFD_ERROR_RX_MASK;
+ bec->txerr = (err >> IFI_CANFD_ERROR_TX_OFFSET) &
+ IFI_CANFD_ERROR_TX_MASK;
+
+ return 0;
+}
+
+static int ifi_canfd_handle_state_change(struct net_device *ndev,
+ enum can_state new_state)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct can_berr_counter bec;
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ /* error warning state */
+ priv->can.can_stats.error_warning++;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ /* error passive state */
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ case CAN_STATE_BUS_OFF:
+ /* bus-off state */
+ priv->can.state = CAN_STATE_BUS_OFF;
+ ifi_canfd_irq_enable(ndev, 0);
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ break;
+ default:
+ break;
+ }
+
+ /* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ ifi_canfd_get_berr_counter(ndev, &bec);
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_WARNING:
+ /* error warning state */
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ /* error passive state */
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if (bec.txerr > 127)
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
+ case CAN_STATE_BUS_OFF:
+ /* bus-off state */
+ cf->can_id |= CAN_ERR_BUSOFF;
+ break;
+ default:
+ break;
+ }
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ int work_done = 0;
+ u32 isr;
+
+ /*
+ * The ErrWarn condition is a little special, since the bit is
+ * located in the INTERRUPT register instead of the STCMD register.
+ */
+ isr = readl(priv->base + IFI_CANFD_INTERRUPT);
+ if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) &&
+ (priv->can.state != CAN_STATE_ERROR_WARNING)) {
+ /* Clear the interrupt */
+ writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
+ priv->base + IFI_CANFD_INTERRUPT);
+ netdev_dbg(ndev, "Error, entered warning state\n");
+ work_done += ifi_canfd_handle_state_change(ndev,
+ CAN_STATE_ERROR_WARNING);
+ }
+
+ if ((stcmd & IFI_CANFD_STCMD_ERROR_PASSIVE) &&
+ (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
+ netdev_dbg(ndev, "Error, entered passive state\n");
+ work_done += ifi_canfd_handle_state_change(ndev,
+ CAN_STATE_ERROR_PASSIVE);
+ }
+
+ if ((stcmd & IFI_CANFD_STCMD_BUSOFF) &&
+ (priv->can.state != CAN_STATE_BUS_OFF)) {
+ netdev_dbg(ndev, "Error, entered bus-off state\n");
+ work_done += ifi_canfd_handle_state_change(ndev,
+ CAN_STATE_BUS_OFF);
+ }
+
+ return work_done;
+}
+
+static int ifi_canfd_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
+ IFI_CANFD_STCMD_BUSOFF;
+ int work_done = 0;
+
+ u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
+ u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
+
+ /* Handle bus state changes */
+ if ((stcmd & stcmd_state_mask) ||
+ ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
+ work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
+
+ /* Handle lost messages on RX */
+ if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
+ work_done += ifi_canfd_handle_lost_msg(ndev);
+
+ /* Handle normal messages on RX */
+ if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
+ work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
+
+ if (work_done < quota) {
+ napi_complete(napi);
+ ifi_canfd_irq_enable(ndev, 1);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
+ IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER;
+ const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
+ IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
+ const u32 clr_irq_mask = ~(IFI_CANFD_INTERRUPT_SET_IRQ |
+ IFI_CANFD_INTERRUPT_ERROR_WARNING);
+ u32 isr;
+
+ isr = readl(priv->base + IFI_CANFD_INTERRUPT);
+
+ /* No interrupt */
+ if (isr == 0)
+ return IRQ_NONE;
+
+ /* Clear all pending interrupts but ErrWarn */
+ writel(clr_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
+
+ /* RX IRQ, start NAPI */
+ if (isr & rx_irq_mask) {
+ ifi_canfd_irq_enable(ndev, 0);
+ napi_schedule(&priv->napi);
+ }
+
+ /* TX IRQ */
+ if (isr & tx_irq_mask) {
+ stats->tx_bytes += can_get_echo_skb(ndev, 0);
+ stats->tx_packets++;
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ netif_wake_queue(ndev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct can_bittiming_const ifi_canfd_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 64,
+ .tseg2_min = 2, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 64,
+ .sjw_max = 16,
+ .brp_min = 2,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const ifi_canfd_data_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 64,
+ .tseg2_min = 2, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 64,
+ .sjw_max = 16,
+ .brp_min = 2,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static void ifi_canfd_set_bittiming(struct net_device *ndev)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ u16 brp, sjw, tseg1, tseg2;
+ u32 noniso_arg = 0;
+ u32 time_off;
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) {
+ time_off = IFI_CANFD_TIME_SJW_OFF_ISO;
+ } else {
+ noniso_arg = IFI_CANFD_TIME_SET_TIMEB_BOSCH |
+ IFI_CANFD_TIME_SET_TIMEA_BOSCH |
+ IFI_CANFD_TIME_SET_PRESC_BOSCH |
+ IFI_CANFD_TIME_SET_SJW_BOSCH;
+ time_off = IFI_CANFD_TIME_SJW_OFF_BOSCH;
+ }
+
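+ /* The IP core seems to expect biased field values: the prescaler
+ * and phase_seg2 are written minus 2, SJW and tseg1 minus 1
+ * (inferred from the encoding below, not from a datasheet).
+ */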
+ /* Configure bit timing */
+ brp = bt->brp - 2;
+ sjw = bt->sjw - 1;
+ tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+ tseg2 = bt->phase_seg2 - 2;
+ writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
+ (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
+ (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
+ (sjw << time_off) |
+ noniso_arg,
+ priv->base + IFI_CANFD_TIME);
+
+ /* Configure data bit timing */
+ brp = dbt->brp - 2;
+ sjw = dbt->sjw - 1;
+ tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+ tseg2 = dbt->phase_seg2 - 2;
+ writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
+ (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
+ (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
+ (sjw << time_off) |
+ noniso_arg,
+ priv->base + IFI_CANFD_FTIME);
+}
+
+static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
+ const u32 mask, const u32 ident)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+
+ writel(mask, priv->base + IFI_CANFD_FILTER_MASK(id));
+ writel(ident, priv->base + IFI_CANFD_FILTER_IDENT(id));
+}
+
+static void ifi_canfd_set_filters(struct net_device *ndev)
+{
+ /* Receive all CAN frames (standard ID) */
+ ifi_canfd_set_filter(ndev, 0,
+ IFI_CANFD_FILTER_MASK_VALID |
+ IFI_CANFD_FILTER_MASK_EXT,
+ IFI_CANFD_FILTER_IDENT_VALID);
+
+ /* Receive all CAN frames (extended ID) */
+ ifi_canfd_set_filter(ndev, 1,
+ IFI_CANFD_FILTER_MASK_VALID |
+ IFI_CANFD_FILTER_MASK_EXT,
+ IFI_CANFD_FILTER_IDENT_VALID |
+ IFI_CANFD_FILTER_IDENT_IDE);
+
+ /* Receive all CANFD frames */
+ ifi_canfd_set_filter(ndev, 2,
+ IFI_CANFD_FILTER_MASK_VALID |
+ IFI_CANFD_FILTER_MASK_EDL |
+ IFI_CANFD_FILTER_MASK_EXT,
+ IFI_CANFD_FILTER_IDENT_VALID |
+ IFI_CANFD_FILTER_IDENT_CANFD |
+ IFI_CANFD_FILTER_IDENT_IDE);
+}
+
+static void ifi_canfd_start(struct net_device *ndev)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ u32 stcmd;
+
+ /* Reset the IP */
+ writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
+ writel(0, priv->base + IFI_CANFD_STCMD);
+
+ ifi_canfd_set_bittiming(ndev);
+ ifi_canfd_set_filters(ndev);
+
+ /* Reset FIFOs */
+ writel(IFI_CANFD_RXSTCMD_RESET, priv->base + IFI_CANFD_RXSTCMD);
+ writel(0, priv->base + IFI_CANFD_RXSTCMD);
+ writel(IFI_CANFD_TXSTCMD_RESET, priv->base + IFI_CANFD_TXSTCMD);
+ writel(0, priv->base + IFI_CANFD_TXSTCMD);
+
+ /* Repeat transmission until successful */
+ writel(0, priv->base + IFI_CANFD_REPEAT);
+ writel(0, priv->base + IFI_CANFD_SUSPEND);
+
+ /* Clear all pending interrupts */
+ writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ),
+ priv->base + IFI_CANFD_INTERRUPT);
+
+ stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ stcmd |= IFI_CANFD_STCMD_BUSMONITOR;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ stcmd |= IFI_CANFD_STCMD_LOOPBACK;
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
+ stcmd |= IFI_CANFD_STCMD_ENABLE_ISO;
+
+ if (!(priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)))
+ stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ ifi_canfd_irq_enable(ndev, 1);
+
+ /* Enable controller */
+ writel(stcmd, priv->base + IFI_CANFD_STCMD);
+}
+
+static void ifi_canfd_stop(struct net_device *ndev)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+
+ /* Reset the IP */
+ writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
+
+ /* Mask all interrupts */
+ writel(~0, priv->base + IFI_CANFD_IRQMASK);
+
+ /* Clear all pending interrupts */
+ writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ),
+ priv->base + IFI_CANFD_INTERRUPT);
+
+ /* Set the state as STOPPED */
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int ifi_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ ifi_canfd_start(ndev);
+ netif_wake_queue(ndev);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ifi_canfd_open(struct net_device *ndev)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = open_candev(ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to open CAN device\n");
+ return ret;
+ }
+
+ /* Register interrupt handler */
+ ret = request_irq(ndev->irq, ifi_canfd_isr, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Failed to request interrupt\n");
+ goto err_irq;
+ }
+
+ ifi_canfd_start(ndev);
+
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+err_irq:
+ close_candev(ndev);
+ return ret;
+}
+
+static int ifi_canfd_close(struct net_device *ndev)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ ifi_canfd_stop(ndev);
+
+ free_irq(ndev->irq, ndev);
+
+ close_candev(ndev);
+
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+
+ return 0;
+}
+
+static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 txst, txid, txdlc;
+ int i;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ /* Check if the TX buffer is full */
+ txst = readl(priv->base + IFI_CANFD_TXSTCMD);
+ if (txst & IFI_CANFD_TXSTCMD_FULL) {
+ netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG! TX FIFO full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ netif_stop_queue(ndev);
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ txid = cf->can_id & CAN_EFF_MASK;
+ /*
+ * In case the Extended ID frame is transmitted, the
+ * standard and extended part of the ID are swapped
+ * in the register, so swap them back to send the
+ * correct ID.
+ */
+ txid = (txid >> IFI_CANFD_TXFIFO_ID_ID_XTD_WIDTH) |
+ ((txid & IFI_CANFD_TXFIFO_ID_ID_XTD_MASK) <<
+ IFI_CANFD_TXFIFO_ID_ID_XTD_OFFSET);
+ txid |= IFI_CANFD_TXFIFO_ID_IDE;
+ } else {
+ txid = cf->can_id & CAN_SFF_MASK;
+ }
+
+ txdlc = can_len2dlc(cf->len);
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) {
+ txdlc |= IFI_CANFD_TXFIFO_DLC_EDL;
+ if (cf->flags & CANFD_BRS)
+ txdlc |= IFI_CANFD_TXFIFO_DLC_BRS;
+ }
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ txdlc |= IFI_CANFD_TXFIFO_DLC_RTR;
+
+ /* message ram configuration */
+ writel(txid, priv->base + IFI_CANFD_TXFIFO_ID);
+ writel(txdlc, priv->base + IFI_CANFD_TXFIFO_DLC);
+
+ for (i = 0; i < cf->len; i += 4) {
+ writel(*(u32 *)(cf->data + i),
+ priv->base + IFI_CANFD_TXFIFO_DATA + i);
+ }
+
+ writel(0, priv->base + IFI_CANFD_TXFIFO_REPEATCOUNT);
+ writel(0, priv->base + IFI_CANFD_TXFIFO_SUSPEND_US);
+
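+ /* Only one TX frame is in flight at a time (the queue was stopped
+ * above), so echo slot 0 is always free here; the skb is released
+ * from the TX interrupt via can_get_echo_skb(ndev, 0).
+ */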
+ can_put_echo_skb(skb, ndev, 0);
+
+ /* Start the transmission */
+ writel(IFI_CANFD_TXSTCMD_ADD_MSG, priv->base + IFI_CANFD_TXSTCMD);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ifi_canfd_netdev_ops = {
+ .ndo_open = ifi_canfd_open,
+ .ndo_stop = ifi_canfd_close,
+ .ndo_start_xmit = ifi_canfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static int ifi_canfd_plat_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct ifi_canfd_priv *priv;
+ struct resource *res;
+ void __iomem *addr;
+ int irq, ret;
+ u32 id;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(dev, res);
+ irq = platform_get_irq(pdev, 0);
+ if (IS_ERR(addr) || irq < 0)
+ return -EINVAL;
+
+ id = readl(addr + IFI_CANFD_IP_ID);
+ if (id != IFI_CANFD_IP_ID_VALUE) {
+ dev_err(dev, "This block is not IFI CANFD, id=%08x\n", id);
+ return -EINVAL;
+ }
+
+ ndev = alloc_candev(sizeof(*priv), 1);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO; /* we support local echo */
+ ndev->netdev_ops = &ifi_canfd_netdev_ops;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->base = addr;
+
+ netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64);
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
+
+ priv->can.bittiming_const = &ifi_canfd_bittiming_const;
+ priv->can.data_bittiming_const = &ifi_canfd_data_bittiming_const;
+ priv->can.do_set_mode = ifi_canfd_set_mode;
+ priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
+
+ /* IFI CANFD can do both Bosch FD and ISO FD */
+ priv->can.ctrlmode = CAN_CTRLMODE_FD;
+
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to register (ret=%d)\n", ret);
+ goto err_reg;
+ }
+
+ devm_can_led_init(ndev);
+
+ dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n",
+ priv->base, ndev->irq, priv->can.clock.freq);
+
+ return 0;
+
+err_reg:
+ free_candev(ndev);
+ return ret;
+}
+
+static int ifi_canfd_plat_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ unregister_candev(ndev);
+ platform_set_drvdata(pdev, NULL);
+ free_candev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id ifi_canfd_of_table[] = {
+ { .compatible = "ifi,canfd-1.0", .data = NULL },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ifi_canfd_of_table);
+
+static struct platform_driver ifi_canfd_plat_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = ifi_canfd_of_table,
+ },
+ .probe = ifi_canfd_plat_probe,
+ .remove = ifi_canfd_plat_remove,
+};
+
+module_platform_driver(ifi_canfd_plat_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for IFI CANFD controller");
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index bc46be39549d..788459f6bf5c 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -904,6 +904,9 @@ static const struct of_device_id rcar_can_of_table[] __maybe_unused = {
{ .compatible = "renesas,can-r8a7779" },
{ .compatible = "renesas,can-r8a7790" },
{ .compatible = "renesas,can-r8a7791" },
+ { .compatible = "renesas,rcar-gen1-can" },
+ { .compatible = "renesas,rcar-gen2-can" },
+ { .compatible = "renesas,rcar-gen3-can" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_can_of_table);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 0552ed46a206..dc9c6db96c3c 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -27,6 +27,7 @@
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include "sja1000.h"
@@ -40,6 +41,15 @@ MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL v2");
+struct sja1000_of_data {
+ size_t priv_sz;
+ int (*init)(struct sja1000_priv *priv, struct device_node *of);
+};
+
+struct technologic_priv {
+ spinlock_t io_lock;
+};
+
static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
{
return ioread8(priv->reg_base + reg);
@@ -70,6 +80,43 @@ static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
iowrite8(val, priv->reg_base + reg * 4);
}
+static u8 sp_technologic_read_reg16(const struct sja1000_priv *priv, int reg)
+{
+ struct technologic_priv *tp = priv->priv;
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&tp->io_lock, flags);
+ iowrite16(reg, priv->reg_base + 0);
+ val = ioread16(priv->reg_base + 2);
+ spin_unlock_irqrestore(&tp->io_lock, flags);
+
+ return val;
+}
+
+static void sp_technologic_write_reg16(const struct sja1000_priv *priv,
+ int reg, u8 val)
+{
+ struct technologic_priv *tp = priv->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->io_lock, flags);
+ iowrite16(reg, priv->reg_base + 0);
+ iowrite16(val, priv->reg_base + 2);
+ spin_unlock_irqrestore(&tp->io_lock, flags);
+}
+
+static int sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
+{
+ struct technologic_priv *tp = priv->priv;
+
+ priv->read_reg = sp_technologic_read_reg16;
+ priv->write_reg = sp_technologic_write_reg16;
+ spin_lock_init(&tp->io_lock);
+
+ return 0;
+}
+
static void sp_populate(struct sja1000_priv *priv,
struct sja1000_platform_data *pdata,
unsigned long resource_mem_flags)
@@ -154,6 +201,18 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
priv->cdr |= CDR_CBP; /* default */
}
+static struct sja1000_of_data technologic_data = {
+ .priv_sz = sizeof(struct technologic_priv),
+ .init = sp_technologic_init,
+};
+
+static const struct of_device_id sp_of_table[] = {
+ { .compatible = "nxp,sja1000", .data = NULL, },
+ { .compatible = "technologic,sja1000", .data = &technologic_data, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sp_of_table);
+
static int sp_probe(struct platform_device *pdev)
{
int err, irq = 0;
@@ -163,6 +222,9 @@ static int sp_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq = NULL;
struct sja1000_platform_data *pdata;
struct device_node *of = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+ const struct sja1000_of_data *of_data = NULL;
+ size_t priv_sz = 0;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata && !of) {
@@ -191,7 +253,13 @@ static int sp_probe(struct platform_device *pdev)
if (!irq && !res_irq)
return -ENODEV;
- dev = alloc_sja1000dev(0);
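+ /* Match against the OF table first: board-specific entries may
+ * request extra private data and a custom init hook.
+ */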
+ of_id = of_match_device(sp_of_table, &pdev->dev);
+ if (of_id && of_id->data) {
+ of_data = of_id->data;
+ priv_sz = of_data->priv_sz;
+ }
+
+ dev = alloc_sja1000dev(priv_sz);
if (!dev)
return -ENOMEM;
priv = netdev_priv(dev);
@@ -208,10 +276,17 @@ static int sp_probe(struct platform_device *pdev)
dev->irq = irq;
priv->reg_base = addr;
- if (of)
+ if (of) {
sp_populate_of(priv, of);
- else
+
+ if (of_data && of_data->init) {
+ err = of_data->init(priv, of);
+ if (err)
+ goto exit_free;
+ }
+ } else {
sp_populate(priv, pdata, res_mem->flags);
+ }
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -242,12 +317,6 @@ static int sp_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id sp_of_table[] = {
- {.compatible = "nxp,sja1000"},
- {},
-};
-MODULE_DEVICE_TABLE(of, sp_of_table);
-
static struct platform_driver sp_driver = {
.probe = sp_probe,
.remove = sp_remove,
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index eb7192fab593..3400fd1cada7 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -281,11 +281,9 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
switch (urb->status) {
case 0:
dev->free_slots = dev->intr_in_buffer[1];
- if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
- if (netif_queue_stopped(netdev)){
- netif_wake_queue(netdev);
- }
- }
+ if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH &&
+ netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
break;
case -ECONNRESET: /* unlink */
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 4c483d937481..90ba003d8fdf 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -27,7 +27,7 @@ config NET_DSA_MV88E6131
This enables support for the Marvell 88E6085/6095/6095F/6131
ethernet switch chips.
-config NET_DSA_MV88E6123_61_65
+config NET_DSA_MV88E6123
tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
depends on NET_DSA
select NET_DSA_MV88E6XXX
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index e2d51c4b9382..a6e09939be65 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,8 +1,8 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
mv88e6xxx_drv-y += mv88e6xxx.o
-ifdef CONFIG_NET_DSA_MV88E6123_61_65
-mv88e6xxx_drv-y += mv88e6123_61_65.o
+ifdef CONFIG_NET_DSA_MV88E6123
+mv88e6xxx_drv-y += mv88e6123.o
endif
ifdef CONFIG_NET_DSA_MV88E6131
mv88e6xxx_drv-y += mv88e6131.o
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 6f946fedbb77..95944d5e3e22 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -483,16 +483,17 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
}
static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
- u32 br_port_mask)
+ struct net_device *bridge)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
unsigned int i;
u32 reg, p_ctl;
+ priv->port_sts[port].bridge_dev = bridge;
p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
for (i = 0; i < priv->hw_params.num_ports; i++) {
- if (!((1 << i) & br_port_mask))
+ if (priv->port_sts[i].bridge_dev != bridge)
continue;
/* Add this local port to the remote port VLAN control
@@ -515,10 +516,10 @@ static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
return 0;
}
-static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
- u32 br_port_mask)
+static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
+ struct net_device *bridge = priv->port_sts[port].bridge_dev;
unsigned int i;
u32 reg, p_ctl;
@@ -526,7 +527,7 @@ static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
for (i = 0; i < priv->hw_params.num_ports; i++) {
/* Don't touch the remaining ports */
- if (!((1 << i) & br_port_mask))
+ if (priv->port_sts[i].bridge_dev != bridge)
continue;
reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
@@ -541,8 +542,7 @@ static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port,
core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
priv->port_sts[port].vlan_ctl_mask = p_ctl;
-
- return 0;
+ priv->port_sts[port].bridge_dev = NULL;
}
static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
@@ -1385,8 +1385,8 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
.port_disable = bcm_sf2_port_disable,
.get_eee = bcm_sf2_sw_get_eee,
.set_eee = bcm_sf2_sw_set_eee,
- .port_join_bridge = bcm_sf2_sw_br_join,
- .port_leave_bridge = bcm_sf2_sw_br_leave,
+ .port_bridge_join = bcm_sf2_sw_br_join,
+ .port_bridge_leave = bcm_sf2_sw_br_leave,
.port_stp_update = bcm_sf2_sw_br_set_stp_state,
.port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
.port_fdb_add = bcm_sf2_sw_fdb_add,
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 6bba1c98d764..200b1f5fdb56 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -50,6 +50,8 @@ struct bcm_sf2_port_status {
struct ethtool_eee eee;
u32 vlan_ctl_mask;
+
+ struct net_device *bridge_dev;
};
struct bcm_sf2_arl_entry {
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123.c
index d4fcf4570d95..69a6f79dcb10 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123.c
@@ -17,7 +17,7 @@
#include <net/dsa.h>
#include "mv88e6xxx.h"
-static const struct mv88e6xxx_switch_id mv88e6123_61_65_table[] = {
+static const struct mv88e6xxx_switch_id mv88e6123_table[] = {
{ PORT_SWITCH_ID_6123, "Marvell 88E6123" },
{ PORT_SWITCH_ID_6123_A1, "Marvell 88E6123 (A1)" },
{ PORT_SWITCH_ID_6123_A2, "Marvell 88E6123 (A2)" },
@@ -29,13 +29,13 @@ static const struct mv88e6xxx_switch_id mv88e6123_61_65_table[] = {
{ PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" },
};
-static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr)
+static char *mv88e6123_probe(struct device *host_dev, int sw_addr)
{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_61_65_table,
- ARRAY_SIZE(mv88e6123_61_65_table));
+ return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table,
+ ARRAY_SIZE(mv88e6123_table));
}
-static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
+static int mv88e6123_setup_global(struct dsa_switch *ds)
{
u32 upstream_port = dsa_upstream_port(ds);
int ret;
@@ -68,7 +68,7 @@ static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
return 0;
}
-static int mv88e6123_61_65_setup(struct dsa_switch *ds)
+static int mv88e6123_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
@@ -93,18 +93,18 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
if (ret < 0)
return ret;
- ret = mv88e6123_61_65_setup_global(ds);
+ ret = mv88e6123_setup_global(ds);
if (ret < 0)
return ret;
return mv88e6xxx_setup_ports(ds);
}
-struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
+struct dsa_switch_driver mv88e6123_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_EDSA,
.priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6123_61_65_probe,
- .setup = mv88e6123_61_65_setup,
+ .probe = mv88e6123_probe,
+ .setup = mv88e6123_setup,
.set_addr = mv88e6xxx_set_addr_indirect,
.phy_read = mv88e6xxx_phy_read,
.phy_write = mv88e6xxx_phy_write,
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 6e18213b9c04..c0164b98fc08 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -103,14 +103,14 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
#endif
.get_regs_len = mv88e6xxx_get_regs_len,
.get_regs = mv88e6xxx_get_regs,
- .port_join_bridge = mv88e6xxx_port_bridge_join,
- .port_leave_bridge = mv88e6xxx_port_bridge_leave,
+ .port_bridge_join = mv88e6xxx_port_bridge_join,
+ .port_bridge_leave = mv88e6xxx_port_bridge_leave,
.port_stp_update = mv88e6xxx_port_stp_update,
- .port_pvid_get = mv88e6xxx_port_pvid_get,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
- .vlan_getnext = mv88e6xxx_vlan_getnext,
+ .port_vlan_dump = mv88e6xxx_port_vlan_dump,
.port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index a47f52f44b0d..5f528abc8af1 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -324,14 +324,14 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
.set_eeprom = mv88e6352_set_eeprom,
.get_regs_len = mv88e6xxx_get_regs_len,
.get_regs = mv88e6xxx_get_regs,
- .port_join_bridge = mv88e6xxx_port_bridge_join,
- .port_leave_bridge = mv88e6xxx_port_bridge_leave,
+ .port_bridge_join = mv88e6xxx_port_bridge_join,
+ .port_bridge_leave = mv88e6xxx_port_bridge_leave,
.port_stp_update = mv88e6xxx_port_stp_update,
- .port_pvid_get = mv88e6xxx_port_pvid_get,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
- .vlan_getnext = mv88e6xxx_vlan_getnext,
+ .port_vlan_dump = mv88e6xxx_port_vlan_dump,
.port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
.port_fdb_add = mv88e6xxx_port_fdb_add,
.port_fdb_del = mv88e6xxx_port_fdb_del,
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 512c8c0be1b4..a2904029cccc 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1051,48 +1051,78 @@ static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
}
-static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
+static const char * const mv88e6xxx_port_state_names[] = {
+ [PORT_CONTROL_STATE_DISABLED] = "Disabled",
+ [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening",
+ [PORT_CONTROL_STATE_LEARNING] = "Learning",
+ [PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
+};
+
+static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int reg, ret = 0;
u8 oldstate;
- mutex_lock(&ps->smi_mutex);
-
reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
- if (reg < 0) {
- ret = reg;
- goto abort;
- }
+ if (reg < 0)
+ return reg;
oldstate = reg & PORT_CONTROL_STATE_MASK;
+
if (oldstate != state) {
/* Flush forwarding database if we're moving a port
* from Learning or Forwarding state to Disabled or
* Blocking or Listening state.
*/
- if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
- state <= PORT_CONTROL_STATE_BLOCKING) {
+ if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
+ oldstate == PORT_CONTROL_STATE_FORWARDING)
+ && (state == PORT_CONTROL_STATE_DISABLED ||
+ state == PORT_CONTROL_STATE_BLOCKING)) {
ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
if (ret)
- goto abort;
+ return ret;
}
+
reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
reg);
+ if (ret)
+ return ret;
+
+ netdev_dbg(ds->ports[port], "PortState %s (was %s)\n",
+ mv88e6xxx_port_state_names[state],
+ mv88e6xxx_port_state_names[oldstate]);
}
-abort:
- mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port,
- u16 output_ports)
+static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct net_device *bridge = ps->ports[port].bridge_dev;
const u16 mask = (1 << ps->num_ports) - 1;
+ u16 output_ports = 0;
int reg;
+ int i;
+
+ /* allow CPU port or DSA link(s) to send frames to every port */
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ output_ports = mask;
+ } else {
+ for (i = 0; i < ps->num_ports; ++i) {
+ /* allow sending frames to every group member */
+ if (bridge && ps->ports[i].bridge_dev == bridge)
+ output_ports |= BIT(i);
+
+ /* allow sending frames to CPU port and DSA link(s) */
+ if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
+ output_ports |= BIT(i);
+ }
+ }
+
+ /* prevent frames from going back out of the port they came in on */
+ output_ports &= ~BIT(port);
reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
if (reg < 0)
@@ -1126,48 +1156,55 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
break;
}
- netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
-
/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
* so we can not update the port state directly but need to schedule it.
*/
- ps->port_state[port] = stp_state;
- set_bit(port, &ps->port_state_update_mask);
+ ps->ports[port].state = stp_state;
+ set_bit(port, ps->port_state_update_mask);
schedule_work(&ps->bridge_work);
return 0;
}
-static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
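+/* Read-modify-write helper for the port's default VID: a non-NULL "new"
+ * writes the register, a non-NULL "old" returns the previous value;
+ * either pointer may be NULL.
+ */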
+static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
+ u16 *old)
{
+ u16 pvid;
int ret;
ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
if (ret < 0)
return ret;
- *pvid = ret & PORT_DEFAULT_VLAN_MASK;
+ pvid = ret & PORT_DEFAULT_VLAN_MASK;
- return 0;
-}
+ if (new) {
+ ret &= ~PORT_DEFAULT_VLAN_MASK;
+ ret |= *new & PORT_DEFAULT_VLAN_MASK;
-int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
-{
- int ret;
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ PORT_DEFAULT_VLAN, ret);
+ if (ret < 0)
+ return ret;
- ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
- if (ret < 0)
- return ret;
+ netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new,
+ pvid);
+ }
- *pvid = ret & PORT_DEFAULT_VLAN_MASK;
+ if (old)
+ *old = pvid;
return 0;
}
+static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+{
+ return _mv88e6xxx_port_pvid(ds, port, NULL, pvid);
+}
+
static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
{
- return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
- pvid & PORT_DEFAULT_VLAN_MASK);
+ return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL);
}
static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
@@ -1306,6 +1343,57 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
return 0;
}
+int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry next;
+ u16 pvid;
+ int err;
+
+ mutex_lock(&ps->smi_mutex);
+
+ err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+ if (err)
+ goto unlock;
+
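+ /* Seed the VTU iterator with the all-ones VID so that the first
+ * GetNext wraps around and returns the lowest programmed entry.
+ */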
+ err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+ if (err)
+ goto unlock;
+
+ do {
+ err = _mv88e6xxx_vtu_getnext(ds, &next);
+ if (err)
+ break;
+
+ if (!next.valid)
+ break;
+
+ if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ continue;
+
+ /* reinit and dump this VLAN obj */
+ vlan->vid_begin = vlan->vid_end = next.vid;
+ vlan->flags = 0;
+
+ if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+ vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+ if (next.vid == pvid)
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ } while (next.vid < GLOBAL_VTU_VID_MASK);
+
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return err;
+}
+
static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
struct mv88e6xxx_vtu_stu_entry *entry)
{
@@ -1420,16 +1508,122 @@ loadpurge:
return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
-static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
- struct mv88e6xxx_vtu_stu_entry *entry)
+static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
+ u16 *old)
+{
+ u16 fid;
+ int ret;
+
+ /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+ if (ret < 0)
+ return ret;
+
+ fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
+
+ if (new) {
+ ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
+ ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
+ ret);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
+ if (ret < 0)
+ return ret;
+
+ fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;
+
+ if (new) {
+ ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
+ ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
+ ret);
+ if (ret < 0)
+ return ret;
+
+ netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid);
+ }
+
+ if (old)
+ *old = fid;
+
+ return 0;
+}
+
+static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
+{
+ return _mv88e6xxx_port_fid(ds, port, NULL, fid);
+}
+
+static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
+{
+ return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
+}
+
+static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int i, err;
+
+ bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
+
+ /* Set every FID bit used by the (un)bridged ports */
+ for (i = 0; i < ps->num_ports; ++i) {
+ err = _mv88e6xxx_port_fid_get(ds, i, fid);
+ if (err)
+ return err;
+
+ set_bit(*fid, fid_bitmap);
+ }
+
+ /* Set every FID bit used by the VLAN entries */
+ err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ if (err)
+ return err;
+
+ if (!vlan.valid)
+ break;
+
+ set_bit(vlan.fid, fid_bitmap);
+ } while (vlan.vid < GLOBAL_VTU_VID_MASK);
+
+ /* The reset value 0x000 indicates that multiple address databases
+ * are not needed. Return the next available positive FID.
+ */
+ *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
+ if (unlikely(*fid == MV88E6XXX_N_FID))
+ return -ENOSPC;
+
+ /* Clear the database */
+ return _mv88e6xxx_atu_flush(ds, *fid, true);
+}
+
+static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry vlan = {
.valid = true,
.vid = vid,
- .fid = vid, /* We use one FID per VLAN */
};
- int i;
+ int i, err;
+
+ err = _mv88e6xxx_fid_new(ds, &vlan.fid);
+ if (err)
+ return err;
/* exclude all ports except the CPU and DSA ports */
for (i = 0; i < ps->num_ports; ++i)
@@ -1440,7 +1634,6 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
struct mv88e6xxx_vtu_stu_entry vstp;
- int err;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
* implemented, only one STU entry is needed to cover all VTU
@@ -1460,24 +1653,152 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
if (err)
return err;
}
-
- /* Clear all MAC addresses from the new database */
- err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
- if (err)
- return err;
}
*entry = vlan;
return 0;
}
+static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
+ struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
+{
+ int err;
+
+ if (!vid)
+ return -EINVAL;
+
+ err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+ if (err)
+ return err;
+
+ err = _mv88e6xxx_vtu_getnext(ds, entry);
+ if (err)
+ return err;
+
+ if (entry->vid != vid || !entry->valid) {
+ if (!creat)
+ return -EOPNOTSUPP;
+ /* -ENOENT would've been more appropriate, but switchdev expects
+ * -EOPNOTSUPP to inform the bridge about a possible software VLAN.
+ */
+
+ err = _mv88e6xxx_vtu_new(ds, vid, entry);
+ }
+
+ return err;
+}
+
+static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
+ u16 vid_begin, u16 vid_end)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int i, err;
+
+ if (!vid_begin)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ps->smi_mutex);
+
+ err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1);
+ if (err)
+ goto unlock;
+
+ do {
+ err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!vlan.valid)
+ break;
+
+ if (vlan.vid > vid_end)
+ break;
+
+ for (i = 0; i < ps->num_ports; ++i) {
+ if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
+ continue;
+
+ if (vlan.data[i] ==
+ GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ continue;
+
+ if (ps->ports[i].bridge_dev ==
+ ps->ports[port].bridge_dev)
+ break; /* same bridge, check next VLAN */
+
+ netdev_warn(ds->ports[port],
+ "hardware VLAN %d already used by %s\n",
+ vlan.vid,
+ netdev_name(ps->ports[i].bridge_dev));
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+ } while (vlan.vid < vid_end);
+
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return err;
+}
+
+static const char * const mv88e6xxx_port_8021q_mode_names[] = {
+ [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled",
+ [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback",
+ [PORT_CONTROL_2_8021Q_CHECK] = "Check",
+ [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
+};
+
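+/* Map the bridge's vlan_filtering flag onto the port's 802.1Q mode:
+ * Secure mode requires incoming VIDs to be programmed in the VTU,
+ * while Disabled ignores 802.1Q tags entirely (the usual Marvell
+ * 802.1Q mode semantics; see the names table above).
+ */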
+int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
+ PORT_CONTROL_2_8021Q_DISABLED;
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
+ if (ret < 0)
+ goto unlock;
+
+ old = ret & PORT_CONTROL_2_8021Q_MASK;
+
+ if (new != old) {
+ ret &= ~PORT_CONTROL_2_8021Q_MASK;
+ ret |= new & PORT_CONTROL_2_8021Q_MASK;
+
+ ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2,
+ ret);
+ if (ret < 0)
+ goto unlock;
+
+ netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n",
+ mv88e6xxx_port_8021q_mode_names[new],
+ mv88e6xxx_port_8021q_mode_names[old]);
+ }
+
+ ret = 0;
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans)
{
- /* We reserve a few VLANs to isolate unbridged ports */
- if (vlan->vid_end >= 4000)
- return -EOPNOTSUPP;
+ int err;
+
+ /* If the requested port doesn't belong to the same bridge as the VLAN
+ * members, do not support it (yet) and fall back to a software VLAN.
+ */
+ err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
+ vlan->vid_end);
+ if (err)
+ return err;
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
@@ -1491,20 +1812,10 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
struct mv88e6xxx_vtu_stu_entry vlan;
int err;
- err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
- if (err)
- return err;
-
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
if (err)
return err;
- if (vlan.vid != vid || !vlan.valid) {
- err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
- if (err)
- return err;
- }
-
vlan.data[port] = untagged ?
GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
@@ -1545,16 +1856,12 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
struct mv88e6xxx_vtu_stu_entry vlan;
int i, err;
- err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+ err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
if (err)
return err;
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
- if (err)
- return err;
-
- if (vlan.vid != vid || !vlan.valid ||
- vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ /* Tell switchdev if this VLAN is handled in software */
+ if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
return -EOPNOTSUPP;
vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
@@ -1582,7 +1889,6 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port;
u16 pvid, vid;
int err = 0;
@@ -1598,8 +1904,7 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
goto unlock;
if (vid == pvid) {
- /* restore reserved VLAN ID */
- err = _mv88e6xxx_port_pvid_set(ds, port, defpvid);
+ err = _mv88e6xxx_port_pvid_set(ds, port, 0);
if (err)
goto unlock;
}
@@ -1611,52 +1916,6 @@ unlock:
return err;
}
-int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
- unsigned long *ports, unsigned long *untagged)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- struct mv88e6xxx_vtu_stu_entry next;
- int port;
- int err;
-
- if (*vid == 4095)
- return -ENOENT;
-
- mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_vtu_vid_write(ds, *vid);
- if (err)
- goto unlock;
-
- err = _mv88e6xxx_vtu_getnext(ds, &next);
-unlock:
- mutex_unlock(&ps->smi_mutex);
-
- if (err)
- return err;
-
- if (!next.valid)
- return -ENOENT;
-
- *vid = next.vid;
-
- for (port = 0; port < ps->num_ports; ++port) {
- clear_bit(port, ports);
- clear_bit(port, untagged);
-
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
- continue;
-
- if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
- next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
- set_bit(port, ports);
-
- if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
- set_bit(port, untagged);
- }
-
- return 0;
-}
-
static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
const unsigned char *addr)
{
@@ -1718,8 +1977,18 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
u8 state)
{
struct mv88e6xxx_atu_entry entry = { 0 };
+ struct mv88e6xxx_vtu_stu_entry vlan;
+ int err;
+
+ /* A null VLAN ID corresponds to the port's private database */
+ if (vid == 0)
+ err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
+ else
+ err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+ if (err)
+ return err;
- entry.fid = vid; /* We use one FID per VLAN */
+ entry.fid = vlan.fid;
entry.state = state;
ether_addr_copy(entry.mac, addr);
if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
@@ -1734,10 +2003,6 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans)
{
- /* We don't use per-port FDB */
- if (fdb->vid == 0)
- return -EOPNOTSUPP;
-
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
*/
@@ -1824,6 +2089,47 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
return 0;
}
+static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
+ int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct mv88e6xxx_atu_entry addr = {
+ .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ };
+ int err;
+
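+ /* Seed the ATU iterator with the broadcast MAC; GetNext then wraps
+ * to the lowest address, and the walk stops once the broadcast
+ * address comes back around.
+ */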
+ err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
+ if (err)
+ break;
+
+ if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ break;
+
+ if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
+ bool is_static = addr.state ==
+ (is_multicast_ether_addr(addr.mac) ?
+ GLOBAL_ATU_DATA_STATE_MC_STATIC :
+ GLOBAL_ATU_DATA_STATE_UC_STATIC);
+
+ fdb->vid = vid;
+ ether_addr_copy(fdb->addr, addr.mac);
+ fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
+
+ err = cb(&fdb->obj);
+ if (err)
+ break;
+ }
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ return err;
+}
+
int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj))
@@ -1832,55 +2138,37 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
struct mv88e6xxx_vtu_stu_entry vlan = {
.vid = GLOBAL_VTU_VID_MASK, /* all ones */
};
+ u16 fid;
int err;
mutex_lock(&ps->smi_mutex);
+ /* Dump port's default Filtering Information Database (VLAN ID 0) */
+ err = _mv88e6xxx_port_fid_get(ds, port, &fid);
+ if (err)
+ goto unlock;
+
+ err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
+ if (err)
+ goto unlock;
+
+ /* Dump VLANs' Filtering Information Databases */
err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
if (err)
goto unlock;
do {
- struct mv88e6xxx_atu_entry addr = {
- .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
- };
-
err = _mv88e6xxx_vtu_getnext(ds, &vlan);
if (err)
- goto unlock;
+ break;
if (!vlan.valid)
break;
- err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+ err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
+ fdb, cb);
if (err)
- goto unlock;
-
- do {
- err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
- if (err)
- goto unlock;
-
- if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
- break;
-
- if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
- bool is_static = addr.state ==
- (is_multicast_ether_addr(addr.mac) ?
- GLOBAL_ATU_DATA_STATE_MC_STATIC :
- GLOBAL_ATU_DATA_STATE_UC_STATIC);
-
- fdb->vid = vlan.vid;
- ether_addr_copy(fdb->addr, addr.mac);
- fdb->ndm_state = is_static ? NUD_NOARP :
- NUD_REACHABLE;
-
- err = cb(&fdb->obj);
- if (err)
- goto unlock;
- }
- } while (!is_broadcast_ether_addr(addr.mac));
-
+ break;
} while (vlan.vid < GLOBAL_VTU_VID_MASK);
unlock:
@@ -1889,28 +2177,47 @@ unlock:
return err;
}
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
+int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
{
- return 0;
-}
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int i, err;
-int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
-{
- return 0;
+ mutex_lock(&ps->smi_mutex);
+
+ /* Assign the bridge and remap each port's VLANTable */
+ ps->ports[port].bridge_dev = bridge;
+
+ for (i = 0; i < ps->num_ports; ++i) {
+ if (ps->ports[i].bridge_dev == bridge) {
+ err = _mv88e6xxx_port_based_vlan_map(ds, i);
+ if (err)
+ break;
+ }
+ }
+
+ mutex_unlock(&ps->smi_mutex);
+
+ return err;
}
-static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port)
+void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
- int err;
+ struct net_device *bridge = ps->ports[port].bridge_dev;
+ int i;
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
- if (!err)
- err = _mv88e6xxx_port_pvid_set(ds, port, pvid);
+
+ /* Unassign the bridge and remap each port's VLANTable */
+ ps->ports[port].bridge_dev = NULL;
+
+ for (i = 0; i < ps->num_ports; ++i)
+ if (i == port || ps->ports[i].bridge_dev == bridge)
+ if (_mv88e6xxx_port_based_vlan_map(ds, i))
+ netdev_warn(ds->ports[i], "failed to remap\n");
+
mutex_unlock(&ps->smi_mutex);
- return err;
}
static void mv88e6xxx_bridge_work(struct work_struct *work)
@@ -1922,11 +2229,66 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
ds = ((struct dsa_switch *)ps) - 1;
- while (ps->port_state_update_mask) {
- port = __ffs(ps->port_state_update_mask);
- clear_bit(port, &ps->port_state_update_mask);
- mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
+ mutex_lock(&ps->smi_mutex);
+
+ for (port = 0; port < ps->num_ports; ++port)
+ if (test_and_clear_bit(port, ps->port_state_update_mask) &&
+ _mv88e6xxx_port_state(ds, port, ps->ports[port].state))
+ netdev_warn(ds->ports[port], "failed to update state to %s\n",
+ mv88e6xxx_port_state_names[ps->ports[port].state]);
+
+ mutex_unlock(&ps->smi_mutex);
+}
+
+static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
+ int reg, int val)
+{
+ int ret;
+
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+ if (ret < 0)
+ goto restore_page_0;
+
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+restore_page_0:
+ _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+
+ return ret;
+}
+
+static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
+ int reg)
+{
+ int ret;
+
+ ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+ if (ret < 0)
+ goto restore_page_0;
+
+ ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
+restore_page_0:
+ _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+
+ return ret;
+}
+
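Both helpers above always restore page 0, even when the page select or the access itself fails. A self-contained userspace stand-in for that select/access/restore pattern; the array-backed "PHY" is invented for illustration, while register 0x16 as the page-select register matches the code above:

#include <stdio.h>

static int cur_page;
static int regs[4][32];			/* toy PHY: [page][reg] */

static int phy_write(int reg, int val)
{
	if (reg == 0x16) {		/* page-select register */
		cur_page = val;
		return 0;
	}
	regs[cur_page][reg] = val;
	return 0;
}

static int phy_read(int reg)
{
	return regs[cur_page][reg];
}

static int phy_page_read(int page, int reg)
{
	int ret = phy_write(0x16, page);

	if (ret < 0)
		goto restore_page_0;

	ret = phy_read(reg);
restore_page_0:
	phy_write(0x16, 0x0);		/* always drop back to page 0 */
	return ret;
}

int main(void)
{
	phy_write(0x16, 1);
	phy_write(0x00, 0x1140);	/* seed page 1, reg 0 */
	phy_write(0x16, 0);

	printf("page 1, reg 0 = 0x%04x\n", phy_page_read(1, 0x00));
	return 0;
}
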
+static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
+{
+ int ret;
+
+ ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
+ MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (ret & BMCR_PDOWN) {
+ ret &= ~BMCR_PDOWN;
+ ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
+ PAGE_FIBER_SERDES, MII_BMCR,
+ ret);
}
+
+ return ret;
}
static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
@@ -2032,8 +2394,25 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
goto abort;
}
+ /* If this port is connected to a SerDes, make sure the SerDes is not
+ * powered down.
+ */
+ if (mv88e6xxx_6352_family(ds)) {
+ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ if (ret < 0)
+ goto abort;
+ ret &= PORT_STATUS_CMODE_MASK;
+ if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
+ (ret == PORT_STATUS_CMODE_1000BASE_X) ||
+ (ret == PORT_STATUS_CMODE_SGMII)) {
+ ret = mv88e6xxx_power_on_serdes(ds);
+ if (ret < 0)
+ goto abort;
+ }
+ }
+
/* Port Control 2: don't force a good FCS, set the maximum frame size to
- * 10240 bytes, enable secure 802.1q tags, don't discard tagged or
+ * 10240 bytes, disable 802.1q tag checking, don't discard tagged or
* untagged frames on this port, do a destination address lookup on all
* received packets as usual, disable ARP mirroring and don't send a
* copy of all transmitted/received frames on this port to the CPU.
@@ -2058,7 +2437,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
}
- reg |= PORT_CONTROL_2_8021Q_SECURE;
+ reg |= PORT_CONTROL_2_8021Q_DISABLED;
if (reg) {
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
@@ -2073,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* the other bits clear.
*/
reg = 1 << port;
- /* Disable learning for DSA and CPU ports */
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
- reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+ /* Disable learning for CPU port */
+ if (dsa_is_cpu_port(ds, port))
+ reg = 0;
ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
if (ret)
@@ -2155,12 +2534,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (ret)
goto abort;
- /* Port based VLAN map: do not give each port its own address
- * database, and allow every port to egress frames on all other ports.
+ /* Port based VLAN map: give each port the same default address
+ * database, and allow bidirectional communication between the
+ * CPU and DSA port(s), and the other ports.
*/
- reg = BIT(ps->num_ports) - 1; /* all ports */
- reg &= ~BIT(port); /* except itself */
- ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg);
+ ret = _mv88e6xxx_port_fid_set(ds, port, 0);
+ if (ret)
+ goto abort;
+
+ ret = _mv88e6xxx_port_based_vlan_map(ds, port);
if (ret)
goto abort;
@@ -2184,13 +2566,6 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
ret = mv88e6xxx_setup_port(ds, i);
if (ret < 0)
return ret;
-
- if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
- continue;
-
- ret = mv88e6xxx_setup_port_default_vlan(ds, i);
- if (ret < 0)
- return ret;
}
return 0;
}
@@ -2383,13 +2758,9 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
- if (ret < 0)
- goto error;
- ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
-error:
- _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
mutex_unlock(&ps->smi_mutex);
+
return ret;
}
@@ -2400,14 +2771,9 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
- if (ret < 0)
- goto error;
-
- ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
-error:
- _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
mutex_unlock(&ps->smi_mutex);
+
return ret;
}
@@ -2648,8 +3014,8 @@ static int __init mv88e6xxx_init(void)
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
register_switch_driver(&mv88e6131_switch_driver);
#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
- register_switch_driver(&mv88e6123_61_65_switch_driver);
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
+ register_switch_driver(&mv88e6123_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
register_switch_driver(&mv88e6352_switch_driver);
@@ -2669,8 +3035,8 @@ static void __exit mv88e6xxx_cleanup(void)
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
unregister_switch_driver(&mv88e6352_switch_driver);
#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
- unregister_switch_driver(&mv88e6123_61_65_switch_driver);
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
+ unregister_switch_driver(&mv88e6123_switch_driver);
#endif
#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
unregister_switch_driver(&mv88e6131_switch_driver);
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index ca08f913d302..26a424acd10f 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -28,6 +28,10 @@
#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
#define SMI_DATA 0x01
+/* Fiber/SERDES Registers are located at SMI address 0x0F, page 1 */
+#define REG_FIBER_SERDES 0x0f
+#define PAGE_FIBER_SERDES 0x01
+
#define REG_PORT(p) (0x10 + (p))
#define PORT_STATUS 0x00
#define PORT_STATUS_PAUSE_EN BIT(15)
@@ -45,6 +49,10 @@
#define PORT_STATUS_MGMII BIT(6) /* 6185 */
#define PORT_STATUS_TX_PAUSED BIT(5)
#define PORT_STATUS_FLOW_CTRL BIT(4)
+#define PORT_STATUS_CMODE_MASK 0x0f
+#define PORT_STATUS_CMODE_100BASE_X 0x8
+#define PORT_STATUS_CMODE_1000BASE_X 0x9
+#define PORT_STATUS_CMODE_SGMII 0xa
#define PORT_PCS_CTRL 0x01
#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15)
#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14)
@@ -133,7 +141,9 @@
#define PORT_CONTROL_STATE_LEARNING 0x02
#define PORT_CONTROL_STATE_FORWARDING 0x03
#define PORT_CONTROL_1 0x05
+#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0)
#define PORT_BASE_VLAN 0x06
+#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12)
#define PORT_DEFAULT_VLAN 0x07
#define PORT_DEFAULT_VLAN_MASK 0xfff
#define PORT_CONTROL_2 0x08
@@ -355,6 +365,8 @@
#define GLOBAL2_QOS_WEIGHT 0x1c
#define GLOBAL2_MISC 0x1d
+#define MV88E6XXX_N_FID 4096
+
struct mv88e6xxx_switch_id {
u16 id;
char *name;
@@ -379,6 +391,11 @@ struct mv88e6xxx_vtu_stu_entry {
u8 data[DSA_MAX_PORTS];
};
+struct mv88e6xxx_priv_port {
+ struct net_device *bridge_dev;
+ u8 state;
+};
+
struct mv88e6xxx_priv_state {
/* When using multi-chip addressing, this mutex protects
* access to the indirect access registers. (In single-chip
@@ -415,8 +432,9 @@ struct mv88e6xxx_priv_state {
int id; /* switch product id */
int num_ports; /* number of switch ports */
- unsigned long port_state_update_mask;
- u8 port_state[DSA_MAX_PORTS];
+ struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS];
+
+ DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS);
struct work_struct bridge_work;
};
@@ -476,9 +494,12 @@ int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
struct phy_device *phydev, struct ethtool_eee *e);
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members);
-int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members);
+int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge);
+void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port);
int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering);
int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct switchdev_trans *trans);
@@ -487,9 +508,9 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
struct switchdev_trans *trans);
int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
-int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid);
-int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
- unsigned long *ports, unsigned long *untagged);
+int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj));
int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_fdb *fdb,
struct switchdev_trans *trans);
@@ -506,7 +527,7 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
int reg, int val);
extern struct dsa_switch_driver mv88e6131_switch_driver;
-extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+extern struct dsa_switch_driver mv88e6123_switch_driver;
extern struct dsa_switch_driver mv88e6352_switch_driver;
extern struct dsa_switch_driver mv88e6171_switch_driver;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 17b2126075e0..d81fceddbe0e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1601,15 +1601,9 @@ vortex_up(struct net_device *dev)
dev->name, media_tbl[dev->if_port].name);
}
- init_timer(&vp->timer);
- vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
- vp->timer.data = (unsigned long)dev;
- vp->timer.function = vortex_timer; /* timer handler */
- add_timer(&vp->timer);
-
- init_timer(&vp->rx_oom_timer);
- vp->rx_oom_timer.data = (unsigned long)dev;
- vp->rx_oom_timer.function = rx_oom_timer;
+ setup_timer(&vp->timer, vortex_timer, (unsigned long)dev);
+ mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
+ setup_timer(&vp->rx_oom_timer, rx_oom_timer, (unsigned long)dev);
if (vortex_debug > 1)
pr_debug("%s: Initial media type %s.\n",
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 0b13af8e4070..2ffd63463299 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -106,6 +106,7 @@ config LANTIQ_ETOP
Support for the MII0 inside the Lantiq SoC
source "drivers/net/ethernet/marvell/Kconfig"
+source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
@@ -138,7 +139,6 @@ config NET_NETX
source "drivers/net/ethernet/nuvoton/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
-source "drivers/net/ethernet/octeon/Kconfig"
source "drivers/net/ethernet/oki-semi/Kconfig"
config ETHOC
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 38dc1a776a2b..1d349e9aa9a6 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_KORINA) += korina.o
obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
+obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
@@ -58,7 +59,6 @@ obj-$(CONFIG_NET_NETX) += netx-eth.o
obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
obj-$(CONFIG_LPC_ENET) += nxp/
-obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
obj-$(CONFIG_ETHOC) += ethoc.o
obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b6fa89102526..bbef95973c27 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -6,7 +6,7 @@
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -768,12 +768,16 @@
#define MTL_Q_TQDR 0x08
#define MTL_Q_RQOMR 0x40
#define MTL_Q_RQMPOCR 0x44
-#define MTL_Q_RQDR 0x4c
+#define MTL_Q_RQDR 0x48
#define MTL_Q_RQFCR 0x50
#define MTL_Q_IER 0x70
#define MTL_Q_ISR 0x74
/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_INDEX 16
+#define MTL_Q_RQDR_PRXQ_WIDTH 14
+#define MTL_Q_RQDR_RXQSTS_INDEX 4
+#define MTL_Q_RQDR_RXQSTS_WIDTH 2
#define MTL_Q_RQFCR_RFA_INDEX 1
#define MTL_Q_RQFCR_RFA_WIDTH 6
#define MTL_Q_RQFCR_RFD_INDEX 17
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index a6b9899e285f..895d35639129 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -6,7 +6,7 @@
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -146,6 +146,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int i, tc_ets, tc_ets_weight;
+ u8 max_tc = 0;
tc_ets = 0;
tc_ets_weight = 0;
@@ -157,12 +158,9 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
ets->prio_tc[i]);
- if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
- (i >= pdata->hw_feat.tc_cnt))
- return -EINVAL;
-
- if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt)
- return -EINVAL;
+ max_tc = max_t(u8, max_tc, ets->prio_tc[i]);
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]))
+ max_tc = max_t(u8, max_tc, i);
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
@@ -171,15 +169,28 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
tc_ets = 1;
tc_ets_weight += ets->tc_tx_bw[i];
break;
-
default:
+ netif_err(pdata, drv, netdev,
+ "unsupported TSA algorithm (%hhu)\n",
+ ets->tc_tsa[i]);
return -EINVAL;
}
}
+ /* Check maximum traffic class requested */
+ if (max_tc >= pdata->hw_feat.tc_cnt) {
+ netif_err(pdata, drv, netdev,
+ "exceeded number of supported traffic classes\n");
+ return -EINVAL;
+ }
+
/* Weights must add up to 100% */
- if (tc_ets && (tc_ets_weight != 100))
+ if (tc_ets && (tc_ets_weight != 100)) {
+ netif_err(pdata, drv, netdev,
+ "sum of ETS algorithm weights is not 100 (%u)\n",
+ tc_ets_weight);
return -EINVAL;
+ }
if (!pdata->ets) {
pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
@@ -188,6 +199,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
return -ENOMEM;
}
+ pdata->num_tcs = max_tc + 1;
memcpy(pdata->ets, ets, sizeof(*pdata->ets));
pdata->hw_if.config_dcb_tc(pdata);
@@ -221,6 +233,13 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
"cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+ /* Check PFC for supported number of traffic classes */
+ if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) {
+ netif_err(pdata, drv, netdev,
+ "PFC requested for unsupported traffic class\n");
+ return -EINVAL;
+ }
+
if (!pdata->pfc) {
pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f6a7161e3b85..1babcc11a248 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -6,7 +6,7 @@
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -518,13 +518,45 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
+ struct ieee_pfc *pfc = pdata->pfc;
+ struct ieee_ets *ets = pdata->ets;
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
unsigned int i;
/* Set MTL flow control */
- for (i = 0; i < pdata->rx_q_count; i++)
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ unsigned int ehfc = 0;
+
+ if (pfc && ets) {
+ unsigned int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ unsigned int tc;
+
+ /* Does this queue handle the priority? */
+ if (pdata->prio2q_map[prio] != i)
+ continue;
+
+ /* Get the Traffic Class for this priority */
+ tc = ets->prio_tc[prio];
+
+ /* Check if flow control should be enabled */
+ if (pfc->pfc_en & (1 << tc)) {
+ ehfc = 1;
+ break;
+ }
+ }
+ } else {
+ ehfc = 1;
+ }
+
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "flow control %s for RXq%u\n",
+ ehfc ? "enabled" : "disabled", i);
+ }
/* Set MAC flow control */
max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
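The per-queue decision above enables hardware flow control (EHFC) on an Rx queue only if some priority steered to that queue belongs to a PFC-enabled traffic class. A self-contained replay of that decision, with the priority-to-queue and priority-to-TC maps invented for illustration:

#include <stdio.h>

#define MAX_PRIOS 8

static const int prio2q[MAX_PRIOS]  = { 0, 0, 1, 1, 2, 2, 3, 3 };
static const int prio_tc[MAX_PRIOS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

int main(void)
{
	unsigned int pfc_en = 1 << 2;	/* PFC enabled on TC2 only */

	for (int q = 0; q < 4; q++) {
		int ehfc = 0;

		for (int prio = 0; prio < MAX_PRIOS; prio++) {
			if (prio2q[prio] != q)
				continue;
			if (pfc_en & (1 << prio_tc[prio])) {
				ehfc = 1;
				break;
			}
		}
		printf("RXq%d: flow control %s\n",
		       q, ehfc ? "enabled" : "disabled");
	}
	return 0;
}
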
@@ -702,6 +734,113 @@ static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
return 0;
}
+static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ /* Put the VLAN tag in the Rx descriptor */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+ /* Don't check the VLAN type */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+ /* Check only C-TAG (0x8100) packets */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+ /* Enable VLAN tag stripping */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Enable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+ /* Enable VLAN Hash Table filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+ /* Disable VLAN tag inverse matching */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Disable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+ return 0;
+}
+
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+ u32 poly = 0xedb88320; /* CRCPOLY_LE */
+ u32 crc = ~0;
+ u32 temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= poly;
+ }
+
+ return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+ u32 crc;
+ u16 vid;
+ __le16 vid_le;
+ u16 vlan_hash_table = 0;
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+ vlan_hash_table |= (1 << crc);
+ }
+
+ /* Set the VLAN Hash Table filtering register */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+ return 0;
+}
+
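For reference, a self-contained userspace replay of the hash above: a bit-serial little-endian CRC-32 over the 12 VID bits, with bitrev32(~crc) >> 28 selecting one of the 16 hash-table bits. bitrev32() is re-implemented locally, and a little-endian host is assumed so a plain u16 stands in for cpu_to_le16():

#include <stdint.h>
#include <stdio.h>

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++)
		if (x & (1u << i))
			r |= 1u << (31 - i);
	return r;
}

static uint32_t vid_crc32_le(uint16_t vid_le)
{
	uint32_t poly = 0xedb88320;	/* CRCPOLY_LE */
	uint32_t crc = ~0u;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;

	for (int i = 0; i < 12; i++) {	/* get_bitmask_order(0xfff) == 12 */
		uint32_t temp;

		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}
	return crc;
}

int main(void)
{
	unsigned int vid = 1;
	uint32_t bucket = bitrev32(~vid_crc32_le((uint16_t)vid)) >> 28;

	printf("VID %u -> hash-table bit %u\n", vid, bucket);
	return 0;
}
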
static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
unsigned int enable)
{
@@ -714,6 +853,14 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
+ /* Hardware will still perform VLAN filtering in promiscuous mode */
+ if (enable) {
+ xgbe_disable_rx_vlan_filtering(pdata);
+ } else {
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xgbe_enable_rx_vlan_filtering(pdata);
+ }
+
return 0;
}
@@ -875,6 +1022,7 @@ static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg)
{
+ unsigned long flags;
unsigned int mmd_address;
int mmd_data;
@@ -892,10 +1040,10 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
* register offsets must therefore be adjusted by left shifting the
* offset 2 bits and reading 32 bits of data.
*/
- mutex_lock(&pdata->xpcs_mutex);
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
- mutex_unlock(&pdata->xpcs_mutex);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
return mmd_data;
}
@@ -904,6 +1052,7 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg, int mmd_data)
{
unsigned int mmd_address;
+ unsigned long flags;
if (mmd_reg & MII_ADDR_C45)
mmd_address = mmd_reg & ~MII_ADDR_C45;
@@ -919,10 +1068,10 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
* register offsets must therefore be adjusted by left shifting the
* offset 2 bits and reading 32 bits of data.
*/
- mutex_lock(&pdata->xpcs_mutex);
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
- mutex_unlock(&pdata->xpcs_mutex);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
@@ -944,116 +1093,6 @@ static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
return 0;
}
-static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
-{
- /* Put the VLAN tag in the Rx descriptor */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
-
- /* Don't check the VLAN type */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
-
- /* Check only C-TAG (0x8100) packets */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
-
- /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
-
- /* Enable VLAN tag stripping */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
-
- return 0;
-}
-
-static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
-{
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
-
- return 0;
-}
-
-static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
-{
- /* Enable VLAN filtering */
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
-
- /* Enable VLAN Hash Table filtering */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
-
- /* Disable VLAN tag inverse matching */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
-
- /* Only filter on the lower 12-bits of the VLAN tag */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
-
- /* In order for the VLAN Hash Table filtering to be effective,
- * the VLAN tag identifier in the VLAN Tag Register must not
- * be zero. Set the VLAN tag identifier to "1" to enable the
- * VLAN Hash Table filtering. This implies that a VLAN tag of
- * 1 will always pass filtering.
- */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
-
- return 0;
-}
-
-static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
-{
- /* Disable VLAN filtering */
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
-
- return 0;
-}
-
-#ifndef CRCPOLY_LE
-#define CRCPOLY_LE 0xedb88320
-#endif
-static u32 xgbe_vid_crc32_le(__le16 vid_le)
-{
- u32 poly = CRCPOLY_LE;
- u32 crc = ~0;
- u32 temp = 0;
- unsigned char *data = (unsigned char *)&vid_le;
- unsigned char data_byte = 0;
- int i, bits;
-
- bits = get_bitmask_order(VLAN_VID_MASK);
- for (i = 0; i < bits; i++) {
- if ((i % 8) == 0)
- data_byte = data[i / 8];
-
- temp = ((crc & 1) ^ data_byte) & 1;
- crc >>= 1;
- data_byte >>= 1;
-
- if (temp)
- crc ^= poly;
- }
-
- return crc;
-}
-
-static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
-{
- u32 crc;
- u16 vid;
- __le16 vid_le;
- u16 vlan_hash_table = 0;
-
- /* Generate the VLAN Hash Table value */
- for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
- /* Get the CRC32 value of the VLAN ID */
- vid_le = cpu_to_le16(vid);
- crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
-
- vlan_hash_table |= (1 << crc);
- }
-
- /* Set the VLAN Hash Table filtering register */
- XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
-
- return 0;
-}
-
static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
struct xgbe_ring_desc *rdesc = rdata->rdesc;
@@ -1288,11 +1327,42 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
return 0;
}
+static void xgbe_config_tc(struct xgbe_prv_data *pdata)
+{
+ unsigned int offset, queue, prio;
+ u8 i;
+
+ netdev_reset_tc(pdata->netdev);
+ if (!pdata->num_tcs)
+ return;
+
+ netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
+
+ for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
+ while ((queue < pdata->tx_q_count) &&
+ (pdata->q2tc_map[queue] == i))
+ queue++;
+
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
+ i, offset, queue - 1);
+ netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
+ offset = queue;
+ }
+
+ if (!pdata->ets)
+ return;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ netdev_set_prio_tc_map(pdata->netdev, prio,
+ pdata->ets->prio_tc[prio]);
+}
+
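xgbe_config_tc() above groups consecutive Tx queues into traffic classes by scanning q2tc_map. A toy run of the same scan, with the queue map invented for illustration:

#include <stdio.h>

int main(void)
{
	int q2tc_map[] = { 0, 0, 1, 2, 2, 2 };	/* invented mapping */
	int tx_q_count = 6, num_tcs = 3;
	int queue = 0, offset = 0;

	for (int i = 0; i < num_tcs; i++) {
		while (queue < tx_q_count && q2tc_map[queue] == i)
			queue++;
		printf("TC%d using TXq%d-%d (%d queues at offset %d)\n",
		       i, offset, queue - 1, queue - offset, offset);
		offset = queue;
	}
	return 0;
}
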
static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
struct ieee_ets *ets = pdata->ets;
unsigned int total_weight, min_weight, weight;
- unsigned int i;
+ unsigned int mask, reg, reg_val;
+ unsigned int i, prio;
if (!ets)
return;
@@ -1309,6 +1379,25 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
min_weight = 1;
for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ /* Map the priorities to the traffic class */
+ mask = 0;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if (ets->prio_tc[prio] == i)
+ mask |= (1 << prio);
+ }
+ mask &= 0xff;
+
+ netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
+ i, mask);
+ reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
+ reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ /* Set the traffic class algorithm */
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
netif_dbg(pdata, drv, pdata->netdev,
@@ -1329,38 +1418,12 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
break;
}
}
+
+ xgbe_config_tc(pdata);
}
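The mapping loop above stores one 8-bit priority mask per traffic class, four classes to a 32-bit MTL_TCPM register; (i / MTL_TCPM_TC_PER_REG) picks the register and (i % MTL_TCPM_TC_PER_REG) << 3 the byte lane. A sketch of that indexing with placeholder register values, not the real MTL offsets:

#include <stdio.h>

#define TCPM0R		0x1040	/* illustrative base offset */
#define TCPM_INC	0x04	/* illustrative register stride */
#define TC_PER_REG	4	/* four 8-bit masks per 32-bit register */

int main(void)
{
	for (int i = 0; i < 8; i++) {
		unsigned int reg = TCPM0R + TCPM_INC * (i / TC_PER_REG);
		unsigned int shift = (i % TC_PER_REG) << 3;

		printf("TC%d -> reg 0x%04x, bits %u-%u\n",
		       i, reg, shift, shift + 7);
	}
	return 0;
}
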
static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
- struct ieee_ets *ets = pdata->ets;
- unsigned int mask, reg, reg_val;
- unsigned int tc, prio;
-
- if (!pfc || !ets)
- return;
-
- for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
- mask = 0;
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
- if ((pfc->pfc_en & (1 << prio)) &&
- (ets->prio_tc[prio] == tc))
- mask |= (1 << prio);
- }
- mask &= 0xff;
-
- netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
- tc, mask);
- reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
- reg_val = XGMAC_IOREAD(pdata, reg);
-
- reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
- reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));
-
- XGMAC_IOWRITE(pdata, reg, reg_val);
- }
-
xgbe_config_flow_control(pdata);
}
@@ -2595,6 +2658,32 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
}
}
+static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
+ * wait forever though...
+ */
+ rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, rx_timeout)) {
+ rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
+ if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
+ (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, rx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
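The loop above bounds the wait with the kernel's wrap-safe jiffies comparison; time_before(a, b) reduces to a signed test on the unsigned difference, so it stays correct when the counter wraps. A userspace illustration:

#include <stdio.h>
#include <limits.h>

#define time_before(a, b)	((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long jiffies = ULONG_MAX - 0x10;	/* about to wrap */
	unsigned long timeout = jiffies + 0x20;		/* wraps past 0 */
	unsigned long polls = 0;

	while (time_before(jiffies, timeout)) {
		jiffies++;		/* stand-in for one polling pass */
		polls++;
	}
	printf("polled %lu times across the wrap (timeout=0x%lx)\n",
	       polls, timeout);
	return 0;
}
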
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
@@ -2633,6 +2722,10 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ xgbe_prepare_rx_stop(pdata, i);
+
/* Disable each Rx queue */
XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
@@ -2881,6 +2974,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
/* For Data Center Bridging config */
+ hw_if->config_tc = xgbe_config_tc;
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 8a9b493566c9..ebf9224b2d31 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -6,7 +6,7 @@
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -356,7 +356,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
xgbe_disable_rx_tx_ints(pdata);
/* Turn on polling */
- __napi_schedule(&pdata->napi);
+ __napi_schedule_irqoff(&pdata->napi);
}
}
@@ -409,7 +409,7 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
disable_irq_nosync(channel->dma_irq);
/* Turn on polling */
- __napi_schedule(&channel->napi);
+ __napi_schedule_irqoff(&channel->napi);
}
return IRQ_HANDLED;
@@ -1626,30 +1626,22 @@ static void xgbe_poll_controller(struct net_device *netdev)
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
-static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
+static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc_to_netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- unsigned int offset, queue;
- u8 i;
+ u8 tc;
- if (tc && (tc != pdata->hw_feat.tc_cnt))
+ if (tc_to_netdev->type != TC_SETUP_MQPRIO)
return -EINVAL;
- if (tc) {
- netdev_set_num_tc(netdev, tc);
- for (i = 0, queue = 0, offset = 0; i < tc; i++) {
- while ((queue < pdata->tx_q_count) &&
- (pdata->q2tc_map[queue] == i))
- queue++;
-
- netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
- i, offset, queue - 1);
- netdev_set_tc_queue(netdev, i, queue - offset, offset);
- offset = queue;
- }
- } else {
- netdev_reset_tc(netdev);
- }
+ tc = tc_to_netdev->tc;
+
+ if (tc > pdata->hw_feat.tc_cnt)
+ return -EINVAL;
+
+ pdata->num_tcs = tc;
+ pdata->hw_if.config_tc(pdata);
return 0;
}
@@ -2062,7 +2054,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
/* If we processed everything, we are done */
if (processed < budget) {
/* Turn off polling */
- napi_complete(napi);
+ napi_complete_done(napi, processed);
/* Enable Tx and Rx interrupts */
enable_irq(channel->dma_irq);
@@ -2104,7 +2096,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
/* If we processed everything, we are done */
if (processed < budget) {
/* Turn off polling */
- napi_complete(napi);
+ napi_complete_done(napi, processed);
/* Enable Tx and Rx interrupts */
xgbe_enable_rx_tx_ints(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 6040293db9c1..11d9f0c5b78b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -6,7 +6,7 @@
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -318,8 +318,20 @@ static int xgbe_set_settings(struct net_device *netdev,
if (cmd->autoneg == AUTONEG_DISABLE) {
switch (speed) {
case SPEED_10000:
+ break;
case SPEED_2500:
+ if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) {
+ netdev_err(netdev, "unsupported speed %u\n",
+ speed);
+ return -EINVAL;
+ }
+ break;
case SPEED_1000:
+ if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) {
+ netdev_err(netdev, "unsupported speed %u\n",
+ speed);
+ return -EINVAL;
+ }
break;
default:
netdev_err(netdev, "unsupported speed %u\n", speed);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 618d952c2984..3eee3201b58f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -6,7 +6,7 @@
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -363,7 +363,7 @@ static int xgbe_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
spin_lock_init(&pdata->lock);
- mutex_init(&pdata->xpcs_mutex);
+ spin_lock_init(&pdata->xpcs_lock);
mutex_init(&pdata->rss_mutex);
spin_lock_init(&pdata->tstamp_lock);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 446058081866..84c5d296d13e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -6,7 +6,7 @@
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -626,10 +626,22 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
- /* Interrupt reason must be read and cleared outside of IRQ context */
- disable_irq_nosync(pdata->an_irq);
+ /* Disable AN interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+ /* Save the interrupt(s) that fired */
+ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
- queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
+
+ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+ } else {
+ /* Enable AN interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
+ XGBE_AN_INT_MASK);
+ }
return IRQ_HANDLED;
}
@@ -673,34 +685,26 @@ static void xgbe_an_state_machine(struct work_struct *work)
struct xgbe_prv_data,
an_work);
enum xgbe_an cur_state = pdata->an_state;
- unsigned int int_reg, int_mask;
mutex_lock(&pdata->an_mutex);
- /* Read the interrupt */
- int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
- if (!int_reg)
+ if (!pdata->an_int)
goto out;
next_int:
- if (int_reg & XGBE_AN_PG_RCV) {
+ if (pdata->an_int & XGBE_AN_PG_RCV) {
pdata->an_state = XGBE_AN_PAGE_RECEIVED;
- int_mask = XGBE_AN_PG_RCV;
- } else if (int_reg & XGBE_AN_INC_LINK) {
+ pdata->an_int &= ~XGBE_AN_PG_RCV;
+ } else if (pdata->an_int & XGBE_AN_INC_LINK) {
pdata->an_state = XGBE_AN_INCOMPAT_LINK;
- int_mask = XGBE_AN_INC_LINK;
- } else if (int_reg & XGBE_AN_INT_CMPLT) {
+ pdata->an_int &= ~XGBE_AN_INC_LINK;
+ } else if (pdata->an_int & XGBE_AN_INT_CMPLT) {
pdata->an_state = XGBE_AN_COMPLETE;
- int_mask = XGBE_AN_INT_CMPLT;
+ pdata->an_int &= ~XGBE_AN_INT_CMPLT;
} else {
pdata->an_state = XGBE_AN_ERROR;
- int_mask = 0;
}
- /* Clear the interrupt to be processed */
- int_reg &= ~int_mask;
- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
-
pdata->an_result = pdata->an_state;
again:
@@ -740,14 +744,14 @@ again:
}
if (pdata->an_state == XGBE_AN_NO_LINK) {
- int_reg = 0;
+ pdata->an_int = 0;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
} else if (pdata->an_state == XGBE_AN_ERROR) {
netdev_err(pdata->netdev,
"error during auto-negotiation, state=%u\n",
cur_state);
- int_reg = 0;
+ pdata->an_int = 0;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
}
@@ -765,11 +769,12 @@ again:
if (cur_state != pdata->an_state)
goto again;
- if (int_reg)
+ if (pdata->an_int)
goto next_int;
out:
- enable_irq(pdata->an_irq);
+ /* Enable AN interrupts on the way out */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
mutex_unlock(&pdata->an_mutex);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index e234b9970318..98d9d63c4353 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -6,7 +6,7 @@
*
* License 1: GPLv2
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
*
* This file is free software; you may copy, redistribute and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
*
* License 2: Modified BSD
*
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -673,6 +673,7 @@ struct xgbe_hw_if {
u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
/* For Data Center Bridging config */
+ void (*config_tc)(struct xgbe_prv_data *);
void (*config_dcb_tc)(struct xgbe_prv_data *);
void (*config_dcb_pfc)(struct xgbe_prv_data *);
@@ -773,8 +774,8 @@ struct xgbe_prv_data {
/* Overall device lock */
spinlock_t lock;
- /* XPCS indirect addressing mutex */
- struct mutex xpcs_mutex;
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
/* RSS addressing mutex */
struct mutex rss_mutex;
@@ -880,6 +881,7 @@ struct xgbe_prv_data {
struct ieee_pfc *pfc;
unsigned int q2tc_map[XGBE_MAX_QUEUES];
unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
+ u8 num_tcs;
/* Hardware features of the device */
struct xgbe_hw_features hw_feat;
@@ -925,6 +927,7 @@ struct xgbe_prv_data {
u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
/* Auto-negotiation state machine support */
+ unsigned int an_int;
struct mutex an_mutex;
enum xgbe_an an_result;
enum xgbe_an an_state;
diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile
index 700b5abe5de5..f46321f68315 100644
--- a/drivers/net/ethernet/apm/xgene/Makefile
+++ b/drivers/net/ethernet/apm/xgene/Makefile
@@ -3,5 +3,6 @@
#
xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
- xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o
+ xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o \
+ xgene_enet_cle.o
obj-$(CONFIG_NET_XGENE) += xgene-enet.o
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
new file mode 100644
index 000000000000..b212488606da
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -0,0 +1,734 @@
+/* Applied Micro X-Gene SoC Ethernet Classifier structures
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Authors: Khuong Dinh <kdinh@apm.com>
+ * Tanmay Inamdar <tinamdar@apm.com>
+ * Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+
+/* interfaces to convert structures to HW recognized bit formats */
+static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver,
+ enum xgene_cle_prot_type type, u32 len,
+ u32 *reg)
+{
+ *reg = SET_VAL(SB_IPFRAG, frag) |
+ SET_VAL(SB_IPPROT, type) |
+ SET_VAL(SB_IPVER, ver) |
+ SET_VAL(SB_HDRLEN, len);
+}
+
+static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
+ u32 nfpsel, u32 *idt_reg)
+{
+ *idt_reg = SET_VAL(IDT_DSTQID, dstqid) |
+ SET_VAL(IDT_FPSEL, fpsel) |
+ SET_VAL(IDT_NFPSEL, nfpsel);
+}
+
+static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
+ struct xgene_cle_dbptr *dbptr, u32 *buf)
+{
+ buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
+ SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
+
+ buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) |
+ SET_VAL(CLE_PRIORITY, dbptr->cle_priority);
+}
+
+static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
+{
+ u32 i, j = 0;
+ u32 data;
+
+ buf[j++] = SET_VAL(CLE_TYPE, kn->node_type);
+ for (i = 0; i < kn->num_keys; i++) {
+ struct xgene_cle_ptree_key *key = &kn->key[i];
+
+ if (!(i % 2)) {
+ buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) |
+ SET_VAL(CLE_KN_RPTR, key->result_pointer);
+ } else {
+ data = SET_VAL(CLE_KN_PRIO, key->priority) |
+ SET_VAL(CLE_KN_RPTR, key->result_pointer);
+ buf[j++] |= (data << 16);
+ }
+ }
+}
+
+static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn,
+ u32 *buf, u32 jb)
+{
+ struct xgene_cle_ptree_branch *br;
+ u32 i, j = 0;
+ u32 npp;
+
+ buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) |
+ SET_VAL(CLE_DN_LASTN, dn->last_node) |
+ SET_VAL(CLE_DN_HLS, dn->hdr_len_store) |
+ SET_VAL(CLE_DN_EXT, dn->hdr_extn) |
+ SET_VAL(CLE_DN_BSTOR, dn->byte_store) |
+ SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) |
+ SET_VAL(CLE_DN_RPTR, dn->result_pointer);
+
+ for (i = 0; i < dn->num_branches; i++) {
+ br = &dn->branch[i];
+ npp = br->next_packet_pointer;
+
+ if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE))
+ npp += jb;
+
+ buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) |
+ SET_VAL(CLE_BR_NPPTR, npp) |
+ SET_VAL(CLE_BR_JB, br->jump_bw) |
+ SET_VAL(CLE_BR_JR, br->jump_rel) |
+ SET_VAL(CLE_BR_OP, br->operation) |
+ SET_VAL(CLE_BR_NNODE, br->next_node) |
+ SET_VAL(CLE_BR_NBR, br->next_branch);
+
+ buf[j++] = SET_VAL(CLE_BR_DATA, br->data) |
+ SET_VAL(CLE_BR_MASK, br->mask);
+ }
+}
+
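The *_to_hw() packers above build each 32-bit word with SET_VAL(), a shift-and-mask over per-field position/length macros. A minimal sketch of that idiom; the field positions and the macro spelling here are illustrative, not the real CLE definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field descriptors: position and width within a u32. */
#define DEMO_FPSEL_POS	4
#define DEMO_FPSEL_LEN	4
#define DEMO_DSTQID_POS	8
#define DEMO_DSTQID_LEN	12

#define SET_VAL(field, val) \
	(((uint32_t)(val) & ((1u << field##_LEN) - 1)) << field##_POS)

int main(void)
{
	uint32_t reg = SET_VAL(DEMO_FPSEL, 3) | SET_VAL(DEMO_DSTQID, 0x120);

	printf("packed word: 0x%08x\n", reg);
	return 0;
}
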
+static int xgene_cle_poll_cmd_done(void __iomem *base,
+ enum xgene_cle_cmd_type cmd)
+{
+ u32 status, loop = 10;
+ int ret = -EBUSY;
+
+ while (loop--) {
+ status = ioread32(base + INDCMD_STATUS);
+ if (status & cmd) {
+ ret = 0;
+ break;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ return ret;
+}
+
+static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs,
+ u32 index, enum xgene_cle_dram_type type,
+ enum xgene_cle_cmd_type cmd)
+{
+ enum xgene_cle_parser parser = cle->active_parser;
+ void __iomem *base = cle->base;
+ u32 i, j, ind_addr;
+ u8 port, nparsers;
+ int ret = 0;
+
+ /* PTREE_RAM onwards, DRAM regions are common for all parsers */
+ nparsers = (type >= PTREE_RAM) ? 1 : cle->parsers;
+
+ for (i = 0; i < nparsers; i++) {
+ port = i;
+ if ((type < PTREE_RAM) && (parser != PARSER_ALL))
+ port = parser;
+
+ ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index;
+ iowrite32(ind_addr, base + INDADDR);
+ for (j = 0; j < nregs; j++)
+ iowrite32(data[j], base + DATA_RAM0 + (j * 4));
+ iowrite32(cmd, base + INDCMD);
+
+ ret = xgene_cle_poll_cmd_done(base, cmd);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_cle *cle)
+{
+ struct xgene_cle_ptree *ptree = &cle->ptree;
+ void __iomem *addr, *base = cle->base;
+ u32 offset = CLE_PORT_OFFSET;
+ u32 i;
+
+ /* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */
+ ptree->start_pkt += cle->jump_bytes;
+ for (i = 0; i < cle->parsers; i++) {
+ if (cle->active_parser != PARSER_ALL)
+ addr = base + cle->active_parser * offset;
+ else
+ addr = base + (i * offset);
+
+ iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0);
+ iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0);
+ }
+}
+
+static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_cle *cle)
+{
+ struct xgene_cle_ptree *ptree = &cle->ptree;
+ u32 buf[CLE_DRAM_REGS];
+ u32 i;
+ int ret;
+
+ memset(buf, 0, sizeof(buf));
+ for (i = 0; i < ptree->num_dbptr; i++) {
+ xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf);
+ ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr,
+ DB_RAM, CLE_CMD_WR);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_cle *cle)
+{
+ struct xgene_cle_ptree *ptree = &cle->ptree;
+ struct xgene_cle_ptree_ewdn *dn = ptree->dn;
+ struct xgene_cle_ptree_kn *kn = ptree->kn;
+ u32 buf[CLE_DRAM_REGS];
+ int i, j, ret;
+
+ memset(buf, 0, sizeof(buf));
+ for (i = 0; i < ptree->num_dn; i++) {
+ xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
+ ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
+ PTREE_RAM, CLE_CMD_WR);
+ if (ret)
+ return ret;
+ }
+
+ /* continue the node index for the key nodes */
+ memset(buf, 0, sizeof(buf));
+ for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) {
+ xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf);
+ ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
+ PTREE_RAM, CLE_CMD_WR);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_cle *cle)
+{
+ int ret;
+
+ ret = xgene_cle_setup_node(pdata, cle);
+ if (ret)
+ return ret;
+
+ ret = xgene_cle_setup_dbptr(pdata, cle);
+ if (ret)
+ return ret;
+
+ xgene_cle_enable_ptree(pdata, cle);
+
+ return 0;
+}
+
+static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_cle *enet_cle,
+ struct xgene_cle_dbptr *dbptr,
+ u32 index, u8 priority)
+{
+ void __iomem *base = enet_cle->base;
+ void __iomem *base_addr;
+ u32 buf[CLE_DRAM_REGS];
+ u32 def_cls, offset;
+ u32 i, j;
+
+ memset(buf, 0, sizeof(buf));
+ xgene_cle_dbptr_to_hw(pdata, dbptr, buf);
+
+ for (i = 0; i < enet_cle->parsers; i++) {
+ if (enet_cle->active_parser != PARSER_ALL) {
+ offset = enet_cle->active_parser *
+ CLE_PORT_OFFSET;
+ } else {
+ offset = i * CLE_PORT_OFFSET;
+ }
+
+ base_addr = base + DFCLSRESDB00 + offset;
+ for (j = 0; j < 6; j++)
+ iowrite32(buf[j], base_addr + (j * 4));
+
+ def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
+ iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
+ }
+}
+
+static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
+{
+ u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
+ u32 mac_hdr_len = ETH_HLEN;
+ u32 sband, reg = 0;
+ u32 ipv4_ihl = 5;
+ u32 hdr_len;
+ int ret;
+
+ /* Sideband: IPv4/TCP packets */
+ hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
+ xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
+ sband = reg;
+
+ /* Sideband: IPv4/UDP packets */
+ hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
+ xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
+ sband |= (reg << 16);
+
+ ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
+ if (ret)
+ return ret;
+
+ /* Sideband: IPv4/RAW packets */
+ hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
+ xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
+ hdr_len, &reg);
+ sband = reg;
+
+ /* Sideband: Ethernet II/RAW packets */
+ hdr_len = (mac_hdr_len << 5);
+ xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
+ hdr_len, &reg);
+ sband |= (reg << 16);
+
+ ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
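The sideband hdr_len word packs the MAC header length above the low five bits, which carry the IPv4 IHL in 32-bit words; with ETH_HLEN = 14 and IHL = 5 that gives (14 << 5) | 5 = 453 = 0x1c5. A one-line check:

#include <stdio.h>

int main(void)
{
	unsigned int mac_hdr_len = 14;	/* ETH_HLEN */
	unsigned int ipv4_ihl = 5;	/* 20-byte IPv4 header, in words */
	unsigned int hdr_len = (mac_hdr_len << 5) | ipv4_ihl;

	printf("hdr_len = %u (0x%x)\n", hdr_len, hdr_len);
	return 0;
}
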
+static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
+{
+ u32 secret_key_ipv4[4]; /* 16 bytes */
+ int ret = 0;
+
+ get_random_bytes(secret_key_ipv4, 16);
+ ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
+ RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
+ return ret;
+}
+
+static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
+{
+ u32 fpsel, dstqid, nfpsel, idt_reg, idx;
+ int i, ret = 0;
+ u16 pool_id;
+
+ for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
+ idx = i % pdata->rxq_cnt;
+ pool_id = pdata->rx_ring[idx]->buf_pool->id;
+ fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
+ dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
+ nfpsel = 0;
+ idt_reg = 0;
+
+ xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg);
+ ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
+ RSS_IDT, CLE_CMD_WR);
+ if (ret)
+ return ret;
+ }
+
+ ret = xgene_cle_set_rss_skeys(&pdata->cle);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
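The indirection table above is filled round-robin, entry i steering to Rx ring i % rxq_cnt, before the random 16-byte secret key is written. A toy run with invented sizes:

#include <stdio.h>

int main(void)
{
	int entries = 16, rxq_cnt = 4;	/* illustrative sizes */

	for (int i = 0; i < entries; i++)
		printf("IDT[%2d] -> rx_ring[%d]\n", i, i % rxq_cnt);
	return 0;
}
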
+static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
+{
+ struct xgene_enet_cle *cle = &pdata->cle;
+ void __iomem *base = cle->base;
+ u32 offset, val = 0;
+ int i, ret = 0;
+
+ offset = CLE_PORT_OFFSET;
+ for (i = 0; i < cle->parsers; i++) {
+ if (cle->active_parser != PARSER_ALL)
+ offset = cle->active_parser * CLE_PORT_OFFSET;
+ else
+ offset = i * CLE_PORT_OFFSET;
+
+ /* enable RSS */
+ val = (RSS_IPV4_12B << 1) | 0x1;
+ writel(val, base + RSS_CTRL0 + offset);
+ }
+
+ /* setup sideband data */
+ ret = xgene_cle_set_rss_sband(cle);
+ if (ret)
+ return ret;
+
+ /* setup indirection table */
+ ret = xgene_cle_set_rss_idt(pdata);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
+{
+ struct xgene_enet_cle *enet_cle = &pdata->cle;
+ struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
+ struct xgene_cle_ptree_branch *br;
+ u32 def_qid, def_fpsel, pool_id;
+ struct xgene_cle_ptree *ptree;
+ struct xgene_cle_ptree_kn kn;
+ int ret;
+ struct xgene_cle_ptree_ewdn ptree_dn[] = {
+ {
+ /* PKT_TYPE_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = NO_BYTE,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 2,
+ .branch = {
+ {
+ /* IPV4 */
+ .valid = 0,
+ .next_packet_pointer = 22,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = PKT_PROT_NODE,
+ .next_branch = 0,
+ .data = 0x8,
+ .mask = 0xffff
+ },
+ {
+ .valid = 0,
+ .next_packet_pointer = 262,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = LAST_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ },
+ },
+ {
+ /* PKT_PROT_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = NO_BYTE,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 3,
+ .branch = {
+ {
+ /* TCP */
+ .valid = 1,
+ .next_packet_pointer = 26,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 0,
+ .data = 0x0600,
+ .mask = 0xffff
+ },
+ {
+ /* UDP */
+ .valid = 1,
+ .next_packet_pointer = 26,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 0,
+ .data = 0x1100,
+ .mask = 0xffff
+ },
+ {
+ .valid = 0,
+ .next_packet_pointer = 260,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = LAST_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ }
+ },
+ {
+ /* RSS_IPV4_TCP_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = BOTH_BYTES,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 6,
+ .branch = {
+ {
+ /* SRC IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 28,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 1,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* SRC IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 30,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 2,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 32,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 3,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 34,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 4,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* TCP SRC Port */
+ .valid = 0,
+ .next_packet_pointer = 36,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_TCP_NODE,
+ .next_branch = 5,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* TCP DST Port */
+ .valid = 0,
+ .next_packet_pointer = 256,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = LAST_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ }
+ },
+ {
+ /* RSS_IPV4_UDP_NODE */
+ .node_type = EWDN,
+ .last_node = 0,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = BOTH_BYTES,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 6,
+ .branch = {
+ {
+ /* SRC IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 28,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 1,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* SRC IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 30,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 2,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B01 */
+ .valid = 0,
+ .next_packet_pointer = 32,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 3,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* DST IPV4 B23 */
+ .valid = 0,
+ .next_packet_pointer = 34,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 4,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* UDP SRC Port */
+ .valid = 0,
+ .next_packet_pointer = 36,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = RSS_IPV4_UDP_NODE,
+ .next_branch = 5,
+ .data = 0x0,
+ .mask = 0xffff
+ },
+ {
+ /* UDP DST Port */
+ .valid = 0,
+ .next_packet_pointer = 256,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = LAST_NODE,
+ .next_branch = 0,
+ .data = 0x0,
+ .mask = 0xffff
+ }
+ }
+ },
+ {
+ /* LAST_NODE */
+ .node_type = EWDN,
+ .last_node = 1,
+ .hdr_len_store = 1,
+ .hdr_extn = NO_BYTE,
+ .byte_store = NO_BYTE,
+ .search_byte_store = NO_BYTE,
+ .result_pointer = DB_RES_DROP,
+ .num_branches = 1,
+ .branch = {
+ {
+ .valid = 0,
+ .next_packet_pointer = 0,
+ .jump_bw = JMP_FW,
+ .jump_rel = JMP_ABS,
+ .operation = EQT,
+ .next_node = MAX_NODES,
+ .next_branch = 0,
+ .data = 0,
+ .mask = 0xffff
+ }
+ }
+ }
+ };
+
+ ptree = &enet_cle->ptree;
+ ptree->start_pkt = 12; /* Ethertype */
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ ret = xgene_cle_setup_rss(pdata);
+ if (ret) {
+ netdev_err(pdata->ndev, "RSS initialization failed\n");
+ return ret;
+ }
+ } else {
+ br = &ptree_dn[PKT_PROT_NODE].branch[0];
+ br->valid = 0;
+ br->next_packet_pointer = 260;
+ br->next_node = LAST_NODE;
+ br->data = 0x0000;
+ br->mask = 0xffff;
+ }
+
+ def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
+ pool_id = pdata->rx_ring[0]->buf_pool->id;
+ def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
+
+ memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
+ dbptr[DB_RES_ACCEPT].fpsel = def_fpsel;
+ dbptr[DB_RES_ACCEPT].dstqid = def_qid;
+ dbptr[DB_RES_ACCEPT].cle_priority = 1;
+
+ dbptr[DB_RES_DEF].fpsel = def_fpsel;
+ dbptr[DB_RES_DEF].dstqid = def_qid;
+ dbptr[DB_RES_DEF].cle_priority = 7;
+ xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF],
+ DB_RES_ACCEPT, 7);
+
+ dbptr[DB_RES_DROP].drop = 1;
+
+ memset(&kn, 0, sizeof(kn));
+ kn.node_type = KN;
+ kn.num_keys = 1;
+ kn.key[0].priority = 0;
+ kn.key[0].result_pointer = DB_RES_ACCEPT;
+
+ ptree->dn = ptree_dn;
+ ptree->kn = &kn;
+ ptree->dbptr = dbptr;
+ ptree->num_dn = MAX_NODES;
+ ptree->num_kn = 1;
+ ptree->num_dbptr = DB_MAX_PTRS;
+
+ return xgene_cle_setup_ptree(pdata, enet_cle);
+}
+
+struct xgene_cle_ops xgene_cle3in_ops = {
+ .cle_init = xgene_enet_cle_init,
+};
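Taken together, the node array above describes a small decision tree; a hedged summary of the walk it configures in the XGMII/RSS case (offsets and matches as this editor reads them, starting at packet byte 12 per start_pkt):

	/*
	 * PKT_TYPE_NODE  - Ethertype: IPv4  -> PKT_PROT_NODE
	 *                  anything else    -> LAST_NODE
	 * PKT_PROT_NODE  - proto TCP (0x06) -> RSS_IPV4_TCP_NODE
	 *                  proto UDP (0x11) -> RSS_IPV4_UDP_NODE
	 *                  otherwise        -> LAST_NODE
	 * RSS_IPV4_*     - step across src/dst IP and the L4 ports with
	 *                  BOTH_BYTES store, capturing the 12-byte RSS tuple
	 * LAST_NODE      - ends the walk; the key node then resolves to
	 *                  DB_RES_ACCEPT and the default queue
	 */

In the non-XGMII branch the code instead rewires PKT_PROT_NODE's first branch straight to LAST_NODE, so the tree degenerates to a pass-through and classification is handled by the existing cle_bypass() path.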
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
new file mode 100644
index 000000000000..29a17abdd828
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -0,0 +1,295 @@
+/* Applied Micro X-Gene SoC Ethernet Classifier structures
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Authors: Khuong Dinh <kdinh@apm.com>
+ * Tanmay Inamdar <tinamdar@apm.com>
+ * Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_CLE_H__
+#define __XGENE_ENET_CLE_H__
+
+#include <linux/io.h>
+#include <linux/random.h>
+
+/* Register offsets */
+#define INDADDR 0x04
+#define INDCMD 0x08
+#define INDCMD_STATUS 0x0c
+#define DATA_RAM0 0x10
+#define SNPTR0 0x0100
+#define SPPTR0 0x0104
+#define DFCLSRESDBPTR0 0x0108
+#define DFCLSRESDB00 0x010c
+#define RSS_CTRL0 0x0000013c
+
+#define CLE_CMD_TO 10 /* ms */
+#define CLE_PKTRAM_SIZE 256 /* bytes */
+#define CLE_PORT_OFFSET 0x200
+#define CLE_DRAM_REGS 17
+
+#define CLE_DN_TYPE_LEN 2
+#define CLE_DN_TYPE_POS 0
+#define CLE_DN_LASTN_LEN 1
+#define CLE_DN_LASTN_POS 2
+#define CLE_DN_HLS_LEN 1
+#define CLE_DN_HLS_POS 3
+#define CLE_DN_EXT_LEN 2
+#define CLE_DN_EXT_POS 4
+#define CLE_DN_BSTOR_LEN 2
+#define CLE_DN_BSTOR_POS 6
+#define CLE_DN_SBSTOR_LEN 2
+#define CLE_DN_SBSTOR_POS 8
+#define CLE_DN_RPTR_LEN 12
+#define CLE_DN_RPTR_POS 12
+
+#define CLE_BR_VALID_LEN 1
+#define CLE_BR_VALID_POS 0
+#define CLE_BR_NPPTR_LEN 9
+#define CLE_BR_NPPTR_POS 1
+#define CLE_BR_JB_LEN 1
+#define CLE_BR_JB_POS 10
+#define CLE_BR_JR_LEN 1
+#define CLE_BR_JR_POS 11
+#define CLE_BR_OP_LEN 3
+#define CLE_BR_OP_POS 12
+#define CLE_BR_NNODE_LEN 9
+#define CLE_BR_NNODE_POS 15
+#define CLE_BR_NBR_LEN 5
+#define CLE_BR_NBR_POS 24
+
+#define CLE_BR_DATA_LEN 16
+#define CLE_BR_DATA_POS 0
+#define CLE_BR_MASK_LEN 16
+#define CLE_BR_MASK_POS 16
+
+#define CLE_KN_PRIO_POS 0
+#define CLE_KN_PRIO_LEN 3
+#define CLE_KN_RPTR_POS 3
+#define CLE_KN_RPTR_LEN 10
+#define CLE_TYPE_POS 0
+#define CLE_TYPE_LEN 2
+
+#define CLE_DSTQIDL_POS 25
+#define CLE_DSTQIDL_LEN 7
+#define CLE_DSTQIDH_POS 0
+#define CLE_DSTQIDH_LEN 5
+#define CLE_FPSEL_POS 21
+#define CLE_FPSEL_LEN 4
+#define CLE_PRIORITY_POS 5
+#define CLE_PRIORITY_LEN 3
+
+#define JMP_ABS 0
+#define JMP_REL 1
+#define JMP_FW 0
+#define JMP_BW 1
+
+enum xgene_cle_ptree_nodes {
+ PKT_TYPE_NODE,
+ PKT_PROT_NODE,
+ RSS_IPV4_TCP_NODE,
+ RSS_IPV4_UDP_NODE,
+ LAST_NODE,
+ MAX_NODES
+};
+
+enum xgene_cle_byte_store {
+ NO_BYTE,
+ FIRST_BYTE,
+ SECOND_BYTE,
+ BOTH_BYTES
+};
+
+/* Preclassification node types */
+enum xgene_cle_node_type {
+ INV,
+ KN,
+ EWDN,
+ RES_NODE
+};
+
+/* Preclassification operation types */
+enum xgene_cle_op_type {
+ EQT,
+ NEQT,
+ LTEQT,
+ GTEQT,
+ AND,
+ NAND
+};
+
+enum xgene_cle_parser {
+ PARSER0,
+ PARSER1,
+ PARSER2,
+ PARSER_ALL
+};
+
+#define XGENE_CLE_DRAM(type) (((type) & 0xf) << 28)
+enum xgene_cle_dram_type {
+ PKT_RAM,
+ RSS_IDT,
+ RSS_IPV4_HASH_SKEY,
+ PTREE_RAM = 0xc,
+ AVL_RAM,
+ DB_RAM
+};
+
+enum xgene_cle_cmd_type {
+ CLE_CMD_WR = 1,
+ CLE_CMD_RD = 2,
+ CLE_CMD_AVL_ADD = 8,
+ CLE_CMD_AVL_DEL = 16,
+ CLE_CMD_AVL_SRCH = 32
+};
+
+enum xgene_cle_ipv4_rss_hashtype {
+ RSS_IPV4_8B,
+ RSS_IPV4_12B,
+};
+
+enum xgene_cle_prot_type {
+ XGENE_CLE_TCP,
+ XGENE_CLE_UDP,
+ XGENE_CLE_ESP,
+ XGENE_CLE_OTHER
+};
+
+enum xgene_cle_prot_version {
+ XGENE_CLE_IPV4,
+};
+
+enum xgene_cle_ptree_dbptrs {
+ DB_RES_DROP,
+ DB_RES_DEF,
+ DB_RES_ACCEPT,
+ DB_MAX_PTRS
+};
+
+/* RSS sideband signal info */
+#define SB_IPFRAG_POS 0
+#define SB_IPFRAG_LEN 1
+#define SB_IPPROT_POS 1
+#define SB_IPPROT_LEN 2
+#define SB_IPVER_POS 3
+#define SB_IPVER_LEN 1
+#define SB_HDRLEN_POS 4
+#define SB_HDRLEN_LEN 12
+
+/* RSS indirection table */
+#define XGENE_CLE_IDT_ENTRIES 128
+#define IDT_DSTQID_POS 0
+#define IDT_DSTQID_LEN 12
+#define IDT_FPSEL_POS 12
+#define IDT_FPSEL_LEN 4
+#define IDT_NFPSEL_POS 16
+#define IDT_NFPSEL_LEN 4
+
+struct xgene_cle_ptree_branch {
+ bool valid;
+ u16 next_packet_pointer;
+ bool jump_bw;
+ bool jump_rel;
+ u8 operation;
+ u16 next_node;
+ u8 next_branch;
+ u16 data;
+ u16 mask;
+};
+
+struct xgene_cle_ptree_ewdn {
+ u8 node_type;
+ bool last_node;
+ bool hdr_len_store;
+ u8 hdr_extn;
+ u8 byte_store;
+ u8 search_byte_store;
+ u16 result_pointer;
+ u8 num_branches;
+ struct xgene_cle_ptree_branch branch[6];
+};
+
+struct xgene_cle_ptree_key {
+ u8 priority;
+ u16 result_pointer;
+};
+
+struct xgene_cle_ptree_kn {
+ u8 node_type;
+ u8 num_keys;
+ struct xgene_cle_ptree_key key[32];
+};
+
+struct xgene_cle_dbptr {
+ u8 split_boundary;
+ u8 mirror_nxtfpsel;
+ u8 mirror_fpsel;
+ u16 mirror_dstqid;
+ u8 drop;
+ u8 mirror;
+ u8 hdr_data_split;
+ u64 hopinfomsbs;
+ u8 DR;
+ u8 HR;
+ u64 hopinfomlsbs;
+ u16 h0enq_num;
+ u8 h0fpsel;
+ u8 nxtfpsel;
+ u8 fpsel;
+ u16 dstqid;
+ u8 cle_priority;
+ u8 cle_flowgroup;
+ u8 cle_perflow;
+ u8 cle_insert_timestamp;
+ u8 stash;
+ u8 in;
+ u8 perprioen;
+ u8 perflowgroupen;
+ u8 perflowen;
+ u8 selhash;
+ u8 selhdrext;
+ u8 mirror_nxtfpsel_msb;
+ u8 mirror_fpsel_msb;
+ u8 hfpsel_msb;
+ u8 nxtfpsel_msb;
+ u8 fpsel_msb;
+};
+
+struct xgene_cle_ptree {
+ struct xgene_cle_ptree_ewdn *dn;
+ struct xgene_cle_ptree_kn *kn;
+ struct xgene_cle_dbptr *dbptr;
+ u32 num_dn;
+ u32 num_kn;
+ u32 num_dbptr;
+ u32 start_node;
+ u32 start_pkt;
+ u32 start_dbptr;
+};
+
+struct xgene_enet_cle {
+ void __iomem *base;
+ struct xgene_cle_ptree ptree;
+ enum xgene_cle_parser active_parser;
+ u32 parsers;
+ u32 max_nodes;
+ u32 max_dbptrs;
+ u32 jump_bytes;
+};
+
+extern struct xgene_cle_ops xgene_cle3in_ops;
+
+#endif /* __XGENE_ENET_CLE_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index db55c9f6e8e1..39e081a70f5b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -204,6 +204,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
return num_msgs;
}
+static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
+{
+ u32 data = 0x7777;
+
+ xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
+ xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
+ xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
+}
+
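The magic numbers here are undocumented in this patch; 0x7777 and 0x8e plausibly program per-queue coalescing tick selectors and the two SET1 registers the packet-count thresholds (0x40, 0x80), but that reading is an assumption. What is visible is the wiring: the helper is exported through the new .coalesce ring op (an identical copy is added to the ring2 ops later in this patch) and invoked once after ring creation:

	pdata->ring_ops->coalesce(pdata->tx_ring[0]);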
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
struct xgene_enet_pdata *pdata,
enum xgene_enet_err_code status)
@@ -892,4 +903,5 @@ struct xgene_ring_ops xgene_ring1_ops = {
.clear = xgene_enet_clear_ring,
.wr_cmd = xgene_enet_wr_cmd,
.len = xgene_enet_ring_len,
+ .coalesce = xgene_enet_setup_coalescing,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 8a9091039ab4..ba7da98af2ef 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -54,6 +54,11 @@ enum xgene_enet_rm {
#define IS_BUFFER_POOL BIT(20)
#define PREFETCH_BUF_EN BIT(21)
#define CSR_RING_ID_BUF 0x000c
+#define CSR_PBM_COAL 0x0014
+#define CSR_PBM_CTICK1 0x001c
+#define CSR_PBM_CTICK2 0x0020
+#define CSR_THRESHOLD0_SET1 0x0030
+#define CSR_THRESHOLD1_SET1 0x0034
#define CSR_RING_NE_INT_MODE 0x017c
#define CSR_RING_CONFIG 0x006c
#define CSR_RING_WR_BASE 0x0070
@@ -101,6 +106,7 @@ enum xgene_enet_rm {
#define MAC_OFFSET 0x30
#define BLOCK_ETH_CSR_OFFSET 0x2000
+#define BLOCK_ETH_CLE_CSR_OFFSET 0x6000
#define BLOCK_ETH_RING_IF_OFFSET 0x9000
#define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000
#define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 5eb9b20c0eea..8d4c1ad2fc60 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -93,13 +93,6 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
return 0;
}
-static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
-{
- struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
-
- return ((u16)pdata->rm << 10) | ring->num;
-}
-
static u8 xgene_enet_hdr_len(const void *data)
{
const struct ethhdr *eth = data;
@@ -189,7 +182,6 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
struct net_device *ndev = skb->dev;
- struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct iphdr *iph;
u8 l3hlen = 0, l4hlen = 0;
u8 ethhdr, proto = 0, csum_enable = 0;
@@ -235,10 +227,6 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
if (!mss || ((skb->len - hdr_len) <= mss))
goto out;
- if (mss != pdata->mss) {
- pdata->mss = mss;
- pdata->mac_ops->set_mss(pdata);
- }
hopinfo |= SET_BIT(ET);
}
} else if (iph->protocol == IPPROTO_UDP) {
@@ -420,7 +408,7 @@ out:
raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
SET_VAL(USERINFO, tx_ring->tail));
tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
- pdata->tx_level += count;
+ pdata->tx_level[tx_ring->cp_ring->index] += count;
tx_ring->tail = tail;
return count;
@@ -430,15 +418,17 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
- u32 tx_level = pdata->tx_level;
+ struct xgene_enet_desc_ring *tx_ring;
+ int index = skb->queue_mapping;
+ u32 tx_level = pdata->tx_level[index];
int count;
- if (tx_level < pdata->txc_level)
- tx_level += ((typeof(pdata->tx_level))~0U);
+ tx_ring = pdata->tx_ring[index];
+ if (tx_level < pdata->txc_level[index])
+ tx_level += ((typeof(pdata->tx_level[index]))~0U);
- if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
- netif_stop_queue(ndev);
+ if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
+ netif_stop_subqueue(ndev, index);
return NETDEV_TX_BUSY;
}
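A worked example of the wrap handling above: tx_level[] and txc_level[] are u16 producer/consumer counters compared in a u32 local. If the producer has wrapped (tx_level = 4) while the consumer has not (txc_level = 65530), the raw difference would underflow, so (u16)~0U is added first:

	/* 4 + 65535 = 65539; 65539 - 65530 = 9 descriptors in flight
	 * (approximate - the result only gates the tx_qcnt_hi watermark,
	 * so the 65535-vs-65536 modulus nit is harmless)
	 */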
@@ -536,7 +526,8 @@ static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
int budget)
{
- struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+ struct net_device *ndev = ring->ndev;
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct xgene_enet_raw_desc *raw_desc, *exp_desc;
u16 head = ring->head;
u16 slots = ring->slots - 1;
@@ -580,7 +571,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
desc_count++;
processed++;
if (is_completion)
- pdata->txc_level += desc_count;
+ pdata->txc_level[ring->index] += desc_count;
if (ret)
break;
@@ -590,8 +581,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
- if (netif_queue_stopped(ring->ndev))
- netif_start_queue(ring->ndev);
+ if (__netif_subqueue_stopped(ndev, ring->index))
+ netif_start_subqueue(ndev, ring->index);
}
return processed;
@@ -616,8 +607,16 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget)
static void xgene_enet_timeout(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct netdev_queue *txq;
+ int i;
pdata->mac_ops->reset(pdata);
+
+ for (i = 0; i < pdata->txq_cnt; i++) {
+ txq = netdev_get_tx_queue(ndev, i);
+ txq->trans_start = jiffies;
+ netif_tx_start_queue(txq);
+ }
}
static int xgene_enet_register_irq(struct net_device *ndev)
@@ -625,17 +624,21 @@ static int xgene_enet_register_irq(struct net_device *ndev)
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *ring;
- int ret;
+ int ret = 0, i;
- ring = pdata->rx_ring;
- irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
- ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
- IRQF_SHARED, ring->irq_name, ring);
- if (ret)
- netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i];
+ irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+ IRQF_SHARED, ring->irq_name, ring);
+ if (ret) {
+ netdev_err(ndev, "Failed to request irq %s\n",
+ ring->irq_name);
+ }
+ }
- if (pdata->cq_cnt) {
- ring = pdata->tx_ring->cp_ring;
+ for (i = 0; i < pdata->cq_cnt; i++) {
+ ring = pdata->tx_ring[i]->cp_ring;
irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring);
@@ -653,15 +656,19 @@ static void xgene_enet_free_irq(struct net_device *ndev)
struct xgene_enet_pdata *pdata;
struct xgene_enet_desc_ring *ring;
struct device *dev;
+ int i;
pdata = netdev_priv(ndev);
dev = ndev_to_dev(ndev);
- ring = pdata->rx_ring;
- irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
- devm_free_irq(dev, ring->irq, ring);
- if (pdata->cq_cnt) {
- ring = pdata->tx_ring->cp_ring;
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i];
+ irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+ devm_free_irq(dev, ring->irq, ring);
+ }
+
+ for (i = 0; i < pdata->cq_cnt; i++) {
+ ring = pdata->tx_ring[i]->cp_ring;
irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
devm_free_irq(dev, ring->irq, ring);
}
@@ -670,12 +677,15 @@ static void xgene_enet_free_irq(struct net_device *ndev)
static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
struct napi_struct *napi;
+ int i;
- napi = &pdata->rx_ring->napi;
- napi_enable(napi);
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ napi = &pdata->rx_ring[i]->napi;
+ napi_enable(napi);
+ }
- if (pdata->cq_cnt) {
- napi = &pdata->tx_ring->cp_ring->napi;
+ for (i = 0; i < pdata->cq_cnt; i++) {
+ napi = &pdata->tx_ring[i]->cp_ring->napi;
napi_enable(napi);
}
}
@@ -683,12 +693,15 @@ static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
struct napi_struct *napi;
+ int i;
- napi = &pdata->rx_ring->napi;
- napi_disable(napi);
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ napi = &pdata->rx_ring[i]->napi;
+ napi_disable(napi);
+ }
- if (pdata->cq_cnt) {
- napi = &pdata->tx_ring->cp_ring->napi;
+ for (i = 0; i < pdata->cq_cnt; i++) {
+ napi = &pdata->tx_ring[i]->cp_ring->napi;
napi_disable(napi);
}
}
@@ -699,6 +712,14 @@ static int xgene_enet_open(struct net_device *ndev)
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
int ret;
+ ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
+ if (ret)
+ return ret;
+
+ ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
+ if (ret)
+ return ret;
+
mac_ops->tx_enable(pdata);
mac_ops->rx_enable(pdata);
@@ -721,6 +742,7 @@ static int xgene_enet_close(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
+ int i;
netif_stop_queue(ndev);
@@ -734,7 +756,8 @@ static int xgene_enet_close(struct net_device *ndev)
xgene_enet_free_irq(ndev);
xgene_enet_napi_disable(pdata);
- xgene_enet_process_ring(pdata->rx_ring, -1);
+ for (i = 0; i < pdata->rxq_cnt; i++)
+ xgene_enet_process_ring(pdata->rx_ring[i], -1);
return 0;
}
@@ -754,18 +777,26 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_desc_ring *buf_pool;
+ struct xgene_enet_desc_ring *ring;
+ int i;
- if (pdata->tx_ring) {
- xgene_enet_delete_ring(pdata->tx_ring);
- pdata->tx_ring = NULL;
+ for (i = 0; i < pdata->txq_cnt; i++) {
+ ring = pdata->tx_ring[i];
+ if (ring) {
+ xgene_enet_delete_ring(ring);
+ pdata->tx_ring[i] = NULL;
+ }
}
- if (pdata->rx_ring) {
- buf_pool = pdata->rx_ring->buf_pool;
- xgene_enet_delete_bufpool(buf_pool);
- xgene_enet_delete_ring(buf_pool);
- xgene_enet_delete_ring(pdata->rx_ring);
- pdata->rx_ring = NULL;
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i];
+ if (ring) {
+ buf_pool = ring->buf_pool;
+ xgene_enet_delete_bufpool(buf_pool);
+ xgene_enet_delete_ring(buf_pool);
+ xgene_enet_delete_ring(ring);
+ pdata->rx_ring[i] = NULL;
+ }
}
}
@@ -820,24 +851,29 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
struct xgene_enet_desc_ring *ring;
+ int i;
+
+ for (i = 0; i < pdata->txq_cnt; i++) {
+ ring = pdata->tx_ring[i];
+ if (ring) {
+ if (ring->cp_ring && ring->cp_ring->cp_skb)
+ devm_kfree(dev, ring->cp_ring->cp_skb);
+ if (ring->cp_ring && pdata->cq_cnt)
+ xgene_enet_free_desc_ring(ring->cp_ring);
+ xgene_enet_free_desc_ring(ring);
+ }
+ }
- ring = pdata->tx_ring;
- if (ring) {
- if (ring->cp_ring && ring->cp_ring->cp_skb)
- devm_kfree(dev, ring->cp_ring->cp_skb);
- if (ring->cp_ring && pdata->cq_cnt)
- xgene_enet_free_desc_ring(ring->cp_ring);
- xgene_enet_free_desc_ring(ring);
- }
-
- ring = pdata->rx_ring;
- if (ring) {
- if (ring->buf_pool) {
- if (ring->buf_pool->rx_skb)
- devm_kfree(dev, ring->buf_pool->rx_skb);
- xgene_enet_free_desc_ring(ring->buf_pool);
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i];
+ if (ring) {
+ if (ring->buf_pool) {
+ if (ring->buf_pool->rx_skb)
+ devm_kfree(dev, ring->buf_pool->rx_skb);
+ xgene_enet_free_desc_ring(ring->buf_pool);
+ }
+ xgene_enet_free_desc_ring(ring);
}
- xgene_enet_free_desc_ring(ring);
}
}
@@ -950,104 +986,120 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
u16 ring_id;
- int ret, size;
-
- /* allocate rx descriptor ring */
- owner = xgene_derive_ring_owner(pdata);
- ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
- rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
- RING_CFGSIZE_16KB, ring_id);
- if (!rx_ring) {
- ret = -ENOMEM;
- goto err;
- }
+ int i, ret, size;
- /* allocate buffer pool for receiving packets */
- owner = xgene_derive_ring_owner(pdata);
- ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
- buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
- RING_CFGSIZE_2KB, ring_id);
- if (!buf_pool) {
- ret = -ENOMEM;
- goto err;
- }
-
- rx_ring->nbufpool = NUM_BUFPOOL;
- rx_ring->buf_pool = buf_pool;
- rx_ring->irq = pdata->rx_irq;
- if (!pdata->cq_cnt) {
- snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
- ndev->name);
- } else {
- snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
- }
- buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
- sizeof(struct sk_buff *), GFP_KERNEL);
- if (!buf_pool->rx_skb) {
- ret = -ENOMEM;
- goto err;
- }
-
- buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
- rx_ring->buf_pool = buf_pool;
- pdata->rx_ring = rx_ring;
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ /* allocate rx descriptor ring */
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
+ rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+ RING_CFGSIZE_16KB,
+ ring_id);
+ if (!rx_ring) {
+ ret = -ENOMEM;
+ goto err;
+ }
- /* allocate tx descriptor ring */
- owner = xgene_derive_ring_owner(pdata);
- ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
- tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
- RING_CFGSIZE_16KB, ring_id);
- if (!tx_ring) {
- ret = -ENOMEM;
- goto err;
- }
+ /* allocate buffer pool for receiving packets */
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
+ buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
+ RING_CFGSIZE_2KB,
+ ring_id);
+ if (!buf_pool) {
+ ret = -ENOMEM;
+ goto err;
+ }
- size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
- tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
+ rx_ring->nbufpool = NUM_BUFPOOL;
+ rx_ring->buf_pool = buf_pool;
+ rx_ring->irq = pdata->irqs[i];
+ if (!pdata->cq_cnt) {
+ snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
+ ndev->name);
+ } else {
+ snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
+ ndev->name, i);
+ }
+ buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
+ sizeof(struct sk_buff *),
GFP_KERNEL);
- if (!tx_ring->exp_bufs) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!buf_pool->rx_skb) {
+ ret = -ENOMEM;
+ goto err;
+ }
- pdata->tx_ring = tx_ring;
+ buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
+ rx_ring->buf_pool = buf_pool;
+ pdata->rx_ring[i] = rx_ring;
+ }
- if (!pdata->cq_cnt) {
- cp_ring = pdata->rx_ring;
- } else {
- /* allocate tx completion descriptor ring */
- ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
- cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+ for (i = 0; i < pdata->txq_cnt; i++) {
+ /* allocate tx descriptor ring */
+ owner = xgene_derive_ring_owner(pdata);
+ ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
+ tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
RING_CFGSIZE_16KB,
ring_id);
- if (!cp_ring) {
+ if (!tx_ring) {
ret = -ENOMEM;
goto err;
}
- cp_ring->irq = pdata->txc_irq;
- snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
- }
- cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
- sizeof(struct sk_buff *), GFP_KERNEL);
- if (!cp_ring->cp_skb) {
- ret = -ENOMEM;
- goto err;
- }
+ size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
+ tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
+ &dma_exp_bufs,
+ GFP_KERNEL);
+ if (!tx_ring->exp_bufs) {
+ ret = -ENOMEM;
+ goto err;
+ }
- size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
- cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
- size, GFP_KERNEL);
- if (!cp_ring->frag_dma_addr) {
- devm_kfree(dev, cp_ring->cp_skb);
- ret = -ENOMEM;
- goto err;
- }
+ pdata->tx_ring[i] = tx_ring;
- pdata->tx_ring->cp_ring = cp_ring;
- pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
+ if (!pdata->cq_cnt) {
+ cp_ring = pdata->rx_ring[i];
+ } else {
+ /* allocate tx completion descriptor ring */
+ ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
+ cpu_bufnum++);
+ cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+ RING_CFGSIZE_16KB,
+ ring_id);
+ if (!cp_ring) {
+ ret = -ENOMEM;
+ goto err;
+ }
- pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;
+ cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
+ cp_ring->index = i;
+ snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
+ ndev->name, i);
+ }
+
+ cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!cp_ring->cp_skb) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
+ cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
+ size, GFP_KERNEL);
+ if (!cp_ring->frag_dma_addr) {
+ devm_kfree(dev, cp_ring->cp_skb);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ tx_ring->cp_ring = cp_ring;
+ tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
+ }
+
+ pdata->ring_ops->coalesce(pdata->tx_ring[0]);
+ pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
return 0;
@@ -1166,6 +1218,32 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
return 0;
}
+static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
+{
+ struct platform_device *pdev = pdata->pdev;
+ struct device *dev = &pdev->dev;
+ int i, ret, max_irqs;
+
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ max_irqs = 1;
+ else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
+ max_irqs = 2;
+ else
+ max_irqs = XGENE_MAX_ENET_IRQ;
+
+ for (i = 0; i < max_irqs; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret <= 0) {
+ dev_err(dev, "Unable to get ENET IRQ\n");
+ ret = ret ? : -ENXIO;
+ return ret;
+ }
+ pdata->irqs[i] = ret;
+ }
+
+ return 0;
+}
+
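A sketch of how the flat irqs[] array is consumed later in this patch (see xgene_enet_create_desc_rings()):

	rx ring i            -> pdata->irqs[i]
	tx completion ring i -> pdata->irqs[pdata->rxq_cnt + i]

so XGMII needs rxq_cnt + cq_cnt = 4 + 4 = XGENE_MAX_ENET_IRQ lines, SGMII needs 1 + 1 = 2, and RGMII runs rx and tx completion on a single shared line (cq_cnt = 0).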
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
@@ -1247,25 +1325,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
- ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- dev_err(dev, "Unable to get ENET Rx IRQ\n");
- ret = ret ? : -ENXIO;
+ ret = xgene_enet_get_irqs(pdata);
+ if (ret)
return ret;
- }
- pdata->rx_irq = ret;
-
- if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
- ret = platform_get_irq(pdev, 1);
- if (ret <= 0) {
- pdata->cq_cnt = 0;
- dev_info(dev, "Unable to get Tx completion IRQ,"
- "using Rx IRQ instead\n");
- } else {
- pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
- pdata->txc_irq = ret;
- }
- }
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
@@ -1278,6 +1340,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
else
base_addr = pdata->base_addr;
pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
+ pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
@@ -1298,10 +1361,11 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
+ struct xgene_enet_cle *enet_cle = &pdata->cle;
struct net_device *ndev = pdata->ndev;
struct xgene_enet_desc_ring *buf_pool;
u16 dst_ring_num;
- int ret;
+ int i, ret;
ret = pdata->port_ops->reset(pdata);
if (ret)
@@ -1314,16 +1378,36 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
}
/* setup buffer pool */
- buf_pool = pdata->rx_ring->buf_pool;
- xgene_enet_init_bufpool(buf_pool);
- ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
- if (ret) {
- xgene_enet_delete_desc_rings(pdata);
- return ret;
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ buf_pool = pdata->rx_ring[i]->buf_pool;
+ xgene_enet_init_bufpool(buf_pool);
+ ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
+ if (ret) {
+ xgene_enet_delete_desc_rings(pdata);
+ return ret;
+ }
+ }
+
+ dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
+ buf_pool = pdata->rx_ring[0]->buf_pool;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ /* Initialize and Enable PreClassifier Tree */
+ enet_cle->max_nodes = 512;
+ enet_cle->max_dbptrs = 1024;
+ enet_cle->parsers = 3;
+ enet_cle->active_parser = PARSER_ALL;
+ enet_cle->ptree.start_node = 0;
+ enet_cle->ptree.start_dbptr = 0;
+ enet_cle->jump_bytes = 8;
+ ret = pdata->cle_ops->cle_init(pdata);
+ if (ret) {
+ netdev_err(ndev, "Preclass Tree init error\n");
+ return ret;
+ }
+ } else {
+ pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
}
- dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
- pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
pdata->mac_ops->init(pdata);
return ret;
@@ -1336,16 +1420,26 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
pdata->mac_ops = &xgene_gmac_ops;
pdata->port_ops = &xgene_gport_ops;
pdata->rm = RM3;
+ pdata->rxq_cnt = 1;
+ pdata->txq_cnt = 1;
+ pdata->cq_cnt = 0;
break;
case PHY_INTERFACE_MODE_SGMII:
pdata->mac_ops = &xgene_sgmac_ops;
pdata->port_ops = &xgene_sgport_ops;
pdata->rm = RM1;
+ pdata->rxq_cnt = 1;
+ pdata->txq_cnt = 1;
+ pdata->cq_cnt = 1;
break;
default:
pdata->mac_ops = &xgene_xgmac_ops;
pdata->port_ops = &xgene_xgport_ops;
+ pdata->cle_ops = &xgene_cle3in_ops;
pdata->rm = RM0;
+ pdata->rxq_cnt = XGENE_NUM_RX_RING;
+ pdata->txq_cnt = XGENE_NUM_TX_RING;
+ pdata->cq_cnt = XGENE_NUM_TXC_RING;
break;
}
@@ -1399,12 +1493,16 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
struct napi_struct *napi;
+ int i;
- napi = &pdata->rx_ring->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ napi = &pdata->rx_ring[i]->napi;
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
+ NAPI_POLL_WEIGHT);
+ }
- if (pdata->cq_cnt) {
- napi = &pdata->tx_ring->cp_ring->napi;
+ for (i = 0; i < pdata->cq_cnt; i++) {
+ napi = &pdata->tx_ring[i]->cp_ring->napi;
netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
NAPI_POLL_WEIGHT);
}
@@ -1413,12 +1511,15 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
struct napi_struct *napi;
+ int i;
- napi = &pdata->rx_ring->napi;
- netif_napi_del(napi);
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ napi = &pdata->rx_ring[i]->napi;
+ netif_napi_del(napi);
+ }
- if (pdata->cq_cnt) {
- napi = &pdata->tx_ring->cp_ring->napi;
+ for (i = 0; i < pdata->cq_cnt; i++) {
+ napi = &pdata->tx_ring[i]->cp_ring->napi;
netif_napi_del(napi);
}
}
@@ -1432,7 +1533,8 @@ static int xgene_enet_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
int ret;
- ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
+ ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
+ XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 248dfc40a761..175d18890c7a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -36,6 +36,7 @@
#include <linux/if_vlan.h>
#include <linux/phy.h>
#include "xgene_enet_hw.h"
+#include "xgene_enet_cle.h"
#include "xgene_enet_ring2.h"
#define XGENE_DRV_VERSION "v1.0"
@@ -48,6 +49,11 @@
#define XGENE_ENET_MSS 1448
#define XGENE_MIN_ENET_FRAME_SIZE 60
+#define XGENE_MAX_ENET_IRQ 8
+#define XGENE_NUM_RX_RING 4
+#define XGENE_NUM_TX_RING 4
+#define XGENE_NUM_TXC_RING 4
+
#define START_CPU_BUFNUM_0 0
#define START_ETH_BUFNUM_0 2
#define START_BP_BUFNUM_0 0x22
@@ -72,7 +78,6 @@
#define X2_START_RING_NUM_1 256
#define IRQ_ID_SIZE 16
-#define XGENE_MAX_TXC_RINGS 1
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
@@ -102,6 +107,7 @@ struct xgene_enet_desc_ring {
void *irq_mbox_addr;
u16 dst_ring_num;
u8 nbufpool;
+ u8 index;
struct sk_buff *(*rx_skb);
struct sk_buff *(*cp_skb);
dma_addr_t *frag_dma_addr;
@@ -143,6 +149,11 @@ struct xgene_ring_ops {
void (*clear)(struct xgene_enet_desc_ring *);
void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
u32 (*len)(struct xgene_enet_desc_ring *);
+ void (*coalesce)(struct xgene_enet_desc_ring *);
+};
+
+struct xgene_cle_ops {
+ int (*cle_init)(struct xgene_enet_pdata *pdata);
};
/* ethernet private data */
@@ -154,15 +165,16 @@ struct xgene_enet_pdata {
struct clk *clk;
struct platform_device *pdev;
enum xgene_enet_id enet_id;
- struct xgene_enet_desc_ring *tx_ring;
- struct xgene_enet_desc_ring *rx_ring;
- u16 tx_level;
- u16 txc_level;
+ struct xgene_enet_desc_ring *tx_ring[XGENE_NUM_TX_RING];
+ struct xgene_enet_desc_ring *rx_ring[XGENE_NUM_RX_RING];
+ u16 tx_level[XGENE_NUM_TX_RING];
+ u16 txc_level[XGENE_NUM_TX_RING];
char *dev_name;
u32 rx_buff_cnt;
u32 tx_qcnt_hi;
- u32 rx_irq;
- u32 txc_irq;
+ u32 irqs[XGENE_MAX_ENET_IRQ];
+ u8 rxq_cnt;
+ u8 txq_cnt;
u8 cq_cnt;
void __iomem *eth_csr_addr;
void __iomem *eth_ring_if_addr;
@@ -174,10 +186,12 @@ struct xgene_enet_pdata {
void __iomem *ring_cmd_addr;
int phy_mode;
enum xgene_enet_rm rm;
+ struct xgene_enet_cle cle;
struct rtnl_link_stats64 stats;
const struct xgene_mac_ops *mac_ops;
const struct xgene_port_ops *port_ops;
struct xgene_ring_ops *ring_ops;
+ struct xgene_cle_ops *cle_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;
@@ -229,6 +243,13 @@ static inline struct device *ndev_to_dev(struct net_device *ndev)
return ndev->dev.parent;
}
+static inline u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+ return ((u16)pdata->rm << 10) | ring->num;
+}
+
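The helper moves from xgene_enet_main.c into the header unchanged so the new classifier code can share it. A worked example of the encoding, assuming the xgene_enet_rm enumerators RM0..RM3 take the values 0..3: with RM0 and ring number 0x47 the destination ring id is (0 << 10) | 0x47 = 0x047, while RM3 would yield (3 << 10) | 0x47 = 0xc47.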
void xgene_enet_set_ethtool_ops(struct net_device *netdev);
#endif /* __XGENE_ENET_MAIN_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
index 0b6896bb351e..2b76732add5d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -190,6 +190,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
return num_msgs;
}
+static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
+{
+ u32 data = 0x7777;
+
+ xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
+ xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
+ xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
+ xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
+}
+
struct xgene_ring_ops xgene_ring2_ops = {
.num_ring_config = X2_NUM_RING_CONFIG,
.num_ring_id_shift = 13,
@@ -197,4 +208,5 @@ struct xgene_ring_ops xgene_ring2_ops = {
.clear = xgene_enet_clear_ring,
.wr_cmd = xgene_enet_wr_cmd,
.len = xgene_enet_ring_len,
+ .coalesce = xgene_enet_setup_coalescing,
};
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index dae1ac300a49..ca562bc034c3 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -14,36 +14,36 @@
#include <linux/clk.h>
/* STATUS and ENABLE Register bit masks */
-#define TXINT_MASK (1<<0) /* Transmit interrupt */
-#define RXINT_MASK (1<<1) /* Receive interrupt */
-#define ERR_MASK (1<<2) /* Error interrupt */
-#define TXCH_MASK (1<<3) /* Transmit chaining error interrupt */
-#define MSER_MASK (1<<4) /* Missed packet counter error */
-#define RXCR_MASK (1<<8) /* RXCRCERR counter rolled over */
-#define RXFR_MASK (1<<9) /* RXFRAMEERR counter rolled over */
-#define RXFL_MASK (1<<10) /* RXOFLOWERR counter rolled over */
-#define MDIO_MASK (1<<12) /* MDIO complete interrupt */
-#define TXPL_MASK (1<<31) /* Force polling of BD by EMAC */
+#define TXINT_MASK (1 << 0) /* Transmit interrupt */
+#define RXINT_MASK (1 << 1) /* Receive interrupt */
+#define ERR_MASK (1 << 2) /* Error interrupt */
+#define TXCH_MASK (1 << 3) /* Transmit chaining error interrupt */
+#define MSER_MASK (1 << 4) /* Missed packet counter error */
+#define RXCR_MASK (1 << 8) /* RXCRCERR counter rolled over */
+#define RXFR_MASK (1 << 9) /* RXFRAMEERR counter rolled over */
+#define RXFL_MASK (1 << 10) /* RXOFLOWERR counter rolled over */
+#define MDIO_MASK (1 << 12) /* MDIO complete interrupt */
+#define TXPL_MASK (1 << 31) /* Force polling of BD by EMAC */
/* CONTROL Register bit masks */
-#define EN_MASK (1<<0) /* VMAC enable */
-#define TXRN_MASK (1<<3) /* TX enable */
-#define RXRN_MASK (1<<4) /* RX enable */
-#define DSBC_MASK (1<<8) /* Disable receive broadcast */
-#define ENFL_MASK (1<<10) /* Enable Full-duplex */
-#define PROM_MASK (1<<11) /* Promiscuous mode */
+#define EN_MASK (1 << 0) /* VMAC enable */
+#define TXRN_MASK (1 << 3) /* TX enable */
+#define RXRN_MASK (1 << 4) /* RX enable */
+#define DSBC_MASK (1 << 8) /* Disable receive broadcast */
+#define ENFL_MASK (1 << 10) /* Enable Full-duplex */
+#define PROM_MASK (1 << 11) /* Promiscuous mode */
/* Buffer descriptor INFO bit masks */
-#define OWN_MASK (1<<31) /* 0-CPU owns buffer, 1-EMAC owns buffer */
-#define FIRST_MASK (1<<16) /* First buffer in chain */
-#define LAST_MASK (1<<17) /* Last buffer in chain */
+#define OWN_MASK (1 << 31) /* 0-CPU or 1-EMAC owns buffer */
+#define FIRST_MASK (1 << 16) /* First buffer in chain */
+#define LAST_MASK (1 << 17) /* Last buffer in chain */
#define LEN_MASK 0x000007FF /* last 11 bits */
-#define CRLS (1<<21)
-#define DEFR (1<<22)
-#define DROP (1<<23)
-#define RTRY (1<<24)
-#define LTCL (1<<28)
-#define UFLO (1<<29)
+#define CRLS (1 << 21)
+#define DEFR (1 << 22)
+#define DROP (1 << 23)
+#define RTRY (1 << 24)
+#define LTCL (1 << 28)
+#define UFLO (1 << 29)
#define FOR_EMAC OWN_MASK
#define FOR_CPU 0
@@ -66,7 +66,7 @@ enum {
R_MDIO,
};
-#define TX_TIMEOUT (400*HZ/1000) /* Transmission timeout */
+#define TX_TIMEOUT (400 * HZ / 1000) /* Transmission timeout */
#define ARC_EMAC_NAPI_WEIGHT 40 /* Workload for NAPI */
@@ -102,6 +102,11 @@ struct buffer_state {
DEFINE_DMA_UNMAP_LEN(len);
};
+struct arc_emac_mdio_bus_data {
+ struct gpio_desc *reset_gpio;
+ u32 msec; /* phy-reset-duration, in ms */
+};
+
/**
* struct arc_emac_priv - Storage of EMAC's private information.
* @dev: Pointer to the current device.
@@ -131,6 +136,7 @@ struct arc_emac_priv {
struct device *dev;
struct phy_device *phy_dev;
struct mii_bus *bus;
+ struct arc_emac_mdio_bus_data bus_data;
void __iomem *regs;
struct clk *clk;
@@ -190,6 +196,7 @@ static inline unsigned int arc_reg_get(struct arc_emac_priv *priv, int reg)
static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask)
{
unsigned int value = arc_reg_get(priv, reg);
+
arc_reg_set(priv, reg, value | mask);
}
@@ -205,6 +212,7 @@ static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask)
static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask)
{
unsigned int value = arc_reg_get(priv, reg);
+
arc_reg_set(priv, reg, value & ~mask);
}
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 6446af1403f7..a3a9392a4954 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -26,7 +26,6 @@
#include "emac.h"
-
/**
* arc_emac_tx_avail - Return the number of available slots in the tx ring.
* @priv: Pointer to ARC EMAC private data structure.
@@ -66,7 +65,7 @@ static void arc_emac_adjust_link(struct net_device *ndev)
if (priv->duplex != phy_dev->duplex) {
reg = arc_reg_get(priv, R_CTRL);
- if (DUPLEX_FULL == phy_dev->duplex)
+ if (phy_dev->duplex == DUPLEX_FULL)
reg |= ENFL_MASK;
else
reg &= ~ENFL_MASK;
@@ -466,9 +465,9 @@ static int arc_emac_open(struct net_device *ndev)
/* Set CONTROL */
arc_reg_set(priv, R_CTRL,
- (RX_BD_NUM << 24) | /* RX BD table length */
- (TX_BD_NUM << 16) | /* TX BD table length */
- TXRN_MASK | RXRN_MASK);
+ (RX_BD_NUM << 24) | /* RX BD table length */
+ (TX_BD_NUM << 16) | /* TX BD table length */
+ TXRN_MASK | RXRN_MASK);
napi_enable(&priv->napi);
@@ -533,8 +532,10 @@ static void arc_free_tx_queue(struct net_device *ndev)
struct buffer_state *tx_buff = &priv->tx_buff[i];
if (tx_buff->skb) {
- dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
- dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
+ dma_unmap_single(&ndev->dev,
+ dma_unmap_addr(tx_buff, addr),
+ dma_unmap_len(tx_buff, len),
+ DMA_TO_DEVICE);
/* return the sk_buff to system */
dev_kfree_skb_irq(tx_buff->skb);
@@ -562,8 +563,10 @@ static void arc_free_rx_queue(struct net_device *ndev)
struct buffer_state *rx_buff = &priv->rx_buff[i];
if (rx_buff->skb) {
- dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
- dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+ dma_unmap_single(&ndev->dev,
+ dma_unmap_addr(rx_buff, addr),
+ dma_unmap_len(rx_buff, len),
+ DMA_FROM_DEVICE);
/* return the sk_buff to system */
dev_kfree_skb_irq(rx_buff->skb);
@@ -717,8 +720,8 @@ static void arc_emac_set_address_internal(struct net_device *ndev)
struct arc_emac_priv *priv = netdev_priv(ndev);
unsigned int addr_low, addr_hi;
- addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
- addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
+ addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]);
+ addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]);
arc_reg_set(priv, R_ADDRL, addr_low);
arc_reg_set(priv, R_ADDRH, addr_hi);
@@ -774,7 +777,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
unsigned int id, clock_frequency, irq;
int err;
-
/* Get PHY from device tree */
phy_node = of_parse_phandle(dev->of_node, "phy", 0);
if (!phy_node) {
@@ -796,7 +798,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
return -ENODEV;
}
-
ndev->netdev_ops = &arc_emac_netdev_ops;
ndev->ethtool_ops = &arc_emac_ethtool_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -807,9 +808,9 @@ int arc_emac_probe(struct net_device *ndev, int interface)
priv->dev = dev;
priv->regs = devm_ioremap_resource(dev, &res_regs);
- if (IS_ERR(priv->regs)) {
+ if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
- }
+
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
if (priv->clk) {
@@ -930,10 +931,8 @@ int arc_emac_remove(struct net_device *ndev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi);
- if (!IS_ERR(priv->clk)) {
+ if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index d5ee986936da..16419f550eff 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
#include "emac.h"
@@ -93,12 +94,31 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
phy_addr, reg_num, value);
arc_reg_set(priv, R_MDIO,
- 0x50020000 | (phy_addr << 23) | (reg_num << 18) | value);
+ 0x50020000 | (phy_addr << 23) | (reg_num << 18) | value);
return arc_mdio_complete_wait(priv);
}
/**
+ * arc_mdio_reset
+ * @bus: points to the mii_bus structure
+ * Description: reset the MII bus
+ */
+static int arc_mdio_reset(struct mii_bus *bus)
+{
+ struct arc_emac_priv *priv = bus->priv;
+ struct arc_emac_mdio_bus_data *data = &priv->bus_data;
+
+ if (data->reset_gpio) {
+ gpiod_set_value_cansleep(data->reset_gpio, 1);
+ msleep(data->msec);
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+ }
+
+ return 0;
+}
+
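The new bus->reset hook is invoked by the mdiobus core during registration, before the bus is scanned for PHYs, so the PHY is out of reset by the time its ID registers are read; in __mdiobus_register() of this kernel generation that is simply:

	if (bus->reset)
		bus->reset(bus);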
+/**
* arc_mdio_probe - MDIO probe function.
* @priv: Pointer to ARC EMAC private data structure.
*
@@ -109,6 +129,8 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr,
*/
int arc_mdio_probe(struct arc_emac_priv *priv)
{
+ struct arc_emac_mdio_bus_data *data = &priv->bus_data;
+ struct device_node *np = priv->dev->of_node;
struct mii_bus *bus;
int error;
@@ -122,6 +144,21 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
bus->name = "Synopsys MII Bus";
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
+ bus->reset = &arc_mdio_reset;
+
+ /* optional reset-related properties */
+ data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(data->reset_gpio)) {
+ error = PTR_ERR(data->reset_gpio);
+ dev_err(priv->dev, "Failed to request gpio: %d\n", error);
+ return error;
+ }
+
+ of_property_read_u32(np, "phy-reset-duration", &data->msec);
+ /* A sane reset duration should not be longer than 1s;
+  * fall back to a minimal 1 ms pulse for larger values
+  */
+ if (data->msec > 1000)
+ data->msec = 1;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name);
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 85e821ccfcd2..e278e3d96ee0 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -50,7 +50,7 @@ static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed)
u32 data;
int err = 0;
- switch(speed) {
+ switch (speed) {
case 10:
data = (1 << (speed_offset + 16)) | (0 << speed_offset);
break;
@@ -83,9 +83,18 @@ static const struct emac_rockchip_soc_data emac_rk3188_emac_data = {
};
static const struct of_device_id emac_rockchip_dt_ids[] = {
- { .compatible = "rockchip,rk3036-emac", .data = &emac_rk3036_emac_data },
- { .compatible = "rockchip,rk3066-emac", .data = &emac_rk3066_emac_data },
- { .compatible = "rockchip,rk3188-emac", .data = &emac_rk3188_emac_data },
+ {
+ .compatible = "rockchip,rk3036-emac",
+ .data = &emac_rk3036_emac_data,
+ },
+ {
+ .compatible = "rockchip,rk3066-emac",
+ .data = &emac_rk3066_emac_data,
+ },
+ {
+ .compatible = "rockchip,rk3188-emac",
+ .data = &emac_rk3188_emac_data,
+ },
{ /* Sentinel */ }
};
@@ -123,9 +132,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
goto out_netdev;
}
- priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
if (IS_ERR(priv->grf)) {
- dev_err(dev, "failed to retrieve global register file (%ld)\n", PTR_ERR(priv->grf));
+ dev_err(dev, "failed to retrieve global register file (%ld)\n",
+ PTR_ERR(priv->grf));
err = PTR_ERR(priv->grf);
goto out_netdev;
}
@@ -135,14 +146,16 @@ static int emac_rockchip_probe(struct platform_device *pdev)
priv->emac.clk = devm_clk_get(dev, "hclk");
if (IS_ERR(priv->emac.clk)) {
- dev_err(dev, "failed to retrieve host clock (%ld)\n", PTR_ERR(priv->emac.clk));
+ dev_err(dev, "failed to retrieve host clock (%ld)\n",
+ PTR_ERR(priv->emac.clk));
err = PTR_ERR(priv->emac.clk);
goto out_netdev;
}
priv->refclk = devm_clk_get(dev, "macref");
if (IS_ERR(priv->refclk)) {
- dev_err(dev, "failed to retrieve reference clock (%ld)\n", PTR_ERR(priv->refclk));
+ dev_err(dev, "failed to retrieve reference clock (%ld)\n",
+ PTR_ERR(priv->refclk));
err = PTR_ERR(priv->refclk);
goto out_netdev;
}
@@ -179,19 +192,22 @@ static int emac_rockchip_probe(struct platform_device *pdev)
err = regmap_write(priv->grf, priv->soc_data->grf_offset, data);
if (err) {
- dev_err(dev, "unable to apply initial settings to grf (%d)\n", err);
+ dev_err(dev, "unable to apply initial settings to grf (%d)\n",
+ err);
goto out_regulator_disable;
}
/* RMII interface needs always a rate of 50MHz */
err = clk_set_rate(priv->refclk, 50000000);
if (err)
- dev_err(dev, "failed to change reference clock rate (%d)\n", err);
+ dev_err(dev,
+ "failed to change reference clock rate (%d)\n", err);
if (priv->soc_data->need_div_macclk) {
priv->macclk = devm_clk_get(dev, "macclk");
if (IS_ERR(priv->macclk)) {
- dev_err(dev, "failed to retrieve mac clock (%ld)\n", PTR_ERR(priv->macclk));
+ dev_err(dev, "failed to retrieve mac clock (%ld)\n",
+ PTR_ERR(priv->macclk));
err = PTR_ERR(priv->macclk);
goto out_regulator_disable;
}
@@ -205,7 +221,8 @@ static int emac_rockchip_probe(struct platform_device *pdev)
/* RMII TX/RX needs always a rate of 25MHz */
err = clk_set_rate(priv->macclk, 25000000);
if (err)
- dev_err(dev, "failed to change mac clock rate (%d)\n", err);
+ dev_err(dev,
+ "failed to change mac clock rate (%d)\n", err);
}
err = arc_emac_probe(ndev, interface);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 8b5988e210d5..d0084d4d1a9b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -65,10 +65,6 @@ static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
static int atl1c_configure(struct atl1c_adapter *adapter);
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter);
-static const u16 atl1c_pay_load_size[] = {
- 128, 256, 512, 1024, 2048, 4096,
-};
-
static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -EIO;
- netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 19f7cd02e085..18042c2460bd 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -149,6 +149,16 @@ config BNX2X_VXLAN
Say Y here if you want to enable hardware offload support for
Virtual eXtensible Local Area Network (VXLAN) in the driver.
+config BNX2X_GENEVE
+ bool "Generic Network Virtualization Encapsulation (GENEVE) support"
+ depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m)
+ ---help---
+ This allows one to create GENEVE virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to enable hardware offload support for
+ Generic Network Virtualization Encapsulation (GENEVE) in the driver.
+
config BGMAC
tristate "BCMA bus GBit core support"
depends on BCMA && BCMA_HOST_SOC
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 06f6cffdfaf5..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -26,6 +26,18 @@ static const struct bcma_device_id bgmac_bcma_tbl[] = {
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
+static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
+{
+ switch (bgmac->core->bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM4707:
+ case BCMA_CHIP_ID_BCM47094:
+ case BCMA_CHIP_ID_BCM53018:
+ return true;
+ default:
+ return false;
+ }
+}
+
static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
u32 value, int timeout)
{
@@ -987,11 +999,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
static void bgmac_miiconfig(struct bgmac *bgmac)
{
struct bcma_device *core = bgmac->core;
- struct bcma_chipinfo *ci = &core->bus->chipinfo;
u8 imode;
- if (ci->id == BCMA_CHIP_ID_BCM4707 ||
- ci->id == BCMA_CHIP_ID_BCM53018) {
+ if (bgmac_is_bcm4707_family(bgmac)) {
bcma_awrite32(core, BCMA_IOCTL,
bcma_aread32(core, BCMA_IOCTL) | 0x40 |
BGMAC_BCMA_IOCTL_SW_CLKEN);
@@ -1043,8 +1053,9 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
(ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
iost &= ~BGMAC_BCMA_IOST_ATTACHED;
- /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
- if (ci->id != BCMA_CHIP_ID_BCM4707) {
+ /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
+ if (ci->id != BCMA_CHIP_ID_BCM4707 &&
+ ci->id != BCMA_CHIP_ID_BCM47094) {
flags = 0;
if (iost & BGMAC_BCMA_IOST_ATTACHED) {
flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
@@ -1055,9 +1066,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
/* Request Misc PLL for corerev > 2 */
- if (core->id.rev > 2 &&
- ci->id != BCMA_CHIP_ID_BCM4707 &&
- ci->id != BCMA_CHIP_ID_BCM53018) {
+ if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
bgmac_set(bgmac, BCMA_CLKCTLST,
BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
@@ -1193,8 +1202,7 @@ static void bgmac_enable(struct bgmac *bgmac)
break;
}
- if (ci->id != BCMA_CHIP_ID_BCM4707 &&
- ci->id != BCMA_CHIP_ID_BCM53018) {
+ if (!bgmac_is_bcm4707_family(bgmac)) {
rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
@@ -1472,14 +1480,12 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
static int bgmac_mii_register(struct bgmac *bgmac)
{
- struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
char bus_id[MII_BUS_ID_SIZE + 3];
int err = 0;
- if (ci->id == BCMA_CHIP_ID_BCM4707 ||
- ci->id == BCMA_CHIP_ID_BCM53018)
+ if (bgmac_is_bcm4707_family(bgmac))
return bgmac_fixed_phy_register(bgmac);
mii_bus = mdiobus_alloc();
@@ -1539,7 +1545,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
static int bgmac_probe(struct bcma_device *core)
{
- struct bcma_chipinfo *ci = &core->bus->chipinfo;
struct net_device *net_dev;
struct bgmac *bgmac;
struct ssb_sprom *sprom = &core->bus->sprom;
@@ -1567,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
}
+ /* This (reset &) enable is not present in specs or reference driver but
+ * Broadcom does it in arch PCI code when enabling fake PCI device.
+ */
+ bcma_core_enable(core, 0);
+
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
@@ -1620,8 +1630,7 @@ static int bgmac_probe(struct bcma_device *core)
bgmac_chip_reset(bgmac);
/* For Northstar, we have to take all GMAC core out of reset */
- if (ci->id == BCMA_CHIP_ID_BCM4707 ||
- ci->id == BCMA_CHIP_ID_BCM53018) {
+ if (bgmac_is_bcm4707_family(bgmac)) {
struct bcma_device *ns_core;
int ns_gmac;
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
#define BGMAC_CMDCFG_TAI 0x00000200
#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */
-#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
+#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
+#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
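The SR (software reset) bit moved with core rev 4, and newer revisions keep the rev4 position, hence the change from rev == 4 to rev >= 4. A minimal sketch of a caller, assuming the bgmac_cmdcfg_maskset() helper that lives elsewhere in bgmac.c (illustrative only, not part of this patch):

/* Put the MAC into reset mode using the SR bit matching this core rev
 * (bit 13 for rev >= 4, bit 11 for rev 0-3), then release it.
 */
static void bgmac_mac_reset_sketch(struct bgmac *bgmac)
{
	u32 sr = BGMAC_CMDCFG_SR(bgmac->core->id.rev);

	bgmac_cmdcfg_maskset(bgmac, ~0, sr, true);	/* set SR: enter reset */
	udelay(2);
	bgmac_cmdcfg_maskset(bgmac, ~sr, 0, true);	/* clear SR: run again */
}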
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index cae0956186ce..7dd7490fdac1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1277,8 +1277,7 @@ enum sp_rtnl_flag {
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_GET_DRV_VERSION,
- BNX2X_SP_RTNL_ADD_VXLAN_PORT,
- BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+ BNX2X_SP_RTNL_CHANGE_UDP_PORT,
};
enum bnx2x_iov_flag {
@@ -1327,6 +1326,17 @@ struct bnx2x_vlan_entry {
bool hw;
};
+enum bnx2x_udp_port_type {
+ BNX2X_UDP_PORT_VXLAN,
+ BNX2X_UDP_PORT_GENEVE,
+ BNX2X_UDP_PORT_MAX,
+};
+
+struct bnx2x_udp_tunnel {
+ u16 dst_port;
+ u8 count;
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
@@ -1830,9 +1840,10 @@ struct bnx2x {
struct list_head vlan_reg;
u16 vlan_cnt;
u16 vlan_credit;
- u16 vxlan_dst_port;
- u8 vxlan_dst_port_count;
bool accept_any_vlan;
+
+ /* Vxlan/Geneve related information */
+ struct bnx2x_udp_tunnel udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
};
/* Tx queues may be less or equal to Rx queues */
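Each udp_tunnel_ports[] entry tracks the single destination port the hardware can offload per tunnel type, with count acting as a reference count. A minimal sketch of the intended semantics (values hypothetical, not driver code):

/* One HW slot per tunnel type; count = number of requesters sharing it. */
struct bnx2x_udp_tunnel ports[BNX2X_UDP_PORT_MAX] = { {0} };

ports[BNX2X_UDP_PORT_VXLAN].dst_port = 4789;	/* first add programs HW */
ports[BNX2X_UDP_PORT_VXLAN].count = 1;
ports[BNX2X_UDP_PORT_VXLAN].count++;		/* same port added again */
ports[BNX2X_UDP_PORT_VXLAN].count--;		/* one delete: still in use */
/* Only when count reaches 0 is dst_port cleared and the device
 * reconfigured; adding a different port while count > 0 is rejected.
 */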
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9695a4c4a434..0a9108cd4c45 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3042,8 +3042,12 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
bnx2x_save_statistics(bp);
}
- /* wait till consumers catch up with producers in all queues */
- bnx2x_drain_tx_queues(bp);
+ /* wait till consumers catch up with producers in all queues.
+ * If we're recovering, FW can't write to host so no reason
+ * to wait for the queues to complete all Tx.
+ */
+ if (unload_mode != UNLOAD_RECOVERY)
+ bnx2x_drain_tx_queues(bp);
/* if VF indicate to PF this function is going down (PF will delete sp
* elements and clear initializations
@@ -4272,6 +4276,14 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
return 0;
}
+int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+ return bnx2x_setup_tc(dev, tc->tc);
+}
+
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
@@ -5086,4 +5098,3 @@ void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
flag);
schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
-EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 4cbb03f87b5a..0e68fadecfdb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -486,6 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc);
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
@@ -923,6 +925,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_start_params *start_params =
&func_params.params.start;
+ u16 port;
/* Prepare parameters for function state transitions */
__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -959,8 +962,14 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
start_params->network_cos_mode = STATIC_COS;
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
-
- start_params->vxlan_dst_port = bp->vxlan_dst_port;
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
+ port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].dst_port;
+ start_params->vxlan_dst_port = port;
+ }
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
+ port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].dst_port;
+ start_params->geneve_dst_port = port;
+ }
start_params->inner_rss = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 7ccf6684e0a3..2c6ba046d2a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -195,6 +195,7 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
u32 error) {
u8 index;
u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+ u8 iscsi_pri_found = 0, fcoe_pri_found = 0;
if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n");
@@ -210,29 +211,57 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
bp->dcbx_port_params.app.enabled = true;
+ /* Use 0 as the default application priority for all. */
for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
ttp[index] = 0;
- if (app->default_pri < MAX_PFC_PRIORITIES)
- ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
-
for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
struct dcbx_app_priority_entry *entry =
app->app_pri_tbl;
+ enum traffic_type type = MAX_TRAFFIC_TYPE;
if (GET_FLAGS(entry[index].appBitfield,
- DCBX_APP_SF_ETH_TYPE) &&
- ETH_TYPE_FCOE == entry[index].app_id)
- bnx2x_dcbx_get_ap_priority(bp,
- entry[index].pri_bitmap,
- LLFC_TRAFFIC_TYPE_FCOE);
+ DCBX_APP_SF_DEFAULT) &&
+ GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_ETH_TYPE)) {
+ type = LLFC_TRAFFIC_TYPE_NW;
+ } else if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_PORT) &&
+ TCP_PORT_ISCSI == entry[index].app_id) {
+ type = LLFC_TRAFFIC_TYPE_ISCSI;
+ iscsi_pri_found = 1;
+ } else if (GET_FLAGS(entry[index].appBitfield,
+ DCBX_APP_SF_ETH_TYPE) &&
+ ETH_TYPE_FCOE == entry[index].app_id) {
+ type = LLFC_TRAFFIC_TYPE_FCOE;
+ fcoe_pri_found = 1;
+ }
- if (GET_FLAGS(entry[index].appBitfield,
- DCBX_APP_SF_PORT) &&
- TCP_PORT_ISCSI == entry[index].app_id)
- bnx2x_dcbx_get_ap_priority(bp,
- entry[index].pri_bitmap,
- LLFC_TRAFFIC_TYPE_ISCSI);
+ if (type == MAX_TRAFFIC_TYPE)
+ continue;
+
+ bnx2x_dcbx_get_ap_priority(bp,
+ entry[index].pri_bitmap,
+ type);
+ }
+
+ /* If we have received a non-zero default application
+ * priority, then use that for applications which are
+ * not configured with any priority.
+ */
+ if (ttp[LLFC_TRAFFIC_TYPE_NW] != 0) {
+ if (!iscsi_pri_found) {
+ ttp[LLFC_TRAFFIC_TYPE_ISCSI] =
+ ttp[LLFC_TRAFFIC_TYPE_NW];
+ DP(BNX2X_MSG_DCB,
+ "ISCSI is using default priority.\n");
+ }
+ if (!fcoe_pri_found) {
+ ttp[LLFC_TRAFFIC_TYPE_FCOE] =
+ ttp[LLFC_TRAFFIC_TYPE_NW];
+ DP(BNX2X_MSG_DCB,
+ "FCoE is using default priority.\n");
+ }
}
} else {
DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 820b7e04bb5f..85a7800bfc12 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -981,6 +981,11 @@ static void bnx2x_get_regs(struct net_device *dev,
memcpy(p, &dump_hdr, sizeof(struct dump_header));
p += dump_hdr.header_size + 1;
+ /* This isn't really an error, but since attention handling is going
+ * to print the GRC timeouts using this macro, we use the same.
+ */
+ BNX2X_ERR("Generating register dump. Might trigger harmless GRC timeouts\n");
+
/* Actually read the registers */
__bnx2x_get_regs(bp, p);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 91874d24fd56..f8b810313094 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1824,17 +1824,22 @@ struct dcbx_app_priority_entry {
u8 pri_bitmap;
u8 appBitfield;
#define DCBX_APP_ENTRY_VALID 0x01
- #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_MASK 0xF0
#define DCBX_APP_ENTRY_SF_SHIFT 4
#define DCBX_APP_SF_ETH_TYPE 0x10
#define DCBX_APP_SF_PORT 0x20
+ #define DCBX_APP_SF_UDP 0x40
+ #define DCBX_APP_SF_DEFAULT 0x80
#elif defined(__LITTLE_ENDIAN)
u8 appBitfield;
#define DCBX_APP_ENTRY_VALID 0x01
- #define DCBX_APP_ENTRY_SF_MASK 0x30
+ #define DCBX_APP_ENTRY_SF_MASK 0xF0
#define DCBX_APP_ENTRY_SF_SHIFT 4
#define DCBX_APP_SF_ETH_TYPE 0x10
#define DCBX_APP_SF_PORT 0x20
+ #define DCBX_APP_SF_UDP 0x40
+ #define DCBX_APP_SF_DEFAULT 0x80
u8 pri_bitmap;
u16 app_id;
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2bf9c871144f..d465bd721146 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -59,7 +59,9 @@
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
-
+#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
+#include <net/geneve.h>
+#endif
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
@@ -10074,11 +10076,13 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
}
}
-#ifdef CONFIG_BNX2X_VXLAN
-static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
+static int bnx2x_udp_port_update(struct bnx2x *bp)
{
struct bnx2x_func_switch_update_params *switch_update_params;
struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_udp_tunnel *udp_tunnel;
+ u16 vxlan_port = 0, geneve_port = 0;
int rc;
switch_update_params = &func_params.params.switch_update;
@@ -10093,69 +10097,125 @@ static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
/* Function parameters */
__set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
&switch_update_params->changes);
- switch_update_params->vxlan_dst_port = port;
+
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
+ udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
+ geneve_port = udp_tunnel->dst_port;
+ switch_update_params->geneve_dst_port = geneve_port;
+ }
+
+ if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
+ udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
+ vxlan_port = udp_tunnel->dst_port;
+ switch_update_params->vxlan_dst_port = vxlan_port;
+ }
+
+ /* Re-enable inner-rss for the offloaded UDP tunnels */
+ __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
+ &switch_update_params->changes);
+
rc = bnx2x_func_state_change(bp, &func_params);
if (rc)
- BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
- port, rc);
+ BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
+ vxlan_port, geneve_port, rc);
+ else
+ DP(BNX2X_MSG_SP,
+ "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
+ vxlan_port, geneve_port);
+
return rc;
}
-static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
+ enum bnx2x_udp_port_type type)
{
- if (!netif_running(bp->dev))
+ struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
+
+ if (!netif_running(bp->dev) || !IS_PF(bp))
return;
- if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
- bp->vxlan_dst_port_count++;
+ if (udp_port->count && udp_port->dst_port == port) {
+ udp_port->count++;
return;
}
- if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
- DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+ if (udp_port->count) {
+ DP(BNX2X_MSG_SP,
+ "UDP tunnel [%d] - destination port limit reached\n",
+ type);
return;
}
- bp->vxlan_dst_port = port;
- bp->vxlan_dst_port_count = 1;
- bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+ udp_port->dst_port = port;
+ udp_port->count = 1;
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
}
+static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
+ enum bnx2x_udp_port_type type)
+{
+ struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
+
+ if (!IS_PF(bp))
+ return;
+
+ if (!udp_port->count || udp_port->dst_port != port) {
+ DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
+ type);
+ return;
+ }
+
+ /* Remove reference, and make certain it's no longer in use */
+ udp_port->count--;
+ if (udp_port->count)
+ return;
+ udp_port->dst_port = 0;
+
+ if (netif_running(bp->dev))
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
+ else
+ DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
+ type, port);
+}
+#endif
+
+#ifdef CONFIG_BNX2X_VXLAN
static void bnx2x_add_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
struct bnx2x *bp = netdev_priv(netdev);
u16 t_port = ntohs(port);
- __bnx2x_add_vxlan_port(bp, t_port);
+ __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
}
-static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
{
- if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
- !IS_PF(bp)) {
- DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
- return;
- }
- bp->vxlan_dst_port_count--;
- if (bp->vxlan_dst_port_count)
- return;
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 t_port = ntohs(port);
- if (netif_running(bp->dev)) {
- bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
- } else {
- bp->vxlan_dst_port = 0;
- netdev_info(bp->dev, "Deleted vxlan dest port %d", port);
- }
+ __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
}
+#endif
-static void bnx2x_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
+static void bnx2x_add_geneve_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct bnx2x *bp = netdev_priv(netdev);
+ u16 t_port = ntohs(port);
+
+ __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+}
+
+static void bnx2x_del_geneve_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
{
struct bnx2x *bp = netdev_priv(netdev);
u16 t_port = ntohs(port);
- __bnx2x_del_vxlan_port(bp, t_port);
+ __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
}
#endif
@@ -10167,9 +10227,6 @@ static int bnx2x_close(struct net_device *dev);
static void bnx2x_sp_rtnl_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
-#ifdef CONFIG_BNX2X_VXLAN
- u16 port;
-#endif
rtnl_lock();
@@ -10268,23 +10325,27 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
-#ifdef CONFIG_BNX2X_VXLAN
- port = bp->vxlan_dst_port;
- if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
- &bp->sp_rtnl_state)) {
- if (!bnx2x_vxlan_port_update(bp, port))
- netdev_info(bp->dev, "Added vxlan dest port %d", port);
- else
- bp->vxlan_dst_port = 0;
- }
-
- if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
+ if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
&bp->sp_rtnl_state)) {
- if (!bnx2x_vxlan_port_update(bp, 0)) {
- netdev_info(bp->dev,
- "Deleted vxlan dest port %d", port);
- bp->vxlan_dst_port = 0;
- vxlan_get_rx_port(bp->dev);
+ if (bnx2x_udp_port_update(bp)) {
+ /* On error, forget configuration */
+ memset(bp->udp_tunnel_ports, 0,
+ sizeof(struct bnx2x_udp_tunnel) *
+ BNX2X_UDP_PORT_MAX);
+ } else {
+ /* Since we don't store additional port information,
+ * if no port is configured for any feature, ask for
+ * information about currently configured ports.
+ */
+#ifdef CONFIG_BNX2X_VXLAN
+ if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count)
+ vxlan_get_rx_port(bp->dev);
+#endif
+#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
+ if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
+ geneve_get_rx_port(bp->dev);
+#endif
}
}
#endif
@@ -12366,8 +12427,10 @@ static int bnx2x_init_bp(struct bnx2x *bp)
if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
+ SHMEM2_HAS(bp, dcbx_en) &&
SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
- SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
+ SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
+ SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
bnx2x_dcbx_init_params(bp);
} else {
@@ -12492,6 +12555,10 @@ static int bnx2x_open(struct net_device *dev)
if (IS_PF(bp))
vxlan_get_rx_port(dev);
#endif
+#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
+ if (IS_PF(bp))
+ geneve_get_rx_port(dev);
+#endif
return 0;
}
@@ -12992,7 +13059,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
- .ndo_setup_tc = bnx2x_setup_tc,
+ .ndo_setup_tc = __bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
.ndo_set_vf_vlan = bnx2x_set_vf_vlan,
@@ -13009,6 +13076,10 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_add_vxlan_port = bnx2x_add_vxlan_port,
.ndo_del_vxlan_port = bnx2x_del_vxlan_port,
#endif
+#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
+ .ndo_add_geneve_port = bnx2x_add_geneve_port,
+ .ndo_del_geneve_port = bnx2x_del_geneve_port,
+#endif
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -14814,6 +14885,10 @@ static int bnx2x_get_fc_npiv(struct net_device *dev,
}
offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+ if (!offset) {
+ DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
+ goto out;
+ }
DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
/* Read the table contents from nvram */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 82f191382989..12a009d720cd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1240,13 +1240,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
switch (event_id) {
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
- break;
+ goto async_event_process_exit;
}
+ schedule_work(&bp->sp_task);
+async_event_process_exit:
return 0;
}
@@ -2358,6 +2362,14 @@ static void bnxt_free_stats(struct bnxt *bp)
u32 size, i;
struct pci_dev *pdev = bp->pdev;
+ if (bp->hw_rx_port_stats) {
+ dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
+ bp->hw_rx_port_stats,
+ bp->hw_rx_port_stats_map);
+ bp->hw_rx_port_stats = NULL;
+ bp->flags &= ~BNXT_FLAG_PORT_STATS;
+ }
+
if (!bp->bnapi)
return;
@@ -2394,6 +2406,24 @@ static int bnxt_alloc_stats(struct bnxt *bp)
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
+
+ if (BNXT_PF(bp)) {
+ bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
+ sizeof(struct tx_port_stats) + 1024;
+
+ bp->hw_rx_port_stats =
+ dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
+ &bp->hw_rx_port_stats_map,
+ GFP_KERNEL);
+ if (!bp->hw_rx_port_stats)
+ return -ENOMEM;
+
+ bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
+ 512;
+ bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
+ sizeof(struct rx_port_stats) + 512;
+ bp->flags |= BNXT_FLAG_PORT_STATS;
+ }
return 0;
}
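The rx and tx port statistics share one coherent allocation: the tx block starts past the full rx struct plus 512 bytes of slack, and the extra 1024 bytes in hw_port_stats_size cover the padding around both blocks. The layout implied by the arithmetic above:

/* One DMA buffer, two views:
 *
 *   offset 0                                    struct rx_port_stats
 *   offset sizeof(struct rx_port_stats) + 512   struct tx_port_stats
 *
 * hence hw_tx_port_stats_map = hw_rx_port_stats_map
 *                              + sizeof(struct rx_port_stats) + 512
 */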
@@ -2597,44 +2627,45 @@ alloc_mem_err:
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
u16 cmpl_ring, u16 target_id)
{
- struct hwrm_cmd_req_hdr *req = request;
+ struct input *req = request;
- req->cmpl_ring_req_type =
- cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
- req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+ req->req_type = cpu_to_le16(req_type);
+ req->cmpl_ring = cpu_to_le16(cmpl_ring);
+ req->target_id = cpu_to_le16(target_id);
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
-int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
+ int timeout, bool silent)
{
int i, intr_process, rc;
- struct hwrm_cmd_req_hdr *req = msg;
+ struct input *req = msg;
u32 *data = msg;
__le32 *resp_len, *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
- req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+ req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
- cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
- HWRM_CMPL_RING_MASK) >>
- HWRM_CMPL_RING_SFT;
+ cp_ring_id = le16_to_cpu(req->cmpl_ring);
intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
/* Write request msg to hwrm channel */
__iowrite32_copy(bp->bar0, data, msg_len / 4);
- for (i = msg_len; i < HWRM_MAX_REQ_LEN; i += 4)
+ for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
writel(0, bp->bar0 + i);
/* currently supports only one outstanding message */
if (intr_process)
- bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
- HWRM_SEQ_ID_MASK;
+ bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
/* Ring channel doorbell */
writel(1, bp->bar0 + 0x100);
+ if (!timeout)
+ timeout = DFLT_HWRM_CMD_TIMEOUT;
+
i = 0;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
@@ -2645,7 +2676,7 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- req->cmpl_ring_req_type);
+ le16_to_cpu(req->req_type));
return -1;
}
} else {
@@ -2661,8 +2692,8 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (i >= timeout) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
- timeout, req->cmpl_ring_req_type,
- req->target_id_seq_id, *resp_len);
+ timeout, le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), *resp_len);
return -1;
}
@@ -2676,20 +2707,23 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
if (i >= timeout) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
- timeout, req->cmpl_ring_req_type,
- req->target_id_seq_id, len, *valid);
+ timeout, le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len, *valid);
return -1;
}
}
rc = le16_to_cpu(resp->error_code);
- if (rc) {
+ if (rc && !silent)
netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
le16_to_cpu(resp->req_type),
le16_to_cpu(resp->seq_id), rc);
- return rc;
- }
- return 0;
+ return rc;
+}
+
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+ return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}
int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
@@ -2702,6 +2736,17 @@ int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
return rc;
}
+int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+ int timeout)
+{
+ int rc;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
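The silent variant exists for probe-style requests where failure is an expected answer rather than an error. A sketch of the intended use, mirroring the NVRAM directory lookup added later in this patch:

/* Probe for an optional NVM item; absence is normal, so do not log it. */
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
	return rc;	/* caller treats "not found" as a soft miss */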
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_input req = {0};
@@ -3346,11 +3391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ cpr->cp_doorbell = bp->bar1 + i * 0x80;
rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
INVALID_STATS_CTX_ID);
if (rc)
goto err_out;
- cpr->cp_doorbell = bp->bar1 + i * 0x80;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
}
@@ -3518,47 +3563,82 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
+ u32 buf_tmrs, u16 flags,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ req->flags = cpu_to_le16(flags);
+ req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
+ req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
+ req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
+ req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
+ /* Minimum time between 2 interrupts set to buf_tmr x 2 */
+ req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
+ req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
+ req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
+}
+
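Both u32 parameters pack two 16-bit values: the low half applies in normal operation, the high half while interrupt processing runs. With the rx defaults set in bnxt_init_board() below (rx_coal_ticks = 12, rx_coal_bufs = 30, rx_coal_ticks_irq = 1, rx_coal_bufs_irq = 2), the rx request works out as follows (integer math as in the code):

/* max_buf     = clamp(min(30 / 4, 2), 1, 63)           = 2
 * max_buf_irq = clamp(2, 1, 63)                         = 2
 * buf_tmr     = max(BNXT_USEC_TO_COAL_TIMER(12) / 4, 1) = 150 / 4 = 37
 * buf_tmr_irq = max(BNXT_USEC_TO_COAL_TIMER(1), 1)      = 12
 *
 * packed: max_bufs = 2 << 16 | 2, buf_tmrs = 12 << 16 | 37, so req_rx
 * gets num_cmpl_dma_aggr = 2, cmpl_aggr_dma_tmr = 37, int_lat_tmr_min
 * = 74, int_lat_tmr_max = 148 and num_cmpl_aggr_int = 8.
 */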
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
int i, rc = 0;
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
+ req_tx = {0}, *req;
u16 max_buf, max_buf_irq;
u16 buf_tmr, buf_tmr_irq;
u32 flags;
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
- -1, -1);
+ bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+ bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
- /* Each rx completion (2 records) should be DMAed immediately */
- max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+ /* Each rx completion (2 records) should be DMAed immediately.
+ * DMA 1/4 of the completion buffers at a time.
+ */
+ max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
/* max_buf must not be zero */
max_buf = clamp_t(u16, max_buf, 1, 63);
- max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
- buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
- buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
+ max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
+ buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
+ /* buf timer set to 1/4 of interrupt timer */
+ buf_tmr = max_t(u16, buf_tmr / 4, 1);
+ buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
+ buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* RING_IDLE generates more IRQs for lower latency. Enable it only
* if coal_ticks is less than 25 us.
*/
- if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+ if (bp->rx_coal_ticks < 25)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
- req.flags = cpu_to_le16(flags);
- req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
- req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
- req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
- req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
- req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
- req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
- req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+ bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
+ buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
+
+ /* max_buf must not be zero */
+ max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
+ max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
+ buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
+ /* buf timer set to 1/4 of interrupt timer */
+ buf_tmr = max_t(u16, buf_tmr / 4, 1);
+ buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
+ buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
+
+ flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
+ buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
- req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+ struct bnxt_napi *bnapi = bp->bnapi[i];
- rc = _hwrm_send_message(bp, &req, sizeof(req),
+ req = &req_rx;
+ if (!bnapi->rx_ring)
+ req = &req_tx;
+ req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+ rc = _hwrm_send_message(bp, req, sizeof(*req),
HWRM_CMD_TIMEOUT);
if (rc)
break;
@@ -3750,6 +3830,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
struct hwrm_ver_get_input req = {0};
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -3767,15 +3848,39 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_upd);
netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
}
- snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+ snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+ bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+ if (!bp->hwrm_cmd_timeout)
+ bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+
+ if (resp->hwrm_intf_maj >= 1)
+ bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
+static int bnxt_hwrm_port_qstats(struct bnxt *bp)
+{
+ int rc;
+ struct bnxt_pf_info *pf = &bp->pf;
+ struct hwrm_port_qstats_input req = {0};
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
+ req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+}
+
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
if (bp->vxlan_port_cnt) {
@@ -4410,6 +4515,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->pause = resp->pause;
link_info->auto_mode = resp->auto_mode;
link_info->auto_pause_setting = resp->auto_pause;
+ link_info->lp_pause = resp->link_partner_adv_pause;
link_info->force_pause_setting = resp->force_pause;
link_info->duplex_setting = resp->duplex;
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -4420,6 +4526,8 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+ link_info->lp_auto_link_speeds =
+ le16_to_cpu(resp->link_partner_adv_speeds);
link_info->preemphasis = le32_to_cpu(resp->preemphasis);
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
@@ -4451,7 +4559,7 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
- req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+ req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
req->enables |=
cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
} else {
@@ -4831,6 +4939,22 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ struct rx_port_stats *rx = bp->hw_rx_port_stats;
+ struct tx_port_stats *tx = bp->hw_tx_port_stats;
+
+ stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
+ stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
+ stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
+ le64_to_cpu(rx->rx_ovrsz_frames) +
+ le64_to_cpu(rx->rx_runt_frames);
+ stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
+ le64_to_cpu(rx->rx_jbr_frames);
+ stats->collisions = le64_to_cpu(tx->tx_total_collisions);
+ stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
+ stats->tx_errors = le64_to_cpu(tx->tx_err);
+ }
+
return stats;
}
@@ -5171,6 +5295,10 @@ static void bnxt_timer(unsigned long data)
if (atomic_read(&bp->intr_sem) != 0)
goto bnxt_restart_timer;
+ if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
+ set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
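Together with the sp_task hook below and the BNXT_TIMER_INTERVAL change to HZ in bnxt.h, this refreshes the port counters roughly once a second while the link is up:

/* bnxt_timer()  : link up && BNXT_FLAG_PORT_STATS set
 *	-> set_bit(BNXT_PERIODIC_STATS_SP_EVENT), schedule_work(&bp->sp_task)
 * bnxt_sp_task(): test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT)
 *	-> bnxt_hwrm_port_qstats(): firmware DMAs fresh counters into
 *	   hw_rx_port_stats / hw_tx_port_stats
 */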
@@ -5222,6 +5350,9 @@ static void bnxt_sp_task(struct work_struct *work)
rtnl_unlock();
}
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_port_qstats(bp);
+
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
@@ -5285,6 +5416,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
+ pci_enable_pcie_error_reporting(pdev);
+
INIT_WORK(&bp->sp_task, bnxt_sp_task);
spin_lock_init(&bp->ntp_fltr_lock);
@@ -5292,10 +5425,16 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
- bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
- bp->coal_bufs = 20;
- bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
- bp->coal_bufs_irq = 2;
+ /* tick values in microseconds */
+ bp->rx_coal_ticks = 12;
+ bp->rx_coal_bufs = 30;
+ bp->rx_coal_ticks_irq = 1;
+ bp->rx_coal_bufs_irq = 2;
+
+ bp->tx_coal_ticks = 25;
+ bp->tx_coal_bufs = 30;
+ bp->tx_coal_ticks_irq = 2;
+ bp->tx_coal_bufs_irq = 2;
init_timer(&bp->timer);
bp->timer.data = (unsigned long)bp;
@@ -5378,9 +5517,16 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int bnxt_setup_tc(struct net_device *dev, u8 tc)
+static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *ntc)
{
struct bnxt *bp = netdev_priv(dev);
+ u8 tc;
+
+ if (ntc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ tc = ntc->tc;
if (tc > bp->max_tc) {
netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
@@ -5553,6 +5699,8 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
}
+ if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
+ netdev_info(bp->dev, "Received PF driver unload event!\n");
}
#else
@@ -5649,6 +5797,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
+ pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
cancel_work_sync(&bp->sp_task);
bp->sp_event = 0;
@@ -5668,7 +5817,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
- char phy_ver[PHY_VER_STR_LEN];
rc = bnxt_update_link(bp, false);
if (rc) {
@@ -5688,11 +5836,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
link_info->req_duplex = link_info->duplex_setting;
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
- snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
- link_info->phy_ver[0],
- link_info->phy_ver[1],
- link_info->phy_ver[2]);
- strcat(bp->fw_ver_str, phy_ver);
return rc;
}
@@ -5894,11 +6037,117 @@ init_err_free:
return rc;
}
+/**
+ * bnxt_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
+ rtnl_lock();
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure) {
+ rtnl_unlock();
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (netif_running(netdev))
+ bnxt_close(netdev);
+
+ pci_disable_device(pdev);
+ rtnl_unlock();
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnxt_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err = 0;
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+
+ netdev_info(bp->dev, "PCI Slot Reset\n");
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ } else {
+ pci_set_master(pdev);
+
+ if (netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ if (!err)
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
+ dev_close(netdev);
+
+ rtnl_unlock();
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+ err); /* non-fatal, continue */
+ }
+
+ return result;
+}
+
+/**
+ * bnxt_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void bnxt_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+
+ rtnl_lock();
+
+ netif_device_attach(netdev);
+
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers bnxt_err_handler = {
+ .error_detected = bnxt_io_error_detected,
+ .slot_reset = bnxt_io_slot_reset,
+ .resume = bnxt_io_resume
+};
+
static struct pci_driver bnxt_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = bnxt_pci_tbl,
.probe = bnxt_init_one,
.remove = bnxt_remove_one,
+ .err_handler = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
.sriov_configure = bnxt_sriov_configure,
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 2be51b332652..709b95b8fcba 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -477,12 +477,16 @@ struct rx_tpa_end_cmp_ext {
#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
-#define HWRM_CMD_TIMEOUT 500
+#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
+#define DFLT_HWRM_CMD_TIMEOUT 500
+#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
+#define HWRM_RESP_LEN_OFFSET 4
#define HWRM_RESP_LEN_MASK 0xffff0000
#define HWRM_RESP_LEN_SFT 16
#define HWRM_RESP_VALID_MASK 0xff000000
+#define HWRM_SEQ_ID_INVALID -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
@@ -644,19 +648,6 @@ struct bnxt_irq {
#define INVALID_STATS_CTX_ID -1
-struct hwrm_cmd_req_hdr {
-#define HWRM_CMPL_RING_MASK 0xffff0000
-#define HWRM_CMPL_RING_SFT 16
- __le32 cmpl_ring_req_type;
-#define HWRM_SEQ_ID_MASK 0xffff
-#define HWRM_SEQ_ID_INVALID -1
-#define HWRM_RESP_LEN_OFFSET 4
-#define HWRM_TARGET_FID_MASK 0xffff0000
-#define HWRM_TARGET_FID_SFT 16
- __le32 target_id_seq_id;
- __le64 resp_addr;
-};
-
struct bnxt_ring_grp_info {
u16 fw_stats_ctx;
u16 fw_grp_id;
@@ -767,10 +758,6 @@ struct bnxt_ntuple_filter {
#define BNXT_FLTR_UPDATE 1
};
-#define BNXT_ALL_COPPER_ETHTOOL_SPEED \
- (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \
- ADVERTISED_10000baseT_Full)
-
struct bnxt_link_info {
u8 media_type;
u8 transceiver;
@@ -790,6 +777,7 @@ struct bnxt_link_info {
#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX
#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \
PORT_PHY_QCFG_RESP_PAUSE_TX)
+ u8 lp_pause;
u8 auto_pause_setting;
u8 force_pause_setting;
u8 duplex_setting;
@@ -824,6 +812,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+ u16 lp_auto_link_speeds;
u16 auto_link_speed;
u16 force_link_speed;
u32 preemphasis;
@@ -885,6 +874,7 @@ struct bnxt {
#define BNXT_FLAG_MSIX_CAP 0x80
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
+ #define BNXT_FLAG_PORT_STATS 0x400
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -937,7 +927,7 @@ struct bnxt {
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
unsigned int current_interval;
-#define BNXT_TIMER_INTERVAL (HZ / 2)
+#define BNXT_TIMER_INTERVAL HZ
struct timer_list timer;
@@ -957,6 +947,15 @@ struct bnxt {
void *hwrm_dbg_resp_addr;
dma_addr_t hwrm_dbg_resp_dma_addr;
#define HWRM_DBG_REG_BUF_SIZE 128
+
+ struct rx_port_stats *hw_rx_port_stats;
+ struct tx_port_stats *hw_tx_port_stats;
+ dma_addr_t hw_rx_port_stats_map;
+ dma_addr_t hw_tx_port_stats_map;
+ int hw_port_stats_size;
+
+ u16 hwrm_max_req_len;
+ int hwrm_cmd_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
#define FW_VER_STR_LEN 32
@@ -968,13 +967,17 @@ struct bnxt {
__le16 vxlan_fw_dst_port_id;
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
- u16 coal_ticks;
- u16 coal_ticks_irq;
- u16 coal_bufs;
- u16 coal_bufs_irq;
+
+ u16 rx_coal_ticks;
+ u16 rx_coal_ticks_irq;
+ u16 rx_coal_bufs;
+ u16 rx_coal_bufs_irq;
+ u16 tx_coal_ticks;
+ u16 tx_coal_ticks_irq;
+ u16 tx_coal_bufs;
+ u16 tx_coal_bufs_irq;
#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
-#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
struct work_struct sp_task;
unsigned long sp_event;
@@ -986,6 +989,8 @@ struct bnxt {
#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
#define BNXT_RESET_TASK_SP_EVENT 6
#define BNXT_RST_RING_SP_EVENT 7
+#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
+#define BNXT_PERIODIC_STATS_SP_EVENT 9
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1099,6 +1104,7 @@ void bnxt_set_ring_params(struct bnxt *);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 3238817dfd5f..2e472f6dbf2d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -7,6 +7,8 @@
* the Free Software Foundation.
*/
+#include <linux/ctype.h>
+#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -20,6 +22,8 @@
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
+
static u32 bnxt_get_msglevel(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -41,12 +45,16 @@ static int bnxt_get_coalesce(struct net_device *dev,
memset(coal, 0, sizeof(*coal));
- coal->rx_coalesce_usecs =
- max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
- coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
- coal->rx_coalesce_usecs_irq =
- max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
- coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+ coal->rx_coalesce_usecs = bp->rx_coal_ticks;
+ /* 2 completion records per rx packet */
+ coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2;
+ coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq;
+ coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2;
+
+ coal->tx_coalesce_usecs = bp->tx_coal_ticks;
+ coal->tx_max_coalesced_frames = bp->tx_coal_bufs;
+ coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
+ coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
return 0;
}
@@ -57,11 +65,16 @@ static int bnxt_set_coalesce(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
- bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
- bp->coal_ticks_irq =
- BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
- bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+ bp->rx_coal_ticks = coal->rx_coalesce_usecs;
+ /* 2 completion records per rx packet */
+ bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2;
+ bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq;
+ bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+ bp->tx_coal_ticks = coal->tx_coalesce_usecs;
+ bp->tx_coal_bufs = coal->tx_max_coalesced_frames;
+ bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
+ bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
if (netif_running(dev))
rc = bnxt_hwrm_set_coal(bp);
@@ -71,13 +84,99 @@ static int bnxt_set_coalesce(struct net_device *dev,
#define BNXT_NUM_STATS 21
+#define BNXT_RX_STATS_OFFSET(counter) \
+ (offsetof(struct rx_port_stats, counter) / 8)
+
+#define BNXT_RX_STATS_ENTRY(counter) \
+ { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
+
+#define BNXT_TX_STATS_OFFSET(counter) \
+ ((offsetof(struct tx_port_stats, counter) + \
+ sizeof(struct rx_port_stats) + 512) / 8)
+
+#define BNXT_TX_STATS_ENTRY(counter) \
+ { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
+
+static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+} bnxt_port_stats_arr[] = {
+ BNXT_RX_STATS_ENTRY(rx_64b_frames),
+ BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
+ BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
+ BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
+ BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
+ BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames),
+ BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
+ BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
+ BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
+ BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
+ BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
+ BNXT_RX_STATS_ENTRY(rx_total_frames),
+ BNXT_RX_STATS_ENTRY(rx_ucast_frames),
+ BNXT_RX_STATS_ENTRY(rx_mcast_frames),
+ BNXT_RX_STATS_ENTRY(rx_bcast_frames),
+ BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
+ BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
+ BNXT_RX_STATS_ENTRY(rx_pause_frames),
+ BNXT_RX_STATS_ENTRY(rx_pfc_frames),
+ BNXT_RX_STATS_ENTRY(rx_align_err_frames),
+ BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
+ BNXT_RX_STATS_ENTRY(rx_jbr_frames),
+ BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
+ BNXT_RX_STATS_ENTRY(rx_tagged_frames),
+ BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
+ BNXT_RX_STATS_ENTRY(rx_good_frames),
+ BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
+ BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
+ BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
+ BNXT_RX_STATS_ENTRY(rx_bytes),
+ BNXT_RX_STATS_ENTRY(rx_runt_bytes),
+ BNXT_RX_STATS_ENTRY(rx_runt_frames),
+
+ BNXT_TX_STATS_ENTRY(tx_64b_frames),
+ BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
+ BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
+ BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
+ BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
+ BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames),
+ BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
+ BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames),
+ BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
+ BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
+ BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
+ BNXT_TX_STATS_ENTRY(tx_good_frames),
+ BNXT_TX_STATS_ENTRY(tx_total_frames),
+ BNXT_TX_STATS_ENTRY(tx_ucast_frames),
+ BNXT_TX_STATS_ENTRY(tx_mcast_frames),
+ BNXT_TX_STATS_ENTRY(tx_bcast_frames),
+ BNXT_TX_STATS_ENTRY(tx_pause_frames),
+ BNXT_TX_STATS_ENTRY(tx_pfc_frames),
+ BNXT_TX_STATS_ENTRY(tx_jabber_frames),
+ BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
+ BNXT_TX_STATS_ENTRY(tx_err),
+ BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
+ BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
+ BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
+ BNXT_TX_STATS_ENTRY(tx_total_collisions),
+ BNXT_TX_STATS_ENTRY(tx_bytes),
+};
+
+#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
+
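The offsets are in __le64 units (hence the division by 8), so each table entry indexes directly into a __le64 view of the shared port-stats buffer; tx entries additionally skip the whole rx block plus the 512-byte gap from bnxt_alloc_stats(). For example:

/* __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
 *
 * port_stats[BNXT_RX_STATS_OFFSET(rx_bytes)] -> rx_port_stats.rx_bytes
 * port_stats[BNXT_TX_STATS_OFFSET(tx_bytes)] -> tx_port_stats.tx_bytes,
 *   i.e. (offsetof(struct tx_port_stats, tx_bytes)
 *         + sizeof(struct rx_port_stats) + 512) / 8
 */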
static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
struct bnxt *bp = netdev_priv(dev);
switch (sset) {
- case ETH_SS_STATS:
- return BNXT_NUM_STATS * bp->cp_nr_rings;
+ case ETH_SS_STATS: {
+ int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+
+ if (bp->flags & BNXT_FLAG_PORT_STATS)
+ num_stats += BNXT_NUM_PORT_STATS;
+
+ return num_stats;
+ }
default:
return -EOPNOTSUPP;
}
@@ -106,6 +205,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = le64_to_cpu(hw_stats[k]);
buf[j++] = cpr->rx_l4_csum_errors;
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
+
+ for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
+ buf[j] = le64_to_cpu(*(port_stats +
+ bnxt_port_stats_arr[i].offset));
+ }
+ }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -160,6 +267,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
sprintf(buf, "[%d]: rx_l4_csum_errors", i);
buf += ETH_GSTRING_LEN;
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
+ strcpy(buf, bnxt_port_stats_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
break;
default:
netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
@@ -460,10 +573,20 @@ static void bnxt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnxt *bp = netdev_priv(dev);
+ char *pkglog;
+ char *pkgver = NULL;
+ pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
+ if (pkglog)
+ pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver))
+ snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+ "%s pkg %s", bp->fw_ver_str, pkgver);
+ else
+ strlcpy(info->fw_version, bp->fw_ver_str,
+ sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
info->testinfo_len = BNXT_NUM_TESTS(bp);
@@ -471,30 +594,11 @@ static void bnxt_get_drvinfo(struct net_device *dev,
info->eedump_len = 0;
/* TODO CHIMP FW: reg dump details */
info->regdump_len = 0;
+ kfree(pkglog);
}
-static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
- u16 fw_speeds = link_info->support_speeds;
- u32 speed_mask = 0;
-
- if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
- speed_mask |= SUPPORTED_100baseT_Full;
- if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
- speed_mask |= SUPPORTED_1000baseT_Full;
- if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
- speed_mask |= SUPPORTED_2500baseX_Full;
- if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
- speed_mask |= SUPPORTED_10000baseT_Full;
- if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= SUPPORTED_40000baseCR4_Full;
-
- return speed_mask;
-}
-
-static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
-{
- u16 fw_speeds = link_info->auto_link_speeds;
u32 speed_mask = 0;
/* TODO: support 25GB, 40GB, 50GB with different cable type */
@@ -509,9 +613,48 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
speed_mask |= ADVERTISED_10000baseT_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
speed_mask |= ADVERTISED_40000baseCR4_Full;
+
+ if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
+ speed_mask |= ADVERTISED_Pause;
+ else if (fw_pause & BNXT_LINK_PAUSE_TX)
+ speed_mask |= ADVERTISED_Asym_Pause;
+ else if (fw_pause & BNXT_LINK_PAUSE_RX)
+ speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
return speed_mask;
}
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->auto_link_speeds;
+ u8 fw_pause = 0;
+
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ fw_pause = link_info->auto_pause_setting;
+
+ return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
+}
+
+static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->lp_auto_link_speeds;
+ u8 fw_pause = 0;
+
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ fw_pause = link_info->lp_pause;
+
+ return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
+}
+
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+ u16 fw_speeds = link_info->support_speeds;
+ u32 supported;
+
+ supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
switch (fw_link_speed) {
@@ -543,7 +686,6 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
u16 ethtool_speed;
cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
- cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
if (link_info->auto_link_speeds)
cmd->supported |= SUPPORTED_Autoneg;
@@ -553,21 +695,13 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bnxt_fw_to_ethtool_advertised_spds(link_info);
cmd->advertising |= ADVERTISED_Autoneg;
cmd->autoneg = AUTONEG_ENABLE;
+ if (link_info->phy_link_status == BNXT_LINK_LINK)
+ cmd->lp_advertising =
+ bnxt_fw_to_ethtool_lp_adv(link_info);
} else {
cmd->autoneg = AUTONEG_DISABLE;
cmd->advertising = 0;
}
- if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) {
- if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
- BNXT_LINK_PAUSE_BOTH) {
- cmd->advertising |= ADVERTISED_Pause;
- } else {
- cmd->advertising |= ADVERTISED_Asym_Pause;
- if (link_info->auto_pause_setting &
- BNXT_LINK_PAUSE_RX)
- cmd->advertising |= ADVERTISED_Pause;
- }
- }
cmd->port = PORT_NONE;
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
@@ -663,16 +797,10 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return rc;
if (cmd->autoneg == AUTONEG_ENABLE) {
- if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
- netdev_err(dev, "Media type doesn't support autoneg\n");
- rc = -EINVAL;
- goto set_setting_exit;
- }
- if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
- ADVERTISED_Autoneg |
- ADVERTISED_TP |
- ADVERTISED_Pause |
- ADVERTISED_Asym_Pause)) {
+ u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info);
+
+ if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg |
+ ADVERTISED_TP | ADVERTISED_FIBRE)) {
netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
cmd->advertising);
rc = -EINVAL;
@@ -727,8 +855,8 @@ static void bnxt_get_pauseparam(struct net_device *dev,
if (BNXT_VF(bp))
return;
epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
- epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
- epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
+ epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
+ epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}
static int bnxt_set_pauseparam(struct net_device *dev,
@@ -1102,6 +1230,85 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+ struct hwrm_nvm_find_dir_entry_input req = {0};
+ struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
+ req.enables = 0;
+ req.dir_idx = 0;
+ req.dir_type = cpu_to_le16(type);
+ req.dir_ordinal = cpu_to_le16(ordinal);
+ req.dir_ext = cpu_to_le16(ext);
+ req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == 0) {
+ if (index)
+ *index = le16_to_cpu(output->dir_idx);
+ if (item_length)
+ *item_length = le32_to_cpu(output->dir_item_length);
+ if (data_length)
+ *data_length = le32_to_cpu(output->dir_data_length);
+ }
+ return rc;
+}
+
+static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
+{
+ char *retval = NULL;
+ char *p;
+ char *value;
+ int field = 0;
+
+ if (datalen < 1)
+ return NULL;
+ /* null-terminate the log data (removing last '\n'): */
+ data[datalen - 1] = 0;
+ for (p = data; *p != 0; p++) {
+ field = 0;
+ retval = NULL;
+ while (*p != 0 && *p != '\n') {
+ value = p;
+ while (*p != 0 && *p != '\t' && *p != '\n')
+ p++;
+ if (field == desired_field)
+ retval = value;
+ if (*p != '\t')
+ break;
+ *p = 0;
+ field++;
+ p++;
+ }
+ if (*p == 0)
+ break;
+ *p = 0;
+ }
+ return retval;
+}
+
+static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
+{
+ u16 index = 0;
+ u32 datalen;
+
+ if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) != 0)
+ return NULL;
+
+ memset(buf, 0, buflen);
+ if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
+ return NULL;
+
+ return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
+ datalen);
+}
+
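The pkglog parser added above walks the NVRAM package log as newline-terminated records of tab-separated fields and returns the requested column of the last record; field index 2 matches BNX_PKG_LOG_FIELD_IDX_PKG_VERSION defined later in this patch. A minimal userspace sketch of the same walk, with made-up sample records:

#include <stddef.h>
#include <stdio.h>

/* Same walk as bnxt_parse_pkglog(): records end in '\n', fields are
 * separated by '\t', and the requested field of the *last* record wins. */
static char *parse_pkglog(int desired_field, char *data, size_t datalen)
{
        char *retval = NULL;
        char *p, *value;
        int field;

        if (datalen < 1)
                return NULL;
        data[datalen - 1] = 0;          /* drop the trailing '\n' */
        for (p = data; *p != 0; p++) {
                field = 0;
                retval = NULL;
                while (*p != 0 && *p != '\n') {
                        value = p;
                        while (*p != 0 && *p != '\t' && *p != '\n')
                                p++;
                        if (field == desired_field)
                                retval = value;
                        if (*p != '\t')
                                break;
                        *p = 0;         /* terminate this field */
                        field++;
                        p++;
                }
                if (*p == 0)
                        break;
                *p = 0;                 /* terminate this record */
        }
        return retval;
}

int main(void)
{
        /* Hypothetical log: timestamp \t description \t version per record. */
        char log[] = "20160101\tinitial\t1.0.0\n20160301\tupdate\t1.2.3\n";

        printf("pkg version: %s\n",
               parse_pkglog(2, log, sizeof(log) - 1));  /* prints 1.2.3 */
        return 0;
}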
static int bnxt_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom,
u8 *data)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 3cf3e1b70b64..43ef392c8588 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -50,10 +50,24 @@ enum bnxt_nvm_directory_type {
#define BNX_DIR_ORDINAL_FIRST 0
+#define BNX_DIR_EXT_NONE 0
#define BNX_DIR_EXT_INACTIVE (1 << 0)
#define BNX_DIR_EXT_UPDATE (1 << 1)
+#define BNX_DIR_ATTR_NONE 0
#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
+#define BNX_PKG_LOG_MAX_LENGTH 4096
+
+enum bnxnvm_pkglog_field_index {
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
+ BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
+ BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
+ BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
+ BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
+};
+
#endif /* Don't add anything after this line */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index c1cc83d7e38c..0c5f510492f1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -522,6 +522,46 @@ err_out1:
return rc;
}
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+ struct bnxt_vf_info *vf,
+ u16 event_id)
+{
+ int rc = 0;
+ struct hwrm_fwd_async_event_cmpl_input req = {0};
+ struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_async_event_cmpl *async_cmpl;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+ if (vf)
+ req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+ else
+ /* broadcast this async event to all VFs */
+ req.encap_async_event_target_id = cpu_to_le16(0xffff);
+ async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+ async_cmpl->type =
+ cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+ async_cmpl->event_id = cpu_to_le16(event_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+ rc);
+ goto fwd_async_event_cmpl_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_async_event_cmpl_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
void bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
@@ -530,6 +570,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
if (pci_vfs_assigned(bp->pdev)) {
+ bnxt_hwrm_fwd_async_event_cmpl(
+ bp, NULL,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
num_vfs);
} else {
@@ -758,8 +801,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
int rc = 0;
- struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
- u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+ struct input *encap_req = vf->hwrm_cmd_req_addr;
+ u32 req_type = le16_to_cpu(encap_req->req_type);
switch (req_type) {
case HWRM_CFA_L2_FILTER_ALLOC:
@@ -809,13 +852,19 @@ void bnxt_update_vf_mac(struct bnxt *bp)
if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
goto update_vf_mac_exit;
- if (!is_valid_ether_addr(resp->perm_mac_address))
- goto update_vf_mac_exit;
-
+ /* Store MAC address from the firmware. There are 2 cases:
+ * 1. MAC address is valid. It is assigned from the PF and we
+ * need to override the current VF MAC address with it.
+ * 2. MAC address is zero. The VF will use a random MAC address by
+ * default, but the stored zero MAC will allow the VF user to change
+ * the random MAC address using ndo_set_mac_address() if desired.
+ */
if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+
+ /* overwrite netdev dev_addr with admin VF MAC */
+ if (is_valid_ether_addr(bp->vf.mac_addr))
+ memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}
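A note on the bnxt_vf_req_validate_snd() change above: the encapsulated request is now read through the generic 'struct input' header, so the 16-bit req_type field is loaded with le16_to_cpu() instead of masking a wider le32 load. A small host-independent sketch of the fixed-endian access pattern (the struct and the bytes are made up, not the real HWRM layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Made-up stand-in for a DMA-visible header: the device writes it in
 * little-endian byte order regardless of host endianness. */
struct demo_input {
        uint16_t req_type;      /* little-endian on the wire */
        uint16_t cmpl_ring;     /* little-endian on the wire */
};

/* Portable little-endian 16-bit load (what le16_to_cpu() boils down to). */
static uint16_t le16_load(const void *p)
{
        const uint8_t *b = p;

        return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
}

int main(void)
{
        /* Bytes as a device would DMA them: req_type = 0x0090 (LE). */
        uint8_t wire[4] = { 0x90, 0x00, 0x01, 0x00 };
        struct demo_input req;

        memcpy(&req, wire, sizeof(req));
        printf("req_type = 0x%04x\n", le16_load(&req.req_type));
        return 0;
}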
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index d7e01a74e927..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ if (sizeof(unsigned long) != sizeof(u32) &&
+ s->stat_sizeof == sizeof(unsigned long))
+ data[i] = *(unsigned long *)p;
+ else
+ data[i] = *(u32 *)p;
}
}
@@ -1171,6 +1175,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned int pkts_compl = 0;
+ unsigned int bytes_compl = 0;
unsigned int c_index;
unsigned int txbds_ready;
unsigned int txbds_processed = 0;
@@ -1193,16 +1198,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
if (tx_cb_ptr->skb) {
pkts_compl++;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
- tx_cb_ptr->skb->len,
+ dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
- dev->stats.tx_bytes +=
- dma_unmap_len(tx_cb_ptr, dma_len);
dma_unmap_page(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1220,6 +1222,9 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
ring->free_bds += txbds_processed;
ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ dev->stats.tx_packets += pkts_compl;
+ dev->stats.tx_bytes += bytes_compl;
+
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
txq = netdev_get_tx_queue(dev, ring->queue);
if (netif_tx_queue_stopped(txq))
@@ -1296,7 +1301,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
tx_cb_ptr->skb = skb;
- skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+ skb_len = skb_headlen(skb);
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
@@ -1308,7 +1313,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
DMA_TX_APPEND_CRC;
@@ -1464,6 +1469,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ /* Record how many bytes will be sent on the wire, excluding the TSB
+ * inserted by transmit checksum offload
+ */
+ GENET_CB(skb)->bytes_sent = skb->len;
+
/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
skb = bcmgenet_put_tx_csum(dev, skb);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 967367557309..1e2dc34d331a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -531,6 +531,12 @@ struct bcmgenet_hw_params {
u32 flags;
};
+struct bcmgenet_skb_cb {
+ unsigned int bytes_sent; /* bytes on the wire (no TSB) */
+};
+
+#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb))
+
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
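GENET_CB() above follows the common pattern of stashing per-packet driver state in the sk_buff's 48-byte cb[] scratch area, avoiding a per-packet allocation. A hedged kernel-context sketch of the pattern with a compile-time guard against outgrowing cb[] (all demo_* names are illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Per-packet driver state carried in skb->cb; names are illustrative. */
struct demo_skb_cb {
        unsigned int bytes_sent;        /* wire bytes, before any TSB/header */
};

#define DEMO_CB(skb) ((struct demo_skb_cb *)((skb)->cb))

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* Fails the build if the private state outgrows the 48-byte cb[]. */
        BUILD_BUG_ON(sizeof(struct demo_skb_cb) >
                     sizeof(((struct sk_buff *)0)->cb));

        DEMO_CB(skb)->bytes_sent = skb->len;    /* record before mangling */
        /* ... map the skb and hand it to the hardware queue ... */
        return NETDEV_TX_OK;
}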
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 50c94104f19c..48a7d7dee846 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -58,6 +58,9 @@
#define GEM_MTU_MIN_SIZE 68
+#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
+#define MACB_WOL_ENABLED (0x1 << 1)
+
/*
* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -914,7 +917,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
unsigned int frag_len = bp->rx_buffer_size;
if (offset + frag_len > len) {
- BUG_ON(frag != last_frag);
+ if (unlikely(frag != last_frag)) {
+ dev_kfree_skb_any(skb);
+ return -1;
+ }
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
@@ -942,8 +948,23 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
return 0;
}
+static inline void macb_init_rx_ring(struct macb *bp)
+{
+ dma_addr_t addr;
+ int i;
+
+ addr = bp->rx_buffers_dma;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ bp->rx_ring[i].addr = addr;
+ bp->rx_ring[i].ctrl = 0;
+ addr += bp->rx_buffer_size;
+ }
+ bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+}
+
static int macb_rx(struct macb *bp, int budget)
{
+ bool reset_rx_queue = false;
int received = 0;
unsigned int tail;
int first_frag = -1;
@@ -969,10 +990,18 @@ static int macb_rx(struct macb *bp, int budget)
if (ctrl & MACB_BIT(RX_EOF)) {
int dropped;
- BUG_ON(first_frag == -1);
+
+ if (unlikely(first_frag == -1)) {
+ reset_rx_queue = true;
+ continue;
+ }
dropped = macb_rx_frame(bp, first_frag, tail);
first_frag = -1;
+ if (unlikely(dropped < 0)) {
+ reset_rx_queue = true;
+ continue;
+ }
if (!dropped) {
received++;
budget--;
@@ -980,6 +1009,26 @@ static int macb_rx(struct macb *bp, int budget)
}
}
+ if (unlikely(reset_rx_queue)) {
+ unsigned long flags;
+ u32 ctrl;
+
+ netdev_err(bp->dev, "RX queue corruption: reset it\n");
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ ctrl = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+
+ macb_init_rx_ring(bp);
+ macb_writel(bp, RBQP, bp->rx_ring_dma);
+
+ macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return received;
+ }
+
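The recovery path added above depends on the ring layout that macb_init_rx_ring() establishes: consecutive buffer addresses with a wrap marker on the last descriptor so the DMA engine cycles back to the base. A generic userspace sketch of that layout (the flag bit and field names are hypothetical, not the MACB register map):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE       8
#define BUF_SIZE        128
#define DESC_WRAP       (1u << 1)       /* hypothetical wrap flag bit */

struct rx_desc {
        uint32_t addr;                  /* buffer address plus flag bits */
        uint32_t ctrl;
};

static void init_rx_ring(struct rx_desc *ring, uint32_t buf_base)
{
        uint32_t addr = buf_base;
        int i;

        for (i = 0; i < RING_SIZE; i++) {
                ring[i].addr = addr;
                ring[i].ctrl = 0;
                addr += BUF_SIZE;
        }
        /* Wrap marker: the DMA engine cycles back to descriptor 0. */
        ring[RING_SIZE - 1].addr |= DESC_WRAP;
}

int main(void)
{
        struct rx_desc ring[RING_SIZE];

        init_rx_ring(ring, 0x10000000);
        printf("last addr word: 0x%08" PRIx32 "\n", ring[RING_SIZE - 1].addr);
        return 0;
}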
if (first_frag != -1)
bp->rx_tail = first_frag;
else
@@ -1097,7 +1146,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- macb_writel(bp, ISR, MACB_BIT(RXUBR));
+ queue_writel(queue, ISR, MACB_BIT(RXUBR));
}
if (status & MACB_BIT(ISR_ROVR)) {
@@ -1520,15 +1569,8 @@ static void gem_init_rings(struct macb *bp)
static void macb_init_rings(struct macb *bp)
{
int i;
- dma_addr_t addr;
- addr = bp->rx_buffers_dma;
- for (i = 0; i < RX_RING_SIZE; i++) {
- bp->rx_ring[i].addr = addr;
- bp->rx_ring[i].ctrl = 0;
- addr += bp->rx_buffer_size;
- }
- bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+ macb_init_rx_ring(bp);
for (i = 0; i < TX_RING_SIZE; i++) {
bp->queues[0].tx_ring[i].addr = 0;
@@ -2124,6 +2166,39 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
}
+static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
+ wol->supported = WAKE_MAGIC;
+
+ if (bp->wol & MACB_WOL_ENABLED)
+ wol->wolopts |= WAKE_MAGIC;
+ }
+}
+
+static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
+ (wol->wolopts & ~WAKE_MAGIC))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ bp->wol |= MACB_WOL_ENABLED;
+ else
+ bp->wol &= ~MACB_WOL_ENABLED;
+
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
+
+ return 0;
+}
+
static const struct ethtool_ops macb_ethtool_ops = {
.get_settings = macb_get_settings,
.set_settings = macb_set_settings,
@@ -2131,6 +2206,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = macb_get_wol,
+ .set_wol = macb_set_wol,
};
static const struct ethtool_ops gem_ethtool_ops = {
@@ -2402,9 +2479,9 @@ static int macb_init(struct platform_device *pdev)
if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
val = GEM_BIT(RGMII);
else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
- (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+ (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
val = MACB_BIT(RMII);
- else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
+ else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
val = MACB_BIT(MII);
if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
@@ -2736,7 +2813,7 @@ static int at91ether_init(struct platform_device *pdev)
}
static const struct macb_config at91sam9260_config = {
- .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
+ .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
};
@@ -2749,21 +2826,22 @@ static const struct macb_config pc302gem_config = {
};
static const struct macb_config sama5d2_config = {
- .caps = 0,
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
};
static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
+ | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
};
static const struct macb_config sama5d4_config = {
- .caps = 0,
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.dma_burst_length = 4,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -2890,6 +2968,11 @@ static int macb_probe(struct platform_device *pdev)
if (macb_config)
bp->jumbo_max_len = macb_config->jumbo_max_len;
+ bp->wol = 0;
+ if (of_get_property(np, "magic-packet", NULL))
+ bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
+ device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+
spin_lock_init(&bp->lock);
/* setup capabilities */
@@ -2913,9 +2996,10 @@ static int macb_probe(struct platform_device *pdev)
phy_node = of_get_next_available_child(np, NULL);
if (phy_node) {
int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
- if (gpio_is_valid(gpio))
+ if (gpio_is_valid(gpio)) {
bp->reset_gpio = gpio_to_desc(gpio);
- gpiod_set_value(bp->reset_gpio, GPIOD_OUT_HIGH);
+ gpiod_direction_output(bp->reset_gpio, 1);
+ }
}
of_node_put(phy_node);
@@ -2985,7 +3069,8 @@ static int macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
/* Shutdown the PHY if there is a GPIO reset */
- gpiod_set_value(bp->reset_gpio, GPIOD_OUT_LOW);
+ if (bp->reset_gpio)
+ gpiod_set_value(bp->reset_gpio, 0);
unregister_netdev(dev);
clk_disable_unprepare(bp->tx_clk);
@@ -3006,9 +3091,15 @@ static int __maybe_unused macb_suspend(struct device *dev)
netif_carrier_off(netdev);
netif_device_detach(netdev);
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
+ if (bp->wol & MACB_WOL_ENABLED) {
+ macb_writel(bp, IER, MACB_BIT(WOL));
+ macb_writel(bp, WOL, MACB_BIT(MAG));
+ enable_irq_wake(bp->queues[0].irq);
+ } else {
+ clk_disable_unprepare(bp->tx_clk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
+ }
return 0;
}
@@ -3019,9 +3110,15 @@ static int __maybe_unused macb_resume(struct device *dev)
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
- clk_prepare_enable(bp->pclk);
- clk_prepare_enable(bp->hclk);
- clk_prepare_enable(bp->tx_clk);
+ if (bp->wol & MACB_WOL_ENABLED) {
+ macb_writel(bp, IDR, MACB_BIT(WOL));
+ macb_writel(bp, WOL, 0);
+ disable_irq_wake(bp->queues[0].irq);
+ } else {
+ clk_prepare_enable(bp->pclk);
+ clk_prepare_enable(bp->hclk);
+ clk_prepare_enable(bp->tx_clk);
+ }
netif_device_attach(netdev);
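The new get_wol/set_wol hooks and the WoL-aware suspend/resume paths above are exercised from userspace through the standard ethtool interface. A minimal sketch that enables magic-packet wake-up via the SIOCETHTOOL ioctl, equivalent to "ethtool -s eth0 wol g" (the interface name is assumed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* name assumed */
        ifr.ifr_data = (char *)&wol;

        /* Query support first, then request magic-packet wake-up. */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0 && (wol.supported & WAKE_MAGIC)) {
                wol.cmd = ETHTOOL_SWOL;
                wol.wolopts = WAKE_MAGIC;
                if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)
                        perror("ETHTOOL_SWOL");
        }
        close(fd);
        return 0;
}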
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 0d4ecfcd60b7..8a13824ef802 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -312,6 +312,8 @@
#define MACB_PFR_SIZE 1
#define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */
#define MACB_PTZ_SIZE 1
+#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */
+#define MACB_WOL_SIZE 1
/* Bitfields in MAN */
#define MACB_DATA_OFFSET 0 /* data */
@@ -398,7 +400,7 @@
/* Capability mask bits */
#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
-#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004
+#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_USRIO_DISABLED 0x00000010
#define MACB_CAPS_FIFO_MODE 0x10000000
@@ -842,6 +844,8 @@ struct macb {
unsigned int rx_frm_len_mask;
unsigned int jumbo_max_len;
+
+ u32 wol;
};
static inline bool macb_is_gem(struct macb *bp)
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 8fb84e69c30e..0ef232d3331e 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -35,7 +35,7 @@ config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT
select PHYLIB
- select MDIO_OCTEON
+ select MDIO_THUNDER
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
@@ -53,4 +53,15 @@ config LIQUIDIO
To compile this driver as a module, choose M here: the module
will be called liquidio. This is recommended.
+config OCTEON_MGMT_ETHERNET
+ tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
+ depends on CAVIUM_OCTEON_SOC
+ select PHYLIB
+ select MDIO_OCTEON
+ default y
+ help
+ Enable the ethernet driver for the management
+ port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
+ CN54XX, CN52XX, and CN6XXX chips.
+
endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
index d22f886ac291..872da9f7c31a 100644
--- a/drivers/net/ethernet/cavium/Makefile
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/octeon/Makefile b/drivers/net/ethernet/cavium/octeon/Makefile
index efa41c1d91c5..efa41c1d91c5 100644
--- a/drivers/net/ethernet/octeon/Makefile
+++ b/drivers/net/ethernet/cavium/octeon/Makefile
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index c177c7cec13b..c177c7cec13b 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 34e9acea8747..83025bb4737c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -257,10 +257,13 @@ struct nicvf_drv_stats {
u64 rx_frames_jumbo;
u64 rx_drops;
+ u64 rcv_buffer_alloc_failures;
+
/* Tx */
u64 tx_frames_ok;
u64 tx_drops;
u64 tx_tso;
+ u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;
};
@@ -269,45 +272,54 @@ struct nicvf {
struct nicvf *pnicvf;
struct net_device *netdev;
struct pci_dev *pdev;
+ void __iomem *reg_base;
+ struct queue_set *qs;
+ struct nicvf_cq_poll *napi[8];
u8 vf_id;
- u8 node;
- u8 tns_mode:1;
- u8 sqs_mode:1;
- u8 loopback_supported:1;
+ u8 sqs_id;
+ bool sqs_mode;
bool hw_tso;
- u16 mtu;
- struct queue_set *qs;
+
+ /* Receive buffer alloc */
+ u32 rb_page_offset;
+ u16 rb_pageref;
+ bool rb_alloc_fail;
+ bool rb_work_scheduled;
+ struct page *rb_page;
+ struct delayed_work rbdr_work;
+ struct tasklet_struct rbdr_task;
+
+ /* Secondary Qset */
+ u8 sqs_count;
#define MAX_SQS_PER_VF_SINGLE_NODE 5
#define MAX_SQS_PER_VF 11
- u8 sqs_id;
- u8 sqs_count; /* Secondary Qset count */
struct nicvf *snicvf[MAX_SQS_PER_VF];
+
+ /* Queue count */
u8 rx_queues;
u8 tx_queues;
u8 max_queues;
- void __iomem *reg_base;
+
+ u8 node;
+ u8 cpi_alg;
+ u16 mtu;
bool link_up;
u8 duplex;
u32 speed;
- struct page *rb_page;
- u32 rb_page_offset;
- bool rb_alloc_fail;
- bool rb_work_scheduled;
- struct delayed_work rbdr_work;
- struct tasklet_struct rbdr_task;
- struct tasklet_struct qs_err_task;
- struct tasklet_struct cq_task;
- struct nicvf_cq_poll *napi[8];
+ bool tns_mode;
+ bool loopback_supported;
struct nicvf_rss_info rss_info;
- u8 cpi_alg;
+ struct tasklet_struct qs_err_task;
+ struct work_struct reset_task;
+
/* Interrupt coalescing settings */
u32 cq_coalesce_usecs;
-
u32 msg_enable;
+
+ /* Stats */
struct nicvf_hw_stats hw_stats;
struct nicvf_drv_stats drv_stats;
struct bgx_stats bgx_stats;
- struct work_struct reset_task;
/* MSI-X */
bool msix_enabled;
@@ -315,6 +327,7 @@ struct nicvf {
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
char irq_name[NIC_VF_MSIX_VECTORS][20];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
+ cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
/* VF <-> PF mailbox communication */
bool pf_acked;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index a12b2e38cf61..d2d8ef270142 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -89,9 +89,11 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
NICVF_DRV_STAT(rx_frames_1518),
NICVF_DRV_STAT(rx_frames_jumbo),
NICVF_DRV_STAT(rx_drops),
+ NICVF_DRV_STAT(rcv_buffer_alloc_failures),
NICVF_DRV_STAT(tx_frames_ok),
NICVF_DRV_STAT(tx_tso),
NICVF_DRV_STAT(tx_drops),
+ NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
};
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a009bc30dc4d..bfee298fc02a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -826,7 +826,7 @@ static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
/* Schedule NAPI */
- napi_schedule(&cq_poll->napi);
+ napi_schedule_irqoff(&cq_poll->napi);
/* Clear interrupt */
nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
@@ -897,6 +897,31 @@ static void nicvf_disable_msix(struct nicvf *nic)
}
}
+static void nicvf_set_irq_affinity(struct nicvf *nic)
+{
+ int vec, cpu;
+ int irqnum;
+
+ for (vec = 0; vec < nic->num_vec; vec++) {
+ if (!nic->irq_allocated[vec])
+ continue;
+
+ if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
+ return;
+ /* CQ interrupts */
+ if (vec < NICVF_INTR_ID_SQ)
+ /* Leave CPU0 for RBDR and other interrupts */
+ cpu = nicvf_netdev_qidx(nic, vec) + 1;
+ else
+ cpu = 0;
+
+ cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
+ nic->affinity_mask[vec]);
+ irqnum = nic->msix_entries[vec].vector;
+ irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
+ }
+}
+
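nicvf_set_irq_affinity() above spreads one completion-queue vector per CPU, preferring CPUs on the device's NUMA node, and leaves CPU0 for housekeeping interrupts. A hedged kernel-context sketch of the affinity-hint pattern (demo_nic is a made-up driver struct):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

/* Made-up driver state; the real driver keys this off its MSI-X table. */
struct demo_nic {
        int node;                       /* NUMA node of the device */
        int num_vec;
        int irq[8];                     /* Linux IRQ number per vector */
        cpumask_var_t mask[8];
};

static void demo_set_irq_affinity(struct demo_nic *nic)
{
        int vec;

        for (vec = 0; vec < nic->num_vec; vec++) {
                if (!zalloc_cpumask_var(&nic->mask[vec], GFP_KERNEL))
                        return;
                /* vector 0 stays near CPU0, the rest are spread out */
                cpumask_set_cpu(cpumask_local_spread(vec, nic->node),
                                nic->mask[vec]);
                irq_set_affinity_hint(nic->irq[vec], nic->mask[vec]);
        }
}

As in the unregister path above, the teardown must clear the hint with irq_set_affinity_hint(irq, NULL) and free_cpumask_var() before free_irq().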
static int nicvf_register_interrupts(struct nicvf *nic)
{
int irq, ret = 0;
@@ -942,8 +967,13 @@ static int nicvf_register_interrupts(struct nicvf *nic)
ret = request_irq(nic->msix_entries[irq].vector,
nicvf_qs_err_intr_handler,
0, nic->irq_name[irq], nic);
- if (!ret)
- nic->irq_allocated[irq] = true;
+ if (ret)
+ goto err;
+
+ nic->irq_allocated[irq] = true;
+
+ /* Set IRQ affinities */
+ nicvf_set_irq_affinity(nic);
err:
if (ret)
@@ -961,6 +991,9 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
if (!nic->irq_allocated[irq])
continue;
+ irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
+ free_cpumask_var(nic->affinity_mask[irq]);
+
if (irq < NICVF_INTR_ID_SQ)
free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
else
@@ -1394,6 +1427,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
netdev_warn(dev, "%s: Transmit timed out, resetting\n",
dev->name);
+ nic->drv_stats.tx_timeout++;
schedule_work(&nic->reset_task);
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 767347b1f631..fa05e347262f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -18,6 +18,15 @@
#include "q_struct.h"
#include "nicvf_queues.h"
+static void nicvf_get_page(struct nicvf *nic)
+{
+ if (!nic->rb_pageref || !nic->rb_page)
+ return;
+
+ atomic_add(nic->rb_pageref, &nic->rb_page->_count);
+ nic->rb_pageref = 0;
+}
+
/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
u64 reg, int bit_pos, int bits, int val)
@@ -78,32 +87,32 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
u32 buf_len, u64 **rbuf)
{
- int order = get_order(buf_len);
+ int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
/* Check if the request can be accommodated in the previously allocated page */
- if (nic->rb_page) {
- if ((nic->rb_page_offset + buf_len + buf_len) >
- (PAGE_SIZE << order)) {
- nic->rb_page = NULL;
- } else {
- nic->rb_page_offset += buf_len;
- get_page(nic->rb_page);
- }
+ if (nic->rb_page &&
+ ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) {
+ nic->rb_pageref++;
+ goto ret;
}
+ nicvf_get_page(nic);
+ nic->rb_page = NULL;
+
/* Allocate a new page */
if (!nic->rb_page) {
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
- netdev_err(nic->netdev,
- "Failed to allocate new rcv buffer\n");
+ nic->drv_stats.rcv_buffer_alloc_failures++;
return -ENOMEM;
}
nic->rb_page_offset = 0;
}
+ret:
*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+ nic->rb_page_offset += buf_len;
return 0;
}
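The rework above carves several receive buffers out of one compound page and batches the reference-count updates: rb_pageref counts buffers handed out, and nicvf_get_page() folds them into the page with a single atomic. A hedged sketch of the batching idea (demo_* names are made up; on kernels with the page_ref accessors, the open-coded atomic_add on page->_count would be page_ref_add()):

#include <linux/mm.h>

/* Made-up state mirroring the rb_page/rb_pageref pair above. */
struct demo_alloc {
        struct page *page;      /* current compound page being carved up */
        unsigned int offset;    /* next free byte inside the page */
        unsigned int pageref;   /* buffers handed out, refs not yet taken */
};

/* Fold the deferred references into the page in one atomic op. */
static void demo_commit_refs(struct demo_alloc *a)
{
        if (!a->pageref || !a->page)
                return;
        page_ref_add(a->page, a->pageref);      /* one atomic for N buffers */
        a->pageref = 0;
}

static void *demo_alloc_buf(struct demo_alloc *a, unsigned int len,
                            unsigned int page_bytes)
{
        if (a->page && a->offset + len <= page_bytes) {
                void *buf = page_address(a->page) + a->offset;

                a->offset += len;
                a->pageref++;           /* reference taken later, in bulk */
                return buf;
        }
        demo_commit_refs(a);            /* close out the old page first */
        /* ... allocate a fresh compound page and reset the offset ... */
        return NULL;
}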
@@ -159,6 +168,9 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
desc = GET_RBDR_DESC(rbdr, idx);
desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
}
+
+ nicvf_get_page(nic);
+
return 0;
}
@@ -242,6 +254,8 @@ refill:
new_rb++;
}
+ nicvf_get_page(nic);
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9df26c2263bc..d20539a6d162 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -886,7 +886,8 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
#ifdef CONFIG_ACPI
-static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
+static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
+ u8 *dst)
{
u8 mac[ETH_ALEN];
int ret;
@@ -897,10 +898,13 @@ static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
goto out;
if (!is_valid_ether_addr(mac)) {
+ dev_err(dev, "MAC address invalid: %pM\n", mac);
ret = -EINVAL;
goto out;
}
+ dev_info(dev, "MAC address set to: %pM\n", mac);
+
memcpy(dst, mac, ETH_ALEN);
out:
return ret;
@@ -911,14 +915,15 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
struct bgx *bgx = context;
+ struct device *dev = &bgx->pdev->dev;
struct acpi_device *adev;
if (acpi_bus_get_device(handle, &adev))
goto out;
- acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
+ acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
+ SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
out:
@@ -968,38 +973,64 @@ static int bgx_init_acpi_phy(struct bgx *bgx)
static int bgx_init_of_phy(struct bgx *bgx)
{
- struct device_node *np;
- struct device_node *np_child;
+ struct fwnode_handle *fwn;
+ struct device_node *node = NULL;
u8 lmac = 0;
- char bgx_sel[5];
- const char *mac;
-
- /* Get BGX node from DT */
- snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
- np = of_find_node_by_name(NULL, bgx_sel);
- if (!np)
- return -ENODEV;
-
- for_each_child_of_node(np, np_child) {
- struct device_node *phy_np = of_parse_phandle(np_child,
- "phy-handle", 0);
- if (!phy_np)
- continue;
- bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
- mac = of_get_mac_address(np_child);
+ device_for_each_child_node(&bgx->pdev->dev, fwn) {
+ struct phy_device *pd;
+ struct device_node *phy_np;
+ const char *mac;
+
+ /* Should always be an OF node. But if it is not, we
+ * cannot handle it, so exit the loop.
+ */
+ node = to_of_node(fwn);
+ if (!node)
+ break;
+
+ mac = of_get_mac_address(node);
if (mac)
ether_addr_copy(bgx->lmac[lmac].mac, mac);
SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
+
+ phy_np = of_parse_phandle(node, "phy-handle", 0);
+ /* If there is no PHY, or if defective firmware presents
+ * this Cortina PHY (for which there is no driver
+ * support), ignore it.
+ */
+ if (phy_np &&
+ !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
+ /* Wait until the phy drivers are available */
+ pd = of_phy_find_device(phy_np);
+ if (!pd)
+ goto defer;
+ bgx->lmac[lmac].phydev = pd;
+ }
+
lmac++;
if (lmac == MAX_LMAC_PER_BGX) {
- of_node_put(np_child);
+ of_node_put(node);
break;
}
}
return 0;
+
+defer:
+ /* We are bailing out; avoid leaking device reference counts
+ * for PHY devices we may have already found.
+ */
+ while (lmac) {
+ if (bgx->lmac[lmac].phydev) {
+ put_device(&bgx->lmac[lmac].phydev->mdio.dev);
+ bgx->lmac[lmac].phydev = NULL;
+ }
+ lmac--;
+ }
+ of_node_put(node);
+ return -EPROBE_DEFER;
}
#else
@@ -1026,9 +1057,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct bgx *bgx = NULL;
u8 lmac;
- /* Load octeon mdio driver */
- octeon_mdiobus_force_mod_depencency();
-
bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
if (!bgx)
return -ENOMEM;
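bgx_init_of_phy() above now returns -EPROBE_DEFER when a PHY driver is not yet available, and unwinds the device references it already took so the eventual re-probe starts from a clean slate. A hedged sketch of that unwind pattern (demo_* names and the lookup helper are hypothetical):

#include <linux/device.h>
#include <linux/errno.h>

struct device *demo_find_phy(int idx); /* hypothetical: takes a reference */

struct demo_port {
        struct device *phy;             /* reference held while bound */
};

static int demo_bind_phys(struct demo_port *ports, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                ports[i].phy = demo_find_phy(i);
                if (!ports[i].phy)
                        goto defer;     /* driver not loaded yet */
        }
        return 0;

defer:
        while (i--) {                   /* drop refs already taken */
                put_device(ports[i].phy);
                ports[i].phy = NULL;
        }
        return -EPROBE_DEFER;
}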
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 4d187f22c48b..4686a85a8a22 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -96,6 +96,17 @@ config CHELSIO_T4_DCB
If unsure, say N.
+config CHELSIO_T4_UWIRE
+ bool "Unified Wire Support for Chelsio T5 cards"
+ default n
+ depends on CHELSIO_T4
+ ---help---
+ Enable unified-wire offload features.
+ Say Y here if you want to enable unified-wire over Ethernet
+ in the driver.
+
+ If unsure, say N.
+
config CHELSIO_T4_FCOE
bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
default n
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index ace0ab98d0f1..85c92821b239 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
+cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) += cxgb4_ppm.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ec6e849676c1..326d4009525e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,9 @@ enum {
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
+
+ /* # of streaming iSCSIT Rx queues */
+ MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
};
enum {
@@ -420,8 +423,8 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
- MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
- + MAX_RDMA_CIQS + INGQ_EXTRAS,
+ MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
+ MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
};
struct adapter;
@@ -508,6 +511,15 @@ struct pkt_gl {
typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
+typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
+/* LRO related declarations for ULD */
+struct t4_lro_mgr {
+#define MAX_LRO_SESSIONS 64
+ u8 lro_session_cnt; /* # of sessions to aggregate */
+ unsigned long lro_pkts; /* # of LRO super packets */
+ unsigned long lro_merged; /* # of wire packets merged by LRO */
+ struct sk_buff_head lroq; /* list of aggregated sessions */
+};
struct sge_rspq { /* state for an SGE response queue */
struct napi_struct napi;
@@ -532,6 +544,8 @@ struct sge_rspq { /* state for an SGE response queue */
struct adapter *adap;
struct net_device *netdev; /* associated net device */
rspq_handler_t handler;
+ rspq_flush_handler_t flush_handler;
+ struct t4_lro_mgr lro_mgr;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define CXGB_POLL_STATE_IDLE 0
#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */
@@ -641,6 +655,7 @@ struct sge {
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
+ struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
@@ -652,9 +667,11 @@ struct sge {
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 iscsiqsets; /* # of active iSCSI queue sets */
+ u16 niscsitq; /* # of available iSCSIT Rx queues */
u16 rdmaqs; /* # of available RDMA Rx queues */
u16 rdmaciqs; /* # of available RDMA concentrator IQs */
u16 iscsi_rxq[MAX_OFLD_QSETS];
+ u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
u16 rdma_rxq[MAX_RDMA_QUEUES];
u16 rdma_ciq[MAX_RDMA_CIQS];
u16 timer_val[SGE_NTIMERS];
@@ -681,6 +698,7 @@ struct sge {
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
+#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
@@ -702,6 +720,11 @@ struct doorbell_stats {
u32 db_full;
};
+struct hash_mac_addr {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -740,6 +763,9 @@ struct adapter {
void *uld_handle[CXGB4_ULD_MAX];
struct list_head list_node;
struct list_head rcu_node;
+ struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
+
+ void *iscsi_ppm;
struct tid_info tids;
void **tid_release_head;
@@ -1107,7 +1133,8 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd, int cong);
+ struct sge_fl *fl, rspq_handler_t hnd,
+ rspq_flush_handler_t flush_handler, int cong);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *netdevq,
unsigned int iqid);
@@ -1207,6 +1234,24 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
+/**
+ * hash_mac_addr - return the hash value of a MAC address
+ * @addr: the 48-bit Ethernet MAC address
+ *
+ * Hashes a MAC address according to the hash function used by HW inexact
+ * (hash) address matching.
+ */
+static inline int hash_mac_addr(const u8 *addr)
+{
+ u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+ u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+
+ a ^= b;
+ a ^= (a >> 12);
+ a ^= (a >> 6);
+ return a & 0x3f;
+}
+
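hash_mac_addr() folds the 48-bit address down to a 6-bit bucket, matching the hardware's inexact (hash) filter. A standalone check of the fold for an arbitrary sample address:

#include <stdint.h>
#include <stdio.h>

/* Same fold as hash_mac_addr() above: xor the two 24-bit halves,
 * fold in 12- and 6-bit shifts, keep the low 6 bits. */
static int demo_hash_mac(const uint8_t *addr)
{
        uint32_t a = ((uint32_t)addr[0] << 16) | ((uint32_t)addr[1] << 8) | addr[2];
        uint32_t b = ((uint32_t)addr[3] << 16) | ((uint32_t)addr[4] << 8) | addr[5];

        a ^= b;
        a ^= (a >> 12);
        a ^= (a >> 6);
        return a & 0x3f;
}

int main(void)
{
        /* Sample locally administered address, chosen arbitrarily. */
        const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

        printf("hash bucket: %d\n", demo_hash_mac(mac));
        return 0;
}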
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
@@ -1389,6 +1434,9 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
+int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, unsigned int naddr,
+ const u8 **addr, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int idx, const u8 *addr, bool persist, bool add_smt);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
@@ -1403,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 *valp);
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int iqtype, unsigned int iqid,
unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index e6a4072b494b..0bb41e9b9b1c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2334,12 +2334,14 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
+ int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
int iscsi_idx = r - eth_entries;
- int rdma_idx = iscsi_idx - iscsi_entries;
+ int iscsit_idx = iscsi_idx - iscsi_entries;
+ int rdma_idx = iscsit_idx - iscsit_entries;
int ciq_idx = rdma_idx - rdma_entries;
int ctrl_idx = ciq_idx - ciq_entries;
int fq_idx = ctrl_idx - ctrl_entries;
@@ -2453,6 +2455,35 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
+ } else if (iscsit_idx < iscsit_entries) {
+ const struct sge_ofld_rxq *rx =
+ &adap->sge.iscsitrxq[iscsit_idx * 4];
+ int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
+
+ S("QType:", "iSCSIT");
+ R("RspQ ID:", rspq.abs_id);
+ R("RspQ size:", rspq.size);
+ R("RspQE size:", rspq.iqe_len);
+ R("RspQ CIDX:", rspq.cidx);
+ R("RspQ Gen:", rspq.gen);
+ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+ S3("u", "Intr pktcnt:",
+ adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+ R("FL ID:", fl.cntxt_id);
+ R("FL size:", fl.size - 8);
+ R("FL pend:", fl.pend_cred);
+ R("FL avail:", fl.avail);
+ R("FL PIDX:", fl.pidx);
+ R("FL CIDX:", fl.cidx);
+ RL("RxPackets:", stats.pkts);
+ RL("RxImmPkts:", stats.imm);
+ RL("RxNoMem:", stats.nomem);
+ RL("FLAllocErr:", fl.alloc_failed);
+ RL("FLLrgAlcErr:", fl.large_alloc_failed);
+ RL("FLMapErr:", fl.mapping_err);
+ RL("FLLow:", fl.low);
+ RL("FLStarving:", fl.starving);
+
} else if (rdma_idx < rdma_entries) {
const struct sge_ofld_rxq *rx =
&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2543,6 +2574,7 @@ static int sge_queue_entries(const struct adapter *adap)
{
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
+ DIV_ROUND_UP(adap->sge.niscsitq, 4) +
DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index b8a5fb0c32d4..d1e3f0997d6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -227,7 +227,7 @@ static DEFINE_MUTEX(uld_mutex);
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
-static const char *uld_str[] = { "RDMA", "iSCSI" };
+static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
static void link_report(struct net_device *dev)
{
@@ -338,84 +338,108 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
+int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
+module_param(dbfifo_int_thresh, int, 0644);
+MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
+
/*
- * Configure the exact and hash address filters to handle a port's multicast
- * and secondary unicast MAC addresses.
+ * usecs to sleep while draining the dbfifo
*/
-static int set_addr_filters(const struct net_device *dev, bool sleep)
+static int dbfifo_drain_delay = 1000;
+module_param(dbfifo_drain_delay, int, 0644);
+MODULE_PARM_DESC(dbfifo_drain_delay,
+ "usecs to sleep while draining the dbfifo");
+
+static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
+ struct adapter *adap = pi->adapter;
+ u64 vec = 0;
+ bool ucast = false;
+ struct hash_mac_addr *entry;
+
+ /* Calculate the hash vector for the updated list and program it */
+ list_for_each_entry(entry, &adap->mac_hlist, list) {
+ ucast |= is_unicast_ether_addr(entry->addr);
+ vec |= (1ULL << hash_mac_addr(entry->addr));
+ }
+ return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
+ vec, false);
+}
+
+static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adap = pi->adapter;
+ int ret;
u64 mhash = 0;
u64 uhash = 0;
- bool free = true;
- u16 filt_idx[7];
- const u8 *addr[7];
- int ret, naddr = 0;
- const struct netdev_hw_addr *ha;
- int uc_cnt = netdev_uc_count(dev);
- int mc_cnt = netdev_mc_count(dev);
- const struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->pf;
+ bool free = false;
+ bool ucast = is_unicast_ether_addr(mac_addr);
+ const u8 *maclist[1] = {mac_addr};
+ struct hash_mac_addr *new_entry;
- /* first do the secondary unicast addresses */
- netdev_for_each_uc_addr(ha, dev) {
- addr[naddr++] = ha->addr;
- if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
- ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
- naddr, addr, filt_idx, &uhash, sleep);
- if (ret < 0)
- return ret;
-
- free = false;
- naddr = 0;
- }
+ ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
+ NULL, ucast ? &uhash : &mhash, false);
+ if (ret < 0)
+ goto out;
+ /* if hash != 0, add the addr to the hash addr list
+ * so that at the end we can calculate the hash for
+ * the list and program it
+ */
+ if (uhash || mhash) {
+ new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
+ if (!new_entry)
+ return -ENOMEM;
+ ether_addr_copy(new_entry->addr, mac_addr);
+ list_add_tail(&new_entry->list, &adap->mac_hlist);
+ ret = cxgb4_set_addr_hash(pi);
}
+out:
+ return ret < 0 ? ret : 0;
+}
- /* next set up the multicast addresses */
- netdev_for_each_mc_addr(ha, dev) {
- addr[naddr++] = ha->addr;
- if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
- ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
- naddr, addr, filt_idx, &mhash, sleep);
- if (ret < 0)
- return ret;
+static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adap = pi->adapter;
+ int ret;
+ const u8 *maclist[1] = {mac_addr};
+ struct hash_mac_addr *entry, *tmp;
- free = false;
- naddr = 0;
+ /* If the MAC address to be removed is in the hash addr
+ * list, delete it from the list and update hash vector
+ */
+ list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
+ if (ether_addr_equal(entry->addr, mac_addr)) {
+ list_del(&entry->list);
+ kfree(entry);
+ return cxgb4_set_addr_hash(pi);
}
}
- return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
- uhash | mhash, sleep);
+ ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
+ return ret < 0 ? -EINVAL : 0;
}
-int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
-module_param(dbfifo_int_thresh, int, 0644);
-MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
-
-/*
- * usecs to sleep while draining the dbfifo
- */
-static int dbfifo_drain_delay = 1000;
-module_param(dbfifo_drain_delay, int, 0644);
-MODULE_PARM_DESC(dbfifo_drain_delay,
- "usecs to sleep while draining the dbfifo");
-
/*
* Set Rx properties of a port, such as promiscuity, address filters, and MTU.
* If @mtu is -1 it is left unchanged.
*/
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
- ret = set_addr_filters(dev, sleep_ok);
- if (ret == 0)
- ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
- (dev->flags & IFF_PROMISC) ? 1 : 0,
- (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
- sleep_ok);
- return ret;
+ if (!(dev->flags & IFF_PROMISC)) {
+ __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
+ if (!(dev->flags & IFF_ALLMULTI))
+ __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
+ }
+
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
+ (dev->flags & IFF_PROMISC) ? 1 : 0,
+ (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
+ sleep_ok);
}
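set_rxmode() now delegates the list bookkeeping to __dev_uc_sync()/__dev_mc_sync(), which diff the netdev's address lists against the driver's view and invoke the sync/unsync hooks once per added or removed entry; the hooks run with the address lists locked, so they must not sleep (hence the non-sleeping/GFP_ATOMIC flavors above). A hedged sketch of that contract (the demo_hw_* filter helpers are hypothetical):

#include <linux/netdevice.h>

int demo_hw_add_filter(void *priv, const unsigned char *addr); /* hypothetical */
int demo_hw_del_filter(void *priv, const unsigned char *addr); /* hypothetical */

static int demo_mac_sync(struct net_device *dev, const unsigned char *addr)
{
        return demo_hw_add_filter(netdev_priv(dev), addr);
}

static int demo_mac_unsync(struct net_device *dev, const unsigned char *addr)
{
        return demo_hw_del_filter(netdev_priv(dev), addr);
}

static void demo_set_rx_mode(struct net_device *dev)
{
        if (!(dev->flags & IFF_PROMISC)) {
                __dev_uc_sync(dev, demo_mac_sync, demo_mac_unsync);
                if (!(dev->flags & IFF_ALLMULTI))
                        __dev_mc_sync(dev, demo_mac_sync, demo_mac_unsync);
        }
        /* ... then program promisc/allmulti into the MAC ... */
}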
/**
@@ -640,6 +664,13 @@ out:
return 0;
}
+/* Flush the aggregated lro sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+ if (ulds[q->uld].lro_flush)
+ ulds[q->uld].lro_flush(&q->lro_mgr);
+}
+
/**
* uldrx_handler - response queue handler for ULD queues
* @q: the response queue that received the packet
@@ -653,6 +684,7 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+ int ret;
/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
*/
@@ -660,10 +692,19 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
rsp += 2;
- if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+ if (q->flush_handler)
+ ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
+ rsp, gl, &q->lro_mgr,
+ &q->napi);
+ else
+ ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
+ rsp, gl);
+
+ if (ret) {
rxq->stats.nomem++;
return -1;
}
+
if (gl == NULL)
rxq->stats.imm++;
else if (gl == CXGB4_MSG_AN)
@@ -730,6 +771,10 @@ static void name_msix_vecs(struct adapter *adap)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
adap->port[0]->name, i);
+ for_each_iscsitrxq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
+ adap->port[0]->name, i);
+
for_each_rdmarxq(&adap->sge, i)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
adap->port[0]->name, i);
@@ -743,6 +788,7 @@ static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+ int iscsitqidx = 0;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -768,6 +814,15 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
+ for_each_iscsitrxq(s, iscsitqidx) {
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
+ &s->iscsitrxq[iscsitqidx].rspq);
+ if (err)
+ goto unwind;
+ msi_index++;
+ }
for_each_rdmarxq(s, rdmaqidx) {
err = request_irq(adap->msix_info[msi_index].vec,
t4_sge_intr_msix, 0,
@@ -795,6 +850,9 @@ unwind:
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
+ while (--iscsitqidx >= 0)
+ free_irq(adap->msix_info[--msi_index].vec,
+ &s->iscsitrxq[iscsitqidx].rspq);
while (--iscsiqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->iscsirxq[iscsiqidx].rspq);
@@ -816,6 +874,9 @@ static void free_msix_queue_irqs(struct adapter *adap)
for_each_iscsirxq(s, i)
free_irq(adap->msix_info[msi_index++].vec,
&s->iscsirxq[i].rspq);
+ for_each_iscsitrxq(s, i)
+ free_irq(adap->msix_info[msi_index++].vec,
+ &s->iscsitrxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
for_each_rdmaciq(s, i)
@@ -960,7 +1021,7 @@ static void enable_rx(struct adapter *adap)
static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
unsigned int nq, unsigned int per_chan, int msi_idx,
- u16 *ids)
+ u16 *ids, bool lro)
{
int i, err;
@@ -970,7 +1031,9 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler, 0);
+ uldrx_handler,
+ lro ? uldrx_flush_handler : NULL,
+ 0);
if (err)
return err;
memset(&q->stats, 0, sizeof(q->stats));
@@ -1000,7 +1063,7 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
else {
err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
- NULL, NULL, -1);
+ NULL, NULL, NULL, -1);
if (err)
return err;
msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1020,7 +1083,7 @@ static int setup_sge_queues(struct adapter *adap)
* new/deleted queues.
*/
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- msi_idx, NULL, fwevtq_handler, -1);
+ msi_idx, NULL, fwevtq_handler, NULL, -1);
if (err) {
freeout: t4_free_sge_resources(adap);
return err;
@@ -1038,6 +1101,7 @@ freeout: t4_free_sge_resources(adap);
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
msi_idx, &q->fl,
t4_ethrx_handler,
+ NULL,
t4_get_mps_bg_map(adap,
pi->tx_chan));
if (err)
@@ -1063,18 +1127,19 @@ freeout: t4_free_sge_resources(adap);
goto freeout;
}
-#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
- err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
+#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
+ err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
if (err) \
goto freeout; \
if (msi_idx > 0) \
msi_idx += nq; \
} while (0)
- ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq);
- ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
+ ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
+ ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
+ ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
- ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
+ ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
#undef ALLOC_OFLD_RXQS
@@ -2406,6 +2471,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
} else if (uld == CXGB4_ULD_ISCSI) {
lli.rxq_ids = adap->sge.iscsi_rxq;
lli.nrxq = adap->sge.iscsiqsets;
+ } else if (uld == CXGB4_ULD_ISCSIT) {
+ lli.rxq_ids = adap->sge.iscsit_rxq;
+ lli.nrxq = adap->sge.niscsitq;
}
lli.ntxq = adap->sge.iscsiqsets;
lli.nchan = adap->params.nports;
@@ -2413,6 +2481,10 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.wr_cred = adap->params.ofldq_wr_cred;
lli.adapter_type = adap->params.chip;
lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+ lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+ lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+ lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+ lli.iscsi_ppm = &adap->iscsi_ppm;
lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
lli.udb_density = 1 << adap->params.sge.eq_qpp;
lli.ucq_density = 1 << adap->params.sge.iq_qpp;
@@ -2677,6 +2749,8 @@ static int cxgb_up(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
update_clip(adap);
#endif
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adap->mac_hlist);
out:
return err;
irq_err:
@@ -4310,6 +4384,9 @@ static void cfg_queues(struct adapter *adap)
s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
adap->params.nports;
s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
+
+ if (!is_t4(adap->params.chip))
+ s->niscsitq = s->iscsiqsets;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4336,6 +4413,16 @@ static void cfg_queues(struct adapter *adap)
r->fl.size = 72;
}
+ if (!is_t4(adap->params.chip)) {
+ for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
+ struct sge_ofld_rxq *r = &s->iscsitrxq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
+ r->rspq.uld = CXGB4_ULD_ISCSIT;
+ r->fl.size = 72;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
struct sge_ofld_rxq *r = &s->rdmarxq[i];
@@ -4410,9 +4497,13 @@ static int enable_msix(struct adapter *adap)
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets;
+ want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
+ s->niscsitq;
/* need nchan for each possible ULD */
- ofld_need = 3 * nchan;
+ if (is_t4(adap->params.chip))
+ ofld_need = 3 * nchan;
+ else
+ ofld_need = 4 * nchan;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
@@ -4444,12 +4535,16 @@ static int enable_msix(struct adapter *adap)
if (allocated < want) {
s->rdmaqs = nchan;
s->rdmaciqs = nchan;
+
+ if (!is_t4(adap->params.chip))
+ s->niscsitq = nchan;
}
/* leftovers go to OFLD */
i = allocated - EXTRA_VECS - s->max_ethqsets -
- s->rdmaqs - s->rdmaciqs;
+ s->rdmaqs - s->rdmaciqs - s->niscsitq;
s->iscsiqsets = (i / nchan) * nchan; /* round down */
+
}
for (i = 0; i < allocated; ++i)
adap->msix_info[i].vec = entries[i].vector;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
new file mode 100644
index 000000000000..d88a7a7b2400
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
@@ -0,0 +1,464 @@
+/*
+ * cxgb4_ppm.c: Chelsio common library for T4/T5 iSCSI PagePod Manager
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+
+#include "cxgb4_ppm.h"
+
+/* Direct Data Placement -
+ * Directly place the iSCSI Data-In or Data-Out PDU's payload into
+ * pre-posted final destination host-memory buffers based on the
+ * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT)
+ * in Data-Out PDUs. The host memory address is programmed into
+ * h/w in the format of pagepod entries. The location of the
+ * pagepod entry is encoded into the DDP tag, which is used as the
+ * base for the ITT/TTT.
+ */
+
+/* Direct-Data Placement page size adjustment
+ */
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ int i;
+
+ for (i = 0; i < DDP_PGIDX_MAX; i++) {
+ if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT +
+ tformat->pgsz_order[i])) {
+ pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n",
+ __func__, ppm->ndev->name, pgsz, i);
+ return i;
+ }
+ }
+ pr_info("ippm: ddp page size %lu not supported.\n", pgsz);
+ return DDP_PGIDX_MAX;
+}
+
+/* DDP setup & teardown
+ */
+static int ppm_find_unused_entries(unsigned long *bmap,
+ unsigned int max_ppods,
+ unsigned int start,
+ unsigned int nr,
+ unsigned int align_mask)
+{
+ unsigned long i;
+
+ i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask);
+
+ if (unlikely(i >= max_ppods) && (start > nr))
+ i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1,
+ align_mask);
+ if (unlikely(i >= max_ppods))
+ return -ENOSPC;
+
+ bitmap_set(bmap, i, nr);
+ return (int)i;
+}
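Illustrative fragment of the first-pass search semantics (not part of this patch; the 8-pod bitmap and indices are hypothetical):

	DECLARE_BITMAP(bmap, 8);
	int idx;

	bitmap_zero(bmap, 8);
	bitmap_set(bmap, 0, 4);	/* pods 0..3 in use */
	/* search from index 4 for 2 contiguous free pods */
	idx = ppm_find_unused_entries(bmap, 8, 4, 2, 0);
	/* idx == 4; bits 4 and 5 are now marked busy */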
+
+static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count,
+ unsigned long caller_data)
+{
+ struct cxgbi_ppod_data *pdata = ppm->ppod_data + i;
+
+ pdata->caller_data = caller_data;
+ pdata->npods = count;
+
+ if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1))
+ pdata->color = 0;
+ else
+ pdata->color++;
+}
+
+static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count,
+ unsigned long caller_data)
+{
+ struct cxgbi_ppm_pool *pool;
+ unsigned int cpu;
+ int i;
+
+ cpu = get_cpu();
+ pool = per_cpu_ptr(ppm->pool, cpu);
+ spin_lock_bh(&pool->lock);
+ put_cpu();
+
+ i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
+ pool->next, count, 0);
+ if (i < 0) {
+ pool->next = 0;
+ spin_unlock_bh(&pool->lock);
+ return -ENOSPC;
+ }
+
+ pool->next = i + count;
+ if (pool->next >= ppm->pool_index_max)
+ pool->next = 0;
+
+ spin_unlock_bh(&pool->lock);
+
+ pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n",
+ __func__, cpu, i, count, i + cpu * ppm->pool_index_max,
+ pool->next);
+
+ i += cpu * ppm->pool_index_max;
+ ppm_mark_entries(ppm, i, count, caller_data);
+
+ return i;
+}
+
+static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count,
+ unsigned long caller_data)
+{
+ int i;
+
+ spin_lock_bh(&ppm->map_lock);
+ i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max,
+ ppm->next, count, 0);
+ if (i < 0) {
+ ppm->next = 0;
+ spin_unlock_bh(&ppm->map_lock);
+ pr_debug("ippm: NO suitable entries %u available.\n",
+ count);
+ return -ENOSPC;
+ }
+
+ ppm->next = i + count;
+ if (ppm->next >= ppm->bmap_index_max)
+ ppm->next = 0;
+
+ spin_unlock_bh(&ppm->map_lock);
+
+ pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n",
+ __func__, i, count, i + ppm->pool_rsvd, ppm->next,
+ caller_data);
+
+ i += ppm->pool_rsvd;
+ ppm_mark_entries(ppm, i, count, caller_data);
+
+ return i;
+}
+
+static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count)
+{
+ pr_debug("%s: idx %d + %d.\n", __func__, i, count);
+
+ if (i < ppm->pool_rsvd) {
+ unsigned int cpu;
+ struct cxgbi_ppm_pool *pool;
+
+ cpu = i / ppm->pool_index_max;
+ i %= ppm->pool_index_max;
+
+ pool = per_cpu_ptr(ppm->pool, cpu);
+ spin_lock_bh(&pool->lock);
+ bitmap_clear(pool->bmap, i, count);
+
+ if (i < pool->next)
+ pool->next = i;
+ spin_unlock_bh(&pool->lock);
+
+ pr_debug("%s: cpu %u, idx %d, next %u.\n",
+ __func__, cpu, i, pool->next);
+ } else {
+ spin_lock_bh(&ppm->map_lock);
+
+ i -= ppm->pool_rsvd;
+ bitmap_clear(ppm->ppod_bmap, i, count);
+
+ if (i < ppm->next)
+ ppm->next = i;
+ spin_unlock_bh(&ppm->map_lock);
+
+ pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next);
+ }
+}
+
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx)
+{
+ struct cxgbi_ppod_data *pdata;
+
+ if (idx >= ppm->ppmax) {
+ pr_warn("ippm: idx too big %u > %u.\n", idx, ppm->ppmax);
+ return;
+ }
+
+ pdata = ppm->ppod_data + idx;
+ if (!pdata->npods) {
+ pr_warn("ippm: idx %u, npods 0.\n", idx);
+ return;
+ }
+
+ pr_debug("release idx %u, npods %u.\n", idx, pdata->npods);
+ ppm_unmark_entries(ppm, idx, pdata->npods);
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppod_release);
+
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages,
+ u32 per_tag_pg_idx, u32 *ppod_idx,
+ u32 *ddp_tag, unsigned long caller_data)
+{
+ struct cxgbi_ppod_data *pdata;
+ unsigned int npods;
+ int idx = -1;
+ unsigned int hwidx;
+ u32 tag;
+
+ npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
+ if (!npods) {
+ pr_warn("%s: pages %u -> npods %u, full.\n",
+ __func__, nr_pages, npods);
+ return -EINVAL;
+ }
+
+ /* grab from cpu pool first */
+ idx = ppm_get_cpu_entries(ppm, npods, caller_data);
+ /* try the general pool */
+ if (idx < 0)
+ idx = ppm_get_entries(ppm, npods, caller_data);
+ if (idx < 0) {
+ pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n",
+ nr_pages, npods, ppm->next, caller_data);
+ return idx;
+ }
+
+ pdata = ppm->ppod_data + idx;
+ hwidx = ppm->base_idx + idx;
+
+ tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color);
+
+ if (per_tag_pg_idx)
+ tag |= (per_tag_pg_idx << 30) & 0xC0000000;
+
+ *ppod_idx = idx;
+ *ddp_tag = tag;
+
+ pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n",
+ nr_pages, tag, idx, npods, caller_data);
+
+ return npods;
+}
+EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve);
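Hedged caller-side sketch of the reserve/release pairing (not from the patch; `ppm` and `io` are hypothetical, and the per-tag page-size index is left at 0):

	u32 idx, tag;
	int npods;

	/* reserve pagepods for a 9-page buffer: (9 + 3) >> 2 == 3 pods */
	npods = cxgbi_ppm_ppods_reserve(ppm, 9, 0, &idx, &tag,
					(unsigned long)io);
	if (npods < 0)
		return npods;	/* no pods free: fall back to non-ddp */

	/* ... program npods pagepods; use tag as the base for the ITT/TTT ... */

	cxgbi_ppm_ppod_release(ppm, idx);	/* on I/O completion */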
+
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+ unsigned int tid, unsigned int offset,
+ unsigned int length,
+ struct cxgbi_pagepod_hdr *hdr)
+{
+ /* The ddp tag stored in the pagepod must have bits 31:30 cleared.
+ * The ddp tag sent on the wire to the peer must carry non-zero
+ * bits 31:30.
+ */
+ tag &= 0x3FFFFFFF;
+
+ hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
+
+ hdr->rsvd = 0;
+ hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask);
+ hdr->max_offset = htonl(length);
+ hdr->page_offset = htonl(offset);
+
+ pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n",
+ tag, tid, length, offset);
+}
+EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr);
+
+static void ppm_free(struct cxgbi_ppm *ppm)
+{
+ vfree(ppm);
+}
+
+static void ppm_destroy(struct kref *kref)
+{
+ struct cxgbi_ppm *ppm = container_of(kref,
+ struct cxgbi_ppm,
+ refcnt);
+ pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n",
+ ppm->ndev->name, ppm);
+
+ *ppm->ppm_pp = NULL;
+
+ free_percpu(ppm->pool);
+ ppm_free(ppm);
+}
+
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
+{
+ if (ppm) {
+ int rv;
+
+ rv = kref_put(&ppm->refcnt, ppm_destroy);
+ return rv;
+ }
+ return 1;
+}
+
+static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
+ unsigned int *pcpu_ppmax)
+{
+ struct cxgbi_ppm_pool *pools;
+ unsigned int ppmax = (*total) / num_possible_cpus();
+ unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3;
+ unsigned int bmap;
+ unsigned int alloc_sz;
+ unsigned int count = 0;
+ unsigned int cpu;
+
+ /* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */
+ if (ppmax > max)
+ ppmax = max;
+
+ /* pool size must be a multiple of unsigned long */
+ bmap = BITS_TO_LONGS(ppmax);
+ ppmax = (bmap * sizeof(unsigned long)) << 3;
+
+ alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
+ pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool));
+
+ if (!pools)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu);
+
+ memset(ppool, 0, alloc_sz);
+ spin_lock_init(&ppool->lock);
+ count += ppmax;
+ }
+
+ *total = count;
+ *pcpu_ppmax = ppmax;
+
+ return pools;
+}
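Worked sizing instance (assumed numbers, not from the patch): with *total = 8192 and 16 possible CPUs, ppmax starts at 512; BITS_TO_LONGS(512) is 8 on a 64-bit kernel, so the per-cpu bitmap takes 8 longs and ppmax is re-derived as (8 * 8) << 3 = 512 bits; each per-cpu slice then needs sizeof(*pools) + 64 bytes, and *total is rewritten to 16 * 512 = 8192.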
+
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
+ struct pci_dev *pdev, void *lldev,
+ struct cxgbi_tag_format *tformat,
+ unsigned int ppmax,
+ unsigned int llimit,
+ unsigned int start,
+ unsigned int reserve_factor)
+{
+ struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp);
+ struct cxgbi_ppm_pool *pool = NULL;
+ unsigned int ppmax_pool = 0;
+ unsigned int pool_index_max = 0;
+ unsigned int alloc_sz;
+ unsigned int ppod_bmap_size;
+
+ if (ppm) {
+ pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+ ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax);
+ kref_get(&ppm->refcnt);
+ return 1;
+ }
+
+ if (reserve_factor) {
+ ppmax_pool = ppmax / reserve_factor;
+ pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+
+ pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
+ ndev->name, ppmax, ppmax_pool, pool_index_max);
+ }
+
+ ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool);
+ alloc_sz = sizeof(struct cxgbi_ppm) +
+ ppmax * (sizeof(struct cxgbi_ppod_data)) +
+ ppod_bmap_size * sizeof(unsigned long);
+
+ ppm = vmalloc(alloc_sz);
+ if (!ppm)
+ goto release_ppm_pool;
+
+ memset(ppm, 0, alloc_sz);
+
+ ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]);
+
+ if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) {
+ unsigned int start = ppmax - ppmax_pool;
+ unsigned int end = ppod_bmap_size >> 3;
+
+ bitmap_set(ppm->ppod_bmap, ppmax, end - start);
+ pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n",
+ __func__, ppmax, ppmax_pool, ppod_bmap_size, start,
+ end);
+ }
+
+ spin_lock_init(&ppm->map_lock);
+ kref_init(&ppm->refcnt);
+
+ memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format));
+
+ ppm->ppm_pp = ppm_pp;
+ ppm->ndev = ndev;
+ ppm->pdev = pdev;
+ ppm->lldev = lldev;
+ ppm->ppmax = ppmax;
+ ppm->next = 0;
+ ppm->llimit = llimit;
+ ppm->base_idx = start > llimit ?
+ (start - llimit + 1) >> PPOD_SIZE_SHIFT : 0;
+ ppm->bmap_index_max = ppmax - ppmax_pool;
+
+ ppm->pool = pool;
+ ppm->pool_rsvd = ppmax_pool;
+ ppm->pool_index_max = pool_index_max;
+
+ /* check one more time */
+ if (*ppm_pp) {
+ ppm_free(ppm);
+ ppm = (struct cxgbi_ppm *)(*ppm_pp);
+
+ pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n",
+ ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax);
+
+ kref_get(&ppm->refcnt);
+ return 1;
+ }
+ *ppm_pp = ppm;
+
+ ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE);
+
+ pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n",
+ ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE,
+ ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd,
+ ppm->pool_index_max);
+
+ return 0;
+
+release_ppm_pool:
+ free_percpu(pool);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(cxgbi_ppm_init);
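Minimal sketch of how a lower-level driver might instantiate the manager (the `iscsi_ppm` anchor and the region parameters `tformat`, `ppmax`, `llimit` and `start` are hypothetical):

	static void *iscsi_ppm;	/* hypothetical per-adapter anchor */

	err = cxgbi_ppm_init(&iscsi_ppm, adap->port[0], adap->pdev, adap,
			     &tformat, ppmax, llimit, start,
			     4);	/* reserve 1/4 of the pods for per-cpu pools */
	if (err < 0)
		return err;
	/* err == 1 means an existing ppm was found and its refcount taken */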
+
+unsigned int cxgbi_tagmask_set(unsigned int ppmax)
+{
+ unsigned int bits = fls(ppmax);
+
+ if (bits > PPOD_IDX_MAX_SIZE)
+ bits = PPOD_IDX_MAX_SIZE;
+
+ pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n",
+ ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT));
+
+ return 1 << (bits + PPOD_IDX_SHIFT);
+}
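Worked instance (assumed numbers): for ppmax = 16384, fls(16384) = 15, so the function returns 1 << (15 + 6) = 0x200000. Feeding that value into cxgbi_tagmask_check() in cxgb4_ppm.h yields rsvd_bits = 22, idx_bits = 22 - 1 - 6 = 15 (enough to index all 16384 pods), color_bits = 6 and no_ddp_mask = 0x200000.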
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
new file mode 100644
index 000000000000..d48732673b75
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
@@ -0,0 +1,310 @@
+/*
+ * cxgb4_ppm.h: Chelsio common library for T4/T5 iSCSI ddp operation
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Karen Xie (kxie@chelsio.com)
+ */
+
+#ifndef __CXGB4PPM_H__
+#define __CXGB4PPM_H__
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/bitmap.h>
+
+struct cxgbi_pagepod_hdr {
+ u32 vld_tid;
+ u32 pgsz_tag_clr;
+ u32 max_offset;
+ u32 page_offset;
+ u64 rsvd;
+};
+
+#define PPOD_PAGES_MAX 4
+struct cxgbi_pagepod {
+ struct cxgbi_pagepod_hdr hdr;
+ u64 addr[PPOD_PAGES_MAX + 1];
+};
+
+/* ddp tag format
+ * for a 32-bit tag:
+ * bit #
+ * 31 .....   .....  0
+ *     X   Y...Y  Z...Z, where
+ *     ^   ^^^^^  ^^^^^
+ *     |   |      |____ when ddp bit = 0: color bits
+ *     |   |
+ *     |   |____ when ddp bit = 0: idx into the ddp memory region
+ *     |
+ *     |____ ddp bit: 0 - ddp tag, 1 - non-ddp tag
+ *
+ * [page selector:2] [sw/free bits] [0] [idx] [color:6]
+ */
+
+#define DDP_PGIDX_MAX 4
+#define DDP_PGSZ_BASE_SHIFT 12 /* base page 4K */
+
+struct cxgbi_task_tag_info {
+ unsigned char flags;
+#define CXGBI_PPOD_INFO_FLAG_VALID 0x1
+#define CXGBI_PPOD_INFO_FLAG_MAPPED 0x2
+ unsigned char cid;
+ unsigned short pg_shift;
+ unsigned int npods;
+ unsigned int idx;
+ unsigned int tag;
+ struct cxgbi_pagepod_hdr hdr;
+ int nents;
+ int nr_pages;
+ struct scatterlist *sgl;
+};
+
+struct cxgbi_tag_format {
+ unsigned char pgsz_order[DDP_PGIDX_MAX];
+ unsigned char pgsz_idx_dflt;
+ unsigned char free_bits:4;
+ unsigned char color_bits:4;
+ unsigned char idx_bits;
+ unsigned char rsvd_bits;
+ unsigned int no_ddp_mask;
+ unsigned int idx_mask;
+ unsigned int color_mask;
+ unsigned int idx_clr_mask;
+ unsigned int rsvd_mask;
+};
+
+struct cxgbi_ppod_data {
+ unsigned char pg_idx:2;
+ unsigned char color:6;
+ unsigned char chan_id;
+ unsigned short npods;
+ unsigned long caller_data;
+};
+
+/* per cpu ppm pool */
+struct cxgbi_ppm_pool {
+ unsigned int base; /* base index */
+ unsigned int next; /* next possible free index */
+ spinlock_t lock; /* ppm pool lock */
+ unsigned long bmap[0];
+} ____cacheline_aligned_in_smp;
+
+struct cxgbi_ppm {
+ struct kref refcnt;
+ struct net_device *ndev; /* net_device, 1st port */
+ struct pci_dev *pdev;
+ void *lldev;
+ void **ppm_pp;
+ struct cxgbi_tag_format tformat;
+ unsigned int ppmax;
+ unsigned int llimit;
+ unsigned int base_idx;
+
+ unsigned int pool_rsvd;
+ unsigned int pool_index_max;
+ struct cxgbi_ppm_pool __percpu *pool;
+ spinlock_t map_lock; /* ppm map lock */
+ unsigned int bmap_index_max;
+ unsigned int next;
+ unsigned long *ppod_bmap;
+ struct cxgbi_ppod_data ppod_data[0];
+};
+
+#define DDP_THRESHOLD 512
+
+#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
+
+#define IPPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */
+#define PPOD_SIZE_SHIFT 6
+
+/* page pods are allocated in groups of this size (must be power of 2) */
+#define PPOD_CLUSTER_SIZE 16U
+
+#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
+#define ULPMEM_IDATA_MAX_NPPODS 3 /* (PPOD_SIZE * 3 + ulptx hdr) < 256B */
+#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */
+
+#define PPOD_COLOR_SHIFT 0
+#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT)
+
+#define PPOD_IDX_SHIFT 6
+#define PPOD_IDX_MAX_SIZE 24
+
+#define PPOD_TID_SHIFT 0
+#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT)
+
+#define PPOD_TAG_SHIFT 6
+#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT)
+
+#define PPOD_VALID_SHIFT 24
+#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT)
+#define PPOD_VALID_FLAG PPOD_VALID(1U)
+
+#define PPOD_PI_EXTRACT_CTL_SHIFT 31
+#define PPOD_PI_EXTRACT_CTL(x) ((x) << PPOD_PI_EXTRACT_CTL_SHIFT)
+#define PPOD_PI_EXTRACT_CTL_FLAG PPOD_PI_EXTRACT_CTL(1U)
+
+#define PPOD_PI_TYPE_SHIFT 29
+#define PPOD_PI_TYPE_MASK 0x3
+#define PPOD_PI_TYPE(x) ((x) << PPOD_PI_TYPE_SHIFT)
+
+#define PPOD_PI_CHECK_CTL_SHIFT 27
+#define PPOD_PI_CHECK_CTL_MASK 0x3
+#define PPOD_PI_CHECK_CTL(x) ((x) << PPOD_PI_CHECK_CTL_SHIFT)
+
+#define PPOD_PI_REPORT_CTL_SHIFT 25
+#define PPOD_PI_REPORT_CTL_MASK 0x3
+#define PPOD_PI_REPORT_CTL(x) ((x) << PPOD_PI_REPORT_CTL_SHIFT)
+
+static inline int cxgbi_ppm_is_ddp_tag(struct cxgbi_ppm *ppm, u32 tag)
+{
+ return !(tag & ppm->tformat.no_ddp_mask);
+}
+
+static inline int cxgbi_ppm_sw_tag_is_usable(struct cxgbi_ppm *ppm,
+ u32 tag)
+{
+ /* the sw tag must be using <= 31 bits */
+ return !(tag & 0x80000000U);
+}
+
+static inline int cxgbi_ppm_make_non_ddp_tag(struct cxgbi_ppm *ppm,
+ u32 sw_tag,
+ u32 *final_tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+
+ if (!cxgbi_ppm_sw_tag_is_usable(ppm, sw_tag)) {
+ pr_info("sw_tag 0x%x NOT usable.\n", sw_tag);
+ return -EINVAL;
+ }
+
+ if (!sw_tag) {
+ *final_tag = tformat->no_ddp_mask;
+ } else {
+ unsigned int shift = tformat->idx_bits + tformat->color_bits;
+ u32 lower = sw_tag & tformat->idx_clr_mask;
+ u32 upper = (sw_tag >> shift) << (shift + 1);
+
+ *final_tag = upper | tformat->no_ddp_mask | lower;
+ }
+ return 0;
+}
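Worked encoding under an assumed tag format (idx_bits = 15, color_bits = 6, so shift = 21, idx_clr_mask = 0x1fffff and no_ddp_mask = 0x200000): sw_tag = 0x123456 lies entirely below bit 21, so upper = 0 and *final_tag = 0x200000 | 0x123456 = 0x323456 - the no-ddp bit is spliced in above the idx/color field while the low bits pass through, and cxgbi_ppm_decode_non_ddp_tag() below reverses the transform.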
+
+static inline u32 cxgbi_ppm_decode_non_ddp_tag(struct cxgbi_ppm *ppm,
+ u32 tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ unsigned int shift = tformat->idx_bits + tformat->color_bits;
+ u32 lower = tag & tformat->idx_clr_mask;
+ u32 upper = (tag >> tformat->rsvd_bits) << shift;
+
+ return upper | lower;
+}
+
+static inline u32 cxgbi_ppm_ddp_tag_get_idx(struct cxgbi_ppm *ppm,
+ u32 ddp_tag)
+{
+ u32 hw_idx = (ddp_tag >> PPOD_IDX_SHIFT) &
+ ppm->tformat.idx_mask;
+
+ return hw_idx - ppm->base_idx;
+}
+
+static inline u32 cxgbi_ppm_make_ddp_tag(unsigned int hw_idx,
+ unsigned char color)
+{
+ return (hw_idx << PPOD_IDX_SHIFT) | ((u32)color);
+}
+
+static inline unsigned long
+cxgbi_ppm_get_tag_caller_data(struct cxgbi_ppm *ppm,
+ u32 ddp_tag)
+{
+ u32 idx = cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag);
+
+ return ppm->ppod_data[idx].caller_data;
+}
+
+/* sw bits are the free bits */
+static inline int cxgbi_ppm_ddp_tag_update_sw_bits(struct cxgbi_ppm *ppm,
+ u32 val, u32 orig_tag,
+ u32 *final_tag)
+{
+ struct cxgbi_tag_format *tformat = &ppm->tformat;
+ u32 v = val >> tformat->free_bits;
+
+ if (v) {
+ pr_info("sw_bits 0x%x too large, avail bits %u.\n",
+ val, tformat->free_bits);
+ return -EINVAL;
+ }
+ if (!cxgbi_ppm_is_ddp_tag(ppm, orig_tag))
+ return -EINVAL;
+
+ *final_tag = (val << tformat->rsvd_bits) |
+ (orig_tag & ppm->tformat.rsvd_mask);
+ return 0;
+}
+
+static inline void cxgbi_ppm_ppod_clear(struct cxgbi_pagepod *ppod)
+{
+ ppod->hdr.vld_tid = 0U;
+}
+
+static inline void cxgbi_tagmask_check(unsigned int tagmask,
+ struct cxgbi_tag_format *tformat)
+{
+ unsigned int bits = fls(tagmask);
+
+ /* reserve top most 2 bits for page selector */
+ tformat->free_bits = 32 - 2 - bits;
+ tformat->rsvd_bits = bits;
+ tformat->color_bits = PPOD_IDX_SHIFT;
+ tformat->idx_bits = bits - 1 - PPOD_IDX_SHIFT;
+ tformat->no_ddp_mask = 1 << (bits - 1);
+ tformat->idx_mask = (1 << tformat->idx_bits) - 1;
+ tformat->color_mask = (1 << PPOD_IDX_SHIFT) - 1;
+ tformat->idx_clr_mask = (1 << (bits - 1)) - 1;
+ tformat->rsvd_mask = (1 << bits) - 1;
+
+ pr_info("ippm: tagmask 0x%x, rsvd %u=%u+%u+1, mask 0x%x,0x%x, "
+ "pg %u,%u,%u,%u.\n",
+ tagmask, tformat->rsvd_bits, tformat->idx_bits,
+ tformat->color_bits, tformat->no_ddp_mask, tformat->rsvd_mask,
+ tformat->pgsz_order[0], tformat->pgsz_order[1],
+ tformat->pgsz_order[2], tformat->pgsz_order[3]);
+}
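Continuing the worked numbers above: cxgbi_tagmask_check(0x200000, ...) computes bits = 22, free_bits = 32 - 2 - 22 = 8, idx_mask = 0x7fff, color_mask = 0x3f, idx_clr_mask = 0x1fffff and rsvd_mask = 0x3fffff, matching the layout diagrammed at the top of this header.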
+
+int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz);
+void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag,
+ unsigned int tid, unsigned int offset,
+ unsigned int length,
+ struct cxgbi_pagepod_hdr *hdr);
+void cxgbi_ppm_ppod_release(struct cxgbi_ppm *, u32 idx);
+int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages,
+ u32 per_tag_pg_idx, u32 *ppod_idx, u32 *ddp_tag,
+ unsigned long caller_data);
+int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *,
+ void *lldev, struct cxgbi_tag_format *,
+ unsigned int ppmax, unsigned int llimit,
+ unsigned int start,
+ unsigned int reserve_factor);
+int cxgbi_ppm_release(struct cxgbi_ppm *ppm);
+void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *);
+unsigned int cxgbi_tagmask_set(unsigned int ppmax);
+
+#endif /*__CXGB4PPM_H__*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index cf711d5f15be..f3c58aaa932d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -191,6 +191,7 @@ static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
enum cxgb4_uld {
CXGB4_ULD_RDMA,
CXGB4_ULD_ISCSI,
+ CXGB4_ULD_ISCSIT,
CXGB4_ULD_MAX
};
@@ -212,6 +213,7 @@ struct l2t_data;
struct net_device;
struct pkt_gl;
struct tp_tcp_stats;
+struct t4_lro_mgr;
struct cxgb4_range {
unsigned int start;
@@ -273,6 +275,10 @@ struct cxgb4_lld_info {
unsigned int max_ordird_qp; /* Max ORD/IRD depth per RDMA QP */
unsigned int max_ird_adapter; /* Max IRD memory per adapter */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ unsigned int iscsi_tagmask; /* iscsi ddp tag mask */
+ unsigned int iscsi_pgsz_order; /* iscsi ddp page size orders */
+ unsigned int iscsi_llimit; /* chip's iscsi region llimit */
+ void **iscsi_ppm; /* iscsi page pod manager */
int nodeid; /* device numa node id */
};
@@ -283,6 +289,11 @@ struct cxgb4_uld_info {
const struct pkt_gl *gl);
int (*state_change)(void *handle, enum cxgb4_state new_state);
int (*control)(void *handle, enum cxgb4_control control, ...);
+ int (*lro_rx_handler)(void *handle, const __be64 *rsp,
+ const struct pkt_gl *gl,
+ struct t4_lro_mgr *lro_mgr,
+ struct napi_struct *napi);
+ void (*lro_flush)(struct t4_lro_mgr *);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 5b0f3ef348e9..60a26037a1c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -48,8 +48,6 @@
#include "t4_regs.h"
#include "t4_values.h"
-#define VLAN_NONE 0xfff
-
/* identifies sync vs async L2T_WRITE_REQs */
#define SYNC_WR_S 12
#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 4e2d47ac102b..79665bd8f881 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,8 @@
#include <linux/if_ether.h>
#include <linux/atomic.h>
+#define VLAN_NONE 0xfff
+
enum { L2T_SIZE = 4096 }; /* # of L2T entries */
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b4eb4680a27c..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2157,8 +2157,11 @@ static int process_responses(struct sge_rspq *q, int budget)
while (likely(budget_left)) {
rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
- if (!is_new_response(rc, q))
+ if (!is_new_response(rc, q)) {
+ if (q->flush_handler)
+ q->flush_handler(q);
break;
+ }
dma_rmb();
rsp_type = RSPD_TYPE_G(rc->type_gen);
@@ -2226,7 +2229,7 @@ static int process_responses(struct sge_rspq *q, int budget)
budget_left--;
}
- if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
__refill_fl(q->adap, &rxq->fl);
return budget - budget_left;
}
@@ -2544,7 +2547,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
*/
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
- struct sge_fl *fl, rspq_handler_t hnd, int cong)
+ struct sge_fl *fl, rspq_handler_t hnd,
+ rspq_flush_handler_t flush_hnd, int cong)
{
int ret, flsz = 0;
struct fw_iq_cmd c;
@@ -2611,8 +2615,18 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
FW_IQ_CMD_FL0CONGCIF_F |
FW_IQ_CMD_FL0CONGEN_F);
+ /* In T6, for egress queue type FL there is internal overhead
+ * of 16B for header going into FLM module. Hence the maximum
+ * allowed burst size is 448 bytes. For T4/T5, the hardware
+ * doesn't coalesce fetch requests if more than 64 bytes of
+ * Free List pointers are provided, so we use a 128-byte Fetch
+ * Burst Minimum there (T6 implements coalescing so we can use
+ * the smaller 64-byte value there).
+ */
c.fl0dcaen_to_fl0cidxfthresh =
- htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+ htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+ FETCHBURSTMIN_128B_X :
+ FETCHBURSTMIN_64B_X) |
FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
@@ -2638,6 +2652,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size--; /* subtract status entry */
iq->netdev = dev;
iq->handler = hnd;
+ iq->flush_handler = flush_hnd;
+
+ memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
+ skb_queue_head_init(&iq->lro_mgr.lroq);
/* set offset to -1 to distinguish ingress queues without FL */
iq->offset = fl ? 0 : -1;
@@ -2963,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
void t4_free_sge_resources(struct adapter *adap)
{
int i;
- struct sge_eth_rxq *eq = adap->sge.ethrxq;
- struct sge_eth_txq *etq = adap->sge.ethtxq;
+ struct sge_eth_rxq *eq;
+ struct sge_eth_txq *etq;
+
+ /* stop all Rx queues in order to start them draining */
+ for (i = 0; i < adap->sge.ethqsets; i++) {
+ eq = &adap->sge.ethrxq[i];
+ if (eq->rspq.desc)
+ t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+ FW_IQ_TYPE_FL_INT_CAP,
+ eq->rspq.cntxt_id,
+ eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+ 0xffff);
+ }
/* clean up Ethernet Tx/Rx queues */
- for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+ for (i = 0; i < adap->sge.ethqsets; i++) {
+ eq = &adap->sge.ethrxq[i];
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
+
+ etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
@@ -2982,6 +3014,7 @@ void t4_free_sge_resources(struct adapter *adap)
/* clean up RDMA and iSCSI Rx queues */
t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
+ t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 636b4691f252..71586a3e0f61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
}
#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_SIZE 0x800
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
+ /* We have two VPD data structures stored in the adapter VPD area.
+ * By default, Linux calculates the size of the VPD area by traversing
+ * the first VPD area at offset 0x0, so we need to tell the OS what
+ * our real VPD size is.
+ */
+ ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
+ if (ret < 0)
+ goto out;
+
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
@@ -4433,23 +4443,6 @@ void t4_intr_disable(struct adapter *adapter)
}
/**
- * hash_mac_addr - return the hash value of a MAC address
- * @addr: the 48-bit Ethernet MAC address
- *
- * Hashes a MAC address according to the hash function used by HW inexact
- * (hash) address matching.
- */
-static int hash_mac_addr(const u8 *addr)
-{
- u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
- u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
- a ^= b;
- a ^= (a >> 12);
- a ^= (a >> 6);
- return a & 0x3f;
-}
-
-/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
* @mbox: mbox to use for the FW command
@@ -6738,6 +6731,81 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
}
/**
+ * t4_free_mac_filt - frees exact-match filters of given MAC addresses
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @naddr: the number of MAC addresses to free filters for (up to 7)
+ * @addr: the MAC address(es)
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Frees the exact-match filter for each of the supplied addresses
+ *
+ * Returns a negative error number or the number of filters freed.
+ */
+int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, unsigned int naddr,
+ const u8 **addr, bool sleep_ok)
+{
+ int offset, ret = 0;
+ struct fw_vi_mac_cmd c;
+ unsigned int nfilters = 0;
+ unsigned int max_naddr = is_t4(adap->params.chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int rem = naddr;
+
+ if (naddr > max_naddr)
+ return -EINVAL;
+
+ for (offset = 0; offset < (int)naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
+ ? rem
+ : ARRAY_SIZE(c.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ FW_CMD_LEN16_V(len16));
+
+ for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
+ p->valid_to_idx = cpu_to_be16(
+ FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
+ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+ }
+
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret)
+ break;
+
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_G(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (index < max_naddr)
+ nfilters++;
+ }
+
+ offset += fw_naddr;
+ rem -= fw_naddr;
+ }
+
+ if (ret == 0)
+ ret = nfilters;
+ return ret;
+}
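Hedged caller-side sketch (not from the patch; `adap`, `viid` and the netdev are hypothetical):

	const u8 *maclist[1] = { netdev->dev_addr };
	int ret;

	/* drop the exact-match filter for this MAC without sleeping */
	ret = t4_free_mac_filt(adap, adap->mbox, viid, 1, maclist, false);
	if (ret < 0)
		dev_warn(adap->pdev_dev,
			 "unable to free MAC filter, err %d\n", ret);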
+
+/**
* t4_change_mac - modifies the exact-match filter for a MAC address
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -6882,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_iq_stop - stop an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Stops an ingress queue and its associated FLs, if any. This causes
+ * any current or future data/messages destined for these queues to be
+ * tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+ FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+ FW_IQ_CMD_VFN_V(vf));
+ c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
* t4_iq_free - free an ingress queue and its FLs
* @adap: the adapter
* @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index a072d341e205..80417fc564d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -51,6 +51,7 @@ enum {
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
CPL_TID_RELEASE = 0x1A,
+ CPL_TX_DATA_ISO = 0x1F,
CPL_CLOSE_LISTSRV_RPL = 0x20,
CPL_L2T_WRITE_RPL = 0x23,
@@ -344,6 +345,87 @@ struct cpl_pass_open_rpl {
u8 status;
};
+struct tcp_options {
+ __be16 mss;
+ __u8 wsf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8:4;
+ __u8 unknown:1;
+ __u8:1;
+ __u8 sack:1;
+ __u8 tstamp:1;
+#else
+ __u8 tstamp:1;
+ __u8 sack:1;
+ __u8:1;
+ __u8 unknown:1;
+ __u8:4;
+#endif
+};
+
+struct cpl_pass_accept_req {
+ union opcode_tid ot;
+ __be16 rsvd;
+ __be16 len;
+ __be32 hdr_len;
+ __be16 vlan;
+ __be16 l2info;
+ __be32 tos_stid;
+ struct tcp_options tcpopt;
+};
+
+/* cpl_pass_accept_req.hdr_len fields */
+#define SYN_RX_CHAN_S 0
+#define SYN_RX_CHAN_M 0xF
+#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
+#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
+
+#define TCP_HDR_LEN_S 10
+#define TCP_HDR_LEN_M 0x3F
+#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
+#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
+
+#define IP_HDR_LEN_S 16
+#define IP_HDR_LEN_M 0x3FF
+#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
+#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
+
+#define ETH_HDR_LEN_S 26
+#define ETH_HDR_LEN_M 0x1F
+#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
+#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
+
+/* cpl_pass_accept_req.l2info fields */
+#define SYN_MAC_IDX_S 0
+#define SYN_MAC_IDX_M 0x1FF
+#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
+#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
+
+#define SYN_XACT_MATCH_S 9
+#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
+#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
+
+#define SYN_INTF_S 12
+#define SYN_INTF_M 0xF
+#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
+#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
+
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+#define CONG_CNTRL_S 14
+#define CONG_CNTRL_M 0x3
+#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
+#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
+
+#define T5_ISS_S 18
+#define T5_ISS_V(x) ((x) << T5_ISS_S)
+#define T5_ISS_F T5_ISS_V(1U)
+
struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
@@ -818,6 +900,110 @@ struct cpl_iscsi_hdr {
#define ISCSI_DDP_V(x) ((x) << ISCSI_DDP_S)
#define ISCSI_DDP_F ISCSI_DDP_V(1U)
+struct cpl_rx_data_ddp {
+ union opcode_tid ot;
+ __be16 urg;
+ __be16 len;
+ __be32 seq;
+ union {
+ __be32 nxt_seq;
+ __be32 ddp_report;
+ };
+ __be32 ulp_crc;
+ __be32 ddpvld;
+};
+
+#define cpl_rx_iscsi_ddp cpl_rx_data_ddp
+
+struct cpl_iscsi_data {
+ union opcode_tid ot;
+ __u8 rsvd0[2];
+ __be16 len;
+ __be32 seq;
+ __be16 urg;
+ __u8 rsvd1;
+ __u8 status;
+};
+
+struct cpl_tx_data_iso {
+ __be32 op_to_scsi;
+ __u8 reserved1;
+ __u8 ahs_len;
+ __be16 mpdu;
+ __be32 burst_size;
+ __be32 len;
+ __be32 reserved2_seglen_offset;
+ __be32 datasn_offset;
+ __be32 buffer_offset;
+ __be32 reserved3;
+
+ /* encapsulated CPL_TX_DATA follows here */
+};
+
+/* cpl_tx_data_iso.op_to_scsi fields */
+#define CPL_TX_DATA_ISO_OP_S 24
+#define CPL_TX_DATA_ISO_OP_M 0xff
+#define CPL_TX_DATA_ISO_OP_V(x) ((x) << CPL_TX_DATA_ISO_OP_S)
+#define CPL_TX_DATA_ISO_OP_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_OP_S) & CPL_TX_DATA_ISO_OP_M)
+
+#define CPL_TX_DATA_ISO_FIRST_S 23
+#define CPL_TX_DATA_ISO_FIRST_M 0x1
+#define CPL_TX_DATA_ISO_FIRST_V(x) ((x) << CPL_TX_DATA_ISO_FIRST_S)
+#define CPL_TX_DATA_ISO_FIRST_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_FIRST_S) & CPL_TX_DATA_ISO_FIRST_M)
+#define CPL_TX_DATA_ISO_FIRST_F CPL_TX_DATA_ISO_FIRST_V(1U)
+
+#define CPL_TX_DATA_ISO_LAST_S 22
+#define CPL_TX_DATA_ISO_LAST_M 0x1
+#define CPL_TX_DATA_ISO_LAST_V(x) ((x) << CPL_TX_DATA_ISO_LAST_S)
+#define CPL_TX_DATA_ISO_LAST_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_LAST_S) & CPL_TX_DATA_ISO_LAST_M)
+#define CPL_TX_DATA_ISO_LAST_F CPL_TX_DATA_ISO_LAST_V(1U)
+
+#define CPL_TX_DATA_ISO_CPLHDRLEN_S 21
+#define CPL_TX_DATA_ISO_CPLHDRLEN_M 0x1
+#define CPL_TX_DATA_ISO_CPLHDRLEN_V(x) ((x) << CPL_TX_DATA_ISO_CPLHDRLEN_S)
+#define CPL_TX_DATA_ISO_CPLHDRLEN_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_CPLHDRLEN_S) & CPL_TX_DATA_ISO_CPLHDRLEN_M)
+#define CPL_TX_DATA_ISO_CPLHDRLEN_F CPL_TX_DATA_ISO_CPLHDRLEN_V(1U)
+
+#define CPL_TX_DATA_ISO_HDRCRC_S 20
+#define CPL_TX_DATA_ISO_HDRCRC_M 0x1
+#define CPL_TX_DATA_ISO_HDRCRC_V(x) ((x) << CPL_TX_DATA_ISO_HDRCRC_S)
+#define CPL_TX_DATA_ISO_HDRCRC_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_HDRCRC_S) & CPL_TX_DATA_ISO_HDRCRC_M)
+#define CPL_TX_DATA_ISO_HDRCRC_F CPL_TX_DATA_ISO_HDRCRC_V(1U)
+
+#define CPL_TX_DATA_ISO_PLDCRC_S 19
+#define CPL_TX_DATA_ISO_PLDCRC_M 0x1
+#define CPL_TX_DATA_ISO_PLDCRC_V(x) ((x) << CPL_TX_DATA_ISO_PLDCRC_S)
+#define CPL_TX_DATA_ISO_PLDCRC_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_PLDCRC_S) & CPL_TX_DATA_ISO_PLDCRC_M)
+#define CPL_TX_DATA_ISO_PLDCRC_F CPL_TX_DATA_ISO_PLDCRC_V(1U)
+
+#define CPL_TX_DATA_ISO_IMMEDIATE_S 18
+#define CPL_TX_DATA_ISO_IMMEDIATE_M 0x1
+#define CPL_TX_DATA_ISO_IMMEDIATE_V(x) ((x) << CPL_TX_DATA_ISO_IMMEDIATE_S)
+#define CPL_TX_DATA_ISO_IMMEDIATE_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_IMMEDIATE_S) & CPL_TX_DATA_ISO_IMMEDIATE_M)
+#define CPL_TX_DATA_ISO_IMMEDIATE_F CPL_TX_DATA_ISO_IMMEDIATE_V(1U)
+
+#define CPL_TX_DATA_ISO_SCSI_S 16
+#define CPL_TX_DATA_ISO_SCSI_M 0x3
+#define CPL_TX_DATA_ISO_SCSI_V(x) ((x) << CPL_TX_DATA_ISO_SCSI_S)
+#define CPL_TX_DATA_ISO_SCSI_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_SCSI_S) & CPL_TX_DATA_ISO_SCSI_M)
+
+/* cpl_tx_data_iso.reserved2_seglen_offset fields */
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_S 0
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_M 0xffffff
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_V(x) \
+ ((x) << CPL_TX_DATA_ISO_SEGLEN_OFFSET_S)
+#define CPL_TX_DATA_ISO_SEGLEN_OFFSET_G(x) \
+ (((x) >> CPL_TX_DATA_ISO_SEGLEN_OFFSET_S) & \
+ CPL_TX_DATA_ISO_SEGLEN_OFFSET_M)
+
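Illustrative composition of cpl_tx_data_iso.op_to_scsi for a single-segment (first and last) ISO request with both digests enabled; `req` and the SCSI field value are hypothetical, not taken from this patch:

	req->op_to_scsi = cpu_to_be32(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
				      CPL_TX_DATA_ISO_FIRST_F |
				      CPL_TX_DATA_ISO_LAST_F |
				      CPL_TX_DATA_ISO_HDRCRC_F |
				      CPL_TX_DATA_ISO_PLDCRC_F |
				      CPL_TX_DATA_ISO_SCSI_V(2));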
struct cpl_rx_data {
union opcode_tid ot;
__be16 rsvd;
@@ -854,6 +1040,15 @@ struct cpl_rx_data_ack {
#define RX_FORCE_ACK_V(x) ((x) << RX_FORCE_ACK_S)
#define RX_FORCE_ACK_F RX_FORCE_ACK_V(1U)
+#define RX_DACK_MODE_S 29
+#define RX_DACK_MODE_M 0x3
+#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
+#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
+
+#define RX_DACK_CHANGE_S 31
+#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
+#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
+
struct cpl_rx_pkt {
struct rss_header rsshdr;
u8 opcode;
@@ -1021,6 +1216,8 @@ struct cpl_l2t_write_req {
#define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
#define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U)
+#define CPL_L2T_VLAN_NONE 0xfff
+
struct cpl_l2t_write_rpl {
union opcode_tid ot;
u8 status;
@@ -1088,6 +1285,12 @@ struct cpl_fw4_ack {
__be64 rsvd1;
};
+enum {
+ CPL_FW4_ACK_FLAGS_SEQVAL = 0x1, /* seqn valid */
+ CPL_FW4_ACK_FLAGS_CH = 0x2, /* channel change complete */
+ CPL_FW4_ACK_FLAGS_FLOWC = 0x4, /* fw_flowc_wr complete */
+};
+
struct cpl_fw6_msg {
u8 opcode;
u8 type;
@@ -1113,6 +1316,17 @@ struct cpl_fw6_msg_ofld_connection_wr_rpl {
__u8 rsvd[2];
};
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be32 flags;
+};
+
+/* cpl_tx_data.flags field */
+#define TX_FORCE_S 13
+#define TX_FORCE_V(x) ((x) << TX_FORCE_S)
+
enum {
ULP_TX_MEM_READ = 2,
ULP_TX_MEM_WRITE = 3,
@@ -1141,6 +1355,11 @@ struct ulptx_sgl {
struct ulptx_sge_pair sge[0];
};
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
#define ULPTX_NSGE_S 0
#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 06bc2d2e7a73..a2cdfc1261dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index a5231fa771db..36cf3073ca37 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -65,6 +65,7 @@
#define TIMERREG_COUNTER0_X 0
#define FETCHBURSTMIN_64B_X 2
+#define FETCHBURSTMIN_128B_X 3
#define FETCHBURSTMAX_256B_X 2
#define FETCHBURSTMAX_512B_X 3
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a32de30ea663..7ad6d4e75b2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -101,6 +101,7 @@ enum fw_wr_opcodes {
FW_RI_BIND_MW_WR = 0x18,
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_INV_LSTAG_WR = 0x1a,
+ FW_ISCSI_TX_DATA_WR = 0x45,
FW_LASTC2E_WR = 0x70
};
@@ -561,6 +562,12 @@ enum fw_flowc_mnem {
FW_FLOWC_MNEM_SNDBUF,
FW_FLOWC_MNEM_MSS,
FW_FLOWC_MNEM_TXDATAPLEN_MAX,
+ FW_FLOWC_MNEM_TCPSTATE,
+ FW_FLOWC_MNEM_EOSTATE,
+ FW_FLOWC_MNEM_SCHEDCLASS,
+ FW_FLOWC_MNEM_DCBPRIO,
+ FW_FLOWC_MNEM_SND_SCALE,
+ FW_FLOWC_MNEM_RCV_SCALE,
};
struct fw_flowc_mnemval {
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 6049f70e110c..4a707c32d76f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -348,6 +348,11 @@ struct sge {
#define for_each_ethrxq(sge, iter) \
for (iter = 0; iter < (sge)->ethqsets; iter++)
+struct hash_mac_addr {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+};
+
/*
* Per-"adapter" (Virtual Function) information.
*/
@@ -381,6 +386,9 @@ struct adapter {
/* various locks */
spinlock_t stats_lock;
+
+ /* list of MAC addresses in MPS Hash */
+ struct list_head mac_hlist;
};
enum { /* adapter flags */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0cfa5d72cafd..1cc8a7a69457 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -741,6 +741,9 @@ static int adapter_up(struct adapter *adapter)
*/
enable_rx(adapter);
t4vf_sge_start(adapter);
+
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
return 0;
}
@@ -787,10 +790,6 @@ static int cxgb4vf_open(struct net_device *dev)
/*
* Note that this interface is up and start everything up ...
*/
- netif_set_real_num_tx_queues(dev, pi->nqsets);
- err = netif_set_real_num_rx_queues(dev, pi->nqsets);
- if (err)
- goto err_unwind;
err = link_start(dev);
if (err)
goto err_unwind;
@@ -859,97 +858,74 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
return ns;
}
-/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
- * at a specified offset within the list, into an array of addrss pointers and
- * return the number collected.
- */
-static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int offset,
- unsigned int maxaddrs)
+static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
{
- unsigned int index = 0;
- unsigned int naddr = 0;
- const struct netdev_hw_addr *ha;
-
- for_each_dev_addr(dev, ha)
- if (index++ >= offset) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
- return naddr;
-}
-
-/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
- * at a specified offset within the list, into an array of addrss pointers and
- * return the number collected.
- */
-static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int offset,
- unsigned int maxaddrs)
-{
- unsigned int index = 0;
- unsigned int naddr = 0;
- const struct netdev_hw_addr *ha;
+ struct adapter *adapter = pi->adapter;
+ u64 vec = 0;
+ bool ucast = false;
+ struct hash_mac_addr *entry;
- netdev_for_each_mc_addr(ha, dev)
- if (index++ >= offset) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
- return naddr;
+ /* Calculate the hash vector for the updated list and program it */
+ list_for_each_entry(entry, &adapter->mac_hlist, list) {
+ ucast |= is_unicast_ether_addr(entry->addr);
+ vec |= (1ULL << hash_mac_addr(entry->addr));
+ }
+ return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}
-/*
- * Configure the exact and hash address filters to handle a port's multicast
- * and secondary unicast MAC addresses.
- */
-static int set_addr_filters(const struct net_device *dev, bool sleep)
+static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
u64 mhash = 0;
u64 uhash = 0;
- bool free = true;
- unsigned int offset, naddr;
- const u8 *addr[7];
- int ret;
- const struct port_info *pi = netdev_priv(dev);
-
- /* first do the secondary unicast addresses */
- for (offset = 0; ; offset += naddr) {
- naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
- ARRAY_SIZE(addr));
- if (naddr == 0)
- break;
-
- ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, NULL, &uhash, sleep);
- if (ret < 0)
- return ret;
+ bool free = false;
+ bool ucast = is_unicast_ether_addr(mac_addr);
+ const u8 *maclist[1] = {mac_addr};
+ struct hash_mac_addr *new_entry;
- free = false;
+ ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
+ NULL, ucast ? &uhash : &mhash, false);
+ if (ret < 0)
+ goto out;
+ /* if hash != 0, then add the addr to the hash addr list
+ * so that at the end we can calculate the hash for the
+ * whole list and program it
+ */
+ if (uhash || mhash) {
+ new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
+ if (!new_entry)
+ return -ENOMEM;
+ ether_addr_copy(new_entry->addr, mac_addr);
+ list_add_tail(&new_entry->list, &adapter->mac_hlist);
+ ret = cxgb4vf_set_addr_hash(pi);
}
+out:
+ return ret < 0 ? ret : 0;
+}
- /* next set up the multicast addresses */
- for (offset = 0; ; offset += naddr) {
- naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
- ARRAY_SIZE(addr));
- if (naddr == 0)
- break;
+static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+ const u8 *maclist[1] = {mac_addr};
+ struct hash_mac_addr *entry, *tmp;
- ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, NULL, &mhash, sleep);
- if (ret < 0)
- return ret;
- free = false;
+ /* If the MAC address to be removed is in the hash addr
+ * list, delete it from the list and update the hash vector
+ */
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
+ if (ether_addr_equal(entry->addr, mac_addr)) {
+ list_del(&entry->list);
+ kfree(entry);
+ return cxgb4vf_set_addr_hash(pi);
+ }
}
- return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
- uhash | mhash, sleep);
+ ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
+ return ret < 0 ? -EINVAL : 0;
}
/*
@@ -958,16 +934,18 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
*/
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
- int ret;
struct port_info *pi = netdev_priv(dev);
- ret = set_addr_filters(dev, sleep_ok);
- if (ret == 0)
- ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
- (dev->flags & IFF_PROMISC) != 0,
- (dev->flags & IFF_ALLMULTI) != 0,
- 1, -1, sleep_ok);
- return ret;
+ if (!(dev->flags & IFF_PROMISC)) {
+ __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
+ if (!(dev->flags & IFF_ALLMULTI))
+ __dev_mc_sync(dev, cxgb4vf_mac_sync,
+ cxgb4vf_mac_unsync);
+ }
+ return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
+ (dev->flags & IFF_PROMISC) != 0,
+ (dev->flags & IFF_ALLMULTI) != 0,
+ 1, -1, sleep_ok);
}
/*
@@ -2194,6 +2172,73 @@ static void cleanup_debugfs(struct adapter *adapter)
/* nothing to do */
}
+/* Figure out how many Ports and Queue Sets we can support. This depends on
+ * knowing our Virtual Function Resources and may be called a second time if
+ * we fall back from MSI-X to MSI Interrupt Mode.
+ */
+static void size_nports_qsets(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ unsigned int ethqsets, pmask_nports;
+
+ /* The number of "ports" which we support is equal to the number of
+ * Virtual Interfaces with which we've been provisioned.
+ */
+ adapter->params.nports = vfres->nvi;
+ if (adapter->params.nports > MAX_NPORTS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
+ " allowed virtual interfaces\n", MAX_NPORTS,
+ adapter->params.nports);
+ adapter->params.nports = MAX_NPORTS;
+ }
+
+ /* We may have been provisioned with more VIs than the number of
+ * ports we're allowed to access (our Port Access Rights Mask).
+ * This is obviously a configuration conflict but we don't want to
+ * crash the kernel or anything silly just because of that.
+ */
+ pmask_nports = hweight32(adapter->params.vfres.pmask);
+ if (pmask_nports < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+ " virtual interfaces; limited by Port Access Rights"
+ " mask %#x\n", pmask_nports, adapter->params.nports,
+ adapter->params.vfres.pmask);
+ adapter->params.nports = pmask_nports;
+ }
+
+ /* We need to reserve an Ingress Queue for the Asynchronous Firmware
+ * Event Queue. And if we're using MSI Interrupts, we'll also need to
+ * reserve an Ingress Queue for Forwarded Interrupts.
+ *
+ * The rest of the FL/Intr-capable ingress queues will be matched up
+ * one-for-one with Ethernet/Control egress queues in order to form
+ * "Queue Sets" which will be aportioned between the "ports". For
+ * each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ *
+ * Note that even if we're currently configured to use MSI-X
+ * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
+ * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that
+ * happens we'll need to adjust things later.
+ */
+ ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
+ if (vfres->nethctrl != ethqsets)
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ if (vfres->neq < ethqsets*2)
+ ethqsets = vfres->neq/2;
+ if (ethqsets > MAX_ETH_QSETS)
+ ethqsets = MAX_ETH_QSETS;
+ adapter->sge.max_ethqsets = ethqsets;
+
+ if (adapter->sge.max_ethqsets < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d available"
+ " virtual interfaces (too few Queue Sets)\n",
+ adapter->sge.max_ethqsets, adapter->params.nports);
+ adapter->params.nports = adapter->sge.max_ethqsets;
+ }
+}
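Worked instance under assumed resources: with nvi = 2, pmask = 0x3, niqflint = 10, nethctrl = 9, neq = 16 and MSI-X in use (msi == MSI_MSIX), ethqsets starts at 10 - 1 - 0 = 9; nethctrl matches, so the min() clamp changes nothing; the egress-context check sees 16 < 18 and cuts it to 16/2 = 8; with MAX_ETH_QSETS above that, sge.max_ethqsets ends up at 8, hweight32(0x3) = 2 leaves both ports usable, and each port gets 4 Queue Sets.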
+
/*
* Perform early "adapter" initialization. This is where we discover what
* adapter parameters we're going to be using and initialize basic adapter
@@ -2201,24 +2246,12 @@ static void cleanup_debugfs(struct adapter *adapter)
*/
static int adap_init0(struct adapter *adapter)
{
- struct vf_resources *vfres = &adapter->params.vfres;
struct sge_params *sge_params = &adapter->params.sge;
struct sge *s = &adapter->sge;
- unsigned int ethqsets;
int err;
u32 param, val = 0;
/*
- * Wait for the device to become ready before proceeding ...
- */
- err = t4vf_wait_dev_ready(adapter);
- if (err) {
- dev_err(adapter->pdev_dev, "device didn't become ready:"
- " err=%d\n", err);
- return err;
- }
-
- /*
* Some environments do not properly handle PCIE FLRs -- e.g. in Linux
* 2.6.31 and later we can't call pci_reset_function() in order to
* issue an FLR because of a self- deadlock on the device semaphore.
@@ -2323,69 +2356,23 @@ static int adap_init0(struct adapter *adapter)
return err;
}
- /*
- * The number of "ports" which we support is equal to the number of
- * Virtual Interfaces with which we've been provisioned.
- */
- adapter->params.nports = vfres->nvi;
- if (adapter->params.nports > MAX_NPORTS) {
- dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
- " virtual interfaces\n", MAX_NPORTS,
- adapter->params.nports);
- adapter->params.nports = MAX_NPORTS;
- }
-
- /*
- * We need to reserve a number of the ingress queues with Free List
- * and Interrupt capabilities for special interrupt purposes (like
- * asynchronous firmware messages, or forwarded interrupts if we're
- * using MSI). The rest of the FL/Intr-capable ingress queues will be
- * matched up one-for-one with Ethernet/Control egress queues in order
- * to form "Queue Sets" which will be aportioned between the "ports".
- * For each Queue Set, we'll need the ability to allocate two Egress
- * Contexts -- one for the Ingress Queue Free List and one for the TX
- * Ethernet Queue.
- */
- ethqsets = vfres->niqflint - INGQ_EXTRAS;
- if (vfres->nethctrl != ethqsets) {
- dev_warn(adapter->pdev_dev, "unequal number of [available]"
- " ingress/egress queues (%d/%d); using minimum for"
- " number of Queue Sets\n", ethqsets, vfres->nethctrl);
- ethqsets = min(vfres->nethctrl, ethqsets);
- }
- if (vfres->neq < ethqsets*2) {
- dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
- " to support Queue Sets (%d); reducing allowed Queue"
- " Sets\n", vfres->neq, ethqsets);
- ethqsets = vfres->neq/2;
- }
- if (ethqsets > MAX_ETH_QSETS) {
- dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
- " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
- ethqsets = MAX_ETH_QSETS;
- }
- if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
- dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
- " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
- }
- adapter->sge.max_ethqsets = ethqsets;
-
- /*
- * Check for various parameter sanity issues. Most checks simply
- * result in us using fewer resources than our provissioning but we
- * do need at least one "port" with which to work ...
- */
- if (adapter->sge.max_ethqsets < adapter->params.nports) {
- dev_warn(adapter->pdev_dev, "only using %d of %d available"
- " virtual interfaces (too few Queue Sets)\n",
- adapter->sge.max_ethqsets, adapter->params.nports);
- adapter->params.nports = adapter->sge.max_ethqsets;
+ /* Check for various parameter sanity issues */
+ if (adapter->params.vfres.pmask == 0) {
+ dev_err(adapter->pdev_dev, "no port access configured\n"
+ "usable!\n");
+ return -EINVAL;
}
- if (adapter->params.nports == 0) {
+ if (adapter->params.vfres.nvi == 0) {
dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
"usable!\n");
return -EINVAL;
}
+
+ /* Initialize nports and max_ethqsets now that we have our Virtual
+ * Function Resources.
+ */
+ size_nports_qsets(adapter);
+
return 0;
}
@@ -2799,6 +2786,40 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
}
+ /* See what interrupts we'll be using. If we've been configured to
+ * use MSI-X interrupts, try to enable them but fall back to using
+ * MSI interrupts if we can't enable MSI-X interrupts. If we can't
+ * get MSI interrupts we bail with the error.
+ */
+ if (msi == MSI_MSIX && enable_msix(adapter) == 0)
+ adapter->flags |= USING_MSIX;
+ else {
+ if (msi == MSI_MSIX) {
+ dev_info(adapter->pdev_dev,
+ "Unable to use MSI-X Interrupts; falling "
+ "back to MSI Interrupts\n");
+
+ /* We're going to need a Forwarded Interrupt Queue,
+ * which may cut into how many Queue Sets we can
+ * support.
+ */
+ msi = MSI_MSI;
+ size_nports_qsets(adapter);
+ }
+ err = pci_enable_msi(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
+ " err=%d\n", err);
+ goto err_free_dev;
+ }
+ adapter->flags |= USING_MSI;
+ }
+
+ /* Now that we know how many "ports" we have and what interrupt
+ * mechanism we're going to use, we can configure our queue resources.
+ */
+ cfg_queues(adapter);
+
/*
* The "card" is now ready to go. If any errors occur during device
* registration we do not fail the whole "card" but rather proceed
@@ -2806,10 +2827,14 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* must register at least one net device.
*/
for_each_port(adapter, pidx) {
+ struct port_info *pi = netdev_priv(adapter->port[pidx]);
netdev = adapter->port[pidx];
if (netdev == NULL)
continue;
+ netif_set_real_num_tx_queues(netdev, pi->nqsets);
+ netif_set_real_num_rx_queues(netdev, pi->nqsets);
+
err = register_netdev(netdev);
if (err) {
dev_warn(&pdev->dev, "cannot register net device %s,"
@@ -2821,7 +2846,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
if (adapter->registered_device_map == 0) {
dev_err(&pdev->dev, "could not register any net devices\n");
- goto err_free_dev;
+ goto err_disable_interrupts;
}
/*
@@ -2839,32 +2864,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
/*
- * See what interrupts we'll be using. If we've been configured to
- * use MSI-X interrupts, try to enable them but fall back to using
- * MSI interrupts if we can't enable MSI-X interrupts. If we can't
- * get MSI interrupts we bail with the error.
- */
- if (msi == MSI_MSIX && enable_msix(adapter) == 0)
- adapter->flags |= USING_MSIX;
- else {
- err = pci_enable_msi(pdev);
- if (err) {
- dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
- " err=%d\n",
- msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
- goto err_free_debugfs;
- }
- adapter->flags |= USING_MSI;
- }
-
- /*
- * Now that we know how many "ports" we have and what their types are,
- * and how many Queue Sets we can support, we can configure our queue
- * resources.
- */
- cfg_queues(adapter);
-
- /*
* Print a short notice on the existence and configuration of the new
* VF network device ...
*/
@@ -2884,11 +2883,13 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Error recovery and exit code. Unwind state that's been created
* so far and return the error.
*/
-
-err_free_debugfs:
- if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
- cleanup_debugfs(adapter);
- debugfs_remove_recursive(adapter->debugfs_root);
+err_disable_interrupts:
+ if (adapter->flags & USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+ adapter->flags &= ~USING_MSIX;
+ } else if (adapter->flags & USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+ adapter->flags &= ~USING_MSI;
}
err_free_dev:
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 6528231d8a59..1ccd282949a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1864,7 +1864,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
* for new buffer pointers, refill the Free List.
*/
if (rspq->offset >= 0 &&
- rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
+ fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
__refill_fl(rspq->adapter, &rxq->fl);
return budget - budget_left;
}
@@ -2300,9 +2300,20 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
FW_IQ_CMD_FL0PACKEN_F |
FW_IQ_CMD_FL0PADEN_F);
+
+ /* In T6, for egress queue type FL there is an internal overhead
+ * of 16B for the header going into the FLM module. Hence the maximum
+ * allowed burst size is 448 bytes. For T4/T5, the hardware
+ * doesn't coalesce fetch requests if more than 64 bytes of
+ * Free List pointers are provided, so we use a 128-byte Fetch
+ * Burst Minimum there (T6 implements coalescing so we can use
+ * the smaller 64-byte value there).
+ */
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
- FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
+ FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
+ FETCHBURSTMIN_128B_X :
+ FETCHBURSTMIN_64B_X) |
FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
FETCHBURSTMAX_512B_X :
FETCHBURSTMAX_256B_X));
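A restatement of the chip split encoded in that ternary (the helper name is invented for illustration): T4/T5 do not coalesce sub-64B Free List pointer fetches, so they get the 128B Fetch Burst Minimum, while T6 coalesces and can run with 64B.

/* Illustrative only; mirrors the FL0FBMIN choice above. */
static inline unsigned int example_fl_fbmin(unsigned int chip_version)
{
        return chip_version <= CHELSIO_T5 ? FETCHBURSTMIN_128B_X
                                          : FETCHBURSTMIN_64B_X;
}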
@@ -2607,7 +2618,6 @@ int t4vf_sge_init(struct adapter *adapter)
u32 fl0 = sge_params->sge_fl_buffer_size[0];
u32 fl1 = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
- unsigned int ingpadboundary, ingpackboundary, ingpad_shift;
/*
* Start by vetting the basic SGE parameters which have been set up by
@@ -2619,7 +2629,8 @@ int t4vf_sge_init(struct adapter *adapter)
fl0, fl1);
return -EINVAL;
}
- if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
+ if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
+ RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
return -EINVAL;
}
@@ -2632,41 +2643,7 @@ int t4vf_sge_init(struct adapter *adapter)
s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
? 128 : 64);
s->pktshift = PKTSHIFT_G(sge_params->sge_control);
-
- /* T4 uses a single control field to specify both the PCIe Padding and
- * Packing Boundary. T5 introduced the ability to specify these
- * separately. The actual Ingress Packet Data alignment boundary
- * within Packed Buffer Mode is the maximum of these two
- * specifications. (Note that it makes no real practical sense to
- * have the Pading Boudary be larger than the Packing Boundary but you
- * could set the chip up that way and, in fact, legacy T4 code would
- * end doing this because it would initialize the Padding Boundary and
- * leave the Packing Boundary initialized to 0 (16 bytes).)
- * Padding Boundary values in T6 starts from 8B,
- * where as it is 32B for T4 and T5.
- */
- if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
- ingpad_shift = INGPADBOUNDARY_SHIFT_X;
- else
- ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
-
- ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
- ingpad_shift);
- if (is_t4(adapter->params.chip)) {
- s->fl_align = ingpadboundary;
- } else {
- /* T5 has a different interpretation of one of the PCIe Packing
- * Boundary values.
- */
- ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
- if (ingpackboundary == INGPACKBOUNDARY_16B_X)
- ingpackboundary = 16;
- else
- ingpackboundary = 1 << (ingpackboundary +
- INGPACKBOUNDARY_SHIFT_X);
-
- s->fl_align = max(ingpadboundary, ingpackboundary);
- }
+ s->fl_align = t4vf_fl_pkt_align(adapter);
/* A FL with <= fl_starve_thres buffers is starving and a periodic
* timer will attempt to refill it. This needs to be larger than the
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 88b8981b4751..9b40a85cc1e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -285,12 +285,31 @@ static inline int is_t4(enum chip_type chip)
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}
+/**
+ * hash_mac_addr - return the hash value of a MAC address
+ * @addr: the 48-bit Ethernet MAC address
+ *
+ * Hashes a MAC address according to the hash function used by hardware
+ * inexact (hash) address matching.
+ */
+static inline int hash_mac_addr(const u8 *addr)
+{
+ u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+ u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+
+ a ^= b;
+ a ^= (a >> 12);
+ a ^= (a >> 6);
+ return a & 0x3f;
+}
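A standalone check of the fold-and-mask above; the sample MAC is arbitrary. The XOR steps fold the 48-bit address down to a 6-bit bucket index (64 hash-filter entries):

#include <stdint.h>
#include <stdio.h>

static int hash_mac_addr(const uint8_t *addr)
{
        uint32_t a = ((uint32_t)addr[0] << 16) | ((uint32_t)addr[1] << 8) | addr[2];
        uint32_t b = ((uint32_t)addr[3] << 16) | ((uint32_t)addr[4] << 8) | addr[5];

        a ^= b;                 /* fold the two 24-bit halves together */
        a ^= (a >> 12);
        a ^= (a >> 6);
        return a & 0x3f;        /* 6-bit bucket index */
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("bucket = %d\n", hash_mac_addr(mac));    /* prints 27 */
        return 0;
}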
+
int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
+int t4vf_fl_pkt_align(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4vf_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
@@ -320,6 +339,8 @@ int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int,
bool);
int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int,
const u8 **, u16 *, u64 *, bool);
+int t4vf_free_mac_filt(struct adapter *, unsigned int, unsigned int naddr,
+ const u8 **, bool);
int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool);
int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool);
int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index b6fa74aafe47..fed83d88fc4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -236,23 +236,6 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
return -ETIMEDOUT;
}
-/**
- * hash_mac_addr - return the hash value of a MAC address
- * @addr: the 48-bit Ethernet MAC address
- *
- * Hashes a MAC address according to the hash function used by hardware
- * inexact (hash) address matching.
- */
-static int hash_mac_addr(const u8 *addr)
-{
- u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
- u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
- a ^= b;
- a ^= (a >> 12);
- a ^= (a >> 6);
- return a & 0x3f;
-}
-
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
@@ -435,6 +418,61 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
}
/**
+ * t4vf_fl_pkt_align - return the fl packet alignment
+ * @adapter: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for these, and hence the alignment for
+ * the next packet offset is the maximum of the two. And T6 changes the
+ * Ingress Padding Boundary Shift, so it's all a mess and it's best
+ * if we put this in low-level Common Code ...
+ *
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter)
+{
+ u32 sge_control, sge_control2;
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ sge_control = adapter->params.sge.sge_control;
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications. (Note that it makes no real practical sense to
+ * have the Padding Boundary be larger than the Packing Boundary, but you
+ * could set the chip up that way and, in fact, legacy T4 code would
+ * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
+ * Padding Boundary values in T6 start from 8B,
+ * whereas it is 32B for T4 and T5.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = INGPADBOUNDARY_SHIFT_X;
+ else
+ ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
+
+ ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adapter->params.chip)) {
+ /* T5 has a different interpretation of one of the PCIe Packing
+ * Boundary values.
+ */
+ sge_control2 = adapter->params.sge.sge_control2;
+ ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
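A worked instance of the computation, with the usual shift constants assumed (5 for the T4/T5 pad shift, 5 for the pack shift) and hypothetical register field values: a T5 with pad field 0 and pack field 1 gives max(1 << 5, 1 << 6) = 64 bytes.

#include <stdio.h>

int main(void)
{
        unsigned int ingpad  = 1u << (0 + 5);   /* pad field 0  -> 32B */
        unsigned int ingpack = 1u << (1 + 5);   /* pack field 1 -> 64B */
        unsigned int fl_align = ingpad > ingpack ? ingpad : ingpack;

        printf("fl_align = %uB\n", fl_align);   /* 64B */
        return 0;
}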
+
+/**
* t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
@@ -1266,6 +1304,77 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
}
/**
+ * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
+ * @adapter: the adapter
+ * @viid: the VI id
+ * @naddr: the number of MAC addresses to free filters for
+ * @addr: the MAC address(es)
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Frees the exact-match filter for each of the supplied addresses.
+ *
+ * Returns a negative error number or the number of filters freed.
+ */
+int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
+ unsigned int naddr, const u8 **addr, bool sleep_ok)
+{
+ int offset, ret = 0;
+ struct fw_vi_mac_cmd cmd;
+ unsigned int nfilters = 0;
+ unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
+ unsigned int rem = naddr;
+
+ if (naddr > max_naddr)
+ return -EINVAL;
+
+ for (offset = 0; offset < (int)naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
+ rem : ARRAY_SIZE(cmd.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ cmd.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ FW_CMD_LEN16_V(len16));
+
+ for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
+ p->valid_to_idx = cpu_to_be16(
+ FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
+ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+ }
+
+ ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
+ sleep_ok);
+ if (ret)
+ break;
+
+ for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_G(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (index < max_naddr)
+ nfilters++;
+ }
+
+ offset += fw_naddr;
+ rem -= fw_naddr;
+ }
+
+ if (ret == 0)
+ ret = nfilters;
+ return ret;
+}
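The loop above never puts more than ARRAY_SIZE(cmd.u.exact) addresses into a single mailbox command; larger requests are split across commands. The chunking in miniature (a CHUNK of 7 is assumed here to stand in for the firmware array size):

#include <stdio.h>

#define CHUNK 7         /* stands in for ARRAY_SIZE(cmd.u.exact) */

int main(void)
{
        unsigned int naddr = 17, rem = naddr, offset = 0;

        while (offset < naddr) {
                unsigned int n = rem < CHUNK ? rem : CHUNK;

                printf("one command covers entries [%u..%u]\n",
                       offset, offset + n - 1);
                offset += n;
                rem -= n;
        }
        return 0;       /* 17 addresses -> three commands: 7 + 7 + 3 */
}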
+
+/**
* t4vf_change_mac - modifies the exact-match filter for a MAC address
* @adapter: the adapter
* @viid: the Virtual Interface ID
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 7ba6d530b0c0..130f910e4785 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -201,16 +201,20 @@ static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
}
/* wrapper functions for kernel log
- * Make sure variable vdev of struct vnic_dev is available in the block where
- * these macros are used
*/
-#define vdev_info(args...) dev_info(&vdev->pdev->dev, args)
-#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args)
-#define vdev_err(args...) dev_err(&vdev->pdev->dev, args)
-
-#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args)
-#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args)
-#define vdev_neterr(args...) netdev_err(vnic_get_netdev(vdev), args)
+#define vdev_err(vdev, fmt, ...) \
+ dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
+#define vdev_warn(vdev, fmt, ...) \
+ dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
+#define vdev_info(vdev, fmt, ...) \
+ dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
+
+#define vdev_neterr(vdev, fmt, ...) \
+ netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
+#define vdev_netwarn(vdev, fmt, ...) \
+ netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
+#define vdev_netinfo(vdev, fmt, ...) \
+ netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
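Passing the handle explicitly removes the old requirement that a variable literally named vdev be in scope at every call site; ##__VA_ARGS__ (a GNU extension the kernel relies on) swallows the trailing comma when the format string has no arguments. A userspace reduction of the same macro shape:

#include <stdio.h>

struct vnic_dev { const char *name; };

#define vdev_err(vdev, fmt, ...) \
        fprintf(stderr, "%s: " fmt, (vdev)->name, ##__VA_ARGS__)

int main(void)
{
        struct vnic_dev dev = { .name = "vnic0" };

        vdev_err(&dev, "no arguments\n");       /* comma swallowed */
        vdev_err(&dev, "ring size %d\n", 64);
        return 0;
}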
static inline struct device *enic_get_dev(struct enic *enic)
{
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index abeda2a9ea27..9c682aff3834 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -43,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
- vdev_err("Failed to hook CQ[%d] resource\n", index);
+ vdev_err(vdev, "Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 1fdf5fe12a95..8f27df3207bc 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -53,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
return -EINVAL;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
- vdev_err("vNIC BAR0 res hdr length error\n");
+ vdev_err(vdev, "vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
mrh = bar->vaddr;
if (!rh) {
- vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
+ vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
@@ -69,7 +69,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
(ioread32(&rh->version) != VNIC_RES_VERSION)) {
if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
- vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+ vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
VNIC_RES_MAGIC, VNIC_RES_VERSION,
MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
ioread32(&rh->magic), ioread32(&rh->version));
@@ -106,7 +106,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
/* each count is stride bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar[bar_num].len) {
- vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+ vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
type, bar_offset, len,
bar[bar_num].len);
return -EINVAL;
@@ -198,7 +198,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
&ring->base_addr_unaligned);
if (!ring->descs_unaligned) {
- vdev_err("Failed to allocate ring (size=%d), aborting\n",
+ vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
return -ENOMEM;
}
@@ -241,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return -ENODEV;
}
if (status & STAT_BUSY) {
- vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
+ vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
@@ -275,7 +275,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return -err;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- vdev_neterr("Error %d devcmd %d\n",
+ vdev_neterr(vdev, "Error %d devcmd %d\n",
err, _CMD_N(cmd));
return -err;
}
@@ -290,7 +290,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
}
- vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd));
+ vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
@@ -313,7 +313,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
if (new_posted == fetch_index) {
- vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+ vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
_CMD_N(cmd), fetch_index, posted);
return -EBUSY;
}
@@ -352,7 +352,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
err = result->error;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- vdev_neterr("Error %d devcmd %d\n",
+ vdev_neterr(vdev, "Error %d devcmd %d\n",
err, _CMD_N(cmd));
return -err;
}
@@ -365,7 +365,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
udelay(100);
}
- vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));
+ vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
@@ -401,7 +401,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
- vdev_err("Fatal error in devcmd2 init - hardware surprise removal");
+ vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
return -ENODEV;
}
@@ -474,8 +474,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
err = (int)vdev->args[1];
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
- vdev_neterr("Error %d proxy devcmd %d\n", err,
- _CMD_N(cmd));
+ vdev_neterr(vdev, "Error %d proxy devcmd %d\n",
+ err, _CMD_N(cmd));
return err;
}
@@ -768,7 +768,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
- vdev_neterr("Can't set packet filter\n");
+ vdev_neterr(vdev, "Can't set packet filter\n");
return err;
}
@@ -785,7 +785,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
- vdev_neterr("Can't add addr [%pM], %d\n", addr, err);
+ vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err);
return err;
}
@@ -802,7 +802,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
- vdev_neterr("Can't del addr [%pM], %d\n", addr, err);
+ vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err);
return err;
}
@@ -846,7 +846,8 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
dma_addr_t notify_pa;
if (vdev->notify || vdev->notify_pa) {
- vdev_neterr("notify block %p still allocated", vdev->notify);
+ vdev_neterr(vdev, "notify block %p still allocated\n",
+ vdev->notify);
return -EINVAL;
}
@@ -965,7 +966,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
*/
if ((err == ERR_ECMDUNKNOWN) ||
(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
- vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
+ vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n");
vnic_dev_intr_coal_timer_info_default(vdev);
return 0;
}
@@ -1103,16 +1104,16 @@ int vnic_devcmd_init(struct vnic_dev *vdev)
if (res) {
err = vnic_dev_init_devcmd2(vdev);
if (err)
- vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1",
+ vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
err);
else
return 0;
} else {
- vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
+ vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
}
err = vnic_dev_init_devcmd1(vdev);
if (err)
- vdev_err("DEVCMD1 initialization failed: %d", err);
+ vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err);
return err;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 942759d9cb3c..23604e3d4455 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -40,7 +40,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
- vdev_err("Failed to hook INTR[%d].ctrl resource\n", index);
+ vdev_err(vdev, "Failed to hook INTR[%d].ctrl resource\n",
+ index);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index cce2777dfc41..e572a527b18d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -92,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
- vdev_err("Failed to hook RQ[%d] resource\n", index);
+ vdev_err(vdev, "Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
@@ -179,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq)
udelay(10);
}
- vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
+ vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 05ad16a7e872..090cc65658a3 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -95,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
- vdev_err("Failed to hook WQ[%d] resource\n", index);
+ vdev_err(vdev, "Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
@@ -187,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq)
udelay(10);
}
- vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
+ vdev_neterr(vdev, "Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index b553409e04ad..94d0eebef129 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -505,9 +505,7 @@ media_picked:
tp->timer.expires = RUN_AT(next_tick);
add_timer(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
- init_timer(&tp->oom_timer);
- tp->oom_timer.data = (unsigned long)dev;
- tp->oom_timer.function = oom_timer;
+ setup_timer(&tp->oom_timer, oom_timer, (unsigned long)dev);
#endif
}
@@ -782,9 +780,8 @@ static void tulip_down (struct net_device *dev)
spin_unlock_irqrestore (&tp->lock, flags);
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long)dev;
- tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+ setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer,
+ (unsigned long)dev);
dev->if_port = tp->saved_if_port;
@@ -1475,9 +1472,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->csr0 = csr0;
spin_lock_init(&tp->lock);
spin_lock_init(&tp->mii_lock);
- init_timer(&tp->timer);
- tp->timer.data = (unsigned long)dev;
- tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+ setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer,
+ (unsigned long)dev);
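setup_timer() folds the init_timer()/data/function triple into a single call, so the three fields can never be left half-initialized. A toy model of what the helper amounts to (the struct shape mimics the pre-4.15 timer_list, simplified):

#include <stdio.h>

struct timer_list {
        void (*function)(unsigned long);
        unsigned long data;
};

static void setup_timer(struct timer_list *t,
                        void (*fn)(unsigned long), unsigned long data)
{
        t->function = fn;       /* one call replaces three statements */
        t->data = data;
}

static void oom_timer(unsigned long data)
{
        printf("timer fired, data=%lu\n", data);
}

int main(void)
{
        struct timer_list t;

        setup_timer(&t, oom_timer, 42UL);
        t.function(t.data);     /* simulate expiry */
        return 0;
}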
INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f9751294ece7..fe3763df3f13 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -72,6 +72,9 @@
#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \
(ETH_HLEN + ETH_FCS_LEN))
+/* Accommodate QnQ configurations where VLAN insertion is enabled in HW */
+#define BE_MAX_GSO_SIZE (65535 - 2 * VLAN_HLEN)
+
#define BE_NUM_VLANS_SUPPORTED 64
#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
@@ -89,6 +92,10 @@
#define BE3_MAX_TX_QS 16
#define BE3_MAX_EVT_QS 16
#define BE3_SRIOV_MAX_EVT_QS 8
+#define SH_VF_MAX_NIC_EQS 3 /* Skyhawk VFs can have a max of 4 EQs
+ * and at least 1 is granted to
+ * either SURF or DPDK
+ */
#define MAX_RSS_IFACES 15
#define MAX_RX_QS 32
@@ -111,6 +118,8 @@
#define RSS_INDIR_TABLE_LEN 128
#define RSS_HASH_KEY_LEN 40
+#define BE_UNKNOWN_PHY_STATE 0xFF
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -118,27 +127,27 @@ struct be_dma_mem {
};
struct be_queue_info {
+ u32 len;
+ u32 entry_size; /* Size of an element in the queue */
+ u32 tail, head;
+ atomic_t used; /* Number of valid elements in the queue */
+ u32 id;
struct be_dma_mem dma_mem;
- u16 len;
- u16 entry_size; /* Size of an element in the queue */
- u16 id;
- u16 tail, head;
bool created;
- atomic_t used; /* Number of valid elements in the queue */
};
-static inline u32 MODULO(u16 val, u16 limit)
+static inline u32 MODULO(u32 val, u32 limit)
{
BUG_ON(limit & (limit - 1));
return val & (limit - 1);
}
-static inline void index_adv(u16 *index, u16 val, u16 limit)
+static inline void index_adv(u32 *index, u32 val, u32 limit)
{
*index = MODULO((*index + val), limit);
}
-static inline void index_inc(u16 *index, u16 limit)
+static inline void index_inc(u32 *index, u32 limit)
{
*index = MODULO((*index + 1), limit);
}
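The widening from u16 to u32 matters because these rings can now be long enough for 16-bit head/tail arithmetic to wrap incorrectly; the mask trick itself only works for power-of-two lengths, which the BUG_ON asserts. Standalone:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t modulo_pow2(uint32_t val, uint32_t limit)
{
        assert((limit & (limit - 1)) == 0);     /* power of two only */
        return val & (limit - 1);               /* same as val % limit */
}

int main(void)
{
        uint32_t head = 1022, len = 1024;

        head = modulo_pow2(head + 5, len);
        printf("head = %u\n", head);    /* 3: wrapped past the ring end */
        return 0;
}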
@@ -163,7 +172,7 @@ static inline void queue_head_inc(struct be_queue_info *q)
index_inc(&q->head, q->len);
}
-static inline void index_dec(u16 *index, u16 limit)
+static inline void index_dec(u32 *index, u32 limit)
{
*index = MODULO((*index - 1), limit);
}
@@ -386,13 +395,17 @@ enum vf_state {
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD BIT(7)
#define BE_FLAGS_VXLAN_OFFLOADS BIT(8)
#define BE_FLAGS_SETUP_DONE BIT(9)
-#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10)
+#define BE_FLAGS_PHY_MISCONFIGURED BIT(10)
#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
#define BE_FLAGS_OS2BMC BIT(12)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
+#define MAX_ERR_RECOVERY_RETRY_COUNT 3
+#define ERR_DETECTION_DELAY 1000
+#define ERR_RECOVERY_RETRY_DELAY 30000
+
/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP 0x1
#define LANCER_DELETE_FW_DUMP 0x2
@@ -530,6 +543,7 @@ struct be_adapter {
u16 work_counter;
struct delayed_work be_err_detection_work;
+ u8 recovery_retries;
u8 err_flags;
bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
u32 flags;
@@ -595,6 +609,7 @@ struct be_adapter {
u32 bmc_filt_mask;
u32 fat_dump_len;
u16 serial_num[CNTL_SERIAL_NUM_WORDS];
+ u8 phy_state; /* state of sfp optics (functional, faulted, etc.) */
};
#define be_physfn(adapter) (!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index b63d8ad2e115..22402db275f2 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,19 +19,25 @@
#include "be.h"
#include "be_cmds.h"
-static char *be_port_misconfig_evt_desc[] = {
- "A valid SFP module detected",
- "Optics faulted/ incorrectly installed/ not installed.",
- "Optics of two types installed.",
- "Incompatible optics.",
- "Unknown port SFP status"
+char *be_misconfig_evt_port_state[] = {
+ "Physical Link is functional",
+ "Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
+ "Optics of two types installed – Remove one optic or install matching pair of optics.",
+ "Incompatible optics – Replace with compatible optics for card to function.",
+ "Unqualified optics – Replace with Avago optics for Warranty and Technical Support.",
+ "Uncertified optics – Replace with Avago-certified optics to enable link operation."
};
-static char *be_port_misconfig_remedy_desc[] = {
- "",
- "Reseat optics. If issue not resolved, replace",
- "Remove one optic or install matching pair of optics",
- "Replace with compatible optics for card to function",
+static char *be_port_misconfig_evt_severity[] = {
+ "KERN_WARN",
+ "KERN_INFO",
+ "KERN_ERR",
+ "KERN_WARN"
+};
+
+static char *phy_state_oper_desc[] = {
+ "Link is non-operational",
+ "Link is operational",
""
};
@@ -65,7 +71,22 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
CMD_SUBSYSTEM_COMMON,
BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
- }
+ },
+ {
+ OPCODE_LOWLEVEL_HOST_DDR_DMA,
+ CMD_SUBSYSTEM_LOWLEVEL,
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_LOWLEVEL_LOOPBACK_TEST,
+ CMD_SUBSYSTEM_LOWLEVEL,
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
+ {
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ CMD_SUBSYSTEM_LOWLEVEL,
+ BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+ },
};
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -236,7 +257,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (base_status != MCC_STATUS_SUCCESS &&
!be_skip_err_log(opcode, base_status, addl_status)) {
- if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+ if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
+ addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
dev_warn(&adapter->pdev->dev,
"VF is not privileged to issue opcode %d-%d\n",
opcode, subsystem);
@@ -281,22 +303,56 @@ static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
{
struct be_async_event_misconfig_port *evt =
(struct be_async_event_misconfig_port *)compl;
- u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
+ u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1);
+ u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2);
+ u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE;
struct device *dev = &adapter->pdev->dev;
- u8 port_misconfig_evt;
+ u8 msg_severity = DEFAULT_MSG_SEVERITY;
+ u8 phy_state_info;
+ u8 new_phy_state;
+
+ new_phy_state =
+ (sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff;
+
+ if (new_phy_state == adapter->phy_state)
+ return;
+
+ adapter->phy_state = new_phy_state;
+
+ /* for older fw that doesn't populate link effect data */
+ if (!sfp_misconfig_evt_word2)
+ goto log_message;
- port_misconfig_evt =
- ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
+ phy_state_info =
+ (sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff;
+ if (phy_state_info & PHY_STATE_INFO_VALID) {
+ msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1;
+
+ if (be_phy_unqualified(new_phy_state))
+ phy_oper_state = (phy_state_info & PHY_STATE_OPER);
+ }
+
+log_message:
/* Log an error message that would allow a user to determine
* whether the SFPs have an issue
*/
- dev_info(dev, "Port %c: %s %s", adapter->port_name,
- be_port_misconfig_evt_desc[port_misconfig_evt],
- be_port_misconfig_remedy_desc[port_misconfig_evt]);
+ if (be_phy_state_unknown(new_phy_state))
+ dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
+ "Port %c: Unrecognized Optics state: 0x%x. %s",
+ adapter->port_name,
+ new_phy_state,
+ phy_state_oper_desc[phy_oper_state]);
+ else
+ dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
+ "Port %c: %s %s",
+ adapter->port_name,
+ be_misconfig_evt_port_state[new_phy_state],
+ phy_state_oper_desc[phy_oper_state]);
- if (port_misconfig_evt == INCOMPATIBLE_SFP)
- adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+ /* Log Vendor name and part no. if a misconfigured SFP is detected */
+ if (be_phy_misconfigured(new_phy_state))
+ adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED;
}
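Both event words pack one byte per port, which is why the handler shifts by hba_port_num * 8 before masking. The extraction on its own, with a sample word assumed:

#include <stdint.h>
#include <stdio.h>

static uint8_t port_byte(uint32_t word, unsigned int port)
{
        return (word >> (port * 8)) & 0xff;     /* one byte per port */
}

int main(void)
{
        uint32_t word1 = 0x04030201;    /* sample: port0=1 .. port3=4 */
        unsigned int p;

        for (p = 0; p < 4; p++)
                printf("port %u: phy state %u\n", p, port_byte(word1, p));
        return 0;
}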
/* Grp5 CoS Priority evt */
@@ -540,7 +596,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
int status;
struct be_mcc_wrb *wrb;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- u16 index = mcc_obj->q.head;
+ u32 index = mcc_obj->q.head;
struct be_cmd_resp_hdr *resp;
index_dec(&index, mcc_obj->q.len);
@@ -1497,34 +1553,25 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
return status;
}
-/* Uses MCCQ */
+/* Uses MCCQ if available else MBOX */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
- struct be_mcc_wrb *wrb;
+ struct be_mcc_wrb wrb = {0};
struct be_cmd_req_if_destroy *req;
int status;
if (interface_id == -1)
return 0;
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
+ req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
- sizeof(*req), wrb, NULL);
+ sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
- status = be_mcc_notify_wait(adapter);
-err:
- spin_unlock_bh(&adapter->mcc_lock);
+ status = be_cmd_notify_wait(adapter, &wrb);
return status;
}
@@ -3168,6 +3215,10 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_set_lmode *req;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+ CMD_SUBSYSTEM_LOWLEVEL))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -3213,6 +3264,10 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
struct be_cmd_resp_loopback_test *resp;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
+ CMD_SUBSYSTEM_LOWLEVEL))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -3259,6 +3314,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
int status;
int i, j = 0;
+ if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
+ CMD_SUBSYSTEM_LOWLEVEL))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 6d9a8d78e8ad..d8540ae95e5a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -68,7 +68,8 @@ enum mcc_addl_status {
MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab,
MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56,
- MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57
+ MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57,
+ MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES = 0x60
};
#define CQE_BASE_STATUS_MASK 0xFFFF
@@ -175,10 +176,53 @@ struct be_async_event_qnq {
u32 flags;
} __packed;
-#define INCOMPATIBLE_SFP 0x3
+enum {
+ BE_PHY_FUNCTIONAL = 0,
+ BE_PHY_NOT_PRESENT = 1,
+ BE_PHY_DIFF_MEDIA = 2,
+ BE_PHY_INCOMPATIBLE = 3,
+ BE_PHY_UNQUALIFIED = 4,
+ BE_PHY_UNCERTIFIED = 5
+};
+
+#define PHY_STATE_MSG_SEVERITY 0x6
+#define PHY_STATE_OPER 0x1
+#define PHY_STATE_INFO_VALID 0x80
+#define PHY_STATE_OPER_MSG_NONE 0x2
+#define DEFAULT_MSG_SEVERITY 0x1
+
+#define be_phy_state_unknown(phy_state) (phy_state > BE_PHY_UNCERTIFIED)
+#define be_phy_unqualified(phy_state) \
+ (phy_state == BE_PHY_UNQUALIFIED || \
+ phy_state == BE_PHY_UNCERTIFIED)
+#define be_phy_misconfigured(phy_state) \
+ (phy_state == BE_PHY_INCOMPATIBLE || \
+ phy_state == BE_PHY_UNQUALIFIED || \
+ phy_state == BE_PHY_UNCERTIFIED)
+
+extern char *be_misconfig_evt_port_state[];
+
/* async event indicating misconfigured port */
struct be_async_event_misconfig_port {
+ /* DATA_WORD1:
+ * phy state of port 0: bits 7 - 0
+ * phy state of port 1: bits 15 - 8
+ * phy state of port 2: bits 23 - 16
+ * phy state of port 3: bits 31 - 24
+ */
u32 event_data_word1;
+ /* DATA_WORD2:
+ * phy state info of port 0: bits 7 - 0
+ * phy state info of port 1: bits 15 - 8
+ * phy state info of port 2: bits 23 - 16
+ * phy state info of port 3: bits 31 - 24
+ *
+ * PHY STATE INFO:
+ * Link operability :bit 0
+ * Message severity :bit 2 - 1
+ * Rsvd :bits 6 - 3
+ * phy state info valid :bit 7
+ */
u32 event_data_word2;
u32 rsvd0;
u32 flags;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index a19ac441336f..2ff691636dac 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -720,29 +720,32 @@ static int be_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
switch (state) {
case ETHTOOL_ID_ACTIVE:
- be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
- &adapter->beacon_state);
- return 1; /* cycle on/off once per second */
+ status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
+ &adapter->beacon_state);
+ if (status)
+ return be_cmd_status(status);
+ return 1; /* cycle on/off once per second */
case ETHTOOL_ID_ON:
- be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
- BEACON_STATE_ENABLED);
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+ 0, 0, BEACON_STATE_ENABLED);
break;
case ETHTOOL_ID_OFF:
- be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
- BEACON_STATE_DISABLED);
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+ 0, 0, BEACON_STATE_DISABLED);
break;
case ETHTOOL_ID_INACTIVE:
- be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
- adapter->beacon_state);
+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+ 0, 0, adapter->beacon_state);
}
- return 0;
+ return be_cmd_status(status);
}
static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d1cf1274fc2f..536686476369 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -854,9 +854,9 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
/* Grab a WRB header for xmit */
-static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
+static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
- u16 head = txo->q.head;
+ u32 head = txo->q.head;
queue_head_inc(&txo->q);
return head;
@@ -900,7 +900,7 @@ static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
* WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
*/
static void be_xmit_restore(struct be_adapter *adapter,
- struct be_tx_obj *txo, u16 head, bool map_single,
+ struct be_tx_obj *txo, u32 head, bool map_single,
u32 copied)
{
struct device *dev;
@@ -935,7 +935,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
struct device *dev = &adapter->pdev->dev;
struct be_queue_info *txq = &txo->q;
bool map_single = false;
- u16 head = txq->head;
+ u32 head = txq->head;
dma_addr_t busaddr;
int len;
@@ -1128,6 +1128,8 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
struct sk_buff *skb,
struct be_wrb_params *wrb_params)
{
+ int err;
+
/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
* packets that are 32b or less may cause a transmit stall
* on that port. The workaround is to pad such packets
@@ -1144,6 +1146,13 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
return NULL;
}
+ /* The stack can send us skbs with length greater than
+ * what the HW can handle. Trim the extra bytes.
+ */
+ WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
+ err = pskb_trim(skb, BE_MAX_GSO_SIZE);
+ WARN_ON(err);
+
return skb;
}
@@ -1468,6 +1477,9 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
return 0;
+ if (!test_bit(vid, adapter->vids))
+ return 0;
+
clear_bit(vid, adapter->vids);
adapter->vlans_added--;
@@ -1919,8 +1931,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
if (!aic->enable)
return 0;
- if (time_before_eq(now, aic->jiffies) ||
- jiffies_to_msecs(now - aic->jiffies) < 1)
+ if (jiffies_to_msecs(now - aic->jiffies) < 1)
eqd = aic->prev_eqd;
else
eqd = be_get_new_eqd(eqo);
@@ -1993,7 +2004,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *rx_page_info;
struct be_queue_info *rxq = &rxo->q;
- u16 frag_idx = rxq->tail;
+ u32 frag_idx = rxq->tail;
rx_page_info = &rxo->page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
@@ -2404,10 +2415,11 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
{
struct sk_buff **sent_skbs = txo->sent_skb_list;
struct be_queue_info *txq = &txo->q;
- u16 frag_index, num_wrbs = 0;
struct sk_buff *skb = NULL;
bool unmap_skb_hdr = false;
struct be_eth_wrb *wrb;
+ u16 num_wrbs = 0;
+ u32 frag_index;
do {
if (sent_skbs[txq->tail]) {
@@ -2519,10 +2531,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
static void be_tx_compl_clean(struct be_adapter *adapter)
{
- u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
struct device *dev = &adapter->pdev->dev;
+ u16 cmpl = 0, timeo = 0, num_wrbs = 0;
struct be_tx_compl_info *txcp;
struct be_queue_info *txq;
+ u32 end_idx, notified_idx;
struct be_tx_obj *txo;
int i, pending_txqs;
@@ -3368,6 +3381,7 @@ done:
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
+ struct rss_info *rss = &adapter->rss_info;
struct be_queue_info *q;
struct be_rx_obj *rxo;
int i;
@@ -3394,6 +3408,12 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
}
be_queue_free(adapter, q);
}
+
+ if (rss->rss_flags) {
+ rss->rss_flags = RSS_ENABLE_NONE;
+ be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ 128, rss->rss_hkey);
+ }
}
static void be_disable_if_filters(struct be_adapter *adapter)
@@ -3514,20 +3534,21 @@ static int be_rx_qs_create(struct be_adapter *adapter)
if (!BEx_chip(adapter))
rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
RSS_ENABLE_UDP_IPV6;
+
+ netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
+ rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ RSS_INDIR_TABLE_LEN, rss_key);
+ if (rc) {
+ rss->rss_flags = RSS_ENABLE_NONE;
+ return rc;
+ }
+
+ memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
} else {
/* Disable RSS, if only default RX Q is created */
rss->rss_flags = RSS_ENABLE_NONE;
}
- netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
- rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
- RSS_INDIR_TABLE_LEN, rss_key);
- if (rc) {
- rss->rss_flags = RSS_ENABLE_NONE;
- return rc;
- }
-
- memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
/* Post 1 less than RXQ-len to avoid head being equal to tail,
* which is a queue empty condition
@@ -3794,18 +3815,15 @@ static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
struct be_resources res = adapter->pool_res;
u16 num_vf_qs = 1;
- /* Distribute the queue resources equally among the PF and it's VFs
+ /* Distribute the queue resources among the PF and its VFs
* Do not distribute queue resources in multi-channel configuration.
*/
if (num_vfs && !be_is_mc(adapter)) {
- /* If number of VFs requested is 8 less than max supported,
- * assign 8 queue pairs to the PF and divide the remaining
- * resources evenly among the VFs
- */
- if (num_vfs < (be_max_vfs(adapter) - 8))
- num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
- else
- num_vf_qs = res.max_rss_qs / num_vfs;
+ /* Divide the qpairs evenly among the VFs and the PF, capped
+ * at VF-EQ-count. Any remaining qpairs belong to the PF.
+ */
+ num_vf_qs = min(SH_VF_MAX_NIC_EQS,
+ res.max_rss_qs / (num_vfs + 1));
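Two worked cases of the new split, with an assumed pool of 32 RSS queues: 15 VFs give 32/16 = 2 qpairs per function, below the cap; 3 VFs give 32/4 = 8, which the SH_VF_MAX_NIC_EQS cap trims to 3, leaving the remainder with the PF.

#include <stdio.h>

#define SH_VF_MAX_NIC_EQS 3

static unsigned int vf_qs(unsigned int max_rss_qs, unsigned int num_vfs)
{
        unsigned int share = max_rss_qs / (num_vfs + 1);

        return share < SH_VF_MAX_NIC_EQS ? share : SH_VF_MAX_NIC_EQS;
}

int main(void)
{
        printf("15 VFs -> %u qpairs/VF\n", vf_qs(32, 15));      /* 2 */
        printf(" 3 VFs -> %u qpairs/VF\n", vf_qs(32, 3));       /* 3 */
        return 0;
}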
/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
* interfaces per port. Provide RSS on VFs, only if number
@@ -4084,6 +4102,7 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->if_handle = -1;
adapter->be3_native = false;
adapter->if_flags = 0;
+ adapter->phy_state = BE_UNKNOWN_PHY_STATE;
if (be_physfn(adapter))
adapter->cmd_privileges = MAX_PRIVILEGES;
else
@@ -4267,10 +4286,10 @@ static void be_schedule_worker(struct be_adapter *adapter)
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
-static void be_schedule_err_detection(struct be_adapter *adapter)
+static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
schedule_delayed_work(&adapter->be_err_detection_work,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(delay));
adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
@@ -4309,6 +4328,23 @@ err:
return status;
}
+static int be_if_create(struct be_adapter *adapter)
+{
+ u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+ u32 cap_flags = be_if_cap_flags(adapter);
+ int status;
+
+ if (adapter->cfg_num_qs == 1)
+ cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
+
+ en_flags &= cap_flags;
+ /* will enable all the needed filter flags in be_open() */
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
+
+ return status;
+}
+
int be_update_queues(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -4326,6 +4362,9 @@ int be_update_queues(struct be_adapter *adapter)
be_msix_disable(adapter);
be_clear_queues(adapter);
+ status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
+ if (status)
+ return status;
if (!msix_enabled(adapter)) {
status = be_msix_enable(adapter);
@@ -4333,6 +4372,10 @@ int be_update_queues(struct be_adapter *adapter)
return status;
}
+ status = be_if_create(adapter);
+ if (status)
+ return status;
+
status = be_setup_queues(adapter);
if (status)
return status;
@@ -4397,7 +4440,6 @@ static int be_func_init(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 en_flags;
int status;
status = be_func_init(adapter);
@@ -4430,10 +4472,7 @@ static int be_setup(struct be_adapter *adapter)
goto err;
/* will enable all the needed filter flags in be_open() */
- en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
- en_flags = en_flags & be_if_cap_flags(adapter);
- status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
- &adapter->if_handle, 0);
+ status = be_if_create(adapter);
if (status)
goto err;
@@ -4591,6 +4630,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
/* BE and Lancer chips support VEB mode only */
if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
+ if (!pci_sriov_get_totalvfs(adapter->pdev))
+ return 0;
hsw_mode = PORT_FWD_TYPE_VEB;
} else {
status = be_cmd_get_hsw_config(adapter, NULL, 0,
@@ -4806,7 +4848,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX;
- if (be_multi_rxq(adapter))
+ if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= netdev->hw_features |
@@ -4819,7 +4861,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
+ netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
netdev->netdev_ops = &be_netdev_ops;
@@ -4861,21 +4903,27 @@ static int be_resume(struct be_adapter *adapter)
static int be_err_recover(struct be_adapter *adapter)
{
- struct device *dev = &adapter->pdev->dev;
int status;
+ /* Error recovery is supported only on Lancer as of now */
+ if (!lancer_chip(adapter))
+ return -EIO;
+
+ /* Wait for adapter to reach quiescent state before
+ * destroying queues
+ */
+ status = be_fw_wait_ready(adapter);
+ if (status)
+ goto err;
+
+ be_cleanup(adapter);
+
status = be_resume(adapter);
if (status)
goto err;
- dev_info(dev, "Adapter recovery successful\n");
return 0;
err:
- if (be_physfn(adapter))
- dev_err(dev, "Adapter recovery failed\n");
- else
- dev_err(dev, "Re-trying adapter recovery\n");
-
return status;
}
@@ -4884,21 +4932,43 @@ static void be_err_detection_task(struct work_struct *work)
struct be_adapter *adapter =
container_of(work, struct be_adapter,
be_err_detection_work.work);
- int status = 0;
+ struct device *dev = &adapter->pdev->dev;
+ int recovery_status;
+ int delay = ERR_DETECTION_DELAY;
be_detect_error(adapter);
- if (be_check_error(adapter, BE_ERROR_HW)) {
- be_cleanup(adapter);
-
- /* As of now error recovery support is in Lancer only */
- if (lancer_chip(adapter))
- status = be_err_recover(adapter);
+ if (be_check_error(adapter, BE_ERROR_HW))
+ recovery_status = be_err_recover(adapter);
+ else
+ goto reschedule_task;
+
+ if (!recovery_status) {
+ adapter->recovery_retries = 0;
+ dev_info(dev, "Adapter recovery successful\n");
+ goto reschedule_task;
+ } else if (be_virtfn(adapter)) {
+ /* For VFs, check every second whether the PF has
+ * allocated resources.
+ */
+ dev_err(dev, "Re-trying adapter recovery\n");
+ goto reschedule_task;
+ } else if (adapter->recovery_retries++ <
+ MAX_ERR_RECOVERY_RETRY_COUNT) {
+ /* In case of another error during recovery, it takes 30 sec
+ * for the adapter to come out of error. Retry error recovery after
+ * this time interval.
+ */
+ dev_err(dev, "Re-trying adapter recovery\n");
+ delay = ERR_RECOVERY_RETRY_DELAY;
+ goto reschedule_task;
+ } else {
+ dev_err(dev, "Adapter recovery failed\n");
}
- /* Always attempt recovery on VFs */
- if (!status || be_virtfn(adapter))
- be_schedule_err_detection(adapter);
+ return;
+reschedule_task:
+ be_schedule_err_detection(adapter, delay);
}
static void be_log_sfp_info(struct be_adapter *adapter)
@@ -4908,11 +4978,13 @@ static void be_log_sfp_info(struct be_adapter *adapter)
status = be_cmd_query_sfp_info(adapter);
if (!status) {
dev_err(&adapter->pdev->dev,
- "Unqualified SFP+ detected on %c from %s part no: %s",
- adapter->port_name, adapter->phy.vendor_name,
+ "Port %c: %s Vendor: %s part no: %s",
+ adapter->port_name,
+ be_misconfig_evt_port_state[adapter->phy_state],
+ adapter->phy.vendor_name,
adapter->phy.vendor_pn);
}
- adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
+ adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}
static void be_worker(struct work_struct *work)
@@ -4956,7 +5028,7 @@ static void be_worker(struct work_struct *work)
if (!skyhawk_chip(adapter))
be_eqd_update(adapter, false);
- if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
+ if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
be_log_sfp_info(adapter);
reschedule:
@@ -5298,7 +5370,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
be_roce_dev_add(adapter);
- be_schedule_err_detection(adapter);
+ be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
/* On Die temperature not supported for VF. */
if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
@@ -5365,7 +5437,7 @@ static int be_pci_resume(struct pci_dev *pdev)
if (status)
return status;
- be_schedule_err_detection(adapter);
+ be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
if (adapter->wol_en)
be_setup_wol(adapter, false);
@@ -5401,6 +5473,8 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
dev_err(&adapter->pdev->dev, "EEH error detected\n");
+ be_roce_dev_remove(adapter);
+
if (!be_check_error(adapter, BE_ERROR_EEH)) {
be_set_error(adapter, BE_ERROR_EEH);
@@ -5465,7 +5539,9 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
- be_schedule_err_detection(adapter);
+ be_roce_dev_add(adapter);
+
+ be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
return;
err:
dev_err(&adapter->pdev->dev, "EEH resume failed\n");
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index b1026689b78f..1f23845a0694 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -43,20 +43,21 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));
/* In case dst is not aligned we need an intermediate buffer */
- if (dst_is_aligned)
- for (i = 0; i < len; i++, reg++)
- *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+ if (dst_is_aligned) {
+ ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len);
+ reg += len;
+ }
else { /* !dst_is_aligned */
for (i = 0; i < len; i++, reg++) {
u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
- put_unaligned(buf, reg);
+ put_unaligned_be32(buf, reg);
}
}
-
/* copy last bytes (if any) */
if (last) {
- u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
- memcpy((u8*)reg, &buf, last);
+ u32 buf;
+ ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1);
+ memcpy((u8 *)reg, &buf, last);
}
}
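ioread32_rep() reads the same MMIO location len times into a buffer, which is the right primitive for popping a FIFO through one register window (a plain memcpy would advance the source pointer). A userspace stand-in, with a counter simulating the FIFO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fifo_pop(void)          /* models one MMIO register */
{
        static uint32_t next = 1;
        return next++;
}

static void read_rep(uint32_t *buf, unsigned int count)
{
        while (count--)
                *buf++ = fifo_pop();    /* source stays put, dst moves */
}

int main(void)
{
        uint32_t buf[4];

        read_rep(buf, 4);
        printf("%u %u %u %u\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}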
@@ -66,26 +67,28 @@ static u32 nps_enet_rx_handler(struct net_device *ndev)
u32 work_done = 0;
struct nps_enet_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
- struct nps_enet_rx_ctl rx_ctrl;
+ u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
+ u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT;
+ u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT;
- rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
- frame_len = rx_ctrl.nr;
+ frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT;
/* Check if we got RX */
- if (!rx_ctrl.cr)
+ if (!rx_ctrl_cr)
return work_done;
/* If we got here there is a work for us */
work_done++;
/* Check Rx error */
- if (rx_ctrl.er) {
+ if (rx_ctrl_er) {
ndev->stats.rx_errors++;
err = 1;
}
/* Check Rx CRC error */
- if (rx_ctrl.crc) {
+ if (rx_ctrl_crc) {
ndev->stats.rx_crc_errors++;
ndev->stats.rx_dropped++;
err = 1;
@@ -136,23 +139,24 @@ rx_irq_frame_done:
static void nps_enet_tx_handler(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_tx_ctl tx_ctrl;
-
- tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
+ u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT;
+ u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
/* Check if we got TX */
- if (!priv->tx_packet_sent || tx_ctrl.ct)
+ if (!priv->tx_packet_sent || tx_ctrl_ct)
return;
/* Ack Tx ctrl register */
nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
/* Check Tx transmit error */
- if (unlikely(tx_ctrl.et)) {
+ if (unlikely(tx_ctrl_et)) {
ndev->stats.tx_errors++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += tx_ctrl.nt;
+ ndev->stats.tx_bytes += tx_ctrl_nt;
}
dev_kfree_skb(priv->tx_skb);
@@ -178,13 +182,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
nps_enet_tx_handler(ndev);
work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) {
- struct nps_enet_buf_int_enable buf_int_enable;
+ u32 buf_int_enable_value = 0;
napi_complete(napi);
- buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
- buf_int_enable.tx_done = NPS_ENET_ENABLE;
+
+ /* set tx_done and rx_rdy bits */
+ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
+ buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
+
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
- buf_int_enable.value);
+ buf_int_enable_value);
}
return work_done;
@@ -205,13 +212,12 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
struct net_device *ndev = dev_instance;
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_rx_ctl rx_ctrl;
- struct nps_enet_tx_ctl tx_ctrl;
-
- rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
- tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+ u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
+ u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
- if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
+ if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr)
if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi);
@@ -223,22 +229,24 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
static void nps_enet_set_hw_mac_address(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_ge_mac_cfg_1 ge_mac_cfg_1;
- struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2;
+ u32 ge_mac_cfg_1_value = 0;
+ u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
/* set MAC address in HW */
- ge_mac_cfg_1.octet_0 = ndev->dev_addr[0];
- ge_mac_cfg_1.octet_1 = ndev->dev_addr[1];
- ge_mac_cfg_1.octet_2 = ndev->dev_addr[2];
- ge_mac_cfg_1.octet_3 = ndev->dev_addr[3];
- ge_mac_cfg_2->octet_4 = ndev->dev_addr[4];
- ge_mac_cfg_2->octet_5 = ndev->dev_addr[5];
+ ge_mac_cfg_1_value |= ndev->dev_addr[0] << CFG_1_OCTET_0_SHIFT;
+ ge_mac_cfg_1_value |= ndev->dev_addr[1] << CFG_1_OCTET_1_SHIFT;
+ ge_mac_cfg_1_value |= ndev->dev_addr[2] << CFG_1_OCTET_2_SHIFT;
+ ge_mac_cfg_1_value |= ndev->dev_addr[3] << CFG_1_OCTET_3_SHIFT;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_4_MASK)
+ | ndev->dev_addr[4] << CFG_2_OCTET_4_SHIFT;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_5_MASK)
+ | ndev->dev_addr[5] << CFG_2_OCTET_5_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1,
- ge_mac_cfg_1.value);
+ ge_mac_cfg_1_value);
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
- ge_mac_cfg_2->value);
+ *ge_mac_cfg_2_value);
}
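The conversion away from C bitfields (whose layout is implementation-defined) leaves every field update as a read-modify-write with an explicit mask and shift. The generic shape, with an invented example layout:

#include <stdint.h>
#include <stdio.h>

static uint32_t set_field(uint32_t reg, uint32_t mask,
                          unsigned int shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask); /* RMW update */
}

int main(void)
{
        uint32_t cfg = 0xffffffff;

        /* place 0xAB into an assumed bits-15:8 field */
        cfg = set_field(cfg, 0x0000ff00, 8, 0xAB);
        printf("cfg = 0x%08x\n", cfg);  /* 0xffffabff */
        return 0;
}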
/**
@@ -254,93 +262,97 @@ static void nps_enet_set_hw_mac_address(struct net_device *ndev)
static void nps_enet_hw_reset(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_ge_rst ge_rst;
- struct nps_enet_phase_fifo_ctl phase_fifo_ctl;
+ u32 ge_rst_value = 0, phase_fifo_ctl_value = 0;
- ge_rst.value = 0;
- phase_fifo_ctl.value = 0;
/* Pcs reset sequence*/
- ge_rst.gmac_0 = NPS_ENET_ENABLE;
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value);
+ ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
- ge_rst.value = 0;
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value);
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */
- phase_fifo_ctl.rst = NPS_ENET_ENABLE;
- phase_fifo_ctl.init = NPS_ENET_ENABLE;
+ phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_RST_SHIFT;
+ phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_INIT_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
- phase_fifo_ctl.value);
+ phase_fifo_ctl_value);
usleep_range(10, 20);
- phase_fifo_ctl.value = 0;
+ phase_fifo_ctl_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
- phase_fifo_ctl.value);
+ phase_fifo_ctl_value);
}
static void nps_enet_hw_enable_control(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_ge_mac_cfg_0 ge_mac_cfg_0;
- struct nps_enet_buf_int_enable buf_int_enable;
- struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2;
- struct nps_enet_ge_mac_cfg_3 *ge_mac_cfg_3 = &priv->ge_mac_cfg_3;
+ u32 ge_mac_cfg_0_value = 0, buf_int_enable_value = 0;
+ u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
+ u32 *ge_mac_cfg_3_value = &priv->ge_mac_cfg_3_value;
s32 max_frame_length;
- ge_mac_cfg_0.value = 0;
- buf_int_enable.value = 0;
/* Enable Rx and Tx statistics */
- ge_mac_cfg_2->stat_en = NPS_ENET_GE_MAC_CFG_2_STAT_EN;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_STAT_EN_MASK)
+ | NPS_ENET_GE_MAC_CFG_2_STAT_EN << CFG_2_STAT_EN_SHIFT;
/* Discard packets with different MAC address */
- ge_mac_cfg_2->disc_da = NPS_ENET_ENABLE;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
+ | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
/* Discard multicast packets */
- ge_mac_cfg_2->disc_mc = NPS_ENET_ENABLE;
+ *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
+ | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
- ge_mac_cfg_2->value);
+ *ge_mac_cfg_2_value);
/* Discard Packets bigger than max frame length */
max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
- if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
- ge_mac_cfg_3->max_len = max_frame_length;
+ if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+ *ge_mac_cfg_3_value =
+ (*ge_mac_cfg_3_value & ~CFG_3_MAX_LEN_MASK)
+ | max_frame_length << CFG_3_MAX_LEN_SHIFT;
+ }
/* Enable interrupts */
- buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
- buf_int_enable.tx_done = NPS_ENET_ENABLE;
+ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
+ buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
- buf_int_enable.value);
+ buf_int_enable_value);
/* Write device MAC address to HW */
nps_enet_set_hw_mac_address(ndev);
/* Rx and Tx HW features */
- ge_mac_cfg_0.tx_pad_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.tx_crc_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.rx_crc_strip = NPS_ENET_ENABLE;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_PAD_EN_SHIFT;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_CRC_EN_SHIFT;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_CRC_STRIP_SHIFT;
/* IFG configuration */
- ge_mac_cfg_0.rx_ifg = NPS_ENET_GE_MAC_CFG_0_RX_IFG;
- ge_mac_cfg_0.tx_ifg = NPS_ENET_GE_MAC_CFG_0_TX_IFG;
+ ge_mac_cfg_0_value |=
+ NPS_ENET_GE_MAC_CFG_0_RX_IFG << CFG_0_RX_IFG_SHIFT;
+ ge_mac_cfg_0_value |=
+ NPS_ENET_GE_MAC_CFG_0_TX_IFG << CFG_0_TX_IFG_SHIFT;
/* preamble configuration */
- ge_mac_cfg_0.rx_pr_check_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.tx_pr_len = NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_PR_CHECK_EN_SHIFT;
+ ge_mac_cfg_0_value |=
+ NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN << CFG_0_TX_PR_LEN_SHIFT;
/* enable flow control frames */
- ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
- ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_FC_EN_SHIFT;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_FC_EN_SHIFT;
+ ge_mac_cfg_0_value |=
+ NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR << CFG_0_TX_FC_RETR_SHIFT;
+ *ge_mac_cfg_3_value = (*ge_mac_cfg_3_value & ~CFG_3_CF_DROP_MASK)
+ | NPS_ENET_ENABLE << CFG_3_CF_DROP_SHIFT;
/* Enable Rx and Tx */
- ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
- ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_EN_SHIFT;
+ ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_EN_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
- ge_mac_cfg_3->value);
+ *ge_mac_cfg_3_value);
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
- ge_mac_cfg_0.value);
+ ge_mac_cfg_0_value);
}
static void nps_enet_hw_disable_control(struct net_device *ndev)
@@ -358,31 +370,28 @@ static void nps_enet_send_frame(struct net_device *ndev,
struct sk_buff *skb)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_tx_ctl tx_ctrl;
+ u32 tx_ctrl_value = 0;
short length = skb->len;
u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
u32 *src = (void *)skb->data;
bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
- tx_ctrl.value = 0;
/* In case src is not aligned we need an intermediate buffer */
if (src_is_aligned)
- for (i = 0; i < len; i++, src++)
- nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
+ iowrite32_rep(priv->regs_base + NPS_ENET_REG_TX_BUF, src, len);
else /* !src_is_aligned */
for (i = 0; i < len; i++, src++)
nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
- get_unaligned(src));
+ get_unaligned_be32(src));
/* Write the length of the Frame */
- tx_ctrl.nt = length;
+ tx_ctrl_value |= length << TX_CTL_NT_SHIFT;
/* Indicate SW is done */
priv->tx_packet_sent = true;
- tx_ctrl.ct = NPS_ENET_ENABLE;
-
+ tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
/* Send Frame */
- nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl.value);
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
}
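
The aligned Tx path now uses iowrite32_rep(), i.e. repeated 32-bit stores to the same FIFO register instead of a loop around nps_enet_reg_set(). A plain-C sketch of the access pattern, with a volatile variable standing in for the memory-mapped Tx buffer register:

#include <stddef.h>
#include <stdint.h>

static volatile uint32_t tx_buf_reg;	/* stand-in for the MMIO FIFO */

static void write32_rep(volatile uint32_t *reg, const uint32_t *src,
			size_t count)
{
	while (count--)
		*reg = *src++;	/* every word targets the same address */
}

int main(void)
{
	uint32_t frame[4] = { 0x11, 0x22, 0x33, 0x44 };

	write32_rep(&tx_buf_reg, frame, 4);
	return 0;
}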
/**
@@ -422,19 +431,23 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
static void nps_enet_set_rx_mode(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
- struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2;
-
- ge_mac_cfg_2.value = priv->ge_mac_cfg_2.value;
+ u32 ge_mac_cfg_2_value = priv->ge_mac_cfg_2_value;
if (ndev->flags & IFF_PROMISC) {
- ge_mac_cfg_2.disc_da = NPS_ENET_DISABLE;
- ge_mac_cfg_2.disc_mc = NPS_ENET_DISABLE;
+ ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
+ | NPS_ENET_DISABLE << CFG_2_DISK_DA_SHIFT;
+ ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
+ | NPS_ENET_DISABLE << CFG_2_DISK_MC_SHIFT;
+
} else {
- ge_mac_cfg_2.disc_da = NPS_ENET_ENABLE;
- ge_mac_cfg_2.disc_mc = NPS_ENET_ENABLE;
+ ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
+ | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
+ ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
+ | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
+
}
- nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2.value);
+ nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value);
}
/**
@@ -453,12 +466,15 @@ static s32 nps_enet_open(struct net_device *ndev)
/* Reset private variables */
priv->tx_packet_sent = false;
- priv->ge_mac_cfg_2.value = 0;
- priv->ge_mac_cfg_3.value = 0;
+ priv->ge_mac_cfg_2_value = 0;
+ priv->ge_mac_cfg_3_value = 0;
/* ge_mac_cfg_3 default values */
- priv->ge_mac_cfg_3.rx_ifg_th = NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH;
- priv->ge_mac_cfg_3.max_len = NPS_ENET_GE_MAC_CFG_3_MAX_LEN;
+ priv->ge_mac_cfg_3_value |=
+ NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT;
+
+ priv->ge_mac_cfg_3_value |=
+ NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT;
/* Disable HW device */
nps_enet_hw_disable_control(ndev);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index 6703674d679c..d0cab600bce8 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -43,233 +43,123 @@
#define NPS_ENET_REG_GE_RST 0x1400
#define NPS_ENET_REG_PHASE_FIFO_CTL 0x1404
-/* Tx control register */
-struct nps_enet_tx_ctl {
- union {
- /* ct: SW sets to indicate frame ready in Tx buffer for
- * transmission. HW resets to when transmission done
- * et: Transmit error
- * nt: Length in bytes of Tx frame loaded to Tx buffer
- */
- struct {
- u32
- __reserved_1:16,
- ct:1,
- et:1,
- __reserved_2:3,
- nt:11;
- };
-
- u32 value;
- };
-};
-
-/* Rx control register */
-struct nps_enet_rx_ctl {
- union {
- /* cr: HW sets to indicate frame ready in Rx buffer.
- * SW resets to indicate host read received frame
- * and new frames can be written to Rx buffer
- * er: Rx error indication
- * crc: Rx CRC error indication
- * nr: Length in bytes of Rx frame loaded by MAC to Rx buffer
- */
- struct {
- u32
- __reserved_1:16,
- cr:1,
- er:1,
- crc:1,
- __reserved_2:2,
- nr:11;
- };
-
- u32 value;
- };
-};
-
-/* Interrupt enable for data buffer events register */
-struct nps_enet_buf_int_enable {
- union {
- /* tx_done: Interrupt generation in the case when new frame
- * is ready in Rx buffer
- * rx_rdy: Interrupt generation in the case when current frame
- * was read from TX buffer
- */
- struct {
- u32
- __reserved:30,
- tx_done:1,
- rx_rdy:1;
- };
-
- u32 value;
- };
-};
-
-/* Gbps Eth MAC Configuration 0 register */
-struct nps_enet_ge_mac_cfg_0 {
- union {
- /* tx_pr_len: Transmit preamble length in bytes
- * tx_ifg_nib: Tx idle pattern
- * nib_mode: Nibble (4-bit) Mode
- * rx_pr_check_en: Receive preamble Check Enable
- * tx_ifg: Transmit inter-Frame Gap
- * rx_ifg: Receive inter-Frame Gap
- * tx_fc_retr: Transmit Flow Control Retransmit Mode
- * rx_length_check_en: Receive Length Check Enable
- * rx_crc_ignore: Results of the CRC check are ignored
- * rx_crc_strip: MAC strips the CRC from received frames
- * rx_fc_en: Receive Flow Control Enable
- * tx_crc_en: Transmit CRC Enabled
- * tx_pad_en: Transmit Padding Enable
- * tx_cf_en: Transmit Flow Control Enable
- * tx_en: Transmit Enable
- * rx_en: Receive Enable
- */
- struct {
- u32
- tx_pr_len:4,
- tx_ifg_nib:4,
- nib_mode:1,
- rx_pr_check_en:1,
- tx_ifg:6,
- rx_ifg:4,
- tx_fc_retr:3,
- rx_length_check_en:1,
- rx_crc_ignore:1,
- rx_crc_strip:1,
- rx_fc_en:1,
- tx_crc_en:1,
- tx_pad_en:1,
- tx_fc_en:1,
- tx_en:1,
- rx_en:1;
- };
-
- u32 value;
- };
-};
-
-/* Gbps Eth MAC Configuration 1 register */
-struct nps_enet_ge_mac_cfg_1 {
- union {
- /* octet_3: MAC address octet 3
- * octet_2: MAC address octet 2
- * octet_1: MAC address octet 1
- * octet_0: MAC address octet 0
- */
- struct {
- u32
- octet_3:8,
- octet_2:8,
- octet_1:8,
- octet_0:8;
- };
-
- u32 value;
- };
-};
-
-/* Gbps Eth MAC Configuration 2 register */
-struct nps_enet_ge_mac_cfg_2 {
- union {
- /* transmit_flush_en: MAC flush enable
- * stat_en: RMON statistics interface enable
- * disc_da: Discard frames with DA different
- * from MAC address
- * disc_bc: Discard broadcast frames
- * disc_mc: Discard multicast frames
- * octet_5: MAC address octet 5
- * octet_4: MAC address octet 4
- */
- struct {
- u32
- transmit_flush_en:1,
- __reserved_1:5,
- stat_en:2,
- __reserved_2:1,
- disc_da:1,
- disc_bc:1,
- disc_mc:1,
- __reserved_3:4,
- octet_5:8,
- octet_4:8;
- };
-
- u32 value;
- };
-};
-
-/* Gbps Eth MAC Configuration 3 register */
-struct nps_enet_ge_mac_cfg_3 {
- union {
- /* ext_oob_cbfc_sel: Selects one of the 4 profiles for
- * extended OOB in-flow-control indication
- * max_len: Maximum receive frame length in bytes
- * tx_cbfc_en: Enable transmission of class-based
- * flow control packets
- * rx_ifg_th: Threshold for IFG status reporting via OOB
- * cf_timeout: Configurable time to decrement FC counters
- * cf_drop: Drop control frames
- * redirect_cbfc_sel: Selects one of CBFC redirect profiles
- * rx_cbfc_redir_en: Enable Rx class-based flow
- * control redirect
- * rx_cbfc_en: Enable Rx class-based flow control
- * tm_hd_mode: TM header mode
- */
- struct {
- u32
- ext_oob_cbfc_sel:2,
- max_len:14,
- tx_cbfc_en:1,
- rx_ifg_th:5,
- cf_timeout:4,
- cf_drop:1,
- redirect_cbfc_sel:2,
- rx_cbfc_redir_en:1,
- rx_cbfc_en:1,
- tm_hd_mode:1;
- };
-
- u32 value;
- };
-};
-
-/* GE MAC, PCS reset control register */
-struct nps_enet_ge_rst {
- union {
- /* gmac_0: GE MAC reset
- * spcs_0: SGMII PCS reset
- */
- struct {
- u32
- __reserved_1:23,
- gmac_0:1,
- __reserved_2:7,
- spcs_0:1;
- };
-
- u32 value;
- };
-};
-
-/* Tx phase sync FIFO control register */
-struct nps_enet_phase_fifo_ctl {
- union {
- /* init: initialize serdes TX phase sync FIFO pointers
- * rst: reset serdes TX phase sync FIFO
- */
- struct {
- u32
- __reserved:30,
- init:1,
- rst:1;
- };
-
- u32 value;
- };
-};
+/* Tx control register masks and shifts */
+#define TX_CTL_NT_MASK 0x7FF
+#define TX_CTL_NT_SHIFT 0
+#define TX_CTL_ET_MASK 0x4000
+#define TX_CTL_ET_SHIFT 14
+#define TX_CTL_CT_MASK 0x8000
+#define TX_CTL_CT_SHIFT 15
+
+/* Rx control register masks and shifts */
+#define RX_CTL_NR_MASK 0x7FF
+#define RX_CTL_NR_SHIFT 0
+#define RX_CTL_CRC_MASK 0x2000
+#define RX_CTL_CRC_SHIFT 13
+#define RX_CTL_ER_MASK 0x4000
+#define RX_CTL_ER_SHIFT 14
+#define RX_CTL_CR_MASK 0x8000
+#define RX_CTL_CR_SHIFT 15
+
+/* Interrupt enable for data buffer events register masks and shifts */
+#define RX_RDY_MASK 0x1
+#define RX_RDY_SHIFT 0
+#define TX_DONE_MASK 0x2
+#define TX_DONE_SHIFT 1
+
+/* Gbps Eth MAC Configuration 0 register masks and shifts */
+#define CFG_0_RX_EN_MASK 0x1
+#define CFG_0_RX_EN_SHIFT 0
+#define CFG_0_TX_EN_MASK 0x2
+#define CFG_0_TX_EN_SHIFT 1
+#define CFG_0_TX_FC_EN_MASK 0x4
+#define CFG_0_TX_FC_EN_SHIFT 2
+#define CFG_0_TX_PAD_EN_MASK 0x8
+#define CFG_0_TX_PAD_EN_SHIFT 3
+#define CFG_0_TX_CRC_EN_MASK 0x10
+#define CFG_0_TX_CRC_EN_SHIFT 4
+#define CFG_0_RX_FC_EN_MASK 0x20
+#define CFG_0_RX_FC_EN_SHIFT 5
+#define CFG_0_RX_CRC_STRIP_MASK 0x40
+#define CFG_0_RX_CRC_STRIP_SHIFT 6
+#define CFG_0_RX_CRC_IGNORE_MASK 0x80
+#define CFG_0_RX_CRC_IGNORE_SHIFT 7
+#define CFG_0_RX_LENGTH_CHECK_EN_MASK 0x100
+#define CFG_0_RX_LENGTH_CHECK_EN_SHIFT 8
+#define CFG_0_TX_FC_RETR_MASK 0xE00
+#define CFG_0_TX_FC_RETR_SHIFT 9
+#define CFG_0_RX_IFG_MASK 0xF000
+#define CFG_0_RX_IFG_SHIFT 12
+#define CFG_0_TX_IFG_MASK 0x3F0000
+#define CFG_0_TX_IFG_SHIFT 16
+#define CFG_0_RX_PR_CHECK_EN_MASK 0x400000
+#define CFG_0_RX_PR_CHECK_EN_SHIFT 22
+#define CFG_0_NIB_MODE_MASK 0x800000
+#define CFG_0_NIB_MODE_SHIFT 23
+#define CFG_0_TX_IFG_NIB_MASK 0xF000000
+#define CFG_0_TX_IFG_NIB_SHIFT 24
+#define CFG_0_TX_PR_LEN_MASK 0xF0000000
+#define CFG_0_TX_PR_LEN_SHIFT 28
+
+/* Gbps Eth MAC Configuration 1 register masks and shifts */
+#define CFG_1_OCTET_0_MASK 0x000000FF
+#define CFG_1_OCTET_0_SHIFT 0
+#define CFG_1_OCTET_1_MASK 0x0000FF00
+#define CFG_1_OCTET_1_SHIFT 8
+#define CFG_1_OCTET_2_MASK 0x00FF0000
+#define CFG_1_OCTET_2_SHIFT 16
+#define CFG_1_OCTET_3_MASK 0xFF000000
+#define CFG_1_OCTET_3_SHIFT 24
+
+/* Gbps Eth MAC Configuration 2 register masks and shifts */
+#define CFG_2_OCTET_4_MASK 0x000000FF
+#define CFG_2_OCTET_4_SHIFT 0
+#define CFG_2_OCTET_5_MASK 0x0000FF00
+#define CFG_2_OCTET_5_SHIFT 8
+#define CFG_2_DISK_MC_MASK 0x00100000
+#define CFG_2_DISK_MC_SHIFT 20
+#define CFG_2_DISK_BC_MASK 0x00200000
+#define CFG_2_DISK_BC_SHIFT 21
+#define CFG_2_DISK_DA_MASK 0x00400000
+#define CFG_2_DISK_DA_SHIFT 22
+#define CFG_2_STAT_EN_MASK 0x3000000
+#define CFG_2_STAT_EN_SHIFT 24
+#define CFG_2_TRANSMIT_FLUSH_EN_MASK 0x80000000
+#define CFG_2_TRANSMIT_FLUSH_EN_SHIFT 31
+
+/* Gbps Eth MAC Configuration 3 register masks and shifts */
+#define CFG_3_TM_HD_MODE_MASK 0x1
+#define CFG_3_TM_HD_MODE_SHIFT 0
+#define CFG_3_RX_CBFC_EN_MASK 0x2
+#define CFG_3_RX_CBFC_EN_SHIFT 1
+#define CFG_3_RX_CBFC_REDIR_EN_MASK 0x4
+#define CFG_3_RX_CBFC_REDIR_EN_SHIFT 2
+#define CFG_3_REDIRECT_CBFC_SEL_MASK 0x18
+#define CFG_3_REDIRECT_CBFC_SEL_SHIFT 3
+#define CFG_3_CF_DROP_MASK 0x20
+#define CFG_3_CF_DROP_SHIFT 5
+#define CFG_3_CF_TIMEOUT_MASK 0x3C0
+#define CFG_3_CF_TIMEOUT_SHIFT 6
+#define CFG_3_RX_IFG_TH_MASK 0x7C00
+#define CFG_3_RX_IFG_TH_SHIFT 10
+#define CFG_3_TX_CBFC_EN_MASK 0x8000
+#define CFG_3_TX_CBFC_EN_SHIFT 15
+#define CFG_3_MAX_LEN_MASK 0x3FFF0000
+#define CFG_3_MAX_LEN_SHIFT 16
+#define CFG_3_EXT_OOB_CBFC_SEL_MASK 0xC0000000
+#define CFG_3_EXT_OOB_CBFC_SEL_SHIFT 30
+
+/* GE MAC, PCS reset control register masks and shifts */
+#define RST_SPCS_MASK 0x1
+#define RST_SPCS_SHIFT 0
+#define RST_GMAC_0_MASK 0x100
+#define RST_GMAC_0_SHIFT 8
+
+/* Tx phase sync FIFO control register masks and shifts */
+#define PHASE_FIFO_CTL_RST_MASK 0x1
+#define PHASE_FIFO_CTL_RST_SHIFT 0
+#define PHASE_FIFO_CTL_INIT_MASK 0x2
+#define PHASE_FIFO_CTL_INIT_SHIFT 1
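
Each new MASK is intended to be a contiguous run of bits starting at its SHIFT, with widths matching the deleted bitfield declarations. A quick stand-alone consistency check for a few of the wider fields:

#include <assert.h>
#include <stdint.h>

#define FIELD_OK(mask, shift, width) \
	((uint32_t)(mask) == (((1u << (width)) - 1u) << (shift)))

int main(void)
{
	assert(FIELD_OK(0x7FF, 0, 11));		/* TX_CTL_NT: nt:11 */
	assert(FIELD_OK(0x7C00, 10, 5));	/* CFG_3_RX_IFG_TH: rx_ifg_th:5 */
	assert(FIELD_OK(0x3FFF0000, 16, 14));	/* CFG_3_MAX_LEN: max_len:14 */
	return 0;
}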
/**
* struct nps_enet_priv - Storage of ENET's private information.
@@ -285,8 +175,8 @@ struct nps_enet_priv {
bool tx_packet_sent;
struct sk_buff *tx_skb;
struct napi_struct napi;
- struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2;
- struct nps_enet_ge_mac_cfg_3 ge_mac_cfg_3;
+ u32 ge_mac_cfg_2_value;
+ u32 ge_mac_cfg_3_value;
};
/**
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2106d72c91dc..195122e11f10 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -64,6 +64,7 @@
#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
+#define FEC_FTRL 0x1b0 /* Frame truncation receive length */
#define FEC_RACC 0x1c4 /* Receive Accelerator function */
#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
@@ -309,12 +310,6 @@ struct bufdesc_ex {
#define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
(((X) == 2) ? \
FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
-#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
- (((X) == 2) ? \
- FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
-#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
- (((X) == 2) ? \
- FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
@@ -380,6 +375,7 @@ struct bufdesc_ex {
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* ENET interrupt coalescing macro define */
@@ -447,33 +443,35 @@ struct bufdesc_ex {
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
+struct bufdesc_prop {
+ int qid;
+ /* Address of Rx and Tx buffers */
+ struct bufdesc *base;
+ struct bufdesc *last;
+ struct bufdesc *cur;
+ void __iomem *reg_desc_active;
+ dma_addr_t dma;
+ unsigned short ring_size;
+ unsigned char dsize;
+ unsigned char dsize_log2;
+};
+
struct fec_enet_priv_tx_q {
- int index;
+ struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff *tx_skbuff[TX_RING_SIZE];
- dma_addr_t bd_dma;
- struct bufdesc *tx_bd_base;
- uint tx_ring_size;
-
unsigned short tx_stop_threshold;
unsigned short tx_wake_threshold;
- struct bufdesc *cur_tx;
struct bufdesc *dirty_tx;
char *tso_hdrs;
dma_addr_t tso_hdrs_dma;
};
struct fec_enet_priv_rx_q {
- int index;
+ struct bufdesc_prop bd;
struct sk_buff *rx_skbuff[RX_RING_SIZE];
-
- dma_addr_t bd_dma;
- struct bufdesc *rx_bd_base;
- uint rx_ring_size;
-
- struct bufdesc *cur_rx;
};
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
@@ -513,8 +511,6 @@ struct fec_enet_private {
unsigned long work_ts;
unsigned long work_mdio;
- unsigned short bufdesc_size;
-
struct platform_device *pdev;
int dev_id;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 41c81f6ec630..08243c2ff4b4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -217,86 +217,38 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_dma) && \
- (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+ (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
static int mii_cnt;
-static inline
-struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
- struct fec_enet_private *fep,
- int queue_id)
-{
- struct bufdesc *new_bd = bdp + 1;
- struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
- struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
- struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
- struct bufdesc_ex *ex_base;
- struct bufdesc *base;
- int ring_size;
-
- if (bdp >= txq->tx_bd_base) {
- base = txq->tx_bd_base;
- ring_size = txq->tx_ring_size;
- ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
- } else {
- base = rxq->rx_bd_base;
- ring_size = rxq->rx_ring_size;
- ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
- }
-
- if (fep->bufdesc_ex)
- return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
- ex_base : ex_new_bd);
- else
- return (new_bd >= (base + ring_size)) ?
- base : new_bd;
-}
-
-static inline
-struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
- struct fec_enet_private *fep,
- int queue_id)
-{
- struct bufdesc *new_bd = bdp - 1;
- struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
- struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
- struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
- struct bufdesc_ex *ex_base;
- struct bufdesc *base;
- int ring_size;
-
- if (bdp >= txq->tx_bd_base) {
- base = txq->tx_bd_base;
- ring_size = txq->tx_ring_size;
- ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
- } else {
- base = rxq->rx_bd_base;
- ring_size = rxq->rx_ring_size;
- ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
- }
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+ struct bufdesc_prop *bd)
+{
+ return (bdp >= bd->last) ? bd->base
+ : (struct bufdesc *)(((unsigned)bdp) + bd->dsize);
+}
- if (fep->bufdesc_ex)
- return (struct bufdesc *)((ex_new_bd < ex_base) ?
- (ex_new_bd + ring_size) : ex_new_bd);
- else
- return (new_bd < base) ? (new_bd + ring_size) : new_bd;
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+ struct bufdesc_prop *bd)
+{
+ return (bdp <= bd->base) ? bd->last
+ : (struct bufdesc *)(((unsigned)bdp) - bd->dsize);
}
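
The rewritten helpers reduce next/prev descriptor lookup to pointer stepping over bufdesc_prop, wrapping at bd->last/bd->base. A self-contained model of the wrap behaviour (plain calloc in place of the driver's DMA-coherent ring):

#include <stdio.h>
#include <stdlib.h>

struct desc { unsigned short sc; unsigned short len; unsigned int buf; };

struct prop {
	struct desc *base, *last;
	unsigned short ring_size;
	unsigned char dsize;
};

static struct desc *next_desc(struct desc *bdp, const struct prop *bd)
{
	return (bdp >= bd->last) ? bd->base
		: (struct desc *)((char *)bdp + bd->dsize);
}

int main(void)
{
	struct prop bd;
	struct desc *p;
	int steps = 0;

	bd.ring_size = 8;
	bd.dsize = sizeof(struct desc);
	bd.base = calloc(bd.ring_size, bd.dsize);
	bd.last = bd.base + bd.ring_size - 1;

	for (p = bd.base; steps == 0 || p != bd.base; steps++)
		p = next_desc(p, &bd);

	printf("wrapped after %d steps\n", steps);	/* == ring_size */
	free(bd.base);
	return 0;
}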
-static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
- struct fec_enet_private *fep)
+static int fec_enet_get_bd_index(struct bufdesc *bdp,
+ struct bufdesc_prop *bd)
{
- return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
+ return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}
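
Replacing the division by bufdesc_size with a shift by dsize_log2 is valid because both descriptor layouts are power-of-two sized; the WARN_ON() added later in fec_enet_init() guards exactly that. Sketch, assuming the usual 8-byte bufdesc and 32-byte bufdesc_ex:

#include <assert.h>

int main(void)
{
	unsigned int dsize = 32, dsize_log2 = 5;	/* bufdesc_ex */
	unsigned int off = 7 * dsize;			/* 8th descriptor */

	assert((1u << dsize_log2) == dsize);
	assert((off >> dsize_log2) == off / dsize);	/* index 7 */

	dsize = 8, dsize_log2 = 3;			/* plain bufdesc */
	assert((1u << dsize_log2) == dsize);
	return 0;
}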
-static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
- struct fec_enet_priv_tx_q *txq)
+static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
int entries;
- entries = ((const char *)txq->dirty_tx -
- (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
+ entries = (((const char *)txq->dirty_tx -
+ (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
- return entries > 0 ? entries : entries + txq->tx_ring_size;
+ return entries >= 0 ? entries : entries + txq->bd.ring_size;
}
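
Note the comparison change from "> 0" to ">= 0": when dirty_tx sits directly behind bd.cur the subtraction legitimately yields zero free entries, and the old test wrapped that zero to a full ring's worth. A minimal arithmetic check of the fixed logic:

#include <assert.h>

static int free_entries(int dirty_idx, int cur_idx, int ring_size)
{
	int entries = dirty_idx - cur_idx - 1;

	return entries >= 0 ? entries : entries + ring_size;
}

int main(void)
{
	assert(free_entries(5, 4, 8) == 0);	/* ring full, stays 0 */
	assert(free_entries(4, 4, 8) == 7);	/* one slot reserved */
	return 0;
}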
static void swap_buffer(void *bufaddr, int len)
@@ -329,20 +281,20 @@ static void fec_dump(struct net_device *ndev)
pr_info("Nr SC addr len SKB\n");
txq = fep->tx_queue[0];
- bdp = txq->tx_bd_base;
+ bdp = txq->bd.base;
do {
pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
index,
- bdp == txq->cur_tx ? 'S' : ' ',
+ bdp == txq->bd.cur ? 'S' : ' ',
bdp == txq->dirty_tx ? 'H' : ' ',
fec16_to_cpu(bdp->cbd_sc),
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
txq->tx_skbuff[index]);
- bdp = fec_enet_get_nextdesc(bdp, fep, 0);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
index++;
- } while (bdp != txq->tx_bd_base);
+ } while (bdp != txq->bd.base);
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
@@ -373,10 +325,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct bufdesc *bdp = txq->cur_tx;
+ struct bufdesc *bdp = txq->bd.cur;
struct bufdesc_ex *ebdp;
int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned short queue = skb_get_queue_mapping(skb);
int frag, frag_len;
unsigned short status;
unsigned int estatus = 0;
@@ -388,7 +339,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
for (frag = 0; frag < nr_frags; frag++) {
this_frag = &skb_shinfo(skb)->frags[frag];
- bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
ebdp = (struct bufdesc_ex *)bdp;
status = fec16_to_cpu(bdp->cbd_sc);
@@ -409,7 +360,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB)
- estatus |= FEC_TX_BD_FTYPE(queue);
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
@@ -418,7 +369,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
- index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
if (((unsigned long) bufaddr) & fep->tx_align ||
fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], bufaddr, frag_len);
@@ -431,7 +382,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
DMA_TO_DEVICE);
if (dma_mapping_error(&fep->pdev->dev, addr)) {
- dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
goto dma_mapping_error;
@@ -439,14 +389,18 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
bdp->cbd_bufaddr = cpu_to_fec32(addr);
bdp->cbd_datlen = cpu_to_fec16(frag_len);
+ /* Make sure the updates to the rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ wmb();
bdp->cbd_sc = cpu_to_fec16(status);
}
return bdp;
dma_mapping_error:
- bdp = txq->cur_tx;
+ bdp = txq->bd.cur;
for (i = 0; i < frag; i++) {
- bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
}
@@ -463,12 +417,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
dma_addr_t addr;
unsigned short status;
unsigned short buflen;
- unsigned short queue;
unsigned int estatus = 0;
unsigned int index;
int entries_free;
- entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
dev_kfree_skb_any(skb);
if (net_ratelimit())
@@ -483,7 +436,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
}
/* Fill in a Tx ring entry */
- bdp = txq->cur_tx;
+ bdp = txq->bd.cur;
last_bdp = bdp;
status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
@@ -492,8 +445,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
bufaddr = skb->data;
buflen = skb_headlen(skb);
- queue = skb_get_queue_mapping(skb);
- index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
if (((unsigned long) bufaddr) & fep->tx_align ||
fep->quirks & FEC_QUIRK_SWAP_FRAME) {
memcpy(txq->tx_bounce[index], skb->data, buflen);
@@ -514,8 +466,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
if (nr_frags) {
last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
- if (IS_ERR(last_bdp))
+ if (IS_ERR(last_bdp)) {
+ dma_unmap_single(&fep->pdev->dev, addr,
+ buflen, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
+ }
} else {
status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
if (fep->bufdesc_ex) {
@@ -525,6 +481,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
estatus |= BD_ENET_TX_TS;
}
}
+ bdp->cbd_bufaddr = cpu_to_fec32(addr);
+ bdp->cbd_datlen = cpu_to_fec16(buflen);
if (fep->bufdesc_ex) {
@@ -535,7 +493,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
if (fep->quirks & FEC_QUIRK_HAS_AVB)
- estatus |= FEC_TX_BD_FTYPE(queue);
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -544,12 +502,14 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
+ index = fec_enet_get_bd_index(last_bdp, &txq->bd);
/* Save skb pointer */
txq->tx_skbuff[index] = skb;
- bdp->cbd_datlen = cpu_to_fec16(buflen);
- bdp->cbd_bufaddr = cpu_to_fec32(addr);
+ /* Make sure the updates to the rest of the descriptor are performed before
+ * transferring ownership.
+ */
+ wmb();
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
@@ -558,18 +518,18 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
bdp->cbd_sc = cpu_to_fec16(status);
/* If this was the last BD in the ring, start at the beginning again. */
- bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
+ bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
skb_tx_timestamp(skb);
/* Make sure the update to bdp and tx_skbuff are performed before
- * cur_tx.
+ * txq->bd.cur.
*/
wmb();
- txq->cur_tx = bdp;
+ txq->bd.cur = bdp;
/* Trigger transmission start */
- writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+ writel(0, txq->bd.reg_desc_active);
return 0;
}
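
The wmb() calls added here and in fec_enet_txq_submit_frag_skb() enforce the standard ownership handoff: fill every other descriptor field, then barrier, then write the status word that sets BD_ENET_TX_READY. A user-space analogue, with a C11 release store standing in for wmb() and assuming a consumer that reads sc with acquire ordering:

#include <stdatomic.h>
#include <stdint.h>

struct txdesc {
	uint32_t bufaddr;
	uint16_t datlen;
	_Atomic uint16_t sc;	/* ownership/status word */
};

#define TX_READY 0x8000

static void publish(struct txdesc *d, uint32_t addr, uint16_t len)
{
	d->bufaddr = addr;	/* payload fields first */
	d->datlen = len;
	/* release ordering: earlier stores cannot pass this one */
	atomic_store_explicit(&d->sc, TX_READY, memory_order_release);
}

int main(void)
{
	struct txdesc d = { 0 };

	publish(&d, 0x1000, 64);
	return atomic_load(&d.sc) == TX_READY ? 0 : 1;
}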
@@ -582,7 +542,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
- unsigned short queue = skb_get_queue_mapping(skb);
unsigned short status;
unsigned int estatus = 0;
dma_addr_t addr;
@@ -614,7 +573,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB)
- estatus |= FEC_TX_BD_FTYPE(queue);
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
@@ -643,7 +602,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
- unsigned short queue = skb_get_queue_mapping(skb);
void *bufaddr;
unsigned long dmabuf;
unsigned short status;
@@ -678,7 +636,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB)
- estatus |= FEC_TX_BD_FTYPE(queue);
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
@@ -697,13 +655,12 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
int total_len, data_left;
- struct bufdesc *bdp = txq->cur_tx;
- unsigned short queue = skb_get_queue_mapping(skb);
+ struct bufdesc *bdp = txq->bd.cur;
struct tso_t tso;
unsigned int index = 0;
int ret;
- if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "NOT enough BD for TSO!\n");
@@ -723,7 +680,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
while (total_len > 0) {
char *hdr;
- index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
total_len -= data_left;
@@ -738,9 +695,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
int size;
size = min_t(int, tso.size, data_left);
- bdp = fec_enet_get_nextdesc(bdp, fep, queue);
- index = fec_enet_get_bd_index(txq->tx_bd_base,
- bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
bdp, index,
tso.data, size,
@@ -753,22 +709,22 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
tso_build_data(skb, &tso, size);
}
- bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
}
/* Save skb pointer */
txq->tx_skbuff[index] = skb;
skb_tx_timestamp(skb);
- txq->cur_tx = bdp;
+ txq->bd.cur = bdp;
/* Trigger transmission start */
if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
- !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
- !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
- !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
- !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
- writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active) ||
+ !readl(txq->bd.reg_desc_active))
+ writel(0, txq->bd.reg_desc_active);
return 0;
@@ -798,7 +754,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (ret)
return ret;
- entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free <= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
@@ -819,32 +775,32 @@ static void fec_enet_bd_init(struct net_device *dev)
for (q = 0; q < fep->num_rx_queues; q++) {
/* Initialize the receive buffer descriptors. */
rxq = fep->rx_queue[q];
- bdp = rxq->rx_bd_base;
+ bdp = rxq->bd.base;
- for (i = 0; i < rxq->rx_ring_size; i++) {
+ for (i = 0; i < rxq->bd.ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr)
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
else
bdp->cbd_sc = cpu_to_fec16(0);
- bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
/* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
- rxq->cur_rx = rxq->rx_bd_base;
+ rxq->bd.cur = rxq->bd.base;
}
for (q = 0; q < fep->num_tx_queues; q++) {
/* ...and the same for transmit */
txq = fep->tx_queue[q];
- bdp = txq->tx_bd_base;
- txq->cur_tx = bdp;
+ bdp = txq->bd.base;
+ txq->bd.cur = bdp;
- for (i = 0; i < txq->tx_ring_size; i++) {
+ for (i = 0; i < txq->bd.ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = cpu_to_fec16(0);
if (txq->tx_skbuff[i]) {
@@ -852,11 +808,11 @@ static void fec_enet_bd_init(struct net_device *dev)
txq->tx_skbuff[i] = NULL;
}
bdp->cbd_bufaddr = cpu_to_fec32(0);
- bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
}
/* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
txq->dirty_tx = bdp;
}
@@ -868,7 +824,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
int i;
for (i = 0; i < fep->num_rx_queues; i++)
- writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+ writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}
static void fec_enet_enable_ring(struct net_device *ndev)
@@ -880,7 +836,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_rx_queues; i++) {
rxq = fep->rx_queue[i];
- writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
+ writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
/* enable DMA1/2 */
@@ -891,7 +847,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
for (i = 0; i < fep->num_tx_queues; i++) {
txq = fep->tx_queue[i];
- writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
+ writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
/* enable DMA1/2 */
if (i)
@@ -909,7 +865,7 @@ static void fec_enet_reset_skb(struct net_device *ndev)
for (i = 0; i < fep->num_tx_queues; i++) {
txq = fep->tx_queue[i];
- for (j = 0; j < txq->tx_ring_size; j++) {
+ for (j = 0; j < txq->bd.ring_size; j++) {
if (txq->tx_skbuff[j]) {
dev_kfree_skb_any(txq->tx_skbuff[j]);
txq->tx_skbuff[j] = NULL;
@@ -987,6 +943,7 @@ fec_restart(struct net_device *ndev)
else
val &= ~FEC_RACC_OPTIONS;
writel(val, fep->hwp + FEC_RACC);
+ writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
}
#endif
@@ -1221,16 +1178,16 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
bdp = txq->dirty_tx;
/* get next bdp of dirty_tx */
- bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
- while (bdp != READ_ONCE(txq->cur_tx)) {
- /* Order the load of cur_tx and cbd_sc */
+ while (bdp != READ_ONCE(txq->bd.cur)) {
+ /* Order the load of bd.cur and cbd_sc */
rmb();
status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
if (status & BD_ENET_TX_READY)
break;
- index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
skb = txq->tx_skbuff[index];
txq->tx_skbuff[index] = NULL;
@@ -1241,7 +1198,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
continue;
}
@@ -1290,21 +1247,21 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
txq->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
- bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
/* Since we have freed up a buffer, the ring is no longer full
*/
if (netif_queue_stopped(ndev)) {
- entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free >= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
}
}
/* ERR006538: Keep the transmitter going */
- if (bdp != txq->cur_tx &&
- readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
- writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+ if (bdp != txq->bd.cur &&
+ readl(txq->bd.reg_desc_active) == 0)
+ writel(0, txq->bd.reg_desc_active);
}
static void
@@ -1366,7 +1323,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
return true;
}
-/* During a receive, the cur_rx points to the current incoming buffer.
+/* During a receive, rxq->bd.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
@@ -1399,7 +1356,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
- bdp = rxq->cur_rx;
+ bdp = rxq->bd.cur;
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1407,37 +1364,31 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
break;
pkt_received++;
- /* Since we have allocated space to hold a complete frame,
- * the last indicator should be set.
- */
- if ((status & BD_ENET_RX_LAST) == 0)
- netdev_err(ndev, "rcv is not +last\n");
-
writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
/* Check for errors. */
+ status ^= BD_ENET_RX_LAST;
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
- BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+ BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
+ BD_ENET_RX_CL)) {
ndev->stats.rx_errors++;
- if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ if (status & BD_ENET_RX_OV) {
+ /* FIFO overrun */
+ ndev->stats.rx_fifo_errors++;
+ goto rx_processing_done;
+ }
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
+ | BD_ENET_RX_LAST)) {
/* Frame too long or too short. */
ndev->stats.rx_length_errors++;
+ if (status & BD_ENET_RX_LAST)
+ netdev_err(ndev, "rcv is not +last\n");
}
- if (status & BD_ENET_RX_NO) /* Frame alignment */
- ndev->stats.rx_frame_errors++;
if (status & BD_ENET_RX_CR) /* CRC Error */
ndev->stats.rx_crc_errors++;
- if (status & BD_ENET_RX_OV) /* FIFO overrun */
- ndev->stats.rx_fifo_errors++;
- }
-
- /* Report late collisions as a frame error.
- * On this error, the BD is closed, but we don't know what we
- * have in the buffer. So, just drop this frame on the floor.
- */
- if (status & BD_ENET_RX_CL) {
- ndev->stats.rx_errors++;
- ndev->stats.rx_frame_errors++;
+ /* Report late collisions as a frame error. */
+ if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+ ndev->stats.rx_frame_errors++;
goto rx_processing_done;
}
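
The "status ^= BD_ENET_RX_LAST" trick above inverts the LAST bit so that its absence registers as a set bit in the combined error-mask test, folding the old "rcv is not +last" special case into the normal error path. Demonstration with illustrative bit values:

#include <assert.h>

#define RX_LAST	0x0800	/* assumed value, as per fec.h */
#define RX_CR	0x0004

int main(void)
{
	unsigned short status;

	status = RX_LAST;	/* good frame: LAST set, no error bits */
	status ^= RX_LAST;
	assert(!(status & (RX_CR | RX_LAST)));

	status = RX_CR;		/* bad frame, and LAST missing */
	status ^= RX_LAST;
	assert(status & RX_LAST);	/* now caught as a length error */
	return 0;
}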
@@ -1446,7 +1397,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len;
- index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(bdp, &rxq->bd);
skb = rxq->rx_skbuff[index];
/* The packet length includes FCS, but we don't want to
@@ -1535,7 +1486,6 @@ rx_processing_done:
/* Mark the buffer empty */
status |= BD_ENET_RX_EMPTY;
- bdp->cbd_sc = cpu_to_fec16(status);
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
@@ -1544,17 +1494,22 @@ rx_processing_done:
ebdp->cbd_prot = 0;
ebdp->cbd_bdu = 0;
}
+ /* Make sure the updates to the rest of the descriptor are
+ * performed before transferring ownership.
+ */
+ wmb();
+ bdp->cbd_sc = cpu_to_fec16(status);
/* Update BD pointer to next entry */
- bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
- writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
+ writel(0, rxq->bd.reg_desc_active);
}
- rxq->cur_rx = bdp;
+ rxq->bd.cur = bdp;
return pkt_received;
}
@@ -1613,7 +1568,7 @@ fec_enet_interrupt(int irq, void *dev_id)
if (napi_schedule_prep(&fep->napi)) {
/* Disable the NAPI interrupts */
- writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+ writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
__napi_schedule(&fep->napi);
}
}
@@ -2663,8 +2618,8 @@ static void fec_enet_free_buffers(struct net_device *ndev)
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- bdp = rxq->rx_bd_base;
- for (i = 0; i < rxq->rx_ring_size; i++) {
+ bdp = rxq->bd.base;
+ for (i = 0; i < rxq->bd.ring_size; i++) {
skb = rxq->rx_skbuff[i];
rxq->rx_skbuff[i] = NULL;
if (skb) {
@@ -2674,14 +2629,14 @@ static void fec_enet_free_buffers(struct net_device *ndev)
DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
- bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
}
for (q = 0; q < fep->num_tx_queues; q++) {
txq = fep->tx_queue[q];
- bdp = txq->tx_bd_base;
- for (i = 0; i < txq->tx_ring_size; i++) {
+ bdp = txq->bd.base;
+ for (i = 0; i < txq->bd.ring_size; i++) {
kfree(txq->tx_bounce[i]);
txq->tx_bounce[i] = NULL;
skb = txq->tx_skbuff[i];
@@ -2701,7 +2656,7 @@ static void fec_enet_free_queue(struct net_device *ndev)
if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
txq = fep->tx_queue[i];
dma_free_coherent(NULL,
- txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->bd.ring_size * TSO_HEADER_SIZE,
txq->tso_hdrs,
txq->tso_hdrs_dma);
}
@@ -2727,15 +2682,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
}
fep->tx_queue[i] = txq;
- txq->tx_ring_size = TX_RING_SIZE;
- fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
+ txq->bd.ring_size = TX_RING_SIZE;
+ fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
txq->tx_wake_threshold =
- (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
+ (txq->bd.ring_size - txq->tx_stop_threshold) / 2;
txq->tso_hdrs = dma_alloc_coherent(NULL,
- txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->bd.ring_size * TSO_HEADER_SIZE,
&txq->tso_hdrs_dma,
GFP_KERNEL);
if (!txq->tso_hdrs) {
@@ -2752,8 +2707,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
goto alloc_failed;
}
- fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
- fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
+ fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
+ fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
}
return ret;
@@ -2772,8 +2727,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
struct fec_enet_priv_rx_q *rxq;
rxq = fep->rx_queue[queue];
- bdp = rxq->rx_bd_base;
- for (i = 0; i < rxq->rx_ring_size; i++) {
+ bdp = rxq->bd.base;
+ for (i = 0; i < rxq->bd.ring_size; i++) {
skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (!skb)
goto err_alloc;
@@ -2791,11 +2746,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
}
- bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep, queue);
+ bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
return 0;
@@ -2813,8 +2768,8 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
struct fec_enet_priv_tx_q *txq;
txq = fep->tx_queue[queue];
- bdp = txq->tx_bd_base;
- for (i = 0; i < txq->tx_ring_size; i++) {
+ bdp = txq->bd.base;
+ for (i = 0; i < txq->bd.ring_size; i++) {
txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
if (!txq->tx_bounce[i])
goto err_alloc;
@@ -2827,11 +2782,11 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
}
- bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep, queue);
+ bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
return 0;
@@ -3115,6 +3070,14 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_set_features = fec_set_features,
};
+static const unsigned short offset_des_active_rxq[] = {
+ FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+ FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
+};
+
/*
* XXX: We need to clean up on failure exits here.
*
@@ -3122,13 +3085,15 @@ static const struct net_device_ops fec_netdev_ops = {
static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_enet_priv_tx_q *txq;
- struct fec_enet_priv_rx_q *rxq;
struct bufdesc *cbd_base;
dma_addr_t bd_dma;
int bd_size;
unsigned int i;
+ unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+ sizeof(struct bufdesc);
+ unsigned dsize_log2 = __fls(dsize);
+ WARN_ON(dsize != (1 << dsize_log2));
#if defined(CONFIG_ARM)
fep->rx_align = 0xf;
fep->tx_align = 0xf;
@@ -3139,12 +3104,7 @@ static int fec_enet_init(struct net_device *ndev)
fec_enet_alloc_queue(ndev);
- if (fep->bufdesc_ex)
- fep->bufdesc_size = sizeof(struct bufdesc_ex);
- else
- fep->bufdesc_size = sizeof(struct bufdesc);
- bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
- fep->bufdesc_size;
+ bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
/* Allocate memory for buffer descriptors. */
cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
@@ -3162,33 +3122,35 @@ static int fec_enet_init(struct net_device *ndev)
/* Set receive and transmit descriptor base. */
for (i = 0; i < fep->num_rx_queues; i++) {
- rxq = fep->rx_queue[i];
- rxq->index = i;
- rxq->rx_bd_base = (struct bufdesc *)cbd_base;
- rxq->bd_dma = bd_dma;
- if (fep->bufdesc_ex) {
- bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
- cbd_base = (struct bufdesc *)
- (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
- } else {
- bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
- cbd_base += rxq->rx_ring_size;
- }
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
+ unsigned size = dsize * rxq->bd.ring_size;
+
+ rxq->bd.qid = i;
+ rxq->bd.base = cbd_base;
+ rxq->bd.cur = cbd_base;
+ rxq->bd.dma = bd_dma;
+ rxq->bd.dsize = dsize;
+ rxq->bd.dsize_log2 = dsize_log2;
+ rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
+ bd_dma += size;
+ cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+ rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
}
for (i = 0; i < fep->num_tx_queues; i++) {
- txq = fep->tx_queue[i];
- txq->index = i;
- txq->tx_bd_base = (struct bufdesc *)cbd_base;
- txq->bd_dma = bd_dma;
- if (fep->bufdesc_ex) {
- bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
- cbd_base = (struct bufdesc *)
- (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
- } else {
- bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
- cbd_base += txq->tx_ring_size;
- }
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
+ unsigned size = dsize * txq->bd.ring_size;
+
+ txq->bd.qid = i;
+ txq->bd.base = cbd_base;
+ txq->bd.cur = cbd_base;
+ txq->bd.dma = bd_dma;
+ txq->bd.dsize = dsize;
+ txq->bd.dsize_log2 = dsize_log2;
+ txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
+ bd_dma += size;
+ cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+ txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
}
@@ -3229,6 +3191,7 @@ static int fec_enet_init(struct net_device *ndev)
static void fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
+ bool active_high = false;
int msec = 1;
struct device_node *np = pdev->dev.of_node;
@@ -3244,14 +3207,17 @@ static void fec_reset_phy(struct platform_device *pdev)
if (!gpio_is_valid(phy_reset))
return;
+ active_high = of_property_read_bool(np, "phy-reset-active-high");
+
err = devm_gpio_request_one(&pdev->dev, phy_reset,
- GPIOF_OUT_INIT_LOW, "phy-reset");
+ active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+ "phy-reset");
if (err) {
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
msleep(msec);
- gpio_set_value_cansleep(phy_reset, 1);
+ gpio_set_value_cansleep(phy_reset, !active_high);
}
#else /* CONFIG_OF */
static void fec_reset_phy(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 79a210aaf0bb..ea83712a6d62 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -35,6 +35,7 @@
#include "fman.h"
#include "fman_muram.h"
+#include <linux/fsl/guts.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -1871,6 +1872,90 @@ err_fm_state:
return -EINVAL;
}
+static int fman_reset(struct fman *fman)
+{
+ u32 count;
+ int err = 0;
+
+ if (fman->state->rev_info.major < 6) {
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0)
+ err = -EBUSY;
+
+ goto _return;
+ } else {
+ struct device_node *guts_node;
+ struct ccsr_guts __iomem *guts_regs;
+ u32 devdisr2, reg;
+
+ /* Errata A007273 */
+ guts_node =
+ of_find_compatible_node(NULL, NULL,
+ "fsl,qoriq-device-config-2.0");
+ if (!guts_node) {
+ dev_err(fman->dev, "%s: Couldn't find guts node\n",
+ __func__);
+ goto guts_node;
+ }
+
+ guts_regs = of_iomap(guts_node, 0);
+ if (!guts_regs) {
+ dev_err(fman->dev, "%s: Couldn't map %s regs\n",
+ __func__, guts_node->full_name);
+ goto guts_regs;
+ }
+#define FMAN1_ALL_MACS_MASK 0xFCC00000
+#define FMAN2_ALL_MACS_MASK 0x000FCC00
+ /* Read current state */
+ devdisr2 = ioread32be(&guts_regs->devdisr2);
+ if (fman->dts_params.id == 0)
+ reg = devdisr2 & ~FMAN1_ALL_MACS_MASK;
+ else
+ reg = devdisr2 & ~FMAN2_ALL_MACS_MASK;
+
+ /* Enable all MACs */
+ iowrite32be(reg, &guts_regs->devdisr2);
+
+ /* Perform FMan reset */
+ iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
+
+ /* Wait for reset completion */
+ count = 100;
+ do {
+ udelay(1);
+ } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
+ FPM_RSTC_FM_RESET) && --count);
+ if (count == 0) {
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+ err = -EBUSY;
+ goto _return;
+ }
+
+ /* Restore devdisr2 value */
+ iowrite32be(devdisr2, &guts_regs->devdisr2);
+
+ iounmap(guts_regs);
+ of_node_put(guts_node);
+
+ goto _return;
+
+guts_regs:
+ of_node_put(guts_node);
+guts_node:
+ dev_dbg(fman->dev, "%s: Didn't perform FManV3 reset due to Errata A007273!\n",
+ __func__);
+ }
+_return:
+ return err;
+}
+
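
fman_reset() keeps the FManV2 bounded poll and reuses it on V3 after the errata A007273 MAC-enable dance: write the self-clearing reset bit, then spin with a ~100-iteration budget. Stand-alone sketch of that wait loop (the register read is faked, and the FPM_RSTC_FM_RESET value is an assumption):

#include <stdio.h>

#define FPM_RSTC_FM_RESET 0x80000000u	/* assumed bit position */

static unsigned int reads;		/* fake: bit clears on 3rd read */
static unsigned int read_rstc(void)
{
	return (++reads < 3) ? FPM_RSTC_FM_RESET : 0;
}

int main(void)
{
	unsigned int count = 100;

	do {
		/* udelay(1) in the driver */
	} while ((read_rstc() & FPM_RSTC_FM_RESET) && --count);

	printf("%s\n", count ? "reset complete" : "timeout: -EBUSY");
	return count ? 0 : 1;
}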
static int fman_init(struct fman *fman)
{
struct fman_cfg *cfg = NULL;
@@ -1914,22 +1999,9 @@ static int fman_init(struct fman *fman)
fman->liodn_base[i] = liodn_base;
}
- /* FMan Reset (supported only for FMan V2) */
- if (fman->state->rev_info.major >= 6) {
- /* Errata A007273 */
- dev_dbg(fman->dev, "%s: FManV3 reset is not supported!\n",
- __func__);
- } else {
- iowrite32be(FPM_RSTC_FM_RESET, &fman->fpm_regs->fm_rstc);
- /* Wait for reset completion */
- count = 100;
- do {
- udelay(1);
- } while (((ioread32be(&fman->fpm_regs->fm_rstc)) &
- FPM_RSTC_FM_RESET) && --count);
- if (count == 0)
- return -EBUSY;
- }
+ err = fman_reset(fman);
+ if (err)
+ return err;
if (ioread32be(&fman->qmi_regs->fmqm_gs) & QMI_GS_HALT_NOT_BUSY) {
resume(fman->fpm_regs);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 7c92eb854925..c88918c4c5f3 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -932,15 +932,14 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
- if (dtsec->fm_rev_info.major == 2)
- if (pause_time <= 320) {
+ if (pause_time) {
+ /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
+ if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
pr_warn("pause-time: %d illegal.Should be > 320\n",
pause_time);
return -EINVAL;
}
- if (pause_time) {
ptv = ioread32be(&regs->ptv);
ptv &= PTV_PTE_MASK;
ptv |= pause_time & PTV_PT_MASK;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b9ecf197ad11..d2f917af539f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2324,6 +2324,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct txfcb *fcb = NULL;
struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
+ skb_frag_t *frag;
int i, rq = 0;
int do_tstamp, do_csum, do_vlan;
u32 bufaddr;
@@ -2391,52 +2392,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbdp = txbdp_start = tx_queue->cur_tx;
lstatus = be32_to_cpu(txbdp->lstatus);
- /* Time stamp insertion requires one additional TxBD */
- if (unlikely(do_tstamp))
- txbdp_tstamp = txbdp = next_txbd(txbdp, base,
- tx_queue->tx_ring_size);
-
- if (nr_frags == 0) {
- if (unlikely(do_tstamp)) {
- u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
-
- lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
- } else {
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- }
- } else {
- /* Place the fragment addresses and lengths into the TxBDs */
- for (i = 0; i < nr_frags; i++) {
- unsigned int frag_len;
- /* Point at the next BD, wrapping as needed */
- txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
-
- frag_len = skb_shinfo(skb)->frags[i].size;
-
- lstatus = be32_to_cpu(txbdp->lstatus) | frag_len |
- BD_LFLAG(TXBD_READY);
-
- /* Handle the last BD specially */
- if (i == nr_frags - 1)
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
-
- bufaddr = skb_frag_dma_map(priv->dev,
- &skb_shinfo(skb)->frags[i],
- 0,
- frag_len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
- goto dma_map_err;
-
- /* set the TxBD length and buffer pointer */
- txbdp->bufPtr = cpu_to_be32(bufaddr);
- txbdp->lstatus = cpu_to_be32(lstatus);
- }
-
- lstatus = be32_to_cpu(txbdp_start->lstatus);
- }
-
/* Add TxPAL between FCB and frame if required */
if (unlikely(do_tstamp)) {
skb_push(skb, GMAC_TXPAL_LEN);
@@ -2471,12 +2426,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (do_vlan)
gfar_tx_vlan(skb, fcb);
- /* Setup tx hardware time stamping if requested */
- if (unlikely(do_tstamp)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- fcb->ptp = 1;
- }
-
bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
@@ -2484,6 +2433,46 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbdp_start->bufPtr = cpu_to_be32(bufaddr);
+ /* Time stamp insertion requires one additional TxBD */
+ if (unlikely(do_tstamp))
+ txbdp_tstamp = txbdp = next_txbd(txbdp, base,
+ tx_queue->tx_ring_size);
+
+ if (likely(!nr_frags)) {
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ } else {
+ u32 lstatus_start = lstatus;
+
+ /* Place the fragment addresses and lengths into the TxBDs */
+ frag = &skb_shinfo(skb)->frags[0];
+ for (i = 0; i < nr_frags; i++, frag++) {
+ unsigned int size;
+
+ /* Point at the next BD, wrapping as needed */
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+
+ size = skb_frag_size(frag);
+
+ lstatus = be32_to_cpu(txbdp->lstatus) | size |
+ BD_LFLAG(TXBD_READY);
+
+ /* Handle the last BD specially */
+ if (i == nr_frags - 1)
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+
+ bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ /* set the TxBD length and buffer pointer */
+ txbdp->bufPtr = cpu_to_be32(bufaddr);
+ txbdp->lstatus = cpu_to_be32(lstatus);
+ }
+
+ lstatus = lstatus_start;
+ }
+
/* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
* GMAC_FCB_LEN. The second TxBD points to the actual frame data with
@@ -2494,12 +2483,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
bufaddr = be32_to_cpu(txbdp_start->bufPtr);
bufaddr += fcb_len;
+
lstatus_ts |= BD_LFLAG(TXBD_READY) |
(skb_headlen(skb) - fcb_len);
+ if (!nr_frags)
+ lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
+
+ /* Setup tx hardware time stamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ fcb->ptp = 1;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
@@ -2712,7 +2708,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
~0x7UL);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
skb_tstamp_tx(skb, &shhwtstamps);
gfar_clear_txbd_status(bdp);
@@ -2944,7 +2940,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
/* change offset to the other half */
rxb->page_offset ^= GFAR_RXB_TRUESIZE;
- atomic_inc(&page->_count);
+ page_ref_inc(page);
return true;
}
@@ -3041,7 +3037,7 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
u64 *ns = (u64 *) skb->data;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
- shhwtstamps->hwtstamp = ns_to_ktime(*ns);
+ shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
}
if (priv->padding)
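Both timestamp hunks above fix the same endianness bug: the eTSEC prepends the Tx/Rx hardware timestamp as a big-endian 64-bit nanosecond count, so dereferencing it as a host-order u64 was only correct on big-endian CPUs. A minimal sketch of the endian-safe read, assuming the timestamp sits at the head of the buffer:

	static ktime_t gfar_read_hw_ts(const void *buf)
	{
		/* the hardware stores nanoseconds as a big-endian u64 */
		u64 ns = be64_to_cpu(*(const __be64 *)buf);

		return ns_to_ktime(ns);
	}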
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index b40fba929d65..57798814160d 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -422,19 +422,6 @@ static struct ptp_clock_info ptp_gianfar_caps = {
.enable = ptp_gianfar_enable,
};
-/* OF device tree */
-
-static int get_of_u32(struct device_node *node, char *str, u32 *val)
-{
- int plen;
- const u32 *prop = of_get_property(node, str, &plen);
-
- if (!prop || plen != sizeof(*prop))
- return -1;
- *val = *prop;
- return 0;
-}
-
static int gianfar_ptp_probe(struct platform_device *dev)
{
struct device_node *node = dev->dev.of_node;
@@ -452,15 +439,21 @@ static int gianfar_ptp_probe(struct platform_device *dev)
etsects->caps = ptp_gianfar_caps;
- if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
+ if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel))
etsects->cksel = DEFAULT_CKSEL;
- if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
- get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
- get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) ||
- get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
- get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
- get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) {
+ if (of_property_read_u32(node,
+ "fsl,tclk-period", &etsects->tclk_period) ||
+ of_property_read_u32(node,
+ "fsl,tmr-prsc", &etsects->tmr_prsc) ||
+ of_property_read_u32(node,
+ "fsl,tmr-add", &etsects->tmr_add) ||
+ of_property_read_u32(node,
+ "fsl,tmr-fiper1", &etsects->tmr_fiper1) ||
+ of_property_read_u32(node,
+ "fsl,tmr-fiper2", &etsects->tmr_fiper2) ||
+ of_property_read_u32(node,
+ "fsl,max-adj", &etsects->caps.max_adj)) {
pr_err("device tree node missing required elements\n");
goto no_node;
}
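The probe conversion above drops the driver-local get_of_u32(), which returned -1 on any failure, in favor of the generic of_property_read_u32(), which also validates the property length and returns a real errno. The equivalent read pattern, sketched with a hypothetical property name and fallback constant:

	u32 val;
	int err;

	/* returns 0, or -EINVAL/-ENODATA/-EOVERFLOW on failure */
	err = of_property_read_u32(node, "fsl,example-prop", &val);
	if (err)
		val = DEFAULT_VAL;	/* fall back, as done for fsl,cksel */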
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 1cbcb9fa3fb5..e8d36aaea223 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -147,6 +147,8 @@ enum hnae_led_state {
#define HNSV2_TXD_BUFNUM_S 0
#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S)
+#define HNSV2_TXD_PORTID_S 4
+#define HNSV2_TXD_PORTID_M (0x7 << HNSV2_TXD_PORTID_S)
#define HNSV2_TXD_RI_B 1
#define HNSV2_TXD_L4CS_B 2
#define HNSV2_TXD_L3CS_B 3
@@ -467,7 +469,7 @@ struct hnae_ae_ops {
u32 *tx_usecs, u32 *rx_usecs);
void (*get_rx_max_coalesced_frames)(struct hnae_handle *handle,
u32 *tx_frames, u32 *rx_frames);
- void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+ int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
int (*set_coalesce_frames)(struct hnae_handle *handle,
u32 coalesce_frames);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
@@ -516,6 +518,7 @@ struct hnae_handle {
int q_num;
int vf_id;
u32 eport_id;
+ u32 dport_id; /* v2 tx bd should fill the dport_id */
enum hnae_port_type port_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index d4f92ed322d6..a1cb461ac45f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -159,11 +159,6 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
ring_pair_cb->used_by_vf = 1;
- if (port_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
- ring_pair_cb->port_id_in_dsa = port_idx;
- else
- ring_pair_cb->port_id_in_dsa = 0;
-
ring_pair_cb++;
}
@@ -175,6 +170,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
ae_handle->phy_node = vf_cb->mac_cb->phy_node;
ae_handle->if_support = vf_cb->mac_cb->if_support;
ae_handle->port_type = vf_cb->mac_cb->mac_type;
+ ae_handle->dport_id = port_idx;
return ae_handle;
vf_id_err:
@@ -419,7 +415,10 @@ static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
+ hns_mac_set_promisc(mac_cb, (u8)!!en);
}
static int hns_ae_get_autoneg(struct hnae_handle *handle)
@@ -449,59 +448,46 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
u32 *tx_usecs, u32 *rx_usecs)
{
- int port;
-
- port = hns_ae_map_eport_to_dport(handle->eport_id);
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
- *tx_usecs = hns_rcb_get_coalesce_usecs(
- hns_ae_get_dsaf_dev(handle->dev),
- hns_dsaf_get_comm_idx_by_port(port));
- *rx_usecs = hns_rcb_get_coalesce_usecs(
- hns_ae_get_dsaf_dev(handle->dev),
- hns_dsaf_get_comm_idx_by_port(port));
+ *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+ *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
}
static void hns_ae_get_rx_max_coalesced_frames(struct hnae_handle *handle,
u32 *tx_frames, u32 *rx_frames)
{
- int port;
-
- assert(handle);
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
- port = hns_ae_map_eport_to_dport(handle->eport_id);
-
- *tx_frames = hns_rcb_get_coalesced_frames(
- hns_ae_get_dsaf_dev(handle->dev), port);
- *rx_frames = hns_rcb_get_coalesced_frames(
- hns_ae_get_dsaf_dev(handle->dev), port);
+ *tx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+ *rx_frames = hns_rcb_get_coalesced_frames(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
}
-static void hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
- u32 timeout)
+static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+ u32 timeout)
{
- int port;
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
- assert(handle);
-
- port = hns_ae_map_eport_to_dport(handle->eport_id);
-
- hns_rcb_set_coalesce_usecs(hns_ae_get_dsaf_dev(handle->dev),
- port, timeout);
+ return hns_rcb_set_coalesce_usecs(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
}
static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
u32 coalesce_frames)
{
- int port;
- int ret;
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
- assert(handle);
-
- port = hns_ae_map_eport_to_dport(handle->eport_id);
-
- ret = hns_rcb_set_coalesced_frames(hns_ae_get_dsaf_dev(handle->dev),
- port, coalesce_frames);
- return ret;
+ return hns_rcb_set_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, coalesce_frames);
}
void hns_ae_update_stats(struct hnae_handle *handle,
@@ -787,7 +773,8 @@ static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
/* update the current hash->queue mappings from the shadow RSS table */
- memcpy(indir, ppe_cb->rss_indir_table, HNS_PPEV2_RSS_IND_TBL_SIZE);
+ memcpy(indir, ppe_cb->rss_indir_table,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
return 0;
}
@@ -799,10 +786,11 @@ static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
	/* set the RSS Hash Key if specified by the user */
if (key)
- hns_ppe_set_rss_key(ppe_cb, (int *)key);
+ hns_ppe_set_rss_key(ppe_cb, (u32 *)key);
/* update the shadow RSS table with user specified qids */
- memcpy(ppe_cb->rss_indir_table, indir, HNS_PPEV2_RSS_IND_TBL_SIZE);
+ memcpy(ppe_cb->rss_indir_table, indir,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
/* now update the hardware */
hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index b8517b00e706..44abb08de155 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -290,6 +290,24 @@ static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
return 0;
}
+static void hns_gmac_set_uc_match(void *mac_drv, u16 en)
+{
+ struct mac_driver *drv = mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_REC_FILT_CONTROL_REG,
+ GMAC_UC_MATCH_EN_B, !en);
+ dsaf_set_dev_bit(drv, GMAC_STATION_ADDR_HIGH_2_REG,
+ GMAC_ADDR_EN_B, !en);
+}
+
+static void hns_gmac_set_promisc(void *mac_drv, u8 en)
+{
+ struct mac_driver *drv = mac_drv;
+
+ if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
+ hns_gmac_set_uc_match(mac_drv, en);
+}
+
static void hns_gmac_init(void *mac_drv)
{
u32 port;
@@ -305,6 +323,8 @@ static void hns_gmac_init(void *mac_drv)
mdelay(10);
hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
hns_gmac_tx_loop_pkt_dis(mac_drv);
+ if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
+ hns_gmac_set_uc_match(mac_drv, 0);
}
void hns_gmac_update_stats(void *mac_drv)
@@ -402,14 +422,17 @@ static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
- if (drv->mac_id >= DSAF_SERVICE_NW_NUM) {
- u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
- u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
- | (mac_addr[3] << 16) | (mac_addr[2] << 24);
- dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
- dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG, high_val);
- }
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+
+ u32 val = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+ u32 sta_addr_en = dsaf_get_bit(val, GMAC_ADDR_EN_B);
+
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG,
+ high_val | (sta_addr_en << GMAC_ADDR_EN_B));
}
static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
@@ -641,7 +664,8 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
return;
for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
- snprintf(buff, ETH_GSTRING_LEN, g_gmac_stats_string[i].desc);
+ snprintf(buff, ETH_GSTRING_LEN, "%s",
+ g_gmac_stats_string[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
}
@@ -699,6 +723,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
mac_drv->get_sset_count = hns_gmac_get_sset_count;
mac_drv->get_strings = hns_gmac_get_strings;
mac_drv->update_stats = hns_gmac_update_stats;
+ mac_drv->set_promiscuous = hns_gmac_set_promisc;
return (void *)mac_drv;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 5ef0e96e918a..a38084a22bf2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -467,8 +467,13 @@ int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu)
struct mac_driver *drv = hns_mac_get_drv(mac_cb);
u32 buf_size = mac_cb->dsaf_dev->buf_size;
u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ u32 max_frm = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver) ?
+ MAC_MAX_MTU : MAC_MAX_MTU_V2;
- if ((new_mtu < MAC_MIN_MTU) || (new_frm > MAC_MAX_MTU) ||
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ max_frm = MAC_MAX_MTU_DBG;
+
+ if ((new_mtu < MAC_MIN_MTU) || (new_frm > max_frm) ||
(new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size))
return -EINVAL;
@@ -861,6 +866,14 @@ int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
return mac_ctrl_drv->get_sset_count(stringset);
}
+void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->set_promiscuous)
+ mac_ctrl_drv->set_promiscuous(mac_ctrl_drv, en);
+}
+
int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 0b052191d751..823b6e78c8aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -26,7 +26,9 @@ struct dsaf_device;
#define MAC_DEFAULT_MTU (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
#define MAC_MAX_MTU 9600
+#define MAC_MAX_MTU_V2 9728
#define MAC_MIN_MTU 68
+#define MAC_MAX_MTU_DBG MAC_DEFAULT_MTU
#define MAC_DEFAULT_PAUSE_TIME 0xff
@@ -365,7 +367,7 @@ struct mac_driver {
/*config rx pause enable*/
void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
/* config rx mode for promiscuous*/
- int (*set_promiscuous)(void *mac_drv, u8 enable);
+ void (*set_promiscuous)(void *mac_drv, u8 enable);
/* get mac id */
void (*mac_get_id)(void *mac_drv, u8 *mac_id);
void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
@@ -453,4 +455,6 @@ int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
void hns_set_led_opt(struct hns_mac_cb *mac_cb);
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status);
+void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+
#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 38fc5be3870c..5978a5c8ef35 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -748,8 +748,9 @@ static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
*/
static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
{
- dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
- DSAF_FC_XGE_TX_PAUSE_S, 1);
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
+ DSAF_FC_XGE_TX_PAUSE_S, 1);
}
/* set msk for dsaf exception irq*/
@@ -2218,17 +2219,17 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
/* dsaf onode registers */
for (i = 0; i < DSAF_XOD_NUM; i++) {
p[311 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
p[319 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
p[327 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
p[335 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
p[343 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
p[351 + i] = dsaf_read_dev(ddev,
- DSAF_XOD_ETS_TOKEN_CFG_0_REG + j * 0x90);
+ DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
}
p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 607c3be42241..e69b02287c44 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -244,31 +244,35 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
*/
phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
{
- u32 hilink3_mode;
- u32 hilink4_mode;
+ u32 mode;
+ u32 reg;
+ u32 shift;
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
- int dev_id = mac_cb->mac_id;
+ int mac_id = mac_cb->mac_id;
phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
- hilink3_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK3_REG);
- hilink4_mode = dsaf_read_reg(sys_ctl_vaddr, HNS_MAC_HILINK4_REG);
- if (dev_id >= 0 && dev_id <= 3) {
- if (hilink4_mode == 0)
- phy_if = PHY_INTERFACE_MODE_SGMII;
- else
+ if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ } else if (mac_id >= 0 && mac_id <= 3) {
+ reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
+ mode = dsaf_read_reg(sys_ctl_vaddr, reg);
+ /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
+ shift = is_ver1 ? 0 : mac_id;
+ if (dsaf_get_bit(mode, shift))
phy_if = PHY_INTERFACE_MODE_XGMII;
- } else if (dev_id >= 4 && dev_id <= 5) {
- if (hilink3_mode == 0)
- phy_if = PHY_INTERFACE_MODE_SGMII;
else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+ } else if (mac_id >= 4 && mac_id <= 7) {
+ reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
+ mode = dsaf_read_reg(sys_ctl_vaddr, reg);
+ /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
+ shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
+ if (dsaf_get_bit(mode, shift))
phy_if = PHY_INTERFACE_MODE_XGMII;
- } else {
- phy_if = PHY_INTERFACE_MODE_SGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
}
-
- dev_dbg(mac_cb->dev,
- "hilink3_mode=%d, hilink4_mode=%d dev_id=%d, phy_if=%d\n",
- hilink3_mode, hilink4_mode, dev_id, phy_if);
return phy_if;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index f302ef9073c6..5b7ae5ff43e8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -27,7 +27,7 @@ void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value)
void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM])
{
- int key_item = 0;
+ u32 key_item;
for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++)
dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4,
@@ -343,6 +343,9 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
hns_ppe_set_vlan_strip(ppe_cb, 0);
+ dsaf_write_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG,
+ HNS_PPEV2_MAX_FRAME_LEN);
+
/* set default RSS key in h/w */
hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 0f5cb6962acf..e9c0ec2fa0dd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -30,6 +30,8 @@
#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */
#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32))
+#define HNS_PPEV2_MAX_FRAME_LEN 0x980
+
enum ppe_qid_mode {
PPE_QID_MODE0 = 0, /* fixed queue id mode */
PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 12188807468c..28ee26e5c478 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -215,9 +215,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
bd_size_type);
dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
- ring_pair->port_id_in_dsa);
+ ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
- ring_pair->port_id_in_dsa);
+ ring_pair->port_id_in_comm);
} else {
dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
(u32)dma);
@@ -227,9 +227,9 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
bd_size_type);
dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
- ring_pair->port_id_in_dsa);
+ ring_pair->port_id_in_comm);
dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
- ring_pair->port_id_in_dsa);
+ ring_pair->port_id_in_comm);
}
}
@@ -256,50 +256,16 @@ static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
desc_cnt);
}
-/**
- *hns_rcb_set_port_coalesced_frames - set rcb port coalesced frames
- *@rcb_common: rcb_common device
- *@port_idx:port index
- *@coalesced_frames:BD num for coalesced frames
- */
-static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common,
- u32 port_idx,
- u32 coalesced_frames)
-{
- if (coalesced_frames >= rcb_common->desc_num ||
- coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES)
- return -EINVAL;
-
- dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
- coalesced_frames);
- return 0;
-}
-
-/**
- *hns_rcb_get_port_coalesced_frames - set rcb port coalesced frames
- *@rcb_common: rcb_common device
- *@port_idx:port index
- * return coaleseced frames value
- */
-static u32 hns_rcb_get_port_coalesced_frames(struct rcb_common_cb *rcb_common,
- u32 port_idx)
+static void hns_rcb_set_port_timeout(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
- if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM)
- port_idx = 0;
-
- return dsaf_read_dev(rcb_common,
- RCB_CFG_PKTLINE_REG + port_idx * 4);
-}
-
-/**
- *hns_rcb_set_timeout - set rcb port coalesced time_out
- *@rcb_common: rcb_common device
- *@time_out:time for coalesced time_out
- */
-static void hns_rcb_set_timeout(struct rcb_common_cb *rcb_common,
- u32 timeout)
-{
- dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG, timeout);
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+ dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
+ timeout * HNS_RCB_CLK_FREQ_MHZ);
+ else
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+ timeout);
}
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
@@ -361,10 +327,11 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
for (i = 0; i < port_num; i++) {
hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
- (void)hns_rcb_set_port_coalesced_frames(
- rcb_common, i, rcb_common->coalesced_frames);
+ (void)hns_rcb_set_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_COALESCED_FRAMES);
+ hns_rcb_set_port_timeout(
+ rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
}
- hns_rcb_set_timeout(rcb_common, rcb_common->timeout);
dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
HNS_RCB_COMMON_ENDIAN);
@@ -460,7 +427,8 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
}
-static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
+static int hns_rcb_get_port_in_comm(
+ struct rcb_common_cb *rcb_common, int ring_idx)
{
int comm_index = rcb_common->comm_index;
int port;
@@ -470,7 +438,7 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx)
q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
port = ring_idx / q_num;
} else {
- port = HNS_RCB_SERVICE_NW_ENGINE_NUM + comm_index - 1;
+ port = 0; /* config debug-port's port_id_in_comm to 0 */
}
return port;
@@ -518,7 +486,8 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
ring_pair_cb->index = i;
ring_pair_cb->q.io_base =
RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
- ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i);
+ ring_pair_cb->port_id_in_comm =
+ hns_rcb_get_port_in_comm(rcb_common, i);
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
@@ -534,82 +503,95 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
/**
*hns_rcb_get_coalesced_frames - get rcb port coalesced frames
*@rcb_common: rcb_common device
- *@comm_index:port index
- *return coalesced_frames
+ *@port_idx: port id in comm
+ *
+ *Returns: coalesced_frames
*/
-u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int port)
+u32 hns_rcb_get_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
{
- int comm_index = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-
- return hns_rcb_get_port_coalesced_frames(rcb_comm, port);
+ return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
}
/**
*hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
*@rcb_common: rcb_common device
- *@comm_index:port index
- *return time_out
+ *@port_idx: port id in comm
+ *
+ *Returns: time_out
*/
-u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index)
+u32 hns_rcb_get_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
{
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
-
- return rcb_comm->timeout;
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+ return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
+ HNS_RCB_CLK_FREQ_MHZ;
+ else
+ return dsaf_read_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
}
/**
*hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
*@rcb_common: rcb_common device
- *@comm_index: comm :index
- *@etx_usecs:tx time for coalesced time_out
- *@rx_usecs:rx time for coalesced time_out
+ *@port_idx: port id in comm
+ *@timeout: tx/rx time for coalesced time_out
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
*/
-void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
- int port, u32 timeout)
+int hns_rcb_set_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
{
- int comm_index = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
+ u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
- if (rcb_comm->timeout == timeout)
- return;
+ if (timeout == old_timeout)
+ return 0;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- dev_err(dsaf_dev->dev,
- "error: not support coalesce_usecs setting!\n");
- return;
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support coalesce_usecs setting!\n");
+ return -EINVAL;
+ }
}
- rcb_comm->timeout = timeout;
- hns_rcb_set_timeout(rcb_comm, rcb_comm->timeout);
+ if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support coalesce %dus!\n", timeout);
+ return -EINVAL;
+ }
+ hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+ return 0;
}
/**
*hns_rcb_set_coalesced_frames - set rcb coalesced frames
*@rcb_common: rcb_common device
- *@tx_frames:tx BD num for coalesced frames
- *@rx_frames:rx BD num for coalesced frames
- *Return 0 on success, negative on failure
+ *@port_idx: port id in comm
+ *@coalesced_frames: tx/rx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
*/
-int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
- int port, u32 coalesced_frames)
+int hns_rcb_set_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
{
- int comm_index = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[comm_index];
- u32 coalesced_reg_val;
- int ret;
+ u32 old_waterline = hns_rcb_get_coalesced_frames(rcb_common, port_idx);
- coalesced_reg_val = hns_rcb_get_port_coalesced_frames(rcb_comm, port);
-
- if (coalesced_reg_val == coalesced_frames)
+ if (coalesced_frames == old_waterline)
return 0;
- if (coalesced_frames >= HNS_RCB_MIN_COALESCED_FRAMES) {
- ret = hns_rcb_set_port_coalesced_frames(rcb_comm, port,
- coalesced_frames);
- return ret;
- } else {
+ if (coalesced_frames >= rcb_common->desc_num ||
+ coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
+ coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: not support coalesce_frames setting!\n");
return -EINVAL;
}
+
+ dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+ coalesced_frames);
+ return 0;
}
/**
@@ -749,8 +731,6 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
rcb_common->dsaf_dev = dsaf_dev;
rcb_common->desc_num = dsaf_dev->desc_num;
- rcb_common->coalesced_frames = HNS_RCB_DEF_COALESCED_FRAMES;
- rcb_common->timeout = HNS_RCB_MAX_TIME_OUT;
hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
rcb_common->max_vfn = max_vfn;
@@ -951,6 +931,10 @@ void hns_rcb_get_strings(int stringset, u8 *data, int index)
void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
u32 *regs = data;
+ bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
+ bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
+ u32 reg_tmp;
+ u32 reg_num_tmp;
u32 i = 0;
/*rcb common registers */
@@ -1004,12 +988,16 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
= dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
}
- regs[70] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_REG);
- regs[71] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
- regs[72] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+ reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
+ reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
+ for (i = 0; i < reg_num_tmp; i++)
+ regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
+
+ regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+ regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
/* mark end of rcb common regs */
- for (i = 73; i < 80; i++)
+ for (i = 78; i < 80; i++)
regs[i] = 0xcccccccc;
}
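A note on the units in hns_rcb_set_port_timeout() above: V1 hardware has a single global RCB_CFG_OVERTIME_REG that counts 350 MHz clock cycles, while V2 exposes a per-port register programmed directly in microseconds. The V1 path therefore scales the requested timeout by HNS_RCB_CLK_FREQ_MHZ, e.g. the new 3 us default becomes 3 * 350 = 1050 cycles (the old fixed HNS_RCB_MAX_TIME_OUT of 0x500 was 1280 cycles, about 3.7 us). Sketched:

	/* V1: microseconds -> 350 MHz clock cycles */
	u32 cycles = timeout_us * 350;	/* HNS_RCB_CLK_FREQ_MHZ */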
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index 81fe9f849973..eb61014ad615 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -38,7 +38,9 @@ struct rcb_common_cb;
#define HNS_RCB_MAX_COALESCED_FRAMES 1023
#define HNS_RCB_MIN_COALESCED_FRAMES 1
#define HNS_RCB_DEF_COALESCED_FRAMES 50
-#define HNS_RCB_MAX_TIME_OUT 0x500
+#define HNS_RCB_CLK_FREQ_MHZ 350
+#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
+#define HNS_RCB_DEF_COALESCED_USECS 3
#define HNS_RCB_COMMON_ENDIAN 1
@@ -82,7 +84,7 @@ struct ring_pair_cb {
int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
- u8 port_id_in_dsa;
+ u8 port_id_in_comm;
u8 used_by_vf;
struct hns_ring_hw_stats hw_stats;
@@ -97,8 +99,6 @@ struct rcb_common_cb {
u8 comm_index;
u32 ring_num;
- u32 coalesced_frames; /* frames threshold of rx interrupt */
- u32 timeout; /* time threshold of rx interrupt */
u32 desc_num; /* desc num per queue*/
struct ring_pair_cb ring_pair_cb[0];
@@ -125,13 +125,14 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
void hns_rcb_init_hw(struct ring_pair_cb *ring);
void hns_rcb_reset_ring_hw(struct hnae_queue *q);
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
-
-u32 hns_rcb_get_coalesced_frames(struct dsaf_device *dsaf_dev, int comm_index);
-u32 hns_rcb_get_coalesce_usecs(struct dsaf_device *dsaf_dev, int comm_index);
-void hns_rcb_set_coalesce_usecs(struct dsaf_device *dsaf_dev,
- int comm_index, u32 timeout);
-int hns_rcb_set_coalesced_frames(struct dsaf_device *dsaf_dev,
- int comm_index, u32 coalesce_frames);
+u32 hns_rcb_get_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+int hns_rcb_set_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
+int hns_rcb_set_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
void hns_rcb_update_stats(struct hnae_queue *queue);
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 60d695daa471..7d7204f45e78 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -103,6 +103,8 @@
/*serdes offset**/
#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
+#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
@@ -404,6 +406,7 @@
#define RCB_CFG_OVERTIME_REG 0x9300
#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+#define RCB_PORT_CFG_OVERTIME_REG 0x9430
#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
@@ -922,6 +925,8 @@
#define GMAC_LP_REG_CF2MI_LP_EN_B 2
#define GMAC_MODE_CHANGE_EB_B 0
+#define GMAC_UC_MATCH_EN_B 0
+#define GMAC_ADDR_EN_B 16
#define GMAC_RECV_CTRL_STRIP_PAD_EN_B 3
#define GMAC_RECV_CTRL_RUNT_PKT_EN_B 4
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 802d55457f19..fd90f3737963 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -7,7 +7,7 @@
* (at your option) any later version.
*/
-#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/of_mdio.h>
#include "hns_dsaf_main.h"
#include "hns_dsaf_mac.h"
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 3f77ff77abbc..687204b780b0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -48,7 +48,6 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
struct iphdr *iphdr;
struct ipv6hdr *ipv6hdr;
struct sk_buff *skb;
- int skb_tmp_len;
__be16 protocol;
u8 bn_pid = 0;
u8 rrcfv = 0;
@@ -66,10 +65,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16((u16)size);
- /*config bd buffer end */
+ /* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
+ /* fill port_id in the tx bd for sending management pkts */
+ hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
+ HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
+
if (type == DESC_TYPE_SKB) {
skb = (struct sk_buff *)priv;
@@ -90,13 +93,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
/* check for tcp/udp header */
- if (iphdr->protocol == IPPROTO_TCP) {
+ if (iphdr->protocol == IPPROTO_TCP &&
+ skb_is_gso(skb)) {
hnae_set_bit(tvsvsn,
HNSV2_TXD_TSE_B, 1);
- skb_tmp_len = SKB_TMP_LEN(skb);
l4_len = tcp_hdrlen(skb);
- mss = mtu - skb_tmp_len - ETH_FCS_LEN;
- paylen = skb->len - skb_tmp_len;
+ mss = skb_shinfo(skb)->gso_size;
+ paylen = skb->len - SKB_TMP_LEN(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
@@ -104,13 +107,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
/* check for tcp/udp header */
- if (ipv6hdr->nexthdr == IPPROTO_TCP) {
+ if (ipv6hdr->nexthdr == IPPROTO_TCP &&
+ skb_is_gso(skb) && skb_is_gso_v6(skb)) {
hnae_set_bit(tvsvsn,
HNSV2_TXD_TSE_B, 1);
- skb_tmp_len = SKB_TMP_LEN(skb);
l4_len = tcp_hdrlen(skb);
- mss = mtu - skb_tmp_len - ETH_FCS_LEN;
- paylen = skb->len - skb_tmp_len;
+ mss = skb_shinfo(skb)->gso_size;
+ paylen = skb->len - SKB_TMP_LEN(skb);
}
}
desc->tx.ip_offset = ip_offset;
@@ -564,6 +567,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb;
struct hnae_desc *desc;
struct hnae_desc_cb *desc_cb;
+ struct ethhdr *eh;
unsigned char *va;
int bnum, length, i;
int pull_len;
@@ -670,6 +674,14 @@ out_bnum_err:
return -EFAULT;
}
+ /* filter out multicast pkt with the same src mac as this port */
+ eh = eth_hdr(skb);
+ if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
+ ether_addr_equal(ndev->dev_addr, eh->h_source))) {
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
@@ -901,10 +913,7 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
- int head = ring->next_to_clean;
-
- /* for hardware bug fixed */
- head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head != ring->next_to_clean) {
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -947,8 +956,8 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
ring_data->ring, 0);
-
- ring_data->fini_process(ring_data);
+ if (ring_data->fini_process)
+ ring_data->fini_process(ring_data);
return 0;
}
@@ -1711,6 +1720,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
struct hns_nic_ring_data *rd;
+ bool is_ver1 = AE_IS_VER1(priv->enet_ver);
int i;
if (h->q_num > NIC_MAX_Q_PER_VF) {
@@ -1728,7 +1738,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->queue_index = i;
rd->ring = &h->qs[i]->tx_ring;
rd->poll_one = hns_nic_tx_poll_one;
- rd->fini_process = hns_nic_tx_fini_pro;
+ rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1740,7 +1750,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->ring = &h->qs[i - h->q_num]->rx_ring;
rd->poll_one = hns_nic_rx_poll_one;
rd->ex_process = hns_nic_rx_up_pro;
- rd->fini_process = hns_nic_rx_fini_pro;
+ rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
@@ -1804,7 +1814,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
h = hnae_get_handle(&priv->netdev->dev,
priv->ae_node, priv->port_id, NULL);
if (IS_ERR_OR_NULL(h)) {
- ret = PTR_ERR(h);
+ ret = -ENODEV;
dev_dbg(priv->dev, "has not handle, register notifier!\n");
goto out;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3c4a3bc31a89..3d746c887873 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -794,8 +794,10 @@ static int hns_set_coalesce(struct net_device *net_dev,
(!ops->set_coalesce_frames))
return -ESRCH;
- ops->set_coalesce_usecs(priv->ae_handle,
- ec->rx_coalesce_usecs);
+ ret = ops->set_coalesce_usecs(priv->ae_handle,
+ ec->rx_coalesce_usecs);
+ if (ret)
+ return ret;
ret = ops->set_coalesce_frames(
priv->ae_handle,
@@ -1013,8 +1015,8 @@ int hns_phy_led_set(struct net_device *netdev, int value)
struct phy_device *phy_dev = priv->phy;
retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
- retval = phy_write(phy_dev, HNS_LED_FC_REG, value);
- retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+ retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
+ retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
if (retval) {
netdev_err(netdev, "mdiobus_write fail !\n");
return retval;
@@ -1173,18 +1175,15 @@ hns_get_rss_key_size(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
- u32 ret;
if (AE_IS_VER1(priv->enet_ver)) {
netdev_err(netdev,
"RSS feature is not supported on this hardware\n");
- return -EOPNOTSUPP;
+ return 0;
}
ops = priv->ae_handle->dev->ops;
- ret = ops->get_rss_key_size(priv->ae_handle);
-
- return ret;
+ return ops->get_rss_key_size(priv->ae_handle);
}
static u32
@@ -1192,18 +1191,15 @@ hns_get_rss_indir_size(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
- u32 ret;
if (AE_IS_VER1(priv->enet_ver)) {
netdev_err(netdev,
"RSS feature is not supported on this hardware\n");
- return -EOPNOTSUPP;
+ return 0;
}
ops = priv->ae_handle->dev->ops;
- ret = ops->get_rss_indir_size(priv->ae_handle);
-
- return ret;
+ return ops->get_rss_indir_size(priv->ae_handle);
}
static int
@@ -1211,7 +1207,6 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
- int ret;
if (AE_IS_VER1(priv->enet_ver)) {
netdev_err(netdev,
@@ -1224,9 +1219,7 @@ hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
if (!indir)
return 0;
- ret = ops->get_rss(priv->ae_handle, indir, key, hfunc);
-
- return 0;
+ return ops->get_rss(priv->ae_handle, indir, key, hfunc);
}
static int
@@ -1235,7 +1228,6 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_ae_ops *ops;
- int ret;
if (AE_IS_VER1(priv->enet_ver)) {
netdev_err(netdev,
@@ -1252,7 +1244,22 @@ hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
if (!indir)
return 0;
- ret = ops->set_rss(priv->ae_handle, indir, key, hfunc);
+ return ops->set_rss(priv->ae_handle, indir, key, hfunc);
+}
+
+static int hns_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->ae_handle->q_num;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
return 0;
}
@@ -1280,6 +1287,7 @@ static struct ethtool_ops hns_ethtool_ops = {
.get_rxfh_indir_size = hns_get_rss_indir_size,
.get_rxfh = hns_get_rss,
.set_rxfh = hns_set_rss,
+ .get_rxnfc = hns_get_rxnfc,
};
void hns_ethtool_set_ops(struct net_device *ndev)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index fa593dd3efe1..3772f3ac956e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -83,6 +83,15 @@ config E1000E
To compile this driver as a module, choose M here. The module
will be called e1000e.
+config E1000E_HWTS
+ bool "Support HW cross-timestamp on PCH devices"
+ default y
+ depends on E1000E && X86
+ ---help---
+ Say Y to enable hardware supported cross-timestamping on PCH
+ devices. The cross-timestamp is available through the PTP clock
+ driver precise cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE).
+
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3fc7bde699ba..ae90d4f12b70 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
return __e1000_maybe_stop_tx(netdev, size);
}
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
nr_frags, mss);
if (count) {
+ /* The descriptors needed is higher than other Intel drivers
+ * due to a number of workarounds. The breakdown is below:
+ * Data descriptors: MAX_SKB_FRAGS + 1
+ * Context Descriptor: 1
+ * Keep head from touching tail: 2
+ * Workarounds: 3
+ */
+ int desc_needed = MAX_SKB_FRAGS + 7;
+
netdev_sent_queue(netdev, skb->len);
skb_tx_timestamp(skb);
e1000_tx_queue(adapter, tx_ring, tx_flags, count);
+
+ /* 82544 potentially requires twice as many data descriptors
+ * in order to guarantee buffers don't end on evenly-aligned
+ * dwords
+ */
+ if (adapter->pcix_82544)
+ desc_needed += MAX_SKB_FRAGS + 1;
+
/* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+ e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
if (!skb->xmit_more ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
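TXD_USE_COUNT above changes from ((S) >> (X)) + 1 to a true ceiling division, ((S) + ((1 << (X)) - 1)) >> (X); the old form over-counted by one descriptor whenever the length was an exact multiple of the per-descriptor size. A worked comparison:

	/* S = 4096, X = 12 (4 KiB per descriptor):
	 *   old: (4096 >> 12) + 1        = 2 descriptors
	 *   new: (4096 + 4095) >> 12     = 1 descriptor
	 * For S = 4097 both forms yield 2, as expected.
	 */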
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index f7c7804d79e5..0641c0098738 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -528,6 +528,11 @@
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+/* HH Time Sync */
+#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
+#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
+#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
+
#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b3949d5bef5c..4e733bf1a38e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -92,6 +92,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */
#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */
#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LBG PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM4 0x15D7
+#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8
+#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3
+#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6
#define E1000_REVISION_4 4
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a049e30639a1..c0f4887ea44d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1252,9 +1252,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
- /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+ /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 10) {
+ if (i++ == 30) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1328,6 +1328,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
I218_ULP_CONFIG1_RESET_TO_SMBUS |
I218_ULP_CONFIG1_WOL_HOST |
I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
+ I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
I218_ULP_CONFIG1_DISABLE_SMB_PERST);
e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
@@ -1433,6 +1435,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
emi_addr = I217_RX_CONFIG;
ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
+ if (hw->mac.type == e1000_pch_lpt ||
+ hw->mac.type == e1000_pch_spt) {
+ u16 phy_reg;
+
+ e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg);
+ phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
+ if (speed == SPEED_100 || speed == SPEED_10)
+ phy_reg |= 0x3E8;
+ else
+ phy_reg |= 0xFA;
+ e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+ }
hw->phy.ops.release(hw);
if (ret_val)
@@ -1467,6 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
hw->phy.ops.release(hw);
if (ret_val)
return ret_val;
+ } else {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_wphy_locked(hw,
+ PHY_REG(776, 20),
+ 0xC023);
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+
}
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 34c551e322eb..2311f6003f58 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -188,6 +188,10 @@
#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+/* enable ULP even when the phy is powered down via lanphypc */
+#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400
+/* disable clear of sticky ULP on PERST */
+#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800
#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
/* SMBus Address Phy Register */
@@ -226,6 +230,9 @@
#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
#define HV_PM_CTRL_K1_ENABLE 0x4000
+#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
+#define I217_PLL_CLOCK_GATE_MASK 0x07FF
+
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
/* Inband Control */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index c71ba1bfc1ec..9b4ec13d9161 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7452,6 +7452,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 25a0ad5102d6..e2ff3ef75d5d 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -26,6 +26,12 @@
#include "e1000.h"
+#ifdef CONFIG_E1000E_HWTS
+#include <linux/clocksource.h>
+#include <linux/ktime.h>
+#include <asm/tsc.h>
+#endif
+
/**
* e1000e_phc_adjfreq - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
@@ -98,6 +104,78 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
+#ifdef CONFIG_E1000E_HWTS
+#define MAX_HW_WAIT_COUNT (3)
+
+/**
+ * e1000e_phc_get_syncdevicetime - Callback given to the timekeeping code to read system/device clock registers
+ * @device: current device time
+ * @system: system counter value read synchronously with device time
+ * @ctx: context provided by timekeeping code
+ *
+ * Read device and system (ART) clock simultaneously and return the corrected
+ * clock values in ns.
+ **/
+static int e1000e_phc_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct e1000_adapter *adapter = (struct e1000_adapter *)ctx;
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
+ int i;
+ u32 tsync_ctrl;
+ cycle_t dev_cycles;
+ cycle_t sys_cycles;
+
+ tsync_ctrl = er32(TSYNCTXCTL);
+ tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC |
+ E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK;
+ ew32(TSYNCTXCTL, tsync_ctrl);
+ for (i = 0; i < MAX_HW_WAIT_COUNT; ++i) {
+ udelay(1);
+ tsync_ctrl = er32(TSYNCTXCTL);
+ if (tsync_ctrl & E1000_TSYNCTXCTL_SYNC_COMP)
+ break;
+ }
+
+ if (i == MAX_HW_WAIT_COUNT)
+ return -ETIMEDOUT;
+
+ dev_cycles = er32(SYSSTMPH);
+ dev_cycles <<= 32;
+ dev_cycles |= er32(SYSSTMPL);
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ *device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles));
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ sys_cycles = er32(PLTSTMPH);
+ sys_cycles <<= 32;
+ sys_cycles |= er32(PLTSTMPL);
+ *system = convert_art_to_tsc(sys_cycles);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_getcrosststamp - Reads the current system/device cross timestamp
+ * @ptp: ptp clock structure
+ * @xtstamp: structure containing the system/device cross timestamp
+ *
+ * Read device and system (ART) clock simultaneously and return the scaled
+ * clock values in ns.
+ **/
+static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+
+ return get_device_system_crosststamp(e1000e_phc_get_syncdevicetime,
+ adapter, NULL, xtstamp);
+}
+#endif /* CONFIG_E1000E_HWTS */
+
/**
* e1000e_phc_gettime - Reads the current time from the hardware clock
* @ptp: ptp clock structure
@@ -236,6 +314,13 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
break;
}
+#ifdef CONFIG_E1000E_HWTS
+ /* CPU must have ART and GBe must be from Sunrise Point or greater */
+ if (hw->mac.type >= e1000_pch_spt && boot_cpu_has(X86_FEATURE_ART))
+ adapter->ptp_clock_info.getcrosststamp =
+ e1000e_phc_getcrosststamp;
+#endif /* CONFIG_E1000E_HWTS */
+
INIT_DELAYED_WORK(&adapter->systim_overflow_work,
e1000e_systim_overflow_work);
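The new getcrosststamp hook is what backs the PTP_SYS_OFFSET_PRECISE ioctl mentioned in the Kconfig help above. A minimal userspace sketch, assuming the e1000e PHC is exposed as /dev/ptp0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_sys_offset_precise off;
		int fd = open("/dev/ptp0", O_RDONLY);

		if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off))
			return 1;
		/* device time and system (realtime) time, sampled together */
		printf("dev %lld.%09u sys %lld.%09u\n",
		       (long long)off.device.sec, off.device.nsec,
		       (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
		return 0;
	}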
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 1d5e0b77062a..0cb4d365e5ad 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -245,6 +245,10 @@
#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */
+#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */
+#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */
+#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */
#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index b243c3cbe68f..4de17db3808c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -243,7 +243,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
/* Even if we own the page, we are not allowed to use atomic_set()
* This would break get_page_unless_zero() users.
*/
- atomic_inc(&page->_count);
+ page_ref_inc(page);
return true;
}
@@ -1937,8 +1937,10 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
u32 reta, base;
- /* If the netdev is initialized we have to maintain table if possible */
- if (interface->netdev->reg_state != NETREG_UNINITIALIZED) {
+ /* If the Rx flow indirection table has been configured manually, we
+ * need to maintain it when possible.
+ */
+ if (netif_is_rxfh_configured(interface->netdev)) {
for (i = FM10K_RETA_SIZE; i--;) {
reta = interface->reta[i];
if ((((reta << 24) >> 24) < rss_i) &&
@@ -1946,6 +1948,10 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
(((reta << 8) >> 24) < rss_i) &&
(((reta) >> 24) < rss_i))
continue;
+
+ /* this should never happen */
+ dev_err(&interface->pdev->dev,
+ "RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n");
goto repopulate_reta;
}
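
Each 32-bit RETA entry packs four one-byte queue indices, which is what the shift pairs above extract for the bounds test. An equivalent, more explicit helper (a sketch for illustration, not part of the patch):

    /* true if all four queue indices in one RETA entry are < rss_i */
    static bool fm10k_reta_entry_in_bounds(u32 reta, u16 rss_i)
    {
    	int i;

    	for (i = 0; i < 4; i++)
    		if (((reta >> (8 * i)) & 0xff) >= rss_i)
    			return false;
    	return true;
    }
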
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 662569d5b7c0..d09a8dd71fc2 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1204,6 +1204,15 @@ err_queueing_scheme:
return err;
}
+static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ return fm10k_setup_tc(dev, tc->tc);
+}
+
static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -1386,7 +1395,7 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid,
.ndo_set_rx_mode = fm10k_set_rx_mode,
.ndo_get_stats64 = fm10k_get_stats64,
- .ndo_setup_tc = fm10k_setup_tc,
+ .ndo_setup_tc = __fm10k_setup_tc,
.ndo_set_vf_mac = fm10k_ndo_set_vf_mac,
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..8cf943db5662 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err)
return err;
- /* verify upper 16 bits are zero */
- if (vid >> 16)
- return FM10K_ERR_PARAM;
-
set = !(vid & FM10K_VLAN_CLEAR);
vid &= ~FM10K_VLAN_CLEAR;
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and if we
+ * allowed them, it could allow a rogue VF to receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
- vid = err;
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
/* update VSI info for VF in regards to VLAN table */
err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
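
The multi-bit test above keys off a non-zero length in the upper 16 bits of the message word. Assuming that layout (the helper name and encoding here are illustrative, not taken from the driver), a request could be composed as:

    /* sketch: VID in the low 16 bits, multi-bit length above it */
    static inline u32 fm10k_vlan_request(u16 vid, u16 len)
    {
    	return ((u32)len << 16) | vid;
    }
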
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index b4729ba57c9c..3b3c63e54ed6 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -41,6 +41,7 @@ i40e-objs := i40e_main.o \
i40e_diag.o \
i40e_txrx.o \
i40e_ptp.o \
+ i40e_client.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 68f2204ec6f3..1ce6e9c0427d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -58,15 +58,13 @@
#ifdef I40E_FCOE
#include "i40e_fcoe.h"
#endif
+#include "i40e_client.h"
#include "i40e_virtchnl.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_txrx.h"
#include "i40e_dcb.h"
/* Useful i40e defaults */
-#define I40E_BASE_PF_SEID 16
-#define I40E_BASE_VSI_SEID 512
-#define I40E_BASE_VEB_SEID 288
#define I40E_MAX_VEB 16
#define I40E_MAX_NUM_DESCRIPTORS 4096
@@ -104,6 +102,7 @@
#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
#define I40E_PRIV_FLAGS_PS BIT(4)
+#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -113,6 +112,7 @@
#define I40E_OEM_VER_PATCH_MASK 0xff
#define I40E_OEM_VER_BUILD_SHIFT 8
#define I40E_OEM_VER_SHIFT 24
+#define I40E_PHY_DEBUG_PORT BIT(4)
/* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -137,6 +137,19 @@
/* default to trying for four seconds */
#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+/**
+ * i40e_is_mac_710 - Return true if MAC is X710/XL710
+ * @hw: ptr to the hardware info
+ **/
+static inline bool i40e_is_mac_710(struct i40e_hw *hw)
+{
+ if ((hw->mac.type == I40E_MAC_X710) ||
+ (hw->mac.type == I40E_MAC_XL710))
+ return true;
+
+ return false;
+}
+
/* driver state flags */
enum i40e_state_t {
__I40E_TESTING,
@@ -178,6 +191,7 @@ struct i40e_lump_tracking {
u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
+#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
};
#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
@@ -270,6 +284,8 @@ struct i40e_pf {
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
+ u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
+ int iwarp_base_vector;
int queues_left; /* queues left unclaimed */
u16 alloc_rss_size; /* allocated RSS queues */
u16 rss_size_max; /* HW defined max RSS queues */
@@ -317,6 +333,7 @@ struct i40e_pf {
#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
#define I40E_FLAG_PROCESS_MDD_EVENT BIT_ULL(17)
#define I40E_FLAG_PROCESS_VFLR_EVENT BIT_ULL(18)
#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
@@ -339,6 +356,12 @@ struct i40e_pf {
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
#define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41)
#define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42)
+#define I40E_FLAG_100M_SGMII_CAPABLE BIT_ULL(43)
+#define I40E_FLAG_RESTART_AUTONEG BIT_ULL(44)
+#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45)
+#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46)
+#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
+#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
#define I40E_FLAG_PF_MAC BIT_ULL(50)
/* tracks features that get auto disabled by errors */
@@ -391,6 +414,7 @@ struct i40e_pf {
struct i40e_vf *vf;
int num_alloc_vfs; /* actual number of VFs allocated */
u32 vf_aq_requests;
+ u32 arq_overflows; /* Not fatal, possibly indicative of problems */
/* DCBx/DCBNL capability for PF that indicates
* whether DCBx is managed by firmware or host
@@ -423,6 +447,7 @@ struct i40e_pf {
u32 ioremap_len;
u32 fd_inv;
+ u16 phy_led_val;
};
struct i40e_mac_filter {
@@ -492,6 +517,7 @@ struct i40e_vsi {
u32 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_lost_interrupt;
u32 rx_buf_failed;
u32 rx_page_failed;
@@ -500,13 +526,6 @@ struct i40e_vsi {
struct i40e_ring **tx_rings;
u16 work_limit;
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware only supports 2us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
u16 int_rate_limit; /* value in usecs */
u16 rss_table_size; /* HW RSS table size */
@@ -557,6 +576,8 @@ struct i40e_vsi {
struct kobject *kobj; /* sysfs object */
bool current_isup; /* Sync 'link up' logging */
+ void *priv; /* client driver data reference. */
+
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -714,6 +735,10 @@ void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_vsi_context *ctxt,
u8 enabled_tc, bool is_add);
#endif
+void i40e_service_event_schedule(struct i40e_pf *pf);
+void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
+ u8 *msg, u16 len);
+
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
@@ -736,6 +761,17 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
+/* needed by client drivers */
+int i40e_lan_add_device(struct i40e_pf *pf);
+int i40e_lan_del_device(struct i40e_pf *pf);
+void i40e_client_subtask(struct i40e_pf *pf);
+void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
+void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
+void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
+void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
+void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
+ enum i40e_client_type type);
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
* @vsi: pointer to a vsi
@@ -747,6 +783,9 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
struct i40e_hw *hw = &pf->hw;
u32 val;
+ /* definitely clear the PBA here, as this function is meant to
+ * clean out all previous interrupts AND enable the interrupt
+ */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
@@ -754,9 +793,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
/* skip the flush */
}
-void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct net_device *netdev,
@@ -786,7 +824,8 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev);
-int i40e_setup_tc(struct net_device *netdev, u8 tc);
+int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc);
void i40e_netpoll(struct net_device *netdev);
int i40e_fcoe_enable(struct net_device *netdev);
int i40e_fcoe_disable(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 1fd5ea82a9bc..df8e2fd6a649 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -953,6 +953,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
u16 flags;
u16 ntu;
+ /* pre-clean the event info */
+ memset(&e->desc, 0, sizeof(e->desc));
+
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
@@ -1020,14 +1023,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
-clean_arq_element_out:
- /* Set pending if needed, unlock and return */
- if (pending != NULL)
- *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
-
-clean_arq_element_err:
- mutex_unlock(&hw->aq.arq_mutex);
-
if (i40e_is_nvm_update_op(&e->desc)) {
if (hw->aq.nvm_release_on_done) {
i40e_release_nvm(hw);
@@ -1048,6 +1043,13 @@ clean_arq_element_err:
}
}
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ mutex_unlock(&hw->aq.arq_mutex);
+
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b22012a446a6..8d5c65ab6267 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -34,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0004
+#define I40E_FW_API_VERSION_MINOR 0x0005
struct i40e_aq_desc {
__le16 flags;
@@ -145,6 +145,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_statistics = 0x0202,
i40e_aqc_opc_set_port_parameters = 0x0203,
i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+ i40e_aqc_opc_set_switch_config = 0x0205,
+ i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
+ i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
i40e_aqc_opc_add_vsi = 0x0210,
i40e_aqc_opc_update_vsi_parameters = 0x0211,
@@ -220,6 +223,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_run_phy_activity = 0x0626,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@@ -228,6 +232,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
i40e_aqc_opc_oem_post_update = 0x0720,
+ i40e_aqc_opc_thermal_sensor = 0x0721,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -402,6 +407,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008
#define I40E_AQ_CAP_ID_SRIOV 0x0012
#define I40E_AQ_CAP_ID_VF 0x0013
#define I40E_AQ_CAP_ID_VMDQ 0x0014
@@ -422,6 +428,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_LED 0x0061
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -680,6 +687,31 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+/* Set Switch Configuration (direct 0x0205) */
+struct i40e_aqc_set_switch_config {
+ __le16 flags;
+#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
+#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+ __le16 valid_flags;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
+
+/* Read Receive control registers (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ * used for accessing Rx control registers that can be
+ * slow and need special handling when under high Rx load
+ */
+struct i40e_aqc_rx_ctl_reg_read_write {
+ __le32 reserved1;
+ __le32 address;
+ __le32 reserved2;
+ __le32 value;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
+
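
A sketch of how a driver-side wrapper might issue the 0x0206 read through the admin queue, modeled on the direct-command helpers used elsewhere in this driver (the function name is an assumption):

    i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
    				u32 reg_addr, u32 *reg_val,
    				struct i40e_asq_cmd_details *cmd_details)
    {
    	struct i40e_aq_desc desc;
    	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
    		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
    	i40e_status status;

    	if (!reg_val)
    		return I40E_ERR_PARAM;

    	i40e_fill_default_direct_cmd_desc(&desc,
    					  i40e_aqc_opc_rx_ctl_reg_read);
    	cmd->address = cpu_to_le32(reg_addr);

    	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
    	if (!status)
    		*reg_val = le32_to_cpu(cmd->value);

    	return status;
    }
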
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
@@ -906,7 +938,8 @@ struct i40e_aqc_add_veb {
I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
+#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
u8 enable_tcs;
u8 reserved[9];
};
@@ -973,6 +1006,7 @@ struct i40e_aqc_add_macvlan_element_data {
#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
__le16 queue_number;
#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
@@ -1069,6 +1103,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
@@ -1257,10 +1292,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
__le32 tenant_id;
u8 reserved[4];
@@ -1755,7 +1796,12 @@ struct i40e_aqc_get_link_status {
u8 config;
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 reserved[5];
+ u8 external_power_ability;
+#define I40E_AQ_LINK_POWER_CLASS_1 0x00
+#define I40E_AQ_LINK_POWER_CLASS_2 0x01
+#define I40E_AQ_LINK_POWER_CLASS_3 0x02
+#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1823,6 +1869,18 @@ enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
};
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+ __le16 activity_id;
+ u8 flags;
+ u8 reserved1;
+ __le32 control;
+ __le32 data;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1912,6 +1970,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer {
I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+/* Thermal Sensor (indirect 0x0721)
+ * read or set thermal sensor configs and values
+ * takes a sensor and command specific data buffer, not detailed here
+ */
+struct i40e_aqc_thermal_sensor {
+ u8 sensor_action;
+#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
+#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
+#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -2191,6 +2265,7 @@ struct i40e_aqc_add_udp_tunnel {
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
u8 reserved1[10];
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
new file mode 100644
index 000000000000..0e6ac841321c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -0,0 +1,1012 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40e.h"
+#include "i40e_prototype.h"
+#include "i40e_client.h"
+
+static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
+
+static LIST_HEAD(i40e_devices);
+static DEFINE_MUTEX(i40e_device_mutex);
+
+static LIST_HEAD(i40e_clients);
+static DEFINE_MUTEX(i40e_client_mutex);
+
+static LIST_HEAD(i40e_client_instances);
+static DEFINE_MUTEX(i40e_client_instance_mutex);
+
+static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len);
+
+static int i40e_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info);
+
+static void i40e_client_request_reset(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 reset_level);
+
+static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag);
+
+static struct i40e_ops i40e_lan_ops = {
+ .virtchnl_send = i40e_client_virtchnl_send,
+ .setup_qvlist = i40e_client_setup_qvlist,
+ .request_reset = i40e_client_request_reset,
+ .update_vsi_ctxt = i40e_client_update_vsi_ctxt,
+};
+
+/**
+ * i40e_client_type_to_vsi_type - convert client type to vsi type
+ * @type: the i40e_client type
+ *
+ * returns the related vsi type value
+ **/
+static
+enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
+{
+ switch (type) {
+ case I40E_CLIENT_IWARP:
+ return I40E_VSI_IWARP;
+
+ case I40E_CLIENT_VMDQ2:
+ return I40E_VSI_VMDQ2;
+
+ default:
+ pr_err("i40e: Client type unknown\n");
+ return I40E_VSI_TYPE_UNKNOWN;
+ }
+}
+
+/**
+ * i40e_client_get_params - Get the params that can change at runtime
+ * @vsi: the VSI with the message
+ * @params: client param struct
+ *
+ **/
+static
+int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+ struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config;
+ int i = 0;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ u8 tc = dcb_cfg->etscfg.prioritytable[i];
+ u16 qs_handle;
+
+ /* If TC is not enabled for VSI use TC0 for UP */
+ if (!(vsi->tc_config.enabled_tc & BIT(tc)))
+ tc = 0;
+
+ qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]);
+ params->qos.prio_qos[i].tc = tc;
+ params->qos.prio_qos[i].qs_handle = qs_handle;
+ if (qs_handle == I40E_AQ_VSI_QS_HANDLE_INVALID) {
+ dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n",
+ tc, vsi->id);
+ return -EINVAL;
+ }
+ }
+
+ params->mtu = vsi->netdev->mtu;
+ return 0;
+}
+
+/**
+ * i40e_notify_client_of_vf_msg - call the client vf message callback
+ * @vsi: the VSI with the message
+ * @vf_id: the absolute VF id that sent the message
+ * @msg: message buffer
+ * @len: length of the message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void
+i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!vsi)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == vsi->back) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->virtchnl_receive) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance virtual channel receive routine\n");
+ continue;
+ }
+ cdev->client->ops->virtchnl_receive(&cdev->lan_info,
+ cdev->client,
+ vf_id, msg, len);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_l2_param_changes - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
+{
+ struct i40e_client_instance *cdev;
+ struct i40e_params params;
+
+ if (!vsi)
+ return;
+ memset(&params, 0, sizeof(params));
+ i40e_client_get_params(vsi, &params);
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == vsi->back) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->l2_param_change) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance l2_param_change routine\n");
+ continue;
+ }
+ cdev->lan_info.params = params;
+ cdev->client->ops->l2_param_change(&cdev->lan_info,
+ cdev->client,
+ &params);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_netdev_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client to this netdev, call the client with open
+ **/
+void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!vsi)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.netdev == vsi->netdev) {
+ if (!cdev->client ||
+ !cdev->client->ops || !cdev->client->ops->open) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance open routine\n");
+ continue;
+ }
+ cdev->client->ops->open(&cdev->lan_info, cdev->client);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_client_release_qvlist
+ * @ldev: pointer to L2 context.
+ *
+ **/
+static void i40e_client_release_qvlist(struct i40e_info *ldev)
+{
+ struct i40e_qvlist_info *qvlist_info = ldev->qvlist_info;
+ u32 i;
+
+ if (!ldev->qvlist_info)
+ return;
+
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_qv_info *qv_info;
+ u32 reg_idx;
+
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
+ wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ }
+ kfree(ldev->qvlist_info);
+ ldev->qvlist_info = NULL;
+}
+
+/**
+ * i40e_notify_client_of_netdev_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to a reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!vsi)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.netdev == vsi->netdev) {
+ if (!cdev->client ||
+ !cdev->client->ops || !cdev->client->ops->close) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Cannot locate client instance close routine\n");
+ continue;
+ }
+ cdev->client->ops->close(&cdev->lan_info, cdev->client,
+ reset);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_vf_reset - call the client vf reset callback
+ * @pf: PF device pointer
+ * @vf_id: absolute id of VF being reset
+ *
+ * If there is a client attached to this PF, notify when a VF is reset
+ **/
+void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!pf)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == pf) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->vf_reset) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF reset routine\n");
+ continue;
+ }
+ cdev->client->ops->vf_reset(&cdev->lan_info,
+ cdev->client, vf_id);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_vf_enable - call the client vf notification callback
+ * @pf: PF device pointer
+ * @num_vfs: the number of VFs currently enabled, 0 for disable
+ *
+ * If there is a client attached to this PF, call its VF notification routine
+ **/
+void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
+{
+ struct i40e_client_instance *cdev;
+
+ if (!pf)
+ return;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == pf) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->vf_enable) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF enable routine\n");
+ continue;
+ }
+ cdev->client->ops->vf_enable(&cdev->lan_info,
+ cdev->client, num_vfs);
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_vf_client_capable - ask the client if it likes the specified VF
+ * @pf: PF device pointer
+ * @vf_id: the VF in question
+ * @type: the client type in question
+ *
+ * If there is a client of the specified type attached to this PF, call
+ * its vf_capable routine
+ **/
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
+ enum i40e_client_type type)
+{
+ struct i40e_client_instance *cdev;
+ int capable = false;
+
+ if (!pf)
+ return false;
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if (cdev->lan_info.pf == pf) {
+ if (!cdev->client ||
+ !cdev->client->ops ||
+ !cdev->client->ops->vf_capable ||
+ !(cdev->client->type == type)) {
+ dev_dbg(&pf->pdev->dev,
+ "Cannot locate client instance VF capability routine\n");
+ continue;
+ }
+ capable = cdev->client->ops->vf_capable(&cdev->lan_info,
+ cdev->client,
+ vf_id);
+ break;
+ }
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+ return capable;
+}
+
+/**
+ * i40e_vsi_lookup - finds a matching VSI from the PF list starting at start_vsi
+ * @pf: board private structure
+ * @type: vsi type
+ * @start_vsi: a VSI pointer from where to start the search
+ *
+ * Returns non-NULL on success or NULL on failure
+ **/
+struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
+ enum i40e_vsi_type type,
+ struct i40e_vsi *start_vsi)
+{
+ struct i40e_vsi *vsi;
+ int i = 0;
+
+ if (start_vsi) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ vsi = pf->vsi[i];
+ if (vsi == start_vsi)
+ break;
+ }
+ }
+ for (; i < pf->num_alloc_vsi; i++) {
+ vsi = pf->vsi[i];
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
+}
+
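
Usage note: a NULL start_vsi scans the whole PF, while passing a VSI resumes the search from that entry, e.g. (illustrative):

    /* find the first iWARP VSI on this PF */
    struct i40e_vsi *vsi = i40e_vsi_lookup(pf, I40E_VSI_IWARP, NULL);
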
+/**
+ * i40e_client_add_instance - add a client instance struct to the instance list
+ * @pf: pointer to the board struct
+ * @client: pointer to a client struct in the client list.
+ *
+ * Returns cdev ptr on success, NULL on failure
+ **/
+static
+struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
+ struct i40e_client *client)
+{
+ struct i40e_client_instance *cdev;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry(cdev, &i40e_client_instances, list) {
+ if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
+ cdev = NULL;
+ goto out;
+ }
+ }
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ goto out;
+
+ cdev->lan_info.pf = (void *)pf;
+ cdev->lan_info.netdev = vsi->netdev;
+ cdev->lan_info.pcidev = pf->pdev;
+ cdev->lan_info.fid = pf->hw.pf_id;
+ cdev->lan_info.ftype = I40E_CLIENT_FTYPE_PF;
+ cdev->lan_info.hw_addr = pf->hw.hw_addr;
+ cdev->lan_info.ops = &i40e_lan_ops;
+ cdev->lan_info.version.major = I40E_CLIENT_VERSION_MAJOR;
+ cdev->lan_info.version.minor = I40E_CLIENT_VERSION_MINOR;
+ cdev->lan_info.version.build = I40E_CLIENT_VERSION_BUILD;
+ cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;
+ cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;
+ cdev->lan_info.fw_build = pf->hw.aq.fw_build;
+ set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
+
+ if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
+ kfree(cdev);
+ cdev = NULL;
+ goto out;
+ }
+
+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+
+ mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (mac)
+ ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
+ else
+ dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
+
+ cdev->client = client;
+ INIT_LIST_HEAD(&cdev->list);
+ list_add(&cdev->list, &i40e_client_instances);
+out:
+ mutex_unlock(&i40e_client_instance_mutex);
+ return cdev;
+}
+
+/**
+ * i40e_client_del_instance - removes a client instance from the list
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+static
+int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
+{
+ struct i40e_client_instance *cdev, *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
+ if ((cdev->lan_info.pf != pf) || (cdev->client != client))
+ continue;
+
+ dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
+ client->name, pf->hw.pf_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+ list_del(&cdev->list);
+ kfree(cdev);
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&i40e_client_instance_mutex);
+ return ret;
+}
+
+/**
+ * i40e_client_subtask - client maintenance work
+ * @pf: board private structure
+ **/
+void i40e_client_subtask(struct i40e_pf *pf)
+{
+ struct i40e_client_instance *cdev;
+ struct i40e_client *client;
+ int ret = 0;
+
+ if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
+ return;
+ pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ /* Check client state and instantiate client if client registered */
+ mutex_lock(&i40e_client_mutex);
+ list_for_each_entry(client, &i40e_clients, list) {
+ /* first check client is registered */
+ if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
+ continue;
+
+		/* Unless the client launches on probe, the LAN VSI must also be up before an instance can be created */
+ if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
+ /* check if L2 VSI is up, if not we are not ready */
+ if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+ continue;
+ }
+
+ /* Add the client instance to the instance list */
+ cdev = i40e_client_add_instance(pf, client);
+ if (!cdev)
+ continue;
+
+		/* Bump the ref_cnt tracking the number of instances of this client */
+ atomic_inc(&client->ref_cnt);
+ dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+ client->name, pf->hw.pf_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+
+ /* Send an Open request to the client */
+ atomic_inc(&cdev->ref_cnt);
+ if (client->ops && client->ops->open)
+ ret = client->ops->open(&cdev->lan_info, client);
+ atomic_dec(&cdev->ref_cnt);
+ if (!ret) {
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ } else {
+ /* remove client instance */
+ i40e_client_del_instance(pf, client);
+ atomic_dec(&client->ref_cnt);
+ continue;
+ }
+ }
+ mutex_unlock(&i40e_client_mutex);
+}
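
The subtask is driven from the PF service task whenever I40E_FLAG_SERVICE_CLIENT_REQUESTED is set; a minimal sketch of the expected call site (an assumption about i40e_main.c, which is not shown in this hunk):

    static void i40e_service_task(struct work_struct *work)
    {
    	struct i40e_pf *pf = container_of(work, struct i40e_pf,
    					  service_task);

    	/* ...other subtasks elided... */
    	i40e_client_subtask(pf);
    }
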
+
+/**
+ * i40e_lan_add_device - add a lan device struct to the list of lan devices
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_lan_add_device(struct i40e_pf *pf)
+{
+ struct i40e_device *ldev;
+ int ret = 0;
+
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
+ if (ldev->pf == pf) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ldev->pf = pf;
+ INIT_LIST_HEAD(&ldev->list);
+ list_add(&ldev->list, &i40e_devices);
+ dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
+
+	/* A client may have registered before this device was added, so
+	 * schedule the service subtask to go initiate any waiting clients.
+	 */
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ i40e_service_event_schedule(pf);
+
+out:
+ mutex_unlock(&i40e_device_mutex);
+ return ret;
+}
+
+/**
+ * i40e_lan_del_device - removes a lan device from the device list
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_lan_del_device(struct i40e_pf *pf)
+{
+ struct i40e_device *ldev, *tmp;
+ int ret = -ENODEV;
+
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
+ if (ldev->pf == pf) {
+ dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n",
+ pf->hw.pf_id, pf->hw.bus.device,
+ pf->hw.bus.func);
+ list_del(&ldev->list);
+ kfree(ldev);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&i40e_device_mutex);
+ return ret;
+}
+
+/**
+ * i40e_client_release - release client specific resources
+ * @client: pointer to the registered client
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_release(struct i40e_client *client)
+{
+ struct i40e_client_instance *cdev, *tmp;
+ struct i40e_pf *pf = NULL;
+ int ret = 0;
+
+ LIST_HEAD(cdevs_tmp);
+
+ mutex_lock(&i40e_client_instance_mutex);
+ list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
+ if (strncmp(cdev->client->name, client->name,
+ I40E_CLIENT_STR_LENGTH))
+ continue;
+		pf = (struct i40e_pf *)cdev->lan_info.pf;
+		if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+			if (atomic_read(&cdev->ref_cnt) > 0) {
+				ret = I40E_ERR_NOT_READY;
+				goto out;
+			}
+ if (client->ops && client->ops->close)
+ client->ops->close(&cdev->lan_info, client,
+ false);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+
+ dev_warn(&pf->pdev->dev,
+ "Client %s instance for PF id %d closed\n",
+ client->name, pf->hw.pf_id);
+ }
+ /* delete the client instance from the list */
+ list_del(&cdev->list);
+ list_add(&cdev->list, &cdevs_tmp);
+ atomic_dec(&client->ref_cnt);
+ dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
+ client->name);
+ }
+out:
+ mutex_unlock(&i40e_client_instance_mutex);
+
+ /* free the client device and release its vsi */
+ list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
+ kfree(cdev);
+ }
+ return ret;
+}
+
+/**
+ * i40e_client_prepare - prepare client specific resources
+ * @client: pointer to the registered client
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_prepare(struct i40e_client *client)
+{
+ struct i40e_device *ldev;
+ struct i40e_pf *pf;
+ int ret = 0;
+
+ mutex_lock(&i40e_device_mutex);
+ list_for_each_entry(ldev, &i40e_devices, list) {
+ pf = ldev->pf;
+ /* Start the client subtask */
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+ i40e_service_event_schedule(pf);
+ }
+ mutex_unlock(&i40e_device_mutex);
+ return ret;
+}
+
+/**
+ * i40e_client_virtchnl_send - send a message to a VF on behalf of the client
+ * @ldev: pointer to L2 context
+ * @client: Client pointer
+ * @vf_id: absolute VF identifier
+ * @msg: message buffer
+ * @len: length of message buffer
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status err;
+
+ err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP,
+ 0, msg, len, NULL);
+ if (err)
+ dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
+ err, hw->aq.asq_last_status);
+
+ return err;
+}
+
+/**
+ * i40e_client_setup_qvlist
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qv_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_setup_qvlist(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_qvlist_info *qvlist_info)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_qv_info *qv_info;
+ u32 v_idx, i, reg_idx, reg;
+ u32 size;
+
+ size = sizeof(struct i40e_qvlist_info) +
+ (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
+	ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+	if (!ldev->qvlist_info)
+		return -ENOMEM;
+	ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
+
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+
+ /* Validate vector id belongs to this client */
+ if ((v_idx >= (pf->iwarp_base_vector + pf->num_iwarp_msix)) ||
+ (v_idx < pf->iwarp_base_vector))
+ goto err;
+
+ ldev->qvlist_info->qv_info[i] = *qv_info;
+ reg_idx = I40E_PFINT_LNKLSTN(v_idx - 1);
+
+ if (qv_info->ceq_idx == I40E_QUEUE_INVALID_IDX) {
+ /* Special case - No CEQ mapped on this vector */
+ wr32(hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ } else {
+ reg = (qv_info->ceq_idx &
+ I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (I40E_QUEUE_TYPE_PE_CEQ <<
+ I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ wr32(hw, reg_idx, reg);
+
+ reg = (I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx <<
+ I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_END_OF_LIST <<
+ I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT));
+ wr32(hw, I40E_PFINT_CEQCTL(qv_info->ceq_idx), reg);
+ }
+ if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg = (I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx <<
+ I40E_PFINT_AEQCTL_ITR_INDX_SHIFT));
+
+ wr32(hw, I40E_PFINT_AEQCTL, reg);
+ }
+ }
+
+ return 0;
+err:
+ kfree(ldev->qvlist_info);
+ ldev->qvlist_info = NULL;
+ return -EINVAL;
+}
+
+/**
+ * i40e_client_request_reset
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @level: reset level
+ **/
+static void i40e_client_request_reset(struct i40e_info *ldev,
+ struct i40e_client *client,
+ u32 reset_level)
+{
+ struct i40e_pf *pf = ldev->pf;
+
+ switch (reset_level) {
+ case I40E_CLIENT_RESET_LEVEL_PF:
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ break;
+ case I40E_CLIENT_RESET_LEVEL_CORE:
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ break;
+ default:
+ dev_warn(&pf->pdev->dev,
+ "Client %s instance for PF id %d request an unsupported reset: %d.\n",
+ client->name, pf->hw.pf_id, reset_level);
+ break;
+ }
+
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_client_update_vsi_ctxt
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @is_vf: true if this is for a VF
+ * @vf_id: if is_vf true this carries the vf_id
+ * @flag: Any device level setting that needs to be done for PE
+ * @valid_flag: Bits in this match up and enable changing of flag bits
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi_context ctxt;
+ bool update = true;
+ i40e_status err;
+
+ /* TODO: for now do not allow setting VF's VSI setting */
+ if (is_vf)
+ return -EINVAL;
+
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get PF vsi config, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return -ENOENT;
+ }
+
+ if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
+ (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ ctxt.info.valid_sections =
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
+ !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+ ctxt.info.valid_sections =
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ } else {
+ update = false;
+ dev_warn(&pf->pdev->dev,
+ "Client %s instance for PF id %d request an unsupported Config: %x.\n",
+ client->name, pf->hw.pf_id, flag);
+ }
+
+ if (update) {
+ err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "update VSI ctxt for PE failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
+ }
+ return err;
+}
+
+/**
+ * i40e_register_client - Register a i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_register_client(struct i40e_client *client)
+{
+ int ret = 0;
+ enum i40e_vsi_type vsi_type;
+
+ if (!client) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (strlen(client->name) == 0) {
+ pr_info("i40e: Failed to register client with no name\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ mutex_lock(&i40e_client_mutex);
+ if (i40e_client_is_registered(client)) {
+ pr_info("i40e: Client %s has already been registered!\n",
+ client->name);
+ mutex_unlock(&i40e_client_mutex);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ if ((client->version.major != I40E_CLIENT_VERSION_MAJOR) ||
+ (client->version.minor != I40E_CLIENT_VERSION_MINOR)) {
+ pr_info("i40e: Failed to register client %s due to mismatched client interface version\n",
+ client->name);
+ pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+ client->version.major, client->version.minor,
+ client->version.build,
+ i40e_client_interface_version_str);
+ mutex_unlock(&i40e_client_mutex);
+ ret = -EIO;
+ goto out;
+ }
+
+ vsi_type = i40e_client_type_to_vsi_type(client->type);
+ if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
+ pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
+ client->name, client->type);
+ mutex_unlock(&i40e_client_mutex);
+ ret = -EIO;
+ goto out;
+ }
+ list_add(&client->list, &i40e_clients);
+ set_bit(__I40E_CLIENT_REGISTERED, &client->state);
+ mutex_unlock(&i40e_client_mutex);
+
+ if (i40e_client_prepare(client)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ pr_info("i40e: Registered client %s with return code %d\n",
+ client->name, ret);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40e_register_client);
+
+/**
+ * i40e_unregister_client - Unregister a i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_unregister_client(struct i40e_client *client)
+{
+ int ret = 0;
+
+	/* When an unregister request comes through, we must send a close
+	 * to each of the client instances that were opened.
+	 * i40e_client_release() handles this.
+	 */
+ if (!client || i40e_client_release(client)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* TODO: check if device is in reset, or if that matters? */
+ mutex_lock(&i40e_client_mutex);
+ if (!i40e_client_is_registered(client)) {
+ pr_info("i40e: Client %s has not been registered\n",
+ client->name);
+ mutex_unlock(&i40e_client_mutex);
+ ret = -ENODEV;
+ goto out;
+ }
+ if (atomic_read(&client->ref_cnt) == 0) {
+ clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
+ list_del(&client->list);
+ pr_info("i40e: Unregistered client %s with return code %d\n",
+ client->name, ret);
+ } else {
+ ret = I40E_ERR_NOT_READY;
+ pr_err("i40e: Client %s failed unregister - client has open instances\n",
+ client->name);
+ }
+
+ mutex_unlock(&i40e_client_mutex);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(i40e_unregister_client);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
new file mode 100644
index 000000000000..bf6b453d93a1
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -0,0 +1,232 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40E_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40E_CLIENT_VERSION_MAJOR 0
+#define I40E_CLIENT_VERSION_MINOR 01
+#define I40E_CLIENT_VERSION_BUILD 00
+#define I40E_CLIENT_VERSION_STR \
+ XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \
+ XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \
+ XSTRINGIFY(I40E_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+ u8 major;
+ u8 minor;
+ u8 build;
+ u8 rsvd;
+};
+
+enum i40e_client_state {
+ __I40E_CLIENT_NULL,
+ __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+ __I40E_CLIENT_INSTANCE_NONE,
+ __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+enum i40e_client_type {
+ I40E_CLIENT_IWARP,
+ I40E_CLIENT_VMDQ2
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+ u32 num_vectors;
+ struct i40e_qv_info qv_info[1];
+};
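
Because qv_info is a one-element trailing array, callers size an allocation for n vectors as the struct plus n - 1 extra entries (the same arithmetic i40e_client_setup_qvlist uses); a sketch:

    struct i40e_qvlist_info *qvlist;

    qvlist = kzalloc(sizeof(*qvlist) +
    		 (n - 1) * sizeof(struct i40e_qv_info), GFP_KERNEL);
    if (qvlist)
    	qvlist->num_vectors = n;
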
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+ u16 qs_handle; /* qs handle for prio */
+ u8 tc; /* TC mapped to prio */
+ u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY 8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+ struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+ struct i40e_qos_params qos;
+ u16 mtu;
+};
+
+/* Structure to hold Lan device info for a client device */
+struct i40e_info {
+ struct i40e_client_version version;
+ u8 lanmac[6];
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+ u8 __iomem *hw_addr;
+ u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+ u8 ftype; /* function type, PF or VF */
+ void *pf;
+
+ /* All L2 params that could change during the life span of the PF
+ * and needs to be communicated to the client when they change
+ */
+ struct i40e_qvlist_info *qvlist_info;
+ struct i40e_params params;
+ struct i40e_ops *ops;
+
+	u16 msix_count;	/* number of msix vectors */
+ /* Array down below will be dynamically allocated based on msix_count */
+ struct msix_entry *msix_entries;
+	u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+};
+
+#define I40E_CLIENT_RESET_LEVEL_PF 1
+#define I40E_CLIENT_RESET_LEVEL_CORE 2
+#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE BIT(1)
+
+struct i40e_ops {
+ /* setup_q_vector_list enables queues with a particular vector */
+ int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+ struct i40e_qvlist_info *qv_info);
+
+ int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+ u32 vf_id, u8 *msg, u16 len);
+
+ /* If the PE Engine is unresponsive, RDMA driver can request a reset.
+ * The level helps determine the level of reset being requested.
+ */
+ void (*request_reset)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 level);
+
+ /* API for the RDMA driver to set certain VSI flags that control
+ * PE Engine.
+ */
+ int (*update_vsi_ctxt)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ bool is_vf, u32 vf_id,
+ u32 flag, u32 valid_flag);
+};
+
+struct i40e_client_ops {
+ /* Should be called from register_client() or whenever PF is ready
+ * to create a specific client instance.
+ */
+ int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+ /* Should be called when netdev is unavailable or when unregister
+	 * call comes in. If the close is happening due to a reset being
+	 * triggered, set the reset bit to true.
+ */
+ void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+ bool reset);
+
+ /* called when some l2 managed parameters changes - mtu */
+ void (*l2_param_change)(struct i40e_info *ldev,
+ struct i40e_client *client,
+ struct i40e_params *params);
+
+ int (*virtchnl_receive)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id,
+ u8 *msg, u16 len);
+
+ /* called when a VF is reset by the PF */
+ void (*vf_reset)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id);
+
+ /* called when the number of VFs changes */
+ void (*vf_enable)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 num_vfs);
+
+ /* returns true if VF is capable of specified offload */
+ int (*vf_capable)(struct i40e_info *ldev,
+ struct i40e_client *client, u32 vf_id);
+};
+
+/* Client device */
+struct i40e_client_instance {
+ struct list_head list;
+ struct i40e_info lan_info;
+ struct i40e_client *client;
+ unsigned long state;
+ /* A count of all the in-progress calls to the client */
+ atomic_t ref_cnt;
+};
+
+struct i40e_client {
+ struct list_head list; /* list of registered clients */
+ char name[I40E_CLIENT_STR_LENGTH];
+ struct i40e_client_version version;
+ unsigned long state; /* client state */
+ atomic_t ref_cnt; /* Count of all the client devices of this kind */
+ u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
+ enum i40e_client_type type;
+ struct i40e_client_ops *ops; /* client ops provided by the client */
+};
+
+static inline bool i40e_client_is_registered(struct i40e_client *client)
+{
+ return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
+}
+
+/* used by clients */
+int i40e_register_client(struct i40e_client *client);
+int i40e_unregister_client(struct i40e_client *client);
+
+#endif /* _I40E_CLIENT_H_ */
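
Putting the pieces together, a minimal client module would look roughly like this; every "example" name is illustrative, and only the i40e_client/i40e_client_ops types and the register/unregister calls come from this header:

    static int example_open(struct i40e_info *ldev, struct i40e_client *client)
    {
    	/* claim MSI-X vectors via ldev->msix_entries, set up queues, ... */
    	return 0;
    }

    static void example_close(struct i40e_info *ldev,
    			  struct i40e_client *client, bool reset)
    {
    	/* tear down; reset is true when a PF reset is pending */
    }

    static struct i40e_client_ops example_ops = {
    	.open = example_open,
    	.close = example_close,
    };

    static struct i40e_client example_client = {
    	.name = "example",
    	.version.major = I40E_CLIENT_VERSION_MAJOR,
    	.version.minor = I40E_CLIENT_VERSION_MINOR,
    	.version.build = I40E_CLIENT_VERSION_BUILD,
    	.type = I40E_CLIENT_IWARP,
    	.ops = &example_ops,
    };

    static int __init example_init(void)
    {
    	return i40e_register_client(&example_client);
    }

    static void __exit example_exit(void)
    {
    	i40e_unregister_client(&example_client);
    }
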
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6a034ddac36a..4596294c2ab1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -55,19 +55,13 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_20G_KR2_A:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_KX_X722:
+ case I40E_DEV_ID_QSFP_X722:
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
hw->mac.type = I40E_MAC_X722;
break;
- case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
- hw->mac.type = I40E_MAC_X722_VF;
- break;
- case I40E_DEV_ID_VF:
- case I40E_DEV_ID_VF_HV:
- hw->mac.type = I40E_MAC_VF;
- break;
default:
hw->mac.type = I40E_MAC_GENERIC;
break;
@@ -1245,7 +1239,13 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (cnt = 0; cnt < grst_del + 10; cnt++) {
+
+	/* It can take up to 15 secs for GRST steady state.
+ * Bump it to 16 secs max to be safe.
+ */
+ grst_del = grst_del * 20;
+
+ for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -1894,6 +1894,32 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set PHY debug command flags (can be used to reset the external PHY).
+ **/
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_phy_debug *cmd =
+ (struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_phy_debug);
+
+ cmd->command_flags = cmd_flags;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_add_vsi
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -1958,12 +1984,19 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (set)
+ if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1))
+ flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+ }
cmd->promiscuous_flags = cpu_to_le16(flags);
cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1))
+ cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
cmd->seid = cpu_to_le16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2039,6 +2072,37 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: enable/disable VLAN promiscuous mode for the VSI
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_get_vsi_params - get VSI configuration info
* @hw: pointer to the hw struct
* @vsi_ctx: pointer to a vsi context struct
@@ -2283,8 +2347,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
* @downlink_seid: the VSI SEID
* @enabled_tc: bitmap of TCs to be enabled
* @default_port: true for default port VSI, false for control port
- * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
* @veb_seid: pointer to where to put the resulting VEB SEID
+ * @enable_stats: true to turn on VEB stats
* @cmd_details: pointer to command details structure or NULL
*
* This asks the FW to add a VEB between the uplink and downlink
@@ -2292,8 +2356,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
**/
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, bool enable_l2_filtering,
- u16 *veb_seid,
+ bool default_port, u16 *veb_seid,
+ bool enable_stats,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2320,8 +2384,9 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
else
veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
- if (enable_l2_filtering)
- veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+ /* reverse logic here: set the bitflag to disable the stats */
+ if (!enable_stats)
+ veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
cmd->veb_flags = cpu_to_le16(veb_flags);
@@ -2410,6 +2475,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
(struct i40e_aqc_macvlan *)&desc.params.raw;
i40e_status status;
u16 buf_size;
+ int i;
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
@@ -2423,12 +2489,17 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
cmd->seid[1] = 0;
cmd->seid[2] = 0;
+ for (i = 0; i < count; i++)
+ if (is_multicast_ether_addr(mv_list[i].mac_addr))
+ mv_list[i].flags |=
+ cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ cmd_details);
return status;
}
@@ -2476,6 +2547,137 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
+ * @hw: pointer to the hw struct
+ * @opcode: AQ opcode for add or delete mirror rule
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @id: Destination VSI SEID or Rule ID
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
+ * VEBs/VEPA elements only
+ **/
+static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+ u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_delete_mirror_rule *cmd =
+ (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
+ struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ buf_size = count * sizeof(*mr_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+ cmd->seid = cpu_to_le16(sw_seid);
+ cmd->rule_type = cpu_to_le16(rule_type &
+ I40E_AQC_MIRROR_RULE_TYPE_MASK);
+ cmd->num_entries = cpu_to_le16(count);
+ /* Dest VSI for add, rule_id for delete */
+ cmd->destination = cpu_to_le16(id);
+ if (mr_list) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ }
+
+ status = i40e_asq_send_command(hw, &desc, mr_list, buf_size,
+ cmd_details);
+ if (!status ||
+ hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) {
+ if (rule_id)
+ *rule_id = le16_to_cpu(resp->rule_id);
+ if (rules_used)
+ *rules_used = le16_to_cpu(resp->mirror_rules_used);
+ if (rules_free)
+ *rules_free = le16_to_cpu(resp->mirror_rules_free);
+ }
+ return status;
+}
+
+/**
+ * i40e_aq_add_mirrorrule - add a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @dest_vsi: SEID of VSI to which packets will be mirrored
+ * @count: length of the list
+ * @mr_list: list of mirrored VSI SEIDs or VLAN IDs
+ * @cmd_details: pointer to command details structure or NULL
+ * @rule_id: Rule ID returned from FW
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
+ **/
+i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
+{
+ if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
+ rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
+ if (count == 0 || !mr_list)
+ return I40E_ERR_PARAM;
+ }
+
+ return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
+ rule_type, dest_vsi, count, mr_list,
+ cmd_details, rule_id, rules_used, rules_free);
+}
+
+/**
+ * i40e_aq_delete_mirrorrule - delete a mirror rule
+ * @hw: pointer to the hw struct
+ * @sw_seid: Switch SEID (to which rule refers)
+ * @rule_type: Rule Type (ingress/egress/VLAN)
+ * @count: length of the list
+ * @rule_id: Rule ID that is returned in the receive desc as part of
+ * add_mirrorrule.
+ * @mr_list: list of mirrored VLAN IDs to be removed
+ * @cmd_details: pointer to command details structure or NULL
+ * @rules_used: Number of rules used in internal switch
+ * @rules_free: Number of rules free in internal switch
+ *
+ * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
+ **/
+i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free)
+{
+ /* Rule ID has to be valid except for rule_type INGRESS VLAN mirroring */
+ if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+ if (!rule_id)
+ return I40E_ERR_PARAM;
+ } else {
+ /* count and mr_list shall be valid for rule_type INGRESS VLAN
+ * mirroring. For other rule_type values, count and mr_list
+ * do not matter.
+ */
+ if (count == 0 || !mr_list)
+ return I40E_ERR_PARAM;
+ }
+
+ return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
+ rule_type, rule_id, count, mr_list,
+ cmd_details, NULL, rules_used, rules_free);
+}
+
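[Editor's note] A usage sketch of the new mirror-rule pair, for illustration only and not part of the patch: mirror all ingress traffic of a VEB to a destination VSI, then remove the rule again. veb_seid and dst_vsi_seid are placeholders for values obtained elsewhere; a non-VLAN rule type allows count == 0 and a NULL mr_list, per the checks above:

u16 rule_id, rules_used, rules_free;
i40e_status ret;

ret = i40e_aq_add_mirrorrule(hw, veb_seid,
			     I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
			     dst_vsi_seid, 0, NULL, NULL,
			     &rule_id, &rules_used, &rules_free);
if (!ret)
	ret = i40e_aq_delete_mirrorrule(hw, veb_seid,
					I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS,
					rule_id, 0, NULL, NULL,
					&rules_used, &rules_free);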
+/**
* i40e_aq_send_msg_to_vf
* @hw: pointer to the hardware structure
* @vfid: VF id to send msg
@@ -2765,35 +2967,6 @@ i40e_aq_erase_nvm_exit:
return status;
}
-#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01
-#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02
-#define I40E_DEV_FUNC_CAP_NPAR 0x03
-#define I40E_DEV_FUNC_CAP_OS2BMC 0x04
-#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05
-#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12
-#define I40E_DEV_FUNC_CAP_VF 0x13
-#define I40E_DEV_FUNC_CAP_VMDQ 0x14
-#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15
-#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16
-#define I40E_DEV_FUNC_CAP_VSI 0x17
-#define I40E_DEV_FUNC_CAP_DCB 0x18
-#define I40E_DEV_FUNC_CAP_FCOE 0x21
-#define I40E_DEV_FUNC_CAP_ISCSI 0x22
-#define I40E_DEV_FUNC_CAP_RSS 0x40
-#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
-#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
-#define I40E_DEV_FUNC_CAP_MSIX 0x43
-#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
-#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
-#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
-#define I40E_DEV_FUNC_CAP_FLEX10 0xF1
-#define I40E_DEV_FUNC_CAP_CEM 0xF2
-#define I40E_DEV_FUNC_CAP_IWARP 0x51
-#define I40E_DEV_FUNC_CAP_LED 0x61
-#define I40E_DEV_FUNC_CAP_SDP 0x62
-#define I40E_DEV_FUNC_CAP_MDIO 0x63
-#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64
-
/**
* i40e_parse_discover_capabilities
* @hw: pointer to the hw struct
@@ -2832,79 +3005,79 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
major_rev = cap->major_rev;
switch (id) {
- case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+ case I40E_AQ_CAP_ID_SWITCH_MODE:
p->switch_mode = number;
break;
- case I40E_DEV_FUNC_CAP_MGMT_MODE:
+ case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
break;
- case I40E_DEV_FUNC_CAP_NPAR:
+ case I40E_AQ_CAP_ID_NPAR_ACTIVE:
p->npar_enable = number;
break;
- case I40E_DEV_FUNC_CAP_OS2BMC:
+ case I40E_AQ_CAP_ID_OS2BMC_CAP:
p->os2bmc = number;
break;
- case I40E_DEV_FUNC_CAP_VALID_FUNC:
+ case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
p->valid_functions = number;
break;
- case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+ case I40E_AQ_CAP_ID_SRIOV:
if (number == 1)
p->sr_iov_1_1 = true;
break;
- case I40E_DEV_FUNC_CAP_VF:
+ case I40E_AQ_CAP_ID_VF:
p->num_vfs = number;
p->vf_base_id = logical_id;
break;
- case I40E_DEV_FUNC_CAP_VMDQ:
+ case I40E_AQ_CAP_ID_VMDQ:
if (number == 1)
p->vmdq = true;
break;
- case I40E_DEV_FUNC_CAP_802_1_QBG:
+ case I40E_AQ_CAP_ID_8021QBG:
if (number == 1)
p->evb_802_1_qbg = true;
break;
- case I40E_DEV_FUNC_CAP_802_1_QBH:
+ case I40E_AQ_CAP_ID_8021QBR:
if (number == 1)
p->evb_802_1_qbh = true;
break;
- case I40E_DEV_FUNC_CAP_VSI:
+ case I40E_AQ_CAP_ID_VSI:
p->num_vsis = number;
break;
- case I40E_DEV_FUNC_CAP_DCB:
+ case I40E_AQ_CAP_ID_DCB:
if (number == 1) {
p->dcb = true;
p->enabled_tcmap = logical_id;
p->maxtc = phys_id;
}
break;
- case I40E_DEV_FUNC_CAP_FCOE:
+ case I40E_AQ_CAP_ID_FCOE:
if (number == 1)
p->fcoe = true;
break;
- case I40E_DEV_FUNC_CAP_ISCSI:
+ case I40E_AQ_CAP_ID_ISCSI:
if (number == 1)
p->iscsi = true;
break;
- case I40E_DEV_FUNC_CAP_RSS:
+ case I40E_AQ_CAP_ID_RSS:
p->rss = true;
p->rss_table_size = number;
p->rss_table_entry_width = logical_id;
break;
- case I40E_DEV_FUNC_CAP_RX_QUEUES:
+ case I40E_AQ_CAP_ID_RXQ:
p->num_rx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_DEV_FUNC_CAP_TX_QUEUES:
+ case I40E_AQ_CAP_ID_TXQ:
p->num_tx_qp = number;
p->base_queue = phys_id;
break;
- case I40E_DEV_FUNC_CAP_MSIX:
+ case I40E_AQ_CAP_ID_MSIX:
p->num_msix_vectors = number;
break;
- case I40E_DEV_FUNC_CAP_MSIX_VF:
+ case I40E_AQ_CAP_ID_VF_MSIX:
p->num_msix_vectors_vf = number;
break;
- case I40E_DEV_FUNC_CAP_FLEX10:
+ case I40E_AQ_CAP_ID_FLEX10:
if (major_rev == 1) {
if (number == 1) {
p->flex10_enable = true;
@@ -2920,38 +3093,38 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->flex10_mode = logical_id;
p->flex10_status = phys_id;
break;
- case I40E_DEV_FUNC_CAP_CEM:
+ case I40E_AQ_CAP_ID_CEM:
if (number == 1)
p->mgmt_cem = true;
break;
- case I40E_DEV_FUNC_CAP_IWARP:
+ case I40E_AQ_CAP_ID_IWARP:
if (number == 1)
p->iwarp = true;
break;
- case I40E_DEV_FUNC_CAP_LED:
+ case I40E_AQ_CAP_ID_LED:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->led[phys_id] = true;
break;
- case I40E_DEV_FUNC_CAP_SDP:
+ case I40E_AQ_CAP_ID_SDP:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->sdp[phys_id] = true;
break;
- case I40E_DEV_FUNC_CAP_MDIO:
+ case I40E_AQ_CAP_ID_MDIO:
if (number == 1) {
p->mdio_port_num = phys_id;
p->mdio_port_mode = logical_id;
}
break;
- case I40E_DEV_FUNC_CAP_IEEE_1588:
+ case I40E_AQ_CAP_ID_1588:
if (number == 1)
p->ieee_1588 = true;
break;
- case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+ case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
p->fd = true;
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
break;
- case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+ case I40E_AQ_CAP_ID_WSR_PROT:
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
@@ -3709,7 +3882,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
return ret;
/* Read the PF Queue Filter control register */
- val = rd32(hw, I40E_PFQF_CTL_0);
+ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
/* Program required PE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
@@ -3746,7 +3919,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
if (settings->enable_macvlan)
val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
- wr32(hw, I40E_PFQF_CTL_0, val);
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
return 0;
}
@@ -4073,3 +4246,454 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
return status;
}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr,
+ u16 *value)
+{
+ i40e_status status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+ u8 port_num = hw->func_caps.mdio_port_num;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_ADDRESS) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ usleep_range(10, 20);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_read_end;
+ }
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_READ) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ usleep_range(10, 20);
+ retry--;
+ } while (retry);
+
+ if (!status) {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't read register value from external PHY.\n");
+ }
+
+phy_read_end:
+ return status;
+}
+
+/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr,
+ u16 value)
+{
+ i40e_status status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+ u8 port_num = hw->func_caps.mdio_port_num;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_ADDRESS) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ usleep_range(10, 20);
+ retry--;
+ } while (retry);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_write_end;
+ }
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_WRITE) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = 0;
+ break;
+ }
+ usleep_range(10, 20);
+ retry--;
+ } while (retry);
+
+phy_write_end:
+ return status;
+}
+
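[Editor's note] Both helpers follow the same MDIO pattern: write an address cycle to I40E_GLGEN_MSCA, poll MDICMD until the operation completes, then issue the read or write cycle. A read sketch (editor's illustration, not part of the patch), with the port and PHY address discovered the same way the LED helpers below do it:

u8 port_num = (u8)(rd32(hw, I40E_PFGEN_PORTNUM) &
		   I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
u8 phy_addr = i40e_get_phy_address(hw, port_num);
u16 val;

if (!i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
			    I40E_PHY_LED_PROV_REG_1, phy_addr, &val)) {
	/* val now holds the LED provisioning register contents */
}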
+/**
+ * i40e_get_phy_address
+ * @hw: pointer to the HW structure
+ * @dev_num: PHY port number whose address we want
+ *
+ * Returns the PHY address for the current port
+ **/
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
+{
+ u8 port_num = hw->func_caps.mdio_port_num;
+ u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
+
+ return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
+}
+
+/**
+ * i40e_blink_phy_link_led
+ * @hw: pointer to the HW structure
+ * @time: how long the LED will blink, in seconds
+ * @interval: gap between LED on and off, in msecs
+ *
+ * Blinks PHY link LED
+ **/
+i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
+{
+ i40e_status status = 0;
+ u32 i;
+ u16 led_ctl;
+ u16 gpio_led_port;
+ u16 led_reg;
+ u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u8 phy_addr = 0;
+ u8 port_num;
+
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ led_addr++) {
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
+ if (status)
+ goto phy_blinking_end;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
+ if (status)
+ goto phy_blinking_end;
+ break;
+ }
+ }
+
+ if (time > 0 && interval > 0) {
+ for (i = 0; i < time * 1000; i += interval) {
+ status = i40e_read_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
+ if (status)
+ goto restore_config;
+ if (led_reg & I40E_PHY_LED_MANUAL_ON)
+ led_reg = 0;
+ else
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
+ if (status)
+ goto restore_config;
+ msleep(interval);
+ }
+ }
+
+restore_config:
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, led_ctl);
+
+phy_blinking_end:
+ return status;
+}
+
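[Editor's note] Usage is a single call; a sketch (illustration only, not part of the patch) that blinks the link LED for 5 seconds, toggling every 500 ms. The helper restores the original LED configuration before returning:

i40e_status ret;

ret = i40e_blink_phy_link_led(hw, 5, 500);
/* nonzero ret means a PHY register access failed along the way */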
+/**
+ * i40e_led_get_phy - return current on/off mode
+ * @hw: pointer to the hw struct
+ * @led_addr: address of the LED register found
+ * @val: original value of that register
+ *
+ **/
+i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
+{
+ i40e_status status = 0;
+ u16 gpio_led_port;
+ u8 phy_addr = 0;
+ u16 reg_val;
+ u16 temp_addr;
+ u8 port_num;
+ u32 i;
+
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr, &reg_val);
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_phy
+ * @hw: pointer to the HW structure
+ * @on: true to turn the LED on, false to turn it off
+ * @led_addr: address of the LED register to use
+ * @mode: original value plus the bit that selects set vs. ignore
+ *
+ * Sets the LEDs on or off when controlled by the PHY
+ **/
+i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
+{
+ i40e_status status = 0;
+ u16 led_ctl = 0;
+ u16 led_reg = 0;
+ u8 phy_addr = 0;
+ u8 port_num;
+ u32 i;
+
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, &led_reg);
+ if (status)
+ return status;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
+ if (status)
+ return status;
+ }
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
+ if (status)
+ goto restore_config;
+ if (on)
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ else
+ led_reg = 0;
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
+ if (status)
+ goto restore_config;
+ if (mode & I40E_PHY_LED_MODE_ORIG) {
+ led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
+ }
+ return status;
+restore_config:
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, led_ctl);
+ return status;
+}
+
+/**
+ * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ i40e_status status;
+
+ if (!reg_val)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+
+ cmd_resp->address = cpu_to_le32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == 0)
+ *reg_val = le32_to_cpu(cmd_resp->value);
+
+ return status;
+}
+
+/**
+ * i40e_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+{
+ i40e_status status = 0;
+ bool use_register;
+ int retry = 5;
+ u32 val = 0;
+
+ use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ usleep_range(1000, 2000);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ val = rd32(hw, reg_addr);
+
+ return val;
+}
+
+/**
+ * i40e_aq_rx_ctl_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+
+ cmd->address = cpu_to_le32(reg_addr);
+ cmd->value = cpu_to_le32(reg_val);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ i40e_status status = 0;
+ bool use_register;
+ int retry = 5;
+
+ use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
+ reg_val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ usleep_range(1000, 2000);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ wr32(hw, reg_addr, reg_val);
+}
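[Editor's note] These two wrappers are why i40e_set_filter_control above was converted from rd32()/wr32(): on firmware with AQ API 1.5 or later, Rx control register access goes through the AdminQ with retries on EAGAIN, and falls back to direct MMIO on older firmware or on failure. A read-modify-write sketch (illustration only, not part of the patch), reusing names from this diff:

u32 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);

val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;	/* enable MAC/VLAN filtering */
i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);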
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 2691277c0055..0fab3a9b51d9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -380,17 +380,20 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
{
u16 length, typelength, offset = 0;
struct i40e_cee_app_prio *app;
- u8 i, up, selector;
+ u8 i;
typelength = ntohs(tlv->hdr.typelen);
length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
I40E_LLDP_TLV_LEN_SHIFT);
dcbcfg->numapps = length / sizeof(*app);
+
if (!dcbcfg->numapps)
return;
for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
if (app->prio_map & BIT(up))
@@ -400,13 +403,17 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
/* Get Selector from lower 2 bits, and convert to IEEE */
selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
- if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+ switch (selector) {
+ case I40E_CEE_APP_SEL_ETHTYPE:
dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
- else if (selector == I40E_CEE_APP_SEL_TCPIP)
+ break;
+ case I40E_CEE_APP_SEL_TCPIP:
dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
- else
+ break;
+ default:
/* Keep selector as it is for unknown types */
dcbcfg->app[i].selector = selector;
+ }
dcbcfg->app[i].protocolid = ntohs(app->protocol);
/* Move to next app */
@@ -814,13 +821,15 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
- /* If Firmware version < v4.33 IEEE only */
- if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4))
+ /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)))
return i40e_get_ieee_dcb_config(hw);
- /* If Firmware version == v4.33 use old CEE struct */
- if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
+ /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
sizeof(cee_v1_cfg), NULL);
if (!ret) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 10744a698d6f..0c97733d253c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -61,257 +61,13 @@ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
{
int i;
- if ((seid < I40E_BASE_VEB_SEID) ||
- (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
- dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
- else
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == seid)
- return pf->veb[i];
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == seid)
+ return pf->veb[i];
return NULL;
}
/**************************************************************
- * dump
- * The dump entry in debugfs is for getting a data snapshow of
- * the driver's current configuration and runtime details.
- * When the filesystem entry is written, a snapshot is taken.
- * When the entry is read, the most recent snapshot data is dumped.
- **************************************************************/
-static char *i40e_dbg_dump_buf;
-static ssize_t i40e_dbg_dump_data_len;
-static ssize_t i40e_dbg_dump_buffer_len;
-
-/**
- * i40e_dbg_dump_read - read the dump data
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- int bytes_not_copied;
- int len;
-
- /* is *ppos bigger than the available data? */
- if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
- return 0;
-
- /* be sure to not read beyond the end of available data */
- len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
-
- bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
- if (bytes_not_copied)
- return -EFAULT;
-
- *ppos += len;
- return len;
-}
-
-/**
- * i40e_dbg_prep_dump_buf
- * @pf: the PF we're working with
- * @buflen: the desired buffer length
- *
- * Return positive if success, 0 if failed
- **/
-static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
-{
- /* if not already big enough, prep for re alloc */
- if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
- kfree(i40e_dbg_dump_buf);
- i40e_dbg_dump_buffer_len = 0;
- i40e_dbg_dump_buf = NULL;
- }
-
- /* get a new buffer if needed */
- if (!i40e_dbg_dump_buf) {
- i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
- if (i40e_dbg_dump_buf != NULL)
- i40e_dbg_dump_buffer_len = buflen;
- }
-
- return i40e_dbg_dump_buffer_len;
-}
-
-/**
- * i40e_dbg_dump_write - trigger a datadump snapshot
- * @filp: the opened file
- * @buffer: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- *
- * Any write clears the stats
- **/
-static ssize_t i40e_dbg_dump_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct i40e_pf *pf = filp->private_data;
- bool seid_found = false;
- long seid = -1;
- int buflen = 0;
- int i, ret;
- int len;
- u8 *p;
-
- /* don't allow partial writes */
- if (*ppos != 0)
- return 0;
-
- /* decode the SEID given to be dumped */
- ret = kstrtol_from_user(buffer, count, 0, &seid);
-
- if (ret) {
- dev_info(&pf->pdev->dev, "bad seid value\n");
- } else if (seid == 0) {
- seid_found = true;
-
- kfree(i40e_dbg_dump_buf);
- i40e_dbg_dump_buffer_len = 0;
- i40e_dbg_dump_data_len = 0;
- i40e_dbg_dump_buf = NULL;
- dev_info(&pf->pdev->dev, "debug buffer freed\n");
-
- } else if (seid == pf->pf_seid || seid == 1) {
- seid_found = true;
-
- buflen = sizeof(struct i40e_pf);
- buflen += (sizeof(struct i40e_aq_desc)
- * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
-
- if (i40e_dbg_prep_dump_buf(pf, buflen)) {
- p = i40e_dbg_dump_buf;
-
- len = sizeof(struct i40e_pf);
- memcpy(p, pf, len);
- p += len;
-
- len = (sizeof(struct i40e_aq_desc)
- * pf->hw.aq.num_asq_entries);
- memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
- p += len;
-
- len = (sizeof(struct i40e_aq_desc)
- * pf->hw.aq.num_arq_entries);
- memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
- p += len;
-
- i40e_dbg_dump_data_len = buflen;
- dev_info(&pf->pdev->dev,
- "PF seid %ld dumped %d bytes\n",
- seid, (int)i40e_dbg_dump_data_len);
- }
- } else if (seid >= I40E_BASE_VSI_SEID) {
- struct i40e_vsi *vsi = NULL;
- struct i40e_mac_filter *f;
- int filter_count = 0;
-
- mutex_lock(&pf->switch_mutex);
- vsi = i40e_dbg_find_vsi(pf, seid);
- if (!vsi) {
- mutex_unlock(&pf->switch_mutex);
- goto write_exit;
- }
-
- buflen = sizeof(struct i40e_vsi);
- buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
- buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
- buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
- buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
- list_for_each_entry(f, &vsi->mac_filter_list, list)
- filter_count++;
- buflen += sizeof(struct i40e_mac_filter) * filter_count;
-
- if (i40e_dbg_prep_dump_buf(pf, buflen)) {
- p = i40e_dbg_dump_buf;
- seid_found = true;
-
- len = sizeof(struct i40e_vsi);
- memcpy(p, vsi, len);
- p += len;
-
- if (vsi->num_q_vectors) {
- len = (sizeof(struct i40e_q_vector)
- * vsi->num_q_vectors);
- memcpy(p, vsi->q_vectors, len);
- p += len;
- }
-
- if (vsi->num_queue_pairs) {
- len = (sizeof(struct i40e_ring) *
- vsi->num_queue_pairs);
- memcpy(p, vsi->tx_rings, len);
- p += len;
- memcpy(p, vsi->rx_rings, len);
- p += len;
- }
-
- if (vsi->tx_rings[0]) {
- len = sizeof(struct i40e_tx_buffer);
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- memcpy(p, vsi->tx_rings[i]->tx_bi, len);
- p += len;
- }
- len = sizeof(struct i40e_rx_buffer);
- for (i = 0; i < vsi->num_queue_pairs; i++) {
- memcpy(p, vsi->rx_rings[i]->rx_bi, len);
- p += len;
- }
- }
-
- /* macvlan filter list */
- len = sizeof(struct i40e_mac_filter);
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
- memcpy(p, f, len);
- p += len;
- }
-
- i40e_dbg_dump_data_len = buflen;
- dev_info(&pf->pdev->dev,
- "VSI seid %ld dumped %d bytes\n",
- seid, (int)i40e_dbg_dump_data_len);
- }
- mutex_unlock(&pf->switch_mutex);
- } else if (seid >= I40E_BASE_VEB_SEID) {
- struct i40e_veb *veb = NULL;
-
- mutex_lock(&pf->switch_mutex);
- veb = i40e_dbg_find_veb(pf, seid);
- if (!veb) {
- mutex_unlock(&pf->switch_mutex);
- goto write_exit;
- }
-
- buflen = sizeof(struct i40e_veb);
- if (i40e_dbg_prep_dump_buf(pf, buflen)) {
- seid_found = true;
- memcpy(i40e_dbg_dump_buf, veb, buflen);
- i40e_dbg_dump_data_len = buflen;
- dev_info(&pf->pdev->dev,
- "VEB seid %ld dumped %d bytes\n",
- seid, (int)i40e_dbg_dump_data_len);
- }
- mutex_unlock(&pf->switch_mutex);
- }
-
-write_exit:
- if (!seid_found)
- dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
-
- return count;
-}
-
-static const struct file_operations i40e_dbg_dump_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = i40e_dbg_dump_read,
- .write = i40e_dbg_dump_write,
-};
-
-/**************************************************************
* command
* The command entry in debugfs is for giving the driver commands
* to be executed - these may be for changing the internal switch
@@ -379,19 +135,27 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
return;
}
dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
- if (vsi->netdev)
- dev_info(&pf->pdev->dev,
- " netdev: name = %s\n",
- vsi->netdev->name);
+ if (vsi->netdev) {
+ struct net_device *nd = vsi->netdev;
+
+ dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
+ nd->name, nd->state, nd->flags);
+ dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
+ (unsigned long int)nd->features);
+ dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
+ (unsigned long int)nd->hw_features);
+ dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
+ (unsigned long int)nd->vlan_features);
+ }
if (vsi->active_vlans)
dev_info(&pf->pdev->dev,
" vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev,
- " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
- vsi->netdev_registered,
- vsi->current_netdev_flags, vsi->state, vsi->flags);
+ " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
+ vsi->state, vsi->flags,
+ vsi->netdev_registered, vsi->current_netdev_flags);
if (vsi == pf->vsi[pf->lan_vsi])
- dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
+ dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
pf->hw.mac.san_addr,
pf->hw.mac.port_addr);
@@ -511,7 +275,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->dtype);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, rx_ring->hsplit,
+ i, ring_is_ps_enabled(rx_ring),
rx_ring->next_to_use,
rx_ring->next_to_clean,
rx_ring->ring_active);
@@ -526,6 +290,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->rx_stats.alloc_page_failed,
rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n",
+ i,
+ rx_ring->rx_stats.realloc_count,
+ rx_ring->rx_stats.page_reuse_count);
+ dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
(unsigned long int)rx_ring->dma);
@@ -533,6 +302,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" rx_rings[%i]: vsi = %p, q_vector = %p\n",
i, rx_ring->vsi,
rx_ring->q_vector);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_itr_setting = %d (%s)\n",
+ i, rx_ring->rx_itr_setting,
+ ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
@@ -557,8 +330,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" tx_rings[%i]: dtype = %d\n",
i, tx_ring->dtype);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, tx_ring->hsplit,
+ " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
tx_ring->next_to_use,
tx_ring->next_to_clean,
tx_ring->ring_active);
@@ -583,14 +356,15 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" tx_rings[%i]: DCB tc = %d\n",
i, tx_ring->dcb_tc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_itr_setting = %d (%s)\n",
+ i, tx_ring->tx_itr_setting,
+ ITR_IS_DYNAMIC(tx_ring->tx_itr_setting) ? "dynamic" : "fixed");
}
rcu_read_unlock();
dev_info(&pf->pdev->dev,
- " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
- vsi->work_limit, vsi->rx_itr_setting,
- ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed",
- vsi->tx_itr_setting,
- ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
+ " work_limit = %d\n",
+ vsi->work_limit);
dev_info(&pf->pdev->dev,
" max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
@@ -815,20 +589,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
if (!is_rx_ring) {
txd = I40E_TX_DESC(ring, i);
dev_info(&pf->pdev->dev,
- " d[%03i] = 0x%016llx 0x%016llx\n",
+ " d[%03x] = 0x%016llx 0x%016llx\n",
i, txd->buffer_addr,
txd->cmd_type_offset_bsz);
} else if (sizeof(union i40e_rx_desc) ==
sizeof(union i40e_16byte_rx_desc)) {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
- " d[%03i] = 0x%016llx 0x%016llx\n",
+ " d[%03x] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
- " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
rxd->read.hdr_addr,
rxd->read.rsvd1, rxd->read.rsvd2);
@@ -843,20 +617,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
if (!is_rx_ring) {
txd = I40E_TX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
txd->buffer_addr, txd->cmd_type_offset_bsz);
} else if (sizeof(union i40e_rx_desc) ==
sizeof(union i40e_16byte_rx_desc)) {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
rxd->read.pkt_addr, rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
rxd->read.pkt_addr, rxd->read.hdr_addr,
rxd->read.rsvd1, rxd->read.rsvd2);
@@ -918,12 +692,6 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
struct i40e_veb *veb;
- if ((seid < I40E_BASE_VEB_SEID) ||
- (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
- dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
- return;
- }
-
veb = i40e_dbg_find_veb(pf, seid);
if (!veb) {
dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
@@ -2202,11 +1970,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf)
if (!pfile)
goto create_failed;
- pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_dump_fops);
- if (!pfile)
- goto create_failed;
-
pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
&i40e_dbg_netdev_ops_fops);
if (!pfile)
@@ -2227,9 +1990,6 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
debugfs_remove_recursive(pf->i40e_dbg_pf);
pf->i40e_dbg_pf = NULL;
-
- kfree(i40e_dbg_dump_buf);
- i40e_dbg_dump_buf = NULL;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 448ef4c17efb..99257fcd1ef4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -39,13 +39,11 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
-#define I40E_DEV_ID_VF 0x154C
-#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_KX_X722 0x37CE
+#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
-#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 29d5833e24a3..784b1659457a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -89,6 +89,9 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
+ I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
+ I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
+ I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
@@ -143,6 +146,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_oversize", stats.rx_oversize),
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+ I40E_PF_STAT("arq_overflows", arq_overflows),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
@@ -232,6 +236,7 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"flow-director-atr",
"veb-stats",
"packet-split",
+ "hw-atr-eviction",
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
@@ -340,7 +345,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
SUPPORTED_1000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
- if (pf->hw.mac.type == I40E_MAC_X722) {
+ if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
ecmd->supported |= SUPPORTED_100baseT_Full;
if (hw_link_info->requested_speeds &
I40E_LINK_SPEED_100MB)
@@ -411,6 +416,10 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
if (pf->hw.mac.type == I40E_MAC_X722) {
ecmd->supported |= SUPPORTED_100baseT_Full;
ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ }
}
}
if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
@@ -996,16 +1005,19 @@ static int i40e_get_eeprom(struct net_device *netdev,
/* check for NVMUpdate access method */
magic = hw->vendor_id | (hw->device_id << 16);
if (eeprom->magic && eeprom->magic != magic) {
- struct i40e_nvm_access *cmd;
- int errno;
+ struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
+ int errno = 0;
/* make sure it is the right magic for NVMUpdate */
if ((eeprom->magic >> 16) != hw->device_id)
- return -EINVAL;
+ errno = -EINVAL;
+ else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ errno = -EBUSY;
+ else
+ ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- cmd = (struct i40e_nvm_access *)eeprom;
- ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
+ if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
dev_info(&pf->pdev->dev,
"NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -1089,27 +1101,25 @@ static int i40e_set_eeprom(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
struct i40e_pf *pf = np->vsi->back;
- struct i40e_nvm_access *cmd;
+ struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
int ret_val = 0;
- int errno;
+ int errno = 0;
u32 magic;
/* normal ethtool set_eeprom is not supported */
magic = hw->vendor_id | (hw->device_id << 16);
if (eeprom->magic == magic)
- return -EOPNOTSUPP;
-
+ errno = -EOPNOTSUPP;
/* check for NVMUpdate access method */
- if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
- return -EINVAL;
-
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
- test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
- return -EBUSY;
+ else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
+ errno = -EINVAL;
+ else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+ errno = -EBUSY;
+ else
+ ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- cmd = (struct i40e_nvm_access *)eeprom;
- ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
- if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
+ if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
dev_info(&pf->pdev->dev,
"NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
ret_val, hw->aq.asq_last_status, errno,
@@ -1816,28 +1826,52 @@ static int i40e_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ i40e_status ret = 0;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
int blink_freq = 2;
+ u16 temp_status;
switch (state) {
case ETHTOOL_ID_ACTIVE:
- pf->led_status = i40e_led_get(hw);
+ if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+ pf->led_status = i40e_led_get(hw);
+ } else {
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+ ret = i40e_led_get_phy(hw, &temp_status,
+ &pf->phy_led_val);
+ pf->led_status = temp_status;
+ }
return blink_freq;
case ETHTOOL_ID_ON:
- i40e_led_set(hw, 0xF, false);
+ if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ i40e_led_set(hw, 0xf, false);
+ else
+ ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
break;
case ETHTOOL_ID_OFF:
- i40e_led_set(hw, 0x0, false);
+ if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
+ i40e_led_set(hw, 0x0, false);
+ else
+ ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
break;
case ETHTOOL_ID_INACTIVE:
- i40e_led_set(hw, pf->led_status, false);
+ if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
+ i40e_led_set(hw, pf->led_status, false);
+ } else {
+ ret = i40e_led_set_phy(hw, false, pf->led_status,
+ (pf->phy_led_val |
+ I40E_PHY_LED_MODE_ORIG));
+ i40e_aq_set_phy_debug(hw, 0, NULL);
+ }
break;
default:
break;
}
-
- return 0;
+ if (ret)
+ return -ENOENT;
+ else
+ return 0;
}
/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
@@ -1845,8 +1879,9 @@ static int i40e_set_phys_id(struct net_device *netdev,
* 125us (8000 interrupts per second) == ITR(62)
*/
-static int i40e_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int __i40e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -1854,14 +1889,24 @@ static int i40e_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
ec->rx_max_coalesced_frames_irq = vsi->work_limit;
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ /* Rx and Tx usecs have per-queue values. If the user doesn't
+ * specify a queue, return queue 0's value as representative.
+ */
+ if (queue < 0) {
+ queue = 0;
+ } else if (queue >= vsi->num_queue_pairs) {
+ return -EINVAL;
+ }
+
+ if (ITR_IS_DYNAMIC(vsi->rx_rings[queue]->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(vsi->tx_rings[queue]->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = vsi->rx_rings[queue]->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_rings[queue]->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
* fits the original intent of the ethtool variable,
@@ -1874,15 +1919,63 @@ static int i40e_get_coalesce(struct net_device *netdev,
return 0;
}
-static int i40e_set_coalesce(struct net_device *netdev,
+static int i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
+ return __i40e_get_coalesce(netdev, ec, -1);
+}
+
+static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40e_get_coalesce(netdev, ec, queue);
+}
+
+static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
struct i40e_q_vector *q_vector;
+ u16 vector, intrl;
+
+ intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+
+ vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
+ vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ q_vector = vsi->rx_rings[queue]->q_vector;
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+
+ q_vector = vsi->tx_rings[queue]->q_vector;
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+
+ wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
+ i40e_flush(hw);
+}
+
+static int __i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- u16 vector;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
@@ -1899,57 +1992,53 @@ static int i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
- vector = vsi->base_vector;
- if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
- } else if (ec->rx_coalesce_usecs == 0) {
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce)
netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
- } else {
- netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
- return -EINVAL;
+ } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
+ return -EINVAL;
}
vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
- if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- } else if (ec->tx_coalesce_usecs == 0) {
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce)
netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+ } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
+ return -EINVAL;
+ }
+
+ /* Rx and Tx usecs have per-queue values. If the user doesn't
+ * specify a queue, apply to all queues.
+ */
+ if (queue < 0) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ i40e_set_itr_per_queue(vsi, ec, i);
+ } else if (queue < vsi->num_queue_pairs) {
+ i40e_set_itr_per_queue(vsi, ec, queue);
} else {
- netif_info(pf, drv, netdev,
- "Invalid value, tx-usecs range is 0-8160\n");
+ netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+ vsi->num_queue_pairs - 1);
return -EINVAL;
}
- if (ec->use_adaptive_rx_coalesce)
- vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
- u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+ return 0;
+}
- q_vector = vsi->q_vectors[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
- wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
- i40e_flush(hw);
- }
+static int i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ return __i40e_set_coalesce(netdev, ec, -1);
+}
- return 0;
+static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40e_set_coalesce(netdev, ec, queue);
}
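
The two thin wrappers above share __i40e_set_coalesce() and use queue = -1 as an "apply to every queue" sentinel, validating the index before any hardware is touched. A self-contained sketch of the same dispatch pattern, with illustrative names and a made-up queue count:

/* Sketch of the "-1 means all queues" dispatch; names here are
 * illustrative, not driver API.
 */
#include <stdio.h>

#define NUM_QUEUES 4

static void apply_one(int q, int usecs)
{
	printf("queue %d -> %d usecs\n", q, usecs);
}

static int apply(int queue, int usecs)
{
	int i;

	if (queue < 0) {			/* sentinel: every queue */
		for (i = 0; i < NUM_QUEUES; i++)
			apply_one(i, usecs);
	} else if (queue < NUM_QUEUES) {	/* validate before HW access */
		apply_one(queue, usecs);
	} else {
		return -1;			/* out of range */
	}
	return 0;
}

int main(void)
{
	apply(-1, 50);		/* all queues, as i40e_set_coalesce() does */
	return apply(2, 84);	/* one queue, as set_per_queue_coalesce does */
}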
/**
@@ -2147,8 +2236,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
{
struct i40e_hw *hw = &pf->hw;
- u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+ u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
@@ -2166,9 +2255,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
break;
default:
@@ -2178,9 +2270,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case TCP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
break;
default:
@@ -2190,10 +2285,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
@@ -2204,10 +2302,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
+ return -EINVAL;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
@@ -2245,8 +2346,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
i40e_flush(hw);
/* Save setting for future output/update */
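
The HENA value is 64 bits wide but the hardware exposes it as two 32-bit registers, which is why every read and write above is split across I40E_PFQF_HENA(0) and I40E_PFQF_HENA(1). A minimal sketch of that split, using a plain array in place of the registers:

/* Sketch of a 64-bit value split across two 32-bit registers, as in
 * the i40e_read_rx_ctl()/i40e_write_rx_ctl() pairs above. reg[]
 * stands in for the hardware registers.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t reg[2];

static uint64_t hena_read(void)
{
	return (uint64_t)reg[0] | ((uint64_t)reg[1] << 32);
}

static void hena_write(uint64_t hena)
{
	reg[0] = (uint32_t)hena;
	reg[1] = (uint32_t)(hena >> 32);
}

int main(void)
{
	uint64_t hena = hena_read();

	hena |= 1ULL << 63;	/* set an assumed packet-classifier bit */
	hena_write(hena);
	printf("0x%016llx\n", (unsigned long long)hena_read());
	return 0;
}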
@@ -2712,6 +2813,8 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
I40E_PRIV_FLAGS_VEB_STATS : 0;
ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
I40E_PRIV_FLAGS_PS : 0;
+ ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
+ 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
return ret_flags;
}
@@ -2763,10 +2866,21 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
}
- if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+ if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
+ !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
- else
+ reset_required = true;
+ } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) &&
+ (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+ reset_required = true;
+ }
+
+ if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
+ (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
+ pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ else
+ pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE;
/* if needed, issue reset to cause things to take effect */
if (reset_required)
@@ -2812,6 +2926,8 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.get_ts_info = i40e_get_ts_info,
.get_priv_flags = i40e_get_priv_flags,
.set_priv_flags = i40e_set_priv_flags,
+ .get_per_queue_coalesce = i40e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = i40e_set_per_queue_coalesce,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 579a46ca82df..8ad162c16f61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -295,11 +295,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf)
}
/* enable FCoE hash filter */
- val = rd32(hw, I40E_PFQF_HENA(1));
+ val = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1));
val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
- wr32(hw, I40E_PFQF_HENA(1), val);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), val);
/* enable flag */
pf->flags |= I40E_FLAG_FCOE_ENABLED;
@@ -317,11 +317,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf)
pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
/* Setup max frame with FCoE_MTU plus L2 overheads */
- val = rd32(hw, I40E_GLFCOE_RCTL);
+ val = i40e_read_rx_ctl(hw, I40E_GLFCOE_RCTL);
val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
<< I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
- wr32(hw, I40E_GLFCOE_RCTL, val);
+ i40e_write_rx_ctl(hw, I40E_GLFCOE_RCTL, val);
dev_info(&pf->pdev->dev, "FCoE is supported.\n");
}
@@ -1359,16 +1359,32 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
struct i40e_tx_buffer *first;
u32 tx_flags = 0;
+ int fso, count;
u8 hdr_len = 0;
u8 sof = 0;
u8 eof = 0;
- int fso;
if (i40e_fcoe_set_skb_header(skb))
goto out_drop;
- if (!i40e_xmit_descriptor_count(skb, tx_ring))
+ count = i40e_xmit_descriptor_count(skb);
+ if (i40e_chk_linearize(skb, count)) {
+ if (__skb_linearize(skb))
+ goto out_drop;
+ count = TXD_USE_COUNT(skb->len);
+ tx_ring->tx_stats.tx_linearize++;
+ }
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
+ }
/* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
@@ -1457,7 +1473,7 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = {
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
- .ndo_setup_tc = i40e_setup_tc,
+ .ndo_setup_tc = __i40e_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40e_netpoll,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 8f3b53e0dc46..344912957cab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -28,11 +28,6 @@
#include <linux/of_net.h>
#include <linux/pci.h>
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
-
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
@@ -51,7 +46,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 8
+#define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -90,6 +85,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
@@ -110,6 +107,8 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+static struct workqueue_struct *i40e_wq;
+
/**
* i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
@@ -290,12 +289,12 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
*
* If not already scheduled, this puts the task into the work queue
**/
-static void i40e_service_event_schedule(struct i40e_pf *pf)
+void i40e_service_event_schedule(struct i40e_pf *pf)
{
if (!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
!test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
- schedule_work(&pf->service_task);
+ queue_work(i40e_wq, &pf->service_task);
}
/**
@@ -769,7 +768,7 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
if (vsi->type != I40E_VSI_FCOE)
return;
- idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
+ idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
fs = &vsi->fcoe_stats;
ofs = &vsi->fcoe_stats_offsets;
@@ -820,6 +819,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
u32 tx_restart, tx_busy;
+ u64 tx_lost_interrupt;
struct i40e_ring *p;
u32 rx_page, rx_buf;
u64 bytes, packets;
@@ -845,6 +845,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+ tx_lost_interrupt = 0;
rx_page = 0;
rx_buf = 0;
rcu_read_lock();
@@ -863,6 +864,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
+ tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
/* Rx queue is part of the same block as Tx queue */
p = &p[1];
@@ -881,6 +883,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
+ vsi->tx_lost_interrupt = tx_lost_interrupt;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
@@ -1368,7 +1371,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
f->changed = true;
INIT_LIST_HEAD(&f->list);
- list_add(&f->list, &vsi->mac_filter_list);
+ list_add_tail(&f->list, &vsi->mac_filter_list);
}
/* increment counter and add a new flag if needed */
@@ -1538,7 +1541,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
ether_addr_copy(netdev->dev_addr, addr->sa_data);
- return i40e_sync_vsi_filters(vsi);
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
+ return 0;
}
/**
@@ -1762,6 +1769,11 @@ bottom_of_search_loop:
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
+
+ /* schedule our worker thread which will take care of
+ * applying the new filter changes
+ */
+ i40e_service_event_schedule(vsi->back);
}
/**
@@ -1933,7 +1945,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
sizeof(struct i40e_aqc_remove_macvlan_element_data);
del_list_size = filter_list_len *
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kzalloc(del_list_size, GFP_KERNEL);
+ del_list = kzalloc(del_list_size, GFP_ATOMIC);
if (!del_list) {
i40e_cleanup_add_list(&tmp_add_list);
@@ -2011,7 +2023,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
sizeof(struct i40e_aqc_add_macvlan_element_data),
add_list_size = filter_list_len *
sizeof(struct i40e_aqc_add_macvlan_element_data);
- add_list = kzalloc(add_list_size, GFP_KERNEL);
+ add_list = kzalloc(add_list_size, GFP_ATOMIC);
if (!add_list) {
/* Purge element from temporary lists */
i40e_cleanup_add_list(&tmp_add_list);
@@ -2110,7 +2122,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state));
- if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+ if ((vsi->type == I40E_VSI_MAIN) &&
+ (pf->lan_veb != I40E_NO_VEB) &&
+ !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
* promisc behavior but will not get VF or VMDq traffic
@@ -2160,6 +2174,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
}
out:
+ /* if something went wrong then set the changed flag so we try again */
+ if (retval)
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
return retval;
}
@@ -2212,7 +2230,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
-
+ i40e_notify_client_of_l2_param_changes(vsi);
return 0;
}
@@ -3106,11 +3124,11 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
struct i40e_q_vector *q_vector = vsi->q_vectors[i];
q_vector->itr_countdown = ITR_COUNTDOWN_START;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
q_vector->tx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
q_vector->tx.itr);
@@ -3202,10 +3220,10 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* set the ITR configuration */
q_vector->itr_countdown = ITR_COUNTDOWN_START;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
q_vector->tx.latency_range = I40E_LOW_LATENCY;
wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
@@ -3245,14 +3263,15 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
+ * @clearpba: true when all pending interrupt events should be cleared
**/
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTL0, val);
@@ -3260,22 +3279,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
}
/**
- * i40e_irq_dynamic_disable - Disable default interrupt generation settings
- * @vsi: pointer to a vsi
- * @vector: disable a particular Hw Interrupt vector
- **/
-void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
-{
- struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- u32 val;
-
- val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
- i40e_flush(hw);
-}
-
-/**
* i40e_msix_clean_rings - MSIX mode Interrupt Handler
* @irq: interrupt number
* @data: pointer to a q_vector
@@ -3400,7 +3403,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
- i40e_irq_dynamic_enable_icr0(pf);
+ i40e_irq_dynamic_enable_icr0(pf, true);
}
i40e_flush(&pf->hw);
@@ -3459,16 +3462,12 @@ static irqreturn_t i40e_intr(int irq, void *data)
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
- /* temporarily disable queue cause for NAPI processing */
- u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-
- qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_RQCTL(0), qval);
-
- qval = rd32(hw, I40E_QINT_TQCTL(0));
- qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_TQCTL(0), qval);
-
+ /* We do not have a way to disarm queue causes while leaving the
+ * interrupt enabled for all other causes. Ideally the interrupt
+ * would be disabled while we are in NAPI, but this is not a
+ * performance path and napi_schedule() can deal with rescheduling.
+ */
if (!test_bit(__I40E_DOWN, &pf->state))
napi_schedule_irqoff(&q_vector->napi);
}
@@ -3476,6 +3475,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
}
if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
@@ -3546,7 +3546,7 @@ enable_intr:
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, &pf->state)) {
i40e_service_event_schedule(pf);
- i40e_irq_dynamic_enable_icr0(pf);
+ i40e_irq_dynamic_enable_icr0(pf, false);
}
return ret;
@@ -3750,7 +3750,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * i40e_netpoll - A Polling 'interrupt'handler
+ * i40e_netpoll - A Polling 'interrupt' handler
* @netdev: network interface device structure
*
* This is used by netconsole to send skbs without having to re-enable
@@ -3929,6 +3929,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
else
rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+ /* No waiting for the Rx queue to disable */
+ if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
+ continue;
/* wait for the change to finish */
ret = i40e_pf_rxq_wait(pf, pf_q, enable);
@@ -4166,6 +4169,9 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
free_irq(pf->msix_entries[0].vector, pf);
}
+ i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
+ I40E_IWARP_IRQ_PILE_ID);
+
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
@@ -4209,12 +4215,17 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
**/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
+ bool reset = false;
+
if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
i40e_down(vsi);
i40e_vsi_free_irq(vsi);
i40e_vsi_free_tx_resources(vsi);
i40e_vsi_free_rx_resources(vsi);
vsi->current_netdev_flags = 0;
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ reset = true;
+ i40e_notify_client_of_netdev_close(vsi, reset);
}
/**
@@ -4287,12 +4298,12 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
#ifdef CONFIG_I40E_DCB
/**
- * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
+ * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
* @vsi: the VSI being configured
*
- * This function waits for the given VSI's Tx queues to be disabled.
+ * This function waits for the given VSI's queues to be disabled.
**/
-static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
+static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
int i, pf_q, ret;
@@ -4309,24 +4320,36 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
}
}
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+ /* Check and wait for the disable status of the queue */
+ ret = i40e_pf_rxq_wait(pf, pf_q, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "VSI seid %d Rx ring %d disable timeout\n",
+ vsi->seid, pf_q);
+ return ret;
+ }
+ }
+
return 0;
}
/**
- * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
+ * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
* @pf: the PF
*
- * This function waits for the Tx queues to be in disabled state for all the
+ * This function waits for the queues to be in disabled state for all the
* VSIs that are managed by this PF.
**/
-static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
+static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
int v, ret = 0;
for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
/* No need to wait for FCoE VSI queues */
if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
- ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
+ ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret)
break;
}
@@ -4352,7 +4375,7 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
struct i40e_ring *tx_ring = NULL;
struct i40e_pf *pf;
- u32 head, val, tx_pending;
+ u32 head, val, tx_pending_hw;
int i;
pf = vsi->back;
@@ -4378,16 +4401,9 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
else
val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
- /* Bail out if interrupts are disabled because napi_poll
- * execution in-progress or will get scheduled soon.
- * napi_poll cleans TX and RX queues and updates 'next_to_clean'.
- */
- if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))
- return;
-
head = i40e_get_head(tx_ring);
- tx_pending = i40e_get_tx_pending(tx_ring);
+ tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
/* HW is done executing descriptors, updated HEAD write back,
* but SW hasn't processed those descriptors. If interrupt is
@@ -4395,12 +4411,12 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
* dev_watchdog detecting timeout on those netdev_queue,
* hence proactively trigger SW interrupt.
*/
- if (tx_pending) {
+ if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
/* NAPI Poll didn't run and clear since it was set */
if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
&tx_ring->q_vector->hung_detected)) {
- netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
- vsi->seid, q_idx, tx_pending,
+ netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
+ vsi->seid, q_idx, tx_pending_hw,
tx_ring->next_to_clean, head,
tx_ring->next_to_use,
readl(tx_ring->tail));
@@ -4413,6 +4429,17 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
&tx_ring->q_vector->hung_detected);
}
}
+
+ /* This is the case where we have missed interrupts: tx_pending in
+ * HW will most likely be 0, but we will have tx_pending in SW
+ * because the write-back happened even though the interrupt was
+ * lost.
+ */
+ if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
+ (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
+ if (napi_reschedule(&tx_ring->q_vector->napi))
+ tx_ring->tx_stats.tx_lost_interrupt++;
+ }
}
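
The recovery above fires only when all three conditions line up: the hardware reports nothing pending, the software ring still has pending work, and the interrupt is not armed. A small sketch of just that decision logic, with hypothetical helper names:

/* Decision-logic sketch for the lost-interrupt recovery above:
 * HW idle + SW pending + IRQ unarmed -> kick the poller by hand.
 */
#include <stdbool.h>
#include <stdio.h>

static bool should_reschedule(unsigned hw_pending, unsigned sw_pending,
			      bool irq_enabled)
{
	return !hw_pending && sw_pending && !irq_enabled;
}

int main(void)
{
	printf("%d\n", should_reschedule(0, 3, false));	/* 1: lost IRQ */
	printf("%d\n", should_reschedule(2, 3, true));	/* 0: IRQ armed */
	return 0;
}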
/**
@@ -4831,6 +4858,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.info = vsi->info;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ }
+
/* Update the VSI after updating the VSI queue-mapping information */
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
@@ -4974,6 +5007,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
+ i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
}
}
@@ -5016,8 +5050,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
int err = 0;
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
- if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
- (pf->hw.aq.fw_maj_ver < 4))
+ if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
goto out;
/* Get the initial DCB configuration */
@@ -5173,6 +5206,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
}
i40e_fdir_filter_restore(vsi);
}
+
+ /* On the next run of the service_task, notify any clients of the
+ * newly opened netdev
+ */
+ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
return 0;
@@ -5249,11 +5287,7 @@ void i40e_down(struct i40e_vsi *vsi)
* @netdev: net device to configure
* @tc: number of traffic classes to enable
**/
-#ifdef I40E_FCOE
-int i40e_setup_tc(struct net_device *netdev, u8 tc)
-#else
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5306,6 +5340,19 @@ exit:
return ret;
}
+#ifdef I40E_FCOE
+int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+#else
+static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+#endif
+{
+ if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+ return i40e_setup_tc(netdev, tc->tc);
+}
+
/**
* i40e_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -5348,9 +5395,12 @@ int i40e_open(struct net_device *netdev)
vxlan_get_rx_port(netdev);
#endif
#ifdef CONFIG_I40E_GENEVE
- geneve_get_rx_port(netdev);
+ if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
+ geneve_get_rx_port(netdev);
#endif
+ i40e_notify_client_of_netdev_open(vsi);
+
return 0;
}
@@ -5713,8 +5763,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
if (ret)
goto exit;
- /* Wait for the PF's Tx queues to be disabled */
- ret = i40e_pf_wait_txq_disabled(pf);
+ /* Wait for the PF's queues to be disabled */
+ ret = i40e_pf_wait_queues_disabled(pf);
if (ret) {
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
@@ -6015,6 +6065,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
case I40E_VSI_CTRL:
+ case I40E_VSI_IWARP:
case I40E_VSI_MIRROR:
default:
/* there is no notification for other VSIs */
@@ -6244,6 +6295,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
if (hw->debug_mask & I40E_DEBUG_AQ)
dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+ pf->arq_overflows++;
}
if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
if (hw->debug_mask & I40E_DEBUG_AQ)
@@ -6319,7 +6371,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
case i40e_aqc_opc_nvm_erase:
case i40e_aqc_opc_nvm_update:
case i40e_aqc_opc_oem_post_update:
- i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
+ i40e_debug(&pf->hw, I40E_DEBUG_NVM,
+ "ARQ NVM operation 0x%04x completed\n",
+ opcode);
break;
default:
dev_info(&pf->pdev->dev,
@@ -6803,12 +6857,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
if (ret)
goto end_core_reset;
- /* driver is only interested in link up/down and module qualification
- * reports from firmware
+ /* The driver only wants link up/down and module qualification
+ * reports from firmware. Note the negative logic.
*/
ret = i40e_aq_set_phy_int_mask(&pf->hw,
- I40E_AQ_EVENT_LINK_UPDOWN |
- I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, ret),
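
As the comment notes, i40e_aq_set_phy_int_mask() takes a mask of events to suppress, so the events the driver wants must be inverted before the call. A tiny sketch of that inversion, with assumed event bit values:

/* Sketch of the "note the negative logic" comment above: the firmware
 * mask lists events to suppress, so wanted events are inverted before
 * being handed over. Bit values are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define EVENT_LINK_UPDOWN	(1u << 1)
#define EVENT_MODULE_QUAL_FAIL	(1u << 5)

int main(void)
{
	uint16_t wanted = EVENT_LINK_UPDOWN | EVENT_MODULE_QUAL_FAIL;
	uint16_t mask = (uint16_t)~wanted;	/* suppress everything else */

	printf("wanted 0x%04x -> mask 0x%04x\n", wanted, mask);
	return 0;
}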
@@ -6889,8 +6943,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
wr32(hw, I40E_REG_MSS, val);
}
- if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
- (pf->hw.aq.fw_maj_ver < 4)) {
+ if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
@@ -7079,12 +7132,13 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
if (ret) {
- dev_info(&pf->pdev->dev,
- "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
- port ? "add" : "delete",
- ntohs(port), i,
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw,
+ dev_dbg(&pf->pdev->dev,
+ "%s %s port %d, index %d failed, err %s aq_err %s\n",
+ pf->udp_ports[i].type ? "vxlan" : "geneve",
+ port ? "add" : "delete",
+ ntohs(port), i,
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
pf->udp_ports[i].index = 0;
}
@@ -7111,11 +7165,13 @@ static void i40e_service_task(struct work_struct *work)
}
i40e_detect_recover_hung(pf);
+ i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
i40e_vc_process_vflr_event(pf);
i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf);
+ i40e_client_subtask(pf);
i40e_sync_filters_subtask(pf);
i40e_sync_udp_filters_subtask(pf);
i40e_clean_adminq_subtask(pf);
@@ -7290,8 +7346,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
set_bit(__I40E_DOWN, &vsi->state);
vsi->flags = 0;
vsi->idx = vsi_idx;
- vsi->rx_itr_setting = pf->rx_itr_default;
- vsi->tx_itr_setting = pf->tx_itr_default;
vsi->int_rate_limit = 0;
vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
pf->rss_table_size : 64;
@@ -7458,8 +7512,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
tx_ring->dcb_tc = 0;
if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
- if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
- tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
+ tx_ring->tx_itr_setting = pf->tx_itr_default;
vsi->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
@@ -7476,6 +7529,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
set_ring_16byte_desc_enabled(rx_ring);
else
clear_ring_16byte_desc_enabled(rx_ring);
+ rx_ring->rx_itr_setting = pf->rx_itr_default;
vsi->rx_rings[i] = rx_ring;
}
@@ -7520,6 +7574,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
int vectors_left;
int v_budget, i;
int v_actual;
+ int iwarp_requested = 0;
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
return -ENODEV;
@@ -7533,6 +7588,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
* is governed by number of cpus in the system.
* - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
+ * - The CPU count within the NUMA node if iWARP is enabled
#ifdef I40E_FCOE
* - The number of FCOE qps.
#endif
@@ -7579,6 +7635,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
#endif
+ /* can we reserve enough for iWARP? */
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ if (!vectors_left)
+ pf->num_iwarp_msix = 0;
+ else if (vectors_left < pf->num_iwarp_msix)
+ pf->num_iwarp_msix = 1;
+ v_budget += pf->num_iwarp_msix;
+ vectors_left -= pf->num_iwarp_msix;
+ }
+
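
Each feature in i40e_init_msix() draws vectors from a shared pool: it clamps its own request to what is left, grows the overall budget, and shrinks the pool. A standalone sketch of that budgeting arithmetic with illustrative numbers:

/* Sketch of the vector-budgeting pattern used throughout
 * i40e_init_msix(); numbers are illustrative.
 */
#include <stdio.h>

int main(void)
{
	int vectors_left = 8;	/* what the pool still holds */
	int v_budget = 0;	/* what we will request from MSI-X */
	int want_iwarp = 4;

	if (!vectors_left)
		want_iwarp = 0;
	else if (vectors_left < want_iwarp)
		want_iwarp = 1;	/* degrade rather than fail outright */

	v_budget += want_iwarp;
	vectors_left -= want_iwarp;
	printf("budget=%d left=%d iwarp=%d\n", v_budget, vectors_left,
	       want_iwarp);
	return 0;
}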
/* any vectors left over go for VMDq support */
if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
@@ -7613,6 +7679,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
* of these features based on the policy and at the end disable
* the features that did not get any vectors.
*/
+ iwarp_requested = pf->num_iwarp_msix;
+ pf->num_iwarp_msix = 0;
#ifdef I40E_FCOE
pf->num_fcoe_qps = 0;
pf->num_fcoe_msix = 0;
@@ -7651,17 +7719,33 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->num_lan_msix = 1;
break;
case 3:
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->num_lan_msix = 1;
+ pf->num_iwarp_msix = 1;
+ } else {
+ pf->num_lan_msix = 2;
+ }
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
pf->num_lan_msix = 1;
pf->num_fcoe_msix = 1;
}
-#else
- pf->num_lan_msix = 2;
#endif
break;
default:
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->num_iwarp_msix = min_t(int, (vec / 3),
+ iwarp_requested);
+ pf->num_vmdq_vsis = min_t(int, (vec / 3),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ } else {
+ pf->num_vmdq_vsis = min_t(int, (vec / 2),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ }
+ pf->num_lan_msix = min_t(int,
+ (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
+ pf->num_lan_msix);
#ifdef I40E_FCOE
/* give one vector to FCoE */
if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -7669,8 +7753,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
vec--;
}
#endif
- /* give the rest to the PF */
- pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
break;
}
}
@@ -7680,6 +7762,12 @@ static int i40e_init_msix(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
}
+
+ if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+ (pf->num_iwarp_msix == 0)) {
+ dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ }
#ifdef I40E_FCOE
if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
@@ -7771,6 +7859,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
vectors = i40e_init_msix(pf);
if (vectors < 0) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
@@ -7852,7 +7941,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
i40e_flush(hw);
- i40e_irq_dynamic_enable_icr0(pf);
+ i40e_irq_dynamic_enable_icr0(pf, true);
return err;
}
@@ -7936,6 +8025,52 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
}
/**
+ * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
+ * @vsi: Pointer to vsi structure
+ * @seed: Buffer to store the hash keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Return 0 on success, negative on failure
+ */
+static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ u8 *lut, u16 lut_size)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int ret = 0;
+
+ if (seed) {
+ ret = i40e_aq_get_rss_key(hw, vsi->id,
+ (struct i40e_aqc_get_set_rss_key_data *)seed);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot get RSS key, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+ }
+ }
+
+ if (lut) {
+ bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+
+ ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Cannot get RSS lut, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/**
* i40e_config_rss_reg - Configure RSS keys and lut by writing registers
* @vsi: Pointer to vsi structure
* @seed: RSS hash seed
@@ -7956,7 +8091,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
u32 *seed_dw = (u32 *)seed;
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
}
if (lut) {
@@ -7993,7 +8128,7 @@ static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
u32 *seed_dw = (u32 *)seed;
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i));
+ seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
}
if (lut) {
u32 *lut_dw = (u32 *)lut;
@@ -8037,7 +8172,12 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
*/
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
- return i40e_get_rss_reg(vsi, seed, lut, lut_size);
+ struct i40e_pf *pf = vsi->back;
+
+ if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+ return i40e_get_rss_aq(vsi, seed, lut, lut_size);
+ else
+ return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}
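
i40e_get_rss() now picks a back end from a capability flag: admin-queue commands where the firmware supports them, direct register reads otherwise. A compact sketch of the dispatch, with invented flag and helper names:

/* Sketch of the capability-flag dispatch in i40e_get_rss() above:
 * one entry point, two back ends chosen by a feature bit.
 */
#include <stdio.h>

#define CAP_AQ (1u << 0)

static int get_via_aq(void)  { puts("admin queue"); return 0; }
static int get_via_reg(void) { puts("registers");   return 0; }

static int get_rss(unsigned flags)
{
	return (flags & CAP_AQ) ? get_via_aq() : get_via_reg();
}

int main(void)
{
	return get_rss(CAP_AQ) | get_rss(0);
}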
/**
@@ -8071,19 +8211,19 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
int ret;
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
- hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
hena |= i40e_pf_get_default_rss_hena(pf);
- wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/* Determine the RSS table size based on the hardware capabilities */
- reg_val = rd32(hw, I40E_PFQF_CTL_0);
+ reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
reg_val = (pf->rss_table_size == 512) ?
(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
- wr32(hw, I40E_PFQF_CTL_0, reg_val);
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
/* Determine the RSS size of the VSI */
if (!vsi->rss_size)
@@ -8367,12 +8507,38 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->hw.func_caps.fd_filters_best_effort;
}
+ if (i40e_is_mac_710(&pf->hw) &&
+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+ (pf->hw.aq.fw_maj_ver < 4))) {
+ pf->flags |= I40E_FLAG_RESTART_AUTONEG;
+ /* No DCB support for FW < v4.33 */
+ pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
+ }
+
+ /* Disable FW LLDP if FW < v4.3 */
+ if (i40e_is_mac_710(&pf->hw) &&
+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+ (pf->hw.aq.fw_maj_ver < 4)))
+ pf->flags |= I40E_FLAG_STOP_FW_LLDP;
+
+ /* Use the FW Set LLDP MIB API if FW > v4.40 */
+ if (i40e_is_mac_710(&pf->hw) &&
+ (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
+ (pf->hw.aq.fw_maj_ver >= 5)))
+ pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
+
if (pf->hw.func_caps.vmdq) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
}
+ if (pf->hw.func_caps.iwarp) {
+ pf->flags |= I40E_FLAG_IWARP_ENABLED;
+ /* IWARP needs one extra vector for CQP just like MISC. */
+ pf->num_iwarp_msix = (int)num_online_cpus() + 1;
+ }
+
#ifdef I40E_FCOE
i40e_init_pf_fcoe(pf);
@@ -8393,8 +8559,20 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
I40E_FLAG_WB_ON_ITR_CAPABLE |
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+ I40E_FLAG_NO_PCI_LINK_CHECK |
+ I40E_FLAG_100M_SGMII_CAPABLE |
+ I40E_FLAG_USE_SET_LLDP_MIB |
I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ } else if ((pf->hw.aq.api_maj_ver > 1) ||
+ ((pf->hw.aq.api_maj_ver == 1) &&
+ (pf->hw.aq.api_min_ver > 4))) {
+ /* Supported in FW API version higher than 1.4 */
+ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+ } else {
+ pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
}
+
pf->eeprom_version = 0xDEAD;
pf->lan_veb = I40E_NO_VEB;
pf->lan_vsi = I40E_NO_VSI;
@@ -8530,9 +8708,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
u8 next_idx;
u8 idx;
- if (sa_family == AF_INET6)
- return;
-
idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
@@ -8572,9 +8747,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
u8 idx;
- if (sa_family == AF_INET6)
- return;
-
idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
@@ -8608,7 +8780,7 @@ static void i40e_add_geneve_port(struct net_device *netdev,
u8 next_idx;
u8 idx;
- if (sa_family == AF_INET6)
+ if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
return;
idx = i40e_get_udp_port_idx(pf, port);
@@ -8652,7 +8824,7 @@ static void i40e_del_geneve_port(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
u8 idx;
- if (sa_family == AF_INET6)
+ if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
return;
idx = i40e_get_udp_port_idx(pf, port);
@@ -8890,7 +9062,7 @@ static const struct net_device_ops i40e_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40e_netpoll,
#endif
- .ndo_setup_tc = i40e_setup_tc,
+ .ndo_setup_tc = __i40e_setup_tc,
#ifdef I40E_FCOE
.ndo_fcoe_enable = i40e_fcoe_enable,
.ndo_fcoe_disable = i40e_fcoe_disable,
@@ -8942,11 +9114,15 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_TSO;
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_TSO_ECN |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ 0;
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
@@ -8967,6 +9143,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
netdev->features |= NETIF_F_NTUPLE;
+ if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
@@ -9216,6 +9394,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
}
+ if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |=
+ I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ }
+
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
if (pf->vf[vsi->vf_id].spoofchk) {
@@ -9239,6 +9424,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
break;
#endif /* I40E_FCOE */
+ case I40E_VSI_IWARP:
+ /* send down message to iWARP */
+ break;
+
default:
return -ENODEV;
}
@@ -9471,10 +9660,15 @@ vector_setup_out:
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
- struct i40e_pf *pf = vsi->back;
+ struct i40e_pf *pf;
u8 enabled_tc;
int ret;
+ if (!vsi)
+ return NULL;
+
+ pf = vsi->back;
+
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_vsi_clear_rings(vsi);
@@ -9975,13 +10169,13 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
struct i40e_pf *pf = veb->pf;
bool is_default = veb->pf->cur_promisc;
- bool is_cloud = false;
+ bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
veb->enabled_tc, is_default,
- is_cloud, &veb->seid, NULL);
+ &veb->seid, enable_stats, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %s aq_err %s\n",
@@ -10350,6 +10544,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* make sure all the fancies are disabled */
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
@@ -10367,6 +10562,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= pf->num_lan_qps;
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_IWARP_ENABLED |
#ifdef I40E_FCOE
I40E_FLAG_FCOE_ENABLED |
#endif
@@ -10538,21 +10734,9 @@ static void i40e_print_features(struct i40e_pf *pf)
**/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
- struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
- u8 *mac_addr = pf->hw.mac.addr;
-
pf->flags &= ~I40E_FLAG_PF_MAC;
- addr = of_get_mac_address(dp);
- if (addr) {
- ether_addr_copy(mac_addr, addr);
+ if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
pf->flags |= I40E_FLAG_PF_MAC;
-#ifdef CONFIG_SPARC
- } else {
- ether_addr_copy(mac_addr, idprom->id_ethaddr);
- pf->flags |= I40E_FLAG_PF_MAC;
-#endif /* CONFIG_SPARC */
- }
}
/**
@@ -10575,7 +10759,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 wol_nvm_bits;
u16 link_status;
int err;
- u32 len;
u32 val;
u32 i;
u8 set_fc_aq_fail;
@@ -10758,8 +10941,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* Ignore error return codes because if it was already disabled via
* hardware settings this will fail
*/
- if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
- (pf->hw.aq.fw_maj_ver < 4)) {
+ if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
i40e_aq_stop_lldp(hw, true, NULL);
}
@@ -10834,8 +11016,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
- len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
- pf->vsi = kzalloc(len, GFP_KERNEL);
+ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
+ GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_switch_setup;
@@ -10882,12 +11064,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- /* driver is only interested in link up/down and module qualification
- * reports from firmware
+ /* The driver only wants link up/down and module qualification
+ * reports from firmware. Note the negative logic.
*/
err = i40e_aq_set_phy_int_mask(&pf->hw,
- I40E_AQ_EVENT_LINK_UPDOWN |
- I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
i40e_stat_str(&pf->hw, err),
@@ -10904,8 +11086,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
wr32(hw, I40E_REG_MSS, val);
}
- if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
- (pf->hw.aq.fw_maj_ver < 4)) {
+ if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
@@ -10939,8 +11120,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
!test_bit(__I40E_BAD_EEPROM, &pf->state)) {
- u32 val;
-
/* disable link interrupts for VFs */
val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
@@ -10959,7 +11138,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif /* CONFIG_PCI_IOV */
- pfs_found++;
+ if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+ pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
+ pf->num_iwarp_msix,
+ I40E_IWARP_IRQ_PILE_ID);
+ if (pf->iwarp_base_vector < 0) {
+ dev_info(&pdev->dev,
+ "failed to get tracking for %d vectors for IWARP err=%d\n",
+ pf->num_iwarp_msix, pf->iwarp_base_vector);
+ pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+ }
+ }
i40e_dbg_pf_init(pf);
@@ -10970,6 +11159,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
+ /* add this PF to client device list and launch a client service task */
+ err = i40e_lan_add_device(pf);
+ if (err)
+ dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+ err);
+
#ifdef I40E_FCOE
/* create FCoE interface */
i40e_fcoe_vsi_setup(pf);
@@ -11051,6 +11246,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
pf->main_vsi_seid);
+ if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
+ (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
+ pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
+
/* print a string summarizing features */
i40e_print_features(pf);
@@ -11107,10 +11306,11 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_ptp_stop(pf);
/* Disable RSS in hw */
- wr32(hw, I40E_PFQF_HENA(0), 0);
- wr32(hw, I40E_PFQF_HENA(1), 0);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
/* no more scheduling of any task */
+ set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
@@ -11140,9 +11340,16 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+ /* remove attached clients */
+ ret_code = i40e_lan_del_device(pf);
+ if (ret_code) {
+ dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+ ret_code);
+ }
+
/* shutdown and destroy the HMC */
- if (pf->hw.hmc.hmc_obj) {
- ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+ if (hw->hmc.hmc_obj) {
+ ret_code = i40e_shutdown_lan_hmc(hw);
if (ret_code)
dev_warn(&pdev->dev,
"Failed to destroy the HMC resources: %d\n",
@@ -11150,7 +11357,7 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown the adminq */
- ret_code = i40e_shutdown_adminq(&pf->hw);
+ ret_code = i40e_shutdown_adminq(hw);
if (ret_code)
dev_warn(&pdev->dev,
"Failed to destroy the Admin Queue resources: %d\n",
@@ -11178,7 +11385,7 @@ static void i40e_remove(struct pci_dev *pdev)
kfree(pf->qp_pile);
kfree(pf->vsi);
- iounmap(pf->hw.hw_addr);
+ iounmap(hw->hw_addr);
kfree(pf);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
@@ -11413,6 +11620,16 @@ static int __init i40e_init_module(void)
i40e_driver_string, i40e_driver_version_str);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+ /* We will see whether a single thread per module is enough for now;
+ * it can't be any worse than using the system workqueue, which was
+ * already single-threaded.
+ */
+ i40e_wq = create_singlethread_workqueue(i40e_driver_name);
+ if (!i40e_wq) {
+ pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+ return -ENOMEM;
+ }
+
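
The module now owns a dedicated single-threaded workqueue, and i40e_service_event_schedule() pairs queue_work() with a test_and_set_bit() guard so the task is never queued twice. A minimal module skeleton showing the same pattern, assuming nothing from i40e itself:

/* Illustrative module-skeleton code (not the i40e sources), using
 * only standard workqueue/bitops kernel APIs.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;
static unsigned long example_state;
#define EXAMPLE_SCHED_BIT 0

static void example_task(struct work_struct *work)
{
	clear_bit(EXAMPLE_SCHED_BIT, &example_state);
	/* ... do the service work here ... */
}

static void example_schedule(void)
{
	/* avoid queuing the same work twice, as the driver does */
	if (!test_and_set_bit(EXAMPLE_SCHED_BIT, &example_state))
		queue_work(example_wq, &example_work);
}

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_task);
	example_schedule();
	return 0;
}

static void __exit example_exit(void)
{
	cancel_work_sync(&example_work);
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");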
i40e_dbg_init();
return pci_register_driver(&i40e_driver);
}
@@ -11427,6 +11644,7 @@ module_init(i40e_init_module);
static void __exit i40e_exit_module(void)
{
pci_unregister_driver(&i40e_driver);
+ destroy_workqueue(i40e_wq);
i40e_dbg_exit();
}
module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 6100cdd9ad13..5730f8091e1b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -693,10 +693,11 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
- hw->aq.nvm_release_on_done);
+ hw->aq.nvm_release_on_done,
+ cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
*perrno = -EFAULT;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index bb9d583e5416..d51eee5bf79a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -74,6 +74,12 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
/* admin send queue commands */
@@ -127,6 +133,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_vsi_context *vsi_ctx,
struct i40e_asq_cmd_details *cmd_details);
@@ -135,8 +144,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc,
- bool default_port, bool enable_l2_filtering,
- u16 *pveb_seid,
+ bool default_port, u16 *pveb_seid,
+ bool enable_stats,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
u16 veb_seid, u16 *switch_id, bool *floating,
@@ -149,6 +158,15 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free);
+i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free);
+
i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@@ -324,4 +342,19 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
+i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
+ u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
+ u16 reg, u8 phy_addr, u16 value);
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index dc0402fe3370..86ca27f72f02 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -2045,6 +2045,14 @@
#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GL_PRS_FVBM_MAX_INDEX 3
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
+#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
@@ -2216,6 +2224,14 @@
#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
@@ -5155,6 +5171,38 @@
#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_FD_MSK_MAX_INDEX 1
+#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
+#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
+#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
+#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
+#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
+#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
+#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
+#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
+#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT_MAX_INDEX 63
+#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
+#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
+#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
+#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
+#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
+#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
+#define I40E_GLQF_PIT_MAX_INDEX 23
+#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+#define I40E_GLQF_PIT_FSIZE_SHIFT 5
+#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
+#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
+#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 47bd8b3145a7..6a49b7ae511c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -610,15 +610,19 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
+ * @in_sw: is tx_pending being checked in SW or HW
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
u32 head, tail;
- head = i40e_get_head(ring);
+ if (!in_sw)
+ head = i40e_get_head(ring);
+ else
+ head = ring->next_to_clean;
tail = readl(ring->tail);
if (head != tail)
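
The remainder of this function falls outside the hunk; the pending count it returns is plain ring arithmetic with wraparound. A standalone sketch, reconstructed for illustration rather than quoted from the source:

	/* Descriptors still outstanding between head and tail on a ring
	 * of 'count' entries, allowing for wraparound.
	 */
	static u32 ring_pending(u32 head, u32 tail, u32 count)
	{
		if (head == tail)
			return 0;
		return head < tail ? tail - head : tail + count - head;
	}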
@@ -741,7 +745,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
- j = i40e_get_tx_pending(tx_ring);
+ j = i40e_get_tx_pending(tx_ring, false);
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
@@ -774,29 +778,48 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
/**
- * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
* @vsi: the VSI we care about
- * @q_vector: the vector on which to force writeback
+ * @q_vector: the vector on which to enable writeback
*
**/
-void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
{
u16 flags = q_vector->tx.ring[0].flags;
+ u32 val;
- if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
- u32 val;
+ if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
+ return;
- if (q_vector->arm_wb_state)
- return;
+ if (q_vector->arm_wb_state)
+ return;
- val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw,
- I40E_PFINT_DYN_CTLN(q_vector->v_idx +
- vsi->base_vector - 1),
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
val);
- q_vector->arm_wb_state = true;
- } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ } else {
+ val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
+ I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
+
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+ }
+ q_vector->arm_wb_state = true;
+}
+
+/**
+ * i40e_force_wb - Issue SW Interrupt so HW does a wb
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
@@ -1041,7 +1064,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (rx_bi->page_dma) {
dma_unmap_page(dev,
rx_bi->page_dma,
- PAGE_SIZE / 2,
+ PAGE_SIZE,
DMA_FROM_DEVICE);
rx_bi->page_dma = 0;
}
@@ -1176,16 +1199,19 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
* i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any allocation errors occurred
**/
-void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
+ const int current_node = numa_node_id();
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
- return;
+ return false;
while (cleaned_count--) {
rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -1193,56 +1219,79 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
if (bi->skb) /* desc is in use */
goto no_buffers;
+
+ /* If we've been moved to a different NUMA node, release the
+ * page so we can get a new one on the current node.
+ */
+ if (bi->page && page_to_nid(bi->page) != current_node) {
+ dma_unmap_page(rx_ring->dev,
+ bi->page_dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(bi->page);
+ bi->page = NULL;
+ bi->page_dma = 0;
+ rx_ring->rx_stats.realloc_count++;
+ } else if (bi->page) {
+ rx_ring->rx_stats.page_reuse_count++;
+ }
+
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
rx_ring->rx_stats.alloc_page_failed++;
goto no_buffers;
}
- }
-
- if (!bi->page_dma) {
- /* use a half page if we're re-using */
- bi->page_offset ^= PAGE_SIZE / 2;
bi->page_dma = dma_map_page(rx_ring->dev,
bi->page,
- bi->page_offset,
- PAGE_SIZE / 2,
+ 0,
+ PAGE_SIZE,
DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev,
- bi->page_dma)) {
+ if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
rx_ring->rx_stats.alloc_page_failed++;
+ __free_page(bi->page);
+ bi->page = NULL;
bi->page_dma = 0;
+ bi->page_offset = 0;
goto no_buffers;
}
+ bi->page_offset = 0;
}
- dma_sync_single_range_for_device(rx_ring->dev,
- bi->dma,
- 0,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(bi->page_dma + bi->page_offset);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
i++;
if (i == rx_ring->count)
i = 0;
}
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+
+ return false;
+
no_buffers:
if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i);
+
+ /* make sure to come back via polling to try again after
+ * allocation failure
+ */
+ return true;
}
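
The new bool return feeds the poll loop at the bottom of this file: any allocation failure makes the Rx clean routine report its full budget so NAPI keeps polling and the allocation is retried, rather than stalling until the next interrupt. In miniature:

	/* Sketch of the failure contract used by the clean routines. */
	static int rx_poll_result(bool alloc_failed, int budget, int cleaned)
	{
		return alloc_failed ? budget : cleaned;
	}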
/**
* i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any allocation errors occurred
**/
-void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
@@ -1251,7 +1300,7 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
- return;
+ return false;
while (cleaned_count--) {
rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -1259,8 +1308,10 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
skb = bi->skb;
if (!skb) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len);
+ skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len,
+ GFP_ATOMIC |
+ __GFP_NOWARN);
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
goto no_buffers;
@@ -1278,6 +1329,8 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
if (dma_mapping_error(rx_ring->dev, bi->dma)) {
rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0;
+ dev_kfree_skb(bi->skb);
+ bi->skb = NULL;
goto no_buffers;
}
}
@@ -1289,9 +1342,19 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
i = 0;
}
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+
+ return false;
+
no_buffers:
if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i);
+
+ /* make sure to come back via polling to try again after
+ * allocation failure
+ */
+ return true;
}
/**
@@ -1326,16 +1389,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u16 rx_ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4 = false, ipv6 = false;
- bool ipv4_tunnel, ipv6_tunnel;
- __wsum rx_udp_csum;
- struct iphdr *iph;
- __sum16 csum;
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+ bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
@@ -1351,12 +1405,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (!(decoded.known && decoded.outer_ip))
return;
- if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
- ipv4 = true;
- else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
- ipv6 = true;
+ ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
if (ipv4 &&
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1380,37 +1432,17 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- * The UDP_0 bit *may* bet set if the *inner* header is UDP
+ /* The hardware supported by this driver does not validate outer
+ * checksums for tunneled VXLAN or GENEVE frames. The specification
+ * only says that receivers "MAY validate" the outer checksum, so it
+ * is not a hard requirement; if we have validated the inner checksum,
+ * report CHECKSUM_UNNECESSARY.
*/
- if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
- (ipv4_tunnel)) {
- skb->transport_header = skb->mac_header +
- sizeof(struct ethhdr) +
- (ip_hdr(skb)->ihl * 4);
-
- /* Add 4 bytes for VLAN tagged packets */
- skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD))
- ? VLAN_HLEN : 0;
-
- if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
- (udp_hdr(skb)->check != 0)) {
- rx_udp_csum = udp_csum(skb);
- iph = ip_hdr(skb);
- csum = csum_tcpudp_magic(
- iph->saddr, iph->daddr,
- (skb->len - skb_transport_offset(skb)),
- IPPROTO_UDP, rx_udp_csum);
-
- if (udp_hdr(skb)->check != csum)
- goto checksum_fail;
-
- } /* else its GRE and so no outer UDP header */
- }
+
+ ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = ipv4_tunnel || ipv6_tunnel;
@@ -1475,18 +1507,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- const int current_node = numa_mem_id();
struct i40e_vsi *vsi = rx_ring->vsi;
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
+ bool failure = false;
u8 rx_ptype;
u64 qword;
+ u32 copysize;
if (budget <= 0)
return 0;
@@ -1497,7 +1530,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+ failure = failure ||
+ i40e_alloc_rx_buffers_ps(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -1515,6 +1550,12 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
* DD bit is set.
*/
dma_rmb();
+ /* sync header buffer for reading */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_ring->rx_bi[0].dma,
+ i * rx_ring->rx_hdr_len,
+ rx_ring->rx_hdr_len,
+ DMA_FROM_DEVICE);
if (i40e_rx_is_programming_status(qword)) {
i40e_clean_programming_status(rx_ring, rx_desc);
I40E_RX_INCREMENT(rx_ring, i);
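
The sync above offsets from rx_bi[0].dma rather than using a per-buffer handle, which implies the ring's header buffers share one contiguous DMA mapping at a fixed stride; a sketch of the implied addressing (an inference from the offsets used here, not a quoted helper):

	/* Assumed layout: header buffer i sits rx_hdr_len bytes past
	 * buffer i - 1 in a single shared mapping.
	 */
	static dma_addr_t hdr_buf_addr(dma_addr_t base, u16 i, u16 hdr_len)
	{
		return base + (dma_addr_t)i * hdr_len;
	}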
@@ -1523,10 +1564,13 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_bi = &rx_ring->rx_bi[i];
skb = rx_bi->skb;
if (likely(!skb)) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len);
+ skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_hdr_len,
+ GFP_ATOMIC |
+ __GFP_NOWARN);
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
+ failure = true;
break;
}
@@ -1534,8 +1578,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
skb_record_rx_queue(skb, rx_ring->queue_index);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->dma,
- 0,
+ rx_ring->rx_bi[0].dma,
+ i * rx_ring->rx_hdr_len,
rx_ring->rx_hdr_len,
DMA_FROM_DEVICE);
}
@@ -1553,9 +1597,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
- prefetch(rx_bi->page);
+ /* sync half-page for reading */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->page_dma,
+ rx_bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
rx_bi->skb = NULL;
cleaned_count++;
+ copysize = 0;
if (rx_hbo || rx_sph) {
int len;
@@ -1566,38 +1617,50 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
} else if (skb->len == 0) {
int len;
+ unsigned char *va = page_address(rx_bi->page) +
+ rx_bi->page_offset;
- len = (rx_packet_len > skb_headlen(skb) ?
- skb_headlen(skb) : rx_packet_len);
- memcpy(__skb_put(skb, len),
- rx_bi->page + rx_bi->page_offset,
- len);
- rx_bi->page_offset += len;
+ len = min(rx_packet_len, rx_ring->rx_hdr_len);
+ memcpy(__skb_put(skb, len), va, len);
+ copysize = len;
rx_packet_len -= len;
}
-
/* Get the rest of the data if this was a header split */
if (rx_packet_len) {
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset,
- rx_packet_len);
-
- skb->len += rx_packet_len;
- skb->data_len += rx_packet_len;
- skb->truesize += rx_packet_len;
-
- if ((page_count(rx_bi->page) == 1) &&
- (page_to_nid(rx_bi->page) == current_node))
- get_page(rx_bi->page);
- else
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset + copysize,
+ rx_packet_len, I40E_RXBUFFER_2048);
+
+ /* If the page count is more than 2, then both halves
+ * of the page are used and we need to free it. Do it
+ * here instead of in the alloc code. Otherwise one
+ * of the half-pages might be released between now and
+ * then, and we wouldn't know which one to use.
+ * Don't call get_page and free_page since those are
+ * both expensive atomic operations that just change
+ * the refcount in opposite directions. Just give the
+ * page to the stack; it can have our refcount.
+ */
+ if (page_count(rx_bi->page) > 2) {
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
rx_bi->page = NULL;
+ rx_bi->page_dma = 0;
+ rx_ring->rx_stats.realloc_count++;
+ } else {
+ get_page(rx_bi->page);
+ /* switch to the other half-page here; the
+ * allocation code programs the right addr
+ * into HW. If we haven't used this half-page,
+ * the address won't be changed, and HW can
+ * just use it next time through.
+ */
+ rx_bi->page_offset ^= PAGE_SIZE / 2;
+ }
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
}
I40E_RX_INCREMENT(rx_ring, i);
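
Summarizing the recycling scheme in the branch above: one full page stays mapped per buffer, halves alternate via page_offset ^= PAGE_SIZE / 2, and a refcount above 2 means both halves are out with the stack, so the page is released instead. A compact restatement with illustrative names:

	/* Flip to the other half-page when reuse is safe; signal release
	 * when both halves are in flight.
	 */
	static bool reuse_other_half(unsigned int page_refs,
				     unsigned int *offset, unsigned int pg_size)
	{
		if (page_refs > 2)
			return false;		/* release the page */
		*offset ^= pg_size / 2;		/* hand HW the other half */
		return true;
	}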
@@ -1656,7 +1719,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
- return total_rx_packets;
+ return failure ? budget : total_rx_packets;
}
/**
@@ -1674,6 +1737,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
u16 rx_packet_len;
+ bool failure = false;
u8 rx_ptype;
u64 qword;
u16 i;
@@ -1684,7 +1748,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+ failure = failure ||
+ i40e_alloc_rx_buffers_1buf(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -1783,7 +1849,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
- return total_rx_packets;
+ return failure ? budget : total_rx_packets;
}
static u32 i40e_buildreg_itr(const int type, const u16 itr)
@@ -1791,7 +1857,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
u32 val;
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ /* Don't clear PBA because that can cause lost interrupts that
+ * came in while we were cleaning/polling
+ */
(type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
(itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
@@ -1814,6 +1882,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
+ int idx = q_vector->v_idx;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1823,17 +1892,17 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -1906,7 +1975,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+ clean_complete = clean_complete &&
+ i40e_clean_tx_irq(ring, vsi->work_limit);
arm_wb = arm_wb || ring->arm_wb;
ring->arm_wb = false;
}
@@ -1930,7 +2000,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
work_done += cleaned;
/* if we didn't clean as many as budgeted, we must be done */
- clean_complete &= (budget_per_ring != cleaned);
+ clean_complete = clean_complete && (budget_per_ring > cleaned);
}
/* If work not completed, return budget and polling will return */
@@ -1938,7 +2008,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
tx_only:
if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
- i40e_force_wb(vsi, q_vector);
+ i40e_enable_wb_on_itr(vsi, q_vector);
}
return budget;
}
@@ -1951,20 +2021,7 @@ tx_only:
if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
i40e_update_enable_itr(vsi, q_vector);
} else { /* Legacy mode */
- struct i40e_hw *hw = &vsi->back->hw;
- /* We re-enable the queue 0 cause, but
- * don't worry about dynamic_enable
- * because we left it on for the other
- * possible interrupts during napi
- */
- u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
- I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-
- wr32(hw, I40E_QINT_RQCTL(0), qval);
- qval = rd32(hw, I40E_QINT_TQCTL(0)) |
- I40E_QINT_TQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_TQCTL(0), qval);
- i40e_irq_dynamic_enable_icr0(vsi->back);
+ i40e_irq_dynamic_enable_icr0(vsi->back, false);
}
return 0;
}
@@ -1974,10 +2031,9 @@ tx_only:
* @tx_ring: ring to add programming descriptor to
* @skb: send buffer
* @tx_flags: send tx flags
- * @protocol: wire protocol
**/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol)
+ u32 tx_flags)
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1989,6 +2045,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct tcphdr *th;
unsigned int hlen;
u32 flex_ptype, dtype_cmd;
+ int l4_proto;
u16 i;
/* make sure ATR is enabled */
@@ -2002,36 +2059,28 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!tx_ring->atr_sample_rate)
return;
+ /* Currently only IPv4/IPv6 with TCP is supported */
if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
return;
- if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) {
- /* snag network header to get L4 type and address */
- hdr.network = skb_network_header(skb);
+ /* snag network header to get L4 type and address */
+ hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
+ skb_inner_network_header(skb) : skb_network_header(skb);
- /* Currently only IPv4/IPv6 with TCP is supported
- * access ihl as u8 to avoid unaligned access on ia64
- */
- if (tx_flags & I40E_TX_FLAGS_IPV4)
- hlen = (hdr.network[0] & 0x0F) << 2;
- else if (protocol == htons(ETH_P_IPV6))
- hlen = sizeof(struct ipv6hdr);
- else
- return;
+ /* Note: tx_flags gets modified to reflect inner protocols in
+ * tx_enable_csum function if encap is enabled.
+ */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ /* access ihl as u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ l4_proto = hdr.ipv4->protocol;
} else {
- hdr.network = skb_inner_network_header(skb);
- hlen = skb_inner_network_header_len(skb);
+ hlen = hdr.network - skb->data;
+ l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
+ hlen -= hdr.network - skb->data;
}
- /* Currently only IPv4/IPv6 with TCP is supported
- * Note: tx_flags gets modified to reflect inner protocols in
- * tx_enable_csum function if encap is enabled.
- */
- if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
- (hdr.ipv4->protocol != IPPROTO_TCP))
- return;
- else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
- (hdr.ipv6->nexthdr != IPPROTO_TCP))
+ if (l4_proto != IPPROTO_TCP)
return;
th = (struct tcphdr *)(hdr.network + hlen);
@@ -2039,7 +2088,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Due to lack of space, no more new filters can be programmed */
if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
return;
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+ if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
+ (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) {
/* HW ATR eviction will take care of removing filters on FIN
* and RST packets.
*/
@@ -2067,7 +2117,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
I40E_TXD_FLTR_QW0_QINDEX_MASK;
- flex_ptype |= (protocol == htons(ETH_P_IP)) ?
+ flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
(I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
(I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
@@ -2101,7 +2151,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+ if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) &&
+ (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)))
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
@@ -2206,13 +2257,23 @@ out:
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
- u32 cd_cmd, cd_tso_len, cd_mss;
- struct ipv6hdr *ipv6h;
- struct tcphdr *tcph;
- struct iphdr *iph;
- u32 l4len;
+ u64 cd_cmd, cd_tso_len, cd_mss;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -2220,35 +2281,60 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-
- if (iph->version == 4) {
- tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- 0, IPPROTO_TCP, 0);
- } else if (ipv6h->version == 6) {
- tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
- ipv6h->payload_len = 0;
- tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
- 0, IPPROTO_TCP, 0);
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
}
- l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
- *hdr_len = (skb->encapsulation
- ? (skb_inner_transport_header(skb) - skb->data)
- : skb_transport_offset(skb)) + l4len;
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ /* determine offset of outer transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from outer checksum */
+ paylen = (__force u16)l4.udp->check;
+ paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+ l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ }
+
+ /* reset pointers to inner headers */
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
+ }
+
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from inner checksum */
+ paylen = (__force u16)l4.tcp->check;
+ paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+ l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
/* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size;
- *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
- ((u64)cd_tso_len <<
- I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
- ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
return 1;
}
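
A note on the ntohs(1) * (u16)~(...) idiom used twice above: it subtracts the payload length from a network-order ones'-complement sum without an explicit endianness test. On big-endian hosts ntohs(1) == 1, so ~len is added directly; on little-endian hosts ntohs(1) == 0x0100 shifts ~len left by 8, and the csum_fold() that follows folds the overflowing high byte back into bits 0-7, which together perform a 16-bit byte swap. A standalone demonstration of the little-endian case (illustrative, not driver code):

	#include <stdint.h>

	/* Fold a 32-bit accumulator to 16 bits, like csum_fold() without
	 * the final inversion.
	 */
	static uint16_t fold16(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	/* Adding (~len << 8) and folding equals adding the byte-swapped
	 * ~len, i.e. subtracting len from the little-endian view of a
	 * network-order checksum.
	 */
	static uint16_t sub_len_le(uint16_t check_le_view, uint16_t len)
	{
		uint32_t sum = check_le_view;

		sum += (uint32_t)(uint16_t)~len * 0x0100; /* ntohs(1) on LE */
		return fold16(sum);
	}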
@@ -2303,129 +2389,154 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
* @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
- u32 *td_cmd, u32 *td_offset,
- struct i40e_ring *tx_ring,
- u32 *cd_tunneling)
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
{
- struct ipv6hdr *this_ipv6_hdr;
- unsigned int this_tcp_hdrlen;
- struct iphdr *this_ip_hdr;
- u32 network_hdr_len;
- u8 l4_hdr = 0;
- struct udphdr *oudph = NULL;
- struct iphdr *oiph = NULL;
- u32 l4_tunnel = 0;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ unsigned char *exthdr;
+ u32 offset, cmd = 0, tunnel = 0;
+ __be16 frag_off;
+ u8 l4_proto = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* compute outer L2 header size */
+ offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
- switch (ip_hdr(skb)->protocol) {
+ /* define outer network header type */
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+ I40E_TX_CTX_EXT_IP_IPV4 :
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+ l4_proto = ip.v4->protocol;
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+ tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ }
+
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* define outer transport */
+ switch (l4_proto) {
case IPPROTO_UDP:
- oudph = udp_hdr(skb);
- oiph = ip_hdr(skb);
- l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
break;
case IPPROTO_GRE:
- l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
+ tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
break;
default:
- return;
- }
- network_hdr_len = skb_inner_network_header_len(skb);
- this_ip_hdr = inner_ip_hdr(skb);
- this_ipv6_hdr = inner_ipv6_hdr(skb);
- this_tcp_hdrlen = inner_tcp_hdrlen(skb);
-
- if (*tx_flags & I40E_TX_FLAGS_IPV4) {
- if (*tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
- } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
if (*tx_flags & I40E_TX_FLAGS_TSO)
- ip_hdr(skb)->check = 0;
+ return -1;
+
+ skb_checksum_help(skb);
+ return 0;
}
- /* Now set the ctx descriptor fields */
- *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- l4_tunnel |
- ((skb_inner_network_offset(skb) -
- skb_transport_offset(skb)) >> 1) <<
- I40E_TXD_CTX_QW0_NATLEN_SHIFT;
- if (this_ip_hdr->version == 6) {
- *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ /* compute tunnel header size */
+ tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ /* indicate if we need to offload outer UDP header */
+ if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
+ /* record tunnel offload values */
+ *cd_tunneling |= tunnel;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
*tx_flags |= I40E_TX_FLAGS_IPV6;
- }
- if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
- (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
- (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
- oudph->check = ~csum_tcpudp_magic(oiph->saddr,
- oiph->daddr,
- (skb->len - skb_transport_offset(skb)),
- IPPROTO_UDP, 0);
- *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
- }
- } else {
- network_hdr_len = skb_network_header_len(skb);
- this_ip_hdr = ip_hdr(skb);
- this_ipv6_hdr = ipv6_hdr(skb);
- this_tcp_hdrlen = tcp_hdrlen(skb);
}
/* Enable IP checksum offloads */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
- l4_hdr = this_ip_hdr->protocol;
+ l4_proto = ip.v4->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (*tx_flags & I40E_TX_FLAGS_TSO) {
- *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
- this_ip_hdr->check = 0;
- } else {
- *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
- }
- /* Now set the td_offset for IP header length */
- *td_offset = (network_hdr_len >> 2) <<
- I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
+ I40E_TX_DESC_CMD_IIPT_IPV4;
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
- l4_hdr = this_ipv6_hdr->nexthdr;
- *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
- /* Now set the td_offset for IP header length */
- *td_offset = (network_hdr_len >> 2) <<
- I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
}
- /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
- *td_offset |= (skb_network_offset(skb) >> 1) <<
- I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* compute inner L3 header size */
+ offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
/* Enable L4 checksum offloads */
- switch (l4_hdr) {
+ switch (l4_proto) {
case IPPROTO_TCP:
/* enable checksum offloads */
- *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (this_tcp_hdrlen >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case IPPROTO_SCTP:
/* enable SCTP checksum offload */
- *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
- *td_offset |= (sizeof(struct sctphdr) >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case IPPROTO_UDP:
/* enable UDP checksum offload */
- *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
- *td_offset |= (sizeof(struct udphdr) >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:
- break;
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
+ return -1;
+ skb_checksum_help(skb);
+ return 0;
}
+
+ *td_cmd |= cmd;
+ *td_offset |= offset;
+
+ return 1;
}
/**
@@ -2466,7 +2577,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
*
* Returns -EBUSY if a stop is needed, else 0
**/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */
@@ -2483,77 +2594,70 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-#ifdef I40E_FCOE
-inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#else
-static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#endif
-{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
- * @tx_flags: collected send information
*
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
**/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
+bool __i40e_chk_linearize(struct sk_buff *skb)
{
- struct skb_frag_struct *frag;
- bool linearize = false;
- unsigned int size = 0;
- u16 num_frags;
- u16 gso_segs;
+ const struct skb_frag_struct *frag, *stale;
+ int nr_frags, sum;
- num_frags = skb_shinfo(skb)->nr_frags;
- gso_segs = skb_shinfo(skb)->gso_segs;
+ /* no need to check if number of frags is less than 7 */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
+ return false;
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
- u16 j = 0;
+ /* We need to walk through the list and validate that each group
+ * of 6 fragments totals at least gso_size. However we don't need
+ * to perform such validation on the last 6 since the last 6 cannot
+ * inherit any data from a descriptor after them.
+ */
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
+ frag = &skb_shinfo(skb)->frags[0];
+
+ /* Initialize size to the negative value of gso_size minus 1. We
+ * use this as the worst case scenario in which the frag ahead
+ * of us only provides one byte which is why we are limited to 6
+ * descriptors for a single transmit as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - skb_shinfo(skb)->gso_size;
- if (num_frags < (I40E_MAX_BUFFER_TXD))
- goto linearize_chk_done;
- /* try the simple math, if we have too many frags per segment */
- if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
- I40E_MAX_BUFFER_TXD) {
- linearize = true;
- goto linearize_chk_done;
- }
- frag = &skb_shinfo(skb)->frags[0];
- /* we might still have more fragments per segment */
- do {
- size += skb_frag_size(frag);
- frag++; j++;
- if ((size >= skb_shinfo(skb)->gso_size) &&
- (j < I40E_MAX_BUFFER_TXD)) {
- size = (size % skb_shinfo(skb)->gso_size);
- j = (size) ? 1 : 0;
- }
- if (j == I40E_MAX_BUFFER_TXD) {
- linearize = true;
- break;
- }
- num_frags--;
- } while (num_frags);
- } else {
- if (num_frags >= I40E_MAX_BUFFER_TXD)
- linearize = true;
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ stale = &skb_shinfo(skb)->frags[0];
+ for (;;) {
+ sum += skb_frag_size(frag++);
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ /* use pre-decrement to avoid processing last fragment */
+ if (!--nr_frags)
+ break;
+
+ sum -= skb_frag_size(stale++);
}
-linearize_chk_done:
- return linearize;
+ return false;
}
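
The rewritten check is a sliding-window test: across every window of six consecutive fragments that might share one segment, the accumulated bytes must reach gso_size, or a single segment could need more than eight buffers. A standalone mirror of the walk for experimentation (sizes as plain ints; I40E_MAX_BUFFER_TXD taken to be 8 per the comments in this patch):

	/* Returns true when a gso skb with these fragment sizes would
	 * need linearizing; mirrors __i40e_chk_linearize() above.
	 */
	static bool chk_linearize(const int *frag_size, int nr_frags,
				  int gso_size)
	{
		int i = 0, stale = 0, sum;

		if (nr_frags < 7)	/* I40E_MAX_BUFFER_TXD - 1 */
			return false;

		nr_frags -= 6;		/* I40E_MAX_BUFFER_TXD - 2 */
		sum = 1 - gso_size;

		/* seed the window with fragments 0 through 4 */
		sum += frag_size[i++];
		sum += frag_size[i++];
		sum += frag_size[i++];
		sum += frag_size[i++];
		sum += frag_size[i++];

		for (;;) {
			sum += frag_size[i++];
			if (sum < 0)	/* window fell short of gso_size */
				return true;
			if (!--nr_frags)
				break;
			sum -= frag_size[stale++];
		}

		return false;
	}

For instance, seven 100-byte fragments with gso_size 2000 trip the check, while seven 2048-byte fragments with the same gso_size pass.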
/**
@@ -2760,43 +2864,6 @@ dma_error:
}
/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
- * @skb: send buffer
- * @tx_ring: ring to send buffer on
- *
- * Returns number of data descriptors needed for this skb. Returns 0 to indicate
- * there is not enough descriptors available in this ring since we need at least
- * one descriptor.
- **/
-#ifdef I40E_FCOE
-inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-#else
-static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-#endif
-{
- unsigned int f;
- int count = 0;
-
- /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
- * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 4 desc gap to avoid the cache line where head is,
- * + 1 desc for context descriptor,
- * otherwise try next time
- */
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
- tx_ring->tx_stats.tx_busy++;
- return 0;
- }
- return count;
-}
-
-/**
* i40e_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
* @tx_ring: ring to send buffer on
@@ -2814,14 +2881,30 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
__be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
+ int tso, count;
int tsyn;
- int tso;
/* prefetch the data, we'll need it later */
prefetch(skb->data);
- if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ count = i40e_xmit_descriptor_count(skb);
+ if (i40e_chk_linearize(skb, count)) {
+ if (__skb_linearize(skb))
+ goto out_drop;
+ count = TXD_USE_COUNT(skb->len);
+ tx_ring->tx_stats.tx_linearize++;
+ }
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
+ }
/* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
@@ -2846,29 +2929,22 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
+ /* Always offload the checksum, since it's in the data descriptor */
+ tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ if (tso < 0)
+ goto out_drop;
+
tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- if (i40e_chk_linearize(skb, tx_flags)) {
- if (skb_linearize(skb))
- goto out_drop;
- tx_ring->tx_stats.tx_linearize++;
- }
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
- /* Always offload the checksum, since it's in the data descriptor */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- tx_flags |= I40E_TX_FLAGS_CSUM;
-
- i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
- tx_ring, &cd_tunneling);
- }
-
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
@@ -2876,7 +2952,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
*
* NOTE: this must always be directly before the data descriptor.
*/
- i40e_atr(tx_ring, skb, tx_flags, protocol);
+ i40e_atr(tx_ring, skb, tx_flags);
i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
td_cmd, td_offset);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 3f081e25e097..a9bd70537d65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -153,7 +153,6 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM BIT(0)
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
@@ -203,12 +202,15 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_lost_interrupt;
};
struct i40e_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buff_failed;
+ u64 page_reuse_count;
+ u64 realloc_count;
};
enum i40e_ring_state_t {
@@ -246,6 +248,14 @@ struct i40e_ring {
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_hdr_len;
@@ -254,7 +264,6 @@ struct i40e_ring {
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
- u8 hsplit;
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
@@ -275,7 +284,6 @@ struct i40e_ring {
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
/* stats structs */
@@ -316,8 +324,8 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
void i40e_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -331,13 +339,13 @@ int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset);
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
-int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, u32 *flags);
#endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring);
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+bool __i40e_chk_linearize(struct sk_buff *skb);
/**
* i40e_get_head - Retrieve head from head writeback
@@ -352,4 +360,67 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring since we
+ * need at least one descriptor.
+ **/
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+{
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ int count = 0, size = skb_headlen(skb);
+
+ for (;;) {
+ count += TXD_USE_COUNT(size);
+
+ if (!nr_frags--)
+ break;
+
+ size = skb_frag_size(frag++);
+ }
+
+ return count;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @count: number of buffers used
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
+{
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
+ return false;
+
+ if (skb_is_gso(skb))
+ return __i40e_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
+}
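+
One subtlety in the non-TSO branch above: counts below I40E_MAX_BUFFER_TXD already returned false, so the != comparison is effectively a greater-than test; exactly eight buffers is still acceptable to hardware. Stated explicitly (assuming I40E_MAX_BUFFER_TXD is 8, as the comments here indicate):

	/* Equivalent form of the final return above. */
	static bool needs_linearize_non_tso(int count)
	{
		return count > 8;	/* assumed I40E_MAX_BUFFER_TXD == 8 */
	}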
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index dd2da356d9a1..3335f9d13374 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -78,7 +78,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
-
+ I40E_DEBUG_IWARP = 0x00F00000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
@@ -90,6 +90,22 @@ enum i40e_debug_mask {
I40E_DEBUG_ALL = 0xFFFFFFFF
};
+#define I40E_MDIO_STCODE 0
+#define I40E_MDIO_OPCODE_ADDRESS 0
+#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_PHY_COM_REG_PAGE 0x1E
+#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
+#define I40E_PHY_LED_MANUAL_ON 0x100
+#define I40E_PHY_LED_PROV_REG_1 0xC430
+#define I40E_PHY_LED_MODE_MASK 0xFFFF
+#define I40E_PHY_LED_MODE_ORIG 0x80000000
+
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -144,6 +160,7 @@ enum i40e_vsi_type {
I40E_VSI_MIRROR = 5,
I40E_VSI_SRIOV = 6,
I40E_VSI_FDIR = 7,
+ I40E_VSI_IWARP = 8,
I40E_VSI_TYPE_UNKNOWN
};
@@ -1098,6 +1115,10 @@ enum i40e_filter_program_desc_pcmd {
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 3226946bf3d4..ab866cf3dc18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -81,6 +81,9 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_IWARP = 20,
+ I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -348,6 +351,37 @@ struct i40e_virtchnl_pf_event {
int severity;
};
+/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have an AEQ and CEQ attached to it, although there
+ * is only a single AEQ per VF IWARP instance, in which case most
+ * vectors will have an INVALID_IDX for the AEQ and a valid idx for the CEQ.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ 0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_virtchnl_iwarp_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct i40e_virtchnl_iwarp_qvlist_info {
+ u32 num_vectors;
+ struct i40e_virtchnl_iwarp_qv_info qv_info[1];
+};
+
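The one-element qv_info[1] array is the trailing-array idiom, and the PF sizes its copy accordingly in i40e_config_iwarp_qvlist later in this patch; the sizing rule, restated as a helper:

	/* Bytes needed for a qvlist with num_vectors entries; the struct
	 * already holds one qv_info, so num_vectors - 1 extras follow it.
	 */
	static u32 qvlist_size(u32 num_vectors)
	{
		return sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
		       sizeof(struct i40e_virtchnl_iwarp_qv_info) *
		       (num_vectors - 1);
	}
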
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 63e62f9aec6e..816c6bbf7093 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -352,6 +352,136 @@ irq_list_done:
}
/**
+ * i40e_release_iwarp_qvlist
+ * @vf: pointer to the VF.
+ *
+ **/
+static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+ u32 msix_vf;
+ u32 i;
+
+ if (!vf->qvlist_info)
+ return;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ struct i40e_virtchnl_iwarp_qv_info *qv_info;
+ u32 next_q_index, next_q_type;
+ struct i40e_hw *hw = &pf->hw;
+ u32 v_idx, reg_idx, reg;
+
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+ if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
+ /* Figure out the queue after CEQ and make that the
+ * first queue.
+ */
+ reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
+ reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
+ next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
+ >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
+ next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
+ >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ reg = (next_q_index &
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (next_q_type <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
+ }
+ }
+ kfree(vf->qvlist_info);
+ vf->qvlist_info = NULL;
+}
+
+/**
+ * i40e_config_iwarp_qvlist
+ * @vf: pointer to the VF info
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
+ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_virtchnl_iwarp_qv_info *qv_info;
+ u32 v_idx, i, reg_idx, reg;
+ u32 next_q_idx, next_q_type;
+ u32 msix_vf, size;
+
+ size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
+ (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+ (qvlist_info->num_vectors - 1));
+ vf->qvlist_info = kzalloc(size, GFP_KERNEL);
+ if (!vf->qvlist_info)
+ return -ENOMEM;
+ vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < qvlist_info->num_vectors; i++) {
+ qv_info = &qvlist_info->qv_info[i];
+ if (!qv_info)
+ continue;
+ v_idx = qv_info->v_idx;
+
+ /* Validate vector id belongs to this vf */
+ if (!i40e_vc_isvalid_vector_id(vf, v_idx))
+ goto err;
+
+ vf->qvlist_info->qv_info[i] = *qv_info;
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ /* We might be sharing the interrupt, so get the first queue
+ * index and type, then push it down the list by putting the
+ * new queue at the head and linking the old head to the new
+ * queue through CEQCTL.
+ */
+ reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
+ next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
+ next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+
+ if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
+ reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
+ (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
+ (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
+ wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
+
+ reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+ reg = (qv_info->ceq_idx &
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ (I40E_QUEUE_TYPE_PE_CEQ <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+ wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
+ }
+
+ if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
+ reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
+ (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
+ (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
+
+ wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
+ }
+ }
+
+ return 0;
+err:
+ kfree(vf->qvlist_info);
+ vf->qvlist_info = NULL;
+ return -EINVAL;
+}
+
+/**
* i40e_config_vsi_tx_queue
* @vf: pointer to the VF info
* @vsi_id: id of VSI as provided by the FW
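[Editor's note, not part of the patch] The two new functions above manage VPINT_LNKLSTN as the head of a singly linked list of queues, with each queue's CEQCTL register carrying the next-queue index/type. The same push/unlink symmetry on a plain list, to make the register choreography explicit (types and names are illustrative):

        struct qnode { u16 idx, type; struct qnode *next; };

        /* config: insert the CEQ in front of the current first queue */
        static void qv_push(struct qnode **head, struct qnode *ceq)
        {
                ceq->next = *head;      /* CEQCTL.NEXTQ   <- LNKLSTN.FIRSTQ */
                *head = ceq;            /* LNKLSTN.FIRSTQ <- CEQ            */
        }

        /* release: make the queue after the CEQ the first queue again */
        static void qv_unlink(struct qnode **head, struct qnode *ceq)
        {
                *head = ceq->next;      /* LNKLSTN.FIRSTQ <- CEQCTL.NEXTQ   */
        }
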
@@ -461,7 +591,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
/* set splitalways mode 10b */
- rx_ctx.dtype = 0x2;
+ rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
}
/* databuffer length validation */
@@ -602,8 +732,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
* that VF queues be mapped using this method, even when they are
* contiguous in real life
*/
- wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
- I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
/* enable VF vplan_qtable mappings */
reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
@@ -630,7 +760,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
(j * 2) + 1);
reg |= qid << 16;
}
- wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
+ reg);
}
i40e_flush(hw);
@@ -849,9 +980,11 @@ complete_reset:
/* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf);
if (!i40e_alloc_vf_res(vf)) {
+ int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_enable_vf_mappings(vf);
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ i40e_notify_client_of_vf_reset(pf, abs_vf_id);
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -876,11 +1009,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
usleep_range(1000, 2000);
- for (i = 0; i < pf->num_alloc_vfs; i++)
- if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
- i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
- false);
-
+ i40e_notify_client_of_vf_enable(pf, 0);
for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
@@ -952,6 +1081,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
goto err_iov;
}
}
+ i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
/* allocate memory */
vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
@@ -980,7 +1110,7 @@ err_alloc:
i40e_free_vfs(pf);
err_iov:
/* Re-enable interrupt 0. */
- i40e_irq_dynamic_enable_icr0(pf);
+ i40e_irq_dynamic_enable_icr0(pf, false);
return ret;
}
@@ -1205,6 +1335,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi->info.pvid)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
+ (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
+ set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
+ }
+
if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
vfres->vf_offload_flags |=
@@ -1213,9 +1350,21 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
+ if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+ }
+
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+ }
+
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1814,6 +1963,72 @@ error_param:
}
/**
+ * i40e_vc_iwarp_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF for the iwarp msgs
+ **/
+static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_pf *pf = vf->pf;
+ int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
+ msg, msglen);
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_iwarp_qvmap_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @config: config qvmap or release it
+ *
+ * called from the VF for the iwarp irq map msgs
+ **/
+static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
+ bool config)
+{
+ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
+ (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (config) {
+ if (i40e_config_iwarp_qvlist(vf, qvlist_info))
+ aq_ret = I40E_ERR_PARAM;
+ } else {
+ i40e_release_iwarp_qvlist(vf);
+ }
+
+error_param:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf,
+ config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ aq_ret);
+}
+
+/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1908,6 +2123,32 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
break;
+ case I40E_VIRTCHNL_OP_IWARP:
+ /* These messages are opaque to us and will be validated in
+ * the RDMA client code. We just need to check for nonzero
+ * length. The firmware will enforce max length restrictions.
+ */
+ if (msglen)
+ valid_len = msglen;
+ else
+ err_msg_format = true;
+ break;
+ case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ valid_len = 0;
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_iwarp_qvlist_info *qv =
+ (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+ if (qv->num_vectors == 0) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += ((qv->num_vectors - 1) *
+ sizeof(struct i40e_virtchnl_iwarp_qv_info));
+ }
+ break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
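[Editor's note, not part of the patch] The CONFIG_IWARP_IRQ_MAP case above validates a variable-length message in two steps: the fixed header first, then the expected total length recomputed from num_vectors. One guard the patch does not add is an upper bound on num_vectors, a value the VF controls; a hedged sketch of such a bound, where msix_vf would be the per-VF MSI-X vector count:

        static bool qvlist_vectors_sane(u32 num_vectors, u32 msix_vf)
        {
                return num_vectors != 0 && num_vectors <= msix_vf;
        }
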
@@ -1997,6 +2238,15 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg, msglen);
break;
+ case I40E_VIRTCHNL_OP_IWARP:
+ ret = i40e_vc_iwarp_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
+ break;
+ case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
+ break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2025,7 +2275,11 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
return 0;
- /* re-enable vflr interrupt cause */
+ /* Re-enable the VFLR interrupt cause here, before looking for which
+ * VF got reset. Otherwise, if another VF gets a reset while the
+ * first one is being processed, that interrupt will be lost, and
+ * that VF will be stuck in reset forever.
+ */
reg = rd32(hw, I40E_PFINT_ICR0_ENA);
reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
wr32(hw, I40E_PFINT_ICR0_ENA, reg);
@@ -2186,6 +2440,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
* and then reloading the VF driver.
*/
i40e_vc_disable_vf(pf, vf);
+ /* During reset the VF got a new VSI, so refresh the pointer. */
+ vsi = pf->vsi[vf->lan_vsi_idx];
}
/* Check for condition where there was already a port VLAN ID
@@ -2294,6 +2550,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
case I40E_LINK_SPEED_40GB:
speed = 40000;
break;
+ case I40E_LINK_SPEED_20GB:
+ speed = 20000;
+ break;
case I40E_LINK_SPEED_10GB:
speed = 10000;
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index da44995def42..e7b2fba0309e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -58,6 +58,7 @@ enum i40e_queue_ctrl {
enum i40e_vf_states {
I40E_VF_STAT_INIT = 0,
I40E_VF_STAT_ACTIVE,
+ I40E_VF_STAT_IWARPENA,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
};
@@ -66,6 +67,7 @@ enum i40e_vf_states {
enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
I40E_VIRTCHNL_VF_CAP_L2,
+ I40E_VIRTCHNL_VF_CAP_IWARP,
};
/* VF information structure */
@@ -91,8 +93,8 @@ struct i40e_vf {
* When assigned, these will be non-zero, because VSI 0 is always
* the main LAN VSI for the PF.
*/
- u8 lan_vsi_idx; /* index into PF struct */
- u8 lan_vsi_id; /* ID as used by firmware */
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 lan_vsi_id; /* ID as used by firmware */
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u64 num_mdd_events; /* num of mdd events detected */
@@ -106,6 +108,8 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
+ /* RDMA Client */
+ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
};
void i40e_free_vfs(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 3f65e39b3fe4..44f7ed7583dd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -887,6 +887,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
u16 flags;
u16 ntu;
+ /* pre-clean the event info */
+ memset(&e->desc, 0, sizeof(e->desc));
+
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f5b2b369dc7c..aad8d6277110 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -34,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0004
+#define I40E_FW_API_VERSION_MINOR 0x0005
struct i40e_aq_desc {
__le16 flags;
@@ -145,6 +145,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_statistics = 0x0202,
i40e_aqc_opc_set_port_parameters = 0x0203,
i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+ i40e_aqc_opc_set_switch_config = 0x0205,
+ i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
+ i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
i40e_aqc_opc_add_vsi = 0x0210,
i40e_aqc_opc_update_vsi_parameters = 0x0211,
@@ -220,6 +223,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_run_phy_activity = 0x0626,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@@ -228,6 +232,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
i40e_aqc_opc_oem_post_update = 0x0720,
+ i40e_aqc_opc_thermal_sensor = 0x0721,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -399,6 +404,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008
#define I40E_AQ_CAP_ID_SRIOV 0x0012
#define I40E_AQ_CAP_ID_VF 0x0013
#define I40E_AQ_CAP_ID_VMDQ 0x0014
@@ -419,6 +425,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_LED 0x0061
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -677,6 +684,31 @@ struct i40e_aqc_switch_resource_alloc_element_resp {
I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+/* Set Switch Configuration (direct 0x0205) */
+struct i40e_aqc_set_switch_config {
+ __le16 flags;
+#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
+#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+ __le16 valid_flags;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
+
+/* Read Receive control registers (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ * used for accessing Rx control registers that can be
+ * slow and need special handling when under high Rx load
+ */
+struct i40e_aqc_rx_ctl_reg_read_write {
+ __le32 reserved1;
+ __le32 address;
+ __le32 reserved2;
+ __le32 value;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
+
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
@@ -903,7 +935,8 @@ struct i40e_aqc_add_veb {
I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */
+#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10
u8 enable_tcs;
u8 reserved[9];
};
@@ -970,6 +1003,7 @@ struct i40e_aqc_add_macvlan_element_data {
#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010
__le16 queue_number;
#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
@@ -1066,6 +1100,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
@@ -1254,10 +1289,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000
+#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000
__le32 tenant_id;
u8 reserved[4];
@@ -1752,7 +1793,12 @@ struct i40e_aqc_get_link_status {
u8 config;
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 reserved[5];
+ u8 external_power_ability;
+#define I40E_AQ_LINK_POWER_CLASS_1 0x00
+#define I40E_AQ_LINK_POWER_CLASS_2 0x01
+#define I40E_AQ_LINK_POWER_CLASS_3 0x02
+#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+ u8 reserved[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1820,6 +1866,18 @@ enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
};
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+ __le16 activity_id;
+ u8 flags;
+ u8 reserved1;
+ __le32 control;
+ __le32 data;
+ u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
@@ -1909,6 +1967,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer {
I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+/* Thermal Sensor (indirect 0x0721)
+ * read or set thermal sensor configs and values
+ * takes a sensor and command specific data buffer, not detailed here
+ */
+struct i40e_aqc_thermal_sensor {
+ u8 sensor_action;
+#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0
+#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1
+#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor);
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -2083,6 +2157,7 @@ struct i40e_aqc_add_udp_tunnel {
#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
u8 reserved1[10];
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 938783e0baac..771ac6ad8cda 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -904,6 +904,131 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
};
/**
+ * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ i40e_status status;
+
+ if (!reg_val)
+ return I40E_ERR_PARAM;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_rx_ctl_reg_read);
+
+ cmd_resp->address = cpu_to_le32(reg_addr);
+
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == 0)
+ *reg_val = le32_to_cpu(cmd_resp->value);
+
+ return status;
+}
+
+/**
+ * i40evf_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+{
+ i40e_status status = 0;
+ bool use_register;
+ int retry = 5;
+ u32 val = 0;
+
+ use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ if (!use_register) {
+do_retry:
+ status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
+ &val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ usleep_range(1000, 2000);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ val = rd32(hw, reg_addr);
+
+ return val;
+}
+
+/**
+ * i40evf_aq_rx_ctl_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ i40e_status status;
+
+ i40evf_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_rx_ctl_reg_write);
+
+ cmd->address = cpu_to_le32(reg_addr);
+ cmd->value = cpu_to_le32(reg_val);
+
+ status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40evf_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ i40e_status status = 0;
+ bool use_register;
+ int retry = 5;
+
+ use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ if (!use_register) {
+do_retry:
+ status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
+ reg_val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ usleep_range(1000, 2000);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ wr32(hw, reg_addr, reg_val);
+}
+
+/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
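[Editor's note, not part of the patch] i40evf_read_rx_ctl() and i40evf_write_rx_ctl() above share one pattern: prefer the firmware path on AQ API 1.5+, retry on EAGAIN, and fall back to a direct register access if the firmware path is unavailable or fails. The control flow condensed into one sketch (read side shown; equivalent to the goto-based retry above):

        static u32 rx_ctl_read_sketch(struct i40e_hw *hw, u32 reg)
        {
                bool old_api = (hw->aq.api_maj_ver == 1) &&
                               (hw->aq.api_min_ver < 5);
                i40e_status status = 0;
                int retry = 5;
                u32 val = 0;

                while (!old_api) {
                        status = i40evf_aq_rx_ctl_read_register(hw, reg,
                                                                &val, NULL);
                        if (hw->aq.asq_last_status != I40E_AQ_RC_EAGAIN ||
                            !retry--)
                                break;
                        usleep_range(1000, 2000);
                }
                if (old_api || status)  /* FW path unavailable or failed */
                        val = rd32(hw, reg);
                return val;
        }
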
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index cbd9a1b078ab..d89d52109efa 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -103,4 +103,19 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
+i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
+ u16 reg, u8 phy_addr, u16 *value);
+i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
+ u16 reg, u8 phy_addr, u16 value);
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 7a00657dacda..cea97daa844c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -129,15 +129,19 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40evf_get_tx_pending - how many Tx descriptors not processed
* @tx_ring: the ring of descriptors
+ * @in_sw: is tx_pending being checked in SW or HW
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-u32 i40evf_get_tx_pending(struct i40e_ring *ring)
+u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
u32 head, tail;
- head = i40e_get_head(ring);
+ if (!in_sw)
+ head = i40e_get_head(ring);
+ else
+ head = ring->next_to_clean;
tail = readl(ring->tail);
if (head != tail)
@@ -252,6 +256,22 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+ unsigned int j = 0;
+ /* If fewer than 4 descriptors are waiting to be written back,
+ * kick the hardware to force them to be written back, in case
+ * we stay in NAPI. In this mode on X722 we do not enable the
+ * interrupt.
+ */
+ j = i40evf_get_tx_pending(tx_ring, false);
+
+ if (budget &&
+ ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ }
+
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
total_packets, total_bytes);
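[Editor's note, not part of the patch] In the writeback check above, assuming WB_STRIDE is 3 as in the PF i40e Tx path, the guard "(j / (WB_STRIDE + 1)) == 0 && (j > 0)" reduces to a simple range test:

        /* true when a small tail of descriptors is still pending writeback */
        static inline bool few_descs_pending(u32 j)
        {
                return j > 0 && j <= 3;         /* 3 == WB_STRIDE (assumed) */
        }
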
@@ -276,39 +296,49 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
/**
- * i40evf_force_wb -Arm hardware to do a wb on noncache aligned descriptors
+ * i40e_enable_wb_on_itr - Arm hardware to do a wb; interrupts are not enabled
* @vsi: the VSI we care about
- * @q_vector: the vector on which to force writeback
+ * @q_vector: the vector on which to enable writeback
*
**/
-static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
+ struct i40e_q_vector *q_vector)
{
u16 flags = q_vector->tx.ring[0].flags;
+ u32 val;
- if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
- u32 val;
+ if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
+ return;
- if (q_vector->arm_wb_state)
- return;
+ if (q_vector->arm_wb_state)
+ return;
- val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
+ val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
- wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
- vsi->base_vector - 1),
- val);
- q_vector->arm_wb_state = true;
- } else {
- u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
- I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
- I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
- /* allow 00 to be written to the index */
-
- wr32(&vsi->back->hw,
- I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
- vsi->base_vector - 1), val);
- }
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+ vsi->base_vector - 1), val);
+ q_vector->arm_wb_state = true;
+}
+
+/**
+ * i40evf_force_wb - Issue SW Interrupt so HW does a wb
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
+ /* allow 00 to be written to the index */;
+
+ wr32(&vsi->back->hw,
+ I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
+ val);
}
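[Editor's note, not part of the patch] After this split, the two paths differ only in which VFINT_DYN_CTLN1 bits they set; an editor's side-by-side of the register values:

        /*
         * i40e_enable_wb_on_itr:  WB_ON_ITR | ITR_INDX (no-itr)
         *      request a descriptor writeback without enabling the
         *      interrupt, for use while staying in NAPI polling
         *
         * i40evf_force_wb:        INTENA | ITR_INDX (no-itr) |
         *                         SWINT_TRIG | SW_ITR_INDX_ENA
         *      fire a software interrupt so the writeback happens
         *      immediately, re-enabling the interrupt in the process
         */
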
/**
@@ -506,7 +536,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (rx_bi->page_dma) {
dma_unmap_page(dev,
rx_bi->page_dma,
- PAGE_SIZE / 2,
+ PAGE_SIZE,
DMA_FROM_DEVICE);
rx_bi->page_dma = 0;
}
@@ -641,16 +671,19 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
* i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors on allocation
**/
-void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
{
u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
+ const int current_node = numa_node_id();
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
- return;
+ return false;
while (cleaned_count--) {
rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -658,56 +691,79 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
if (bi->skb) /* desc is in use */
goto no_buffers;
+
+ /* If we've been moved to a different NUMA node, release the
+ * page so we can get a new one on the current node.
+ */
+ if (bi->page && page_to_nid(bi->page) != current_node) {
+ dma_unmap_page(rx_ring->dev,
+ bi->page_dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(bi->page);
+ bi->page = NULL;
+ bi->page_dma = 0;
+ rx_ring->rx_stats.realloc_count++;
+ } else if (bi->page) {
+ rx_ring->rx_stats.page_reuse_count++;
+ }
+
if (!bi->page) {
bi->page = alloc_page(GFP_ATOMIC);
if (!bi->page) {
rx_ring->rx_stats.alloc_page_failed++;
goto no_buffers;
}
- }
-
- if (!bi->page_dma) {
- /* use a half page if we're re-using */
- bi->page_offset ^= PAGE_SIZE / 2;
bi->page_dma = dma_map_page(rx_ring->dev,
bi->page,
- bi->page_offset,
- PAGE_SIZE / 2,
+ 0,
+ PAGE_SIZE,
DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev,
- bi->page_dma)) {
+ if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
rx_ring->rx_stats.alloc_page_failed++;
+ __free_page(bi->page);
+ bi->page = NULL;
bi->page_dma = 0;
+ bi->page_offset = 0;
goto no_buffers;
}
+ bi->page_offset = 0;
}
- dma_sync_single_range_for_device(rx_ring->dev,
- bi->dma,
- 0,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(bi->page_dma + bi->page_offset);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
i++;
if (i == rx_ring->count)
i = 0;
}
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+
+ return false;
+
no_buffers:
if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i);
+
+ /* make sure to come back via polling to try again after
+ * allocation failure
+ */
+ return true;
}
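[Editor's note, not part of the patch] The allocator above now maps whole pages and recycles them across refills, releasing a page only when the CPU has migrated to another NUMA node; realloc_count and page_reuse_count track the two outcomes. The decision itself, restated as an illustrative helper:

        /* true if the page should be dropped and re-allocated locally */
        static bool rx_page_is_remote(const struct page *page)
        {
                return page && page_to_nid(page) != numa_node_id();
        }
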
/**
* i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors on allocation
**/
-void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
{
u16 i = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
@@ -716,7 +772,7 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
- return;
+ return false;
while (cleaned_count--) {
rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -724,8 +780,10 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
skb = bi->skb;
if (!skb) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len);
+ skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len,
+ GFP_ATOMIC |
+ __GFP_NOWARN);
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
goto no_buffers;
@@ -743,6 +801,8 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
if (dma_mapping_error(rx_ring->dev, bi->dma)) {
rx_ring->rx_stats.alloc_buff_failed++;
bi->dma = 0;
+ dev_kfree_skb(bi->skb);
+ bi->skb = NULL;
goto no_buffers;
}
}
@@ -754,9 +814,19 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
i = 0;
}
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+
+ return false;
+
no_buffers:
if (rx_ring->next_to_use != i)
i40e_release_rx_desc(rx_ring, i);
+
+ /* make sure to come back via polling to try again after
+ * allocation failure
+ */
+ return true;
}
/**
@@ -791,16 +861,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u16 rx_ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4 = false, ipv6 = false;
- bool ipv4_tunnel, ipv6_tunnel;
- __wsum rx_udp_csum;
- struct iphdr *iph;
- __sum16 csum;
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
+ bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
skb->ip_summed = CHECKSUM_NONE;
@@ -816,12 +877,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (!(decoded.known && decoded.outer_ip))
return;
- if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
- ipv4 = true;
- else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
- ipv6 = true;
+ ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
+ ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
if (ipv4 &&
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
@@ -845,36 +904,17 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- * The UDP_0 bit *may* bet set if the *inner* header is UDP
+ /* The hardware supported by this driver does not validate outer
+ * checksums for tunneled VXLAN or GENEVE frames. The specification
+ * only says receivers "MAY validate" the outer checksum, so it is
+ * not a hard requirement; since we have validated the inner
+ * checksum, report CHECKSUM_UNNECESSARY.
*/
- if (ipv4_tunnel) {
- skb->transport_header = skb->mac_header +
- sizeof(struct ethhdr) +
- (ip_hdr(skb)->ihl * 4);
-
- /* Add 4 bytes for VLAN tagged packets */
- skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD))
- ? VLAN_HLEN : 0;
-
- if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
- (udp_hdr(skb)->check != 0)) {
- rx_udp_csum = udp_csum(skb);
- iph = ip_hdr(skb);
- csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
- (skb->len -
- skb_transport_offset(skb)),
- IPPROTO_UDP, rx_udp_csum);
-
- if (udp_hdr(skb)->check != csum)
- goto checksum_fail;
-
- } /* else its GRE and so no outer UDP header */
- }
+
+ ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
+ ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
+ (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = ipv4_tunnel || ipv6_tunnel;
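[Editor's note, not part of the patch] skb->csum_level extends CHECKSUM_UNNECESSARY to cover csum_level + 1 consecutive checksums, outermost first. Setting it to 1 for the tunnel ptypes therefore tells the stack the outer checksum is covered as well, on the "MAY validate" reasoning in the comment above:

        /*
         * skb->ip_summed  = CHECKSUM_UNNECESSARY;
         * skb->csum_level = N;    N + 1 consecutive checksums are treated
         *                         as verified; here N is 1 for tunneled
         *                         frames and 0 otherwise.
         */
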
@@ -939,18 +979,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- const int current_node = numa_mem_id();
struct i40e_vsi *vsi = rx_ring->vsi;
u16 i = rx_ring->next_to_clean;
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
+ bool failure = false;
u8 rx_ptype;
u64 qword;
+ u32 copysize;
do {
struct i40e_rx_buffer *rx_bi;
@@ -958,7 +999,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+ failure = failure ||
+ i40evf_alloc_rx_buffers_ps(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -976,13 +1019,22 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
* DD bit is set.
*/
dma_rmb();
+ /* sync header buffer for reading */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_ring->rx_bi[0].dma,
+ i * rx_ring->rx_hdr_len,
+ rx_ring->rx_hdr_len,
+ DMA_FROM_DEVICE);
rx_bi = &rx_ring->rx_bi[i];
skb = rx_bi->skb;
if (likely(!skb)) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len);
+ skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_hdr_len,
+ GFP_ATOMIC |
+ __GFP_NOWARN);
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
+ failure = true;
break;
}
@@ -990,8 +1042,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
skb_record_rx_queue(skb, rx_ring->queue_index);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->dma,
- 0,
+ rx_ring->rx_bi[0].dma,
+ i * rx_ring->rx_hdr_len,
rx_ring->rx_hdr_len,
DMA_FROM_DEVICE);
}
@@ -1009,9 +1061,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
I40E_RXD_QW1_PTYPE_SHIFT;
- prefetch(rx_bi->page);
+ /* sync half-page for reading */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_bi->page_dma,
+ rx_bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
rx_bi->skb = NULL;
cleaned_count++;
+ copysize = 0;
if (rx_hbo || rx_sph) {
int len;
@@ -1022,38 +1081,50 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
} else if (skb->len == 0) {
int len;
+ unsigned char *va = page_address(rx_bi->page) +
+ rx_bi->page_offset;
- len = (rx_packet_len > skb_headlen(skb) ?
- skb_headlen(skb) : rx_packet_len);
- memcpy(__skb_put(skb, len),
- rx_bi->page + rx_bi->page_offset,
- len);
- rx_bi->page_offset += len;
+ len = min(rx_packet_len, rx_ring->rx_hdr_len);
+ memcpy(__skb_put(skb, len), va, len);
+ copysize = len;
rx_packet_len -= len;
}
-
/* Get the rest of the data if this was a header split */
if (rx_packet_len) {
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset,
- rx_packet_len);
-
- skb->len += rx_packet_len;
- skb->data_len += rx_packet_len;
- skb->truesize += rx_packet_len;
-
- if ((page_count(rx_bi->page) == 1) &&
- (page_to_nid(rx_bi->page) == current_node))
- get_page(rx_bi->page);
- else
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset + copysize,
+ rx_packet_len, I40E_RXBUFFER_2048);
+
+ /* If the page count is more than 2, then both halves
+ * of the page are used and we need to free it. Do it
+ * here instead of in the alloc code. Otherwise one
+ * of the half-pages might be released between now and
+ * then, and we wouldn't know which one to use.
+ * Don't call get_page and free_page since those are
+ * both expensive atomic operations that just change
+ * the refcount in opposite directions. Just give the
+ * page to the stack; he can have our refcount.
+ */
+ if (page_count(rx_bi->page) > 2) {
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
rx_bi->page = NULL;
+ rx_bi->page_dma = 0;
+ rx_ring->rx_stats.realloc_count++;
+ } else {
+ get_page(rx_bi->page);
+ /* switch to the other half-page here; the
+ * allocation code programs the right addr
+ * into HW. If we haven't used this half-page,
+ * the address won't be changed, and HW can
+ * just use it next time through.
+ */
+ rx_bi->page_offset ^= PAGE_SIZE / 2;
+ }
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
}
I40E_RX_INCREMENT(rx_ring, i);
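[Editor's note, not part of the patch] The receive path above shares each page between two half-page buffers: page_offset ^= PAGE_SIZE / 2 flips between halves, and page_count() > 2 means both halves are held by skbs, so the page is unmapped rather than recycled. The page lifecycle, summarized from the comments above:

        /*
         * alloc_page()          -> refcount 1, owned by the driver
         * frag handed to stack  -> the driver's reference travels with
         *                          the skb (no get_page/put_page churn)
         * page_count() <= 2     -> the other half is still free: take a
         *                          new reference and flip page_offset
         * page_count() > 2      -> both halves are in use by the stack:
         *                          unmap the page and forget it
         */
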
@@ -1105,7 +1176,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
- return total_rx_packets;
+ return failure ? budget : total_rx_packets;
}
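[Editor's note, not part of the patch] Returning the full budget on allocation failure, instead of the smaller packet count, keeps NAPI from completing, so the poll loop runs again and the failed buffer allocation is retried; this is the "come back via polling" promise made in the allocators:

        /*
         * work_done <  budget  -> napi_complete(), interrupts re-enabled
         * work_done == budget  -> stay in polling, retry the allocation
         */
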
/**
@@ -1123,6 +1194,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
union i40e_rx_desc *rx_desc;
u32 rx_error, rx_status;
u16 rx_packet_len;
+ bool failure = false;
u8 rx_ptype;
u64 qword;
u16 i;
@@ -1133,7 +1205,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
u16 vlan_tag;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+ failure = failure ||
+ i40evf_alloc_rx_buffers_1buf(rx_ring,
+ cleaned_count);
cleaned_count = 0;
}
@@ -1214,7 +1288,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
- return total_rx_packets;
+ return failure ? budget : total_rx_packets;
}
static u32 i40e_buildreg_itr(const int type, const u16 itr)
@@ -1222,7 +1296,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
u32 val;
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ /* Don't clear PBA because that can cause lost interrupts that
+ * came in while we were cleaning/polling
+ */
(type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
(itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
@@ -1335,7 +1411,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+ clean_complete = clean_complete &&
+ i40e_clean_tx_irq(ring, vsi->work_limit);
arm_wb = arm_wb || ring->arm_wb;
ring->arm_wb = false;
}
@@ -1359,7 +1436,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
work_done += cleaned;
/* if we didn't clean as many as budgeted, we must be done */
- clean_complete &= (budget_per_ring != cleaned);
+ clean_complete = clean_complete && (budget_per_ring > cleaned);
}
/* If work not completed, return budget and polling will return */
@@ -1367,7 +1444,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
tx_only:
if (arm_wb) {
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
- i40evf_force_wb(vsi, q_vector);
+ i40e_enable_wb_on_itr(vsi, q_vector);
}
return budget;
}
@@ -1447,13 +1524,23 @@ out:
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
- u32 cd_cmd, cd_tso_len, cd_mss;
- struct ipv6hdr *ipv6h;
- struct tcphdr *tcph;
- struct iphdr *iph;
- u32 l4len;
+ u64 cd_cmd, cd_tso_len, cd_mss;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -1461,35 +1548,60 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
-
- if (iph->version == 4) {
- tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- 0, IPPROTO_TCP, 0);
- } else if (ipv6h->version == 6) {
- tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
- ipv6h->payload_len = 0;
- tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
- 0, IPPROTO_TCP, 0);
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
+
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ /* determine offset of outer transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from outer checksum */
+ paylen = (__force u16)l4.udp->check;
+ paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+ l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ }
+
+ /* reset pointers to inner headers */
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+ /* initialize inner IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
}
- l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
- *hdr_len = (skb->encapsulation
- ? (skb_inner_transport_header(skb) - skb->data)
- : skb_transport_offset(skb)) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from inner checksum */
+ paylen = (__force u16)l4.tcp->check;
+ paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
+ l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
/* find the field values */
cd_cmd = I40E_TX_CTX_DESC_TSO;
cd_tso_len = skb->len - *hdr_len;
cd_mss = skb_shinfo(skb)->gso_size;
- *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
- ((u64)cd_tso_len <<
- I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
- ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
return 1;
}
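[Editor's note, not part of the patch] The paylen arithmetic above subtracts the payload length from a pseudo-header checksum entirely in ones'-complement math: adding ~len is subtraction mod 0xffff, and the ntohs(1) factor byte-swaps on little-endian hosts (x * 256 mod 0xffff rotates x by 8 bits), keeping the sum in network byte order. A self-contained sketch, assuming the usual csum_fold() semantics:

        /* remove "len" payload bytes from a checksum field */
        static __sum16 csum_sub_len(__sum16 check, u32 len)
        {
                u32 sum = (__force u16)check;

                sum += ntohs(1) * (u16)~len;    /* ones'-complement -len,
                                                 * in network byte order */
                return ~csum_fold((__force __wsum)sum);
        }
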
@@ -1499,129 +1611,157 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
* @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
- u32 *td_cmd, u32 *td_offset,
- struct i40e_ring *tx_ring,
- u32 *cd_tunneling)
+static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
{
- struct ipv6hdr *this_ipv6_hdr;
- unsigned int this_tcp_hdrlen;
- struct iphdr *this_ip_hdr;
- u32 network_hdr_len;
- u8 l4_hdr = 0;
- struct udphdr *oudph;
- struct iphdr *oiph;
- u32 l4_tunnel = 0;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ unsigned char *exthdr;
+ u32 offset, cmd = 0, tunnel = 0;
+ __be16 frag_off;
+ u8 l4_proto = 0;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* compute outer L2 header size */
+ offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
- switch (ip_hdr(skb)->protocol) {
+ /* define outer network header type */
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+ I40E_TX_CTX_EXT_IP_IPV4 :
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+ l4_proto = ip.v4->protocol;
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
+ tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ }
+
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* define outer transport */
+ switch (l4_proto) {
case IPPROTO_UDP:
- oudph = udp_hdr(skb);
- oiph = ip_hdr(skb);
- l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+ break;
+ case IPPROTO_GRE:
+ tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
- return;
- }
- network_hdr_len = skb_inner_network_header_len(skb);
- this_ip_hdr = inner_ip_hdr(skb);
- this_ipv6_hdr = inner_ipv6_hdr(skb);
- this_tcp_hdrlen = inner_tcp_hdrlen(skb);
-
- if (*tx_flags & I40E_TX_FLAGS_IPV4) {
- if (*tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
- } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
if (*tx_flags & I40E_TX_FLAGS_TSO)
- ip_hdr(skb)->check = 0;
- }
+ return -1;
- /* Now set the ctx descriptor fields */
- *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- l4_tunnel |
- ((skb_inner_network_offset(skb) -
- skb_transport_offset(skb)) >> 1) <<
- I40E_TXD_CTX_QW0_NATLEN_SHIFT;
- if (this_ip_hdr->version == 6) {
- *tx_flags &= ~I40E_TX_FLAGS_IPV4;
- *tx_flags |= I40E_TX_FLAGS_IPV6;
+ skb_checksum_help(skb);
+ return 0;
}
- if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
- (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) &&
- (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
- oudph->check = ~csum_tcpudp_magic(oiph->saddr,
- oiph->daddr,
- (skb->len - skb_transport_offset(skb)),
- IPPROTO_UDP, 0);
- *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
- }
- } else {
- network_hdr_len = skb_network_header_len(skb);
- this_ip_hdr = ip_hdr(skb);
- this_ipv6_hdr = ipv6_hdr(skb);
- this_tcp_hdrlen = tcp_hdrlen(skb);
+ /* compute tunnel header size */
+ tunnel |= ((ip.hdr - l4.hdr) / 2) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ /* indicate if we need to offload outer UDP header */
+ if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+
+ /* record tunnel offload values */
+ *cd_tunneling |= tunnel;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
/* Enable IP checksum offloads */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
- l4_hdr = this_ip_hdr->protocol;
+ l4_proto = ip.v4->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (*tx_flags & I40E_TX_FLAGS_TSO) {
- *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
- this_ip_hdr->check = 0;
- } else {
- *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
- }
- /* Now set the td_offset for IP header length */
- *td_offset = (network_hdr_len >> 2) <<
- I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
+ I40E_TX_DESC_CMD_IIPT_IPV4;
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
- l4_hdr = this_ipv6_hdr->nexthdr;
- *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
- /* Now set the td_offset for IP header length */
- *td_offset = (network_hdr_len >> 2) <<
- I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
}
- /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
- *td_offset |= (skb_network_offset(skb) >> 1) <<
- I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* compute inner L3 header size */
+ offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
/* Enable L4 checksum offloads */
- switch (l4_hdr) {
+ switch (l4_proto) {
case IPPROTO_TCP:
/* enable checksum offloads */
- *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (this_tcp_hdrlen >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case IPPROTO_SCTP:
/* enable SCTP checksum offload */
- *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
- *td_offset |= (sizeof(struct sctphdr) >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case IPPROTO_UDP:
/* enable UDP checksum offload */
- *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
- *td_offset |= (sizeof(struct udphdr) >> 2) <<
- I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
default:
- break;
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
+ return -1;
+ skb_checksum_help(skb);
+ return 0;
}
+
+ *td_cmd |= cmd;
+ *td_offset |= offset;
+
+ return 1;
}
/**
@@ -1656,59 +1796,70 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
}
/**
- * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
- * @tx_flags: collected send information
*
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
**/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
+bool __i40evf_chk_linearize(struct sk_buff *skb)
{
- struct skb_frag_struct *frag;
- bool linearize = false;
- unsigned int size = 0;
- u16 num_frags;
- u16 gso_segs;
+ const struct skb_frag_struct *frag, *stale;
+ int nr_frags, sum;
- num_frags = skb_shinfo(skb)->nr_frags;
- gso_segs = skb_shinfo(skb)->gso_segs;
+ /* no need to check if number of frags is less than 7 */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
+ return false;
- if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
- u16 j = 0;
+ /* We need to walk through the list and validate that each group
+ * of 6 fragments totals at least gso_size. However we don't need
+ * to perform such validation on the last 6 since the last 6 cannot
+ * inherit any data from a descriptor after them.
+ */
+ nr_frags -= I40E_MAX_BUFFER_TXD - 2;
+ frag = &skb_shinfo(skb)->frags[0];
+
+ /* Initialize sum to the negative value of gso_size minus 1. We
+ * use this as the worst case scenario in which the frag ahead
+ * of us only provides one byte, which is why we are limited to 6
+ * descriptors for a single transmit as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - skb_shinfo(skb)->gso_size;
- if (num_frags < (I40E_MAX_BUFFER_TXD))
- goto linearize_chk_done;
- /* try the simple math, if we have too many frags per segment */
- if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
- I40E_MAX_BUFFER_TXD) {
- linearize = true;
- goto linearize_chk_done;
- }
- frag = &skb_shinfo(skb)->frags[0];
- /* we might still have more fragments per segment */
- do {
- size += skb_frag_size(frag);
- frag++; j++;
- if ((size >= skb_shinfo(skb)->gso_size) &&
- (j < I40E_MAX_BUFFER_TXD)) {
- size = (size % skb_shinfo(skb)->gso_size);
- j = (size) ? 1 : 0;
- }
- if (j == I40E_MAX_BUFFER_TXD) {
- linearize = true;
- break;
- }
- num_frags--;
- } while (num_frags);
- } else {
- if (num_frags >= I40E_MAX_BUFFER_TXD)
- linearize = true;
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ stale = &skb_shinfo(skb)->frags[0];
+ for (;;) {
+ sum += skb_frag_size(frag++);
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ /* use pre-decrement to avoid processing last fragment */
+ if (!--nr_frags)
+ break;
+
+ sum -= skb_frag_size(stale++);
}
-linearize_chk_done:
- return linearize;
+ return false;
}
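
For reference, the sliding-window test above can be modelled outside the driver. The sketch below is illustrative only: fragment sizes and gso_size are plain ints rather than skb state, and the 8-buffer limit is hard-coded:

#include <stdbool.h>

#define MAX_BUFFER_TXD	8	/* stand-in for I40E_MAX_BUFFER_TXD */

/* Returns true when some window of MAX_BUFFER_TXD - 1 consecutive
 * fragments fails to cover one gso_size of payload, i.e. a segment
 * could need more DMA buffers than the hardware allows.
 */
static bool needs_linearize(const int *frag, int nr_frags, int gso_size)
{
	int sum, i, stale = 0;

	if (nr_frags < MAX_BUFFER_TXD - 1)
		return false;

	nr_frags -= MAX_BUFFER_TXD - 2;
	sum = 1 - gso_size;

	/* prime the window with the first five fragments */
	for (i = 0; i < 5; i++)
		sum += frag[i];

	for (;;) {
		sum += frag[i++];
		if (sum < 0)
			return true;	/* insufficient progress */
		if (!--nr_frags)
			break;
		sum -= frag[stale++];
	}
	return false;
}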
/**
@@ -1718,7 +1869,7 @@ linearize_chk_done:
*
* Returns -EBUSY if a stop is needed, else 0
**/
-static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */
@@ -1735,20 +1886,6 @@ static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
- * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __i40evf_maybe_stop_tx(tx_ring, size);
-}
-
-/**
* i40evf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
@@ -1863,7 +2000,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index),
first->bytecount);
- i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
* if xmit_more is supported
@@ -1946,38 +2083,6 @@ dma_error:
}
/**
- * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
- * @skb: send buffer
- * @tx_ring: ring to send buffer on
- *
- * Returns number of data descriptors needed for this skb. Returns 0 to indicate
- * there is not enough descriptors available in this ring since we need at least
- * one descriptor.
- **/
-static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-{
- unsigned int f;
- int count = 0;
-
- /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
- * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
- * + 4 desc gap to avoid the cache line where head is,
- * + 1 desc for context descriptor,
- * otherwise try next time
- */
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
- tx_ring->tx_stats.tx_busy++;
- return 0;
- }
- return count;
-}
-
-/**
* i40e_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
* @tx_ring: ring to send buffer on
@@ -1995,13 +2100,29 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
__be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
- int tso;
+ int tso, count;
/* prefetch the data, we'll need it later */
prefetch(skb->data);
- if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
+ count = i40e_xmit_descriptor_count(skb);
+ if (i40e_chk_linearize(skb, count)) {
+ if (__skb_linearize(skb))
+ goto out_drop;
+ count = TXD_USE_COUNT(skb->len);
+ tx_ring->tx_stats.tx_linearize++;
+ }
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 4 desc gap to avoid the cache line where head is,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
+ }
/* prepare the xmit flags */
if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
@@ -2026,24 +2147,17 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
- if (i40e_chk_linearize(skb, tx_flags)) {
- if (skb_linearize(skb))
- goto out_drop;
- tx_ring->tx_stats.tx_linearize++;
- }
+ /* Always offload the checksum, since it's in the data descriptor */
+ tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+ if (tso < 0)
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
- /* Always offload the checksum, since it's in the data descriptor */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- tx_flags |= I40E_TX_FLAGS_CSUM;
-
- i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
- tx_ring, &cd_tunneling);
- }
-
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index e29bb3e86cfd..0429553fe887 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -153,7 +153,6 @@ enum i40e_dyn_idx_t {
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
-#define I40E_TX_FLAGS_CSUM BIT(0)
#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
@@ -202,12 +201,15 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_lost_interrupt;
};
struct i40e_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buff_failed;
+ u64 page_reuse_count;
+ u64 realloc_count;
};
enum i40e_ring_state_t {
@@ -253,7 +255,6 @@ struct i40e_ring {
#define I40E_RX_DTYPE_NO_SPLIT 0
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
- u8 hsplit;
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
@@ -273,7 +274,6 @@ struct i40e_ring {
u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1)
/* stats structs */
struct i40e_queue_stats stats;
@@ -313,8 +313,8 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -324,7 +324,10 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
-u32 i40evf_get_tx_pending(struct i40e_ring *ring);
+void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
+u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
+int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+bool __i40evf_chk_linearize(struct sk_buff *skb);
/**
* i40e_get_head - Retrieve head from head writeback
@@ -339,4 +342,67 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
+ * @skb: send buffer
+ *
+ * Returns the number of data descriptors needed for this skb: one per
+ * I40E_MAX_DATA_PER_TXD chunk of the linear head and of each fragment.
+ **/
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
+{
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ int count = 0, size = skb_headlen(skb);
+
+ for (;;) {
+ count += TXD_USE_COUNT(size);
+
+ if (!nr_frags--)
+ break;
+
+ size = skb_frag_size(frag++);
+ }
+
+ return count;
+}
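
A hedged worked example of the loop above, assuming TXD_USE_COUNT(S) rounds S up by the per-descriptor data limit I40E_MAX_DATA_PER_TXD:

/* Illustrative sizes only: with a per-descriptor limit well above 5 KB,
 * a 2 KB linear head plus three 5 KB fragments cost one descriptor
 * each, so the loop returns count == 4.
 */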
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @count: number of buffers used
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
+{
+ /* Both TSO and single send will work if count is less than 8 */
+ if (likely(count < I40E_MAX_BUFFER_TXD))
+ return false;
+
+ if (skb_is_gso(skb))
+ return __i40evf_chk_linearize(skb);
+
+ /* we can support up to 8 data buffers for a single send */
+ return count != I40E_MAX_BUFFER_TXD;
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index be1b72b93888..e657eccd232c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -173,6 +173,7 @@ enum i40evf_state_t {
__I40EVF_RESETTING, /* in reset */
/* Below here, watchdog is running */
__I40EVF_DOWN, /* ready, can be opened */
+ __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */
__I40EVF_TESTING, /* in ethtool self-test */
__I40EVF_RUNNING, /* opened, working */
};
@@ -273,6 +274,9 @@ struct i40evf_adapter {
};
+/* Ethtool Private Flags */
+#define I40EVF_PRIV_FLAGS_PS BIT(0)
+
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
extern const char i40evf_driver_version[];
@@ -280,6 +284,7 @@ extern const char i40evf_driver_version[];
int i40evf_up(struct i40evf_adapter *adapter);
void i40evf_down(struct i40evf_adapter *adapter);
int i40evf_process_config(struct i40evf_adapter *adapter);
+void i40evf_schedule_reset(struct i40evf_adapter *adapter);
void i40evf_reset(struct i40evf_adapter *adapter);
void i40evf_set_ethtool_ops(struct net_device *netdev);
void i40evf_update_stats(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index a4c9feb589e7..dd4430aae7fa 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -63,6 +63,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
+static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ "packet-split",
+};
+
+#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
+
/**
* i40evf_get_settings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -97,6 +103,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev);
+ else if (sset == ETH_SS_PRIV_FLAGS)
+ return I40EVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -162,6 +170,12 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
+ } else if (sset == ETH_SS_PRIV_FLAGS) {
+ for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+ memcpy(data, i40evf_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
}
}
@@ -211,6 +225,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -459,6 +474,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
struct ethtool_rxnfc *nfc)
{
struct i40e_hw *hw = &adapter->hw;
+ u32 flags = adapter->vf_res->vf_offload_flags;
u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
@@ -477,54 +493,50 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- break;
- default:
+ } else {
return -EINVAL;
}
break;
case TCP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+
hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- break;
- default:
+ } else {
return -EINVAL;
}
break;
case UDP_V4_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- default:
+ } else {
return -EINVAL;
}
break;
case UDP_V6_FLOW:
- switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- case 0:
- hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ hena |=
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
+ } else {
return -EINVAL;
}
break;
@@ -713,6 +725,54 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
I40EVF_HLUT_ARRAY_SIZE);
}
+/**
+ * i40evf_get_priv_flags - report device private flags
+ * @dev: network interface device structure
+ *
+ * The count reported by get_sset_count and the string set itself must
+ * stay in sync for each flag returned. Add new strings for each flag
+ * to the i40evf_priv_flags_strings array.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40evf_get_priv_flags(struct net_device *dev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(dev);
+ u32 ret_flags = 0;
+
+ ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
+ I40EVF_PRIV_FLAGS_PS : 0;
+
+ return ret_flags;
+}
+
+/**
+ * i40evf_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct i40evf_adapter *adapter = netdev_priv(dev);
+ bool reset_required = false;
+
+ if ((flags & I40EVF_PRIV_FLAGS_PS) &&
+ !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
+ adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+ reset_required = true;
+ } else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
+ (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+ reset_required = true;
+ }
+
+ /* if needed, issue reset to cause things to take effect */
+ if (reset_required)
+ i40evf_schedule_reset(adapter);
+
+ return 0;
+}
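
A usage note for the flag wired up above; this assumes the standard ethtool CLI and an example interface name:

/* From userspace:
 *   ethtool --show-priv-flags eth0
 *   ethtool --set-priv-flags eth0 packet-split on
 * The second command sets I40EVF_FLAG_RX_PS_ENABLED and schedules the
 * reset that makes packet-split Rx take effect.
 */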
+
static const struct ethtool_ops i40evf_ethtool_ops = {
.get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
@@ -722,6 +782,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count,
+ .get_priv_flags = i40evf_get_priv_flags,
+ .set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 94da913b151d..4b70aae2fa84 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -32,13 +32,13 @@ static int i40evf_close(struct net_device *netdev);
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
- "Intel(R) XL710/X710 Virtual Function Network Driver";
+ "Intel(R) 40-10 Gigabit Virtual Function Network Driver";
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 15
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -69,6 +69,8 @@ MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+static struct workqueue_struct *i40evf_wq;
+
/**
* i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
@@ -171,6 +173,19 @@ void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
}
/**
+ * i40evf_schedule_reset - Set the flags and schedule a reset event
+ * @adapter: board private structure
+ **/
+void i40evf_schedule_reset(struct i40evf_adapter *adapter)
+{
+ if (!(adapter->flags &
+ (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
+ adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+ schedule_work(&adapter->reset_task);
+ }
+}
+
+/**
* i40evf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
@@ -179,11 +194,7 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
- if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING |
- I40EVF_FLAG_RESET_NEEDED))) {
- adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
- schedule_work(&adapter->reset_task);
- }
+ i40evf_schedule_reset(adapter);
}
/**
@@ -636,35 +647,22 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
int rx_buf_len;
- adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE;
- adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
-
- /* Decide whether to use packet split mode or not */
- if (netdev->mtu > ETH_DATA_LEN) {
- if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE)
- adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
- else
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
- } else {
- if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE)
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
- else
- adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
- }
-
/* Set the RX buffer length according to the mode */
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- rx_buf_len = I40E_RX_HDR_SIZE;
- } else {
- if (netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = I40EVF_RXBUFFER_2048;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
- }
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
+ netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = I40EVF_RXBUFFER_2048;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
adapter->rx_rings[i].rx_buf_len = rx_buf_len;
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+ set_ring_ps_enabled(&adapter->rx_rings[i]);
+ adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
+ } else {
+ clear_ring_ps_enabled(&adapter->rx_rings[i]);
+ }
}
}
@@ -1001,7 +999,12 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = &adapter->rx_rings[i];
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+ i40evf_alloc_rx_headers(ring);
+ i40evf_alloc_rx_buffers_ps(ring, ring->count);
+ } else {
i40evf_alloc_rx_buffers_1buf(ring, ring->count);
+ }
ring->next_to_use = ring->count - 1;
writel(ring->next_to_use, ring->tail);
}
@@ -1032,7 +1035,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_mac_filter *f;
- if (adapter->state == __I40EVF_DOWN)
+ if (adapter->state <= __I40EVF_DOWN_PENDING)
return;
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
@@ -1122,7 +1125,9 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
if (!adapter->vsi_res)
return;
kfree(adapter->tx_rings);
+ adapter->tx_rings = NULL;
kfree(adapter->rx_rings);
+ adapter->rx_rings = NULL;
}
/**
@@ -1454,7 +1459,11 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter)
int ret;
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- hena = I40E_DEFAULT_RSS_HENA;
+ if (adapter->vf_res->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+ else
+ hena = I40E_DEFAULT_RSS_HENA;
wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
@@ -1829,6 +1838,7 @@ static void i40evf_reset_task(struct work_struct *work)
break;
msleep(I40EVF_RESET_WAIT_MS);
}
+ pci_set_master(adapter->pdev);
/* extra wait to make sure minimum wait is met */
msleep(I40EVF_RESET_WAIT_MS);
if (i == I40EVF_RESET_WAIT_COUNT) {
@@ -1873,6 +1883,7 @@ static void i40evf_reset_task(struct work_struct *work)
adapter->netdev->flags &= ~IFF_UP;
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ adapter->state = __I40EVF_DOWN;
dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2142,7 +2153,8 @@ static int i40evf_open(struct net_device *netdev)
dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
return -EIO;
}
- if (adapter->state != __I40EVF_DOWN || adapter->aq_required)
+
+ if (adapter->state != __I40EVF_DOWN)
return -EBUSY;
/* allocate transmit descriptors */
@@ -2197,14 +2209,14 @@ static int i40evf_close(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- if (adapter->state <= __I40EVF_DOWN)
+ if (adapter->state <= __I40EVF_DOWN_PENDING)
return 0;
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_down(adapter);
- adapter->state = __I40EVF_DOWN;
+ adapter->state = __I40EVF_DOWN_PENDING;
i40evf_free_traffic_irqs(adapter);
return 0;
@@ -2325,9 +2337,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
+ NETIF_F_TSO_ECN |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_RXCSUM |
NETIF_F_GRO;
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_TSO_ECN |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
netdev->hw_features &= ~NETIF_F_RXCSUM;
@@ -2466,11 +2493,20 @@ static void i40evf_init_task(struct work_struct *work)
default:
goto err_alloc;
}
+
+ if (hw->mac.type == I40E_MAC_X722_VF)
+ adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE;
+
if (i40evf_process_config(adapter))
goto err_alloc;
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+ adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
+ adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
+
+ /* Default to single buffer rx, can be changed through ethtool. */
+ adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
@@ -2502,10 +2538,9 @@ static void i40evf_init_task(struct work_struct *work)
goto err_sw_init;
i40evf_map_rings_to_vectors(adapter);
if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
- if (!RSS_AQ(adapter))
- i40evf_init_rss(adapter);
+
err = i40evf_request_misc_irq(adapter);
if (err)
goto err_sw_init;
@@ -2885,6 +2920,11 @@ static int __init i40evf_init_module(void)
pr_info("%s\n", i40evf_copyright);
+ i40evf_wq = create_singlethread_workqueue(i40evf_driver_name);
+ if (!i40evf_wq) {
+ pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
+ return -ENOMEM;
+ }
ret = pci_register_driver(&i40evf_driver);
return ret;
}
@@ -2900,6 +2940,7 @@ module_init(i40evf_init_module);
static void __exit i40evf_exit_module(void)
{
pci_unregister_driver(&i40evf_driver);
+ destroy_workqueue(i40evf_wq);
}
module_exit(i40evf_exit_module);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index c1c526283757..488e738f76c6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -270,6 +270,10 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+ if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+ vqpi->rxq.splithdr_enabled = true;
+ vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
+ }
vqpi++;
}
@@ -804,6 +808,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
+ if (adapter->state == __I40EVF_DOWN_PENDING)
+ adapter->state = __I40EVF_DOWN;
break;
case I40E_VIRTCHNL_OP_VERSION:
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index adb33e2a0137..a23aa6704394 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -34,6 +34,7 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
#include "e1000_i210.h"
+#include "igb.h"
static s32 igb_get_invariants_82575(struct e1000_hw *);
static s32 igb_acquire_phy_82575(struct e1000_hw *);
@@ -71,6 +72,32 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] = {
36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
+/* Due to a hw erratum, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ * igb_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ struct igb_adapter *adapter = hw->back;
+ int i;
+
+ for (i = 10; i--;)
+ array_wr32(E1000_VFTA, offset, value);
+
+ wrfl();
+ adapter->shadow_vfta[offset] = value;
+}
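
For contrast, a hypothetical read-back variant of the same workaround; this is not in the driver and assumes igb's array_rd32() accessor, whereas the code above favours blind repetition and a single flush:

static void example_write_vfta_verified(struct e1000_hw *hw, u32 offset,
					u32 value)
{
	int tries = 10;

	do {
		array_wr32(E1000_VFTA, offset, value);
		wrfl();
	} while (array_rd32(E1000_VFTA, offset) != value && --tries);
}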
+
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
* @hw: pointer to the HW structure
@@ -398,6 +425,8 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
/* Set mta register count */
mac->mta_reg_count = 128;
+ /* Set uta register count */
+ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
/* Set rar entry count */
switch (mac->type) {
case e1000_82576:
@@ -429,6 +458,11 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
}
+ if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
+ mac->ops.write_vfta = igb_write_vfta_i350;
+ else
+ mac->ops.write_vfta = igb_write_vfta;
+
/* Set if part includes ASF firmware */
mac->asf_firmware_present = true;
/* Set if manageability features are enabled. */
@@ -1517,10 +1551,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
/* Disabling VLAN filtering */
hw_dbg("Initializing the IEEE VLAN\n");
- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
- igb_clear_vfta_i350(hw);
- else
- igb_clear_vfta(hw);
+ igb_clear_vfta(hw);
/* Setup the receive address */
igb_init_rx_addrs(hw, rar_count);
@@ -2889,7 +2920,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
#endif
};
-static struct e1000_phy_operations e1000_phy_ops_82575 = {
+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
.acquire = igb_acquire_phy_82575,
.get_cfg_done = igb_get_cfg_done_82575,
.release = igb_release_phy_82575,
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 2154aea7aa7e..de8805a2a2fe 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -56,10 +56,10 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
#define E1000_SRRCTL_TIMESTAMP 0x40000000
-#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
+#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
+#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index c3c598c347a9..e9f23ee8f15e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -356,7 +356,8 @@
/* Ethertype field values */
#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
-#define MAX_JUMBO_FRAME_SIZE 0x3F00
+/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
+#define MAX_JUMBO_FRAME_SIZE 0x2600
/* PBA constants */
#define E1000_PBA_34K 0x0022
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 4034207eb5cc..2fb2213cd562 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -325,7 +325,7 @@ struct e1000_mac_operations {
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
#endif
-
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
};
struct e1000_phy_operations {
@@ -372,7 +372,7 @@ struct e1000_thermal_sensor_data {
struct e1000_info {
s32 (*get_invariants)(struct e1000_hw *);
struct e1000_mac_operations *mac_ops;
- struct e1000_phy_operations *phy_ops;
+ const struct e1000_phy_operations *phy_ops;
struct e1000_nvm_operations *nvm_ops;
};
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 2a88595f956c..07cf4fe58338 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -92,10 +92,8 @@ void igb_clear_vfta(struct e1000_hw *hw)
{
u32 offset;
- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- array_wr32(E1000_VFTA, offset, 0);
- wrfl();
- }
+ for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
+ hw->mac.ops.write_vfta(hw, offset, 0);
}
/**
@@ -107,54 +105,14 @@ void igb_clear_vfta(struct e1000_hw *hw)
* Writes value at the given offset in the register array which stores
* the VLAN filter table.
**/
-static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
+ struct igb_adapter *adapter = hw->back;
+
array_wr32(E1000_VFTA, offset, value);
wrfl();
-}
-
-/* Due to a hw errata, if the host tries to configure the VFTA register
- * while performing queries from the BMC or DMA, then the VFTA in some
- * cases won't be written.
- */
-/**
- * igb_clear_vfta_i350 - Clear VLAN filter table
- * @hw: pointer to the HW structure
- *
- * Clears the register array which contains the VLAN filter table by
- * setting all the values to 0.
- **/
-void igb_clear_vfta_i350(struct e1000_hw *hw)
-{
- u32 offset;
- int i;
-
- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- for (i = 0; i < 10; i++)
- array_wr32(E1000_VFTA, offset, 0);
-
- wrfl();
- }
-}
-
-/**
- * igb_write_vfta_i350 - Write value to VLAN filter table
- * @hw: pointer to the HW structure
- * @offset: register offset in VLAN filter table
- * @value: register value written to VLAN filter table
- *
- * Writes value at the given offset in the register array which stores
- * the VLAN filter table.
- **/
-static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
-{
- int i;
-
- for (i = 0; i < 10; i++)
- array_wr32(E1000_VFTA, offset, value);
-
- wrfl();
+ adapter->shadow_vfta[offset] = value;
}
/**
@@ -183,40 +141,155 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
}
/**
+ * igb_find_vlvf_slot - find the VLAN id or the first empty slot
+ * @hw: pointer to hardware structure
+ * @vlan: VLAN id to write to VLAN filter
+ * @vlvf_bypass: skip VLVF if no match is found
+ *
+ * return the VLVF index where this VLAN id should be placed
+ *
+ **/
+static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
+{
+ s32 regindex, first_empty_slot;
+ u32 bits;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* if vlvf_bypass is set we don't want to use an empty slot; we
+ * will simply bypass the VLVF if there are no entries present in the
+ * VLVF that contain our VLAN
+ */
+ first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;
+
+ /* Search for the VLAN id in the VLVF entries. Save off the first empty
+ * slot found along the way.
+ *
+ * pre-decrement loop covering (E1000_VLVF_ARRAY_SIZE - 1) .. 1
+ */
+ for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
+ bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
+ if (bits == vlan)
+ return regindex;
+ if (!first_empty_slot && !bits)
+ first_empty_slot = regindex;
+ }
+
+ return first_empty_slot ? : -E1000_ERR_NO_SPACE;
+}
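
One subtlety in the slot search above is the final return statement:

/* "first_empty_slot ? : -E1000_ERR_NO_SPACE" uses the GCC "x ?: y"
 * extension: it yields first_empty_slot whenever that is non-zero,
 * which is either a real empty index or the -E1000_ERR_NO_SPACE
 * sentinel seeded for vlvf_bypass, and falls back to
 * -E1000_ERR_NO_SPACE when the scan found neither a match nor a hole.
 */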
+
+/**
* igb_vfta_set - enable or disable vlan in VLAN filter table
* @hw: pointer to the HW structure
- * @vid: VLAN id to add or remove
- * @add: if true add filter, if false remove
+ * @vlan: VLAN id to add or remove
+ * @vind: VMDq output index that maps queue to VLAN id
+ * @vlan_on: if true add filter, if false remove
*
* Sets or clears a bit in the VLAN filter table array based on VLAN id
* and if we are adding or removing the filter
**/
-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
{
- u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
- u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
- u32 vfta;
struct igb_adapter *adapter = hw->back;
- s32 ret_val = 0;
+ u32 regidx, vfta_delta, vfta, bits;
+ s32 vlvf_index;
- vfta = adapter->shadow_vfta[index];
+ if ((vlan > 4095) || (vind > 7))
+ return -E1000_ERR_PARAM;
- /* bit was set/cleared before we started */
- if ((!!(vfta & mask)) == add) {
- ret_val = -E1000_ERR_CONFIG;
- } else {
- if (add)
- vfta |= mask;
- else
- vfta &= ~mask;
+ /* this is a 2 part operation - first the VFTA, then the
+ * VLVF and VLVFB if VT Mode is set
+ * We don't write the VFTA until we know the VLVF part succeeded.
+ */
+
+ /* Part 1
+ * The VFTA is a bitstring made up of 128 32-bit registers
+ * that enable the particular VLAN id, much like the MTA:
+ * bits[11-5]: which register
+ * bits[4-0]: which bit in the register
+ */
+ regidx = vlan / 32;
+ vfta_delta = 1 << (vlan % 32);
+ vfta = adapter->shadow_vfta[regidx];
+
+ /* vfta_delta represents the difference between the current value
+ * of vfta and the value we want in the register. Since the diff
+ * is an XOR mask we can just update vfta using an XOR.
+ */
+ vfta_delta &= vlan_on ? ~vfta : vfta;
+ vfta ^= vfta_delta;
+
+ /* Part 2
+ * If VT Mode is set
+ * Either vlan_on
+ * make sure the VLAN is in VLVF
+ * set the vind bit in the matching VLVFB
+ * Or !vlan_on
+ * clear the pool bit and possibly the vind
+ */
+ if (!adapter->vfs_allocated_count)
+ goto vfta_update;
+
+ vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
+ if (vlvf_index < 0) {
+ if (vlvf_bypass)
+ goto vfta_update;
+ return vlvf_index;
}
- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
- igb_write_vfta_i350(hw, index, vfta);
- else
- igb_write_vfta(hw, index, vfta);
- adapter->shadow_vfta[index] = vfta;
- return ret_val;
+ bits = rd32(E1000_VLVF(vlvf_index));
+
+ /* set the pool bit */
+ bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ if (vlan_on)
+ goto vlvf_update;
+
+ /* clear the pool bit */
+ bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+
+ if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
+ /* Clear VFTA first, then disable VLVF. Otherwise
+ * we run the risk of stray packets leaking into
+ * the PF via the default pool
+ */
+ if (vfta_delta)
+ hw->mac.ops.write_vfta(hw, regidx, vfta);
+
+ /* disable VLVF and clear remaining bit from pool */
+ wr32(E1000_VLVF(vlvf_index), 0);
+
+ return 0;
+ }
+
+ /* If there are still bits set in the VLVFB registers
+ * for the VLAN ID indicated we need to see if the
+ * caller is requesting that we clear the VFTA entry bit.
+ * If the caller has requested that we clear the VFTA
+ * entry bit but there are still pools/VFs using this VLAN
+ * ID entry then ignore the request. We're not worried
+ * about the case where we're turning the VFTA VLAN ID
+ * entry bit on, only when requested to turn it off as
+ * there may be multiple pools and/or VFs using the
+ * VLAN ID entry. In that case we cannot clear the
+ * VFTA bit until all pools/VFs using that VLAN ID have also
+ * been cleared. This will be indicated by "bits" being
+ * zero.
+ */
+ vfta_delta = 0;
+
+vlvf_update:
+ /* record pool change and enable VLAN ID if not already enabled */
+ wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);
+
+vfta_update:
+ /* bit was set/cleared before we started */
+ if (vfta_delta)
+ hw->mac.ops.write_vfta(hw, regidx, vfta);
+
+ return 0;
}
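
A worked example of the Part 1 indexing above, with an arbitrary VLAN id:

/* vlan = 100:
 *   regidx     = 100 / 32        = 3
 *   vfta_delta = 1 << (100 % 32) = 1 << 4
 * Enabling VLAN 100 therefore XORs bit 4 into shadow_vfta[3], and the
 * result reaches hardware through hw->mac.ops.write_vfta().
 */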
/**
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index ea24961b0d70..90c8893c3eed 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -56,8 +56,9 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
void igb_clear_vfta(struct e1000_hw *hw);
-void igb_clear_vfta_i350(struct e1000_hw *hw);
-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
+void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind,
+ bool vlan_on, bool vlvf_bypass);
void igb_config_collision_dist(struct e1000_hw *hw);
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 162cc49345d0..10f5c9e016a9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -322,14 +322,20 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
u32 p2v_mailbox;
+ int count = 10;
- /* Take ownership of the buffer */
- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+ do {
+ /* Take ownership of the buffer */
+ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
- /* reserve mailbox for vf use */
- p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
- if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
- ret_val = 0;
+ /* reserve mailbox for vf use */
+ p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) {
+ ret_val = 0;
+ break;
+ }
+ udelay(1000);
+ } while (count-- > 0);
return ret_val;
}
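
Spelled out, the effect of the new loop:

/* The retry bounds the wait at roughly ten 1 ms spins before giving up
 * with -E1000_ERR_MBX, so transient VF ownership of the mailbox no
 * longer fails the PF path on the first contended access.
 */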
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index e3cb93bdb21a..9413fa61392f 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -95,7 +95,6 @@ struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
- u16 vlans_enabled;
u32 flags;
unsigned long last_nack;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
@@ -482,6 +481,7 @@ struct igb_adapter {
#define IGB_FLAG_MAS_ENABLE (1 << 12)
#define IGB_FLAG_HAS_MSIX (1 << 13)
#define IGB_FLAG_EEE (1 << 14)
+#define IGB_FLAG_VLAN_PROMISC BIT(15)
/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
@@ -510,6 +510,8 @@ enum igb_boards {
extern char igb_driver_name[];
extern char igb_driver_version[];
+int igb_open(struct net_device *netdev);
+int igb_close(struct net_device *netdev);
int igb_up(struct igb_adapter *);
void igb_down(struct igb_adapter *);
void igb_reinit_locked(struct igb_adapter *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 1d329f1d047b..7982243d1f9b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2017,7 +2017,7 @@ static void igb_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ igb_close(netdev);
else
igb_reset(adapter);
@@ -2050,7 +2050,7 @@ static void igb_diag_test(struct net_device *netdev,
clear_bit(__IGB_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ igb_open(netdev);
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 31e5f3942839..55a1405cb2a1 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -122,8 +122,8 @@ static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
-static int igb_open(struct net_device *);
-static int igb_close(struct net_device *);
+int igb_open(struct net_device *);
+int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
@@ -140,7 +140,7 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
-static void igb_set_uta(struct igb_adapter *adapter);
+static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
@@ -1534,12 +1534,13 @@ static void igb_irq_enable(struct igb_adapter *adapter)
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ u16 pf_id = adapter->vfs_allocated_count;
u16 vid = adapter->hw.mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;
if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
/* add VID to filter table */
- igb_vfta_set(hw, vid, true);
+ igb_vfta_set(hw, vid, pf_id, true, true);
adapter->mng_vlan_id = vid;
} else {
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
@@ -1549,7 +1550,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
(vid != old_vid) &&
!test_bit(old_vid, adapter->active_vlans)) {
/* remove VID from filter table */
- igb_vfta_set(hw, old_vid, false);
+ igb_vfta_set(hw, old_vid, pf_id, false, true);
}
}
@@ -1818,6 +1819,10 @@ void igb_down(struct igb_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
igb_reset(adapter);
+
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
+
igb_clean_all_tx_rings(adapter);
igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
@@ -1862,7 +1867,7 @@ void igb_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_fc_info *fc = &hw->fc;
- u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
+ u32 pba, hwm;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
@@ -1886,9 +1891,10 @@ void igb_reset(struct igb_adapter *adapter)
break;
}
- if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
- (mac->type < e1000_82576)) {
- /* adjust PBA for jumbo frames */
+ if (mac->type == e1000_82575) {
+ u32 min_rx_space, min_tx_space, needed_tx_space;
+
+ /* write Rx PBA so that hardware can report correct Tx PBA */
wr32(E1000_PBA, pba);
/* To maintain wire speed transmits, the Tx FIFO should be
@@ -1898,31 +1904,26 @@ void igb_reset(struct igb_adapter *adapter)
* one full receive packet and is similarly rounded up and
* expressed in KB.
*/
- pba = rd32(E1000_PBA);
- /* upper 16 bits has Tx packet buffer allocation size in KB */
- tx_space = pba >> 16;
- /* lower 16 bits has Rx packet buffer allocation size in KB */
- pba &= 0xffff;
- /* the Tx fifo also stores 16 bytes of information about the Tx
- * but don't include ethernet FCS because hardware appends it
+ min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
+
+ /* The Tx FIFO also stores 16 bytes of information about the Tx
+ * frame, but doesn't include the Ethernet FCS because hardware
+ * appends it.
+ * We only need to round down to the nearest 512 byte block
+ * count since the value we care about is 2 frames, not 1.
*/
- min_tx_space = (adapter->max_frame_size +
- sizeof(union e1000_adv_tx_desc) -
- ETH_FCS_LEN) * 2;
- min_tx_space = ALIGN(min_tx_space, 1024);
- min_tx_space >>= 10;
- /* software strips receive CRC, so leave room for it */
- min_rx_space = adapter->max_frame_size;
- min_rx_space = ALIGN(min_rx_space, 1024);
- min_rx_space >>= 10;
+ min_tx_space = adapter->max_frame_size;
+ min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
+ min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
+
+ /* upper 16 bits has Tx packet buffer allocation size in KB */
+ needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation
+ * allocation, take space away from current Rx allocation.
*/
- if (tx_space < min_tx_space &&
- ((min_tx_space - tx_space) < pba)) {
- pba = pba - (min_tx_space - tx_space);
+ if (needed_tx_space < pba) {
+ pba -= needed_tx_space;
/* if short on Rx space, Rx wins and must trump Tx
* adjustment
@@ -1930,18 +1931,20 @@ void igb_reset(struct igb_adapter *adapter)
if (pba < min_rx_space)
pba = min_rx_space;
}
+
+ /* adjust PBA for jumbo frames */
wr32(E1000_PBA, pba);
}
- /* flow control settings */
- /* The high water mark must be low enough to fit one full frame
- * (or the size used for early receive) above it in the Rx FIFO.
- * Set it to the lower of:
- * - 90% of the Rx FIFO size, or
- * - the full Rx FIFO size minus one full frame
+ /* flow control settings
+ * The high water mark must be low enough to fit one full frame
+ * after transmitting the pause frame. As such we must have enough
+ * space to allow for us to complete our current transmit and then
+ * receive the frame that is in progress from the link partner.
+ * Set it to:
+ * - the full Rx FIFO size minus one full Tx plus one full Rx frame
*/
- hwm = min(((pba << 10) * 9 / 10),
- ((pba << 10) - 2 * adapter->max_frame_size));
+ hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
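
A hedged numeric example of the new high-water calculation, using the 34 KB PBA constant and illustrative frame sizes:

/* pba = 34 (KB, e.g. E1000_PBA_34K), max_frame_size = 1522,
 * MAX_JUMBO_FRAME_SIZE = 9728:
 *   hwm            = (34 << 10) - (1522 + 9728) = 23566
 *   fc->high_water = 23566 & 0xFFFFFFF0        = 23552
 *   fc->low_water  = 23552 - 16                = 23536
 */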
@@ -2051,7 +2054,7 @@ static int igb_set_features(struct net_device *netdev,
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
igb_vlan_mode(netdev, features);
- if (!(changed & NETIF_F_RXALL))
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
netdev->features = features;
@@ -2064,6 +2067,25 @@ static int igb_set_features(struct net_device *netdev,
return 0;
}
+static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 vid,
+ u16 flags)
+{
+ /* guarantee we can provide a unique filter for the unicast address */
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ int vfn = adapter->vfs_allocated_count;
+ int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
+
+ if (netdev_uc_count(dev) >= rar_entries)
+ return -ENOMEM;
+ }
+
+ return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
+}
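
A sketch of the capacity check above with hypothetical numbers:

/* With rar_entry_count == 24 and 7 allocated VFs, entry 0 plus one
 * entry per VF are reserved, leaving 24 - (7 + 1) = 16 perfect filters
 * for unicast addresses before fdb_add falls back to -ENOMEM.
 */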
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2087,6 +2109,7 @@ static const struct net_device_ops igb_netdev_ops = {
#endif
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
+ .ndo_fdb_add = igb_ndo_fdb_add,
.ndo_features_check = passthru_features_check,
};
@@ -2349,27 +2372,35 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* assignment.
*/
netdev->features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX;
+ if (hw->mac.type >= e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CRC;
+
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
netdev->hw_features |= NETIF_F_RXALL;
+ if (hw->mac.type >= e1000_i350)
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
/* set this bit last since it cannot be part of hw_features */
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
- netdev->vlan_features |= NETIF_F_TSO |
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_TSO |
NETIF_F_TSO6 |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_SG;
+ NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC;
+
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -2378,11 +2409,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
- if (hw->mac.type >= e1000_82576) {
- netdev->hw_features |= NETIF_F_SCTP_CRC;
- netdev->features |= NETIF_F_SCTP_CRC;
- }
-
netdev->priv_flags |= IFF_UNICAST_FLT;
adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
@@ -2515,6 +2541,26 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->wol = 0;
}
+ /* Some vendors want the ability to use the EEPROM setting as
+ * enable/disable only, and not for capability
+ */
+ if (((hw->mac.type == e1000_i350) ||
+ (hw->mac.type == e1000_i354)) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+ if (hw->mac.type == e1000_i350) {
+ if (((pdev->subsystem_device == 0x5001) ||
+ (pdev->subsystem_device == 0x5002)) &&
+ (hw->bus.func == 0)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+ if (pdev->subsystem_device == 0x1F52)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ }
+
device_set_wakeup_enable(&adapter->pdev->dev,
adapter->flags & IGB_FLAG_WOL_SUPPORTED);
@@ -2921,14 +2967,6 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
/* Device supports enough interrupts without queue pairing. */
break;
case e1000_82576:
- /* If VFs are going to be allocated with RSS queues then we
- * should pair the queues in order to conserve interrupts due
- * to limited supply.
- */
- if ((adapter->rss_queues > 1) &&
- (adapter->vfs_allocated_count > 6))
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
- /* fall through */
case e1000_82580:
case e1000_i350:
case e1000_i354:
@@ -2939,6 +2977,8 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
*/
if (adapter->rss_queues > (max_rss_queues / 2))
adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
break;
}
}
@@ -3132,7 +3172,7 @@ err_setup_tx:
return err;
}
-static int igb_open(struct net_device *netdev)
+int igb_open(struct net_device *netdev)
{
return __igb_open(netdev, false);
}
@@ -3169,7 +3209,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
return 0;
}
-static int igb_close(struct net_device *netdev)
+int igb_close(struct net_device *netdev)
{
return __igb_close(netdev, false);
}
@@ -3460,12 +3500,12 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
wr32(E1000_VT_CTL, vtctl);
}
if (adapter->rss_queues > 1)
- mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+ mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
else
mrqc |= E1000_MRQC_ENABLE_VMDQ;
} else {
if (hw->mac.type != e1000_i211)
- mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
+ mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
}
igb_vmm_control(adapter);
@@ -3498,7 +3538,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
/* disable store bad packets and clear size bits. */
rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
- /* enable LPE to prevent packets larger than max_frame_size */
+ /* enable LPE to allow for reception of jumbo frames */
rctl |= E1000_RCTL_LPE;
/* disable queue 0 to prevent tail write w/o re-config */
@@ -3522,8 +3562,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
- rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
- E1000_RCTL_DPF | /* Allow filtered pause */
+ rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
* and that breaks VLANs.
@@ -3539,12 +3578,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
- /* if it isn't the PF check to see if VFs are enabled and
- * increase the size to support vlan tags
- */
- if (vfn < adapter->vfs_allocated_count &&
- adapter->vf_data[vfn].vlans_enabled)
- size += VLAN_TAG_SIZE;
+ if (size > MAX_JUMBO_FRAME_SIZE)
+ size = MAX_JUMBO_FRAME_SIZE;
vmolr = rd32(E1000_VMOLR(vfn));
vmolr &= ~E1000_VMOLR_RLPML_MASK;
@@ -3554,30 +3589,26 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
return 0;
}
-/**
- * igb_rlpml_set - set maximum receive packet size
- * @adapter: board private structure
- *
- * Configure maximum receivable packet size.
- **/
-static void igb_rlpml_set(struct igb_adapter *adapter)
+static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
+ int vfn, bool enable)
{
- u32 max_frame_size = adapter->max_frame_size;
struct e1000_hw *hw = &adapter->hw;
- u16 pf_id = adapter->vfs_allocated_count;
+ u32 val, reg;
- if (pf_id) {
- igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
- /* If we're in VMDQ or SR-IOV mode, then set global RLPML
- * to our max jumbo frame size, in case we need to enable
- * jumbo frames on one of the rings later.
- * This will not pass over-length frames into the default
- * queue because it's gated by the VMOLR.RLPML.
- */
- max_frame_size = MAX_JUMBO_FRAME_SIZE;
- }
+ if (hw->mac.type < e1000_82576)
+ return;
- wr32(E1000_RLPML, max_frame_size);
+ if (hw->mac.type == e1000_i350)
+ reg = E1000_DVMOLR(vfn);
+ else
+ reg = E1000_VMOLR(vfn);
+
+ val = rd32(reg);
+ if (enable)
+ val |= E1000_VMOLR_STRVLAN;
+ else
+ val &= ~(E1000_VMOLR_STRVLAN);
+ wr32(reg, val);
}
static inline void igb_set_vmolr(struct igb_adapter *adapter,
@@ -3593,14 +3624,6 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
return;
vmolr = rd32(E1000_VMOLR(vfn));
- vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
- if (hw->mac.type == e1000_i350) {
- u32 dvmolr;
-
- dvmolr = rd32(E1000_DVMOLR(vfn));
- dvmolr |= E1000_DVMOLR_STRVLAN;
- wr32(E1000_DVMOLR(vfn), dvmolr);
- }
if (aupe)
vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
else
@@ -3684,9 +3707,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
{
int i;
- /* set UTA to appropriate mode */
- igb_set_uta(adapter);
-
/* set the correct pool for the PF default MAC address in entry 0 */
igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
adapter->vfs_allocated_count);
@@ -4004,6 +4024,130 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
return count;
}
+static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 i, pf_id;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ case e1000_i350:
+ /* VLAN filtering needed for VLAN prio filter */
+ if (adapter->netdev->features & NETIF_F_NTUPLE)
+ break;
+ /* fall through */
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i354:
+ /* VLAN filtering needed for pool filtering */
+ if (adapter->vfs_allocated_count)
+ break;
+ /* fall through */
+ default:
+ return 1;
+ }
+
+ /* We are already in VLAN promisc, nothing to do */
+ if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
+ return 0;
+
+ if (!adapter->vfs_allocated_count)
+ goto set_vfta;
+
+ /* Add PF to all active pools */
+ pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
+
+ for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
+ u32 vlvf = rd32(E1000_VLVF(i));
+
+ vlvf |= 1 << pf_id;
+ wr32(E1000_VLVF(i), vlvf);
+ }
+
+set_vfta:
+ /* Set all bits in the VLAN filter table array */
+ for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
+ hw->mac.ops.write_vfta(hw, i, ~0U);
+
+ /* Set flag so we don't redo unnecessary work */
+ adapter->flags |= IGB_FLAG_VLAN_PROMISC;
+
+ return 0;
+}
+
+#define VFTA_BLOCK_SIZE 8
+static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
+ u32 vid_start = vfta_offset * 32;
+ u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
+ u32 i, vid, word, bits, pf_id;
+
+ /* guarantee that we don't scrub out management VLAN */
+ vid = adapter->mng_vlan_id;
+ if (vid >= vid_start && vid < vid_end)
+ vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+
+ if (!adapter->vfs_allocated_count)
+ goto set_vfta;
+
+ pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
+
+ for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
+ u32 vlvf = rd32(E1000_VLVF(i));
+
+ /* pull VLAN ID from VLVF */
+ vid = vlvf & VLAN_VID_MASK;
+
+ /* only concern ourselves with a certain range */
+ if (vid < vid_start || vid >= vid_end)
+ continue;
+
+ if (vlvf & E1000_VLVF_VLANID_ENABLE) {
+ /* record VLAN ID in VFTA */
+ vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+
+ /* if PF is part of this then continue */
+ if (test_bit(vid, adapter->active_vlans))
+ continue;
+ }
+
+ /* remove PF from the pool */
+ bits = ~(1 << pf_id);
+ bits &= rd32(E1000_VLVF(i));
+ wr32(E1000_VLVF(i), bits);
+ }
+
+set_vfta:
+ /* extract values from active_vlans and write back to VFTA */
+ for (i = VFTA_BLOCK_SIZE; i--;) {
+ vid = (vfta_offset + i) * 32;
+ word = vid / BITS_PER_LONG;
+ bits = vid % BITS_PER_LONG;
+
+ vfta[i] |= adapter->active_vlans[word] >> bits;
+
+ hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
+ }
+}
+
+static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
+{
+ u32 i;
+
+ /* We are not in VLAN promisc, nothing to do */
+ if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
+ return;
+
+ /* Set flag so we don't redo unnecessary work */
+ adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
+
+ for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
+ igb_scrub_vfta(adapter, i);
+}
+
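
The promisc enable/scrub pair above leans on one arithmetic fact: the VLAN filter table is an array of 32-bit registers, so VLAN ID v lives in register v/32 at bit v%32. A minimal userspace sketch of that mapping; the table-size constant is an assumption mirroring E1000_VLAN_FILTER_TBL_SIZE, not copied from the driver:

#include <stdint.h>
#include <stdio.h>

#define VLAN_N_VID           4096
#define VLAN_FILTER_TBL_SIZE (VLAN_N_VID / 32)   /* 128 registers */

static void vfta_locate(uint16_t vid, uint32_t *reg_idx, uint32_t *bit_mask)
{
        *reg_idx  = vid / 32;          /* which 32-bit VFTA register */
        *bit_mask = 1u << (vid % 32);  /* which bit inside it */
}

int main(void)
{
        uint32_t idx, mask;

        vfta_locate(100, &idx, &mask);
        printf("VID 100 -> VFTA[%u], bit mask 0x%08x\n", idx, mask);
        return 0;
}
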
/**
* igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -4018,21 +4162,17 @@ static void igb_set_rx_mode(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
unsigned int vfn = adapter->vfs_allocated_count;
- u32 rctl, vmolr = 0;
+ u32 rctl = 0, vmolr = 0;
int count;
/* Check for Promiscuous and All Multicast modes */
- rctl = rd32(E1000_RCTL);
-
- /* clear the effected bits */
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
-
if (netdev->flags & IFF_PROMISC) {
- /* retain VLAN HW filtering if in VT mode */
- if (adapter->vfs_allocated_count)
- rctl |= E1000_RCTL_VFE;
- rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+ rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
+ vmolr |= E1000_VMOLR_MPME;
+
+ /* enable use of UTA filter to force packets to default pool */
+ if (hw->mac.type == e1000_82576)
+ vmolr |= E1000_VMOLR_ROPE;
} else {
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
@@ -4050,17 +4190,34 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= E1000_VMOLR_ROMPE;
}
}
- /* Write addresses to available RAR registers, if there is not
- * sufficient space to store all the addresses then enable
- * unicast promiscuous mode
- */
- count = igb_write_uc_addr_list(netdev);
- if (count < 0) {
- rctl |= E1000_RCTL_UPE;
- vmolr |= E1000_VMOLR_ROPE;
- }
- rctl |= E1000_RCTL_VFE;
}
+
+ /* Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = igb_write_uc_addr_list(netdev);
+ if (count < 0) {
+ rctl |= E1000_RCTL_UPE;
+ vmolr |= E1000_VMOLR_ROPE;
+ }
+
+ /* enable VLAN filtering by default */
+ rctl |= E1000_RCTL_VFE;
+
+ /* disable VLAN filtering for modes that require it */
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev->features & NETIF_F_RXALL)) {
+ /* if we fail to set all rules then just clear VFE */
+ if (igb_vlan_promisc_enable(adapter))
+ rctl &= ~E1000_RCTL_VFE;
+ } else {
+ igb_vlan_promisc_disable(adapter);
+ }
+
+ /* update state of unicast, multicast, and VLAN filtering modes */
+ rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
+ E1000_RCTL_VFE);
wr32(E1000_RCTL, rctl);
/* In order to support SR-IOV and eventually VMDq it is necessary to set
@@ -4071,9 +4228,19 @@ static void igb_set_rx_mode(struct net_device *netdev)
if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
return;
+ /* set UTA to appropriate mode */
+ igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
+
vmolr |= rd32(E1000_VMOLR(vfn)) &
~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+
+ /* enable Rx jumbo frames, no need for restriction */
+ vmolr &= ~E1000_VMOLR_RLPML_MASK;
+ vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE;
+
wr32(E1000_VMOLR(vfn), vmolr);
+ wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
+
igb_restore_vf_multicasts(adapter);
}
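
igb_set_rx_mode() now builds rctl from zero and only at the end merges in the live register with the bits it owns masked out, so unrelated RCTL bits survive untouched. A hedged sketch of that read-modify-write pattern; the bit values below are made up and only stand in for E1000_RCTL_UPE/MPE/VFE:

#include <stdint.h>
#include <stdio.h>

#define RCTL_UPE   0x0008u    /* hypothetical bit positions */
#define RCTL_MPE   0x0010u
#define RCTL_VFE   0x0040u
#define RCTL_OWNED (RCTL_UPE | RCTL_MPE | RCTL_VFE)

static uint32_t hw_rctl = 0x8022u | RCTL_MPE;    /* pretend live register */

int main(void)
{
        uint32_t rctl = 0;

        rctl |= RCTL_UPE | RCTL_VFE;      /* bits this path decided on */
        rctl |= hw_rctl & ~RCTL_OWNED;    /* preserve everything else */
        hw_rctl = rctl;
        printf("RCTL now 0x%08x (MPE cleared, UPE|VFE set)\n", hw_rctl);
        return 0;
}
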
@@ -4227,6 +4394,7 @@ static void igb_watchdog_task(struct work_struct *work)
u32 link;
int i;
u32 connsw;
+ u16 phy_data, retry_count = 20;
link = igb_has_link(adapter);
@@ -4305,6 +4473,25 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}
+ if (adapter->link_speed != SPEED_1000)
+ goto no_wait;
+
+ /* wait for Remote receiver status OK */
+retry_read_status:
+ if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
+ &phy_data)) {
+ if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
+ retry_count) {
+ msleep(100);
+ retry_count--;
+ goto retry_read_status;
+ } else if (!retry_count) {
+ dev_err(&adapter->pdev->dev, "exceeded max 2 second wait\n");
+ }
+ } else {
+ dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status Reg\n");
+ }
+no_wait:
netif_carrier_on(netdev);
igb_ping_all_vfs(adapter);
@@ -4713,70 +4900,57 @@ static int igb_tso(struct igb_ring *tx_ring,
return 1;
}
+static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
return;
- } else {
- u8 l4_hdr = 0;
-
- switch (first->protocol) {
- case htons(ETH_P_IP):
- vlan_macip_lens |= skb_network_header_len(skb);
- type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
- break;
- case htons(ETH_P_IPV6):
- vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but proto=%x!\n",
- first->protocol);
- }
- break;
- }
+ goto no_csum;
+ }
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = tcp_hdrlen(skb) <<
- E1000_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- E1000_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- E1000_ADVTXD_L4LEN_SHIFT;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
- }
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ igb_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
-
- /* update TX checksum flag */
- first->tx_flags |= IGB_TX_FLAGS_CSUM;
+ default:
+ skb_checksum_help(skb);
+ goto csum_failed;
}
+ /* update TX checksum flag */
+ first->tx_flags |= IGB_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
+no_csum:
vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
- igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
+ igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}
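
The rewritten igb_tx_csum() identifies the L4 protocol from skb->csum_offset alone instead of re-parsing headers, with SCTP double-checked since an offset match by itself is not proof. A standalone sketch of that dispatch; the struct layouts below are simplified stand-ins, not the kernel's definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's tcphdr/udphdr/sctphdr */
struct tcphdr  { uint16_t source, dest; uint32_t seq, ack_seq;
                 uint16_t flags, window, check, urg_ptr; };
struct udphdr  { uint16_t source, dest, len, check; };
struct sctphdr { uint16_t source, dest; uint32_t vtag, checksum; };

static const char *l4_from_csum_offset(size_t csum_offset)
{
        switch (csum_offset) {
        case offsetof(struct tcphdr, check):     return "TCP";
        case offsetof(struct udphdr, check):     return "UDP";
        case offsetof(struct sctphdr, checksum): return "SCTP (verify hdr!)";
        default:                                 return "unknown";
        }
}

int main(void)
{
        printf("offset %zu -> %s\n", offsetof(struct tcphdr, check),
               l4_from_csum_offset(offsetof(struct tcphdr, check)));
        printf("offset %zu -> %s\n", offsetof(struct sctphdr, checksum),
               l4_from_csum_offset(offsetof(struct sctphdr, checksum)));
        return 0;
}
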
#define IGB_SET_FLAG(_input, _flag, _result) \
@@ -5088,16 +5262,6 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
@@ -5792,125 +5956,132 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
- u32 pool_mask, reg, vid;
- int i;
+ u32 pool_mask, vlvf_mask, i;
- pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+ /* create mask for VF and other pools */
+ pool_mask = E1000_VLVF_POOLSEL_MASK;
+ vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* drop PF from pool bits */
+ pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count));
/* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
+ for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
+ u32 vlvf = rd32(E1000_VLVF(i));
+ u32 vfta_mask, vid, vfta;
/* remove the vf from the pool */
- reg &= ~pool_mask;
-
- /* if pool is empty then remove entry from vfta */
- if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
- (reg & E1000_VLVF_VLANID_ENABLE)) {
- reg = 0;
- vid = reg & E1000_VLVF_VLANID_MASK;
- igb_vfta_set(hw, vid, false);
- }
+ if (!(vlvf & vlvf_mask))
+ continue;
+
+ /* clear out bit from VLVF */
+ vlvf ^= vlvf_mask;
+
+ /* if other pools are present, just remove ourselves */
+ if (vlvf & pool_mask)
+ goto update_vlvfb;
- wr32(E1000_VLVF(i), reg);
+ /* if PF is present, leave VFTA */
+ if (vlvf & E1000_VLVF_POOLSEL_MASK)
+ goto update_vlvf;
+
+ vid = vlvf & E1000_VLVF_VLANID_MASK;
+ vfta_mask = 1 << (vid % 32);
+
+ /* clear bit from VFTA */
+ vfta = adapter->shadow_vfta[vid / 32];
+ if (vfta & vfta_mask)
+ hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
+update_vlvf:
+ /* clear pool selection enable */
+ if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
+ vlvf &= E1000_VLVF_POOLSEL_MASK;
+ else
+ vlvf = 0;
+update_vlvfb:
+ /* clear pool bits */
+ wr32(E1000_VLVF(i), vlvf);
}
+}
- adapter->vf_data[vf].vlans_enabled = 0;
+static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
+{
+ u32 vlvf;
+ int idx;
+
+ /* shortcut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the VLAN id in the VLVF entries */
+ for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
+ vlvf = rd32(E1000_VLVF(idx));
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ return idx;
}
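
Note the loop shape here and in the scrub code: for (idx = N; --idx;) visits N-1 down to 1 and deliberately never touches entry 0, which matches the VLAN-0 shortcut at the top of igb_find_vlvf_entry(). A two-line demonstration of the idiom:

#include <stdio.h>

int main(void)
{
        int n = 4;

        /* same idiom as igb_find_vlvf_entry(): visits 3, 2, 1 - never 0 */
        for (int i = n; --i;)
                printf("visit %d\n", i);
        return 0;
}
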
-static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
+void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
struct e1000_hw *hw = &adapter->hw;
- u32 reg, i;
-
- /* The vlvf table only exists on 82576 hardware and newer */
- if (hw->mac.type < e1000_82576)
- return -1;
+ u32 bits, pf_id;
+ int idx;
- /* we only need to do this if VMDq is enabled */
- if (!adapter->vfs_allocated_count)
- return -1;
+ idx = igb_find_vlvf_entry(hw, vid);
+ if (!idx)
+ return;
- /* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
- if ((reg & E1000_VLVF_VLANID_ENABLE) &&
- vid == (reg & E1000_VLVF_VLANID_MASK))
- break;
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
+ bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
+ bits &= rd32(E1000_VLVF(idx));
+
+ /* Disable the filter so this falls into the default pool. */
+ if (!bits) {
+ if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
+ wr32(E1000_VLVF(idx), 1 << pf_id);
+ else
+ wr32(E1000_VLVF(idx), 0);
}
+}
- if (add) {
- if (i == E1000_VLVF_ARRAY_SIZE) {
- /* Did not find a matching VLAN ID entry that was
- * enabled. Search for a free filter entry, i.e.
- * one without the enable bit set
- */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
- if (!(reg & E1000_VLVF_VLANID_ENABLE))
- break;
- }
- }
- if (i < E1000_VLVF_ARRAY_SIZE) {
- /* Found an enabled/available entry */
- reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
-
- /* if !enabled we need to set this up in vfta */
- if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
- /* add VID to filter table */
- igb_vfta_set(hw, vid, true);
- reg |= E1000_VLVF_VLANID_ENABLE;
- }
- reg &= ~E1000_VLVF_VLANID_MASK;
- reg |= vid;
- wr32(E1000_VLVF(i), reg);
-
- /* do not modify RLPML for PF devices */
- if (vf >= adapter->vfs_allocated_count)
- return 0;
-
- if (!adapter->vf_data[vf].vlans_enabled) {
- u32 size;
-
- reg = rd32(E1000_VMOLR(vf));
- size = reg & E1000_VMOLR_RLPML_MASK;
- size += 4;
- reg &= ~E1000_VMOLR_RLPML_MASK;
- reg |= size;
- wr32(E1000_VMOLR(vf), reg);
- }
+static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
+ bool add, u32 vf)
+{
+ int pf_id = adapter->vfs_allocated_count;
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
- adapter->vf_data[vf].vlans_enabled++;
- }
- } else {
- if (i < E1000_VLVF_ARRAY_SIZE) {
- /* remove vf from the pool */
- reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
- /* if pool is empty then remove entry from vfta */
- if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
- reg = 0;
- igb_vfta_set(hw, vid, false);
- }
- wr32(E1000_VLVF(i), reg);
-
- /* do not modify RLPML for PF devices */
- if (vf >= adapter->vfs_allocated_count)
- return 0;
-
- adapter->vf_data[vf].vlans_enabled--;
- if (!adapter->vf_data[vf].vlans_enabled) {
- u32 size;
-
- reg = rd32(E1000_VMOLR(vf));
- size = reg & E1000_VMOLR_RLPML_MASK;
- size -= 4;
- reg &= ~E1000_VMOLR_RLPML_MASK;
- reg |= size;
- wr32(E1000_VMOLR(vf), reg);
- }
- }
+ /* If the VLAN overlaps with one the PF is currently monitoring, make
+ * sure that we are able to allocate a VLVF entry. This may be
+ * redundant but it guarantees PF will maintain visibility to
+ * the VLAN.
+ */
+ if (add && test_bit(vid, adapter->active_vlans)) {
+ err = igb_vfta_set(hw, vid, pf_id, true, false);
+ if (err)
+ return err;
}
- return 0;
+
+ err = igb_vfta_set(hw, vid, vf, add, false);
+
+ if (add && !err)
+ return err;
+
+ /* If we failed to add the VF VLAN or we are removing the VF VLAN
+ * we may need to drop the PF pool bit in order to allow us to free
+ * up the VLVF resources.
+ */
+ if (test_bit(vid, adapter->active_vlans) ||
+ (adapter->flags & IGB_FLAG_VLAN_PROMISC))
+ igb_update_pf_vlvf(adapter, vid);
+
+ return err;
}
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
@@ -5923,130 +6094,104 @@ static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
wr32(E1000_VMVIR(vf), 0);
}
-static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
+ u16 vlan, u8 qos)
{
- int err = 0;
- struct igb_adapter *adapter = netdev_priv(netdev);
+ int err;
- if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
- return -EINVAL;
- if (vlan || qos) {
- err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
- if (err)
- goto out;
- igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
- igb_set_vmolr(adapter, vf, !vlan);
- adapter->vf_data[vf].pf_vlan = vlan;
- adapter->vf_data[vf].pf_qos = qos;
- dev_info(&adapter->pdev->dev,
- "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
- if (test_bit(__IGB_DOWN, &adapter->state)) {
- dev_warn(&adapter->pdev->dev,
- "The VF VLAN has been set, but the PF device is not up.\n");
- dev_warn(&adapter->pdev->dev,
- "Bring the PF device up before attempting to use the VF device.\n");
- }
- } else {
- igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
- false, vf);
- igb_set_vmvir(adapter, vlan, vf);
- igb_set_vmolr(adapter, vf, true);
- adapter->vf_data[vf].pf_vlan = 0;
- adapter->vf_data[vf].pf_qos = 0;
+ err = igb_set_vf_vlan(adapter, vlan, true, vf);
+ if (err)
+ return err;
+
+ igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vlan);
+
+ /* revoke access to previous VLAN */
+ if (vlan != adapter->vf_data[vf].pf_vlan)
+ igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
+ igb_set_vf_vlan_strip(adapter, vf, true);
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set, but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before attempting to use the VF device.\n");
}
-out:
+
return err;
}
-static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
+static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
{
- struct e1000_hw *hw = &adapter->hw;
- int i;
- u32 reg;
+ /* Restore tagless access via VLAN 0 */
+ igb_set_vf_vlan(adapter, 0, true, vf);
- /* Find the vlan filter for this id */
- for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
- if ((reg & E1000_VLVF_VLANID_ENABLE) &&
- vid == (reg & E1000_VLVF_VLANID_MASK))
- break;
- }
+ igb_set_vmvir(adapter, 0, vf);
+ igb_set_vmolr(adapter, vf, true);
+
+ /* Remove any PF assigned VLAN */
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
- if (i >= E1000_VLVF_ARRAY_SIZE)
- i = -1;
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;
+ igb_set_vf_vlan_strip(adapter, vf, false);
- return i;
+ return 0;
}
-static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
{
- struct e1000_hw *hw = &adapter->hw;
- int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
- int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
- int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);
- /* If in promiscuous mode we need to make sure the PF also has
- * the VLAN filter set.
- */
- if (add && (adapter->netdev->flags & IFF_PROMISC))
- err = igb_vlvf_set(adapter, vid, add,
- adapter->vfs_allocated_count);
- if (err)
- goto out;
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
- err = igb_vlvf_set(adapter, vid, add, vf);
+ return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
+ igb_disable_port_vlan(adapter, vf);
+}
- if (err)
- goto out;
+static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+ int ret;
- /* Go through all the checks to see if the VLAN filter should
- * be wiped completely.
- */
- if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
- u32 vlvf, bits;
- int regndx = igb_find_vlvf_entry(adapter, vid);
-
- if (regndx < 0)
- goto out;
- /* See if any other pools are set for this VLAN filter
- * entry other than the PF.
- */
- vlvf = bits = rd32(E1000_VLVF(regndx));
- bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
- adapter->vfs_allocated_count);
- /* If the filter was removed then ensure PF pool bit
- * is cleared if the PF only added itself to the pool
- * because the PF is in promiscuous mode.
- */
- if ((vlvf & VLAN_VID_MASK) == vid &&
- !test_bit(vid, adapter->active_vlans) &&
- !bits)
- igb_vlvf_set(adapter, vid, add,
- adapter->vfs_allocated_count);
- }
+ if (adapter->vf_data[vf].pf_vlan)
+ return -1;
-out:
- return err;
+ /* VLAN 0 is a special case, don't allow it to be removed */
+ if (!vid && !add)
+ return 0;
+
+ ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
+ if (!ret)
+ igb_set_vf_vlan_strip(adapter, vf, !!vid);
+ return ret;
}
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
- /* clear flags - except flag that indicates PF has set the MAC */
- adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
- adapter->vf_data[vf].last_nack = jiffies;
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
- /* reset offloads to defaults */
- igb_set_vmolr(adapter, vf, true);
+ /* clear flags - except flag that indicates PF has set the MAC */
+ vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
+ vf_data->last_nack = jiffies;
/* reset vlans for device */
igb_clear_vf_vfta(adapter, vf);
- if (adapter->vf_data[vf].pf_vlan)
- igb_ndo_set_vf_vlan(adapter->netdev, vf,
- adapter->vf_data[vf].pf_vlan,
- adapter->vf_data[vf].pf_qos);
- else
- igb_clear_vf_vfta(adapter, vf);
+ igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
+ igb_set_vmvir(adapter, vf_data->pf_vlan |
+ (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
+ igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
/* reset multicast table array for vf */
adapter->vf_data[vf].num_vf_mc_hashes = 0;
@@ -6191,7 +6336,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
"VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
vf);
else
- retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
default:
dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
@@ -6233,6 +6378,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
/**
* igb_set_uta - Set unicast filter table address
* @adapter: board private structure
+ * @set: boolean indicating if we are setting or clearing bits
*
* The unicast table address is a register array of 32-bit registers.
* The table is meant to be used in a way similar to how the MTA is used
@@ -6240,21 +6386,18 @@ static void igb_msg_task(struct igb_adapter *adapter)
* set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
* enable bit to allow vlan tag stripping when promiscuous mode is enabled
**/
-static void igb_set_uta(struct igb_adapter *adapter)
+static void igb_set_uta(struct igb_adapter *adapter, bool set)
{
struct e1000_hw *hw = &adapter->hw;
+ u32 uta = set ? ~0 : 0;
int i;
- /* The UTA table only exists on 82576 hardware and newer */
- if (hw->mac.type < e1000_82576)
- return;
-
/* we only need to do this if VMDq is enabled */
if (!adapter->vfs_allocated_count)
return;
- for (i = 0; i < hw->mac.uta_reg_count; i++)
- array_wr32(E1000_UTA, i, ~0);
+ for (i = hw->mac.uta_reg_count; i--;)
+ array_wr32(E1000_UTA, i, uta);
}
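
igb_set_uta() collapses to filling every UTA register with all-ones or all-zeroes depending on the new bool argument. A sketch of the fill pattern; the register count is a made-up placeholder for hw->mac.uta_reg_count:

#include <stdint.h>
#include <stdio.h>

#define UTA_REG_COUNT 8   /* hypothetical; hw->mac.uta_reg_count in igb */

static uint32_t uta[UTA_REG_COUNT];

static void set_uta(int set)
{
        uint32_t fill = set ? ~0u : 0u;

        for (int i = UTA_REG_COUNT; i--;)   /* count-down, like the driver */
                uta[i] = fill;
}

int main(void)
{
        set_uta(1);
        printf("UTA[0]=0x%08x\n", uta[0]);
        set_uta(0);
        printf("UTA[0]=0x%08x\n", uta[0]);
        return 0;
}
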
/**
@@ -6630,7 +6773,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
/* Even if we own the page, we are not allowed to use atomic_set()
* This would break get_page_unless_zero() users.
*/
- atomic_inc(&page->_count);
+ page_ref_inc(page);
return true;
}
@@ -7202,7 +7345,7 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
wr32(E1000_CTRL, ctrl);
}
- igb_rlpml_set(adapter);
+ igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
}
static int igb_vlan_rx_add_vid(struct net_device *netdev,
@@ -7212,11 +7355,9 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
int pf_id = adapter->vfs_allocated_count;
- /* attempt to add filter to vlvf array */
- igb_vlvf_set(adapter, vid, true, pf_id);
-
/* add the filter since PF can receive vlans w/o entry in vlvf */
- igb_vfta_set(hw, vid, true);
+ if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
+ igb_vfta_set(hw, vid, pf_id, true, !!vid);
set_bit(vid, adapter->active_vlans);
@@ -7227,16 +7368,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
int pf_id = adapter->vfs_allocated_count;
- s32 err;
-
- /* remove vlan from VLVF table array */
- err = igb_vlvf_set(adapter, vid, false, pf_id);
+ struct e1000_hw *hw = &adapter->hw;
- /* if vid was not present in VLVF just remove it from table */
- if (err)
- igb_vfta_set(hw, vid, false);
+ /* remove VID from filter table */
+ if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
+ igb_vfta_set(hw, vid, pf_id, false, true);
clear_bit(vid, adapter->active_vlans);
@@ -7245,11 +7382,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev,
static void igb_restore_vlan(struct igb_adapter *adapter)
{
- u16 vid;
+ u16 vid = 1;
igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+ igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
@@ -7704,15 +7842,14 @@ static void igb_io_resume(struct pci_dev *pdev)
static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
u8 qsel)
{
- u32 rar_low, rar_high;
struct e1000_hw *hw = &adapter->hw;
+ u32 rar_low, rar_high;
/* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
+ * from network order (big endian) to CPU endian
*/
- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+ rar_low = le32_to_cpup((__be32 *)(addr));
+ rar_high = le16_to_cpup((__be16 *)(addr + 4));
/* Indicate to hardware the Address is Valid. */
rar_high |= E1000_RAH_AV;
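
The rar_set_qsel change swaps hand-rolled byte shifts for le32_to_cpup()/le16_to_cpup(), since the hardware wants the MAC address in little-endian order. A userspace sketch showing the two forms agree; memcpy stands in for le32_to_cpup() and only matches on a little-endian host (the kernel helper also handles big-endian):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        uint32_t lo_shift, lo_memcpy;
        uint16_t hi;

        /* old style: explicit byte shifts (endian-independent) */
        lo_shift = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);

        /* new style: reinterpret the first 4 bytes as a LE word */
        memcpy(&lo_memcpy, addr, 4);
        memcpy(&hi, addr + 4, 2);

        printf("shift=0x%08x memcpy=0x%08x (match on LE hosts)\n",
               lo_shift, lo_memcpy);
        return 0;
}
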
@@ -7959,9 +8096,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
* than the Rx threshold. Set hwm to PBA - max frame
* size in 16B units, capping it at PBA - 6KB.
*/
- hwm = 64 * pba - adapter->max_frame_size / 16;
- if (hwm < 64 * (pba - 6))
- hwm = 64 * (pba - 6);
+ hwm = 64 * (pba - 6);
reg = rd32(E1000_FCRTC);
reg &= ~E1000_FCRTC_RTH_COAL_MASK;
reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
@@ -7971,9 +8106,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
* frame size, capping it at PBA - 10KB.
*/
- dmac_thr = pba - adapter->max_frame_size / 512;
- if (dmac_thr < pba - 10)
- dmac_thr = pba - 10;
+ dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c44df87c38de..22a8a29895b4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -525,7 +525,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
ts.tv_nsec = rq->perout.period.nsec;
ns = timespec64_to_ns(&ts);
ns = ns >> 1;
- if (on && ns <= 70000000LL) {
+ if (on && ((ns <= 70000000LL) || (ns == 125000000LL) ||
+ (ns == 250000000LL) || (ns == 500000000LL))) {
if (ns < 8LL)
return -EINVAL;
use_freq = 1;
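
The igb_ptp change admits three extra exact half-periods (125/250/500 ms) into frequency mode alongside the existing <=70 ms window; the requested period is halved because the output pin toggles twice per period. A sketch of the decision under those assumptions (the function name is invented, not from the driver):

#include <stdint.h>
#include <stdio.h>

static int use_freq_mode(int64_t period_ns)
{
        int64_t ns = period_ns >> 1;   /* half period drives each toggle */

        if (ns < 8)
                return -1;             /* too fast, reject */
        return (ns <= 70000000LL) || (ns == 125000000LL) ||
               (ns == 250000000LL) || (ns == 500000000LL);
}

int main(void)
{
        printf("1 s period    -> freq mode? %d\n", use_freq_mode(1000000000LL));
        printf("250 ms period -> freq mode? %d\n", use_freq_mode(250000000LL));
        return 0;
}
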
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 7b6cb4c3764c..01752f44ace2 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -234,13 +234,19 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw)
static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_MBX;
-
- /* Take ownership of the buffer */
- ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
-
- /* reserve mailbox for VF use */
- if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
- ret_val = E1000_SUCCESS;
+ int count = 10;
+
+ do {
+ /* Take ownership of the buffer */
+ ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+
+ /* reserve mailbox for VF use */
+ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) {
+ ret_val = 0;
+ break;
+ }
+ udelay(1000);
+ } while (count-- > 0);
return ret_val;
}
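
The VF mailbox change wraps the single lock attempt in a bounded retry with a 1 ms delay between tries. A generic sketch of the pattern with hypothetical helpers; try_take_lock() here fakes success on the third attempt:

#include <stdbool.h>
#include <stdio.h>

static int attempts_needed = 3;

static bool try_take_lock(void)
{
        return --attempts_needed <= 0;   /* succeeds on the 3rd try */
}

static int obtain_lock(void)
{
        int count = 10;

        do {
                if (try_take_lock())
                        return 0;        /* got the lock */
                /* udelay(1000) in the driver; a no-op here */
        } while (count-- > 0);

        return -1;                       /* timed out */
}

int main(void)
{
        printf("lock result: %d\n", obtain_lock());
        return 0;
}
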
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 297af801f051..c12442252adb 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -43,6 +43,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
+#include <linux/sctp.h>
#include "igbvf.h"
@@ -876,7 +877,6 @@ static irqreturn_t igbvf_msix_other(int irq, void *data)
adapter->int_counter1++;
- netif_carrier_off(netdev);
hw->mac.get_link_status = 1;
if (!test_bit(__IGBVF_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
@@ -1908,6 +1908,31 @@ static void igbvf_watchdog_task(struct work_struct *work)
#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGBVF_TX_FLAGS_VLAN_SHIFT 16
+static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
+ u32 type_tucmd, u32 mss_l4len_idx)
+{
+ struct e1000_adv_tx_context_desc *context_desc;
+ struct igbvf_buffer *buffer_info;
+ u16 i = tx_ring->next_to_use;
+
+ context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ buffer_info->time_stamp = jiffies;
+ buffer_info->dma = 0;
+}
+
static int igbvf_tso(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
@@ -1987,65 +2012,56 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
return true;
}
-static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- __be16 protocol)
+static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
{
- struct e1000_adv_tx_context_desc *context_desc;
- unsigned int i;
- struct igbvf_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
-
- if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
+ unsigned int offset = 0;
- if (tx_flags & IGBVF_TX_FLAGS_VLAN)
- info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- info |= (skb_transport_header(skb) -
- skb_network_header(skb));
+ return offset == skb_checksum_start_offset(skb);
+}
- context_desc->vlan_macip_lens = cpu_to_le32(info);
+static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
+{
+ u32 vlan_macip_lens = 0;
+ u32 type_tucmd = 0;
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+csum_failed:
+ if (!(tx_flags & IGBVF_TX_FLAGS_VLAN))
+ return false;
+ goto no_csum;
+ }
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (protocol) {
- case htons(ETH_P_IP):
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
- break;
- case htons(ETH_P_IPV6):
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
- break;
- default:
- break;
- }
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((protocol == htons(ETH_P_IPV6)) &&
+ igbvf_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
+ break;
}
-
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
- context_desc->seqnum_seed = 0;
- context_desc->mss_l4len_idx = 0;
-
- buffer_info->time_stamp = jiffies;
- buffer_info->dma = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
-
- return true;
+ default:
+ skb_checksum_help(skb);
+ goto csum_failed;
}
- return false;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
+no_csum:
+ vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
+
+ igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
+ return true;
}
static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
@@ -2264,7 +2280,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
if (tso)
tx_flags |= IGBVF_TX_FLAGS_TSO;
- else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
+ else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IGBVF_TX_FLAGS_CSUM;
@@ -2717,11 +2733,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->bd_number = cards_found++;
netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM;
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC;
netdev->features = netdev->hw_features |
NETIF_F_HW_VLAN_CTAG_TX |
@@ -2731,11 +2747,14 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC;
+
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= NETIF_F_HW_CSUM;
/* reset the controller to put the device in a known good state */
err = hw->mac.ops.reset_hw(hw);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index 0f1eca639f68..f00a41d9a1ca 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -126,6 +126,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 4b9156cd8b93..e4949af7dd6b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -661,9 +661,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
-#ifdef CONFIG_IXGBE_VXLAN
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
-#endif
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
/* Tx fast path data */
@@ -675,6 +673,9 @@ struct ixgbe_adapter {
int num_rx_queues;
u16 rx_itr_setting;
+ /* Port number used to identify VXLAN traffic */
+ __be16 vxlan_port;
+
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
@@ -782,9 +783,6 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
struct ixgbe_mac_addr *mac_table;
-#ifdef CONFIG_IXGBE_VXLAN
- u16 vxlan_port;
-#endif
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -796,6 +794,10 @@ struct ixgbe_adapter {
u8 default_up;
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+#define IXGBE_MAX_LINK_HANDLE 10
+ struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
+ unsigned long tables;
+
/* maximum number of RETA entries among all devices supported by ixgbe
* driver: currently it's x550 device in non-SRIOV mode
*/
@@ -875,6 +877,8 @@ extern const char ixgbe_driver_version[];
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
+int ixgbe_open(struct net_device *netdev);
+int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
@@ -925,6 +929,9 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
+int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ixgbe_fdir_filter *input,
+ u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index bea96b3bc90c..b3530e1e3ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2053,7 +2053,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ ixgbe_close(netdev);
else
ixgbe_reset(adapter);
@@ -2091,7 +2091,7 @@ skip_loopback:
/* clear testing bit and return adapter to previous state */
clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ ixgbe_open(netdev);
else if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
} else {
@@ -2520,9 +2520,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return ret;
}
-static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
- struct ixgbe_fdir_filter *input,
- u16 sw_idx)
+int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ixgbe_fdir_filter *input,
+ u16 sw_idx)
{
struct ixgbe_hw *hw = &adapter->hw;
struct hlist_node *node2;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c4003a88bbf6..7df3fe29b210 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -51,6 +51,8 @@
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/vxlan.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
#ifdef CONFIG_OF
#include <linux/of_net.h>
@@ -65,6 +67,7 @@
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
+#include "ixgbe_model.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -1089,7 +1092,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
* @tx_ring: tx ring to clean
**/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring, int napi_budget)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_tx_buffer *tx_buffer;
@@ -1127,7 +1130,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1942,7 +1945,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
/* Even if we own the page, we are not allowed to use atomic_set()
* This would break get_page_unless_zero() users.
*/
- atomic_inc(&page->_count);
+ page_ref_inc(page);
return true;
}
@@ -2784,7 +2787,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx)
- clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
+ clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
/* Exit if we are called by netpoll or busy polling is active */
if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -4528,9 +4531,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
-#ifdef CONFIG_IXGBE_VXLAN
adapter->vxlan_port = 0;
-#endif
break;
default:
break;
@@ -5545,6 +5546,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
+ /* initialize static ixgbe jump table entries */
+ adapter->jump_tables[0] = ixgbe_ipv4_fields;
+
adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
hw->mac.num_rar_entries,
GFP_ATOMIC);
@@ -5988,7 +5992,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int ixgbe_open(struct net_device *netdev)
+int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -6090,7 +6094,7 @@ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int ixgbe_close(struct net_device *netdev)
+int ixgbe_close(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7554,11 +7558,10 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
struct ipv6hdr *ipv6;
} hdr;
struct tcphdr *th;
+ unsigned int hlen;
struct sk_buff *skb;
-#ifdef CONFIG_IXGBE_VXLAN
- u8 encap = false;
-#endif /* CONFIG_IXGBE_VXLAN */
__be16 vlan_id;
+ int l4_proto;
/* if ring doesn't have a interrupt vector, cannot perform ATR */
if (!q_vector)
@@ -7570,62 +7573,50 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
ring->atr_count++;
+ /* currently only IPv4/IPv6 with TCP is supported */
+ if ((first->protocol != htons(ETH_P_IP)) &&
+ (first->protocol != htons(ETH_P_IPV6)))
+ return;
+
/* snag network header to get L4 type and address */
skb = first->skb;
hdr.network = skb_network_header(skb);
- if (!skb->encapsulation) {
- th = tcp_hdr(skb);
- } else {
#ifdef CONFIG_IXGBE_VXLAN
+ if (skb->encapsulation &&
+ first->protocol == htons(ETH_P_IP) &&
+ hdr.ipv4->protocol != IPPROTO_UDP) {
struct ixgbe_adapter *adapter = q_vector->adapter;
- if (!adapter->vxlan_port)
- return;
- if (first->protocol != htons(ETH_P_IP) ||
- hdr.ipv4->version != IPVERSION ||
- hdr.ipv4->protocol != IPPROTO_UDP) {
- return;
- }
- if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
- return;
- encap = true;
- hdr.network = skb_inner_network_header(skb);
- th = inner_tcp_hdr(skb);
-#else
- return;
-#endif /* CONFIG_IXGBE_VXLAN */
+ /* verify the port is recognized as VXLAN */
+ if (adapter->vxlan_port &&
+ udp_hdr(skb)->dest == adapter->vxlan_port)
+ hdr.network = skb_inner_network_header(skb);
}
+#endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */
switch (hdr.ipv4->version) {
case IPVERSION:
- if (hdr.ipv4->protocol != IPPROTO_TCP)
- return;
+ /* access ihl as u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ l4_proto = hdr.ipv4->protocol;
break;
case 6:
- if (likely((unsigned char *)th - hdr.network ==
- sizeof(struct ipv6hdr))) {
- if (hdr.ipv6->nexthdr != IPPROTO_TCP)
- return;
- } else {
- __be16 frag_off;
- u8 l4_hdr;
-
- ipv6_skip_exthdr(skb, hdr.network - skb->data +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- return;
- if (l4_hdr != IPPROTO_TCP)
- return;
- }
+ hlen = hdr.network - skb->data;
+ l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
+ hlen -= hdr.network - skb->data;
break;
default:
return;
}
- /* skip this packet since it is invalid or the socket is closing */
- if (!th || th->fin)
+ if (l4_proto != IPPROTO_TCP)
+ return;
+
+ th = (struct tcphdr *)(hdr.network + hlen);
+
+ /* skip this packet since the socket is closing */
+ if (th->fin)
return;
/* sample on all syn packets or once every atr sample count */
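
The ATR rewrite computes the IPv4 header length from the first header byte, (hdr.network[0] & 0x0F) << 2, sidestepping unaligned struct access on ia64. A minimal demonstration of that extraction:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* first byte of an IPv4 header: version=4, ihl=5 (no options) */
        uint8_t first_byte = 0x45;

        unsigned int version = first_byte >> 4;
        unsigned int hlen    = (first_byte & 0x0F) << 2;  /* ihl * 4 bytes */

        printf("version=%u header length=%u bytes\n", version, hlen);
        return 0;
}
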
@@ -7676,10 +7667,8 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
break;
}
-#ifdef CONFIG_IXGBE_VXLAN
- if (encap)
+ if (hdr.network != skb_network_header(skb))
input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
-#endif /* CONFIG_IXGBE_VXLAN */
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
@@ -8200,6 +8189,240 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return 0;
}
+static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
+ struct tc_cls_u32_offload *cls)
+{
+ u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
+ u32 loc;
+ int err;
+
+ if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
+ return -EINVAL;
+
+ loc = cls->knode.handle & 0xfffff;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
+ spin_unlock(&adapter->fdir_perfect_lock);
+ return err;
+}
+
+static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
+ __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+ if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return -EINVAL;
+
+ /* These ixgbe devices do not support hash tables at the moment,
+ * so abort when given one.
+ */
+ if (cls->hnode.divisor > 0)
+ return -EINVAL;
+
+ set_bit(uhtid - 1, &adapter->tables);
+ return 0;
+}
+
+static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
+ struct tc_cls_u32_offload *cls)
+{
+ u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
+
+ if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return -EINVAL;
+
+ clear_bit(uhtid - 1, &adapter->tables);
+ return 0;
+}
+
+static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
+ __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ u32 loc = cls->knode.handle & 0xfffff;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_mat_field *field_ptr;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input mask;
+#ifdef CONFIG_NET_CLS_ACT
+ const struct tc_action *a;
+#endif
+ int i, err = 0;
+ u8 queue;
+ u32 uhtid, link_uhtid;
+
+ memset(&mask, 0, sizeof(union ixgbe_atr_input));
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+ link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
+
+ /* At the moment cls_u32 jumps to network layer and skips past
+ * L2 headers. The canonical method to match L2 frames is to use
+ * negative values. However, this is error prone at best and really
+ * just broken, because there is no way to know what sort of header
+ * is in front of the network layer. Fix cls_u32 to support L2
+ * headers when needed.
+ */
+ if (protocol != htons(ETH_P_IP))
+ return -EINVAL;
+
+ if (link_uhtid) {
+ struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
+
+ if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return -EINVAL;
+
+ if (!test_bit(link_uhtid - 1, &adapter->tables))
+ return -EINVAL;
+
+ for (i = 0; nexthdr[i].jump; i++) {
+ if (nexthdr->o != cls->knode.sel->offoff ||
+ nexthdr->s != cls->knode.sel->offshift ||
+ nexthdr->m != cls->knode.sel->offmask ||
+ /* do not support multiple key jumps; it's just mad */
+ cls->knode.sel->nkeys > 1)
+ return -EINVAL;
+
+ if (nexthdr->off != cls->knode.sel->keys[0].off ||
+ nexthdr->val != cls->knode.sel->keys[0].val ||
+ nexthdr->mask != cls->knode.sel->keys[0].mask)
+ return -EINVAL;
+
+ adapter->jump_tables[link_uhtid] = nexthdr->jump;
+ }
+ return 0;
+ }
+
+ if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
+ e_err(drv, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ /* cls u32 is a graph starting at root node 0x800. The driver tracks
+ * links and also the fields used to advance the parser across each
+ * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
+ * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h.
+ * To add support for new nodes, update the ixgbe_model.h parse structures;
+ * this function _should_ be generic, so try not to hardcode values here.
+ */
+ if (uhtid == 0x800) {
+ field_ptr = adapter->jump_tables[0];
+ } else {
+ if (uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return -EINVAL;
+
+ field_ptr = adapter->jump_tables[uhtid];
+ }
+
+ if (!field_ptr)
+ return -EINVAL;
+
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ int off = cls->knode.sel->keys[i].off;
+ __be32 val = cls->knode.sel->keys[i].val;
+ __be32 m = cls->knode.sel->keys[i].mask;
+ bool found_entry = false;
+ int j;
+
+ for (j = 0; field_ptr[j].val; j++) {
+ if (field_ptr[j].off == off) {
+ field_ptr[j].val(input, &mask, val, m);
+ input->filter.formatted.flow_type |=
+ field_ptr[j].type;
+ found_entry = true;
+ break;
+ }
+ }
+
+ if (!found_entry)
+ goto err_out;
+ }
+
+ mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+#ifdef CONFIG_NET_CLS_ACT
+ if (list_empty(&cls->knode.exts->actions))
+ goto err_out;
+
+ list_for_each_entry(a, &cls->knode.exts->actions, list) {
+ if (!is_tcf_gact_shot(a))
+ goto err_out;
+ }
+#endif
+
+ input->action = IXGBE_FDIR_DROP_QUEUE;
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ input->sw_idx = loc;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ if (hlist_empty(&adapter->fdir_filter_list)) {
+ memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ if (err)
+ goto err_out_w_lock;
+ } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ err = -EINVAL;
+ goto err_out_w_lock;
+ }
+
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+ err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
+ input->sw_idx, queue);
+ if (!err)
+ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return err;
+err_out_w_lock:
+ spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+ kfree(input);
+ return -EINVAL;
+}
+
+static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
+ tc->type == TC_SETUP_CLSU32) {
+ switch (tc->cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return ixgbe_configure_clsu32(adapter,
+ proto, tc->cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return ixgbe_delete_clsu32(adapter, tc->cls_u32);
+ case TC_CLSU32_NEW_HNODE:
+ case TC_CLSU32_REPLACE_HNODE:
+ return ixgbe_configure_clsu32_add_hnode(adapter, proto,
+ tc->cls_u32);
+ case TC_CLSU32_DELETE_HNODE:
+ return ixgbe_configure_clsu32_del_hnode(adapter,
+ tc->cls_u32);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ return ixgbe_setup_tc(dev, tc->tc);
+}
+
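
ixgbe_configure_clsu32() above is table-driven: each u32 key's byte offset is looked up in the per-table field array and the filter is rejected if nothing matches. A toy sketch of that lookup; the field table contents are invented for illustration, not the driver's ixgbe_mat_field entries:

#include <stdio.h>

/* hypothetical field table mirroring the shape of ixgbe_mat_field */
struct mat_field {
        int off;
        const char *name;
};

static const struct mat_field ipv4_fields[] = {
        { 12, "src ip" },
        { 16, "dst ip" },
        { -1, NULL },            /* sentinel, like the .val == NULL check */
};

int main(void)
{
        int key_off = 16;        /* offset taken from one u32 sel key */

        for (int j = 0; ipv4_fields[j].name; j++) {
                if (ipv4_fields[j].off == key_off) {
                        printf("key at off %d -> %s\n",
                               key_off, ipv4_fields[j].name);
                        return 0;
                }
        }
        printf("no matching field, reject the filter\n");
        return 1;
}
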
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
@@ -8262,19 +8485,17 @@ static int ixgbe_set_features(struct net_device *netdev,
}
/*
- * Check if Flow Director n-tuple support was enabled or disabled. If
- * the state changed, we need to reset.
+ * Check if Flow Director n-tuple support or hw_tc support was
+ * enabled or disabled. If the state changed, we need to reset.
*/
- switch (features & NETIF_F_NTUPLE) {
- case NETIF_F_NTUPLE:
+ if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
/* turn off ATR, enable perfect filters and reset */
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
need_reset = true;
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- break;
- default:
+ } else {
/* turn off perfect filters, enable ATR and reset */
if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
need_reset = true;
@@ -8282,23 +8503,16 @@ static int ixgbe_set_features(struct net_device *netdev,
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
/* We cannot enable ATR if SR-IOV is enabled */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- break;
-
- /* We cannot enable ATR if we have 2 or more traffic classes */
- if (netdev_get_num_tc(netdev) > 1)
- break;
-
- /* We cannot enable ATR if RSS is disabled */
- if (adapter->ring_feature[RING_F_RSS].limit <= 1)
- break;
-
- /* A sample rate of 0 indicates ATR disabled */
- if (!adapter->atr_sample_rate)
- break;
-
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- break;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
+ /* We cannot enable ATR if we have 2 or more tcs */
+ (netdev_get_num_tc(netdev) > 1) ||
+ /* We cannot enable ATR if RSS is disabled */
+ (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
+ /* A sample rate of 0 indicates ATR disabled */
+ (!adapter->atr_sample_rate))
+ ; /* do nothing, not supported */
+ else /* otherwise supported, set the flag */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
}
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -8338,7 +8552,6 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- u16 new_port = ntohs(port);
if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return;
@@ -8346,18 +8559,18 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
if (sa_family == AF_INET6)
return;
- if (adapter->vxlan_port == new_port)
+ if (adapter->vxlan_port == port)
return;
if (adapter->vxlan_port) {
netdev_info(dev,
"Hit Max num of VXLAN ports, not adding port %d\n",
- new_port);
+ ntohs(port));
return;
}
- adapter->vxlan_port = new_port;
- IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, new_port);
+ adapter->vxlan_port = port;
+ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port));
}
/**
@@ -8370,7 +8583,6 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
__be16 port)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- u16 new_port = ntohs(port);
if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return;
@@ -8378,9 +8590,9 @@ static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
if (sa_family == AF_INET6)
return;
- if (adapter->vxlan_port != new_port) {
+ if (adapter->vxlan_port != port) {
netdev_info(dev, "Port %d was not found, not deleting\n",
- new_port);
+ ntohs(port));
return;
}
@@ -8657,9 +8869,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
-#ifdef CONFIG_IXGBE_DCB
- .ndo_setup_tc = ixgbe_setup_tc,
-#endif
+ .ndo_setup_tc = __ixgbe_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@@ -9030,7 +9240,8 @@ skip_sriov:
case ixgbe_mac_X550EM_x:
netdev->features |= NETIF_F_SCTP_CRC;
netdev->hw_features |= NETIF_F_SCTP_CRC |
- NETIF_F_NTUPLE;
+ NETIF_F_NTUPLE |
+ NETIF_F_HW_TC;
break;
default:
break;
@@ -9050,17 +9261,6 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
-#ifdef CONFIG_IXGBE_VXLAN
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- netdev->hw_enc_features |= NETIF_F_RXCSUM;
- break;
- default:
- break;
- }
-#endif /* CONFIG_IXGBE_VXLAN */
-
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
#endif
@@ -9114,6 +9314,8 @@ skip_sriov:
goto err_sw_init;
}
+ /* Set hw->mac.addr to permanent MAC address */
+ ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
new file mode 100644
index 000000000000..74c53ad9d268
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -0,0 +1,103 @@
+/*******************************************************************************
+ *
+ * Intel 10 Gigabit PCI Express Linux driver
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _IXGBE_MODEL_H_
+#define _IXGBE_MODEL_H_
+
+#include "ixgbe.h"
+#include "ixgbe_type.h"
+
+struct ixgbe_mat_field {
+ unsigned int off;
+ int (*val)(struct ixgbe_fdir_filter *input,
+ union ixgbe_atr_input *mask,
+ u32 val, u32 m);
+ unsigned int type;
+};
+
+static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
+ union ixgbe_atr_input *mask,
+ u32 val, u32 m)
+{
+ input->filter.formatted.src_ip[0] = val;
+ mask->formatted.src_ip[0] = m;
+ return 0;
+}
+
+static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input,
+ union ixgbe_atr_input *mask,
+ u32 val, u32 m)
+{
+ input->filter.formatted.dst_ip[0] = val;
+ mask->formatted.dst_ip[0] = m;
+ return 0;
+}
+
+static struct ixgbe_mat_field ixgbe_ipv4_fields[] = {
+ { .off = 12, .val = ixgbe_mat_prgm_sip,
+ .type = IXGBE_ATR_FLOW_TYPE_IPV4},
+ { .off = 16, .val = ixgbe_mat_prgm_dip,
+ .type = IXGBE_ATR_FLOW_TYPE_IPV4},
+ { .val = NULL } /* terminal node */
+};
+
+static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input,
+ union ixgbe_atr_input *mask,
+ u32 val, u32 m)
+{
+ input->filter.formatted.src_port = val & 0xffff;
+ mask->formatted.src_port = m & 0xffff;
+ input->filter.formatted.dst_port = val >> 16;
+ mask->formatted.dst_port = m >> 16;
+
+ return 0;
+}
+
+static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
+ {.off = 0, .val = ixgbe_mat_prgm_ports,
+ .type = IXGBE_ATR_FLOW_TYPE_TCPV4},
+ { .val = NULL } /* terminal node */
+};
+
+struct ixgbe_nexthdr {
+ /* offset, shift, and mask of position to next header */
+ unsigned int o;
+ u32 s;
+ u32 m;
+ /* match criteria to make this jump */
+ unsigned int off;
+ u32 val;
+ u32 mask;
+ /* location of jump to make */
+ struct ixgbe_mat_field *jump;
+};
+
+static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = {
+ { .o = 0, .s = 6, .m = 0xf,
+ .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields},
+ { .jump = NULL } /* terminal node */
+};
+#endif /* _IXGBE_MODEL_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 87aca3f7c3de..68a9c646498e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -355,7 +355,7 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
break;
- usleep_range(10, 20);
+ udelay(10);
}
if (ctrl)
*ctrl = command;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index c48aef613b0a..d7aa4b203f40 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -680,7 +680,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ ixgbevf_close(netdev);
else
ixgbevf_reset(adapter);
@@ -692,7 +692,7 @@ static void ixgbevf_diag_test(struct net_device *netdev,
clear_bit(__IXGBEVF_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ ixgbevf_open(netdev);
} else {
hw_dbg(&adapter->hw, "online testing starting\n");
/* Online tests */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 68ec7daa04fd..991eeae81473 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -486,6 +486,8 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
+int ixgbevf_open(struct net_device *netdev);
+int ixgbevf_close(struct net_device *netdev);
void ixgbevf_up(struct ixgbevf_adapter *adapter);
void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3558f019b631..b0edae94d73d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -837,7 +837,7 @@ add_tail_frag:
/* Even if we own the page, we are not allowed to use atomic_set()
* This would break get_page_unless_zero() users.
*/
- atomic_inc(&page->_count);
+ page_ref_inc(page);
return true;
}
@@ -3122,7 +3122,7 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int ixgbevf_open(struct net_device *netdev)
+int ixgbevf_open(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3205,7 +3205,7 @@ err_setup_reset:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int ixgbevf_close(struct net_device *netdev)
+int ixgbevf_close(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3692,19 +3692,23 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
+ int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
- ether_addr_copy(hw->mac.addr, addr->sa_data);
-
spin_lock_bh(&adapter->mbx_lock);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
spin_unlock_bh(&adapter->mbx_lock);
+ if (err)
+ return -EPERM;
+
+ ether_addr_copy(hw->mac.addr, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 61a98f4c5746..4d613a4f2a7f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -408,8 +408,10 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+ return IXGBE_ERR_MBX;
+ }
return ret_val;
}
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index a1c862b4664d..b5c6d42daa12 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -40,6 +40,19 @@ config MVMDIO
This driver is used by the MV643XX_ETH and MVNETA drivers.
+config MVNETA_BM_ENABLE
+ tristate "Marvell Armada 38x/XP network interface BM support"
+ depends on MVNETA
+ ---help---
+ This driver supports the auxiliary block of the network
+ interface units in the Marvell ARMADA XP and ARMADA 38x SoC
+ family, which is called the buffer manager.
+
+ When enabled, this driver cooperates closely with the mvneta
+ driver and is common to all network ports of the device, even
+ on the Armada 370 SoC, which doesn't support hardware buffer
+ management.
+
config MVNETA
tristate "Marvell Armada 370/38x/XP network interface support"
depends on PLAT_ORION
@@ -53,6 +66,15 @@ config MVNETA
driver, which should be used for the older Marvell SoCs
(Dove, Orion, Discovery, Kirkwood).
+config MVNETA_BM
+ tristate
+ default y if MVNETA=y && MVNETA_BM_ENABLE
+ default MVNETA_BM_ENABLE
+ select HWBM
+ help
+ MVNETA_BM must not be 'm' if MVNETA=y, so this symbol ensures
+ that all dependencies are met.
+
config MVPP2
tristate "Marvell Armada 375 network interface support"
depends on MACH_ARMADA_375
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index f6425bd2884b..ff1bffa74803 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_MVMDIO) += mvmdio.o
obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o
obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_MVPP2) += mvpp2.o
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b0ae69f84493..7fc490225da5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -30,6 +30,8 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
+#include <net/hwbm.h>
+#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
@@ -37,6 +39,10 @@
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
+#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
+#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
+#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
+#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
@@ -50,6 +56,9 @@
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
+#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
+#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
+#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
@@ -107,6 +116,7 @@
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
+#define MVNETA_BM_ADDRESS 0x2504
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
@@ -250,10 +260,12 @@
#define MVNETA_VLAN_TAG_LEN 4
-#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
-#define MVNETA_ACC_MODE_EXT 1
+#define MVNETA_ACC_MODE_EXT1 1
+#define MVNETA_ACC_MODE_EXT2 2
+
+#define MVNETA_MAX_DECODE_WIN 6
/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
@@ -287,13 +299,14 @@
#define MVNETA_RX_PKT_SIZE(mtu) \
ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
ETH_HLEN + ETH_FCS_LEN, \
- MVNETA_CPU_D_CACHE_LINE_SIZE)
+ cache_line_size())
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_phys) && \
(addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
-#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
+ (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
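+/* e.g. a status word with only bit 14 set (0x4000) decodes to BM pool
+ * id 2, since the pool id field occupies status bits [14:13]
+ */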
struct mvneta_statistic {
unsigned short offset;
@@ -359,6 +372,7 @@ struct mvneta_pcpu_port {
};
struct mvneta_port {
+ u8 id;
struct mvneta_pcpu_port __percpu *ports;
struct mvneta_pcpu_stats __percpu *stats;
@@ -394,6 +408,11 @@ struct mvneta_port {
unsigned int tx_csum_limit;
unsigned int use_inband_status:1;
+ struct mvneta_bm *bm_priv;
+ struct mvneta_bm_pool *pool_long;
+ struct mvneta_bm_pool *pool_short;
+ int bm_win_id;
+
u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -419,6 +438,8 @@ struct mvneta_port {
#define MVNETA_TX_L4_CSUM_NOT BIT(31)
#define MVNETA_RXD_ERR_CRC 0x0
+#define MVNETA_RXD_BM_POOL_SHIFT 13
+#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
@@ -563,6 +584,9 @@ static int rxq_def;
static int rx_copybreak __read_mostly = 256;
+/* HW BM requires that each port be identified by a unique ID */
+static int global_port_id;
+
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
@@ -829,6 +853,215 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
+/* Enable buffer management (BM) */
+static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val |= MVNETA_RXQ_HW_BUF_ALLOC;
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Notify HW about port's assignment of pool for bigger packets */
+static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
+ val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
+
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Notify HW about port's assignment of pool for smaller packets */
+static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
+ u32 val;
+
+ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
+ val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
+ val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
+
+ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
+}
+
+/* Set port's receive buffer size for assigned BM pool */
+static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
+ int buf_size,
+ u8 pool_id)
+{
+ u32 val;
+
+ if (!IS_ALIGNED(buf_size, 8)) {
+ dev_warn(pp->dev->dev.parent,
+ "illegal buf_size value %d, round to %d\n",
+ buf_size, ALIGN(buf_size, 8));
+ buf_size = ALIGN(buf_size, 8);
+ }
+
+ val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
+ val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
+ mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
+}
+
+/* Configure MBUS window in order to enable access to BM internal SRAM */
+static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
+ u8 target, u8 attr)
+{
+ u32 win_enable, win_protect;
+ int i;
+
+ win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
+
+ if (pp->bm_win_id < 0) {
+ /* Find the first unoccupied window */
+ for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
+ if (win_enable & (1 << i)) {
+ pp->bm_win_id = i;
+ break;
+ }
+ }
+ if (i == MVNETA_MAX_DECODE_WIN)
+ return -ENOMEM;
+ } else {
+ i = pp->bm_win_id;
+ }
+
+ mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
+ mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
+
+ if (i < 4)
+ mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
+
+ mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
+ (attr << 8) | target);
+
+ mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
+
+ win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
+ win_protect |= 3 << (2 * i);
+ mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
+
+ win_enable &= ~(1 << i);
+ mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+
+ return 0;
+}
+
+/* Assign and initialize pools for the port. On failure, the
+ * buffer manager will remain disabled for the current port.
+ */
+static int mvneta_bm_port_init(struct platform_device *pdev,
+ struct mvneta_port *pp)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ u32 long_pool_id, short_pool_id, wsize;
+ u8 target, attr;
+ int err;
+
+ /* Get BM window information */
+ err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
+ &target, &attr);
+ if (err < 0)
+ return err;
+
+ pp->bm_win_id = -1;
+
+ /* Open NETA -> BM window */
+ err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
+ target, attr);
+ if (err < 0) {
+ netdev_info(pp->dev, "fail to configure mbus window to BM\n");
+ return err;
+ }
+
+ if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
+ netdev_info(pp->dev, "missing long pool id\n");
+ return -EINVAL;
+ }
+
+ /* Create port's long pool depending on mtu */
+ pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
+ MVNETA_BM_LONG, pp->id,
+ MVNETA_RX_PKT_SIZE(pp->dev->mtu));
+ if (!pp->pool_long) {
+ netdev_info(pp->dev, "fail to obtain long pool for port\n");
+ return -ENOMEM;
+ }
+
+ pp->pool_long->port_map |= 1 << pp->id;
+
+ mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
+ pp->pool_long->id);
+
+ /* If the short pool id is not defined, assume a single pool is used */
+ if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
+ short_pool_id = long_pool_id;
+
+ /* Create port's short pool */
+ pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
+ MVNETA_BM_SHORT, pp->id,
+ MVNETA_BM_SHORT_PKT_SIZE);
+ if (!pp->pool_short) {
+ netdev_info(pp->dev, "fail to obtain short pool for port\n");
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ return -ENOMEM;
+ }
+
+ if (short_pool_id != long_pool_id) {
+ pp->pool_short->port_map |= 1 << pp->id;
+ mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
+ pp->pool_short->id);
+ }
+
+ return 0;
+}
+
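+/* Example binding only (node name and pool numbers are made up; the
+ * property names match the of_parse_phandle()/of_property_read_u32()
+ * lookups in this function and in mvneta_probe()):
+ *
+ *	&eth0 {
+ *		buffer-manager = <&bm>;
+ *		bm,pool-long = <0>;
+ *		bm,pool-short = <1>;
+ *	};
+ */
+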
+/* Update settings of a pool for bigger packets */
+static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
+{
+ struct mvneta_bm_pool *bm_pool = pp->pool_long;
+ struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
+ int num;
+
+ /* Release all buffers from long pool */
+ mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
+ if (hwbm_pool->buf_num) {
+ WARN(1, "cannot free all buffers in pool %d\n",
+ bm_pool->id);
+ goto bm_mtu_err;
+ }
+
+ bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
+ bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
+ hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
+
+ /* Fill entire long pool */
+ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+ if (num != hwbm_pool->size) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ bm_pool->id, num, hwbm_pool->size);
+ goto bm_mtu_err;
+ }
+ mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
+
+ return;
+
+bm_mtu_err:
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
+
+ pp->bm_priv = NULL;
+ mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
+ netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
+}
+
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@@ -873,14 +1106,14 @@ static void mvneta_port_down(struct mvneta_port *pp)
do {
if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
netdev_warn(pp->dev,
- "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
+ "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
val);
break;
}
mdelay(1);
val = mvreg_read(pp, MVNETA_RXQ_CMD);
- } while (val & 0xff);
+ } while (val & MVNETA_RXQ_ENABLE_MASK);
/* Stop Tx port activity. Check port Tx activity. Issue stop
* command for active channels only
@@ -905,14 +1138,14 @@ static void mvneta_port_down(struct mvneta_port *pp)
/* Check TX Command reg that all Txqs are stopped */
val = mvreg_read(pp, MVNETA_TXQ_CMD);
- } while (val & 0xff);
+ } while (val & MVNETA_TXQ_ENABLE_MASK);
/* Double check to verify that TX FIFO is empty */
count = 0;
do {
if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
netdev_warn(pp->dev,
- "TX FIFO empty timeout status=0x08%x\n",
+ "TX FIFO empty timeout status=0x%08x\n",
val);
break;
}
@@ -1149,9 +1382,17 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
/* Set Port Acceleration Mode */
- val = MVNETA_ACC_MODE_EXT;
+ if (pp->bm_priv)
+ /* HW buffer management + legacy parser */
+ val = MVNETA_ACC_MODE_EXT2;
+ else
+ /* SW buffer management + legacy parser */
+ val = MVNETA_ACC_MODE_EXT1;
mvreg_write(pp, MVNETA_ACC_MODE, val);
+ if (pp->bm_priv)
+ mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
+
/* Update val of portCfg register accordingly with all RxQueue types */
val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
mvreg_write(pp, MVNETA_PORT_CONFIG, val);
@@ -1518,23 +1759,25 @@ static void mvneta_txq_done(struct mvneta_port *pp,
}
}
-static void *mvneta_frag_alloc(const struct mvneta_port *pp)
+void *mvneta_frag_alloc(unsigned int frag_size)
{
- if (likely(pp->frag_size <= PAGE_SIZE))
- return netdev_alloc_frag(pp->frag_size);
+ if (likely(frag_size <= PAGE_SIZE))
+ return netdev_alloc_frag(frag_size);
else
- return kmalloc(pp->frag_size, GFP_ATOMIC);
+ return kmalloc(frag_size, GFP_ATOMIC);
}
+EXPORT_SYMBOL_GPL(mvneta_frag_alloc);
-static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
+void mvneta_frag_free(unsigned int frag_size, void *data)
{
- if (likely(pp->frag_size <= PAGE_SIZE))
+ if (likely(frag_size <= PAGE_SIZE))
skb_free_frag(data);
else
kfree(data);
}
+EXPORT_SYMBOL_GPL(mvneta_frag_free);
-/* Refill processing */
+/* Refill processing for SW buffer management */
static int mvneta_rx_refill(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc)
@@ -1542,7 +1785,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
dma_addr_t phys_addr;
void *data;
- data = mvneta_frag_alloc(pp);
+ data = mvneta_frag_alloc(pp->frag_size);
if (!data)
return -ENOMEM;
@@ -1550,7 +1793,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
MVNETA_RX_BUF_SIZE(pp->pkt_size),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
- mvneta_frag_free(pp, data);
+ mvneta_frag_free(pp->frag_size, data);
return -ENOMEM;
}
@@ -1596,22 +1839,156 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
int rx_done, i;
rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+ if (rx_done)
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+
+ if (pp->bm_priv) {
+ for (i = 0; i < rx_done; i++) {
+ struct mvneta_rx_desc *rx_desc =
+ mvneta_rxq_next_desc_get(rxq);
+ u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
+ struct mvneta_bm_pool *bm_pool;
+
+ bm_pool = &pp->bm_priv->bm_pools[pool_id];
+ /* Return dropped buffer to the pool */
+ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
+ rx_desc->buf_phys_addr);
+ }
+ return;
+ }
+
for (i = 0; i < rxq->size; i++) {
struct mvneta_rx_desc *rx_desc = rxq->descs + i;
void *data = (void *)rx_desc->buf_cookie;
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
- mvneta_frag_free(pp, data);
+ mvneta_frag_free(pp->frag_size, data);
}
+}
- if (rx_done)
- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+/* Main rx processing when using software buffer management */
+static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
+ struct mvneta_rx_queue *rxq)
+{
+ struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
+ struct net_device *dev = pp->dev;
+ int rx_done;
+ u32 rcvd_pkts = 0;
+ u32 rcvd_bytes = 0;
+
+ /* Get number of received packets */
+ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
+
+ if (rx_todo > rx_done)
+ rx_todo = rx_done;
+
+ rx_done = 0;
+
+ /* Fairness NAPI loop */
+ while (rx_done < rx_todo) {
+ struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+ struct sk_buff *skb;
+ unsigned char *data;
+ dma_addr_t phys_addr;
+ u32 rx_status, frag_size;
+ int rx_bytes, err;
+
+ rx_done++;
+ rx_status = rx_desc->status;
+ rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
+ data = (unsigned char *)rx_desc->buf_cookie;
+ phys_addr = rx_desc->buf_phys_addr;
+
+ if (!mvneta_rxq_desc_is_first_last(rx_status) ||
+ (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+err_drop_frame:
+ dev->stats.rx_errors++;
+ mvneta_rx_error(pp, rx_desc);
+ /* leave the descriptor untouched */
+ continue;
+ }
+
+ if (rx_bytes <= rx_copybreak) {
+ /* better copy a small frame and not unmap the DMA region */
+ skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
+ if (unlikely(!skb))
+ goto err_drop_frame;
+
+ dma_sync_single_range_for_cpu(dev->dev.parent,
+ rx_desc->buf_phys_addr,
+ MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes,
+ DMA_FROM_DEVICE);
+ memcpy(skb_put(skb, rx_bytes),
+ data + MVNETA_MH_SIZE + NET_SKB_PAD,
+ rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ mvneta_rx_csum(pp, rx_status, skb);
+ napi_gro_receive(&port->napi, skb);
+
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
+
+ /* leave the descriptor and buffer untouched */
+ continue;
+ }
+
+ /* Refill processing */
+ err = mvneta_rx_refill(pp, rx_desc);
+ if (err) {
+ netdev_err(dev, "Linux processing - Can't refill\n");
+ rxq->missed++;
+ goto err_drop_frame;
+ }
+
+ frag_size = pp->frag_size;
+
+ skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
+
+ /* After refill the old buffer has to be unmapped regardless
+ * of whether the skb was successfully built or not.
+ */
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ MVNETA_RX_BUF_SIZE(pp->pkt_size),
+ DMA_FROM_DEVICE);
+
+ if (!skb)
+ goto err_drop_frame;
+
+ rcvd_pkts++;
+ rcvd_bytes += rx_bytes;
+
+ /* Linux processing */
+ skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
+ skb_put(skb, rx_bytes);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ mvneta_rx_csum(pp, rx_status, skb);
+
+ napi_gro_receive(&port->napi, skb);
+ }
+
+ if (rcvd_pkts) {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += rcvd_pkts;
+ stats->rx_bytes += rcvd_bytes;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ /* Update rxq management counters */
+ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
+
+ return rx_done;
}
-/* Main rx processing */
-static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
- struct mvneta_rx_queue *rxq)
+/* Main rx processing when using hardware buffer management */
+static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
+ struct mvneta_rx_queue *rxq)
{
struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
struct net_device *dev = pp->dev;
@@ -1630,21 +2007,29 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
/* Fairness NAPI loop */
while (rx_done < rx_todo) {
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
+ struct mvneta_bm_pool *bm_pool = NULL;
struct sk_buff *skb;
unsigned char *data;
dma_addr_t phys_addr;
- u32 rx_status;
+ u32 rx_status, frag_size;
int rx_bytes, err;
+ u8 pool_id;
rx_done++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
data = (unsigned char *)rx_desc->buf_cookie;
phys_addr = rx_desc->buf_phys_addr;
+ pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
+ bm_pool = &pp->bm_priv->bm_pools[pool_id];
if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
- err_drop_frame:
+err_drop_frame_ret_pool:
+ /* Return the buffer to the pool */
+ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
+ rx_desc->buf_phys_addr);
+err_drop_frame:
dev->stats.rx_errors++;
mvneta_rx_error(pp, rx_desc);
/* leave the descriptor untouched */
@@ -1655,7 +2040,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
/* better copy a small frame and not unmap the DMA region */
skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
if (unlikely(!skb))
- goto err_drop_frame;
+ goto err_drop_frame_ret_pool;
dma_sync_single_range_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
@@ -1673,26 +2058,31 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
rcvd_pkts++;
rcvd_bytes += rx_bytes;
+ /* Return the buffer to the pool */
+ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
+ rx_desc->buf_phys_addr);
+
/* leave the descriptor and buffer untouched */
continue;
}
/* Refill processing */
- err = mvneta_rx_refill(pp, rx_desc);
+ err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
netdev_err(dev, "Linux processing - Can't refill\n");
rxq->missed++;
- goto err_drop_frame;
+ goto err_drop_frame_ret_pool;
}
- skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
+ frag_size = bm_pool->hwbm_pool.frag_size;
+
+ skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
/* After refill old buffer has to be unmapped regardless
* the skb is successfully built or not.
*/
- dma_unmap_single(dev->dev.parent, phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
-
+ dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
if (!skb)
goto err_drop_frame;
@@ -2297,7 +2687,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
if (rx_queue) {
rx_queue = rx_queue - 1;
- rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
+ if (pp->bm_priv)
+ rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
+ else
+ rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
}
budget -= rx_done;
@@ -2370,9 +2763,6 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
if (rxq->descs == NULL)
return -ENOMEM;
- BUG_ON(rxq->descs !=
- PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
-
rxq->last_desc = rxq->size - 1;
/* Set Rx descriptors queue starting address */
@@ -2386,9 +2776,17 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
- /* Fill RXQ with buffers from RX pool */
- mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
- mvneta_rxq_bm_disable(pp, rxq);
+ if (!pp->bm_priv) {
+ /* Fill RXQ with buffers from RX pool */
+ mvneta_rxq_buf_size_set(pp, rxq,
+ MVNETA_RX_BUF_SIZE(pp->pkt_size));
+ mvneta_rxq_bm_disable(pp, rxq);
+ } else {
+ mvneta_rxq_bm_enable(pp, rxq);
+ mvneta_rxq_long_pool_set(pp, rxq);
+ mvneta_rxq_short_pool_set(pp, rxq);
+ }
+
mvneta_rxq_fill(pp, rxq, rxq->size);
return 0;
@@ -2435,10 +2833,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
if (txq->descs == NULL)
return -ENOMEM;
- /* Make sure descriptor address is cache line size aligned */
- BUG_ON(txq->descs !=
- PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
-
txq->last_desc = txq->size - 1;
/* Set maximum bandwidth for enabled TXQs */
@@ -2648,6 +3042,20 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
return mtu;
}
+static void mvneta_percpu_enable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ disable_percpu_irq(pp->dev->irq);
+}
+
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
@@ -2661,6 +3069,9 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
dev->mtu = mtu;
if (!netif_running(dev)) {
+ if (pp->bm_priv)
+ mvneta_bm_update_mtu(pp, mtu);
+
netdev_update_features(dev);
return 0;
}
@@ -2669,10 +3080,14 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
* reallocation of the queues
*/
mvneta_stop_dev(pp);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
mvneta_cleanup_txqs(pp);
mvneta_cleanup_rxqs(pp);
+ if (pp->bm_priv)
+ mvneta_bm_update_mtu(pp, mtu);
+
pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2689,6 +3104,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
return ret;
}
+ on_each_cpu(mvneta_percpu_enable, pp, true);
mvneta_start_dev(pp);
mvneta_port_up(pp);
@@ -2842,20 +3258,6 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
pp->phy_dev = NULL;
}
-static void mvneta_percpu_enable(void *arg)
-{
- struct mvneta_port *pp = arg;
-
- enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
-}
-
-static void mvneta_percpu_disable(void *arg)
-{
- struct mvneta_port *pp = arg;
-
- disable_percpu_irq(pp->dev->irq);
-}
-
/* Electing a CPU must be done in an atomic way: it should be done
* after or before the removal/insertion of a CPU and this function is
* not reentrant.
@@ -2920,6 +3322,8 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
spin_lock(&pp->lock);
/* Configuring the driver for a new CPU while the
* driver is stopping is racy, so just avoid it.
@@ -3070,17 +3474,17 @@ static int mvneta_stop(struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
/* Inform that we are stopping so we don't want to setup the
- * driver for new CPUs in the notifiers
+ * driver for new CPUs in the notifiers. The code of the
+ * notifier for CPU online is protected by the same spinlock,
+ * so when we get the lock, the notifier work is done.
*/
spin_lock(&pp->lock);
pp->is_stopped = true;
+ spin_unlock(&pp->lock);
+
mvneta_stop_dev(pp);
mvneta_mdio_remove(pp);
unregister_cpu_notifier(&pp->cpu_notifier);
- /* Now that the notifier are unregistered, we can release le
- * lock
- */
- spin_unlock(&pp->lock);
on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
@@ -3557,6 +3961,7 @@ static int mvneta_probe(struct platform_device *pdev)
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
+ struct device_node *bm_node;
struct mvneta_port *pp;
struct net_device *dev;
const char *dt_mac_addr;
@@ -3612,6 +4017,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->ethtool_ops = &mvneta_eth_tool_ops;
pp = netdev_priv(dev);
+ spin_lock_init(&pp->lock);
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
@@ -3690,26 +4096,39 @@ static int mvneta_probe(struct platform_device *pdev)
pp->tx_csum_limit = tx_csum_limit;
+ dram_target_info = mv_mbus_dram_info();
+ if (dram_target_info)
+ mvneta_conf_mbus_windows(pp, dram_target_info);
+
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
pp->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
+ pp->id = global_port_id++;
+
+ /* Obtain access to BM resources if enabled and already initialized */
+ bm_node = of_parse_phandle(dn, "buffer-manager", 0);
+ if (bm_node && bm_node->data) {
+ pp->bm_priv = bm_node->data;
+ err = mvneta_bm_port_init(pdev, pp);
+ if (err < 0) {
+ dev_info(&pdev->dev, "use SW buffer management\n");
+ pp->bm_priv = NULL;
+ }
+ }
+
err = mvneta_init(&pdev->dev, pp);
if (err < 0)
- goto err_free_stats;
+ goto err_netdev;
err = mvneta_port_power_up(pp, phy_mode);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- goto err_free_stats;
+ goto err_netdev;
}
- dram_target_info = mv_mbus_dram_info();
- if (dram_target_info)
- mvneta_conf_mbus_windows(pp, dram_target_info);
-
for_each_present_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
@@ -3720,7 +4139,7 @@ static int mvneta_probe(struct platform_device *pdev)
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
@@ -3744,6 +4163,13 @@ static int mvneta_probe(struct platform_device *pdev)
return 0;
+err_netdev:
+ unregister_netdev(dev);
+ if (pp->bm_priv) {
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
+ 1 << pp->id);
+ }
err_free_stats:
free_percpu(pp->stats);
err_free_ports:
@@ -3775,6 +4201,12 @@ static int mvneta_remove(struct platform_device *pdev)
of_node_put(pp->phy_node);
free_netdev(dev);
+ if (pp->bm_priv) {
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
+ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
+ 1 << pp->id);
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
new file mode 100644
index 000000000000..01fccec632ec
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -0,0 +1,487 @@
+/*
+ * Driver for Marvell NETA network controller Buffer Manager.
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/hwbm.h>
+#include "mvneta_bm.h"
+
+#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
+#define MVNETA_BM_DRIVER_VERSION "1.0"
+
+static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
+{
+ writel(data, priv->reg_base + offset);
+}
+
+static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
+{
+ return readl(priv->reg_base + offset);
+}
+
+static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
+ val |= MVNETA_BM_POOL_ENABLE_MASK;
+ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
+
+ /* Clear BM cause register */
+ mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
+}
+
+static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
+ val &= ~MVNETA_BM_POOL_ENABLE_MASK;
+ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
+}
+
+static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
+ val |= mask;
+ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
+}
+
+static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
+ val &= ~mask;
+ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
+}
+
+static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
+ u8 target_id, u8 attr)
+{
+ u32 val;
+
+ val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
+ val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
+ val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
+ val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
+ val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);
+
+ mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
+}
+
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{
+ struct mvneta_bm_pool *bm_pool =
+ (struct mvneta_bm_pool *)hwbm_pool->priv;
+ struct mvneta_bm *priv = bm_pool->priv;
+ dma_addr_t phys_addr;
+
+ /* In order to update the buf_cookie field of the RX descriptor
+ * properly, the BM hardware expects the buffer's virtual address
+ * to be placed in the first four bytes of the mapped buffer.
+ */
+ *(u32 *)buf = (u32)buf;
+ phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
+ return -ENOMEM;
+
+ mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_construct);
+
+/* Create pool */
+static int mvneta_bm_pool_create(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{
+ struct platform_device *pdev = priv->pdev;
+ u8 target_id, attr;
+ int size_bytes, err;
+ size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
+ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
+ &bm_pool->phys_addr,
+ GFP_KERNEL);
+ if (!bm_pool->virt_addr)
+ return -ENOMEM;
+
+ if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
+ dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
+ bm_pool->phys_addr);
+ dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+ bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
+ return -ENOMEM;
+ }
+
+ err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
+ &attr);
+ if (err < 0) {
+ dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
+ bm_pool->phys_addr);
+ return err;
+ }
+
+ /* Set pool address */
+ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
+ bm_pool->phys_addr);
+
+ mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
+ mvneta_bm_pool_enable(priv, bm_pool->id);
+
+ return 0;
+}
+
+/* Notify the driver that a BM pool is being used as a specific type and
+ * return the pool pointer on success
+ */
+struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
+ enum mvneta_bm_type type, u8 port_id,
+ int pkt_size)
+{
+ struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
+ int num, err;
+
+ if (new_pool->type == MVNETA_BM_LONG &&
+ new_pool->port_map != 1 << port_id) {
+ dev_err(&priv->pdev->dev,
+ "long pool cannot be shared by the ports\n");
+ return NULL;
+ }
+
+ if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
+ dev_err(&priv->pdev->dev,
+ "mixing pools' types between the ports is forbidden\n");
+ return NULL;
+ }
+
+ if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
+ new_pool->pkt_size = pkt_size;
+
+ /* Allocate buffers in case BM pool hasn't been used yet */
+ if (new_pool->type == MVNETA_BM_FREE) {
+ struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;
+
+ new_pool->priv = priv;
+ new_pool->type = type;
+ new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
+ hwbm_pool->frag_size =
+ SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ hwbm_pool->construct = mvneta_bm_construct;
+ hwbm_pool->priv = new_pool;
+
+ /* Create new pool */
+ err = mvneta_bm_pool_create(priv, new_pool);
+ if (err) {
+ dev_err(&priv->pdev->dev, "fail to create pool %d\n",
+ new_pool->id);
+ return NULL;
+ }
+
+ /* Allocate buffers for this pool */
+ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+ if (num != hwbm_pool->size) {
+ WARN(1, "pool %d: %d of %d allocated\n",
+ new_pool->id, num, hwbm_pool->size);
+ return NULL;
+ }
+ }
+
+ return new_pool;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);
+
+/* Free all buffers from the pool */
+void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
+ u8 port_map)
+{
+ int i;
+
+ bm_pool->port_map &= ~port_map;
+ if (bm_pool->port_map)
+ return;
+
+ mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
+
+ for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
+ dma_addr_t buf_phys_addr;
+ u32 *vaddr;
+
+ /* Get buffer physical address (indirect access) */
+ buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);
+
+ /* Work around a problem seen while destroying the pool: a read
+ * access to the BPPI may spuriously return 0.
+ */
+ if (buf_phys_addr == 0)
+ continue;
+
+ vaddr = phys_to_virt(buf_phys_addr);
+ if (!vaddr)
+ break;
+
+ dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+ hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
+ }
+
+ mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
+
+ /* Update BM driver with number of buffers removed from pool */
+ bm_pool->hwbm_pool.buf_num -= i;
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
+
+/* Cleanup pool */
+void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool, u8 port_map)
+{
+ struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
+ bm_pool->port_map &= ~port_map;
+ if (bm_pool->port_map)
+ return;
+
+ bm_pool->type = MVNETA_BM_FREE;
+
+ mvneta_bm_bufs_free(priv, bm_pool, port_map);
+ if (hwbm_pool->buf_num)
+ WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+
+ if (bm_pool->virt_addr) {
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(u32) * hwbm_pool->size,
+ bm_pool->virt_addr, bm_pool->phys_addr);
+ bm_pool->virt_addr = NULL;
+ }
+
+ mvneta_bm_pool_disable(priv, bm_pool->id);
+}
+EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);
+
+static void mvneta_bm_pools_init(struct mvneta_bm *priv)
+{
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct mvneta_bm_pool *bm_pool;
+ char prop[15];
+ u32 size;
+ int i;
+
+ /* Activate BM unit */
+ mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);
+
+ /* Create all pools with maximum size */
+ for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
+ bm_pool = &priv->bm_pools[i];
+ bm_pool->id = i;
+ bm_pool->type = MVNETA_BM_FREE;
+
+ /* Reset read pointer */
+ mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);
+
+ /* Reset write pointer */
+ mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);
+
+ /* Configure pool size according to DT or use default value */
+ sprintf(prop, "pool%d,capacity", i);
+ if (of_property_read_u32(dn, prop, &size)) {
+ size = MVNETA_BM_POOL_CAP_DEF;
+ } else if (size > MVNETA_BM_POOL_CAP_MAX) {
+ dev_warn(&priv->pdev->dev,
+ "Illegal pool %d capacity %d, set to %d\n",
+ i, size, MVNETA_BM_POOL_CAP_MAX);
+ size = MVNETA_BM_POOL_CAP_MAX;
+ } else if (size < MVNETA_BM_POOL_CAP_MIN) {
+ dev_warn(&priv->pdev->dev,
+ "Illegal pool %d capacity %d, set to %d\n",
+ i, size, MVNETA_BM_POOL_CAP_MIN);
+ size = MVNETA_BM_POOL_CAP_MIN;
+ } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
+ dev_warn(&priv->pdev->dev,
+ "Illegal pool %d capacity %d, round to %d\n",
+ i, size, ALIGN(size,
+ MVNETA_BM_POOL_CAP_ALIGN));
+ size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
+ }
+ bm_pool->hwbm_pool.size = size;
+
+ mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
+ bm_pool->hwbm_pool.size);
+
+ /* Obtain custom pkt_size from DT */
+ sprintf(prop, "pool%d,pkt-size", i);
+ if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
+ bm_pool->pkt_size = 0;
+ }
+}
+
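+/* Example binding only (unit address and values are made up; the
+ * compatible string and per-pool property names come from this driver):
+ *
+ *	bm: bm@c8000 {
+ *		compatible = "marvell,armada-380-neta-bm";
+ *		pool0,capacity = <1024>;
+ *		pool1,pkt-size = <512>;
+ *	};
+ */
+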
+static void mvneta_bm_default_set(struct mvneta_bm *priv)
+{
+ u32 val;
+
+ /* Mask BM all interrupts */
+ mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);
+
+ /* Clear BM cause register */
+ mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
+
+ /* Set BM configuration register */
+ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
+
+ /* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
+ val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
+ val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
+ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
+}
+
+static int mvneta_bm_init(struct mvneta_bm *priv)
+{
+ mvneta_bm_default_set(priv);
+
+ /* Allocate and initialize BM pools structures */
+ priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
+ sizeof(struct mvneta_bm_pool),
+ GFP_KERNEL);
+ if (!priv->bm_pools)
+ return -ENOMEM;
+
+ mvneta_bm_pools_init(priv);
+
+ return 0;
+}
+
+static int mvneta_bm_get_sram(struct device_node *dn,
+ struct mvneta_bm *priv)
+{
+ priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
+ if (!priv->bppi_pool)
+ return -ENOMEM;
+
+ priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
+ MVNETA_BM_BPPI_SIZE,
+ &priv->bppi_phys_addr);
+ if (!priv->bppi_virt_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mvneta_bm_put_sram(struct mvneta_bm *priv)
+{
+ gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
+ MVNETA_BM_BPPI_SIZE);
+}
+
+static int mvneta_bm_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct mvneta_bm *priv;
+ struct resource *res;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->reg_base))
+ return PTR_ERR(priv->reg_base);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err < 0)
+ return err;
+
+ err = mvneta_bm_get_sram(dn, priv);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to allocate internal memory\n");
+ goto err_clk;
+ }
+
+ priv->pdev = pdev;
+
+ /* Initialize buffer manager internals */
+ err = mvneta_bm_init(priv);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to initialize controller\n");
+ goto err_sram;
+ }
+
+ dn->data = priv;
+ platform_set_drvdata(pdev, priv);
+
+ dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");
+
+ return 0;
+
+err_sram:
+ mvneta_bm_put_sram(priv);
+err_clk:
+ clk_disable_unprepare(priv->clk);
+ return err;
+}
+
+static int mvneta_bm_remove(struct platform_device *pdev)
+{
+ struct mvneta_bm *priv = platform_get_drvdata(pdev);
+ u8 all_ports_map = 0xff;
+ int i = 0;
+
+ for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
+ struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];
+
+ mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
+ }
+
+ mvneta_bm_put_sram(priv);
+
+ /* Deactivate BM unit */
+ mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mvneta_bm_match[] = {
+ { .compatible = "marvell,armada-380-neta-bm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvneta_bm_match);
+
+static struct platform_driver mvneta_bm_driver = {
+ .probe = mvneta_bm_probe,
+ .remove = mvneta_bm_remove,
+ .driver = {
+ .name = MVNETA_BM_DRIVER_NAME,
+ .of_match_table = mvneta_bm_match,
+ },
+};
+
+module_platform_driver(mvneta_bm_driver);
+
+MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
new file mode 100644
index 000000000000..e74fd44a92f7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -0,0 +1,182 @@
+/*
+ * Driver for Marvell NETA network controller Buffer Manager.
+ *
+ * Copyright (C) 2015 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _MVNETA_BM_H_
+#define _MVNETA_BM_H_
+
+/* BM Configuration Register */
+#define MVNETA_BM_CONFIG_REG 0x0
+#define MVNETA_BM_STATUS_MASK 0x30
+#define MVNETA_BM_ACTIVE_MASK BIT(4)
+#define MVNETA_BM_MAX_IN_BURST_SIZE_MASK 0x60000
+#define MVNETA_BM_MAX_IN_BURST_SIZE_16BP BIT(18)
+#define MVNETA_BM_EMPTY_LIMIT_MASK BIT(19)
+
+/* BM Activation Register */
+#define MVNETA_BM_COMMAND_REG 0x4
+#define MVNETA_BM_START_MASK BIT(0)
+#define MVNETA_BM_STOP_MASK BIT(1)
+#define MVNETA_BM_PAUSE_MASK BIT(2)
+
+/* BM Xbar interface Register */
+#define MVNETA_BM_XBAR_01_REG 0x8
+#define MVNETA_BM_XBAR_23_REG 0xc
+#define MVNETA_BM_XBAR_POOL_REG(pool) \
+ (((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG)
+#define MVNETA_BM_TARGET_ID_OFFS(pool) (((pool) & 1) ? 16 : 0)
+#define MVNETA_BM_TARGET_ID_MASK(pool) \
+ (0xf << MVNETA_BM_TARGET_ID_OFFS(pool))
+#define MVNETA_BM_TARGET_ID_VAL(pool, id) \
+ ((id) << MVNETA_BM_TARGET_ID_OFFS(pool))
+#define MVNETA_BM_XBAR_ATTR_OFFS(pool) (((pool) & 1) ? 20 : 4)
+#define MVNETA_BM_XBAR_ATTR_MASK(pool) \
+ (0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool))
+#define MVNETA_BM_XBAR_ATTR_VAL(pool, attr) \
+ ((attr) << MVNETA_BM_XBAR_ATTR_OFFS(pool))
+
+/* Address of External Buffer Pointers Pool Register */
+#define MVNETA_BM_POOL_BASE_REG(pool) (0x10 + ((pool) << 4))
+#define MVNETA_BM_POOL_ENABLE_MASK BIT(0)
+
+/* External Buffer Pointers Pool RD pointer Register */
+#define MVNETA_BM_POOL_READ_PTR_REG(pool) (0x14 + ((pool) << 4))
+#define MVNETA_BM_POOL_SET_READ_PTR_MASK 0xfffc
+#define MVNETA_BM_POOL_GET_READ_PTR_OFFS 16
+#define MVNETA_BM_POOL_GET_READ_PTR_MASK 0xfffc0000
+
+/* External Buffer Pointers Pool WR pointer */
+#define MVNETA_BM_POOL_WRITE_PTR_REG(pool) (0x18 + ((pool) << 4))
+#define MVNETA_BM_POOL_SET_WRITE_PTR_OFFS 0
+#define MVNETA_BM_POOL_SET_WRITE_PTR_MASK 0xfffc
+#define MVNETA_BM_POOL_GET_WRITE_PTR_OFFS 16
+#define MVNETA_BM_POOL_GET_WRITE_PTR_MASK 0xfffc0000
+
+/* External Buffer Pointers Pool Size Register */
+#define MVNETA_BM_POOL_SIZE_REG(pool) (0x1c + ((pool) << 4))
+#define MVNETA_BM_POOL_SIZE_MASK 0x3fff
+
+/* BM Interrupt Cause Register */
+#define MVNETA_BM_INTR_CAUSE_REG (0x50)
+
+/* BM interrupt Mask Register */
+#define MVNETA_BM_INTR_MASK_REG (0x54)
+
+/* Other definitions */
+#define MVNETA_BM_SHORT_PKT_SIZE 256
+#define MVNETA_BM_POOLS_NUM 4
+#define MVNETA_BM_POOL_CAP_MIN 128
+#define MVNETA_BM_POOL_CAP_DEF 2048
+#define MVNETA_BM_POOL_CAP_MAX \
+ (16 * 1024 - MVNETA_BM_POOL_CAP_ALIGN)
+#define MVNETA_BM_POOL_CAP_ALIGN 32
+#define MVNETA_BM_POOL_PTR_ALIGN 32
+
+#define MVNETA_BM_POOL_ACCESS_OFFS 8
+
+#define MVNETA_BM_BPPI_SIZE 0x100000
+
+#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
+
+enum mvneta_bm_type {
+ MVNETA_BM_FREE,
+ MVNETA_BM_LONG,
+ MVNETA_BM_SHORT
+};
+
+struct mvneta_bm {
+ void __iomem *reg_base;
+ struct clk *clk;
+ struct platform_device *pdev;
+
+ struct gen_pool *bppi_pool;
+ /* BPPI virtual base address */
+ void __iomem *bppi_virt_addr;
+ /* BPPI physical base address */
+ dma_addr_t bppi_phys_addr;
+
+ /* BM pools */
+ struct mvneta_bm_pool *bm_pools;
+};
+
+struct mvneta_bm_pool {
+ struct hwbm_pool hwbm_pool;
+ /* Pool number in the range 0-3 */
+ u8 id;
+ enum mvneta_bm_type type;
+
+ /* Packet size */
+ int pkt_size;
+	/* Size of the buffer accessed through DMA */
+ u32 buf_size;
+
+ /* BPPE virtual base address */
+ u32 *virt_addr;
+ /* BPPE physical base address */
+ dma_addr_t phys_addr;
+
+ /* Ports using BM pool */
+ u8 port_map;
+
+ struct mvneta_bm *priv;
+};
+
+/* Declarations and definitions */
+void *mvneta_frag_alloc(unsigned int frag_size);
+void mvneta_frag_free(unsigned int frag_size, void *data);
+
+#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE)
+void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool, u8 port_map);
+void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
+ u8 port_map);
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf);
+int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool);
+struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
+ enum mvneta_bm_type type, u8 port_id,
+ int pkt_size);
+
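+/* Buffer pointers are pushed to and popped from a pool through its window
+ * in the BPPI region; pool windows are spaced
+ * (1 << MVNETA_BM_POOL_ACCESS_OFFS) bytes apart.
+ */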
+static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ dma_addr_t buf_phys_addr)
+{
+ writel_relaxed(buf_phys_addr, priv->bppi_virt_addr +
+ (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
+}
+
+static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{
+ return readl_relaxed(priv->bppi_virt_addr +
+ (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
+}
+#else
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+					  struct mvneta_bm_pool *bm_pool,
+					  u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+				       struct mvneta_bm_pool *bm_pool,
+				       u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+					struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *
+mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
+		   enum mvneta_bm_type type, u8 port_id, int pkt_size)
+{ return NULL; }
+
+static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool,
+ dma_addr_t buf_phys_addr) {}
+
+static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
+ struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+#endif /* CONFIG_MVNETA_BM */
+#endif
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index c797971aefab..868a957f24bb 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -321,7 +321,6 @@
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa
-#define MVPP2_CPU_D_CACHE_LINE_SIZE 32
#define MVPP2_TX_CSUM_MAX_SIZE 9800
/* Timeout constants */
@@ -377,7 +376,7 @@
#define MVPP2_RX_PKT_SIZE(mtu) \
ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
- ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+ ETH_HLEN + ETH_FCS_LEN, cache_line_size())
#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
@@ -4493,10 +4492,6 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
if (!aggr_txq->descs)
return -ENOMEM;
- /* Make sure descriptor address is cache line size aligned */
- BUG_ON(aggr_txq->descs !=
- PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
aggr_txq->last_desc = aggr_txq->size - 1;
/* Aggr TXQ no reset WA */
@@ -4526,9 +4521,6 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
if (!rxq->descs)
return -ENOMEM;
- BUG_ON(rxq->descs !=
- PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
rxq->last_desc = rxq->size - 1;
/* Zero occupied and non-occupied counters - direct access */
@@ -4616,10 +4608,6 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
if (!txq->descs)
return -ENOMEM;
- /* Make sure descriptor address is cache line size aligned */
- BUG_ON(txq->descs !=
- PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
-
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
@@ -6059,8 +6047,10 @@ static int mvpp2_port_init(struct mvpp2_port *port)
/* Map physical Rx queue to port's logical Rx queue */
rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
- if (!rxq)
+ if (!rxq) {
+ err = -ENOMEM;
goto err_free_percpu;
+ }
/* Map this Rx queue to a physical queue */
rxq->id = port->first_rxq + queue;
rxq->port = port->id;
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
new file mode 100644
index 000000000000..698bb89aa901
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -0,0 +1,17 @@
+config NET_VENDOR_MEDIATEK
+ bool "MediaTek ethernet driver"
+ depends on ARCH_MEDIATEK
+ ---help---
+	  If you have a MediaTek SoC with Ethernet, say Y.
+
+if NET_VENDOR_MEDIATEK
+
+config NET_MEDIATEK_SOC
+	tristate "MediaTek MT7623 Gigabit Ethernet support"
+	depends on NET_VENDOR_MEDIATEK && (MACH_MT7623 || MACH_MT2701)
+	select PHYLIB
+	---help---
+	  This driver supports the Gigabit Ethernet MACs in the
+	  MediaTek MT2701/MT7623 chipset family.
+
+endif #NET_VENDOR_MEDIATEK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
new file mode 100644
index 000000000000..aa3f1c8ccd4a
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the MediaTek SoCs' built-in Ethernet MACs
+#
+
+obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
new file mode 100644
index 000000000000..e0b68afea56e
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -0,0 +1,1808 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/if_vlan.h>
+#include <linux/reset.h>
+#include <linux/tcp.h>
+
+#include "mtk_eth_soc.h"
+
+static int mtk_msg_level = -1;
+module_param_named(msg_level, mtk_msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
+
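+/* map an ethtool statistic name to the index of its u64 counter inside
+ * struct mtk_hw_stats
+ */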
+#define MTK_ETHTOOL_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+
+/* strings used by ethtool */
+static const struct mtk_ethtool_stats {
+ char str[ETH_GSTRING_LEN];
+ u32 offset;
+} mtk_ethtool_stats[] = {
+ MTK_ETHTOOL_STAT(tx_bytes),
+ MTK_ETHTOOL_STAT(tx_packets),
+ MTK_ETHTOOL_STAT(tx_skip),
+ MTK_ETHTOOL_STAT(tx_collisions),
+ MTK_ETHTOOL_STAT(rx_bytes),
+ MTK_ETHTOOL_STAT(rx_packets),
+ MTK_ETHTOOL_STAT(rx_overflow),
+ MTK_ETHTOOL_STAT(rx_fcs_errors),
+ MTK_ETHTOOL_STAT(rx_short_errors),
+ MTK_ETHTOOL_STAT(rx_long_errors),
+ MTK_ETHTOOL_STAT(rx_checksum_errors),
+ MTK_ETHTOOL_STAT(rx_flow_control_packets),
+};
+
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+{
+ __raw_writel(val, eth->base + reg);
+}
+
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
+{
+ return __raw_readl(eth->base + reg);
+}
+
+static int mtk_mdio_busy_wait(struct mtk_eth *eth)
+{
+ unsigned long t_start = jiffies;
+
+ while (1) {
+ if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
+ return 0;
+ if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
+ break;
+ usleep_range(10, 20);
+ }
+
+ dev_err(eth->dev, "mdio: MDIO timeout\n");
+ return -1;
+}
+
+u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+ u32 phy_register, u32 write_data)
+{
+ if (mtk_mdio_busy_wait(eth))
+ return -1;
+
+ write_data &= 0xffff;
+
+ mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
+ (phy_register << PHY_IAC_REG_SHIFT) |
+ (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
+ MTK_PHY_IAC);
+
+ if (mtk_mdio_busy_wait(eth))
+ return -1;
+
+ return 0;
+}
+
+u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+{
+ u32 d;
+
+ if (mtk_mdio_busy_wait(eth))
+ return 0xffff;
+
+ mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
+ (phy_reg << PHY_IAC_REG_SHIFT) |
+ (phy_addr << PHY_IAC_ADDR_SHIFT),
+ MTK_PHY_IAC);
+
+ if (mtk_mdio_busy_wait(eth))
+ return 0xffff;
+
+ d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
+
+ return d;
+}
+
+static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 val)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
+}
+
+static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read(eth, phy_addr, phy_reg);
+}
+
+static void mtk_phy_link_adjust(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
+ MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
+ MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
+ MAC_MCR_BACKPR_EN;
+
+ switch (mac->phy_dev->speed) {
+ case SPEED_1000:
+ mcr |= MAC_MCR_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr |= MAC_MCR_SPEED_100;
+ break;
+	}
+
+ if (mac->phy_dev->link)
+ mcr |= MAC_MCR_FORCE_LINK;
+
+ if (mac->phy_dev->duplex)
+ mcr |= MAC_MCR_FORCE_DPX;
+
+ if (mac->phy_dev->pause)
+ mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+
+ if (mac->phy_dev->link)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+}
+
+static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
+ struct device_node *phy_node)
+{
+ const __be32 *_addr = NULL;
+ struct phy_device *phydev;
+ int phy_mode, addr;
+
+ _addr = of_get_property(phy_node, "reg", NULL);
+
+ if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
+ pr_err("%s: invalid phy address\n", phy_node->name);
+ return -EINVAL;
+ }
+ addr = be32_to_cpu(*_addr);
+ phy_mode = of_get_phy_mode(phy_node);
+ if (phy_mode < 0) {
+ dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
+ return -EINVAL;
+ }
+
+ phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
+ mtk_phy_link_adjust, 0, phy_mode);
+ if (!phydev) {
+ dev_err(eth->dev, "could not connect to PHY\n");
+ return -ENODEV;
+ }
+
+ dev_info(eth->dev,
+ "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
+ mac->id, phydev_name(phydev), phydev->phy_id,
+ phydev->drv->name);
+
+ mac->phy_dev = phydev;
+
+ return 0;
+}
+
+static int mtk_phy_connect(struct mtk_mac *mac)
+{
+ struct mtk_eth *eth = mac->hw;
+ struct device_node *np;
+	u32 val, ge_mode;
+	int err;
+
+ np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+ if (!np)
+ return -ENODEV;
+
+ switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_RGMII:
+ ge_mode = 0;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ ge_mode = 1;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ ge_mode = 2;
+ break;
+ default:
+ dev_err(eth->dev, "invalid phy_mode\n");
+		return -EINVAL;
+ }
+
+ /* put the gmac into the right mode */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+ val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+	err = mtk_phy_connect_node(eth, mac, np);
+	if (err)
+		return err;
+
+ mac->phy_dev->autoneg = AUTONEG_ENABLE;
+ mac->phy_dev->speed = 0;
+ mac->phy_dev->duplex = 0;
+ mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+ mac->phy_dev->advertising = mac->phy_dev->supported |
+ ADVERTISED_Autoneg;
+ phy_start_aneg(mac->phy_dev);
+
+ return 0;
+}
+
+static int mtk_mdio_init(struct mtk_eth *eth)
+{
+ struct device_node *mii_np;
+ int err;
+
+ mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ if (!mii_np) {
+ dev_err(eth->dev, "no %s child node found", "mdio-bus");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(mii_np)) {
+ err = 0;
+ goto err_put_node;
+ }
+
+ eth->mii_bus = mdiobus_alloc();
+ if (!eth->mii_bus) {
+ err = -ENOMEM;
+ goto err_put_node;
+ }
+
+ eth->mii_bus->name = "mdio";
+ eth->mii_bus->read = mtk_mdio_read;
+ eth->mii_bus->write = mtk_mdio_write;
+ eth->mii_bus->priv = eth;
+ eth->mii_bus->parent = eth->dev;
+
+ snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
+ err = of_mdiobus_register(eth->mii_bus, mii_np);
+ if (err)
+ goto err_free_bus;
+
+ return 0;
+
+err_free_bus:
+ kfree(eth->mii_bus);
+
+err_put_node:
+ of_node_put(mii_np);
+ eth->mii_bus = NULL;
+ return err;
+}
+
+static void mtk_mdio_cleanup(struct mtk_eth *eth)
+{
+ if (!eth->mii_bus)
+ return;
+
+ mdiobus_unregister(eth->mii_bus);
+ of_node_put(eth->mii_bus->dev.of_node);
+ kfree(eth->mii_bus);
+}
+
+static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+ u32 val;
+
+ val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ /* flush write */
+ mtk_r32(eth, MTK_QDMA_INT_MASK);
+}
+
+static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+ u32 val;
+
+ val = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ /* flush write */
+ mtk_r32(eth, MTK_QDMA_INT_MASK);
+}
+
+static int mtk_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret = eth_mac_addr(dev, p);
+ struct mtk_mac *mac = netdev_priv(dev);
+ const char *macaddr = dev->dev_addr;
+ unsigned long flags;
+
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&mac->hw->page_lock, flags);
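+	/* the MAC address is split across two registers: bytes 0-1 go into
+	 * ADRH, bytes 2-5 into ADRL
+	 */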
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MTK_GDMA_MAC_ADRH(mac->id));
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MTK_GDMA_MAC_ADRL(mac->id));
+ spin_unlock_irqrestore(&mac->hw->page_lock, flags);
+
+ return 0;
+}
+
+void mtk_stats_update_mac(struct mtk_mac *mac)
+{
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ unsigned int base = MTK_GDM1_TX_GBCNT;
+ u64 stats;
+
+ base += hw_stats->reg_offset;
+
+ u64_stats_update_begin(&hw_stats->syncp);
+
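+	/* the byte counters are 64 bits wide, exposed as a low and a high
+	 * 32-bit register; fold the high word in when it is non-zero
+	 */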
+ hw_stats->rx_bytes += mtk_r32(mac->hw, base);
+ stats = mtk_r32(mac->hw, base + 0x04);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
+ hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
+ hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
+ hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
+ hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
+ hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, base + 0x24);
+ hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
+ hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
+ stats = mtk_r32(mac->hw, base + 0x34);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+ u64_stats_update_end(&hw_stats->syncp);
+}
+
+static void mtk_stats_update(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->mac[i] || !eth->mac[i]->hw_stats)
+ continue;
+ if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
+ mtk_stats_update_mac(eth->mac[i]);
+ spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
+ }
+ }
+}
+
+static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ unsigned int start;
+
+ if (netif_running(dev) && netif_device_present(dev)) {
+ if (spin_trylock(&hw_stats->stats_lock)) {
+ mtk_stats_update_mac(mac);
+ spin_unlock(&hw_stats->stats_lock);
+ }
+ }
+
+ do {
+ start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
+ storage->rx_packets = hw_stats->rx_packets;
+ storage->tx_packets = hw_stats->tx_packets;
+ storage->rx_bytes = hw_stats->rx_bytes;
+ storage->tx_bytes = hw_stats->tx_bytes;
+ storage->collisions = hw_stats->tx_collisions;
+ storage->rx_length_errors = hw_stats->rx_short_errors +
+ hw_stats->rx_long_errors;
+ storage->rx_over_errors = hw_stats->rx_overflow;
+ storage->rx_crc_errors = hw_stats->rx_fcs_errors;
+ storage->rx_errors = hw_stats->rx_checksum_errors;
+ storage->tx_aborted_errors = hw_stats->tx_skip;
+ } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
+
+ storage->tx_errors = dev->stats.tx_errors;
+ storage->rx_dropped = dev->stats.rx_dropped;
+ storage->tx_dropped = dev->stats.tx_dropped;
+
+ return storage;
+}
+
+static inline int mtk_max_frag_size(int mtu)
+{
+ /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
+ if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
+ mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
+ return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+static inline int mtk_max_buf_size(int frag_size)
+{
+ int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
+
+ return buf_size;
+}
+
+static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
+ struct mtk_rx_dma *dma_rxd)
+{
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
+ rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+}
+
+/* the QDMA core needs scratch memory to be set up */
+static int mtk_init_fq_dma(struct mtk_eth *eth)
+{
+ dma_addr_t phy_ring_head, phy_ring_tail;
+ int cnt = MTK_DMA_SIZE;
+ dma_addr_t dma_addr;
+ int i;
+
+ eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ cnt * sizeof(struct mtk_tx_dma),
+ &phy_ring_head,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (unlikely(!eth->scratch_ring))
+ return -ENOMEM;
+
+	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
+				    GFP_KERNEL);
+	if (unlikely(!eth->scratch_head))
+		return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dev,
+ eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ return -ENOMEM;
+
+ memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
+ phy_ring_tail = phy_ring_head +
+ (sizeof(struct mtk_tx_dma) * (cnt - 1));
+
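+	/* link the scratch descriptors into a list: txd1 points at the
+	 * descriptor's data page, txd2 at the next descriptor
+	 */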
+ for (i = 0; i < cnt; i++) {
+ eth->scratch_ring[i].txd1 =
+ (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
+ if (i < cnt - 1)
+ eth->scratch_ring[i].txd2 = (phy_ring_head +
+ ((i + 1) * sizeof(struct mtk_tx_dma)));
+ eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
+ }
+
+ mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
+ mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+
+ return 0;
+}
+
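+/* the hardware reports descriptors by their DMA address; translate such an
+ * address back into a CPU pointer within the ring
+ */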
+static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+{
+ void *ret = ring->dma;
+
+ return ret + (desc - ring->phys);
+}
+
+static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+ struct mtk_tx_dma *txd)
+{
+ int idx = txd - ring->dma;
+
+ return &ring->buf[idx];
+}
+
+static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+{
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+ dma_unmap_single(dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+ dma_unmap_page(dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+ tx_buf->flags = 0;
+ if (tx_buf->skb &&
+ (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+}
+
+static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ int tx_num, struct mtk_tx_ring *ring, bool gso)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *itxd, *txd;
+ struct mtk_tx_buf *tx_buf;
+ unsigned long flags;
+ dma_addr_t mapped_addr;
+ unsigned int nr_frags;
+ int i, n_desc = 1;
+ u32 txd4 = 0;
+
+ itxd = ring->next_free;
+ if (itxd == ring->last_free)
+ return -ENOMEM;
+
+ /* set the forward port */
+ txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+
+ if (gso)
+ txd4 |= TX_DMA_TSO;
+
+ /* TX Checksum offload */
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ txd4 |= TX_DMA_CHKSUM;
+
+ /* VLAN header offload */
+ if (skb_vlan_tag_present(skb))
+ txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
+
+ mapped_addr = dma_map_single(&dev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ return -ENOMEM;
+
+ /* normally we can rely on the stack not calling this more than once,
+	 * however we have 2 queues running on the same ring so we need to lock
+ * the ring access
+ */
+ spin_lock_irqsave(&eth->page_lock, flags);
+ WRITE_ONCE(itxd->txd1, mapped_addr);
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
+
+ /* TX SG offload */
+ txd = itxd;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ unsigned int offset = 0;
+ int frag_size = skb_frag_size(frag);
+
+ while (frag_size) {
+ bool last_frag = false;
+ unsigned int frag_map_size;
+
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ if (txd == ring->last_free)
+ goto err_dma;
+
+ n_desc++;
+ frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
+ mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+ frag_map_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ goto err_dma;
+
+ if (i == nr_frags - 1 &&
+ (frag_size - frag_map_size) == 0)
+ last_frag = true;
+
+ WRITE_ONCE(txd->txd1, mapped_addr);
+ WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
+ TX_DMA_PLEN0(frag_map_size) |
+ last_frag * TX_DMA_LS0) |
+ mac->id);
+ WRITE_ONCE(txd->txd4, 0);
+
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+
+ tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
+ frag_size -= frag_map_size;
+ offset += frag_map_size;
+ }
+ }
+
+ /* store skb to cleanup */
+ tx_buf->skb = skb;
+
+ WRITE_ONCE(itxd->txd4, txd4);
+ WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
+ (!nr_frags * TX_DMA_LS0)));
+
+ spin_unlock_irqrestore(&eth->page_lock, flags);
+
+ netdev_sent_queue(dev, skb->len);
+ skb_tx_timestamp(skb);
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+ mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+
+ return 0;
+
+err_dma:
+ do {
+ tx_buf = mtk_desc_to_tx_buf(ring, txd);
+
+ /* unmap dma */
+ mtk_tx_unmap(&dev->dev, tx_buf);
+
+ itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+ } while (itxd != txd);
+
+ spin_unlock_irqrestore(&eth->page_lock, flags);
+
+ return -ENOMEM;
+}
+
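+/* estimate the number of TX descriptors an skb will consume; a QDMA
+ * descriptor can reference two buffers, so two DMA segments fit per
+ * descriptor
+ */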
+static inline int mtk_cal_txd_req(struct sk_buff *skb)
+{
+ int i, nfrags;
+ struct skb_frag_struct *frag;
+
+ nfrags = 1;
+ if (skb_is_gso(skb)) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+			nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+					       MTK_TX_DMA_BUF_LEN);
+ }
+ } else {
+ nfrags += skb_shinfo(skb)->nr_frags;
+ }
+
+ return DIV_ROUND_UP(nfrags, 2);
+}
+
+static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct net_device_stats *stats = &dev->stats;
+ bool gso = false;
+ int tx_num;
+
+ tx_num = mtk_cal_txd_req(skb);
+ if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
+ netif_stop_queue(dev);
+ netif_err(eth, tx_queued, dev,
+ "Tx Ring full when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ /* TSO: fill MSS info in tcp checksum field */
+ if (skb_is_gso(skb)) {
+ if (skb_cow_head(skb, 0)) {
+ netif_warn(eth, tx_err, dev,
+ "GSO expand head fail.\n");
+ goto drop;
+ }
+
+ if (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ gso = true;
+ tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
+ }
+ }
+
+ if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
+ goto drop;
+
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+ netif_stop_queue(dev);
+ if (unlikely(atomic_read(&ring->free_count) >
+ ring->thresh))
+ netif_wake_queue(dev);
+ }
+
+ return NETDEV_TX_OK;
+
+drop:
+ stats->tx_dropped++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ struct mtk_eth *eth, u32 rx_intr)
+{
+ struct mtk_rx_ring *ring = &eth->rx_ring;
+ int idx = ring->calc_idx;
+ struct sk_buff *skb;
+ u8 *data, *new_data;
+ struct mtk_rx_dma *rxd, trxd;
+ int done = 0;
+
+ while (done < budget) {
+ struct net_device *netdev;
+ unsigned int pktlen;
+ dma_addr_t dma_addr;
+ int mac = 0;
+
+ idx = NEXT_RX_DESP_IDX(idx);
+ rxd = &ring->dma[idx];
+ data = ring->data[idx];
+
+ mtk_rx_get_desc(&trxd, rxd);
+ if (!(trxd.rxd2 & RX_DMA_DONE))
+ break;
+
+		/* find out which mac the packet comes from; values start at 1 */
+ mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
+ RX_DMA_FPORT_MASK;
+ mac--;
+
+ netdev = eth->netdev[mac];
+
+		/* allocate a replacement buffer first so the ring slot is
+		 * never left without backing memory
+		 */
+ new_data = napi_alloc_frag(ring->frag_size);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+ dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+ new_data + NET_SKB_PAD,
+ ring->buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+ skb_free_frag(new_data);
+ goto release_desc;
+ }
+
+ /* receive data */
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ put_page(virt_to_head_page(new_data));
+ goto release_desc;
+ }
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+ dma_unmap_single(&netdev->dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+ skb->dev = netdev;
+ skb_put(skb, pktlen);
+ if (trxd.rxd4 & RX_DMA_L4_VALID)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+ RX_DMA_VID(trxd.rxd3))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ RX_DMA_VID(trxd.rxd3));
+ napi_gro_receive(napi, skb);
+
+ ring->data[idx] = new_data;
+ rxd->rxd1 = (unsigned int)dma_addr;
+
+release_desc:
+ rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+
+ ring->calc_idx = idx;
+ /* make sure that all changes to the dma ring are flushed before
+ * we continue
+ */
+ wmb();
+ mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0);
+ done++;
+ }
+
+ if (done < budget)
+ mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
+
+ return done;
+}
+
+static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma *desc;
+ struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
+ int total = 0, done[MTK_MAX_DEVS];
+ unsigned int bytes[MTK_MAX_DEVS];
+ u32 cpu, dma;
+ static int condition;
+ int i;
+
+ memset(done, 0, sizeof(done));
+ memset(bytes, 0, sizeof(bytes));
+
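+	/* descriptors between the CPU release pointer (CRX) and the DMA
+	 * release pointer (DRX) have been sent by the hardware and can be
+	 * reclaimed
+	 */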
+ cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
+ dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+
+ desc = mtk_qdma_phys_to_virt(ring, cpu);
+
+ while ((cpu != dma) && budget) {
+ u32 next_cpu = desc->txd2;
+ int mac;
+
+ desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
+ if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
+ break;
+
+ mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
+ TX_DMA_FPORT_MASK;
+ mac--;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, desc);
+ skb = tx_buf->skb;
+ if (!skb) {
+ condition = 1;
+ break;
+ }
+
+ if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+ bytes[mac] += skb->len;
+ done[mac]++;
+ budget--;
+ }
+ mtk_tx_unmap(eth->dev, tx_buf);
+
+ ring->last_free->txd2 = next_cpu;
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+ cpu = next_cpu;
+ }
+
+ mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i] || !done[i])
+ continue;
+ netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
+ total += done[i];
+ }
+
+	/* re-read the hw index to make sure no new tx packets arrived */
+ if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
+ *tx_again = true;
+ else
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+
+ if (!total)
+ return 0;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i] ||
+ unlikely(!netif_queue_stopped(eth->netdev[i])))
+ continue;
+ if (atomic_read(&ring->free_count) > ring->thresh)
+ netif_wake_queue(eth->netdev[i]);
+ }
+
+ return total;
+}
+
+static int mtk_poll(struct napi_struct *napi, int budget)
+{
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ u32 status, status2, mask, tx_intr, rx_intr, status_intr;
+ int tx_done, rx_done;
+ bool tx_again = false;
+
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ status2 = mtk_r32(eth, MTK_INT_STATUS2);
+ tx_intr = MTK_TX_DONE_INT;
+ rx_intr = MTK_RX_DONE_INT;
+ status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
+ tx_done = 0;
+ rx_done = 0;
+	tx_again = false;
+
+ if (status & tx_intr)
+ tx_done = mtk_poll_tx(eth, budget, &tx_again);
+
+ if (status & rx_intr)
+ rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
+
+ if (unlikely(status2 & status_intr)) {
+ mtk_stats_update(eth);
+ mtk_w32(eth, status_intr, MTK_INT_STATUS2);
+ }
+
+ if (unlikely(netif_msg_intr(eth))) {
+ mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ netdev_info(eth->netdev[0],
+ "done tx %d, rx %d, intr 0x%08x/0x%x\n",
+ tx_done, rx_done, status, mask);
+ }
+
+ if (tx_again || rx_done == budget)
+ return budget;
+
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ if (status & (tx_intr | rx_intr))
+ return budget;
+
+ napi_complete(napi);
+ mtk_irq_enable(eth, tx_intr | rx_intr);
+
+ return rx_done;
+}
+
+static int mtk_tx_alloc(struct mtk_eth *eth)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ int i, sz = sizeof(*ring->dma);
+
+ ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
+ GFP_KERNEL);
+ if (!ring->buf)
+ goto no_tx_mem;
+
+ ring->dma = dma_alloc_coherent(eth->dev,
+ MTK_DMA_SIZE * sz,
+ &ring->phys,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!ring->dma)
+ goto no_tx_mem;
+
+ memset(ring->dma, 0, MTK_DMA_SIZE * sz);
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ int next = (i + 1) % MTK_DMA_SIZE;
+ u32 next_ptr = ring->phys + next * sz;
+
+ ring->dma[i].txd2 = next_ptr;
+ ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ }
+
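+	/* hold two descriptors in reserve so that next_free can never run
+	 * into last_free and make a full ring indistinguishable from an
+	 * empty one
+	 */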
+ atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
+ ring->next_free = &ring->dma[0];
+ ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
+ ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
+ MAX_SKB_FRAGS);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
+ mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_CRX_PTR);
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_DRX_PTR);
+
+ return 0;
+
+no_tx_mem:
+ return -ENOMEM;
+}
+
+static void mtk_tx_clean(struct mtk_eth *eth)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ int i;
+
+ if (ring->buf) {
+ for (i = 0; i < MTK_DMA_SIZE; i++)
+ mtk_tx_unmap(eth->dev, &ring->buf[i]);
+ kfree(ring->buf);
+ ring->buf = NULL;
+ }
+
+ if (ring->dma) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma,
+ ring->phys);
+ ring->dma = NULL;
+ }
+}
+
+static int mtk_rx_alloc(struct mtk_eth *eth)
+{
+ struct mtk_rx_ring *ring = &eth->rx_ring;
+ int i;
+
+ ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
+ ring->buf_size = mtk_max_buf_size(ring->frag_size);
+ ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
+ GFP_KERNEL);
+ if (!ring->data)
+ return -ENOMEM;
+
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ ring->data[i] = netdev_alloc_frag(ring->frag_size);
+ if (!ring->data[i])
+ return -ENOMEM;
+ }
+
+ ring->dma = dma_alloc_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma),
+ &ring->phys,
+ GFP_ATOMIC | __GFP_ZERO);
+ if (!ring->dma)
+ return -ENOMEM;
+
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ dma_addr_t dma_addr = dma_map_single(eth->dev,
+ ring->data[i] + NET_SKB_PAD,
+ ring->buf_size,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ return -ENOMEM;
+ ring->dma[i].rxd1 = (unsigned int)dma_addr;
+
+ ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ }
+ ring->calc_idx = MTK_DMA_SIZE - 1;
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0);
+ mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0);
+ mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0);
+ mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
+ mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
+
+ return 0;
+}
+
+static void mtk_rx_clean(struct mtk_eth *eth)
+{
+ struct mtk_rx_ring *ring = &eth->rx_ring;
+ int i;
+
+ if (ring->data && ring->dma) {
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ if (!ring->data[i])
+ continue;
+ if (!ring->dma[i].rxd1)
+ continue;
+ dma_unmap_single(eth->dev,
+ ring->dma[i].rxd1,
+ ring->buf_size,
+ DMA_FROM_DEVICE);
+ skb_free_frag(ring->data[i]);
+ }
+ kfree(ring->data);
+ ring->data = NULL;
+ }
+
+ if (ring->dma) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma,
+ ring->phys);
+ ring->dma = NULL;
+ }
+}
+
+/* wait for DMA to finish whatever it is doing before we start using it again */
+static int mtk_dma_busy_wait(struct mtk_eth *eth)
+{
+ unsigned long t_start = jiffies;
+
+ while (1) {
+ if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
+ break;
+ }
+
+ dev_err(eth->dev, "DMA init timeout\n");
+ return -1;
+}
+
+static int mtk_dma_init(struct mtk_eth *eth)
+{
+ int err;
+
+ if (mtk_dma_busy_wait(eth))
+ return -EBUSY;
+
+ /* QDMA needs scratch memory for internal reordering of the
+ * descriptors
+ */
+ err = mtk_init_fq_dma(eth);
+ if (err)
+ return err;
+
+ err = mtk_tx_alloc(eth);
+ if (err)
+ return err;
+
+ err = mtk_rx_alloc(eth);
+ if (err)
+ return err;
+
+ /* Enable random early drop and set drop threshold automatically */
+ mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
+ MTK_QDMA_FC_THRES);
+ mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+
+ return 0;
+}
+
+static void mtk_dma_free(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++)
+ if (eth->netdev[i])
+ netdev_reset_queue(eth->netdev[i]);
+ mtk_tx_clean(eth);
+ mtk_rx_clean(eth);
+ kfree(eth->scratch_head);
+}
+
+static void mtk_tx_timeout(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ eth->netdev[mac->id]->stats.tx_errors++;
+ netif_err(eth, tx_err, dev,
+ "transmit timed out\n");
+ schedule_work(&mac->pending_work);
+}
+
+static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+ u32 status;
+
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
+ if (likely(napi_schedule_prep(&eth->rx_napi)))
+ __napi_schedule(&eth->rx_napi);
+ } else {
+ mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+ }
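+	/* mask TX/RX interrupts; mtk_poll() re-enables them once the rings
+	 * have been drained
+	 */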
+ mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mtk_poll_controller(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
+
+ mtk_irq_disable(eth, int_mask);
+	mtk_handle_irq(dev->irq, eth);
+ mtk_irq_enable(eth, int_mask);
+}
+#endif
+
+static int mtk_start_dma(struct mtk_eth *eth)
+{
+ int err;
+
+ err = mtk_dma_init(eth);
+ if (err) {
+ mtk_dma_free(eth);
+ return err;
+ }
+
+ mtk_w32(eth,
+ MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
+ MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
+ MTK_RX_BT_32DWORDS,
+ MTK_QDMA_GLO_CFG);
+
+ return 0;
+}
+
+static int mtk_open(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ /* we run 2 netdevs on the same dma ring so we only bring it up once */
+ if (!atomic_read(&eth->dma_refcnt)) {
+ int err = mtk_start_dma(eth);
+
+ if (err)
+ return err;
+
+ napi_enable(&eth->rx_napi);
+ mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ }
+ atomic_inc(&eth->dma_refcnt);
+
+ phy_start(mac->phy_dev);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
+{
+ unsigned long flags;
+ u32 val;
+ int i;
+
+ /* stop the dma engine */
+ spin_lock_irqsave(&eth->page_lock, flags);
+ val = mtk_r32(eth, glo_cfg);
+ mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
+ glo_cfg);
+ spin_unlock_irqrestore(&eth->page_lock, flags);
+
+ /* wait for dma stop */
+ for (i = 0; i < 10; i++) {
+ val = mtk_r32(eth, glo_cfg);
+ if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
+ msleep(20);
+ continue;
+ }
+ break;
+ }
+}
+
+static int mtk_stop(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ netif_tx_disable(dev);
+ phy_stop(mac->phy_dev);
+
+ /* only shutdown DMA if this is the last user */
+ if (!atomic_dec_and_test(&eth->dma_refcnt))
+ return 0;
+
+ mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ napi_disable(&eth->rx_napi);
+
+ mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+
+ mtk_dma_free(eth);
+
+ return 0;
+}
+
+static int __init mtk_hw_init(struct mtk_eth *eth)
+{
+ int err, i;
+
+ /* reset the frame engine */
+ reset_control_assert(eth->rstc);
+ usleep_range(10, 20);
+ reset_control_deassert(eth->rstc);
+ usleep_range(10, 20);
+
+ /* Set GE2 driving and slew rate */
+ regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+
+ /* set GE2 TDSEL */
+ regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+
+ /* set GE2 TUNE */
+ regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+
+ /* GE1, Force 1000M/FD, FC ON */
+ mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
+
+ /* GE2, Force 1000M/FD, FC ON */
+ mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+
+ /* Enable RX VLan Offloading */
+ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+
+ err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ return err;
+
+ err = mtk_mdio_init(eth);
+ if (err)
+ return err;
+
+ /* disable delay and normal interrupt */
+ mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
+ mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ mtk_w32(eth, 0, MTK_RST_GL);
+
+ /* FE int grouping */
+ mtk_w32(eth, 0, MTK_FE_INT_GRP);
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+ u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+
+ /* setup the forward port to send frame to QDMA */
+ val &= ~0xffff;
+ val |= 0x5555;
+
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+
+ /* setup the mac dma */
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+ }
+
+ return 0;
+}
+
+static int __init mtk_init(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ const char *mac_addr;
+
+ mac_addr = of_get_mac_address(mac->of_node);
+ if (mac_addr)
+ ether_addr_copy(dev->dev_addr, mac_addr);
+
+ /* If the mac address is invalid, use random mac address */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ dev_err(eth->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ }
+
+ return mtk_phy_connect(mac);
+}
+
+static void mtk_uninit(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ phy_disconnect(mac->phy_dev);
+ mtk_mdio_cleanup(eth);
+	mtk_irq_disable(eth, ~0);
+	/* the IRQ was requested with devm_request_irq(), so the driver core
+	 * releases it; a manual free_irq() here would be a double free
+	 */
+}
+
+static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void mtk_pending_work(struct work_struct *work)
+{
+ struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
+ struct mtk_eth *eth = mac->hw;
+ struct net_device *dev = eth->netdev[mac->id];
+ int err;
+
+ rtnl_lock();
+ mtk_stop(dev);
+
+ err = mtk_open(dev);
+ if (err) {
+ netif_alert(eth, ifup, dev,
+ "Driver up/down cycle failed, closing device.\n");
+ dev_close(dev);
+ }
+ rtnl_unlock();
+}
+
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+		struct mtk_mac *mac;
+
+		if (!eth->netdev[i])
+			continue;
+
+		mac = netdev_priv(eth->netdev[i]);
+
+ unregister_netdev(eth->netdev[i]);
+ free_netdev(eth->netdev[i]);
+ cancel_work_sync(&mac->pending_work);
+ }
+
+ return 0;
+}
+
+static int mtk_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ int err;
+
+ err = phy_read_status(mac->phy_dev);
+ if (err)
+ return -ENODEV;
+
+ return phy_ethtool_gset(mac->phy_dev, cmd);
+}
+
+static int mtk_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (cmd->phy_address != mac->phy_dev->mdio.addr) {
+ mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
+ cmd->phy_address);
+ if (!mac->phy_dev)
+ return -ENODEV;
+ }
+
+ return phy_ethtool_sset(mac->phy_dev, cmd);
+}
+
+static void mtk_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
+ strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
+ info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
+}
+
+static u32 mtk_get_msglevel(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return mac->hw->msg_enable;
+}
+
+static void mtk_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ mac->hw->msg_enable = value;
+}
+
+static int mtk_nway_reset(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return genphy_restart_aneg(mac->phy_dev);
+}
+
+static u32 mtk_get_link(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ int err;
+
+ err = genphy_update_link(mac->phy_dev);
+ if (err)
+ return ethtool_op_get_link(dev);
+
+ return mac->phy_dev->link;
+}
+
+static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
+ memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int mtk_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(mtk_ethtool_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mtk_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hwstats = mac->hw_stats;
+ u64 *data_src, *data_dst;
+ unsigned int start;
+ int i;
+
+ if (netif_running(dev) && netif_device_present(dev)) {
+ if (spin_trylock(&hwstats->stats_lock)) {
+ mtk_stats_update_mac(mac);
+ spin_unlock(&hwstats->stats_lock);
+ }
+ }
+
+ do {
+		data_src = (u64 *)hwstats;
+ data_dst = data;
+ start = u64_stats_fetch_begin_irq(&hwstats->syncp);
+
+ for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+ *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+ } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
+}
+
+static const struct ethtool_ops mtk_ethtool_ops = {
+ .get_settings = mtk_get_settings,
+ .set_settings = mtk_set_settings,
+ .get_drvinfo = mtk_get_drvinfo,
+ .get_msglevel = mtk_get_msglevel,
+ .set_msglevel = mtk_set_msglevel,
+ .nway_reset = mtk_nway_reset,
+ .get_link = mtk_get_link,
+ .get_strings = mtk_get_strings,
+ .get_sset_count = mtk_get_sset_count,
+ .get_ethtool_stats = mtk_get_ethtool_stats,
+};
+
+static const struct net_device_ops mtk_netdev_ops = {
+ .ndo_init = mtk_init,
+ .ndo_uninit = mtk_uninit,
+ .ndo_open = mtk_open,
+ .ndo_stop = mtk_stop,
+ .ndo_start_xmit = mtk_start_xmit,
+ .ndo_set_mac_address = mtk_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = mtk_do_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_tx_timeout = mtk_tx_timeout,
+ .ndo_get_stats64 = mtk_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mtk_poll_controller,
+#endif
+};
+
+static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+{
+ struct mtk_mac *mac;
+ const __be32 *_id = of_get_property(np, "reg", NULL);
+ int id, err;
+
+ if (!_id) {
+ dev_err(eth->dev, "missing mac id\n");
+ return -EINVAL;
+ }
+
+ id = be32_to_cpup(_id);
+ if (id >= MTK_MAC_COUNT) {
+ dev_err(eth->dev, "%d is not a valid mac id\n", id);
+ return -EINVAL;
+ }
+
+ if (eth->netdev[id]) {
+ dev_err(eth->dev, "duplicate mac id found: %d\n", id);
+ return -EINVAL;
+ }
+
+ eth->netdev[id] = alloc_etherdev(sizeof(*mac));
+ if (!eth->netdev[id]) {
+ dev_err(eth->dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+ mac = netdev_priv(eth->netdev[id]);
+ eth->mac[id] = mac;
+ mac->id = id;
+ mac->hw = eth;
+ mac->of_node = np;
+ INIT_WORK(&mac->pending_work, mtk_pending_work);
+
+ mac->hw_stats = devm_kzalloc(eth->dev,
+ sizeof(*mac->hw_stats),
+ GFP_KERNEL);
+ if (!mac->hw_stats) {
+ dev_err(eth->dev, "failed to allocate counter memory\n");
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+ spin_lock_init(&mac->hw_stats->stats_lock);
+ mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+
+ SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+ eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
+ eth->netdev[id]->base_addr = (unsigned long)eth->base;
+ eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
+ ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+ eth->netdev[id]->features |= MTK_HW_FEATURES;
+ eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
+
+ err = register_netdev(eth->netdev[id]);
+ if (err) {
+ dev_err(eth->dev, "error bringing up device\n");
+ goto free_netdev;
+ }
+ eth->netdev[id]->irq = eth->irq;
+ netif_info(eth, probe, eth->netdev[id],
+ "mediatek frame engine at 0x%08lx, irq %d\n",
+ eth->netdev[id]->base_addr, eth->netdev[id]->irq);
+
+ return 0;
+
+free_netdev:
+ free_netdev(eth->netdev[id]);
+ return err;
+}
+
+static int mtk_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct device_node *mac_np;
+ const struct of_device_id *match;
+ struct mtk_soc_data *soc;
+ struct mtk_eth *eth;
+ int err;
+
+ err = device_reset(&pdev->dev);
+ if (err)
+ return err;
+
+ match = of_match_device(of_mtk_match, &pdev->dev);
+ soc = (struct mtk_soc_data *)match->data;
+
+ eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+ if (!eth)
+ return -ENOMEM;
+
+ eth->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(eth->base))
+ return PTR_ERR(eth->base);
+
+ spin_lock_init(&eth->page_lock);
+
+ eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,ethsys");
+ if (IS_ERR(eth->ethsys)) {
+ dev_err(&pdev->dev, "no ethsys regmap found\n");
+ return PTR_ERR(eth->ethsys);
+ }
+
+ eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,pctl");
+ if (IS_ERR(eth->pctl)) {
+ dev_err(&pdev->dev, "no pctl regmap found\n");
+ return PTR_ERR(eth->pctl);
+ }
+
+ eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
+ if (IS_ERR(eth->rstc)) {
+ dev_err(&pdev->dev, "no eth reset found\n");
+ return PTR_ERR(eth->rstc);
+ }
+
+ eth->irq = platform_get_irq(pdev, 0);
+ if (eth->irq < 0) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
+ return -ENXIO;
+ }
+
+ eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
+ eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
+ eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
+ eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
+ if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
+ IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
+ return -ENODEV;
+
+ clk_prepare_enable(eth->clk_ethif);
+ clk_prepare_enable(eth->clk_esw);
+ clk_prepare_enable(eth->clk_gp1);
+ clk_prepare_enable(eth->clk_gp2);
+
+ eth->dev = &pdev->dev;
+ eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+
+ err = mtk_hw_init(eth);
+ if (err)
+ return err;
+
+ for_each_child_of_node(pdev->dev.of_node, mac_np) {
+ if (!of_device_is_compatible(mac_np,
+ "mediatek,eth-mac"))
+ continue;
+
+ if (!of_device_is_available(mac_np))
+ continue;
+
+ err = mtk_add_mac(eth, mac_np);
+ if (err)
+ goto err_free_dev;
+ }
+
+ /* we run 2 devices on the same DMA ring so we need a dummy device
+ * for NAPI to work
+ */
+ init_dummy_netdev(&eth->dummy_dev);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
+ MTK_NAPI_WEIGHT);
+
+ platform_set_drvdata(pdev, eth);
+
+ return 0;
+
+err_free_dev:
+ mtk_cleanup(eth);
+ return err;
+}
+
+static int mtk_remove(struct platform_device *pdev)
+{
+ struct mtk_eth *eth = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(eth->clk_ethif);
+ clk_disable_unprepare(eth->clk_esw);
+ clk_disable_unprepare(eth->clk_gp1);
+ clk_disable_unprepare(eth->clk_gp2);
+
+ netif_napi_del(&eth->rx_napi);
+ mtk_cleanup(eth);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+const struct of_device_id of_mtk_match[] = {
+ { .compatible = "mediatek,mt7623-eth" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
+
+static struct platform_driver mtk_driver = {
+ .probe = mtk_probe,
+ .remove = mtk_remove,
+ .driver = {
+ .name = "mtk_soc_eth",
+ .owner = THIS_MODULE,
+ .of_match_table = of_mtk_match,
+ },
+};
+
+module_platform_driver(mtk_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
new file mode 100644
index 000000000000..48a5292c8ed8
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -0,0 +1,421 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef MTK_ETH_H
+#define MTK_ETH_H
+
+#define MTK_QDMA_PAGE_SIZE 2048
+#define MTK_MAX_RX_LENGTH 1536
+#define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_DMA_SIZE 256
+#define MTK_NAPI_WEIGHT 64
+#define MTK_MAC_COUNT 2
+#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+#define MTK_DMA_DUMMY_DESC 0xffffffff
+#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_SG | NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_IPV6_CSUM)
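+
+/* advance a ring index with a wrap-around mask; this assumes MTK_DMA_SIZE
+ * is a power of two
+ */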
+#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1))
+
+/* Frame Engine Global Reset Register */
+#define MTK_RST_GL 0x04
+#define RST_GL_PSE BIT(0)
+
+/* Frame Engine Interrupt Status Register */
+#define MTK_INT_STATUS2 0x08
+#define MTK_GDM1_AF BIT(28)
+#define MTK_GDM2_AF BIT(29)
+
+/* Frame Engine Interrupt Grouping Register */
+#define MTK_FE_INT_GRP 0x20
+
+/* CDMP Egress Control Register */
+#define MTK_CDMP_EG_CTRL 0x404
+
+/* GDM Egress Control Register */
+#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000))
+#define MTK_GDMA_ICS_EN BIT(22)
+#define MTK_GDMA_TCS_EN BIT(21)
+#define MTK_GDMA_UCS_EN BIT(20)
+
+/* Unicast Filter MAC Address Register - Low */
+#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
+
+/* Unicast Filter MAC Address Register - High */
+#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+
+/* QDMA TX Queue Configuration Registers */
+#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
+#define QDMA_RES_THRES 4
+
+/* QDMA TX Queue Scheduler Registers */
+#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
+
+/* QDMA RX Base Pointer Register */
+#define MTK_QRX_BASE_PTR0 0x1900
+
+/* QDMA RX Maximum Count Register */
+#define MTK_QRX_MAX_CNT0 0x1904
+
+/* QDMA RX CPU Pointer Register */
+#define MTK_QRX_CRX_IDX0 0x1908
+
+/* QDMA RX DMA Pointer Register */
+#define MTK_QRX_DRX_IDX0 0x190C
+
+/* QDMA Global Configuration Register */
+#define MTK_QDMA_GLO_CFG 0x1A04
+#define MTK_RX_2B_OFFSET BIT(31)
+#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_TX_WB_DDONE BIT(6)
+#define MTK_DMA_SIZE_16DWORDS (2 << 4)
+#define MTK_RX_DMA_BUSY BIT(3)
+#define MTK_TX_DMA_BUSY BIT(1)
+#define MTK_RX_DMA_EN BIT(2)
+#define MTK_TX_DMA_EN BIT(0)
+#define MTK_DMA_BUSY_TIMEOUT HZ
+
+/* QDMA Reset Index Register */
+#define MTK_QDMA_RST_IDX 0x1A08
+#define MTK_PST_DRX_IDX0 BIT(16)
+
+/* QDMA Delay Interrupt Register */
+#define MTK_QDMA_DELAY_INT 0x1A0C
+
+/* QDMA Flow Control Register */
+#define MTK_QDMA_FC_THRES 0x1A10
+#define FC_THRES_DROP_MODE BIT(20)
+#define FC_THRES_DROP_EN (7 << 16)
+#define FC_THRES_MIN 0x4444
+
+/* QDMA Interrupt Status Register */
+#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_RX_DONE_INT1 BIT(17)
+#define MTK_RX_DONE_INT0 BIT(16)
+#define MTK_TX_DONE_INT3 BIT(3)
+#define MTK_TX_DONE_INT2 BIT(2)
+#define MTK_TX_DONE_INT1 BIT(1)
+#define MTK_TX_DONE_INT0 BIT(0)
+#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1)
+#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
+ MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
+
+/* QDMA Interrupt Mask Register */
+#define MTK_QDMA_INT_MASK 0x1A1C
+
+/* QDMA Random Early Drop (RED) Register */
+#define MTK_QDMA_HRED2 0x1A44
+
+/* QDMA TX Forward CPU Pointer Register */
+#define MTK_QTX_CTX_PTR 0x1B00
+
+/* QDMA TX Forward DMA Pointer Register */
+#define MTK_QTX_DTX_PTR 0x1B04
+
+/* QDMA TX Release CPU Pointer Register */
+#define MTK_QTX_CRX_PTR 0x1B10
+
+/* QDMA TX Release DMA Pointer Register */
+#define MTK_QTX_DRX_PTR 0x1B14
+
+/* QDMA FQ Head Pointer Register */
+#define MTK_QDMA_FQ_HEAD 0x1B20
+
+/* QDMA FQ Tail Pointer Register */
+#define MTK_QDMA_FQ_TAIL 0x1B24
+
+/* QDMA FQ Free Page Counter Register */
+#define MTK_QDMA_FQ_CNT 0x1B28
+
+/* QDMA FQ Free Page Buffer Length Register */
+#define MTK_QDMA_FQ_BLEN 0x1B2C
+
+/* GMA1 Received Good Byte Count Register */
+#define MTK_GDM1_TX_GBCNT 0x2400
+#define MTK_STAT_OFFSET 0x40
+
+/* QDMA descriptor txd4 */
+#define TX_DMA_CHKSUM (0x7 << 29)
+#define TX_DMA_TSO BIT(28)
+#define TX_DMA_FPORT_SHIFT 25
+#define TX_DMA_FPORT_MASK 0x7
+#define TX_DMA_INS_VLAN BIT(16)
+
+/* QDMA descriptor txd3 */
+#define TX_DMA_OWNER_CPU BIT(31)
+#define TX_DMA_LS0 BIT(30)
+#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
+#define TX_DMA_SWC BIT(14)
+#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
+
+/* QDMA descriptor rxd2 */
+#define RX_DMA_DONE BIT(31)
+#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
+#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+
+/* QDMA descriptor rxd3 */
+#define RX_DMA_VID(_x) ((_x) & 0xfff)
+
+/* QDMA descriptor rxd4 */
+#define RX_DMA_L4_VALID BIT(24)
+#define RX_DMA_FPORT_SHIFT 19
+#define RX_DMA_FPORT_MASK 0x7
+
+/* PHY Indirect Access Control registers */
+#define MTK_PHY_IAC 0x10004
+#define PHY_IAC_ACCESS BIT(31)
+#define PHY_IAC_READ BIT(19)
+#define PHY_IAC_WRITE BIT(18)
+#define PHY_IAC_START BIT(16)
+#define PHY_IAC_ADDR_SHIFT 20
+#define PHY_IAC_REG_SHIFT 25
+#define PHY_IAC_TIMEOUT HZ
+
+/* Mac control registers */
+#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
+#define MAC_MCR_MAX_RX_1536 BIT(24)
+#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
+#define MAC_MCR_FORCE_MODE BIT(15)
+#define MAC_MCR_TX_EN BIT(14)
+#define MAC_MCR_RX_EN BIT(13)
+#define MAC_MCR_BACKOFF_EN BIT(9)
+#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_FORCE_RX_FC BIT(5)
+#define MAC_MCR_FORCE_TX_FC BIT(4)
+#define MAC_MCR_SPEED_1000 BIT(3)
+#define MAC_MCR_SPEED_100 BIT(2)
+#define MAC_MCR_FORCE_DPX BIT(1)
+#define MAC_MCR_FORCE_LINK BIT(0)
+#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | \
+ MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
+ MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
+ MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
+ MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
+ MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
+
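As a worked example of the composite value above, here is a hedged sketch of forcing a fixed 1G full-duplex link on one GMAC; the helper is hypothetical, while mtk_w32() and MTK_MAC_MCR() are declared in this header:

/* Sketch only: program GMAC 'id' for a forced 1G full-duplex link by
 * writing the composite MAC_MCR_FIXED_LINK value to its control register.
 */
static void example_set_fixed_link(struct mtk_eth *eth, int id)
{
	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(id));
}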
+/* GPIO port control registers for GMAC 2 */
+#define GPIO_OD33_CTRL8 0x4c0
+#define GPIO_BIAS_CTRL 0xed0
+#define GPIO_DRV_SEL10 0xf00
+
+/* Ethernet subsystem config register */
+#define ETHSYS_SYSCFG0		0x14
+#define SYSCFG0_GE_MASK		0x3
+#define SYSCFG0_GE_MODE(x, y)	((x) << (12 + ((y) * 2)))
+
+struct mtk_rx_dma {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+} __packed __aligned(4);
+
+struct mtk_tx_dma {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+} __packed __aligned(4);
+
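To make the descriptor bit layout concrete, here is a minimal sketch of decoding a completed RX descriptor with the macros defined above; the helper and its calling convention are hypothetical:

/* Sketch only: report whether an RX descriptor has completed and, if so,
 * extract the payload length (rxd2) and the forward port (rxd4).
 */
static bool example_rxd_done(const struct mtk_rx_dma *rxd,
			     unsigned int *len, unsigned int *port)
{
	if (!(rxd->rxd2 & RX_DMA_DONE))
		return false;	/* still owned by the DMA engine */

	*len = RX_DMA_GET_PLEN0(rxd->rxd2);
	*port = (rxd->rxd4 >> RX_DMA_FPORT_SHIFT) & RX_DMA_FPORT_MASK;
	return true;
}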
+struct mtk_eth;
+struct mtk_mac;
+
+/* struct mtk_hw_stats - the structure that holds the traffic statistics.
+ * @stats_lock: make sure that stats operations are atomic
+ * @reg_offset: the stats register offset of the SoC
+ * @syncp: the u64_stats_sync used to protect the 64bit counters
+ *
+ * All of the supported SoCs have hardware counters for traffic statistics.
+ * Whenever the status IRQ triggers we can read the latest stats from these
+ * counters and store them in this struct.
+ */
+struct mtk_hw_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_skip;
+ u64 tx_collisions;
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_overflow;
+ u64 rx_fcs_errors;
+ u64 rx_short_errors;
+ u64 rx_long_errors;
+ u64 rx_checksum_errors;
+ u64 rx_flow_control_packets;
+
+ spinlock_t stats_lock;
+ u32 reg_offset;
+ struct u64_stats_sync syncp;
+};
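Given that @syncp guards the 64-bit counters, any consumer of this struct would copy them inside the usual u64_stats retry loop; a minimal sketch, with a hypothetical helper name:

/* Sketch only: take a consistent snapshot of two of the counters above.
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() are the standard kernel
 * primitives for reading u64 counters updated under a u64_stats_sync.
 */
static void example_read_stats(struct mtk_hw_stats *hw_stats,
			       u64 *tx_packets, u64 *tx_bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&hw_stats->syncp);
		*tx_packets = hw_stats->tx_packets;
		*tx_bytes = hw_stats->tx_bytes;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
}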
+
+/* A DMA descriptor can point at 1-2 segments. This enum allows us to track
+ * how the memory was allocated so that it can be freed properly
+ */
+enum mtk_tx_flags {
+ MTK_TX_FLAGS_SINGLE0 = 0x01,
+ MTK_TX_FLAGS_PAGE0 = 0x02,
+};
+
+/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
+ *			by the TX descriptors
+ * @skb:		The SKB pointer of the packet being sent
+ * @flags:		The mtk_tx_flags describing how the buffers were mapped
+ * @dma_addr0:		The base addr of the first segment
+ * @dma_len0:		The length of the first segment
+ * @dma_addr1:		The base addr of the second segment
+ * @dma_len1:		The length of the second segment
+ */
+struct mtk_tx_buf {
+ struct sk_buff *skb;
+ u32 flags;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr0);
+ DEFINE_DMA_UNMAP_LEN(dma_len0);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr1);
+ DEFINE_DMA_UNMAP_LEN(dma_len1);
+};
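Because the DEFINE_DMA_UNMAP_* fields only exist on configurations that need them, they must be accessed through dma_unmap_addr()/dma_unmap_len(); a hedged sketch of how the mtk_tx_flags pick the matching unmap primitive for the first segment (the helper is hypothetical):

/* Sketch only: release the first segment recorded in a mtk_tx_buf,
 * choosing dma_unmap_single() or dma_unmap_page() based on how the
 * buffer was originally mapped (see enum mtk_tx_flags).
 */
static void example_unmap_seg0(struct device *dev, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0)
		dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0)
		dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
}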
+
+/* struct mtk_tx_ring -	This struct holds info describing a TX ring
+ * @dma:		The descriptor ring
+ * @buf:		The memory pointed at by the ring
+ * @phys:		The DMA address of the descriptor ring
+ * @next_free:		Pointer to the next free descriptor
+ * @last_free:		Pointer to the last free descriptor
+ * @thresh:		The minimum number of free descriptors to keep available
+ * @free_count:		QDMA uses a linked list. Track how many free descriptors
+ *			are present
+ */
+struct mtk_tx_ring {
+ struct mtk_tx_dma *dma;
+ struct mtk_tx_buf *buf;
+ dma_addr_t phys;
+ struct mtk_tx_dma *next_free;
+ struct mtk_tx_dma *last_free;
+ u16 thresh;
+ atomic_t free_count;
+};
+
+/* struct mtk_rx_ring -	This struct holds info describing a RX ring
+ * @dma:		The descriptor ring
+ * @data:		The memory pointed at by the ring
+ * @phys:		The DMA address of the descriptor ring
+ * @frag_size:		How big each fragment can be
+ * @buf_size:		The size of each packet buffer
+ * @calc_idx:		The current head of the ring
+ */
+struct mtk_rx_ring {
+ struct mtk_rx_dma *dma;
+ u8 **data;
+ dma_addr_t phys;
+ u16 frag_size;
+ u16 buf_size;
+ u16 calc_idx;
+};
+
+/* currently no SoC has more than 2 MACs */
+#define MTK_MAX_DEVS 2
+
+/* struct mtk_eth -	This is the main data structure for holding the state
+ *			of the driver
+ * @dev:		The device pointer
+ * @base:		The mapped register i/o base
+ * @rstc:		The reset controller of the ethernet subsystem
+ * @page_lock:		Make sure that register operations are atomic
+ * @dummy_dev:		We run 2 netdevs on 1 physical DMA ring and need a
+ *			dummy for NAPI to work
+ * @netdev:		The netdev instances
+ * @mac:		Each netdev is linked to a physical MAC
+ * @irq:		The IRQ that we are using
+ * @msg_enable:		Ethtool msg level
+ * @sysclk:		The system clock rate
+ * @ethsys:		The register map pointing at the range used to setup
+ *			MII modes
+ * @pctl:		The register map pointing at the range used to setup
+ *			GMAC port drive/slew values
+ * @dma_refcnt:		track how many netdevs are using the DMA engine
+ * @tx_ring:		Pointer to the memory holding info about the TX ring
+ * @rx_ring:		Pointer to the memory holding info about the RX ring
+ * @rx_napi:		The NAPI struct
+ * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
+ * @scratch_head:	The scratch memory that scratch_ring points to.
+ * @clk_ethif:		The ethif clock
+ * @clk_esw:		The switch clock
+ * @clk_gp1:		The gmac1 clock
+ * @clk_gp2:		The gmac2 clock
+ * @mii_bus:		If there is a bus we need to create an instance for it
+ */
+struct mtk_eth {
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *rstc;
+ spinlock_t page_lock;
+ struct net_device dummy_dev;
+ struct net_device *netdev[MTK_MAX_DEVS];
+ struct mtk_mac *mac[MTK_MAX_DEVS];
+ int irq;
+ u32 msg_enable;
+ unsigned long sysclk;
+ struct regmap *ethsys;
+ struct regmap *pctl;
+ atomic_t dma_refcnt;
+ struct mtk_tx_ring tx_ring;
+ struct mtk_rx_ring rx_ring;
+ struct napi_struct rx_napi;
+ struct mtk_tx_dma *scratch_ring;
+ void *scratch_head;
+ struct clk *clk_ethif;
+ struct clk *clk_esw;
+ struct clk *clk_gp1;
+ struct clk *clk_gp2;
+ struct mii_bus *mii_bus;
+};
+
+/* struct mtk_mac -	the structure that holds the info about the MACs of the
+ *			SoC
+ * @id:			The number of the MAC
+ * @of_node:		Our devicetree node
+ * @hw:			Backpointer to our main data structure
+ * @hw_stats:		Packet statistics counter
+ * @phy_dev:		The attached PHY if available
+ * @pending_work:	The work used to reset the DMA ring
+ */
+struct mtk_mac {
+ int id;
+ struct device_node *of_node;
+ struct mtk_eth *hw;
+ struct mtk_hw_stats *hw_stats;
+ struct phy_device *phy_dev;
+ struct work_struct pending_work;
+};
+
+/* The struct describing the SoC. These are declared in the soc_xyz.c files */
+extern const struct of_device_id of_mtk_match[];
+
+/* Read the hardware statistics counters */
+void mtk_stats_update_mac(struct mtk_mac *mac);
+
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+
+#endif /* MTK_ETH_H */
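The MTK_*_DMA_BUSY bits and MTK_DMA_BUSY_TIMEOUT defined earlier imply the usual poll-until-idle pattern before touching the DMA configuration; a minimal sketch built from the mtk_r32() accessor declared above (the helper itself is hypothetical):

/* Sketch only: wait for both DMA directions to go idle, bounded by
 * MTK_DMA_BUSY_TIMEOUT (HZ jiffies, i.e. roughly one second).
 */
static int example_wait_dma_idle(struct mtk_eth *eth)
{
	unsigned long timeout = jiffies + MTK_DMA_BUSY_TIMEOUT;

	while (mtk_r32(eth, MTK_QDMA_GLO_CFG) &
	       (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)) {
		if (time_after(jiffies, timeout))
			return -EBUSY;
		cond_resched();
	}
	return 0;
}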
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 1486ce902a56..9ca3734ebb6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -4,6 +4,7 @@
config MLX4_EN
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
+ depends on MAY_USE_DEVLINK
depends on PCI
select MLX4_CORE
select PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index dd84cabb2a51..c761194bb323 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return bitmap_iterator_count(&it) +
(priv->tx_ring_num * 2) +
- (priv->rx_ring_num * 2);
+ (priv->rx_ring_num * 3);
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i]->packets;
data[index++] = priv->rx_ring[i]->bytes;
+ data[index++] = priv->rx_ring[i]->dropped;
}
spin_unlock_bh(&priv->stats_lock);
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_dropped", i);
}
break;
case ETH_SS_PRIV_FLAGS:
@@ -501,34 +504,30 @@ static u32 mlx4_en_autoneg_get(struct net_device *dev)
return autoneg;
}
-static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
+static void ptys2ethtool_update_supported_port(unsigned long *mask,
+ struct mlx4_ptys_reg *ptys_reg)
{
u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
| MLX4_PROT_MASK(MLX4_1000BASE_T)
| MLX4_PROT_MASK(MLX4_100BASE_TX))) {
- return SUPPORTED_TP;
- }
-
- if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
+ __set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+ } else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
| MLX4_PROT_MASK(MLX4_10GBASE_SR)
| MLX4_PROT_MASK(MLX4_56GBASE_SR4)
| MLX4_PROT_MASK(MLX4_40GBASE_CR4)
| MLX4_PROT_MASK(MLX4_40GBASE_SR4)
| MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
- return SUPPORTED_FIBRE;
- }
-
- if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
+ __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
+ } else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
| MLX4_PROT_MASK(MLX4_40GBASE_KR4)
| MLX4_PROT_MASK(MLX4_20GBASE_KR2)
| MLX4_PROT_MASK(MLX4_10GBASE_KR)
| MLX4_PROT_MASK(MLX4_10GBASE_KX4)
| MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
- return SUPPORTED_Backplane;
+ __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask);
}
- return 0;
}
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
@@ -574,122 +573,111 @@ static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
enum ethtool_report {
SUPPORTED = 0,
ADVERTISED = 1,
- SPEED = 2
};
+struct ptys2ethtool_config {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+ u32 speed;
+};
+
+static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
+ enum ethtool_report report)
+{
+ switch (report) {
+ case SUPPORTED:
+ return cfg->supported;
+ case ADVERTISED:
+ return cfg->advertised;
+ }
+ return NULL;
+}
+
+#define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
+ ({ \
+ struct ptys2ethtool_config *cfg; \
+ const unsigned int modes[] = { __VA_ARGS__ }; \
+ unsigned int i; \
+ cfg = &ptys2ethtool_map[reg_]; \
+ cfg->speed = speed_; \
+ bitmap_zero(cfg->supported, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ bitmap_zero(cfg->advertised, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS); \
+		for (i = 0; i < ARRAY_SIZE(modes); ++i) {		\
+ __set_bit(modes[i], cfg->supported); \
+ __set_bit(modes[i], cfg->advertised); \
+ } \
+ })
+
/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
-static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
- [MLX4_100BASE_TX] = {
- SUPPORTED_100baseT_Full,
- ADVERTISED_100baseT_Full,
- SPEED_100
- },
-
- [MLX4_1000BASE_T] = {
- SUPPORTED_1000baseT_Full,
- ADVERTISED_1000baseT_Full,
- SPEED_1000
- },
- [MLX4_1000BASE_CX_SGMII] = {
- SUPPORTED_1000baseKX_Full,
- ADVERTISED_1000baseKX_Full,
- SPEED_1000
- },
- [MLX4_1000BASE_KX] = {
- SUPPORTED_1000baseKX_Full,
- ADVERTISED_1000baseKX_Full,
- SPEED_1000
- },
-
- [MLX4_10GBASE_T] = {
- SUPPORTED_10000baseT_Full,
- ADVERTISED_10000baseT_Full,
- SPEED_10000
- },
- [MLX4_10GBASE_CX4] = {
- SUPPORTED_10000baseKX4_Full,
- ADVERTISED_10000baseKX4_Full,
- SPEED_10000
- },
- [MLX4_10GBASE_KX4] = {
- SUPPORTED_10000baseKX4_Full,
- ADVERTISED_10000baseKX4_Full,
- SPEED_10000
- },
- [MLX4_10GBASE_KR] = {
- SUPPORTED_10000baseKR_Full,
- ADVERTISED_10000baseKR_Full,
- SPEED_10000
- },
- [MLX4_10GBASE_CR] = {
- SUPPORTED_10000baseKR_Full,
- ADVERTISED_10000baseKR_Full,
- SPEED_10000
- },
- [MLX4_10GBASE_SR] = {
- SUPPORTED_10000baseKR_Full,
- ADVERTISED_10000baseKR_Full,
- SPEED_10000
- },
-
- [MLX4_20GBASE_KR2] = {
- SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
- ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
- SPEED_20000
- },
-
- [MLX4_40GBASE_CR4] = {
- SUPPORTED_40000baseCR4_Full,
- ADVERTISED_40000baseCR4_Full,
- SPEED_40000
- },
- [MLX4_40GBASE_KR4] = {
- SUPPORTED_40000baseKR4_Full,
- ADVERTISED_40000baseKR4_Full,
- SPEED_40000
- },
- [MLX4_40GBASE_SR4] = {
- SUPPORTED_40000baseSR4_Full,
- ADVERTISED_40000baseSR4_Full,
- SPEED_40000
- },
-
- [MLX4_56GBASE_KR4] = {
- SUPPORTED_56000baseKR4_Full,
- ADVERTISED_56000baseKR4_Full,
- SPEED_56000
- },
- [MLX4_56GBASE_CR4] = {
- SUPPORTED_56000baseCR4_Full,
- ADVERTISED_56000baseCR4_Full,
- SPEED_56000
- },
- [MLX4_56GBASE_SR4] = {
- SUPPORTED_56000baseSR4_Full,
- ADVERTISED_56000baseSR4_Full,
- SPEED_56000
- },
+static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ];
+
+void __init mlx4_en_init_ptys2ethtool_map(void)
+{
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
+ ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000,
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000,
+ ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT);
+ MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000,
+ ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT);
};
-static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
+static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
+ u32 eth_proto,
+ enum ethtool_report report)
{
int i;
- u32 link_modes = 0;
-
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
if (eth_proto & MLX4_PROT_MASK(i))
- link_modes |= ptys2ethtool_map[i][report];
+ bitmap_or(link_modes, link_modes,
+ ptys2ethtool_link_mode(&ptys2ethtool_map[i],
+ report),
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
- return link_modes;
}
-static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
+static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
+ enum ethtool_report report)
{
int i;
u32 ptys_modes = 0;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
- if (ptys2ethtool_map[i][report] & link_modes)
+ if (bitmap_intersects(
+ ptys2ethtool_link_mode(&ptys2ethtool_map[i],
+ report),
+ link_modes,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= 1 << i;
}
return ptys_modes;
@@ -702,14 +690,15 @@ static u32 speed2ptys_link_modes(u32 speed)
u32 ptys_modes = 0;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
- if (ptys2ethtool_map[i][SPEED] == speed)
+ if (ptys2ethtool_map[i].speed == speed)
ptys_modes |= 1 << i;
}
return ptys_modes;
}
-static int ethtool_get_ptys_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int
+ethtool_get_ptys_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_ptys_reg ptys_reg;
@@ -737,79 +726,102 @@ static int ethtool_get_ptys_settings(struct net_device *dev,
en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
be32_to_cpu(ptys_reg.eth_proto_lp_adv));
- cmd->supported = 0;
- cmd->advertising = 0;
+ /* reset supported/advertising masks */
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- cmd->supported |= ptys_get_supported_port(&ptys_reg);
+ ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported,
+ &ptys_reg);
eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
- cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);
+ ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported,
+ eth_proto, SUPPORTED);
eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
- cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);
+ ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising,
+ eth_proto, ADVERTISED);
- cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
+ Asym_Pause);
- cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
- ADVERTISED_Asym_Pause : 0;
+ if (priv->prof->tx_pause)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Pause);
+ if (priv->prof->tx_pause ^ priv->prof->rx_pause)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Asym_Pause);
- cmd->port = ptys_get_active_port(&ptys_reg);
- cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
- XCVR_EXTERNAL : XCVR_INTERNAL;
+ link_ksettings->base.port = ptys_get_active_port(&ptys_reg);
if (mlx4_en_autoneg_get(dev)) {
- cmd->supported |= SUPPORTED_Autoneg;
- cmd->advertising |= ADVERTISED_Autoneg;
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
}
- cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
+ link_ksettings->base.autoneg
+ = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
- cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);
- cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
- ADVERTISED_Autoneg : 0;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
+ ptys2ethtool_update_link_modes(
+ link_ksettings->link_modes.lp_advertising,
+ eth_proto, ADVERTISED);
+ if (priv->port_state.flags & MLX4_EN_PORT_ANC)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ lp_advertising, Autoneg);
- cmd->phy_address = 0;
- cmd->mdio_support = 0;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ link_ksettings->base.phy_address = 0;
+ link_ksettings->base.mdio_support = 0;
+ link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
return ret;
}
-static void ethtool_get_default_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static void
+ethtool_get_default_link_ksettings(
+ struct net_device *dev, struct ethtool_link_ksettings *link_ksettings)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int trans_type;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->supported = SUPPORTED_10000baseT_Full;
- cmd->advertising = ADVERTISED_10000baseT_Full;
- trans_type = priv->port_state.transceiver;
+ link_ksettings->base.autoneg = AUTONEG_DISABLE;
+
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, advertising,
+ 10000baseT_Full);
+
+ trans_type = priv->port_state.transceiver;
if (trans_type > 0 && trans_type <= 0xC) {
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
+ link_ksettings->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, FIBRE);
} else if (trans_type == 0x80 || trans_type == 0) {
- cmd->port = PORT_TP;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
+ link_ksettings->base.port = PORT_TP;
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, TP);
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, TP);
} else {
- cmd->port = -1;
- cmd->transceiver = -1;
+ link_ksettings->base.port = -1;
}
}
-static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int
+mlx4_en_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int ret = -EINVAL;
@@ -822,16 +834,16 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
priv->port_state.flags & MLX4_EN_PORT_ANE);
if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
- ret = ethtool_get_ptys_settings(dev, cmd);
+ ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings);
if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
- ethtool_get_default_settings(dev, cmd);
+ ethtool_get_default_link_ksettings(dev, link_ksettings);
if (netif_carrier_ok(dev)) {
- ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
- cmd->duplex = DUPLEX_FULL;
+ link_ksettings->base.speed = priv->port_state.link_speed;
+ link_ksettings->base.duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ link_ksettings->base.speed = SPEED_UNKNOWN;
+ link_ksettings->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
}
@@ -855,21 +867,29 @@ static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
return proto_admin;
}
-static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int
+mlx4_en_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_ptys_reg ptys_reg;
__be32 proto_admin;
int ret;
- u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
- int speed = ethtool_cmd_speed(cmd);
+ u32 ptys_adv = ethtool2ptys_link_modes(
+ link_ksettings->link_modes.advertising, ADVERTISED);
+ const int speed = link_ksettings->base.speed;
- en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
- speed, cmd->advertising, cmd->autoneg, cmd->duplex);
+ en_dbg(DRV, priv,
+ "Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n",
+ speed, __ETHTOOL_LINK_MODE_MASK_NBITS,
+ link_ksettings->link_modes.advertising,
+ link_ksettings->base.autoneg,
+ link_ksettings->base.duplex);
- if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
- (cmd->duplex == DUPLEX_HALF))
+ if (!(priv->mdev->dev->caps.flags2 &
+ MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
+ (link_ksettings->base.duplex == DUPLEX_HALF))
return -EINVAL;
memset(&ptys_reg, 0, sizeof(ptys_reg));
@@ -883,7 +903,7 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
- proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
+ proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
cpu_to_be32(ptys_adv) :
speed_set_ptys_admin(priv, speed,
ptys_reg.eth_proto_cap);
@@ -1982,8 +2002,8 @@ static int mlx4_en_set_phys_id(struct net_device *dev,
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
- .get_settings = mlx4_en_get_settings,
- .set_settings = mlx4_en_set_settings,
+ .get_link_ksettings = mlx4_en_get_link_ksettings,
+ .set_link_ksettings = mlx4_en_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_strings = mlx4_en_get_strings,
.get_sset_count = mlx4_en_get_sset_count,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index e0ec280a7fa1..bf7628db098a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -382,6 +382,7 @@ static void mlx4_en_verify_params(void)
static int __init mlx4_en_init(void)
{
mlx4_en_verify_params();
+ mlx4_en_init_ptys2ethtool_map();
return mlx4_register_interface(&mlx4_en_interface);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 21e2c0960271..b4b258c8ca47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -40,6 +40,7 @@
#include <net/ip.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
+#include <net/devlink.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -69,6 +70,15 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
+static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ return mlx4_en_setup_tc(dev, tc->tc);
+}
+
#ifdef CONFIG_RFS_ACCEL
struct mlx4_en_filter {
@@ -2024,8 +2034,11 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
/* Unregister device - this will close the port if it was up */
- if (priv->registered)
+ if (priv->registered) {
+ devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
+ priv->port));
unregister_netdev(dev);
+ }
if (priv->allocated)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
@@ -2462,7 +2475,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
- .ndo_setup_tc = mlx4_en_setup_tc,
+ .ndo_setup_tc = __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
@@ -2500,7 +2513,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
- .ndo_setup_tc = mlx4_en_setup_tc,
+ .ndo_setup_tc = __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
@@ -3042,6 +3055,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
priv->registered = 1;
+ devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
+ dev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5fc0b7c..20b6c2e678b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
+ unsigned long sw_rx_dropped = 0;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i]->packets;
stats->rx_bytes += priv->rx_ring[i]->bytes;
+ sw_rx_dropped += priv->rx_ring[i]->dropped;
priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
stats->collisions = 0;
- stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+ stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+ sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 41440b2b20a3..b723e3bcab39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
gfp_t gfp = _gfp;
if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
page = alloc_pages(gfp, order);
if (likely(page))
break;
@@ -82,8 +82,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
/* Not doing get_page() for each frag is a big win
* on asymetric workloads. Note we can not use atomic_set().
*/
- atomic_add(page_alloc->page_size / frag_info->frag_stride - 1,
- &page->_count);
+ page_ref_add(page, page_alloc->page_size / frag_info->frag_stride - 1);
return 0;
}
@@ -127,7 +126,9 @@ out:
dma_unmap_page(priv->ddev, page_alloc[i].dma,
page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
page = page_alloc[i].page;
- atomic_set(&page->_count, 1);
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc[i].page_size /
+ priv->frag_info[i].frag_stride - 1);
put_page(page);
}
}
@@ -165,7 +166,7 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n",
i, ring->page_alloc[i].page_size,
- atomic_read(&ring->page_alloc[i].page->_count));
+ page_ref_count(ring->page_alloc[i].page));
}
return 0;
@@ -177,7 +178,9 @@ out:
dma_unmap_page(priv->ddev, page_alloc->dma,
page_alloc->page_size, PCI_DMA_FROMDEVICE);
page = page_alloc->page;
- atomic_set(&page->_count, 1);
+ /* Revert changes done by mlx4_alloc_pages */
+ page_ref_sub(page, page_alloc->page_size /
+ priv->frag_info[i].frag_stride - 1);
put_page(page);
page_alloc->page = NULL;
}
@@ -940,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
- priv->stats.rx_dropped++;
+ ring->dropped++;
goto next;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index e0946ab22010..c0d7b7296236 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -276,7 +276,8 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp)
+ int index, u8 owner, u64 timestamp,
+ int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
@@ -347,7 +348,8 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
}
}
}
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, napi_mode);
+
return tx_info->nr_txbb;
}
@@ -371,7 +373,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
while (ring->cons != ring->prod) {
ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
- !!(ring->cons & ring->size), 0);
+ !!(ring->cons & ring->size), 0,
+ 0 /* Non-NAPI caller */);
ring->cons += ring->last_nr_txbb;
cnt++;
}
@@ -385,7 +388,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
}
static bool mlx4_en_process_tx_cq(struct net_device *dev,
- struct mlx4_en_cq *cq)
+ struct mlx4_en_cq *cq, int napi_budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
@@ -451,7 +454,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
last_nr_txbb = mlx4_en_free_tx_desc(
priv, ring, ring_index,
!!((ring_cons + txbbs_skipped) &
- ring->size), timestamp);
+ ring->size), timestamp, napi_budget);
mlx4_en_stamp_wqe(priv, ring, stamp_index,
!!((ring_cons + txbbs_stamp) &
@@ -511,7 +514,7 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
struct mlx4_en_priv *priv = netdev_priv(dev);
int clean_complete;
- clean_complete = mlx4_en_process_tx_cq(dev, cq);
+ clean_complete = mlx4_en_process_tx_cq(dev, cq, budget);
if (!clean_complete)
return budget;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d66c690a8597..e97094598b2d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -157,7 +157,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[29] = "802.1ad offload support",
[31] = "Modifying loopback source checks using UPDATE_QP support",
[32] = "Loopback source checks support",
- [33] = "RoCEv2 support"
+ [33] = "RoCEv2 support",
+ [34] = "DMFS Sniffer support (UC & MC)"
};
int i;
@@ -810,6 +811,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ if (field & 0x20)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0472941af820..dec77d6f0ac9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>
+#include <net/devlink.h>
#include "mlx4.h"
@@ -249,3 +250,11 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int
return result;
}
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
+
+struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+
+ return &info->devlink_port;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_devlink_port);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f8674ae62752..12c77a70abdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -42,6 +42,7 @@
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>
+#include <net/devlink.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -104,6 +105,11 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
"Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
+static bool enable_4k_uar;
+module_param(enable_4k_uar, bool, 0444);
+MODULE_PARM_DESC(enable_4k_uar,
+ "Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)");
+
#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \
MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
MLX4_FUNC_CAP_DMFS_A0_STATIC)
@@ -422,7 +428,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
/* Virtual PCI function needs to determine UAR page size from
* firmware. Only master PCI function can set the uar page size
*/
- dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
+ if (enable_4k_uar)
+ dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
+ else
+ dev->uar_page_shift = PAGE_SHIFT;
+
mlx4_set_num_reserved_uars(dev, dev_cap);
}
@@ -1081,36 +1091,20 @@ static ssize_t show_port_type(struct device *dev,
return strlen(buf);
}
-static ssize_t set_port_type(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int __set_port_type(struct mlx4_port_info *info,
+ enum mlx4_port_type port_type)
{
- struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
- port_attr);
struct mlx4_dev *mdev = info->dev;
struct mlx4_priv *priv = mlx4_priv(mdev);
enum mlx4_port_type types[MLX4_MAX_PORTS];
enum mlx4_port_type new_types[MLX4_MAX_PORTS];
- static DEFINE_MUTEX(set_port_type_mutex);
int i;
int err = 0;
- mutex_lock(&set_port_type_mutex);
-
- if (!strcmp(buf, "ib\n"))
- info->tmp_type = MLX4_PORT_TYPE_IB;
- else if (!strcmp(buf, "eth\n"))
- info->tmp_type = MLX4_PORT_TYPE_ETH;
- else if (!strcmp(buf, "auto\n"))
- info->tmp_type = MLX4_PORT_TYPE_AUTO;
- else {
- mlx4_err(mdev, "%s is not supported port type\n", buf);
- err = -EINVAL;
- goto err_out;
- }
-
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
+ info->tmp_type = port_type;
+
/* Possible type is always the one that was delivered */
mdev->caps.possible_type[info->port] = info->tmp_type;
@@ -1152,6 +1146,37 @@ static ssize_t set_port_type(struct device *dev,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
+
+ return err;
+}
+
+static ssize_t set_port_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+ port_attr);
+ struct mlx4_dev *mdev = info->dev;
+ enum mlx4_port_type port_type;
+ static DEFINE_MUTEX(set_port_type_mutex);
+ int err;
+
+ mutex_lock(&set_port_type_mutex);
+
+ if (!strcmp(buf, "ib\n")) {
+ port_type = MLX4_PORT_TYPE_IB;
+ } else if (!strcmp(buf, "eth\n")) {
+ port_type = MLX4_PORT_TYPE_ETH;
+ } else if (!strcmp(buf, "auto\n")) {
+ port_type = MLX4_PORT_TYPE_AUTO;
+ } else {
+		mlx4_err(mdev, "%s is not a supported port type\n", buf);
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ err = __set_port_type(info, port_type);
+
err_out:
mutex_unlock(&set_port_type_mutex);
@@ -2217,11 +2242,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
- /* Always set UAR page size 4KB, set log_uar_sz accordingly */
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
- PAGE_SHIFT -
- DEFAULT_UAR_PAGE_SHIFT;
- init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+ if (enable_4k_uar) {
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+ PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
+ init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+ } else {
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca.uar_page_sz = PAGE_SHIFT - 12;
+ }
init_hca.mw_enabled = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
@@ -2889,8 +2917,13 @@ no_msi:
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
+ struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- int err = 0;
+ int err;
+
+ err = devlink_port_register(devlink, &info->devlink_port, port);
+ if (err)
+ return err;
info->dev = dev;
info->port = port;
@@ -2915,6 +2948,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
+ devlink_port_unregister(&info->devlink_port);
info->port = -1;
}
@@ -3138,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
return 0;
}
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+ struct pci_dev *pdev = dev->persist->pdev;
+ int err = 0;
+
+ mutex_lock(&dev->persist->pci_status_mutex);
+ if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+ err = pci_enable_device(pdev);
+ if (!err)
+ dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+ }
+ mutex_unlock(&dev->persist->pci_status_mutex);
+
+ return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+ struct pci_dev *pdev = dev->persist->pdev;
+
+ mutex_lock(&dev->persist->pci_status_mutex);
+ if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+ pci_disable_device(pdev);
+ dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+ }
+ mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
@@ -3548,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
- err = pci_enable_device(pdev);
+ err = mlx4_pci_enable_device(&priv->dev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
@@ -3681,28 +3743,59 @@ err_release_regions:
pci_release_regions(pdev);
err_disable_pdev:
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(&priv->dev);
pci_set_drvdata(pdev, NULL);
return err;
}
+static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type)
+{
+ struct mlx4_port_info *info = container_of(devlink_port,
+ struct mlx4_port_info,
+ devlink_port);
+ enum mlx4_port_type mlx4_port_type;
+
+ switch (port_type) {
+ case DEVLINK_PORT_TYPE_AUTO:
+ mlx4_port_type = MLX4_PORT_TYPE_AUTO;
+ break;
+ case DEVLINK_PORT_TYPE_ETH:
+ mlx4_port_type = MLX4_PORT_TYPE_ETH;
+ break;
+ case DEVLINK_PORT_TYPE_IB:
+ mlx4_port_type = MLX4_PORT_TYPE_IB;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return __set_port_type(info, mlx4_port_type);
+}
+
+static const struct devlink_ops mlx4_devlink_ops = {
+ .port_type_set = mlx4_devlink_port_type_set,
+};
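With port_type_set wired into mlx4_devlink_ops, the existing sysfs port-type switch also becomes reachable through the devlink userspace tool, e.g. (PCI address hypothetical):

	devlink port set pci/0000:03:00.0/1 type eth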
+
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct devlink *devlink;
struct mlx4_priv *priv;
struct mlx4_dev *dev;
int ret;
printk_once(KERN_INFO "%s", mlx4_version);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
+ if (!devlink)
return -ENOMEM;
+ priv = devlink_priv(devlink);
dev = &priv->dev;
dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
if (!dev->persist) {
- kfree(priv);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_devlink_free;
}
dev->persist->pdev = pdev;
dev->persist->dev = dev;
@@ -3710,15 +3803,25 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
priv->pci_dev_data = id->driver_data;
mutex_init(&dev->persist->device_state_mutex);
mutex_init(&dev->persist->interface_state_mutex);
+ mutex_init(&dev->persist->pci_status_mutex);
+
+ ret = devlink_register(devlink, &pdev->dev);
+ if (ret)
+ goto err_persist_free;
ret = __mlx4_init_one(pdev, id->driver_data, priv);
- if (ret) {
- kfree(dev->persist);
- kfree(priv);
- } else {
- pci_save_state(pdev);
- }
+ if (ret)
+ goto err_devlink_unregister;
+
+ pci_save_state(pdev);
+ return 0;
+err_devlink_unregister:
+ devlink_unregister(devlink);
+err_persist_free:
+ kfree(dev->persist);
+err_devlink_free:
+ devlink_free(devlink);
return ret;
}
@@ -3819,6 +3922,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
mutex_lock(&persist->interface_state_mutex);
@@ -3848,9 +3952,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
}
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(dev);
+ devlink_unregister(devlink);
kfree(dev->persist);
- kfree(priv);
+ devlink_free(devlink);
pci_set_drvdata(pdev, NULL);
}
@@ -3966,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- pci_disable_device(pdev);
+ mlx4_pci_disable_device(persist->dev);
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -3974,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int ret;
- int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
- int total_vfs;
+ int err;
mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
- ret = pci_enable_device(pdev);
- if (ret) {
- mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+ err = mlx4_pci_enable_device(dev);
+ if (err) {
+ mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ int total_vfs;
+ int err;
+ mlx4_err(dev, "%s was called\n", __func__);
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
- ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+ err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
priv, 1);
- if (ret) {
- mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
- __func__, ret);
+ if (err) {
+ mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+ __func__, err);
goto end;
}
- ret = restore_current_port_types(dev, dev->persist->
+ err = restore_current_port_types(dev, dev->persist->
curr_port_type, dev->persist->
curr_port_poss_type);
- if (ret)
- mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+ if (err)
+ mlx4_err(dev, "could not restore original port types (%d)\n", err);
}
end:
mutex_unlock(&persist->interface_state_mutex);
- return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4029,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
static const struct pci_error_handlers mlx4_err_handler = {
.error_detected = mlx4_pci_err_detected,
.slot_reset = mlx4_pci_slot_reset,
+ .resume = mlx4_pci_resume,
};
static struct pci_driver mlx4_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1d4e2e054647..6aa73972d478 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -39,8 +39,6 @@
#include "mlx4.h"
-static const u8 zero_gid[16]; /* automatically initialized to 0 */
-
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
return 1 << dev->oper_log_mgm_entry_size;
@@ -752,8 +750,10 @@ static const u8 __promisc_mode[] = {
[MLX4_FS_REGULAR] = 0x0,
[MLX4_FS_ALL_DEFAULT] = 0x1,
[MLX4_FS_MC_DEFAULT] = 0x3,
- [MLX4_FS_UC_SNIFFER] = 0x4,
- [MLX4_FS_MC_SNIFFER] = 0x5,
+ [MLX4_FS_MIRROR_RX_PORT] = 0x4,
+ [MLX4_FS_MIRROR_SX_PORT] = 0x5,
+ [MLX4_FS_UC_SNIFFER] = 0x6,
+ [MLX4_FS_MC_SNIFFER] = 0x7,
};
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7baef52db6b7..c9d7fc5159f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -45,6 +45,7 @@
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <net/devlink.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -585,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
int init_port_ref[MLX4_MAX_PORTS + 1];
u16 max_mtu[MLX4_MAX_PORTS + 1];
+ u8 pptx;
+ u8 pprx;
int disable_mcast_ref[MLX4_MAX_PORTS + 1];
struct mlx4_resource_tracker res_tracker;
struct workqueue_struct *comm_wq;
@@ -828,6 +831,7 @@ struct mlx4_port_info {
struct mlx4_roce_gid_table gid_table;
int base_qpn;
struct cpu_rmap *rmap;
+ struct devlink_port devlink_port;
};
struct mlx4_sense {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 35de7d2e6b34..63b1aeae2c03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
unsigned long csum_complete;
+ unsigned long dropped;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
};
@@ -607,6 +608,7 @@ static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
+void mlx4_en_init_ptys2ethtool_map(void);
void mlx4_en_update_loopback_state(struct net_device *dev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c65087997..087b23b320cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
}
gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ /* Slave cannot change Global Pause configuration */
+ if (slave != mlx4_master_func_num(dev) &&
+ ((gen_context->pptx != master->pptx) ||
+ (gen_context->pprx != master->pprx))) {
+ gen_context->pptx = master->pptx;
+ gen_context->pprx = master->pprx;
+ mlx4_warn(dev,
+ "denying Global Pause change for slave:%d\n",
+ slave);
+ } else {
+ master->pptx = gen_context->pptx;
+ master->pprx = gen_context->pprx;
+ }
break;
case MLX4_SET_PORT_GID_TABLE:
/* change to MULTIPLE entries: number of guest's gids
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 25ce1b030a00..cd9b2b28df88 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3141,7 +3141,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
case QP_TRANS_RTS2RTS:
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
- if (slave != mlx4_master_func_num(dev))
+ if (slave != mlx4_master_func_num(dev)) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
@@ -3160,6 +3160,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
}
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index c503ea05e742..1cf722eba607 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -19,3 +19,15 @@ config MLX5_CORE_EN
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
Ethernet and Infiniband support in ConnectX-4 are currently mutually
exclusive.
+
+config MLX5_CORE_EN_DCB
+ bool "Data Center Bridging (DCB) Support"
+ default y
+ depends on MLX5_CORE_EN && DCB
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
+	  If set to N, you will not be able to configure QoS and rate-limit
+	  attributes.
+	  This option depends on the kernel's DCB support.
+
+	  If unsure, set to Y.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 01c0256effb8..4fc45ee0c5d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -3,6 +3,9 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
+
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
- en_txrx.o en_clock.o
+ en_txrx.o en_clock.o vxlan.o en_tc.o
+
+mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 037fc4cdf5af..eb926e1ee71c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -407,6 +407,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
const char *mlx5_command_str(int command)
{
switch (command) {
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ return "QUERY_HCA_VPORT_CONTEXT";
+
+ case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+ return "MODIFY_HCA_VPORT_CONTEXT";
+
case MLX5_CMD_OP_QUERY_HCA_CAP:
return "QUERY_HCA_CAP";
@@ -560,6 +566,18 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ACCESS_REG:
return "MLX5_CMD_OP_ACCESS_REG";
+ case MLX5_CMD_OP_SET_WOL_ROL:
+ return "SET_WOL_ROL";
+
+ case MLX5_CMD_OP_QUERY_WOL_ROL:
+ return "QUERY_WOL_ROL";
+
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ return "ADD_VXLAN_UDP_DPORT";
+
+ case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+ return "DELETE_VXLAN_UDP_DPORT";
+
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 5b1753233c5d..879e6276c473 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -29,6 +29,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#ifndef __MLX5_EN_H__
+#define __MLX5_EN_H__
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
@@ -38,8 +40,10 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
+#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
+#include <linux/rhashtable.h>
#include "wq.h"
#include "mlx5_core.h"
@@ -69,6 +73,11 @@
#define MLX5E_NUM_MAIN_GROUPS 9
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
+#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
+#endif
+
static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */
"rx_packets",
@@ -95,12 +104,15 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
/* SW counters */
"tso_packets",
"tso_bytes",
+ "tso_inner_packets",
+ "tso_inner_bytes",
"lro_packets",
"lro_bytes",
"rx_csum_good",
"rx_csum_none",
"rx_csum_sw",
"tx_csum_offload",
+ "tx_csum_inner",
"tx_queue_stopped",
"tx_queue_wake",
"tx_queue_dropped",
@@ -133,18 +145,21 @@ struct mlx5e_vport_stats {
/* SW counters */
u64 tso_packets;
u64 tso_bytes;
+ u64 tso_inner_packets;
+ u64 tso_inner_bytes;
u64 lro_packets;
u64 lro_bytes;
u64 rx_csum_good;
u64 rx_csum_none;
u64 rx_csum_sw;
u64 tx_csum_offload;
+ u64 tx_csum_inner;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_wqe_err;
-#define NUM_VPORT_COUNTERS 32
+#define NUM_VPORT_COUNTERS 35
};
static const char pport_strings[][ETH_GSTRING_LEN] = {
@@ -247,24 +262,32 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
"bytes",
"tso_packets",
"tso_bytes",
+ "tso_inner_packets",
+ "tso_inner_bytes",
+ "csum_offload_inner",
+ "nop",
"csum_offload_none",
"stopped",
"wake",
"dropped",
- "nop"
};
struct mlx5e_sq_stats {
+ /* commonly accessed in data path */
u64 packets;
u64 bytes;
u64 tso_packets;
u64 tso_bytes;
+ u64 tso_inner_packets;
+ u64 tso_inner_bytes;
+ u64 csum_offload_inner;
+ u64 nop;
+ /* less likely accessed in data path */
u64 csum_offload_none;
u64 stopped;
u64 wake;
u64 dropped;
- u64 nop;
-#define NUM_SQ_STATS 9
+#define NUM_SQ_STATS 12
};
struct mlx5e_stats {
@@ -276,7 +299,6 @@ struct mlx5e_params {
u8 log_sq_size;
u8 log_rq_size;
u16 num_channels;
- u8 default_vlan_prio;
u8 num_tc;
u16 rx_cq_moderation_usec;
u16 rx_cq_moderation_pkts;
@@ -289,6 +311,9 @@ struct mlx5e_params {
u8 rss_hfunc;
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ struct ieee_ets ets;
+#endif
};
struct mlx5e_tstamp {
@@ -363,6 +388,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+ MLX5E_SQ_STATE_BF_ENABLE,
};
struct mlx5e_sq {
@@ -391,7 +417,6 @@ struct mlx5e_sq {
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
void __iomem *uar_map;
- void __iomem *uar_bf_map;
struct netdev_queue *txq;
u32 sqn;
u16 bf_buf_size;
@@ -492,21 +517,33 @@ struct mlx5e_vlan_db {
bool filter_disabled;
};
+struct mlx5e_vxlan_db {
+ spinlock_t lock; /* protect vxlan table */
+ struct radix_tree_root tree;
+};
+
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
};
+struct mlx5e_tc_flow_table {
+ struct mlx5_flow_table *t;
+
+ struct rhashtable_params ht_params;
+ struct rhashtable ht;
+};
+
struct mlx5e_flow_tables {
struct mlx5_flow_namespace *ns;
+ struct mlx5e_tc_flow_table tc;
struct mlx5e_flow_table vlan;
struct mlx5e_flow_table main;
};
struct mlx5e_priv {
/* priv data path fields - start */
- int default_vlan_prio;
struct mlx5e_sq **txq_to_sq_map;
int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
/* priv data path fields - end */
@@ -516,7 +553,7 @@ struct mlx5e_priv {
struct mlx5_uar cq_uar;
u32 pdn;
u32 tdn;
- struct mlx5_core_mr mr;
+ struct mlx5_core_mkey mkey;
struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
@@ -527,9 +564,9 @@ struct mlx5e_priv {
struct mlx5e_flow_tables fts;
struct mlx5e_eth_addr_db eth_addr;
struct mlx5e_vlan_db vlan;
+ struct mlx5e_vxlan_db vxlan;
struct mlx5e_params params;
- spinlock_t async_events_spinlock; /* sync hw events */
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
struct delayed_work update_stats_work;
@@ -592,7 +629,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
@@ -640,16 +677,12 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
* doorbell
*/
wmb();
-
- if (bf_sz) {
- __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
-
- /* flush the write-combining mapped buffer */
- wmb();
-
- } else {
+ if (bf_sz)
+ __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+ else
mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
- }
+ /* flush the write-combining mapped buffer */
+ wmb();
sq->bf_offset ^= sq->bf_buf_size;
}
@@ -669,4 +702,11 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
+int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
+#endif
+
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
+
+#endif /* __MLX5_EN_H__ */
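A sketch of the doorbell path consolidated in mlx5e_tx_notify_hw() above, for illustration; the ordering comments are assumptions drawn from the barrier placement, not driver text:

/* Both doorbell flavors now share one trailing barrier:
 *
 *	wmb();				order the WQE write before the ring
 *	if (bf_sz)
 *		__iowrite64_copy();	BlueFlame: copy the WQE into the
 *					write-combining uar mapping
 *	else
 *		mlx5_write64();		plain doorbell write
 *	wmb();				flush the write-combining buffer
 *
 * sq->uar_map points at the BlueFlame mapping when
 * MLX5E_SQ_STATE_BF_ENABLE is set (see mlx5e_create_sq() in
 * en_main.c below), so a single map pointer serves both paths. */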
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
new file mode 100644
index 000000000000..3036f279a8fd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include "en.h"
+
+#define MLX5E_MAX_PRIORITY 8
+
+#define MLX5E_100MB (100000)
+#define MLX5E_1GB (1000000)
+
+static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (!MLX5_CAP_GEN(priv->mdev, ets))
+ return -ENOTSUPP;
+
+ memcpy(ets, &priv->params.ets, sizeof(*ets));
+ return 0;
+}
+
+enum {
+ MLX5E_VENDOR_TC_GROUP_NUM = 7,
+ MLX5E_ETS_TC_GROUP_NUM = 0,
+};
+
+static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
+{
+ bool any_tc_mapped_to_ets = false;
+ int strict_group;
+ int i;
+
+ for (i = 0; i <= max_tc; i++)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ any_tc_mapped_to_ets = true;
+
+ strict_group = any_tc_mapped_to_ets ? 1 : 0;
+
+ for (i = 0; i <= max_tc; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
+ tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
+ break;
+ case IEEE_8021QAZ_TSA_STRICT:
+ tc_group[i] = strict_group++;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
+ break;
+ }
+ }
+}
+
+static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
+ u8 *tc_group, int max_tc)
+{
+ int i;
+
+ for (i = 0; i <= max_tc; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_VENDOR:
+ tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
+ break;
+ case IEEE_8021QAZ_TSA_STRICT:
+ tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+ break;
+ }
+ }
+}
+
+int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
+ u8 tc_group[IEEE_8021QAZ_MAX_TCS];
+ int max_tc = mlx5_max_tc(mdev);
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, ets))
+ return -ENOTSUPP;
+
+ mlx5e_build_tc_group(ets, tc_group, max_tc);
+ mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
+
+ err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
+ if (err)
+ return err;
+
+ err = mlx5_set_port_tc_group(mdev, tc_group);
+ if (err)
+ return err;
+
+ return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
+}
+
+static int mlx5e_dcbnl_validate_ets(struct ieee_ets *ets)
+{
+ int bw_sum = 0;
+ int i;
+
+ /* Validate Priority */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+ return -EINVAL;
+ }
+
+ /* Validate Bandwidth Sum */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ bw_sum += ets->tc_tx_bw[i];
+ }
+
+ if (bw_sum != 0 && bw_sum != 100)
+ return -EINVAL;
+ return 0;
+}
+
+static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ err = mlx5e_dcbnl_validate_ets(ets);
+ if (err)
+ return err;
+
+ err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
+ if (err)
+ return err;
+
+ memcpy(&priv->params.ets, ets, sizeof(*ets));
+ priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
+
+ return 0;
+}
+
+static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
+
+ return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
+}
+
+static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ enum mlx5_port_status ps;
+ u8 curr_pfc_en;
+ int ret;
+
+ mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
+
+ if (pfc->pfc_en == curr_pfc_en)
+ return 0;
+
+ mlx5_query_port_admin_status(mdev, &ps);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+
+ ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
+
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+
+ return ret;
+}
+
+static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ (mode & DCB_CAP_DCBX_VER_CEE) ||
+ !(mode & DCB_CAP_DCBX_VER_IEEE) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return 1;
+
+ return 0;
+}
+
+static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
+ u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
+ int err;
+ int i;
+
+ err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
+ if (err)
+ return err;
+
+ memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
+
+ for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+ switch (max_bw_unit[i]) {
+ case MLX5_100_MBPS_UNIT:
+ maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
+ break;
+ case MLX5_GBPS_UNIT:
+ maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
+ break;
+ case MLX5_BW_NO_LIMIT:
+ break;
+ default:
+ WARN(true, "non-supported BW unit");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
+ u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
+ __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
+ int i;
+
+ memset(max_bw_value, 0, sizeof(max_bw_value));
+ memset(max_bw_unit, 0, sizeof(max_bw_unit));
+
+ for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+ if (!maxrate->tc_maxrate[i]) {
+ max_bw_unit[i] = MLX5_BW_NO_LIMIT;
+ continue;
+ }
+ if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
+ max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
+ MLX5E_100MB);
+ max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
+ max_bw_unit[i] = MLX5_100_MBPS_UNIT;
+ } else {
+ max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
+ MLX5E_1GB);
+ max_bw_unit[i] = MLX5_GBPS_UNIT;
+ }
+ }
+
+ return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
+}
+
+const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
+ .ieee_getets = mlx5e_dcbnl_ieee_getets,
+ .ieee_setets = mlx5e_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
+ .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
+ .getdcbx = mlx5e_dcbnl_getdcbx,
+ .setdcbx = mlx5e_dcbnl_setdcbx,
+};
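A worked example of the group and bandwidth builders above, with assumed input values (not taken from the driver):

/* max_tc = 3, ets->tc_tsa = { STRICT, ETS, STRICT, VENDOR },
 * ets->tc_tx_bw = { 0, 60, 0, 0 }
 *
 * mlx5e_build_tc_group(): at least one TC uses ETS, so strict
 * groups start at 1 and all ETS TCs share group 0:
 *	tc_group = { 1, 0, 2, 7 }	(vendor group = 7)
 *
 * mlx5e_build_tc_tx_bw(): strict and vendor TCs get
 * MLX5E_MAX_BW_ALLOC (100); an ETS TC keeps its configured share,
 * with a zero share promoted to MLX5E_MIN_BW_ALLOC (1):
 *	tc_tx_bw = { 100, 60, 100, 100 }
 */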
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5abeb00fceb8..68834b715f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -211,13 +211,14 @@ static void mlx5e_get_strings(struct net_device *dev,
sprintf(data + (idx++) * ETH_GSTRING_LEN,
"rx%d_%s", i, rq_stats_strings[j]);
- for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
sprintf(data +
- (idx++) * ETH_GSTRING_LEN,
- "tx%d_%d_%s", i, tc,
- sq_stats_strings[j]);
+ (idx++) * ETH_GSTRING_LEN,
+ "tx%d_%s",
+ priv->channeltc_to_txq_map[i][tc],
+ sq_stats_strings[j]);
break;
}
}
@@ -249,8 +250,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
&priv->state) ? 0 :
((u64 *)&priv->channel[i]->rq.stats)[j];
- for (i = 0; i < priv->params.num_channels; i++)
- for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
data[idx++] = !test_bit(MLX5E_STATE_OPENED,
&priv->state) ? 0 :
@@ -401,6 +402,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ return -ENOTSUPP;
+
coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
@@ -418,11 +422,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
int tc;
int i;
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -ENOTSUPP;
+
+ mutex_lock(&priv->state_lock);
priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
for (i = 0; i < priv->params.num_channels; ++i) {
c = priv->channel[i];
@@ -438,6 +449,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
coal->rx_max_coalesced_frames);
}
+out:
+ mutex_unlock(&priv->state_lock);
return 0;
}
@@ -900,6 +913,129 @@ static int mlx5e_get_ts_info(struct net_device *dev,
return 0;
}
+static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
+{
+ __u32 ret = 0;
+
+ if (MLX5_CAP_GEN(mdev, wol_g))
+ ret |= WAKE_MAGIC;
+
+ if (MLX5_CAP_GEN(mdev, wol_s))
+ ret |= WAKE_MAGICSECURE;
+
+ if (MLX5_CAP_GEN(mdev, wol_a))
+ ret |= WAKE_ARP;
+
+ if (MLX5_CAP_GEN(mdev, wol_b))
+ ret |= WAKE_BCAST;
+
+ if (MLX5_CAP_GEN(mdev, wol_m))
+ ret |= WAKE_MCAST;
+
+ if (MLX5_CAP_GEN(mdev, wol_u))
+ ret |= WAKE_UCAST;
+
+ if (MLX5_CAP_GEN(mdev, wol_p))
+ ret |= WAKE_PHY;
+
+ return ret;
+}
+
+static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode)
+{
+ __u32 ret = 0;
+
+ if (mode & MLX5_WOL_MAGIC)
+ ret |= WAKE_MAGIC;
+
+ if (mode & MLX5_WOL_SECURED_MAGIC)
+ ret |= WAKE_MAGICSECURE;
+
+ if (mode & MLX5_WOL_ARP)
+ ret |= WAKE_ARP;
+
+ if (mode & MLX5_WOL_BROADCAST)
+ ret |= WAKE_BCAST;
+
+ if (mode & MLX5_WOL_MULTICAST)
+ ret |= WAKE_MCAST;
+
+ if (mode & MLX5_WOL_UNICAST)
+ ret |= WAKE_UCAST;
+
+ if (mode & MLX5_WOL_PHY_ACTIVITY)
+ ret |= WAKE_PHY;
+
+ return ret;
+}
+
+static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode)
+{
+ u8 ret = 0;
+
+ if (mode & WAKE_MAGIC)
+ ret |= MLX5_WOL_MAGIC;
+
+ if (mode & WAKE_MAGICSECURE)
+ ret |= MLX5_WOL_SECURED_MAGIC;
+
+ if (mode & WAKE_ARP)
+ ret |= MLX5_WOL_ARP;
+
+ if (mode & WAKE_BCAST)
+ ret |= MLX5_WOL_BROADCAST;
+
+ if (mode & WAKE_MCAST)
+ ret |= MLX5_WOL_MULTICAST;
+
+ if (mode & WAKE_UCAST)
+ ret |= MLX5_WOL_UNICAST;
+
+ if (mode & WAKE_PHY)
+ ret |= MLX5_WOL_PHY_ACTIVITY;
+
+ return ret;
+}
+
+static void mlx5e_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 mlx5_wol_mode;
+ int err;
+
+ memset(wol, 0, sizeof(*wol));
+
+ wol->supported = mlx5e_get_wol_supported(mdev);
+ if (!wol->supported)
+ return;
+
+ err = mlx5_query_port_wol(mdev, &mlx5_wol_mode);
+ if (err)
+ return;
+
+ wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
+}
+
+static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ __u32 wol_supported = mlx5e_get_wol_supported(mdev);
+ u32 mlx5_wol_mode;
+
+ if (!wol_supported)
+ return -ENOTSUPP;
+
+ if (wol->wolopts & ~wol_supported)
+ return -EINVAL;
+
+ mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts);
+
+ return mlx5_set_port_wol(mdev, mlx5_wol_mode);
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -924,4 +1060,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
+ .get_wol = mlx5e_get_wol,
+ .set_wol = mlx5e_set_wol,
};
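A minimal sketch of the WoL bitmap translation above; the mapping is one-to-one, so the round trip is the identity for any mask built from supported bits (illustrative check, not driver code):

	__u32 linux_mode = WAKE_MAGIC | WAKE_ARP;
	u8 fw_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(linux_mode);

	/* expect MLX5_WOL_MAGIC | MLX5_WOL_ARP, and back unchanged */
	WARN_ON(mlx5e_reformat_wol_mode_mlx5_to_linux(fw_mode) != linux_mode);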
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 80d81abc4820..d00a24203410 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1041,7 +1041,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_MAIN_TABLE_SIZE);
+ ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1150,7 +1150,7 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_VLAN_TABLE_SIZE);
+ ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
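The priority bump from 0 to 1 in both tables above makes room for the TC offload table added in this series; a sketch of the assumed kernel RX namespace layout (consistent with the KERNEL_NUM_PRIOS = 2 change in fs_core.c below):

/* prio 0: tc table	(auto-grouped; created on first offloaded rule)
 * prio 1: vlan table -> main table
 *
 * TC rules carrying a FWD action use the vlan table as their
 * destination (see mlx5e_tc_add_flow() in en_tc.c). */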
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 402994bf7e16..e0adb604f461 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -30,9 +30,14 @@
* SOFTWARE.
*/
+#include <net/tc_act/tc_gact.h>
+#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
+#include <net/vxlan.h>
#include "en.h"
+#include "en_tc.h"
#include "eswitch.h"
+#include "vxlan.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -147,9 +152,12 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->tx_bytes = 0;
s->tso_packets = 0;
s->tso_bytes = 0;
+ s->tso_inner_packets = 0;
+ s->tso_inner_bytes = 0;
s->tx_queue_stopped = 0;
s->tx_queue_wake = 0;
s->tx_queue_dropped = 0;
+ s->tx_csum_inner = 0;
tx_offload_none = 0;
s->lro_packets = 0;
s->lro_bytes = 0;
@@ -174,9 +182,12 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->tx_bytes += sq_stats->bytes;
s->tso_packets += sq_stats->tso_packets;
s->tso_bytes += sq_stats->tso_bytes;
+ s->tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_csum_inner += sq_stats->csum_offload_inner;
tx_offload_none += sq_stats->csum_offload_none;
}
}
@@ -234,7 +245,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
/* Update calculated offload counters */
- s->tx_csum_offload = s->tx_packets - tx_offload_none;
+ s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
s->rx_csum_good = s->rx_packets - s->rx_csum_none -
s->rx_csum_sw;
@@ -258,9 +269,14 @@ static void mlx5e_update_stats_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-static void __mlx5e_async_event(struct mlx5e_priv *priv,
- enum mlx5_dev_event event)
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ enum mlx5_dev_event event, unsigned long param)
{
+ struct mlx5e_priv *priv = vpriv;
+
+ if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ return;
+
switch (event) {
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
@@ -272,17 +288,6 @@ static void __mlx5e_async_event(struct mlx5e_priv *priv,
}
}
-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
- enum mlx5_dev_event event, unsigned long param)
-{
- struct mlx5e_priv *priv = vpriv;
-
- spin_lock(&priv->async_events_spinlock);
- if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
- __mlx5e_async_event(priv, event);
- spin_unlock(&priv->async_events_spinlock);
-}
-
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
@@ -290,9 +295,8 @@ static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- spin_lock_irq(&priv->async_events_spinlock);
clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
- spin_unlock_irq(&priv->async_events_spinlock);
+ synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
@@ -538,7 +542,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
int txq_ix;
int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar);
+ err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
if (err)
return err;
@@ -550,8 +554,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
goto err_unmap_free_uar;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
- sq->uar_map = sq->uar.map;
- sq->uar_bf_map = sq->uar.bf_map;
+ if (sq->uar.bf_map) {
+ set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
+ sq->uar_map = sq->uar.bf_map;
+ } else {
+ sq->uar_map = sq->uar.map;
+ }
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline;
@@ -860,12 +868,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
if (err)
goto err_destroy_cq;
- err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation_usecs,
- moderation_frames);
- if (err)
- goto err_destroy_cq;
-
+ if (MLX5_CAP_GEN(mdev, cq_moderation))
+ mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
return 0;
err_destroy_cq:
@@ -973,7 +979,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mr.key);
+ c->mkey_be = cpu_to_be32(priv->mkey.key);
c->num_tc = priv->params.num_tc;
mlx5e_build_channeltc_to_txq_map(priv, ix);
@@ -1054,6 +1060,15 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
param->wq.linear = 1;
}
+static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+}
+
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
struct mlx5e_sq_param *param)
{
@@ -1410,6 +1425,24 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
return 0;
}
+static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int nch = priv->params.num_channels;
+ int ntc = priv->params.num_tc;
+ int tc;
+
+ netdev_reset_tc(netdev);
+
+ if (ntc == 1)
+ return;
+
+ netdev_set_num_tc(netdev, ntc);
+
+ for (tc = 0; tc < ntc; tc++)
+ netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+}
+
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1418,6 +1451,8 @@ int mlx5e_open_locked(struct net_device *netdev)
set_bit(MLX5E_STATE_OPENED, &priv->state);
+ mlx5e_netdev_set_tcs(netdev);
+
num_txqs = priv->params.num_channels * priv->params.num_tc;
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
@@ -1440,8 +1475,8 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_close_channels;
}
- mlx5e_update_carrier(priv);
mlx5e_redirect_rqts(priv);
+ mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv);
schedule_delayed_work(&priv->update_stats_work, 0);
@@ -1480,8 +1515,8 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_timestamp_cleanup(priv);
- mlx5e_redirect_rqts(priv);
netif_carrier_off(priv->netdev);
+ mlx5e_redirect_rqts(priv);
mlx5e_close_channels(priv);
return 0;
@@ -1563,8 +1598,7 @@ static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
memset(&cq_param, 0, sizeof(cq_param));
memset(&rq_param, 0, sizeof(rq_param));
- mlx5e_build_rx_cq_param(priv, &cq_param);
- mlx5e_build_rq_param(priv, &rq_param);
+ mlx5e_build_drop_rq_param(&rq_param);
err = mlx5e_create_drop_cq(priv, cq, &cq_param);
if (err)
@@ -1612,7 +1646,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
memset(in, 0, sizeof(in));
- MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, prio, tc << 1);
MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
@@ -1628,7 +1662,7 @@ static int mlx5e_create_tises(struct mlx5e_priv *priv)
int err;
int tc;
- for (tc = 0; tc < priv->params.num_tc; tc++) {
+ for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) {
err = mlx5e_create_tis(priv, tc);
if (err)
goto err_close_tises;
@@ -1647,7 +1681,7 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
int tc;
- for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
mlx5e_destroy_tis(priv, tc);
}
@@ -1824,6 +1858,58 @@ static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
mlx5e_destroy_tir(priv, i);
}
+static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ bool was_opened;
+ int err = 0;
+
+ if (tc && tc != MLX5E_MAX_NUM_TC)
+ return -EINVAL;
+
+ mutex_lock(&priv->state_lock);
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params.num_tc = tc ? tc : 1;
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
+ __be16 proto, struct tc_to_netdev *tc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ goto mqprio;
+
+ switch (tc->type) {
+ case TC_SETUP_CLSFLOWER:
+ switch (tc->cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return mlx5e_configure_flower(priv, proto, tc->cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return mlx5e_delete_flower(priv, tc->cls_flower);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+mqprio:
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ return mlx5e_setup_tc(dev, tc->tc);
+}
+
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
@@ -1903,6 +1989,13 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_disable_vlan_filter(priv);
}
+ if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) &&
+ mlx5e_tc_num_filters(priv)) {
+ netdev_err(netdev,
+ "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
+
return err;
}
@@ -2026,10 +2119,84 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
vf_stats);
}
+static void mlx5e_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (!mlx5e_vxlan_allowed(priv->mdev))
+ return;
+
+ mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+}
+
+static void mlx5e_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (!mlx5e_vxlan_allowed(priv->mdev))
+ return;
+
+ mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+}
+
+static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ netdev_features_t features)
+{
+ struct udphdr *udph;
+ u16 proto;
+ u16 port = 0;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ goto out;
+ }
+
+ if (proto == IPPROTO_UDP) {
+ udph = udp_hdr(skb);
+ port = be16_to_cpu(udph->dest);
+ }
+
+ /* Verify if UDP port is being offloaded by HW */
+ if (port && mlx5e_vxlan_lookup_port(priv, port))
+ return features;
+
+out:
+ /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
+static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ features = vlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ /* Validate if the tunneled packet is being offloaded by HW */
+ if (skb->encapsulation &&
+ (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
+ return mlx5e_vxlan_features_check(priv, skb, features);
+
+ return features;
+}
+
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
+ .ndo_setup_tc = mlx5e_ndo_setup_tc,
+ .ndo_select_queue = mlx5e_select_queue,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_set_rx_mode = mlx5e_set_rx_mode,
.ndo_set_mac_address = mlx5e_set_mac,
@@ -2044,6 +2211,8 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
+ .ndo_setup_tc = mlx5e_ndo_setup_tc,
+ .ndo_select_queue = mlx5e_select_queue,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_set_rx_mode = mlx5e_set_rx_mode,
.ndo_set_mac_address = mlx5e_set_mac,
@@ -2052,6 +2221,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
+ .ndo_add_vxlan_port = mlx5e_add_vxlan_port,
+ .ndo_del_vxlan_port = mlx5e_del_vxlan_port,
+ .ndo_features_check = mlx5e_features_check,
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
.ndo_get_vf_config = mlx5e_get_vf_config,
@@ -2078,6 +2250,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
}
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ mlx5_core_warn(mdev, "CQ modiration is not supported\n");
return 0;
}
@@ -2091,6 +2265,24 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static void mlx5e_ets_init(struct mlx5e_priv *priv)
+{
+ int i;
+
+ priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
+ for (i = 0; i < priv->params.ets.ets_cap; i++) {
+ priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
+ priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
+ priv->params.ets.prio_tc[i] = i;
+ }
+
+ /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+ priv->params.ets.prio_tc[0] = 1;
+ priv->params.ets.prio_tc[1] = 0;
+}
+#endif
+
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels)
{
@@ -2122,7 +2314,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.num_tc = 1;
- priv->params.default_vlan_prio = 0;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
@@ -2137,9 +2328,11 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->mdev = mdev;
priv->netdev = netdev;
priv->params.num_channels = num_channels;
- priv->default_vlan_prio = priv->params.default_vlan_prio;
- spin_lock_init(&priv->async_events_spinlock);
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_ets_init(priv);
+#endif
+
mutex_init(&priv->state_lock);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
@@ -2166,10 +2359,14 @@ static void mlx5e_build_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
- else
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+#endif
+ } else {
netdev->netdev_ops = &mlx5e_netdev_ops_basic;
+ }
netdev->watchdog_timeo = 15 * HZ;
@@ -2192,10 +2389,27 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (mlx5e_vxlan_allowed(mdev)) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM;
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_TSO;
+ netdev->hw_enc_features |= NETIF_F_TSO6;
+ netdev->hw_enc_features |= NETIF_F_RXHASH;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
netdev->features = netdev->hw_features;
if (!priv->params.lro_en)
netdev->features &= ~NETIF_F_LRO;
+#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
+ if (FT_CAP(flow_modify_en) &&
+ FT_CAP(modify_root) &&
+ FT_CAP(identified_miss_table_mode) &&
+ FT_CAP(flow_table_modify))
+ priv->netdev->hw_features |= NETIF_F_HW_TC;
+
netdev->features |= NETIF_F_HIGHDMA;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -2204,7 +2418,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
}
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
- struct mlx5_core_mr *mr)
+ struct mlx5_core_mkey *mkey)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_create_mkey_mbox_in *in;
@@ -2220,7 +2434,7 @@ static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+ err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
NULL);
kvfree(in);
@@ -2238,7 +2452,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
if (mlx5e_check_required_hca_cap(mdev))
return NULL;
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+ nch * MLX5E_MAX_NUM_TC,
+ nch);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
@@ -2251,7 +2467,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
priv = netdev_priv(netdev);
- err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+ err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
if (err) {
mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
goto err_free_netdev;
@@ -2269,7 +2485,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_dealloc_pd;
}
- err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+ err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey);
if (err) {
mlx5_core_err(mdev, "create mkey failed, %d\n", err);
goto err_dealloc_transport_domain;
@@ -2313,17 +2529,33 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
mlx5e_init_eth_addr(priv);
+ mlx5e_vxlan_init(priv);
+
+ err = mlx5e_tc_init(priv);
+ if (err)
+ goto err_destroy_flow_tables;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
+#endif
+
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_destroy_flow_tables;
+ goto err_tc_cleanup;
}
+ if (mlx5e_vxlan_allowed(mdev))
+ vxlan_get_rx_port(netdev);
+
mlx5e_enable_async_events(priv);
schedule_work(&priv->set_rx_mode_work);
return priv;
+err_tc_cleanup:
+ mlx5e_tc_cleanup(priv);
+
err_destroy_flow_tables:
mlx5e_destroy_flow_tables(priv);
@@ -2343,7 +2575,7 @@ err_destroy_tises:
mlx5e_destroy_tises(priv);
err_destroy_mkey:
- mlx5_core_destroy_mkey(mdev, &priv->mr);
+ mlx5_core_destroy_mkey(mdev, &priv->mkey);
err_dealloc_transport_domain:
mlx5_core_dealloc_transport_domain(mdev, priv->tdn);
@@ -2371,13 +2603,15 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
mlx5e_disable_async_events(priv);
flush_scheduled_work();
unregister_netdev(netdev);
+ mlx5e_tc_cleanup(priv);
+ mlx5e_vxlan_cleanup(priv);
mlx5e_destroy_flow_tables(priv);
mlx5e_destroy_tirs(priv);
mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
mlx5e_close_drop_rq(priv);
mlx5e_destroy_tises(priv);
- mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
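The TC-aware queue layout set up by mlx5e_netdev_set_tcs() and the enlarged alloc_etherdev_mqs() call above imply a fixed indexing; a small sketch, assuming channeltc_to_txq_map follows the same block layout (the helper is illustrative, not part of the driver):

/* nch channels, ntc TCs: netdev_set_tc_queue(netdev, tc, nch, tc * nch)
 * places each TC's queues in one contiguous block. */
static inline int txq_ix(int tc, int ch, int nch)
{
	return tc * nch + ch;	/* assumed to match channeltc_to_txq_map[ch][tc] */
}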
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 59658b9d05d1..58d4e2f962c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -35,6 +35,7 @@
#include <linux/tcp.h>
#include <net/busy_poll.h>
#include "en.h"
+#include "en_tc.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -167,14 +168,15 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb)
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool lro)
{
if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
goto csum_none;
- if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+ if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (is_first_ethertype_ip(skb)) {
+ } else if (likely(is_first_ethertype_ip(skb))) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
rq->stats.csum_sw++;
@@ -211,7 +213,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
- mlx5e_handle_csum(netdev, cqe, rq, skb);
+ mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
skb->protocol = eth_type_trans(skb, netdev);
@@ -223,6 +225,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (cqe_has_vlan(cqe))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(cqe->vlan_info));
+
+ skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
}
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
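On the skb->mark assignment above: the CQE's sop_drop_qpn field carries back the flow tag programmed by the TC offload path, masked to 16 bits. A sketch of the relationship:

/* TX side: parse_tc_actions() stores the skbedit mark as flow_tag,
 * rejecting marks wider than MLX5E_TC_FLOW_ID_MASK.
 * RX side: skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK,
 * so a matched packet surfaces with exactly the mark its rule set. */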
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
new file mode 100644
index 000000000000..b3de09f13425
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_skbedit.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/device.h>
+#include <linux/rhashtable.h>
+#include "en.h"
+#include "en_tc.h"
+
+struct mlx5e_tc_flow {
+ struct rhash_head node;
+ u64 cookie;
+ struct mlx5_flow_rule *rule;
+};
+
+#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024
+#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4
+
+static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
+ u32 *match_c, u32 *match_v,
+ u32 action, u32 flow_tag)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ {.ft = priv->fts.vlan.t},
+ };
+ struct mlx5_flow_rule *rule;
+ bool table_created = false;
+
+ if (IS_ERR_OR_NULL(priv->fts.tc.t)) {
+ priv->fts.tc.t =
+ mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0,
+ MLX5E_TC_FLOW_TABLE_NUM_ENTRIES,
+ MLX5E_TC_FLOW_TABLE_NUM_GROUPS);
+ if (IS_ERR(priv->fts.tc.t)) {
+ netdev_err(priv->netdev,
+ "Failed to create tc offload table\n");
+ return ERR_CAST(priv->fts.tc.t);
+ }
+
+ table_created = true;
+ }
+
+ rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS,
+ match_c, match_v,
+ action, flow_tag,
+ action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
+
+ if (IS_ERR(rule) && table_created) {
+ mlx5_destroy_flow_table(priv->fts.tc.t);
+ priv->fts.tc.t = NULL;
+ }
+
+ return rule;
+}
+
+static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+ struct mlx5_flow_rule *rule)
+{
+ mlx5_del_flow_rule(rule);
+
+ if (!mlx5e_tc_num_filters(priv)) {
+ mlx5_destroy_flow_table(priv->fts.tc.t);
+ priv->fts.tc.t = NULL;
+ }
+}
+
+static int parse_cls_flower(struct mlx5e_priv *priv,
+ u32 *match_c, u32 *match_v,
+ struct tc_cls_flower_offload *f)
+{
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
+ f->dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+ addr_type = key->addr_type;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ ip_proto = key->ip_proto;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+ ntohs(mask->n_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ ntohs(key->n_proto));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ mask->ip_proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ key->ip_proto);
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->key);
+ struct flow_dissector_key_eth_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->mask);
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dmac_47_16),
+ mask->dst);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16),
+ key->dst);
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ smac_47_16),
+ mask->src);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ smac_47_16),
+ key->src);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->mask);
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &mask->src, sizeof(mask->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &key->src, sizeof(key->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &mask->dst, sizeof(mask->dst));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &key->dst, sizeof(key->dst));
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->mask);
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &mask->src, sizeof(mask->src));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &key->src, sizeof(key->src));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &mask->dst, sizeof(mask->dst));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &key->dst, sizeof(key->dst));
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->key);
+ struct flow_dissector_key_ports *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->mask);
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ tcp_sport, ntohs(mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ tcp_sport, ntohs(key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ tcp_dport, ntohs(mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ tcp_dport, ntohs(key->dst));
+ break;
+
+ case IPPROTO_UDP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_sport, ntohs(mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_sport, ntohs(key->src));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_dport, ntohs(mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_dport, ntohs(key->dst));
+ break;
+ default:
+ netdev_err(priv->netdev,
+ "Only UDP and TCP transport are supported\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ u32 *action, u32 *flow_tag)
+{
+ const struct tc_action *a;
+
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ *action = 0;
+
+ tc_for_each_action(a, exts) {
+ /* Only support a single action per rule */
+ if (*action)
+ return -EINVAL;
+
+ if (is_tcf_gact_shot(a)) {
+ *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ continue;
+ }
+
+ if (is_tcf_skbedit_mark(a)) {
+ u32 mark = tcf_skbedit_mark(a);
+
+ if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
+ netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
+ mark);
+ return -EINVAL;
+ }
+
+ *flow_tag = mark;
+ *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ continue;
+ }
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ u32 *match_c;
+ u32 *match_v;
+ int err = 0;
+ u32 flow_tag;
+ u32 action;
+ struct mlx5e_tc_flow *flow;
+ struct mlx5_flow_rule *old = NULL;
+
+ flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+ tc->ht_params);
+ if (flow)
+ old = flow->rule;
+ else
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_c || !match_v || !flow) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ flow->cookie = f->cookie;
+
+ err = parse_cls_flower(priv, match_c, match_v, f);
+ if (err < 0)
+ goto err_free;
+
+ err = parse_tc_actions(priv, f->exts, &action, &flow_tag);
+ if (err < 0)
+ goto err_free;
+
+ err = rhashtable_insert_fast(&tc->ht, &flow->node,
+ tc->ht_params);
+ if (err)
+ goto err_free;
+
+ flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
+ flow_tag);
+ if (IS_ERR(flow->rule)) {
+ err = PTR_ERR(flow->rule);
+ goto err_hash_del;
+ }
+
+ if (old)
+ mlx5e_tc_del_flow(priv, old);
+
+ goto out;
+
+err_hash_del:
+ rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
+
+err_free:
+ if (!old)
+ kfree(flow);
+out:
+ kfree(match_c);
+ kfree(match_v);
+ return err;
+}
+
+int mlx5e_delete_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlx5e_tc_flow *flow;
+ struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+
+ flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+ tc->ht_params);
+ if (!flow)
+ return -EINVAL;
+
+ rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
+
+ mlx5e_tc_del_flow(priv, flow->rule);
+
+ kfree(flow);
+
+ return 0;
+}
+
+static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
+ .head_offset = offsetof(struct mlx5e_tc_flow, node),
+ .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
+ .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
+ .automatic_shrinking = true,
+};
+
+int mlx5e_tc_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+
+ tc->ht_params = mlx5e_tc_flow_ht_params;
+ return rhashtable_init(&tc->ht, &tc->ht_params);
+}
+
+static void _mlx5e_tc_del_flow(void *ptr, void *arg)
+{
+ struct mlx5e_tc_flow *flow = ptr;
+ struct mlx5e_priv *priv = arg;
+
+ mlx5e_tc_del_flow(priv, flow->rule);
+ kfree(flow);
+}
+
+void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+
+ rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
+
+ if (!IS_ERR_OR_NULL(priv->fts.tc.t)) {
+ mlx5_destroy_flow_table(priv->fts.tc.t);
+ priv->fts.tc.t = NULL;
+ }
+}
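One point worth noting in mlx5e_configure_flower() above: on a cookie collision the new hardware rule is created before the old one is deleted, so a replaced flow is never left unhandled. Sketch of the sequence (illustrative):

/* replace of an existing cookie:
 *	old = flow->rule;
 *	flow->rule = mlx5e_tc_add_flow(...);	new rule goes live first
 *	if (!IS_ERR(flow->rule) && old)
 *		mlx5e_tc_del_flow(priv, old);	then retire the old one
 */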
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
new file mode 100644
index 000000000000..d677428dc10f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_EN_TC_H__
+#define __MLX5_EN_TC_H__
+
+#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
+
+int mlx5e_tc_init(struct mlx5e_priv *priv);
+void mlx5e_tc_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
+ struct tc_cls_flower_offload *f);
+int mlx5e_delete_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f);
+
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
+{
+ return atomic_read(&priv->fts.tc.ht.nelems);
+}
+
+#endif /* __MLX5_EN_TC_H__ */
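For context, a hedged example of driving this offload from userspace; exact iproute2 syntax may differ by version, and the device name is a placeholder:

/* # tc qdisc add dev eth0 ingress
 * # tc filter add dev eth0 protocol ip parent ffff: \
 *	flower ip_proto tcp dst_port 80 action drop
 *
 * The flower classifier hands the rule to mlx5e_ndo_setup_tc() via
 * TC_SETUP_CLSFLOWER / TC_CLSFLOWER_REPLACE. */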
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index bb4eeeb007de..1ffc7cb6f78c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -109,12 +109,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
- int up = skb_vlan_tag_present(skb) ?
- skb->vlan_tci >> VLAN_PRIO_SHIFT :
- priv->default_vlan_prio;
- int tc = netdev_get_prio_tc_map(dev, up);
+ int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
+ skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
- return priv->channeltc_to_txq_map[channel_ix][tc];
+ return priv->channeltc_to_txq_map[channel_ix][up];
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
@@ -188,9 +186,16 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
memset(wqe, 0, sizeof(*wqe));
- if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
- eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
- else
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (skb->encapsulation) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ sq->stats.csum_offload_inner++;
+ } else {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ }
+ } else
sq->stats.csum_offload_none++;
if (sq->cc != sq->prev_cc) {
@@ -199,15 +204,20 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
if (skb_is_gso(skb)) {
- u32 payload_len;
-
eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
opcode = MLX5_OPCODE_LSO;
- ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
- payload_len = skb->len - ihs;
+
+ if (skb->encapsulation) {
+ ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ sq->stats.tso_inner_packets++;
+ sq->stats.tso_inner_bytes += skb->len - ihs;
+ } else {
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += skb->len - ihs;
+ }
+
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
- sq->stats.tso_packets++;
- sq->stats.tso_bytes += payload_len;
} else {
bf = sq->bf_budget &&
!skb->xmit_more &&
@@ -295,7 +305,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
int bf_sz = 0;
- if (bf && sq->uar_bf_map)
+ if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
bf_sz = wi->num_wqebbs << 3;
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -329,7 +339,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
u32 dma_fifo_cc;
@@ -401,7 +411,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
npkts++;
nbytes += wi->num_bytes;
sqcc += wi->num_wqebbs;
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, napi_budget);
} while (!last_wqe);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 66d51a77609e..9bb4395aceeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -60,7 +60,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
for (i = 0; i < c->num_tc; i++)
- busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+ busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 647a3ca2c2a9..18fccec72c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -442,6 +442,11 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
+{
+ return dev->priv.msix_arr[vecidx].vector;
+}
+
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index a9894d2e8e26..f46f1db0fc00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -218,19 +218,22 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
- list_for_each_entry(dst, &fte->node.children, node.list) {
- unsigned int id;
-
- MLX5_SET(dest_format_struct, in_dests, destination_type,
- dst->dest_attr.type);
- if (dst->dest_attr.type ==
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
- id = dst->dest_attr.ft->id;
- else
- id = dst->dest_attr.tir_num;
- MLX5_SET(dest_format_struct, in_dests, destination_id, id);
- in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ unsigned int id;
+
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ dst->dest_attr.type);
+ if (dst->dest_attr.type ==
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
+ id = dst->dest_attr.ft->id;
+ } else {
+ id = dst->dest_attr.tir_num;
+ }
+ MLX5_SET(dest_format_struct, in_dests, destination_id, id);
+ in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ }
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
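
With this hunk the destination array in the flow context is written only when the FTE's action includes MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, so drop/allow rules no longer carry destination entries. A hedged sketch of a caller that pairs the action bit with a destination (the TIR number is hypothetical):

	/* Sketch: steer matching traffic to a TIR. The destination list is
	 * emitted by mlx5_cmd_set_fte() only because FWD_DEST is set. */
	struct mlx5_flow_destination dest = {
		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
		.tir_num = tirn, /* hypothetical TIR number */
	};
	struct mlx5_flow_rule *rule;

	rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
				  match_value,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);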
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 6f68dba8d7ed..5121be4675d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -73,10 +73,13 @@
#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_MAX_FT)
-#define KERNEL_MAX_FT 2
-#define KERNEL_NUM_PRIOS 1
+#define KERNEL_MAX_FT 3
+#define KERNEL_NUM_PRIOS 2
#define KENREL_MIN_LEVEL 2
+#define ANCHOR_MAX_FT 1
+#define ANCHOR_NUM_PRIOS 1
+#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
struct node_caps {
size_t arr_sz;
long *caps;
@@ -92,7 +95,7 @@ static struct init_tree_node {
int max_ft;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 3,
+ .ar_size = 4,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
@@ -108,6 +111,8 @@ static struct init_tree_node {
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))),
+ ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+ ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))),
}
};
@@ -196,8 +201,10 @@ static void tree_put_node(struct fs_node *node)
static int tree_remove_node(struct fs_node *node)
{
- if (atomic_read(&node->refcount) > 1)
- return -EPERM;
+ if (atomic_read(&node->refcount) > 1) {
+ atomic_dec(&node->refcount);
+ return -EEXIST;
+ }
tree_put_node(node);
return 0;
}
@@ -360,8 +367,13 @@ static void del_rule(struct fs_node *node)
memcpy(match_value, fte->val, sizeof(fte->val));
fs_get_obj(ft, fg->node.parent);
list_del(&rule->node.list);
- fte->dests_size--;
- if (fte->dests_size) {
+ if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+ mutex_lock(&rule->dest_attr.ft->lock);
+ list_del(&rule->next_ft);
+ mutex_unlock(&rule->dest_attr.ft->lock);
+ }
+ if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
+ --fte->dests_size) {
err = mlx5_cmd_update_fte(dev, ft,
fg->id, fte);
if (err)
@@ -465,6 +477,8 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->type = table_type;
ft->max_fte = max_fte;
+ INIT_LIST_HEAD(&ft->fwd_rules);
+ mutex_init(&ft->lock);
return ft;
}
@@ -601,9 +615,63 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
+static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct fs_fte *fte;
+ int err = 0;
+
+ fs_get_obj(fte, rule->node.parent);
+ if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return -EINVAL;
+ lock_ref_node(&fte->node);
+ fs_get_obj(fg, fte->node.parent);
+ fs_get_obj(ft, fg->node.parent);
+
+ memcpy(&rule->dest_attr, dest, sizeof(*dest));
+ err = mlx5_cmd_update_fte(get_dev(&ft->node),
+ ft, fg->id, fte);
+ unlock_ref_node(&fte->node);
+
+ return err;
+}
+
+/* Modify/set FWD rules that point to old_next_ft, to point to new_next_ft */
+static int connect_fwd_rules(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *new_next_ft,
+ struct mlx5_flow_table *old_next_ft)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *iter;
+ int err = 0;
+
+ /* new_next_ft and old_next_ft could be NULL only
+ * when we create/destroy the anchor flow table.
+ */
+ if (!new_next_ft || !old_next_ft)
+ return 0;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = new_next_ft;
+
+ mutex_lock(&old_next_ft->lock);
+ list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
+ mutex_unlock(&old_next_ft->lock);
+ list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
+ err = mlx5_modify_rule_destination(iter, &dest);
+ if (err)
+ pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
+ new_next_ft->id);
+ }
+ return 0;
+}
+
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
struct fs_prio *prio)
{
+ struct mlx5_flow_table *next_ft;
int err = 0;
/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
@@ -612,6 +680,11 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
err = connect_prev_fts(dev, ft, prio);
if (err)
return err;
+
+ next_ft = find_next_chained_ft(prio);
+ err = connect_fwd_rules(dev, ft, next_ft);
+ if (err)
+ return err;
}
if (MLX5_CAP_FLOWTABLE(dev,
@@ -762,8 +835,10 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
if (!rule)
return NULL;
+ INIT_LIST_HEAD(&rule->next_ft);
rule->node.type = FS_TYPE_FLOW_DEST;
- memcpy(&rule->dest_attr, dest, sizeof(*dest));
+ if (dest)
+ memcpy(&rule->dest_attr, dest, sizeof(*dest));
return rule;
}
@@ -782,11 +857,17 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
return ERR_PTR(-ENOMEM);
fs_get_obj(ft, fg->node.parent);
- /* Add dest to dests list- added as first element after the head */
+ /* Add dest to the dests list: flow table destinations must be at
+ * the end of the list for forward-to-next-prio rules.
+ */
tree_init_node(&rule->node, 1, del_rule);
- list_add_tail(&rule->node.list, &fte->node.children);
- fte->dests_size++;
- if (fte->dests_size == 1)
+ if (dest && dest->type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ list_add(&rule->node.list, &fte->node.children);
+ else
+ list_add_tail(&rule->node.list, &fte->node.children);
+ if (dest)
+ fte->dests_size++;
+ if (fte->dests_size == 1 || !dest)
err = mlx5_cmd_create_fte(get_dev(&ft->node),
ft, fg->id, fte);
else
@@ -802,7 +883,8 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
free_rule:
list_del(&rule->node.list);
kfree(rule);
- fte->dests_size--;
+ if (dest)
+ fte->dests_size--;
return ERR_PTR(err);
}
@@ -903,6 +985,25 @@ out:
return fg;
}
+static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_rule *rule;
+
+ list_for_each_entry(rule, &fte->node.children, node.list) {
+ if (rule->dest_attr.type == dest->type) {
+ if ((dest->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dest->vport_num == rule->dest_attr.vport_num) ||
+ (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ dest->ft == rule->dest_attr.ft) ||
+ (dest->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+ dest->tir_num == rule->dest_attr.tir_num))
+ return rule;
+ }
+ }
+ return NULL;
+}
+
static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
u32 *match_value,
u8 action,
@@ -919,6 +1020,13 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
if (compare_match_value(&fg->mask, match_value, &fte->val) &&
action == fte->action && flow_tag == fte->flow_tag) {
+ rule = find_flow_rule(fte, dest);
+ if (rule) {
+ atomic_inc(&rule->node.refcount);
+ unlock_ref_node(&fte->node);
+ unlock_ref_node(&fg->node);
+ return rule;
+ }
rule = add_rule_fte(fte, fg, dest);
unlock_ref_node(&fte->node);
if (IS_ERR(rule))
@@ -984,18 +1092,21 @@ static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
return rule;
}
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
- u32 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest)
+static struct mlx5_flow_rule *
+_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ u32 *match_criteria,
+ u32 *match_value,
+ u32 action,
+ u32 flow_tag,
+ struct mlx5_flow_destination *dest)
{
struct mlx5_flow_group *g;
struct mlx5_flow_rule *rule;
+ if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest)
+ return ERR_PTR(-EINVAL);
+
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
fs_for_each_fg(g, ft)
if (compare_match_criteria(g->mask.match_criteria_enable,
@@ -1014,6 +1125,63 @@ unlock:
unlock_ref_node(&ft->node);
return rule;
}
+
+static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
+{
+ return ((ft->type == FS_FT_NIC_RX) &&
+ (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
+}
+
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ u32 *match_criteria,
+ u32 *match_value,
+ u32 action,
+ u32 flow_tag,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ struct mlx5_flow_destination gen_dest;
+ struct mlx5_flow_table *next_ft = NULL;
+ struct mlx5_flow_rule *rule = NULL;
+ u32 sw_action = action;
+ struct fs_prio *prio;
+
+ fs_get_obj(prio, ft->node.parent);
+ if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+ if (!fwd_next_prio_supported(ft))
+ return ERR_PTR(-EOPNOTSUPP);
+ if (dest)
+ return ERR_PTR(-EINVAL);
+ mutex_lock(&root->chain_lock);
+ next_ft = find_next_chained_ft(prio);
+ if (next_ft) {
+ gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ gen_dest.ft = next_ft;
+ dest = &gen_dest;
+ action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else {
+ mutex_unlock(&root->chain_lock);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ }
+
+ rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+ match_value, action, flow_tag, dest);
+
+ if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+ if (!IS_ERR_OR_NULL(rule) &&
+ (list_empty(&rule->next_ft))) {
+ mutex_lock(&next_ft->lock);
+ list_add(&rule->next_ft, &next_ft->fwd_rules);
+ mutex_unlock(&next_ft->lock);
+ rule->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ }
+ mutex_unlock(&root->chain_lock);
+ }
+ return rule;
+}
EXPORT_SYMBOL(mlx5_add_flow_rule);
void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
@@ -1077,6 +1245,10 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
return 0;
next_ft = find_next_chained_ft(prio);
+ err = connect_fwd_rules(dev, next_ft, ft);
+ if (err)
+ return err;
+
err = connect_prev_fts(dev, next_ft, prio);
if (err)
mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
@@ -1126,6 +1298,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
case MLX5_FLOW_NAMESPACE_BYPASS:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_LEFTOVERS:
+ case MLX5_FLOW_NAMESPACE_ANCHOR:
prio = type;
break;
case MLX5_FLOW_NAMESPACE_FDB:
@@ -1351,6 +1524,25 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
}
}
+#define ANCHOR_PRIO 0
+#define ANCHOR_SIZE 1
+static int create_anchor_flow_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_flow_table *ft;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
+ if (!ns)
+ return -EINVAL;
+ ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE);
+ if (IS_ERR(ft)) {
+ mlx5_core_err(dev, "Failed to create last anchor flow table\n");
+ return PTR_ERR(ft);
+ }
+ return 0;
+}
+
static int init_root_ns(struct mlx5_core_dev *dev)
{
@@ -1363,6 +1555,9 @@ static int init_root_ns(struct mlx5_core_dev *dev)
set_prio_attrs(dev->priv.root_ns);
+ if (create_anchor_flow_table(dev))
+ goto cleanup;
+
return 0;
cleanup:
@@ -1392,6 +1587,15 @@ static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
root_ns = NULL;
}
+static void destroy_flow_tables(struct fs_prio *prio)
+{
+ struct mlx5_flow_table *iter;
+ struct mlx5_flow_table *tmp;
+
+ fs_for_each_ft_safe(iter, tmp, prio)
+ mlx5_destroy_flow_table(iter);
+}
+
static void cleanup_root_ns(struct mlx5_core_dev *dev)
{
struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
@@ -1420,6 +1624,7 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
list);
fs_get_obj(obj_iter_prio2, iter_prio2);
+ destroy_flow_tables(obj_iter_prio2);
if (tree_remove_node(iter_prio2)) {
mlx5_core_warn(dev,
"Priority %d wasn't destroyed, refcount > 1\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 00245fd7e4bc..f37a6248a27b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -68,6 +68,11 @@ struct fs_node {
struct mlx5_flow_rule {
struct fs_node node;
struct mlx5_flow_destination dest_attr;
+ /* next_ft should be accessed under chain_lock and only if the
+ * destination type is FWD_NEXT_PRIO.
+ */
+ struct list_head next_ft;
+ u32 sw_action;
};
/* Type of children is mlx5_flow_group */
@@ -82,6 +87,10 @@ struct mlx5_flow_table {
unsigned int required_groups;
unsigned int num_groups;
} autogroup;
+ /* Protect fwd_rules */
+ struct mutex lock;
+ /* FWD rules that point to this flow table */
+ struct list_head fwd_rules;
};
/* Type of children is mlx5_flow_rule */
@@ -142,6 +151,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
#define fs_list_for_each_entry(pos, root) \
list_for_each_entry(pos, root, node.list)
+#define fs_list_for_each_entry_safe(pos, tmp, root) \
+ list_for_each_entry_safe(pos, tmp, root, node.list)
+
#define fs_for_each_ns_or_ft_reverse(pos, prio) \
list_for_each_entry_reverse(pos, &(prio)->node.children, list)
@@ -157,6 +169,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
#define fs_for_each_ft(pos, prio) \
fs_list_for_each_entry(pos, &(prio)->node.children)
+#define fs_for_each_ft_safe(pos, tmp, prio) \
+ fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children)
+
#define fs_for_each_fg(pos, ft) \
fs_list_for_each_entry(pos, &(ft)->node.children)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa1ab4702385..75c7ae6a5cc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -98,88 +98,55 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
-
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
return err;
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, pg)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, roce)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, nic_flow_table)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, vport_group_manager) &&
MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
- HCA_CAP_OPMOD_GET_CUR);
- if (err)
- return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
- HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
if (err)
return err;
}
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
- HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
if (err)
return err;
- err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
- HCA_CAP_OPMOD_GET_MAX);
+ }
+
+ if (MLX5_CAP_GEN(dev, vector_calc)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 1545a944c309..3f3b2fae4991 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -341,8 +341,9 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
- enum mlx5_cap_mode cap_mode)
+static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
+ enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -392,6 +393,16 @@ query_ex:
return err;
}
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
+{
+ int ret;
+
+ ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
+ if (ret)
+ return ret;
+ return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
+}
+
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
@@ -419,8 +430,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
int err;
if (MLX5_CAP_GEN(dev, atomic)) {
- err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
- HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
if (err)
return err;
} else {
@@ -462,11 +472,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
if (!set_ctx)
goto query_ex;
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
- if (err)
- goto query_ex;
-
- err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
if (err)
goto query_ex;
@@ -767,22 +773,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return -ENOTSUPP;
}
-static int map_bf_area(struct mlx5_core_dev *dev)
-{
- resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
- resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
-
- dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
-
- return dev->priv.bf_mapping ? 0 : -ENOMEM;
-}
-
-static void unmap_bf_area(struct mlx5_core_dev *dev)
-{
- if (dev->priv.bf_mapping)
- io_mapping_free(dev->priv.bf_mapping);
-}
-
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
struct mlx5_device_context *dev_ctx;
@@ -1103,21 +1093,16 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_stop_eqs;
}
- if (map_bf_area(dev))
- dev_err(&pdev->dev, "Failed to map blue flame area\n");
-
err = mlx5_irq_set_affinity_hints(dev);
- if (err) {
+ if (err)
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
- goto err_unmap_bf_area;
- }
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
mlx5_init_cq_table(dev);
mlx5_init_qp_table(dev);
mlx5_init_srq_table(dev);
- mlx5_init_mr_table(dev);
+ mlx5_init_mkey_table(dev);
err = mlx5_init_fs(dev);
if (err) {
@@ -1164,15 +1149,11 @@ err_sriov:
err_reg_dev:
mlx5_cleanup_fs(dev);
err_fs:
- mlx5_cleanup_mr_table(dev);
+ mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
-
-err_unmap_bf_area:
- unmap_bf_area(dev);
-
free_comp_eqs(dev);
err_stop_eqs:
@@ -1237,12 +1218,11 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
#endif
mlx5_cleanup_fs(dev);
- mlx5_cleanup_mr_table(dev);
+ mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
- unmap_bf_area(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0336847ec9a1..0b0b226c789e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -99,6 +99,7 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 6fa22b51e460..77a7293921d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -36,25 +36,26 @@
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
-void mlx5_init_mr_table(struct mlx5_core_dev *dev)
+void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
{
- struct mlx5_mr_table *table = &dev->priv.mr_table;
+ struct mlx5_mkey_table *table = &dev->priv.mkey_table;
memset(table, 0, sizeof(*table));
rwlock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
-void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev)
+void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
{
}
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey,
struct mlx5_create_mkey_mbox_in *in, int inlen,
mlx5_cmd_cbk_t callback, void *context,
struct mlx5_create_mkey_mbox_out *out)
{
- struct mlx5_mr_table *table = &dev->priv.mr_table;
+ struct mlx5_mkey_table *table = &dev->priv.mkey_table;
struct mlx5_create_mkey_mbox_out lout;
int err;
u8 key;
@@ -83,34 +84,35 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
return mlx5_cmd_status_to_err(&lout.hdr);
}
- mr->iova = be64_to_cpu(in->seg.start_addr);
- mr->size = be64_to_cpu(in->seg.len);
- mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
- mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
+ mkey->iova = be64_to_cpu(in->seg.start_addr);
+ mkey->size = be64_to_cpu(in->seg.len);
+ mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
+ mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
- be32_to_cpu(lout.mkey), key, mr->key);
+ be32_to_cpu(lout.mkey), key, mkey->key);
- /* connect to MR tree */
+ /* connect to mkey tree */
write_lock_irq(&table->lock);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
+ err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey);
write_unlock_irq(&table->lock);
if (err) {
- mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
- mlx5_base_mkey(mr->key), err);
- mlx5_core_destroy_mkey(dev, mr);
+ mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
+ mlx5_base_mkey(mkey->key), err);
+ mlx5_core_destroy_mkey(dev, mkey);
}
return err;
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
+ struct mlx5_core_mkey *mkey)
{
- struct mlx5_mr_table *table = &dev->priv.mr_table;
+ struct mlx5_mkey_table *table = &dev->priv.mkey_table;
struct mlx5_destroy_mkey_mbox_in in;
struct mlx5_destroy_mkey_mbox_out out;
- struct mlx5_core_mr *deleted_mr;
+ struct mlx5_core_mkey *deleted_mkey;
unsigned long flags;
int err;
@@ -118,16 +120,16 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
memset(&out, 0, sizeof(out));
write_lock_irqsave(&table->lock, flags);
- deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+ deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
write_unlock_irqrestore(&table->lock, flags);
- if (!deleted_mr) {
- mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
- mlx5_base_mkey(mr->key));
+ if (!deleted_mkey) {
+ mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n",
+ mlx5_base_mkey(mkey->key));
return -ENOENT;
}
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
- in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
+ in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
@@ -139,7 +141,7 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
struct mlx5_query_mkey_mbox_out *out, int outlen)
{
struct mlx5_query_mkey_mbox_in in;
@@ -149,7 +151,7 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
memset(out, 0, outlen);
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
- in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
+ in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err)
return err;
@@ -161,7 +163,7 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
u32 *mkey)
{
struct mlx5_query_special_ctxs_mbox_in in;
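
The mr-to-mkey rename keeps the bookkeeping identical: a radix tree keyed by mlx5_base_mkey() guarded by an rwlock. A hypothetical read-side lookup, mirroring the insert/delete paths above:

	/* Sketch only: mkey_find() is an illustrative helper, not part of
	 * this patch; it resolves a full key to its tracked entry. */
	static struct mlx5_core_mkey *mkey_find(struct mlx5_core_dev *dev, u32 key)
	{
		struct mlx5_mkey_table *table = &dev->priv.mkey_table;
		struct mlx5_core_mkey *mkey;

		read_lock(&table->lock);
		mkey = radix_tree_lookup(&table->tree, mlx5_base_mkey(key));
		read_unlock(&table->lock);
		return mkey;
	}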
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index a87e773e93f3..ae378c575deb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/port.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
@@ -324,6 +325,29 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
+ u8 port_num, void *out, size_t sz)
+{
+ u32 *in;
+ int err;
+
+ in = mlx5_vzalloc(sz);
+ if (!in) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ MLX5_SET(ppcnt_reg, in, local_port, port_num);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
+ err = mlx5_core_access_reg(dev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
+
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
@@ -363,3 +387,223 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
+ MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
+ MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_tx);
+ MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_rx);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
+
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 0);
+ if (err)
+ return err;
+
+ if (pfc_en_tx)
+ *pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx);
+
+ if (pfc_en_rx)
+ *pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev)
+{
+ u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
+
+ return num_tc - 1;
+}
+
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
+{
+ u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+ int err;
+ int i;
+
+ memset(in, 0, sizeof(in));
+ for (i = 0; i < 8; i++) {
+ if (prio_tc[i] > mlx5_max_tc(mdev))
+ return -EINVAL;
+
+ MLX5_SET(qtct_reg, in, prio, i);
+ MLX5_SET(qtct_reg, in, tclass, prio_tc[i]);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_QTCT, 0, 1);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
+
+static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+
+ if (!MLX5_CAP_GEN(mdev, ets))
+ return -ENOTSUPP;
+
+ return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
+ MLX5_REG_QETCR, 0, 1);
+}
+
+static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
+ int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+
+ if (!MLX5_CAP_GEN(mdev, ets))
+ return -ENOTSUPP;
+
+ memset(in, 0, sizeof(in));
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
+ MLX5_REG_QETCR, 0, 0);
+}
+
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
+{
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ int i;
+
+ memset(in, 0, sizeof(in));
+
+ for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+ MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1);
+ MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]);
+ }
+
+ return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
+
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
+{
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ int i;
+
+ memset(in, 0, sizeof(in));
+
+ for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+ MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1);
+ MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]);
+ }
+
+ return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
+
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_units)
+{
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+ void *ets_tcn_conf;
+ int i;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(qetc_reg, in, port_number, 1);
+
+ for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+ ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, in, tc_configuration[i]);
+
+ MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, r, 1);
+ MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_units,
+ max_bw_units[i]);
+ MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_value,
+ max_bw_value[i]);
+ }
+
+ return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
+
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+ u8 *max_bw_units)
+{
+ u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+ void *ets_tcn_conf;
+ int err;
+ int i;
+
+ err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+ if (err)
+ return err;
+
+ for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+ ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[i]);
+
+ max_bw_value[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+ max_bw_value);
+ max_bw_units[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+ max_bw_units);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
+
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
+ u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
+ MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
+ MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
+
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)];
+ u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, sizeof(out));
+
+ if (!err)
+ *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
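
The new PFCC/QTCT/QETCR accessors are the building blocks for DCB support in the ethernet driver. A hedged example of the intended call pattern, with illustrative values: enable PFC on priorities 3 and 4, then map each priority to a traffic class.

	/* Sketch: PFC on priorities 3 and 4 in both directions, and a
	 * simple round-robin priority-to-TC mapping. */
	u8 pfc_mask = BIT(3) | BIT(4);
	u8 prio_tc[8];
	int i, err;

	err = mlx5_set_port_pfc(mdev, pfc_mask, pfc_mask);
	if (err)
		return err;

	for (i = 0; i < 8; i++)
		prio_tc[i] = i % (mlx5_max_tc(mdev) + 1);
	return mlx5_set_port_prio_tc(mdev, prio_tc);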
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index eb05c845ece9..8ba080e441a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -226,7 +226,8 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
return 0;
}
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+ bool map_wc)
{
phys_addr_t pfn;
phys_addr_t uar_bar_start;
@@ -240,20 +241,26 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
uar_bar_start = pci_resource_start(mdev->pdev, 0);
pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
- uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
- if (!uar->map) {
- mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
- err = -ENOMEM;
- goto err_free_uar;
- }
- if (mdev->priv.bf_mapping)
- uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
- uar->index << PAGE_SHIFT);
+ if (map_wc) {
+ uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->bf_map) {
+ mlx5_core_warn(mdev, "ioremap_wc() failed\n");
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map)
+ goto err_free_uar;
+ }
+ } else {
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map)
+ goto err_free_uar;
+ }
return 0;
err_free_uar:
+ mlx5_core_warn(mdev, "ioremap() failed\n");
+ err = -ENOMEM;
mlx5_cmd_free_uar(mdev, uar->index);
return err;
@@ -262,8 +269,8 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
{
- io_mapping_unmap(uar->bf_map);
iounmap(uar->map);
+ iounmap(uar->bf_map);
mlx5_cmd_free_uar(mdev, uar->index);
}
EXPORT_SYMBOL(mlx5_unmap_free_uar);
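
With the device-wide blue-flame io_mapping gone (see the main.c hunks above), each UAR is now mapped on its own, and a failed ioremap_wc() quietly degrades to an uncached mapping. A sketch of the updated call:

	/* Sketch: request a write-combined UAR. On WC failure, uar.map is
	 * valid (uncached) and uar.bf_map stays NULL, so blue-flame users
	 * must test bf_map before relying on it. */
	struct mlx5_uar uar;
	int err = mlx5_alloc_map_uar(mdev, &uar, true /* map_wc */);

	if (err)
		return err;
	if (!uar.bf_map)
		pr_debug("blue flame unavailable, falling back to doorbell\n");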
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index c7398b95aecd..bd518405859e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -850,3 +850,111 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
+
+int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
+ int vf, u8 port_num, void *out,
+ size_t out_sz)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
+ int is_group_manager;
+ void *in;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ in = mlx5_vzalloc(in_sz);
+ if (!in) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ MLX5_SET(query_vport_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_vport_counter_in, in, other_vport, 1);
+ MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
+ } else {
+ err = -EPERM;
+ goto free;
+ }
+ }
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_vport_counter_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto free;
+ err = mlx5_cmd_status_to_err_v2(out);
+
+free:
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
+
+int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
+ u8 other_vport, u8 port_num,
+ int vf,
+ struct mlx5_hca_vport_context *req)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
+ u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
+ int is_group_manager;
+ void *in;
+ int err;
+ void *ctx;
+
+ mlx5_core_dbg(dev, "vf %d\n", vf);
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ in = kzalloc(in_sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ memset(out, 0, sizeof(out));
+ MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
+ } else {
+ err = -EPERM;
+ goto ex;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev, num_ports) > 1)
+ MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
+
+ ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
+ MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
+ MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
+ MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
+ MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
+ MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
+ MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
+ MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
+ MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+ MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
+ MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
+ MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
+ MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
+ MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
+ MLX5_SET(hca_vport_context, ctx, lid, req->lid);
+ MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
+ MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
+ MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
+ MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
+ MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
+ MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
+ MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ if (err)
+ goto ex;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+
+ex:
+ kfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
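
mlx5_core_query_vport_counter() lets a group manager read another function's counters; other_vport selects VF vf + 1, and port_num only matters on dual-port devices. A hedged caller sketch:

	/* Sketch: read VF 0's counters on port 1. The out buffer layout
	 * follows query_vport_counter_out from mlx5_ifc. */
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out = mlx5_vzalloc(out_sz);
	int err;

	if (!out)
		return -ENOMEM;
	err = mlx5_core_query_vport_counter(dev, 1 /* other_vport */,
					    0 /* vf */, 1 /* port_num */,
					    out, out_sz);
	kvfree(out);
	return err;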
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
new file mode 100644
index 000000000000..9f10df25f3cd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "vxlan.h"
+
+void mlx5e_vxlan_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+
+ spin_lock_init(&vxlan_db->lock);
+ INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
+}
+
+static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ struct mlx5_outbox_hdr *hdr;
+ int err;
+
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)];
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
+ MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ hdr = (struct mlx5_outbox_hdr *)out;
+ return hdr->status ? -ENOMEM : 0;
+}
+
+static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
+{
+ struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ struct mlx5e_vxlan *vxlan;
+
+ spin_lock(&vxlan_db->lock);
+ vxlan = radix_tree_lookup(&vxlan_db->tree, port);
+ spin_unlock(&vxlan_db->lock);
+
+ return vxlan;
+}
+
+int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+{
+ struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ struct mlx5e_vxlan *vxlan;
+ int err;
+
+ err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
+ if (err)
+ return err;
+
+ vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
+ if (!vxlan) {
+ err = -ENOMEM;
+ goto err_delete_port;
+ }
+
+ vxlan->udp_port = port;
+
+ spin_lock_irq(&vxlan_db->lock);
+ err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
+ spin_unlock_irq(&vxlan_db->lock);
+ if (err)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ kfree(vxlan);
+err_delete_port:
+ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+ return err;
+}
+
+static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+{
+ struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ struct mlx5e_vxlan *vxlan;
+
+ spin_lock_irq(&vxlan_db->lock);
+ vxlan = radix_tree_delete(&vxlan_db->tree, port);
+ spin_unlock_irq(&vxlan_db->lock);
+
+ if (!vxlan)
+ return;
+
+ mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
+
+ kfree(vxlan);
+}
+
+void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+{
+ if (!mlx5e_vxlan_lookup_port(priv, port))
+ return;
+
+ __mlx5e_vxlan_core_del_port(priv, port);
+}
+
+void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+ struct mlx5e_vxlan *vxlan;
+ unsigned int port = 0;
+
+ spin_lock_irq(&vxlan_db->lock);
+ while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
+ port = vxlan->udp_port;
+ spin_unlock_irq(&vxlan_db->lock);
+ __mlx5e_vxlan_core_del_port(priv, (u16)port);
+ spin_lock_irq(&vxlan_db->lock);
+ }
+ spin_unlock_irq(&vxlan_db->lock);
+}
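
This table is meant to be driven from the netdev's VXLAN offload callbacks, which live in en_main.c and are not part of this file; a hedged sketch of such a caller:

	/* Illustrative sketch of a VXLAN-port netdev callback; names and
	 * the exact hook-up are assumptions, not from this patch. */
	static void example_add_vxlan_port(struct net_device *netdev,
					   sa_family_t sa_family, __be16 port)
	{
		struct mlx5e_priv *priv = netdev_priv(netdev);

		if (!mlx5e_vxlan_allowed(priv->mdev))
			return;
		mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
	}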
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
new file mode 100644
index 000000000000..a01685056ab1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_VXLAN_H__
+#define __MLX5_VXLAN_H__
+
+#include <linux/mlx5/driver.h>
+#include "en.h"
+
+struct mlx5e_vxlan {
+ u16 udp_port;
+};
+
+static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
+{
+ return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
+ mlx5_core_is_pf(mdev));
+}
+
+void mlx5e_vxlan_init(struct mlx5e_priv *priv);
+int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
+void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
+struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
+
+#endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index ce26adcb4988..2ad7f67854d5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -4,6 +4,7 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
+ depends on MAY_USE_DEVLINK
---help---
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 22379eb8e924..f69f6280519f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -56,6 +56,7 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
+#include <net/devlink.h>
#include "core.h"
#include "item.h"
@@ -784,6 +785,38 @@ static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
debugfs_remove_recursive(mlxsw_core->dbg_dir);
}
+static int mlxsw_devlink_port_split(struct devlink *devlink,
+ unsigned int port_index,
+ unsigned int count)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (port_index >= MLXSW_PORT_MAX_PORTS)
+ return -EINVAL;
+ if (!mlxsw_core->driver->port_split)
+ return -EOPNOTSUPP;
+ return mlxsw_core->driver->port_split(mlxsw_core->driver_priv,
+ port_index, count);
+}
+
+static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
+ unsigned int port_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ if (port_index >= MLXSW_PORT_MAX_PORTS)
+ return -EINVAL;
+ if (!mlxsw_core->driver->port_unsplit)
+ return -EOPNOTSUPP;
+ return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv,
+ port_index);
+}
+
+static const struct devlink_ops mlxsw_devlink_ops = {
+ .port_split = mlxsw_devlink_port_split,
+ .port_unsplit = mlxsw_devlink_port_unsplit,
+};
+
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
void *bus_priv)
@@ -791,6 +824,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
struct mlxsw_driver *mlxsw_driver;
+ struct devlink *devlink;
size_t alloc_size;
int err;
@@ -798,12 +832,13 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (!mlxsw_driver)
return -EINVAL;
alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
- mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
- if (!mlxsw_core) {
+ devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
+ if (!devlink) {
err = -ENOMEM;
- goto err_core_alloc;
+ goto err_devlink_alloc;
}
+ mlxsw_core = devlink_priv(devlink);
INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
mlxsw_core->driver = mlxsw_driver;
@@ -841,6 +876,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_hwmon_init;
+ err = devlink_register(devlink, mlxsw_bus_info->dev);
+ if (err)
+ goto err_devlink_register;
+
err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
mlxsw_bus_info);
if (err)
@@ -855,6 +894,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_debugfs_init:
mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
+ devlink_unregister(devlink);
+err_devlink_register:
err_hwmon_init:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
@@ -864,8 +905,8 @@ err_bus_init:
err_alloc_lag_mapping:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
- kfree(mlxsw_core);
-err_core_alloc:
+ devlink_free(devlink);
+err_devlink_alloc:
mlxsw_core_driver_put(device_kind);
return err;
}
@@ -874,14 +915,16 @@ EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
const char *device_kind = mlxsw_core->bus_info->device_kind;
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
mlxsw_core_debugfs_fini(mlxsw_core);
mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
kfree(mlxsw_core->lag.mapping);
free_percpu(mlxsw_core->pcpu_stats);
- kfree(mlxsw_core);
+ devlink_free(devlink);
mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index a01723600f0a..c73d1c0792a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -186,6 +186,8 @@ struct mlxsw_driver {
int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info);
void (*fini)(void *driver_priv);
+ int (*port_split)(void *driver_priv, u8 local_port, unsigned int count);
+ int (*port_unsplit)(void *driver_priv, u8 local_port);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
u8 txhdr_len;
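
A specific driver opts into devlink port splitting by filling these two new callbacks; mlxsw_core validates the port index and dispatches to them. A hedged sketch of the hook-up:

	/* Sketch: wiring the new split hooks into a driver's mlxsw_driver.
	 * example_port_split()/example_port_unsplit() are hypothetical. */
	static struct mlxsw_driver example_driver = {
		.txhdr_len	= MLXSW_TXHDR_LEN,
		.init		= example_init,         /* hypothetical */
		.fini		= example_fini,         /* hypothetical */
		.port_split	= example_port_split,   /* hypothetical */
		.port_unsplit	= example_port_unsplit, /* hypothetical */
	};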
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 7992c553c1f5..7f4173c8eda3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1681,11 +1681,18 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
{
+ unsigned long end;
+
mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
- /* Current firware does not let us know when the reset is done.
- * So we just wait here for constant time and hope for the best.
- */
- msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ wmb(); /* reset needs to be written before we read control register */
+ end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ do {
+ u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
+
+ if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
+ break;
+ cond_resched();
+ } while (time_before(jiffies, end));
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 912106054ff2..d942a3e6fa41 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -61,6 +61,9 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
+#define MLXSW_PCI_FW_READY 0xA1844
+#define MLXSW_PCI_FW_READY_MASK 0xFF
+#define MLXSW_PCI_FW_READY_MAGIC 0x5E
#define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000
#define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index ae65b9940aed..f33b997f2b61 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -59,6 +59,8 @@
#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
+#define MLXSW_PORT_MODULE_MAX_WIDTH 4
+
enum mlxsw_port_admin_status {
MLXSW_PORT_ADMIN_STATUS_UP = 1,
MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index a94daa8c346c..4afbc3e9e381 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,6 +49,7 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
+#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -304,21 +305,47 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
-static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
- bool *p_usable)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *p_module,
+ u8 *p_width)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
if (err)
return err;
- *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
+ *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
+ *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
return 0;
}
+static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 module, u8 width, u8 lane)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ int i;
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_width_set(pmlp_pl, width);
+ for (i = 0; i < width; i++) {
+ mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
+ mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+}
+
+static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+ char pmlp_pl[MLXSW_REG_PMLP_LEN];
+
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+}
+
static int mlxsw_sp_port_open(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@ -1273,6 +1300,18 @@ static u32 mlxsw_sp_to_ptys_speed(u32 speed)
return ptys_proto;
}
+static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
+{
+ u32 ptys_proto = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
+ ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+ }
+ return ptys_proto;
+}
+
static int mlxsw_sp_port_set_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
@@ -1349,11 +1388,27 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.set_settings = mlxsw_sp_port_set_settings,
};
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+static int
+mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_admin;
+
+ eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
+ eth_proto_admin);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct devlink_port *devlink_port;
struct net_device *dev;
- bool usable;
size_t bytes;
int err;
@@ -1364,6 +1419,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
+ mlxsw_sp_port->split = split;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -1404,17 +1460,14 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
*/
dev->hard_header_len += MLXSW_TXHDR_LEN;
- err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
+ devlink_port = &mlxsw_sp_port->devlink_port;
+ if (mlxsw_sp_port->split)
+ devlink_port_split_set(devlink_port, module);
+ err = devlink_port_register(devlink, devlink_port, local_port);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
mlxsw_sp_port->local_port);
- goto err_port_module_check;
- }
-
- if (!usable) {
- dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
- mlxsw_sp_port->local_port);
- goto port_not_usable;
+ goto err_devlink_port_register;
}
err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
@@ -1431,6 +1484,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
goto err_port_swid_set;
}
+ err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_speed_by_width_set;
+ }
+
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1457,6 +1517,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
goto err_register_netdev;
}
+ devlink_port_type_eth_set(devlink_port, dev);
+
err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
if (err)
goto err_port_vlan_init;
@@ -1470,10 +1532,11 @@ err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
+err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
-port_not_usable:
-err_port_module_check:
+ devlink_port_unregister(&mlxsw_sp_port->devlink_port);
+err_devlink_port_register:
err_dev_addr_init:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
@@ -1485,6 +1548,28 @@ err_port_active_vlans_alloc:
return err;
}
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
+{
+ int err;
+
+ err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ lane);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
+ width);
+ if (err)
+ goto err_port_create;
+
+ return 0;
+
+err_port_create:
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
+ return err;
+}
+
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
@@ -1505,12 +1590,19 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ struct devlink_port *devlink_port;
if (!mlxsw_sp_port)
return;
+ mlxsw_sp->ports[local_port] = NULL;
+ devlink_port = &mlxsw_sp_port->devlink_port;
+ devlink_port_type_clear(devlink_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ devlink_port_unregister(devlink_port);
mlxsw_sp_port_vports_fini(mlxsw_sp_port);
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
@@ -1529,6 +1621,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
size_t alloc_size;
+ u8 module, width;
int i;
int err;
@@ -1538,19 +1631,158 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, i);
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
+ &width);
+ if (err)
+ goto err_port_module_info_get;
+ if (!width)
+ continue;
+ mlxsw_sp->port_to_module[i] = module;
+ err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
if (err)
goto err_port_create;
}
return 0;
err_port_create:
+err_port_module_info_get:
for (i--; i >= 1; i--)
mlxsw_sp_port_remove(mlxsw_sp, i);
kfree(mlxsw_sp->ports);
return err;
}
+static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
+{
+ u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
+
+ return local_port - offset;
+}
+
+static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ u8 module, cur_width, base_port;
+ int i;
+ int err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ return -EINVAL;
+ }
+
+ if (count != 2 && count != 4) {
+ netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
+ &cur_width);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
+ return err;
+ }
+
+ if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
+ netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
+ return -EINVAL;
+ }
+
+ /* Make sure we have enough slave (even) ports for the split. */
+ if (count == 2) {
+ base_port = local_port;
+ if (mlxsw_sp->ports[base_port + 1]) {
+ netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
+ return -EINVAL;
+ }
+ } else {
+ base_port = mlxsw_sp_cluster_base_port_get(local_port);
+ if (mlxsw_sp->ports[base_port + 1] ||
+ mlxsw_sp->ports[base_port + 3]) {
+ netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < count; i++)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
+ goto err_port_create;
+ }
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ for (i = 0; i < count / 2; i++) {
+ module = mlxsw_sp->port_to_module[base_port + i * 2];
+ mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
+ module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
+ }
+ return err;
+}
+
+static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ u8 module, cur_width, base_port;
+ unsigned int count;
+ int i;
+ int err;
+
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ if (!mlxsw_sp_port) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
+ local_port);
+ return -EINVAL;
+ }
+
+ if (!mlxsw_sp_port->split) {
+ netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
+ &cur_width);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
+ return err;
+ }
+ count = cur_width == 1 ? 4 : 2;
+
+ base_port = mlxsw_sp_cluster_base_port_get(local_port);
+
+ /* Determine which ports to remove. */
+ if (count == 2 && local_port >= base_port + 2)
+ base_port = base_port + 2;
+
+ for (i = 0; i < count; i++)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+
+ for (i = 0; i < count / 2; i++) {
+ module = mlxsw_sp->port_to_module[base_port + i * 2];
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
+ module, MLXSW_PORT_MODULE_MAX_WIDTH,
+ 0);
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
+ }
+
+ return 0;
+}
+
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
char *pude_pl, void *priv)
{
@@ -1974,6 +2206,8 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.priv_size = sizeof(struct mlxsw_sp),
.init = mlxsw_sp_init,
.fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp_config_profile,
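A note on the split arithmetic above: each subport gets width = MLXSW_PORT_MODULE_MAX_WIDTH / count lanes, its first lane is i * width, and for a 4x split the subports come from the 4-port cluster containing local_port. A minimal standalone sketch of that derivation (invented names, plain userspace C, not driver code):

#include <stdio.h>

#define MODULE_MAX_WIDTH  4 /* mirrors MLXSW_PORT_MODULE_MAX_WIDTH */
#define PORTS_PER_CLUSTER 4 /* mirrors MLXSW_SP_PORTS_PER_CLUSTER_MAX */

static unsigned int cluster_base_port(unsigned int local_port)
{
        /* Local ports are 1-based; each cluster spans 4 consecutive ports. */
        return local_port - ((local_port - 1) % PORTS_PER_CLUSTER);
}

int main(void)
{
        unsigned int local_port = 7, count = 4, i;
        unsigned int width = MODULE_MAX_WIDTH / count; /* lanes per subport */
        unsigned int base = cluster_base_port(local_port);

        for (i = 0; i < count; i++)
                printf("subport %u: local_port=%u first_lane=%u width=%u\n",
                       i, base + i, i * width, width);
        return 0;
}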
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3b89ed2f3c76..4b8abaf06321 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -43,6 +43,7 @@
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <net/switchdev.h>
+#include <net/devlink.h>
#include "port.h"
#include "core.h"
@@ -57,6 +58,10 @@
#define MLXSW_SP_MID_MAX 7000
+#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
+
+#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
+
struct mlxsw_sp_port;
struct mlxsw_sp_upper {
@@ -118,10 +123,13 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
unsigned int interval; /* ms */
} fdb_notify;
+#define MLXSW_SP_MIN_AGEING_TIME 10
+#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
+ u8 port_to_module[MLXSW_PORT_MAX_PORTS];
};
static inline struct mlxsw_sp_upper *
@@ -149,7 +157,8 @@ struct mlxsw_sp_port {
learning_sync:1,
uc_flood:1,
bridged:1,
- lagged:1;
+ lagged:1,
+ split:1;
u16 pvid;
u16 lag_id;
struct {
@@ -162,6 +171,7 @@ struct mlxsw_sp_port {
unsigned long *untagged_vlans;
/* VLAN interfaces */
struct list_head vports_list;
+ struct devlink_port devlink_port;
};
static inline struct mlxsw_sp_port *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 7b56098acc58..e1c74efff51a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -311,8 +311,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+ return -ERANGE;
+ else
+ return 0;
+ }
return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
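The ageing hunk above validates during the switchdev prepare phase, so the commit phase cannot fail; the value itself is a clock_t converted to whole seconds before the range check. A compilable sketch of just the validation step, with the bounds copied from the spectrum.h hunk and everything else invented:

#include <errno.h>
#include <stdio.h>

#define MIN_AGEING_TIME 10      /* s, mirrors MLXSW_SP_MIN_AGEING_TIME */
#define MAX_AGEING_TIME 1000000 /* s, mirrors MLXSW_SP_MAX_AGEING_TIME */

/* Prepare-phase check: reject early so the commit phase cannot fail. */
static int ageing_time_validate(unsigned int ageing_time)
{
        if (ageing_time < MIN_AGEING_TIME || ageing_time > MAX_AGEING_TIME)
                return -ERANGE;
        return 0;
}

int main(void)
{
        printf("300s -> %d, 5s -> %d\n",
               ageing_time_validate(300), ageing_time_validate(5));
        return 0;
}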
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index d85960cfb694..7a60a26759b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -43,6 +43,7 @@
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
+#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -78,6 +79,7 @@ struct mlxsw_sx_port {
struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sx *mlxsw_sx;
u8 local_port;
+ struct devlink_port devlink_port;
};
/* tx_hdr_version
@@ -953,7 +955,9 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sx->core);
struct mlxsw_sx_port *mlxsw_sx_port;
+ struct devlink_port *devlink_port;
struct net_device *dev;
bool usable;
int err;
@@ -1007,6 +1011,14 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto port_not_usable;
}
+ devlink_port = &mlxsw_sx_port->devlink_port;
+ err = devlink_port_register(devlink, devlink_port, local_port);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n",
+ mlxsw_sx_port->local_port);
+ goto err_devlink_port_register;
+ }
+
err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1064,6 +1076,8 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto err_register_netdev;
}
+ devlink_port_type_eth_set(devlink_port, dev);
+
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
@@ -1075,6 +1089,8 @@ err_port_mtu_set:
err_port_speed_set:
err_port_swid_set:
err_port_system_port_mapping_set:
+ devlink_port_unregister(&mlxsw_sx_port->devlink_port);
+err_devlink_port_register:
port_not_usable:
err_port_module_check:
err_dev_addr_get:
@@ -1087,11 +1103,15 @@ err_alloc_stats:
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+ struct devlink_port *devlink_port;
if (!mlxsw_sx_port)
return;
+ devlink_port = &mlxsw_sx_port->devlink_port;
+ devlink_port_type_clear(devlink_port);
unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+ devlink_port_unregister(devlink_port);
free_percpu(mlxsw_sx_port->pcpu_stats);
free_netdev(mlxsw_sx_port->dev);
}
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 09d2e16fd6b0..cb0102dd7f70 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -561,8 +561,8 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
sg_init_table(sg, 1);
sg_dma_address(sg) = dma_map_single(adapter->dev,
ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
- err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
- if (unlikely(err)) {
+ if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) {
+ err = -ENOMEM;
sg_dma_address(sg) = 0;
goto out;
}
@@ -572,8 +572,10 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
- if (!ctl->adesc)
+ if (!ctl->adesc) {
+ err = -ENOMEM;
goto out;
+ }
ctl->adesc->callback_param = netdev;
ctl->adesc->callback = ks8842_dma_rx_cb;
@@ -584,7 +586,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
goto out;
}
- return err;
+ return 0;
out:
if (sg_dma_address(sg))
dma_unmap_single(adapter->dev, sg_dma_address(sg),
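The ks8842 fix addresses a convention mismatch: dma_mapping_error() reports only whether the mapping failed, not an errno, so storing its result in err and returning it could leak a meaningless nonzero value. A userspace analogue of the corrected pattern (all names invented):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Pretend DMA mapping: reports only failed/succeeded, like
 * dma_mapping_error(); its result is not an errno value.
 */
static bool map_buffer(void)
{
        return false; /* false == success */
}

static int start_rx(void)
{
        if (map_buffer())
                return -ENOMEM; /* pick an explicit errno on failure */
        /* ... prepare and submit the descriptor ... */
        return 0; /* success returns 0, never a stale err variable */
}

int main(void)
{
        printf("start_rx -> %d\n", start_rx());
        return 0;
}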
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index ab264e1bccd0..75683fb26734 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -45,7 +45,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
-#include <asm-generic/io-64-nonatomic-hi-lo.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include "nfp_net_ctrl.h"
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index afa445842f3e..52d9a94aebb9 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -1038,7 +1038,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
error = register_netdev(dev);
if (error != 0) {
- dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n");
+ dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
error = -ENODEV;
goto failed_put_rmiiclk;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 75e88f4c1531..9b0d7f463ff3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5629,12 +5629,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
u64_stats_init(&np->swstats_rx_syncp);
u64_stats_init(&np->swstats_tx_syncp);
- init_timer(&np->oom_kick);
- np->oom_kick.data = (unsigned long) dev;
- np->oom_kick.function = nv_do_rx_refill; /* timer handler */
- init_timer(&np->nic_poll);
- np->nic_poll.data = (unsigned long) dev;
- np->nic_poll.function = nv_do_nic_poll; /* timer handler */
+ setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev);
+ setup_timer(&np->nic_poll, nv_do_nic_poll, (unsigned long)dev);
init_timer_deferrable(&np->stats_poll);
np->stats_poll.data = (unsigned long) dev;
np->stats_poll.function = nv_do_stats_poll; /* timer handler */
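setup_timer() is simply init_timer() plus the two field assignments, which is all the forcedeth hunk folds together; stats_poll stays open-coded because this tree has no deferrable setup_* variant. A userspace mock of that old timer API illustrating the equivalence (struct layout and helpers are stand-ins, not the kernel's):

#include <stdio.h>

/* Userspace mock of the old (pre-timer_setup) kernel timer API. */
struct timer_list {
        void (*function)(unsigned long);
        unsigned long data;
};

static void init_timer(struct timer_list *t)
{
        t->function = NULL;
        t->data = 0;
}

/* setup_timer() == init_timer() + the two assignments it replaces. */
static void setup_timer(struct timer_list *t,
                        void (*fn)(unsigned long), unsigned long data)
{
        init_timer(t);
        t->function = fn;
        t->data = data;
}

static void tick(unsigned long data)
{
        printf("tick, data=%lu\n", data);
}

int main(void)
{
        struct timer_list t;

        setup_timer(&t, tick, 42UL);
        t.function(t.data); /* simulate expiry */
        return 0;
}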
diff --git a/drivers/net/ethernet/octeon/Kconfig b/drivers/net/ethernet/octeon/Kconfig
deleted file mode 100644
index a7aa28054cc1..000000000000
--- a/drivers/net/ethernet/octeon/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Cavium network device configuration
-#
-
-config OCTEON_MGMT_ETHERNET
- tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
- depends on CAVIUM_OCTEON_SOC
- select PHYLIB
- select MDIO_OCTEON
- default y
- ---help---
- This option enables the ethernet driver for the management
- port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
- CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig
index db19c6f49859..7c92e8306c19 100644
--- a/drivers/net/ethernet/pasemi/Kconfig
+++ b/drivers/net/ethernet/pasemi/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_PASEMI
bool "PA Semi devices"
default y
- depends on PPC_PASEMI && PCI && INET
+ depends on PPC_PASEMI && PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,9 +18,8 @@ if NET_VENDOR_PASEMI
config PASEMI_MAC
tristate "PA Semi 1/10Gbit MAC"
- depends on PPC_PASEMI && PCI && INET
+ depends on PPC_PASEMI && PCI
select PHYLIB
- select INET_LRO
---help---
This driver supports the on-chip 1/10Gbit Ethernet controller on
PA Semi's PWRficient line of chips.
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 57a6e6cd74fc..af54df52aa6b 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -30,9 +30,7 @@
#include <linux/skbuff.h>
#include <linux/ip.h>
-#include <linux/tcp.h>
#include <net/checksum.h>
-#include <linux/inet_lro.h>
#include <linux/prefetch.h>
#include <asm/irq.h>
@@ -52,12 +50,9 @@
*
* - Multicast support
* - Large MTU support
- * - SW LRO
* - Multiqueue RX/TX
*/
-#define LRO_MAX_AGGR 64
-
#define PE_MIN_MTU 64
#define PE_MAX_MTU 9000
#define PE_DEF_MTU ETH_DATA_LEN
@@ -257,37 +252,6 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
- void **tcph, u64 *hdr_flags, void *data)
-{
- u64 macrx = (u64) data;
- unsigned int ip_len;
- struct iphdr *iph;
-
- /* IPv4 header checksum failed */
- if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK)
- return -1;
-
- /* non tcp packet */
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
- if (iph->protocol != IPPROTO_TCP)
- return -1;
-
- ip_len = ip_hdrlen(skb);
- skb_set_transport_header(skb, ip_len);
- *tcph = tcp_hdr(skb);
-
- /* check if ip header and tcp header are complete */
- if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
- return -1;
-
- *hdr_flags = LRO_IPV4 | LRO_TCP;
- *iphdr = iph;
-
- return 0;
-}
-
static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
const int nfrags,
struct sk_buff *skb,
@@ -817,7 +781,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
skb_put(skb, len-4);
skb->protocol = eth_type_trans(skb, mac->netdev);
- lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx);
+ napi_gro_receive(&mac->napi, skb);
next:
RX_DESC(rx, n) = 0;
@@ -839,8 +803,6 @@ next:
rx_ring(mac)->next_to_clean = n;
- lro_flush_all(&mac->lro_mgr);
-
/* Increase is in number of 16-byte entries, and since each descriptor
* with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
* count*2.
@@ -1754,16 +1716,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_GSO;
- mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
- mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
- mac->lro_mgr.lro_arr = mac->lro_desc;
- mac->lro_mgr.get_skb_header = get_skb_hdr;
- mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- mac->lro_mgr.dev = mac->netdev;
- mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
-
mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
if (!mac->dma_pdev) {
dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index a5807703ab96..161c99a98403 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -31,7 +31,6 @@
#define CS_RING_SIZE (TX_RING_SIZE*2)
-#define MAX_LRO_DESCRIPTORS 8
#define MAX_CS 2
struct pasemi_mac_txring {
@@ -84,10 +83,7 @@ struct pasemi_mac {
u8 mac_addr[ETH_ALEN];
- struct net_lro_mgr lro_mgr;
- struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
struct timer_list rxtimer;
- unsigned int lro_max_aggr;
struct pasemi_mac_txring *tx;
struct pasemi_mac_rxring *rx;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index 25fae568261f..f046bfc18e7d 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -20,7 +20,6 @@
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
-#include <linux/inet_lro.h>
#include <asm/pasemi_dma.h>
#include "pasemi_mac.h"
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 6409a06bbdf6..fd362b6923f4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2891,7 +2891,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
u64 qmdata;
@@ -2919,7 +2919,7 @@ netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u32 data;
u64 qmdata;
@@ -2960,7 +2960,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
@@ -2981,7 +2981,7 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
@@ -3018,7 +3018,7 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct netxen_adapter *adapter = dev_get_drvdata(dev);
struct net_device *netdev = adapter->netdev;
struct netxen_dimm_cfg dimm;
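kobj_to_dev(), used throughout the netxen conversion, is just a typed container_of() from the embedded kobject back to its struct device. A self-contained userspace re-creation (simplified struct definitions, illustration only):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kobject { int refcount; };
struct device {
        const char *name;
        struct kobject kobj; /* embedded, as in the real struct device */
};

/* Same shape as the kernel helper: typed container_of(). */
static struct device *kobj_to_dev(struct kobject *kobj)
{
        return container_of(kobj, struct device, kobj);
}

int main(void)
{
        struct device dev = { .name = "netxen0" };

        printf("%s\n", kobj_to_dev(&dev.kobj)->name);
        return 0;
}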
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1292c360390c..fcb8e9ba51d9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,7 +26,7 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.4.0.0"
+#define DRV_MODULE_VERSION "8.7.0.0"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -70,8 +70,8 @@ struct qed_sb_sp_info;
struct qed_mcp_info;
struct qed_rt_data {
- u32 init_val;
- bool b_valid;
+ u32 *init_val;
+ bool *b_valid;
};
/* The PCI personality is not quite synonymous to protocol ID:
@@ -120,6 +120,10 @@ enum QED_PORT_MODE {
QED_PORT_MODE_DE_1X25G
};
+enum qed_dev_cap {
+ QED_DEV_CAP_ETH,
+};
+
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
@@ -142,15 +146,13 @@ struct qed_hw_info {
u16 ovlan;
u32 part_num[4];
- u32 vendor_id;
- u32 device_id;
-
unsigned char hw_mac_addr[ETH_ALEN];
struct qed_igu_info *p_igu_info;
u32 port_mode;
u32 hw_mode;
+ unsigned long device_capabilities;
};
struct qed_hw_cid_data {
@@ -267,7 +269,7 @@ struct qed_hwfn {
struct qed_hw_info hw_info;
/* rt_array (for init-tool) */
- struct qed_rt_data *rt_data;
+ struct qed_rt_data rt_data;
/* SPQ */
struct qed_spq *p_spq;
@@ -301,6 +303,9 @@ struct qed_hwfn {
bool b_int_enabled;
bool b_int_requested;
+ /* True if the driver has requested the link */
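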
+ bool b_drv_link_init;
+
struct qed_mcp_info *mcp_info;
struct qed_hw_cid_data *p_tx_cids;
@@ -350,9 +355,20 @@ struct qed_dev {
char name[NAME_SIZE];
u8 type;
-#define QED_DEV_TYPE_BB_A0 (0 << 0)
-#define QED_DEV_TYPE_MASK (0x3)
-#define QED_DEV_TYPE_SHIFT (0)
+#define QED_DEV_TYPE_BB (0 << 0)
+#define QED_DEV_TYPE_AH BIT(0)
+/* Translate type/revision combo into the proper conditions */
+#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
+#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
+ CHIP_REV_IS_A0(dev))
+#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
+ CHIP_REV_IS_B0(dev))
+
+#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
+ QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
+
+ u16 vendor_id;
+ u16 device_id;
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@@ -361,6 +377,8 @@ struct qed_dev {
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
+#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
+#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
u16 chip_metal;
#define CHIP_METAL_MASK 0xff
@@ -375,10 +393,10 @@ struct qed_dev {
u8 num_funcs_in_port;
u8 path_id;
- enum mf_mode mf_mode;
-#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF)
-#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
-#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+ enum qed_mf_mode mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
+#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
int pcie_width;
int pcie_speed;
@@ -441,11 +459,6 @@ struct qed_dev {
const struct firmware *firmware;
};
-#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \
- QED_DEV_TYPE_SHIFT)
-#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
-#define QED_IS_BB(dev) (QED_IS_BB_A0(dev))
-
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
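The qed_rt_data change earlier in this file converts an array of {value, valid} structs into one struct holding two parallel arrays, plausibly so the u32 init values sit contiguously for block writes; the diff itself only shows the layout change. A standalone sketch of the new layout (RT_SIZE is invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RT_SIZE 8 /* invented; the real array size comes from the FW */

struct rt_data {            /* mirrors the new struct qed_rt_data */
        uint32_t *init_val; /* values, contiguous in memory */
        bool *b_valid;      /* parallel validity flags */
};

int main(void)
{
        struct rt_data rt;

        rt.init_val = calloc(RT_SIZE, sizeof(*rt.init_val));
        rt.b_valid = calloc(RT_SIZE, sizeof(*rt.b_valid));
        if (!rt.init_val || !rt.b_valid)
                return 1;

        rt.init_val[3] = 0xdeadbeef;
        rt.b_valid[3] = true;
        printf("slot 3 valid=%d val=0x%x\n", rt.b_valid[3], rt.init_val[3]);

        free(rt.init_val);
        free(rt.b_valid);
        return 0;
}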
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7ccdb46c6764..fc767c07a264 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -448,7 +448,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr;
u32 i;
- p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
+ p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
if (!p_mngr) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
return -ENOMEM;
@@ -581,7 +581,8 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.num_pf_cids = iids.cids;
params.start_pq = qm_info->start_pq;
params.num_pf_pqs = qm_info->num_pqs;
- params.start_vport = qm_info->num_vports;
+ params.start_vport = qm_info->start_vport;
+ params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
params.pf_rl = qm_info->pf_rl;
params.pq_params = qm_info->qm_pq_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 817bbd5476ff..b7d100f6bd6f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -32,6 +32,33 @@
#include "qed_sp.h"
/* API common to all protocols */
+enum BAR_ID {
+ BAR_ID_0, /* used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+
+ if (val)
+ return 1 << (val + 15);
+
+ /* Old MFW versions initialized the above register only conditionally */
+ if (p_hwfn->cdev->num_hwfns > 1) {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
+ return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
+ return 512 * 1024;
+ }
+}
+
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module, u8 dp_level)
{
@@ -134,17 +161,17 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* PQs will be arranged as follows: first per-TC PQ, then pure-LB queue.
*/
qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
- num_pqs, GFP_ATOMIC);
+ num_pqs, GFP_KERNEL);
if (!qm_info->qm_pq_params)
goto alloc_err;
qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
- num_vports, GFP_ATOMIC);
+ num_vports, GFP_KERNEL);
if (!qm_info->qm_vport_params)
goto alloc_err;
qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- MAX_NUM_PORTS, GFP_ATOMIC);
+ MAX_NUM_PORTS, GFP_KERNEL);
if (!qm_info->qm_port_params)
goto alloc_err;
@@ -341,11 +368,6 @@ void qed_resc_setup(struct qed_dev *cdev)
}
}
-#define FINAL_CLEANUP_CMD_OFFSET (0)
-#define FINAL_CLEANUP_CMD (0x1)
-#define FINAL_CLEANUP_VALID_OFFSET (6)
-#define FINAL_CLEANUP_VFPF_ID_SHIFT (7)
-#define FINAL_CLEANUP_COMP (0x2)
#define FINAL_CLEANUP_POLL_CNT (100)
#define FINAL_CLEANUP_POLL_TIME (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
@@ -355,12 +377,14 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
- addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+ addr = GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
- command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
- command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
- command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
- command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+ command |= X_FINAL_CLEANUP_AGG_INT <<
+ SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+ command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+ command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+ command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
/* Make sure notification is not set before initiating final cleanup */
if (REG_RD(p_hwfn, addr)) {
@@ -396,7 +420,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
int hw_mode = 0;
- hw_mode = (1 << MODE_BB_A0);
+ hw_mode = (1 << MODE_BB_B0);
switch (p_hwfn->cdev->num_ports_in_engines) {
case 1:
@@ -415,18 +439,16 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
}
switch (p_hwfn->cdev->mf_mode) {
- case SF:
- hw_mode |= 1 << MODE_SF;
+ case QED_MF_DEFAULT:
+ case QED_MF_NPAR:
+ hw_mode |= 1 << MODE_MF_SI;
break;
- case MF_OVLAN:
+ case QED_MF_OVLAN:
hw_mode |= 1 << MODE_MF_SD;
break;
- case MF_NPAR:
- hw_mode |= 1 << MODE_MF_SI;
- break;
default:
- DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
- hw_mode |= 1 << MODE_SF;
+ DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+ hw_mode |= 1 << MODE_MF_SI;
}
hw_mode |= 1 << MODE_ASIC;
@@ -655,10 +677,8 @@ int qed_hw_init(struct qed_dev *cdev,
bool allow_npar_tx_switch,
const u8 *bin_fw_data)
{
- struct qed_storm_stats *p_stat;
- u32 load_code, param, *p_address;
+ u32 load_code, param;
int rc, mfw_rc, i;
- u8 fw_vport = 0;
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
@@ -667,10 +687,6 @@ int qed_hw_init(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
- if (rc != 0)
- return rc;
-
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@@ -734,35 +750,60 @@ int qed_hw_init(struct qed_dev *cdev,
}
p_hwfn->hw_init_done = true;
+ }
+
+ return 0;
+}
- /* init PF stats */
- p_stat = &p_hwfn->storm_stats;
- p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
- MSTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static inline void qed_hw_timers_stop(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ int i;
- p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
- USTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ /* close timers */
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
- PSTORM_QUEUE_STAT_OFFSET(fw_vport);
- p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
- p_address = &p_stat->tstats.address;
- *p_address = BAR0_MAP_REG_TSDM_RAM +
- TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
- p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+ /* Depending on the number of connections/tasks, a 1ms sleep
+ * may be required between polls
+ */
+ usleep_range(1000, 2000);
}
- return 0;
+ if (i < QED_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+void qed_hw_timers_stop_all(struct qed_dev *cdev)
+{
+ int j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
+ }
}
-#define QED_HW_STOP_RETRY_LIMIT (10)
int qed_hw_stop(struct qed_dev *cdev)
{
int rc = 0, t_rc;
- int i, j;
+ int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
@@ -775,7 +816,8 @@ int qed_hw_stop(struct qed_dev *cdev)
rc = qed_sp_pf_stop(p_hwfn);
if (rc)
- return rc;
+ DP_NOTICE(p_hwfn,
+ "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -786,24 +828,7 @@ int qed_hw_stop(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
- if ((!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
- break;
-
- usleep_range(1000, 2000);
- }
- if (i == QED_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
+ qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
/* Disable Attention Generation */
qed_int_igu_disable_int(p_hwfn, p_ptt);
@@ -832,7 +857,7 @@ int qed_hw_stop(struct qed_dev *cdev)
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
- int i, j;
+ int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
@@ -851,25 +876,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
- qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
- if ((!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN)) &&
- (!qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK)))
- break;
-
- usleep_range(1000, 2000);
- }
- if (i == QED_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)qed_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
-
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
@@ -954,18 +960,8 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
}
/* Setup bar access */
-static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
- int rc;
-
- /* Allocate PTT pool */
- rc = qed_ptt_pool_alloc(p_hwfn);
- if (rc)
- return rc;
-
- /* Allocate the main PTT */
- p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
-
/* clear indirect access */
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
@@ -980,8 +976,6 @@ static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
/* enable internal target-read */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
-
- return 0;
}
static void get_function_id(struct qed_hwfn *p_hwfn)
@@ -1016,14 +1010,17 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
u32 *resc_start = p_hwfn->hw_info.resc_start;
u32 *resc_num = p_hwfn->hw_info.resc_num;
+ struct qed_sb_cnt_info sb_cnt_info;
int num_funcs, i;
- num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
- : p_hwfn->cdev->num_ports_in_engines;
+ num_funcs = MAX_NUM_PFS_BB;
+
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[QED_SB] = min_t(u32,
(MAX_SB_PER_PATH_BB / num_funcs),
- qed_int_get_num_sbs(p_hwfn, NULL));
+ sb_cnt_info.sb_cnt);
resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
@@ -1071,7 +1068,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
- u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+ u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
struct qed_mcp_link_params *link;
/* Read global nvm_cfg address */
@@ -1086,13 +1083,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
- /* Read Vendor Id / Device Id */
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- offsetof(struct nvm_cfg1, glob) +
- offsetof(struct nvm_cfg1_glob, pci_id);
- p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
- NVM_CFG1_GLOB_VENDOR_ID_MASK;
-
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
offsetof(struct nvm_cfg1, glob) +
offsetof(struct nvm_cfg1_glob, core_cfg);
@@ -1134,21 +1124,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
break;
}
- addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
- offsetof(struct nvm_cfg1_func, device_id);
- val = qed_rd(p_hwfn, p_ptt, addr);
-
- if (IS_MF(p_hwfn)) {
- p_hwfn->hw_info.device_id =
- (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
- NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
- } else {
- p_hwfn->hw_info.device_id =
- (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
- NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
- }
-
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -1220,18 +1195,28 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
switch (mf_mode) {
case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
- p_hwfn->cdev->mf_mode = MF_OVLAN;
+ p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
- p_hwfn->cdev->mf_mode = MF_NPAR;
+ p_hwfn->cdev->mf_mode = QED_MF_NPAR;
break;
- case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
- p_hwfn->cdev->mf_mode = SF;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
break;
}
DP_INFO(p_hwfn, "Multi function mode is %08x\n",
p_hwfn->cdev->mf_mode);
+ /* Read device capabilities from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, device_capabilities);
+
+ device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+ __set_bit(QED_DEV_CAP_ETH,
+ &p_hwfn->hw_info.device_capabilities);
+
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
@@ -1291,31 +1276,38 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
return rc;
}
-static void qed_get_dev_info(struct qed_dev *cdev)
+static int qed_get_dev_info(struct qed_dev *cdev)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
u32 tmp;
- cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ /* Read Vendor Id / Device Id */
+ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
+ &cdev->vendor_id);
+ pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
+ &cdev->device_id);
+ cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
- cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_REV);
MASK_FIELD(CHIP_REV, cdev->chip_rev);
+ cdev->type = QED_DEV_TYPE_BB;
/* Learn number of HW-functions */
- tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CMT_ENABLED_FOR_PAIR);
- if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+ if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
cdev->num_hwfns = 2;
} else {
cdev->num_hwfns = 1;
}
- cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
- cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+ cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, cdev->chip_metal);
@@ -1323,6 +1315,14 @@ static void qed_get_dev_info(struct qed_dev *cdev)
"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
+
+ if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
+ DP_NOTICE(cdev->hwfns,
+ "The chip type/rev (BB A0) is not supported!\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
@@ -1345,15 +1345,24 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
get_function_id(p_hwfn);
- rc = qed_hw_hwfn_prepare(p_hwfn);
+ /* Allocate PTT pool */
+ rc = qed_ptt_pool_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
goto err0;
}
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
/* First hwfn learns basic information, e.g., number of hwfns */
- if (!p_hwfn->my_id)
- qed_get_dev_info(p_hwfn->cdev);
+ if (!p_hwfn->my_id) {
+ rc = qed_get_dev_info(p_hwfn->cdev);
+ if (rc != 0)
+ goto err1;
+ }
+
+ qed_hw_hwfn_prepare(p_hwfn);
/* Initialize MCP structure */
rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
@@ -1385,17 +1394,6 @@ err0:
return rc;
}
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
- u8 bar_id)
-{
- u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
- : PGLUE_B_REG_PF_BAR1_SIZE);
- u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
-
- /* Get the BAR size(in KB) from hardware given val */
- return 1 << (val + 15);
-}
-
int qed_hw_prepare(struct qed_dev *cdev,
int personality)
{
@@ -1420,11 +1418,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
u8 __iomem *addr;
/* adjust bar offset for second engine */
- addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
+ addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
p_regview = addr;
/* adjust doorbell bar offset for second engine */
- addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
+ addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
p_doorbell = addr;
/* prepare second hw function */
@@ -1536,223 +1534,6 @@ void qed_chain_free(struct qed_dev *cdev,
p_chain->p_phys_addr);
}
-static void __qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
-{
- int i, j;
-
- memset(stats, 0, sizeof(*stats));
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct eth_mstorm_per_queue_stat mstats;
- struct eth_ustorm_per_queue_stat ustats;
- struct eth_pstorm_per_queue_stat pstats;
- struct tstorm_per_port_stat tstats;
- struct port_stats port_stats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
-
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Failed to acquire ptt\n");
- continue;
- }
-
- memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_from(p_hwfn, p_ptt, &mstats,
- p_hwfn->storm_stats.mstats.address,
- p_hwfn->storm_stats.mstats.len);
-
- memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_from(p_hwfn, p_ptt, &ustats,
- p_hwfn->storm_stats.ustats.address,
- p_hwfn->storm_stats.ustats.len);
-
- memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_from(p_hwfn, p_ptt, &pstats,
- p_hwfn->storm_stats.pstats.address,
- p_hwfn->storm_stats.pstats.len);
-
- memset(&tstats, 0, sizeof(tstats));
- qed_memcpy_from(p_hwfn, p_ptt, &tstats,
- p_hwfn->storm_stats.tstats.address,
- p_hwfn->storm_stats.tstats.len);
-
- memset(&port_stats, 0, sizeof(port_stats));
-
- if (p_hwfn->mcp_info)
- qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
- p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port, stats),
- sizeof(port_stats));
- qed_ptt_release(p_hwfn, p_ptt);
-
- stats->no_buff_discards +=
- HILO_64_REGPAIR(mstats.no_buff_discard);
- stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- stats->ttl0_discard +=
- HILO_64_REGPAIR(mstats.ttl0_discard);
- stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- stats->tpa_aborts_num +=
- HILO_64_REGPAIR(mstats.tpa_aborts_num);
- stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
-
- stats->rx_ucast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- stats->rx_mcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- stats->rx_bcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- stats->rx_ucast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- stats->rx_mcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- stats->rx_bcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
-
- stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
-
- stats->tx_ucast_bytes +=
- HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- stats->tx_mcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- stats->tx_bcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- stats->tx_ucast_pkts +=
- HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- stats->tx_mcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- stats->tx_bcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- stats->tx_err_drop_pkts +=
- HILO_64_REGPAIR(pstats.error_drop_pkts);
- stats->rx_64_byte_packets += port_stats.pmm.r64;
- stats->rx_127_byte_packets += port_stats.pmm.r127;
- stats->rx_255_byte_packets += port_stats.pmm.r255;
- stats->rx_511_byte_packets += port_stats.pmm.r511;
- stats->rx_1023_byte_packets += port_stats.pmm.r1023;
- stats->rx_1518_byte_packets += port_stats.pmm.r1518;
- stats->rx_1522_byte_packets += port_stats.pmm.r1522;
- stats->rx_2047_byte_packets += port_stats.pmm.r2047;
- stats->rx_4095_byte_packets += port_stats.pmm.r4095;
- stats->rx_9216_byte_packets += port_stats.pmm.r9216;
- stats->rx_16383_byte_packets += port_stats.pmm.r16383;
- stats->rx_crc_errors += port_stats.pmm.rfcs;
- stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
- stats->rx_pause_frames += port_stats.pmm.rxpf;
- stats->rx_pfc_frames += port_stats.pmm.rxpp;
- stats->rx_align_errors += port_stats.pmm.raln;
- stats->rx_carrier_errors += port_stats.pmm.rfcr;
- stats->rx_oversize_packets += port_stats.pmm.rovr;
- stats->rx_jabbers += port_stats.pmm.rjbr;
- stats->rx_undersize_packets += port_stats.pmm.rund;
- stats->rx_fragments += port_stats.pmm.rfrg;
- stats->tx_64_byte_packets += port_stats.pmm.t64;
- stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
- stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
- stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
- stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
- stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
- stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
- stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
- stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
- stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
- stats->tx_pause_frames += port_stats.pmm.txpf;
- stats->tx_pfc_frames += port_stats.pmm.txpp;
- stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
- stats->tx_total_collisions += port_stats.pmm.tncl;
- stats->rx_mac_bytes += port_stats.pmm.rbyte;
- stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
- stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
- stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
- stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
- stats->tx_mac_bytes += port_stats.pmm.tbyte;
- stats->tx_mac_uc_packets += port_stats.pmm.txuca;
- stats->tx_mac_mc_packets += port_stats.pmm.txmca;
- stats->tx_mac_bc_packets += port_stats.pmm.txbca;
- stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
-
- for (j = 0; j < 8; j++) {
- stats->brb_truncates += port_stats.brb.brb_truncate[j];
- stats->brb_discards += port_stats.brb.brb_discard[j];
- }
- }
-}
-
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
-{
- u32 i;
-
- if (!cdev) {
- memset(stats, 0, sizeof(*stats));
- return;
- }
-
- __qed_get_vport_stats(cdev, stats);
-
- if (!cdev->reset_stats)
- return;
-
- /* Reduce the statistics baseline */
- for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
- ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
-}
-
-/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
-void qed_reset_vport_stats(struct qed_dev *cdev)
-{
- int i;
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct eth_mstorm_per_queue_stat mstats;
- struct eth_ustorm_per_queue_stat ustats;
- struct eth_pstorm_per_queue_stat pstats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
-
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Failed to acquire ptt\n");
- continue;
- }
-
- memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.mstats.address,
- &mstats,
- p_hwfn->storm_stats.mstats.len);
-
- memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.ustats.address,
- &ustats,
- p_hwfn->storm_stats.ustats.len);
-
- memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_to(p_hwfn, p_ptt,
- p_hwfn->storm_stats.pstats.address,
- &pstats,
- p_hwfn->storm_stats.pstats.len);
-
- qed_ptt_release(p_hwfn, p_ptt);
- }
-
- /* PORT statistics are not necessarily reset, so we need to
- * read and create a baseline for future statistics.
- */
- if (!cdev->reset_stats)
- DP_INFO(cdev, "Reset stats not allocated\n");
- else
- __qed_get_vport_stats(cdev, cdev->reset_stats);
-}
-
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
u16 src_id, u16 *dst_id)
{
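For reference, the BAR-size decoding in the qed_hw_bar_size() helper added above treats the register value as a power-of-two exponent over a 32kB base: size = 1 << (val + 15). A quick standalone check of that decoding (illustration only):

#include <stdio.h>

int main(void)
{
        unsigned int val;

        /* size = 1 << (val + 15): val=1 -> 64kB ... val=4 -> 512kB */
        for (val = 1; val <= 5; val++)
                printf("val=%u -> %u kB\n", val, (1u << (val + 15)) / 1024);
        return 0;
}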
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index e29a3ba6c8b0..d6c7ddf4f4d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -78,6 +78,15 @@ int qed_hw_init(struct qed_dev *cdev,
const u8 *bin_fw_data);
/**
+ * @brief qed_hw_timers_stop_all - stop the timers HW block
+ *
+ * @param cdev
+ *
+ * @return void
+ */
+void qed_hw_timers_stop_all(struct qed_dev *cdev);
+
+/**
* @brief qed_hw_stop -
*
* @param cdev
@@ -156,8 +165,6 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
-void qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
enum qed_dmae_address_type_t {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 264e954675d1..a368f5e71d95 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -34,6 +34,8 @@ enum common_event_opcode {
COMMON_EVENT_RESERVED3,
COMMON_EVENT_RESERVED4,
COMMON_EVENT_RESERVED5,
+ COMMON_EVENT_RESERVED6,
+ COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
};
@@ -45,6 +47,7 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_RESERVED,
COMMON_RAMROD_RESERVED2,
COMMON_RAMROD_RESERVED3,
+ COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -331,6 +334,179 @@ struct xstorm_core_conn_ag_ctx {
__le16 word15 /* word15 */;
};
+struct tstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 word1 /* word1 */;
+ __le32 rx_producers /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+};
+
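Each flags byte in these new aggregative contexts packs several sub-byte fields, addressed through the paired _MASK/_SHIFT macros. A minimal sketch of driving one such field, assuming the SET_FIELD()/GET_FIELD() bitfield helpers the qed driver defines alongside these headers (the helper names and written values are assumptions for illustration, not part of this diff):

	/* Set the 2-bit timer1cf field and its enable bit, then read the
	 * field back; SET_FIELD()/GET_FIELD() are assumed from the driver's
	 * common headers.
	 */
	struct tstorm_core_conn_ag_ctx ctx = { 0 };

	SET_FIELD(ctx.flags1, TSTORM_CORE_CONN_AG_CTX_CF1, 2);   /* timer1cf */
	SET_FIELD(ctx.flags3, TSTORM_CORE_CONN_AG_CTX_CF1EN, 1); /* cf1en */

	u8 timer1cf = GET_FIELD(ctx.flags1, TSTORM_CORE_CONN_AG_CTX_CF1);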
/* The core storm context for the Mstorm */
struct mstorm_core_conn_st_ctx {
__le32 reserved[24];
@@ -349,8 +525,9 @@ struct core_conn_context {
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
- struct regpair mstorm_st_padding[2];
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2] /* padding */;
};
@@ -397,10 +574,12 @@ union event_ring_element {
};
enum personality_type {
+ BAD_PERSONALITY_TYP,
PERSONALITY_RESERVED,
PERSONALITY_RESERVED2,
PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */,
PERSONALITY_RESERVED3,
+ PERSONALITY_CORE,
PERSONALITY_ETH /* Ethernet */,
PERSONALITY_RESERVED4,
MAX_PERSONALITY_TYPE
@@ -570,7 +749,7 @@ enum block_addr {
GRCBASE_NWM = 0x800000,
GRCBASE_NWS = 0x700000,
GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x618000,
+ GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -789,19 +968,19 @@ struct igu_msix_vector {
enum init_modes {
MODE_BB_A0,
- MODE_RESERVED,
+ MODE_BB_B0,
MODE_RESERVED2,
MODE_ASIC,
MODE_RESERVED3,
MODE_RESERVED4,
MODE_RESERVED5,
+ MODE_RESERVED6,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
MODE_PORTS_PER_ENG_1,
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
- MODE_40G,
MODE_100G,
MODE_EAGLE_ENG1_WORKAROUND,
MAX_INIT_MODES
@@ -816,43 +995,6 @@ enum init_phases {
MAX_INIT_PHASES
};
-struct mstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -945,6 +1087,17 @@ struct qm_rf_pq_map {
#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
};
+/* Completion params for aggregated interrupt completion */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
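The new completion-params word packs a 6-bit aggregative interrupt index (bits 0-5), a 1-bit vector enable (bit 6) and a 9-bit vector bit number (bits 7-15) into a single little-endian word. A sketch of filling it host-side, again assuming the driver's SET_FIELD() helper; the concrete index and bit values are illustrative:

	/* comp_params points at a struct sdm_agg_int_comp_params */
	u16 params = 0;

	SET_FIELD(params, SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX, 5);
	SET_FIELD(params, SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE, 1);
	SET_FIELD(params, SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT, 12);

	comp_params->params = cpu_to_le16(params);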
/* SDM operation gen command (generate aggregative interrupt) */
struct sdm_op_gen {
__le32 command;
@@ -956,223 +1109,6 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
-struct tstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
- u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
- u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
- u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
- u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 word1 /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
-};
-
-struct ustorm_core_conn_ag_ctx {
- u8 reserved /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
- u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 word1 /* word1 */;
- __le32 rx_producers /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
-};
-
-struct ystorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le16 word1 /* word1 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
- __le16 word4 /* word4 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
-};
-
/*********************************** Init ************************************/
/* Width of GRC address in bits (addresses are specified in dwords) */
@@ -1274,13 +1210,6 @@ enum chip_ids {
MAX_CHIP_IDS
};
-enum idle_chk_severity_types {
- IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
- IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
- IDLE_CHK_SEVERITY_WARNING,
- MAX_IDLE_CHK_SEVERITY_TYPES
-};
-
struct init_array_raw_hdr {
__le32 data;
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
@@ -1340,14 +1269,6 @@ struct init_callback_op {
__le16 block_id /* Blocks ID */;
};
-/* init comparison types */
-enum init_comparison_types {
- INIT_COMPARISON_EQ /* init value is included in the init command */,
- INIT_COMPARISON_OR /* init value is all zeros */,
- INIT_COMPARISON_AND /* init value is an array of values */,
- MAX_INIT_COMPARISON_TYPES
-};
-
/* init operation: delay */
struct init_delay_op {
__le32 op_data;
@@ -1444,12 +1365,10 @@ struct init_read_op {
__le32 op_data;
#define INIT_READ_OP_OP_MASK 0xF
#define INIT_READ_OP_OP_SHIFT 0
-#define INIT_READ_OP_POLL_COMP_MASK 0x7
-#define INIT_READ_OP_POLL_COMP_SHIFT 4
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
#define INIT_READ_OP_RESERVED_MASK 0x1
-#define INIT_READ_OP_RESERVED_SHIFT 7
-#define INIT_READ_OP_POLL_MASK 0x1
-#define INIT_READ_OP_POLL_SHIFT 8
+#define INIT_READ_OP_RESERVED_SHIFT 8
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val;
@@ -1477,6 +1396,14 @@ enum init_op_types {
MAX_INIT_OP_TYPES
};
+enum init_poll_types {
+ INIT_POLL_NONE /* No polling */,
+ INIT_POLL_EQ /* init value is included in the init command */,
+ INIT_POLL_OR /* init value is all zeros */,
+ INIT_POLL_AND /* init value is an array of values */,
+ MAX_INIT_POLL_TYPES
+};
+
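With this change a polled read is described entirely by the widened 4-bit POLL_TYPE field of init_read_op.op_data (carrying one of the init_poll_types values), replacing the former 3-bit comparison code plus separate POLL bit. A sketch of encoding such an op, assuming SET_FIELD() and an INIT_OP_READ opcode in enum init_op_types (both assumptions; the register address is illustrative):

	u32 op_data = 0;

	SET_FIELD(op_data, INIT_READ_OP_OP, INIT_OP_READ);        /* assumed opcode */
	SET_FIELD(op_data, INIT_READ_OP_POLL_TYPE, INIT_POLL_EQ); /* poll until equal */
	SET_FIELD(op_data, INIT_READ_OP_ADDRESS, reg_addr >> 2);  /* dword address */

	read_op->op_data = cpu_to_le32(op_data);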
/* init source types */
enum init_source_types {
INIT_SRC_INLINE /* init value is included in the init command */,
@@ -1677,175 +1604,213 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
u16 num_pqs);
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \
- ((port_id) * \
- IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \
- ((vf_id) * \
- IRO[2].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+ (IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base)
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \
- ((pf_id) * \
- IRO[4].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[4].size)
-/* Ustorm Completion ring consumer */
-#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \
- ((global_queue_id) * \
- IRO[5].m1))
-#define USTORM_CQ_CONS_SIZE (IRO[5].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
+ (IRO[6].base + ((global_queue_id) * IRO[6].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size)
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \
- ((core_rx_queue_id) * \
- IRO[12].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size)
-/* Tstorm LiteL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \
- ((core_rx_q_id) * \
- IRO[13].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size)
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+ (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size)
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
/* Ustorm LightL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \
- ((core_rx_q_id) * \
- IRO[14].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
/* Pstorm LightL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \
- ((core_txst_id) * \
- IRO[15].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \
- ((stat_counter_id) * \
- IRO[16].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[17].base + ((stat_counter_id) * IRO[17].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
/* Mstorm producers */
-#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \
- ((queue_id) * \
- IRO[17].m1))
-#define MSTORM_PRODS_SIZE (IRO[17].size)
+#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1))
+#define MSTORM_PRODS_SIZE (IRO[18].size)
/* TPA aggregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \
- ((stat_counter_id) * \
- IRO[19].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[19].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[20].base + ((stat_counter_id) * IRO[20].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
/* Ustorm queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \
- ((queue_id) * \
- IRO[20].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size)
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[21].base + ((queue_id) * IRO[21].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \
- ((stat_counter_id) * \
- IRO[21].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \
- ((pf_id) * \
- IRO[22].m1))
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
+/* Tstorm Eth Rx rate limit */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
/* Ystorm queue zone */
-#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \
- ((queue_id) * \
- IRO[23].m1))
-#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size)
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[25].base + ((queue_id) * IRO[25].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \
- ((rss_id) * \
- IRO[24].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size)
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[26].base + ((rss_id) * IRO[26].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \
- ((rss_id) * \
- IRO[25].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size)
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[27].base + ((rss_id) * IRO[27].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \
- ((pf_id) * \
- IRO[26].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size)
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[28].base + ((pf_id) * IRO[28].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \
- ((cmdq_queue_id) * \
- IRO[27].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+ (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
/* Mstorm rq-cons of given queue-id */
-#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \
- ((rq_queue_id) * \
- IRO[28].m1))
-#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size)
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
+ (IRO[30].base + ((rq_queue_id) * IRO[30].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size)
+/* Mstorm bdq-external-producer of given BDQ function ID, BD queue ID */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size)
+/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BD queue ID */
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+ (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[33].base + ((pf_id) * IRO[33].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size)
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[34].base + ((pf_id) * IRO[34].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size)
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+ (IRO[35].base + ((pf_id) * IRO[35].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size)
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[36].base + ((pf_id) * IRO[36].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size)
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[37].base + ((pf_id) * IRO[37].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size)
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+ (IRO[38].base + ((pf_id) * IRO[38].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size)
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[39].base + ((pf_id) * IRO[39].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size)
+/* Mstorm FCoE RX stats */
+#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[40].base + ((pf_id) * IRO[40].m1))
+#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size)
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[41].base + ((pf_id) * IRO[41].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size)
/* Pstorm RoCE statistics */
-#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \
- ((stat_counter_id) * \
- IRO[29].m1))
-#define PSTORM_ROCE_STAT_SIZE (IRO[29].size)
+#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
+ (IRO[42].base + ((stat_counter_id) * IRO[42].m1))
+#define PSTORM_ROCE_STAT_SIZE (IRO[42].size)
/* Tstorm RoCE statistics */
-#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \
- ((stat_counter_id) * \
- IRO[30].m1))
-#define TSTORM_ROCE_STAT_SIZE (IRO[30].size)
-
-static const struct iro iro_arr[31] = {
- { 0x10, 0x0, 0x0, 0x0, 0x8 },
- { 0x4448, 0x60, 0x0, 0x0, 0x60 },
- { 0x498, 0x8, 0x0, 0x0, 0x4 },
- { 0x494, 0x0, 0x0, 0x0, 0x4 },
- { 0x10, 0x8, 0x0, 0x0, 0x2 },
- { 0x90, 0x8, 0x0, 0x0, 0x2 },
- { 0x4540, 0x0, 0x0, 0x0, 0xf8 },
- { 0x39e0, 0x0, 0x0, 0x0, 0xf8 },
- { 0x2598, 0x0, 0x0, 0x0, 0xf8 },
- { 0x4350, 0x0, 0x0, 0x0, 0xf8 },
- { 0x52d0, 0x0, 0x0, 0x0, 0xf8 },
- { 0x7a48, 0x0, 0x0, 0x0, 0xf8 },
- { 0x100, 0x8, 0x0, 0x0, 0x8 },
- { 0x5808, 0x10, 0x0, 0x0, 0x10 },
- { 0xb100, 0x30, 0x0, 0x0, 0x30 },
- { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
- { 0x54f8, 0x40, 0x0, 0x0, 0x40 },
- { 0x200, 0x10, 0x0, 0x0, 0x8 },
- { 0x9e70, 0x0, 0x0, 0x0, 0x4 },
- { 0x7ca0, 0x40, 0x0, 0x0, 0x30 },
- { 0xd00, 0x8, 0x0, 0x0, 0x8 },
- { 0x2790, 0x80, 0x0, 0x0, 0x38 },
- { 0xa520, 0xf0, 0x0, 0x0, 0xf0 },
- { 0x80, 0x8, 0x0, 0x0, 0x8 },
- { 0xac0, 0x8, 0x0, 0x0, 0x8 },
- { 0x2580, 0x8, 0x0, 0x0, 0x8 },
- { 0x2500, 0x8, 0x0, 0x0, 0x8 },
- { 0x440, 0x8, 0x0, 0x0, 0x2 },
- { 0x1800, 0x8, 0x0, 0x0, 0x2 },
- { 0x27c8, 0x80, 0x0, 0x0, 0x10 },
- { 0x4710, 0x10, 0x0, 0x0, 0x10 },
+#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
+ (IRO[43].base + ((stat_counter_id) * IRO[43].m1))
+#define TSTORM_ROCE_STAT_SIZE (IRO[43].size)
+
+static const struct iro iro_arr[44] = {
+ { 0x10, 0x0, 0x0, 0x0, 0x8 },
+ { 0x47c8, 0x60, 0x0, 0x0, 0x60 },
+ { 0x5e30, 0x20, 0x0, 0x0, 0x20 },
+ { 0x510, 0x8, 0x0, 0x0, 0x4 },
+ { 0x490, 0x8, 0x0, 0x0, 0x4 },
+ { 0x10, 0x8, 0x0, 0x0, 0x2 },
+ { 0x90, 0x8, 0x0, 0x0, 0x2 },
+ { 0x4940, 0x0, 0x0, 0x0, 0x78 },
+ { 0x3de0, 0x0, 0x0, 0x0, 0x78 },
+ { 0x2998, 0x0, 0x0, 0x0, 0x78 },
+ { 0x4750, 0x0, 0x0, 0x0, 0x78 },
+ { 0x56d0, 0x0, 0x0, 0x0, 0x78 },
+ { 0x7e50, 0x0, 0x0, 0x0, 0x78 },
+ { 0x100, 0x8, 0x0, 0x0, 0x8 },
+ { 0x5c10, 0x10, 0x0, 0x0, 0x10 },
+ { 0xb508, 0x30, 0x0, 0x0, 0x30 },
+ { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
+ { 0x58a0, 0x40, 0x0, 0x0, 0x40 },
+ { 0x200, 0x10, 0x0, 0x0, 0x8 },
+ { 0xa230, 0x0, 0x0, 0x0, 0x4 },
+ { 0x8058, 0x40, 0x0, 0x0, 0x30 },
+ { 0xd00, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2b30, 0x80, 0x0, 0x0, 0x38 },
+ { 0xa808, 0x0, 0x0, 0x0, 0xf0 },
+ { 0xa8f8, 0x8, 0x0, 0x0, 0x8 },
+ { 0x80, 0x8, 0x0, 0x0, 0x8 },
+ { 0xac0, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2580, 0x8, 0x0, 0x0, 0x8 },
+ { 0x2500, 0x8, 0x0, 0x0, 0x8 },
+ { 0x440, 0x8, 0x0, 0x0, 0x2 },
+ { 0x1800, 0x8, 0x0, 0x0, 0x2 },
+ { 0x1a00, 0x10, 0x8, 0x0, 0x2 },
+ { 0x640, 0x10, 0x8, 0x0, 0x2 },
+ { 0xd9b8, 0x38, 0x0, 0x0, 0x24 },
+ { 0x11048, 0x10, 0x0, 0x0, 0x8 },
+ { 0x11678, 0x38, 0x0, 0x0, 0x18 },
+ { 0xaec0, 0x30, 0x0, 0x0, 0x10 },
+ { 0x8700, 0x28, 0x0, 0x0, 0x18 },
+ { 0xec00, 0x10, 0x0, 0x0, 0x10 },
+ { 0xde38, 0x40, 0x0, 0x0, 0x30 },
+ { 0x121a8, 0x38, 0x0, 0x0, 0x8 },
+ { 0xf068, 0x20, 0x0, 0x0, 0x20 },
+ { 0x2b68, 0x80, 0x0, 0x0, 0x10 },
+ { 0x4ab8, 0x10, 0x0, 0x0, 0x10 },
};
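All of the *_OFFSET/_SIZE macros above resolve through this table: assuming struct iro carries its five initializers as { base, m1, m2, m3, size }, a one-parameter macro expands to base + index * m1, and the new two-parameter BDQ macros add a second (bdq_id) * m2 term. For example, with row 2 of the table, { 0x5e30, 0x20, 0x0, 0x0, 0x20 }:

	/* TSTORM_LL2_PORT_STAT_OFFSET(port_id)
	 *	== IRO[2].base + (port_id) * IRO[2].m1
	 *	== 0x5e30 + (port_id) * 0x20,
	 * and TSTORM_LL2_PORT_STAT_SIZE == IRO[2].size == 0x20 bytes.
	 */
	u32 stat_addr = TSTORM_LL2_PORT_STAT_OFFSET(port_id);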
/* Runtime array offsets */
@@ -1866,426 +1831,427 @@ static const struct iro iro_arr[31] = {
#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2232
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6663
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6665
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6667
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29834
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29836
#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856
#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29901
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29902
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29964
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29966
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30505
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30507
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30762
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30764
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30764
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30766
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30796
+#define QM_REG_RLPFCRD_RT_OFFSET 30798
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30812
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814
+#define QM_REG_RLPFENABLE_RT_OFFSET 30814
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30846
+#define QM_REG_WFQPFCRD_RT_OFFSET 30848
#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31006
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31007
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31008
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31009
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31520
+#define QM_REG_TXPQMAP_RT_OFFSET 31522
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544
-#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 33056
+#define QM_REG_WFQVPCRD_RT_OFFSET 32546
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33568
+#define QM_REG_WFQVPMAP_RT_OFFSET 33058
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431
-
-#define RUNTIME_ARRAY_SIZE 34432
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922
+
+#define RUNTIME_ARRAY_SIZE 33923
-/* The eth storm context for the Ystorm */
-struct ystorm_eth_conn_st_ctx {
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
__le32 reserved[4];
};
@@ -2562,14 +2528,226 @@ struct xstorm_eth_conn_ag_ctx {
__le16 word15 /* word15 */;
};
-/* The eth storm context for the Tstorm */
-struct tstorm_eth_conn_st_ctx {
- __le32 reserved[4];
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[8];
};
-/* The eth storm context for the Mstorm */
-struct mstorm_eth_conn_st_ctx {
- __le32 reserved[8];
+struct ystorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 terminate_spqe /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 tx_bd_cons_upd /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+ __le32 reg5 /* reg5 */;
+ __le32 reg6 /* reg6 */;
+ __le32 reg7 /* reg7 */;
+ __le32 reg8 /* reg8 */;
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 rx_bd_cons /* word0 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ __le16 rx_bd_prod /* word1 */;
+ __le16 word2 /* conn_dpi */;
+ __le16 word3 /* word3 */;
+ __le32 reg9 /* reg9 */;
+ __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* conn_dpi */;
+ __le16 tx_bd_cons /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 tx_int_coallecing_timeset /* reg3 */;
+ __le16 tx_drv_bd_cons /* word2 */;
+ __le16 rx_drv_cqe_cons /* word3 */;
};
/* The eth storm context for the Ustorm */
@@ -2577,24 +2755,30 @@ struct ustorm_eth_conn_st_ctx {
__le32 reserved[40];
};
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
/* eth connection context */
struct eth_conn_context {
- struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2] /* padding */;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
- struct regpair pstorm_st_padding[2] /* padding */;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct tstorm_eth_conn_st_ctx tstorm_st_context;
- struct regpair tstorm_st_padding[2] /* padding */;
- struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
enum eth_filter_action {
ETH_FILTER_ACTION_REMOVE,
ETH_FILTER_ACTION_ADD,
- ETH_FILTER_ACTION_REPLACE,
+ ETH_FILTER_ACTION_REMOVE_ALL,
MAX_ETH_FILTER_ACTION
};
@@ -2653,6 +2837,32 @@ enum eth_ramrod_cmd_id {
MAX_ETH_RAMROD_CMD_ID
};
+enum eth_tx_err {
+	ETH_TX_ERR_DROP /* Drop erroneous packet. */,
+ ETH_TX_ERR_ASSERT_MALICIOUS,
+ MAX_ETH_TX_ERR
+};
+
+struct eth_tx_err_vals {
+ __le16 values;
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
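The values word above is a packed bitfield: each error class gets one bit, addressed through its _MASK/_SHIFT pair, selecting a policy from the eth_tx_err enum (0 = ETH_TX_ERR_DROP, 1 = ETH_TX_ERR_ASSERT_MALICIOUS). A minimal sketch of how such a word might be built before being copied into a ramrod, assuming a helper macro equivalent to the driver's usual name##_MASK/name##_SHIFT accessor idiom (the helper and function names here are illustrative, and kernel byteorder helpers are assumed):

	/* Illustrative helper mirroring the MASK/SHIFT idiom used above. */
	#define QED_SET_FIELD(value, name, flag)				\
		do {								\
			(value) &= ~((name##_MASK) << (name##_SHIFT));		\
			(value) |= ((u16)(flag)) << (name##_SHIFT);		\
		} while (0)

	static __le16 qed_build_tx_err_vals(void)
	{
		u16 vals = 0;

		/* Assert on MTU violations and anti-spoofing errors;
		 * all other error classes keep ETH_TX_ERR_DROP (0).
		 */
		QED_SET_FIELD(vals, ETH_TX_ERR_VALS_MTU_VIOLATION, 1);
		QED_SET_FIELD(vals, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR, 1);

		return cpu_to_le16(vals);
	}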
struct eth_vport_rss_config {
__le16 capabilities;
#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
@@ -2669,12 +2879,8 @@ struct eth_vport_rss_config {
#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
u8 rss_id;
u8 rss_mode;
u8 update_rss_key;
@@ -2713,7 +2919,19 @@ struct eth_vport_rx_mode {
};
struct eth_vport_tpa_param {
- u64 reserved[2];
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+ u8 max_buff_num;
+ u8 reserved;
};
struct eth_vport_tx_mode {
@@ -2749,10 +2967,14 @@ struct rx_queue_start_ramrod_data {
u8 pxp_tph_valid_pkt;
u8 pxp_st_hint;
__le16 pxp_st_index;
- u8 reserved[4];
- struct regpair cqe_pbl_addr;
- struct regpair bd_base;
- struct regpair sge_base;
+ u8 pmd_mode;
+ u8 notify_en;
+ u8 toggle_val;
+ u8 reserved[7];
+ __le16 reserved1;
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair reserved2;
};
struct rx_queue_stop_ramrod_data {
@@ -2764,23 +2986,24 @@ struct rx_queue_stop_ramrod_data {
};
struct rx_queue_update_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 init_sge_ring_flg;
- u8 vport_id;
- u8 pxp_tph_valid_sge;
- u8 pxp_st_hint;
- __le16 pxp_st_index;
- u8 reserved[6];
- struct regpair sge_base;
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[4];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+ struct regpair reserved6;
};
struct tx_queue_start_ramrod_data {
__le16 sb_id;
u8 sb_index;
u8 vport_id;
- u8 tc;
+ u8 reserved0;
u8 stats_counter_id;
__le16 qm_pq_id;
u8 flags;
@@ -2790,18 +3013,25 @@ struct tx_queue_start_ramrod_data {
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3
- u8 pin_context;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- __le16 pxp_st_index;
- u8 pxp_st_hint;
- u8 reserved1[3];
- __le16 queue_zone_id;
- __le16 test_dup_count;
- __le16 pbl_size;
- struct regpair pbl_base_addr;
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ __le16 comp_agg_size;
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ __le16 tx_queue_id;
+ struct regpair pbl_base_addr;
+ struct regpair bd_cons_address;
};
struct tx_queue_stop_ramrod_data {
@@ -2822,16 +3052,16 @@ struct vport_start_ramrod_data {
struct eth_vport_rx_mode rx_mode;
struct eth_vport_tx_mode tx_mode;
struct eth_vport_tpa_param tpa_param;
- __le16 sge_buff_size;
- u8 max_sges_num;
- u8 tx_switching_en;
- u8 anti_spoofing_en;
- u8 default_vlan_en;
- u8 handle_ptp_pkts;
- u8 silent_vlan_removal_en;
- __le16 default_vlan;
- u8 untagged;
- u8 reserved[7];
+ __le16 default_vlan;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+ u8 default_vlan_en;
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav;
+ u8 zero_placement_offset;
+ u8 reserved[7];
};
struct vport_stop_ramrod_data {
@@ -2840,36 +3070,35 @@ struct vport_stop_ramrod_data {
};
struct vport_update_ramrod_data_cmn {
- u8 vport_id;
- u8 update_rx_active_flg;
- u8 rx_active_flg;
- u8 update_tx_active_flg;
- u8 tx_active_flg;
- u8 update_rx_mode_flg;
- u8 update_tx_mode_flg;
- u8 update_approx_mcast_flg;
- u8 update_rss_flg;
- u8 update_inner_vlan_removal_en_flg;
- u8 inner_vlan_removal_en;
- u8 update_tpa_param_flg;
- u8 update_tpa_en_flg;
- u8 update_sge_param_flg;
- __le16 sge_buff_size;
- u8 max_sges_num;
- u8 update_tx_switching_en_flg;
- u8 tx_switching_en;
- u8 update_anti_spoofing_en_flg;
- u8 anti_spoofing_en;
- u8 update_handle_ptp_pkts;
- u8 handle_ptp_pkts;
- u8 update_default_vlan_en_flg;
- u8 default_vlan_en;
- u8 update_default_vlan_flg;
- __le16 default_vlan;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- u8 silent_vlan_removal_en;
- u8 reserved;
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_tx_switching_en_flg;
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+ u8 default_vlan_en;
+ u8 update_default_vlan_flg;
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 update_mtu_flg;
+ __le16 mtu;
+ u8 reserved[2];
};
struct vport_update_ramrod_mcast {
@@ -2885,436 +3114,6 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct mstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
-struct tstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
- u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
- u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
- u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
- u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 rx_bd_cons /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 rx_bd_prod /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
-};
-
-struct ustorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
- u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
- u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 tx_bd_cons /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le16 tx_drv_bd_cons /* word2 */;
- __le16 rx_drv_cqe_cons /* word3 */;
-};
-
-struct xstorm_eth_hw_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
- u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
- u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
- u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
- u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
- u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
- u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
- u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
- u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
- u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
- u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
- u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 word1 /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 go_to_bd_cons /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
-};
-
#define VF_MAX_STATIC 192 /* In case of K2 */
#define MCP_GLOB_PATH_MAX 2
@@ -3818,6 +3617,13 @@ struct public_port {
struct dcbx_local_params local_admin_dcbx_mib;
struct dcbx_mib remote_dcbx_mib;
struct dcbx_mib operational_dcbx_mib;
+
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+ u32 transceiver_data;
+#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF
+#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001
};
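For reference, a minimal sketch of decoding the new transceiver_data field with the MASK/SHIFT idiom this header uses throughout; the helper name is hypothetical, and only the three macros above come from the patch:

static bool qed_transceiver_is_present(u32 transceiver_data)
{
	/* Isolate the 8-bit state field and shift it down to bit 0. */
	u32 state = (transceiver_data & PMM_TRANSCEIVER_STATE_MASK) >>
		    PMM_TRANSCEIVER_STATE_SHIFT;

	return state == PMM_TRANSCEIVER_STATE_PRESENT;
}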
/**************************************/
@@ -3830,7 +3636,11 @@ struct public_func {
u32 iscsi_boot_signature;
u32 iscsi_boot_block_offset;
- u32 reserved[8];
+ u32 mtu_size;
+ u32 c2s_pcp_map_lower;
+ u32 c2s_pcp_map_upper;
+ u32 c2s_pcp_map_default;
+ u32 reserved[4];
u32 config;
@@ -3894,10 +3704,10 @@ struct public_func {
#define DRV_ID_MCP_HSI_VER_SHIFT 16
#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
-#define DRV_ID_DRV_TYPE_MASK 0xff000000
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
@@ -3905,6 +3715,10 @@ struct public_func {
#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
};
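A hedged sketch of how the narrowed type mask and the new bit-31 flag compose into a single drv_id word; qed_build_drv_id() is a hypothetical helper, while the macros are the ones defined above:

static u32 qed_build_drv_id(bool init_hw)
{
	/* Driver type now occupies bits 24..30; bit 31 carries INIT_HW. */
	u32 drv_id = DRV_ID_DRV_TYPE_LINUX | DRV_ID_MCP_HSI_VER_CURRENT;

	if (init_hw)
		drv_id |= DRV_ID_DRV_INIT_HW_FLAG;

	return drv_id;
}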
/**************************************/
@@ -3964,6 +3778,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MASK 0xffff0000
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -4100,6 +3915,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
+#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -4142,6 +3958,14 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_MAX
};
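A hedged dispatch sketch for the newly added message types; the handler and the actions in its comments are illustrative assumptions, and only the enum values come from the patch:

static void qed_handle_mfw_msg(struct qed_hwfn *p_hwfn,
			       enum MFW_DRV_MSG_TYPE type)
{
	switch (type) {
	case MFW_DRV_MSG_BW_UPDATE:
		/* e.g. re-read the bandwidth configuration from the MFW */
		break;
	case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
		/* e.g. re-read transceiver_data from struct public_port */
		break;
	default:
		break;
	}
}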
@@ -4212,7 +4036,7 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
@@ -4643,8 +4467,12 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
-
- u32 reserved[46]; /* 0x88 */
+ u32 device_capabilities; /* 0x88 */
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+ u32 power_dissipated; /* 0x8C */
+ u32 power_consumed; /* 0x90 */
+ u32 efi_version; /* 0x94 */
+ u32 reserved[42]; /* 0x98 */
};
struct nvm_cfg1_path {
@@ -4652,26 +4480,8 @@ struct nvm_cfg1_path {
};
struct nvm_cfg1_port {
- u32 power_dissipated; /* 0x0 */
-#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF
-#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0
-#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00
-#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8
-#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000
-#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16
-#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000
-#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24
-
- u32 power_consumed; /* 0x4 */
-#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF
-#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0
-#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00
-#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8
-#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000
-#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16
-#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000
-#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24
-
+ u32 reserved__m_relocated_to_option_123; /* 0x0 */
+ u32 reserved__m_relocated_to_option_124; /* 0x4 */
u32 generic_cont0; /* 0x8 */
#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
@@ -4699,7 +4509,9 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
u32 pcie_cfg; /* 0xC */
#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
@@ -4784,10 +4596,11 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
@@ -4801,9 +4614,6 @@ struct nvm_cfg1_port {
u32 mgmt_traffic; /* 0x20 */
#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0
-#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1
-#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2
u32 ext_phy; /* 0x24 */
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
@@ -4814,16 +4624,12 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
u32 mba_cfg1; /* 0x28 */
-#define NVM_CFG1_PORT_MBA_MASK 0x00000001
-#define NVM_CFG1_PORT_MBA_OFFSET 0
-#define NVM_CFG1_PORT_MBA_DISABLED 0x0
-#define NVM_CFG1_PORT_MBA_ENABLED 0x1
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2
-#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3
+#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
+#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
@@ -4836,61 +4642,30 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
-#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0
-#define NVM_CFG1_PORT_RESERVED5_2K 0x1
-#define NVM_CFG1_PORT_RESERVED5_4K 0x2
-#define NVM_CFG1_PORT_RESERVED5_8K 0x3
-#define NVM_CFG1_PORT_RESERVED5_16K 0x4
-#define NVM_CFG1_PORT_RESERVED5_32K 0x5
-#define NVM_CFG1_PORT_RESERVED5_64K 0x6
-#define NVM_CFG1_PORT_RESERVED5_128K 0x7
-#define NVM_CFG1_PORT_RESERVED5_256K 0x8
-#define NVM_CFG1_PORT_RESERVED5_512K 0x9
-#define NVM_CFG1_PORT_RESERVED5_1M 0xA
-#define NVM_CFG1_PORT_RESERVED5_2M 0xB
-#define NVM_CFG1_PORT_RESERVED5_4M 0xC
-#define NVM_CFG1_PORT_RESERVED5_8M 0xD
-#define NVM_CFG1_PORT_RESERVED5_16M 0xE
-#define NVM_CFG1_PORT_RESERVED5_32M 0xF
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
-#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
u32 mba_cfg2; /* 0x2C */
-#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0
-#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000
-#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16
+#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
+#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
+#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
u32 vf_cfg; /* 0x30 */
#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
-#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0
-#define NVM_CFG1_PORT_RESERVED6_4K 0x1
-#define NVM_CFG1_PORT_RESERVED6_8K 0x2
-#define NVM_CFG1_PORT_RESERVED6_16K 0x3
-#define NVM_CFG1_PORT_RESERVED6_32K 0x4
-#define NVM_CFG1_PORT_RESERVED6_64K 0x5
-#define NVM_CFG1_PORT_RESERVED6_128K 0x6
-#define NVM_CFG1_PORT_RESERVED6_256K 0x7
-#define NVM_CFG1_PORT_RESERVED6_512K 0x8
-#define NVM_CFG1_PORT_RESERVED6_1M 0x9
-#define NVM_CFG1_PORT_RESERVED6_2M 0xA
-#define NVM_CFG1_PORT_RESERVED6_4M 0xB
-#define NVM_CFG1_PORT_RESERVED6_8M 0xC
-#define NVM_CFG1_PORT_RESERVED6_16M 0xD
-#define NVM_CFG1_PORT_RESERVED6_32M 0xE
-#define NVM_CFG1_PORT_RESERVED6_64M 0xF
struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
@@ -4973,18 +4748,16 @@ struct nvm_cfg1_func {
u32 device_id; /* 0x10 */
#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
-#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16
+#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
u32 cmn_cfg; /* 0x14 */
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4
-#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
@@ -5029,8 +4802,8 @@ struct nvm_cfg1_func {
struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
-
- u32 reserved[9]; /* 0x2C */
+ u32 preboot_generic_cfg; /* 0x2C */
+ u32 reserved[8]; /* 0x30 */
};
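For illustration, a hedged decode of the renamed preboot boot-protocol field in cmn_cfg; the helper is hypothetical, the macros are the ones defined in the struct above:

static bool qed_preboot_is_pxe(u32 cmn_cfg)
{
	u8 proto = (cmn_cfg & NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK) >>
		   NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET;

	return proto == NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE;
}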
struct nvm_cfg1 {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index ffa99273b353..a95a3e4b3101 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -44,7 +44,7 @@ struct qed_ptt_pool {
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
- GFP_ATOMIC);
+ GFP_KERNEL);
int i;
if (!p_pool)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index 0b21a553cc7d..f55ebdc3c832 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -513,17 +513,14 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
* Return -1 on error.
*/
static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
- u8 start_vport,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
- u8 tc, i, vport_id;
u32 inc_val;
+ u8 tc, i;
/* go over all PF VPORTs */
- for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
- u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+ for (i = 0; i < num_vports; i++) {
if (!vport_params[i].vport_wfq)
continue;
@@ -539,20 +536,16 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
* different TCs
*/
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = pq_ids[tc];
+ u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET +
- vport_pq_id, inc_val);
- STORE_RT_REG(p_hwfn, temp + vport_pq_id,
- QM_WFQ_UPPER_BOUND |
- QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
QM_REG_WFQVPCRD_RT_OFFSET +
vport_pq_id,
- QM_WFQ_INIT_CRD(inc_val) |
QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
}
@@ -709,8 +702,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
return -1;
- if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
- p_params->num_vports, vport_params))
+ if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 796f1390e598..3269b3610e03 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -55,63 +55,98 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
int i;
for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
- p_hwfn->rt_data[i].b_valid = false;
+ p_hwfn->rt_data.b_valid[i] = false;
}
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 val)
{
- p_hwfn->rt_data[rt_offset].init_val = val;
- p_hwfn->rt_data[rt_offset].b_valid = true;
+ p_hwfn->rt_data.init_val[rt_offset] = val;
+ p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
- u32 rt_offset,
- u32 *val,
+ u32 rt_offset, u32 *p_val,
size_t size)
{
size_t i;
for (i = 0; i < size / sizeof(u32); i++) {
- p_hwfn->rt_data[rt_offset + i].init_val = val[i];
- p_hwfn->rt_data[rt_offset + i].b_valid = true;
+ p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+ p_hwfn->rt_data.b_valid[rt_offset + i] = true;
}
}
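A hedged usage sketch for the two store helpers above; EXAMPLE_RT_OFFSET and EXAMPLE_AGG_RT_OFFSET are illustrative placeholders, not real runtime-array constants:

u32 vals[4] = { 0x0, 0x1, 0x2, 0x3 };

/* Store a single runtime register value... */
qed_init_store_rt_reg(p_hwfn, EXAMPLE_RT_OFFSET, 0x1234);

/* ...or a contiguous aggregate. Note the size argument is in bytes;
 * the helper divides by sizeof(u32) internally.
 */
qed_init_store_rt_agg(p_hwfn, EXAMPLE_AGG_RT_OFFSET, vals, sizeof(vals));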
-static void qed_init_rt(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 addr,
- u32 rt_offset,
- u32 size)
+static int qed_init_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u16 rt_offset,
+ u16 size,
+ bool b_must_dmae)
{
- struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
- u32 i;
+ u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+ bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+ u16 i, segment;
+ int rc = 0;
+	/* Since not all RT entries are initialized, walk the RT array
+	 * and DMA each contiguous segment of initialized values.
+	 */
for (i = 0; i < size; i++) {
- if (!rt_data[i].b_valid)
+ if (!p_valid[i])
continue;
- qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+
+		/* If this access doesn't require wide-bus (DMAE)
+		 * configuration, simply write the data directly
+		 * instead of using DMAE.
+		 */
+ if (!b_must_dmae) {
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2),
+ p_init_val[i]);
+ continue;
+ }
+
+ /* Start of a new segment */
+ for (segment = 1; i + segment < size; segment++)
+ if (!p_valid[i + segment])
+ break;
+
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(p_init_val + i),
+ addr + (i << 2), segment, 0);
+ if (rc != 0)
+ return rc;
+
+		/* Jump over the entire segment, including the invalid entry */
+ i += segment;
}
+
+ return rc;
}
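To make the segment batching above concrete, a standalone hedged trace in plain C (not driver code): for valid = {1,1,1,0,1} it issues one 3-entry DMA at offset 0, skips the invalid slot, then a 1-entry DMA at offset 4.

#include <stdbool.h>
#include <stdio.h>

static void demo_rt_segments(const bool *valid, int size)
{
	for (int i = 0; i < size; i++) {
		if (!valid[i])
			continue;

		/* Count the run of consecutive valid entries. */
		int seg = 1;
		while (i + seg < size && valid[i + seg])
			seg++;

		printf("DMA %d entries at offset %d\n", seg, i);

		/* Land on the entry that ended the segment; the loop's
		 * i++ then steps past it to the next candidate.
		 */
		i += seg;
	}
}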
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_rt_data *rt_data;
+ struct qed_rt_data *rt_data = &p_hwfn->rt_data;
- rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
- if (!rt_data)
+ rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
+ GFP_KERNEL);
+ if (!rt_data->b_valid)
return -ENOMEM;
- p_hwfn->rt_data = rt_data;
+ rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE,
+ GFP_KERNEL);
+ if (!rt_data->init_val) {
+ kfree(rt_data->b_valid);
+ return -ENOMEM;
+ }
return 0;
}
void qed_init_free(struct qed_hwfn *p_hwfn)
{
- kfree(p_hwfn->rt_data);
- p_hwfn->rt_data = NULL;
+ kfree(p_hwfn->rt_data.init_val);
+ kfree(p_hwfn->rt_data.b_valid);
}
static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
@@ -289,7 +324,8 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
case INIT_SRC_RUNTIME:
qed_init_rt(p_hwfn, p_ptt, addr,
le16_to_cpu(arg->runtime.offset),
- le16_to_cpu(arg->runtime.size));
+ le16_to_cpu(arg->runtime.size),
+ b_must_dmae);
break;
}
@@ -316,49 +352,50 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct init_read_op *cmd)
{
- u32 data = le32_to_cpu(cmd->op_data);
- u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+ bool (*comp_check)(u32 val, u32 expected_val);
+ u32 delay = QED_INIT_POLL_PERIOD_US, val;
+ u32 data, addr, poll;
+ int i;
+
+ data = le32_to_cpu(cmd->op_data);
+ addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+ poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
- bool (*comp_check)(u32 val,
- u32 expected_val);
- u32 delay = QED_INIT_POLL_PERIOD_US, val;
val = qed_rd(p_hwfn, p_ptt, addr);
- data = le32_to_cpu(cmd->op_data);
- if (GET_FIELD(data, INIT_READ_OP_POLL)) {
- int i;
+ if (poll == INIT_POLL_NONE)
+ return;
- switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
- case INIT_COMPARISON_EQ:
- comp_check = comp_eq;
- break;
- case INIT_COMPARISON_OR:
- comp_check = comp_or;
- break;
- case INIT_COMPARISON_AND:
- comp_check = comp_and;
- break;
- default:
- comp_check = NULL;
- DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
- data);
- return;
- }
+ switch (poll) {
+ case INIT_POLL_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_POLL_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_POLL_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+			data);
+ return;
+ }
- for (i = 0;
- i < QED_INIT_MAX_POLL_COUNT &&
- !comp_check(val, le32_to_cpu(cmd->expected_val));
- i++) {
- udelay(delay);
- val = qed_rd(p_hwfn, p_ptt, addr);
- }
+ data = le32_to_cpu(cmd->expected_val);
+ for (i = 0;
+ i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
+ i++) {
+ udelay(delay);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ }
- if (i == QED_INIT_MAX_POLL_COUNT)
- DP_ERR(p_hwfn,
- "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
- addr, le32_to_cpu(cmd->expected_val),
- val, data);
+ if (i == QED_INIT_MAX_POLL_COUNT) {
+ DP_ERR(p_hwfn,
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+ addr, le32_to_cpu(cmd->expected_val),
+ val, le32_to_cpu(cmd->op_data));
}
}
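The comparison callbacks selected above are defined elsewhere in qed_init_ops.c; a hedged sketch of their assumed semantics (not taken from this hunk):

static bool comp_eq(u32 val, u32 expected_val)
{
	return val == expected_val;
}

static bool comp_and(u32 val, u32 expected_val)
{
	/* Every expected bit must be set. */
	return (val & expected_val) == expected_val;
}

static bool comp_or(u32 val, u32 expected_val)
{
	/* At least one expected bit must be set. */
	return !!(val & expected_val);
}

The poll loop itself is bounded by QED_INIT_MAX_POLL_COUNT iterations of QED_INIT_POLL_PERIOD_US each, so a failed poll terminates in finite time with the timeout message.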
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 9cc9d62c1fec..2017b0121f5f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -39,24 +39,1737 @@ struct qed_sb_sp_info {
struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
+enum qed_attention_type {
+ QED_ATTN_TYPE_ATTN,
+ QED_ATTN_TYPE_PARITY,
+};
+
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
-#define ATTN_STATE_BITS (0xfff)
+struct aeu_invert_reg_bit {
+ char bit_name[30];
+
+#define ATTENTION_PARITY (1 << 0)
+
+#define ATTENTION_LENGTH_MASK (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT (4)
+#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
+ ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
+ ATTENTION_PARITY)
+
+/* Multi-bit attention fields start at this offset */
+#define ATTENTION_OFFSET_MASK (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT (12)
+ unsigned int flags;
+
+	/* Callback to invoke when the attention is triggered */
+ int (*cb)(struct qed_hwfn *p_hwfn);
+
+ enum block_id block_index;
+};
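With the encoding above, the length macros work out as follows (derivable directly from the definitions):

/* ATTENTION_SINGLE  == 0x10 -> ATTENTION_LENGTH() == 1
 * ATTENTION_PAR     == 0x11 -> length 1, ATTENTION_PARITY set
 * ATTENTION_PAR_INT == 0x21 -> length 2, ATTENTION_PARITY set
 */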
+
+struct aeu_invert_reg {
+ struct aeu_invert_reg_bit bits[32];
+};
+
+#define MAX_ATTN_GRPS (8)
+#define NUM_ATTN_REGS (9)
+
+/* HW Attention register */
+struct attn_hw_reg {
+ u16 reg_idx; /* Index of this register in its block */
+	u16 num_of_bits;  /* Number of valid attention bits */
+ u32 sts_addr; /* Address of the STS register */
+ u32 sts_clr_addr; /* Address of the STS_CLR register */
+ u32 sts_wr_addr; /* Address of the STS_WR register */
+ u32 mask_addr; /* Address of the MASK register */
+};
+
+/* HW block attention registers */
+struct attn_hw_regs {
+ u16 num_of_int_regs; /* Number of interrupt regs */
+ u16 num_of_prty_regs; /* Number of parity regs */
+ struct attn_hw_reg **int_regs; /* interrupt regs */
+ struct attn_hw_reg **prty_regs; /* parity regs */
+};
+
+/* HW block: name plus its attention register tables */
+struct attn_hw_block {
+ const char *name; /* Block name */
+ struct attn_hw_regs chip_regs[1];
+};
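A hedged sketch of how these per-block tables are typically walked; the helper is hypothetical, qed_rd() and DP_NOTICE() are accessors already used in this driver, and the read-to-clear semantics of STS_CLR is an assumption:

static void qed_scan_block_ints(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				const struct attn_hw_block *p_block)
{
	const struct attn_hw_regs *p_regs = &p_block->chip_regs[0];
	u16 i;

	for (i = 0; i < p_regs->num_of_int_regs; i++) {
		struct attn_hw_reg *p_reg = p_regs->int_regs[i];

		/* Reading STS_CLR returns the pending bits and clears them. */
		u32 sts = qed_rd(p_hwfn, p_ptt, p_reg->sts_clr_addr);

		if (sts)
			DP_NOTICE(p_hwfn, "%s int%d status 0x%08x\n",
				  p_block->name, p_reg->reg_idx, sts);
	}
}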
+
+static struct attn_hw_reg grc_int0_bb_b0 = {
+ 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184};
+
+static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
+ &grc_int0_bb_b0};
+
+static struct attn_hw_reg grc_prty1_bb_b0 = {
+ 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204};
+
+static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
+ &grc_prty1_bb_b0};
+
+static struct attn_hw_reg miscs_int0_bb_b0 = {
+ 0, 3, 0x9180, 0x918c, 0x9188, 0x9184};
+
+static struct attn_hw_reg miscs_int1_bb_b0 = {
+ 1, 11, 0x9190, 0x919c, 0x9198, 0x9194};
+
+static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
+ &miscs_int0_bb_b0, &miscs_int1_bb_b0};
+
+static struct attn_hw_reg miscs_prty0_bb_b0 = {
+ 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4};
+
+static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
+ &miscs_prty0_bb_b0};
+
+static struct attn_hw_reg misc_int0_bb_b0 = {
+ 0, 1, 0x8180, 0x818c, 0x8188, 0x8184};
+
+static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
+ &misc_int0_bb_b0};
+
+static struct attn_hw_reg pglue_b_int0_bb_b0 = {
+ 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184};
+
+static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
+ &pglue_b_int0_bb_b0};
+
+static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
+ 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194};
+
+static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
+ 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204};
+
+static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
+ &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0};
+
+static struct attn_hw_reg cnig_int0_bb_b0 = {
+ 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec};
+
+static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
+ &cnig_int0_bb_b0};
+
+static struct attn_hw_reg cnig_prty0_bb_b0 = {
+ 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c};
+
+static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
+ &cnig_prty0_bb_b0};
+
+static struct attn_hw_reg cpmu_int0_bb_b0 = {
+ 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4};
+
+static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
+ &cpmu_int0_bb_b0};
+
+static struct attn_hw_reg ncsi_int0_bb_b0 = {
+ 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0};
+
+static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
+ &ncsi_int0_bb_b0};
+
+static struct attn_hw_reg ncsi_prty1_bb_b0 = {
+ 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004};
+
+static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
+ &ncsi_prty1_bb_b0};
+
+static struct attn_hw_reg opte_prty1_bb_b0 = {
+ 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004};
+
+static struct attn_hw_reg opte_prty0_bb_b0 = {
+ 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c};
+
+static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
+ &opte_prty1_bb_b0, &opte_prty0_bb_b0};
+
+static struct attn_hw_reg bmb_int0_bb_b0 = {
+ 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4};
+
+static struct attn_hw_reg bmb_int1_bb_b0 = {
+ 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc};
+
+static struct attn_hw_reg bmb_int2_bb_b0 = {
+ 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4};
+
+static struct attn_hw_reg bmb_int3_bb_b0 = {
+ 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c};
+
+static struct attn_hw_reg bmb_int4_bb_b0 = {
+ 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124};
+
+static struct attn_hw_reg bmb_int5_bb_b0 = {
+ 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c};
+
+static struct attn_hw_reg bmb_int6_bb_b0 = {
+ 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154};
+
+static struct attn_hw_reg bmb_int7_bb_b0 = {
+ 7, 32, 0x540168, 0x540174, 0x540170, 0x54016c};
+
+static struct attn_hw_reg bmb_int8_bb_b0 = {
+ 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188};
+
+static struct attn_hw_reg bmb_int9_bb_b0 = {
+ 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0};
+
+static struct attn_hw_reg bmb_int10_bb_b0 = {
+ 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8};
+
+static struct attn_hw_reg bmb_int11_bb_b0 = {
+ 11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0};
+
+static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
+ &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
+ &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
+ &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0};
+
+static struct attn_hw_reg bmb_prty0_bb_b0 = {
+ 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0};
+
+static struct attn_hw_reg bmb_prty1_bb_b0 = {
+ 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404};
+
+static struct attn_hw_reg bmb_prty2_bb_b0 = {
+ 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414};
+
+static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
+ &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0};
+
+static struct attn_hw_reg pcie_prty1_bb_b0 = {
+ 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004};
+
+static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
+ &pcie_prty1_bb_b0};
+
+static struct attn_hw_reg mcp2_prty0_bb_b0 = {
+ 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044};
+
+static struct attn_hw_reg mcp2_prty1_bb_b0 = {
+ 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208};
+
+static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
+ &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0};
+
+static struct attn_hw_reg pswhst_int0_bb_b0 = {
+ 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184};
+
+static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
+ &pswhst_int0_bb_b0};
+
+static struct attn_hw_reg pswhst_prty0_bb_b0 = {
+ 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194};
+
+static struct attn_hw_reg pswhst_prty1_bb_b0 = {
+ 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204};
+
+static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
+ &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0};
+
+static struct attn_hw_reg pswhst2_int0_bb_b0 = {
+ 0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184};
+
+static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
+ &pswhst2_int0_bb_b0};
+
+static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
+ 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194};
+
+static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
+ &pswhst2_prty0_bb_b0};
+
+static struct attn_hw_reg pswrd_int0_bb_b0 = {
+ 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184};
+
+static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
+ &pswrd_int0_bb_b0};
+
+static struct attn_hw_reg pswrd_prty0_bb_b0 = {
+ 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194};
+
+static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
+ &pswrd_prty0_bb_b0};
+
+static struct attn_hw_reg pswrd2_int0_bb_b0 = {
+ 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184};
+
+static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
+ &pswrd2_int0_bb_b0};
+
+static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
+ 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194};
+
+static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
+ 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204};
+
+static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
+ 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214};
+
+static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
+ &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0};
+
+static struct attn_hw_reg pswwr_int0_bb_b0 = {
+ 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184};
+
+static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
+ &pswwr_int0_bb_b0};
+
+static struct attn_hw_reg pswwr_prty0_bb_b0 = {
+ 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194};
+
+static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
+ &pswwr_prty0_bb_b0};
+
+static struct attn_hw_reg pswwr2_int0_bb_b0 = {
+ 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184};
+
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
+ &pswwr2_int0_bb_b0};
+
+static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
+ 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194};
+
+static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
+ 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204};
+
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
+ 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214};
+
+static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
+ 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224};
+
+static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
+ 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234};
+
+static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
+ &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
+ &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0};
+
+static struct attn_hw_reg pswrq_int0_bb_b0 = {
+ 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184};
+
+static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
+ &pswrq_int0_bb_b0};
+
+static struct attn_hw_reg pswrq_prty0_bb_b0 = {
+ 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194};
+
+static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
+ &pswrq_prty0_bb_b0};
+
+static struct attn_hw_reg pswrq2_int0_bb_b0 = {
+ 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184};
+
+static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
+ &pswrq2_int0_bb_b0};
+
+static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
+ 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204};
+
+static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
+ &pswrq2_prty1_bb_b0};
+
+static struct attn_hw_reg pglcs_int0_bb_b0 = {
+ 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04};
+
+static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
+ &pglcs_int0_bb_b0};
+
+static struct attn_hw_reg dmae_int0_bb_b0 = {
+ 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184};
+
+static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
+ &dmae_int0_bb_b0};
+
+static struct attn_hw_reg dmae_prty1_bb_b0 = {
+ 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204};
+
+static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
+ &dmae_prty1_bb_b0};
+
+static struct attn_hw_reg ptu_int0_bb_b0 = {
+ 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184};
+
+static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
+ &ptu_int0_bb_b0};
+
+static struct attn_hw_reg ptu_prty1_bb_b0 = {
+ 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204};
+
+static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
+ &ptu_prty1_bb_b0};
+
+static struct attn_hw_reg tcm_int0_bb_b0 = {
+ 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184};
+
+static struct attn_hw_reg tcm_int1_bb_b0 = {
+ 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194};
+
+static struct attn_hw_reg tcm_int2_bb_b0 = {
+ 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4};
+
+static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
+ &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0};
+
+static struct attn_hw_reg tcm_prty1_bb_b0 = {
+ 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204};
+
+static struct attn_hw_reg tcm_prty2_bb_b0 = {
+ 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214};
+
+static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
+ &tcm_prty1_bb_b0, &tcm_prty2_bb_b0};
+
+static struct attn_hw_reg mcm_int0_bb_b0 = {
+ 0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184};
+
+static struct attn_hw_reg mcm_int1_bb_b0 = {
+ 1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194};
+
+static struct attn_hw_reg mcm_int2_bb_b0 = {
+ 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4};
+
+static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
+ &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0};
+
+static struct attn_hw_reg mcm_prty1_bb_b0 = {
+ 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204};
+
+static struct attn_hw_reg mcm_prty2_bb_b0 = {
+ 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214};
+
+static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
+ &mcm_prty1_bb_b0, &mcm_prty2_bb_b0};
+
+static struct attn_hw_reg ucm_int0_bb_b0 = {
+ 0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184};
+
+static struct attn_hw_reg ucm_int1_bb_b0 = {
+ 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194};
+
+static struct attn_hw_reg ucm_int2_bb_b0 = {
+ 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4};
+
+static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
+ &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0};
+
+static struct attn_hw_reg ucm_prty1_bb_b0 = {
+ 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204};
+
+static struct attn_hw_reg ucm_prty2_bb_b0 = {
+ 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214};
+
+static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
+ &ucm_prty1_bb_b0, &ucm_prty2_bb_b0};
+
+static struct attn_hw_reg xcm_int0_bb_b0 = {
+ 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184};
+
+static struct attn_hw_reg xcm_int1_bb_b0 = {
+ 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194};
+
+static struct attn_hw_reg xcm_int2_bb_b0 = {
+ 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4};
+
+static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
+ &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0};
+
+static struct attn_hw_reg xcm_prty1_bb_b0 = {
+ 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204};
+
+static struct attn_hw_reg xcm_prty2_bb_b0 = {
+ 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214};
+
+static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
+ &xcm_prty1_bb_b0, &xcm_prty2_bb_b0};
+
+static struct attn_hw_reg ycm_int0_bb_b0 = {
+ 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184};
+
+static struct attn_hw_reg ycm_int1_bb_b0 = {
+ 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194};
+
+static struct attn_hw_reg ycm_int2_bb_b0 = {
+ 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4};
+
+static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
+ &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0};
+
+static struct attn_hw_reg ycm_prty1_bb_b0 = {
+ 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204};
+
+static struct attn_hw_reg ycm_prty2_bb_b0 = {
+ 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214};
+
+static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
+ &ycm_prty1_bb_b0, &ycm_prty2_bb_b0};
+
+static struct attn_hw_reg pcm_int0_bb_b0 = {
+ 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184};
+
+static struct attn_hw_reg pcm_int1_bb_b0 = {
+ 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194};
+
+static struct attn_hw_reg pcm_int2_bb_b0 = {
+ 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4};
+
+static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
+ &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0};
+
+static struct attn_hw_reg pcm_prty1_bb_b0 = {
+ 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204};
+
+static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
+ &pcm_prty1_bb_b0};
+
+static struct attn_hw_reg qm_int0_bb_b0 = {
+ 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184};
+
+static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
+ &qm_int0_bb_b0};
+
+static struct attn_hw_reg qm_prty0_bb_b0 = {
+ 0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194};
+
+static struct attn_hw_reg qm_prty1_bb_b0 = {
+ 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204};
+
+static struct attn_hw_reg qm_prty2_bb_b0 = {
+ 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214};
+
+static struct attn_hw_reg qm_prty3_bb_b0 = {
+ 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224};
+
+static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
+ &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0};
+
+static struct attn_hw_reg tm_int0_bb_b0 = {
+ 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184};
+
+static struct attn_hw_reg tm_int1_bb_b0 = {
+ 1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194};
+
+static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
+ &tm_int0_bb_b0, &tm_int1_bb_b0};
+
+static struct attn_hw_reg tm_prty1_bb_b0 = {
+ 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204};
+
+static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
+ &tm_prty1_bb_b0};
+
+static struct attn_hw_reg dorq_int0_bb_b0 = {
+ 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184};
+
+static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
+ &dorq_int0_bb_b0};
+
+static struct attn_hw_reg dorq_prty0_bb_b0 = {
+ 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194};
+
+static struct attn_hw_reg dorq_prty1_bb_b0 = {
+ 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204};
+
+static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
+ &dorq_prty0_bb_b0, &dorq_prty1_bb_b0};
+
+static struct attn_hw_reg brb_int0_bb_b0 = {
+ 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4};
+
+static struct attn_hw_reg brb_int1_bb_b0 = {
+ 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc};
+
+static struct attn_hw_reg brb_int2_bb_b0 = {
+ 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4};
+
+static struct attn_hw_reg brb_int3_bb_b0 = {
+ 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c};
+
+static struct attn_hw_reg brb_int4_bb_b0 = {
+ 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124};
+
+static struct attn_hw_reg brb_int5_bb_b0 = {
+ 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c};
+
+static struct attn_hw_reg brb_int6_bb_b0 = {
+ 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154};
+
+static struct attn_hw_reg brb_int7_bb_b0 = {
+ 7, 32, 0x340168, 0x340174, 0x340170, 0x34016c};
+
+static struct attn_hw_reg brb_int8_bb_b0 = {
+ 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188};
+
+static struct attn_hw_reg brb_int9_bb_b0 = {
+ 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0};
+
+static struct attn_hw_reg brb_int10_bb_b0 = {
+ 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8};
+
+static struct attn_hw_reg brb_int11_bb_b0 = {
+ 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0};
+
+static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
+ &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
+ &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
+ &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0};
+
+static struct attn_hw_reg brb_prty0_bb_b0 = {
+ 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0};
+
+static struct attn_hw_reg brb_prty1_bb_b0 = {
+ 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404};
+
+static struct attn_hw_reg brb_prty2_bb_b0 = {
+ 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414};
+
+static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
+ &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0};
+
+static struct attn_hw_reg src_int0_bb_b0 = {
+ 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4};
+
+static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
+ &src_int0_bb_b0};
+
+static struct attn_hw_reg prs_int0_bb_b0 = {
+ 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044};
+
+static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
+ &prs_int0_bb_b0};
+
+static struct attn_hw_reg prs_prty0_bb_b0 = {
+ 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054};
+
+static struct attn_hw_reg prs_prty1_bb_b0 = {
+ 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208};
+
+static struct attn_hw_reg prs_prty2_bb_b0 = {
+ 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218};
+
+static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
+ &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0};
+
+static struct attn_hw_reg tsdm_int0_bb_b0 = {
+ 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044};
+
+static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
+ &tsdm_int0_bb_b0};
+
+static struct attn_hw_reg tsdm_prty1_bb_b0 = {
+ 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204};
+
+static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
+ &tsdm_prty1_bb_b0};
+
+static struct attn_hw_reg msdm_int0_bb_b0 = {
+ 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044};
+
+static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
+ &msdm_int0_bb_b0};
+
+static struct attn_hw_reg msdm_prty1_bb_b0 = {
+ 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204};
+
+static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
+ &msdm_prty1_bb_b0};
+
+static struct attn_hw_reg usdm_int0_bb_b0 = {
+ 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044};
+
+static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
+ &usdm_int0_bb_b0};
+
+static struct attn_hw_reg usdm_prty1_bb_b0 = {
+ 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204};
+
+static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
+ &usdm_prty1_bb_b0};
+
+static struct attn_hw_reg xsdm_int0_bb_b0 = {
+ 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044};
+
+static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
+ &xsdm_int0_bb_b0};
+
+static struct attn_hw_reg xsdm_prty1_bb_b0 = {
+ 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204};
+
+static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
+ &xsdm_prty1_bb_b0};
+
+static struct attn_hw_reg ysdm_int0_bb_b0 = {
+ 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044};
+
+static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
+ &ysdm_int0_bb_b0};
+
+static struct attn_hw_reg ysdm_prty1_bb_b0 = {
+ 0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204};
+
+static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
+ &ysdm_prty1_bb_b0};
+
+static struct attn_hw_reg psdm_int0_bb_b0 = {
+ 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044};
+
+static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
+ &psdm_int0_bb_b0};
+
+static struct attn_hw_reg psdm_prty1_bb_b0 = {
+ 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204};
+
+static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
+ &psdm_prty1_bb_b0};
+
+static struct attn_hw_reg tsem_int0_bb_b0 = {
+ 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044};
+
+static struct attn_hw_reg tsem_int1_bb_b0 = {
+ 1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044};
+
+static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
+ &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg tsem_prty0_bb_b0 = {
+ 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc};
+
+static struct attn_hw_reg tsem_prty1_bb_b0 = {
+ 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204};
+
+static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
+ &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
+ &tsem_fast_memory_vfc_config_prty1_bb_b0};
+
+static struct attn_hw_reg msem_int0_bb_b0 = {
+ 0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044};
+
+static struct attn_hw_reg msem_int1_bb_b0 = {
+ 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044};
+
+static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
+ &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg msem_prty0_bb_b0 = {
+ 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc};
+
+static struct attn_hw_reg msem_prty1_bb_b0 = {
+ 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204};
+
+static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
+ &msem_prty0_bb_b0, &msem_prty1_bb_b0};
+
+static struct attn_hw_reg usem_int0_bb_b0 = {
+ 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044};
+
+static struct attn_hw_reg usem_int1_bb_b0 = {
+ 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044};
+
+static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
+ &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg usem_prty0_bb_b0 = {
+ 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc};
+
+static struct attn_hw_reg usem_prty1_bb_b0 = {
+ 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204};
+
+static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
+ &usem_prty0_bb_b0, &usem_prty1_bb_b0};
+
+static struct attn_hw_reg xsem_int0_bb_b0 = {
+ 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044};
+
+static struct attn_hw_reg xsem_int1_bb_b0 = {
+ 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044};
+
+static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
+ &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg xsem_prty0_bb_b0 = {
+ 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc};
+
+static struct attn_hw_reg xsem_prty1_bb_b0 = {
+ 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204};
+
+static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
+ &xsem_prty0_bb_b0, &xsem_prty1_bb_b0};
+
+static struct attn_hw_reg ysem_int0_bb_b0 = {
+ 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044};
+
+static struct attn_hw_reg ysem_int1_bb_b0 = {
+ 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044};
+
+static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
+ &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg ysem_prty0_bb_b0 = {
+ 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc};
+
+static struct attn_hw_reg ysem_prty1_bb_b0 = {
+ 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204};
+
+static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
+ &ysem_prty0_bb_b0, &ysem_prty1_bb_b0};
+
+static struct attn_hw_reg psem_int0_bb_b0 = {
+ 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044};
+
+static struct attn_hw_reg psem_int1_bb_b0 = {
+ 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
+ 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044};
+
+static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
+ &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0};
+
+static struct attn_hw_reg psem_prty0_bb_b0 = {
+ 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc};
+
+static struct attn_hw_reg psem_prty1_bb_b0 = {
+ 1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
+ 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204};
+
+static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
+ &psem_prty0_bb_b0, &psem_prty1_bb_b0,
+ &psem_fast_memory_vfc_config_prty1_bb_b0};
+
+static struct attn_hw_reg rss_int0_bb_b0 = {
+ 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984};
+
+static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
+ &rss_int0_bb_b0};
+
+static struct attn_hw_reg rss_prty1_bb_b0 = {
+ 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04};
+
+static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
+ &rss_prty1_bb_b0};
+
+static struct attn_hw_reg tmld_int0_bb_b0 = {
+ 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184};
+
+static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
+ &tmld_int0_bb_b0};
+
+static struct attn_hw_reg tmld_prty1_bb_b0 = {
+ 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204};
+
+static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
+ &tmld_prty1_bb_b0};
+
+static struct attn_hw_reg muld_int0_bb_b0 = {
+ 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184};
+
+static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
+ &muld_int0_bb_b0};
+
+static struct attn_hw_reg muld_prty1_bb_b0 = {
+ 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204};
+
+static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
+ &muld_prty1_bb_b0};
+
+static struct attn_hw_reg yuld_int0_bb_b0 = {
+ 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184};
+
+static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
+ &yuld_int0_bb_b0};
+
+static struct attn_hw_reg yuld_prty1_bb_b0 = {
+ 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204};
+
+static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
+ &yuld_prty1_bb_b0};
+
+static struct attn_hw_reg xyld_int0_bb_b0 = {
+ 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184};
+
+static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
+ &xyld_int0_bb_b0};
+
+static struct attn_hw_reg xyld_prty1_bb_b0 = {
+ 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204};
+
+static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
+ &xyld_prty1_bb_b0};
+
+static struct attn_hw_reg prm_int0_bb_b0 = {
+ 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044};
+
+static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
+ &prm_int0_bb_b0};
+
+static struct attn_hw_reg prm_prty0_bb_b0 = {
+ 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054};
+
+static struct attn_hw_reg prm_prty1_bb_b0 = {
+ 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204};
+
+static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
+ &prm_prty0_bb_b0, &prm_prty1_bb_b0};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
+ 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
+ &pbf_pb1_int0_bb_b0};
+
+static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
+ 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054};
+
+static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
+ &pbf_pb1_prty0_bb_b0};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
+ 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
+ &pbf_pb2_int0_bb_b0};
+
+static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
+ 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054};
+
+static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
+ &pbf_pb2_prty0_bb_b0};
+
+static struct attn_hw_reg rpb_int0_bb_b0 = {
+ 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044};
+
+static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
+ &rpb_int0_bb_b0};
+
+static struct attn_hw_reg rpb_prty0_bb_b0 = {
+ 0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054};
+
+static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
+ &rpb_prty0_bb_b0};
+
+static struct attn_hw_reg btb_int0_bb_b0 = {
+ 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4};
+
+static struct attn_hw_reg btb_int1_bb_b0 = {
+ 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc};
+
+static struct attn_hw_reg btb_int2_bb_b0 = {
+ 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4};
+
+static struct attn_hw_reg btb_int3_bb_b0 = {
+ 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c};
+
+static struct attn_hw_reg btb_int4_bb_b0 = {
+ 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124};
+
+static struct attn_hw_reg btb_int5_bb_b0 = {
+ 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c};
+
+static struct attn_hw_reg btb_int6_bb_b0 = {
+ 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154};
+
+static struct attn_hw_reg btb_int8_bb_b0 = {
+ 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188};
+
+static struct attn_hw_reg btb_int9_bb_b0 = {
+ 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0};
+
+static struct attn_hw_reg btb_int10_bb_b0 = {
+ 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8};
+
+static struct attn_hw_reg btb_int11_bb_b0 = {
+ 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0};
+
+static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
+ &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
+ &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
+ &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0};
+
+static struct attn_hw_reg btb_prty0_bb_b0 = {
+ 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0};
+
+static struct attn_hw_reg btb_prty1_bb_b0 = {
+ 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404};
+
+static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
+ &btb_prty0_bb_b0, &btb_prty1_bb_b0};
+
+static struct attn_hw_reg pbf_int0_bb_b0 = {
+ 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184};
+
+static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
+ &pbf_int0_bb_b0};
+
+static struct attn_hw_reg pbf_prty0_bb_b0 = {
+ 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194};
+
+static struct attn_hw_reg pbf_prty1_bb_b0 = {
+ 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204};
+
+static struct attn_hw_reg pbf_prty2_bb_b0 = {
+ 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214};
+
+static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
+ &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0};
+
+static struct attn_hw_reg rdif_int0_bb_b0 = {
+ 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184};
+
+static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
+ &rdif_int0_bb_b0};
+
+static struct attn_hw_reg rdif_prty0_bb_b0 = {
+ 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194};
+
+static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
+ &rdif_prty0_bb_b0};
+
+static struct attn_hw_reg tdif_int0_bb_b0 = {
+ 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184};
+
+static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
+ &tdif_int0_bb_b0};
+
+static struct attn_hw_reg tdif_prty0_bb_b0 = {
+ 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194};
+
+static struct attn_hw_reg tdif_prty1_bb_b0 = {
+ 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204};
+
+static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
+ &tdif_prty0_bb_b0, &tdif_prty1_bb_b0};
+
+static struct attn_hw_reg cdu_int0_bb_b0 = {
+ 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc};
+
+static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
+ &cdu_int0_bb_b0};
+
+static struct attn_hw_reg cdu_prty1_bb_b0 = {
+ 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204};
+
+static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
+ &cdu_prty1_bb_b0};
+
+static struct attn_hw_reg ccfc_int0_bb_b0 = {
+ 0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184};
+
+static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
+ &ccfc_int0_bb_b0};
+
+static struct attn_hw_reg ccfc_prty1_bb_b0 = {
+ 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204};
+
+static struct attn_hw_reg ccfc_prty0_bb_b0 = {
+ 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8};
+
+static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
+ &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0};
+
+static struct attn_hw_reg tcfc_int0_bb_b0 = {
+ 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184};
+
+static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
+ &tcfc_int0_bb_b0};
+
+static struct attn_hw_reg tcfc_prty1_bb_b0 = {
+ 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204};
+
+static struct attn_hw_reg tcfc_prty0_bb_b0 = {
+ 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8};
+
+static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
+ &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0};
+
+static struct attn_hw_reg igu_int0_bb_b0 = {
+ 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184};
+
+static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
+ &igu_int0_bb_b0};
+
+static struct attn_hw_reg igu_prty0_bb_b0 = {
+ 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194};
+
+static struct attn_hw_reg igu_prty1_bb_b0 = {
+ 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204};
+
+static struct attn_hw_reg igu_prty2_bb_b0 = {
+ 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214};
+
+static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
+ &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0};
+
+static struct attn_hw_reg cau_int0_bb_b0 = {
+ 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0};
+
+static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
+ &cau_int0_bb_b0};
+
+static struct attn_hw_reg cau_prty1_bb_b0 = {
+ 0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204};
+
+static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
+ &cau_prty1_bb_b0};
+
+static struct attn_hw_reg dbg_int0_bb_b0 = {
+ 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184};
+
+static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
+ &dbg_int0_bb_b0};
+
+static struct attn_hw_reg dbg_prty1_bb_b0 = {
+ 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204};
+
+static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
+ &dbg_prty1_bb_b0};
+
+static struct attn_hw_reg nig_int0_bb_b0 = {
+ 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044};
+
+static struct attn_hw_reg nig_int1_bb_b0 = {
+ 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054};
+
+static struct attn_hw_reg nig_int2_bb_b0 = {
+ 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064};
+
+static struct attn_hw_reg nig_int3_bb_b0 = {
+ 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074};
+
+static struct attn_hw_reg nig_int4_bb_b0 = {
+ 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084};
+
+static struct attn_hw_reg nig_int5_bb_b0 = {
+ 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094};
+
+static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
+ &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
+ &nig_int4_bb_b0, &nig_int5_bb_b0};
+
+static struct attn_hw_reg nig_prty0_bb_b0 = {
+ 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4};
+
+static struct attn_hw_reg nig_prty1_bb_b0 = {
+ 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204};
+
+static struct attn_hw_reg nig_prty2_bb_b0 = {
+ 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214};
+
+static struct attn_hw_reg nig_prty3_bb_b0 = {
+ 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224};
+
+static struct attn_hw_reg nig_prty4_bb_b0 = {
+ 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234};
+
+static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
+ &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0,
+ &nig_prty3_bb_b0, &nig_prty4_bb_b0};
+
+static struct attn_hw_reg ipc_int0_bb_b0 = {
+ 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510};
+
+static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
+ &ipc_int0_bb_b0};
+
+static struct attn_hw_reg ipc_prty0_bb_b0 = {
+ 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520};
+
+static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
+ &ipc_prty0_bb_b0};
+
+static struct attn_hw_block attn_blocks[] = {
+ {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } },
+ {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } },
+ {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } },
+ {"dbu", {{0, 0, NULL, NULL} } },
+ {"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs,
+ pglue_b_prty_bb_b0_regs} } },
+ {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } },
+ {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } },
+ {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } },
+ {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } },
+ {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } },
+ {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } },
+ {"mcp", {{0, 0, NULL, NULL} } },
+ {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } },
+ {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } },
+ {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs,
+ pswhst2_prty_bb_b0_regs} } },
+ {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } },
+ {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } },
+ {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } },
+ {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } },
+ {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } },
+ {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } },
+ {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } },
+ {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } },
+ {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } },
+ {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } },
+ {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } },
+ {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } },
+ {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } },
+ {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } },
+ {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } },
+ {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } },
+ {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } },
+ {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } },
+ {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } },
+ {"src", {{1, 0, src_int_bb_b0_regs, NULL} } },
+ {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } },
+ {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } },
+ {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } },
+ {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } },
+ {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } },
+ {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } },
+ {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } },
+ {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } },
+ {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } },
+ {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } },
+ {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } },
+ {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } },
+ {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } },
+ {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } },
+ {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } },
+ {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } },
+ {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } },
+ {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } },
+ {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } },
+ {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs,
+ pbf_pb1_prty_bb_b0_regs} } },
+ {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs,
+ pbf_pb2_prty_bb_b0_regs} } },
+ {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } },
+ {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } },
+ {"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } },
+ {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } },
+ {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } },
+ {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } },
+ {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } },
+ {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } },
+ {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } },
+ {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } },
+ {"umac", { {0, 0, NULL, NULL} } },
+ {"xmac", { {0, 0, NULL, NULL} } },
+ {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } },
+ {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } },
+ {"wol", { {0, 0, NULL, NULL} } },
+ {"bmbn", { {0, 0, NULL, NULL} } },
+ {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } },
+ {"nwm", { {0, 0, NULL, NULL} } },
+ {"nws", { {0, 0, NULL, NULL} } },
+ {"ms", { {0, 0, NULL, NULL} } },
+ {"phy_pcie", { {0, 0, NULL, NULL} } },
+ {"misc_aeu", { {0, 0, NULL, NULL} } },
+ {"bar0_map", { {0, 0, NULL, NULL} } },};
+
+/* Specific HW attention callbacks */
+static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+ /* This might occur on certain instances; log it once, then mask it */
+ DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
+ tmp);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
+ 0xffffffff);
+
+ return 0;
+}
+
+#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
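+/* A decoding sketch, assuming the common GET_FIELD() definition of
+ * (((val) >> NAME##_SHIFT) & NAME##_MASK): with the pairs above,
+ * GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID) extracts bits
+ * 14..17 of the details word, and GET_FIELD(data,
+ * ATTENTION_INCORRECT_ACCESS_BYTE_EN) extracts bits 18..25.
+ */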
+static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_VALID);
+
+ if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+ u32 addr, data, length;
+
+ addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+ data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_DATA);
+ length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+ DP_INFO(p_hwfn->cdev,
+ "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
+ addr, length,
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_VF_VALID),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_CLIENT),
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_BYTE_EN),
+ data);
+ }
+
+ return 0;
+}
+
+#define QED_GRC_ATTENTION_VALID_BIT (1 << 0)
+#define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff)
+#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0)
+#define QED_GRC_ATTENTION_RDWR_BIT (1 << 23)
+#define QED_GRC_ATTENTION_MASTER_MASK (0xf)
+#define QED_GRC_ATTENTION_MASTER_SHIFT (24)
+#define QED_GRC_ATTENTION_PF_MASK (0xf)
+#define QED_GRC_ATTENTION_PF_SHIFT (0)
+#define QED_GRC_ATTENTION_VF_MASK (0xff)
+#define QED_GRC_ATTENTION_VF_SHIFT (4)
+#define QED_GRC_ATTENTION_PRIV_MASK (0x3)
+#define QED_GRC_ATTENTION_PRIV_SHIFT (14)
+#define QED_GRC_ATTENTION_PRIV_VF (0)
+static const char *attn_master_to_str(u8 master)
+{
+ switch (master) {
+ case 1: return "PXP";
+ case 2: return "MCP";
+ case 3: return "MSDM";
+ case 4: return "PSDM";
+ case 5: return "YSDM";
+ case 6: return "USDM";
+ case 7: return "TSDM";
+ case 8: return "XSDM";
+ case 9: return "DBU";
+ case 10: return "DMAE";
+ default:
+ return "Unkown";
+ }
+}
+
+static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp, tmp2;
+
+ /* We've already cleared the timeout interrupt register, so we learn
+ * of interrupts via the validity register
+ */
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+ if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
+ goto out;
+
+ /* Read the GRC timeout information */
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+ tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+ DP_INFO(p_hwfn->cdev,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
+ GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
+ attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
+ GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
+ (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
+ QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+ GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
+
+out:
+ /* Regardless of anything else, clear the validity bit */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+ return 0;
+}
+
+#define PGLUE_ATTENTION_VALID (1 << 29)
+#define PGLUE_ATTENTION_RD_VALID (1 << 26)
+#define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf)
+#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19)
+#define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff)
+#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21)
+#define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23)
+#define PGLUE_ATTENTION_ICPL_VALID (1 << 23)
+#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
+#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
+static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp;
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal write by chip to [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_RD_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "Illegal read by chip from [%08x:%08x] blocked.\n"
+ " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+ : 0,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+ : 0);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ if (tmp & PGLUE_ATTENTION_ICPL_VALID)
+ DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp);
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
+ u32 addr_hi, addr_lo;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+ DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
+ }
+
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_ILT_VALID) {
+ u32 addr_hi, addr_lo, details;
+
+ addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+ details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+ DP_INFO(p_hwfn,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
+ }
+
+ /* Clear the indications */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+
+ return 0;
+}
+
+#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 reason;
+
+ reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
+ QED_DORQ_ATTENTION_REASON_MASK;
+ if (reason) {
+ u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS);
+
+ DP_INFO(p_hwfn->cdev,
+ "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+ qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS),
+ (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
+ GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
+ reason);
+ }
+
+ return -EINVAL;
+}
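+
+/* qed_dorq_attn_cb() deliberately returns -EINVAL: a non-zero callback
+ * result is treated as fatal, so qed_int_deassertion_aeu_bit() reads
+ * the non-clearing status address (leaving the drop visible to a later
+ * idle check) and masks the AEU source.
+ */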
+
+/* Note - aeu_invert_reg entries must be defined in the same bit order as the HW */
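+/* Each aeu_invert_reg_bit is {bit_name, flags, callback, block_index}.
+ * A name containing %d describes a run of ATTENTION_LENGTH(flags)
+ * consecutive bits, numbered from ATTENTION_OFFSET when one is given
+ * (e.g. "SW timers #%d" covers 8 bits starting at #1), and
+ * MAX_BLOCK_ID marks sources with no HW block registers to dump.
+ */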
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+ {
+ { /* After Invert 1 */
+ {"GPIO0 function%d",
+ (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 2 */
+ {"PGLUE config_space", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PGLUE misc_flr", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PGLUE B RBC", ATTENTION_PAR_INT,
+ qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
+ {"PGLUE misc_mctp", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
+ (1 << ATTENTION_OFFSET_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ {"PCIE glue/PXP VPD %d",
+ (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
+ }
+ },
+
+ {
+ { /* After Invert 3 */
+ {"General Attention %d",
+ (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 4 */
+ {"General Attention 32", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"General Attention %d",
+ (2 << ATTENTION_LENGTH_SHIFT) |
+ (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT),
+ NULL, BLOCK_CNIG},
+ {"MCP CPU", ATTENTION_SINGLE,
+ qed_mcp_attn_cb, MAX_BLOCK_ID},
+ {"MCP Watchdog timer", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"AVS stop status ready", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+ {"MSTAT per-path", ATTENTION_PAR_INT,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
+ {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
+ {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
+ {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
+ {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
+ }
+ },
+
+ {
+ { /* After Invert 5 */
+ {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
+ {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
+ {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
+ {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
+ {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
+ {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
+ {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
+ {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
+ {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
+ {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
+ {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
+ {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
+ {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
+ {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
+ {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
+ {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
+ }
+ },
+
+ {
+ { /* After Invert 6 */
+ {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
+ {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
+ {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
+ {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
+ {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
+ {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
+ {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
+ {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
+ {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
+ {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
+ {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
+ {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
+ {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
+ {"DORQ", ATTENTION_PAR_INT,
+ qed_dorq_attn_cb, BLOCK_DORQ},
+ {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
+ {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
+ }
+ },
+
+ {
+ { /* After Invert 7 */
+ {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
+ {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
+ {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
+ {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
+ {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+ {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
+ {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
+ {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
+ {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
+ {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
+ {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
+ {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
+ {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
+ {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
+ {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
+ {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
+ {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
+ }
+ },
+
+ {
+ { /* After Invert 8 */
+ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWRQ2},
+ {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
+ {"PSWWR (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWWR2},
+ {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
+ {"PSWRD (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWRD2},
+ {"PSWHST", ATTENTION_PAR_INT,
+ qed_pswhst_attn_cb, BLOCK_PSWHST},
+ {"PSWHST (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWHST2},
+ {"GRC", ATTENTION_PAR_INT,
+ qed_grc_attn_cb, BLOCK_GRC},
+ {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
+ {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
+ {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
+ {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
+ NULL, BLOCK_PGLCS},
+ {"PERST_B assertion", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PERST_B deassertion", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 9 */
+ {"MCP Latched memory", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad cache", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched ump_tx", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ }
+ },
+};
+
+#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct qed_sb_attn_info {
/* Virtual & Physical address of the SB */
struct atten_status_block *sb_attn;
- dma_addr_t sb_phys;
+ dma_addr_t sb_phys;
/* Last seen running index */
- u16 index;
+ u16 index;
+
+ /* A mask of the AEU bits resulting in a parity error */
+ u32 parity_mask[NUM_ATTN_REGS];
+
+ /* A pointer to the attention description structure */
+ struct aeu_invert_reg *p_aeu_desc;
/* Previously asserted attentions, which are still unasserted */
- u16 known_attn;
+ u16 known_attn;
/* Cleanup address for the link's general hw attention */
- u32 mfw_attn_addr;
+ u32 mfw_attn_addr;
};
static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
@@ -127,6 +1840,162 @@ static int qed_int_assertion(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn,
+ struct attn_hw_reg *p_reg_desc,
+ struct attn_hw_block *p_block,
+ enum qed_attention_type type,
+ u32 val, u32 mask)
+{
+ int j;
+
+ for (j = 0; j < p_reg_desc->num_of_bits; j++) {
+ if (!(val & (1 << j)))
+ continue;
+
+ DP_NOTICE(p_hwfn,
+ "%s (%s): reg %d [0x%08x], bit %d [%s]\n",
+ p_block->name,
+ type == QED_ATTN_TYPE_ATTN ? "Interrupt" :
+ "Parity",
+ p_reg_desc->reg_idx, p_reg_desc->sts_addr,
+ j, (mask & (1 << j)) ? " [MASKED]" : "");
+ }
+}
+
+/**
+ * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
+ * cause of the attention
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the attention
+ * @param aeu_en_reg - register offset of the AEU enable register which
+ * routes this bit to this group.
+ * @param bitmask - mask of the bit(s) within aeu_en_reg which caused
+ * the attention.
+ *
+ * @return int
+ */
+static int
+qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg,
+ u32 bitmask)
+{
+ int rc = -EINVAL;
+ u32 val;
+
+ DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+ p_aeu->bit_name, bitmask);
+
+ /* Call callback before clearing the interrupt status */
+ if (p_aeu->cb) {
+ DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+ p_aeu->bit_name);
+ rc = p_aeu->cb(p_hwfn);
+ }
+
+ /* Handle HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID) {
+ struct attn_hw_block *p_block;
+ u32 mask;
+ int i;
+
+ p_block = &attn_blocks[p_aeu->block_index];
+
+ /* Handle each interrupt register */
+ for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) {
+ struct attn_hw_reg *p_reg_desc;
+ u32 sts_addr;
+
+ p_reg_desc = p_block->chip_regs[0].int_regs[i];
+
+ /* In case of fatal attention, don't clear the status
+ * so that it shows up in a subsequent idle check.
+ */
+ if (rc == 0)
+ sts_addr = p_reg_desc->sts_clr_addr;
+ else
+ sts_addr = p_reg_desc->sts_addr;
+
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
+ mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->mask_addr);
+ qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+ p_block,
+ QED_ATTN_TYPE_ATTN,
+ val, mask);
+ }
+ }
+
+ /* If the attention is benign, no need to prevent it */
+ if (!rc)
+ goto out;
+
+ /* Prevent this Attention from being asserted in the future */
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
+ DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ p_aeu->bit_name);
+
+out:
+ return rc;
+}
+
+static void qed_int_parity_print(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ struct attn_hw_block *p_block,
+ u8 bit_index)
+{
+ int i;
+
+ for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) {
+ struct attn_hw_reg *p_reg_desc;
+ u32 val, mask;
+
+ p_reg_desc = p_block->chip_regs[0].prty_regs[i];
+
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->sts_clr_addr);
+ mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ p_reg_desc->mask_addr);
+ qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+ p_block,
+ QED_ATTN_TYPE_PARITY,
+ val, mask);
+ }
+}
+
+/**
+ * @brief qed_int_deassertion_parity - handle a single parity AEU source
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param bit_index
+ */
+static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u8 bit_index)
+{
+ u32 block_id = p_aeu->block_index;
+
+ DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n",
+ p_aeu->bit_name, bit_index);
+
+ if (block_id != MAX_BLOCK_ID) {
+ qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
+ bit_index);
+
+ /* In BB, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ qed_int_parity_print(p_hwfn, p_aeu,
+ &attn_blocks[BLOCK_OPTE],
+ bit_index);
+ qed_int_parity_print(p_hwfn, p_aeu,
+ &attn_blocks[BLOCK_MCP],
+ bit_index);
+ }
+ }
+}
+
/**
* @brief - handles deassertion of previously asserted attentions.
*
@@ -139,17 +2008,108 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
u16 deasserted_bits)
{
struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
- u32 aeu_mask;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+ u8 i, j, k, bit_idx;
+ int rc = 0;
+
+ /* Read the attention registers in the AEU */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_AFTER_INVERT_1_IGU +
+ i * 0x4);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Deasserted bits [%d]: %08x\n",
+ i, aeu_inv_arr[i]);
+ }
+
+ /* Find parity attentions first */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+ u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32));
+ u32 parities;
- if (deasserted_bits != 0x100)
- DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+ /* Skip register in which no parity bit is currently set */
+ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+ if (!parities)
+ continue;
+
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+ if ((p_bit->flags & ATTENTION_PARITY) &&
+ !!(parities & (1 << bit_idx)))
+ qed_int_deassertion_parity(p_hwfn, p_bit,
+ bit_idx);
+
+ bit_idx += ATTENTION_LENGTH(p_bit->flags);
+ }
+ }
+
+ /* Find non-parity cause for attention and act */
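+ /* The AEU enable registers are assumed to be laid out as
+ * NUM_ATTN_REGS consecutive u32s per attention group, so the
+ * aeu_en computation below selects enable register i of group k.
+ */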
+ for (k = 0; k < MAX_ATTN_GRPS; k++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ /* Handle only groups whose attention is currently deasserted */
+ if (!(deasserted_bits & (1 << k)))
+ continue;
+
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
+ u32 en, bits;
+
+ en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ bits = aeu_inv_arr[i] & en;
+
+ /* Skip if no bit from this group is currently set */
+ if (!bits)
+ continue;
+
+ /* Find all set bits in the current register that belong
+ * to the current group; these are the sources of the
+ * attention being deasserted.
+ */
+ for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ u8 bit, bit_len;
+ u32 bitmask;
+
+ p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+
+ /* No need to handle parity-only bits */
+ if (p_aeu->flags == ATTENTION_PAR)
+ continue;
+
+ bit = bit_idx;
+ bit_len = ATTENTION_LENGTH(p_aeu->flags);
+ if (p_aeu->flags & ATTENTION_PAR_INT) {
+ /* Skip Parity */
+ bit++;
+ bit_len--;
+ }
+
+ bitmask = bits & (((1 << bit_len) - 1) << bit);
+ if (bitmask) {
+ /* Handle source of the attention */
+ qed_int_deassertion_aeu_bit(p_hwfn,
+ p_aeu,
+ aeu_en,
+ bitmask);
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ }
+ }
/* Clear IGU indication for the deasserted bits */
DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD +
- ((IGU_CMD_ATTN_BIT_CLR_UPPER -
- IGU_CMD_INT_ACK_BASE) << 3),
- ~((u32)deasserted_bits));
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
/* Unmask deasserted attentions in IGU */
aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -160,7 +2120,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
/* Clear deassertion from inner state */
sb_attn_sw->known_attn &= ~deasserted_bits;
- return 0;
+ return rc;
}
static int qed_int_attentions(struct qed_hwfn *p_hwfn)
@@ -343,17 +2303,17 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie)
static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
{
- struct qed_dev *cdev = p_hwfn->cdev;
- struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
-
- if (p_sb) {
- if (p_sb->sb_attn)
- dma_free_coherent(&cdev->pdev->dev,
- SB_ATTN_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_attn,
- p_sb->sb_phys);
- kfree(p_sb);
- }
+ struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_attn)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_attn,
+ p_sb->sb_phys);
+ kfree(p_sb);
}
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
@@ -379,10 +2339,31 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
dma_addr_t sb_phy_addr)
{
struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+ int i, j, k;
sb_info->sb_attn = sb_virt_addr;
sb_info->sb_phys = sb_phy_addr;
+ /* Set the pointer to the AEU descriptors */
+ sb_info->p_aeu_desc = aeu_descs;
+
+ /* Calculate Parity Masks */
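+ /* Walk each register's bit descriptors: a descriptor spans
+ * ATTENTION_LENGTH(flags) bit positions, and each descriptor
+ * flagged ATTENTION_PARITY contributes its first position
+ * (bit k) to that register's parity mask.
+ */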
+ memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ /* j is array index, k is bit index */
+ for (j = 0, k = 0; k < 32; j++) {
+ unsigned int flags = aeu_descs[i].bits[j].flags;
+
+ if (flags & ATTENTION_PARITY)
+ sb_info->parity_mask[i] |= 1 << k;
+
+ k += ATTENTION_LENGTH(flags);
+ }
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Attn Mask [Reg %d]: 0x%08x\n",
+ i, sb_info->parity_mask[i]);
+ }
+
/* Set the address of cleanup for the mcp attention */
sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
MISC_REG_AEU_GENERAL_ATTN_0;
@@ -399,7 +2380,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
dma_addr_t p_phys = 0;
/* SB struct */
- p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
if (!p_sb) {
DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
return -ENOMEM;
@@ -433,6 +2414,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
u16 vf_number,
u8 vf_valid)
{
+ struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
memset(p_sb_entry, 0, sizeof(*p_sb_entry));
@@ -451,14 +2433,12 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
cau_state = CAU_HC_DISABLE_STATE;
- if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
cau_state = CAU_HC_ENABLE_STATE;
- if (!p_hwfn->cdev->rx_coalesce_usecs)
- p_hwfn->cdev->rx_coalesce_usecs =
- QED_CAU_DEF_RX_USECS;
- if (!p_hwfn->cdev->tx_coalesce_usecs)
- p_hwfn->cdev->tx_coalesce_usecs =
- QED_CAU_DEF_TX_USECS;
+ if (!cdev->rx_coalesce_usecs)
+ cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
+ if (!cdev->tx_coalesce_usecs)
+ cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
}
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
@@ -473,20 +2453,20 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
u8 vf_valid)
{
struct cau_sb_entry sb_entry;
- u32 val;
qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
vf_number, vf_valid);
if (p_hwfn->hw_init_done) {
- val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
- qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
- qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
- upper_32_bits(sb_phys));
-
- val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
- qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
- qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
+ /* Wide-bus, initialize via DMAE */
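+ /* CAU_REG_SB_ADDR_MEMORY/CAU_REG_SB_VAR_MEMORY are assumed to
+ * be 64-bit wide-bus entries, hence a single 2-dword DMAE
+ * transaction per entry rather than two 32-bit qed_wr() calls.
+ */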
+ u64 phys_addr = (u64)sb_phys;
+
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
+ CAU_REG_SB_ADDR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, 0);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
@@ -638,8 +2618,10 @@ int qed_int_sb_release(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- p_hwfn->sbs_info[sb_id] = NULL;
- p_hwfn->num_sbs--;
+ if (p_hwfn->sbs_info[sb_id] != NULL) {
+ p_hwfn->sbs_info[sb_id] = NULL;
+ p_hwfn->num_sbs--;
+ }
return 0;
}
@@ -648,14 +2630,15 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
{
struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
- if (p_sb) {
- if (p_sb->sb_info.sb_virt)
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- SB_ALIGNED_SIZE(p_hwfn),
- p_sb->sb_info.sb_virt,
- p_sb->sb_info.sb_phys);
- kfree(p_sb);
- }
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_info.sb_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys);
+ kfree(p_sb);
}
static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
@@ -666,7 +2649,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
void *p_virt;
/* SB struct */
- p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
if (!p_sb) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
return -ENOMEM;
@@ -692,25 +2675,6 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
-{
- if (!p_hwfn)
- return;
-
- if (p_hwfn->p_sp_sb)
- qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
- else
- DP_NOTICE(p_hwfn->cdev,
- "Failed to setup Slow path status block - NULL pointer\n");
-
- if (p_hwfn->p_sb_attn)
- qed_int_sb_attn_setup(p_hwfn, p_ptt);
- else
- DP_NOTICE(p_hwfn->cdev,
- "Failed to setup attentions status block - NULL pointer\n");
-}
-
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
void *cookie,
@@ -718,36 +2682,36 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
__le16 **p_fw_cons)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
- int qed_status = -ENOMEM;
+ int rc = -ENOMEM;
u8 pi;
/* Look for a free index */
for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
- if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
- p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
- p_sp_sb->pi_info_arr[pi].cookie = cookie;
- *sb_idx = pi;
- *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
- qed_status = 0;
- break;
- }
+ if (p_sp_sb->pi_info_arr[pi].comp_cb)
+ continue;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ rc = 0;
+ break;
}
- return qed_status;
+ return rc;
}
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
{
struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
- int qed_status = -ENOMEM;
- if (p_sp_sb->pi_info_arr[pi].comp_cb) {
- p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
- p_sp_sb->pi_info_arr[pi].cookie = NULL;
- qed_status = 0;
- }
+ if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
+ return -ENOMEM;
- return qed_status;
+ p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = NULL;
+
+ return 0;
}
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
@@ -786,16 +2750,13 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_int_mode int_mode)
{
- int rc, i;
-
- /* Mask non-link attentions */
- for (i = 0; i < 9; i++)
- qed_wr(p_hwfn, p_ptt,
- MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+ int rc = 0;
- /* Configure AEU signal change to produce attentions for link */
+ /* Configure AEU signal change to produce attentions */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
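+ /* Attention enable is presumably dropped to 0 while the edge
+ * latches are re-armed so that no stale edge fires, then restored.
+ */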
/* Flush the writes to IGU */
mmiowb();
@@ -937,6 +2898,39 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
}
}
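+
+/* Helper split out of qed_int_igu_read_cam(): read one IGU CAM line
+ * (one u32 per status block) and, for a valid line, cache the owning
+ * function id, PF/VF flag and vector number in the per-SB igu_block.
+ */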
+static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 sb_id)
+{
+ u32 val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * sb_id);
+ struct qed_igu_block *p_block;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+
+ /* stop scanning when hitting the first invalid PF entry */
+ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+ GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+ goto out;
+
+ /* Fill the block information */
+ p_block->status = QED_IGU_STATUS_VALID;
+ p_block->function_id = GET_FIELD(val,
+ IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val,
+ IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ sb_id, val, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
+out:
+ return val;
+}
+
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -946,7 +2940,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
u16 sb_id;
u16 prev_sb_id = 0xFF;
- p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
+ p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
if (!p_hwfn->hw_info.p_igu_info)
return -ENOMEM;
@@ -963,26 +2957,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
- val = qed_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
/* stop scanning when hit first invalid PF entry */
if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
break;
- blk->status = QED_IGU_STATUS_VALID;
- blk->function_id = GET_FIELD(val,
- IGU_MAPPING_LINE_FUNCTION_NUMBER);
- blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
- blk->vector_number = GET_FIELD(val,
- IGU_MAPPING_LINE_VECTOR_NUMBER);
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
- val, blk->function_id, blk->is_pf,
- blk->vector_number);
-
if (blk->is_pf) {
if (blk->function_id == p_hwfn->rel_pf_id) {
blk->status |= QED_IGU_STATUS_PF;
@@ -1072,7 +3053,7 @@ static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
{
- p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
+ p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
if (!p_hwfn->sp_dpc)
return -ENOMEM;
@@ -1117,22 +3098,22 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
- qed_int_sp_sb_setup(p_hwfn, p_ptt);
+ qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
qed_int_sp_dpc_setup(p_hwfn);
}
-int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
- int *p_iov_blks)
+void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info)
{
struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
- if (!info)
- return 0;
-
- if (p_iov_blks)
- *p_iov_blks = info->free_blks;
+ if (!info || !p_sb_cnt_info)
+ return;
- return info->igu_sb_cnt;
+ p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
+ p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
+ p_sb_cnt_info->sb_free_blk = info->free_blks;
}
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 51e0b09a7f47..c57f2e680770 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -161,12 +161,12 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie);
- * blocks configured for this funciton in the igu.
+ * blocks configured for this function in the igu.
*
* @param p_hwfn
- * @param p_iov_blks - configured free blks for vfs
+ * @param p_sb_cnt_info - output structure receiving the SB counts
*
- * @return int - number of status blocks configured
+ * @return - void; the counts are returned through p_sb_cnt_info
*/
-int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
- int *p_iov_blks);
+void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info);
/**
* @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index f72036a2ef5b..3f35c6ca9252 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -31,6 +31,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
@@ -124,52 +125,65 @@ struct qed_sp_vport_update_params {
u8 update_vport_active_tx_flg;
u8 vport_active_tx_flg;
u8 update_approx_mcast_flg;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
unsigned long bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
};
+enum qed_tpa_mode {
+ QED_TPA_MODE_NONE,
+ QED_TPA_MODE_UNUSED,
+ QED_TPA_MODE_GRO,
+ QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+ enum qed_tpa_mode tpa_mode;
+ bool remove_inner_vlan;
+ bool drop_ttl0;
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id;
+ u16 mtu;
+};
+
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- u32 concrete_fid,
- u16 opaque_fid,
- u8 vport_id,
- u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg)
+ struct qed_sp_vport_start_params *p_params)
{
- struct qed_sp_init_request_params params;
struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
int rc = -EINVAL;
u16 rx_mode = 0;
u8 abs_vport_id = 0;
- rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
return rc;
- memset(&params, 0, sizeof(params));
- params.ramrod_data_size = sizeof(*p_ramrod);
- params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- qed_spq_get_cid(p_hwfn),
- opaque_fid,
ETH_RAMROD_VPORT_START,
- PROTOCOLID_ETH,
- &params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.vport_start;
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->mtu = cpu_to_le16(mtu);
- p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
- p_ramrod->drop_ttl0_en = drop_ttl0_flg;
+ p_ramrod->mtu = cpu_to_le16(p_params->mtu);
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+ p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -180,9 +194,26 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
memset(&p_ramrod->tpa_param, 0,
sizeof(struct eth_vport_tpa_param));
+ p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+
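+ /* For GRO, the aggregation size is left unbounded ((u16)-1), an
+ * aggregation starts or continues once half an MTU is buffered,
+ * and both IPv4 and IPv6 flows are eligible, with packet-split
+ * and GRO-consistent aggregation enabled.
+ */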
+ switch (p_params->tpa_mode) {
+ case QED_TPA_MODE_GRO:
+ p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ p_ramrod->tpa_param.tpa_max_size = (u16)-1;
+ p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
+ p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+ p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
+ p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+ break;
+ default:
+ break;
+ }
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
- concrete_fid);
+ p_params->concrete_fid);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -360,7 +391,7 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
{
struct qed_rss_params *p_rss_params = p_params->rss_params;
struct vport_update_ramrod_data_cmn *p_cmn;
- struct qed_sp_init_request_params sp_params;
+ struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
u8 abs_vport_id = 0;
@@ -370,17 +401,15 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
if (rc != 0)
return rc;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = comp_mode;
- sp_params.p_comp_data = p_comp_data;
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- qed_spq_get_cid(p_hwfn),
- p_params->opaque_fid,
ETH_RAMROD_VPORT_UPDATE,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -393,7 +422,9 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
-
+ p_cmn->accept_any_vlan = p_params->accept_any_vlan;
+ p_cmn->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/
@@ -412,8 +443,8 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u8 vport_id)
{
- struct qed_sp_init_request_params sp_params;
struct vport_stop_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
struct qed_spq_entry *p_ent;
u8 abs_vport_id = 0;
int rc;
@@ -422,16 +453,14 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
if (rc != 0)
return rc;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- qed_spq_get_cid(p_hwfn),
- opaque_fid,
ETH_RAMROD_VPORT_STOP,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -444,8 +473,10 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
static int qed_filter_accept_cmd(struct qed_dev *cdev,
u8 vport,
struct qed_filter_accept_flags accept_flags,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_sp_vport_update_params vport_update_params;
int i, rc;
@@ -454,6 +485,8 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
memset(&vport_update_params, 0, sizeof(vport_update_params));
vport_update_params.vport_id = vport;
vport_update_params.accept_flags = accept_flags;
+ vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+ vport_update_params.accept_any_vlan = accept_any_vlan;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -471,6 +504,10 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
"Accept filter configured, flags = [Rx]%x [Tx]%x\n",
accept_flags.rx_accept_filter,
accept_flags.tx_accept_filter);
+ if (update_accept_any_vlan)
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "accept_any_vlan=%d configured\n",
+ accept_any_vlan);
}
return 0;
@@ -502,8 +539,8 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_rx_cid;
u16 abs_rx_q_id = 0;
u8 abs_vport_id = 0;
@@ -528,15 +565,15 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
opaque_fid, cid, params->queue_id, params->vport_id,
params->sb);
- memset(&sp_params, 0, sizeof(params));
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = cid;
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- cid, opaque_fid,
ETH_RAMROD_RX_QUEUE_START,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -551,12 +588,10 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->complete_event_flg = 1;
p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
- p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr);
- p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
- p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr);
- p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
rc = qed_spq_post(p_hwfn, p_ent, NULL);
@@ -628,21 +663,20 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
{
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
u16 abs_rx_q_id = 0;
int rc = -EINVAL;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_rx_cid->cid;
+ init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- p_rx_cid->cid,
- p_rx_cid->opaque_fid,
ETH_RAMROD_RX_QUEUE_STOP,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -680,8 +714,8 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
union qed_qm_pq_params *p_pq_params)
{
struct tx_queue_start_ramrod_data *p_ramrod = NULL;
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_tx_cid;
u8 abs_vport_id;
int rc = -EINVAL;
@@ -696,15 +730,15 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = cid;
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
- rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
- opaque_fid,
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_TX_QUEUE_START,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -714,11 +748,9 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(p_params->sb);
p_ramrod->sb_index = p_params->sb_idx;
p_ramrod->stats_counter_id = stats_id;
- p_ramrod->tc = p_pq_params->eth.tc;
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
- p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
- p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
pq_id = qed_get_qm_pq(p_hwfn,
PROTOCOLID_ETH,
@@ -785,20 +817,19 @@ static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
u16 tx_queue_id)
{
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
int rc = -EINVAL;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
- sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_tx_cid->cid;
+ init_data.opaque_fid = p_tx_cid->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- p_tx_cid->cid,
- p_tx_cid->opaque_fid,
ETH_RAMROD_TX_QUEUE_STOP,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -821,9 +852,8 @@ qed_filter_action(enum qed_filter_opcode opcode)
case QED_FILTER_REMOVE:
action = ETH_FILTER_ACTION_REMOVE;
break;
- case QED_FILTER_REPLACE:
case QED_FILTER_FLUSH:
- action = ETH_FILTER_ACTION_REPLACE;
+ action = ETH_FILTER_ACTION_REMOVE_ALL;
break;
default:
action = MAX_ETH_FILTER_ACTION;
@@ -856,9 +886,9 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
{
u8 vport_to_add_to = 0, vport_to_remove_from = 0;
struct vport_filter_update_ramrod_data *p_ramrod;
- struct qed_sp_init_request_params sp_params;
struct eth_filter_cmd *p_first_filter;
struct eth_filter_cmd *p_second_filter;
+ struct qed_sp_init_data init_data;
enum eth_filter_action action;
int rc;
@@ -872,17 +902,16 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(**pp_ramrod);
- sp_params.comp_mode = comp_mode;
- sp_params.p_comp_data = p_comp_data;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, pp_ent,
- qed_spq_get_cid(p_hwfn),
- opaque_fid,
ETH_RAMROD_FILTERS_UPDATE,
- PROTOCOLID_ETH,
- &sp_params);
+ PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -892,8 +921,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
switch (p_filter_cmd->opcode) {
- case QED_FILTER_FLUSH:
- p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+ case QED_FILTER_REPLACE:
case QED_FILTER_MOVE:
p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
default:
@@ -962,6 +990,12 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
p_second_filter->action = ETH_FILTER_ACTION_ADD;
p_second_filter->vport_id = vport_to_add_to;
+ } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
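+ /* REPLACE issues two commands in a single ramrod (cmd_cnt was
+ * set to 2 above): remove all existing filters on the vport,
+ * then add the new filter.
+ */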
+ p_first_filter->vport_id = vport_to_add_to;
+ memcpy(p_second_filter, p_first_filter,
+ sizeof(*p_second_filter));
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
} else {
action = qed_filter_action(p_filter_cmd->opcode);
@@ -1101,8 +1135,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
{
unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = NULL;
- struct qed_sp_init_request_params sp_params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
int rc, i;
@@ -1118,18 +1152,16 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return rc;
}
- memset(&sp_params, 0, sizeof(sp_params));
- sp_params.ramrod_data_size = sizeof(*p_ramrod);
- sp_params.comp_mode = comp_mode;
- sp_params.p_comp_data = p_comp_data;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
rc = qed_sp_init_request(p_hwfn, &p_ent,
- qed_spq_get_cid(p_hwfn),
- p_hwfn->hw_info.opaque_fid,
ETH_RAMROD_VPORT_UPDATE,
- PROTOCOLID_ETH,
- &sp_params);
-
+ PROTOCOLID_ETH, &init_data);
if (rc) {
DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
return rc;
@@ -1230,6 +1262,328 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
return rc;
}
+/* Statistics related code */
+static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+}
+
+static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_pstorm_per_queue_stat pstats;
+ u32 pstats_addr = 0, pstats_len = 0;
+
+ __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+ statistics_bin);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+ pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
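Each storm counter arrives as a 64-bit value split across a hi/lo register pair; HILO_64_REGPAIR stitches the halves back together. A minimal sketch of the macro, assuming the regpair layout from the qed HSI headers (the real definition lives in the qed headers):

    /* Sketch only, assuming struct regpair { __le32 lo; __le32 hi; } */
    #define HILO_64(hi, lo)           ((((u64)(hi)) << 32) + (u64)(lo))
    #define HILO_64_REGPAIR(regpair)  HILO_64((regpair).hi, (regpair).lo)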
+static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len)
+{
+ *p_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ *p_len = sizeof(struct tstorm_per_port_stat);
+}
+
+static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ u32 tstats_addr = 0, tstats_len = 0;
+ struct tstorm_per_port_stat tstats;
+
+ __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+
+ memset(&tstats, 0, sizeof(tstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+ tstats_addr, tstats_len);
+
+ p_stats->mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+}
+
+static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_ustorm_per_queue_stat ustats;
+ u32 ustats_addr = 0, ustats_len = 0;
+
+ __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+ statistics_bin);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+ ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len,
+ u16 statistics_bin)
+{
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+}
+
+static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats,
+ u16 statistics_bin)
+{
+ struct eth_mstorm_per_queue_stat mstats;
+ u32 mstats_addr = 0, mstats_len = 0;
+
+ __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+ statistics_bin);
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+ mstats_addr, mstats_len);
+
+ p_stats->no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats)
+{
+ struct port_stats port_stats;
+ int j;
+
+ memset(&port_stats, 0, sizeof(port_stats));
+
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, stats),
+ sizeof(port_stats));
+
+ p_stats->rx_64_byte_packets += port_stats.pmm.r64;
+ p_stats->rx_127_byte_packets += port_stats.pmm.r127;
+ p_stats->rx_255_byte_packets += port_stats.pmm.r255;
+ p_stats->rx_511_byte_packets += port_stats.pmm.r511;
+ p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+ p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+ p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+ p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+ p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+ p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+ p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ p_stats->rx_crc_errors += port_stats.pmm.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+ p_stats->rx_pause_frames += port_stats.pmm.rxpf;
+ p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
+ p_stats->rx_align_errors += port_stats.pmm.raln;
+ p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
+ p_stats->rx_oversize_packets += port_stats.pmm.rovr;
+ p_stats->rx_jabbers += port_stats.pmm.rjbr;
+ p_stats->rx_undersize_packets += port_stats.pmm.rund;
+ p_stats->rx_fragments += port_stats.pmm.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.pmm.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+ p_stats->tx_pause_frames += port_stats.pmm.txpf;
+ p_stats->tx_pfc_frames += port_stats.pmm.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+ p_stats->tx_total_collisions += port_stats.pmm.tncl;
+ p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+ p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+ for (j = 0; j < 8; j++) {
+ p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ }
+}
+
+static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *stats,
+ u16 statistics_bin)
+{
+ __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+ if (p_hwfn->mcp_info)
+ __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+static void _qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u8 fw_vport = 0;
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+
+ /* The main vport index is the relative first one (i.e. index 0) */
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ continue;
+ }
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats)
+{
+ u32 i;
+
+ if (!cdev) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ _qed_get_vport_stats(cdev, stats);
+
+ if (!cdev->reset_stats)
+ return;
+
+ /* Subtract the statistics baseline */
+ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
+
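The baseline subtraction above works only because struct qed_eth_stats is composed purely of u64 counters, so the whole struct can be walked as a flat u64 array. A self-contained sketch of the same pattern, using a hypothetical two-field struct:

    struct sample_stats { u64 rx_pkts; u64 tx_pkts; }; /* u64-only layout */

    static void stats_subtract_baseline(struct sample_stats *cur,
                                        const struct sample_stats *base)
    {
            u32 i;

            /* Walk both structs as flat u64 arrays, as the driver does */
            for (i = 0; i < sizeof(*cur) / sizeof(u64); i++)
                    ((u64 *)cur)[i] -= ((const u64 *)base)[i];
    }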
+/* Zeroes the V-PORT-specific portion of the stats (port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr = 0, len = 0;
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ */
+ if (!cdev->reset_stats)
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ else
+ _qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -1268,24 +1622,25 @@ static void qed_register_eth_ops(struct qed_dev *cdev,
}
static int qed_start_vport(struct qed_dev *cdev,
- u8 vport_id,
- u16 mtu,
- u8 drop_ttl0_flg,
- u8 inner_vlan_removal_en_flg)
+ struct qed_start_vport_params *params)
{
int rc, i;
for_each_hwfn(cdev, i) {
+ struct qed_sp_vport_start_params start = { 0 };
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- rc = qed_sp_vport_start(p_hwfn,
- p_hwfn->hw_info.concrete_fid,
- p_hwfn->hw_info.opaque_fid,
- vport_id,
- mtu,
- drop_ttl0_flg,
- inner_vlan_removal_en_flg);
-
+ start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
+ QED_TPA_MODE_NONE;
+ start.remove_inner_vlan = params->remove_inner_vlan;
+ start.drop_ttl0 = params->drop_ttl0;
+ start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ start.vport_id = params->vport_id;
+ start.max_buffers_per_cqe = 16;
+ start.mtu = params->mtu;
+
+ rc = qed_sp_vport_start(p_hwfn, &start);
if (rc) {
DP_ERR(cdev, "Failed to start VPORT\n");
return rc;
@@ -1295,7 +1650,7 @@ static int qed_start_vport(struct qed_dev *cdev,
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started V-PORT %d with MTU %d\n",
- vport_id, mtu);
+ start.vport_id, start.mtu);
}
qed_reset_vport_stats(cdev);
@@ -1344,6 +1699,9 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.accept_any_vlan = params->accept_any_vlan;
+ sp_params.update_accept_any_vlan_flg =
+ params->update_accept_any_vlan_flg;
/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
* We need to re-fix the rss values per engine for CMT.
@@ -1563,7 +1921,7 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
- return qed_filter_accept_cmd(cdev, 0, accept_flags,
+ return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
QED_SPQ_MODE_CB, NULL);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 9d76ce249277..26d40db07ddd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -29,10 +29,10 @@
#include "qed_mcp.h"
#include "qed_hw.h"
-static const char version[] =
- "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
+static char version[] =
+ "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
-MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define QED_FW_FILE_NAME \
"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+MODULE_FIRMWARE(QED_FW_FILE_NAME);
+
static int __init qed_init(void)
{
pr_notice("qed_init called\n");
@@ -97,12 +99,15 @@ static void qed_free_pci(struct qed_dev *cdev)
pci_disable_device(pdev);
}
+#define PCI_REVISION_ID_ERROR_VAL 0xff
+
/* Performs PCI initialization as well as initializing PCI-related parameters
* in the device structure. Returns 0 in case of success.
*/
static int qed_init_pci(struct qed_dev *cdev,
struct pci_dev *pdev)
{
+ u8 rev_id;
int rc;
cdev->pdev = pdev;
@@ -136,6 +141,14 @@ static int qed_init_pci(struct qed_dev *cdev,
pci_save_state(pdev);
}
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+ if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
+ DP_NOTICE(cdev,
+ "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
+ rev_id);
+ rc = -ENODEV;
+ goto err2;
+ }
if (!pci_is_pcie(pdev)) {
DP_NOTICE(cdev, "The bus is not PCI Express\n");
rc = -EIO;
@@ -190,7 +203,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
- dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+ dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
dev_info->fw_major = FW_MAJOR_VERSION;
@@ -621,15 +634,18 @@ static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
- int rc, i;
- u8 num_vectors = 0;
-
+ struct qed_sb_cnt_info sb_cnt_info;
+ int rc;
+ int i;
memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = int_mode;
- for_each_hwfn(cdev, i)
- num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
- cdev->int_params.in.num_vectors = num_vectors;
+ for_each_hwfn(cdev, i) {
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
+ cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
+ cdev->int_params.in.num_vectors++; /* slowpath */
+ }
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
@@ -763,7 +779,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
true, data);
if (rc)
- goto err3;
+ goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
@@ -782,12 +798,14 @@ static int qed_slowpath_start(struct qed_dev *cdev,
return rc;
}
+ qed_reset_vport_stats(cdev);
+
return 0;
-err3:
- qed_free_stream_mem(cdev);
- qed_slowpath_irq_free(cdev);
err2:
+ qed_hw_timers_stop_all(cdev);
+ qed_slowpath_irq_free(cdev);
+ qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index ba1b1f1ef789..b89c9a8e1655 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -11,8 +11,8 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
@@ -147,7 +147,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
u32 size;
/* Allocate mcp_info structure */
- p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
+ p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
if (!p_hwfn->mcp_info)
goto err;
p_info = p_hwfn->mcp_info;
@@ -161,15 +161,15 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
}
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
- p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
+ p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
p_info->mfw_mb_shadow =
kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
- p_info->mfw_mb_length), GFP_ATOMIC);
+ p_info->mfw_mb_length), GFP_KERNEL);
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW mutex */
- mutex_init(&p_info->mutex);
+ /* Initialize the MFW spinlock */
+ spin_lock_init(&p_info->lock);
return 0;
@@ -179,6 +179,52 @@ err:
return -ENOMEM;
}
+/* Locks the MFW mailbox of a PF to ensure a single access.
+ * In most cases the lock is achieved by holding a spinlock, causing other
+ * threads to wait until a previous access is done.
+ * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
+ * access is achieved by setting a blocking flag, which causes competing
+ * contexts' attempts to send their mailboxes to fail.
+ */
+static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn,
+ u32 cmd)
+{
+ spin_lock_bh(&p_hwfn->mcp_info->lock);
+
+ /* The spinlock shouldn't be acquired when the mailbox command is
+ * [UN]LOAD_REQ, since the engine is locked by the MFW, and holding a
+ * spinlock (i.e. with interrupts disabled) while another PF has a
+ * pending [UN]LOAD_REQ command can lead to a deadlock.
+ * It is assumed that for a single PF no other mailbox commands are
+ * sent from another context while sending LOAD_REQ, and that any
+ * commands sent in parallel to UNLOAD_REQ can be cancelled.
+ */
+ if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (p_hwfn->mcp_info->block_mb_sending) {
+ DP_NOTICE(p_hwfn,
+ "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
+ cmd);
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ return -EBUSY;
+ }
+
+ if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+ p_hwfn->mcp_info->block_mb_sending = true;
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ }
+
+ return 0;
+}
+
+static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn,
+ u32 cmd)
+{
+ if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
+ spin_unlock_bh(&p_hwfn->mcp_info->lock);
+}
+
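Taken together, the two helpers bracket every mailbox command; a sketch of the intended pairing (error handling trimmed, variable declarations included for completeness):

    u32 resp = 0, mcp_param = 0;
    int rc;

    rc = qed_mcp_mb_lock(p_hwfn, cmd);
    if (rc)
            return rc;

    rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, &resp, &mcp_param);

    /* For [UN]LOAD_REQ the spinlock was already dropped inside the lock
     * helper and only the blocking flag guards the mailbox, so unlock
     * deliberately becomes a no-op for those commands.
     */
    qed_mcp_mb_unlock(p_hwfn, cmd);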
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -187,6 +233,13 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
u32 org_mcp_reset_seq, cnt = 0;
int rc = 0;
+ /* Ensure that only a single thread is accessing the mailbox at a
+ * given time.
+ */
+ rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ if (rc != 0)
+ return rc;
+
/* Set drv command along with the updated sequence */
org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
@@ -209,6 +262,8 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
rc = -EAGAIN;
}
+ qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+
return rc;
}
@@ -275,14 +330,12 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params)
{
- int rc = 0;
+ u32 union_data_addr;
+ int rc;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -290,28 +343,56 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
- /* Lock Mutex to ensure only single thread is
- * accessing the MCP at one time
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data);
+
+ /* Ensure that only a single thread is accessing the mailbox at a
+ * given time.
*/
- mutex_lock(&p_hwfn->mcp_info->mutex);
- rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
- o_mcp_resp, o_mcp_param);
- /* Release Mutex */
- mutex_unlock(&p_hwfn->mcp_info->mutex);
+ rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
+ if (rc)
+ return rc;
+
+ if (p_mb_params->p_data_src != NULL)
+ qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
+ p_mb_params->p_data_src,
+ sizeof(*p_mb_params->p_data_src));
+
+ rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
+ p_mb_params->param, &p_mb_params->mcp_resp,
+ &p_mb_params->mcp_param);
+
+ if (p_mb_params->p_data_dst != NULL)
+ qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr,
+ sizeof(*p_mb_params->p_data_dst));
+
+ qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
return rc;
}
-static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
- struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
{
- u32 i;
+ struct qed_mcp_mb_params mb_params;
+ int rc;
- /* Copy version string to MCP */
- for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
- DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
- *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ return 0;
}
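Callers that need no shmem payload keep the old qed_mcp_cmd() signature; a typical call site looks roughly like this (command and parameter values are illustrative only):

    u32 resp = 0, param = 0;
    int rc;

    /* Illustrative: any simple mailbox command follows this shape */
    rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
                     &resp, &param);
    if (rc)
            return rc;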
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
@@ -319,26 +400,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
u32 *p_load_code)
{
struct qed_dev *cdev = p_hwfn->cdev;
- u32 param;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
int rc;
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
-
- /* Save driver's version to shmem */
- qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
-
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
- p_hwfn->mcp_info->drv_mb_seq,
- p_hwfn->mcp_info->drv_pulse_seq);
-
+ memset(&mb_params, 0, sizeof(mb_params));
/* Load Request */
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
- (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- cdev->drv_type),
- p_load_code, &param);
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ cdev->drv_type;
+ memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc) {
@@ -346,6 +419,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return rc;
}
+ *p_load_code = mb_params.mcp_resp;
+
/* If MFW refused (e.g. other port is in diagnostic mode) we
* must abort. This can happen in the following cases:
* - Other port is in diagnostic mode
@@ -365,6 +440,33 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 transceiver_state;
+
+ transceiver_state = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_HW | QED_MSG_SP),
+ "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
+ transceiver_state,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data)));
+
+ transceiver_state = GET_FIELD(transceiver_state,
+ PMM_TRANSCEIVER_STATE);
+
+ if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+ DP_NOTICE(p_hwfn, "Transceiver is present.\n");
+ else
+ DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
+}
+
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_reset)
@@ -390,7 +492,10 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
return;
}
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ if (p_hwfn->b_drv_link_init)
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ else
+ p_link->link_up = false;
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -492,53 +597,43 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
- u32 param = 0, reply = 0, cmd;
- struct pmm_phy_cfg phy_cfg;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ struct pmm_phy_cfg *phy_cfg;
int rc = 0;
- u32 i;
-
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
+ u32 cmd;
/* Set the shmem configuration according to params */
- memset(&phy_cfg, 0, sizeof(phy_cfg));
+ phy_cfg = &union_data.drv_phy_cfg;
+ memset(phy_cfg, 0, sizeof(*phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- phy_cfg.speed = params->speed.forced_speed;
- phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
- phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
- phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
- phy_cfg.adv_speed = params->speed.advertised_speeds;
- phy_cfg.loopback_mode = params->loopback_mode;
-
- /* Write the requested configuration to shmem */
- for (i = 0; i < sizeof(phy_cfg); i += 4)
- qed_wr(p_hwfn, p_ptt,
- p_hwfn->mcp_info->drv_mb_addr +
- offsetof(struct public_drv_mb, union_data) + i,
- ((u32 *)&phy_cfg)[i >> 2]);
+ phy_cfg->speed = params->speed.forced_speed;
+ phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+ phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+ phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg->adv_speed = params->speed.advertised_speeds;
+ phy_cfg->loopback_mode = params->loopback_mode;
+
+ p_hwfn->b_drv_link_init = b_up;
if (b_up) {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
- phy_cfg.speed,
- phy_cfg.pause,
- phy_cfg.adv_speed,
- phy_cfg.loopback_mode,
- phy_cfg.feature_config_flags);
+ phy_cfg->speed,
+ phy_cfg->pause,
+ phy_cfg->adv_speed,
+ phy_cfg->loopback_mode,
+ phy_cfg->feature_config_flags);
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link\n");
}
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
- p_hwfn->mcp_info->drv_mb_seq,
- p_hwfn->mcp_info->drv_pulse_seq);
-
- /* Load Request */
- rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc) {
@@ -581,6 +676,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break;
+ case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+ qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+ break;
default:
DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -720,26 +818,25 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- if (p_hwfn->cdev->mf_mode != SF) {
- info->bandwidth_min = (shmem_info.config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- info->bandwidth_min);
- info->bandwidth_min = 1;
- }
- info->bandwidth_max = (shmem_info.config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- info->bandwidth_max);
- info->bandwidth_max = 100;
- }
+ info->bandwidth_min = (shmem_info.config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ info->bandwidth_min);
+ info->bandwidth_min = 1;
+ }
+
+ info->bandwidth_max = (shmem_info.config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ info->bandwidth_max);
+ info->bandwidth_max = 100;
}
if (shmem_info.mac_upper || shmem_info.mac_lower) {
@@ -802,11 +899,11 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn,
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NIG_DRAIN, 100,
+ DRV_MSG_CODE_NIG_DRAIN, 1000,
&resp, &param);
/* Wait for the drain to complete before returning */
- msleep(120);
+ msleep(1020);
return rc;
}
@@ -832,31 +929,28 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver)
{
- int rc = 0;
- u32 param = 0, reply = 0, i;
-
- if (!qed_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
- return -EBUSY;
- }
+ struct drv_version_stc *p_drv_version;
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ __be32 val;
+ u32 i;
+ int rc;
- DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
- p_ver->version);
- /* Copy version string to shmem */
- for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
- DRV_MB_WR(p_hwfn, p_ptt,
- union_data.drv_version.name[i * sizeof(u32)],
- *(u32 *)&p_ver->name[i * sizeof(u32)]);
+ p_drv_version = &union_data.drv_version;
+ p_drv_version->version = p_ver->version;
+ for (i = 0; i < MCP_DRV_VER_STR_SIZE - 4; i += 4) {
+ val = cpu_to_be32(*(u32 *)&p_ver->name[i]);
+ *(__be32 *)&p_drv_version->name[i] = val;
}
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
- &param);
- if (rc) {
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
- return rc;
- }
- return 0;
+ return rc;
}
int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 506197d5c3dd..50917a2131a5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -11,8 +11,8 @@
#include <linux/types.h>
#include <linux/delay.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "qed_hsi.h"
struct qed_mcp_link_speed_params {
@@ -255,7 +255,8 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->cdev->num_ports_in_engines * 2))
struct qed_mcp_info {
- struct mutex mutex; /* MCP access lock */
+ spinlock_t lock;
+ bool block_mb_sending;
u32 public_base;
u32 drv_mb_addr;
u32 mfw_mb_addr;
@@ -272,6 +273,15 @@ struct qed_mcp_info {
u16 mcp_hist;
};
+struct qed_mcp_mb_params {
+ u32 cmd;
+ u32 param;
+ union drv_union_data *p_data_src;
+ union drv_union_data *p_data_dst;
+ u32 mcp_resp;
+ u32 mcp_param;
+};
+
/**
* @brief Initialize the interface with the MCP
*
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index e8df12335a97..c15b1622e636 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -127,8 +127,20 @@
0x00c000UL
#define DORQ_REG_IFEN \
0x100040UL
+#define DORQ_REG_DB_DROP_REASON \
+ 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS \
+ 0x100a24UL
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS \
+ 0x100a1cUL
#define GRC_REG_TIMEOUT_EN \
0x050404UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID \
+ 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 \
+ 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 \
+ 0x050050UL
#define IGU_REG_BLOCK_CONFIGURATION \
0x180040UL
#define MCM_REG_INIT \
@@ -155,6 +167,40 @@
0x1100000UL
#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
0x2a9000UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 \
+ 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 \
+ 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 \
+ 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS \
+ 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 \
+ 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 \
+ 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS \
+ 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 \
+ 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL \
+ 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS \
+ 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 \
+ 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 \
+ 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 \
+ 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 \
+ 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS \
+ 0x2aae7cUL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 \
+ 0x2aae80UL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR \
+ 0x2aa3bcUL
#define PRM_REG_DISABLE_PRM \
0x230000UL
#define PRS_REG_SOFT_RST \
@@ -171,6 +217,14 @@
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
0x29e050UL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID \
+ 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS \
+ 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA \
+ 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH \
+ 0x2a006cUL
#define PSWRD_REG_DBG_SELECT \
0x29c040UL
#define PSWRD2_REG_CONF11 \
@@ -333,6 +387,8 @@
0x180800UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
+ 0x0087b4UL
#define MISC_REG_AEU_MASK_ATTN_IGU \
0x008494UL
#define IGU_REG_CLEANUP_STATUS_0 \
@@ -363,6 +419,10 @@
0x7 << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
+#define MCP_REG_CPU_STATE \
+ 0xe05004UL
+#define MCP_REG_CPU_EVENT_MASK \
+ 0xe05008UL
#define PGLUE_B_REG_PF_BAR0_SIZE \
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 287fadfab52d..d39f914b66ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -311,19 +311,20 @@ void qed_consq_free(struct qed_hwfn *p_hwfn,
#define QED_SP_EQ_COMPLETION 0x01
#define QED_SP_CQE_COMPLETION 0x02
-struct qed_sp_init_request_params {
- size_t ramrod_data_size;
+struct qed_sp_init_data {
+ u32 cid;
+ u16 opaque_fid;
+
+ /* Information regarding operation upon sending & completion */
enum spq_mode comp_mode;
struct qed_spq_comp_cb *p_comp_data;
};
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
- u32 cid,
- u16 opaque_fid,
u8 cmd,
u8 protocol,
- struct qed_sp_init_request_params *p_params);
+ struct qed_sp_init_data *p_data);
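Every ramrod issuer in this patch now follows the same fill-and-call pattern against qed_sp_init_data; condensed from the call sites above (completion callback left unset, so EBLOCK mode is used):

    struct qed_sp_init_data init_data;
    struct qed_spq_entry *p_ent = NULL;
    int rc;

    memset(&init_data, 0, sizeof(init_data));
    init_data.cid = qed_spq_get_cid(p_hwfn);
    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

    rc = qed_sp_init_request(p_hwfn, &p_ent,
                             COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                             &init_data);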
/**
* @brief qed_sp_pf_start - PF Function Start Ramrod
@@ -343,7 +344,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum mf_mode mode);
+ enum qed_mf_mode mode);
/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 6f7879136633..1c06c37d4c3d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -23,15 +23,13 @@
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
- u32 cid,
- u16 opaque_fid,
u8 cmd,
u8 protocol,
- struct qed_sp_init_request_params *p_params)
+ struct qed_sp_init_data *p_data)
{
- int rc = -EINVAL;
+ u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct qed_spq_entry *p_ent = NULL;
- u32 opaque_cid = opaque_fid << 16 | cid;
+ int rc;
if (!pp_ent)
return -ENOMEM;
@@ -48,7 +46,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
p_ent->elem.hdr.protocol_id = protocol;
p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
- p_ent->comp_mode = p_params->comp_mode;
+ p_ent->comp_mode = p_data->comp_mode;
p_ent->comp_done.done = 0;
switch (p_ent->comp_mode) {
@@ -57,17 +55,17 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
break;
case QED_SPQ_MODE_BLOCK:
- if (!p_params->p_comp_data)
+ if (!p_data->p_comp_data)
return -EINVAL;
- p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
+ p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
case QED_SPQ_MODE_CB:
- if (!p_params->p_comp_data)
+ if (!p_data->p_comp_data)
p_ent->comp_cb.function = NULL;
else
- p_ent->comp_cb = *p_params->p_comp_data;
+ p_ent->comp_cb = *p_data->p_comp_data;
break;
default:
@@ -83,37 +81,35 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
- if (p_params->ramrod_data_size)
- memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
+
+ memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return 0;
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum mf_mode mode)
+ enum qed_mf_mode mode)
{
- struct qed_sp_init_request_params params;
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
int rc = -EINVAL;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
- memset(&params, 0, sizeof(params));
- params.ramrod_data_size = sizeof(*p_ramrod);
- params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
- rc = qed_sp_init_request(p_hwfn,
- &p_ent,
- qed_spq_get_cid(p_hwfn),
- p_hwfn->hw_info.opaque_fid,
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
PROTOCOLID_COMMON,
- &params);
+ &init_data);
if (rc)
return rc;
@@ -125,26 +121,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf);
p_ramrod->mf_mode = mode;
+ switch (mode) {
+ case QED_MF_DEFAULT:
+ case QED_MF_NPAR:
+ p_ramrod->mf_mode = MF_NPAR;
+ break;
+ case QED_MF_OVLAN:
+ p_ramrod->mf_mode = MF_OVLAN;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
+ p_ramrod->mf_mode = MF_NPAR;
+ }
p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */
- p_ramrod->event_ring_pbl_addr.hi =
- DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
- p_ramrod->event_ring_pbl_addr.lo =
- DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+ p_hwfn->p_eq->chain.pbl.p_phys_table);
p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
- p_ramrod->consolid_q_pbl_addr.hi =
- DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
- p_ramrod->consolid_q_pbl_addr.lo =
- DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+ p_hwfn->p_consq->chain.pbl.p_phys_table);
p_hwfn->hw_info.personality = PERSONALITY_ETH;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+ "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index,
- (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
p_ramrod->outer_tag);
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -152,17 +155,19 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
- struct qed_sp_init_request_params params;
struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
int rc = -EINVAL;
- memset(&params, 0, sizeof(params));
- params.comp_mode = QED_SPQ_MODE_EBLOCK;
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
- rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
- p_hwfn->hw_info.opaque_fid,
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
- &params);
+ &init_data);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 3dd548ab8df1..89469d5aae25 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -183,10 +183,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt->xstorm_st_context.spq_base_hi =
DMA_HI_LE(p_spq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.consolid_base_addr.lo =
- DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.consolid_base_addr.hi =
- DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+ DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+ p_hwfn->p_consq->chain.p_phys_addr);
}
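DMA_REGPAIR_LE collapses the old hi/lo assignment pairs into a single statement; judging from the code it replaces, the macro is presumably defined along these lines in the qed headers:

    /* Sketch reconstructed from the replaced hi/lo assignments */
    #define DMA_REGPAIR_LE(x, val)  do { \
                                            (x).hi = DMA_HI_LE((u64)(val)); \
                                            (x).lo = DMA_LO_LE((u64)(val)); \
                                    } while (0)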
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
@@ -327,7 +325,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
struct qed_eq *p_eq;
/* Allocate EQ struct */
- p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
+ p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
if (!p_eq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
return NULL;
@@ -423,8 +421,7 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
p_virt = p_spq->p_virt;
for (i = 0; i < p_spq->chain.capacity; i++) {
- p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
- p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+ DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool);
@@ -457,7 +454,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
/* SPQ struct */
p_spq =
- kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
+ kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
if (!p_spq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
return -ENOMEM;
@@ -853,7 +850,7 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
struct qed_consq *p_consq;
/* Allocate ConsQ struct */
- p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
+ p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
if (!p_consq) {
DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 7c6caf7f6612..d023251544d9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,7 +24,7 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 4
+#define QEDE_MINOR_VERSION 7
#define QEDE_REVISION_VERSION 0
#define QEDE_ENGINEERING_VERSION 0
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -100,6 +100,12 @@ struct qede_stats {
u64 tx_mac_ctrl_frames;
};
+struct qede_vlan {
+ struct list_head list;
+ u16 vid;
+ bool configured;
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -154,6 +160,11 @@ struct qede_dev {
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
+ bool gro_disable;
+ struct list_head vlan_list;
+ u16 configured_vlans;
+ u16 non_configured_vlans;
+ bool accept_any_vlan;
struct delayed_work sp_task;
unsigned long sp_flags;
};
@@ -173,9 +184,27 @@ enum QEDE_STATE {
* skb are built only after the frame was DMA-ed.
*/
struct sw_rx_data {
- u8 *data;
+ struct page *data;
+ dma_addr_t mapping;
+ unsigned int page_offset;
+};
- DEFINE_DMA_UNMAP_ADDR(mapping);
+enum qede_agg_state {
+ QEDE_AGG_STATE_NONE = 0,
+ QEDE_AGG_STATE_START = 1,
+ QEDE_AGG_STATE_ERROR = 2
+};
+
+struct qede_agg_info {
+ struct sw_rx_data replace_buf;
+ dma_addr_t replace_buf_mapping;
+ struct sw_rx_data start_buf;
+ dma_addr_t start_buf_mapping;
+ struct eth_fast_path_rx_tpa_start_cqe start_cqe;
+ enum qede_agg_state agg_state;
+ struct sk_buff *skb;
+ int frag_id;
+ u16 vlan_tag;
};
struct qede_rx_queue {
@@ -187,7 +216,11 @@ struct qede_rx_queue {
struct qed_chain rx_comp_ring;
void __iomem *hw_rxq_prod_addr;
+ /* GRO */
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+
int rx_buf_size;
+ unsigned int rx_buf_seg_size;
u16 num_rx_buffers;
u16 rxq_id;
@@ -281,6 +314,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev);
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define QEDE_RX_HDR_SIZE 256
#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index e442b85c9a5e..c49dc10ce151 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -217,9 +217,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct qed_link_params params;
u32 speed;
- if (edev->dev_info.common.is_mf) {
+ if (!edev->dev_info.common.is_mf_default) {
DP_INFO(edev,
- "Link parameters can not be changed in MF mode\n");
+ "Link parameters can not be changed in non-default mode\n");
return -EOPNOTSUPP;
}
@@ -428,7 +428,7 @@ static int qede_set_pauseparam(struct net_device *dev,
struct qed_link_params params;
struct qed_link_output current_link;
- if (!edev->dev_info.common.is_mf) {
+ if (!edev->dev_info.common.is_mf_default) {
DP_INFO(edev,
"Pause parameters can not be updated in non-default mode\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 6237f10b5119..7869465435fa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -39,10 +39,10 @@
#include "qede.h"
-static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
- DRV_MODULE_VERSION "\n";
+static char version[] =
+ "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
-MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -53,7 +53,7 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_40 0x1634
-#define CHIP_NUM_57980S_10 0x1635
+#define CHIP_NUM_57980S_10 0x1666
#define CHIP_NUM_57980S_MF 0x1636
#define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654
@@ -330,15 +330,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
struct eth_tx_3rd_bd *third_bd)
{
u8 l4_proto;
- u16 bd2_bits = 0, bd2_bits2 = 0;
+ u16 bd2_bits1 = 0, bd2_bits2 = 0;
- bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+ bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
- bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
<< ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
- bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
@@ -347,16 +347,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
l4_proto = ip_hdr(skb)->protocol;
if (l4_proto == IPPROTO_UDP)
- bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+ bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
- if (third_bd) {
+ if (third_bd)
third_bd->data.bitfields |=
- ((tcp_hdrlen(skb) / 4) &
- ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
- ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
- }
+ cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
- second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+ second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}
@@ -381,6 +380,28 @@ static int map_frag_to_bd(struct qede_dev *edev,
return 0;
}
+/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
+ u8 xmit_type)
+{
+ int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
+
+ if (xmit_type & XMIT_LSO) {
+ int hlen;
+
+ hlen = skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data;
+
+ /* linear payload would require its own BD */
+ if (skb_headlen(skb) > hlen)
+ allowed_frags--;
+ }
+
+ return (skb_shinfo(skb)->nr_frags > allowed_frags);
+}
+#endif
+
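The preprocessor guard compiles the linearization check in only when a maximally fragmented skb could exceed the firmware's BD budget: with the common MAX_SKB_FRAGS value of 17 and assuming ETH_TX_MAX_BDS_PER_NON_LSO_PACKET is 18, a worst-case skb needs 17 frags + 2 header BDs = 19 > 18, so the check is required on such configurations and compiles away on the rest.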
/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -408,16 +429,22 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq = QEDE_TX_QUEUE(edev, txq_index);
netdev_txq = netdev_get_tx_queue(ndev, txq_index);
- /* Current code doesn't support SKB linearization, since the max number
- * of skb frags can be passed in the FW HSI.
- */
- BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
-
WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
(MAX_SKB_FRAGS + 1));
xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+ if (qede_pkt_req_lin(edev, skb, xmit_type)) {
+ if (skb_linearize(skb)) {
+ DP_NOTICE(edev,
+ "SKB linearization failed - silently dropping this SKB\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
txq->sw_tx_ring[idx].skb = skb;
@@ -464,12 +491,16 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the parsing flags & params according to the requested offload */
if (xmit_type & XMIT_L4_CSUM) {
+ u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
+
/* We don't re-calculate IP checksum as it is already done by
* the upper stack
*/
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ first_bd->data.bitfields |= cpu_to_le16(temp);
+
/* If the packet is IPv6 with extension header, indicate that
* to the FW and pass a few params, since the device cracker doesn't
* support parsing IPv6 with extension header(s).
@@ -491,7 +522,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* @@@TBD - check whether this can be removed */
third_bd->data.bitfields |=
- (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
/* Make life easier for FW guys who can't deal with header and
* data on same BD. If we need to split, use the second bd...
@@ -719,26 +750,79 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
return false;
}
-/* This function copies the Rx buffer from the CONS position to the PROD
- * position, since we failed to allocate a new Rx buffer.
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
+/* This function reuses the buffer (from an offset) from the
+ * consumer index to the producer index in the BD ring
+ */
-static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+static inline void qede_reuse_page(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
{
- struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
- struct sw_rx_data *sw_rx_data_cons =
- &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
- struct sw_rx_data *sw_rx_data_prod =
- &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ struct sw_rx_data *curr_prod;
+ dma_addr_t new_mapping;
- dma_unmap_addr_set(sw_rx_data_prod, mapping,
- dma_unmap_addr(sw_rx_data_cons, mapping));
+ curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ *curr_prod = *curr_cons;
- sw_rx_data_prod->data = sw_rx_data_cons->data;
- memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+ new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
- rxq->sw_rx_cons++;
rxq->sw_rx_prod++;
+ curr_cons->data = NULL;
+}
+
+/* In case of allocation failures, reuse buffers from the
+ * consumer index to produce buffers for the firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
+{
+ struct sw_rx_data *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ qede_reuse_page(edev, rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ /* Move to the next segment in the page */
+ curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+ if (curr_cons->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ /* Since we failed to allocate a new buffer, the
+ * current buffer can be used again.
+ */
+ curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
+ return -ENOMEM;
+ }
+
+ dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ } else {
+ /* Increment the page's refcount, as we don't want the
+ * network stack to take ownership of a page that can be
+ * recycled multiple times by the driver.
+ */
+ atomic_inc(&curr_cons->data->_count);
+ qede_reuse_page(edev, rxq, curr_cons);
+ }
+
+ return 0;
}
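The reallocation helper assumes each RX page is carved into rx_buf_seg_size chunks that are handed out in sequence; with illustrative sizes, the lifecycle of one page looks like this:

    /* Illustrative sizes only: a 4K page split into two 2K segments      */
    /* 1st buffer: page_offset 0    -> 2048; page refcount bumped, reused */
    /* 2nd buffer: page_offset 2048 -> PAGE_SIZE; a new page is allocated */
    /*             and the exhausted one is DMA-unmapped for the stack    */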
static inline void qede_update_rx_prod(struct qede_dev *edev,
@@ -809,6 +893,301 @@ static inline void qede_skb_receive(struct qede_dev *edev,
napi_gro_receive(&fp->napi, skb);
}
+static void qede_set_gro_params(struct qede_dev *edev,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
+
+ if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+ PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
+ cqe->header_len;
+}
+
+static int qede_fill_frag_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ u8 tpa_agg_index,
+ u16 len_on_bd)
+{
+ struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
+ NUM_RX_BDS_MAX];
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
+ struct sk_buff *skb = tpa_info->skb;
+
+ if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ goto out;
+
+ /* Add one frag and update the appropriate fields in the skb */
+ skb_fill_page_desc(skb, tpa_info->frag_id++,
+ current_bd->data, current_bd->page_offset,
+ len_on_bd);
+
+ if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
+ /* Increment the page ref count so it can be reused on allocation
+ * failure and doesn't get freed while freeing the SKB.
+ */
+ atomic_inc(&current_bd->data->_count);
+ goto out;
+ }
+
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+
+ skb->data_len += len_on_bd;
+ skb->truesize += rxq->rx_buf_seg_size;
+ skb->len += len_on_bd;
+
+ return 0;
+
+out:
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ return -ENOMEM;
+}
+
+static void qede_tpa_start(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+ dma_addr_t mapping = tpa_info->replace_buf_mapping;
+ struct sw_rx_data *sw_rx_data_cons;
+ struct sw_rx_data *sw_rx_data_prod;
+ enum pkt_hash_types rxhash_type;
+ u32 rxhash;
+
+ sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+ /* Use the pre-allocated replacement buffer - we can't release the
+ * aggregation's start buffer until the aggregation is over, and we
+ * don't want to risk an allocation failure here, so re-allocate once
+ * the aggregation is over.
+ */
+ dma_unmap_addr_set(sw_rx_data_prod, mapping,
+ dma_unmap_addr(replace_buf, mapping));
+
+ sw_rx_data_prod->data = replace_buf->data;
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+ sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+ rxq->sw_rx_prod++;
+
+ /* Move the partial skb from cons to the pool (don't unmap yet);
+ * save the mapping in case we drop the packet later on.
+ */
+ tpa_info->start_buf = *sw_rx_data_cons;
+ mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
+ le32_to_cpu(rx_bd_cons->addr.lo));
+
+ tpa_info->start_buf_mapping = mapping;
+ rxq->sw_rx_cons++;
+
+ /* Set the tpa state to START only if we are able to allocate an
+ * skb for this aggregation; otherwise mark it as an error and the
+ * aggregation will be dropped.
+ */
+ tpa_info->skb = netdev_alloc_skb(edev->ndev,
+ le16_to_cpu(cqe->len_on_first_bd));
+ if (unlikely(!tpa_info->skb)) {
+ DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ goto cons_buf;
+ }
+
+ skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
+ memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
+
+ /* Start filling in the aggregation info */
+ tpa_info->frag_id = 0;
+ tpa_info->agg_state = QEDE_AGG_STATE_START;
+
+ rxhash = qede_get_rxhash(edev, cqe->bitfields,
+ cqe->rss_hash, &rxhash_type);
+ skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
+ if ((le16_to_cpu(cqe->pars_flags.flags) >>
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ else
+ tpa_info->vlan_tag = 0;
+
+ /* This is needed in order to enable forwarding support */
+ qede_set_gro_params(edev, tpa_info->skb, cqe);
+
+cons_buf: /* We still need to handle ext_bd_len_list to consume buffers */
+ if (likely(cqe->ext_bd_len_list[0]))
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->ext_bd_len_list[0]));
+
+ if (unlikely(cqe->ext_bd_len_list[1])) {
+ DP_ERR(edev,
+ "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+ tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+ }
+}
+
+#ifdef CONFIG_INET
+static void qede_gro_ip_csum(struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+
+ tcp_gro_complete(skb);
+}
+
+static void qede_gro_ipv6_csum(struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+ tcp_gro_complete(skb);
+}
+#endif
+
+static void qede_gro_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+ /* The FW can send a single MTU-sized packet from the GRO flow
+ * (e.g. due to an aggregation timeout or a lone last segment)
+ * which is not expected to be a GRO packet. If the skb has no
+ * frags, simply push it up the stack as a non-GSO skb.
+ */
+ if (unlikely(!skb->data_len)) {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ goto send_skb;
+ }
+
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ skb_set_network_header(skb, 0);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ qede_gro_ip_csum(skb);
+ break;
+ case htons(ETH_P_IPV6):
+ qede_gro_ipv6_csum(skb);
+ break;
+ default:
+ DP_ERR(edev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ ntohs(skb->protocol));
+ }
+ }
+#endif
+
+send_skb:
+ skb_record_rx_queue(skb, fp->rss_id);
+ qede_skb_receive(edev, fp, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ int i;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA cont with more than a single len_list entry\n");
+}
+
+static void qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_agg_info *tpa_info;
+ struct sk_buff *skb;
+ int i;
+
+ tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ skb = tpa_info->skb;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA emd with more than a single len_list entry\n");
+
+ if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+ goto err;
+
+ /* Sanity */
+ if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+ DP_ERR(edev,
+ "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+ cqe->num_of_bds, tpa_info->frag_id);
+ if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+ DP_ERR(edev,
+ "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
+ le16_to_cpu(cqe->total_packet_len), skb->len);
+
+ memcpy(skb->data,
+ page_address(tpa_info->start_buf.data) +
+ tpa_info->start_cqe.placement_offset +
+ tpa_info->start_buf.page_offset,
+ le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
+
+ /* Recycle [mapped] start buffer for the next replacement */
+ tpa_info->replace_buf = tpa_info->start_buf;
+ tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+
+ /* Finalize the SKB */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+ qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+
+ return;
+err:
+ /* The BD starting the aggregation is still mapped; re-use it
+ * for future aggregations [as a replacement buffer].
+ */
+ memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
+ sizeof(struct sw_rx_data));
+ tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+ tpa_info->start_buf.data = NULL;
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ dev_kfree_skb_any(tpa_info->skb);
+ tpa_info->skb = NULL;
+}
+
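Taken together, the three TPA handlers above implement a small per-aggregation state machine; a condensed sketch (the states come from the patch, the transitions are inferred from the handlers):

/*
 * NONE  --tpa_start, skb allocated--->  START
 * NONE  --tpa_start, alloc failed---->  ERROR
 * START --fill_frag failure---------->  ERROR
 * START/ERROR --tpa_end------------->   NONE
 */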
static u8 qede_check_csum(u16 flag)
{
u16 csum_flag = 0;
@@ -857,9 +1236,10 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
struct sk_buff *skb;
+ struct page *data;
+ __le16 flags;
u16 len, pad;
u32 rx_hash;
- u8 *data;
/* Get the CQE from the completion ring */
cqe = (union eth_rx_cqe *)
@@ -873,62 +1253,155 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
goto next_cqe;
}
+ if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
+ switch (cqe_type) {
+ case ETH_RX_CQE_TYPE_TPA_START:
+ qede_tpa_start(edev, rxq,
+ &cqe->fast_path_tpa_start);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_tpa_cont(edev, rxq,
+ &cqe->fast_path_tpa_cont);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_tpa_end(edev, fp,
+ &cqe->fast_path_tpa_end);
+ goto next_rx_only;
+ default:
+ break;
+ }
+ }
+
/* Get the data from the SW ring */
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
data = sw_rx_data->data;
fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->pkt_len);
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
pad = fp_cqe->placement_offset;
+ flags = cqe->fast_path_regular.pars_flags.flags;
- /* For every Rx BD consumed, we allocate a new BD so the BD ring
- * is always with a fixed size. If allocation fails, we take the
- * consumed BD and return it to the ring in the PROD position.
- * The packet that was received on that BD will be dropped (and
- * not passed to the upper stack).
- */
- if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
- dma_unmap_single(&edev->pdev->dev,
- dma_unmap_addr(sw_rx_data, mapping),
- rxq->rx_buf_size, DMA_FROM_DEVICE);
-
- /* If this is an error packet then drop it */
- parse_flag =
- le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
- csum_flag = qede_check_csum(parse_flag);
- if (csum_flag == QEDE_CSUM_ERROR) {
- DP_NOTICE(edev,
- "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
- sw_comp_cons, parse_flag);
- rxq->rx_hw_errors++;
- kfree(data);
- goto next_rx;
- }
-
- skb = build_skb(data, 0);
-
- if (unlikely(!skb)) {
- DP_NOTICE(edev,
- "Build_skb failed, dropping incoming packet\n");
- kfree(data);
- rxq->rx_alloc_errors++;
- goto next_rx;
- }
+ /* If this is an error packet then drop it */
+ parse_flag = le16_to_cpu(flags);
- skb_reserve(skb, pad);
+ csum_flag = qede_check_csum(parse_flag);
+ if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ DP_NOTICE(edev,
+ "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+ sw_comp_cons, parse_flag);
+ rxq->rx_hw_errors++;
+ qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+ goto next_cqe;
+ }
- } else {
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
DP_NOTICE(edev,
- "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
- qede_reuse_rx_data(rxq);
+ "Build_skb failed, dropping incoming packet\n");
+ qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
rxq->rx_alloc_errors++;
goto next_cqe;
}
- sw_rx_data->data = NULL;
+ /* Copy data into SKB */
+ if (len + pad <= QEDE_RX_HDR_SIZE) {
+ memcpy(skb_put(skb, len),
+ page_address(data) + pad +
+ sw_rx_data->page_offset, len);
+ qede_reuse_page(edev, rxq, sw_rx_data);
+ } else {
+ struct skb_frag_struct *frag;
+ unsigned int pull_len;
+ unsigned char *va;
+
+ frag = &skb_shinfo(skb)->frags[0];
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
+ pad + sw_rx_data->page_offset,
+ len, rxq->rx_buf_seg_size);
+
+ va = skb_frag_address(frag);
+ pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
- skb_put(skb, len);
+ /* Align the pull_len to optimize memcpy */
+ memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+
+ if (unlikely(qede_realloc_rx_buffer(edev, rxq,
+ sw_rx_data))) {
+ DP_ERR(edev, "Failed to allocate rx buffer\n");
+ /* Increment the page refcount so the buffer can be
+ * reused on allocation failure and doesn't get freed
+ * along with the SKB.
+ */
+
+ atomic_inc(&sw_rx_data->data->_count);
+ rxq->rx_alloc_errors++;
+ qede_recycle_rx_bd_ring(rxq, edev,
+ fp_cqe->bd_num);
+ dev_kfree_skb_any(skb);
+ goto next_cqe;
+ }
+ }
+
+ qede_rx_bd_ring_consume(rxq);
+
+ if (fp_cqe->bd_num != 1) {
+ u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
+ u8 num_frags;
+
+ pkt_len -= len;
+
+ for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
+ num_frags--) {
+ u16 cur_size = pkt_len > rxq->rx_buf_size ?
+ rxq->rx_buf_size : pkt_len;
+ if (unlikely(!cur_size)) {
+ DP_ERR(edev,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+ qede_recycle_rx_bd_ring(rxq, edev,
+ num_frags);
+ dev_kfree_skb_any(skb);
+ goto next_cqe;
+ }
+
+ if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+ qede_recycle_rx_bd_ring(rxq, edev,
+ num_frags);
+ dev_kfree_skb_any(skb);
+ goto next_cqe;
+ }
+
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ qede_rx_bd_ring_consume(rxq);
+
+ dma_unmap_page(&edev->pdev->dev,
+ sw_rx_data->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_fill_page_desc(skb,
+ skb_shinfo(skb)->nr_frags++,
+ sw_rx_data->data, 0,
+ cur_size);
+
+ skb->truesize += PAGE_SIZE;
+ skb->data_len += cur_size;
+ skb->len += cur_size;
+ pkt_len -= cur_size;
+ }
+
+ if (unlikely(pkt_len))
+ DP_ERR(edev,
+ "Mapped all BDs of jumbo, but still have %d bytes\n",
+ pkt_len);
+ }
skb->protocol = eth_type_trans(skb, edev->ndev);
@@ -943,11 +1416,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
skb_record_rx_queue(skb, fp->rss_id);
qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
- qed_chain_consume(&rxq->rx_bd_ring);
-
-next_rx:
- rxq->sw_rx_cons++;
+next_rx_only:
rx_pkt++;
next_cqe: /* don't consume bd rx buffer */
@@ -1056,6 +1525,21 @@ static int qede_set_ucast_rx_mac(struct qede_dev *edev,
return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
+static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ u16 vid)
+{
+ struct qed_filter_params filter_cmd;
+
+ memset(&filter_cmd, 0, sizeof(filter_cmd));
+ filter_cmd.type = QED_FILTER_TYPE_UCAST;
+ filter_cmd.filter.ucast.type = opcode;
+ filter_cmd.filter.ucast.vlan_valid = 1;
+ filter_cmd.filter.ucast.vlan = vid;
+
+ return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
struct qed_eth_stats stats;
@@ -1168,6 +1652,247 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
+static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
+{
+ struct qed_update_vport_params params;
+ int rc;
+
+ /* Proceed only if action actually needs to be performed */
+ if (edev->accept_any_vlan == action)
+ return;
+
+ memset(&params, 0, sizeof(params));
+
+ params.vport_id = 0;
+ params.accept_any_vlan = action;
+ params.update_accept_any_vlan_flg = 1;
+
+ rc = edev->ops->vport_update(edev->cdev, &params);
+ if (rc) {
+ DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+ action ? "enable" : "disable");
+ } else {
+ DP_INFO(edev, "%s accept-any-vlan\n",
+ action ? "enabled" : "disabled");
+ edev->accept_any_vlan = action;
+ }
+}
+
+static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan, *tmp;
+ int rc;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan) {
+ DP_INFO(edev, "Failed to allocate struct for vlan\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&vlan->list);
+ vlan->vid = vid;
+ vlan->configured = false;
+
+ /* Verify vlan isn't already configured */
+ list_for_each_entry(tmp, &edev->vlan_list, list) {
+ if (tmp->vid == vlan->vid) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "vlan already configured\n");
+ kfree(vlan);
+ return -EEXIST;
+ }
+ }
+
+ /* If interface is down, cache this VLAN ID and return */
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, VLAN %d will be configured when interface is up\n",
+ vid);
+ if (vid != 0)
+ edev->non_configured_vlans++;
+ list_add(&vlan->list, &edev->vlan_list);
+
+ return 0;
+ }
+
+ /* Check for the filter limit.
+ * Note - vlan0 has a reserved filter and can be added without
+ * worrying about quota
+ */
+ if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
+ (vlan->vid == 0)) {
+ rc = qede_set_ucast_rx_vlan(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %d\n",
+ vlan->vid);
+ kfree(vlan);
+ return -EINVAL;
+ }
+ vlan->configured = true;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0)
+ edev->configured_vlans++;
+ } else {
+ /* Out of quota; Activate accept-any-VLAN mode */
+ if (!edev->non_configured_vlans)
+ qede_config_accept_any_vlan(edev, true);
+
+ edev->non_configured_vlans++;
+ }
+
+ list_add(&vlan->list, &edev->vlan_list);
+
+ return 0;
+}
+
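The quota rule applied above and in qede_configure_vlan_filters() below can be summarized as a hypothetical predicate (illustrative helper, not part of the patch):

static bool qede_vlan_fits_hw_quota(struct qede_dev *edev, u16 vid)
{
	/* vlan0 has a reserved filter and never consumes quota */
	if (vid == 0)
		return true;

	return edev->configured_vlans < edev->dev_info.num_vlan_filters;
}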
+static void qede_del_vlan_from_list(struct qede_dev *edev,
+ struct qede_vlan *vlan)
+{
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ if (vlan->configured)
+ edev->configured_vlans--;
+ else
+ edev->non_configured_vlans--;
+ }
+
+ list_del(&vlan->list);
+ kfree(vlan);
+}
+
+static int qede_configure_vlan_filters(struct qede_dev *edev)
+{
+ int rc = 0, real_rc = 0, accept_any_vlan = 0;
+ struct qed_dev_eth_info *dev_info;
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return 0;
+
+ dev_info = &edev->dev_info;
+
+ /* Configure non-configured vlans */
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (vlan->configured)
+ continue;
+
+ /* We have used all our credits, now enable accept_any_vlan */
+ if ((vlan->vid != 0) &&
+ (edev->configured_vlans == dev_info->num_vlan_filters)) {
+ accept_any_vlan = 1;
+ continue;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
+
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %u\n",
+ vlan->vid);
+ real_rc = rc;
+ continue;
+ }
+
+ vlan->configured = true;
+ /* vlan0 filter doesn't consume our VLAN filter's quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans--;
+ edev->configured_vlans++;
+ }
+ }
+
+ /* Enable accept_any_vlan mode if we have more VLANs than credits,
+ * or disable it if we've removed a non-configured vlan and all
+ * remaining vlans are now truly configured.
+ */
+
+ if (accept_any_vlan)
+ qede_config_accept_any_vlan(edev, true);
+ else if (!edev->non_configured_vlans)
+ qede_config_accept_any_vlan(edev, false);
+
+ return real_rc;
+}
+
+static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan = NULL;
+ int rc;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
+
+ /* Find whether entry exists */
+ list_for_each_entry(vlan, &edev->vlan_list, list)
+ if (vlan->vid == vid)
+ break;
+
+ if (!vlan || (vlan->vid != vid)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Vlan isn't configured\n");
+ return 0;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ /* As the interface is already down, we don't have a VPORT
+ * instance from which to remove the vlan filter, so just
+ * update the vlan list.
+ */
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, removing VLAN from list only\n");
+ qede_del_vlan_from_list(edev, vlan);
+ return 0;
+ }
+
+ /* Remove vlan */
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ return -EINVAL;
+ }
+
+ qede_del_vlan_from_list(edev, vlan);
+
+ /* We have removed a VLAN - try to see if we can
+ * configure non-configured VLAN from the list.
+ */
+ rc = qede_configure_vlan_filters(edev);
+
+ return rc;
+}
+
+static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+{
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return;
+
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (!vlan->configured)
+ continue;
+
+ vlan->configured = false;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans++;
+ edev->configured_vlans--;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "marked vlan %d as non-configured\n",
+ vlan->vid);
+ }
+
+ edev->accept_any_vlan = false;
+}
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1176,6 +1901,8 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_get_stats64 = qede_get_stats64,
};
@@ -1220,6 +1947,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
edev->num_tc = edev->dev_info.num_tc;
+ INIT_LIST_HEAD(&edev->vlan_list);
+
return edev;
}
@@ -1251,7 +1980,7 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
ndev->hw_features = hw_features;
@@ -1566,23 +2295,45 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
struct sw_rx_data *rx_buf;
- u8 *data;
+ struct page *data;
rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
data = rx_buf->data;
- dma_unmap_single(&edev->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- rxq->rx_buf_size, DMA_FROM_DEVICE);
+ dma_unmap_page(&edev->pdev->dev,
+ rx_buf->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rx_buf->data = NULL;
- kfree(data);
+ __free_page(data);
+ }
+}
+
+static void qede_free_sge_mem(struct qede_dev *edev,
+ struct qede_rx_queue *rxq) {
+ int i;
+
+ if (edev->gro_disable)
+ return;
+
+ for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+
+ if (replace_buf->data) {
+ dma_unmap_page(&edev->pdev->dev,
+ dma_unmap_addr(replace_buf, mapping),
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(replace_buf->data);
+ }
}
}
static void qede_free_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
{
+ qede_free_sge_mem(edev, rxq);
+
/* Free rx buffers */
qede_free_rx_buffers(edev, rxq);
@@ -1600,29 +2351,32 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct sw_rx_data *sw_rx_data;
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
+ struct page *data;
u16 rx_buf_size;
- u8 *data;
rx_buf_size = rxq->rx_buf_size;
- data = kmalloc(rx_buf_size, GFP_ATOMIC);
+ data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!data)) {
- DP_NOTICE(edev, "Failed to allocate Rx data\n");
+ DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
return -ENOMEM;
}
- mapping = dma_map_single(&edev->pdev->dev, data,
- rx_buf_size, DMA_FROM_DEVICE);
+ /* Map the entire page, as it will be split into
+ * multiple RX buffer segments.
+ */
+ mapping = dma_map_page(&edev->pdev->dev, data, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
- kfree(data);
+ __free_page(data);
DP_NOTICE(edev, "Failed to map Rx buffer\n");
return -ENOMEM;
}
sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->page_offset = 0;
sw_rx_data->data = data;
-
- dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+ sw_rx_data->mapping = mapping;
/* Advance PROD and get BD pointer */
rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
@@ -1635,24 +2389,75 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
return 0;
}
+static int qede_alloc_sge_mem(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ dma_addr_t mapping;
+ int i;
+
+ if (edev->gro_disable)
+ return 0;
+
+ if (edev->ndev->mtu > PAGE_SIZE) {
+ edev->gro_disable = 1;
+ return 0;
+ }
+
+ for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+ struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+
+ replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
+ if (unlikely(!replace_buf->data)) {
+ DP_NOTICE(edev,
+ "Failed to allocate TPA skb pool [replacement buffer]\n");
+ goto err;
+ }
+
+ mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
+ rxq->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev,
+ "Failed to map TPA replacement buffer\n");
+ goto err;
+ }
+
+ dma_unmap_addr_set(replace_buf, mapping, mapping);
+ tpa_info->replace_buf.page_offset = 0;
+
+ tpa_info->replace_buf_mapping = mapping;
+ tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+ }
+
+ return 0;
+err:
+ qede_free_sge_mem(edev, rxq);
+ edev->gro_disable = 1;
+ return -ENOMEM;
+}
+
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev,
struct qede_rx_queue *rxq)
{
- int i, rc, size, num_allocated;
+ int i, rc, size;
rxq->num_rx_buffers = edev->q_num_rx_buffers;
- rxq->rx_buf_size = NET_IP_ALIGN +
- ETH_OVERHEAD +
- edev->ndev->mtu +
- QEDE_FW_RX_ALIGN_END;
+ rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
+ edev->ndev->mtu;
+ if (rxq->rx_buf_size > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE;
+
+ /* Segment size to split a page into multiple equal parts */
+ rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
/* Allocate the parallel driver ring for Rx buffers */
- size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+ size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
if (!rxq->sw_rx_ring) {
DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ rc = -ENOMEM;
goto err;
}
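A worked example of the rx_buf_seg_size computation above, wrapped in a hypothetical helper (assumes a 1500-byte MTU and 4K pages):

/* MTU 1500 gives rx_buf_size of roughly 1518 plus overhead;
 * roundup_pow_of_two() then yields a 2048-byte segment, so each
 * page provides PAGE_SIZE / rx_buf_seg_size == 2 RX buffers
 * before it must be replaced.
 */
static unsigned int qede_bufs_per_page(u16 rx_buf_size)
{
	return PAGE_SIZE / roundup_pow_of_two(rx_buf_size);
}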
@@ -1660,7 +2465,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
- NUM_RX_BDS_MAX,
+ RX_RING_SIZE,
sizeof(struct eth_rx_bd),
&rxq->rx_bd_ring);
@@ -1671,7 +2476,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME,
QED_CHAIN_MODE_PBL,
- NUM_RX_BDS_MAX,
+ RX_RING_SIZE,
sizeof(union eth_rx_cqe),
&rxq->rx_comp_ring);
if (rc)
@@ -1680,24 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
/* Allocate buffers for the Rx ring */
for (i = 0; i < rxq->num_rx_buffers; i++) {
rc = qede_alloc_rx_buffer(edev, rxq);
- if (rc)
- break;
- }
- num_allocated = i;
- if (!num_allocated) {
- DP_ERR(edev, "Rx buffers allocation failed\n");
- goto err;
- } else if (num_allocated < rxq->num_rx_buffers) {
- DP_NOTICE(edev,
- "Allocated less buffers than desired (%d allocated)\n",
- num_allocated);
+ if (rc) {
+ DP_ERR(edev,
+ "Rx buffers allocation failed at index %d\n", i);
+ goto err;
+ }
}
- return 0;
-
+ rc = qede_alloc_sge_mem(edev, rxq);
err:
- qede_free_mem_rxq(edev, rxq);
- return -ENOMEM;
+ return rc;
}
static void qede_free_mem_txq(struct qede_dev *edev,
@@ -1780,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
}
return 0;
-
err:
- qede_free_mem_fp(edev, fp);
- return -ENOMEM;
+ return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
@@ -1806,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
struct qede_fastpath *fp = &edev->fp_array[rss_id];
rc = qede_alloc_mem_fp(edev, fp);
- if (rc)
- break;
- }
-
- if (rss_id != QEDE_RSS_CNT(edev)) {
- /* Failed allocating memory for all the queues */
- if (!rss_id) {
+ if (rc) {
DP_ERR(edev,
- "Failed to allocate memory for the leading queue\n");
- rc = -ENOMEM;
- } else {
- DP_NOTICE(edev,
- "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
- QEDE_RSS_CNT(edev), rss_id);
+ "Failed to allocate memory for fastpath - rss id = %d\n",
+ rss_id);
+ qede_free_mem_load(edev);
+ return rc;
}
- edev->num_rss = rss_id;
}
return 0;
@@ -1855,6 +2641,8 @@ static void qede_init_fp(struct qede_dev *edev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
edev->ndev->name, rss_id);
}
+
+ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
@@ -2088,11 +2876,12 @@ static int qede_stop_queues(struct qede_dev *edev)
static int qede_start_queues(struct qede_dev *edev)
{
int rc, tc, i;
- int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+ int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
+ struct qed_start_vport_params start = {0};
if (!edev->num_rss) {
DP_ERR(edev,
@@ -2100,10 +2889,13 @@ static int qede_start_queues(struct qede_dev *edev)
return -EINVAL;
}
- rc = edev->ops->vport_start(cdev, vport_id,
- edev->ndev->mtu,
- drop_ttl0_flg,
- vlan_removal_en);
+ start.gro_enable = !edev->gro_disable;
+ start.mtu = edev->ndev->mtu;
+ start.vport_id = 0;
+ start.drop_ttl0 = true;
+ start.remove_inner_vlan = vlan_removal_en;
+
+ rc = edev->ops->vport_start(cdev, &start);
if (rc) {
DP_ERR(edev, "Start V-PORT failed %d\n", rc);
@@ -2112,7 +2904,7 @@ static int qede_start_queues(struct qede_dev *edev)
DP_VERBOSE(edev, NETIF_MSG_IFUP,
"Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
- vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+ start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
for_each_rss(i) {
struct qede_fastpath *fp = &edev->fp_array[i];
@@ -2177,7 +2969,7 @@ static int qede_start_queues(struct qede_dev *edev)
/* Prepare and send the vport enable */
memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = vport_id;
+ vport_update_params.vport_id = start.vport_id;
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
@@ -2252,6 +3044,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
DP_INFO(edev, "Stopped Queues\n");
+ qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
/* Release the interrupts */
@@ -2320,6 +3113,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
edev->state = QEDE_STATE_OPEN;
mutex_unlock(&edev->qede_lock);
+ /* Program un-configured VLANs */
+ qede_configure_vlan_filters(edev);
+
/* Ask for link-up using current configuration */
memset(&link_params, 0, sizeof(link_params));
link_params.link_up = true;
@@ -2398,13 +3194,17 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
}
if (link->link_up) {
- DP_NOTICE(edev, "Link is up\n");
- netif_tx_start_all_queues(edev->ndev);
- netif_carrier_on(edev->ndev);
+ if (!netif_carrier_ok(edev->ndev)) {
+ DP_NOTICE(edev, "Link is up\n");
+ netif_tx_start_all_queues(edev->ndev);
+ netif_carrier_on(edev->ndev);
+ }
} else {
- DP_NOTICE(edev, "Link is down\n");
- netif_tx_disable(edev->ndev);
- netif_carrier_off(edev->ndev);
+ if (netif_carrier_ok(edev->ndev)) {
+ DP_NOTICE(edev, "Link is down\n");
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+ }
}
}
@@ -2580,6 +3380,17 @@ static void qede_config_rx_mode(struct net_device *ndev)
goto out;
}
+ /* take care of VLAN mode */
+ if (ndev->flags & IFF_PROMISC) {
+ qede_config_accept_any_vlan(edev, true);
+ } else if (!edev->non_configured_vlans) {
+ /* It's possible that accept_any_vlan mode is set due to a
+ * previous setting of IFF_PROMISC. If vlan credits are
+ * sufficient, disable accept_any_vlan.
+ */
+ qede_config_accept_any_vlan(edev, false);
+ }
+
rx_mode.filter.accept_flags = accept_flags;
edev->ops->filter_config(edev->cdev, &rx_mode);
out:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 46bbea8e023c..55007f1e6bbc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -566,6 +566,7 @@ struct qlcnic_adapter_stats {
u64 tx_dma_map_error;
u64 spurious_intr;
u64 mac_filter_limit_overrun;
+ u64 mbx_spurious_intr;
};
/*
@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
unsigned long status;
spinlock_t queue_lock; /* Mailbox queue lock */
spinlock_t aen_lock; /* Mailbox response/AEN lock */
- atomic_t rsp_status;
+ u32 rsp_status;
u32 num_cmds;
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 37a731be7d39..f9640d5ce6ba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
{
- atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
complete(&mbx->completion);
}
@@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
- if (atomic_read(&mbx->rsp_status) != rsp_status)
+ if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
out:
@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
} else {
- if (atomic_read(&mbx->rsp_status) != rsp_status)
+ if (mbx->rsp_status != rsp_status)
qlcnic_83xx_notify_mbx_response(mbx);
}
}
@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
+ u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_adapter *adapter = data;
struct qlcnic_mailbox *mbx;
- u32 mask, resp, event;
unsigned long flags;
mbx = adapter->ahw->mailbox;
@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
- else
- qlcnic_83xx_notify_mbx_response(mbx);
+ } else {
+ if (mbx->rsp_status != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ else
+ adapter->stats.mbx_spurious_intr++;
+ }
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
@@ -4050,10 +4054,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
struct qlcnic_adapter *adapter = mbx->adapter;
const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
struct device *dev = &adapter->pdev->dev;
- atomic_t *rsp_status = &mbx->rsp_status;
struct list_head *head = &mbx->cmd_q;
struct qlcnic_hardware_context *ahw;
struct qlcnic_cmd_args *cmd = NULL;
+ unsigned long flags;
ahw = adapter->ahw;
@@ -4063,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
return;
}
- atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
spin_lock(&mbx->queue_lock);
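The atomic_t is dropped because rsp_status is now only touched under aen_lock; a hedged sketch of the rule this establishes (illustration only, assuming the interrupt-side readers also run under aen_lock as the surrounding handlers suggest):

spin_lock_irqsave(&mbx->aen_lock, flags);
if (mbx->rsp_status != QLC_83XX_MBX_RESPONSE_ARRIVED)
	qlcnic_83xx_notify_mbx_response(mbx);
spin_unlock_irqrestore(&mbx->aen_lock, flags);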
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 494e8105adee..0a2318cad34d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
QLC_OFF(stats.mac_filter_limit_overrun)},
{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
QLC_OFF(stats.spurious_intr)},
-
+ {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
+ QLC_OFF(stats.mbx_spurious_intr)},
};
static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index ef332708e5f2..6d31f92ef2b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.34"
+#define DRV_VERSION "1.00.00.35"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 997976426799..b28e73ea2c25 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
return;
}
skb_reserve(new_skb, NET_IP_ALIGN);
+
+ pci_dma_sync_single_for_cpu(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
+
memcpy(skb_put(new_skb, length), skb->data, length);
+
+ pci_dma_sync_single_for_device(qdev->pdev,
+ dma_unmap_addr(sbq_desc, mapaddr),
+ dma_unmap_len(sbq_desc, maplen),
+ PCI_DMA_FROMDEVICE);
skb = new_skb;
/* Frame error, so drop the packet. */
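The added calls follow the standard streaming-DMA rule: a buffer the device still owns must be synced to the CPU before it is read, and handed back afterwards. A minimal generic sketch (modern dma_* spelling, hypothetical variables):

dma_sync_single_for_cpu(dev, mapaddr, maplen, DMA_FROM_DEVICE);
memcpy(dst, src, len);		/* CPU may now safely read the buffer */
dma_sync_single_for_device(dev, mapaddr, maplen, DMA_FROM_DEVICE);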
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index dd2cf3738b73..94f08f1e841c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1999,7 +1999,8 @@ static int rtl8169_set_speed(struct net_device *dev,
goto out;
if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
- (advertising & ADVERTISED_1000baseT_Full)) {
+ (advertising & ADVERTISED_1000baseT_Full) &&
+ !pci_is_pcie(tp->pci_dev)) {
mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
}
out:
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 270c4c9cac7f..4f132cf177cd 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_RENESAS
config SH_ETH
tristate "Renesas SuperH Ethernet support"
depends on HAS_DMA
- depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
+ depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
select CRC32
select MII
select MDIO_BITBANG
@@ -32,7 +32,7 @@ config SH_ETH
config RAVB
tristate "Renesas Ethernet AVB support"
depends on HAS_DMA
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select CRC32
select MII
select MDIO_BITBANG
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9fbe92ac225b..b2160d1b9c71 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -2,7 +2,7 @@
*
* Copyright (C) 2014-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
* Based on the SuperH Ethernet driver
*
@@ -837,6 +837,8 @@ static inline void ravb_write(struct net_device *ndev, u32 data,
iowrite32(data, priv->addr + reg);
}
+void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
+ u32 set);
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 86449c357168..9e2a0bd8f5a8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2014-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
* Based on the SuperH Ethernet driver
*
@@ -42,6 +42,12 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
+void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
+ u32 set)
+{
+ ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
+}
+
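As used throughout the remainder of this patch, the helper collapses the open-coded read-modify-write sequences; for example, from the ravb_config() hunk below:

/* before */
ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, CCC);
/* after */
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);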
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
int i;
@@ -59,8 +65,7 @@ static int ravb_config(struct net_device *ndev)
int error;
/* Set config mode */
- ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
- CCC);
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
/* Check if the operating mode is changed to the config mode */
error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
if (error)
@@ -72,13 +77,8 @@ static int ravb_config(struct net_device *ndev)
static void ravb_set_duplex(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- u32 ecmr = ravb_read(ndev, ECMR);
- if (priv->duplex) /* Full */
- ecmr |= ECMR_DM;
- else /* Half */
- ecmr &= ~ECMR_DM;
- ravb_write(ndev, ecmr, ECMR);
+ ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
}
static void ravb_set_rate(struct net_device *ndev)
@@ -92,8 +92,6 @@ static void ravb_set_rate(struct net_device *ndev)
case 1000: /* 1000BASE */
ravb_write(ndev, GECMR_SPEED_1000, GECMR);
break;
- default:
- break;
}
}
@@ -131,13 +129,8 @@ static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
struct ravb_private *priv = container_of(ctrl, struct ravb_private,
mdiobb);
- u32 pir = ravb_read(priv->ndev, PIR);
- if (set)
- pir |= mask;
- else
- pir &= ~mask;
- ravb_write(priv->ndev, pir, PIR);
+ ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
}
/* MDC pin control */
@@ -393,9 +386,9 @@ static int ravb_dmac_init(struct net_device *ndev)
ravb_ring_format(ndev, RAVB_NC);
#if defined(__LITTLE_ENDIAN)
- ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
+ ravb_modify(ndev, CCC, CCC_BOC, 0);
#else
- ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
+ ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
#endif
/* Set AVB RX */
@@ -418,8 +411,7 @@ static int ravb_dmac_init(struct net_device *ndev)
ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
/* Setting the control will start the AVB-DMAC process. */
- ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
- CCC);
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
return 0;
}
@@ -493,7 +485,7 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
break;
}
}
- ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
+ ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
}
}
@@ -613,13 +605,13 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
static void ravb_rcv_snd_disable(struct net_device *ndev)
{
/* Disable TX and RX */
- ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
+ ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}
static void ravb_rcv_snd_enable(struct net_device *ndev)
{
/* Enable TX and RX */
- ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+ ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
/* function for waiting dma process finished */
@@ -765,8 +757,8 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
}
- if (iss & ISS_CGIS)
- result = ravb_ptp_interrupt(ndev);
+ if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
+ result = IRQ_HANDLED;
mmiowb();
spin_unlock(&priv->lock);
@@ -812,8 +804,8 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Re-enable RX/TX interrupts */
spin_lock_irqsave(&priv->lock, flags);
- ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
- ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -852,8 +844,7 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_set_rate(ndev);
}
if (!priv->link) {
- ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
- ECMR);
+ ravb_modify(ndev, ECMR, ECMR_TXF, 0);
new_state = true;
priv->link = phydev->link;
if (priv->no_avb_link)
@@ -1386,18 +1377,18 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* TAG and timestamp required flag */
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- skb_tx_timestamp(skb);
desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
}
+ skb_tx_timestamp(skb);
/* Descriptor type must be set after all the above writes */
dma_wmb();
desc->die_dt = DT_FEND;
desc--;
desc->die_dt = DT_FSTART;
- ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
+ ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
priv->cur_tx[q] += NUM_TX_DESC;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
@@ -1472,15 +1463,10 @@ static void ravb_set_rx_mode(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned long flags;
- u32 ecmr;
spin_lock_irqsave(&priv->lock, flags);
- ecmr = ravb_read(ndev, ECMR);
- if (ndev->flags & IFF_PROMISC)
- ecmr |= ECMR_PRM;
- else
- ecmr &= ~ECMR_PRM;
- ravb_write(ndev, ecmr, ECMR);
+ ravb_modify(ndev, ECMR, ECMR_PRM,
+ ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1705,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
rate = clk_get_rate(clk);
clk_put(clk);
+ if (!rate)
+ return -EINVAL;
+
inc = 1000000000ULL << 20;
do_div(inc, rate);
@@ -1806,14 +1795,12 @@ static int ravb_probe(struct platform_device *pdev)
/* Set AVB config mode */
if (chip_id == RCAR_GEN2) {
- ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
- CCC_OPC_CONFIG, CCC);
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
/* Set CSEL value */
- ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) |
- CCC_CSEL_HPB, CCC);
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
} else {
- ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
- CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
+ CCC_GAC | CCC_CSEL_HPB);
}
/* Set GTI value */
@@ -1822,7 +1809,7 @@ static int ravb_probe(struct platform_device *pdev)
goto out_release;
/* Request GTI loading */
- ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 7a8ce920c49e..57992ccc4657 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@ static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
if (error)
return error;
- ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR);
+ ravb_modify(ndev, GCCR, request, request);
return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
}
@@ -185,7 +185,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
ptp.info);
struct net_device *ndev = priv->ndev;
unsigned long flags;
- u32 gic;
if (req->index)
return -EINVAL;
@@ -195,12 +194,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
priv->ptp.extts[req->index] = on;
spin_lock_irqsave(&priv->lock, flags);
- gic = ravb_read(ndev, GIC);
- if (on)
- gic |= GIC_PTCE;
- else
- gic &= ~GIC_PTCE;
- ravb_write(ndev, gic, GIC);
+ ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -216,7 +210,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
struct ravb_ptp_perout *perout;
unsigned long flags;
int error = 0;
- u32 gic;
if (req->index)
return -EINVAL;
@@ -248,9 +241,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
error = ravb_ptp_update_compare(priv, (u32)start_ns);
if (!error) {
/* Unmask interrupt */
- gic = ravb_read(ndev, GIC);
- gic |= GIC_PTME;
- ravb_write(ndev, gic, GIC);
+ ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
}
} else {
spin_lock_irqsave(&priv->lock, flags);
@@ -259,9 +250,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
perout->period = 0;
/* Mask interrupt */
- gic = ravb_read(ndev, GIC);
- gic &= ~GIC_PTME;
- ravb_write(ndev, gic, GIC);
+ ravb_modify(ndev, GIC, GIC_PTME, 0);
}
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -331,7 +320,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned long flags;
- u32 gccr;
priv->ptp.info = ravb_ptp_info;
@@ -340,8 +328,7 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
spin_lock_irqsave(&priv->lock, flags);
ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
- gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS;
- ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR);
+ ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 738449992876..ceea74cc2229 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3,7 +3,7 @@
* Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
* Copyright (C) 2008-2014 Renesas Solutions Corp.
- * Copyright (C) 2013-2014 Cogent Embedded, Inc.
+ * Copyright (C) 2013-2016 Cogent Embedded, Inc.
* Copyright (C) 2014 Codethink Limited
*
* This program is free software; you can redistribute it and/or modify it
@@ -428,6 +428,13 @@ static u32 sh_eth_read(struct net_device *ndev, int enum_index)
return ioread32(mdp->addr + offset);
}
+static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
+ u32 set)
+{
+ sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
+ enum_index);
+}
+
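Note that with clear == 0 and set == 0 the helper degenerates to reading a register and writing the value straight back, which this patch uses to acknowledge write-1-to-clear status registers; a hedged reading of the EESR change later in this diff:

sh_eth_modify(ndev, EESR, 0, 0);	/* write EESR back to itself,
					 * clearing its W1C status bits */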
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -440,8 +447,8 @@ static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
static void sh_eth_select_mii(struct net_device *ndev)
{
- u32 value = 0x0;
struct sh_eth_private *mdp = netdev_priv(ndev);
+ u32 value;
switch (mdp->phy_interface) {
case PHY_INTERFACE_MODE_GMII:
@@ -467,10 +474,7 @@ static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- if (mdp->duplex) /* Full */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
- else /* Half */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+ sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}
static void sh_eth_chip_reset(struct net_device *ndev)
@@ -496,8 +500,6 @@ static void sh_eth_set_rate_gether(struct net_device *ndev)
case 1000: /* 1000BASE */
sh_eth_write(ndev, GECMR_1000, GECMR);
break;
- default:
- break;
}
}
@@ -583,12 +585,10 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
switch (mdp->speed) {
case 10: /* 10BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
+ sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
break;
case 100:/* 100BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
- break;
- default:
+ sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
break;
}
}
@@ -649,12 +649,10 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev)
switch (mdp->speed) {
case 10: /* 10BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
+ sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
break;
case 100:/* 100BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
- break;
- default:
+ sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
break;
}
}
@@ -694,8 +692,6 @@ static void sh_eth_set_rate_sh7757(struct net_device *ndev)
case 100:/* 100BASE */
sh_eth_write(ndev, 1, RTRATE);
break;
- default:
- break;
}
}
@@ -763,8 +759,6 @@ static void sh_eth_set_rate_giga(struct net_device *ndev)
case 1000: /* 1000BASE */
sh_eth_write(ndev, 0x00000020, GECMR);
break;
- default:
- break;
}
}
@@ -924,8 +918,7 @@ static int sh_eth_reset(struct net_device *ndev)
if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_write(ndev, EDSR_ENALL, EDSR);
- sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
- EDMR);
+ sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
ret = sh_eth_check_reset(ndev);
if (ret)
@@ -949,11 +942,9 @@ static int sh_eth_reset(struct net_device *ndev)
if (mdp->cd->select_mii)
sh_eth_select_mii(ndev);
} else {
- sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
- EDMR);
+ sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
mdelay(3);
- sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
- EDMR);
+ sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
}
return ret;
@@ -1136,11 +1127,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
sh_eth_set_receive_align(skb);
- /* RX descriptor */
- rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
- rxdesc->len = cpu_to_le32(buf_len << 16);
dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
@@ -1148,6 +1136,10 @@ static void sh_eth_ring_format(struct net_device *ndev)
break;
}
mdp->rx_skbuff[i] = skb;
+
+ /* RX descriptor */
+ rxdesc = &mdp->rx_ring[i];
+ rxdesc->len = cpu_to_le32(buf_len << 16);
rxdesc->addr = cpu_to_le32(dma_addr);
rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
@@ -1163,7 +1155,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
- rxdesc->status |= cpu_to_le32(RD_RDLE);
+ if (rxdesc)
+ rxdesc->status |= cpu_to_le32(RD_RDLE);
memset(mdp->tx_ring, 0, tx_ringsize);
@@ -1238,8 +1231,8 @@ ring_free:
static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
- int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
/* Soft Reset */
ret = sh_eth_reset(ndev);
@@ -1285,7 +1278,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
RFLR);
- sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
+ sh_eth_modify(ndev, EESR, 0, 0);
if (start) {
mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
@@ -1319,8 +1312,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
if (start) {
/* Setting the Rx mode will start the Rx process. */
sh_eth_write(ndev, EDRRR_R, EDRRR);
-
- netif_start_queue(ndev);
}
return ret;
@@ -1362,7 +1353,7 @@ static int sh_eth_txfree(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
int free_num = 0;
- int entry = 0;
+ int entry;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
entry = mdp->dirty_tx % mdp->num_tx_ring;
@@ -1403,10 +1394,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
int limit;
struct sk_buff *skb;
- u16 pkt_len = 0;
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
+ u16 pkt_len;
u32 buf_len;
boguscnt = min(boguscnt, *quota);
@@ -1532,15 +1523,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
/* disable tx and rx */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
- ~(ECMR_RE | ECMR_TE), ECMR);
+ sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}
static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
/* enable tx and rx */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
- (ECMR_RE | ECMR_TE), ECMR);
+ sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}
/* error control function */
@@ -1569,13 +1558,11 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
sh_eth_rcv_snd_disable(ndev);
} else {
/* Link Up */
- sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
- ~DMAC_M_ECI, EESIPR);
+ sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
/* clear int */
- sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
- ECSR);
- sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
- DMAC_M_ECI, EESIPR);
+ sh_eth_modify(ndev, ECSR, 0, 0);
+ sh_eth_modify(ndev, EESIPR, DMAC_M_ECI,
+ DMAC_M_ECI);
/* enable tx and rx */
sh_eth_rcv_snd_enable(ndev);
}
@@ -1765,9 +1752,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->cd->set_rate(ndev);
}
if (!mdp->link) {
- sh_eth_write(ndev,
- sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
- ECMR);
+ sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
new_state = 1;
mdp->link = phydev->link;
if (mdp->cd->no_psr || mdp->no_ether_link)
@@ -1791,7 +1776,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
{
struct device_node *np = ndev->dev.parent->of_node;
struct sh_eth_private *mdp = netdev_priv(ndev);
- struct phy_device *phydev = NULL;
+ struct phy_device *phydev;
mdp->link = 0;
mdp->speed = 0;
@@ -2209,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
__func__);
return ret;
}
- ret = sh_eth_dev_init(ndev, false);
+ ret = sh_eth_dev_init(ndev, true);
if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
__func__);
return ret;
}
- mdp->irq_enabled = true;
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
netif_device_attach(ndev);
}
@@ -2245,8 +2226,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
- int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
pm_runtime_get_sync(&mdp->pdev->dev);
@@ -2274,6 +2255,8 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
+ netif_start_queue(ndev);
+
mdp->is_opened = 1;
return ret;
@@ -2317,6 +2300,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* device init */
sh_eth_dev_init(ndev, true);
+
+ netif_start_queue(ndev);
}
/* Packet transmit function */
@@ -2922,8 +2907,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_FAST_SH3_SH2:
reg_offset = sh_eth_offset_fast_sh3_sh2;
break;
- default:
- break;
}
return reg_offset;
@@ -3003,12 +2986,12 @@ static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
static int sh_eth_drv_probe(struct platform_device *pdev)
{
- int ret, devno = 0;
struct resource *res;
- struct net_device *ndev = NULL;
- struct sh_eth_private *mdp = NULL;
struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct sh_eth_private *mdp;
+ struct net_device *ndev;
+ int ret, devno;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile
index f85fb12f36f1..faa36acee223 100644
--- a/drivers/net/ethernet/rocker/Makefile
+++ b/drivers/net/ethernet/rocker/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_ROCKER) += rocker.o
+rocker-y := rocker_main.o rocker_tlv.o rocker_ofdpa.o
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
deleted file mode 100644
index 166a7fc87e2f..000000000000
--- a/drivers/net/ethernet/rocker/rocker.c
+++ /dev/null
@@ -1,5495 +0,0 @@
-/*
- * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
- * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
- * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/hashtable.h>
-#include <linux/crc32.h>
-#include <linux/sort.h>
-#include <linux/random.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/if_bridge.h>
-#include <linux/bitops.h>
-#include <linux/ctype.h>
-#include <net/switchdev.h>
-#include <net/rtnetlink.h>
-#include <net/ip_fib.h>
-#include <net/netevent.h>
-#include <net/arp.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <generated/utsrelease.h>
-
-#include "rocker.h"
-
-static const char rocker_driver_name[] = "rocker";
-
-static const struct pci_device_id rocker_pci_id_table[] = {
- {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
- {0, }
-};
-
-struct rocker_flow_tbl_key {
- u32 priority;
- enum rocker_of_dpa_table_id tbl_id;
- union {
- struct {
- u32 in_pport;
- u32 in_pport_mask;
- enum rocker_of_dpa_table_id goto_tbl;
- } ig_port;
- struct {
- u32 in_pport;
- __be16 vlan_id;
- __be16 vlan_id_mask;
- enum rocker_of_dpa_table_id goto_tbl;
- bool untagged;
- __be16 new_vlan_id;
- } vlan;
- struct {
- u32 in_pport;
- u32 in_pport_mask;
- __be16 eth_type;
- u8 eth_dst[ETH_ALEN];
- u8 eth_dst_mask[ETH_ALEN];
- __be16 vlan_id;
- __be16 vlan_id_mask;
- enum rocker_of_dpa_table_id goto_tbl;
- bool copy_to_cpu;
- } term_mac;
- struct {
- __be16 eth_type;
- __be32 dst4;
- __be32 dst4_mask;
- enum rocker_of_dpa_table_id goto_tbl;
- u32 group_id;
- } ucast_routing;
- struct {
- u8 eth_dst[ETH_ALEN];
- u8 eth_dst_mask[ETH_ALEN];
- int has_eth_dst;
- int has_eth_dst_mask;
- __be16 vlan_id;
- u32 tunnel_id;
- enum rocker_of_dpa_table_id goto_tbl;
- u32 group_id;
- bool copy_to_cpu;
- } bridge;
- struct {
- u32 in_pport;
- u32 in_pport_mask;
- u8 eth_src[ETH_ALEN];
- u8 eth_src_mask[ETH_ALEN];
- u8 eth_dst[ETH_ALEN];
- u8 eth_dst_mask[ETH_ALEN];
- __be16 eth_type;
- __be16 vlan_id;
- __be16 vlan_id_mask;
- u8 ip_proto;
- u8 ip_proto_mask;
- u8 ip_tos;
- u8 ip_tos_mask;
- u32 group_id;
- } acl;
- };
-};
-
-struct rocker_flow_tbl_entry {
- struct hlist_node entry;
- u32 cmd;
- u64 cookie;
- struct rocker_flow_tbl_key key;
- size_t key_len;
- u32 key_crc32; /* key */
-};
-
-struct rocker_group_tbl_entry {
- struct hlist_node entry;
- u32 cmd;
- u32 group_id; /* key */
- u16 group_count;
- u32 *group_ids;
- union {
- struct {
- u8 pop_vlan;
- } l2_interface;
- struct {
- u8 eth_src[ETH_ALEN];
- u8 eth_dst[ETH_ALEN];
- __be16 vlan_id;
- u32 group_id;
- } l2_rewrite;
- struct {
- u8 eth_src[ETH_ALEN];
- u8 eth_dst[ETH_ALEN];
- __be16 vlan_id;
- bool ttl_check;
- u32 group_id;
- } l3_unicast;
- };
-};
-
-struct rocker_fdb_tbl_entry {
- struct hlist_node entry;
- u32 key_crc32; /* key */
- bool learned;
- unsigned long touched;
- struct rocker_fdb_tbl_key {
- struct rocker_port *rocker_port;
- u8 addr[ETH_ALEN];
- __be16 vlan_id;
- } key;
-};
-
-struct rocker_internal_vlan_tbl_entry {
- struct hlist_node entry;
- int ifindex; /* key */
- u32 ref_count;
- __be16 vlan_id;
-};
-
-struct rocker_neigh_tbl_entry {
- struct hlist_node entry;
- __be32 ip_addr; /* key */
- struct net_device *dev;
- u32 ref_count;
- u32 index;
- u8 eth_dst[ETH_ALEN];
- bool ttl_check;
-};
-
-struct rocker_desc_info {
- char *data; /* mapped */
- size_t data_size;
- size_t tlv_size;
- struct rocker_desc *desc;
- dma_addr_t mapaddr;
-};
-
-struct rocker_dma_ring_info {
- size_t size;
- u32 head;
- u32 tail;
- struct rocker_desc *desc; /* mapped */
- dma_addr_t mapaddr;
- struct rocker_desc_info *desc_info;
- unsigned int type;
-};
-
-struct rocker;
-
-enum {
- ROCKER_CTRL_LINK_LOCAL_MCAST,
- ROCKER_CTRL_LOCAL_ARP,
- ROCKER_CTRL_IPV4_MCAST,
- ROCKER_CTRL_IPV6_MCAST,
- ROCKER_CTRL_DFLT_BRIDGING,
- ROCKER_CTRL_DFLT_OVS,
- ROCKER_CTRL_MAX,
-};
-
-#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
-#define ROCKER_N_INTERNAL_VLANS 255
-#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
-#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
-
-struct rocker_port {
- struct net_device *dev;
- struct net_device *bridge_dev;
- struct rocker *rocker;
- unsigned int port_number;
- u32 pport;
- __be16 internal_vlan_id;
- int stp_state;
- u32 brport_flags;
- unsigned long ageing_time;
- bool ctrls[ROCKER_CTRL_MAX];
- unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
- struct napi_struct napi_tx;
- struct napi_struct napi_rx;
- struct rocker_dma_ring_info tx_ring;
- struct rocker_dma_ring_info rx_ring;
-};
-
-struct rocker {
- struct pci_dev *pdev;
- u8 __iomem *hw_addr;
- struct msix_entry *msix_entries;
- unsigned int port_count;
- struct rocker_port **ports;
- struct {
- u64 id;
- } hw;
- spinlock_t cmd_ring_lock; /* for cmd ring accesses */
- struct rocker_dma_ring_info cmd_ring;
- struct rocker_dma_ring_info event_ring;
- DECLARE_HASHTABLE(flow_tbl, 16);
- spinlock_t flow_tbl_lock; /* for flow tbl accesses */
- u64 flow_tbl_next_cookie;
- DECLARE_HASHTABLE(group_tbl, 16);
- spinlock_t group_tbl_lock; /* for group tbl accesses */
- struct timer_list fdb_cleanup_timer;
- DECLARE_HASHTABLE(fdb_tbl, 16);
- spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
- unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
- DECLARE_HASHTABLE(internal_vlan_tbl, 8);
- spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
- DECLARE_HASHTABLE(neigh_tbl, 16);
- spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
- u32 neigh_tbl_next_index;
-};
-
-static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
-static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
-static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
-static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
-static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
-static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
-static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
-
-/* Rocker priority levels for flow table entries. Higher
- * priority match takes precedence over lower priority match.
- */
-
-enum {
- ROCKER_PRIORITY_UNKNOWN = 0,
- ROCKER_PRIORITY_IG_PORT = 1,
- ROCKER_PRIORITY_VLAN = 1,
- ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
- ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
- ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
- ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
- ROCKER_PRIORITY_BRIDGING_VLAN = 3,
- ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
- ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
- ROCKER_PRIORITY_BRIDGING_TENANT = 3,
- ROCKER_PRIORITY_ACL_CTRL = 3,
- ROCKER_PRIORITY_ACL_NORMAL = 2,
- ROCKER_PRIORITY_ACL_DFLT = 1,
-};
-
-static bool rocker_vlan_id_is_internal(__be16 vlan_id)
-{
- u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
- u16 end = 0xffe;
- u16 _vlan_id = ntohs(vlan_id);
-
- return (_vlan_id >= start && _vlan_id <= end);
-}
-
-static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
- u16 vid, bool *pop_vlan)
-{
- __be16 vlan_id;
-
- if (pop_vlan)
- *pop_vlan = false;
- vlan_id = htons(vid);
- if (!vlan_id) {
- vlan_id = rocker_port->internal_vlan_id;
- if (pop_vlan)
- *pop_vlan = true;
- }
-
- return vlan_id;
-}
-
-static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
- __be16 vlan_id)
-{
- if (rocker_vlan_id_is_internal(vlan_id))
- return 0;
-
- return ntohs(vlan_id);
-}
-
-static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
-{
- return rocker_port->bridge_dev &&
- netif_is_bridge_master(rocker_port->bridge_dev);
-}
-
-static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
-{
- return rocker_port->bridge_dev &&
- netif_is_ovs_master(rocker_port->bridge_dev);
-}
-
-#define ROCKER_OP_FLAG_REMOVE BIT(0)
-#define ROCKER_OP_FLAG_NOWAIT BIT(1)
-#define ROCKER_OP_FLAG_LEARNED BIT(2)
-#define ROCKER_OP_FLAG_REFRESH BIT(3)
-
-static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- size_t size)
-{
- struct switchdev_trans_item *elem = NULL;
- gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
- GFP_ATOMIC : GFP_KERNEL;
-
- /* If in transaction prepare phase, allocate the memory
- * and enqueue it on a transaction. If in transaction
- * commit phase, dequeue the memory from the transaction
- * rather than re-allocating the memory. The idea is the
- * driver code paths for prepare and commit are identical
- * so the memory allocated in the prepare phase is the
- * memory used in the commit phase.
- */
-
- if (!trans) {
- elem = kzalloc(size + sizeof(*elem), gfp_flags);
- } else if (switchdev_trans_ph_prepare(trans)) {
- elem = kzalloc(size + sizeof(*elem), gfp_flags);
- if (!elem)
- return NULL;
- switchdev_trans_item_enqueue(trans, elem, kfree, elem);
- } else {
- elem = switchdev_trans_item_dequeue(trans);
- }
-
- return elem ? elem + 1 : NULL;
-}
-
-static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- size_t size)
-{
- return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
-}
-
-static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- size_t n, size_t size)
-{
- return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
-}
-
-static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
-{
- struct switchdev_trans_item *elem;
-
- /* Frees are ignored if in transaction prepare phase. The
- * memory remains on the per-port list until freed in the
- * commit phase.
- */
-
- if (switchdev_trans_ph_prepare(trans))
- return;
-
- elem = (struct switchdev_trans_item *) mem - 1;
- kfree(elem);
-}
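/* A minimal usage sketch of the two-phase contract described above (the
 * "foo" structure is hypothetical): the same code path runs once in the
 * prepare phase and once in the commit phase, so both phases see the same
 * allocation and only the prepare phase can fail:
 *
 *	struct foo *foo;
 *
 *	foo = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*foo));
 *	if (!foo)
 *		return -ENOMEM;
 *	...
 *	rocker_port_kfree(trans, foo);	(a no-op while preparing)
 */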
-
-struct rocker_wait {
- wait_queue_head_t wait;
- bool done;
- bool nowait;
-};
-
-static void rocker_wait_reset(struct rocker_wait *wait)
-{
- wait->done = false;
- wait->nowait = false;
-}
-
-static void rocker_wait_init(struct rocker_wait *wait)
-{
- init_waitqueue_head(&wait->wait);
- rocker_wait_reset(wait);
-}
-
-static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- int flags)
-{
- struct rocker_wait *wait;
-
- wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
- if (!wait)
- return NULL;
- rocker_wait_init(wait);
- return wait;
-}
-
-static void rocker_wait_destroy(struct switchdev_trans *trans,
- struct rocker_wait *wait)
-{
- rocker_port_kfree(trans, wait);
-}
-
-static bool rocker_wait_event_timeout(struct rocker_wait *wait,
- unsigned long timeout)
-{
- wait_event_timeout(wait->wait, wait->done, timeout);
- return wait->done;
-}
-
-static void rocker_wait_wake_up(struct rocker_wait *wait)
-{
- wait->done = true;
- wake_up(&wait->wait);
-}
-
-static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
-{
- return rocker->msix_entries[vector].vector;
-}
-
-static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
-{
- return rocker_msix_vector(rocker_port->rocker,
- ROCKER_MSIX_VEC_TX(rocker_port->port_number));
-}
-
-static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
-{
- return rocker_msix_vector(rocker_port->rocker,
- ROCKER_MSIX_VEC_RX(rocker_port->port_number));
-}
-
-#define rocker_write32(rocker, reg, val) \
- writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
-#define rocker_read32(rocker, reg) \
- readl((rocker)->hw_addr + (ROCKER_ ## reg))
-#define rocker_write64(rocker, reg, val) \
- writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
-#define rocker_read64(rocker, reg) \
- readq((rocker)->hw_addr + (ROCKER_ ## reg))
-
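/* readq()/writeq() are not native on 32-bit targets; the
 * <linux/io-64-nonatomic-lo-hi.h> include at the top of this file supplies
 * fallbacks that split each 64-bit access into two 32-bit ones, low word
 * first, so the 64-bit register accesses below are not atomic there.
 */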
-/*****************************
- * HW basic testing functions
- *****************************/
-
-static int rocker_reg_test(const struct rocker *rocker)
-{
- const struct pci_dev *pdev = rocker->pdev;
- u64 test_reg;
- u64 rnd;
-
- rnd = prandom_u32();
- rnd >>= 1;
- rocker_write32(rocker, TEST_REG, rnd);
- test_reg = rocker_read32(rocker, TEST_REG);
- if (test_reg != rnd * 2) {
- dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
- test_reg, rnd * 2);
- return -EIO;
- }
-
- rnd = prandom_u32();
- rnd <<= 31;
- rnd |= prandom_u32();
- rocker_write64(rocker, TEST_REG64, rnd);
- test_reg = rocker_read64(rocker, TEST_REG64);
- if (test_reg != rnd * 2) {
- dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
- test_reg, rnd * 2);
- return -EIO;
- }
-
- return 0;
-}
-
-static int rocker_dma_test_one(const struct rocker *rocker,
- struct rocker_wait *wait, u32 test_type,
- dma_addr_t dma_handle, const unsigned char *buf,
- const unsigned char *expect, size_t size)
-{
- const struct pci_dev *pdev = rocker->pdev;
- int i;
-
- rocker_wait_reset(wait);
- rocker_write32(rocker, TEST_DMA_CTRL, test_type);
-
- if (!rocker_wait_event_timeout(wait, HZ / 10)) {
- dev_err(&pdev->dev, "no interrupt received within a timeout\n");
- return -EIO;
- }
-
- for (i = 0; i < size; i++) {
- if (buf[i] != expect[i]) {
- dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
- buf[i], i, expect[i]);
- return -EIO;
- }
- }
- return 0;
-}
-
-#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
-#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
-
-static int rocker_dma_test_offset(const struct rocker *rocker,
- struct rocker_wait *wait, int offset)
-{
- struct pci_dev *pdev = rocker->pdev;
- unsigned char *alloc;
- unsigned char *buf;
- unsigned char *expect;
- dma_addr_t dma_handle;
- int i;
- int err;
-
- alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
- GFP_KERNEL | GFP_DMA);
- if (!alloc)
- return -ENOMEM;
- buf = alloc + offset;
- expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
-
- dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, dma_handle)) {
- err = -EIO;
- goto free_alloc;
- }
-
- rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
- rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
-
- memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
- err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
- dma_handle, buf, expect,
- ROCKER_TEST_DMA_BUF_SIZE);
- if (err)
- goto unmap;
-
- memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
- err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
- dma_handle, buf, expect,
- ROCKER_TEST_DMA_BUF_SIZE);
- if (err)
- goto unmap;
-
- prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
- for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
- expect[i] = ~buf[i];
- err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
- dma_handle, buf, expect,
- ROCKER_TEST_DMA_BUF_SIZE);
- if (err)
- goto unmap;
-
-unmap:
- pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
-free_alloc:
- kfree(alloc);
-
- return err;
-}
-
-static int rocker_dma_test(const struct rocker *rocker,
- struct rocker_wait *wait)
-{
- int i;
- int err;
-
- for (i = 0; i < 8; i++) {
- err = rocker_dma_test_offset(rocker, wait, i);
- if (err)
- return err;
- }
- return 0;
-}
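/* The 0..7 byte offsets walk the test buffer across every alignment within
 * an 8-byte window, so the fill/clear/invert passes above also check that
 * the device copes with unaligned DMA addresses.
 */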
-
-static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
-{
- struct rocker_wait *wait = dev_id;
-
- rocker_wait_wake_up(wait);
-
- return IRQ_HANDLED;
-}
-
-static int rocker_basic_hw_test(const struct rocker *rocker)
-{
- const struct pci_dev *pdev = rocker->pdev;
- struct rocker_wait wait;
- int err;
-
- err = rocker_reg_test(rocker);
- if (err) {
- dev_err(&pdev->dev, "reg test failed\n");
- return err;
- }
-
- err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
- rocker_test_irq_handler, 0,
- rocker_driver_name, &wait);
- if (err) {
- dev_err(&pdev->dev, "cannot assign test irq\n");
- return err;
- }
-
- rocker_wait_init(&wait);
- rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
-
- if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
- dev_err(&pdev->dev, "no interrupt received within a timeout\n");
- err = -EIO;
- goto free_irq;
- }
-
- err = rocker_dma_test(rocker, &wait);
- if (err)
- dev_err(&pdev->dev, "dma test failed\n");
-
-free_irq:
- free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
- return err;
-}
-
-/******
- * TLV
- ******/
-
-#define ROCKER_TLV_ALIGNTO 8U
-#define ROCKER_TLV_ALIGN(len) \
- (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
-#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
-
-/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
- * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
- * | Header | Pad | Payload | Pad |
- * | (struct rocker_tlv) | ing | | ing |
- * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
- * <--------------------------- tlv->len -------------------------->
- */
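/* Worked example, assuming the aligned header (ROCKER_TLV_HDRLEN) is 8
 * bytes: a 6-byte payload such as a MAC address gives
 * rocker_tlv_attr_size(6) = 8 + 6 = 14 (stored in tlv->len),
 * rocker_tlv_total_size(6) = ROCKER_TLV_ALIGN(14) = 16, and
 * rocker_tlv_padlen(6) = 16 - 14 = 2 trailing padding bytes.
 */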
-
-static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
- int *remaining)
-{
- int totlen = ROCKER_TLV_ALIGN(tlv->len);
-
- *remaining -= totlen;
- return (struct rocker_tlv *) ((char *) tlv + totlen);
-}
-
-static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
-{
- return remaining >= (int) ROCKER_TLV_HDRLEN &&
- tlv->len >= ROCKER_TLV_HDRLEN &&
- tlv->len <= remaining;
-}
-
-#define rocker_tlv_for_each(pos, head, len, rem) \
- for (pos = head, rem = len; \
- rocker_tlv_ok(pos, rem); \
- pos = rocker_tlv_next(pos, &(rem)))
-
-#define rocker_tlv_for_each_nested(pos, tlv, rem) \
- rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
- rocker_tlv_len(tlv), rem)
-
-static int rocker_tlv_attr_size(int payload)
-{
- return ROCKER_TLV_HDRLEN + payload;
-}
-
-static int rocker_tlv_total_size(int payload)
-{
- return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
-}
-
-static int rocker_tlv_padlen(int payload)
-{
- return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
-}
-
-static int rocker_tlv_type(const struct rocker_tlv *tlv)
-{
- return tlv->type;
-}
-
-static void *rocker_tlv_data(const struct rocker_tlv *tlv)
-{
- return (char *) tlv + ROCKER_TLV_HDRLEN;
-}
-
-static int rocker_tlv_len(const struct rocker_tlv *tlv)
-{
- return tlv->len - ROCKER_TLV_HDRLEN;
-}
-
-static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
-{
- return *(u8 *) rocker_tlv_data(tlv);
-}
-
-static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
-{
- return *(u16 *) rocker_tlv_data(tlv);
-}
-
-static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
-{
- return *(__be16 *) rocker_tlv_data(tlv);
-}
-
-static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
-{
- return *(u32 *) rocker_tlv_data(tlv);
-}
-
-static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
-{
- return *(u64 *) rocker_tlv_data(tlv);
-}
-
-static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
- const char *buf, int buf_len)
-{
- const struct rocker_tlv *tlv;
- const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
- int rem;
-
- memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
-
- rocker_tlv_for_each(tlv, head, buf_len, rem) {
- u32 type = rocker_tlv_type(tlv);
-
- if (type > 0 && type <= maxtype)
- tb[type] = tlv;
- }
-}
-
-static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
- const struct rocker_tlv *tlv)
-{
- rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
- rocker_tlv_len(tlv));
-}
-
-static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
- const struct rocker_desc_info *desc_info)
-{
- rocker_tlv_parse(tb, maxtype, desc_info->data,
- desc_info->desc->tlv_size);
-}
-
-static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
-{
- return (struct rocker_tlv *) ((char *) desc_info->data +
- desc_info->tlv_size);
-}
-
-static int rocker_tlv_put(struct rocker_desc_info *desc_info,
- int attrtype, int attrlen, const void *data)
-{
- int tail_room = desc_info->data_size - desc_info->tlv_size;
- int total_size = rocker_tlv_total_size(attrlen);
- struct rocker_tlv *tlv;
-
- if (unlikely(tail_room < total_size))
- return -EMSGSIZE;
-
- tlv = rocker_tlv_start(desc_info);
- desc_info->tlv_size += total_size;
- tlv->type = attrtype;
- tlv->len = rocker_tlv_attr_size(attrlen);
- memcpy(rocker_tlv_data(tlv), data, attrlen);
- memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
- return 0;
-}
-
-static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
- int attrtype, u8 value)
-{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
-}
-
-static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
- int attrtype, u16 value)
-{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
-}
-
-static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
- int attrtype, __be16 value)
-{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
-}
-
-static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
- int attrtype, u32 value)
-{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
-}
-
-static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
- int attrtype, __be32 value)
-{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
-}
-
-static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
- int attrtype, u64 value)
-{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
-}
-
-static struct rocker_tlv *
-rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
-{
- struct rocker_tlv *start = rocker_tlv_start(desc_info);
-
- if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
- return NULL;
-
- return start;
-}
-
-static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
- struct rocker_tlv *start)
-{
- start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
-}
-
-static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
- const struct rocker_tlv *start)
-{
- desc_info->tlv_size = (const char *) start - desc_info->data;
-}
-
-/******************************************
- * DMA rings and descriptors manipulations
- ******************************************/
-
-static u32 __pos_inc(u32 pos, size_t limit)
-{
- return ++pos == limit ? 0 : pos;
-}
-
-static int rocker_desc_err(const struct rocker_desc_info *desc_info)
-{
- int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
-
- switch (err) {
- case ROCKER_OK:
- return 0;
- case -ROCKER_ENOENT:
- return -ENOENT;
- case -ROCKER_ENXIO:
- return -ENXIO;
- case -ROCKER_ENOMEM:
- return -ENOMEM;
- case -ROCKER_EEXIST:
- return -EEXIST;
- case -ROCKER_EINVAL:
- return -EINVAL;
- case -ROCKER_EMSGSIZE:
- return -EMSGSIZE;
- case -ROCKER_ENOTSUP:
- return -EOPNOTSUPP;
- case -ROCKER_ENOBUFS:
- return -ENOBUFS;
- }
-
- return -EINVAL;
-}
-
-static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
-{
- desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
-}
-
-static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
-{
- u32 comp_err = desc_info->desc->comp_err;
-
- return !!(comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN);
-}
-
-static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
-{
- return (void *)(uintptr_t)desc_info->desc->cookie;
-}
-
-static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
- void *ptr)
-{
- desc_info->desc->cookie = (uintptr_t) ptr;
-}
-
-static struct rocker_desc_info *
-rocker_desc_head_get(const struct rocker_dma_ring_info *info)
-{
- struct rocker_desc_info *desc_info;
- u32 head = __pos_inc(info->head, info->size);
-
- desc_info = &info->desc_info[info->head];
- if (head == info->tail)
- return NULL; /* ring full */
- desc_info->tlv_size = 0;
- return desc_info;
-}
-
-static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
-{
- desc_info->desc->buf_size = desc_info->data_size;
- desc_info->desc->tlv_size = desc_info->tlv_size;
-}
-
-static void rocker_desc_head_set(const struct rocker *rocker,
- struct rocker_dma_ring_info *info,
- const struct rocker_desc_info *desc_info)
-{
- u32 head = __pos_inc(info->head, info->size);
-
- BUG_ON(head == info->tail);
- rocker_desc_commit(desc_info);
- info->head = head;
- rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
-}
-
-static struct rocker_desc_info *
-rocker_desc_tail_get(struct rocker_dma_ring_info *info)
-{
- struct rocker_desc_info *desc_info;
-
- if (info->tail == info->head)
- return NULL; /* nothing to be done between head and tail */
- desc_info = &info->desc_info[info->tail];
- if (!rocker_desc_gen(desc_info))
- return NULL; /* gen bit not set, desc is not ready yet */
- info->tail = __pos_inc(info->tail, info->size);
- desc_info->tlv_size = desc_info->desc->tlv_size;
- return desc_info;
-}
-
-static void rocker_dma_ring_credits_set(const struct rocker *rocker,
- const struct rocker_dma_ring_info *info,
- u32 credits)
-{
- if (credits)
- rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
-}
-
-static unsigned long rocker_dma_ring_size_fix(size_t size)
-{
- return max(ROCKER_DMA_SIZE_MIN,
- min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
-}
-
-static int rocker_dma_ring_create(const struct rocker *rocker,
- unsigned int type,
- size_t size,
- struct rocker_dma_ring_info *info)
-{
- int i;
-
- BUG_ON(size != rocker_dma_ring_size_fix(size));
- info->size = size;
- info->type = type;
- info->head = 0;
- info->tail = 0;
- info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
- GFP_KERNEL);
- if (!info->desc_info)
- return -ENOMEM;
-
- info->desc = pci_alloc_consistent(rocker->pdev,
- info->size * sizeof(*info->desc),
- &info->mapaddr);
- if (!info->desc) {
- kfree(info->desc_info);
- return -ENOMEM;
- }
-
- for (i = 0; i < info->size; i++)
- info->desc_info[i].desc = &info->desc[i];
-
- rocker_write32(rocker, DMA_DESC_CTRL(info->type),
- ROCKER_DMA_DESC_CTRL_RESET);
- rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
- rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
-
- return 0;
-}
-
-static void rocker_dma_ring_destroy(const struct rocker *rocker,
- const struct rocker_dma_ring_info *info)
-{
- rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
-
- pci_free_consistent(rocker->pdev,
- info->size * sizeof(struct rocker_desc),
- info->desc, info->mapaddr);
- kfree(info->desc_info);
-}
-
-static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
- struct rocker_dma_ring_info *info)
-{
- int i;
-
- BUG_ON(info->head || info->tail);
-
- /* When the driver is the consumer of a ring (i.e. the hardware
- * produces into it, as with the event ring), head must be advanced
- * past each descriptor up front so the hardware knows that
- * descriptor is ready for it to use.
- */
- for (i = 0; i < info->size - 1; i++)
- rocker_desc_head_set(rocker, info, &info->desc_info[i]);
- rocker_desc_commit(&info->desc_info[i]);
-}
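/* Note that only size - 1 descriptors are handed over: one slot is always
 * left unused so that head == tail unambiguously means "empty" (compare
 * rocker_desc_head_get(), which reports a full ring when advancing head
 * would make it collide with tail).
 */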
-
-static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
- const struct rocker_dma_ring_info *info,
- int direction, size_t buf_size)
-{
- struct pci_dev *pdev = rocker->pdev;
- int i;
- int err;
-
- for (i = 0; i < info->size; i++) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
- struct rocker_desc *desc = &info->desc[i];
- dma_addr_t dma_handle;
- char *buf;
-
- buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
- if (!buf) {
- err = -ENOMEM;
- goto rollback;
- }
-
- dma_handle = pci_map_single(pdev, buf, buf_size, direction);
- if (pci_dma_mapping_error(pdev, dma_handle)) {
- kfree(buf);
- err = -EIO;
- goto rollback;
- }
-
- desc_info->data = buf;
- desc_info->data_size = buf_size;
- dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
-
- desc->buf_addr = dma_handle;
- desc->buf_size = buf_size;
- }
- return 0;
-
-rollback:
- for (i--; i >= 0; i--) {
- const struct rocker_desc_info *desc_info = &info->desc_info[i];
-
- pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
- desc_info->data_size, direction);
- kfree(desc_info->data);
- }
- return err;
-}
-
-static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
- const struct rocker_dma_ring_info *info,
- int direction)
-{
- struct pci_dev *pdev = rocker->pdev;
- int i;
-
- for (i = 0; i < info->size; i++) {
- const struct rocker_desc_info *desc_info = &info->desc_info[i];
- struct rocker_desc *desc = &info->desc[i];
-
- desc->buf_addr = 0;
- desc->buf_size = 0;
- pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
- desc_info->data_size, direction);
- kfree(desc_info->data);
- }
-}
-
-static int rocker_dma_rings_init(struct rocker *rocker)
-{
- const struct pci_dev *pdev = rocker->pdev;
- int err;
-
- err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
- ROCKER_DMA_CMD_DEFAULT_SIZE,
- &rocker->cmd_ring);
- if (err) {
- dev_err(&pdev->dev, "failed to create command dma ring\n");
- return err;
- }
-
- spin_lock_init(&rocker->cmd_ring_lock);
-
- err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
- if (err) {
- dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
- goto err_dma_cmd_ring_bufs_alloc;
- }
-
- err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
- ROCKER_DMA_EVENT_DEFAULT_SIZE,
- &rocker->event_ring);
- if (err) {
- dev_err(&pdev->dev, "failed to create event dma ring\n");
- goto err_dma_event_ring_create;
- }
-
- err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
- PCI_DMA_FROMDEVICE, PAGE_SIZE);
- if (err) {
- dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
- goto err_dma_event_ring_bufs_alloc;
- }
- rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
- return 0;
-
-err_dma_event_ring_bufs_alloc:
- rocker_dma_ring_destroy(rocker, &rocker->event_ring);
-err_dma_event_ring_create:
- rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL);
-err_dma_cmd_ring_bufs_alloc:
- rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
- return err;
-}
-
-static void rocker_dma_rings_fini(struct rocker *rocker)
-{
- rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
- PCI_DMA_BIDIRECTIONAL);
- rocker_dma_ring_destroy(rocker, &rocker->event_ring);
- rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL);
- rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
-}
-
-static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- struct sk_buff *skb, size_t buf_len)
-{
- const struct rocker *rocker = rocker_port->rocker;
- struct pci_dev *pdev = rocker->pdev;
- dma_addr_t dma_handle;
-
- dma_handle = pci_map_single(pdev, skb->data, buf_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, dma_handle))
- return -EIO;
- if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
- goto tlv_put_failure;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
- goto tlv_put_failure;
- return 0;
-
-tlv_put_failure:
- pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
- desc_info->tlv_size = 0;
- return -EMSGSIZE;
-}
-
-static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
-{
- return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-}
-
-static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info)
-{
- struct net_device *dev = rocker_port->dev;
- struct sk_buff *skb;
- size_t buf_len = rocker_port_rx_buf_len(rocker_port);
- int err;
-
- /* Ensure that hw will see tlv_size zero in case of an error.
- * That tells hw to use another descriptor.
- */
- rocker_desc_cookie_ptr_set(desc_info, NULL);
- desc_info->tlv_size = 0;
-
- skb = netdev_alloc_skb_ip_align(dev, buf_len);
- if (!skb)
- return -ENOMEM;
- err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
- if (err) {
- dev_kfree_skb_any(skb);
- return err;
- }
- rocker_desc_cookie_ptr_set(desc_info, skb);
- return 0;
-}
-
-static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
- const struct rocker_tlv **attrs)
-{
- struct pci_dev *pdev = rocker->pdev;
- dma_addr_t dma_handle;
- size_t len;
-
- if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
- !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
- return;
- dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
- len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
- pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
-}
-
-static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
- const struct rocker_desc_info *desc_info)
-{
- const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
- struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
-
- if (!skb)
- return;
- rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
- rocker_dma_rx_ring_skb_unmap(rocker, attrs);
- dev_kfree_skb_any(skb);
-}
-
-static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
-{
- const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
- const struct rocker *rocker = rocker_port->rocker;
- int i;
- int err;
-
- for (i = 0; i < rx_ring->size; i++) {
- err = rocker_dma_rx_ring_skb_alloc(rocker_port,
- &rx_ring->desc_info[i]);
- if (err)
- goto rollback;
- }
- return 0;
-
-rollback:
- for (i--; i >= 0; i--)
- rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
- return err;
-}
-
-static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
-{
- const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
- const struct rocker *rocker = rocker_port->rocker;
- int i;
-
- for (i = 0; i < rx_ring->size; i++)
- rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
-}
-
-static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
-{
- struct rocker *rocker = rocker_port->rocker;
- int err;
-
- err = rocker_dma_ring_create(rocker,
- ROCKER_DMA_TX(rocker_port->port_number),
- ROCKER_DMA_TX_DEFAULT_SIZE,
- &rocker_port->tx_ring);
- if (err) {
- netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
- return err;
- }
-
- err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE,
- ROCKER_DMA_TX_DESC_SIZE);
- if (err) {
- netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
- goto err_dma_tx_ring_bufs_alloc;
- }
-
- err = rocker_dma_ring_create(rocker,
- ROCKER_DMA_RX(rocker_port->port_number),
- ROCKER_DMA_RX_DEFAULT_SIZE,
- &rocker_port->rx_ring);
- if (err) {
- netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
- goto err_dma_rx_ring_create;
- }
-
- err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL,
- ROCKER_DMA_RX_DESC_SIZE);
- if (err) {
- netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
- goto err_dma_rx_ring_bufs_alloc;
- }
-
- err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
- if (err) {
- netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
- goto err_dma_rx_ring_skbs_alloc;
- }
- rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
-
- return 0;
-
-err_dma_rx_ring_skbs_alloc:
- rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL);
-err_dma_rx_ring_bufs_alloc:
- rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
-err_dma_rx_ring_create:
- rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE);
-err_dma_tx_ring_bufs_alloc:
- rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
- return err;
-}
-
-static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
-{
- struct rocker *rocker = rocker_port->rocker;
-
- rocker_dma_rx_ring_skbs_free(rocker_port);
- rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL);
- rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
- rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE);
- rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
-}
-
-static void rocker_port_set_enable(const struct rocker_port *rocker_port,
- bool enable)
-{
- u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
-
- if (enable)
- val |= 1ULL << rocker_port->pport;
- else
- val &= ~(1ULL << rocker_port->pport);
- rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
-}
-
-/********************************
- * Interrupt handler and helpers
- ********************************/
-
-static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
-{
- struct rocker *rocker = dev_id;
- const struct rocker_desc_info *desc_info;
- struct rocker_wait *wait;
- u32 credits = 0;
-
- spin_lock(&rocker->cmd_ring_lock);
- while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
- wait = rocker_desc_cookie_ptr_get(desc_info);
- if (wait->nowait) {
- rocker_desc_gen_clear(desc_info);
- rocker_wait_destroy(NULL, wait);
- } else {
- rocker_wait_wake_up(wait);
- }
- credits++;
- }
- spin_unlock(&rocker->cmd_ring_lock);
- rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
-
- return IRQ_HANDLED;
-}
-
-static void rocker_port_link_up(const struct rocker_port *rocker_port)
-{
- netif_carrier_on(rocker_port->dev);
- netdev_info(rocker_port->dev, "Link is up\n");
-}
-
-static void rocker_port_link_down(const struct rocker_port *rocker_port)
-{
- netif_carrier_off(rocker_port->dev);
- netdev_info(rocker_port->dev, "Link is down\n");
-}
-
-static int rocker_event_link_change(const struct rocker *rocker,
- const struct rocker_tlv *info)
-{
- const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
- unsigned int port_number;
- bool link_up;
- struct rocker_port *rocker_port;
-
- rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
- if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
- !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
- return -EIO;
- port_number =
- rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
- link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
-
- if (port_number >= rocker->port_count)
- return -EINVAL;
-
- rocker_port = rocker->ports[port_number];
- if (netif_carrier_ok(rocker_port->dev) != link_up) {
- if (link_up)
- rocker_port_link_up(rocker_port);
- else
- rocker_port_link_down(rocker_port);
- }
-
- return 0;
-}
-
-static int rocker_port_fdb(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- const unsigned char *addr,
- __be16 vlan_id, int flags);
-
-static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
- const struct rocker_tlv *info)
-{
- const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
- unsigned int port_number;
- struct rocker_port *rocker_port;
- const unsigned char *addr;
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
- __be16 vlan_id;
-
- rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
- if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
- !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
- !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
- return -EIO;
- port_number =
- rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
- addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
- vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
-
- if (port_number >= rocker->port_count)
- return -EINVAL;
-
- rocker_port = rocker->ports[port_number];
-
- if (rocker_port->stp_state != BR_STATE_LEARNING &&
- rocker_port->stp_state != BR_STATE_FORWARDING)
- return 0;
-
- return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
-}
-
-static int rocker_event_process(const struct rocker *rocker,
- const struct rocker_desc_info *desc_info)
-{
- const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
- const struct rocker_tlv *info;
- u16 type;
-
- rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
- if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
- !attrs[ROCKER_TLV_EVENT_INFO])
- return -EIO;
-
- type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
- info = attrs[ROCKER_TLV_EVENT_INFO];
-
- switch (type) {
- case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
- return rocker_event_link_change(rocker, info);
- case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
- return rocker_event_mac_vlan_seen(rocker, info);
- }
-
- return -EOPNOTSUPP;
-}
-
-static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
-{
- struct rocker *rocker = dev_id;
- const struct pci_dev *pdev = rocker->pdev;
- const struct rocker_desc_info *desc_info;
- u32 credits = 0;
- int err;
-
- while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
- err = rocker_desc_err(desc_info);
- if (err) {
- dev_err(&pdev->dev, "event desc received with err %d\n",
- err);
- } else {
- err = rocker_event_process(rocker, desc_info);
- if (err)
- dev_err(&pdev->dev, "event processing failed with err %d\n",
- err);
- }
- rocker_desc_gen_clear(desc_info);
- rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
- credits++;
- }
- rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
-{
- struct rocker_port *rocker_port = dev_id;
-
- napi_schedule(&rocker_port->napi_tx);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
-{
- struct rocker_port *rocker_port = dev_id;
-
- napi_schedule(&rocker_port->napi_rx);
- return IRQ_HANDLED;
-}
-
-/********************
- * Command interface
- ********************/
-
-typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv);
-
-typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
- const struct rocker_desc_info *desc_info,
- void *priv);
-
-static int rocker_cmd_exec(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- rocker_cmd_prep_cb_t prepare, void *prepare_priv,
- rocker_cmd_proc_cb_t process, void *process_priv)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_desc_info *desc_info;
- struct rocker_wait *wait;
- bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
- unsigned long lock_flags;
- int err;
-
- wait = rocker_wait_create(rocker_port, trans, flags);
- if (!wait)
- return -ENOMEM;
- wait->nowait = nowait;
-
- spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
-
- desc_info = rocker_desc_head_get(&rocker->cmd_ring);
- if (!desc_info) {
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
- err = -EAGAIN;
- goto out;
- }
-
- err = prepare(rocker_port, desc_info, prepare_priv);
- if (err) {
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
- goto out;
- }
-
- rocker_desc_cookie_ptr_set(desc_info, wait);
-
- if (!switchdev_trans_ph_prepare(trans))
- rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
-
- spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
-
- if (nowait)
- return 0;
-
- if (!switchdev_trans_ph_prepare(trans))
- if (!rocker_wait_event_timeout(wait, HZ / 10))
- return -EIO;
-
- err = rocker_desc_err(desc_info);
- if (err)
- return err;
-
- if (process)
- err = process(rocker_port, desc_info, process_priv);
-
- rocker_desc_gen_clear(desc_info);
-out:
- rocker_wait_destroy(trans, wait);
- return err;
-}
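/* The cmd helpers below are thin wrappers around rocker_cmd_exec(): each
 * supplies a "prepare" callback that serializes the request TLVs into the
 * command descriptor and, for the getters, a "process" callback that parses
 * the completed descriptor back into the caller's buffer.
 */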
-
-static int
-rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- struct rocker_tlv *cmd_info;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
- ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
- rocker_port->pport))
- return -EMSGSIZE;
- rocker_tlv_nest_end(desc_info, cmd_info);
- return 0;
-}
-
-static int
-rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
- const struct rocker_desc_info *desc_info,
- void *priv)
-{
- struct ethtool_cmd *ecmd = priv;
- const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- u32 speed;
- u8 duplex;
- u8 autoneg;
-
- rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
- if (!attrs[ROCKER_TLV_CMD_INFO])
- return -EIO;
-
- rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
- attrs[ROCKER_TLV_CMD_INFO]);
- if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
- !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
- !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
- return -EIO;
-
- speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
- duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
- autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
-
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = SUPPORTED_TP;
- ecmd->phy_address = 0xff;
- ecmd->port = PORT_TP;
- ethtool_cmd_speed_set(ecmd, speed);
- ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
- ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
-
- return 0;
-}
-
-static int
-rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
- const struct rocker_desc_info *desc_info,
- void *priv)
-{
- unsigned char *macaddr = priv;
- const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- const struct rocker_tlv *attr;
-
- rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
- if (!attrs[ROCKER_TLV_CMD_INFO])
- return -EIO;
-
- rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
- attrs[ROCKER_TLV_CMD_INFO]);
- attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
- if (!attr)
- return -EIO;
-
- if (rocker_tlv_len(attr) != ETH_ALEN)
- return -EINVAL;
-
- ether_addr_copy(macaddr, rocker_tlv_data(attr));
- return 0;
-}
-
-struct port_name {
- char *buf;
- size_t len;
-};
-
-static int
-rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
- const struct rocker_desc_info *desc_info,
- void *priv)
-{
- const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct port_name *name = priv;
- const struct rocker_tlv *attr;
- size_t i, j, len;
- const char *str;
-
- rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
- if (!attrs[ROCKER_TLV_CMD_INFO])
- return -EIO;
-
- rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
- attrs[ROCKER_TLV_CMD_INFO]);
- attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
- if (!attr)
- return -EIO;
-
- /* leave room for the terminating '\0' written below */
- len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
- str = rocker_tlv_data(attr);
-
- /* make sure name only contains alphanumeric characters */
- for (i = j = 0; i < len; ++i) {
- if (isalnum(str[i])) {
- name->buf[j] = str[i];
- j++;
- }
- }
-
- if (j == 0)
- return -EIO;
-
- name->buf[j] = '\0';
-
- return 0;
-}
-
-static int
-rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- struct ethtool_cmd *ecmd = priv;
- struct rocker_tlv *cmd_info;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
- ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
- rocker_port->pport))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
- ethtool_cmd_speed(ecmd)))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
- ecmd->duplex))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
- ecmd->autoneg))
- return -EMSGSIZE;
- rocker_tlv_nest_end(desc_info, cmd_info);
- return 0;
-}
-
-static int
-rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- const unsigned char *macaddr = priv;
- struct rocker_tlv *cmd_info;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
- ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
- rocker_port->pport))
- return -EMSGSIZE;
- if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
- ETH_ALEN, macaddr))
- return -EMSGSIZE;
- rocker_tlv_nest_end(desc_info, cmd_info);
- return 0;
-}
-
-static int
-rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- int mtu = *(int *)priv;
- struct rocker_tlv *cmd_info;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
- ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
- rocker_port->pport))
- return -EMSGSIZE;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
- mtu))
- return -EMSGSIZE;
- rocker_tlv_nest_end(desc_info, cmd_info);
- return 0;
-}
-
-static int
-rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- struct rocker_tlv *cmd_info;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
- ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
- rocker_port->pport))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
- !!(rocker_port->brport_flags & BR_LEARNING)))
- return -EMSGSIZE;
- rocker_tlv_nest_end(desc_info, cmd_info);
- return 0;
-}
-
-static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
- struct ethtool_cmd *ecmd)
-{
- return rocker_cmd_exec(rocker_port, NULL, 0,
- rocker_cmd_get_port_settings_prep, NULL,
- rocker_cmd_get_port_settings_ethtool_proc,
- ecmd);
-}
-
-static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
- unsigned char *macaddr)
-{
- return rocker_cmd_exec(rocker_port, NULL, 0,
- rocker_cmd_get_port_settings_prep, NULL,
- rocker_cmd_get_port_settings_macaddr_proc,
- macaddr);
-}
-
-static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
- struct ethtool_cmd *ecmd)
-{
- return rocker_cmd_exec(rocker_port, NULL, 0,
- rocker_cmd_set_port_settings_ethtool_prep,
- ecmd, NULL, NULL);
-}
-
-static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
- unsigned char *macaddr)
-{
- return rocker_cmd_exec(rocker_port, NULL, 0,
- rocker_cmd_set_port_settings_macaddr_prep,
- macaddr, NULL, NULL);
-}
-
-static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
- int mtu)
-{
- return rocker_cmd_exec(rocker_port, NULL, 0,
- rocker_cmd_set_port_settings_mtu_prep,
- &mtu, NULL, NULL);
-}
-
-static int rocker_port_set_learning(struct rocker_port *rocker_port,
- struct switchdev_trans *trans)
-{
- return rocker_cmd_exec(rocker_port, trans, 0,
- rocker_cmd_set_port_learning_prep,
- NULL, NULL, NULL);
-}
-
-static int
-rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
- const struct rocker_flow_tbl_entry *entry)
-{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
- entry->key.ig_port.in_pport))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
- entry->key.ig_port.in_pport_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
- entry->key.ig_port.goto_tbl))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int
-rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
- const struct rocker_flow_tbl_entry *entry)
-{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
- entry->key.vlan.in_pport))
- return -EMSGSIZE;
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
- entry->key.vlan.vlan_id))
- return -EMSGSIZE;
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
- entry->key.vlan.vlan_id_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
- entry->key.vlan.goto_tbl))
- return -EMSGSIZE;
- if (entry->key.vlan.untagged &&
- rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
- entry->key.vlan.new_vlan_id))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int
-rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
- const struct rocker_flow_tbl_entry *entry)
-{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
- entry->key.term_mac.in_pport))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
- entry->key.term_mac.in_pport_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
- entry->key.term_mac.eth_type))
- return -EMSGSIZE;
- if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
- ETH_ALEN, entry->key.term_mac.eth_dst))
- return -EMSGSIZE;
- if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
- ETH_ALEN, entry->key.term_mac.eth_dst_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
- entry->key.term_mac.vlan_id))
- return -EMSGSIZE;
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
- entry->key.term_mac.vlan_id_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
- entry->key.term_mac.goto_tbl))
- return -EMSGSIZE;
- if (entry->key.term_mac.copy_to_cpu &&
- rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
- entry->key.term_mac.copy_to_cpu))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int
-rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
- const struct rocker_flow_tbl_entry *entry)
-{
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
- entry->key.ucast_routing.eth_type))
- return -EMSGSIZE;
- if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
- entry->key.ucast_routing.dst4))
- return -EMSGSIZE;
- if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
- entry->key.ucast_routing.dst4_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
- entry->key.ucast_routing.goto_tbl))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
- entry->key.ucast_routing.group_id))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int
-rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
- const struct rocker_flow_tbl_entry *entry)
-{
- if (entry->key.bridge.has_eth_dst &&
- rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
- ETH_ALEN, entry->key.bridge.eth_dst))
- return -EMSGSIZE;
- if (entry->key.bridge.has_eth_dst_mask &&
- rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
- ETH_ALEN, entry->key.bridge.eth_dst_mask))
- return -EMSGSIZE;
- if (entry->key.bridge.vlan_id &&
- rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
- entry->key.bridge.vlan_id))
- return -EMSGSIZE;
- if (entry->key.bridge.tunnel_id &&
- rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
- entry->key.bridge.tunnel_id))
- return -EMSGSIZE;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
- entry->key.bridge.goto_tbl))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
- entry->key.bridge.group_id))
- return -EMSGSIZE;
- if (entry->key.bridge.copy_to_cpu &&
- rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
- entry->key.bridge.copy_to_cpu))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int
-rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
- const struct rocker_flow_tbl_entry *entry)
-{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
- entry->key.acl.in_pport))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
- entry->key.acl.in_pport_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
- ETH_ALEN, entry->key.acl.eth_src))
- return -EMSGSIZE;
- if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
- ETH_ALEN, entry->key.acl.eth_src_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
- ETH_ALEN, entry->key.acl.eth_dst))
- return -EMSGSIZE;
- if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
- ETH_ALEN, entry->key.acl.eth_dst_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
- entry->key.acl.eth_type))
- return -EMSGSIZE;
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
- entry->key.acl.vlan_id))
- return -EMSGSIZE;
- if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
- entry->key.acl.vlan_id_mask))
- return -EMSGSIZE;
-
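- /* For IPv4/IPv6 frames, also match on the IP protocol and the
- * ToS byte; the ToS byte is handed to OF-DPA split into separate
- * six-bit DSCP and two-bit ECN attributes.
- */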
- switch (ntohs(entry->key.acl.eth_type)) {
- case ETH_P_IP:
- case ETH_P_IPV6:
- if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
- entry->key.acl.ip_proto))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info,
- ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
- entry->key.acl.ip_proto_mask))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
- entry->key.acl.ip_tos & 0x3f))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info,
- ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
- entry->key.acl.ip_tos_mask & 0x3f))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
- (entry->key.acl.ip_tos & 0xc0) >> 6))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info,
- ROCKER_TLV_OF_DPA_IP_ECN_MASK,
- (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
- return -EMSGSIZE;
- break;
- }
-
- if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
- rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
- entry->key.acl.group_id))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- const struct rocker_flow_tbl_entry *entry = priv;
- struct rocker_tlv *cmd_info;
- int err = 0;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
- entry->key.tbl_id))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
- entry->key.priority))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
- return -EMSGSIZE;
- if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
- entry->cookie))
- return -EMSGSIZE;
-
- switch (entry->key.tbl_id) {
- case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
- err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
- break;
- case ROCKER_OF_DPA_TABLE_ID_VLAN:
- err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
- break;
- case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
- err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
- break;
- case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
- err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
- break;
- case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
- err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
- break;
- case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
- err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- if (err)
- return err;
-
- rocker_tlv_nest_end(desc_info, cmd_info);
-
- return 0;
-}
-
-static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- const struct rocker_flow_tbl_entry *entry = priv;
- struct rocker_tlv *cmd_info;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
- if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
- entry->cookie))
- return -EMSGSIZE;
- rocker_tlv_nest_end(desc_info, cmd_info);
-
- return 0;
-}
-
-static int
-rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
-{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
- ROCKER_GROUP_PORT_GET(entry->group_id)))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
- entry->l2_interface.pop_vlan))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int
-rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
- const struct rocker_group_tbl_entry *entry)
-{
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
- entry->l2_rewrite.group_id))
- return -EMSGSIZE;
- if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
- rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
- ETH_ALEN, entry->l2_rewrite.eth_src))
- return -EMSGSIZE;
- if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
- rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
- ETH_ALEN, entry->l2_rewrite.eth_dst))
- return -EMSGSIZE;
- if (entry->l2_rewrite.vlan_id &&
- rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
- entry->l2_rewrite.vlan_id))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int
-rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
- const struct rocker_group_tbl_entry *entry)
-{
- int i;
- struct rocker_tlv *group_ids;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
- entry->group_count))
- return -EMSGSIZE;
-
- group_ids = rocker_tlv_nest_start(desc_info,
- ROCKER_TLV_OF_DPA_GROUP_IDS);
- if (!group_ids)
- return -EMSGSIZE;
-
- for (i = 0; i < entry->group_count; i++)
- /* Note TLV array is 1-based */
- if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
- return -EMSGSIZE;
-
- rocker_tlv_nest_end(desc_info, group_ids);
-
- return 0;
-}
-
-static int
-rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
- const struct rocker_group_tbl_entry *entry)
-{
- if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
- rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
- ETH_ALEN, entry->l3_unicast.eth_src))
- return -EMSGSIZE;
- if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
- rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
- ETH_ALEN, entry->l3_unicast.eth_dst))
- return -EMSGSIZE;
- if (entry->l3_unicast.vlan_id &&
- rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
- entry->l3_unicast.vlan_id))
- return -EMSGSIZE;
- if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
- entry->l3_unicast.ttl_check))
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
- entry->l3_unicast.group_id))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- struct rocker_group_tbl_entry *entry = priv;
- struct rocker_tlv *cmd_info;
- int err = 0;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
-
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
- entry->group_id))
- return -EMSGSIZE;
-
- switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
- case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
- err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
- break;
- case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
- err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
- break;
- case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
- case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
- err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
- break;
- case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
- err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
- break;
- default:
- err = -ENOTSUPP;
- break;
- }
-
- if (err)
- return err;
-
- rocker_tlv_nest_end(desc_info, cmd_info);
-
- return 0;
-}
-
-static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- const struct rocker_group_tbl_entry *entry = priv;
- struct rocker_tlv *cmd_info;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
- return -EMSGSIZE;
- cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_info)
- return -EMSGSIZE;
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
- entry->group_id))
- return -EMSGSIZE;
- rocker_tlv_nest_end(desc_info, cmd_info);
-
- return 0;
-}
-
-/***************************************************
- * Flow, group, FDB, internal VLAN and neigh tables
- ***************************************************/
-
-static int rocker_init_tbls(struct rocker *rocker)
-{
- hash_init(rocker->flow_tbl);
- spin_lock_init(&rocker->flow_tbl_lock);
-
- hash_init(rocker->group_tbl);
- spin_lock_init(&rocker->group_tbl_lock);
-
- hash_init(rocker->fdb_tbl);
- spin_lock_init(&rocker->fdb_tbl_lock);
-
- hash_init(rocker->internal_vlan_tbl);
- spin_lock_init(&rocker->internal_vlan_tbl_lock);
-
- hash_init(rocker->neigh_tbl);
- spin_lock_init(&rocker->neigh_tbl_lock);
-
- return 0;
-}
-
-static void rocker_free_tbls(struct rocker *rocker)
-{
- unsigned long flags;
- struct rocker_flow_tbl_entry *flow_entry;
- struct rocker_group_tbl_entry *group_entry;
- struct rocker_fdb_tbl_entry *fdb_entry;
- struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
- struct rocker_neigh_tbl_entry *neigh_entry;
- struct hlist_node *tmp;
- int bkt;
-
- spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
- hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
- hash_del(&flow_entry->entry);
- spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
-
- spin_lock_irqsave(&rocker->group_tbl_lock, flags);
- hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
- hash_del(&group_entry->entry);
- spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
-
- spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
- hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
- hash_del(&fdb_entry->entry);
- spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
-
- spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
- hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
- tmp, internal_vlan_entry, entry)
- hash_del(&internal_vlan_entry->entry);
- spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
-
- spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
- hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
- hash_del(&neigh_entry->entry);
- spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
-}
-
-static struct rocker_flow_tbl_entry *
-rocker_flow_tbl_find(const struct rocker *rocker,
- const struct rocker_flow_tbl_entry *match)
-{
- struct rocker_flow_tbl_entry *found;
- size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
-
- hash_for_each_possible(rocker->flow_tbl, found,
- entry, match->key_crc32) {
- if (memcmp(&found->key, &match->key, key_len) == 0)
- return found;
- }
-
- return NULL;
-}
-
-static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- struct rocker_flow_tbl_entry *match)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_flow_tbl_entry *found;
- size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
- unsigned long lock_flags;
-
- match->key_crc32 = crc32(~0, &match->key, key_len);
-
- spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
-
- found = rocker_flow_tbl_find(rocker, match);
-
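- /* An entry whose key already exists is replaced in place: keep
- * the old cookie and issue a MOD command so the device updates
- * the existing flow instead of installing a duplicate.
- */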
- if (found) {
- match->cookie = found->cookie;
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- rocker_port_kfree(trans, found);
- found = match;
- found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
- } else {
- found = match;
- found->cookie = rocker->flow_tbl_next_cookie++;
- found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
- }
-
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
-
- spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
-
- return rocker_cmd_exec(rocker_port, trans, flags,
- rocker_cmd_flow_tbl_add, found, NULL, NULL);
-}
-
-static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- struct rocker_flow_tbl_entry *match)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_flow_tbl_entry *found;
- size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
- unsigned long lock_flags;
- int err = 0;
-
- match->key_crc32 = crc32(~0, &match->key, key_len);
-
- spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
-
- found = rocker_flow_tbl_find(rocker, match);
-
- if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
- }
-
- spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
-
- rocker_port_kfree(trans, match);
-
- if (found) {
- err = rocker_cmd_exec(rocker_port, trans, flags,
- rocker_cmd_flow_tbl_del,
- found, NULL, NULL);
- rocker_port_kfree(trans, found);
- }
-
- return err;
-}
-
-static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- struct rocker_flow_tbl_entry *entry)
-{
- if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
- else
- return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
-}
-
-static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- u32 in_pport, u32 in_pport_mask,
- enum rocker_of_dpa_table_id goto_tbl)
-{
- struct rocker_flow_tbl_entry *entry;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- entry->key.priority = ROCKER_PRIORITY_IG_PORT;
- entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
- entry->key.ig_port.in_pport = in_pport;
- entry->key.ig_port.in_pport_mask = in_pport_mask;
- entry->key.ig_port.goto_tbl = goto_tbl;
-
- return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- u32 in_pport, __be16 vlan_id,
- __be16 vlan_id_mask,
- enum rocker_of_dpa_table_id goto_tbl,
- bool untagged, __be16 new_vlan_id)
-{
- struct rocker_flow_tbl_entry *entry;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- entry->key.priority = ROCKER_PRIORITY_VLAN;
- entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
- entry->key.vlan.in_pport = in_pport;
- entry->key.vlan.vlan_id = vlan_id;
- entry->key.vlan.vlan_id_mask = vlan_id_mask;
- entry->key.vlan.goto_tbl = goto_tbl;
-
- entry->key.vlan.untagged = untagged;
- entry->key.vlan.new_vlan_id = new_vlan_id;
-
- return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- u32 in_pport, u32 in_pport_mask,
- __be16 eth_type, const u8 *eth_dst,
- const u8 *eth_dst_mask, __be16 vlan_id,
- __be16 vlan_id_mask, bool copy_to_cpu,
- int flags)
-{
- struct rocker_flow_tbl_entry *entry;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- if (is_multicast_ether_addr(eth_dst)) {
- entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
- entry->key.term_mac.goto_tbl =
- ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
- } else {
- entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
- entry->key.term_mac.goto_tbl =
- ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
- }
-
- entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
- entry->key.term_mac.in_pport = in_pport;
- entry->key.term_mac.in_pport_mask = in_pport_mask;
- entry->key.term_mac.eth_type = eth_type;
- ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
- ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
- entry->key.term_mac.vlan_id = vlan_id;
- entry->key.term_mac.vlan_id_mask = vlan_id_mask;
- entry->key.term_mac.copy_to_cpu = copy_to_cpu;
-
- return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- const u8 *eth_dst, const u8 *eth_dst_mask,
- __be16 vlan_id, u32 tunnel_id,
- enum rocker_of_dpa_table_id goto_tbl,
- u32 group_id, bool copy_to_cpu)
-{
- struct rocker_flow_tbl_entry *entry;
- u32 priority;
- bool vlan_bridging = !!vlan_id;
- bool dflt = !eth_dst || eth_dst_mask;
- bool wild = false;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
-
- if (eth_dst) {
- entry->key.bridge.has_eth_dst = 1;
- ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
- }
- if (eth_dst_mask) {
- entry->key.bridge.has_eth_dst_mask = 1;
- ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
- if (!ether_addr_equal(eth_dst_mask, ff_mac))
- wild = true;
- }
-
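- /* Rank the entry by match specificity: exact-MAC entries take
- * precedence over masked defaults, for both VLAN and tenant
- * (tunnel) bridging.
- */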
- priority = ROCKER_PRIORITY_UNKNOWN;
- if (vlan_bridging && dflt && wild)
- priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
- else if (vlan_bridging && dflt && !wild)
- priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
- else if (vlan_bridging && !dflt)
- priority = ROCKER_PRIORITY_BRIDGING_VLAN;
- else if (!vlan_bridging && dflt && wild)
- priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
- else if (!vlan_bridging && dflt && !wild)
- priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
- else if (!vlan_bridging && !dflt)
- priority = ROCKER_PRIORITY_BRIDGING_TENANT;
-
- entry->key.priority = priority;
- entry->key.bridge.vlan_id = vlan_id;
- entry->key.bridge.tunnel_id = tunnel_id;
- entry->key.bridge.goto_tbl = goto_tbl;
- entry->key.bridge.group_id = group_id;
- entry->key.bridge.copy_to_cpu = copy_to_cpu;
-
- return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- __be16 eth_type, __be32 dst,
- __be32 dst_mask, u32 priority,
- enum rocker_of_dpa_table_id goto_tbl,
- u32 group_id, int flags)
-{
- struct rocker_flow_tbl_entry *entry;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
- entry->key.priority = priority;
- entry->key.ucast_routing.eth_type = eth_type;
- entry->key.ucast_routing.dst4 = dst;
- entry->key.ucast_routing.dst4_mask = dst_mask;
- entry->key.ucast_routing.goto_tbl = goto_tbl;
- entry->key.ucast_routing.group_id = group_id;
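- /* Leave group_id out of the hashed key so that re-pointing the
- * route at a different nexthop group modifies this flow rather
- * than adding a second one.
- */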
- entry->key_len = offsetof(struct rocker_flow_tbl_key,
- ucast_routing.group_id);
-
- return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- u32 in_pport, u32 in_pport_mask,
- const u8 *eth_src, const u8 *eth_src_mask,
- const u8 *eth_dst, const u8 *eth_dst_mask,
- __be16 eth_type, __be16 vlan_id,
- __be16 vlan_id_mask, u8 ip_proto,
- u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
- u32 group_id)
-{
- u32 priority;
- struct rocker_flow_tbl_entry *entry;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- priority = ROCKER_PRIORITY_ACL_NORMAL;
- if (eth_dst && eth_dst_mask) {
- if (ether_addr_equal(eth_dst_mask, mcast_mac))
- priority = ROCKER_PRIORITY_ACL_DFLT;
- else if (is_link_local_ether_addr(eth_dst))
- priority = ROCKER_PRIORITY_ACL_CTRL;
- }
-
- entry->key.priority = priority;
- entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
- entry->key.acl.in_pport = in_pport;
- entry->key.acl.in_pport_mask = in_pport_mask;
-
- if (eth_src)
- ether_addr_copy(entry->key.acl.eth_src, eth_src);
- if (eth_src_mask)
- ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
- if (eth_dst)
- ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
- if (eth_dst_mask)
- ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
-
- entry->key.acl.eth_type = eth_type;
- entry->key.acl.vlan_id = vlan_id;
- entry->key.acl.vlan_id_mask = vlan_id_mask;
- entry->key.acl.ip_proto = ip_proto;
- entry->key.acl.ip_proto_mask = ip_proto_mask;
- entry->key.acl.ip_tos = ip_tos;
- entry->key.acl.ip_tos_mask = ip_tos_mask;
- entry->key.acl.group_id = group_id;
-
- return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static struct rocker_group_tbl_entry *
-rocker_group_tbl_find(const struct rocker *rocker,
- const struct rocker_group_tbl_entry *match)
-{
- struct rocker_group_tbl_entry *found;
-
- hash_for_each_possible(rocker->group_tbl, found,
- entry, match->group_id) {
- if (found->group_id == match->group_id)
- return found;
- }
-
- return NULL;
-}
-
-static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
- struct rocker_group_tbl_entry *entry)
-{
- switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
- case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
- case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
- rocker_port_kfree(trans, entry->group_ids);
- break;
- default:
- break;
- }
- rocker_port_kfree(trans, entry);
-}
-
-static int rocker_group_tbl_add(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- struct rocker_group_tbl_entry *match)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_group_tbl_entry *found;
- unsigned long lock_flags;
-
- spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
-
- found = rocker_group_tbl_find(rocker, match);
-
- if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- rocker_group_tbl_entry_free(trans, found);
- found = match;
- found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
- } else {
- found = match;
- found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
- }
-
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(rocker->group_tbl, &found->entry, found->group_id);
-
- spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
-
- return rocker_cmd_exec(rocker_port, trans, flags,
- rocker_cmd_group_tbl_add, found, NULL, NULL);
-}
-
-static int rocker_group_tbl_del(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- struct rocker_group_tbl_entry *match)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_group_tbl_entry *found;
- unsigned long lock_flags;
- int err = 0;
-
- spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
-
- found = rocker_group_tbl_find(rocker, match);
-
- if (found) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
- }
-
- spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
-
- rocker_group_tbl_entry_free(trans, match);
-
- if (found) {
- err = rocker_cmd_exec(rocker_port, trans, flags,
- rocker_cmd_group_tbl_del,
- found, NULL, NULL);
- rocker_group_tbl_entry_free(trans, found);
- }
-
- return err;
-}
-
-static int rocker_group_tbl_do(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- struct rocker_group_tbl_entry *entry)
-{
- if (flags & ROCKER_OP_FLAG_REMOVE)
- return rocker_group_tbl_del(rocker_port, trans, flags, entry);
- else
- return rocker_group_tbl_add(rocker_port, trans, flags, entry);
-}
-
-static int rocker_group_l2_interface(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id, u32 out_pport,
- int pop_vlan)
-{
- struct rocker_group_tbl_entry *entry;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
- entry->l2_interface.pop_vlan = pop_vlan;
-
- return rocker_group_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- int flags, u8 group_count,
- const u32 *group_ids, u32 group_id)
-{
- struct rocker_group_tbl_entry *entry;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- entry->group_id = group_id;
- entry->group_count = group_count;
-
- entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
- group_count, sizeof(u32));
- if (!entry->group_ids) {
- rocker_port_kfree(trans, entry);
- return -ENOMEM;
- }
- memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
-
- return rocker_group_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static int rocker_group_l2_flood(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id, u8 group_count,
- const u32 *group_ids, u32 group_id)
-{
- return rocker_group_l2_fan_out(rocker_port, trans, flags,
- group_count, group_ids,
- group_id);
-}
-
-static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- u32 index, const u8 *src_mac, const u8 *dst_mac,
- __be16 vlan_id, bool ttl_check, u32 pport)
-{
- struct rocker_group_tbl_entry *entry;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
- if (src_mac)
- ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
- if (dst_mac)
- ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
- entry->l3_unicast.vlan_id = vlan_id;
- entry->l3_unicast.ttl_check = ttl_check;
- entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
-
- return rocker_group_tbl_do(rocker_port, trans, flags, entry);
-}
-
-static struct rocker_neigh_tbl_entry *
-rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
-{
- struct rocker_neigh_tbl_entry *found;
-
- hash_for_each_possible(rocker->neigh_tbl, found,
- entry, be32_to_cpu(ip_addr))
- if (found->ip_addr == ip_addr)
- return found;
-
- return NULL;
-}
-
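- /* The _rocker_neigh_* helpers below are called with
- * neigh_tbl_lock held; ref_count tracks how many nexthops and
- * /32 routes share the entry.
- */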
-static void _rocker_neigh_add(struct rocker *rocker,
- struct switchdev_trans *trans,
- struct rocker_neigh_tbl_entry *entry)
-{
- if (!switchdev_trans_ph_commit(trans))
- entry->index = rocker->neigh_tbl_next_index++;
- if (switchdev_trans_ph_prepare(trans))
- return;
- entry->ref_count++;
- hash_add(rocker->neigh_tbl, &entry->entry,
- be32_to_cpu(entry->ip_addr));
-}
-
-static void _rocker_neigh_del(struct switchdev_trans *trans,
- struct rocker_neigh_tbl_entry *entry)
-{
- if (switchdev_trans_ph_prepare(trans))
- return;
- if (--entry->ref_count == 0) {
- hash_del(&entry->entry);
- rocker_port_kfree(trans, entry);
- }
-}
-
-static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
- struct switchdev_trans *trans,
- const u8 *eth_dst, bool ttl_check)
-{
- if (eth_dst) {
- ether_addr_copy(entry->eth_dst, eth_dst);
- entry->ttl_check = ttl_check;
- } else if (!switchdev_trans_ph_prepare(trans)) {
- entry->ref_count++;
- }
-}
-
-static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- int flags, __be32 ip_addr, const u8 *eth_dst)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_neigh_tbl_entry *entry;
- struct rocker_neigh_tbl_entry *found;
- unsigned long lock_flags;
- __be16 eth_type = htons(ETH_P_IP);
- enum rocker_of_dpa_table_id goto_tbl =
- ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
- u32 group_id;
- u32 priority = 0;
- bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
- bool updating;
- bool removing;
- int err = 0;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
-
- found = rocker_neigh_tbl_find(rocker, ip_addr);
-
- updating = found && adding;
- removing = found && !adding;
- adding = !found && adding;
-
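- /* At most one of adding/updating/removing is true here; if none
- * is, the request refers to a neighbor we have no record of and
- * fails with -ENOENT below.
- */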
- if (adding) {
- entry->ip_addr = ip_addr;
- entry->dev = rocker_port->dev;
- ether_addr_copy(entry->eth_dst, eth_dst);
- entry->ttl_check = true;
- _rocker_neigh_add(rocker, trans, entry);
- } else if (removing) {
- memcpy(entry, found, sizeof(*entry));
- _rocker_neigh_del(trans, found);
- } else if (updating) {
- _rocker_neigh_update(found, trans, eth_dst, true);
- memcpy(entry, found, sizeof(*entry));
- } else {
- err = -ENOENT;
- }
-
- spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
-
- if (err)
- goto err_out;
-
- /* For each active neighbor, we have an L3 unicast group and
- * a /32 route to the neighbor, which uses the L3 unicast
- * group. The L3 unicast group can also be referred to by
- * other routes' nexthops.
- */
-
- err = rocker_group_l3_unicast(rocker_port, trans, flags,
- entry->index,
- rocker_port->dev->dev_addr,
- entry->eth_dst,
- rocker_port->internal_vlan_id,
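- /* Routes with a gateway on this port resolve to an L3 unicast
- * nexthop group; everything else is trapped to the CPU via the
- * L2 interface group for port 0.
- */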
- entry->ttl_check,
- rocker_port->pport);
- if (err) {
- netdev_err(rocker_port->dev,
- "Error (%d) L3 unicast group index %d\n",
- err, entry->index);
- goto err_out;
- }
-
- if (adding || removing) {
- group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
- err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
- eth_type, ip_addr,
- inet_make_mask(32),
- priority, goto_tbl,
- group_id, flags);
-
- if (err)
- netdev_err(rocker_port->dev,
- "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
- err, &entry->ip_addr, group_id);
- }
-
-err_out:
- if (!adding)
- rocker_port_kfree(trans, entry);
-
- return err;
-}
-
-static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- __be32 ip_addr)
-{
- struct net_device *dev = rocker_port->dev;
- struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
- int err = 0;
-
- if (!n) {
- n = neigh_create(&arp_tbl, &ip_addr, dev);
- if (IS_ERR(n))
- return PTR_ERR(n);
- }
-
- /* If the neigh is already resolved, then go ahead and
- * install the entry, otherwise start the ARP process to
- * resolve the neigh.
- */
-
- if (n->nud_state & NUD_VALID)
- err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
- ip_addr, n->ha);
- else
- neigh_event_send(n, NULL);
-
- neigh_release(n);
- return err;
-}
-
-static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- __be32 ip_addr, u32 *index)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_neigh_tbl_entry *entry;
- struct rocker_neigh_tbl_entry *found;
- unsigned long lock_flags;
- bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
- bool updating;
- bool removing;
- bool resolved = true;
- int err = 0;
-
- entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
- if (!entry)
- return -ENOMEM;
-
- spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
-
- found = rocker_neigh_tbl_find(rocker, ip_addr);
- if (found)
- *index = found->index;
-
- updating = found && adding;
- removing = found && !adding;
- adding = !found && adding;
-
- if (adding) {
- entry->ip_addr = ip_addr;
- entry->dev = rocker_port->dev;
- _rocker_neigh_add(rocker, trans, entry);
- *index = entry->index;
- resolved = false;
- } else if (removing) {
- _rocker_neigh_del(trans, found);
- } else if (updating) {
- _rocker_neigh_update(found, trans, NULL, false);
- resolved = !is_zero_ether_addr(found->eth_dst);
- } else {
- err = -ENOENT;
- }
-
- spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
-
- if (!adding)
- rocker_port_kfree(trans, entry);
-
- if (err)
- return err;
-
- /* Resolved means neigh ip_addr is resolved to neigh mac. */
-
- if (!resolved)
- err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
-
- return err;
-}
-
-static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- int flags, __be16 vlan_id)
-{
- struct rocker_port *p;
- const struct rocker *rocker = rocker_port->rocker;
- u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
- u32 *group_ids;
- u8 group_count = 0;
- int err = 0;
- int i;
-
- group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
- rocker->port_count, sizeof(u32));
- if (!group_ids)
- return -ENOMEM;
-
- /* Adjust the flood group for this VLAN. The flood group
- * references an L2 interface group for each port in this
- * VLAN.
- */
-
- for (i = 0; i < rocker->port_count; i++) {
- p = rocker->ports[i];
- if (!p)
- continue;
- if (!rocker_port_is_bridged(p))
- continue;
- if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
- group_ids[group_count++] =
- ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
- }
- }
-
- /* If there are no bridged ports in this VLAN, we're done */
- if (group_count == 0)
- goto no_ports_in_vlan;
-
- err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
- group_count, group_ids, group_id);
- if (err)
- netdev_err(rocker_port->dev,
- "Error (%d) port VLAN l2 flood group\n", err);
-
-no_ports_in_vlan:
- rocker_port_kfree(trans, group_ids);
- return err;
-}
-
-static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id, bool pop_vlan)
-{
- const struct rocker *rocker = rocker_port->rocker;
- struct rocker_port *p;
- bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
- u32 out_pport;
- int ref = 0;
- int err;
- int i;
-
- /* An L2 interface group for this port in this VLAN, but
- * only when port STP state is LEARNING|FORWARDING.
- */
-
- if (rocker_port->stp_state == BR_STATE_LEARNING ||
- rocker_port->stp_state == BR_STATE_FORWARDING) {
- out_pport = rocker_port->pport;
- err = rocker_group_l2_interface(rocker_port, trans, flags,
- vlan_id, out_pport, pop_vlan);
- if (err) {
- netdev_err(rocker_port->dev,
- "Error (%d) port VLAN l2 group for pport %d\n",
- err, out_pport);
- return err;
- }
- }
-
- /* An L2 interface group for this VLAN to CPU port.
- * Add when first port joins this VLAN and destroy when
- * last port leaves this VLAN.
- */
-
- for (i = 0; i < rocker->port_count; i++) {
- p = rocker->ports[i];
- if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
- ref++;
- }
-
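- /* Only act when the first port joins (adding, ref == 1) or the
- * last port leaves (removing, ref == 0); otherwise the CPU-port
- * group is already in the desired state.
- */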
- if ((!adding || ref != 1) && (adding || ref != 0))
- return 0;
-
- out_pport = 0;
- err = rocker_group_l2_interface(rocker_port, trans, flags,
- vlan_id, out_pport, pop_vlan);
- if (err) {
- netdev_err(rocker_port->dev,
- "Error (%d) port VLAN l2 group for CPU port\n", err);
- return err;
- }
-
- return 0;
-}
-
-static struct rocker_ctrl {
- const u8 *eth_dst;
- const u8 *eth_dst_mask;
- __be16 eth_type;
- bool acl;
- bool bridge;
- bool term;
- bool copy_to_cpu;
-} rocker_ctrls[] = {
- [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
- /* pass link local multicast pkts up to CPU for filtering */
- .eth_dst = ll_mac,
- .eth_dst_mask = ll_mask,
- .acl = true,
- },
- [ROCKER_CTRL_LOCAL_ARP] = {
- /* pass local ARP pkts up to CPU */
- .eth_dst = zero_mac,
- .eth_dst_mask = zero_mac,
- .eth_type = htons(ETH_P_ARP),
- .acl = true,
- },
- [ROCKER_CTRL_IPV4_MCAST] = {
- /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
- .eth_dst = ipv4_mcast,
- .eth_dst_mask = ipv4_mask,
- .eth_type = htons(ETH_P_IP),
- .term = true,
- .copy_to_cpu = true,
- },
- [ROCKER_CTRL_IPV6_MCAST] = {
- /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
- .eth_dst = ipv6_mcast,
- .eth_dst_mask = ipv6_mask,
- .eth_type = htons(ETH_P_IPV6),
- .term = true,
- .copy_to_cpu = true,
- },
- [ROCKER_CTRL_DFLT_BRIDGING] = {
- /* flood any pkts on vlan */
- .bridge = true,
- .copy_to_cpu = true,
- },
- [ROCKER_CTRL_DFLT_OVS] = {
- /* pass all pkts up to CPU */
- .eth_dst = zero_mac,
- .eth_dst_mask = zero_mac,
- .acl = true,
- },
-};
-
-static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- const struct rocker_ctrl *ctrl, __be16 vlan_id)
-{
- u32 in_pport = rocker_port->pport;
- u32 in_pport_mask = 0xffffffff;
- u32 out_pport = 0;
- const u8 *eth_src = NULL;
- const u8 *eth_src_mask = NULL;
- __be16 vlan_id_mask = htons(0xffff);
- u8 ip_proto = 0;
- u8 ip_proto_mask = 0;
- u8 ip_tos = 0;
- u8 ip_tos_mask = 0;
- u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
- int err;
-
- err = rocker_flow_tbl_acl(rocker_port, trans, flags,
- in_pport, in_pport_mask,
- eth_src, eth_src_mask,
- ctrl->eth_dst, ctrl->eth_dst_mask,
- ctrl->eth_type,
- vlan_id, vlan_id_mask,
- ip_proto, ip_proto_mask,
- ip_tos, ip_tos_mask,
- group_id);
-
- if (err)
- netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
-
- return err;
-}
-
-static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- int flags,
- const struct rocker_ctrl *ctrl,
- __be16 vlan_id)
-{
- enum rocker_of_dpa_table_id goto_tbl =
- ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
- u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
- u32 tunnel_id = 0;
- int err;
-
- if (!rocker_port_is_bridged(rocker_port))
- return 0;
-
- err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
- ctrl->eth_dst, ctrl->eth_dst_mask,
- vlan_id, tunnel_id,
- goto_tbl, group_id, ctrl->copy_to_cpu);
-
- if (err)
- netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
-
- return err;
-}
-
-static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- const struct rocker_ctrl *ctrl, __be16 vlan_id)
-{
- u32 in_pport_mask = 0xffffffff;
- __be16 vlan_id_mask = htons(0xffff);
- int err;
-
- if (ntohs(vlan_id) == 0)
- vlan_id = rocker_port->internal_vlan_id;
-
- err = rocker_flow_tbl_term_mac(rocker_port, trans,
- rocker_port->pport, in_pport_mask,
- ctrl->eth_type, ctrl->eth_dst,
- ctrl->eth_dst_mask, vlan_id,
- vlan_id_mask, ctrl->copy_to_cpu,
- flags);
-
- if (err)
- netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
-
- return err;
-}
-
-static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- const struct rocker_ctrl *ctrl, __be16 vlan_id)
-{
- if (ctrl->acl)
- return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
- ctrl, vlan_id);
- if (ctrl->bridge)
- return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
- ctrl, vlan_id);
-
- if (ctrl->term)
- return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
- ctrl, vlan_id);
-
- return -EOPNOTSUPP;
-}
-
-static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id)
-{
- int err = 0;
- int i;
-
- for (i = 0; i < ROCKER_CTRL_MAX; i++) {
- if (rocker_port->ctrls[i]) {
- err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
- &rocker_ctrls[i], vlan_id);
- if (err)
- return err;
- }
- }
-
- return err;
-}
-
-static int rocker_port_ctrl(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- const struct rocker_ctrl *ctrl)
-{
- u16 vid;
- int err = 0;
-
- for (vid = 1; vid < VLAN_N_VID; vid++) {
- if (!test_bit(vid, rocker_port->vlan_bitmap))
- continue;
- err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
- ctrl, htons(vid));
- if (err)
- break;
- }
-
- return err;
-}
-
-static int rocker_port_vlan(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags, u16 vid)
-{
- enum rocker_of_dpa_table_id goto_tbl =
- ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
- u32 in_pport = rocker_port->pport;
- __be16 vlan_id = htons(vid);
- __be16 vlan_id_mask = htons(0xffff);
- __be16 internal_vlan_id;
- bool untagged;
- bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
- int err;
-
- internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
-
- if (adding && test_bit(ntohs(internal_vlan_id),
- rocker_port->vlan_bitmap))
- return 0; /* already added */
- else if (!adding && !test_bit(ntohs(internal_vlan_id),
- rocker_port->vlan_bitmap))
- return 0; /* already removed */
-
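- /* Flip the membership bit speculatively so the helpers below
- * see the new state; the prepare phase undoes the flip at
- * err_out.
- */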
- change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
-
- if (adding) {
- err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
- internal_vlan_id);
- if (err) {
- netdev_err(rocker_port->dev,
- "Error (%d) port ctrl vlan add\n", err);
- goto err_out;
- }
- }
-
- err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
- internal_vlan_id, untagged);
- if (err) {
- netdev_err(rocker_port->dev,
- "Error (%d) port VLAN l2 groups\n", err);
- goto err_out;
- }
-
- err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
- internal_vlan_id);
- if (err) {
- netdev_err(rocker_port->dev,
- "Error (%d) port VLAN l2 flood group\n", err);
- goto err_out;
- }
-
- err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
- in_pport, vlan_id, vlan_id_mask,
- goto_tbl, untagged, internal_vlan_id);
- if (err)
- netdev_err(rocker_port->dev,
- "Error (%d) port VLAN table\n", err);
-
-err_out:
- if (switchdev_trans_ph_prepare(trans))
- change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
-
- return err;
-}
-
-static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags)
-{
- enum rocker_of_dpa_table_id goto_tbl;
- u32 in_pport;
- u32 in_pport_mask;
- int err;
-
- /* Normal Ethernet Frames. Matches pkts from any local physical
- * ports. Goto VLAN tbl.
- */
-
- in_pport = 0;
- in_pport_mask = 0xffff0000;
- goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
-
- err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
- in_pport, in_pport_mask,
- goto_tbl);
- if (err)
- netdev_err(rocker_port->dev,
- "Error (%d) ingress port table entry\n", err);
-
- return err;
-}
-
-struct rocker_fdb_learn_work {
- struct work_struct work;
- struct rocker_port *rocker_port;
- struct switchdev_trans *trans;
- int flags;
- u8 addr[ETH_ALEN];
- u16 vid;
-};
-
-static void rocker_port_fdb_learn_work(struct work_struct *work)
-{
- const struct rocker_fdb_learn_work *lw =
- container_of(work, struct rocker_fdb_learn_work, work);
- bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
- bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
- struct switchdev_notifier_fdb_info info;
-
- info.addr = lw->addr;
- info.vid = lw->vid;
-
- rtnl_lock();
- if (learned && removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
- lw->rocker_port->dev, &info.info);
- else if (learned && !removing)
- call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
- lw->rocker_port->dev, &info.info);
- rtnl_unlock();
-
- rocker_port_kfree(lw->trans, lw);
-}
-
-static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- const u8 *addr, __be16 vlan_id)
-{
- struct rocker_fdb_learn_work *lw;
- enum rocker_of_dpa_table_id goto_tbl =
- ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
- u32 out_pport = rocker_port->pport;
- u32 tunnel_id = 0;
- u32 group_id = ROCKER_GROUP_NONE;
- bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
- bool copy_to_cpu = false;
- int err;
-
- if (rocker_port_is_bridged(rocker_port))
- group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
-
- if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
- err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
- NULL, vlan_id, tunnel_id, goto_tbl,
- group_id, copy_to_cpu);
- if (err)
- return err;
- }
-
- if (!syncing)
- return 0;
-
- if (!rocker_port_is_bridged(rocker_port))
- return 0;
-
- lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
- if (!lw)
- return -ENOMEM;
-
- INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
-
- lw->rocker_port = rocker_port;
- lw->trans = trans;
- lw->flags = flags;
- ether_addr_copy(lw->addr, addr);
- lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
-
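- /* The prepare phase must not have side effects, so drop the
- * work item instead of scheduling it.
- */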
- if (switchdev_trans_ph_prepare(trans))
- rocker_port_kfree(trans, lw);
- else
- schedule_work(&lw->work);
-
- return 0;
-}
-
-static struct rocker_fdb_tbl_entry *
-rocker_fdb_tbl_find(const struct rocker *rocker,
- const struct rocker_fdb_tbl_entry *match)
-{
- struct rocker_fdb_tbl_entry *found;
-
- hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
- if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
- return found;
-
- return NULL;
-}
-
-static int rocker_port_fdb(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- const unsigned char *addr,
- __be16 vlan_id, int flags)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_fdb_tbl_entry *fdb;
- struct rocker_fdb_tbl_entry *found;
- bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
- unsigned long lock_flags;
-
- fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
- if (!fdb)
- return -ENOMEM;
-
- fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
- fdb->touched = jiffies;
- fdb->key.rocker_port = rocker_port;
- ether_addr_copy(fdb->key.addr, addr);
- fdb->key.vlan_id = vlan_id;
- fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
-
- spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
-
- found = rocker_fdb_tbl_find(rocker, fdb);
-
- if (found) {
- found->touched = jiffies;
- if (removing) {
- rocker_port_kfree(trans, fdb);
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- }
- } else if (!removing) {
- if (!switchdev_trans_ph_prepare(trans))
- hash_add(rocker->fdb_tbl, &fdb->entry,
- fdb->key_crc32);
- }
-
- spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
-
- /* Check if we're adding an entry that already exists, or removing one that can't be found */
- if (!found != !removing) {
- rocker_port_kfree(trans, fdb);
- if (!found && removing)
- return 0;
- /* Refreshing existing to update aging timers */
- flags |= ROCKER_OP_FLAG_REFRESH;
- }
-
- return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
-}
-
-static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_fdb_tbl_entry *found;
- unsigned long lock_flags;
- struct hlist_node *tmp;
- int bkt;
- int err = 0;
-
- if (rocker_port->stp_state == BR_STATE_LEARNING ||
- rocker_port->stp_state == BR_STATE_FORWARDING)
- return 0;
-
- flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
-
- spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
-
- hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.rocker_port != rocker_port)
- continue;
- if (!found->learned)
- continue;
- err = rocker_port_fdb_learn(rocker_port, trans, flags,
- found->key.addr,
- found->key.vlan_id);
- if (err)
- goto err_out;
- if (!switchdev_trans_ph_prepare(trans))
- hash_del(&found->entry);
- }
-
-err_out:
- spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
-
- return err;
-}
-
-static void rocker_fdb_cleanup(unsigned long data)
-{
- struct rocker *rocker = (struct rocker *)data;
- struct rocker_port *rocker_port;
- struct rocker_fdb_tbl_entry *entry;
- struct hlist_node *tmp;
- unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME;
- unsigned long expires;
- unsigned long lock_flags;
- int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
- ROCKER_OP_FLAG_LEARNED;
- int bkt;
-
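- /* Walk all learned entries, expiring any whose port ageing time
- * has passed, and rearm the timer for the earliest remaining
- * expiry.
- */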
- spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
-
- hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
- if (!entry->learned)
- continue;
- rocker_port = entry->key.rocker_port;
- expires = entry->touched + rocker_port->ageing_time;
- if (time_before_eq(expires, jiffies)) {
- rocker_port_fdb_learn(rocker_port, NULL,
- flags, entry->key.addr,
- entry->key.vlan_id);
- hash_del(&entry->entry);
- } else if (time_before(expires, next_timer)) {
- next_timer = expires;
- }
- }
-
- spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
-
- mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
-}
-
-static int rocker_port_router_mac(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- __be16 vlan_id)
-{
- u32 in_pport_mask = 0xffffffff;
- __be16 eth_type;
- const u8 *dst_mac_mask = ff_mac;
- __be16 vlan_id_mask = htons(0xffff);
- bool copy_to_cpu = false;
- int err;
-
- if (ntohs(vlan_id) == 0)
- vlan_id = rocker_port->internal_vlan_id;
-
- eth_type = htons(ETH_P_IP);
- err = rocker_flow_tbl_term_mac(rocker_port, trans,
- rocker_port->pport, in_pport_mask,
- eth_type, rocker_port->dev->dev_addr,
- dst_mac_mask, vlan_id, vlan_id_mask,
- copy_to_cpu, flags);
- if (err)
- return err;
-
- eth_type = htons(ETH_P_IPV6);
- err = rocker_flow_tbl_term_mac(rocker_port, trans,
- rocker_port->pport, in_pport_mask,
- eth_type, rocker_port->dev->dev_addr,
- dst_mac_mask, vlan_id, vlan_id_mask,
- copy_to_cpu, flags);
-
- return err;
-}
-
-static int rocker_port_fwding(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags)
-{
- bool pop_vlan;
- u32 out_pport;
- __be16 vlan_id;
- u16 vid;
- int err;
-
- /* Port will be forwarding-enabled if its STP state is LEARNING
- * or FORWARDING. Traffic from CPU can still egress, regardless of
- * port STP state. Use L2 interface group on port VLANs as a way
- * to toggle port forwarding: if forwarding is disabled, L2
- * interface group will not exist.
- */
-
- if (rocker_port->stp_state != BR_STATE_LEARNING &&
- rocker_port->stp_state != BR_STATE_FORWARDING)
- flags |= ROCKER_OP_FLAG_REMOVE;
-
- out_pport = rocker_port->pport;
- for (vid = 1; vid < VLAN_N_VID; vid++) {
- if (!test_bit(vid, rocker_port->vlan_bitmap))
- continue;
- vlan_id = htons(vid);
- pop_vlan = rocker_vlan_id_is_internal(vlan_id);
- err = rocker_group_l2_interface(rocker_port, trans, flags,
- vlan_id, out_pport, pop_vlan);
- if (err) {
- netdev_err(rocker_port->dev,
- "Error (%d) port VLAN l2 group for pport %d\n",
- err, out_pport);
- return err;
- }
- }
-
- return 0;
-}
-
-static int rocker_port_stp_update(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags,
- u8 state)
-{
- bool want[ROCKER_CTRL_MAX] = { 0, };
- bool prev_ctrls[ROCKER_CTRL_MAX];
- u8 uninitialized_var(prev_state);
- int err;
- int i;
-
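- /* Save the current ctrls and STP state so the speculative
- * update below can be rolled back at err_out during the prepare
- * phase.
- */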
- if (switchdev_trans_ph_prepare(trans)) {
- memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
- prev_state = rocker_port->stp_state;
- }
-
- if (rocker_port->stp_state == state)
- return 0;
-
- rocker_port->stp_state = state;
-
- switch (state) {
- case BR_STATE_DISABLED:
- /* port is completely disabled */
- break;
- case BR_STATE_LISTENING:
- case BR_STATE_BLOCKING:
- want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
- break;
- case BR_STATE_LEARNING:
- case BR_STATE_FORWARDING:
- if (!rocker_port_is_ovsed(rocker_port))
- want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
- want[ROCKER_CTRL_IPV4_MCAST] = true;
- want[ROCKER_CTRL_IPV6_MCAST] = true;
- if (rocker_port_is_bridged(rocker_port))
- want[ROCKER_CTRL_DFLT_BRIDGING] = true;
- else if (rocker_port_is_ovsed(rocker_port))
- want[ROCKER_CTRL_DFLT_OVS] = true;
- else
- want[ROCKER_CTRL_LOCAL_ARP] = true;
- break;
- }
-
- for (i = 0; i < ROCKER_CTRL_MAX; i++) {
- if (want[i] != rocker_port->ctrls[i]) {
- int ctrl_flags = flags |
- (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
- err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
- &rocker_ctrls[i]);
- if (err)
- goto err_out;
- rocker_port->ctrls[i] = want[i];
- }
- }
-
- err = rocker_port_fdb_flush(rocker_port, trans, flags);
- if (err)
- goto err_out;
-
- err = rocker_port_fwding(rocker_port, trans, flags);
-
-err_out:
- if (switchdev_trans_ph_prepare(trans)) {
- memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
- rocker_port->stp_state = prev_state;
- }
-
- return err;
-}
-
-static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags)
-{
- if (rocker_port_is_bridged(rocker_port))
- /* bridge STP will enable port */
- return 0;
-
- /* port is not bridged, so simulate going to FORWARDING state */
- return rocker_port_stp_update(rocker_port, trans, flags,
- BR_STATE_FORWARDING);
-}
-
-static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, int flags)
-{
- if (rocker_port_is_bridged(rocker_port))
- /* bridge STP will disable port */
- return 0;
-
- /* port is not bridged, so simulate going to DISABLED state */
- return rocker_port_stp_update(rocker_port, trans, flags,
- BR_STATE_DISABLED);
-}
-
-static struct rocker_internal_vlan_tbl_entry *
-rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
-{
- struct rocker_internal_vlan_tbl_entry *found;
-
- hash_for_each_possible(rocker->internal_vlan_tbl, found,
- entry, ifindex) {
- if (found->ifindex == ifindex)
- return found;
- }
-
- return NULL;
-}
-
-static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
- int ifindex)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_internal_vlan_tbl_entry *entry;
- struct rocker_internal_vlan_tbl_entry *found;
- unsigned long lock_flags;
- int i;
-
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return 0;
-
- entry->ifindex = ifindex;
-
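- /* Reuse an existing mapping for this ifindex if there is one;
- * otherwise claim the first free internal VLAN from the bitmap.
- */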
- spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
-
- found = rocker_internal_vlan_tbl_find(rocker, ifindex);
- if (found) {
- kfree(entry);
- goto found;
- }
-
- found = entry;
- hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
-
- for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
- if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
- continue;
- found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
- goto found;
- }
-
- netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
-
-found:
- found->ref_count++;
- spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
-
- return found->vlan_id;
-}
-
-static void
-rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
- int ifindex)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_internal_vlan_tbl_entry *found;
- unsigned long lock_flags;
- unsigned long bit;
-
- spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
-
- found = rocker_internal_vlan_tbl_find(rocker, ifindex);
- if (!found) {
- netdev_err(rocker_port->dev,
- "ifindex (%d) not found in internal VLAN tbl\n",
- ifindex);
- goto not_found;
- }
-
- if (--found->ref_count <= 0) {
- bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
- clear_bit(bit, rocker->internal_vlan_bitmap);
- hash_del(&found->entry);
- kfree(found);
- }
-
-not_found:
- spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
-}
-
-static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
- struct switchdev_trans *trans, __be32 dst,
- int dst_len, const struct fib_info *fi,
- u32 tb_id, int flags)
-{
- const struct fib_nh *nh;
- __be16 eth_type = htons(ETH_P_IP);
- __be32 dst_mask = inet_make_mask(dst_len);
- __be16 internal_vlan_id = rocker_port->internal_vlan_id;
- u32 priority = fi->fib_priority;
- enum rocker_of_dpa_table_id goto_tbl =
- ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
- u32 group_id;
- bool nh_on_port;
- bool has_gw;
- u32 index;
- int err;
-
- /* XXX support ECMP */
-
- nh = fi->fib_nh;
- nh_on_port = (fi->fib_dev == rocker_port->dev);
- has_gw = !!nh->nh_gw;
-
- if (has_gw && nh_on_port) {
- err = rocker_port_ipv4_nh(rocker_port, trans, flags,
- nh->nh_gw, &index);
- if (err)
- return err;
-
- group_id = ROCKER_GROUP_L3_UNICAST(index);
- } else {
- /* Send to CPU for processing */
- group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
- }
-
- err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
- dst_mask, priority, goto_tbl,
- group_id, flags);
- if (err)
- netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
- err, &dst);
-
- return err;
-}
-
-/*****************
- * Net device ops
- *****************/
-
-static int rocker_port_open(struct net_device *dev)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int err;
-
- err = rocker_port_dma_rings_init(rocker_port);
- if (err)
- return err;
-
- err = request_irq(rocker_msix_tx_vector(rocker_port),
- rocker_tx_irq_handler, 0,
- rocker_driver_name, rocker_port);
- if (err) {
- netdev_err(rocker_port->dev, "cannot assign tx irq\n");
- goto err_request_tx_irq;
- }
-
- err = request_irq(rocker_msix_rx_vector(rocker_port),
- rocker_rx_irq_handler, 0,
- rocker_driver_name, rocker_port);
- if (err) {
- netdev_err(rocker_port->dev, "cannot assign rx irq\n");
- goto err_request_rx_irq;
- }
-
- err = rocker_port_fwd_enable(rocker_port, NULL, 0);
- if (err)
- goto err_fwd_enable;
-
- napi_enable(&rocker_port->napi_tx);
- napi_enable(&rocker_port->napi_rx);
- if (!dev->proto_down)
- rocker_port_set_enable(rocker_port, true);
- netif_start_queue(dev);
- return 0;
-
-err_fwd_enable:
- free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
-err_request_rx_irq:
- free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
-err_request_tx_irq:
- rocker_port_dma_rings_fini(rocker_port);
- return err;
-}
-
-static int rocker_port_stop(struct net_device *dev)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
-
- netif_stop_queue(dev);
- rocker_port_set_enable(rocker_port, false);
- napi_disable(&rocker_port->napi_rx);
- napi_disable(&rocker_port->napi_tx);
- rocker_port_fwd_disable(rocker_port, NULL,
- ROCKER_OP_FLAG_NOWAIT);
- free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
- free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
- rocker_port_dma_rings_fini(rocker_port);
-
- return 0;
-}
-
-static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
- const struct rocker_desc_info *desc_info)
-{
- const struct rocker *rocker = rocker_port->rocker;
- struct pci_dev *pdev = rocker->pdev;
- const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
- struct rocker_tlv *attr;
- int rem;
-
- rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
- if (!attrs[ROCKER_TLV_TX_FRAGS])
- return;
- rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
- const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
- dma_addr_t dma_handle;
- size_t len;
-
- if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
- continue;
- rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
- attr);
- if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
- !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
- continue;
- dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
- len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
- pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
- }
-}
-
-static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- char *buf, size_t buf_len)
-{
- const struct rocker *rocker = rocker_port->rocker;
- struct pci_dev *pdev = rocker->pdev;
- dma_addr_t dma_handle;
- struct rocker_tlv *frag;
-
- dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
- if (net_ratelimit())
- netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
- return -EIO;
- }
- frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
- if (!frag)
- goto unmap_frag;
- if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
- dma_handle))
- goto nest_cancel;
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
- buf_len))
- goto nest_cancel;
- rocker_tlv_nest_end(desc_info, frag);
- return 0;
-
-nest_cancel:
- rocker_tlv_nest_cancel(desc_info, frag);
-unmap_frag:
- pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
- return -EMSGSIZE;
-}
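/*
 * Note the failure rollback above also runs in reverse order of setup:
 * the TLV nest is cancelled first (dropping the partially written
 * attribute), and only then is the DMA mapping torn down.
 */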
-
-static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_desc_info *desc_info;
- struct rocker_tlv *frags;
- int i;
- int err;
-
- desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
- if (unlikely(!desc_info)) {
- if (net_ratelimit())
- netdev_err(dev, "tx ring full when queue awake\n");
- return NETDEV_TX_BUSY;
- }
-
- rocker_desc_cookie_ptr_set(desc_info, skb);
-
- frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
- if (!frags)
- goto out;
- err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
- skb->data, skb_headlen(skb));
- if (err)
- goto nest_cancel;
- if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
- err = skb_linearize(skb);
- if (err)
- goto unmap_frags;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
- skb_frag_address(frag),
- skb_frag_size(frag));
- if (err)
- goto unmap_frags;
- }
- rocker_tlv_nest_end(desc_info, frags);
-
- rocker_desc_gen_clear(desc_info);
- rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
-
- desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
- if (!desc_info)
- netif_stop_queue(dev);
-
- return NETDEV_TX_OK;
-
-unmap_frags:
- rocker_tx_desc_frags_unmap(rocker_port, desc_info);
-nest_cancel:
- rocker_tlv_nest_cancel(desc_info, frags);
-out:
- dev_kfree_skb(skb);
- dev->stats.tx_dropped++;
-
- return NETDEV_TX_OK;
-}
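/*
 * Ring flow control: after posting the descriptor, the next head slot
 * is peeked; a NULL result means the ring is now full, so the queue is
 * stopped here and restarted from rocker_port_poll_tx() once completed
 * descriptors return credits.
 */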
-
-static int rocker_port_set_mac_address(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
- struct rocker_port *rocker_port = netdev_priv(dev);
- int err;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
- if (err)
- return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- return 0;
-}
-
-static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int running = netif_running(dev);
- int err;
-
-#define ROCKER_PORT_MIN_MTU 68
-#define ROCKER_PORT_MAX_MTU 9000
-
- if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
- return -EINVAL;
-
- if (running)
- rocker_port_stop(dev);
-
- netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
- dev->mtu = new_mtu;
-
- err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
- if (err)
- return err;
-
- if (running)
- err = rocker_port_open(dev);
-
- return err;
-}
-
-static int rocker_port_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct port_name name = { .buf = buf, .len = len };
- int err;
-
- err = rocker_cmd_exec(rocker_port, NULL, 0,
- rocker_cmd_get_port_settings_prep, NULL,
- rocker_cmd_get_port_settings_phys_name_proc,
- &name);
-
- return err ? -EOPNOTSUPP : 0;
-}
-
-static int rocker_port_change_proto_down(struct net_device *dev,
- bool proto_down)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
-
- if (rocker_port->dev->flags & IFF_UP)
- rocker_port_set_enable(rocker_port, !proto_down);
- rocker_port->dev->proto_down = proto_down;
- return 0;
-}
-
-static void rocker_port_neigh_destroy(struct neighbour *n)
-{
- struct rocker_port *rocker_port = netdev_priv(n->dev);
- int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
- __be32 ip_addr = *(__be32 *)n->primary_key;
-
- rocker_port_ipv4_neigh(rocker_port, NULL,
- flags, ip_addr, n->ha);
-}
-
-static const struct net_device_ops rocker_port_netdev_ops = {
- .ndo_open = rocker_port_open,
- .ndo_stop = rocker_port_stop,
- .ndo_start_xmit = rocker_port_xmit,
- .ndo_set_mac_address = rocker_port_set_mac_address,
- .ndo_change_mtu = rocker_port_change_mtu,
- .ndo_bridge_getlink = switchdev_port_bridge_getlink,
- .ndo_bridge_setlink = switchdev_port_bridge_setlink,
- .ndo_bridge_dellink = switchdev_port_bridge_dellink,
- .ndo_fdb_add = switchdev_port_fdb_add,
- .ndo_fdb_del = switchdev_port_fdb_del,
- .ndo_fdb_dump = switchdev_port_fdb_dump,
- .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
- .ndo_change_proto_down = rocker_port_change_proto_down,
- .ndo_neigh_destroy = rocker_port_neigh_destroy,
-};
-
-/********************
- * swdev interface
- ********************/
-
-static int rocker_port_attr_get(struct net_device *dev,
- struct switchdev_attr *attr)
-{
- const struct rocker_port *rocker_port = netdev_priv(dev);
- const struct rocker *rocker = rocker_port->rocker;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = sizeof(rocker->hw.id);
- memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- attr->u.brport_flags = rocker_port->brport_flags;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- unsigned long brport_flags)
-{
- unsigned long orig_flags;
- int err = 0;
-
- orig_flags = rocker_port->brport_flags;
- rocker_port->brport_flags = brport_flags;
- if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
- err = rocker_port_set_learning(rocker_port, trans);
-
- if (switchdev_trans_ph_prepare(trans))
- rocker_port->brport_flags = orig_flags;
-
- return err;
-}
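/*
 * switchdev attr setters run in two phases: a prepare pass that must
 * validate and reserve resources without leaving visible state changes,
 * then a commit pass that is not allowed to fail. That is why the
 * function above applies the flags and then restores the originals
 * when still in the prepare phase.
 */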
-
-static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- u32 ageing_time)
-{
- if (!switchdev_trans_ph_prepare(trans)) {
- rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
- mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
- }
-
- return 0;
-}
-
-static int rocker_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int err = 0;
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = rocker_port_stp_update(rocker_port, trans, 0,
- attr->u.stp_state);
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = rocker_port_brport_flags_set(rocker_port, trans,
- attr->u.brport_flags);
- break;
- case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = rocker_port_bridge_ageing_time(rocker_port, trans,
- attr->u.ageing_time);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int rocker_port_vlan_add(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- u16 vid, u16 flags)
-{
- int err;
-
- /* XXX deal with flags for PVID and untagged */
-
- err = rocker_port_vlan(rocker_port, trans, 0, vid);
- if (err)
- return err;
-
- err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
- if (err)
- rocker_port_vlan(rocker_port, trans,
- ROCKER_OP_FLAG_REMOVE, vid);
-
- return err;
-}
-
-static int rocker_port_vlans_add(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- const struct switchdev_obj_port_vlan *vlan)
-{
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = rocker_port_vlan_add(rocker_port, trans,
- vid, vlan->flags);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int rocker_port_fdb_add(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- const struct switchdev_obj_port_fdb *fdb)
-{
- __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
- int flags = 0;
-
- if (!rocker_port_is_bridged(rocker_port))
- return -EINVAL;
-
- return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
-}
-
-static int rocker_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- const struct switchdev_obj_ipv4_fib *fib4;
- int err = 0;
-
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = rocker_port_vlans_add(rocker_port, trans,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
- break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
- err = rocker_port_fib_ipv4(rocker_port, trans,
- htonl(fib4->dst), fib4->dst_len,
- &fib4->fi, fib4->tb_id, 0);
- break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_port_fdb_add(rocker_port, trans,
- SWITCHDEV_OBJ_PORT_FDB(obj));
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int rocker_port_vlan_del(struct rocker_port *rocker_port,
- u16 vid, u16 flags)
-{
- int err;
-
- err = rocker_port_router_mac(rocker_port, NULL,
- ROCKER_OP_FLAG_REMOVE, htons(vid));
- if (err)
- return err;
-
- return rocker_port_vlan(rocker_port, NULL,
- ROCKER_OP_FLAG_REMOVE, vid);
-}
-
-static int rocker_port_vlans_del(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int rocker_port_fdb_del(struct rocker_port *rocker_port,
- struct switchdev_trans *trans,
- const struct switchdev_obj_port_fdb *fdb)
-{
- __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
- int flags = ROCKER_OP_FLAG_REMOVE;
-
- if (!rocker_port_is_bridged(rocker_port))
- return -EINVAL;
-
- return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
-}
-
-static int rocker_port_obj_del(struct net_device *dev,
- const struct switchdev_obj *obj)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- const struct switchdev_obj_ipv4_fib *fib4;
- int err = 0;
-
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = rocker_port_vlans_del(rocker_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
- break;
- case SWITCHDEV_OBJ_ID_IPV4_FIB:
- fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
- err = rocker_port_fib_ipv4(rocker_port, NULL,
- htonl(fib4->dst), fib4->dst_len,
- &fib4->fi, fib4->tb_id,
- ROCKER_OP_FLAG_REMOVE);
- break;
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_port_fdb_del(rocker_port, NULL,
- SWITCHDEV_OBJ_PORT_FDB(obj));
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_fdb *fdb,
- switchdev_obj_dump_cb_t *cb)
-{
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_fdb_tbl_entry *found;
- struct hlist_node *tmp;
- unsigned long lock_flags;
- int bkt;
- int err = 0;
-
- spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
- hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
- if (found->key.rocker_port != rocker_port)
- continue;
- ether_addr_copy(fdb->addr, found->key.addr);
- fdb->ndm_state = NUD_REACHABLE;
- fdb->vid = rocker_port_vlan_to_vid(rocker_port,
- found->key.vlan_id);
- err = cb(&fdb->obj);
- if (err)
- break;
- }
- spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
-
- return err;
-}
-
-static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
- struct switchdev_obj_port_vlan *vlan,
- switchdev_obj_dump_cb_t *cb)
-{
- u16 vid;
- int err = 0;
-
- for (vid = 1; vid < VLAN_N_VID; vid++) {
- if (!test_bit(vid, rocker_port->vlan_bitmap))
- continue;
- vlan->flags = 0;
- if (rocker_vlan_id_is_internal(htons(vid)))
- vlan->flags |= BRIDGE_VLAN_INFO_PVID;
- vlan->vid_begin = vlan->vid_end = vid;
- err = cb(&vlan->obj);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int rocker_port_obj_dump(struct net_device *dev,
- struct switchdev_obj *obj,
- switchdev_obj_dump_cb_t *cb)
-{
- const struct rocker_port *rocker_port = netdev_priv(dev);
- int err = 0;
-
- switch (obj->id) {
- case SWITCHDEV_OBJ_ID_PORT_FDB:
- err = rocker_port_fdb_dump(rocker_port,
- SWITCHDEV_OBJ_PORT_FDB(obj), cb);
- break;
- case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = rocker_port_vlan_dump(rocker_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- return err;
-}
-
-static const struct switchdev_ops rocker_port_switchdev_ops = {
- .switchdev_port_attr_get = rocker_port_attr_get,
- .switchdev_port_attr_set = rocker_port_attr_set,
- .switchdev_port_obj_add = rocker_port_obj_add,
- .switchdev_port_obj_del = rocker_port_obj_del,
- .switchdev_port_obj_dump = rocker_port_obj_dump,
-};
-
-/********************
- * ethtool interface
- ********************/
-
-static int rocker_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
-
- return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
-}
-
-static int rocker_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
-
- return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
-}
-
-static void rocker_port_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
-}
-
-static struct rocker_port_stats {
- char str[ETH_GSTRING_LEN];
- int type;
-} rocker_port_stats[] = {
- { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
- { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
- { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
- { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
-
- { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
- { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
- { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
- { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
-};
-
-#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
-
-static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
-{
- u8 *p = data;
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
- memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int
-rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv)
-{
- struct rocker_tlv *cmd_stats;
-
- if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
- ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
- return -EMSGSIZE;
-
- cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
- if (!cmd_stats)
- return -EMSGSIZE;
-
- if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
- rocker_port->pport))
- return -EMSGSIZE;
-
- rocker_tlv_nest_end(desc_info, cmd_stats);
-
- return 0;
-}
-
-static int
-rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
- const struct rocker_desc_info *desc_info,
- void *priv)
-{
- const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
- const struct rocker_tlv *pattr;
- u32 pport;
- u64 *data = priv;
- int i;
-
- rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
-
- if (!attrs[ROCKER_TLV_CMD_INFO])
- return -EIO;
-
- rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
- attrs[ROCKER_TLV_CMD_INFO]);
-
- if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
- return -EIO;
-
- pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
- if (pport != rocker_port->pport)
- return -EIO;
-
- for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
- pattr = stats_attrs[rocker_port_stats[i].type];
- if (!pattr)
- continue;
-
- data[i] = rocker_tlv_get_u64(pattr);
- }
-
- return 0;
-}
-
-static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
- void *priv)
-{
- return rocker_cmd_exec(rocker_port, NULL, 0,
- rocker_cmd_get_port_stats_prep, NULL,
- rocker_cmd_get_port_stats_ethtool_proc,
- priv);
-}
-
-static void rocker_port_get_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
-
- if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
- data[i] = 0;
- }
-}
-
-static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return ROCKER_PORT_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
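/*
 * ethtool contract: get_sset_count(), get_strings() and
 * get_ethtool_stats() must agree on both length and order; all three
 * above derive from the single rocker_port_stats[] table, which keeps
 * them in sync by construction.
 */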
-
-static const struct ethtool_ops rocker_port_ethtool_ops = {
- .get_settings = rocker_port_get_settings,
- .set_settings = rocker_port_set_settings,
- .get_drvinfo = rocker_port_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_strings = rocker_port_get_strings,
- .get_ethtool_stats = rocker_port_get_stats,
- .get_sset_count = rocker_port_get_sset_count,
-};
-
-/*****************
- * NAPI interface
- *****************/
-
-static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
-{
- return container_of(napi, struct rocker_port, napi_tx);
-}
-
-static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
-{
- struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
- const struct rocker *rocker = rocker_port->rocker;
- const struct rocker_desc_info *desc_info;
- u32 credits = 0;
- int err;
-
- /* Cleanup tx descriptors */
- while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
- struct sk_buff *skb;
-
- err = rocker_desc_err(desc_info);
- if (err && net_ratelimit())
- netdev_err(rocker_port->dev, "tx desc received with err %d\n",
- err);
- rocker_tx_desc_frags_unmap(rocker_port, desc_info);
-
- skb = rocker_desc_cookie_ptr_get(desc_info);
- if (err == 0) {
- rocker_port->dev->stats.tx_packets++;
- rocker_port->dev->stats.tx_bytes += skb->len;
- } else {
- rocker_port->dev->stats.tx_errors++;
- }
-
- dev_kfree_skb_any(skb);
- credits++;
- }
-
- if (credits && netif_queue_stopped(rocker_port->dev))
- netif_wake_queue(rocker_port->dev);
-
- napi_complete(napi);
- rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
-
- return 0;
-}
-
-static int rocker_port_rx_proc(const struct rocker *rocker,
- const struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info)
-{
- const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
- struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
- size_t rx_len;
- u16 rx_flags = 0;
-
- if (!skb)
- return -ENOENT;
-
- rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
- if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
- return -EINVAL;
- if (attrs[ROCKER_TLV_RX_FLAGS])
- rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
-
- rocker_dma_rx_ring_skb_unmap(rocker, attrs);
-
- rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
- skb_put(skb, rx_len);
- skb->protocol = eth_type_trans(skb, rocker_port->dev);
-
- if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
- skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
-
- rocker_port->dev->stats.rx_packets++;
- rocker_port->dev->stats.rx_bytes += skb->len;
-
- netif_receive_skb(skb);
-
- return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
-}
-
-static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
-{
- return container_of(napi, struct rocker_port, napi_rx);
-}
-
-static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
-{
- struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
- const struct rocker *rocker = rocker_port->rocker;
- struct rocker_desc_info *desc_info;
- u32 credits = 0;
- int err;
-
- /* Process rx descriptors */
- while (credits < budget &&
- (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
- err = rocker_desc_err(desc_info);
- if (err) {
- if (net_ratelimit())
- netdev_err(rocker_port->dev, "rx desc received with err %d\n",
- err);
- } else {
- err = rocker_port_rx_proc(rocker, rocker_port,
- desc_info);
- if (err && net_ratelimit())
- netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
- err);
- }
- if (err)
- rocker_port->dev->stats.rx_errors++;
-
- rocker_desc_gen_clear(desc_info);
- rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
- credits++;
- }
-
- if (credits < budget)
- napi_complete(napi);
-
- rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
-
- return credits;
-}
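/*
 * NAPI contract: the rx poll returns the number of descriptors
 * consumed and calls napi_complete() only when it stayed under budget;
 * returning exactly `budget` tells the core to schedule another poll.
 * The tx cleanup poll earlier is not budgeted and always returns 0.
 */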
-
-/*****************
- * PCI driver ops
- *****************/
-
-static void rocker_carrier_init(const struct rocker_port *rocker_port)
-{
- const struct rocker *rocker = rocker_port->rocker;
- u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
- bool link_up;
-
- link_up = link_status & (1ULL << rocker_port->pport);
- if (link_up)
- netif_carrier_on(rocker_port->dev);
- else
- netif_carrier_off(rocker_port->dev);
-}
-
-static void rocker_remove_ports(const struct rocker *rocker)
-{
- struct rocker_port *rocker_port;
- int i;
-
- for (i = 0; i < rocker->port_count; i++) {
- rocker_port = rocker->ports[i];
- if (!rocker_port)
- continue;
- rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
- unregister_netdev(rocker_port->dev);
- free_netdev(rocker_port->dev);
- }
- kfree(rocker->ports);
-}
-
-static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
-{
- const struct rocker *rocker = rocker_port->rocker;
- const struct pci_dev *pdev = rocker->pdev;
- int err;
-
- err = rocker_cmd_get_port_settings_macaddr(rocker_port,
- rocker_port->dev->dev_addr);
- if (err) {
- dev_warn(&pdev->dev, "failed to get mac address, using random\n");
- eth_hw_addr_random(rocker_port->dev);
- }
-}
-
-static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
-{
- const struct pci_dev *pdev = rocker->pdev;
- struct rocker_port *rocker_port;
- struct net_device *dev;
- u16 untagged_vid = 0;
- int err;
-
- dev = alloc_etherdev(sizeof(struct rocker_port));
- if (!dev)
- return -ENOMEM;
- rocker_port = netdev_priv(dev);
- rocker_port->dev = dev;
- rocker_port->rocker = rocker;
- rocker_port->port_number = port_number;
- rocker_port->pport = port_number + 1;
- rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
- rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;
-
- rocker_port_dev_addr_init(rocker_port);
- dev->netdev_ops = &rocker_port_netdev_ops;
- dev->ethtool_ops = &rocker_port_ethtool_ops;
- dev->switchdev_ops = &rocker_port_switchdev_ops;
- netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
- NAPI_POLL_WEIGHT);
- netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
- NAPI_POLL_WEIGHT);
- rocker_carrier_init(rocker_port);
-
- dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
-
- err = register_netdev(dev);
- if (err) {
- dev_err(&pdev->dev, "register_netdev failed\n");
- goto err_register_netdev;
- }
- rocker->ports[port_number] = rocker_port;
-
- switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
-
- rocker_port_set_learning(rocker_port, NULL);
-
- err = rocker_port_ig_tbl(rocker_port, NULL, 0);
- if (err) {
- netdev_err(rocker_port->dev, "install ig port table failed\n");
- goto err_port_ig_tbl;
- }
-
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
-
- err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
- if (err) {
- netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
- goto err_untagged_vlan;
- }
-
- return 0;
-
-err_untagged_vlan:
- rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
-err_port_ig_tbl:
- rocker->ports[port_number] = NULL;
- unregister_netdev(dev);
-err_register_netdev:
- free_netdev(dev);
- return err;
-}
-
-static int rocker_probe_ports(struct rocker *rocker)
-{
- int i;
- size_t alloc_size;
- int err;
-
- alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
- rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
- if (!rocker->ports)
- return -ENOMEM;
- for (i = 0; i < rocker->port_count; i++) {
- err = rocker_probe_port(rocker, i);
- if (err)
- goto remove_ports;
- }
- return 0;
-
-remove_ports:
- rocker_remove_ports(rocker);
- return err;
-}
-
-static int rocker_msix_init(struct rocker *rocker)
-{
- struct pci_dev *pdev = rocker->pdev;
- int msix_entries;
- int i;
- int err;
-
- msix_entries = pci_msix_vec_count(pdev);
- if (msix_entries < 0)
- return msix_entries;
-
- if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
- return -EINVAL;
-
- rocker->msix_entries = kmalloc_array(msix_entries,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!rocker->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < msix_entries; i++)
- rocker->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
- if (err < 0)
- goto err_enable_msix;
-
- return 0;
-
-err_enable_msix:
- kfree(rocker->msix_entries);
- return err;
-}
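/*
 * A standalone sketch of the vector layout that rocker_msix_init()
 * checks against: vectors 0-3 are cmd/event/test/reserved, then each
 * port owns a tx/rx pair. The ROCKER_MSIX_VEC_* macros are restated
 * under shortened names so this compiles on its own.
 */
#include <stdio.h>

#define VEC_TX_BASE 4                      /* __ROCKER_MSIX_VEC_TX */
#define VEC_RX_BASE 5                      /* __ROCKER_MSIX_VEC_RX */
#define VEC_TX(port) (VEC_TX_BASE + (port) * 2)
#define VEC_RX(port) (VEC_RX_BASE + (port) * 2)
#define VEC_COUNT(portcnt) (VEC_RX((portcnt) - 1) + 1)

int main(void)
{
	/* 4 fixed vectors + 2 per port: 4 ports need 12 vectors */
	printf("vectors for 4 ports: %d\n", VEC_COUNT(4));
	printf("port 2: tx vector %d, rx vector %d\n", VEC_TX(2), VEC_RX(2));
	return 0;
}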
-
-static void rocker_msix_fini(const struct rocker *rocker)
-{
- pci_disable_msix(rocker->pdev);
- kfree(rocker->msix_entries);
-}
-
-static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct rocker *rocker;
- int err;
-
- rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
- if (!rocker)
- return -ENOMEM;
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_device failed\n");
- goto err_pci_enable_device;
- }
-
- err = pci_request_regions(pdev, rocker_driver_name);
- if (err) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
- goto err_pci_request_regions;
- }
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
- goto err_pci_set_dma_mask;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
- goto err_pci_set_dma_mask;
- }
- }
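/*
 * Standard 64-bit-with-32-bit-fallback DMA mask setup: try the full
 * 64-bit streaming and coherent masks first, and only narrow to 32-bit
 * if the platform refuses. On kernels of this era,
 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) expresses the
 * same streaming+coherent pair in a single call.
 */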
-
- if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
- dev_err(&pdev->dev, "invalid PCI region size\n");
- err = -EINVAL;
- goto err_pci_resource_len_check;
- }
-
- rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!rocker->hw_addr) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -EIO;
- goto err_ioremap;
- }
- pci_set_master(pdev);
-
- rocker->pdev = pdev;
- pci_set_drvdata(pdev, rocker);
-
- rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
-
- err = rocker_msix_init(rocker);
- if (err) {
- dev_err(&pdev->dev, "MSI-X init failed\n");
- goto err_msix_init;
- }
-
- err = rocker_basic_hw_test(rocker);
- if (err) {
- dev_err(&pdev->dev, "basic hw test failed\n");
- goto err_basic_hw_test;
- }
-
- rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
-
- err = rocker_dma_rings_init(rocker);
- if (err)
- goto err_dma_rings_init;
-
- err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
- rocker_cmd_irq_handler, 0,
- rocker_driver_name, rocker);
- if (err) {
- dev_err(&pdev->dev, "cannot assign cmd irq\n");
- goto err_request_cmd_irq;
- }
-
- err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
- rocker_event_irq_handler, 0,
- rocker_driver_name, rocker);
- if (err) {
- dev_err(&pdev->dev, "cannot assign event irq\n");
- goto err_request_event_irq;
- }
-
- rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
-
- err = rocker_init_tbls(rocker);
- if (err) {
- dev_err(&pdev->dev, "cannot init rocker tables\n");
- goto err_init_tbls;
- }
-
- setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
- (unsigned long) rocker);
- mod_timer(&rocker->fdb_cleanup_timer, jiffies);
-
- err = rocker_probe_ports(rocker);
- if (err) {
- dev_err(&pdev->dev, "failed to probe ports\n");
- goto err_probe_ports;
- }
-
- dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
- (int)sizeof(rocker->hw.id), &rocker->hw.id);
-
- return 0;
-
-err_probe_ports:
- del_timer_sync(&rocker->fdb_cleanup_timer);
- rocker_free_tbls(rocker);
-err_init_tbls:
- free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
-err_request_event_irq:
- free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
-err_request_cmd_irq:
- rocker_dma_rings_fini(rocker);
-err_dma_rings_init:
-err_basic_hw_test:
- rocker_msix_fini(rocker);
-err_msix_init:
- iounmap(rocker->hw_addr);
-err_ioremap:
-err_pci_resource_len_check:
-err_pci_set_dma_mask:
- pci_release_regions(pdev);
-err_pci_request_regions:
- pci_disable_device(pdev);
-err_pci_enable_device:
- kfree(rocker);
- return err;
-}
-
-static void rocker_remove(struct pci_dev *pdev)
-{
- struct rocker *rocker = pci_get_drvdata(pdev);
-
- del_timer_sync(&rocker->fdb_cleanup_timer);
- rocker_free_tbls(rocker);
- rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
- rocker_remove_ports(rocker);
- free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
- free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
- rocker_dma_rings_fini(rocker);
- rocker_msix_fini(rocker);
- iounmap(rocker->hw_addr);
- pci_release_regions(rocker->pdev);
- pci_disable_device(rocker->pdev);
- kfree(rocker);
-}
-
-static struct pci_driver rocker_pci_driver = {
- .name = rocker_driver_name,
- .id_table = rocker_pci_id_table,
- .probe = rocker_probe,
- .remove = rocker_remove,
-};
-
-/************************************
- * Net device notifier event handler
- ************************************/
-
-static bool rocker_port_dev_check(const struct net_device *dev)
-{
- return dev->netdev_ops == &rocker_port_netdev_ops;
-}
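/*
 * Comparing dev->netdev_ops against the driver's own ops table is the
 * usual idiom for recognizing this driver's netdevs inside global
 * notifier callbacks, which see events for every net_device in the
 * system.
 */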
-
-static int rocker_port_bridge_join(struct rocker_port *rocker_port,
- struct net_device *bridge)
-{
- u16 untagged_vid = 0;
- int err;
-
- /* Port is joining bridge, so the internal VLAN for the
- * port is going to change to the bridge internal VLAN.
- * Let's remove untagged VLAN (vid=0) from port and
- * re-add once internal VLAN has changed.
- */
-
- err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
- if (err)
- return err;
-
- rocker_port_internal_vlan_id_put(rocker_port,
- rocker_port->dev->ifindex);
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
-
- rocker_port->bridge_dev = bridge;
- switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
-
- return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
-}
-
-static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
-{
- u16 untagged_vid = 0;
- int err;
-
- err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
- if (err)
- return err;
-
- rocker_port_internal_vlan_id_put(rocker_port,
- rocker_port->bridge_dev->ifindex);
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port,
- rocker_port->dev->ifindex);
-
- switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
- false);
- rocker_port->bridge_dev = NULL;
-
- err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
- if (err)
- return err;
-
- if (rocker_port->dev->flags & IFF_UP)
- err = rocker_port_fwd_enable(rocker_port, NULL, 0);
-
- return err;
-}
-
-static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
- struct net_device *master)
-{
- int err;
-
- rocker_port->bridge_dev = master;
-
- err = rocker_port_fwd_disable(rocker_port, NULL, 0);
- if (err)
- return err;
- err = rocker_port_fwd_enable(rocker_port, NULL, 0);
-
- return err;
-}
-
-static int rocker_port_master_linked(struct rocker_port *rocker_port,
- struct net_device *master)
-{
- int err = 0;
-
- if (netif_is_bridge_master(master))
- err = rocker_port_bridge_join(rocker_port, master);
- else if (netif_is_ovs_master(master))
- err = rocker_port_ovs_changed(rocker_port, master);
- return err;
-}
-
-static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
-{
- int err = 0;
-
- if (rocker_port_is_bridged(rocker_port))
- err = rocker_port_bridge_leave(rocker_port);
- else if (rocker_port_is_ovsed(rocker_port))
- err = rocker_port_ovs_changed(rocker_port, NULL);
- return err;
-}
-
-static int rocker_netdevice_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info;
- struct rocker_port *rocker_port;
- int err;
-
- if (!rocker_port_dev_check(dev))
- return NOTIFY_DONE;
-
- switch (event) {
- case NETDEV_CHANGEUPPER:
- info = ptr;
- if (!info->master)
- goto out;
- rocker_port = netdev_priv(dev);
- if (info->linking) {
- err = rocker_port_master_linked(rocker_port,
- info->upper_dev);
- if (err)
- netdev_warn(dev, "failed to reflect master linked (err %d)\n",
- err);
- } else {
- err = rocker_port_master_unlinked(rocker_port);
- if (err)
- netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
- err);
- }
- break;
- }
-out:
- return NOTIFY_DONE;
-}
-
-static struct notifier_block rocker_netdevice_nb __read_mostly = {
- .notifier_call = rocker_netdevice_event,
-};
-
-/************************************
- * Net event notifier event handler
- ************************************/
-
-static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
- ROCKER_OP_FLAG_NOWAIT;
- __be32 ip_addr = *(__be32 *)n->primary_key;
-
- return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
-}
-
-static int rocker_netevent_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
-{
- struct net_device *dev;
- struct neighbour *n = ptr;
- int err;
-
- switch (event) {
- case NETEVENT_NEIGH_UPDATE:
- if (n->tbl != &arp_tbl)
- return NOTIFY_DONE;
- dev = n->dev;
- if (!rocker_port_dev_check(dev))
- return NOTIFY_DONE;
- err = rocker_neigh_update(dev, n);
- if (err)
- netdev_warn(dev,
- "failed to handle neigh update (err %d)\n",
- err);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block rocker_netevent_nb __read_mostly = {
- .notifier_call = rocker_netevent_event,
-};
-
-/***********************
- * Module init and exit
- ***********************/
-
-static int __init rocker_module_init(void)
-{
- int err;
-
- register_netdevice_notifier(&rocker_netdevice_nb);
- register_netevent_notifier(&rocker_netevent_nb);
- err = pci_register_driver(&rocker_pci_driver);
- if (err)
- goto err_pci_register_driver;
- return 0;
-
-err_pci_register_driver:
- unregister_netevent_notifier(&rocker_netevent_nb);
- unregister_netdevice_notifier(&rocker_netdevice_nb);
- return err;
-}
-
-static void __exit rocker_module_exit(void)
-{
- unregister_netevent_notifier(&rocker_netevent_nb);
- unregister_netdevice_notifier(&rocker_netdevice_nb);
- pci_unregister_driver(&rocker_pci_driver);
-}
-
-module_init(rocker_module_init);
-module_exit(rocker_module_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
-MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
-MODULE_DESCRIPTION("Rocker switch device driver");
-MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 12490b2f6504..1ab995f7146b 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver
- * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+ * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -12,456 +12,137 @@
#ifndef _ROCKER_H
#define _ROCKER_H
+#include <linux/kernel.h>
#include <linux/types.h>
-
-/* Return codes */
-enum {
- ROCKER_OK = 0,
- ROCKER_ENOENT = 2,
- ROCKER_ENXIO = 6,
- ROCKER_ENOMEM = 12,
- ROCKER_EEXIST = 17,
- ROCKER_EINVAL = 22,
- ROCKER_EMSGSIZE = 90,
- ROCKER_ENOTSUP = 95,
- ROCKER_ENOBUFS = 105,
-};
-
-#define ROCKER_FP_PORTS_MAX 62
-
-#define PCI_VENDOR_ID_REDHAT 0x1b36
-#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006
-
-#define ROCKER_PCI_BAR0_SIZE 0x2000
-
-/* MSI-X vectors */
-enum {
- ROCKER_MSIX_VEC_CMD,
- ROCKER_MSIX_VEC_EVENT,
- ROCKER_MSIX_VEC_TEST,
- ROCKER_MSIX_VEC_RESERVED0,
- __ROCKER_MSIX_VEC_TX,
- __ROCKER_MSIX_VEC_RX,
-#define ROCKER_MSIX_VEC_TX(port) \
- (__ROCKER_MSIX_VEC_TX + ((port) * 2))
-#define ROCKER_MSIX_VEC_RX(port) \
- (__ROCKER_MSIX_VEC_RX + ((port) * 2))
-#define ROCKER_MSIX_VEC_COUNT(portcnt) \
- (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1)
-};
-
-/* Rocker bogus registers */
-#define ROCKER_BOGUS_REG0 0x0000
-#define ROCKER_BOGUS_REG1 0x0004
-#define ROCKER_BOGUS_REG2 0x0008
-#define ROCKER_BOGUS_REG3 0x000c
-
-/* Rocker test registers */
-#define ROCKER_TEST_REG 0x0010
-#define ROCKER_TEST_REG64 0x0018 /* 8-byte */
-#define ROCKER_TEST_IRQ 0x0020
-#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */
-#define ROCKER_TEST_DMA_SIZE 0x0030
-#define ROCKER_TEST_DMA_CTRL 0x0034
-
-/* Rocker test register ctrl */
-#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0)
-#define ROCKER_TEST_DMA_CTRL_FILL BIT(1)
-#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2)
-
-/* Rocker DMA ring register offsets */
-#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */
-#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32)
-#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32)
-#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32)
-#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32)
-#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32)
-#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32)
-
-/* Rocker dma ctrl register bits */
-#define ROCKER_DMA_DESC_CTRL_RESET BIT(0)
-
-/* Rocker DMA ring types */
-enum rocker_dma_type {
- ROCKER_DMA_CMD,
- ROCKER_DMA_EVENT,
- __ROCKER_DMA_TX,
- __ROCKER_DMA_RX,
-#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2)
-#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2)
-};
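/*
 * Quick standalone check of the register arithmetic above: each ring
 * owns a 32-byte window starting at 0x1000, indexed by the ring
 * numbers from enum rocker_dma_type (cmd=0, event=1, then tx/rx
 * pairs). Macro names are shortened here so the snippet compiles on
 * its own.
 */
#include <stdio.h>

#define DESC_HEAD(x) (0x100c + (x) * 32)
#define DMA_TX(port) (2 + (port) * 2)      /* __ROCKER_DMA_TX == 2 */

int main(void)
{
	/* port 1 tx is ring 4, so its head register sits at 0x108c */
	printf("head reg: 0x%x\n", DESC_HEAD(DMA_TX(1)));
	return 0;
}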
-
-/* Rocker DMA ring size limits and default sizes */
-#define ROCKER_DMA_SIZE_MIN 2ul
-#define ROCKER_DMA_SIZE_MAX 65536ul
-#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul
-#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul
-#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul
-#define ROCKER_DMA_TX_DESC_SIZE 256
-#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul
-#define ROCKER_DMA_RX_DESC_SIZE 256
-
-/* Rocker DMA descriptor struct */
-struct rocker_desc {
- u64 buf_addr;
- u64 cookie;
- u16 buf_size;
- u16 tlv_size;
- u16 resv[5];
- u16 comp_err;
-};
-
-#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15)
-
-/* Rocker DMA TLV struct */
-struct rocker_tlv {
- u32 type;
- u16 len;
-};
-
-/* TLVs */
-enum {
- ROCKER_TLV_CMD_UNSPEC,
- ROCKER_TLV_CMD_TYPE, /* u16 */
- ROCKER_TLV_CMD_INFO, /* nest */
-
- __ROCKER_TLV_CMD_MAX,
- ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1,
-};
-
-enum {
- ROCKER_TLV_CMD_TYPE_UNSPEC,
- ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS,
- ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS,
- ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD,
- ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD,
- ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL,
- ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS,
- ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD,
- ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD,
- ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
- ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
-
- ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS,
- ROCKER_TLV_CMD_TYPE_GET_PORT_STATS,
-
- __ROCKER_TLV_CMD_TYPE_MAX,
- ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
-};
-
-enum {
- ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC,
- ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */
- ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */
- ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */
- ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */
- ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */
- ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
- ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
- ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */
- ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */
-
- __ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
- ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
- __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
-};
-
-enum {
- ROCKER_TLV_CMD_PORT_STATS_UNSPEC,
- ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */
-
- ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */
- ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */
- ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */
- ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */
-
- ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */
- ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */
- ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */
- ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */
-
- __ROCKER_TLV_CMD_PORT_STATS_MAX,
- ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1,
-};
-
-enum rocker_port_mode {
- ROCKER_PORT_MODE_OF_DPA,
-};
-
-enum {
- ROCKER_TLV_EVENT_UNSPEC,
- ROCKER_TLV_EVENT_TYPE, /* u16 */
- ROCKER_TLV_EVENT_INFO, /* nest */
-
- __ROCKER_TLV_EVENT_MAX,
- ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1,
-};
-
-enum {
- ROCKER_TLV_EVENT_TYPE_UNSPEC,
- ROCKER_TLV_EVENT_TYPE_LINK_CHANGED,
- ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN,
-
- __ROCKER_TLV_EVENT_TYPE_MAX,
- ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1,
-};
-
-enum {
- ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC,
- ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */
- ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */
-
- __ROCKER_TLV_EVENT_LINK_CHANGED_MAX,
- ROCKER_TLV_EVENT_LINK_CHANGED_MAX =
- __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1,
-};
-
-enum {
- ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC,
- ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */
- ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */
- ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */
-
- __ROCKER_TLV_EVENT_MAC_VLAN_MAX,
- ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1,
-};
-
-enum {
- ROCKER_TLV_RX_UNSPEC,
- ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */
- ROCKER_TLV_RX_CSUM, /* u16 */
- ROCKER_TLV_RX_FRAG_ADDR, /* u64 */
- ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */
- ROCKER_TLV_RX_FRAG_LEN, /* u16 */
-
- __ROCKER_TLV_RX_MAX,
- ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
-};
-
-#define ROCKER_RX_FLAGS_IPV4 BIT(0)
-#define ROCKER_RX_FLAGS_IPV6 BIT(1)
-#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2)
-#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3)
-#define ROCKER_RX_FLAGS_IP_FRAG BIT(4)
-#define ROCKER_RX_FLAGS_TCP BIT(5)
-#define ROCKER_RX_FLAGS_UDP BIT(6)
-#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7)
-#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8)
-
-enum {
- ROCKER_TLV_TX_UNSPEC,
- ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */
- ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */
- ROCKER_TLV_TX_TSO_MSS, /* u16 */
- ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */
- ROCKER_TLV_TX_FRAGS, /* array */
-
- __ROCKER_TLV_TX_MAX,
- ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1,
-};
-
-#define ROCKER_TX_OFFLOAD_NONE 0
-#define ROCKER_TX_OFFLOAD_IP_CSUM 1
-#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2
-#define ROCKER_TX_OFFLOAD_L3_CSUM 3
-#define ROCKER_TX_OFFLOAD_TSO 4
-
-#define ROCKER_TX_FRAGS_MAX 16
-
-enum {
- ROCKER_TLV_TX_FRAG_UNSPEC,
- ROCKER_TLV_TX_FRAG, /* nest */
-
- __ROCKER_TLV_TX_FRAG_MAX,
- ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1,
-};
-
-enum {
- ROCKER_TLV_TX_FRAG_ATTR_UNSPEC,
- ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */
- ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */
-
- __ROCKER_TLV_TX_FRAG_ATTR_MAX,
- ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1,
-};
-
-/* cmd info nested for OF-DPA msgs */
-enum {
- ROCKER_TLV_OF_DPA_UNSPEC,
- ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */
- ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */
- ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */
- ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */
- ROCKER_TLV_OF_DPA_COOKIE, /* u64 */
- ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */
- ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */
- ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */
- ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */
- ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */
- ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */
- ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */
- ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */
- ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */
- ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */
- ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */
- ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */
- ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */
- ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */
- ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */
- ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */
- ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */
- ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */
- ROCKER_TLV_OF_DPA_DST_MAC, /* binary */
- ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */
- ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */
- ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */
- ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */
- ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */
- ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */
- ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */
- ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */
- ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */
- ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */
- ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */
- ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */
- ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */
- ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */
- ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */
- ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */
- ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */
- ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */
- ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */
- ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */
- ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */
- ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */
- ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */
- ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */
- ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */
- ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */
- ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */
- ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */
- ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */
- ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */
- ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */
- ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */
- ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */
- ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */
- ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */
- ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */
- ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */
-
- __ROCKER_TLV_OF_DPA_MAX,
- ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1,
-};
-
-/* OF-DPA table IDs */
-
-enum rocker_of_dpa_table_id {
- ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0,
- ROCKER_OF_DPA_TABLE_ID_VLAN = 10,
- ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20,
- ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30,
- ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40,
- ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50,
- ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60,
-};
-
-/* OF-DPA flow stats */
-enum {
- ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC,
- ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */
- ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */
- ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */
-
- __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX,
- ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1,
-};
-
-/* OF-DPA group types */
-enum rocker_of_dpa_group_type {
- ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0,
- ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE,
- ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST,
- ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST,
- ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD,
- ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE,
- ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST,
- ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP,
- ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY,
-};
-
-/* OF-DPA group L2 overlay types */
-enum rocker_of_dpa_overlay_type {
- ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0,
- ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST,
- ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST,
- ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST,
-};
-
-/* OF-DPA group ID encoding */
-#define ROCKER_GROUP_TYPE_SHIFT 28
-#define ROCKER_GROUP_TYPE_MASK 0xf0000000
-#define ROCKER_GROUP_VLAN_SHIFT 16
-#define ROCKER_GROUP_VLAN_MASK 0x0fff0000
-#define ROCKER_GROUP_PORT_SHIFT 0
-#define ROCKER_GROUP_PORT_MASK 0x0000ffff
-#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12
-#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000
-#define ROCKER_GROUP_SUBTYPE_SHIFT 10
-#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00
-#define ROCKER_GROUP_INDEX_SHIFT 0
-#define ROCKER_GROUP_INDEX_MASK 0x0000ffff
-#define ROCKER_GROUP_INDEX_LONG_SHIFT 0
-#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff
-
-#define ROCKER_GROUP_TYPE_GET(group_id) \
- (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT)
-#define ROCKER_GROUP_TYPE_SET(type) \
- (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK)
-#define ROCKER_GROUP_VLAN_GET(group_id) \
- (((group_id) & ROCKER_GROUP_VLAN_MASK) >> ROCKER_GROUP_VLAN_SHIFT)
-#define ROCKER_GROUP_VLAN_SET(vlan_id) \
- (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK)
-#define ROCKER_GROUP_PORT_GET(group_id) \
- (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT)
-#define ROCKER_GROUP_PORT_SET(port) \
- (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK)
-#define ROCKER_GROUP_INDEX_GET(group_id) \
- (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT)
-#define ROCKER_GROUP_INDEX_SET(index) \
- (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK)
-#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \
- (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \
- ROCKER_GROUP_INDEX_LONG_SHIFT)
-#define ROCKER_GROUP_INDEX_LONG_SET(index) \
- (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \
- ROCKER_GROUP_INDEX_LONG_MASK)
-
-#define ROCKER_GROUP_NONE 0
-#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \
- (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\
- ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port))
-#define ROCKER_GROUP_L2_REWRITE(index) \
- (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\
- ROCKER_GROUP_INDEX_LONG_SET(index))
-#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \
- (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\
- ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
-#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \
- (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\
- ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
-#define ROCKER_GROUP_L3_UNICAST(index) \
- (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\
- ROCKER_GROUP_INDEX_LONG_SET(index))
-
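/*
 * A group ID packs {type, vlan, port} (or {type, index}) into one u32.
 * Standalone sketch of encode and decode using the shift/mask values
 * above, with names shortened so it compiles on its own; vlan is taken
 * in host order, i.e. after the ntohs() the real macros apply.
 */
#include <stdio.h>

#define GRP_TYPE_SHIFT 28
#define GRP_TYPE_MASK  0xf0000000u
#define GRP_VLAN_SHIFT 16
#define GRP_VLAN_MASK  0x0fff0000u
#define GRP_PORT_MASK  0x0000ffffu

int main(void)
{
	unsigned int type = 0, vlan = 100, port = 7; /* L2 interface group */
	unsigned int id;

	id = ((type << GRP_TYPE_SHIFT) & GRP_TYPE_MASK) |
	     ((vlan << GRP_VLAN_SHIFT) & GRP_VLAN_MASK) |
	     (port & GRP_PORT_MASK);

	printf("group_id 0x%08x\n", id);	/* prints 0x00640007 */
	printf("type %u vlan %u port %u\n",
	       (id & GRP_TYPE_MASK) >> GRP_TYPE_SHIFT,
	       (id & GRP_VLAN_MASK) >> GRP_VLAN_SHIFT,
	       id & GRP_PORT_MASK);
	return 0;
}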
-/* Rocker general purpose registers */
-#define ROCKER_CONTROL 0x0300
-#define ROCKER_PORT_PHYS_COUNT 0x0304
-#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */
-#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */
-#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */
-
-/* Rocker control bits */
-#define ROCKER_CONTROL_RESET BIT(0)
+#include <linux/netdevice.h>
+#include <net/neighbour.h>
+#include <net/switchdev.h>
+
+#include "rocker_hw.h"
+
+struct rocker_desc_info {
+ char *data; /* mapped */
+ size_t data_size;
+ size_t tlv_size;
+ struct rocker_desc *desc;
+ dma_addr_t mapaddr;
+};
+
+struct rocker_dma_ring_info {
+ size_t size;
+ u32 head;
+ u32 tail;
+ struct rocker_desc *desc; /* mapped */
+ dma_addr_t mapaddr;
+ struct rocker_desc_info *desc_info;
+ unsigned int type;
+};
+
+struct rocker;
+
+struct rocker_port {
+ struct net_device *dev;
+ struct rocker *rocker;
+ void *wpriv;
+ unsigned int port_number;
+ u32 pport;
+ struct napi_struct napi_tx;
+ struct napi_struct napi_rx;
+ struct rocker_dma_ring_info tx_ring;
+ struct rocker_dma_ring_info rx_ring;
+};
+
+struct rocker_world_ops;
+
+struct rocker {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+ struct msix_entry *msix_entries;
+ unsigned int port_count;
+ struct rocker_port **ports;
+ struct {
+ u64 id;
+ } hw;
+ spinlock_t cmd_ring_lock; /* for cmd ring accesses */
+ struct rocker_dma_ring_info cmd_ring;
+ struct rocker_dma_ring_info event_ring;
+ struct rocker_world_ops *wops;
+ void *wpriv;
+};
+
+typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv);
+
+typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv);
+
+int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait,
+ rocker_cmd_prep_cb_t prepare, void *prepare_priv,
+ rocker_cmd_proc_cb_t process, void *process_priv);
+
+int rocker_port_set_learning(struct rocker_port *rocker_port,
+ bool learning);
+
+struct rocker_world_ops {
+ const char *kind;
+ size_t priv_size;
+ size_t port_priv_size;
+ u8 mode;
+ int (*init)(struct rocker *rocker);
+ void (*fini)(struct rocker *rocker);
+ int (*port_pre_init)(struct rocker_port *rocker_port);
+ int (*port_init)(struct rocker_port *rocker_port);
+ void (*port_fini)(struct rocker_port *rocker_port);
+ void (*port_post_fini)(struct rocker_port *rocker_port);
+ int (*port_open)(struct rocker_port *rocker_port);
+ void (*port_stop)(struct rocker_port *rocker_port);
+ int (*port_attr_stp_state_set)(struct rocker_port *rocker_port,
+ u8 state,
+ struct switchdev_trans *trans);
+ int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
+ unsigned long brport_flags,
+ struct switchdev_trans *trans);
+ int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port,
+ unsigned long *p_brport_flags);
+ int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port,
+ u32 ageing_time,
+ struct switchdev_trans *trans);
+ int (*port_obj_vlan_add)(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans);
+ int (*port_obj_vlan_del)(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_vlan *vlan);
+ int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb);
+ int (*port_obj_fib4_add)(struct rocker_port *rocker_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans);
+ int (*port_obj_fib4_del)(struct rocker_port *rocker_port,
+ const struct switchdev_obj_ipv4_fib *fib4);
+ int (*port_obj_fdb_add)(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans);
+ int (*port_obj_fdb_del)(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_fdb *fdb);
+ int (*port_obj_fdb_dump)(const struct rocker_port *rocker_port,
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb);
+ int (*port_master_linked)(struct rocker_port *rocker_port,
+ struct net_device *master);
+ int (*port_master_unlinked)(struct rocker_port *rocker_port,
+ struct net_device *master);
+ int (*port_neigh_update)(struct rocker_port *rocker_port,
+ struct neighbour *n);
+ int (*port_neigh_destroy)(struct rocker_port *rocker_port,
+ struct neighbour *n);
+ int (*port_ev_mac_vlan_seen)(struct rocker_port *rocker_port,
+ const unsigned char *addr,
+ __be16 vlan_id);
+};
+
+extern struct rocker_world_ops rocker_ofdpa_ops;
#endif
diff --git a/drivers/net/ethernet/rocker/rocker_hw.h b/drivers/net/ethernet/rocker/rocker_hw.h
new file mode 100644
index 000000000000..2adfe88859f2
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker_hw.h
@@ -0,0 +1,467 @@
+/*
+ * drivers/net/ethernet/rocker/rocker_hw.h - Rocker switch device driver
+ * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ROCKER_HW_H
+#define _ROCKER_HW_H
+
+#include <linux/types.h>
+
+/* Return codes */
+enum {
+ ROCKER_OK = 0,
+ ROCKER_ENOENT = 2,
+ ROCKER_ENXIO = 6,
+ ROCKER_ENOMEM = 12,
+ ROCKER_EEXIST = 17,
+ ROCKER_EINVAL = 22,
+ ROCKER_EMSGSIZE = 90,
+ ROCKER_ENOTSUP = 95,
+ ROCKER_ENOBUFS = 105,
+};
+
+#define ROCKER_FP_PORTS_MAX 62
+
+#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006
+
+#define ROCKER_PCI_BAR0_SIZE 0x2000
+
+/* MSI-X vectors */
+enum {
+ ROCKER_MSIX_VEC_CMD,
+ ROCKER_MSIX_VEC_EVENT,
+ ROCKER_MSIX_VEC_TEST,
+ ROCKER_MSIX_VEC_RESERVED0,
+ __ROCKER_MSIX_VEC_TX,
+ __ROCKER_MSIX_VEC_RX,
+#define ROCKER_MSIX_VEC_TX(port) \
+ (__ROCKER_MSIX_VEC_TX + ((port) * 2))
+#define ROCKER_MSIX_VEC_RX(port) \
+ (__ROCKER_MSIX_VEC_RX + ((port) * 2))
+#define ROCKER_MSIX_VEC_COUNT(portcnt) \
+ (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1)
+};
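+
+/* Worked example of the vector layout: with two front-panel ports the
+ * table is CMD=0, EVENT=1, TEST=2, RESERVED0=3, then per-port pairs
+ * TX(0)=4, RX(0)=5, TX(1)=6, RX(1)=7, giving
+ * ROCKER_MSIX_VEC_COUNT(2) == 8.
+ */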
+
+/* Rocker bogus registers */
+#define ROCKER_BOGUS_REG0 0x0000
+#define ROCKER_BOGUS_REG1 0x0004
+#define ROCKER_BOGUS_REG2 0x0008
+#define ROCKER_BOGUS_REG3 0x000c
+
+/* Rocker test registers */
+#define ROCKER_TEST_REG 0x0010
+#define ROCKER_TEST_REG64 0x0018 /* 8-byte */
+#define ROCKER_TEST_IRQ 0x0020
+#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */
+#define ROCKER_TEST_DMA_SIZE 0x0030
+#define ROCKER_TEST_DMA_CTRL 0x0034
+
+/* Rocker test register ctrl */
+#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0)
+#define ROCKER_TEST_DMA_CTRL_FILL BIT(1)
+#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2)
+
+/* Rocker DMA ring register offsets */
+#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */
+#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32)
+#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32)
+#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32)
+#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32)
+#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32)
+#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32)
+
+/* Rocker dma ctrl register bits */
+#define ROCKER_DMA_DESC_CTRL_RESET BIT(0)
+
+/* Rocker DMA ring types */
+enum rocker_dma_type {
+ ROCKER_DMA_CMD,
+ ROCKER_DMA_EVENT,
+ __ROCKER_DMA_TX,
+ __ROCKER_DMA_RX,
+#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2)
+#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2)
+};
+
+/* Rocker DMA ring size limits and default sizes */
+#define ROCKER_DMA_SIZE_MIN 2ul
+#define ROCKER_DMA_SIZE_MAX 65536ul
+#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul
+#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul
+#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul
+#define ROCKER_DMA_TX_DESC_SIZE 256
+#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul
+#define ROCKER_DMA_RX_DESC_SIZE 256
+
+/* Rocker DMA descriptor struct */
+struct rocker_desc {
+ u64 buf_addr;
+ u64 cookie;
+ u16 buf_size;
+ u16 tlv_size;
+ u16 resv[5];
+ u16 comp_err;
+};
+
+#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15)
+
+/* Rocker DMA TLV struct */
+struct rocker_tlv {
+ u32 type;
+ u16 len;
+};
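+
+/* TLVs are packed back to back in a descriptor buffer, each one padded
+ * out to an 8-byte boundary (ROCKER_TLV_ALIGNTO in rocker_tlv.h);
+ * "nest" attributes such as ROCKER_TLV_CMD_INFO carry further TLVs as
+ * their payload.
+ */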
+
+/* TLVs */
+enum {
+ ROCKER_TLV_CMD_UNSPEC,
+ ROCKER_TLV_CMD_TYPE, /* u16 */
+ ROCKER_TLV_CMD_INFO, /* nest */
+
+ __ROCKER_TLV_CMD_MAX,
+ ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_CMD_TYPE_UNSPEC,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL,
+ ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS,
+
+ ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_STATS,
+
+ __ROCKER_TLV_CMD_TYPE_MAX,
+ ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC,
+ ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */
+ ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */
+ ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */
+
+ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
+ __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_CMD_PORT_STATS_UNSPEC,
+ ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */
+
+ ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */
+
+ ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */
+ ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */
+
+ __ROCKER_TLV_CMD_PORT_STATS_MAX,
+ ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1,
+};
+
+enum rocker_port_mode {
+ ROCKER_PORT_MODE_OF_DPA,
+};
+
+enum {
+ ROCKER_TLV_EVENT_UNSPEC,
+ ROCKER_TLV_EVENT_TYPE, /* u16 */
+ ROCKER_TLV_EVENT_INFO, /* nest */
+
+ __ROCKER_TLV_EVENT_MAX,
+ ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_TYPE_UNSPEC,
+ ROCKER_TLV_EVENT_TYPE_LINK_CHANGED,
+ ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN,
+
+ __ROCKER_TLV_EVENT_TYPE_MAX,
+ ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC,
+ ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */
+ ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */
+
+ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX,
+ ROCKER_TLV_EVENT_LINK_CHANGED_MAX =
+ __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC,
+ ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */
+ ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */
+ ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */
+
+ __ROCKER_TLV_EVENT_MAC_VLAN_MAX,
+ ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_RX_UNSPEC,
+ ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */
+ ROCKER_TLV_RX_CSUM, /* u16 */
+ ROCKER_TLV_RX_FRAG_ADDR, /* u64 */
+ ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */
+ ROCKER_TLV_RX_FRAG_LEN, /* u16 */
+
+ __ROCKER_TLV_RX_MAX,
+ ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
+};
+
+#define ROCKER_RX_FLAGS_IPV4 BIT(0)
+#define ROCKER_RX_FLAGS_IPV6 BIT(1)
+#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3)
+#define ROCKER_RX_FLAGS_IP_FRAG BIT(4)
+#define ROCKER_RX_FLAGS_TCP BIT(5)
+#define ROCKER_RX_FLAGS_UDP BIT(6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7)
+#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8)
+
+enum {
+ ROCKER_TLV_TX_UNSPEC,
+ ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */
+ ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */
+ ROCKER_TLV_TX_TSO_MSS, /* u16 */
+ ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */
+ ROCKER_TLV_TX_FRAGS, /* array */
+
+ __ROCKER_TLV_TX_MAX,
+ ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1,
+};
+
+#define ROCKER_TX_OFFLOAD_NONE 0
+#define ROCKER_TX_OFFLOAD_IP_CSUM 1
+#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2
+#define ROCKER_TX_OFFLOAD_L3_CSUM 3
+#define ROCKER_TX_OFFLOAD_TSO 4
+
+#define ROCKER_TX_FRAGS_MAX 16
+
+enum {
+ ROCKER_TLV_TX_FRAG_UNSPEC,
+ ROCKER_TLV_TX_FRAG, /* nest */
+
+ __ROCKER_TLV_TX_FRAG_MAX,
+ ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1,
+};
+
+enum {
+ ROCKER_TLV_TX_FRAG_ATTR_UNSPEC,
+ ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */
+ ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */
+
+ __ROCKER_TLV_TX_FRAG_ATTR_MAX,
+ ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1,
+};
+
+/* cmd info nested for OF-DPA msgs */
+enum {
+ ROCKER_TLV_OF_DPA_UNSPEC,
+ ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */
+ ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */
+ ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */
+ ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */
+ ROCKER_TLV_OF_DPA_COOKIE, /* u64 */
+ ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */
+ ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */
+ ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */
+ ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */
+ ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */
+ ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */
+ ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */
+ ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */
+ ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */
+ ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */
+ ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */
+ ROCKER_TLV_OF_DPA_DST_MAC, /* binary */
+ ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */
+ ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */
+ ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */
+ ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */
+ ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */
+ ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */
+ ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */
+ ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */
+ ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */
+ ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */
+ ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */
+ ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */
+ ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */
+ ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */
+ ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */
+ ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */
+
+ __ROCKER_TLV_OF_DPA_MAX,
+ ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1,
+};
+
+/* OF-DPA table IDs */
+
+enum rocker_of_dpa_table_id {
+ ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0,
+ ROCKER_OF_DPA_TABLE_ID_VLAN = 10,
+ ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20,
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30,
+ ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40,
+ ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50,
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60,
+};
+
+/* OF-DPA flow stats */
+enum {
+ ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC,
+ ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */
+ ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */
+ ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */
+
+ __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX,
+ ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1,
+};
+
+/* OF-DPA group types */
+enum rocker_of_dpa_group_type {
+ ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST,
+ ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP,
+ ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY,
+};
+
+/* OF-DPA group L2 overlay types */
+enum rocker_of_dpa_overlay_type {
+ ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0,
+ ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST,
+ ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST,
+ ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST,
+};
+
+/* OF-DPA group ID encoding */
+#define ROCKER_GROUP_TYPE_SHIFT 28
+#define ROCKER_GROUP_TYPE_MASK 0xf0000000
+#define ROCKER_GROUP_VLAN_SHIFT 16
+#define ROCKER_GROUP_VLAN_MASK 0x0fff0000
+#define ROCKER_GROUP_PORT_SHIFT 0
+#define ROCKER_GROUP_PORT_MASK 0x0000ffff
+#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12
+#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000
+#define ROCKER_GROUP_SUBTYPE_SHIFT 10
+#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00
+#define ROCKER_GROUP_INDEX_SHIFT 0
+#define ROCKER_GROUP_INDEX_MASK 0x0000ffff
+#define ROCKER_GROUP_INDEX_LONG_SHIFT 0
+#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff
+
+#define ROCKER_GROUP_TYPE_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT)
+#define ROCKER_GROUP_TYPE_SET(type) \
+ (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK)
+#define ROCKER_GROUP_VLAN_GET(group_id) \
+	(((group_id) & ROCKER_GROUP_VLAN_MASK) >> ROCKER_GROUP_VLAN_SHIFT)
+#define ROCKER_GROUP_VLAN_SET(vlan_id) \
+ (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK)
+#define ROCKER_GROUP_PORT_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT)
+#define ROCKER_GROUP_PORT_SET(port) \
+ (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK)
+#define ROCKER_GROUP_INDEX_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT)
+#define ROCKER_GROUP_INDEX_SET(index) \
+ (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK)
+#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \
+ (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \
+ ROCKER_GROUP_INDEX_LONG_SHIFT)
+#define ROCKER_GROUP_INDEX_LONG_SET(index) \
+ (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \
+ ROCKER_GROUP_INDEX_LONG_MASK)
+
+#define ROCKER_GROUP_NONE 0
+#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port))
+#define ROCKER_GROUP_L2_REWRITE(index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\
+ ROCKER_GROUP_INDEX_LONG_SET(index))
+#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
+#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\
+ ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index))
+#define ROCKER_GROUP_L3_UNICAST(index) \
+ (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\
+ ROCKER_GROUP_INDEX_LONG_SET(index))
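+
+/* Worked example: for VLAN 100 on pport 3,
+ * ROCKER_GROUP_L2_INTERFACE(htons(100), 3) is
+ * (0 << 28) | ((100 << 16) & 0x0fff0000) | (3 & 0x0000ffff) = 0x00640003,
+ * and ROCKER_GROUP_TYPE_GET(), ROCKER_GROUP_VLAN_GET() and
+ * ROCKER_GROUP_PORT_GET() recover 0, 100 and 3 from it again.
+ */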
+
+/* Rocker general purpose registers */
+#define ROCKER_CONTROL 0x0300
+#define ROCKER_PORT_PHYS_COUNT 0x0304
+#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */
+#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */
+#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */
+
+/* Rocker control bits */
+#define ROCKER_CONTROL_RESET BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
new file mode 100644
index 000000000000..28b775e5a9ad
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -0,0 +1,2909 @@
+/*
+ * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
+ * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/sort.h>
+#include <linux/random.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <net/switchdev.h>
+#include <net/rtnetlink.h>
+#include <net/netevent.h>
+#include <net/arp.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <generated/utsrelease.h>
+
+#include "rocker_hw.h"
+#include "rocker.h"
+#include "rocker_tlv.h"
+
+static const char rocker_driver_name[] = "rocker";
+
+static const struct pci_device_id rocker_pci_id_table[] = {
+ {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
+ {0, }
+};
+
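+/* Completion tracking for command descriptors: the submitter sleeps on
+ * "wait" until the IRQ handler sets "done", unless "nowait" is set, in
+ * which case the handler just recycles the descriptor on its own.
+ */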
+struct rocker_wait {
+ wait_queue_head_t wait;
+ bool done;
+ bool nowait;
+};
+
+static void rocker_wait_reset(struct rocker_wait *wait)
+{
+ wait->done = false;
+ wait->nowait = false;
+}
+
+static void rocker_wait_init(struct rocker_wait *wait)
+{
+ init_waitqueue_head(&wait->wait);
+ rocker_wait_reset(wait);
+}
+
+static struct rocker_wait *rocker_wait_create(void)
+{
+ struct rocker_wait *wait;
+
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait)
+ return NULL;
+ return wait;
+}
+
+static void rocker_wait_destroy(struct rocker_wait *wait)
+{
+ kfree(wait);
+}
+
+static bool rocker_wait_event_timeout(struct rocker_wait *wait,
+ unsigned long timeout)
+{
+ wait_event_timeout(wait->wait, wait->done, timeout);
+ if (!wait->done)
+ return false;
+ return true;
+}
+
+static void rocker_wait_wake_up(struct rocker_wait *wait)
+{
+ wait->done = true;
+ wake_up(&wait->wait);
+}
+
+static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
+{
+ return rocker->msix_entries[vector].vector;
+}
+
+static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
+{
+ return rocker_msix_vector(rocker_port->rocker,
+ ROCKER_MSIX_VEC_TX(rocker_port->port_number));
+}
+
+static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
+{
+ return rocker_msix_vector(rocker_port->rocker,
+ ROCKER_MSIX_VEC_RX(rocker_port->port_number));
+}
+
+#define rocker_write32(rocker, reg, val) \
+ writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_read32(rocker, reg) \
+ readl((rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_write64(rocker, reg, val) \
+ writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
+#define rocker_read64(rocker, reg) \
+ readq((rocker)->hw_addr + (ROCKER_ ## reg))
+
+/*****************************
+ * HW basic testing functions
+ *****************************/
+
+static int rocker_reg_test(const struct rocker *rocker)
+{
+ const struct pci_dev *pdev = rocker->pdev;
+ u64 test_reg;
+ u64 rnd;
+
+ rnd = prandom_u32();
+ rnd >>= 1;
+ rocker_write32(rocker, TEST_REG, rnd);
+ test_reg = rocker_read32(rocker, TEST_REG);
+ if (test_reg != rnd * 2) {
+ dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
+ test_reg, rnd * 2);
+ return -EIO;
+ }
+
+ rnd = prandom_u32();
+ rnd <<= 31;
+ rnd |= prandom_u32();
+ rocker_write64(rocker, TEST_REG64, rnd);
+ test_reg = rocker_read64(rocker, TEST_REG64);
+ if (test_reg != rnd * 2) {
+ dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
+ test_reg, rnd * 2);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rocker_dma_test_one(const struct rocker *rocker,
+ struct rocker_wait *wait, u32 test_type,
+ dma_addr_t dma_handle, const unsigned char *buf,
+ const unsigned char *expect, size_t size)
+{
+ const struct pci_dev *pdev = rocker->pdev;
+ int i;
+
+ rocker_wait_reset(wait);
+ rocker_write32(rocker, TEST_DMA_CTRL, test_type);
+
+ if (!rocker_wait_event_timeout(wait, HZ / 10)) {
+ dev_err(&pdev->dev, "no interrupt received within a timeout\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (buf[i] != expect[i]) {
+ dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
+ buf[i], i, expect[i]);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
+#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
+
+static int rocker_dma_test_offset(const struct rocker *rocker,
+ struct rocker_wait *wait, int offset)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ unsigned char *alloc;
+ unsigned char *buf;
+ unsigned char *expect;
+ dma_addr_t dma_handle;
+ int i;
+ int err;
+
+ alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
+ GFP_KERNEL | GFP_DMA);
+ if (!alloc)
+ return -ENOMEM;
+ buf = alloc + offset;
+ expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
+
+ dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(pdev, dma_handle)) {
+ err = -EIO;
+ goto free_alloc;
+ }
+
+ rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
+ rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
+
+ memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+ memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+ prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
+ for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
+ expect[i] = ~buf[i];
+ err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
+ dma_handle, buf, expect,
+ ROCKER_TEST_DMA_BUF_SIZE);
+ if (err)
+ goto unmap;
+
+unmap:
+ pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+free_alloc:
+ kfree(alloc);
+
+ return err;
+}
+
+static int rocker_dma_test(const struct rocker *rocker,
+ struct rocker_wait *wait)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < 8; i++) {
+ err = rocker_dma_test_offset(rocker, wait, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_wait *wait = dev_id;
+
+ rocker_wait_wake_up(wait);
+
+ return IRQ_HANDLED;
+}
+
+static int rocker_basic_hw_test(const struct rocker *rocker)
+{
+ const struct pci_dev *pdev = rocker->pdev;
+ struct rocker_wait wait;
+ int err;
+
+ err = rocker_reg_test(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "reg test failed\n");
+ return err;
+ }
+
+ /* Initialize the wait before registering the handler that uses it */
+ rocker_wait_init(&wait);
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
+ rocker_test_irq_handler, 0,
+ rocker_driver_name, &wait);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign test irq\n");
+ return err;
+ }
+
+ rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
+
+ if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
+ dev_err(&pdev->dev, "no interrupt received within a timeout\n");
+ err = -EIO;
+ goto free_irq;
+ }
+
+ err = rocker_dma_test(rocker, &wait);
+ if (err)
+ dev_err(&pdev->dev, "dma test failed\n");
+
+free_irq:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
+ return err;
+}
+
+/******************************************
+ * DMA rings and descriptors manipulations
+ ******************************************/
+
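+/* Both rings are head/tail circular buffers: the producer advances
+ * "head" after filling a descriptor, the consumer advances "tail" once
+ * the hardware has flipped the descriptor's generation bit, and one
+ * slot is always left unused, so head == tail means "empty" while
+ * __pos_inc(head) == tail means "full".
+ */
+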
+static u32 __pos_inc(u32 pos, size_t limit)
+{
+ return ++pos == limit ? 0 : pos;
+}
+
+static int rocker_desc_err(const struct rocker_desc_info *desc_info)
+{
+ int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
+
+ switch (err) {
+ case ROCKER_OK:
+ return 0;
+ case -ROCKER_ENOENT:
+ return -ENOENT;
+ case -ROCKER_ENXIO:
+ return -ENXIO;
+ case -ROCKER_ENOMEM:
+ return -ENOMEM;
+ case -ROCKER_EEXIST:
+ return -EEXIST;
+ case -ROCKER_EINVAL:
+ return -EINVAL;
+ case -ROCKER_EMSGSIZE:
+ return -EMSGSIZE;
+ case -ROCKER_ENOTSUP:
+ return -EOPNOTSUPP;
+ case -ROCKER_ENOBUFS:
+ return -ENOBUFS;
+ }
+
+ return -EINVAL;
+}
+
+static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
+{
+ desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
+}
+
+static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
+{
+ u32 comp_err = desc_info->desc->comp_err;
+
+ return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
+}
+
+static void *
+rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
+{
+ return (void *)(uintptr_t)desc_info->desc->cookie;
+}
+
+static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
+ void *ptr)
+{
+ desc_info->desc->cookie = (uintptr_t) ptr;
+}
+
+static struct rocker_desc_info *
+rocker_desc_head_get(const struct rocker_dma_ring_info *info)
+{
+ struct rocker_desc_info *desc_info;
+ u32 head = __pos_inc(info->head, info->size);
+
+ desc_info = &info->desc_info[info->head];
+ if (head == info->tail)
+ return NULL; /* ring full */
+ desc_info->tlv_size = 0;
+ return desc_info;
+}
+
+static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
+{
+ desc_info->desc->buf_size = desc_info->data_size;
+ desc_info->desc->tlv_size = desc_info->tlv_size;
+}
+
+static void rocker_desc_head_set(const struct rocker *rocker,
+ struct rocker_dma_ring_info *info,
+ const struct rocker_desc_info *desc_info)
+{
+ u32 head = __pos_inc(info->head, info->size);
+
+ BUG_ON(head == info->tail);
+ rocker_desc_commit(desc_info);
+ info->head = head;
+ rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
+}
+
+static struct rocker_desc_info *
+rocker_desc_tail_get(struct rocker_dma_ring_info *info)
+{
+ struct rocker_desc_info *desc_info;
+
+ if (info->tail == info->head)
+ return NULL; /* nothing to be done between head and tail */
+ desc_info = &info->desc_info[info->tail];
+ if (!rocker_desc_gen(desc_info))
+ return NULL; /* gen bit not set, desc is not ready yet */
+ info->tail = __pos_inc(info->tail, info->size);
+ desc_info->tlv_size = desc_info->desc->tlv_size;
+ return desc_info;
+}
+
+static void rocker_dma_ring_credits_set(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
+ u32 credits)
+{
+ if (credits)
+ rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
+}
+
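+/* Clamp a requested ring size to a supported power of two, e.g.
+ * 100 -> 128, 1 -> 2, 100000 -> 65536.
+ */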
+static unsigned long rocker_dma_ring_size_fix(size_t size)
+{
+ return max(ROCKER_DMA_SIZE_MIN,
+ min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
+}
+
+static int rocker_dma_ring_create(const struct rocker *rocker,
+ unsigned int type,
+ size_t size,
+ struct rocker_dma_ring_info *info)
+{
+ int i;
+
+ BUG_ON(size != rocker_dma_ring_size_fix(size));
+ info->size = size;
+ info->type = type;
+ info->head = 0;
+ info->tail = 0;
+ info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
+ GFP_KERNEL);
+ if (!info->desc_info)
+ return -ENOMEM;
+
+ info->desc = pci_alloc_consistent(rocker->pdev,
+ info->size * sizeof(*info->desc),
+ &info->mapaddr);
+ if (!info->desc) {
+ kfree(info->desc_info);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < info->size; i++)
+ info->desc_info[i].desc = &info->desc[i];
+
+ rocker_write32(rocker, DMA_DESC_CTRL(info->type),
+ ROCKER_DMA_DESC_CTRL_RESET);
+ rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
+ rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
+
+ return 0;
+}
+
+static void rocker_dma_ring_destroy(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info)
+{
+ rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
+
+ pci_free_consistent(rocker->pdev,
+ info->size * sizeof(struct rocker_desc),
+ info->desc, info->mapaddr);
+ kfree(info->desc_info);
+}
+
+static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
+ struct rocker_dma_ring_info *info)
+{
+ int i;
+
+ BUG_ON(info->head || info->tail);
+
+ /* When ring is consumer, we need to advance head for each desc.
+ * That tells hw that the desc is ready to be used by it.
+ */
+ for (i = 0; i < info->size - 1; i++)
+ rocker_desc_head_set(rocker, info, &info->desc_info[i]);
+ rocker_desc_commit(&info->desc_info[i]);
+}
+
+static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
+ int direction, size_t buf_size)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int i;
+ int err;
+
+ for (i = 0; i < info->size; i++) {
+ struct rocker_desc_info *desc_info = &info->desc_info[i];
+ struct rocker_desc *desc = &info->desc[i];
+ dma_addr_t dma_handle;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ err = -ENOMEM;
+ goto rollback;
+ }
+
+ dma_handle = pci_map_single(pdev, buf, buf_size, direction);
+ if (pci_dma_mapping_error(pdev, dma_handle)) {
+ kfree(buf);
+ err = -EIO;
+ goto rollback;
+ }
+
+ desc_info->data = buf;
+ desc_info->data_size = buf_size;
+ dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
+
+ desc->buf_addr = dma_handle;
+ desc->buf_size = buf_size;
+ }
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--) {
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
+
+ pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ desc_info->data_size, direction);
+ kfree(desc_info->data);
+ }
+ return err;
+}
+
+static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
+ int direction)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int i;
+
+ for (i = 0; i < info->size; i++) {
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
+ struct rocker_desc *desc = &info->desc[i];
+
+ desc->buf_addr = 0;
+ desc->buf_size = 0;
+ pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ desc_info->data_size, direction);
+ kfree(desc_info->data);
+ }
+}
+
+static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
+{
+ struct rocker_wait *wait;
+
+ wait = rocker_wait_create();
+ if (!wait)
+ return -ENOMEM;
+ rocker_desc_cookie_ptr_set(desc_info, wait);
+ return 0;
+}
+
+static void
+rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
+{
+ struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);
+
+ rocker_wait_destroy(wait);
+}
+
+static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
+{
+ const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
+ int i;
+ int err;
+
+ for (i = 0; i < cmd_ring->size; i++) {
+ err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--)
+ rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
+ return err;
+}
+
+static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
+{
+ const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
+ int i;
+
+ for (i = 0; i < cmd_ring->size; i++)
+ rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
+}
+
+static int rocker_dma_rings_init(struct rocker *rocker)
+{
+ const struct pci_dev *pdev = rocker->pdev;
+ int err;
+
+ err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
+ ROCKER_DMA_CMD_DEFAULT_SIZE,
+ &rocker->cmd_ring);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create command dma ring\n");
+ return err;
+ }
+
+ spin_lock_init(&rocker->cmd_ring_lock);
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
+ goto err_dma_cmd_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_cmd_ring_waits_alloc(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
+ goto err_dma_cmd_ring_waits_alloc;
+ }
+
+ err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
+ ROCKER_DMA_EVENT_DEFAULT_SIZE,
+ &rocker->event_ring);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create event dma ring\n");
+ goto err_dma_event_ring_create;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
+ PCI_DMA_FROMDEVICE, PAGE_SIZE);
+ if (err) {
+ dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
+ goto err_dma_event_ring_bufs_alloc;
+ }
+ rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
+ return 0;
+
+err_dma_event_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+err_dma_event_ring_create:
+ rocker_dma_cmd_ring_waits_free(rocker);
+err_dma_cmd_ring_waits_alloc:
+ rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL);
+err_dma_cmd_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+ return err;
+}
+
+static void rocker_dma_rings_fini(struct rocker *rocker)
+{
+ rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
+ PCI_DMA_FROMDEVICE);
+ rocker_dma_ring_destroy(rocker, &rocker->event_ring);
+ rocker_dma_cmd_ring_waits_free(rocker);
+ rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
+ PCI_DMA_BIDIRECTIONAL);
+ rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
+}
+
+static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ struct sk_buff *skb, size_t buf_len)
+{
+ const struct rocker *rocker = rocker_port->rocker;
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+
+ dma_handle = pci_map_single(pdev, skb->data, buf_len,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, dma_handle))
+ return -EIO;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
+ goto tlv_put_failure;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
+ goto tlv_put_failure;
+ return 0;
+
+tlv_put_failure:
+ pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
+ desc_info->tlv_size = 0;
+ return -EMSGSIZE;
+}
+
+static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
+{
+ return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+}
+
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info)
+{
+ struct net_device *dev = rocker_port->dev;
+ struct sk_buff *skb;
+ size_t buf_len = rocker_port_rx_buf_len(rocker_port);
+ int err;
+
+ /* Ensure that hw will see tlv_size zero in case of an error.
+ * That tells hw to use another descriptor.
+ */
+ rocker_desc_cookie_ptr_set(desc_info, NULL);
+ desc_info->tlv_size = 0;
+
+ skb = netdev_alloc_skb_ip_align(dev, buf_len);
+ if (!skb)
+ return -ENOMEM;
+ err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return err;
+ }
+ rocker_desc_cookie_ptr_set(desc_info, skb);
+ return 0;
+}
+
+static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
+ const struct rocker_tlv **attrs)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+ size_t len;
+
+ if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
+ !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
+ return;
+ dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
+ len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
+ pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
+}
+
+static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
+{
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
+
+ if (!skb)
+ return;
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
+ rocker_dma_rx_ring_skb_unmap(rocker, attrs);
+ dev_kfree_skb_any(skb);
+}
+
+static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
+{
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
+ int i;
+ int err;
+
+ for (i = 0; i < rx_ring->size; i++) {
+ err = rocker_dma_rx_ring_skb_alloc(rocker_port,
+ &rx_ring->desc_info[i]);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ for (i--; i >= 0; i--)
+ rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
+ return err;
+}
+
+static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
+{
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
+ int i;
+
+ for (i = 0; i < rx_ring->size; i++)
+ rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
+}
+
+static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ int err;
+
+ err = rocker_dma_ring_create(rocker,
+ ROCKER_DMA_TX(rocker_port->port_number),
+ ROCKER_DMA_TX_DEFAULT_SIZE,
+ &rocker_port->tx_ring);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
+ return err;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE,
+ ROCKER_DMA_TX_DESC_SIZE);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
+ goto err_dma_tx_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_ring_create(rocker,
+ ROCKER_DMA_RX(rocker_port->port_number),
+ ROCKER_DMA_RX_DEFAULT_SIZE,
+ &rocker_port->rx_ring);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
+ goto err_dma_rx_ring_create;
+ }
+
+ err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL,
+ ROCKER_DMA_RX_DESC_SIZE);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
+ goto err_dma_rx_ring_bufs_alloc;
+ }
+
+ err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
+ goto err_dma_rx_ring_skbs_alloc;
+ }
+ rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
+
+ return 0;
+
+err_dma_rx_ring_skbs_alloc:
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL);
+err_dma_rx_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+err_dma_rx_ring_create:
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE);
+err_dma_tx_ring_bufs_alloc:
+ rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+ return err;
+}
+
+static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+
+ rocker_dma_rx_ring_skbs_free(rocker_port);
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
+ PCI_DMA_BIDIRECTIONAL);
+ rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
+ rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
+ PCI_DMA_TODEVICE);
+ rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
+}
+
+static void rocker_port_set_enable(const struct rocker_port *rocker_port,
+ bool enable)
+{
+ u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
+
+ if (enable)
+ val |= 1ULL << rocker_port->pport;
+ else
+ val &= ~(1ULL << rocker_port->pport);
+ rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
+}
+
+/********************************
+ * Interrupt handler and helpers
+ ********************************/
+
+static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
+{
+ struct rocker *rocker = dev_id;
+ const struct rocker_desc_info *desc_info;
+ struct rocker_wait *wait;
+ u32 credits = 0;
+
+ spin_lock(&rocker->cmd_ring_lock);
+ while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
+ wait = rocker_desc_cookie_ptr_get(desc_info);
+ if (wait->nowait) {
+ rocker_desc_gen_clear(desc_info);
+ } else {
+ rocker_wait_wake_up(wait);
+ }
+ credits++;
+ }
+ spin_unlock(&rocker->cmd_ring_lock);
+ rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
+
+ return IRQ_HANDLED;
+}
+
+static void rocker_port_link_up(const struct rocker_port *rocker_port)
+{
+ netif_carrier_on(rocker_port->dev);
+ netdev_info(rocker_port->dev, "Link is up\n");
+}
+
+static void rocker_port_link_down(const struct rocker_port *rocker_port)
+{
+ netif_carrier_off(rocker_port->dev);
+ netdev_info(rocker_port->dev, "Link is down\n");
+}
+
+static int rocker_event_link_change(const struct rocker *rocker,
+ const struct rocker_tlv *info)
+{
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+ unsigned int port_number;
+ bool link_up;
+ struct rocker_port *rocker_port;
+
+ rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
+ if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
+ !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
+ return -EIO;
+ port_number =
+ rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
+ link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
+
+ if (port_number >= rocker->port_count)
+ return -EINVAL;
+
+ rocker_port = rocker->ports[port_number];
+ if (netif_carrier_ok(rocker_port->dev) != link_up) {
+ if (link_up)
+ rocker_port_link_up(rocker_port);
+ else
+ rocker_port_link_down(rocker_port);
+ }
+
+ return 0;
+}
+
+static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
+ const unsigned char *addr,
+ __be16 vlan_id);
+
+static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
+ const struct rocker_tlv *info)
+{
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+ unsigned int port_number;
+ struct rocker_port *rocker_port;
+ const unsigned char *addr;
+ __be16 vlan_id;
+
+ rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
+ if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
+ !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
+ !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
+ return -EIO;
+ port_number =
+ rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
+ addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
+ vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
+
+ if (port_number >= rocker->port_count)
+ return -EINVAL;
+
+ rocker_port = rocker->ports[port_number];
+ return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
+}
+
+static int rocker_event_process(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
+{
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+ const struct rocker_tlv *info;
+ u16 type;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
+ !attrs[ROCKER_TLV_EVENT_INFO])
+ return -EIO;
+
+ type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
+ info = attrs[ROCKER_TLV_EVENT_INFO];
+
+ switch (type) {
+ case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
+ return rocker_event_link_change(rocker, info);
+ case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
+ return rocker_event_mac_vlan_seen(rocker, info);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
+{
+ struct rocker *rocker = dev_id;
+ const struct pci_dev *pdev = rocker->pdev;
+ const struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
+ err = rocker_desc_err(desc_info);
+ if (err) {
+ dev_err(&pdev->dev, "event desc received with err %d\n",
+ err);
+ } else {
+ err = rocker_event_process(rocker, desc_info);
+ if (err)
+ dev_err(&pdev->dev, "event processing failed with err %d\n",
+ err);
+ }
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
+ credits++;
+ }
+ rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_port *rocker_port = dev_id;
+
+ napi_schedule(&rocker_port->napi_tx);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
+{
+ struct rocker_port *rocker_port = dev_id;
+
+ napi_schedule(&rocker_port->napi_rx);
+ return IRQ_HANDLED;
+}
+
+/********************
+ * Command interface
+ ********************/
+
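+/* Run one command through the cmd ring: "prepare" fills the descriptor
+ * with request TLVs under the ring lock, the descriptor is posted to
+ * the hardware, and unless "nowait" is set the caller sleeps until the
+ * completion IRQ fires, after which "process" (if non-NULL) parses the
+ * response TLVs out of the same descriptor. See e.g.
+ * rocker_cmd_get_port_settings_ethtool() below for the typical usage.
+ */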
+int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait,
+ rocker_cmd_prep_cb_t prepare, void *prepare_priv,
+ rocker_cmd_proc_cb_t process, void *process_priv)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ struct rocker_wait *wait;
+ unsigned long lock_flags;
+ int err;
+
+ spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
+
+ desc_info = rocker_desc_head_get(&rocker->cmd_ring);
+ if (!desc_info) {
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
+ return -EAGAIN;
+ }
+
+ wait = rocker_desc_cookie_ptr_get(desc_info);
+ rocker_wait_init(wait);
+ wait->nowait = nowait;
+
+ err = prepare(rocker_port, desc_info, prepare_priv);
+ if (err) {
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
+ return err;
+ }
+
+ rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
+
+ spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
+
+ if (nowait)
+ return 0;
+
+ if (!rocker_wait_event_timeout(wait, HZ / 10))
+ return -EIO;
+
+ err = rocker_desc_err(desc_info);
+ if (err)
+ return err;
+
+ if (process)
+ err = process(rocker_port, desc_info, process_priv);
+
+ rocker_desc_gen_clear(desc_info);
+ return err;
+}
+
+static int
+rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct ethtool_cmd *ecmd = priv;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ u32 speed;
+ u8 duplex;
+ u8 autoneg;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
+ !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
+ !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
+ return -EIO;
+
+ speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
+ duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
+ autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
+
+ ecmd->transceiver = XCVR_INTERNAL;
+ ecmd->supported = SUPPORTED_TP;
+ ecmd->phy_address = 0xff;
+ ecmd->port = PORT_TP;
+ ethtool_cmd_speed_set(ecmd, speed);
+ ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ unsigned char *macaddr = priv;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attr;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
+ if (!attr)
+ return -EIO;
+
+ if (rocker_tlv_len(attr) != ETH_ALEN)
+ return -EINVAL;
+
+ ether_addr_copy(macaddr, rocker_tlv_data(attr));
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ u8 *p_mode = priv;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attr;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
+ if (!attr)
+ return -EIO;
+
+ *p_mode = rocker_tlv_get_u8(attr);
+ return 0;
+}
+
+struct port_name {
+ char *buf;
+ size_t len;
+};
+
+static int
+rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ struct port_name *name = priv;
+ const struct rocker_tlv *attr;
+ size_t i, j, len;
+ const char *str;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+ attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
+ if (!attr)
+ return -EIO;
+
+ len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
+ str = rocker_tlv_data(attr);
+
+ /* make sure name only contains alphanumeric characters */
+ for (i = j = 0; i < len; ++i) {
+ if (isalnum(str[i])) {
+ name->buf[j] = str[i];
+ j++;
+ }
+ }
+
+ if (j == 0)
+ return -EIO;
+
+ name->buf[j] = '\0';
+
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct ethtool_cmd *ecmd = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
+ ethtool_cmd_speed(ecmd)))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
+ ecmd->duplex))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
+ ecmd->autoneg))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const unsigned char *macaddr = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
+ ETH_ALEN, macaddr))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ int mtu = *(int *)priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
+ mtu))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int
+rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ bool learning = *(bool *)priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
+ learning))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+ return 0;
+}
+
+static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_cmd *ecmd)
+{
+ return rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_ethtool_proc,
+ ecmd);
+}
+
+static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
+ unsigned char *macaddr)
+{
+ return rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_macaddr_proc,
+ macaddr);
+}
+
+static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
+ u8 *p_mode)
+{
+ return rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_mode_proc, p_mode);
+}
+
+static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
+ struct ethtool_cmd *ecmd)
+{
+ return rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_set_port_settings_ethtool_prep,
+ ecmd, NULL, NULL);
+}
+
+static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
+ unsigned char *macaddr)
+{
+ return rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_set_port_settings_macaddr_prep,
+ macaddr, NULL, NULL);
+}
+
+static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
+ int mtu)
+{
+ return rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_set_port_settings_mtu_prep,
+ &mtu, NULL, NULL);
+}
+
+int rocker_port_set_learning(struct rocker_port *rocker_port,
+ bool learning)
+{
+ return rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_set_port_learning_prep,
+ &learning, NULL, NULL);
+}
+
+/**********************
+ * Worlds manipulation
+ **********************/
+
+static struct rocker_world_ops *rocker_world_ops[] = {
+ &rocker_ofdpa_ops,
+};
+
+#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)
+
+static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
+{
+ int i;
+
+ for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
+ if (rocker_world_ops[i]->mode == mode)
+ return rocker_world_ops[i];
+ return NULL;
+}
+
+static int rocker_world_init(struct rocker *rocker, u8 mode)
+{
+ struct rocker_world_ops *wops;
+ int err;
+
+ wops = rocker_world_ops_find(mode);
+ if (!wops) {
+ dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
+ mode);
+ return -EINVAL;
+ }
+ rocker->wops = wops;
+ rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
+ if (!rocker->wpriv)
+ return -ENOMEM;
+ if (!wops->init)
+ return 0;
+ err = wops->init(rocker);
+ if (err)
+ kfree(rocker->wpriv);
+ return err;
+}
+
+static void rocker_world_fini(struct rocker *rocker)
+{
+ struct rocker_world_ops *wops = rocker->wops;
+
+ if (!wops || !wops->fini)
+ return;
+ wops->fini(rocker);
+ kfree(rocker->wpriv);
+}
+
+static int rocker_world_check_init(struct rocker_port *rocker_port)
+{
+ struct rocker *rocker = rocker_port->rocker;
+ u8 mode;
+ int err;
+
+ err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
+ if (err) {
+ dev_err(&rocker->pdev->dev, "failed to get port mode\n");
+ return err;
+ }
+ if (rocker->wops) {
+ if (rocker->wops->mode != mode) {
+ dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+ return rocker_world_init(rocker, mode);
+}
+
+static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+ int err;
+
+ rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
+ if (!rocker_port->wpriv)
+ return -ENOMEM;
+ if (!wops->port_pre_init)
+ return 0;
+ err = wops->port_pre_init(rocker_port);
+ if (err)
+ kfree(rocker_port->wpriv);
+ return err;
+}
+
+static int rocker_world_port_init(struct rocker_port *rocker_port)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_init)
+ return 0;
+ return wops->port_init(rocker_port);
+}
+
+static void rocker_world_port_fini(struct rocker_port *rocker_port)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_fini)
+ return;
+ wops->port_fini(rocker_port);
+}
+
+static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_post_fini)
+ return;
+ wops->port_post_fini(rocker_port);
+ kfree(rocker_port->wpriv);
+}
+
+static int rocker_world_port_open(struct rocker_port *rocker_port)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_open)
+ return 0;
+ return wops->port_open(rocker_port);
+}
+
+static void rocker_world_port_stop(struct rocker_port *rocker_port)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_stop)
+ return;
+ wops->port_stop(rocker_port);
+}
+
+static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
+ u8 state,
+ struct switchdev_trans *trans)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_attr_stp_state_set)
+ return -EOPNOTSUPP;
+ return wops->port_attr_stp_state_set(rocker_port, state, trans);
+}
+
+static int
+rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
+ unsigned long brport_flags,
+ struct switchdev_trans *trans)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_attr_bridge_flags_set)
+ return -EOPNOTSUPP;
+ return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
+ trans);
+}
+
+static int
+rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
+ unsigned long *p_brport_flags)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_attr_bridge_flags_get)
+ return -EOPNOTSUPP;
+ return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags);
+}
+
+static int
+rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
+ u32 ageing_time,
+ struct switchdev_trans *trans)
+
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_attr_bridge_ageing_time_set)
+ return -EOPNOTSUPP;
+ return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
+ trans);
+}
+
+static int
+rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_obj_vlan_add)
+ return -EOPNOTSUPP;
+ return wops->port_obj_vlan_add(rocker_port, vlan, trans);
+}
+
+static int
+rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_obj_vlan_del)
+ return -EOPNOTSUPP;
+ return wops->port_obj_vlan_del(rocker_port, vlan);
+}
+
+static int
+rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_obj_vlan_dump)
+ return -EOPNOTSUPP;
+ return wops->port_obj_vlan_dump(rocker_port, vlan, cb);
+}
+
+static int
+rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_obj_fib4_add)
+ return -EOPNOTSUPP;
+ return wops->port_obj_fib4_add(rocker_port, fib4, trans);
+}
+
+static int
+rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_obj_fib4_del)
+ return -EOPNOTSUPP;
+ return wops->port_obj_fib4_del(rocker_port, fib4);
+}
+
+static int
+rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_obj_fdb_add)
+ return -EOPNOTSUPP;
+ return wops->port_obj_fdb_add(rocker_port, fdb, trans);
+}
+
+static int
+rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_obj_fdb_del)
+ return -EOPNOTSUPP;
+ return wops->port_obj_fdb_del(rocker_port, fdb);
+}
+
+static int
+rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port,
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_obj_fdb_dump)
+ return -EOPNOTSUPP;
+ return wops->port_obj_fdb_dump(rocker_port, fdb, cb);
+}
+
+static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
+ struct net_device *master)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_master_linked)
+ return -EOPNOTSUPP;
+ return wops->port_master_linked(rocker_port, master);
+}
+
+static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
+ struct net_device *master)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_master_unlinked)
+ return -EOPNOTSUPP;
+ return wops->port_master_unlinked(rocker_port, master);
+}
+
+static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
+ struct neighbour *n)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_neigh_update)
+ return -EOPNOTSUPP;
+ return wops->port_neigh_update(rocker_port, n);
+}
+
+static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
+ struct neighbour *n)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_neigh_destroy)
+ return -EOPNOTSUPP;
+ return wops->port_neigh_destroy(rocker_port, n);
+}
+
+static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
+ const unsigned char *addr,
+ __be16 vlan_id)
+{
+ struct rocker_world_ops *wops = rocker_port->rocker->wops;
+
+ if (!wops->port_ev_mac_vlan_seen)
+ return -EOPNOTSUPP;
+ return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
+}
+
+/*****************
+ * Net device ops
+ *****************/
+
+static int rocker_port_open(struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err;
+
+ err = rocker_port_dma_rings_init(rocker_port);
+ if (err)
+ return err;
+
+ err = request_irq(rocker_msix_tx_vector(rocker_port),
+ rocker_tx_irq_handler, 0,
+ rocker_driver_name, rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "cannot assign tx irq\n");
+ goto err_request_tx_irq;
+ }
+
+ err = request_irq(rocker_msix_rx_vector(rocker_port),
+ rocker_rx_irq_handler, 0,
+ rocker_driver_name, rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "cannot assign rx irq\n");
+ goto err_request_rx_irq;
+ }
+
+ err = rocker_world_port_open(rocker_port);
+ if (err) {
+ netdev_err(rocker_port->dev, "cannot open port in world\n");
+ goto err_world_port_open;
+ }
+
+ napi_enable(&rocker_port->napi_tx);
+ napi_enable(&rocker_port->napi_rx);
+ if (!dev->proto_down)
+ rocker_port_set_enable(rocker_port, true);
+ netif_start_queue(dev);
+ return 0;
+
+err_world_port_open:
+ free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
+err_request_rx_irq:
+ free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
+err_request_tx_irq:
+ rocker_port_dma_rings_fini(rocker_port);
+ return err;
+}
+
+static int rocker_port_stop(struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ rocker_port_set_enable(rocker_port, false);
+ napi_disable(&rocker_port->napi_rx);
+ napi_disable(&rocker_port->napi_tx);
+ rocker_world_port_stop(rocker_port);
+ free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
+ free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
+ rocker_port_dma_rings_fini(rocker_port);
+
+ return 0;
+}
+
+static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info)
+{
+ const struct rocker *rocker = rocker_port->rocker;
+ struct pci_dev *pdev = rocker->pdev;
+ const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+ struct rocker_tlv *attr;
+ int rem;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_TX_FRAGS])
+ return;
+ rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
+ const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+ dma_addr_t dma_handle;
+ size_t len;
+
+ if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
+ continue;
+ rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
+ attr);
+ if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
+ !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
+ continue;
+ dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
+ len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
+ pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
+ }
+}
+
+static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ char *buf, size_t buf_len)
+{
+ const struct rocker *rocker = rocker_port->rocker;
+ struct pci_dev *pdev = rocker->pdev;
+ dma_addr_t dma_handle;
+ struct rocker_tlv *frag;
+
+ dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
+ if (net_ratelimit())
+ netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
+ return -EIO;
+ }
+ frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
+ if (!frag)
+ goto unmap_frag;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
+ dma_handle))
+ goto nest_cancel;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
+ buf_len))
+ goto nest_cancel;
+ rocker_tlv_nest_end(desc_info, frag);
+ return 0;
+
+nest_cancel:
+ rocker_tlv_nest_cancel(desc_info, frag);
+unmap_frag:
+ pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
+ return -EMSGSIZE;
+}
+
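+/* Transmit path: the skb is stored as the descriptor cookie and its
+ * data is described by a ROCKER_TLV_TX_FRAGS nest holding one
+ * (ADDR, LEN) fragment attribute per DMA mapping; skbs with more
+ * than ROCKER_TX_FRAGS_MAX page fragments are linearized first.
+ */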
+static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ struct rocker_tlv *frags;
+ int i;
+ int err;
+
+ desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
+ if (unlikely(!desc_info)) {
+ if (net_ratelimit())
+ netdev_err(dev, "tx ring full when queue awake\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ rocker_desc_cookie_ptr_set(desc_info, skb);
+
+ frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
+ if (!frags)
+ goto out;
+ err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
+ skb->data, skb_headlen(skb));
+ if (err)
+ goto nest_cancel;
+ if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
+ err = skb_linearize(skb);
+ if (err)
+ goto unmap_frags;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
+ skb_frag_address(frag),
+ skb_frag_size(frag));
+ if (err)
+ goto unmap_frags;
+ }
+ rocker_tlv_nest_end(desc_info, frags);
+
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
+
+ desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
+ if (!desc_info)
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+
+unmap_frags:
+ rocker_tx_desc_frags_unmap(rocker_port, desc_info);
+nest_cancel:
+ rocker_tlv_nest_cancel(desc_info, frags);
+out:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static int rocker_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
+ if (err)
+ return err;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int running = netif_running(dev);
+ int err;
+
+#define ROCKER_PORT_MIN_MTU 68
+#define ROCKER_PORT_MAX_MTU 9000
+
+ if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
+ return -EINVAL;
+
+ if (running)
+ rocker_port_stop(dev);
+
+ netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
+ dev->mtu = new_mtu;
+
+ err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
+ if (err)
+ return err;
+
+ if (running)
+ err = rocker_port_open(dev);
+
+ return err;
+}
+
+static int rocker_port_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct port_name name = { .buf = buf, .len = len };
+ int err;
+
+ err = rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_get_port_settings_prep, NULL,
+ rocker_cmd_get_port_settings_phys_name_proc,
+ &name);
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+static int rocker_port_change_proto_down(struct net_device *dev,
+ bool proto_down)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ if (rocker_port->dev->flags & IFF_UP)
+ rocker_port_set_enable(rocker_port, !proto_down);
+ rocker_port->dev->proto_down = proto_down;
+ return 0;
+}
+
+static void rocker_port_neigh_destroy(struct neighbour *n)
+{
+ struct rocker_port *rocker_port = netdev_priv(n->dev);
+ int err;
+
+ err = rocker_world_port_neigh_destroy(rocker_port, n);
+ if (err)
+ netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
+ err);
+}
+
+static const struct net_device_ops rocker_port_netdev_ops = {
+ .ndo_open = rocker_port_open,
+ .ndo_stop = rocker_port_stop,
+ .ndo_start_xmit = rocker_port_xmit,
+ .ndo_set_mac_address = rocker_port_set_mac_address,
+ .ndo_change_mtu = rocker_port_change_mtu,
+ .ndo_bridge_getlink = switchdev_port_bridge_getlink,
+ .ndo_bridge_setlink = switchdev_port_bridge_setlink,
+ .ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_fdb_add = switchdev_port_fdb_add,
+ .ndo_fdb_del = switchdev_port_fdb_del,
+ .ndo_fdb_dump = switchdev_port_fdb_dump,
+ .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
+ .ndo_change_proto_down = rocker_port_change_proto_down,
+ .ndo_neigh_destroy = rocker_port_neigh_destroy,
+};
+
+/********************
+ * switchdev interface
+ ********************/
+
+static int rocker_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct rocker *rocker = rocker_port->rocker;
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = sizeof(rocker->hw.id);
+ memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = rocker_world_port_attr_bridge_flags_get(rocker_port,
+ &attr->u.brport_flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int rocker_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = rocker_world_port_attr_stp_state_set(rocker_port,
+ attr->u.stp_state,
+ trans);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = rocker_world_port_attr_bridge_flags_set(rocker_port,
+ attr->u.brport_flags,
+ trans);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
+ attr->u.ageing_time,
+ trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int rocker_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_world_port_obj_vlan_add(rocker_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ err = rocker_world_port_obj_fib4_add(rocker_port,
+ SWITCHDEV_OBJ_IPV4_FIB(obj),
+ trans);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_world_port_obj_fdb_add(rocker_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj),
+ trans);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int rocker_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_world_port_obj_vlan_del(rocker_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ err = rocker_world_port_obj_fib4_del(rocker_port,
+ SWITCHDEV_OBJ_IPV4_FIB(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_world_port_obj_fdb_del(rocker_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int rocker_port_obj_dump(struct net_device *dev,
+ struct switchdev_obj *obj,
+ switchdev_obj_dump_cb_t *cb)
+{
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ int err = 0;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_FDB:
+ err = rocker_world_port_obj_fdb_dump(rocker_port,
+ SWITCHDEV_OBJ_PORT_FDB(obj),
+ cb);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = rocker_world_port_obj_vlan_dump(rocker_port,
+ SWITCHDEV_OBJ_PORT_VLAN(obj),
+ cb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct switchdev_ops rocker_port_switchdev_ops = {
+ .switchdev_port_attr_get = rocker_port_attr_get,
+ .switchdev_port_attr_set = rocker_port_attr_set,
+ .switchdev_port_obj_add = rocker_port_obj_add,
+ .switchdev_port_obj_del = rocker_port_obj_del,
+ .switchdev_port_obj_dump = rocker_port_obj_dump,
+};
+
+/********************
+ * ethtool interface
+ ********************/
+
+static int rocker_port_get_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
+}
+
+static int rocker_port_set_settings(struct net_device *dev,
+ struct ethtool_cmd *ecmd)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
+}
+
+static void rocker_port_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static struct rocker_port_stats {
+ char str[ETH_GSTRING_LEN];
+ int type;
+} rocker_port_stats[] = {
+ { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
+ { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
+ { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
+ { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
+
+ { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
+ { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
+ { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
+ { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
+};
+
+#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
+
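+/* The ethtool string order and the stats payload share an index:
+ * rocker_port_get_strings() and rocker_cmd_get_port_stats_ethtool_proc()
+ * both walk rocker_port_stats[], so this array defines the layout of
+ * the 'data' buffer handed to ethtool.
+ */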
+static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
+ memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int
+rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct rocker_tlv *cmd_stats;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+ ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
+ return -EMSGSIZE;
+
+ cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_stats)
+ return -EMSGSIZE;
+
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
+ rocker_port->pport))
+ return -EMSGSIZE;
+
+ rocker_tlv_nest_end(desc_info, cmd_stats);
+
+ return 0;
+}
+
+static int
+rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+ const struct rocker_tlv *pattr;
+ u32 pport;
+ u64 *data = priv;
+ int i;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
+
+ if (!attrs[ROCKER_TLV_CMD_INFO])
+ return -EIO;
+
+ rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
+ attrs[ROCKER_TLV_CMD_INFO]);
+
+ if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
+ return -EIO;
+
+ pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
+ if (pport != rocker_port->pport)
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
+ pattr = stats_attrs[rocker_port_stats[i].type];
+ if (!pattr)
+ continue;
+
+ data[i] = rocker_tlv_get_u64(pattr);
+ }
+
+ return 0;
+}
+
+static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
+ void *priv)
+{
+ return rocker_cmd_exec(rocker_port, false,
+ rocker_cmd_get_port_stats_prep, NULL,
+ rocker_cmd_get_port_stats_ethtool_proc,
+ priv);
+}
+
+static void rocker_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+
+ if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
+ data[i] = 0;
+ }
+}
+
+static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ROCKER_PORT_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops rocker_port_ethtool_ops = {
+ .get_settings = rocker_port_get_settings,
+ .set_settings = rocker_port_set_settings,
+ .get_drvinfo = rocker_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = rocker_port_get_strings,
+ .get_ethtool_stats = rocker_port_get_stats,
+ .get_sset_count = rocker_port_get_sset_count,
+};
+
+/*****************
+ * NAPI interface
+ *****************/
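+
+/* Both poll handlers count the descriptors they consume and hand the
+ * count back to the device through rocker_dma_ring_credits_set() so
+ * the ring slots can be reused; the rx handler also respects the NAPI
+ * budget, while the tx handler drains all completed descriptors.
+ */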
+
+static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
+{
+ return container_of(napi, struct rocker_port, napi_tx);
+}
+
+static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ /* Cleanup tx descriptors */
+ while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
+ struct sk_buff *skb;
+
+ err = rocker_desc_err(desc_info);
+ if (err && net_ratelimit())
+ netdev_err(rocker_port->dev, "tx desc received with err %d\n",
+ err);
+ rocker_tx_desc_frags_unmap(rocker_port, desc_info);
+
+ skb = rocker_desc_cookie_ptr_get(desc_info);
+ if (err == 0) {
+ rocker_port->dev->stats.tx_packets++;
+ rocker_port->dev->stats.tx_bytes += skb->len;
+ } else {
+ rocker_port->dev->stats.tx_errors++;
+ }
+
+ dev_kfree_skb_any(skb);
+ credits++;
+ }
+
+ if (credits && netif_queue_stopped(rocker_port->dev))
+ netif_wake_queue(rocker_port->dev);
+
+ napi_complete(napi);
+ rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
+
+ return 0;
+}
+
+static int rocker_port_rx_proc(const struct rocker *rocker,
+ const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info)
+{
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
+ size_t rx_len;
+ u16 rx_flags = 0;
+
+ if (!skb)
+ return -ENOENT;
+
+ rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
+ if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
+ return -EINVAL;
+ if (attrs[ROCKER_TLV_RX_FLAGS])
+ rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
+
+ rocker_dma_rx_ring_skb_unmap(rocker, attrs);
+
+ rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
+ skb_put(skb, rx_len);
+ skb->protocol = eth_type_trans(skb, rocker_port->dev);
+
+ if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
+ skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+
+ rocker_port->dev->stats.rx_packets++;
+ rocker_port->dev->stats.rx_bytes += skb->len;
+
+ netif_receive_skb(skb);
+
+ return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
+}
+
+static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
+{
+ return container_of(napi, struct rocker_port, napi_rx);
+}
+
+static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
+ const struct rocker *rocker = rocker_port->rocker;
+ struct rocker_desc_info *desc_info;
+ u32 credits = 0;
+ int err;
+
+ /* Process rx descriptors */
+ while (credits < budget &&
+ (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
+ err = rocker_desc_err(desc_info);
+ if (err) {
+ if (net_ratelimit())
+ netdev_err(rocker_port->dev, "rx desc received with err %d\n",
+ err);
+ } else {
+ err = rocker_port_rx_proc(rocker, rocker_port,
+ desc_info);
+ if (err && net_ratelimit())
+ netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
+ err);
+ }
+ if (err)
+ rocker_port->dev->stats.rx_errors++;
+
+ rocker_desc_gen_clear(desc_info);
+ rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
+ credits++;
+ }
+
+ if (credits < budget)
+ napi_complete(napi);
+
+ rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
+
+ return credits;
+}
+
+/*****************
+ * PCI driver ops
+ *****************/
+
+static void rocker_carrier_init(const struct rocker_port *rocker_port)
+{
+ const struct rocker *rocker = rocker_port->rocker;
+ u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
+ bool link_up;
+
+ link_up = link_status & (1 << rocker_port->pport);
+ if (link_up)
+ netif_carrier_on(rocker_port->dev);
+ else
+ netif_carrier_off(rocker_port->dev);
+}
+
+static void rocker_remove_ports(struct rocker *rocker)
+{
+ struct rocker_port *rocker_port;
+ int i;
+
+ for (i = 0; i < rocker->port_count; i++) {
+ rocker_port = rocker->ports[i];
+ if (!rocker_port)
+ continue;
+ rocker_world_port_fini(rocker_port);
+ unregister_netdev(rocker_port->dev);
+ rocker_world_port_post_fini(rocker_port);
+ free_netdev(rocker_port->dev);
+ }
+ rocker_world_fini(rocker);
+ kfree(rocker->ports);
+}
+
+static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
+{
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct pci_dev *pdev = rocker->pdev;
+ int err;
+
+ err = rocker_cmd_get_port_settings_macaddr(rocker_port,
+ rocker_port->dev->dev_addr);
+ if (err) {
+ dev_warn(&pdev->dev, "failed to get mac address, using random\n");
+ eth_hw_addr_random(rocker_port->dev);
+ }
+}
+
+static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
+{
+ const struct pci_dev *pdev = rocker->pdev;
+ struct rocker_port *rocker_port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct rocker_port));
+ if (!dev)
+ return -ENOMEM;
+ rocker_port = netdev_priv(dev);
+ rocker_port->dev = dev;
+ rocker_port->rocker = rocker;
+ rocker_port->port_number = port_number;
+ rocker_port->pport = port_number + 1;
+
+ err = rocker_world_check_init(rocker_port);
+ if (err) {
+ dev_err(&pdev->dev, "world init failed\n");
+ goto err_world_check_init;
+ }
+
+ rocker_port_dev_addr_init(rocker_port);
+ dev->netdev_ops = &rocker_port_netdev_ops;
+ dev->ethtool_ops = &rocker_port_ethtool_ops;
+ dev->switchdev_ops = &rocker_port_switchdev_ops;
+ netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
+ NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
+ NAPI_POLL_WEIGHT);
+ rocker_carrier_init(rocker_port);
+
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
+
+ err = rocker_world_port_pre_init(rocker_port);
+ if (err) {
+ dev_err(&pdev->dev, "port world pre-init failed\n");
+ goto err_world_port_pre_init;
+ }
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "register_netdev failed\n");
+ goto err_register_netdev;
+ }
+ rocker->ports[port_number] = rocker_port;
+
+ err = rocker_world_port_init(rocker_port);
+ if (err) {
+ dev_err(&pdev->dev, "port world init failed\n");
+ goto err_world_port_init;
+ }
+
+ return 0;
+
+err_world_port_init:
+ rocker->ports[port_number] = NULL;
+ unregister_netdev(dev);
+err_register_netdev:
+ rocker_world_port_post_fini(rocker_port);
+err_world_port_pre_init:
+err_world_check_init:
+ free_netdev(dev);
+ return err;
+}
+
+static int rocker_probe_ports(struct rocker *rocker)
+{
+ int i;
+ int err;
+
+ rocker->ports = kcalloc(rocker->port_count,
+ sizeof(struct rocker_port *), GFP_KERNEL);
+ if (!rocker->ports)
+ return -ENOMEM;
+ for (i = 0; i < rocker->port_count; i++) {
+ err = rocker_probe_port(rocker, i);
+ if (err)
+ goto remove_ports;
+ }
+ return 0;
+
+remove_ports:
+ rocker_remove_ports(rocker);
+ return err;
+}
+
+static int rocker_msix_init(struct rocker *rocker)
+{
+ struct pci_dev *pdev = rocker->pdev;
+ int msix_entries;
+ int i;
+ int err;
+
+ msix_entries = pci_msix_vec_count(pdev);
+ if (msix_entries < 0)
+ return msix_entries;
+
+ if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
+ return -EINVAL;
+
+ rocker->msix_entries = kmalloc_array(msix_entries,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!rocker->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < msix_entries; i++)
+ rocker->msix_entries[i].entry = i;
+
+ err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
+ if (err < 0)
+ goto err_enable_msix;
+
+ return 0;
+
+err_enable_msix:
+ kfree(rocker->msix_entries);
+ return err;
+}
+
+static void rocker_msix_fini(const struct rocker *rocker)
+{
+ pci_disable_msix(rocker->pdev);
+ kfree(rocker->msix_entries);
+}
+
+static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct rocker *rocker;
+ int err;
+
+ rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
+ if (!rocker)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
+
+ err = pci_request_regions(pdev, rocker_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ goto err_pci_set_dma_mask;
+ }
+ }
+
+ if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
+ dev_err(&pdev->dev, "invalid PCI region size\n");
+ err = -EINVAL;
+ goto err_pci_resource_len_check;
+ }
+
+ rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!rocker->hw_addr) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto err_ioremap;
+ }
+ pci_set_master(pdev);
+
+ rocker->pdev = pdev;
+ pci_set_drvdata(pdev, rocker);
+
+ rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
+
+ err = rocker_msix_init(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "MSI-X init failed\n");
+ goto err_msix_init;
+ }
+
+ err = rocker_basic_hw_test(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "basic hw test failed\n");
+ goto err_basic_hw_test;
+ }
+
+ rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+
+ err = rocker_dma_rings_init(rocker);
+ if (err)
+ goto err_dma_rings_init;
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
+ rocker_cmd_irq_handler, 0,
+ rocker_driver_name, rocker);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign cmd irq\n");
+ goto err_request_cmd_irq;
+ }
+
+ err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
+ rocker_event_irq_handler, 0,
+ rocker_driver_name, rocker);
+ if (err) {
+ dev_err(&pdev->dev, "cannot assign event irq\n");
+ goto err_request_event_irq;
+ }
+
+ rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
+
+ err = rocker_probe_ports(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe ports\n");
+ goto err_probe_ports;
+ }
+
+ dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
+ (int)sizeof(rocker->hw.id), &rocker->hw.id);
+
+ return 0;
+
+err_probe_ports:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+err_request_event_irq:
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
+err_request_cmd_irq:
+ rocker_dma_rings_fini(rocker);
+err_dma_rings_init:
+err_basic_hw_test:
+ rocker_msix_fini(rocker);
+err_msix_init:
+ iounmap(rocker->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+ pci_disable_device(pdev);
+err_pci_enable_device:
+ kfree(rocker);
+ return err;
+}
+
+static void rocker_remove(struct pci_dev *pdev)
+{
+ struct rocker *rocker = pci_get_drvdata(pdev);
+
+ rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
+ rocker_remove_ports(rocker);
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
+ free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
+ rocker_dma_rings_fini(rocker);
+ rocker_msix_fini(rocker);
+ iounmap(rocker->hw_addr);
+ pci_release_regions(rocker->pdev);
+ pci_disable_device(rocker->pdev);
+ kfree(rocker);
+}
+
+static struct pci_driver rocker_pci_driver = {
+ .name = rocker_driver_name,
+ .id_table = rocker_pci_id_table,
+ .probe = rocker_probe,
+ .remove = rocker_remove,
+};
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static bool rocker_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &rocker_port_netdev_ops;
+}
+
+static int rocker_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ struct rocker_port *rocker_port;
+ int err;
+
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+ if (!info->master)
+ goto out;
+ rocker_port = netdev_priv(dev);
+ if (info->linking) {
+ err = rocker_world_port_master_linked(rocker_port,
+ info->upper_dev);
+ if (err)
+ netdev_warn(dev, "failed to reflect master linked (err %d)\n",
+ err);
+ } else {
+ err = rocker_world_port_master_unlinked(rocker_port,
+ info->upper_dev);
+ if (err)
+ netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
+ err);
+ }
+ }
+out:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netdevice_nb __read_mostly = {
+ .notifier_call = rocker_netdevice_event,
+};
+
+/************************************
+ * Net event notifier event handler
+ ************************************/
+
+static int rocker_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct rocker_port *rocker_port;
+ struct net_device *dev;
+ struct neighbour *n = ptr;
+ int err;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ if (n->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+ dev = n->dev;
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+ rocker_port = netdev_priv(dev);
+ err = rocker_world_port_neigh_update(rocker_port, n);
+ if (err)
+ netdev_warn(dev, "failed to handle neigh update (err %d)\n",
+ err);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rocker_netevent_nb __read_mostly = {
+ .notifier_call = rocker_netevent_event,
+};
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init rocker_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&rocker_netdevice_nb);
+ register_netevent_notifier(&rocker_netevent_nb);
+ err = pci_register_driver(&rocker_pci_driver);
+ if (err)
+ goto err_pci_register_driver;
+ return 0;
+
+err_pci_register_driver:
+ unregister_netevent_notifier(&rocker_netevent_nb);
+ unregister_netdevice_notifier(&rocker_netdevice_nb);
+ return err;
+}
+
+static void __exit rocker_module_exit(void)
+{
+ unregister_netevent_notifier(&rocker_netevent_nb);
+ unregister_netdevice_notifier(&rocker_netdevice_nb);
+ pci_unregister_driver(&rocker_pci_driver);
+}
+
+module_init(rocker_module_init);
+module_exit(rocker_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
+MODULE_DESCRIPTION("Rocker switch device driver");
+MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
new file mode 100644
index 000000000000..0e758bcb26b0
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -0,0 +1,2958 @@
+/*
+ * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like
+ * implementation
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/hashtable.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <net/neighbour.h>
+#include <net/switchdev.h>
+#include <net/ip_fib.h>
+#include <net/arp.h>
+
+#include "rocker.h"
+#include "rocker_tlv.h"
+
+struct ofdpa_flow_tbl_key {
+ u32 priority;
+ enum rocker_of_dpa_table_id tbl_id;
+ union {
+ struct {
+ u32 in_pport;
+ u32 in_pport_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ } ig_port;
+ struct {
+ u32 in_pport;
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ bool untagged;
+ __be16 new_vlan_id;
+ } vlan;
+ struct {
+ u32 in_pport;
+ u32 in_pport_mask;
+ __be16 eth_type;
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ bool copy_to_cpu;
+ } term_mac;
+ struct {
+ __be16 eth_type;
+ __be32 dst4;
+ __be32 dst4_mask;
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 group_id;
+ } ucast_routing;
+ struct {
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ int has_eth_dst;
+ int has_eth_dst_mask;
+ __be16 vlan_id;
+ u32 tunnel_id;
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 group_id;
+ bool copy_to_cpu;
+ } bridge;
+ struct {
+ u32 in_pport;
+ u32 in_pport_mask;
+ u8 eth_src[ETH_ALEN];
+ u8 eth_src_mask[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ u8 eth_dst_mask[ETH_ALEN];
+ __be16 eth_type;
+ __be16 vlan_id;
+ __be16 vlan_id_mask;
+ u8 ip_proto;
+ u8 ip_proto_mask;
+ u8 ip_tos;
+ u8 ip_tos_mask;
+ u32 group_id;
+ } acl;
+ };
+};
+
+struct ofdpa_flow_tbl_entry {
+ struct hlist_node entry;
+ u32 cmd;
+ u64 cookie;
+ struct ofdpa_flow_tbl_key key;
+ size_t key_len;
+ u32 key_crc32; /* key */
+};
+
+struct ofdpa_group_tbl_entry {
+ struct hlist_node entry;
+ u32 cmd;
+ u32 group_id; /* key */
+ u16 group_count;
+ u32 *group_ids;
+ union {
+ struct {
+ u8 pop_vlan;
+ } l2_interface;
+ struct {
+ u8 eth_src[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ __be16 vlan_id;
+ u32 group_id;
+ } l2_rewrite;
+ struct {
+ u8 eth_src[ETH_ALEN];
+ u8 eth_dst[ETH_ALEN];
+ __be16 vlan_id;
+ bool ttl_check;
+ u32 group_id;
+ } l3_unicast;
+ };
+};
+
+struct ofdpa_fdb_tbl_entry {
+ struct hlist_node entry;
+ u32 key_crc32; /* key */
+ bool learned;
+ unsigned long touched;
+ struct ofdpa_fdb_tbl_key {
+ struct ofdpa_port *ofdpa_port;
+ u8 addr[ETH_ALEN];
+ __be16 vlan_id;
+ } key;
+};
+
+struct ofdpa_internal_vlan_tbl_entry {
+ struct hlist_node entry;
+ int ifindex; /* key */
+ u32 ref_count;
+ __be16 vlan_id;
+};
+
+struct ofdpa_neigh_tbl_entry {
+ struct hlist_node entry;
+ __be32 ip_addr; /* key */
+ struct net_device *dev;
+ u32 ref_count;
+ u32 index;
+ u8 eth_dst[ETH_ALEN];
+ bool ttl_check;
+};
+
+enum {
+ OFDPA_CTRL_LINK_LOCAL_MCAST,
+ OFDPA_CTRL_LOCAL_ARP,
+ OFDPA_CTRL_IPV4_MCAST,
+ OFDPA_CTRL_IPV6_MCAST,
+ OFDPA_CTRL_DFLT_BRIDGING,
+ OFDPA_CTRL_DFLT_OVS,
+ OFDPA_CTRL_MAX,
+};
+
+#define OFDPA_INTERNAL_VLAN_ID_BASE 0x0f00
+#define OFDPA_N_INTERNAL_VLANS 255
+#define OFDPA_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
+#define OFDPA_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
+#define OFDPA_UNTAGGED_VID 0
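+
+/* Untagged traffic (VID 0) is mapped to a per-port internal VLAN ID
+ * drawn from the range 0x0f00-0x0ffe; see ofdpa_port_vid_to_vlan()
+ * and ofdpa_vlan_id_is_internal() below.
+ */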
+
+struct ofdpa {
+ struct rocker *rocker;
+ DECLARE_HASHTABLE(flow_tbl, 16);
+ spinlock_t flow_tbl_lock; /* for flow tbl accesses */
+ u64 flow_tbl_next_cookie;
+ DECLARE_HASHTABLE(group_tbl, 16);
+ spinlock_t group_tbl_lock; /* for group tbl accesses */
+ struct timer_list fdb_cleanup_timer;
+ DECLARE_HASHTABLE(fdb_tbl, 16);
+ spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
+ unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
+ DECLARE_HASHTABLE(internal_vlan_tbl, 8);
+ spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
+ DECLARE_HASHTABLE(neigh_tbl, 16);
+ spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
+ u32 neigh_tbl_next_index;
+ unsigned long ageing_time;
+};
+
+struct ofdpa_port {
+ struct ofdpa *ofdpa;
+ struct rocker_port *rocker_port;
+ struct net_device *dev;
+ u32 pport;
+ struct net_device *bridge_dev;
+ __be16 internal_vlan_id;
+ int stp_state;
+ u32 brport_flags;
+ unsigned long ageing_time;
+ bool ctrls[OFDPA_CTRL_MAX];
+ unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
+};
+
+static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
+static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+/* Rocker priority levels for flow table entries. A higher-priority
+ * match takes precedence over a lower-priority one.
+ */
+
+enum {
+ OFDPA_PRIORITY_UNKNOWN = 0,
+ OFDPA_PRIORITY_IG_PORT = 1,
+ OFDPA_PRIORITY_VLAN = 1,
+ OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
+ OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
+ OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
+ OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
+ OFDPA_PRIORITY_BRIDGING_VLAN = 3,
+ OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
+ OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
+ OFDPA_PRIORITY_BRIDGING_TENANT = 3,
+ OFDPA_PRIORITY_ACL_CTRL = 3,
+ OFDPA_PRIORITY_ACL_NORMAL = 2,
+ OFDPA_PRIORITY_ACL_DFLT = 1,
+};
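+
+/* For example, in the ACL table a control-frame match at
+ * OFDPA_PRIORITY_ACL_CTRL (3) wins over OFDPA_PRIORITY_ACL_NORMAL (2),
+ * which in turn wins over OFDPA_PRIORITY_ACL_DFLT (1).
+ */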
+
+static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
+{
+ u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
+ u16 end = 0x0ffe;
+ u16 _vlan_id = ntohs(vlan_id);
+
+ return (_vlan_id >= start && _vlan_id <= end);
+}
+
+static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
+ u16 vid, bool *pop_vlan)
+{
+ __be16 vlan_id;
+
+ if (pop_vlan)
+ *pop_vlan = false;
+ vlan_id = htons(vid);
+ if (!vlan_id) {
+ vlan_id = ofdpa_port->internal_vlan_id;
+ if (pop_vlan)
+ *pop_vlan = true;
+ }
+
+ return vlan_id;
+}
+
+static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
+ __be16 vlan_id)
+{
+ if (ofdpa_vlan_id_is_internal(vlan_id))
+ return 0;
+
+ return ntohs(vlan_id);
+}
+
+static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
+ const char *kind)
+{
+ return ofdpa_port->bridge_dev &&
+ !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
+}
+
+static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
+{
+ return ofdpa_port_is_slave(ofdpa_port, "bridge");
+}
+
+static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
+{
+ return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
+}
+
+#define OFDPA_OP_FLAG_REMOVE BIT(0)
+#define OFDPA_OP_FLAG_NOWAIT BIT(1)
+#define OFDPA_OP_FLAG_LEARNED BIT(2)
+#define OFDPA_OP_FLAG_REFRESH BIT(3)
+
+static bool ofdpa_flags_nowait(int flags)
+{
+ return flags & OFDPA_OP_FLAG_NOWAIT;
+}
+
+static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
+ size_t size)
+{
+ struct switchdev_trans_item *elem = NULL;
+ gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
+ GFP_ATOMIC : GFP_KERNEL;
+
+ /* If in transaction prepare phase, allocate the memory
+ * and enqueue it on a transaction. If in transaction
+ * commit phase, dequeue the memory from the transaction
+ * rather than re-allocating it. The idea is that the
+ * driver code paths for prepare and commit are identical,
+ * so the memory allocated in the prepare phase is the
+ * memory used in the commit phase.
+ */
+
+ if (!trans) {
+ elem = kzalloc(size + sizeof(*elem), gfp_flags);
+ } else if (switchdev_trans_ph_prepare(trans)) {
+ elem = kzalloc(size + sizeof(*elem), gfp_flags);
+ if (!elem)
+ return NULL;
+ switchdev_trans_item_enqueue(trans, elem, kfree, elem);
+ } else {
+ elem = switchdev_trans_item_dequeue(trans);
+ }
+
+ return elem ? elem + 1 : NULL;
+}
+
+static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
+ size_t size)
+{
+ return __ofdpa_mem_alloc(trans, flags, size);
+}
+
+static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
+ size_t n, size_t size)
+{
+ return __ofdpa_mem_alloc(trans, flags, n * size);
+}
+
+static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
+{
+ struct switchdev_trans_item *elem;
+
+ /* Frees are ignored if in transaction prepare phase. The
+ * memory remains on the per-port list until freed in the
+ * commit phase.
+ */
+
+ if (switchdev_trans_ph_prepare(trans))
+ return;
+
+ elem = (struct switchdev_trans_item *) mem - 1;
+ kfree(elem);
+}
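+
+/* An illustrative prepare/commit pairing (the caller shape is a
+ * sketch, not code from this driver):
+ *
+ * prepare phase: mem = ofdpa_kzalloc(trans, flags, size) allocates
+ * and enqueues on the transaction; ofdpa_kfree(trans, mem) is a no-op.
+ * commit phase: the same two calls dequeue that memory from the
+ * transaction and then really free it.
+ */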
+
+/*************************************************************
+ * Flow, group, FDB, internal VLAN and neigh command prepares
+ *************************************************************/
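+
+/* Each prepare callback below serializes one table entry into a
+ * command descriptor as TLVs nested under ROCKER_TLV_CMD_INFO;
+ * -EMSGSIZE means the entry did not fit in the descriptor buffer.
+ */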
+
+static int
+ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+ const struct ofdpa_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+ entry->key.ig_port.in_pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+ entry->key.ig_port.in_pport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.ig_port.goto_tbl))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+ const struct ofdpa_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+ entry->key.vlan.in_pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.vlan.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.vlan.vlan_id_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.vlan.goto_tbl))
+ return -EMSGSIZE;
+ if (entry->key.vlan.untagged &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
+ entry->key.vlan.new_vlan_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+ const struct ofdpa_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+ entry->key.term_mac.in_pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+ entry->key.term_mac.in_pport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.term_mac.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.term_mac.eth_dst))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.term_mac.eth_dst_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.term_mac.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.term_mac.vlan_id_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.term_mac.goto_tbl))
+ return -EMSGSIZE;
+ if (entry->key.term_mac.copy_to_cpu &&
+ rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
+ entry->key.term_mac.copy_to_cpu))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
+ const struct ofdpa_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.ucast_routing.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
+ entry->key.ucast_routing.dst4))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
+ entry->key.ucast_routing.dst4_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.ucast_routing.goto_tbl))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.ucast_routing.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+ const struct ofdpa_flow_tbl_entry *entry)
+{
+ if (entry->key.bridge.has_eth_dst &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.bridge.eth_dst))
+ return -EMSGSIZE;
+ if (entry->key.bridge.has_eth_dst_mask &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.bridge.eth_dst_mask))
+ return -EMSGSIZE;
+ if (entry->key.bridge.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.bridge.vlan_id))
+ return -EMSGSIZE;
+ if (entry->key.bridge.tunnel_id &&
+ rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
+ entry->key.bridge.tunnel_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
+ entry->key.bridge.goto_tbl))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.bridge.group_id))
+ return -EMSGSIZE;
+ if (entry->key.bridge.copy_to_cpu &&
+ rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
+ entry->key.bridge.copy_to_cpu))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+ const struct ofdpa_flow_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
+ entry->key.acl.in_pport))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
+ entry->key.acl.in_pport_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->key.acl.eth_src))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
+ ETH_ALEN, entry->key.acl.eth_src_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->key.acl.eth_dst))
+ return -EMSGSIZE;
+ if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
+ ETH_ALEN, entry->key.acl.eth_dst_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
+ entry->key.acl.eth_type))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->key.acl.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
+ entry->key.acl.vlan_id_mask))
+ return -EMSGSIZE;
+
+ switch (ntohs(entry->key.acl.eth_type)) {
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
+ entry->key.acl.ip_proto))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
+ entry->key.acl.ip_proto_mask))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
+ entry->key.acl.ip_tos & 0x3f))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
+ entry->key.acl.ip_tos_mask & 0x3f))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
+ (entry->key.acl.ip_tos & 0xc0) >> 6))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info,
+ ROCKER_TLV_OF_DPA_IP_ECN_MASK,
+ (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
+ return -EMSGSIZE;
+ break;
+ }
+
+ if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
+ rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->key.acl.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct ofdpa_flow_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+ int err = 0;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
+ entry->key.tbl_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
+ entry->key.priority))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
+ entry->cookie))
+ return -EMSGSIZE;
+
+ switch (entry->key.tbl_id) {
+ case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
+ err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_VLAN:
+ err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
+ err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
+ err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
+ err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
+ err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct ofdpa_flow_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
+ entry->cookie))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
+ struct ofdpa_group_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
+ ROCKER_GROUP_PORT_GET(entry->group_id)))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
+ entry->l2_interface.pop_vlan))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
+ const struct ofdpa_group_tbl_entry *entry)
+{
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
+ entry->l2_rewrite.group_id))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->l2_rewrite.eth_src))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->l2_rewrite.eth_dst))
+ return -EMSGSIZE;
+ if (entry->l2_rewrite.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->l2_rewrite.vlan_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
+ const struct ofdpa_group_tbl_entry *entry)
+{
+ int i;
+ struct rocker_tlv *group_ids;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
+ entry->group_count))
+ return -EMSGSIZE;
+
+ group_ids = rocker_tlv_nest_start(desc_info,
+ ROCKER_TLV_OF_DPA_GROUP_IDS);
+ if (!group_ids)
+ return -EMSGSIZE;
+
+ for (i = 0; i < entry->group_count; i++)
+ /* Note TLV array is 1-based */
+ if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
+ return -EMSGSIZE;
+
+ rocker_tlv_nest_end(desc_info, group_ids);
+
+ return 0;
+}
+
+static int
+ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
+ const struct ofdpa_group_tbl_entry *entry)
+{
+ if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
+ ETH_ALEN, entry->l3_unicast.eth_src))
+ return -EMSGSIZE;
+ if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
+ rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
+ ETH_ALEN, entry->l3_unicast.eth_dst))
+ return -EMSGSIZE;
+ if (entry->l3_unicast.vlan_id &&
+ rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
+ entry->l3_unicast.vlan_id))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
+ entry->l3_unicast.ttl_check))
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
+ entry->l3_unicast.group_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ struct ofdpa_group_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+ int err = 0;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->group_id))
+ return -EMSGSIZE;
+
+ switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
+ err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
+ err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+ err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry);
+ break;
+ case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
+ err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry);
+ break;
+ default:
+ err = -ENOTSUPP;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv)
+{
+ const struct ofdpa_group_tbl_entry *entry = priv;
+ struct rocker_tlv *cmd_info;
+
+ if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
+ return -EMSGSIZE;
+ cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+ if (!cmd_info)
+ return -EMSGSIZE;
+ if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
+ entry->group_id))
+ return -EMSGSIZE;
+ rocker_tlv_nest_end(desc_info, cmd_info);
+
+ return 0;
+}
+
+/***************************************************
+ * Flow, group, FDB, internal VLAN and neigh tables
+ ***************************************************/
+
+static struct ofdpa_flow_tbl_entry *
+ofdpa_flow_tbl_find(const struct ofdpa *ofdpa,
+ const struct ofdpa_flow_tbl_entry *match)
+{
+ struct ofdpa_flow_tbl_entry *found;
+ size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
+
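+	/* A zero key_len compares the full key; entries may set a
+	 * shorter key_len to match on a key prefix only (unicast
+	 * routing trims its group_id, for example).
+	 */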
+ hash_for_each_possible(ofdpa->flow_tbl, found,
+ entry, match->key_crc32) {
+ if (memcmp(&found->key, &match->key, key_len) == 0)
+ return found;
+ }
+
+ return NULL;
+}
+
+static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ struct ofdpa_flow_tbl_entry *match)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_flow_tbl_entry *found;
+ size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
+ unsigned long lock_flags;
+
+ match->key_crc32 = crc32(~0, &match->key, key_len);
+
+ spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);
+
+ found = ofdpa_flow_tbl_find(ofdpa, match);
+
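+	/* If an entry with the same key already exists, take over its
+	 * cookie and modify it in place; otherwise allocate a fresh
+	 * cookie and add a new entry.
+	 */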
+ if (found) {
+ match->cookie = found->cookie;
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_del(&found->entry);
+ ofdpa_kfree(trans, found);
+ found = match;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
+ } else {
+ found = match;
+ found->cookie = ofdpa->flow_tbl_next_cookie++;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
+ }
+
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);
+
+ spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);
+
+ if (!switchdev_trans_ph_prepare(trans))
+ return rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_flow_tbl_add,
+ found, NULL, NULL);
+ return 0;
+}
+
+static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ struct ofdpa_flow_tbl_entry *match)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_flow_tbl_entry *found;
+ size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
+ unsigned long lock_flags;
+ int err = 0;
+
+ match->key_crc32 = crc32(~0, &match->key, key_len);
+
+ spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);
+
+ found = ofdpa_flow_tbl_find(ofdpa, match);
+
+ if (found) {
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_del(&found->entry);
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
+ }
+
+ spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);
+
+ ofdpa_kfree(trans, match);
+
+ if (found) {
+ if (!switchdev_trans_ph_prepare(trans))
+ err = rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_flow_tbl_del,
+ found, NULL, NULL);
+ ofdpa_kfree(trans, found);
+ }
+
+ return err;
+}
+
+static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ struct ofdpa_flow_tbl_entry *entry)
+{
+ if (flags & OFDPA_OP_FLAG_REMOVE)
+ return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
+ else
+ return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ u32 in_pport, u32 in_pport_mask,
+ enum rocker_of_dpa_table_id goto_tbl)
+{
+ struct ofdpa_flow_tbl_entry *entry;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.priority = OFDPA_PRIORITY_IG_PORT;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
+ entry->key.ig_port.in_pport = in_pport;
+ entry->key.ig_port.in_pport_mask = in_pport_mask;
+ entry->key.ig_port.goto_tbl = goto_tbl;
+
+ return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ u32 in_pport, __be16 vlan_id,
+ __be16 vlan_id_mask,
+ enum rocker_of_dpa_table_id goto_tbl,
+ bool untagged, __be16 new_vlan_id)
+{
+ struct ofdpa_flow_tbl_entry *entry;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.priority = OFDPA_PRIORITY_VLAN;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
+ entry->key.vlan.in_pport = in_pport;
+ entry->key.vlan.vlan_id = vlan_id;
+ entry->key.vlan.vlan_id_mask = vlan_id_mask;
+ entry->key.vlan.goto_tbl = goto_tbl;
+
+ entry->key.vlan.untagged = untagged;
+ entry->key.vlan.new_vlan_id = new_vlan_id;
+
+ return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ u32 in_pport, u32 in_pport_mask,
+ __be16 eth_type, const u8 *eth_dst,
+ const u8 *eth_dst_mask, __be16 vlan_id,
+ __be16 vlan_id_mask, bool copy_to_cpu,
+ int flags)
+{
+ struct ofdpa_flow_tbl_entry *entry;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ if (is_multicast_ether_addr(eth_dst)) {
+ entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
+ entry->key.term_mac.goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
+ } else {
+ entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
+ entry->key.term_mac.goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+ }
+
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+ entry->key.term_mac.in_pport = in_pport;
+ entry->key.term_mac.in_pport_mask = in_pport_mask;
+ entry->key.term_mac.eth_type = eth_type;
+ ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
+ ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
+ entry->key.term_mac.vlan_id = vlan_id;
+ entry->key.term_mac.vlan_id_mask = vlan_id_mask;
+ entry->key.term_mac.copy_to_cpu = copy_to_cpu;
+
+ return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ const u8 *eth_dst, const u8 *eth_dst_mask,
+ __be16 vlan_id, u32 tunnel_id,
+ enum rocker_of_dpa_table_id goto_tbl,
+ u32 group_id, bool copy_to_cpu)
+{
+ struct ofdpa_flow_tbl_entry *entry;
+ u32 priority;
+ bool vlan_bridging = !!vlan_id;
+	bool dflt = !eth_dst || eth_dst_mask;
+ bool wild = false;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
+
+ if (eth_dst) {
+ entry->key.bridge.has_eth_dst = 1;
+ ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
+ }
+ if (eth_dst_mask) {
+ entry->key.bridge.has_eth_dst_mask = 1;
+ ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
+ if (!ether_addr_equal(eth_dst_mask, ff_mac))
+ wild = true;
+ }
+
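+	/* Pick the entry priority from the specificity of the match:
+	 * VLAN vs tenant bridging, default vs specific dst, wildcarded
+	 * vs exact mask.
+	 */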
+ priority = OFDPA_PRIORITY_UNKNOWN;
+ if (vlan_bridging && dflt && wild)
+ priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
+ else if (vlan_bridging && dflt && !wild)
+ priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
+ else if (vlan_bridging && !dflt)
+ priority = OFDPA_PRIORITY_BRIDGING_VLAN;
+ else if (!vlan_bridging && dflt && wild)
+ priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
+ else if (!vlan_bridging && dflt && !wild)
+ priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
+ else if (!vlan_bridging && !dflt)
+ priority = OFDPA_PRIORITY_BRIDGING_TENANT;
+
+ entry->key.priority = priority;
+ entry->key.bridge.vlan_id = vlan_id;
+ entry->key.bridge.tunnel_id = tunnel_id;
+ entry->key.bridge.goto_tbl = goto_tbl;
+ entry->key.bridge.group_id = group_id;
+ entry->key.bridge.copy_to_cpu = copy_to_cpu;
+
+ return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ __be16 eth_type, __be32 dst,
+ __be32 dst_mask, u32 priority,
+ enum rocker_of_dpa_table_id goto_tbl,
+ u32 group_id, int flags)
+{
+ struct ofdpa_flow_tbl_entry *entry;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+ entry->key.priority = priority;
+ entry->key.ucast_routing.eth_type = eth_type;
+ entry->key.ucast_routing.dst4 = dst;
+ entry->key.ucast_routing.dst4_mask = dst_mask;
+ entry->key.ucast_routing.goto_tbl = goto_tbl;
+ entry->key.ucast_routing.group_id = group_id;
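+	/* The nexthop group_id is the action, not part of the match;
+	 * trim it from the compared key so replacing a route's nexthop
+	 * modifies the existing entry instead of adding a duplicate.
+	 */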
+ entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
+ ucast_routing.group_id);
+
+ return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ u32 in_pport, u32 in_pport_mask,
+ const u8 *eth_src, const u8 *eth_src_mask,
+ const u8 *eth_dst, const u8 *eth_dst_mask,
+ __be16 eth_type, __be16 vlan_id,
+ __be16 vlan_id_mask, u8 ip_proto,
+ u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
+ u32 group_id)
+{
+ u32 priority;
+ struct ofdpa_flow_tbl_entry *entry;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ priority = OFDPA_PRIORITY_ACL_NORMAL;
+ if (eth_dst && eth_dst_mask) {
+ if (ether_addr_equal(eth_dst_mask, mcast_mac))
+ priority = OFDPA_PRIORITY_ACL_DFLT;
+ else if (is_link_local_ether_addr(eth_dst))
+ priority = OFDPA_PRIORITY_ACL_CTRL;
+ }
+
+ entry->key.priority = priority;
+ entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ entry->key.acl.in_pport = in_pport;
+ entry->key.acl.in_pport_mask = in_pport_mask;
+
+ if (eth_src)
+ ether_addr_copy(entry->key.acl.eth_src, eth_src);
+ if (eth_src_mask)
+ ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
+ if (eth_dst)
+ ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
+ if (eth_dst_mask)
+ ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
+
+ entry->key.acl.eth_type = eth_type;
+ entry->key.acl.vlan_id = vlan_id;
+ entry->key.acl.vlan_id_mask = vlan_id_mask;
+ entry->key.acl.ip_proto = ip_proto;
+ entry->key.acl.ip_proto_mask = ip_proto_mask;
+ entry->key.acl.ip_tos = ip_tos;
+ entry->key.acl.ip_tos_mask = ip_tos_mask;
+ entry->key.acl.group_id = group_id;
+
+ return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static struct ofdpa_group_tbl_entry *
+ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
+ const struct ofdpa_group_tbl_entry *match)
+{
+ struct ofdpa_group_tbl_entry *found;
+
+ hash_for_each_possible(ofdpa->group_tbl, found,
+ entry, match->group_id) {
+ if (found->group_id == match->group_id)
+ return found;
+ }
+
+ return NULL;
+}
+
+static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
+ struct ofdpa_group_tbl_entry *entry)
+{
+ switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
+ case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
+ ofdpa_kfree(trans, entry->group_ids);
+ break;
+ default:
+ break;
+ }
+ ofdpa_kfree(trans, entry);
+}
+
+static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ struct ofdpa_group_tbl_entry *match)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_group_tbl_entry *found;
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);
+
+ found = ofdpa_group_tbl_find(ofdpa, match);
+
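+	/* Same add-or-modify scheme as the flow table: an existing
+	 * group is replaced in place with GROUP_MOD, a new one is
+	 * added with GROUP_ADD.
+	 */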
+ if (found) {
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_del(&found->entry);
+ ofdpa_group_tbl_entry_free(trans, found);
+ found = match;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
+ } else {
+ found = match;
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
+ }
+
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_add(ofdpa->group_tbl, &found->entry, found->group_id);
+
+ spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
+
+ if (!switchdev_trans_ph_prepare(trans))
+ return rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_group_tbl_add,
+ found, NULL, NULL);
+ return 0;
+}
+
+static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ struct ofdpa_group_tbl_entry *match)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_group_tbl_entry *found;
+ unsigned long lock_flags;
+ int err = 0;
+
+ spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);
+
+ found = ofdpa_group_tbl_find(ofdpa, match);
+
+ if (found) {
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_del(&found->entry);
+ found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
+ }
+
+ spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);
+
+ ofdpa_group_tbl_entry_free(trans, match);
+
+ if (found) {
+ if (!switchdev_trans_ph_prepare(trans))
+ err = rocker_cmd_exec(ofdpa_port->rocker_port,
+ ofdpa_flags_nowait(flags),
+ ofdpa_cmd_group_tbl_del,
+ found, NULL, NULL);
+ ofdpa_group_tbl_entry_free(trans, found);
+ }
+
+ return err;
+}
+
+static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ struct ofdpa_group_tbl_entry *entry)
+{
+ if (flags & OFDPA_OP_FLAG_REMOVE)
+ return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
+ else
+ return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ __be16 vlan_id, u32 out_pport,
+ int pop_vlan)
+{
+ struct ofdpa_group_tbl_entry *entry;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
+ entry->l2_interface.pop_vlan = pop_vlan;
+
+ return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ int flags, u8 group_count,
+ const u32 *group_ids, u32 group_id)
+{
+ struct ofdpa_group_tbl_entry *entry;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->group_id = group_id;
+ entry->group_count = group_count;
+
+ entry->group_ids = ofdpa_kcalloc(trans, flags,
+ group_count, sizeof(u32));
+ if (!entry->group_ids) {
+ ofdpa_kfree(trans, entry);
+ return -ENOMEM;
+ }
+ memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
+
+ return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ __be16 vlan_id, u8 group_count,
+ const u32 *group_ids, u32 group_id)
+{
+ return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
+ group_count, group_ids,
+ group_id);
+}
+
+static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ u32 index, const u8 *src_mac, const u8 *dst_mac,
+ __be16 vlan_id, bool ttl_check, u32 pport)
+{
+ struct ofdpa_group_tbl_entry *entry;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
+ if (src_mac)
+ ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
+ if (dst_mac)
+ ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
+ entry->l3_unicast.vlan_id = vlan_id;
+ entry->l3_unicast.ttl_check = ttl_check;
+ entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
+
+ return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
+}
+
+static struct ofdpa_neigh_tbl_entry *
+ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr)
+{
+ struct ofdpa_neigh_tbl_entry *found;
+
+ hash_for_each_possible(ofdpa->neigh_tbl, found,
+ entry, be32_to_cpu(ip_addr))
+ if (found->ip_addr == ip_addr)
+ return found;
+
+ return NULL;
+}
+
+static void ofdpa_neigh_add(struct ofdpa *ofdpa,
+ struct switchdev_trans *trans,
+ struct ofdpa_neigh_tbl_entry *entry)
+{
+ if (!switchdev_trans_ph_commit(trans))
+ entry->index = ofdpa->neigh_tbl_next_index++;
+ if (switchdev_trans_ph_prepare(trans))
+ return;
+ entry->ref_count++;
+ hash_add(ofdpa->neigh_tbl, &entry->entry,
+ be32_to_cpu(entry->ip_addr));
+}
+
+static void ofdpa_neigh_del(struct switchdev_trans *trans,
+ struct ofdpa_neigh_tbl_entry *entry)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return;
+ if (--entry->ref_count == 0) {
+ hash_del(&entry->entry);
+ ofdpa_kfree(trans, entry);
+ }
+}
+
+static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry,
+ struct switchdev_trans *trans,
+ const u8 *eth_dst, bool ttl_check)
+{
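+	/* A NULL eth_dst means the caller is taking another reference
+	 * on the entry rather than updating its MAC.
+	 */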
+ if (eth_dst) {
+ ether_addr_copy(entry->eth_dst, eth_dst);
+ entry->ttl_check = ttl_check;
+ } else if (!switchdev_trans_ph_prepare(trans)) {
+ entry->ref_count++;
+ }
+}
+
+static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ int flags, __be32 ip_addr, const u8 *eth_dst)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_neigh_tbl_entry *entry;
+ struct ofdpa_neigh_tbl_entry *found;
+ unsigned long lock_flags;
+ __be16 eth_type = htons(ETH_P_IP);
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 group_id;
+ u32 priority = 0;
+ bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
+ bool updating;
+ bool removing;
+ int err = 0;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
+
+ found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
+
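+	/* Classify the request against the table state: an add on an
+	 * existing entry is an update, a remove needs an existing
+	 * entry, and a pure add must not find one.
+	 */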
+ updating = found && adding;
+ removing = found && !adding;
+ adding = !found && adding;
+
+ if (adding) {
+ entry->ip_addr = ip_addr;
+ entry->dev = ofdpa_port->dev;
+ ether_addr_copy(entry->eth_dst, eth_dst);
+ entry->ttl_check = true;
+ ofdpa_neigh_add(ofdpa, trans, entry);
+ } else if (removing) {
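+		/* Copy the entry before it is deleted so the group and
+		 * route teardown below still has valid data to work on.
+		 */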
+ memcpy(entry, found, sizeof(*entry));
+ ofdpa_neigh_del(trans, found);
+ } else if (updating) {
+ ofdpa_neigh_update(found, trans, eth_dst, true);
+ memcpy(entry, found, sizeof(*entry));
+ } else {
+ err = -ENOENT;
+ }
+
+ spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
+
+ if (err)
+ goto err_out;
+
+ /* For each active neighbor, we have an L3 unicast group and
+ * a /32 route to the neighbor, which uses the L3 unicast
+ * group. The L3 unicast group can also be referred to by
+ * other routes' nexthops.
+ */
+
+ err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags,
+ entry->index,
+ ofdpa_port->dev->dev_addr,
+ entry->eth_dst,
+ ofdpa_port->internal_vlan_id,
+ entry->ttl_check,
+ ofdpa_port->pport);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n",
+ err, entry->index);
+ goto err_out;
+ }
+
+ if (adding || removing) {
+ group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
+ err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans,
+ eth_type, ip_addr,
+ inet_make_mask(32),
+ priority, goto_tbl,
+ group_id, flags);
+
+ if (err)
+ netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
+ err, &entry->ip_addr, group_id);
+ }
+
+err_out:
+ if (!adding)
+ ofdpa_kfree(trans, entry);
+
+ return err;
+}
+
+static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ __be32 ip_addr)
+{
+ struct net_device *dev = ofdpa_port->dev;
+ struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
+ int err = 0;
+
+ if (!n) {
+ n = neigh_create(&arp_tbl, &ip_addr, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ }
+
+ /* If the neigh is already resolved, then go ahead and
+ * install the entry, otherwise start the ARP process to
+ * resolve the neigh.
+ */
+
+ if (n->nud_state & NUD_VALID)
+ err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0,
+ ip_addr, n->ha);
+ else
+ neigh_event_send(n, NULL);
+
+ neigh_release(n);
+ return err;
+}
+
+static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ __be32 ip_addr, u32 *index)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_neigh_tbl_entry *entry;
+ struct ofdpa_neigh_tbl_entry *found;
+ unsigned long lock_flags;
+ bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
+ bool updating;
+ bool removing;
+ bool resolved = true;
+ int err = 0;
+
+ entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
+
+ found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
+ if (found)
+ *index = found->index;
+
+ updating = found && adding;
+ removing = found && !adding;
+ adding = !found && adding;
+
+ if (adding) {
+ entry->ip_addr = ip_addr;
+ entry->dev = ofdpa_port->dev;
+ ofdpa_neigh_add(ofdpa, trans, entry);
+ *index = entry->index;
+ resolved = false;
+ } else if (removing) {
+ ofdpa_neigh_del(trans, found);
+ } else if (updating) {
+ ofdpa_neigh_update(found, trans, NULL, false);
+ resolved = !is_zero_ether_addr(found->eth_dst);
+ } else {
+ err = -ENOENT;
+ }
+
+ spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags);
+
+ if (!adding)
+ ofdpa_kfree(trans, entry);
+
+ if (err)
+ return err;
+
+	/* Resolved means the neigh's IP address maps to a known MAC. */
+
+ if (!resolved)
+ err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr);
+
+ return err;
+}
+
+static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa,
+ int port_index)
+{
+ struct rocker_port *rocker_port;
+
+ rocker_port = ofdpa->rocker->ports[port_index];
+ return rocker_port ? rocker_port->wpriv : NULL;
+}
+
+static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ int flags, __be16 vlan_id)
+{
+ struct ofdpa_port *p;
+ const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ unsigned int port_count = ofdpa->rocker->port_count;
+ u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+ u32 *group_ids;
+ u8 group_count = 0;
+ int err = 0;
+ int i;
+
+ group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32));
+ if (!group_ids)
+ return -ENOMEM;
+
+ /* Adjust the flood group for this VLAN. The flood group
+ * references an L2 interface group for each port in this
+ * VLAN.
+ */
+
+ for (i = 0; i < port_count; i++) {
+ p = ofdpa_port_get(ofdpa, i);
+ if (!p)
+ continue;
+ if (!ofdpa_port_is_bridged(p))
+ continue;
+ if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
+ group_ids[group_count++] =
+ ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
+ }
+ }
+
+ /* If there are no bridged ports in this VLAN, we're done */
+ if (group_count == 0)
+ goto no_ports_in_vlan;
+
+ err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id,
+ group_count, group_ids, group_id);
+ if (err)
+ netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
+
+no_ports_in_vlan:
+ ofdpa_kfree(trans, group_ids);
+ return err;
+}
+
+static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ __be16 vlan_id, bool pop_vlan)
+{
+ const struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ unsigned int port_count = ofdpa->rocker->port_count;
+ struct ofdpa_port *p;
+ bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
+ u32 out_pport;
+ int ref = 0;
+ int err;
+ int i;
+
+ /* An L2 interface group for this port in this VLAN, but
+ * only when port STP state is LEARNING|FORWARDING.
+ */
+
+ if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
+ ofdpa_port->stp_state == BR_STATE_FORWARDING) {
+ out_pport = ofdpa_port->pport;
+ err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
+ err, out_pport);
+ return err;
+ }
+ }
+
+ /* An L2 interface group for this VLAN to CPU port.
+ * Add when first port joins this VLAN and destroy when
+ * last port leaves this VLAN.
+ */
+
+ for (i = 0; i < port_count; i++) {
+ p = ofdpa_port_get(ofdpa, i);
+ if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
+ ref++;
+ }
+
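+	/* Only touch the CPU-port group when the first port joins the
+	 * VLAN (adding, ref == 1) or the last port leaves it
+	 * (removing, ref == 0).
+	 */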
+ if ((!adding || ref != 1) && (adding || ref != 0))
+ return 0;
+
+ out_pport = 0;
+ err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static struct ofdpa_ctrl {
+ const u8 *eth_dst;
+ const u8 *eth_dst_mask;
+ __be16 eth_type;
+ bool acl;
+ bool bridge;
+ bool term;
+ bool copy_to_cpu;
+} ofdpa_ctrls[] = {
+ [OFDPA_CTRL_LINK_LOCAL_MCAST] = {
+ /* pass link local multicast pkts up to CPU for filtering */
+ .eth_dst = ll_mac,
+ .eth_dst_mask = ll_mask,
+ .acl = true,
+ },
+ [OFDPA_CTRL_LOCAL_ARP] = {
+ /* pass local ARP pkts up to CPU */
+ .eth_dst = zero_mac,
+ .eth_dst_mask = zero_mac,
+ .eth_type = htons(ETH_P_ARP),
+ .acl = true,
+ },
+ [OFDPA_CTRL_IPV4_MCAST] = {
+ /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
+ .eth_dst = ipv4_mcast,
+ .eth_dst_mask = ipv4_mask,
+ .eth_type = htons(ETH_P_IP),
+ .term = true,
+ .copy_to_cpu = true,
+ },
+ [OFDPA_CTRL_IPV6_MCAST] = {
+ /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
+ .eth_dst = ipv6_mcast,
+ .eth_dst_mask = ipv6_mask,
+ .eth_type = htons(ETH_P_IPV6),
+ .term = true,
+ .copy_to_cpu = true,
+ },
+ [OFDPA_CTRL_DFLT_BRIDGING] = {
+ /* flood any pkts on vlan */
+ .bridge = true,
+ .copy_to_cpu = true,
+ },
+ [OFDPA_CTRL_DFLT_OVS] = {
+ /* pass all pkts up to CPU */
+ .eth_dst = zero_mac,
+ .eth_dst_mask = zero_mac,
+ .acl = true,
+ },
+};
+
+static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
+{
+ u32 in_pport = ofdpa_port->pport;
+ u32 in_pport_mask = 0xffffffff;
+ u32 out_pport = 0;
+ const u8 *eth_src = NULL;
+ const u8 *eth_src_mask = NULL;
+ __be16 vlan_id_mask = htons(0xffff);
+ u8 ip_proto = 0;
+ u8 ip_proto_mask = 0;
+ u8 ip_tos = 0;
+ u8 ip_tos_mask = 0;
+ u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
+ int err;
+
+ err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags,
+ in_pport, in_pport_mask,
+ eth_src, eth_src_mask,
+ ctrl->eth_dst, ctrl->eth_dst_mask,
+ ctrl->eth_type,
+ vlan_id, vlan_id_mask,
+ ip_proto, ip_proto_mask,
+ ip_tos, ip_tos_mask,
+ group_id);
+
+ if (err)
+ netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err);
+
+ return err;
+}
+
+static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ int flags,
+ const struct ofdpa_ctrl *ctrl,
+ __be16 vlan_id)
+{
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
+ u32 tunnel_id = 0;
+ int err;
+
+ if (!ofdpa_port_is_bridged(ofdpa_port))
+ return 0;
+
+ err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags,
+ ctrl->eth_dst, ctrl->eth_dst_mask,
+ vlan_id, tunnel_id,
+ goto_tbl, group_id, ctrl->copy_to_cpu);
+
+ if (err)
+ netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err);
+
+ return err;
+}
+
+static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
+{
+ u32 in_pport_mask = 0xffffffff;
+ __be16 vlan_id_mask = htons(0xffff);
+ int err;
+
+ if (ntohs(vlan_id) == 0)
+ vlan_id = ofdpa_port->internal_vlan_id;
+
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
+ ofdpa_port->pport, in_pport_mask,
+ ctrl->eth_type, ctrl->eth_dst,
+ ctrl->eth_dst_mask, vlan_id,
+ vlan_id_mask, ctrl->copy_to_cpu,
+ flags);
+
+ if (err)
+ netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err);
+
+ return err;
+}
+
+static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ const struct ofdpa_ctrl *ctrl, __be16 vlan_id)
+{
+ if (ctrl->acl)
+ return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
+ ctrl, vlan_id);
+ if (ctrl->bridge)
+ return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
+ ctrl, vlan_id);
+
+ if (ctrl->term)
+ return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
+ ctrl, vlan_id);
+
+ return -EOPNOTSUPP;
+}
+
+static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ __be16 vlan_id)
+{
+ int err = 0;
+ int i;
+
+ for (i = 0; i < OFDPA_CTRL_MAX; i++) {
+ if (ofdpa_port->ctrls[i]) {
+ err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
+ &ofdpa_ctrls[i], vlan_id);
+ if (err)
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ const struct ofdpa_ctrl *ctrl)
+{
+ u16 vid;
+ int err = 0;
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ if (!test_bit(vid, ofdpa_port->vlan_bitmap))
+ continue;
+ err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
+ ctrl, htons(vid));
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags, u16 vid)
+{
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+ u32 in_pport = ofdpa_port->pport;
+ __be16 vlan_id = htons(vid);
+ __be16 vlan_id_mask = htons(0xffff);
+ __be16 internal_vlan_id;
+ bool untagged;
+ bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
+ int err;
+
+ internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);
+
+ if (adding &&
+ test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
+ return 0; /* already added */
+ else if (!adding &&
+ !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
+ return 0; /* already removed */
+
+ change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
+
+ if (adding) {
+ err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
+ internal_vlan_id);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err);
+ goto err_out;
+ }
+ }
+
+ err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags,
+ internal_vlan_id, untagged);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err);
+ goto err_out;
+ }
+
+ err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags,
+ internal_vlan_id);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err);
+ goto err_out;
+ }
+
+ err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags,
+ in_pport, vlan_id, vlan_id_mask,
+ goto_tbl, untagged, internal_vlan_id);
+ if (err)
+ netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err);
+
+err_out:
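+	/* The prepare phase must leave the VLAN bitmap untouched, so
+	 * undo the earlier change_bit()
+	 */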
+ if (switchdev_trans_ph_prepare(trans))
+ change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);
+
+ return err;
+}
+
+static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags)
+{
+ enum rocker_of_dpa_table_id goto_tbl;
+ u32 in_pport;
+ u32 in_pport_mask;
+ int err;
+
+	/* Normal Ethernet frames: match pkts arriving on any local
+	 * physical port and send them on to the VLAN table.
+	 */
+
+ in_pport = 0;
+ in_pport_mask = 0xffff0000;
+ goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
+
+ err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags,
+ in_pport, in_pport_mask,
+ goto_tbl);
+ if (err)
+ netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err);
+
+ return err;
+}
+
+struct ofdpa_fdb_learn_work {
+ struct work_struct work;
+ struct ofdpa_port *ofdpa_port;
+ struct switchdev_trans *trans;
+ int flags;
+ u8 addr[ETH_ALEN];
+ u16 vid;
+};
+
+static void ofdpa_port_fdb_learn_work(struct work_struct *work)
+{
+ const struct ofdpa_fdb_learn_work *lw =
+ container_of(work, struct ofdpa_fdb_learn_work, work);
+ bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE);
+ bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED);
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = lw->addr;
+ info.vid = lw->vid;
+
+ rtnl_lock();
+ if (learned && removing)
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+ lw->ofdpa_port->dev, &info.info);
+ else if (learned && !removing)
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+ lw->ofdpa_port->dev, &info.info);
+ rtnl_unlock();
+
+	ofdpa_kfree(lw->trans, lw);
+}
+
+static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ const u8 *addr, __be16 vlan_id)
+{
+ struct ofdpa_fdb_learn_work *lw;
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 out_pport = ofdpa_port->pport;
+ u32 tunnel_id = 0;
+ u32 group_id = ROCKER_GROUP_NONE;
+ bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC);
+ bool copy_to_cpu = false;
+ int err;
+
+ if (ofdpa_port_is_bridged(ofdpa_port))
+ group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
+
+ if (!(flags & OFDPA_OP_FLAG_REFRESH)) {
+ err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr,
+ NULL, vlan_id, tunnel_id, goto_tbl,
+ group_id, copy_to_cpu);
+ if (err)
+ return err;
+ }
+
+ if (!syncing)
+ return 0;
+
+ if (!ofdpa_port_is_bridged(ofdpa_port))
+ return 0;
+
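+	/* Defer the switchdev FDB notification to a work item: the
+	 * notifiers take rtnl_lock and may sleep, while this function
+	 * can be called from atomic context.
+	 */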
+ lw = ofdpa_kzalloc(trans, flags, sizeof(*lw));
+ if (!lw)
+ return -ENOMEM;
+
+ INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work);
+
+ lw->ofdpa_port = ofdpa_port;
+ lw->trans = trans;
+ lw->flags = flags;
+ ether_addr_copy(lw->addr, addr);
+ lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id);
+
+ if (switchdev_trans_ph_prepare(trans))
+ ofdpa_kfree(trans, lw);
+ else
+ schedule_work(&lw->work);
+
+ return 0;
+}
+
+static struct ofdpa_fdb_tbl_entry *
+ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa,
+ const struct ofdpa_fdb_tbl_entry *match)
+{
+ struct ofdpa_fdb_tbl_entry *found;
+
+ hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32)
+ if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
+ return found;
+
+ return NULL;
+}
+
+static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ const unsigned char *addr,
+ __be16 vlan_id, int flags)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_fdb_tbl_entry *fdb;
+ struct ofdpa_fdb_tbl_entry *found;
+ bool removing = (flags & OFDPA_OP_FLAG_REMOVE);
+ unsigned long lock_flags;
+
+ fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb));
+ if (!fdb)
+ return -ENOMEM;
+
+ fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED);
+ fdb->touched = jiffies;
+ fdb->key.ofdpa_port = ofdpa_port;
+ ether_addr_copy(fdb->key.addr, addr);
+ fdb->key.vlan_id = vlan_id;
+ fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
+
+ spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
+
+ found = ofdpa_fdb_tbl_find(ofdpa, fdb);
+
+ if (found) {
+ found->touched = jiffies;
+ if (removing) {
+ ofdpa_kfree(trans, fdb);
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_del(&found->entry);
+ }
+ } else if (!removing) {
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_add(ofdpa->fdb_tbl, &fdb->entry,
+ fdb->key_crc32);
+ }
+
+ spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
+
+ /* Check if adding and already exists, or removing and can't find */
+ if (!found != !removing) {
+ ofdpa_kfree(trans, fdb);
+ if (!found && removing)
+ return 0;
+		/* Refresh the existing entry so its ageing timer is updated */
+ flags |= OFDPA_OP_FLAG_REFRESH;
+ }
+
+ return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id);
+}
+
+static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_fdb_tbl_entry *found;
+ unsigned long lock_flags;
+ struct hlist_node *tmp;
+ int bkt;
+ int err = 0;
+
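+	/* Nothing to flush while the port is still learning/forwarding */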
+ if (ofdpa_port->stp_state == BR_STATE_LEARNING ||
+ ofdpa_port->stp_state == BR_STATE_FORWARDING)
+ return 0;
+
+ flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE;
+
+ spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
+
+ hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.ofdpa_port != ofdpa_port)
+ continue;
+ if (!found->learned)
+ continue;
+ err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags,
+ found->key.addr,
+ found->key.vlan_id);
+ if (err)
+ goto err_out;
+ if (!switchdev_trans_ph_prepare(trans))
+ hash_del(&found->entry);
+ }
+
+err_out:
+ spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
+
+ return err;
+}
+
+static void ofdpa_fdb_cleanup(unsigned long data)
+{
+ struct ofdpa *ofdpa = (struct ofdpa *)data;
+ struct ofdpa_port *ofdpa_port;
+ struct ofdpa_fdb_tbl_entry *entry;
+ struct hlist_node *tmp;
+ unsigned long next_timer = jiffies + ofdpa->ageing_time;
+ unsigned long expires;
+ unsigned long lock_flags;
+ int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE |
+ OFDPA_OP_FLAG_LEARNED;
+ int bkt;
+
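+	/* Expire learned FDB entries whose per-port ageing time has
+	 * passed, then re-arm the timer for the earliest remaining
+	 * expiry.
+	 */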
+ spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
+
+ hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {
+ if (!entry->learned)
+ continue;
+ ofdpa_port = entry->key.ofdpa_port;
+ expires = entry->touched + ofdpa_port->ageing_time;
+ if (time_before_eq(expires, jiffies)) {
+ ofdpa_port_fdb_learn(ofdpa_port, NULL,
+ flags, entry->key.addr,
+ entry->key.vlan_id);
+ hash_del(&entry->entry);
+ } else if (time_before(expires, next_timer)) {
+ next_timer = expires;
+ }
+ }
+
+ spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
+
+ mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer));
+}
+
+static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags,
+ __be16 vlan_id)
+{
+ u32 in_pport_mask = 0xffffffff;
+ __be16 eth_type;
+ const u8 *dst_mac_mask = ff_mac;
+ __be16 vlan_id_mask = htons(0xffff);
+ bool copy_to_cpu = false;
+ int err;
+
+ if (ntohs(vlan_id) == 0)
+ vlan_id = ofdpa_port->internal_vlan_id;
+
+ eth_type = htons(ETH_P_IP);
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
+ ofdpa_port->pport, in_pport_mask,
+ eth_type, ofdpa_port->dev->dev_addr,
+ dst_mac_mask, vlan_id, vlan_id_mask,
+ copy_to_cpu, flags);
+ if (err)
+ return err;
+
+ eth_type = htons(ETH_P_IPV6);
+ err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans,
+ ofdpa_port->pport, in_pport_mask,
+ eth_type, ofdpa_port->dev->dev_addr,
+ dst_mac_mask, vlan_id, vlan_id_mask,
+ copy_to_cpu, flags);
+
+ return err;
+}
+
+static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, int flags)
+{
+ bool pop_vlan;
+ u32 out_pport;
+ __be16 vlan_id;
+ u16 vid;
+ int err;
+
+ /* Port will be forwarding-enabled if its STP state is LEARNING
+ * or FORWARDING. Traffic from CPU can still egress, regardless of
+ * port STP state. Use L2 interface group on port VLANs as a way
+ * to toggle port forwarding: if forwarding is disabled, L2
+ * interface group will not exist.
+ */
+
+ if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
+ ofdpa_port->stp_state != BR_STATE_FORWARDING)
+ flags |= OFDPA_OP_FLAG_REMOVE;
+
+ out_pport = ofdpa_port->pport;
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ if (!test_bit(vid, ofdpa_port->vlan_bitmap))
+ continue;
+ vlan_id = htons(vid);
+ pop_vlan = ofdpa_vlan_id_is_internal(vlan_id);
+ err = ofdpa_group_l2_interface(ofdpa_port, trans, flags,
+ vlan_id, out_pport, pop_vlan);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n",
+ err, out_pport);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ int flags, u8 state)
+{
+ bool want[OFDPA_CTRL_MAX] = { 0, };
+ bool prev_ctrls[OFDPA_CTRL_MAX];
+ u8 uninitialized_var(prev_state);
+ int err;
+ int i;
+
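+	/* The prepare phase must not leave the port state modified,
+	 * so snapshot ctrls and STP state here and restore them on
+	 * exit.
+	 */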
+ if (switchdev_trans_ph_prepare(trans)) {
+ memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
+ prev_state = ofdpa_port->stp_state;
+ }
+
+ if (ofdpa_port->stp_state == state)
+ return 0;
+
+ ofdpa_port->stp_state = state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ /* port is completely disabled */
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
+ break;
+ case BR_STATE_LEARNING:
+ case BR_STATE_FORWARDING:
+ if (!ofdpa_port_is_ovsed(ofdpa_port))
+ want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true;
+ want[OFDPA_CTRL_IPV4_MCAST] = true;
+ want[OFDPA_CTRL_IPV6_MCAST] = true;
+ if (ofdpa_port_is_bridged(ofdpa_port))
+ want[OFDPA_CTRL_DFLT_BRIDGING] = true;
+ else if (ofdpa_port_is_ovsed(ofdpa_port))
+ want[OFDPA_CTRL_DFLT_OVS] = true;
+ else
+ want[OFDPA_CTRL_LOCAL_ARP] = true;
+ break;
+ }
+
+ for (i = 0; i < OFDPA_CTRL_MAX; i++) {
+ if (want[i] != ofdpa_port->ctrls[i]) {
+ int ctrl_flags = flags |
+ (want[i] ? 0 : OFDPA_OP_FLAG_REMOVE);
+ err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags,
+ &ofdpa_ctrls[i]);
+ if (err)
+ goto err_out;
+ ofdpa_port->ctrls[i] = want[i];
+ }
+ }
+
+ err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags);
+ if (err)
+ goto err_out;
+
+ err = ofdpa_port_fwding(ofdpa_port, trans, flags);
+
+err_out:
+ if (switchdev_trans_ph_prepare(trans)) {
+ memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+ ofdpa_port->stp_state = prev_state;
+ }
+
+ return err;
+}
+
+static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags)
+{
+ if (ofdpa_port_is_bridged(ofdpa_port))
+ /* bridge STP will enable port */
+ return 0;
+
+ /* port is not bridged, so simulate going to FORWARDING state */
+ return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
+ BR_STATE_FORWARDING);
+}
+
+static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags)
+{
+ if (ofdpa_port_is_bridged(ofdpa_port))
+ /* bridge STP will disable port */
+ return 0;
+
+ /* port is not bridged, so simulate going to DISABLED state */
+ return ofdpa_port_stp_update(ofdpa_port, NULL, flags,
+ BR_STATE_DISABLED);
+}
+
+static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans,
+ u16 vid, u16 flags)
+{
+ int err;
+
+ /* XXX deal with flags for PVID and untagged */
+
+ err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid);
+ if (err)
+ return err;
+
+ err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid));
+ if (err)
+ ofdpa_port_vlan(ofdpa_port, trans,
+ OFDPA_OP_FLAG_REMOVE, vid);
+
+ return err;
+}
+
+static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port,
+ u16 vid, u16 flags)
+{
+ int err;
+
+ err = ofdpa_port_router_mac(ofdpa_port, NULL,
+ OFDPA_OP_FLAG_REMOVE, htons(vid));
+ if (err)
+ return err;
+
+ return ofdpa_port_vlan(ofdpa_port, NULL,
+ OFDPA_OP_FLAG_REMOVE, vid);
+}
+
+static struct ofdpa_internal_vlan_tbl_entry *
+ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex)
+{
+ struct ofdpa_internal_vlan_tbl_entry *found;
+
+ hash_for_each_possible(ofdpa->internal_vlan_tbl, found,
+ entry, ifindex) {
+ if (found->ifindex == ifindex)
+ return found;
+ }
+
+ return NULL;
+}
+
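+/* Look up (or allocate) the internal VLAN used for the given ifindex.
+ * Internal VLAN IDs come from a fixed pool starting at
+ * OFDPA_INTERNAL_VLAN_ID_BASE and are reference counted per ifindex.
+ */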
+static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port,
+ int ifindex)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_internal_vlan_tbl_entry *entry;
+ struct ofdpa_internal_vlan_tbl_entry *found;
+ unsigned long lock_flags;
+ int i;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return 0;
+
+ entry->ifindex = ifindex;
+
+ spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
+
+ found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
+ if (found) {
+ kfree(entry);
+ goto found;
+ }
+
+ found = entry;
+ hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex);
+
+ for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) {
+ if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap))
+ continue;
+ found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i);
+ goto found;
+ }
+
+ netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n");
+
+found:
+ found->ref_count++;
+ spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
+
+ return found->vlan_id;
+}
+
+static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port,
+ struct switchdev_trans *trans, __be32 dst,
+ int dst_len, const struct fib_info *fi,
+ u32 tb_id, int flags)
+{
+ const struct fib_nh *nh;
+ __be16 eth_type = htons(ETH_P_IP);
+ __be32 dst_mask = inet_make_mask(dst_len);
+ __be16 internal_vlan_id = ofdpa_port->internal_vlan_id;
+ u32 priority = fi->fib_priority;
+ enum rocker_of_dpa_table_id goto_tbl =
+ ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+ u32 group_id;
+ bool nh_on_port;
+ bool has_gw;
+ u32 index;
+ int err;
+
+ /* XXX support ECMP */
+
+ nh = fi->fib_nh;
+ nh_on_port = (fi->fib_dev == ofdpa_port->dev);
+ has_gw = !!nh->nh_gw;
+
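+	/* A route whose gateway is reachable through this port gets an
+	 * L3 unicast group as its nexthop; anything else is trapped to
+	 * the CPU via the port's internal VLAN L2 interface group.
+	 */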
+ if (has_gw && nh_on_port) {
+ err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags,
+ nh->nh_gw, &index);
+ if (err)
+ return err;
+
+ group_id = ROCKER_GROUP_L3_UNICAST(index);
+ } else {
+ /* Send to CPU for processing */
+ group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
+ }
+
+ err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst,
+ dst_mask, priority, goto_tbl,
+ group_id, flags);
+ if (err)
+ netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n",
+ err, &dst);
+
+ return err;
+}
+
+static void
+ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port,
+ int ifindex)
+{
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_internal_vlan_tbl_entry *found;
+ unsigned long lock_flags;
+ unsigned long bit;
+
+ spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags);
+
+ found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex);
+ if (!found) {
+ netdev_err(ofdpa_port->dev,
+ "ifindex (%d) not found in internal VLAN tbl\n",
+ ifindex);
+ goto not_found;
+ }
+
+ if (--found->ref_count <= 0) {
+ bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE;
+ clear_bit(bit, ofdpa->internal_vlan_bitmap);
+ hash_del(&found->entry);
+ kfree(found);
+ }
+
+not_found:
+ spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags);
+}
+
+/**********************************
+ * Rocker world ops implementation
+ **********************************/
+
+static int ofdpa_init(struct rocker *rocker)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+
+ ofdpa->rocker = rocker;
+
+ hash_init(ofdpa->flow_tbl);
+ spin_lock_init(&ofdpa->flow_tbl_lock);
+
+ hash_init(ofdpa->group_tbl);
+ spin_lock_init(&ofdpa->group_tbl_lock);
+
+ hash_init(ofdpa->fdb_tbl);
+ spin_lock_init(&ofdpa->fdb_tbl_lock);
+
+ hash_init(ofdpa->internal_vlan_tbl);
+ spin_lock_init(&ofdpa->internal_vlan_tbl_lock);
+
+ hash_init(ofdpa->neigh_tbl);
+ spin_lock_init(&ofdpa->neigh_tbl_lock);
+
+ setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
+ (unsigned long) ofdpa);
+ mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
+
+ ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
+
+ return 0;
+}
+
+static void ofdpa_fini(struct rocker *rocker)
+{
+ struct ofdpa *ofdpa = rocker->wpriv;
+
+ unsigned long flags;
+ struct ofdpa_flow_tbl_entry *flow_entry;
+ struct ofdpa_group_tbl_entry *group_entry;
+ struct ofdpa_fdb_tbl_entry *fdb_entry;
+ struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry;
+ struct ofdpa_neigh_tbl_entry *neigh_entry;
+ struct hlist_node *tmp;
+ int bkt;
+
+ del_timer_sync(&ofdpa->fdb_cleanup_timer);
+
+ spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags);
+ hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)
+ hash_del(&flow_entry->entry);
+ spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags);
+
+ spin_lock_irqsave(&ofdpa->group_tbl_lock, flags);
+ hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)
+ hash_del(&group_entry->entry);
+ spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags);
+
+ spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags);
+ hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)
+ hash_del(&fdb_entry->entry);
+ spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags);
+
+ spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags);
+ hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,
+ tmp, internal_vlan_entry, entry)
+ hash_del(&internal_vlan_entry->entry);
+ spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags);
+
+ spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags);
+ hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)
+ hash_del(&neigh_entry->entry);
+ spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags);
+}
+
+static int ofdpa_port_pre_init(struct rocker_port *rocker_port)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+
+ ofdpa_port->ofdpa = rocker_port->rocker->wpriv;
+ ofdpa_port->rocker_port = rocker_port;
+ ofdpa_port->dev = rocker_port->dev;
+ ofdpa_port->pport = rocker_port->pport;
+ ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+ ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME;
+ return 0;
+}
+
+static int ofdpa_port_init(struct rocker_port *rocker_port)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ int err;
+
+ switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false);
+ rocker_port_set_learning(rocker_port,
+ !!(ofdpa_port->brport_flags & BR_LEARNING));
+
+ err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "install ig port table failed\n");
+ return err;
+ }
+
+ ofdpa_port->internal_vlan_id =
+ ofdpa_port_internal_vlan_id_get(ofdpa_port,
+ ofdpa_port->dev->ifindex);
+
+ err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+ if (err) {
+ netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n");
+ goto err_untagged_vlan;
+ }
+ return 0;
+
+err_untagged_vlan:
+ ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
+ return err;
+}
+
+static void ofdpa_port_fini(struct rocker_port *rocker_port)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+
+ ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
+}
+
+static int ofdpa_port_open(struct rocker_port *rocker_port)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+
+ return ofdpa_port_fwd_enable(ofdpa_port, 0);
+}
+
+static void ofdpa_port_stop(struct rocker_port *rocker_port)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+
+ ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
+}
+
+static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
+ u8 state,
+ struct switchdev_trans *trans)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+
+ return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
+}
+
+static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
+ unsigned long brport_flags,
+ struct switchdev_trans *trans)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ unsigned long orig_flags;
+ int err = 0;
+
+ orig_flags = ofdpa_port->brport_flags;
+ ofdpa_port->brport_flags = brport_flags;
+ if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
+ !switchdev_trans_ph_prepare(trans))
+ err = rocker_port_set_learning(ofdpa_port->rocker_port,
+ !!(ofdpa_port->brport_flags & BR_LEARNING));
+
+ if (switchdev_trans_ph_prepare(trans))
+ ofdpa_port->brport_flags = orig_flags;
+
+ return err;
+}
+
+static int
+ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
+ unsigned long *p_brport_flags)
+{
+ const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+
+ *p_brport_flags = ofdpa_port->brport_flags;
+ return 0;
+}
+
+static int
+ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
+ u32 ageing_time,
+ struct switchdev_trans *trans)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+
+ if (!switchdev_trans_ph_prepare(trans)) {
+ ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ if (ofdpa_port->ageing_time < ofdpa->ageing_time)
+ ofdpa->ageing_time = ofdpa_port->ageing_time;
+ mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
+ }
+
+ return 0;
+}
+
+static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ u16 vid;
+ int err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ u16 vid;
+ int err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port,
+ struct switchdev_obj_port_vlan *vlan,
+ switchdev_obj_dump_cb_t *cb)
+{
+ const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ u16 vid;
+ int err = 0;
+
+ for (vid = 1; vid < VLAN_N_VID; vid++) {
+ if (!test_bit(vid, ofdpa_port->vlan_bitmap))
+ continue;
+ vlan->flags = 0;
+ if (ofdpa_vlan_id_is_internal(htons(vid)))
+ vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+ vlan->vid_begin = vlan->vid_end = vid;
+ err = cb(&vlan->obj);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+
+ return ofdpa_port_fib_ipv4(ofdpa_port, trans,
+ htonl(fib4->dst), fib4->dst_len,
+ &fib4->fi, fib4->tb_id, 0);
+}
+
+static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+
+ return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
+ htonl(fib4->dst), fib4->dst_len,
+ &fib4->fi, fib4->tb_id,
+ OFDPA_OP_FLAG_REMOVE);
+}
+
+static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
+
+ if (!ofdpa_port_is_bridged(ofdpa_port))
+ return -EINVAL;
+
+ return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0);
+}
+
+static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL);
+ int flags = OFDPA_OP_FLAG_REMOVE;
+
+ if (!ofdpa_port_is_bridged(ofdpa_port))
+ return -EINVAL;
+
+ return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags);
+}
+
+static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port,
+ struct switchdev_obj_port_fdb *fdb,
+ switchdev_obj_dump_cb_t *cb)
+{
+ const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ struct ofdpa *ofdpa = ofdpa_port->ofdpa;
+ struct ofdpa_fdb_tbl_entry *found;
+ struct hlist_node *tmp;
+ unsigned long lock_flags;
+ int bkt;
+ int err = 0;
+
+ spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags);
+ hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {
+ if (found->key.ofdpa_port != ofdpa_port)
+ continue;
+ ether_addr_copy(fdb->addr, found->key.addr);
+ fdb->ndm_state = NUD_REACHABLE;
+ fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port,
+ found->key.vlan_id);
+ err = cb(&fdb->obj);
+ if (err)
+ break;
+ }
+ spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags);
+
+ return err;
+}
+
+static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port,
+ struct net_device *bridge)
+{
+ int err;
+
+	/* The port is joining a bridge, so its internal VLAN is
+	 * going to change to the bridge's internal VLAN. Remove
+	 * the untagged VLAN (vid=0) from the port now and re-add
+	 * it once the internal VLAN has changed.
+	 */
+
+ err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
+ if (err)
+ return err;
+
+ ofdpa_port_internal_vlan_id_put(ofdpa_port,
+ ofdpa_port->dev->ifindex);
+ ofdpa_port->internal_vlan_id =
+ ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex);
+
+ ofdpa_port->bridge_dev = bridge;
+ switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true);
+
+ return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+}
+
+static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port)
+{
+ int err;
+
+ err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0);
+ if (err)
+ return err;
+
+ ofdpa_port_internal_vlan_id_put(ofdpa_port,
+ ofdpa_port->bridge_dev->ifindex);
+ ofdpa_port->internal_vlan_id =
+ ofdpa_port_internal_vlan_id_get(ofdpa_port,
+ ofdpa_port->dev->ifindex);
+
+ switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev,
+ false);
+ ofdpa_port->bridge_dev = NULL;
+
+ err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0);
+ if (err)
+ return err;
+
+ if (ofdpa_port->dev->flags & IFF_UP)
+ err = ofdpa_port_fwd_enable(ofdpa_port, 0);
+
+ return err;
+}
+
+static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port,
+ struct net_device *master)
+{
+ int err;
+
+ ofdpa_port->bridge_dev = master;
+
+ err = ofdpa_port_fwd_disable(ofdpa_port, 0);
+ if (err)
+ return err;
+ err = ofdpa_port_fwd_enable(ofdpa_port, 0);
+
+ return err;
+}
+
+static int ofdpa_port_master_linked(struct rocker_port *rocker_port,
+ struct net_device *master)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ int err = 0;
+
+ if (netif_is_bridge_master(master))
+ err = ofdpa_port_bridge_join(ofdpa_port, master);
+ else if (netif_is_ovs_master(master))
+ err = ofdpa_port_ovs_changed(ofdpa_port, master);
+ return err;
+}
+
+static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port,
+ struct net_device *master)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ int err = 0;
+
+ if (ofdpa_port_is_bridged(ofdpa_port))
+ err = ofdpa_port_bridge_leave(ofdpa_port);
+ else if (ofdpa_port_is_ovsed(ofdpa_port))
+ err = ofdpa_port_ovs_changed(ofdpa_port, NULL);
+ return err;
+}
+
+static int ofdpa_port_neigh_update(struct rocker_port *rocker_port,
+ struct neighbour *n)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) |
+ OFDPA_OP_FLAG_NOWAIT;
+ __be32 ip_addr = *(__be32 *) n->primary_key;
+
+ return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
+}
+
+static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port,
+ struct neighbour *n)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT;
+ __be32 ip_addr = *(__be32 *) n->primary_key;
+
+ return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha);
+}
+
+static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
+ const unsigned char *addr,
+ __be16 vlan_id)
+{
+ struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
+ int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED;
+
+ if (ofdpa_port->stp_state != BR_STATE_LEARNING &&
+ ofdpa_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
+ return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags);
+}
+
+struct rocker_world_ops rocker_ofdpa_ops = {
+ .kind = "ofdpa",
+ .priv_size = sizeof(struct ofdpa),
+ .port_priv_size = sizeof(struct ofdpa_port),
+ .mode = ROCKER_PORT_MODE_OF_DPA,
+ .init = ofdpa_init,
+ .fini = ofdpa_fini,
+ .port_pre_init = ofdpa_port_pre_init,
+ .port_init = ofdpa_port_init,
+ .port_fini = ofdpa_port_fini,
+ .port_open = ofdpa_port_open,
+ .port_stop = ofdpa_port_stop,
+ .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set,
+ .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set,
+ .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get,
+ .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set,
+ .port_obj_vlan_add = ofdpa_port_obj_vlan_add,
+ .port_obj_vlan_del = ofdpa_port_obj_vlan_del,
+ .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump,
+ .port_obj_fib4_add = ofdpa_port_obj_fib4_add,
+ .port_obj_fib4_del = ofdpa_port_obj_fib4_del,
+ .port_obj_fdb_add = ofdpa_port_obj_fdb_add,
+ .port_obj_fdb_del = ofdpa_port_obj_fdb_del,
+ .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump,
+ .port_master_linked = ofdpa_port_master_linked,
+ .port_master_unlinked = ofdpa_port_master_unlinked,
+ .port_neigh_update = ofdpa_port_neigh_update,
+ .port_neigh_destroy = ofdpa_port_neigh_destroy,
+ .port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen,
+};
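
The world-ops table above is what keeps the rocker core world-agnostic: the core never calls an ofdpa_* function directly, only through rocker_world_ops. A minimal sketch of the dispatch pattern, assuming the core stores the registered table as rocker->wops as elsewhere in this series; the wrapper below is illustrative, not the exact code in rocker_main.c.

/* Illustrative world-ops dispatch; "wops" is the table registered above. */
static int rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
					  const struct switchdev_obj_port_vlan *vlan,
					  struct switchdev_trans *trans)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_add)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_add(rocker_port, vlan, trans);
}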
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.c b/drivers/net/ethernet/rocker/rocker_tlv.c
new file mode 100644
index 000000000000..8185118f3492
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker_tlv.c
@@ -0,0 +1,53 @@
+/*
+ * drivers/net/ethernet/rocker/rocker_tlv.c - Rocker switch device driver
+ * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+#include "rocker_hw.h"
+#include "rocker_tlv.h"
+
+void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
+ const char *buf, int buf_len)
+{
+ const struct rocker_tlv *tlv;
+ const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
+ int rem;
+
+ memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
+
+ rocker_tlv_for_each(tlv, head, buf_len, rem) {
+ u32 type = rocker_tlv_type(tlv);
+
+ if (type > 0 && type <= maxtype)
+ tb[type] = tlv;
+ }
+}
+
+int rocker_tlv_put(struct rocker_desc_info *desc_info,
+ int attrtype, int attrlen, const void *data)
+{
+ int tail_room = desc_info->data_size - desc_info->tlv_size;
+ int total_size = rocker_tlv_total_size(attrlen);
+ struct rocker_tlv *tlv;
+
+ if (unlikely(tail_room < total_size))
+ return -EMSGSIZE;
+
+ tlv = rocker_tlv_start(desc_info);
+ desc_info->tlv_size += total_size;
+ tlv->type = attrtype;
+ tlv->len = rocker_tlv_attr_size(attrlen);
+ memcpy(rocker_tlv_data(tlv), data, attrlen);
+ memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
+ return 0;
+}
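
rocker_tlv_parse() fills a type-indexed table in one pass, netlink-style: unknown attribute types are skipped rather than rejected, which keeps the device/driver TLV format forward-compatible. A hedged usage sketch; the ROCKER_TLV_FOO_* attribute types are invented for illustration and do not exist in the driver.

/* Sketch: decode a received descriptor into a type-indexed table.
 * The ROCKER_TLV_FOO_* enum is hypothetical.
 */
enum {
	ROCKER_TLV_FOO_UNSPEC,
	ROCKER_TLV_FOO_PPORT,		/* u32 */
	ROCKER_TLV_FOO_VLAN_ID,		/* __be16 */
	__ROCKER_TLV_FOO_MAX,
	ROCKER_TLV_FOO_MAX = __ROCKER_TLV_FOO_MAX - 1,
};

static void foo_decode(const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_FOO_MAX + 1];

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_FOO_MAX, desc_info);
	if (attrs[ROCKER_TLV_FOO_PPORT])
		pr_info("pport %u\n",
			rocker_tlv_get_u32(attrs[ROCKER_TLV_FOO_PPORT]));
}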
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h
new file mode 100644
index 000000000000..a63ef82e7c72
--- /dev/null
+++ b/drivers/net/ethernet/rocker/rocker_tlv.h
@@ -0,0 +1,201 @@
+/*
+ * drivers/net/ethernet/rocker/rocker_tlv.h - Rocker switch device driver
+ * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ROCKER_TLV_H
+#define _ROCKER_TLV_H
+
+#include <linux/types.h>
+
+#include "rocker_hw.h"
+#include "rocker.h"
+
+#define ROCKER_TLV_ALIGNTO 8U
+#define ROCKER_TLV_ALIGN(len) \
+ (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
+#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
+
+/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
+ * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
+ * | Header | Pad | Payload | Pad |
+ * | (struct rocker_tlv) | ing | | ing |
+ * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
+ * <--------------------------- tlv->len -------------------------->
+ */
+
+static inline struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
+ int *remaining)
+{
+ int totlen = ROCKER_TLV_ALIGN(tlv->len);
+
+ *remaining -= totlen;
+ return (struct rocker_tlv *) ((char *) tlv + totlen);
+}
+
+static inline int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
+{
+ return remaining >= (int) ROCKER_TLV_HDRLEN &&
+ tlv->len >= ROCKER_TLV_HDRLEN &&
+ tlv->len <= remaining;
+}
+
+#define rocker_tlv_for_each(pos, head, len, rem) \
+ for (pos = head, rem = len; \
+ rocker_tlv_ok(pos, rem); \
+ pos = rocker_tlv_next(pos, &(rem)))
+
+#define rocker_tlv_for_each_nested(pos, tlv, rem) \
+ rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
+ rocker_tlv_len(tlv), rem)
+
+static inline int rocker_tlv_attr_size(int payload)
+{
+ return ROCKER_TLV_HDRLEN + payload;
+}
+
+static inline int rocker_tlv_total_size(int payload)
+{
+ return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
+}
+
+static inline int rocker_tlv_padlen(int payload)
+{
+ return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
+}
+
+static inline int rocker_tlv_type(const struct rocker_tlv *tlv)
+{
+ return tlv->type;
+}
+
+static inline void *rocker_tlv_data(const struct rocker_tlv *tlv)
+{
+ return (char *) tlv + ROCKER_TLV_HDRLEN;
+}
+
+static inline int rocker_tlv_len(const struct rocker_tlv *tlv)
+{
+ return tlv->len - ROCKER_TLV_HDRLEN;
+}
+
+static inline u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
+{
+ return *(u8 *) rocker_tlv_data(tlv);
+}
+
+static inline u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
+{
+ return *(u16 *) rocker_tlv_data(tlv);
+}
+
+static inline __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
+{
+ return *(__be16 *) rocker_tlv_data(tlv);
+}
+
+static inline u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
+{
+ return *(u32 *) rocker_tlv_data(tlv);
+}
+
+static inline u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
+{
+ return *(u64 *) rocker_tlv_data(tlv);
+}
+
+void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
+ const char *buf, int buf_len);
+
+static inline void rocker_tlv_parse_nested(const struct rocker_tlv **tb,
+ int maxtype,
+ const struct rocker_tlv *tlv)
+{
+ rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
+ rocker_tlv_len(tlv));
+}
+
+static inline void
+rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
+ const struct rocker_desc_info *desc_info)
+{
+ rocker_tlv_parse(tb, maxtype, desc_info->data,
+ desc_info->desc->tlv_size);
+}
+
+static inline struct rocker_tlv *
+rocker_tlv_start(struct rocker_desc_info *desc_info)
+{
+ return (struct rocker_tlv *) ((char *) desc_info->data +
+ desc_info->tlv_size);
+}
+
+int rocker_tlv_put(struct rocker_desc_info *desc_info,
+ int attrtype, int attrlen, const void *data);
+
+static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
+ int attrtype, u8 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+}
+
+static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
+ int attrtype, u16 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+}
+
+static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
+ int attrtype, __be16 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+}
+
+static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
+ int attrtype, u32 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+}
+
+static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
+ int attrtype, __be32 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+}
+
+static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
+ int attrtype, u64 value)
+{
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+}
+
+static inline struct rocker_tlv *
+rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
+{
+ struct rocker_tlv *start = rocker_tlv_start(desc_info);
+
+ if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
+ return NULL;
+
+ return start;
+}
+
+static inline void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
+ struct rocker_tlv *start)
+{
+ start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
+}
+
+static inline void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
+ const struct rocker_tlv *start)
+{
+ desc_info->tlv_size = (const char *) start - desc_info->data;
+}
+
+#endif
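
Nested attributes follow the nla_nest_* convention: rocker_tlv_nest_start() reserves a zero-length header, the children are emitted, and rocker_tlv_nest_end() back-patches the length; rocker_tlv_nest_cancel() instead rewinds tlv_size so a half-built nest vanishes. A sketch with the same invented attribute types as above:

/* Sketch of nested TLV emission; attribute types are hypothetical. */
static int foo_encode(struct rocker_desc_info *desc_info)
{
	struct rocker_tlv *nest;

	nest = rocker_tlv_nest_start(desc_info, ROCKER_TLV_FOO_UNSPEC + 3);
	if (!nest)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_FOO_PPORT, 1) ||
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_FOO_VLAN_ID,
				htons(100)))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, nest);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, nest);
	return -EMSGSIZE;
}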
diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile
index dcc80b9d4370..31e968561d5c 100644
--- a/drivers/net/ethernet/samsung/sxgbe/Makefile
+++ b/drivers/net/ethernet/samsung/sxgbe/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \
- sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
+ sxgbe_ethtool.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index b02eed12bfc5..73427e29df2a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -155,11 +155,11 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
return 0;
err_rx_irq_unmap:
- while (--i)
+ while (i--)
irq_dispose_mapping(priv->rxq[i]->irq_no);
i = SXGBE_TX_QUEUES;
err_tx_irq_unmap:
- while (--i)
+ while (i--)
irq_dispose_mapping(priv->txq[i]->irq_no);
irq_dispose_mapping(priv->irq);
err_drv_remove:
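
The two one-character fixes above are the classic unwind off-by-one: entered with i equal to the number of successfully mapped IRQs, "while (--i)" never disposes index 0, and entered with i == 0 it underflows and touches index -1; "while (i--)" visits exactly i-1 down to 0 and does nothing when i == 0. A standalone illustration, with cleanup() standing in for irq_dispose_mapping():

/* Demonstrates why "while (i--)" is the correct unwind idiom. */
#include <stdio.h>

static void cleanup(int idx) { printf("cleanup %d\n", idx); }

int main(void)
{
	int i;

	i = 3;			/* resources 0, 1, 2 were acquired */
	while (--i)		/* buggy: frees 2, 1 -- leaks index 0 */
		cleanup(i);

	i = 3;
	while (i--)		/* fixed: frees 2, 1, 0 */
		cleanup(i);

	i = 0;			/* nothing acquired yet */
	while (i--)		/* fixed form correctly does nothing */
		cleanup(i);
	return 0;
}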
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
deleted file mode 100644
index 51c32194ba88..000000000000
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/* 10G controller driver for Samsung SoCs
- *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include "sxgbe_common.h"
-#include "sxgbe_xpcs.h"
-
-static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
-{
- u32 value;
- struct sxgbe_priv_data *priv = netdev_priv(ndev);
-
- value = readl(priv->ioaddr + XPCS_OFFSET + reg);
-
- return value;
-}
-
-static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
-{
- struct sxgbe_priv_data *priv = netdev_priv(ndev);
-
- writel(data, priv->ioaddr + XPCS_OFFSET + reg);
-
- return 0;
-}
-
-int sxgbe_xpcs_init(struct net_device *ndev)
-{
- u32 value;
-
- value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
- /* 10G XAUI mode */
- sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
- sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
- sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
- sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
-
- do {
- value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
- } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
-
- value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
- sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
-
- do {
- value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
- } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
-
- return 0;
-}
-
-int sxgbe_xpcs_init_1G(struct net_device *ndev)
-{
- int value;
-
- /* 10GBASE-X PCS (1G) mode */
- sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
- sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
- value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
- sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
-
- value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
- sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
- sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
- value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
- sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
-
- do {
- value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
- } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
-
- value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
- sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
-
-	/* Auto Negotiation clause 37 enable */
- value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
- sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
-
- return 0;
-}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
deleted file mode 100644
index 6b26a50724d3..000000000000
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* 10G controller driver for Samsung SoCs
- *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Author: Byungho An <bh74.an@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __SXGBE_XPCS_H__
-#define __SXGBE_XPCS_H__
-
-/* XPCS Registers */
-#define XPCS_OFFSET 0x1A060000
-#define SR_PCS_MMD_CONTROL1 0x030000
-#define SR_PCS_CONTROL2 0x030007
-#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004
-#define VR_PCS_MMD_DIGITAL_STATUS 0x038010
-#define SR_MII_MMD_CONTROL 0x1F0000
-#define SR_MII_MMD_AN_ADV 0x1F0004
-#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005
-#define VR_MII_MMD_AN_CONTROL 0x1F8001
-#define VR_MII_MMD_AN_INT_STATUS 0x1F8002
-
-#define XPCS_QSEQ_STATE_STABLE 0x10
-#define XPCS_QSEQ_STATE_MPLLOFF 0x1c
-#define XPCS_TYPE_SEL_R 0x00
-#define XPCS_TYPE_SEL_X 0x01
-#define XPCS_TYPE_SEL_W 0x02
-#define XPCS_XAUI_MODE 0x00
-#define XPCS_RXAUI_MODE 0x01
-
-int sxgbe_xpcs_init(struct net_device *ndev);
-int sxgbe_xpcs_init_1G(struct net_device *ndev);
-
-#endif /* __SXGBE_XPCS_H__ */
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 10827476bc0b..5e3f93f04e62 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -32,7 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev);
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
extern unsigned int efx_piobuf_size;
extern bool efx_separate_tx_channels;
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 034797661f96..445ccdb6bc67 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -783,14 +783,26 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
+#define IP_PROTO_FULL_MASK 0xFF
#define PORT_FULL_MASK ((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+static inline void ip6_fill_mask(__be32 *mask)
+{
+ mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
+}
+
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)
{
struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+ struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec;
@@ -833,6 +845,35 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
ip_entry->psrc = spec.rem_port;
ip_mask->psrc = PORT_FULL_MASK;
}
+ } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IPV6) &&
+ (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V6_FLOW : UDP_V6_FLOW);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ memcpy(ip6_entry->ip6dst, spec.loc_host,
+ sizeof(ip6_entry->ip6dst));
+ ip6_fill_mask(ip6_mask->ip6dst);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ memcpy(ip6_entry->ip6src, spec.rem_host,
+ sizeof(ip6_entry->ip6src));
+ ip6_fill_mask(ip6_mask->ip6src);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ ip6_entry->pdst = spec.loc_port;
+ ip6_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+ ip6_entry->psrc = spec.rem_port;
+ ip6_mask->psrc = PORT_FULL_MASK;
+ }
} else if (!(spec.match_flags &
~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
@@ -855,6 +896,47 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
mac_entry->h_proto = spec.ether_type;
mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
}
+ } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO))) {
+ rule->flow_type = IPV4_USER_FLOW;
+ uip_entry->ip_ver = ETH_RX_NFC_IP4;
+ if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
+ uip_mask->proto = IP_PROTO_FULL_MASK;
+ uip_entry->proto = spec.ip_proto;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ uip_entry->ip4dst = spec.loc_host[0];
+ uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ uip_entry->ip4src = spec.rem_host[0];
+ uip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+ spec.ether_type == htons(ETH_P_IPV6) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO))) {
+ rule->flow_type = IPV6_USER_FLOW;
+ if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
+ uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
+ uip6_entry->l4_proto = spec.ip_proto;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ memcpy(uip6_entry->ip6dst, spec.loc_host,
+ sizeof(uip6_entry->ip6dst));
+ ip6_fill_mask(uip6_mask->ip6dst);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ memcpy(uip6_entry->ip6src, spec.rem_host,
+ sizeof(uip6_entry->ip6src));
+ ip6_fill_mask(uip6_mask->ip6src);
+ }
} else {
/* The above should handle all filters that we insert */
WARN_ON(1);
@@ -946,11 +1028,27 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
}
}
+static inline bool ip6_mask_is_full(__be32 mask[4])
+{
+ return !~(mask[0] & mask[1] & mask[2] & mask[3]);
+}
+
+static inline bool ip6_mask_is_empty(__be32 mask[4])
+{
+ return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
static int efx_ethtool_set_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)
{
struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+ struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec;
@@ -1012,6 +1110,92 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
return -EINVAL;
break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IPV6);
+ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+ if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
+ if (!ip6_mask_is_full(ip6_mask->ip6dst))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
+ }
+ if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
+ if (!ip6_mask_is_full(ip6_mask->ip6src))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
+ }
+ if (ip6_mask->pdst) {
+ if (ip6_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip6_entry->pdst;
+ }
+ if (ip6_mask->psrc) {
+ if (ip6_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip6_entry->psrc;
+ }
+ if (ip6_mask->tclass)
+ return -EINVAL;
+ break;
+
+ case IPV4_USER_FLOW:
+ if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
+ uip_entry->ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = htons(ETH_P_IP);
+ if (uip_mask->ip4dst) {
+ if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = uip_entry->ip4dst;
+ }
+ if (uip_mask->ip4src) {
+ if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = uip_entry->ip4src;
+ }
+ if (uip_mask->proto) {
+ if (uip_mask->proto != IP_PROTO_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ spec.ip_proto = uip_entry->proto;
+ }
+ break;
+
+ case IPV6_USER_FLOW:
+ if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
+ return -EINVAL;
+ spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = htons(ETH_P_IPV6);
+ if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
+ if (!ip6_mask_is_full(uip6_mask->ip6dst))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
+ }
+ if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
+ if (!ip6_mask_is_full(uip6_mask->ip6src))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
+ }
+ if (uip6_mask->l4_proto) {
+ if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ spec.ip_proto = uip6_entry->l4_proto;
+ }
+ break;
+
case ETHER_FLOW:
if (!is_zero_ether_addr(mac_mask->h_dest)) {
if (ether_addr_equal(mac_mask->h_dest,
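
ip6_mask_is_full() and ip6_mask_is_empty() collapse the 128-bit mask test into word-wise AND/OR because the filter hardware only supports all-or-nothing host matching; any partial prefix mask falls through to -EINVAL. A self-contained check of the same logic (userspace C; uint32_t stands in for __be32, since an all-ones/all-zeros test is byte-order independent):

/* Standalone demo of the IPv6 mask tests used above. */
#include <stdint.h>
#include <stdio.h>

static int mask_is_full(const uint32_t m[4])
{
	return !~(m[0] & m[1] & m[2] & m[3]);
}

static int mask_is_empty(const uint32_t m[4])
{
	return !(m[0] | m[1] | m[2] | m[3]);
}

int main(void)
{
	uint32_t full[4]   = { ~0u, ~0u, ~0u, ~0u };
	uint32_t empty[4]  = { 0, 0, 0, 0 };
	uint32_t prefix[4] = { ~0u, ~0u, 0, 0 };	/* /64 - rejected */

	printf("%d %d %d\n", mask_is_full(full),
	       mask_is_empty(empty), mask_is_full(prefix));	/* 1 1 0 */
	return 0;
}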
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index f7a0ec1bca97..233778911557 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -562,14 +562,20 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
efx->n_tx_channels : 0));
}
-int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *ntc)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
- unsigned tc;
+ unsigned tc, num_tc;
int rc;
+ if (ntc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
+ num_tc = ntc->tc;
+
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
return -EINVAL;
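
The new signature tracks this cycle's multiplexed ndo_setup_tc: handle, protocol and struct tc_to_netdev let one callback carry several offload types, so a driver must check ntc->type before reading the union. A hedged sketch of the same mqprio-only pattern for a hypothetical driver; FOO_MAX_TC is an invented limit:

/* Sketch of the mqprio-only ndo_setup_tc pattern shown above. */
static int foo_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			struct tc_to_netdev *ntc)
{
	u8 num_tc;

	if (ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	num_tc = ntc->tc;
	if (num_tc > FOO_MAX_TC)	/* FOO_MAX_TC: hypothetical limit */
		return -EINVAL;

	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}
	return netdev_set_num_tc(dev, num_tc);
}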
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index bd64eb982e52..a733868a43aa 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -73,6 +73,9 @@ static const char version[] =
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/pxa-dma.h>
+
#include <asm/io.h>
#include "smc911x.h"
@@ -1174,18 +1177,16 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
#ifdef SMC_USE_DMA
static void
-smc911x_tx_dma_irq(int dma, void *data)
+smc911x_tx_dma_irq(void *data)
{
- struct net_device *dev = (struct net_device *)data;
- struct smc911x_local *lp = netdev_priv(dev);
+ struct smc911x_local *lp = data;
+ struct net_device *dev = lp->netdev;
struct sk_buff *skb = lp->current_tx_skb;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
- /* Clear the DMA interrupt sources */
- SMC_DMA_ACK_IRQ(dev, dma);
BUG_ON(skb == NULL);
dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
dev->trans_start = jiffies;
@@ -1208,18 +1209,16 @@ smc911x_tx_dma_irq(int dma, void *data)
"TX DMA irq completed\n");
}
static void
-smc911x_rx_dma_irq(int dma, void *data)
+smc911x_rx_dma_irq(void *data)
{
- struct net_device *dev = (struct net_device *)data;
- struct smc911x_local *lp = netdev_priv(dev);
+ struct smc911x_local *lp = data;
+ struct net_device *dev = lp->netdev;
struct sk_buff *skb = lp->current_rx_skb;
unsigned long flags;
unsigned int pkts;
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n");
- /* Clear the DMA interrupt sources */
- SMC_DMA_ACK_IRQ(dev, dma);
dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
BUG_ON(skb == NULL);
lp->current_rx_skb = NULL;
@@ -1792,6 +1791,11 @@ static int smc911x_probe(struct net_device *dev)
unsigned int val, chip_id, revision;
const char *version_string;
unsigned long irq_flags;
+#ifdef SMC_USE_DMA
+ struct dma_slave_config config;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+#endif
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
@@ -1963,11 +1967,40 @@ static int smc911x_probe(struct net_device *dev)
goto err_out;
#ifdef SMC_USE_DMA
- lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
- lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ param.prio = PXAD_PRIO_LOWEST;
+ param.drcmr = -1UL;
+
+ lp->rxdma =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &dev->dev, "rx");
+ lp->txdma =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, &dev->dev, "tx");
lp->rxdma_active = 0;
lp->txdma_active = 0;
- dev->dma = lp->rxdma;
+
+ memset(&config, 0, sizeof(config));
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = lp->physaddr + RX_DATA_FIFO;
+ config.dst_addr = lp->physaddr + TX_DATA_FIFO;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ retval = dmaengine_slave_config(lp->rxdma, &config);
+ if (retval) {
+ dev_err(lp->dev, "dma rx channel configuration failed: %d\n",
+ retval);
+ goto err_out;
+ }
+ retval = dmaengine_slave_config(lp->txdma, &config);
+ if (retval) {
+ dev_err(lp->dev, "dma tx channel configuration failed: %d\n",
+ retval);
+ goto err_out;
+ }
#endif
retval = register_netdev(dev);
@@ -1978,11 +2011,11 @@ static int smc911x_probe(struct net_device *dev)
dev->base_addr, dev->irq);
#ifdef SMC_USE_DMA
- if (lp->rxdma != -1)
- pr_cont(" RXDMA %d", lp->rxdma);
+ if (lp->rxdma)
+ pr_cont(" RXDMA %p", lp->rxdma);
- if (lp->txdma != -1)
- pr_cont(" TXDMA %d", lp->txdma);
+ if (lp->txdma)
+ pr_cont(" TXDMA %p", lp->txdma);
#endif
pr_cont("\n");
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2005,12 +2038,10 @@ static int smc911x_probe(struct net_device *dev)
err_out:
#ifdef SMC_USE_DMA
if (retval) {
- if (lp->rxdma != -1) {
- SMC_DMA_FREE(dev, lp->rxdma);
- }
- if (lp->txdma != -1) {
- SMC_DMA_FREE(dev, lp->txdma);
- }
+ if (lp->rxdma)
+ dma_release_channel(lp->rxdma);
+ if (lp->txdma)
+ dma_release_channel(lp->txdma);
}
#endif
return retval;
@@ -2112,12 +2143,10 @@ static int smc911x_drv_remove(struct platform_device *pdev)
#ifdef SMC_USE_DMA
{
- if (lp->rxdma != -1) {
- SMC_DMA_FREE(dev, lp->rxdma);
- }
- if (lp->txdma != -1) {
- SMC_DMA_FREE(dev, lp->txdma);
- }
+ if (lp->rxdma)
+ dma_release_channel(lp->rxdma);
+ if (lp->txdma)
+ dma_release_channel(lp->txdma);
}
#endif
iounmap(lp->base);
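
The probe changes follow the standard dmaengine slave bring-up: take a channel (falling back to the PXA filter on non-DT platforms) and describe both FIFO endpoints once via dmaengine_slave_config(). A condensed sketch of that sequence; names and values are those of the patch, error paths trimmed for brevity:

/* Condensed dmaengine slave setup as done in probe above. */
dma_cap_mask_t mask;
struct pxad_param param = { .prio = PXAD_PRIO_LOWEST, .drcmr = -1UL };
struct dma_slave_config cfg = {
	.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_addr	= lp->physaddr + RX_DATA_FIFO,	/* dev -> mem */
	.dst_addr	= lp->physaddr + TX_DATA_FIFO,	/* mem -> dev */
	.src_maxburst	= 32,
	.dst_maxburst	= 32,
};

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
lp->rxdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
					     &param, &dev->dev, "rx");
if (lp->rxdma && dmaengine_slave_config(lp->rxdma, &cfg))
	dma_release_channel(lp->rxdma);		/* config failed */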
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 04b35f55df97..fa528ea0ea51 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -101,8 +101,8 @@ struct smc911x_local {
#ifdef SMC_USE_DMA
/* DMA needs the physical address of the chip */
u_long physaddr;
- int rxdma;
- int txdma;
+ struct dma_chan *rxdma;
+ struct dma_chan *txdma;
int rxdma_active;
int txdma_active;
struct sk_buff *current_rx_skb;
@@ -210,27 +210,6 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
#ifdef SMC_USE_PXA_DMA
-#include <mach/dma.h>
-
-/*
- * Define the request and free functions
- * These are unfortunately architecture specific as no generic allocation
- * mechanism exists
- */
-#define SMC_DMA_REQUEST(dev, handler) \
- pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev)
-
-#define SMC_DMA_FREE(dev, dma) \
- pxa_free_dma(dma)
-
-#define SMC_DMA_ACK_IRQ(dev, dma) \
-{ \
- if (DCSR(dma) & DCSR_BUSERR) { \
- netdev_err(dev, "DMA %d bus error!\n", dma); \
- } \
- DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \
-}
-
/*
* Use a DMA for RX and TX packets.
*/
@@ -238,6 +217,8 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
static dma_addr_t rx_dmabuf, tx_dmabuf;
static int rx_dmalen, tx_dmalen;
+static void smc911x_rx_dma_irq(void *data);
+static void smc911x_tx_dma_irq(void *data);
#ifdef SMC_insl
#undef SMC_insl
@@ -246,8 +227,10 @@ static int rx_dmalen, tx_dmalen;
static inline void
smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr,
- int reg, int dma, u_char *buf, int len)
+ int reg, struct dma_chan *dma, u_char *buf, int len)
{
+ struct dma_async_tx_descriptor *tx;
+
/* 64 bit alignment is required for memory to memory DMA */
if ((long)buf & 4) {
*((u32 *)buf) = SMC_inl(lp, reg);
@@ -258,12 +241,14 @@ smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr,
len *= 4;
rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE);
rx_dmalen = len;
- DCSR(dma) = DCSR_NODESC;
- DTADR(dma) = rx_dmabuf;
- DSADR(dma) = physaddr + reg;
- DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
- DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
- DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+ tx = dmaengine_prep_slave_single(dma, rx_dmabuf, rx_dmalen,
+ DMA_DEV_TO_MEM, 0);
+ if (tx) {
+ tx->callback = smc911x_rx_dma_irq;
+ tx->callback_param = lp;
+ dmaengine_submit(tx);
+ dma_async_issue_pending(dma);
+ }
}
#endif
@@ -274,8 +259,10 @@ smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr,
static inline void
smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
- int reg, int dma, u_char *buf, int len)
+ int reg, struct dma_chan *dma, u_char *buf, int len)
{
+ struct dma_async_tx_descriptor *tx;
+
/* 64 bit alignment is required for memory to memory DMA */
if ((long)buf & 4) {
SMC_outl(*((u32 *)buf), lp, reg);
@@ -286,12 +273,14 @@ smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
len *= 4;
tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE);
tx_dmalen = len;
- DCSR(dma) = DCSR_NODESC;
- DSADR(dma) = tx_dmabuf;
- DTADR(dma) = physaddr + reg;
- DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
- DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
- DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+	/* TX moves data from memory to the device FIFO */
+	tx = dmaengine_prep_slave_single(dma, tx_dmabuf, tx_dmalen,
+					 DMA_MEM_TO_DEV, 0);
+ if (tx) {
+ tx->callback = smc911x_tx_dma_irq;
+ tx->callback_param = lp;
+ dmaengine_submit(tx);
+ dma_async_issue_pending(dma);
+ }
}
#endif
#endif /* SMC_USE_PXA_DMA */
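
Each transfer now goes through the canonical dmaengine sequence: prep a descriptor, attach the completion callback, submit, then kick the engine. The direction argument must match the endpoint set in dma_slave_config -- DMA_DEV_TO_MEM pairs with src_addr (RX), DMA_MEM_TO_DEV with dst_addr (TX), which is why the TX helper is corrected above. A skeleton of the sequence:

/* Skeleton of one dmaengine slave transfer as used by the insl/outsl
 * helpers; 'dir' is DMA_DEV_TO_MEM for RX, DMA_MEM_TO_DEV for TX.
 */
static void issue_one_transfer(struct dma_chan *chan, dma_addr_t buf,
			       size_t len, enum dma_transfer_direction dir,
			       dma_async_tx_callback done, void *arg)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(chan, buf, len, dir, 0);
	if (!tx)
		return;			/* caller may fall back to PIO */
	tx->callback = done;		/* runs in tasklet context */
	tx->callback_param = arg;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);	/* nothing moves until this */
}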
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index db7db8ac4ca3..c5ed27c54724 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -540,7 +540,7 @@ static inline void smc_rcv(struct net_device *dev)
#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
#else
-#define smc_special_trylock(lock, flags) (flags == flags)
+#define smc_special_trylock(lock, flags) ((void)flags, true)
#define smc_special_lock(lock, flags) do { flags = 0; } while (0)
#define smc_special_unlock(lock, flags) do { flags = 0; } while (0)
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index cf28daba4346..b3e669af3005 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -31,8 +31,7 @@
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
- unsigned int txsize = priv->dma_tx_size;
- unsigned int entry = priv->cur_tx % txsize;
+ unsigned int entry = priv->cur_tx;
struct dma_desc *desc = priv->dma_tx + entry;
unsigned int nopaged_len = skb_headlen(skb);
unsigned int bmax;
@@ -50,11 +49,14 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
- priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
+ priv->tx_skbuff_dma[entry].len = bmax;
+ /* do not close the descriptor and do not set own bit */
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
+ 0, false);
while (len != 0) {
priv->tx_skbuff[entry] = NULL;
- entry = (++priv->cur_tx) % txsize;
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
desc = priv->dma_tx + entry;
if (len > bmax) {
@@ -64,9 +66,10 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = bmax;
priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
- STMMAC_CHAIN_MODE);
- priv->hw->desc->set_tx_owner(desc);
+ STMMAC_CHAIN_MODE, 1,
+ false);
len -= bmax;
i++;
} else {
@@ -76,12 +79,17 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = len;
+ /* last descriptor can be set now */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
- STMMAC_CHAIN_MODE);
- priv->hw->desc->set_tx_owner(desc);
+ STMMAC_CHAIN_MODE, 1,
+ true);
len = 0;
}
}
+
+ priv->cur_tx = entry;
+
return entry;
}
@@ -138,23 +146,24 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = (unsigned int)(priv->dma_rx_phy +
(((priv->dirty_rx) + 1) %
- priv->dma_rx_size) *
+ DMA_RX_SIZE) *
sizeof(struct dma_desc));
}
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+ unsigned int entry = priv->dirty_tx;
- if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc)
+ if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
+ priv->hwts_tx_en)
/* NOTE: Device will overwrite des3 with timestamp value if
* 1588-2002 time stamping is enabled, hence reinitialize it
* to keep explicit chaining in the descriptor.
*/
- p->des3 = (unsigned int)(priv->dma_tx_phy +
- (((priv->dirty_tx + 1) %
- priv->dma_tx_size) *
- sizeof(struct dma_desc)));
+	p->des3 = (unsigned int)(priv->dma_tx_phy +
+				 (((priv->dirty_tx + 1) % DMA_TX_SIZE) *
+				  sizeof(struct dma_desc)));
}
const struct stmmac_mode_ops chain_mode_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1e19c8fd8b82..f96d257308b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -27,6 +27,7 @@
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/module.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
@@ -41,6 +42,10 @@
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DMA_TX_SIZE 512
+#define DMA_RX_SIZE 512
+#define STMMAC_GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))
+
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
@@ -95,7 +100,7 @@ struct stmmac_extra_stats {
unsigned long napi_poll;
unsigned long tx_normal_irq_n;
unsigned long tx_clean;
- unsigned long tx_reset_ic_bit;
+ unsigned long tx_set_ic_bit;
unsigned long irq_receive_pmt_irq_n;
/* MMC info */
unsigned long mmc_tx_irq_n;
@@ -233,10 +238,19 @@ struct stmmac_extra_stats {
/* Rx IPC status */
enum rx_frame_status {
- good_frame = 0,
- discard_frame = 1,
- csum_none = 2,
- llc_snap = 4,
+ good_frame = 0x0,
+ discard_frame = 0x1,
+ csum_none = 0x2,
+ llc_snap = 0x4,
+ dma_own = 0x8,
+};
+
+/* Tx status */
+enum tx_frame_status {
+ tx_done = 0x0,
+ tx_not_ls = 0x1,
+ tx_err = 0x2,
+ tx_dma_own = 0x4,
};
enum dma_irq_status {
@@ -332,17 +346,16 @@ struct stmmac_desc_ops {
/* Invoked by the xmit function to prepare the tx descriptor */
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
- int csum_flag, int mode);
+ bool csum_flag, int mode, bool tx_own,
+ bool ls);
/* Set/get the owner of the descriptor */
void (*set_tx_owner) (struct dma_desc *p);
int (*get_tx_owner) (struct dma_desc *p);
- /* Invoked by the xmit function to close the tx descriptor */
- void (*close_tx_desc) (struct dma_desc *p);
/* Clean the tx descriptor as soon as the tx irq is received */
void (*release_tx_desc) (struct dma_desc *p, int mode);
/* Clear interrupt on tx frame completion. When this bit is
* set an interrupt happens as soon as the frame is transmitted */
- void (*clear_tx_ic) (struct dma_desc *p);
+ void (*set_tx_ic)(struct dma_desc *p);
/* Last tx segment reports the transmit status */
int (*get_tx_ls) (struct dma_desc *p);
/* Return the transmit status looking at the TDES1 */
@@ -351,7 +364,6 @@ struct stmmac_desc_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- int (*get_rx_owner) (struct dma_desc *p);
void (*set_rx_owner) (struct dma_desc *p);
/* Get the receive frame size */
int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
@@ -376,8 +388,11 @@ extern const struct stmmac_desc_ops ndesc_ops;
/* Specific DMA helpers */
struct stmmac_dma_ops {
/* DMA core initialization */
- int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
- int burst_len, u32 dma_tx, u32 dma_rx, int atds);
+ int (*reset)(void __iomem *ioaddr);
+ void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds);
+ /* Configure the AXI Bus Mode Register */
+ void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
/* Dump DMA registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Set tx/rx threshold in the csr6 register
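
With the ring lengths fixed at DMA_TX_SIZE/DMA_RX_SIZE, STMMAC_GET_ENTRY replaces the old per-packet modulo with a power-of-two AND -- division-free in the hot path, but silently dependent on the size being a power of two. A standalone check of the wrap behaviour:

/* Demonstrates the power-of-two ring wrap behind STMMAC_GET_ENTRY;
 * correct only when size is a power of two (512 here, as in the patch).
 */
#include <assert.h>

#define GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))

int main(void)
{
	unsigned int e = 510;

	e = GET_ENTRY(e, 512);	/* 511 */
	assert(e == 511);
	e = GET_ENTRY(e, 512);	/* wraps to 0 without a division */
	assert(e == 0);
	return 0;
}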
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 799c2929c536..2e4c171a2b41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -1,6 +1,6 @@
/*******************************************************************************
- Header File to describe the DMA descriptors.
- Enhanced descriptors have been in case of DWMAC1000 Cores.
+ Header File to describe the DMA descriptors and related definitions.
+ This is for DWMAC100 and 1000 cores.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -24,198 +24,164 @@
#ifndef __DESCS_H__
#define __DESCS_H__
+#include <linux/bitops.h>
+
+/* Normal receive descriptor defines */
+
+/* RDES0 */
+#define RDES0_PAYLOAD_CSUM_ERR BIT(0)
+#define RDES0_CRC_ERROR BIT(1)
+#define RDES0_DRIBBLING BIT(2)
+#define RDES0_MII_ERROR BIT(3)
+#define RDES0_RECEIVE_WATCHDOG BIT(4)
+#define RDES0_FRAME_TYPE BIT(5)
+#define RDES0_COLLISION BIT(6)
+#define RDES0_IPC_CSUM_ERROR BIT(7)
+#define RDES0_LAST_DESCRIPTOR BIT(8)
+#define RDES0_FIRST_DESCRIPTOR BIT(9)
+#define RDES0_VLAN_TAG BIT(10)
+#define RDES0_OVERFLOW_ERROR BIT(11)
+#define RDES0_LENGTH_ERROR BIT(12)
+#define RDES0_SA_FILTER_FAIL BIT(13)
+#define RDES0_DESCRIPTOR_ERROR BIT(14)
+#define RDES0_ERROR_SUMMARY BIT(15)
+#define RDES0_FRAME_LEN_MASK GENMASK(29, 16)
+#define RDES0_FRAME_LEN_SHIFT 16
+#define RDES0_DA_FILTER_FAIL BIT(30)
+#define RDES0_OWN BIT(31)
+ /* RDES1 */
+#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
+#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
+#define RDES1_BUFFER2_SIZE_SHIFT 11
+#define RDES1_SECOND_ADDRESS_CHAINED BIT(24)
+#define RDES1_END_RING BIT(25)
+#define RDES1_DISABLE_IC BIT(31)
+
+/* Enhanced receive descriptor defines */
+
+/* RDES0 (similar to normal RDES) */
+#define ERDES0_RX_MAC_ADDR BIT(0)
+
+/* RDES1: differs completely from the normal descriptor definitions */
+#define ERDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
+#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14)
+#define ERDES1_END_RING BIT(15)
+#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
+#define ERDES1_BUFFER2_SIZE_SHIFT 16
+#define ERDES1_DISABLE_IC BIT(31)
+
+/* Normal transmit descriptor defines */
+/* TDES0 */
+#define TDES0_DEFERRED BIT(0)
+#define TDES0_UNDERFLOW_ERROR BIT(1)
+#define TDES0_EXCESSIVE_DEFERRAL BIT(2)
+#define TDES0_COLLISION_COUNT_MASK GENMASK(6, 3)
+#define TDES0_VLAN_FRAME BIT(7)
+#define TDES0_EXCESSIVE_COLLISIONS BIT(8)
+#define TDES0_LATE_COLLISION BIT(9)
+#define TDES0_NO_CARRIER BIT(10)
+#define TDES0_LOSS_CARRIER BIT(11)
+#define TDES0_PAYLOAD_ERROR BIT(12)
+#define TDES0_FRAME_FLUSHED BIT(13)
+#define TDES0_JABBER_TIMEOUT BIT(14)
+#define TDES0_ERROR_SUMMARY BIT(15)
+#define TDES0_IP_HEADER_ERROR BIT(16)
+#define TDES0_TIME_STAMP_STATUS BIT(17)
+#define TDES0_OWN BIT(31)
+/* TDES1 */
+#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
+#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
+#define TDES1_BUFFER2_SIZE_SHIFT 11
+#define TDES1_TIME_STAMP_ENABLE BIT(22)
+#define TDES1_DISABLE_PADDING BIT(23)
+#define TDES1_SECOND_ADDRESS_CHAINED BIT(24)
+#define TDES1_END_RING BIT(25)
+#define TDES1_CRC_DISABLE BIT(26)
+#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27)
+#define TDES1_CHECKSUM_INSERTION_SHIFT 27
+#define TDES1_FIRST_SEGMENT BIT(29)
+#define TDES1_LAST_SEGMENT BIT(30)
+#define TDES1_INTERRUPT BIT(31)
+
+/* Enhanced transmit descriptor defines */
+/* TDES0 */
+#define ETDES0_DEFERRED BIT(0)
+#define ETDES0_UNDERFLOW_ERROR BIT(1)
+#define ETDES0_EXCESSIVE_DEFERRAL BIT(2)
+#define ETDES0_COLLISION_COUNT_MASK GENMASK(6, 3)
+#define ETDES0_VLAN_FRAME BIT(7)
+#define ETDES0_EXCESSIVE_COLLISIONS BIT(8)
+#define ETDES0_LATE_COLLISION BIT(9)
+#define ETDES0_NO_CARRIER BIT(10)
+#define ETDES0_LOSS_CARRIER BIT(11)
+#define ETDES0_PAYLOAD_ERROR BIT(12)
+#define ETDES0_FRAME_FLUSHED BIT(13)
+#define ETDES0_JABBER_TIMEOUT BIT(14)
+#define ETDES0_ERROR_SUMMARY BIT(15)
+#define ETDES0_IP_HEADER_ERROR BIT(16)
+#define ETDES0_TIME_STAMP_STATUS BIT(17)
+#define ETDES0_SECOND_ADDRESS_CHAINED BIT(20)
+#define ETDES0_END_RING BIT(21)
+#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22)
+#define ETDES0_CHECKSUM_INSERTION_SHIFT 22
+#define ETDES0_TIME_STAMP_ENABLE BIT(25)
+#define ETDES0_DISABLE_PADDING BIT(26)
+#define ETDES0_CRC_DISABLE BIT(27)
+#define ETDES0_FIRST_SEGMENT BIT(28)
+#define ETDES0_LAST_SEGMENT BIT(29)
+#define ETDES0_INTERRUPT BIT(30)
+#define ETDES0_OWN BIT(31)
+/* TDES1 */
+#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
+#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
+#define ETDES1_BUFFER2_SIZE_SHIFT 16
+
+/* Extended Receive descriptor definitions */
+#define ERDES4_IP_PAYLOAD_TYPE_MASK	GENMASK(2, 0)
+#define ERDES4_IP_HDR_ERR BIT(3)
+#define ERDES4_IP_PAYLOAD_ERR BIT(4)
+#define ERDES4_IP_CSUM_BYPASSED BIT(5)
+#define ERDES4_IPV4_PKT_RCVD BIT(6)
+#define ERDES4_IPV6_PKT_RCVD BIT(7)
+#define ERDES4_MSG_TYPE_MASK GENMASK(11, 8)
+#define ERDES4_PTP_FRAME_TYPE BIT(12)
+#define ERDES4_PTP_VER BIT(13)
+#define ERDES4_TIMESTAMP_DROPPED BIT(14)
+#define ERDES4_AV_PKT_RCVD BIT(16)
+#define ERDES4_AV_TAGGED_PKT_RCVD BIT(17)
+#define ERDES4_VLAN_TAG_PRI_VAL_MASK GENMASK(20, 18)
+#define ERDES4_L3_FILTER_MATCH BIT(24)
+#define ERDES4_L4_FILTER_MATCH BIT(25)
+#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
+
+/* Extended RDES4 message type definitions */
+#define RDES_EXT_NO_PTP 0
+#define RDES_EXT_SYNC 1
+#define RDES_EXT_FOLLOW_UP 2
+#define RDES_EXT_DELAY_REQ 3
+#define RDES_EXT_DELAY_RESP 4
+#define RDES_EXT_PDELAY_REQ 5
+#define RDES_EXT_PDELAY_RESP 6
+#define RDES_EXT_PDELAY_FOLLOW_UP 7
+
/* Basic descriptor structure for normal and alternate descriptors */
struct dma_desc {
- /* Receive descriptor */
- union {
- struct {
- /* RDES0 */
- u32 payload_csum_error:1;
- u32 crc_error:1;
- u32 dribbling:1;
- u32 mii_error:1;
- u32 receive_watchdog:1;
- u32 frame_type:1;
- u32 collision:1;
- u32 ipc_csum_error:1;
- u32 last_descriptor:1;
- u32 first_descriptor:1;
- u32 vlan_tag:1;
- u32 overflow_error:1;
- u32 length_error:1;
- u32 sa_filter_fail:1;
- u32 descriptor_error:1;
- u32 error_summary:1;
- u32 frame_length:14;
- u32 da_filter_fail:1;
- u32 own:1;
- /* RDES1 */
- u32 buffer1_size:11;
- u32 buffer2_size:11;
- u32 reserved1:2;
- u32 second_address_chained:1;
- u32 end_ring:1;
- u32 reserved2:5;
- u32 disable_ic:1;
-
- } rx;
- struct {
- /* RDES0 */
- u32 rx_mac_addr:1;
- u32 crc_error:1;
- u32 dribbling:1;
- u32 error_gmii:1;
- u32 receive_watchdog:1;
- u32 frame_type:1;
- u32 late_collision:1;
- u32 ipc_csum_error:1;
- u32 last_descriptor:1;
- u32 first_descriptor:1;
- u32 vlan_tag:1;
- u32 overflow_error:1;
- u32 length_error:1;
- u32 sa_filter_fail:1;
- u32 descriptor_error:1;
- u32 error_summary:1;
- u32 frame_length:14;
- u32 da_filter_fail:1;
- u32 own:1;
- /* RDES1 */
- u32 buffer1_size:13;
- u32 reserved1:1;
- u32 second_address_chained:1;
- u32 end_ring:1;
- u32 buffer2_size:13;
- u32 reserved2:2;
- u32 disable_ic:1;
- } erx; /* -- enhanced -- */
-
- /* Transmit descriptor */
- struct {
- /* TDES0 */
- u32 deferred:1;
- u32 underflow_error:1;
- u32 excessive_deferral:1;
- u32 collision_count:4;
- u32 vlan_frame:1;
- u32 excessive_collisions:1;
- u32 late_collision:1;
- u32 no_carrier:1;
- u32 loss_carrier:1;
- u32 payload_error:1;
- u32 frame_flushed:1;
- u32 jabber_timeout:1;
- u32 error_summary:1;
- u32 ip_header_error:1;
- u32 time_stamp_status:1;
- u32 reserved1:13;
- u32 own:1;
- /* TDES1 */
- u32 buffer1_size:11;
- u32 buffer2_size:11;
- u32 time_stamp_enable:1;
- u32 disable_padding:1;
- u32 second_address_chained:1;
- u32 end_ring:1;
- u32 crc_disable:1;
- u32 checksum_insertion:2;
- u32 first_segment:1;
- u32 last_segment:1;
- u32 interrupt:1;
- } tx;
- struct {
- /* TDES0 */
- u32 deferred:1;
- u32 underflow_error:1;
- u32 excessive_deferral:1;
- u32 collision_count:4;
- u32 vlan_frame:1;
- u32 excessive_collisions:1;
- u32 late_collision:1;
- u32 no_carrier:1;
- u32 loss_carrier:1;
- u32 payload_error:1;
- u32 frame_flushed:1;
- u32 jabber_timeout:1;
- u32 error_summary:1;
- u32 ip_header_error:1;
- u32 time_stamp_status:1;
- u32 reserved1:2;
- u32 second_address_chained:1;
- u32 end_ring:1;
- u32 checksum_insertion:2;
- u32 reserved2:1;
- u32 time_stamp_enable:1;
- u32 disable_padding:1;
- u32 crc_disable:1;
- u32 first_segment:1;
- u32 last_segment:1;
- u32 interrupt:1;
- u32 own:1;
- /* TDES1 */
- u32 buffer1_size:13;
- u32 reserved3:3;
- u32 buffer2_size:13;
- u32 reserved4:3;
- } etx; /* -- enhanced -- */
-
- u64 all_flags;
- } des01;
+ unsigned int des0;
+ unsigned int des1;
unsigned int des2;
unsigned int des3;
};
-/* Extended descriptor structure (supported by new SYNP GMAC generations) */
+/* Extended descriptor structure (e.g. >= databook 3.50a) */
struct dma_extended_desc {
- struct dma_desc basic;
- union {
- struct {
- u32 ip_payload_type:3;
- u32 ip_hdr_err:1;
- u32 ip_payload_err:1;
- u32 ip_csum_bypassed:1;
- u32 ipv4_pkt_rcvd:1;
- u32 ipv6_pkt_rcvd:1;
- u32 msg_type:4;
- u32 ptp_frame_type:1;
- u32 ptp_ver:1;
- u32 timestamp_dropped:1;
- u32 reserved:1;
- u32 av_pkt_rcvd:1;
- u32 av_tagged_pkt_rcvd:1;
- u32 vlan_tag_priority_val:3;
- u32 reserved3:3;
- u32 l3_filter_match:1;
- u32 l4_filter_match:1;
- u32 l3_l4_filter_no_match:2;
- u32 reserved4:4;
- } erx;
- struct {
- u32 reserved;
- } etx;
- } des4;
+ struct dma_desc basic; /* Basic descriptors */
+ unsigned int des4; /* Extended Status */
unsigned int des5; /* Reserved */
unsigned int des6; /* Tx/Rx Timestamp Low */
unsigned int des7; /* Tx/Rx Timestamp High */
};
/* Transmit checksum insertion control */
-enum tdes_csum_insertion {
- cic_disabled = 0, /* Checksum Insertion Control */
- cic_only_ip = 1, /* Only IP header */
- /* IP header but pseudoheader is not calculated */
- cic_no_pseudoheader = 2,
- cic_full = 3, /* IP header and pseudoheader */
-};
-
-/* Extended RDES4 definitions */
-#define RDES_EXT_NO_PTP 0
-#define RDES_EXT_SYNC 0x1
-#define RDES_EXT_FOLLOW_UP 0x2
-#define RDES_EXT_DELAY_REQ 0x3
-#define RDES_EXT_DELAY_RESP 0x4
-#define RDES_EXT_PDELAY_REQ 0x5
-#define RDES_EXT_PDELAY_RESP 0x6
-#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */
#endif /* __DESCS_H__ */
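
Replacing the C bitfields with explicit masks makes the descriptor layout compiler- and endian-independent: bit placement is spelled out rather than left to the ABI, and field access becomes a plain shift-and-mask. For example, pulling the received frame length out of RDES0 under the new defines (standalone demo; the literal mask equals GENMASK(29, 16)):

/* Field extraction under the mask/shift scheme above. */
#include <assert.h>
#include <stdint.h>

#define RDES0_FRAME_LEN_MASK	0x3fff0000u	/* GENMASK(29, 16) */
#define RDES0_FRAME_LEN_SHIFT	16

static unsigned int rdes0_frame_len(uint32_t rdes0)
{
	return (rdes0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT;
}

int main(void)
{
	uint32_t rdes0 = (1518u << RDES0_FRAME_LEN_SHIFT) | 0x1u;

	assert(rdes0_frame_len(rdes0) == 1518);
	return 0;
}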
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 6f2cc78c5cf5..7635a464ce41 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -35,100 +35,91 @@
/* Enhanced descriptors */
static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
- if (end)
- p->des01.erx.end_ring = 1;
-}
+ p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK;
-static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
-{
if (end)
- p->des01.etx.end_ring = 1;
+ p->des1 |= ERDES1_END_RING;
}
-static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
- p->des01.etx.end_ring = ter;
+ if (end)
+ p->des0 |= ETDES0_END_RING;
+ else
+ p->des0 &= ~ETDES0_END_RING;
}
static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_4KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
- p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT)
+ & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
+ & ETDES1_BUFFER1_SIZE_MASK);
} else
- p->des01.etx.buffer1_size = len;
+ p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
}
/* Normal descriptors */
static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
{
- p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
- if (end)
- p->des01.rx.end_ring = 1;
-}
+ p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK;
-static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
-{
if (end)
- p->des01.tx.end_ring = 1;
+ p->des1 |= RDES1_END_RING;
}
-static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
{
- p->des01.tx.end_ring = ter;
+ if (end)
+ p->des1 |= TDES1_END_RING;
+ else
+ p->des1 &= ~TDES1_END_RING;
}
static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_2KiB)) {
- p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
- p->des01.etx.buffer2_size = len - p->des01.etx.buffer1_size;
+ unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
+ & TDES1_BUFFER1_SIZE_MASK;
+ p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT)
+ & TDES1_BUFFER2_SIZE_MASK) | buffer1);
} else
- p->des01.tx.buffer1_size = len;
+ p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK);
}
/* Specific functions used for Chain mode */
/* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
-{
- p->des01.erx.second_address_chained = 1;
-}
-
-static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
{
- p->des01.etx.second_address_chained = 1;
+ p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED;
}
-static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
{
- p->des01.etx.second_address_chained = 1;
+ p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED;
}
static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
- p->des01.etx.buffer1_size = len;
+ p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
}
/* Normal descriptors */
static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
{
- p->des01.rx.second_address_chained = 1;
-}
-
-static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size)
-{
- p->des01.tx.second_address_chained = 1;
+ p->des1 |= RDES1_SECOND_ADDRESS_CHAINED;
}
-static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
{
- p->des01.tx.second_address_chained = 1;
+ p->des1 |= TDES1_SECOND_ADDRESS_CHAINED;
}
static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
- p->des01.tx.buffer1_size = len;
+ p->des1 |= len & TDES1_BUFFER1_SIZE_MASK;
}
#endif /* __DESC_COM_H__ */
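The helpers above replace the old des01 bitfield unions with explicit shift-and-mask arithmetic on the raw descriptor words. Note that the |= forms rely on the target field already being zero (callers clear the descriptors first); a general-purpose update has to clear the field before OR-ing in the new value. A minimal sketch of that pattern, with hypothetical FIELD_SHIFT/FIELD_MASK names standing in for the ERDES1_*/TDES1_* defines:

#include <stdint.h>

/* Hypothetical field layout, in the style of ERDES1_BUFFER2_SIZE_* */
#define FIELD_SHIFT	16
#define FIELD_MASK	(0x1fffu << FIELD_SHIFT)

/* Safe read-modify-write of a packed descriptor field */
static inline uint32_t desc_set_field(uint32_t word, uint32_t val)
{
	word &= ~FIELD_MASK;				/* clear the old contents */
	word |= (val << FIELD_SHIFT) & FIELD_MASK;	/* insert the new value */
	return word;
}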
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797ab74d8..44022b1845ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
+#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
#define EMAC_SPLITTER_CTRL_REG 0x0
#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -148,7 +151,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
int phymode = dwmac->interface;
u32 reg_offset = dwmac->reg_offset;
u32 reg_shift = dwmac->reg_shift;
- u32 ctrl, val;
+ u32 ctrl, val, module;
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
@@ -175,12 +178,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
- if (dwmac->f2h_ptp_ref_clk)
+ if (dwmac->f2h_ptp_ref_clk) {
ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
- else
+ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ &module);
+ module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ module);
+ } else {
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+ }
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
return 0;
}
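The PTP branch above open-codes a read-modify-write through regmap. Since only one bit changes, the same update could be written with the stock regmap_update_bits() helper, which does the read, mask and write internally; a sketch (return value ignored, as in the code above):

	regmap_update_bits(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
			   SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2),
			   SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));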
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 2ec6aeae349e..1657acfa70c2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -95,7 +95,6 @@
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
-#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
#define DMA_BUS_MODE_DEFAULT 0x00000000
/* DMA Control register defines */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 8831a053ac13..b0593a4268ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -221,7 +221,6 @@ enum inter_frame_gap {
/*--- DMA BLOCK defines ---*/
/* DMA Bus Mode register defines */
-#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
@@ -241,7 +240,7 @@ enum rx_tx_priority_ratio {
#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP 0x00800000
-#define DMA_BUS_MODE_PBL 0x01000000
+#define DMA_BUS_MODE_MAXPBL 0x01000000
#define DMA_BUS_MODE_AAL 0x02000000
/* DMA CRS Control and Status Register Mapping */
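A quick worked example of what the rename means in practice: DMA_BUS_MODE_MAXPBL (bit 24) enables the 8x multiplier on the programmed PBL, so with the bit set a pbl value of 8 permits bursts of up to 8 * 8 = 64 beats, while with it clear the same value caps bursts at 8 beats (and, per the comment in dwmac1000_dma.c below, the multiplier was 4x on cores older than 3.50).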
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 0e8937c1184a..da32d6037e3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,37 +30,76 @@
#include "dwmac1000.h"
#include "dwmac_dma.h"
-static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
- int burst_len, u32 dma_tx, u32 dma_rx, int atds)
+static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
- int limit;
+ u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
+ int i;
- /* DMA SW reset */
- value |= DMA_BUS_MODE_SFT_RESET;
- writel(value, ioaddr + DMA_BUS_MODE);
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ pr_info("dwmac1000: Master AXI performs %s burst length\n",
+ !(value & DMA_AXI_UNDEF) ? "fixed" : "any");
+
+ if (axi->axi_lpi_en)
+ value |= DMA_AXI_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= DMA_AXI_LPI_XIT_FRM;
+
+ value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
+ DMA_AXI_WR_OSR_LMT_SHIFT;
+
+ value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
+ DMA_AXI_RD_OSR_LMT_SHIFT;
+
+ /* Depending on the UNDEF bit, the AXI master performs any burst
+ * length allowed by the programmed BLEN bits (by default all BLEN
+ * bits are set).
+ */
+ for (i = 0; i < AXI_BLEN; i++) {
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= DMA_AXI_BLEN256;
break;
- mdelay(10);
+ case 128:
+ value |= DMA_AXI_BLEN128;
+ break;
+ case 64:
+ value |= DMA_AXI_BLEN64;
+ break;
+ case 32:
+ value |= DMA_AXI_BLEN32;
+ break;
+ case 16:
+ value |= DMA_AXI_BLEN16;
+ break;
+ case 8:
+ value |= DMA_AXI_BLEN8;
+ break;
+ case 4:
+ value |= DMA_AXI_BLEN4;
+ break;
+ }
}
- if (limit < 0)
- return -EBUSY;
+
+ writel(value, ioaddr + DMA_AXI_BUS_MODE);
+}
+
+static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
/*
- * Set the DMA PBL (Programmable Burst Length) mode
- * Before stmmac core 3.50 this mode bit was 4xPBL, and
+ * Set the DMA PBL (Programmable Burst Length) mode.
+ *
+ * Note: before stmmac core 3.50 this mode bit was 4xPBL, and
* post 3.5 mode bit acts as 8*PBL.
- * For core rev < 3.5, when the core is set for 4xPBL mode, the
- * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats
- * depending on pbl value.
- * For core rev > 3.5, when the core is set for 8xPBL mode, the
- * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats
- * depending on pbl value.
+ *
+ * This configuration does not handle the Separate PBL feature, so
+ * only bits 13:8 are programmed with the PBL value passed from the
+ * platform.
*/
- value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
- (pbl << DMA_BUS_MODE_RPBL_SHIFT));
+ value |= DMA_BUS_MODE_MAXPBL;
+ value &= ~DMA_BUS_MODE_PBL_MASK;
+ value |= (pbl << DMA_BUS_MODE_PBL_SHIFT);
/* Set the Fixed burst mode */
if (fb)
@@ -73,26 +112,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
if (atds)
value |= DMA_BUS_MODE_ATDS;
- writel(value, ioaddr + DMA_BUS_MODE);
+ if (aal)
+ value |= DMA_BUS_MODE_AAL;
- /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
- * for supported bursts.
- *
- * Note: This is applicable only for revision GMACv3.61a. For
- * older version this register is reserved and shall have no
- * effect.
- *
- * Note:
- * For Fixed Burst Mode: if we directly write 0xFF to this
- * register using the configurations pass from platform code,
- * this would ensure that all bursts supported by core are set
- * and those which are not supported would remain ineffective.
- *
- * For Non Fixed Burst Mode: provide the maximum value of the
- * burst length. Any burst equal or below the provided burst
- * length would be allowed to perform.
- */
- writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
+ writel(value, ioaddr + DMA_BUS_MODE);
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
@@ -102,8 +125,6 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
*/
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
-
- return 0;
}
static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
@@ -205,7 +226,9 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
}
const struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .reset = dwmac_dma_reset,
.init = dwmac1000_dma_init,
+ .axi = dwmac1000_dma_axi,
.dump_regs = dwmac1000_dump_dma_regs,
.dma_mode = dwmac1000_dma_operation_mode,
.enable_dma_transmission = dwmac_enable_dma_transmission,
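Because the supported lengths in the switch inside dwmac1000_dma_axi() are powers of two from 4 to 256 and their BLEN bits are contiguous (BIT(1) through BIT(7)), the whole mapping collapses to a log2 expression. An equivalent sketch using the kernel's is_power_of_2()/ilog2() helpers, shown only to make the bit layout explicit:

	/* 4 -> BIT(1), 8 -> BIT(2), ..., 256 -> BIT(7) */
	if (axi->axi_blen[i] >= 4 && axi->axi_blen[i] <= 256 &&
	    is_power_of_2(axi->axi_blen[i]))
		value |= BIT(ilog2(axi->axi_blen[i]) - 1);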
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index 9d0971c1c2ee..61f54c99a7de 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,24 +32,9 @@
#include "dwmac100.h"
#include "dwmac_dma.h"
-static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
- int burst_len, u32 dma_tx, u32 dma_rx, int atds)
+static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds)
{
- u32 value = readl(ioaddr + DMA_BUS_MODE);
- int limit;
-
- /* DMA SW reset */
- value |= DMA_BUS_MODE_SFT_RESET;
- writel(value, ioaddr + DMA_BUS_MODE);
- limit = 10;
- while (limit--) {
- if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
- break;
- mdelay(10);
- }
- if (limit < 0)
- return -EBUSY;
-
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
ioaddr + DMA_BUS_MODE);
@@ -62,8 +47,6 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
*/
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
-
- return 0;
}
/* Store and Forward capability is not used at all.
@@ -131,6 +114,7 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
}
const struct stmmac_dma_ops dwmac100_dma_ops = {
+ .reset = dwmac_dma_reset,
.init = dwmac100_dma_init,
.dump_regs = dwmac100_dump_dma_regs,
.dma_mode = dwmac100_dma_operation_mode,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index def266da55db..726d9d9aaf83 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -35,10 +35,46 @@
#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+
+/* SW Reset */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+
/* Rx watchdog register */
#define DMA_RX_WATCHDOG 0x00001024
-/* AXI Bus Mode */
+
+/* AXI Master Bus Mode */
#define DMA_AXI_BUS_MODE 0x00001028
+
+#define DMA_AXI_EN_LPI BIT(31)
+#define DMA_AXI_LPI_XIT_FRM BIT(30)
+#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20)
+#define DMA_AXI_WR_OSR_LMT_SHIFT 20
+#define DMA_AXI_WR_OSR_LMT_MASK 0xf
+#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT 16
+#define DMA_AXI_RD_OSR_LMT_MASK 0xf
+
+#define DMA_AXI_OSR_MAX 0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+ (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+#define DMA_AXI_1KBBE BIT(13)
+#define DMA_AXI_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+ DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+ DMA_AXI_BLEN4)
+
+#define DMA_AXI_UNDEF BIT(0)
+
+#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+
#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
@@ -112,5 +148,6 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr);
void dwmac_dma_start_rx(void __iomem *ioaddr);
void dwmac_dma_stop_rx(void __iomem *ioaddr);
int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+int dwmac_dma_reset(void __iomem *ioaddr);
#endif /* __DWMAC_DMA_H__ */
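As a sanity check on the new defines: DMA_AXI_MAX_OSR_LIMIT expands to (0xf << 20) | (0xf << 16) = 0x00ff0000, which is exactly the union of the DMA_AXI_WR_OSR_LMT GENMASK(23, 20) and DMA_AXI_RD_OSR_LMT GENMASK(19, 16) fields, i.e. both outstanding-request limits programmed to their maximum value of 15.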
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 484e3cf9c414..84e3e84cec7d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -26,6 +26,27 @@
#define GMAC_HI_REG_AE 0x80000000
+int dwmac_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ mdelay(10);
+ }
+
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
/* CSR1 enables the transmit DMA to check for new descriptor */
void dwmac_enable_dma_transmission(void __iomem *ioaddr)
{
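dwmac_dma_reset() above waits for the self-clearing reset bit with an open-coded bounded loop (ten 10 ms delays, roughly 100 ms worst case). The generic helper from <linux/iopoll.h> expresses the same wait more compactly; a sketch of the equivalent, not what the driver uses (note readl_poll_timeout() may sleep, unlike mdelay()):

	u32 value;
	int err;

	/* Poll DMA_BUS_MODE every 1 ms, give up after 100 ms */
	err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
				 !(value & DMA_BUS_MODE_SFT_RESET),
				 1000, 100 * 1000);
	/* err is -ETIMEDOUT if the bit never cleared, 0 otherwise */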
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7d944449f5ef..cfb018c7c5eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -1,7 +1,7 @@
/*******************************************************************************
This contains the functions to handle the enhanced descriptors.
- Copyright (C) 2007-2009 STMicroelectronics Ltd
+ Copyright (C) 2007-2014 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,56 +29,64 @@
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes0 = p->des0;
+ int ret = tx_done;
- if (unlikely(p->des01.etx.error_summary)) {
- if (unlikely(p->des01.etx.jabber_timeout))
+ /* Get tx owner first */
+ if (unlikely(tdes0 & ETDES0_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
+ return tx_not_ls;
+
+ if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
+ if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
x->tx_jabber++;
- if (unlikely(p->des01.etx.frame_flushed)) {
+ if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
x->tx_frame_flushed++;
dwmac_dma_flush_tx_fifo(ioaddr);
}
- if (unlikely(p->des01.etx.loss_carrier)) {
+ if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
x->tx_losscarrier++;
stats->tx_carrier_errors++;
}
- if (unlikely(p->des01.etx.no_carrier)) {
+ if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
x->tx_carrier++;
stats->tx_carrier_errors++;
}
- if (unlikely(p->des01.etx.late_collision))
- stats->collisions += p->des01.etx.collision_count;
-
- if (unlikely(p->des01.etx.excessive_collisions))
- stats->collisions += p->des01.etx.collision_count;
+ if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
+ (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
+ stats->collisions +=
+ (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;
- if (unlikely(p->des01.etx.excessive_deferral))
+ if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
x->tx_deferred++;
- if (unlikely(p->des01.etx.underflow_error)) {
+ if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
dwmac_dma_flush_tx_fifo(ioaddr);
x->tx_underflow++;
}
- if (unlikely(p->des01.etx.ip_header_error))
+ if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
x->tx_ip_header_error++;
- if (unlikely(p->des01.etx.payload_error)) {
+ if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
x->tx_payload_error++;
dwmac_dma_flush_tx_fifo(ioaddr);
}
- ret = -1;
+ ret = tx_err;
}
- if (unlikely(p->des01.etx.deferred))
+ if (unlikely(tdes0 & ETDES0_DEFERRED))
x->tx_deferred++;
#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.etx.vlan_frame)
+ if (tdes0 & ETDES0_VLAN_FRAME)
x->tx_vlan++;
#endif
@@ -87,7 +95,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
static int enh_desc_get_tx_len(struct dma_desc *p)
{
- return p->des01.etx.buffer1_size;
+ return (p->des1 & ETDES1_BUFFER1_SIZE_MASK);
}
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
@@ -126,50 +134,55 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
struct dma_extended_desc *p)
{
- if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
- if (p->des4.erx.ip_hdr_err)
+ unsigned int rdes0 = p->basic.des0;
+ unsigned int rdes4 = p->des4;
+
+ if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
+ int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
+
+ if (rdes4 & ERDES4_IP_HDR_ERR)
x->ip_hdr_err++;
- if (p->des4.erx.ip_payload_err)
+ if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
x->ip_payload_err++;
- if (p->des4.erx.ip_csum_bypassed)
+ if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
x->ip_csum_bypassed++;
- if (p->des4.erx.ipv4_pkt_rcvd)
+ if (rdes4 & ERDES4_IPV4_PKT_RCVD)
x->ipv4_pkt_rcvd++;
- if (p->des4.erx.ipv6_pkt_rcvd)
+ if (rdes4 & ERDES4_IPV6_PKT_RCVD)
x->ipv6_pkt_rcvd++;
- if (p->des4.erx.msg_type == RDES_EXT_SYNC)
+ if (message_type == RDES_EXT_SYNC)
x->rx_msg_type_sync++;
- else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
+ else if (message_type == RDES_EXT_FOLLOW_UP)
x->rx_msg_type_follow_up++;
- else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+ else if (message_type == RDES_EXT_DELAY_REQ)
x->rx_msg_type_delay_req++;
- else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
+ else if (message_type == RDES_EXT_DELAY_RESP)
x->rx_msg_type_delay_resp++;
- else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
+ else if (message_type == RDES_EXT_PDELAY_REQ)
x->rx_msg_type_pdelay_req++;
- else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
+ else if (message_type == RDES_EXT_PDELAY_RESP)
x->rx_msg_type_pdelay_resp++;
- else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
x->rx_msg_type_pdelay_follow_up++;
else
x->rx_msg_type_ext_no_ptp++;
- if (p->des4.erx.ptp_frame_type)
+ if (rdes4 & ERDES4_PTP_FRAME_TYPE)
x->ptp_frame_type++;
- if (p->des4.erx.ptp_ver)
+ if (rdes4 & ERDES4_PTP_VER)
x->ptp_ver++;
- if (p->des4.erx.timestamp_dropped)
+ if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
x->timestamp_dropped++;
- if (p->des4.erx.av_pkt_rcvd)
+ if (rdes4 & ERDES4_AV_PKT_RCVD)
x->av_pkt_rcvd++;
- if (p->des4.erx.av_tagged_pkt_rcvd)
+ if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
x->av_tagged_pkt_rcvd++;
- if (p->des4.erx.vlan_tag_priority_val)
+ if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
x->vlan_tag_priority_val++;
- if (p->des4.erx.l3_filter_match)
+ if (rdes4 & ERDES4_L3_FILTER_MATCH)
x->l3_filter_match++;
- if (p->des4.erx.l4_filter_match)
+ if (rdes4 & ERDES4_L4_FILTER_MATCH)
x->l4_filter_match++;
- if (p->des4.erx.l3_l4_filter_no_match)
+ if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
x->l3_l4_filter_no_match++;
}
}
@@ -177,30 +190,33 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
- int ret = good_frame;
struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int rdes0 = p->des0;
+ int ret = good_frame;
+
+ if (unlikely(rdes0 & RDES0_OWN))
+ return dma_own;
- if (unlikely(p->des01.erx.error_summary)) {
- if (unlikely(p->des01.erx.descriptor_error)) {
+ if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
+ if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
stats->rx_length_errors++;
}
- if (unlikely(p->des01.erx.overflow_error))
+ if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
x->rx_gmac_overflow++;
- if (unlikely(p->des01.erx.ipc_csum_error))
+ if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
pr_err("\tIPC Csum Error/Giant frame\n");
- if (unlikely(p->des01.erx.late_collision)) {
+ if (unlikely(rdes0 & RDES0_COLLISION))
stats->collisions++;
- }
- if (unlikely(p->des01.erx.receive_watchdog))
+ if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
x->rx_watchdog++;
- if (unlikely(p->des01.erx.error_gmii))
+ if (unlikely(rdes0 & RDES0_MII_ERROR)) /* GMII */
x->rx_mii++;
- if (unlikely(p->des01.erx.crc_error)) {
+ if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
x->rx_crc++;
stats->rx_crc_errors++;
}
@@ -211,26 +227,27 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
- p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
- if (unlikely(p->des01.erx.dribbling))
+ if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
- if (unlikely(p->des01.erx.sa_filter_fail)) {
+ if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
x->sa_rx_filter_fail++;
ret = discard_frame;
}
- if (unlikely(p->des01.erx.da_filter_fail)) {
+ if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
x->da_rx_filter_fail++;
ret = discard_frame;
}
- if (unlikely(p->des01.erx.length_error)) {
+ if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
x->rx_length++;
ret = discard_frame;
}
#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.erx.vlan_tag)
+ if (rdes0 & RDES0_VLAN_TAG)
x->rx_vlan++;
#endif
@@ -240,110 +257,125 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
int mode, int end)
{
- p->des01.all_flags = 0;
- p->des01.erx.own = 1;
- p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ p->des0 |= RDES0_OWN;
+ p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
- ehn_desc_rx_set_on_chain(p, end);
+ ehn_desc_rx_set_on_chain(p);
else
ehn_desc_rx_set_on_ring(p, end);
if (disable_rx_ic)
- p->des01.erx.disable_ic = 1;
+ p->des1 |= ERDES1_DISABLE_IC;
}
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des01.all_flags = 0;
+ p->des0 &= ~ETDES0_OWN;
if (mode == STMMAC_CHAIN_MODE)
- ehn_desc_tx_set_on_chain(p, end);
+ enh_desc_end_tx_desc_on_chain(p);
else
- ehn_desc_tx_set_on_ring(p, end);
+ enh_desc_end_tx_desc_on_ring(p, end);
}
static int enh_desc_get_tx_owner(struct dma_desc *p)
{
- return p->des01.etx.own;
-}
-
-static int enh_desc_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.erx.own;
+ return (p->des0 & ETDES0_OWN) >> 31;
}
static void enh_desc_set_tx_owner(struct dma_desc *p)
{
- p->des01.etx.own = 1;
+ p->des0 |= ETDES0_OWN;
}
static void enh_desc_set_rx_owner(struct dma_desc *p)
{
- p->des01.erx.own = 1;
+ p->des0 |= RDES0_OWN;
}
static int enh_desc_get_tx_ls(struct dma_desc *p)
{
- return p->des01.etx.last_segment;
+ return (p->des0 & ETDES0_LAST_SEGMENT) >> 29;
}
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
- int ter = p->des01.etx.end_ring;
+ int ter = (p->des0 & ETDES0_END_RING) >> 21;
memset(p, 0, offsetof(struct dma_desc, des2));
if (mode == STMMAC_CHAIN_MODE)
- enh_desc_end_tx_desc_on_chain(p, ter);
+ enh_desc_end_tx_desc_on_chain(p);
else
enh_desc_end_tx_desc_on_ring(p, ter);
}
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag, int mode)
+ bool csum_flag, int mode, bool tx_own,
+ bool ls)
{
- p->des01.etx.first_segment = is_fs;
+ unsigned int tdes0 = p->des0;
if (mode == STMMAC_CHAIN_MODE)
enh_set_tx_desc_len_on_chain(p, len);
else
enh_set_tx_desc_len_on_ring(p, len);
+ if (is_fs)
+ tdes0 |= ETDES0_FIRST_SEGMENT;
+ else
+ tdes0 &= ~ETDES0_FIRST_SEGMENT;
+
if (likely(csum_flag))
- p->des01.etx.checksum_insertion = cic_full;
-}
+ tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
+ else
+ tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
-static void enh_desc_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.etx.interrupt = 0;
+ if (ls)
+ tdes0 |= ETDES0_LAST_SEGMENT;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes0 |= ETDES0_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first frame, all
+ * other descriptors for the same frame have to be set up
+ * beforehand, to avoid a race condition.
+ */
+ wmb();
+
+ p->des0 = tdes0;
}
-static void enh_desc_close_tx_desc(struct dma_desc *p)
+static void enh_desc_set_tx_ic(struct dma_desc *p)
{
- p->des01.etx.last_segment = 1;
- p->des01.etx.interrupt = 1;
+ p->des0 |= ETDES0_INTERRUPT;
}
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
+ unsigned int csum = 0;
/* The type-1 checksum offload engines append the checksum at
* the end of frame and the two bytes of checksum are added in
* the length.
* Adjust for that in the framelen for type-1 checksum offload
- * engines. */
+ * engines.
+ */
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
- return p->des01.erx.frame_length - 2;
- else
- return p->des01.erx.frame_length;
+ csum = 2;
+
+ return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
+ csum);
}
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
- p->des01.etx.time_stamp_enable = 1;
+ p->des0 |= ETDES0_TIME_STAMP_ENABLE;
}
static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
- return p->des01.etx.time_stamp_status;
+ return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17;
}
static u64 enh_desc_get_timestamp(void *desc, u32 ats)
@@ -368,7 +400,7 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
{
if (ats) {
struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
- return p->basic.des01.erx.ipc_csum_error;
+ return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7;
} else {
struct dma_desc *p = (struct dma_desc *)desc;
if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
@@ -386,11 +418,9 @@ const struct stmmac_desc_ops enh_desc_ops = {
.init_rx_desc = enh_desc_init_rx_desc,
.init_tx_desc = enh_desc_init_tx_desc,
.get_tx_owner = enh_desc_get_tx_owner,
- .get_rx_owner = enh_desc_get_rx_owner,
.release_tx_desc = enh_desc_release_tx_desc,
.prepare_tx_desc = enh_desc_prepare_tx_desc,
- .clear_tx_ic = enh_desc_clear_tx_ic,
- .close_tx_desc = enh_desc_close_tx_desc,
+ .set_tx_ic = enh_desc_set_tx_ic,
.get_tx_ls = enh_desc_get_tx_ls,
.set_tx_owner = enh_desc_set_tx_owner,
.set_rx_owner = enh_desc_set_rx_owner,
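The reworked enh_desc_prepare_tx_desc() accumulates every control flag in a local tdes0 and writes the descriptor word back only once, with a wmb() ensuring the rest of the frame's descriptors are visible before the first one's OWN bit. The underlying publish pattern, as a sketch with hypothetical variable names:

	/* Fill in everything the DMA engine will read ... */
	desc->des1 = ctrl_and_len;		/* hypothetical control word */
	desc->des2 = dma_addr;			/* buffer address */
	wmb();					/* order the stores above ... */
	desc->des0 = flags | ETDES0_OWN;	/* ... before ownership flips */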
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 48c3456445b2..011386f6f24d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -29,33 +29,47 @@
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p, void __iomem *ioaddr)
{
- int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes0 = p->des0;
+ unsigned int tdes1 = p->des1;
+ int ret = tx_done;
- if (unlikely(p->des01.tx.error_summary)) {
- if (unlikely(p->des01.tx.underflow_error)) {
+ /* Get tx owner first */
+ if (unlikely(tdes0 & TDES0_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes1 & TDES1_LAST_SEGMENT)))
+ return tx_not_ls;
+
+ if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
+ if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
x->tx_underflow++;
stats->tx_fifo_errors++;
}
- if (unlikely(p->des01.tx.no_carrier)) {
+ if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
x->tx_carrier++;
stats->tx_carrier_errors++;
}
- if (unlikely(p->des01.tx.loss_carrier)) {
+ if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
x->tx_losscarrier++;
stats->tx_carrier_errors++;
}
- if (unlikely((p->des01.tx.excessive_deferral) ||
- (p->des01.tx.excessive_collisions) ||
- (p->des01.tx.late_collision)))
- stats->collisions += p->des01.tx.collision_count;
- ret = -1;
+ if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
+ (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
+ (tdes0 & TDES0_LATE_COLLISION))) {
+ unsigned int collisions;
+
+ collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
+ stats->collisions += collisions;
+ }
+ ret = tx_err;
}
- if (p->des01.etx.vlan_frame)
+ if (tdes0 & TDES0_VLAN_FRAME)
x->tx_vlan++;
- if (unlikely(p->des01.tx.deferred))
+ if (unlikely(tdes0 & TDES0_DEFERRED))
x->tx_deferred++;
return ret;
@@ -63,7 +77,7 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
static int ndesc_get_tx_len(struct dma_desc *p)
{
- return p->des01.tx.buffer1_size;
+ return (p->des1 & RDES1_BUFFER1_SIZE_MASK);
}
/* This function verifies if each incoming frame has some errors
@@ -74,47 +88,51 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = good_frame;
+ unsigned int rdes0 = p->des0;
struct net_device_stats *stats = (struct net_device_stats *)data;
- if (unlikely(p->des01.rx.last_descriptor == 0)) {
+ if (unlikely(rdes0 & RDES0_OWN))
+ return dma_own;
+
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
pr_warn("%s: Oversized frame spanned multiple buffers\n",
__func__);
stats->rx_length_errors++;
return discard_frame;
}
- if (unlikely(p->des01.rx.error_summary)) {
- if (unlikely(p->des01.rx.descriptor_error))
+ if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
+ if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
x->rx_desc++;
- if (unlikely(p->des01.rx.sa_filter_fail))
+ if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
x->sa_filter_fail++;
- if (unlikely(p->des01.rx.overflow_error))
+ if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
x->overflow_error++;
- if (unlikely(p->des01.rx.ipc_csum_error))
+ if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
x->ipc_csum_error++;
- if (unlikely(p->des01.rx.collision)) {
+ if (unlikely(rdes0 & RDES0_COLLISION)) {
x->rx_collision++;
stats->collisions++;
}
- if (unlikely(p->des01.rx.crc_error)) {
+ if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
x->rx_crc++;
stats->rx_crc_errors++;
}
ret = discard_frame;
}
- if (unlikely(p->des01.rx.dribbling))
+ if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
- if (unlikely(p->des01.rx.length_error)) {
+ if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
x->rx_length++;
ret = discard_frame;
}
- if (unlikely(p->des01.rx.mii_error)) {
+ if (unlikely(rdes0 & RDES0_MII_ERROR)) {
x->rx_mii++;
ret = discard_frame;
}
#ifdef STMMAC_VLAN_TAG_USED
- if (p->des01.rx.vlan_tag)
+ if (rdes0 & RDES0_VLAN_TAG)
x->vlan_tag++;
#endif
return ret;
@@ -123,9 +141,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
int end)
{
- p->des01.all_flags = 0;
- p->des01.rx.own = 1;
- p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ p->des0 |= RDES0_OWN;
+ p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK;
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
@@ -133,99 +150,110 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
ndesc_rx_set_on_ring(p, end);
if (disable_rx_ic)
- p->des01.rx.disable_ic = 1;
+ p->des1 |= RDES1_DISABLE_IC;
}
static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- p->des01.all_flags = 0;
+ p->des0 &= ~TDES0_OWN;
if (mode == STMMAC_CHAIN_MODE)
- ndesc_tx_set_on_chain(p, end);
+ ndesc_tx_set_on_chain(p);
else
- ndesc_tx_set_on_ring(p, end);
+ ndesc_end_tx_desc_on_ring(p, end);
}
static int ndesc_get_tx_owner(struct dma_desc *p)
{
- return p->des01.tx.own;
-}
-
-static int ndesc_get_rx_owner(struct dma_desc *p)
-{
- return p->des01.rx.own;
+ return (p->des0 & TDES0_OWN) >> 31;
}
static void ndesc_set_tx_owner(struct dma_desc *p)
{
- p->des01.tx.own = 1;
+ p->des0 |= TDES0_OWN;
}
static void ndesc_set_rx_owner(struct dma_desc *p)
{
- p->des01.rx.own = 1;
+ p->des0 |= RDES0_OWN;
}
static int ndesc_get_tx_ls(struct dma_desc *p)
{
- return p->des01.tx.last_segment;
+ return (p->des1 & TDES1_LAST_SEGMENT) >> 30;
}
static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
{
- int ter = p->des01.tx.end_ring;
+ int ter = (p->des1 & TDES1_END_RING) >> 25;
memset(p, 0, offsetof(struct dma_desc, des2));
if (mode == STMMAC_CHAIN_MODE)
- ndesc_end_tx_desc_on_chain(p, ter);
+ ndesc_tx_set_on_chain(p);
else
ndesc_end_tx_desc_on_ring(p, ter);
}
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag, int mode)
+ bool csum_flag, int mode, bool tx_own,
+ bool ls)
{
- p->des01.tx.first_segment = is_fs;
+ unsigned int tdes1 = p->des1;
+
+ if (is_fs)
+ tdes1 |= TDES1_FIRST_SEGMENT;
+ else
+ tdes1 &= ~TDES1_FIRST_SEGMENT;
+
+ if (likely(csum_flag))
+ tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
+ else
+ tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);
+
+ if (ls)
+ tdes1 |= TDES1_LAST_SEGMENT;
+
+ p->des1 = tdes1;
+
if (mode == STMMAC_CHAIN_MODE)
norm_set_tx_desc_len_on_chain(p, len);
else
norm_set_tx_desc_len_on_ring(p, len);
- if (likely(csum_flag))
- p->des01.tx.checksum_insertion = cic_full;
-}
-
-static void ndesc_clear_tx_ic(struct dma_desc *p)
-{
- p->des01.tx.interrupt = 0;
+ if (tx_own)
+ p->des0 |= TDES0_OWN;
}
-static void ndesc_close_tx_desc(struct dma_desc *p)
+static void ndesc_set_tx_ic(struct dma_desc *p)
{
- p->des01.tx.last_segment = 1;
- p->des01.tx.interrupt = 1;
+ p->des1 |= TDES1_INTERRUPT;
}
static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
+ unsigned int csum = 0;
+
/* The type-1 checksum offload engines append the checksum at
* the end of frame and the two bytes of checksum are added in
* the length.
* Adjust for that in the framelen for type-1 checksum offload
- * engines. */
+ * engines.
+ */
if (rx_coe_type == STMMAC_RX_COE_TYPE1)
- return p->des01.rx.frame_length - 2;
- else
- return p->des01.rx.frame_length;
+ csum = 2;
+
+ return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
+ csum);
}
static void ndesc_enable_tx_timestamp(struct dma_desc *p)
{
- p->des01.tx.time_stamp_enable = 1;
+ p->des1 |= TDES1_TIME_STAMP_ENABLE;
}
static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
{
- return p->des01.tx.time_stamp_status;
+ return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17;
}
static u64 ndesc_get_timestamp(void *desc, u32 ats)
@@ -258,11 +286,9 @@ const struct stmmac_desc_ops ndesc_ops = {
.init_rx_desc = ndesc_init_rx_desc,
.init_tx_desc = ndesc_init_tx_desc,
.get_tx_owner = ndesc_get_tx_owner,
- .get_rx_owner = ndesc_get_rx_owner,
.release_tx_desc = ndesc_release_tx_desc,
.prepare_tx_desc = ndesc_prepare_tx_desc,
- .clear_tx_ic = ndesc_clear_tx_ic,
- .close_tx_desc = ndesc_close_tx_desc,
+ .set_tx_ic = ndesc_set_tx_ic,
.get_tx_ls = ndesc_get_tx_ls,
.set_tx_owner = ndesc_set_tx_owner,
.set_rx_owner = ndesc_set_rx_owner,
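Both status decoders now pull multi-bit fields out of the raw words as (word & MASK) >> SHIFT with the shift repeated at each use (e.g. the collision counter at bits 6:3, shifted by 3). A tiny helper would keep mask and shift together; a sketch with a hypothetical desc_get_field(), the 0x78 mask mirroring TDES0_COLLISION_COUNT_MASK as used above:

#include <stdint.h>

/* Hypothetical helper: extract a field given its mask and LSB index */
static inline uint32_t desc_get_field(uint32_t word, uint32_t mask,
				      unsigned int lsb)
{
	return (word & mask) >> lsb;
}

/* The TDES0 collision count occupies bits 6:3 (mask 0x78) */
uint32_t collisions = desc_get_field(tdes0, 0x78, 3);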
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 5dd50c6cda5b..7723b5d2499a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -31,8 +31,7 @@
static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *)p;
- unsigned int txsize = priv->dma_tx_size;
- unsigned int entry = priv->cur_tx % txsize;
+ unsigned int entry = priv->cur_tx;
struct dma_desc *desc;
unsigned int nopaged_len = skb_headlen(skb);
unsigned int bmax, len;
@@ -57,12 +56,14 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = bmax;
+ priv->tx_skbuff_dma[entry].is_jumbo = true;
+
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
- STMMAC_RING_MODE);
- wmb();
+ STMMAC_RING_MODE, 0, false);
priv->tx_skbuff[entry] = NULL;
- entry = (++priv->cur_tx) % txsize;
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
if (priv->extend_desc)
desc = (struct dma_desc *)(priv->dma_etx + entry);
@@ -74,22 +75,27 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = len;
+ priv->tx_skbuff_dma[entry].is_jumbo = true;
+
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
- STMMAC_RING_MODE);
- wmb();
- priv->hw->desc->set_tx_owner(desc);
+ STMMAC_RING_MODE, 1, true);
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, desc->des2))
return -1;
priv->tx_skbuff_dma[entry].buf = desc->des2;
+ priv->tx_skbuff_dma[entry].len = nopaged_len;
+ priv->tx_skbuff_dma[entry].is_jumbo = true;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
- STMMAC_RING_MODE);
+ STMMAC_RING_MODE, 0, true);
}
+ priv->cur_tx = entry;
+
return entry;
}
@@ -120,7 +126,13 @@ static void stmmac_init_desc3(struct dma_desc *p)
static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
- if (unlikely(p->des3))
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+ unsigned int entry = priv->dirty_tx;
+
+ /* des3 is only used for jumbo frames tx or time stamping */
+ if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
+ (priv->tx_skbuff_dma[entry].last_segment &&
+ !priv->extend_desc && priv->hwts_tx_en)))
p->des3 = 0;
}
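Descriptor indices no longer take a modulo on every access; they advance through STMMAC_GET_ENTRY, defined elsewhere in this patch. Assuming the usual power-of-two formulation, it reduces to a masked increment (a sketch):

	/* Wrap-around increment without a division; only valid when
	 * size is a power of two.
	 */
	#define STMMAC_GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))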
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 1f3b33a6c6a8..8bbab97895fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,7 +24,7 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "March_2013"
+#define DRV_MODULE_VERSION "Oct_2015"
#include <linux/clk.h>
#include <linux/stmmac.h>
@@ -45,6 +45,9 @@ struct stmmac_resources {
struct stmmac_tx_info {
dma_addr_t buf;
bool map_as_page;
+ unsigned len;
+ bool last_segment;
+ bool is_jumbo;
};
struct stmmac_priv {
@@ -54,7 +57,6 @@ struct stmmac_priv {
struct sk_buff **tx_skbuff;
unsigned int cur_tx;
unsigned int dirty_tx;
- unsigned int dma_tx_size;
u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
@@ -71,8 +73,9 @@ struct stmmac_priv {
struct sk_buff **rx_skbuff;
unsigned int cur_rx;
unsigned int dirty_rx;
- unsigned int dma_rx_size;
unsigned int dma_buf_sz;
+ unsigned int rx_copybreak;
+ unsigned int rx_zeroc_thresh;
u32 rx_riwt;
int hwts_rx_en;
dma_addr_t *rx_skbuff_dma;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 4c6486cc80fb..3c7928edfebb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -97,7 +97,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(napi_poll),
STMMAC_STAT(tx_normal_irq_n),
STMMAC_STAT(tx_clean),
- STMMAC_STAT(tx_reset_ic_bit),
+ STMMAC_STAT(tx_set_ic_bit),
STMMAC_STAT(irq_receive_pmt_irq_n),
/* MMC info */
STMMAC_STAT(mmc_tx_irq_n),
@@ -781,6 +781,43 @@ static int stmmac_get_ts_info(struct net_device *dev,
return ethtool_op_get_ts_info(dev, info);
}
+static int stmmac_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int stmmac_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -803,6 +840,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
+ .get_tunable = stmmac_get_tunable,
+ .set_tunable = stmmac_set_tunable,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
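With the two callbacks above wired into stmmac_ethtool_ops, the copy threshold becomes reachable through the standard ethtool tunable interface. With a recent enough ethtool binary this should look like "ethtool --get-tunable eth0 rx-copybreak" to query it and "ethtool --set-tunable eth0 rx-copybreak 512" to raise it above the 256-byte default installed at open time.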
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c21015b68097..fcbd4be562e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -71,15 +71,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define DMA_TX_SIZE 256
-static int dma_txsize = DMA_TX_SIZE;
-module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
-
-#define DMA_RX_SIZE 256
-static int dma_rxsize = DMA_RX_SIZE;
-module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
+#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
+#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
@@ -99,6 +92,8 @@ static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+#define STMMAC_RX_COPYBREAK 256
+
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
@@ -134,10 +129,6 @@ static void stmmac_verify_args(void)
{
if (unlikely(watchdog < 0))
watchdog = TX_TIMEO;
- if (unlikely(dma_rxsize < 0))
- dma_rxsize = DMA_RX_SIZE;
- if (unlikely(dma_txsize < 0))
- dma_txsize = DMA_TX_SIZE;
if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
buf_sz = DEFAULT_BUFSIZE;
if (unlikely(flow_ctrl > 1))
@@ -197,12 +188,28 @@ static void print_pkt(unsigned char *buf, int len)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
-/* minimum number of free TX descriptors required to wake up TX process */
-#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
-
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
- return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
+ unsigned avail;
+
+ if (priv->dirty_tx > priv->cur_tx)
+ avail = priv->dirty_tx - priv->cur_tx - 1;
+ else
+ avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
+
+ return avail;
+}
+
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
+{
+ unsigned dirty;
+
+ if (priv->dirty_rx <= priv->cur_rx)
+ dirty = priv->cur_rx - priv->dirty_rx;
+ else
+ dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
+
+ return dirty;
}
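A worked example for the two helpers above, assuming a hypothetical 256-entry TX ring: with dirty_tx = 250 and cur_tx = 5 the first branch yields 250 - 5 - 1 = 244 free slots; with dirty_tx = 3 and cur_tx = 5 the second branch yields 256 - 5 + 3 - 1 = 253. The "- 1" keeps one descriptor permanently unused so that a completely full ring never looks identical to an empty one.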
/**
@@ -271,7 +278,6 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
- char *phy_bus_name = priv->plat->phy_bus_name;
unsigned long flags;
bool ret = false;
@@ -282,10 +288,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
(priv->pcs == STMMAC_PCS_RTBI))
goto out;
- /* Never init EEE in case of a switch is attached */
- if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
- goto out;
-
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
int tx_lpi_timer = priv->tx_lpi_timer;
@@ -765,10 +767,16 @@ static void stmmac_adjust_link(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* At this stage, it could be needed to setup the EEE or adjust some
- * MAC related HW registers.
- */
- priv->eee_enabled = stmmac_eee_init(priv);
+ if (phydev->is_pseudo_fixed_link)
+ /* Stop the PHY layer from calling the adjust_link hook when
+ * a switch is attached to the stmmac driver.
+ */
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+ else
+ /* At this stage, initialize the EEE if supported.
+ * This path is never taken in the fixed_link case.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
}
/**
@@ -820,12 +828,8 @@ static int stmmac_init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, priv->plat->phy_node,
&stmmac_adjust_link, 0, interface);
} else {
- if (priv->plat->phy_bus_name)
- snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
- priv->plat->phy_bus_name, priv->plat->bus_id);
- else
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+ priv->plat->bus_id);
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
@@ -862,6 +866,7 @@ static int stmmac_init_phy(struct net_device *dev)
phy_disconnect(phydev);
return -ENODEV;
}
+
pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
" Link = %d\n", dev->name, phydev->phy_id, phydev->link);
@@ -906,19 +911,16 @@ static void stmmac_display_ring(void *head, int size, int extend_desc)
static void stmmac_display_rings(struct stmmac_priv *priv)
{
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
-
if (priv->extend_desc) {
pr_info("Extended RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
+ stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
pr_info("Extended TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
} else {
pr_info("RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+ stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
pr_info("TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+ stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
}
}
@@ -947,28 +949,26 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
int i;
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
/* Clear the Rx/Tx descriptors */
- for (i = 0; i < rxsize; i++)
+ for (i = 0; i < DMA_RX_SIZE; i++)
if (priv->extend_desc)
priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == rxsize - 1));
+ (i == DMA_RX_SIZE - 1));
else
priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == rxsize - 1));
- for (i = 0; i < txsize; i++)
+ (i == DMA_RX_SIZE - 1));
+ for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
priv->mode,
- (i == txsize - 1));
+ (i == DMA_TX_SIZE - 1));
else
priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
priv->mode,
- (i == txsize - 1));
+ (i == DMA_TX_SIZE - 1));
}
/**
@@ -1031,8 +1031,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
unsigned int bfsize = 0;
int ret = -ENOMEM;
@@ -1044,10 +1042,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
priv->dma_buf_sz = bfsize;
- if (netif_msg_probe(priv))
- pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
- txsize, rxsize, bfsize);
-
if (netif_msg_probe(priv)) {
pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
(u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1055,7 +1049,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
/* RX INITIALIZATION */
pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
}
- for (i = 0; i < rxsize; i++) {
+ for (i = 0; i < DMA_RX_SIZE; i++) {
struct dma_desc *p;
if (priv->extend_desc)
p = &((priv->dma_erx + i)->basic);
@@ -1072,26 +1066,26 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
(unsigned int)priv->rx_skbuff_dma[i]);
}
priv->cur_rx = 0;
- priv->dirty_rx = (unsigned int)(i - rxsize);
+ priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
buf_sz = bfsize;
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc) {
priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
- rxsize, 1);
+ DMA_RX_SIZE, 1);
priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
- txsize, 1);
+ DMA_TX_SIZE, 1);
} else {
priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
- rxsize, 0);
+ DMA_RX_SIZE, 0);
priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
- txsize, 0);
+ DMA_TX_SIZE, 0);
}
}
/* TX INITIALIZATION */
- for (i = 0; i < txsize; i++) {
+ for (i = 0; i < DMA_TX_SIZE; i++) {
struct dma_desc *p;
if (priv->extend_desc)
p = &((priv->dma_etx + i)->basic);
@@ -1100,6 +1094,8 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
p->des2 = 0;
priv->tx_skbuff_dma[i].buf = 0;
priv->tx_skbuff_dma[i].map_as_page = false;
+ priv->tx_skbuff_dma[i].len = 0;
+ priv->tx_skbuff_dma[i].last_segment = false;
priv->tx_skbuff[i] = NULL;
}
@@ -1123,7 +1119,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
int i;
- for (i = 0; i < priv->dma_rx_size; i++)
+ for (i = 0; i < DMA_RX_SIZE; i++)
stmmac_free_rx_buffers(priv, i);
}
@@ -1131,7 +1127,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
int i;
- for (i = 0; i < priv->dma_tx_size; i++) {
+ for (i = 0; i < DMA_TX_SIZE; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1143,12 +1139,12 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
if (priv->tx_skbuff_dma[i].map_as_page)
dma_unmap_page(priv->device,
priv->tx_skbuff_dma[i].buf,
- priv->hw->desc->get_tx_len(p),
+ priv->tx_skbuff_dma[i].len,
DMA_TO_DEVICE);
else
dma_unmap_single(priv->device,
priv->tx_skbuff_dma[i].buf,
- priv->hw->desc->get_tx_len(p),
+ priv->tx_skbuff_dma[i].len,
DMA_TO_DEVICE);
}
@@ -1171,33 +1167,31 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
*/
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
int ret = -ENOMEM;
- priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
GFP_KERNEL);
if (!priv->rx_skbuff_dma)
return -ENOMEM;
- priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->rx_skbuff)
goto err_rx_skbuff;
- priv->tx_skbuff_dma = kmalloc_array(txsize,
+ priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
sizeof(*priv->tx_skbuff_dma),
GFP_KERNEL);
if (!priv->tx_skbuff_dma)
goto err_tx_skbuff_dma;
- priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skbuff)
goto err_tx_skbuff;
if (priv->extend_desc) {
- priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
+ priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct
dma_extended_desc),
&priv->dma_rx_phy,
@@ -1205,31 +1199,31 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
if (!priv->dma_erx)
goto err_dma;
- priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
+ priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
sizeof(struct
dma_extended_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
if (!priv->dma_etx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct dma_extended_desc),
priv->dma_erx, priv->dma_rx_phy);
goto err_dma;
}
} else {
- priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
+ priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
if (!priv->dma_rx)
goto err_dma;
- priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
+ priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
sizeof(struct dma_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
if (!priv->dma_tx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct dma_desc),
priv->dma_rx, priv->dma_rx_phy);
goto err_dma;
@@ -1258,16 +1252,16 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc) {
dma_free_coherent(priv->device,
- priv->dma_tx_size * sizeof(struct dma_desc),
+ DMA_TX_SIZE * sizeof(struct dma_desc),
priv->dma_tx, priv->dma_tx_phy);
dma_free_coherent(priv->device,
- priv->dma_rx_size * sizeof(struct dma_desc),
+ DMA_RX_SIZE * sizeof(struct dma_desc),
priv->dma_rx, priv->dma_rx_phy);
} else {
- dma_free_coherent(priv->device, priv->dma_tx_size *
+ dma_free_coherent(priv->device, DMA_TX_SIZE *
sizeof(struct dma_extended_desc),
priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, DMA_RX_SIZE *
sizeof(struct dma_extended_desc),
priv->dma_erx, priv->dma_rx_phy);
}
@@ -1312,62 +1306,59 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
*/
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
- unsigned int txsize = priv->dma_tx_size;
unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int entry = priv->dirty_tx;
spin_lock(&priv->tx_lock);
priv->xstats.tx_clean++;
- while (priv->dirty_tx != priv->cur_tx) {
- int last;
- unsigned int entry = priv->dirty_tx % txsize;
+ while (entry != priv->cur_tx) {
struct sk_buff *skb = priv->tx_skbuff[entry];
struct dma_desc *p;
+ int status;
if (priv->extend_desc)
p = (struct dma_desc *)(priv->dma_etx + entry);
else
p = priv->dma_tx + entry;
- /* Check if the descriptor is owned by the DMA. */
- if (priv->hw->desc->get_tx_owner(p))
- break;
-
- /* Verify tx error by looking at the last segment. */
- last = priv->hw->desc->get_tx_ls(p);
- if (likely(last)) {
- int tx_error =
- priv->hw->desc->tx_status(&priv->dev->stats,
+ status = priv->hw->desc->tx_status(&priv->dev->stats,
&priv->xstats, p,
priv->ioaddr);
- if (likely(tx_error == 0)) {
+ /* Check if the descriptor is owned by the DMA */
+ if (unlikely(status & tx_dma_own))
+ break;
+
+ /* Just consider the last segment and ... */
+ if (likely(!(status & tx_not_ls))) {
+ /* ... verify the status error condition */
+ if (unlikely(status & tx_err)) {
+ priv->dev->stats.tx_errors++;
+ } else {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
- } else
- priv->dev->stats.tx_errors++;
-
+ }
stmmac_get_tx_hwtstamp(priv, entry, skb);
}
- if (netif_msg_tx_done(priv))
- pr_debug("%s: curr %d, dirty %d\n", __func__,
- priv->cur_tx, priv->dirty_tx);
if (likely(priv->tx_skbuff_dma[entry].buf)) {
if (priv->tx_skbuff_dma[entry].map_as_page)
dma_unmap_page(priv->device,
priv->tx_skbuff_dma[entry].buf,
- priv->hw->desc->get_tx_len(p),
+ priv->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
else
dma_unmap_single(priv->device,
priv->tx_skbuff_dma[entry].buf,
- priv->hw->desc->get_tx_len(p),
+ priv->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry].buf = 0;
priv->tx_skbuff_dma[entry].map_as_page = false;
}
priv->hw->mode->clean_desc3(priv, p);
+ priv->tx_skbuff_dma[entry].last_segment = false;
+ priv->tx_skbuff_dma[entry].is_jumbo = false;
if (likely(skb != NULL)) {
pkts_compl++;
@@ -1378,16 +1369,17 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->hw->desc->release_tx_desc(p, priv->mode);
- priv->dirty_tx++;
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
}
+ priv->dirty_tx = entry;
netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
if (unlikely(netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
netif_tx_lock(priv->dev);
if (netif_queue_stopped(priv->dev) &&
- stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
if (netif_msg_tx_done(priv))
pr_debug("%s: restart transmit\n", __func__);
netif_wake_queue(priv->dev);
@@ -1421,20 +1413,19 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
static void stmmac_tx_err(struct stmmac_priv *priv)
{
int i;
- int txsize = priv->dma_tx_size;
netif_stop_queue(priv->dev);
priv->hw->dma->stop_tx(priv->ioaddr);
dma_free_tx_skbufs(priv);
- for (i = 0; i < txsize; i++)
+ for (i = 0; i < DMA_TX_SIZE; i++)
if (priv->extend_desc)
priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
priv->mode,
- (i == txsize - 1));
+ (i == DMA_TX_SIZE - 1));
else
priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
priv->mode,
- (i == txsize - 1));
+ (i == DMA_TX_SIZE - 1));
priv->dirty_tx = 0;
priv->cur_tx = 0;
netdev_reset_queue(priv->dev);
@@ -1635,23 +1626,35 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
*/
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
- int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
+ int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
int mixed_burst = 0;
int atds = 0;
+ int ret = 0;
if (priv->plat->dma_cfg) {
pbl = priv->plat->dma_cfg->pbl;
fixed_burst = priv->plat->dma_cfg->fixed_burst;
mixed_burst = priv->plat->dma_cfg->mixed_burst;
- burst_len = priv->plat->dma_cfg->burst_len;
+ aal = priv->plat->dma_cfg->aal;
}
if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
atds = 1;
- return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
- burst_len, priv->dma_tx_phy,
- priv->dma_rx_phy, atds);
+ ret = priv->hw->dma->reset(priv->ioaddr);
+ if (ret) {
+ dev_err(priv->device, "Failed to reset the dma\n");
+ return ret;
+ }
+
+ priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
+ aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
+
+ if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
+ (priv->plat->axi && priv->hw->dma->axi))
+ priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
+
+ return ret;
}
/**
@@ -1799,10 +1802,8 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- /* Create and initialize the TX/RX descriptors chains. */
- priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
- priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+ priv->rx_copybreak = STMMAC_RX_COPYBREAK;
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
@@ -1943,13 +1944,12 @@ static int stmmac_release(struct net_device *dev)
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int txsize = priv->dma_tx_size;
- int entry;
+ unsigned int nopaged_len = skb_headlen(skb);
int i, csum_insertion = 0, is_jumbo = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int entry, first_entry;
struct dma_desc *desc, *first;
- unsigned int nopaged_len = skb_headlen(skb);
- unsigned int enh_desc = priv->plat->enh_desc;
+ unsigned int enh_desc;
spin_lock(&priv->tx_lock);
@@ -1966,31 +1966,26 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
- entry = priv->cur_tx % txsize;
+ entry = priv->cur_tx;
+ first_entry = entry;
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
- if (priv->extend_desc)
+ if (likely(priv->extend_desc))
desc = (struct dma_desc *)(priv->dma_etx + entry);
else
desc = priv->dma_tx + entry;
first = desc;
+ priv->tx_skbuff[first_entry] = skb;
+
+ enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
- if (likely(!is_jumbo)) {
- desc->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
- goto dma_map_err;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
- priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion, priv->mode);
- } else {
- desc = first;
+ if (unlikely(is_jumbo)) {
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
@@ -1999,10 +1994,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
+ bool last_segment = (i == (nfrags - 1));
- priv->tx_skbuff[entry] = NULL;
- entry = (++priv->cur_tx) % txsize;
- if (priv->extend_desc)
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+
+ if (likely(priv->extend_desc))
desc = (struct dma_desc *)(priv->dma_etx + entry);
else
desc = priv->dma_tx + entry;
@@ -2012,53 +2008,37 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, desc->des2))
goto dma_map_err; /* should reuse desc w/o issues */
+ priv->tx_skbuff[entry] = NULL;
priv->tx_skbuff_dma[entry].buf = desc->des2;
priv->tx_skbuff_dma[entry].map_as_page = true;
+ priv->tx_skbuff_dma[entry].len = len;
+ priv->tx_skbuff_dma[entry].last_segment = last_segment;
+
+ /* Prepare the descriptor and set the own bit too */
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
- priv->mode);
- wmb();
- priv->hw->desc->set_tx_owner(desc);
- wmb();
+ priv->mode, 1, last_segment);
}
- priv->tx_skbuff[entry] = skb;
+ entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
- /* Finalize the latest segment. */
- priv->hw->desc->close_tx_desc(desc);
-
- wmb();
- /* According to the coalesce parameter the IC bit for the latest
- * segment could be reset and the timer re-started to invoke the
- * stmmac_tx function. This approach takes care about the fragments.
- */
- priv->tx_count_frames += nfrags + 1;
- if (priv->tx_coal_frames > priv->tx_count_frames) {
- priv->hw->desc->clear_tx_ic(desc);
- priv->xstats.tx_reset_ic_bit++;
- mod_timer(&priv->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer));
- } else
- priv->tx_count_frames = 0;
-
- /* To avoid raise condition */
- priv->hw->desc->set_tx_owner(first);
- wmb();
-
- priv->cur_tx++;
+ priv->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
- pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
- __func__, (priv->cur_tx % txsize),
- (priv->dirty_tx % txsize), entry, first, nfrags);
+ pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+ __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ entry, first, nfrags);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ stmmac_display_ring((void *)priv->dma_etx,
+ DMA_TX_SIZE, 1);
else
- stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+ stmmac_display_ring((void *)priv->dma_tx,
+ DMA_TX_SIZE, 0);
pr_debug(">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
+
if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
if (netif_msg_hw(priv))
pr_debug("%s: stop transmitted packets\n", __func__);
@@ -2067,16 +2047,59 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- priv->hwts_tx_en)) {
- /* declare that device is doing timestamping */
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- priv->hw->desc->enable_tx_timestamp(first);
+ /* According to the coalesce parameter the IC bit for the latest
+ * segment is reset and the timer re-started to clean the tx status.
+ * This approach takes care of the fragments: desc is the first
+ * element in case of no SG.
+ */
+ priv->tx_count_frames += nfrags + 1;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ } else {
+ priv->tx_count_frames = 0;
+ priv->hw->desc->set_tx_ic(desc);
+ priv->xstats.tx_set_ic_bit++;
}
if (!priv->hwts_tx_en)
skb_tx_timestamp(skb);
+ /* Ready to fill the first descriptor and set the OWN bit w/o any
+ * problems because all the descriptors are actually ready to be
+ * passed to the DMA engine.
+ */
+ if (likely(!is_jumbo)) {
+ bool last_segment = (nfrags == 0);
+
+ first->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, first->des2))
+ goto dma_map_err;
+
+ priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ priv->tx_skbuff_dma[first_entry].len = nopaged_len;
+ priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ /* Prepare the first descriptor setting the OWN bit too */
+ priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
+ csum_insertion, priv->mode, 1,
+ last_segment);
+
+ /* The own bit must be the last setting done when preparing the
+ * descriptor; a barrier is then needed to make sure everything
+ * is coherent before granting ownership to the DMA engine.
+ */
+ smp_wmb();
+ }
+
netdev_sent_queue(dev, skb->len);
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
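
The reordered xmit path prepares every fragment descriptor first and only sets the OWN bit on the first descriptor at the end, behind smp_wmb(), so the DMA engine can never observe a half-built chain. A userspace analogue of that handoff, sketched with C11 release/acquire atomics standing in for the kernel barriers (the desc layout below is illustrative, not the stmmac one):

#include <stdatomic.h>
#include <stdint.h>

struct desc {
	uint32_t buf_addr;
	uint32_t len;
	atomic_uint own;		/* 1 = owned by the consumer ("DMA") */
};

static void producer_fill(struct desc *d, uint32_t addr, uint32_t len)
{
	d->buf_addr = addr;
	d->len = len;
	/* release: the fields above become visible before own does */
	atomic_store_explicit(&d->own, 1, memory_order_release);
}

static int consumer_poll(struct desc *d, uint32_t *addr, uint32_t *len)
{
	if (!atomic_load_explicit(&d->own, memory_order_acquire))
		return 0;		/* still owned by the producer */
	*addr = d->buf_addr;		/* safe: acquire pairs with release */
	*len = d->len;
	return 1;
}
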
@@ -2108,6 +2131,14 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
}
+static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
+{
+ if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
+ return 0;
+
+ return 1;
+}
+
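
The helper above, together with rx_copybreak, decides between copying a small frame into a fresh skb (so the mapped DMA buffer can be reused) and handing the buffer up zero-copy; after an skb allocation failure, zero-copy stays off for STMMAC_RX_THRESH frames. A condensed sketch of that decision, under the assumption that the two inputs mean exactly that:

static int rx_should_copy(unsigned int frame_len,
			  unsigned int copybreak,
			  unsigned int zeroc_thresh)
{
	if (frame_len < copybreak)
		return 1;	/* small frame: a copy is cheaper */
	if (zeroc_thresh)
		return 1;	/* recent alloc failure: avoid zero-copy */
	return 0;		/* pass the mapped buffer up the stack */
}
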
/**
* stmmac_rx_refill - refill used skb preallocated buffers
* @priv: driver private structure
@@ -2116,11 +2147,11 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
- unsigned int rxsize = priv->dma_rx_size;
int bfsize = priv->dma_buf_sz;
+ unsigned int entry = priv->dirty_rx;
+ int dirty = stmmac_rx_dirty(priv);
- for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
- unsigned int entry = priv->dirty_rx % rxsize;
+ while (dirty-- > 0) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -2132,9 +2163,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
-
- if (unlikely(skb == NULL))
+ if (unlikely(!skb)) {
+ /* skb allocation failed: disable zero-copy for a while */
+ priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
+ if (unlikely(net_ratelimit()))
+ dev_err(priv->device,
+ "fail to alloc skb entry %d\n",
+ entry);
break;
+ }
priv->rx_skbuff[entry] = skb;
priv->rx_skbuff_dma[entry] =
@@ -2150,13 +2187,20 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
priv->hw->mode->refill_desc3(priv, p);
+ if (priv->rx_zeroc_thresh > 0)
+ priv->rx_zeroc_thresh--;
+
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
}
+
wmb();
priv->hw->desc->set_rx_owner(p);
wmb();
+
+ entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
+ priv->dirty_rx = entry;
}
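
The refill loop now asks stmmac_rx_dirty(priv) up front for the number of consumed-but-unrefilled entries instead of re-comparing cur_rx and dirty_rx on each pass. A hypothetical reconstruction of such a count for a power-of-two ring (note it reads 0 when the ring is completely dirty, which the caller must avoid):

static unsigned int rx_dirty(unsigned int cur_rx, unsigned int dirty_rx,
			     unsigned int ring_size)
{
	/* forward distance from dirty_rx to cur_rx, with wraparound */
	return (cur_rx - dirty_rx) & (ring_size - 1);
}
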
/**
@@ -2168,8 +2212,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
- unsigned int rxsize = priv->dma_rx_size;
- unsigned int entry = priv->cur_rx % rxsize;
+ unsigned int entry = priv->cur_rx;
unsigned int next_entry;
unsigned int count = 0;
int coe = priv->hw->rx_csum;
@@ -2177,9 +2220,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
pr_debug("%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
+ stmmac_display_ring((void *)priv->dma_erx,
+ DMA_RX_SIZE, 1);
else
- stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+ stmmac_display_ring((void *)priv->dma_rx,
+ DMA_RX_SIZE, 0);
}
while (count < limit) {
int status;
@@ -2190,20 +2235,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
else
p = priv->dma_rx + entry;
- if (priv->hw->desc->get_rx_owner(p))
+ /* read the status of the incoming frame */
+ status = priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p);
+ /* check if managed by the DMA otherwise go ahead */
+ if (unlikely(status & dma_own))
break;
count++;
- next_entry = (++priv->cur_rx) % rxsize;
+ priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
+ next_entry = priv->cur_rx;
+
if (priv->extend_desc)
prefetch(priv->dma_erx + next_entry);
else
prefetch(priv->dma_rx + next_entry);
- /* read the status of the incoming frame */
- status = priv->hw->desc->rx_status(&priv->dev->stats,
- &priv->xstats, p);
if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
priv->hw->desc->rx_extended_status(&priv->dev->stats,
&priv->xstats,
@@ -2248,23 +2296,54 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
pr_debug("\tframe size %d, COE: %d\n",
frame_len, status);
}
- skb = priv->rx_skbuff[entry];
- if (unlikely(!skb)) {
- pr_err("%s: Inconsistent Rx descriptor chain\n",
- priv->dev->name);
- priv->dev->stats.rx_dropped++;
- break;
+
+ if (unlikely((frame_len < priv->rx_copybreak) ||
+ stmmac_rx_threshold_count(priv))) {
+ skb = netdev_alloc_skb_ip_align(priv->dev,
+ frame_len);
+ if (unlikely(!skb)) {
+ if (net_ratelimit())
+ dev_warn(priv->device,
+ "packet dropped\n");
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+
+ dma_sync_single_for_cpu(priv->device,
+ priv->rx_skbuff_dma[entry],
+ frame_len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb,
+ priv->rx_skbuff[entry]->data,
+ frame_len);
+
+ skb_put(skb, frame_len);
+ dma_sync_single_for_device(priv->device,
+ priv->rx_skbuff_dma[entry],
+ frame_len, DMA_FROM_DEVICE);
+ } else {
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ pr_err("%s: Inconsistent Rx chain\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+ prefetch(skb->data - NET_IP_ALIGN);
+ priv->rx_skbuff[entry] = NULL;
+ priv->rx_zeroc_thresh++;
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
}
- prefetch(skb->data - NET_IP_ALIGN);
- priv->rx_skbuff[entry] = NULL;
stmmac_get_rx_hwtstamp(priv, entry, skb);
- skb_put(skb, frame_len);
- dma_unmap_single(priv->device,
- priv->rx_skbuff_dma[entry],
- priv->dma_buf_sz, DMA_FROM_DEVICE);
-
if (netif_msg_pktdata(priv)) {
pr_debug("frame received (%dbytes)", frame_len);
print_pkt(skb->data, frame_len);
@@ -2555,19 +2634,17 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
- unsigned int txsize = priv->dma_tx_size;
- unsigned int rxsize = priv->dma_rx_size;
if (priv->extend_desc) {
seq_printf(seq, "Extended RX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
+ sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
seq_printf(seq, "Extended TX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
+ sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
} else {
seq_printf(seq, "RX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
+ sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
seq_printf(seq, "TX descriptor ring:\n");
- sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
+ sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
}
return 0;
@@ -3137,12 +3214,6 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "phyaddr:", 8)) {
if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
- } else if (!strncmp(opt, "dma_txsize:", 11)) {
- if (kstrtoint(opt + 11, 0, &dma_txsize))
- goto err;
- } else if (!strncmp(opt, "dma_rxsize:", 11)) {
- if (kstrtoint(opt + 11, 0, &dma_rxsize))
- goto err;
} else if (!strncmp(opt, "buf_sz:", 7)) {
if (kstrtoint(opt + 7, 0, &buf_sz))
goto err;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index efb54f356a67..06704ca6f9ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -198,20 +198,12 @@ int stmmac_mdio_register(struct net_device *ndev)
struct mii_bus *new_bus;
struct stmmac_priv *priv = netdev_priv(ndev);
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
- int addr, found;
struct device_node *mdio_node = priv->plat->mdio_node;
+ int addr, found;
if (!mdio_bus_data)
return 0;
- if (IS_ENABLED(CONFIG_OF)) {
- if (mdio_node) {
- netdev_dbg(ndev, "FOUND MDIO subnode\n");
- } else {
- netdev_warn(ndev, "No MDIO subnode found\n");
- }
- }
-
new_bus = mdiobus_alloc();
if (new_bus == NULL)
return -ENOMEM;
@@ -243,6 +235,9 @@ int stmmac_mdio_register(struct net_device *ndev)
goto bus_register_fail;
}
+ if (priv->plat->phy_node || mdio_node)
+ goto bus_register_done;
+
found = 0;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
@@ -298,6 +293,7 @@ int stmmac_mdio_register(struct net_device *ndev)
return -ENODEV;
}
+bus_register_done:
priv->mii = new_bus;
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index d71a721ea61c..ae4388735b7f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -81,7 +81,7 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
plat->mdio_bus_data->phy_mask = 0;
plat->dma_cfg->pbl = 32;
- plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
+ /* TODO: AXI */
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -115,8 +115,8 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
plat->mdio_bus_data->phy_mask = 0;
plat->dma_cfg->pbl = 16;
- plat->dma_cfg->burst_len = DMA_AXI_BLEN_256;
plat->dma_cfg->fixed_burst = 1;
+ /* AXI (TODO) */
/* Set default value for multicast hash bins */
plat->multicast_filter_bins = HASH_TABLE_SIZE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 4514ba73d961..cf37ea558ecc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -96,6 +96,105 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
}
/**
+ * stmmac_axi_setup - parse DT parameters for programming the AXI register
+ * @pdev: platform device
+ * Description:
+ * If required, the AXI internal registers can be tuned by using
+ * device-tree platform parameters.
+ */
+static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct stmmac_axi *axi;
+
+ np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
+ if (!np)
+ return NULL;
+
+ axi = kzalloc(sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
+ axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
+ axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
+ axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all");
+ axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
+ axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
+ axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
+
+ of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt);
+ of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt);
+ of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+
+ return axi;
+}
+
+/**
+ * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
+ * @plat: driver data platform structure
+ * @np: device tree node
+ * @dev: device pointer
+ * Description:
+ * The mdio bus will be allocated in case a PHY transceiver is on board;
+ * it will be NULL if the fixed-link is configured.
+ * If the "snps,dwmac-mdio" sub-node is present, the mdio will be allocated
+ * in any case (for DSA, mdio must be registered even with a fixed-link).
+ * The table below sums the supported configurations:
+ * -------------------------------
+ * snps,phy-addr | Y
+ * -------------------------------
+ * phy-handle | Y
+ * -------------------------------
+ * fixed-link | N
+ * -------------------------------
+ * snps,dwmac-mdio |
+ * even if | Y
+ * fixed-link |
+ * -------------------------------
+ *
+ * It returns 0 in case of success otherwise -ENODEV.
+ */
+static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
+ struct device_node *np, struct device *dev)
+{
+ bool mdio = true;
+
+ /* If phy-handle property is passed from DT, use it as the PHY */
+ plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (plat->phy_node)
+ dev_dbg(dev, "Found phy-handle subnode\n");
+
+ /* If phy-handle is not specified, check if we have a fixed-phy */
+ if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+ if ((of_phy_register_fixed_link(np) < 0))
+ return -ENODEV;
+
+ dev_dbg(dev, "Found fixed-link subnode\n");
+ plat->phy_node = of_node_get(np);
+ mdio = false;
+ }
+
+ /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
+ for_each_child_of_node(np, plat->mdio_node) {
+ if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
+ break;
+ }
+
+ if (plat->mdio_node) {
+ dev_dbg(dev, "Found MDIO subnode\n");
+ mdio = true;
+ }
+
+ if (mdio)
+ plat->mdio_bus_data =
+ devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
+ GFP_KERNEL);
+ return 0;
+}
+
+/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
* @plat: driver data platform structure
@@ -110,7 +209,6 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
- struct device_node *child_node = NULL;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -130,36 +228,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
/* Default to phy auto-detection */
plat->phy_addr = -1;
- /* If we find a phy-handle property, use it as the PHY */
- plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
-
- /* If phy-handle is not specified, check if we have a fixed-phy */
- if (!plat->phy_node && of_phy_is_fixed_link(np)) {
- if ((of_phy_register_fixed_link(np) < 0))
- return ERR_PTR(-ENODEV);
-
- plat->phy_node = of_node_get(np);
- }
-
- for_each_child_of_node(np, child_node)
- if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
- plat->mdio_node = child_node;
- break;
- }
-
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
- plat->mdio_bus_data = NULL;
- else
- plat->mdio_bus_data =
- devm_kzalloc(&pdev->dev,
- sizeof(struct stmmac_mdio_bus_data),
- GFP_KERNEL);
+ /* Configure the PHY by using all the supported device-tree properties */
+ if (stmmac_dt_phy(plat, np, &pdev->dev))
+ return ERR_PTR(-ENODEV);
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -223,13 +300,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
}
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+ dma_cfg->aal = of_property_read_bool(np, "snps,aal");
dma_cfg->fixed_burst =
of_property_read_bool(np, "snps,fixed-burst");
dma_cfg->mixed_burst =
of_property_read_bool(np, "snps,mixed-burst");
- of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len);
- if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256)
- dma_cfg->burst_len = 0;
}
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
if (plat->force_thresh_dma_mode) {
@@ -237,6 +312,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
}
+ plat->axi = stmmac_axi_setup(pdev);
+
return plat;
}
#else
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index dee94b67638c..a4b40e3015e5 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -69,12 +69,28 @@ config CASSINI
Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
<http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>.
+config SUNVNET_COMMON
+ bool
+ depends on SUN_LDOMS
+ default y if SUN_LDOMS
+
config SUNVNET
tristate "Sun Virtual Network support"
depends on SUN_LDOMS
---help---
Support for virtual network devices under Sun Logical Domains.
+config LDMVSW
+ tristate "Sun4v LDoms Virtual Switch support"
+ depends on SUN_LDOMS
+ ---help---
+ Support for virtual switch devices under Sun4v Logical Domains.
+ This driver adds a network interface for every vsw-port node
+ found in the machine description of a service domain.
+ Linux bridge/switch software can use these interfaces for
+ guest domain network interconnectivity or guest domain
+ connection to a physical network on a service domain.
+
config NIU
tristate "Sun Neptune 10Gbit Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile
index 1e620ff88eba..37855438b3cb 100644
--- a/drivers/net/ethernet/sun/Makefile
+++ b/drivers/net/ethernet/sun/Makefile
@@ -7,5 +7,7 @@ obj-$(CONFIG_SUNQE) += sunqe.o
obj-$(CONFIG_SUNBMAC) += sunbmac.o
obj-$(CONFIG_SUNGEM) += sungem.o
obj-$(CONFIG_CASSINI) += cassini.o
+obj-$(CONFIG_SUNVNET_COMMON) += sunvnet_common.o
obj-$(CONFIG_SUNVNET) += sunvnet.o
+obj-$(CONFIG_LDMVSW) += ldmvsw.o
obj-$(CONFIG_NIU) += niu.o
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
new file mode 100644
index 000000000000..e15bf84fc6b2
--- /dev/null
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -0,0 +1,468 @@
+/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver.
+ *
+ * Copyright (C) 2016 Oracle. All rights reserved.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#if defined(CONFIG_IPV6)
+#include <linux/icmpv6.h>
+#endif
+
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/route.h>
+
+#include <asm/vio.h>
+#include <asm/ldc.h>
+
+/* This driver makes use of the common code in sunvnet_common.c */
+#include "sunvnet_common.h"
+
+/* Length of time before we decide the hardware is hung,
+ * and dev->tx_timeout() should be called to fix the problem.
+ */
+#define VSW_TX_TIMEOUT (10 * HZ)
+
+/* Static HW Addr used for the network interfaces representing vsw ports */
+static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+
+#define DRV_MODULE_NAME "ldmvsw"
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "Jan 15, 2016"
+
+static char version[] =
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+MODULE_AUTHOR("Oracle");
+MODULE_DESCRIPTION("Sun4v LDOM Virtual Switch Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+/* Ordered from largest major to lowest */
+static struct vio_version vsw_versions[] = {
+ { .major = 1, .minor = 8 },
+ { .major = 1, .minor = 7 },
+ { .major = 1, .minor = 6 },
+ { .major = 1, .minor = 0 },
+};
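
The table is ordered highest-first so the handshake can offer the newest protocol version and fall back entry by entry. A generic sketch of that selection; peer_accepts() is a hypothetical stand-in for the VIO version exchange:

struct version { int major, minor; };

static const struct version *
pick_version(const struct version *tbl, int n,
	     int (*peer_accepts)(const struct version *))
{
	int i;

	for (i = 0; i < n; i++)
		if (peer_accepts(&tbl[i]))	/* try newest first */
			return &tbl[i];
	return 0;				/* no common version */
}
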
+
+static void vsw_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+}
+
+static u32 vsw_get_msglevel(struct net_device *dev)
+{
+ struct vnet_port *port = netdev_priv(dev);
+
+ return port->vp->msg_enable;
+}
+
+static void vsw_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct vnet_port *port = netdev_priv(dev);
+
+ port->vp->msg_enable = value;
+}
+
+static const struct ethtool_ops vsw_ethtool_ops = {
+ .get_drvinfo = vsw_get_drvinfo,
+ .get_msglevel = vsw_get_msglevel,
+ .set_msglevel = vsw_set_msglevel,
+ .get_link = ethtool_op_get_link,
+};
+
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
+
+/* func arg to vnet_start_xmit_common() to get the proper tx port */
+static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct vnet_port *port = netdev_priv(dev);
+
+ return port;
+}
+
+static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct vnet_port *port = netdev_priv(dev);
+
+ if (!port)
+ return 0;
+
+ return port->q_index;
+}
+
+/* Wrappers to common functions */
+static int vsw_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find);
+}
+
+static void vsw_set_rx_mode(struct net_device *dev)
+{
+ struct vnet_port *port = netdev_priv(dev);
+
+ return sunvnet_set_rx_mode_common(dev, port->vp);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void vsw_poll_controller(struct net_device *dev)
+{
+ struct vnet_port *port = netdev_priv(dev);
+
+ return sunvnet_poll_controller_common(dev, port->vp);
+}
+#endif
+
+static const struct net_device_ops vsw_ops = {
+ .ndo_open = sunvnet_open_common,
+ .ndo_stop = sunvnet_close_common,
+ .ndo_set_rx_mode = vsw_set_rx_mode,
+ .ndo_set_mac_address = sunvnet_set_mac_addr_common,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = sunvnet_tx_timeout_common,
+ .ndo_change_mtu = sunvnet_change_mtu_common,
+ .ndo_start_xmit = vsw_start_xmit,
+ .ndo_select_queue = vsw_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = vsw_poll_controller,
+#endif
+};
+
+static const char *local_mac_prop = "local-mac-address";
+static const char *cfg_handle_prop = "cfg-handle";
+
+static struct vnet *vsw_get_vnet(struct mdesc_handle *hp,
+ u64 port_node,
+ u64 *handle)
+{
+ struct vnet *vp;
+ struct vnet *iter;
+ const u64 *local_mac = NULL;
+ const u64 *cfghandle = NULL;
+ u64 a;
+
+ /* Get the parent virtual-network-switch macaddr and cfghandle */
+ mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
+ u64 target = mdesc_arc_target(hp, a);
+ const char *name;
+
+ name = mdesc_get_property(hp, target, "name", NULL);
+ if (!name || strcmp(name, "virtual-network-switch"))
+ continue;
+
+ local_mac = mdesc_get_property(hp, target,
+ local_mac_prop, NULL);
+ cfghandle = mdesc_get_property(hp, target,
+ cfg_handle_prop, NULL);
+ break;
+ }
+ if (!local_mac || !cfghandle)
+ return ERR_PTR(-ENODEV);
+
+ /* find or create associated vnet */
+ vp = NULL;
+ mutex_lock(&vnet_list_mutex);
+ list_for_each_entry(iter, &vnet_list, list) {
+ if (iter->local_mac == *local_mac) {
+ vp = iter;
+ break;
+ }
+ }
+
+ if (!vp) {
+ vp = kzalloc(sizeof(*vp), GFP_KERNEL);
+ if (unlikely(!vp)) {
+ mutex_unlock(&vnet_list_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&vp->lock);
+ INIT_LIST_HEAD(&vp->port_list);
+ INIT_LIST_HEAD(&vp->list);
+ vp->local_mac = *local_mac;
+ list_add(&vp->list, &vnet_list);
+ }
+
+ mutex_unlock(&vnet_list_mutex);
+
+ *handle = (u64)*cfghandle;
+
+ return vp;
+}
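
vsw_get_vnet() shares a single struct vnet between every port hanging off the same parent switch (keyed by its MAC), using a find-or-create-under-lock pattern so concurrent probes cannot create duplicates. A self-contained userspace sketch of the same pattern with pthreads:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct node {
	uint64_t key;			/* e.g. the switch MAC address */
	struct node *next;
};

static struct node *node_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *get_or_create(uint64_t key)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	for (n = node_list; n; n = n->next)
		if (n->key == key)
			goto out;	/* reuse the existing instance */

	n = calloc(1, sizeof(*n));
	if (n) {
		n->key = key;
		n->next = node_list;
		node_list = n;		/* publish while still locked */
	}
out:
	pthread_mutex_unlock(&list_lock);
	return n;
}
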
+
+static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
+ struct vio_dev *vdev,
+ u64 handle,
+ u64 port_id)
+{
+ struct net_device *dev;
+ struct vnet_port *port;
+ int i;
+
+ dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ dev->needed_headroom = VNET_PACKET_SKIP + 8;
+ dev->needed_tailroom = 8;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ dev->dev_addr[i] = hwaddr[i];
+ dev->perm_addr[i] = dev->dev_addr[i];
+ }
+
+ sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id);
+
+ dev->netdev_ops = &vsw_ops;
+ dev->ethtool_ops = &vsw_ethtool_ops;
+ dev->watchdog_timeo = VSW_TX_TIMEOUT;
+
+ dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_HW_CSUM | NETIF_F_SG;
+ dev->features = dev->hw_features;
+
+ SET_NETDEV_DEV(dev, &vdev->dev);
+
+ return dev;
+}
+
+static struct ldc_channel_config vsw_ldc_cfg = {
+ .event = sunvnet_event_common,
+ .mtu = 64,
+ .mode = LDC_MODE_UNRELIABLE,
+};
+
+static struct vio_driver_ops vsw_vio_ops = {
+ .send_attr = sunvnet_send_attr_common,
+ .handle_attr = sunvnet_handle_attr_common,
+ .handshake_complete = sunvnet_handshake_complete_common,
+};
+
+static void print_version(void)
+{
+ printk_once(KERN_INFO "%s", version);
+}
+
+static const char *remote_macaddr_prop = "remote-mac-address";
+static const char *id_prop = "id";
+
+static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+ struct mdesc_handle *hp;
+ struct vnet_port *port;
+ unsigned long flags;
+ struct vnet *vp;
+ struct net_device *dev;
+ const u64 *rmac;
+ int len, i, err;
+ const u64 *port_id;
+ u64 handle;
+
+ print_version();
+
+ hp = mdesc_grab();
+
+ rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
+ err = -ENODEV;
+ if (!rmac) {
+ pr_err("Port lacks %s property\n", remote_macaddr_prop);
+ mdesc_release(hp);
+ return err;
+ }
+
+ port_id = mdesc_get_property(hp, vdev->mp, id_prop, NULL);
+ err = -ENODEV;
+ if (!port_id) {
+ pr_err("Port lacks %s property\n", id_prop);
+ mdesc_release(hp);
+ return err;
+ }
+
+ /* Get (or create) the vnet associated with this port */
+ vp = vsw_get_vnet(hp, vdev->mp, &handle);
+ if (unlikely(IS_ERR(vp))) {
+ err = PTR_ERR(vp);
+ pr_err("Failed to get vnet for vsw-port\n");
+ mdesc_release(hp);
+ return err;
+ }
+
+ mdesc_release(hp);
+
+ dev = vsw_alloc_netdev(vsw_port_hwaddr, vdev, handle, *port_id);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ pr_err("Failed to alloc netdev for vsw-port\n");
+ return err;
+ }
+
+ port = netdev_priv(dev);
+
+ INIT_LIST_HEAD(&port->list);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
+
+ port->vp = vp;
+ port->dev = dev;
+ port->switch_port = 1;
+ port->tso = true;
+ port->tsolen = 0;
+
+ /* Mark the port as belonging to ldmvsw, which directs the
+ * common code to use the net_device in the vnet_port
+ * rather than the net_device in the vnet (which is used
+ * by sunvnet). This bit is used by the VNET_PORT_TO_NET_DEVICE
+ * macro.
+ */
+ port->vsw = 1;
+
+ err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
+ vsw_versions, ARRAY_SIZE(vsw_versions),
+ &vsw_vio_ops, dev->name);
+ if (err)
+ goto err_out_free_dev;
+
+ err = vio_ldc_alloc(&port->vio, &vsw_ldc_cfg, port);
+ if (err)
+ goto err_out_free_dev;
+
+ dev_set_drvdata(&vdev->dev, port);
+
+ netif_napi_add(dev, &port->napi, sunvnet_poll_common,
+ NAPI_POLL_WEIGHT);
+
+ spin_lock_irqsave(&vp->lock, flags);
+ list_add_rcu(&port->list, &vp->port_list);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common,
+ (unsigned long)port);
+
+ err = register_netdev(dev);
+ if (err) {
+ pr_err("Cannot register net device, aborting\n");
+ goto err_out_del_timer;
+ }
+
+ spin_lock_irqsave(&vp->lock, flags);
+ sunvnet_port_add_txq_common(port);
+ spin_unlock_irqrestore(&vp->lock, flags);
+
+ napi_enable(&port->napi);
+ vio_port_up(&port->vio);
+
+ netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr);
+
+ pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name,
+ port->raddr, " switch-port");
+
+ return 0;
+
+err_out_del_timer:
+ del_timer_sync(&port->clean_timer);
+ list_del_rcu(&port->list);
+ synchronize_rcu();
+ netif_napi_del(&port->napi);
+ dev_set_drvdata(&vdev->dev, NULL);
+ vio_ldc_free(&port->vio);
+
+err_out_free_dev:
+ free_netdev(dev);
+ return err;
+}
+
+static int vsw_port_remove(struct vio_dev *vdev)
+{
+ struct vnet_port *port = dev_get_drvdata(&vdev->dev);
+ unsigned long flags;
+
+ if (port) {
+ del_timer_sync(&port->vio.timer);
+
+ napi_disable(&port->napi);
+
+ list_del_rcu(&port->list);
+
+ synchronize_rcu();
+ del_timer_sync(&port->clean_timer);
+ spin_lock_irqsave(&port->vp->lock, flags);
+ sunvnet_port_rm_txq_common(port);
+ spin_unlock_irqrestore(&port->vp->lock, flags);
+ netif_napi_del(&port->napi);
+ sunvnet_port_free_tx_bufs_common(port);
+ vio_ldc_free(&port->vio);
+
+ dev_set_drvdata(&vdev->dev, NULL);
+
+ unregister_netdev(port->dev);
+ free_netdev(port->dev);
+ }
+
+ return 0;
+}
+
+static void vsw_cleanup(void)
+{
+ struct vnet *vp;
+
+ /* just need to free up the vnet list */
+ mutex_lock(&vnet_list_mutex);
+ while (!list_empty(&vnet_list)) {
+ vp = list_first_entry(&vnet_list, struct vnet, list);
+ list_del(&vp->list);
+ /* vio_unregister_driver() should have cleaned up port_list */
+ if (!list_empty(&vp->port_list))
+ pr_err("Ports not removed by VIO subsystem!\n");
+ kfree(vp);
+ }
+ mutex_unlock(&vnet_list_mutex);
+}
+
+static const struct vio_device_id vsw_port_match[] = {
+ {
+ .type = "vsw-port",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(vio, vsw_port_match);
+
+static struct vio_driver vsw_port_driver = {
+ .id_table = vsw_port_match,
+ .probe = vsw_port_probe,
+ .remove = vsw_port_remove,
+ .name = "vsw_port",
+};
+
+static int __init vsw_init(void)
+{
+ return vio_register_driver(&vsw_port_driver);
+}
+
+static void __exit vsw_exit(void)
+{
+ vio_unregister_driver(&vsw_port_driver);
+ vsw_cleanup();
+}
+
+module_init(vsw_init);
+module_exit(vsw_exit);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ab6051a43134..9cc45649f477 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3341,7 +3341,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
niu_hash_page(rp, page, addr);
if (rp->rbr_blocks_per_page > 1)
- atomic_add(rp->rbr_blocks_per_page - 1, &page->_count);
+ page_ref_add(page, rp->rbr_blocks_per_page - 1);
for (i = 0; i < rp->rbr_blocks_per_page; i++) {
__le32 *rbr = &rp->rbr[start_index + i];
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index e23a642357e7..2437227712dc 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -51,7 +51,6 @@
#endif
#ifdef CONFIG_PPC_PMAC
-#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 23fa29877f5b..a2f9b47de187 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1,6 +1,7 @@
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2016 Oracle. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -29,7 +30,12 @@
#include <asm/vio.h>
#include <asm/ldc.h>
-#include "sunvnet.h"
+#include "sunvnet_common.h"
+
+/* Length of time before we decide the hardware is hung,
+ * and dev->tx_timeout() should be called to fix the problem.
+ */
+#define VNET_TX_TIMEOUT (5 * HZ)
#define DRV_MODULE_NAME "sunvnet"
#define DRV_MODULE_VERSION "1.0"
@@ -42,16 +48,6 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
-#define VNET_MAX_TXQS 16
-
-/* Heuristic for the number of times to exponentially backoff and
- * retry sending an LDC trigger when EAGAIN is encountered
- */
-#define VNET_MAX_RETRIES 10
-
-static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
-static void vnet_port_reset(struct vnet_port *port);
-
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
{ .major = 1, .minor = 8 },
@@ -60,866 +56,45 @@ static struct vio_version vnet_versions[] = {
{ .major = 1, .minor = 0 },
};
-static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
-{
- return vio_dring_avail(dr, VNET_TX_RING_SIZE);
-}
-
-static int vnet_handle_unknown(struct vnet_port *port, void *arg)
-{
- struct vio_msg_tag *pkt = arg;
-
- pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
- pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
- pr_err("Resetting connection\n");
-
- ldc_disconnect(port->vio.lp);
-
- return -ECONNRESET;
-}
-
-static int vnet_port_alloc_tx_ring(struct vnet_port *port);
-
-static int vnet_send_attr(struct vio_driver_state *vio)
-{
- struct vnet_port *port = to_vnet_port(vio);
- struct net_device *dev = port->vp->dev;
- struct vio_net_attr_info pkt;
- int framelen = ETH_FRAME_LEN;
- int i, err;
-
- err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
- if (err)
- return err;
-
- memset(&pkt, 0, sizeof(pkt));
- pkt.tag.type = VIO_TYPE_CTRL;
- pkt.tag.stype = VIO_SUBTYPE_INFO;
- pkt.tag.stype_env = VIO_ATTR_INFO;
- pkt.tag.sid = vio_send_sid(vio);
- if (vio_version_before(vio, 1, 2))
- pkt.xfer_mode = VIO_DRING_MODE;
- else
- pkt.xfer_mode = VIO_NEW_DRING_MODE;
- pkt.addr_type = VNET_ADDR_ETHERMAC;
- pkt.ack_freq = 0;
- for (i = 0; i < 6; i++)
- pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
- if (vio_version_after(vio, 1, 3)) {
- if (port->rmtu) {
- port->rmtu = min(VNET_MAXPACKET, port->rmtu);
- pkt.mtu = port->rmtu;
- } else {
- port->rmtu = VNET_MAXPACKET;
- pkt.mtu = port->rmtu;
- }
- if (vio_version_after_eq(vio, 1, 6))
- pkt.options = VIO_TX_DRING;
- } else if (vio_version_before(vio, 1, 3)) {
- pkt.mtu = framelen;
- } else { /* v1.3 */
- pkt.mtu = framelen + VLAN_HLEN;
- }
-
- pkt.cflags = 0;
- if (vio_version_after_eq(vio, 1, 7) && port->tso) {
- pkt.cflags |= VNET_LSO_IPV4_CAPAB;
- if (!port->tsolen)
- port->tsolen = VNET_MAXTSO;
- pkt.ipv4_lso_maxlen = port->tsolen;
- }
-
- pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
-
- viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
- "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
- "cflags[0x%04x] lso_max[%u]\n",
- pkt.xfer_mode, pkt.addr_type,
- (unsigned long long)pkt.addr,
- pkt.ack_freq, pkt.plnk_updt, pkt.options,
- (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);
-
-
- return vio_ldc_send(vio, &pkt, sizeof(pkt));
-}
-
-static int handle_attr_info(struct vio_driver_state *vio,
- struct vio_net_attr_info *pkt)
-{
- struct vnet_port *port = to_vnet_port(vio);
- u64 localmtu;
- u8 xfer_mode;
-
- viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
- "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
- " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
- pkt->xfer_mode, pkt->addr_type,
- (unsigned long long)pkt->addr,
- pkt->ack_freq, pkt->plnk_updt, pkt->options,
- (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
- pkt->ipv4_lso_maxlen);
-
- pkt->tag.sid = vio_send_sid(vio);
-
- xfer_mode = pkt->xfer_mode;
- /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
- if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
- xfer_mode = VIO_NEW_DRING_MODE;
-
- /* MTU negotiation:
- * < v1.3 - ETH_FRAME_LEN exactly
- * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
- * pkt->mtu for ACK
- * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
- */
- if (vio_version_before(vio, 1, 3)) {
- localmtu = ETH_FRAME_LEN;
- } else if (vio_version_after(vio, 1, 3)) {
- localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
- localmtu = min(pkt->mtu, localmtu);
- pkt->mtu = localmtu;
- } else { /* v1.3 */
- localmtu = ETH_FRAME_LEN + VLAN_HLEN;
- }
- port->rmtu = localmtu;
-
- /* LSO negotiation */
- if (vio_version_after_eq(vio, 1, 7))
- port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
- else
- port->tso = false;
- if (port->tso) {
- if (!port->tsolen)
- port->tsolen = VNET_MAXTSO;
- port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
- if (port->tsolen < VNET_MINTSO) {
- port->tso = false;
- port->tsolen = 0;
- pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
- }
- pkt->ipv4_lso_maxlen = port->tsolen;
- } else {
- pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
- pkt->ipv4_lso_maxlen = 0;
- }
-
- /* for version >= 1.6, ACK packet mode we support */
- if (vio_version_after_eq(vio, 1, 6)) {
- pkt->xfer_mode = VIO_NEW_DRING_MODE;
- pkt->options = VIO_TX_DRING;
- }
-
- if (!(xfer_mode | VIO_NEW_DRING_MODE) ||
- pkt->addr_type != VNET_ADDR_ETHERMAC ||
- pkt->mtu != localmtu) {
- viodbg(HS, "SEND NET ATTR NACK\n");
-
- pkt->tag.stype = VIO_SUBTYPE_NACK;
-
- (void) vio_ldc_send(vio, pkt, sizeof(*pkt));
-
- return -ECONNRESET;
- } else {
- viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
- "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
- "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
- pkt->xfer_mode, pkt->addr_type,
- (unsigned long long)pkt->addr,
- pkt->ack_freq, pkt->plnk_updt, pkt->options,
- (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
- pkt->ipv4_lso_maxlen);
-
- pkt->tag.stype = VIO_SUBTYPE_ACK;
-
- return vio_ldc_send(vio, pkt, sizeof(*pkt));
- }
-
-}
-
-static int handle_attr_ack(struct vio_driver_state *vio,
- struct vio_net_attr_info *pkt)
-{
- viodbg(HS, "GOT NET ATTR ACK\n");
-
- return 0;
-}
-
-static int handle_attr_nack(struct vio_driver_state *vio,
- struct vio_net_attr_info *pkt)
-{
- viodbg(HS, "GOT NET ATTR NACK\n");
-
- return -ECONNRESET;
-}
-
-static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
-{
- struct vio_net_attr_info *pkt = arg;
-
- switch (pkt->tag.stype) {
- case VIO_SUBTYPE_INFO:
- return handle_attr_info(vio, pkt);
-
- case VIO_SUBTYPE_ACK:
- return handle_attr_ack(vio, pkt);
-
- case VIO_SUBTYPE_NACK:
- return handle_attr_nack(vio, pkt);
-
- default:
- return -ECONNRESET;
- }
-}
-
-static void vnet_handshake_complete(struct vio_driver_state *vio)
-{
- struct vio_dring_state *dr;
-
- dr = &vio->drings[VIO_DRIVER_RX_RING];
- dr->snd_nxt = dr->rcv_nxt = 1;
-
- dr = &vio->drings[VIO_DRIVER_TX_RING];
- dr->snd_nxt = dr->rcv_nxt = 1;
-}
-
-/* The hypervisor interface that implements copying to/from imported
- * memory from another domain requires that copies are done to 8-byte
- * aligned buffers, and that the lengths of such copies are also 8-byte
- * multiples.
- *
- * So we align skb->data to an 8-byte multiple and pad-out the data
- * area so we can round the copy length up to the next multiple of
- * 8 for the copy.
- *
- * The transmitter puts the actual start of the packet 6 bytes into
- * the buffer it sends over, so that the IP headers after the ethernet
- * header are aligned properly. These 6 bytes are not in the descriptor
- * length, they are simply implied. This offset is represented using
- * the VNET_PACKET_SKIP macro.
- */
-static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
- unsigned int len)
-{
- struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
- unsigned long addr, off;
-
- if (unlikely(!skb))
- return NULL;
-
- addr = (unsigned long) skb->data;
- off = ((addr + 7UL) & ~7UL) - addr;
- if (off)
- skb_reserve(skb, off);
-
- return skb;
-}
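
The helper above reserves just enough headroom to push skb->data up to the next 8-byte boundary, since the hypervisor copy interface requires 8-byte-aligned buffers and lengths. A standalone demo of the round-up arithmetic it uses:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x1003;	/* example unaligned address */
	unsigned long off = ((addr + 7UL) & ~7UL) - addr;

	assert((addr + off) % 8 == 0);
	printf("reserve %lu byte(s) to align 0x%lx\n", off, addr);
	return 0;
}
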
-
-static inline void vnet_fullcsum(struct sk_buff *skb)
-{
- struct iphdr *iph = ip_hdr(skb);
- int offset = skb_transport_offset(skb);
-
- if (skb->protocol != htons(ETH_P_IP))
- return;
- if (iph->protocol != IPPROTO_TCP &&
- iph->protocol != IPPROTO_UDP)
- return;
- skb->ip_summed = CHECKSUM_NONE;
- skb->csum_level = 1;
- skb->csum = 0;
- if (iph->protocol == IPPROTO_TCP) {
- struct tcphdr *ptcp = tcp_hdr(skb);
-
- ptcp->check = 0;
- skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
- ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - offset, IPPROTO_TCP,
- skb->csum);
- } else if (iph->protocol == IPPROTO_UDP) {
- struct udphdr *pudp = udp_hdr(skb);
-
- pudp->check = 0;
- skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
- pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - offset, IPPROTO_UDP,
- skb->csum);
- }
-}
-
-static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
-{
- struct net_device *dev = port->vp->dev;
- unsigned int len = desc->size;
- unsigned int copy_len;
- struct sk_buff *skb;
- int maxlen;
- int err;
-
- err = -EMSGSIZE;
- if (port->tso && port->tsolen > port->rmtu)
- maxlen = port->tsolen;
- else
- maxlen = port->rmtu;
- if (unlikely(len < ETH_ZLEN || len > maxlen)) {
- dev->stats.rx_length_errors++;
- goto out_dropped;
- }
-
- skb = alloc_and_align_skb(dev, len);
- err = -ENOMEM;
- if (unlikely(!skb)) {
- dev->stats.rx_missed_errors++;
- goto out_dropped;
- }
-
- copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
- skb_put(skb, copy_len);
- err = ldc_copy(port->vio.lp, LDC_COPY_IN,
- skb->data, copy_len, 0,
- desc->cookies, desc->ncookies);
- if (unlikely(err < 0)) {
- dev->stats.rx_frame_errors++;
- goto out_free_skb;
- }
-
- skb_pull(skb, VNET_PACKET_SKIP);
- skb_trim(skb, len);
- skb->protocol = eth_type_trans(skb, dev);
-
- if (vio_version_after_eq(&port->vio, 1, 8)) {
- struct vio_net_dext *dext = vio_net_ext(desc);
-
- skb_reset_network_header(skb);
-
- if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
- if (skb->protocol == ETH_P_IP) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->check = 0;
- ip_send_check(iph);
- }
- }
- if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
- skb->ip_summed == CHECKSUM_NONE) {
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- int ihl = iph->ihl * 4;
-
- skb_reset_transport_header(skb);
- skb_set_transport_header(skb, ihl);
- vnet_fullcsum(skb);
- }
- }
- if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
- skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_level = 0;
- if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
- skb->csum_level = 1;
- }
- }
-
- skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
- napi_gro_receive(&port->napi, skb);
- return 0;
-
-out_free_skb:
- kfree_skb(skb);
-
-out_dropped:
- dev->stats.rx_dropped++;
- return err;
-}
-
-static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
- u32 start, u32 end, u8 vio_dring_state)
-{
- struct vio_dring_data hdr = {
- .tag = {
- .type = VIO_TYPE_DATA,
- .stype = VIO_SUBTYPE_ACK,
- .stype_env = VIO_DRING_DATA,
- .sid = vio_send_sid(&port->vio),
- },
- .dring_ident = dr->ident,
- .start_idx = start,
- .end_idx = end,
- .state = vio_dring_state,
- };
- int err, delay;
- int retries = 0;
-
- hdr.seq = dr->snd_nxt;
- delay = 1;
- do {
- err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
- if (err > 0) {
- dr->snd_nxt++;
- break;
- }
- udelay(delay);
- if ((delay <<= 1) > 128)
- delay = 128;
- if (retries++ > VNET_MAX_RETRIES) {
- pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
- port->raddr[0], port->raddr[1],
- port->raddr[2], port->raddr[3],
- port->raddr[4], port->raddr[5]);
- break;
- }
- } while (err == -EAGAIN);
-
- if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
- port->stop_rx_idx = end;
- port->stop_rx = true;
- } else {
- port->stop_rx_idx = 0;
- port->stop_rx = false;
- }
-
- return err;
-}
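
The send loop above retries a busy LDC channel with exponential backoff: the delay doubles up to a 128us cap and the attempt count is bounded by VNET_MAX_RETRIES (the same loop appears in __vnet_tx_trigger() below). A standalone sketch of that loop; try_send is a caller-supplied stand-in for vio_ldc_send():

#include <errno.h>
#include <unistd.h>

#define MAX_RETRIES 10

static int send_with_backoff(int (*try_send)(void))
{
	int delay = 1, retries = 0, err;

	do {
		err = try_send();	/* >0 on success, -EAGAIN if busy */
		if (err > 0)
			break;
		usleep(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	return err;
}
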
-
-static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
- struct vio_dring_state *dr,
- u32 index)
-{
- struct vio_net_desc *desc = port->vio.desc_buf;
- int err;
-
- err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
- (index * dr->entry_size),
- dr->cookies, dr->ncookies);
- if (err < 0)
- return ERR_PTR(err);
-
- return desc;
-}
-
-static int put_rx_desc(struct vnet_port *port,
- struct vio_dring_state *dr,
- struct vio_net_desc *desc,
- u32 index)
-{
- int err;
-
- err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
- (index * dr->entry_size),
- dr->cookies, dr->ncookies);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static int vnet_walk_rx_one(struct vnet_port *port,
- struct vio_dring_state *dr,
- u32 index, int *needs_ack)
-{
- struct vio_net_desc *desc = get_rx_desc(port, dr, index);
- struct vio_driver_state *vio = &port->vio;
- int err;
-
- BUG_ON(desc == NULL);
- if (IS_ERR(desc))
- return PTR_ERR(desc);
-
- if (desc->hdr.state != VIO_DESC_READY)
- return 1;
-
- dma_rmb();
-
- viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
- desc->hdr.state, desc->hdr.ack,
- desc->size, desc->ncookies,
- desc->cookies[0].cookie_addr,
- desc->cookies[0].cookie_size);
-
- err = vnet_rx_one(port, desc);
- if (err == -ECONNRESET)
- return err;
- desc->hdr.state = VIO_DESC_DONE;
- err = put_rx_desc(port, dr, desc, index);
- if (err < 0)
- return err;
- *needs_ack = desc->hdr.ack;
- return 0;
-}
-
-static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
- u32 start, u32 end, int *npkts, int budget)
-{
- struct vio_driver_state *vio = &port->vio;
- int ack_start = -1, ack_end = -1;
- bool send_ack = true;
-
- end = (end == (u32) -1) ? vio_dring_prev(dr, start)
- : vio_dring_next(dr, end);
-
- viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
-
- while (start != end) {
- int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
- if (err == -ECONNRESET)
- return err;
- if (err != 0)
- break;
- (*npkts)++;
- if (ack_start == -1)
- ack_start = start;
- ack_end = start;
- start = vio_dring_next(dr, start);
- if (ack && start != end) {
- err = vnet_send_ack(port, dr, ack_start, ack_end,
- VIO_DRING_ACTIVE);
- if (err == -ECONNRESET)
- return err;
- ack_start = -1;
- }
- if ((*npkts) >= budget) {
- send_ack = false;
- break;
- }
- }
- if (unlikely(ack_start == -1))
- ack_start = ack_end = vio_dring_prev(dr, start);
- if (send_ack) {
- port->napi_resume = false;
- return vnet_send_ack(port, dr, ack_start, ack_end,
- VIO_DRING_STOPPED);
- } else {
- port->napi_resume = true;
- port->napi_stop_idx = ack_end;
- return 1;
- }
-}
-
-static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
- int budget)
-{
- struct vio_dring_data *pkt = msgbuf;
- struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
- struct vio_driver_state *vio = &port->vio;
-
- viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
- pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
-
- if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
- return 0;
- if (unlikely(pkt->seq != dr->rcv_nxt)) {
- pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
- pkt->seq, dr->rcv_nxt);
- return 0;
- }
-
- if (!port->napi_resume)
- dr->rcv_nxt++;
-
- /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
-
- return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
- npkts, budget);
-}
-
-static int idx_is_pending(struct vio_dring_state *dr, u32 end)
-{
- u32 idx = dr->cons;
- int found = 0;
-
- while (idx != dr->prod) {
- if (idx == end) {
- found = 1;
- break;
- }
- idx = vio_dring_next(dr, idx);
- }
- return found;
-}
-
-static int vnet_ack(struct vnet_port *port, void *msgbuf)
-{
- struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct vio_dring_data *pkt = msgbuf;
- struct net_device *dev;
- struct vnet *vp;
- u32 end;
- struct vio_net_desc *desc;
- struct netdev_queue *txq;
-
- if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
- return 0;
-
- end = pkt->end_idx;
- vp = port->vp;
- dev = vp->dev;
- netif_tx_lock(dev);
- if (unlikely(!idx_is_pending(dr, end))) {
- netif_tx_unlock(dev);
- return 0;
- }
-
- /* sync for race conditions with vnet_start_xmit() and tell xmit it
- * is time to send a trigger.
- */
- dr->cons = vio_dring_next(dr, end);
- desc = vio_dring_entry(dr, dr->cons);
- if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
- /* vnet_start_xmit() just populated this dring but missed
- * sending the "start" LDC message to the consumer.
- * Send a "start" trigger on its behalf.
- */
- if (__vnet_tx_trigger(port, dr->cons) > 0)
- port->start_cons = false;
- else
- port->start_cons = true;
- } else {
- port->start_cons = true;
- }
- netif_tx_unlock(dev);
-
- txq = netdev_get_tx_queue(dev, port->q_index);
- if (unlikely(netif_tx_queue_stopped(txq) &&
- vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
- return 1;
-
- return 0;
-}
-
-static int vnet_nack(struct vnet_port *port, void *msgbuf)
-{
- /* XXX just reset or similar XXX */
- return 0;
-}
-
-static int handle_mcast(struct vnet_port *port, void *msgbuf)
-{
- struct vio_net_mcast_info *pkt = msgbuf;
-
- if (pkt->tag.stype != VIO_SUBTYPE_ACK)
- pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
- port->vp->dev->name,
- pkt->tag.type,
- pkt->tag.stype,
- pkt->tag.stype_env,
- pkt->tag.sid);
-
- return 0;
-}
-
-/* Got back a STOPPED LDC message on port. If the queue is stopped,
- * wake it up so that we'll send out another START message at the
- * next TX.
- */
-static void maybe_tx_wakeup(struct vnet_port *port)
-{
- struct netdev_queue *txq;
-
- txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
- __netif_tx_lock(txq, smp_processor_id());
- if (likely(netif_tx_queue_stopped(txq))) {
- struct vio_dring_state *dr;
-
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- netif_tx_wake_queue(txq);
- }
- __netif_tx_unlock(txq);
-}
-
-static inline bool port_is_up(struct vnet_port *vnet)
+static void vnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
- struct vio_driver_state *vio = &vnet->vio;
-
- return !!(vio->hs_state & VIO_HS_COMPLETE);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
-static int vnet_event_napi(struct vnet_port *port, int budget)
+static u32 vnet_get_msglevel(struct net_device *dev)
{
- struct vio_driver_state *vio = &port->vio;
- int tx_wakeup, err;
- int npkts = 0;
- int event = (port->rx_event & LDC_EVENT_RESET);
-
-ldc_ctrl:
- if (unlikely(event == LDC_EVENT_RESET ||
- event == LDC_EVENT_UP)) {
- vio_link_state_change(vio, event);
-
- if (event == LDC_EVENT_RESET) {
- vnet_port_reset(port);
- vio_port_up(vio);
- }
- port->rx_event = 0;
- return 0;
- }
- /* We may have multiple LDC events in rx_event. Unroll send_events() */
- event = (port->rx_event & LDC_EVENT_UP);
- port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
- if (event == LDC_EVENT_UP)
- goto ldc_ctrl;
- event = port->rx_event;
- if (!(event & LDC_EVENT_DATA_READY))
- return 0;
-
- /* we dont expect any other bits than RESET, UP, DATA_READY */
- BUG_ON(event != LDC_EVENT_DATA_READY);
-
- tx_wakeup = err = 0;
- while (1) {
- union {
- struct vio_msg_tag tag;
- u64 raw[8];
- } msgbuf;
-
- if (port->napi_resume) {
- struct vio_dring_data *pkt =
- (struct vio_dring_data *)&msgbuf;
- struct vio_dring_state *dr =
- &port->vio.drings[VIO_DRIVER_RX_RING];
-
- pkt->tag.type = VIO_TYPE_DATA;
- pkt->tag.stype = VIO_SUBTYPE_INFO;
- pkt->tag.stype_env = VIO_DRING_DATA;
- pkt->seq = dr->rcv_nxt;
- pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
- pkt->end_idx = -1;
- goto napi_resume;
- }
- err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
- if (unlikely(err < 0)) {
- if (err == -ECONNRESET)
- vio_conn_reset(vio);
- break;
- }
- if (err == 0)
- break;
- viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
- msgbuf.tag.type,
- msgbuf.tag.stype,
- msgbuf.tag.stype_env,
- msgbuf.tag.sid);
- err = vio_validate_sid(vio, &msgbuf.tag);
- if (err < 0)
- break;
-napi_resume:
- if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
- if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
- if (!port_is_up(port)) {
- /* failures like handshake_failure()
- * may have cleaned up dring, but
- * NAPI polling may bring us here.
- */
- err = -ECONNRESET;
- break;
- }
- err = vnet_rx(port, &msgbuf, &npkts, budget);
- if (npkts >= budget)
- break;
- if (npkts == 0)
- break;
- } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
- err = vnet_ack(port, &msgbuf);
- if (err > 0)
- tx_wakeup |= err;
- } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
- err = vnet_nack(port, &msgbuf);
- }
- } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
- if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
- err = handle_mcast(port, &msgbuf);
- else
- err = vio_control_pkt_engine(vio, &msgbuf);
- if (err)
- break;
- } else {
- err = vnet_handle_unknown(port, &msgbuf);
- }
- if (err == -ECONNRESET)
- break;
- }
- if (unlikely(tx_wakeup && err != -ECONNRESET))
- maybe_tx_wakeup(port);
- return npkts;
-}
+ struct vnet *vp = netdev_priv(dev);
-static int vnet_poll(struct napi_struct *napi, int budget)
-{
- struct vnet_port *port = container_of(napi, struct vnet_port, napi);
- struct vio_driver_state *vio = &port->vio;
- int processed = vnet_event_napi(port, budget);
-
- if (processed < budget) {
- napi_complete(napi);
- port->rx_event &= ~LDC_EVENT_DATA_READY;
- vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
- }
- return processed;
+ return vp->msg_enable;
}
-static void vnet_event(void *arg, int event)
+static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
- struct vnet_port *port = arg;
- struct vio_driver_state *vio = &port->vio;
-
- port->rx_event |= event;
- vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
- napi_schedule(&port->napi);
+ struct vnet *vp = netdev_priv(dev);
+ vp->msg_enable = value;
}
-static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
-{
- struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct vio_dring_data hdr = {
- .tag = {
- .type = VIO_TYPE_DATA,
- .stype = VIO_SUBTYPE_INFO,
- .stype_env = VIO_DRING_DATA,
- .sid = vio_send_sid(&port->vio),
- },
- .dring_ident = dr->ident,
- .start_idx = start,
- .end_idx = (u32) -1,
- };
- int err, delay;
- int retries = 0;
-
- if (port->stop_rx) {
- err = vnet_send_ack(port,
- &port->vio.drings[VIO_DRIVER_RX_RING],
- port->stop_rx_idx, -1,
- VIO_DRING_STOPPED);
- if (err <= 0)
- return err;
- }
-
- hdr.seq = dr->snd_nxt;
- delay = 1;
- do {
- err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
- if (err > 0) {
- dr->snd_nxt++;
- break;
- }
- udelay(delay);
- if ((delay <<= 1) > 128)
- delay = 128;
- if (retries++ > VNET_MAX_RETRIES)
- break;
- } while (err == -EAGAIN);
+static const struct ethtool_ops vnet_ethtool_ops = {
+ .get_drvinfo = vnet_get_drvinfo,
+ .get_msglevel = vnet_get_msglevel,
+ .set_msglevel = vnet_set_msglevel,
+ .get_link = ethtool_op_get_link,
+};
- return err;
-}
+static LIST_HEAD(vnet_list);
+static DEFINE_MUTEX(vnet_list_mutex);
-struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
+static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
struct hlist_head *hp = &vp->port_hash[hash];
struct vnet_port *port;
hlist_for_each_entry_rcu(port, hp, hash) {
- if (!port_is_up(port))
+ if (!sunvnet_port_is_up_common(port))
continue;
if (ether_addr_equal(port->raddr, skb->data))
return port;
@@ -927,838 +102,64 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
list_for_each_entry_rcu(port, &vp->port_list, list) {
if (!port->switch_port)
continue;
- if (!port_is_up(port))
+ if (!sunvnet_port_is_up_common(port))
continue;
return port;
}
return NULL;
}
-static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
- unsigned *pending)
-{
- struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct sk_buff *skb = NULL;
- int i, txi;
-
- *pending = 0;
-
- txi = dr->prod;
- for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
- struct vio_net_desc *d;
-
- --txi;
- if (txi < 0)
- txi = VNET_TX_RING_SIZE-1;
-
- d = vio_dring_entry(dr, txi);
-
- if (d->hdr.state == VIO_DESC_READY) {
- (*pending)++;
- continue;
- }
- if (port->tx_bufs[txi].skb) {
- if (d->hdr.state != VIO_DESC_DONE)
- pr_notice("invalid ring buffer state %d\n",
- d->hdr.state);
- BUG_ON(port->tx_bufs[txi].skb->next);
-
- port->tx_bufs[txi].skb->next = skb;
- skb = port->tx_bufs[txi].skb;
- port->tx_bufs[txi].skb = NULL;
-
- ldc_unmap(port->vio.lp,
- port->tx_bufs[txi].cookies,
- port->tx_bufs[txi].ncookies);
- } else if (d->hdr.state == VIO_DESC_FREE)
- break;
- d->hdr.state = VIO_DESC_FREE;
- }
- return skb;
-}
-
-static inline void vnet_free_skbs(struct sk_buff *skb)
-{
- struct sk_buff *next;
-
- while (skb) {
- next = skb->next;
- skb->next = NULL;
- dev_kfree_skb(skb);
- skb = next;
- }
-}
-
-static void vnet_clean_timer_expire(unsigned long port0)
-{
- struct vnet_port *port = (struct vnet_port *)port0;
- struct sk_buff *freeskbs;
- unsigned pending;
-
- netif_tx_lock(port->vp->dev);
- freeskbs = vnet_clean_tx_ring(port, &pending);
- netif_tx_unlock(port->vp->dev);
-
- vnet_free_skbs(freeskbs);
-
- if (pending)
- (void)mod_timer(&port->clean_timer,
- jiffies + VNET_CLEAN_TIMEOUT);
- else
- del_timer(&port->clean_timer);
-}
-
-static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
- struct ldc_trans_cookie *cookies, int ncookies,
- unsigned int map_perm)
+/* func arg to vnet_start_xmit_common() to get the proper tx port */
+static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
+ struct net_device *dev)
{
- int i, nc, err, blen;
-
- /* header */
- blen = skb_headlen(skb);
- if (blen < ETH_ZLEN)
- blen = ETH_ZLEN;
- blen += VNET_PACKET_SKIP;
- blen += 8 - (blen & 7);
-
- err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies,
- ncookies, map_perm);
- if (err < 0)
- return err;
- nc = err;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- u8 *vaddr;
-
- if (nc < ncookies) {
- vaddr = kmap_atomic(skb_frag_page(f));
- blen = skb_frag_size(f);
- blen += 8 - (blen & 7);
- err = ldc_map_single(lp, vaddr + f->page_offset,
- blen, cookies + nc, ncookies - nc,
- map_perm);
- kunmap_atomic(vaddr);
- } else {
- err = -EMSGSIZE;
- }
+ struct vnet *vp = netdev_priv(dev);
- if (err < 0) {
- ldc_unmap(lp, cookies, nc);
- return err;
- }
- nc += err;
- }
- return nc;
+ return __tx_port_find(vp, skb);
}
-static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
-{
- struct sk_buff *nskb;
- int i, len, pad, docopy;
-
- len = skb->len;
- pad = 0;
- if (len < ETH_ZLEN) {
- pad += ETH_ZLEN - skb->len;
- len += pad;
- }
- len += VNET_PACKET_SKIP;
- pad += 8 - (len & 7);
-
- /* make sure we have enough cookies and alignment in every frag */
- docopy = skb_shinfo(skb)->nr_frags >= ncookies;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-
- docopy |= f->page_offset & 7;
- }
- if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
- skb_tailroom(skb) < pad ||
- skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
- int start = 0, offset;
- __wsum csum;
-
- len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
- nskb = alloc_and_align_skb(skb->dev, len);
- if (nskb == NULL) {
- dev_kfree_skb(skb);
- return NULL;
- }
- skb_reserve(nskb, VNET_PACKET_SKIP);
-
- nskb->protocol = skb->protocol;
- offset = skb_mac_header(skb) - skb->data;
- skb_set_mac_header(nskb, offset);
- offset = skb_network_header(skb) - skb->data;
- skb_set_network_header(nskb, offset);
- offset = skb_transport_header(skb) - skb->data;
- skb_set_transport_header(nskb, offset);
-
- offset = 0;
- nskb->csum_offset = skb->csum_offset;
- nskb->ip_summed = skb->ip_summed;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- start = skb_checksum_start_offset(skb);
- if (start) {
- struct iphdr *iph = ip_hdr(nskb);
- int offset = start + nskb->csum_offset;
-
- if (skb_copy_bits(skb, 0, nskb->data, start)) {
- dev_kfree_skb(nskb);
- dev_kfree_skb(skb);
- return NULL;
- }
- *(__sum16 *)(skb->data + offset) = 0;
- csum = skb_copy_and_csum_bits(skb, start,
- nskb->data + start,
- skb->len - start, 0);
- if (iph->protocol == IPPROTO_TCP ||
- iph->protocol == IPPROTO_UDP) {
- csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - start,
- iph->protocol, csum);
- }
- *(__sum16 *)(nskb->data + offset) = csum;
-
- nskb->ip_summed = CHECKSUM_NONE;
- } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
- dev_kfree_skb(nskb);
- dev_kfree_skb(skb);
- return NULL;
- }
- (void)skb_put(nskb, skb->len);
- if (skb_is_gso(skb)) {
- skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
- skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
- }
- nskb->queue_mapping = skb->queue_mapping;
- dev_kfree_skb(skb);
- skb = nskb;
- }
- return skb;
-}
-
-static u16
-vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
- if (port == NULL)
+ if (!port)
return 0;
- return port->q_index;
-}
-
-static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev);
-
-static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
-{
- struct net_device *dev = port->vp->dev;
- struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct sk_buff *segs;
- int maclen, datalen;
- int status;
- int gso_size, gso_type, gso_segs;
- int hlen = skb_transport_header(skb) - skb_mac_header(skb);
- int proto = IPPROTO_IP;
-
- if (skb->protocol == htons(ETH_P_IP))
- proto = ip_hdr(skb)->protocol;
- else if (skb->protocol == htons(ETH_P_IPV6))
- proto = ipv6_hdr(skb)->nexthdr;
-
- if (proto == IPPROTO_TCP)
- hlen += tcp_hdr(skb)->doff * 4;
- else if (proto == IPPROTO_UDP)
- hlen += sizeof(struct udphdr);
- else {
- pr_err("vnet_handle_offloads GSO with unknown transport "
- "protocol %d tproto %d\n", skb->protocol, proto);
- hlen = 128; /* XXX */
- }
- datalen = port->tsolen - hlen;
-
- gso_size = skb_shinfo(skb)->gso_size;
- gso_type = skb_shinfo(skb)->gso_type;
- gso_segs = skb_shinfo(skb)->gso_segs;
-
- if (port->tso && gso_size < datalen)
- gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
-
- if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
- struct netdev_queue *txq;
-
- txq = netdev_get_tx_queue(dev, port->q_index);
- netif_tx_stop_queue(txq);
- if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
- return NETDEV_TX_BUSY;
- netif_tx_wake_queue(txq);
- }
- maclen = skb_network_header(skb) - skb_mac_header(skb);
- skb_pull(skb, maclen);
-
- if (port->tso && gso_size < datalen) {
- if (skb_unclone(skb, GFP_ATOMIC))
- goto out_dropped;
-
- /* segment to TSO size */
- skb_shinfo(skb)->gso_size = datalen;
- skb_shinfo(skb)->gso_segs = gso_segs;
- }
- segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
- if (IS_ERR(segs))
- goto out_dropped;
-
- skb_push(skb, maclen);
- skb_reset_mac_header(skb);
-
- status = 0;
- while (segs) {
- struct sk_buff *curr = segs;
-
- segs = segs->next;
- curr->next = NULL;
- if (port->tso && curr->len > dev->mtu) {
- skb_shinfo(curr)->gso_size = gso_size;
- skb_shinfo(curr)->gso_type = gso_type;
- skb_shinfo(curr)->gso_segs =
- DIV_ROUND_UP(curr->len - hlen, gso_size);
- } else
- skb_shinfo(curr)->gso_size = 0;
-
- skb_push(curr, maclen);
- skb_reset_mac_header(curr);
- memcpy(skb_mac_header(curr), skb_mac_header(skb),
- maclen);
- curr->csum_start = skb_transport_header(curr) - curr->head;
- if (ip_hdr(curr)->protocol == IPPROTO_TCP)
- curr->csum_offset = offsetof(struct tcphdr, check);
- else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
- curr->csum_offset = offsetof(struct udphdr, check);
-
- if (!(status & NETDEV_TX_MASK))
- status = vnet_start_xmit(curr, dev);
- if (status & NETDEV_TX_MASK)
- dev_kfree_skb_any(curr);
- }
-
- if (!(status & NETDEV_TX_MASK))
- dev_kfree_skb_any(skb);
- return status;
-out_dropped:
- dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ return port->q_index;
}
+/* Wrappers to common functions */
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct vnet *vp = netdev_priv(dev);
- struct vnet_port *port = NULL;
- struct vio_dring_state *dr;
- struct vio_net_desc *d;
- unsigned int len;
- struct sk_buff *freeskbs = NULL;
- int i, err, txi;
- unsigned pending = 0;
- struct netdev_queue *txq;
-
- rcu_read_lock();
- port = __tx_port_find(vp, skb);
- if (unlikely(!port)) {
- rcu_read_unlock();
- goto out_dropped;
- }
-
- if (skb_is_gso(skb) && skb->len > port->tsolen) {
- err = vnet_handle_offloads(port, skb);
- rcu_read_unlock();
- return err;
- }
-
- if (!skb_is_gso(skb) && skb->len > port->rmtu) {
- unsigned long localmtu = port->rmtu - ETH_HLEN;
-
- if (vio_version_after_eq(&port->vio, 1, 3))
- localmtu -= VLAN_HLEN;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct flowi4 fl4;
- struct rtable *rt = NULL;
-
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = dev->ifindex;
- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
- fl4.daddr = ip_hdr(skb)->daddr;
- fl4.saddr = ip_hdr(skb)->saddr;
-
- rt = ip_route_output_key(dev_net(dev), &fl4);
- rcu_read_unlock();
- if (!IS_ERR(rt)) {
- skb_dst_set(skb, &rt->dst);
- icmp_send(skb, ICMP_DEST_UNREACH,
- ICMP_FRAG_NEEDED,
- htonl(localmtu));
- }
- }
-#if IS_ENABLED(CONFIG_IPV6)
- else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
-#endif
- goto out_dropped;
- }
-
- skb = vnet_skb_shape(skb, 2);
-
- if (unlikely(!skb))
- goto out_dropped;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vnet_fullcsum(skb);
-
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- i = skb_get_queue_mapping(skb);
- txq = netdev_get_tx_queue(dev, i);
- if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
- if (!netif_tx_queue_stopped(txq)) {
- netif_tx_stop_queue(txq);
-
- /* This is a hard error, log it. */
- netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
- dev->stats.tx_errors++;
- }
- rcu_read_unlock();
- return NETDEV_TX_BUSY;
- }
-
- d = vio_dring_cur(dr);
-
- txi = dr->prod;
-
- freeskbs = vnet_clean_tx_ring(port, &pending);
-
- BUG_ON(port->tx_bufs[txi].skb);
-
- len = skb->len;
- if (len < ETH_ZLEN)
- len = ETH_ZLEN;
-
- err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
- (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
- if (err < 0) {
- netdev_info(dev, "tx buffer map error %d\n", err);
- goto out_dropped;
- }
-
- port->tx_bufs[txi].skb = skb;
- skb = NULL;
- port->tx_bufs[txi].ncookies = err;
-
- /* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
- * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
- * the protocol itself does not require it as long as the peer
- * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
- *
- * An ACK for every packet in the ring is expensive as the
- * sending of LDC messages is slow and affects performance.
- */
- d->hdr.ack = VIO_ACK_DISABLE;
- d->size = len;
- d->ncookies = port->tx_bufs[txi].ncookies;
- for (i = 0; i < d->ncookies; i++)
- d->cookies[i] = port->tx_bufs[txi].cookies[i];
- if (vio_version_after_eq(&port->vio, 1, 7)) {
- struct vio_net_dext *dext = vio_net_ext(d);
-
- memset(dext, 0, sizeof(*dext));
- if (skb_is_gso(port->tx_bufs[txi].skb)) {
- dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
- ->gso_size;
- dext->flags |= VNET_PKT_IPV4_LSO;
- }
- if (vio_version_after_eq(&port->vio, 1, 8) &&
- !port->switch_port) {
- dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
- dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
- }
- }
-
- /* This has to be a non-SMP write barrier because we are writing
- * to memory which is shared with the peer LDOM.
- */
- dma_wmb();
-
- d->hdr.state = VIO_DESC_READY;
-
- /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
- * to notify the consumer that some descriptors are READY.
- * After that "start" trigger, no additional triggers are needed until
- * a DRING_STOPPED is received from the consumer. The dr->cons field
- * (set up by vnet_ack()) has the value of the next dring index
- * that has not yet been ack-ed. We send a "start" trigger here
- * if, and only if, start_cons is true (reset it afterward). Conversely,
- * vnet_ack() should check if the dring corresponding to cons
- * is marked READY, but start_cons was false.
- * If so, vnet_ack() should send out the missed "start" trigger.
- *
- * Note that the dma_wmb() above makes sure the cookies et al. are
- * not globally visible before the VIO_DESC_READY, and that the
- * stores are ordered correctly by the compiler. The consumer will
- * not proceed until the VIO_DESC_READY is visible assuring that
- * the consumer does not observe anything related to descriptors
- * out of order. The HV trap from the LDC start trigger is the
- * producer to consumer announcement that work is available to the
- * consumer
- */
- if (!port->start_cons)
- goto ldc_start_done; /* previous trigger suffices */
-
- err = __vnet_tx_trigger(port, dr->cons);
- if (unlikely(err < 0)) {
- netdev_info(dev, "TX trigger error %d\n", err);
- d->hdr.state = VIO_DESC_FREE;
- skb = port->tx_bufs[txi].skb;
- port->tx_bufs[txi].skb = NULL;
- dev->stats.tx_carrier_errors++;
- goto out_dropped;
- }
-
-ldc_start_done:
- port->start_cons = false;
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
-
- dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
- if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
- netif_tx_stop_queue(txq);
- if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
- netif_tx_wake_queue(txq);
- }
-
- (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
- rcu_read_unlock();
-
- vnet_free_skbs(freeskbs);
-
- return NETDEV_TX_OK;
-
-out_dropped:
- if (pending)
- (void)mod_timer(&port->clean_timer,
- jiffies + VNET_CLEAN_TIMEOUT);
- else if (port)
- del_timer(&port->clean_timer);
- if (port)
- rcu_read_unlock();
- if (skb)
- dev_kfree_skb(skb);
- vnet_free_skbs(freeskbs);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
-}
-
-static void vnet_tx_timeout(struct net_device *dev)
-{
- /* XXX Implement me XXX */
-}
-
-static int vnet_open(struct net_device *dev)
-{
- netif_carrier_on(dev);
- netif_tx_start_all_queues(dev);
-
- return 0;
-}
-
-static int vnet_close(struct net_device *dev)
-{
- netif_tx_stop_all_queues(dev);
- netif_carrier_off(dev);
-
- return 0;
-}
-
-static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
-{
- struct vnet_mcast_entry *m;
-
- for (m = vp->mcast_list; m; m = m->next) {
- if (ether_addr_equal(m->addr, addr))
- return m;
- }
- return NULL;
-}
-
-static void __update_mc_list(struct vnet *vp, struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
-
- netdev_for_each_mc_addr(ha, dev) {
- struct vnet_mcast_entry *m;
-
- m = __vnet_mc_find(vp, ha->addr);
- if (m) {
- m->hit = 1;
- continue;
- }
-
- if (!m) {
- m = kzalloc(sizeof(*m), GFP_ATOMIC);
- if (!m)
- continue;
- memcpy(m->addr, ha->addr, ETH_ALEN);
- m->hit = 1;
-
- m->next = vp->mcast_list;
- vp->mcast_list = m;
- }
- }
-}
-
-static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
-{
- struct vio_net_mcast_info info;
- struct vnet_mcast_entry *m, **pp;
- int n_addrs;
-
- memset(&info, 0, sizeof(info));
-
- info.tag.type = VIO_TYPE_CTRL;
- info.tag.stype = VIO_SUBTYPE_INFO;
- info.tag.stype_env = VNET_MCAST_INFO;
- info.tag.sid = vio_send_sid(&port->vio);
- info.set = 1;
-
- n_addrs = 0;
- for (m = vp->mcast_list; m; m = m->next) {
- if (m->sent)
- continue;
- m->sent = 1;
- memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
- m->addr, ETH_ALEN);
- if (++n_addrs == VNET_NUM_MCAST) {
- info.count = n_addrs;
-
- (void) vio_ldc_send(&port->vio, &info,
- sizeof(info));
- n_addrs = 0;
- }
- }
- if (n_addrs) {
- info.count = n_addrs;
- (void) vio_ldc_send(&port->vio, &info, sizeof(info));
- }
-
- info.set = 0;
-
- n_addrs = 0;
- pp = &vp->mcast_list;
- while ((m = *pp) != NULL) {
- if (m->hit) {
- m->hit = 0;
- pp = &m->next;
- continue;
- }
-
- memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
- m->addr, ETH_ALEN);
- if (++n_addrs == VNET_NUM_MCAST) {
- info.count = n_addrs;
- (void) vio_ldc_send(&port->vio, &info,
- sizeof(info));
- n_addrs = 0;
- }
-
- *pp = m->next;
- kfree(m);
- }
- if (n_addrs) {
- info.count = n_addrs;
- (void) vio_ldc_send(&port->vio, &info, sizeof(info));
- }
+ return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find);
}
static void vnet_set_rx_mode(struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
- struct vnet_port *port;
-
- rcu_read_lock();
- list_for_each_entry_rcu(port, &vp->port_list, list) {
-
- if (port->switch_port) {
- __update_mc_list(vp, dev);
- __send_mc_list(vp, port);
- break;
- }
- }
- rcu_read_unlock();
-}
-
-static int vnet_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < 68 || new_mtu > 65535)
- return -EINVAL;
-
- dev->mtu = new_mtu;
- return 0;
-}
-static int vnet_set_mac_addr(struct net_device *dev, void *p)
-{
- return -EINVAL;
-}
-
-static void vnet_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
-}
-
-static u32 vnet_get_msglevel(struct net_device *dev)
-{
- struct vnet *vp = netdev_priv(dev);
- return vp->msg_enable;
-}
-
-static void vnet_set_msglevel(struct net_device *dev, u32 value)
-{
- struct vnet *vp = netdev_priv(dev);
- vp->msg_enable = value;
-}
-
-static const struct ethtool_ops vnet_ethtool_ops = {
- .get_drvinfo = vnet_get_drvinfo,
- .get_msglevel = vnet_get_msglevel,
- .set_msglevel = vnet_set_msglevel,
- .get_link = ethtool_op_get_link,
-};
-
-static void vnet_port_free_tx_bufs(struct vnet_port *port)
-{
- struct vio_dring_state *dr;
- int i;
-
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-
- if (dr->base == NULL)
- return;
-
- for (i = 0; i < VNET_TX_RING_SIZE; i++) {
- struct vio_net_desc *d;
- void *skb = port->tx_bufs[i].skb;
-
- if (!skb)
- continue;
-
- d = vio_dring_entry(dr, i);
-
- ldc_unmap(port->vio.lp,
- port->tx_bufs[i].cookies,
- port->tx_bufs[i].ncookies);
- dev_kfree_skb(skb);
- port->tx_bufs[i].skb = NULL;
- d->hdr.state = VIO_DESC_FREE;
- }
- ldc_free_exp_dring(port->vio.lp, dr->base,
- (dr->entry_size * dr->num_entries),
- dr->cookies, dr->ncookies);
- dr->base = NULL;
- dr->entry_size = 0;
- dr->num_entries = 0;
- dr->pending = 0;
- dr->ncookies = 0;
-}
-
-static void vnet_port_reset(struct vnet_port *port)
-{
- del_timer(&port->clean_timer);
- vnet_port_free_tx_bufs(port);
- port->rmtu = 0;
- port->tso = true;
- port->tsolen = 0;
-}
-
-static int vnet_port_alloc_tx_ring(struct vnet_port *port)
-{
- struct vio_dring_state *dr;
- unsigned long len, elen;
- int i, err, ncookies;
- void *dring;
-
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-
- elen = sizeof(struct vio_net_desc) +
- sizeof(struct ldc_trans_cookie) * 2;
- if (vio_version_after_eq(&port->vio, 1, 7))
- elen += sizeof(struct vio_net_dext);
- len = VNET_TX_RING_SIZE * elen;
-
- ncookies = VIO_MAX_RING_COOKIES;
- dring = ldc_alloc_exp_dring(port->vio.lp, len,
- dr->cookies, &ncookies,
- (LDC_MAP_SHADOW |
- LDC_MAP_DIRECT |
- LDC_MAP_RW));
- if (IS_ERR(dring)) {
- err = PTR_ERR(dring);
- goto err_out;
- }
-
- dr->base = dring;
- dr->entry_size = elen;
- dr->num_entries = VNET_TX_RING_SIZE;
- dr->prod = dr->cons = 0;
- port->start_cons = true; /* need an initial trigger */
- dr->pending = VNET_TX_RING_SIZE;
- dr->ncookies = ncookies;
-
- for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
- struct vio_net_desc *d;
-
- d = vio_dring_entry(dr, i);
- d->hdr.state = VIO_DESC_FREE;
- }
- return 0;
-
-err_out:
- vnet_port_free_tx_bufs(port);
-
- return err;
+ return sunvnet_set_rx_mode_common(dev, vp);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vnet_poll_controller(struct net_device *dev)
{
struct vnet *vp = netdev_priv(dev);
- struct vnet_port *port;
- unsigned long flags;
- spin_lock_irqsave(&vp->lock, flags);
- if (!list_empty(&vp->port_list)) {
- port = list_entry(vp->port_list.next, struct vnet_port, list);
- napi_schedule(&port->napi);
- }
- spin_unlock_irqrestore(&vp->lock, flags);
+ return sunvnet_poll_controller_common(dev, vp);
}
#endif
-static LIST_HEAD(vnet_list);
-static DEFINE_MUTEX(vnet_list_mutex);
static const struct net_device_ops vnet_ops = {
- .ndo_open = vnet_open,
- .ndo_stop = vnet_close,
+ .ndo_open = sunvnet_open_common,
+ .ndo_stop = sunvnet_close_common,
.ndo_set_rx_mode = vnet_set_rx_mode,
- .ndo_set_mac_address = vnet_set_mac_addr,
+ .ndo_set_mac_address = sunvnet_set_mac_addr_common,
.ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = vnet_tx_timeout,
- .ndo_change_mtu = vnet_change_mtu,
+ .ndo_tx_timeout = sunvnet_tx_timeout_common,
+ .ndo_change_mtu = sunvnet_change_mtu_common,
.ndo_start_xmit = vnet_start_xmit,
.ndo_select_queue = vnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1888,15 +289,15 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
}
static struct ldc_channel_config vnet_ldc_cfg = {
- .event = vnet_event,
+ .event = sunvnet_event_common,
.mtu = 64,
.mode = LDC_MODE_UNRELIABLE,
};
static struct vio_driver_ops vnet_vio_ops = {
- .send_attr = vnet_send_attr,
- .handle_attr = vnet_handle_attr,
- .handshake_complete = vnet_handshake_complete,
+ .send_attr = sunvnet_send_attr_common,
+ .handle_attr = sunvnet_handle_attr_common,
+ .handshake_complete = sunvnet_handshake_complete_common,
};
static void print_version(void)
@@ -1906,25 +307,6 @@ static void print_version(void)
const char *remote_macaddr_prop = "remote-mac-address";
-static void
-vnet_port_add_txq(struct vnet_port *port)
-{
- struct vnet *vp = port->vp;
- int n;
-
- n = vp->nports++;
- n = n & (VNET_MAX_TXQS - 1);
- port->q_index = n;
- netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
-}
-
-static void
-vnet_port_rm_txq(struct vnet_port *port)
-{
- port->vp->nports--;
- netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
-}
-
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct mdesc_handle *hp;
@@ -1972,13 +354,14 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (err)
goto err_out_free_port;
- netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common,
+ NAPI_POLL_WEIGHT);
INIT_HLIST_NODE(&port->hash);
INIT_LIST_HEAD(&port->list);
switch_port = 0;
- if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
+ if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL))
switch_port = 1;
port->switch_port = switch_port;
port->tso = true;
@@ -1991,7 +374,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
list_add_tail_rcu(&port->list, &vp->port_list);
hlist_add_head_rcu(&port->hash,
&vp->port_hash[vnet_hashfn(port->raddr)]);
- vnet_port_add_txq(port);
+ sunvnet_port_add_txq_common(port);
spin_unlock_irqrestore(&vp->lock, flags);
dev_set_drvdata(&vdev->dev, port);
@@ -1999,7 +382,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
pr_info("%s: PORT ( remote-mac %pM%s )\n",
vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
- setup_timer(&port->clean_timer, vnet_clean_timer_expire,
+ setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common,
(unsigned long)port);
napi_enable(&port->napi);
@@ -2022,7 +405,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
struct vnet_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
-
del_timer_sync(&port->vio.timer);
napi_disable(&port->napi);
@@ -2032,15 +414,14 @@ static int vnet_port_remove(struct vio_dev *vdev)
synchronize_rcu();
del_timer_sync(&port->clean_timer);
- vnet_port_rm_txq(port);
+ sunvnet_port_rm_txq_common(port);
netif_napi_del(&port->napi);
- vnet_port_free_tx_bufs(port);
+ sunvnet_port_free_tx_bufs_common(port);
vio_ldc_free(&port->vio);
dev_set_drvdata(&vdev->dev, NULL);
kfree(port);
-
}
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
deleted file mode 100644
index 01ca78191683..000000000000
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ /dev/null
@@ -1,114 +0,0 @@
-#ifndef _SUNVNET_H
-#define _SUNVNET_H
-
-#include <linux/interrupt.h>
-
-#define DESC_NCOOKIES(entry_size) \
- ((entry_size) - sizeof(struct vio_net_desc))
-
-/* length of time before we decide the hardware is borked,
- * and dev->tx_timeout() should be called to fix the problem
- */
-#define VNET_TX_TIMEOUT (5 * HZ)
-
-/* length of time (or less) we expect pending descriptors to be marked
- * as VIO_DESC_DONE and skbs ready to be freed
- */
-#define VNET_CLEAN_TIMEOUT ((HZ/100)+1)
-
-#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN)
-#define VNET_TX_RING_SIZE 512
-#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
-
-#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */
-#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */
-
-/* VNET packets are sent in buffers with the first 6 bytes skipped
- * so that after the ethernet header the IPv4/IPv6 headers are aligned
- * properly.
- */
-#define VNET_PACKET_SKIP 6
-
-#define VNET_MAXCOOKIES (VNET_MAXPACKET/PAGE_SIZE + 1)
-
-struct vnet_tx_entry {
- struct sk_buff *skb;
- unsigned int ncookies;
- struct ldc_trans_cookie cookies[VNET_MAXCOOKIES];
-};
-
-struct vnet;
-struct vnet_port {
- struct vio_driver_state vio;
-
- struct hlist_node hash;
- u8 raddr[ETH_ALEN];
- unsigned switch_port:1;
- unsigned tso:1;
- unsigned __pad:14;
-
- struct vnet *vp;
-
- struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE];
-
- struct list_head list;
-
- u32 stop_rx_idx;
- bool stop_rx;
- bool start_cons;
-
- struct timer_list clean_timer;
-
- u64 rmtu;
- u16 tsolen;
-
- struct napi_struct napi;
- u32 napi_stop_idx;
- bool napi_resume;
- int rx_event;
- u16 q_index;
-};
-
-static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
-{
- return container_of(vio, struct vnet_port, vio);
-}
-
-#define VNET_PORT_HASH_SIZE 16
-#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1)
-
-static inline unsigned int vnet_hashfn(u8 *mac)
-{
- unsigned int val = mac[4] ^ mac[5];
-
- return val & (VNET_PORT_HASH_MASK);
-}
-
-struct vnet_mcast_entry {
- u8 addr[ETH_ALEN];
- u8 sent;
- u8 hit;
- struct vnet_mcast_entry *next;
-};
-
-struct vnet {
- /* Protects port_list and port_hash. */
- spinlock_t lock;
-
- struct net_device *dev;
-
- u32 msg_enable;
-
- struct list_head port_list;
-
- struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
-
- struct vnet_mcast_entry *mcast_list;
-
- struct list_head list;
- u64 local_mac;
-
- int nports;
-};
-
-#endif /* _SUNVNET_H */
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
new file mode 100644
index 000000000000..904a5a12a85d
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -0,0 +1,1732 @@
+/* sunvnet_common.c: Sun LDOM Virtual Network Driver.
+ *
+ * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2016 Oracle. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/sunvnet.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/icmpv6.h>
+#endif
+
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/route.h>
+
+#include <asm/vio.h>
+#include <asm/ldc.h>
+
+#include "sunvnet_common.h"
+
+/* Heuristic for the number of times to exponentially back off and
+ * retry sending an LDC trigger when -EAGAIN is encountered.
+ */
+#define VNET_MAX_RETRIES 10
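/* Editorial sketch, not part of the patch: both vnet_send_ack() and
 * __vnet_tx_trigger() below retry vio_ldc_send() with the same bounded
 * exponential backoff -- double the delay after each -EAGAIN, cap it at
 * 128us, and give up after VNET_MAX_RETRIES attempts.  Condensed here
 * once (helper name is ours) so the shared pattern is easy to see.
 */
static int vnet_ldc_send_backoff(struct vio_driver_state *vio,
				 void *pkt, int len)
{
	int err, delay = 1, retries = 0;

	do {
		err = vio_ldc_send(vio, pkt, len);
		if (err > 0)
			break;		/* message queued to the LDC */
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);

	return err;
}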
+
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
+static void vnet_port_reset(struct vnet_port *port);
+
+static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
+{
+ return vio_dring_avail(dr, VNET_TX_RING_SIZE);
+}
+
+static int vnet_handle_unknown(struct vnet_port *port, void *arg)
+{
+ struct vio_msg_tag *pkt = arg;
+
+ pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
+ pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
+ pr_err("Resetting connection\n");
+
+ ldc_disconnect(port->vio.lp);
+
+ return -ECONNRESET;
+}
+
+static int vnet_port_alloc_tx_ring(struct vnet_port *port);
+
+int sunvnet_send_attr_common(struct vio_driver_state *vio)
+{
+ struct vnet_port *port = to_vnet_port(vio);
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
+ struct vio_net_attr_info pkt;
+ int framelen = ETH_FRAME_LEN;
+ int i, err;
+
+ err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
+ if (err)
+ return err;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.tag.type = VIO_TYPE_CTRL;
+ pkt.tag.stype = VIO_SUBTYPE_INFO;
+ pkt.tag.stype_env = VIO_ATTR_INFO;
+ pkt.tag.sid = vio_send_sid(vio);
+ if (vio_version_before(vio, 1, 2))
+ pkt.xfer_mode = VIO_DRING_MODE;
+ else
+ pkt.xfer_mode = VIO_NEW_DRING_MODE;
+ pkt.addr_type = VNET_ADDR_ETHERMAC;
+ pkt.ack_freq = 0;
+ for (i = 0; i < 6; i++)
+ pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
+ if (vio_version_after(vio, 1, 3)) {
+ if (port->rmtu) {
+ port->rmtu = min(VNET_MAXPACKET, port->rmtu);
+ pkt.mtu = port->rmtu;
+ } else {
+ port->rmtu = VNET_MAXPACKET;
+ pkt.mtu = port->rmtu;
+ }
+ if (vio_version_after_eq(vio, 1, 6))
+ pkt.options = VIO_TX_DRING;
+ } else if (vio_version_before(vio, 1, 3)) {
+ pkt.mtu = framelen;
+ } else { /* v1.3 */
+ pkt.mtu = framelen + VLAN_HLEN;
+ }
+
+ pkt.cflags = 0;
+ if (vio_version_after_eq(vio, 1, 7) && port->tso) {
+ pkt.cflags |= VNET_LSO_IPV4_CAPAB;
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ pkt.ipv4_lso_maxlen = port->tsolen;
+ }
+
+ pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
+
+ viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
+ "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
+ "cflags[0x%04x] lso_max[%u]\n",
+ pkt.xfer_mode, pkt.addr_type,
+ (unsigned long long)pkt.addr,
+ pkt.ack_freq, pkt.plnk_updt, pkt.options,
+ (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);
+
+ return vio_ldc_send(vio, &pkt, sizeof(pkt));
+}
+EXPORT_SYMBOL_GPL(sunvnet_send_attr_common);
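/* Editorial note, not part of the patch: the dev_addr loop in
 * sunvnet_send_attr_common() above packs the six MAC bytes big-endian
 * into the low 48 bits of pkt.addr, e.g. 02:04:06:08:0a:0c becomes
 * 0x020406080a0c.
 */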
+
+static int handle_attr_info(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ struct vnet_port *port = to_vnet_port(vio);
+ u64 localmtu;
+ u8 xfer_mode;
+
+ viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
+ "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
+ " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
+ pkt->xfer_mode, pkt->addr_type,
+ (unsigned long long)pkt->addr,
+ pkt->ack_freq, pkt->plnk_updt, pkt->options,
+ (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
+ pkt->ipv4_lso_maxlen);
+
+ pkt->tag.sid = vio_send_sid(vio);
+
+ xfer_mode = pkt->xfer_mode;
+ /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
+ if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
+ xfer_mode = VIO_NEW_DRING_MODE;
+
+ /* MTU negotiation:
+ * < v1.3 - ETH_FRAME_LEN exactly
+ * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
+ * pkt->mtu for ACK
+ * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
+ */
+ if (vio_version_before(vio, 1, 3)) {
+ localmtu = ETH_FRAME_LEN;
+ } else if (vio_version_after(vio, 1, 3)) {
+ localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
+ localmtu = min(pkt->mtu, localmtu);
+ pkt->mtu = localmtu;
+ } else { /* v1.3 */
+ localmtu = ETH_FRAME_LEN + VLAN_HLEN;
+ }
+ port->rmtu = localmtu;
+
+ /* LSO negotiation */
+ if (vio_version_after_eq(vio, 1, 7))
+ port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
+ else
+ port->tso = false;
+ if (port->tso) {
+ if (!port->tsolen)
+ port->tsolen = VNET_MAXTSO;
+ port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
+ if (port->tsolen < VNET_MINTSO) {
+ port->tso = false;
+ port->tsolen = 0;
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ }
+ pkt->ipv4_lso_maxlen = port->tsolen;
+ } else {
+ pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
+ pkt->ipv4_lso_maxlen = 0;
+ }
+
+ /* for version >= 1.6, ACK packet mode we support */
+ if (vio_version_after_eq(vio, 1, 6)) {
+ pkt->xfer_mode = VIO_NEW_DRING_MODE;
+ pkt->options = VIO_TX_DRING;
+ }
+
+ if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
+ pkt->addr_type != VNET_ADDR_ETHERMAC ||
+ pkt->mtu != localmtu) {
+ viodbg(HS, "SEND NET ATTR NACK\n");
+
+ pkt->tag.stype = VIO_SUBTYPE_NACK;
+
+ (void)vio_ldc_send(vio, pkt, sizeof(*pkt));
+
+ return -ECONNRESET;
+ }
+
+ viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
+ "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
+ "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
+ pkt->xfer_mode, pkt->addr_type,
+ (unsigned long long)pkt->addr,
+ pkt->ack_freq, pkt->plnk_updt, pkt->options,
+ (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
+ pkt->ipv4_lso_maxlen);
+
+ pkt->tag.stype = VIO_SUBTYPE_ACK;
+
+ return vio_ldc_send(vio, pkt, sizeof(*pkt));
+}
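/* Editorial sketch, not part of the patch: the MTU negotiation rules
 * from handle_attr_info() above, collapsed into one hypothetical helper
 * so the three version cases are visible side by side.
 */
static u64 vnet_negotiated_mtu(struct vio_driver_state *vio,
			       struct vnet_port *port, u64 peer_mtu)
{
	if (vio_version_before(vio, 1, 3))	/* < v1.3: fixed frame */
		return ETH_FRAME_LEN;
	if (vio_version_after(vio, 1, 3))	/* > v1.3: negotiate down */
		return min_t(u64, peer_mtu,
			     port->rmtu ? port->rmtu : VNET_MAXPACKET);
	return ETH_FRAME_LEN + VLAN_HLEN;	/* = v1.3: frame + VLAN tag */
}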
+
+static int handle_attr_ack(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ viodbg(HS, "GOT NET ATTR ACK\n");
+
+ return 0;
+}
+
+static int handle_attr_nack(struct vio_driver_state *vio,
+ struct vio_net_attr_info *pkt)
+{
+ viodbg(HS, "GOT NET ATTR NACK\n");
+
+ return -ECONNRESET;
+}
+
+int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg)
+{
+ struct vio_net_attr_info *pkt = arg;
+
+ switch (pkt->tag.stype) {
+ case VIO_SUBTYPE_INFO:
+ return handle_attr_info(vio, pkt);
+
+ case VIO_SUBTYPE_ACK:
+ return handle_attr_ack(vio, pkt);
+
+ case VIO_SUBTYPE_NACK:
+ return handle_attr_nack(vio, pkt);
+
+ default:
+ return -ECONNRESET;
+ }
+}
+EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common);
+
+void sunvnet_handshake_complete_common(struct vio_driver_state *vio)
+{
+ struct vio_dring_state *dr;
+
+ dr = &vio->drings[VIO_DRIVER_RX_RING];
+ dr->rcv_nxt = 1;
+ dr->snd_nxt = 1;
+
+ dr = &vio->drings[VIO_DRIVER_TX_RING];
+ dr->rcv_nxt = 1;
+ dr->snd_nxt = 1;
+}
+EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common);
+
+/* The hypervisor interface that implements copying to/from imported
+ * memory from another domain requires that copies are done to 8-byte
+ * aligned buffers, and that the lengths of such copies are also 8-byte
+ * multiples.
+ *
+ * So we align skb->data to an 8-byte multiple and pad out the data
+ * area so we can round the copy length up to the next multiple of
+ * 8 for the copy.
+ *
+ * The transmitter puts the actual start of the packet 6 bytes into
+ * the buffer it sends over, so that the IP headers after the ethernet
+ * header are aligned properly. These 6 bytes are not in the descriptor
+ * length, they are simply implied. This offset is represented using
+ * the VNET_PACKET_SKIP macro.
+ */
+static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
+ unsigned int len)
+{
+ struct sk_buff *skb;
+ unsigned long addr, off;
+
+ skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
+ if (unlikely(!skb))
+ return NULL;
+
+ addr = (unsigned long)skb->data;
+ off = ((addr + 7UL) & ~7UL) - addr;
+ if (off)
+ skb_reserve(skb, off);
+
+ return skb;
+}
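/* Editorial sketch, not part of the patch: the two 8-byte alignment
 * idioms used by alloc_and_align_skb() above and vnet_skb_map() below,
 * with worked examples.  Note the length padding intentionally adds a
 * full 8 bytes when the length is already a multiple of 8; the buffers
 * are over-allocated to absorb that.
 */
static inline unsigned long vnet_round_up8(unsigned long addr)
{
	return (addr + 7UL) & ~7UL;	/* 0x1003 -> 0x1008, 0x1008 -> 0x1008 */
}

static inline int vnet_pad8(int len)
{
	return len + (8 - (len & 7));	/* 60 -> 64, but 64 -> 72 */
}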
+
+static inline void vnet_fullcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ int offset = skb_transport_offset(skb);
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+ if (iph->protocol != IPPROTO_TCP &&
+ iph->protocol != IPPROTO_UDP)
+ return;
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum_level = 1;
+ skb->csum = 0;
+ if (iph->protocol == IPPROTO_TCP) {
+ struct tcphdr *ptcp = tcp_hdr(skb);
+
+ ptcp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_TCP,
+ skb->csum);
+ } else if (iph->protocol == IPPROTO_UDP) {
+ struct udphdr *pudp = udp_hdr(skb);
+
+ pudp->check = 0;
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - offset, IPPROTO_UDP,
+ skb->csum);
+ }
+}
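/* Editorial note, not part of the patch: vnet_fullcsum() above zeroes
 * the TCP/UDP checksum field before summing because that field lies
 * inside the region being summed; csum_tcpudp_magic() then folds in the
 * pseudo-header (saddr, daddr, length, protocol) to finish the checksum.
 */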
+
+static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
+{
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
+ unsigned int len = desc->size;
+ unsigned int copy_len;
+ struct sk_buff *skb;
+ int maxlen;
+ int err;
+
+ err = -EMSGSIZE;
+ if (port->tso && port->tsolen > port->rmtu)
+ maxlen = port->tsolen;
+ else
+ maxlen = port->rmtu;
+ if (unlikely(len < ETH_ZLEN || len > maxlen)) {
+ dev->stats.rx_length_errors++;
+ goto out_dropped;
+ }
+
+ skb = alloc_and_align_skb(dev, len);
+ err = -ENOMEM;
+ if (unlikely(!skb)) {
+ dev->stats.rx_missed_errors++;
+ goto out_dropped;
+ }
+
+ copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
+ skb_put(skb, copy_len);
+ err = ldc_copy(port->vio.lp, LDC_COPY_IN,
+ skb->data, copy_len, 0,
+ desc->cookies, desc->ncookies);
+ if (unlikely(err < 0)) {
+ dev->stats.rx_frame_errors++;
+ goto out_free_skb;
+ }
+
+ skb_pull(skb, VNET_PACKET_SKIP);
+ skb_trim(skb, len);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (vio_version_after_eq(&port->vio, 1, 8)) {
+ struct vio_net_dext *dext = vio_net_ext(desc);
+
+ skb_reset_network_header(skb);
+
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ iph->check = 0;
+ ip_send_check(iph);
+ }
+ }
+ if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
+ skb->ip_summed == CHECKSUM_NONE) {
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ int ihl = iph->ihl * 4;
+
+ skb_reset_transport_header(skb);
+ skb_set_transport_header(skb, ihl);
+ vnet_fullcsum(skb);
+ }
+ }
+ if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_level = 0;
+ if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
+ skb->csum_level = 1;
+ }
+ }
+
+ skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ napi_gro_receive(&port->napi, skb);
+ return 0;
+
+out_free_skb:
+ kfree_skb(skb);
+
+out_dropped:
+ dev->stats.rx_dropped++;
+ return err;
+}
+
+static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
+ u32 start, u32 end, u8 vio_dring_state)
+{
+ struct vio_dring_data hdr = {
+ .tag = {
+ .type = VIO_TYPE_DATA,
+ .stype = VIO_SUBTYPE_ACK,
+ .stype_env = VIO_DRING_DATA,
+ .sid = vio_send_sid(&port->vio),
+ },
+ .dring_ident = dr->ident,
+ .start_idx = start,
+ .end_idx = end,
+ .state = vio_dring_state,
+ };
+ int err, delay;
+ int retries = 0;
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
+ do {
+ err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+ if (err > 0) {
+ dr->snd_nxt++;
+ break;
+ }
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
+ if (retries++ > VNET_MAX_RETRIES) {
+ pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
+ port->raddr[0], port->raddr[1],
+ port->raddr[2], port->raddr[3],
+ port->raddr[4], port->raddr[5]);
+ break;
+ }
+ } while (err == -EAGAIN);
+
+ if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
+ port->stop_rx_idx = end;
+ port->stop_rx = true;
+ } else {
+ port->stop_rx_idx = 0;
+ port->stop_rx = false;
+ }
+
+ return err;
+}
+
+static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ u32 index)
+{
+ struct vio_net_desc *desc = port->vio.desc_buf;
+ int err;
+
+ err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
+ (index * dr->entry_size),
+ dr->cookies, dr->ncookies);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return desc;
+}
+
+static int put_rx_desc(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ struct vio_net_desc *desc,
+ u32 index)
+{
+ int err;
+
+ err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
+ (index * dr->entry_size),
+ dr->cookies, dr->ncookies);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int vnet_walk_rx_one(struct vnet_port *port,
+ struct vio_dring_state *dr,
+ u32 index, int *needs_ack)
+{
+ struct vio_net_desc *desc = get_rx_desc(port, dr, index);
+ struct vio_driver_state *vio = &port->vio;
+ int err;
+
+ BUG_ON(!desc);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ if (desc->hdr.state != VIO_DESC_READY)
+ return 1;
+
+ dma_rmb();
+
+ viodbg(DATA, "vnet_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
+ desc->hdr.state, desc->hdr.ack,
+ desc->size, desc->ncookies,
+ desc->cookies[0].cookie_addr,
+ desc->cookies[0].cookie_size);
+
+ err = vnet_rx_one(port, desc);
+ if (err == -ECONNRESET)
+ return err;
+ trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
+ index, desc->hdr.ack);
+ desc->hdr.state = VIO_DESC_DONE;
+ err = put_rx_desc(port, dr, desc, index);
+ if (err < 0)
+ return err;
+ *needs_ack = desc->hdr.ack;
+ return 0;
+}
+
+static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
+ u32 start, u32 end, int *npkts, int budget)
+{
+ struct vio_driver_state *vio = &port->vio;
+ int ack_start = -1, ack_end = -1;
+ bool send_ack = true;
+
+ end = (end == (u32)-1) ? vio_dring_prev(dr, start)
+ : vio_dring_next(dr, end);
+
+ viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
+
+ while (start != end) {
+ int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
+
+ if (err == -ECONNRESET)
+ return err;
+ if (err != 0)
+ break;
+ (*npkts)++;
+ if (ack_start == -1)
+ ack_start = start;
+ ack_end = start;
+ start = vio_dring_next(dr, start);
+ if (ack && start != end) {
+ err = vnet_send_ack(port, dr, ack_start, ack_end,
+ VIO_DRING_ACTIVE);
+ if (err == -ECONNRESET)
+ return err;
+ ack_start = -1;
+ }
+ if ((*npkts) >= budget) {
+ send_ack = false;
+ break;
+ }
+ }
+ if (unlikely(ack_start == -1)) {
+ ack_end = vio_dring_prev(dr, start);
+ ack_start = ack_end;
+ }
+ if (send_ack) {
+ port->napi_resume = false;
+ trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
+ port->vio._peer_sid,
+ ack_end, *npkts);
+ return vnet_send_ack(port, dr, ack_start, ack_end,
+ VIO_DRING_STOPPED);
+ } else {
+ trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
+ port->vio._peer_sid,
+ ack_end, *npkts);
+ port->napi_resume = true;
+ port->napi_stop_idx = ack_end;
+ return 1;
+ }
+}
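/* Editorial note, not part of the patch: vnet_walk_rx() above defers the
 * final VIO_DRING_STOPPED ack when the NAPI budget runs out.  The deferred
 * state (napi_resume/napi_stop_idx) is replayed by vnet_event_napi(),
 * which re-enters the walk with a synthesized VIO_DRING_DATA message
 * starting at vio_dring_next(dr, napi_stop_idx).
 */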
+
+static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
+ int budget)
+{
+ struct vio_dring_data *pkt = msgbuf;
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
+ struct vio_driver_state *vio = &port->vio;
+
+ viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
+ pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
+
+ if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
+ return 0;
+ if (unlikely(pkt->seq != dr->rcv_nxt)) {
+ pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
+ pkt->seq, dr->rcv_nxt);
+ return 0;
+ }
+
+ if (!port->napi_resume)
+ dr->rcv_nxt++;
+
+ /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
+
+ return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
+ npkts, budget);
+}
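/* Editorial sketch, not part of the patch: one possible shape for the
 * bounds check that the "XXX Validate pkt->start_idx and pkt->end_idx"
 * comment in vnet_rx() above asks for.  An end_idx of (u32)-1 is the
 * protocol's "walk to the producer" marker, so it must remain legal.
 */
static bool vnet_rx_index_sane(struct vio_dring_state *dr, u32 start, u32 end)
{
	if (start >= dr->num_entries)
		return false;
	return end == (u32)-1 || end < dr->num_entries;
}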
+
+static int idx_is_pending(struct vio_dring_state *dr, u32 end)
+{
+ u32 idx = dr->cons;
+ int found = 0;
+
+ while (idx != dr->prod) {
+ if (idx == end) {
+ found = 1;
+ break;
+ }
+ idx = vio_dring_next(dr, idx);
+ }
+ return found;
+}
+
+static int vnet_ack(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data *pkt = msgbuf;
+ struct net_device *dev;
+ u32 end;
+ struct vio_net_desc *desc;
+ struct netdev_queue *txq;
+
+ if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
+ return 0;
+
+ end = pkt->end_idx;
+ dev = VNET_PORT_TO_NET_DEVICE(port);
+ netif_tx_lock(dev);
+ if (unlikely(!idx_is_pending(dr, end))) {
+ netif_tx_unlock(dev);
+ return 0;
+ }
+
+ /* sync for race conditions with vnet_start_xmit() and tell xmit it
+ * is time to send a trigger.
+ */
+ trace_vnet_rx_stopped_ack(port->vio._local_sid,
+ port->vio._peer_sid, end);
+ dr->cons = vio_dring_next(dr, end);
+ desc = vio_dring_entry(dr, dr->cons);
+ if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
+ /* vnet_start_xmit() just populated this dring but missed
+ * sending the "start" LDC message to the consumer.
+ * Send a "start" trigger on its behalf.
+ */
+ if (__vnet_tx_trigger(port, dr->cons) > 0)
+ port->start_cons = false;
+ else
+ port->start_cons = true;
+ } else {
+ port->start_cons = true;
+ }
+ netif_tx_unlock(dev);
+
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
+ return 1;
+
+ return 0;
+}
+
+static int vnet_nack(struct vnet_port *port, void *msgbuf)
+{
+ /* XXX just reset or similar XXX */
+ return 0;
+}
+
+static int handle_mcast(struct vnet_port *port, void *msgbuf)
+{
+ struct vio_net_mcast_info *pkt = msgbuf;
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
+
+ if (pkt->tag.stype != VIO_SUBTYPE_ACK)
+ pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
+ dev->name,
+ pkt->tag.type,
+ pkt->tag.stype,
+ pkt->tag.stype_env,
+ pkt->tag.sid);
+
+ return 0;
+}
+
+/* Got back a STOPPED LDC message on port. If the queue is stopped,
+ * wake it up so that we'll send out another START message at the
+ * next TX.
+ */
+static void maybe_tx_wakeup(struct vnet_port *port)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
+ port->q_index);
+ __netif_tx_lock(txq, smp_processor_id());
+ if (likely(netif_tx_queue_stopped(txq))) {
+ struct vio_dring_state *dr;
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ netif_tx_wake_queue(txq);
+ }
+ __netif_tx_unlock(txq);
+}
+
+bool sunvnet_port_is_up_common(struct vnet_port *port)
+{
+ struct vio_driver_state *vio = &port->vio;
+
+ return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);
+
+static int vnet_event_napi(struct vnet_port *port, int budget)
+{
+ struct vio_driver_state *vio = &port->vio;
+ int tx_wakeup, err;
+ int npkts = 0;
+ int event = (port->rx_event & LDC_EVENT_RESET);
+
+ldc_ctrl:
+ if (unlikely(event == LDC_EVENT_RESET ||
+ event == LDC_EVENT_UP)) {
+ vio_link_state_change(vio, event);
+
+ if (event == LDC_EVENT_RESET) {
+ vnet_port_reset(port);
+ vio_port_up(vio);
+ }
+ port->rx_event = 0;
+ return 0;
+ }
+ /* We may have multiple LDC events in rx_event. Unroll send_events() */
+ event = (port->rx_event & LDC_EVENT_UP);
+ port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP);
+ if (event == LDC_EVENT_UP)
+ goto ldc_ctrl;
+ event = port->rx_event;
+ if (!(event & LDC_EVENT_DATA_READY))
+ return 0;
+
+ /* we don't expect any other bits than RESET, UP, DATA_READY */
+ BUG_ON(event != LDC_EVENT_DATA_READY);
+
+ err = 0;
+ tx_wakeup = 0;
+ while (1) {
+ union {
+ struct vio_msg_tag tag;
+ u64 raw[8];
+ } msgbuf;
+
+ if (port->napi_resume) {
+ struct vio_dring_data *pkt =
+ (struct vio_dring_data *)&msgbuf;
+ struct vio_dring_state *dr =
+ &port->vio.drings[VIO_DRIVER_RX_RING];
+
+ pkt->tag.type = VIO_TYPE_DATA;
+ pkt->tag.stype = VIO_SUBTYPE_INFO;
+ pkt->tag.stype_env = VIO_DRING_DATA;
+ pkt->seq = dr->rcv_nxt;
+ pkt->start_idx = vio_dring_next(dr,
+ port->napi_stop_idx);
+ pkt->end_idx = -1;
+ goto napi_resume;
+ }
+ err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+ if (unlikely(err < 0)) {
+ if (err == -ECONNRESET)
+ vio_conn_reset(vio);
+ break;
+ }
+ if (err == 0)
+ break;
+ viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+ msgbuf.tag.type,
+ msgbuf.tag.stype,
+ msgbuf.tag.stype_env,
+ msgbuf.tag.sid);
+ err = vio_validate_sid(vio, &msgbuf.tag);
+ if (err < 0)
+ break;
+napi_resume:
+ if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
+ if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
+ if (!sunvnet_port_is_up_common(port)) {
+ /* failures like handshake_failure()
+ * may have cleaned up dring, but
+ * NAPI polling may bring us here.
+ */
+ err = -ECONNRESET;
+ break;
+ }
+ err = vnet_rx(port, &msgbuf, &npkts, budget);
+ if (npkts >= budget)
+ break;
+ if (npkts == 0)
+ break;
+ } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
+ err = vnet_ack(port, &msgbuf);
+ if (err > 0)
+ tx_wakeup |= err;
+ } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
+ err = vnet_nack(port, &msgbuf);
+ }
+ } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
+ if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
+ err = handle_mcast(port, &msgbuf);
+ else
+ err = vio_control_pkt_engine(vio, &msgbuf);
+ if (err)
+ break;
+ } else {
+ err = vnet_handle_unknown(port, &msgbuf);
+ }
+ if (err == -ECONNRESET)
+ break;
+ }
+ if (unlikely(tx_wakeup && err != -ECONNRESET))
+ maybe_tx_wakeup(port);
+ return npkts;
+}
+
+int sunvnet_poll_common(struct napi_struct *napi, int budget)
+{
+ struct vnet_port *port = container_of(napi, struct vnet_port, napi);
+ struct vio_driver_state *vio = &port->vio;
+ int processed = vnet_event_napi(port, budget);
+
+ if (processed < budget) {
+ napi_complete(napi);
+ port->rx_event &= ~LDC_EVENT_DATA_READY;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
+ }
+ return processed;
+}
+EXPORT_SYMBOL_GPL(sunvnet_poll_common);
+
+void sunvnet_event_common(void *arg, int event)
+{
+ struct vnet_port *port = arg;
+ struct vio_driver_state *vio = &port->vio;
+
+ port->rx_event |= event;
+ vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
+ napi_schedule(&port->napi);
+}
+EXPORT_SYMBOL_GPL(sunvnet_event_common);
+
+static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct vio_dring_data hdr = {
+ .tag = {
+ .type = VIO_TYPE_DATA,
+ .stype = VIO_SUBTYPE_INFO,
+ .stype_env = VIO_DRING_DATA,
+ .sid = vio_send_sid(&port->vio),
+ },
+ .dring_ident = dr->ident,
+ .start_idx = start,
+ .end_idx = (u32)-1,
+ };
+ int err, delay;
+ int retries = 0;
+
+ if (port->stop_rx) {
+ trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
+ port->vio._peer_sid,
+ port->stop_rx_idx, -1);
+ err = vnet_send_ack(port,
+ &port->vio.drings[VIO_DRIVER_RX_RING],
+ port->stop_rx_idx, -1,
+ VIO_DRING_STOPPED);
+ if (err <= 0)
+ return err;
+ }
+
+ hdr.seq = dr->snd_nxt;
+ delay = 1;
+ do {
+ err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
+ if (err > 0) {
+ dr->snd_nxt++;
+ break;
+ }
+ udelay(delay);
+ if ((delay <<= 1) > 128)
+ delay = 128;
+ if (retries++ > VNET_MAX_RETRIES)
+ break;
+ } while (err == -EAGAIN);
+ trace_vnet_tx_trigger(port->vio._local_sid,
+ port->vio._peer_sid, start, err);
+
+ return err;
+}
+
+static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
+ unsigned *pending)
+{
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct sk_buff *skb = NULL;
+ int i, txi;
+
+ *pending = 0;
+
+ txi = dr->prod;
+ for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
+ struct vio_net_desc *d;
+
+ --txi;
+ if (txi < 0)
+ txi = VNET_TX_RING_SIZE - 1;
+
+ d = vio_dring_entry(dr, txi);
+
+ if (d->hdr.state == VIO_DESC_READY) {
+ (*pending)++;
+ continue;
+ }
+ if (port->tx_bufs[txi].skb) {
+ if (d->hdr.state != VIO_DESC_DONE)
+ pr_notice("invalid ring buffer state %d\n",
+ d->hdr.state);
+ BUG_ON(port->tx_bufs[txi].skb->next);
+
+ port->tx_bufs[txi].skb->next = skb;
+ skb = port->tx_bufs[txi].skb;
+ port->tx_bufs[txi].skb = NULL;
+
+ ldc_unmap(port->vio.lp,
+ port->tx_bufs[txi].cookies,
+ port->tx_bufs[txi].ncookies);
+ } else if (d->hdr.state == VIO_DESC_FREE) {
+ break;
+ }
+ d->hdr.state = VIO_DESC_FREE;
+ }
+ return skb;
+}
+
+static inline void vnet_free_skbs(struct sk_buff *skb)
+{
+ struct sk_buff *next;
+
+ while (skb) {
+ next = skb->next;
+ skb->next = NULL;
+ dev_kfree_skb(skb);
+ skb = next;
+ }
+}
+
+void sunvnet_clean_timer_expire_common(unsigned long port0)
+{
+ struct vnet_port *port = (struct vnet_port *)port0;
+ struct sk_buff *freeskbs;
+ unsigned pending;
+
+ netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
+ freeskbs = vnet_clean_tx_ring(port, &pending);
+ netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));
+
+ vnet_free_skbs(freeskbs);
+
+ if (pending)
+ (void)mod_timer(&port->clean_timer,
+ jiffies + VNET_CLEAN_TIMEOUT);
+ else
+ del_timer(&port->clean_timer);
+}
+EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);
+
+static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
+ struct ldc_trans_cookie *cookies, int ncookies,
+ unsigned int map_perm)
+{
+ int i, nc, err, blen;
+
+ /* header */
+ blen = skb_headlen(skb);
+ if (blen < ETH_ZLEN)
+ blen = ETH_ZLEN;
+ blen += VNET_PACKET_SKIP;
+ blen += 8 - (blen & 7);
+
+ err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
+ ncookies, map_perm);
+ if (err < 0)
+ return err;
+ nc = err;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+ u8 *vaddr;
+
+ if (nc < ncookies) {
+ vaddr = kmap_atomic(skb_frag_page(f));
+ blen = skb_frag_size(f);
+ blen += 8 - (blen & 7);
+ err = ldc_map_single(lp, vaddr + f->page_offset,
+ blen, cookies + nc, ncookies - nc,
+ map_perm);
+ kunmap_atomic(vaddr);
+ } else {
+ err = -EMSGSIZE;
+ }
+
+ if (err < 0) {
+ ldc_unmap(lp, cookies, nc);
+ return err;
+ }
+ nc += err;
+ }
+ return nc;
+}
+
+static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
+{
+ struct sk_buff *nskb;
+ int i, len, pad, docopy;
+
+ len = skb->len;
+ pad = 0;
+ if (len < ETH_ZLEN) {
+ pad += ETH_ZLEN - skb->len;
+ len += pad;
+ }
+ len += VNET_PACKET_SKIP;
+ pad += 8 - (len & 7);
+
+ /* make sure we have enough cookies and alignment in every frag */
+ docopy = skb_shinfo(skb)->nr_frags >= ncookies;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+ docopy |= f->page_offset & 7;
+ }
+ if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
+ skb_tailroom(skb) < pad ||
+ skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
+ int start = 0, offset;
+ __wsum csum;
+
+ len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
+ nskb = alloc_and_align_skb(skb->dev, len);
+ if (!nskb) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ skb_reserve(nskb, VNET_PACKET_SKIP);
+
+ nskb->protocol = skb->protocol;
+ offset = skb_mac_header(skb) - skb->data;
+ skb_set_mac_header(nskb, offset);
+ offset = skb_network_header(skb) - skb->data;
+ skb_set_network_header(nskb, offset);
+ offset = skb_transport_header(skb) - skb->data;
+ skb_set_transport_header(nskb, offset);
+
+ offset = 0;
+ nskb->csum_offset = skb->csum_offset;
+ nskb->ip_summed = skb->ip_summed;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ start = skb_checksum_start_offset(skb);
+ if (start) {
+ struct iphdr *iph = ip_hdr(nskb);
+ int offset = start + nskb->csum_offset;
+
+ if (skb_copy_bits(skb, 0, nskb->data, start)) {
+ dev_kfree_skb(nskb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ *(__sum16 *)(skb->data + offset) = 0;
+ csum = skb_copy_and_csum_bits(skb, start,
+ nskb->data + start,
+ skb->len - start, 0);
+ if (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP) {
+ csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - start,
+ iph->protocol, csum);
+ }
+ *(__sum16 *)(nskb->data + offset) = csum;
+
+ nskb->ip_summed = CHECKSUM_NONE;
+ } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
+ dev_kfree_skb(nskb);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ (void)skb_put(nskb, skb->len);
+ if (skb_is_gso(skb)) {
+ skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
+ skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
+ }
+ nskb->queue_mapping = skb->queue_mapping;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+ return skb;
+}
+
+static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *))
+{
+ struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
+ struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ struct sk_buff *segs;
+ int maclen, datalen;
+ int status;
+ int gso_size, gso_type, gso_segs;
+ int hlen = skb_transport_header(skb) - skb_mac_header(skb);
+ int proto = IPPROTO_IP;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = ip_hdr(skb)->protocol;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ proto = ipv6_hdr(skb)->nexthdr;
+
+ if (proto == IPPROTO_TCP) {
+ hlen += tcp_hdr(skb)->doff * 4;
+ } else if (proto == IPPROTO_UDP) {
+ hlen += sizeof(struct udphdr);
+ } else {
+ pr_err("vnet_handle_offloads GSO with unknown transport "
+ "protocol %d tproto %d\n", skb->protocol, proto);
+ hlen = 128; /* XXX */
+ }
+ datalen = port->tsolen - hlen;
+
+ gso_size = skb_shinfo(skb)->gso_size;
+ gso_type = skb_shinfo(skb)->gso_type;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (port->tso && gso_size < datalen)
+ gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
+
+ if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, port->q_index);
+ netif_tx_stop_queue(txq);
+ if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
+ return NETDEV_TX_BUSY;
+ netif_tx_wake_queue(txq);
+ }
+
+ maclen = skb_network_header(skb) - skb_mac_header(skb);
+ skb_pull(skb, maclen);
+
+ if (port->tso && gso_size < datalen) {
+ if (skb_unclone(skb, GFP_ATOMIC))
+ goto out_dropped;
+
+ /* segment to TSO size */
+ skb_shinfo(skb)->gso_size = datalen;
+ skb_shinfo(skb)->gso_segs = gso_segs;
+ }
+ segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
+ if (IS_ERR(segs))
+ goto out_dropped;
+
+ skb_push(skb, maclen);
+ skb_reset_mac_header(skb);
+
+ status = 0;
+ while (segs) {
+ struct sk_buff *curr = segs;
+
+ segs = segs->next;
+ curr->next = NULL;
+ if (port->tso && curr->len > dev->mtu) {
+ skb_shinfo(curr)->gso_size = gso_size;
+ skb_shinfo(curr)->gso_type = gso_type;
+ skb_shinfo(curr)->gso_segs =
+ DIV_ROUND_UP(curr->len - hlen, gso_size);
+ } else {
+ skb_shinfo(curr)->gso_size = 0;
+ }
+
+ skb_push(curr, maclen);
+ skb_reset_mac_header(curr);
+ memcpy(skb_mac_header(curr), skb_mac_header(skb),
+ maclen);
+ curr->csum_start = skb_transport_header(curr) - curr->head;
+ if (ip_hdr(curr)->protocol == IPPROTO_TCP)
+ curr->csum_offset = offsetof(struct tcphdr, check);
+ else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
+ curr->csum_offset = offsetof(struct udphdr, check);
+
+ if (!(status & NETDEV_TX_MASK))
+ status = sunvnet_start_xmit_common(curr, dev,
+ vnet_tx_port);
+ if (status & NETDEV_TX_MASK)
+ dev_kfree_skb_any(curr);
+ }
+
+ if (!(status & NETDEV_TX_MASK))
+ dev_kfree_skb_any(skb);
+ return status;
+out_dropped:
+ dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
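One worked instance of the segment-count arithmetic above, with
made-up but plausible numbers:

	/* Assume tsolen = 8192 and hlen = 54 (14 eth + 20 ip + 20 tcp),
	 * so datalen = 8192 - 54 = 8138. A 60000-byte GSO skb then needs
	 * DIV_ROUND_UP(60000 - 54, 8138) = DIV_ROUND_UP(59946, 8138) = 8
	 * segments, each carrying at most 8138 bytes of payload.
	 */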
+
+int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *))
+{
+ struct vnet_port *port = NULL;
+ struct vio_dring_state *dr;
+ struct vio_net_desc *d;
+ unsigned int len;
+ struct sk_buff *freeskbs = NULL;
+ int i, err, txi;
+ unsigned pending = 0;
+ struct netdev_queue *txq;
+
+ rcu_read_lock();
+ port = vnet_tx_port(skb, dev);
+ if (unlikely(!port)) {
+ rcu_read_unlock();
+ goto out_dropped;
+ }
+
+ if (skb_is_gso(skb) && skb->len > port->tsolen) {
+ err = vnet_handle_offloads(port, skb, vnet_tx_port);
+ rcu_read_unlock();
+ return err;
+ }
+
+ if (!skb_is_gso(skb) && skb->len > port->rmtu) {
+ unsigned long localmtu = port->rmtu - ETH_HLEN;
+
+ if (vio_version_after_eq(&port->vio, 1, 3))
+ localmtu -= VLAN_HLEN;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct flowi4 fl4;
+ struct rtable *rt = NULL;
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_oif = dev->ifindex;
+ fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+ fl4.daddr = ip_hdr(skb)->daddr;
+ fl4.saddr = ip_hdr(skb)->saddr;
+
+ rt = ip_route_output_key(dev_net(dev), &fl4);
+ rcu_read_unlock();
+ if (!IS_ERR(rt)) {
+ skb_dst_set(skb, &rt->dst);
+ icmp_send(skb, ICMP_DEST_UNREACH,
+ ICMP_FRAG_NEEDED,
+ htonl(localmtu));
+ }
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
+#endif
+ goto out_dropped;
+ }
+
+ skb = vnet_skb_shape(skb, 2);
+
+ if (unlikely(!skb))
+ goto out_dropped;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vnet_fullcsum(skb);
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+ i = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, i);
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+
+ /* This is a hard error, log it. */
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+ dev->stats.tx_errors++;
+ }
+ rcu_read_unlock();
+ return NETDEV_TX_BUSY;
+ }
+
+ d = vio_dring_cur(dr);
+
+ txi = dr->prod;
+
+ freeskbs = vnet_clean_tx_ring(port, &pending);
+
+ BUG_ON(port->tx_bufs[txi].skb);
+
+ len = skb->len;
+ if (len < ETH_ZLEN)
+ len = ETH_ZLEN;
+
+ err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
+ (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
+ if (err < 0) {
+ netdev_info(dev, "tx buffer map error %d\n", err);
+ goto out_dropped;
+ }
+
+ port->tx_bufs[txi].skb = skb;
+ skb = NULL;
+ port->tx_bufs[txi].ncookies = err;
+
+ /* We don't rely on the ACKs to free the skb in sunvnet_start_xmit_common(),
+ * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
+ * the protocol itself does not require it as long as the peer
+ * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
+ *
+ * An ACK for every packet in the ring is expensive as the
+ * sending of LDC messages is slow and affects performance.
+ */
+ d->hdr.ack = VIO_ACK_DISABLE;
+ d->size = len;
+ d->ncookies = port->tx_bufs[txi].ncookies;
+ for (i = 0; i < d->ncookies; i++)
+ d->cookies[i] = port->tx_bufs[txi].cookies[i];
+ if (vio_version_after_eq(&port->vio, 1, 7)) {
+ struct vio_net_dext *dext = vio_net_ext(d);
+
+ memset(dext, 0, sizeof(*dext));
+ if (skb_is_gso(port->tx_bufs[txi].skb)) {
+ dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
+ ->gso_size;
+ dext->flags |= VNET_PKT_IPV4_LSO;
+ }
+ if (vio_version_after_eq(&port->vio, 1, 8) &&
+ !port->switch_port) {
+ dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
+ dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
+ }
+ }
+
+ /* This has to be a non-SMP write barrier because we are writing
+ * to memory which is shared with the peer LDOM.
+ */
+ dma_wmb();
+
+ d->hdr.state = VIO_DESC_READY;
+
+ /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
+ * to notify the consumer that some descriptors are READY.
+ * After that "start" trigger, no additional triggers are needed until
+ * a DRING_STOPPED is received from the consumer. The dr->cons field
+ * (set up by vnet_ack()) has the value of the next dring index
+ * that has not yet been ack-ed. We send a "start" trigger here
+ * if, and only if, start_cons is true (reset it afterward). Conversely,
+ * vnet_ack() should check if the dring corresponding to cons
+ * is marked READY, but start_cons was false.
+ * If so, vnet_ack() should send out the missed "start" trigger.
+ *
+ * Note that the dma_wmb() above makes sure the cookies et al. are
+ * not globally visible before the VIO_DESC_READY, and that the
+ * stores are ordered correctly by the compiler. The consumer will
+ * not proceed until the VIO_DESC_READY is visible, assuring that
+ * the consumer does not observe anything related to descriptors
+ * out of order. The HV trap from the LDC start trigger is the
+ * producer-to-consumer announcement that work is available to the
+ * consumer.
+ */
+ if (!port->start_cons) { /* previous trigger suffices */
+ trace_vnet_skip_tx_trigger(port->vio._local_sid,
+ port->vio._peer_sid, dr->cons);
+ goto ldc_start_done;
+ }
+
+ err = __vnet_tx_trigger(port, dr->cons);
+ if (unlikely(err < 0)) {
+ netdev_info(dev, "TX trigger error %d\n", err);
+ d->hdr.state = VIO_DESC_FREE;
+ skb = port->tx_bufs[txi].skb;
+ port->tx_bufs[txi].skb = NULL;
+ dev->stats.tx_carrier_errors++;
+ goto out_dropped;
+ }
+
+ldc_start_done:
+ port->start_cons = false;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
+
+ dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
+ if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
+ netif_tx_stop_queue(txq);
+ if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
+ netif_tx_wake_queue(txq);
+ }
+
+ (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
+ rcu_read_unlock();
+
+ vnet_free_skbs(freeskbs);
+
+ return NETDEV_TX_OK;
+
+out_dropped:
+ if (pending)
+ (void)mod_timer(&port->clean_timer,
+ jiffies + VNET_CLEAN_TIMEOUT);
+ else if (port)
+ del_timer(&port->clean_timer);
+ if (port)
+ rcu_read_unlock();
+ if (skb)
+ dev_kfree_skb(skb);
+ vnet_free_skbs(freeskbs);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);
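The ordering comment in sunvnet_start_xmit_common() above describes a
classic single-producer publish pattern; a minimal kernel-style sketch
with an invented, simplified descriptor (illustrative only):

	#include <linux/types.h>
	#include <asm/barrier.h>

	enum { DESC_FREE, DESC_READY };

	struct desc {
		u64 payload;	/* stands in for size, cookies, etc. */
		u8 state;
	};

	/* Fill everything, then barrier, then flip the state: a consumer
	 * that observes DESC_READY is guaranteed to also see the payload.
	 */
	static void publish(struct desc *d, u64 data)
	{
		d->payload = data;
		dma_wmb();		/* payload stores before state store */
		d->state = DESC_READY;
	}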
+
+void sunvnet_tx_timeout_common(struct net_device *dev)
+{
+ /* XXX Implement me XXX */
+}
+EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);
+
+int sunvnet_open_common(struct net_device *dev)
+{
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sunvnet_open_common);
+
+int sunvnet_close_common(struct net_device *dev)
+{
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sunvnet_close_common);
+
+static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
+{
+ struct vnet_mcast_entry *m;
+
+ for (m = vp->mcast_list; m; m = m->next) {
+ if (ether_addr_equal(m->addr, addr))
+ return m;
+ }
+ return NULL;
+}
+
+static void __update_mc_list(struct vnet *vp, struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ struct vnet_mcast_entry *m;
+
+ m = __vnet_mc_find(vp, ha->addr);
+ if (m) {
+ m->hit = 1;
+ continue;
+ }
+
+ m = kzalloc(sizeof(*m), GFP_ATOMIC);
+ if (!m)
+ continue;
+ memcpy(m->addr, ha->addr, ETH_ALEN);
+ m->hit = 1;
+
+ m->next = vp->mcast_list;
+ vp->mcast_list = m;
+ }
+}
+
+static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
+{
+ struct vio_net_mcast_info info;
+ struct vnet_mcast_entry *m, **pp;
+ int n_addrs;
+
+ memset(&info, 0, sizeof(info));
+
+ info.tag.type = VIO_TYPE_CTRL;
+ info.tag.stype = VIO_SUBTYPE_INFO;
+ info.tag.stype_env = VNET_MCAST_INFO;
+ info.tag.sid = vio_send_sid(&port->vio);
+ info.set = 1;
+
+ n_addrs = 0;
+ for (m = vp->mcast_list; m; m = m->next) {
+ if (m->sent)
+ continue;
+ m->sent = 1;
+ memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+ m->addr, ETH_ALEN);
+ if (++n_addrs == VNET_NUM_MCAST) {
+ info.count = n_addrs;
+
+ (void)vio_ldc_send(&port->vio, &info,
+ sizeof(info));
+ n_addrs = 0;
+ }
+ }
+ if (n_addrs) {
+ info.count = n_addrs;
+ (void)vio_ldc_send(&port->vio, &info, sizeof(info));
+ }
+
+ info.set = 0;
+
+ n_addrs = 0;
+ pp = &vp->mcast_list;
+ while ((m = *pp) != NULL) {
+ if (m->hit) {
+ m->hit = 0;
+ pp = &m->next;
+ continue;
+ }
+
+ memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
+ m->addr, ETH_ALEN);
+ if (++n_addrs == VNET_NUM_MCAST) {
+ info.count = n_addrs;
+ (void)vio_ldc_send(&port->vio, &info,
+ sizeof(info));
+ n_addrs = 0;
+ }
+
+ *pp = m->next;
+ kfree(m);
+ }
+ if (n_addrs) {
+ info.count = n_addrs;
+ (void)vio_ldc_send(&port->vio, &info, sizeof(info));
+ }
+}
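Both loops in __send_mc_list() above share one batching idea; a
generic sketch (invented names, not from the patch): flush a
fixed-capacity message whenever it fills, then once more for any
remainder:

	#include <linux/types.h>

	/* Pack n one-byte items into chunks of at most 'chunk' each. */
	static void send_in_chunks(const u8 *items, int n, int chunk,
				   void (*flush)(const u8 *batch, int count))
	{
		int base = 0, count = 0, i;

		for (i = 0; i < n; i++) {
			if (++count == chunk) {
				flush(items + base, count);
				base = i + 1;
				count = 0;
			}
		}
		if (count)
			flush(items + base, count);	/* partial batch */
	}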
+
+void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
+{
+ struct vnet_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &vp->port_list, list) {
+ if (port->switch_port) {
+ __update_mc_list(vp, dev);
+ __send_mc_list(vp, port);
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);
+
+int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 68 || new_mtu > 65535)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common);
+
+int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common);
+
+void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
+{
+ struct vio_dring_state *dr;
+ int i;
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ if (!dr->base)
+ return;
+
+ for (i = 0; i < VNET_TX_RING_SIZE; i++) {
+ struct vio_net_desc *d;
+ void *skb = port->tx_bufs[i].skb;
+
+ if (!skb)
+ continue;
+
+ d = vio_dring_entry(dr, i);
+
+ ldc_unmap(port->vio.lp,
+ port->tx_bufs[i].cookies,
+ port->tx_bufs[i].ncookies);
+ dev_kfree_skb(skb);
+ port->tx_bufs[i].skb = NULL;
+ d->hdr.state = VIO_DESC_FREE;
+ }
+ ldc_free_exp_dring(port->vio.lp, dr->base,
+ (dr->entry_size * dr->num_entries),
+ dr->cookies, dr->ncookies);
+ dr->base = NULL;
+ dr->entry_size = 0;
+ dr->num_entries = 0;
+ dr->pending = 0;
+ dr->ncookies = 0;
+}
+EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);
+
+static void vnet_port_reset(struct vnet_port *port)
+{
+ del_timer(&port->clean_timer);
+ sunvnet_port_free_tx_bufs_common(port);
+ port->rmtu = 0;
+ port->tso = true;
+ port->tsolen = 0;
+}
+
+static int vnet_port_alloc_tx_ring(struct vnet_port *port)
+{
+ struct vio_dring_state *dr;
+ unsigned long len, elen;
+ int i, err, ncookies;
+ void *dring;
+
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+ elen = sizeof(struct vio_net_desc) +
+ sizeof(struct ldc_trans_cookie) * 2;
+ if (vio_version_after_eq(&port->vio, 1, 7))
+ elen += sizeof(struct vio_net_dext);
+ len = VNET_TX_RING_SIZE * elen;
+
+ ncookies = VIO_MAX_RING_COOKIES;
+ dring = ldc_alloc_exp_dring(port->vio.lp, len,
+ dr->cookies, &ncookies,
+ (LDC_MAP_SHADOW |
+ LDC_MAP_DIRECT |
+ LDC_MAP_RW));
+ if (IS_ERR(dring)) {
+ err = PTR_ERR(dring);
+ goto err_out;
+ }
+
+ dr->base = dring;
+ dr->entry_size = elen;
+ dr->num_entries = VNET_TX_RING_SIZE;
+ dr->prod = 0;
+ dr->cons = 0;
+ port->start_cons = true; /* need an initial trigger */
+ dr->pending = VNET_TX_RING_SIZE;
+ dr->ncookies = ncookies;
+
+ for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
+ struct vio_net_desc *d;
+
+ d = vio_dring_entry(dr, i);
+ d->hdr.state = VIO_DESC_FREE;
+ }
+ return 0;
+
+err_out:
+ sunvnet_port_free_tx_bufs_common(port);
+
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp)
+{
+ struct vnet_port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vp->lock, flags);
+ if (!list_empty(&vp->port_list)) {
+ port = list_entry(vp->port_list.next, struct vnet_port, list);
+ napi_schedule(&port->napi);
+ }
+ spin_unlock_irqrestore(&vp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
+#endif
+
+void sunvnet_port_add_txq_common(struct vnet_port *port)
+{
+ struct vnet *vp = port->vp;
+ int n;
+
+ n = vp->nports++;
+ n = n & (VNET_MAX_TXQS - 1);
+ port->q_index = n;
+ netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
+ port->q_index));
+}
+EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);
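The queue assignment above, worked out with VNET_MAX_TXQS == 16:

	/* The Nth port added (0-based) lands on queue N & 15: ports
	 * 0..15 get queues 0..15 and port 16 wraps back to queue 0.
	 * n & (16 - 1) equals n % 16 because 16 is a power of two.
	 */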
+
+void sunvnet_port_rm_txq_common(struct vnet_port *port)
+{
+ port->vp->nports--;
+ netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
+ port->q_index));
+}
+EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);
diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h
new file mode 100644
index 000000000000..bd36528af972
--- /dev/null
+++ b/drivers/net/ethernet/sun/sunvnet_common.h
@@ -0,0 +1,145 @@
+#ifndef _SUNVNETCOMMON_H
+#define _SUNVNETCOMMON_H
+
+#include <linux/interrupt.h>
+
+/* Length of time (or less) within which we expect pending descriptors
+ * to be marked VIO_DESC_DONE and their skbs to be ready for freeing.
+ */
+#define VNET_CLEAN_TIMEOUT ((HZ / 100) + 1)
+
+#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN)
+#define VNET_TX_RING_SIZE 512
+#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
+
+#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */
+#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */
+
+/* VNET packets are sent in buffers with the first 6 bytes skipped
+ * so that after the ethernet header the IPv4/IPv6 headers are aligned
+ * properly.
+ */
+#define VNET_PACKET_SKIP 6
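Why 6 bytes works, assuming the mapped transmit buffer itself starts
8-byte aligned (the mapping code above rounds lengths to multiples
of 8):

	/* buffer + 0  : 8-byte aligned start
	 * buffer + 6  : ethernet header (the VNET_PACKET_SKIP bytes)
	 * buffer + 20 : IP header, since 6 + ETH_HLEN(14) = 20 and
	 *               20 % 4 == 0, a properly aligned IPv4/IPv6 header
	 */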
+
+#define VNET_MAXCOOKIES (VNET_MAXPACKET / PAGE_SIZE + 1)
+
+#define VNET_MAX_TXQS 16
+
+struct vnet_tx_entry {
+ struct sk_buff *skb;
+ unsigned int ncookies;
+ struct ldc_trans_cookie cookies[VNET_MAXCOOKIES];
+};
+
+struct vnet;
+
+/* Structure to describe a vnet-port or vsw-port in the MD.
+ * If the vsw bit is set, this structure represents a vswitch
+ * port, and the net_device can be found from ->dev. If the
+ * vsw bit is not set, the net_device is available from ->vp->dev.
+ * See the VNET_PORT_TO_NET_DEVICE macro below.
+ */
+struct vnet_port {
+ struct vio_driver_state vio;
+
+ struct hlist_node hash;
+ u8 raddr[ETH_ALEN];
+ unsigned switch_port:1;
+ unsigned tso:1;
+ unsigned vsw:1;
+ unsigned __pad:13;
+
+ struct vnet *vp;
+ struct net_device *dev;
+
+ struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE];
+
+ struct list_head list;
+
+ u32 stop_rx_idx;
+ bool stop_rx;
+ bool start_cons;
+
+ struct timer_list clean_timer;
+
+ u64 rmtu;
+ u16 tsolen;
+
+ struct napi_struct napi;
+ u32 napi_stop_idx;
+ bool napi_resume;
+ int rx_event;
+ u16 q_index;
+};
+
+static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio)
+{
+ return container_of(vio, struct vnet_port, vio);
+}
+
+#define VNET_PORT_HASH_SIZE 16
+#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1)
+
+static inline unsigned int vnet_hashfn(u8 *mac)
+{
+ unsigned int val = mac[4] ^ mac[5];
+
+ return val & (VNET_PORT_HASH_MASK);
+}
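A quick worked example of the bucket selection (MAC address invented):

	/* For MAC 00:14:4f:fa:12:34: mac[4] ^ mac[5] = 0x12 ^ 0x34 =
	 * 0x26, masked with 0x0f to 0x06, i.e. hash bucket 6.
	 */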
+
+struct vnet_mcast_entry {
+ u8 addr[ETH_ALEN];
+ u8 sent;
+ u8 hit;
+ struct vnet_mcast_entry *next;
+};
+
+struct vnet {
+ /* Protects port_list and port_hash. */
+ spinlock_t lock;
+
+ struct net_device *dev;
+
+ u32 msg_enable;
+
+ struct list_head port_list;
+
+ struct hlist_head port_hash[VNET_PORT_HASH_SIZE];
+
+ struct vnet_mcast_entry *mcast_list;
+
+ struct list_head list;
+ u64 local_mac;
+
+ int nports;
+};
+
+/* Macro used by common code to get the net_device from the proper location */
+#define VNET_PORT_TO_NET_DEVICE(__port) \
+ ((__port)->vsw ? (__port)->dev : (__port)->vp->dev)
+
+/* Common funcs */
+void sunvnet_clean_timer_expire_common(unsigned long port0);
+int sunvnet_open_common(struct net_device *dev);
+int sunvnet_close_common(struct net_device *dev);
+void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp);
+int sunvnet_set_mac_addr_common(struct net_device *dev, void *p);
+void sunvnet_tx_timeout_common(struct net_device *dev);
+int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu);
+int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
+ struct vnet_port *(*vnet_tx_port)
+ (struct sk_buff *, struct net_device *));
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp);
+#endif
+void sunvnet_event_common(void *arg, int event);
+int sunvnet_send_attr_common(struct vio_driver_state *vio);
+int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg);
+void sunvnet_handshake_complete_common(struct vio_driver_state *vio);
+int sunvnet_poll_common(struct napi_struct *napi, int budget);
+void sunvnet_port_free_tx_bufs_common(struct vnet_port *port);
+bool sunvnet_port_is_up_common(struct vnet_port *vnet);
+void sunvnet_port_add_txq_common(struct vnet_port *port);
+void sunvnet_port_rm_txq_common(struct vnet_port *port);
+
+#endif /* _SUNVNETCOMMON_H */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4d9d4f..bbb77cd8ad67 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
+ pm_runtime_get_sync(&priv->pdev->dev);
+
if (!cpsw_common_res_usage_state(priv))
cpsw_intr_disable(priv);
netif_carrier_off(ndev);
- pm_runtime_get_sync(&priv->pdev->dev);
-
reg = priv->version;
dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abedd6b75..58d58f002559 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
pdata->hw_ram_addr = auxdata->hw_ram_addr;
}
- pdev->dev.platform_data = pdata;
-
return pdata;
}
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_ctlr_destroy(priv->dma);
unregister_netdev(ndev);
+ pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 029841f98c32..1d0942c53120 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1852,22 +1852,26 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
+static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
{
int i;
/* setup tc must be called under rtnl lock */
ASSERT_RTNL();
+ if (tc->type != TC_SETUP_MQPRIO)
+ return -EINVAL;
+
/* Sanity-check the number of traffic classes requested */
if ((dev->real_num_tx_queues <= 1) ||
- (dev->real_num_tx_queues < num_tc))
+ (dev->real_num_tx_queues < tc->tc))
return -EINVAL;
/* Configure traffic class to queue mappings */
- if (num_tc) {
- netdev_set_num_tc(dev, num_tc);
- for (i = 0; i < num_tc; i++)
+ if (tc->tc) {
+ netdev_set_num_tc(dev, tc->tc);
+ for (i = 0; i < tc->tc; i++)
netdev_set_tc_queue(dev, i, 1, i);
} else {
netdev_reset_tc(dev);
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3c54a2cae5df..67610270d171 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -48,7 +48,6 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
-#include <asm/pci-bridge.h>
#include <net/checksum.h>
#include "spider_net.h"
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 0bf7edd99573..bc168894bda3 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -68,14 +68,16 @@ struct geneve_dev {
u8 tos; /* TOS override */
union geneve_addr remote; /* IP address for link partner */
struct list_head next; /* geneve's per namespace list */
+ __be32 label; /* IPv6 flowlabel override */
__be16 dst_port;
bool collect_md;
struct gro_cells gro_cells;
u32 flags;
+ struct dst_cache dst_cache;
};
/* Geneve device flags */
-#define GENEVE_F_UDP_CSUM BIT(0)
+#define GENEVE_F_UDP_ZERO_CSUM_TX BIT(0)
#define GENEVE_F_UDP_ZERO_CSUM6_TX BIT(1)
#define GENEVE_F_UDP_ZERO_CSUM6_RX BIT(2)
@@ -109,6 +111,11 @@ static __be64 vni_to_tunnel_id(const __u8 *vni)
#endif
}
+static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
+{
+ return gs->sock->sk->sk_family;
+}
+
static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
__be32 addr, u8 vni[])
{
@@ -152,58 +159,60 @@ static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
return (struct genevehdr *)(udp_hdr(skb) + 1);
}
-/* geneve receive/decap routine */
-static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
+static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs,
+ struct sk_buff *skb)
{
- struct genevehdr *gnvh = geneve_hdr(skb);
- struct metadata_dst *tun_dst = NULL;
- struct geneve_dev *geneve = NULL;
- struct pcpu_sw_netstats *stats;
- struct iphdr *iph = NULL;
+ u8 *vni;
__be32 addr;
static u8 zero_vni[3];
- u8 *vni;
- int err = 0;
- sa_family_t sa_family;
#if IS_ENABLED(CONFIG_IPV6)
- struct ipv6hdr *ip6h = NULL;
- struct in6_addr addr6;
static struct in6_addr zero_addr6;
#endif
- sa_family = gs->sock->sk->sk_family;
+ if (geneve_get_sk_family(gs) == AF_INET) {
+ struct iphdr *iph;
- if (sa_family == AF_INET) {
iph = ip_hdr(skb); /* outer IP header... */
if (gs->collect_md) {
vni = zero_vni;
addr = 0;
} else {
- vni = gnvh->vni;
-
+ vni = geneve_hdr(skb)->vni;
addr = iph->saddr;
}
- geneve = geneve_lookup(gs, addr, vni);
+ return geneve_lookup(gs, addr, vni);
#if IS_ENABLED(CONFIG_IPV6)
- } else if (sa_family == AF_INET6) {
+ } else if (geneve_get_sk_family(gs) == AF_INET6) {
+ struct ipv6hdr *ip6h;
+ struct in6_addr addr6;
+
ip6h = ipv6_hdr(skb); /* outer IPv6 header... */
if (gs->collect_md) {
vni = zero_vni;
addr6 = zero_addr6;
} else {
- vni = gnvh->vni;
-
+ vni = geneve_hdr(skb)->vni;
addr6 = ip6h->saddr;
}
- geneve = geneve6_lookup(gs, addr6, vni);
+ return geneve6_lookup(gs, addr6, vni);
#endif
}
- if (!geneve)
- goto drop;
+ return NULL;
+}
+
+/* geneve receive/decap routine */
+static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
+ struct sk_buff *skb)
+{
+ struct genevehdr *gnvh = geneve_hdr(skb);
+ struct metadata_dst *tun_dst = NULL;
+ struct pcpu_sw_netstats *stats;
+ int err = 0;
+ void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
__be16 flags;
@@ -212,7 +221,7 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
(gnvh->oam ? TUNNEL_OAM : 0) |
(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
- tun_dst = udp_tun_rx_dst(skb, sa_family, flags,
+ tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
vni_to_tunnel_id(gnvh->vni),
gnvh->opt_len * 4);
if (!tun_dst)
@@ -229,7 +238,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
}
skb_reset_mac_header(skb);
- skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
skb->protocol = eth_type_trans(skb, geneve->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
@@ -240,25 +248,27 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
goto drop;
+ oiph = skb_network_header(skb);
skb_reset_network_header(skb);
- if (iph)
- err = IP_ECN_decapsulate(iph, skb);
+ if (geneve_get_sk_family(gs) == AF_INET)
+ err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
- if (ip6h)
- err = IP6_ECN_decapsulate(ip6h, skb);
+ else
+ err = IP6_ECN_decapsulate(oiph, skb);
#endif
if (unlikely(err)) {
if (log_ecn_error) {
- if (iph)
+ if (geneve_get_sk_family(gs) == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
- &iph->saddr, iph->tos);
+ &((struct iphdr *)oiph)->saddr,
+ ((struct iphdr *)oiph)->tos);
#if IS_ENABLED(CONFIG_IPV6)
- if (ip6h)
+ else
net_info_ratelimited("non-ECT from %pI6\n",
- &ip6h->saddr);
+ &((struct ipv6hdr *)oiph)->saddr);
#endif
}
if (err > 1) {
@@ -297,6 +307,13 @@ static int geneve_init(struct net_device *dev)
return err;
}
+ err = dst_cache_init(&geneve->dst_cache, GFP_KERNEL);
+ if (err) {
+ free_percpu(dev->tstats);
+ gro_cells_destroy(&geneve->gro_cells);
+ return err;
+ }
+
return 0;
}
@@ -304,6 +321,7 @@ static void geneve_uninit(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
+ dst_cache_destroy(&geneve->dst_cache);
gro_cells_destroy(&geneve->gro_cells);
free_percpu(dev->tstats);
}
@@ -312,6 +330,7 @@ static void geneve_uninit(struct net_device *dev)
static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct genevehdr *geneveh;
+ struct geneve_dev *geneve;
struct geneve_sock *gs;
int opts_len;
@@ -327,16 +346,21 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
goto error;
- opts_len = geneveh->opt_len * 4;
- if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
- htons(ETH_P_TEB)))
- goto drop;
-
gs = rcu_dereference_sk_user_data(sk);
if (!gs)
goto drop;
- geneve_rx(gs, skb);
+ geneve = geneve_lookup_skb(gs, skb);
+ if (!geneve)
+ goto drop;
+
+ opts_len = geneveh->opt_len * 4;
+ if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
+ htons(ETH_P_TEB),
+ !net_eq(geneve->net, dev_net(geneve->dev))))
+ goto drop;
+
+ geneve_rx(geneve, gs, skb);
return 0;
drop:
@@ -383,7 +407,7 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
struct net_device *dev;
struct sock *sk = gs->sock->sk;
struct net *net = sock_net(sk);
- sa_family_t sa_family = sk->sk_family;
+ sa_family_t sa_family = geneve_get_sk_family(gs);
__be16 port = inet_sk(sk)->inet_sport;
int err;
@@ -439,8 +463,6 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
goto out;
}
- flush = 0;
-
for (p = *head; p; p = p->next) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -457,14 +479,13 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
rcu_read_lock();
ptype = gro_find_receive_by_type(type);
- if (!ptype) {
- flush = 1;
+ if (!ptype)
goto out_unlock;
- }
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
pp = ptype->callbacks.gro_receive(head, skb);
+ flush = 0;
out_unlock:
rcu_read_unlock();
@@ -544,7 +565,7 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs)
struct net_device *dev;
struct sock *sk = gs->sock->sk;
struct net *net = sock_net(sk);
- sa_family_t sa_family = sk->sk_family;
+ sa_family_t sa_family = geneve_get_sk_family(gs);
__be16 port = inet_sk(sk)->inet_sport;
rcu_read_lock();
@@ -587,7 +608,7 @@ static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
list_for_each_entry(gs, &gn->sock_list, list) {
if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
- inet_sk(gs->sock->sk)->sk.sk_family == family) {
+ geneve_get_sk_family(gs) == family) {
return gs;
}
}
@@ -680,7 +701,7 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
struct genevehdr *gnvh;
int min_headroom;
int err;
- bool udp_sum = !!(flags & GENEVE_F_UDP_CSUM);
+ bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM_TX);
skb_scrub_packet(skb, xnet);
@@ -752,7 +773,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct flowi4 *fl4,
struct ip_tunnel_info *info)
{
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
+ struct dst_cache *dst_cache;
struct rtable *rt = NULL;
__u8 tos;
@@ -764,16 +787,25 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
fl4->daddr = info->key.u.ipv4.dst;
fl4->saddr = info->key.u.ipv4.src;
fl4->flowi4_tos = RT_TOS(info->key.tos);
+ dst_cache = &info->dst_cache;
} else {
tos = geneve->tos;
if (tos == 1) {
const struct iphdr *iip = ip_hdr(skb);
tos = ip_tunnel_get_dsfield(iip, skb);
+ use_cache = false;
}
fl4->flowi4_tos = RT_TOS(tos);
fl4->daddr = geneve->remote.sin.sin_addr.s_addr;
+ dst_cache = &geneve->dst_cache;
+ }
+
+ if (use_cache) {
+ rt = dst_cache_get_ip4(dst_cache, &fl4->saddr);
+ if (rt)
+ return rt;
}
rt = ip_route_output_key(geneve->net, fl4);
@@ -786,6 +818,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
ip_rt_put(rt);
return ERR_PTR(-ELOOP);
}
+ if (use_cache)
+ dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr);
return rt;
}
@@ -795,9 +829,11 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
struct flowi6 *fl6,
struct ip_tunnel_info *info)
{
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
+ struct dst_cache *dst_cache;
__u8 prio;
memset(fl6, 0, sizeof(*fl6));
@@ -807,17 +843,28 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
if (info) {
fl6->daddr = info->key.u.ipv6.dst;
fl6->saddr = info->key.u.ipv6.src;
- fl6->flowi6_tos = RT_TOS(info->key.tos);
+ fl6->flowlabel = ip6_make_flowinfo(RT_TOS(info->key.tos),
+ info->key.label);
+ dst_cache = &info->dst_cache;
} else {
prio = geneve->tos;
if (prio == 1) {
const struct iphdr *iip = ip_hdr(skb);
prio = ip_tunnel_get_dsfield(iip, skb);
+ use_cache = false;
}
- fl6->flowi6_tos = RT_TOS(prio);
+ fl6->flowlabel = ip6_make_flowinfo(RT_TOS(prio),
+ geneve->label);
fl6->daddr = geneve->remote.sin6.sin6_addr;
+ dst_cache = &geneve->dst_cache;
+ }
+
+ if (use_cache) {
+ dst = dst_cache_get_ip6(dst_cache, &fl6->saddr);
+ if (dst)
+ return dst;
}
if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
@@ -830,6 +877,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
return ERR_PTR(-ELOOP);
}
+ if (use_cache)
+ dst_cache_set_ip6(dst_cache, dst, &fl6->saddr);
return dst;
}
#endif
@@ -889,13 +938,13 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
u8 vni[3];
tunnel_id_to_vni(key->tun_id, vni);
- if (key->tun_flags & TUNNEL_GENEVE_OPT)
+ if (info->options_len)
opts = ip_tunnel_info_opts(info);
if (key->tun_flags & TUNNEL_CSUM)
- flags |= GENEVE_F_UDP_CSUM;
+ flags &= ~GENEVE_F_UDP_ZERO_CSUM_TX;
else
- flags &= ~GENEVE_F_UDP_CSUM;
+ flags |= GENEVE_F_UDP_ZERO_CSUM_TX;
err = geneve_build_skb(rt, skb, key->tun_flags, vni,
info->options_len, opts, flags, xnet);
@@ -921,7 +970,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
tos, ttl, df, sport, geneve->dst_port,
!net_eq(geneve->net, dev_net(geneve->dev)),
- !(flags & GENEVE_F_UDP_CSUM));
+ !!(flags & GENEVE_F_UDP_ZERO_CSUM_TX));
return NETDEV_TX_OK;
@@ -949,6 +998,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct flowi6 fl6;
__u8 prio, ttl;
__be16 sport;
+ __be32 label;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
@@ -976,7 +1026,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
u8 vni[3];
tunnel_id_to_vni(key->tun_id, vni);
- if (key->tun_flags & TUNNEL_GENEVE_OPT)
+ if (info->options_len)
opts = ip_tunnel_info_opts(info);
if (key->tun_flags & TUNNEL_CSUM)
@@ -992,20 +1042,24 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl;
+ label = info->key.label;
} else {
err = geneve6_build_skb(dst, skb, 0, geneve->vni,
0, NULL, flags, xnet);
if (unlikely(err))
goto err;
- prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, iip, skb);
+ prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
+ iip, skb);
ttl = geneve->ttl;
if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
ttl = 1;
ttl = ttl ? : ip6_dst_hoplimit(dst);
+ label = geneve->label;
}
+
udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
- &fl6.saddr, &fl6.daddr, prio, ttl,
+ &fl6.saddr, &fl6.daddr, prio, ttl, label,
sport, geneve->dst_port,
!!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX));
return NETDEV_TX_OK;
@@ -1189,6 +1243,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) },
[IFLA_GENEVE_TTL] = { .type = NLA_U8 },
[IFLA_GENEVE_TOS] = { .type = NLA_U8 },
+ [IFLA_GENEVE_LABEL] = { .type = NLA_U32 },
[IFLA_GENEVE_PORT] = { .type = NLA_U16 },
[IFLA_GENEVE_COLLECT_METADATA] = { .type = NLA_FLAG },
[IFLA_GENEVE_UDP_CSUM] = { .type = NLA_U8 },
@@ -1246,8 +1301,8 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
static int geneve_configure(struct net *net, struct net_device *dev,
union geneve_addr *remote,
- __u32 vni, __u8 ttl, __u8 tos, __be16 dst_port,
- bool metadata, u32 flags)
+ __u32 vni, __u8 ttl, __u8 tos, __be32 label,
+ __be16 dst_port, bool metadata, u32 flags)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -1257,7 +1312,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
if (!remote)
return -EINVAL;
if (metadata &&
- (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl))
+ (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl || label))
return -EINVAL;
geneve->net = net;
@@ -1272,10 +1327,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
(remote->sa.sa_family == AF_INET6 &&
ipv6_addr_is_multicast(&remote->sin6.sin6_addr)))
return -EINVAL;
+ if (label && remote->sa.sa_family != AF_INET6)
+ return -EINVAL;
+
geneve->remote = *remote;
geneve->ttl = ttl;
geneve->tos = tos;
+ geneve->label = label;
geneve->dst_port = dst_port;
geneve->collect_md = metadata;
geneve->flags = flags;
@@ -1301,6 +1360,8 @@ static int geneve_configure(struct net *net, struct net_device *dev,
return -EPERM;
}
+ dst_cache_reset(&geneve->dst_cache);
+
err = register_netdevice(dev);
if (err)
return err;
@@ -1316,6 +1377,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
__u8 ttl = 0, tos = 0;
bool metadata = false;
union geneve_addr remote = geneve_remote_unspec;
+ __be32 label = 0;
__u32 vni = 0;
u32 flags = 0;
@@ -1352,6 +1414,10 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
if (data[IFLA_GENEVE_TOS])
tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+ if (data[IFLA_GENEVE_LABEL])
+ label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
+ IPV6_FLOWLABEL_MASK;
+
if (data[IFLA_GENEVE_PORT])
dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
@@ -1359,8 +1425,8 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
metadata = true;
if (data[IFLA_GENEVE_UDP_CSUM] &&
- nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
- flags |= GENEVE_F_UDP_CSUM;
+ !nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
+ flags |= GENEVE_F_UDP_ZERO_CSUM_TX;
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] &&
nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
@@ -1370,8 +1436,8 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
flags |= GENEVE_F_UDP_ZERO_CSUM6_RX;
- return geneve_configure(net, dev, &remote, vni, ttl, tos, dst_port,
- metadata, flags);
+ return geneve_configure(net, dev, &remote, vni, ttl, tos, label,
+ dst_port, metadata, flags);
}
static void geneve_dellink(struct net_device *dev, struct list_head *head)
@@ -1388,6 +1454,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
+ nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */
nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */
@@ -1418,7 +1485,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
}
if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
- nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
+ nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos) ||
+ nla_put_be32(skb, IFLA_GENEVE_LABEL, geneve->label))
goto nla_put_failure;
if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
@@ -1430,7 +1498,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
}
if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
- !!(geneve->flags & GENEVE_F_UDP_CSUM)) ||
+ !(geneve->flags & GENEVE_F_UDP_ZERO_CSUM_TX)) ||
nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
!!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
@@ -1470,7 +1538,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
return dev;
err = geneve_configure(net, dev, &geneve_remote_unspec,
- 0, 0, 0, htons(dst_port), true,
+ 0, 0, 0, 0, htons(dst_port), true,
GENEVE_F_UDP_ZERO_CSUM6_RX);
if (err)
goto err;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 636b65c66d49..7b916d5b14b9 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -80,6 +80,7 @@
#include <linux/hdlcdrv.h>
#include <linux/baycom.h>
#include <linux/jiffies.h>
+#include <linux/time64.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -228,14 +229,15 @@ static inline unsigned int hweight8(unsigned int w)
/* --------------------------------------------------------------------- */
-static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timeval *tv, unsigned char curs)
+static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timespec64 *ts, unsigned char curs)
{
int timediff;
int bdus8 = bc->baud_us >> 3;
int bdus4 = bc->baud_us >> 2;
int bdus2 = bc->baud_us >> 1;
- timediff = 1000000 + tv->tv_usec - bc->modem.ser12.pll_time;
+ timediff = 1000000 + ts->tv_nsec / NSEC_PER_USEC -
+ bc->modem.ser12.pll_time;
while (timediff >= 500000)
timediff -= 1000000;
while (timediff >= bdus2) {
@@ -287,7 +289,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct baycom_state *bc = netdev_priv(dev);
- struct timeval tv;
+ struct timespec64 ts;
unsigned char iir, msr;
unsigned int txcount = 0;
@@ -297,7 +299,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id)
if ((iir = inb(IIR(dev->base_addr))) & 1)
return IRQ_NONE;
/* get current time */
- do_gettimeofday(&tv);
+ ktime_get_ts64(&ts);
msr = inb(MSR(dev->base_addr));
/* delta DCD */
if ((msr & 8) && bc->opt_dcd)
@@ -340,7 +342,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id)
}
iir = inb(IIR(dev->base_addr));
} while (!(iir & 1));
- ser12_rx(dev, bc, &tv, msr & 0x10); /* CTS */
+ ser12_rx(dev, bc, &ts, msr & 0x10); /* CTS */
if (bc->modem.ptt && txcount) {
if (bc->modem.ser12.txshreg <= 1) {
bc->modem.ser12.txshreg = 0x10000 | hdlcdrv_getbits(&bc->hdrv);
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index c3d377770616..e4137c1b3df9 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -451,7 +451,7 @@ static const struct net_device_ops scc_netdev_ops = {
static int __init setup_adapter(int card_base, int type, int n)
{
- int i, irq, chip;
+ int i, irq, chip, err;
struct scc_info *info;
struct net_device *dev;
struct scc_priv *priv;
@@ -463,14 +463,17 @@ static int __init setup_adapter(int card_base, int type, int n)
/* Initialize what is necessary for write_scc and write_scc_data */
info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
- if (!info)
+ if (!info) {
+ err = -ENOMEM;
goto out;
+ }
info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
if (!info->dev[0]) {
printk(KERN_ERR "dmascc: "
"could not allocate memory for %s at %#3x\n",
hw[type].name, card_base);
+ err = -ENOMEM;
goto out1;
}
@@ -479,6 +482,7 @@ static int __init setup_adapter(int card_base, int type, int n)
printk(KERN_ERR "dmascc: "
"could not allocate memory for %s at %#3x\n",
hw[type].name, card_base);
+ err = -ENOMEM;
goto out2;
}
spin_lock_init(&info->register_lock);
@@ -549,6 +553,7 @@ static int __init setup_adapter(int card_base, int type, int n)
printk(KERN_ERR
"dmascc: could not find irq of %s at %#3x (irq=%d)\n",
hw[type].name, card_base, irq);
+ err = -ENODEV;
goto out3;
}
@@ -585,11 +590,13 @@ static int __init setup_adapter(int card_base, int type, int n)
if (register_netdev(info->dev[0])) {
printk(KERN_ERR "dmascc: could not register %s\n",
info->dev[0]->name);
+ err = -ENODEV;
goto out3;
}
if (register_netdev(info->dev[1])) {
printk(KERN_ERR "dmascc: could not register %s\n",
info->dev[1]->name);
+ err = -ENODEV;
goto out4;
}
@@ -612,7 +619,7 @@ static int __init setup_adapter(int card_base, int type, int n)
out1:
kfree(info);
out:
- return -1;
+ return err;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index fcb92c0d0eb9..8b3bd8ecd1c4 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -619,6 +619,7 @@ struct nvsp_message {
#define NETVSC_PACKET_SIZE 4096
#define VRSS_SEND_TAB_SIZE 16
+#define VRSS_CHANNEL_MAX 64
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
@@ -658,6 +659,10 @@ struct net_device_context {
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
+
+ /* Ethtool settings */
+ u8 duplex;
+ u32 speed;
};
/* Per netvsc device */
@@ -696,13 +701,13 @@ struct netvsc_device {
struct net_device *ndev;
- struct vmbus_channel *chn_table[NR_CPUS];
+ struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
u32 num_sc_offered;
- atomic_t queue_sends[NR_CPUS];
+ atomic_t queue_sends[VRSS_CHANNEL_MAX];
/* Holds rndis device info */
void *extension;
@@ -714,7 +719,7 @@ struct netvsc_device {
/* The sub channel callback buffer */
unsigned char *sub_cb_buf;
- struct multi_send_data msd[NR_CPUS];
+ struct multi_send_data msd[VRSS_CHANNEL_MAX];
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 98e34fee45c7..b8121eba33ff 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,6 +43,11 @@
#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
+#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \
+ NETIF_F_SG | \
+ NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_HW_CSUM)
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@@ -545,6 +550,8 @@ do_send:
packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
skb, packet, &pb);
+ /* timestamp packet in software */
+ skb_tx_timestamp(skb);
ret = netvsc_send(net_device_ctx->device_ctx, packet,
rndis_msg, &pb, skb);
@@ -792,6 +799,58 @@ static int netvsc_set_channels(struct net_device *net,
goto do_set;
}
+static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
+{
+ struct ethtool_cmd diff1 = *cmd;
+ struct ethtool_cmd diff2 = {};
+
+ ethtool_cmd_speed_set(&diff1, 0);
+ diff1.duplex = 0;
+ /* advertising and cmd are usually set */
+ diff1.advertising = 0;
+ diff1.cmd = 0;
+ /* We set port to PORT_OTHER */
+ diff2.port = PORT_OTHER;
+
+ return !memcmp(&diff1, &diff2, sizeof(diff1));
+}
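A distilled sketch of the validation trick above (generic struct and
field names, purely illustrative): zero out the fields allowed to
vary, build a blank template carrying the one required value, and let
a single memcmp reject everything else:

	#include <linux/string.h>
	#include <linux/types.h>

	struct params {
		int speed;
		int duplex;
		int port;
		int other;	/* must be left untouched by callers */
	};

	static bool only_allowed_fields_set(const struct params *p)
	{
		struct params in = *p;
		struct params tmpl = {};

		in.speed = 0;	/* callers may set these two freely */
		in.duplex = 0;
		tmpl.port = 2;	/* the one fixed value we require */

		return memcmp(&in, &tmpl, sizeof(in)) == 0;
	}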
+
+static void netvsc_init_settings(struct net_device *dev)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+
+ ndc->speed = SPEED_UNKNOWN;
+ ndc->duplex = DUPLEX_UNKNOWN;
+}
+
+static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+
+ ethtool_cmd_speed_set(cmd, ndc->speed);
+ cmd->duplex = ndc->duplex;
+ cmd->port = PORT_OTHER;
+
+ return 0;
+}
+
+static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ u32 speed;
+
+ speed = ethtool_cmd_speed(cmd);
+ if (!ethtool_validate_speed(speed) ||
+ !ethtool_validate_duplex(cmd->duplex) ||
+ !netvsc_validate_ethtool_ss_cmd(cmd))
+ return -EINVAL;
+
+ ndc->speed = speed;
+ ndc->duplex = cmd->duplex;
+
+ return 0;
+}
+
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -799,6 +858,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct netvsc_device *nvdev = hv_get_drvdata(hdev);
struct netvsc_device_info device_info;
int limit = ETH_DATA_LEN;
+ u32 num_chn;
int ret = 0;
if (nvdev == NULL || nvdev->destroy)
@@ -814,6 +874,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto out;
+ num_chn = nvdev->num_chn;
+
nvdev->start_remove = true;
rndis_filter_device_remove(hdev);
@@ -824,7 +886,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
- device_info.num_chn = nvdev->num_chn;
+ device_info.num_chn = num_chn;
device_info.max_num_vrss_chns = max_num_vrss_chns;
rndis_filter_device_add(hdev, &device_info);
@@ -915,6 +977,9 @@ static const struct ethtool_ops ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_channels = netvsc_get_channels,
.set_channels = netvsc_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_settings = netvsc_get_settings,
+ .set_settings = netvsc_set_settings,
};
static const struct net_device_ops device_ops = {
@@ -1081,10 +1146,8 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;
- net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO;
- net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_TSO;
+ net->hw_features = NETVSC_HW_FEATURES;
+ net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
@@ -1109,6 +1172,8 @@ static int netvsc_probe(struct hv_device *dev,
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
+ netvsc_init_settings(net);
+
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index a37bbda37ffa..c4e1e0408433 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -986,12 +986,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
- spin_lock_irqsave(&nvscdev->sc_lock, flags);
- nvscdev->num_sc_offered--;
- spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
- if (nvscdev->num_sc_offered == 0)
- complete(&nvscdev->channel_init_wait);
-
if (chn_index >= nvscdev->num_chn)
return;
@@ -1004,6 +998,12 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
if (ret == 0)
nvscdev->chn_table[chn_index] = new_sc;
+
+ spin_lock_irqsave(&nvscdev->sc_lock, flags);
+ nvscdev->num_sc_offered--;
+ spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+ if (nvscdev->num_sc_offered == 0)
+ complete(&nvscdev->channel_init_wait);
}
int rndis_filter_device_add(struct hv_device *dev,
@@ -1113,9 +1113,9 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
- num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+ net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, rsscap.num_recv_que);
- net_device->max_chn = rsscap.num_recv_que;
+ num_rss_qs = min(device_info->max_num_vrss_chns, net_device->max_chn);
/*
* We will limit the VRSS channels to the number CPUs in the NUMA node
@@ -1175,22 +1175,18 @@ int rndis_filter_device_add(struct hv_device *dev,
ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
/*
- * Wait for the host to send us the sub-channel offers.
+ * Set the number of sub-channels to be received.
*/
spin_lock_irqsave(&net_device->sc_lock, flags);
sc_delta = num_rss_qs - (net_device->num_chn - 1);
net_device->num_sc_offered -= sc_delta;
spin_unlock_irqrestore(&net_device->sc_lock, flags);
- while (net_device->num_sc_offered != 0) {
- t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ);
- if (t == 0)
- WARN(1, "Netvsc: Waiting for sub-channel processing");
- }
out:
if (ret) {
net_device->max_chn = 1;
net_device->num_chn = 1;
+ net_device->num_sc_offered = 0;
}
return 0; /* return 0 because primary channel can be used alone */
@@ -1204,6 +1200,17 @@ void rndis_filter_device_remove(struct hv_device *dev)
{
struct netvsc_device *net_dev = hv_get_drvdata(dev);
struct rndis_device *rndis_dev = net_dev->extension;
+ unsigned long t;
+
+ /* If not all sub-channel offers have completed yet, wait for them
+ * to complete, to avoid a race.
+ */
+ while (net_dev->num_sc_offered > 0) {
+ t = wait_for_completion_timeout(&net_dev->channel_init_wait,
+ 10 * HZ);
+ if (t == 0)
+ WARN(1, "Netvsc: Waiting for sub-channel processing");
+ }
/* Halt and release the rndis device */
rndis_filter_halt_device(rndis_dev);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 0fbbba7a0cae..cb9e9fe6d77a 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -343,16 +343,26 @@ static const struct regmap_config at86rf230_regmap_spi_config = {
};
static void
-at86rf230_async_error_recover(void *context)
+at86rf230_async_error_recover_complete(void *context)
{
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- lp->is_tx = 0;
- at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
- ieee802154_wake_queue(lp->hw);
if (ctx->free)
kfree(ctx);
+
+ ieee802154_wake_queue(lp->hw);
+}
+
+static void
+at86rf230_async_error_recover(void *context)
+{
+ struct at86rf230_state_change *ctx = context;
+ struct at86rf230_local *lp = ctx->lp;
+
+ lp->is_tx = 0;
+ at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
+ at86rf230_async_error_recover_complete);
}
static inline void
@@ -892,14 +902,12 @@ at86rf230_xmit_start(void *context)
struct at86rf230_local *lp = ctx->lp;
/* check if we change from off state */
- if (lp->is_tx_from_off) {
- lp->is_tx_from_off = false;
+ if (lp->is_tx_from_off)
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
at86rf230_write_frame);
- } else {
+ else
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
at86rf230_xmit_tx_on);
- }
}
static int
@@ -923,6 +931,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
at86rf230_xmit_start);
} else {
+ lp->is_tx_from_off = false;
at86rf230_xmit_start(ctx);
}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 4cdf51638972..764a2bddfaee 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -310,6 +310,7 @@ mrf24j40_short_reg_writeable(struct device *dev, unsigned int reg)
case REG_TRISGPIO:
case REG_GPIO:
case REG_RFCTL:
+ case REG_SECCR2:
case REG_SLPACK:
case REG_BBREG0:
case REG_BBREG1:
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 9542b7bac61a..695a5dc9ace3 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -84,19 +84,19 @@ struct ipvl_addr {
#define ip4addr ipu.ip4
struct hlist_node hlnode; /* Hash-table linkage */
struct list_head anode; /* logical-interface linkage */
- struct rcu_head rcu;
ipvl_hdr_type atype;
+ struct rcu_head rcu;
};
struct ipvl_port {
struct net_device *dev;
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
struct list_head ipvlans;
- struct rcu_head rcu;
+ u16 mode;
struct work_struct wq;
struct sk_buff_head backlog;
int count;
- u16 mode;
+ struct rcu_head rcu;
};
static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
@@ -114,8 +114,6 @@ static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
return rtnl_dereference(d->rx_handler_data);
}
-void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev);
-void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval);
void ipvlan_init_secret(void);
unsigned int ipvlan_mac_hash(const unsigned char *addr);
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
@@ -125,7 +123,5 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6);
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
-struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
- const void *iaddr, bool is_v6);
void ipvlan_ht_addr_del(struct ipvl_addr *addr);
#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8c48bb2a94ea..d6d0524ee5fd 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -53,8 +53,8 @@ static u8 ipvlan_get_v4_hash(const void *iaddr)
IPVLAN_HASH_MASK;
}
-struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
- const void *iaddr, bool is_v6)
+static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
+ const void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
u8 hash;
@@ -265,20 +265,25 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
len = skb->len + ETH_HLEN;
- if (unlikely(!(dev->flags & IFF_UP))) {
- kfree_skb(skb);
- goto out;
- }
+ /* Only packets exchanged between two local slaves need the
+ * device-up check as well as the skb-share check.
+ */
+ if (local) {
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ goto out;
+ }
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto out;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
- *pskb = skb;
+ *pskb = skb;
+ }
skb->dev = dev;
- skb->pkt_type = PACKET_HOST;
if (local) {
+ skb->pkt_type = PACKET_HOST;
if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
success = true;
} else {
@@ -342,7 +347,7 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
return addr;
}
-static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+static int ipvlan_process_v4_outbound(struct sk_buff *skb, bool xnet)
{
const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
@@ -365,7 +370,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
ip_rt_put(rt);
goto err;
}
- skb_dst_drop(skb);
+ skb_scrub_packet(skb, xnet);
skb_dst_set(skb, &rt->dst);
err = ip_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
@@ -380,7 +385,7 @@ out:
return ret;
}
-static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+static int ipvlan_process_v6_outbound(struct sk_buff *skb, bool xnet)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct net_device *dev = skb->dev;
@@ -403,7 +408,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
dst_release(dst);
goto err;
}
- skb_dst_drop(skb);
+ skb_scrub_packet(skb, xnet);
skb_dst_set(skb, dst);
err = ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
@@ -418,8 +423,7 @@ out:
return ret;
}
-static int ipvlan_process_outbound(struct sk_buff *skb,
- const struct ipvl_dev *ipvlan)
+static int ipvlan_process_outbound(struct sk_buff *skb, bool xnet)
{
struct ethhdr *ethh = eth_hdr(skb);
int ret = NET_XMIT_DROP;
@@ -443,9 +447,9 @@ static int ipvlan_process_outbound(struct sk_buff *skb,
}
if (skb->protocol == htons(ETH_P_IPV6))
- ret = ipvlan_process_v6_outbound(skb);
+ ret = ipvlan_process_v6_outbound(skb, xnet);
else if (skb->protocol == htons(ETH_P_IP))
- ret = ipvlan_process_v4_outbound(skb);
+ ret = ipvlan_process_v4_outbound(skb, xnet);
else {
pr_warn_ratelimited("Dropped outbound packet type=%x\n",
ntohs(skb->protocol));
@@ -481,6 +485,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
void *lyr3h;
struct ipvl_addr *addr;
int addr_type;
+ bool xnet;
lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
if (!lyr3h)
@@ -491,8 +496,9 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
return ipvlan_rcv_frame(addr, &skb, true);
out:
+ xnet = !net_eq(dev_net(skb->dev), dev_net(ipvlan->phy_dev));
skb->dev = ipvlan->phy_dev;
- return ipvlan_process_outbound(skb, ipvlan);
+ return ipvlan_process_outbound(skb, xnet);
}
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f94392d07126..57941d3f4227 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -9,12 +9,12 @@
#include "ipvlan.h"
-void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
+static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
}
-void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval)
+static void ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
{
struct ipvl_dev *ipvlan;
@@ -119,6 +119,7 @@ static int ipvlan_init(struct net_device *dev)
dev->features = phy_dev->features & IPVLAN_FEATURES;
dev->features |= NETIF_F_LLTX;
dev->gso_max_size = phy_dev->gso_max_size;
+ dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
ipvlan_set_lockdep_class(dev);
@@ -346,12 +347,12 @@ static const struct header_ops ipvlan_header_ops = {
.cache_update = eth_header_cache_update,
};
-static int ipvlan_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
const struct ipvl_dev *ipvlan = netdev_priv(dev);
- return __ethtool_get_settings(ipvlan->phy_dev, cmd);
+ return __ethtool_get_link_ksettings(ipvlan->phy_dev, cmd);
}
static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
@@ -377,7 +378,7 @@ static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
static const struct ethtool_ops ipvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = ipvlan_ethtool_get_settings,
+ .get_link_ksettings = ipvlan_ethtool_get_link_ksettings,
.get_drvinfo = ipvlan_ethtool_get_drvinfo,
.get_msglevel = ipvlan_ethtool_get_msglevel,
.set_msglevel = ipvlan_ethtool_set_msglevel,
@@ -442,6 +443,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct ipvl_port *port;
struct net_device *phy_dev;
int err;
+ u16 mode = IPVLAN_MODE_L3;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -460,14 +462,15 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
return err;
}
- port = ipvlan_port_get_rtnl(phy_dev);
if (data && data[IFLA_IPVLAN_MODE])
- port->mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+ mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
+ port = ipvlan_port_get_rtnl(phy_dev);
ipvlan->phy_dev = phy_dev;
ipvlan->dev = dev;
ipvlan->port = port;
ipvlan->sfeatures = IPVLAN_FEATURES;
+ ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
/* TODO Probably put random address here to be presented to the
@@ -488,6 +491,8 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
goto ipvlan_destroy_port;
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
+ ipvlan_set_port_mode(port, mode);
+
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
@@ -588,6 +593,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
ipvlan->dev->gso_max_size = dev->gso_max_size;
+ ipvlan->dev->gso_max_segs = dev->gso_max_segs;
netdev_features_change(ipvlan->dev);
}
break;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 696852eb23c3..7a3f990c1935 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
/* Module stuff handled via irda_ldisc.owner - Jean II */
- /* First make sure we're not already connected. */
- if (tty->disc_data != NULL) {
- priv = tty->disc_data;
- if (priv && priv->magic == IRTTY_MAGIC) {
- ret = -EEXIST;
- goto out;
- }
- tty->disc_data = NULL; /* ### */
- }
-
/* stop the underlying driver */
irtty_stop_receiver(tty, TRUE);
if (tty->ops->stop)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
new file mode 100644
index 000000000000..84d3e5ca8817
--- /dev/null
+++ b/drivers/net/macsec.c
@@ -0,0 +1,3297 @@
+/*
+ * drivers/net/macsec.c - MACsec device
+ *
+ * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/module.h>
+#include <crypto/aead.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/genetlink.h>
+#include <net/sock.h>
+
+#include <uapi/linux/if_macsec.h>
+
+typedef u64 __bitwise sci_t;
+
+#define MACSEC_SCI_LEN 8
+
+/* SecTAG length = macsec_eth_header without the optional SCI */
+#define MACSEC_TAG_LEN 6
+
+struct macsec_eth_header {
+ struct ethhdr eth;
+ /* SecTAG */
+ u8 tci_an;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 short_length:6,
+ unused:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 unused:2,
+ short_length:6;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be32 packet_number;
+ u8 secure_channel_id[8]; /* optional */
+} __packed;
+
+#define MACSEC_TCI_VERSION 0x80
+#define MACSEC_TCI_ES 0x40 /* end station */
+#define MACSEC_TCI_SC 0x20 /* SCI present */
+#define MACSEC_TCI_SCB 0x10 /* epon */
+#define MACSEC_TCI_E 0x08 /* encryption */
+#define MACSEC_TCI_C 0x04 /* changed text */
+#define MACSEC_AN_MASK 0x03 /* association number */
+#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C)
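+
+/* Illustration only, not part of the driver: tci_an packs the six TCI
+ * flag bits above plus the 2-bit association number. A hypothetical
+ * standalone decoder (names invented for the example, assuming
+ * <stdint.h> and <stdbool.h>):
+ *
+ *   struct sectag_tci {
+ *       bool ver, es, sc, scb, e, c;
+ *       uint8_t an;
+ *   };
+ *
+ *   static struct sectag_tci decode_tci(uint8_t tci_an)
+ *   {
+ *       struct sectag_tci t = {
+ *           .ver = !!(tci_an & MACSEC_TCI_VERSION),
+ *           .es  = !!(tci_an & MACSEC_TCI_ES),
+ *           .sc  = !!(tci_an & MACSEC_TCI_SC),
+ *           .scb = !!(tci_an & MACSEC_TCI_SCB),
+ *           .e   = !!(tci_an & MACSEC_TCI_E),
+ *           .c   = !!(tci_an & MACSEC_TCI_C),
+ *           .an  = tci_an & MACSEC_AN_MASK,
+ *       };
+ *       return t;
+ *   }
+ */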
+
+/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
+#define MIN_NON_SHORT_LEN 48
+
+#define GCM_AES_IV_LEN 12
+#define DEFAULT_ICV_LEN 16
+
+#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+
+#define for_each_rxsc(secy, sc) \
+ for (sc = rcu_dereference_bh(secy->rx_sc); \
+ sc; \
+ sc = rcu_dereference_bh(sc->next))
+#define for_each_rxsc_rtnl(secy, sc) \
+ for (sc = rtnl_dereference(secy->rx_sc); \
+ sc; \
+ sc = rtnl_dereference(sc->next))
+
+struct gcm_iv {
+ union {
+ u8 secure_channel_id[8];
+ sci_t sci;
+ };
+ __be32 pn;
+};
+
+/**
+ * struct macsec_key - SA key
+ * @id: user-provided key identifier
+ * @tfm: crypto struct, key storage
+ */
+struct macsec_key {
+ u64 id;
+ struct crypto_aead *tfm;
+};
+
+struct macsec_rx_sc_stats {
+ __u64 InOctetsValidated;
+ __u64 InOctetsDecrypted;
+ __u64 InPktsUnchecked;
+ __u64 InPktsDelayed;
+ __u64 InPktsOK;
+ __u64 InPktsInvalid;
+ __u64 InPktsLate;
+ __u64 InPktsNotValid;
+ __u64 InPktsNotUsingSA;
+ __u64 InPktsUnusedSA;
+};
+
+struct macsec_rx_sa_stats {
+ __u32 InPktsOK;
+ __u32 InPktsInvalid;
+ __u32 InPktsNotValid;
+ __u32 InPktsNotUsingSA;
+ __u32 InPktsUnusedSA;
+};
+
+struct macsec_tx_sa_stats {
+ __u32 OutPktsProtected;
+ __u32 OutPktsEncrypted;
+};
+
+struct macsec_tx_sc_stats {
+ __u64 OutPktsProtected;
+ __u64 OutPktsEncrypted;
+ __u64 OutOctetsProtected;
+ __u64 OutOctetsEncrypted;
+};
+
+struct macsec_dev_stats {
+ __u64 OutPktsUntagged;
+ __u64 InPktsUntagged;
+ __u64 OutPktsTooLong;
+ __u64 InPktsNoTag;
+ __u64 InPktsBadTag;
+ __u64 InPktsUnknownSCI;
+ __u64 InPktsNoSCI;
+ __u64 InPktsOverrun;
+};
+
+/**
+ * struct macsec_rx_sa - receive secure association
+ * @active: flag indicating if the SA is usable for receive
+ * @next_pn: packet number expected for the next packet
+ * @lock: protects next_pn manipulations
+ * @key: key structure
+ * @stats: per-SA stats
+ */
+struct macsec_rx_sa {
+ struct macsec_key key;
+ spinlock_t lock;
+ u32 next_pn;
+ atomic_t refcnt;
+ bool active;
+ struct macsec_rx_sa_stats __percpu *stats;
+ struct macsec_rx_sc *sc;
+ struct rcu_head rcu;
+};
+
+struct pcpu_rx_sc_stats {
+ struct macsec_rx_sc_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct macsec_rx_sc - receive secure channel
+ * @sci: secure channel identifier for this SC
+ * @active: channel is active
+ * @sa: array of secure associations
+ * @stats: per-SC stats
+ */
+struct macsec_rx_sc {
+ struct macsec_rx_sc __rcu *next;
+ sci_t sci;
+ bool active;
+ struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
+ struct pcpu_rx_sc_stats __percpu *stats;
+ atomic_t refcnt;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct macsec_tx_sa - transmit secure association
+ * @active: flag indicating if the SA is usable for transmit
+ * @next_pn: packet number to use for the next packet
+ * @lock: protects next_pn manipulations
+ * @key: key structure
+ * @stats: per-SA stats
+ */
+struct macsec_tx_sa {
+ struct macsec_key key;
+ spinlock_t lock;
+ u32 next_pn;
+ atomic_t refcnt;
+ bool active;
+ struct macsec_tx_sa_stats __percpu *stats;
+ struct rcu_head rcu;
+};
+
+struct pcpu_tx_sc_stats {
+ struct macsec_tx_sc_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct macsec_tx_sc - transmit secure channel
+ * @active: channel is active
+ * @encoding_sa: association number of the SA currently in use
+ * @encrypt: encrypt packets on transmit, or authenticate only
+ * @send_sci: always include the SCI in the SecTAG
+ * @end_station: set the ES (end station) bit in the SecTAG
+ * @scb: single copy broadcast flag
+ * @sa: array of secure associations
+ * @stats: stats for this TXSC
+ */
+struct macsec_tx_sc {
+ bool active;
+ u8 encoding_sa;
+ bool encrypt;
+ bool send_sci;
+ bool end_station;
+ bool scb;
+ struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
+ struct pcpu_tx_sc_stats __percpu *stats;
+};
+
+#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
+
+/**
+ * struct macsec_secy - MACsec Security Entity
+ * @netdev: netdevice for this SecY
+ * @n_rx_sc: number of receive secure channels configured on this SecY
+ * @sci: secure channel identifier used for tx
+ * @key_len: length of keys used by the cipher suite
+ * @icv_len: length of ICV used by the cipher suite
+ * @validate_frames: validation mode
+ * @operational: MAC_Operational flag
+ * @protect_frames: enable protection for this SecY
+ * @replay_protect: enable packet number checks on receive
+ * @replay_window: size of the replay window
+ * @tx_sc: transmit secure channel
+ * @rx_sc: linked list of receive secure channels
+ */
+struct macsec_secy {
+ struct net_device *netdev;
+ unsigned int n_rx_sc;
+ sci_t sci;
+ u16 key_len;
+ u16 icv_len;
+ enum macsec_validation_type validate_frames;
+ bool operational;
+ bool protect_frames;
+ bool replay_protect;
+ u32 replay_window;
+ struct macsec_tx_sc tx_sc;
+ struct macsec_rx_sc __rcu *rx_sc;
+};
+
+struct pcpu_secy_stats {
+ struct macsec_dev_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct macsec_dev - private data
+ * @secy: SecY config
+ * @real_dev: pointer to underlying netdevice
+ * @stats: MACsec device stats
+ * @secys: linked list of SecY's on the underlying device
+ */
+struct macsec_dev {
+ struct macsec_secy secy;
+ struct net_device *real_dev;
+ struct pcpu_secy_stats __percpu *stats;
+ struct list_head secys;
+};
+
+/**
+ * struct macsec_rxh_data - rx_handler private argument
+ * @secys: linked list of SecY's on this underlying device
+ */
+struct macsec_rxh_data {
+ struct list_head secys;
+};
+
+static struct macsec_dev *macsec_priv(const struct net_device *dev)
+{
+ return (struct macsec_dev *)netdev_priv(dev);
+}
+
+static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
+{
+ return rcu_dereference_bh(dev->rx_handler_data);
+}
+
+static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
+{
+ return rtnl_dereference(dev->rx_handler_data);
+}
+
+struct macsec_cb {
+ struct aead_request *req;
+ union {
+ struct macsec_tx_sa *tx_sa;
+ struct macsec_rx_sa *rx_sa;
+ };
+ u8 assoc_num;
+ bool valid;
+ bool has_sci;
+};
+
+static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
+{
+ struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
+
+ if (!sa || !sa->active)
+ return NULL;
+
+ if (!atomic_inc_not_zero(&sa->refcnt))
+ return NULL;
+
+ return sa;
+}
+
+static void free_rx_sc_rcu(struct rcu_head *head)
+{
+ struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
+
+ free_percpu(rx_sc->stats);
+ kfree(rx_sc);
+}
+
+static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
+{
+ return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL;
+}
+
+static void macsec_rxsc_put(struct macsec_rx_sc *sc)
+{
+ if (atomic_dec_and_test(&sc->refcnt))
+ call_rcu(&sc->rcu_head, free_rx_sc_rcu);
+}
+
+static void free_rxsa(struct rcu_head *head)
+{
+ struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
+
+ crypto_free_aead(sa->key.tfm);
+ free_percpu(sa->stats);
+ macsec_rxsc_put(sa->sc);
+ kfree(sa);
+}
+
+static void macsec_rxsa_put(struct macsec_rx_sa *sa)
+{
+ if (atomic_dec_and_test(&sa->refcnt))
+ call_rcu(&sa->rcu, free_rxsa);
+}
+
+static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
+{
+ struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
+
+ if (!sa || !sa->active)
+ return NULL;
+
+ if (!atomic_inc_not_zero(&sa->refcnt))
+ return NULL;
+
+ return sa;
+}
+
+static void free_txsa(struct rcu_head *head)
+{
+ struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
+
+ crypto_free_aead(sa->key.tfm);
+ free_percpu(sa->stats);
+ kfree(sa);
+}
+
+static void macsec_txsa_put(struct macsec_tx_sa *sa)
+{
+ if (atomic_dec_and_test(&sa->refcnt))
+ call_rcu(&sa->rcu, free_txsa);
+}
+
+static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
+ return (struct macsec_cb *)skb->cb;
+}
+
+#define MACSEC_PORT_ES (htons(0x0001))
+#define MACSEC_PORT_SCB (0x0000)
+#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
+
+#define DEFAULT_SAK_LEN 16
+#define DEFAULT_SEND_SCI true
+#define DEFAULT_ENCRYPT false
+#define DEFAULT_ENCODING_SA 0
+
+static sci_t make_sci(u8 *addr, __be16 port)
+{
+ sci_t sci;
+
+ memcpy(&sci, addr, ETH_ALEN);
+ memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
+
+ return sci;
+}
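+
+/* Worked example: for station MAC 52:54:00:12:34:56 and the implicit
+ * end-station port number 1 (MACSEC_PORT_ES, defined above), the SCI
+ * is the byte string 52:54:00:12:34:56:00:01 -- the MAC followed by
+ * the port, both kept in network byte order.
+ */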
+
+static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
+{
+ sci_t sci;
+
+ if (sci_present)
+ memcpy(&sci, hdr->secure_channel_id,
+ sizeof(hdr->secure_channel_id));
+ else
+ sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
+
+ return sci;
+}
+
+static unsigned int macsec_sectag_len(bool sci_present)
+{
+ return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
+}
+
+static unsigned int macsec_hdr_len(bool sci_present)
+{
+ return macsec_sectag_len(sci_present) + ETH_HLEN;
+}
+
+static unsigned int macsec_extra_len(bool sci_present)
+{
+ return macsec_sectag_len(sci_present) + sizeof(__be16);
+}
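+
+/* Worked example for the three helpers above (ETH_HLEN is 14):
+ *
+ *   macsec_sectag_len(false) =  6   TAG only
+ *   macsec_sectag_len(true)  = 14   TAG plus the 8-byte SCI
+ *   macsec_hdr_len(true)     = 28   SecTAG plus a full ethernet header
+ *   macsec_extra_len(true)   = 16   how much a tagged frame grows,
+ *                                   ICV excluded
+ *
+ * The growth includes sizeof(__be16) because the SecTAG carries its own
+ * EtherType while the original EtherType stays in front of the payload.
+ */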
+
+/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
+static void macsec_fill_sectag(struct macsec_eth_header *h,
+ const struct macsec_secy *secy, u32 pn)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
+ h->eth.h_proto = htons(ETH_P_MACSEC);
+
+ if (tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
+ h->tci_an |= MACSEC_TCI_SC;
+ memcpy(&h->secure_channel_id, &secy->sci,
+ sizeof(h->secure_channel_id));
+ } else {
+ if (tx_sc->end_station)
+ h->tci_an |= MACSEC_TCI_ES;
+ if (tx_sc->scb)
+ h->tci_an |= MACSEC_TCI_SCB;
+ }
+
+ h->packet_number = htonl(pn);
+
+ /* with GCM, C/E clear for !encrypt, both set for encrypt */
+ if (tx_sc->encrypt)
+ h->tci_an |= MACSEC_TCI_CONFID;
+ else if (secy->icv_len != DEFAULT_ICV_LEN)
+ h->tci_an |= MACSEC_TCI_C;
+
+ h->tci_an |= tx_sc->encoding_sa;
+}
+
+static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
+{
+ if (data_len < MIN_NON_SHORT_LEN)
+ h->short_length = data_len;
+}
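+
+/* Example: a 40-byte secure-data payload yields short_length = 40,
+ * while anything of MIN_NON_SHORT_LEN (48) bytes or more leaves the
+ * field at 0, which means "not short" on the wire (IEEE 802.1AE-2006
+ * 9.7).
+ */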
+
+/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
+static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+{
+ struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
+ int len = skb->len - 2 * ETH_ALEN;
+ int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;
+
+ /* a) It comprises at least 17 octets */
+ if (skb->len <= 16)
+ return false;
+
+ /* b) MACsec EtherType: already checked */
+
+ /* c) V bit is clear */
+ if (h->tci_an & MACSEC_TCI_VERSION)
+ return false;
+
+ /* d) ES or SCB => !SC */
+ if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
+ (h->tci_an & MACSEC_TCI_SC))
+ return false;
+
+ /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
+ if (h->unused)
+ return false;
+
+ /* rx.pn != 0 (figure 10-5) */
+ if (!h->packet_number)
+ return false;
+
+ /* length check, f) g) h) i) */
+ if (h->short_length)
+ return len == extra_len + h->short_length;
+ return len >= extra_len + MIN_NON_SHORT_LEN;
+}
+
+#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
+#define MACSEC_NEEDED_TAILROOM MACSEC_MAX_ICV_LEN
+
+static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
+{
+ struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
+
+ gcm_iv->sci = sci;
+ gcm_iv->pn = htonl(pn);
+}
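+
+/* The GCM nonce built here is the 8-byte SCI followed by the 4-byte
+ * packet number, 12 bytes total, matching GCM_AES_IV_LEN. The SCI is
+ * unique per channel and the PN is never reused within an SA (see
+ * tx_sa_update_pn() below), so each (key, IV) pair should occur at
+ * most once, as GCM requires.
+ */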
+
+static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
+{
+ return (struct macsec_eth_header *)skb_mac_header(skb);
+}
+
+static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+{
+ u32 pn;
+
+ spin_lock_bh(&tx_sa->lock);
+ pn = tx_sa->next_pn;
+
+ tx_sa->next_pn++;
+ if (tx_sa->next_pn == 0) {
+ pr_debug("PN wrapped, transitioning to !oper\n");
+ tx_sa->active = false;
+ if (secy->protect_frames)
+ secy->operational = false;
+ }
+ spin_unlock_bh(&tx_sa->lock);
+
+ return pn;
+}
+
+static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
+{
+ struct macsec_dev *macsec = netdev_priv(dev);
+
+ skb->dev = macsec->real_dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+}
+
+static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
+ struct macsec_tx_sa *tx_sa)
+{
+ struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
+
+ u64_stats_update_begin(&txsc_stats->syncp);
+ if (tx_sc->encrypt) {
+ txsc_stats->stats.OutOctetsEncrypted += skb->len;
+ txsc_stats->stats.OutPktsEncrypted++;
+ this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
+ } else {
+ txsc_stats->stats.OutOctetsProtected += skb->len;
+ txsc_stats->stats.OutPktsProtected++;
+ this_cpu_inc(tx_sa->stats->OutPktsProtected);
+ }
+ u64_stats_update_end(&txsc_stats->syncp);
+}
+
+static void count_tx(struct net_device *dev, int ret, int len)
+{
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+ struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ } else {
+ dev->stats.tx_dropped++;
+ }
+}
+
+static void macsec_encrypt_done(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+ struct net_device *dev = skb->dev;
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
+ int len, ret;
+
+ aead_request_free(macsec_skb_cb(skb)->req);
+
+ rcu_read_lock_bh();
+ macsec_encrypt_finish(skb, dev);
+ macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
+ len = skb->len;
+ ret = dev_queue_xmit(skb);
+ count_tx(dev, ret, len);
+ rcu_read_unlock_bh();
+
+ macsec_txsa_put(sa);
+ dev_put(dev);
+}
+
+static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ int ret;
+ struct scatterlist sg[MAX_SKB_FRAGS + 1];
+ unsigned char iv[GCM_AES_IV_LEN];
+ struct ethhdr *eth;
+ struct macsec_eth_header *hh;
+ size_t unprotected_len;
+ struct aead_request *req;
+ struct macsec_secy *secy;
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_tx_sa *tx_sa;
+ struct macsec_dev *macsec = macsec_priv(dev);
+ u32 pn;
+
+ secy = &macsec->secy;
+ tx_sc = &secy->tx_sc;
+
+ /* 10.5.1 TX SA assignment */
+ tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
+ if (!tx_sa) {
+ secy->operational = false;
+ kfree_skb(skb);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
+ skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
+ struct sk_buff *nskb = skb_copy_expand(skb,
+ MACSEC_NEEDED_HEADROOM,
+ MACSEC_NEEDED_TAILROOM,
+ GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ macsec_txsa_put(tx_sa);
+ kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb) {
+ macsec_txsa_put(tx_sa);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ unprotected_len = skb->len;
+ eth = eth_hdr(skb);
+ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
+ memmove(hh, eth, 2 * ETH_ALEN);
+
+ pn = tx_sa_update_pn(tx_sa, secy);
+ if (pn == 0) {
+ macsec_txsa_put(tx_sa);
+ kfree_skb(skb);
+ return ERR_PTR(-ENOLINK);
+ }
+ macsec_fill_sectag(hh, secy, pn);
+ macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
+
+ macsec_fill_iv(iv, secy->sci, pn);
+
+ skb_put(skb, secy->icv_len);
+
+ if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
+ struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
+
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.OutPktsTooLong++;
+ u64_stats_update_end(&secy_stats->syncp);
+
+ macsec_txsa_put(tx_sa);
+ kfree_skb(skb);
+ return ERR_PTR(-EINVAL);
+ }
+
+ req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+ if (!req) {
+ macsec_txsa_put(tx_sa);
+ kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg_init_table(sg, MAX_SKB_FRAGS + 1);
+ skb_to_sgvec(skb, sg, 0, skb->len);
+
+ if (tx_sc->encrypt) {
+ int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
+ secy->icv_len;
+ aead_request_set_crypt(req, sg, sg, len, iv);
+ aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
+ } else {
+ aead_request_set_crypt(req, sg, sg, 0, iv);
+ aead_request_set_ad(req, skb->len - secy->icv_len);
+ }
+
+ macsec_skb_cb(skb)->req = req;
+ macsec_skb_cb(skb)->tx_sa = tx_sa;
+ aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
+
+ dev_hold(skb->dev);
+ ret = crypto_aead_encrypt(req);
+ if (ret == -EINPROGRESS) {
+ return ERR_PTR(ret);
+ } else if (ret != 0) {
+ dev_put(skb->dev);
+ kfree_skb(skb);
+ aead_request_free(req);
+ macsec_txsa_put(tx_sa);
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev_put(skb->dev);
+ aead_request_free(req);
+ macsec_txsa_put(tx_sa);
+
+ return skb;
+}
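+
+/* A sketch of how a transmit path is expected to drive macsec_encrypt().
+ * On -EINPROGRESS the skb now belongs to the crypto layer and
+ * macsec_encrypt_done() will transmit it from the completion callback;
+ * on any other error the skb has already been freed:
+ *
+ *   skb = macsec_encrypt(skb, dev);
+ *   if (IS_ERR(skb)) {
+ *       if (PTR_ERR(skb) != -EINPROGRESS)
+ *           dev->stats.tx_dropped++;
+ *       return NETDEV_TX_OK;
+ *   }
+ *   // synchronous completion: count the frame and transmit it here
+ */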
+
+static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
+{
+ struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
+ struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
+ struct macsec_eth_header *hdr = macsec_ethhdr(skb);
+ u32 lowest_pn = 0;
+
+ spin_lock(&rx_sa->lock);
+ if (rx_sa->next_pn >= secy->replay_window)
+ lowest_pn = rx_sa->next_pn - secy->replay_window;
+
+ /* Now perform the replay protection check again
+ * (see IEEE 802.1AE-2006 figure 10-5)
+ */
+ if (secy->replay_protect && pn < lowest_pn) {
+ spin_unlock(&rx_sa->lock);
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsLate++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ return false;
+ }
+
+ if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ if (hdr->tci_an & MACSEC_TCI_E)
+ rxsc_stats->stats.InOctetsDecrypted += skb->len;
+ else
+ rxsc_stats->stats.InOctetsValidated += skb->len;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ }
+
+ if (!macsec_skb_cb(skb)->valid) {
+ spin_unlock(&rx_sa->lock);
+
+ /* 10.6.5 */
+ if (hdr->tci_an & MACSEC_TCI_C ||
+ secy->validate_frames == MACSEC_VALIDATE_STRICT) {
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsNotValid++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ return false;
+ }
+
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
+ rxsc_stats->stats.InPktsInvalid++;
+ this_cpu_inc(rx_sa->stats->InPktsInvalid);
+ } else if (pn < lowest_pn) {
+ rxsc_stats->stats.InPktsDelayed++;
+ } else {
+ rxsc_stats->stats.InPktsUnchecked++;
+ }
+ u64_stats_update_end(&rxsc_stats->syncp);
+ } else {
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ if (pn < lowest_pn) {
+ rxsc_stats->stats.InPktsDelayed++;
+ } else {
+ rxsc_stats->stats.InPktsOK++;
+ this_cpu_inc(rx_sa->stats->InPktsOK);
+ }
+ u64_stats_update_end(&rxsc_stats->syncp);
+
+ if (pn >= rx_sa->next_pn)
+ rx_sa->next_pn = pn + 1;
+ spin_unlock(&rx_sa->lock);
+ }
+
+ return true;
+}
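+
+/* The replay bound above reduces to unsigned arithmetic with the lower
+ * end clamped at zero. A minimal standalone sketch (hypothetical
+ * helper, not driver code):
+ *
+ *   static bool pn_is_late(uint32_t pn, uint32_t next_pn, uint32_t window)
+ *   {
+ *       uint32_t lowest = next_pn >= window ? next_pn - window : 0;
+ *
+ *       return pn < lowest;   // late: counted as InPktsLate and dropped
+ *   }
+ */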
+
+static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
+{
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, dev);
+
+ skb_reset_network_header(skb);
+ if (!skb_transport_header_was_set(skb))
+ skb_reset_transport_header(skb);
+ skb_reset_mac_len(skb);
+}
+
+static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
+{
+ memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, hdr_len);
+ pskb_trim_unique(skb, skb->len - icv_len);
+}
+
+static void count_rx(struct net_device *dev, int len)
+{
+ struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+}
+
+static void macsec_decrypt_done(struct crypto_async_request *base, int err)
+{
+ struct sk_buff *skb = base->data;
+ struct net_device *dev = skb->dev;
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
+ int len, ret;
+ u32 pn;
+
+ aead_request_free(macsec_skb_cb(skb)->req);
+
+ rcu_read_lock_bh();
+ pn = ntohl(macsec_ethhdr(skb)->packet_number);
+ if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
+ rcu_read_unlock_bh();
+ kfree_skb(skb);
+ goto out;
+ }
+
+ macsec_finalize_skb(skb, macsec->secy.icv_len,
+ macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ macsec_reset_skb(skb, macsec->secy.netdev);
+
+ len = skb->len;
+ ret = netif_rx(skb);
+ if (ret == NET_RX_SUCCESS)
+ count_rx(dev, len);
+ else
+ macsec->secy.netdev->stats.rx_dropped++;
+
+ rcu_read_unlock_bh();
+
+out:
+ macsec_rxsa_put(rx_sa);
+ dev_put(dev);
+ return;
+}
+
+static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
+ struct net_device *dev,
+ struct macsec_rx_sa *rx_sa,
+ sci_t sci,
+ struct macsec_secy *secy)
+{
+ int ret;
+ struct scatterlist sg[MAX_SKB_FRAGS + 1];
+ unsigned char iv[GCM_AES_IV_LEN];
+ struct aead_request *req;
+ struct macsec_eth_header *hdr;
+ u16 icv_len = secy->icv_len;
+
+ macsec_skb_cb(skb)->valid = false;
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+ if (!req) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ hdr = (struct macsec_eth_header *)skb->data;
+ macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
+
+ sg_init_table(sg, MAX_SKB_FRAGS + 1);
+ skb_to_sgvec(skb, sg, 0, skb->len);
+
+ if (hdr->tci_an & MACSEC_TCI_E) {
+ /* confidentiality: ethernet + macsec header
+ * authenticated, encrypted payload
+ */
+ int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
+
+ aead_request_set_crypt(req, sg, sg, len, iv);
+ aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb) {
+ aead_request_free(req);
+ return NULL;
+ }
+ } else {
+ /* integrity only: all headers + data authenticated */
+ aead_request_set_crypt(req, sg, sg, icv_len, iv);
+ aead_request_set_ad(req, skb->len - icv_len);
+ }
+
+ macsec_skb_cb(skb)->req = req;
+ macsec_skb_cb(skb)->rx_sa = rx_sa;
+ skb->dev = dev;
+ aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
+
+ dev_hold(dev);
+ ret = crypto_aead_decrypt(req);
+ if (ret == -EINPROGRESS) {
+ return NULL;
+ } else if (ret != 0) {
+ /* decryption/authentication failed
+ * 10.6 if validateFrames is disabled, deliver anyway
+ */
+ if (ret != -EBADMSG) {
+ kfree_skb(skb);
+ skb = NULL;
+ }
+ } else {
+ macsec_skb_cb(skb)->valid = true;
+ }
+ dev_put(dev);
+
+ aead_request_free(req);
+
+ return skb;
+}
+
+static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
+{
+ struct macsec_rx_sc *rx_sc;
+
+ for_each_rxsc(secy, rx_sc) {
+ if (rx_sc->sci == sci)
+ return rx_sc;
+ }
+
+ return NULL;
+}
+
+static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
+{
+ struct macsec_rx_sc *rx_sc;
+
+ for_each_rxsc_rtnl(secy, rx_sc) {
+ if (rx_sc->sci == sci)
+ return rx_sc;
+ }
+
+ return NULL;
+}
+
+static void handle_not_macsec(struct sk_buff *skb)
+{
+ struct macsec_rxh_data *rxd;
+ struct macsec_dev *macsec;
+
+ rcu_read_lock();
+ rxd = macsec_data_rcu(skb->dev);
+
+ /* 10.6 If the management control validateFrames is not
+ * Strict, frames without a SecTAG are received, counted, and
+ * delivered to the Controlled Port
+ */
+ list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
+ struct sk_buff *nskb;
+ int ret;
+ struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
+
+ if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.InPktsNoTag++;
+ u64_stats_update_end(&secy_stats->syncp);
+ continue;
+ }
+
+ /* deliver on this port */
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ break;
+
+ nskb->dev = macsec->secy.netdev;
+
+ ret = netif_rx(nskb);
+ if (ret == NET_RX_SUCCESS) {
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.InPktsUntagged++;
+ u64_stats_update_end(&secy_stats->syncp);
+ } else {
+ macsec->secy.netdev->stats.rx_dropped++;
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct net_device *dev = skb->dev;
+ struct macsec_eth_header *hdr;
+ struct macsec_secy *secy = NULL;
+ struct macsec_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_rxh_data *rxd;
+ struct macsec_dev *macsec;
+ sci_t sci;
+ u32 pn;
+ bool cbit;
+ struct pcpu_rx_sc_stats *rxsc_stats;
+ struct pcpu_secy_stats *secy_stats;
+ bool pulled_sci;
+
+ if (skb_headroom(skb) < ETH_HLEN)
+ goto drop_direct;
+
+ hdr = macsec_ethhdr(skb);
+ if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
+ handle_not_macsec(skb);
+
+ /* and deliver to the uncontrolled port */
+ return RX_HANDLER_PASS;
+ }
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb) {
+ *pskb = NULL;
+ return RX_HANDLER_CONSUMED;
+ }
+
+ pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
+ if (!pulled_sci) {
+ if (!pskb_may_pull(skb, macsec_extra_len(false)))
+ goto drop_direct;
+ }
+
+ hdr = macsec_ethhdr(skb);
+
+ /* Frames with a SecTAG that has the TCI E bit set but the C
+ * bit clear are discarded, as this reserved encoding is used
+ * to identify frames with a SecTAG that are not to be
+ * delivered to the Controlled Port.
+ */
+ if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
+ return RX_HANDLER_PASS;
+
+ /* now, pull the extra length */
+ if (hdr->tci_an & MACSEC_TCI_SC) {
+ if (!pulled_sci)
+ goto drop_direct;
+ }
+
+ /* ethernet header is part of crypto processing */
+ skb_push(skb, ETH_HLEN);
+
+ macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
+ macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
+ sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
+
+ rcu_read_lock();
+ rxd = macsec_data_rcu(skb->dev);
+
+ list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
+ struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
+
+ if (sc) {
+ secy = &macsec->secy;
+ rx_sc = sc;
+ break;
+ }
+ }
+
+ if (!secy)
+ goto nosci;
+
+ dev = secy->netdev;
+ macsec = macsec_priv(dev);
+ secy_stats = this_cpu_ptr(macsec->stats);
+ rxsc_stats = this_cpu_ptr(rx_sc->stats);
+
+ if (!macsec_validate_skb(skb, secy->icv_len)) {
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.InPktsBadTag++;
+ u64_stats_update_end(&secy_stats->syncp);
+ goto drop_nosa;
+ }
+
+ rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
+ if (!rx_sa) {
+ /* 10.6.1 if the SA is not in use */
+
+ /* If validateFrames is Strict or the C bit in the
+ * SecTAG is set, discard
+ */
+ if (hdr->tci_an & MACSEC_TCI_C ||
+ secy->validate_frames == MACSEC_VALIDATE_STRICT) {
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsNotUsingSA++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ goto drop_nosa;
+ }
+
+ /* not Strict, the frame (with the SecTAG and ICV
+ * removed) is delivered to the Controlled Port.
+ */
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsUnusedSA++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ goto deliver;
+ }
+
+ /* First, PN check to avoid decrypting obviously wrong packets */
+ pn = ntohl(hdr->packet_number);
+ if (secy->replay_protect) {
+ bool late;
+
+ spin_lock(&rx_sa->lock);
+ late = rx_sa->next_pn >= secy->replay_window &&
+ pn < (rx_sa->next_pn - secy->replay_window);
+ spin_unlock(&rx_sa->lock);
+
+ if (late) {
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsLate++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ goto drop;
+ }
+ }
+
+ /* Disabled && !changed text => skip validation */
+ if (hdr->tci_an & MACSEC_TCI_C ||
+ secy->validate_frames != MACSEC_VALIDATE_DISABLED)
+ skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
+
+ if (!skb) {
+ macsec_rxsa_put(rx_sa);
+ rcu_read_unlock();
+ *pskb = NULL;
+ return RX_HANDLER_CONSUMED;
+ }
+
+ if (!macsec_post_decrypt(skb, secy, pn))
+ goto drop;
+
+deliver:
+ macsec_finalize_skb(skb, secy->icv_len,
+ macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+ macsec_reset_skb(skb, secy->netdev);
+
+ macsec_rxsa_put(rx_sa);
+ count_rx(dev, skb->len);
+
+ rcu_read_unlock();
+
+ *pskb = skb;
+ return RX_HANDLER_ANOTHER;
+
+drop:
+ macsec_rxsa_put(rx_sa);
+drop_nosa:
+ rcu_read_unlock();
+drop_direct:
+ kfree_skb(skb);
+ *pskb = NULL;
+ return RX_HANDLER_CONSUMED;
+
+nosci:
+ /* 10.6.1 if the SC is not found */
+ cbit = !!(hdr->tci_an & MACSEC_TCI_C);
+ if (!cbit)
+ macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
+ macsec_extra_len(macsec_skb_cb(skb)->has_sci));
+
+ list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
+ struct sk_buff *nskb;
+ int ret;
+
+ secy_stats = this_cpu_ptr(macsec->stats);
+
+ /* If validateFrames is Strict or the C bit in the
+ * SecTAG is set, discard
+ */
+ if (cbit ||
+ macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.InPktsNoSCI++;
+ u64_stats_update_end(&secy_stats->syncp);
+ continue;
+ }
+
+ /* not strict, the frame (with the SecTAG and ICV
+ * removed) is delivered to the Controlled Port.
+ */
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ break;
+
+ macsec_reset_skb(nskb, macsec->secy.netdev);
+
+ ret = netif_rx(nskb);
+ if (ret == NET_RX_SUCCESS) {
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.InPktsUnknownSCI++;
+ u64_stats_update_end(&secy_stats->syncp);
+ } else {
+ macsec->secy.netdev->stats.rx_dropped++;
+ }
+ }
+
+ rcu_read_unlock();
+ *pskb = skb;
+ return RX_HANDLER_PASS;
+}
+
+static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
+{
+ struct crypto_aead *tfm;
+ int ret;
+
+ tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (!tfm || IS_ERR(tfm))
+ return NULL;
+
+ ret = crypto_aead_setkey(tfm, key, key_len);
+ if (ret < 0) {
+ crypto_free_aead(tfm);
+ return NULL;
+ }
+
+ ret = crypto_aead_setauthsize(tfm, icv_len);
+ if (ret < 0) {
+ crypto_free_aead(tfm);
+ return NULL;
+ }
+
+ return tfm;
+}
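+
+/* Usage sketch: with the defaults in this file, a 16-byte SAK and a
+ * 16-byte ICV select AES-128 in GCM mode with a full-length tag:
+ *
+ *   tfm = macsec_alloc_tfm(sak, DEFAULT_SAK_LEN, DEFAULT_ICV_LEN);
+ *   if (!tfm)
+ *       return -1;   // allocation, setkey or setauthsize failed
+ */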
+
+static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
+ int icv_len)
+{
+ rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
+ if (!rx_sa->stats)
+ return -1;
+
+ rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
+ if (!rx_sa->key.tfm) {
+ free_percpu(rx_sa->stats);
+ return -1;
+ }
+
+ rx_sa->active = false;
+ rx_sa->next_pn = 1;
+ atomic_set(&rx_sa->refcnt, 1);
+ spin_lock_init(&rx_sa->lock);
+
+ return 0;
+}
+
+static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
+{
+ rx_sa->active = false;
+
+ macsec_rxsa_put(rx_sa);
+}
+
+static void free_rx_sc(struct macsec_rx_sc *rx_sc)
+{
+ int i;
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
+
+ RCU_INIT_POINTER(rx_sc->sa[i], NULL);
+ if (sa)
+ clear_rx_sa(sa);
+ }
+
+ macsec_rxsc_put(rx_sc);
+}
+
+static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
+{
+ struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
+
+ for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
+ rx_sc;
+ rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
+ if (rx_sc->sci == sci) {
+ if (rx_sc->active)
+ secy->n_rx_sc--;
+ rcu_assign_pointer(*rx_scp, rx_sc->next);
+ return rx_sc;
+ }
+ }
+
+ return NULL;
+}
+
+static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
+{
+ struct macsec_rx_sc *rx_sc;
+ struct macsec_dev *macsec;
+ struct net_device *real_dev = macsec_priv(dev)->real_dev;
+ struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+ struct macsec_secy *secy;
+
+ list_for_each_entry(macsec, &rxd->secys, secys) {
+ if (find_rx_sc_rtnl(&macsec->secy, sci))
+ return ERR_PTR(-EEXIST);
+ }
+
+ rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
+ if (!rx_sc)
+ return ERR_PTR(-ENOMEM);
+
+ rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
+ if (!rx_sc->stats) {
+ kfree(rx_sc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rx_sc->sci = sci;
+ rx_sc->active = true;
+ atomic_set(&rx_sc->refcnt, 1);
+
+ secy = &macsec_priv(dev)->secy;
+ rcu_assign_pointer(rx_sc->next, secy->rx_sc);
+ rcu_assign_pointer(secy->rx_sc, rx_sc);
+
+ if (rx_sc->active)
+ secy->n_rx_sc++;
+
+ return rx_sc;
+}
+
+static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
+ int icv_len)
+{
+ tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
+ if (!tx_sa->stats)
+ return -1;
+
+ tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
+ if (!tx_sa->key.tfm) {
+ free_percpu(tx_sa->stats);
+ return -1;
+ }
+
+ tx_sa->active = false;
+ atomic_set(&tx_sa->refcnt, 1);
+ spin_lock_init(&tx_sa->lock);
+
+ return 0;
+}
+
+static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
+{
+ tx_sa->active = false;
+
+ macsec_txsa_put(tx_sa);
+}
+
+static struct genl_family macsec_fam = {
+ .id = GENL_ID_GENERATE,
+ .name = MACSEC_GENL_NAME,
+ .hdrsize = 0,
+ .version = MACSEC_GENL_VERSION,
+ .maxattr = MACSEC_ATTR_MAX,
+ .netnsok = true,
+};
+
+static struct net_device *get_dev_from_nl(struct net *net,
+ struct nlattr **attrs)
+{
+ int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
+ struct net_device *dev;
+
+ dev = __dev_get_by_index(net, ifindex);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ if (!netif_is_macsec(dev))
+ return ERR_PTR(-ENODEV);
+
+ return dev;
+}
+
+static sci_t nla_get_sci(const struct nlattr *nla)
+{
+ return (__force sci_t)nla_get_u64(nla);
+}
+
+static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value)
+{
+ return nla_put_u64(skb, attrtype, (__force u64)value);
+}
+
+static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
+ struct nlattr **attrs,
+ struct nlattr **tb_sa,
+ struct net_device **devp,
+ struct macsec_secy **secyp,
+ struct macsec_tx_sc **scp,
+ u8 *assoc_num)
+{
+ struct net_device *dev;
+ struct macsec_secy *secy;
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_tx_sa *tx_sa;
+
+ if (!tb_sa[MACSEC_SA_ATTR_AN])
+ return ERR_PTR(-EINVAL);
+
+ *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
+
+ dev = get_dev_from_nl(net, attrs);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ if (*assoc_num >= MACSEC_NUM_AN)
+ return ERR_PTR(-EINVAL);
+
+ secy = &macsec_priv(dev)->secy;
+ tx_sc = &secy->tx_sc;
+
+ tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
+ if (!tx_sa)
+ return ERR_PTR(-ENODEV);
+
+ *devp = dev;
+ *scp = tx_sc;
+ *secyp = secy;
+ return tx_sa;
+}
+
+static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
+ struct nlattr **attrs,
+ struct nlattr **tb_rxsc,
+ struct net_device **devp,
+ struct macsec_secy **secyp)
+{
+ struct net_device *dev;
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ sci_t sci;
+
+ dev = get_dev_from_nl(net, attrs);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+
+ secy = &macsec_priv(dev)->secy;
+
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
+ return ERR_PTR(-EINVAL);
+
+ sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
+ rx_sc = find_rx_sc_rtnl(secy, sci);
+ if (!rx_sc)
+ return ERR_PTR(-ENODEV);
+
+ *secyp = secy;
+ *devp = dev;
+
+ return rx_sc;
+}
+
+static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
+ struct nlattr **attrs,
+ struct nlattr **tb_rxsc,
+ struct nlattr **tb_sa,
+ struct net_device **devp,
+ struct macsec_secy **secyp,
+ struct macsec_rx_sc **scp,
+ u8 *assoc_num)
+{
+ struct macsec_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa;
+
+ if (!tb_sa[MACSEC_SA_ATTR_AN])
+ return ERR_PTR(-EINVAL);
+
+ *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
+ if (*assoc_num >= MACSEC_NUM_AN)
+ return ERR_PTR(-EINVAL);
+
+ rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
+ if (IS_ERR(rx_sc))
+ return ERR_CAST(rx_sc);
+
+ rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
+ if (!rx_sa)
+ return ERR_PTR(-ENODEV);
+
+ *scp = rx_sc;
+ return rx_sa;
+}
+
+
+static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
+ [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
+ [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
+ [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
+ [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
+ [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
+};
+
+static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
+ [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
+ [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
+ [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
+ [MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 },
+ [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
+ .len = MACSEC_MAX_KEY_LEN, },
+};
+
+static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
+{
+ if (!attrs[MACSEC_ATTR_SA_CONFIG])
+ return -EINVAL;
+
+ if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG],
+ macsec_genl_sa_policy))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
+{
+ if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
+ return -EINVAL;
+
+ if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG],
+ macsec_genl_rxsc_policy))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool validate_add_rxsa(struct nlattr **attrs)
+{
+ if (!attrs[MACSEC_SA_ATTR_AN] ||
+ !attrs[MACSEC_SA_ATTR_KEY] ||
+ !attrs[MACSEC_SA_ATTR_KEYID])
+ return false;
+
+ if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
+ return false;
+
+ if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ return false;
+
+ if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
+ if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
+ return false;
+ }
+
+ return true;
+}
+
+static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ struct nlattr **attrs = info->attrs;
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa;
+ unsigned char assoc_num;
+ struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_sa_config(attrs, tb_sa))
+ return -EINVAL;
+
+ if (parse_rxsc_config(attrs, tb_rxsc))
+ return -EINVAL;
+
+ if (!validate_add_rxsa(tb_sa))
+ return -EINVAL;
+
+ rtnl_lock();
+ rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
+ if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) {
+ rtnl_unlock();
+ return PTR_ERR(rx_sc);
+ }
+
+ assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
+
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
+ pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
+ if (rx_sa) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+
+ rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
+ if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ secy->key_len, secy->icv_len)) {
+ rtnl_unlock();
+ return -ENOMEM;
+ }
+
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&rx_sa->lock);
+ rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ spin_unlock_bh(&rx_sa->lock);
+ }
+
+ if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+ rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+
+ rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+ rx_sa->sc = rx_sc;
+ rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static bool validate_add_rxsc(struct nlattr **attrs)
+{
+ if (!attrs[MACSEC_RXSC_ATTR_SCI])
+ return false;
+
+ if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
+ if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
+ return false;
+ }
+
+ return true;
+}
+
+static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ sci_t sci = MACSEC_UNDEF_SCI;
+ struct nlattr **attrs = info->attrs;
+ struct macsec_rx_sc *rx_sc;
+ struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_rxsc_config(attrs, tb_rxsc))
+ return -EINVAL;
+
+ if (!validate_add_rxsc(tb_rxsc))
+ return -EINVAL;
+
+ rtnl_lock();
+ dev = get_dev_from_nl(genl_info_net(info), attrs);
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
+ return PTR_ERR(dev);
+ }
+
+ sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
+
+ rx_sc = create_rx_sc(dev, sci);
+ if (IS_ERR(rx_sc)) {
+ rtnl_unlock();
+ return PTR_ERR(rx_sc);
+ }
+
+ if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
+ rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static bool validate_add_txsa(struct nlattr **attrs)
+{
+ if (!attrs[MACSEC_SA_ATTR_AN] ||
+ !attrs[MACSEC_SA_ATTR_PN] ||
+ !attrs[MACSEC_SA_ATTR_KEY] ||
+ !attrs[MACSEC_SA_ATTR_KEYID])
+ return false;
+
+ if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
+ return false;
+
+ if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ return false;
+
+ if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
+ if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
+ return false;
+ }
+
+ return true;
+}
+
+static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ struct nlattr **attrs = info->attrs;
+ struct macsec_secy *secy;
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_tx_sa *tx_sa;
+ unsigned char assoc_num;
+ struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_sa_config(attrs, tb_sa))
+ return -EINVAL;
+
+ if (!validate_add_txsa(tb_sa))
+ return -EINVAL;
+
+ rtnl_lock();
+ dev = get_dev_from_nl(genl_info_net(info), attrs);
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
+ return PTR_ERR(dev);
+ }
+
+ secy = &macsec_priv(dev)->secy;
+ tx_sc = &secy->tx_sc;
+
+ assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
+
+ if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
+ pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
+ nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
+ if (tx_sa) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+
+ tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
+ if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ secy->key_len, secy->icv_len)) {
+ rtnl_unlock();
+ return -ENOMEM;
+ }
+
+ tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+
+ spin_lock_bh(&tx_sa->lock);
+ tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ spin_unlock_bh(&tx_sa->lock);
+
+ if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+ tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+
+ if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
+ secy->operational = true;
+
+ rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev;
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa;
+ u8 assoc_num;
+ struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_sa_config(attrs, tb_sa))
+ return -EINVAL;
+
+ if (parse_rxsc_config(attrs, tb_rxsc))
+ return -EINVAL;
+
+ rtnl_lock();
+ rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
+ &dev, &secy, &rx_sc, &assoc_num);
+ if (IS_ERR(rx_sa)) {
+ rtnl_unlock();
+ return PTR_ERR(rx_sa);
+ }
+
+ if (rx_sa->active) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+
+ RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
+ clear_rx_sa(rx_sa);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev;
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ sci_t sci;
+ struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_rxsc_config(attrs, tb_rxsc))
+ return -EINVAL;
+
+ if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
+ return -EINVAL;
+
+ rtnl_lock();
+ dev = get_dev_from_nl(genl_info_net(info), info->attrs);
+ if (IS_ERR(dev)) {
+ rtnl_unlock();
+ return PTR_ERR(dev);
+ }
+
+ secy = &macsec_priv(dev)->secy;
+ sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
+
+ rx_sc = del_rx_sc(secy, sci);
+ if (!rx_sc) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+
+ free_rx_sc(rx_sc);
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev;
+ struct macsec_secy *secy;
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_tx_sa *tx_sa;
+ u8 assoc_num;
+ struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_sa_config(attrs, tb_sa))
+ return -EINVAL;
+
+ rtnl_lock();
+ tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
+ &dev, &secy, &tx_sc, &assoc_num);
+ if (IS_ERR(tx_sa)) {
+ rtnl_unlock();
+ return PTR_ERR(tx_sa);
+ }
+
+ if (tx_sa->active) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+
+ RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
+ clear_tx_sa(tx_sa);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static bool validate_upd_sa(struct nlattr **attrs)
+{
+ if (!attrs[MACSEC_SA_ATTR_AN] ||
+ attrs[MACSEC_SA_ATTR_KEY] ||
+ attrs[MACSEC_SA_ATTR_KEYID])
+ return false;
+
+ if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
+ return false;
+
+ if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ return false;
+
+ if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
+ if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
+ return false;
+ }
+
+ return true;
+}
+
+static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev;
+ struct macsec_secy *secy;
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_tx_sa *tx_sa;
+ u8 assoc_num;
+ struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_sa_config(attrs, tb_sa))
+ return -EINVAL;
+
+ if (!validate_upd_sa(tb_sa))
+ return -EINVAL;
+
+ rtnl_lock();
+ tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
+ &dev, &secy, &tx_sc, &assoc_num);
+ if (IS_ERR(tx_sa)) {
+ rtnl_unlock();
+ return PTR_ERR(tx_sa);
+ }
+
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&tx_sa->lock);
+ tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ spin_unlock_bh(&tx_sa->lock);
+ }
+
+ if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+ tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+
+ if (assoc_num == tx_sc->encoding_sa)
+ secy->operational = tx_sa->active;
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev;
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ struct macsec_rx_sa *rx_sa;
+ u8 assoc_num;
+ struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_rxsc_config(attrs, tb_rxsc))
+ return -EINVAL;
+
+ if (parse_sa_config(attrs, tb_sa))
+ return -EINVAL;
+
+ if (!validate_upd_sa(tb_sa))
+ return -EINVAL;
+
+ rtnl_lock();
+ rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
+ &dev, &secy, &rx_sc, &assoc_num);
+ if (IS_ERR(rx_sa)) {
+ rtnl_unlock();
+ return PTR_ERR(rx_sa);
+ }
+
+ if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ spin_lock_bh(&rx_sa->lock);
+ rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ spin_unlock_bh(&rx_sa->lock);
+ }
+
+ if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+ rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+
+ rtnl_unlock();
+ return 0;
+}
+
+static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr **attrs = info->attrs;
+ struct net_device *dev;
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+
+ if (!attrs[MACSEC_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (parse_rxsc_config(attrs, tb_rxsc))
+ return -EINVAL;
+
+ if (!validate_add_rxsc(tb_rxsc))
+ return -EINVAL;
+
+ rtnl_lock();
+ rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
+ if (IS_ERR(rx_sc)) {
+ rtnl_unlock();
+ return PTR_ERR(rx_sc);
+ }
+
+ if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
+ bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+
+ if (rx_sc->active != new)
+ secy->n_rx_sc += new ? 1 : -1;
+
+ rx_sc->active = new;
+ }
+
+ rtnl_unlock();
+
+ return 0;
+}
+
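+/*
+ * The copy_*_stats() helpers below fold the per-CPU counters into a
+ * single sum before emitting them as netlink attributes. The 32-bit
+ * per-SA counters are read directly; the 64-bit SC and SecY counters
+ * are snapshotted under u64_stats_fetch_begin_irq() so the values
+ * stay consistent on 32-bit hosts.
+ */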
+static int copy_tx_sa_stats(struct sk_buff *skb,
+ struct macsec_tx_sa_stats __percpu *pstats)
+{
+ struct macsec_tx_sa_stats sum = {0, };
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+
+ sum.OutPktsProtected += stats->OutPktsProtected;
+ sum.OutPktsEncrypted += stats->OutPktsEncrypted;
+ }
+
+ if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int copy_rx_sa_stats(struct sk_buff *skb,
+ struct macsec_rx_sa_stats __percpu *pstats)
+{
+ struct macsec_rx_sa_stats sum = {0, };
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+
+ sum.InPktsOK += stats->InPktsOK;
+ sum.InPktsInvalid += stats->InPktsInvalid;
+ sum.InPktsNotValid += stats->InPktsNotValid;
+ sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
+ sum.InPktsUnusedSA += stats->InPktsUnusedSA;
+ }
+
+ if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
+ nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int copy_rx_sc_stats(struct sk_buff *skb,
+ struct pcpu_rx_sc_stats __percpu *pstats)
+{
+ struct macsec_rx_sc_stats sum = {0, };
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ const struct pcpu_rx_sc_stats *stats;
+ struct macsec_rx_sc_stats tmp;
+ unsigned int start;
+
+ stats = per_cpu_ptr(pstats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ memcpy(&tmp, &stats->stats, sizeof(tmp));
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ sum.InOctetsValidated += tmp.InOctetsValidated;
+ sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
+ sum.InPktsUnchecked += tmp.InPktsUnchecked;
+ sum.InPktsDelayed += tmp.InPktsDelayed;
+ sum.InPktsOK += tmp.InPktsOK;
+ sum.InPktsInvalid += tmp.InPktsInvalid;
+ sum.InPktsLate += tmp.InPktsLate;
+ sum.InPktsNotValid += tmp.InPktsNotValid;
+ sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
+ sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
+ }
+
+ if (nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, sum.InOctetsValidated) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, sum.InOctetsDecrypted) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, sum.InPktsUnchecked) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, sum.InPktsDelayed) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, sum.InPktsLate) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
+ nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int copy_tx_sc_stats(struct sk_buff *skb,
+ struct pcpu_tx_sc_stats __percpu *pstats)
+{
+ struct macsec_tx_sc_stats sum = {0, };
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ const struct pcpu_tx_sc_stats *stats;
+ struct macsec_tx_sc_stats tmp;
+ unsigned int start;
+
+ stats = per_cpu_ptr(pstats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ memcpy(&tmp, &stats->stats, sizeof(tmp));
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ sum.OutPktsProtected += tmp.OutPktsProtected;
+ sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
+ sum.OutOctetsProtected += tmp.OutOctetsProtected;
+ sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
+ }
+
+ if (nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
+ nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted) ||
+ nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, sum.OutOctetsProtected) ||
+ nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, sum.OutOctetsEncrypted))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int copy_secy_stats(struct sk_buff *skb,
+ struct pcpu_secy_stats __percpu *pstats)
+{
+ struct macsec_dev_stats sum = {0, };
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ const struct pcpu_secy_stats *stats;
+ struct macsec_dev_stats tmp;
+ unsigned int start;
+
+ stats = per_cpu_ptr(pstats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ memcpy(&tmp, &stats->stats, sizeof(tmp));
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ sum.OutPktsUntagged += tmp.OutPktsUntagged;
+ sum.InPktsUntagged += tmp.InPktsUntagged;
+ sum.OutPktsTooLong += tmp.OutPktsTooLong;
+ sum.InPktsNoTag += tmp.InPktsNoTag;
+ sum.InPktsBadTag += tmp.InPktsBadTag;
+ sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
+ sum.InPktsNoSCI += tmp.InPktsNoSCI;
+ sum.InPktsOverrun += tmp.InPktsOverrun;
+ }
+
+ if (nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, sum.OutPktsUntagged) ||
+ nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, sum.InPktsUntagged) ||
+ nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, sum.OutPktsTooLong) ||
+ nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, sum.InPktsNoTag) ||
+ nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, sum.InPktsBadTag) ||
+ nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, sum.InPktsUnknownSCI) ||
+ nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, sum.InPktsNoSCI) ||
+ nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, sum.InPktsOverrun))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
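+/*
+ * Emit the MACSEC_ATTR_SECY nest describing the SecY and its transmit
+ * SC. Returns nonzero on failure so the caller can cancel the message.
+ */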
+static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
+{
+ struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
+
+ if (!secy_nest)
+ return 1;
+
+ if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
+ nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
+ nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
+ goto cancel;
+
+ if (secy->replay_protect) {
+ if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
+ goto cancel;
+ }
+
+ nla_nest_end(skb, secy_nest);
+ return 0;
+
+cancel:
+ nla_nest_cancel(skb, secy_nest);
+ return 1;
+}
+
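+/*
+ * Dump one SecY as a single NLM_F_MULTI message: the SecY attributes,
+ * its TX SC and SecY statistics, the list of TX SAs, and the list of
+ * RX SCs, each with its own statistics and nested RX SAs.
+ */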
+static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct macsec_rx_sc *rx_sc;
+ struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ struct nlattr *txsa_list, *rxsc_list;
+ int i, j;
+ void *hdr;
+ struct nlattr *attr;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
+ goto nla_put_failure;
+
+ if (nla_put_secy(secy, skb))
+ goto nla_put_failure;
+
+ attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
+ if (!attr)
+ goto nla_put_failure;
+ if (copy_tx_sc_stats(skb, tx_sc->stats)) {
+ nla_nest_cancel(skb, attr);
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, attr);
+
+ attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
+ if (!attr)
+ goto nla_put_failure;
+ if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
+ nla_nest_cancel(skb, attr);
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, attr);
+
+ txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
+ if (!txsa_list)
+ goto nla_put_failure;
+ for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
+ struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
+ struct nlattr *txsa_nest;
+
+ if (!tx_sa)
+ continue;
+
+ txsa_nest = nla_nest_start(skb, j++);
+ if (!txsa_nest) {
+ nla_nest_cancel(skb, txsa_list);
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
+ nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
+ nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, tx_sa->key.id) ||
+ nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
+ nla_nest_cancel(skb, txsa_nest);
+ nla_nest_cancel(skb, txsa_list);
+ goto nla_put_failure;
+ }
+
+ attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
+ if (!attr) {
+ nla_nest_cancel(skb, txsa_nest);
+ nla_nest_cancel(skb, txsa_list);
+ goto nla_put_failure;
+ }
+ if (copy_tx_sa_stats(skb, tx_sa->stats)) {
+ nla_nest_cancel(skb, attr);
+ nla_nest_cancel(skb, txsa_nest);
+ nla_nest_cancel(skb, txsa_list);
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, attr);
+
+ nla_nest_end(skb, txsa_nest);
+ }
+ nla_nest_end(skb, txsa_list);
+
+ rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
+ if (!rxsc_list)
+ goto nla_put_failure;
+
+ j = 1;
+ for_each_rxsc_rtnl(secy, rx_sc) {
+ int k;
+ struct nlattr *rxsa_list;
+ struct nlattr *rxsc_nest = nla_nest_start(skb, j++);
+
+ if (!rxsc_nest) {
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
+ nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci)) {
+ nla_nest_cancel(skb, rxsc_nest);
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+
+ attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
+ if (!attr) {
+ nla_nest_cancel(skb, rxsc_nest);
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+ if (copy_rx_sc_stats(skb, rx_sc->stats)) {
+ nla_nest_cancel(skb, attr);
+ nla_nest_cancel(skb, rxsc_nest);
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, attr);
+
+ rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
+ if (!rxsa_list) {
+ nla_nest_cancel(skb, rxsc_nest);
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+
+ for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
+ struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
+ struct nlattr *rxsa_nest;
+
+ if (!rx_sa)
+ continue;
+
+ rxsa_nest = nla_nest_start(skb, k++);
+ if (!rxsa_nest) {
+ nla_nest_cancel(skb, rxsa_list);
+ nla_nest_cancel(skb, rxsc_nest);
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+
+ attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
+ if (!attr) {
+ nla_nest_cancel(skb, rxsa_list);
+ nla_nest_cancel(skb, rxsc_nest);
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+ if (copy_rx_sa_stats(skb, rx_sa->stats)) {
+ nla_nest_cancel(skb, attr);
+ nla_nest_cancel(skb, rxsa_list);
+ nla_nest_cancel(skb, rxsc_nest);
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, attr);
+
+ if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
+ nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
+ nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, rx_sa->key.id) ||
+ nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
+ nla_nest_cancel(skb, rxsa_nest);
+ nla_nest_cancel(skb, rxsc_nest);
+ nla_nest_cancel(skb, rxsc_list);
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, rxsa_nest);
+ }
+
+ nla_nest_end(skb, rxsa_list);
+ nla_nest_end(skb, rxsc_nest);
+ }
+
+ nla_nest_end(skb, rxsc_list);
+
+ genlmsg_end(skb, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
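+/*
+ * Walk all netdevs in the namespace under RTNL and dump every MACsec
+ * device. cb->args[0] records how far a previous pass got, so an
+ * interrupted dump resumes at the right device.
+ */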
+static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int dev_idx, d;
+
+ dev_idx = cb->args[0];
+
+ d = 0;
+ rtnl_lock();
+ for_each_netdev(net, dev) {
+ struct macsec_secy *secy;
+
+ if (d < dev_idx)
+ goto next;
+
+ if (!netif_is_macsec(dev))
+ goto next;
+
+ secy = &macsec_priv(dev)->secy;
+ if (dump_secy(secy, dev, skb, cb) < 0)
+ goto done;
+next:
+ d++;
+ }
+
+done:
+ rtnl_unlock();
+ cb->args[0] = d;
+ return skb->len;
+}
+
+static const struct genl_ops macsec_genl_ops[] = {
+ {
+ .cmd = MACSEC_CMD_GET_TXSC,
+ .dumpit = macsec_dump_txsc,
+ .policy = macsec_genl_policy,
+ },
+ {
+ .cmd = MACSEC_CMD_ADD_RXSC,
+ .doit = macsec_add_rxsc,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MACSEC_CMD_DEL_RXSC,
+ .doit = macsec_del_rxsc,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MACSEC_CMD_UPD_RXSC,
+ .doit = macsec_upd_rxsc,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MACSEC_CMD_ADD_TXSA,
+ .doit = macsec_add_txsa,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MACSEC_CMD_DEL_TXSA,
+ .doit = macsec_del_txsa,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MACSEC_CMD_UPD_TXSA,
+ .doit = macsec_upd_txsa,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MACSEC_CMD_ADD_RXSA,
+ .doit = macsec_add_rxsa,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MACSEC_CMD_DEL_RXSA,
+ .doit = macsec_del_rxsa,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = MACSEC_CMD_UPD_RXSA,
+ .doit = macsec_upd_rxsa,
+ .policy = macsec_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
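+/*
+ * Transmit path: frames either bypass MACsec entirely when protection
+ * is disabled, are dropped while the SecY is not operational, or are
+ * protected/encrypted and then queued on the underlying device.
+ */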
+static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct macsec_dev *macsec = netdev_priv(dev);
+ struct macsec_secy *secy = &macsec->secy;
+ struct pcpu_secy_stats *secy_stats;
+ int ret, len;
+
+ /* 10.5 of IEEE 802.1AE-2006: transmit untagged if protectFrames is not set */
+ if (!secy->protect_frames) {
+ secy_stats = this_cpu_ptr(macsec->stats);
+ u64_stats_update_begin(&secy_stats->syncp);
+ secy_stats->stats.OutPktsUntagged++;
+ u64_stats_update_end(&secy_stats->syncp);
+ len = skb->len;
+ ret = dev_queue_xmit(skb);
+ count_tx(dev, ret, len);
+ return ret;
+ }
+
+ if (!secy->operational) {
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ skb = macsec_encrypt(skb, dev);
+ if (IS_ERR(skb)) {
+ if (PTR_ERR(skb) != -EINPROGRESS)
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
+
+ macsec_encrypt_finish(skb, dev);
+ len = skb->len;
+ ret = dev_queue_xmit(skb);
+ count_tx(dev, ret, len);
+ return ret;
+}
+
+#define MACSEC_FEATURES \
+ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+static int macsec_dev_init(struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ dev->features = real_dev->features & MACSEC_FEATURES;
+ dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+
+ dev->needed_headroom = real_dev->needed_headroom +
+ MACSEC_NEEDED_HEADROOM;
+ dev->needed_tailroom = real_dev->needed_tailroom +
+ MACSEC_NEEDED_TAILROOM;
+
+ if (is_zero_ether_addr(dev->dev_addr))
+ eth_hw_addr_inherit(dev, real_dev);
+ if (is_zero_ether_addr(dev->broadcast))
+ memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+
+ return 0;
+}
+
+static void macsec_dev_uninit(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+}
+
+static netdev_features_t macsec_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+
+ features &= real_dev->features & MACSEC_FEATURES;
+ features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+
+ return features;
+}
+
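+/*
+ * Opening the MACsec device mirrors its addressing state onto the
+ * underlying device: the unicast address, allmulti and promiscuity
+ * are propagated, and carrier follows the real device.
+ */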
+static int macsec_dev_open(struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+ int err;
+
+ if (!(real_dev->flags & IFF_UP))
+ return -ENETDOWN;
+
+ err = dev_uc_add(real_dev, dev->dev_addr);
+ if (err < 0)
+ return err;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(real_dev, 1);
+ if (err < 0)
+ goto del_unicast;
+ }
+
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(real_dev, 1);
+ if (err < 0)
+ goto clear_allmulti;
+ }
+
+ if (netif_carrier_ok(real_dev))
+ netif_carrier_on(dev);
+
+ return 0;
+clear_allmulti:
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, -1);
+del_unicast:
+ dev_uc_del(real_dev, dev->dev_addr);
+ netif_carrier_off(dev);
+ return err;
+}
+
+static int macsec_dev_stop(struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+
+ netif_carrier_off(dev);
+
+ dev_mc_unsync(real_dev, dev);
+ dev_uc_unsync(real_dev, dev);
+
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, -1);
+
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, -1);
+
+ dev_uc_del(real_dev, dev->dev_addr);
+
+ return 0;
+}
+
+static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
+{
+ struct net_device *real_dev = macsec_priv(dev)->real_dev;
+
+ if (!(dev->flags & IFF_UP))
+ return;
+
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(real_dev,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+}
+
+static void macsec_dev_set_rx_mode(struct net_device *dev)
+{
+ struct net_device *real_dev = macsec_priv(dev)->real_dev;
+
+ dev_mc_sync(real_dev, dev);
+ dev_uc_sync(real_dev, dev);
+}
+
+static int macsec_set_mac_address(struct net_device *dev, void *p)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!(dev->flags & IFF_UP))
+ goto out;
+
+ err = dev_uc_add(real_dev, addr->sa_data);
+ if (err < 0)
+ return err;
+
+ dev_uc_del(real_dev, dev->dev_addr);
+
+out:
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+ return 0;
+}
+
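+/*
+ * The MTU must leave room on the real device for the SecTAG and ICV
+ * that MACsec adds to every protected frame.
+ */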
+static int macsec_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
+
+ if (macsec->real_dev->mtu - extra < new_mtu)
+ return -ERANGE;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
+{
+ int cpu;
+
+ if (!dev->tstats)
+ return s;
+
+ for_each_possible_cpu(cpu) {
+ struct pcpu_sw_netstats *stats;
+ struct pcpu_sw_netstats tmp;
+ unsigned int start;
+
+ stats = per_cpu_ptr(dev->tstats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ tmp.rx_packets = stats->rx_packets;
+ tmp.rx_bytes = stats->rx_bytes;
+ tmp.tx_packets = stats->tx_packets;
+ tmp.tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ s->rx_packets += tmp.rx_packets;
+ s->rx_bytes += tmp.rx_bytes;
+ s->tx_packets += tmp.tx_packets;
+ s->tx_bytes += tmp.tx_bytes;
+ }
+
+ s->rx_dropped = dev->stats.rx_dropped;
+ s->tx_dropped = dev->stats.tx_dropped;
+
+ return s;
+}
+
+static int macsec_get_iflink(const struct net_device *dev)
+{
+ return macsec_priv(dev)->real_dev->ifindex;
+}
+
+static const struct net_device_ops macsec_netdev_ops = {
+ .ndo_init = macsec_dev_init,
+ .ndo_uninit = macsec_dev_uninit,
+ .ndo_open = macsec_dev_open,
+ .ndo_stop = macsec_dev_stop,
+ .ndo_fix_features = macsec_fix_features,
+ .ndo_change_mtu = macsec_change_mtu,
+ .ndo_set_rx_mode = macsec_dev_set_rx_mode,
+ .ndo_change_rx_flags = macsec_dev_change_rx_flags,
+ .ndo_set_mac_address = macsec_set_mac_address,
+ .ndo_start_xmit = macsec_start_xmit,
+ .ndo_get_stats64 = macsec_get_stats64,
+ .ndo_get_iflink = macsec_get_iflink,
+};
+
+static const struct device_type macsec_type = {
+ .name = "macsec",
+};
+
+static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
+ [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
+ [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
+ [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
+ [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
+ [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
+ [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
+ [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
+ [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
+ [IFLA_MACSEC_ES] = { .type = NLA_U8 },
+ [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
+ [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
+ [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
+};
+
+static void macsec_free_netdev(struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+
+ free_percpu(macsec->stats);
+ free_percpu(macsec->secy.tx_sc.stats);
+
+ dev_put(real_dev);
+ free_netdev(dev);
+}
+
+static void macsec_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->tx_queue_len = 0;
+ dev->netdev_ops = &macsec_netdev_ops;
+ dev->destructor = macsec_free_netdev;
+
+ eth_zero_addr(dev->broadcast);
+}
+
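+/*
+ * Apply the IFLA_MACSEC_* attributes shared by newlink and changelink.
+ * Changing the encoding SA also refreshes the SecY's operational state
+ * from that SA's active flag.
+ */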
+static void macsec_changelink_common(struct net_device *dev,
+ struct nlattr *data[])
+{
+ struct macsec_secy *secy;
+ struct macsec_tx_sc *tx_sc;
+
+ secy = &macsec_priv(dev)->secy;
+ tx_sc = &secy->tx_sc;
+
+ if (data[IFLA_MACSEC_ENCODING_SA]) {
+ struct macsec_tx_sa *tx_sa;
+
+ tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
+ tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
+
+ secy->operational = tx_sa && tx_sa->active;
+ }
+
+ if (data[IFLA_MACSEC_WINDOW])
+ secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+ if (data[IFLA_MACSEC_ENCRYPT])
+ tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
+
+ if (data[IFLA_MACSEC_PROTECT])
+ secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
+
+ if (data[IFLA_MACSEC_INC_SCI])
+ tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
+
+ if (data[IFLA_MACSEC_ES])
+ tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
+
+ if (data[IFLA_MACSEC_SCB])
+ tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
+
+ if (data[IFLA_MACSEC_REPLAY_PROTECT])
+ secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
+
+ if (data[IFLA_MACSEC_VALIDATION])
+ secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
+}
+
+static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ if (!data)
+ return 0;
+
+ if (data[IFLA_MACSEC_CIPHER_SUITE] ||
+ data[IFLA_MACSEC_ICV_LEN] ||
+ data[IFLA_MACSEC_SCI] ||
+ data[IFLA_MACSEC_PORT])
+ return -EINVAL;
+
+ macsec_changelink_common(dev, data);
+
+ return 0;
+}
+
+static void macsec_del_dev(struct macsec_dev *macsec)
+{
+ int i;
+
+ while (macsec->secy.rx_sc) {
+ struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
+
+ rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
+ free_rx_sc(rx_sc);
+ }
+
+ for (i = 0; i < MACSEC_NUM_AN; i++) {
+ struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
+
+ if (sa) {
+ RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
+ clear_tx_sa(sa);
+ }
+ }
+}
+
+static void macsec_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev = macsec->real_dev;
+ struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+
+ unregister_netdevice_queue(dev, head);
+ list_del_rcu(&macsec->secys);
+ if (list_empty(&rxd->secys))
+ netdev_rx_handler_unregister(real_dev);
+
+ macsec_del_dev(macsec);
+}
+
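+/*
+ * The first MACsec device created on a real device allocates the
+ * shared rx_handler data and registers macsec_handle_frame();
+ * subsequent devices only join the existing secys list.
+ */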
+static int register_macsec_dev(struct net_device *real_dev,
+ struct net_device *dev)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+
+ if (!rxd) {
+ int err;
+
+ rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
+ if (!rxd)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&rxd->secys);
+
+ err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
+ rxd);
+ if (err < 0)
+ return err;
+ }
+
+ list_add_tail_rcu(&macsec->secys, &rxd->secys);
+ return 0;
+}
+
+static bool sci_exists(struct net_device *dev, sci_t sci)
+{
+ struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
+ struct macsec_dev *macsec;
+
+ list_for_each_entry(macsec, &rxd->secys, secys) {
+ if (macsec->secy.sci == sci)
+ return true;
+ }
+
+ return false;
+}
+
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+ return make_sci(dev->dev_addr, port);
+}
+
+static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct macsec_secy *secy = &macsec->secy;
+
+ macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
+ if (!macsec->stats)
+ return -ENOMEM;
+
+ secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
+ if (!secy->tx_sc.stats) {
+ free_percpu(macsec->stats);
+ return -ENOMEM;
+ }
+
+ if (sci == MACSEC_UNDEF_SCI)
+ sci = dev_to_sci(dev, MACSEC_PORT_ES);
+
+ secy->netdev = dev;
+ secy->operational = true;
+ secy->key_len = DEFAULT_SAK_LEN;
+ secy->icv_len = icv_len;
+ secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
+ secy->protect_frames = true;
+ secy->replay_protect = false;
+
+ secy->sci = sci;
+ secy->tx_sc.active = true;
+ secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
+ secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
+ secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
+ secy->tx_sc.end_station = false;
+ secy->tx_sc.scb = false;
+
+ return 0;
+}
+
+static int macsec_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct macsec_dev *macsec = macsec_priv(dev);
+ struct net_device *real_dev;
+ int err;
+ sci_t sci;
+ u8 icv_len = DEFAULT_ICV_LEN;
+ rx_handler_func_t *rx_handler;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+ real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev)
+ return -ENODEV;
+
+ dev->priv_flags |= IFF_MACSEC;
+
+ macsec->real_dev = real_dev;
+
+ if (data && data[IFLA_MACSEC_ICV_LEN])
+ icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
+ dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
+
+ rx_handler = rtnl_dereference(real_dev->rx_handler);
+ if (rx_handler && rx_handler != macsec_handle_frame)
+ return -EBUSY;
+
+ err = register_netdevice(dev);
+ if (err < 0)
+ return err;
+
+ /* need to be already registered so that ->init has run and
+ * the MAC addr is set
+ */
+ if (data && data[IFLA_MACSEC_SCI])
+ sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
+ else if (data && data[IFLA_MACSEC_PORT])
+ sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
+ else
+ sci = dev_to_sci(dev, MACSEC_PORT_ES);
+
+ if (rx_handler && sci_exists(real_dev, sci)) {
+ err = -EBUSY;
+ goto unregister;
+ }
+
+ err = macsec_add_dev(dev, sci, icv_len);
+ if (err)
+ goto unregister;
+
+ if (data)
+ macsec_changelink_common(dev, data);
+
+ err = register_macsec_dev(real_dev, dev);
+ if (err < 0)
+ goto del_dev;
+
+ dev_hold(real_dev);
+
+ return 0;
+
+del_dev:
+ macsec_del_dev(macsec);
+unregister:
+ unregister_netdevice(dev);
+ return err;
+}
+
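+/*
+ * Validate link attributes: only the default cipher suite (under
+ * either of its accepted IDs) with a sane ICV length is allowed,
+ * boolean flags must be 0 or 1, the ES, SCB and INC_SCI flags are
+ * mutually exclusive, and enabling replay protection requires a
+ * replay window.
+ */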
+static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
+{
+ u64 csid = DEFAULT_CIPHER_ID;
+ u8 icv_len = DEFAULT_ICV_LEN;
+ int flag;
+ bool es, scb, sci;
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_MACSEC_CIPHER_SUITE])
+ csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
+
+ if (data[IFLA_MACSEC_ICV_LEN])
+ icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
+
+ switch (csid) {
+ case DEFAULT_CIPHER_ID:
+ case DEFAULT_CIPHER_ALT:
+ if (icv_len < MACSEC_MIN_ICV_LEN ||
+ icv_len > MACSEC_MAX_ICV_LEN)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (data[IFLA_MACSEC_ENCODING_SA]) {
+ if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
+ return -EINVAL;
+ }
+
+ for (flag = IFLA_MACSEC_ENCODING_SA + 1;
+ flag < IFLA_MACSEC_VALIDATION;
+ flag++) {
+ if (data[flag]) {
+ if (nla_get_u8(data[flag]) > 1)
+ return -EINVAL;
+ }
+ }
+
+ es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
+ sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
+ scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
+
+ if ((sci && (scb || es)) || (scb && es))
+ return -EINVAL;
+
+ if (data[IFLA_MACSEC_VALIDATION] &&
+ nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
+ return -EINVAL;
+
+ if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
+ nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
+ !data[IFLA_MACSEC_WINDOW])
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct net *macsec_get_link_net(const struct net_device *dev)
+{
+ return dev_net(macsec_priv(dev)->real_dev);
+}
+
+static size_t macsec_get_size(const struct net_device *dev)
+{
+ return 0 +
+ nla_total_size(8) + /* SCI */
+ nla_total_size(1) + /* ICV_LEN */
+ nla_total_size(8) + /* CIPHER_SUITE */
+ nla_total_size(4) + /* WINDOW */
+ nla_total_size(1) + /* ENCODING_SA */
+ nla_total_size(1) + /* ENCRYPT */
+ nla_total_size(1) + /* PROTECT */
+ nla_total_size(1) + /* INC_SCI */
+ nla_total_size(1) + /* ES */
+ nla_total_size(1) + /* SCB */
+ nla_total_size(1) + /* REPLAY_PROTECT */
+ nla_total_size(1) + /* VALIDATION */
+ 0;
+}
+
+static int macsec_fill_info(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct macsec_secy *secy = &macsec_priv(dev)->secy;
+ struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
+ nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
+ nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+ nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
+ nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
+ nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
+ nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
+ nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
+ nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
+ nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
+ nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
+ 0)
+ goto nla_put_failure;
+
+ if (secy->replay_protect) {
+ if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
+ goto nla_put_failure;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops macsec_link_ops __read_mostly = {
+ .kind = "macsec",
+ .priv_size = sizeof(struct macsec_dev),
+ .maxtype = IFLA_MACSEC_MAX,
+ .policy = macsec_rtnl_policy,
+ .setup = macsec_setup,
+ .validate = macsec_validate_attr,
+ .newlink = macsec_newlink,
+ .changelink = macsec_changelink,
+ .dellink = macsec_dellink,
+ .get_size = macsec_get_size,
+ .fill_info = macsec_fill_info,
+ .get_link_net = macsec_get_link_net,
+};
+
+static bool is_macsec_master(struct net_device *dev)
+{
+ return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
+}
+
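+/*
+ * React to events on the underlying device: tear down every MACsec
+ * device stacked on it when it unregisters, and clamp their MTUs
+ * when its MTU no longer leaves room for the MACsec overhead.
+ */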
+static int macsec_notify(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
+ LIST_HEAD(head);
+
+ if (!is_macsec_master(real_dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UNREGISTER: {
+ struct macsec_dev *m, *n;
+ struct macsec_rxh_data *rxd;
+
+ rxd = macsec_data_rtnl(real_dev);
+ list_for_each_entry_safe(m, n, &rxd->secys, secys) {
+ macsec_dellink(m->secy.netdev, &head);
+ }
+ unregister_netdevice_many(&head);
+ break;
+ }
+ case NETDEV_CHANGEMTU: {
+ struct macsec_dev *m;
+ struct macsec_rxh_data *rxd;
+
+ rxd = macsec_data_rtnl(real_dev);
+ list_for_each_entry(m, &rxd->secys, secys) {
+ struct net_device *dev = m->secy.netdev;
+ unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
+ macsec_extra_len(true));
+
+ if (dev->mtu > mtu)
+ dev_set_mtu(dev, mtu);
+ }
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block macsec_notifier = {
+ .notifier_call = macsec_notify,
+};
+
+static int __init macsec_init(void)
+{
+ int err;
+
+ pr_info("MACsec IEEE 802.1AE\n");
+ err = register_netdevice_notifier(&macsec_notifier);
+ if (err)
+ return err;
+
+ err = rtnl_link_register(&macsec_link_ops);
+ if (err)
+ goto notifier;
+
+ err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops);
+ if (err)
+ goto rtnl;
+
+ return 0;
+
+rtnl:
+ rtnl_link_unregister(&macsec_link_ops);
+notifier:
+ unregister_netdevice_notifier(&macsec_notifier);
+ return err;
+}
+
+static void __exit macsec_exit(void)
+{
+ genl_unregister_family(&macsec_fam);
+ rtnl_link_unregister(&macsec_link_ops);
+ unregister_netdevice_notifier(&macsec_notifier);
+}
+
+module_init(macsec_init);
+module_exit(macsec_exit);
+
+MODULE_ALIAS_RTNL_LINK("macsec");
+
+MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 94e688805dd2..2bcf1f321bea 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -803,6 +803,7 @@ static int macvlan_init(struct net_device *dev)
dev->hw_features |= NETIF_F_LRO;
dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES;
dev->gso_max_size = lowerdev->gso_max_size;
+ dev->gso_max_segs = lowerdev->gso_max_segs;
dev->hard_header_len = lowerdev->hard_header_len;
macvlan_set_lockdep_class(dev);
@@ -940,12 +941,12 @@ static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
}
-static int macvlan_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
const struct macvlan_dev *vlan = netdev_priv(dev);
- return __ethtool_get_settings(vlan->lowerdev, cmd);
+ return __ethtool_get_link_ksettings(vlan->lowerdev, cmd);
}
static netdev_features_t macvlan_fix_features(struct net_device *dev,
@@ -1020,7 +1021,7 @@ static int macvlan_dev_get_iflink(const struct net_device *dev)
static const struct ethtool_ops macvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = macvlan_ethtool_get_settings,
+ .get_link_ksettings = macvlan_ethtool_get_link_ksettings,
.get_drvinfo = macvlan_ethtool_get_drvinfo,
};
@@ -1069,7 +1070,7 @@ EXPORT_SYMBOL_GPL(macvlan_common_setup);
static void macvlan_setup(struct net_device *dev)
{
macvlan_common_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
}
static int macvlan_port_create(struct net_device *dev)
@@ -1532,6 +1533,7 @@ static int macvlan_device_event(struct notifier_block *unused,
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
vlan->dev->gso_max_size = dev->gso_max_size;
+ vlan->dev->gso_max_segs = dev->gso_max_segs;
netdev_update_features(vlan->dev);
}
break;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index d636d051fac8..95394edd1ed5 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
if (copylen > good_linear)
copylen = good_linear;
+ else if (copylen < ETH_HLEN)
+ copylen = ETH_HLEN;
linear = copylen;
i = *from;
iov_iter_advance(&i, copylen);
@@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
if (!zerocopy) {
copylen = len;
- if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
+ linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
+ if (linear > good_linear)
linear = good_linear;
- else
- linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
+ else if (linear < ETH_HLEN)
+ linear = ETH_HLEN;
}
skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index f0a77020037a..6dad9a9c356c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -183,15 +183,29 @@ config MDIO_GPIO
To compile this driver as a module, choose M here: the module
will be called mdio-gpio.
+config MDIO_CAVIUM
+ tristate
+
config MDIO_OCTEON
- tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
+ tristate "Support for MDIO buses on Octeon and some ThunderX SOCs"
depends on 64BIT
depends on HAS_IOMEM
+ select MDIO_CAVIUM
help
-
This module provides a driver for the Octeon and ThunderX MDIO
- busses. It is required by the Octeon and ThunderX ethernet device
- drivers.
+ buses. It is required by the Octeon and ThunderX ethernet device
+ drivers on some systems.
+
+config MDIO_THUNDER
+ tristate "Support for MDIO buses on ThunderX SOCs"
+ depends on 64BIT
+ depends on PCI
+ select MDIO_CAVIUM
+ help
+ This driver supports the MDIO interfaces found on Cavium
+ ThunderX SoCs when the MDIO bus device appears as a PCI
+ device.
+
config MDIO_SUN4I
tristate "Allwinner sun4i MDIO interface support"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 680e88f9915a..fcdbb9299fab 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
+obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
obj-$(CONFIG_AMD_PHY) += amd.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 2174ec937b4d..b3ffaee30858 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -52,6 +52,9 @@
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
+
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
@@ -206,6 +209,7 @@ static int at803x_suspend(struct phy_device *phydev)
{
int value;
int wol_enabled;
+ int ccr;
mutex_lock(&phydev->lock);
@@ -221,6 +225,16 @@ static int at803x_suspend(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, value);
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
+ goto done;
+
+ /* also power-down SGMII interface */
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
+ phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
+
+done:
mutex_unlock(&phydev->lock);
return 0;
@@ -229,6 +243,7 @@ static int at803x_suspend(struct phy_device *phydev)
static int at803x_resume(struct phy_device *phydev)
{
int value;
+ int ccr;
mutex_lock(&phydev->lock);
@@ -236,6 +251,17 @@ static int at803x_resume(struct phy_device *phydev)
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
+ goto done;
+
+ /* also power-up SGMII interface */
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
+ value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE);
+ phy_write(phydev, MII_BMCR, value);
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
+
+done:
mutex_unlock(&phydev->lock);
return 0;
@@ -251,12 +277,16 @@ static int at803x_probe(struct phy_device *phydev)
if (!priv)
return -ENOMEM;
- gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (phydev->drv->phy_id != ATH8030_PHY_ID)
+ goto does_not_require_reset_workaround;
+
+ gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod_reset))
return PTR_ERR(gpiod_reset);
priv->gpiod_reset = gpiod_reset;
+does_not_require_reset_workaround:
phydev->priv = priv;
return 0;
@@ -336,10 +366,10 @@ static void at803x_link_change_notify(struct phy_device *phydev)
at803x_context_save(phydev, &context);
- gpiod_set_value(priv->gpiod_reset, 0);
- msleep(1);
gpiod_set_value(priv->gpiod_reset, 1);
msleep(1);
+ gpiod_set_value(priv->gpiod_reset, 0);
+ msleep(1);
at803x_context_restore(phydev, &context);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index db507e3bcab9..9636da0b6efc 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -24,7 +24,7 @@
#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
#define MII_BCM7XXX_100TX_DISC 0x14
#define MII_BCM7XXX_AUX_MODE 0x1d
-#define MII_BCM7XX_64CLK_MDIO BIT(12)
+#define MII_BCM7XXX_64CLK_MDIO BIT(12)
#define MII_BCM7XXX_TEST 0x1f
#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
@@ -247,7 +247,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
int ret;
/* Enable 64 clock MDIO */
- phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
+ phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XXX_64CLK_MDIO);
phy_read(phydev, MII_BCM7XXX_AUX_MODE);
/* set shadow mode 2 */
@@ -317,6 +317,21 @@ static int bcm7xxx_suspend(struct phy_device *phydev)
.resume = bcm7xxx_28nm_resume, \
}
+#define BCM7XXX_40NM_EPHY(_oui, _name) \
+{ \
+ .phy_id = (_oui), \
+ .phy_id_mask = 0xfffffff0, \
+ .name = _name, \
+ .features = PHY_BASIC_FEATURES | \
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause, \
+ .flags = PHY_IS_INTERNAL, \
+ .config_init = bcm7xxx_config_init, \
+ .config_aneg = genphy_config_aneg, \
+ .read_status = genphy_read_status, \
+ .suspend = bcm7xxx_suspend, \
+ .resume = bcm7xxx_config_init, \
+}
+
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
@@ -324,48 +339,19 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
-{
- .phy_id = PHY_ID_BCM7425,
- .phy_id_mask = 0xfffffff0,
- .name = "Broadcom BCM7425",
- .features = PHY_BASIC_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
- .flags = PHY_IS_INTERNAL,
- .config_init = bcm7xxx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_config_init,
-}, {
- .phy_id = PHY_ID_BCM7429,
- .phy_id_mask = 0xfffffff0,
- .name = "Broadcom BCM7429",
- .features = PHY_BASIC_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
- .flags = PHY_IS_INTERNAL,
- .config_init = bcm7xxx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_config_init,
-}, {
- .phy_id = PHY_ID_BCM7435,
- .phy_id_mask = 0xfffffff0,
- .name = "Broadcom BCM7435",
- .features = PHY_BASIC_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
- .flags = PHY_IS_INTERNAL,
- .config_init = bcm7xxx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_config_init,
-} };
+ BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
+ BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
+ BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
+ BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"),
+ BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"),
+};
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7364, 0xfffffff0, },
{ PHY_ID_BCM7366, 0xfffffff0, },
+ { PHY_ID_BCM7346, 0xfffffff0, },
+ { PHY_ID_BCM7362, 0xfffffff0, },
{ PHY_ID_BCM7425, 0xfffffff0, },
{ PHY_ID_BCM7429, 0xfffffff0, },
{ PHY_ID_BCM7439, 0xfffffff0, },
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 5e14e629c597..03d54c4adc88 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -1,7 +1,7 @@
/*
* Driver for the Texas Instruments DP83848 PHY
*
- * Copyright (C) 2015 Texas Instruments Inc.
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -16,11 +16,13 @@
#include <linux/module.h>
#include <linux/phy.h>
-#define DP83848_PHY_ID 0x20005c90
+#define TI_DP83848C_PHY_ID 0x20005ca0
+#define NS_DP83848C_PHY_ID 0x20005c90
+#define TLK10X_PHY_ID 0x2000a210
/* Registers */
-#define DP83848_MICR 0x11
-#define DP83848_MISR 0x12
+#define DP83848_MICR 0x11 /* MII Interrupt Control Register */
+#define DP83848_MISR 0x12 /* MII Interrupt Status Register */
/* MICR Register Fields */
#define DP83848_MICR_INT_OE BIT(0) /* Interrupt Output Enable */
@@ -36,6 +38,12 @@
#define DP83848_MISR_ED_INT_EN BIT(6) /* Energy detect */
#define DP83848_MISR_LQM_INT_EN BIT(7) /* Link Quality Monitor */
+#define DP83848_INT_EN_MASK \
+ (DP83848_MISR_ANC_INT_EN | \
+ DP83848_MISR_DUP_INT_EN | \
+ DP83848_MISR_SPD_INT_EN | \
+ DP83848_MISR_LINK_INT_EN)
+
static int dp83848_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, DP83848_MISR);
@@ -45,50 +53,58 @@ static int dp83848_ack_interrupt(struct phy_device *phydev)
static int dp83848_config_intr(struct phy_device *phydev)
{
- int err;
+ int control, ret;
+
+ control = phy_read(phydev, DP83848_MICR);
+ if (control < 0)
+ return control;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- err = phy_write(phydev, DP83848_MICR,
- DP83848_MICR_INT_OE |
- DP83848_MICR_INTEN);
- if (err < 0)
- return err;
-
- return phy_write(phydev, DP83848_MISR,
- DP83848_MISR_ANC_INT_EN |
- DP83848_MISR_DUP_INT_EN |
- DP83848_MISR_SPD_INT_EN |
- DP83848_MISR_LINK_INT_EN);
+ control |= DP83848_MICR_INT_OE;
+ control |= DP83848_MICR_INTEN;
+
+ ret = phy_write(phydev, DP83848_MISR, DP83848_INT_EN_MASK);
+ if (ret < 0)
+ return ret;
+ } else {
+ control &= ~DP83848_MICR_INTEN;
}
- return phy_write(phydev, DP83848_MICR, 0x0);
+ return phy_write(phydev, DP83848_MICR, control);
}
static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
- { DP83848_PHY_ID, 0xfffffff0 },
+ { TI_DP83848C_PHY_ID, 0xfffffff0 },
+ { NS_DP83848C_PHY_ID, 0xfffffff0 },
+ { TLK10X_PHY_ID, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+#define DP83848_PHY_DRIVER(_id, _name) \
+ { \
+ .phy_id = _id, \
+ .phy_id_mask = 0xfffffff0, \
+ .name = _name, \
+ .features = PHY_BASIC_FEATURES, \
+ .flags = PHY_HAS_INTERRUPT, \
+ \
+ .soft_reset = genphy_soft_reset, \
+ .config_init = genphy_config_init, \
+ .suspend = genphy_suspend, \
+ .resume = genphy_resume, \
+ .config_aneg = genphy_config_aneg, \
+ .read_status = genphy_read_status, \
+ \
+ /* IRQ related */ \
+ .ack_interrupt = dp83848_ack_interrupt, \
+ .config_intr = dp83848_config_intr, \
+ }
+
static struct phy_driver dp83848_driver[] = {
- {
- .phy_id = DP83848_PHY_ID,
- .phy_id_mask = 0xfffffff0,
- .name = "TI DP83848",
- .features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
-
- .soft_reset = genphy_soft_reset,
- .config_init = genphy_config_init,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
-
- /* IRQ related */
- .ack_interrupt = dp83848_ack_interrupt,
- .config_intr = dp83848_config_intr,
- },
+ DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
};
module_phy_driver(dp83848_driver);
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index ab9c473d75ea..fc07a8866020 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -285,7 +285,7 @@ err_regs:
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
-void fixed_phy_del(int phy_addr)
+static void fixed_phy_del(int phy_addr)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp, *tmp;
@@ -300,7 +300,6 @@ void fixed_phy_del(int phy_addr)
}
}
}
-EXPORT_SYMBOL_GPL(fixed_phy_del);
static int phy_fixed_addr;
static DEFINE_SPINLOCK(phy_fixed_addr_lock);
@@ -371,6 +370,14 @@ struct phy_device *fixed_phy_register(unsigned int irq,
}
EXPORT_SYMBOL_GPL(fixed_phy_register);
+void fixed_phy_unregister(struct phy_device *phy)
+{
+ phy_device_remove(phy);
+
+ fixed_phy_del(phy->mdio.addr);
+}
+EXPORT_SYMBOL_GPL(fixed_phy_unregister);
+
static int __init fixed_mdio_bus_init(void)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index ab1d0fcaf1d9..280e8795b463 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -133,6 +133,11 @@
#define MII_88E3016_DISABLE_SCRAMBLER 0x0200
#define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030
+#define MII_88E1510_GEN_CTRL_REG_1 0x14
+#define MII_88E1510_GEN_CTRL_REG_1_MODE_MASK 0x7
+#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */
+#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
+
MODULE_DESCRIPTION("Marvell PHY driver");
MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
@@ -631,6 +636,41 @@ static int m88e1111_config_init(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
+static int m88e1510_config_init(struct phy_device *phydev)
+{
+ int err;
+ int temp;
+
+ /* SGMII-to-Copper mode initialization */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Select page 18 */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 18);
+ if (err < 0)
+ return err;
+
+ /* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */
+ temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1);
+ temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK;
+ temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII;
+ err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+ if (err < 0)
+ return err;
+
+ /* PHY reset is necessary after changing MODE[2:0] */
+ temp |= MII_88E1510_GEN_CTRL_REG_1_RESET;
+ err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+ if (err < 0)
+ return err;
+
+ /* Reset page selection */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0);
+ if (err < 0)
+ return err;
+ }
+
+ return marvell_config_init(phydev);
+}
+
static int m88e1118_config_aneg(struct phy_device *phydev)
{
int err;
@@ -1031,8 +1071,8 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
{
struct marvell_hw_stat stat = marvell_hw_stats[i];
struct marvell_priv *priv = phydev->priv;
- int err, oldpage;
- u64 val;
+ int err, oldpage, val;
+ u64 ret;
oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
@@ -1042,16 +1082,16 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
val = phy_read(phydev, stat.reg);
if (val < 0) {
- val = UINT64_MAX;
+ ret = UINT64_MAX;
} else {
val = val & ((1 << stat.bits) - 1);
priv->stats[i] += val;
- val = priv->stats[i];
+ ret = priv->stats[i];
}
phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
- return val;
+ return ret;
}
static void marvell_get_stats(struct phy_device *phydev,
@@ -1268,7 +1308,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/phy/mdio-cavium.c
new file mode 100644
index 000000000000..6df2fa755bb4
--- /dev/null
+++ b/drivers/net/phy/mdio-cavium.c
@@ -0,0 +1,153 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009-2016 Cavium, Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+
+#include "mdio-cavium.h"
+
+static void cavium_mdiobus_set_mode(struct cavium_mdiobus *p,
+ enum cavium_mdiobus_mode m)
+{
+ union cvmx_smix_clk smi_clk;
+
+ if (m == p->mode)
+ return;
+
+ smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
+ smi_clk.s.mode = (m == C45) ? 1 : 0;
+ smi_clk.s.preamble = 1;
+ oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
+ p->mode = m;
+}
+
+static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p,
+ int phy_id, int regnum)
+{
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+
+ cavium_mdiobus_set_mode(p, C45);
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = regnum & 0xffff;
+ oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
+
+ regnum = (regnum >> 16) & 0x1f;
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
+
+ do {
+ /* Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ __delay(1000);
+ smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
+ } while (smi_wr.s.pending && --timeout);
+
+ if (timeout <= 0)
+ return -EIO;
+ return 0;
+}
+
+int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct cavium_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+ unsigned int op = 1; /* MDIO_CLAUSE_22_READ */
+ int timeout = 1000;
+
+ if (regnum & MII_ADDR_C45) {
+ int r = cavium_mdiobus_c45_addr(p, phy_id, regnum);
+
+ if (r < 0)
+ return r;
+
+ regnum = (regnum >> 16) & 0x1f;
+ op = 3; /* MDIO_CLAUSE_45_READ */
+ } else {
+ cavium_mdiobus_set_mode(p, C22);
+ }
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = op;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
+
+ do {
+ /* Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ __delay(1000);
+ smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
+ } while (smi_rd.s.pending && --timeout);
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return -EIO;
+}
+EXPORT_SYMBOL(cavium_mdiobus_read);
+
+int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
+{
+ struct cavium_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */
+ int timeout = 1000;
+
+ if (regnum & MII_ADDR_C45) {
+ int r = cavium_mdiobus_c45_addr(p, phy_id, regnum);
+
+ if (r < 0)
+ return r;
+
+ regnum = (regnum >> 16) & 0x1f;
+ op = 1; /* MDIO_CLAUSE_45_WRITE */
+ } else {
+ cavium_mdiobus_set_mode(p, C22);
+ }
+
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = op;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
+
+ do {
+ /* Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ __delay(1000);
+ smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
+ } while (smi_wr.s.pending && --timeout);
+
+ if (timeout <= 0)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(cavium_mdiobus_write);
+
+MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers");
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h
new file mode 100644
index 000000000000..4bccd45d24e2
--- /dev/null
+++ b/drivers/net/phy/mdio-cavium.h
@@ -0,0 +1,119 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009-2016 Cavium, Inc.
+ */
+
+enum cavium_mdiobus_mode {
+ UNINIT = 0,
+ C22,
+ C45
+};
+
+#define SMI_CMD 0x0
+#define SMI_WR_DAT 0x8
+#define SMI_RD_DAT 0x10
+#define SMI_CLK 0x18
+#define SMI_EN 0x20
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define OCT_MDIO_BITFIELD_FIELD(field, more) \
+ field; \
+ more
+
+#else
+#define OCT_MDIO_BITFIELD_FIELD(field, more) \
+ more \
+ field;
+
+#endif
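
The nesting trick lets every register description below be written most-significant-field-first, with the macro re-ordering the declarations for the compiler. A worked expansion (illustration only, not part of the patch):

	/* For a two-field register,
	 *
	 *	OCT_MDIO_BITFIELD_FIELD(u64 hi:32,
	 *	OCT_MDIO_BITFIELD_FIELD(u64 lo:32,
	 *	;))
	 *
	 * expands under __BIG_ENDIAN_BITFIELD to   u64 hi:32; u64 lo:32; ;
	 * and under little-endian bitfields to     ; u64 lo:32; u64 hi:32;
	 * so the same MSB-first source produces the correct storage order
	 * for either convention (the stray ';' is an empty declaration the
	 * compiler accepts and ignores).
	 */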
+
+union cvmx_smix_clk {
+ u64 u64;
+ struct cvmx_smix_clk_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
+ OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
+ OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
+ ;))))))))))
+ } s;
+};
+
+union cvmx_smix_cmd {
+ u64 u64;
+ struct cvmx_smix_cmd_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
+ OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
+ ;))))))
+ } s;
+};
+
+union cvmx_smix_en {
+ u64 u64;
+ struct cvmx_smix_en_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
+ OCT_MDIO_BITFIELD_FIELD(u64 en:1,
+ ;))
+ } s;
+};
+
+union cvmx_smix_rd_dat {
+ u64 u64;
+ struct cvmx_smix_rd_dat_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+ ;))))
+ } s;
+};
+
+union cvmx_smix_wr_dat {
+ u64 u64;
+ struct cvmx_smix_wr_dat_s {
+ OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+ OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+ OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+ ;))))
+ } s;
+};
+
+struct cavium_mdiobus {
+ struct mii_bus *mii_bus;
+ u64 register_base;
+ enum cavium_mdiobus_mode mode;
+};
+
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+
+#include <asm/octeon/octeon.h>
+
+static inline void oct_mdio_writeq(u64 val, u64 addr)
+{
+ cvmx_write_csr(addr, val);
+}
+
+static inline u64 oct_mdio_readq(u64 addr)
+{
+ return cvmx_read_csr(addr);
+}
+#else
+#define oct_mdio_writeq(val, addr) writeq(val, (void *)addr)
+#define oct_mdio_readq(addr) readq((void *)addr)
+#endif
+
+int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum);
+int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val);
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 47d4f2f263d1..ab6914f8bd50 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -3,272 +3,26 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2009-2012 Cavium, Inc.
+ * Copyright (C) 2009-2015 Cavium, Inc.
*/
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/delay.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/phy.h>
#include <linux/io.h>
-#ifdef CONFIG_CAVIUM_OCTEON_SOC
-#include <asm/octeon/octeon.h>
-#endif
-
-#define DRV_VERSION "1.1"
-#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"
-
-#define SMI_CMD 0x0
-#define SMI_WR_DAT 0x8
-#define SMI_RD_DAT 0x10
-#define SMI_CLK 0x18
-#define SMI_EN 0x20
-
-#ifdef __BIG_ENDIAN_BITFIELD
-#define OCT_MDIO_BITFIELD_FIELD(field, more) \
- field; \
- more
-
-#else
-#define OCT_MDIO_BITFIELD_FIELD(field, more) \
- more \
- field;
-
-#endif
-
-union cvmx_smix_clk {
- u64 u64;
- struct cvmx_smix_clk_s {
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
- OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
- OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
- OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
- OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
- OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
- OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
- OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
- ;))))))))))
- } s;
-};
-
-union cvmx_smix_cmd {
- u64 u64;
- struct cvmx_smix_cmd_s {
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
- OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
- OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
- OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
- ;))))))
- } s;
-};
-
-union cvmx_smix_en {
- u64 u64;
- struct cvmx_smix_en_s {
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
- OCT_MDIO_BITFIELD_FIELD(u64 en:1,
- ;))
- } s;
-};
-
-union cvmx_smix_rd_dat {
- u64 u64;
- struct cvmx_smix_rd_dat_s {
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
- OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
- OCT_MDIO_BITFIELD_FIELD(u64 val:1,
- OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
- ;))))
- } s;
-};
-
-union cvmx_smix_wr_dat {
- u64 u64;
- struct cvmx_smix_wr_dat_s {
- OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
- OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
- OCT_MDIO_BITFIELD_FIELD(u64 val:1,
- OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
- ;))))
- } s;
-};
-
-enum octeon_mdiobus_mode {
- UNINIT = 0,
- C22,
- C45
-};
-
-struct octeon_mdiobus {
- struct mii_bus *mii_bus;
- u64 register_base;
- resource_size_t mdio_phys;
- resource_size_t regsize;
- enum octeon_mdiobus_mode mode;
-};
-
-#ifdef CONFIG_CAVIUM_OCTEON_SOC
-static void oct_mdio_writeq(u64 val, u64 addr)
-{
- cvmx_write_csr(addr, val);
-}
-
-static u64 oct_mdio_readq(u64 addr)
-{
- return cvmx_read_csr(addr);
-}
-#else
-#define oct_mdio_writeq(val, addr) writeq_relaxed(val, (void *)addr)
-#define oct_mdio_readq(addr) readq_relaxed((void *)addr)
-#endif
-
-static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
- enum octeon_mdiobus_mode m)
-{
- union cvmx_smix_clk smi_clk;
-
- if (m == p->mode)
- return;
-
- smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
- smi_clk.s.mode = (m == C45) ? 1 : 0;
- smi_clk.s.preamble = 1;
- oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
- p->mode = m;
-}
-
-static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
- int phy_id, int regnum)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- octeon_mdiobus_set_mode(p, C45);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = regnum & 0xffff;
- oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
-
- regnum = (regnum >> 16) & 0x1f;
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = regnum;
- oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
-
- do {
- /* Wait 1000 clocks so we don't saturate the RSL bus
- * doing reads.
- */
- __delay(1000);
- smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
- } while (smi_wr.s.pending && --timeout);
-
- if (timeout <= 0)
- return -EIO;
- return 0;
-}
-
-static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
-{
- struct octeon_mdiobus *p = bus->priv;
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
- unsigned int op = 1; /* MDIO_CLAUSE_22_READ */
- int timeout = 1000;
-
- if (regnum & MII_ADDR_C45) {
- int r = octeon_mdiobus_c45_addr(p, phy_id, regnum);
- if (r < 0)
- return r;
-
- regnum = (regnum >> 16) & 0x1f;
- op = 3; /* MDIO_CLAUSE_45_READ */
- } else {
- octeon_mdiobus_set_mode(p, C22);
- }
-
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = op;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = regnum;
- oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
-
- do {
- /* Wait 1000 clocks so we don't saturate the RSL bus
- * doing reads.
- */
- __delay(1000);
- smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
- } while (smi_rd.s.pending && --timeout);
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else
- return -EIO;
-}
-
-static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
- int regnum, u16 val)
-{
- struct octeon_mdiobus *p = bus->priv;
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
- unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */
- int timeout = 1000;
-
-
- if (regnum & MII_ADDR_C45) {
- int r = octeon_mdiobus_c45_addr(p, phy_id, regnum);
- if (r < 0)
- return r;
-
- regnum = (regnum >> 16) & 0x1f;
- op = 1; /* MDIO_CLAUSE_45_WRITE */
- } else {
- octeon_mdiobus_set_mode(p, C22);
- }
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = op;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = regnum;
- oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
-
- do {
- /* Wait 1000 clocks so we don't saturate the RSL bus
- * doing reads.
- */
- __delay(1000);
- smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
- } while (smi_wr.s.pending && --timeout);
-
- if (timeout <= 0)
- return -EIO;
-
- return 0;
-}
+#include "mdio-cavium.h"
static int octeon_mdiobus_probe(struct platform_device *pdev)
{
- struct octeon_mdiobus *bus;
+ struct cavium_mdiobus *bus;
struct mii_bus *mii_bus;
struct resource *res_mem;
+ resource_size_t mdio_phys;
+ resource_size_t regsize;
union cvmx_smix_en smi_en;
int err = -ENOENT;
@@ -284,17 +38,17 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
bus = mii_bus->priv;
bus->mii_bus = mii_bus;
- bus->mdio_phys = res_mem->start;
- bus->regsize = resource_size(res_mem);
+ mdio_phys = res_mem->start;
+ regsize = resource_size(res_mem);
- if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
+ if (!devm_request_mem_region(&pdev->dev, mdio_phys, regsize,
res_mem->name)) {
dev_err(&pdev->dev, "request_mem_region failed\n");
return -ENXIO;
}
bus->register_base =
- (u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
+ (u64)devm_ioremap(&pdev->dev, mdio_phys, regsize);
if (!bus->register_base) {
dev_err(&pdev->dev, "dev_ioremap failed\n");
return -ENOMEM;
@@ -304,13 +58,12 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
smi_en.s.en = 1;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
- bus->mii_bus->priv = bus;
- bus->mii_bus->name = "mdio-octeon";
+ bus->mii_bus->name = KBUILD_MODNAME;
snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base);
bus->mii_bus->parent = &pdev->dev;
- bus->mii_bus->read = octeon_mdiobus_read;
- bus->mii_bus->write = octeon_mdiobus_write;
+ bus->mii_bus->read = cavium_mdiobus_read;
+ bus->mii_bus->write = cavium_mdiobus_write;
platform_set_drvdata(pdev, bus);
@@ -318,7 +71,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
if (err)
goto fail_register;
- dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
+ dev_info(&pdev->dev, "Probed\n");
return 0;
fail_register:
@@ -330,7 +83,7 @@ fail_register:
static int octeon_mdiobus_remove(struct platform_device *pdev)
{
- struct octeon_mdiobus *bus;
+ struct cavium_mdiobus *bus;
union cvmx_smix_en smi_en;
bus = platform_get_drvdata(pdev);
@@ -352,7 +105,7 @@ MODULE_DEVICE_TABLE(of, octeon_mdiobus_match);
static struct platform_driver octeon_mdiobus_driver = {
.driver = {
- .name = "mdio-octeon",
+ .name = KBUILD_MODNAME,
.of_match_table = octeon_mdiobus_match,
},
.probe = octeon_mdiobus_probe,
@@ -367,7 +120,6 @@ EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
module_platform_driver(octeon_mdiobus_driver);
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Cavium OCTEON MDIO bus driver");
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index f70522c35163..135296508a7e 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -122,6 +122,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
dev_info(&pdev->dev, "no regulator found\n");
+ data->regulator = NULL;
} else {
ret = regulator_enable(data->regulator);
if (ret)
@@ -137,7 +138,8 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
return 0;
err_out_disable_regulator:
- regulator_disable(data->regulator);
+ if (data->regulator)
+ regulator_disable(data->regulator);
err_out_free_mdiobus:
mdiobus_free(bus);
return ret;
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c
new file mode 100644
index 000000000000..564616968cad
--- /dev/null
+++ b/drivers/net/phy/mdio-thunder.c
@@ -0,0 +1,154 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009-2016 Cavium, Inc.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/gfp.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+
+#include "mdio-cavium.h"
+
+struct thunder_mdiobus_nexus {
+ void __iomem *bar0;
+ struct cavium_mdiobus *buses[4];
+};
+
+static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device_node *node;
+ struct fwnode_handle *fwn;
+ struct thunder_mdiobus_nexus *nexus;
+ int err;
+ int i;
+
+ nexus = devm_kzalloc(&pdev->dev, sizeof(*nexus), GFP_KERNEL);
+ if (!nexus)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, nexus);
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_disable_device;
+ }
+
+ nexus->bar0 = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!nexus->bar0) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ i = 0;
+ device_for_each_child_node(&pdev->dev, fwn) {
+ struct resource r;
+ struct mii_bus *mii_bus;
+ struct cavium_mdiobus *bus;
+ union cvmx_smix_en smi_en;
+
+ /* If it is not an OF node we cannot handle it yet, so
+ * exit the loop.
+ */
+ node = to_of_node(fwn);
+ if (!node)
+ break;
+
+ err = of_address_to_resource(node, 0, &r);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Couldn't translate address for \"%s\"\n",
+ node->name);
+ break;
+ }
+
+ mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus));
+ if (!mii_bus)
+ break;
+ bus = mii_bus->priv;
+ bus->mii_bus = mii_bus;
+
+ nexus->buses[i] = bus;
+ i++;
+
+ bus->register_base = (u64)nexus->bar0 +
+ r.start - pci_resource_start(pdev, 0);
+
+ smi_en.u64 = 0;
+ smi_en.s.en = 1;
+ oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
+ bus->mii_bus->name = KBUILD_MODNAME;
+ snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", r.start);
+ bus->mii_bus->parent = &pdev->dev;
+ bus->mii_bus->read = cavium_mdiobus_read;
+ bus->mii_bus->write = cavium_mdiobus_write;
+
+ err = of_mdiobus_register(bus->mii_bus, node);
+ if (err)
+ dev_err(&pdev->dev, "of_mdiobus_register failed\n");
+
+ dev_info(&pdev->dev, "Added bus at %llx\n", r.start);
+ if (i >= ARRAY_SIZE(nexus->buses))
+ break;
+ }
+ return 0;
+
+err_release_regions:
+ pci_release_regions(pdev);
+
+err_disable_device:
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct thunder_mdiobus_nexus *nexus = pci_get_drvdata(pdev);
+
+ for (i = 0; i < ARRAY_SIZE(nexus->buses); i++) {
+ struct cavium_mdiobus *bus = nexus->buses[i];
+
+ if (!bus)
+ continue;
+
+ mdiobus_unregister(bus->mii_bus);
+ mdiobus_free(bus->mii_bus);
+ oct_mdio_writeq(0, bus->register_base + SMI_EN);
+ }
+ pci_set_drvdata(pdev, NULL);
+}
+
+static const struct pci_device_id thunder_mdiobus_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa02b) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, thunder_mdiobus_id_table);
+
+static struct pci_driver thunder_mdiobus_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = thunder_mdiobus_id_table,
+ .probe = thunder_mdiobus_pci_probe,
+ .remove = thunder_mdiobus_pci_remove,
+};
+
+module_pci_driver(thunder_mdiobus_driver);
+
+MODULE_DESCRIPTION("Cavium ThunderX MDIO bus driver");
+MODULE_LICENSE("GPL");
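
The nexus walks its firmware child nodes and carves each bus's register_base out of BAR0: of_address_to_resource() on the child yields r.start, and the probe maps it to bar0 + (r.start - pci_resource_start(pdev, 0)). A hypothetical devicetree fragment of the shape the loop expects (names and addresses here are illustrative assumptions, not taken from this patch):

	mdio-nexus@1,3 {
		compatible = "cavium,thunder-8890-mdio-nexus";
		ranges;

		mdio0@87e005003800 {
			compatible = "cavium,thunder-8890-mdio";
			reg = <0x87e0 0x05003800 0x0 0x30>;
			#address-cells = <1>;
			#size-cells = <0>;
		};
	};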
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index dc85f7095e51..4516c8a4fd82 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -623,18 +623,19 @@ static u64 kszphy_get_stat(struct phy_device *phydev, int i)
{
struct kszphy_hw_stat stat = kszphy_hw_stats[i];
struct kszphy_priv *priv = phydev->priv;
- u64 val;
+ int val;
+ u64 ret;
val = phy_read(phydev, stat.reg);
if (val < 0) {
- val = UINT64_MAX;
+ ret = UINT64_MAX;
} else {
val = val & ((1 << stat.bits) - 1);
priv->stats[i] += val;
- val = priv->stats[i];
+ ret = priv->stats[i];
}
- return val;
+ return ret;
}
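
The split into a signed val and an unsigned ret is the whole fix: phy_read() returns a negative errno in an int, and storing it straight into a u64 made the error test unreachable. In outline (illustration only):

	/* Before: u64 val = phy_read(...);  // -EIO stored as 0xffff...ffea
	 *         if (val < 0)             // always false for a u64
	 * After:  int val = phy_read(...); // errno survives as negative int
	 *         if (val < 0)             // error path is reachable again
	 * and only the accumulated counter (ret) stays 64-bit.
	 */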
static void kszphy_get_stats(struct phy_device *phydev,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index c72c42206850..93ffedfa2994 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -18,6 +18,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
@@ -74,6 +77,7 @@
#define KS8995_REGS_SIZE 0x80
#define KSZ8864_REGS_SIZE 0x100
+#define KSZ8795_REGS_SIZE 0x100
#define ID1_CHIPID_M 0xf
#define ID1_CHIPID_S 4
@@ -82,15 +86,63 @@
#define ID1_START_SW 1 /* start the switch */
#define FAMILY_KS8995 0x95
+#define FAMILY_KSZ8795 0x87
#define CHIPID_M 0
+#define KS8995_CHIP_ID 0x00
+#define KSZ8864_CHIP_ID 0x01
+#define KSZ8795_CHIP_ID 0x09
#define KS8995_CMD_WRITE 0x02U
#define KS8995_CMD_READ 0x03U
#define KS8995_RESET_DELAY 10 /* usec */
+enum ks8995_chip_variant {
+ ks8995,
+ ksz8864,
+ ksz8795,
+ max_variant
+};
+
+struct ks8995_chip_params {
+ char *name;
+ int family_id;
+ int chip_id;
+ int regs_size;
+ int addr_width;
+ int addr_shift;
+};
+
+static const struct ks8995_chip_params ks8995_chip[] = {
+ [ks8995] = {
+ .name = "KS8995MA",
+ .family_id = FAMILY_KS8995,
+ .chip_id = KS8995_CHIP_ID,
+ .regs_size = KS8995_REGS_SIZE,
+ .addr_width = 8,
+ .addr_shift = 0,
+ },
+ [ksz8864] = {
+ .name = "KSZ8864RMN",
+ .family_id = FAMILY_KS8995,
+ .chip_id = KSZ8864_CHIP_ID,
+ .regs_size = KSZ8864_REGS_SIZE,
+ .addr_width = 8,
+ .addr_shift = 0,
+ },
+ [ksz8795] = {
+ .name = "KSZ8795CLX",
+ .family_id = FAMILY_KSZ8795,
+ .chip_id = KSZ8795_CHIP_ID,
+ .regs_size = KSZ8795_REGS_SIZE,
+ .addr_width = 12,
+ .addr_shift = 1,
+ },
+};
+
struct ks8995_pdata {
- /* not yet implemented */
+ int reset_gpio;
+ enum of_gpio_flags reset_gpio_flags;
};
struct ks8995_switch {
@@ -98,7 +150,17 @@ struct ks8995_switch {
struct mutex lock;
struct ks8995_pdata *pdata;
struct bin_attribute regs_attr;
+ const struct ks8995_chip_params *chip;
+ int revision_id;
+};
+
+static const struct spi_device_id ks8995_id[] = {
+ {"ks8995", ks8995},
+ {"ksz8864", ksz8864},
+ {"ksz8795", ksz8795},
+ { }
};
+MODULE_DEVICE_TABLE(spi, ks8995_id);
static inline u8 get_chip_id(u8 val)
{
@@ -110,20 +172,44 @@ static inline u8 get_chip_rev(u8 val)
return (val >> ID1_REVISION_S) & ID1_REVISION_M;
}
+/* create_spi_cmd - create a chip specific SPI command header
+ * @ks: pointer to switch instance
+ * @cmd: SPI command for switch
+ * @address: register address for command
+ *
+ * Different chip families use different bit patterns to address the switch
+ * registers:
+ *
+ * KS8995: 8bit command + 8bit address
+ * KSZ8795: 3bit command + 12bit address + 1bit TR (?)
+ */
+static inline __be16 create_spi_cmd(struct ks8995_switch *ks, int cmd,
+ unsigned address)
+{
+ u16 result = cmd;
+
+ /* make room for address (incl. address shift) */
+ result <<= ks->chip->addr_width + ks->chip->addr_shift;
+ /* add address */
+ result |= address << ks->chip->addr_shift;
+ /* SPI protocol needs big endian */
+ return cpu_to_be16(result);
+}
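
A worked example of the header layout (illustration only), reading register 0x10 with KS8995_CMD_READ (0x03):

	/* KS8995  (addr_width 8,  shift 0): 0x03 << 8        | 0x10      = 0x0310
	 * KSZ8795 (addr_width 12, shift 1): 0x03 << (12 + 1) | 0x10 << 1 = 0x6020
	 *
	 * cpu_to_be16() then puts the command bits first on the wire,
	 * which is the order the switch expects.
	 */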
/* ------------------------------------------------------------------------ */
static int ks8995_read(struct ks8995_switch *ks, char *buf,
unsigned offset, size_t count)
{
- u8 cmd[2];
+ __be16 cmd;
struct spi_transfer t[2];
struct spi_message m;
int err;
+ cmd = create_spi_cmd(ks, KS8995_CMD_READ, offset);
spi_message_init(&m);
memset(&t, 0, sizeof(t));
- t[0].tx_buf = cmd;
+ t[0].tx_buf = &cmd;
t[0].len = sizeof(cmd);
spi_message_add_tail(&t[0], &m);
@@ -131,9 +217,6 @@ static int ks8995_read(struct ks8995_switch *ks, char *buf,
t[1].len = count;
spi_message_add_tail(&t[1], &m);
- cmd[0] = KS8995_CMD_READ;
- cmd[1] = offset;
-
mutex_lock(&ks->lock);
err = spi_sync(ks->spi, &m);
mutex_unlock(&ks->lock);
@@ -141,20 +224,20 @@ static int ks8995_read(struct ks8995_switch *ks, char *buf,
return err ? err : count;
}
-
static int ks8995_write(struct ks8995_switch *ks, char *buf,
unsigned offset, size_t count)
{
- u8 cmd[2];
+ __be16 cmd;
struct spi_transfer t[2];
struct spi_message m;
int err;
+ cmd = create_spi_cmd(ks, KS8995_CMD_WRITE, offset);
spi_message_init(&m);
memset(&t, 0, sizeof(t));
- t[0].tx_buf = cmd;
+ t[0].tx_buf = &cmd;
t[0].len = sizeof(cmd);
spi_message_add_tail(&t[0], &m);
@@ -162,9 +245,6 @@ static int ks8995_write(struct ks8995_switch *ks, char *buf,
t[1].len = count;
spi_message_add_tail(&t[1], &m);
- cmd[0] = KS8995_CMD_WRITE;
- cmd[1] = offset;
-
mutex_lock(&ks->lock);
err = spi_sync(ks->spi, &m);
mutex_unlock(&ks->lock);
@@ -233,6 +313,107 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
return ks8995_write(ks8995, buf, off, count);
}
+/* ks8995_get_revision - get chip revision
+ * @ks: pointer to switch instance
+ *
+ * Verify chip family and id and get chip revision.
+ */
+static int ks8995_get_revision(struct ks8995_switch *ks)
+{
+ int err;
+ u8 id0, id1, ksz8864_id;
+
+ /* read family id */
+ err = ks8995_read_reg(ks, KS8995_REG_ID0, &id0);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* verify family id */
+ if (id0 != ks->chip->family_id) {
+ dev_err(&ks->spi->dev, "chip family id mismatch: expected 0x%02x but 0x%02x read\n",
+ ks->chip->family_id, id0);
+ err = -ENODEV;
+ goto err_out;
+ }
+
+ switch (ks->chip->family_id) {
+ case FAMILY_KS8995:
+ /* try reading chip id at CHIP ID1 */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ /* verify chip id */
+ if ((get_chip_id(id1) == CHIPID_M) &&
+ (get_chip_id(id1) == ks->chip->chip_id)) {
+ /* KS8995MA */
+ ks->revision_id = get_chip_rev(id1);
+ } else if (get_chip_id(id1) != CHIPID_M) {
+ /* KSZ8864RMN */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &ksz8864_id);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ if ((ksz8864_id & 0x80) &&
+ (ks->chip->chip_id == KSZ8864_CHIP_ID)) {
+ ks->revision_id = get_chip_rev(id1);
+ }
+
+ } else {
+ dev_err(&ks->spi->dev, "unsupported chip id for KS8995 family: 0x%02x\n",
+ id1);
+ err = -ENODEV;
+ }
+ break;
+ case FAMILY_KSZ8795:
+ /* try reading chip id at CHIP ID1 */
+ err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1);
+ if (err) {
+ err = -EIO;
+ goto err_out;
+ }
+
+ if (get_chip_id(id1) == ks->chip->chip_id) {
+ ks->revision_id = get_chip_rev(id1);
+ } else {
+ dev_err(&ks->spi->dev, "unsupported chip id for KSZ8795 family: 0x%02x\n",
+ id1);
+ err = -ENODEV;
+ }
+ break;
+ default:
+ dev_err(&ks->spi->dev, "unsupported family id: 0x%02x\n", id0);
+ err = -ENODEV;
+ break;
+ }
+err_out:
+ return err;
+}
+
+/* ks8995_parse_dt - setup platform data from devicetree
+ * @ks: pointer to switch instance
+ *
+ * Parses supported DT properties and sets up platform data
+ * accordingly.
+ */
+static void ks8995_parse_dt(struct ks8995_switch *ks)
+{
+ struct device_node *np = ks->spi->dev.of_node;
+ struct ks8995_pdata *pdata = ks->pdata;
+
+ if (!np)
+ return;
+
+ pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios", 0,
+ &pdata->reset_gpio_flags);
+}
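
A hypothetical devicetree fragment that this parser would consume via the reset-gpios property (controller phandle and pin number are illustrative assumptions):

	switch@0 {
		compatible = "micrel,ksz8795";
		reg = <0>;
		spi-max-frequency = <44000000>;
		reset-gpios = <&gpio0 46 GPIO_ACTIVE_LOW>;
	};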
+
static const struct bin_attribute ks8995_registers_attr = {
.attr = {
.name = "registers",
@@ -244,24 +425,58 @@ static const struct bin_attribute ks8995_registers_attr = {
};
/* ------------------------------------------------------------------------ */
-
static int ks8995_probe(struct spi_device *spi)
{
- struct ks8995_switch *ks;
- struct ks8995_pdata *pdata;
- u8 ids[2];
- int err;
+ struct ks8995_switch *ks;
+ int err;
+ int variant = spi_get_device_id(spi)->driver_data;
- /* Chip description */
- pdata = spi->dev.platform_data;
+ if (variant >= max_variant) {
+ dev_err(&spi->dev, "bad chip variant %d\n", variant);
+ return -ENODEV;
+ }
ks = devm_kzalloc(&spi->dev, sizeof(*ks), GFP_KERNEL);
if (!ks)
return -ENOMEM;
mutex_init(&ks->lock);
- ks->pdata = pdata;
- ks->spi = spi_dev_get(spi);
+ ks->spi = spi;
+ ks->chip = &ks8995_chip[variant];
+
+ if (ks->spi->dev.of_node) {
+ ks->pdata = devm_kzalloc(&spi->dev, sizeof(*ks->pdata),
+ GFP_KERNEL);
+ if (!ks->pdata)
+ return -ENOMEM;
+
+ ks->pdata->reset_gpio = -1;
+
+ ks8995_parse_dt(ks);
+ }
+
+ if (!ks->pdata)
+ ks->pdata = spi->dev.platform_data;
+
+ /* de-assert switch reset */
+ if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) {
+ unsigned long flags;
+
+ flags = (ks->pdata->reset_gpio_flags == OF_GPIO_ACTIVE_LOW ?
+ GPIOF_ACTIVE_LOW : 0);
+
+ err = devm_gpio_request_one(&spi->dev,
+ ks->pdata->reset_gpio,
+ flags, "switch-reset");
+ if (err) {
+ dev_err(&spi->dev,
+ "failed to get reset-gpios: %d\n", err);
+ return -EIO;
+ }
+
+ gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 0);
+ }
+
spi_set_drvdata(spi, ks);
spi->mode = SPI_MODE_0;
@@ -272,39 +487,12 @@ static int ks8995_probe(struct spi_device *spi)
return err;
}
- err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
- if (err < 0) {
- dev_err(&spi->dev, "unable to read id registers, err=%d\n",
- err);
+ err = ks8995_get_revision(ks);
+ if (err)
return err;
- }
-
- switch (ids[0]) {
- case FAMILY_KS8995:
- break;
- default:
- dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
- return -ENODEV;
- }
memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+ ks->regs_attr.size = ks->chip->regs_size;
- if (get_chip_id(ids[1]) != CHIPID_M) {
- u8 val;
-
- /* Check if this is a KSZ8864RMN */
- err = ks8995_read(ks, &val, KSZ8864_REG_ID1, sizeof(val));
- if (err < 0) {
- dev_err(&spi->dev,
- "unable to read chip id register, err=%d\n",
- err);
- return err;
- }
- if ((val & 0x80) == 0) {
- dev_err(&spi->dev, "unknown chip:%02x,0\n", ids[1]);
- return err;
- }
- ks->regs_attr.size = KSZ8864_REGS_SIZE;
- }
err = ks8995_reset(ks);
if (err)
@@ -317,14 +505,8 @@ static int ks8995_probe(struct spi_device *spi)
return err;
}
- if (get_chip_id(ids[1]) == CHIPID_M) {
- dev_info(&spi->dev,
- "KS8995 device found, Chip ID:%x, Revision:%x\n",
- get_chip_id(ids[1]), get_chip_rev(ids[1]));
- } else {
- dev_info(&spi->dev, "KSZ8864 device found, Revision:%x\n",
- get_chip_rev(ids[1]));
- }
+ dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n",
+ ks->chip->name, ks->chip->chip_id, ks->revision_id);
return 0;
}
@@ -335,17 +517,21 @@ static int ks8995_remove(struct spi_device *spi)
sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr);
+ /* assert reset */
+ if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio))
+ gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1);
+
return 0;
}
/* ------------------------------------------------------------------------ */
-
static struct spi_driver ks8995_driver = {
.driver = {
.name = "spi-ks8995",
},
.probe = ks8995_probe,
.remove = ks8995_remove,
+ .id_table = ks8995_id,
};
module_spi_driver(ks8995_driver);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index d61da9ece3ba..f572b31a2b20 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -575,7 +575,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct ppp_file *pf = file->private_data;
+ struct ppp_file *pf;
struct ppp *ppp;
int err = -EFAULT, val, val2, i;
struct ppp_idle idle;
@@ -585,9 +585,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
int __user *p = argp;
- if (!pf)
- return ppp_unattached_ioctl(current->nsproxy->net_ns,
- pf, file, cmd, arg);
+ mutex_lock(&ppp_mutex);
+
+ pf = file->private_data;
+ if (!pf) {
+ err = ppp_unattached_ioctl(current->nsproxy->net_ns,
+ pf, file, cmd, arg);
+ goto out;
+ }
if (cmd == PPPIOCDETACH) {
/*
@@ -602,7 +607,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* this fd and reopening /dev/ppp.
*/
err = -EINVAL;
- mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
rtnl_lock();
@@ -616,15 +620,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else
pr_warn("PPPIOCDETACH file->f_count=%ld\n",
atomic_long_read(&file->f_count));
- mutex_unlock(&ppp_mutex);
- return err;
+ goto out;
}
if (pf->kind == CHANNEL) {
struct channel *pch;
struct ppp_channel *chan;
- mutex_lock(&ppp_mutex);
pch = PF_TO_CHANNEL(pf);
switch (cmd) {
@@ -646,17 +648,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = chan->ops->ioctl(chan, cmd, arg);
up_read(&pch->chan_sem);
}
- mutex_unlock(&ppp_mutex);
- return err;
+ goto out;
}
if (pf->kind != INTERFACE) {
/* can't happen */
pr_err("PPP: not interface or channel??\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
- mutex_lock(&ppp_mutex);
ppp = PF_TO_PPP(pf);
switch (cmd) {
case PPPIOCSMRU:
@@ -831,7 +832,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
default:
err = -ENOTTY;
}
+
+out:
mutex_unlock(&ppp_mutex);
+
return err;
}
@@ -844,7 +848,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct ppp_net *pn;
int __user *p = (int __user *)arg;
- mutex_lock(&ppp_mutex);
switch (cmd) {
case PPPIOCNEWUNIT:
/* Create a new ppp unit */
@@ -894,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
default:
err = -ENOTTY;
}
- mutex_unlock(&ppp_mutex);
+
return err;
}
@@ -2304,7 +2307,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pch->ppp = NULL;
pch->chan = chan;
- pch->chan_net = net;
+ pch->chan_net = get_net(net);
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
@@ -2401,6 +2404,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
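
The get_net()/put_net() pair pins the channel's namespace for the channel's lifetime; a sketch of the invariant (illustration only):

	/* register:   pch->chan_net = get_net(net);  // take a reference
	 * unregister: put_net(pch->chan_net);        // drop that reference
	 *             pch->chan_net = NULL;          // catch later misuse
	 *
	 * Without the reference, the struct net could be freed while a
	 * still-registered channel points into it.
	 */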
@@ -2437,13 +2442,15 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg)
unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
err = -EFAULT;
- if (copy_from_user(&data, (void __user *) arg, sizeof(data)) ||
- (data.length <= CCP_MAX_OPTION_LENGTH &&
- copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
+ if (copy_from_user(&data, (void __user *) arg, sizeof(data)))
+ goto out;
+ if (data.length > CCP_MAX_OPTION_LENGTH)
goto out;
+ if (copy_from_user(ccp_option, (void __user *) data.ptr, data.length))
+ goto out;
+
err = -EINVAL;
- if (data.length > CCP_MAX_OPTION_LENGTH ||
- ccp_option[1] < 2 || ccp_option[1] > data.length)
+ if (data.length < 2 || ccp_option[1] < 2 || ccp_option[1] > data.length)
goto out;
cp = try_then_request_module(
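
The reordering above is a correctness fix, not a style change: with the old combined condition, a request carrying data.length == 1 copied a single byte and then evaluated ccp_option[1], an uninitialized stack byte. The control flow is now (sketch only):

	/* 1. copy_from_user(&data, ...)          -> -EFAULT on failure
	 * 2. data.length > CCP_MAX_OPTION_LENGTH -> reject before payload copy
	 * 3. copy_from_user(ccp_option, ...)     -> bounded by step 2
	 * 4. data.length < 2 || ccp_option[1]... -> ccp_option[1] is only
	 *                                           read once two bytes exist
	 */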
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 05005c660d4d..f60f7660b451 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -42,6 +42,8 @@
* deprecated in 2.6
*/
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -49,7 +51,6 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
@@ -94,8 +95,8 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_blkcipher *arc4;
- struct crypto_hash *sha1;
+ struct crypto_skcipher *arc4;
+ struct crypto_ahash *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -135,7 +136,7 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, state->sha1);
struct scatterlist sg[4];
unsigned int nbytes;
@@ -148,10 +149,12 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
sizeof(sha_pad->sha_pad2));
- desc.tfm = state->sha1;
- desc.flags = 0;
+ ahash_request_set_tfm(req, state->sha1);
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_crypt(req, sg, state->sha1_digest, nbytes);
- crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
+ crypto_ahash_digest(req);
+ ahash_request_zero(req);
}
/*
@@ -161,20 +164,23 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
struct scatterlist sg_in[1], sg_out[1];
- struct blkcipher_desc desc = { .tfm = state->arc4 };
+ SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+
+ skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
get_new_key_from_sha(state);
if (!initial_key) {
- crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
- state->keylen);
+ crypto_skcipher_setkey(state->arc4, state->sha1_digest,
+ state->keylen);
sg_init_table(sg_in, 1);
sg_init_table(sg_out, 1);
setup_sg(sg_in, state->sha1_digest, state->keylen);
setup_sg(sg_out, state->session_key, state->keylen);
- if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
- state->keylen) != 0) {
+ skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen,
+ NULL);
+ if (crypto_skcipher_encrypt(req))
printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
- }
} else {
memcpy(state->session_key, state->sha1_digest, state->keylen);
}
@@ -184,7 +190,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
+ crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+ skcipher_request_zero(req);
}
/*
@@ -204,19 +211,19 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out;
- state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(state->arc4)) {
state->arc4 = NULL;
goto out_free;
}
- state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+ state->sha1 = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(state->sha1)) {
state->sha1 = NULL;
goto out_free;
}
- digestsize = crypto_hash_digestsize(state->sha1);
+ digestsize = crypto_ahash_digestsize(state->sha1);
if (digestsize < MPPE_MAX_KEY_LEN)
goto out_free;
@@ -237,15 +244,12 @@ static void *mppe_alloc(unsigned char *options, int optlen)
return (void *)state;
- out_free:
- if (state->sha1_digest)
- kfree(state->sha1_digest);
- if (state->sha1)
- crypto_free_hash(state->sha1);
- if (state->arc4)
- crypto_free_blkcipher(state->arc4);
- kfree(state);
- out:
+out_free:
+ kfree(state->sha1_digest);
+ crypto_free_ahash(state->sha1);
+ crypto_free_skcipher(state->arc4);
+ kfree(state);
+out:
return NULL;
}
@@ -256,13 +260,10 @@ static void mppe_free(void *arg)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
if (state) {
- if (state->sha1_digest)
kfree(state->sha1_digest);
- if (state->sha1)
- crypto_free_hash(state->sha1);
- if (state->arc4)
- crypto_free_blkcipher(state->arc4);
- kfree(state);
+ crypto_free_ahash(state->sha1);
+ crypto_free_skcipher(state->arc4);
+ kfree(state);
}
}
@@ -368,8 +369,9 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- struct blkcipher_desc desc = { .tfm = state->arc4 };
+ SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
int proto;
+ int err;
struct scatterlist sg_in[1], sg_out[1];
/*
@@ -426,7 +428,13 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
sg_init_table(sg_out, 1);
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
- if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
+
+ skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
+ err = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ if (err) {
printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
return -1;
}
@@ -475,7 +483,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- struct blkcipher_desc desc = { .tfm = state->arc4 };
+ SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
struct scatterlist sg_in[1], sg_out[1];
@@ -609,9 +617,14 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
sg_init_table(sg_out, 1);
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
- if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
+
+ skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
+ if (crypto_skcipher_decrypt(req)) {
printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
- return DECOMP_ERROR;
+ osize = DECOMP_ERROR;
+ goto out_zap_req;
}
/*
@@ -629,9 +642,11 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
/* And finally, decrypt the rest of the packet. */
setup_sg(sg_in, ibuf + 1, isize - 1);
setup_sg(sg_out, obuf + 1, osize - 1);
- if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
+ skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL);
+ if (crypto_skcipher_decrypt(req)) {
printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
- return DECOMP_ERROR;
+ osize = DECOMP_ERROR;
+ goto out_zap_req;
}
state->stats.unc_bytes += osize;
@@ -642,6 +657,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
/* good packet credit */
state->sanity_errors >>= 1;
+out_zap_req:
+ skcipher_request_zero(req);
return osize;
sanity_error:
@@ -714,8 +731,8 @@ static struct compressor ppp_mppe = {
static int __init ppp_mppe_init(void)
{
int answer;
- if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
- crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
+ if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
+ crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)))
return -ENODEV;
sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
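
All of the hunks above follow one conversion pattern, from the legacy crypto_blkcipher/crypto_hash interfaces to the request-based skcipher/ahash ones. A minimal sketch of that pattern (an illustration, not code from the patch; it assumes tfm was allocated with crypto_alloc_skcipher() and keyed):

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	static int example_ecb_encrypt(struct crypto_skcipher *tfm,
				       struct scatterlist *src,
				       struct scatterlist *dst,
				       unsigned int len)
	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);	/* no allocation */
		int err;

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL); /* sync */
		skcipher_request_set_crypt(req, src, dst, len, NULL); /* ECB: no IV */
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req); /* scrub key state off the stack */
		return err;
	}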
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 01f08a7751f7..9cfe6aeac84e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -24,6 +24,7 @@
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
+#include <linux/reboot.h>
#define DRV_NAME "rionet"
#define DRV_VERSION "0.3"
@@ -48,6 +49,8 @@ MODULE_LICENSE("GPL");
#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS 8
+#define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE
+#define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN)
struct rionet_private {
struct rio_mport *mport;
@@ -60,6 +63,7 @@ struct rionet_private {
spinlock_t lock;
spinlock_t tx_lock;
u32 msg_enable;
+ bool open;
};
struct rionet_peer {
@@ -71,6 +75,7 @@ struct rionet_peer {
struct rionet_net {
struct net_device *ndev;
struct list_head peers;
+ spinlock_t lock; /* net info access lock */
struct rio_dev **active;
int nact; /* number of active peers */
};
@@ -232,26 +237,32 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
struct net_device *ndev = dev_id;
struct rionet_private *rnet = netdev_priv(ndev);
struct rionet_peer *peer;
+ unsigned char netid = rnet->mport->id;
if (netif_msg_intr(rnet))
printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
DRV_NAME, sid, tid, info);
if (info == RIONET_DOORBELL_JOIN) {
- if (!nets[rnet->mport->id].active[sid]) {
- list_for_each_entry(peer,
- &nets[rnet->mport->id].peers, node) {
+ if (!nets[netid].active[sid]) {
+ spin_lock(&nets[netid].lock);
+ list_for_each_entry(peer, &nets[netid].peers, node) {
if (peer->rdev->destid == sid) {
- nets[rnet->mport->id].active[sid] =
- peer->rdev;
- nets[rnet->mport->id].nact++;
+ nets[netid].active[sid] = peer->rdev;
+ nets[netid].nact++;
}
}
+ spin_unlock(&nets[netid].lock);
+
rio_mport_send_doorbell(mport, sid,
RIONET_DOORBELL_JOIN);
}
} else if (info == RIONET_DOORBELL_LEAVE) {
- nets[rnet->mport->id].active[sid] = NULL;
- nets[rnet->mport->id].nact--;
+ spin_lock(&nets[netid].lock);
+ if (nets[netid].active[sid]) {
+ nets[netid].active[sid] = NULL;
+ nets[netid].nact--;
+ }
+ spin_unlock(&nets[netid].lock);
} else {
if (netif_msg_intr(rnet))
printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -280,7 +291,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
struct net_device *ndev = dev_id;
struct rionet_private *rnet = netdev_priv(ndev);
- spin_lock(&rnet->lock);
+ spin_lock(&rnet->tx_lock);
if (netif_msg_intr(rnet))
printk(KERN_INFO
@@ -299,14 +310,16 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
netif_wake_queue(ndev);
- spin_unlock(&rnet->lock);
+ spin_unlock(&rnet->tx_lock);
}
static int rionet_open(struct net_device *ndev)
{
int i, rc = 0;
- struct rionet_peer *peer, *tmp;
+ struct rionet_peer *peer;
struct rionet_private *rnet = netdev_priv(ndev);
+ unsigned char netid = rnet->mport->id;
+ unsigned long flags;
if (netif_msg_ifup(rnet))
printk(KERN_INFO "%s: open\n", DRV_NAME);
@@ -345,20 +358,13 @@ static int rionet_open(struct net_device *ndev)
netif_carrier_on(ndev);
netif_start_queue(ndev);
- list_for_each_entry_safe(peer, tmp,
- &nets[rnet->mport->id].peers, node) {
- if (!(peer->res = rio_request_outb_dbell(peer->rdev,
- RIONET_DOORBELL_JOIN,
- RIONET_DOORBELL_LEAVE)))
- {
- printk(KERN_ERR "%s: error requesting doorbells\n",
- DRV_NAME);
- continue;
- }
-
+ spin_lock_irqsave(&nets[netid].lock, flags);
+ list_for_each_entry(peer, &nets[netid].peers, node) {
/* Send a join message */
rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
}
+ spin_unlock_irqrestore(&nets[netid].lock, flags);
+ rnet->open = true;
out:
return rc;
@@ -367,7 +373,9 @@ static int rionet_open(struct net_device *ndev)
static int rionet_close(struct net_device *ndev)
{
struct rionet_private *rnet = netdev_priv(ndev);
- struct rionet_peer *peer, *tmp;
+ struct rionet_peer *peer;
+ unsigned char netid = rnet->mport->id;
+ unsigned long flags;
int i;
if (netif_msg_ifup(rnet))
@@ -375,18 +383,21 @@ static int rionet_close(struct net_device *ndev)
netif_stop_queue(ndev);
netif_carrier_off(ndev);
+ rnet->open = false;
for (i = 0; i < RIONET_RX_RING_SIZE; i++)
kfree_skb(rnet->rx_skb[i]);
- list_for_each_entry_safe(peer, tmp,
- &nets[rnet->mport->id].peers, node) {
- if (nets[rnet->mport->id].active[peer->rdev->destid]) {
+ spin_lock_irqsave(&nets[netid].lock, flags);
+ list_for_each_entry(peer, &nets[netid].peers, node) {
+ if (nets[netid].active[peer->rdev->destid]) {
rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
- nets[rnet->mport->id].active[peer->rdev->destid] = NULL;
+ nets[netid].active[peer->rdev->destid] = NULL;
}
- rio_release_outb_dbell(peer->rdev, peer->res);
+ if (peer->res)
+ rio_release_outb_dbell(peer->rdev, peer->res);
}
+ spin_unlock_irqrestore(&nets[netid].lock, flags);
rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
RIONET_DOORBELL_LEAVE);
@@ -400,22 +411,38 @@ static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
struct rio_dev *rdev = to_rio_dev(dev);
unsigned char netid = rdev->net->hport->id;
- struct rionet_peer *peer, *tmp;
+ struct rionet_peer *peer;
+ int state, found = 0;
+ unsigned long flags;
- if (dev_rionet_capable(rdev)) {
- list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) {
- if (peer->rdev == rdev) {
- if (nets[netid].active[rdev->destid]) {
- nets[netid].active[rdev->destid] = NULL;
- nets[netid].nact--;
+ if (!dev_rionet_capable(rdev))
+ return;
+
+ spin_lock_irqsave(&nets[netid].lock, flags);
+ list_for_each_entry(peer, &nets[netid].peers, node) {
+ if (peer->rdev == rdev) {
+ list_del(&peer->node);
+ if (nets[netid].active[rdev->destid]) {
+ state = atomic_read(&rdev->state);
+ if (state != RIO_DEVICE_GONE &&
+ state != RIO_DEVICE_INITIALIZING) {
+ rio_send_doorbell(rdev,
+ RIONET_DOORBELL_LEAVE);
}
-
- list_del(&peer->node);
- kfree(peer);
- break;
+ nets[netid].active[rdev->destid] = NULL;
+ nets[netid].nact--;
}
+ found = 1;
+ break;
}
}
+ spin_unlock_irqrestore(&nets[netid].lock, flags);
+
+ if (found) {
+ if (peer->res)
+ rio_release_outb_dbell(rdev, peer->res);
+ kfree(peer);
+ }
}
static void rionet_get_drvinfo(struct net_device *ndev,
@@ -443,6 +470,17 @@ static void rionet_set_msglevel(struct net_device *ndev, u32 value)
rnet->msg_enable = value;
}
+static int rionet_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) {
+ printk(KERN_ERR "%s: Invalid MTU size %d\n",
+ ndev->name, new_mtu);
+ return -EINVAL;
+ }
+ ndev->mtu = new_mtu;
+ return 0;
+}
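
The bounds work out as follows (assuming the usual 4 KiB RapidIO message size from <linux/rio.h>):

	/* RIONET_MSG_SIZE = RIO_MAX_MSG_SIZE      = 4096
	 * RIONET_MAX_MTU  = 4096 - ETH_HLEN (14)  = 4082
	 *
	 * 68 is the classic IPv4 minimum, so the accepted range becomes
	 * 68..4082 instead of the 68..1500 enforced by eth_change_mtu().
	 */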
+
static const struct ethtool_ops rionet_ethtool_ops = {
.get_drvinfo = rionet_get_drvinfo,
.get_msglevel = rionet_get_msglevel,
@@ -454,7 +492,7 @@ static const struct net_device_ops rionet_netdev_ops = {
.ndo_open = rionet_open,
.ndo_stop = rionet_close,
.ndo_start_xmit = rionet_start_xmit,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = rionet_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -478,6 +516,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
/* Set up private area */
rnet = netdev_priv(ndev);
rnet->mport = mport;
+ rnet->open = false;
/* Set the default MAC address */
device_id = rio_local_get_device_id(mport);
@@ -489,7 +528,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
ndev->dev_addr[5] = device_id & 0xff;
ndev->netdev_ops = &rionet_netdev_ops;
- ndev->mtu = RIO_MAX_MSG_SIZE - 14;
+ ndev->mtu = RIONET_MAX_MTU;
ndev->features = NETIF_F_LLTX;
SET_NETDEV_DEV(ndev, &mport->dev);
ndev->ethtool_ops = &rionet_ethtool_ops;
@@ -500,8 +539,11 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
rc = register_netdev(ndev);
- if (rc != 0)
+ if (rc != 0) {
+ free_pages((unsigned long)nets[mport->id].active,
+ get_order(rionet_active_bytes));
goto out;
+ }
printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
ndev->name,
@@ -515,8 +557,6 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
return rc;
}
-static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1];
-
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
int rc = -ENODEV;
@@ -525,19 +565,16 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
struct net_device *ndev = NULL;
struct rio_dev *rdev = to_rio_dev(dev);
unsigned char netid = rdev->net->hport->id;
- int oldnet;
if (netid >= RIONET_MAX_NETS)
return rc;
- oldnet = test_and_set_bit(netid, net_table);
-
/*
* If first time through this net, make sure local device is rionet
* capable and setup netdev (this step will be skipped in later probes
* on the same net).
*/
- if (!oldnet) {
+ if (!nets[netid].ndev) {
rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
&lsrc_ops);
rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
@@ -555,30 +592,56 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
rc = -ENOMEM;
goto out;
}
- nets[netid].ndev = ndev;
+
rc = rionet_setup_netdev(rdev->net->hport, ndev);
if (rc) {
printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
DRV_NAME, rc);
+ free_netdev(ndev);
goto out;
}
INIT_LIST_HEAD(&nets[netid].peers);
+ spin_lock_init(&nets[netid].lock);
nets[netid].nact = 0;
- } else if (nets[netid].ndev == NULL)
- goto out;
+ nets[netid].ndev = ndev;
+ }
/*
* If the remote device has mailbox/doorbell capabilities,
* add it to the peer list.
*/
if (dev_rionet_capable(rdev)) {
- if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
+ struct rionet_private *rnet;
+ unsigned long flags;
+
+ rnet = netdev_priv(nets[netid].ndev);
+
+ peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer) {
rc = -ENOMEM;
goto out;
}
peer->rdev = rdev;
+ peer->res = rio_request_outb_dbell(peer->rdev,
+ RIONET_DOORBELL_JOIN,
+ RIONET_DOORBELL_LEAVE);
+ if (!peer->res) {
+ pr_err("%s: error requesting doorbells\n", DRV_NAME);
+ kfree(peer);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_irqsave(&nets[netid].lock, flags);
list_add_tail(&peer->node, &nets[netid].peers);
+ spin_unlock_irqrestore(&nets[netid].lock, flags);
+ pr_debug("%s: %s add peer %s\n",
+ DRV_NAME, __func__, rio_name(rdev));
+
+ /* If netdev is already opened, send join request to new peer */
+ if (rnet->open)
+ rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
}
return 0;
@@ -586,6 +649,61 @@ out:
return rc;
}
+static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
+ void *unused)
+{
+ struct rionet_peer *peer;
+ unsigned long flags;
+ int i;
+
+ pr_debug("%s: %s\n", DRV_NAME, __func__);
+
+ for (i = 0; i < RIONET_MAX_NETS; i++) {
+ if (!nets[i].ndev)
+ continue;
+
+ spin_lock_irqsave(&nets[i].lock, flags);
+ list_for_each_entry(peer, &nets[i].peers, node) {
+ if (nets[i].active[peer->rdev->destid]) {
+ rio_send_doorbell(peer->rdev,
+ RIONET_DOORBELL_LEAVE);
+ nets[i].active[peer->rdev->destid] = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&nets[i].lock, flags);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void rionet_remove_mport(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct rio_mport *mport = to_rio_mport(dev);
+ struct net_device *ndev;
+ int id = mport->id;
+
+ pr_debug("%s %s\n", __func__, mport->name);
+
+ WARN(nets[id].nact, "%s called when connected to %d peers\n",
+ __func__, nets[id].nact);
+ WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
+ __func__);
+
+ if (nets[id].ndev) {
+ ndev = nets[id].ndev;
+ netif_stop_queue(ndev);
+ unregister_netdev(ndev);
+
+ free_pages((unsigned long)nets[id].active,
+ get_order(sizeof(void *) *
+ RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
+ nets[id].active = NULL;
+ free_netdev(ndev);
+ nets[id].ndev = NULL;
+ }
+}
+
#ifdef MODULE
static struct rio_device_id rionet_id_table[] = {
{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
@@ -602,40 +720,43 @@ static struct subsys_interface rionet_interface = {
.remove_dev = rionet_remove_dev,
};
+static struct notifier_block rionet_notifier = {
+ .notifier_call = rionet_shutdown,
+};
+
+/* the rio_mport_interface is used to handle local mport devices */
+static struct class_interface rio_mport_interface __refdata = {
+ .class = &rio_mport_class,
+ .add_dev = NULL,
+ .remove_dev = rionet_remove_mport,
+};
+
static int __init rionet_init(void)
{
+ int ret;
+
+ ret = register_reboot_notifier(&rionet_notifier);
+ if (ret) {
+ pr_err("%s: failed to register reboot notifier (err=%d)\n",
+ DRV_NAME, ret);
+ return ret;
+ }
+
+ ret = class_interface_register(&rio_mport_interface);
+ if (ret) {
+ pr_err("%s: class_interface_register error: %d\n",
+ DRV_NAME, ret);
+ return ret;
+ }
+
return subsys_interface_register(&rionet_interface);
}
static void __exit rionet_exit(void)
{
- struct rionet_private *rnet;
- struct net_device *ndev;
- struct rionet_peer *peer, *tmp;
- int i;
-
- for (i = 0; i < RIONET_MAX_NETS; i++) {
- if (nets[i].ndev != NULL) {
- ndev = nets[i].ndev;
- rnet = netdev_priv(ndev);
- unregister_netdev(ndev);
-
- list_for_each_entry_safe(peer,
- tmp, &nets[i].peers, node) {
- list_del(&peer->node);
- kfree(peer);
- }
-
- free_pages((unsigned long)nets[i].active,
- get_order(sizeof(void *) *
- RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size)));
- nets[i].active = NULL;
-
- free_netdev(ndev);
- }
- }
-
+ unregister_reboot_notifier(&rionet_notifier);
subsys_interface_unregister(&rionet_interface);
+ class_interface_unregister(&rio_mport_interface);
}
late_initcall(rionet_init);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 718ceeab4dbc..a0f64cba86ba 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -758,6 +758,8 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
u64_stats_update_end(&pcpu_stats->syncp);
skb->dev = team->dev;
+ } else if (res == RX_HANDLER_EXACT) {
+ this_cpu_inc(team->pcpu_stats->rx_nohandler);
} else {
this_cpu_inc(team->pcpu_stats->rx_dropped);
}
@@ -1196,6 +1198,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_dev_open;
}
+ dev_uc_sync_multiple(port_dev, dev);
+ dev_mc_sync_multiple(port_dev, dev);
+
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
netdev_err(dev, "Failed to add vlan ids to device %s\n",
@@ -1259,6 +1264,8 @@ err_enable_netpoll:
vlan_vids_del_by_dev(port_dev, dev);
err_vids_add:
+ dev_uc_unsync(port_dev, dev);
+ dev_mc_unsync(port_dev, dev);
dev_close(port_dev);
err_dev_open:
@@ -1807,7 +1814,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct team *team = netdev_priv(dev);
struct team_pcpu_stats *p;
u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
- u32 rx_dropped = 0, tx_dropped = 0;
+ u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
unsigned int start;
int i;
@@ -1828,14 +1835,16 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
/*
- * rx_dropped & tx_dropped are u32, updated
- * without syncp protection.
+ * rx_dropped, tx_dropped & rx_nohandler are u32,
+ * updated without syncp protection.
*/
rx_dropped += p->rx_dropped;
tx_dropped += p->tx_dropped;
+ rx_nohandler += p->rx_nohandler;
}
stats->rx_dropped = rx_dropped;
stats->tx_dropped = tx_dropped;
+ stats->rx_nohandler = rx_nohandler;
return stats;
}
@@ -2078,7 +2087,6 @@ static void team_setup(struct net_device *dev)
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
dev->destructor = team_destructor;
- dev->flags |= IFF_MULTICAST;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->priv_flags |= IFF_NO_QUEUE;
dev->priv_flags |= IFF_TEAM;
@@ -2809,12 +2817,12 @@ static void __team_port_change_send(struct team_port *port, bool linkup)
port->state.linkup = linkup;
team_refresh_port_linkup(port);
if (linkup) {
- struct ethtool_cmd ecmd;
+ struct ethtool_link_ksettings ecmd;
- err = __ethtool_get_settings(port->dev, &ecmd);
+ err = __ethtool_get_link_ksettings(port->dev, &ecmd);
if (!err) {
- port->state.speed = ethtool_cmd_speed(&ecmd);
- port->state.duplex = ecmd.duplex;
+ port->state.speed = ecmd.base.speed;
+ port->state.duplex = ecmd.base.duplex;
goto send_event;
}
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 88bb8cc3555b..2c9e45f50edb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -187,6 +187,7 @@ struct tun_struct {
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
NETIF_F_TSO6|NETIF_F_UFO)
+ int align;
int vnet_hdr_sz;
int sndbuf;
struct tap_filter txflt;
@@ -621,7 +622,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
/* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
- err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+ lockdep_rtnl_is_held());
if (!err)
goto out;
}
@@ -934,6 +936,17 @@ static void tun_poll_controller(struct net_device *dev)
return;
}
#endif
+
+static void tun_set_headroom(struct net_device *dev, int new_hr)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ if (new_hr < NET_SKB_PAD)
+ new_hr = NET_SKB_PAD;
+
+ tun->align = new_hr;
+}
+
static const struct net_device_ops tun_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -945,6 +958,7 @@ static const struct net_device_ops tun_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tun_poll_controller,
#endif
+ .ndo_set_rx_headroom = tun_set_headroom,
};
static const struct net_device_ops tap_netdev_ops = {
@@ -962,6 +976,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_poll_controller = tun_poll_controller,
#endif
.ndo_features_check = passthru_features_check,
+ .ndo_set_rx_headroom = tun_set_headroom,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1000,7 +1015,6 @@ static void tun_net_init(struct net_device *dev)
/* Zero header length */
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
case IFF_TAP:
@@ -1012,7 +1026,6 @@ static void tun_net_init(struct net_device *dev)
eth_hw_addr_random(dev);
- dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
}
}
@@ -1086,7 +1099,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
size_t total_len = iov_iter_count(from);
- size_t len = total_len, align = NET_SKB_PAD, linear;
+ size_t len = total_len, align = tun->align, linear;
struct virtio_net_hdr gso = { 0 };
int good_linear;
int copylen;
@@ -1466,6 +1479,8 @@ static void tun_setup(struct net_device *dev)
dev->ethtool_ops = &tun_ethtool_ops;
dev->destructor = tun_free_netdev;
+ /* We prefer our own queue length */
+ dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
@@ -1694,6 +1709,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->txflt.count = 0;
tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+ tun->align = NET_SKB_PAD;
tun->filter_attached = false;
tun->sndbuf = tfile->socket.sk->sk_sndbuf;
@@ -1807,7 +1823,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- sk_detach_filter(tfile->socket.sk);
+ __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
}
tun->filter_attached = false;
@@ -1820,7 +1836,8 @@ static int tun_attach_filter(struct tun_struct *tun)
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
+ lockdep_rtnl_is_held());
if (ret) {
tun_detach_filter(tun, i);
return ret;
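A minimal sketch of the clamping behaviour the new tun_set_headroom() callback implements above; tun_get_user() then uses tun->align in place of the fixed NET_SKB_PAD. The NET_SKB_PAD value of 32 is assumed for illustration (it is architecture-dependent in the kernel), and the names below are invented stand-ins.

    #include <stdio.h>

    #define NET_SKB_PAD 32  /* assumed for the example */

    static int tun_align = NET_SKB_PAD;

    static void set_headroom(int new_hr)
    {
        if (new_hr < NET_SKB_PAD)   /* never drop below the default pad */
            new_hr = NET_SKB_PAD;
        tun_align = new_hr;
    }

    int main(void)
    {
        set_headroom(16);                   /* too small: clamped */
        printf("align=%d\n", tun_align);    /* 32 */
        set_headroom(128);                  /* honoured as-is */
        printf("align=%d\n", tun_align);    /* 128 */
        return 0;
    }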
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
- /* Huawei E3372 fails unless NDP comes after the IP packets */
- { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+ /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+ * (12d1:157d), are known to fail unless the NDP is placed
+ * after the IP packets. Applying the quirk to all Huawei
+ * devices is broader than necessary, but harmless.
+ */
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
},
/* default entry */
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 86ba30ba35e8..2fb31edab125 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1626,6 +1626,13 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
+ /* Telit LE910 V2 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x0036,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_noarp_info,
+ },
+
/* DW5812 LTE Verizon Mobile Broadband Card
* Unlike DW5550 this device requires FLAG_NOARP
*/
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 1c299b8a162d..f20890ee03f3 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -36,7 +36,7 @@
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx"
-#define DRIVER_VERSION "1.0.2"
+#define DRIVER_VERSION "1.0.4"
#define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8)
@@ -86,6 +86,9 @@
/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
+/* statistics update interval (mSec) */
+#define STAT_UPDATE_TIMER (1 * 1000)
+
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
"RX FCS Errors",
"RX Alignment Errors",
@@ -186,6 +189,56 @@ struct lan78xx_statstage {
u32 eee_tx_lpi_time;
};
+struct lan78xx_statstage64 {
+ u64 rx_fcs_errors;
+ u64 rx_alignment_errors;
+ u64 rx_fragment_errors;
+ u64 rx_jabber_errors;
+ u64 rx_undersize_frame_errors;
+ u64 rx_oversize_frame_errors;
+ u64 rx_dropped_frames;
+ u64 rx_unicast_byte_count;
+ u64 rx_broadcast_byte_count;
+ u64 rx_multicast_byte_count;
+ u64 rx_unicast_frames;
+ u64 rx_broadcast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_pause_frames;
+ u64 rx_64_byte_frames;
+ u64 rx_65_127_byte_frames;
+ u64 rx_128_255_byte_frames;
+ u64 rx_256_511_bytes_frames;
+ u64 rx_512_1023_byte_frames;
+ u64 rx_1024_1518_byte_frames;
+ u64 rx_greater_1518_byte_frames;
+ u64 eee_rx_lpi_transitions;
+ u64 eee_rx_lpi_time;
+ u64 tx_fcs_errors;
+ u64 tx_excess_deferral_errors;
+ u64 tx_carrier_errors;
+ u64 tx_bad_byte_count;
+ u64 tx_single_collisions;
+ u64 tx_multiple_collisions;
+ u64 tx_excessive_collision;
+ u64 tx_late_collisions;
+ u64 tx_unicast_byte_count;
+ u64 tx_broadcast_byte_count;
+ u64 tx_multicast_byte_count;
+ u64 tx_unicast_frames;
+ u64 tx_broadcast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_pause_frames;
+ u64 tx_64_byte_frames;
+ u64 tx_65_127_byte_frames;
+ u64 tx_128_255_byte_frames;
+ u64 tx_256_511_bytes_frames;
+ u64 tx_512_1023_byte_frames;
+ u64 tx_1024_1518_byte_frames;
+ u64 tx_greater_1518_byte_frames;
+ u64 eee_tx_lpi_transitions;
+ u64 eee_tx_lpi_time;
+};
+
struct lan78xx_net;
struct lan78xx_priv {
@@ -232,6 +285,15 @@ struct usb_context {
#define EVENT_DEV_WAKING 6
#define EVENT_DEV_ASLEEP 7
#define EVENT_DEV_OPEN 8
+#define EVENT_STAT_UPDATE 9
+
+struct statstage {
+ struct mutex access_lock; /* for stats access */
+ struct lan78xx_statstage saved;
+ struct lan78xx_statstage rollover_count;
+ struct lan78xx_statstage rollover_max;
+ struct lan78xx_statstage64 curr_stat;
+};
struct lan78xx_net {
struct net_device *net;
@@ -272,14 +334,22 @@ struct lan78xx_net {
unsigned maxpacket;
struct timer_list delay;
+ struct timer_list stat_monitor;
unsigned long data[5];
int link_on;
u8 mdix_ctrl;
- u32 devid;
+ u32 chipid;
+ u32 chiprev;
struct mii_bus *mdiobus;
+
+ int fc_autoneg;
+ u8 fc_request_control;
+
+ int delta;
+ struct statstage stats;
};
/* use ethtool to change the level for any given device */
@@ -378,6 +448,93 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
return ret;
}
+#define check_counter_rollover(struct1, dev_stats, member) { \
+ if (struct1->member < dev_stats.saved.member) \
+ dev_stats.rollover_count.member++; \
+ }
+
+static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
+ struct lan78xx_statstage *stats)
+{
+ check_counter_rollover(stats, dev->stats, rx_fcs_errors);
+ check_counter_rollover(stats, dev->stats, rx_alignment_errors);
+ check_counter_rollover(stats, dev->stats, rx_fragment_errors);
+ check_counter_rollover(stats, dev->stats, rx_jabber_errors);
+ check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
+ check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
+ check_counter_rollover(stats, dev->stats, rx_dropped_frames);
+ check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
+ check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
+ check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
+ check_counter_rollover(stats, dev->stats, rx_unicast_frames);
+ check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
+ check_counter_rollover(stats, dev->stats, rx_multicast_frames);
+ check_counter_rollover(stats, dev->stats, rx_pause_frames);
+ check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
+ check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
+ check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
+ check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
+ check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
+ check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
+ check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
+ check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
+ check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
+ check_counter_rollover(stats, dev->stats, tx_fcs_errors);
+ check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
+ check_counter_rollover(stats, dev->stats, tx_carrier_errors);
+ check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
+ check_counter_rollover(stats, dev->stats, tx_single_collisions);
+ check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
+ check_counter_rollover(stats, dev->stats, tx_excessive_collision);
+ check_counter_rollover(stats, dev->stats, tx_late_collisions);
+ check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
+ check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
+ check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
+ check_counter_rollover(stats, dev->stats, tx_unicast_frames);
+ check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
+ check_counter_rollover(stats, dev->stats, tx_multicast_frames);
+ check_counter_rollover(stats, dev->stats, tx_pause_frames);
+ check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
+ check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
+ check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
+ check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
+ check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
+ check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
+ check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
+ check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
+ check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
+
+ memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
+}
+
+static void lan78xx_update_stats(struct lan78xx_net *dev)
+{
+ u32 *p, *count, *max;
+ u64 *data;
+ int i;
+ struct lan78xx_statstage lan78xx_stats;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
+ p = (u32 *)&lan78xx_stats;
+ count = (u32 *)&dev->stats.rollover_count;
+ max = (u32 *)&dev->stats.rollover_max;
+ data = (u64 *)&dev->stats.curr_stat;
+
+ mutex_lock(&dev->stats.access_lock);
+
+ if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
+ lan78xx_check_stat_rollover(dev, &lan78xx_stats);
+
+ for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
+ data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
+
+ mutex_unlock(&dev->stats.access_lock);
+
+ usb_autopm_put_interface(dev->intf);
+}
+
/* Loop until the read is completed, with timeout; called with phy_mutex held */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
@@ -471,7 +628,7 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
saved = val;
- if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
}
@@ -505,7 +662,7 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
retval = 0;
exit:
- if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+ if (dev->chipid == ID_REV_CHIP_ID_7800_)
ret = lan78xx_write_reg(dev, HW_CFG, saved);
return retval;
@@ -539,7 +696,7 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
*/
ret = lan78xx_read_reg(dev, HW_CFG, &val);
saved = val;
- if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
}
@@ -587,7 +744,7 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
retval = 0;
exit:
- if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+ if (dev->chipid == ID_REV_CHIP_ID_7800_)
ret = lan78xx_write_reg(dev, HW_CFG, saved);
return retval;
@@ -901,11 +1058,15 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
{
u32 flow = 0, fct_flow = 0;
int ret;
+ u8 cap;
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ if (dev->fc_autoneg)
+ cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ else
+ cap = dev->fc_request_control;
if (cap & FLOW_CTRL_TX)
- flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
+ flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
if (cap & FLOW_CTRL_RX)
flow |= FLOW_CR_RX_FCEN_;
@@ -959,6 +1120,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
return -EIO;
phy_mac_interrupt(phydev, 0);
+
+ del_timer(&dev->stat_monitor);
} else if (phydev->link && !dev->link_on) {
dev->link_on = true;
@@ -999,6 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
phy_mac_interrupt(phydev, 1);
+
+ if (!timer_pending(&dev->stat_monitor)) {
+ dev->delta = 1;
+ mod_timer(&dev->stat_monitor,
+ jiffies + STAT_UPDATE_TIMER);
+ }
}
return ret;
@@ -1091,20 +1260,12 @@ static void lan78xx_get_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct lan78xx_net *dev = netdev_priv(netdev);
- struct lan78xx_statstage lan78xx_stat;
- u32 *p;
- int i;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return;
- if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
- p = (u32 *)&lan78xx_stat;
- for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
- data[i] = p[i];
- }
+ lan78xx_update_stats(dev);
- usb_autopm_put_interface(dev->intf);
+ mutex_lock(&dev->stats.access_lock);
+ memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
+ mutex_unlock(&dev->stats.access_lock);
}
static void lan78xx_get_wol(struct net_device *netdev,
@@ -1385,6 +1546,62 @@ static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
return ret;
}
+static void lan78xx_get_pause(struct net_device *net,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct phy_device *phydev = net->phydev;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+
+ phy_ethtool_gset(phydev, &ecmd);
+
+ pause->autoneg = dev->fc_autoneg;
+
+ if (dev->fc_request_control & FLOW_CTRL_TX)
+ pause->tx_pause = 1;
+
+ if (dev->fc_request_control & FLOW_CTRL_RX)
+ pause->rx_pause = 1;
+}
+
+static int lan78xx_set_pause(struct net_device *net,
+ struct ethtool_pauseparam *pause)
+{
+ struct lan78xx_net *dev = netdev_priv(net);
+ struct phy_device *phydev = net->phydev;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ int ret;
+
+ phy_ethtool_gset(phydev, &ecmd);
+
+ if (pause->autoneg && !ecmd.autoneg) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ dev->fc_request_control = 0;
+ if (pause->rx_pause)
+ dev->fc_request_control |= FLOW_CTRL_RX;
+
+ if (pause->tx_pause)
+ dev->fc_request_control |= FLOW_CTRL_TX;
+
+ if (ecmd.autoneg) {
+ u32 mii_adv;
+
+ ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
+ ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+ phy_ethtool_sset(phydev, &ecmd);
+ }
+
+ dev->fc_autoneg = pause->autoneg;
+
+ ret = 0;
+exit:
+ return ret;
+}
+
static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_link = lan78xx_get_link,
.nway_reset = lan78xx_nway_reset,
@@ -1403,6 +1620,8 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.set_wol = lan78xx_set_wol,
.get_eee = lan78xx_get_eee,
.set_eee = lan78xx_set_eee,
+ .get_pauseparam = lan78xx_get_pause,
+ .set_pauseparam = lan78xx_set_pause,
};
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -1555,9 +1774,9 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum);
- switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
- case 0x78000000:
- case 0x78500000:
+ switch (dev->chipid) {
+ case ID_REV_CHIP_ID_7800_:
+ case ID_REV_CHIP_ID_7850_:
/* set to internal PHY id */
dev->mdiobus->phy_mask = ~(1 << 1);
break;
@@ -1590,6 +1809,7 @@ static void lan78xx_link_status_change(struct net_device *net)
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
int ret;
+ u32 mii_adv;
struct phy_device *phydev = dev->net->phydev;
phydev = phy_find_first(dev->mdiobus);
@@ -1622,14 +1842,17 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
/* MAC doesn't support 1000T Half */
phydev->supported &= ~SUPPORTED_1000baseT_Half;
- phydev->supported |= (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+
+ /* support both flow controls */
+ dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
+ phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
+ phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+
genphy_config_aneg(phydev);
+ dev->fc_autoneg = phydev->autoneg;
+
phy_start(phydev);
netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
@@ -1918,7 +2141,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* save DEVID for later usage */
ret = lan78xx_read_reg(dev, ID_REV, &buf);
- dev->devid = buf;
+ dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
+ dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
/* Respond to the IN token with a NAK */
ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
@@ -2024,6 +2248,32 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return 0;
}
+static void lan78xx_init_stats(struct lan78xx_net *dev)
+{
+ u32 *p;
+ int i;
+
+ /* initialize for stats update
+ * some counters are 20 bits and some are 32 bits
+ */
+ p = (u32 *)&dev->stats.rollover_max;
+ for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
+ p[i] = 0xFFFFF;
+
+ dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
+ dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
+ dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
+ dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
+ dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
+ dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
+ dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
+ dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
+ dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
+ dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
+
+ lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
+}
+
static int lan78xx_open(struct net_device *net)
{
struct lan78xx_net *dev = netdev_priv(net);
@@ -2051,6 +2301,8 @@ static int lan78xx_open(struct net_device *net)
}
}
+ lan78xx_init_stats(dev);
+
set_bit(EVENT_DEV_OPEN, &dev->flags);
netif_start_queue(net);
@@ -2095,6 +2347,9 @@ int lan78xx_stop(struct net_device *net)
{
struct lan78xx_net *dev = netdev_priv(net);
+ if (timer_pending(&dev->stat_monitor))
+ del_timer_sync(&dev->stat_monitor);
+
phy_stop(net->phydev);
phy_disconnect(net->phydev);
net->phydev = NULL;
@@ -2839,6 +3094,13 @@ static void lan78xx_bh(unsigned long param)
}
if (netif_device_present(dev->net) && netif_running(dev->net)) {
+ /* reset update timer delta */
+ if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
+ dev->delta = 1;
+ mod_timer(&dev->stat_monitor,
+ jiffies + STAT_UPDATE_TIMER);
+ }
+
if (!skb_queue_empty(&dev->txq_pend))
lan78xx_tx_bh(dev);
@@ -2913,6 +3175,17 @@ skip_reset:
usb_autopm_put_interface(dev->intf);
}
}
+
+ if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
+ lan78xx_update_stats(dev);
+
+ clear_bit(EVENT_STAT_UPDATE, &dev->flags);
+
+ mod_timer(&dev->stat_monitor,
+ jiffies + (STAT_UPDATE_TIMER * dev->delta));
+
+ dev->delta = min((dev->delta * 2), 50);
+ }
}
static void intr_complete(struct urb *urb)
@@ -3003,6 +3276,15 @@ static const struct net_device_ops lan78xx_netdev_ops = {
.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
};
+static void lan78xx_stat_monitor(unsigned long param)
+{
+ struct lan78xx_net *dev;
+
+ dev = (struct lan78xx_net *)param;
+
+ lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
+}
+
static int lan78xx_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -3049,6 +3331,13 @@ static int lan78xx_probe(struct usb_interface *intf,
netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
netdev->ethtool_ops = &lan78xx_ethtool_ops;
+ dev->stat_monitor.function = lan78xx_stat_monitor;
+ dev->stat_monitor.data = (unsigned long)dev;
+ dev->delta = 1;
+ init_timer(&dev->stat_monitor);
+
+ mutex_init(&dev->stats.access_lock);
+
ret = lan78xx_bind(dev, intf);
if (ret < 0)
goto out2;
@@ -3326,6 +3615,8 @@ int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
}
if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+ del_timer(&dev->stat_monitor);
+
if (PMSG_IS_AUTO(message)) {
/* auto suspend (selective suspend) */
ret = lan78xx_read_reg(dev, MAC_TX, &buf);
@@ -3386,6 +3677,12 @@ int lan78xx_resume(struct usb_interface *intf)
int ret;
u32 buf;
+ if (!timer_pending(&dev->stat_monitor)) {
+ dev->delta = 1;
+ mod_timer(&dev->stat_monitor,
+ jiffies + STAT_UPDATE_TIMER);
+ }
+
if (!--dev->suspend_count) {
/* resume interrupt URBs */
if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
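A worked example of the rollover compensation lan78xx_update_stats() applies above, assuming a 20-bit hardware counter (rollover_max = 0xFFFFF, as lan78xx_init_stats() sets for most counters). Each observed wrap adds (max + 1) to the 64-bit total; the variable names below are invented for the sketch.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t raw = 0x00010;   /* current hardware reading */
        uint32_t saved = 0xFFFF0; /* previous reading */
        uint32_t rollovers = 0;
        uint64_t max = 0xFFFFF;   /* 20-bit counter limit */

        /* check_counter_rollover(): reading went backwards => wrapped */
        if (raw < saved)
            rollovers++;

        /* data[i] = p[i] + count[i] * (max[i] + 1), as in the patch */
        uint64_t extended = (uint64_t)raw + (uint64_t)rollovers * (max + 1);

        printf("extended=%llu\n", (unsigned long long)extended); /* 1048592 */
        return 0;
    }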
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
index a93fb653e7c5..40927906109a 100644
--- a/drivers/net/usb/lan78xx.h
+++ b/drivers/net/usb/lan78xx.h
@@ -107,6 +107,7 @@
#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_ID_7800_ (0x7800)
+#define ID_REV_CHIP_ID_7850_ (0x7850)
#define FPGA_REV (0x04)
#define FPGA_REV_MINOR_MASK_ (0x0000FF00)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 1bfe0fcaccf5..22e1a9a99a7d 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -38,7 +38,7 @@
* HEADS UP: this handshaking isn't all that robust. This driver
* gets confused easily if you unplug one end of the cable then
* try to connect it again; you'll need to restart both ends. The
- * "naplink" software (used by some PlayStation/2 deveopers) does
+ * "naplink" software (used by some PlayStation/2 developers) does
* the handshaking much better! Also, sometimes this hardware
* seems to get wedged under load. Prolific docs are weak, and
* don't identify differences between PL2301 and PL2302, much less
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a3a4ccf7cf52..9d1fce8a6e84 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -844,6 +844,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -881,6 +882,9 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio) */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ba21d072be31..4f30a6ae50d0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -35,6 +35,7 @@ struct pcpu_vstats {
struct veth_priv {
struct net_device __rcu *peer;
atomic64_t dropped;
+ unsigned requested_headroom;
};
/*
@@ -271,6 +272,29 @@ static int veth_get_iflink(const struct net_device *dev)
return iflink;
}
+static void veth_set_rx_headroom(struct net_device *dev, int new_hr)
+{
+ struct veth_priv *peer_priv, *priv = netdev_priv(dev);
+ struct net_device *peer;
+
+ if (new_hr < 0)
+ new_hr = 0;
+
+ rcu_read_lock();
+ peer = rcu_dereference(priv->peer);
+ if (unlikely(!peer))
+ goto out;
+
+ peer_priv = netdev_priv(peer);
+ priv->requested_headroom = new_hr;
+ new_hr = max(priv->requested_headroom, peer_priv->requested_headroom);
+ dev->needed_headroom = new_hr;
+ peer->needed_headroom = new_hr;
+
+out:
+ rcu_read_unlock();
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -285,6 +309,7 @@ static const struct net_device_ops veth_netdev_ops = {
#endif
.ndo_get_iflink = veth_get_iflink,
.ndo_features_check = passthru_features_check,
+ .ndo_set_rx_headroom = veth_set_rx_headroom,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
@@ -301,6 +326,7 @@ static void veth_setup(struct net_device *dev)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_PHONY_HEADROOM;
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
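A minimal sketch of how the new veth_set_rx_headroom() reconciles the two peers: each side records what it was asked for, and both ends are set to the max of the two requests. Everything below is an invented stand-in for the kernel structures (and the RCU handling is omitted).

    #include <stdio.h>

    struct veth { int requested_headroom; int needed_headroom; };

    static void set_rx_headroom(struct veth *dev, struct veth *peer, int new_hr)
    {
        if (new_hr < 0)
            new_hr = 0;
        dev->requested_headroom = new_hr;
        if (peer->requested_headroom > new_hr)
            new_hr = peer->requested_headroom;
        dev->needed_headroom = new_hr;
        peer->needed_headroom = new_hr;
    }

    int main(void)
    {
        struct veth a = { 0, 0 }, b = { 0, 0 };

        set_rx_headroom(&a, &b, 64);
        set_rx_headroom(&b, &a, 16);  /* smaller request: the max still wins */
        printf("a=%d b=%d\n", a.needed_headroom, b.needed_headroom); /* 64 64 */
        return 0;
    }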
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 767ab11a6e9f..49d84e540343 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -146,6 +146,10 @@ struct virtnet_info {
virtio_net_ctrl_ack ctrl_status;
u8 ctrl_promisc;
u8 ctrl_allmulti;
+
+ /* Ethtool settings */
+ u8 duplex;
+ u32 speed;
};
struct padded_vnet_hdr {
@@ -256,7 +260,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
p = page_address(page) + offset;
/* copy small packet so we can reuse these pages for small data */
- skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+ skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
if (unlikely(!skb))
return NULL;
@@ -1376,6 +1380,60 @@ static void virtnet_get_channels(struct net_device *dev,
channels->other_count = 0;
}
+/* Check if the user is trying to change anything besides speed/duplex */
+static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
+{
+ struct ethtool_cmd diff1 = *cmd;
+ struct ethtool_cmd diff2 = {};
+
+ /* cmd is always set, so we need to clear it and validate the port
+ * type; without autonegotiation we can also ignore advertising
+ */
+ ethtool_cmd_speed_set(&diff1, 0);
+ diff2.port = PORT_OTHER;
+ diff1.advertising = 0;
+ diff1.duplex = 0;
+ diff1.cmd = 0;
+
+ return !memcmp(&diff1, &diff2, sizeof(diff1));
+}
+
+static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u32 speed;
+
+ speed = ethtool_cmd_speed(cmd);
+ /* don't allow custom speed and duplex */
+ if (!ethtool_validate_speed(speed) ||
+ !ethtool_validate_duplex(cmd->duplex) ||
+ !virtnet_validate_ethtool_cmd(cmd))
+ return -EINVAL;
+ vi->speed = speed;
+ vi->duplex = cmd->duplex;
+
+ return 0;
+}
+
+static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ ethtool_cmd_speed_set(cmd, vi->speed);
+ cmd->duplex = vi->duplex;
+ cmd->port = PORT_OTHER;
+
+ return 0;
+}
+
+static void virtnet_init_settings(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ vi->speed = SPEED_UNKNOWN;
+ vi->duplex = DUPLEX_UNKNOWN;
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
.get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1383,6 +1441,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_channels = virtnet_set_channels,
.get_channels = virtnet_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_settings = virtnet_get_settings,
+ .set_settings = virtnet_set_settings,
};
#define MIN_MTU 68
@@ -1855,6 +1915,8 @@ static int virtnet_probe(struct virtio_device *vdev)
netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
+ virtnet_init_settings(dev);
+
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
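A minimal sketch of the validation pattern virtnet_validate_ethtool_cmd() uses above: clear the fields the caller may legitimately set in a scratch copy, then memcmp against a struct holding only the accepted defaults. The struct and PORT_OTHER value below are stand-ins, not the real ethtool definitions.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PORT_OTHER 0xff     /* stand-in value, not the ethtool one */

    struct fake_cmd {           /* stand-in for struct ethtool_cmd */
        uint32_t cmd;
        uint32_t speed;
        uint32_t duplex;
        uint32_t port;
        uint32_t advertising;
        uint32_t autoneg;       /* any field not cleared below must be 0 */
    };

    static int only_speed_duplex_changed(const struct fake_cmd *cmd)
    {
        struct fake_cmd diff1 = *cmd;
        struct fake_cmd diff2 = { 0 };

        /* Clear the fields the caller is allowed to set ... */
        diff1.cmd = 0;
        diff1.speed = 0;
        diff1.duplex = 0;
        diff1.advertising = 0;
        /* ... and require port == PORT_OTHER with everything else zero. */
        diff2.port = PORT_OTHER;

        return !memcmp(&diff1, &diff2, sizeof(diff1));
    }

    int main(void)
    {
        struct fake_cmd ok = { .cmd = 1, .speed = 1000, .duplex = 1,
                               .port = PORT_OTHER };
        struct fake_cmd bad = ok;

        bad.autoneg = 1;        /* touches a field outside speed/duplex */
        printf("%d %d\n", only_speed_duplex_changed(&ok),
               only_speed_duplex_changed(&bad));    /* prints: 1 0 */
        return 0;
    }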
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index fc895d0e85d9..db8022ae415b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1022,14 +1022,16 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
if (ctx.mss) {
if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
VMXNET3_MAX_TX_BUF_SIZE)) {
- goto hdr_too_big;
+ tq->stats.drop_oversized_hdr++;
+ goto drop_pkt;
}
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (unlikely(ctx.eth_ip_hdr_size +
skb->csum_offset >
VMXNET3_MAX_CSUM_OFFSET)) {
- goto hdr_too_big;
+ tq->stats.drop_oversized_hdr++;
+ goto drop_pkt;
}
}
}
@@ -1123,8 +1125,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
return NETDEV_TX_OK;
-hdr_too_big:
- tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
union Vmxnet3_GenericDesc *gdesc)
{
if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
- /* typical case: TCP/UDP over IP and both csums are correct */
- if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
- VMXNET3_RCD_CSUM_OK) {
+ if (gdesc->rcd.v4 &&
+ (le32_to_cpu(gdesc->dword[3]) &
+ VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+ BUG_ON(gdesc->rcd.frg);
+ } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+ (1 << VMXNET3_RCD_TUC_SHIFT))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
- BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
BUG_ON(gdesc->rcd.frg);
} else {
if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344e6774..c4825392d64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bdcf617a9d52..8a8f1e58b415 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -32,7 +32,6 @@
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
-#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
@@ -61,41 +60,6 @@ struct pcpu_dstats {
struct u64_stats_sync syncp;
};
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
- return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
- return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
- /* TO-DO: return max ethernet size? */
- return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
- /* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
- return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
- .family = AF_INET,
- .local_out = vrf_ip_local_out,
- .check = vrf_ip_check,
- .mtu = vrf_v4_mtu,
- .destroy = vrf_dst_destroy,
- .default_advmss = vrf_default_advmss,
-};
-
/* neighbor handling is done with actual device; do not want
* to flip skb->dev for those ndisc packets. This really fails
* for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -350,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
}
#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
- return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
- .family = AF_INET6,
- .local_out = ip6_local_out,
- .check = vrf_ip6_check,
- .mtu = vrf_v4_mtu,
- .destroy = vrf_dst_destroy,
- .default_advmss = vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
- vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
- sizeof(struct rt6_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vrf_dst_ops6.kmem_cachep)
- return -ENOMEM;
-
- return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
- kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
- skb->dev->stats.rx_errors++;
- kfree_skb(skb);
- return 0;
-}
-
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
struct sk_buff *skb)
@@ -430,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
{
- dst_destroy(&vrf->rt6->dst);
- free_percpu(vrf->rt6->rt6i_pcpu);
+ dst_release(&vrf->rt6->dst);
vrf->rt6 = NULL;
}
static int vrf_rt6_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct dst_entry *dst;
+ struct net *net = dev_net(dev);
struct rt6_info *rt6;
- int cpu;
int rc = -ENOMEM;
- rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
- DST_OBSOLETE_NONE,
- (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ rt6 = ip6_dst_alloc(net, dev,
+ DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
if (!rt6)
goto out;
- dst = &rt6->dst;
-
- rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
- if (!rt6->rt6i_pcpu) {
- dst_destroy(dst);
- goto out;
- }
- for_each_possible_cpu(cpu) {
- struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
- *p = NULL;
- }
-
- memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
- INIT_LIST_HEAD(&rt6->rt6i_siblings);
- INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
- rt6->dst.input = vrf_input6;
rt6->dst.output = vrf_output6;
-
- rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
- atomic_set(&rt6->dst.__refcnt, 2);
-
+ rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+ dst_hold(&rt6->dst);
vrf->rt6 = rt6;
rc = 0;
out:
return rc;
}
#else
-static int init_dst_ops6_kmem_cachep(void)
-{
- return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
{
}
@@ -558,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
{
struct dst_entry *dst = (struct dst_entry *)vrf->rth;
- dst_destroy(dst);
+ dst_release(dst);
vrf->rth = NULL;
}
@@ -571,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
struct net_vrf *vrf = netdev_priv(dev);
struct rtable *rth;
- rth = dst_alloc(&vrf_dst_ops, dev, 2,
- DST_OBSOLETE_NONE,
- (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
if (rth) {
rth->dst.output = vrf_output;
- rth->rt_genid = rt_genid_ipv4(dev_net(dev));
- rth->rt_flags = 0;
- rth->rt_type = RTN_UNICAST;
- rth->rt_is_input = 0;
- rth->rt_iif = 0;
- rth->rt_pmtu = 0;
- rth->rt_gateway = 0;
- rth->rt_uses_gateway = 0;
rth->rt_table_id = vrf->tb_id;
- INIT_LIST_HEAD(&rth->rt_uncached);
- rth->rt_uncached_list = NULL;
}
return rth;
@@ -674,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
struct net_device *port_dev;
struct list_head *iter;
- vrf_rtable_destroy(vrf);
- vrf_rt6_destroy(vrf);
+ vrf_rtable_release(vrf);
+ vrf_rt6_release(vrf);
netdev_for_each_lower_dev(dev, port_dev, iter)
vrf_del_slave(dev, port_dev);
@@ -705,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
return 0;
out_rth:
- vrf_rtable_destroy(vrf);
+ vrf_rtable_release(vrf);
out_stats:
free_percpu(dev->dstats);
dev->dstats = NULL;
@@ -738,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
struct net_vrf *vrf = netdev_priv(dev);
rth = vrf->rth;
- atomic_inc(&rth->dst.__refcnt);
+ dst_hold(&rth->dst);
}
return rth;
@@ -789,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
struct net_vrf *vrf = netdev_priv(dev);
rt = vrf->rt6;
- atomic_inc(&rt->dst.__refcnt);
+ dst_hold(&rt->dst);
}
return (struct dst_entry *)rt;
@@ -880,6 +759,24 @@ static int vrf_fillinfo(struct sk_buff *skb,
return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}
+static size_t vrf_get_slave_size(const struct net_device *bond_dev,
+ const struct net_device *slave_dev)
+{
+ return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */
+}
+
+static int vrf_fill_slave_info(struct sk_buff *skb,
+ const struct net_device *vrf_dev,
+ const struct net_device *slave_dev)
+{
+ struct net_vrf *vrf = netdev_priv(vrf_dev);
+
+ if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};
@@ -893,6 +790,9 @@ static struct rtnl_link_ops vrf_link_ops __read_mostly = {
.validate = vrf_validate,
.fill_info = vrf_fillinfo,
+ .get_slave_size = vrf_get_slave_size,
+ .fill_slave_info = vrf_fill_slave_info,
+
.newlink = vrf_newlink,
.dellink = vrf_dellink,
.setup = vrf_setup,
@@ -926,19 +826,6 @@ static int __init vrf_init_module(void)
{
int rc;
- vrf_dst_ops.kmem_cachep =
- kmem_cache_create("vrf_ip_dst_cache",
- sizeof(struct rtable), 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
-
- if (!vrf_dst_ops.kmem_cachep)
- return -ENOMEM;
-
- rc = init_dst_ops6_kmem_cachep();
- if (rc != 0)
- goto error2;
-
register_netdevice_notifier(&vrf_notifier_block);
rc = rtnl_link_register(&vrf_link_ops);
@@ -949,22 +836,10 @@ static int __init vrf_init_module(void)
error:
unregister_netdevice_notifier(&vrf_notifier_block);
- free_dst_ops6_kmem_cachep();
-error2:
- kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
return rc;
}
-static void __exit vrf_cleanup_module(void)
-{
- rtnl_link_unregister(&vrf_link_ops);
- unregister_netdevice_notifier(&vrf_notifier_block);
- kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
- free_dst_ops6_kmem_cachep();
-}
-
module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1c32bd104797..1c0fa364323e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -42,7 +42,7 @@
#include <net/netns/generic.h>
#include <net/vxlan.h>
#include <net/protocol.h>
-#include <net/udp_tunnel.h>
+
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
@@ -197,9 +197,9 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
#endif
/* Virtual Network hash table head */
-static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
+static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
- return &vs->vni_list[hash_32(id, VNI_HASH_BITS)];
+ return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}
/* Socket hash table head */
@@ -242,12 +242,16 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
return NULL;
}
-static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni)
{
struct vxlan_dev *vxlan;
- hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
- if (vxlan->default_dst.remote_vni == id)
+ /* For flow based devices, map all packets to VNI 0 */
+ if (vs->flags & VXLAN_F_COLLECT_METADATA)
+ vni = 0;
+
+ hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) {
+ if (vxlan->default_dst.remote_vni == vni)
return vxlan;
}
@@ -255,7 +259,7 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
}
/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
+static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni,
sa_family_t family, __be16 port,
u32 flags)
{
@@ -265,7 +269,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
if (!vs)
return NULL;
- return vxlan_vs_find_vni(vs, id);
+ return vxlan_vs_find_vni(vs, vni);
}
/* Fill in neighbour message in skbuff. */
@@ -315,7 +319,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
nla_put_be16(skb, NDA_PORT, rdst->remote_port))
goto nla_put_failure;
if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
- nla_put_u32(skb, NDA_VNI, rdst->remote_vni))
+ nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
goto nla_put_failure;
if (rdst->remote_ifindex &&
nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
@@ -383,7 +387,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
};
struct vxlan_rdst remote = {
.remote_ip = *ipa, /* goes to NDA_DST */
- .remote_vni = VXLAN_N_VID,
+ .remote_vni = cpu_to_be32(VXLAN_N_VID),
};
vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
@@ -452,7 +456,7 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port,
- __u32 vni, __u32 ifindex)
+ __be32 vni, __u32 ifindex)
{
struct vxlan_rdst *rd;
@@ -469,7 +473,8 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
- union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
+ union vxlan_addr *ip, __be16 port, __be32 vni,
+ __u32 ifindex)
{
struct vxlan_rdst *rd;
@@ -480,6 +485,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
if (!rd)
return 0;
+
+ dst_cache_reset(&rd->dst_cache);
rd->remote_ip = *ip;
rd->remote_port = port;
rd->remote_vni = vni;
@@ -489,7 +496,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
- union vxlan_addr *ip, __be16 port, __u32 vni,
+ union vxlan_addr *ip, __be16 port, __be32 vni,
__u32 ifindex, struct vxlan_rdst **rdp)
{
struct vxlan_rdst *rd;
@@ -501,6 +508,12 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
return -ENOBUFS;
+
+ if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
+ kfree(rd);
+ return -ENOBUFS;
+ }
+
rd->remote_ip = *ip;
rd->remote_port = port;
rd->remote_vni = vni;
@@ -515,7 +528,8 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
unsigned int off,
struct vxlanhdr *vh, size_t hdrlen,
- u32 data, struct gro_remcsum *grc,
+ __be32 vni_field,
+ struct gro_remcsum *grc,
bool nopartial)
{
size_t start, offset;
@@ -526,10 +540,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
if (!NAPI_GRO_CB(skb)->csum_valid)
return NULL;
- start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
- offset = start + ((data & VXLAN_RCO_UDP) ?
- offsetof(struct udphdr, check) :
- offsetof(struct tcphdr, check));
+ start = vxlan_rco_start(vni_field);
+ offset = start + vxlan_rco_offset(vni_field);
vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
start, offset, grc, nopartial);
@@ -549,7 +561,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
int flush = 1;
struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
udp_offloads);
- u32 flags;
+ __be32 flags;
struct gro_remcsum grc;
skb_gro_remcsum_init(&grc);
@@ -565,11 +577,11 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
- flags = ntohl(vh->vx_flags);
+ flags = vh->vx_flags;
if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
- ntohl(vh->vx_vni), &grc,
+ vh->vx_vni, &grc,
!!(vs->flags &
VXLAN_F_REMCSUM_NOPARTIAL));
@@ -579,8 +591,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
- flush = 0;
-
for (p = *head; p; p = p->next) {
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -594,6 +604,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
}
pp = eth_gro_receive(head, skb);
+ flush = 0;
out:
skb_gro_remcsum_cleanup(skb, &grc);
@@ -660,7 +671,7 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
- __be16 port, __u32 vni, __u32 ifindex,
+ __be16 port, __be32 vni, __u32 ifindex,
__u8 ndm_flags)
{
struct vxlan_rdst *rd = NULL;
@@ -749,8 +760,10 @@ static void vxlan_fdb_free(struct rcu_head *head)
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
struct vxlan_rdst *rd, *nd;
- list_for_each_entry_safe(rd, nd, &f->remotes, list)
+ list_for_each_entry_safe(rd, nd, &f->remotes, list) {
+ dst_cache_destroy(&rd->dst_cache);
kfree(rd);
+ }
kfree(f);
}
@@ -767,7 +780,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
- union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
+ union vxlan_addr *ip, __be16 *port, __be32 *vni,
+ u32 *ifindex)
{
struct net *net = dev_net(vxlan->dev);
int err;
@@ -800,7 +814,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
if (tb[NDA_VNI]) {
if (nla_len(tb[NDA_VNI]) != sizeof(u32))
return -EINVAL;
- *vni = nla_get_u32(tb[NDA_VNI]);
+ *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
} else {
*vni = vxlan->default_dst.remote_vni;
}
@@ -830,7 +844,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* struct net *net = dev_net(vxlan->dev); */
union vxlan_addr ip;
__be16 port;
- u32 vni, ifindex;
+ __be32 vni;
+ u32 ifindex;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -867,7 +882,8 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct vxlan_rdst *rd = NULL;
union vxlan_addr ip;
__be16 port;
- u32 vni, ifindex;
+ __be32 vni;
+ u32 ifindex;
int err;
err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
@@ -1124,177 +1140,166 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
return ret;
}
-static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
- size_t hdrlen, u32 data, bool nopartial)
+static bool vxlan_remcsum(struct vxlanhdr *unparsed,
+ struct sk_buff *skb, u32 vxflags)
{
- size_t start, offset, plen;
+ size_t start, offset;
- if (skb->remcsum_offload)
- return vh;
+ if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
+ goto out;
- start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
- offset = start + ((data & VXLAN_RCO_UDP) ?
- offsetof(struct udphdr, check) :
- offsetof(struct tcphdr, check));
+ start = vxlan_rco_start(unparsed->vx_vni);
+ offset = start + vxlan_rco_offset(unparsed->vx_vni);
- plen = hdrlen + offset + sizeof(u16);
+ if (!pskb_may_pull(skb, offset + sizeof(u16)))
+ return false;
- if (!pskb_may_pull(skb, plen))
- return NULL;
+ skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
+ !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
+out:
+ unparsed->vx_flags &= ~VXLAN_HF_RCO;
+ unparsed->vx_vni &= VXLAN_VNI_MASK;
+ return true;
+}
- vh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
+ struct sk_buff *skb, u32 vxflags,
+ struct vxlan_metadata *md)
+{
+ struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
+ struct metadata_dst *tun_dst;
- skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset,
- nopartial);
+ if (!(unparsed->vx_flags & VXLAN_HF_GBP))
+ goto out;
- return vh;
+ md->gbp = ntohs(gbp->policy_id);
+
+ tun_dst = (struct metadata_dst *)skb_dst(skb);
+ if (tun_dst) {
+ tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
+ tun_dst->u.tun_info.options_len = sizeof(*md);
+ }
+ if (gbp->dont_learn)
+ md->gbp |= VXLAN_GBP_DONT_LEARN;
+
+ if (gbp->policy_applied)
+ md->gbp |= VXLAN_GBP_POLICY_APPLIED;
+
+ /* In flow-based mode, GBP is carried in dst_metadata */
+ if (!(vxflags & VXLAN_F_COLLECT_METADATA))
+ skb->mark = md->gbp;
+out:
+ unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
- struct vxlan_metadata *md, u32 vni,
- struct metadata_dst *tun_dst)
+static bool vxlan_set_mac(struct vxlan_dev *vxlan,
+ struct vxlan_sock *vs,
+ struct sk_buff *skb)
{
- struct iphdr *oip = NULL;
- struct ipv6hdr *oip6 = NULL;
- struct vxlan_dev *vxlan;
- struct pcpu_sw_netstats *stats;
union vxlan_addr saddr;
- int err = 0;
-
- /* For flow based devices, map all packets to VNI 0 */
- if (vs->flags & VXLAN_F_COLLECT_METADATA)
- vni = 0;
-
- /* Is this VNI defined? */
- vxlan = vxlan_vs_find_vni(vs, vni);
- if (!vxlan)
- goto drop;
skb_reset_mac_header(skb);
- skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
skb->protocol = eth_type_trans(skb, vxlan->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
/* Ignore packet loops (and multicast echo) */
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
- goto drop;
+ return false;
- /* Get data from the outer IP header */
+ /* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
- oip = ip_hdr(skb);
- saddr.sin.sin_addr.s_addr = oip->saddr;
+ saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- oip6 = ipv6_hdr(skb);
- saddr.sin6.sin6_addr = oip6->saddr;
+ saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
saddr.sa.sa_family = AF_INET6;
#endif
}
- if (tun_dst) {
- skb_dst_set(skb, (struct dst_entry *)tun_dst);
- tun_dst = NULL;
- }
-
if ((vxlan->flags & VXLAN_F_LEARN) &&
vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
- goto drop;
-
- skb_reset_network_header(skb);
- /* In flow-based mode, GBP is carried in dst_metadata */
- if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
- skb->mark = md->gbp;
-
- if (oip6)
- err = IP6_ECN_decapsulate(oip6, skb);
- if (oip)
- err = IP_ECN_decapsulate(oip, skb);
-
- if (unlikely(err)) {
- if (log_ecn_error) {
- if (oip6)
- net_info_ratelimited("non-ECT from %pI6\n",
- &oip6->saddr);
- if (oip)
- net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
- &oip->saddr, oip->tos);
- }
- if (err > 1) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
- goto drop;
- }
- }
+ return false;
- stats = this_cpu_ptr(vxlan->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ return true;
+}
- gro_cells_receive(&vxlan->gro_cells, skb);
+static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
+ struct sk_buff *skb)
+{
+ int err = 0;
- return;
-drop:
- if (tun_dst)
- dst_release((struct dst_entry *)tun_dst);
+ if (vxlan_get_sk_family(vs) == AF_INET)
+ err = IP_ECN_decapsulate(oiph, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ err = IP6_ECN_decapsulate(oiph, skb);
+#endif
- /* Consume bad packet */
- kfree_skb(skb);
+ if (unlikely(err) && log_ecn_error) {
+ if (vxlan_get_sk_family(vs) == AF_INET)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &((struct iphdr *)oiph)->saddr,
+ ((struct iphdr *)oiph)->tos);
+ else
+ net_info_ratelimited("non-ECT from %pI6\n",
+ &((struct ipv6hdr *)oiph)->saddr);
+ }
+ return err <= 1;
}
/* Callback from net/ipv4/udp.c to receive packets */
-static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
- struct metadata_dst *tun_dst = NULL;
+ struct pcpu_sw_netstats *stats;
+ struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
- struct vxlanhdr *vxh;
- u32 flags, vni;
+ struct vxlanhdr unparsed;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
+ void *oiph;
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
- goto error;
-
- vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
- flags = ntohl(vxh->vx_flags);
- vni = ntohl(vxh->vx_vni);
+ return 1;
- if (flags & VXLAN_HF_VNI) {
- flags &= ~VXLAN_HF_VNI;
- } else {
- /* VNI flag always required to be set */
- goto bad_flags;
+ unparsed = *vxlan_hdr(skb);
+ /* VNI flag always required to be set */
+ if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
+ netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
+ ntohl(vxlan_hdr(skb)->vx_flags),
+ ntohl(vxlan_hdr(skb)->vx_vni));
+ /* Return non vxlan pkt */
+ return 1;
}
-
- if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
- goto drop;
- vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
+ unparsed.vx_flags &= ~VXLAN_HF_VNI;
+ unparsed.vx_vni &= ~VXLAN_VNI_MASK;
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
- if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
- vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni,
- !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL));
- if (!vxh)
- goto drop;
+ vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
+ if (!vxlan)
+ goto drop;
- flags &= ~VXLAN_HF_RCO;
- vni &= VXLAN_VNI_MASK;
- }
+ if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB),
+ !net_eq(vxlan->net, dev_net(vxlan->dev))))
+ goto drop;
if (vxlan_collect_metadata(vs)) {
+ __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+ struct metadata_dst *tun_dst;
+
tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
- cpu_to_be64(vni >> 8), sizeof(*md));
+ vxlan_vni_to_tun_id(vni), sizeof(*md));
if (!tun_dst)
goto drop;
md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
} else {
memset(md, 0, sizeof(*md));
}
@@ -1302,27 +1307,13 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
/* For backwards compatibility, only allow reserved fields to be
* used by VXLAN extensions if explicitly requested.
*/
- if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) {
- struct vxlanhdr_gbp *gbp;
-
- gbp = (struct vxlanhdr_gbp *)vxh;
- md->gbp = ntohs(gbp->policy_id);
-
- if (tun_dst) {
- tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
- tun_dst->u.tun_info.options_len = sizeof(*md);
- }
-
- if (gbp->dont_learn)
- md->gbp |= VXLAN_GBP_DONT_LEARN;
-
- if (gbp->policy_applied)
- md->gbp |= VXLAN_GBP_POLICY_APPLIED;
-
- flags &= ~VXLAN_GBP_USED_BITS;
- }
+ if (vs->flags & VXLAN_F_REMCSUM_RX)
+ if (!vxlan_remcsum(&unparsed, skb, vs->flags))
+ goto drop;
+ if (vs->flags & VXLAN_F_GBP)
+ vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
- if (flags || vni & ~VXLAN_VNI_MASK) {
+ if (unparsed.vx_flags || unparsed.vx_vni) {
/* If there are any unprocessed flags remaining treat
* this as a malformed packet. This behavior diverges from
* VXLAN RFC (RFC7348) which stipulates that bits in reserved
@@ -1331,28 +1322,34 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
* is more robust and provides a little more security in
* adding extensions to VXLAN.
*/
+ goto drop;
+ }
- goto bad_flags;
+ if (!vxlan_set_mac(vxlan, vs, skb))
+ goto drop;
+
+ oiph = skb_network_header(skb);
+ skb_reset_network_header(skb);
+
+ if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
+ ++vxlan->dev->stats.rx_frame_errors;
+ ++vxlan->dev->stats.rx_errors;
+ goto drop;
}
- vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
+ stats = this_cpu_ptr(vxlan->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ gro_cells_receive(&vxlan->gro_cells, skb);
return 0;
drop:
/* Consume bad packet */
kfree_skb(skb);
return 0;
-
-bad_flags:
- netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
- ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
-
-error:
- if (tun_dst)
- dst_release((struct dst_entry *)tun_dst);
-
- /* Return non vxlan pkt */
- return 1;
}
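
The rewritten receive path copies the header into `unparsed` and lets each extension parser (vxlan_remcsum(), vxlan_parse_gbp_hdr()) clear the bits it consumed, so any bit still set at the end marks the packet malformed. It also replaces open-coded pointer math with small accessors. A minimal sketch of what those helpers plausibly look like, with the usual endian juggling (an approximation, not the series' exact definitions):

    static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
    {
            /* the VXLAN header sits right behind the UDP header */
            return (struct vxlanhdr *)(udp_hdr(skb) + 1);
    }

    static inline __be32 vxlan_vni(__be32 vni_field)
    {
            /* drop the reserved low octet, keeping the 24-bit VNI */
    #if defined(__BIG_ENDIAN)
            return (__force __be32)((__force u32)vni_field >> 8);
    #else
            return (__force __be32)((__force u32)vni_field << 8);
    #endif
    }

    static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
    {
            /* widen the 24-bit VNI into the 64-bit tunnel key */
    #if defined(__BIG_ENDIAN)
            return (__force __be64)vni;
    #else
            return (__force __be64)((u64)(__force u32)vni << 32);
    #endif
    }
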
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
@@ -1463,7 +1460,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
reply->dev = dev;
skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
skb_push(reply, sizeof(struct ethhdr));
- skb_set_mac_header(reply, 0);
+ skb_reset_mac_header(reply);
ns = (struct nd_msg *)skb_transport_header(request);
@@ -1483,7 +1480,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
reply->protocol = htons(ETH_P_IPV6);
skb_pull(reply, sizeof(struct ethhdr));
- skb_set_network_header(reply, 0);
+ skb_reset_network_header(reply);
skb_put(reply, sizeof(struct ipv6hdr));
/* IPv6 header */
@@ -1498,7 +1495,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
pip6->saddr = *(struct in6_addr *)n->primary_key;
skb_pull(reply, sizeof(struct ipv6hdr));
- skb_set_transport_header(reply, 0);
+ skb_reset_transport_header(reply);
na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
@@ -1677,7 +1674,7 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
return;
gbp = (struct vxlanhdr_gbp *)vxh;
- vxh->vx_flags |= htonl(VXLAN_HF_GBP);
+ vxh->vx_flags |= VXLAN_HF_GBP;
if (md->gbp & VXLAN_GBP_DONT_LEARN)
gbp->dont_learn = 1;
@@ -1688,20 +1685,15 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
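
Dropping the htonl() here (and at the VXLAN_HF_VNI/VXLAN_HF_RCO use sites above) only works if the flag constants themselves are now defined in network byte order. A plausible set of definitions, matching the host-order BIT() values the old code used (an assumption; the actual defines live in the header changes of this series):

    #define VXLAN_HF_GBP	cpu_to_be32(BIT(31))
    #define VXLAN_HF_RCO	cpu_to_be32(BIT(21))
    #define VXLAN_HF_VNI	cpu_to_be32(BIT(27))
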
-#if IS_ENABLED(CONFIG_IPV6)
-static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb,
- struct net_device *dev, struct in6_addr *saddr,
- struct in6_addr *daddr, __u8 prio, __u8 ttl,
- __be16 src_port, __be16 dst_port, __be32 vni,
- struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
+ int iphdr_len, __be32 vni,
+ struct vxlan_metadata *md, u32 vxflags,
+ bool udp_sum)
{
struct vxlanhdr *vxh;
int min_headroom;
int err;
- bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
- u16 hdrlen = sizeof(struct vxlanhdr);
if ((vxflags & VXLAN_F_REMCSUM_TX) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1710,50 +1702,39 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
if (csum_start <= VXLAN_MAX_REMCSUM_START &&
!(csum_start & VXLAN_RCO_SHIFT_MASK) &&
(skb->csum_offset == offsetof(struct udphdr, check) ||
- skb->csum_offset == offsetof(struct tcphdr, check))) {
- udp_sum = false;
+ skb->csum_offset == offsetof(struct tcphdr, check)))
type |= SKB_GSO_TUNNEL_REMCSUM;
- }
}
- skb_scrub_packet(skb, xnet);
-
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
- + VXLAN_HLEN + sizeof(struct ipv6hdr)
+ + VXLAN_HLEN + iphdr_len
+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
if (unlikely(err)) {
kfree_skb(skb);
- goto err;
+ return err;
}
skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb)) {
- err = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(!skb))
+ return -ENOMEM;
- skb = iptunnel_handle_offloads(skb, udp_sum, type);
- if (IS_ERR(skb)) {
- err = -EINVAL;
- goto err;
- }
+ skb = iptunnel_handle_offloads(skb, type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = vni;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(vni);
if (type & SKB_GSO_TUNNEL_REMCSUM) {
- u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
- VXLAN_RCO_SHIFT;
+ unsigned int start;
- if (skb->csum_offset == offsetof(struct udphdr, check))
- data |= VXLAN_RCO_UDP;
-
- vxh->vx_vni |= htonl(data);
- vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+ start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
+ vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
+ vxh->vx_flags |= VXLAN_HF_RCO;
if (!skb_is_gso(skb)) {
skb->ip_summed = CHECKSUM_NONE;
@@ -1765,106 +1746,71 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
vxlan_build_gbp_hdr(vxh, vxflags, md);
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
- udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
- ttl, src_port, dst_port,
- !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
return 0;
-err:
- dst_release(dst);
- return err;
}
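
The remote-checksum-offload math that used to be open-coded in both xmit paths now hides behind vxlan_compute_rco(), which packs the (halved) checksum start offset plus a UDP-vs-TCP selector bit into the reserved low byte of the VNI field. A rough sketch, with the shift and selector taken from the deleted open-coded version (VXLAN_RCO_SHIFT == 1, VXLAN_RCO_UDP in bit 7 of the reserved byte); treat it as an approximation:

    static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset)
    {
            __be32 vni_field;

    #if defined(__BIG_ENDIAN)
            vni_field = (__force __be32)(start >> VXLAN_RCO_SHIFT);
    #else
            /* the reserved byte is the most significant one on LE */
            vni_field = (__force __be32)((start >> VXLAN_RCO_SHIFT) << 24);
    #endif
            if (offset == offsetof(struct udphdr, check))
                    vni_field |= VXLAN_RCO_UDP;
            return vni_field;
    }
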
-#endif
-static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
- __be16 src_port, __be16 dst_port, __be32 vni,
- struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
+ struct sk_buff *skb, int oif, u8 tos,
+ __be32 daddr, __be32 *saddr,
+ struct dst_cache *dst_cache,
+ const struct ip_tunnel_info *info)
{
- struct vxlanhdr *vxh;
- int min_headroom;
- int err;
- bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
- int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
- u16 hdrlen = sizeof(struct vxlanhdr);
-
- if ((vxflags & VXLAN_F_REMCSUM_TX) &&
- skb->ip_summed == CHECKSUM_PARTIAL) {
- int csum_start = skb_checksum_start_offset(skb);
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
+ struct rtable *rt = NULL;
+ struct flowi4 fl4;
- if (csum_start <= VXLAN_MAX_REMCSUM_START &&
- !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
- (skb->csum_offset == offsetof(struct udphdr, check) ||
- skb->csum_offset == offsetof(struct tcphdr, check))) {
- udp_sum = false;
- type |= SKB_GSO_TUNNEL_REMCSUM;
- }
+ if (tos && !info)
+ use_cache = false;
+ if (use_cache) {
+ rt = dst_cache_get_ip4(dst_cache, saddr);
+ if (rt)
+ return rt;
}
- min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
- + VXLAN_HLEN + sizeof(struct iphdr)
- + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-
- /* Need space for new headers (invalidates iph ptr) */
- err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
-
- skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb))
- return -ENOMEM;
-
- skb = iptunnel_handle_offloads(skb, udp_sum, type);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_HF_VNI);
- vxh->vx_vni = vni;
-
- if (type & SKB_GSO_TUNNEL_REMCSUM) {
- u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
- VXLAN_RCO_SHIFT;
-
- if (skb->csum_offset == offsetof(struct udphdr, check))
- data |= VXLAN_RCO_UDP;
-
- vxh->vx_vni |= htonl(data);
- vxh->vx_flags |= htonl(VXLAN_HF_RCO);
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_oif = oif;
+ fl4.flowi4_tos = RT_TOS(tos);
+ fl4.flowi4_mark = skb->mark;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ fl4.daddr = daddr;
+ fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
- if (!skb_is_gso(skb)) {
- skb->ip_summed = CHECKSUM_NONE;
- skb->encapsulation = 0;
- }
+ rt = ip_route_output_key(vxlan->net, &fl4);
+ if (!IS_ERR(rt)) {
+ *saddr = fl4.saddr;
+ if (use_cache)
+ dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
}
-
- if (vxflags & VXLAN_F_GBP)
- vxlan_build_gbp_hdr(vxh, vxflags, md);
-
- skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
- udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df,
- src_port, dst_port, xnet,
- !(vxflags & VXLAN_F_UDP_CSUM));
- return 0;
+ return rt;
}
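
vxlan_get_route() consults a per-destination route cache before doing a full FIB lookup. The cache is deliberately bypassed when the TOS is inherited from the inner packet (the `tos && !info` case), since the DSCP/ECN bits then vary per packet and a single cached route could be wrong. The cache has to be initialized once per remote; a hypothetical setup call, done elsewhere in this series and shown only for illustration:

    /* hypothetical: when a remote destination (rdst) is created */
    err = dst_cache_init(&rdst->dst_cache, GFP_ATOMIC);
    if (err)
            return err;
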
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
- struct sk_buff *skb, int oif,
+ struct sk_buff *skb, int oif, u8 tos,
+ __be32 label,
const struct in6_addr *daddr,
- struct in6_addr *saddr)
+ struct in6_addr *saddr,
+ struct dst_cache *dst_cache,
+ const struct ip_tunnel_info *info)
{
+ bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
int err;
+ if (tos && !info)
+ use_cache = false;
+ if (use_cache) {
+ ndst = dst_cache_get_ip6(dst_cache, saddr);
+ if (ndst)
+ return ndst;
+ }
+
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = oif;
fl6.daddr = *daddr;
fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
+ fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
@@ -1875,6 +1821,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
return ERR_PTR(err);
*saddr = fl6.saddr;
+ if (use_cache)
+ dst_cache_set_ip6(dst_cache, ndst, saddr);
return ndst;
}
#endif
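
vxlan6_get_route() now folds the TOS and the configured label into the single 4-byte flowinfo word of the IPv6 header via ip6_make_flowinfo(). That helper plausibly reduces to the following, assuming the standard 8-bit traffic class over 20-bit flow label layout:

    #define IPV6_TCLASS_SHIFT	20

    static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
    {
            /* traffic class in bits 27..20, flow label in bits 19..0 */
            return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
    }
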
@@ -1927,22 +1875,24 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
+ struct dst_cache *dst_cache;
struct ip_tunnel_info *info;
struct vxlan_dev *vxlan = netdev_priv(dev);
struct sock *sk;
struct rtable *rt = NULL;
const struct iphdr *old_iph;
- struct flowi4 fl4;
union vxlan_addr *dst;
union vxlan_addr remote_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
__be16 src_port = 0, dst_port;
- u32 vni;
+ __be32 vni, label;
__be16 df = 0;
__u8 tos, ttl;
int err;
u32 flags = vxlan->flags;
+ bool udp_sum = false;
+ bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
info = skb_tunnel_info(skb);
@@ -1950,6 +1900,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
vni = rdst->remote_vni;
dst = &rdst->remote_ip;
+ dst_cache = &rdst->dst_cache;
} else {
if (!info) {
WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
@@ -1957,13 +1908,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
- vni = be64_to_cpu(info->key.tun_id);
+ vni = vxlan_tun_id_to_vni(info->key.tun_id);
remote_ip.sa.sa_family = ip_tunnel_info_af(info);
if (remote_ip.sa.sa_family == AF_INET)
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
else
remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
dst = &remote_ip;
+ dst_cache = &info->dst_cache;
}
if (vxlan_addr_any(dst)) {
@@ -1985,12 +1937,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
if (tos == 1)
tos = ip_tunnel_get_dsfield(old_iph, skb);
+ label = vxlan->cfg.label;
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
if (info) {
ttl = info->key.ttl;
tos = info->key.tos;
+ label = info->key.label;
+ udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
if (info->options_len)
md = ip_tunnel_info_opts(info);
@@ -1999,29 +1954,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (dst->sa.sa_family == AF_INET) {
+ __be32 saddr;
+
if (!vxlan->vn4_sock)
goto drop;
sk = vxlan->vn4_sock->sock->sk;
- if (info) {
- if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
- df = htons(IP_DF);
-
- if (info->key.tun_flags & TUNNEL_CSUM)
- flags |= VXLAN_F_UDP_CSUM;
- else
- flags &= ~VXLAN_F_UDP_CSUM;
- }
-
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
- fl4.flowi4_tos = RT_TOS(tos);
- fl4.flowi4_mark = skb->mark;
- fl4.flowi4_proto = IPPROTO_UDP;
- fl4.daddr = dst->sin.sin_addr.s_addr;
- fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
-
- rt = ip_route_output_key(vxlan->net, &fl4);
+ rt = vxlan_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0, tos,
+ dst->sin.sin_addr.s_addr, &saddr,
+ dst_cache, info);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to %pI4\n",
&dst->sin.sin_addr.s_addr);
@@ -2051,18 +1993,21 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
return;
}
+ if (!info)
+ udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
+ else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
- err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
- dst->sin.sin_addr.s_addr, tos, ttl, df,
- src_port, dst_port, htonl(vni << 8), md,
- !net_eq(vxlan->net, dev_net(vxlan->dev)),
- flags);
- if (err < 0) {
- /* skb is already freed. */
- skb = NULL;
- goto rt_tx_error;
- }
+ err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
+ vni, md, flags, udp_sum);
+ if (err < 0)
+ goto xmit_tx_error;
+
+ udp_tunnel_xmit_skb(rt, sk, skb, saddr,
+ dst->sin.sin_addr.s_addr, tos, ttl, df,
+ src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct dst_entry *ndst;
@@ -2074,8 +2019,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
sk = vxlan->vn6_sock->sock->sk;
ndst = vxlan6_get_route(vxlan, skb,
- rdst ? rdst->remote_ifindex : 0,
- &dst->sin6.sin6_addr, &saddr);
+ rdst ? rdst->remote_ifindex : 0, tos,
+ label, &dst->sin6.sin6_addr, &saddr,
+ dst_cache, info);
if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr);
@@ -2107,18 +2053,21 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
return;
}
- if (info) {
- if (info->key.tun_flags & TUNNEL_CSUM)
- flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
- else
- flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
- }
+ if (!info)
+ udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
- err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
- 0, ttl, src_port, dst_port, htonl(vni << 8), md,
- !net_eq(vxlan->net, dev_net(vxlan->dev)),
- flags);
+ skb_scrub_packet(skb, xnet);
+ err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
+ vni, md, flags, udp_sum);
+ if (err < 0) {
+ dst_release(ndst);
+ return;
+ }
+ udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
+ &saddr, &dst->sin6.sin6_addr, tos, ttl,
+ label, src_port, dst_port, !udp_sum);
#endif
}
@@ -2128,6 +2077,9 @@ drop:
dev->stats.tx_dropped++;
goto tx_free;
+xmit_tx_error:
+ /* skb is already freed. */
+ skb = NULL;
rt_tx_error:
ip_rt_put(rt);
tx_error:
@@ -2267,7 +2219,7 @@ static void vxlan_cleanup(unsigned long arg)
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
- __u32 vni = vxlan->default_dst.remote_vni;
+ __be32 vni = vxlan->default_dst.remote_vni;
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
@@ -2410,31 +2362,6 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
}
-static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
- struct ip_tunnel_info *info,
- __be16 sport, __be16 dport)
-{
- struct vxlan_dev *vxlan = netdev_priv(dev);
- struct rtable *rt;
- struct flowi4 fl4;
-
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_tos = RT_TOS(info->key.tos);
- fl4.flowi4_mark = skb->mark;
- fl4.flowi4_proto = IPPROTO_UDP;
- fl4.daddr = info->key.u.ipv4.dst;
-
- rt = ip_route_output_key(vxlan->net, &fl4);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
- ip_rt_put(rt);
-
- info->key.u.ipv4.src = fl4.saddr;
- info->key.tp_src = sport;
- info->key.tp_dst = dport;
- return 0;
-}
-
static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2446,28 +2373,34 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
if (ip_tunnel_info_af(info) == AF_INET) {
+ struct rtable *rt;
+
if (!vxlan->vn4_sock)
return -EINVAL;
- return egress_ipv4_tun_info(dev, skb, info, sport, dport);
+ rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
+ info->key.u.ipv4.dst,
+ &info->key.u.ipv4.src, NULL, info);
+ if (IS_ERR(rt))
+ return PTR_ERR(rt);
+ ip_rt_put(rt);
} else {
#if IS_ENABLED(CONFIG_IPV6)
struct dst_entry *ndst;
if (!vxlan->vn6_sock)
return -EINVAL;
- ndst = vxlan6_get_route(vxlan, skb, 0,
- &info->key.u.ipv6.dst,
- &info->key.u.ipv6.src);
+ ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
+ info->key.label, &info->key.u.ipv6.dst,
+ &info->key.u.ipv6.src, NULL, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);
-
- info->key.tp_src = sport;
- info->key.tp_dst = dport;
#else /* !CONFIG_IPV6 */
return -EPFNOSUPPORT;
#endif
}
+ info->key.tp_src = sport;
+ info->key.tp_dst = dport;
return 0;
}
@@ -2572,6 +2505,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_TOS] = { .type = NLA_U8 },
[IFLA_VXLAN_TTL] = { .type = NLA_U8 },
+ [IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
[IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
[IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
@@ -2719,7 +2653,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
/* Mark socket as an encapsulation socket. */
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
- tunnel_cfg.encap_rcv = vxlan_udp_encap_recv;
+ tunnel_cfg.encap_rcv = vxlan_rcv;
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@ -2806,6 +2740,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
vxlan->flags |= VXLAN_F_IPV6;
}
+ if (conf->label && !use_ipv6) {
+ pr_info("label only supported in use with IPv6\n");
+ return -EINVAL;
+ }
+
if (conf->remote_ifindex) {
lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
dst->remote_ifindex = conf->remote_ifindex;
@@ -2921,7 +2860,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
memset(&conf, 0, sizeof(conf));
if (data[IFLA_VXLAN_ID])
- conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+ conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
if (data[IFLA_VXLAN_GROUP]) {
conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
@@ -2954,6 +2893,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (data[IFLA_VXLAN_TTL])
conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+ if (data[IFLA_VXLAN_LABEL])
+ conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
+ IPV6_FLOWLABEL_MASK;
+
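
IFLA_VXLAN_LABEL carries the raw network-order flowinfo word; only the low 20 bits form a valid IPv6 flow label, hence the masking above. A quick illustration with made-up values:

    #define IPV6_FLOWLABEL_MASK	cpu_to_be32(0x000FFFFF)

    /* e.g. userspace passes 0xabc12345; only the 20 label bits survive */
    __be32 raw   = cpu_to_be32(0xabc12345);
    __be32 label = raw & IPV6_FLOWLABEL_MASK;   /* == cpu_to_be32(0x00012345) */
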
if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
conf.flags |= VXLAN_F_LEARN;
@@ -2989,8 +2932,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (data[IFLA_VXLAN_PORT])
conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
- if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
- conf.flags |= VXLAN_F_UDP_CSUM;
+ if (data[IFLA_VXLAN_UDP_CSUM] &&
+ !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
+ conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
@@ -3025,7 +2969,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
break;
case -EEXIST:
- pr_info("duplicate VNI %u\n", conf.vni);
+ pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
break;
}
@@ -3056,6 +3000,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
+ nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
@@ -3083,7 +3028,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
.high = htons(vxlan->cfg.port_max),
};
- if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
+ if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
goto nla_put_failure;
if (!vxlan_addr_any(&dst->remote_ip)) {
@@ -3119,6 +3064,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
+ nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
!!(vxlan->flags & VXLAN_F_LEARN)) ||
nla_put_u8(skb, IFLA_VXLAN_PROXY,
@@ -3134,7 +3080,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
- !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
+ !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
!!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 44541dbc5c28..69b994f3b8c5 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->mem_start = card->phys_mem
+ BUF_OFFSET ( txBuffer[i][0][0]);
dev->mem_end = card->phys_mem
- + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
+ + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
dev->base_addr = card->pci_conf;
dev->irq = card->irq;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 317bc79cc8b9..bb33b242ab48 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -826,7 +826,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* lmc_trace(dev, "lmc_init_one in"); */
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
return err;
@@ -835,23 +835,20 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_request_regions(pdev, "lmc");
if (err) {
printk(KERN_ERR "lmc: pci_request_region failed\n");
- goto err_req_io;
+ return err;
}
/*
* Allocate our own device structure
*/
- sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
- if (!sc) {
- err = -ENOMEM;
- goto err_kzalloc;
- }
+ sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL);
+ if (!sc)
+ return -ENOMEM;
dev = alloc_hdlcdev(sc);
if (!dev) {
printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
- err = -ENOMEM;
- goto err_hdlcdev;
+ return -ENOMEM;
}
@@ -888,7 +885,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
free_netdev(dev);
- goto err_hdlcdev;
+ return err;
}
sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
@@ -971,14 +968,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
lmc_trace(dev, "lmc_init_one out");
return 0;
-
-err_hdlcdev:
- kfree(sc);
-err_kzalloc:
- pci_release_regions(pdev);
-err_req_io:
- pci_disable_device(pdev);
- return err;
}
/*
@@ -992,8 +981,6 @@ static void lmc_remove_one(struct pci_dev *pdev)
printk(KERN_DEBUG "%s: removing...\n", dev->name);
unregister_hdlc_device(dev);
free_netdev(dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
}
}
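
The lmc conversion swaps manual resource management for the managed (devres) variants: after pcim_enable_device(), the subsequent pci_request_regions() call is also tracked by devres, and devm_kzalloc() memory is freed automatically on driver detach, which is why the error labels and the release/disable calls in remove() can simply go away. A minimal sketch of the pattern (example_probe and example_priv are made-up names):

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            struct example_priv *priv;
            int err;

            err = pcim_enable_device(pdev);         /* undone automatically */
            if (err)
                    return err;

            /* managed too, once pcim_enable_device() has run */
            err = pci_request_regions(pdev, "example");
            if (err)
                    return err;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;                 /* nothing to unwind */

            return 0;
    }
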
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 03aa35f999a1..db1ca629cbd6 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -15,6 +15,12 @@ config ATH10K_PCI
---help---
This module adds support for the PCIe bus.
+config ATH10K_AHB
+ bool "Atheros ath10k AHB support"
+ depends on ATH10K_PCI && OF && RESET_CONTROLLER
+ ---help---
+ This module adds support for the AHB bus.
+
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index c04fb00e7930..930fadd940d8 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -25,5 +25,7 @@ obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
ce.o
+ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
+
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
new file mode 100644
index 000000000000..bd62bc19e758
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -0,0 +1,933 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include "core.h"
+#include "debug.h"
+#include "pci.h"
+#include "ahb.h"
+
+static const struct of_device_id ath10k_ahb_of_match[] = {
+ /* TODO: enable this entry once everything is in place.
+ * { .compatible = "qcom,ipq4019-wifi",
+ * .data = (void *)ATH10K_HW_QCA4019 },
+ */
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+
+static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
+{
+ return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
+}
+
+static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ iowrite32(value, ar_ahb->mem + offset);
+}
+
+static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->mem + offset);
+}
+
+static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->gcc_mem + offset);
+}
+
+static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ iowrite32(value, ar_ahb->tcsr_mem + offset);
+}
+
+static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->tcsr_mem + offset);
+}
+
+static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+static int ath10k_ahb_get_num_banks(struct ath10k *ar)
+{
+ if (ar->hw_rev == ATH10K_HW_QCA4019)
+ return 1;
+
+ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+ return 1;
+}
+
+static int ath10k_ahb_clock_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+ int ret;
+
+ dev = &ar_ahb->pdev->dev;
+
+ ar_ahb->cmd_clk = clk_get(dev, "wifi_wcss_cmd");
+ if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) {
+ ath10k_err(ar, "failed to get cmd clk: %ld\n",
+ PTR_ERR(ar_ahb->cmd_clk));
+ ret = ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
+ goto out;
+ }
+
+ ar_ahb->ref_clk = clk_get(dev, "wifi_wcss_ref");
+ if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) {
+ ath10k_err(ar, "failed to get ref clk: %ld\n",
+ PTR_ERR(ar_ahb->ref_clk));
+ ret = ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
+ goto err_cmd_clk_put;
+ }
+
+ ar_ahb->rtc_clk = clk_get(dev, "wifi_wcss_rtc");
+ if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
+ ath10k_err(ar, "failed to get rtc clk: %ld\n",
+ PTR_ERR(ar_ahb->rtc_clk));
+ ret = ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
+ goto err_ref_clk_put;
+ }
+
+ return 0;
+
+err_ref_clk_put:
+ clk_put(ar_ahb->ref_clk);
+
+err_cmd_clk_put:
+ clk_put(ar_ahb->cmd_clk);
+
+out:
+ return ret;
+}
+
+static void ath10k_ahb_clock_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk))
+ clk_put(ar_ahb->cmd_clk);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->ref_clk))
+ clk_put(ar_ahb->ref_clk);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk))
+ clk_put(ar_ahb->rtc_clk);
+
+ ar_ahb->cmd_clk = NULL;
+ ar_ahb->ref_clk = NULL;
+ ar_ahb->rtc_clk = NULL;
+}
+
+static int ath10k_ahb_clock_enable(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+ int ret;
+
+ dev = &ar_ahb->pdev->dev;
+
+ if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) ||
+ IS_ERR_OR_NULL(ar_ahb->ref_clk) ||
+ IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
+ ath10k_err(ar, "clock(s) is/are not initialized\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->cmd_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable cmd clk: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->ref_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable ref clk: %d\n", ret);
+ goto err_cmd_clk_disable;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->rtc_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable rtc clk: %d\n", ret);
+ goto err_ref_clk_disable;
+ }
+
+ return 0;
+
+err_ref_clk_disable:
+ clk_disable_unprepare(ar_ahb->ref_clk);
+
+err_cmd_clk_disable:
+ clk_disable_unprepare(ar_ahb->cmd_clk);
+
+out:
+ return ret;
+}
+
+static void ath10k_ahb_clock_disable(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk))
+ clk_disable_unprepare(ar_ahb->cmd_clk);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->ref_clk))
+ clk_disable_unprepare(ar_ahb->ref_clk);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk))
+ clk_disable_unprepare(ar_ahb->rtc_clk);
+}
+
+static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+ int ret;
+
+ dev = &ar_ahb->pdev->dev;
+
+ ar_ahb->core_cold_rst = reset_control_get(dev, "wifi_core_cold");
+ if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst)) {
+ ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->core_cold_rst));
+ ret = ar_ahb->core_cold_rst ?
+ PTR_ERR(ar_ahb->core_cold_rst) : -ENODEV;
+ goto out;
+ }
+
+ ar_ahb->radio_cold_rst = reset_control_get(dev, "wifi_radio_cold");
+ if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst)) {
+ ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_cold_rst));
+ ret = ar_ahb->radio_cold_rst ?
+ PTR_ERR(ar_ahb->radio_cold_rst) : -ENODEV;
+ goto err_core_cold_rst_put;
+ }
+
+ ar_ahb->radio_warm_rst = reset_control_get(dev, "wifi_radio_warm");
+ if (IS_ERR_OR_NULL(ar_ahb->radio_warm_rst)) {
+ ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_warm_rst));
+ ret = ar_ahb->radio_warm_rst ?
+ PTR_ERR(ar_ahb->radio_warm_rst) : -ENODEV;
+ goto err_radio_cold_rst_put;
+ }
+
+ ar_ahb->radio_srif_rst = reset_control_get(dev, "wifi_radio_srif");
+ if (IS_ERR_OR_NULL(ar_ahb->radio_srif_rst)) {
+ ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_srif_rst));
+ ret = ar_ahb->radio_srif_rst ?
+ PTR_ERR(ar_ahb->radio_srif_rst) : -ENODEV;
+ goto err_radio_warm_rst_put;
+ }
+
+ ar_ahb->cpu_init_rst = reset_control_get(dev, "wifi_cpu_init");
+ if (IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->cpu_init_rst));
+ ret = ar_ahb->cpu_init_rst ?
+ PTR_ERR(ar_ahb->cpu_init_rst) : -ENODEV;
+ goto err_radio_srif_rst_put;
+ }
+
+ return 0;
+
+err_radio_srif_rst_put:
+ reset_control_put(ar_ahb->radio_srif_rst);
+
+err_radio_warm_rst_put:
+ reset_control_put(ar_ahb->radio_warm_rst);
+
+err_radio_cold_rst_put:
+ reset_control_put(ar_ahb->radio_cold_rst);
+
+err_core_cold_rst_put:
+ reset_control_put(ar_ahb->core_cold_rst);
+
+out:
+ return ret;
+}
+
+static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->core_cold_rst))
+ reset_control_put(ar_ahb->core_cold_rst);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->radio_cold_rst))
+ reset_control_put(ar_ahb->radio_cold_rst);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->radio_warm_rst))
+ reset_control_put(ar_ahb->radio_warm_rst);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->radio_srif_rst))
+ reset_control_put(ar_ahb->radio_srif_rst);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->cpu_init_rst))
+ reset_control_put(ar_ahb->cpu_init_rst);
+
+ ar_ahb->core_cold_rst = NULL;
+ ar_ahb->radio_cold_rst = NULL;
+ ar_ahb->radio_warm_rst = NULL;
+ ar_ahb->radio_srif_rst = NULL;
+ ar_ahb->cpu_init_rst = NULL;
+}
+
+static int ath10k_ahb_release_reset(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
+ return -EINVAL;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_cold_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_warm_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_srif_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->cpu_init_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg,
+ u32 haltack_reg)
+{
+ unsigned long timeout;
+ u32 val;
+
+ /* Issue halt axi bus request */
+ val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
+ val |= AHB_AXI_BUS_HALT_REQ;
+ ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
+
+ /* Wait for axi bus halted ack */
+ timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT);
+ do {
+ val = ath10k_ahb_tcsr_read32(ar, haltack_reg);
+ if (val & AHB_AXI_BUS_HALT_ACK)
+ break;
+
+ mdelay(1);
+ } while (time_before(jiffies, timeout));
+
+ if (!(val & AHB_AXI_BUS_HALT_ACK)) {
+ ath10k_err(ar, "failed to halt axi bus: %d\n", val);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n");
+}
+
+static void ath10k_ahb_halt_chip(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg;
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
+ return;
+ }
+
+ core_id = ath10k_ahb_read32(ar, ATH10K_AHB_WLAN_CORE_ID_REG);
+
+ switch (core_id) {
+ case 0:
+ glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG;
+ haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ;
+ haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK;
+ break;
+ case 1:
+ glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG;
+ haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ;
+ haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK;
+ break;
+ default:
+ ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n",
+ core_id);
+ return;
+ }
+
+ ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg);
+
+ val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
+ val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
+ ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
+
+ ret = reset_control_assert(ar_ahb->core_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert core cold rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_warm_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_srif_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->cpu_init_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret);
+ msleep(10);
+
+ /* Clear halt req and core clock disable req before
+ * deasserting wifi core reset.
+ */
+ val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
+ val &= ~AHB_AXI_BUS_HALT_REQ;
+ ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
+
+ val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
+ val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
+ ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
+
+ ret = reset_control_deassert(ar_ahb->core_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret);
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id);
+}
+
+static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ tasklet_schedule(&ar_pci->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_ahb->irq,
+ ath10k_ahb_interrupt_handler,
+ IRQF_SHARED, "ath10k_ahb", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+ ar_ahb->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_release_irq_legacy(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ free_irq(ar_ahb->irq, ar);
+}
+
+static void ath10k_ahb_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+}
+
+static int ath10k_ahb_resource_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct platform_device *pdev;
+ struct device *dev;
+ struct resource *res;
+ int ret;
+
+ pdev = ar_ahb->pdev;
+ dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ath10k_err(ar, "failed to get memory resource\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ar_ahb->mem)) {
+ ath10k_err(ar, "mem ioremap error\n");
+ ret = PTR_ERR(ar_ahb->mem);
+ goto out;
+ }
+
+ ar_ahb->mem_len = resource_size(res);
+
+ ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
+ ATH10K_GCC_REG_SIZE);
+ if (!ar_ahb->gcc_mem) {
+ ath10k_err(ar, "gcc mem ioremap error\n");
+ ret = -ENOMEM;
+ goto err_mem_unmap;
+ }
+
+ ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
+ ATH10K_TCSR_REG_SIZE);
+ if (!ar_ahb->tcsr_mem) {
+ ath10k_err(ar, "tcsr mem ioremap error\n");
+ ret = -ENOMEM;
+ goto err_gcc_mem_unmap;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
+ goto err_tcsr_mem_unmap;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n",
+ ret);
+ goto err_tcsr_mem_unmap;
+ }
+
+ ret = ath10k_ahb_clock_init(ar);
+ if (ret)
+ goto err_tcsr_mem_unmap;
+
+ ret = ath10k_ahb_rst_ctrl_init(ar);
+ if (ret)
+ goto err_clock_deinit;
+
+ ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
+ if (ar_ahb->irq < 0) {
+ ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
+ goto err_clock_deinit;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
+ ar_ahb->mem, ar_ahb->mem_len,
+ ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
+ return 0;
+
+err_clock_deinit:
+ ath10k_ahb_clock_deinit(ar);
+
+err_tcsr_mem_unmap:
+ iounmap(ar_ahb->tcsr_mem);
+
+err_gcc_mem_unmap:
+ ar_ahb->tcsr_mem = NULL;
+ iounmap(ar_ahb->gcc_mem);
+
+err_mem_unmap:
+ ar_ahb->gcc_mem = NULL;
+ devm_iounmap(&pdev->dev, ar_ahb->mem);
+
+out:
+ ar_ahb->mem = NULL;
+ return ret;
+}
+
+static void ath10k_ahb_resource_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ if (ar_ahb->mem)
+ devm_iounmap(dev, ar_ahb->mem);
+
+ if (ar_ahb->gcc_mem)
+ iounmap(ar_ahb->gcc_mem);
+
+ if (ar_ahb->tcsr_mem)
+ iounmap(ar_ahb->tcsr_mem);
+
+ ar_ahb->mem = NULL;
+ ar_ahb->gcc_mem = NULL;
+ ar_ahb->tcsr_mem = NULL;
+
+ ath10k_ahb_clock_deinit(ar);
+ ath10k_ahb_rst_ctrl_deinit(ar);
+}
+
+static int ath10k_ahb_prepare_device(struct ath10k *ar)
+{
+ u32 val;
+ int ret;
+
+ ret = ath10k_ahb_clock_enable(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* The clock for the target is supplied from outside the target (i.e.
+ * an external clock module controlled by the host). The target needs
+ * to know the frequency at which its CPU is configured, for its own
+ * internal use. Read the target CPU frequency from the gcc register
+ * and write it into the target's scratch register, where the target
+ * expects this information.
+ */
+ val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV);
+ ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val);
+
+ ret = ath10k_ahb_release_reset(ar);
+ if (ret)
+ goto err_clk_disable;
+
+ ath10k_ahb_irq_disable(ar);
+
+ ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret)
+ goto err_halt_chip;
+
+ return 0;
+
+err_halt_chip:
+ ath10k_ahb_halt_chip(ar);
+
+err_clk_disable:
+ ath10k_ahb_clock_disable(ar);
+
+ return ret;
+}
+
+static int ath10k_ahb_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+
+ ret = ath10k_ahb_prepare_device(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath10k_ahb_wake_target_cpu(struct ath10k *ar)
+{
+ u32 addr, val;
+
+ addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+ val = ath10k_ahb_read32(ar, addr);
+ val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK;
+ ath10k_ahb_write32(ar, addr, val);
+
+ return 0;
+}
+
+static int ath10k_ahb_hif_start(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
+
+ ath10k_ce_enable_interrupts(ar);
+ ath10k_pci_enable_legacy_irq(ar);
+
+ ath10k_pci_rx_post(ar);
+
+ return 0;
+}
+
+static void ath10k_ahb_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n");
+
+ ath10k_ahb_irq_disable(ar);
+ synchronize_irq(ar_ahb->irq);
+
+ ath10k_pci_flush(ar);
+}
+
+static int ath10k_ahb_hif_power_up(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n");
+
+ ret = ath10k_ahb_chip_reset(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto out;
+ }
+
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto out;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
+ goto err_ce_deinit;
+ }
+
+ ret = ath10k_ahb_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+ goto err_ce_deinit;
+ }
+
+ return 0;
+
+err_ce_deinit:
+ ath10k_pci_ce_deinit(ar);
+out:
+ return ret;
+}
+
+static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_ahb_hif_start,
+ .stop = ath10k_ahb_hif_stop,
+ .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_pci_hif_get_default_pipe,
+ .send_complete_check = ath10k_pci_hif_send_complete_check,
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_ahb_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_ahb_read32,
+ .write32 = ath10k_ahb_write32,
+};
+
+static const struct ath10k_bus_ops ath10k_ahb_bus_ops = {
+ .read32 = ath10k_ahb_read32,
+ .write32 = ath10k_ahb_write32,
+ .get_num_banks = ath10k_ahb_get_num_banks,
+};
+
+static int ath10k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath10k *ar;
+ struct ath10k_ahb *ar_ahb;
+ struct ath10k_pci *ar_pci;
+ const struct of_device_id *of_id;
+ enum ath10k_hw_rev hw_rev;
+ size_t size;
+ int ret;
+ u32 chip_id;
+
+ of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ hw_rev = (enum ath10k_hw_rev)of_id->data;
+
+ size = sizeof(*ar_pci) + sizeof(*ar_ahb);
+ ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
+ hw_rev, &ath10k_ahb_hif_ops);
+ if (!ar) {
+ dev_err(&pdev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n");
+
+ ar_pci = ath10k_pci_priv(ar);
+ ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->pdev = pdev;
+ platform_set_drvdata(pdev, ar);
+
+ ret = ath10k_ahb_resource_init(ar);
+ if (ret)
+ goto err_core_destroy;
+
+ ar->dev_id = 0;
+ ar_pci->mem = ar_ahb->mem;
+ ar_pci->mem_len = ar_ahb->mem_len;
+ ar_pci->ar = ar;
+ ar_pci->bus_ops = &ath10k_ahb_bus_ops;
+
+ ret = ath10k_pci_setup_resource(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup resource: %d\n", ret);
+ goto err_resource_deinit;
+ }
+
+ ath10k_pci_init_irq_tasklets(ar);
+
+ ret = ath10k_ahb_request_irq_legacy(ar);
+ if (ret)
+ goto err_free_pipes;
+
+ ret = ath10k_ahb_prepare_device(ar);
+ if (ret)
+ goto err_free_irq;
+
+ ath10k_pci_ce_deinit(ar);
+
+ chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (chip_id == 0xffffffff) {
+ ath10k_err(ar, "failed to get chip id\n");
+ goto err_halt_device;
+ }
+
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_halt_device;
+ }
+
+ return 0;
+
+err_halt_device:
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+
+err_free_irq:
+ ath10k_ahb_release_irq_legacy(ar);
+
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
+
+err_resource_deinit:
+ ath10k_ahb_resource_deinit(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int ath10k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+ struct ath10k_ahb *ar_ahb;
+
+ if (!ar)
+ return -EINVAL;
+
+ ar_ahb = ath10k_ahb_priv(ar);
+
+ if (!ar_ahb)
+ return -EINVAL;
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n");
+
+ ath10k_core_unregister(ar);
+ ath10k_ahb_irq_disable(ar);
+ ath10k_ahb_release_irq_legacy(ar);
+ ath10k_pci_release_resource(ar);
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+ ath10k_ahb_resource_deinit(ar);
+ ath10k_core_destroy(ar);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ath10k_ahb_driver = {
+ .driver = {
+ .name = "ath10k_ahb",
+ .of_match_table = ath10k_ahb_of_match,
+ },
+ .probe = ath10k_ahb_probe,
+ .remove = ath10k_ahb_remove,
+};
+
+int ath10k_ahb_init(void)
+{
+ int ret;
+
+ printk(KERN_ERR "AHB support is still work in progress\n");
+
+ ret = platform_driver_register(&ath10k_ahb_driver);
+ if (ret)
+ printk(KERN_ERR "failed to register ath10k ahb driver: %d\n",
+ ret);
+ return ret;
+}
+
+void ath10k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath10k_ahb_driver);
+}
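
Since ahb.o links into the ath10k_pci module (per the Makefile change above), ath10k_ahb_init()/ath10k_ahb_exit() are presumably invoked from the PCI module's own init/exit hooks; a sketch of that wiring (an assumption, not shown in this diff):

    static int __init ath10k_pci_init(void)
    {
            int ret;

            ret = pci_register_driver(&ath10k_pci_driver);
            if (ret)
                    printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
                           ret);

            /* also register the platform driver for the AHB (IPQ4019) case */
            ret = ath10k_ahb_init();
            if (ret)
                    printk(KERN_ERR "ahb init failed: %d\n", ret);

            return ret;
    }
    module_init(ath10k_pci_init);
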
diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h
new file mode 100644
index 000000000000..d43e375215c8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ahb.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _AHB_H_
+#define _AHB_H_
+
+#include <linux/platform_device.h>
+
+struct ath10k_ahb {
+ struct platform_device *pdev;
+ void __iomem *mem;
+ unsigned long mem_len;
+ void __iomem *gcc_mem;
+ void __iomem *tcsr_mem;
+
+ int irq;
+
+ struct clk *cmd_clk;
+ struct clk *ref_clk;
+ struct clk *rtc_clk;
+
+ struct reset_control *core_cold_rst;
+ struct reset_control *radio_cold_rst;
+ struct reset_control *radio_warm_rst;
+ struct reset_control *radio_srif_rst;
+ struct reset_control *cpu_init_rst;
+};
+
+#ifdef CONFIG_ATH10K_AHB
+
+#define ATH10K_GCC_REG_BASE 0x1800000
+#define ATH10K_GCC_REG_SIZE 0x60000
+
+#define ATH10K_TCSR_REG_BASE 0x1900000
+#define ATH10K_TCSR_REG_SIZE 0x80000
+
+#define ATH10K_AHB_GCC_FEPLL_PLL_DIV 0x2f020
+#define ATH10K_AHB_WIFI_SCRATCH_5_REG 0x4f014
+
+#define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030
+
+#define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000
+#define ATH10K_AHB_TCSR_WIFI1_GLB_CFG 0x49004
+#define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK BIT(25)
+
+#define ATH10K_AHB_TCSR_WCSS0_HALTREQ 0x52000
+#define ATH10K_AHB_TCSR_WCSS1_HALTREQ 0x52010
+#define ATH10K_AHB_TCSR_WCSS0_HALTACK 0x52004
+#define ATH10K_AHB_TCSR_WCSS1_HALTACK 0x52014
+
+#define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT 10 /* msec */
+#define AHB_AXI_BUS_HALT_REQ 1
+#define AHB_AXI_BUS_HALT_ACK 1
+
+#define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK 1
+
+int ath10k_ahb_init(void);
+void ath10k_ahb_exit(void);
+
+#else /* CONFIG_ATH10K_AHB */
+
+static inline int ath10k_ahb_init(void)
+{
+ return 0;
+}
+
+static inline void ath10k_ahb_exit(void)
+{
+}
+
+#endif /* CONFIG_ATH10K_AHB */
+
+#endif /* _AHB_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b41eb3f4ee56..c84c2d30ef1f 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -156,6 +156,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
+ .num_msdu_desc = 1424,
+ .qcache_active_peers = 50,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
.fw = {
.dir = QCA99X0_HW_2_0_FW_DIR,
.fw = QCA99X0_HW_2_0_FW_FILE,
@@ -201,6 +206,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
},
+ {
+ .id = QCA4019_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+ .name = "qca4019 hw1.0",
+ .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x0010000,
+ .continuous_frag_desc = true,
+ .channel_counters_freq_hz = 125000,
+ .max_probe_resp_desc_thres = 24,
+ .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
+ .num_msdu_desc = 2500,
+ .qcache_active_peers = 35,
+ .tx_chain_mask = 0x3,
+ .rx_chain_mask = 0x3,
+ .max_spatial_stream = 2,
+ .fw = {
+ .dir = QCA4019_HW_1_0_FW_DIR,
+ .fw = QCA4019_HW_1_0_FW_FILE,
+ .otp = QCA4019_HW_1_0_OTP_FILE,
+ .board = QCA4019_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA4019_BOARD_DATA_SZ,
+ .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
+ },
+ },
};
static const char *const ath10k_core_fw_feature_str[] = {
@@ -217,6 +247,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
[ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
[ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
+ [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -1478,8 +1509,13 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
- ar->max_num_peers = TARGET_10X_NUM_PEERS;
- ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+ ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
+ } else {
+ ar->max_num_peers = TARGET_10X_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ }
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PEER;
@@ -1502,9 +1538,9 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
- ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+ ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc;
ar->fw_stats_req_mask = WMI_STAT_PEER;
- ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
+ ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1979,6 +2015,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->regs = &qca99x0_regs;
ar->hw_values = &qca99x0_values;
break;
+ case ATH10K_HW_QCA4019:
+ ar->regs = &qca4019_regs;
+ ar->hw_values = &qca4019_values;
+ break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 7840cf3ef7a6..a62b62a62266 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -69,6 +69,7 @@ struct ath10k;
enum ath10k_bus {
ATH10K_BUS_PCI,
+ ATH10K_BUS_AHB,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -76,6 +77,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
switch (bus) {
case ATH10K_BUS_PCI:
return "pci";
+ case ATH10K_BUS_AHB:
+ return "ahb";
}
return "unknown";
@@ -159,6 +162,7 @@ struct ath10k_fw_stats_peer {
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
+ u32 rx_duration;
};
struct ath10k_fw_stats_vdev {
@@ -315,6 +319,7 @@ struct ath10k_sta {
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
bool aggr_mode;
+ u64 rx_duration;
#endif
};
@@ -510,6 +515,15 @@ enum ath10k_fw_features {
/* Firmware supports management frame protection */
ATH10K_FW_FEATURE_MFP_SUPPORT = 12,
+ /* Firmware supports a pull-push model where the host shares its
+ * software queue state with the firmware and the firmware generates
+ * fetch requests telling the host which queues to dequeue tx from.
+ *
+ * Primary function of this is improved MU-MIMO performance with
+ * multiple clients.
+ */
+ ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -666,6 +680,12 @@ struct ath10k {
/* The padding bytes' location is different on various chips */
enum ath10k_hw_4addr_pad hw_4addr_pad;
+ u32 num_msdu_desc;
+ u32 qcache_active_peers;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 max_spatial_stream;
+
struct ath10k_hw_params_fw {
const char *dir;
const char *fw;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 2bdf5408b0d9..076d29b53ddf 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -276,7 +276,7 @@ static const struct file_operations fops_wmi_services = {
.llseek = default_llseek,
};
-static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
+static void ath10k_fw_stats_pdevs_free(struct list_head *head)
{
struct ath10k_fw_stats_pdev *i, *tmp;
@@ -286,7 +286,7 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
}
}
-static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+static void ath10k_fw_stats_vdevs_free(struct list_head *head)
{
struct ath10k_fw_stats_vdev *i, *tmp;
@@ -296,7 +296,7 @@ static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
}
}
-static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
+static void ath10k_fw_stats_peers_free(struct list_head *head)
{
struct ath10k_fw_stats_peer *i, *tmp;
@@ -310,16 +310,16 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
{
spin_lock_bh(&ar->data_lock);
ar->debug.fw_stats_done = false;
- ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
- ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
- ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
spin_unlock_bh(&ar->data_lock);
}
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_fw_stats stats = {};
- bool is_start, is_started, is_end;
+ bool is_start, is_started, is_end, peer_stats_svc;
size_t num_peers;
size_t num_vdevs;
int ret;
@@ -347,8 +347,14 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
* delivered which is treated as end-of-data and is itself discarded
*/
+ peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map);
+ if (peer_stats_svc)
+ ath10k_sta_update_rx_duration(ar, &stats.peers);
+
if (ar->debug.fw_stats_done) {
- ath10k_warn(ar, "received unsolicited stats update event\n");
+ if (!peer_stats_svc)
+ ath10k_warn(ar, "received unsolicited stats update event\n");
+
goto free;
}
@@ -372,11 +378,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
/* Although this is unlikely, impose a sane limit to
* prevent firmware from DoS-ing the host.
*/
+ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
ath10k_warn(ar, "dropping fw peer stats\n");
goto free;
}
if (num_vdevs >= BITS_PER_LONG) {
+ ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
ath10k_warn(ar, "dropping fw vdev stats\n");
goto free;
}
@@ -391,9 +399,9 @@ free:
/* In some cases lists have been spliced and cleared. Free up
* resources if that is not the case.
*/
- ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
- ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
- ath10k_debug_fw_stats_peers_free(&stats.peers);
+ ath10k_fw_stats_pdevs_free(&stats.pdevs);
+ ath10k_fw_stats_vdevs_free(&stats.vdevs);
+ ath10k_fw_stats_peers_free(&stats.peers);
spin_unlock_bh(&ar->data_lock);
}
@@ -2106,6 +2114,7 @@ static ssize_t ath10k_write_btcoex(struct file *file,
struct ath10k *ar = file->private_data;
char buf[32];
size_t buf_size;
+ int ret = 0;
bool val;
buf_size = min(count, (sizeof(buf) - 1));
@@ -2119,6 +2128,12 @@ static ssize_t ath10k_write_btcoex(struct file *file,
mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val))
goto exit;
@@ -2127,17 +2142,15 @@ static ssize_t ath10k_write_btcoex(struct file *file,
else
clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
- if (ar->state != ATH10K_STATE_ON)
- goto exit;
-
ath10k_info(ar, "restarting firmware due to btcoex change");
queue_work(ar->workqueue, &ar->restart_work);
+ ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
- return count;
+ return ret;
}
static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf,
@@ -2176,9 +2189,6 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
mutex_lock(&ar->conf_mutex);
- if (len > buf_len)
- len = buf_len;
-
len += scnprintf(buf + len, buf_len - len,
"firmware-N.bin\t\t%08x\n",
crc32_le(0, ar->firmware->data, ar->firmware->size));
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 814719cf4f22..6206edd7c49f 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -37,6 +37,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_TESTMODE = 0x00001000,
ATH10K_DBG_WMI_PRINT = 0x00002000,
ATH10K_DBG_PCI_PS = 0x00004000,
+ ATH10K_DBG_AHB = 0x00008000,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -153,6 +154,12 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
#ifdef CONFIG_MAC80211_DEBUGFS
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
+void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *peer);
+#else
+static inline void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct list_head *peer)
+{
+}
#endif /* CONFIG_MAC80211_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 95b5c49374e0..67ef75b60567 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -18,6 +18,23 @@
#include "wmi-ops.h"
#include "debug.h"
+void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *head)
+{
+ struct ieee80211_sta *sta;
+ struct ath10k_fw_stats_peer *peer;
+ struct ath10k_sta *arsta;
+
+ rcu_read_lock();
+ list_for_each_entry(peer, head, list) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
+ NULL);
+ if (!sta)
+ continue;
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_duration += (u64)peer->rx_duration;
+ }
+ rcu_read_unlock();
+}
+
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -232,6 +249,28 @@ static const struct file_operations fops_delba = {
.llseek = default_llseek,
};
+static ssize_t ath10k_dbg_sta_read_rx_duration(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ char buf[100];
+ int len = 0;
+
+ len = scnprintf(buf, sizeof(buf),
+ "%llu usecs\n", arsta->rx_duration);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_rx_duration = {
+ .read = ath10k_dbg_sta_read_rx_duration,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
@@ -240,4 +279,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
+ debugfs_create_file("rx_duration", S_IRUGO, dir, sta,
+ &fops_rx_duration);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 3e6ba63dfdff..7561f22f10f9 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -131,12 +131,12 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
[HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
HTT_T2H_MSG_TYPE_TX_FETCH_IND,
- [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
- HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
[HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
- [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
- HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] =
+ HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
};
int ath10k_htt_connect(struct ath10k_htt *htt)
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 47ca048feaf0..13391ea4422d 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -52,6 +52,7 @@ enum htt_h2t_msg_type { /* host-to-target */
/* This command is used for sending management frames in HTT < 3.0.
* HTT >= 3.0 uses TX_FRM for everything. */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
+ HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11,
HTT_H2T_NUM_MSGS /* keep this last */
};
@@ -413,10 +414,10 @@ enum htt_10_4_t2h_msg_type {
HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14,
HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15,
HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16,
- HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17,
HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
/* 0x19 to 0x2f are reserved */
- HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30,
+ HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30,
/* keep this last */
HTT_10_4_T2H_NUM_MSGS
};
@@ -449,8 +450,8 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_TEST,
HTT_T2H_MSG_TYPE_EN_STATS,
HTT_T2H_MSG_TYPE_TX_FETCH_IND,
- HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
- HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
+ HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
/* keep this last */
HTT_T2H_NUM_MSGS
};
@@ -1306,9 +1307,43 @@ struct htt_frag_desc_bank_id {
* so we use a conservatively safe value for now */
#define HTT_FRAG_DESC_BANK_MAX 4
-#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
-#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
-#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4
+
+enum htt_q_depth_type {
+ HTT_Q_DEPTH_TYPE_BYTES = 0,
+ HTT_Q_DEPTH_TYPE_MSDUS = 1,
+};
+
+#define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
+ TARGET_10_4_NUM_VDEVS)
+#define HTT_TX_Q_STATE_NUM_TIDS 8
+#define HTT_TX_Q_STATE_ENTRY_SIZE 1
+#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
+
+/**
+ * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ *
+ * Defines host q state format and behavior. See htt_q_state.
+ *
+ * @record_size: Defines the size of each host q entry in bytes. In practice,
+ * however, firmware (at least 10.4.3-00191) ignores this host
+ * configuration value and uses a hardcoded value of 1.
+ * @record_multiplier: This is valid only when q depth type is MSDUs. It
+ * defines the exponent for the power of 2 multiplication.
+ */
+struct htt_q_state_conf {
+ __le32 paddr;
+ __le16 num_peers;
+ __le16 num_tids;
+ u8 record_size;
+ u8 record_multiplier;
+ u8 pad[2];
+} __packed;
struct htt_frag_desc_bank_cfg {
u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
@@ -1316,6 +1351,114 @@ struct htt_frag_desc_bank_cfg {
u8 desc_size;
__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_q_state_conf q_state;
+} __packed;
+
+#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
+#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0
+#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
+
+/**
+ * htt_q_state - shared between host and firmware via DMA
+ *
+ * This structure is used for the host to expose its software queue state to
+ * firmware so that its rate control can schedule fetch requests for optimized
+ * performance. This is most notably used for MU-MIMO aggregation when multiple
+ * MU clients are connected.
+ *
+ * @count: Each element defines the host queue depth. When q depth type was
+ * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
+ * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
+ * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
+ * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
+ * record_multiplier (see htt_q_state_conf).
+ * @map: Used by firmware to quickly check which host queues are not empty. It
+ * is simply a bitmap, one bit per queue.
+ * @seq: Used by firmware to quickly check if the host queues were updated
+ * since it last checked.
+ *
+ * FIXME: Is the q_state map[] size calculation really correct?
+ */
+struct htt_q_state {
+ u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
+ u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
+ __le32 seq;
+} __packed;
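
The @count encoding above is easy to misread, so here is a minimal standalone sketch of the HTT_Q_DEPTH_TYPE_BYTES decode (FACTOR * 128 * 8^EXP), assuming only the mask/LSB values defined in this hunk; it is an illustration, not driver code:

#include <stdio.h>
#include <stdint.h>

#define FACTOR_MASK 0x3f /* HTT_TX_Q_STATE_ENTRY_FACTOR_MASK */
#define EXP_MASK    0xc0 /* HTT_TX_Q_STATE_ENTRY_EXP_MASK */
#define EXP_LSB     6    /* HTT_TX_Q_STATE_ENTRY_EXP_LSB */
#define COEFF       128  /* HTT_TX_Q_STATE_ENTRY_COEFFICIENT */

/* Decode one count[] entry into a byte depth: FACTOR * 128 * 8^EXP.
 * 8^exp is computed as 2^(3*exp) to stay in integer arithmetic.
 */
static uint32_t q_depth_bytes(uint8_t entry)
{
	uint32_t factor = entry & FACTOR_MASK;
	uint32_t exp = (entry & EXP_MASK) >> EXP_LSB;

	return factor * COEFF * (1u << (3 * exp));
}

int main(void)
{
	uint8_t entry = (1 << EXP_LSB) | 5; /* factor 5, exp 1 */

	printf("%u\n", q_depth_bytes(entry)); /* 5 * 128 * 8 = 5120 */
	return 0;
}
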
+
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0
+#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000
+#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12
+
+struct htt_tx_fetch_record {
+ __le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */
+ __le16 num_msdus;
+ __le32 num_bytes;
+} __packed;
+
+struct htt_tx_fetch_ind {
+ u8 pad0;
+ __le16 fetch_seq_num;
+ __le32 token;
+ __le16 num_resp_ids;
+ __le16 num_records;
+ struct htt_tx_fetch_record records[0];
+ __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
+} __packed;
+
+static inline void *
+ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
+{
+ return (void *)&ind->records[le16_to_cpu(ind->num_records)];
+}
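
The helper above works because resp_ids[] begins immediately after the last element of records[]; a standalone sketch of the same pointer arithmetic, using host-endian stand-ins for the __le16/__le32 fields (a real handler would go through le16_to_cpu()/le32_to_cpu()):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct fetch_record { /* stand-in for struct htt_tx_fetch_record */
	uint16_t info;
	uint16_t num_msdus;
	uint32_t num_bytes;
};

int main(void)
{
	uint16_t num_records = 2, num_resp_ids = 3;
	size_t len = num_records * sizeof(struct fetch_record) +
		     num_resp_ids * sizeof(uint32_t);
	uint8_t *buf = calloc(1, len);
	struct fetch_record *records = (struct fetch_record *)buf;
	uint32_t *resp_ids;
	int i;

	if (!buf)
		return 1;

	/* same arithmetic as &ind->records[le16_to_cpu(ind->num_records)] */
	resp_ids = (uint32_t *)&records[num_records];

	for (i = 0; i < num_resp_ids; i++)
		resp_ids[i] = 100 + i;
	for (i = 0; i < num_resp_ids; i++)
		printf("resp_id[%d] = %u\n", i, resp_ids[i]);

	free(buf);
	return 0;
}
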
+
+struct htt_tx_fetch_resp {
+ u8 pad0;
+ __le16 resp_id;
+ __le16 fetch_seq_num;
+ __le16 num_records;
+ __le32 token;
+ struct htt_tx_fetch_record records[0];
+} __packed;
+
+struct htt_tx_fetch_confirm {
+ u8 pad0;
+ __le16 num_resp_ids;
+ __le32 resp_ids[0];
+} __packed;
+
+enum htt_tx_mode_switch_mode {
+ HTT_TX_MODE_SWITCH_PUSH = 0,
+ HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
+};
+
+#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0)
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1
+
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2
+
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12
+
+struct htt_tx_mode_switch_record {
+ __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
+ __le16 num_max_msdus;
+} __packed;
+
+struct htt_tx_mode_switch_ind {
+ u8 pad0;
+ __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
+ __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
+ u8 pad1[2];
+ struct htt_tx_mode_switch_record records[0];
} __packed;
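
The info0/info1 words follow the driver's usual MASK/LSB convention, which ath10k extracts with its MS() macro; a standalone sketch with a local copy of that macro (the field values are made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* local copy of ath10k's MS() field-extraction helper */
#define MS(v, f) (((v) & f##_MASK) >> f##_LSB)

#define INFO0_ENABLE           0x0001 /* HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE */
#define INFO0_NUM_RECORDS_MASK 0xfffe
#define INFO0_NUM_RECORDS_LSB  1
#define INFO1_MODE_MASK        0x0003
#define INFO1_MODE_LSB         0
#define INFO1_THRESHOLD_MASK   0xfffc
#define INFO1_THRESHOLD_LSB    2

int main(void)
{
	uint16_t info0 = (10 << INFO0_NUM_RECORDS_LSB) | INFO0_ENABLE;
	uint16_t info1 = (300 << INFO1_THRESHOLD_LSB) | 1; /* PUSH_PULL */

	printf("enable %u records %u mode %u threshold %u\n",
	       info0 & INFO0_ENABLE,
	       (unsigned int)MS(info0, INFO0_NUM_RECORDS),
	       (unsigned int)MS(info1, INFO1_MODE),
	       (unsigned int)MS(info1, INFO1_THRESHOLD));
	return 0;
}
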
union htt_rx_pn_t {
@@ -1340,6 +1483,7 @@ struct htt_cmd {
struct htt_oob_sync_req oob_sync_req;
struct htt_aggr_conf aggr_conf;
struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+ struct htt_tx_fetch_resp tx_fetch_resp;
};
} __packed;
@@ -1364,6 +1508,9 @@ struct htt_resp {
struct htt_rx_pn_ind rx_pn_ind;
struct htt_rx_offload_ind rx_offload_ind;
struct htt_rx_in_ord_ind rx_in_ord_ind;
+ struct htt_tx_fetch_ind tx_fetch_ind;
+ struct htt_tx_fetch_confirm tx_fetch_confirm;
+ struct htt_tx_mode_switch_ind tx_mode_switch_ind;
};
} __packed;
@@ -1518,6 +1665,14 @@ struct ath10k_htt {
dma_addr_t paddr;
struct ath10k_htt_txbuf *vaddr;
} txbuf;
+
+ struct {
+ struct htt_q_state *vaddr;
+ dma_addr_t paddr;
+ u16 num_peers;
+ u16 num_tids;
+ enum htt_q_depth_type type;
+ } tx_q_state;
};
#define RX_HTT_HDR_STATUS_LEN 64
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 91afa3ae414c..ae9b686a4e91 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2011,9 +2011,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- spin_lock_bh(&htt->rx_ring.lock);
- __skb_queue_tail(&htt->rx_compl_q, skb);
- spin_unlock_bh(&htt->rx_ring.lock);
+ skb_queue_tail(&htt->rx_compl_q, skb);
tasklet_schedule(&htt->txrx_compl_task);
return;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
@@ -2111,9 +2109,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
- spin_lock_bh(&htt->rx_ring.lock);
- __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
- spin_unlock_bh(&htt->rx_ring.lock);
+ skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
tasklet_schedule(&htt->txrx_compl_task);
return;
}
@@ -2123,10 +2119,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
case HTT_T2H_MSG_TYPE_AGGR_CONF:
break;
- case HTT_T2H_MSG_TYPE_EN_STATS:
case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
- case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
- case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
+ case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+ case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
+ /* TODO: Implement pull-push logic */
+ break;
+ case HTT_T2H_MSG_TYPE_EN_STATS:
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);
@@ -2143,11 +2141,7 @@ EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb)
{
- struct ath10k_pktlog_10_4_hdr *hdr =
- (struct ath10k_pktlog_10_4_hdr *)skb->data;
-
- trace_ath10k_htt_pktlog(ar, hdr->payload,
- sizeof(*hdr) + __le16_to_cpu(hdr->size));
+ trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
@@ -2156,24 +2150,46 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
struct ath10k *ar = htt->ar;
+ struct sk_buff_head tx_q;
+ struct sk_buff_head rx_q;
+ struct sk_buff_head rx_ind_q;
struct htt_resp *resp;
struct sk_buff *skb;
+ unsigned long flags;
+
+ __skb_queue_head_init(&tx_q);
+ __skb_queue_head_init(&rx_q);
+ __skb_queue_head_init(&rx_ind_q);
- while ((skb = skb_dequeue(&htt->tx_compl_q))) {
+ spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
+ skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
+ spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);
+
+ spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
+ skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
+ spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
+
+ spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
+ skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
+ spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
+
+ while ((skb = __skb_dequeue(&tx_q))) {
ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
dev_kfree_skb_any(skb);
}
- spin_lock_bh(&htt->rx_ring.lock);
- while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
+ while ((skb = __skb_dequeue(&rx_q))) {
resp = (struct htt_resp *)skb->data;
+ spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_handler(htt, &resp->rx_ind);
+ spin_unlock_bh(&htt->rx_ring.lock);
dev_kfree_skb_any(skb);
}
- while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
+ while ((skb = __skb_dequeue(&rx_ind_q))) {
+ spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_in_ord_ind(ar, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
dev_kfree_skb_any(skb);
}
- spin_unlock_bh(&htt->rx_ring.lock);
}
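
The hunk above stops holding rx_ring.lock for the whole completion pass: each completion queue is spliced into a local list under its own lock and drained off-lock, taking rx_ring.lock only around the individual rx handlers. A hedged sketch of the generic splice-then-process shape (drain_queue and handler are illustrative names, not driver functions):

#include <linux/skbuff.h>

/* Take everything queued so far in one locked operation, then run the
 * handler without holding the producer's lock.
 */
static void drain_queue(struct sk_buff_head *q,
			void (*handler)(struct sk_buff *skb))
{
	struct sk_buff_head local_q;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&local_q);

	spin_lock_irqsave(&q->lock, flags);
	skb_queue_splice_init(q, &local_q);
	spin_unlock_irqrestore(&q->lock, flags);

	while ((skb = __skb_dequeue(&local_q))) {
		handler(skb);
		dev_kfree_skb_any(skb);
	}
}
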
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index b3adadb5f824..95acb727c068 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -97,6 +97,85 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
idr_remove(&htt->pending_tx, msdu_id);
}
+static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr)
+ return;
+
+ size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+
+ dma_free_coherent(htt->ar->dev,
+ size,
+ htt->frag_desc.vaddr,
+ htt->frag_desc.paddr);
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+ htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ return;
+
+ size = sizeof(*htt->tx_q_state.vaddr);
+
+ dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
+ kfree(htt->tx_q_state.vaddr);
+}
+
+static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+ int ret;
+
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ return 0;
+
+ htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
+ htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
+ htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;
+
+ size = sizeof(*htt->tx_q_state.vaddr);
+ htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
+ if (!htt->tx_q_state.vaddr)
+ return -ENOMEM;
+
+ htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
+ size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
+ kfree(htt->tx_q_state.vaddr);
+ return -EIO;
+ }
+
+ return 0;
+}
+
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
@@ -118,29 +197,32 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
goto free_idr_pending_tx;
}
- if (!ar->hw_params.continuous_frag_desc)
- goto skip_frag_desc_alloc;
-
- size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
- htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
- &htt->frag_desc.paddr,
- GFP_KERNEL);
- if (!htt->frag_desc.vaddr) {
- ath10k_warn(ar, "failed to alloc fragment desc memory\n");
- ret = -ENOMEM;
+ ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
goto free_txbuf;
}
-skip_frag_desc_alloc:
+ ret = ath10k_htt_tx_alloc_txq(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txq: %d\n", ret);
+ goto free_frag_desc;
+ }
+
return 0;
+free_frag_desc:
+ ath10k_htt_tx_free_cont_frag_desc(htt);
+
free_txbuf:
size = htt->max_num_pending_tx *
sizeof(struct ath10k_htt_txbuf);
dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
htt->txbuf.paddr);
+
free_idr_pending_tx:
idr_destroy(&htt->pending_tx);
+
return ret;
}
@@ -174,12 +256,8 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
htt->txbuf.paddr);
}
- if (htt->frag_desc.vaddr) {
- size = htt->max_num_pending_tx *
- sizeof(struct htt_msdu_ext_desc);
- dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
- htt->frag_desc.paddr);
- }
+ ath10k_htt_tx_free_txq(htt);
+ ath10k_htt_tx_free_cont_frag_desc(htt);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -268,7 +346,9 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
+ struct htt_frag_desc_bank_cfg *cfg;
int ret, size;
+ u8 info;
if (!ar->hw_params.continuous_frag_desc)
return 0;
@@ -286,14 +366,30 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
skb_put(skb, size);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
- cmd->frag_desc_bank_cfg.info = 0;
- cmd->frag_desc_bank_cfg.num_banks = 1;
- cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
- cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
- __cpu_to_le32(htt->frag_desc.paddr);
- cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
- cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
- __cpu_to_le16(htt->max_num_pending_tx - 1);
+
+ info = 0;
+ info |= SM(htt->tx_q_state.type,
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+ cfg = &cmd->frag_desc_bank_cfg;
+ cfg->info = info;
+ cfg->num_banks = 1;
+ cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
+ cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
+ cfg->bank_id[0].bank_min_id = 0;
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+ 1);
+
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+ cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+ cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+ cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+ cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 7b84d08a5154..f544d48518c3 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -109,6 +109,38 @@ const struct ath10k_hw_regs qca99x0_regs = {
.pcie_intr_clr_address = 0x00000010,
};
+const struct ath10k_hw_regs qca4019_regs = {
+ .rtc_soc_base_address = 0x00080000,
+ .soc_core_base_address = 0x00082000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+ /* qca4019 supports up to 12 copy engines. Since the base addresses
+ * of ce8 to ce11 are not directly referenced in the code, there is
+ * no need to have them as separate members in this table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .fw_indicator_address = 0x0004f00c,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
const struct ath10k_hw_values qca988x_values = {
.rtc_state_val_on = 3,
.ce_count = 8,
@@ -136,6 +168,13 @@ const struct ath10k_hw_values qca99x0_values = {
.ce_desc_meta_data_lsb = 4,
};
+const struct ath10k_hw_values qca4019_values = {
+ .ce_count = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 0678831e8671..f0cfbc745c97 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -106,6 +106,14 @@ enum qca9377_chip_id_rev {
#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* QCA4019 1.0 definitions */
+#define QCA4019_HW_1_0_DEV_VERSION 0x01000000
+#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0"
+#define QCA4019_HW_1_0_FW_FILE "firmware.bin"
+#define QCA4019_HW_1_0_OTP_FILE "otp.bin"
+#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
@@ -200,6 +208,7 @@ enum ath10k_hw_rev {
ATH10K_HW_QCA6174,
ATH10K_HW_QCA99X0,
ATH10K_HW_QCA9377,
+ ATH10K_HW_QCA4019,
};
struct ath10k_hw_regs {
@@ -232,6 +241,7 @@ struct ath10k_hw_regs {
extern const struct ath10k_hw_regs qca988x_regs;
extern const struct ath10k_hw_regs qca6174_regs;
extern const struct ath10k_hw_regs qca99x0_regs;
+extern const struct ath10k_hw_regs qca4019_regs;
struct ath10k_hw_values {
u32 rtc_state_val_on;
@@ -245,6 +255,7 @@ struct ath10k_hw_values {
extern const struct ath10k_hw_values qca988x_values;
extern const struct ath10k_hw_values qca6174_values;
extern const struct ath10k_hw_values qca99x0_values;
+extern const struct ath10k_hw_values qca4019_values;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
@@ -253,6 +264,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
+#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
/* Known pecularities:
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
@@ -363,14 +375,19 @@ enum ath10k_hw_4addr_pad {
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 128
#define TARGET_10X_NUM_STATIONS 128
+#define TARGET_10X_TX_STATS_NUM_STATIONS 118
#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
(TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_TX_STATS_NUM_PEERS ((TARGET_10X_TX_STATS_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
#define TARGET_10X_NUM_TIDS_MAX 256
#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
(TARGET_10X_NUM_PEERS) * 2)
+#define TARGET_10X_TX_STATS_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_TX_STATS_NUM_PEERS) * 2)
#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
@@ -414,16 +431,11 @@ enum ath10k_hw_4addr_pad {
#define TARGET_10_4_ACTIVE_PEERS 0
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
-#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
#define TARGET_10_4_NUM_PEER_KEYS 2
#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
#define TARGET_10_4_AST_SKID_LIMIT 32
-#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \
- BIT(2) | BIT(3))
-#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \
- BIT(2) | BIT(3))
/* 100 ms for video, best-effort, and background */
#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100
@@ -449,7 +461,6 @@ enum ath10k_hw_4addr_pad {
#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10_4_VOW_CONFIG 0
#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3
-#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10_4_11AC_TX_MAX_FRAGS 2
#define TARGET_10_4_MAX_PEER_EXT_STATS 16
#define TARGET_10_4_SMART_ANT_CAP 0
@@ -601,6 +612,7 @@ enum ath10k_hw_4addr_pad {
#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address
#define FW_IND_EVENT_PENDING 1
#define FW_IND_INITIALIZED 2
+#define FW_IND_HOST_READY 0x80000000
/* HOST_REG interrupt from firmware */
#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 6146a293601a..78999c9de23b 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1358,10 +1358,7 @@ static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
const u8 *p2p_ie;
int ret;
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
- return 0;
-
- if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
return 0;
mgmt = (void *)bcn->data;
@@ -3259,8 +3256,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
/* This is case only for P2P_GO */
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
- arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
return;
if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
@@ -3988,7 +3984,7 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
- u32 burst_enable;
+ u32 param;
int ret = 0;
/*
@@ -4031,13 +4027,15 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_power_down;
}
- ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
+ param = ar->wmi.pdev_param->pmf_qos;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
goto err_core_stop;
}
- ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
+ param = ar->wmi.pdev_param->dynamic_bw;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
goto err_core_stop;
@@ -4053,8 +4051,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
- burst_enable = ar->wmi.pdev_param->burst_enable;
- ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+ param = ar->wmi.pdev_param->burst_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
if (ret) {
ath10k_warn(ar, "failed to disable burst: %d\n", ret);
goto err_core_stop;
@@ -4072,8 +4070,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
* this problem.
*/
- ret = ath10k_wmi_pdev_set_param(ar,
- ar->wmi.pdev_param->arp_ac_override, 0);
+ param = ar->wmi.pdev_param->arp_ac_override;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
if (ret) {
ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
ret);
@@ -4092,8 +4090,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
}
- ret = ath10k_wmi_pdev_set_param(ar,
- ar->wmi.pdev_param->ani_enable, 1);
+ param = ar->wmi.pdev_param->ani_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
ath10k_warn(ar, "failed to enable ani by default: %d\n",
ret);
@@ -4102,6 +4100,18 @@ static int ath10k_start(struct ieee80211_hw *hw)
ar->ani_enabled = true;
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+ param = ar->wmi.pdev_param->peer_stats_update_period;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ PEER_DEFAULT_STATS_UPDATE_PERIOD);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set peer stats period : %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
@@ -4349,25 +4359,29 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
bit, ar->free_vdev_map);
arvif->vdev_id = bit;
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+ arvif->vdev_subtype =
+ ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
if (vif->p2p)
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
break;
case NL80211_IFTYPE_ADHOC:
arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
break;
case NL80211_IFTYPE_MESH_POINT:
- if (test_bit(WMI_SERVICE_MESH, ar->wmi.svc_map)) {
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH;
+ if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_MESH_11S);
} else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
ret = -EINVAL;
ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
@@ -4379,7 +4393,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->vdev_type = WMI_VDEV_TYPE_AP;
if (vif->p2p)
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_GO);
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
@@ -6366,12 +6381,13 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static int ath10k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
arvif->vdev_id, sta->addr, tid, action);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ee925c618535..b3cff1d3364a 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -94,7 +94,6 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
-static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
@@ -620,7 +619,7 @@ static void ath10k_pci_sleep_sync(struct ath10k *ar)
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
-void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
@@ -642,7 +641,7 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
ath10k_pci_sleep(ar);
}
-u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 val;
@@ -667,6 +666,20 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return val;
}
+inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ar_pci->bus_ops->write32(ar, offset, value);
+}
+
+inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ar_pci->bus_ops->read32(ar, offset);
+}
+
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
@@ -687,7 +700,7 @@ void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}
-static bool ath10k_pci_irq_pending(struct ath10k *ar)
+bool ath10k_pci_irq_pending(struct ath10k *ar)
{
u32 cause;
@@ -700,7 +713,7 @@ static bool ath10k_pci_irq_pending(struct ath10k *ar)
return false;
}
-static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
/* IMPORTANT: INTR_CLR register has to be set after
* INTR_ENABLE is set to 0, otherwise interrupt can not be
@@ -716,7 +729,7 @@ static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
PCIE_INTR_ENABLE_ADDRESS);
}
-static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS,
@@ -809,7 +822,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
}
}
-static void ath10k_pci_rx_post(struct ath10k *ar)
+void ath10k_pci_rx_post(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i;
@@ -818,7 +831,7 @@ static void ath10k_pci_rx_post(struct ath10k *ar)
ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}
-static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
+void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
struct ath10k *ar = (void *)ptr;
@@ -838,6 +851,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
0x7ff) << 21;
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA4019:
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
break;
}
@@ -1007,8 +1021,8 @@ static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
-static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
- const void *data, int nbytes)
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
@@ -1263,8 +1277,8 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
-static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
- struct ath10k_hif_sg_item *items, int n_items)
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
@@ -1332,13 +1346,13 @@ err:
return err;
}
-static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
- size_t buf_len)
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
{
return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}
-static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1406,8 +1420,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
queue_work(ar->workqueue, &ar->restart_work);
}
-static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
- int force)
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
{
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
@@ -1432,7 +1446,7 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
ath10k_ce_per_engine_service(ar, pipe);
}
-static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i;
@@ -1446,8 +1460,8 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
del_timer_sync(&ar_pci->rx_post_retry);
}
-static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
- u8 *ul_pipe, u8 *dl_pipe)
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
@@ -1491,8 +1505,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
return 0;
}
-static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
- u8 *ul_pipe, u8 *dl_pipe)
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
{
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
@@ -1516,6 +1530,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA4019:
/* TODO: Find appropriate register configuration for QCA99X0
* to mask irq/MSI.
*/
@@ -1538,6 +1553,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA4019:
/* TODO: Find appropriate register configuration for QCA99X0
* to unmask irq/MSI.
*/
@@ -1668,7 +1684,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
}
}
-static void ath10k_pci_ce_deinit(struct ath10k *ar)
+void ath10k_pci_ce_deinit(struct ath10k *ar)
{
int i;
@@ -1676,7 +1692,7 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
ath10k_ce_deinit_pipe(ar, i);
}
-static void ath10k_pci_flush(struct ath10k *ar)
+void ath10k_pci_flush(struct ath10k *ar)
{
ath10k_pci_kill_tasklet(ar);
ath10k_pci_buffer_cleanup(ar);
@@ -1711,9 +1727,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
-static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
- void *req, u32 req_len,
- void *resp, u32 *resp_len)
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
@@ -1756,7 +1772,7 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
DMA_FROM_DEVICE);
ret = dma_mapping_error(ar->dev, resp_paddr);
if (ret) {
- ret = EIO;
+ ret = -EIO;
goto err_req;
}
@@ -1907,7 +1923,14 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
return 1;
}
-static int ath10k_pci_init_config(struct ath10k *ar)
+static int ath10k_bus_get_num_banks(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ar_pci->bus_ops->get_num_banks(ar);
+}
+
+int ath10k_pci_init_config(struct ath10k *ar)
{
u32 interconnect_targ_addr;
u32 pcie_state_targ_addr = 0;
@@ -2018,7 +2041,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
/* first bank is switched to IRAM */
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
HI_EARLY_ALLOC_MAGIC_MASK);
- ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
+ ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
@@ -2071,7 +2094,7 @@ static void ath10k_pci_override_ce_config(struct ath10k *ar)
target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
}
-static int ath10k_pci_alloc_pipes(struct ath10k *ar)
+int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe;
@@ -2102,7 +2125,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_free_pipes(struct ath10k *ar)
+void ath10k_pci_free_pipes(struct ath10k *ar)
{
int i;
@@ -2110,7 +2133,7 @@ static void ath10k_pci_free_pipes(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
-static int ath10k_pci_init_pipes(struct ath10k *ar)
+int ath10k_pci_init_pipes(struct ath10k *ar)
{
int i, ret;
@@ -2453,7 +2476,7 @@ err_sleep:
return ret;
}
-static void ath10k_pci_hif_power_down(struct ath10k *ar)
+void ath10k_pci_hif_power_down(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
@@ -2722,7 +2745,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
free_irq(ar_pci->pdev->irq + i, ar);
}
-static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i;
@@ -2808,7 +2831,7 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
return 0;
}
-static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
+int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long timeout;
@@ -2989,6 +3012,43 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
return false;
}
+int ath10k_pci_setup_resource(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_init(&ar_pci->ce_lock);
+ spin_lock_init(&ar_pci->ps_lock);
+
+ setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
+ (unsigned long)ar);
+
+ if (QCA_REV_6174(ar))
+ ath10k_pci_override_ce_config(ar);
+
+ ret = ath10k_pci_alloc_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_pci_release_resource(struct ath10k *ar)
+{
+ ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_ce_deinit(ar);
+ ath10k_pci_free_pipes(ar);
+}
+
+static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
+ .read32 = ath10k_bus_pci_read32,
+ .write32 = ath10k_bus_pci_write32,
+ .get_num_banks = ath10k_pci_get_num_banks,
+};
+
static int ath10k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
@@ -3039,40 +3099,32 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->ar = ar;
ar->dev_id = pci_dev->device;
ar_pci->pci_ps = pci_ps;
+ ar_pci->bus_ops = &ath10k_pci_bus_ops;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
ar->id.subsystem_vendor = pdev->subsystem_vendor;
ar->id.subsystem_device = pdev->subsystem_device;
- spin_lock_init(&ar_pci->ce_lock);
- spin_lock_init(&ar_pci->ps_lock);
-
- setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
- (unsigned long)ar);
setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
(unsigned long)ar);
- ret = ath10k_pci_claim(ar);
+ ret = ath10k_pci_setup_resource(ar);
if (ret) {
- ath10k_err(ar, "failed to claim device: %d\n", ret);
+ ath10k_err(ar, "failed to setup resource: %d\n", ret);
goto err_core_destroy;
}
- if (QCA_REV_6174(ar))
- ath10k_pci_override_ce_config(ar);
-
- ret = ath10k_pci_alloc_pipes(ar);
+ ret = ath10k_pci_claim(ar);
if (ret) {
- ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
- ret);
- goto err_sleep;
+ ath10k_err(ar, "failed to claim device: %d\n", ret);
+ goto err_free_pipes;
}
ret = ath10k_pci_force_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake up device : %d\n", ret);
- goto err_free_pipes;
+ goto err_sleep;
}
ath10k_pci_ce_deinit(ar);
@@ -3081,7 +3133,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ret = ath10k_pci_init_irq(ar);
if (ret) {
ath10k_err(ar, "failed to init irqs: %d\n", ret);
- goto err_free_pipes;
+ goto err_sleep;
}
ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
@@ -3127,13 +3179,13 @@ err_free_irq:
err_deinit_irq:
ath10k_pci_deinit_irq(ar);
-err_free_pipes:
- ath10k_pci_free_pipes(ar);
-
err_sleep:
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
+
err_core_destroy:
ath10k_core_destroy(ar);
@@ -3157,10 +3209,8 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_core_unregister(ar);
ath10k_pci_free_irq(ar);
- ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
- ath10k_pci_ce_deinit(ar);
- ath10k_pci_free_pipes(ar);
+ ath10k_pci_release_resource(ar);
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
@@ -3184,6 +3234,10 @@ static int __init ath10k_pci_init(void)
printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
ret);
+ ret = ath10k_ahb_init();
+ if (ret)
+ printk(KERN_ERR "ahb init failed: %d\n", ret);
+
return ret;
}
module_init(ath10k_pci_init);
@@ -3191,6 +3245,7 @@ module_init(ath10k_pci_init);
static void __exit ath10k_pci_exit(void)
{
pci_unregister_driver(&ath10k_pci_driver);
+ ath10k_ahb_exit();
}
module_exit(ath10k_pci_exit);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index f91bf333cb75..249c73a69800 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -22,6 +22,7 @@
#include "hw.h"
#include "ce.h"
+#include "ahb.h"
/*
* maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
@@ -157,6 +158,12 @@ struct ath10k_pci_supp_chip {
u32 rev_id;
};
+struct ath10k_bus_ops {
+ u32 (*read32)(struct ath10k *ar, u32 offset);
+ void (*write32)(struct ath10k *ar, u32 offset, u32 value);
+ int (*get_num_banks)(struct ath10k *ar);
+};
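
On the PCI side these ops are filled in by ath10k_pci_bus_ops (see pci.c above); an AHB backend would plug in its own MMIO accessors the same way. A sketch under the assumption of hypothetical ath10k_ahb_* names; the real ahb.c is not part of this hunk and may differ:

/* Illustrative only: ath10k_ahb_priv() and its ->mem field are assumed. */
static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset)
{
	return ioread32(ath10k_ahb_priv(ar)->mem + offset);
}

static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value)
{
	iowrite32(value, ath10k_ahb_priv(ar)->mem + offset);
}

static int ath10k_ahb_get_num_banks(struct ath10k *ar)
{
	return 1; /* placeholder; the real value is target-specific */
}

static const struct ath10k_bus_ops ath10k_ahb_bus_ops = {
	.read32 = ath10k_ahb_read32,
	.write32 = ath10k_ahb_write32,
	.get_num_banks = ath10k_ahb_get_num_banks,
};
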
+
struct ath10k_pci {
struct pci_dev *pdev;
struct device *dev;
@@ -225,6 +232,14 @@ struct ath10k_pci {
* on MMIO read/write.
*/
bool pci_ps;
+
+ const struct ath10k_bus_ops *bus_ops;
+
+ /* Keep this entry last; when AHB support is enabled, memory for
+ * struct ath10k_ahb is allocated in the continuation of this
+ * struct.
+ */
+ struct ath10k_ahb ahb[0];
};
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -253,6 +268,40 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes);
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len,
+ void *resp, u32 *resp_len);
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe,
+ u8 *dl_pipe);
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force);
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe);
+void ath10k_pci_hif_power_down(struct ath10k *ar);
+int ath10k_pci_alloc_pipes(struct ath10k *ar);
+void ath10k_pci_free_pipes(struct ath10k *ar);
+void ath10k_pci_rx_replenish_retry(unsigned long ptr);
+void ath10k_pci_ce_deinit(struct ath10k *ar);
+void ath10k_pci_init_irq_tasklets(struct ath10k *ar);
+void ath10k_pci_kill_tasklet(struct ath10k *ar);
+int ath10k_pci_init_pipes(struct ath10k *ar);
+int ath10k_pci_init_config(struct ath10k *ar);
+void ath10k_pci_rx_post(struct ath10k *ar);
+void ath10k_pci_flush(struct ath10k *ar);
+void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
+bool ath10k_pci_irq_pending(struct ath10k *ar);
+void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+int ath10k_pci_setup_resource(struct ath10k *ar);
+void ath10k_pci_release_resource(struct ath10k *ar);
+
/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too
* frequently. To avoid this, put the SoC to sleep after a very conservative grace
* period. Adjust with great care.
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 05a421bc322a..361f143b019c 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -456,4 +456,7 @@ Fw Mode/SubMode Mask
#define QCA99X0_BOARD_DATA_SZ 12288
#define QCA99X0_BOARD_EXT_DATA_SZ 0
+#define QCA4019_BOARD_DATA_SZ 12064
+#define QCA4019_BOARD_EXT_DATA_SZ 0
+
#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 71bdb368813d..e0d00cef0bd8 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -250,6 +250,7 @@ TRACE_EVENT(ath10k_wmi_dbglog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type);
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
@@ -257,14 +258,16 @@ TRACE_EVENT(ath10k_wmi_dbglog,
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "%s %s len %zu",
+ "%s %s %d len %zu",
__get_str(driver),
__get_str(device),
+ __entry->hw_type,
__entry->buf_len
)
);
@@ -277,6 +280,7 @@ TRACE_EVENT(ath10k_htt_pktlog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type);
__field(u16, buf_len)
__dynamic_array(u8, pktlog, buf_len)
),
@@ -284,14 +288,16 @@ TRACE_EVENT(ath10k_htt_pktlog,
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
),
TP_printk(
- "%s %s size %hu",
+ "%s %s %d size %hu",
__get_str(driver),
__get_str(device),
+ __entry->hw_type,
__entry->buf_len
)
);
@@ -440,6 +446,7 @@ TRACE_EVENT(ath10k_htt_rx_desc,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type);
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
@@ -447,14 +454,16 @@ TRACE_EVENT(ath10k_htt_rx_desc,
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
__entry->len = len;
memcpy(__get_dynamic_array(rxdesc), data, len);
),
TP_printk(
- "%s %s rxdesc len %d",
+ "%s %s %d rxdesc len %d",
__get_str(driver),
__get_str(device),
+ __entry->hw_type,
__entry->len
)
);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 8f4f6a892581..32ab34edceb5 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -186,6 +186,8 @@ struct wmi_ops {
u8 enable,
u32 detect_level,
u32 detect_margin);
+ int (*get_vdev_subtype)(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1327,4 +1329,13 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}
+static inline int
+ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
+{
+ if (!ar->wmi.ops->get_vdev_subtype)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->get_vdev_subtype(ar, subtype);
+}
+
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 3b3a27b859f3..108593202052 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3483,6 +3483,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index a7c3d299639b..70261387d1a5 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2862,11 +2862,20 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
- const struct wmi_10_2_4_peer_stats *src;
+ const struct wmi_10_2_4_ext_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
+ int stats_len;
+ bool ext_peer_stats_support;
+
+ ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS,
+ ar->wmi.svc_map);
+ if (ext_peer_stats_support)
+ stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
+ else
+ stats_len = sizeof(struct wmi_10_2_4_peer_stats);
src = (void *)skb->data;
- if (!skb_pull(skb, sizeof(*src)))
+ if (!skb_pull(skb, stats_len))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
@@ -2876,6 +2885,9 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
ath10k_wmi_pull_peer_stats(&src->common.old, dst);
dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
+
+ if (ext_peer_stats_support)
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
/* FIXME: expose 10.2 specific values */
list_add_tail(&dst->list, &stats->peers);
@@ -3184,7 +3196,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
struct sk_buff *bcn,
const struct wmi_p2p_noa_info *noa)
{
- if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ if (!arvif->vif->p2p)
return;
ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
@@ -3244,6 +3256,50 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
+ u32 map;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens, the firmware has changed and ath10k should
+ * update the max size of the tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+ i++;
+ }
+
+ return 0;
+}
+
static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_swba_ev_arg *arg)
@@ -4562,9 +4618,9 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
- TARGET_10_4_NUM_VDEVS;
- ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
- TARGET_10_4_NUM_VDEVS;
+ ar->max_num_vdevs;
+ ar->num_active_peers = ar->hw_params.qcache_active_peers +
+ ar->max_num_vdevs;
ar->num_tids = ar->num_active_peers * 2;
ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
}
@@ -5460,9 +5516,15 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
u32 len, val, features;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
- config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
- config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+ config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
+ } else {
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ }
+
config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
@@ -5517,6 +5579,9 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
features |= WMI_10_2_COEX_GPIO;
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ features |= WMI_10_2_PEER_STATS;
+
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -5543,8 +5608,8 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
- config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
- config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+ config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask);
+ config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask);
config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
@@ -5575,7 +5640,7 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
config.gtk_offload_max_vdev =
__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
- config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+ config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
config.max_peer_ext_stats =
__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
@@ -7126,6 +7191,9 @@ ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
"Peer TX rate", peer->peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RX rate", peer->peer_rx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX duration", peer->rx_duration);
+
len += scnprintf(buf + len, buf_len - len, "\n");
*length = len;
}
@@ -7351,6 +7419,71 @@ unlock:
buf[len] = 0;
}
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_LEGACY_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -ENOTSUPP;
+ }
+ return -ENOTSUPP;
+}
+
+static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_10_2_4_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -ENOTSUPP;
+ }
+ return -ENOTSUPP;
+}
+
+static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_10_4_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
+ }
+ return -ENOTSUPP;
+}
+
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@@ -7410,6 +7543,7 @@ static const struct wmi_ops wmi_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7477,6 +7611,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7545,6 +7680,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
/* .gen_pdev_enable_adaptive_cca not implemented */
};
@@ -7566,7 +7702,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
- .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
@@ -7611,6 +7747,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.gen_pdev_enable_adaptive_cca =
ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
+ .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7677,6 +7814,7 @@ static const struct wmi_ops wmi_10_4_ops = {
/* shared with 10.2 */
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
};
int ath10k_wmi_attach(struct ath10k *ar)
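
Taken together, the three *_op_get_vdev_subtype implementations above let callers use the single abstract enum wmi_vdev_subtype and defer the per-firmware encoding to command-build time. A hedged usage sketch (the call site is illustrative, not quoted from the driver):

    int subtype;

    subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_MESH_11S);
    if (subtype < 0)
            return subtype; /* this firmware branch has no 11s mesh */
    /* subtype now holds the value this firmware's ABI expects */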
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index d85ad7855d20..4d3cbc44fcd2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -176,7 +176,10 @@ enum wmi_service {
WMI_SERVICE_AUX_CHAN_LOAD_INTF,
WMI_SERVICE_BSS_CHANNEL_INFO_64,
WMI_SERVICE_EXT_RES_CFG_SUPPORT,
- WMI_SERVICE_MESH,
+ WMI_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_NON_11S,
+ WMI_SERVICE_PEER_STATS,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT,
/* keep last */
WMI_SERVICE_MAX,
@@ -213,6 +216,7 @@ enum wmi_10x_service {
WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
WMI_10X_SERVICE_MESH,
WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_10X_SERVICE_PEER_STATS,
};
enum wmi_main_service {
@@ -294,7 +298,10 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
- WMI_10_4_SERVICE_MESH,
+ WMI_10_4_SERVICE_MESH_NON_11S,
+ WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_10_4_SERVICE_PEER_STATS,
+ WMI_10_4_SERVICE_MESH_11S,
};
static inline char *wmi_service_name(int service_id)
@@ -385,7 +392,10 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT);
- SVCSTR(WMI_SERVICE_MESH);
+ SVCSTR(WMI_SERVICE_MESH_11S);
+ SVCSTR(WMI_SERVICE_MESH_NON_11S);
+ SVCSTR(WMI_SERVICE_PEER_STATS);
+ SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
default:
return NULL;
}
@@ -460,9 +470,11 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
SVCMAP(WMI_10X_SERVICE_MESH,
- WMI_SERVICE_MESH, len);
+ WMI_SERVICE_MESH_11S, len);
SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_PEER_STATS,
+ WMI_SERVICE_PEER_STATS, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -623,8 +635,14 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
- SVCMAP(WMI_10_4_SERVICE_MESH,
- WMI_SERVICE_MESH, len);
+ SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S,
+ WMI_SERVICE_MESH_NON_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_STATS,
+ WMI_SERVICE_PEER_STATS, len);
+ SVCMAP(WMI_10_4_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_11S, len);
}
#undef SVCMAP
@@ -1800,7 +1818,6 @@ enum wmi_channel_change_cause {
#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
-#define WMI_10_4_MAX_SPATIAL_STREAM 4
/* HT Capabilities*/
#define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */
@@ -2417,6 +2434,7 @@ enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
WMI_10_2_COEX_GPIO = BIT(3),
+ WMI_10_2_PEER_STATS = BIT(7),
};
struct wmi_resource_config_10_2 {
@@ -4227,7 +4245,13 @@ struct wmi_10_2_peer_stats {
struct wmi_10_2_4_peer_stats {
struct wmi_10_2_peer_stats common;
- __le32 unknown_value; /* FIXME: what is this word? */
+ __le32 peer_rssi_changed;
+} __packed;
+
+struct wmi_10_2_4_ext_peer_stats {
+ struct wmi_10_2_peer_stats common;
+ __le32 peer_rssi_changed;
+ __le32 rx_duration;
} __packed;
struct wmi_10_4_peer_stats {
@@ -4270,12 +4294,40 @@ enum wmi_vdev_type {
};
enum wmi_vdev_subtype {
- WMI_VDEV_SUBTYPE_NONE = 0,
- WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
- WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
- WMI_VDEV_SUBTYPE_P2P_GO = 3,
- WMI_VDEV_SUBTYPE_PROXY_STA = 4,
- WMI_VDEV_SUBTYPE_MESH = 5,
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+};
+
+enum wmi_vdev_subtype_legacy {
+ WMI_VDEV_SUBTYPE_LEGACY_NONE = 0,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4,
+};
+
+enum wmi_vdev_subtype_10_2_4 {
+ WMI_VDEV_SUBTYPE_10_2_4_NONE = 0,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4,
+ WMI_VDEV_SUBTYPE_10_2_4_MESH_11S = 5,
+};
+
+enum wmi_vdev_subtype_10_4 {
+ WMI_VDEV_SUBTYPE_10_4_NONE = 0,
+ WMI_VDEV_SUBTYPE_10_4_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_10_4_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_10_4_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_10_4_PROXY_STA = 4,
+ WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5,
+ WMI_VDEV_SUBTYPE_10_4_MESH_11S = 6,
};
/* values for vdev_subtype */
@@ -5442,6 +5494,16 @@ struct wmi_host_swba_event {
struct wmi_bcn_info bcn_info[0];
} __packed;
+struct wmi_10_2_4_bcn_info {
+ struct wmi_tim_info tim_info;
+ /* The 10.2.4 FW doesn't have p2p NOA info */
+} __packed;
+
+struct wmi_10_2_4_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_10_2_4_bcn_info bcn_info[0];
+} __packed;
+
/* 16 words = 512 client + 1 word = for guard */
#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
@@ -6436,5 +6498,7 @@ size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head);
void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
struct ath10k_fw_stats *fw_stats,
char *buf);
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype);
#endif /* _WMI_H_ */
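
The SVCMAP additions extend the usual service-bitmap translation: firmware reports its capabilities as a little-endian u32 bitmap, and each SVCMAP line tests one firmware bit and, if set, records the corresponding abstract WMI_SERVICE_* bit in the driver-side map. A simplified stand-alone illustration of that translation (not the driver's exact macro; endianness conversion omitted):

    #include <stdint.h>

    /* Test bit fw_bit in a u32 bitmap of len bits and, if set, record
     * drv_bit in a plain unsigned long bitmap. */
    static void map_service(const uint32_t *in, unsigned long *out,
                            unsigned int fw_bit, unsigned int drv_bit,
                            unsigned int len)
    {
            if (fw_bit < len && (in[fw_bit / 32] & (1u << (fw_bit % 32))))
                    out[drv_bit / (8 * sizeof(long))] |=
                            1ul << (drv_bit % (8 * sizeof(long)));
    }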
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 25e45e4d1a60..815efe9fd208 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -126,12 +126,8 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
static void ath9k_ani_restart(struct ath_hw *ah)
{
- struct ar5416AniState *aniState;
-
- if (!ah->curchan)
- return;
+ struct ar5416AniState *aniState = &ah->ani;
- aniState = &ah->ani;
aniState->listenTime = 0;
ENABLE_REGWRITE_BUFFER(ah);
@@ -221,12 +217,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
{
- struct ar5416AniState *aniState;
-
- if (!ah->curchan)
- return;
-
- aniState = &ah->ani;
+ struct ar5416AniState *aniState = &ah->ani;
if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
@@ -281,12 +272,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
{
- struct ar5416AniState *aniState;
-
- if (!ah->curchan)
- return;
-
- aniState = &ah->ani;
+ struct ar5416AniState *aniState = &ah->ani;
if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
@@ -299,9 +285,7 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
*/
static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
{
- struct ar5416AniState *aniState;
-
- aniState = &ah->ani;
+ struct ar5416AniState *aniState = &ah->ani;
/* lower OFDM noise immunity */
if (aniState->ofdmNoiseImmunityLevel > 0 &&
@@ -329,7 +313,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
struct ath_common *common = ath9k_hw_common(ah);
int ofdm_nil, cck_nil;
- if (!ah->curchan)
+ if (!chan)
return;
BUG_ON(aniState == NULL);
@@ -416,14 +400,10 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
{
- struct ar5416AniState *aniState;
+ struct ar5416AniState *aniState = &ah->ani;
struct ath_common *common = ath9k_hw_common(ah);
u32 ofdmPhyErrRate, cckPhyErrRate;
- if (!ah->curchan)
- return;
-
- aniState = &ah->ani;
if (!ath9k_hw_ani_read_counters(ah))
return;
@@ -450,7 +430,9 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
} else if (cckPhyErrRate > ah->config.cck_trig_high) {
ath9k_hw_ani_cck_err_trigger(ah);
aniState->ofdmsTurn = true;
- }
+ } else
+ return;
+
ath9k_ani_restart(ah);
}
}
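
The last ani.c hunk changes ath9k_hw_ani_monitor() so that ath9k_ani_restart() runs only when one of the error-rate triggers actually fired; previously the listen period could be restarted even when neither threshold was crossed. Roughly the resulting control flow, as a stand-alone sketch with stub names rather than ath9k symbols:

    static void trigger_ofdm(void)   { /* raise OFDM noise immunity */ }
    static void trigger_cck(void)    { /* raise CCK noise immunity */ }
    static void restart_listen(void) { /* zero listen time and counters */ }

    static void ani_monitor(unsigned int ofdm_err, unsigned int cck_err,
                            unsigned int ofdm_high, unsigned int cck_high)
    {
            if (ofdm_err > ofdm_high)
                    trigger_ofdm();
            else if (cck_err > cck_high)
                    trigger_cck();
            else
                    return;         /* nothing fired: keep accumulating */

            restart_listen();
    }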
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
index 1db119d77783..547cd46da260 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_aic.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c
@@ -53,19 +53,19 @@ static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah)
return true;
}
-static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram,
+static int16_t ar9003_aic_find_valid(bool *cal_sram_valid,
bool dir, u8 index)
{
int16_t i;
if (dir) {
for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
- if (cal_sram[i].valid)
+ if (cal_sram_valid[i])
break;
}
} else {
for (i = index - 1; i >= 0; i--) {
- if (cal_sram[i].valid)
+ if (cal_sram_valid[i])
break;
}
}
@@ -264,7 +264,7 @@ static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count)
static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
{
struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic;
- struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL];
+ bool cal_sram_valid[ATH_AIC_MAX_BT_CHANNEL];
struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL];
u32 dir_path_gain_idx, quad_path_gain_idx, value;
u32 fixed_com_att_db;
@@ -272,33 +272,34 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
int16_t i;
bool ret = true;
- memset(&cal_sram, 0, sizeof(cal_sram));
+ memset(&cal_sram_valid, 0, sizeof(cal_sram_valid));
memset(&aic_sram, 0, sizeof(aic_sram));
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
+ struct ath_aic_sram_info sram;
value = aic->aic_sram[i];
- cal_sram[i].valid =
+ cal_sram_valid[i] = sram.valid =
MS(value, AR_PHY_AIC_SRAM_VALID);
- cal_sram[i].rot_quad_att_db =
+ sram.rot_quad_att_db =
MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB);
- cal_sram[i].vga_quad_sign =
+ sram.vga_quad_sign =
MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN);
- cal_sram[i].rot_dir_att_db =
+ sram.rot_dir_att_db =
MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB);
- cal_sram[i].vga_dir_sign =
+ sram.vga_dir_sign =
MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN);
- cal_sram[i].com_att_6db =
+ sram.com_att_6db =
MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB);
- if (cal_sram[i].valid) {
- dir_path_gain_idx = cal_sram[i].rot_dir_att_db +
- com_att_db_table[cal_sram[i].com_att_6db];
- quad_path_gain_idx = cal_sram[i].rot_quad_att_db +
- com_att_db_table[cal_sram[i].com_att_6db];
+ if (sram.valid) {
+ dir_path_gain_idx = sram.rot_dir_att_db +
+ com_att_db_table[sram.com_att_6db];
+ quad_path_gain_idx = sram.rot_quad_att_db +
+ com_att_db_table[sram.com_att_6db];
- dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1;
- quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1;
+ dir_path_sign = (sram.vga_dir_sign) ? 1 : -1;
+ quad_path_sign = (sram.vga_quad_sign) ? 1 : -1;
aic_sram[i].dir_path_gain_lin = dir_path_sign *
aic_lin_table[dir_path_gain_idx];
@@ -310,16 +311,16 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
int16_t start_idx, end_idx;
- if (cal_sram[i].valid)
+ if (cal_sram_valid[i])
continue;
- start_idx = ar9003_aic_find_valid(cal_sram, 0, i);
- end_idx = ar9003_aic_find_valid(cal_sram, 1, i);
+ start_idx = ar9003_aic_find_valid(cal_sram_valid, 0, i);
+ end_idx = ar9003_aic_find_valid(cal_sram_valid, 1, i);
if (start_idx < 0) {
/* extrapolation */
start_idx = end_idx;
- end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx);
+ end_idx = ar9003_aic_find_valid(cal_sram_valid, 1, start_idx);
if (end_idx < 0) {
ret = false;
@@ -342,7 +343,7 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
if (end_idx < 0) {
/* extrapolation */
- end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx);
+ end_idx = ar9003_aic_find_valid(cal_sram_valid, 0, start_idx);
if (end_idx < 0) {
ret = false;
@@ -378,19 +379,21 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
}
/* From dir/quad_path_gain_lin to sram. */
- i = ar9003_aic_find_valid(cal_sram, 1, 0);
+ i = ar9003_aic_find_valid(cal_sram_valid, 1, 0);
if (i < 0) {
i = 0;
ret = false;
}
- fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db];
+ fixed_com_att_db = com_att_db_table[MS(aic->aic_sram[i],
+ AR_PHY_AIC_SRAM_COM_ATT_6DB)];
for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
int16_t rot_dir_path_att_db, rot_quad_path_att_db;
+ struct ath_aic_sram_info sram;
- aic_sram[i].sram.vga_dir_sign =
+ sram.vga_dir_sign =
(aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0;
- aic_sram[i].sram.vga_quad_sign=
+ sram.vga_quad_sign =
(aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0;
rot_dir_path_att_db =
@@ -400,33 +403,31 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah)
ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) -
fixed_com_att_db;
- aic_sram[i].sram.com_att_6db =
+ sram.com_att_6db =
ar9003_aic_find_index(1, fixed_com_att_db);
- aic_sram[i].sram.valid = 1;
+ sram.valid = 1;
- aic_sram[i].sram.rot_dir_att_db =
+ sram.rot_dir_att_db =
min(max(rot_dir_path_att_db,
(int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB),
ATH_AIC_MAX_ROT_DIR_ATT_DB);
- aic_sram[i].sram.rot_quad_att_db =
+ sram.rot_quad_att_db =
min(max(rot_quad_path_att_db,
(int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB),
ATH_AIC_MAX_ROT_QUAD_ATT_DB);
- }
- for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) {
- aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign,
+ aic->aic_sram[i] = (SM(sram.vga_dir_sign,
AR_PHY_AIC_SRAM_VGA_DIR_SIGN) |
- SM(aic_sram[i].sram.vga_quad_sign,
+ SM(sram.vga_quad_sign,
AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) |
- SM(aic_sram[i].sram.com_att_6db,
+ SM(sram.com_att_6db,
AR_PHY_AIC_SRAM_COM_ATT_6DB) |
- SM(aic_sram[i].sram.valid,
+ SM(sram.valid,
AR_PHY_AIC_SRAM_VALID) |
- SM(aic_sram[i].sram.rot_dir_att_db,
+ SM(sram.rot_dir_att_db,
AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) |
- SM(aic_sram[i].sram.rot_quad_att_db,
+ SM(sram.rot_quad_att_db,
AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB));
}
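
The ar9003_aic.c rework drops the persistent cal_sram[] array in favour of a bool valid-map plus a stack-local struct that is unpacked from, and repacked into, aic->aic_sram[i] with the MS()/SM() field helpers. A self-contained sketch of that mask-shift pattern, using illustrative field masks rather than the real AR_PHY_AIC_SRAM_* values:

    #define F_VALID    0x00000001
    #define F_VALID_S  0
    #define F_ATT_DB   0x000000f0
    #define F_ATT_DB_S 4
    #define MS(v, f)   (((v) & f) >> f##_S)  /* extract field from word */
    #define SM(v, f)   (((v) << f##_S) & f)  /* insert field into word */

    static unsigned int repack(unsigned int word, unsigned int new_att)
    {
            unsigned int valid = MS(word, F_VALID);

            return SM(valid, F_VALID) | SM(new_att, F_ATT_DB);
    }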
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.h b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
index 86f40644be43..9512c63799f2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_aic.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.h
@@ -50,7 +50,6 @@ struct ath_aic_sram_info {
struct ath_aic_out_info {
int16_t dir_path_gain_lin;
int16_t quad_path_gain_lin;
- struct ath_aic_sram_info sram;
};
u8 ar9003_aic_calibration(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 8b4561e8ce1a..54ed2f72d35e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5485,11 +5485,11 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
AR9300_PAPRD_SCALE_1);
else {
if (chan->channel >= 5700)
- return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20),
- AR9300_PAPRD_SCALE_1);
+ return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20),
+ AR9300_PAPRD_SCALE_1);
else if (chan->channel >= 5400)
return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
- AR9300_PAPRD_SCALE_2);
+ AR9300_PAPRD_SCALE_2);
else
return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
AR9300_PAPRD_SCALE_1);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 8b238c15916d..2fe12b0de5b4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -698,6 +698,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_low_ob_db_tx_gain_table_1p0);
+ else if (AR_SREV_9531_11(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ qca953x_1p1_modes_no_xpa_low_power_tx_gain_table);
else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_low_ob_db_tx_gain_1_1);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 201425e7f9cb..06c1ca6e8290 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -976,9 +976,14 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
/*
* JAPAN regulatory.
*/
- if (chan->channel == 2484)
+ if (chan->channel == 2484) {
ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
+ if (AR_SREV_9531(ah))
+ REG_RMW_FIELD(ah, AR_PHY_FCAL_2_0,
+ AR_PHY_FLC_PWR_THRESH, 0);
+ }
+
ah->modes_index = modesIndex;
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
@@ -2071,7 +2076,8 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
* to be disabled.
*
* 0x04000409: Packet stuck on receive.
- * Full chip reset is required for all chips except AR9340.
+ * Full chip reset is required for all chips except
+ * AR9340, AR9531 and AR9561.
*/
/*
@@ -2100,7 +2106,7 @@ bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah)
case 0x04000b09:
return true;
case 0x04000409:
- if (AR_SREV_9340(ah) || AR_SREV_9531(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah))
return false;
else
return true;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index c5f8bc4b5595..566da789f97e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -487,6 +487,9 @@
#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
+#define AR_PHY_FLC_PWR_THRESH 7
+#define AR_PHY_FLC_PWR_THRESH_S 0
+
#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW 3
#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW_S 0
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index 6fc0d07e5ec6..c0b90daa3e3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -757,6 +757,71 @@ static const u32 qca953x_1p1_modes_xpa_tx_gain_table[][2] = {
{0x00016448, 0x6c927a70},
};
+static const u32 qca953x_1p1_modes_no_xpa_low_power_tx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a2dc, 0xfff55592},
+ {0x0000a2e0, 0xfff99924},
+ {0x0000a2e4, 0xfffe1e00},
+ {0x0000a2e8, 0xffffe000},
+ {0x0000a410, 0x000050d6},
+ {0x0000a500, 0x00000069},
+ {0x0000a504, 0x0400006b},
+ {0x0000a508, 0x0800006d},
+ {0x0000a50c, 0x0c000269},
+ {0x0000a510, 0x1000026b},
+ {0x0000a514, 0x1400026d},
+ {0x0000a518, 0x18000669},
+ {0x0000a51c, 0x1c00066b},
+ {0x0000a520, 0x1d000a68},
+ {0x0000a524, 0x21000a6a},
+ {0x0000a528, 0x25000a6c},
+ {0x0000a52c, 0x29000a6e},
+ {0x0000a530, 0x2d0012a9},
+ {0x0000a534, 0x310012ab},
+ {0x0000a538, 0x350012ad},
+ {0x0000a53c, 0x39001b0a},
+ {0x0000a540, 0x3d001b0c},
+ {0x0000a544, 0x41001b0e},
+ {0x0000a548, 0x43001bae},
+ {0x0000a54c, 0x45001914},
+ {0x0000a550, 0x47001916},
+ {0x0000a554, 0x49001b96},
+ {0x0000a558, 0x49001b96},
+ {0x0000a55c, 0x49001b96},
+ {0x0000a560, 0x49001b96},
+ {0x0000a564, 0x49001b96},
+ {0x0000a568, 0x49001b96},
+ {0x0000a56c, 0x49001b96},
+ {0x0000a570, 0x49001b96},
+ {0x0000a574, 0x49001b96},
+ {0x0000a578, 0x49001b96},
+ {0x0000a57c, 0x49001b96},
+ {0x0000a600, 0x00000000},
+ {0x0000a604, 0x00000000},
+ {0x0000a608, 0x00000000},
+ {0x0000a60c, 0x00000000},
+ {0x0000a610, 0x00000000},
+ {0x0000a614, 0x00000000},
+ {0x0000a618, 0x00804201},
+ {0x0000a61c, 0x01408201},
+ {0x0000a620, 0x01408502},
+ {0x0000a624, 0x01408502},
+ {0x0000a628, 0x01408502},
+ {0x0000a62c, 0x01408502},
+ {0x0000a630, 0x01408502},
+ {0x0000a634, 0x01408502},
+ {0x0000a638, 0x01408502},
+ {0x0000a63c, 0x01408502},
+ {0x0000b2dc, 0xfff55592},
+ {0x0000b2e0, 0xfff99924},
+ {0x0000b2e4, 0xfffe1e00},
+ {0x0000b2e8, 0xffffe000},
+ {0x00016044, 0x044922db},
+ {0x00016048, 0x6c927a70},
+ {0x00016444, 0x044922db},
+ {0x00016448, 0x6c927a70},
+};
+
static const u32 qca953x_2p0_baseband_core[][2] = {
/* Addr allmodes */
{0x00009800, 0xafe68e30},
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 3e2e24e4843f..37f6d66d1671 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -241,6 +241,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
+ u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL);
if (ah->caldata)
h = ah->caldata->nfCalHist;
@@ -264,6 +265,16 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
}
/*
+ * stop any ongoing NF cal so the NF load completes immediately
+ * (or right after the rx/tx frame in flight ends)
+ */
+ if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) {
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_RMW_BUFFER_FLUSH(ah);
+ ENABLE_REG_RMW_BUFFER(ah);
+ }
+
+ /*
* Load software filtered NF value into baseband internal minCCApwr
* variable.
*/
@@ -276,18 +287,33 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
/*
* Wait for load to complete, should be fast, a few 10s of us.
- * The max delay was changed from an original 250us to 10000us
- * since 250us often results in NF load timeout and causes deaf
- * condition during stress testing 12/12/2009
+ * The max delay was changed from an original 250us to 22.2 msec.
+ * This increases the timeout to cover the longest possible frame
+ * (11n maximum frame length is 22.1 msec).
*/
- for (j = 0; j < 10000; j++) {
+ for (j = 0; j < 22200; j++) {
if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
- AR_PHY_AGC_CONTROL_NF) == 0)
+ AR_PHY_AGC_CONTROL_NF) == 0)
break;
udelay(10);
}
/*
+ * Restart NF so it can continue.
+ */
+ if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) {
+ ENABLE_REG_RMW_BUFFER(ah);
+ if (bb_agc_ctl & AR_PHY_AGC_CONTROL_ENABLE_NF)
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_ENABLE_NF);
+ if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NO_UPDATE_NF)
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_RMW_BUFFER_FLUSH(ah);
+ }
+
+ /*
* We timed out waiting for the noisefloor to load, probably due to an
* in-progress rx. Simply return here and allow the load plenty of time
* to complete before the next calibration interval. We need to avoid
@@ -296,7 +322,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* here, the baseband nf cal will just be capped by our present
* noisefloor until the next calibration timer.
*/
- if (j == 10000) {
+ if (j == 22200) {
ath_dbg(common, ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
REG_READ(ah, AR_PHY_AGC_CONTROL));
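
Besides the longer 22.2 ms poll, the calib.c change brackets the noise-floor load with a save/stop/restore of any calibration already running, so the load can land immediately. A stand-alone sketch of that bracket (the register helpers are stubs, not the ath9k API, and the re-arm of the ENABLE/NO_UPDATE bits is elided):

    static unsigned int agc_reg;   /* stands in for AR_PHY_AGC_CONTROL */
    #define NF_CAL 0x1             /* stands in for AR_PHY_AGC_CONTROL_NF */

    static unsigned int reg_read(void)  { return agc_reg; }
    static void reg_clr(unsigned int b) { agc_reg &= ~b; }
    static void reg_set(unsigned int b) { agc_reg |= b; }
    static void load_noise_floor(void)  { /* ... poll until NF_CAL clears */ }

    static void load_nf_bracketed(void)
    {
            unsigned int saved = reg_read();

            if (saved & NF_CAL)
                    reg_clr(NF_CAL);  /* stop cal so the load completes */

            load_noise_floor();

            if (saved & NF_CAL)
                    reg_set(NF_CAL);  /* re-arm the calibration */
    }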
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 50e614b915f1..319cb5f25f58 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -226,7 +226,7 @@ static const char *chanctx_state_string(enum ath_chanctx_state state)
}
}
-static const u32 chanctx_event_delta(struct ath_softc *sc)
+static u32 chanctx_event_delta(struct ath_softc *sc)
{
u64 ms;
struct timespec ts, *old;
@@ -1454,7 +1454,7 @@ static void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif)
if (!sc->p2p_ps_timer)
return;
- if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p)
+ if (vif->type != NL80211_IFTYPE_STATION)
return;
sc->p2p_ps_vif = avp;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 73fb4232f9f2..a794157a147d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -477,10 +477,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
if (match) {
if (AR_SREV_9287(ah)) {
- /* FIXME: array overrun? */
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_9287[idxL].pwrPdg[i],
data_9287[idxL].vpdPdg[i],
@@ -490,7 +489,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
} else if (eeprom_4k) {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_4k[idxL].pwrPdg[i],
data_4k[idxL].vpdPdg[i],
@@ -500,7 +499,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
} else {
for (i = 0; i < numXpdGains; i++) {
minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
- maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
+ maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
data_def[idxL].pwrPdg[i],
data_def[idxL].vpdPdg[i],
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 165dd202c365..8cbf4904db7b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -55,6 +55,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
.driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */
{ USB_DEVICE(0x04da, 0x3904),
.driver_info = AR9280_USB },
+ { USB_DEVICE(0x0930, 0x0a08),
+ .driver_info = AR9280_USB }, /* Toshiba WLM-20U2 and GN-1080 */
{ USB_DEVICE(0x0cf3, 0x20ff),
.driver_info = STORAGE_DEVICE },
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index fe1fd1a5ae15..639294a9e34d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1657,13 +1657,14 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw,
static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta,
- u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath9k_htc_sta *ista;
int ret = 0;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
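
This hunk, and the matching ones in ath9k/main.c, carl9170 and wcn36xx below, adapt to a mac80211 API change: ampdu_action() now receives a single parameter struct instead of five scalar arguments, and each driver unpacks only the members it uses. Abbreviated, for orientation only, the fields the drivers read are:

    struct ieee80211_ampdu_params {
            struct ieee80211_sta *sta;               /* peer station */
            enum ieee80211_ampdu_mlme_action action; /* start/stop/... */
            u16 tid;                                 /* traffic identifier */
            u16 ssn;                                 /* starting sequence */
            u8 buf_size;                             /* reorder buffer */
            bool amsdu;                              /* A-MSDU permitted */
    };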
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 257f46ed4a04..e7a31016f370 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1368,6 +1368,16 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_check_gpm_offset(ah);
+ /* A DMA halt is issued to resolve an AR9300/AR9580 bus error during
+ * the RTC_RC register read
+ */
+ if (AR_SREV_9300(ah) || AR_SREV_9580(ah)) {
+ REG_SET_BIT(ah, AR_CFG, AR_CFG_HALT_REQ);
+ ath9k_hw_wait(ah, AR_CFG, AR_CFG_HALT_ACK, AR_CFG_HALT_ACK,
+ 20 * AH_WAIT_TIMEOUT);
+ REG_CLR_BIT(ah, AR_CFG, AR_CFG_HALT_REQ);
+ }
+
REG_WRITE(ah, AR_RTC_RC, rst_flags);
REGWRITE_BUFFER_FLUSH(ah);
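
The new AR9300/AR9580 block is a request/acknowledge handshake: assert HALT_REQ, poll for HALT_ACK with a bounded wait, deassert the request, and only then issue the RTC_RC write. The shape of the handshake as a stand-alone sketch (stub helpers with an immediate ack, not ath9k symbols):

    #define HALT_REQ 0x0800
    #define HALT_ACK 0x1000

    static unsigned int cfg = HALT_ACK;  /* stub: hardware acks at once */

    static void set_cfg(unsigned int b)   { cfg |= b; }
    static void clear_cfg(unsigned int b) { cfg &= ~b; }
    static int wait_cfg(unsigned int b)   { return (cfg & b) == b; }
    static void write_rtc_rc(void)        { /* the actual reset write */ }

    static void halt_then_reset(void)
    {
            set_cfg(HALT_REQ);            /* ask the DMA engine to quiesce */
            (void)wait_cfg(HALT_ACK);     /* bounded poll in the real code */
            clear_cfg(HALT_REQ);
            write_rtc_rc();               /* now safe to hit RTC_RC */
    }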
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index ab7a1ac37849..1c226d63bb03 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -751,14 +751,6 @@ static const struct ieee80211_iface_combination if_comb_multi[] = {
#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
-static const struct ieee80211_iface_limit if_dfs_limits[] = {
- { .max = 1, .types = BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC) },
-};
-
static const struct ieee80211_iface_combination if_comb[] = {
{
.limits = if_limits,
@@ -766,6 +758,11 @@ static const struct ieee80211_iface_combination if_comb[] = {
.max_interfaces = 2048,
.num_different_channels = 1,
.beacon_int_infra_match = true,
+#ifdef CONFIG_ATH9K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40),
+#endif
},
{
.limits = wds_limits,
@@ -774,18 +771,6 @@ static const struct ieee80211_iface_combination if_comb[] = {
.num_different_channels = 1,
.beacon_int_infra_match = true,
},
-#ifdef CONFIG_ATH9K_DFS_CERTIFIED
- {
- .limits = if_dfs_limits,
- .n_limits = ARRAY_SIZE(if_dfs_limits),
- .max_interfaces = 1,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
- .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40),
- }
-#endif
};
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
@@ -863,8 +848,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_DEVICE);
- hw->wiphy->iface_combinations = if_comb;
- hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ hw->wiphy->iface_combinations = if_comb;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
}
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c1b33fdcca08..3aed43a63f94 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -978,7 +978,7 @@ static void ath9k_update_bssid_mask(struct ath_softc *sc,
if (ctx->nvifs_assigned != 1)
continue;
- if (!avp->vif->p2p || !iter_data->has_hw_macaddr)
+ if (!iter_data->has_hw_macaddr)
continue;
ether_addr_copy(common->curbssid, avp->bssid);
@@ -1255,6 +1255,9 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->cur_chan->nvifs++;
+ if (vif->type == NL80211_IFTYPE_STATION && ath9k_is_chanctx_enabled())
+ vif->driver_flags |= IEEE80211_VIF_GET_NOA_UPDATE;
+
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
@@ -1864,14 +1867,16 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static int ath9k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta,
- u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
bool flush = false;
int ret = 0;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
mutex_lock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index caba54ddad25..c8d35febaf0f 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -34,8 +34,10 @@
#define AR_CFG_SWRG 0x00000010
#define AR_CFG_AP_ADHOC_INDICATION 0x00000020
#define AR_CFG_PHOK 0x00000100
-#define AR_CFG_CLK_GATE_DIS 0x00000400
#define AR_CFG_EEBS 0x00000200
+#define AR_CFG_CLK_GATE_DIS 0x00000400
+#define AR_CFG_HALT_REQ 0x00000800
+#define AR_CFG_HALT_ACK 0x00001000
#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 9111d4ffc1b3..ea1d80f9a50e 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -56,6 +56,7 @@ enum carl9170_cmd_oids {
CARL9170_CMD_RX_FILTER = 0x07,
CARL9170_CMD_WOL = 0x08,
CARL9170_CMD_TALLY = 0x09,
+ CARL9170_CMD_WREGB = 0x0a,
/* CAM */
CARL9170_CMD_EKEY = 0x10,
@@ -123,6 +124,12 @@ struct carl9170_write_reg {
} regs[0] __packed;
} __packed;
+struct carl9170_write_reg_byte {
+ __le32 addr;
+ __le32 count;
+ u8 val[0];
+} __packed;
+
#define CARL9170FW_PHY_HT_ENABLE 0x4
#define CARL9170FW_PHY_HT_DYN2040 0x8
#define CARL9170FW_PHY_HT_EXT_CHAN_OFF 0x3
@@ -226,6 +233,7 @@ struct carl9170_cmd {
struct carl9170_u32_list echo;
struct carl9170_reg_list rreg;
struct carl9170_write_reg wreg;
+ struct carl9170_write_reg_byte wregb;
struct carl9170_rf_init rf_init;
struct carl9170_psm psm;
struct carl9170_wol_cmd wol;
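
The new CARL9170_CMD_WREGB pairs with carl9170_write_reg_byte: a fixed header (target address plus byte count) followed by a flexible array of values that the firmware writes one byte at a time. A hedged sketch of building such a payload in plain C (the struct mirror, the allocation and the le32 stand-in are illustrative, not the carl9170 send path):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define cpu_to_le32(x) (x)   /* stand-in for the kernel helper */

    struct wregb {               /* mirrors carl9170_write_reg_byte */
            uint32_t addr;
            uint32_t count;
            uint8_t val[];
    };

    static struct wregb *build_wregb(uint32_t addr, const uint8_t *src,
                                     size_t n)
    {
            struct wregb *w = malloc(sizeof(*w) + n);

            if (!w)
                    return NULL;
            w->addr = cpu_to_le32(addr);
            w->count = cpu_to_le32((uint32_t)n);
            memcpy(w->val, src, n);  /* bytes written one at a time by FW */
            return w;
    }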
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 66848d47c88e..0533f79cb998 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -81,6 +81,12 @@ enum carl9170fw_feature_list {
/* Firmware will pass BA when BARs are queued */
CARL9170FW_RX_BA_FILTER,
+ /* Firmware has support to write a byte at a time */
+ CARL9170FW_HAS_WREGB_CMD,
+
+ /* Pattern generator */
+ CARL9170FW_PATTERN_GENERATOR,
+
/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index 0db874abde50..08e0ae9c5836 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -453,9 +453,74 @@
#define AR9170_MC_REG_BASE 0x1d1000
#define AR9170_MC_REG_FLASH_WAIT_STATE (AR9170_MC_REG_BASE + 0x000)
-#define AR9170_MC_REG_SEEPROM_WP0 (AR9170_MC_REG_BASE + 0x400)
-#define AR9170_MC_REG_SEEPROM_WP1 (AR9170_MC_REG_BASE + 0x404)
-#define AR9170_MC_REG_SEEPROM_WP2 (AR9170_MC_REG_BASE + 0x408)
+
+#define AR9170_SPI_REG_BASE (AR9170_MC_REG_BASE + 0x200)
+#define AR9170_SPI_REG_CONTROL0 (AR9170_SPI_REG_BASE + 0x000)
+#define AR9170_SPI_CONTROL0_BUSY BIT(0)
+#define AR9170_SPI_CONTROL0_CMD_GO BIT(1)
+#define AR9170_SPI_CONTROL0_PAGE_WR BIT(2)
+#define AR9170_SPI_CONTROL0_SEQ_RD BIT(3)
+#define AR9170_SPI_CONTROL0_CMD_ABORT BIT(4)
+#define AR9170_SPI_CONTROL0_CMD_LEN_S 8
+#define AR9170_SPI_CONTROL0_CMD_LEN 0x00000f00
+#define AR9170_SPI_CONTROL0_RD_LEN_S 12
+#define AR9170_SPI_CONTROL0_RD_LEN 0x00007000
+
+#define AR9170_SPI_REG_CONTROL1 (AR9170_SPI_REG_BASE + 0x004)
+#define AR9170_SPI_CONTROL1_SCK_RATE BIT(0)
+#define AR9170_SPI_CONTROL1_DRIVE_SDO BIT(1)
+#define AR9170_SPI_CONTROL1_MODE_SEL_S 2
+#define AR9170_SPI_CONTROL1_MODE_SEL 0x000000c0
+#define AR9170_SPI_CONTROL1_WRITE_PROTECT BIT(4)
+
+#define AR9170_SPI_REG_COMMAND_PORT0 (AR9170_SPI_REG_BASE + 0x008)
+#define AR9170_SPI_COMMAND_PORT0_CMD0_S 0
+#define AR9170_SPI_COMMAND_PORT0_CMD0 0x000000ff
+#define AR9170_SPI_COMMAND_PORT0_CMD1_S 8
+#define AR9170_SPI_COMMAND_PORT0_CMD1 0x0000ff00
+#define AR9170_SPI_COMMAND_PORT0_CMD2_S 16
+#define AR9170_SPI_COMMAND_PORT0_CMD2 0x00ff0000
+#define AR9170_SPI_COMMAND_PORT0_CMD3_S 24
+#define AR9170_SPI_COMMAND_PORT0_CMD3 0xff000000
+
+#define AR9170_SPI_REG_COMMAND_PORT1 (AR9170_SPI_REG_BASE + 0x00C)
+#define AR9170_SPI_COMMAND_PORT1_CMD4_S 0
+#define AR9170_SPI_COMMAND_PORT1_CMD4 0x000000ff
+#define AR9170_SPI_COMMAND_PORT1_CMD5_S 8
+#define AR9170_SPI_COMMAND_PORT1_CMD5 0x0000ff00
+#define AR9170_SPI_COMMAND_PORT1_CMD6_S 16
+#define AR9170_SPI_COMMAND_PORT1_CMD6 0x00ff0000
+#define AR9170_SPI_COMMAND_PORT1_CMD7_S 24
+#define AR9170_SPI_COMMAND_PORT1_CMD7 0xff000000
+
+#define AR9170_SPI_REG_DATA_PORT (AR9170_SPI_REG_BASE + 0x010)
+#define AR9170_SPI_REG_PAGE_WRITE_LEN (AR9170_SPI_REG_BASE + 0x014)
+
+#define AR9170_EEPROM_REG_BASE (AR9170_MC_REG_BASE + 0x400)
+#define AR9170_EEPROM_REG_WP_MAGIC1 (AR9170_EEPROM_REG_BASE + 0x000)
+#define AR9170_EEPROM_WP_MAGIC1 0x12345678
+
+#define AR9170_EEPROM_REG_WP_MAGIC2 (AR9170_EEPROM_REG_BASE + 0x004)
+#define AR9170_EEPROM_WP_MAGIC2 0x55aa00ff
+
+#define AR9170_EEPROM_REG_WP_MAGIC3 (AR9170_EEPROM_REG_BASE + 0x008)
+#define AR9170_EEPROM_WP_MAGIC3 0x13579ace
+
+#define AR9170_EEPROM_REG_CLOCK_DIV (AR9170_EEPROM_REG_BASE + 0x00C)
+#define AR9170_EEPROM_CLOCK_DIV_FAC_S 0
+#define AR9170_EEPROM_CLOCK_DIV_FAC 0x000001ff
+#define AR9170_EEPROM_CLOCK_DIV_FAC_39KHZ 0xff
+#define AR9170_EEPROM_CLOCK_DIV_FAC_78KHZ 0x7f
+#define AR9170_EEPROM_CLOCK_DIV_FAC_312KHZ 0x1f
+#define AR9170_EEPROM_CLOCK_DIV_FAC_10MHZ 0x0
+#define AR9170_EEPROM_CLOCK_DIV_SOFT_RST BIT(9)
+
+#define AR9170_EEPROM_REG_MODE (AR9170_EEPROM_REG_BASE + 0x010)
+#define AR9170_EEPROM_MODE_EEPROM_SIZE_16K_PLUS BIT(31)
+
+#define AR9170_EEPROM_REG_WRITE_PROTECT (AR9170_EEPROM_REG_BASE + 0x014)
+#define AR9170_EEPROM_WRITE_PROTECT_WP_STATUS BIT(0)
+#define AR9170_EEPROM_WRITE_PROTECT_WP_SET BIT(8)
/* Interrupt Controller */
#define AR9170_MAX_INT_SRC 9
@@ -589,11 +654,13 @@
#define AR9170_USB_REG_EP10_MAP (AR9170_USB_REG_BASE + 0x039)
#define AR9170_USB_REG_EP_IN_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x03f)
+#define AR9170_USB_EP_IN_STALL 0x8
#define AR9170_USB_EP_IN_TOGGLE 0x10
#define AR9170_USB_REG_EP_IN_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x03e)
#define AR9170_USB_REG_EP_OUT_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x05f)
+#define AR9170_USB_EP_OUT_STALL 0x8
#define AR9170_USB_EP_OUT_TOGGLE 0x10
#define AR9170_USB_REG_EP_OUT_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x05e)
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 19d3d64416bf..4d1527a2e292 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1413,10 +1413,12 @@ static void carl9170_ampdu_work(struct work_struct *work)
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta,
- u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
struct ar9170 *ar = hw->priv;
struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
struct carl9170_sta_tid *tid_info;
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index 2282847d4bb8..a0410fe8c03a 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
-#define CARL9170FW_VERSION_YEAR 12
-#define CARL9170FW_VERSION_MONTH 12
+#define CARL9170FW_VERSION_YEAR 16
+#define CARL9170FW_VERSION_MONTH 2
#define CARL9170FW_VERSION_DAY 15
-#define CARL9170FW_VERSION_GIT "1.9.7"
+#define CARL9170FW_VERSION_GIT "1.9.9"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 7c169abdbafe..a27279c2c695 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -857,12 +857,14 @@ static int wcn36xx_resume(struct ieee80211_hw *hw)
static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_sta *sta_priv = NULL;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 20d07ef679e8..11f1bb8dfebe 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -422,6 +422,11 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
if (sme->privacy && !rsn_eid)
wil_info(wil, "WSC connection\n");
+ if (sme->pbss) {
+ wil_err(wil, "connect - PBSS not yet supported\n");
+ return -EOPNOTSUPP;
+ }
+
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
@@ -535,7 +540,18 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
- rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+ if (!(test_bit(wil_status_fwconnecting, wil->status) ||
+ test_bit(wil_status_fwconnected, wil->status))) {
+ wil_err(wil, "%s: Disconnect was called while disconnected\n",
+ __func__);
+ return 0;
+ }
+
+ rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
+ WMI_DISCONNECT_EVENTID, NULL, 0,
+ WIL6210_DISCONNECT_TO_MS);
+ if (rc)
+ wil_err(wil, "%s: disconnect error %d\n", __func__, rc);
return rc;
}
@@ -696,6 +712,79 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
return rc;
}
+/**
+ * find a specific IE in a list of IEs
+ * return a pointer to the beginning of the IE in the list,
+ * or NULL if not found
+ */
+static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie,
+ u16 ie_len)
+{
+ struct ieee80211_vendor_ie *vie;
+ u32 oui;
+
+ /* IE tag at offset 0, length at offset 1 */
+ if (ie_len < 2 || 2 + ie[1] > ie_len)
+ return NULL;
+
+ if (ie[0] != WLAN_EID_VENDOR_SPECIFIC)
+ return cfg80211_find_ie(ie[0], ies, ies_len);
+
+ /* make sure there is room for 3 bytes OUI + 1 byte OUI type */
+ if (ie[1] < 4)
+ return NULL;
+ vie = (struct ieee80211_vendor_ie *)ie;
+ oui = vie->oui[0] << 16 | vie->oui[1] << 8 | vie->oui[2];
+ return cfg80211_find_vendor_ie(oui, vie->oui_type, ies,
+ ies_len);
+}
+
+/**
+ * merge the IEs in two lists into a single list.
+ * IEs from the second list that already exist in the first are skipped.
+ * only vendor-specific IEs are taken from the second list, which keeps
+ * the merged list sorted (the vendor-specific IE has the highest
+ * tag number)
+ * caller must free the allocated memory for merged IEs
+ */
+static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
+ const u8 *ies2, u16 ies2_len,
+ u8 **merged_ies, u16 *merged_len)
+{
+ u8 *buf, *dpos;
+ const u8 *spos;
+
+ if (ies1_len == 0 && ies2_len == 0) {
+ *merged_ies = NULL;
+ *merged_len = 0;
+ return 0;
+ }
+
+ buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ memcpy(buf, ies1, ies1_len);
+ dpos = buf + ies1_len;
+ spos = ies2;
+ while (spos + 1 < ies2 + ies2_len) {
+ /* IE tag at offset 0, length at offset 1 */
+ u16 ielen = 2 + spos[1];
+
+ if (spos + ielen > ies2 + ies2_len)
+ break;
+ if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+ !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
+ memcpy(dpos, spos, ielen);
+ dpos += ielen;
+ }
+ spos += ielen;
+ }
+
+ *merged_ies = buf;
+ *merged_len = dpos - buf;
+ return 0;
+}
+
static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
{
print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET,
@@ -712,49 +801,49 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b)
b->assocresp_ies, b->assocresp_ies_len);
}
-static int wil_fix_bcon(struct wil6210_priv *wil,
- struct cfg80211_beacon_data *bcon)
-{
- struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
- size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-
- if (bcon->probe_resp_len <= hlen)
- return 0;
-
-/* always use IE's from full probe frame, they has more info
- * notable RSN
- */
- bcon->proberesp_ies = f->u.probe_resp.variable;
- bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
- if (!bcon->assocresp_ies) {
- bcon->assocresp_ies = bcon->proberesp_ies;
- bcon->assocresp_ies_len = bcon->proberesp_ies_len;
- }
-
- return 1;
-}
-
/* internal functions for device reset and starting AP */
static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
struct cfg80211_beacon_data *bcon)
{
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ u16 len = 0, proberesp_len = 0;
+ u8 *ies = NULL, *proberesp = NULL;
+
+ if (bcon->probe_resp) {
+ struct ieee80211_mgmt *f =
+ (struct ieee80211_mgmt *)bcon->probe_resp;
+ size_t hlen = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ proberesp = f->u.probe_resp.variable;
+ proberesp_len = bcon->probe_resp_len - hlen;
+ }
+ rc = _wil_cfg80211_merge_extra_ies(proberesp,
+ proberesp_len,
+ bcon->proberesp_ies,
+ bcon->proberesp_ies_len,
+ &ies, &len);
- rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
- bcon->proberesp_ies);
if (rc)
- return rc;
+ goto out;
+
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, len, ies);
+ if (rc)
+ goto out;
- rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
- bcon->assocresp_ies);
+ if (bcon->assocresp_ies)
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
+ bcon->assocresp_ies_len, bcon->assocresp_ies);
+ else
+ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, len, ies);
#if 0 /* to use beacon IE's, remove this #if 0 */
if (rc)
- return rc;
+ goto out;
rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
#endif
-
+out:
+ kfree(ies);
return rc;
}
@@ -823,14 +912,9 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_bcon_data(bcon);
- if (wil_fix_bcon(wil, bcon)) {
- wil_dbg_misc(wil, "Fixed bcon\n");
- wil_print_bcon_data(bcon);
- }
-
- if (bcon->proberesp_ies &&
- cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
- bcon->proberesp_ies_len))
+ if (bcon->tail &&
+ cfg80211_find_ie(WLAN_EID_RSN, bcon->tail,
+ bcon->tail_len))
privacy = 1;
/* in case privacy has changed, need to restart the AP */
@@ -870,6 +954,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
+ if (info->pbss) {
+ wil_err(wil, "AP: PBSS not yet supported\n");
+ return -EOPNOTSUPP;
+ }
+
switch (info->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
@@ -900,11 +989,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil_print_bcon_data(bcon);
wil_print_crypto(wil, crypto);
- if (wil_fix_bcon(wil, bcon)) {
- wil_dbg_misc(wil, "Fixed bcon\n");
- wil_print_bcon_data(bcon);
- }
-
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
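
Both helpers added to wil6210/cfg80211.c walk IE lists with the same TLV invariant: every element is [tag][length][length bytes], so a walker needs two readable bytes before touching an element, advances by 2 + len, and stops on truncation. The walk on its own, as a stand-alone sketch:

    #include <stddef.h>
    #include <stdint.h>

    static void walk_ies(const uint8_t *ies, size_t ies_len)
    {
            const uint8_t *pos = ies, *end = ies + ies_len;

            while (pos + 2 <= end) {            /* need tag + length */
                    size_t elen = 2 + pos[1];

                    if (pos + elen > end)
                            break;              /* truncated element */
                    /* pos[0] is the tag; pos + 2 .. pos + elen is the body */
                    pos += elen;
            }
    }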
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index a1d10b85989f..3bbe73b6d05a 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -68,13 +68,13 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "???\n");
}
- if (vring->va && (vring->size < 1025)) {
+ if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
for (i = 0; i < vring->size; i++) {
volatile struct vring_tx_desc *d = &vring->va[i].tx;
- if ((i % 64) == 0 && (i != 0))
+ if ((i % 128) == 0 && (i != 0))
seq_puts(s, "\n");
seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
_s : (vring->ctx[i].skb ? _h : 'h'));
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index b39f0bfc591e..78ba6e04c944 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -23,9 +23,6 @@
#include "wmi.h"
#include "boot_loader.h"
-#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
-#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
-
bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
@@ -155,7 +152,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (sta->status != wil_sta_unused) {
if (!from_event)
- wmi_disconnect_sta(wil, sta->addr, reason_code);
+ wmi_disconnect_sta(wil, sta->addr, reason_code, true);
switch (wdev->iftype) {
case NL80211_IFTYPE_AP:
@@ -195,8 +192,8 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
struct wireless_dev *wdev = wil->wdev;
might_sleep();
- wil_dbg_misc(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
- reason_code, from_event ? "+" : "-");
+ wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
+ reason_code, from_event ? "+" : "-");
/* Cases are:
* - disconnect single STA, still connected
@@ -258,13 +255,16 @@ static void wil_disconnect_worker(struct work_struct *work)
static void wil_connect_timer_fn(ulong x)
{
struct wil6210_priv *wil = (void *)x;
+ bool q;
- wil_dbg_misc(wil, "Connect timeout\n");
+ wil_err(wil, "Connect timeout detected, disconnect station\n");
/* reschedule to thread context - disconnect won't
- * run from atomic context
+ * run from atomic context.
+ * queue on wmi_wq to prevent race with connect event.
*/
- schedule_work(&wil->disconnect_worker);
+ q = queue_work(wil->wmi_wq, &wil->disconnect_worker);
+ wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q);
}
static void wil_scan_timer_fn(ulong x)
@@ -369,6 +369,32 @@ static int wil_find_free_vring(struct wil6210_priv *wil)
return -EINVAL;
}
+int wil_tx_init(struct wil6210_priv *wil, int cid)
+{
+ int rc = -EINVAL, ringid;
+
+ if (cid < 0) {
+ wil_err(wil, "No connection pending\n");
+ goto out;
+ }
+ ringid = wil_find_free_vring(wil);
+ if (ringid < 0) {
+ wil_err(wil, "No free vring found\n");
+ goto out;
+ }
+
+ wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n",
+ cid, ringid);
+
+ rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0);
+ if (rc)
+ wil_err(wil, "wil_vring_init_tx for CID %d vring %d failed\n",
+ cid, ringid);
+
+out:
+ return rc;
+}
+
int wil_bcast_init(struct wil6210_priv *wil)
{
int ri = wil->bcast_vring, rc;
@@ -399,41 +425,6 @@ void wil_bcast_fini(struct wil6210_priv *wil)
wil_vring_fini_tx(wil, ri);
}
-static void wil_connect_worker(struct work_struct *work)
-{
- int rc, cid, ringid;
- struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
- connect_worker);
- struct net_device *ndev = wil_to_ndev(wil);
-
- mutex_lock(&wil->mutex);
-
- cid = wil->pending_connect_cid;
- if (cid < 0) {
- wil_err(wil, "No connection pending\n");
- goto out;
- }
- ringid = wil_find_free_vring(wil);
- if (ringid < 0) {
- wil_err(wil, "No free vring found\n");
- goto out;
- }
-
- wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n",
- cid, ringid);
-
- rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0);
- wil->pending_connect_cid = -1;
- if (rc == 0) {
- wil->sta[cid].status = wil_sta_connected;
- netif_tx_wake_all_queues(ndev);
- } else {
- wil_disconnect_cid(wil, cid, WLAN_REASON_UNSPECIFIED, true);
- }
-out:
- mutex_unlock(&wil->mutex);
-}
-
int wil_priv_init(struct wil6210_priv *wil)
{
uint i;
@@ -444,6 +435,9 @@ int wil_priv_init(struct wil6210_priv *wil)
for (i = 0; i < WIL6210_MAX_CID; i++)
spin_lock_init(&wil->sta[i].tid_rx_lock);
+ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++)
+ spin_lock_init(&wil->vring_tx_data[i].lock);
+
mutex_init(&wil->mutex);
mutex_init(&wil->wmi_mutex);
mutex_init(&wil->back_rx_mutex);
@@ -453,12 +447,10 @@ int wil_priv_init(struct wil6210_priv *wil)
init_completion(&wil->wmi_ready);
init_completion(&wil->wmi_call);
- wil->pending_connect_cid = -1;
wil->bcast_vring = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
- INIT_WORK(&wil->connect_worker, wil_connect_worker);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
@@ -844,7 +836,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
/* init after reset */
- wil->pending_connect_cid = -1;
wil->ap_isolate = 0;
reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call);
@@ -948,8 +939,7 @@ int wil_up(struct wil6210_priv *wil)
int __wil_down(struct wil6210_priv *wil)
{
- int iter = WAIT_FOR_DISCONNECT_TIMEOUT_MS /
- WAIT_FOR_DISCONNECT_INTERVAL_MS;
+ int rc;
WARN_ON(!mutex_is_locked(&wil->mutex));
@@ -973,22 +963,16 @@ int __wil_down(struct wil6210_priv *wil)
}
if (test_bit(wil_status_fwconnected, wil->status) ||
- test_bit(wil_status_fwconnecting, wil->status))
- wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+ test_bit(wil_status_fwconnecting, wil->status)) {
- /* make sure wil is idle (not connected) */
- mutex_unlock(&wil->mutex);
- while (iter--) {
- int idle = !test_bit(wil_status_fwconnected, wil->status) &&
- !test_bit(wil_status_fwconnecting, wil->status);
- if (idle)
- break;
- msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS);
+ mutex_unlock(&wil->mutex);
+ rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
+ WMI_DISCONNECT_EVENTID, NULL, 0,
+ WIL6210_DISCONNECT_TO_MS);
+ mutex_lock(&wil->mutex);
+ if (rc)
+ wil_err(wil, "timeout waiting for disconnect\n");
}
- mutex_lock(&wil->mutex);
-
- if (iter < 0)
- wil_err(wil, "timeout waiting for idle FW/HW\n");
wil_reset(wil, false);
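
The old poll-until-idle loop is gone: __wil_down() now issues one blocking wmi_call() and sleeps until WMI_DISCONNECT_EVENTID arrives or WIL6210_DISCONNECT_TO_MS expires. A compilable pthread model of that send-then-wait-for-completion pattern; every identifier here is illustrative, not the driver's:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static bool reply_seen;

    /* Event path: runs when the awaited firmware event shows up */
    static void *event_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        reply_seen = true;              /* models complete(&wmi_call) */
        pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    /* Command path: send, then block until the event or a timeout */
    static int call_and_wait(int timeout_ms)
    {
        struct timespec ts;
        int rc = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_ms / 1000;

        pthread_mutex_lock(&lock);
        while (!reply_seen && rc == 0)
            rc = pthread_cond_timedwait(&done, &lock, &ts);
        pthread_mutex_unlock(&lock);
        return reply_seen ? 0 : -1;     /* -1 models the timeout error */
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, event_thread, NULL);
        printf("disconnect %s\n", call_and_wait(2000) ? "timed out" : "done");
        pthread_join(t, NULL);
        return 0;
    }

Note how the hunk drops wil->mutex around the call for the same reason the old loop did: the disconnect event handler needs to take it.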
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 56aaa2d4fb0e..ecc3c1bdae4b 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -108,8 +108,9 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
/* always process ALL Tx complete, regardless budget - it is fast */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
struct vring *vring = &wil->vring_tx[i];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[i];
- if (!vring->va)
+ if (!vring->va || !txdata->enabled)
continue;
tx_done += wil_tx_complete(wil, i);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 7887e6cfd817..6af20903cf89 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -717,6 +717,21 @@ void wil_rx_fini(struct wil6210_priv *wil)
wil_vring_free(wil, vring, 0);
}
+static inline void wil_tx_data_init(struct vring_tx_data *txdata)
+{
+ spin_lock_bh(&txdata->lock);
+ txdata->dot1x_open = 0;
+ txdata->enabled = 0;
+ txdata->idle = 0;
+ txdata->last_idle = 0;
+ txdata->begin = 0;
+ txdata->agg_wsize = 0;
+ txdata->agg_timeout = 0;
+ txdata->agg_amsdu = 0;
+ txdata->addba_in_progress = false;
+ spin_unlock_bh(&txdata->lock);
+}
+
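
wil_tx_data_init() exists because the replaced memset() + spin_lock_init() pair re-initialized a spinlock that another context could still be holding (the Tx paths below now check txdata->enabled under it). A kernel-style before/after sketch, as an illustrative fragment rather than a standalone program:

    /* Old pattern: wipes the embedded lock while a reader may hold it */
    memset(txdata, 0, sizeof(*txdata));
    spin_lock_init(&txdata->lock);

    /* New pattern: the lock object survives; state resets under it */
    spin_lock_bh(&txdata->lock);
    txdata->enabled = 0;
    txdata->dot1x_open = 0;
    /* ...remaining fields, as in wil_tx_data_init() above... */
    spin_unlock_bh(&txdata->lock);

The matching change in wil_priv_init() moves spin_lock_init() to one-time setup, which is the only safe place to initialize a lock.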
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
int cid, int tid)
{
@@ -758,8 +773,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
goto out;
}
- memset(txdata, 0, sizeof(*txdata));
- spin_lock_init(&txdata->lock);
+ wil_tx_data_init(txdata);
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
@@ -791,9 +805,14 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
return 0;
out_free:
+ spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
txdata->enabled = 0;
+ spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring, 1);
+ wil->vring2cid_tid[id][0] = WIL6210_MAX_CID;
+ wil->vring2cid_tid[id][1] = 0;
+
out:
return rc;
@@ -831,8 +850,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
goto out;
}
- memset(txdata, 0, sizeof(*txdata));
- spin_lock_init(&txdata->lock);
+ wil_tx_data_init(txdata);
vring->size = size;
rc = wil_vring_alloc(wil, vring);
if (rc)
@@ -862,8 +880,10 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
return 0;
out_free:
+ spin_lock_bh(&txdata->lock);
txdata->enabled = 0;
txdata->dot1x_open = false;
+ spin_unlock_bh(&txdata->lock);
wil_vring_free(wil, vring, 1);
out:
@@ -891,7 +911,6 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
napi_synchronize(&wil->napi_tx);
wil_vring_free(wil, vring, 1);
- memset(txdata, 0, sizeof(*txdata));
}
static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
@@ -911,10 +930,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
continue;
if (wil->vring2cid_tid[i][0] == cid) {
struct vring *v = &wil->vring_tx[i];
+ struct vring_tx_data *txdata = &wil->vring_tx_data[i];
wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
__func__, eth->h_dest, i);
- if (v->va) {
+ if (v->va && txdata->enabled) {
return v;
} else {
wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
@@ -935,6 +955,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
struct vring *v;
int i;
u8 cid;
+ struct vring_tx_data *txdata;
/* In the STA mode, it is expected to have only 1 VRING
* for the AP we connected to.
@@ -942,7 +963,8 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
*/
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->vring_tx[i];
- if (!v->va)
+ txdata = &wil->vring_tx_data[i];
+ if (!v->va || !txdata->enabled)
continue;
cid = wil->vring2cid_tid[i][0];
@@ -978,12 +1000,14 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
struct sk_buff *skb)
{
struct vring *v;
+ struct vring_tx_data *txdata;
int i = wil->bcast_vring;
if (i < 0)
return NULL;
v = &wil->vring_tx[i];
- if (!v->va)
+ txdata = &wil->vring_tx_data[i];
+ if (!v->va || !txdata->enabled)
return NULL;
if (!wil->vring_tx_data[i].dot1x_open &&
(skb->protocol != cpu_to_be16(ETH_P_PAE)))
@@ -1010,11 +1034,13 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
u8 cid;
struct ethhdr *eth = (void *)skb->data;
char *src = eth->h_source;
+ struct vring_tx_data *txdata;
/* find 1-st vring eligible for data */
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
v = &wil->vring_tx[i];
- if (!v->va)
+ txdata = &wil->vring_tx_data[i];
+ if (!v->va || !txdata->enabled)
continue;
cid = wil->vring2cid_tid[i][0];
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 235e205ce2bc..8427d68b6fa8 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -51,7 +51,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_TX_Q_LEN_DEFAULT (4000)
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
-#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_TX_RING_SIZE_ORDER_DEFAULT (12)
#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7)
#define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */
/* limit ring size in range [32..32k] */
@@ -92,6 +92,7 @@ static inline u32 wil_mtu2macbuf(u32 mtu)
#define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */
#define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000)
#define WIL6210_SCAN_TO msecs_to_jiffies(10000)
+#define WIL6210_DISCONNECT_TO_MS (2000)
#define WIL6210_RX_HIGH_TRSH_INIT (0)
#define WIL6210_RX_HIGH_TRSH_DEFAULT \
(1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3))
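
Ring sizes in this driver are powers of two expressed through *_ORDER macros, so bumping WIL_TX_RING_SIZE_ORDER_DEFAULT from 10 to 12 quadruples the default Tx ring; the debugfs dump earlier in this patch widened its row from 64 to 128 descriptors for the same reason. A quick standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int old_order = 10, new_order = 12, rx_order = 10;

        printf("old Tx ring: %d entries\n", 1 << old_order);   /* 1024 */
        printf("new Tx ring: %d entries\n", 1 << new_order);   /* 4096 */
        /* Rx high threshold: 1 << (order - 3) */
        printf("Rx high threshold: %d\n", 1 << (rx_order - 3)); /* 128 */
        return 0;
    }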
@@ -581,12 +582,10 @@ struct wil6210_priv {
struct workqueue_struct *wmi_wq; /* for deferred calls */
struct work_struct wmi_event_worker;
struct workqueue_struct *wq_service;
- struct work_struct connect_worker;
struct work_struct disconnect_worker;
struct work_struct fw_error_worker; /* for FW error recovery */
struct timer_list connect_timer;
struct timer_list scan_timer; /* detect scan timeout */
- int pending_connect_cid;
struct list_head pending_wmi_ev;
/*
* protect pending_wmi_ev
@@ -756,7 +755,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
-int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
+ bool full_disconnect);
int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout);
int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason);
int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason);
@@ -807,6 +807,7 @@ void wil_rx_fini(struct wil6210_priv *wil);
int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
int cid, int tid);
void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+int wil_tx_init(struct wil6210_priv *wil, int cid);
int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size);
int wil_bcast_init(struct wil6210_priv *wil);
void wil_bcast_fini(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index e3ea74cdd4aa..493e721c4fa7 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -426,6 +426,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
const size_t assoc_req_ie_offset = sizeof(u16) * 2;
/* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+ int rc;
if (len < sizeof(*evt)) {
wil_err(wil, "Connect event too short : %d bytes\n", len);
@@ -445,8 +446,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
ch = evt->channel + 1;
- wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
- evt->bssid, ch, evt->cid);
+ wil_info(wil, "Connect %pM channel [%d] cid %d\n",
+ evt->bssid, ch, evt->cid);
wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
evt->assoc_info, len - sizeof(*evt), true);
@@ -468,20 +469,67 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
assoc_resp_ielen = 0;
}
+ mutex_lock(&wil->mutex);
+ if (test_bit(wil_status_resetting, wil->status) ||
+ !test_bit(wil_status_fwready, wil->status)) {
+ wil_err(wil, "status_resetting, cancel connect event, CID %d\n",
+ evt->cid);
+ mutex_unlock(&wil->mutex);
+ /* no need for cleanup, wil_reset will do that */
+ return;
+ }
+
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
(wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
if (!test_bit(wil_status_fwconnecting, wil->status)) {
wil_err(wil, "Not in connecting state\n");
+ mutex_unlock(&wil->mutex);
return;
}
del_timer_sync(&wil->connect_timer);
- cfg80211_connect_result(ndev, evt->bssid,
- assoc_req_ie, assoc_req_ielen,
- assoc_resp_ie, assoc_resp_ielen,
- WLAN_STATUS_SUCCESS, GFP_KERNEL);
+ }
+
+ /* FIXME FW can transmit only ucast frames to peer */
+ /* FIXME real ring_id instead of hard coded 0 */
+ ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
+ wil->sta[evt->cid].status = wil_sta_conn_pending;
+ rc = wil_tx_init(wil, evt->cid);
+ if (rc) {
+ wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n",
+ __func__, evt->cid, rc);
+ wmi_disconnect_sta(wil, wil->sta[evt->cid].addr,
+ WLAN_REASON_UNSPECIFIED, false);
+ } else {
+ wil_info(wil, "%s: successful connection to CID %d\n",
+ __func__, evt->cid);
+ }
+
+ if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+ if (rc) {
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+ wil_err(wil,
+ "%s: cfg80211_connect_result with failure\n",
+ __func__);
+ cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ goto out;
+ } else {
+ cfg80211_connect_result(ndev, evt->bssid,
+ assoc_req_ie, assoc_req_ielen,
+ assoc_resp_ie, assoc_resp_ielen,
+ WLAN_STATUS_SUCCESS,
+ GFP_KERNEL);
+ }
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ if (rc)
+ goto out;
+
memset(&sinfo, 0, sizeof(sinfo));
sinfo.generation = wil->sinfo_gen++;
@@ -492,17 +540,21 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+ } else {
+ wil_err(wil, "%s: unhandled iftype %d for CID %d\n",
+ __func__, wdev->iftype, evt->cid);
+ goto out;
}
- clear_bit(wil_status_fwconnecting, wil->status);
- set_bit(wil_status_fwconnected, wil->status);
- /* FIXME FW can transmit only ucast frames to peer */
- /* FIXME real ring_id instead of hard coded 0 */
- ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
- wil->sta[evt->cid].status = wil_sta_conn_pending;
+ wil->sta[evt->cid].status = wil_sta_connected;
+ set_bit(wil_status_fwconnected, wil->status);
+ netif_tx_wake_all_queues(ndev);
- wil->pending_connect_cid = evt->cid;
- queue_work(wil->wq_service, &wil->connect_worker);
+out:
+ if (rc)
+ wil->sta[evt->cid].status = wil_sta_unused;
+ clear_bit(wil_status_fwconnecting, wil->status);
+ mutex_unlock(&wil->mutex);
}
static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
@@ -511,8 +563,8 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
struct wmi_disconnect_event *evt = d;
u16 reason_code = le16_to_cpu(evt->protocol_reason_status);
- wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
- evt->bssid, reason_code, evt->disconnect_reason);
+ wil_info(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+ evt->bssid, reason_code, evt->disconnect_reason);
wil->sinfo_gen++;
@@ -727,6 +779,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
void __iomem *src;
ulong flags;
unsigned n;
+ unsigned int num_immed_reply = 0;
if (!test_bit(wil_status_mbox_ready, wil->status)) {
wil_err(wil, "Reset in progress. Cannot handle WMI event\n");
@@ -736,6 +789,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
for (n = 0;; n++) {
u16 len;
bool q;
+ bool immed_reply = false;
r->head = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
@@ -784,6 +838,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
u16 id = le16_to_cpu(wmi->id);
u32 tstamp = le32_to_cpu(wmi->timestamp);
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ if (wil->reply_id && wil->reply_id == id) {
+ if (wil->reply_buf) {
+ memcpy(wil->reply_buf, wmi,
+ min(len, wil->reply_size));
+ immed_reply = true;
+ }
+ }
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
id, wmi->mid, tstamp);
@@ -799,15 +862,24 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
wil_w(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
- /* add to the pending list */
- spin_lock_irqsave(&wil->wmi_ev_lock, flags);
- list_add_tail(&evt->list, &wil->pending_wmi_ev);
- spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
- q = queue_work(wil->wmi_wq, &wil->wmi_event_worker);
- wil_dbg_wmi(wil, "queue_work -> %d\n", q);
+ if (immed_reply) {
+ wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
+ __func__, wil->reply_id);
+ kfree(evt);
+ num_immed_reply++;
+ complete(&wil->wmi_call);
+ } else {
+ /* add to the pending list */
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+ list_add_tail(&evt->list, &wil->pending_wmi_ev);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+ q = queue_work(wil->wmi_wq, &wil->wmi_event_worker);
+ wil_dbg_wmi(wil, "queue_work -> %d\n", q);
+ }
}
/* normally, 1 event per IRQ should be processed */
- wil_dbg_wmi(wil, "%s -> %d events queued\n", __func__, n);
+ wil_dbg_wmi(wil, "%s -> %d events queued, %d completed\n", __func__,
+ n - num_immed_reply, num_immed_reply);
}
int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
@@ -818,13 +890,16 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
mutex_lock(&wil->wmi_mutex);
+ spin_lock(&wil->wmi_ev_lock);
+ wil->reply_id = reply_id;
+ wil->reply_buf = reply;
+ wil->reply_size = reply_size;
+ spin_unlock(&wil->wmi_ev_lock);
+
rc = __wmi_send(wil, cmdid, buf, len);
if (rc)
goto out;
- wil->reply_id = reply_id;
- wil->reply_buf = reply;
- wil->reply_size = reply_size;
remain = wait_for_completion_timeout(&wil->wmi_call,
msecs_to_jiffies(to_msec));
if (0 == remain) {
@@ -837,10 +912,14 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
cmdid, reply_id,
to_msec - jiffies_to_msecs(remain));
}
+
+out:
+ spin_lock(&wil->wmi_ev_lock);
wil->reply_id = 0;
wil->reply_buf = NULL;
wil->reply_size = 0;
- out:
+ spin_unlock(&wil->wmi_ev_lock);
+
mutex_unlock(&wil->wmi_mutex);
return rc;
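
Both ends of the reply fast path rely on one discipline: wmi_call() publishes reply_id/reply_buf/reply_size under wmi_ev_lock before the command is sent, and wmi_recv_cmd() inspects them under the same lock, so even a reply that beats the sender back can never observe a half-built descriptor. A compilable model with a pthread mutex standing in for the spinlock (the identifiers and the 0x1003 command id are made up for illustration):

    #include <pthread.h>
    #include <string.h>
    #include <stdio.h>
    #include <stdint.h>

    static pthread_mutex_t ev_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint16_t reply_id;       /* 0 means "nobody waiting" */
    static char *reply_buf;
    static size_t reply_size;

    /* Receive side: copy a matching event straight to the waiter */
    static int try_immediate_reply(uint16_t id, const char *evt, size_t len)
    {
        int matched = 0;

        pthread_mutex_lock(&ev_lock);
        if (reply_id && reply_id == id && reply_buf) {
            memcpy(reply_buf, evt, len < reply_size ? len : reply_size);
            matched = 1;        /* caller would complete(&wmi_call) */
        }
        pthread_mutex_unlock(&ev_lock);
        return matched;
    }

    int main(void)
    {
        char buf[8] = "";

        /* Call side: publish the descriptor before sending */
        pthread_mutex_lock(&ev_lock);
        reply_id = 0x1003;
        reply_buf = buf;
        reply_size = sizeof(buf);
        pthread_mutex_unlock(&ev_lock);

        printf("matched=%d buf=%s\n",
               try_immediate_reply(0x1003, "evt", 4), buf);

        /* Call side: tear down under the same lock, success or not */
        pthread_mutex_lock(&ev_lock);
        reply_id = 0;
        reply_buf = NULL;
        reply_size = 0;
        pthread_mutex_unlock(&ev_lock);
        return 0;
    }

Clearing the descriptor in the shared out: path, rather than only on success, is what lets the error paths above share one teardown.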
@@ -1184,7 +1263,8 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
return 0;
}
-int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
+ bool full_disconnect)
{
int rc;
u16 reason_code;
@@ -1208,19 +1288,20 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
return rc;
}
- /* call event handler manually after processing wmi_call,
- * to avoid deadlock - disconnect event handler acquires wil->mutex
- * while it is already held here
- */
- reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
-
- wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
- reply.evt.bssid, reason_code,
- reply.evt.disconnect_reason);
+ if (full_disconnect) {
+ /* call event handler manually after processing wmi_call,
+ * to avoid deadlock - disconnect event handler acquires
+ * wil->mutex while it is already held here
+ */
+ reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
- wil->sinfo_gen++;
- wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
+ wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+ reply.evt.bssid, reason_code,
+ reply.evt.disconnect_reason);
+ wil->sinfo_gen++;
+ wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
+ }
return 0;
}
@@ -1348,14 +1429,11 @@ static void wmi_event_handle(struct wil6210_priv *wil,
id, wil->reply_id);
/* check if someone waits for this event */
if (wil->reply_id && wil->reply_id == id) {
- if (wil->reply_buf) {
- memcpy(wil->reply_buf, wmi,
- min(len, wil->reply_size));
- } else {
- wmi_evt_call_handler(wil, id, evt_data,
- len - sizeof(*wmi));
- }
- wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
+ WARN_ON(wil->reply_buf);
+ wmi_evt_call_handler(wil, id, evt_data,
+ len - sizeof(*wmi));
+ wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
+ __func__, id);
complete(&wil->wmi_call);
return;
}
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index dab25136214a..1efb1d66e0b7 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2481,9 +2481,7 @@ static int at76_probe(struct usb_interface *interface,
dev_err(&interface->dev,
"error %d downloading internal firmware\n",
ret);
- goto exit;
}
- usb_put_dev(udev);
goto exit;
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index ec013fbd6a81..b0603e796ad8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -1215,10 +1215,10 @@ void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev)
case B43_BUS_BCMA:
bcma_cc = &dev->dev->bdev->bus->drv_cc;
- bcma_cc_write32(bcma_cc, BCMA_CC_CHIPCTL_ADDR, 0);
- bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4);
- bcma_cc_set32(bcma_cc, BCMA_CC_CHIPCTL_DATA, 0x4);
- bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4);
+ bcma_cc_write32(bcma_cc, BCMA_CC_PMU_CHIPCTL_ADDR, 0);
+ bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4);
+ bcma_cc_set32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, 0x4);
+ bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4);
break;
#endif
#ifdef CONFIG_B43_SSB
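
The b43 fix retargets the PLL-reset pulse from the plain chip-control registers to the PMU chip-control pair; both use the same indirect idiom of selecting an index through an ADDR register and then read-modify-writing the DATA register. A standalone model of that idiom with a hypothetical register bank:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t addr_reg;       /* models BCMA_CC_PMU_CHIPCTL_ADDR */
    static uint32_t chipctl[8];     /* the indirectly addressed bank */

    static void cc_write_addr(uint32_t idx) { addr_reg = idx; }
    static void cc_mask_data(uint32_t mask) { chipctl[addr_reg] &= mask; }
    static void cc_set_data(uint32_t bits)  { chipctl[addr_reg] |= bits; }

    int main(void)
    {
        chipctl[0] = 0xff;

        /* Mirrors the patched sequence: select reg 0, pulse bit 2 */
        cc_write_addr(0);
        cc_mask_data(~0x4u);        /* clear bit 2 */
        cc_set_data(0x4);           /* raise it */
        cc_mask_data(~0x4u);        /* drop it again */

        printf("chipctl[0] = 0x%x\n", chipctl[0]);  /* 0xfb */
        return 0;
    }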
@@ -4375,12 +4375,10 @@ redo:
/* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */
orig_dev = dev;
mutex_unlock(&wl->mutex);
- if (b43_bus_host_is_sdio(dev->dev)) {
+ if (b43_bus_host_is_sdio(dev->dev))
b43_sdio_free_irq(dev);
- } else {
- synchronize_irq(dev->dev->irq);
+ else
free_irq(dev->dev->irq, dev);
- }
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (!dev)
@@ -5682,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
INIT_WORK(&wl->firmware_load, b43_request_firmware);
schedule_work(&wl->firmware_load);
-bcma_out:
return err;
bcma_err_wireless_exit:
ieee80211_free_hw(wl->hw);
+bcma_out:
+ kfree(dev);
return err;
}
@@ -5714,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
b43_rng_exit(wl);
b43_leds_unregister(wl);
-
ieee80211_free_hw(wl->hw);
+ kfree(wldev->dev);
}
static struct bcma_driver b43_bcma_driver = {
@@ -5798,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
b43_leds_unregister(wl);
b43_wireless_exit(dev, wl);
+ kfree(dev);
}
static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index b98db8a0a069..da0cdd313880 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -27,8 +27,6 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/brcmfmac-sdio.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
@@ -46,7 +44,6 @@
#include "bus.h"
#include "debug.h"
#include "sdio.h"
-#include "of.h"
#include "core.h"
#include "common.h"
@@ -106,18 +103,18 @@ static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
+ struct brcmfmac_sdio_pd *pdata;
int ret = 0;
u8 data;
u32 addr, gpiocontrol;
unsigned long flags;
- if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
+ pdata = &sdiodev->settings->bus.sdio;
+ if (pdata->oob_irq_supported) {
brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
- sdiodev->pdata->oob_irq_nr);
- ret = request_irq(sdiodev->pdata->oob_irq_nr,
- brcmf_sdiod_oob_irqhandler,
- sdiodev->pdata->oob_irq_flags,
- "brcmf_oob_intr",
+ pdata->oob_irq_nr);
+ ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
+ pdata->oob_irq_flags, "brcmf_oob_intr",
&sdiodev->func[1]->dev);
if (ret != 0) {
brcmf_err("request_irq failed %d\n", ret);
@@ -129,7 +126,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
sdiodev->irq_en = true;
spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
- ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr);
+ ret = enable_irq_wake(pdata->oob_irq_nr);
if (ret != 0) {
brcmf_err("enable_irq_wake failed %d\n", ret);
return ret;
@@ -158,7 +155,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
/* redirect, configure and enable io for interrupt signal */
data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
- if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
+ if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
data |= SDIO_SEPINT_ACT_HI;
brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
@@ -176,9 +173,12 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
+ struct brcmfmac_sdio_pd *pdata;
+
brcmf_dbg(SDIO, "Entering\n");
- if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) {
+ pdata = &sdiodev->settings->bus.sdio;
+ if (pdata->oob_irq_supported) {
sdio_claim_host(sdiodev->func[1]);
brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
@@ -187,11 +187,10 @@ int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
if (sdiodev->oob_irq_requested) {
sdiodev->oob_irq_requested = false;
if (sdiodev->irq_wake) {
- disable_irq_wake(sdiodev->pdata->oob_irq_nr);
+ disable_irq_wake(pdata->oob_irq_nr);
sdiodev->irq_wake = false;
}
- free_irq(sdiodev->pdata->oob_irq_nr,
- &sdiodev->func[1]->dev);
+ free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
sdiodev->irq_en = false;
}
} else {
@@ -523,7 +522,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
target_list = pktlist;
/* for host with broken sg support, prepare a page aligned list */
__skb_queue_head_init(&local_list);
- if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
+ if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
req_sz = 0;
skb_queue_walk(pktlist, pkt_next)
req_sz += pkt_next->len;
@@ -630,7 +629,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
}
}
- if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) {
+ if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
local_pkt_next = local_list.next;
orig_offset = 0;
skb_queue_walk(pktlist, pkt_next) {
@@ -901,7 +900,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
return;
nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
- sdiodev->bus_if->drvr->settings->sdiod_txglomsz);
+ sdiodev->settings->bus.sdio.txglomsz);
nents += (nents >> 4) + 1;
WARN_ON(nents > sdiodev->max_segment_count);
@@ -913,7 +912,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
sdiodev->sg_support = false;
}
- sdiodev->txglomsz = sdiodev->bus_if->drvr->settings->sdiod_txglomsz;
+ sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}
#ifdef CONFIG_PM_SLEEP
@@ -1103,8 +1102,6 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
-static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
-
static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
int val)
@@ -1167,20 +1164,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
dev_set_drvdata(&func->dev, bus_if);
dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
sdiodev->dev = &sdiodev->func[1]->dev;
- sdiodev->pdata = brcmfmac_sdio_pdata;
-
- if (!sdiodev->pdata)
- brcmf_of_probe(sdiodev);
-
-#ifdef CONFIG_PM_SLEEP
- /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
- * is true or when platform data OOB irq is true).
- */
- if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
- ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
- (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
- bus_if->wowl_supported = true;
-#endif
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
@@ -1263,8 +1246,8 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
sdio_flags = MMC_PM_KEEP_POWER;
if (sdiodev->wowl_enabled) {
- if (sdiodev->pdata->oob_irq_supported)
- enable_irq_wake(sdiodev->pdata->oob_irq_nr);
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
else
sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
@@ -1296,7 +1279,7 @@ static const struct dev_pm_ops brcmf_sdio_pm_ops = {
static struct sdio_driver brcmf_sdmmc_driver = {
.probe = brcmf_ops_sdio_probe,
.remove = brcmf_ops_sdio_remove,
- .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .name = KBUILD_MODNAME,
.id_table = brcmf_sdmmc_ids,
.drv = {
.owner = THIS_MODULE,
@@ -1306,37 +1289,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
},
};
-static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
-{
- brcmf_dbg(SDIO, "Enter\n");
-
- brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
-
- if (brcmfmac_sdio_pdata->power_on)
- brcmfmac_sdio_pdata->power_on();
-
- return 0;
-}
-
-static int brcmf_sdio_pd_remove(struct platform_device *pdev)
-{
- brcmf_dbg(SDIO, "Enter\n");
-
- if (brcmfmac_sdio_pdata->power_off)
- brcmfmac_sdio_pdata->power_off();
-
- sdio_unregister_driver(&brcmf_sdmmc_driver);
-
- return 0;
-}
-
-static struct platform_driver brcmf_sdio_pd = {
- .remove = brcmf_sdio_pd_remove,
- .driver = {
- .name = BRCMFMAC_SDIO_PDATA_NAME,
- }
-};
-
void brcmf_sdio_register(void)
{
int ret;
@@ -1350,19 +1302,6 @@ void brcmf_sdio_exit(void)
{
brcmf_dbg(SDIO, "Enter\n");
- if (brcmfmac_sdio_pdata)
- platform_driver_unregister(&brcmf_sdio_pd);
- else
- sdio_unregister_driver(&brcmf_sdmmc_driver);
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
}
-void __init brcmf_sdio_init(void)
-{
- int ret;
-
- brcmf_dbg(SDIO, "Enter\n");
-
- ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe);
- if (ret == -ENODEV)
- brcmf_dbg(SDIO, "No platform data available.\n");
-}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 36093f93bfbe..8e02a478e889 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -43,6 +43,8 @@ enum brcmf_bus_protocol_type {
BRCMF_PROTO_MSGBUF
};
+struct brcmf_mp_device;
+
struct brcmf_bus_dcmd {
char *name;
char *param;
@@ -217,7 +219,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
-int brcmf_attach(struct device *dev);
+int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings);
/* Indication from bus module regarding removal/absence of dongle */
void brcmf_detach(struct device *dev);
/* Indication from bus module that dongle should be reset */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 7b01e4ddb315..d5c2a27573b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -72,8 +72,13 @@
#define RSN_AKM_NONE 0 /* None (IBSS) */
#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
#define RSN_AKM_PSK 2 /* Pre-shared Key */
+#define RSN_AKM_SHA256_1X 5 /* SHA256, 802.1X */
+#define RSN_AKM_SHA256_PSK 6 /* SHA256, Pre-shared Key */
#define RSN_CAP_LEN 2 /* Length of RSN capabilities */
-#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
+#define RSN_CAP_PTK_REPLAY_CNTR_MASK (BIT(2) | BIT(3))
+#define RSN_CAP_MFPR_MASK BIT(6)
+#define RSN_CAP_MFPC_MASK BIT(7)
+#define RSN_PMKID_COUNT_LEN 2
#define VNDR_IE_CMD_LEN 4 /* length of the set command
* string :"add", "del" (+ NUL)
@@ -211,12 +216,19 @@ static const struct ieee80211_regdomain brcmf_regdom = {
REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
};
-static const u32 __wl_cipher_suites[] = {
+/* Note: brcmf_cipher_suites is an array of int defining which cipher suites
+ * are supported. A pointer to this array and the number of entries is passed
+ * on to upper layers. AES_CMAC defines whether or not the driver supports MFP.
+ * So the cipher suite AES_CMAC has to be the last one in the array, and when
+ * the device does not support MFP the number of suites is decreased by 1.
+ */
+static const u32 brcmf_cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
- WLAN_CIPHER_SUITE_AES_CMAC,
+ /* Keep as last entry: */
+ WLAN_CIPHER_SUITE_AES_CMAC
};
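
Given the ordering constraint spelled out in the comment, advertising the suites reduces to handing cfg80211 the whole array and trimming the count by one when the firmware lacks MFP. A kernel-style fragment of how the registration step would look under that assumption (not taken verbatim from this patch):

    wiphy->cipher_suites = brcmf_cipher_suites;
    wiphy->n_cipher_suites = ARRAY_SIZE(brcmf_cipher_suites);
    /* AES_CMAC sits last, so dropping one entry hides exactly MFP */
    if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
        wiphy->n_cipher_suites--;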
/* Vendor specific ie. id = 221, oui and type defines exact ie */
@@ -247,7 +259,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
ch->chan->center_freq, ch->center_freq1, ch->width);
ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
- primary_offset = ch->center_freq1 - ch->chan->center_freq;
+ primary_offset = ch->chan->center_freq - ch->center_freq1;
switch (ch->width) {
case NL80211_CHAN_WIDTH_20:
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -256,24 +268,21 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
break;
case NL80211_CHAN_WIDTH_40:
ch_inf.bw = BRCMU_CHAN_BW_40;
- if (primary_offset < 0)
+ if (primary_offset > 0)
ch_inf.sb = BRCMU_CHAN_SB_U;
else
ch_inf.sb = BRCMU_CHAN_SB_L;
break;
case NL80211_CHAN_WIDTH_80:
ch_inf.bw = BRCMU_CHAN_BW_80;
- if (primary_offset < 0) {
- if (primary_offset < -CH_10MHZ_APART)
- ch_inf.sb = BRCMU_CHAN_SB_UU;
- else
- ch_inf.sb = BRCMU_CHAN_SB_UL;
- } else {
- if (primary_offset > CH_10MHZ_APART)
- ch_inf.sb = BRCMU_CHAN_SB_LL;
- else
- ch_inf.sb = BRCMU_CHAN_SB_LU;
- }
+ if (primary_offset == -30)
+ ch_inf.sb = BRCMU_CHAN_SB_LL;
+ else if (primary_offset == -10)
+ ch_inf.sb = BRCMU_CHAN_SB_LU;
+ else if (primary_offset == 10)
+ ch_inf.sb = BRCMU_CHAN_SB_UL;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_UU;
break;
case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
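
The sign flip (primary minus centre instead of centre minus primary) is what makes the literal -30/-10/+10 comparisons correct: for an 80 MHz BSS the four possible 20 MHz primaries sit at exactly those offsets from the centre frequency. A standalone check using channel 42 (centre 5210 MHz) and its primaries on channels 36-48:

    #include <stdio.h>

    /* Sideband names follow the BRCMU_CHAN_SB_* convention */
    static const char *sb80(int primary_offset)
    {
        if (primary_offset == -30)
            return "LL";
        else if (primary_offset == -10)
            return "LU";
        else if (primary_offset == 10)
            return "UL";
        else
            return "UU";
    }

    int main(void)
    {
        int center = 5210;                          /* 80 MHz centre */
        int prim[] = { 5180, 5200, 5220, 5240 };    /* MHz */

        for (int i = 0; i < 4; i++)
            printf("primary %d -> offset %+d -> %s\n",
                   prim[i], prim[i] - center, sb80(prim[i] - center));
        return 0;
    }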
@@ -459,7 +468,7 @@ send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key)
}
static s32
-brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
+brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable)
{
s32 err;
u32 mode;
@@ -487,6 +496,15 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
enable, mode);
}
+ err = brcmf_fil_iovar_int_set(ifp, "ndoe", enable);
+ if (err) {
+ brcmf_dbg(TRACE, "failed to configure (%d) ND offload err = %d\n",
+ enable, err);
+ err = 0;
+ } else
+ brcmf_dbg(TRACE, "successfully configured (%d) ND offload to 0x%x\n",
+ enable, mode);
+
return err;
}
@@ -567,8 +585,8 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name,
}
/* wait for firmware event */
- err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
- BRCMF_VIF_EVENT_TIMEOUT);
+ err = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_ADD,
+ BRCMF_VIF_EVENT_TIMEOUT);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
if (!err) {
brcmf_err("timeout occurred\n");
@@ -1128,7 +1146,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif,
/* Arm scan timeout timer */
mod_timer(&cfg->escan_timeout, jiffies +
- WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
+ BRCMF_ESCAN_TIMER_INTERVAL_MS * HZ / 1000);
return 0;
@@ -1527,7 +1545,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
static s32
brcmf_set_wsec_mode(struct net_device *ndev,
- struct cfg80211_connect_params *sme, bool mfp)
+ struct cfg80211_connect_params *sme)
{
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
struct brcmf_cfg80211_security *sec;
@@ -1586,10 +1604,7 @@ brcmf_set_wsec_mode(struct net_device *ndev,
sme->privacy)
pval = AES_ENABLED;
- if (mfp)
- wsec = pval | gval | MFP_CAPABLE;
- else
- wsec = pval | gval;
+ wsec = pval | gval;
err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
if (err) {
brcmf_err("error (%d)\n", err);
@@ -1606,56 +1621,100 @@ brcmf_set_wsec_mode(struct net_device *ndev,
static s32
brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
{
- struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
- struct brcmf_cfg80211_security *sec;
- s32 val = 0;
- s32 err = 0;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ s32 val;
+ s32 err;
+ const struct brcmf_tlv *rsn_ie;
+ const u8 *ie;
+ u32 ie_len;
+ u32 offset;
+ u16 rsn_cap;
+ u32 mfp;
+ u16 count;
- if (sme->crypto.n_akm_suites) {
- err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev),
- "wpa_auth", &val);
- if (err) {
- brcmf_err("could not get wpa_auth (%d)\n", err);
- return err;
+ if (!sme->crypto.n_akm_suites)
+ return 0;
+
+ err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val);
+ if (err) {
+ brcmf_err("could not get wpa_auth (%d)\n", err);
+ return err;
+ }
+ if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA_AUTH_PSK;
+ break;
+ default:
+ brcmf_err("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
+ return -EINVAL;
}
- if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
- switch (sme->crypto.akm_suites[0]) {
- case WLAN_AKM_SUITE_8021X:
- val = WPA_AUTH_UNSPECIFIED;
- break;
- case WLAN_AKM_SUITE_PSK:
- val = WPA_AUTH_PSK;
- break;
- default:
- brcmf_err("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
- return -EINVAL;
- }
- } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
- switch (sme->crypto.akm_suites[0]) {
- case WLAN_AKM_SUITE_8021X:
- val = WPA2_AUTH_UNSPECIFIED;
- break;
- case WLAN_AKM_SUITE_PSK:
- val = WPA2_AUTH_PSK;
- break;
- default:
- brcmf_err("invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
- return -EINVAL;
- }
+ } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
+ switch (sme->crypto.akm_suites[0]) {
+ case WLAN_AKM_SUITE_8021X:
+ val = WPA2_AUTH_UNSPECIFIED;
+ break;
+ case WLAN_AKM_SUITE_8021X_SHA256:
+ val = WPA2_AUTH_1X_SHA256;
+ break;
+ case WLAN_AKM_SUITE_PSK_SHA256:
+ val = WPA2_AUTH_PSK_SHA256;
+ break;
+ case WLAN_AKM_SUITE_PSK:
+ val = WPA2_AUTH_PSK;
+ break;
+ default:
+ brcmf_err("invalid cipher group (%d)\n",
+ sme->crypto.cipher_group);
+ return -EINVAL;
}
+ }
- brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
- err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev),
- "wpa_auth", val);
- if (err) {
- brcmf_err("could not set wpa_auth (%d)\n", err);
- return err;
- }
+ if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
+ goto skip_mfp_config;
+ /* The MFP mode (1 or 2) needs to be determined by parsing the IEs.
+ * The RSN IE is not validated here; this is just a quick search for
+ * the MFP configuration bits.
+ */
+ rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie, sme->ie_len,
+ WLAN_EID_RSN);
+ if (!rsn_ie)
+ goto skip_mfp_config;
+ ie = (const u8 *)rsn_ie;
+ ie_len = rsn_ie->len + TLV_HDR_LEN;
+ /* Skip unicast suite */
+ offset = TLV_HDR_LEN + WPA_IE_VERSION_LEN + WPA_IE_MIN_OUI_LEN;
+ if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len)
+ goto skip_mfp_config;
+ /* Skip multicast suite */
+ count = ie[offset] + (ie[offset + 1] << 8);
+ offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN);
+ if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len)
+ goto skip_mfp_config;
+ /* Skip auth key management suite(s) */
+ count = ie[offset] + (ie[offset + 1] << 8);
+ offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN);
+ if (offset + WPA_IE_SUITE_COUNT_LEN > ie_len)
+ goto skip_mfp_config;
+ /* Ready to read capabilities */
+ mfp = BRCMF_MFP_NONE;
+ rsn_cap = ie[offset] + (ie[offset + 1] << 8);
+ if (rsn_cap & RSN_CAP_MFPR_MASK)
+ mfp = BRCMF_MFP_REQUIRED;
+ else if (rsn_cap & RSN_CAP_MFPC_MASK)
+ mfp = BRCMF_MFP_CAPABLE;
+ brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "mfp", mfp);
+
+skip_mfp_config:
+ brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
+ err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val);
+ if (err) {
+ brcmf_err("could not set wpa_auth (%d)\n", err);
+ return err;
}
- sec = &profile->sec;
- sec->wpa_auth = sme->crypto.akm_suites[0];
return err;
}
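
The MFP search above hops over the RSN version, group cipher, pairwise-suite list, and AKM list to land on the 2-byte RSN capabilities field, where MFPR is bit 6 and MFPC is bit 7. A standalone model of the same walk over a hand-built RSN IE (the sample bytes are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define RSN_CAP_MFPR (1 << 6)
    #define RSN_CAP_MFPC (1 << 7)

    /* Walk a raw RSN IE (including its 2-byte TLV header) up to the
     * capabilities field.  Returns 2 = required, 1 = capable, 0 = none,
     * -1 if the IE is too short to carry capabilities.
     */
    static int rsn_mfp_mode(const uint8_t *ie, size_t len)
    {
        size_t off = 2 + 2 + 4;     /* hdr + version + group cipher */
        uint16_t count, cap;

        if (off + 2 > len)
            return -1;
        count = ie[off] | (ie[off + 1] << 8);   /* pairwise suites */
        off += 2 + 4 * count;
        if (off + 2 > len)
            return -1;
        count = ie[off] | (ie[off + 1] << 8);   /* AKM suites */
        off += 2 + 4 * count;
        if (off + 2 > len)
            return -1;
        cap = ie[off] | (ie[off + 1] << 8);
        return (cap & RSN_CAP_MFPR) ? 2 : (cap & RSN_CAP_MFPC) ? 1 : 0;
    }

    int main(void)
    {
        /* id=48 len=20: version 1, group CCMP, 1 pairwise, 1 AKM, caps */
        uint8_t rsn[] = { 48, 20, 1, 0, 0x00, 0x0f, 0xac, 4,
                          1, 0, 0x00, 0x0f, 0xac, 4,
                          1, 0, 0x00, 0x0f, 0xac, 2,
                          0x80, 0x00 };         /* MFPC set */

        printf("mfp mode = %d\n", rsn_mfp_mode(rsn, sizeof(rsn)));  /* 1 */
        return 0;
    }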
@@ -1821,7 +1880,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
goto done;
}
- err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED);
+ err = brcmf_set_wsec_mode(ndev, sme);
if (err) {
brcmf_err("wl_set_set_cipher failed (%d)\n", err);
goto done;
@@ -2067,98 +2126,54 @@ done:
}
static s32
-brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, const u8 *mac_addr, struct key_params *params)
+brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+ u8 key_idx, bool pairwise, const u8 *mac_addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_wsec_key key;
- s32 err = 0;
- u8 keybuf[8];
+ struct brcmf_wsec_key *key;
+ s32 err;
- memset(&key, 0, sizeof(key));
- key.index = (u32) key_idx;
- /* Instead of bcast for ea address for default wep keys,
- driver needs it to be Null */
- if (!is_multicast_ether_addr(mac_addr))
- memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
- key.len = (u32) params->key_len;
- /* check for key index change */
- if (key.len == 0) {
- /* key delete */
- err = send_key_to_dongle(ifp, &key);
- if (err)
- brcmf_err("key delete error (%d)\n", err);
- } else {
- if (key.len > sizeof(key.data)) {
- brcmf_err("Invalid key length (%d)\n", key.len);
- return -EINVAL;
- }
+ brcmf_dbg(TRACE, "Enter\n");
+ brcmf_dbg(CONN, "key index (%d)\n", key_idx);
- brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
- memcpy(key.data, params->key, key.len);
+ if (!check_vif_up(ifp->vif))
+ return -EIO;
- if (!brcmf_is_apmode(ifp->vif) &&
- (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
- brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
- memcpy(keybuf, &key.data[24], sizeof(keybuf));
- memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
- memcpy(&key.data[16], keybuf, sizeof(keybuf));
- }
+ if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
+ /* we ignore this key index in this case */
+ return -EINVAL;
+ }
- /* if IW_ENCODE_EXT_RX_SEQ_VALID set */
- if (params->seq && params->seq_len == 6) {
- /* rx iv */
- u8 *ivptr;
- ivptr = (u8 *) params->seq;
- key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) |
- (ivptr[3] << 8) | ivptr[2];
- key.rxiv.lo = (ivptr[1] << 8) | ivptr[0];
- key.iv_initialized = true;
- }
+ key = &ifp->vif->profile.key[key_idx];
- switch (params->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- key.algo = CRYPTO_ALGO_WEP1;
- brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- key.algo = CRYPTO_ALGO_WEP128;
- brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- key.algo = CRYPTO_ALGO_TKIP;
- brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- key.algo = CRYPTO_ALGO_AES_CCM;
- brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- key.algo = CRYPTO_ALGO_AES_CCM;
- brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
- break;
- default:
- brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
- return -EINVAL;
- }
- err = send_key_to_dongle(ifp, &key);
- if (err)
- brcmf_err("wsec_key error (%d)\n", err);
+ if (key->algo == CRYPTO_ALGO_OFF) {
+ brcmf_dbg(CONN, "Ignore clearing of (never configured) key\n");
+ return -EINVAL;
}
+
+ memset(key, 0, sizeof(*key));
+ key->index = (u32)key_idx;
+ key->flags = BRCMF_PRIMARY_KEY;
+
+ /* Clear the key/index */
+ err = send_key_to_dongle(ifp, key);
+
+ brcmf_dbg(TRACE, "Exit\n");
return err;
}
static s32
brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
+ u8 key_idx, bool pairwise, const u8 *mac_addr,
+ struct key_params *params)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key *key;
s32 val;
s32 wsec;
- s32 err = 0;
+ s32 err;
u8 keybuf[8];
+ bool ext_key;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(CONN, "key index (%d)\n", key_idx);
@@ -2171,27 +2186,32 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
return -EINVAL;
}
- if (mac_addr &&
- (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
- (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
- brcmf_dbg(TRACE, "Exit");
- return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
- }
-
- key = &ifp->vif->profile.key[key_idx];
- memset(key, 0, sizeof(*key));
+ if (params->key_len == 0)
+ return brcmf_cfg80211_del_key(wiphy, ndev, key_idx, pairwise,
+ mac_addr);
if (params->key_len > sizeof(key->data)) {
brcmf_err("Too long key length (%u)\n", params->key_len);
- err = -EINVAL;
- goto done;
+ return -EINVAL;
+ }
+
+ ext_key = false;
+ if (mac_addr && (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
+ brcmf_dbg(TRACE, "Ext key, mac %pM", mac_addr);
+ ext_key = true;
}
+
+ key = &ifp->vif->profile.key[key_idx];
+ memset(key, 0, sizeof(*key));
+ if (ext_key && !is_multicast_ether_addr(mac_addr))
+ memcpy((char *)&key->ea, (void *)mac_addr, ETH_ALEN);
key->len = params->key_len;
key->index = key_idx;
-
memcpy(key->data, params->key, key->len);
+ if (!ext_key)
+ key->flags = BRCMF_PRIMARY_KEY;
- key->flags = BRCMF_PRIMARY_KEY;
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key->algo = CRYPTO_ALGO_WEP1;
@@ -2231,7 +2251,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
err = send_key_to_dongle(ifp, key);
- if (err)
+ if (ext_key || err)
goto done;
err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
@@ -2252,41 +2272,10 @@ done:
}
static s32
-brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr)
-{
- struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_wsec_key key;
- s32 err = 0;
-
- brcmf_dbg(TRACE, "Enter\n");
- if (!check_vif_up(ifp->vif))
- return -EIO;
-
- if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) {
- /* we ignore this key index in this case */
- return -EINVAL;
- }
-
- memset(&key, 0, sizeof(key));
-
- key.index = (u32) key_idx;
- key.flags = BRCMF_PRIMARY_KEY;
- key.algo = CRYPTO_ALGO_OFF;
-
- brcmf_dbg(CONN, "key index (%d)\n", key_idx);
-
- /* Set the new key/index */
- err = send_key_to_dongle(ifp, &key);
-
- brcmf_dbg(TRACE, "Exit\n");
- return err;
-}
-
-static s32
-brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
- void (*callback) (void *cookie, struct key_params * params))
+brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx,
+ bool pairwise, const u8 *mac_addr, void *cookie,
+ void (*callback)(void *cookie,
+ struct key_params *params))
{
struct key_params params;
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -2338,8 +2327,15 @@ done:
static s32
brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
- struct net_device *ndev, u8 key_idx)
+ struct net_device *ndev, u8 key_idx)
{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+
+ brcmf_dbg(TRACE, "Enter key_idx %d\n", key_idx);
+
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
+ return 0;
+
brcmf_dbg(INFO, "Not supported\n");
return -EOPNOTSUPP;
@@ -3023,7 +3019,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
list = (struct brcmf_scan_results *)
cfg->escan_info.escan_buf;
- if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
+ if (bi_length > BRCMF_ESCAN_BUF_SIZE - list->buflen) {
brcmf_err("Buffer is too small: ignoring\n");
goto exit;
}
@@ -3036,8 +3032,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
bss_info_le))
goto exit;
}
- memcpy(&(cfg->escan_info.escan_buf[list->buflen]),
- bss_info_le, bi_length);
+ memcpy(&cfg->escan_info.escan_buf[list->buflen], bss_info_le,
+ bi_length);
list->version = le32_to_cpu(bss_info_le->version);
list->buflen += bi_length;
list->count++;
@@ -3095,6 +3091,11 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
brcmf_dbg(SCAN, "Enter\n");
+ if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
+ brcmf_dbg(SCAN, "Event data to small. Ignore\n");
+ return 0;
+ }
+
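
The added datalen guard is the standard defence before reinterpreting an event payload as a structured type. A standalone model of the pattern with simplified record layouts (field names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct scanresults { uint32_t version, status, count; };
    struct netinfo { uint8_t ssid[32]; uint8_t ssid_len, channel; };

    /* Reject anything shorter than header + one record before casting.
     * (A real parser would also care about alignment and endianness.)
     */
    static int parse_event(const uint8_t *data, size_t datalen)
    {
        const struct scanresults *res;

        if (datalen < sizeof(*res) + sizeof(struct netinfo))
            return -1;              /* "Event data too small, ignore" */
        res = (const void *)data;
        return (int)res->count;
    }

    int main(void)
    {
        uint8_t small[4] = { 0 };

        printf("%d\n", parse_event(small, sizeof(small)));  /* -1 */
        return 0;
    }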
if (e->event_code == BRCMF_E_PFN_NET_LOST) {
brcmf_dbg(SCAN, "PFN NET LOST event. Do Nothing\n");
return 0;
@@ -3418,6 +3419,11 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
brcmf_dbg(SCAN, "Enter\n");
+ if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) {
+ brcmf_dbg(SCAN, "Event data to small. Ignore\n");
+ return 0;
+ }
+
pfn_result = (struct brcmf_pno_scanresults_le *)data;
if (e->event_code == BRCMF_E_PFN_NET_LOST) {
@@ -3510,6 +3516,10 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
else
wakeup_data.net_detect = cfg->wowl.nd_info;
}
+ if (wakeind & BRCMF_WOWL_GTK_FAILURE) {
+ brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_GTK_FAILURE\n");
+ wakeup_data.gtk_rekey_failure = true;
+ }
} else {
wakeup = NULL;
}
@@ -3536,7 +3546,8 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
brcmf_report_wowl_wakeind(wiphy, ifp);
brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0);
brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0);
- brcmf_configure_arp_offload(ifp, true);
+ if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ARP_ND))
+ brcmf_configure_arp_nd_offload(ifp, true);
brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM,
cfg->wowl.pre_pmmode);
cfg->wowl.active = false;
@@ -3560,7 +3571,8 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
brcmf_dbg(TRACE, "Suspend, wowl config.\n");
- brcmf_configure_arp_offload(ifp, false);
+ if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ARP_ND))
+ brcmf_configure_arp_nd_offload(ifp, false);
brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->wowl.pre_pmmode);
brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX);
@@ -3591,6 +3603,8 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
brcmf_wowl_nd_results);
}
+ if (wowl->gtk_rekey_failure)
+ wowl_config |= BRCMF_WOWL_GTK_FAILURE;
if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
wowl_config |= BRCMF_WOWL_UNASSOC;
@@ -3821,7 +3835,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
u32 auth = 0; /* d11 open authentication */
u16 count;
s32 err = 0;
- s32 len = 0;
+ s32 len;
u32 i;
u32 wsec;
u32 pval = 0;
@@ -3831,6 +3845,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
u8 *data;
u16 rsn_cap;
u32 wme_bss_disable;
+ u32 mfp;
brcmf_dbg(TRACE, "Enter\n");
if (wpa_ie == NULL)
@@ -3945,19 +3960,53 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
(wpa_auth |= WPA_AUTH_PSK);
break;
+ case RSN_AKM_SHA256_PSK:
+ brcmf_dbg(TRACE, "RSN_AKM_MFP_PSK\n");
+ wpa_auth |= WPA2_AUTH_PSK_SHA256;
+ break;
+ case RSN_AKM_SHA256_1X:
+ brcmf_dbg(TRACE, "RSN_AKM_MFP_1X\n");
+ wpa_auth |= WPA2_AUTH_1X_SHA256;
+ break;
default:
brcmf_err("Ivalid key mgmt info\n");
}
offset++;
}
+ mfp = BRCMF_MFP_NONE;
if (is_rsn_ie) {
wme_bss_disable = 1;
if ((offset + RSN_CAP_LEN) <= len) {
rsn_cap = data[offset] + (data[offset + 1] << 8);
if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK)
wme_bss_disable = 0;
+ if (rsn_cap & RSN_CAP_MFPR_MASK) {
+ brcmf_dbg(TRACE, "MFP Required\n");
+ mfp = BRCMF_MFP_REQUIRED;
+ /* Firmware only supports mfp required in
+ * combination with WPA2_AUTH_PSK_SHA256 or
+ * WPA2_AUTH_1X_SHA256.
+ */
+ if (!(wpa_auth & (WPA2_AUTH_PSK_SHA256 |
+ WPA2_AUTH_1X_SHA256))) {
+ err = -EINVAL;
+ goto exit;
+ }
+ /* Firmware has requirement that WPA2_AUTH_PSK/
+ * WPA2_AUTH_UNSPECIFIED be set, if SHA256 OUI
+ * is to be included in the rsn ie.
+ */
+ if (wpa_auth & WPA2_AUTH_PSK_SHA256)
+ wpa_auth |= WPA2_AUTH_PSK;
+ else if (wpa_auth & WPA2_AUTH_1X_SHA256)
+ wpa_auth |= WPA2_AUTH_UNSPECIFIED;
+ } else if (rsn_cap & RSN_CAP_MFPC_MASK) {
+ brcmf_dbg(TRACE, "MFP Capable\n");
+ mfp = BRCMF_MFP_CAPABLE;
+ }
}
+ offset += RSN_CAP_LEN;
/* set wme_bss_disable to sync RSN Capabilities */
err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable",
wme_bss_disable);
@@ -3965,6 +4014,21 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
brcmf_err("wme_bss_disable error %d\n", err);
goto exit;
}
+
+ /* Skip the PMKID count, as it is known to be 0 for an AP. */
+ offset += RSN_PMKID_COUNT_LEN;
+
+ /* See if there is BIP wpa suite left for MFP */
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP) &&
+ ((offset + WPA_IE_MIN_OUI_LEN) <= len)) {
+ err = brcmf_fil_bsscfg_data_set(ifp, "bip",
+ &data[offset],
+ WPA_IE_MIN_OUI_LEN);
+ if (err < 0) {
+ brcmf_err("bip error %d\n", err);
+ goto exit;
+ }
+ }
}
/* FOR WPS , set SES_OW_ENABLED */
wsec = (pval | gval | SES_OW_ENABLED);
@@ -3981,6 +4045,16 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
brcmf_err("wsec error %d\n", err);
goto exit;
}
+ /* Configure MFP, this needs to go after wsec otherwise the wsec command
+ * will overwrite the values set by MFP
+ */
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) {
+ err = brcmf_fil_bsscfg_int_set(ifp, "mfp", mfp);
+ if (err < 0) {
+ brcmf_err("mfp error %d\n", err);
+ goto exit;
+ }
+ }
/* set upper-layer auth */
err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth);
if (err < 0) {
@@ -4329,7 +4403,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if (!mbss) {
brcmf_set_mpc(ifp, 0);
- brcmf_configure_arp_offload(ifp, false);
+ brcmf_configure_arp_nd_offload(ifp, false);
}
/* find the RSN_IE */
@@ -4475,7 +4549,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
exit:
if ((err) && (!mbss)) {
brcmf_set_mpc(ifp, 1);
- brcmf_configure_arp_offload(ifp, true);
+ brcmf_configure_arp_nd_offload(ifp, true);
}
return err;
}
@@ -4533,7 +4607,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
brcmf_err("bss_enable config failed %d\n", err);
}
brcmf_set_mpc(ifp, 1);
- brcmf_configure_arp_offload(ifp, true);
+ brcmf_configure_arp_nd_offload(ifp, true);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
brcmf_net_setcarrier(ifp, false);
@@ -4858,7 +4932,32 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
return ret;
}
-static struct cfg80211_ops wl_cfg80211_ops = {
+#ifdef CONFIG_PM
+static int
+brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev,
+ struct cfg80211_gtk_rekey_data *gtk)
+{
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_gtk_keyinfo_le gtk_le;
+ int ret;
+
+ brcmf_dbg(TRACE, "Enter, bssidx=%d\n", ifp->bsscfgidx);
+
+ memcpy(gtk_le.kck, gtk->kck, sizeof(gtk_le.kck));
+ memcpy(gtk_le.kek, gtk->kek, sizeof(gtk_le.kek));
+ memcpy(gtk_le.replay_counter, gtk->replay_ctr,
+ sizeof(gtk_le.replay_counter));
+
+ ret = brcmf_fil_iovar_data_set(ifp, "gtk_key_info", &gtk_le,
+ sizeof(gtk_le));
+ if (ret < 0)
+ brcmf_err("gtk_key_info iovar failed: ret=%d\n", ret);
+
+ return ret;
+}
+#endif
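
The brcmf_gtk_keyinfo_le structure filled above is defined later in this patch (fwil_types.h) and must match the buffer sizes cfg80211 hands over in struct cfg80211_gtk_rekey_data. A minimal compile-time check, illustrative only and not part of the patch, could sit at the top of brcmf_cfg80211_set_rekey_data(); it assumes the NL80211_KCK_LEN/NL80211_KEK_LEN/NL80211_REPLAY_CTR_LEN constants from the nl80211 uapi header:

	/* Illustrative only: ensure the memcpy() calls above cannot
	 * overrun the firmware structure.
	 */
	BUILD_BUG_ON(sizeof(gtk_le.kck) != NL80211_KCK_LEN);
	BUILD_BUG_ON(sizeof(gtk_le.kek) != NL80211_KEK_LEN);
	BUILD_BUG_ON(sizeof(gtk_le.replay_counter) != NL80211_REPLAY_CTR_LEN);
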
+
+static struct cfg80211_ops brcmf_cfg80211_ops = {
.add_virtual_intf = brcmf_cfg80211_add_iface,
.del_virtual_intf = brcmf_cfg80211_del_iface,
.change_virtual_intf = brcmf_cfg80211_change_iface,
@@ -5405,14 +5504,14 @@ static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
{
kfree(cfg->conf);
cfg->conf = NULL;
- kfree(cfg->escan_ioctl_buf);
- cfg->escan_ioctl_buf = NULL;
kfree(cfg->extra_buf);
cfg->extra_buf = NULL;
kfree(cfg->wowl.nd);
cfg->wowl.nd = NULL;
kfree(cfg->wowl.nd_info);
cfg->wowl.nd_info = NULL;
+ kfree(cfg->escan_info.escan_buf);
+ cfg->escan_info.escan_buf = NULL;
}
static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
@@ -5420,9 +5519,6 @@ static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
if (!cfg->conf)
goto init_priv_mem_out;
- cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
- if (!cfg->escan_ioctl_buf)
- goto init_priv_mem_out;
cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
if (!cfg->extra_buf)
goto init_priv_mem_out;
@@ -5434,6 +5530,9 @@ static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
GFP_KERNEL);
if (!cfg->wowl.nd_info)
goto init_priv_mem_out;
+ cfg->escan_info.escan_buf = kzalloc(BRCMF_ESCAN_BUF_SIZE, GFP_KERNEL);
+ if (!cfg->escan_info.escan_buf)
+ goto init_priv_mem_out;
return 0;
@@ -6123,19 +6222,18 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp)
{
#ifdef CONFIG_PM
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
- s32 err;
- u32 wowl_cap;
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
- err = brcmf_fil_iovar_int_get(ifp, "wowl_cap", &wowl_cap);
- if (!err) {
- if (wowl_cap & BRCMF_WOWL_PFN_FOUND) {
- brcmf_wowlan_support.flags |=
- WIPHY_WOWLAN_NET_DETECT;
- init_waitqueue_head(&cfg->wowl.nd_data_wait);
- }
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ND)) {
+ brcmf_wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ init_waitqueue_head(&cfg->wowl.nd_data_wait);
}
}
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) {
+ brcmf_wowlan_support.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY;
+ brcmf_wowlan_support.flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+ }
+
wiphy->wowlan = &brcmf_wowlan_support;
#endif
}
@@ -6177,8 +6275,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->n_addresses = i;
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- wiphy->cipher_suites = __wl_cipher_suites;
- wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
+ wiphy->cipher_suites = brcmf_cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(brcmf_cipher_suites);
+ if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
+ wiphy->n_cipher_suites--;
wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_OFFCHAN_TX |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -6280,7 +6380,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
if (err)
goto default_conf_out;
- brcmf_configure_arp_offload(ifp, true);
+ brcmf_configure_arp_nd_offload(ifp, true);
cfg->dongle_up = true;
default_conf_out:
@@ -6398,8 +6498,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
return armed;
}
-int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
- u8 action, ulong timeout)
+
+int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg,
+ u8 action, ulong timeout)
{
struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
@@ -6407,28 +6508,85 @@ int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
vif_event_equals(event, action), timeout);
}
+static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
+ struct brcmf_fil_country_le *ccreq)
+{
+ struct brcmfmac_pd_cc *country_codes;
+ struct brcmfmac_pd_cc_entry *cc;
+ s32 found_index;
+ int i;
+
+ country_codes = drvr->settings->country_codes;
+ if (!country_codes) {
+ brcmf_dbg(TRACE, "No country codes configured for device\n");
+ return -EINVAL;
+ }
+
+ if ((alpha2[0] == ccreq->country_abbrev[0]) &&
+ (alpha2[1] == ccreq->country_abbrev[1])) {
+ brcmf_dbg(TRACE, "Country code already set\n");
+ return -EAGAIN;
+ }
+
+ found_index = -1;
+ for (i = 0; i < country_codes->table_size; i++) {
+ cc = &country_codes->table[i];
+ if ((cc->iso3166[0] == '\0') && (found_index == -1))
+ found_index = i;
+ if ((cc->iso3166[0] == alpha2[0]) &&
+ (cc->iso3166[1] == alpha2[1])) {
+ found_index = i;
+ break;
+ }
+ }
+ if (found_index == -1) {
+ brcmf_dbg(TRACE, "No country code match found\n");
+ return -EINVAL;
+ }
+ memset(ccreq, 0, sizeof(*ccreq));
+ ccreq->rev = cpu_to_le32(country_codes->table[found_index].rev);
+ memcpy(ccreq->ccode, country_codes->table[found_index].cc,
+ BRCMF_COUNTRY_BUF_SZ);
+ ccreq->country_abbrev[0] = alpha2[0];
+ ccreq->country_abbrev[1] = alpha2[1];
+ ccreq->country_abbrev[2] = 0;
+
+ return 0;
+}
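
For reference, a hedged sketch of the platform-data table this helper walks. Field names follow struct brcmfmac_pd_cc_entry from <linux/platform_data/brcmfmac.h> as introduced by this series; the entries themselves are illustrative. An entry with an empty iso3166 string serves as the wildcard that found_index falls back to when no exact match exists:

	static struct brcmfmac_pd_cc_entry brcmf_cc_entry_table[] = {
		{ .iso3166 = "", .cc = "XZ", .rev = 11 },	/* wildcard fallback */
		{ .iso3166 = "US", .cc = "Q2", .rev = 0 },	/* exact match */
	};
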
+
static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
{
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
struct brcmf_fil_country_le ccreq;
+ s32 err;
int i;
- brcmf_dbg(TRACE, "enter: initiator=%d, alpha=%c%c\n", req->initiator,
- req->alpha2[0], req->alpha2[1]);
-
/* ignore non-ISO3166 country codes */
for (i = 0; i < sizeof(req->alpha2); i++)
if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
- brcmf_err("not a ISO3166 code\n");
+ brcmf_err("not a ISO3166 code (0x%02x 0x%02x)\n",
+ req->alpha2[0], req->alpha2[1]);
return;
}
- memset(&ccreq, 0, sizeof(ccreq));
- ccreq.rev = cpu_to_le32(-1);
- memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
- if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
- brcmf_err("firmware rejected country setting\n");
+
+ brcmf_dbg(TRACE, "Enter: initiator=%d, alpha=%c%c\n", req->initiator,
+ req->alpha2[0], req->alpha2[1]);
+
+ err = brcmf_fil_iovar_data_get(ifp, "country", &ccreq, sizeof(ccreq));
+ if (err) {
+ brcmf_err("Country code iovar returned err = %d\n", err);
+ return;
+ }
+
+ err = brcmf_translate_country_code(ifp->drvr, req->alpha2, &ccreq);
+ if (err)
+ return;
+
+ err = brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
+ if (err) {
+ brcmf_err("Firmware rejected country setting\n");
return;
}
brcmf_setup_wiphybands(wiphy);
@@ -6464,6 +6622,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev;
struct brcmf_cfg80211_info *cfg;
struct wiphy *wiphy;
+ struct cfg80211_ops *ops;
struct brcmf_cfg80211_vif *vif;
struct brcmf_if *ifp;
s32 err = 0;
@@ -6475,8 +6634,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
return NULL;
}
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ return NULL;
+
+ memcpy(ops, &brcmf_cfg80211_ops, sizeof(*ops));
ifp = netdev_priv(ndev);
- wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
+#ifdef CONFIG_PM
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK))
+ ops->set_rekey_data = brcmf_cfg80211_set_rekey_data;
+#endif
+ wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info));
if (!wiphy) {
brcmf_err("Could not allocate wiphy device\n");
return NULL;
@@ -6486,6 +6654,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
cfg = wiphy_priv(wiphy);
cfg->wiphy = wiphy;
+ cfg->ops = ops;
cfg->pub = drvr;
init_vif_event(&cfg->vif_event);
INIT_LIST_HEAD(&cfg->vif_list);
@@ -6596,7 +6765,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_RANDOM_MAC)) {
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR;
#ifdef CONFIG_PM
- if (wiphy->wowlan->flags & WIPHY_WOWLAN_NET_DETECT)
+ if (wiphy->wowlan &&
+ wiphy->wowlan->flags & WIPHY_WOWLAN_NET_DETECT)
wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
#endif
}
@@ -6611,6 +6781,7 @@ priv_out:
ifp->vif = NULL;
wiphy_out:
brcmf_free_wiphy(wiphy);
+ kfree(ops);
return NULL;
}
@@ -6621,6 +6792,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
brcmf_btcoex_detach(cfg);
wiphy_unregister(cfg->wiphy);
+ kfree(cfg->ops);
wl_deinit_priv(cfg);
brcmf_free_wiphy(cfg->wiphy);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 40efb539ac26..95e35bcc16ce 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -28,8 +28,11 @@
#define WL_ROAM_TRIGGER_LEVEL -75
#define WL_ROAM_DELTA 20
-#define WL_ESCAN_BUF_SIZE (1024 * 64)
-#define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* E-Scan timeout */
+/* Keep BRCMF_ESCAN_BUF_SIZE below 64K (65536). Allocating over 64K can be
+ * problematic on some systems and should be avoided.
+ */
+#define BRCMF_ESCAN_BUF_SIZE 65000
+#define BRCMF_ESCAN_TIMER_INTERVAL_MS 10000 /* E-Scan timeout */
#define WL_ESCAN_ACTION_START 1
#define WL_ESCAN_ACTION_CONTINUE 2
@@ -69,7 +72,7 @@
#define BRCMF_VNDR_IE_P2PAF_SHIFT 12
-#define BRCMF_MAX_DEFAULT_KEYS 4
+#define BRCMF_MAX_DEFAULT_KEYS 6
/* beacon loss timeout defaults */
#define BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_ON 2
@@ -104,7 +107,6 @@ struct brcmf_cfg80211_security {
u32 auth_type;
u32 cipher_pairwise;
u32 cipher_group;
- u32 wpa_auth;
};
/**
@@ -205,7 +207,7 @@ enum wl_escan_state {
struct escan_info {
u32 escan_state;
- u8 escan_buf[WL_ESCAN_BUF_SIZE];
+ u8 *escan_buf;
struct wiphy *wiphy;
struct brcmf_if *ifp;
s32 (*run)(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
@@ -253,6 +255,7 @@ struct brcmf_cfg80211_wowl {
* struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
*
* @wiphy: wiphy object for cfg80211 interface.
+ * @ops: pointer to copy of ops as registered with wiphy object.
* @conf: dongle configuration.
* @p2p: peer-to-peer specific information.
* @btcoex: Bluetooth coexistence information.
@@ -278,7 +281,6 @@ struct brcmf_cfg80211_wowl {
* @escan_info: escan information.
* @escan_timeout: Timer for catch scan timeout.
* @escan_timeout_work: scan timeout worker.
- * @escan_ioctl_buf: dongle command buffer for escan commands.
* @vif_list: linked list of vif instances.
* @vif_cnt: number of vif instances.
* @vif_event: vif event signalling.
@@ -286,6 +288,7 @@ struct brcmf_cfg80211_wowl {
*/
struct brcmf_cfg80211_info {
struct wiphy *wiphy;
+ struct cfg80211_ops *ops;
struct brcmf_cfg80211_conf *conf;
struct brcmf_p2p_info p2p;
struct brcmf_btcoex_info *btcoex;
@@ -309,7 +312,6 @@ struct brcmf_cfg80211_info {
struct escan_info escan_info;
struct timer_list escan_timeout;
struct work_struct escan_timeout_work;
- u8 *escan_ioctl_buf;
struct list_head vif_list;
struct brcmf_cfg80211_vif_event vif_event;
struct completion vif_disabled;
@@ -402,8 +404,8 @@ bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_vif *vif);
bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg);
-int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg,
- u8 action, ulong timeout);
+int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg,
+ u8 action, ulong timeout);
s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
struct brcmf_if *ifp, bool aborted,
bool fw_abort);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 82e4382eb177..0e8f2a079907 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -803,7 +803,14 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
*eromaddr -= 4;
return -EFAULT;
}
- } while (desc != DMP_DESC_ADDRESS);
+ } while (desc != DMP_DESC_ADDRESS &&
+ desc != DMP_DESC_COMPONENT);
+
+ /* stop if we crossed current component border */
+ if (desc == DMP_DESC_COMPONENT) {
+ *eromaddr -= 4;
+ return 0;
+ }
/* skip upper 32-bit address descriptor */
if (val & DMP_DESC_ADDRSIZE_GT32)
@@ -876,7 +883,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
/* need core with ports */
- if (nmw + nsw == 0)
+ if (nmw + nsw == 0 &&
+ id != BCMA_CORE_PMU)
continue;
/* try to obtain register address info */
@@ -1006,6 +1014,7 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
{
struct brcmf_chip *pub;
struct brcmf_core_priv *cc;
+ struct brcmf_core *pmu;
u32 base;
u32 val;
int ret = 0;
@@ -1017,11 +1026,15 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
/* get chipcommon capabilites */
pub->cc_caps = chip->ops->read32(chip->ctx,
CORE_CC_REG(base, capabilities));
+ pub->cc_caps_ext = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base,
+ capabilities_ext));
/* get pmu caps & rev */
+ pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */
if (pub->cc_caps & CC_CAP_PMU) {
val = chip->ops->read32(chip->ctx,
- CORE_CC_REG(base, pmucapabilities));
+ CORE_CC_REG(pmu->base, pmucapabilities));
pub->pmurev = val & PCAP_REV_MASK;
pub->pmucaps = val;
}
@@ -1120,6 +1133,23 @@ struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
return &cc->pub;
}
+struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub)
+{
+ struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub);
+ struct brcmf_core *pmu;
+
+ /* See if there is a separate PMU core available */
+ if (cc->rev >= 35 &&
+ pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
+ pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU);
+ if (pmu)
+ return pmu;
+ }
+
+ /* Fall back to the ChipCommon core on older hardware */
+ return cc;
+}
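
Callers resolve the PMU core once and address PMU registers through its base rather than the ChipCommon base, as the brcmf_chip_setup() and brcmf_chip_sr_capable() hunks in this patch do:

	struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);
	u32 pmucaps = chip->ops->read32(chip->ctx,
					CORE_CC_REG(pmu->base,
						    pmucapabilities));
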
+
bool brcmf_chip_iscoreup(struct brcmf_core *pub)
{
struct brcmf_core_priv *core;
@@ -1290,6 +1320,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
{
u32 base, addr, reg, pmu_cc3_mask = ~0;
struct brcmf_chip_priv *chip;
+ struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);
brcmf_dbg(TRACE, "Enter\n");
@@ -1309,9 +1340,9 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID:
/* read PMU chipcontrol register 3 */
- addr = CORE_CC_REG(base, chipcontrol_addr);
+ addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
chip->ops->write32(chip->ctx, addr, 3);
- addr = CORE_CC_REG(base, chipcontrol_data);
+ addr = CORE_CC_REG(pmu->base, chipcontrol_data);
reg = chip->ops->read32(chip->ctx, addr);
return (reg & pmu_cc3_mask) != 0;
case BRCM_CC_43430_CHIP_ID:
@@ -1319,12 +1350,12 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
reg = chip->ops->read32(chip->ctx, addr);
return reg != 0;
default:
- addr = CORE_CC_REG(base, pmucapabilities_ext);
+ addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
reg = chip->ops->read32(chip->ctx, addr);
if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
return false;
- addr = CORE_CC_REG(base, retention_ctl);
+ addr = CORE_CC_REG(pmu->base, retention_ctl);
reg = chip->ops->read32(chip->ctx, addr);
return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
index f6b5feea23d2..dd0ec3eba6a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
@@ -27,6 +27,7 @@
* @chip: chip identifier.
* @chiprev: chip revision.
* @cc_caps: chipcommon core capabilities.
+ * @cc_caps_ext: chipcommon core extended capabilities.
* @pmucaps: PMU capabilities.
* @pmurev: PMU revision.
* @rambase: RAM base address (only applicable for ARM CR4 chips).
@@ -38,6 +39,7 @@ struct brcmf_chip {
u32 chip;
u32 chiprev;
u32 cc_caps;
+ u32 cc_caps_ext;
u32 pmucaps;
u32 pmurev;
u32 rambase;
@@ -83,6 +85,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
void brcmf_chip_detach(struct brcmf_chip *chip);
struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub);
bool brcmf_chip_iscoreup(struct brcmf_core *core);
void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index cfee477a6eb1..9e909e3c2f0c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -27,6 +27,11 @@
#include "fwil_types.h"
#include "tracepoint.h"
#include "common.h"
+#include "of.h"
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
+MODULE_LICENSE("Dual BSD/GPL");
const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -75,6 +80,7 @@ module_param_named(ignore_probe_fail, brcmf_ignore_probe_fail, int, 0);
MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging");
#endif
+static struct brcmfmac_platform_data *brcmfmac_pdata;
struct brcmf_mp_global_t brcmf_mp_global;
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
@@ -221,33 +227,147 @@ void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
}
#endif
-void brcmf_mp_attach(void)
+static void brcmf_mp_attach(void)
{
+ /* If the module param firmware path is set, it is always used; if not,
+ * use the platform data version when available. To make sure the path
+ * gets initialized at all, always copy the module param version first.
+ */
strlcpy(brcmf_mp_global.firmware_path, brcmf_firmware_path,
BRCMF_FW_ALTPATH_LEN);
+ if ((brcmfmac_pdata) && (brcmfmac_pdata->fw_alternative_path) &&
+ (brcmf_mp_global.firmware_path[0] == '\0')) {
+ strlcpy(brcmf_mp_global.firmware_path,
+ brcmfmac_pdata->fw_alternative_path,
+ BRCMF_FW_ALTPATH_LEN);
+ }
}
-int brcmf_mp_device_attach(struct brcmf_pub *drvr)
+struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
+ enum brcmf_bus_type bus_type,
+ u32 chip, u32 chiprev)
{
- drvr->settings = kzalloc(sizeof(*drvr->settings), GFP_ATOMIC);
- if (!drvr->settings) {
- brcmf_err("Failed to alloca storage space for settings\n");
- return -ENOMEM;
- }
-
- drvr->settings->sdiod_txglomsz = brcmf_sdiod_txglomsz;
- drvr->settings->p2p_enable = !!brcmf_p2p_enable;
- drvr->settings->feature_disable = brcmf_feature_disable;
- drvr->settings->fcmode = brcmf_fcmode;
- drvr->settings->roamoff = !!brcmf_roamoff;
+ struct brcmf_mp_device *settings;
+ struct brcmfmac_pd_device *device_pd;
+ bool found;
+ int i;
+
+ brcmf_dbg(INFO, "Enter, bus=%d, chip=%d, rev=%d\n", bus_type, chip,
+ chiprev);
+ settings = kzalloc(sizeof(*settings), GFP_ATOMIC);
+ if (!settings)
+ return NULL;
+
+ /* start by using the module parameters */
+ settings->p2p_enable = !!brcmf_p2p_enable;
+ settings->feature_disable = brcmf_feature_disable;
+ settings->fcmode = brcmf_fcmode;
+ settings->roamoff = !!brcmf_roamoff;
#ifdef DEBUG
- drvr->settings->ignore_probe_fail = !!brcmf_ignore_probe_fail;
+ settings->ignore_probe_fail = !!brcmf_ignore_probe_fail;
#endif
+
+ if (bus_type == BRCMF_BUSTYPE_SDIO)
+ settings->bus.sdio.txglomsz = brcmf_sdiod_txglomsz;
+
+ /* See if there is any device specific platform data configured */
+ found = false;
+ if (brcmfmac_pdata) {
+ for (i = 0; i < brcmfmac_pdata->device_count; i++) {
+ device_pd = &brcmfmac_pdata->devices[i];
+ if ((device_pd->bus_type == bus_type) &&
+ (device_pd->id == chip) &&
+ ((device_pd->rev == chiprev) ||
+ (device_pd->rev == -1))) {
+ brcmf_dbg(INFO, "Platform data for device found\n");
+ settings->country_codes =
+ device_pd->country_codes;
+ if (device_pd->bus_type == BRCMF_BUSTYPE_SDIO)
+ memcpy(&settings->bus.sdio,
+ &device_pd->bus.sdio,
+ sizeof(settings->bus.sdio));
+ found = true;
+ break;
+ }
+ }
+ }
+ if ((bus_type == BRCMF_BUSTYPE_SDIO) && (!found)) {
+ /* No platform data for this device. In case of SDIO try OF
+ * (Open Firmware) Device Tree.
+ */
+ brcmf_of_probe(dev, &settings->bus.sdio);
+ }
+ return settings;
+}
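
A sketch of the call sequence expected from a bus driver's probe path, matching the brcmf_attach() signature change later in this patch (error handling trimmed; chip and chiprev come from the bus-specific probe):

	settings = brcmf_get_module_param(dev, BRCMF_BUSTYPE_SDIO,
					  chip, chiprev);
	if (!settings)
		return -ENOMEM;

	err = brcmf_attach(dev, settings);
	if (err)
		brcmf_release_module_param(settings);
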
+
+void brcmf_release_module_param(struct brcmf_mp_device *module_param)
+{
+ kfree(module_param);
+}
+
+static int __init brcmf_common_pd_probe(struct platform_device *pdev)
+{
+ brcmf_dbg(INFO, "Enter\n");
+
+ brcmfmac_pdata = dev_get_platdata(&pdev->dev);
+
+ if (brcmfmac_pdata->power_on)
+ brcmfmac_pdata->power_on();
+
+ return 0;
+}
+
+static int brcmf_common_pd_remove(struct platform_device *pdev)
+{
+ brcmf_dbg(INFO, "Enter\n");
+
+ if (brcmfmac_pdata->power_off)
+ brcmfmac_pdata->power_off();
+
return 0;
}
-void brcmf_mp_device_detach(struct brcmf_pub *drvr)
+static struct platform_driver brcmf_pd = {
+ .remove = brcmf_common_pd_remove,
+ .driver = {
+ .name = BRCMFMAC_PDATA_NAME,
+ }
+};
+
+static int __init brcmfmac_module_init(void)
+{
+ int err;
+
+ /* Initialize debug system first */
+ brcmf_debugfs_init();
+
+ /* Get the platform data (if available) for our devices */
+ err = platform_driver_probe(&brcmf_pd, brcmf_common_pd_probe);
+ if (err == -ENODEV)
+ brcmf_dbg(INFO, "No platform data available.\n");
+
+ /* Initialize global module parameters */
+ brcmf_mp_attach();
+
+ /* Continue the initialization by registering the different buses */
+ err = brcmf_core_init();
+ if (err) {
+ brcmf_debugfs_exit();
+ if (brcmfmac_pdata)
+ platform_driver_unregister(&brcmf_pd);
+ }
+
+ return err;
+}
+
+static void __exit brcmfmac_module_exit(void)
{
- kfree(drvr->settings);
+ brcmf_core_exit();
+ if (brcmfmac_pdata)
+ platform_driver_unregister(&brcmf_pd);
+ brcmf_debugfs_exit();
}
+module_init(brcmfmac_module_init);
+module_exit(brcmfmac_module_exit);
+
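
Boards without device tree can feed the probe above by registering a platform device under BRCMFMAC_PDATA_NAME. A hedged board-file sketch; struct brcmfmac_platform_data comes from <linux/platform_data/brcmfmac.h> added by this series, and the power callbacks are hypothetical board helpers:

	static struct brcmfmac_platform_data brcmf_pd_data = {
		.power_on = board_wlan_power_on,	/* hypothetical */
		.power_off = board_wlan_power_off,	/* hypothetical */
	};

	static struct platform_device brcmf_pd_device = {
		.name = BRCMFMAC_PDATA_NAME,
		.id = PLATFORM_DEVID_NONE,
		.dev = {
			.platform_data = &brcmf_pd_data,
		},
	};

	platform_device_register(&brcmf_pd_device);
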
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index 3b0a63b98e99..bd095abca393 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -15,6 +15,10 @@
#ifndef BRCMFMAC_COMMON_H
#define BRCMFMAC_COMMON_H
+#include <linux/platform_device.h>
+#include <linux/platform_data/brcmfmac.h>
+#include "fwil_types.h"
+
extern const u8 ALLFFMAC[ETH_ALEN];
#define BRCMF_FW_ALTPATH_LEN 256
@@ -41,37 +45,30 @@ extern struct brcmf_mp_global_t brcmf_mp_global;
/**
* struct brcmf_mp_device - Device module parameters.
*
- * @sdiod_txglomsz: SDIO txglom size.
- * @joinboost_5g_rssi: 5g rssi booost for preferred join selection.
* @p2p_enable: Legacy P2P0 enable (old wpa_supplicant).
* @feature_disable: Feature_disable bitmask.
* @fcmode: FWS flow control.
* @roamoff: Firmware roaming off?
+ * @ignore_probe_fail: Ignore probe failure.
+ * @country_codes: If available, pointer to struct for translating country codes.
+ * @bus: Bus-specific platform data. Only SDIO at the moment.
*/
struct brcmf_mp_device {
- int sdiod_txglomsz;
- int joinboost_5g_rssi;
- bool p2p_enable;
- int feature_disable;
- int fcmode;
- bool roamoff;
- bool ignore_probe_fail;
+ bool p2p_enable;
+ unsigned int feature_disable;
+ int fcmode;
+ bool roamoff;
+ bool ignore_probe_fail;
+ struct brcmfmac_pd_cc *country_codes;
+ union {
+ struct brcmfmac_sdio_pd sdio;
+ } bus;
};
-void brcmf_mp_attach(void);
-int brcmf_mp_device_attach(struct brcmf_pub *drvr);
-void brcmf_mp_device_detach(struct brcmf_pub *drvr);
-#ifdef DEBUG
-static inline bool brcmf_ignoring_probe_fail(struct brcmf_pub *drvr)
-{
- return drvr->settings->ignore_probe_fail;
-}
-#else
-static inline bool brcmf_ignoring_probe_fail(struct brcmf_pub *drvr)
-{
- return false;
-}
-#endif
+struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
+ enum brcmf_bus_type bus_type,
+ u32 chip, u32 chiprev);
+void brcmf_release_module_param(struct brcmf_mp_device *module_param);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index ed9998b69709..ff825cd7739e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -20,6 +20,8 @@
#include <linux/inetdevice.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
@@ -36,11 +38,7 @@
#include "pcie.h"
#include "common.h"
-MODULE_AUTHOR("Broadcom Corporation");
-MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
-MODULE_LICENSE("Dual BSD/GPL");
-
-#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(50)
+#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(950)
/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET 0
@@ -172,6 +170,35 @@ _brcmf_set_mac_address(struct work_struct *work)
}
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void _brcmf_update_ndtable(struct work_struct *work)
+{
+ struct brcmf_if *ifp;
+ int i, ret;
+
+ ifp = container_of(work, struct brcmf_if, ndoffload_work);
+
+ /* clear the table in firmware */
+ ret = brcmf_fil_iovar_data_set(ifp, "nd_hostip_clear", NULL, 0);
+ if (ret) {
+ brcmf_dbg(TRACE, "fail to clear nd ip table err:%d\n", ret);
+ return;
+ }
+
+ for (i = 0; i < ifp->ipv6addr_idx; i++) {
+ ret = brcmf_fil_iovar_data_set(ifp, "nd_hostip",
+ &ifp->ipv6_addr_tbl[i],
+ sizeof(struct in6_addr));
+ if (ret)
+ brcmf_err("add nd ip err %d\n", ret);
+ }
+}
+#else
+static void _brcmf_update_ndtable(struct work_struct *work)
+{
+}
+#endif
+
static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
struct brcmf_if *ifp = netdev_priv(ndev);
@@ -685,6 +712,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked)
INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
+ INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable);
if (rtnl_locked)
err = register_netdevice(ndev);
@@ -884,6 +912,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx)
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
cancel_work_sync(&ifp->setmacaddr_work);
cancel_work_sync(&ifp->multicast_work);
+ cancel_work_sync(&ifp->ndoffload_work);
}
brcmf_net_detach(ifp->ndev);
} else {
@@ -1006,14 +1035,14 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb,
return NOTIFY_OK;
}
for (i = 0; i < ARPOL_MAX_ENTRIES; i++) {
- if (addr_table[i] != 0) {
- brcmf_fil_iovar_data_set(ifp,
- "arp_hostip", &addr_table[i],
- sizeof(addr_table[i]));
- if (ret)
- brcmf_err("add arp ip err %d\n",
- ret);
- }
+ if (addr_table[i] == 0)
+ continue;
+ ret = brcmf_fil_iovar_data_set(ifp, "arp_hostip",
+ &addr_table[i],
+ sizeof(addr_table[i]));
+ if (ret)
+ brcmf_err("add arp ip err %d\n",
+ ret);
}
}
break;
@@ -1025,7 +1054,57 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb,
}
#endif
-int brcmf_attach(struct device *dev)
+#if IS_ENABLED(CONFIG_IPV6)
+static int brcmf_inet6addr_changed(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct brcmf_pub *drvr = container_of(nb, struct brcmf_pub,
+ inet6addr_notifier);
+ struct inet6_ifaddr *ifa = data;
+ struct brcmf_if *ifp;
+ int i;
+ struct in6_addr *table;
+
+ /* Only handle primary interface */
+ ifp = drvr->iflist[0];
+ if (!ifp)
+ return NOTIFY_DONE;
+ if (ifp->ndev != ifa->idev->dev)
+ return NOTIFY_DONE;
+
+ table = ifp->ipv6_addr_tbl;
+ for (i = 0; i < NDOL_MAX_ENTRIES; i++)
+ if (ipv6_addr_equal(&ifa->addr, &table[i]))
+ break;
+
+ switch (action) {
+ case NETDEV_UP:
+ if (i == NDOL_MAX_ENTRIES) {
+ if (ifp->ipv6addr_idx < NDOL_MAX_ENTRIES) {
+ table[ifp->ipv6addr_idx++] = ifa->addr;
+ } else {
+ for (i = 0; i < NDOL_MAX_ENTRIES - 1; i++)
+ table[i] = table[i + 1];
+ table[NDOL_MAX_ENTRIES - 1] = ifa->addr;
+ }
+ }
+ break;
+ case NETDEV_DOWN:
+ if (i < NDOL_MAX_ENTRIES)
+ for (; i < ifp->ipv6addr_idx; i++)
+ table[i] = table[i + 1];
+ break;
+ default:
+ break;
+ }
+
+ schedule_work(&ifp->ndoffload_work);
+
+ return NOTIFY_OK;
+}
+#endif
+
+int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
{
struct brcmf_pub *drvr = NULL;
int ret = 0;
@@ -1047,10 +1126,7 @@ int brcmf_attach(struct device *dev)
drvr->hdrlen = 0;
drvr->bus_if = dev_get_drvdata(dev);
drvr->bus_if->drvr = drvr;
-
- /* Initialize device specific settings */
- if (brcmf_mp_device_attach(drvr))
- goto fail;
+ drvr->settings = settings;
/* attach debug facilities */
brcmf_debug_attach(drvr);
@@ -1164,30 +1240,41 @@ int brcmf_bus_start(struct device *dev)
#ifdef CONFIG_INET
drvr->inetaddr_notifier.notifier_call = brcmf_inetaddr_changed;
ret = register_inetaddr_notifier(&drvr->inetaddr_notifier);
+ if (ret)
+ goto fail;
+
+#if IS_ENABLED(CONFIG_IPV6)
+ drvr->inet6addr_notifier.notifier_call = brcmf_inet6addr_changed;
+ ret = register_inet6addr_notifier(&drvr->inet6addr_notifier);
+ if (ret) {
+ unregister_inetaddr_notifier(&drvr->inetaddr_notifier);
+ goto fail;
+ }
#endif
+#endif /* CONFIG_INET */
+
+ return 0;
fail:
- if (ret < 0) {
- brcmf_err("failed: %d\n", ret);
- if (drvr->config) {
- brcmf_cfg80211_detach(drvr->config);
- drvr->config = NULL;
- }
- if (drvr->fws) {
- brcmf_fws_del_interface(ifp);
- brcmf_fws_deinit(drvr);
- }
- if (ifp)
- brcmf_net_detach(ifp->ndev);
- if (p2p_ifp)
- brcmf_net_detach(p2p_ifp->ndev);
- drvr->iflist[0] = NULL;
- drvr->iflist[1] = NULL;
- if (brcmf_ignoring_probe_fail(drvr))
- ret = 0;
- return ret;
+ brcmf_err("failed: %d\n", ret);
+ if (drvr->config) {
+ brcmf_cfg80211_detach(drvr->config);
+ drvr->config = NULL;
}
- return 0;
+ if (drvr->fws) {
+ brcmf_fws_del_interface(ifp);
+ brcmf_fws_deinit(drvr);
+ }
+ if (ifp)
+ brcmf_net_detach(ifp->ndev);
+ if (p2p_ifp)
+ brcmf_net_detach(p2p_ifp->ndev);
+ drvr->iflist[0] = NULL;
+ drvr->iflist[1] = NULL;
+ if (drvr->settings->ignore_probe_fail)
+ ret = 0;
+
+ return ret;
}
void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
@@ -1237,6 +1324,10 @@ void brcmf_detach(struct device *dev)
unregister_inetaddr_notifier(&drvr->inetaddr_notifier);
#endif
+#if IS_ENABLED(CONFIG_IPV6)
+ unregister_inet6addr_notifier(&drvr->inet6addr_notifier);
+#endif
+
/* stop firmware event handling */
brcmf_fweh_detach(drvr);
if (drvr->config)
@@ -1256,8 +1347,6 @@ void brcmf_detach(struct device *dev)
brcmf_proto_detach(drvr);
- brcmf_mp_device_detach(drvr);
-
brcmf_debug_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
@@ -1324,19 +1413,15 @@ static void brcmf_driver_register(struct work_struct *work)
}
static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
-static int __init brcmfmac_module_init(void)
+int __init brcmf_core_init(void)
{
- brcmf_debugfs_init();
-#ifdef CONFIG_BRCMFMAC_SDIO
- brcmf_sdio_init();
-#endif
if (!schedule_work(&brcmf_driver_work))
return -EBUSY;
return 0;
}
-static void __exit brcmfmac_module_exit(void)
+void __exit brcmf_core_exit(void)
{
cancel_work_sync(&brcmf_driver_work);
@@ -1349,8 +1434,5 @@ static void __exit brcmfmac_module_exit(void)
#ifdef CONFIG_BRCMFMAC_PCIE
brcmf_pcie_exit();
#endif
- brcmf_debugfs_exit();
}
-module_init(brcmfmac_module_init);
-module_exit(brcmfmac_module_exit);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 8f39435f976f..7bdb6fef99c3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -48,6 +48,8 @@
*/
#define BRCMF_DRIVER_FIRMWARE_VERSION_LEN 32
+#define NDOL_MAX_ENTRIES 8
+
/**
* struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
*
@@ -143,6 +145,7 @@ struct brcmf_pub {
#endif
struct notifier_block inetaddr_notifier;
+ struct notifier_block inet6addr_notifier;
struct brcmf_mp_device *settings;
};
@@ -175,6 +178,7 @@ enum brcmf_netif_stop_reason {
* @stats: interface specific network statistics.
* @setmacaddr_work: worker object for setting mac address.
* @multicast_work: worker object for multicast provisioning.
+ * @ndoffload_work: worker object for neighbor discovery offload configuration.
* @fws_desc: interface specific firmware-signalling descriptor.
* @ifidx: interface index in device firmware.
* @bsscfgidx: index of bss associated with this interface.
@@ -191,6 +195,7 @@ struct brcmf_if {
struct net_device_stats stats;
struct work_struct setmacaddr_work;
struct work_struct multicast_work;
+ struct work_struct ndoffload_work;
struct brcmf_fws_mac_descriptor *fws_desc;
int ifidx;
s32 bsscfgidx;
@@ -199,6 +204,8 @@ struct brcmf_if {
spinlock_t netif_stop_lock;
atomic_t pend_8021x_cnt;
wait_queue_head_t pend_8021x_wait;
+ struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES];
+ u8 ipv6addr_idx;
};
struct brcmf_skb_reorder_data {
@@ -220,5 +227,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
+int __init brcmf_core_init(void);
+void __exit brcmf_core_exit(void);
#endif /* BRCMFMAC_CORE_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 1ffa95f1b8d2..62985f2c0853 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -136,6 +136,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
{
struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
struct brcmf_pno_macaddr_le pfn_mac;
+ u32 wowl_cap;
s32 err;
brcmf_feat_firmware_capabilities(ifp);
@@ -143,11 +144,24 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL)) {
+ err = brcmf_fil_iovar_int_get(ifp, "wowl_cap", &wowl_cap);
+ if (!err) {
+ ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_WOWL_ARP_ND);
+ if (wowl_cap & BRCMF_WOWL_PFN_FOUND)
+ ifp->drvr->feat_flags |=
+ BIT(BRCMF_FEAT_WOWL_ND);
+ if (wowl_cap & BRCMF_WOWL_GTK_FAILURE)
+ ifp->drvr->feat_flags |=
+ BIT(BRCMF_FEAT_WOWL_GTK);
+ }
+ }
/* MBSS does not work for 43362 */
if (drvr->bus_if->chip == BRCM_CC_43362_CHIP_ID)
ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_MBSS);
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_RSDB, "rsdb_mode");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_TDLS, "tdls_enable");
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MFP, "mfp");
pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
err = brcmf_fil_iovar_data_get(ifp, "pfn_macaddr", &pfn_mac,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index 2e2479d41337..db4733a95e28 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -27,6 +27,10 @@
* RSDB: Real Simultaneous Dual Band
* TDLS: Tunneled Direct Link Setup
* SCAN_RANDOM_MAC: Random MAC during (net detect) scheduled scan.
+ * WOWL_ND: WOWL net detect (PNO).
+ * WOWL_GTK: (WOWL) GTK rekeying offload.
+ * WOWL_ARP_ND: ARP and Neighbor Discovery offload support during WOWL.
+ * MFP: 802.11w Management Frame Protection.
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -36,7 +40,11 @@
BRCMF_FEAT_DEF(P2P) \
BRCMF_FEAT_DEF(RSDB) \
BRCMF_FEAT_DEF(TDLS) \
- BRCMF_FEAT_DEF(SCAN_RANDOM_MAC)
+ BRCMF_FEAT_DEF(SCAN_RANDOM_MAC) \
+ BRCMF_FEAT_DEF(WOWL_ND) \
+ BRCMF_FEAT_DEF(WOWL_GTK) \
+ BRCMF_FEAT_DEF(WOWL_ARP_ND) \
+ BRCMF_FEAT_DEF(MFP)
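
The list above is an X-macro: feature.h expands it into enum brcmf_feat_id, and feature.c expands it again into the name strings shown in debugfs. A sketch of the enum expansion, assuming the existing BRCMF_FEAT_DEF plumbing:

	#define BRCMF_FEAT_DEF(name) \
		BRCMF_FEAT_ ## name,
	enum brcmf_feat_id {
		BRCMF_FEAT_LIST
		BRCMF_FEAT_LAST
	};
	#undef BRCMF_FEAT_DEF
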
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 1365c12b78fc..7269056d0044 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -93,7 +93,7 @@ static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
c = nvp->data[nvp->pos];
if (c == '\n')
return COMMENT;
- if (is_whitespace(c))
+ if (is_whitespace(c) || c == '\0')
goto proceed;
if (c == '#')
return COMMENT;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
index 2ca783fa50cf..7e269f9aa607 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
@@ -32,7 +32,7 @@
#define BRCMF_FLOWRING_LOW (BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX 0xff
-#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16)
+#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)
static const u8 brcmf_flowring_prio2fifo[] = {
@@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
u8 prio, u8 ifidx)
{
struct brcmf_flowring_hash *hash;
- u8 hash_idx;
+ u16 hash_idx;
u32 i;
bool found;
bool sta;
@@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
}
hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
+ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
found = false;
hash = flow->hash;
for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
@@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
break;
}
hash_idx++;
+ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
}
if (found)
return hash[hash_idx].flowid;
@@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
{
struct brcmf_flowring_ring *ring;
struct brcmf_flowring_hash *hash;
- u8 hash_idx;
+ u16 hash_idx;
u32 i;
bool found;
u8 fifo;
@@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
}
hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
+ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
found = false;
hash = flow->hash;
for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
@@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
break;
}
hash_idx++;
+ hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
}
if (found) {
for (i = 0; i < flow->nrofrings; i++) {
@@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
}
-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
@@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid)
}
-static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
+static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
bool blocked)
{
struct brcmf_flowring_ring *ring;
@@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid,
}
-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
+void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
- u8 hash_idx;
+ u16 hash_idx;
struct sk_buff *skb;
ring = flow->rings[flowid];
@@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
}
-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
struct sk_buff *skb)
{
struct brcmf_flowring_ring *ring;
@@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
}
-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
struct sk_buff *skb;
@@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid)
}
-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
struct sk_buff *skb)
{
struct brcmf_flowring_ring *ring;
@@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
}
-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
@@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid)
}
-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
+void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
@@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid)
}
-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid)
+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
{
struct brcmf_flowring_ring *ring;
- u8 hash_idx;
+ u16 hash_idx;
ring = flow->rings[flowid];
hash_idx = ring->hash_id;
@@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow)
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_flowring_tdls_entry *search;
struct brcmf_flowring_tdls_entry *remove;
- u8 flowid;
+ u16 flowid;
for (flowid = 0; flowid < flow->nrofrings; flowid++) {
if (flow->rings[flowid])
@@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
struct brcmf_pub *drvr = bus_if->drvr;
u32 i;
- u8 flowid;
+ u16 flowid;
if (flow->addr_mode[ifidx] != addr_mode) {
for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
@@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
struct brcmf_flowring_tdls_entry *prev;
struct brcmf_flowring_tdls_entry *search;
u32 i;
- u8 flowid;
+ u16 flowid;
bool sta;
sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
index 95fd1c9675d1..068e68d94999 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h
@@ -16,7 +16,7 @@
#define BRCMFMAC_FLOWRING_H
-#define BRCMF_FLOWRING_HASHSIZE 256
+#define BRCMF_FLOWRING_HASHSIZE 512 /* has to be 2^x */
#define BRCMF_FLOWRING_INVALID_ID 0xFFFFFFFF
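
With the table size now a power of two, flowring.c masks the probe index instead of relying on u8 wrap-around (which silently capped the table at 256 entries). A condensed sketch of the lookup loop; hash_fn() and entry_matches() are stand-ins for the inline computations in brcmf_flowring_lookup():

	u16 idx = hash_fn(mac, fifo, ifidx) & (BRCMF_FLOWRING_HASHSIZE - 1);
	u32 i;

	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if (entry_matches(&flow->hash[idx], mac, fifo, ifidx))
			return flow->hash[idx].flowid;
		idx = (idx + 1) & (BRCMF_FLOWRING_HASHSIZE - 1);
	}
	return BRCMF_FLOWRING_INVALID_ID;
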
@@ -24,7 +24,7 @@ struct brcmf_flowring_hash {
u8 mac[ETH_ALEN];
u8 fifo;
u8 ifidx;
- u8 flowid;
+ u16 flowid;
};
enum ring_status {
@@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
u8 prio, u8 ifidx);
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
u8 prio, u8 ifidx);
-void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
-void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
-u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
-u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
+void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
+u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
struct sk_buff *skb);
-struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
-void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
+void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
struct sk_buff *skb);
-u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid);
-u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid);
+u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
+u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings);
void brcmf_flowring_detach(struct brcmf_flowring *flow);
void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 7b26fb1b437c..d414fbbcc814 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -26,50 +26,6 @@
#include "fwil.h"
/**
- * struct brcm_ethhdr - broadcom specific ether header.
- *
- * @subtype: subtype for this packet.
- * @length: TODO: length of appended data.
- * @version: version indication.
- * @oui: OUI of this packet.
- * @usr_subtype: subtype for this OUI.
- */
-struct brcm_ethhdr {
- __be16 subtype;
- __be16 length;
- u8 version;
- u8 oui[3];
- __be16 usr_subtype;
-} __packed;
-
-struct brcmf_event_msg_be {
- __be16 version;
- __be16 flags;
- __be32 event_type;
- __be32 status;
- __be32 reason;
- __be32 auth_type;
- __be32 datalen;
- u8 addr[ETH_ALEN];
- char ifname[IFNAMSIZ];
- u8 ifidx;
- u8 bsscfgidx;
-} __packed;
-
-/**
- * struct brcmf_event - contents of broadcom event packet.
- *
- * @eth: standard ether header.
- * @hdr: broadcom specific ether header.
- * @msg: common part of the actual event message.
- */
-struct brcmf_event {
- struct ethhdr eth;
- struct brcm_ethhdr hdr;
- struct brcmf_event_msg_be msg;
-} __packed;
-
-/**
* struct brcmf_fweh_queue_item - event item on event queue.
*
* @q: list element for queuing.
@@ -85,6 +41,7 @@ struct brcmf_fweh_queue_item {
u8 ifidx;
u8 ifaddr[ETH_ALEN];
struct brcmf_event_msg_be emsg;
+ u32 datalen;
u8 data[0];
};
@@ -294,6 +251,11 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data,
min_t(u32, emsg.datalen, 64),
"event payload, len=%d\n", emsg.datalen);
+ if (emsg.datalen > event->datalen) {
+ brcmf_err("event invalid length header=%d, msg=%d\n",
+ event->datalen, emsg.datalen);
+ goto event_free;
+ }
/* special handling of interface event */
if (event->code == BRCMF_E_IF) {
@@ -439,7 +401,8 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
* dispatch the event to a registered handler (using worker).
*/
void brcmf_fweh_process_event(struct brcmf_pub *drvr,
- struct brcmf_event *event_packet)
+ struct brcmf_event *event_packet,
+ u32 packet_len)
{
enum brcmf_fweh_event_code code;
struct brcmf_fweh_info *fweh = &drvr->fweh;
@@ -459,6 +422,9 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
if (code != BRCMF_E_IF && !fweh->evt_handler[code])
return;
+ if (datalen > BRCMF_DCMD_MAXLEN)
+ return;
+
if (in_interrupt())
alloc_flag = GFP_ATOMIC;
@@ -472,6 +438,7 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
/* use memcpy to get aligned event message */
memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg));
memcpy(event->data, data, datalen);
+ event->datalen = datalen;
memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN);
brcmf_fweh_queue_event(fweh, event);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 5e39e2a9e388..26ff5a9648f3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -27,7 +27,6 @@
struct brcmf_pub;
struct brcmf_if;
struct brcmf_cfg80211_info;
-struct brcmf_event;
/* list of firmware events */
#define BRCMF_FWEH_EVENT_ENUM_DEFLIST \
@@ -180,13 +179,55 @@ enum brcmf_fweh_event_code {
/**
* definitions for event packet validation.
*/
-#define BRCMF_EVENT_OUI_OFFSET 19
-#define BRCM_OUI "\x00\x10\x18"
-#define DOT11_OUI_LEN 3
-#define BCMILCP_BCM_SUBTYPE_EVENT 1
+#define BRCM_OUI "\x00\x10\x18"
+#define BCMILCP_BCM_SUBTYPE_EVENT 1
/**
+ * struct brcm_ethhdr - broadcom specific ether header.
+ *
+ * @subtype: subtype for this packet.
+ * @length: TODO: length of appended data.
+ * @version: version indication.
+ * @oui: OUI of this packet.
+ * @usr_subtype: subtype for this OUI.
+ */
+struct brcm_ethhdr {
+ __be16 subtype;
+ __be16 length;
+ u8 version;
+ u8 oui[3];
+ __be16 usr_subtype;
+} __packed;
+
+struct brcmf_event_msg_be {
+ __be16 version;
+ __be16 flags;
+ __be32 event_type;
+ __be32 status;
+ __be32 reason;
+ __be32 auth_type;
+ __be32 datalen;
+ u8 addr[ETH_ALEN];
+ char ifname[IFNAMSIZ];
+ u8 ifidx;
+ u8 bsscfgidx;
+} __packed;
+
+/**
+ * struct brcmf_event - contents of broadcom event packet.
+ *
+ * @eth: standard ether header.
+ * @hdr: broadcom specific ether header.
+ * @msg: common part of the actual event message.
+ */
+struct brcmf_event {
+ struct ethhdr eth;
+ struct brcm_ethhdr hdr;
+ struct brcmf_event_msg_be msg;
+} __packed;
+
+/**
* struct brcmf_event_msg - firmware event message.
*
* @version: version information.
@@ -256,34 +297,35 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
enum brcmf_fweh_event_code code);
int brcmf_fweh_activate_events(struct brcmf_if *ifp);
void brcmf_fweh_process_event(struct brcmf_pub *drvr,
- struct brcmf_event *event_packet);
+ struct brcmf_event *event_packet,
+ u32 packet_len);
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
struct sk_buff *skb)
{
struct brcmf_event *event_packet;
- u8 *data;
u16 usr_stype;
/* only process events when protocol matches */
if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
return;
+ if ((skb->len + ETH_HLEN) < sizeof(*event_packet))
+ return;
+
/* check for BRCM oui match */
event_packet = (struct brcmf_event *)skb_mac_header(skb);
- data = (u8 *)event_packet;
- data += BRCMF_EVENT_OUI_OFFSET;
- if (memcmp(BRCM_OUI, data, DOT11_OUI_LEN))
+ if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0],
+ sizeof(event_packet->hdr.oui)))
return;
/* final match on usr_subtype */
- data += DOT11_OUI_LEN;
- usr_stype = get_unaligned_be16(data);
+ usr_stype = get_unaligned_be16(&event_packet->hdr.usr_subtype);
if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT)
return;
- brcmf_fweh_process_event(drvr, event_packet);
+ brcmf_fweh_process_event(drvr, event_packet, skb->len + ETH_HLEN);
}
#endif /* FWEH_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 1afc2ad83b6c..a4118c0ef6ca 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -111,7 +111,9 @@
/* Wakeup if received matched secured pattern: */
#define BRCMF_WOWL_SECURE (1 << 25)
/* Wakeup on finding preferred network */
-#define BRCMF_WOWL_PFN_FOUND (1 << 26)
+#define BRCMF_WOWL_PFN_FOUND (1 << 27)
+/* Wakeup on receiving pairwise key EAP packets: */
+#define WIPHY_WOWL_EAP_PK (1 << 28)
/* Link Down indication in WoWL mode: */
#define BRCMF_WOWL_LINKDOWN (1 << 31)
@@ -134,6 +136,16 @@
#define BRCMF_PFN_MAC_OUI_ONLY BIT(0)
#define BRCMF_PFN_SET_MAC_UNASSOC BIT(1)
+#define BRCMF_MCSSET_LEN 16
+
+#define BRCMF_RSN_KCK_LENGTH 16
+#define BRCMF_RSN_KEK_LENGTH 16
+#define BRCMF_RSN_REPLAY_LEN 8
+
+#define BRCMF_MFP_NONE 0
+#define BRCMF_MFP_CAPABLE 1
+#define BRCMF_MFP_REQUIRED 2
+
/* join preference types for join_pref iovar */
enum brcmf_join_pref_types {
BRCMF_JOIN_PREF_RSSI = 1,
@@ -279,7 +291,7 @@ struct brcmf_bss_info_le {
__le32 reserved32[1]; /* Reserved for expansion of BSS properties */
u8 flags; /* flags */
u8 reserved[3]; /* Reserved for expansion of BSS properties */
- u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */
+ u8 basic_mcs[BRCMF_MCSSET_LEN]; /* 802.11N BSS required MCS set */
__le16 ie_offset; /* offset at which IEs start, from beginning */
__le32 ie_length; /* byte length of Information Elements */
@@ -787,4 +799,17 @@ struct brcmf_pktcnt_le {
__le32 rx_ocast_good_pkt;
};
+/**
+ * struct brcmf_gtk_keyinfo_le - GTK rekey data
+ *
+ * @kck: key confirmation key.
+ * @kek: key encryption key.
+ * @replay_counter: replay counter.
+ */
+struct brcmf_gtk_keyinfo_le {
+ u8 kck[BRCMF_RSN_KCK_LENGTH];
+ u8 kek[BRCMF_RSN_KEK_LENGTH];
+ u8 replay_counter[BRCMF_RSN_REPLAY_LEN];
+};
+
#endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index c2bdb91746cf..922966734a7f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
}
-static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid)
+static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
struct brcmf_flowring *flow = msgbuf->flow;
struct brcmf_commonring *commonring;
@@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
}
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
{
struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
struct msgbuf_tx_flowring_delete_req *delete;
@@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
u32 count;
if_msgbuf = drvr->bus_if->msgbuf;
+
+ if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
+ brcmf_err("driver not configured for this many flowrings %d\n",
+ if_msgbuf->nrof_flowrings);
+ if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
+ }
+
msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
if (!msgbuf)
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
index 3d513e407e3d..ee6906a3c3f6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h
@@ -33,7 +33,7 @@
int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid);
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
#else
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 03f35e0c52ca..425c41dc0a59 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -16,17 +16,15 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/mmc/card.h>
-#include <linux/platform_data/brcmfmac-sdio.h>
-#include <linux/mmc/sdio_func.h>
#include <defs.h>
#include "debug.h"
-#include "sdio.h"
+#include "core.h"
+#include "common.h"
+#include "of.h"
-void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
+void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio)
{
- struct device *dev = sdiodev->dev;
struct device_node *np = dev->of_node;
int irq;
u32 irqf;
@@ -35,12 +33,8 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
return;
- sdiodev->pdata = devm_kzalloc(dev, sizeof(*sdiodev->pdata), GFP_KERNEL);
- if (!sdiodev->pdata)
- return;
-
if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
- sdiodev->pdata->drive_strength = val;
+ sdio->drive_strength = val;
/* make sure there are interrupts defined in the node */
if (!of_find_property(np, "interrupts", NULL))
@@ -53,7 +47,7 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
}
irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
- sdiodev->pdata->oob_irq_supported = true;
- sdiodev->pdata->oob_irq_nr = irq;
- sdiodev->pdata->oob_irq_flags = irqf;
+ sdio->oob_irq_supported = true;
+ sdio->oob_irq_nr = irq;
+ sdio->oob_irq_flags = irqf;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
index 5f7c3550deda..a9d94c15d0f5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
@@ -14,9 +14,9 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef CONFIG_OF
-void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev);
+void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio);
#else
-static void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
+static void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio)
{
}
#endif /* CONFIG_OF */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 821b6494f9d1..b5a49e564f25 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1361,6 +1361,11 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
u16 mgmt_type;
u8 action;
+ if (e->datalen < sizeof(*rxframe)) {
+ brcmf_dbg(SCAN, "Event data too small. Ignore\n");
+ return 0;
+ }
+
ch.chspec = be16_to_cpu(rxframe->chanspec);
cfg->d11inf.decchspec(&ch);
/* Check if wpa_supplicant has registered for this frame */
@@ -1858,6 +1863,11 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
e->reason);
+ if (e->datalen < sizeof(*rxframe)) {
+ brcmf_dbg(SCAN, "Event data too small. Ignore\n");
+ return 0;
+ }
+
ch.chspec = be16_to_cpu(rxframe->chanspec);
cfg->d11inf.decchspec(&ch);
@@ -1988,8 +1998,8 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
brcmf_cfg80211_arm_vif_event(cfg, NULL);
return err;
}
- err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE,
- BRCMF_VIF_EVENT_TIMEOUT);
+ err = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_CHANGE,
+ BRCMF_VIF_EVENT_TIMEOUT);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
if (!err) {
brcmf_err("No BRCMF_E_IF_CHANGE event received\n");
@@ -2090,8 +2100,8 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,
}
/* wait for firmware event */
- err = brcmf_cfg80211_wait_vif_event_timeout(p2p->cfg, BRCMF_E_IF_ADD,
- BRCMF_VIF_EVENT_TIMEOUT);
+ err = brcmf_cfg80211_wait_vif_event(p2p->cfg, BRCMF_E_IF_ADD,
+ BRCMF_VIF_EVENT_TIMEOUT);
brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
brcmf_fweh_p2pdev_setup(pri_ifp, false);
if (!err) {
@@ -2180,8 +2190,8 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
}
/* wait for firmware event */
- err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD,
- BRCMF_VIF_EVENT_TIMEOUT);
+ err = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_ADD,
+ BRCMF_VIF_EVENT_TIMEOUT);
brcmf_cfg80211_arm_vif_event(cfg, NULL);
if (!err) {
brcmf_err("timeout occurred\n");
@@ -2272,8 +2282,8 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
}
if (!err) {
/* wait for firmware event */
- err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL,
- BRCMF_VIF_EVENT_TIMEOUT);
+ err = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_DEL,
+ BRCMF_VIF_EVENT_TIMEOUT);
if (!err)
err = -EIO;
else
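Both p2p.c hunks above apply the same defensive pattern: validate the event payload length against the structure about to be read before dereferencing any of its fields. A minimal sketch of the pattern (names follow the surrounding code, but treat it as illustrative):

	rxframe = (struct brcmf_rx_mgmt_data *)data;

	if (e->datalen < sizeof(*rxframe))	/* payload shorter than header */
		return 0;			/* drop rather than read past the end */

	ch.chspec = be16_to_cpu(rxframe->chanspec);	/* now safe to access */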
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 0480b70e3eb8..0af8db82da0c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -37,6 +37,8 @@
#include "pcie.h"
#include "firmware.h"
#include "chip.h"
+#include "core.h"
+#include "common.h"
enum brcmf_pcie_state {
@@ -53,6 +55,7 @@ BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt");
BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt");
BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt");
BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt");
+BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt");
BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt");
static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
@@ -66,13 +69,13 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFFF, 4365B),
- BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFFF, 4366B),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};
#define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
-#define BRCMF_PCIE_TCM_MAP_SIZE (4096 * 1024)
#define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
/* backplane address space accessed by BAR0 */
@@ -99,9 +102,6 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140
-#define BRCMF_PCIE_GENREV1 1
-#define BRCMF_PCIE_GENREV2 2
-
#define BRCMF_PCIE2_INTA 0x01
#define BRCMF_PCIE2_INTB 0x02
@@ -207,6 +207,10 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
+/* Magic number at a magic location to find RAM size */
+#define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */
+#define BRCMF_RAMSIZE_OFFSET 0x6c
+
struct brcmf_pcie_console {
u32 base_addr;
@@ -248,14 +252,11 @@ struct brcmf_pciedev_info {
char nvram_name[BRCMF_FW_NAME_LEN];
void __iomem *regs;
void __iomem *tcm;
- u32 tcm_size;
u32 ram_base;
u32 ram_size;
struct brcmf_chip *ci;
u32 coreid;
- u32 generic_corerev;
struct brcmf_pcie_shared_info shared;
- void (*ringbell)(struct brcmf_pciedev_info *devinfo);
wait_queue_head_t mbdata_resp_wait;
bool mbdata_completed;
bool irq_allocated;
@@ -267,6 +268,7 @@ struct brcmf_pciedev_info {
u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
u16 value);
+ struct brcmf_mp_device *settings;
};
struct brcmf_pcie_ringbuf {
@@ -675,10 +677,8 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
- if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
- devinfo->mbdata_completed = true;
- wake_up(&devinfo->mbdata_resp_wait);
- }
+ devinfo->mbdata_completed = true;
+ wake_up(&devinfo->mbdata_resp_wait);
}
}
@@ -742,68 +742,22 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
}
-static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo)
-{
- u32 reg_value;
-
- brcmf_dbg(PCIE, "RING !\n");
- reg_value = brcmf_pcie_read_reg32(devinfo,
- BRCMF_PCIE_PCIE2REG_MAILBOXINT);
- reg_value |= BRCMF_PCIE2_INTB;
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
- reg_value);
-}
-
-
-static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo)
-{
- brcmf_dbg(PCIE, "RING !\n");
- /* Any arbitrary value will do, lets use 1 */
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
-}
-
-
static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
{
- if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
- pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
- 0);
- else
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
- 0);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
}
static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
{
- if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1)
- pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK,
- BRCMF_PCIE_INT_DEF);
- else
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
- BRCMF_PCIE_MB_INT_D2H_DB |
- BRCMF_PCIE_MB_INT_FN0_0 |
- BRCMF_PCIE_MB_INT_FN0_1);
-}
-
-
-static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg)
-{
- struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
- u32 status;
-
- status = 0;
- pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
- if (status) {
- brcmf_pcie_intr_disable(devinfo);
- brcmf_dbg(PCIE, "Enter\n");
- return IRQ_WAKE_THREAD;
- }
- return IRQ_NONE;
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
+ BRCMF_PCIE_MB_INT_D2H_DB |
+ BRCMF_PCIE_MB_INT_FN0_0 |
+ BRCMF_PCIE_MB_INT_FN0_1);
}
-static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
+static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
@@ -816,29 +770,7 @@ static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg)
}
-static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg)
-{
- struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
- const struct pci_dev *pdev = devinfo->pdev;
- u32 status;
-
- devinfo->in_irq = true;
- status = 0;
- pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
- brcmf_dbg(PCIE, "Enter %x\n", status);
- if (status) {
- pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
- if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
- brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
- }
- if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
- brcmf_pcie_intr_enable(devinfo);
- devinfo->in_irq = false;
- return IRQ_HANDLED;
-}
-
-
-static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg)
+static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
{
struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
u32 status;
@@ -875,28 +807,14 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
brcmf_pcie_intr_disable(devinfo);
brcmf_dbg(PCIE, "Enter\n");
- /* is it a v1 or v2 implementation */
+
pci_enable_msi(pdev);
- if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
- if (request_threaded_irq(pdev->irq,
- brcmf_pcie_quick_check_isr_v1,
- brcmf_pcie_isr_thread_v1,
- IRQF_SHARED, "brcmf_pcie_intr",
- devinfo)) {
- pci_disable_msi(pdev);
- brcmf_err("Failed to request IRQ %d\n", pdev->irq);
- return -EIO;
- }
- } else {
- if (request_threaded_irq(pdev->irq,
- brcmf_pcie_quick_check_isr_v2,
- brcmf_pcie_isr_thread_v2,
- IRQF_SHARED, "brcmf_pcie_intr",
- devinfo)) {
- pci_disable_msi(pdev);
- brcmf_err("Failed to request IRQ %d\n", pdev->irq);
- return -EIO;
- }
+ if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
+ brcmf_pcie_isr_thread, IRQF_SHARED,
+ "brcmf_pcie_intr", devinfo)) {
+ pci_disable_msi(pdev);
+ brcmf_err("Failed to request IRQ %d\n", pdev->irq);
+ return -EIO;
}
devinfo->irq_allocated = true;
return 0;
@@ -927,16 +845,9 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
if (devinfo->in_irq)
brcmf_err("Still in IRQ (processing) !!!\n");
- if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) {
- status = 0;
- pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status);
- pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status);
- } else {
- status = brcmf_pcie_read_reg32(devinfo,
- BRCMF_PCIE_PCIE2REG_MAILBOXINT);
- brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
- status);
- }
+ status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
+
devinfo->irq_allocated = false;
}
@@ -985,7 +896,9 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
return -EIO;
- devinfo->ringbell(devinfo);
+ brcmf_dbg(PCIE, "RING !\n");
+ /* Any arbitrary value will do, let's use 1 */
+ brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1);
return 0;
}
@@ -1412,6 +1325,28 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
};
+static void
+brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
+ u32 data_len)
+{
+ __le32 *field;
+ u32 newsize;
+
+ if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
+ return;
+
+ field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
+ if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
+ return;
+ field++;
+ newsize = le32_to_cpup(field);
+
+ brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
+ newsize);
+ devinfo->ci->ramsize = newsize;
+}
+
+
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
u32 sharedram_addr)
@@ -1477,9 +1412,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
u32 address;
u32 resetintr;
- devinfo->ringbell = brcmf_pcie_ringbell_v2;
- devinfo->generic_corerev = BRCMF_PCIE_GENREV2;
-
brcmf_dbg(PCIE, "Halt ARM.\n");
err = brcmf_pcie_enter_download_state(devinfo);
if (err)
@@ -1566,8 +1498,7 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
}
devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
- devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE);
- devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE;
+ devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
if (!devinfo->regs || !devinfo->tcm) {
brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
@@ -1576,8 +1507,9 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
}
brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
devinfo->regs, (unsigned long long)bar0_addr);
- brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n",
- devinfo->tcm, (unsigned long long)bar1_addr);
+ brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
+ devinfo->tcm, (unsigned long long)bar1_addr,
+ (unsigned int)bar1_size);
return 0;
}
@@ -1594,16 +1526,16 @@ static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
}
-static int brcmf_pcie_attach_bus(struct device *dev)
+static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo)
{
int ret;
/* Attach to the common driver interface */
- ret = brcmf_attach(dev);
+ ret = brcmf_attach(&devinfo->pdev->dev, devinfo->settings);
if (ret) {
brcmf_err("brcmf_attach failed\n");
} else {
- ret = brcmf_bus_start(dev);
+ ret = brcmf_bus_start(&devinfo->pdev->dev);
if (ret)
brcmf_err("dongle is not responding\n");
}
@@ -1694,6 +1626,13 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
brcmf_pcie_attach(devinfo);
+ /* Some firmware images have the size of the device memory defined
+ * inside the firmware. This is because part of the device memory is
+ * shared and the division is determined by the firmware. Parse the
+ * firmware and adjust the chip memory size now.
+ */
+ brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);
+
ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
if (ret)
goto fail;
@@ -1734,7 +1673,7 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
init_waitqueue_head(&devinfo->mbdata_resp_wait);
brcmf_pcie_intr_enable(devinfo);
- if (brcmf_pcie_attach_bus(bus->dev) == 0)
+ if (brcmf_pcie_attach_bus(devinfo) == 0)
return;
brcmf_pcie_bus_console_read(devinfo);
@@ -1778,6 +1717,15 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto fail;
}
+ devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
+ BRCMF_BUSTYPE_PCIE,
+ devinfo->ci->chip,
+ devinfo->ci->chiprev);
+ if (!devinfo->settings) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
bus = kzalloc(sizeof(*bus), GFP_KERNEL);
if (!bus) {
ret = -ENOMEM;
@@ -1822,6 +1770,8 @@ fail:
brcmf_pcie_release_resource(devinfo);
if (devinfo->ci)
brcmf_chip_detach(devinfo->ci);
+ if (devinfo->settings)
+ brcmf_release_module_param(devinfo->settings);
kfree(pcie_bus_dev);
kfree(devinfo);
return ret;
@@ -1861,6 +1811,8 @@ brcmf_pcie_remove(struct pci_dev *pdev)
if (devinfo->ci)
brcmf_chip_detach(devinfo->ci);
+ if (devinfo->settings)
+ brcmf_release_module_param(devinfo->settings);
kfree(devinfo);
dev_set_drvdata(&pdev->dev, NULL);
@@ -1951,6 +1903,9 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = {
#define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
+#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \
+ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
+ subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
@@ -1966,6 +1921,7 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
+ BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
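brcmf_pcie_adjust_ramsize() above looks for an optional eight-byte record at BRCMF_RAMSIZE_OFFSET in the downloaded image: the magic word followed by the little-endian RAM size. A synthetic image for illustration (the size value is invented):

	u8 img[BRCMF_RAMSIZE_OFFSET + 8] = { 0 };

	/* magic word, then the RAM size the firmware expects, both LE32 */
	put_unaligned_le32(BRCMF_RAMSIZE_MAGIC, &img[BRCMF_RAMSIZE_OFFSET]);
	put_unaligned_le32(0x00180000, &img[BRCMF_RAMSIZE_OFFSET + 4]);

	brcmf_pcie_adjust_ramsize(devinfo, img, sizeof(img));
	/* devinfo->ci->ramsize is now 0x00180000 */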
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index a14d9d9da094..43fd3f402eba 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -33,8 +33,6 @@
#include <linux/bcma/bcma.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
-#include <linux/platform_data/brcmfmac-sdio.h>
-#include <linux/moduleparam.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
@@ -44,9 +42,11 @@
#include "sdio.h"
#include "chip.h"
#include "firmware.h"
+#include "core.h"
+#include "common.h"
-#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2000)
-#define CTL_DONE_TIMEOUT msecs_to_jiffies(2000)
+#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500)
+#define CTL_DONE_TIMEOUT msecs_to_jiffies(2500)
#ifdef DEBUG
@@ -2442,15 +2442,17 @@ static void brcmf_sdio_bus_stop(struct device *dev)
static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
{
+ struct brcmf_sdio_dev *sdiodev;
unsigned long flags;
- if (bus->sdiodev->oob_irq_requested) {
- spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
- if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
- enable_irq(bus->sdiodev->pdata->oob_irq_nr);
- bus->sdiodev->irq_en = true;
+ sdiodev = bus->sdiodev;
+ if (sdiodev->oob_irq_requested) {
+ spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
+ if (!sdiodev->irq_en && !atomic_read(&bus->ipend)) {
+ enable_irq(sdiodev->settings->bus.sdio.oob_irq_nr);
+ sdiodev->irq_en = true;
}
- spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
+ spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
}
}
@@ -3394,9 +3396,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
sizeof(u32));
} else {
/* otherwise, set txglomalign */
- value = 4;
- if (sdiodev->pdata)
- value = sdiodev->pdata->sd_sgentry_align;
+ value = sdiodev->settings->bus.sdio.sd_sgentry_align;
/* SDIO ADMA requires at least 32 bit alignment */
value = max_t(u32, value, 4);
err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
@@ -3615,7 +3615,6 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
const struct sdiod_drive_str *str_tab = NULL;
u32 str_mask;
u32 str_shift;
- u32 base;
u32 i;
u32 drivestrength_sel = 0;
u32 cc_data_temp;
@@ -3658,14 +3657,15 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
}
if (str_tab != NULL) {
+ struct brcmf_core *pmu = brcmf_chip_get_pmu(ci);
+
for (i = 0; str_tab[i].strength != 0; i++) {
if (drivestrength >= str_tab[i].strength) {
drivestrength_sel = str_tab[i].sel;
break;
}
}
- base = brcmf_chip_get_chipcommon(ci)->base;
- addr = CORE_CC_REG(base, chipcontrol_addr);
+ addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
cc_data_temp &= ~str_mask;
@@ -3775,26 +3775,28 @@ static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
static bool
brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
+ struct brcmf_sdio_dev *sdiodev;
u8 clkctl = 0;
int err = 0;
int reg_addr;
u32 reg_val;
u32 drivestrength;
- sdio_claim_host(bus->sdiodev->func[1]);
+ sdiodev = bus->sdiodev;
+ sdio_claim_host(sdiodev->func[1]);
pr_debug("F1 signature read @0x18000000=0x%4x\n",
- brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
+ brcmf_sdiod_regrl(sdiodev, SI_ENUM_BASE, NULL));
/*
* Force PLL off until brcmf_chip_attach()
* programs PLL control regs
*/
- brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
BRCMF_INIT_CLKCTL1, &err);
if (!err)
- clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+ clkctl = brcmf_sdiod_regrb(sdiodev,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
@@ -3803,51 +3805,81 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
goto fail;
}
- bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
+ bus->ci = brcmf_chip_attach(sdiodev, &brcmf_sdio_buscore_ops);
if (IS_ERR(bus->ci)) {
brcmf_err("brcmf_chip_attach failed!\n");
bus->ci = NULL;
goto fail;
}
+ sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
+ BRCMF_BUSTYPE_SDIO,
+ bus->ci->chip,
+ bus->ci->chiprev);
+ if (!sdiodev->settings) {
+ brcmf_err("Failed to get device parameters\n");
+ goto fail;
+ }
+ /* platform specific configuration:
+ * alignments must be at least 4 bytes for ADMA
+ */
+ bus->head_align = ALIGNMENT;
+ bus->sgentry_align = ALIGNMENT;
+ if (sdiodev->settings->bus.sdio.sd_head_align > ALIGNMENT)
+ bus->head_align = sdiodev->settings->bus.sdio.sd_head_align;
+ if (sdiodev->settings->bus.sdio.sd_sgentry_align > ALIGNMENT)
+ bus->sgentry_align =
+ sdiodev->settings->bus.sdio.sd_sgentry_align;
+
+ /* allocate scatter-gather table. sg support
+ * will be disabled upon allocation failure.
+ */
+ brcmf_sdiod_sgtable_alloc(sdiodev);
+
+#ifdef CONFIG_PM_SLEEP
+ /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
+ * is true or when platform data OOB irq is true).
+ */
+ if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
+ ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
+ (sdiodev->settings->bus.sdio.oob_irq_supported)))
+ sdiodev->bus_if->wowl_supported = true;
+#endif
if (brcmf_sdio_kso_init(bus)) {
brcmf_err("error enabling KSO\n");
goto fail;
}
- if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
- drivestrength = bus->sdiodev->pdata->drive_strength;
+ if (sdiodev->settings->bus.sdio.drive_strength)
+ drivestrength = sdiodev->settings->bus.sdio.drive_strength;
else
drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
- brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+ brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength);
/* Set card control so an SDIO card reset does a WLAN backplane reset */
- reg_val = brcmf_sdiod_regrb(bus->sdiodev,
- SDIO_CCCR_BRCM_CARDCTRL, &err);
+ reg_val = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
if (err)
goto fail;
reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
- brcmf_sdiod_regwb(bus->sdiodev,
- SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+ brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
if (err)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
- reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
- pmucontrol);
- reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
+ reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
+ reg_val = brcmf_sdiod_regrl(sdiodev, reg_addr, &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, reg_val, &err);
if (err)
goto fail;
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3868,7 +3900,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
return true;
fail:
- sdio_release_host(bus->sdiodev->func[1]);
+ sdio_release_host(sdiodev->func[1]);
return false;
}
@@ -4046,18 +4078,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
bus->txminmax = BRCMF_TXMINMAX;
bus->tx_seq = SDPCM_SEQ_WRAP - 1;
- /* platform specific configuration:
- * alignments must be at least 4 bytes for ADMA
- */
- bus->head_align = ALIGNMENT;
- bus->sgentry_align = ALIGNMENT;
- if (sdiodev->pdata) {
- if (sdiodev->pdata->sd_head_align > ALIGNMENT)
- bus->head_align = sdiodev->pdata->sd_head_align;
- if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
- bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
- }
-
/* single-threaded workqueue */
wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
dev_name(&sdiodev->func[1]->dev));
@@ -4108,7 +4128,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
/* Attach to the common layer, reserve hdr space */
- ret = brcmf_attach(bus->sdiodev->dev);
+ ret = brcmf_attach(bus->sdiodev->dev, bus->sdiodev->settings);
if (ret != 0) {
brcmf_err("brcmf_attach failed\n");
goto fail;
@@ -4212,6 +4232,8 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
}
brcmf_chip_detach(bus->ci);
}
+ if (bus->sdiodev->settings)
+ brcmf_release_module_param(bus->sdiodev->settings);
kfree(bus->rxbuf);
kfree(bus->hdrbuf);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 23f223150cef..dcf0ce8cd2c1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -184,7 +184,7 @@ struct brcmf_sdio_dev {
struct brcmf_sdio *bus;
struct device *dev;
struct brcmf_bus *bus_if;
- struct brcmfmac_sdio_platform_data *pdata;
+ struct brcmf_mp_device *settings;
bool oob_irq_requested;
bool irq_en; /* irq enable flags */
spinlock_t irq_en_lock;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index c72b7b352a77..869eb82db8b1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -27,6 +27,8 @@
#include "debug.h"
#include "firmware.h"
#include "usb.h"
+#include "core.h"
+#include "common.h"
#define IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000)
@@ -171,6 +173,7 @@ struct brcmf_usbdev_info {
struct urb *bulk_urb; /* used for FW download */
bool wowl_enabled;
+ struct brcmf_mp_device *settings;
};
static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
@@ -1027,6 +1030,9 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
kfree(devinfo->tx_reqs);
kfree(devinfo->rx_reqs);
+
+ if (devinfo->settings)
+ brcmf_release_module_param(devinfo->settings);
}
@@ -1136,7 +1142,7 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
int ret;
/* Attach to the common driver interface */
- ret = brcmf_attach(devinfo->dev);
+ ret = brcmf_attach(devinfo->dev, devinfo->settings);
if (ret) {
brcmf_err("brcmf_attach failed\n");
return ret;
@@ -1223,6 +1229,14 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
bus->wowl_supported = true;
#endif
+ devinfo->settings = brcmf_get_module_param(bus->dev, BRCMF_BUSTYPE_USB,
+ bus_pub->devid,
+ bus_pub->chiprev);
+ if (!devinfo->settings) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
if (!brcmf_usb_dlneeded(devinfo)) {
ret = brcmf_usb_bus_setup(devinfo);
if (ret)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index bec2dc1ca2e4..61ae2768132a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -818,13 +818,15 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static int
brcms_ops_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct brcms_info *wl = hw->priv;
struct scb *scb = &wl->wlc->pri_scb;
int status;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u8 buf_size = params->buf_size;
if (WARN_ON(scb->magic != SCB_MAGIC))
return -EIDRM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
index 3f68dd5ecd11..7b9a77981df1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
@@ -236,6 +236,8 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
#define WPA2_AUTH_RESERVED3 0x0200
#define WPA2_AUTH_RESERVED4 0x0400
#define WPA2_AUTH_RESERVED5 0x0800
+#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */
+#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */
#define DOT11_DEFAULT_RTS_LEN 2347
#define DOT11_DEFAULT_FRAG_LEN 2346
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index fd38aa0763e4..b75f4ef3cdc7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -5982,12 +5982,14 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 * ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct il_priv *il = hw->priv;
int ret = -EINVAL;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
index 8ab8706f9422..e432715e02d8 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.h
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -182,9 +182,7 @@ void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 * ssn,
- u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 7438fbeef744..16c4f383488f 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -98,6 +98,18 @@ config IWLWIFI_UAPSD
If unsure, say N.
+config IWLWIFI_PCIE_RTPM
+ bool "Enable runtime power management mode for PCIe devices"
+ depends on IWLMVM && PM
+ default n
+ help
+ Say Y here to enable runtime power management for PCIe
+ devices. If enabled, the device will go into low power mode
+ when idle for a short period of time, allowing for improved
+ power saving during runtime.
+
+ If unsure, say N.
+
menu "Debugging Options"
config IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index 1aabb5ec096f..1bbd17ada974 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -152,11 +152,14 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
{
struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
unsigned long on = 0;
+ unsigned long off = 0;
if (brightness > 0)
on = IWL_LED_SOLID;
+ else
+ off = IWL_LED_SOLID;
- iwl_led_cmd(priv, on, 0);
+ iwl_led_cmd(priv, on, off);
}
static int iwl_led_blink_set(struct led_classdev *led_cdev,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 4841be2aa499..1799469268ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -943,14 +943,16 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (sta) {
+ u64 pn64;
+
tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
rx_p1ks = data->tkip->rx_uni;
- ieee80211_get_key_tx_seq(key, &seq);
- tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+ pn64 = atomic64_read(&key->tx_pn);
+ tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+ tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
@@ -996,19 +998,13 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
break;
case WLAN_CIPHER_SUITE_CCMP:
if (sta) {
- u8 *pn = seq.ccmp.pn;
+ u64 pn64;
aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
- ieee80211_get_key_tx_seq(key, &seq);
- aes_tx_sc->pn = cpu_to_le64(
- (u64)pn[5] |
- ((u64)pn[4] << 8) |
- ((u64)pn[3] << 16) |
- ((u64)pn[2] << 24) |
- ((u64)pn[1] << 32) |
- ((u64)pn[0] << 40));
+ pn64 = atomic64_read(&key->tx_pn);
+ aes_tx_sc->pn = cpu_to_le64(pn64);
} else
aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
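The WoWLAN key programming now reads the transmit PN atomically from key->tx_pn instead of the removed ieee80211_get_key_tx_seq(). For TKIP the 48-bit PN is split into the two IV halves; a worked example of the decomposition (the PN value is arbitrary):

	u64 pn64 = 0x0000123456789abcULL;	/* example 48-bit PN */

	u16 iv16 = TKIP_PN_TO_IV16(pn64);	/* low 16 bits  -> 0x9abc */
	u32 iv32 = TKIP_PN_TO_IV32(pn64);	/* bits 16..47  -> 0x12345678 */

For CCMP no split is needed: the whole counter is handed to the firmware as a little-endian 64-bit value via cpu_to_le64().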
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 29ea1c6705b4..c63ea79571ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -396,7 +396,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
- iwl_trans_d3_suspend(priv->trans, false);
+ iwl_trans_d3_suspend(priv->trans, false, true);
goto out;
@@ -469,7 +469,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
/* we'll clear ctx->vif during iwlagn_prepare_restart() */
vif = ctx->vif;
- ret = iwl_trans_d3_resume(priv->trans, &d3_status, false);
+ ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true);
if (ret)
goto out_unlock;
@@ -732,12 +732,15 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret = -EINVAL;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 buf_size = params->buf_size;
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index f62c2d727ddb..85628127947f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1652,10 +1652,10 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.line,
- table.blink1, table.blink2, table.ilink1,
- table.ilink2, table.bcon_time, table.gp1,
- table.gp2, table.gp3, table.ucode_ver,
- table.hw_ver, 0, table.brd_ver);
+ table.blink2, table.ilink1, table.ilink2,
+ table.bcon_time, table.gp1, table.gp2,
+ table.gp3, table.ucode_ver, table.hw_ver,
+ 0, table.brd_ver);
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id));
IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index fa41a5e1c890..fc475ce59b47 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,8 +73,8 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
-#define IWL7265D_UCODE_API_MAX 20
-#define IWL3168_UCODE_API_MAX 20
+#define IWL7265D_UCODE_API_MAX 21
+#define IWL3168_UCODE_API_MAX 21
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 13
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index bce9b3420a13..97be104d1203 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 20
-#define IWL8265_UCODE_API_MAX 20
+#define IWL8000_UCODE_API_MAX 21
+#define IWL8265_UCODE_API_MAX 21
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 13
@@ -217,6 +217,7 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .vht_mu_mimo_supported = true,
};
const struct iwl_cfg iwl4165_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index ecbf4822cd69..318b1dc171f2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 20
+#define IWL9000_UCODE_API_MAX 21
/* Oldest version we won't warn about */
#define IWL9000_UCODE_API_OK 13
@@ -138,7 +138,10 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.smem_offset = IWL9000_SMEM_OFFSET, \
.smem_len = IWL9000_SMEM_LEN, \
.thermal_params = &iwl9000_tt_params, \
- .apmg_not_supported = true
+ .apmg_not_supported = true, \
+ .mq_rx_supported = true, \
+ .vht_mu_mimo_supported = true, \
+ .mac_addr_from_csr = true
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f99048135fb9..3e4d346be350 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff {
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
* @nvm_hw_section_num: the ID of the HW NVM section
+ * @mac_addr_from_csr: read HW address from CSR registers
* @features: hw features, any combination of feature_whitelist
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
@@ -311,6 +312,8 @@ struct iwl_pwr_tx_backoff {
* @dccm2_len: length of the second DCCM
* @smem_offset: offset from which the SMEM begins
* @smem_len: the length of SMEM
+ * @mq_rx_supported: multi-queue rx support
+ * @vht_mu_mimo_supported: VHT MU-MIMO support
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -343,6 +346,7 @@ struct iwl_cfg {
const bool host_interrupt_operation_mode;
bool high_temp;
u8 nvm_hw_section_num;
+ bool mac_addr_from_csr;
bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
bool no_power_up_nic_in_init;
@@ -362,6 +366,8 @@ struct iwl_cfg {
const u32 smem_len;
const struct iwl_tt_params *thermal_params;
bool apmg_not_supported;
+ bool mq_rx_supported;
+ bool vht_mu_mimo_supported;
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 163b21bc20cb..b978f6cae55c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -549,4 +550,62 @@ enum dtd_diode_reg {
DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */
};
+/*****************************************************************************
+ * MSIX related registers *
+ *****************************************************************************/
+
+#define CSR_MSIX_BASE (0x2000)
+#define CSR_MSIX_FH_INT_CAUSES_AD (CSR_MSIX_BASE + 0x800)
+#define CSR_MSIX_FH_INT_MASK_AD (CSR_MSIX_BASE + 0x804)
+#define CSR_MSIX_HW_INT_CAUSES_AD (CSR_MSIX_BASE + 0x808)
+#define CSR_MSIX_HW_INT_MASK_AD (CSR_MSIX_BASE + 0x80C)
+#define CSR_MSIX_AUTOMASK_ST_AD (CSR_MSIX_BASE + 0x810)
+#define CSR_MSIX_RX_IVAR_AD_REG (CSR_MSIX_BASE + 0x880)
+#define CSR_MSIX_IVAR_AD_REG (CSR_MSIX_BASE + 0x890)
+#define CSR_MSIX_PENDING_PBA_AD (CSR_MSIX_BASE + 0x1000)
+#define CSR_MSIX_RX_IVAR(cause) (CSR_MSIX_RX_IVAR_AD_REG + (cause))
+#define CSR_MSIX_IVAR(cause) (CSR_MSIX_IVAR_AD_REG + (cause))
+
+#define MSIX_FH_INT_CAUSES_Q(q) (q)
+
+/*
+ * Causes for the FH register interrupts
+ */
+enum msix_fh_int_causes {
+ MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16),
+ MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17),
+ MSIX_FH_INT_CAUSES_S2D = BIT(19),
+ MSIX_FH_INT_CAUSES_FH_ERR = BIT(21),
+};
+
+/*
+ * Causes for the HW register interrupts
+ */
+enum msix_hw_int_causes {
+ MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
+ MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
+ MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
+ MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
+ MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
+ MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25),
+ MSIX_HW_INT_CAUSES_REG_SCD = BIT(26),
+ MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27),
+ MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29),
+ MSIX_HW_INT_CAUSES_REG_HAP = BIT(30),
+};
+
+#define MSIX_MIN_INTERRUPT_VECTORS 2
+#define MSIX_AUTO_CLEAR_CAUSE 0
+#define MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
+
+/*****************************************************************************
+ * HW address related registers *
+ *****************************************************************************/
+
+#define CSR_ADDR_BASE (0x380)
+#define CSR_MAC_ADDR0_OTP (CSR_ADDR_BASE)
+#define CSR_MAC_ADDR1_OTP (CSR_ADDR_BASE + 4)
+#define CSR_MAC_ADDR0_STRAP (CSR_ADDR_BASE + 8)
+#define CSR_MAC_ADDR1_STRAP (CSR_ADDR_BASE + 0xC)
+
#endif /* !__iwl_csr_h__ */
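The new CSR_MAC_ADDR*_OTP/STRAP windows let the driver recover the MAC address straight from hardware when the mac_addr_from_csr flag is set. A sketch of pulling the six address bytes out of the two 32-bit words; the exact byte ordering is hardware-defined, so treat this as illustrative only:

	__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
	__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
	u8 hw_addr[ETH_ALEN];

	memcpy(hw_addr, &mac_addr0, sizeof(mac_addr0));	/* address bytes 0..3 */
	memcpy(hw_addr + 4, &mac_addr1, 2);		/* address bytes 4..5 */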
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index 22786d7dc00a..f02e2c89abbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -2,6 +2,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -73,12 +74,12 @@ TRACE_EVENT(iwlwifi_dev_rx,
TP_ARGS(dev, trans, pkt, len),
TP_STRUCT__entry(
DEV_ENTRY
- __field(u8, cmd)
+ __field(u16, cmd)
__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
),
TP_fast_assign(
DEV_ASSIGN;
- __entry->cmd = pkt->hdr.cmd;
+ __entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
memcpy(__get_dynamic_array(rxbuf), pkt,
iwl_rx_trace_len(trans, pkt, len));
),
@@ -121,13 +122,12 @@ TRACE_EVENT(iwlwifi_dev_tx,
TRACE_EVENT(iwlwifi_dev_ucode_error,
TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low,
- u32 data1, u32 data2, u32 line, u32 blink1,
- u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
- u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver,
- u32 brd_ver),
+ u32 data1, u32 data2, u32 line, u32 blink2, u32 ilink1,
+ u32 ilink2, u32 bcon_time, u32 gp1, u32 gp2, u32 rev_type,
+ u32 major, u32 minor, u32 hw_ver, u32 brd_ver),
TP_ARGS(dev, desc, tsf_low, data1, data2, line,
- blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
- gp3, major, minor, hw_ver, brd_ver),
+ blink2, ilink1, ilink2, bcon_time, gp1, gp2,
+ rev_type, major, minor, hw_ver, brd_ver),
TP_STRUCT__entry(
DEV_ENTRY
__field(u32, desc)
@@ -135,14 +135,13 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
__field(u32, data1)
__field(u32, data2)
__field(u32, line)
- __field(u32, blink1)
__field(u32, blink2)
__field(u32, ilink1)
__field(u32, ilink2)
__field(u32, bcon_time)
__field(u32, gp1)
__field(u32, gp2)
- __field(u32, gp3)
+ __field(u32, rev_type)
__field(u32, major)
__field(u32, minor)
__field(u32, hw_ver)
@@ -155,29 +154,27 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
__entry->data1 = data1;
__entry->data2 = data2;
__entry->line = line;
- __entry->blink1 = blink1;
__entry->blink2 = blink2;
__entry->ilink1 = ilink1;
__entry->ilink2 = ilink2;
__entry->bcon_time = bcon_time;
__entry->gp1 = gp1;
__entry->gp2 = gp2;
- __entry->gp3 = gp3;
+ __entry->rev_type = rev_type;
__entry->major = major;
__entry->minor = minor;
__entry->hw_ver = hw_ver;
__entry->brd_ver = brd_ver;
),
TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
- "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
- "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X major 0x%08X "
+ "blink2 0x%05X ilink 0x%05X 0x%05X "
+ "bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X "
"minor 0x%08X hw 0x%08X brd 0x%08X",
__get_str(dev), __entry->desc, __entry->tsf_low,
- __entry->data1,
- __entry->data2, __entry->line, __entry->blink1,
+ __entry->data1, __entry->data2, __entry->line,
__entry->blink2, __entry->ilink1, __entry->ilink2,
__entry->bcon_time, __entry->gp1, __entry->gp2,
- __entry->gp3, __entry->major, __entry->minor,
+ __entry->rev_type, __entry->major, __entry->minor,
__entry->hw_ver, __entry->brd_ver)
);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ab4c2a0470b2..f899666acb41 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -374,15 +376,12 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
return 0;
}
-static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
- const u32 len)
+static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+ const u32 len)
{
struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
- if (len < sizeof(*fw_capa))
- return -EINVAL;
-
capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
capa->max_ap_cache_per_scan =
@@ -395,7 +394,15 @@ static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
le32_to_cpu(fw_capa->max_significant_change_aps);
capa->max_bssid_history_entries =
le32_to_cpu(fw_capa->max_bssid_history_entries);
- return 0;
+ capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
+ capa->max_number_epno_networks =
+ le32_to_cpu(fw_capa->max_number_epno_networks);
+ capa->max_number_epno_networks_by_ssid =
+ le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
+ capa->max_number_of_white_listed_ssid =
+ le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
+ capa->max_number_of_black_listed_ssid =
+ le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
}
/*
@@ -1023,8 +1030,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
le32_to_cpup((__le32 *)tlv_data);
break;
case IWL_UCODE_TLV_FW_GSCAN_CAPA:
- if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
- goto invalid_tlv_len;
+ /*
+ * Don't return an error in case of a shorter tlv_len
+ * to enable loading of FW that has an old format
+ * of GSCAN capabilities TLV.
+ */
+ if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
+ break;
+
+ iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
gscan_capa = true;
break;
default:
@@ -1033,7 +1047,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
}
}
- if (usniffer_req && !*usniffer_images) {
+ if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) &&
+ usniffer_req && !*usniffer_images) {
IWL_ERR(drv,
"user selected to work with usniffer but usniffer image isn't available in ucode package\n");
return -EINVAL;
@@ -1045,12 +1060,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
- /*
- * If ucode advertises that it supports GSCAN but GSCAN
- * capabilities TLV is not present, warn and continue without GSCAN.
- */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
- WARN(!gscan_capa,
+ if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+ !gscan_capa,
"GSCAN is supported but capabilities TLV is unavailable\n"))
__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
capa->_capa);
@@ -1718,3 +1729,7 @@ MODULE_PARM_DESC(fw_monitor,
module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_entry_delay,
uint, S_IRUGO);
MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
+
+module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 5cc6be927eab..582008a66069 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -312,6 +314,81 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
#define FH_MEM_TB_MAX_LENGTH (0x00020000)
+/* 9000 rx series registers */
+
+#define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */
+#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define RFH_Q0_FRBDCB_WIDX 0xA08080
+#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Read index table */
+#define RFH_Q0_FRBDCB_RIDX 0xA080C0
+#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
+/* Used list table */
+#define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */
+#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define RFH_Q0_URBDCB_WIDX 0xA08180
+#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4)
+#define RFH_Q0_URBDCB_VAID 0xA081C0
+#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4)
+/* stts */
+#define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /* 64 bit address */
+#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
+
+#define RFH_Q0_ORB_WPTR_LSB 0xA08280
+#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8)
+#define RFH_RBDBUF_RBD0_LSB 0xA08300
+#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
+
+/* DMA configuration */
+#define RFH_RXF_DMA_CFG 0xA09820
+/* RB size */
+#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
+#define RFH_RXF_DMA_RB_SIZE_POS 16
+#define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_12K (0x9 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_16K (0xA << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_20K (0xB << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_24K (0xC << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_28K (0xD << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_32K (0xE << RFH_RXF_DMA_RB_SIZE_POS)
+/* RB Circular Buffer size: defines the table sizes in RBD units */
+#define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */
+#define RFH_RXF_DMA_RBDCB_SIZE_POS 20
+#define RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_64 (0x6 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */
+#define RFH_RXF_DMA_MIN_RB_SIZE_POS 24
+#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */
+#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */
+#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31 */
+#define RFH_DMA_EN_ENABLE_VAL BIT(31)
+
+#define RFH_RXF_RXQ_ACTIVE 0xA0980C
+
+#define RFH_GEN_CFG 0xA09800
+#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0)
+#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1)
+#define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) /* 0 - 64B, 1 - 128B */
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8
+
+#define DEFAULT_RXQ_NUM 0
+
+/* end of 9000 rx series registers */
+
/* TFDB Area - TFDs buffer table */
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
@@ -434,6 +511,13 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
*/
#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+#define MQ_RX_TABLE_SIZE 512
+#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1)
+#define MQ_RX_NUM_RBDS (MQ_RX_TABLE_SIZE - 1)
+#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
+ IWL_MAX_RX_HW_QUEUES * \
+ (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
+
#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
index a5aaf6853704..8425e1a587d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
@@ -293,6 +293,8 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
* threshold.
* @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
+ * the firmware sends a tx reply.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@@ -309,6 +311,7 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_BA,
FW_DBG_TRIGGER_TX_LATENCY,
FW_DBG_TRIGGER_TDLS,
+ FW_DBG_TRIGGER_TX_STATUS,
/* must be last */
FW_DBG_TRIGGER_MAX,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 84f8aeb926c8..15ec4e2907d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -297,10 +297,12 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
* which also implies support for the scheduler configuration command
* @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
* @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
* @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
* @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
* @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: supports p2p standalone U-APSD
* @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
* @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
* sources for the MCC. This TLV bit is a future replacement to
@@ -313,7 +315,15 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
+ * @IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT: supports multi queue rx
* @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
* antenna the beacon should be transmitted
+ * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ * from AP and will send it upon d0i3 exit.
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ * thresholds reporting
+ * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ * regular image.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -330,10 +340,12 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11,
IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12,
IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18,
IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19,
IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22,
+ IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD = (__force iwl_ucode_tlv_capa_t)26,
IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28,
IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29,
IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30,
@@ -341,8 +353,14 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
+ IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
+ IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73,
+ IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74,
+ IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
+ IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
+ IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
@@ -748,6 +766,19 @@ struct iwl_fw_dbg_trigger_tdls {
} __packed;
/**
+ * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response
+ * status.
+ * @statuses: the list of statuses to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_tx_status {
+ struct tx_status {
+ u8 status;
+ u8 reserved[3];
+ } __packed statuses[16];
+ __le32 reserved[2];
+} __packed;
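
A minimal sketch of how a driver might consult this trigger when a tx reply arrives (helper name is illustrative; the real matching logic lives elsewhere in the driver):

    static bool tx_status_match(const struct iwl_fw_dbg_trigger_tx_status *trig,
                                u8 fw_status)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(trig->statuses); i++)
                    if (trig->statuses[i].status == fw_status)
                            return true;
            return false;
    }
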
+
+/**
* struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
* @id: conf id
* @usniffer: should the uSniffer image be used
@@ -778,6 +809,12 @@ struct iwl_fw_dbg_conf_tlv {
* change APs.
* @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
* hold.
+ * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
+ * @max_number_epno_networks: max number of epno entries.
+ * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
+ * specified.
+ * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
+ * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
*/
struct iwl_fw_gscan_capabilities {
__le32 max_scan_cache_size;
@@ -788,6 +825,11 @@ struct iwl_fw_gscan_capabilities {
__le32 max_hotlist_aps;
__le32 max_significant_change_aps;
__le32 max_bssid_history_entries;
+ __le32 max_hotlist_ssids;
+ __le32 max_number_epno_networks;
+ __le32 max_number_epno_networks_by_ssid;
+ __le32 max_number_of_white_listed_ssid;
+ __le32 max_number_of_black_listed_ssid;
} __packed;
#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 85d6d6d55e2f..2942571c613f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -205,6 +207,12 @@ struct iwl_fw_cscheme_list {
* change APs.
* @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
* hold.
+ * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
+ * @max_number_epno_networks: max number of epno entries.
+ * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
+ * specified.
+ * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
+ * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
*/
struct iwl_gscan_capabilities {
u32 max_scan_cache_size;
@@ -215,6 +223,11 @@ struct iwl_gscan_capabilities {
u32 max_hotlist_aps;
u32 max_significant_change_aps;
u32 max_bssid_history_entries;
+ u32 max_hotlist_ssids;
+ u32 max_number_epno_networks;
+ u32 max_number_epno_networks_by_ssid;
+ u32 max_number_of_white_listed_ssid;
+ u32 max_number_of_black_listed_ssid;
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index fd42f63f5e84..d1a5dd1602f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -108,11 +108,14 @@ enum iwl_amsdu_size {
* @power_level: power level, default = 1
* @debug_level: levels are IWL_DL_*
* @ant_coupling: antenna coupling in dB, default = 0
+ * @nvm_file: specifies an external NVM file
+ * @uapsd_disable: disable U-APSD, default = 1
* @d0i3_disable: disable d0i3, default = 1,
* @d0i3_entry_delay: time to wait after no refs are taken before
* entering D0i3 (in msecs)
* @lar_disable: disable LAR (regulatory), default = 0
* @fw_monitor: allow to use firmware monitor
+ * @disable_11ac: disable VHT capabilities, default = false.
*/
struct iwl_mod_params {
int sw_crypto;
@@ -133,6 +136,7 @@ struct iwl_mod_params {
unsigned int d0i3_entry_delay;
bool lar_disable;
bool fw_monitor;
+ bool disable_11ac;
};
#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 7b89bfc8c8ac..93a689583dff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -69,6 +70,9 @@
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-csr.h"
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
@@ -366,6 +370,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
max_ampdu_exponent <<
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ if (cfg->vht_mu_mimo_supported)
+ vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
if (cfg->ht_params->ldpc)
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
@@ -449,7 +456,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
IEEE80211_BAND_5GHZ);
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
tx_chains, rx_chains);
- if (data->sku_cap_11ac_enable)
+ if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
@@ -519,27 +526,41 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg);
}
-static void iwl_set_hw_address(const struct iwl_cfg *cfg,
- struct iwl_nvm_data *data,
- const __le16 *nvm_sec)
+static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
{
- const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR);
-
- /* The byte order is little endian 16 bit, meaning 214365 */
- data->hw_addr[0] = hw_addr[1];
- data->hw_addr[1] = hw_addr[0];
- data->hw_addr[2] = hw_addr[3];
- data->hw_addr[3] = hw_addr[2];
- data->hw_addr[4] = hw_addr[5];
- data->hw_addr[5] = hw_addr[4];
+ const u8 *hw_addr;
+
+ hw_addr = (const u8 *)&mac_addr0;
+ dest[0] = hw_addr[3];
+ dest[1] = hw_addr[2];
+ dest[2] = hw_addr[1];
+ dest[3] = hw_addr[0];
+
+ hw_addr = (const u8 *)&mac_addr1;
+ dest[4] = hw_addr[1];
+ dest[5] = hw_addr[0];
}
-static void iwl_set_hw_address_family_8000(struct device *dev,
+static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+ struct iwl_nvm_data *data)
+{
+ __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
+ __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
+
+ /* If OEM did not fuse address - get it from OTP */
+ if (!mac_addr0 && !mac_addr1) {
+ mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
+ mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
+ }
+
+ iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+}
+
+static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *mac_override,
- const __le16 *nvm_hw,
- u32 mac_addr0, u32 mac_addr1)
+ const __le16 *nvm_hw)
{
const u8 *hw_addr;
@@ -565,44 +586,68 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
return;
- IWL_ERR_DEV(dev,
- "mac address from nvm override section is not valid\n");
+ IWL_ERR(trans,
+ "mac address from nvm override section is not valid\n");
}
if (nvm_hw) {
- /* read the MAC address from HW resisters */
- hw_addr = (const u8 *)&mac_addr0;
- data->hw_addr[0] = hw_addr[3];
- data->hw_addr[1] = hw_addr[2];
- data->hw_addr[2] = hw_addr[1];
- data->hw_addr[3] = hw_addr[0];
+ /* read the mac address from WFMP registers */
+ __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans,
+ WFMP_MAC_ADDR_0));
+ __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans,
+ WFMP_MAC_ADDR_1));
- hw_addr = (const u8 *)&mac_addr1;
- data->hw_addr[4] = hw_addr[1];
- data->hw_addr[5] = hw_addr[0];
-
- if (!is_valid_ether_addr(data->hw_addr))
- IWL_ERR_DEV(dev,
- "mac address from hw section is not valid\n");
+ iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
return;
}
- IWL_ERR_DEV(dev, "mac address is not found\n");
+ IWL_ERR(trans, "mac address is not found\n");
+}
+
+static int iwl_set_hw_address(struct iwl_trans *trans,
+ const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data, const __le16 *nvm_hw,
+ const __le16 *mac_override)
+{
+ if (cfg->mac_addr_from_csr) {
+ iwl_set_hw_address_from_csr(trans, data);
+ } else if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
+
+ /* The byte order is little endian 16 bit, meaning 214365 */
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+ } else {
+ iwl_set_hw_address_family_8000(trans, cfg, data,
+ mac_override, nvm_hw);
+ }
+
+ if (!is_valid_ether_addr(data->hw_addr)) {
+ IWL_ERR(trans, "no valid mac address was found\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
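
To illustrate the "214365" ordering mentioned in the comment (made-up bytes): an NVM byte stream of 11 00 33 22 55 44 decodes to the address 00:11:22:33:44:55 after the pairwise swap above.

    const u8 nvm_stream[ETH_ALEN] = { 0x11, 0x00, 0x33, 0x22, 0x55, 0x44 };
    u8 addr[ETH_ALEN];

    addr[0] = nvm_stream[1];        /* 0x00 */
    addr[1] = nvm_stream[0];        /* 0x11 */
    addr[2] = nvm_stream[3];        /* 0x22 */
    addr[3] = nvm_stream[2];        /* 0x33 */
    addr[4] = nvm_stream[5];        /* 0x44 */
    addr[5] = nvm_stream[4];        /* 0x55 */
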
struct iwl_nvm_data *
-iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
- u32 mac_addr0, u32 mac_addr1)
+ u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
{
+ struct device *dev = trans->dev;
struct iwl_nvm_data *data;
- u32 sku;
- u32 radio_cfg;
+ bool lar_enabled;
+ u32 sku, radio_cfg;
u16 lar_config;
+ const __le16 *ch_section;
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
data = kzalloc(sizeof(*data) +
@@ -641,21 +686,16 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
/* Checking for required sections */
if (!nvm_calib) {
- IWL_ERR_DEV(dev,
- "Can't parse empty Calib NVM sections\n");
+ IWL_ERR(trans,
+ "Can't parse empty Calib NVM sections\n");
kfree(data);
return NULL;
}
/* in family 8000 Xtal calibration values moved to OTP */
data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
- }
-
- if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
- iwl_set_hw_address(cfg, data, nvm_hw);
-
- iwl_init_sbands(dev, cfg, data, nvm_sw,
- tx_chains, rx_chains, lar_fw_supported);
+ lar_enabled = true;
+ ch_section = nvm_sw;
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
NVM_LAR_OFFSET_FAMILY_8000_OLD :
@@ -664,16 +704,18 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
lar_config = le16_to_cpup(regulatory + lar_offset);
data->lar_enabled = !!(lar_config &
NVM_LAR_ENABLED_FAMILY_8000);
+ lar_enabled = data->lar_enabled;
+ ch_section = regulatory;
+ }
- /* MAC address in family 8000 */
- iwl_set_hw_address_family_8000(dev, cfg, data, mac_override,
- nvm_hw, mac_addr0, mac_addr1);
-
- iwl_init_sbands(dev, cfg, data, regulatory,
- tx_chains, rx_chains,
- lar_fw_supported && data->lar_enabled);
+ /* If no valid mac address was found - bail out */
+ if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
+ kfree(data);
+ return NULL;
}
+ iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
+ lar_fw_supported && lar_enabled);
data->calib_version = 255;
return data;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 92466ee72806..d704d52aa7ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -74,12 +74,11 @@
* later with iwl_free_nvm_data().
*/
struct iwl_nvm_data *
-iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
const __le16 *nvm_calib, const __le16 *regulatory,
const __le16 *mac_override, const __le16 *phy_sku,
- u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
- u32 mac_addr0, u32 mac_addr1);
+ u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
/**
* iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 5bde23a472b4..c46e596e12b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -404,4 +404,6 @@ enum {
LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
};
+#define UREG_CHICK (0xA05C00)
+#define UREG_CHICK_MSIX_ENABLE BIT(25)
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 82fb3a97a46d..91d74b3f666b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -506,7 +506,7 @@ struct iwl_trans_config {
bool sw_csum_tx;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
-
+
u32 sdio_adma_addr;
};
@@ -618,9 +618,9 @@ struct iwl_trans_ops {
void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
void (*stop_device)(struct iwl_trans *trans, bool low_power);
- void (*d3_suspend)(struct iwl_trans *trans, bool test);
+ void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test);
+ bool test, bool reset);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@@ -736,6 +736,11 @@ enum iwl_plat_pm_mode {
IWL_PLAT_PM_MODE_D0I3,
};
+/* Max time to wait for trans to become idle/non-idle on d0i3
+ * enter/exit (in msecs).
+ */
+#define IWL_TRANS_IDLE_TIMEOUT 2000
+
/**
* struct iwl_trans - transport common data
*
@@ -831,6 +836,7 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
enum iwl_plat_pm_mode runtime_pm_mode;
+ bool suspending;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -920,22 +926,23 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
_iwl_trans_stop_device(trans, true);
}
-static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
+ bool reset)
{
might_sleep();
if (trans->ops->d3_suspend)
- trans->ops->d3_suspend(trans, test);
+ trans->ops->d3_suspend(trans, test, reset);
}
static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
- bool test)
+ bool test, bool reset)
{
might_sleep();
if (!trans->ops->d3_resume)
return 0;
- return trans->ops->d3_resume(trans, status, test);
+ return trans->ops->d3_resume(trans, status, test, reset);
}
static inline void iwl_trans_ref(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index b00c03fcd447..4b560e4417ee 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -6,7 +6,8 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +33,8 @@
* BSD LICENSE
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,6 +75,7 @@
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
+#define IWL_MVM_P2P_UAPSD_STANDALONE 0
#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
@@ -107,6 +110,7 @@
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
+#define IWL_MVM_COLLECT_FW_ERR_DUMP 1
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index d3e21d95cece..c1a313149eed 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -249,16 +251,19 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
return;
case WLAN_CIPHER_SUITE_TKIP:
if (sta) {
+ u64 pn64;
+
tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
rx_p1ks = data->tkip->rx_uni;
- ieee80211_get_key_tx_seq(key, &seq);
- tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+ pn64 = atomic64_read(&key->tx_pn);
+ tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+ tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
- ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+ ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
+ p1k);
iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
memcpy(data->tkip->mic_keys.tx,
@@ -811,8 +816,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
{
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
- iwl_trans_stop_device(mvm->trans);
-
+ iwl_mvm_stop_device(mvm);
/*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
@@ -1023,14 +1027,18 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
struct ieee80211_sta *ap_sta)
{
int ret;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
- ret = iwl_mvm_switch_to_d3(mvm);
- if (ret)
- return ret;
+ if (!unified_image) {
+ ret = iwl_mvm_switch_to_d3(mvm);
+ if (ret)
+ return ret;
- ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
- if (ret)
- return ret;
+ ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+ if (ret)
+ return ret;
+ }
if (!iwlwifi_mod_params.sw_crypto) {
/*
@@ -1072,10 +1080,14 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
{
struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
int ret;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
- ret = iwl_mvm_switch_to_d3(mvm);
- if (ret)
- return ret;
+ if (!unified_image) {
+ ret = iwl_mvm_switch_to_d3(mvm);
+ if (ret)
+ return ret;
+ }
/* rfkill release can be either for wowlan or netdetect */
if (wowlan->rfkill_release)
@@ -1151,6 +1163,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
int ret;
int len __maybe_unused;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
if (!wowlan) {
/*
@@ -1236,7 +1250,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
- iwl_trans_d3_suspend(mvm->trans, test);
+ iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
out:
if (ret < 0) {
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -1299,7 +1313,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
mutex_unlock(&mvm->d0i3_suspend_mutex);
- iwl_trans_d3_suspend(trans, false);
+ iwl_trans_d3_suspend(trans, false, false);
return 0;
}
@@ -1601,7 +1615,9 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_TKIP:
iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
- ieee80211_set_key_tx_seq(key, &seq);
+ atomic64_set(&key->tx_pn,
+ (u64)seq.tkip.iv16 |
+ ((u64)seq.tkip.iv32 << 16));
break;
}
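
The conversion between the atomic 48-bit packet number and the iv16/iv32 pair is lossless; a short round-trip sketch using mac80211's TKIP_PN_TO_IV16/IV32, which take the low 16 and the next 32 bits of the PN:

    u64 pn = 0x123456789abcULL;
    u16 iv16 = TKIP_PN_TO_IV16(pn);            /* 0x9abc */
    u32 iv32 = TKIP_PN_TO_IV32(pn);            /* 0x12345678 */
    u64 back = (u64)iv16 | ((u64)iv32 << 16);  /* == pn again */
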
@@ -2041,9 +2057,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
- int ret;
+ int ret = 1;
enum iwl_d3_status d3_status;
bool keep = false;
+ bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+ CMD_WAKE_UP_TRANS;
mutex_lock(&mvm->mutex);
@@ -2052,7 +2073,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
if (IS_ERR_OR_NULL(vif))
goto err;
- ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
+ ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
if (ret)
goto err;
@@ -2095,17 +2116,28 @@ out_iterate:
iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
out:
- /* return 1 to reconfigure the device */
+ if (unified_image && !ret) {
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+ if (!ret) /* D3 ended successfully - no need to reset device */
+ return 0;
+ }
+
+ /*
+ * Reconfigure the device in one of the following cases:
+ * 1. We are not using a unified image
+ * 2. We are using a unified image but had an error while exiting D3
+ */
set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
-
- /* We always return 1, which causes mac80211 to do a reconfig
- * with IEEE80211_RECONFIG_TYPE_RESTART. This type of
- * reconfig calls iwl_mvm_restart_complete(), where we unref
- * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
- * reference here.
+ /*
+ * When switching images we return 1, which causes mac80211
+ * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
+ * This type of reconfig calls iwl_mvm_restart_complete(),
+ * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
+ * to take the reference here.
*/
iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
return 1;
}
@@ -2122,7 +2154,7 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
enum iwl_d3_status d3_status;
struct iwl_trans *trans = mvm->trans;
- iwl_trans_d3_resume(trans, &d3_status, false);
+ iwl_trans_d3_resume(trans, &d3_status, false, false);
/*
* make sure to clear D0I3_DEFER_WAKEUP before
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 9e0d46368cdd..14004456bf55 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1255,6 +1257,7 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
+ bool prev;
u8 value;
int ret;
@@ -1265,7 +1268,9 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
return -EINVAL;
mutex_lock(&mvm->mutex);
- iwl_mvm_update_low_latency(mvm, vif, value);
+ prev = iwl_mvm_vif_low_latency(mvmvif);
+ mvmvif->low_latency_dbgfs = value;
+ iwl_mvm_update_low_latency(mvm, vif, prev);
mutex_unlock(&mvm->mutex);
return count;
@@ -1277,11 +1282,15 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
{
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[2];
+ char buf[30] = {};
+ int len;
- buf[0] = mvmvif->low_latency ? '1' : '0';
- buf[1] = '\n';
- return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+ len = snprintf(buf, sizeof(buf) - 1,
+ "traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+ mvmvif->low_latency_traffic,
+ mvmvif->low_latency_dbgfs,
+ mvmvif->low_latency_vcmd);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
@@ -1363,6 +1372,59 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
}
+static void iwl_dbgfs_quota_check(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int *ret = data;
+
+ if (mvmvif->dbgfs_quota_min)
+ *ret = -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value > 95)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmvif->dbgfs_quota_min = 0;
+ ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_dbgfs_quota_check, &ret);
+ if (ret == 0) {
+ mvmvif->dbgfs_quota_min = value;
+ iwl_mvm_update_quotas(mvm, false, NULL);
+ }
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[10];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1386,6 +1448,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -1423,6 +1486,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 90500e2d107b..a43b3921c4c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -63,6 +64,7 @@
*
*****************************************************************************/
#include <linux/vmalloc.h>
+#include <linux/ieee80211.h>
#include "mvm.h"
#include "fw-dbg.h"
@@ -71,6 +73,44 @@
#include "debugfs.h"
#include "iwl-fw-error-dump.h"
+static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ char buf[16];
+ int pos, budget;
+
+ if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0);
+ mutex_unlock(&mvm->mutex);
+
+ if (budget < 0)
+ return budget;
+
+ pos = scnprintf(buf, sizeof(buf), "%d\n", budget);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+
+ if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
+ return -EIO;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
@@ -261,17 +301,18 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
{
struct iwl_mvm *mvm = file->private_data;
char buf[16];
- int pos, temp;
+ int pos, ret;
+ s32 temp;
if (!mvm->ucode_loaded)
return -EIO;
mutex_lock(&mvm->mutex);
- temp = iwl_mvm_get_temp(mvm);
+ ret = iwl_mvm_get_temp(mvm, &temp);
mutex_unlock(&mvm->mutex);
- if (temp < 0)
- return temp;
+ if (ret)
+ return -EIO;
pos = scnprintf(buf , sizeof(buf), "%d\n", temp);
@@ -942,6 +983,47 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return count;
}
+static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+ .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+ IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+ IWL_RSS_HASH_TYPE_IPV6_TCP |
+ IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+ };
+ int ret, i, num_repeats, nbytes = count / 2;
+
+ /* reject empty or oversized input; nbytes is a divisor below */
+ if (!nbytes || nbytes > ARRAY_SIZE(cmd.indirection_table))
+ return -EINVAL;
+
+ ret = hex2bin(cmd.indirection_table, buf, nbytes);
+ if (ret)
+ return ret;
+
+ /*
+ * The input is the indirection table, partial or full.
+ * Repeat the pattern if needed.
+ * For example, input of 01020F will be repeated 42 times,
+ * indirecting RSS hash results to queues 1, 2, 15 (skipping
+ * queues 3 - 14).
+ */
+ num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes;
+ for (i = 1; i < num_repeats; i++)
+ memcpy(&cmd.indirection_table[i * nbytes],
+ cmd.indirection_table, nbytes);
+ /* handle cut in the middle pattern for the last places */
+ memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
+ ARRAY_SIZE(cmd.indirection_table) % nbytes);
+
+ memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
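
Tracing the comment's example: for input "01020F", nbytes = 3 and num_repeats = 128 / 3 = 42, so the loop copies the pattern at offsets 3, 6, ..., 123 and the tail memcpy fills the last 128 % 3 = 2 entries. The resulting table is equivalent to this sketch (illustrative, not part of the patch):

    u8 pattern[] = { 0x01, 0x02, 0x0f };
    u8 table[IWL_RSS_INDIRECTION_TABLE_SIZE];
    int i;

    for (i = 0; i < IWL_RSS_INDIRECTION_TABLE_SIZE; i++)
            table[i] = pattern[i % ARRAY_SIZE(pattern)];
    /* table cycles 01 02 0f ... and ends with 01 02 */
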
+
static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -983,7 +1065,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
return -EOPNOTSUPP;
- ret = kstrtouint(buf, 0, &rec_mode);
+ ret = kstrtoint(buf, 0, &rec_mode);
if (ret)
return ret;
@@ -1037,6 +1119,22 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return count;
}
+static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ unsigned int max_amsdu_len;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &max_amsdu_len);
+ if (ret)
+ return ret;
+
+ if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
+ return -EINVAL;
+ mvm->max_amsdu_len = max_amsdu_len;
+
+ return count;
+}
+
#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@@ -1433,6 +1531,8 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
/* Device wide debugfs entries */
+MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget);
+MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
@@ -1454,6 +1554,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
+ (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1479,6 +1582,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
@@ -1496,13 +1601,18 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
if (!debugfs_create_bool("enable_scan_iteration_notif",
S_IRUSR | S_IWUSR,
mvm->debugfs_dir,
&mvm->scan_iter_notif_enabled))
goto err;
+ if (!debugfs_create_bool("drop_bcn_ap_mode", S_IRUSR | S_IWUSR,
+ mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
+ goto err;
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
index 62b9a0a96700..eec52c57f718 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
@@ -251,6 +251,7 @@ enum iwl_wowlan_flags {
ENABLE_L3_FILTERING = BIT(1),
ENABLE_NBNS_FILTERING = BIT(2),
ENABLE_DHCP_FILTERING = BIT(3),
+ ENABLE_STORE_BEACON = BIT(4),
};
struct iwl_wowlan_config_cmd {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index fb6d341d6f3d..7a16e55df012 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,9 +264,8 @@ enum iwl_rx_mpdu_mac_flags2 {
};
enum iwl_rx_mpdu_amsdu_info {
- IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x3f,
- IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x40,
- /* 0x80 bit reserved for now */
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f,
+ IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
enum iwl_rx_l3l4_flags {
@@ -287,16 +286,13 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4),
IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
- /* TODO - verify this is the correct value */
IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8,
IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8,
IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8,
IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8,
IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8,
- /* TODO - define IWL_RX_MPDU_STATUS_SEC_EXT_ENC - this is a stub */
IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8,
- /* TODO - define IWL_RX_MPDU_STATUS_SEC_GCM - this is a stub */
IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8,
IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11),
IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12),
@@ -350,11 +346,11 @@ struct iwl_rx_mpdu_desc {
/* DW8 */
__le32 filter_match;
/* DW9 */
- __le32 gp2_on_air_rise;
- /* DW10 */
__le32 rate_n_flags;
+ /* DW10 */
+ u8 energy_a, energy_b, channel, reserved;
/* DW11 */
- u8 energy_a, energy_b, energy_c, channel;
+ __le32 gp2_on_air_rise;
/* DW12 & DW13 */
__le64 tsf_on_air_rise;
} __packed;
@@ -365,4 +361,85 @@ struct iwl_frame_release {
__le16 nssn;
};
+enum iwl_rss_hash_func_en {
+ IWL_RSS_HASH_TYPE_IPV4_TCP,
+ IWL_RSS_HASH_TYPE_IPV4_UDP,
+ IWL_RSS_HASH_TYPE_IPV4_PAYLOAD,
+ IWL_RSS_HASH_TYPE_IPV6_TCP,
+ IWL_RSS_HASH_TYPE_IPV6_UDP,
+ IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+};
+
+#define IWL_RSS_HASH_KEY_CNT 10
+#define IWL_RSS_INDIRECTION_TABLE_SIZE 128
+#define IWL_RSS_ENABLE 1
+
+/**
+ * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration
+ *
+ * @flags: 1 - enable, 0 - disable
+ * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en
+ * @secret_key: 320 bit input of random key configuration from driver
+ * @indirection_table: indirection table
+ */
+struct iwl_rss_config_cmd {
+ __le32 flags;
+ u8 hash_mask;
+ u8 reserved[3];
+ __le32 secret_key[IWL_RSS_HASH_KEY_CNT];
+ u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
+} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
+
+#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
+#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
+#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
+
+/**
+ * struct iwl_rxq_sync_cmd - RXQ notification trigger
+ *
+ * @flags: flags of the notification. bits 0:3 are the sender queue
+ * @rxq_mask: rx queues to send the notification on
+ * @count: number of bytes in payload, should be DWORD aligned
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_cmd {
+ __le32 flags;
+ __le32 rxq_mask;
+ __le32 count;
+ u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rxq_sync_notification - Notification triggered by RXQ
+ * sync command
+ *
+ * @count: number of bytes in payload
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_notification {
+ __le32 count;
+ u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+* enum iwl_mvm_rxq_notif_type - Internal message identifier
+*
+* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+*/
+enum iwl_mvm_rxq_notif_type {
+ IWL_MVM_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
+* in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+*
+* @type: value from &iwl_mvm_rxq_notif_type
+* @data: payload
+*/
+struct iwl_mvm_internal_rxq_notif {
+ u32 type;
+ u8 data[];
+} __packed;
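
A minimal sketch of how the two structures compose, wrapping an internal notification in the sync command (helper and error handling are illustrative; the driver's real sender may differ):

    static struct iwl_rxq_sync_cmd *build_delba_sync(u32 rxq_mask)
    {
            struct iwl_mvm_internal_rxq_notif *notif;
            struct iwl_rxq_sync_cmd *cmd;
            u32 payload = ALIGN(sizeof(*notif), 4); /* keep DWORD aligned */

            cmd = kzalloc(sizeof(*cmd) + payload, GFP_KERNEL);
            if (!cmd)
                    return NULL;

            cmd->rxq_mask = cpu_to_le32(rxq_mask);
            cmd->count = cpu_to_le32(payload);
            notif = (void *)cmd->payload;
            notif->type = IWL_MVM_RXQ_NOTIF_DEL_BA;
            return cmd;
    }
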
+
#endif /* __fw_api_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index 6fca4fb1d306..90d911394836 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -253,6 +255,68 @@ struct iwl_mvm_keyinfo {
__le64 hw_tkip_mic_tx_key;
} __packed;
+#define IWL_ADD_STA_STATUS_MASK 0xFF
+#define IWL_ADD_STA_BAID_MASK 0xFF00
+
+/**
+ * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: 1: modify existing, 0: add new station
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @mac_id_n_color: the Mac context this station belongs to
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ * alone. 1 - modify, 0 - don't change.
+ * @station_flags: look at %iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ * add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ * Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ * add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ * asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ * keeps track of STA sleep state.
+ * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd_v7 {
+ u8 add_modify;
+ u8 awake_acs;
+ __le16 tid_disable_tx;
+ __le32 mac_id_n_color;
+ u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ __le16 reserved2;
+ u8 sta_id;
+ u8 modify_mask;
+ __le16 reserved3;
+ __le32 station_flags;
+ __le32 station_flags_msk;
+ u8 add_immediate_ba_tid;
+ u8 remove_immediate_ba_tid;
+ __le16 add_immediate_ba_ssn;
+ __le16 sleep_tx_count;
+ __le16 sleep_state_flags;
+ __le16 assoc_id;
+ __le16 beamform_flags;
+ __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
+
/**
* struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
* ( REPLY_ADD_STA = 0x18 )
@@ -282,6 +346,7 @@ struct iwl_mvm_keyinfo {
* mac-addr.
* @beamform_flags: beam forming controls
* @tfd_queue_msk: tfd queues used by this station
+ * @rx_ba_window: aggregation window size
*
* The device contains an internal table of per-station information, with info
* on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -310,7 +375,9 @@ struct iwl_mvm_add_sta_cmd {
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_7 */
+ __le16 rx_ba_window;
+ __le16 reserved;
+} __packed; /* ADD_STA_CMD_API_S_VER_8 */
/**
* struct iwl_mvm_add_sta_key_cmd - add/modify sta key
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 82049bb139c2..4a0fc47c81f2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -119,6 +119,8 @@ enum {
SCAN_ABORT_UMAC = 0xe,
SCAN_COMPLETE_UMAC = 0xf,
+ BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
+
/* station table */
ADD_STA_KEY = 0x17,
ADD_STA = 0x18,
@@ -213,6 +215,8 @@ enum {
MFUART_LOAD_NOTIFICATION = 0xb1,
+ RSS_CONFIG_CMD = 0xb3,
+
REPLY_RX_PHY_CMD = 0xc0,
REPLY_RX_MPDU_CMD = 0xc1,
FRAME_RELEASE = 0xc3,
@@ -277,14 +281,30 @@ enum {
*/
enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+ CTDP_CONFIG_CMD = 0x03,
+ TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+ CT_KILL_NOTIFICATION = 0xFE,
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
+enum iwl_data_path_subcmd_ids {
+ UPDATE_MU_GROUPS_CMD = 0x1,
+ TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+ MU_GROUP_MGMT_NOTIF = 0xFE,
+ RX_QUEUES_NOTIFICATION = 0xFF,
+};
+
+enum iwl_prot_offload_subcmd_ids {
+ STORED_BEACON_NTF = 0xFF,
+};
+
/* command groups */
enum {
LEGACY_GROUP = 0x0,
LONG_GROUP = 0x1,
PHY_OPS_GROUP = 0x4,
+ DATA_PATH_GROUP = 0x5,
+ PROT_OFFLOAD_GROUP = 0xb,
};
/**
@@ -1271,6 +1291,26 @@ struct iwl_fw_bcast_filter {
struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
} __packed; /* BCAST_FILTER_S_VER_1 */
+#define BA_WINDOW_STREAMS_MAX 16
+#define BA_WINDOW_STATUS_TID_MSK 0x000F
+#define BA_WINDOW_STATUS_STA_ID_POS 4
+#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0
+#define BA_WINDOW_STATUS_VALID_MSK BIT(9)
+
+/**
+ * struct iwl_ba_window_status_notif - reordering window's status notification
+ * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63]
+ * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid
+ * @start_seq_num: the start sequence number of the bitmap
+ * @mpdu_rx_count: the number of received MPDUs since entering D0i3
+ */
+struct iwl_ba_window_status_notif {
+ __le64 bitmap[BA_WINDOW_STREAMS_MAX];
+ __le16 ra_tid[BA_WINDOW_STREAMS_MAX];
+ __le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
+ __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
+} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
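
A minimal sketch of decoding one ra_tid entry with the masks above (helper name is illustrative, not part of the patch):

    static bool ba_window_decode(__le16 ra_tid, u8 *sta_id, u8 *tid)
    {
            u16 v = le16_to_cpu(ra_tid);

            if (!(v & BA_WINDOW_STATUS_VALID_MSK))
                    return false;
            *tid = v & BA_WINDOW_STATUS_TID_MSK;
            *sta_id = (v & BA_WINDOW_STATUS_STA_ID_MSK) >>
                      BA_WINDOW_STATUS_STA_ID_POS;
            return true;
    }
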
+
/**
* struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
* @default_discard: default action for this mac (discard (1) / pass (0)).
@@ -1668,15 +1708,77 @@ struct iwl_ext_dts_measurement_cmd {
} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
/**
- * iwl_dts_measurement_notif - notification received with the measurements
+ * struct iwl_dts_measurement_notif_v1 - measurements notification
*
* @temp: the measured temperature
* @voltage: the measured voltage
*/
-struct iwl_dts_measurement_notif {
+struct iwl_dts_measurement_notif_v1 {
__le32 temp;
__le32 voltage;
-} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1 */
+
+/**
+ * struct iwl_dts_measurement_notif_v2 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ * @threshold_idx: the trip index that was crossed
+ */
+struct iwl_dts_measurement_notif_v2 {
+ __le32 temp;
+ __le32 voltage;
+ __le32 threshold_idx;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
+
+/**
+ * struct ct_kill_notif - CT-kill entry notification
+ *
+ * @temperature: the current temperature in celsius
+ * @reserved: reserved
+ */
+struct ct_kill_notif {
+ __le16 temperature;
+ __le16 reserved;
+} __packed; /* GRP_PHY_CT_KILL_NTF */
+
+/**
+* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
+* @CTDP_CMD_OPERATION_START: update the current budget
+* @CTDP_CMD_OPERATION_STOP: stop ctdp
+* @CTDP_CMD_OPERATION_REPORT: get the average budget
+*/
+enum iwl_mvm_ctdp_cmd_operation {
+ CTDP_CMD_OPERATION_START = 0x1,
+ CTDP_CMD_OPERATION_STOP = 0x2,
+ CTDP_CMD_OPERATION_REPORT = 0x4,
+}; /* CTDP_CMD_OPERATION_TYPE_E */
+
+/**
+ * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
+ *
+ * @operation: see &enum iwl_mvm_ctdp_cmd_operation
+ * @budget: the budget in milliwatt
+ * @window_size: defined in API but not used
+ */
+struct iwl_mvm_ctdp_cmd {
+ __le32 operation;
+ __le32 budget;
+ __le32 window_size;
+} __packed;
+
+#define IWL_MAX_DTS_TRIPS 8
+
+/**
+ * struct temp_report_ths_cmd - set temperature thresholds
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct temp_report_ths_cmd {
+ __le32 num_temps;
+ __le16 thresholds[IWL_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
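
A minimal sketch of filling the command, assuming the firmware expects the thresholds sorted in ascending order (the values are made up):

    struct temp_report_ths_cmd cmd = {
            .num_temps = cpu_to_le32(2),
            .thresholds[0] = cpu_to_le16(75),   /* degrees Celsius */
            .thresholds[1] = cpu_to_le16(110),
    };
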
/***********************************
* TDLS API
@@ -1851,4 +1953,53 @@ struct iwl_shared_mem_cfg {
__le32 page_buff_size;
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
+/**
+ * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration
+ *
+ * @reserved: reserved
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of the station in a group. If the station is
+ * in the group, the two bits starting at (group * 2) hold its position
+ */
+struct iwl_mu_group_mgmt_cmd {
+ __le32 reserved;
+ __le32 membership_status[2];
+ __le32 user_position[4];
+} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
+
+/**
+ * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification
+ *
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of the station in a group. If the station is
+ * in the group, the two bits starting at (group * 2) hold its position
+ */
+struct iwl_mu_group_mgmt_notif {
+ __le32 membership_status[2];
+ __le32 user_position[4];
+} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
+
+#define MAX_STORED_BEACON_SIZE 600
+
+/**
+ * struct iwl_stored_beacon_notif - Stored beacon notification
+ *
+ * @system_time: system time on air rise
+ * @tsf: TSF on air rise
+ * @beacon_timestamp: beacon on air rise
+ * @phy_flags: general phy flags: band, modulation, etc.
+ * @channel: channel this beacon was received on
+ * @rates: rate in ucode internal format
+ * @byte_count: frame's byte count
+ * @data: the stored beacon frame, @byte_count bytes long
+ */
+struct iwl_stored_beacon_notif {
+ __le32 system_time;
+ __le64 tsf;
+ __le32 beacon_timestamp;
+ __le16 phy_flags;
+ __le16 channel;
+ __le32 rates;
+ __le32 byte_count;
+ u8 data[MAX_STORED_BEACON_SIZE];
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 0813f8184e10..4856eac120f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -435,6 +435,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
bool monitor_dump_only = false;
int i;
+ if (!IWL_MVM_COLLECT_FW_ERR_DUMP &&
+ !mvm->trans->dbg_dest_tlv)
+ return;
+
lockdep_assert_held(&mvm->mutex);
/* there's no point in fw dump if the bus is dead */
@@ -640,8 +644,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
/* Dump fw's virtual image */
if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
- u32 i;
-
for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
struct iwl_fw_error_dump_paging *paging;
struct page *pages =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 0ccc697fef76..594cd0dc7df9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -107,6 +108,24 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
sizeof(tx_ant_cmd), &tx_ant_cmd);
}
+static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
+{
+ int i;
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+ .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+ IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+ IWL_RSS_HASH_TYPE_IPV6_TCP |
+ IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+ cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
+ memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+
+ return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+}
+
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
int i;
@@ -520,7 +539,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
struct iwl_sf_region st_fwrd_space;
if (ucode_type == IWL_UCODE_REGULAR &&
- iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
+ iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
+ !(fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
else
fw = iwl_get_ucode_image(mvm, ucode_type);
@@ -896,6 +917,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ /* Init RSS configuration */
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ ret = iwl_send_rss_cfg_cmd(mvm);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
+ ret);
+ goto error;
+ }
+ }
+
/* init the fw <-> mac80211 STA mapping */
for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
@@ -925,8 +956,26 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
+#ifdef CONFIG_THERMAL
+ if (iwl_mvm_is_tt_in_fw(mvm)) {
+ /* In order to hand the responsibility for CT-kill and
+ * TX backoff over to the FW, we need to send an empty temperature
+ * reporting command during init time.
+ */
+ iwl_mvm_send_temp_report_ths_cmd(mvm);
+ } else {
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+ }
+
+ /* TODO: read the budget from BIOS / Platform NVM */
+ if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ mvm->cooling_dev.cur_state);
+#else
/* Initialize tx backoffs to the minimal possible */
iwl_mvm_tt_tx_backoff(mvm, 0);
+#endif
WARN_ON(iwl_mvm_config_ltr(mvm));
@@ -962,7 +1011,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
- iwl_trans_stop_device(mvm->trans);
+ iwl_mvm_stop_device(mvm);
return ret;
}
@@ -1006,7 +1055,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
return 0;
error:
- iwl_trans_stop_device(mvm->trans);
+ iwl_mvm_stop_device(mvm);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index bf1e5eb5dbdb..e885db3464b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -744,7 +744,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
* wake-ups.
*/
cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
- if (mvmvif->ap_assoc_sta_count) {
+ if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
} else {
@@ -1462,3 +1462,42 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
iwl_mvm_beacon_loss_iterator,
mb);
}
+
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb;
+ u32 size = le32_to_cpu(sb->byte_count);
+
+ if (size == 0)
+ return;
+
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ /* update rx_status according to the notification's metadata */
+ memset(&rx_status, 0, sizeof(rx_status));
+ rx_status.mactime = le64_to_cpu(sb->tsf);
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
+ rx_status.device_timestamp = le32_to_cpu(sb->system_time);
+ rx_status.band =
+ (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
+ rx_status.band);
+
+ /* copy the data */
+ memcpy(skb_put(skb, size), sb->data, size);
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+ /* pass it as regular rx to mac80211 */
+ ieee80211_rx_napi(mvm->hw, skb, NULL);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index d70a1716f3e0..a50f4df7eae7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -69,7 +70,6 @@
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
-#include <linux/devcoredump.h>
#include <linux/time.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
@@ -85,7 +85,6 @@
#include "testmode.h"
#include "iwl-fw-error-dump.h"
#include "iwl-prph.h"
-#include "iwl-csr.h"
#include "iwl-nvm-parse.h"
#include "fw-dbg.h"
@@ -837,13 +836,17 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
bool tx_agg_ref = false;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 buf_size = params->buf_size;
+ bool amsdu = params->amsdu;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
@@ -884,10 +887,10 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
break;
}
- ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size);
break;
case IEEE80211_AMPDU_RX_STOP:
- ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size);
break;
case IEEE80211_AMPDU_TX_START:
if (!iwl_enable_tx_ampdu(mvm->cfg)) {
@@ -904,7 +907,8 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
- ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
+ ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
+ buf_size, amsdu);
break;
default:
WARN_ON_ONCE(1);
@@ -966,7 +970,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
*/
iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
- iwl_trans_stop_device(mvm->trans);
+ iwl_mvm_stop_device(mvm);
mvm->scan_status = 0;
mvm->ps_disabled = false;
@@ -1135,7 +1139,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
*/
flush_work(&mvm->roc_done_wk);
- iwl_trans_stop_device(mvm->trans);
+ iwl_mvm_stop_device(mvm);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -1143,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ iwl_free_fw_paging(mvm);
+
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
@@ -1166,8 +1172,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
mvm->scan_uid_status[i] = 0;
}
}
-
- mvm->ucode_loaded = false;
}
static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
@@ -1759,6 +1763,50 @@ static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
}
#endif
+static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mu_group_mgmt_cmd cmd = {};
+
+ memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
+ WLAN_MEMBERSHIP_LEN);
+ memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
+ WLAN_USER_POSITION_LEN);
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ UPDATE_MU_GROUPS_CMD),
+ 0, sizeof(cmd), &cmd);
+}
+
+static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ if (vif->mu_mimo_owner) {
+ struct iwl_mu_group_mgmt_notif *notif = _data;
+
+ /*
+ * MU-MIMO Group Id action frame is little endian. We treat
+ * the data received from firmware as if it came from the
+ * action frame, so no conversion is needed.
+ */
+ ieee80211_update_mu_groups(vif,
+ (u8 *)&notif->membership_status,
+ (u8 *)&notif->user_position);
+ }
+}
+
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_mu_mimo_iface_iterator, notif);
+}
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1867,6 +1915,18 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
vif->addr);
}
+ /*
+ * The firmware tracks the MU-MIMO group on its own.
+ * However, on HW restart we should restore this data.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
+ ret = iwl_mvm_update_mu_groups(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to update VHT MU_MIMO groups\n");
+ }
+
iwl_mvm_recalc_multicast(mvm);
iwl_mvm_configure_bcast_filter(mvm);
@@ -1893,7 +1953,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
}
- if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
+ if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
+ /*
+ * Send power command on every beacon change,
+ * because we may not have enabled beacon abort yet.
+ */
+ BSS_CHANGED_BEACON_INFO)) {
ret = iwl_mvm_power_update_mac(mvm);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
@@ -2080,7 +2145,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
-
}
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -2273,6 +2337,11 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
return;
+ if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) {
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+ return;
+ }
+
if (iwlwifi_mod_params.uapsd_disable) {
vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
return;
@@ -2487,10 +2556,8 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
- 200 + vif->bss_conf.beacon_int);
- u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
- 100 + vif->bss_conf.beacon_int);
+ u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
+ u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
if (WARN_ON_ONCE(vif->bss_conf.assoc))
return;
@@ -2582,7 +2649,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_CCMP:
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
@@ -2621,8 +2688,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
* GTK on AP interface is a TX-only key, return 0;
* on IBSS they're per-station and because we're lazy
* we don't support them for RX, so do the same.
+ * CMAC in AP/IBSS modes must be done in software.
*/
- ret = 0;
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ ret = -EOPNOTSUPP;
+ else
+ ret = 0;
key->hw_key_idx = STA_KEY_IDX_INVALID;
break;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ff7c6df9f941..9abbc93e3c06 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,6 +73,10 @@
#include <linux/leds.h>
#include <linux/in6.h>
+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+#endif
+
#include "iwl-op-mode.h"
#include "iwl-trans.h"
#include "iwl-notif-wait.h"
@@ -346,8 +352,9 @@ struct iwl_mvm_vif_bf_data {
* @pm_enabled - Indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
- * @low_latency: indicates that this interface is in low-latency mode
- * (VMACLowLatencyMode)
+ * @low_latency_traffic: indicates low latency traffic was detected
+ * @low_latency_dbgfs: low latency mode set from debugfs
+ * @low_latency_vcmd: low latency mode set from vendor command
* @ps_disabled: indicates that this interface requires PS to be disabled
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
@@ -375,7 +382,7 @@ struct iwl_mvm_vif {
bool ap_ibss_active;
bool pm_enabled;
bool monitor_active;
- bool low_latency;
+ bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd;
bool ps_disabled;
struct iwl_mvm_vif_bf_data bf_data;
@@ -432,6 +439,7 @@ struct iwl_mvm_vif {
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
struct iwl_mac_power_cmd mac_pwr_cmd;
+ int dbgfs_quota_min;
#endif
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
@@ -485,6 +493,12 @@ enum iwl_mvm_scan_type {
IWL_SCAN_TYPE_FRAGMENTED,
};
+enum iwl_mvm_sched_scan_pass_all_states {
+ SCHED_SCAN_PASS_ALL_DISABLED,
+ SCHED_SCAN_PASS_ALL_ENABLED,
+ SCHED_SCAN_PASS_ALL_FOUND,
+};
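
The presumed lifecycle of this state (only the ENABLED -> FOUND step is
visible in this patch, in the rx.c hunk further below; the remaining
transitions live in the scan code outside this excerpt):

	/*
	 * DISABLED -> ENABLED   a pass-all scheduled scan is requested
	 * ENABLED  -> FOUND     a beacon/probe response arrives during scan
	 * any      -> DISABLED  the scan is stopped
	 */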
+
/**
* struct iwl_nvm_section - describes an NVM section in memory.
*
@@ -515,6 +529,30 @@ struct iwl_mvm_tt_mgmt {
bool throttle;
};
+#ifdef CONFIG_THERMAL
+/**
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @temp_trips: temperature thresholds for reporting
+ * @fw_trips_index: indexes into the original temp_trips array
+ * @tzone: thermal zone device data
+ */
+struct iwl_mvm_thermal_device {
+ s16 temp_trips[IWL_MAX_DTS_TRIPS];
+ u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
+ struct thermal_zone_device *tzone;
+};
+
+/*
+ * struct iwl_mvm_cooling_device - cooling device data
+ * @cur_state: current cooling state
+ * @cdev: the thermal cooling device
+ */
+struct iwl_mvm_cooling_device {
+ u32 cur_state;
+ struct thermal_cooling_device *cdev;
+};
+#endif
+
#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
struct iwl_mvm_frame_stats {
@@ -645,6 +683,7 @@ struct iwl_mvm {
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
u32 tfd_drained[IWL_MVM_STATION_COUNT];
u8 rx_ba_sessions;
+ u32 secret_key[IWL_RSS_HASH_KEY_CNT];
/* configured by mac80211 */
u32 rts_threshold;
@@ -654,6 +693,7 @@ struct iwl_mvm {
void *scan_cmd;
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
enum iwl_mvm_scan_type scan_type;
+ enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
/* max number of simultaneous scans the FW supports */
unsigned int max_scans;
@@ -794,6 +834,11 @@ struct iwl_mvm {
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
+#ifdef CONFIG_THERMAL
+ struct iwl_mvm_thermal_device tz_device;
+ struct iwl_mvm_cooling_device cooling_dev;
+#endif
+
s32 temperature; /* Celsius */
/*
* Debug option to set the NIC temperature. This option makes the
@@ -816,6 +861,7 @@ struct iwl_mvm {
/* Indicate if device power save is allowed */
u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
+ unsigned int max_amsdu_len; /* used for debugfs only */
struct ieee80211_vif __rcu *csa_vif;
struct ieee80211_vif __rcu *csa_tx_blocked_vif;
@@ -856,6 +902,12 @@ struct iwl_mvm {
u32 ciphers[6];
struct iwl_mvm_tof_data tof_data;
+
+ /*
+ * Drop beacons from other APs in AP mode when there are no connected
+ * clients.
+ */
+ bool drop_bcn_ap_mode;
};
/* Extract MVM priv from op_mode and _hw */
@@ -934,8 +986,9 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
{
- return fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
+ /* Make sure DQA isn't allowed in the driver until the feature is complete */
+ return false && fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
}
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
@@ -1005,10 +1058,40 @@ static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
IWL_MVM_BT_COEX_MPLUT;
}
+static inline
+bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
+ IWL_MVM_P2P_UAPSD_STANDALONE;
+}
+
static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
{
- /* firmware flag isn't defined yet */
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
+}
+
+static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_THERMAL
+ /* These two TLVs are redundant, since the FW takes responsibility
+ * for CT-kill only after we have sent it at least one temperature
+ * thresholds report command.
+ */
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
+#else /* CONFIG_THERMAL */
return false;
+#endif /* CONFIG_THERMAL */
+}
+
+static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
@@ -1143,6 +1226,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb, int queue);
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+ const u8 *data, u32 count);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ int queue);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
@@ -1184,6 +1271,12 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1420,8 +1513,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
* binding, so this has no real impact. For now, just return
* the current desired low-latency state.
*/
-
- return mvmvif->low_latency;
+ return mvmvif->low_latency_dbgfs ||
+ mvmvif->low_latency_traffic ||
+ mvmvif->low_latency_vcmd;
}
/* hw scheduler queue config */
@@ -1459,32 +1553,29 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
}
-static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
- int mac80211_queue, int fifo,
- int sta_id, int tid, int frame_limit,
- u16 ssn, unsigned int wdg_timeout)
+static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
- struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = fifo,
- .sta_id = sta_id,
- .tid = tid,
- .frame_limit = frame_limit,
- .aggregate = true,
- };
-
- iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
+ mvm->ucode_loaded = false;
+ iwl_trans_stop_device(mvm->trans);
}
+/* Stop/start all mac queues in a given bitmap */
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
-void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff);
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
-int iwl_mvm_get_temp(struct iwl_mvm *mvm);
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
/* Location Aware Regulatory */
struct iwl_mcc_update_resp *
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 7a3da2da6fd0..25a98401a64f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -300,7 +300,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
struct iwl_nvm_section *sections = mvm->nvm_sections;
const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
bool lar_enabled;
- u32 mac_addr0, mac_addr1;
/* Checking for required sections */
if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@@ -336,10 +335,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
if (WARN_ON(!mvm->cfg))
return NULL;
- /* read the mac address from WFMP registers */
- mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
- mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);
-
hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
@@ -352,10 +347,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
- return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
+ return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
regulatory, mac_override, phy_sku,
mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
- lar_enabled, mac_addr0, mac_addr1);
+ lar_enabled);
}
#define MAX_NVM_FILE_LEN 16384
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index e80be9a59520..d278399097dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -33,6 +33,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -204,70 +205,107 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
+/**
+ * enum iwl_rx_handler_context - context for an Rx handler
+ * @RX_HANDLER_SYNC: the handler is called directly in the Rx path, which
+ * can't acquire mvm->mutex.
+ * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex (and
+ * only in this case!), it should be set as ASYNC; it will then be called
+ * from a worker with mvm->mutex held.
+ * @RX_HANDLER_ASYNC_UNLOCKED: if the handler needs to take the mutex
+ * itself, it will be called from a worker without mvm->mutex held.
+ */
+enum iwl_rx_handler_context {
+ RX_HANDLER_SYNC,
+ RX_HANDLER_ASYNC_LOCKED,
+ RX_HANDLER_ASYNC_UNLOCKED,
+};
+
+/**
+ * struct iwl_rx_handlers - handler for an FW notification
+ * @cmd_id: command id
+ * @context: see &iwl_rx_handler_context
+ * @fn: the function called when the notification is received
+ */
struct iwl_rx_handlers {
u16 cmd_id;
- bool async;
+ enum iwl_rx_handler_context context;
void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
-#define RX_HANDLER(_cmd_id, _fn, _async) \
- { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
-#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \
- { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
+#define RX_HANDLER(_cmd_id, _fn, _context) \
+ { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \
+ { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
/*
* Handlers for fw notifications
* Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME
* This list should be in order of frequency for performance purposes.
*
- * The handler can be SYNC - this means that it will be called in the Rx path
- * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
- * only in this case!), it should be set as ASYNC. In that case, it will be
- * called from a worker with mvm->mutex held.
+ * The handler can run in one of three contexts, see &iwl_rx_handler_context
*/
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
- RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
- RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
-
- RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
- RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true),
- RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
+ RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
+ RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
+
+ RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
+ RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
- iwl_mvm_rx_ant_coupling_notif, true),
+ iwl_mvm_rx_ant_coupling_notif, RX_HANDLER_ASYNC_LOCKED),
+
+ RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
+ iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
- RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
- RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
+ RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
+ RX_HANDLER_SYNC),
+ RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
+ RX_HANDLER_ASYNC_LOCKED),
- RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
+ RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
RX_HANDLER(SCAN_ITERATION_COMPLETE,
- iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
+ iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
- iwl_mvm_rx_lmac_scan_complete_notif, true),
+ iwl_mvm_rx_lmac_scan_complete_notif,
+ RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
- false),
+ RX_HANDLER_SYNC),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
- true),
+ RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
- iwl_mvm_rx_umac_scan_iter_complete_notif, false),
+ iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
- RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
+ RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
+ RX_HANDLER_SYNC),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
- false),
+ RX_HANDLER_SYNC),
- RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
+ RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
- iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
- RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
+ iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
+ RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
+ RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
- iwl_mvm_temp_notif, true),
+ iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
+ iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
- true),
- RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
- RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
-
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
+ RX_HANDLER_SYNC),
+ RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
+ RX_HANDLER_ASYNC_LOCKED),
+ RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+ iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
+ RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
+ iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -289,6 +327,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(SCAN_COMPLETE_UMAC),
HCMD_NAME(TOF_CMD),
HCMD_NAME(TOF_NOTIFICATION),
+ HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
HCMD_NAME(ADD_STA_KEY),
HCMD_NAME(ADD_STA),
HCMD_NAME(REMOVE_STA),
@@ -344,6 +383,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(MAC_PM_POWER_TABLE),
HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+ HCMD_NAME(RSS_CONFIG_CMD),
HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
HCMD_NAME(REPLY_RX_PHY_CMD),
HCMD_NAME(REPLY_RX_MPDU_CMD),
@@ -383,16 +423,37 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+ HCMD_NAME(CTDP_CONFIG_CMD),
+ HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+ HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
+ HCMD_NAME(UPDATE_MU_GROUPS_CMD),
+ HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(MU_GROUP_MGMT_NOTIF),
+ HCMD_NAME(RX_QUEUES_NOTIFICATION),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+ HCMD_NAME(STORED_BEACON_NTF),
+};
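
Both arrays must stay sorted by cmd_id because lookups binary-search them; a
hypothetical lookup along these lines (demo helpers, not the driver's actual
ones) shows the access pattern:

#include <linux/bsearch.h>

static int demo_hcmd_cmp(const void *key, const void *elt)
{
	const struct iwl_hcmd_names *name = elt;

	return *(const u8 *)key - name->cmd_id;
}

static const char *demo_hcmd_name(u8 cmd, const struct iwl_hcmd_names *arr,
				  size_t n)
{
	const struct iwl_hcmd_names *found =
		bsearch(&cmd, arr, n, sizeof(*arr), demo_hcmd_cmp);

	return found ? found->cmd_name : "UNKNOWN";
}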
+
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
+ [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+ [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
};
-
/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
@@ -463,8 +524,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (iwl_mvm_has_new_rx_api(mvm)) {
op_mode->ops = &iwl_mvm_ops_mq;
+ trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
} else {
op_mode->ops = &iwl_mvm_ops;
+ trans->rx_mpdu_cmd_hdr_size =
+ sizeof(struct iwl_rx_mpdu_res_start);
if (WARN_ON(trans->num_rx_queues > 1))
goto out_free;
@@ -481,6 +545,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
mvm->sf_state = SF_UNINIT;
mvm->cur_ucode = IWL_UCODE_INIT;
+ mvm->drop_bcn_ap_mode = true;
mutex_init(&mvm->mutex);
mutex_init(&mvm->d0i3_suspend_mutex);
@@ -555,7 +620,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_trans_configure(mvm->trans, &trans_cfg);
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
- trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
@@ -575,9 +639,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
- min_backoff = calc_min_backoff(trans, cfg);
- iwl_mvm_tt_initialize(mvm, min_backoff);
-
if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
else
@@ -607,7 +668,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
if (!err || !iwlmvm_mod_params.init_dbg)
- iwl_trans_stop_device(trans);
+ iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
mutex_unlock(&mvm->mutex);
/* returns 0 if successful, 1 if success but in rfkill */
@@ -630,22 +691,31 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_free;
+ min_backoff = calc_min_backoff(trans, cfg);
+ iwl_mvm_thermal_initialize(mvm, min_backoff);
+
err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
if (err)
goto out_unregister;
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
- /* rpm starts with a taken reference, we can release it now */
- iwl_trans_unref(mvm->trans);
+ /* The transport always starts with a reference taken; we can
+ * release it now if d0i3 is supported.
+ */
+ if (iwl_mvm_is_d0i3_supported(mvm))
+ iwl_trans_unref(mvm->trans);
iwl_mvm_tof_init(mvm);
+ /* init RSS hash key */
+ get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
+
return op_mode;
out_unregister:
ieee80211_unregister_hw(mvm->hw);
iwl_mvm_leds_exit(mvm);
+ iwl_mvm_thermal_exit(mvm);
out_free:
flush_delayed_work(&mvm->fw_dump_wk);
iwl_phy_db_free(mvm->phy_db);
@@ -661,9 +731,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
int i;
+ /* If d0i3 is supported, we have released the reference that
+ * the transport started with, so we should take it back now
+ * that we are leaving.
+ */
+ if (iwl_mvm_is_d0i3_supported(mvm))
+ iwl_trans_ref(mvm->trans);
+
iwl_mvm_leds_exit(mvm);
- iwl_mvm_tt_exit(mvm);
+ iwl_mvm_thermal_exit(mvm);
ieee80211_unregister_hw(mvm->hw);
@@ -684,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
- iwl_free_fw_paging(mvm);
-
iwl_mvm_tof_clean(mvm);
ieee80211_free_hw(mvm->hw);
@@ -694,6 +769,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
struct iwl_async_handler_entry {
struct list_head list;
struct iwl_rx_cmd_buffer rxb;
+ enum iwl_rx_handler_context context;
void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
};
@@ -720,7 +796,6 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
INIT_LIST_HEAD(&local_list);
/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
- mutex_lock(&mvm->mutex);
/*
* Sync with Rx path with a lock. Remove all the entries from this list,
@@ -731,12 +806,15 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
+ if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ mutex_lock(&mvm->mutex);
entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
+ if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ mutex_unlock(&mvm->mutex);
kfree(entry);
}
- mutex_unlock(&mvm->mutex);
}
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
@@ -793,7 +871,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
continue;
- if (!rx_h->async) {
+ if (rx_h->context == RX_HANDLER_SYNC) {
rx_h->fn(mvm, rxb);
return;
}
@@ -807,6 +885,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
entry->rxb._offset = rxb->_offset;
entry->rxb._rx_page_order = rxb->_rx_page_order;
entry->fn = rx_h->fn;
+ entry->context = rx_h->context;
spin_lock(&mvm->async_handlers_lock);
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
@@ -843,28 +922,24 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
iwl_mvm_rx_phy_cmd_mq(mvm, rxb);
+ else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
+ pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
+ iwl_mvm_rx_queue_notif(mvm, rxb, 0);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
-static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
- struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- unsigned long mq;
int q;
- spin_lock_bh(&mvm->queue_info_lock);
- mq = mvm->queue_info[queue].hw_queue_to_mac80211;
- spin_unlock_bh(&mvm->queue_info_lock);
-
if (WARN_ON_ONCE(!mq))
return;
for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
IWL_DEBUG_TX_QUEUES(mvm,
- "queue %d (mac80211 %d) already stopped\n",
- queue, q);
+ "mac80211 %d already stopped\n", q);
continue;
}
@@ -884,24 +959,29 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
iwl_trans_block_txq_ptrs(mvm->trans, false);
}
-static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
unsigned long mq;
- int q;
spin_lock_bh(&mvm->queue_info_lock);
- mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+ mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
spin_unlock_bh(&mvm->queue_info_lock);
+ iwl_mvm_stop_mac_queues(mvm, mq);
+}
+
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+ int q;
+
if (WARN_ON_ONCE(!mq))
return;
for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
IWL_DEBUG_TX_QUEUES(mvm,
- "queue %d (mac80211 %d) still stopped\n",
- queue, q);
+ "mac80211 %d still stopped\n", q);
continue;
}
@@ -909,6 +989,18 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
}
}
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ unsigned long mq;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ iwl_mvm_start_mac_queues(mvm, mq);
+}
+
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
{
if (state)
@@ -1198,7 +1290,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
cmd->offloading_tid = iter_data->offloading_tid;
cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
- ENABLE_DHCP_FILTERING;
+ ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
/*
* The d0i3 uCode takes care of the nonqos counters,
* so configure only the qos seq ones.
@@ -1219,8 +1311,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
struct iwl_wowlan_config_cmd wowlan_config_cmd = {
.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
IWL_WOWLAN_WAKEUP_BEACON_MISS |
- IWL_WOWLAN_WAKEUP_LINK_CHANGE |
- IWL_WOWLAN_WAKEUP_BCN_FILTERING),
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE),
};
struct iwl_d3_manager_config d3_cfg_cmd = {
.min_sleep_time = cpu_to_le32(1000),
@@ -1270,6 +1361,12 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
/* configure wowlan configuration only if needed */
if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
+ /* wake on beacons only if beacon storing isn't supported */
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEACON_STORING))
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+
iwl_mvm_wowlan_config_key_params(mvm,
d0i3_iter_data.connected_vif,
true, flags);
@@ -1510,6 +1607,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, rxb, queue);
+ else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
+ pkt->hdr.group_id == DATA_PATH_GROUP))
+ iwl_mvm_rx_queue_notif(mvm, rxb, queue);
else
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 9de159f1ef2d..f313910cd026 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -259,6 +259,26 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
+static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ bool *is_p2p_standalone = _data;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ *is_p2p_standalone = false;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->bss_conf.assoc)
+ *is_p2p_standalone = false;
+ break;
+
+ default:
+ break;
+ }
+}
+
static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -268,9 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
ETH_ALEN))
return false;
- if (vif->p2p &&
- !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
- return false;
/*
* Avoid using uAPSD if P2P client is associated to GO that uses
* opportunistic power save. This is due to current FW limitation.
@@ -287,6 +304,22 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
if (iwl_mvm_phy_ctx_count(mvm) >= 2)
return false;
+ if (vif->p2p) {
+ /* Allow U-APSD only if P2P is standalone */
+ bool is_p2p_standalone = true;
+
+ if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm))
+ return false;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_p2p_standalone_iterator,
+ &is_p2p_standalone);
+
+ if (!is_p2p_standalone)
+ return false;
+ }
+
return true;
}
@@ -544,7 +577,6 @@ void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_power_vifs {
struct iwl_mvm *mvm;
- struct ieee80211_vif *bf_vif;
struct ieee80211_vif *bss_vif;
struct ieee80211_vif *p2p_vif;
struct ieee80211_vif *ap_vif;
@@ -617,11 +649,6 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
if (mvmvif->phy_ctxt)
if (mvmvif->phy_ctxt->id < MAX_PHYS)
power_iterator->bss_active = true;
-
- if (mvmvif->bf_data.bf_enabled &&
- !WARN_ON(power_iterator->bf_vif))
- power_iterator->bf_vif = vif;
-
break;
default:
@@ -850,29 +877,9 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
}
-static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool enable)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = cpu_to_le32(1),
- };
-
- if (!mvmvif->bf_data.bf_enabled)
- return 0;
-
- if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
- cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
-
- mvmvif->bf_data.ba_enabled = enable;
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
-}
-
-int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags, bool d0i3)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -883,12 +890,20 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
- if (!ret)
+ /* don't change bf_enabled in case of temporary d0i3 configuration */
+ if (!ret && !d0i3)
mvmvif->bf_data.bf_enabled = false;
return ret;
}
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags)
+{
+ return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false);
+}
+
static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
{
bool disable_ps;
@@ -918,21 +933,26 @@ static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
}
static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
- struct iwl_power_vifs *vifs)
+ struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif;
- bool ba_enable;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
- if (!vifs->bf_vif)
+ if (!mvmvif->bf_data.bf_enabled)
return 0;
- mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif);
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
- ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
- !vifs->bf_vif->bss_conf.ps ||
- iwl_mvm_vif_low_latency(mvmvif));
+ mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
+ mvm->ps_disabled ||
+ !vif->bss_conf.ps ||
+ iwl_mvm_vif_low_latency(mvmvif));
- return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
}
int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
@@ -953,7 +973,10 @@ int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
if (ret)
return ret;
- return iwl_mvm_power_set_ba(mvm, &vifs);
+ if (vifs.bss_vif)
+ return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+ return 0;
}
int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
@@ -988,7 +1011,10 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
return ret;
}
- return iwl_mvm_power_set_ba(mvm, &vifs);
+ if (vifs.bss_vif)
+ return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+ return 0;
}
int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
@@ -1025,8 +1051,17 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
IWL_BF_CMD_CONFIG_D0I3,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
- flags, true);
+ /*
+ * When beacon storing is supported, disable beacon filtering
+ * altogether - the latest beacon will be sent when exiting d0i3.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_BEACON_STORING))
+ ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags,
+ true);
+ else
+ ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+ flags, true);
} else {
if (mvmvif->bf_data.bf_enabled)
ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
index 0b762b4f8fad..2141db5bff82 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,6 +76,9 @@ struct iwl_mvm_quota_iterator_data {
int n_interfaces[MAX_BINDINGS];
int colors[MAX_BINDINGS];
int low_latency[MAX_BINDINGS];
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ int dbgfs_min[MAX_BINDINGS];
+#endif
int n_low_latency_bindings;
struct ieee80211_vif *disabled_vif;
};
@@ -129,6 +134,12 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
data->n_interfaces[id]++;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_quota_min)
+ data->dbgfs_min[id] = max(data->dbgfs_min[id],
+ mvmvif->dbgfs_quota_min);
+#endif
+
if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
data->n_low_latency_bindings++;
data->low_latency[id] = true;
@@ -259,6 +270,11 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
if (data.n_interfaces[i] <= 0)
cmd.quotas[idx].quota = cpu_to_le32(0);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ else if (data.dbgfs_min[i])
+ cmd.quotas[idx].quota =
+ cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
+#endif
else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
data.low_latency[i])
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 94caa88df442..61d0a8cd13f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -556,6 +556,7 @@ static char *rs_pretty_rate(const struct rs_rate *rate)
if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX))
rate_str = legacy_rates[rate->index];
else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) &&
+ (rate->index >= IWL_RATE_MCS_0_INDEX) &&
(rate->index <= IWL_RATE_MCS_9_INDEX))
rate_str = ht_vht_rates[rate->index];
else
@@ -1672,6 +1673,20 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
}
}
+static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ struct iwl_scale_tbl_info *tbl,
+ enum rs_action scale_action)
+{
+ struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
+
+ if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
+ tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
+ scale_action == RS_ACTION_DOWNSCALE)
+ sta_priv->tlc_amsdu = false;
+ else
+ sta_priv->tlc_amsdu = true;
+}
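
The policy above reduces to the following table (derived from the condition,
for illustration): TLC leaves A-MSDU enabled only while the link sits at a
fast HT/VHT rate and is not being downscaled.

/*
 *   rate type   index       scale_action   tlc_amsdu
 *   legacy      any         any            false
 *   HT/VHT      < MCS5      any            false
 *   HT/VHT      >= MCS5     DOWNSCALE      false
 *   HT/VHT      >= MCS5     otherwise      true
 */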
+
/*
* setup rate table in uCode
*/
@@ -2062,7 +2077,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
}
/* try decreasing first if applicable */
- if (weak != TPC_INVALID) {
+ if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
+ weak != TPC_INVALID) {
if (weak_tpt == IWL_INVALID_VALUE &&
(strong_tpt == IWL_INVALID_VALUE ||
current_tpt >= strong_tpt)) {
@@ -2414,6 +2430,7 @@ lq_update:
tbl->rate.index = index;
if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
+ rs_set_amsdu_len(mvm, sta, tbl, scale_action);
rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
}
@@ -3097,6 +3114,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
sband = hw->wiphy->bands[band];
lq_sta->lq.sta_id = sta_priv->sta_id;
+ sta_priv->tlc_amsdu = false;
for (j = 0; j < LQ_SIZE; j++)
rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
@@ -3656,10 +3674,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
ssize_t ret;
struct iwl_lq_sta *lq_sta = file->private_data;
+ struct iwl_mvm_sta *mvmsta =
+ container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
struct iwl_mvm *mvm;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct rs_rate *rate = &tbl->rate;
u32 ss_params;
+
mvm = lq_sta->pers.drv;
buff = kmalloc(2048, GFP_KERNEL);
if (!buff)
@@ -3685,10 +3706,11 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(is_ht20(rate)) ? "20MHz" :
(is_ht40(rate)) ? "40MHz" :
(is_ht80(rate)) ? "80Mhz" : "BAD BW");
- desc += sprintf(buff + desc, " %s %s %s\n",
+ desc += sprintf(buff + desc, " %s %s %s %s\n",
(rate->sgi) ? "SGI" : "NGI",
(rate->ldpc) ? "LDPC" : "BCC",
- (lq_sta->is_agg) ? "AGG on" : "");
+ (lq_sta->is_agg) ? "AGG on" : "",
+ (mvmsta->tlc_amsdu) ? "AMSDU on" : "");
}
desc += sprintf(buff+desc, "last tx rate=0x%X\n",
lq_sta->last_rate_n_flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 145ec68ce6f9..485cfc1a4daa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -322,11 +323,9 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
rx_status->band);
- /*
- * TSF as indicated by the fw is at INA time, but mac80211 expects the
- * TSF at the beginning of the MPDU.
- */
- /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
+
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
@@ -448,6 +447,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_update_frame_stats(mvm, rate_n_flags,
rx_status->flag & RX_FLAG_AMPDU_DETAILS);
#endif
+
+ if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
crypt_len, rxb);
}
@@ -622,3 +627,51 @@ void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
}
+
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
+ int i;
+ u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ONCE(pkt_len != sizeof(*notif),
+ "Received window status notification of wrong size (%u)\n",
+ pkt_len))
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
+ struct ieee80211_sta *sta;
+ u8 sta_id, tid;
+ u64 bitmap;
+ u32 ssn;
+ u16 ratid;
+ u16 received_mpdu;
+
+ ratid = le16_to_cpu(notif->ra_tid[i]);
+ /* check that this TID is valid */
+ if (!(ratid & BA_WINDOW_STATUS_VALID_MSK))
+ continue;
+
+ received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
+ if (received_mpdu == 0)
+ continue;
+
+ tid = ratid & BA_WINDOW_STATUS_TID_MSK;
+ /* get the station */
+ sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
+ >> BA_WINDOW_STATUS_STA_ID_POS;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+ bitmap = le64_to_cpu(notif->bitmap[i]);
+ ssn = le32_to_cpu(notif->start_seq_num[i]);
+
+ /* update mac80211 with the bitmap for the reordering buffer */
+ ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap,
+ received_mpdu);
+ }
+ rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 0c073e02fd4c..9a54f2d2a66b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -156,7 +156,14 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
u16 len, u8 crypt_len,
struct iwl_rx_cmd_buffer *rxb)
{
- unsigned int hdrlen, fraglen;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+ unsigned int headlen, fraglen, pad_len = 0;
+ unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
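+	/* the firmware may insert 2 bytes of padding after the IV to
+	 * DWORD-align the payload
+	 */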
+ if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
+ pad_len = 2;
+ len -= pad_len;
/* If frame is small enough to fit in skb->head, pull it completely.
* If not, only pull ieee80211_hdr (including crypto if present, and
@@ -170,14 +177,23 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
* If the latter changes (there are efforts in the standards group
* to do so) we should revisit this and ieee80211_data_to_8023().
*/
- hdrlen = (len <= skb_tailroom(skb)) ? len :
- sizeof(*hdr) + crypt_len + 8;
+ headlen = (len <= skb_tailroom(skb)) ? len :
+ hdrlen + crypt_len + 8;
+ /* The firmware may align the packet to DWORD.
+ * The padding is inserted after the IV.
+	 * After copying the header + IV, skip the padding, if
+	 * present, before copying the packet data.
+ */
+ hdrlen += crypt_len;
memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
- fraglen = len - hdrlen;
+ memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len,
+ headlen - hdrlen);
+
+ fraglen = len - headlen;
if (fraglen) {
- int offset = (void *)hdr + hdrlen -
+ int offset = (void *)hdr + headlen + pad_len -
rxb_addr(rxb) + rxb_offset(rxb);
skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
@@ -201,25 +217,22 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc,
struct ieee80211_rx_status *rx_status)
{
- int energy_a, energy_b, energy_c, max_energy;
+ int energy_a, energy_b, max_energy;
energy_a = desc->energy_a;
energy_a = energy_a ? -energy_a : S8_MIN;
energy_b = desc->energy_b;
energy_b = energy_b ? -energy_b : S8_MIN;
- energy_c = desc->energy_c;
- energy_c = energy_c ? -energy_c : S8_MIN;
max_energy = max(energy_a, energy_b);
- max_energy = max(max_energy, energy_c);
- IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
- energy_a, energy_b, energy_c, max_energy);
+ IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
+ energy_a, energy_b, max_energy);
rx_status->signal = max_energy;
rx_status->chains = 0; /* TODO: phy info */
rx_status->chain_signal[0] = energy_a;
rx_status->chain_signal[1] = energy_b;
- rx_status->chain_signal[2] = energy_c;
+ rx_status->chain_signal[2] = S8_MIN;
}
static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
@@ -288,13 +301,121 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+/*
+ * returns true if a packet received outside a BA session is a duplicate
+ * and should be dropped
+ */
+static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_hdr *hdr,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct iwl_mvm_sta *mvm_sta;
+ struct iwl_mvm_rxq_dup_data *dup_data;
+ u8 baid, tid, sub_frame_idx;
+
+ if (WARN_ON(IS_ERR_OR_NULL(sta)))
+ return false;
+
+ baid = (le32_to_cpu(desc->reorder_data) &
+ IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
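+	/* frames that belong to a BA session are deduplicated by the
+	 * reordering buffer, so only handle frames outside any BA
+	 * session here
+	 */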
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+ return false;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ dup_data = &mvm_sta->dup_data[queue];
+
+ /*
+ * Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+ */
+ if (ieee80211_is_ctl(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)) {
+ rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+ return false;
+ }
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ /* frame has qos control */
+ tid = *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_TID_MASK;
+ else
+ tid = IWL_MAX_TID_COUNT;
+
+	/* If this wasn't part of an A-MSDU, the sub-frame index will be 0 */
+ sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
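+	/* a retransmission with the same sequence control and a
+	 * non-advancing subframe index is a duplicate
+	 */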
+ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+ dup_data->last_seq[tid] == hdr->seq_ctrl &&
+ dup_data->last_sub_frame[tid] >= sub_frame_idx))
+ return true;
+
+ dup_data->last_seq[tid] = hdr->seq_ctrl;
+ dup_data->last_sub_frame[tid] = sub_frame_idx;
+
+ rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+
+ return false;
+}
+
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+ const u8 *data, u32 count)
+{
+ struct iwl_rxq_sync_cmd *cmd;
+ u32 data_size = sizeof(*cmd) + count;
+ int ret;
+
+ /* should be DWORD aligned */
+ if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
+ return -EINVAL;
+
+ cmd = kzalloc(data_size, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->rxq_mask = cpu_to_le32(rxq_mask);
+ cmd->count = cpu_to_le32(count);
+ cmd->flags = 0;
+ memcpy(cmd->payload, data, count);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(DATA_PATH_GROUP,
+ TRIGGER_RX_QUEUES_NOTIF_CMD),
+ 0, data_size, cmd);
+
+ kfree(cmd);
+ return ret;
+}
+
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+ int queue)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rxq_sync_notification *notif;
+ struct iwl_mvm_internal_rxq_notif *internal_notif;
+
+ notif = (void *)pkt->data;
+ internal_notif = (void *)notif->payload;
+
+ switch (internal_notif->type) {
+ case IWL_MVM_RXQ_NOTIF_DEL_BA:
+ /* TODO */
+ break;
+ default:
+ WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
+ }
+}
+
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
- struct ieee80211_hdr *hdr = (void *)(desc + 1);
+ struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
u32 len = le16_to_cpu(desc->mpdu_len);
u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
struct ieee80211_sta *sta = NULL;
@@ -335,6 +456,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, desc, rx_status);
+ /* TSF as indicated by the firmware is at INA time */
+ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
rcu_read_lock();
@@ -390,6 +513,24 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (ieee80211_is_data(hdr->frame_control))
iwl_mvm_rx_csum(sta, skb, desc);
+
+ if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
+ kfree_skb(skb);
+ rcu_read_unlock();
+ return;
+ }
+
+ /*
+ * Our hardware de-aggregates AMSDUs but copies the mac header
+	 * as is to the de-aggregated MPDUs. We need to turn off the
+ * AMSDU bit in the QoS control ourselves.
+ */
+ if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
+ !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+ *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index ea1e177c2ea1..09eb72c4ae43 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,6 +299,12 @@ void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
iwl_mvm_dump_channel_list(notif->results,
notif->scanned_channels, buf,
sizeof(buf)));
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+ IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+ }
}
void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
@@ -380,6 +388,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
aborted ? "aborted" : "completed",
@@ -533,10 +542,13 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
IWL_DEBUG_SCAN(mvm,
"Sending scheduled scan with filtering, n_match_sets %d\n",
req->n_match_sets);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
return false;
}
IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
+
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
return true;
}
@@ -788,6 +800,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
#endif
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
params->type != IWL_SCAN_TYPE_FRAGMENTED)
@@ -930,8 +945,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
return -ENOBUFS;
- if (type == mvm->scan_type)
+ if (type == mvm->scan_type) {
+ IWL_DEBUG_SCAN(mvm,
+ "Ignoring UMAC scan config of the same type\n");
return 0;
+ }
cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
@@ -1071,6 +1089,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
#endif
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
params->type != IWL_SCAN_TYPE_FRAGMENTED)
@@ -1109,7 +1130,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params,
vif));
- if (type == IWL_MVM_SCAN_SCHED)
+ if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
if (iwl_mvm_scan_use_ebs(mvm, vif))
@@ -1298,10 +1319,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
return -EBUSY;
}
- /* we don't support "match all" in the firmware */
- if (!req->n_match_sets)
- return -EOPNOTSUPP;
-
ret = iwl_mvm_check_running_scans(mvm, type);
if (ret)
return ret;
@@ -1355,7 +1372,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
- ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
+ ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
} else {
hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
ret = iwl_mvm_scan_lmac(mvm, vif, &params);
@@ -1397,6 +1414,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
@@ -1431,6 +1449,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
iwl_mvm_dump_channel_list(notif->results,
notif->scanned_channels, buf,
sizeof(buf)));
+
+ if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+ IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+ ieee80211_sched_scan_results(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+ }
}
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
@@ -1525,6 +1549,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
if (uid >= 0 && !mvm->restart_fw) {
ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
@@ -1546,8 +1571,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
* restart_hw, so do not report if FW is about to be
* restarted.
*/
- if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
+ if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
+ !mvm->restart_fw) {
ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ }
}
}
@@ -1583,6 +1611,7 @@ out:
ieee80211_scan_completed(mvm->hw, true);
} else if (notify) {
ieee80211_sched_scan_stopped(mvm->hw);
+ mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index b556e33658d7..ef99942d7169 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -68,6 +70,18 @@
#include "sta.h"
#include "rs.h"
+/*
+ * The new version of the ADD_STA command added new fields at the end of the
+ * structure, so sending the size of the relevant API's structure is enough to
+ * support both API versions.
+ */
+static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_has_new_rx_api(mvm) ?
+ sizeof(struct iwl_mvm_add_sta_cmd) :
+ sizeof(struct iwl_mvm_add_sta_cmd_v7);
+}
+
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
enum nl80211_iftype iftype)
{
@@ -187,12 +201,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
&add_sta_cmd, &status);
if (ret)
return ret;
- switch (status) {
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
break;
@@ -265,6 +280,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_rxq_dup_data *dup_data;
int i, ret, sta_id;
lockdep_assert_held(&mvm->mutex);
@@ -312,6 +328,16 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
}
mvm_sta->agg_tids = 0;
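+	/* the station (and hence its dup_data) is kept across a HW
+	 * restart, so only allocate the duplicate detection data on a
+	 * fresh station add
+	 */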
+ if (iwl_mvm_has_new_rx_api(mvm) &&
+ !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ dup_data = kcalloc(mvm->trans->num_rx_queues,
+ sizeof(*dup_data),
+ GFP_KERNEL);
+ if (!dup_data)
+ return -ENOMEM;
+ mvm_sta->dup_data = dup_data;
+ }
+
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
if (ret)
goto err;
@@ -357,12 +383,13 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
return ret;
- switch (status) {
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
mvmsta->sta_id);
@@ -492,6 +519,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (iwl_mvm_has_new_rx_api(mvm))
+ kfree(mvm_sta->dup_data);
+
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
@@ -623,12 +653,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
if (addr)
memcpy(cmd.addr, addr, ETH_ALEN);
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
return ret;
- switch (status) {
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
IWL_DEBUG_INFO(mvm, "Internal station added.\n");
return 0;
@@ -819,7 +850,7 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#define IWL_MAX_RX_BA_SESSIONS 16
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start)
+ int tid, u16 ssn, bool start, u8 buf_size)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
@@ -839,6 +870,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (start) {
cmd.add_immediate_ba_tid = (u8) tid;
cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
} else {
cmd.remove_immediate_ba_tid = (u8) tid;
}
@@ -846,12 +878,13 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
STA_MODIFY_REMOVE_BA_TID;
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
return ret;
- switch (status) {
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
start ? "start" : "stopp");
@@ -904,12 +937,13 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
status = ADD_STA_SUCCESS;
- ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+ iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
return ret;
- switch (status) {
+ switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
break;
default:
@@ -1011,15 +1045,23 @@ release_locks:
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size)
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ bool amsdu)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
- int queue, fifo, ret;
+ int queue, ret;
u16 ssn;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .sta_id = mvmsta->sta_id,
+ .tid = tid,
+ .frame_limit = buf_size,
+ .aggregate = true,
+ };
+
BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
!= IWL_MAX_TID_COUNT);
@@ -1031,13 +1073,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->state = IWL_AGG_ON;
mvmsta->agg_tids |= BIT(tid);
tid_data->ssn = 0xffff;
+ tid_data->amsdu_in_ampdu_allowed = amsdu;
spin_unlock_bh(&mvmsta->lock);
- fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+ cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
- iwl_mvm_enable_agg_txq(mvm, queue,
- vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
- mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
+ iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
+ ssn, &cfg, wdg_timeout);
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -1640,7 +1682,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
@@ -1731,7 +1774,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
- sizeof(cmd), &cmd);
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
@@ -1766,7 +1809,8 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
};
int ret;
- ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 39fdf5224e81..1a8f69a41405 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -258,8 +258,7 @@ enum iwl_mvm_agg_state {
* This is basically (last acked packet++).
* @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
- * @reduced_tpc: Reduced tx power. Holds the data between the
- * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
* @state: state of the BA agreement establishment / tear down.
* @txq_id: Tx queue used by the BA session
* @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -273,7 +272,7 @@ struct iwl_mvm_tid_data {
u16 next_reclaimed;
/* The rest is Tx AGG related */
u32 rate_n_flags;
- u8 reduced_tpc;
+ bool amsdu_in_ampdu_allowed;
enum iwl_mvm_agg_state state;
u16 txq_id;
u16 ssn;
@@ -294,6 +293,16 @@ struct iwl_mvm_key_pn {
};
/**
+ * struct iwl_mvm_rxq_dup_data - per-station, per-RX-queue data
+ * @last_seq: last sequence number per tid, for duplicate packet detection
+ * @last_sub_frame: last A-MSDU subframe index per tid, for duplicate
+ *	packet detection
+ */
+struct iwl_mvm_rxq_dup_data {
+ __le16 last_seq[IWL_MAX_TID_COUNT + 1];
+ u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
+} ____cacheline_aligned_in_smp;
+
+/**
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
* @tfd_queue_msk: the tfd queues used by the station
@@ -311,6 +320,7 @@ struct iwl_mvm_key_pn {
* @tx_protection: reference counter for controlling the Tx protection.
* @tt_tx_protection: is thermal throttling enable Tx protection?
* @disable_tx: is tx to this STA disabled?
+ * @tlc_amsdu: true if A-MSDU is allowed
* @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
* @sleep_tx_count: the number of frames that we told the firmware to let out
* even when that station is asleep. This is useful in case the queue
@@ -318,6 +328,7 @@ struct iwl_mvm_key_pn {
* we are sending frames from an AMPDU queue and there was a hole in
* the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures
+ * @dup_data: per queue duplicate packet detection data
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -337,14 +348,15 @@ struct iwl_mvm_sta {
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
-
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
+ struct iwl_mvm_rxq_dup_data *dup_data;
/* Temporary, until the new TLC will control the Tx protection */
s8 tx_protection;
bool tt_tx_protection;
bool disable_tx;
+ bool tlc_amsdu;
u8 agg_tids;
u8 sleep_tx_count;
};
@@ -401,11 +413,12 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start);
+ int tid, u16 ssn, bool start, u8 buf_size);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u8 buf_size);
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size,
+ bool amsdu);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 924dd6a41626..2c12789e7550 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -371,20 +371,13 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
iwl_mvm_te_check_trigger(mvm, notif, te_data);
- if (!le32_to_cpu(notif->status)) {
- IWL_DEBUG_TE(mvm,
- "ERROR: Aux ROC Time Event %s notification failure\n",
- (le32_to_cpu(notif->action) &
- TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end");
- return -EINVAL;
- }
-
IWL_DEBUG_TE(mvm,
- "Aux ROC time event notification - UID = 0x%x action %d\n",
+ "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
le32_to_cpu(notif->unique_id),
- le32_to_cpu(notif->action));
+ le32_to_cpu(notif->action), le32_to_cpu(notif->status));
- if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
+ if (!le32_to_cpu(notif->status) ||
+ le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
/* End TE, notify mac80211 */
ieee80211_remain_on_channel_expired(mvm->hw);
iwl_mvm_roc_finished(mvm); /* flush aux queue */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 99d9a35ad5b1..3d2e8b6159bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -115,7 +115,7 @@
* needed by the driver.
*/
-#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 600
#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index fb76004eede4..f1f28255a3a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -64,6 +65,8 @@
*
*****************************************************************************/
+#include <linux/sort.h>
+
#include "mvm.h"
#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
@@ -79,8 +82,10 @@ static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Enter CT Kill\n");
iwl_mvm_set_hw_ctkill_state(mvm, true);
- tt->throttle = false;
- tt->dynamic_smps = false;
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ tt->throttle = false;
+ tt->dynamic_smps = false;
+ }
/* Don't schedule an exit work if we're in test mode, since
* the temperature will not change unless we manually set it
@@ -116,18 +121,21 @@ void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
- struct iwl_dts_measurement_notif *notif;
+ struct iwl_dts_measurement_notif_v1 *notif_v1;
int len = iwl_rx_packet_payload_len(pkt);
int temp;
- if (WARN_ON_ONCE(len < sizeof(*notif))) {
+ /* we can use notif_v1 only, because v2 only adds an additional
+ * parameter, which is not used in this function.
+ */
+ if (WARN_ON_ONCE(len < sizeof(*notif_v1))) {
IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
return -EINVAL;
}
- notif = (void *)pkt->data;
+ notif_v1 = (void *)pkt->data;
- temp = le32_to_cpu(notif->temp);
+ temp = le32_to_cpu(notif_v1->temp);
/* shouldn't be negative, but since it's s32, make sure it isn't */
if (WARN_ON_ONCE(temp < 0))
@@ -158,17 +166,78 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_dts_measurement_notif_v2 *notif_v2;
+ int len = iwl_rx_packet_payload_len(pkt);
int temp;
+ u32 ths_crossed;
/* the notification is handled synchronously in ctkill, so skip here */
if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
return;
temp = iwl_mvm_temp_notif_parse(mvm, pkt);
- if (temp < 0)
+
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ if (temp >= 0)
+ iwl_mvm_tt_temp_changed(mvm, temp);
return;
+ }
+
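+	/* thermal management is handled by the firmware here, so the
+	 * notification must carry the v2 payload, which adds the index
+	 * of the crossed threshold
+	 */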
+ if (WARN_ON_ONCE(len < sizeof(*notif_v2))) {
+ IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
+ return;
+ }
+
+ notif_v2 = (void *)pkt->data;
+ ths_crossed = le32_to_cpu(notif_v2->threshold_idx);
- iwl_mvm_tt_temp_changed(mvm, temp);
+ /* 0xFF in ths_crossed means the notification is not related
+ * to a trip, so we can ignore it here.
+ */
+ if (ths_crossed == 0xFF)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n",
+ temp, ths_crossed);
+
+#ifdef CONFIG_THERMAL
+ if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
+ return;
+
+ /*
+ * We are now handling a temperature notification from the firmware
+ * in ASYNC and hold the mutex. thermal_notify_framework will call
+ * us back through get_temp() which ought to send a SYNC command to
+	 * the firmware and hence take the mutex.
+ * Avoid the deadlock by unlocking the mutex here.
+ */
+ if (mvm->tz_device.tzone) {
+ struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
+
+ mutex_unlock(&mvm->mutex);
+ thermal_notify_framework(tz_dev->tzone,
+ tz_dev->fw_trips_index[ths_crossed]);
+ mutex_lock(&mvm->mutex);
+ }
+#endif /* CONFIG_THERMAL */
+}
+
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct ct_kill_notif *notif;
+ int len = iwl_rx_packet_payload_len(pkt);
+
+ if (WARN_ON_ONCE(len != sizeof(*notif))) {
+ IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n");
+ return;
+ }
+
+ notif = (struct ct_kill_notif *)pkt->data;
+ IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
+ notif->temperature);
+
+ iwl_mvm_enter_ctkill(mvm);
}
static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
@@ -194,12 +263,12 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
}
-int iwl_mvm_get_temp(struct iwl_mvm *mvm)
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
{
struct iwl_notification_wait wait_temp_notif;
static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
DTS_MEASUREMENT_NOTIF_WIDE) };
- int ret, temp;
+ int ret;
if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
@@ -208,7 +277,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
temp_notif, ARRAY_SIZE(temp_notif),
- iwl_mvm_temp_notif_wait, &temp);
+ iwl_mvm_temp_notif_wait, temp);
ret = iwl_mvm_get_temp_cmd(mvm);
if (ret) {
@@ -219,12 +288,10 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
- if (ret) {
+ if (ret)
IWL_ERR(mvm, "Getting the temperature timed out\n");
- return ret;
- }
- return temp;
+ return ret;
}
static void check_exit_ctkill(struct work_struct *work)
@@ -233,10 +300,17 @@ static void check_exit_ctkill(struct work_struct *work)
struct iwl_mvm *mvm;
u32 duration;
s32 temp;
+ int ret;
tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
+ if (iwl_mvm_is_tt_in_fw(mvm)) {
+ iwl_mvm_exit_ctkill(mvm);
+
+ return;
+ }
+
duration = tt->params.ct_kill_duration;
mutex_lock(&mvm->mutex);
@@ -250,13 +324,13 @@ static void check_exit_ctkill(struct work_struct *work)
goto reschedule;
}
- temp = iwl_mvm_get_temp(mvm);
+ ret = iwl_mvm_get_temp(mvm, &temp);
iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL);
__iwl_mvm_mac_stop(mvm);
- if (temp < 0)
+ if (ret)
goto reschedule;
IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
@@ -436,7 +510,378 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
.support_tx_backoff = true,
};
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
+/* budget in mWatt */
+static const u32 iwl_mvm_cdev_budgets[] = {
+ 2000, /* cooling state 0 */
+ 1800, /* cooling state 1 */
+ 1600, /* cooling state 2 */
+ 1400, /* cooling state 3 */
+ 1200, /* cooling state 4 */
+ 1000, /* cooling state 5 */
+ 900, /* cooling state 6 */
+ 800, /* cooling state 7 */
+ 700, /* cooling state 8 */
+ 650, /* cooling state 9 */
+ 600, /* cooling state 10 */
+ 550, /* cooling state 11 */
+ 500, /* cooling state 12 */
+ 450, /* cooling state 13 */
+ 400, /* cooling state 14 */
+ 350, /* cooling state 15 */
+ 300, /* cooling state 16 */
+ 250, /* cooling state 17 */
+ 200, /* cooling state 18 */
+ 150, /* cooling state 19 */
+};
+
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
+{
+ struct iwl_mvm_ctdp_cmd cmd = {
+ .operation = cpu_to_le32(op),
+ .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]),
+ .window_size = 0,
+ };
+ int ret;
+ u32 status;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
+ CTDP_CONFIG_CMD),
+ sizeof(cmd), &cmd, &status);
+
+ if (ret) {
+ IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret);
+ return ret;
+ }
+
+ switch (op) {
+ case CTDP_CMD_OPERATION_START:
+#ifdef CONFIG_THERMAL
+ mvm->cooling_dev.cur_state = state;
+#endif /* CONFIG_THERMAL */
+ break;
+ case CTDP_CMD_OPERATION_REPORT:
+ IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status);
+		/* when called with the CTDP_CMD_OPERATION_REPORT option,
+		 * return the average budget value received from the FW.
+		 * The budget can't be less than or equal to 0, so it's
+		 * possible to distinguish between error values and budgets.
+		 */
+ return status;
+ case CTDP_CMD_OPERATION_STOP:
+ IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n");
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_THERMAL
+static int compare_temps(const void *a, const void *b)
+{
+ return ((s16)le16_to_cpu(*(__le16 *)a) -
+ (s16)le16_to_cpu(*(__le16 *)b));
+}
+
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
+{
+ struct temp_report_ths_cmd cmd = {0};
+ int ret, i, j, idx = 0;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->tz_device.tzone)
+ return -EINVAL;
+
+	/* The driver holds an array of temperature trips that is unsorted
+	 * and uncompressed; the FW should get it compressed and sorted
+ */
+
+	/* compress temp_trips to cmd array, remove uninitialized values */
+ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+ if (mvm->tz_device.temp_trips[i] != S16_MIN) {
+ cmd.thresholds[idx++] =
+ cpu_to_le16(mvm->tz_device.temp_trips[i]);
+ }
+ }
+ cmd.num_temps = cpu_to_le32(idx);
+
+ if (!idx)
+ goto send;
+
+	/* sort cmd array */
+ sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL);
+
+	/* we should save the indexes of the trips because we sort
+	 * and compress the original array
+ */
+ for (i = 0; i < idx; i++) {
+ for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) {
+ if (le16_to_cpu(cmd.thresholds[i]) ==
+ mvm->tz_device.temp_trips[j])
+ mvm->tz_device.fw_trips_index[i] = j;
+ }
+ }
+
+send:
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
+ TEMP_REPORTING_THRESHOLDS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+ ret);
+
+ return ret;
+}
+
+static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
+ int *temperature)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+ int ret;
+ int temp;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = iwl_mvm_get_temp(mvm, &temp);
+ if (ret)
+ goto out;
+
+ *temperature = temp * 1000;
+
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device,
+ int trip, int *temp)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+
+ if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
+ return -EINVAL;
+
+ *temp = mvm->tz_device.temp_trips[trip] * 1000;
+
+ return 0;
+}
+
+static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device,
+ int trip, enum thermal_trip_type *type)
+{
+ if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
+ return -EINVAL;
+
+ *type = THERMAL_TRIP_PASSIVE;
+
+ return 0;
+}
+
+static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
+ int trip, int temp)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+ struct iwl_mvm_thermal_device *tzone;
+ int i, ret;
+ s16 temperature;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if ((temp / 1000) > S16_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ temperature = (s16)(temp / 1000);
+ tzone = &mvm->tz_device;
+
+ if (!tzone) {
+ ret = -EIO;
+ goto out;
+ }
+
+	/* no updates */
+ if (tzone->temp_trips[trip] == temperature) {
+ ret = 0;
+ goto out;
+ }
+
+ /* already existing temperature */
+ for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+ if (tzone->temp_trips[i] == temperature) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ tzone->temp_trips[trip] = temperature;
+
+ ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
+out:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+ .get_temp = iwl_mvm_tzone_get_temp,
+ .get_trip_temp = iwl_mvm_tzone_get_trip_temp,
+ .get_trip_type = iwl_mvm_tzone_get_trip_type,
+ .set_trip_temp = iwl_mvm_tzone_set_trip_temp,
+};
+
+/* make all trips writable */
+#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1)
+
+static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
+{
+ int i;
+ char name[] = "iwlwifi";
+
+ if (!iwl_mvm_is_tt_in_fw(mvm)) {
+ mvm->tz_device.tzone = NULL;
+
+ return;
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ mvm->tz_device.tzone = thermal_zone_device_register(name,
+ IWL_MAX_DTS_TRIPS,
+ IWL_WRITABLE_TRIPS_MSK,
+ mvm, &tzone_ops,
+ NULL, 0, 0);
+ if (IS_ERR(mvm->tz_device.tzone)) {
+ IWL_DEBUG_TEMP(mvm,
+ "Failed to register to thermal zone (err = %ld)\n",
+ PTR_ERR(mvm->tz_device.tzone));
+ mvm->tz_device.tzone = NULL;
+ return;
+ }
+
+ /* 0 is a valid temperature,
+	 * so initialize the array with S16_MIN, which is an invalid temperature
+ */
+ for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++)
+ mvm->tz_device.temp_trips[i] = S16_MIN;
+}
+
+static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1;
+
+ return 0;
+}
+
+static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+
+ if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+ return -EBUSY;
+
+ *state = mvm->cooling_dev.cur_state;
+
+ return 0;
+}
+
+static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long new_state)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+ int ret;
+
+ if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
+ return -EIO;
+
+ if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+ return -EBUSY;
+
+ mutex_lock(&mvm->mutex);
+
+ if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ new_state);
+
+unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
+static struct thermal_cooling_device_ops tcooling_ops = {
+ .get_max_state = iwl_mvm_tcool_get_max_state,
+ .get_cur_state = iwl_mvm_tcool_get_cur_state,
+ .set_cur_state = iwl_mvm_tcool_set_cur_state,
+};
+
+static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
+{
+ char name[] = "iwlwifi";
+
+ if (!iwl_mvm_is_ctdp_supported(mvm))
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+ mvm->cooling_dev.cdev =
+ thermal_cooling_device_register(name,
+ mvm,
+ &tcooling_ops);
+
+ if (IS_ERR(mvm->cooling_dev.cdev)) {
+ IWL_DEBUG_TEMP(mvm,
+ "Failed to register to cooling device (err = %ld)\n",
+ PTR_ERR(mvm->cooling_dev.cdev));
+ mvm->cooling_dev.cdev = NULL;
+ return;
+ }
+}
+
+static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
+{
+ if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
+ thermal_zone_device_unregister(mvm->tz_device.tzone);
+ mvm->tz_device.tzone = NULL;
+}
+
+static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
+{
+ if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev)
+ return;
+
+ IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
+ thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+ mvm->cooling_dev.cdev = NULL;
+}
+#endif /* CONFIG_THERMAL */
+
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
@@ -451,10 +896,20 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
tt->dynamic_smps = false;
tt->min_backoff = min_backoff;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
+
+#ifdef CONFIG_THERMAL
+ iwl_mvm_cooling_device_register(mvm);
+ iwl_mvm_thermal_zone_register(mvm);
+#endif
}
-void iwl_mvm_tt_exit(struct iwl_mvm *mvm)
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
{
cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
+
+#ifdef CONFIG_THERMAL
+ iwl_mvm_cooling_device_unregister(mvm);
+ iwl_mvm_thermal_zone_unregister(mvm);
+#endif
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a040edc55057..75870e68a7c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -65,6 +66,7 @@
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
+#include <net/ip.h>
#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
@@ -182,7 +184,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd->tx_flags = cpu_to_le32(tx_flags);
/* Total # bytes to be transmitted */
- tx_cmd->len = cpu_to_le16((u16)skb->len);
+ tx_cmd->len = cpu_to_le16((u16)skb->len +
+ (uintptr_t)info->driver_data[0]);
tx_cmd->next_frame_len = 0;
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
@@ -299,6 +302,8 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_TKIP:
tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ pn = atomic64_inc_return(&keyconf->tx_pn);
+ ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
break;
@@ -370,6 +375,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
info->hw_queue != info->control.vif->cab_queue)))
return -1;
+ /* This holds the amsdu headers length */
+ info->driver_data[0] = (void *)(uintptr_t)0;
+
/*
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
* in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
@@ -435,33 +443,194 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return 0;
}
-static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb_gso,
+#ifdef CONFIG_INET
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct sk_buff_head *mpdus_skb)
{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
struct sk_buff *tmp, *next;
- char cb[sizeof(skb_gso->cb)];
+ char cb[sizeof(skb->cb)];
+ unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
+ bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+ u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+ u16 amsdu_add, snap_ip_tcp, pad, i = 0;
+ unsigned int dbg_max_amsdu_len;
+ u8 *qc, tid, txf;
+
+ snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
+ tcp_hdrlen(skb);
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (!sta->max_amsdu_len ||
+ !ieee80211_is_data_qos(hdr->frame_control) ||
+ !mvmsta->tlc_amsdu) {
+ num_subframes = 1;
+ pad = 0;
+ goto segment;
+ }
+
+ /*
+ * No need to lock amsdu_in_ampdu_allowed since it can't be modified
+	 * during a BA session.
+ */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
+ num_subframes = 1;
+ pad = 0;
+ goto segment;
+ }
+
+ max_amsdu_len = sta->max_amsdu_len;
+ dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+
+ /* the Tx FIFO to which this A-MSDU will be routed */
+ txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ /*
+ * Don't send an AMSDU that will be longer than the TXF.
+ * Add a security margin of 256 for the TX command + headers.
+ * We also want to have the start of the next packet inside the
+ * fifo to be able to send bursts.
+ */
+ max_amsdu_len = min_t(unsigned int, max_amsdu_len,
+ mvm->shared_mem_cfg.txfifo_size[txf] - 256);
+
+ if (dbg_max_amsdu_len)
+ max_amsdu_len = min_t(unsigned int, max_amsdu_len,
+ dbg_max_amsdu_len);
+
+ /*
+ * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
+ * supported. This is a spec requirement (IEEE 802.11-2015
+ * section 8.7.3 NOTE 3).
+ */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !sta->vht_cap.vht_supported)
+ max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
+
+ /* Sub frame header + SNAP + IP header + TCP header + MSS */
+ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
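+	/* each A-MSDU subframe is padded to a DWORD boundary (0-3 bytes) */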
+ pad = (4 - subf_len) & 0x3;
+
+ /*
+ * If we have N subframes in the A-MSDU, then the A-MSDU's size is
+ * N * subf_len + (N - 1) * pad.
+ */
+ num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
+ if (num_subframes > 1)
+ *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+ tcp_hdrlen(skb) + skb->data_len;
+
+ /*
+ * Make sure we have enough TBs for the A-MSDU:
+ * 2 for each subframe
+ * 1 more for each fragment
+ * 1 more for the potential data in the header
+ */
+ num_subframes =
+ min_t(unsigned int, num_subframes,
+ (mvm->trans->max_skb_frags - 1 -
+ skb_shinfo(skb)->nr_frags) / 2);
+
+ /* This skb fits in one single A-MSDU */
+ if (num_subframes * mss >= tcp_payload_len) {
+ /*
+ * Compute the length of all the data added for the A-MSDU.
+ * This will be used to compute the length to write in the TX
+		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
+ * ETH header for n subframes. Note that the original skb
+ * already had one set of SNAP / IP / TCP headers.
+ */
+ num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
+ info = IEEE80211_SKB_CB(skb);
+ amsdu_add = num_subframes * sizeof(struct ethhdr) +
+ (num_subframes - 1) * (snap_ip_tcp + pad);
+ /* This holds the amsdu headers length */
+ info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+
+ __skb_queue_tail(mpdus_skb, skb);
+ return 0;
+ }
+
+ /*
+ * Trick the segmentation function to make it
+ * create SKBs that can fit into one A-MSDU.
+ */
+segment:
+ skb_shinfo(skb)->gso_size = num_subframes * mss;
+ memcpy(cb, skb->cb, sizeof(cb));
- memcpy(cb, skb_gso->cb, sizeof(cb));
- next = skb_gso_segment(skb_gso, 0);
- if (IS_ERR(next))
+ next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+ skb_shinfo(skb)->gso_size = mss;
+ if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
else if (next)
- consume_skb(skb_gso);
+ consume_skb(skb);
while (next) {
tmp = next;
next = tmp->next;
+
memcpy(tmp->cb, cb, sizeof(tmp->cb));
+ /*
+ * Compute the length of all the data added for the A-MSDU.
+ * This will be used to compute the length to write in the TX
+		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
+ * ETH header for n subframes.
+ */
+ tcp_payload_len = skb_tail_pointer(tmp) -
+ skb_transport_header(tmp) -
+ tcp_hdrlen(tmp) + tmp->data_len;
+
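+		/* each segment can carry up to num_subframes MSDUs, so keep
+		 * the IP IDs of consecutive segments num_subframes apart
+		 */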
+ if (ipv4)
+ ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
+
+ if (tcp_payload_len > mss) {
+ num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
+ info = IEEE80211_SKB_CB(tmp);
+ amsdu_add = num_subframes * sizeof(struct ethhdr) +
+ (num_subframes - 1) * (snap_ip_tcp + pad);
+ info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+ skb_shinfo(tmp)->gso_size = mss;
+ } else {
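+			/* the remainder fits in a single MSS: clear the
+			 * A-MSDU bit and restore a valid IP checksum after
+			 * the ID rewrite
+			 */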
+ qc = ieee80211_get_qos_ctl((void *)tmp->data);
+
+ if (ipv4)
+ ip_send_check(ip_hdr(tmp));
+ *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ skb_shinfo(tmp)->gso_size = 0;
+ }
tmp->prev = NULL;
tmp->next = NULL;
__skb_queue_tail(mpdus_skb, tmp);
+ i++;
}
return 0;
}
+#else /* CONFIG_INET */
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct sk_buff_head *mpdus_skb)
+{
+	/* Impossible to get TSO without CONFIG_INET */
+ WARN_ON(1);
+
+ return -1;
+}
+#endif
/*
* Sets the fields in the Tx cmd that are crypto related
@@ -567,6 +736,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct sk_buff_head mpdus_skbs;
unsigned int payload_len;
int ret;
@@ -577,6 +747,9 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
return -1;
+ /* This holds the amsdu headers length */
+ info->driver_data[0] = (void *)(uintptr_t)0;
+
if (!skb_is_gso(skb))
return iwl_mvm_tx_mpdu(mvm, skb, sta);
@@ -596,7 +769,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
return ret;
while (!skb_queue_empty(&mpdus_skbs)) {
- struct sk_buff *skb = __skb_dequeue(&mpdus_skbs);
+ skb = __skb_dequeue(&mpdus_skbs);
ret = iwl_mvm_tx_mpdu(mvm, skb, sta);
if (ret) {
@@ -745,6 +918,37 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
}
+static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
+ u32 status)
+{
+ struct iwl_fw_dbg_trigger_tlv *trig;
+ struct iwl_fw_dbg_trigger_tx_status *status_trig;
+ int i;
+
+ if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+ return;
+
+ trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
+ status_trig = (void *)trig->data;
+
+ if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
+ /* don't collect on status 0 */
+ if (!status_trig->statuses[i].status)
+ break;
+
+ if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
+ continue;
+
+ iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+ "Tx status %d was received",
+ status & TX_STATUS_MSK);
+ break;
+ }
+}
+
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -760,6 +964,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
struct sk_buff_head skbs;
u8 skb_freed = 0;
u16 next_reclaimed, seq_ctl;
+ bool is_ndp = false;
__skb_queue_head_init(&skbs);
@@ -793,6 +998,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
break;
}
+ iwl_mvm_tx_status_check_trigger(mvm, status);
+
info->status.rates[0].count = tx_resp->failure_frame + 1;
iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
info);
@@ -811,6 +1018,20 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
seq_ctl = le16_to_cpu(hdr->seq_ctrl);
}
+ if (unlikely(!seq_ctl)) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ /*
+			 * If it is an NDP, we can't update next_reclaimed since
+			 * its sequence control is 0. Note that for that same
+			 * reason, NDPs are never sent to A-MPDU'able queues,
+			 * so we can never have more than one freed frame
+			 * for a single Tx response (see WARN_ON below).
+ */
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ is_ndp = true;
+ }
+
/*
* TODO: this is not accurate if we are freeing more than one
* packet.
@@ -874,9 +1095,16 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock);
- tid_data->next_reclaimed = next_reclaimed;
- IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n",
- next_reclaimed);
+ if (!is_ndp) {
+ tid_data->next_reclaimed = next_reclaimed;
+ IWL_DEBUG_TX_REPLY(mvm,
+ "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ } else {
+ IWL_DEBUG_TX_REPLY(mvm,
+ "NDP - don't update next_reclaimed\n");
+ }
+
iwl_mvm_check_ratid_empty(mvm, sta, tid);
if (mvmsta->sleep_tx_count) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 3a989f5c20db..53cdc5760f68 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -376,8 +376,8 @@ struct iwl_error_event_table_v1 {
struct iwl_error_event_table {
u32 valid; /* (nonzero) valid, (0) log is empty */
u32 error_id; /* type of error */
- u32 pc; /* program counter */
- u32 blink1; /* branch link */
+ u32 trm_hw_status0; /* TRM HW status */
+ u32 trm_hw_status1; /* TRM HW status */
u32 blink2; /* branch link */
u32 ilink1; /* interrupt link */
u32 ilink2; /* interrupt link */
@@ -389,7 +389,7 @@ struct iwl_error_event_table {
u32 tsf_hi; /* network timestamp function timer */
u32 gp1; /* GP1 timer register */
u32 gp2; /* GP2 timer register */
- u32 gp3; /* GP3 timer register */
+ u32 fw_rev_type; /* firmware revision type */
u32 major; /* uCode version major */
u32 minor; /* uCode version minor */
u32 hw_ver; /* HW Silicon version */
@@ -408,7 +408,7 @@ struct iwl_error_event_table {
* time_flag */
u32 isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
- u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
+ u32 last_cmd_id; /* last HCMD id handled by the firmware */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
@@ -419,7 +419,7 @@ struct iwl_error_event_table {
u32 u_timestamp; /* indicate when the date and time of the
* compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
-} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */;
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
/*
* UMAC error struct - relevant starting from family 8000 chip.
@@ -529,9 +529,9 @@ static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.data3,
- table.blink1, table.blink2, table.ilink1,
- table.ilink2, table.bcon_time, table.gp1,
- table.gp2, table.gp3, table.ucode_ver, 0,
+ table.blink2, table.ilink1, table.ilink2,
+ table.bcon_time, table.gp1, table.gp2,
+ table.gp3, table.ucode_ver, 0,
table.hw_ver, table.brd_ver);
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id));
@@ -615,14 +615,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.data3,
- table.blink1, table.blink2, table.ilink1,
+ table.blink2, table.ilink1,
table.ilink2, table.bcon_time, table.gp1,
- table.gp2, table.gp3, table.major,
+ table.gp2, table.fw_rev_type, table.major,
table.minor, table.hw_ver, table.brd_ver);
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
desc_lookup(table.error_id));
- IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
- IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
+ IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
@@ -634,7 +634,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
- IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
+ IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
@@ -645,7 +645,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
- IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
+ IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
@@ -937,18 +937,16 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
}
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- bool value)
+ bool prev)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int res;
lockdep_assert_held(&mvm->mutex);
- if (mvmvif->low_latency == value)
+ if (iwl_mvm_vif_low_latency(mvmvif) == prev)
return 0;
- mvmvif->low_latency = value;
-
res = iwl_mvm_update_quotas(mvm, false, NULL);
if (res)
return res;
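
The change above stops writing mvmvif->low_latency directly and compares against the iwl_mvm_vif_low_latency() accessor instead, which lets "low latency" be an aggregate of several causes rather than one flag. A minimal stand-alone sketch of that accessor pattern follows; the field and function names are hypothetical, not the driver's:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical vif state: several independent low-latency causes. */
struct vif {
	bool ll_traffic;	/* heavy traffic detected */
	bool ll_debugfs;	/* forced via debugfs */
};

/* Aggregate accessor: callers compare against this, never one field. */
static bool vif_low_latency(const struct vif *v)
{
	return v->ll_traffic || v->ll_debugfs;
}

static void update_low_latency(struct vif *v, bool prev)
{
	/* Only reconfigure when the aggregate actually changed. */
	if (vif_low_latency(v) == prev)
		return;
	printf("low latency changed: %d -> %d\n", prev, !prev);
}

int main(void)
{
	struct vif v = { .ll_traffic = false, .ll_debugfs = false };
	bool prev = vif_low_latency(&v);

	v.ll_traffic = true;		/* one cause flips */
	update_low_latency(&v, prev);	/* prints the transition */
	return 0;
}
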
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 00335ea6b3eb..05b968506836 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -66,6 +67,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/acpi.h>
@@ -627,6 +629,33 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_free_drv;
+ /* if RTPM is in use, enable it in our device */
+ if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
+ /* We explicitly set the device to active here to
+ * clear contingent errors.
+ */
+ pm_runtime_set_active(&pdev->dev);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ iwlwifi_mod_params.d0i3_entry_delay);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ /* We are not supposed to call pm_runtime_allow()
+ * ourselves; userspace is meant to enable runtime PM via
+ * sysfs. However, since we don't expose that control to
+ * userspace yet, we need to allow/forbid() ourselves.
+ */
+ pm_runtime_allow(&pdev->dev);
+ }
+
+ /* The PCI device starts with a reference taken and we are
+ * supposed to release it here. But to simplify the
+ * interaction with the opmode, we don't do it now, but let
+ * the opmode release it when it's ready. To account for this
+ * reference, we start with ref_count set to 1.
+ */
+ trans_pcie->ref_count = 1;
+
return 0;
out_free_drv:
@@ -641,7 +670,17 @@ static void iwl_pci_remove(struct pci_dev *pdev)
struct iwl_trans *trans = pci_get_drvdata(pdev);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ /* if RTPM was in use, restore it to the state before probe */
+ if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
+ /* We should not call forbid here, but we do for now.
+ * See the comment near pm_runtime_allow() in
+ * iwl_pci_probe().
+ */
+ pm_runtime_forbid(trans->dev);
+ }
+
iwl_drv_stop(trans_pcie->drv);
+
iwl_trans_pcie_free(trans);
}
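
For reference, the runtime-PM bring-up that the probe path above performs, condensed into a sketch for a hypothetical PCI driver; the pm_runtime_* calls are the real kernel API, everything else is illustrative:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

/* Sketch: enable autosuspend-style runtime PM in probe. */
static void example_enable_rtpm(struct pci_dev *pdev, int delay_ms)
{
	/* Mark the device active so the PM core starts from a known state. */
	pm_runtime_set_active(&pdev->dev);

	/* Suspend only after the device has been idle for delay_ms. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, delay_ms);
	pm_runtime_use_autosuspend(&pdev->dev);

	/* Normally userspace flips the sysfs 'control' knob; calling
	 * pm_runtime_allow() here takes that decision in the driver.
	 * The counterpart in remove() is pm_runtime_forbid().
	 */
	pm_runtime_allow(&pdev->dev);
}
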
@@ -693,15 +732,173 @@ static int iwl_pci_resume(struct device *device)
return 0;
}
-static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
+int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status))
+ return 0;
+
+ set_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+
+ /* config the fw */
+ ret = iwl_op_mode_enter_d0i3(trans->op_mode);
+ if (ret == 1) {
+ IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n");
+ clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+ return -EBUSY;
+ }
+ if (ret)
+ goto err;
+
+ ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+ test_bit(STATUS_TRANS_IDLE, &trans->status),
+ msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+ if (!ret) {
+ IWL_ERR(trans, "Timeout entering D0i3\n");
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+
+ return 0;
+err:
+ clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+ iwl_trans_fw_error(trans);
+ return ret;
+}
+
+int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int ret;
+
+ /* sometimes a D0i3 entry is not followed through */
+ if (!test_bit(STATUS_TRANS_IDLE, &trans->status))
+ return 0;
+
+ /* config the fw */
+ ret = iwl_op_mode_exit_d0i3(trans->op_mode);
+ if (ret)
+ goto err;
+
+ /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */
+
+ ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+ !test_bit(STATUS_TRANS_IDLE, &trans->status),
+ msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+ if (!ret) {
+ IWL_ERR(trans, "Timeout exiting D0i3\n");
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ return 0;
+err:
+ clear_bit(STATUS_TRANS_IDLE, &trans->status);
+ iwl_trans_fw_error(trans);
+ return ret;
+}
+
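Both D0i3 paths above follow the same handshake: set a transition flag, ask the opmode to reconfigure the firmware, then sleep on a wait queue until the interrupt path observes completion and flips a status bit. A generic sketch of that pattern, with illustrative names (the real code uses trans->status bits and trans_pcie->d0i3_waitq):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static unsigned long example_status;
#define EXAMPLE_IDLE_BIT 0

static int example_wait_idle(unsigned int timeout_ms)
{
	long ret = wait_event_timeout(example_waitq,
				      test_bit(EXAMPLE_IDLE_BIT, &example_status),
				      msecs_to_jiffies(timeout_ms));

	return ret ? 0 : -ETIMEDOUT;	/* 0 means the condition never came true */
}

/* The interrupt path does the matching wake-up: */
static void example_fw_went_idle(void)
{
	set_bit(EXAMPLE_IDLE_BIT, &example_status);
	wake_up(&example_waitq);
}
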
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+static int iwl_pci_runtime_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+ int ret;
+
+ IWL_DEBUG_RPM(trans, "entering runtime suspend\n");
+
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ ret = iwl_pci_fw_enter_d0i3(trans);
+ if (ret < 0)
+ return ret;
+ }
+
+ trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+
+ iwl_trans_d3_suspend(trans, false, false);
+
+ return 0;
+}
+
+static int iwl_pci_runtime_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+ enum iwl_d3_status d3_status;
+
+ IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n");
+
+ iwl_trans_d3_resume(trans, &d3_status, false, false);
+
+ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return iwl_pci_fw_exit_d0i3(trans);
+
+ return 0;
+}
+
+static int iwl_pci_system_prepare(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+
+ IWL_DEBUG_RPM(trans, "preparing for system suspend\n");
+
+ /* This is called before entering system suspend and before
+ * the runtime resume is called. Set the suspending flag to
+ * prevent the wakelock from being taken.
+ */
+ trans->suspending = true;
+
+ /* Wake the device up from runtime suspend before going to
+ * platform suspend. This is needed because we don't know
+ * whether any wowlan trigger is set and, if it's not, mac80211 will
+ * disconnect (in which case, we can't be in D0i3).
+ */
+ pm_runtime_resume(device);
+
+ return 0;
+}
+
+static void iwl_pci_system_complete(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+
+ IWL_DEBUG_RPM(trans, "completing system suspend\n");
+
+ /* This is called as a counterpart to the prepare op. It is
+ * called either when suspending fails or when suspend
+ * completed successfully. Now there's no risk of grabbing
+ * the wakelock anymore, so we can release the suspending
+ * flag.
+ */
+ trans->suspending = false;
+}
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+
+static const struct dev_pm_ops iwl_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend,
+ iwl_pci_resume)
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+ SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend,
+ iwl_pci_runtime_resume,
+ NULL)
+ .prepare = iwl_pci_system_prepare,
+ .complete = iwl_pci_system_complete,
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+};
#define IWL_PM_OPS (&iwl_dev_pm_ops)
-#else
+#else /* CONFIG_PM_SLEEP */
#define IWL_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
static struct pci_driver iwl_pci_driver = {
.name = DRV_NAME,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 73c95594eabe..dadafbdef9d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -2,6 +2,7 @@
*
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -56,17 +57,23 @@
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
+#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \
+ IWL_MAX_RX_HW_QUEUES * \
+ (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
+#define RX_PENDING_WATERMARK 16
struct iwl_host_cmd;
/* This file includes the declarations that are internal to the
* trans_pcie layer */
+/**
+ * struct iwl_rx_mem_buffer
+ * @page_dma: bus address of rxb page
+ * @page: driver's pointer to the rxb page
+ * @vid: index of this rxb in the global table
+ */
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
struct page *page;
+ u16 vid;
struct list_head list;
};
@@ -90,8 +97,12 @@ struct isr_statistics {
/**
* struct iwl_rxq - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @id: queue index
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
+ * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
+ * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
@@ -103,32 +114,34 @@ struct isr_statistics {
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
* @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
+ * @queue: actual rx queue. Not used for multi-rx queue.
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/
struct iwl_rxq {
- __le32 *bd;
+ int id;
+ void *bd;
dma_addr_t bd_dma;
+ __le32 *used_bd;
+ dma_addr_t used_bd_dma;
u32 read;
u32 write;
u32 free_count;
u32 used_count;
u32 write_actual;
+ u32 queue_size;
struct list_head rx_free;
struct list_head rx_used;
bool need_update;
struct iwl_rb_status *rb_stts;
dma_addr_t rb_stts_dma;
spinlock_t lock;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+ struct napi_struct napi;
struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
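
bd becomes a void pointer above because the descriptor width now depends on the device: 32-bit RBD pointers before the 9000 series, 64-bit from it on. A stand-alone sketch of indexing such a dual-width ring (host-endian for brevity; the driver uses __le32/__le64):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Write descriptor 'val' at slot 'i' of a ring whose element width
 * depends on whether multi-queue (64-bit) RX is supported. */
static void bd_write(void *bd, bool mq, unsigned int i, uint64_t val)
{
	if (mq)
		((uint64_t *)bd)[i] = val;
	else
		((uint32_t *)bd)[i] = (uint32_t)val;
}

int main(void)
{
	bool mq = true;
	size_t elem = mq ? sizeof(uint64_t) : sizeof(uint32_t);
	void *bd = calloc(256, elem);	/* free_size * queue_size, as in the diff */

	bd_write(bd, mq, 0, 0xabcd000ULL);
	printf("slot 0 = %#llx\n", (unsigned long long)((uint64_t *)bd)[0]);
	free(bd);
	return 0;
}
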
/**
* struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
* @req_pending: number of requests the allocator had not processed yet
* @req_ready: number of requests honored and ready for claiming
* @rbd_allocated: RBDs with pages allocated and ready to be handled to
@@ -140,7 +153,6 @@ struct iwl_rxq {
* @rx_alloc: work struct for background calls
*/
struct iwl_rb_allocator {
- struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
atomic_t req_pending;
atomic_t req_ready;
struct list_head rbd_allocated;
@@ -280,6 +292,7 @@ struct iwl_txq {
bool ampdu;
bool block;
unsigned long wd_timeout;
+ struct sk_buff_head overflow_q;
};
static inline dma_addr_t
@@ -297,6 +310,8 @@ struct iwl_tso_hdr_page {
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
+ * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
+ * @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @drv - pointer to iwl_drv
* @trans: pointer to the generic transport area
@@ -321,15 +336,24 @@ struct iwl_tso_hdr_page {
* @fw_mon_phys: physical address of the buffer for the firmware monitor
* @fw_mon_page: points to the first page of the buffer for the firmware monitor
* @fw_mon_size: size of the buffer for the firmware monitor
+ * @msix_entries: array of MSI-X entries
+ * @msix_enabled: true if managed to enable MSI-X
+ * @allocated_vector: the number of interrupt vectors allocated by the OS
+ * @default_irq_num: default irq for non rx interrupt
+ * @fh_init_mask: initial unmasked fh causes
+ * @hw_init_mask: initial unmasked hw causes
+ * @fh_mask: current unmasked fh causes
+ * @hw_mask: current unmasked hw causes
*/
struct iwl_trans_pcie {
- struct iwl_rxq rxq;
+ struct iwl_rxq *rxq;
+ struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
+ struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
struct iwl_rb_allocator rba;
struct iwl_trans *trans;
struct iwl_drv *drv;
struct net_device napi_dev;
- struct napi_struct napi;
struct __percpu iwl_tso_hdr_page *tso_hdr_page;
@@ -359,6 +383,7 @@ struct iwl_trans_pcie {
bool ucode_write_complete;
wait_queue_head_t ucode_write_waitq;
wait_queue_head_t wait_command_queue;
+ wait_queue_head_t d0i3_waitq;
u8 cmd_queue;
u8 cmd_fifo;
@@ -385,6 +410,15 @@ struct iwl_trans_pcie {
dma_addr_t fw_mon_phys;
struct page *fw_mon_page;
u32 fw_mon_size;
+
+ struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
+ bool msix_enabled;
+ u32 allocated_vector;
+ u32 default_irq_num;
+ u32 fh_init_mask;
+ u32 hw_init_mask;
+ u32 fh_mask;
+ u32 hw_mask;
};
static inline struct iwl_trans_pcie *
@@ -413,7 +447,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
+irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
+irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
@@ -468,15 +505,24 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
- clear_bit(STATUS_INT_ENABLED, &trans->status);
-
- /* disable interrupts from uCode/NIC to host */
- iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- /* acknowledge/clear/reset any interrupts still pending
- * from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(trans, CSR_INT, 0xffffffff);
- iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
+ clear_bit(STATUS_INT_ENABLED, &trans->status);
+ if (!trans_pcie->msix_enabled) {
+ /* disable interrupts from uCode/NIC to host */
+ iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ iwl_write32(trans, CSR_INT, 0xffffffff);
+ iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
+ } else {
+ /* disable all the interrupts we might use */
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+ trans_pcie->fh_init_mask);
+ iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+ trans_pcie->hw_init_mask);
+ }
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
@@ -486,8 +532,37 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
set_bit(STATUS_INT_ENABLED, &trans->status);
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
- iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ if (!trans_pcie->msix_enabled) {
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ /*
+ * fh/hw_mask keep all the unmasked causes.
+ * Unlike MSI, with MSI-X a cause is enabled while its mask bit is unset.
+ */
+ trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+ trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+ ~trans_pcie->fh_mask);
+ iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+ ~trans_pcie->hw_mask);
+ }
+}
+
+static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
+ trans_pcie->hw_mask = msk;
+}
+
+static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
+ trans_pcie->fh_mask = msk;
}
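
These helpers write the complement of the enabled set because, as the comment in iwl_enable_interrupts() notes, an MSI-X cause fires while its mask bit is clear. A tiny stand-alone model of that inverted-mask bookkeeping (the register is just a variable here):

#include <stdint.h>
#include <stdio.h>

static uint32_t mask_reg;	/* models CSR_MSIX_*_INT_MASK_AD */

/* Enable exactly the causes in 'enabled': set every other mask bit. */
static void enable_causes(uint32_t enabled)
{
	mask_reg = ~enabled;
}

static int cause_fires(uint32_t cause_bit)
{
	return !(mask_reg & cause_bit);	/* unmasked (0) means enabled */
}

int main(void)
{
	enable_causes(1u << 3);
	printf("cause 3: %d, cause 4: %d\n",
	       cause_fires(1u << 3), cause_fires(1u << 4));	/* 1, 0 */
	return 0;
}
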
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
@@ -495,8 +570,15 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
- trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
- iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ if (!trans_pcie->msix_enabled) {
+ trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+ trans_pcie->hw_init_mask);
+ iwl_enable_fh_int_msk_msix(trans,
+ MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
+ }
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
@@ -504,8 +586,15 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
- iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ if (!trans_pcie->msix_enabled) {
+ trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+ } else {
+ iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+ trans_pcie->fh_init_mask);
+ iwl_enable_hw_int_msk_msix(trans,
+ MSIX_HW_INT_CAUSES_REG_RF_KILL);
+ }
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
@@ -588,4 +677,7 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
}
#endif
+int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
+int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
+
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 152cf9ad9566..4be3c35afd19 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -2,6 +2,7 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -140,8 +141,8 @@
*/
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
- /* Make sure RX_QUEUE_SIZE is a power of 2 */
- BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+ /* Make sure rx queue size is a power of 2 */
+ WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
/*
* There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
@@ -149,7 +150,7 @@ static int iwl_rxq_space(const struct iwl_rxq *rxq)
* The following is equivalent to modulo by RX_QUEUE_SIZE and is well
* defined for negative dividends.
*/
- return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
+ return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
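
The WARN_ON above enforces at run time what the old BUILD_BUG_ON enforced at compile time, since the size is now per-queue: the free-slot formula only works for power-of-two sizes. A stand-alone check of the arithmetic, including the wraparound case:

#include <stdio.h>

/* Free slots in a ring of power-of-two size: one slot is kept unused
 * so read == write unambiguously means "empty". The AND performs the
 * modulo and is well defined even when read - write - 1 wraps below 0. */
static unsigned int rxq_space(unsigned int read, unsigned int write,
			      unsigned int size)
{
	return (read - write - 1) & (size - 1);
}

int main(void)
{
	printf("%u\n", rxq_space(0, 0, 256));	/* empty ring: 255 free */
	printf("%u\n", rxq_space(5, 4, 256));	/* write chasing read: 0 free */
	return 0;
}
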
/*
@@ -160,6 +161,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
return cpu_to_le32((u32)(dma_addr >> 8));
}
+static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
+{
+ iwl_write_prph(trans, ofs, val & 0xffffffff);
+ iwl_write_prph(trans, ofs + 4, val >> 32);
+}
+
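iwl_pcie_write_prph_64() emulates a 64-bit register write with two 32-bit accesses, low word first. The decomposition is plain integer arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;
	uint32_t lo = val & 0xffffffff;	/* written at ofs */
	uint32_t hi = val >> 32;	/* written at ofs + 4 */

	printf("lo=%#x hi=%#x\n", lo, hi);	/* 0x55667788, 0x11223344 */
	return 0;
}
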
/*
* iwl_pcie_rx_stop - stops the Rx DMA
*/
@@ -173,10 +180,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
*/
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
u32 reg;
lockdep_assert_held(&rxq->lock);
@@ -201,41 +207,84 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
}
rxq->write_actual = round_down(rxq->write, 8);
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ if (trans->cfg->mq_rx_supported)
+ iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
+ rxq->write_actual);
+ else
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
+ int i;
- spin_lock(&rxq->lock);
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ if (!rxq->need_update)
+ continue;
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ rxq->need_update = false;
+ spin_unlock(&rxq->lock);
+ }
+}
+
+/*
+ * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx
+ */
+static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
+{
+ struct iwl_rx_mem_buffer *rxb;
+
+ /*
+ * If the device isn't enabled - no need to try to add buffers...
+ * This can happen when we stop the device and still have an interrupt
+ * pending. We stop the APM before we sync the interrupts because we
+ * have to (see comment there). On the other hand, since the APM is
+ * stopped, we cannot access the HW (in particular not prph).
+ * So don't try to restock if the APM has been already stopped.
+ */
+ if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+ return;
- if (!rxq->need_update)
- goto exit_unlock;
+ spin_lock(&rxq->lock);
+ while (rxq->free_count) {
+ __le64 *bd = (__le64 *)rxq->bd;
- iwl_pcie_rxq_inc_wr_ptr(trans);
- rxq->need_update = false;
+ /* Get next free Rx buffer, remove from free list */
+ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+ list);
+ list_del(&rxb->list);
- exit_unlock:
+ /* first 12 bits are expected to be empty */
+ WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+ /* Point to Rx buffer via next RBD in circular buffer */
+ bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+ rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+ rxq->free_count--;
+ }
spin_unlock(&rxq->lock);
+
+ /*
+ * If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8.
+ */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock(&rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+ spin_unlock(&rxq->lock);
+ }
}
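
The restock loop above can simply OR the vid into the descriptor because the page DMA address must have its low 12 bits clear (hence the WARN_ON), so address and table index share one 64-bit word without colliding. A stand-alone pack/unpack of that layout (values are examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t page_dma = 0x1f000ULL;	/* 4 KiB aligned: low 12 bits zero */
	uint16_t vid = 0x2a;		/* index into the global rxb table */

	uint64_t bd = page_dma | vid;		/* pack, as in mq_restock */
	uint64_t addr = bd & ~0xfffULL;		/* unpack the address */
	uint16_t idx = (uint16_t)(bd & 0xfff);	/* unpack the vid */

	printf("addr=%#llx vid=%#x\n",
	       (unsigned long long)addr, idx);	/* 0x1f000, 0x2a */
	return 0;
}
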
/*
- * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
+ * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx
*/
-static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
/*
@@ -251,6 +300,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
spin_lock(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
+ __le32 *bd = (__le32 *)rxq->bd;
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
BUG_ON(rxb && rxb->page);
@@ -261,7 +311,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
list_del(&rxb->list);
/* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
+ bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
@@ -272,12 +322,32 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
spin_lock(&rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans);
+ iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
spin_unlock(&rxq->lock);
}
}
/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static
+void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+ if (trans->cfg->mq_rx_supported)
+ iwl_pcie_rxq_mq_restock(trans, rxq);
+ else
+ iwl_pcie_rxq_sq_restock(trans, rxq);
+}
+
+/*
* iwl_pcie_rx_alloc_page - allocates and returns a page.
*
*/
@@ -285,13 +355,9 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
gfp_t priority)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
struct page *page;
gfp_t gfp_mask = priority;
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
if (trans_pcie->rx_page_order > 0)
gfp_mask |= __GFP_COMP;
@@ -301,16 +367,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
if (net_ratelimit())
IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
trans_pcie->rx_page_order);
- /* Issue an error if the hardware has consumed more than half
- * of its free buffer list and we don't have enough
- * pre-allocated buffers.
+ /*
+ * Issue an error if we don't have enough pre-allocated
+ * buffers.
+ */
- if (rxq->free_count <= RX_LOW_WATERMARK &&
- iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
- net_ratelimit())
+ if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
IWL_CRIT(trans,
- "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
- rxq->free_count);
+ "Failed to alloc_pages\n");
return NULL;
}
return page;
@@ -325,10 +388,10 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
* iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
* allocated buffers.
*/
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+ struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
@@ -372,10 +435,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
__free_pages(page, trans_pcie->rx_page_order);
return;
}
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
spin_lock(&rxq->lock);
@@ -386,41 +445,24 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
}
}
-static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
+static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
int i;
- lockdep_assert_held(&rxq->lock);
-
- for (i = 0; i < RX_QUEUE_SIZE; i++) {
- if (!rxq->pool[i].page)
+ for (i = 0; i < RX_POOL_SIZE; i++) {
+ if (!trans_pcie->rx_pool[i].page)
continue;
- dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+ dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
- __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
- rxq->pool[i].page = NULL;
+ __free_pages(trans_pcie->rx_pool[i].page,
+ trans_pcie->rx_page_order);
+ trans_pcie->rx_pool[i].page = NULL;
}
}
/*
- * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
- *
- * When moving to rx_free an page is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
- */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
- iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
- iwl_pcie_rxq_restock(trans);
-}
-
-/*
* iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
*
* Allocates for each received request 8 pages
@@ -444,6 +486,11 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
while (pending) {
int i;
struct list_head local_allocated;
+ gfp_t gfp_mask = GFP_KERNEL;
+
+ /* Do not post a warning if there are only a few requests */
+ if (pending < RX_PENDING_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
INIT_LIST_HEAD(&local_allocated);
@@ -463,7 +510,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
BUG_ON(rxb->page);
/* Alloc a new receive buffer */
- page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+ page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
if (!page)
continue;
rxb->page = page;
@@ -477,10 +524,6 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
__free_pages(page, trans_pcie->rx_page_order);
continue;
}
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
/* move the allocated entry to the out list */
list_move(&rxb->list, &local_allocated);
@@ -512,40 +555,46 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
}
/*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
*
* Called by the queue when it has posted an allocation request and
* has freed 8 RBDs in order to restock itself.
+ * This function directly moves the allocated RBs to the queue's ownership
+ * and updates the relevant counters.
*/
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
- struct iwl_rx_mem_buffer
- *out[RX_CLAIM_REQ_ALLOC])
+static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+ struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i;
+ lockdep_assert_held(&rxq->lock);
+
/*
* atomic_dec_if_positive returns req_ready - 1 for any scenario.
* If req_ready is 0 atomic_dec_if_positive will return -1 and this
- * function will return -ENOMEM, as there are no ready requests.
+ * function will return early, as there are no ready requests.
* atomic_dec_if_positive will perform the *actual* decrement only if
* req_ready > 0, i.e. - there are ready requests and the function
* hands one request to the caller.
*/
if (atomic_dec_if_positive(&rba->req_ready) < 0)
- return -ENOMEM;
+ return;
spin_lock(&rba->lock);
for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
/* Get next free Rx buffer, remove it from free list */
- out[i] = list_first_entry(&rba->rbd_allocated,
- struct iwl_rx_mem_buffer, list);
- list_del(&out[i]->list);
+ struct iwl_rx_mem_buffer *rxb =
+ list_first_entry(&rba->rbd_allocated,
+ struct iwl_rx_mem_buffer, list);
+
+ list_move(&rxb->list, &rxq->rx_free);
}
spin_unlock(&rba->lock);
- return 0;
+ rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+ rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
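
The comment above relies on the exact return convention of atomic_dec_if_positive(): it reports old - 1 but only stores the decrement when the result stays non-negative. A user-space equivalent built on a C11 compare-and-swap loop makes the -1 early-return case concrete:

#include <stdatomic.h>
#include <stdio.h>

/* Returns old - 1; decrements only if the result stays >= 0. */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0 &&
	       !atomic_compare_exchange_weak(v, &old, old - 1))
		;	/* 'old' is refreshed on failure; retry */
	return old - 1;
}

int main(void)
{
	atomic_int req_ready = 1;

	printf("%d\n", dec_if_positive(&req_ready));	/* 0: one request claimed */
	printf("%d\n", dec_if_positive(&req_ready));	/* -1: nothing ready */
	return 0;
}
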
static void iwl_pcie_rx_allocator_work(struct work_struct *data)
@@ -561,38 +610,83 @@ static void iwl_pcie_rx_allocator_work(struct work_struct *data)
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;
+ int i;
+ int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ sizeof(__le32);
- memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+ if (WARN_ON(trans_pcie->rxq))
+ return -EINVAL;
+
+ trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+ GFP_KERNEL);
+ if (!trans_pcie->rxq)
+ return -EINVAL;
- spin_lock_init(&rxq->lock);
spin_lock_init(&rba->lock);
- if (WARN_ON(rxq->bd || rxq->rb_stts))
- return -EINVAL;
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- &rxq->bd_dma, GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
+ spin_lock_init(&rxq->lock);
+ if (trans->cfg->mq_rx_supported)
+ rxq->queue_size = MQ_RX_TABLE_SIZE;
+ else
+ rxq->queue_size = RX_QUEUE_SIZE;
- /*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb_stts;
+ /*
+ * Allocate the circular buffer of Read Buffer Descriptors
+ * (RBDs)
+ */
+ rxq->bd = dma_zalloc_coherent(dev,
+ free_size * rxq->queue_size,
+ &rxq->bd_dma, GFP_KERNEL);
+ if (!rxq->bd)
+ goto err;
+
+ if (trans->cfg->mq_rx_supported) {
+ rxq->used_bd = dma_zalloc_coherent(dev,
+ sizeof(__le32) *
+ rxq->queue_size,
+ &rxq->used_bd_dma,
+ GFP_KERNEL);
+ if (!rxq->used_bd)
+ goto err;
+ }
+ /*Allocate the driver's pointer to receive buffer status */
+ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma,
+ GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err;
+ }
return 0;
-err_rb_stts:
- dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-err_bd:
+err:
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ if (rxq->bd)
+ dma_free_coherent(dev, free_size * rxq->queue_size,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+
+ if (rxq->used_bd)
+ dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
+ rxq->used_bd, rxq->used_bd_dma);
+ rxq->used_bd_dma = 0;
+ rxq->used_bd = NULL;
+ }
+ kfree(trans_pcie->rxq);
+
return -ENOMEM;
}
@@ -659,65 +753,112 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 rb_size, enabled = 0;
int i;
- lockdep_assert_held(&rxq->lock);
-
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- rxq->free_count = 0;
- rxq->used_count = 0;
+ switch (trans_pcie->rx_buf_size) {
+ case IWL_AMSDU_4K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+ break;
+ case IWL_AMSDU_8K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_8K;
+ break;
+ case IWL_AMSDU_12K:
+ rb_size = RFH_RXF_DMA_RB_SIZE_12K;
+ break;
+ default:
+ WARN_ON(1);
+ rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+ }
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- list_add(&rxq->pool[i].list, &rxq->rx_used);
-}
+ /* Stop Rx DMA */
+ iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+ /* disable free and used rx queue operation */
+ iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
+
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ /* Tell device where to find RBD free table in DRAM */
+ iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
+ (u64)(trans_pcie->rxq[i].bd_dma));
+ /* Tell device where to find RBD used table in DRAM */
+ iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
+ (u64)(trans_pcie->rxq[i].used_bd_dma));
+ /* Tell device where in DRAM to update its Rx status */
+ iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
+ trans_pcie->rxq[i].rb_stts_dma);
+ /* Reset device index tables */
+ iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+ iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+ iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
+
+ enabled |= BIT(i) | BIT(i + 16);
+ }
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
- int i;
+ /* restock default queue */
+ iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);
- lockdep_assert_held(&rba->lock);
+ /*
+ * Enable Rx DMA
+ * Single frame mode
+ * Rx buffer size 4k, 8k or 12k
+ * Min RB size 4 or 8
+ * Drop frames that exceed RB size
+ * 512 RBDs
+ */
+ iwl_write_prph(trans, RFH_RXF_DMA_CFG,
+ RFH_DMA_EN_ENABLE_VAL |
+ rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
+ RFH_RXF_DMA_MIN_RB_4_8 |
+ RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+ RFH_RXF_DMA_RBDCB_SIZE_512);
- INIT_LIST_HEAD(&rba->rbd_allocated);
- INIT_LIST_HEAD(&rba->rbd_empty);
+ /*
+ * Activate DMA snooping.
+ * Set RX DMA chunk size to 64B
+ * Default queue is 0
+ */
+ iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
+ (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+ RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+ /* Enable the relevant rx queues */
+ iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
- for (i = 0; i < RX_POOL_SIZE; i++)
- list_add(&rba->pool[i].list, &rba->rbd_empty);
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i;
+ lockdep_assert_held(&rxq->lock);
- lockdep_assert_held(&rba->lock);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ rxq->free_count = 0;
+ rxq->used_count = 0;
+}
- for (i = 0; i < RX_POOL_SIZE; i++) {
- if (!rba->pool[i].page)
- continue;
- dma_unmap_page(trans->dev, rba->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
- __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
- rba->pool[i].page = NULL;
- }
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+ WARN_ON(1);
+ return 0;
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
+ struct iwl_rxq *def_rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;
- int i, err;
+ int i, err, queue_size, allocator_pool_size, num_alloc;
- if (!rxq->bd) {
+ if (!trans_pcie->rxq) {
err = iwl_pcie_rx_alloc(trans);
if (err)
return err;
}
+ def_rxq = trans_pcie->rxq;
if (!rba->alloc_wq)
rba->alloc_wq = alloc_workqueue("rb_allocator",
WQ_HIGHPRI | WQ_UNBOUND, 1);
@@ -726,34 +867,69 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
spin_lock(&rba->lock);
atomic_set(&rba->req_pending, 0);
atomic_set(&rba->req_ready, 0);
- /* free all first - we might be reconfigured for a different size */
- iwl_pcie_rx_free_rba(trans);
- iwl_pcie_rx_init_rba(rba);
+ INIT_LIST_HEAD(&rba->rbd_allocated);
+ INIT_LIST_HEAD(&rba->rbd_empty);
spin_unlock(&rba->lock);
- spin_lock(&rxq->lock);
-
/* free all first - we might be reconfigured for a different size */
- iwl_pcie_rxq_free_rbs(trans);
- iwl_pcie_rx_init_rxb_lists(rxq);
+ iwl_pcie_free_rbs_pool(trans);
for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
+ def_rxq->queue[i] = NULL;
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
- spin_unlock(&rxq->lock);
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
- iwl_pcie_rx_replenish(trans);
+ rxq->id = i;
- iwl_pcie_rx_hw_init(trans, rxq);
+ spin_lock(&rxq->lock);
+ /*
+ * Set read write pointer to reflect that we have processed
+ * and used all buffers, but have not restocked the Rx queue
+ * with fresh buffers
+ */
+ rxq->read = 0;
+ rxq->write = 0;
+ rxq->write_actual = 0;
+ memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
- spin_lock(&rxq->lock);
- iwl_pcie_rxq_inc_wr_ptr(trans);
- spin_unlock(&rxq->lock);
+ iwl_pcie_rx_init_rxb_lists(rxq);
+
+ if (!rxq->napi.poll)
+ netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
+ iwl_pcie_dummy_napi_poll, 64);
+
+ spin_unlock(&rxq->lock);
+ }
+
+ /* move the pool to the default queue and allocator ownerships */
+ queue_size = trans->cfg->mq_rx_supported ?
+ MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+ allocator_pool_size = trans->num_rx_queues *
+ (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
+ num_alloc = queue_size + allocator_pool_size;
+ for (i = 0; i < num_alloc; i++) {
+ struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
+
+ if (i < allocator_pool_size)
+ list_add(&rxb->list, &rba->rbd_empty);
+ else
+ list_add(&rxb->list, &def_rxq->rx_used);
+ trans_pcie->global_table[i] = rxb;
+ rxb->vid = (u16)i;
+ }
+
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
+ if (trans->cfg->mq_rx_supported) {
+ iwl_pcie_rx_mq_hw_init(trans);
+ } else {
+ iwl_pcie_rxq_sq_restock(trans, def_rxq);
+ iwl_pcie_rx_hw_init(trans, def_rxq);
+ }
+
+ spin_lock(&def_rxq->lock);
+ iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
+ spin_unlock(&def_rxq->lock);
return 0;
}
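
The ownership split above is pure index arithmetic: the first allocator_pool_size entries of rx_pool seed the allocator's empty list, the remaining queue_size entries seed the default queue, and each entry gets vid = index so the hardware can refer to it later. A stand-alone recomputation, using RX_CLAIM_REQ_ALLOC and RX_POST_REQ_ALLOC from this diff and assumed values for the queue count and MQ_RX_NUM_RBDS:

#include <stdio.h>

#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8

int main(void)
{
	int num_rx_queues = 2;		/* assumed */
	int queue_size = 512;		/* assumed MQ_RX_NUM_RBDS */
	int allocator_pool_size =
		num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	int num_alloc = queue_size + allocator_pool_size;
	int i, to_allocator = 0, to_default_queue = 0;

	for (i = 0; i < num_alloc; i++) {
		if (i < allocator_pool_size)
			to_allocator++;		/* seeds rba->rbd_empty */
		else
			to_default_queue++;	/* seeds def_rxq->rx_used */
	}
	printf("allocator=%d default queue=%d\n",
	       to_allocator, to_default_queue);	/* 12 and 512 */
	return 0;
}
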
@@ -761,12 +937,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rb_allocator *rba = &trans_pcie->rba;
+ int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
+ sizeof(__le32);
+ int i;
- /*if rxq->bd is NULL, it means that nothing has been allocated,
- * exit now */
- if (!rxq->bd) {
+ /*
+ * if rxq is NULL, it means that nothing has been allocated,
+ * exit now
+ */
+ if (!trans_pcie->rxq) {
IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
return;
}
@@ -777,27 +957,37 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
rba->alloc_wq = NULL;
}
- spin_lock(&rba->lock);
- iwl_pcie_rx_free_rba(trans);
- spin_unlock(&rba->lock);
-
- spin_lock(&rxq->lock);
- iwl_pcie_rxq_free_rbs(trans);
- spin_unlock(&rxq->lock);
-
- dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
- rxq->bd_dma = 0;
- rxq->bd = NULL;
-
- if (rxq->rb_stts)
- dma_free_coherent(trans->dev,
- sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- else
- IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
- rxq->rb_stts_dma = 0;
- rxq->rb_stts = NULL;
+ iwl_pcie_free_rbs_pool(trans);
+
+ for (i = 0; i < trans->num_rx_queues; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ if (rxq->bd)
+ dma_free_coherent(trans->dev,
+ free_size * rxq->queue_size,
+ rxq->bd, rxq->bd_dma);
+ rxq->bd_dma = 0;
+ rxq->bd = NULL;
+
+ if (rxq->rb_stts)
+ dma_free_coherent(trans->dev,
+ sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ else
+ IWL_DEBUG_INFO(trans,
+ "Free rxq->rb_stts which is NULL\n");
+
+ if (rxq->used_bd)
+ dma_free_coherent(trans->dev,
+ sizeof(__le32) * rxq->queue_size,
+ rxq->used_bd, rxq->used_bd_dma);
+ rxq->used_bd_dma = 0;
+ rxq->used_bd = NULL;
+
+ if (rxq->napi.poll)
+ netif_napi_del(&rxq->napi);
+ }
+ kfree(trans_pcie->rxq);
}
/*
@@ -841,11 +1031,11 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+ struct iwl_rxq *rxq,
struct iwl_rx_mem_buffer *rxb,
bool emergency)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -911,7 +1101,12 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
index = SEQ_TO_INDEX(sequence);
cmd_index = get_cmd_index(&txq->q, index);
- iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
+ if (rxq->id == 0)
+ iwl_op_mode_rx(trans->op_mode, &rxq->napi,
+ &rxcb);
+ else
+ iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
+ &rxcb, rxq->id);
if (reclaim) {
kzfree(txq->entries[cmd_index].free_buf);
@@ -972,11 +1167,11 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/*
* iwl_pcie_rx_handle - Main entry function for receiving responses from fw
*/
-static void iwl_pcie_rx_handle(struct iwl_trans *trans)
+static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
- u32 r, i, j, count = 0;
+ struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
+ u32 r, i, count = 0;
bool emergency = false;
restart:
@@ -986,80 +1181,73 @@ restart:
r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
i = rxq->read;
+ /* W/A 9000 device step A0 wrap-around bug */
+ r &= (rxq->queue_size - 1);
+
/* Rx interrupt, but nothing sent from uCode */
if (i == r)
- IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
+ IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
while (i != r) {
struct iwl_rx_mem_buffer *rxb;
- if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+ if (unlikely(rxq->used_count == rxq->queue_size / 2))
emergency = true;
- rxb = rxq->queue[i];
- rxq->queue[i] = NULL;
+ if (trans->cfg->mq_rx_supported) {
+ /*
+ * used_bd is a 32 bit but only 12 are used to retrieve
+ * the vid
+ */
+ u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
+
+ if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
+ "Invalid rxb index from HW %u\n", (u32)vid))
+ goto out;
+ rxb = trans_pcie->global_table[vid];
+ } else {
+ rxb = rxq->queue[i];
+ rxq->queue[i] = NULL;
+ }
- IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
- iwl_pcie_rx_handle_rb(trans, rxb, emergency);
+ IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
+ iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
- i = (i + 1) & RX_QUEUE_MASK;
+ i = (i + 1) & (rxq->queue_size - 1);
- /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
- * try to claim the pre-allocated buffers from the allocator */
- if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+ /*
+ * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+ * try to claim the pre-allocated buffers from the allocator.
+ * If not ready - will try to reclaim next time.
+ * There is no need to reschedule work - allocator exits only
+ * on success
+ */
+ if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
+ iwl_pcie_rx_allocator_get(trans, rxq);
+
+ if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
struct iwl_rb_allocator *rba = &trans_pcie->rba;
- struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
- if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
- !emergency) {
- /* Add the remaining 6 empty RBDs
- * for allocator use
- */
- spin_lock(&rba->lock);
- list_splice_tail_init(&rxq->rx_used,
- &rba->rbd_empty);
- spin_unlock(&rba->lock);
- }
- /* If not ready - continue, will try to reclaim later.
- * No need to reschedule work - allocator exits only on
- * success */
- if (!iwl_pcie_rx_allocator_get(trans, out)) {
- /* If success - then RX_CLAIM_REQ_ALLOC
- * buffers were retrieved and should be added
- * to free list */
- rxq->used_count -= RX_CLAIM_REQ_ALLOC;
- for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
- list_add_tail(&out[j]->list,
- &rxq->rx_free);
- rxq->free_count++;
- }
- }
- }
- if (emergency) {
+ /* Add the remaining empty RBDs for allocator use */
+ spin_lock(&rba->lock);
+ list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+ spin_unlock(&rba->lock);
+ } else if (emergency) {
count++;
if (count == 8) {
count = 0;
- if (rxq->used_count < RX_QUEUE_SIZE / 3)
+ if (rxq->used_count < rxq->queue_size / 3)
emergency = false;
+
+ rxq->read = i;
spin_unlock(&rxq->lock);
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
- spin_lock(&rxq->lock);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+ iwl_pcie_rxq_restock(trans, rxq);
+ goto restart;
}
}
- /* handle restock for three cases, can be all of them at once:
- * - we just pulled buffers from the allocator
- * - we have 8+ unstolen pages accumulated
- * - we are in emergency and allocated buffers
- */
- if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
- rxq->read = i;
- spin_unlock(&rxq->lock);
- iwl_pcie_rxq_restock(trans);
- goto restart;
- }
}
-
+out:
/* Backtrack one entry */
rxq->read = i;
spin_unlock(&rxq->lock);
@@ -1077,10 +1265,60 @@ restart:
* will be restocked by the next call of iwl_pcie_rxq_restock.
*/
if (unlikely(emergency && count))
- iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+ iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+
+ if (rxq->napi.poll)
+ napi_gro_flush(&rxq->napi, false);
+
+ iwl_pcie_rxq_restock(trans, rxq);
+}
+
+static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
+{
+ u8 queue = entry->entry;
+ struct msix_entry *entries = entry - queue;
+
+ return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
+}
- if (trans_pcie->napi.poll)
- napi_gro_flush(&trans_pcie->napi, false);
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+ struct msix_entry *entry)
+{
+ /*
+ * Before sending the interrupt the HW disables it to prevent
+ * a nested interrupt. This is done by writing 1 to the corresponding
+ * bit in the mask register. After handling the interrupt, it should be
+ * re-enabled by clearing this bit. This register is defined as
+ * write 1 clear (W1C) register, meaning that it's being clear
+ * by writing 1 to the bit.
+ */
+ iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
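
The automask register described above is write-1-to-clear, which inverts the usual read-modify-write intuition. A minimal model:

#include <stdint.h>
#include <stdio.h>

static uint32_t automask = 1u << 5;	/* HW set bit 5 when it fired IRQ 5 */

/* W1C: writing a 1 clears that bit; writing 0 leaves it alone. */
static void w1c_write(uint32_t val)
{
	automask &= ~val;
}

int main(void)
{
	w1c_write(1u << 5);			/* re-enable vector 5 after handling */
	printf("automask=%#x\n", automask);	/* 0: interrupt re-armed */
	return 0;
}
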
+
+/*
+ * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
+ * This interrupt handler should be used with RSS queue only.
+ */
+irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
+{
+ struct msix_entry *entry = dev_id;
+ struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+ struct iwl_trans *trans = trans_pcie->trans;
+
+ if (WARN_ON(entry->entry >= trans->num_rx_queues))
+ return IRQ_NONE;
+
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+ local_bh_disable();
+ iwl_pcie_rx_handle(trans, entry->entry);
+ local_bh_enable();
+
+ iwl_pcie_clear_irq(trans, entry);
+
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+
+ return IRQ_HANDLED;
}
/*
@@ -1413,7 +1651,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rx++;
local_bh_disable();
- iwl_pcie_rx_handle(trans);
+ iwl_pcie_rx_handle(trans, 0);
local_bh_enable();
}
@@ -1556,3 +1794,129 @@ irqreturn_t iwl_pcie_isr(int irq, void *data)
return IRQ_WAKE_THREAD;
}
+
+irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
+{
+ struct msix_entry *entry = dev_id;
+ struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+ struct iwl_trans *trans = trans_pcie->trans;
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+ u32 inta_fh, inta_hw;
+
+ lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+ spin_lock(&trans_pcie->irq_lock);
+ inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+ inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ /*
+ * Clear the cause registers to avoid handling the same cause twice.
+ */
+ iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+ iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ if (unlikely(!(inta_fh | inta_hw))) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (iwl_have_debug_level(IWL_DL_ISR))
+ IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
+ inta_fh,
+ iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+ IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+ isr_stats->tx++;
+ /*
+ * Wake up uCode load routine,
+ * now that load is complete
+ */
+ trans_pcie->ucode_write_complete = true;
+ wake_up(&trans_pcie->ucode_write_waitq);
+ }
+
+ /* Error detected by uCode */
+ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
+ (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
+ IWL_ERR(trans,
+ "Microcode SW error detected. Restarting 0x%X.\n",
+ inta_fh);
+ isr_stats->sw++;
+ iwl_pcie_irq_handle_error(trans);
+ }
+
+ /* After checking FH register check HW register */
+ if (iwl_have_debug_level(IWL_DL_ISR))
+ IWL_DEBUG_ISR(trans,
+ "ISR inta_hw 0x%08x, enabled 0x%08x\n",
+ inta_hw,
+ iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
+ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+ isr_stats->alive++;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+ iwl_pcie_rxq_check_wrptr(trans);
+ iwl_pcie_txq_check_wrptrs(trans);
+
+ isr_stats->wakeup++;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
+ IWL_ERR(trans, "Microcode CT kill error detected.\n");
+ isr_stats->ctkill++;
+ }
+
+ /* HW RF KILL switch toggled */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
+ bool hw_rfkill;
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+ hw_rfkill ? "disable radio" : "enable radio");
+
+ isr_stats->rfkill++;
+
+ mutex_lock(&trans_pcie->mutex);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ mutex_unlock(&trans_pcie->mutex);
+ if (hw_rfkill) {
+ set_bit(STATUS_RFKILL, &trans->status);
+ if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status))
+ IWL_DEBUG_RF_KILL(trans,
+ "Rfkill while SYNC HCMD in flight\n");
+ wake_up(&trans_pcie->wait_command_queue);
+ } else {
+ clear_bit(STATUS_RFKILL, &trans->status);
+ }
+ }
+
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+ IWL_ERR(trans,
+ "Hardware error detected. Restarting.\n");
+
+ isr_stats->hw++;
+ iwl_pcie_irq_handle_error(trans);
+ }
+
+ iwl_pcie_clear_irq(trans, entry);
+
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 5a854c609477..b2b79354d5c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -72,6 +72,7 @@
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
@@ -615,38 +616,38 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
dma_addr_t phy_addr, u32 byte_cnt)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
int ret;
trans_pcie->ucode_write_complete = false;
- iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return -EIO;
+
+ iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
- iwl_write_direct32(trans,
- FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
- dst_addr);
+ iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+ dst_addr);
- iwl_write_direct32(trans,
- FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
- phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+ iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+ phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
- iwl_write_direct32(trans,
- FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
- (iwl_get_dma_hi_addr(phy_addr)
- << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+ iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
- iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
- FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+ iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
+ BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
- iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+ iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+
+ iwl_trans_release_nic_access(trans, &flags);
ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
trans_pcie->ucode_write_complete, 5 * HZ);
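
The rewrite above replaces several iwl_write_direct32() calls, each of which wakes the NIC and lets it sleep again internally, with one grab/release bracket around plain iwl_write32() writes, then waits for the DMA-done interrupt. The shape of that pattern, as a sketch with illustrative demo_* names:

static int demo_load_chunk(struct demo_dev *dev)
{
	unsigned long flags;

	dev->write_done = false;

	if (!demo_grab_nic_access(dev, &flags))	/* wake the NIC once */
		return -EIO;

	/* plain writes: no per-call wake/sleep as with the direct variants */
	demo_write32(dev, DEMO_REG_A, 0x1);
	demo_write32(dev, DEMO_REG_B, 0x2);

	demo_release_nic_access(dev, &flags);

	/* the ISR sets write_done and wakes the queue on completion */
	if (!wait_event_timeout(dev->write_waitq, dev->write_done, 5 * HZ))
		return -ETIMEDOUT;

	return 0;
}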
@@ -731,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
*/
val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
if (val & (BIT(1) | BIT(17))) {
- IWL_INFO(trans,
- "can't access the RSA semaphore it is write protected\n");
+ IWL_DEBUG_INFO(trans,
+ "can't access the RSA semaphore it is write protected\n");
return 0;
}
@@ -1122,6 +1123,20 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
iwl_pcie_prepare_card_hw(trans);
}
+static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans_pcie->msix_enabled) {
+ int i;
+
+ for (i = 0; i < trans_pcie->allocated_vector; i++)
+ synchronize_irq(trans_pcie->msix_entries[i].vector);
+ } else {
+ synchronize_irq(trans_pcie->pci_dev->irq);
+ }
+}
+
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill)
{
@@ -1148,7 +1163,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
iwl_disable_interrupts(trans);
/* Make sure it finished running */
- synchronize_irq(trans_pcie->pci_dev->irq);
+ iwl_pcie_synchronize_irqs(trans);
mutex_lock(&trans_pcie->mutex);
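
Context for the helper's first call site above: with MSI-X every vector is an independent interrupt line, so quiescing before firmware load has to synchronize all of them rather than just pdev->irq. The ordering being enforced, as an illustrative sketch:

	demo_disable_interrupts(dev);	/* no new handler invocations */
	demo_synchronize_irqs(dev);	/* wait out in-flight handlers on
					 * every MSI-X vector */
	demo_touch_shared_state(dev);	/* now race-free against the ISRs */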
@@ -1248,11 +1263,10 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
_iwl_trans_pcie_stop_device(trans, true);
}
-static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
+ bool reset)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+ if (!reset) {
/* Enable persistence mode to avoid reset */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
@@ -1269,14 +1283,14 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
iwl_pcie_disable_ict(trans);
- synchronize_irq(trans_pcie->pci_dev->irq);
+ iwl_pcie_synchronize_irqs(trans);
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
- if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) {
+ if (reset) {
/*
* reset TX queues -- some of their registers reset during S3
* so if we don't reset everything here the D3 image would try
@@ -1290,7 +1304,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
enum iwl_d3_status *status,
- bool test)
+ bool test, bool reset)
{
u32 val;
int ret;
@@ -1325,7 +1339,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_pcie_set_pwr(trans, false);
- if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+ if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} else {
@@ -1348,6 +1362,153 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}
+struct iwl_causes_list {
+ u32 cause_num;
+ u32 mask_reg;
+ u8 addr;
+};
+
+static struct iwl_causes_list causes_list[] = {
+ {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
+ {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
+ {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
+ {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
+ {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
+ {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
+ {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
+ {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
+ {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+ {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+ {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+ {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+ {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
+static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+{
+ u32 val, max_rx_vector, i;
+ struct iwl_trans *trans = trans_pcie->trans;
+
+ max_rx_vector = trans_pcie->allocated_vector - 1;
+
+ if (!trans_pcie->msix_enabled)
+ return;
+
+ iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
+
+ /*
+	 * Each cause from the list above, as well as each RX cause, is
+	 * represented by one byte in the IVAR table. The first (N - 1)
+	 * bytes are mapped to the first (N - 1) vectors, which therefore
+	 * serve as RX vectors. All non-RX causes are then mapped to the
+	 * default queue (the N'th vector).
+ */
+ for (i = 0; i < max_rx_vector; i++) {
+ iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i));
+ iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD,
+ BIT(MSIX_FH_INT_CAUSES_Q(i)));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+ val = trans_pcie->default_irq_num |
+ MSIX_NON_AUTO_CLEAR_CAUSE;
+ iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+ iwl_clear_bit(trans, causes_list[i].mask_reg,
+ causes_list[i].cause_num);
+ }
+ trans_pcie->fh_init_mask =
+ ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
+ trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+ trans_pcie->hw_init_mask =
+ ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
+ trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+}
+
+static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
+ struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 pci_cmd;
+ int max_vector;
+ int ret, i;
+
+ if (trans->cfg->mq_rx_supported) {
+ max_vector = min_t(u32, (num_possible_cpus() + 1),
+ IWL_MAX_RX_HW_QUEUES);
+ for (i = 0; i < max_vector; i++)
+ trans_pcie->msix_entries[i].entry = i;
+
+ ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
+ MSIX_MIN_INTERRUPT_VECTORS,
+ max_vector);
+ if (ret > 1) {
+ IWL_DEBUG_INFO(trans,
+ "Enable MSI-X allocate %d interrupt vector\n",
+ ret);
+ trans_pcie->allocated_vector = ret;
+ trans_pcie->default_irq_num =
+ trans_pcie->allocated_vector - 1;
+ trans_pcie->trans->num_rx_queues =
+ trans_pcie->allocated_vector - 1;
+ trans_pcie->msix_enabled = true;
+
+ return;
+ }
+ IWL_DEBUG_INFO(trans,
+ "ret = %d %s move to msi mode\n", ret,
+ (ret == 1) ?
+ "can't allocate more than 1 interrupt vector" :
+ "failed to enable msi-x mode");
+ pci_disable_msix(pdev);
+ }
+
+ ret = pci_enable_msi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+ }
+ }
+}
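
The probe above is a graceful-degradation ladder: request an MSI-X vector range sized to min(num_possible_cpus() + 1, RX hardware queues), fall back to MSI, and as a last resort re-enable legacy INTx (clearing PCI_COMMAND_INTX_DISABLE works around chips that leave it set). Condensed sketch with illustrative demo_* names:

static void demo_set_irq_capa(struct pci_dev *pdev, struct demo_dev *dev)
{
	int i, nvec = min_t(u32, num_possible_cpus() + 1, DEMO_MAX_VECS);
	int ret;

	for (i = 0; i < nvec; i++)
		dev->msix_entries[i].entry = i;

	/* returns the number of vectors granted, or a negative errno */
	ret = pci_enable_msix_range(pdev, dev->msix_entries, 2, nvec);
	if (ret > 0) {
		dev->num_vectors = ret;
		dev->msix_enabled = true;
		return;
	}

	if (pci_enable_msi(pdev)) {	/* MSI failed too: ensure INTx works */
		u16 cmd;

		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(pdev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}
}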
+
+static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
+ struct iwl_trans_pcie *trans_pcie)
+{
+ int i, last_vector;
+
+ last_vector = trans_pcie->trans->num_rx_queues;
+
+ for (i = 0; i < trans_pcie->allocated_vector; i++) {
+ int ret;
+
+ ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
+ iwl_pcie_msix_isr,
+ (i == last_vector) ?
+ iwl_pcie_irq_msix_handler :
+ iwl_pcie_irq_rx_msix_handler,
+ IRQF_SHARED,
+ DRV_NAME,
+ &trans_pcie->msix_entries[i]);
+ if (ret) {
+ int j;
+
+ IWL_ERR(trans_pcie->trans,
+ "Error allocating IRQ %d\n", i);
+ for (j = 0; j < i; j++)
+ free_irq(trans_pcie->msix_entries[j].vector,
+ &trans_pcie->msix_entries[j]);
+ pci_disable_msix(pdev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
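
The unwind in the loop above must release exactly the vectors already requested and nothing more. The partial-failure rollback, reduced to its core (generic sketch, not the driver's code):

	for (i = 0; i < nvec; i++) {
		ret = request_irq(entries[i].vector, demo_isr, 0, "demo",
				  &entries[i]);
		if (ret) {
			while (--i >= 0)	/* free vectors 0..i-1 only */
				free_irq(entries[i].vector, &entries[i]);
			return ret;
		}
	}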
+
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1369,6 +1530,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
iwl_pcie_apm_init(trans);
+ iwl_pcie_init_msix(trans_pcie);
/* From now on, the op_mode will be kept updated about RF kill state */
iwl_enable_rfkill_int(trans);
@@ -1383,6 +1545,10 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
/* ... rfkill can call stop_device and set it false if needed */
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ /* Make sure we sync here, because we'll need full access later */
+ if (low_power)
+ pm_runtime_resume(trans->dev);
+
return 0;
}
@@ -1419,7 +1585,7 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
mutex_unlock(&trans_pcie->mutex);
- synchronize_irq(trans_pcie->pci_dev->irq);
+ iwl_pcie_synchronize_irqs(trans);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1452,12 +1618,6 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
-{
- WARN_ON(1);
- return 0;
-}
-
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg)
{
@@ -1486,19 +1646,13 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
- /* init ref_count to 1 (should be cleared when ucode is loaded) */
- trans_pcie->ref_count = 1;
-
/* Initialize NAPI here - it should be before registering to mac80211
* in the opmode but after the HW struct is allocated.
* As this function may be called again in some corner cases don't
* do anything if NAPI was already initialized.
*/
- if (!trans_pcie->napi.poll) {
+ if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
init_dummy_netdev(&trans_pcie->napi_dev);
- netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
- iwl_pcie_dummy_napi_poll, 64);
- }
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1506,22 +1660,29 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
- synchronize_irq(trans_pcie->pci_dev->irq);
+ iwl_pcie_synchronize_irqs(trans);
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
- free_irq(trans_pcie->pci_dev->irq, trans);
- iwl_pcie_free_ict(trans);
+ if (trans_pcie->msix_enabled) {
+ for (i = 0; i < trans_pcie->allocated_vector; i++)
+ free_irq(trans_pcie->msix_entries[i].vector,
+ &trans_pcie->msix_entries[i]);
- pci_disable_msi(trans_pcie->pci_dev);
+ pci_disable_msix(trans_pcie->pci_dev);
+ trans_pcie->msix_enabled = false;
+ } else {
+ free_irq(trans_pcie->pci_dev->irq, trans);
+
+ iwl_pcie_free_ict(trans);
+
+ pci_disable_msi(trans_pcie->pci_dev);
+ }
iounmap(trans_pcie->hw_base);
pci_release_regions(trans_pcie->pci_dev);
pci_disable_device(trans_pcie->pci_dev);
- if (trans_pcie->napi.poll)
- netif_napi_del(&trans_pcie->napi);
-
iwl_pcie_free_fw_monitor(trans);
for_each_possible_cpu(i) {
@@ -1861,6 +2022,7 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans)
spin_lock_irqsave(&trans_pcie->ref_lock, flags);
IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
trans_pcie->ref_count++;
+ pm_runtime_get(&trans_pcie->pci_dev->dev);
spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}
@@ -1879,6 +2041,10 @@ void iwl_trans_pcie_unref(struct iwl_trans *trans)
return;
}
trans_pcie->ref_count--;
+
+ pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
+ pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
+
spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}
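
The ref/unref pair above layers runtime PM onto the transport's existing reference counting: take a runtime-PM reference while work is pending, and on release arm the autosuspend timer instead of suspending immediately. The canonical pairing, sketched around a generic device:

#include <linux/pm_runtime.h>

static void demo_do_work(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* resume if needed, hold a reference */

	/* ... work that requires the device to stay powered ... */

	pm_runtime_mark_last_busy(dev);	 /* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev); /* drop ref; suspend after the delay */
}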
@@ -2031,29 +2197,48 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
- rxq->read);
- pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
- rxq->write);
- pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
- rxq->write_actual);
- pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
- rxq->need_update);
- pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
- rxq->free_count);
- if (rxq->rb_stts) {
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
- } else {
- pos += scnprintf(buf + pos, bufsz - pos,
- "closed_rb_num: Not Allocated\n");
+ char *buf;
+ int pos = 0, i, ret;
+ size_t bufsz = 121 * trans->num_rx_queues; /* worst-case text per queue */
+
+
+ if (!trans_pcie->rxq)
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
+ struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+ pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
+ i);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
+ rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
+ rxq->write);
+ pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
+ rxq->write_actual);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
+ rxq->need_update);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tclosed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->closed_rb_num) &
+ 0x0FFF);
+ } else {
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tclosed_rb_num: Not Allocated\n");
+ }
}
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+
+ return ret;
}
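
The rework above drops the fixed 256-byte on-stack buffer, which cannot hold statistics for more than one RX queue, in favor of a kzalloc'd buffer sized per queue. The underlying debugfs read idiom, with hypothetical demo_* fields:

static ssize_t demo_dbgfs_read(struct file *file, char __user *ubuf,
			       size_t count, loff_t *ppos)
{
	struct demo_dev *dev = file->private_data;
	size_t bufsz = 128 * dev->nqueues;	/* worst case per queue */
	char *buf;
	int i, pos = 0;
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < dev->nqueues && pos < bufsz; i++)
		pos += scnprintf(buf + pos, bufsz - pos,
				 "queue %d: read=%u\n", i, dev->q[i].read);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}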
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
@@ -2218,7 +2403,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
- struct iwl_rxq *rxq = &trans_pcie->rxq;
+ /* Dumping RBs is supported only for pre-9000 devices (single queue) */
+ struct iwl_rxq *rxq = &trans_pcie->rxq[0];
u32 i, r, j, rb_len = 0;
spin_lock(&rxq->lock);
@@ -2413,7 +2599,8 @@ static struct iwl_trans_dump_data
u32 len, num_rbs;
u32 monitor_len;
int i, ptr;
- bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
+ bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+ !trans->cfg->mq_rx_supported;
/* transport dump header */
len = sizeof(*dump_data);
@@ -2468,11 +2655,12 @@ static struct iwl_trans_dump_data
len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
if (dump_rbs) {
+ /* Dumping RBs is supported only for pre-9000 devices (single queue) */
+ struct iwl_rxq *rxq = &trans_pcie->rxq[0];
/* RBs */
- num_rbs = le16_to_cpu(ACCESS_ONCE(
- trans_pcie->rxq.rb_stts->closed_rb_num))
+ num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
& 0x0FFF;
- num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+ num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
len += num_rbs * (sizeof(*data) +
sizeof(struct iwl_fw_error_dump_rb) +
(PAGE_SIZE << trans_pcie->rx_page_order));
@@ -2523,6 +2711,22 @@ static struct iwl_trans_dump_data
return dump_data;
}
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+ if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+ return iwl_pci_fw_enter_d0i3(trans);
+
+ return 0;
+}
+
+static void iwl_trans_pcie_resume(struct iwl_trans *trans)
+{
+ if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+ iwl_pci_fw_exit_d0i3(trans);
+}
+#endif /* CONFIG_PM_SLEEP */
+
static const struct iwl_trans_ops trans_ops_pcie = {
.start_hw = iwl_trans_pcie_start_hw,
.op_mode_leave = iwl_trans_pcie_op_mode_leave,
@@ -2533,6 +2737,11 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.d3_suspend = iwl_trans_pcie_d3_suspend,
.d3_resume = iwl_trans_pcie_d3_resume,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwl_trans_pcie_suspend,
+ .resume = iwl_trans_pcie_resume,
+#endif /* CONFIG_PM_SLEEP */
+
.send_cmd = iwl_trans_pcie_send_hcmd,
.tx = iwl_trans_pcie_tx,
@@ -2570,8 +2779,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- u16 pci_cmd;
- int ret;
+ int ret, addr_size;
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
&pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2609,11 +2817,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM);
}
+ if (cfg->mq_rx_supported)
+ addr_size = 64;
+ else
+ addr_size = 36;
+
pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
if (!ret)
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ ret = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(addr_size));
if (ret) {
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!ret)
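
The addr_size computed above selects 64-bit DMA addressing for multi-queue hardware and 36-bit for older devices, retrying with 32 bits if the wider mask is rejected. For reference, the same negotiation can be written with the combined helper (a sketch; addr_size and the label come from the surrounding function):

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_no_pci;
	}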
@@ -2647,17 +2861,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->pci_dev = pdev;
iwl_disable_interrupts(trans);
- ret = pci_enable_msi(pdev);
- if (ret) {
- dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
- }
- }
-
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
/*
* In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
@@ -2709,6 +2912,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
+ iwl_pcie_set_interrupt_capa(pdev, trans);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
"PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
@@ -2716,19 +2920,31 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans_pcie->wait_command_queue);
- ret = iwl_pcie_alloc_ict(trans);
- if (ret)
- goto out_pci_disable_msi;
+ init_waitqueue_head(&trans_pcie->d0i3_waitq);
- ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans);
- if (ret) {
- IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
- goto out_free_ict;
- }
+ if (trans_pcie->msix_enabled) {
+ if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
+ goto out_pci_release_regions;
+ } else {
+ ret = iwl_pcie_alloc_ict(trans);
+ if (ret)
+ goto out_pci_disable_msi;
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
+ if (ret) {
+ IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+ goto out_free_ict;
+ }
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+ }
+
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+ trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+#else
+ trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
return trans;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 5262028b5505..16ad820ca824 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,7 +1,8 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -33,7 +34,6 @@
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>
-#include <net/ip6_checksum.h>
#include "iwl-debug.h"
#include "iwl-csr.h"
@@ -571,6 +571,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
return ret;
spin_lock_init(&txq->lock);
+ __skb_queue_head_init(&txq->overflow_q);
/*
* Tell nic where to find circular buffer of Tx Frame Descriptors for
@@ -621,6 +622,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
}
txq->active = false;
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
spin_unlock_bh(&txq->lock);
/* just in case - this queue may have been stopped */
@@ -1052,8 +1060,41 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark)
- iwl_wake_queue(trans, txq);
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ test_bit(txq_id, trans_pcie->queue_stopped)) {
+ struct sk_buff_head overflow_skbs;
+
+ __skb_queue_head_init(&overflow_skbs);
+ skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
+
+ /*
+ * This is tricky: we are in the reclaim path, which is not
+ * re-entrant, so no one else will access the txq data from
+ * that path. We also stopped tx, so no new frames can race
+ * with us. Bottom line: we can safely unlock here and
+ * re-lock later.
+ */
+ spin_unlock_bh(&txq->lock);
+
+ while (!skb_queue_empty(&overflow_skbs)) {
+ struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
+ struct iwl_device_cmd *dev_cmd =
+ info->driver_data[dev_cmd_idx];
+
+ /*
+ * Note that we can very well be overflowing again.
+ * In that case, iwl_queue_space will be small again
+ * and we won't wake mac80211's queue.
+ */
+ iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id);
+ }
+ spin_lock_bh(&txq->lock);
+
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+ iwl_wake_queue(trans, txq);
+ }
if (q->read_ptr == q->write_ptr) {
IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
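
The reclaim path above drains the overflow queue once ring space frees up: splice the deferred skbs to a private list while holding the queue lock, drop the lock (safe, since reclaim is not re-entrant and tx is stopped), re-submit each frame, and re-take the lock before deciding whether to wake mac80211. Skeleton of the splice-and-replay, with an illustrative demo_tx():

	struct sk_buff_head pending;
	struct sk_buff *skb;

	__skb_queue_head_init(&pending);
	skb_queue_splice_init(&txq->overflow_q, &pending); /* under the lock */

	spin_unlock_bh(&txq->lock);
	while ((skb = __skb_dequeue(&pending)))
		demo_tx(dev, skb);	/* may defer to overflow_q again */
	spin_lock_bh(&txq->lock);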
@@ -1686,6 +1727,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
wake_up(&trans_pcie->wait_command_queue);
}
+ if (meta->flags & CMD_MAKE_TRANS_IDLE) {
+ IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
+ iwl_get_cmd_string(trans, cmd->hdr.cmd));
+ set_bit(STATUS_TRANS_IDLE, &trans->status);
+ wake_up(&trans_pcie->d0i3_waitq);
+ }
+
+ if (meta->flags & CMD_WAKE_UP_TRANS) {
+ IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
+ iwl_get_cmd_string(trans, cmd->hdr.cmd));
+ clear_bit(STATUS_TRANS_IDLE, &trans->status);
+ wake_up(&trans_pcie->d0i3_waitq);
+ }
+
meta->flags = 0;
spin_unlock_bh(&txq->lock);
@@ -2161,6 +2216,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
csum = skb_checksum(skb, offs, skb->len - offs, 0);
*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (skb_is_nonlinear(skb) &&
@@ -2177,6 +2234,22 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
+ if (iwl_queue_space(q) < q->high_mark) {
+ iwl_stop_queue(trans, txq);
+
+ /* don't put the packet on the ring if there is no room */
+ if (unlikely(iwl_queue_space(q) < 3)) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] =
+ dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
/* In AGG mode, the index in the ring must correspond to the WiFi
* sequence number. This is a HW requirements to help the SCD to parse
* the BA.
@@ -2281,12 +2354,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* At this point the frame is "transmitted" successfully
* and we will get a TX status notification eventually.
*/
- if (iwl_queue_space(q) < q->high_mark) {
- if (wait_write_ptr)
- iwl_pcie_txq_inc_wr_ptr(trans, txq);
- else
- iwl_stop_queue(trans, txq);
- }
spin_unlock(&txq->lock);
return 0;
out_err:
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 6df3ee561d52..515aa3f993f3 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
spin_lock_bh(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, rid, 0);
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
+
+ res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
if (le16_to_cpu(rec.len) == 0) {
/* RID not available */
res = -ENODATA;
+ goto unlock;
}
rlen = (le16_to_cpu(rec.len) - 1) * 2;
- if (!res && exact_len && rlen != len) {
+ if (exact_len && rlen != len) {
printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
"rid=0x%04x, len=%d (expected %d)\n",
dev->name, rid, rlen, len);
res = -ENODATA;
+		goto unlock;
}
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, buf, len);
+ res = hfa384x_from_bap(dev, BAP0, buf, len);
+unlock:
spin_unlock_bh(&local->baplock);
mutex_unlock(&local->rid_bap_mtx);
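
The hostap change above converts a chain of 'if (!res)' guards into early 'goto unlock' exits, the usual kernel idiom for keeping lock/unlock pairing obvious on every path. Skeleton of the pattern with illustrative demo_* helpers:

static int demo_get_record(struct demo_dev *dev, void *buf, int len)
{
	int res;

	spin_lock_bh(&dev->lock);

	res = demo_setup(dev);
	if (res)
		goto unlock;

	res = demo_read_header(dev);
	if (res)
		goto unlock;

	res = demo_read_body(dev, buf, len);
unlock:
	spin_unlock_bh(&dev->lock);	/* single unlock site for all paths */
	return res;
}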
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index fce4a843e656..bc7397d709d3 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -6,7 +6,7 @@
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
+#include <crypto/hash.h>
#include "orinoco.h"
#include "mic.h"
@@ -16,7 +16,8 @@
/********************************************************************/
int orinoco_mic_init(struct orinoco_private *priv)
{
- priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
+ priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_mic)) {
printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
"crypto API michael_mic\n");
@@ -24,7 +25,8 @@ int orinoco_mic_init(struct orinoco_private *priv)
return -ENOMEM;
}
- priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
+ priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_mic)) {
printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
"crypto API michael_mic\n");
@@ -38,18 +40,19 @@ int orinoco_mic_init(struct orinoco_private *priv)
void orinoco_mic_free(struct orinoco_private *priv)
{
if (priv->tx_tfm_mic)
- crypto_free_hash(priv->tx_tfm_mic);
+ crypto_free_ahash(priv->tx_tfm_mic);
if (priv->rx_tfm_mic)
- crypto_free_hash(priv->rx_tfm_mic);
+ crypto_free_ahash(priv->rx_tfm_mic);
}
-int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
u8 *da, u8 *sa, u8 priority,
u8 *data, size_t data_len, u8 *mic)
{
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, tfm_michael);
struct scatterlist sg[2];
u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
+ int err;
if (tfm_michael == NULL) {
printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
@@ -69,11 +72,13 @@ int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
sg_set_buf(&sg[0], hdr, sizeof(hdr));
sg_set_buf(&sg[1], data, data_len);
- if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
+ if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
return -1;
- desc.tfm = tfm_michael;
- desc.flags = 0;
- return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr),
- mic);
+ ahash_request_set_tfm(req, tfm_michael);
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
+ err = crypto_ahash_digest(req);
+ ahash_request_zero(req);
+ return err;
}
diff --git a/drivers/net/wireless/intersil/orinoco/mic.h b/drivers/net/wireless/intersil/orinoco/mic.h
index 04d05bc566d6..ce731d05cc98 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.h
+++ b/drivers/net/wireless/intersil/orinoco/mic.h
@@ -11,11 +11,11 @@
/* Forward declarations */
struct orinoco_private;
-struct crypto_hash;
+struct crypto_ahash;
int orinoco_mic_init(struct orinoco_private *priv);
void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
u8 *da, u8 *sa, u8 priority,
u8 *data, size_t data_len, u8 *mic);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco.h b/drivers/net/wireless/intersil/orinoco/orinoco.h
index eebd2be21ee9..2f0c84b1c440 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco.h
+++ b/drivers/net/wireless/intersil/orinoco/orinoco.h
@@ -152,8 +152,8 @@ struct orinoco_private {
u8 *wpa_ie;
int wpa_ie_len;
- struct crypto_hash *rx_tfm_mic;
- struct crypto_hash *tx_tfm_mic;
+ struct crypto_ahash *rx_tfm_mic;
+ struct crypto_ahash *tx_tfm_mic;
unsigned int wpa_enabled:1;
unsigned int tkip_cm_active:1;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a28414c50edf..e85e0737771c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -844,7 +844,7 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
hdr->rt_chbitmask = cpu_to_le16(flags);
skb->dev = hwsim_mon;
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
@@ -887,7 +887,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
memcpy(hdr11->addr1, addr, ETH_ALEN);
skb->dev = hwsim_mon;
- skb_set_mac_header(skb, 0);
+ skb_reset_mac_header(skb);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
@@ -1334,10 +1334,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
data->tx_bytes += skb->len;
ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
- if (ack && skb->len >= 16) {
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ if (ack && skb->len >= 16)
mac80211_hwsim_monitor_ack(channel, hdr->addr2);
- }
ieee80211_tx_info_clear_status(txi);
@@ -1846,10 +1844,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+
switch (action) {
case IEEE80211_AMPDU_TX_START:
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 86955c416b30..2eea76a340b7 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -2039,6 +2039,43 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
+int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+
+ if (!(priv->fwcapinfo & FW_CAPINFO_PS)) {
+ if (!enabled)
+ return 0;
+ else
+ return -EINVAL;
+ }
+ /* The firmware does not cope well with long latencies when power
+ * saving is enabled, so do not enable it if the host can only
+ * poll for interrupts (as with some SDIO hosts that lack
+ * SDIO IRQ support)
+ */
+ if (priv->is_polling) {
+ if (!enabled)
+ return 0;
+ else
+ return -EINVAL;
+ }
+ if (!enabled) {
+ priv->psmode = LBS802_11POWERMODECAM;
+ if (priv->psstate != PS_STATE_FULL_POWER)
+ lbs_set_ps_mode(priv,
+ PS_MODE_ACTION_EXIT_PS,
+ true);
+ return 0;
+ }
+ if (priv->psmode != LBS802_11POWERMODECAM)
+ return 0;
+ priv->psmode = LBS802_11POWERMODEMAX_PSP;
+ if (priv->connect_status == LBS_CONNECTED)
+ lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, true);
+ return 0;
+}
/*
* Initialization
@@ -2057,6 +2094,7 @@ static struct cfg80211_ops lbs_cfg80211_ops = {
.change_virtual_intf = lbs_change_intf,
.join_ibss = lbs_join_ibss,
.leave_ibss = lbs_leave_ibss,
+ .set_power_mgmt = lbs_set_power_mgmt,
};
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 0387a5b380c8..4ddd0e5a6b85 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -957,7 +957,7 @@ static void lbs_queue_cmd(struct lbs_private *priv,
/* Exit_PS command needs to be queued in the header always. */
if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) {
- struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf;
+ struct cmd_ds_802_11_ps_mode *psm = (void *)cmdnode->cmdbuf;
if (psm->action == cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
if (priv->psstate != PS_STATE_FULL_POWER)
@@ -1387,7 +1387,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
* PS command. Ignore it if it is not Exit_PS.
* otherwise send it down immediately.
*/
- struct cmd_ds_802_11_ps_mode *psm = (void *)&cmd[1];
+ struct cmd_ds_802_11_ps_mode *psm = (void *)cmd;
lbs_deb_host(
"EXEC_NEXT_CMD: PS cmd, action 0x%02x\n",
@@ -1428,40 +1428,14 @@ int lbs_execute_next_command(struct lbs_private *priv)
* check if in power save mode, if yes, put the device back
* to PS mode
*/
-#ifdef TODO
- /*
- * This was the old code for libertas+wext. Someone that
- * understands this beast should re-code it in a sane way.
- *
- * I actually don't understand why this is related to WPA
- * and to connection status, shouldn't powering should be
- * independ of such things?
- */
if ((priv->psmode != LBS802_11POWERMODECAM) &&
(priv->psstate == PS_STATE_FULL_POWER) &&
- ((priv->connect_status == LBS_CONNECTED) ||
- lbs_mesh_connected(priv))) {
- if (priv->secinfo.WPAenabled ||
- priv->secinfo.WPA2enabled) {
- /* check for valid WPA group keys */
- if (priv->wpa_mcast_key.len ||
- priv->wpa_unicast_key.len) {
- lbs_deb_host(
- "EXEC_NEXT_CMD: WPA enabled and GTK_SET"
- " go back to PS_SLEEP");
- lbs_set_ps_mode(priv,
- PS_MODE_ACTION_ENTER_PS,
- false);
- }
- } else {
- lbs_deb_host(
- "EXEC_NEXT_CMD: cmdpendingq empty, "
- "go back to PS_SLEEP");
- lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS,
- false);
- }
+ (priv->connect_status == LBS_CONNECTED)) {
+ lbs_deb_host(
+ "EXEC_NEXT_CMD: cmdpendingq empty, go back to PS_SLEEP");
+ lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS,
+ false);
}
-#endif
}
ret = 0;
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index e5442e8956f7..c95bf6dc9522 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -123,7 +123,10 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
priv->cmd_timed_out = 0;
if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) {
- struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1];
+ /* struct cmd_ds_802_11_ps_mode also contains
+ * the header
+ */
+ struct cmd_ds_802_11_ps_mode *psmode = (void *)resp;
u16 action = le16_to_cpu(psmode->action);
lbs_deb_host(
@@ -254,6 +257,10 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
"EVENT: in FULL POWER mode, ignoring PS_SLEEP\n");
break;
}
+ if (!list_empty(&priv->cmdpendingq)) {
+ lbs_deb_cmd("EVENT: commands in queue, do not sleep\n");
+ break;
+ }
priv->psstate = PS_STATE_PRE_SLEEP;
lbs_ps_confirm_sleep(priv);
diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
index 6bd1608992b0..edf710bc5e77 100644
--- a/drivers/net/wireless/marvell/libertas/dev.h
+++ b/drivers/net/wireless/marvell/libertas/dev.h
@@ -99,6 +99,7 @@ struct lbs_private {
/* Hardware access */
void *card;
bool iface_running;
+ u8 is_polling; /* host has to poll the card irq */
u8 fw_ready;
u8 surpriseremoved;
u8 setup_fw_on_resume;
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 68fd3a9779bd..13eae9ff8c35 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1267,7 +1267,7 @@ static int if_sdio_probe(struct sdio_func *func,
priv->reset_card = if_sdio_reset_card;
priv->power_save = if_sdio_power_save;
priv->power_restore = if_sdio_power_restore;
-
+ priv->is_polling = !(func->card->host->caps & MMC_CAP_SDIO_IRQ);
ret = if_sdio_power_on(card);
if (ret)
goto err_activate_card;
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index dff08a2896a3..aba0c9995b14 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -267,6 +267,7 @@ static int if_usb_probe(struct usb_interface *intf,
priv->enter_deep_sleep = NULL;
priv->exit_deep_sleep = NULL;
priv->reset_deep_sleep_wakeup = NULL;
+ priv->is_polling = false;
#ifdef CONFIG_OLPC
if (machine_is_olpc())
priv->reset_card = if_usb_reset_olpc_card;
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 8079560f4965..8541cbed786d 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -1060,7 +1060,12 @@ void lbs_remove_card(struct lbs_private *priv)
if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
priv->psmode = LBS802_11POWERMODECAM;
- lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true);
+ /* No need to wake up if the card is already at full power.
+ * Also, on suspend this exit-PS command would not be
+ * processed and the driver would hang.
+ */
+ if (priv->psstate != PS_STATE_FULL_POWER)
+ lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true);
}
if (priv->is_deep_sleep) {
@@ -1113,7 +1118,8 @@ int lbs_start_card(struct lbs_private *priv)
else
pr_info("%s: mesh disabled\n", dev->name);
- if (lbs_cfg_register(priv)) {
+ ret = lbs_cfg_register(priv);
+ if (ret) {
pr_err("cannot register device\n");
goto done;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 71a1b580796f..81c60d0a1bda 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -123,8 +123,7 @@ void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer,
void mwifiex_dfs_cac_work_queue(struct work_struct *work)
{
struct cfg80211_chan_def chandef;
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *delayed_work = to_delayed_work(work);
struct mwifiex_private *priv =
container_of(delayed_work, struct mwifiex_private,
dfs_cac_work);
@@ -289,8 +288,7 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
{
struct mwifiex_uap_bss_param *bss_cfg;
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
+ struct delayed_work *delayed_work = to_delayed_work(work);
struct mwifiex_private *priv =
container_of(delayed_work, struct mwifiex_private,
dfs_chan_sw_work);
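
to_delayed_work() is simply a container_of() wrapper, so the two-step unwrap above (work_struct to delayed_work to private struct) is the standard shape of a delayed-work callback:

#include <linux/workqueue.h>

struct demo_priv {			/* illustrative */
	struct delayed_work dwork;
};

static void demo_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_priv *priv = container_of(dwork, struct demo_priv,
					      dwork);

	/* ... operate on priv ... */
}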
diff --git a/drivers/net/wireless/marvell/mwifiex/README b/drivers/net/wireless/marvell/mwifiex/README
index 2f0f9b5609d0..24e649b1eb24 100644
--- a/drivers/net/wireless/marvell/mwifiex/README
+++ b/drivers/net/wireless/marvell/mwifiex/README
@@ -237,4 +237,14 @@ device_dump
cat fw_dump
+verext
+ This command is used to get the extended firmware version string,
+ selected via a configuration parameter.
+
+ Usage:
+ echo "[version_str_sel]" > verext
+ cat verext
+
+ [version_str_sel]: the firmware supports several extended version
+ string variants, selected by the values 0/1/10/20/21/99
===============================================================================
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index e7adef72c05f..bb7235e1b9d1 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -20,6 +20,7 @@
#include "cfg80211.h"
#include "main.h"
#include "11n.h"
+#include "wmm.h"
static char *reg_alpha2;
module_param(reg_alpha2, charp, 0);
@@ -676,7 +677,7 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
}
break;
- case MWIFIEX_BSS_ROLE_STA:
+ case MWIFIEX_BSS_ROLE_STA:
if (priv->media_connected) {
mwifiex_dbg(adapter, ERROR,
"cannot change wiphy params when connected");
@@ -1962,6 +1963,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ if (!mwifiex_stop_bg_scan(priv))
+ cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
@@ -2217,6 +2221,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
"info: Trying to associate to %s and bssid %pM\n",
(char *)sme->ssid, sme->bssid);
+ if (!mwifiex_stop_bg_scan(priv))
+ cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
if (!ret) {
@@ -2420,6 +2427,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
+ if (!mwifiex_stop_bg_scan(priv))
+ cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
if (!user_scan_cfg)
return -ENOMEM;
@@ -2487,6 +2497,125 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
return 0;
}
+/* CFG802.11 operation handler for sched_scan_start.
+ *
+ * This function issues a bgscan config request to the firmware based on
+ * the user-specified sched_scan configuration. Once the configured scan
+ * reports a result, the firmware generates a BGSCAN_REPORT event and the
+ * driver should issue a bgscan query command to fetch the results.
+ */
+static int
+mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int i, offset;
+ struct ieee80211_channel *chan;
+ struct mwifiex_bg_scan_cfg *bgscan_cfg;
+ struct ieee_types_header *ie;
+
+ if (!request || (!request->n_ssids && !request->n_match_sets)) {
+ wiphy_err(wiphy, "%s : Invalid Sched_scan parameters",
+ __func__);
+ return -EINVAL;
+ }
+
+ wiphy_info(wiphy, "sched_scan start : n_ssids=%d n_match_sets=%d ",
+ request->n_ssids, request->n_match_sets);
+ wiphy_info(wiphy, "n_channels=%d interval=%d ie_len=%d\n",
+ request->n_channels, request->scan_plans->interval,
+ (int)request->ie_len);
+
+ bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL);
+ if (!bgscan_cfg)
+ return -ENOMEM;
+
+ if (priv->scan_request || priv->scan_aborting)
+ bgscan_cfg->start_later = true;
+
+ bgscan_cfg->num_ssids = request->n_match_sets;
+ bgscan_cfg->ssid_list = request->match_sets;
+
+ if (request->ie && request->ie_len) {
+ offset = 0;
+ for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
+ if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
+ continue;
+ priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_BGSCAN;
+ ie = (struct ieee_types_header *)(request->ie + offset);
+ memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len);
+ offset += sizeof(*ie) + ie->len;
+
+ if (offset >= request->ie_len)
+ break;
+ }
+ }
+
+ for (i = 0; i < min_t(u32, request->n_channels,
+ MWIFIEX_BG_SCAN_CHAN_MAX); i++) {
+ chan = request->channels[i];
+ bgscan_cfg->chan_list[i].chan_number = chan->hw_value;
+ bgscan_cfg->chan_list[i].radio_type = chan->band;
+
+ if ((chan->flags & IEEE80211_CHAN_NO_IR) || !request->n_ssids)
+ bgscan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_PASSIVE;
+ else
+ bgscan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_ACTIVE;
+
+ bgscan_cfg->chan_list[i].scan_time = 0;
+ }
+
+ bgscan_cfg->chan_per_scan = min_t(u32, request->n_channels,
+ MWIFIEX_BG_SCAN_CHAN_MAX);
+
+ /* Use at least 15 seconds per scan cycle */
+ bgscan_cfg->scan_interval = (request->scan_plans->interval >
+ MWIFIEX_BGSCAN_INTERVAL) ?
+ request->scan_plans->interval :
+ MWIFIEX_BGSCAN_INTERVAL;
+
+ bgscan_cfg->repeat_count = MWIFIEX_BGSCAN_REPEAT_COUNT;
+ bgscan_cfg->report_condition = MWIFIEX_BGSCAN_SSID_MATCH |
+ MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE;
+ bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA;
+ bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET;
+ bgscan_cfg->enable = true;
+ if (request->min_rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
+ bgscan_cfg->report_condition |= MWIFIEX_BGSCAN_SSID_RSSI_MATCH;
+ bgscan_cfg->rssi_threshold = request->min_rssi_thold;
+ }
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG,
+ HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) {
+ kfree(bgscan_cfg);
+ return -EFAULT;
+ }
+
+ priv->sched_scanning = true;
+
+ kfree(bgscan_cfg);
+ return 0;
+}
+
+/* CFG802.11 operation handler for sched_scan_stop.
+ *
+ * This function issues a bgscan config command to disable the
+ * previous bgscan configuration in the firmware.
+ */
+static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ wiphy_info(wiphy, "sched scan stop!");
+ mwifiex_stop_bg_scan(priv);
+
+ return 0;
+}
+
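
Taken together, a sched_scan_start handler reduces to: validate the request, translate match sets, channels and interval into the firmware's bgscan config, send it, and record that a scheduled scan is active so the later report event can be routed back through cfg80211. Condensed shape, with hypothetical demo_* firmware helpers:

static int demo_sched_scan_start(struct wiphy *wiphy,
				 struct net_device *ndev,
				 struct cfg80211_sched_scan_request *req)
{
	struct demo_priv *priv = wiphy_priv(wiphy);
	struct demo_fw_cfg *cfg;
	int err;

	if (!req || (!req->n_ssids && !req->n_match_sets))
		return -EINVAL;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	demo_fill_channels(cfg, req);		/* hypothetical translation */
	cfg->interval = max_t(u32, req->scan_plans->interval,
			      DEMO_MIN_INTERVAL);	/* clamp short cycles */

	err = demo_send_fw_cmd(priv, cfg);	/* hypothetical send */
	if (!err)
		priv->sched_scanning = true;	/* route the report event */

	kfree(cfg);
	return err;
}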
static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info,
struct mwifiex_private *priv)
{
@@ -2848,6 +2977,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
mwifiex_dev_debugfs_remove(priv);
#endif
+ if (priv->sched_scanning)
+ priv->sched_scanning = false;
+
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
@@ -3044,10 +3176,12 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
sizeof(byte_seq));
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
- if (first_pat)
+ if (first_pat) {
first_pat = false;
- else
+ mwifiex_dbg(priv->adapter, INFO, "Wake on patterns\n");
+ } else {
mef_entry->filter[filt_num].filt_action = TYPE_AND;
+ }
filt_num++;
}
@@ -3073,6 +3207,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
mef_entry->filter[filt_num].offset = 56;
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
mef_entry->filter[filt_num].filt_action = TYPE_OR;
+ mwifiex_dbg(priv->adapter, INFO, "Wake on magic packet\n");
}
return ret;
}
@@ -3125,7 +3260,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_ds_hs_cfg hs_cfg;
- int i, ret = 0;
+ int i, ret = 0, retry_num = 10;
struct mwifiex_private *priv;
for (i = 0; i < adapter->priv_num; i++) {
@@ -3135,6 +3270,21 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
mwifiex_cancel_all_pending_cmd(adapter);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv && priv->netdev)
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ }
+
+ for (i = 0; i < retry_num; i++) {
+ if (!mwifiex_wmm_lists_empty(adapter) ||
+ !mwifiex_bypass_txlist_empty(adapter) ||
+ !skb_queue_empty(&adapter->tx_data_q))
+ usleep_range(10000, 15000);
+ else
+ break;
+ }
+
if (!wowlan) {
mwifiex_dbg(adapter, ERROR,
"None of the WOWLAN triggers enabled\n");
@@ -3143,7 +3293,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
- if (!priv->media_connected) {
+ if (!priv->media_connected && !wowlan->nd_config) {
mwifiex_dbg(adapter, ERROR,
"Can not configure WOWLAN in disconnected state\n");
return 0;
@@ -3155,19 +3305,30 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
return ret;
}
+ memset(&hs_cfg, 0, sizeof(hs_cfg));
+ hs_cfg.conditions = le32_to_cpu(adapter->hs_cfg.conditions);
+
+ if (wowlan->nd_config) {
+ mwifiex_dbg(adapter, INFO, "Wake on net detect\n");
+ hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT;
+ mwifiex_cfg80211_sched_scan_start(wiphy, priv->netdev,
+ wowlan->nd_config);
+ }
+
if (wowlan->disconnect) {
- memset(&hs_cfg, 0, sizeof(hs_cfg));
- hs_cfg.is_invoke_hostcmd = false;
- hs_cfg.conditions = HS_CFG_COND_MAC_EVENT;
- hs_cfg.gpio = adapter->hs_cfg.gpio;
- hs_cfg.gap = adapter->hs_cfg.gap;
- ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
- MWIFIEX_SYNC_CMD, &hs_cfg);
- if (ret) {
- mwifiex_dbg(adapter, ERROR,
- "Failed to set HS params\n");
- return ret;
- }
+ hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT;
+ mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n");
+ }
+
+ hs_cfg.is_invoke_hostcmd = false;
+ hs_cfg.gpio = adapter->hs_cfg.gpio;
+ hs_cfg.gap = adapter->hs_cfg.gap;
+ ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
+ MWIFIEX_SYNC_CMD, &hs_cfg);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR,
+ "Failed to set HS params\n");
+ return ret;
}
return ret;
@@ -3175,6 +3336,70 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv;
+ struct mwifiex_ds_wakeup_reason wakeup_reason;
+ struct cfg80211_wowlan_wakeup wakeup_report;
+ int i;
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv && priv->netdev)
+ mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
+ }
+
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+ mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD,
+ &wakeup_reason);
+ memset(&wakeup_report, 0, sizeof(struct cfg80211_wowlan_wakeup));
+
+ wakeup_report.pattern_idx = -1;
+
+ switch (wakeup_reason.hs_wakeup_reason) {
+ case NO_HSWAKEUP_REASON:
+ break;
+ case BCAST_DATA_MATCHED:
+ break;
+ case MCAST_DATA_MATCHED:
+ break;
+ case UCAST_DATA_MATCHED:
+ break;
+ case MASKTABLE_EVENT_MATCHED:
+ break;
+ case NON_MASKABLE_EVENT_MATCHED:
+ if (wiphy->wowlan_config->disconnect)
+ wakeup_report.disconnect = true;
+ if (wiphy->wowlan_config->nd_config)
+ wakeup_report.net_detect = adapter->nd_info;
+ break;
+ case NON_MASKABLE_CONDITION_MATCHED:
+ break;
+ case MAGIC_PATTERN_MATCHED:
+ if (wiphy->wowlan_config->magic_pkt)
+ wakeup_report.magic_pkt = true;
+ if (wiphy->wowlan_config->n_patterns)
+ wakeup_report.pattern_idx = 1;
+ break;
+ case CONTROL_FRAME_MATCHED:
+ break;
+ case MANAGEMENT_FRAME_MATCHED:
+ break;
+ default:
+ break;
+ }
+
+ if ((wakeup_reason.hs_wakeup_reason > 0) &&
+ (wakeup_reason.hs_wakeup_reason <= 7))
+ cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report,
+ GFP_KERNEL);
+
+ if (adapter->nd_info) {
+ for (i = 0; i < adapter->nd_info->n_matches; i++)
+ kfree(adapter->nd_info->matches[i]);
+ kfree(adapter->nd_info);
+ adapter->nd_info = NULL;
+ }
+
return 0;
}
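
On resume the handler above queries the firmware's wakeup reason and, when it corresponds to a configured trigger, reports it via cfg80211_report_wowlan_wakeup() (a NULL report would mean the device woke for an unspecified reason). Minimal reporting sketch:

#include <net/cfg80211.h>

static void demo_report_wakeup(struct wireless_dev *wdev, bool disconnected)
{
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,		/* no pattern matched */
		.disconnect = disconnected,
	};

	cfg80211_report_wowlan_wakeup(wdev, &wakeup, GFP_KERNEL);
}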
@@ -3590,8 +3815,8 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
freq = ieee80211_channel_to_frequency(curr_bss->channel, band);
chan = ieee80211_get_channel(wiphy, freq);
- if (curr_bss->bcn_ht_oper) {
- second_chan_offset = curr_bss->bcn_ht_oper->ht_param &
+ if (priv->ht_param_present) {
+ second_chan_offset = priv->assoc_resp_ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
chan_type = mwifiex_sec_chan_offset_to_chan_type
(second_chan_offset);
@@ -3701,6 +3926,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
.set_antenna = mwifiex_cfg80211_set_antenna,
.del_station = mwifiex_cfg80211_del_station,
+ .sched_scan_start = mwifiex_cfg80211_sched_scan_start,
+ .sched_scan_stop = mwifiex_cfg80211_sched_scan_stop,
#ifdef CONFIG_PM
.suspend = mwifiex_cfg80211_suspend,
.resume = mwifiex_cfg80211_resume,
@@ -3720,11 +3947,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
- .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_NET_DETECT,
.n_patterns = MWIFIEX_MEF_MAX_FILTERS,
.pattern_min_len = 1,
.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+ .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS,
};
#endif
@@ -3829,6 +4058,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -3847,6 +4077,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+ wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
+ wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
+ wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH;
+
wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index cb25aa7e90db..a12adee776c6 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1657,3 +1657,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
return 0;
}
+
+/* This function handles the command response of hs wakeup reason
+ * command.
+ */
+int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp,
+ struct host_cmd_ds_wakeup_reason *wakeup_reason)
+{
+ wakeup_reason->wakeup_reason =
+ resp->params.hs_wakeup_reason.wakeup_reason;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 0b9c580af988..bccf17ad588e 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -95,8 +95,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1);
- if (!priv->version_str[0])
- mwifiex_get_ver_ext(priv);
+ mwifiex_get_ver_ext(priv, 0);
p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
p += sprintf(p, "driver_version = %s", fmt);
@@ -583,6 +582,52 @@ done:
return ret;
}
+/* debugfs verext file write handler.
+ * This function is called when the 'verext' file is opened for writing.
+ */
+static ssize_t
+mwifiex_verext_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ u32 versionstrsel;
+ struct mwifiex_private *priv = (void *)file->private_data;
+ char buf[16];
+
+ memset(buf, 0, sizeof(buf));
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ ret = kstrtou32(buf, 10, &versionstrsel);
+ if (ret)
+ return ret;
+
+ priv->versionstrsel = versionstrsel;
+
+ return count;
+}
+
+/* debugfs verext file read handler.
+ * This function is called when the 'verext' file is opened for reading.
+ * It can be used to read the driver's extended version string.
+ */
+static ssize_t
+mwifiex_verext_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mwifiex_private *priv =
+ (struct mwifiex_private *)file->private_data;
+ char buf[256];
+ int ret;
+
+ mwifiex_get_ver_ext(priv, priv->versionstrsel);
+ ret = snprintf(buf, sizeof(buf), "version string: %s\n",
+ priv->version_str);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+}
+
/* Proc memrw file write handler.
* This function is called when the 'memrw' file is opened for writing
* This function can be used to write to a memory location.
@@ -880,14 +925,12 @@ mwifiex_reset_write(struct file *file,
{
struct mwifiex_private *priv = file->private_data;
struct mwifiex_adapter *adapter = priv->adapter;
- char cmd;
bool result;
+ int rc;
- if (copy_from_user(&cmd, ubuf, sizeof(cmd)))
- return -EFAULT;
-
- if (strtobool(&cmd, &result))
- return -EINVAL;
+ rc = kstrtobool_from_user(ubuf, count, &result);
+ if (rc)
+ return rc;
if (!result)
return -EINVAL;
@@ -940,6 +983,7 @@ MWIFIEX_DFS_FILE_OPS(histogram);
MWIFIEX_DFS_FILE_OPS(debug_mask);
MWIFIEX_DFS_FILE_OPS(timeshare_coex);
MWIFIEX_DFS_FILE_WRITE_OPS(reset);
+MWIFIEX_DFS_FILE_OPS(verext);
/*
* This function creates the debug FS directory structure and the files.
@@ -968,6 +1012,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
MWIFIEX_DFS_ADD_FILE(debug_mask);
MWIFIEX_DFS_ADD_FILE(timeshare_coex);
MWIFIEX_DFS_ADD_FILE(reset);
+ MWIFIEX_DFS_ADD_FILE(verext);
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h
index d9c15cd36f12..bec300b9c2ea 100644
--- a/drivers/net/wireless/marvell/mwifiex/decl.h
+++ b/drivers/net/wireless/marvell/mwifiex/decl.h
@@ -122,6 +122,8 @@
#define BLOCK_NUMBER_OFFSET 15
#define SDIO_HEADER_OFFSET 28
+#define MWIFIEX_SIZE_4K 0x4000
+
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -270,4 +272,26 @@ struct mwifiex_11h_intf_state {
bool is_11h_enabled;
bool is_11h_active;
} __packed;
+
+#define MWIFIEX_FW_DUMP_IDX 0xff
+#define MWIFIEX_FW_DUMP_MAX_MEMSIZE 0x160000
+#define MWIFIEX_DRV_INFO_IDX 20
+#define FW_DUMP_MAX_NAME_LEN 8
+#define FW_DUMP_HOST_READY 0xEE
+#define FW_DUMP_DONE 0xFF
+#define FW_DUMP_READ_DONE 0xFE
+
+struct memory_type_mapping {
+ u8 mem_name[FW_DUMP_MAX_NAME_LEN];
+ u8 *mem_ptr;
+ u32 mem_size;
+ u8 done_flag;
+};
+
+enum rdwr_status {
+ RDWR_STATUS_SUCCESS = 0,
+ RDWR_STATUS_FAILURE = 1,
+ RDWR_STATUS_DONE = 2
+};
+
#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index ced7af2be29a..c134cf865291 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -96,7 +96,7 @@ enum KEY_TYPE_ID {
#define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
#define MAX_POLL_TRIES 100
-#define MAX_FIRMWARE_POLL_TRIES 100
+#define MAX_FIRMWARE_POLL_TRIES 150
#define FIRMWARE_READY_SDIO 0xfedc
#define FIRMWARE_READY_PCIE 0xfedcba00
@@ -144,6 +144,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18)
#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
+#define TLV_TYPE_BGSCAN_START_LATER (PROPRIETARY_TLV_BASE_ID + 30)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
@@ -177,6 +178,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176)
#define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183)
#define TLV_TYPE_MC_GROUP_INFO (PROPRIETARY_TLV_BASE_ID + 184)
#define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194)
@@ -331,6 +333,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_802_11_MAC_ADDRESS 0x004D
#define HostCmd_CMD_802_11D_DOMAIN_INFO 0x005b
#define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e
+#define HostCmd_CMD_802_11_BG_SCAN_CONFIG 0x006b
#define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c
#define HostCmd_CMD_WMM_GET_STATUS 0x0071
#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075
@@ -370,6 +373,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
+#define HostCmd_CMD_HS_WAKEUP_REASON 0x0116
#define HostCmd_CMD_TDLS_CONFIG 0x0100
#define HostCmd_CMD_MC_POLICY 0x0121
#define HostCmd_CMD_TDLS_OPER 0x0122
@@ -523,6 +527,7 @@ enum P2P_MODES {
#define EVENT_CHANNEL_REPORT_RDY 0x00000054
#define EVENT_TX_DATA_PAUSE 0x00000055
#define EVENT_EXT_SCAN_REPORT 0x00000058
+#define EVENT_BG_SCAN_STOPPED 0x00000065
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_MULTI_CHAN_INFO 0x0000006a
#define EVENT_TX_STATUS_REPORT 0x00000074
@@ -539,6 +544,8 @@ enum P2P_MODES {
#define MWIFIEX_MAX_PATTERN_LEN 40
#define MWIFIEX_MAX_OFFSET_LEN 100
+#define MWIFIEX_MAX_ND_MATCH_SETS 10
+
#define STACK_NBYTES 100
#define TYPE_DNUM 1
#define TYPE_BYTESEQ 2
@@ -601,6 +608,20 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01
#define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS 0x20
+enum HS_WAKEUP_REASON {
+ NO_HSWAKEUP_REASON = 0,
+ BCAST_DATA_MATCHED,
+ MCAST_DATA_MATCHED,
+ UCAST_DATA_MATCHED,
+ MASKTABLE_EVENT_MATCHED,
+ NON_MASKABLE_EVENT_MATCHED,
+ NON_MASKABLE_CONDITION_MATCHED,
+ MAGIC_PATTERN_MATCHED,
+ CONTROL_FRAME_MATCHED,
+ MANAGEMENT_FRAME_MATCHED,
+ RESERVED
+};
+
struct txpd {
u8 bss_type;
u8 bss_num;
@@ -733,6 +754,21 @@ struct mwifiex_ie_types_num_probes {
__le16 num_probes;
} __packed;
+struct mwifiex_ie_types_repeat_count {
+ struct mwifiex_ie_types_header header;
+ __le16 repeat_count;
+} __packed;
+
+struct mwifiex_ie_types_min_rssi_threshold {
+ struct mwifiex_ie_types_header header;
+ __le16 rssi_threshold;
+} __packed;
+
+struct mwifiex_ie_types_bgscan_start_later {
+ struct mwifiex_ie_types_header header;
+ __le16 start_later;
+} __packed;
+
struct mwifiex_ie_types_scan_chan_gap {
struct mwifiex_ie_types_header header;
/* time gap in TUs to be used between two consecutive channels scan */
@@ -1027,7 +1063,7 @@ struct ieee_types_assoc_rsp {
__le16 cap_info_bitmap;
__le16 status_code;
__le16 a_id;
- u8 ie_buffer[1];
+ u8 ie_buffer[0];
} __packed;
struct host_cmd_ds_802_11_associate_rsp {
@@ -1425,6 +1461,36 @@ struct mwifiex_user_scan_cfg {
u16 scan_chan_gap;
} __packed;
+#define MWIFIEX_BG_SCAN_CHAN_MAX 38
+#define MWIFIEX_BSS_MODE_INFRA 1
+#define MWIFIEX_BGSCAN_ACT_GET 0x0000
+#define MWIFIEX_BGSCAN_ACT_SET 0x0001
+#define MWIFIEX_BGSCAN_ACT_SET_ALL 0xff01
+/** ssid match */
+#define MWIFIEX_BGSCAN_SSID_MATCH 0x0001
+/** ssid match and RSSI exceeded */
+#define MWIFIEX_BGSCAN_SSID_RSSI_MATCH 0x0004
+/** wait for all channel scans to complete before reporting the scan result */
+#define MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE 0x80000000
+
+struct mwifiex_bg_scan_cfg {
+ u16 action;
+ u8 enable;
+ u8 bss_type;
+ u8 chan_per_scan;
+ u32 scan_interval;
+ u32 report_condition;
+ u8 num_probes;
+ u8 rssi_threshold;
+ u8 snr_threshold;
+ u16 repeat_count;
+ u16 start_later;
+ struct cfg80211_match_set *ssid_list;
+ u8 num_ssids;
+ struct mwifiex_user_scan_chan chan_list[MWIFIEX_BG_SCAN_CHAN_MAX];
+ u16 scan_chan_gap;
+} __packed;
+
struct ie_body {
u8 grp_key_oui[4];
u8 ptk_cnt[2];
@@ -1470,6 +1536,20 @@ struct mwifiex_ie_types_bss_scan_info {
__le64 tsf;
} __packed;
+struct host_cmd_ds_802_11_bg_scan_config {
+ __le16 action;
+ u8 enable;
+ u8 bss_type;
+ u8 chan_per_scan;
+ u8 reserved;
+ __le16 reserved1;
+ __le32 scan_interval;
+ __le32 reserved2;
+ __le32 report_condition;
+ __le16 reserved3;
+ u8 tlv[0];
+} __packed;
+
struct host_cmd_ds_802_11_bg_scan_query {
u8 flush;
} __packed;
@@ -2099,6 +2179,10 @@ struct host_cmd_ds_robust_coex {
__le16 reserved;
} __packed;
+struct host_cmd_ds_wakeup_reason {
+ u16 wakeup_reason;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2124,6 +2208,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_802_11_scan scan;
struct host_cmd_ds_802_11_scan_ext ext_scan;
struct host_cmd_ds_802_11_scan_rsp scan_resp;
+ struct host_cmd_ds_802_11_bg_scan_config bg_scan_config;
struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
struct host_cmd_ds_802_11_associate associate;
@@ -2170,6 +2255,7 @@ struct host_cmd_ds_command {
struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
struct host_cmd_ds_multi_chan_policy mc_policy;
struct host_cmd_ds_robust_coex coex;
+ struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
} params;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 6f7876ec31b7..517653b3adab 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -741,8 +741,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
u32 poll_num = 1;
if (adapter->if_ops.check_fw_status) {
- adapter->winner = 0;
-
/* check if firmware is already running */
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
if (!ret) {
@@ -750,13 +748,23 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
"WLAN FW already running! Skip FW dnld\n");
return 0;
}
+ }
+
+ /* check if we are the winner for downloading FW */
+ if (adapter->if_ops.check_winner_status) {
+ adapter->winner = 0;
+ ret = adapter->if_ops.check_winner_status(adapter);
poll_num = MAX_FIRMWARE_POLL_TRIES;
+ if (ret) {
+ mwifiex_dbg(adapter, MSG,
+ "WLAN read winner status failed!\n");
+ return ret;
+ }
- /* check if we are the winner for downloading FW */
if (!adapter->winner) {
mwifiex_dbg(adapter, MSG,
- "FW already running! Skip FW dnld\n");
+ "WLAN is not the winner! Skip FW dnld\n");
goto poll_fw;
}
}
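
The firmware-status check and the winner check are now independent interface
callbacks; condensed, the download decision in mwifiex_dnld_fw() becomes
(a sketch of the code above, error messages abbreviated):

    /* 1. skip download entirely if firmware is already up */
    if (adapter->if_ops.check_fw_status &&
        !adapter->if_ops.check_fw_status(adapter, 1))
            return 0;

    /* 2. otherwise arbitrate: only the "winner" interface downloads */
    if (adapter->if_ops.check_winner_status) {
            ret = adapter->if_ops.check_winner_status(adapter);
            if (ret)
                    return ret;
            if (!adapter->winner)
                    goto poll_fw;   /* wait for the winner to finish */
    }
    /* ...winner path: download firmware, then fall through to poll_fw */
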
diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
index 4f0174c64946..a5a48c183d37 100644
--- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
+++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
@@ -184,6 +184,7 @@ struct mwifiex_ds_tx_ba_stream_tbl {
};
#define DBG_CMD_NUM 5
+#define MWIFIEX_DBG_SDIO_MP_NUM 10
struct tdls_peer_info {
u8 peer_addr[ETH_ALEN];
@@ -235,6 +236,11 @@ struct mwifiex_debug_info {
u8 cmd_sent;
u8 cmd_resp_received;
u8 event_received;
+ u32 last_mp_wr_bitmap[MWIFIEX_DBG_SDIO_MP_NUM];
+ u32 last_mp_wr_ports[MWIFIEX_DBG_SDIO_MP_NUM];
+ u32 last_mp_wr_len[MWIFIEX_DBG_SDIO_MP_NUM];
+ u32 last_mp_curr_wr_port[MWIFIEX_DBG_SDIO_MP_NUM];
+ u8 last_sdio_mp_index;
};
#define MWIFIEX_KEY_INDEX_UNICAST 0x40000000
@@ -271,6 +277,10 @@ struct mwifiex_ds_hs_cfg {
u32 gap;
};
+struct mwifiex_ds_wakeup_reason {
+ u16 hs_wakeup_reason;
+};
+
#define DEEP_SLEEP_ON 1
#define DEEP_SLEEP_OFF 0
#define DEEP_SLEEP_IDLE_TIME 100
@@ -414,6 +424,7 @@ struct mwifiex_ds_mef_cfg {
#define MWIFIEX_VSIE_MASK_SCAN 0x01
#define MWIFIEX_VSIE_MASK_ASSOC 0x02
#define MWIFIEX_VSIE_MASK_ADHOC 0x04
+#define MWIFIEX_VSIE_MASK_BGSCAN 0x08
enum {
MWIFIEX_FUNC_INIT = 1,
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index cc09a81dbf6a..62211fca91b7 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -644,6 +644,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc;
bool enable_data = true;
u16 cap_info, status_code, aid;
+ const u8 *ie_ptr;
+ struct ieee80211_ht_operation *assoc_resp_ht_oper;
assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
@@ -733,6 +735,19 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
= ((bss_desc->wmm_ie.qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
+ /* Store the bandwidth information from assoc response */
+ ie_ptr = cfg80211_find_ie(WLAN_EID_HT_OPERATION, assoc_rsp->ie_buffer,
+ priv->assoc_rsp_size
+ - sizeof(struct ieee_types_assoc_rsp));
+ if (ie_ptr) {
+ assoc_resp_ht_oper = (struct ieee80211_ht_operation *)(ie_ptr
+ + sizeof(struct ieee_types_header));
+ priv->assoc_resp_ht_param = assoc_resp_ht_oper->ht_param;
+ priv->ht_param_present = true;
+ } else {
+ priv->ht_param_present = false;
+ }
+
mwifiex_dbg(priv->adapter, INFO,
"info: ASSOC_RESP: curr_pkt_filter is %#x\n",
priv->curr_pkt_filter);
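
Note: cfg80211_find_ie() returns a pointer to the matching IE's two-byte
ID/length header, which is why the struct ieee80211_ht_operation payload is
taken at ie_ptr + sizeof(struct ieee_types_header); both assoc_resp_ht_param
and ht_param_present are cleared again on disconnect in the sta_event.c hunk
below.
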
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 79c16de8743e..3cfa94677a8e 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -132,6 +132,13 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
}
}
+ if (adapter->nd_info) {
+ for (i = 0; i < adapter->nd_info->n_matches; i++)
+ kfree(adapter->nd_info->matches[i]);
+ kfree(adapter->nd_info);
+ adapter->nd_info = NULL;
+ }
+
vfree(adapter->chan_stats);
kfree(adapter);
return 0;
@@ -746,6 +753,13 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
mwifiex_queue_main_work(priv->adapter);
+ if (priv->sched_scanning) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "aborting bgscan on ndo_stop\n");
+ mwifiex_stop_bg_scan(priv);
+ cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 2f7f478ce04b..aafc4ab4e5ae 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -198,6 +198,11 @@ do { \
buf, len, false); \
} while (0)
+/** Minimum BGSCAN interval: 15 seconds */
+#define MWIFIEX_BGSCAN_INTERVAL 15000
+/** default repeat count */
+#define MWIFIEX_BGSCAN_REPEAT_COUNT 6
+
struct mwifiex_dbg {
u32 num_cmd_host_to_card_failure;
u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -218,6 +223,11 @@ struct mwifiex_dbg {
u16 last_cmd_resp_index;
u16 last_event[DBG_CMD_NUM];
u16 last_event_index;
+ u32 last_mp_wr_bitmap[MWIFIEX_DBG_SDIO_MP_NUM];
+ u32 last_mp_wr_ports[MWIFIEX_DBG_SDIO_MP_NUM];
+ u32 last_mp_wr_len[MWIFIEX_DBG_SDIO_MP_NUM];
+ u32 last_mp_curr_wr_port[MWIFIEX_DBG_SDIO_MP_NUM];
+ u8 last_sdio_mp_index;
};
enum MWIFIEX_HARDWARE_STATUS {
@@ -293,6 +303,7 @@ struct mwifiex_tid_tbl {
#define WMM_HIGHEST_PRIORITY 7
#define HIGH_PRIO_TID 7
#define LOW_PRIO_TID 0
+#define MWIFIEX_WMM_DRV_DELAY_MAX 510
struct mwifiex_wmm_desc {
struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
@@ -483,26 +494,6 @@ struct mwifiex_roc_cfg {
struct ieee80211_channel chan;
};
-#define MWIFIEX_FW_DUMP_IDX 0xff
-#define MWIFIEX_DRV_INFO_IDX 20
-#define FW_DUMP_MAX_NAME_LEN 8
-#define FW_DUMP_HOST_READY 0xEE
-#define FW_DUMP_DONE 0xFF
-#define FW_DUMP_READ_DONE 0xFE
-
-struct memory_type_mapping {
- u8 mem_name[FW_DUMP_MAX_NAME_LEN];
- u8 *mem_ptr;
- u32 mem_size;
- u8 done_flag;
-};
-
-enum rdwr_status {
- RDWR_STATUS_SUCCESS = 0,
- RDWR_STATUS_FAILURE = 1,
- RDWR_STATUS_DONE = 2
-};
-
enum mwifiex_iface_work_flags {
MWIFIEX_IFACE_WORK_DEVICE_DUMP,
MWIFIEX_IFACE_WORK_CARD_RESET,
@@ -616,6 +607,7 @@ struct mwifiex_private {
spinlock_t curr_bcn_buf_lock;
struct wireless_dev wdev;
struct mwifiex_chan_freq_power cfp;
+ u32 versionstrsel;
char version_str[128];
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_dev_dir;
@@ -640,6 +632,7 @@ struct mwifiex_private {
u32 mgmt_frame_mask;
struct mwifiex_roc_cfg roc_cfg;
bool scan_aborting;
+ u8 sched_scanning;
u8 csa_chan;
unsigned long csa_expire_time;
u8 del_list_idx;
@@ -667,6 +660,8 @@ struct mwifiex_private {
struct mwifiex_ds_mem_rw mem_rw;
struct sk_buff_head bypass_txq;
struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
+ u8 assoc_resp_ht_param;
+ bool ht_param_present;
};
@@ -791,6 +786,7 @@ struct mwifiex_if_ops {
int (*init_if) (struct mwifiex_adapter *);
void (*cleanup_if) (struct mwifiex_adapter *);
int (*check_fw_status) (struct mwifiex_adapter *, u32);
+ int (*check_winner_status)(struct mwifiex_adapter *);
int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
int (*register_dev) (struct mwifiex_adapter *);
void (*unregister_dev) (struct mwifiex_adapter *);
@@ -994,6 +990,7 @@ struct mwifiex_adapter {
u8 active_scan_triggered;
bool usb_mc_status;
bool usb_mc_setup;
+ struct cfg80211_wowlan_nd_info *nd_info;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1196,6 +1193,10 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp);
int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
void *buf);
+int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf);
+int mwifiex_stop_bg_scan(struct mwifiex_private *priv);
/*
* This function checks if the queuing is RA based or not.
@@ -1417,7 +1418,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
-int mwifiex_get_ver_ext(struct mwifiex_private *priv);
+int mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel);
int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
struct ieee80211_channel *chan,
@@ -1586,6 +1587,12 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
+int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
+ int cmd_type,
+ struct mwifiex_ds_wakeup_reason *wakeup_reason);
+int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp,
+ struct host_cmd_ds_wakeup_reason *wakeup_reason);
void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 6d0dc40e20e5..de364381fe7b 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -37,17 +37,6 @@ static struct mwifiex_if_ops pcie_ops;
static struct semaphore add_remove_card_sem;
-static struct memory_type_mapping mem_type_mapping_tbl[] = {
- {"ITCM", NULL, 0, 0xF0},
- {"DTCM", NULL, 0, 0xF1},
- {"SQRAM", NULL, 0, 0xF2},
- {"IRAM", NULL, 0, 0xF3},
- {"APU", NULL, 0, 0xF4},
- {"CIU", NULL, 0, 0xF5},
- {"ICU", NULL, 0, 0xF6},
- {"MAC", NULL, 0, 0xF7},
-};
-
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
size_t size, int flags)
@@ -206,6 +195,8 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
card->pcie.blksz_fw_dl = data->blksz_fw_dl;
card->pcie.tx_buf_size = data->tx_buf_size;
card->pcie.can_dump_fw = data->can_dump_fw;
+ card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl;
+ card->pcie.num_mem_types = data->num_mem_types;
card->pcie.can_ext_scan = data->can_ext_scan;
}
@@ -323,6 +314,8 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
struct pcie_service_card *card = adapter->card;
*data = ioread32(card->pci_mmap1 + reg);
+ if (*data == 0xffffffff)
+ return 0xffffffff;
return 0;
}
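
An all-ones PCIe readback usually means the device has dropped off the bus
(e.g. surprise removal), so propagating 0xffffffff lets callers spot that
case; since the value is nonzero, callers that simply test the return value
for failure keep working unchanged, as in this sketch mirroring the firmware
dump path added below:

    u32 fw_status;

    /* a nonzero return (including 0xffffffff) is treated as a failed read */
    if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status))
            return RDWR_STATUS_FAILURE;
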
@@ -1408,7 +1401,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
}
- if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE))
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -2007,14 +2000,12 @@ done:
/*
* This function checks the firmware status in card.
- *
- * The winner interface is also determined by this function.
*/
static int
mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
{
int ret = 0;
- u32 firmware_stat, winner_status;
+ u32 firmware_stat;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 tries;
@@ -2054,19 +2045,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
}
}
- if (ret) {
- if (mwifiex_read_reg(adapter, reg->fw_status,
- &winner_status))
- ret = -1;
- else if (!winner_status) {
- mwifiex_dbg(adapter, INFO,
- "PCI-E is the winner\n");
- adapter->winner = 1;
- } else {
- mwifiex_dbg(adapter, ERROR,
- "PCI-E is not the winner <%#x,%d>, exit dnld\n",
- ret, adapter->winner);
- }
+ return ret;
+}
+
+/* This function checks if WLAN is the winner.
+ */
+static int
+mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
+{
+ u32 winner = 0;
+ int ret = 0;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) {
+ ret = -1;
+ } else if (!winner) {
+ mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n");
+ adapter->winner = 1;
+ } else {
+ mwifiex_dbg(adapter, ERROR,
+ "PCI-E is not the winner <%#x,%d>, exit dnld\n",
+ ret, adapter->winner);
}
return ret;
@@ -2075,20 +2075,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
/*
* This function reads the interrupt status from card.
*/
-static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter,
+ int msg_id)
{
u32 pcie_ireg;
unsigned long flags;
+ struct pcie_service_card *card = adapter->card;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
return;
- if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) {
- mwifiex_dbg(adapter, ERROR, "Read register failed\n");
- return;
- }
+ if (card->msix_enable && msg_id >= 0) {
+ pcie_ireg = BIT(msg_id);
+ } else {
+ if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
+ &pcie_ireg)) {
+ mwifiex_dbg(adapter, ERROR, "Read register failed\n");
+ return;
+ }
+
+ if ((pcie_ireg == 0xFFFFFFFF) || !pcie_ireg)
+ return;
- if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
mwifiex_pcie_disable_host_int(adapter);
@@ -2099,21 +2107,24 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
"Write register failed\n");
return;
}
- spin_lock_irqsave(&adapter->int_lock, flags);
- adapter->int_status |= pcie_ireg;
- spin_unlock_irqrestore(&adapter->int_lock, flags);
-
- if (!adapter->pps_uapsd_mode &&
- adapter->ps_state == PS_STATE_SLEEP &&
- mwifiex_pcie_ok_to_access_hw(adapter)) {
- /* Potentially for PCIe we could get other
- * interrupts like shared. Don't change power
- * state until cookie is set */
- adapter->ps_state = PS_STATE_AWAKE;
- adapter->pm_wakeup_fw_try = false;
- del_timer(&adapter->wakeup_timer);
- }
}
+
+ if (!adapter->pps_uapsd_mode &&
+ adapter->ps_state == PS_STATE_SLEEP &&
+ mwifiex_pcie_ok_to_access_hw(adapter)) {
+ /* Potentially for PCIe we could get other
+ * interrupts like shared. Don't change power
+ * state until cookie is set
+ */
+ adapter->ps_state = PS_STATE_AWAKE;
+ adapter->pm_wakeup_fw_try = false;
+ del_timer(&adapter->wakeup_timer);
+ }
+
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ adapter->int_status |= pcie_ireg;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+ mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg);
}
/*
@@ -2124,7 +2135,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
*/
static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
{
- struct pci_dev *pdev = (struct pci_dev *)context;
+ struct mwifiex_msix_context *ctx = context;
+ struct pci_dev *pdev = ctx->dev;
struct pcie_service_card *card;
struct mwifiex_adapter *adapter;
@@ -2144,7 +2156,11 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
if (adapter->surprise_removed)
goto exit;
- mwifiex_interrupt_status(adapter);
+ if (card->msix_enable)
+ mwifiex_interrupt_status(adapter, ctx->msg_id);
+ else
+ mwifiex_interrupt_status(adapter, -1);
+
mwifiex_queue_main_work(adapter);
exit:
@@ -2164,7 +2180,7 @@ exit:
* In case of Rx packets received, the packets are uploaded from card to
* host and processed accordingly.
*/
-static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
+static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
{
int ret;
u32 pcie_ireg;
@@ -2244,6 +2260,69 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
return 0;
}
+static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter)
+{
+ int ret;
+ u32 pcie_ireg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ /* Clear out unused interrupts */
+ pcie_ireg = adapter->int_status;
+ adapter->int_status = 0;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+
+ if (pcie_ireg & HOST_INTR_DNLD_DONE) {
+ mwifiex_dbg(adapter, INTR,
+ "info: TX DNLD Done\n");
+ ret = mwifiex_pcie_send_data_complete(adapter);
+ if (ret)
+ return ret;
+ }
+ if (pcie_ireg & HOST_INTR_UPLD_RDY) {
+ mwifiex_dbg(adapter, INTR,
+ "info: Rx DATA\n");
+ ret = mwifiex_pcie_process_recv_data(adapter);
+ if (ret)
+ return ret;
+ }
+ if (pcie_ireg & HOST_INTR_EVENT_RDY) {
+ mwifiex_dbg(adapter, INTR,
+ "info: Rx EVENT\n");
+ ret = mwifiex_pcie_process_event_ready(adapter);
+ if (ret)
+ return ret;
+ }
+
+ if (pcie_ireg & HOST_INTR_CMD_DONE) {
+ if (adapter->cmd_sent) {
+ mwifiex_dbg(adapter, INTR,
+ "info: CMD sent Interrupt\n");
+ adapter->cmd_sent = false;
+ }
+ /* Handle command response */
+ ret = mwifiex_pcie_process_cmd_complete(adapter);
+ if (ret)
+ return ret;
+ }
+
+ mwifiex_dbg(adapter, INTR,
+ "info: cmd_sent=%d data_sent=%d\n",
+ adapter->cmd_sent, adapter->data_sent);
+
+ return 0;
+}
+
+static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+
+ if (card->msix_enable)
+ return mwifiex_process_msix_int(adapter);
+ else
+ return mwifiex_process_pcie_int(adapter);
+}
+
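
Design note: with MSI-X each vector is bound to a single interrupt cause, so
the handler can derive the pending bit straight from the vector's msg_id
(BIT(msg_id)) and skip the PCIE_HOST_INT_STATUS register read; the shared
INTx/MSI path passes msg_id = -1 and keeps the old read-and-ack sequence.
mwifiex_process_int_status() above then simply dispatches on
card->msix_enable.
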
/*
* This function downloads data from driver to card.
*
@@ -2278,10 +2357,15 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
{
int ret, tries;
u8 ctrl_data;
+ u32 fw_status;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY);
+ if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status))
+ return RDWR_STATUS_FAILURE;
+
+ ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
+ reg->fw_dump_host_ready);
if (ret) {
mwifiex_dbg(adapter, ERROR,
"PCIE write err\n");
@@ -2294,11 +2378,11 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
return RDWR_STATUS_SUCCESS;
if (doneflag && ctrl_data == doneflag)
return RDWR_STATUS_DONE;
- if (ctrl_data != FW_DUMP_HOST_READY) {
+ if (ctrl_data != reg->fw_dump_host_ready) {
mwifiex_dbg(adapter, WARN,
"The ctrl reg was changed, re-try again!\n");
ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
- FW_DUMP_HOST_READY);
+ reg->fw_dump_host_ready);
if (ret) {
mwifiex_dbg(adapter, ERROR,
"PCIE write err\n");
@@ -2318,7 +2402,8 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
unsigned int reg, reg_start, reg_end;
- u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
+ u8 *dbg_ptr, *end_ptr, *tmp_ptr, fw_dump_num, dump_num;
+ u8 idx, i, read_reg, doneflag = 0;
enum rdwr_status stat;
u32 memory_size;
int ret;
@@ -2326,8 +2411,9 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
if (!card->pcie.can_dump_fw)
return;
- for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
- struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+ for (idx = 0; idx < adapter->num_mem_types; idx++) {
+ struct memory_type_mapping *entry =
+ &adapter->mem_type_mapping_tbl[idx];
if (entry->mem_ptr) {
vfree(entry->mem_ptr);
@@ -2336,7 +2422,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
entry->mem_size = 0;
}
- mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n");
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
/* Read the number of the memories which will dump */
stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
@@ -2344,28 +2430,38 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
return;
reg = creg->fw_dump_start;
- mwifiex_read_reg_byte(adapter, reg, &dump_num);
+ mwifiex_read_reg_byte(adapter, reg, &fw_dump_num);
+
+ /* W8997 chipset firmware dump is restored in a single region */
+ if (fw_dump_num == 0)
+ dump_num = 1;
+ else
+ dump_num = fw_dump_num;
/* Read the length of every memory which will dump */
for (idx = 0; idx < dump_num; idx++) {
- struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
-
- stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
- if (stat == RDWR_STATUS_FAILURE)
- return;
-
+ struct memory_type_mapping *entry =
+ &adapter->mem_type_mapping_tbl[idx];
memory_size = 0;
- reg = creg->fw_dump_start;
- for (i = 0; i < 4; i++) {
- mwifiex_read_reg_byte(adapter, reg, &read_reg);
- memory_size |= (read_reg << (i * 8));
- reg++;
+ if (fw_dump_num != 0) {
+ stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
+ if (stat == RDWR_STATUS_FAILURE)
+ return;
+
+ reg = creg->fw_dump_start;
+ for (i = 0; i < 4; i++) {
+ mwifiex_read_reg_byte(adapter, reg, &read_reg);
+ memory_size |= (read_reg << (i * 8));
+ reg++;
+ }
+ } else {
+ memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE;
}
if (memory_size == 0) {
mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n");
ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl,
- FW_DUMP_READ_DONE);
+ creg->fw_dump_read_done);
if (ret) {
mwifiex_dbg(adapter, ERROR, "PCIE write err\n");
return;
@@ -2400,11 +2496,21 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
mwifiex_read_reg_byte(adapter, reg, dbg_ptr);
if (dbg_ptr < end_ptr) {
dbg_ptr++;
- } else {
- mwifiex_dbg(adapter, ERROR,
- "Allocated buf not enough\n");
- return;
+ continue;
}
+ mwifiex_dbg(adapter, ERROR,
+ "pre-allocated buf not enough\n");
+ tmp_ptr =
+ vzalloc(memory_size + MWIFIEX_SIZE_4K);
+ if (!tmp_ptr)
+ return;
+ memcpy(tmp_ptr, entry->mem_ptr, memory_size);
+ vfree(entry->mem_ptr);
+ entry->mem_ptr = tmp_ptr;
+ tmp_ptr = NULL;
+ dbg_ptr = entry->mem_ptr + memory_size;
+ memory_size += MWIFIEX_SIZE_4K;
+ end_ptr = entry->mem_ptr + memory_size;
}
if (stat != RDWR_STATUS_DONE)
@@ -2416,7 +2522,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
break;
} while (true);
}
- mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n");
+ mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
}
static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
@@ -2595,10 +2701,43 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
{
- int ret;
+ int ret, i, j;
struct pcie_service_card *card = adapter->card;
struct pci_dev *pdev = card->dev;
+ if (card->pcie.reg->msix_support) {
+ for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+ card->msix_entries[i].entry = i;
+ ret = pci_enable_msix_exact(pdev, card->msix_entries,
+ MWIFIEX_NUM_MSIX_VECTORS);
+ if (!ret) {
+ for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) {
+ card->msix_ctx[i].dev = pdev;
+ card->msix_ctx[i].msg_id = i;
+
+ ret = request_irq(card->msix_entries[i].vector,
+ mwifiex_pcie_interrupt, 0,
+ "MWIFIEX_PCIE_MSIX",
+ &card->msix_ctx[i]);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ mwifiex_dbg(adapter, INFO, "request_irq fail: %d\n",
+ ret);
+ for (j = 0; j < i; j++)
+ free_irq(card->msix_entries[j].vector,
+ &card->msix_ctx[j]);
+ pci_disable_msix(pdev);
+ } else {
+ mwifiex_dbg(adapter, MSG, "MSIx enabled!");
+ card->msix_enable = 1;
+ return 0;
+ }
+ }
+ }
+
if (pci_enable_msi(pdev) != 0)
pci_disable_msi(pdev);
else
@@ -2606,8 +2745,10 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable);
+ card->share_irq_ctx.dev = pdev;
+ card->share_irq_ctx.msg_id = -1;
ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
- "MRVL_PCIE", pdev);
+ "MRVL_PCIE", &card->share_irq_ctx);
if (ret) {
pr_err("request_irq failed: ret=%d\n", ret);
adapter->card = NULL;
@@ -2635,8 +2776,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
return -1;
adapter->tx_buf_size = card->pcie.tx_buf_size;
- adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
- adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+ adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
+ adapter->num_mem_types = card->pcie.num_mem_types;
strcpy(adapter->fw_name, card->pcie.firmware);
adapter->ext_scan = card->pcie.can_ext_scan;
@@ -2653,11 +2794,28 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg;
+ struct pci_dev *pdev = card->dev;
+ int i;
if (card) {
- mwifiex_dbg(adapter, INFO,
- "%s(): calling free_irq()\n", __func__);
- free_irq(card->dev->irq, card->dev);
+ if (card->msix_enable) {
+ for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+ synchronize_irq(card->msix_entries[i].vector);
+
+ for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+ free_irq(card->msix_entries[i].vector,
+ &card->msix_ctx[i]);
+
+ card->msix_enable = 0;
+ pci_disable_msix(pdev);
+ } else {
+ mwifiex_dbg(adapter, INFO,
+ "%s(): calling free_irq()\n", __func__);
+ free_irq(card->dev->irq, &card->share_irq_ctx);
+
+ if (card->msi_enable)
+ pci_disable_msi(pdev);
+ }
reg = card->pcie.reg;
if (reg->sleep_cookie)
@@ -2675,6 +2833,7 @@ static struct mwifiex_if_ops pcie_ops = {
.init_if = mwifiex_pcie_init,
.cleanup_if = mwifiex_pcie_cleanup,
.check_fw_status = mwifiex_check_fw_status,
+ .check_winner_status = mwifiex_check_winner_status,
.prog_fw = mwifiex_prog_fw_w_helper,
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index 6fc28737b576..29e58ce877e3 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -26,6 +26,7 @@
#include <linux/pcieport_if.h>
#include <linux/interrupt.h>
+#include "decl.h"
#include "main.h"
#define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
@@ -135,6 +136,9 @@ struct mwifiex_pcie_card_reg {
u16 fw_dump_ctrl;
u16 fw_dump_start;
u16 fw_dump_end;
+ u8 fw_dump_host_ready;
+ u8 fw_dump_read_done;
+ u8 msix_support;
};
static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
@@ -166,6 +170,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
.ring_tx_start_ptr = 0,
.pfu_enabled = 0,
.sleep_cookie = 1,
+ .msix_support = 0,
};
static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
@@ -200,6 +205,9 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
.fw_dump_ctrl = 0xcf4,
.fw_dump_start = 0xcf8,
.fw_dump_end = 0xcff,
+ .fw_dump_host_ready = 0xee,
+ .fw_dump_read_done = 0xfe,
+ .msix_support = 0,
};
static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
@@ -231,6 +239,27 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
.ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
.pfu_enabled = 1,
.sleep_cookie = 0,
+ .fw_dump_ctrl = 0xcf4,
+ .fw_dump_start = 0xcf8,
+ .fw_dump_end = 0xcff,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_read_done = 0xdd,
+ .msix_support = 1,
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
+ {"ITCM", NULL, 0, 0xF0},
+ {"DTCM", NULL, 0, 0xF1},
+ {"SQRAM", NULL, 0, 0xF2},
+ {"IRAM", NULL, 0, 0xF3},
+ {"APU", NULL, 0, 0xF4},
+ {"CIU", NULL, 0, 0xF5},
+ {"ICU", NULL, 0, 0xF6},
+ {"MAC", NULL, 0, 0xF7},
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
+ {"DUMP", NULL, 0, 0xDD},
};
struct mwifiex_pcie_device {
@@ -239,6 +268,8 @@ struct mwifiex_pcie_device {
u16 blksz_fw_dl;
u16 tx_buf_size;
bool can_dump_fw;
+ struct memory_type_mapping *mem_type_mapping_tbl;
+ u8 num_mem_types;
bool can_ext_scan;
};
@@ -257,6 +288,8 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
.can_dump_fw = true,
+ .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897,
+ .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897),
.can_ext_scan = true,
};
@@ -265,7 +298,9 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
.reg = &mwifiex_reg_8997,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .can_dump_fw = false,
+ .can_dump_fw = true,
+ .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997,
+ .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997),
.can_ext_scan = true,
};
@@ -290,6 +325,13 @@ struct mwifiex_pfu_buf_desc {
u32 reserved;
} __packed;
+#define MWIFIEX_NUM_MSIX_VECTORS 4
+
+struct mwifiex_msix_context {
+ struct pci_dev *dev;
+ u16 msg_id;
+};
+
struct pcie_service_card {
struct pci_dev *dev;
struct mwifiex_adapter *adapter;
@@ -327,6 +369,12 @@ struct pcie_service_card {
void __iomem *pci_mmap;
void __iomem *pci_mmap1;
int msi_enable;
+ int msix_enable;
+#ifdef CONFIG_PCI
+ struct msix_entry msix_entries[MWIFIEX_NUM_MSIX_VECTORS];
+#endif
+ struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS];
+ struct mwifiex_msix_context share_irq_ctx;
};
static inline int
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index c20017ced566..489f7a911a83 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -547,6 +547,61 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
return chan_idx;
}
+/* This function creates a channel list tlv for bgscan config, based
+ * on region/band information.
+ */
+static int
+mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv,
+ const struct mwifiex_bg_scan_cfg
+ *bgscan_cfg_in,
+ struct mwifiex_chan_scan_param_set
+ *scan_chan_list)
+{
+ enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ int chan_idx = 0, i;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!priv->wdev.wiphy->bands[band])
+ continue;
+
+ sband = priv->wdev.wiphy->bands[band];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (ch->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+ scan_chan_list[chan_idx].radio_type = band;
+
+ if (bgscan_cfg_in->chan_list[0].scan_time)
+ scan_chan_list[chan_idx].max_scan_time =
+ cpu_to_le16((u16)bgscan_cfg_in->
+ chan_list[0].scan_time);
+ else if (ch->flags & IEEE80211_CHAN_NO_IR)
+ scan_chan_list[chan_idx].max_scan_time =
+ cpu_to_le16(adapter->passive_scan_time);
+ else
+ scan_chan_list[chan_idx].max_scan_time =
+ cpu_to_le16(adapter->
+ specific_scan_time);
+
+ if (ch->flags & IEEE80211_CHAN_NO_IR)
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
+ |= MWIFIEX_PASSIVE_SCAN;
+ else
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
+ &= ~MWIFIEX_PASSIVE_SCAN;
+
+ scan_chan_list[chan_idx].chan_number =
+ (u32)ch->hw_value;
+ chan_idx++;
+ }
+ }
+ return chan_idx;
+}
+
/* This function appends rate TLV to scan config command. */
static int
mwifiex_append_rate_tlv(struct mwifiex_private *priv,
@@ -2037,6 +2092,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
u8 is_bgscan_resp;
__le64 fw_tsf = 0;
u8 *radio_type;
+ struct cfg80211_wowlan_nd_match *pmatch;
+ struct cfg80211_sched_scan_request *nd_config = NULL;
is_bgscan_resp = (le16_to_cpu(resp->command)
== HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -2099,6 +2156,21 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
(struct mwifiex_ie_types_data **)
&chan_band_tlv);
+#ifdef CONFIG_PM
+ if (priv->wdev.wiphy->wowlan_config)
+ nd_config = priv->wdev.wiphy->wowlan_config->nd_config;
+#endif
+
+ if (nd_config) {
+ adapter->nd_info =
+ kzalloc(sizeof(struct cfg80211_wowlan_nd_info) +
+ sizeof(struct cfg80211_wowlan_nd_match *) *
+ scan_rsp->number_of_sets, GFP_ATOMIC);
+
+ if (adapter->nd_info)
+ adapter->nd_info->n_matches = scan_rsp->number_of_sets;
+ }
+
for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
/*
* If the TSF TLV was appended to the scan results, save this
@@ -2117,6 +2189,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
radio_type = NULL;
}
+ if (chan_band_tlv && adapter->nd_info) {
+ adapter->nd_info->matches[idx] =
+ kzalloc(sizeof(*pmatch) +
+ sizeof(u32), GFP_ATOMIC);
+
+ pmatch = adapter->nd_info->matches[idx];
+
+ if (pmatch) {
+ pmatch->n_channels = 1;
+ pmatch->channels[0] =
+ chan_band->chan_number;
+ }
+ }
+
ret = mwifiex_parse_single_response_buf(priv, &bss_info,
&bytes_left,
le64_to_cpu(fw_tsf),
@@ -2155,6 +2244,227 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
return 0;
}
+/* This function prepares a background scan config command to be sent
+ * to the firmware.
+ */
+int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_802_11_bg_scan_config *bgscan_config =
+ &cmd->params.bg_scan_config;
+ struct mwifiex_bg_scan_cfg *bgscan_cfg_in = data_buf;
+ u8 *tlv_pos = bgscan_config->tlv;
+ u8 num_probes;
+ u32 ssid_len, chan_idx, scan_type, scan_dur, chan_num;
+ int i;
+ struct mwifiex_ie_types_num_probes *num_probes_tlv;
+ struct mwifiex_ie_types_repeat_count *repeat_count_tlv;
+ struct mwifiex_ie_types_min_rssi_threshold *rssi_threshold_tlv;
+ struct mwifiex_ie_types_bgscan_start_later *start_later_tlv;
+ struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
+ struct mwifiex_ie_types_chan_list_param_set *chan_list_tlv;
+ struct mwifiex_chan_scan_param_set *temp_chan;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_CONFIG);
+ cmd->size = cpu_to_le16(sizeof(*bgscan_config) + S_DS_GEN);
+
+ bgscan_config->action = cpu_to_le16(bgscan_cfg_in->action);
+ bgscan_config->enable = bgscan_cfg_in->enable;
+ bgscan_config->bss_type = bgscan_cfg_in->bss_type;
+ bgscan_config->scan_interval =
+ cpu_to_le32(bgscan_cfg_in->scan_interval);
+ bgscan_config->report_condition =
+ cpu_to_le32(bgscan_cfg_in->report_condition);
+
+ /* stop sched scan */
+ if (!bgscan_config->enable)
+ return 0;
+
+ bgscan_config->chan_per_scan = bgscan_cfg_in->chan_per_scan;
+
+ num_probes = (bgscan_cfg_in->num_probes ? bgscan_cfg_in->
+ num_probes : priv->adapter->scan_probes);
+
+ if (num_probes) {
+ num_probes_tlv = (struct mwifiex_ie_types_num_probes *)tlv_pos;
+ num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
+ num_probes_tlv->header.len =
+ cpu_to_le16(sizeof(num_probes_tlv->num_probes));
+ num_probes_tlv->num_probes = cpu_to_le16((u16)num_probes);
+
+ tlv_pos += sizeof(num_probes_tlv->header) +
+ le16_to_cpu(num_probes_tlv->header.len);
+ }
+
+ if (bgscan_cfg_in->repeat_count) {
+ repeat_count_tlv =
+ (struct mwifiex_ie_types_repeat_count *)tlv_pos;
+ repeat_count_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_REPEAT_COUNT);
+ repeat_count_tlv->header.len =
+ cpu_to_le16(sizeof(repeat_count_tlv->repeat_count));
+ repeat_count_tlv->repeat_count =
+ cpu_to_le16(bgscan_cfg_in->repeat_count);
+
+ tlv_pos += sizeof(repeat_count_tlv->header) +
+ le16_to_cpu(repeat_count_tlv->header.len);
+ }
+
+ if (bgscan_cfg_in->rssi_threshold) {
+ rssi_threshold_tlv =
+ (struct mwifiex_ie_types_min_rssi_threshold *)tlv_pos;
+ rssi_threshold_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_RSSI_LOW);
+ rssi_threshold_tlv->header.len =
+ cpu_to_le16(sizeof(rssi_threshold_tlv->rssi_threshold));
+ rssi_threshold_tlv->rssi_threshold =
+ cpu_to_le16(bgscan_cfg_in->rssi_threshold);
+
+ tlv_pos += sizeof(rssi_threshold_tlv->header) +
+ le16_to_cpu(rssi_threshold_tlv->header.len);
+ }
+
+ for (i = 0; i < bgscan_cfg_in->num_ssids; i++) {
+ ssid_len = bgscan_cfg_in->ssid_list[i].ssid.ssid_len;
+
+ wildcard_ssid_tlv =
+ (struct mwifiex_ie_types_wildcard_ssid_params *)tlv_pos;
+ wildcard_ssid_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_WILDCARDSSID);
+ wildcard_ssid_tlv->header.len = cpu_to_le16(
+ (u16)(ssid_len + sizeof(wildcard_ssid_tlv->
+ max_ssid_length)));
+
+ /* max_ssid_length = 0 tells firmware to perform
+ * specific scan for the SSID filled, whereas
+ * max_ssid_length = IEEE80211_MAX_SSID_LEN is for
+ * wildcard scan.
+ */
+ if (ssid_len)
+ wildcard_ssid_tlv->max_ssid_length = 0;
+ else
+ wildcard_ssid_tlv->max_ssid_length =
+ IEEE80211_MAX_SSID_LEN;
+
+ memcpy(wildcard_ssid_tlv->ssid,
+ bgscan_cfg_in->ssid_list[i].ssid.ssid, ssid_len);
+
+ tlv_pos += (sizeof(wildcard_ssid_tlv->header)
+ + le16_to_cpu(wildcard_ssid_tlv->header.len));
+ }
+
+ chan_list_tlv = (struct mwifiex_ie_types_chan_list_param_set *)tlv_pos;
+
+ if (bgscan_cfg_in->chan_list[0].chan_number) {
+ dev_dbg(priv->adapter->dev, "info: bgscan: Using supplied channel list\n");
+
+ chan_list_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
+
+ for (chan_idx = 0;
+ chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX &&
+ bgscan_cfg_in->chan_list[chan_idx].chan_number;
+ chan_idx++) {
+ temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
+
+ /* Increment the TLV header length by size appended */
+ le16_add_cpu(&chan_list_tlv->header.len,
+ sizeof(chan_list_tlv->chan_scan_param));
+
+ temp_chan->chan_number =
+ bgscan_cfg_in->chan_list[chan_idx].chan_number;
+ temp_chan->radio_type =
+ bgscan_cfg_in->chan_list[chan_idx].radio_type;
+
+ scan_type =
+ bgscan_cfg_in->chan_list[chan_idx].scan_type;
+
+ if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+ temp_chan->chan_scan_mode_bitmap
+ |= MWIFIEX_PASSIVE_SCAN;
+ else
+ temp_chan->chan_scan_mode_bitmap
+ &= ~MWIFIEX_PASSIVE_SCAN;
+
+ if (bgscan_cfg_in->chan_list[chan_idx].scan_time) {
+ scan_dur = (u16)bgscan_cfg_in->
+ chan_list[chan_idx].scan_time;
+ } else {
+ scan_dur = (scan_type ==
+ MWIFIEX_SCAN_TYPE_PASSIVE) ?
+ priv->adapter->passive_scan_time :
+ priv->adapter->specific_scan_time;
+ }
+
+ temp_chan->min_scan_time = cpu_to_le16(scan_dur);
+ temp_chan->max_scan_time = cpu_to_le16(scan_dur);
+ }
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: bgscan: Creating full region channel list\n");
+ chan_num =
+ mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in,
+ chan_list_tlv->
+ chan_scan_param);
+ le16_add_cpu(&chan_list_tlv->header.len,
+ chan_num *
+ sizeof(chan_list_tlv->chan_scan_param[0]));
+ }
+
+ tlv_pos += (sizeof(chan_list_tlv->header)
+ + le16_to_cpu(chan_list_tlv->header.len));
+
+ if (bgscan_cfg_in->start_later) {
+ start_later_tlv =
+ (struct mwifiex_ie_types_bgscan_start_later *)tlv_pos;
+ start_later_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_BGSCAN_START_LATER);
+ start_later_tlv->header.len =
+ cpu_to_le16(sizeof(start_later_tlv->start_later));
+ start_later_tlv->start_later =
+ cpu_to_le16(bgscan_cfg_in->start_later);
+
+ tlv_pos += sizeof(start_later_tlv->header) +
+ le16_to_cpu(start_later_tlv->header.len);
+ }
+
+ /* Append vendor specific IE TLV */
+ mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos);
+
+ le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv);
+
+ return 0;
+}
+
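
Every optional parameter above follows the same append pattern over the
generic TLV header; schematically (tlv_type, payload and payload_len are
placeholders, not names from the patch):

    struct mwifiex_ie_types_header *hdr =
            (struct mwifiex_ie_types_header *)tlv_pos;

    hdr->type = cpu_to_le16(tlv_type);
    hdr->len = cpu_to_le16(payload_len);
    memcpy(hdr + 1, payload, payload_len);

    /* advance past header + payload; cmd->size grows by the total at the end */
    tlv_pos += sizeof(*hdr) + le16_to_cpu(hdr->len);
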
+int mwifiex_stop_bg_scan(struct mwifiex_private *priv)
+{
+ struct mwifiex_bg_scan_cfg *bgscan_cfg;
+
+ if (!priv->sched_scanning) {
+ dev_dbg(priv->adapter->dev, "bgscan already stopped!\n");
+ return 0;
+ }
+
+ bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL);
+ if (!bgscan_cfg)
+ return -ENOMEM;
+
+ bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA;
+ bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET;
+ bgscan_cfg->enable = false;
+
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG,
+ HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) {
+ kfree(bgscan_cfg);
+ return -EFAULT;
+ }
+
+ kfree(bgscan_cfg);
+ priv->sched_scanning = false;
+
+ return 0;
+}
+
static void
mwifiex_update_chan_statistics(struct mwifiex_private *priv,
struct mwifiex_ietypes_chanstats *tlv_stat)
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 4c8cae682c89..b2c839ae2c3c 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
/* Disable Host Sleep */
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
- MWIFIEX_ASYNC_CMD);
+ MWIFIEX_SYNC_CMD);
return 0;
}
@@ -1039,19 +1039,14 @@ done:
/*
* This function checks the firmware status in card.
- *
- * The winner interface is also determined by this function.
*/
static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
u32 poll_num)
{
- struct sdio_mmc_card *card = adapter->card;
int ret = 0;
u16 firmware_stat;
u32 tries;
- u8 winner_status;
- /* Wait for firmware initialization event */
for (tries = 0; tries < poll_num; tries++) {
ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
if (ret)
@@ -1065,16 +1060,25 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
}
}
- if (ret) {
- if (mwifiex_read_reg
- (adapter, card->reg->status_reg_0, &winner_status))
- winner_status = 0;
+ return ret;
+}
+
+/* This function checks if WLAN is the winner.
+ */
+static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
+{
+ int ret = 0;
+ u8 winner = 0;
+ struct sdio_mmc_card *card = adapter->card;
+
+ if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner))
+ return -1;
+
+ if (winner)
+ adapter->winner = 0;
+ else
+ adapter->winner = 1;
- if (winner_status)
- adapter->winner = 0;
- else
- adapter->winner = 1;
- }
return ret;
}
@@ -1351,6 +1355,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
card->mpa_rx.start_port;
}
+ if (card->mpa_rx.pkt_cnt == 1)
+ mport = adapter->ioport + port;
+
if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf,
card->mpa_rx.buf_len, mport, 1))
goto error;
@@ -1680,6 +1687,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
s32 f_precopy_cur_buf = 0;
s32 f_postcopy_cur_buf = 0;
u32 mport;
+ int index;
if (!card->mpa_tx.enabled ||
(card->has_control_mask && (port == CTRL_PORT)) ||
@@ -1781,9 +1789,21 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
card->mpa_tx.start_port;
}
+ if (card->mpa_tx.pkt_cnt == 1)
+ mport = adapter->ioport + port;
+
ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf,
card->mpa_tx.buf_len, mport);
+ /* Save the last multi-port tx aggregation info to the debug log */
+ index = adapter->dbg.last_sdio_mp_index;
+ index = (index + 1) % MWIFIEX_DBG_SDIO_MP_NUM;
+ adapter->dbg.last_sdio_mp_index = index;
+ adapter->dbg.last_mp_wr_ports[index] = mport;
+ adapter->dbg.last_mp_wr_bitmap[index] = card->mp_wr_bitmap;
+ adapter->dbg.last_mp_wr_len[index] = card->mpa_tx.buf_len;
+ adapter->dbg.last_mp_curr_wr_port[index] = card->curr_wr_port;
+
MP_TX_AGGR_BUF_RESET(card);
}
@@ -2620,6 +2640,7 @@ static struct mwifiex_if_ops sdio_ops = {
.init_if = mwifiex_init_sdio,
.cleanup_if = mwifiex_cleanup_sdio,
.check_fw_status = mwifiex_check_fw_status,
+ .check_winner_status = mwifiex_check_winner_status,
.prog_fw = mwifiex_prog_fw_w_helper,
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index e486867a4c67..30f152601c57 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1813,6 +1813,22 @@ static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd,
return 0;
}
+/* This function prepares command to get HS wakeup reason.
+ *
+ * Preparation includes -
+ * - Setting command ID, action and proper size
+ * - Ensuring correct endian-ness
+ */
+static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd)
+{
+ cmd->command = cpu_to_le16(HostCmd_CMD_HS_WAKEUP_REASON);
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_wakeup_reason) +
+ S_DS_GEN);
+
+ return 0;
+}
+
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1873,6 +1889,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_802_11_SCAN:
ret = mwifiex_cmd_802_11_scan(cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_802_11_BG_SCAN_CONFIG:
+ ret = mwifiex_cmd_802_11_bg_scan_config(priv, cmd_ptr,
+ data_buf);
+ break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_cmd_802_11_bg_scan_query(cmd_ptr);
break;
@@ -2063,6 +2083,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_HS_WAKEUP_REASON:
+ ret = mwifiex_cmd_get_wakeup_reason(priv, cmd_ptr);
+ break;
case HostCmd_CMD_MC_POLICY:
ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
data_buf);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 9ac7aa2431b4..d96523e10eb4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1076,9 +1076,12 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_ret_802_11_scan(priv, resp);
+ cfg80211_sched_scan_results(priv->wdev.wiphy);
mwifiex_dbg(adapter, CMD,
"info: CMD_RESP: BG_SCAN result is ready!\n");
break;
+ case HostCmd_CMD_802_11_BG_SCAN_CONFIG:
+ break;
case HostCmd_CMD_TXPWR_CFG:
ret = mwifiex_ret_tx_power_cfg(priv, resp);
break;
@@ -1233,6 +1236,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
break;
+ case HostCmd_CMD_HS_WAKEUP_REASON:
+ ret = mwifiex_ret_wakeup_reason(priv, resp, data_buf);
+ break;
case HostCmd_CMD_TDLS_CONFIG:
break;
case HostCmd_CMD_ROBUST_COEX:
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index ff3ee9dfbbd5..070bce401151 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -92,6 +92,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->is_data_rate_auto = true;
priv->data_rate = 0;
+ priv->assoc_resp_ht_param = 0;
+ priv->ht_param_present = false;
+
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && priv->hist_data)
mwifiex_hist_data_reset(priv);
@@ -607,11 +610,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_PS_AWAKE:
mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
- if (!adapter->pps_uapsd_mode && priv->port_open &&
+ if (!adapter->pps_uapsd_mode &&
+ (priv->port_open ||
+ (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
priv->media_connected && adapter->sleep_period.period) {
- adapter->pps_uapsd_mode = true;
- mwifiex_dbg(adapter, EVENT,
- "event: PPS/UAPSD mode activated\n");
+ adapter->pps_uapsd_mode = true;
+ mwifiex_dbg(adapter, EVENT,
+ "event: PPS/UAPSD mode activated\n");
}
adapter->tx_lock_flag = false;
if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -686,6 +691,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
+ case EVENT_BG_SCAN_STOPPED:
+ dev_dbg(adapter->dev, "event: BGS_STOPPED\n");
+ cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+ if (priv->sched_scanning)
+ priv->sched_scanning = false;
+ break;
+
case EVENT_PORT_RELEASE:
mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
priv->port_open = true;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 6a4fc5d183cf..d5c56eb9e985 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -314,6 +314,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
mwifiex_dbg(adapter, ERROR,
"Attempt to reconnect on csa closed chan(%d)\n",
bss_desc->channel);
+ ret = -1;
goto done;
}
@@ -504,6 +505,20 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
}
}
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+ if (priv && priv->sched_scanning) {
+#ifdef CONFIG_PM
+ if (!priv->wdev.wiphy->wowlan_config ||
+ !priv->wdev.wiphy->wowlan_config->nd_config) {
+#endif
+ mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
+ mwifiex_stop_bg_scan(priv);
+ cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+#ifdef CONFIG_PM
+ }
+#endif
+ }
+
if (adapter->hs_activated) {
mwifiex_dbg(adapter, CMD,
"cmd: HS Already activated\n");
@@ -1114,11 +1129,12 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
* with requisite parameters and calls the IOCTL handler.
*/
int
-mwifiex_get_ver_ext(struct mwifiex_private *priv)
+mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel)
{
struct mwifiex_ver_ext ver_ext;
memset(&ver_ext, 0, sizeof(ver_ext));
+ ver_ext.version_str_sel = version_str_sel;
if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
return -1;
@@ -1450,3 +1466,19 @@ mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
return 0;
}
+
+/* This function gets the Host Sleep wakeup reason.
+ */
+int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
+ int cmd_type,
+ struct mwifiex_ds_wakeup_reason *wakeup_reason)
+{
+ int status = 0;
+
+ status = mwifiex_send_cmd(priv, HostCmd_CMD_HS_WAKEUP_REASON,
+ HostCmd_ACT_GEN_GET, 0, wakeup_reason,
+ cmd_type == MWIFIEX_SYNC_CMD);
+
+ return status;
+}
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 9275f9c3f869..150649602e98 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -680,6 +680,13 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
__net_timestamp(skb);
mwifiex_queue_tx_pkt(priv, skb);
+ /* Delay 10ms to make sure tdls setup confirm/teardown frame
+ * is received by peer
+ */
+ if (action_code == WLAN_TDLS_SETUP_CONFIRM ||
+ action_code == WLAN_TDLS_TEARDOWN)
+ msleep_interruptible(10);
+
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index e791166d90c4..16d95b22fe5c 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -192,7 +192,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
}
priv->ap_11n_enabled = 1;
} else {
- memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
+ memset(&bss_cfg->ht_cap, 0, sizeof(struct ieee80211_ht_cap));
bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index e43aff932360..05108618430d 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -244,9 +244,9 @@ setup_for_next:
if (card->rx_cmd_ep == context->ep) {
mwifiex_usb_submit_rx_urb(context, size);
} else {
- if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING){
+ if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) {
mwifiex_usb_submit_rx_urb(context, size);
- }else{
+ } else {
context->skb = NULL;
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 0cec8a64473e..6681be0511c7 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -78,6 +78,16 @@ static struct mwifiex_debug_data items[] = {
item_addr(last_event), DBG_CMD_NUM},
{"last_event_index", item_size(last_event_index),
item_addr(last_event_index), 1},
+ {"last_mp_wr_bitmap", item_size(last_mp_wr_bitmap),
+ item_addr(last_mp_wr_bitmap), MWIFIEX_DBG_SDIO_MP_NUM},
+ {"last_mp_wr_ports", item_size(last_mp_wr_ports),
+ item_addr(last_mp_wr_ports), MWIFIEX_DBG_SDIO_MP_NUM},
+ {"last_mp_wr_len", item_size(last_mp_wr_len),
+ item_addr(last_mp_wr_len), MWIFIEX_DBG_SDIO_MP_NUM},
+ {"last_mp_curr_wr_port", item_size(last_mp_curr_wr_port),
+ item_addr(last_mp_curr_wr_port), MWIFIEX_DBG_SDIO_MP_NUM},
+ {"last_sdio_mp_index", item_size(last_sdio_mp_index),
+ item_addr(last_sdio_mp_index), 1},
{"num_cmd_h2c_fail", item_size(num_cmd_host_to_card_failure),
item_addr(num_cmd_host_to_card_failure), 1},
{"num_cmd_sleep_cfm_fail",
@@ -233,6 +243,16 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
memcpy(info->last_event, adapter->dbg.last_event,
sizeof(adapter->dbg.last_event));
info->last_event_index = adapter->dbg.last_event_index;
+ memcpy(info->last_mp_wr_bitmap, adapter->dbg.last_mp_wr_bitmap,
+ sizeof(adapter->dbg.last_mp_wr_bitmap));
+ memcpy(info->last_mp_wr_ports, adapter->dbg.last_mp_wr_ports,
+ sizeof(adapter->dbg.last_mp_wr_ports));
+ memcpy(info->last_mp_curr_wr_port,
+ adapter->dbg.last_mp_curr_wr_port,
+ sizeof(adapter->dbg.last_mp_curr_wr_port));
+ memcpy(info->last_mp_wr_len, adapter->dbg.last_mp_wr_len,
+ sizeof(adapter->dbg.last_mp_wr_len));
+ info->last_sdio_mp_index = adapter->dbg.last_sdio_mp_index;
info->data_sent = adapter->data_sent;
info->cmd_sent = adapter->cmd_sent;
info->cmd_resp_received = adapter->cmd_resp_received;
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index acccd6734e3b..0eb246502e1d 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -438,6 +438,7 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
mwifiex_set_ba_params(priv);
mwifiex_reset_11n_rx_seq_num(priv);
+ priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
atomic_set(&priv->wmm.tx_pkts_queued, 0);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
@@ -475,7 +476,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
priv = adapter->priv[i];
if (!priv)
continue;
- if (!priv->port_open)
+ if (!priv->port_open &&
+ (priv->bss_mode != NL80211_IFTYPE_ADHOC))
continue;
if (adapter->if_ops.is_port_ready &&
!adapter->if_ops.is_port_ready(priv))
@@ -1099,7 +1101,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
- if (!priv_tmp->port_open ||
+ if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
+ !priv_tmp->port_open) ||
(atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
continue;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 30e3aaae32e2..088429d0a634 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5421,11 +5421,13 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
static int
mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
-
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 buf_size = params->buf_size;
int i, rc = 0;
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_ampdu_stream *stream;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index f715eee39851..e70dd9523911 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -334,11 +334,13 @@ static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
static int
mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
- bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct mt7601u_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
WARN_ON(msta->wcid.idx > GROUP_WCID(0));
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
index fbb1986eda3c..91c4b3427965 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mcu.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -362,7 +362,9 @@ mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
int i, ret;
ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
- if (!ivb || mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+ if (!ivb)
+ return -ENOMEM;
+ if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
ret = -ENOMEM;
goto error;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a26afcab03ed..7fa0128de7e3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -7936,10 +7936,11 @@ u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
EXPORT_SYMBOL_GPL(rt2800_get_tsf);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv;
int ret = 0;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 440790b92b19..83f1a44fb9b4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -218,9 +218,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw,
const struct ieee80211_tx_queue_params *params);
u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index bf9afbf46c1b..4b0bb6b4f6f1 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -1026,6 +1026,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0411, 0x01a2) },
{ USB_DEVICE(0x0411, 0x01ee) },
{ USB_DEVICE(0x0411, 0x01a8) },
+ { USB_DEVICE(0x0411, 0x01fd) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x002f) },
{ USB_DEVICE(0x07aa, 0x003c) },
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 26427140a963..6418620f95ff 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -107,7 +107,7 @@
* amount of bytes needed to move the data.
*/
#define ALIGN_SIZE(__skb, __header) \
- ( ((unsigned long)((__skb)->data + (__header))) & 3 )
+ (((unsigned long)((__skb)->data + (__header))) & 3)
/*
* Constants for extra TX headroom for alignment purposes.
@@ -128,14 +128,14 @@
#define SLOT_TIME 20
#define SHORT_SLOT_TIME 9
#define SIFS 10
-#define PIFS ( SIFS + SLOT_TIME )
-#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME )
-#define DIFS ( PIFS + SLOT_TIME )
-#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME )
-#define EIFS ( SIFS + DIFS + \
- GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
-#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
- GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
+#define PIFS (SIFS + SLOT_TIME)
+#define SHORT_PIFS (SIFS + SHORT_SLOT_TIME)
+#define DIFS (PIFS + SLOT_TIME)
+#define SHORT_DIFS (SHORT_PIFS + SHORT_SLOT_TIME)
+#define EIFS (SIFS + DIFS + \
+ GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10))
+#define SHORT_EIFS (SIFS + SHORT_DIFS + \
+ GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10))
enum rt2x00_chip_intf {
RT2X00_CHIP_INTF_PCI,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index 90fdb02b55e7..72ae530e4a3b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -478,7 +478,7 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
{ \
struct rt2x00debug_intf *intf = file->private_data; \
const struct rt2x00debug *debug = intf->debug; \
- char line[16]; \
+ char line[17]; \
size_t size; \
unsigned int index = intf->offset_##__name; \
__type value; \
@@ -494,7 +494,8 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
\
if (copy_from_user(line, buf, length)) \
return -EFAULT; \
- \
+ line[16] = 0; \
+ \
size = strlen(line); \
value = simple_strtoul(line, NULL, 0); \
\
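
The two-byte change above deserves a word: copy_from_user() does not NUL-terminate, so strlen() on a 16-byte buffer holding 16 user bytes would read past the end. One spare byte plus an explicit terminator bounds the scan. The same idiom as a self-contained sketch (hypothetical function, outside the rt2x00 macro):

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	static ssize_t example_write(struct file *file, const char __user *buf,
				     size_t length, loff_t *offset)
	{
		char line[17];

		if (length > sizeof(line) - 1)
			length = sizeof(line) - 1;
		if (copy_from_user(line, buf, length))
			return -EFAULT;
		line[length] = 0;	/* bound the later strlen() */

		return length;
	}
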
@@ -629,7 +630,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
data += sprintf(data, "register\tbase\twords\twordsize\n");
#define RT2X00DEBUGFS_SPRINTF_REGISTER(__name) \
{ \
- if(debug->__name.read) \
+ if (debug->__name.read) \
data += sprintf(data, __stringify(__name) \
"\t%d\t%d\t%d\n", \
debug->__name.word_base, \
@@ -699,7 +700,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \
({ \
- if(debug->__name.read) { \
+ if (debug->__name.read) { \
(__intf)->__name##_off_entry = \
debugfs_create_u32(__stringify(__name) "_offset", \
S_IRUSR | S_IWUSR, \
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.h b/drivers/net/wireless/ralink/rt2x00/rt61pci.h
index 1442075a8382..ab8641547a1f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.h
@@ -138,14 +138,14 @@
#define PAIRWISE_TA_TABLE_BASE 0x1a00
#define SHARED_KEY_ENTRY(__idx) \
- ( SHARED_KEY_TABLE_BASE + \
- ((__idx) * sizeof(struct hw_key_entry)) )
+ (SHARED_KEY_TABLE_BASE + \
+ ((__idx) * sizeof(struct hw_key_entry)))
#define PAIRWISE_KEY_ENTRY(__idx) \
- ( PAIRWISE_KEY_TABLE_BASE + \
- ((__idx) * sizeof(struct hw_key_entry)) )
+ (PAIRWISE_KEY_TABLE_BASE + \
+ ((__idx) * sizeof(struct hw_key_entry)))
#define PAIRWISE_TA_ENTRY(__idx) \
- ( PAIRWISE_TA_TABLE_BASE + \
- ((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
+ (PAIRWISE_TA_TABLE_BASE + \
+ ((__idx) * sizeof(struct hw_pairwise_ta_entry)))
struct hw_key_entry {
u8 key[16];
@@ -180,7 +180,7 @@ struct hw_pairwise_ta_entry {
#define HW_BEACON_BASE3 0x2f00
#define HW_BEACON_OFFSET(__index) \
- ( HW_BEACON_BASE0 + (__index * 0x0100) )
+ (HW_BEACON_BASE0 + (__index * 0x0100))
/*
* HOST-MCU shared memory.
@@ -1287,9 +1287,9 @@ struct hw_pairwise_ta_entry {
/*
* DMA descriptor defines.
*/
-#define TXD_DESC_SIZE ( 16 * sizeof(__le32) )
-#define TXINFO_SIZE ( 6 * sizeof(__le32) )
-#define RXD_DESC_SIZE ( 16 * sizeof(__le32) )
+#define TXD_DESC_SIZE (16 * sizeof(__le32))
+#define TXINFO_SIZE (6 * sizeof(__le32))
+#define RXD_DESC_SIZE (16 * sizeof(__le32))
/*
* TX descriptor format for TX, PRIO and Beacon Ring.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
index dd4d626aecbc..8f053c350227 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -13,7 +13,7 @@ config RTL8XXXU
This driver is under development and has a limited feature
set. In particular it does not yet support 40MHz channels
and power management. However it should have a smaller
- memory footprint than the vendor drivers and benetifs
+ memory footprint than the vendor drivers and benefits
from the in kernel mac80211 stack.
It can coexist with drivers from drivers/staging/rtl8723au,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
index 6aed923a709a..abdff458b80f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
@@ -42,7 +42,7 @@
#define DRIVER_NAME "rtl8xxxu"
-static int rtl8xxxu_debug;
+static int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
static bool rtl8xxxu_ht40_2g;
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
@@ -54,6 +54,9 @@ MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192eu_nic.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723bu_nic.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723bu_bt.bin");
module_param_named(debug, rtl8xxxu_debug, int, 0600);
MODULE_PARM_DESC(debug, "Set debug mask");
@@ -150,6 +153,37 @@ static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
{0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
};
+static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
+ {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0},
+ {0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10},
+ {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
+ {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
+ {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
+ {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
+ {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
+ {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+ {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x516, 0x0a}, {0x525, 0x4f},
+ {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50},
+ {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+ {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff},
+ {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff},
+ {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+ {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00},
+ {0x652, 0xc8}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+ {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+ {0x70a, 0x65}, {0x70b, 0x87}, {0x765, 0x18}, {0x76e, 0x04},
+ {0xffff, 0xff},
+};
+
static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0x800, 0x80040000}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
@@ -248,6 +282,107 @@ static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0xffff, 0xffffffff},
};
+static struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = {
+ {0x800, 0x80040000}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10001331}, {0x814, 0x020c3d10},
+ {0x818, 0x02200385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00190204},
+ {0x828, 0x00000000}, {0x82c, 0x00000000},
+ {0x830, 0x00000000}, {0x834, 0x00000000},
+ {0x838, 0x00000000}, {0x83c, 0x00000000},
+ {0x840, 0x00010000}, {0x844, 0x00000000},
+ {0x848, 0x00000000}, {0x84c, 0x00000000},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x569a11a9}, {0x85c, 0x01000014},
+ {0x860, 0x66f60110}, {0x864, 0x061f0649},
+ {0x868, 0x00000000}, {0x86c, 0x27272700},
+ {0x870, 0x07000760}, {0x874, 0x25004000},
+ {0x878, 0x00000808}, {0x87c, 0x00000000},
+ {0x880, 0xb0000c1c}, {0x884, 0x00000001},
+ {0x888, 0x00000000}, {0x88c, 0xccc000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x89c, 0x00706050},
+ {0x900, 0x00000000}, {0x904, 0x00000023},
+ {0x908, 0x00000000}, {0x90c, 0x81121111},
+ {0x910, 0x00000002}, {0x914, 0x00000201},
+ {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c},
+ {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f},
+ {0xa10, 0x9500bb78}, {0xa14, 0x1114d028},
+ {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+ {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+ {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+ {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+ {0xa78, 0x00000900}, {0xa7c, 0x225b0606},
+ {0xa80, 0x21806490}, {0xb2c, 0x00000000},
+ {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+ {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+ {0xc10, 0x08800000}, {0xc14, 0x40000100},
+ {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+ {0xc20, 0x00000000}, {0xc24, 0x00000000},
+ {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+ {0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
+ {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+ {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+ {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+ {0xc50, 0x69553420}, {0xc54, 0x43bc0094},
+ {0xc58, 0x00013149}, {0xc5c, 0x00250492},
+ {0xc60, 0x00000000}, {0xc64, 0x7112848b},
+ {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+ {0xc70, 0x2c7f000d}, {0xc74, 0x020610db},
+ {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+ {0xc80, 0x390000e4}, {0xc84, 0x20f60000},
+ {0xc88, 0x40000100}, {0xc8c, 0x20200000},
+ {0xc90, 0x00020e1a}, {0xc94, 0x00000000},
+ {0xc98, 0x00020e1a}, {0xc9c, 0x00007f7f},
+ {0xca0, 0x00000000}, {0xca4, 0x000300a0},
+ {0xca8, 0x00000000}, {0xcac, 0x00000000},
+ {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+ {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+ {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+ {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+ {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+ {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+ {0xce0, 0x00222222}, {0xce4, 0x00000000},
+ {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+ {0xd00, 0x00000740}, {0xd04, 0x40020401},
+ {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+ {0xd10, 0xa0633333}, {0xd14, 0x3333bc53},
+ {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00127353},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000282}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d},
+ {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d},
+ {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d},
+ {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d},
+ {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+ {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+ {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+ {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+ {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+ {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+ {0xe5c, 0x28160d05}, {0xe60, 0x00000008},
+ {0xe68, 0x001b2556}, {0xe6c, 0x00c00096},
+ {0xe70, 0x00c00096}, {0xe74, 0x01000056},
+ {0xe78, 0x01000014}, {0xe7c, 0x01000056},
+ {0xe80, 0x01000014}, {0xe84, 0x00c00096},
+ {0xe88, 0x01000056}, {0xe8c, 0x00c00096},
+ {0xed0, 0x00c00096}, {0xed4, 0x00c00096},
+ {0xed8, 0x00c00096}, {0xedc, 0x000000d6},
+ {0xee0, 0x000000d6}, {0xeec, 0x01c00016},
+ {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+ {0xf00, 0x00000300},
+ {0x820, 0x01000100}, {0x800, 0x83040000},
+ {0xffff, 0xffffffff},
+};
+
static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
{0x800, 0x80040002}, {0x804, 0x00000003},
@@ -613,6 +748,77 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
{0xffff, 0xffffffff}
};
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
+ {0xc78, 0xfd000001}, {0xc78, 0xfc010001},
+ {0xc78, 0xfb020001}, {0xc78, 0xfa030001},
+ {0xc78, 0xf9040001}, {0xc78, 0xf8050001},
+ {0xc78, 0xf7060001}, {0xc78, 0xf6070001},
+ {0xc78, 0xf5080001}, {0xc78, 0xf4090001},
+ {0xc78, 0xf30a0001}, {0xc78, 0xf20b0001},
+ {0xc78, 0xf10c0001}, {0xc78, 0xf00d0001},
+ {0xc78, 0xef0e0001}, {0xc78, 0xee0f0001},
+ {0xc78, 0xed100001}, {0xc78, 0xec110001},
+ {0xc78, 0xeb120001}, {0xc78, 0xea130001},
+ {0xc78, 0xe9140001}, {0xc78, 0xe8150001},
+ {0xc78, 0xe7160001}, {0xc78, 0xe6170001},
+ {0xc78, 0xe5180001}, {0xc78, 0xe4190001},
+ {0xc78, 0xe31a0001}, {0xc78, 0xa51b0001},
+ {0xc78, 0xa41c0001}, {0xc78, 0xa31d0001},
+ {0xc78, 0x671e0001}, {0xc78, 0x661f0001},
+ {0xc78, 0x65200001}, {0xc78, 0x64210001},
+ {0xc78, 0x63220001}, {0xc78, 0x4a230001},
+ {0xc78, 0x49240001}, {0xc78, 0x48250001},
+ {0xc78, 0x47260001}, {0xc78, 0x46270001},
+ {0xc78, 0x45280001}, {0xc78, 0x44290001},
+ {0xc78, 0x432a0001}, {0xc78, 0x422b0001},
+ {0xc78, 0x292c0001}, {0xc78, 0x282d0001},
+ {0xc78, 0x272e0001}, {0xc78, 0x262f0001},
+ {0xc78, 0x0a300001}, {0xc78, 0x09310001},
+ {0xc78, 0x08320001}, {0xc78, 0x07330001},
+ {0xc78, 0x06340001}, {0xc78, 0x05350001},
+ {0xc78, 0x04360001}, {0xc78, 0x03370001},
+ {0xc78, 0x02380001}, {0xc78, 0x01390001},
+ {0xc78, 0x013a0001}, {0xc78, 0x013b0001},
+ {0xc78, 0x013c0001}, {0xc78, 0x013d0001},
+ {0xc78, 0x013e0001}, {0xc78, 0x013f0001},
+ {0xc78, 0xfc400001}, {0xc78, 0xfb410001},
+ {0xc78, 0xfa420001}, {0xc78, 0xf9430001},
+ {0xc78, 0xf8440001}, {0xc78, 0xf7450001},
+ {0xc78, 0xf6460001}, {0xc78, 0xf5470001},
+ {0xc78, 0xf4480001}, {0xc78, 0xf3490001},
+ {0xc78, 0xf24a0001}, {0xc78, 0xf14b0001},
+ {0xc78, 0xf04c0001}, {0xc78, 0xef4d0001},
+ {0xc78, 0xee4e0001}, {0xc78, 0xed4f0001},
+ {0xc78, 0xec500001}, {0xc78, 0xeb510001},
+ {0xc78, 0xea520001}, {0xc78, 0xe9530001},
+ {0xc78, 0xe8540001}, {0xc78, 0xe7550001},
+ {0xc78, 0xe6560001}, {0xc78, 0xe5570001},
+ {0xc78, 0xe4580001}, {0xc78, 0xe3590001},
+ {0xc78, 0xa65a0001}, {0xc78, 0xa55b0001},
+ {0xc78, 0xa45c0001}, {0xc78, 0xa35d0001},
+ {0xc78, 0x675e0001}, {0xc78, 0x665f0001},
+ {0xc78, 0x65600001}, {0xc78, 0x64610001},
+ {0xc78, 0x63620001}, {0xc78, 0x62630001},
+ {0xc78, 0x61640001}, {0xc78, 0x48650001},
+ {0xc78, 0x47660001}, {0xc78, 0x46670001},
+ {0xc78, 0x45680001}, {0xc78, 0x44690001},
+ {0xc78, 0x436a0001}, {0xc78, 0x426b0001},
+ {0xc78, 0x286c0001}, {0xc78, 0x276d0001},
+ {0xc78, 0x266e0001}, {0xc78, 0x256f0001},
+ {0xc78, 0x24700001}, {0xc78, 0x09710001},
+ {0xc78, 0x08720001}, {0xc78, 0x07730001},
+ {0xc78, 0x06740001}, {0xc78, 0x05750001},
+ {0xc78, 0x04760001}, {0xc78, 0x03770001},
+ {0xc78, 0x02780001}, {0xc78, 0x01790001},
+ {0xc78, 0x017a0001}, {0xc78, 0x017b0001},
+ {0xc78, 0x017c0001}, {0xc78, 0x017d0001},
+ {0xc78, 0x017e0001}, {0xc78, 0x017f0001},
+ {0xc50, 0x69553422},
+ {0xc50, 0x69553420},
+ {0x824, 0x00390204},
+ {0xffff, 0xffffffff}
+};
+
static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00039c63},
@@ -688,6 +894,75 @@ static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
{0xff, 0xffffffff}
};
+static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
+ {0x00, 0x00010000}, {0xb0, 0x000dffe0},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xb1, 0x00000018},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xb2, 0x00084c00},
+ {0xb5, 0x0000d2cc}, {0xb6, 0x000925aa},
+ {0xb7, 0x00000010}, {0xb8, 0x0000907f},
+ {0x5c, 0x00000002}, {0x7c, 0x00000002},
+ {0x7e, 0x00000005}, {0x8b, 0x0006fc00},
+ {0xb0, 0x000ff9f0}, {0x1c, 0x000739d2},
+ {0x1e, 0x00000000}, {0xdf, 0x00000780},
+ {0x50, 0x00067435},
+ /*
+ * The 8723bu vendor driver indicates that bit 8 should be set in
+ * 0x51 for package types TFBGA90, TFBGA80, and TFBGA79. However,
+ * it never actually checks the package type and just defaults
+ * to not setting it.
+ */
+ {0x51, 0x0006b04e},
+ {0x52, 0x000007d2}, {0x53, 0x00000000},
+ {0x54, 0x00050400}, {0x55, 0x0004026e},
+ {0xdd, 0x0000004c}, {0x70, 0x00067435},
+ /*
+ * 0x71 has the same package type condition as register 0x51.
+ */
+ {0x71, 0x0006b04e},
+ {0x72, 0x000007d2}, {0x73, 0x00000000},
+ {0x74, 0x00050400}, {0x75, 0x0004026e},
+ {0xef, 0x00000100}, {0x34, 0x0000add7},
+ {0x35, 0x00005c00}, {0x34, 0x00009dd4},
+ {0x35, 0x00005000}, {0x34, 0x00008dd1},
+ {0x35, 0x00004400}, {0x34, 0x00007dce},
+ {0x35, 0x00003800}, {0x34, 0x00006cd1},
+ {0x35, 0x00004400}, {0x34, 0x00005cce},
+ {0x35, 0x00003800}, {0x34, 0x000048ce},
+ {0x35, 0x00004400}, {0x34, 0x000034ce},
+ {0x35, 0x00003800}, {0x34, 0x00002451},
+ {0x35, 0x00004400}, {0x34, 0x0000144e},
+ {0x35, 0x00003800}, {0x34, 0x00000051},
+ {0x35, 0x00004400}, {0xef, 0x00000000},
+ {0xef, 0x00000100}, {0xed, 0x00000010},
+ {0x44, 0x0000add7}, {0x44, 0x00009dd4},
+ {0x44, 0x00008dd1}, {0x44, 0x00007dce},
+ {0x44, 0x00006cc1}, {0x44, 0x00005cce},
+ {0x44, 0x000044d1}, {0x44, 0x000034ce},
+ {0x44, 0x00002451}, {0x44, 0x0000144e},
+ {0x44, 0x00000051}, {0xef, 0x00000000},
+ {0xed, 0x00000000}, {0x7f, 0x00020080},
+ {0xef, 0x00002000}, {0x3b, 0x000380ef},
+ {0x3b, 0x000302fe}, {0x3b, 0x00028ce6},
+ {0x3b, 0x000200bc}, {0x3b, 0x000188a5},
+ {0x3b, 0x00010fbc}, {0x3b, 0x00008f71},
+ {0x3b, 0x00000900}, {0xef, 0x00000000},
+ {0xed, 0x00000001}, {0x40, 0x000380ef},
+ {0x40, 0x000302fe}, {0x40, 0x00028ce6},
+ {0x40, 0x000200bc}, {0x40, 0x000188a5},
+ {0x40, 0x00010fbc}, {0x40, 0x00008f71},
+ {0x40, 0x00000900}, {0xed, 0x00000000},
+ {0x82, 0x00080000}, {0x83, 0x00008000},
+ {0x84, 0x00048d80}, {0x85, 0x00068000},
+ {0xa2, 0x00080000}, {0xa3, 0x00008000},
+ {0xa4, 0x00048d80}, {0xa5, 0x00068000},
+ {0xed, 0x00000002}, {0xef, 0x00000002},
+ {0x56, 0x00000032}, {0x76, 0x00000032},
+ {0x01, 0x00000780},
+ {0xff, 0xffffffff}
+};
+
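If the package-type note above were ever honoured, the shape of the code would be roughly the following sketch. The enum and helper are hypothetical; as the comment says, neither the vendor driver nor this patch actually checks the package type:

	/* Hypothetical - nothing in the driver wires this up. */
	enum rtl8723bu_package {
		PACKAGE_DEFAULT,
		PACKAGE_TFBGA79,
		PACKAGE_TFBGA80,
		PACKAGE_TFBGA90,
	};

	static u32 rtl8723bu_adjust_rf51(enum rtl8723bu_package pkg, u32 val)
	{
		switch (pkg) {
		case PACKAGE_TFBGA79:
		case PACKAGE_TFBGA80:
		case PACKAGE_TFBGA90:
			return val | BIT(8);	/* TFBGA parts want bit 8 set */
		default:
			return val;		/* vendor default: leave clear */
		}
	}
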
static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00018c63},
@@ -1166,6 +1441,11 @@ static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
return retval;
}
+/*
+ * The RTL8723BU driver indicates that registers 0xb2 and 0xb6 can
+ * have write issues in high temperature conditions. We may have to
+ * retry writing them.
+ */
static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
enum rtl8xxxu_rfpath path, u8 reg, u32 data)
{
@@ -1191,7 +1471,8 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
return retval;
}
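
Given the high-temperature note above, a retry wrapper around the RF write could look like the sketch below. This is not what the patch implements, and the readback-compare criterion is an assumption; the diff only documents that retries may be needed for registers 0xb2 and 0xb6:

	/* Hypothetical retry wrapper; verification strategy assumed. */
	static int rtl8xxxu_write_rfreg_retry(struct rtl8xxxu_priv *priv,
					      enum rtl8xxxu_rfpath path,
					      u8 reg, u32 data, int retries)
	{
		do {
			int ret = rtl8xxxu_write_rfreg(priv, path, reg, data);

			if (ret)
				return ret;
			if (rtl8xxxu_read_rfreg(priv, path, reg) == data)
				return 0;
		} while (--retries > 0);

		return -EIO;
	}
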
-static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c)
+static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv,
+ struct h2c_cmd *h2c, int len)
{
struct device *dev = &priv->udev->dev;
int mbox_nr, retry, retval = 0;
@@ -1202,7 +1483,8 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c)
mbox_nr = priv->next_mbox;
mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
- mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2);
+ mbox_ext_reg = priv->fops->mbox_ext_reg +
+ (mbox_nr * priv->fops->mbox_ext_width);
/*
* MBOX ready?
@@ -1215,7 +1497,7 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c)
} while (retry--);
if (!retry) {
- dev_dbg(dev, "%s: Mailbox busy\n", __func__);
+ dev_info(dev, "%s: Mailbox busy\n", __func__);
retval = -EBUSY;
goto error;
}
@@ -1223,12 +1505,20 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c)
/*
* Need to swap as it's being swapped again by rtl8xxxu_write16/32()
*/
- if (h2c->cmd.cmd & H2C_EXT) {
- rtl8xxxu_write16(priv, mbox_ext_reg,
- le16_to_cpu(h2c->raw.ext));
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
- dev_info(dev, "H2C_EXT %04x\n",
- le16_to_cpu(h2c->raw.ext));
+ if (len > sizeof(u32)) {
+ if (priv->fops->mbox_ext_width == 4) {
+ rtl8xxxu_write32(priv, mbox_ext_reg,
+ le32_to_cpu(h2c->raw_wide.ext));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C_EXT %08x\n",
+ le32_to_cpu(h2c->raw_wide.ext));
+ } else {
+ rtl8xxxu_write16(priv, mbox_ext_reg,
+ le16_to_cpu(h2c->raw.ext));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C_EXT %04x\n",
+ le16_to_cpu(h2c->raw.ext));
+ }
}
rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
@@ -1241,6 +1531,27 @@ error:
return retval;
}
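
The net effect of the rework above is that the H2C extension mailbox becomes a per-chip property: 8723A-style parts keep a 2-byte extension register, while the 8723B uses a 4-byte one at a different base, and commands longer than 4 bytes spill into it. A sketch of the two fops fields this relies on (field names from the diff, surrounding struct assumed):

	/* Per-chip mailbox description implied by the diff. */
	struct rtl8xxxu_fileops_sketch {
		u32 mbox_ext_reg;	/* base of the extension mailboxes */
		u8  mbox_ext_width;	/* 2 on 8723a parts, 4 on 8723b */
	};

	/* Extension register for mailbox N, as in rtl8723a_h2c_cmd() */
	static u32 mbox_ext_addr(const struct rtl8xxxu_fileops_sketch *fops,
				 int mbox_nr)
	{
		return fops->mbox_ext_reg + mbox_nr * fops->mbox_ext_width;
	}
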
+static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data)
+{
+ struct h2c_cmd h2c;
+ int reqnum = 0;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
+ h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
+ h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
+ h2c.bt_mp_oper.data = data;
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+
+ reqnum++;
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
+ h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
+ h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
+ h2c.bt_mp_oper.addr = reg;
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+}
+
static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -1365,6 +1676,24 @@ static int rtl8723a_channel_to_group(int channel)
return group;
}
+static int rtl8723b_channel_to_group(int channel)
+{
+ int group;
+
+ if (channel < 3)
+ group = 0;
+ else if (channel < 6)
+ group = 1;
+ else if (channel < 9)
+ group = 2;
+ else if (channel < 12)
+ group = 3;
+ else
+ group = 4;
+
+ return group;
+}
+
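For reference, the grouping above maps channels 1-2 to group 0, 3-5 to group 1, 6-8 to group 2, 9-11 to group 3, and channel 12 and above to group 4.
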
static void rtl8723au_config_channel(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
@@ -1487,6 +1816,136 @@ static void rtl8723au_config_channel(struct ieee80211_hw *hw)
}
}
+static void rtl8723bu_config_channel(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ u32 val32, rsr;
+ u8 val8, subchannel;
+ u16 rf_mode_bw;
+ bool ht = true;
+ int sec_ch_above, channel;
+ int i;
+
+ rf_mode_bw = rtl8xxxu_read16(priv, REG_WMAC_TRXPTCL_CTL);
+ rf_mode_bw &= ~WMAC_TRXPTCL_CTL_BW_MASK;
+ rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+ channel = hw->conf.chandef.chan->hw_value;
+
+/* Hack */
+ subchannel = 0;
+
+ switch (hw->conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ ht = false;
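+		/* fall through - the 20 MHz setup below is shared */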
+ case NL80211_CHAN_WIDTH_20:
+ rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_20;
+ subchannel = 0;
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT);
+ val32 &= ~(BIT(30) | BIT(31));
+ rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32);
+
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_40;
+
+ if (hw->conf.chandef.center_freq1 >
+ hw->conf.chandef.chan->center_freq) {
+ sec_ch_above = 1;
+ channel += 2;
+ } else {
+ sec_ch_above = 0;
+ channel -= 2;
+ }
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 |= FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+ /*
+ * Set the control channel to upper or lower. These settings
+ * are only required for 40 MHz operation.
+ */
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM);
+ val32 &= ~CCK0_SIDEBAND;
+ if (!sec_ch_above)
+ val32 |= CCK0_SIDEBAND;
+ rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+ val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */
+ if (sec_ch_above)
+ val32 |= OFDM_LSTF_PRIME_CH_LOW;
+ else
+ val32 |= OFDM_LSTF_PRIME_CH_HIGH;
+ rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+ val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL);
+ if (sec_ch_above)
+ val32 |= FPGA0_PS_UPPER_CHANNEL;
+ else
+ val32 |= FPGA0_PS_LOWER_CHANNEL;
+ rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_80;
+ break;
+ default:
+ break;
+ }
+
+ for (i = RF_A; i < priv->rf_paths; i++) {
+ val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+ val32 &= ~MODE_AG_CHANNEL_MASK;
+ val32 |= channel;
+ rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+ }
+
+ rtl8xxxu_write16(priv, REG_WMAC_TRXPTCL_CTL, rf_mode_bw);
+ rtl8xxxu_write8(priv, REG_DATA_SUBCHANNEL, subchannel);
+
+ if (ht)
+ val8 = 0x0e;
+ else
+ val8 = 0x0a;
+
+ rtl8xxxu_write8(priv, REG_SIFS_CCK + 1, val8);
+ rtl8xxxu_write8(priv, REG_SIFS_OFDM + 1, val8);
+
+ rtl8xxxu_write16(priv, REG_R2T_SIFS, 0x0808);
+ rtl8xxxu_write16(priv, REG_T2T_SIFS, 0x0a0a);
+
+ for (i = RF_A; i < priv->rf_paths; i++) {
+ val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+ val32 &= ~MODE_AG_BW_MASK;
+ switch (hw->conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_80:
+ val32 |= MODE_AG_BW_80MHZ_8723B;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ val32 |= MODE_AG_BW_40MHZ_8723B;
+ break;
+ default:
+ val32 |= MODE_AG_BW_20MHZ_8723B;
+ break;
+ }
+ rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+ }
+}
+
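To make the 40 MHz arithmetic above concrete: with primary channel 6 (2437 MHz) and center_freq1 at 2447 MHz, the secondary channel lies above, so the RF synthesizer is tuned to channel 8 (the centre of the pair) while the CCK/OFDM sideband bits mark the primary as the lower half. The channel shift in isolation, with those numbers as an assumed example:

	/* Centre-of-pair channel used for the RF6052 MODE_AG write. */
	static int rf_channel_for_40mhz(int channel, int center_freq1,
					int primary_center_freq)
	{
		if (center_freq1 > primary_center_freq)
			return channel + 2;	/* secondary above */
		return channel - 2;		/* secondary below */
	}

	/* rf_channel_for_40mhz(6, 2447, 2437) == 8 */
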
static void
rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
{
@@ -1596,12 +2055,51 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
}
+static void
+rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+ u32 val32, ofdm, mcs;
+ u8 cck, ofdmbase, mcsbase;
+ int group, tx_idx;
+
+ tx_idx = 0;
+ group = rtl8723b_channel_to_group(channel);
+
+ cck = priv->cck_tx_power_index_B[group];
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+ val32 &= 0xffff00ff;
+ val32 |= (cck << 8);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xff;
+ val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ ofdmbase = priv->ht40_1s_tx_power_index_B[group];
+ ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
+ ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
+
+ mcsbase = priv->ht40_1s_tx_power_index_B[group];
+ if (ht40)
+ mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
+ else
+ mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
+ mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
+}
+
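The OFDM and MCS writes above pack a single per-group power index into all four byte lanes of each 32-bit TX AGC register, e.g. a base index of 0x2d becomes 0x2d2d2d2d. The replication idiom by itself:

	/* Fill every byte lane of a TX AGC word with one power index. */
	static u32 tx_agc_replicate(u8 base)
	{
		return base | base << 8 | base << 16 | (u32)base << 24;
	}

	/* tx_agc_replicate(0x2d) == 0x2d2d2d2d */
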
static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
enum nl80211_iftype linktype)
{
- u16 val8;
+ u8 val8;
- val8 = rtl8xxxu_read16(priv, REG_MSR);
+ val8 = rtl8xxxu_read8(priv, REG_MSR);
val8 &= ~MSR_LINKTYPE_MASK;
switch (linktype) {
@@ -1662,16 +2160,24 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
case 1:
cut = "B";
break;
+ case 2:
+ cut = "C";
+ break;
+ case 3:
+ cut = "D";
+ break;
+ case 4:
+ cut = "E";
+ break;
default:
cut = "unknown";
}
dev_info(dev,
"RTL%s rev %s (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
- priv->chip_name, cut, priv->vendor_umc ? "UMC" : "TSMC",
- priv->tx_paths, priv->rx_paths, priv->ep_tx_count,
- priv->has_wifi, priv->has_bluetooth, priv->has_gps,
- priv->hi_pa);
+ priv->chip_name, cut, priv->chip_vendor, priv->tx_paths,
+ priv->rx_paths, priv->ep_tx_count, priv->has_wifi,
+ priv->has_bluetooth, priv->has_gps, priv->hi_pa);
dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr);
}
@@ -1691,11 +2197,18 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
}
if (val32 & SYS_CFG_BT_FUNC) {
- sprintf(priv->chip_name, "8723AU");
+ if (priv->chip_cut >= 3) {
+ sprintf(priv->chip_name, "8723BU");
+ priv->rtlchip = 0x8723b;
+ } else {
+ sprintf(priv->chip_name, "8723AU");
+ priv->usb_interrupts = 1;
+ priv->rtlchip = 0x8723a;
+ }
+
priv->rf_paths = 1;
priv->rx_paths = 1;
priv->tx_paths = 1;
- priv->rtlchip = 0x8723a;
val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL);
if (val32 & MULTI_WIFI_FUNC_EN)
@@ -1704,20 +2217,37 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->has_bluetooth = 1;
if (val32 & MULTI_GPS_FUNC_EN)
priv->has_gps = 1;
+ priv->is_multi_func = 1;
} else if (val32 & SYS_CFG_TYPE_ID) {
bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
bonding &= HPON_FSM_BONDING_MASK;
- if (bonding == HPON_FSM_BONDING_1T2R) {
+ if (priv->chip_cut >= 3) {
+ if (bonding == HPON_FSM_BONDING_1T2R) {
+ sprintf(priv->chip_name, "8191EU");
+ priv->rf_paths = 2;
+ priv->rx_paths = 2;
+ priv->tx_paths = 1;
+ priv->rtlchip = 0x8191e;
+ } else {
+ sprintf(priv->chip_name, "8192EU");
+ priv->rf_paths = 2;
+ priv->rx_paths = 2;
+ priv->tx_paths = 2;
+ priv->rtlchip = 0x8192e;
+ }
+ } else if (bonding == HPON_FSM_BONDING_1T2R) {
sprintf(priv->chip_name, "8191CU");
priv->rf_paths = 2;
priv->rx_paths = 2;
priv->tx_paths = 1;
+ priv->usb_interrupts = 1;
priv->rtlchip = 0x8191c;
} else {
sprintf(priv->chip_name, "8192CU");
priv->rf_paths = 2;
priv->rx_paths = 2;
priv->tx_paths = 2;
+ priv->usb_interrupts = 1;
priv->rtlchip = 0x8192c;
}
priv->has_wifi = 1;
@@ -1727,11 +2257,38 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->rx_paths = 1;
priv->tx_paths = 1;
priv->rtlchip = 0x8188c;
+ priv->usb_interrupts = 1;
priv->has_wifi = 1;
}
- if (val32 & SYS_CFG_VENDOR_ID)
- priv->vendor_umc = 1;
+ switch (priv->rtlchip) {
+ case 0x8188e:
+ case 0x8192e:
+ case 0x8723b:
+ switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
+ case SYS_CFG_VENDOR_ID_TSMC:
+ sprintf(priv->chip_vendor, "TSMC");
+ break;
+ case SYS_CFG_VENDOR_ID_SMIC:
+ sprintf(priv->chip_vendor, "SMIC");
+ priv->vendor_smic = 1;
+ break;
+ case SYS_CFG_VENDOR_ID_UMC:
+ sprintf(priv->chip_vendor, "UMC");
+ priv->vendor_umc = 1;
+ break;
+ default:
+ sprintf(priv->chip_vendor, "unknown");
+ }
+ break;
+ default:
+ if (val32 & SYS_CFG_VENDOR_ID) {
+ sprintf(priv->chip_vendor, "UMC");
+ priv->vendor_umc = 1;
+ } else {
+ sprintf(priv->chip_vendor, "TSMC");
+ }
+ }
val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
priv->rom_rev = (val32 & GPIO_RF_RL_ID) >> 28;
@@ -1757,6 +2314,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
*/
if (!priv->ep_tx_count) {
switch (priv->nr_out_eps) {
+ case 4:
case 3:
priv->ep_tx_low_queue = 1;
priv->ep_tx_count++;
@@ -1778,43 +2336,126 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
{
- if (priv->efuse_wifi.efuse8723.rtl_id != cpu_to_le16(0x8129))
+ struct rtl8723au_efuse *efuse = &priv->efuse_wifi.efuse8723;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
return -EINVAL;
- ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723.mac_addr);
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
memcpy(priv->cck_tx_power_index_A,
- priv->efuse_wifi.efuse8723.cck_tx_power_index_A,
- sizeof(priv->cck_tx_power_index_A));
+ efuse->cck_tx_power_index_A,
+ sizeof(efuse->cck_tx_power_index_A));
memcpy(priv->cck_tx_power_index_B,
- priv->efuse_wifi.efuse8723.cck_tx_power_index_B,
- sizeof(priv->cck_tx_power_index_B));
+ efuse->cck_tx_power_index_B,
+ sizeof(efuse->cck_tx_power_index_B));
memcpy(priv->ht40_1s_tx_power_index_A,
- priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_A,
- sizeof(priv->ht40_1s_tx_power_index_A));
+ efuse->ht40_1s_tx_power_index_A,
+ sizeof(efuse->ht40_1s_tx_power_index_A));
memcpy(priv->ht40_1s_tx_power_index_B,
- priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_B,
- sizeof(priv->ht40_1s_tx_power_index_B));
+ efuse->ht40_1s_tx_power_index_B,
+ sizeof(efuse->ht40_1s_tx_power_index_B));
memcpy(priv->ht20_tx_power_index_diff,
- priv->efuse_wifi.efuse8723.ht20_tx_power_index_diff,
- sizeof(priv->ht20_tx_power_index_diff));
+ efuse->ht20_tx_power_index_diff,
+ sizeof(efuse->ht20_tx_power_index_diff));
memcpy(priv->ofdm_tx_power_index_diff,
- priv->efuse_wifi.efuse8723.ofdm_tx_power_index_diff,
- sizeof(priv->ofdm_tx_power_index_diff));
+ efuse->ofdm_tx_power_index_diff,
+ sizeof(efuse->ofdm_tx_power_index_diff));
memcpy(priv->ht40_max_power_offset,
- priv->efuse_wifi.efuse8723.ht40_max_power_offset,
- sizeof(priv->ht40_max_power_offset));
+ efuse->ht40_max_power_offset,
+ sizeof(efuse->ht40_max_power_offset));
memcpy(priv->ht20_max_power_offset,
- priv->efuse_wifi.efuse8723.ht20_max_power_offset,
- sizeof(priv->ht20_max_power_offset));
+ efuse->ht20_max_power_offset,
+ sizeof(efuse->ht20_max_power_offset));
+ if (efuse->version >= 0x01) {
+ priv->has_xtalk = 1;
+ priv->xtalk = efuse->xtal_k & 0x3f;
+ }
dev_info(&priv->udev->dev, "Vendor: %.7s\n",
- priv->efuse_wifi.efuse8723.vendor_name);
+ efuse->vendor_name);
dev_info(&priv->udev->dev, "Product: %.41s\n",
- priv->efuse_wifi.efuse8723.device_name);
+ efuse->device_name);
+ return 0;
+}
+
+static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu;
+ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+ sizeof(efuse->tx_power_index_A.cck_base));
+ memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
+ sizeof(efuse->tx_power_index_B.cck_base));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->tx_power_index_B.ht40_base,
+ sizeof(efuse->tx_power_index_B.ht40_base));
+
+ priv->ofdm_tx_power_diff[0].a =
+ efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
+ priv->ofdm_tx_power_diff[0].b =
+ efuse->tx_power_index_B.ht20_ofdm_1s_diff.a;
+
+ priv->ht20_tx_power_diff[0].a =
+ efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+ priv->ht20_tx_power_diff[0].b =
+ efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
+
+ priv->ht40_tx_power_diff[0].a = 0;
+ priv->ht40_tx_power_diff[0].b = 0;
+
+ for (i = 1; i < RTL8723B_TX_COUNT; i++) {
+ priv->ofdm_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
+ priv->ofdm_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
+
+ priv->ht20_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
+ priv->ht20_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
+
+ priv->ht40_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
+ priv->ht40_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
+ }
+
+ priv->has_xtalk = 1;
+ priv->xtalk = efuse->xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8723bu_efuse));
+ for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+
return 0;
}
@@ -1822,50 +2463,51 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
{
+ struct rtl8192cu_efuse *efuse = &priv->efuse_wifi.efuse8192;
int i;
- if (priv->efuse_wifi.efuse8192.rtl_id != cpu_to_le16(0x8129))
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
return -EINVAL;
- ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192.mac_addr);
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
memcpy(priv->cck_tx_power_index_A,
- priv->efuse_wifi.efuse8192.cck_tx_power_index_A,
- sizeof(priv->cck_tx_power_index_A));
+ efuse->cck_tx_power_index_A,
+ sizeof(efuse->cck_tx_power_index_A));
memcpy(priv->cck_tx_power_index_B,
- priv->efuse_wifi.efuse8192.cck_tx_power_index_B,
- sizeof(priv->cck_tx_power_index_B));
+ efuse->cck_tx_power_index_B,
+ sizeof(efuse->cck_tx_power_index_B));
memcpy(priv->ht40_1s_tx_power_index_A,
- priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_A,
- sizeof(priv->ht40_1s_tx_power_index_A));
+ efuse->ht40_1s_tx_power_index_A,
+ sizeof(efuse->ht40_1s_tx_power_index_A));
memcpy(priv->ht40_1s_tx_power_index_B,
- priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_B,
- sizeof(priv->ht40_1s_tx_power_index_B));
+ efuse->ht40_1s_tx_power_index_B,
+ sizeof(efuse->ht40_1s_tx_power_index_B));
memcpy(priv->ht40_2s_tx_power_index_diff,
- priv->efuse_wifi.efuse8192.ht40_2s_tx_power_index_diff,
- sizeof(priv->ht40_2s_tx_power_index_diff));
+ efuse->ht40_2s_tx_power_index_diff,
+ sizeof(efuse->ht40_2s_tx_power_index_diff));
memcpy(priv->ht20_tx_power_index_diff,
- priv->efuse_wifi.efuse8192.ht20_tx_power_index_diff,
- sizeof(priv->ht20_tx_power_index_diff));
+ efuse->ht20_tx_power_index_diff,
+ sizeof(efuse->ht20_tx_power_index_diff));
memcpy(priv->ofdm_tx_power_index_diff,
- priv->efuse_wifi.efuse8192.ofdm_tx_power_index_diff,
- sizeof(priv->ofdm_tx_power_index_diff));
+ efuse->ofdm_tx_power_index_diff,
+ sizeof(efuse->ofdm_tx_power_index_diff));
memcpy(priv->ht40_max_power_offset,
- priv->efuse_wifi.efuse8192.ht40_max_power_offset,
- sizeof(priv->ht40_max_power_offset));
+ efuse->ht40_max_power_offset,
+ sizeof(efuse->ht40_max_power_offset));
memcpy(priv->ht20_max_power_offset,
- priv->efuse_wifi.efuse8192.ht20_max_power_offset,
- sizeof(priv->ht20_max_power_offset));
+ efuse->ht20_max_power_offset,
+ sizeof(efuse->ht20_max_power_offset));
dev_info(&priv->udev->dev, "Vendor: %.7s\n",
- priv->efuse_wifi.efuse8192.vendor_name);
+ efuse->vendor_name);
dev_info(&priv->udev->dev, "Product: %.20s\n",
- priv->efuse_wifi.efuse8192.device_name);
+ efuse->device_name);
- if (priv->efuse_wifi.efuse8192.rf_regulatory & 0x20) {
+ if (efuse->rf_regulatory & 0x20) {
sprintf(priv->chip_name, "8188RU");
priv->hi_pa = 1;
}
@@ -1889,6 +2531,44 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
#endif
+static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ priv->has_xtalk = 1;
+ priv->xtalk = efuse->xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
+ dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8192eu_efuse));
+ for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+ /*
+ * Temporarily disable 8192eu support
+ */
+ return -EINVAL;
+ return 0;
+}
+
static int
rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data)
{
@@ -1938,9 +2618,11 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
if (val16 & EEPROM_BOOT)
priv->boot_eeprom = 1;
- val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST);
- val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT;
- rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32);
+ if (priv->is_multi_func) {
+ val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST);
+ val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT;
+ rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32);
+ }
dev_dbg(dev, "Booting from %s\n",
priv->boot_eeprom ? "EEPROM" : "EFUSE");
@@ -1970,10 +2652,12 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
}
/* Default value is 0xff */
- memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN_8723A);
+ memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN);
efuse_addr = 0;
while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) {
+ u16 map_addr;
+
ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &header);
if (ret || header == 0xff)
goto exit;
@@ -1996,45 +2680,34 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
word_mask = header & 0x0f;
}
- if (offset < EFUSE_MAX_SECTION_8723A) {
- u16 map_addr;
- /* Get word enable value from PG header */
+ /* Get word enable value from PG header */
- /* We have 8 bits to indicate validity */
- map_addr = offset * 8;
- if (map_addr >= EFUSE_MAP_LEN_8723A) {
- dev_warn(dev, "%s: Illegal map_addr (%04x), "
- "efuse corrupt!\n",
- __func__, map_addr);
- ret = -EINVAL;
- goto exit;
- }
- for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
- /* Check word enable condition in the section */
- if (!(word_mask & BIT(i))) {
- ret = rtl8xxxu_read_efuse8(priv,
- efuse_addr++,
- &val8);
- if (ret)
- goto exit;
- priv->efuse_wifi.raw[map_addr++] = val8;
-
- ret = rtl8xxxu_read_efuse8(priv,
- efuse_addr++,
- &val8);
- if (ret)
- goto exit;
- priv->efuse_wifi.raw[map_addr++] = val8;
- } else
- map_addr += 2;
- }
- } else {
- dev_warn(dev,
- "%s: Illegal offset (%04x), efuse corrupt!\n",
- __func__, offset);
+ /* We have 8 bits to indicate validity */
+ map_addr = offset * 8;
+ if (map_addr >= EFUSE_MAP_LEN) {
+ dev_warn(dev, "%s: Illegal map_addr (%04x), "
+ "efuse corrupt!\n",
+ __func__, map_addr);
ret = -EINVAL;
goto exit;
}
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (word_mask & BIT(i)) {
+ map_addr += 2;
+ continue;
+ }
+
+ ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
+ if (ret)
+ goto exit;
+ priv->efuse_wifi.raw[map_addr++] = val8;
+
+ ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8);
+ if (ret)
+ goto exit;
+ priv->efuse_wifi.raw[map_addr++] = val8;
+ }
}
exit:
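
The restructured loop above walks the efuse two bytes at a time under control of a PG header: the header's low nibble is a word mask in which a set bit means that 2-byte word was not programmed, and each section covers 8 bytes of the map. A sketch of the simple single-byte header decode (the extended-header case handled earlier in the function is omitted):

	/* Decode a simple efuse PG header, as in rtl8xxxu_read_efuse(). */
	static void efuse_decode_header(u8 header, u16 *map_addr, u8 *word_mask)
	{
		u8 offset = (header >> 4) & 0x0f;

		*word_mask = header & 0x0f;	/* set bit => word absent */
		*map_addr = offset * 8;		/* 4 words = 8 bytes/section */
	}
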
@@ -2043,6 +2716,56 @@ exit:
return ret;
}
+static void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 sys_func;
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ sys_func &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ sys_func |= SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+}
+
+static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 sys_func;
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ sys_func &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ sys_func |= SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+}
+
static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
@@ -2067,6 +2790,12 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
val32 &= ~MCU_WINT_INIT_READY;
rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32);
+ /*
+ * Reset the 8051 so the firmware starts running; otherwise
+ * it won't come up on the 8192eu.
+ */
+ priv->fops->reset_8051(priv);
+
/* Wait for firmware to become ready */
for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
@@ -2082,6 +2811,11 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
goto exit;
}
+ /*
+ * Init H2C command
+ */
+ if (priv->rtlchip == 0x8723b)
+ rtl8xxxu_write8(priv, REG_HMTFR, 0x0f);
exit:
return ret;
}
@@ -2100,19 +2834,30 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
/* 8051 enable */
val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16 | SYS_FUNC_CPU_ENABLE);
+ val16 |= SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+ if (val8 & MCU_FW_RAM_SEL) {
+ pr_info("do the RAM reset\n");
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+ priv->fops->reset_8051(priv);
+ }
/* MCU firmware download enable */
val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
- rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_ENABLE);
+ val8 |= MCU_FW_DL_ENABLE;
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8);
/* 8051 reset */
val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
- rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32 & ~BIT(19));
+ val32 &= ~BIT(19);
+ rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32);
/* Reset firmware download checksum */
val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
- rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_CSUM_REPORT);
+ val8 |= MCU_FW_DL_CSUM_REPORT;
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8);
pages = priv->fw_size / RTL_FW_PAGE_SIZE;
remainder = priv->fw_size % RTL_FW_PAGE_SIZE;
@@ -2121,7 +2866,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
for (i = 0; i < pages; i++) {
val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
- rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+ val8 |= i;
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8);
ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
fwptr, RTL_FW_PAGE_SIZE);
@@ -2135,7 +2881,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
if (remainder) {
val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
- rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+ val8 |= i;
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8);
ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
fwptr, remainder);
if (ret != remainder) {
@@ -2148,8 +2895,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
fw_abort:
/* MCU firmware download disable */
val16 = rtl8xxxu_read16(priv, REG_MCU_FW_DL);
- rtl8xxxu_write16(priv, REG_MCU_FW_DL,
- val16 & (~MCU_FW_DL_ENABLE & 0xff));
+ val16 &= ~MCU_FW_DL_ENABLE;
+ rtl8xxxu_write16(priv, REG_MCU_FW_DL, val16);
return ret;
}
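
The download loop above streams the image in RTL_FW_PAGE_SIZE chunks, programming the page index into the low three bits of REG_MCU_FW_DL + 2 before each rtl8xxxu_writeN(). The bookkeeping in isolation, with an assumed 4 KiB page size as the worked example:

	/* Page/remainder split used by rtl8xxxu_download_firmware(). */
	static void fw_page_split(size_t fw_size, size_t page_size,
				  size_t *pages, size_t *remainder)
	{
		*pages = fw_size / page_size;
		*remainder = fw_size % page_size;
	}

	/* e.g. a 30720-byte image with 4096-byte pages gives 7 full
	 * pages plus a 2048-byte tail, written as page index 7.
	 */
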
@@ -2174,12 +2921,18 @@ static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
}
priv->fw_data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+ if (!priv->fw_data) {
+ ret = -ENOMEM;
+ goto exit;
+ }
priv->fw_size = fw->size - sizeof(struct rtl8xxxu_firmware_header);
signature = le16_to_cpu(priv->fw_data->signature);
switch (signature & 0xfff0) {
+ case 0x92e0:
case 0x92c0:
case 0x88c0:
+ case 0x5300:
case 0x2300:
break;
default:
@@ -2221,6 +2974,20 @@ static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
return ret;
}
+static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ if (priv->enable_bluetooth)
+ fw_name = "rtlwifi/rtl8723bu_bt.bin";
+ else
+ fw_name = "rtlwifi/rtl8723bu_nic.bin";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+ return ret;
+}
+
#ifdef CONFIG_RTL8XXXU_UNTESTED
static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
@@ -2242,6 +3009,18 @@ static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
#endif
+static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ fw_name = "rtlwifi/rtl8192eu_nic.bin";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
{
u16 val16;
@@ -2269,6 +3048,44 @@ static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
}
}
+static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ val32 = rtl8xxxu_read32(priv, 0x64);
+ val32 &= ~(BIT(20) | BIT(24));
+ rtl8xxxu_write32(priv, 0x64, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
+ val32 &= ~BIT(4);
+ rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
+ val32 |= BIT(3);
+ rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 |= BIT(24);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 &= ~BIT(23);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+ val32 |= (BIT(0) | BIT(1));
+ rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_CTRL_ANTA_SRC);
+ val32 &= 0xffffff00;
+ val32 |= 0x77;
+ rtl8xxxu_write32(priv, REG_RFE_CTRL_ANTA_SRC, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+ val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+}
+
static int
rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
{
@@ -2291,7 +3108,8 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
}
}
- rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
+ if (priv->rtlchip != 0x8723b)
+ rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
return 0;
}
@@ -2328,6 +3146,7 @@ static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
{
u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
+ u16 val16;
u32 val32;
/*
@@ -2335,25 +3154,36 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
* addresses, which is initialized here. Do we need this?
*/
- val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
- udelay(2);
- val8 |= AFE_PLL_320_ENABLE;
- rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
- udelay(2);
+ if (priv->rtlchip == 0x8723b) {
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB |
+ SYS_FUNC_DIO_RF;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
- rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
- udelay(2);
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+ } else {
+ val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+ udelay(2);
+ val8 |= AFE_PLL_320_ENABLE;
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+ udelay(2);
- val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
- val8 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
- rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+ udelay(2);
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+ }
- /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
- val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
- val32 &= ~AFE_XTAL_RF_GATE;
- if (priv->has_bluetooth)
- val32 &= ~AFE_XTAL_BT_GATE;
- rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+ if (priv->rtlchip != 0x8723b) {
+ /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+ val32 &= ~AFE_XTAL_RF_GATE;
+ if (priv->has_bluetooth)
+ val32 &= ~AFE_XTAL_BT_GATE;
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+ }
/* 6. 0x1f[7:0] = 0x07 */
val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
@@ -2363,7 +3193,14 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
else if (priv->tx_paths == 2)
rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
- else
+ else if (priv->rtlchip == 0x8723b) {
+ /*
+ * Why?
+ */
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
+ rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
+ } else
rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
@@ -2429,29 +3266,33 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
}
- if (priv->hi_pa)
+ if (priv->rtlchip == 0x8723b)
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
+ else if (priv->hi_pa)
rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
else
rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
- if (priv->rtlchip == 0x8723a &&
- priv->efuse_wifi.efuse8723.version >= 0x01) {
+ if (priv->has_xtalk) {
val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
- val8 = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
+ val8 = priv->xtalk;
val32 &= 0xff000fff;
val32 |= ((val8 | (val8 << 6)) << 12);
rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
}
- ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
- ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
- ldohci12 = 0x57;
- lpldo = 1;
- val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+	if (priv->rtlchip != 0x8723b) {
+ ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+ ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+ ldohci12 = 0x57;
+ lpldo = 1;
+ val32 = (lpldo << 24) | (ldohci12 << 16) |
+ (ldov12d << 8) | ldoa15;
- rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+ rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+ }
return 0;
}
@@ -2492,8 +3333,6 @@ static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
continue;
}
- reg &= 0x3f;
-
ret = rtl8xxxu_write_rfreg(priv, path, reg, val);
if (ret) {
dev_warn(&priv->udev->dev,
@@ -2623,6 +3462,31 @@ exit:
return ret;
}
+static int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+{
+ u32 val32;
+ int ret = 0;
+ int i;
+
+ val32 = rtl8xxxu_read32(priv, REG_AUTO_LLT);
+ val32 |= AUTO_LLT_INIT_LLT;
+ rtl8xxxu_write32(priv, REG_AUTO_LLT, val32);
+
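+	/* Poll for the hardware to clear the init bit; ~1-2ms worst case */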
+ for (i = 500; i; i--) {
+ val32 = rtl8xxxu_read32(priv, REG_AUTO_LLT);
+ if (!(val32 & AUTO_LLT_INIT_LLT))
+ break;
+ usleep_range(2, 4);
+ }
+
+ if (!i) {
+ ret = -EBUSY;
+ dev_warn(&priv->udev->dev, "LLT table init failed\n");
+ }
+
+ return ret;
+}
+
static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv)
{
u16 val16, hi, lo;
@@ -2954,6 +3818,91 @@ static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
return false;
}
+static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2)
+{
+ u32 i, j, diff, simubitmap, bound = 0;
+ int candidate[2] = {-1, -1}; /* for path A and path B */
+ int tmp1, tmp2;
+ bool retval = true;
+
+ if (priv->tx_paths > 1)
+ bound = 8;
+ else
+ bound = 4;
+
+ simubitmap = 0;
+
+ for (i = 0; i < bound; i++) {
+ if (i & 1) {
+ if ((result[c1][i] & 0x00000200))
+ tmp1 = result[c1][i] | 0xfffffc00;
+ else
+ tmp1 = result[c1][i];
+
+			if ((result[c2][i] & 0x00000200))
+ tmp2 = result[c2][i] | 0xfffffc00;
+ else
+ tmp2 = result[c2][i];
+ } else {
+ tmp1 = result[c1][i];
+ tmp2 = result[c2][i];
+ }
+
+ diff = (tmp1 > tmp2) ? (tmp1 - tmp2) : (tmp2 - tmp1);
+
+ if (diff > MAX_TOLERANCE) {
+ if ((i == 2 || i == 6) && !simubitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ candidate[(i / 4)] = c1;
+ else
+ simubitmap = simubitmap | (1 << i);
+ } else {
+ simubitmap = simubitmap | (1 << i);
+ }
+ }
+ }
+
+ if (simubitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (candidate[i] >= 0) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] = result[candidate[i]][j];
+ retval = false;
+ }
+ }
+ return retval;
+ } else {
+ if (!(simubitmap & 0x03)) {
+ /* path A TX OK */
+ for (i = 0; i < 2; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ if (!(simubitmap & 0x0c)) {
+ /* path A RX OK */
+ for (i = 2; i < 4; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ if (!(simubitmap & 0x30) && priv->tx_paths > 1) {
+			/* path B TX OK */
+ for (i = 4; i < 6; i++)
+ result[3][i] = result[c1][i];
+ }
+
+		if (!(simubitmap & 0xc0) && priv->tx_paths > 1) {
+ /* path B RX OK */
+ for (i = 6; i < 8; i++)
+ result[3][i] = result[c1][i];
+ }
+ }
+
+ return false;
+}
+
static void
rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
{
@@ -3001,11 +3950,13 @@ static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
u32 path_on;
int i;
- path_on = path_a_on ? 0x04db25a4 : 0x0b1b25a4;
if (priv->tx_paths == 1) {
- path_on = 0x0bdb25a0;
- rtl8xxxu_write32(priv, regs[0], 0x0b1b25a0);
+ path_on = priv->fops->adda_1t_path_on;
+ rtl8xxxu_write32(priv, regs[0], priv->fops->adda_1t_init);
} else {
+ path_on = path_a_on ? priv->fops->adda_2t_path_on_a :
+ priv->fops->adda_2t_path_on_b;
+
rtl8xxxu_write32(priv, regs[0], path_on);
}
@@ -3119,6 +4070,369 @@ out:
return result;
}
+static int rtl8723bu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_e94, reg_e9c, path_sel, val32;
+ int result = 0;
+
+ path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Enable path A PA in TX IQK mode
+ */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0003f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xc7f87);
+
+ /*
+ * Tx IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ea);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * The vendor driver indicates the USB module is always using
+ * S0S1 path 1 for the 8723bu. This may be different for 8192eu
+ */
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
+ * No trace of this in the 8192eu or 8188eu vendor drivers.
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
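+	/* 10-bit two's complement result; reduce to its absolute value */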
+ val32 = (reg_e9c >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000) &&
+ ((reg_e94 & 0x03ff0000) < 0x01100000) &&
+ ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x01;
+ else /* If TX not OK, ignore RX */
+ goto out;
+
+out:
+ return result;
+}
+
+static int rtl8723bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_ea4, reg_eac, reg_e94, reg_e9c, path_sel, val32;
+ int result = 0;
+
+ path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Enable path A PA in TX IQK mode
+ */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
+
+ /*
+ * Tx IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160ff0);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * The vendor driver indicates the USB module is always using
+ * S0S1 path 1 for the 8723bu. This may be different for 8192eu
+ */
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
+ * No trace of this in the 8192eu or 8188eu vendor drivers.
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
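+	/* 10-bit two's complement result, same conversion as in the TX IQK */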
+ val32 = (reg_e9c >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000) &&
+ ((reg_e94 & 0x03ff0000) < 0x01100000) &&
+ ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x01;
+ else /* If TX not OK, ignore RX */
+ goto out;
+
+	val32 = 0x80007c00 | (reg_e94 & 0x3ff0000) |
+ ((reg_e9c & 0x3ff0000) >> 16);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /*
+ * Modify RX IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7d77);
+
+ /*
+ * PA, PAD setting
+ */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0xf80);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, 0x4021f);
+
+ /*
+ * RX IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816001f);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Disable BT
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x780);
+
+ val32 = (reg_eac >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000) &&
+ ((reg_ea4 & 0x03ff0000) < 0x01100000) &&
+ ((reg_ea4 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x02;
+	else	/* If RX not OK, bail out */
+ goto out;
+out:
+ return result;
+}
+
+#ifdef RTL8723BU_PATH_B
+static int rtl8723bu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, path_sel, val32;
+ int result = 0;
+
+ path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* One shot, path B LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
+
+ mdelay(1);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+ reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+ reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+ if (!(reg_eac & BIT(31)) &&
+ ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+ ((reg_ebc & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+ else
+ goto out;
+
+ if (!(reg_eac & BIT(30)) &&
+ (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
+ (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
+ __func__);
+out:
+ return result;
+}
+#endif
+
static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
int result[][8], int t)
{
@@ -3321,7 +4635,249 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
}
}
-static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32;
+ int path_a_ok /*, path_b_ok */;
+ int retry = 2;
+ const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+ };
+ u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
+ u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
+
+ /*
+ * Note: IQ calibration must be performed after loading
+	 * PHY_REG.txt, radio_a.txt and radio_b.txt
+ */
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ /* MAC settings */
+ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+ val32 |= 0x0f000000;
+ rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+#ifdef RTL8723BU_PATH_B
+ /* Set RF mode to standby Path B */
+ if (priv->tx_paths > 1)
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x10000);
+#endif
+
+#if 0
+ /* Page B init */
+ rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000);
+
+ if (priv->tx_paths > 1)
+ rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x0f600000);
+#endif
+
+ /*
+ * RX IQ calibration setting for 8723B D cut large current issue
+ * when leaving IPS
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
+ val32 |= 0x20;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8723bu_iqk_path_a(priv);
+ if (path_a_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+#if 0 /* Only needed in restore case, we may need this when going to suspend */
+ priv->RFCalibrateInfo.TxLOK[RF_A] =
+ rtl8xxxu_read_rfreg(priv, RF_A,
+ RF6052_REG_TXM_IDAC);
+#endif
+
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8723bu_rx_iqk_path_a(priv);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+ if (priv->tx_paths > 1) {
+#if 1
+ dev_warn(dev, "%s: Path B not supported\n", __func__);
+#else
+
+ /*
+ * Path A into standby
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Turn Path B ADDA on */
+ rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+ for (i = 0; i < retry; i++) {
+			path_b_ok = rtl8723bu_iqk_path_b(priv);
+ if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ result[t][4] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+ result[t][5] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8723bu_rx_iqk_path_b(priv);
+			if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_B_2);
+ result[t][6] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_B_2);
+ result[t][7] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
+#endif
+ }
+
+ /* Back to BB mode, load original value */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (t) {
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Restore RX initial gain */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
+
+ if (priv->tx_paths > 1) {
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | xb_agc);
+ }
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+ }
+}
+
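+/*
+ * Notify the firmware that calibration is starting/ending, so BT
+ * coexistence can back off. Only parts with the wide H2C mailbox
+ * (the 8723bu) implement this command.
+ */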
+static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start)
+{
+ struct h2c_cmd h2c;
+
+ if (priv->fops->mbox_ext_width < 4)
+ return;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_wlan_calibration.cmd = H2C_8723B_BT_WLAN_CALIBRATION;
+ h2c.bt_wlan_calibration.data = start;
+
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration));
+}
+
+static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
int result[4][8]; /* last is final result */
@@ -3332,6 +4888,8 @@ static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
s32 reg_tmp = 0;
bool simu;
+ rtl8xxxu_prepare_calibrate(priv, 1);
+
memset(result, 0, sizeof(result));
candidate = -1;
@@ -3419,6 +4977,135 @@ static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+
+ rtl8xxxu_prepare_calibrate(priv, 0);
+}
+
+static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok, path_b_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ u32 val32, bt_control;
+ s32 reg_tmp = 0;
+ bool simu;
+
+ rtl8xxxu_prepare_calibrate(priv, 1);
+
+ memset(result, 0, sizeof(result));
+ candidate = -1;
+
+ path_a_ok = false;
+ path_b_ok = false;
+
+ bt_control = rtl8xxxu_read32(priv, REG_BT_CONTROL_8723BU);
+
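+	/*
+	 * Run the calibration up to three times and use the first pair of
+	 * runs that agree within tolerance. Partial agreement accumulates
+	 * in result[3], which is used as a last resort candidate.
+	 */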
+ for (i = 0; i < 3; i++) {
+ rtl8723bu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8723bu_simularity_compare(priv, result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8723bu_simularity_compare(priv, result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8723bu_simularity_compare(priv, result, 1, 2);
+ if (simu) {
+ candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp)
+ candidate = 3;
+ else
+ candidate = -1;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+ dev_dbg(dev,
+			"%s: e94=%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+			"ecc=%x\n", __func__, reg_e94, reg_e9c,
+ reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ path_b_ok = true;
+ } else {
+ reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+ reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ if (priv->tx_paths > 1 && reg_eb4)
+ rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+ candidate, (reg_ec4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x18000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xe6177);
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
+ val32 |= 0x20;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x300bd);
+
+ if (priv->rf_paths > 1) {
+ dev_dbg(dev, "%s: beware 2T not yet supported\n", __func__);
+#ifdef RTL8723BU_PATH_B
+ if (RF_Path == 0x0) //S1
+ ODM_SetIQCbyRFpath(pDM_Odm, 0);
+ else //S0
+ ODM_SetIQCbyRFpath(pDM_Odm, 1);
+#endif
+ }
+ rtl8xxxu_prepare_calibrate(priv, 0);
}
static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
@@ -3456,12 +5143,17 @@ static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
}
/* Start LC calibration */
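+	/*
+	 * Parts with an S0S1 switch (8723bu) need RF6052_REG_S0S1 set
+	 * before the calibration starts and again once it completes.
+	 */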
+ if (priv->fops->has_s0s1)
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, 0xdfbe0);
val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG);
val32 |= 0x08000;
rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32);
msleep(100);
+ if (priv->fops->has_s0s1)
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, 0xdffe0);
+
/* Restore original parameters */
if (lstf & OFDM_LSTF_MASK) {
/* Path-A */
@@ -3584,6 +5276,64 @@ exit:
return ret;
}
+static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+	int count, ret = 0;
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+ /* Enable rising edge triggering interrupt */
+ val16 = rtl8xxxu_read16(priv, REG_GPIO_INTM);
+ val16 &= ~GPIO_INTM_EDGE_TRIG_IRQ;
+ rtl8xxxu_write16(priv, REG_GPIO_INTM, val16);
+
+ /* Release WLON reset 0x04[16]= 1*/
+	val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+	val32 |= APS_FSMCO_WLON_RESET;
+	rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* 0x0005[1] = 1 turn off MAC by HW state machine*/
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ if ((val8 & BIT(1)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!count) {
+ dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+ __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Enable BT control XTAL setting */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
+ val8 &= ~AFE_MISC_WL_XTAL_CTRL;
+ rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
+
+ /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 |= SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+ /* 0x0020[0] = 0 disable LDOA12 MACRO block*/
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 &= ~LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+exit:
+ return ret;
+}
+
static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -3640,7 +5390,7 @@ exit:
return ret;
}
-static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv)
+static void rtl8723a_disabled_to_emu(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -3660,7 +5410,82 @@ static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
}
-static int rtl8xxxu_emu_to_active(struct rtl8xxxu_priv *priv)
+static void rtl8192e_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* Clear suspend enable and power down enable*/
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+}
+
+static int rtl8192e_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+ /* disable HWPDN 0x04[15]=0*/
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(7);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* disable SW LPS 0x04[10]= 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(2);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* disable WL suspend*/
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* wait till 0x04[17] = 1 power ready*/
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* We should be able to optimize the following three entries into one */
+
+ /* release WLON reset 0x04[16]= 1*/
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+ /* set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8723a_emu_to_active(struct rtl8xxxu_priv *priv)
{
u8 val8;
u32 val32;
@@ -3752,6 +5577,127 @@ exit:
return ret;
}
+static int rtl8723b_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+ /* 0x20[0] = 1 enable LDOA12 MACRO block for all interface */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 |= LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+	/* 0x67[4] = 0 to disable BT_GPS_SEL pins */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ mdelay(1);
+
+ /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+ /* Disable SW LPS 0x04[10]= 0 */
+	val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~APS_FSMCO_SW_LPS;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Wait until 0x04[17] = 1 power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* We should be able to optimize the following three entries into one */
+
+ /* Release WLON reset 0x04[16]= 1*/
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_WLON_RESET;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Disable HWPDN 0x04[15]= 0*/
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~APS_FSMCO_HW_POWERDOWN;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Disable WL suspend*/
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Enable WL control XTAL setting */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
+ val8 |= AFE_MISC_WL_XTAL_CTRL;
+ rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
+
+ /* Enable falling edge triggering interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_INTM + 1, val8);
+
+ /* Enable GPIO9 interrupt mode */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2 + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2 + 1, val8);
+
+ /* Enable GPIO9 input mode */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2, val8);
+
+ /* Enable HSISR GPIO[C:0] interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_HSIMR);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_HSIMR, val8);
+
+ /* Enable HSISR GPIO9 interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_HSIMR + 2);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_HSIMR + 2, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL);
+ val8 |= MULTI_WIFI_HW_ROF_EN;
+ rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL, val8);
+
+ /* For GPIO9 internal pull high setting BIT(14) */
+ val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL + 1);
+ val8 |= BIT(6);
+ rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL + 1, val8);
+
+exit:
+ return ret;
+}
+
static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -3777,6 +5723,39 @@ static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
return 0;
}
+static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 val32;
+ int retry, retval;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ val32 = rtl8xxxu_read32(priv, REG_RXPKT_NUM);
+ val32 |= RXPKT_NUM_RW_RELEASE_EN;
+ rtl8xxxu_write32(priv, REG_RXPKT_NUM, val32);
+
+ retry = 100;
+ retval = -EBUSY;
+
+ do {
+ val32 = rtl8xxxu_read32(priv, REG_RXPKT_NUM);
+ if (val32 & RXPKT_NUM_RXDMA_IDLE) {
+ retval = 0;
+ break;
+ }
+ } while (retry--);
+
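+	/* Reset the reserved page queue; bit 31 of REG_RQPN presumably
+	 * latches the new values.
+	 */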
+ rtl8xxxu_write16(priv, REG_RQPN_NPQ, 0);
+ rtl8xxxu_write32(priv, REG_RQPN, 0x80000000);
+ mdelay(2);
+
+	if (retval)
+ dev_warn(dev, "Failed to flush FIFO\n");
+
+ return retval;
+}
+
static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -3789,9 +5768,9 @@ static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
*/
rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
- rtl8xxxu_disabled_to_emu(priv);
+ rtl8723a_disabled_to_emu(priv);
- ret = rtl8xxxu_emu_to_active(priv);
+ ret = rtl8723a_emu_to_active(priv);
if (ret)
goto exit;
@@ -3823,6 +5802,62 @@ exit:
return ret;
}
+static int rtl8723bu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ rtl8723a_disabled_to_emu(priv);
+
+ ret = rtl8723b_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ /*
+ * BT coexist power on settings. This is identical for 1 and 2
+ * antenna parts.
+ */
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1 + 3, 0x20);
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ rtl8xxxu_write8(priv, REG_BT_CONTROL_8723BU + 1, 0x18);
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+ /* Antenna inverse */
+ rtl8xxxu_write8(priv, 0xfe08, 0x01);
+
+ val16 = rtl8xxxu_read16(priv, REG_PWR_DATA);
+ val16 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write16(priv, REG_PWR_DATA, val16);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 |= LEDCFG0_DPDT_SELECT;
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ val8 &= ~PAD_CTRL1_SW_DPDT_SEL_DATA;
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+exit:
+ return ret;
+}
+
#ifdef CONFIG_RTL8XXXU_UNTESTED
static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
@@ -3930,6 +5965,52 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
#endif
+static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ ret = 0;
+
+ val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ if (val32 & SYS_CFG_SPS_LDO_SEL) {
+ rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0xc3);
+ } else {
+ /*
+ * Raise 1.2V voltage
+ */
+ val32 = rtl8xxxu_read32(priv, REG_8192E_LDOV12_CTRL);
+ val32 &= 0xff0fffff;
+ val32 |= 0x00500000;
+ rtl8xxxu_write32(priv, REG_8192E_LDOV12_CTRL, val32);
+ rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83);
+ }
+
+ rtl8192e_disabled_to_emu(priv);
+
+ ret = rtl8192e_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ rtl8xxxu_write16(priv, REG_CR, 0x0000);
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+exit:
+ return ret;
+}
+
static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -3945,6 +6026,8 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
}
+ rtl8xxxu_flush_fifo(priv);
+
rtl8xxxu_active_to_lps(priv);
/* Turn off RF */
@@ -3978,10 +6061,215 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
}
-static void rtl8xxxu_init_bt(struct rtl8xxxu_priv *priv)
+static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv)
{
- if (!priv->has_bluetooth)
- return;
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ /*
+ * Disable TX report timer
+ */
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ rtl8xxxu_write16(priv, REG_CR, 0x0000);
+
+ rtl8xxxu_active_to_lps(priv);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ rtl8723bu_active_to_emu(priv);
+ rtl8xxxu_emu_to_disabled(priv);
+}
+
+#ifdef NEED_PS_TDMA
+static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
+ u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
+{
+ struct h2c_cmd h2c;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.b_type_dma.cmd = H2C_8723B_B_TYPE_TDMA;
+ h2c.b_type_dma.data1 = arg1;
+ h2c.b_type_dma.data2 = arg2;
+ h2c.b_type_dma.data3 = arg3;
+ h2c.b_type_dma.data4 = arg4;
+ h2c.b_type_dma.data5 = arg5;
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
+}
+#endif
+
+static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ struct h2c_cmd h2c;
+ u32 val32;
+ u8 val8;
+
+ /*
+ * No indication anywhere as to what 0x0790 does. The 2 antenna
+ * vendor code preserves bits 6-7 here.
+ */
+ rtl8xxxu_write8(priv, 0x0790, 0x05);
+ /*
+ * 0x0778 seems to be related to enabling the number of antennas
+ * In the vendor driver halbtc8723b2ant_InitHwConfig() sets it
+ * to 0x03, while halbtc8723b1ant_InitHwConfig() sets it to 0x01
+ */
+ rtl8xxxu_write8(priv, 0x0778, 0x01);
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
+ val8 |= BIT(5);
+ rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780);
+
+ rtl8723bu_write_btreg(priv, 0x3c, 0x15); /* BT TRx Mask on */
+
+ /*
+ * Set BT grant to low
+ */
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_grant.cmd = H2C_8723B_BT_GRANT;
+ h2c.bt_grant.data = 0;
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant));
+
+ /*
+ * WLAN action by PTA
+ */
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+
+ /*
+ * BT select S0/S1 controlled by WiFi
+ */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 |= BIT(5);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+ val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+
+ /*
+ * Bits 6/7 are marked in/out ... but for what?
+ */
+ rtl8xxxu_write8(priv, 0x0974, 0xff);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+ val32 |= (BIT(0) | BIT(1));
+ rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+ rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 &= ~BIT(24);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ /*
+ * Fix external switch Main->S1, Aux->S0
+ */
+ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.ant_sel_rsv.cmd = H2C_8723B_ANT_SEL_RSV;
+ h2c.ant_sel_rsv.ant_inverse = 1;
+ h2c.ant_sel_rsv.int_switch_type = 0;
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv));
+
+ /*
+ * 0x280, 0x00, 0x200, 0x80 - not clear
+ */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+
+ /*
+ * Software control, antenna at WiFi side
+ */
+#ifdef NEED_PS_TDMA
+ rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00);
+#endif
+
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_info.cmd = H2C_8723B_BT_INFO;
+ h2c.bt_info.data = BIT(0);
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info));
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT;
+ h2c.ignore_wlan.data = 0;
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan));
+}
+
+static void rtl8723b_disable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 &= ~(BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+}
+
+static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv)
+{
+ u32 agg_rx;
+ u8 agg_ctrl;
+
+ /*
+ * For now simply disable RX aggregation
+ */
+ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
+ agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+
+ agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH);
+ agg_rx &= ~RXDMA_USB_AGG_ENABLE;
+ agg_rx &= ~0xff0f;
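+	/* also clear what appear to be the aggregation threshold/timeout bits */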
+
+ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
+ rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx);
+}
+
+static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ /* Time duration for NHM unit: 4us, 0x2710=40ms */
+ rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0x2710);
+ rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff);
+ rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff52);
+ rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff);
+ /* TH8 */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 |= 0xff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ /* Enable CCK */
+ val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B);
+ val32 |= BIT(8) | BIT(9) | BIT(10);
+ rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32);
+ /* Max power amongst all RX antennas */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC);
+ val32 |= BIT(7);
+ rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
}
static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
@@ -4015,11 +6303,30 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
if (!macpower) {
- ret = rtl8xxxu_init_llt_table(priv, TX_TOTAL_PAGE_NUM);
+ ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
if (ret) {
dev_warn(dev, "%s: LLT table init failed\n", __func__);
goto exit;
}
+
+ /*
+ * Presumably this is for 8188EU as well
+ * Enable TX report and TX report timer
+ */
+		if (priv->rtlchip == 0x8723b) {
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+ /* Set MAX RPT MACID */
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02);
+ /* TX report Timer. Unit: 32us */
+ rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0);
+
+ /* tmp ps ? */
+ val8 = rtl8xxxu_read8(priv, 0xa3);
+ val8 &= 0xf8;
+ rtl8xxxu_write8(priv, 0xa3, val8);
+ }
}
ret = rtl8xxxu_download_firmware(priv);
@@ -4031,7 +6338,42 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
- ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+ /* Solve too many protocol error on USB bus */
+ /* Can't do this for 8188/8192 UMC A cut parts */
+ if (priv->rtlchip == 0x8723a ||
+ ((priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c ||
+ priv->rtlchip == 0x8188c) &&
+ (priv->chip_cut || !priv->vendor_umc))) {
+ rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+ rtl8xxxu_write8(priv, 0xfe41, 0x94);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+ rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+ rtl8xxxu_write8(priv, 0xfe41, 0x19);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+ rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+ rtl8xxxu_write8(priv, 0xfe41, 0x91);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+ rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+ rtl8xxxu_write8(priv, 0xfe41, 0x81);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+ }
+
+ if (priv->rtlchip == 0x8192e) {
+ rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
+ rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
+ }
+
+ if (priv->fops->phy_init_antenna_selection)
+ priv->fops->phy_init_antenna_selection(priv);
+
+ if (priv->rtlchip == 0x8723b)
+ ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table);
+ else
+ ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+
dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
if (ret)
goto exit;
@@ -4046,6 +6388,17 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rftable = rtl8723au_radioa_1t_init_table;
ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
break;
+ case 0x8723b:
+ rftable = rtl8723bu_radioa_1t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ /*
+ * PHY LCK
+ */
+ rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
+ msleep(200);
+ rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
+ break;
case 0x8188c:
if (priv->hi_pa)
rftable = rtl8188ru_radioa_1t_highpa_table;
@@ -4072,27 +6425,27 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
- /* Reduce 80M spur */
- rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
-
- /* RFSW Control - clear bit 14 ?? */
- rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
- /* 0x07000760 */
- val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
- FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
- ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
- FPGA0_RF_BD_CTRL_SHIFT);
- rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
- /* 0x860[6:5]= 00 - why? - this sets antenna B */
- rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
-
- priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
- RF6052_REG_MODE_AG);
+ /*
+ * Chip specific quirks
+ */
+ if (priv->rtlchip == 0x8723a) {
+ /* Fix USB interface interference issue */
+ rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+ rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
+ rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+
+ /* Reduce 80M spur */
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+ } else {
+ val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
+ val32 |= TXDMA_OFFSET_DROP_DATA_EN;
+ rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
+ }
- dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
if (!macpower) {
if (priv->ep_tx_normal_queue)
val8 = TX_PAGE_NUM_NORM_PQ;
@@ -4114,6 +6467,10 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* Set TX buffer boundary
*/
val8 = TX_TOTAL_PAGE_NUM + 1;
+
+ if (priv->rtlchip == 0x8723b)
+ val8 -= 1;
+
rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8);
@@ -4126,15 +6483,37 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
+ /* RFSW Control - clear bit 14 ?? */
+ if (priv->rtlchip != 0x8723b)
+ rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
+ /* 0x07000760 */
+ val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+ FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
+ ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
+ FPGA0_RF_BD_CTRL_SHIFT);
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+ /* 0x860[6:5]= 00 - why? - this sets antenna B */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
+
+ priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
+ RF6052_REG_MODE_AG);
+
/*
* Set RX page boundary
*/
- rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
+ if (priv->rtlchip == 0x8723b)
+ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f);
+ else
+ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
/*
-	 * Transfer page size is always 128
+	 * Transfer page size is 128, except on the 8723b which uses 256
*/
- val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
- (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
+ if (priv->rtlchip == 0x8723b)
+ val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) |
+ (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT);
+ else
+ val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
+ (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
rtl8xxxu_write8(priv, REG_PBP, val8);
/*
@@ -4155,7 +6534,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* Configure initial WMAC settings
*/
val32 = RCR_ACCEPT_PHYS_MATCH | RCR_ACCEPT_MCAST | RCR_ACCEPT_BCAST |
- /* RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON | */
RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
rtl8xxxu_write32(priv, REG_RCR, val32);
@@ -4220,6 +6598,42 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write16(priv, REG_BEACON_TCFG, 0x660F);
/*
+ * Initialize burst parameters
+ */
+ if (priv->rtlchip == 0x8723b) {
+ /*
+ * For USB high speed set 512B packets
+ */
+ val8 = rtl8xxxu_read8(priv, REG_RXDMA_PRO_8723B);
+ val8 &= ~(BIT(4) | BIT(5));
+ val8 |= BIT(4);
+ val8 |= BIT(1) | BIT(2) | BIT(3);
+ rtl8xxxu_write8(priv, REG_RXDMA_PRO_8723B, val8);
+
+ /*
+ * For USB high speed set 512B packets
+ */
+ val8 = rtl8xxxu_read8(priv, REG_HT_SINGLE_AMPDU_8723B);
+ val8 |= BIT(7);
+ rtl8xxxu_write8(priv, REG_HT_SINGLE_AMPDU_8723B, val8);
+
+ rtl8xxxu_write16(priv, REG_MAX_AGGR_NUM, 0x0c14);
+ rtl8xxxu_write8(priv, REG_AMPDU_MAX_TIME_8723B, 0x5e);
+ rtl8xxxu_write32(priv, REG_AGGLEN_LMT, 0xffffffff);
+ rtl8xxxu_write8(priv, REG_RX_PKT_LIMIT, 0x18);
+ rtl8xxxu_write8(priv, REG_PIFS, 0x00);
+ rtl8xxxu_write8(priv, REG_USTIME_TSF_8723B, 0x50);
+ rtl8xxxu_write8(priv, REG_USTIME_EDCA, 0x50);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
+ val8 |= BIT(5) | BIT(6);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
+ }
+
+ if (priv->fops->init_aggregation)
+ priv->fops->init_aggregation(priv);
+
+ /*
* Enable CCK and OFDM block
*/
val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
@@ -4234,7 +6648,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Start out with default power levels for channel 6, 20MHz
*/
- rtl8723a_set_tx_power(priv, 1, false);
+ priv->fops->set_tx_power(priv, 1, false);
/* Let the 8051 take control of antenna setting */
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
@@ -4248,78 +6662,37 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
- /*
- * Not sure if we should get into this at all
- */
- if (priv->iqk_initialized) {
- rtl8xxxu_restore_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
- priv->bb_recovery_backup,
- RTL8XXXU_BB_REGS);
- } else {
- rtl8723a_phy_iq_calibrate(priv);
- priv->iqk_initialized = true;
- }
-
- /*
- * This should enable thermal meter
- */
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60);
+ if (priv->fops->init_statistics)
+ priv->fops->init_statistics(priv);
rtl8723a_phy_lc_calibrate(priv);
- /* fix USB interface interference issue */
- rtl8xxxu_write8(priv, 0xfe40, 0xe0);
- rtl8xxxu_write8(priv, 0xfe41, 0x8d);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
- rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
-
- /* Solve too many protocol error on USB bus */
- /* Can't do this for 8188/8192 UMC A cut parts */
- rtl8xxxu_write8(priv, 0xfe40, 0xe6);
- rtl8xxxu_write8(priv, 0xfe41, 0x94);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe0);
- rtl8xxxu_write8(priv, 0xfe41, 0x19);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe5);
- rtl8xxxu_write8(priv, 0xfe41, 0x91);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe2);
- rtl8xxxu_write8(priv, 0xfe41, 0x81);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- /* Init BT hw config. */
- rtl8xxxu_init_bt(priv);
+ priv->fops->phy_iq_calibrate(priv);
/*
- * Not sure if we really need to save these parameters, but the
- * vendor driver does
+ * This should enable thermal meter
*/
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
- if (val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)
- priv->path_a_hi_power = 1;
-
- val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
- priv->path_a_rf_paths = val32 & OFDM_RF_PATH_RX_MASK;
-
- val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
- priv->path_a_ig_value = val32 & OFDM0_X_AGC_CORE1_IGI_MASK;
+ if (priv->fops->has_s0s1)
+ rtl8xxxu_write_rfreg(priv,
+ RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
+ else
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60);
/* Set NAV_UPPER to 30000us */
val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
- /*
- * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
- * but we need to fin root cause.
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
- if ((val32 & 0xff000000) != 0x83000000) {
- val32 |= FPGA_RF_MODE_CCK;
- rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+ if (priv->rtlchip == 0x8723a) {
+ /*
+ * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
+		 * but we need to find the root cause.
+ * This is 8723au only.
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ if ((val32 & 0xff000000) != 0x83000000) {
+ val32 |= FPGA_RF_MODE_CCK;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+ }
}
val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
@@ -4335,7 +6708,7 @@ static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
- rtl8xxxu_power_off(priv);
+ priv->fops->power_off(priv);
}
static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
@@ -4381,7 +6754,7 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
}
static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, const u8* mac)
+ struct ieee80211_vif *vif, const u8 *mac)
{
struct rtl8xxxu_priv *priv = hw->priv;
u8 val8;
@@ -4402,11 +6775,13 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
}
-static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi)
+static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi)
{
struct h2c_cmd h2c;
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+
h2c.ramask.cmd = H2C_SET_RATE_MASK;
h2c.ramask.mask_lo = cpu_to_le16(ramask & 0xffff);
h2c.ramask.mask_hi = cpu_to_le16(ramask >> 16);
@@ -4415,9 +6790,68 @@ static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
if (sgi)
h2c.ramask.arg |= 0x20;
- dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x\n", __func__,
- ramask, h2c.ramask.arg);
- rtl8723a_h2c_cmd(priv, &h2c);
+ dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n",
+ __func__, ramask, h2c.ramask.arg, sizeof(h2c.ramask));
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask));
+}
+
+static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi)
+{
+ struct h2c_cmd h2c;
+ u8 bw = 0;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+
+ h2c.b_macid_cfg.cmd = H2C_8723B_MACID_CFG_RAID;
+ h2c.b_macid_cfg.ramask0 = ramask & 0xff;
+ h2c.b_macid_cfg.ramask1 = (ramask >> 8) & 0xff;
+ h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff;
+ h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
+
+ h2c.ramask.arg = 0x80;
+ h2c.b_macid_cfg.data1 = 0;
+ if (sgi)
+ h2c.b_macid_cfg.data1 |= BIT(7);
+
+ h2c.b_macid_cfg.data2 = bw;
+
+ dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n",
+ __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg));
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
+}
+
+static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect)
+{
+ struct h2c_cmd h2c;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+
+ h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT;
+
+ if (connect)
+ h2c.joinbss.data = H2C_JOIN_BSS_CONNECT;
+ else
+ h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT;
+
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss));
+}
+
+static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect)
+{
+ struct h2c_cmd h2c;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+
+ h2c.media_status_rpt.cmd = H2C_8723B_MEDIA_STATUS_RPT;
+ if (connect)
+ h2c.media_status_rpt.parm |= BIT(0);
+ else
+ h2c.media_status_rpt.parm &= ~BIT(0);
+
+ rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
}
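/*
 * A minimal, self-contained sketch of the H2C pattern the report_connect
 * variants above follow: zero the whole command union, fill in exactly one
 * member view, then pass sizeof() of that member so only the bytes the
 * command uses get sent. The struct and the h2c_send() helper below are
 * hypothetical stand-ins for struct h2c_cmd and rtl8723a_h2c_cmd().
 */
#include <string.h>

struct example_h2c {
	union {
		struct { unsigned char cmd; unsigned char data; } joinbss;
		struct {
			unsigned char cmd;
			unsigned char parm;
			unsigned char macid;
			unsigned char macid_end;
		} media_status_rpt;
	};
};

static void h2c_send(const void *buf, unsigned int len)
{
	(void)buf; (void)len;	/* stand-in for the mailbox write */
}

static void example_report_connect(int newer_chip, int connect)
{
	struct example_h2c h2c;

	memset(&h2c, 0, sizeof(h2c));
	if (newer_chip) {
		h2c.media_status_rpt.cmd = 0x01; /* H2C_8723B_MEDIA_STATUS_RPT */
		h2c.media_status_rpt.parm = connect ? 1 : 0;
		h2c_send(&h2c, sizeof(h2c.media_status_rpt));
	} else {
		h2c.joinbss.cmd = 2;		/* H2C_JOIN_BSS_REPORT */
		h2c.joinbss.data = connect ? 1 : 0;
		h2c_send(&h2c, sizeof(h2c.joinbss));
	}
}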
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
@@ -4452,11 +6886,8 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 val8;
if (changed & BSS_CHANGED_ASSOC) {
- struct h2c_cmd h2c;
-
dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
- memset(&h2c, 0, sizeof(struct h2c_cmd));
rtl8xxxu_set_linktype(priv, vif->type);
if (bss_conf->assoc) {
@@ -4486,14 +6917,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
sgi = 1;
rcu_read_unlock();
- rtl8xxxu_update_rate_mask(priv, ramask, sgi);
-
- val32 = rtl8xxxu_read32(priv, REG_RCR);
- val32 |= RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON;
- rtl8xxxu_write32(priv, REG_RCR, val32);
-
- /* Enable RX of data frames */
- rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
+ priv->fops->update_rate_mask(priv, ramask, sgi);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
@@ -4503,23 +6927,14 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
0xc000 | bss_conf->aid);
- h2c.joinbss.data = H2C_JOIN_BSS_CONNECT;
+ priv->fops->report_connect(priv, 0, true);
} else {
- val32 = rtl8xxxu_read32(priv, REG_RCR);
- val32 &= ~(RCR_CHECK_BSSID_MATCH |
- RCR_CHECK_BSSID_BEACON);
- rtl8xxxu_write32(priv, REG_RCR, val32);
-
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
val8 |= BEACON_DISABLE_TSF_UPDATE;
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
- /* Disable RX of data frames */
- rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
- h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT;
+ priv->fops->report_connect(priv, 0, false);
}
- h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT;
- rtl8723a_h2c_cmd(priv, &h2c);
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -4594,7 +7009,12 @@ static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
return queue;
}
-static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc)
+/*
+ * Despite newer chips 8723b/8812/8821 having a larger TX descriptor
+ * format, the descriptor checksum is still only calculated over the
+ * initial 32 bytes of the descriptor!
+ */
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
{
__le16 *ptr = (__le16 *)tx_desc;
u16 csum = 0;
@@ -4606,7 +7026,7 @@ static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc)
*/
tx_desc->csum = cpu_to_le16(0);
- for (i = 0; i < (sizeof(struct rtl8xxxu_tx_desc) / sizeof(u16)); i++)
+ for (i = 0; i < (sizeof(struct rtl8723au_tx_desc) / sizeof(u16)); i++)
csum = csum ^ le16_to_cpu(ptr[i]);
tx_desc->csum |= cpu_to_le16(csum);
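/*
 * A standalone sketch of the checksum rule described above: XOR the first
 * sixteen 16-bit words (32 bytes) of the descriptor, with the csum field
 * zeroed beforehand. Illustrative only - the driver version works on the
 * live little-endian descriptor via le16_to_cpu(); here the buffer is
 * assumed to already be in CPU byte order.
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t example_tx_desc_csum(const void *desc)
{
	const uint16_t *ptr = desc;
	uint16_t csum = 0;
	size_t i;

	/* Only the initial 32 bytes participate, even for 40-byte descriptors */
	for (i = 0; i < 32 / sizeof(uint16_t); i++)
		csum ^= ptr[i];

	return csum;
}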
@@ -4675,13 +7095,15 @@ static void rtl8xxxu_tx_complete(struct urb *urb)
struct sk_buff *skb = (struct sk_buff *)urb->context;
struct ieee80211_tx_info *tx_info;
struct ieee80211_hw *hw;
+ struct rtl8xxxu_priv *priv;
struct rtl8xxxu_tx_urb *tx_urb =
container_of(urb, struct rtl8xxxu_tx_urb, urb);
tx_info = IEEE80211_SKB_CB(skb);
hw = tx_info->rate_driver_data[0];
+ priv = hw->priv;
- skb_pull(skb, sizeof(struct rtl8xxxu_tx_desc));
+ skb_pull(skb, priv->fops->tx_desc_size);
ieee80211_tx_info_clear_status(tx_info);
tx_info->status.rates[0].idx = -1;
@@ -4692,7 +7114,7 @@ static void rtl8xxxu_tx_complete(struct urb *urb)
ieee80211_tx_status_irqsafe(hw, skb);
- rtl8xxxu_free_tx_urb(hw->priv, tx_urb);
+ rtl8xxxu_free_tx_urb(priv, tx_urb);
}
static void rtl8xxxu_dump_action(struct device *dev,
@@ -4742,7 +7164,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
- struct rtl8xxxu_tx_desc *tx_desc;
+ struct rtl8723au_tx_desc *tx_desc;
+ struct rtl8723bu_tx_desc *tx_desc40;
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
@@ -4751,16 +7174,18 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
u16 pktlen = skb->len;
u16 seq_number;
u16 rate_flag = tx_info->control.rates[0].flags;
+ int tx_desc_size = priv->fops->tx_desc_size;
int ret;
+ bool usedesc40, ampdu_enable;
- if (skb_headroom(skb) < sizeof(struct rtl8xxxu_tx_desc)) {
+ if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
"%s: Not enough headroom (%i) for tx descriptor\n",
__func__, skb_headroom(skb));
goto error;
}
- if (unlikely(skb->len > (65535 - sizeof(struct rtl8xxxu_tx_desc)))) {
+ if (unlikely(skb->len > (65535 - tx_desc_size))) {
dev_warn(dev, "%s: Trying to send over-sized skb (%i)\n",
__func__, skb->len);
goto error;
@@ -4779,17 +7204,17 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (ieee80211_is_action(hdr->frame_control))
rtl8xxxu_dump_action(dev, hdr);
+ usedesc40 = (tx_desc_size == 40);
tx_info->rate_driver_data[0] = hw;
if (control && control->sta)
sta = control->sta;
- tx_desc = (struct rtl8xxxu_tx_desc *)
- skb_push(skb, sizeof(struct rtl8xxxu_tx_desc));
+ tx_desc = (struct rtl8723au_tx_desc *)skb_push(skb, tx_desc_size);
- memset(tx_desc, 0, sizeof(struct rtl8xxxu_tx_desc));
+ memset(tx_desc, 0, tx_desc_size);
tx_desc->pkt_size = cpu_to_le16(pktlen);
- tx_desc->pkt_offset = sizeof(struct rtl8xxxu_tx_desc);
+ tx_desc->pkt_offset = tx_desc_size;
tx_desc->txdw0 =
TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
@@ -4815,19 +7240,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
}
}
- seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT);
-
- if (rate_flag & IEEE80211_TX_RC_MCS)
- rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
- else
- rate = tx_rate->hw_value;
- tx_desc->txdw5 = cpu_to_le32(rate);
-
- if (ieee80211_is_data(hdr->frame_control))
- tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
-
/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
+ ampdu_enable = false;
if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
if (sta->ht_cap.ht_supported) {
u32 ampdu, val32;
@@ -4835,35 +7249,118 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
ampdu = (u32)sta->ht_cap.ampdu_density;
val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
tx_desc->txdw2 |= cpu_to_le32(val32);
- tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE);
- } else
- tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
- } else
- tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
- if (ieee80211_is_data_qos(hdr->frame_control))
- tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS);
- if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
- (sta && vif && vif->bss_conf.use_short_preamble))
- tx_desc->txdw4 |= cpu_to_le32(TXDESC_SHORT_PREAMBLE);
- if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
- (ieee80211_is_data_qos(hdr->frame_control) &&
- sta && sta->ht_cap.cap &
- (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
- tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
- }
- if (ieee80211_is_mgmt(hdr->frame_control)) {
- tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
- tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE);
- tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT);
- tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE);
+ ampdu_enable = true;
+ }
}
- if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
- /* Use RTS rate 24M - does the mac80211 tell us which to use? */
- tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M);
- tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE);
- tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE);
+ if (rate_flag & IEEE80211_TX_RC_MCS)
+ rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+ else
+ rate = tx_rate->hw_value;
+
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ if (!usedesc40) {
+ tx_desc->txdw5 = cpu_to_le32(rate);
+
+ if (ieee80211_is_data(hdr->frame_control))
+ tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+
+ tx_desc->txdw3 =
+ cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A);
+
+ if (ampdu_enable)
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A);
+ else
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A);
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
+ tx_desc->txdw4 |=
+ cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A);
+ tx_desc->txdw5 |=
+ cpu_to_le32(6 <<
+ TXDESC_RETRY_LIMIT_SHIFT_8723A);
+ tx_desc->txdw5 |=
+ cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A);
+ }
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS_8723A);
+
+ if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+ (sta && vif && vif->bss_conf.use_short_preamble))
+ tx_desc->txdw4 |=
+ cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723A);
+
+ if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
+ (ieee80211_is_data_qos(hdr->frame_control) &&
+ sta && sta->ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+ }
+
+ if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /*
+ * Use RTS rate 24M - does the mac80211 tell
+ * us which to use?
+ */
+ tx_desc->txdw4 |=
+ cpu_to_le32(DESC_RATE_24M <<
+ TXDESC_RTS_RATE_SHIFT_8723A);
+ tx_desc->txdw4 |=
+ cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A);
+ tx_desc->txdw4 |=
+ cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A);
+ }
+ } else {
+ tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc;
+
+ tx_desc40->txdw4 = cpu_to_le32(rate);
+ if (ieee80211_is_data(hdr->frame_control)) {
+			tx_desc40->txdw4 |=
+				cpu_to_le32(0x1f <<
+					    TXDESC_DATA_RATE_FB_SHIFT_8723B);
+ }
+
+ tx_desc40->txdw9 =
+ cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B);
+
+ if (ampdu_enable)
+ tx_desc40->txdw2 |=
+ cpu_to_le32(TXDESC_AGG_ENABLE_8723B);
+ else
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B);
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
+ tx_desc40->txdw3 |=
+ cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723B);
+ tx_desc40->txdw4 |=
+ cpu_to_le32(6 <<
+ TXDESC_RETRY_LIMIT_SHIFT_8723B);
+ tx_desc40->txdw4 |=
+ cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723B);
+ }
+
+ if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+ (sta && vif && vif->bss_conf.use_short_preamble))
+ tx_desc40->txdw5 |=
+ cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723B);
+
+ if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /*
+ * Use RTS rate 24M - does the mac80211 tell
+ * us which to use?
+ */
+			tx_desc40->txdw4 |=
+				cpu_to_le32(DESC_RATE_24M <<
+					    TXDESC_RTS_RATE_SHIFT_8723B);
+			tx_desc40->txdw3 |=
+				cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723B);
+			tx_desc40->txdw3 |=
+				cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B);
+ }
}
rtl8xxxu_calc_tx_desc_csum(tx_desc);
@@ -4885,13 +7382,13 @@ error:
static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv,
struct ieee80211_rx_status *rx_status,
- struct rtl8xxxu_rx_desc *rx_desc,
- struct rtl8723au_phy_stats *phy_stats)
+ struct rtl8723au_phy_stats *phy_stats,
+ u32 rxmcs)
{
if (phy_stats->sgi_en)
rx_status->flag |= RX_FLAG_SHORT_GI;
- if (rx_desc->rxmcs < DESC_RATE_6M) {
+ if (rxmcs < DESC_RATE_6M) {
/*
* Handle PHY stats for CCK rates
*/
@@ -5002,6 +7499,138 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
}
}
+static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+ struct rtl8723au_phy_stats *phy_stats;
+ int drvinfo_sz, desc_shift;
+
+ skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
+
+ phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+
+ drvinfo_sz = rx_desc->drvinfo_sz * 8;
+ desc_shift = rx_desc->shift;
+ skb_pull(skb, drvinfo_sz + desc_shift);
+
+ if (rx_desc->phy_stats)
+ rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
+ rx_desc->rxmcs);
+
+ rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!rx_desc->swdec)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_desc->bw)
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (rx_desc->rxht) {
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+ } else {
+ rx_status->rate_idx = rx_desc->rxmcs;
+ }
+
+ return RX_TYPE_DATA_PKT;
+}
+
+static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct rtl8723bu_rx_desc *rx_desc =
+ (struct rtl8723bu_rx_desc *)skb->data;
+ struct rtl8723au_phy_stats *phy_stats;
+ int drvinfo_sz, desc_shift;
+
+ skb_pull(skb, sizeof(struct rtl8723bu_rx_desc));
+
+ phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+
+ drvinfo_sz = rx_desc->drvinfo_sz * 8;
+ desc_shift = rx_desc->shift;
+ skb_pull(skb, drvinfo_sz + desc_shift);
+
+ if (rx_desc->rpt_sel) {
+ struct device *dev = &priv->udev->dev;
+ dev_dbg(dev, "%s: C2H packet\n", __func__);
+ return RX_TYPE_C2H;
+ }
+
+ if (rx_desc->phy_stats)
+ rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
+ rx_desc->rxmcs);
+
+ rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!rx_desc->swdec)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_desc->bw)
+ rx_status->flag |= RX_FLAG_40MHZ;
+
+ if (rx_desc->rxmcs >= DESC_RATE_MCS0) {
+ rx_status->flag |= RX_FLAG_HT;
+ rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+ } else {
+ rx_status->rate_idx = rx_desc->rxmcs;
+ }
+
+ return RX_TYPE_DATA_PKT;
+}
+
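/*
 * A sketch of the RX buffer accounting both parse_rx_desc variants above
 * share: strip the fixed descriptor first, then the optional
 * PHY-stats/driver-info area (drvinfo_sz counts 8-byte units) plus the
 * alignment shift, leaving skb->data at the 802.11 frame. Plain C
 * stand-in, not driver API.
 */
static unsigned int example_rx_payload_offset(unsigned int desc_size,
					      unsigned int drvinfo_sz,
					      unsigned int shift)
{
	/* drvinfo_sz is in units of 8 bytes, as in rx_desc->drvinfo_sz * 8 */
	return desc_size + drvinfo_sz * 8 + shift;
}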
+static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
+ struct sk_buff *skb)
+{
+ struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data;
+ struct device *dev = &priv->udev->dev;
+ int len;
+
+ len = skb->len - 2;
+
+ dev_dbg(dev, "C2H ID %02x seq %02x, len %02x source %02x\n",
+ c2h->id, c2h->seq, len, c2h->bt_info.response_source);
+
+	switch (c2h->id) {
+ case C2H_8723B_BT_INFO:
+ if (c2h->bt_info.response_source >
+ BT_INFO_SRC_8723B_BT_ACTIVE_SEND)
+ dev_dbg(dev, "C2H_BT_INFO WiFi only firmware\n");
+ else
+ dev_dbg(dev, "C2H_BT_INFO BT/WiFi coexist firmware\n");
+
+ if (c2h->bt_info.bt_has_reset)
+ dev_dbg(dev, "BT has been reset\n");
+ if (c2h->bt_info.tx_rx_mask)
+ dev_dbg(dev, "BT TRx mask\n");
+
+ break;
+ case C2H_8723B_BT_MP_INFO:
+ dev_dbg(dev, "C2H_MP_INFO ext ID %02x, status %02x\n",
+ c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status);
+ break;
+ case C2H_8723B_RA_REPORT:
+ dev_dbg(dev,
+ "C2H RA RPT: rate %02x, unk %i, macid %02x, noise %i\n",
+ c2h->ra_report.rate, c2h->ra_report.dummy0_0,
+ c2h->ra_report.macid, c2h->ra_report.noisy_state);
+ break;
+ default:
+ dev_info(dev, "Unhandled C2H event %02x seq %02x\n",
+ c2h->id, c2h->seq);
+ print_hex_dump(KERN_INFO, "C2H content: ", DUMP_PREFIX_NONE,
+ 16, 1, c2h->raw.payload, len, false);
+ break;
+ }
+}
+
static void rtl8xxxu_rx_complete(struct urb *urb)
{
struct rtl8xxxu_rx_urb *rx_urb =
@@ -5009,59 +7638,32 @@ static void rtl8xxxu_rx_complete(struct urb *urb)
struct ieee80211_hw *hw = rx_urb->hw;
struct rtl8xxxu_priv *priv = hw->priv;
struct sk_buff *skb = (struct sk_buff *)urb->context;
- struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
- struct rtl8723au_phy_stats *phy_stats;
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_mgmt *mgmt;
struct device *dev = &priv->udev->dev;
__le32 *_rx_desc_le = (__le32 *)skb->data;
u32 *_rx_desc = (u32 *)skb->data;
- int cnt, len, drvinfo_sz, desc_shift, i;
+ int rx_type, i;
for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
_rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
- cnt = rx_desc->frag;
- len = rx_desc->pktlen;
- drvinfo_sz = rx_desc->drvinfo_sz * 8;
- desc_shift = rx_desc->shift;
skb_put(skb, urb->actual_length);
if (urb->status == 0) {
- skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
- phy_stats = (struct rtl8723au_phy_stats *)skb->data;
-
- skb_pull(skb, drvinfo_sz + desc_shift);
-
- mgmt = (struct ieee80211_mgmt *)skb->data;
-
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
- if (rx_desc->phy_stats)
- rtl8xxxu_rx_parse_phystats(priv, rx_status,
- rx_desc, phy_stats);
+ rx_type = priv->fops->parse_rx_desc(priv, skb, rx_status);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
- rx_status->flag |= RX_FLAG_MACTIME_START;
-
- if (!rx_desc->swdec)
- rx_status->flag |= RX_FLAG_DECRYPTED;
- if (rx_desc->crc32)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rx_desc->bw)
- rx_status->flag |= RX_FLAG_40MHZ;
-
- if (rx_desc->rxht) {
- rx_status->flag |= RX_FLAG_HT;
- rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
- } else {
- rx_status->rate_idx = rx_desc->rxmcs;
+ if (rx_type == RX_TYPE_DATA_PKT)
+ ieee80211_rx_irqsafe(hw, skb);
+ else {
+ rtl8723bu_handle_c2h(priv, skb);
+ dev_kfree_skb(skb);
}
- ieee80211_rx_irqsafe(hw, skb);
skb = NULL;
rx_urb->urb.context = NULL;
rtl8xxxu_queue_rx_urb(priv, rx_urb);
@@ -5218,9 +7820,9 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
channel = hw->conf.chandef.chan->hw_value;
- rtl8723a_set_tx_power(priv, channel, ht40);
+ priv->fops->set_tx_power(priv, channel, ht40);
- rtl8723au_config_channel(hw);
+ priv->fops->config_channel(hw);
}
exit:
@@ -5284,11 +7886,56 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags, u64 multicast)
{
struct rtl8xxxu_priv *priv = hw->priv;
+ u32 rcr = rtl8xxxu_read32(priv, REG_RCR);
dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
__func__, changed_flags, *total_flags);
- *total_flags &= (FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC);
+ /*
+ * FIF_ALLMULTI ignored as all multicast frames are accepted (REG_MAR)
+ */
+
+ if (*total_flags & FIF_FCSFAIL)
+ rcr |= RCR_ACCEPT_CRC32;
+ else
+ rcr &= ~RCR_ACCEPT_CRC32;
+
+ /*
+ * FIF_PLCPFAIL not supported?
+ */
+
+ if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
+ rcr &= ~RCR_CHECK_BSSID_BEACON;
+ else
+ rcr |= RCR_CHECK_BSSID_BEACON;
+
+ if (*total_flags & FIF_CONTROL)
+ rcr |= RCR_ACCEPT_CTRL_FRAME;
+ else
+ rcr &= ~RCR_ACCEPT_CTRL_FRAME;
+
+ if (*total_flags & FIF_OTHER_BSS) {
+ rcr |= RCR_ACCEPT_AP;
+ rcr &= ~RCR_CHECK_BSSID_MATCH;
+ } else {
+ rcr &= ~RCR_ACCEPT_AP;
+ rcr |= RCR_CHECK_BSSID_MATCH;
+ }
+
+ if (*total_flags & FIF_PSPOLL)
+ rcr |= RCR_ACCEPT_PM;
+ else
+ rcr &= ~RCR_ACCEPT_PM;
+
+ /*
+ * FIF_PROBE_REQ ignored as probe requests always seem to be accepted
+ */
+
+ rtl8xxxu_write32(priv, REG_RCR, rcr);
+
+ *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC |
+ FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL |
+ FIF_PROBE_REQ);
}
static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
@@ -5375,13 +8022,13 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
static int
rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
- bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
u8 ampdu_factor, ampdu_density;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
switch (action) {
case IEEE80211_AMPDU_TX_START:
@@ -5431,10 +8078,12 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
init_usb_anchor(&priv->tx_anchor);
init_usb_anchor(&priv->int_anchor);
- rtl8723a_enable_rf(priv);
- ret = rtl8xxxu_submit_int_urb(hw);
- if (ret)
- goto exit;
+ priv->fops->enable_rf(priv);
+ if (priv->usb_interrupts) {
+ ret = rtl8xxxu_submit_int_urb(hw);
+ if (ret)
+ goto exit;
+ }
for (i = 0; i < RTL8XXXU_TX_URBS; i++) {
tx_urb = kmalloc(sizeof(struct rtl8xxxu_tx_urb), GFP_KERNEL);
@@ -5473,12 +8122,9 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
}
exit:
/*
- * Disable all data frames
- */
- rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
- /*
- * Accept all mgmt frames
+ * Accept all data and mgmt frames
*/
+ rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);
rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
@@ -5512,14 +8158,16 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
usb_kill_anchored_urbs(&priv->rx_anchor);
usb_kill_anchored_urbs(&priv->tx_anchor);
- usb_kill_anchored_urbs(&priv->int_anchor);
+ if (priv->usb_interrupts)
+ usb_kill_anchored_urbs(&priv->int_anchor);
- rtl8723a_disable_rf(priv);
+ priv->fops->disable_rf(priv);
/*
* Disable interrupts
*/
- rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ if (priv->usb_interrupts)
+ rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
rtl8xxxu_free_rx_resources(priv);
rtl8xxxu_free_tx_resources(priv);
@@ -5650,7 +8298,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
}
if (untested) {
- rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+ rtl8xxxu_debug |= RTL8XXXU_DEBUG_EFUSE;
dev_info(&udev->dev,
"This Realtek USB WiFi dongle (0x%04x:0x%04x) is untested!\n",
id->idVendor, id->idProduct);
@@ -5744,7 +8392,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
SET_IEEE80211_DEV(priv->hw, &interface->dev);
SET_IEEE80211_PERM_ADDR(hw, priv->mac_addr);
- hw->extra_tx_headroom = sizeof(struct rtl8xxxu_tx_desc);
+ hw->extra_tx_headroom = priv->fops->tx_desc_size;
ieee80211_hw_set(hw, SIGNAL_DBM);
/*
* The firmware handles rate control
@@ -5792,7 +8440,54 @@ static struct rtl8xxxu_fileops rtl8723au_fops = {
.parse_efuse = rtl8723au_parse_efuse,
.load_firmware = rtl8723au_load_firmware,
.power_on = rtl8723au_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_init_llt_table,
+ .phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
+ .config_channel = rtl8723au_config_channel,
+ .parse_rx_desc = rtl8723au_parse_rx_desc,
+ .enable_rf = rtl8723a_enable_rf,
+ .disable_rf = rtl8723a_disable_rf,
+ .set_tx_power = rtl8723a_set_tx_power,
+ .update_rate_mask = rtl8723au_update_rate_mask,
+ .report_connect = rtl8723au_report_connect,
+ .writeN_block_size = 1024,
+ .mbox_ext_reg = REG_HMBOX_EXT_0,
+ .mbox_ext_width = 2,
+ .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
+ .adda_1t_init = 0x0b1b25a0,
+ .adda_1t_path_on = 0x0bdb25a0,
+ .adda_2t_path_on_a = 0x04db25a4,
+ .adda_2t_path_on_b = 0x0b1b25a4,
+};
+
+static struct rtl8xxxu_fileops rtl8723bu_fops = {
+ .parse_efuse = rtl8723bu_parse_efuse,
+ .load_firmware = rtl8723bu_load_firmware,
+ .power_on = rtl8723bu_power_on,
+ .power_off = rtl8723bu_power_off,
+ .reset_8051 = rtl8723bu_reset_8051,
+ .llt_init = rtl8xxxu_auto_llt_table,
+ .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
+ .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
+ .config_channel = rtl8723bu_config_channel,
+ .parse_rx_desc = rtl8723bu_parse_rx_desc,
+ .init_aggregation = rtl8723bu_init_aggregation,
+ .init_statistics = rtl8723bu_init_statistics,
+ .enable_rf = rtl8723b_enable_rf,
+ .disable_rf = rtl8723b_disable_rf,
+ .set_tx_power = rtl8723b_set_tx_power,
+ .update_rate_mask = rtl8723bu_update_rate_mask,
+ .report_connect = rtl8723bu_report_connect,
.writeN_block_size = 1024,
+ .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
+ .mbox_ext_width = 4,
+ .tx_desc_size = sizeof(struct rtl8723bu_tx_desc),
+ .has_s0s1 = 1,
+ .adda_1t_init = 0x01c00014,
+ .adda_1t_path_on = 0x01c00014,
+ .adda_2t_path_on_a = 0x01c00014,
+ .adda_2t_path_on_b = 0x01c00014,
};
#ifdef CONFIG_RTL8XXXU_UNTESTED
@@ -5801,11 +8496,55 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = {
.parse_efuse = rtl8192cu_parse_efuse,
.load_firmware = rtl8192cu_load_firmware,
.power_on = rtl8192cu_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_init_llt_table,
+ .phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
+ .config_channel = rtl8723au_config_channel,
+ .parse_rx_desc = rtl8723au_parse_rx_desc,
+ .enable_rf = rtl8723a_enable_rf,
+ .disable_rf = rtl8723a_disable_rf,
+ .set_tx_power = rtl8723a_set_tx_power,
+ .update_rate_mask = rtl8723au_update_rate_mask,
+ .report_connect = rtl8723au_report_connect,
.writeN_block_size = 128,
+ .mbox_ext_reg = REG_HMBOX_EXT_0,
+ .mbox_ext_width = 2,
+ .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
+ .adda_1t_init = 0x0b1b25a0,
+ .adda_1t_path_on = 0x0bdb25a0,
+ .adda_2t_path_on_a = 0x04db25a4,
+ .adda_2t_path_on_b = 0x0b1b25a4,
};
#endif
+static struct rtl8xxxu_fileops rtl8192eu_fops = {
+ .parse_efuse = rtl8192eu_parse_efuse,
+ .load_firmware = rtl8192eu_load_firmware,
+ .power_on = rtl8192eu_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_auto_llt_table,
+ .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
+ .config_channel = rtl8723bu_config_channel,
+ .parse_rx_desc = rtl8723bu_parse_rx_desc,
+ .enable_rf = rtl8723b_enable_rf,
+ .disable_rf = rtl8723b_disable_rf,
+ .set_tx_power = rtl8723b_set_tx_power,
+ .update_rate_mask = rtl8723au_update_rate_mask,
+ .report_connect = rtl8723au_report_connect,
+ .writeN_block_size = 128,
+ .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
+ .mbox_ext_width = 4,
+ .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
+ .has_s0s1 = 1,
+ .adda_1t_init = 0x0fc01616,
+ .adda_1t_path_on = 0x0fc01616,
+ .adda_2t_path_on_a = 0x0fc01616,
+ .adda_2t_path_on_b = 0x0fc01616,
+};
+
static struct usb_device_id dev_table[] = {
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723au_fops},
@@ -5813,6 +8552,10 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8723au_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0724, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xb720, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8723bu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
@@ -5891,8 +8634,6 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops}, /* Netcore 8188RU */
{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff9, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index f2a1bac6c8ec..7b73654e1368 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -34,7 +34,7 @@
#define RTL8XXXU_MAX_REG_POLL 500
#define USB_INTR_CONTENT_LENGTH 56
-#define RTL8XXXU_OUT_ENDPOINTS 3
+#define RTL8XXXU_OUT_ENDPOINTS 4
#define REALTEK_USB_READ 0xc0
#define REALTEK_USB_WRITE 0x40
@@ -53,14 +53,24 @@
#define RTL8723A_CHANNEL_GROUPS 3
#define RTL8723A_MAX_RF_PATHS 2
+#define RTL8723B_CHANNEL_GROUPS 6
+#define RTL8723B_TX_COUNT 4
+#define RTL8723B_MAX_RF_PATHS 4
+#define RTL8XXXU_MAX_CHANNEL_GROUPS 6
#define RF6052_MAX_TX_PWR 0x3f
-#define EFUSE_MAP_LEN_8723A 256
-#define EFUSE_MAX_SECTION_8723A 32
+#define EFUSE_MAP_LEN 512
+#define EFUSE_MAX_SECTION_8723A 64
#define EFUSE_REAL_CONTENT_LEN_8723A 512
#define EFUSE_BT_MAP_LEN_8723A 1024
#define EFUSE_MAX_WORD_UNIT 4
+enum rtl8xxxu_rx_type {
+ RX_TYPE_DATA_PKT = 0,
+ RX_TYPE_C2H = 1,
+ RX_TYPE_ERROR = -1
+};
+
struct rtl8xxxu_rx_desc {
#ifdef __LITTLE_ENDIAN
u32 pktlen:14;
@@ -197,7 +207,146 @@ struct rtl8xxxu_rx_desc {
#endif
};
-struct rtl8xxxu_tx_desc {
+struct rtl8723bu_rx_desc {
+#ifdef __LITTLE_ENDIAN
+ u32 pktlen:14;
+ u32 crc32:1;
+ u32 icverr:1;
+ u32 drvinfo_sz:4;
+ u32 security:3;
+ u32 qos:1;
+ u32 shift:2;
+ u32 phy_stats:1;
+ u32 swdec:1;
+ u32 ls:1;
+ u32 fs:1;
+ u32 eor:1;
+ u32 own:1;
+
+ u32 macid:7;
+ u32 dummy1_0:1;
+ u32 tid:4;
+ u32 dummy1_1:1;
+ u32 amsdu:1;
+ u32 rxid_match:1;
+ u32 paggr:1;
+ u32 a1fit:4; /* 16 */
+ u32 chkerr:1;
+ u32 ipver:1;
+ u32 tcpudp:1;
+ u32 chkvld:1;
+ u32 pam:1;
+ u32 pwr:1;
+ u32 more_data:1;
+ u32 more_frag:1;
+ u32 type:2;
+ u32 mc:1;
+ u32 bc:1;
+
+ u32 seq:12;
+ u32 frag:4;
+ u32 rx_is_qos:1; /* 16 */
+ u32 dummy2_0:1;
+ u32 wlanhd_iv_len:6;
+ u32 dummy2_1:4;
+ u32 rpt_sel:1;
+ u32 dummy2_2:3;
+
+ u32 rxmcs:7;
+ u32 dummy3_0:3;
+ u32 htc:1;
+ u32 eosp:1;
+ u32 bssidfit:2;
+ u32 dummy3_1:2;
+ u32 usb_agg_pktnum:8; /* 16 */
+ u32 dummy3_2:5;
+ u32 pattern_match:1;
+ u32 unicast_match:1;
+ u32 magic_match:1;
+
+ u32 splcp:1;
+ u32 ldcp:1;
+ u32 stbc:1;
+ u32 dummy4_0:1;
+ u32 bw:2;
+ u32 dummy4_1:26;
+#else
+ u32 own:1;
+ u32 eor:1;
+ u32 fs:1;
+ u32 ls:1;
+ u32 swdec:1;
+ u32 phy_stats:1;
+ u32 shift:2;
+ u32 qos:1;
+ u32 security:3;
+ u32 drvinfo_sz:4;
+ u32 icverr:1;
+ u32 crc32:1;
+ u32 pktlen:14;
+
+ u32 bc:1;
+ u32 mc:1;
+ u32 type:2;
+ u32 mf:1;
+ u32 md:1;
+ u32 pwr:1;
+ u32 pam:1;
+ u32 a2fit:4;
+ u32 a1fit:4;
+ u32 faggr:1;
+ u32 paggr:1;
+ u32 amsdu:1;
+ u32 hwrsvd:4;
+ u32 tid:4;
+ u32 macid:5;
+
+ u32 dummy2_2:3;
+ u32 rpt_sel:1;
+ u32 dummy2_1:4;
+ u32 wlanhd_iv_len:6;
+ u32 dummy2_0:1;
+ u32 rx_is_qos:1;
+ u32 frag:4; /* 16 */
+ u32 seq:12;
+
+ u32 magic_match:1;
+ u32 unicast_match:1;
+ u32 pattern_match:1;
+ u32 dummy3_2:5;
+ u32 usb_agg_pktnum:8;
+ u32 dummy3_1:2; /* 16 */
+ u32 bssidfit:2;
+ u32 eosp:1;
+ u32 htc:1;
+ u32 dummy3_0:3;
+ u32 rxmcs:7;
+
+	u32 dummy4_1:26;
+ u32 bw:2;
+ u32 dummy4_0:1;
+ u32 stbc:1;
+ u32 ldcp:1;
+ u32 splcp:1;
+#endif
+ __le32 tsfl;
+};
+
+struct rtl8723au_tx_desc {
+ __le16 pkt_size;
+ u8 pkt_offset;
+ u8 txdw0;
+ __le32 txdw1;
+ __le32 txdw2;
+ __le32 txdw3;
+ __le32 txdw4;
+ __le32 txdw5;
+ __le32 txdw6;
+ __le16 csum;
+ __le16 txdw7;
+};
+
+struct rtl8723bu_tx_desc {
__le16 pkt_size;
u8 pkt_offset;
u8 txdw0;
@@ -209,6 +358,8 @@ struct rtl8xxxu_tx_desc {
__le32 txdw6;
__le16 csum;
__le16 txdw7;
+ __le32 txdw8;
+ __le32 txdw9;
};
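/*
 * A compile-time sanity sketch of the sizes the TX path depends on: the
 * 8723au-style descriptor above packs to 32 bytes and the 8723bu one,
 * with the two extra dwords, to 40 - hence the "usedesc40" test in
 * rtl8xxxu_tx() and the tx_desc_size fileops member. Stand-in types,
 * assuming the layouts shown.
 */
#include <stdint.h>

struct example_tx_desc_32 {
	uint16_t pkt_size;
	uint8_t pkt_offset, txdw0;
	uint32_t txdw1, txdw2, txdw3, txdw4, txdw5, txdw6;
	uint16_t csum, txdw7;
};

struct example_tx_desc_40 {
	struct example_tx_desc_32 base;
	uint32_t txdw8, txdw9;
};

_Static_assert(sizeof(struct example_tx_desc_32) == 32, "8723au descriptor");
_Static_assert(sizeof(struct example_tx_desc_40) == 40, "8723bu descriptor");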
/* CCK Rates, TxHT = 0 */
@@ -256,15 +407,25 @@ struct rtl8xxxu_tx_desc {
#define TXDESC_OWN BIT(31)
#else
#define TXDESC_BROADMULTICAST BIT(0)
+#define TXDESC_HTC BIT(1)
#define TXDESC_LAST_SEGMENT BIT(2)
#define TXDESC_FIRST_SEGMENT BIT(3)
+#define TXDESC_LINIP BIT(4)
+#define TXDESC_NO_ACM BIT(5)
+#define TXDESC_GF BIT(6)
#define TXDESC_OWN BIT(7)
#endif
/* Word 1 */
+/*
+ * Bits 0-7 differ depending on chip generation. For 8723au, bits 5/6 are
+ * aggregation enable and break respectively. For 8723bu, bits 0-7 are macid.
+ */
#define TXDESC_PKT_OFFSET_SZ 0
-#define TXDESC_AGG_ENABLE BIT(5)
-#define TXDESC_BK BIT(6)
+#define TXDESC_AGG_ENABLE_8723A BIT(5)
+#define TXDESC_AGG_BREAK_8723A BIT(6)
+#define TXDESC_MACID_SHIFT_8723B 0
+#define TXDESC_MACID_MASK_8723B 0x00f0
#define TXDESC_QUEUE_SHIFT 8
#define TXDESC_QUEUE_MASK 0x1f00
#define TXDESC_QUEUE_BK 0x2
@@ -276,6 +437,9 @@ struct rtl8xxxu_tx_desc {
#define TXDESC_QUEUE_MGNT 0x12
#define TXDESC_QUEUE_CMD 0x13
#define TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1)
+#define TXDESC_RDG_NAV_EXT_8723B BIT(13)
+#define TXDESC_LSIG_TXOP_ENABLE_8723B BIT(14)
+#define TXDESC_PIFS_8723B BIT(15)
#define DESC_RATE_ID_SHIFT 16
#define DESC_RATE_ID_MASK 0xf
@@ -287,41 +451,72 @@ struct rtl8xxxu_tx_desc {
#define TXDESC_HWPC BIT(31)
/* Word 2 */
-#define TXDESC_ACK_REPORT BIT(19)
+#define TXDESC_PAID_SHIFT_8723B 0
+#define TXDESC_PAID_MASK_8723B 0x1ff
+#define TXDESC_CCA_RTS_SHIFT_8723B 10
+#define TXDESC_CCA_RTS_MASK_8723B 0xc00
+#define TXDESC_AGG_ENABLE_8723B BIT(12)
+#define TXDESC_RDG_ENABLE_8723B BIT(13)
+#define TXDESC_AGG_BREAK_8723B BIT(16)
+#define TXDESC_MORE_FRAG_8723B BIT(17)
+#define TXDESC_RAW_8723B BIT(18)
+#define TXDESC_ACK_REPORT_8723A BIT(19)
+#define TXDESC_SPE_RPT_8723B BIT(19)
#define TXDESC_AMPDU_DENSITY_SHIFT 20
+#define TXDESC_BT_INT_8723B BIT(23)
+#define TXDESC_GID_8723B BIT(24)
/* Word 3 */
-#define TXDESC_SEQ_SHIFT 16
-#define TXDESC_SEQ_MASK 0x0fff0000
+#define TXDESC_USE_DRIVER_RATE_8723B BIT(8)
+#define TXDESC_CTS_SELF_ENABLE_8723B BIT(11)
+#define TXDESC_RTS_CTS_ENABLE_8723B BIT(12)
+#define TXDESC_HW_RTS_ENABLE_8723B BIT(13)
+#define TXDESC_SEQ_SHIFT_8723A 16
+#define TXDESC_SEQ_MASK_8723A 0x0fff0000
/* Word 4 */
-#define TXDESC_QOS BIT(6)
-#define TXDESC_HW_SEQ_ENABLE BIT(7)
-#define TXDESC_USE_DRIVER_RATE BIT(8)
+#define TXDESC_RTS_RATE_SHIFT_8723A 0
+#define TXDESC_RTS_RATE_MASK_8723A 0x3f
+#define TXDESC_QOS_8723A BIT(6)
+#define TXDESC_HW_SEQ_ENABLE_8723A BIT(7)
+#define TXDESC_USE_DRIVER_RATE_8723A BIT(8)
#define TXDESC_DISABLE_DATA_FB BIT(10)
-#define TXDESC_CTS_SELF_ENABLE BIT(11)
-#define TXDESC_RTS_CTS_ENABLE BIT(12)
-#define TXDESC_HW_RTS_ENABLE BIT(13)
+#define TXDESC_CTS_SELF_ENABLE_8723A BIT(11)
+#define TXDESC_RTS_CTS_ENABLE_8723A BIT(12)
+#define TXDESC_HW_RTS_ENABLE_8723A BIT(13)
#define TXDESC_PRIME_CH_OFF_LOWER BIT(20)
#define TXDESC_PRIME_CH_OFF_UPPER BIT(21)
-#define TXDESC_SHORT_PREAMBLE BIT(24)
+#define TXDESC_SHORT_PREAMBLE_8723A BIT(24)
#define TXDESC_DATA_BW BIT(25)
#define TXDESC_RTS_DATA_BW BIT(27)
#define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28)
#define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29)
+#define TXDESC_DATA_RATE_FB_SHIFT_8723B 8
+#define TXDESC_DATA_RATE_FB_MASK_8723B 0x00001f00
+#define TXDESC_RETRY_LIMIT_ENABLE_8723B BIT(17)
+#define TXDESC_RETRY_LIMIT_SHIFT_8723B 18
+#define TXDESC_RETRY_LIMIT_MASK_8723B 0x00fc0000
+#define TXDESC_RTS_RATE_SHIFT_8723B 24
+#define TXDESC_RTS_RATE_MASK_8723B 0x3f000000
/* Word 5 */
-#define TXDESC_RTS_RATE_SHIFT 0
-#define TXDESC_RTS_RATE_MASK 0x3f
+#define TXDESC_SHORT_PREAMBLE_8723B BIT(4)
#define TXDESC_SHORT_GI BIT(6)
#define TXDESC_CCX_TAG BIT(7)
-#define TXDESC_RETRY_LIMIT_ENABLE BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT 18
-#define TXDESC_RETRY_LIMIT_MASK 0x00fc0000
+#define TXDESC_RETRY_LIMIT_ENABLE_8723A BIT(17)
+#define TXDESC_RETRY_LIMIT_SHIFT_8723A 18
+#define TXDESC_RETRY_LIMIT_MASK_8723A 0x00fc0000
/* Word 6 */
#define TXDESC_MAX_AGG_SHIFT 11
+/* Word 8 */
+#define TXDESC_HW_SEQ_ENABLE_8723B BIT(15)
+
+/* Word 9 */
+#define TXDESC_SEQ_SHIFT_8723B 12
+#define TXDESC_SEQ_MASK_8723B 0x00fff000
+
struct phy_rx_agc_info {
#ifdef __LITTLE_ENDIAN
u8 gain:7, trsw:1;
@@ -500,6 +695,125 @@ struct rtl8192cu_efuse {
u8 customer_id;
};
+struct rtl8723bu_pwr_idx {
+#ifdef __LITTLE_ENDIAN
+ int ht20:4;
+ int ht40:4;
+ int ofdm:4;
+ int cck:4;
+#else
+ int cck:4;
+ int ofdm:4;
+ int ht40:4;
+ int ht20:4;
+#endif
+} __packed;
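/*
 * The fields above are signed 4-bit power diffs packed two per byte. A
 * sketch of the manual equivalent for decoding a raw efuse byte without
 * the bitfield struct: extract a nibble and sign-extend it from 4 bits.
 * Hypothetical helper, not driver API.
 */
static int example_sign_extend_nibble(unsigned char byte, int high_nibble)
{
	int v = high_nibble ? (byte >> 4) & 0xf : byte & 0xf;

	return (v ^ 0x8) - 0x8;		/* 0x8..0xf map to -8..-1 */
}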
+
+struct rtl8723bu_efuse_tx_power {
+ u8 cck_base[6];
+ u8 ht40_base[5];
+ struct rtl8723au_idx ht20_ofdm_1s_diff;
+ struct rtl8723bu_pwr_idx pwr_diff[3];
+ u8 dummy5g[24]; /* max channel group (14) + power diff offset (10) */
+};
+
+struct rtl8723bu_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+ struct rtl8723bu_efuse_tx_power tx_power_index_A; /* 0x10 */
+ struct rtl8723bu_efuse_tx_power tx_power_index_B; /* 0x3a */
+ struct rtl8723bu_efuse_tx_power tx_power_index_C; /* 0x64 */
+ struct rtl8723bu_efuse_tx_power tx_power_index_D; /* 0x8e */
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g; /* 0xbd */
+ u8 res2[3];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 res3[2];
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 res4[9];
+ u8 usb_optional_function;
+ u8 res5[0x1e];
+ u8 res6[2];
+ u8 serial[0x0b]; /* 0xf5 */
+ u8 vid; /* 0x100 */
+ u8 res7;
+ u8 pid;
+ u8 res8[4];
+ u8 mac_addr[ETH_ALEN]; /* 0x107 */
+ u8 res9[2];
+ u8 vendor_name[0x07];
+ u8 res10[2];
+ u8 device_name[0x14];
+ u8 res11[0xcf];
+ u8 package_type; /* 0x1fb */
+ u8 res12[0x4];
+};
+
+struct rtl8192eu_efuse_tx_power {
+ u8 cck_base[6];
+ u8 ht40_base[5];
+ struct rtl8723au_idx ht20_ofdm_1s_diff;
+ struct rtl8723au_idx ht40_ht20_2s_diff;
+ struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */
+ struct rtl8723au_idx ht40_ht20_3s_diff;
+ struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */
+ struct rtl8723au_idx ht40_ht20_4s_diff;
+ struct rtl8723au_idx ofdm_cck_4s_diff; /* not used */
+};
+
+struct rtl8192eu_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+ struct rtl8192eu_efuse_tx_power tx_power_index_A; /* 0x10 */
+ struct rtl8192eu_efuse_tx_power tx_power_index_B; /* 0x22 */
+ struct rtl8192eu_efuse_tx_power tx_power_index_C; /* 0x34 */
+ struct rtl8192eu_efuse_tx_power tx_power_index_D; /* 0x46 */
+ u8 res1[0x60];
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g; /* 0xbd */
+ u8 res2[1];
+ u8 lna_type_5g; /* 0xbf */
+ u8 res13[1];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 res3[3];
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 res4[6];
+ u8 vid; /* 0xd0 */
+ u8 res5[1];
+ u8 pid; /* 0xd2 */
+ u8 res6[1];
+ u8 usb_optional_function;
+ u8 res7[2];
+ u8 mac_addr[ETH_ALEN]; /* 0xd7 */
+ u8 res8[2];
+ u8 vendor_name[7];
+ u8 res9[2];
+ u8 device_name[0x0b]; /* 0xe8 */
+ u8 res10[2];
+ u8 serial[0x0b]; /* 0xf5 */
+ u8 res11[0x30];
+ u8 unknown[0x0d]; /* 0x130 */
+ u8 res12[0xc3];
+};
+
struct rtl8xxxu_reg8val {
u16 reg;
u8 val;
@@ -531,27 +845,110 @@ struct rtl8xxxu_rfregs {
#define H2C_MAX_MBOX 4
#define H2C_EXT BIT(7)
-#define H2C_SET_POWER_MODE 1
-#define H2C_JOIN_BSS_REPORT 2
#define H2C_JOIN_BSS_DISCONNECT 0
#define H2C_JOIN_BSS_CONNECT 1
-#define H2C_SET_RSSI 5
-#define H2C_SET_RATE_MASK (6 | H2C_EXT)
+
+/*
+ * H2C (firmware) commands differ between the older generation chips
+ * 8188[cr]u, 819[12]cu, and 8723au, and the more recent chips 8723bu,
+ * 8192[de]u, and 8812.
+ */
+enum h2c_cmd_8723a {
+ H2C_SET_POWER_MODE = 1,
+ H2C_JOIN_BSS_REPORT = 2,
+ H2C_SET_RSSI = 5,
+ H2C_SET_RATE_MASK = (6 | H2C_EXT),
+};
+
+enum h2c_cmd_8723b {
+ /*
+ * Common Class: 000
+ */
+ H2C_8723B_RSVD_PAGE = 0x00,
+ H2C_8723B_MEDIA_STATUS_RPT = 0x01,
+ H2C_8723B_SCAN_ENABLE = 0x02,
+ H2C_8723B_KEEP_ALIVE = 0x03,
+ H2C_8723B_DISCON_DECISION = 0x04,
+ H2C_8723B_PSD_OFFLOAD = 0x05,
+ H2C_8723B_AP_OFFLOAD = 0x08,
+ H2C_8723B_BCN_RSVDPAGE = 0x09,
+ H2C_8723B_PROBERSP_RSVDPAGE = 0x0A,
+ H2C_8723B_FCS_RSVDPAGE = 0x10,
+ H2C_8723B_FCS_INFO = 0x11,
+ H2C_8723B_AP_WOW_GPIO_CTRL = 0x13,
+
+ /*
+	 * PowerSave Class: 001
+ */
+ H2C_8723B_SET_PWR_MODE = 0x20,
+ H2C_8723B_PS_TUNING_PARA = 0x21,
+ H2C_8723B_PS_TUNING_PARA2 = 0x22,
+ H2C_8723B_P2P_LPS_PARAM = 0x23,
+ H2C_8723B_P2P_PS_OFFLOAD = 0x24,
+ H2C_8723B_PS_SCAN_ENABLE = 0x25,
+ H2C_8723B_SAP_PS_ = 0x26,
+ H2C_8723B_INACTIVE_PS_ = 0x27,
+ H2C_8723B_FWLPS_IN_IPS_ = 0x28,
+
+ /*
+ * Dynamic Mechanism Class: 010
+ */
+ H2C_8723B_MACID_CFG_RAID = 0x40,
+ H2C_8723B_TXBF = 0x41,
+ H2C_8723B_RSSI_SETTING = 0x42,
+ H2C_8723B_AP_REQ_TXRPT = 0x43,
+ H2C_8723B_INIT_RATE_COLLECT = 0x44,
+
+ /*
+ * BT Class: 011
+ */
+ H2C_8723B_B_TYPE_TDMA = 0x60,
+ H2C_8723B_BT_INFO = 0x61,
+ H2C_8723B_FORCE_BT_TXPWR = 0x62,
+ H2C_8723B_BT_IGNORE_WLANACT = 0x63,
+ H2C_8723B_DAC_SWING_VALUE = 0x64,
+ H2C_8723B_ANT_SEL_RSV = 0x65,
+ H2C_8723B_WL_OPMODE = 0x66,
+ H2C_8723B_BT_MP_OPER = 0x67,
+ H2C_8723B_BT_CONTROL = 0x68,
+ H2C_8723B_BT_WIFI_CTRL = 0x69,
+ H2C_8723B_BT_FW_PATCH = 0x6a,
+ H2C_8723B_BT_WLAN_CALIBRATION = 0x6d,
+ H2C_8723B_BT_GRANT = 0x6e,
+
+ /*
+ * WOWLAN Class: 100
+ */
+ H2C_8723B_WOWLAN = 0x80,
+ H2C_8723B_REMOTE_WAKE_CTRL = 0x81,
+ H2C_8723B_AOAC_GLOBAL_INFO = 0x82,
+ H2C_8723B_AOAC_RSVD_PAGE = 0x83,
+ H2C_8723B_AOAC_RSVD_PAGE2 = 0x84,
+ H2C_8723B_D0_SCAN_OFFLOAD_CTRL = 0x85,
+ H2C_8723B_D0_SCAN_OFFLOAD_INFO = 0x86,
+ H2C_8723B_CHNL_SWITCH_OFFLOAD = 0x87,
+
+ H2C_8723B_RESET_TSF = 0xC0,
+};
+
struct h2c_cmd {
union {
struct {
u8 cmd;
- u8 data[5];
+ u8 data[7];
} __packed cmd;
struct {
__le32 data;
__le16 ext;
} __packed raw;
struct {
+ __le32 data;
+ __le32 ext;
+ } __packed raw_wide;
+ struct {
u8 cmd;
u8 data;
- u8 pad[4];
} __packed joinbss;
struct {
u8 cmd;
@@ -559,6 +956,182 @@ struct h2c_cmd {
u8 arg;
__le16 mask_lo;
} __packed ramask;
+ struct {
+ u8 cmd;
+ u8 parm;
+ u8 macid;
+ u8 macid_end;
+ } __packed media_status_rpt;
+ struct {
+ u8 cmd;
+ u8 macid;
+ /*
+ * [0:4] - RAID
+ * [7] - SGI
+ */
+ u8 data1;
+ /*
+ * [0:1] - Bandwidth
+ * [3] - No Update
+ * [4:5] - VHT enable
+ * [6] - DISPT
+ * [7] - DISRA
+ */
+ u8 data2;
+ u8 ramask0;
+ u8 ramask1;
+ u8 ramask2;
+ u8 ramask3;
+ } __packed b_macid_cfg;
+ struct {
+ u8 cmd;
+ u8 data1;
+ u8 data2;
+ u8 data3;
+ u8 data4;
+ u8 data5;
+ } __packed b_type_dma;
+ struct {
+ u8 cmd;
+ u8 data;
+ } __packed bt_info;
+ struct {
+ u8 cmd;
+ u8 operreq;
+ u8 opcode;
+ u8 data;
+ u8 addr;
+ } __packed bt_mp_oper;
+ struct {
+ u8 cmd;
+ u8 data;
+ } __packed bt_wlan_calibration;
+ struct {
+ u8 cmd;
+ u8 data;
+ } __packed ignore_wlan;
+ struct {
+ u8 cmd;
+ u8 ant_inverse;
+ u8 int_switch_type;
+ } __packed ant_sel_rsv;
+ struct {
+ u8 cmd;
+ u8 data;
+ } __packed bt_grant;
+ };
+};
+
+enum c2h_evt_8723b {
+ C2H_8723B_DEBUG = 0,
+ C2H_8723B_TSF = 1,
+ C2H_8723B_AP_RPT_RSP = 2,
+ C2H_8723B_CCX_TX_RPT = 3,
+ C2H_8723B_BT_RSSI = 4,
+ C2H_8723B_BT_OP_MODE = 5,
+ C2H_8723B_EXT_RA_RPT = 6,
+ C2H_8723B_BT_INFO = 9,
+ C2H_8723B_HW_INFO_EXCH = 0x0a,
+ C2H_8723B_BT_MP_INFO = 0x0b,
+ C2H_8723B_RA_REPORT = 0x0c,
+ C2H_8723B_FW_DEBUG = 0xff,
+};
+
+enum bt_info_src_8723b {
+ BT_INFO_SRC_8723B_WIFI_FW = 0x0,
+ BT_INFO_SRC_8723B_BT_RSP = 0x1,
+ BT_INFO_SRC_8723B_BT_ACTIVE_SEND = 0x2,
+};
+
+enum bt_mp_oper_opcode_8723b {
+ BT_MP_OP_GET_BT_VERSION = 0x00,
+ BT_MP_OP_RESET = 0x01,
+ BT_MP_OP_TEST_CTRL = 0x02,
+ BT_MP_OP_SET_BT_MODE = 0x03,
+ BT_MP_OP_SET_CHNL_TX_GAIN = 0x04,
+ BT_MP_OP_SET_PKT_TYPE_LEN = 0x05,
+ BT_MP_OP_SET_PKT_CNT_L_PL_TYPE = 0x06,
+ BT_MP_OP_SET_PKT_CNT_H_PKT_INTV = 0x07,
+ BT_MP_OP_SET_PKT_HEADER = 0x08,
+ BT_MP_OP_SET_WHITENCOEFF = 0x09,
+ BT_MP_OP_SET_BD_ADDR_L = 0x0a,
+ BT_MP_OP_SET_BD_ADDR_H = 0x0b,
+ BT_MP_OP_WRITE_REG_ADDR = 0x0c,
+ BT_MP_OP_WRITE_REG_VALUE = 0x0d,
+ BT_MP_OP_GET_BT_STATUS = 0x0e,
+ BT_MP_OP_GET_BD_ADDR_L = 0x0f,
+ BT_MP_OP_GET_BD_ADDR_H = 0x10,
+ BT_MP_OP_READ_REG = 0x11,
+ BT_MP_OP_SET_TARGET_BD_ADDR_L = 0x12,
+ BT_MP_OP_SET_TARGET_BD_ADDR_H = 0x13,
+ BT_MP_OP_SET_TX_POWER_CALIBRATION = 0x14,
+ BT_MP_OP_GET_RX_PKT_CNT_L = 0x15,
+ BT_MP_OP_GET_RX_PKT_CNT_H = 0x16,
+ BT_MP_OP_GET_RX_ERROR_BITS_L = 0x17,
+ BT_MP_OP_GET_RX_ERROR_BITS_H = 0x18,
+ BT_MP_OP_GET_RSSI = 0x19,
+ BT_MP_OP_GET_CFO_HDR_QUALITY_L = 0x1a,
+ BT_MP_OP_GET_CFO_HDR_QUALITY_H = 0x1b,
+ BT_MP_OP_GET_TARGET_BD_ADDR_L = 0x1c,
+ BT_MP_OP_GET_TARGET_BD_ADDR_H = 0x1d,
+ BT_MP_OP_GET_AFH_MAP_L = 0x1e,
+ BT_MP_OP_GET_AFH_MAP_M = 0x1f,
+ BT_MP_OP_GET_AFH_MAP_H = 0x20,
+ BT_MP_OP_GET_AFH_STATUS = 0x21,
+ BT_MP_OP_SET_TRACKING_INTERVAL = 0x22,
+ BT_MP_OP_SET_THERMAL_METER = 0x23,
+ BT_MP_OP_ENABLE_CFO_TRACKING = 0x24,
+};
+
+struct rtl8723bu_c2h {
+ u8 id;
+ u8 seq;
+ union {
+ struct {
+ u8 payload[0];
+ } __packed raw;
+ struct {
+ u8 ext_id;
+ u8 status:4;
+ u8 retlen:4;
+ u8 opcode_ver:4;
+ u8 req_num:4;
+ u8 payload[2];
+ } __packed bt_mp_info;
+ struct {
+ u8 response_source:4;
+ u8 dummy0_0:4;
+
+ u8 bt_info;
+
+ u8 retry_count:4;
+ u8 dummy2_0:1;
+ u8 bt_page:1;
+ u8 tx_rx_mask:1;
+ u8 dummy2_2:1;
+
+ u8 rssi;
+
+ u8 basic_rate:1;
+ u8 bt_has_reset:1;
+			u8 dummy4_1:1;
+ u8 ignore_wlan:1;
+ u8 auto_report:1;
+ u8 dummy4_2:3;
+
+ u8 a4;
+ u8 a5;
+ } __packed bt_info;
+ struct {
+ u8 rate:7;
+ u8 dummy0_0:1;
+ u8 macid;
+ u8 ldpc:1;
+ u8 txbf:1;
+ u8 noisy_state:1;
+ u8 dummy2_0:5;
+ u8 dummy3_0;
+ } __packed ra_report;
};
};
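/*
 * The C2H report starts with a fixed two-byte header (id, seq) followed by
 * an event-specific payload, which is why rtl8723bu_handle_c2h() computes
 * len = skb->len - 2. A minimal decode sketch under that assumption:
 */
#include <stddef.h>

struct example_c2h_hdr {
	unsigned char id;
	unsigned char seq;
	unsigned char payload[];	/* event-specific body */
};

static int example_c2h_payload_len(size_t pkt_len)
{
	if (pkt_len < offsetof(struct example_c2h_hdr, payload))
		return -1;	/* runt packet */
	return (int)(pkt_len - offsetof(struct example_c2h_hdr, payload));
}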
@@ -582,40 +1155,51 @@ struct rtl8xxxu_priv {
u8 mac_addr[ETH_ALEN];
char chip_name[8];
- u8 cck_tx_power_index_A[3]; /* 0x10 */
- u8 cck_tx_power_index_B[3];
- u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */
- u8 ht40_1s_tx_power_index_B[3];
+ char chip_vendor[8];
+ u8 cck_tx_power_index_A[RTL8XXXU_MAX_CHANNEL_GROUPS];
+ u8 cck_tx_power_index_B[RTL8XXXU_MAX_CHANNEL_GROUPS];
+ u8 ht40_1s_tx_power_index_A[RTL8XXXU_MAX_CHANNEL_GROUPS];
+ u8 ht40_1s_tx_power_index_B[RTL8XXXU_MAX_CHANNEL_GROUPS];
/*
* The following entries are half-bytes split as:
* bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
*/
- struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
- struct rtl8723au_idx ht20_tx_power_index_diff[3];
- struct rtl8723au_idx ofdm_tx_power_index_diff[3];
- struct rtl8723au_idx ht40_max_power_offset[3];
- struct rtl8723au_idx ht20_max_power_offset[3];
+ struct rtl8723au_idx ht40_2s_tx_power_index_diff[
+ RTL8723A_CHANNEL_GROUPS];
+ struct rtl8723au_idx ht20_tx_power_index_diff[RTL8723A_CHANNEL_GROUPS];
+ struct rtl8723au_idx ofdm_tx_power_index_diff[RTL8723A_CHANNEL_GROUPS];
+ struct rtl8723au_idx ht40_max_power_offset[RTL8723A_CHANNEL_GROUPS];
+ struct rtl8723au_idx ht20_max_power_offset[RTL8723A_CHANNEL_GROUPS];
+ /*
+ * Newer generation chips only keep power diffs per TX count,
+ * not per channel group.
+ */
+ struct rtl8723au_idx ofdm_tx_power_diff[RTL8723B_TX_COUNT];
+ struct rtl8723au_idx ht20_tx_power_diff[RTL8723B_TX_COUNT];
+ struct rtl8723au_idx ht40_tx_power_diff[RTL8723B_TX_COUNT];
u32 chip_cut:4;
u32 rom_rev:4;
+ u32 is_multi_func:1;
u32 has_wifi:1;
u32 has_bluetooth:1;
u32 enable_bluetooth:1;
u32 has_gps:1;
u32 hi_pa:1;
u32 vendor_umc:1;
+ u32 vendor_smic:1;
u32 has_polarity_ctrl:1;
u32 has_eeprom:1;
u32 boot_eeprom:1;
+ u32 usb_interrupts:1;
u32 ep_tx_high_queue:1;
u32 ep_tx_normal_queue:1;
u32 ep_tx_low_queue:1;
- u32 path_a_hi_power:1;
- u32 path_a_rf_paths:4;
+ u32 has_xtalk:1;
+ u8 xtalk;
unsigned int pipe_interrupt;
unsigned int pipe_in;
unsigned int pipe_out[TXDESC_QUEUE_MAX];
u8 out_ep[RTL8XXXU_OUT_ENDPOINTS];
- u8 path_a_ig_value;
u8 ep_tx_count;
u8 rf_paths;
u8 rx_paths;
@@ -642,9 +1226,11 @@ struct rtl8xxxu_priv {
u8 val8;
} usb_buf;
union {
- u8 raw[EFUSE_MAP_LEN_8723A];
+ u8 raw[EFUSE_MAP_LEN];
struct rtl8723au_efuse efuse8723;
+ struct rtl8723bu_efuse efuse8723bu;
struct rtl8192cu_efuse efuse8192;
+ struct rtl8192eu_efuse efuse8192eu;
} efuse_wifi;
u32 adda_backup[RTL8XXXU_ADDA_REGS];
u32 mac_backup[RTL8XXXU_MAC_REGS];
@@ -652,7 +1238,6 @@ struct rtl8xxxu_priv {
u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
u32 rtlchip;
u8 pi_enabled:1;
- u8 iqk_initialized:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
};
@@ -672,5 +1257,31 @@ struct rtl8xxxu_fileops {
int (*parse_efuse) (struct rtl8xxxu_priv *priv);
int (*load_firmware) (struct rtl8xxxu_priv *priv);
int (*power_on) (struct rtl8xxxu_priv *priv);
+ void (*power_off) (struct rtl8xxxu_priv *priv);
+ void (*reset_8051) (struct rtl8xxxu_priv *priv);
+ int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page);
+ void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
+ void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv);
+ void (*config_channel) (struct ieee80211_hw *hw);
+ int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status);
+ void (*init_aggregation) (struct rtl8xxxu_priv *priv);
+ void (*init_statistics) (struct rtl8xxxu_priv *priv);
+ void (*enable_rf) (struct rtl8xxxu_priv *priv);
+ void (*disable_rf) (struct rtl8xxxu_priv *priv);
+ void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
+ bool ht40);
+ void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi);
+ void (*report_connect) (struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect);
int writeN_block_size;
+ u16 mbox_ext_reg;
+ char mbox_ext_width;
+ char tx_desc_size;
+ char has_s0s1;
+ u32 adda_1t_init;
+ u32 adda_1t_path_on;
+ u32 adda_2t_path_on_a;
+ u32 adda_2t_path_on_b;
};
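/*
 * A sketch of the dispatch pattern the expanded fileops enables: shared
 * code calls through priv->fops instead of a chip-specific function, so
 * rtl8xxxu_init_device() and the TX/RX paths stay common across
 * 8723au/8723bu/8192cu/8192eu. Simplified stand-in types only.
 */
struct example_priv;

struct example_fileops {
	void (*set_tx_power)(struct example_priv *priv, int channel, int ht40);
	char tx_desc_size;
};

struct example_priv {
	const struct example_fileops *fops;
};

static void example_set_channel_power(struct example_priv *priv, int channel)
{
	/* One call site, many chips: the table picks the implementation */
	priv->fops->set_tx_power(priv, channel, 0);
}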
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 23208f79b97c..e545e849f5a3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -45,6 +45,7 @@
#define APS_FSMCO_ENABLE_POWERDOWN BIT(4)
#define APS_FSMCO_MAC_ENABLE BIT(8)
#define APS_FSMCO_MAC_OFF BIT(9)
+#define APS_FSMCO_SW_LPS BIT(10)
#define APS_FSMCO_HW_SUSPEND BIT(11)
#define APS_FSMCO_PCIE BIT(12)
#define APS_FSMCO_HW_POWERDOWN BIT(15)
@@ -69,8 +70,11 @@
#define REG_EE_VPD 0x000c
#define REG_AFE_MISC 0x0010
+#define AFE_MISC_WL_XTAL_CTRL BIT(6)
+
#define REG_SPS0_CTRL 0x0011
#define REG_SPS_OCP_CFG 0x0018
+#define REG_8192E_LDOV12_CTRL 0x0014
#define REG_RSV_CTRL 0x001c
#define REG_RF_CTRL 0x001f
@@ -131,6 +135,8 @@
#define EFUSE_ACCESS_DISABLE 0x00 /* RTL8723 only */
#define REG_PWR_DATA 0x0038
+#define PWR_DATA_EEPRPAD_RFE_CTRL_EN BIT(11)
+
#define REG_CAL_TIMER 0x003c
#define REG_ACLK_MON 0x003e
#define REG_GPIO_MUXCFG 0x0040
@@ -138,7 +144,10 @@
#define REG_MAC_PINMUX_CFG 0x0043
#define REG_GPIO_PIN_CTRL 0x0044
#define REG_GPIO_INTM 0x0048
+#define GPIO_INTM_EDGE_TRIG_IRQ BIT(9)
+
#define REG_LEDCFG0 0x004c
+#define LEDCFG0_DPDT_SELECT BIT(23)
#define REG_LEDCFG1 0x004d
#define REG_LEDCFG2 0x004e
#define LEDCFG2_DPDT_SELECT BIT(7)
@@ -152,6 +161,12 @@
#define REG_GPIO_PIN_CTRL_2 0x0060
/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
#define REG_GPIO_IO_SEL_2 0x0062
+#define GPIO_IO_SEL_2_GPIO09_INPUT BIT(1)
+#define GPIO_IO_SEL_2_GPIO09_IRQ BIT(9)
+
+/* RTL8723B */
+#define REG_PAD_CTRL1 0x0064
+#define PAD_CTRL1_SW_DPDT_SEL_DATA BIT(0)
/* RTL8723 only WIFI/BT/GPS Multi-Function control source. */
#define REG_MULTI_FUNC_CTRL 0x0068
@@ -177,6 +192,8 @@
control */
#define MULTI_GPS_FUNC_EN BIT(22) /* GPS function enable */
+#define REG_LDO_SW_CTRL 0x007c /* 8192eu */
+
#define REG_MCU_FW_DL 0x0080
#define MCU_FW_DL_ENABLE BIT(0)
#define MCU_FW_DL_READY BIT(1)
@@ -192,6 +209,12 @@
#define REG_HMBOX_EXT_1 0x008a
#define REG_HMBOX_EXT_2 0x008c
#define REG_HMBOX_EXT_3 0x008e
+/* Interrupt registers for 8192e/8723bu/8812 */
+#define REG_HIMR0 0x00b0
+#define REG_HISR0 0x00b4
+#define REG_HIMR1 0x00b8
+#define REG_HISR1 0x00bc
+
/* Host suspend counter on FPGA platform */
#define REG_HOST_SUSP_CNT 0x00bc
/* Efuse access protection for RTL8723 */
@@ -213,6 +236,7 @@
#define SYS_CFG_PCIRSTB BIT(4)
#define SYS_CFG_V15_VLD BIT(5)
#define SYS_CFG_TRP_B15V_EN BIT(7)
+#define SYS_CFG_SW_OFFLOAD_EN BIT(7) /* For chips with IOL support */
#define SYS_CFG_SIC_IDLE BIT(8)
#define SYS_CFG_BD_MAC2 BIT(9)
#define SYS_CFG_BD_MAC1 BIT(10)
@@ -220,9 +244,14 @@
#define SYS_CFG_CHIP_VER (BIT(12) | BIT(13) | BIT(14) | BIT(15))
#define SYS_CFG_BT_FUNC BIT(16)
#define SYS_CFG_VENDOR_ID BIT(19)
+#define SYS_CFG_VENDOR_EXT_MASK (BIT(18) | BIT(19))
+#define SYS_CFG_VENDOR_ID_TSMC 0
+#define SYS_CFG_VENDOR_ID_SMIC BIT(18)
+#define SYS_CFG_VENDOR_ID_UMC BIT(19)
#define SYS_CFG_PAD_HWPD_IDN BIT(22)
#define SYS_CFG_TRP_VAUX_EN BIT(23)
#define SYS_CFG_TRP_BT_EN BIT(24)
+#define SYS_CFG_SPS_LDO_SEL BIT(24) /* 8192eu */
#define SYS_CFG_BD_PKG_SEL BIT(25)
#define SYS_CFG_BD_HCI_SEL BIT(26)
#define SYS_CFG_TYPE_ID BIT(27)
@@ -255,6 +284,8 @@
#define GPIO_USB_SUSEN BIT(23)
#define GPIO_RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+#define REG_SYS_CFG2 0x00fc /* 8192eu */
+
/* 0x0100 ~ 0x01FF MACTOP General Configuration */
#define REG_CR 0x0100
#define CR_HCI_TXDMA_ENABLE BIT(0)
@@ -287,6 +318,7 @@
#define PBP_PAGE_SIZE_1024 0x4
#define REG_TRXDMA_CTRL 0x010c
+#define TRXDMA_CTRL_RXDMA_AGG_EN BIT(2)
#define TRXDMA_CTRL_VOQ_SHIFT 4
#define TRXDMA_CTRL_VIQ_SHIFT 6
#define TRXDMA_CTRL_BEQ_SHIFT 8
@@ -321,6 +353,8 @@
#define REG_MBIST_DONE 0x0178
#define REG_MBIST_FAIL 0x017c
#define REG_C2HEVT_MSG_NORMAL 0x01a0
+/* 8192EU/8723BU/8812 */
+#define REG_C2HEVT_CMD_ID_8723B 0x01ae
#define REG_C2HEVT_CLEAR 0x01af
#define REG_C2HEVT_MSG_TEST 0x01b8
#define REG_MCUTST_1 0x01c0
@@ -340,6 +374,11 @@
#define REG_BB_ACCEESS_CTRL 0x01e8
#define REG_BB_ACCESS_DATA 0x01ec
+#define REG_HMBOX_EXT0_8723B 0x01f0
+#define REG_HMBOX_EXT1_8723B 0x01f4
+#define REG_HMBOX_EXT2_8723B 0x01f8
+#define REG_HMBOX_EXT3_8723B 0x01fc
+
/* 0x0200 ~ 0x027F TXDMA Configuration */
#define REG_RQPN 0x0200
#define RQPN_HI_PQ_SHIFT 0
@@ -350,14 +389,29 @@
#define REG_FIFOPAGE 0x0204
#define REG_TDECTRL 0x0208
#define REG_TXDMA_OFFSET_CHK 0x020c
+#define TXDMA_OFFSET_DROP_DATA_EN BIT(9)
#define REG_TXDMA_STATUS 0x0210
#define REG_RQPN_NPQ 0x0214
+#define RQPN_NPQ_SHIFT 0
+#define RQPN_EPQ_SHIFT 16
+
+#define REG_AUTO_LLT 0x0224
+#define AUTO_LLT_INIT_LLT BIT(16)
+
+#define REG_DWBCN1_CTRL_8723B 0x0228
/* 0x0280 ~ 0x02FF RXDMA Configuration */
#define REG_RXDMA_AGG_PG_TH 0x0280
+#define RXDMA_USB_AGG_ENABLE BIT(31)
#define REG_RXPKT_NUM 0x0284
+#define RXPKT_NUM_RXDMA_IDLE BIT(17)
+#define RXPKT_NUM_RW_RELEASE_EN BIT(18)
#define REG_RXDMA_STATUS 0x0288
+/* Presumably only found on newer chips such as 8723bu */
+#define REG_RX_DMA_CTRL_8723B 0x0286
+#define REG_RXDMA_PRO_8723B 0x0290
+
#define REG_RF_BB_CMD_ADDR 0x02c0
#define REG_RF_BB_CMD_DATA 0x02c4
@@ -429,20 +483,26 @@
#define REG_ARFR1 0x0448
#define REG_ARFR2 0x044c
#define REG_ARFR3 0x0450
+#define REG_AMPDU_MAX_TIME_8723B 0x0456
#define REG_AGGLEN_LMT 0x0458
#define REG_AMPDU_MIN_SPACE 0x045c
#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045d
#define REG_FAST_EDCA_CTRL 0x0460
#define REG_RD_RESP_PKT_TH 0x0463
#define REG_INIRTS_RATE_SEL 0x0480
+/* 8723bu */
+#define REG_DATA_SUBCHANNEL 0x0483
+/* 8723au */
#define REG_INIDATA_RATE_SEL 0x0484
#define REG_POWER_STATUS 0x04a4
#define REG_POWER_STAGE1 0x04b4
#define REG_POWER_STAGE2 0x04b8
+#define REG_AMPDU_BURST_MODE_8723B 0x04bc
#define REG_PKT_VO_VI_LIFE_TIME 0x04c0
#define REG_PKT_BE_BK_LIFE_TIME 0x04c2
#define REG_STBC_SETTING 0x04c4
+#define REG_HT_SINGLE_AMPDU_8723B 0x04c7
#define REG_PROT_MODE_CTRL 0x04c8
#define REG_MAX_AGGR_NUM 0x04ca
#define REG_RTS_MAX_AGGR_NUM 0x04cb
@@ -453,6 +513,10 @@
#define REG_NEED_CPU_HANDLE 0x04e0
#define REG_PKT_LOSE_RPT 0x04e1
#define REG_PTCL_ERR_STATUS 0x04e2
+#define REG_TX_REPORT_CTRL 0x04ec
+#define TX_REPORT_CTRL_TIMER_ENABLE BIT(1)
+
+#define REG_TX_REPORT_TIME 0x04f0
#define REG_DUMMY 0x04fc
/* 0x0500 ~ 0x05FF EDCA Configuration */
@@ -505,6 +569,7 @@
#define BEACON_DMA_ATIME_INT_TIME 2
#define REG_ATIMWND 0x055a
+#define REG_USTIME_TSF_8723B 0x055c
#define REG_BCN_MAX_ERR 0x055d
#define REG_RXTSF_OFFSET_CCK 0x055e
#define REG_RXTSF_OFFSET_OFDM 0x055f
@@ -559,13 +624,25 @@
(Rx beacon, probe rsp) */
#define RCR_ACCEPT_CRC32 BIT(8) /* Accept CRC32 error packet */
#define RCR_ACCEPT_ICV BIT(9) /* Accept ICV error packet */
-#define RCR_ACCEPT_DATA_FRAME BIT(11)
-#define RCR_ACCEPT_CTRL_FRAME BIT(12)
-#define RCR_ACCEPT_MGMT_FRAME BIT(13)
+#define RCR_ACCEPT_DATA_FRAME BIT(11) /* Accept all data pkt or use
+ REG_RXFLTMAP2 */
+#define RCR_ACCEPT_CTRL_FRAME BIT(12) /* Accept all control pkt or use
+ REG_RXFLTMAP1 */
+#define RCR_ACCEPT_MGMT_FRAME BIT(13) /* Accept all mgmt pkt or use
+ REG_RXFLTMAP0 */
#define RCR_HTC_LOC_CTRL BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */
+#define RCR_UC_DATA_PKT_INT_ENABLE BIT(16) /* Enable unicast data packet
+ interrupt */
+#define RCR_BM_DATA_PKT_INT_ENABLE BIT(17) /* Enable broadcast data packet
+ interrupt */
+#define RCR_TIM_PARSER_ENABLE BIT(18) /* Enable RX beacon TIM parser */
#define RCR_MFBEN BIT(22)
-#define RCR_LSIGEN BIT(23)
+#define RCR_LSIG_ENABLE BIT(23) /* Enable LSIG TXOP Protection
+ function. Search KEYCAM for
+ each rx packet to check if
+ LSIGEN bit is set. */
#define RCR_MULTI_BSSID_ENABLE BIT(24) /* Enable Multiple BssId */
+#define RCR_FORCE_ACK BIT(26)
#define RCR_ACCEPT_BA_SSN BIT(27) /* Accept BA SSN */
#define RCR_APPEND_PHYSTAT BIT(28)
#define RCR_APPEND_ICV BIT(29)
@@ -605,6 +682,10 @@
#define REG_FWDLY 0x0661
#define REG_RXERR_RPT 0x0664
#define REG_WMAC_TRXPTCL_CTL 0x0668
+#define WMAC_TRXPTCL_CTL_BW_MASK (BIT(7) | BIT(8))
+#define WMAC_TRXPTCL_CTL_BW_20 0
+#define WMAC_TRXPTCL_CTL_BW_40 BIT(7)
+#define WMAC_TRXPTCL_CTL_BW_80 BIT(8)
/* Security */
#define REG_CAM_CMD 0x0670
@@ -632,18 +713,40 @@
#define REG_LPNAV_CTRL 0x0694
#define REG_WKFMCAM_CMD 0x0698
#define REG_WKFMCAM_RWD 0x069c
-#define REG_RXFLTMAP0 0x06a0
-#define REG_RXFLTMAP1 0x06a2
-#define REG_RXFLTMAP2 0x06a4
+
+/*
+ * RX filters: each bit corresponds to the numerical value of a frame
+ * subtype. If a bit is set, frames of that subtype are passed. A filter
+ * is only consulted when the corresponding RCR_ACCEPT_DATA_FRAME,
+ * RCR_ACCEPT_CTRL_FRAME, or RCR_ACCEPT_MGMT_FRAME bit in the RCR is clear.
+ *
+ * Example: the beacon subtype is binary 1000, i.e. decimal 8, so bit 8
+ * (0x100) must be set in REG_RXFLTMAP0 to enable beacon reception.
+ */
+#define REG_RXFLTMAP0 0x06a0 /* Management frames */
+#define REG_RXFLTMAP1 0x06a2 /* Control frames */
+#define REG_RXFLTMAP2 0x06a4 /* Data frames */
+
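+/*
+ * Hypothetical sketch (not part of this patch): receive only beacons by
+ * clearing RCR_ACCEPT_MGMT_FRAME and whitelisting the beacon subtype.
+ * Assumes the driver's rtl8xxxu_read32/write32/write16 helpers:
+ *
+ *	val32 = rtl8xxxu_read32(priv, REG_RCR);
+ *	val32 &= ~RCR_ACCEPT_MGMT_FRAME;
+ *	rtl8xxxu_write32(priv, REG_RCR, val32);
+ *	rtl8xxxu_write16(priv, REG_RXFLTMAP0, BIT(8));
+ */
+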
#define REG_BCN_PSR_RPT 0x06a8
#define REG_CALB32K_CTRL 0x06ac
#define REG_PKT_MON_CTRL 0x06b4
-#define REG_BT_COEX_TABLE 0x06c0
+#define REG_BT_COEX_TABLE1 0x06c0
+#define REG_BT_COEX_TABLE2 0x06c4
+#define REG_BT_COEX_TABLE3 0x06c8
+#define REG_BT_COEX_TABLE4 0x06cc
#define REG_WMAC_RESP_TXINFO 0x06d8
#define REG_MACID1 0x0700
#define REG_BSSID1 0x0708
+/*
+ * This seems to be 8723bu specific
+ */
+#define REG_BT_CONTROL_8723BU 0x0764
+#define BT_CONTROL_BT_GRANT BIT(12)
+
+#define REG_WLAN_ACT_CONTROL_8723B 0x076e
+
#define REG_FPGA0_RF_MODE 0x0800
#define FPGA_RF_MODE BIT(0)
#define FPGA_RF_MODE_JAPAN BIT(1)
@@ -734,6 +837,11 @@
#define REG_FPGA0_ANALOG3 0x0888
#define REG_FPGA0_ANALOG4 0x088c
+#define REG_NHM_TH9_TH10_8723B 0x0890
+#define REG_NHM_TIMER_8723B 0x0894
+#define REG_NHM_TH3_TO_TH0_8723B 0x0898
+#define REG_NHM_TH7_TO_TH4_8723B 0x089c
+
#define REG_FPGA0_XA_LSSI_READBACK 0x08a0 /* Transceiver LSSI Readback */
#define REG_FPGA0_XB_LSSI_READBACK 0x08a4
#define REG_HSPI_XA_READBACK 0x08b8 /* Transceiver A HSPI read */
@@ -742,6 +850,11 @@
#define REG_FPGA1_RF_MODE 0x0900
#define REG_FPGA1_TX_INFO 0x090c
+#define REG_DPDT_CTRL 0x092c /* 8723BU */
+#define REG_RFE_CTRL_ANTA_SRC 0x0930 /* 8723BU */
+#define REG_RFE_PATH_SELECT 0x0940 /* 8723BU */
+#define REG_RFE_BUFFER 0x0944 /* 8723BU */
+#define REG_S0S1_PATH_SWITCH 0x0948 /* 8723BU */
#define REG_CCK0_SYSTEM 0x0a00
#define CCK0_SIDEBAND BIT(4)
@@ -765,11 +878,16 @@
#define REG_OFDM0_TR_MUX_PAR 0x0c08
+#define REG_OFDM0_FA_RSTC 0x0c0c
+
#define REG_OFDM0_XA_RX_IQ_IMBALANCE 0x0c14
#define REG_OFDM0_XB_RX_IQ_IMBALANCE 0x0c1c
#define REG_OFDM0_ENERGY_CCA_THRES 0x0c4c
+#define REG_OFDM0_RX_D_SYNC_PATH 0x0c40
+#define OFDM0_SYNC_PATH_NOTCH_FILTER BIT(1)
+
#define REG_OFDM0_XA_AGC_CORE1 0x0c50
#define REG_OFDM0_XA_AGC_CORE2 0x0c54
#define REG_OFDM0_XB_AGC_CORE1 0x0c58
@@ -794,6 +912,9 @@
#define REG_OFDM0_RX_IQ_EXT_ANTA 0x0ca0
+/* 8723bu */
+#define REG_OFDM0_TX_PSDO_NOISE_WEIGHT 0x0ce4
+
#define REG_OFDM1_LSTF 0x0d00
#define OFDM_LSTF_PRIME_CH_LOW BIT(10)
#define OFDM_LSTF_PRIME_CH_HIGH BIT(11)
@@ -952,6 +1073,10 @@
#define RF6052_REG_MODE_AG 0x18 /* RF channel and BW switch */
#define MODE_AG_CHANNEL_MASK 0x3ff
#define MODE_AG_CHANNEL_20MHZ BIT(10)
+#define MODE_AG_BW_MASK (BIT(10) | BIT(11))
+#define MODE_AG_BW_20MHZ_8723B (BIT(10) | BIT(11))
+#define MODE_AG_BW_40MHZ_8723B BIT(10)
+#define MODE_AG_BW_80MHZ_8723B 0
#define RF6052_REG_TOP 0x19
#define RF6052_REG_RX_G1 0x1a
@@ -979,3 +1104,14 @@
#define RF6052_REG_TXPA_G1 0x31 /* RF TX PA control */
#define RF6052_REG_TXPA_G2 0x32 /* RF TX PA control */
#define RF6052_REG_TXPA_G3 0x33 /* RF TX PA control */
+
+/*
+ * NextGen regs: 8723BU
+ */
+#define RF6052_REG_T_METER_8723B 0x42
+#define RF6052_REG_UNKNOWN_43 0x43
+#define RF6052_REG_UNKNOWN_55 0x55
+#define RF6052_REG_S0S1 0xb0
+#define RF6052_REG_UNKNOWN_DF 0xdf
+#define RF6052_REG_UNKNOWN_ED 0xed
+#define RF6052_REG_WE_LUT 0xef
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 4ae421ef30d9..0f48048b8654 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -35,62 +35,58 @@
#include <linux/export.h>
#include <net/cfg80211.h>
+u8 channel5g[CHANNEL_MAX_NUMBER_5G] = {
+ 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
+ 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
+ 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
+ 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
+ 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
+ 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
+ 165, 167, 169, 171, 173, 175, 177 /* Band 4 */
+};
+EXPORT_SYMBOL(channel5g);
+
+u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {
+ 42, 58, 106, 122, 138, 155, 171
+};
+EXPORT_SYMBOL(channel5g_80m);
+
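+/*
+ * Hypothetical usage sketch (not added by this patch): map a 5 GHz
+ * channel number to its index in the shared table exported above.
+ *
+ *	static int channel5g_to_index(u8 channel)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++)
+ *			if (channel5g[i] == channel)
+ *				return i;
+ *		return -1;
+ *	}
+ */
+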
void rtl_addr_delay(u32 addr)
{
if (addr == 0xfe)
- mdelay(50);
+ msleep(50);
else if (addr == 0xfd)
- mdelay(5);
+ msleep(5);
else if (addr == 0xfc)
- mdelay(1);
+ msleep(1);
else if (addr == 0xfb)
- udelay(50);
+ usleep_range(50, 100);
else if (addr == 0xfa)
- udelay(5);
+ usleep_range(5, 10);
else if (addr == 0xf9)
- udelay(1);
+ usleep_range(1, 2);
}
EXPORT_SYMBOL(rtl_addr_delay);
void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
u32 mask, u32 data)
{
- if (addr == 0xfe) {
- mdelay(50);
- } else if (addr == 0xfd) {
- mdelay(5);
- } else if (addr == 0xfc) {
- mdelay(1);
- } else if (addr == 0xfb) {
- udelay(50);
- } else if (addr == 0xfa) {
- udelay(5);
- } else if (addr == 0xf9) {
- udelay(1);
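+	/* 0xf9 - 0xfe are pseudo-addresses encoding fixed delays */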
+ if (addr >= 0xf9 && addr <= 0xfe) {
+ rtl_addr_delay(addr);
} else {
rtl_set_rfreg(hw, rfpath, addr, mask, data);
- udelay(1);
+ usleep_range(1, 2);
}
}
EXPORT_SYMBOL(rtl_rfreg_delay);
void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
{
- if (addr == 0xfe) {
- mdelay(50);
- } else if (addr == 0xfd) {
- mdelay(5);
- } else if (addr == 0xfc) {
- mdelay(1);
- } else if (addr == 0xfb) {
- udelay(50);
- } else if (addr == 0xfa) {
- udelay(5);
- } else if (addr == 0xf9) {
- udelay(1);
+ if (addr >= 0xf9 && addr <= 0xfe) {
+ rtl_addr_delay(addr);
} else {
rtl_set_bbreg(hw, addr, MASKDWORD, data);
- udelay(1);
+ usleep_range(1, 2);
}
}
EXPORT_SYMBOL(rtl_bb_delay);
@@ -1371,11 +1367,13 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
switch (action) {
case IEEE80211_AMPDU_TX_START:
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 7f471bff435c..283d608b9973 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -855,7 +855,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
}
/* handle command packet here */
if (rtlpriv->cfg->ops->rx_command_packet &&
- rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
+ rtlpriv->cfg->ops->rx_command_packet(hw, &stats, skb)) {
dev_kfree_skb_any(skb);
goto new_trx_end;
}
@@ -2392,7 +2392,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
rtlpriv->cfg->ops->deinit_sw_vars(hw);
if (rtlpci->irq_alloc) {
- synchronize_irq(rtlpci->pdev->irq);
free_irq(rtlpci->pdev->irq, hw);
rtlpci->irq_alloc = 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 28f7010e7108..1aca77719521 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -41,7 +41,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_sta_info *sta_entry = NULL;
- u8 wireless_mode = 0;
+ u16 wireless_mode = 0;
/*
*this rate is no use for true rate, firmware
@@ -99,7 +99,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
{
struct rtl_mac *mac = rtl_mac(rtlpriv);
struct rtl_sta_info *sta_entry = NULL;
- u8 wireless_mode = 0;
+ u16 wireless_mode = 0;
u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0;
if (sta) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 791efbe6b18c..11701064b0e1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -851,7 +851,7 @@ void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
}
u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb)
{
return 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
index eab5ae0eb46c..5a24d194ac76 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
@@ -790,7 +790,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool firstseg, bool lastseg,
struct sk_buff *skb);
u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index bb06fe836fe7..7810fe87dca7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -924,19 +924,11 @@ static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw,
static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
{
- u8 channel_5g[59] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128,
- 130, 132, 134, 136, 138, 140, 149, 151,
- 153, 155, 157, 159, 161, 163, 165
- };
u8 place = chnl;
if (chnl > 14) {
- for (place = 14; place < sizeof(channel_5g); place++) {
- if (channel_5g[place] == chnl) {
+ for (place = 14; place < sizeof(channel5g); place++) {
+ if (channel5g[place] == chnl) {
place++;
break;
}
@@ -2471,16 +2463,9 @@ static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel)
{
int i;
- u8 channel_5g[45] = {
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
- 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
- 161, 163, 165
- };
- for (i = 0; i < sizeof(channel_5g); i++)
- if (channel == channel_5g[i])
+ for (i = 0; i < sizeof(channel5g); i++)
+ if (channel == channel5g[i])
return true;
return false;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 5f14308e8eb3..9fd3f1b6e4a8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -2018,18 +2018,6 @@ static void _rtl92ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
struct rtl_efuse *efu = rtl_efuse(rtl_priv(hw));
struct txpower_info_2g pwr2g;
struct txpower_info_5g pwr5g;
- u8 channel5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54,
- 56, 58, 60, 62, 64, 100, 102, 104, 106,
- 108, 110, 112, 114, 116, 118, 120, 122,
- 124, 126, 128, 130, 132, 134, 136, 138,
- 140, 142, 144, 149, 151, 153, 155, 157,
- 159, 161, 163, 165, 167, 168, 169, 171,
- 173, 175, 177
- };
- u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {
- 42, 58, 106, 122, 138, 155, 171
- };
u8 rf, idx;
u8 i;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index d39ee67f6113..24eff8ea4c2e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -1105,13 +1105,13 @@ void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
}
u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb)
{
u32 result = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- switch (status.packet_report_type) {
+ switch (status->packet_report_type) {
case NORMAL_RX:
result = 0;
break;
@@ -1121,7 +1121,7 @@ u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw,
break;
default:
RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE,
- "Unknown packet type %d\n", status.packet_report_type);
+ "Unknown packet type %d\n", status->packet_report_type);
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
index 8f78ac9e6040..a4c38345233e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
@@ -857,6 +857,6 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool firstseg, bool lastseg,
struct sk_buff *skb);
u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index 2f7c144d7980..7b4a9b63583b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -710,7 +710,7 @@ void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
}
u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb)
{
return 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
index 017da7e194d8..32970bf18856 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
@@ -716,6 +716,6 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool firstseg, bool lastseg,
struct sk_buff *skb);
u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 338ec9a9d09b..60345975f9fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -758,13 +758,13 @@ void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
}
u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb)
{
u32 result = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- switch (status.packet_report_type) {
+ switch (status->packet_report_type) {
case NORMAL_RX:
result = 0;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
index 45949ac4854c..40c36607b8b9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
@@ -620,6 +620,6 @@ void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool firstseg, bool lastseg,
struct sk_buff *skb);
u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index b57cfd965196..6a8245c4ea48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -626,7 +626,7 @@ static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnet PWDB = 0x%x\n",
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
rtl_dm_dig->min_undec_pwdb_for_dm);
}
RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index 525eb234627c..a4fc70e8c9c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -271,7 +271,7 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
err = _rtl8821ae_fw_free_to_go(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+ RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
"Firmware is not ready to run!\n");
} else {
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index bbb789f8990b..fe900badd468 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2786,14 +2786,6 @@ static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct txpower_info_2g pwrinfo24g;
struct txpower_info_5g pwrinfo5g;
- u8 channel5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54,
- 56, 58, 60, 62, 64, 100, 102, 104, 106,
- 108, 110, 112, 114, 116, 118, 120, 122,
- 124, 126, 128, 130, 132, 134, 136, 138,
- 140, 142, 144, 149, 151, 153, 155, 157,
- 159, 161, 163, 165, 167, 168, 169, 171, 173, 175, 177};
- u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171};
u8 rf_path, index;
u8 i;
@@ -2872,16 +2864,6 @@ static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct txpower_info_2g pwrinfo24g;
struct txpower_info_5g pwrinfo5g;
- u8 channel5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54,
- 56, 58, 60, 62, 64, 100, 102, 104, 106,
- 108, 110, 112, 114, 116, 118, 120, 122,
- 124, 126, 128, 130, 132, 134, 136, 138,
- 140, 142, 144, 149, 151, 153, 155, 157,
- 159, 161, 163, 165, 167, 168, 169, 171,
- 173, 175, 177};
- u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {
- 42, 58, 106, 122, 138, 155, 171};
u8 rf_path, index;
u8 i;
@@ -3855,7 +3837,7 @@ void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u8 wireless_mode = mac->mode;
+ u16 wireless_mode = mac->mode;
u8 sifs_timer, r2t_sifs;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 9b4d8a637915..74165b3eb362 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -1472,18 +1472,13 @@ static char _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
char channel_index = -1;
- u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64,
- 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122,
- 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 149,
- 151, 153, 155, 157, 159, 161, 163, 165, 167, 168, 169, 171,
- 173, 175, 177};
u8 i = 0;
+
if (band == BAND_ON_2_4G)
channel_index = channel - 1;
else if (band == BAND_ON_5G) {
- for (i = 0; i < sizeof(channel_5g)/sizeof(u8); ++i) {
- if (channel_5g[i] == channel)
+ for (i = 0; i < sizeof(channel5g)/sizeof(u8); ++i) {
+ if (channel5g[i] == channel)
channel_index = i;
}
} else
@@ -2240,13 +2235,6 @@ void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index)
{
- u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = {
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62,
- 64, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118,
- 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140,
- 142, 144, 149, 151, 153, 155, 157, 159, 161, 163, 165,
- 167, 168, 169, 171, 173, 175, 177
- };
u8 i = 0;
bool in_24g = true;
@@ -2257,7 +2245,7 @@ static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index)
in_24g = false;
for (i = 0; i < CHANNEL_MAX_NUMBER_5G; ++i) {
- if (channel_5g[i] == channel) {
+ if (channel5g[i] == channel) {
*chnl_index = i;
return in_24g;
}
@@ -2728,13 +2716,10 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
rate <= DESC_RATEVHT2SS_MCS9))
txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S];
} else if (bandwidth == HT_CHANNEL_WIDTH_80) {
- u8 channel_5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {
- 42, 58, 106, 122, 138, 155, 171
- };
u8 i;
- for (i = 0; i < sizeof(channel_5g_80m) / sizeof(u8); ++i)
- if (channel_5g_80m[i] == channel)
+ for (i = 0; i < sizeof(channel5g_80m) / sizeof(u8); ++i)
+ if (channel5g_80m[i] == channel)
index = i;
if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index 174743aef943..41efaa148d13 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -998,13 +998,13 @@ void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
}
u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb)
{
u32 result = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- switch (status.packet_report_type) {
+ switch (status->packet_report_type) {
case NORMAL_RX:
result = 0;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
index 31409042d8dd..ad565bebf1d5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
@@ -615,6 +615,6 @@ void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
bool firstseg, bool lastseg,
struct sk_buff *skb);
u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw,
- struct rtl_stats status,
+ const struct rtl_stats *status,
struct sk_buff *skb);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 4544752a2ba8..554d81420f19 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -116,17 +116,12 @@
#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */
#define CHANNEL_MAX_NUMBER_2G 14
-#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
+#define CHANNEL_MAX_NUMBER_5G 49 /* Please refer to
*"phy_GetChnlGroup8812A" and
* "Hal_ReadTxPowerInfo8812A"
*/
#define CHANNEL_MAX_NUMBER_5G_80M 7
#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */
-#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to
- *"phy_GetChnlGroup8812A" and
- * "Hal_ReadTxPowerInfo8812A"
- */
-#define CHANNEL_MAX_NUMBER_5G_80M 7
#define MAX_PG_GROUP 13
#define CHANNEL_GROUP_MAX_2G 3
#define CHANNEL_GROUP_IDX_5GL 3
@@ -1323,14 +1318,13 @@ struct rtl_tid_data {
struct rtl_sta_info {
struct list_head list;
- u8 ratr_index;
- u8 wireless_mode;
- u8 mimo_ps;
- u8 mac_addr[ETH_ALEN];
struct rtl_tid_data tids[MAX_TID_COUNT];
-
/* just used for ap adhoc or mesh*/
struct rssi_sta rssi_stat;
+ u16 wireless_mode;
+ u8 ratr_index;
+ u8 mimo_ps;
+ u8 mac_addr[ETH_ALEN];
} __packed;
struct rtl_priv;
@@ -2194,7 +2188,7 @@ struct rtl_hal_ops {
bool (*get_btc_status) (void);
bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
u32 (*rx_command_packet)(struct ieee80211_hw *hw,
- struct rtl_stats status, struct sk_buff *skb);
+ const struct rtl_stats *status, struct sk_buff *skb);
void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
struct rtl_wow_pattern *rtl_pattern,
u8 index);
@@ -2904,6 +2898,10 @@ value to host byte ordering.*/
#define STBC_VHT_TEST_TX_ENABLE BIT(2)
#define STBC_VHT_CAP_TX BIT(3)
+extern u8 channel5g[CHANNEL_MAX_NUMBER_5G];
+
+extern u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M];
+
static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
{
return rtlpriv->io.read8_sync(rtlpriv, addr);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index b5bcc933a2a6..4df992de7d07 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -659,29 +659,24 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
* informs the f/w regarding this.
* @hw: Pointer to the ieee80211_hw structure.
* @vif: Pointer to the ieee80211_vif structure.
- * @action: ieee80211_ampdu_mlme_action enum.
- * @sta: Pointer to the ieee80211_sta structure.
- * @tid: Traffic identifier.
- * @ssn: Pointer to ssn value.
- * @buf_size: Buffer size (for kernel version > 2.6.38).
- * @amsdu: is AMSDU in AMPDU allowed
+ * @params: Pointer to A-MPDU action parameters
*
* Return: status: 0 on success, negative error code on failure.
*/
static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta,
- unsigned short tid,
- unsigned short *ssn,
- unsigned char buf_size,
- bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
int status = -EOPNOTSUPP;
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
u16 seq_no = 0;
u8 ii = 0;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ u8 buf_size = params->buf_size;
for (ii = 0; ii < RSI_MAX_VIFS; ii++) {
if (vif == adapter->vifs[ii])
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index a740083634d8..63f95e9c2992 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -446,8 +446,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
return 0;
}
-#ifdef CONFIG_PM
-static int cw1200_spi_suspend(struct device *dev)
+static int __maybe_unused cw1200_spi_suspend(struct device *dev)
{
struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev));
@@ -460,16 +459,12 @@ static int cw1200_spi_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL);
-#endif
-
static struct spi_driver spi_driver = {
.probe = cw1200_spi_probe,
.remove = cw1200_spi_disconnect,
.driver = {
.name = "cw1200_wlan_spi",
-#ifdef CONFIG_PM
- .pm = &cw1200_pm_ops,
-#endif
+ .pm = IS_ENABLED(CONFIG_PM) ? &cw1200_pm_ops : NULL,
},
};
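For reference, the same CONFIG_PM idiom in isolation — a minimal sketch for a hypothetical driver (names are illustrative), showing how __maybe_unused plus an IS_ENABLED() ternary replaces the #ifdef pairs removed above:

/* Compile the callback unconditionally; reference it only when
 * CONFIG_PM is enabled. "foo" is a hypothetical driver.
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware here */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, NULL);

static struct spi_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = IS_ENABLED(CONFIG_PM) ? &foo_pm_ops : NULL,
	},
};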
diff --git a/drivers/net/wireless/st/cw1200/pm.h b/drivers/net/wireless/st/cw1200/pm.h
index 3ed90ff22bb8..534548470ebc 100644
--- a/drivers/net/wireless/st/cw1200/pm.h
+++ b/drivers/net/wireless/st/cw1200/pm.h
@@ -31,13 +31,18 @@ int cw1200_pm_init(struct cw1200_pm_state *pm,
void cw1200_pm_deinit(struct cw1200_pm_state *pm);
int cw1200_wow_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
-int cw1200_wow_resume(struct ieee80211_hw *hw);
int cw1200_can_suspend(struct cw1200_common *priv);
+int cw1200_wow_resume(struct ieee80211_hw *hw);
void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
unsigned long tmo);
#else
static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm,
- unsigned long tmo) {
+ unsigned long tmo)
+{
+}
+static inline int cw1200_can_suspend(struct cw1200_common *priv)
+{
+ return 0;
}
#endif
#endif
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 06321c799c90..d0ddcde6c695 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -2129,9 +2129,7 @@ void cw1200_mcast_timeout(unsigned long arg)
int cw1200_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
/* Aggregation is implemented fully in firmware,
* including block ack negotiation. Do not allow
diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h
index bebb3379017f..a0bacaa39b31 100644
--- a/drivers/net/wireless/st/cw1200/sta.h
+++ b/drivers/net/wireless/st/cw1200/sta.h
@@ -109,9 +109,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
u32 changed);
int cw1200_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
void cw1200_suspend_resume(struct cw1200_common *priv,
struct wsm_suspend_resume *arg);
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 4edfe28395f0..86ccf84ea0c6 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -345,6 +345,69 @@ static const struct file_operations dynamic_fw_traces_ops = {
.llseek = default_llseek,
};
+#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
+static ssize_t radar_debug_mode_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 10, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal radar_debug_mode value!");
+ return -EINVAL;
+ }
+
+ /* valid values: 0/1 */
+ if (!(value == 0 || value == 1)) {
+ wl1271_warning("value is not in valid!");
+ return -EINVAL;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->radar_debug_mode = value;
+
+ if (unlikely(wl->state != WLCORE_STATE_ON))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ wlcore_cmd_generic_cfg(wl, wlvif,
+ WLCORE_CFG_FEATURE_RADAR_DEBUG,
+ wl->radar_debug_mode, 0);
+ }
+
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static ssize_t radar_debug_mode_read(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+
+ return wl1271_format_buffer(userbuf, count, ppos,
+ "%d\n", wl->radar_debug_mode);
+}
+
+static const struct file_operations radar_debug_mode_ops = {
+ .write = radar_debug_mode_write,
+ .read = radar_debug_mode_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+#endif /* CFG80211_CERTIFICATION_ONUS */
+
int wl18xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
@@ -510,6 +573,9 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(conf, moddir);
DEBUGFS_ADD(radar_detection, moddir);
+#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
+ DEBUGFS_ADD(radar_debug_mode, moddir);
+#endif
DEBUGFS_ADD(dynamic_fw_traces, moddir);
return 0;
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 719907a0a2c2..ff6e46dd61f8 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -146,7 +146,8 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
mbox->radar_channel,
wl18xx_radar_type_decode(mbox->radar_type));
- ieee80211_radar_detected(wl->hw);
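+		/* In debug mode, log the radar event but do not report it */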
+ if (!wl->radar_debug_mode)
+ ieee80211_radar_detected(wl->hw);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index 969c9d79bfc8..8a8f1e711384 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -13,7 +13,7 @@ config WLCORE
config WLCORE_SPI
tristate "TI wlcore SPI support"
- depends on WLCORE && SPI_MASTER
+ depends on WLCORE && SPI_MASTER && OF
select CRC7
---help---
This module adds support for the SPI interface of adapters using
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index c96405498bf4..4b59f67724de 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -38,7 +38,7 @@
int wlcore_event_fw_logger(struct wl1271 *wl)
{
- u32 ret;
+ int ret;
struct fw_logger_information fw_log;
u8 *buffer;
u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index e92f2639af2c..d0b7734030ef 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -558,6 +558,11 @@ static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (ret < 0)
return ret;
+ if (wl->radar_debug_mode)
+ wlcore_cmd_generic_cfg(wl, wlvif,
+ WLCORE_CFG_FEATURE_RADAR_DEBUG,
+ wl->radar_debug_mode, 0);
+
return 0;
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index d1109c4f0f0d..dde36203ca42 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5187,14 +5187,16 @@ out:
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u8 hlid, *ba_bitmap;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
tid);
@@ -5493,7 +5495,7 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271 *wl = hw->priv;
- int channel, ret = 0;
+ int channel, active_roc, ret = 0;
channel = ieee80211_frequency_to_channel(chan->center_freq);
@@ -5506,9 +5508,9 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
goto out;
/* return EBUSY if we can't ROC right now */
- if (WARN_ON(wl->roc_vif ||
- find_first_bit(wl->roc_map,
- WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
+ active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
+ if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
+ wl1271_warning("active roc on role %d", active_roc);
ret = -EBUSY;
goto out;
}
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 44f059f7f34e..020ac1a4b408 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -30,6 +30,8 @@
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
#include "wlcore.h"
#include "wl12xx_80211.h"
@@ -81,6 +83,7 @@
struct wl12xx_spi_glue {
struct device *dev;
struct platform_device *core;
+ struct regulator *reg; /* Power regulator */
};
static void wl12xx_spi_reset(struct device *child)
@@ -318,14 +321,76 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
return 0;
}
+/**
+ * wl12xx_spi_set_power - power on/off the wl12xx unit
+ * @child: wl12xx device handle.
+ * @enable: true/false to power on/off the unit.
+ *
+ * Use the WiFi enable regulator to enable/disable the WiFi unit.
+ */
+static int wl12xx_spi_set_power(struct device *child, bool enable)
+{
+ int ret = 0;
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+
+ WARN_ON(!glue->reg);
+
+ /* Update regulator state */
+ if (enable) {
+ ret = regulator_enable(glue->reg);
+ if (ret)
+ dev_err(child, "Power enable failure\n");
+ } else {
+ ret = regulator_disable(glue->reg);
+ if (ret)
+ dev_err(child, "Power disable failure\n");
+ }
+
+ return ret;
+}
+
static struct wl1271_if_operations spi_ops = {
.read = wl12xx_spi_raw_read,
.write = wl12xx_spi_raw_write,
.reset = wl12xx_spi_reset,
.init = wl12xx_spi_init,
+ .power = wl12xx_spi_set_power,
.set_block_size = NULL,
};
+static const struct of_device_id wlcore_spi_of_match_table[] = {
+ { .compatible = "ti,wl1271" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table);
+
+/**
+ * wlcore_probe_of - DT node parsing.
+ * @spi: SPI slave device parameters.
+ * @glue: wl12xx SPI bus to slave device glue parameters.
+ * @pdev_data: wlcore device parameters.
+ */
+static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
+ struct wlcore_platdev_data *pdev_data)
+{
+ struct device_node *dt_node = spi->dev.of_node;
+ int ret;
+
+ if (of_find_property(dt_node, "clock-xtal", NULL))
+ pdev_data->ref_clock_xtal = true;
+
+ ret = of_property_read_u32(dt_node, "ref-clock-frequency",
+ &pdev_data->ref_clock_freq);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(glue->dev,
+ "can't get reference clock frequency (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
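+
+/*
+ * A hypothetical device tree node this parser would accept (values are
+ * illustrative; the "vwlan" supply is consumed in wl1271_probe() below):
+ *
+ *	wlcore: wlcore@0 {
+ *		compatible = "ti,wl1271";
+ *		reg = <0>;
+ *		interrupt-parent = <&gpio0>;
+ *		interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ *		vwlan-supply = <&wlan_en_reg>;
+ *		clock-xtal;
+ *		ref-clock-frequency = <38400000>;
+ *	};
+ */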
+
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
@@ -335,8 +400,6 @@ static int wl1271_probe(struct spi_device *spi)
memset(&pdev_data, 0x00, sizeof(pdev_data));
- /* TODO: add DT parsing when needed */
-
pdev_data.if_ops = &spi_ops;
glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
@@ -353,6 +416,21 @@ static int wl1271_probe(struct spi_device *spi)
* comes from the board-peripherals file */
spi->bits_per_word = 32;
+ glue->reg = devm_regulator_get(&spi->dev, "vwlan");
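+	/* The supply may not have registered yet; defer instead of failing */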
+ if (PTR_ERR(glue->reg) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (IS_ERR(glue->reg)) {
+ dev_err(glue->dev, "can't get regulator\n");
+ return PTR_ERR(glue->reg);
+ }
+
+ ret = wlcore_probe_of(spi, glue, &pdev_data);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(glue->dev,
+ "can't get device tree parameters (%d)\n", ret);
+ return ret;
+ }
+
ret = spi_setup(spi);
if (ret < 0) {
dev_err(glue->dev, "spi_setup failed\n");
@@ -370,7 +448,7 @@ static int wl1271_probe(struct spi_device *spi)
memset(res, 0x00, sizeof(res));
res[0].start = spi->irq;
- res[0].flags = IORESOURCE_IRQ;
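+	/* Carry the IRQ trigger type (e.g. from DT) into the child resource */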
+ res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(spi->irq);
res[0].name = "irq";
ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
@@ -408,10 +486,10 @@ static int wl1271_remove(struct spi_device *spi)
return 0;
}
-
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
+ .of_match_table = of_match_ptr(wlcore_spi_of_match_table),
},
.probe = wl1271_probe,
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index dda01b118c26..72c31a8edcfb 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -463,6 +463,7 @@ struct wl1271 {
/* the current dfs region */
enum nl80211_dfs_regions dfs_region;
+ bool radar_debug_mode;
/* size of the private FW status data */
size_t fw_status_len;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0333ab0fd926..f44b38846420 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -52,6 +52,7 @@ typedef unsigned int pending_ring_idx_t;
struct pending_tx_info {
struct xen_netif_tx_request req; /* tx request */
+ unsigned int extra_count;
/* Callback data for released SKBs. The callback is always
* xenvif_zerocopy_callback, desc contains the pending_idx, which is
* also an index in pending_tx_info array. It is initialized in
@@ -251,6 +252,7 @@ struct xenvif {
unsigned int stalled_queues;
struct xenbus_watch credit_watch;
+ struct xenbus_watch mcast_ctrl_watch;
spinlock_t lock;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 61b97c34bb3b..b42f26029225 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -95,6 +95,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
s8 st);
static void push_tx_responses(struct xenvif_queue *queue);
@@ -696,14 +697,15 @@ void xenvif_tx_credit_callback(unsigned long data)
}
static void xenvif_tx_err(struct xenvif_queue *queue,
- struct xen_netif_tx_request *txp, RING_IDX end)
+ struct xen_netif_tx_request *txp,
+ unsigned int extra_count, RING_IDX end)
{
RING_IDX cons = queue->tx.req_cons;
unsigned long flags;
do {
spin_lock_irqsave(&queue->response_lock, flags);
- make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
push_tx_responses(queue);
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
@@ -724,6 +726,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
static int xenvif_count_requests(struct xenvif_queue *queue,
struct xen_netif_tx_request *first,
+ unsigned int extra_count,
struct xen_netif_tx_request *txp,
int work_to_do)
{
@@ -812,7 +815,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
} while (more_data);
if (drop_err) {
- xenvif_tx_err(queue, first, cons + slots);
+ xenvif_tx_err(queue, first, extra_count, cons + slots);
return drop_err;
}
@@ -827,9 +830,10 @@ struct xenvif_tx_cb {
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
- u16 pending_idx,
- struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *mop)
+ u16 pending_idx,
+ struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
+ struct gnttab_map_grant_ref *mop)
{
queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
@@ -838,6 +842,7 @@ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
memcpy(&queue->pending_tx_info[pending_idx].req, txp,
sizeof(*txp));
+ queue->pending_tx_info[pending_idx].extra_count = extra_count;
}
static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
@@ -880,7 +885,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
shinfo->nr_frags++, txp++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
- xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
}
@@ -893,7 +898,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
shinfo->nr_frags++, txp++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
- xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
+ gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
}
@@ -1095,8 +1101,9 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
}
static int xenvif_get_extras(struct xenvif_queue *queue,
- struct xen_netif_extra_info *extras,
- int work_to_do)
+ struct xen_netif_extra_info *extras,
+ unsigned int *extra_count,
+ int work_to_do)
{
struct xen_netif_extra_info extra;
RING_IDX cons = queue->tx.req_cons;
@@ -1109,9 +1116,12 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
}
RING_COPY_REQUEST(&queue->tx, cons, &extra);
+
+ queue->tx.req_cons = ++cons;
+ (*extra_count)++;
+
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
- queue->tx.req_cons = ++cons;
netdev_err(queue->vif->dev,
"Invalid extra type: %d\n", extra.type);
xenvif_fatal_tx_err(queue->vif);
@@ -1119,7 +1129,6 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
}
memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
- queue->tx.req_cons = ++cons;
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
return work_to_do;
@@ -1294,6 +1303,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
+ unsigned int extra_count;
u16 pending_idx;
RING_IDX idx;
int work_to_do;
@@ -1330,8 +1340,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
queue->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
+ extra_count = 0;
if (txreq.flags & XEN_NETTXF_extra_info) {
work_to_do = xenvif_get_extras(queue, extras,
+ &extra_count,
work_to_do);
idx = queue->tx.req_cons;
if (unlikely(work_to_do < 0))
@@ -1344,7 +1356,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
- make_tx_response(queue, &txreq,
+ make_tx_response(queue, &txreq, extra_count,
(ret == 0) ?
XEN_NETIF_RSP_OKAY :
XEN_NETIF_RSP_ERROR);
@@ -1358,12 +1370,14 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
- make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+ make_tx_response(queue, &txreq, extra_count,
+ XEN_NETIF_RSP_OKAY);
push_tx_responses(queue);
continue;
}
- ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
+ ret = xenvif_count_requests(queue, &txreq, extra_count,
+ txfrags, work_to_do);
if (unlikely(ret < 0))
break;
@@ -1372,7 +1386,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(queue->vif->dev,
"Bad packet size: %d\n", txreq.size);
- xenvif_tx_err(queue, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, extra_count, idx);
break;
}
@@ -1397,7 +1411,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (unlikely(skb == NULL)) {
netdev_dbg(queue->vif->dev,
"Can't allocate a skb in start_xmit.\n");
- xenvif_tx_err(queue, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, extra_count, idx);
break;
}
@@ -1416,7 +1430,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
+ xenvif_tx_err(queue, &txreq, extra_count, idx);
if (net_ratelimit())
netdev_err(queue->vif->dev,
"Can't allocate the frag_list skb.\n");
@@ -1457,13 +1471,16 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (data_len < txreq.size) {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
- xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
+ xenvif_tx_create_map_op(queue, pending_idx, &txreq,
+ extra_count, gop);
gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
- memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
- sizeof(txreq));
+ memcpy(&queue->pending_tx_info[pending_idx].req,
+ &txreq, sizeof(txreq));
+ queue->pending_tx_info[pending_idx].extra_count =
+ extra_count;
}
queue->pending_cons++;
@@ -1804,7 +1821,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
spin_lock_irqsave(&queue->response_lock, flags);
- make_tx_response(queue, &pending_tx_info->req, status);
+ make_tx_response(queue, &pending_tx_info->req,
+ pending_tx_info->extra_count, status);
/* Release the pending index before pushing the Tx response so
 * it's available before a new Tx request is pushed by the
@@ -1821,6 +1839,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
+ unsigned int extra_count,
s8 st)
{
RING_IDX i = queue->tx.rsp_prod_pvt;
@@ -1830,7 +1849,7 @@ static void make_tx_response(struct xenvif_queue *queue,
resp->id = txp->id;
resp->status = st;
- if (txp->flags & XEN_NETTXF_extra_info)
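+	/* Emit one RSP_NULL slot for each extra consumed with this request */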
+ while (extra_count-- != 0)
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 56ebd8267386..bd182cd55dda 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -327,7 +327,7 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
- /* We support multicast-control. */
+ /* We support dynamic multicast-control. */
err = xenbus_printf(xbt, dev->nodename,
"feature-multicast-control", "%d", 1);
if (err) {
@@ -335,6 +335,14 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-dynamic-multicast-control",
+ "%d", 1);
+ if (err) {
+ message = "writing feature-dynamic-multicast-control";
+ goto abort_transaction;
+ }
+
err = xenbus_transaction_end(xbt, 0);
} while (err == -EAGAIN);
@@ -503,8 +511,6 @@ static void set_backend_state(struct backend_info *be,
switch (state) {
case XenbusStateInitWait:
case XenbusStateConnected:
- pr_info("%s: prepare for reconnect\n",
- be->dev->nodename);
backend_switch_state(be, XenbusStateInitWait);
break;
case XenbusStateClosing:
@@ -683,7 +689,8 @@ static void xen_net_rate_changed(struct xenbus_watch *watch,
}
}
-static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+static int xen_register_credit_watch(struct xenbus_device *dev,
+ struct xenvif *vif)
{
int err = 0;
char *node;
@@ -708,7 +715,7 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
return err;
}
-static void xen_unregister_watchers(struct xenvif *vif)
+static void xen_unregister_credit_watch(struct xenvif *vif)
{
if (vif->credit_watch.node) {
unregister_xenbus_watch(&vif->credit_watch);
@@ -717,6 +724,75 @@ static void xen_unregister_watchers(struct xenvif *vif)
}
}
+static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ struct xenvif *vif = container_of(watch, struct xenvif,
+ mcast_ctrl_watch);
+ struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
+ int val;
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend,
+ "request-multicast-control", "%d", &val) < 0)
+ val = 0;
+ vif->multicast_control = !!val;
+}
+
+static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
+ struct xenvif *vif)
+{
+ int err = 0;
+ char *node;
+ unsigned maxlen = strlen(dev->otherend) +
+ sizeof("/request-multicast-control");
+
+ if (vif->mcast_ctrl_watch.node) {
+ pr_err_ratelimited("Watch is already registered\n");
+ return -EADDRINUSE;
+ }
+
+ node = kmalloc(maxlen, GFP_KERNEL);
+ if (!node) {
+ pr_err("Failed to allocate memory for watch\n");
+ return -ENOMEM;
+ }
+ snprintf(node, maxlen, "%s/request-multicast-control",
+ dev->otherend);
+ vif->mcast_ctrl_watch.node = node;
+ vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
+ err = register_xenbus_watch(&vif->mcast_ctrl_watch);
+ if (err) {
+ pr_err("Failed to set watcher %s\n",
+ vif->mcast_ctrl_watch.node);
+ kfree(node);
+ vif->mcast_ctrl_watch.node = NULL;
+ vif->mcast_ctrl_watch.callback = NULL;
+ }
+ return err;
+}
+
+static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
+{
+ if (vif->mcast_ctrl_watch.node) {
+ unregister_xenbus_watch(&vif->mcast_ctrl_watch);
+ kfree(vif->mcast_ctrl_watch.node);
+ vif->mcast_ctrl_watch.node = NULL;
+ }
+}
+
+static void xen_register_watchers(struct xenbus_device *dev,
+ struct xenvif *vif)
+{
+ xen_register_credit_watch(dev, vif);
+ xen_register_mcast_ctrl_watch(dev, vif);
+}
+
+static void xen_unregister_watchers(struct xenvif *vif)
+{
+ xen_unregister_mcast_ctrl_watch(vif);
+ xen_unregister_credit_watch(vif);
+}
+
static void unregister_hotplug_status_watch(struct backend_info *be)
{
if (be->have_hotplug_status_watch) {
@@ -1030,11 +1106,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
val = 0;
vif->ipv6_csum = !!val;
- if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
- "%d", &val) < 0)
- val = 0;
- vif->multicast_control = !!val;
-
return 0;
}
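
The net effect of the xen-netback hunks above is that "request-multicast-control" is no longer sampled once at connect time but watched, so the frontend can toggle it while connected. For reference, the registration half of that watch pattern reduces to roughly the following condensed sketch (names are hypothetical; as in the full version above, the error path must free the node):

static void demo_changed(struct xenbus_watch *watch,
			 const char **vec, unsigned int len)
{
	/* re-read the node that fired and update driver state */
}

static int demo_register(struct xenbus_device *dev,
			 struct xenbus_watch *watch, const char *leaf)
{
	char *node = kasprintf(GFP_KERNEL, "%s/%s", dev->otherend, leaf);
	int err;

	if (!node)
		return -ENOMEM;
	watch->node = node;
	watch->callback = demo_changed;
	err = register_xenbus_watch(watch);
	if (err) {
		kfree(node);
		watch->node = NULL;
		watch->callback = NULL;
	}
	return err;
}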
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 918e8f2eac47..e0e8afd27849 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -246,18 +246,10 @@ static int microread_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct microread_i2c_phy *phy;
- struct microread_nfc_platform_data *pdata =
- dev_get_platdata(&client->dev);
int r;
dev_dbg(&client->dev, "client %p\n", client);
- if (!pdata) {
- nfc_err(&client->dev, "client %p: missing platform data\n",
- client);
- return -EINVAL;
- }
-
phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
GFP_KERNEL);
if (!phy)
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 76c318444304..45d0e667d7ae 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -877,20 +877,8 @@ exit_state_wait_secure_write_answer:
static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client)
{
struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
- const struct acpi_device_id *id;
struct gpio_desc *gpiod_en, *gpiod_fw;
- struct device *dev;
-
- if (!client)
- return -EINVAL;
-
- dev = &client->dev;
-
- /* Match the struct device against a given list of ACPI IDs */
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
-
- if (!id)
- return -ENODEV;
+ struct device *dev = &client->dev;
/* Get EN GPIO from ACPI */
gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1,
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 64a90252c57f..5f97da1947e3 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -19,7 +19,7 @@
#include <linux/completion.h>
#include <linux/firmware.h>
-#include <linux/crypto.h>
+#include <crypto/hash.h>
#include <crypto/sha.h>
#include "s3fwrn5.h"
@@ -429,8 +429,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
{
struct s3fwrn5_fw_image *fw = &fw_info->fw;
u8 hash_data[SHA1_DIGEST_SIZE];
- struct scatterlist sg;
- struct hash_desc desc;
+ struct crypto_shash *tfm;
u32 image_size, off;
int ret;
@@ -438,12 +437,31 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
/* Compute SHA of firmware data */
- sg_init_one(&sg, fw->image, image_size);
- desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
- crypto_hash_init(&desc);
- crypto_hash_update(&desc, &sg, image_size);
- crypto_hash_final(&desc, hash_data);
- crypto_free_hash(desc.tfm);
+ tfm = crypto_alloc_shash("sha1", 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ dev_err(&fw_info->ndev->nfc_dev->dev,
+ "Cannot allocate shash (code=%d)\n", ret);
+ goto out;
+ }
+
+ {
+ SHASH_DESC_ON_STACK(desc, tfm);
+
+ desc->tfm = tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ ret = crypto_shash_digest(desc, fw->image, image_size,
+ hash_data);
+ shash_desc_zero(desc);
+ }
+
+ crypto_free_shash(tfm);
+ if (ret) {
+ dev_err(&fw_info->ndev->nfc_dev->dev,
+ "Cannot compute hash (code=%d)\n", ret);
+ goto out;
+ }
/* Firmware update process */
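
The conversion above replaces the removed crypto_hash API with the synchronous shash interface; a one-shot digest over a linear buffer reduces to the following minimal sketch (kernel context assumed, error handling trimmed to the essentials):

#include <crypto/hash.h>

/* One-shot SHA-1 over a linear buffer with the shash API,
 * mirroring the conversion above. */
static int demo_sha1(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_digest(desc, buf, len, out);
		shash_desc_zero(desc);	/* scrub the on-stack state */
	}
	crypto_free_shash(tfm);
	return ret;
}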
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 588803ad6847..6ccba0d862df 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
return 0;
}
-static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
- phys_addr_t *db_addr,
- resource_size_t *db_size)
-{
- struct amd_ntb_dev *ndev = ntb_ndev(ntb);
-
- if (db_addr)
- *db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
- if (db_size)
- *db_size = sizeof(u32);
-
- return 0;
-}
-
static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
@@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
return 0;
}
-static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
- phys_addr_t *spad_addr)
-{
- struct amd_ntb_dev *ndev = ntb_ndev(ntb);
-
- if (idx < 0 || idx >= ndev->spad_count)
- return -EINVAL;
-
- if (spad_addr)
- *spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
- ndev->peer_spad + (idx << 2));
- return 0;
-}
-
static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
struct amd_ntb_dev *ndev = ntb_ndev(ntb);
@@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = {
.db_clear = amd_ntb_db_clear,
.db_set_mask = amd_ntb_db_set_mask,
.db_clear_mask = amd_ntb_db_clear_mask,
- .peer_db_addr = amd_ntb_peer_db_addr,
.peer_db_set = amd_ntb_peer_db_set,
.spad_count = amd_ntb_spad_count,
.spad_read = amd_ntb_spad_read,
.spad_write = amd_ntb_spad_write,
- .peer_spad_addr = amd_ntb_peer_spad_addr,
.peer_spad_read = amd_ntb_peer_spad_read,
.peer_spad_write = amd_ntb_peer_spad_write,
};
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index ec4775f0ec16..2ef9d9130864 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -124,6 +124,7 @@ struct ntb_transport_qp {
bool client_ready;
bool link_is_up;
+ bool active;
u8 qp_num; /* Only 64 QPs are allowed. 0-63 */
u64 qp_bit;
@@ -719,6 +720,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
qp->link_is_up = false;
+ qp->active = false;
qp->tx_index = 0;
qp->rx_index = 0;
@@ -827,7 +829,7 @@ static void ntb_transport_link_work(struct work_struct *work)
struct pci_dev *pdev = ndev->pdev;
resource_size_t size;
u32 val;
- int rc, i, spad;
+ int rc = 0, i, spad;
/* send the local info, in the opposite order of the way we read it */
for (i = 0; i < nt->mw_count; i++) {
@@ -897,6 +899,13 @@ static void ntb_transport_link_work(struct work_struct *work)
out1:
for (i = 0; i < nt->mw_count; i++)
ntb_free_mw(nt, i);
+
+ /* if there's an actual failure, we should just bail */
+ if (rc < 0) {
+ ntb_link_disable(ndev);
+ return;
+ }
+
out:
if (ntb_link_is_up(ndev, NULL, NULL) == 1)
schedule_delayed_work(&nt->link_work,
@@ -926,11 +935,13 @@ static void ntb_qp_link_work(struct work_struct *work)
if (val & BIT(qp->qp_num)) {
dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
qp->link_is_up = true;
+ qp->active = true;
if (qp->event_handler)
qp->event_handler(qp->cb_data, qp->link_is_up);
- tasklet_schedule(&qp->rxc_db_work);
+ if (qp->active)
+ tasklet_schedule(&qp->rxc_db_work);
} else if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -1411,7 +1422,8 @@ static void ntb_transport_rxc_db(unsigned long data)
if (i == qp->rx_max_entry) {
/* there is more work to do */
- tasklet_schedule(&qp->rxc_db_work);
+ if (qp->active)
+ tasklet_schedule(&qp->rxc_db_work);
} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
/* the doorbell bit is set: clear it */
ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
@@ -1422,7 +1434,8 @@ static void ntb_transport_rxc_db(unsigned long data)
* ntb_process_rxc and clearing the doorbell bit:
* there might be some more work to do.
*/
- tasklet_schedule(&qp->rxc_db_work);
+ if (qp->active)
+ tasklet_schedule(&qp->rxc_db_work);
}
}
@@ -1760,6 +1773,8 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
pdev = qp->ndev->pdev;
+ qp->active = false;
+
if (qp->tx_dma_chan) {
struct dma_chan *chan = qp->tx_dma_chan;
/* Putting the dma_chan to NULL will force any new traffic to be
@@ -1793,7 +1808,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
qp_bit = BIT_ULL(qp->qp_num);
ntb_db_set_mask(qp->ndev, qp_bit);
- tasklet_disable(&qp->rxc_db_work);
+ tasklet_kill(&qp->rxc_db_work);
cancel_delayed_work_sync(&qp->link_work);
@@ -1886,7 +1901,8 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
- tasklet_schedule(&qp->rxc_db_work);
+ if (qp->active)
+ tasklet_schedule(&qp->rxc_db_work);
return 0;
}
@@ -2069,7 +2085,8 @@ static void ntb_transport_doorbell_callback(void *data, int vector)
qp_num = __ffs(db_bits);
qp = &nt->qp_vec[qp_num];
- tasklet_schedule(&qp->rxc_db_work);
+ if (qp->active)
+ tasklet_schedule(&qp->rxc_db_work);
db_bits &= ~BIT_ULL(qp_num);
}
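
The new qp->active flag and the switch from tasklet_disable() to tasklet_kill() close a teardown race: tasklet_disable() only stalls a scheduled tasklet, while tasklet_kill() waits for a running one to finish, and gating every tasklet_schedule() on qp->active keeps the tasklet from being re-armed mid-teardown. The resulting shutdown ordering, condensed into a sketch of the sequence used in ntb_transport_free_queue() above:

/* Teardown ordering sketch (as in ntb_transport_free_queue above) */
static void demo_qp_teardown(struct ntb_transport_qp *qp)
{
	qp->active = false;			/* stop new tasklet_schedule() */
	ntb_db_set_mask(qp->ndev, BIT_ULL(qp->qp_num));	/* mask doorbell */
	tasklet_kill(&qp->rxc_db_work);		/* wait out a running tasklet */
	cancel_delayed_work_sync(&qp->link_work);
}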
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index c8a37ba4b4f9..8dfce9c9aad0 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -178,7 +178,7 @@ static void perf_copy_callback(void *data)
atomic_dec(&pctx->dma_sync);
}
-static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
+static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
char *src, size_t size)
{
struct perf_ctx *perf = pctx->perf;
@@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
dma_cookie_t cookie;
size_t src_off, dst_off;
struct perf_mw *mw = &perf->mw;
- u64 vbase, dst_vaddr;
+ void __iomem *vbase;
+ void __iomem *dst_vaddr;
dma_addr_t dst_phys;
int retries = 0;
@@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
}
device = chan->device;
- src_off = (size_t)src & ~PAGE_MASK;
- dst_off = (size_t)dst & ~PAGE_MASK;
+ src_off = (uintptr_t)src & ~PAGE_MASK;
+ dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, src_off, dst_off, size))
return -ENODEV;
- vbase = (u64)(u64 *)mw->vbase;
- dst_vaddr = (u64)(u64 *)dst;
+ vbase = mw->vbase;
+ dst_vaddr = dst;
dst_phys = mw->phys_addr + (dst_vaddr - vbase);
unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
@@ -261,13 +262,13 @@ err_get_unmap:
return 0;
}
-static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
+static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
u64 buf_size, u64 win_size, u64 total)
{
int chunks, total_chunks, i;
int copied_chunks = 0;
u64 copied = 0, result;
- char *tmp = dst;
+ char __iomem *tmp = dst;
u64 perf, diff_us;
ktime_t kstart, kstop, kdiff;
@@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data)
struct perf_ctx *perf = pctx->perf;
struct pci_dev *pdev = perf->ntb->pdev;
struct perf_mw *mw = &perf->mw;
- char *dst;
+ char __iomem *dst;
u64 win_size, buf_size, total;
void *src;
int rc, node, i;
@@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data)
if (buf_size > MAX_TEST_SIZE)
buf_size = MAX_TEST_SIZE;
- dst = (char *)mw->vbase;
+ dst = (char __iomem *)mw->vbase;
atomic_inc(&perf->tsync);
while (atomic_read(&perf->tsync) != perf->perf_threads)
@@ -424,6 +425,7 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
struct perf_mw *mw = &perf->mw;
size_t xlat_size, buf_size;
+ int rc;
if (!size)
return -EINVAL;
@@ -447,6 +449,13 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
mw->buf_size = 0;
}
+ rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size);
+ if (rc) {
+ dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
+ perf_free_mw(perf);
+ return -EIO;
+ }
+
return 0;
}
@@ -541,6 +550,8 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
return 0;
buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
out_offset = snprintf(buf, 64, "%d\n", perf->run);
ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
kfree(buf);
@@ -548,6 +559,21 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
return ret;
}
+static void threads_cleanup(struct perf_ctx *perf)
+{
+ struct pthr_ctx *pctx;
+ int i;
+
+ perf->run = false;
+ for (i = 0; i < MAX_THREADS; i++) {
+ pctx = &perf->pthr_ctx[i];
+ if (pctx->thread) {
+ kthread_stop(pctx->thread);
+ pctx->thread = NULL;
+ }
+ }
+}
+
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *offp)
{
@@ -563,17 +589,9 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
if (atomic_read(&perf->tsync) == 0)
perf->run = false;
- if (perf->run) {
- /* lets stop the threads */
- perf->run = false;
- for (i = 0; i < MAX_THREADS; i++) {
- if (perf->pthr_ctx[i].thread) {
- kthread_stop(perf->pthr_ctx[i].thread);
- perf->pthr_ctx[i].thread = NULL;
- } else
- break;
- }
- } else {
+ if (perf->run)
+ threads_cleanup(perf);
+ else {
perf->run = true;
if (perf->perf_threads > MAX_THREADS) {
@@ -604,17 +622,11 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
kthread_create_on_node(ntb_perf_thread,
(void *)pctx,
node, "ntb_perf %d", i);
- if (pctx->thread)
+ if (IS_ERR(pctx->thread)) {
+ pctx->thread = NULL;
+ goto err;
+ } else
wake_up_process(pctx->thread);
- else {
- perf->run = false;
- for (i = 0; i < MAX_THREADS; i++) {
- if (pctx->thread) {
- kthread_stop(pctx->thread);
- pctx->thread = NULL;
- }
- }
- }
if (perf->run == false)
return -ENXIO;
@@ -623,6 +635,10 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
}
return count;
+
+err:
+ threads_cleanup(perf);
+ return -ENXIO;
}
static const struct file_operations ntb_perf_debugfs_run = {
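
The rewritten error path also fixes a real bug: kthread_create_on_node() never returns NULL on failure, it returns an ERR_PTR(), so the old "if (pctx->thread)" check always looked like success. Sketched in the context of debugfs_run_write() above, the correct pattern is:

struct task_struct *t;

t = kthread_create_on_node(ntb_perf_thread, pctx, node, "ntb_perf %d", i);
if (IS_ERR(t)) {
	rc = PTR_ERR(t);	/* e.g. -ENOMEM; never NULL on failure */
	goto err;
}
pctx->thread = t;
wake_up_process(t);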
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 91a336ea8c4f..e9ff9229d942 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -31,8 +31,6 @@ struct nd_blk_device {
u32 internal_lbasize;
};
-static int nd_blk_major;
-
static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
{
return blk_dev->nsblk->lbasize - blk_dev->sector_size;
@@ -264,7 +262,6 @@ static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
}
disk->driverfs_dev = &ndns->dev;
- disk->major = nd_blk_major;
disk->first_minor = 0;
disk->fops = &nd_blk_fops;
disk->private_data = blk_dev;
@@ -358,25 +355,12 @@ static struct nd_device_driver nd_blk_driver = {
static int __init nd_blk_init(void)
{
- int rc;
-
- rc = register_blkdev(0, "nd_blk");
- if (rc < 0)
- return rc;
-
- nd_blk_major = rc;
- rc = nd_driver_register(&nd_blk_driver);
-
- if (rc < 0)
- unregister_blkdev(nd_blk_major, "nd_blk");
-
- return rc;
+ return nd_driver_register(&nd_blk_driver);
}
static void __exit nd_blk_exit(void)
{
driver_unregister(&nd_blk_driver.drv);
- unregister_blkdev(nd_blk_major, "nd_blk");
}
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index efb2c1ceef98..f068b6513cd2 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -31,8 +31,6 @@ enum log_ent_request {
LOG_OLD_ENT
};
-static int btt_major;
-
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
void *buf, size_t n)
{
@@ -1206,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
{
struct btt *btt = bdev->bd_disk->private_data;
- btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, 0);
return 0;
}
@@ -1246,7 +1244,6 @@ static int btt_blk_init(struct btt *btt)
nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
- btt->btt_disk->major = btt_major;
btt->btt_disk->first_minor = 0;
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
@@ -1423,22 +1420,11 @@ EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
static int __init nd_btt_init(void)
{
- int rc;
-
- btt_major = register_blkdev(0, "btt");
- if (btt_major < 0)
- return btt_major;
+ int rc = 0;
debugfs_root = debugfs_create_dir("btt", NULL);
- if (IS_ERR_OR_NULL(debugfs_root)) {
+ if (IS_ERR_OR_NULL(debugfs_root))
rc = -ENXIO;
- goto err_debugfs;
- }
-
- return 0;
-
- err_debugfs:
- unregister_blkdev(btt_major, "btt");
return rc;
}
@@ -1446,7 +1432,6 @@ static int __init nd_btt_init(void)
static void __exit nd_btt_exit(void)
{
debugfs_remove_recursive(debugfs_root);
- unregister_blkdev(btt_major, "btt");
}
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
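
Both of these init-path simplifications are safe because the disks are created with GENHD_FL_EXT_DEVT (visible as context in the pmem diff below), so add_disk() assigns a dev_t from the extended range and no statically registered block major is needed. A minimal sketch of that pattern (demo_fops is hypothetical, and the request queue setup is elided):

static int demo_attach_disk(struct device *parent)
{
	struct gendisk *disk = alloc_disk(0);	/* 0 minors: ext-devt only */

	if (!disk)
		return -ENOMEM;
	disk->flags = GENHD_FL_EXT_DEVT;	/* dev_t assigned at add_disk() */
	disk->fops = &demo_fops;		/* hypothetical fops */
	disk->driverfs_dev = parent;
	add_disk(disk);
	return 0;
}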
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 5d28e9405f32..19f822d7f652 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -62,7 +62,7 @@ static int nvdimm_bus_match(struct device *dev, struct device_driver *drv)
{
struct nd_device_driver *nd_drv = to_nd_device_driver(drv);
- return test_bit(to_nd_device_type(dev), &nd_drv->type);
+ return !!test_bit(to_nd_device_type(dev), &nd_drv->type);
}
static struct module *to_bus_provider(struct device *dev)
@@ -133,6 +133,78 @@ static int nvdimm_bus_remove(struct device *dev)
return rc;
}
+void nd_device_notify(struct device *dev, enum nvdimm_event event)
+{
+ device_lock(dev);
+ if (dev->driver) {
+ struct nd_device_driver *nd_drv;
+
+ nd_drv = to_nd_device_driver(dev->driver);
+ if (nd_drv->notify)
+ nd_drv->notify(dev, event);
+ }
+ device_unlock(dev);
+}
+EXPORT_SYMBOL(nd_device_notify);
+
+void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+
+ if (!nvdimm_bus)
+ return;
+
+ /* caller is responsible for holding a reference on the device */
+ nd_device_notify(&nd_region->dev, event);
+}
+EXPORT_SYMBOL_GPL(nvdimm_region_notify);
+
+long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
+ unsigned int len)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc;
+ struct nd_cmd_clear_error clear_err;
+ struct nd_cmd_ars_cap ars_cap;
+ u32 clear_err_unit, mask;
+ int cmd_rc, rc;
+
+ if (!nvdimm_bus)
+ return -ENXIO;
+
+ nd_desc = nvdimm_bus->nd_desc;
+ if (!nd_desc->ndctl)
+ return -ENXIO;
+
+ memset(&ars_cap, 0, sizeof(ars_cap));
+ ars_cap.address = phys;
+ ars_cap.length = len;
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
+ sizeof(ars_cap), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ if (cmd_rc < 0)
+ return cmd_rc;
+ clear_err_unit = ars_cap.clear_err_unit;
+ if (!clear_err_unit || !is_power_of_2(clear_err_unit))
+ return -ENXIO;
+
+ mask = clear_err_unit - 1;
+ if ((phys | len) & mask)
+ return -ENXIO;
+ memset(&clear_err, 0, sizeof(clear_err));
+ clear_err.address = phys;
+ clear_err.length = len;
+ rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
+ sizeof(clear_err), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ if (cmd_rc < 0)
+ return cmd_rc;
+ return clear_err.cleared;
+}
+EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
+
static struct bus_type nvdimm_bus_type = {
.name = "nd",
.uevent = nvdimm_bus_uevent,
@@ -335,7 +407,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
[ND_CMD_IMPLEMENTED] = { },
[ND_CMD_SMART] = {
.out_num = 2,
- .out_sizes = { 4, 8, },
+ .out_sizes = { 4, 128, },
},
[ND_CMD_SMART_THRESHOLD] = {
.out_num = 2,
@@ -395,6 +467,12 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
.out_num = 3,
.out_sizes = { 4, 4, UINT_MAX, },
},
+ [ND_CMD_CLEAR_ERROR] = {
+ .in_num = 2,
+ .in_sizes = { 8, 8, },
+ .out_num = 3,
+ .out_sizes = { 4, 4, 8, },
+ },
};
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
@@ -463,17 +541,37 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
} while (true);
}
+static int pmem_active(struct device *dev, void *data)
+{
+ if (is_nd_pmem(dev) && dev->driver)
+ return -EBUSY;
+ return 0;
+}
+
/* set_config requires an idle interleave set */
-static int nd_cmd_clear_to_send(struct nvdimm *nvdimm, unsigned int cmd)
+static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
+ struct nvdimm *nvdimm, unsigned int cmd)
{
- struct nvdimm_bus *nvdimm_bus;
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+
+ /* ask the bus provider if it would like to block this request */
+ if (nd_desc->clear_to_send) {
+ int rc = nd_desc->clear_to_send(nd_desc, nvdimm, cmd);
+
+ if (rc)
+ return rc;
+ }
+
+ /* require clear error to go through the pmem driver */
+ if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
+ return device_for_each_child(&nvdimm_bus->dev, NULL,
+ pmem_active);
if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
return 0;
- nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
+ /* prevent label manipulation while the kernel owns label updates */
wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
-
if (atomic_read(&nvdimm->busy))
return -EBUSY;
return 0;
@@ -513,10 +611,11 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
/* fail write commands (when read-only) */
if (read_only)
- switch (ioctl_cmd) {
- case ND_IOCTL_VENDOR:
- case ND_IOCTL_SET_CONFIG_DATA:
- case ND_IOCTL_ARS_START:
+ switch (cmd) {
+ case ND_CMD_VENDOR:
+ case ND_CMD_SET_CONFIG_DATA:
+ case ND_CMD_ARS_START:
+ case ND_CMD_CLEAR_ERROR:
dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
@@ -583,11 +682,11 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
nvdimm_bus_lock(&nvdimm_bus->dev);
- rc = nd_cmd_clear_to_send(nvdimm, cmd);
+ rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd);
if (rc)
goto out_unlock;
- rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len);
+ rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
if (rc < 0)
goto out_unlock;
if (copy_to_user(p, buf, buf_len))
@@ -602,14 +701,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long id = (long) file->private_data;
- int rc = -ENXIO, read_only;
+ int rc = -ENXIO, ro;
struct nvdimm_bus *nvdimm_bus;
- read_only = (O_RDWR != (file->f_flags & O_ACCMODE));
+ ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
mutex_lock(&nvdimm_bus_list_mutex);
list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
if (nvdimm_bus->id == id) {
- rc = __nd_ioctl(nvdimm_bus, NULL, read_only, cmd, arg);
+ rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
break;
}
}
@@ -633,10 +732,10 @@ static int match_dimm(struct device *dev, void *data)
static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- int rc = -ENXIO, read_only;
+ int rc = -ENXIO, ro;
struct nvdimm_bus *nvdimm_bus;
- read_only = (O_RDWR != (file->f_flags & O_ACCMODE));
+ ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
mutex_lock(&nvdimm_bus_list_mutex);
list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
struct device *dev = device_find_child(&nvdimm_bus->dev,
@@ -647,7 +746,7 @@ static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
continue;
nvdimm = to_nvdimm(dev);
- rc = __nd_ioctl(nvdimm_bus, nvdimm, read_only, cmd, arg);
+ rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
put_device(dev);
break;
}
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 2e2832b83c93..182a93fe3712 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -298,6 +298,15 @@ static int flush_regions_dimms(struct device *dev, void *data)
static ssize_t wait_probe_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ int rc;
+
+ if (nd_desc->flush_probe) {
+ rc = nd_desc->flush_probe(nd_desc);
+ if (rc)
+ return rc;
+ }
nd_synchronize();
device_for_each_child(dev, NULL, flush_regions_dimms);
return sprintf(buf, "1\n");
@@ -408,33 +417,11 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
set_badblock(bb, start_sector, num_sectors);
}
-/**
- * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
- * @ndns: the namespace containing poison ranges
- * @bb: badblocks instance to populate
- * @offset: offset at the start of the namespace before 'sector 0'
- *
- * The poison list generated during NFIT initialization may contain multiple,
- * possibly overlapping ranges in the SPA (System Physical Address) space.
- * Compare each of these ranges to the namespace currently being initialized,
- * and add badblocks to the gendisk for all matching sub-ranges
- */
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
- struct badblocks *bb, resource_size_t offset)
+static void badblocks_populate(struct list_head *poison_list,
+ struct badblocks *bb, const struct resource *res)
{
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
- struct nvdimm_bus *nvdimm_bus;
- struct list_head *poison_list;
- u64 ns_start, ns_end, ns_size;
struct nd_poison *pl;
- ns_size = nvdimm_namespace_capacity(ndns) - offset;
- ns_start = nsio->res.start + offset;
- ns_end = nsio->res.end;
-
- nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
- poison_list = &nvdimm_bus->poison_list;
if (list_empty(poison_list))
return;
@@ -442,37 +429,68 @@ void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
u64 pl_end = pl->start + pl->length - 1;
/* Discard intervals with no intersection */
- if (pl_end < ns_start)
+ if (pl_end < res->start)
continue;
- if (pl->start > ns_end)
+ if (pl->start > res->end)
continue;
/* Deal with any overlap after start of the namespace */
- if (pl->start >= ns_start) {
+ if (pl->start >= res->start) {
u64 start = pl->start;
u64 len;
- if (pl_end <= ns_end)
+ if (pl_end <= res->end)
len = pl->length;
else
- len = ns_start + ns_size - pl->start;
- __add_badblock_range(bb, start - ns_start, len);
+ len = res->start + resource_size(res)
+ - pl->start;
+ __add_badblock_range(bb, start - res->start, len);
continue;
}
/* Deal with overlap for poison starting before the namespace */
- if (pl->start < ns_start) {
+ if (pl->start < res->start) {
u64 len;
- if (pl_end < ns_end)
- len = pl->start + pl->length - ns_start;
+ if (pl_end < res->end)
+ len = pl->start + pl->length - res->start;
else
- len = ns_size;
+ len = resource_size(res);
__add_badblock_range(bb, 0, len);
}
}
}
-EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison);
-static int __add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+/**
+ * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
+ * @region: parent region of the range to interrogate
+ * @bb: badblocks instance to populate
+ * @res: resource range to consider
+ *
+ * The poison list generated during bus initialization may contain
+ * multiple, possibly overlapping physical address ranges. Compare each
+ * of these ranges to the resource range currently being initialized,
+ * and add badblocks entries for all matching sub-ranges
+ */
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+ struct badblocks *bb, const struct resource *res)
+{
+ struct nvdimm_bus *nvdimm_bus;
+ struct list_head *poison_list;
+
+ if (!is_nd_pmem(&nd_region->dev)) {
+ dev_WARN_ONCE(&nd_region->dev, 1,
+ "%s only valid for pmem regions\n", __func__);
+ return;
+ }
+ nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+ poison_list = &nvdimm_bus->poison_list;
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ badblocks_populate(poison_list, bb, res);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
+
+static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
struct nd_poison *pl;
@@ -487,12 +505,12 @@ static int __add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
return 0;
}
-int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
struct nd_poison *pl;
if (list_empty(&nvdimm_bus->poison_list))
- return __add_poison(nvdimm_bus, addr, length);
+ return add_poison(nvdimm_bus, addr, length);
/*
* There is a chance this is a duplicate, check for those first.
@@ -512,7 +530,18 @@ int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
* as any overlapping ranges will get resolved when the list is consumed
* and converted to badblocks
*/
- return __add_poison(nvdimm_bus, addr, length);
+ return add_poison(nvdimm_bus, addr, length);
+}
+
+int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
+{
+ int rc;
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ rc = bus_add_poison(nvdimm_bus, addr, length);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
@@ -553,7 +582,11 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
+
+ nvdimm_bus_lock(&nvdimm_bus->dev);
free_poison_list(&nvdimm_bus->poison_list);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+
nvdimm_bus_destroy_ndctl(nvdimm_bus);
device_unregister(&nvdimm_bus->dev);
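
badblocks_populate() is interval intersection: each poison range is clamped to the resource being initialized and reported relative to the resource start. The two overlap branches above collapse to the following equivalent standalone clamp (plain C, illustrative values):

#include <stdio.h>
#include <stdint.h>

/* Clamp [p_start, p_end] to [r_start, r_end]; report offset/len
 * relative to r_start, as badblocks_populate() does above. */
static void clamp_range(uint64_t p_start, uint64_t p_end,
			uint64_t r_start, uint64_t r_end)
{
	uint64_t start, end;

	if (p_end < r_start || p_start > r_end)
		return;				/* no intersection */
	start = p_start > r_start ? p_start : r_start;
	end = p_end < r_end ? p_end : r_end;
	printf("offset=%llu len=%llu\n",
	       (unsigned long long)(start - r_start),
	       (unsigned long long)(end - start + 1));
}

int main(void)
{
	clamp_range(50, 199, 100, 299);		/* overlaps before start */
	clamp_range(150, 400, 100, 299);	/* overlaps past the end */
	return 0;
}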
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 651b8d19d324..c56f88217924 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -75,7 +75,7 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
memset(cmd, 0, sizeof(*cmd));
nd_desc = nvdimm_bus->nd_desc;
return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd));
+ ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
}
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
@@ -120,7 +120,7 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
cmd->in_offset = offset;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
ND_CMD_GET_CONFIG_DATA, cmd,
- cmd->in_length + sizeof(*cmd));
+ cmd->in_length + sizeof(*cmd), NULL);
if (rc || cmd->status) {
rc = -ENXIO;
break;
@@ -171,7 +171,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
status = ((void *) cmd) + cmd_size - sizeof(u32);
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_SET_CONFIG_DATA, cmd, cmd_size);
+ ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
if (rc || *status) {
rc = rc ? rc : -ENXIO;
break;
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index b0045a505dc8..95825b38559a 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -55,7 +55,7 @@ static int e820_pmem_probe(struct platform_device *pdev)
for (p = iomem_resource.child; p ; p = p->sibling) {
struct nd_region_desc ndr_desc;
- if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0)
+ if (p->desc != IORES_DESC_PERSISTENT_MEMORY_LEGACY)
continue;
memset(&ndr_desc, 0, sizeof(ndr_desc));
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 9edf7eb7d17c..f5cb88601359 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -133,6 +133,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
bool pmem_should_map_pages(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_namespace_io *nsio;
if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
return false;
@@ -143,6 +144,12 @@ bool pmem_should_map_pages(struct device *dev)
if (is_nd_pfn(dev) || is_nd_btt(dev))
return false;
+ nsio = to_nd_namespace_io(dev);
+ if (region_intersects(nsio->res.start, resource_size(&nsio->res),
+ IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED)
+ return false;
+
#ifdef ARCH_MEMREMAP_PMEM
return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ba1633b9da31..875c524fafb0 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -18,6 +18,7 @@
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
+#include <linux/nd.h>
#include "label.h"
enum {
@@ -168,6 +169,7 @@ int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
+void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
size_t len);
ssize_t nd_sector_size_show(unsigned long current_lbasize,
@@ -184,6 +186,8 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
+long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
+ unsigned int len);
struct nd_btt *to_nd_btt(struct device *dev);
struct nd_gen_sb {
@@ -262,8 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name);
-void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
- struct badblocks *bb, resource_size_t offset);
+void nvdimm_badblocks_populate(struct nd_region *nd_region,
+ struct badblocks *bb, const struct resource *res);
int nd_blk_region_init(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index cc243754acef..8e343a3ca873 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -15,6 +15,7 @@
#define __NVDIMM_PFN_H
#include <linux/types.h>
+#include <linux/mmzone.h>
#define PFN_SIG_LEN 16
#define PFN_SIG "NVDIMM_PFN_INFO\0"
@@ -26,10 +27,28 @@ struct nd_pfn_sb {
__le32 flags;
__le16 version_major;
__le16 version_minor;
- __le64 dataoff;
+ __le64 dataoff; /* relative to namespace_base + start_pad */
__le64 npfns;
__le32 mode;
- u8 padding[4012];
+ /* minor-version-1 additions for section alignment */
+ __le32 start_pad;
+ __le32 end_trunc;
+ u8 padding[4004];
__le64 checksum;
};
+
+#ifdef CONFIG_SPARSEMEM
+#define PFN_SECTION_ALIGN_DOWN(x) SECTION_ALIGN_DOWN(x)
+#define PFN_SECTION_ALIGN_UP(x) SECTION_ALIGN_UP(x)
+#else
+/*
+ * In this case ZONE_DEVICE=n and we will disable 'pfn' device support,
+ * but we still want pmem to compile.
+ */
+#define PFN_SECTION_ALIGN_DOWN(x) (x)
+#define PFN_SECTION_ALIGN_UP(x) (x)
+#endif
+
+#define PHYS_SECTION_ALIGN_DOWN(x) PFN_PHYS(PFN_SECTION_ALIGN_DOWN(PHYS_PFN(x)))
+#define PHYS_SECTION_ALIGN_UP(x) PFN_PHYS(PFN_SECTION_ALIGN_UP(PHYS_PFN(x)))
#endif /* __NVDIMM_PFN_H */
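
These macros round a physical address to a SPARSEMEM section boundary by converting to a PFN, aligning, and converting back. A standalone illustration, assuming x86_64's 128 MiB sections and 4 KiB pages (both arch-specific assumptions):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define SECTION_SHIFT	27			/* 128 MiB, x86_64 assumption */
#define PFN_PER_SEC	(1UL << (SECTION_SHIFT - PAGE_SHIFT))

static uint64_t sect_align_down(uint64_t phys)
{
	uint64_t pfn = phys >> PAGE_SHIFT;

	return (pfn & ~(PFN_PER_SEC - 1)) << PAGE_SHIFT;
}

static uint64_t sect_align_up(uint64_t phys)
{
	uint64_t pfn = (phys >> PAGE_SHIFT) + PFN_PER_SEC - 1;

	return (pfn & ~(PFN_PER_SEC - 1)) << PAGE_SHIFT;
}

int main(void)
{
	uint64_t addr = 0x240001000ULL;		/* just past a 128M boundary */

	printf("down=%#llx up=%#llx\n",
	       (unsigned long long)sect_align_down(addr),
	       (unsigned long long)sect_align_up(addr));
	return 0;
}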
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index ae81a2f1da50..e071e214feba 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -205,11 +205,67 @@ static ssize_t namespace_store(struct device *dev,
}
static DEVICE_ATTR_RW(namespace);
+static ssize_t resource_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ ssize_t rc;
+
+ device_lock(dev);
+ if (dev->driver) {
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ u64 offset = __le64_to_cpu(pfn_sb->dataoff);
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+ rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
+ + start_pad + offset);
+ } else {
+ /* no address to convey if the pfn instance is disabled */
+ rc = -ENXIO;
+ }
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(resource);
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ ssize_t rc;
+
+ device_lock(dev);
+ if (dev->driver) {
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ u64 offset = __le64_to_cpu(pfn_sb->dataoff);
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+ u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+ rc = sprintf(buf, "%llu\n", (unsigned long long)
+ resource_size(&nsio->res) - start_pad
+ - end_trunc - offset);
+ } else {
+ /* no size to convey if the pfn instance is disabled */
+ rc = -ENXIO;
+ }
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(size);
+
static struct attribute *nd_pfn_attributes[] = {
&dev_attr_mode.attr,
&dev_attr_namespace.attr,
&dev_attr_uuid.attr,
&dev_attr_align.attr,
+ &dev_attr_resource.attr,
+ &dev_attr_size.attr,
NULL,
};
@@ -299,6 +355,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
return -ENODEV;
+ if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
+ pfn_sb->start_pad = 0;
+ pfn_sb->end_trunc = 0;
+ }
+
switch (le32_to_cpu(pfn_sb->mode)) {
case PFN_MODE_RAM:
case PFN_MODE_PMEM:
@@ -315,7 +376,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
} else {
/* from init we validate */
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
- return -EINVAL;
+ return -ENODEV;
}
if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 8d0b54670184..f798899338ed 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -43,12 +43,13 @@ struct pmem_device {
phys_addr_t data_offset;
u64 pfn_flags;
void __pmem *virt_addr;
+ /* immutable base size of the namespace */
size_t size;
+ /* trim size when namespace capacity has been section aligned */
+ u32 pfn_pad;
struct badblocks bb;
};
-static int pmem_major;
-
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
if (bb->count) {
@@ -62,26 +63,70 @@ static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
return false;
}
+static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
+ unsigned int len)
+{
+ struct device *dev = disk_to_dev(pmem->pmem_disk);
+ sector_t sector;
+ long cleared;
+
+ sector = (offset - pmem->data_offset) / 512;
+ cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
+
+ if (cleared > 0 && cleared / 512) {
+ dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
+ __func__, (unsigned long long) sector,
+ cleared / 512, cleared / 512 > 1 ? "s" : "");
+ badblocks_clear(&pmem->bb, sector, cleared / 512);
+ }
+ invalidate_pmem(pmem->virt_addr + offset, len);
+}
+
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
unsigned int len, unsigned int off, int rw,
sector_t sector)
{
+ int rc = 0;
+ bool bad_pmem = false;
void *mem = kmap_atomic(page);
phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
+ if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
+ bad_pmem = true;
+
if (rw == READ) {
- if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
- return -EIO;
- memcpy_from_pmem(mem + off, pmem_addr, len);
- flush_dcache_page(page);
+ if (unlikely(bad_pmem))
+ rc = -EIO;
+ else {
+ rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+ flush_dcache_page(page);
+ }
} else {
+ /*
+ * Note that we write the data both before and after
+ * clearing poison. The write before clear poison
+ * handles situations where the latest written data is
+ * preserved and the clear poison operation simply marks
+ * the address range as valid without changing the data.
+ * In this case application software can assume that an
+ * interrupted write will either return the new good
+ * data or an error.
+ *
+ * However, if pmem_clear_poison() leaves the data in an
+ * indeterminate state we need to perform the write
+ * after clear poison.
+ */
flush_dcache_page(page);
memcpy_to_pmem(pmem_addr, mem + off, len);
+ if (unlikely(bad_pmem)) {
+ pmem_clear_poison(pmem, pmem_off, len);
+ memcpy_to_pmem(pmem_addr, mem + off, len);
+ }
}
kunmap_atomic(mem);
- return 0;
+ return rc;
}
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
@@ -120,7 +165,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct pmem_device *pmem = bdev->bd_disk->private_data;
int rc;
- rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
if (rw & WRITE)
wmb_pmem();
@@ -145,7 +190,7 @@ static long pmem_direct_access(struct block_device *bdev, sector_t sector,
*kaddr = pmem->virt_addr + offset;
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
- return pmem->size - offset;
+ return pmem->size - pmem->pfn_pad - offset;
}
static const struct block_device_operations pmem_fops = {
@@ -213,7 +258,9 @@ static void pmem_detach_disk(struct pmem_device *pmem)
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
int nid = dev_to_node(dev);
+ struct resource bb_res;
struct gendisk *disk;
blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
@@ -228,21 +275,29 @@ static int pmem_attach_disk(struct device *dev,
return -ENOMEM;
}
- disk->major = pmem_major;
- disk->first_minor = 0;
disk->fops = &pmem_fops;
disk->private_data = pmem;
disk->queue = pmem->pmem_queue;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
disk->driverfs_dev = dev;
- set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
+ set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
+ / 512);
pmem->pmem_disk = disk;
devm_exit_badblocks(dev, &pmem->bb);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);
-
+ bb_res.start = nsio->res.start + pmem->data_offset;
+ bb_res.end = nsio->res.end;
+ if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+ bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
+ bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+ }
+ nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
+ &bb_res);
disk->bb = &pmem->bb;
add_disk(disk);
revalidate_disk(disk);
@@ -265,7 +320,7 @@ static int pmem_rw_bytes(struct nd_namespace_common *ndns,
if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
return -EIO;
- memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
+ return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
} else {
memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
wmb_pmem();
@@ -279,6 +334,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
struct nd_namespace_common *ndns = nd_pfn->ndns;
+ u32 start_pad = 0, end_trunc = 0;
+ resource_size_t start, size;
+ struct nd_namespace_io *nsio;
struct nd_region *nd_region;
unsigned long npfns;
phys_addr_t offset;
@@ -304,21 +362,56 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
}
memset(pfn_sb, 0, sizeof(*pfn_sb));
- npfns = (pmem->size - SZ_8K) / SZ_4K;
+
+ /*
+ * Check if pmem collides with 'System RAM' when section aligned and
+ * trim it accordingly
+ */
+ nsio = to_nd_namespace_io(&ndns->dev);
+ start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+ size = resource_size(&nsio->res);
+ if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED) {
+
+ start = nsio->res.start;
+ start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+ }
+
+ start = nsio->res.start;
+ size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+ if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED) {
+ size = resource_size(&nsio->res);
+ end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+ }
+
+ if (start_pad + end_trunc)
+ dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+ dev_name(&ndns->dev), start_pad + end_trunc);
+
/*
* Note, we use 64 here for the standard size of struct page,
* debugging options may cause it to be larger in which case the
* implementation will limit the pfns advertised through
* ->direct_access() to those that are included in the memmap.
*/
+ start += start_pad;
+ npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
if (nd_pfn->mode == PFN_MODE_PMEM)
- offset = ALIGN(SZ_8K + 64 * npfns, nd_pfn->align);
+ offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+ - start;
else if (nd_pfn->mode == PFN_MODE_RAM)
- offset = ALIGN(SZ_8K, nd_pfn->align);
+ offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
else
goto err;
- npfns = (pmem->size - offset) / SZ_4K;
+ if (offset + start_pad + end_trunc >= pmem->size) {
+ dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+ dev_name(&ndns->dev));
+ goto err;
+ }
+
+ npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
pfn_sb->dataoff = cpu_to_le64(offset);
pfn_sb->npfns = cpu_to_le64(npfns);
@@ -326,6 +419,9 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
pfn_sb->version_major = cpu_to_le16(1);
+ pfn_sb->version_minor = cpu_to_le16(1);
+ pfn_sb->start_pad = cpu_to_le32(start_pad);
+ pfn_sb->end_trunc = cpu_to_le32(end_trunc);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
@@ -356,41 +452,56 @@ static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
return 0;
}
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+ unsigned long base_pfn = PHYS_PFN(base);
+
+ return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+ unsigned long reserve = PHYS_PFN(SZ_8K);
+ unsigned long base_pfn = PHYS_PFN(base);
+
+ reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+ return reserve;
+}
+
+static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
- struct device *dev = &nd_pfn->dev;
- struct nd_region *nd_region;
- struct vmem_altmap *altmap;
- struct nd_pfn_sb *pfn_sb;
- struct pmem_device *pmem;
- struct request_queue *q;
- phys_addr_t offset;
int rc;
+ struct resource res;
+ struct request_queue *q;
+ struct pmem_device *pmem;
+ struct vmem_altmap *altmap;
+ struct device *dev = &nd_pfn->dev;
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+ u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ resource_size_t base = nsio->res.start + start_pad;
struct vmem_altmap __altmap = {
- .base_pfn = __phys_to_pfn(nsio->res.start),
- .reserve = __phys_to_pfn(SZ_8K),
+ .base_pfn = init_altmap_base(base),
+ .reserve = init_altmap_reserve(base),
};
- if (!nd_pfn->uuid || !nd_pfn->ndns)
- return -ENODEV;
-
- nd_region = to_nd_region(dev->parent);
- rc = nd_pfn_init(nd_pfn);
- if (rc)
- return rc;
-
- pfn_sb = nd_pfn->pfn_sb;
- offset = le64_to_cpu(pfn_sb->dataoff);
+ pmem = dev_get_drvdata(dev);
+ pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+ pmem->pfn_pad = start_pad + end_trunc;
nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
if (nd_pfn->mode == PFN_MODE_RAM) {
- if (offset < SZ_8K)
+ if (pmem->data_offset < SZ_8K)
return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
altmap = NULL;
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = (resource_size(&nsio->res) - offset)
+ nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ PAGE_SIZE;
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
@@ -398,7 +509,7 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
le64_to_cpu(nd_pfn->pfn_sb->npfns),
nd_pfn->npfns);
altmap = & __altmap;
- altmap->free = __phys_to_pfn(offset - SZ_8K);
+ altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
altmap->alloc = 0;
} else {
rc = -ENXIO;
@@ -406,10 +517,12 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
}
/* establish pfn range for lookup, and switch to direct map */
- pmem = dev_get_drvdata(dev);
q = pmem->pmem_queue;
+ memcpy(&res, &nsio->res, sizeof(res));
+ res.start += start_pad;
+ res.end -= end_trunc;
devm_memunmap(dev, (void __force *) pmem->virt_addr);
- pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res,
+ pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
&q->q_usage_counter, altmap);
pmem->pfn_flags |= PFN_MAP;
if (IS_ERR(pmem->virt_addr)) {
@@ -418,7 +531,6 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
}
/* attach pmem disk in "pfn-mode" */
- pmem->data_offset = offset;
rc = pmem_attach_disk(dev, ndns, pmem);
if (rc)
goto err;
@@ -427,6 +539,22 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
err:
nvdimm_namespace_detach_pfn(ndns);
return rc;
+
+}
+
+static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
+{
+ struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
+ int rc;
+
+ if (!nd_pfn->uuid || !nd_pfn->ndns)
+ return -ENODEV;
+
+ rc = nd_pfn_init(nd_pfn);
+ if (rc)
+ return rc;
+ /* we need a valid pfn_sb before we can init a vmem_altmap */
+ return __nvdimm_namespace_attach_pfn(nd_pfn);
}
static int nd_pmem_probe(struct device *dev)
@@ -450,7 +578,7 @@ static int nd_pmem_probe(struct device *dev)
ndns->rw_bytes = pmem_rw_bytes;
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
+ nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
if (is_nd_btt(dev)) {
/* btt allocates its own request_queue */
@@ -488,12 +616,38 @@ static int nd_pmem_remove(struct device *dev)
return 0;
}
+static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+{
+ struct pmem_device *pmem = dev_get_drvdata(dev);
+ struct nd_namespace_common *ndns = pmem->ndns;
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct resource res = {
+ .start = nsio->res.start + pmem->data_offset,
+ .end = nsio->res.end,
+ };
+
+ if (event != NVDIMM_REVALIDATE_POISON)
+ return;
+
+ if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+ res.start += __le32_to_cpu(pfn_sb->start_pad);
+ res.end -= __le32_to_cpu(pfn_sb->end_trunc);
+ }
+
+ nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
+}
+
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
.probe = nd_pmem_probe,
.remove = nd_pmem_remove,
+ .notify = nd_pmem_notify,
.drv = {
.name = "nd_pmem",
},
@@ -502,26 +656,13 @@ static struct nd_device_driver nd_pmem_driver = {
static int __init pmem_init(void)
{
- int error;
-
- pmem_major = register_blkdev(0, "pmem");
- if (pmem_major < 0)
- return pmem_major;
-
- error = nd_driver_register(&nd_pmem_driver);
- if (error) {
- unregister_blkdev(pmem_major, "pmem");
- return error;
- }
-
- return 0;
+ return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);
static void pmem_exit(void)
{
driver_unregister(&nd_pmem_driver.drv);
- unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);
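
Worth noting how nd_pfn_init() above picks start_pad and end_trunc: when section-aligning either end of the namespace would mix in 'System RAM' (REGION_MIXED), the start is padded up and the end truncated down to the nearest section boundary. A worked standalone example with hypothetical addresses and an assumed 128 MiB section size:

#include <stdio.h>
#include <stdint.h>

#define SEC	(1ULL << 27)		/* 128 MiB section, assumption */
#define UP(x)	(((x) + SEC - 1) & ~(SEC - 1))
#define DOWN(x)	((x) & ~(SEC - 1))

int main(void)
{
	/* hypothetical namespace that abuts System RAM on both ends */
	uint64_t start = 0x23f000000ULL, end = 0x28f000000ULL;
	uint64_t start_pad = UP(start) - start;		/* 16 MiB  */
	uint64_t end_trunc = end - DOWN(end);		/* 112 MiB */

	printf("start_pad=%lluM end_trunc=%lluM\n",
	       (unsigned long long)(start_pad >> 20),
	       (unsigned long long)(end_trunc >> 20));
	return 0;
}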
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 7da63eac78ee..4b7715e29cff 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -93,9 +93,21 @@ static int nd_region_remove(struct device *dev)
return 0;
}
+static int child_notify(struct device *dev, void *data)
+{
+ nd_device_notify(dev, *(enum nvdimm_event *) data);
+ return 0;
+}
+
+static void nd_region_notify(struct device *dev, enum nvdimm_event event)
+{
+ device_for_each_child(dev, &event, child_notify);
+}
+
static struct nd_device_driver nd_region_driver = {
.probe = nd_region_probe,
.remove = nd_region_remove,
+ .notify = nd_region_notify,
.drv = {
.name = "nd_region",
},
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index b586d84f2518..c894841c6456 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,6 +1,10 @@
+config NVME_CORE
+ tristate
+
config BLK_DEV_NVME
tristate "NVM Express block device"
depends on PCI && BLOCK
+ select NVME_CORE
---help---
The NVM Express driver is for solid state drives directly
connected to the PCI or PCI Express bus. If you know you
@@ -11,7 +15,7 @@ config BLK_DEV_NVME
config BLK_DEV_NVME_SCSI
bool "SCSI emulation for NVMe device nodes"
- depends on BLK_DEV_NVME
+ depends on NVME_CORE
---help---
This adds support for the SG_IO ioctl on the NVMe character
and block device nodes, as well as a translation for a small
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 51bf90871549..9a3ca892b4a7 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,6 +1,8 @@
-obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_NVME_CORE) += nvme-core.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
-lightnvm-$(CONFIG_NVM) := lightnvm.o
-nvme-y += core.o pci.o $(lightnvm-y)
-nvme-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
+nvme-core-y := core.o
+nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
+nvme-core-$(CONFIG_NVM) += lightnvm.o
+nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 03c46412fff4..643f457131c2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -33,6 +33,20 @@
#define NVME_MINORS (1U << MINORBITS)
+unsigned char admin_timeout = 60;
+module_param(admin_timeout, byte, 0644);
+MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
+EXPORT_SYMBOL_GPL(admin_timeout);
+
+unsigned char nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
+EXPORT_SYMBOL_GPL(nvme_io_timeout);
+
+unsigned char shutdown_timeout = 5;
+module_param(shutdown_timeout, byte, 0644);
+MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
+
static int nvme_major;
module_param(nvme_major, int, 0);
@@ -40,7 +54,7 @@ static int nvme_char_major;
module_param(nvme_char_major, int, 0);
static LIST_HEAD(nvme_ctrl_list);
-DEFINE_SPINLOCK(dev_list_lock);
+static DEFINE_SPINLOCK(dev_list_lock);
static struct class *nvme_class;
@@ -72,11 +86,21 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
spin_lock(&dev_list_lock);
ns = disk->private_data;
- if (ns && !kref_get_unless_zero(&ns->kref))
- ns = NULL;
+ if (ns) {
+ if (!kref_get_unless_zero(&ns->kref))
+ goto fail;
+ if (!try_module_get(ns->ctrl->ops->module))
+ goto fail_put_ns;
+ }
spin_unlock(&dev_list_lock);
return ns;
+
+fail_put_ns:
+ kref_put(&ns->kref, nvme_free_ns);
+fail:
+ spin_unlock(&dev_list_lock);
+ return NULL;
}
void nvme_requeue_req(struct request *req)
@@ -89,6 +113,7 @@ void nvme_requeue_req(struct request *req)
blk_mq_kick_requeue_list(req->q);
spin_unlock_irqrestore(req->q->queue_lock, flags);
}
+EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags)
@@ -108,17 +133,18 @@ struct request *nvme_alloc_request(struct request_queue *q,
req->cmd = (unsigned char *)cmd;
req->cmd_len = sizeof(struct nvme_command);
- req->special = (void *)0;
return req;
}
+EXPORT_SYMBOL_GPL(nvme_alloc_request);
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
+ struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+ unsigned timeout)
{
struct request *req;
int ret;
@@ -128,6 +154,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ req->special = cqe;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -136,8 +163,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
}
blk_execute_rq(req->q, NULL, req, 0);
- if (result)
- *result = (u32)(uintptr_t)req->special;
ret = req->errors;
out:
blk_mq_free_request(req);
@@ -147,8 +172,9 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
}
+EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen,
@@ -156,6 +182,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
u32 *result, unsigned timeout)
{
bool write = cmd->common.opcode & 1;
+ struct nvme_completion cqe;
struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
struct request *req;
@@ -168,6 +195,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ req->special = &cqe;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -222,7 +250,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
blk_execute_rq(req->q, disk, req, 0);
ret = req->errors;
if (result)
- *result = (u32)(uintptr_t)req->special;
+ *result = le32_to_cpu(cqe.result);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
@@ -303,6 +331,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
+ struct nvme_completion cqe;
+ int ret;
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
@@ -310,13 +340,18 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
c.features.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+ if (ret >= 0)
+ *result = le32_to_cpu(cqe.result);
+ return ret;
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
+ struct nvme_completion cqe;
+ int ret;
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
@@ -324,7 +359,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+ if (ret >= 0)
+ *result = le32_to_cpu(cqe.result);
+ return ret;
}
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
@@ -364,6 +402,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
*count = min(*count, nr_io_queues);
return 0;
}
+EXPORT_SYMBOL_GPL(nvme_set_queue_count);
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
@@ -504,7 +543,10 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- nvme_put_ns(disk->private_data);
+ struct nvme_ns *ns = disk->private_data;
+
+ module_put(ns->ctrl->ops->module);
+ nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -545,8 +587,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
static void nvme_config_discard(struct nvme_ns *ns)
{
+ struct nvme_ctrl *ctrl = ns->ctrl;
u32 logical_block_size = queue_logical_block_size(ns->queue);
- ns->queue->limits.discard_zeroes_data = 0;
+
+ if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
+ ns->queue->limits.discard_zeroes_data = 1;
+ else
+ ns->queue->limits.discard_zeroes_data = 0;
+
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
@@ -566,8 +614,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
return -ENODEV;
}
if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
- dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
- __func__, ns->ctrl->instance, ns->ns_id);
+ dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
+ __func__);
return -ENODEV;
}
if (id->ncap == 0) {
@@ -577,7 +625,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
if (nvme_nvm_register(ns->queue, disk->disk_name)) {
- dev_warn(ns->ctrl->dev,
+ dev_warn(disk_to_dev(ns->disk),
"%s: LightNVM init failure\n", __func__);
kfree(id);
return -ENODEV;
@@ -750,7 +798,7 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
- dev_err(ctrl->dev,
+ dev_err(ctrl->device,
"Device not ready; aborting %s\n", enabled ?
"initialisation" : "reset");
return -ENODEV;
@@ -778,6 +826,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
return ret;
return nvme_wait_ready(ctrl, cap, false);
}
+EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
@@ -790,7 +839,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
int ret;
if (page_shift < dev_page_min) {
- dev_err(ctrl->dev,
+ dev_err(ctrl->device,
"Minimum device page size %u too large for host (%u)\n",
1 << dev_page_min, 1 << page_shift);
return -ENODEV;
@@ -809,6 +858,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
return ret;
return nvme_wait_ready(ctrl, cap, true);
}
+EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
@@ -831,7 +881,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
- dev_err(ctrl->dev,
+ dev_err(ctrl->device,
"Device shutdown incomplete; abort shutdown\n");
return -ENODEV;
}
@@ -839,6 +889,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
struct request_queue *q)
@@ -870,13 +921,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
if (ret) {
- dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
+ dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
return ret;
}
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
if (ret) {
- dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+ dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
return ret;
}
page_shift = NVME_CAP_MPSMIN(cap) + 12;
@@ -886,13 +937,15 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ret = nvme_identify_ctrl(ctrl, &id);
if (ret) {
- dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+ dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
return -EIO;
}
+ ctrl->vid = le16_to_cpu(id->vid);
ctrl->oncs = le16_to_cpup(&id->oncs);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
+ ctrl->cntlid = le16_to_cpup(&id->cntlid);
memcpy(ctrl->serial, id->sn, sizeof(id->sn));
memcpy(ctrl->model, id->mn, sizeof(id->mn));
memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
@@ -919,6 +972,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
kfree(id);
return 0;
}
+EXPORT_SYMBOL_GPL(nvme_init_identify);
static int nvme_dev_open(struct inode *inode, struct file *file)
{
@@ -965,13 +1019,13 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
- dev_warn(ctrl->dev,
+ dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
ret = -EINVAL;
goto out_unlock;
}
- dev_warn(ctrl->dev,
+ dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
kref_get(&ns->kref);
mutex_unlock(&ctrl->namespaces_mutex);
@@ -997,7 +1051,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
- dev_warn(ctrl->dev, "resetting controller\n");
+ dev_warn(ctrl->device, "resetting controller\n");
return ctrl->ops->reset_ctrl(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
return nvme_reset_subsystem(ctrl);
@@ -1028,6 +1082,30 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int serial_len = sizeof(ctrl->serial);
+ int model_len = sizeof(ctrl->model);
+
+ if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+ return sprintf(buf, "eui.%16phN\n", ns->uuid);
+
+ if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
+ return sprintf(buf, "eui.%8phN\n", ns->eui);
+
+ while (ctrl->serial[serial_len - 1] == ' ')
+ serial_len--;
+ while (ctrl->model[model_len - 1] == ' ')
+ model_len--;
+
+ return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
+ serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
+}
+static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
+
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1053,6 +1131,7 @@ static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
static struct attribute *nvme_ns_attrs[] = {
+ &dev_attr_wwid.attr,
&dev_attr_uuid.attr,
&dev_attr_eui.attr,
&dev_attr_nsid.attr,
@@ -1081,7 +1160,7 @@ static const struct attribute_group nvme_ns_attr_group = {
.is_visible = nvme_attrs_are_visible,
};
-#define nvme_show_function(field) \
+#define nvme_show_str_function(field) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
@@ -1090,15 +1169,26 @@ static ssize_t field##_show(struct device *dev, \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
-nvme_show_function(model);
-nvme_show_function(serial);
-nvme_show_function(firmware_rev);
+#define nvme_show_int_function(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
+ return sprintf(buf, "%d\n", ctrl->field); \
+} \
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_str_function(model);
+nvme_show_str_function(serial);
+nvme_show_str_function(firmware_rev);
+nvme_show_int_function(cntlid);
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_model.attr,
&dev_attr_serial.attr,
&dev_attr_firmware_rev.attr,
+ &dev_attr_cntlid.attr,
NULL
};
@@ -1308,6 +1398,7 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
}
+EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
@@ -1316,6 +1407,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
}
+EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
static DEFINE_IDA(nvme_instance_ida);
@@ -1347,13 +1439,14 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl)
}
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
- {
+{
device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
spin_lock(&dev_list_lock);
list_del(&ctrl->node);
spin_unlock(&dev_list_lock);
}
+EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
static void nvme_free_ctrl(struct kref *kref)
{
@@ -1370,6 +1463,7 @@ void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
kref_put(&ctrl->kref, nvme_free_ctrl);
}
+EXPORT_SYMBOL_GPL(nvme_put_ctrl);
/*
 * Initialize an NVMe controller's structures. This needs to be called during
@@ -1394,14 +1488,13 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
MKDEV(nvme_char_major, ctrl->instance),
- dev, nvme_dev_attr_groups,
+ ctrl, nvme_dev_attr_groups,
"nvme%d", ctrl->instance);
if (IS_ERR(ctrl->device)) {
ret = PTR_ERR(ctrl->device);
goto out_release_instance;
}
get_device(ctrl->device);
- dev_set_drvdata(ctrl->device, ctrl);
ida_init(&ctrl->ns_ida);
spin_lock(&dev_list_lock);
@@ -1414,6 +1507,7 @@ out_release_instance:
out:
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/**
* nvme_kill_queues(): Ends all namespace queues
@@ -1446,6 +1540,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
}
mutex_unlock(&ctrl->namespaces_mutex);
}
+EXPORT_SYMBOL_GPL(nvme_kill_queues);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
@@ -1462,6 +1557,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
}
mutex_unlock(&ctrl->namespaces_mutex);
}
+EXPORT_SYMBOL_GPL(nvme_stop_queues);
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
@@ -1475,6 +1571,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
mutex_unlock(&ctrl->namespaces_mutex);
}
+EXPORT_SYMBOL_GPL(nvme_start_queues);
int __init nvme_core_init(void)
{
@@ -1514,3 +1611,8 @@ void nvme_core_exit(void)
class_destroy(nvme_class);
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+module_init(nvme_core_init);
+module_exit(nvme_core_exit);
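
The core-module hunks above change the synchronous command path: instead of returning CQE Dword 0 through a u32 out-parameter smuggled in req->special, the caller now hands __nvme_submit_sync_cmd() a stack-allocated struct nvme_completion and decodes the result itself. A minimal sketch of the resulting calling convention, condensed from nvme_get_features() above (the helper name example_get_feature is invented for illustration):

/*
 * Sketch only: mirrors the pattern used by nvme_get_features() and
 * nvme_set_features() after this change. The completion lives on the
 * caller's stack; the IRQ path copies the whole CQE into it through
 * req->special before blk_mq_complete_request() runs.
 */
static int example_get_feature(struct nvme_ctrl *ctrl, unsigned fid,
		u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(fid);

	/* negative: Linux errno; positive: NVMe status; 0: success */
	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &c, &cqe, NULL, 0, 0);
	if (ret >= 0 && result)
		*result = le32_to_cpu(cqe.result);
	return ret;
}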
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 6bb15e4926dc..9461dd639acd 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,6 +146,14 @@ struct nvme_nvm_command {
};
};
+struct nvme_nvm_completion {
+ __le64 result; /* Used by LightNVM to return ppa completions */
+ __le16 sq_head; /* how much of this queue may be reclaimed */
+ __le16 sq_id; /* submission queue that generated this entry */
+ __u16 command_id; /* of the command which completed */
+ __le16 status; /* did the command fail, and if so, why? */
+};
+
#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
__u16 num_pairs;
@@ -379,8 +387,31 @@ out:
return ret;
}
+static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
+ int nr_dst_blks, u8 *dst_blks,
+ int nr_src_blks, u8 *src_blks)
+{
+ int blk, offset, pl, blktype;
+
+ for (blk = 0; blk < nr_dst_blks; blk++) {
+ offset = blk * nvmdev->plane_mode;
+ blktype = src_blks[offset];
+
+ /* Bad blocks on any planes take precedence over other types */
+ for (pl = 0; pl < nvmdev->plane_mode; pl++) {
+ if (src_blks[offset + pl] &
+ (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+ blktype = src_blks[offset + pl];
+ break;
+ }
+ }
+
+ dst_blks[blk] = blktype;
+ }
+}
+
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
- int nr_blocks, nvm_bb_update_fn *update_bbtbl,
+ int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
void *priv)
{
struct request_queue *q = nvmdev->q;
@@ -388,7 +419,9 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
+ u8 *dst_blks = NULL;
+ int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
+ int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
int ret = 0;
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -399,6 +432,12 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
if (!bb_tbl)
return -ENOMEM;
+ dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
+ if (!dst_blks) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
bb_tbl, tblsz);
if (ret) {
@@ -420,16 +459,21 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+ if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
ret = -EINVAL;
dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
- le32_to_cpu(bb_tbl->tblks), nr_blocks);
+ le32_to_cpu(bb_tbl->tblks), nr_src_blks);
goto out;
}
+ nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
+ nr_src_blks, bb_tbl->blk);
+
ppa = dev_to_generic_addr(nvmdev, ppa);
- ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
+ ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
+
out:
+ kfree(dst_blks);
kfree(bb_tbl);
return ret;
}
@@ -471,6 +515,10 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
static void nvme_nvm_end_io(struct request *rq, int error)
{
struct nvm_rq *rqd = rq->end_io_data;
+ struct nvme_nvm_completion *cqe = rq->special;
+
+ if (cqe)
+ rqd->ppa_status = le64_to_cpu(cqe->result);
nvm_end_io(rqd, error);
@@ -490,7 +538,8 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
if (IS_ERR(rq))
return -ENOMEM;
- cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+ cmd = kzalloc(sizeof(struct nvme_nvm_command) +
+ sizeof(struct nvme_nvm_completion), GFP_KERNEL);
if (!cmd) {
blk_mq_free_request(rq);
return -ENOMEM;
@@ -509,7 +558,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
rq->cmd = (unsigned char *)cmd;
rq->cmd_len = sizeof(struct nvme_nvm_command);
- rq->special = (void *)0;
+ rq->special = cmd + 1;
rq->end_io_data = rqd;
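
Note how the submission path above co-allocates the command and its private completion area in a single kzalloc() and then points rq->special just past the command; since cmd is a typed pointer, cmd + 1 advances by sizeof(struct nvme_nvm_command). An illustrative condensation (not additional driver code):

/*
 * Layout used by nvme_nvm_submit_io() above: one allocation, two
 * regions. nvme_nvm_end_io() later reads the ppa status back out of
 * the completion half through rq->special.
 */
struct nvme_nvm_command *cmd;

cmd = kzalloc(sizeof(struct nvme_nvm_command) +
	      sizeof(struct nvme_nvm_completion), GFP_KERNEL);
if (!cmd)
	return -ENOMEM;

rq->special = cmd + 1;	/* == (void *)cmd + sizeof(*cmd) */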
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index fb15ba5f5d19..f846da4eb338 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -59,6 +59,12 @@ enum nvme_quirks {
* correctly.
*/
NVME_QUIRK_IDENTIFY_CNS = (1 << 1),
+
+ /*
+ * The controller deterministically returns 0's on reads to discarded
+ * logical blocks.
+ */
+ NVME_QUIRK_DISCARD_ZEROES = (1 << 2),
};
struct nvme_ctrl {
@@ -78,6 +84,7 @@ struct nvme_ctrl {
char serial[20];
char model[40];
char firmware_rev[8];
+ int cntlid;
u32 ctrl_config;
@@ -85,6 +92,7 @@ struct nvme_ctrl {
u32 max_hw_sectors;
u32 stripe_size;
u16 oncs;
+ u16 vid;
atomic_t abort_limit;
u8 event_limit;
u8 vwc;
@@ -124,6 +132,7 @@ struct nvme_ns {
};
struct nvme_ctrl_ops {
+ struct module *module;
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -255,7 +264,8 @@ void nvme_requeue_req(struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
+ struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+ unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen, u32 *result,
unsigned timeout);
@@ -273,8 +283,6 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
-extern spinlock_t dev_list_lock;
-
struct sg_io_hdr;
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
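
The new module member in nvme_ctrl_ops lets the core pin the transport driver while a namespace block device is open: nvme_get_ns_from_disk() takes the reference and nvme_release() drops it, so the transport module cannot be unloaded under an active user. A sketch of both halves (ops entries other than .module elided):

/* transport side: identify the owning module, as the PCIe driver
 * below does with THIS_MODULE */
static const struct nvme_ctrl_ops example_ctrl_ops = {
	.module	= THIS_MODULE,
	/* .reg_read32, .reg_write32, ... unchanged */
};

/* core side, open path (see nvme_get_ns_from_disk() earlier) */
if (!try_module_get(ns->ctrl->ops->module))
	goto fail_put_ns;

/* core side, release path (see nvme_release() earlier) */
module_put(ns->ctrl->ops->module);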
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 680f5780750c..4fd733ff72b1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -27,7 +27,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
-#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -39,6 +38,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>
+#include <linux/timer.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/unaligned.h>
@@ -57,18 +57,6 @@
#define NVME_NR_AEN_COMMANDS 1
#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
-unsigned char admin_timeout = 60;
-module_param(admin_timeout, byte, 0644);
-MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
-
-unsigned char nvme_io_timeout = 30;
-module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
-MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
-
-unsigned char shutdown_timeout = 5;
-module_param(shutdown_timeout, byte, 0644);
-MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
-
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
@@ -76,10 +64,7 @@ static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
-static LIST_HEAD(dev_list);
-static struct task_struct *nvme_thread;
static struct workqueue_struct *nvme_workq;
-static wait_queue_head_t nvme_kthread_wait;
struct nvme_dev;
struct nvme_queue;
@@ -92,7 +77,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
- struct list_head node;
struct nvme_queue **queues;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
@@ -110,6 +94,8 @@ struct nvme_dev {
struct work_struct reset_work;
struct work_struct scan_work;
struct work_struct remove_work;
+ struct work_struct async_work;
+ struct timer_list watchdog_timer;
struct mutex shutdown_lock;
bool subsystem;
void __iomem *cmb;
@@ -148,7 +134,6 @@ struct nvme_queue {
u32 __iomem *q_db;
u16 q_depth;
s16 cq_vector;
- u16 sq_head;
u16 sq_tail;
u16 cq_head;
u16 qid;
@@ -303,17 +288,20 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
u16 status = le16_to_cpu(cqe->status) >> 1;
u32 result = le32_to_cpu(cqe->result);
- if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
+ if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
++dev->ctrl.event_limit;
+ queue_work(nvme_workq, &dev->async_work);
+ }
+
if (status != NVME_SC_SUCCESS)
return;
switch (result & 0xff07) {
case NVME_AER_NOTICE_NS_CHANGED:
- dev_info(dev->dev, "rescanning\n");
+ dev_info(dev->ctrl.device, "rescanning\n");
nvme_queue_scan(dev);
default:
- dev_warn(dev->dev, "async event result %08x\n", result);
+ dev_warn(dev->ctrl.device, "async event result %08x\n", result);
}
}
@@ -727,7 +715,7 @@ static void nvme_complete_rq(struct request *req)
}
if (unlikely(iod->aborted)) {
- dev_warn(dev->dev,
+ dev_warn(dev->ctrl.device,
"completing aborted command with status: %04x\n",
req->errors);
}
@@ -735,6 +723,13 @@ static void nvme_complete_rq(struct request *req)
blk_mq_end_request(req, error);
}
+/* We read the CQE phase first to check if the rest of the entry is valid */
+static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
+ u16 phase)
+{
+ return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
+}
+
static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
{
u16 head, phase;
@@ -742,14 +737,10 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
head = nvmeq->cq_head;
phase = nvmeq->cq_phase;
- for (;;) {
+ while (nvme_cqe_valid(nvmeq, head, phase)) {
struct nvme_completion cqe = nvmeq->cqes[head];
- u16 status = le16_to_cpu(cqe.status);
struct request *req;
- if ((status & 1) != phase)
- break;
- nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
if (++head == nvmeq->q_depth) {
head = 0;
phase = !phase;
@@ -759,7 +750,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
*tag = -1;
if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
- dev_warn(nvmeq->q_dmadev,
+ dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
cqe.command_id, le16_to_cpu(cqe.sq_id));
continue;
@@ -778,11 +769,9 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
}
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
- u32 result = le32_to_cpu(cqe.result);
- req->special = (void *)(uintptr_t)result;
- }
- blk_mq_complete_request(req, status >> 1);
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
+ memcpy(req->special, &cqe, sizeof(cqe));
+ blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
}
@@ -823,18 +812,16 @@ static irqreturn_t nvme_irq(int irq, void *data)
static irqreturn_t nvme_irq_check(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
- struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
- if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
- return IRQ_NONE;
- return IRQ_WAKE_THREAD;
+ if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+ return IRQ_WAKE_THREAD;
+ return IRQ_NONE;
}
static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
struct nvme_queue *nvmeq = hctx->driver_data;
- if ((le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
- nvmeq->cq_phase) {
+ if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
spin_lock_irq(&nvmeq->q_lock);
__nvme_process_cq(nvmeq, &tag);
spin_unlock_irq(&nvmeq->q_lock);
@@ -846,15 +833,22 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
return 0;
}
-static void nvme_submit_async_event(struct nvme_dev *dev)
+static void nvme_async_event_work(struct work_struct *work)
{
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
+ struct nvme_queue *nvmeq = dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
- c.common.command_id = NVME_AQ_BLKMQ_DEPTH + --dev->ctrl.event_limit;
- __nvme_submit_cmd(dev->queues[0], &c);
+ spin_lock_irq(&nvmeq->q_lock);
+ while (dev->ctrl.event_limit > 0) {
+ c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
+ --dev->ctrl.event_limit;
+ __nvme_submit_cmd(nvmeq, &c);
+ }
+ spin_unlock_irq(&nvmeq->q_lock);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -924,12 +918,10 @@ static void abort_endio(struct request *req, int error)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = iod->nvmeq;
- u32 result = (u32)(uintptr_t)req->special;
u16 status = req->errors;
- dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+ dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
atomic_inc(&nvmeq->dev->ctrl.abort_limit);
-
blk_mq_free_request(req);
}
@@ -948,7 +940,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* shutdown, so we return BLK_EH_HANDLED.
*/
if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
- dev_warn(dev->dev,
+ dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
@@ -962,7 +954,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* returned to the driver, or if this is the admin queue.
*/
if (!nvmeq->qid || iod->aborted) {
- dev_warn(dev->dev,
+ dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
@@ -988,8 +980,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
cmd.abort.cid = req->tag;
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
- dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
- req->tag, nvmeq->qid);
+ dev_warn(nvmeq->dev->ctrl.device,
+ "I/O %d QID %d timeout, aborting\n",
+ req->tag, nvmeq->qid);
abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
BLK_MQ_REQ_NOWAIT);
@@ -1018,7 +1011,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
if (!blk_mq_request_started(req))
return;
- dev_dbg_ratelimited(nvmeq->q_dmadev,
+ dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
"Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
status = NVME_SC_ABORT_REQ;
@@ -1173,9 +1166,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->qid = qid;
nvmeq->cq_vector = -1;
dev->queues[qid] = nvmeq;
-
- /* make sure queue descriptor is set before queue count, for kthread */
- mb();
dev->queue_count++;
return nvmeq;
@@ -1360,53 +1350,31 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
return result;
}
-static int nvme_kthread(void *data)
-{
- struct nvme_dev *dev, *next;
-
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock(&dev_list_lock);
- list_for_each_entry_safe(dev, next, &dev_list, node) {
- int i;
- u32 csts = readl(dev->bar + NVME_REG_CSTS);
-
- /*
- * Skip controllers currently under reset.
- */
- if (work_pending(&dev->reset_work) || work_busy(&dev->reset_work))
- continue;
-
- if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
- csts & NVME_CSTS_CFS) {
- if (queue_work(nvme_workq, &dev->reset_work)) {
- dev_warn(dev->dev,
- "Failed status: %x, reset controller\n",
- readl(dev->bar + NVME_REG_CSTS));
- }
- continue;
- }
- for (i = 0; i < dev->queue_count; i++) {
- struct nvme_queue *nvmeq = dev->queues[i];
- if (!nvmeq)
- continue;
- spin_lock_irq(&nvmeq->q_lock);
- nvme_process_cq(nvmeq);
-
- while (i == 0 && dev->ctrl.event_limit > 0)
- nvme_submit_async_event(dev);
- spin_unlock_irq(&nvmeq->q_lock);
- }
+static void nvme_watchdog_timer(unsigned long data)
+{
+ struct nvme_dev *dev = (struct nvme_dev *)data;
+ u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
+ /*
+ * Skip controllers currently under reset.
+ */
+ if (!work_pending(&dev->reset_work) && !work_busy(&dev->reset_work) &&
+ ((csts & NVME_CSTS_CFS) ||
+ (dev->subsystem && (csts & NVME_CSTS_NSSRO)))) {
+ if (queue_work(nvme_workq, &dev->reset_work)) {
+ dev_warn(dev->dev,
+ "Failed status: 0x%x, reset controller.\n",
+ csts);
}
- spin_unlock(&dev_list_lock);
- schedule_timeout(round_jiffies_relative(HZ));
+ return;
}
- return 0;
+
+ mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
}
static int nvme_create_io_queues(struct nvme_dev *dev)
{
- unsigned i;
+ unsigned i, max;
int ret = 0;
for (i = dev->queue_count; i <= dev->max_qid; i++) {
@@ -1416,7 +1384,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
}
}
- for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
+ max = min(dev->max_qid, dev->queue_count - 1);
+ for (i = dev->online_queues; i <= max; i++) {
ret = nvme_create_queue(dev->queues[i], i);
if (ret) {
nvme_free_queues(dev, i);
@@ -1507,9 +1476,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* access to the admin queue, as that might be only way to fix them up.
*/
if (result > 0) {
- dev_err(dev->dev, "Could not set queue count (%d)\n", result);
- nr_io_queues = 0;
- result = 0;
+ dev_err(dev->ctrl.device,
+ "Could not set queue count (%d)\n", result);
+ return 0;
}
if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
@@ -1543,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* If we enable msix early due to not intx, disable it again before
* setting up the full range we need.
*/
- if (!pdev->irq)
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ else if (pdev->msix_enabled)
pci_disable_msix(pdev);
for (i = 0; i < nr_io_queues; i++)
@@ -1573,9 +1544,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
adminq->cq_vector = -1;
goto free_queues;
}
-
- /* Free previously allocated queues that are no longer usable */
- nvme_free_queues(dev, nr_io_queues + 1);
return nvme_create_io_queues(dev);
free_queues:
@@ -1709,7 +1677,13 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (blk_mq_alloc_tag_set(&dev->tagset))
return 0;
dev->ctrl.tagset = &dev->tagset;
+ } else {
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+
+ /* Free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
}
+
nvme_queue_scan(dev);
return 0;
}
@@ -1723,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
if (pci_enable_device_mem(pdev))
return result;
- dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
@@ -1736,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
}
/*
- * Some devices don't advertse INTx interrupts, pre-enable a single
- * MSIX vec for setup. We'll adjust this later.
+ * Some devices and/or platforms don't advertise or work with INTx
+ * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
+ * adjust this later.
*/
- if (!pdev->irq) {
- result = pci_enable_msix(pdev, dev->entry, 1);
- if (result < 0)
- goto disable;
+ if (pci_enable_msix(pdev, dev->entry, 1)) {
+ pci_enable_msi(pdev);
+ dev->entry[0].vector = pdev->irq;
+ }
+
+ if (!dev->entry[0].vector) {
+ result = -ENODEV;
+ goto disable;
}
cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1796,56 +1774,12 @@ static void nvme_pci_disable(struct nvme_dev *dev)
}
}
-static int nvme_dev_list_add(struct nvme_dev *dev)
-{
- bool start_thread = false;
-
- spin_lock(&dev_list_lock);
- if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
- start_thread = true;
- nvme_thread = NULL;
- }
- list_add(&dev->node, &dev_list);
- spin_unlock(&dev_list_lock);
-
- if (start_thread) {
- nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
- wake_up_all(&nvme_kthread_wait);
- } else
- wait_event_killable(nvme_kthread_wait, nvme_thread);
-
- if (IS_ERR_OR_NULL(nvme_thread))
- return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
-
- return 0;
-}
-
-/*
-* Remove the node from the device list and check
-* for whether or not we need to stop the nvme_thread.
-*/
-static void nvme_dev_list_remove(struct nvme_dev *dev)
-{
- struct task_struct *tmp = NULL;
-
- spin_lock(&dev_list_lock);
- list_del_init(&dev->node);
- if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
- tmp = nvme_thread;
- nvme_thread = NULL;
- }
- spin_unlock(&dev_list_lock);
-
- if (tmp)
- kthread_stop(tmp);
-}
-
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
int i;
u32 csts = -1;
- nvme_dev_list_remove(dev);
+ del_timer_sync(&dev->watchdog_timer);
mutex_lock(&dev->shutdown_lock);
if (pci_is_enabled(to_pci_dev(dev->dev))) {
@@ -1907,7 +1841,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
- dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
+ dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
kref_get(&dev->ctrl.kref);
nvme_dev_disable(dev, false);
@@ -1930,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+ goto out;
+
set_bit(NVME_CTRL_RESETTING, &dev->flags);
result = nvme_pci_enable(dev);
@@ -1954,17 +1891,16 @@ static void nvme_reset_work(struct work_struct *work)
goto out;
dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+ queue_work(nvme_workq, &dev->async_work);
- result = nvme_dev_list_add(dev);
- if (result)
- goto out;
+ mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
/*
* Keep the controller around but remove all namespaces if we don't have
* any working I/O queue.
*/
if (dev->online_queues < 2) {
- dev_warn(dev->dev, "IO queues not created\n");
+ dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_remove_namespaces(&dev->ctrl);
} else {
nvme_start_queues(&dev->ctrl);
@@ -2032,6 +1968,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .module = THIS_MODULE,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -2089,10 +2026,12 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto free;
- INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->scan_work, nvme_dev_scan);
INIT_WORK(&dev->reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
+ INIT_WORK(&dev->async_work, nvme_async_event_work);
+ setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
+ (unsigned long)dev);
mutex_init(&dev->shutdown_lock);
init_completion(&dev->ioq_wait);
@@ -2105,6 +2044,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release_pools;
+ dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
queue_work(nvme_workq, &dev->reset_work);
return 0;
@@ -2147,6 +2088,8 @@ static void nvme_remove(struct pci_dev *pdev)
set_bit(NVME_CTRL_REMOVING, &dev->flags);
pci_set_drvdata(pdev, NULL);
+ flush_work(&dev->async_work);
+ flush_work(&dev->reset_work);
flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
@@ -2192,7 +2135,7 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
* shutdown the controller to quiesce. The controller will be restarted
* after the slot reset through driver's slot_reset callback.
*/
- dev_warn(&pdev->dev, "error detected: state:%d\n", state);
+ dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
@@ -2209,7 +2152,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "restart after slot reset\n");
+ dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
queue_work(nvme_workq, &dev->reset_work);
return PCI_ERS_RESULT_RECOVERED;
@@ -2232,7 +2175,8 @@ static const struct pci_error_handlers nvme_err_handler = {
static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0953),
- .driver_data = NVME_QUIRK_STRIPE_SIZE, },
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DISCARD_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
@@ -2257,34 +2201,20 @@ static int __init nvme_init(void)
{
int result;
- init_waitqueue_head(&nvme_kthread_wait);
-
nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
if (!nvme_workq)
return -ENOMEM;
- result = nvme_core_init();
- if (result < 0)
- goto kill_workq;
-
result = pci_register_driver(&nvme_driver);
if (result)
- goto core_exit;
- return 0;
-
- core_exit:
- nvme_core_exit();
- kill_workq:
- destroy_workqueue(nvme_workq);
+ destroy_workqueue(nvme_workq);
return result;
}
static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
- nvme_core_exit();
destroy_workqueue(nvme_workq);
- BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
_nvme_check_size();
}
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index bc4ea585b42e..ca52952d850f 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -25,9 +25,19 @@ config NVMEM_IMX_OCOTP
This driver can also be built as a module. If so, the module
will be called nvmem-imx-ocotp.
+config NVMEM_LPC18XX_EEPROM
+ tristate "NXP LPC18XX EEPROM Memory Support"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ help
+ Say Y here to include support for NXP LPC18xx EEPROM memory found in
+ NXP LPC185x/3x and LPC435x/3x/2x/1x devices.
+ To compile this driver as a module, choose M here: the module
+ will be called nvmem_lpc18xx_eeprom.
+
config NVMEM_MXS_OCOTP
tristate "Freescale MXS On-Chip OTP Memory Support"
depends on ARCH_MXS || COMPILE_TEST
+ depends on HAS_IOMEM
help
If you say Y here, you will get readonly access to the
One Time Programmable memory pages that are stored
@@ -36,9 +46,21 @@ config NVMEM_MXS_OCOTP
This driver can also be built as a module. If so, the module
will be called nvmem-mxs-ocotp.
+config MTK_EFUSE
+ tristate "Mediatek SoCs EFUSE support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This is a driver to access hardware-related data like sensor
+ calibration, HDMI impedance, etc.
+
+ This driver can also be built as a module. If so, the module
+ will be called efuse-mtk.
+
config QCOM_QFPROM
tristate "QCOM QFPROM Support"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
select REGMAP_MMIO
help
Say y here to enable QFPROM support. The QFPROM provides access
@@ -50,6 +72,7 @@ config QCOM_QFPROM
config ROCKCHIP_EFUSE
tristate "Rockchip eFuse Support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
help
This is a simple driver to dump specified values of Rockchip SoC
from eFuse, such as cpu-leakage.
@@ -71,6 +94,7 @@ config NVMEM_SUNXI_SID
config NVMEM_VF610_OCOTP
tristate "VF610 SoC OCOTP support"
depends on SOC_VF610 || COMPILE_TEST
+ depends on HAS_IOMEM
help
This is a driver for the 'OCOTP' peripheral available on Vybrid
devices like VF5xx and VF6xx.
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 95dde3f8f085..45ab1ae08fa9 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -8,8 +8,12 @@ nvmem_core-y := core.o
# Devices
obj-$(CONFIG_NVMEM_IMX_OCOTP) += nvmem-imx-ocotp.o
nvmem-imx-ocotp-y := imx-ocotp.o
+obj-$(CONFIG_NVMEM_LPC18XX_EEPROM) += nvmem_lpc18xx_eeprom.o
+nvmem_lpc18xx_eeprom-y := lpc18xx_eeprom.o
obj-$(CONFIG_NVMEM_MXS_OCOTP) += nvmem-mxs-ocotp.o
nvmem-mxs-ocotp-y := mxs-ocotp.o
+obj-$(CONFIG_MTK_EFUSE) += nvmem_mtk-efuse.o
+nvmem_mtk-efuse-y := mtk-efuse.o
obj-$(CONFIG_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 9d11d9837312..0de3d878c439 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -38,8 +38,13 @@ struct nvmem_device {
int users;
size_t size;
bool read_only;
+ int flags;
+ struct bin_attribute eeprom;
+ struct device *base_dev;
};
+#define FLAG_COMPAT BIT(0)
+
struct nvmem_cell {
const char *name;
int offset;
@@ -56,16 +61,26 @@ static DEFINE_IDA(nvmem_ida);
static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key eeprom_lock_key;
+#endif
+
#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nvmem_device *nvmem = to_nvmem_device(dev);
+ struct device *dev;
+ struct nvmem_device *nvmem;
int rc;
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = container_of(kobj, struct device, kobj);
+ nvmem = to_nvmem_device(dev);
+
/* Stop the user from reading */
if (pos >= nvmem->size)
return 0;
@@ -90,10 +105,16 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct nvmem_device *nvmem = to_nvmem_device(dev);
+ struct device *dev;
+ struct nvmem_device *nvmem;
int rc;
+ if (attr->private)
+ dev = attr->private;
+ else
+ dev = container_of(kobj, struct device, kobj);
+ nvmem = to_nvmem_device(dev);
+
/* Stop the user from writing */
if (pos >= nvmem->size)
return 0;
@@ -161,6 +182,53 @@ static const struct attribute_group *nvmem_ro_dev_groups[] = {
NULL,
};
+/* default read/write permissions, root only */
+static struct bin_attribute bin_attr_rw_root_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = S_IWUSR | S_IRUSR,
+ },
+ .read = bin_attr_nvmem_read,
+ .write = bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
+ &bin_attr_rw_root_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_rw_root_group = {
+ .bin_attrs = nvmem_bin_rw_root_attributes,
+};
+
+static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
+ &nvmem_bin_rw_root_group,
+ NULL,
+};
+
+/* read only permission, root only */
+static struct bin_attribute bin_attr_ro_root_nvmem = {
+ .attr = {
+ .name = "nvmem",
+ .mode = S_IRUSR,
+ },
+ .read = bin_attr_nvmem_read,
+};
+
+static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
+ &bin_attr_ro_root_nvmem,
+ NULL,
+};
+
+static const struct attribute_group nvmem_bin_ro_root_group = {
+ .bin_attrs = nvmem_bin_ro_root_attributes,
+};
+
+static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
+ &nvmem_bin_ro_root_group,
+ NULL,
+};
+
static void nvmem_release(struct device *dev)
{
struct nvmem_device *nvmem = to_nvmem_device(dev);
@@ -294,12 +362,51 @@ static int nvmem_add_cells(struct nvmem_device *nvmem,
return 0;
err:
- while (--i)
+ while (i--)
nvmem_cell_drop(cells[i]);
+ kfree(cells);
+
return rval;
}
+/*
+ * nvmem_setup_compat() - Create an additional binary entry in
+ * drivers sys directory, to be backwards compatible with the older
+ * drivers/misc/eeprom drivers.
+ */
+static int nvmem_setup_compat(struct nvmem_device *nvmem,
+ const struct nvmem_config *config)
+{
+ int rval;
+
+ if (!config->base_dev)
+ return -EINVAL;
+
+ if (nvmem->read_only)
+ nvmem->eeprom = bin_attr_ro_root_nvmem;
+ else
+ nvmem->eeprom = bin_attr_rw_root_nvmem;
+ nvmem->eeprom.attr.name = "eeprom";
+ nvmem->eeprom.size = nvmem->size;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ nvmem->eeprom.attr.key = &eeprom_lock_key;
+#endif
+ nvmem->eeprom.private = &nvmem->dev;
+ nvmem->base_dev = config->base_dev;
+
+ rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
+ if (rval) {
+ dev_err(&nvmem->dev,
+ "Failed to create eeprom binary file %d\n", rval);
+ return rval;
+ }
+
+ nvmem->flags |= FLAG_COMPAT;
+
+ return 0;
+}
+
/**
* nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
@@ -353,24 +460,37 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->read_only = of_property_read_bool(np, "read-only") |
config->read_only;
- nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups :
- nvmem_rw_dev_groups;
+ if (config->root_only)
+ nvmem->dev.groups = nvmem->read_only ?
+ nvmem_ro_root_dev_groups :
+ nvmem_rw_root_dev_groups;
+ else
+ nvmem->dev.groups = nvmem->read_only ?
+ nvmem_ro_dev_groups :
+ nvmem_rw_dev_groups;
device_initialize(&nvmem->dev);
dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
rval = device_add(&nvmem->dev);
- if (rval) {
- ida_simple_remove(&nvmem_ida, nvmem->id);
- kfree(nvmem);
- return ERR_PTR(rval);
+ if (rval)
+ goto out;
+
+ if (config->compat) {
+ rval = nvmem_setup_compat(nvmem, config);
+ if (rval)
+ goto out;
}
if (config->cells)
nvmem_add_cells(nvmem, config);
return nvmem;
+out:
+ ida_simple_remove(&nvmem_ida, nvmem->id);
+ kfree(nvmem);
+ return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
@@ -390,6 +510,9 @@ int nvmem_unregister(struct nvmem_device *nvmem)
}
mutex_unlock(&nvmem_mutex);
+ if (nvmem->flags & FLAG_COMPAT)
+ device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+
nvmem_device_remove_all_cells(nvmem);
device_del(&nvmem->dev);
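
A provider opts into the legacy eeprom interface through the new config fields: compat requests the extra attribute and base_dev names the device it hangs off, while root_only selects the new root-only permission groups. A hedged sketch (names and field values invented for illustration):

/*
 * Sketch of a registration using the compat path added above: besides
 * /sys/bus/nvmem/devices/<name>/nvmem, an "eeprom" file appears under
 * base_dev, matching the old drivers/misc/eeprom layout.
 */
static struct nvmem_config example_config = {
	.name      = "example-eeprom",
	.owner     = THIS_MODULE,
	.read_only = true,
	.root_only = true,	/* use the root-only attribute groups */
	.compat    = true,	/* also create <base_dev>/eeprom */
};

example_config.dev = dev;
example_config.base_dev = dev;	/* where the compat file is placed */
nvmem = nvmem_register(&example_config);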
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index b7971d410b60..d7796eb5421f 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -51,7 +51,7 @@ static int imx_ocotp_read(void *context, const void *reg, size_t reg_size,
val += 4;
}
- return (i - index) * 4;
+ return 0;
}
static int imx_ocotp_write(void *context, const void *data, size_t count)
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
new file mode 100644
index 000000000000..878fce789341
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -0,0 +1,330 @@
+/*
+ * NXP LPC18xx/LPC43xx EEPROM memory NVMEM driver
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* Registers */
+#define LPC18XX_EEPROM_AUTOPROG 0x00c
+#define LPC18XX_EEPROM_AUTOPROG_WORD 0x1
+
+#define LPC18XX_EEPROM_CLKDIV 0x014
+
+#define LPC18XX_EEPROM_PWRDWN 0x018
+#define LPC18XX_EEPROM_PWRDWN_NO 0x0
+#define LPC18XX_EEPROM_PWRDWN_YES 0x1
+
+#define LPC18XX_EEPROM_INTSTAT 0xfe0
+#define LPC18XX_EEPROM_INTSTAT_END_OF_PROG BIT(2)
+
+#define LPC18XX_EEPROM_INTSTATCLR 0xfe8
+#define LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST BIT(2)
+
+/* Fixed page size (bytes) */
+#define LPC18XX_EEPROM_PAGE_SIZE 0x80
+
+/* EEPROM device requires a ~1500 kHz clock (min 800 kHz, max 1600 kHz) */
+#define LPC18XX_EEPROM_CLOCK_HZ 1500000
+
+/* EEPROM requires 3 ms of erase/program time between writes */
+#define LPC18XX_EEPROM_PROGRAM_TIME 3
+
+struct lpc18xx_eeprom_dev {
+ struct clk *clk;
+ void __iomem *reg_base;
+ void __iomem *mem_base;
+ struct nvmem_device *nvmem;
+ unsigned reg_bytes;
+ unsigned val_bytes;
+};
+
+static struct regmap_config lpc18xx_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom,
+ u32 reg, u32 val)
+{
+ writel(val, eeprom->reg_base + reg);
+}
+
+static inline u32 lpc18xx_eeprom_readl(struct lpc18xx_eeprom_dev *eeprom,
+ u32 reg)
+{
+ return readl(eeprom->reg_base + reg);
+}
+
+static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom)
+{
+ unsigned long end;
+ u32 val;
+
+ /* Wait until EEPROM program operation has finished */
+ end = jiffies + msecs_to_jiffies(LPC18XX_EEPROM_PROGRAM_TIME * 10);
+
+ while (time_is_after_jiffies(end)) {
+ val = lpc18xx_eeprom_readl(eeprom, LPC18XX_EEPROM_INTSTAT);
+
+ if (val & LPC18XX_EEPROM_INTSTAT_END_OF_PROG) {
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_INTSTATCLR,
+ LPC18XX_EEPROM_INTSTATCLR_PROG_CLR_ST);
+ return 0;
+ }
+
+ usleep_range(LPC18XX_EEPROM_PROGRAM_TIME * USEC_PER_MSEC,
+ (LPC18XX_EEPROM_PROGRAM_TIME + 1) * USEC_PER_MSEC);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int lpc18xx_eeprom_gather_write(void *context, const void *reg,
+ size_t reg_size, const void *val,
+ size_t val_size)
+{
+ struct lpc18xx_eeprom_dev *eeprom = context;
+ unsigned int offset = *(u32 *)reg;
+ int ret;
+
+ if (offset % lpc18xx_regmap_config.reg_stride)
+ return -EINVAL;
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_NO);
+
+ /* Wait 100 us while the EEPROM wakes up */
+ usleep_range(100, 200);
+
+ while (val_size) {
+ writel(*(u32 *)val, eeprom->mem_base + offset);
+ ret = lpc18xx_eeprom_busywait_until_prog(eeprom);
+ if (ret < 0)
+ return ret;
+
+ val_size -= eeprom->val_bytes;
+ val += eeprom->val_bytes;
+ offset += eeprom->val_bytes;
+ }
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_YES);
+
+ return 0;
+}
+
+static int lpc18xx_eeprom_write(void *context, const void *data, size_t count)
+{
+ struct lpc18xx_eeprom_dev *eeprom = context;
+ unsigned int offset = eeprom->reg_bytes;
+
+ if (count <= offset)
+ return -EINVAL;
+
+ return lpc18xx_eeprom_gather_write(context, data, eeprom->reg_bytes,
+ data + offset, count - offset);
+}
+
+static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct lpc18xx_eeprom_dev *eeprom = context;
+ unsigned int offset = *(u32 *)reg;
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_NO);
+
+ /* Wait 100 us while the EEPROM wakes up */
+ usleep_range(100, 200);
+
+ while (val_size) {
+ *(u32 *)val = readl(eeprom->mem_base + offset);
+ val_size -= eeprom->val_bytes;
+ val += eeprom->val_bytes;
+ offset += eeprom->val_bytes;
+ }
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_YES);
+
+ return 0;
+}
+
+static struct regmap_bus lpc18xx_eeprom_bus = {
+ .write = lpc18xx_eeprom_write,
+ .gather_write = lpc18xx_eeprom_gather_write,
+ .read = lpc18xx_eeprom_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static bool lpc18xx_eeprom_writeable_reg(struct device *dev, unsigned int reg)
+{
+ /*
+ * The last page contains the EEPROM initialization data and is not
+ * writable.
+ */
+ return reg <= lpc18xx_regmap_config.max_register -
+ LPC18XX_EEPROM_PAGE_SIZE;
+}
+
+static bool lpc18xx_eeprom_readable_reg(struct device *dev, unsigned int reg)
+{
+ return reg <= lpc18xx_regmap_config.max_register;
+}
+
+static struct nvmem_config lpc18xx_nvmem_config = {
+ .name = "lpc18xx-eeprom",
+ .owner = THIS_MODULE,
+};
+
+static int lpc18xx_eeprom_probe(struct platform_device *pdev)
+{
+ struct lpc18xx_eeprom_dev *eeprom;
+ struct device *dev = &pdev->dev;
+ struct reset_control *rst;
+ unsigned long clk_rate;
+ struct regmap *regmap;
+ struct resource *res;
+ int ret;
+
+ eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
+ if (!eeprom)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
+ eeprom->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(eeprom->reg_base))
+ return PTR_ERR(eeprom->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
+ eeprom->mem_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(eeprom->mem_base))
+ return PTR_ERR(eeprom->mem_base);
+
+ eeprom->clk = devm_clk_get(&pdev->dev, "eeprom");
+ if (IS_ERR(eeprom->clk)) {
+ dev_err(&pdev->dev, "failed to get eeprom clock\n");
+ return PTR_ERR(eeprom->clk);
+ }
+
+ ret = clk_prepare_enable(eeprom->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to prepare/enable eeprom clk: %d\n", ret);
+ return ret;
+ }
+
+ rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get reset: %ld\n", PTR_ERR(rst));
+ ret = PTR_ERR(rst);
+ goto err_clk;
+ }
+
+ ret = reset_control_assert(rst);
+ if (ret < 0) {
+ dev_err(dev, "failed to assert reset: %d\n", ret);
+ goto err_clk;
+ }
+
+ eeprom->val_bytes = lpc18xx_regmap_config.val_bits / BITS_PER_BYTE;
+ eeprom->reg_bytes = lpc18xx_regmap_config.reg_bits / BITS_PER_BYTE;
+
+ /*
+ * Clock rate is generated by dividing the system bus clock by the
+ * division factor, contained in the divider register (minus 1 encoded).
+ */
+ clk_rate = clk_get_rate(eeprom->clk);
+ clk_rate = DIV_ROUND_UP(clk_rate, LPC18XX_EEPROM_CLOCK_HZ) - 1;
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_CLKDIV, clk_rate);
+
+ /*
+ * Writing a single word to the page will start the erase/program cycle
+ * automatically
+ */
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_AUTOPROG,
+ LPC18XX_EEPROM_AUTOPROG_WORD);
+
+ lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
+ LPC18XX_EEPROM_PWRDWN_YES);
+
+ lpc18xx_regmap_config.max_register = resource_size(res) - 1;
+ lpc18xx_regmap_config.writeable_reg = lpc18xx_eeprom_writeable_reg;
+ lpc18xx_regmap_config.readable_reg = lpc18xx_eeprom_readable_reg;
+
+ regmap = devm_regmap_init(dev, &lpc18xx_eeprom_bus, eeprom,
+ &lpc18xx_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "regmap init failed: %ld\n", PTR_ERR(regmap));
+ ret = PTR_ERR(regmap);
+ goto err_clk;
+ }
+
+ lpc18xx_nvmem_config.dev = dev;
+
+ eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config);
+ if (IS_ERR(eeprom->nvmem)) {
+ ret = PTR_ERR(eeprom->nvmem);
+ goto err_clk;
+ }
+
+ platform_set_drvdata(pdev, eeprom);
+
+ return 0;
+
+err_clk:
+ clk_disable_unprepare(eeprom->clk);
+
+ return ret;
+}
+
+static int lpc18xx_eeprom_remove(struct platform_device *pdev)
+{
+ struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = nvmem_unregister(eeprom->nvmem);
+ if (ret < 0)
+ return ret;
+
+ clk_disable_unprepare(eeprom->clk);
+
+ return 0;
+}
+
+static const struct of_device_id lpc18xx_eeprom_of_match[] = {
+ { .compatible = "nxp,lpc1857-eeprom" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_eeprom_of_match);
+
+static struct platform_driver lpc18xx_eeprom_driver = {
+ .probe = lpc18xx_eeprom_probe,
+ .remove = lpc18xx_eeprom_remove,
+ .driver = {
+ .name = "lpc18xx-eeprom",
+ .of_match_table = lpc18xx_eeprom_of_match,
+ },
+};
+
+module_platform_driver(lpc18xx_eeprom_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx EEPROM memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
new file mode 100644
index 000000000000..9c49369beea5
--- /dev/null
+++ b/drivers/nvmem/mtk-efuse.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct regmap_config mtk_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int mtk_efuse_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct nvmem_device *nvmem;
+ struct nvmem_config *econfig;
+ struct regmap *regmap;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
+ if (!econfig)
+ return -ENOMEM;
+
+ mtk_regmap_config.max_register = resource_size(res) - 1;
+
+ regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(regmap);
+ }
+
+ econfig->dev = dev;
+ econfig->owner = THIS_MODULE;
+ nvmem = nvmem_register(econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int mtk_efuse_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id mtk_efuse_of_match[] = {
+ { .compatible = "mediatek,mt8173-efuse",},
+ { .compatible = "mediatek,efuse",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mtk_efuse_of_match);
+
+static struct platform_driver mtk_efuse_driver = {
+ .probe = mtk_efuse_probe,
+ .remove = mtk_efuse_remove,
+ .driver = {
+ .name = "mediatek,efuse",
+ .of_match_table = mtk_efuse_of_match,
+ },
+};
+
+static int __init mtk_efuse_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&mtk_efuse_driver);
+ if (ret) {
+ pr_err("Failed to register efuse driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit mtk_efuse_exit(void)
+{
+ platform_driver_unregister(&mtk_efuse_driver);
+}
+
+subsys_initcall(mtk_efuse_init);
+module_exit(mtk_efuse_exit);
+
+MODULE_AUTHOR("Andrew-CT Chen <andrew-ct.chen@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek EFUSE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index f55213424222..a009795111e9 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -14,16 +14,16 @@
* more details.
*/
-#include <linux/platform_device.h>
-#include <linux/nvmem-provider.h>
-#include <linux/slab.h>
-#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/delay.h>
+#include <linux/nvmem-provider.h>
+#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
#define EFUSE_A_SHIFT 6
#define EFUSE_A_MASK 0x3ff
@@ -35,10 +35,10 @@
#define REG_EFUSE_CTRL 0x0000
#define REG_EFUSE_DOUT 0x0004
-struct rockchip_efuse_context {
+struct rockchip_efuse_chip {
struct device *dev;
void __iomem *base;
- struct clk *efuse_clk;
+ struct clk *clk;
};
static int rockchip_efuse_write(void *context, const void *data, size_t count)
@@ -52,34 +52,32 @@ static int rockchip_efuse_read(void *context,
void *val, size_t val_size)
{
unsigned int offset = *(u32 *)reg;
- struct rockchip_efuse_context *_context = context;
- void __iomem *base = _context->base;
- struct clk *clk = _context->efuse_clk;
+ struct rockchip_efuse_chip *efuse = context;
u8 *buf = val;
int ret;
- ret = clk_prepare_enable(clk);
+ ret = clk_prepare_enable(efuse->clk);
if (ret < 0) {
- dev_err(_context->dev, "failed to prepare/enable efuse clk\n");
+ dev_err(efuse->dev, "failed to prepare/enable efuse clk\n");
return ret;
}
- writel(EFUSE_LOAD | EFUSE_PGENB, base + REG_EFUSE_CTRL);
+ writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL);
udelay(1);
while (val_size) {
- writel(readl(base + REG_EFUSE_CTRL) &
+ writel(readl(efuse->base + REG_EFUSE_CTRL) &
(~(EFUSE_A_MASK << EFUSE_A_SHIFT)),
- base + REG_EFUSE_CTRL);
- writel(readl(base + REG_EFUSE_CTRL) |
+ efuse->base + REG_EFUSE_CTRL);
+ writel(readl(efuse->base + REG_EFUSE_CTRL) |
((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT),
- base + REG_EFUSE_CTRL);
+ efuse->base + REG_EFUSE_CTRL);
udelay(1);
- writel(readl(base + REG_EFUSE_CTRL) |
- EFUSE_STROBE, base + REG_EFUSE_CTRL);
+ writel(readl(efuse->base + REG_EFUSE_CTRL) |
+ EFUSE_STROBE, efuse->base + REG_EFUSE_CTRL);
udelay(1);
- *buf++ = readb(base + REG_EFUSE_DOUT);
- writel(readl(base + REG_EFUSE_CTRL) &
- (~EFUSE_STROBE), base + REG_EFUSE_CTRL);
+ *buf++ = readb(efuse->base + REG_EFUSE_DOUT);
+ writel(readl(efuse->base + REG_EFUSE_CTRL) &
+ (~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL);
udelay(1);
val_size -= 1;
@@ -87,9 +85,9 @@ static int rockchip_efuse_read(void *context,
}
/* Switch to standby mode */
- writel(EFUSE_PGENB | EFUSE_CSB, base + REG_EFUSE_CTRL);
+ writel(EFUSE_PGENB | EFUSE_CSB, efuse->base + REG_EFUSE_CTRL);
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(efuse->clk);
return 0;
}
@@ -114,48 +112,44 @@ static struct nvmem_config econfig = {
};
static const struct of_device_id rockchip_efuse_match[] = {
- { .compatible = "rockchip,rockchip-efuse",},
+ { .compatible = "rockchip,rockchip-efuse", },
{ /* sentinel */},
};
MODULE_DEVICE_TABLE(of, rockchip_efuse_match);
static int rockchip_efuse_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
struct regmap *regmap;
- void __iomem *base;
- struct clk *clk;
- struct rockchip_efuse_context *context;
+ struct rockchip_efuse_chip *efuse;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip),
+ GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
- context = devm_kzalloc(dev, sizeof(struct rockchip_efuse_context),
- GFP_KERNEL);
- if (IS_ERR(context))
- return PTR_ERR(context);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ efuse->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(efuse->base))
+ return PTR_ERR(efuse->base);
- clk = devm_clk_get(dev, "pclk_efuse");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ efuse->clk = devm_clk_get(&pdev->dev, "pclk_efuse");
+ if (IS_ERR(efuse->clk))
+ return PTR_ERR(efuse->clk);
- context->dev = dev;
- context->base = base;
- context->efuse_clk = clk;
+ efuse->dev = &pdev->dev;
rockchip_efuse_regmap_config.max_register = resource_size(res) - 1;
- regmap = devm_regmap_init(dev, &rockchip_efuse_bus,
- context, &rockchip_efuse_regmap_config);
+ regmap = devm_regmap_init(efuse->dev, &rockchip_efuse_bus,
+ efuse, &rockchip_efuse_regmap_config);
if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
+ dev_err(efuse->dev, "regmap init failed\n");
return PTR_ERR(regmap);
}
- econfig.dev = dev;
+
+ econfig.dev = efuse->dev;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index cfa3b85064dd..bc88b4084055 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -13,10 +13,8 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
-
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -27,7 +25,6 @@
#include <linux/slab.h>
#include <linux/random.h>
-
static struct nvmem_config econfig = {
.name = "sunxi-sid",
.read_only = true,
@@ -55,8 +52,8 @@ static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
}
static int sunxi_sid_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
{
struct sunxi_sid *sid = context;
unsigned int offset = *(u32 *)reg;
@@ -130,7 +127,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
- randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+ randomness = kzalloc(sizeof(u8) * (size), GFP_KERNEL);
if (!randomness) {
ret = -EINVAL;
goto err_unreg_nvmem;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 017dd94f16ea..b299de2b3afa 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1341,10 +1341,10 @@ EXPORT_SYMBOL_GPL(of_property_read_u64_array);
*
* The out_string pointer is modified only if a valid string can be decoded.
*/
-int of_property_read_string(struct device_node *np, const char *propname,
+int of_property_read_string(const struct device_node *np, const char *propname,
const char **out_string)
{
- struct property *prop = of_find_property(np, propname, NULL);
+ const struct property *prop = of_find_property(np, propname, NULL);
if (!prop)
return -EINVAL;
if (!prop->value)
@@ -1365,10 +1365,10 @@ EXPORT_SYMBOL_GPL(of_property_read_string);
* This function searches a string list property and returns the index
* of a specific string value.
*/
-int of_property_match_string(struct device_node *np, const char *propname,
+int of_property_match_string(const struct device_node *np, const char *propname,
const char *string)
{
- struct property *prop = of_find_property(np, propname, NULL);
+ const struct property *prop = of_find_property(np, propname, NULL);
size_t l;
int i;
const char *p, *end;
@@ -1404,10 +1404,11 @@ EXPORT_SYMBOL_GPL(of_property_match_string);
* Don't call this function directly. It is a utility helper for the
* of_property_read_string*() family of functions.
*/
-int of_property_read_string_helper(struct device_node *np, const char *propname,
- const char **out_strs, size_t sz, int skip)
+int of_property_read_string_helper(const struct device_node *np,
+ const char *propname, const char **out_strs,
+ size_t sz, int skip)
{
- struct property *prop = of_find_property(np, propname, NULL);
+ const struct property *prop = of_find_property(np, propname, NULL);
int l = 0, i = 0;
const char *p, *end;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 655f79db7899..3349d2aa6634 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -760,6 +760,16 @@ const void * __init of_flat_dt_match_machine(const void *default_match,
}
#ifdef CONFIG_BLK_DEV_INITRD
+#ifndef __early_init_dt_declare_initrd
+static void __early_init_dt_declare_initrd(unsigned long start,
+ unsigned long end)
+{
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(end);
+ initrd_below_start_ok = 1;
+}
+#endif
+
/**
* early_init_dt_check_for_initrd - Decode initrd location from flat tree
* @node: reference to node containing initrd location ('chosen')
@@ -782,9 +792,7 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
return;
end = of_read_number(prop, len/4);
- initrd_start = (unsigned long)__va(start);
- initrd_end = (unsigned long)__va(end);
- initrd_below_start_ok = 1;
+ __early_init_dt_declare_initrd(start, end);
pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
(unsigned long long)start, (unsigned long long)end);
@@ -796,14 +804,13 @@ static inline void early_init_dt_check_for_initrd(unsigned long node)
#endif /* CONFIG_BLK_DEV_INITRD */
#ifdef CONFIG_SERIAL_EARLYCON
-extern struct of_device_id __earlycon_of_table[];
static int __init early_init_dt_scan_chosen_serial(void)
{
int offset;
- const char *p;
+ const char *p, *q, *options = NULL;
int l;
- const struct of_device_id *match = __earlycon_of_table;
+ const struct earlycon_id *match;
const void *fdt = initial_boot_params;
offset = fdt_path_offset(fdt, "/chosen");
@@ -818,27 +825,26 @@ static int __init early_init_dt_scan_chosen_serial(void)
if (!p || !l)
return -ENOENT;
- /* Remove console options if present */
- l = strchrnul(p, ':') - p;
+ q = strchrnul(p, ':');
+ if (*q != '\0')
+ options = q + 1;
+ l = q - p;
/* Get the node specified by stdout-path */
offset = fdt_path_offset_namelen(fdt, p, l);
- if (offset < 0)
- return -ENODEV;
-
- while (match->compatible[0]) {
- u64 addr;
+ if (offset < 0) {
+ pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
+ return 0;
+ }
- if (fdt_node_check_compatible(fdt, offset, match->compatible)) {
- match++;
+ for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+ if (!match->compatible[0])
continue;
- }
- addr = fdt_translate_address(fdt, offset);
- if (addr == OF_BAD_ADDR)
- return -ENXIO;
+ if (fdt_node_check_compatible(fdt, offset, match->compatible))
+ continue;
- of_setup_earlycon(addr, match->data);
+ of_setup_earlycon(match, offset, options);
return 0;
}
return -ENODEV;
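The stdout-path handling above splits a value such as "serial0:115200n8" at the first ':' into a node path and earlycon options. A standalone sketch of that split, using a local stand-in for the kernel's strchrnul():

#include <stdio.h>

/* Like the kernel's strchrnul(): returns a pointer to the first
 * occurrence of c, or to the terminating NUL if c is absent. */
static const char *strchrnul_sketch(const char *s, int c)
{
	while (*s && *s != (char)c)
		s++;
	return s;
}

int main(void)
{
	const char *p = "serial0:115200n8";	/* example stdout-path */
	const char *q = strchrnul_sketch(p, ':');
	const char *options = (*q != '\0') ? q + 1 : NULL;
	int l = (int)(q - p);

	printf("node path: %.*s\n", l, p);	/* "serial0" */
	printf("options: %s\n", options ? options : "(none)");
	return 0;
}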
@@ -976,13 +982,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
}
#ifdef CONFIG_HAVE_MEMBLOCK
+#ifndef MIN_MEMBLOCK_ADDR
+#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET)
+#endif
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0)
#endif
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
- const u64 phys_offset = __pa(PAGE_OFFSET);
+ const u64 phys_offset = MIN_MEMBLOCK_ADDR;
if (!PAGE_ALIGNED(base)) {
if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
diff --git a/drivers/of/fdt_address.c b/drivers/of/fdt_address.c
index 8d3dc6fbdb7a..dca8f9b93745 100644
--- a/drivers/of/fdt_address.c
+++ b/drivers/of/fdt_address.c
@@ -161,7 +161,7 @@ static int __init fdt_translate_one(const void *blob, int parent,
* that can be mapped to a cpu physical address). This is not really specified
* that way, but this is traditionally the way IBM at least do things
*/
-u64 __init fdt_translate_address(const void *blob, int node_offset)
+static u64 __init fdt_translate_address(const void *blob, int node_offset)
{
int parent, len;
const struct of_bus *bus, *pbus;
@@ -239,3 +239,12 @@ u64 __init fdt_translate_address(const void *blob, int node_offset)
bail:
return result;
}
+
+/**
+ * of_flat_dt_translate_address - translate DT addr into CPU phys addr
+ * @node: node in the flat blob
+ */
+u64 __init of_flat_dt_translate_address(unsigned long node)
+{
+ return fdt_translate_address(initial_boot_params, node);
+}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 365dc7e83ab4..8453f08d2ef4 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -56,7 +56,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
else
phy = get_phy_device(mdio, addr, is_c45);
- if (!phy || IS_ERR(phy))
+ if (IS_ERR_OR_NULL(phy))
return 1;
rc = irq_of_parse_and_map(child, 0);
@@ -98,7 +98,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
int rc;
mdiodev = mdio_device_create(mdio, addr);
- if (!mdiodev || IS_ERR(mdiodev))
+ if (IS_ERR(mdiodev))
return 1;
/* Associate the OF node with the device structure so it
@@ -211,7 +211,6 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
struct device_node *child;
- const __be32 *paddr;
bool scanphys = false;
int addr, rc;
@@ -246,8 +245,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
/* auto scan for PHYs with empty reg property */
for_each_available_child_of_node(np, child) {
/* Skip PHYs with reg property set */
- paddr = of_get_property(child, "reg", NULL);
- if (paddr)
+ if (of_find_property(child, "reg", NULL))
continue;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
@@ -414,7 +412,7 @@ int of_phy_register_fixed_link(struct device_node *np)
if (strcmp(managed, "in-band-status") == 0) {
/* status is zeroed, namely its .link member */
phy = fixed_phy_register(PHY_POLL, &status, -1, np);
- return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+ return PTR_ERR_OR_ZERO(phy);
}
}
@@ -436,7 +434,7 @@ int of_phy_register_fixed_link(struct device_node *np)
return -EPROBE_DEFER;
phy = fixed_phy_register(PHY_POLL, &status, link_gpio, np);
- return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+ return PTR_ERR_OR_ZERO(phy);
}
/* Old binding */
@@ -448,7 +446,7 @@ int of_phy_register_fixed_link(struct device_node *np)
status.pause = be32_to_cpu(fixed_link_prop[3]);
status.asym_pause = be32_to_cpu(fixed_link_prop[4]);
phy = fixed_phy_register(PHY_POLL, &status, -1, np);
- return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+ return PTR_ERR_OR_ZERO(phy);
}
return -ENODEV;
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index b1449f71601c..13f4fed38048 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -5,7 +5,6 @@
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
-#include <asm-generic/pci-bridge.h>
static inline int __of_pci_pci_compare(struct device_node *node,
unsigned int data)
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 1a3556a9e9ea..ed01c0172e4a 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -32,11 +32,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
phys_addr_t *res_base)
{
+ phys_addr_t base;
/*
* We use __memblock_alloc_base() because memblock_alloc_base()
* panic()s on allocation failure.
*/
- phys_addr_t base = __memblock_alloc_base(size, align, end);
+ end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
+ base = __memblock_alloc_base(size, align, end);
if (!base)
return -ENOMEM;
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 640eb4cb46e3..d313d492f278 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -36,12 +36,14 @@ static struct device_node *__of_find_node_by_full_name(struct device_node *node,
/* check */
if (of_node_cmp(node->full_name, full_name) == 0)
- return node;
+ return of_node_get(node);
for_each_child_of_node(node, child) {
found = __of_find_node_by_full_name(child, full_name);
- if (found != NULL)
+ if (found != NULL) {
+ of_node_put(child);
return found;
+ }
}
return NULL;
@@ -174,6 +176,7 @@ static int __of_adjust_phandle_ref(struct device_node *node,
if (of_prop_cmp(sprop->name, propstr) == 0)
break;
}
+ of_node_put(refnode);
if (!sprop) {
pr_err("%s: Could not find property '%s'\n",
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 979b6e415cea..e986e6ee52e0 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1165,6 +1165,11 @@ static void of_unittest_destroy_tracked_overlays(void)
continue;
ret = of_overlay_destroy(id + overlay_first_id);
+ if (ret == -ENODEV) {
+ pr_warn("%s: no overlay to destroy for #%d\n",
+ __func__, id + overlay_first_id);
+ continue;
+ }
if (ret != 0) {
defers++;
pr_warn("%s: overlay destroy failed for #%d\n",
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index b48ac6300c79..a0e5260bd006 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *root_inode;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = OPROFILEFS_MAGIC;
sb->s_op = &s_ops;
sb->s_time_gran = 1;
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
index 592de566e72f..3a102a84d637 100644
--- a/drivers/parisc/Kconfig
+++ b/drivers/parisc/Kconfig
@@ -110,8 +110,6 @@ config IOMMU_HELPER
source "drivers/pcmcia/Kconfig"
-source "drivers/pci/hotplug/Kconfig"
-
endmenu
menu "PA-RISC specific drivers"
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index a656d9e83343..21905fef2cbf 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -91,7 +91,7 @@ static int configure_memory(const unsigned char *buf,
for (i=0;i<HPEE_MEMORY_MAX_ENT;i++) {
c = get_8(buf+len);
- if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
+ if (NULL != (res = kzalloc(sizeof(struct resource), GFP_KERNEL))) {
int result;
res->name = name;
@@ -183,7 +183,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent,
for (i=0;i<HPEE_PORT_MAX_ENT;i++) {
c = get_8(buf+len);
- if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
+ if (NULL != (res = kzalloc(sizeof(struct resource), GFP_KERNEL))) {
res->name = board;
res->start = get_16(buf+len+1);
res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 73de4efcbe6e..209292e067d2 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -1,6 +1,9 @@
#
# PCI configuration
#
+
+source "drivers/pci/pcie/Kconfig"
+
config PCI_BUS_ADDR_T_64BIT
def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
depends on PCI
@@ -118,4 +121,12 @@ config PCI_LABEL
def_bool y if (DMI || ACPI)
select NLS
+config PCI_HYPERV
+ tristate "Hyper-V PCI Frontend"
+ depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+
+source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/host/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index be3f631c3f75..2154092ddee8 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o
# Some architectures use the generic PCI setup functions
#
obj-$(CONFIG_ALPHA) += setup-irq.o
+obj-$(CONFIG_ARC) += setup-irq.o
obj-$(CONFIG_ARM) += setup-irq.o
obj-$(CONFIG_ARM64) += setup-irq.o
obj-$(CONFIG_UNICORE32) += setup-irq.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 8c05b5ceeaec..d11cdbb8fba3 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -174,38 +174,6 @@ struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
}
EXPORT_SYMBOL(pci_bus_set_ops);
-/**
- * pci_read_vpd - Read one entry from Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to read
- * @buf: pointer to where to store result
- *
- */
-ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
-{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->read(dev, pos, count, buf);
-}
-EXPORT_SYMBOL(pci_read_vpd);
-
-/**
- * pci_write_vpd - Write entry to Vital Product Data
- * @dev: pci device struct
- * @pos: offset in vpd space
- * @count: number of bytes to write
- * @buf: buffer containing write data
- *
- */
-ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
-{
- if (!dev->vpd || !dev->vpd->ops)
- return -ENODEV;
- return dev->vpd->ops->write(dev, pos, count, buf);
-}
-EXPORT_SYMBOL(pci_write_vpd);
-
/*
* The following routines are to prevent the user from accessing PCI config
* space when it's unsafe to do so. Some devices require this during BIST and
@@ -277,15 +245,104 @@ PCI_USER_WRITE_CONFIG(dword, u32)
/* VPD access through PCI 2.2+ VPD capability */
-#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)
+/**
+ * pci_read_vpd - Read one entry from Vital Product Data
+ * @dev: pci device struct
+ * @pos: offset in vpd space
+ * @count: number of bytes to read
+ * @buf: pointer to where to store result
+ */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->read(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_read_vpd);
-struct pci_vpd_pci22 {
- struct pci_vpd base;
- struct mutex lock;
- u16 flag;
- bool busy;
- u8 cap;
-};
+/**
+ * pci_write_vpd - Write entry to Vital Product Data
+ * @dev: pci device struct
+ * @pos: offset in vpd space
+ * @count: number of bytes to write
+ * @buf: buffer containing write data
+ */
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->write(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_write_vpd);
+
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev: pci device struct
+ * @len: size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
+#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
+
+/**
+ * pci_vpd_size - determine actual size of Vital Product Data
+ * @dev: pci device struct
+ * @old_size: current assumed size, also maximum allowed size
+ */
+static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
+{
+ size_t off = 0;
+ unsigned char header[1+2]; /* 1 byte tag, 2 bytes length */
+
+ while (off < old_size &&
+ pci_read_vpd(dev, off, 1, header) == 1) {
+ unsigned char tag;
+
+ if (header[0] & PCI_VPD_LRDT) {
+ /* Large Resource Data Type Tag */
+ tag = pci_vpd_lrdt_tag(header);
+ /* Only read length from known tag items */
+ if ((tag == PCI_VPD_LTIN_ID_STRING) ||
+ (tag == PCI_VPD_LTIN_RO_DATA) ||
+ (tag == PCI_VPD_LTIN_RW_DATA)) {
+ if (pci_read_vpd(dev, off+1, 2,
+ &header[1]) != 2) {
+ dev_warn(&dev->dev,
+ "invalid large VPD tag %02x size at offset %zu",
+ tag, off + 1);
+ return 0;
+ }
+ off += PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(header);
+ }
+ } else {
+ /* Short Resource Data Type Tag */
+ off += PCI_VPD_SRDT_TAG_SIZE +
+ pci_vpd_srdt_size(header);
+ tag = pci_vpd_srdt_tag(header);
+ }
+
+ if (tag == PCI_VPD_STIN_END) /* End tag descriptor */
+ return off;
+
+ if ((tag != PCI_VPD_LTIN_ID_STRING) &&
+ (tag != PCI_VPD_LTIN_RO_DATA) &&
+ (tag != PCI_VPD_LTIN_RW_DATA)) {
+ dev_warn(&dev->dev,
+ "invalid %s VPD tag %02x at offset %zu",
+ (header[0] & PCI_VPD_LRDT) ? "large" : "short",
+ tag, off);
+ return 0;
+ }
+ }
+ return 0;
+}
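To make the tag walk above concrete, here is a self-contained sketch that sizes a small in-memory VPD image (an ID-string large resource followed by an end tag). The tag encodings follow the PCI Local Bus resource-data format; the constants are restated locally so the sketch stands alone, and the validation is simplified relative to the kernel:

#include <stdio.h>
#include <stddef.h>

#define VPD_LRDT		0x80	/* large resource if bit 7 set */
#define VPD_LTIN_ID_STRING	0x02
#define VPD_LTIN_RO_DATA	0x10
#define VPD_LTIN_RW_DATA	0x11
#define VPD_STIN_END		0x0f

/* Walks resource tags the way pci_vpd_size() does, over a buffer
 * instead of config space. Returns the offset just past the end tag,
 * or 0 on a malformed image. Simplified: the kernel also rejects
 * unknown short tags. */
static size_t vpd_size_sketch(const unsigned char *vpd, size_t max)
{
	size_t off = 0;

	while (off < max) {
		unsigned char tag;

		if (vpd[off] & VPD_LRDT) {
			tag = vpd[off] & 0x7f;
			if (tag != VPD_LTIN_ID_STRING &&
			    tag != VPD_LTIN_RO_DATA &&
			    tag != VPD_LTIN_RW_DATA)
				return 0;
			/* 1 tag byte + 2-byte little-endian length */
			off += 3 + (vpd[off + 1] | (vpd[off + 2] << 8));
		} else {
			tag = (vpd[off] >> 3) & 0x0f;
			off += 1 + (vpd[off] & 0x07);
			if (tag == VPD_STIN_END)
				return off;
		}
	}
	return 0;
}

int main(void)
{
	/* 0x82: large ID-string tag, length 4 ("test"); 0x78: end tag */
	const unsigned char img[] = { 0x82, 0x04, 0x00, 't', 'e', 's', 't', 0x78 };

	printf("VPD size: %zu\n", vpd_size_sketch(img, sizeof(img)));	/* 8 */
	return 0;
}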
/*
* Wait for last operation to complete.
@@ -295,55 +352,71 @@ struct pci_vpd_pci22 {
*
* Returns 0 on success, negative values indicate error.
*/
-static int pci_vpd_pci22_wait(struct pci_dev *dev)
+static int pci_vpd_wait(struct pci_dev *dev)
{
- struct pci_vpd_pci22 *vpd =
- container_of(dev->vpd, struct pci_vpd_pci22, base);
- unsigned long timeout = jiffies + HZ/20 + 2;
+ struct pci_vpd *vpd = dev->vpd;
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
+ unsigned long max_sleep = 16;
u16 status;
int ret;
if (!vpd->busy)
return 0;
- for (;;) {
+ while (time_before(jiffies, timeout)) {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret < 0)
return ret;
if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
- vpd->busy = false;
+ vpd->busy = 0;
return 0;
}
- if (time_after(jiffies, timeout)) {
- dev_printk(KERN_DEBUG, &dev->dev, "vpd r/w failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
- return -ETIMEDOUT;
- }
if (fatal_signal_pending(current))
return -EINTR;
- if (!cond_resched())
- udelay(10);
+
+ usleep_range(10, max_sleep);
+ if (max_sleep < 1024)
+ max_sleep *= 2;
}
+
+ dev_warn(&dev->dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
+ return -ETIMEDOUT;
}
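The rework above replaces a fixed 10 µs busy-poll with a bounded exponential backoff: short sleeps at first, doubling the upper bound toward ~1 ms, until a 50 ms deadline passes. A userspace sketch of the same pattern, where poll_ready() stands in for the status read:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool poll_ready(void)
{
	static int calls;
	return ++calls >= 5;	/* pretend the device is ready on poll 5 */
}

/* Bounded exponential backoff: cheap polls while the operation is
 * likely to finish soon, longer sleeps afterwards, hard deadline. */
static int wait_ready(long deadline_ms)
{
	struct timespec ts = { 0, 10 * 1000 };	/* start at 10 us */
	long waited_ns = 0;

	while (waited_ns < deadline_ms * 1000000L) {
		if (poll_ready())
			return 0;
		nanosleep(&ts, NULL);
		waited_ns += ts.tv_nsec;
		if (ts.tv_nsec < 1024 * 1000)
			ts.tv_nsec *= 2;	/* cap around ~1 ms */
	}
	return -1;	/* timed out */
}

int main(void)
{
	printf("wait_ready: %d\n", wait_ready(50));
	return 0;
}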
-static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
- void *arg)
+static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
+ void *arg)
{
- struct pci_vpd_pci22 *vpd =
- container_of(dev->vpd, struct pci_vpd_pci22, base);
+ struct pci_vpd *vpd = dev->vpd;
int ret;
loff_t end = pos + count;
u8 *buf = arg;
- if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
+ if (pos < 0)
return -EINVAL;
+ if (!vpd->valid) {
+ vpd->valid = 1;
+ vpd->len = pci_vpd_size(dev, vpd->len);
+ }
+
+ if (vpd->len == 0)
+ return -EIO;
+
+ if (pos > vpd->len)
+ return 0;
+
+ if (end > vpd->len) {
+ end = vpd->len;
+ count = end - pos;
+ }
+
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
- ret = pci_vpd_pci22_wait(dev);
+ ret = pci_vpd_wait(dev);
if (ret < 0)
goto out;
@@ -355,9 +428,9 @@ static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
pos & ~3);
if (ret < 0)
break;
- vpd->busy = true;
+ vpd->busy = 1;
vpd->flag = PCI_VPD_ADDR_F;
- ret = pci_vpd_pci22_wait(dev);
+ ret = pci_vpd_wait(dev);
if (ret < 0)
break;
@@ -380,22 +453,32 @@ out:
return ret ? ret : count;
}
-static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
- const void *arg)
+static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
+ const void *arg)
{
- struct pci_vpd_pci22 *vpd =
- container_of(dev->vpd, struct pci_vpd_pci22, base);
+ struct pci_vpd *vpd = dev->vpd;
const u8 *buf = arg;
loff_t end = pos + count;
int ret = 0;
- if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
+ if (pos < 0 || (pos & 3) || (count & 3))
+ return -EINVAL;
+
+ if (!vpd->valid) {
+ vpd->valid = 1;
+ vpd->len = pci_vpd_size(dev, vpd->len);
+ }
+
+ if (vpd->len == 0)
+ return -EIO;
+
+ if (end > vpd->len)
return -EINVAL;
if (mutex_lock_killable(&vpd->lock))
return -EINTR;
- ret = pci_vpd_pci22_wait(dev);
+ ret = pci_vpd_wait(dev);
if (ret < 0)
goto out;
@@ -415,9 +498,9 @@ static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count
if (ret < 0)
break;
- vpd->busy = true;
+ vpd->busy = 1;
vpd->flag = 0;
- ret = pci_vpd_pci22_wait(dev);
+ ret = pci_vpd_wait(dev);
if (ret < 0)
break;
@@ -428,15 +511,23 @@ out:
return ret ? ret : count;
}
-static void pci_vpd_pci22_release(struct pci_dev *dev)
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
{
- kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
+ struct pci_vpd *vpd = dev->vpd;
+
+ if (len == 0 || len > PCI_VPD_MAX_SIZE)
+ return -EIO;
+
+ vpd->valid = 1;
+ vpd->len = len;
+
+ return 0;
}
-static const struct pci_vpd_ops pci_vpd_pci22_ops = {
- .read = pci_vpd_pci22_read,
- .write = pci_vpd_pci22_write,
- .release = pci_vpd_pci22_release,
+static const struct pci_vpd_ops pci_vpd_ops = {
+ .read = pci_vpd_read,
+ .write = pci_vpd_write,
+ .set_size = pci_vpd_set_size,
};
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -469,15 +560,29 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
return ret;
}
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ int ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_set_vpd_size(tdev, len);
+ pci_dev_put(tdev);
+ return ret;
+}
+
static const struct pci_vpd_ops pci_vpd_f0_ops = {
.read = pci_vpd_f0_read,
.write = pci_vpd_f0_write,
- .release = pci_vpd_pci22_release,
+ .set_size = pci_vpd_f0_set_size,
};
-int pci_vpd_pci22_init(struct pci_dev *dev)
+int pci_vpd_init(struct pci_dev *dev)
{
- struct pci_vpd_pci22 *vpd;
+ struct pci_vpd *vpd;
u8 cap;
cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
@@ -488,18 +593,24 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
if (!vpd)
return -ENOMEM;
- vpd->base.len = PCI_VPD_PCI22_SIZE;
+ vpd->len = PCI_VPD_MAX_SIZE;
if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
- vpd->base.ops = &pci_vpd_f0_ops;
+ vpd->ops = &pci_vpd_f0_ops;
else
- vpd->base.ops = &pci_vpd_pci22_ops;
+ vpd->ops = &pci_vpd_ops;
mutex_init(&vpd->lock);
vpd->cap = cap;
- vpd->busy = false;
- dev->vpd = &vpd->base;
+ vpd->busy = 0;
+ vpd->valid = 0;
+ dev->vpd = vpd;
return 0;
}
+void pci_vpd_release(struct pci_dev *dev)
+{
+ kfree(dev->vpd);
+}
+
/**
* pci_cfg_access_lock - Lock PCI config reads/writes
* @dev: pci device struct
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 89b3befc7155..6c9f5467bc5f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -271,6 +271,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
+void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }
+
/**
* pci_bus_add_device - start driver for a single device
* @dev: device to add
@@ -285,13 +287,19 @@ void pci_bus_add_device(struct pci_dev *dev)
* Can not put in pci_device_add yet because resources
* are not assigned yet for some devices.
*/
+ pcibios_bus_add_device(dev);
pci_fixup_device(pci_fixup_final, dev);
pci_create_sysfs_dev_files(dev);
pci_proc_attach_device(dev);
dev->match_driver = true;
retval = device_attach(&dev->dev);
- WARN_ON(retval < 0);
+ if (retval < 0) {
+ dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
+ pci_proc_detach_device(dev);
+ pci_remove_sysfs_dev_files(dev);
+ return;
+ }
dev->is_added = 1;
}
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d1cdd9c992ac..7a0780d56d2d 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -5,7 +5,6 @@ config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
select PCIE_DW
depends on OF && HAS_IOMEM && TI_PIPE3
- depends on BROKEN
help
Enables support for the PCIe controller in the DRA7xx SoC. There
are two instances of PCIe controller in DRA7xx. This controller can
@@ -17,6 +16,28 @@ config PCI_MVEBU
depends on ARM
depends on OF
+
+config PCIE_XILINX_NWL
+ bool "NWL PCIe Core"
+ depends on ARCH_ZYNQMP
+ select PCI_MSI_IRQ_DOMAIN if PCI_MSI
+ help
+ Say 'Y' here if you want kernel support for the Xilinx
+ NWL PCIe controller. The controller can act as a Root Port
+ or an Endpoint, but this option currently enables only
+ Root Port support.
+
+config PCIE_DW_PLAT
+ bool "Platform bus based DesignWare PCIe Controller"
+ select PCIE_DW
+ ---help---
+ This selects the DesignWare PCIe controller support. Select this if
+ you have a PCIe controller on Platform bus.
+
+ If you have a controller with this interface, say Y here.
+
+ If unsure, say N.
+
config PCIE_DW
bool
@@ -42,7 +63,7 @@ config PCI_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
depends on ARM
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
@@ -50,13 +71,17 @@ config PCI_RCAR_GEN2
config PCI_RCAR_GEN2_PCIE
bool "Renesas R-Car PCIe controller"
- depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
help
Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
+config PCI_HOST_COMMON
+ bool
+
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
depends on (ARM || ARM64) && OF
+ select PCI_HOST_COMMON
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
@@ -82,7 +107,7 @@ config PCI_KEYSTONE
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
- depends on ARCH_ZYNQ
+ depends on ARCH_ZYNQ || MICROBLAZE
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@@ -192,4 +217,18 @@ config PCIE_QCOM
PCIe controller uses the Designware core plus Qualcomm-specific
hardware wrappers.
+config PCI_HOST_THUNDER_PEM
+ bool "Cavium Thunder PCIe controller to off-chip devices"
+ depends on OF && ARM64
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
+
+config PCI_HOST_THUNDER_ECAM
+ bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
+ depends on OF && ARM64
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 7b2f20c6ccc6..d85b5faf9bbc 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,15 +1,19 @@
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
+obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
+obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
@@ -22,3 +26,5 @@ obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
+obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 923607bdabc5..2ca3a1f30ebf 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -108,7 +107,6 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
u32 reg;
- unsigned int retries;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "link is already up\n");
@@ -119,14 +117,7 @@ static int dra7xx_pcie_establish_link(struct pcie_port *pp)
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- for (retries = 0; retries < 1000; retries++) {
- if (dw_pcie_link_up(pp))
- return 0;
- usleep_range(10, 20);
- }
-
- dev_err(pp->dev, "link is not up\n");
- return -EINVAL;
+ return dw_pcie_wait_for_link(pp);
}
static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
index d997d22d4231..219976103efc 100644
--- a/drivers/pci/host/pci-exynos.c
+++ b/drivers/pci/host/pci-exynos.c
@@ -318,7 +318,6 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
u32 val;
- unsigned int retries;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "Link already up\n");
@@ -357,13 +356,8 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
PCIE_APP_LTSSM_ENABLE);
/* check if the link is up or not */
- for (retries = 0; retries < 10; retries++) {
- if (dw_pcie_link_up(pp)) {
- dev_info(pp->dev, "Link up\n");
- return 0;
- }
- mdelay(100);
- }
+ if (!dw_pcie_wait_for_link(pp))
+ return 0;
while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) {
val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED);
@@ -372,8 +366,7 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
/* power off phy */
exynos_pcie_power_off_phy(pp);
- dev_err(pp->dev, "PCIe Link Fail\n");
- return -EINVAL;
+ return -ETIMEDOUT;
}
static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
new file mode 100644
index 000000000000..e9f850f07968
--- /dev/null
+++ b/drivers/pci/host/pci-host-common.c
@@ -0,0 +1,194 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+#include "pci-host-common.h"
+
+static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
+{
+ pci_free_resource_list(&pci->resources);
+}
+
+static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
+{
+ int err, res_valid = 0;
+ struct device *dev = pci->host.dev.parent;
+ struct device_node *np = dev->of_node;
+ resource_size_t iobase;
+ struct resource_entry *win;
+
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
+ &iobase);
+ if (err)
+ return err;
+
+ resource_list_for_each_entry(win, &pci->resources) {
+ struct resource *parent, *res = win->res;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ parent = &ioport_resource;
+ err = pci_remap_iospace(res, iobase);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+ break;
+ case IORESOURCE_BUS:
+ pci->cfg.bus_range = res;
+ default:
+ continue;
+ }
+
+ err = devm_request_resource(dev, parent, res);
+ if (err)
+ goto out_release_res;
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ gen_pci_release_of_pci_ranges(pci);
+ return err;
+}
+
+static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
+{
+ int err;
+ u8 bus_max;
+ resource_size_t busn;
+ struct resource *bus_range;
+ struct device *dev = pci->host.dev.parent;
+ struct device_node *np = dev->of_node;
+ u32 sz = 1 << pci->cfg.ops->bus_shift;
+
+ err = of_address_to_resource(np, 0, &pci->cfg.res);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ /* Limit the bus-range to fit within reg */
+ bus_max = pci->cfg.bus_range->start +
+ (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
+ pci->cfg.bus_range->end = min_t(resource_size_t,
+ pci->cfg.bus_range->end, bus_max);
+
+ pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range),
+ sizeof(*pci->cfg.win), GFP_KERNEL);
+ if (!pci->cfg.win)
+ return -ENOMEM;
+
+ /* Map our Configuration Space windows */
+ if (!devm_request_mem_region(dev, pci->cfg.res.start,
+ resource_size(&pci->cfg.res),
+ "Configuration Space"))
+ return -ENOMEM;
+
+ bus_range = pci->cfg.bus_range;
+ for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
+ u32 idx = busn - bus_range->start;
+
+ pci->cfg.win[idx] = devm_ioremap(dev,
+ pci->cfg.res.start + idx * sz,
+ sz);
+ if (!pci->cfg.win[idx])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
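For reference, each per-bus window carved out above is 1 << bus_shift bytes; the generic host's CAM and ECAM accessors use bus_shift values of 16 and 20 respectively, and an individual register then sits at a fixed offset inside the bus window. A small sketch of that offset arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Offset of (devfn, reg) inside one per-bus config window. */
static uint32_t cam_offset(uint8_t devfn, uint16_t reg)
{
	return ((uint32_t)devfn << 8) | reg;	/* bus_shift = 16 */
}

static uint32_t ecam_offset(uint8_t devfn, uint16_t reg)
{
	return ((uint32_t)devfn << 12) | reg;	/* bus_shift = 20 */
}

int main(void)
{
	uint8_t devfn = (3 << 3) | 1;	/* device 3, function 1 */

	printf("CAM window size:  0x%x\n", 1u << 16);
	printf("ECAM window size: 0x%x\n", 1u << 20);
	printf("CAM offset:  0x%05x\n", cam_offset(devfn, 0x10));
	printf("ECAM offset: 0x%05x\n", ecam_offset(devfn, 0x10));
	return 0;
}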
+
+int pci_host_common_probe(struct platform_device *pdev,
+ struct gen_pci *pci)
+{
+ int err;
+ const char *type;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_bus *bus, *child;
+
+ type = of_get_property(np, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ of_pci_check_probe_only();
+
+ pci->host.dev.parent = dev;
+ INIT_LIST_HEAD(&pci->host.windows);
+ INIT_LIST_HEAD(&pci->resources);
+
+ /* Parse our PCI ranges and request their resources */
+ err = gen_pci_parse_request_of_pci_ranges(pci);
+ if (err)
+ return err;
+
+ /* Parse and map our Configuration Space windows */
+ err = gen_pci_parse_map_cfg_windows(pci);
+ if (err) {
+ gen_pci_release_of_pci_ranges(pci);
+ return err;
+ }
+
+ /* Do not reassign resources if probe only */
+ if (!pci_has_flag(PCI_PROBE_ONLY))
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+
+
+ bus = pci_scan_root_bus(dev, pci->cfg.bus_range->start,
+ &pci->cfg.ops->ops, pci, &pci->resources);
+ if (!bus) {
+ dev_err(dev, "Scanning rootbus failed");
+ return -ENODEV;
+ }
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ }
+
+ pci_bus_add_devices(bus);
+ return 0;
+}
+
+MODULE_DESCRIPTION("Generic PCI host driver common code");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-host-common.h b/drivers/pci/host/pci-host-common.h
new file mode 100644
index 000000000000..09f3fa0a55d7
--- /dev/null
+++ b/drivers/pci/host/pci-host-common.h
@@ -0,0 +1,47 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _PCI_HOST_COMMON_H
+#define _PCI_HOST_COMMON_H
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+struct gen_pci_cfg_bus_ops {
+ u32 bus_shift;
+ struct pci_ops ops;
+};
+
+struct gen_pci_cfg_windows {
+ struct resource res;
+ struct resource *bus_range;
+ void __iomem **win;
+
+ struct gen_pci_cfg_bus_ops *ops;
+};
+
+struct gen_pci {
+ struct pci_host_bridge host;
+ struct gen_pci_cfg_windows cfg;
+ struct list_head resources;
+};
+
+int pci_host_common_probe(struct platform_device *pdev,
+ struct gen_pci *pci);
+
+#endif /* _PCI_HOST_COMMON_H */
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index 1652bc70b145..e8aa78faa16d 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -25,24 +25,7 @@
#include <linux/of_pci.h>
#include <linux/platform_device.h>
-struct gen_pci_cfg_bus_ops {
- u32 bus_shift;
- struct pci_ops ops;
-};
-
-struct gen_pci_cfg_windows {
- struct resource res;
- struct resource *bus_range;
- void __iomem **win;
-
- struct gen_pci_cfg_bus_ops *ops;
-};
-
-struct gen_pci {
- struct pci_host_bridge host;
- struct gen_pci_cfg_windows cfg;
- struct list_head resources;
-};
+#include "pci-host-common.h"
static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
unsigned int devfn,
@@ -93,175 +76,19 @@ static const struct of_device_id gen_pci_of_match[] = {
};
MODULE_DEVICE_TABLE(of, gen_pci_of_match);
-static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
-{
- pci_free_resource_list(&pci->resources);
-}
-
-static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
-{
- int err, res_valid = 0;
- struct device *dev = pci->host.dev.parent;
- struct device_node *np = dev->of_node;
- resource_size_t iobase;
- struct resource_entry *win;
-
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
- &iobase);
- if (err)
- return err;
-
- resource_list_for_each_entry(win, &pci->resources) {
- struct resource *parent, *res = win->res;
-
- switch (resource_type(res)) {
- case IORESOURCE_IO:
- parent = &ioport_resource;
- err = pci_remap_iospace(res, iobase);
- if (err) {
- dev_warn(dev, "error %d: failed to map resource %pR\n",
- err, res);
- continue;
- }
- break;
- case IORESOURCE_MEM:
- parent = &iomem_resource;
- res_valid |= !(res->flags & IORESOURCE_PREFETCH);
- break;
- case IORESOURCE_BUS:
- pci->cfg.bus_range = res;
- default:
- continue;
- }
-
- err = devm_request_resource(dev, parent, res);
- if (err)
- goto out_release_res;
- }
-
- if (!res_valid) {
- dev_err(dev, "non-prefetchable memory resource required\n");
- err = -EINVAL;
- goto out_release_res;
- }
-
- return 0;
-
-out_release_res:
- gen_pci_release_of_pci_ranges(pci);
- return err;
-}
-
-static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
-{
- int err;
- u8 bus_max;
- resource_size_t busn;
- struct resource *bus_range;
- struct device *dev = pci->host.dev.parent;
- struct device_node *np = dev->of_node;
- u32 sz = 1 << pci->cfg.ops->bus_shift;
-
- err = of_address_to_resource(np, 0, &pci->cfg.res);
- if (err) {
- dev_err(dev, "missing \"reg\" property\n");
- return err;
- }
-
- /* Limit the bus-range to fit within reg */
- bus_max = pci->cfg.bus_range->start +
- (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
- pci->cfg.bus_range->end = min_t(resource_size_t,
- pci->cfg.bus_range->end, bus_max);
-
- pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range),
- sizeof(*pci->cfg.win), GFP_KERNEL);
- if (!pci->cfg.win)
- return -ENOMEM;
-
- /* Map our Configuration Space windows */
- if (!devm_request_mem_region(dev, pci->cfg.res.start,
- resource_size(&pci->cfg.res),
- "Configuration Space"))
- return -ENOMEM;
-
- bus_range = pci->cfg.bus_range;
- for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
- u32 idx = busn - bus_range->start;
-
- pci->cfg.win[idx] = devm_ioremap(dev,
- pci->cfg.res.start + idx * sz,
- sz);
- if (!pci->cfg.win[idx])
- return -ENOMEM;
- }
-
- return 0;
-}
-
static int gen_pci_probe(struct platform_device *pdev)
{
- int err;
- const char *type;
- const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
+ const struct of_device_id *of_id;
struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- struct pci_bus *bus, *child;
if (!pci)
return -ENOMEM;
- type = of_get_property(np, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
- of_pci_check_probe_only();
-
- of_id = of_match_node(gen_pci_of_match, np);
+ of_id = of_match_node(gen_pci_of_match, dev->of_node);
pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
- pci->host.dev.parent = dev;
- INIT_LIST_HEAD(&pci->host.windows);
- INIT_LIST_HEAD(&pci->resources);
-
- /* Parse our PCI ranges and request their resources */
- err = gen_pci_parse_request_of_pci_ranges(pci);
- if (err)
- return err;
-
- /* Parse and map our Configuration Space windows */
- err = gen_pci_parse_map_cfg_windows(pci);
- if (err) {
- gen_pci_release_of_pci_ranges(pci);
- return err;
- }
-
- /* Do not reassign resources if probe only */
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
-
-
- bus = pci_scan_root_bus(dev, pci->cfg.bus_range->start,
- &pci->cfg.ops->ops, pci, &pci->resources);
- if (!bus) {
- dev_err(dev, "Scanning rootbus failed");
- return -ENODEV;
- }
-
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
-
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
- }
- pci_bus_add_devices(bus);
- return 0;
+ return pci_host_common_probe(pdev, pci);
}
static struct platform_driver gen_pci_driver = {
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
new file mode 100644
index 000000000000..ed651baa7c50
--- /dev/null
+++ b/drivers/pci/host/pci-hyperv.c
@@ -0,0 +1,2346 @@
+/*
+ * Copyright (c) Microsoft Corporation.
+ *
+ * Author:
+ * Jake Oshins <jakeo@microsoft.com>
+ *
+ * This driver acts as a paravirtual front-end for PCI Express root buses.
+ * When a PCI Express function (either an entire device or an SR-IOV
+ * Virtual Function) is being passed through to the VM, this driver exposes
+ * a new bus to the guest VM. This is modeled as a root PCI bus because
+ * no bridges are being exposed to the VM. In fact, with a "Generation 2"
+ * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
+ * until a device has been exposed using this driver.
+ *
+ * Each root PCI bus has its own PCI domain, which is called "Segment" in
+ * the PCI Firmware Specifications. Thus while each device passed through
+ * to the VM using this front-end will appear at "device 0", the domain will
+ * be unique. Typically, each bus will have one PCI function on it, though
+ * this driver does support more than one.
+ *
+ * In order to map the interrupts from the device through to the guest VM,
+ * this driver also implements an IRQ Domain, which handles interrupts (either
+ * MSI or MSI-X) associated with the functions on the bus. As interrupts are
+ * set up, torn down, or reaffined, this driver communicates with the
+ * underlying hypervisor to adjust the mappings in the I/O MMU so that each
+ * interrupt will be delivered to the correct virtual processor at the right
+ * vector. This driver does not support level-triggered (line-based)
+ * interrupts, and will report that the Interrupt Line register in the
+ * function's configuration space is zero.
+ *
+ * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
+ * facilities. For instance, the configuration space of a function exposed
+ * by Hyper-V is mapped into a single page of memory space, and the
+ * read and write handlers for config space must be aware of this mechanism.
+ * Similarly, device setup and teardown involves messages sent to and from
+ * the PCI back-end driver in Hyper-V.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/irqdomain.h>
+#include <asm/irqdomain.h>
+#include <asm/apic.h>
+#include <linux/msi.h>
+#include <linux/hyperv.h>
+#include <asm/mshyperv.h>
+
+/*
+ * Protocol versions. The low word is the minor version, the high word the
+ * major version.
+ */
+
+#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
+#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
+#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xffff)
+
+enum {
+ PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),
+ PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
+};
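Given the comment above (minor version in the low word, major in the high word), protocol version 1.1 packs to 0x00010001. A quick standalone check, with the macros restated outside the driver:

#include <stdio.h>
#include <stdint.h>

#define MAKE_VERSION(major, minor)	((uint32_t)(((major) << 16) | (minor)))
#define MAJOR_VERSION(v)		((uint32_t)(v) >> 16)
#define MINOR_VERSION(v)		((uint32_t)(v) & 0xffff)

int main(void)
{
	uint32_t v = MAKE_VERSION(1, 1);

	printf("packed: 0x%08x\n", v);			/* 0x00010001 */
	printf("major: %u, minor: %u\n",
	       MAJOR_VERSION(v), MINOR_VERSION(v));	/* 1, 1 */
	return 0;
}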
+
+#define PCI_CONFIG_MMIO_LENGTH 0x2000
+#define CFG_PAGE_OFFSET 0x1000
+#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
+
+#define MAX_SUPPORTED_MSI_MESSAGES 0x400
+
+/*
+ * Message Types
+ */
+
+enum pci_message_type {
+ /*
+ * Version 1.1
+ */
+ PCI_MESSAGE_BASE = 0x42490000,
+ PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0,
+ PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1,
+ PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4,
+ PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
+ PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6,
+ PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7,
+ PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8,
+ PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9,
+ PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA,
+ PCI_EJECT = PCI_MESSAGE_BASE + 0xB,
+ PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC,
+ PCI_REENABLE = PCI_MESSAGE_BASE + 0xD,
+ PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE,
+ PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF,
+ PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10,
+ PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11,
+ PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12,
+ PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
+ PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
+ PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
+ PCI_MESSAGE_MAXIMUM
+};
+
+/*
+ * Structures defining the virtual PCI Express protocol.
+ */
+
+union pci_version {
+ struct {
+ u16 minor_version;
+ u16 major_version;
+ } parts;
+ u32 version;
+} __packed;
+
+/*
+ * Function numbers are 8 bits wide on Express, as interpreted through ARI,
+ * which is all this driver does. This representation is the one used in
+ * Windows, which is what is expected when sending this back and forth with
+ * the Hyper-V parent partition.
+ */
+union win_slot_encoding {
+ struct {
+ u32 func:8;
+ u32 reserved:24;
+ } bits;
+ u32 slot;
+} __packed;
+
+/*
+ * Pretty much as defined in the PCI Specifications.
+ */
+struct pci_function_description {
+ u16 v_id; /* vendor ID */
+ u16 d_id; /* device ID */
+ u8 rev;
+ u8 prog_intf;
+ u8 subclass;
+ u8 base_class;
+ u32 subsystem_id;
+ union win_slot_encoding win_slot;
+ u32 ser; /* serial number */
+} __packed;
+
+/**
+ * struct hv_msi_desc
+ * @vector: IDT entry
+ * @delivery_mode: As defined in Intel's Programmer's
+ * Reference Manual, Volume 3, Chapter 8.
+ * @vector_count: Number of contiguous entries in the
+ * Interrupt Descriptor Table that are
+ * occupied by this Message-Signaled
+ * Interrupt. For "MSI", as first defined
+ * in PCI 2.2, this can be between 1 and
+ * 32. For "MSI-X," as first defined in PCI
+ * 3.0, this must be 1, as each MSI-X table
+ * entry would have its own descriptor.
+ * @reserved: Empty space
+ * @cpu_mask: All the target virtual processors.
+ */
+struct hv_msi_desc {
+ u8 vector;
+ u8 delivery_mode;
+ u16 vector_count;
+ u32 reserved;
+ u64 cpu_mask;
+} __packed;
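Illustration only: how a driver might fill this descriptor for a 4-vector multi-message MSI block. The values below (vector 0x30, fixed delivery, virtual processor 0) are hypothetical and not taken from the patch:

#include <stdint.h>
#include <stdio.h>

struct hv_msi_desc_sketch {
	uint8_t  vector;
	uint8_t  delivery_mode;
	uint16_t vector_count;
	uint32_t reserved;
	uint64_t cpu_mask;
};

int main(void)
{
	/* Hypothetical: 4 contiguous IDT entries from 0x30, fixed
	 * delivery mode (0), targeting virtual processor 0 only. */
	struct hv_msi_desc_sketch d = {
		.vector		= 0x30,
		.delivery_mode	= 0,		/* fixed */
		.vector_count	= 4,		/* MSI; must be 1 for MSI-X */
		.cpu_mask	= 1ull << 0,	/* VP 0 */
	};

	printf("vector=0x%02x count=%u mask=0x%llx\n",
	       d.vector, d.vector_count, (unsigned long long)d.cpu_mask);
	return 0;
}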
+
+/**
+ * struct tran_int_desc
+ * @reserved: unused, padding
+ * @vector_count: same as in hv_msi_desc
+ * @data: This is the "data payload" value that is
+ * written by the device when it generates
+ * a message-signaled interrupt, either MSI
+ * or MSI-X.
+ * @address: This is the address to which the data
+ * payload is written on interrupt
+ * generation.
+ */
+struct tran_int_desc {
+ u16 reserved;
+ u16 vector_count;
+ u32 data;
+ u64 address;
+} __packed;
+
+/*
+ * A generic message format for virtual PCI.
+ * Specific message formats are defined later in the file.
+ */
+
+struct pci_message {
+ u32 message_type;
+} __packed;
+
+struct pci_child_message {
+ u32 message_type;
+ union win_slot_encoding wslot;
+} __packed;
+
+struct pci_incoming_message {
+ struct vmpacket_descriptor hdr;
+ struct pci_message message_type;
+} __packed;
+
+struct pci_response {
+ struct vmpacket_descriptor hdr;
+ s32 status; /* negative values are failures */
+} __packed;
+
+struct pci_packet {
+ void (*completion_func)(void *context, struct pci_response *resp,
+ int resp_packet_size);
+ void *compl_ctxt;
+ struct pci_message message;
+};
+
+/*
+ * Specific message types supporting the PCI protocol.
+ */
+
+/*
+ * Version negotiation message. Sent from the guest to the host.
+ * The guest is free to try different versions until the host
+ * accepts the version.
+ *
+ * protocol_version: The protocol version requested.
+ */
+
+struct pci_version_request {
+ struct pci_message message_type;
+ enum pci_message_type protocol_version;
+} __packed;
+
+/*
+ * Bus D0 Entry. This is sent from the guest to the host when the virtual
+ * bus (PCI Express port) is ready for action.
+ */
+
+struct pci_bus_d0_entry {
+ struct pci_message message_type;
+ u32 reserved;
+ u64 mmio_base;
+} __packed;
+
+struct pci_bus_relations {
+ struct pci_incoming_message incoming;
+ u32 device_count;
+ struct pci_function_description func[1];
+} __packed;
+
+struct pci_q_res_req_response {
+ struct vmpacket_descriptor hdr;
+ s32 status; /* negative values are failures */
+ u32 probed_bar[6];
+} __packed;
+
+struct pci_set_power {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ u32 power_state; /* In Windows terms */
+ u32 reserved;
+} __packed;
+
+struct pci_set_power_response {
+ struct vmpacket_descriptor hdr;
+ s32 status; /* negative values are failures */
+ union win_slot_encoding wslot;
+ u32 resultant_state; /* In Windows terms */
+ u32 reserved;
+} __packed;
+
+struct pci_resources_assigned {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ u8 memory_range[0x14][6]; /* not used here */
+ u32 msi_descriptors;
+ u32 reserved[4];
+} __packed;
+
+struct pci_create_interrupt {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct hv_msi_desc int_desc;
+} __packed;
+
+struct pci_create_int_response {
+ struct pci_response response;
+ u32 reserved;
+ struct tran_int_desc int_desc;
+} __packed;
+
+struct pci_delete_interrupt {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct tran_int_desc int_desc;
+} __packed;
+
+struct pci_dev_incoming {
+ struct pci_incoming_message incoming;
+ union win_slot_encoding wslot;
+} __packed;
+
+struct pci_eject_response {
+ u32 message_type;
+ union win_slot_encoding wslot;
+ u32 status;
+} __packed;
+
+static int pci_ring_size = (4 * PAGE_SIZE);
+
+/*
+ * Definitions for the interrupt steering hypercall.
+ */
+#define HV_PARTITION_ID_SELF ((u64)-1)
+#define HVCALL_RETARGET_INTERRUPT 0x7e
+
+struct retarget_msi_interrupt {
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ u32 source; /* 1 for MSI(-X) */
+ u32 reserved1;
+ u32 address;
+ u32 data;
+ u64 reserved2;
+ u32 vector;
+ u32 flags;
+ u64 vp_mask;
+} __packed;
+
+/*
+ * Driver specific state.
+ */
+
+enum hv_pcibus_state {
+ hv_pcibus_init = 0,
+ hv_pcibus_probed,
+ hv_pcibus_installed,
+ hv_pcibus_maximum
+};
+
+struct hv_pcibus_device {
+ struct pci_sysdata sysdata;
+ enum hv_pcibus_state state;
+ atomic_t remove_lock;
+ struct hv_device *hdev;
+ resource_size_t low_mmio_space;
+ resource_size_t high_mmio_space;
+ struct resource *mem_config;
+ struct resource *low_mmio_res;
+ struct resource *high_mmio_res;
+ struct completion *survey_event;
+ struct completion remove_event;
+ struct pci_bus *pci_bus;
+ spinlock_t config_lock; /* Avoid two threads writing index page */
+ spinlock_t device_list_lock; /* Protect lists below */
+ void __iomem *cfg_addr;
+
+ struct semaphore enum_sem;
+ struct list_head resources_for_children;
+
+ struct list_head children;
+ struct list_head dr_list;
+ struct work_struct wrk;
+
+ struct msi_domain_info msi_info;
+ struct msi_controller msi_chip;
+ struct irq_domain *irq_domain;
+};
+
+/*
+ * Tracks "Device Relations" messages from the host, which must be both
+ * processed in order and deferred so that they don't run in the context
+ * of the incoming packet callback.
+ */
+struct hv_dr_work {
+ struct work_struct wrk;
+ struct hv_pcibus_device *bus;
+};
+
+struct hv_dr_state {
+ struct list_head list_entry;
+ u32 device_count;
+ struct pci_function_description func[1];
+};
+
+enum hv_pcichild_state {
+ hv_pcichild_init = 0,
+ hv_pcichild_requirements,
+ hv_pcichild_resourced,
+ hv_pcichild_ejecting,
+ hv_pcichild_maximum
+};
+
+enum hv_pcidev_ref_reason {
+ hv_pcidev_ref_invalid = 0,
+ hv_pcidev_ref_initial,
+ hv_pcidev_ref_by_slot,
+ hv_pcidev_ref_packet,
+ hv_pcidev_ref_pnp,
+ hv_pcidev_ref_childlist,
+ hv_pcidev_irqdata,
+ hv_pcidev_ref_max
+};
+
+struct hv_pci_dev {
+ /* List protected by pci_rescan_remove_lock */
+ struct list_head list_entry;
+ atomic_t refs;
+ enum hv_pcichild_state state;
+ struct pci_function_description desc;
+ bool reported_missing;
+ struct hv_pcibus_device *hbus;
+ struct work_struct wrk;
+
+ /*
+ * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
+ * read it back, for each of the BAR offsets within config space.
+ */
+ u32 probed_bar[6];
+};
+
+struct hv_pci_compl {
+ struct completion host_event;
+ s32 completion_status;
+};
+
+/**
+ * hv_pci_generic_compl() - Invoked for a completion packet
+ * @context: Set up by the sender of the packet.
+ * @resp: The response packet
+ * @resp_packet_size: Size in bytes of the packet
+ *
+ * This function is used to trigger an event and report status
+ * for any message for which the completion packet contains a
+ * status and nothing else.
+ */
+static
+void
+hv_pci_generic_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct hv_pci_compl *comp_pkt = context;
+
+ if (resp_packet_size >= offsetofend(struct pci_response, status))
+ comp_pkt->completion_status = resp->status;
+ complete(&comp_pkt->host_event);
+}
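+
+/*
+ * Typical usage, as seen in the senders later in this file (a sketch
+ * of the existing pattern, not a new API):
+ *
+ *	struct hv_pci_compl comp_pkt;
+ *
+ *	init_completion(&comp_pkt.host_event);
+ *	pkt->completion_func = hv_pci_generic_compl;
+ *	pkt->compl_ctxt = &comp_pkt;
+ *	vmbus_sendpacket(hdev->channel, ..., (unsigned long)pkt,
+ *			 VM_PKT_DATA_INBAND,
+ *			 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ *	wait_for_completion(&comp_pkt.host_event);
+ */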
+
+static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
+ u32 wslot);
+static void get_pcichild(struct hv_pci_dev *hv_pcidev,
+ enum hv_pcidev_ref_reason reason);
+static void put_pcichild(struct hv_pci_dev *hv_pcidev,
+ enum hv_pcidev_ref_reason reason);
+
+static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+
+/**
+ * devfn_to_wslot() - Convert from Linux PCI slot to Windows
+ * @devfn: The Linux representation of PCI slot
+ *
+ * Windows uses a slightly different representation of PCI slot.
+ *
+ * Return: The Windows representation
+ */
+static u32 devfn_to_wslot(int devfn)
+{
+ union win_slot_encoding wslot;
+
+ wslot.slot = 0;
+ wslot.bits.func = PCI_SLOT(devfn) | (PCI_FUNC(devfn) << 5);
+
+ return wslot.slot;
+}
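+
+/*
+ * A worked example (illustrative only): for devfn PCI_DEVFN(2, 1),
+ * PCI_SLOT() is 2 and PCI_FUNC() is 1, so bits.func becomes
+ * 2 | (1 << 5) and devfn_to_wslot() returns 0x22.
+ */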
+
+/**
+ * wslot_to_devfn() - Convert from Windows PCI slot to Linux
+ * @wslot: The Windows representation of PCI slot
+ *
+ * Windows uses a slightly different representation of PCI slot.
+ *
+ * Return: The Linux representation
+ */
+static int wslot_to_devfn(u32 wslot)
+{
+ union win_slot_encoding slot_no;
+
+ slot_no.slot = wslot;
+ return PCI_DEVFN(0, slot_no.bits.func);
+}
+
+/*
+ * PCI Configuration Space for these root PCI buses is implemented as a pair
+ * of pages in memory-mapped I/O space. Writing to the first page chooses
+ * the PCI function being written or read. Once the first page has been
+ * written to, the following page maps in the entire configuration space of
+ * the function.
+ */
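+
+/*
+ * A minimal sketch of that access sequence, matching the accessors
+ * below (assuming CFG_PAGE_OFFSET, defined earlier in this file, is
+ * the offset of the second page):
+ *
+ *	writel(win_slot, hbus->cfg_addr);	(selects the function)
+ *	val = readl(hbus->cfg_addr + CFG_PAGE_OFFSET + where);
+ */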
+
+/**
+ * _hv_pcifront_read_config() - Internal PCI config read
+ * @hpdev: The PCI driver's representation of the device
+ * @where: Offset within config space
+ * @size: Size of the transfer
+ * @val: Pointer to the buffer receiving the data
+ */
+static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
+ int size, u32 *val)
+{
+ unsigned long flags;
+ void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
+
+ /*
+ * If the attempt is to read the IDs or the ROM BAR, simulate that.
+ */
+ if (where + size <= PCI_COMMAND) {
+ memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
+ } else if (where >= PCI_CLASS_REVISION && where + size <=
+ PCI_CACHE_LINE_SIZE) {
+ memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
+ PCI_CLASS_REVISION, size);
+ } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
+ PCI_ROM_ADDRESS) {
+ memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
+ PCI_SUBSYSTEM_VENDOR_ID, size);
+ } else if (where >= PCI_ROM_ADDRESS && where + size <=
+ PCI_CAPABILITY_LIST) {
+ /* ROM BARs are unimplemented */
+ *val = 0;
+ } else if (where >= PCI_INTERRUPT_LINE && where + size <=
+ PCI_INTERRUPT_PIN) {
+ /*
+ * Interrupt Line and Interrupt PIN are hard-wired to zero
+ * because this front-end only supports message-signaled
+ * interrupts.
+ */
+ *val = 0;
+ } else if (where + size <= CFG_PAGE_SIZE) {
+ spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
+ /* Choose the function to be read. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+ /* Read from that function's config space. */
+ switch (size) {
+ case 1:
+ *val = readb(addr);
+ break;
+ case 2:
+ *val = readw(addr);
+ break;
+ default:
+ *val = readl(addr);
+ break;
+ }
+ spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+ } else {
+ dev_err(&hpdev->hbus->hdev->device,
+ "Attempt to read beyond a function's config space.\n");
+ }
+}
+
+/**
+ * _hv_pcifront_write_config() - Internal PCI config write
+ * @hpdev: The PCI driver's representation of the device
+ * @where: Offset within config space
+ * @size: Size of the transfer
+ * @val: The data being transferred
+ */
+static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
+ int size, u32 val)
+{
+ unsigned long flags;
+ void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
+
+ if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
+ where + size <= PCI_CAPABILITY_LIST) {
+ /* SSIDs and ROM BARs are read-only */
+ } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
+ spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
+ /* Choose the function to be written. (See comment above) */
+ writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+ /* Write to that function's config space. */
+ switch (size) {
+ case 1:
+ writeb(val, addr);
+ break;
+ case 2:
+ writew(val, addr);
+ break;
+ default:
+ writel(val, addr);
+ break;
+ }
+ spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+ } else {
+ dev_err(&hpdev->hbus->hdev->device,
+ "Attempt to write beyond a function's config space.\n");
+ }
+}
+
+/**
+ * hv_pcifront_read_config() - Read configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be read
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ * PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
+ struct hv_pci_dev *hpdev;
+
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
+ if (!hpdev)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ _hv_pcifront_read_config(hpdev, where, size, val);
+
+ put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/**
+ * hv_pcifront_write_config() - Write configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be written to device
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ * PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
+ struct hv_pci_dev *hpdev;
+
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
+ if (!hpdev)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ _hv_pcifront_write_config(hpdev, where, size, val);
+
+ put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCIe operations */
+static struct pci_ops hv_pcifront_ops = {
+ .read = hv_pcifront_read_config,
+ .write = hv_pcifront_write_config,
+};
+
+/* Interrupt management hooks */
+static void hv_int_desc_free(struct hv_pci_dev *hpdev,
+ struct tran_int_desc *int_desc)
+{
+ struct pci_delete_interrupt *int_pkt;
+ struct {
+ struct pci_packet pkt;
+ u8 buffer[sizeof(struct pci_delete_interrupt) -
+ sizeof(struct pci_message)];
+ } ctxt;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt->message_type.message_type =
+ PCI_DELETE_INTERRUPT_MESSAGE;
+ int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+ int_pkt->int_desc = *int_desc;
+ vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
+ (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
+ kfree(int_desc);
+}
+
+/**
+ * hv_msi_free() - Free the MSI.
+ * @domain: The interrupt domain pointer
+ * @info: Extra MSI-related context
+ * @irq: Identifies the IRQ.
+ *
+ * The Hyper-V parent partition and hypervisor are tracking the
+ * messages that are in use, keeping the interrupt redirection
+ * table up to date. This callback sends a message that frees
+ * the IRT entry and related tracking nonsense.
+ */
+static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
+ unsigned int irq)
+{
+ struct hv_pcibus_device *hbus;
+ struct hv_pci_dev *hpdev;
+ struct pci_dev *pdev;
+ struct tran_int_desc *int_desc;
+ struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
+ struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
+
+ pdev = msi_desc_to_pci_dev(msi);
+ hbus = info->data;
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev)
+ return;
+
+ int_desc = irq_data_get_irq_chip_data(irq_data);
+ if (int_desc) {
+ irq_data->chip_data = NULL;
+ hv_int_desc_free(hpdev, int_desc);
+ }
+
+ put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+}
+
+static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
+ bool force)
+{
+ struct irq_data *parent = data->parent_data;
+
+ return parent->chip->irq_set_affinity(parent, dest, force);
+}
+
+void hv_irq_mask(struct irq_data *data)
+{
+ pci_msi_mask_irq(data);
+}
+
+/**
+ * hv_irq_unmask() - "Unmask" the IRQ by setting its current
+ * affinity.
+ * @data: Describes the IRQ
+ *
+ * Build a new destination for the MSI and make a hypercall to
+ * update the Interrupt Redirection Table. "Device Logical ID"
+ * is built out of this PCI bus's instance GUID and the function
+ * number of the device.
+ */
+void hv_irq_unmask(struct irq_data *data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct retarget_msi_interrupt params;
+ struct hv_pcibus_device *hbus;
+ struct cpumask *dest;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev;
+ int cpu;
+
+ dest = irq_data_get_affinity_mask(data);
+ pdev = msi_desc_to_pci_dev(msi_desc);
+ pbus = pdev->bus;
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+
+ memset(&params, 0, sizeof(params));
+ params.partition_id = HV_PARTITION_ID_SELF;
+ params.source = 1; /* MSI(-X) */
+ params.address = msi_desc->msg.address_lo;
+ params.data = msi_desc->msg.data;
+ params.device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+ (hbus->hdev->dev_instance.b[4] << 16) |
+ (hbus->hdev->dev_instance.b[7] << 8) |
+ (hbus->hdev->dev_instance.b[6] & 0xf8) |
+ PCI_FUNC(pdev->devfn);
+ params.vector = cfg->vector;
+
+ for_each_cpu_and(cpu, dest, cpu_online_mask)
+ params.vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+
+ hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, &params, NULL);
+
+ pci_msi_unmask_irq(data);
+}
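+
+/*
+ * As an example, if the affinity mask above covers online CPUs 0 and 2,
+ * and the virtual processor numbers happen to match the CPU numbers (a
+ * hypothetical 1:1 mapping), vp_mask ends up as 0x5.
+ */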
+
+struct compose_comp_ctxt {
+ struct hv_pci_compl comp_pkt;
+ struct tran_int_desc int_desc;
+};
+
+static void hv_pci_compose_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct compose_comp_ctxt *comp_pkt = context;
+ struct pci_create_int_response *int_resp =
+ (struct pci_create_int_response *)resp;
+
+ comp_pkt->comp_pkt.completion_status = resp->status;
+ comp_pkt->int_desc = int_resp->int_desc;
+ complete(&comp_pkt->comp_pkt.host_event);
+}
+
+/**
+ * hv_compose_msi_msg() - Supplies a valid MSI address/data
+ * @data: Everything about this MSI
+ * @msg: Buffer that is filled in by this function
+ *
+ * This function unpacks the IRQ looking for target CPU set, IDT
+ * vector and mode and sends a message to the parent partition
+ * asking for a mapping for that tuple in this partition. The
+ * response supplies a data value and address to which that data
+ * should be written to trigger that interrupt.
+ */
+static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+ struct hv_pcibus_device *hbus;
+ struct hv_pci_dev *hpdev;
+ struct pci_bus *pbus;
+ struct pci_dev *pdev;
+ struct pci_create_interrupt *int_pkt;
+ struct compose_comp_ctxt comp;
+ struct tran_int_desc *int_desc;
+ struct cpumask *affinity;
+ struct {
+ struct pci_packet pkt;
+ u8 buffer[sizeof(struct pci_create_interrupt) -
+ sizeof(struct pci_message)];
+ } ctxt;
+ int cpu;
+ int ret;
+
+ pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
+ pbus = pdev->bus;
+ hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev)
+ goto return_null_message;
+
+ /* Free any previous message that might have already been composed. */
+ if (data->chip_data) {
+ int_desc = data->chip_data;
+ data->chip_data = NULL;
+ hv_int_desc_free(hpdev, int_desc);
+ }
+
+ int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL);
+ if (!int_desc)
+ goto drop_reference;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ init_completion(&comp.comp_pkt.host_event);
+ ctxt.pkt.completion_func = hv_pci_compose_compl;
+ ctxt.pkt.compl_ctxt = &comp;
+ int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
+ int_pkt->message_type.message_type = PCI_CREATE_INTERRUPT_MESSAGE;
+ int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+ int_pkt->int_desc.vector = cfg->vector;
+ int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.delivery_mode =
+ (apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0;
+
+ /*
+ * This bit doesn't have to work on machines with more than 64
+ * processors because Hyper-V only supports 64 in a guest.
+ */
+ affinity = irq_data_get_affinity_mask(data);
+ for_each_cpu_and(cpu, affinity, cpu_online_mask) {
+ int_pkt->int_desc.cpu_mask |=
+ (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ }
+
+ ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
+ sizeof(*int_pkt), (unsigned long)&ctxt.pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ wait_for_completion(&comp.comp_pkt.host_event);
+
+ if (comp.comp_pkt.completion_status < 0) {
+ dev_err(&hbus->hdev->device,
+ "Request for interrupt failed: 0x%x",
+ comp.comp_pkt.completion_status);
+ goto free_int_desc;
+ }
+
+ /*
+ * Record the assignment so that this can be unwound later. Using
+ * irq_set_chip_data() here would be appropriate, but the lock it takes
+ * is already held.
+ */
+ *int_desc = comp.int_desc;
+ data->chip_data = int_desc;
+
+ /* Pass up the result. */
+ msg->address_hi = comp.int_desc.address >> 32;
+ msg->address_lo = comp.int_desc.address & 0xffffffff;
+ msg->data = comp.int_desc.data;
+
+ put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ return;
+
+free_int_desc:
+ kfree(int_desc);
+drop_reference:
+ put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+return_null_message:
+ msg->address_hi = 0;
+ msg->address_lo = 0;
+ msg->data = 0;
+}
+
+/* HW Interrupt Chip Descriptor */
+static struct irq_chip hv_msi_irq_chip = {
+ .name = "Hyper-V PCIe MSI",
+ .irq_compose_msi_msg = hv_compose_msi_msg,
+ .irq_set_affinity = hv_set_affinity,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = hv_irq_mask,
+ .irq_unmask = hv_irq_unmask,
+};
+
+static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info,
+ msi_alloc_info_t *arg)
+{
+ return arg->msi_hwirq;
+}
+
+static struct msi_domain_ops hv_msi_ops = {
+ .get_hwirq = hv_msi_domain_ops_get_hwirq,
+ .msi_prepare = pci_msi_prepare,
+ .set_desc = pci_msi_set_desc,
+ .msi_free = hv_msi_free,
+};
+
+/**
+ * hv_pcie_init_irq_domain() - Initialize IRQ domain
+ * @hbus: The root PCI bus
+ *
+ * This function creates an IRQ domain which will be used for
+ * interrupts from devices that have been passed through. These
+ * devices only support MSI and MSI-X, not line-based interrupts
+ * or simulations of line-based interrupts through PCIe's
+ * fabric-layer messages. Because interrupts are remapped, we
+ * can support multi-message MSI here.
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
+{
+ hbus->msi_info.chip = &hv_msi_irq_chip;
+ hbus->msi_info.ops = &hv_msi_ops;
+ hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
+ MSI_FLAG_PCI_MSIX);
+ hbus->msi_info.handler = handle_edge_irq;
+ hbus->msi_info.handler_name = "edge";
+ hbus->msi_info.data = hbus;
+ hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
+ &hbus->msi_info,
+ x86_vector_domain);
+ if (!hbus->irq_domain) {
+ dev_err(&hbus->hdev->device,
+ "Failed to build an MSI IRQ domain\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * get_bar_size() - Get the address space consumed by a BAR
+ * @bar_val: Value that a BAR returned after -1 was written
+ * to it.
+ *
+ * This function returns the size of the BAR, rounded up to 1
+ * page. It has to be rounded up because the hypervisor's page
+ * table entry that maps the BAR into the VM can't specify an
+ * offset within a page. The invariant is that the hypervisor
+ * must place any BAR smaller than a page at the beginning of
+ * a page.
+ *
+ * Return: Size in bytes of the consumed MMIO space.
+ */
+static u64 get_bar_size(u64 bar_val)
+{
+ return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
+ PAGE_SIZE);
+}
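+
+/*
+ * Worked example (illustrative): a 32-bit memory BAR that probed as
+ * 0xfff00008 is widened by the callers to 0xfffffffffff00008. Masking
+ * off the flag bits leaves 0xfffffffffff00000, so 1 + ~bar_val is
+ * 0x100000: a 1 MiB BAR, already a multiple of the page size.
+ */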
+
+/**
+ * survey_child_resources() - Total all MMIO requirements
+ * @hbus: Root PCI bus, as understood by this driver
+ */
+static void survey_child_resources(struct hv_pcibus_device *hbus)
+{
+ struct list_head *iter;
+ struct hv_pci_dev *hpdev;
+ resource_size_t bar_size = 0;
+ unsigned long flags;
+ struct completion *event;
+ u64 bar_val;
+ int i;
+
+ /* If nobody is waiting on the answer, don't compute it. */
+ event = xchg(&hbus->survey_event, NULL);
+ if (!event)
+ return;
+
+ /* If the answer has already been computed, go with it. */
+ if (hbus->low_mmio_space || hbus->high_mmio_space) {
+ complete(event);
+ return;
+ }
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+ /*
+ * Due to an interesting quirk of the PCI spec, all memory regions
+ * for a child device are a power of 2 in size and aligned in memory,
+ * so it's sufficient to just add them up without tracking alignment.
+ */
+ list_for_each(iter, &hbus->children) {
+ hpdev = container_of(iter, struct hv_pci_dev, list_entry);
+ for (i = 0; i < 6; i++) {
+ if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
+ dev_err(&hbus->hdev->device,
+ "There's an I/O BAR in this list!\n");
+
+ if (hpdev->probed_bar[i] != 0) {
+ /*
+ * A probed BAR has all the upper bits set that
+ * can be changed.
+ */
+
+ bar_val = hpdev->probed_bar[i];
+ if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ bar_val |=
+ ((u64)hpdev->probed_bar[++i] << 32);
+ else
+ bar_val |= 0xffffffff00000000ULL;
+
+ bar_size = get_bar_size(bar_val);
+
+ if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ hbus->high_mmio_space += bar_size;
+ else
+ hbus->low_mmio_space += bar_size;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ complete(event);
+}
+
+/**
+ * prepopulate_bars() - Fill in BARs with defaults
+ * @hbus: Root PCI bus, as understood by this driver
+ *
+ * The core PCI driver code seems much, much happier if the BARs
+ * for a device have values upon first scan. So fill them in.
+ * The algorithm below works down from large sizes to small,
+ * attempting to pack the assignments optimally. The assumption,
+ * enforced in other parts of the code, is that the beginning of
+ * the memory-mapped I/O space will be aligned on the largest
+ * BAR size.
+ */
+static void prepopulate_bars(struct hv_pcibus_device *hbus)
+{
+ resource_size_t high_size = 0;
+ resource_size_t low_size = 0;
+ resource_size_t high_base = 0;
+ resource_size_t low_base = 0;
+ resource_size_t bar_size;
+ struct hv_pci_dev *hpdev;
+ struct list_head *iter;
+ unsigned long flags;
+ u64 bar_val;
+ u32 command;
+ bool high;
+ int i;
+
+ if (hbus->low_mmio_space) {
+ low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
+ low_base = hbus->low_mmio_res->start;
+ }
+
+ if (hbus->high_mmio_space) {
+ high_size = 1ULL <<
+ (63 - __builtin_clzll(hbus->high_mmio_space));
+ high_base = hbus->high_mmio_res->start;
+ }
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+ /* Pick addresses for the BARs. */
+ do {
+ list_for_each(iter, &hbus->children) {
+ hpdev = container_of(iter, struct hv_pci_dev,
+ list_entry);
+ for (i = 0; i < 6; i++) {
+ bar_val = hpdev->probed_bar[i];
+ if (bar_val == 0)
+ continue;
+ high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
+ if (high) {
+ bar_val |=
+ ((u64)hpdev->probed_bar[i + 1]
+ << 32);
+ } else {
+ bar_val |= 0xffffffffULL << 32;
+ }
+ bar_size = get_bar_size(bar_val);
+ if (high) {
+ if (high_size != bar_size) {
+ i++;
+ continue;
+ }
+ _hv_pcifront_write_config(hpdev,
+ PCI_BASE_ADDRESS_0 + (4 * i),
+ 4,
+ (u32)(high_base & 0xffffff00));
+ i++;
+ _hv_pcifront_write_config(hpdev,
+ PCI_BASE_ADDRESS_0 + (4 * i),
+ 4, (u32)(high_base >> 32));
+ high_base += bar_size;
+ } else {
+ if (low_size != bar_size)
+ continue;
+ _hv_pcifront_write_config(hpdev,
+ PCI_BASE_ADDRESS_0 + (4 * i),
+ 4,
+ (u32)(low_base & 0xffffff00));
+ low_base += bar_size;
+ }
+ }
+ if (high_size <= 1 && low_size <= 1) {
+ /* Set the memory enable bit. */
+ _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
+ &command);
+ command |= PCI_COMMAND_MEMORY;
+ _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
+ command);
+ break;
+ }
+ }
+
+ high_size >>= 1;
+ low_size >>= 1;
+ } while (high_size || low_size);
+
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+}
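+
+/*
+ * A small worked example of the packing above (illustrative only):
+ * with one 1 MiB BAR and one 4 KiB BAR below 4 GiB, low_mmio_space is
+ * 0x101000, so low_size starts at 1 MiB and the first pass places the
+ * 1 MiB BAR at low_base. low_size then halves on each pass until it
+ * reaches 4 KiB, when the second BAR is placed just above the first.
+ */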
+
+/**
+ * create_root_hv_pci_bus() - Expose a new root PCI bus
+ * @hbus: Root PCI bus, as understood by this driver
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
+{
+ /* Register the device */
+ hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
+ 0, /* bus number is always zero */
+ &hv_pcifront_ops,
+ &hbus->sysdata,
+ &hbus->resources_for_children);
+ if (!hbus->pci_bus)
+ return -ENODEV;
+
+ hbus->pci_bus->msi = &hbus->msi_chip;
+ hbus->pci_bus->msi->dev = &hbus->hdev->device;
+
+ pci_scan_child_bus(hbus->pci_bus);
+ pci_bus_assign_resources(hbus->pci_bus);
+ pci_bus_add_devices(hbus->pci_bus);
+ hbus->state = hv_pcibus_installed;
+ return 0;
+}
+
+struct q_res_req_compl {
+ struct completion host_event;
+ struct hv_pci_dev *hpdev;
+};
+
+/**
+ * q_resource_requirements() - Query Resource Requirements
+ * @context: The completion context.
+ * @resp: The response that came from the host.
+ * @resp_packet_size: The size in bytes of resp.
+ *
+ * This function is invoked on completion of a Query Resource
+ * Requirements packet.
+ */
+static void q_resource_requirements(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct q_res_req_compl *completion = context;
+ struct pci_q_res_req_response *q_res_req =
+ (struct pci_q_res_req_response *)resp;
+ int i;
+
+ if (resp->status < 0) {
+ dev_err(&completion->hpdev->hbus->hdev->device,
+ "query resource requirements failed: %x\n",
+ resp->status);
+ } else {
+ for (i = 0; i < 6; i++) {
+ completion->hpdev->probed_bar[i] =
+ q_res_req->probed_bar[i];
+ }
+ }
+
+ complete(&completion->host_event);
+}
+
+static void get_pcichild(struct hv_pci_dev *hpdev,
+ enum hv_pcidev_ref_reason reason)
+{
+ atomic_inc(&hpdev->refs);
+}
+
+static void put_pcichild(struct hv_pci_dev *hpdev,
+ enum hv_pcidev_ref_reason reason)
+{
+ if (atomic_dec_and_test(&hpdev->refs))
+ kfree(hpdev);
+}
+
+/**
+ * new_pcichild_device() - Create a new child device
+ * @hbus: The internal struct tracking this root PCI bus.
+ * @desc: The information supplied so far from the host
+ * about the device.
+ *
+ * This function creates the tracking structure for a new child
+ * device and kicks off the process of figuring out what it is.
+ *
+ * Return: Pointer to the new tracking struct
+ */
+static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
+ struct pci_function_description *desc)
+{
+ struct hv_pci_dev *hpdev;
+ struct pci_child_message *res_req;
+ struct q_res_req_compl comp_pkt;
+ union {
+ struct pci_packet init_packet;
+ u8 buffer[0x100];
+ } pkt;
+ unsigned long flags;
+ int ret;
+
+ hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC);
+ if (!hpdev)
+ return NULL;
+
+ hpdev->hbus = hbus;
+
+ memset(&pkt, 0, sizeof(pkt));
+ init_completion(&comp_pkt.host_event);
+ comp_pkt.hpdev = hpdev;
+ pkt.init_packet.compl_ctxt = &comp_pkt;
+ pkt.init_packet.completion_func = q_resource_requirements;
+ res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req->message_type = PCI_QUERY_RESOURCE_REQUIREMENTS;
+ res_req->wslot.slot = desc->win_slot.slot;
+
+ ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
+ sizeof(struct pci_child_message),
+ (unsigned long)&pkt.init_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto error;
+
+ wait_for_completion(&comp_pkt.host_event);
+
+ hpdev->desc = *desc;
+ get_pcichild(hpdev, hv_pcidev_ref_initial);
+ get_pcichild(hpdev, hv_pcidev_ref_childlist);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_add_tail(&hpdev->list_entry, &hbus->children);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ return hpdev;
+
+error:
+ kfree(hpdev);
+ return NULL;
+}
+
+/**
+ * get_pcichild_wslot() - Find device from slot
+ * @hbus: Root PCI bus, as understood by this driver
+ * @wslot: Location on the bus
+ *
+ * This function looks up a PCI device and returns the internal
+ * representation of it. It acquires a reference on it, so that
+ * the device won't be deleted while somebody is using it. The
+ * caller is responsible for calling put_pcichild() to release
+ * this reference.
+ *
+ * Return: Internal representation of a PCI device
+ */
+static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
+ u32 wslot)
+{
+ unsigned long flags;
+ struct hv_pci_dev *iter, *hpdev = NULL;
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each_entry(iter, &hbus->children, list_entry) {
+ if (iter->desc.win_slot.slot == wslot) {
+ hpdev = iter;
+ get_pcichild(hpdev, hv_pcidev_ref_by_slot);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ return hpdev;
+}
+
+/**
+ * pci_devices_present_work() - Handle new list of child devices
+ * @work: Work struct embedded in struct hv_dr_work
+ *
+ * "Bus Relations" is the Windows term for "children of this
+ * bus." The terminology is preserved here for people trying to
+ * debug the interaction between Hyper-V and Linux. This
+ * function is called when the parent partition reports a list
+ * of functions that should be observed under this PCI Express
+ * port (bus).
+ *
+ * This function updates the list, and must tolerate being
+ * called multiple times with the same information. The typical
+ * number of child devices is one, with very atypical cases
+ * involving three or four, so the algorithms used here can be
+ * simple and inefficient.
+ *
+ * It must also treat the omission of a previously observed device as
+ * notification that the device no longer exists.
+ *
+ * Note that this function is a work item, and it may not be
+ * invoked in the order that it was queued. Back-to-back
+ * updates of the list of present devices may involve queuing
+ * multiple work items, and this one may run before ones that
+ * were sent later. As such, this function only does something
+ * if it is the last one in the queue.
+ */
+static void pci_devices_present_work(struct work_struct *work)
+{
+ u32 child_no;
+ bool found;
+ struct list_head *iter;
+ struct pci_function_description *new_desc;
+ struct hv_pci_dev *hpdev;
+ struct hv_pcibus_device *hbus;
+ struct list_head removed;
+ struct hv_dr_work *dr_wrk;
+ struct hv_dr_state *dr = NULL;
+ unsigned long flags;
+
+ dr_wrk = container_of(work, struct hv_dr_work, wrk);
+ hbus = dr_wrk->bus;
+ kfree(dr_wrk);
+
+ INIT_LIST_HEAD(&removed);
+
+ if (down_interruptible(&hbus->enum_sem)) {
+ put_hvpcibus(hbus);
+ return;
+ }
+
+ /* Pull this off the queue and process it if it was the last one. */
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ while (!list_empty(&hbus->dr_list)) {
+ dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
+ list_entry);
+ list_del(&dr->list_entry);
+
+ /* Throw this away if the list still has stuff in it. */
+ if (!list_empty(&hbus->dr_list)) {
+ kfree(dr);
+ continue;
+ }
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ if (!dr) {
+ up(&hbus->enum_sem);
+ put_hvpcibus(hbus);
+ return;
+ }
+
+ /* First, mark all existing children as reported missing. */
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each(iter, &hbus->children) {
+ hpdev = container_of(iter, struct hv_pci_dev,
+ list_entry);
+ hpdev->reported_missing = true;
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ /* Next, add back any reported devices. */
+ for (child_no = 0; child_no < dr->device_count; child_no++) {
+ found = false;
+ new_desc = &dr->func[child_no];
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_for_each(iter, &hbus->children) {
+ hpdev = container_of(iter, struct hv_pci_dev,
+ list_entry);
+ if ((hpdev->desc.win_slot.slot ==
+ new_desc->win_slot.slot) &&
+ (hpdev->desc.v_id == new_desc->v_id) &&
+ (hpdev->desc.d_id == new_desc->d_id) &&
+ (hpdev->desc.ser == new_desc->ser)) {
+ hpdev->reported_missing = false;
+ found = true;
+ }
+ }
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ if (!found) {
+ hpdev = new_pcichild_device(hbus, new_desc);
+ if (!hpdev)
+ dev_err(&hbus->hdev->device,
+ "couldn't record a child device.\n");
+ }
+ }
+
+ /* Move missing children to a list on the stack. */
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ do {
+ found = false;
+ list_for_each(iter, &hbus->children) {
+ hpdev = container_of(iter, struct hv_pci_dev,
+ list_entry);
+ if (hpdev->reported_missing) {
+ found = true;
+ put_pcichild(hpdev, hv_pcidev_ref_childlist);
+ list_del(&hpdev->list_entry);
+ list_add_tail(&hpdev->list_entry, &removed);
+ break;
+ }
+ }
+ } while (found);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ /* Delete everything that should no longer exist. */
+ while (!list_empty(&removed)) {
+ hpdev = list_first_entry(&removed, struct hv_pci_dev,
+ list_entry);
+ list_del(&hpdev->list_entry);
+ put_pcichild(hpdev, hv_pcidev_ref_initial);
+ }
+
+ /* Tell the core to rescan bus because there may have been changes. */
+ if (hbus->state == hv_pcibus_installed) {
+ pci_lock_rescan_remove();
+ pci_scan_child_bus(hbus->pci_bus);
+ pci_unlock_rescan_remove();
+ } else {
+ survey_child_resources(hbus);
+ }
+
+ up(&hbus->enum_sem);
+ put_hvpcibus(hbus);
+ kfree(dr);
+}
+
+/**
+ * hv_pci_devices_present() - Handles list of new children
+ * @hbus: Root PCI bus, as understood by this driver
+ * @relations: Packet from host listing children
+ *
+ * This function is invoked whenever a new list of devices for
+ * this bus appears.
+ */
+static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
+ struct pci_bus_relations *relations)
+{
+ struct hv_dr_state *dr;
+ struct hv_dr_work *dr_wrk;
+ unsigned long flags;
+
+ dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
+ if (!dr_wrk)
+ return;
+
+ dr = kzalloc(offsetof(struct hv_dr_state, func) +
+ (sizeof(struct pci_function_description) *
+ (relations->device_count)), GFP_NOWAIT);
+ if (!dr) {
+ kfree(dr_wrk);
+ return;
+ }
+
+ INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
+ dr_wrk->bus = hbus;
+ dr->device_count = relations->device_count;
+ if (dr->device_count != 0) {
+ memcpy(dr->func, relations->func,
+ sizeof(struct pci_function_description) *
+ dr->device_count);
+ }
+
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_add_tail(&dr->list_entry, &hbus->dr_list);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ get_hvpcibus(hbus);
+ schedule_work(&dr_wrk->wrk);
+}
+
+/**
+ * hv_eject_device_work() - Asynchronously handles ejection
+ * @work: Work struct embedded in internal device struct
+ *
+ * This function handles ejecting a device. Windows will
+ * attempt to gracefully eject a device, waiting 60 seconds to
+ * hear back from the guest OS that this completed successfully.
+ * If this timer expires, the device will be forcibly removed.
+ */
+static void hv_eject_device_work(struct work_struct *work)
+{
+ struct pci_eject_response *ejct_pkt;
+ struct hv_pci_dev *hpdev;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ int wslot;
+ struct {
+ struct pci_packet pkt;
+ u8 buffer[sizeof(struct pci_eject_response) -
+ sizeof(struct pci_message)];
+ } ctxt;
+
+ hpdev = container_of(work, struct hv_pci_dev, wrk);
+
+ if (hpdev->state != hv_pcichild_ejecting) {
+ put_pcichild(hpdev, hv_pcidev_ref_pnp);
+ return;
+ }
+
+ /*
+ * Ejection can come before or after the PCI bus has been set up, so
+ * attempt to find it and tear down the bus state, if it exists. This
+ * must be done without constructs like pci_domain_nr(hbus->pci_bus)
+ * because hbus->pci_bus may not exist yet.
+ */
+ wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
+ pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
+ wslot);
+ if (pdev) {
+ pci_stop_and_remove_bus_device(pdev);
+ pci_dev_put(pdev);
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt->message_type = PCI_EJECTION_COMPLETE;
+ ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+ vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
+ sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
+ VM_PKT_DATA_INBAND, 0);
+
+ spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+ list_del(&hpdev->list_entry);
+ spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+
+ put_pcichild(hpdev, hv_pcidev_ref_childlist);
+ put_pcichild(hpdev, hv_pcidev_ref_pnp);
+ put_hvpcibus(hpdev->hbus);
+}
+
+/**
+ * hv_pci_eject_device() - Handles device ejection
+ * @hpdev: Internal device tracking struct
+ *
+ * This function is invoked when an ejection packet arrives. It
+ * just schedules work so that we don't re-enter the packet
+ * delivery code handling the ejection.
+ */
+static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
+{
+ hpdev->state = hv_pcichild_ejecting;
+ get_pcichild(hpdev, hv_pcidev_ref_pnp);
+ INIT_WORK(&hpdev->wrk, hv_eject_device_work);
+ get_hvpcibus(hpdev->hbus);
+ schedule_work(&hpdev->wrk);
+}
+
+/**
+ * hv_pci_onchannelcallback() - Handles incoming packets
+ * @context: Internal bus tracking struct
+ *
+ * This function is invoked whenever the host sends a packet to
+ * this channel (which is private to this root PCI bus).
+ */
+static void hv_pci_onchannelcallback(void *context)
+{
+ const int packet_size = 0x100;
+ int ret;
+ struct hv_pcibus_device *hbus = context;
+ u32 bytes_recvd;
+ u64 req_id;
+ struct vmpacket_descriptor *desc;
+ unsigned char *buffer;
+ int bufferlen = packet_size;
+ struct pci_packet *comp_packet;
+ struct pci_response *response;
+ struct pci_incoming_message *new_message;
+ struct pci_bus_relations *bus_rel;
+ struct pci_dev_incoming *dev_message;
+ struct hv_pci_dev *hpdev;
+
+ buffer = kmalloc(bufferlen, GFP_ATOMIC);
+ if (!buffer)
+ return;
+
+ while (1) {
+ ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
+ bufferlen, &bytes_recvd, &req_id);
+
+ if (ret == -ENOBUFS) {
+ kfree(buffer);
+ /* Handle large packet */
+ bufferlen = bytes_recvd;
+ buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+ if (!buffer)
+ return;
+ continue;
+ }
+
+ /*
+ * All incoming packets must be at least as large as a
+ * response.
+ */
+ if (bytes_recvd <= sizeof(struct pci_response)) {
+ kfree(buffer);
+ return;
+ }
+ desc = (struct vmpacket_descriptor *)buffer;
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+
+ /*
+ * The host is trusted, and thus it's safe to interpret
+ * this transaction ID as a pointer.
+ */
+ comp_packet = (struct pci_packet *)req_id;
+ response = (struct pci_response *)buffer;
+ comp_packet->completion_func(comp_packet->compl_ctxt,
+ response,
+ bytes_recvd);
+ kfree(buffer);
+ return;
+
+ case VM_PKT_DATA_INBAND:
+
+ new_message = (struct pci_incoming_message *)buffer;
+ switch (new_message->message_type.message_type) {
+ case PCI_BUS_RELATIONS:
+
+ bus_rel = (struct pci_bus_relations *)buffer;
+ if (bytes_recvd <
+ offsetof(struct pci_bus_relations, func) +
+ (sizeof(struct pci_function_description) *
+ (bus_rel->device_count))) {
+ dev_err(&hbus->hdev->device,
+ "bus relations too small\n");
+ break;
+ }
+
+ hv_pci_devices_present(hbus, bus_rel);
+ break;
+
+ case PCI_EJECT:
+
+ dev_message = (struct pci_dev_incoming *)buffer;
+ hpdev = get_pcichild_wslot(hbus,
+ dev_message->wslot.slot);
+ if (hpdev) {
+ hv_pci_eject_device(hpdev);
+ put_pcichild(hpdev,
+ hv_pcidev_ref_by_slot);
+ }
+ break;
+
+ default:
+ dev_warn(&hbus->hdev->device,
+ "Unimplemented protocol message %x\n",
+ new_message->message_type.message_type);
+ break;
+ }
+ break;
+
+ default:
+ dev_err(&hbus->hdev->device,
+ "unhandled packet type %d, tid %llx len %d\n",
+ desc->type, req_id, bytes_recvd);
+ break;
+ }
+ break;
+ }
+}
+
+/**
+ * hv_pci_protocol_negotiation() - Set up protocol
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * This driver is intended to support running on Windows 10
+ * (server) and later versions. It will not run on earlier
+ * versions, as they assume that many of the operations which
+ * Linux needs accomplished with a spinlock held were done via
+ * asynchronous messaging over VMBus. Windows 10 increases the
+ * surface area of PCI emulation so that these actions can take
+ * place by suspending a virtual processor for their duration.
+ *
+ * This function negotiates the channel protocol version,
+ * failing if the host doesn't support the necessary protocol
+ * level.
+ */
+static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+{
+ struct pci_version_request *version_req;
+ struct hv_pci_compl comp_pkt;
+ struct pci_packet *pkt;
+ int ret;
+
+ /*
+ * Initiate the handshake with the host and negotiate
+ * a version that the host can support. We start with the
+ * highest version number and go down if the host cannot
+ * support it.
+ */
+ pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ init_completion(&comp_pkt.host_event);
+ pkt->completion_func = hv_pci_generic_compl;
+ pkt->compl_ctxt = &comp_pkt;
+ version_req = (struct pci_version_request *)&pkt->message;
+ version_req->message_type.message_type = PCI_QUERY_PROTOCOL_VERSION;
+ version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;
+
+ ret = vmbus_sendpacket(hdev->channel, version_req,
+ sizeof(struct pci_version_request),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto exit;
+
+ wait_for_completion(&comp_pkt.host_event);
+
+ if (comp_pkt.completion_status < 0) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed version request %x\n",
+ comp_pkt.completion_status);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ ret = 0;
+
+exit:
+ kfree(pkt);
+ return ret;
+}
+
+/**
+ * hv_pci_free_bridge_windows() - Release memory regions for the
+ * bus
+ * @hbus: Root PCI bus, as understood by this driver
+ */
+static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
+{
+ /*
+ * Set the resources back to the way they looked when they
+ * were allocated by setting IORESOURCE_BUSY again.
+ */
+
+ if (hbus->low_mmio_space && hbus->low_mmio_res) {
+ hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
+ release_mem_region(hbus->low_mmio_res->start,
+ resource_size(hbus->low_mmio_res));
+ }
+
+ if (hbus->high_mmio_space && hbus->high_mmio_res) {
+ hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
+ release_mem_region(hbus->high_mmio_res->start,
+ resource_size(hbus->high_mmio_res));
+ }
+}
+
+/**
+ * hv_pci_allocate_bridge_windows() - Allocate memory regions
+ * for the bus
+ * @hbus: Root PCI bus, as understood by this driver
+ *
+ * This function calls vmbus_allocate_mmio(), which is itself a
+ * bit of a compromise. Ideally, we might change the pnp layer
+ * in the kernel such that it comprehends either PCI devices
+ * which are "grandchildren of ACPI," with some intermediate bus
+ * node (in this case, VMBus) or change it such that it
+ * understands VMBus. The pnp layer, however, has been declared
+ * deprecated, and is not subject to change.
+ *
+ * The workaround, implemented here, is to ask VMBus to allocate
+ * MMIO space for this bus. VMBus itself knows which ranges are
+ * appropriate by looking at its own ACPI objects. Then, after
+ * these ranges are claimed, they're modified to look like they
+ * would have looked if the ACPI and pnp code had allocated
+ * bridge windows. These descriptors have to exist in this form
+ * in order to satisfy the code which will get invoked when the
+ * endpoint PCI function driver calls request_mem_region() or
+ * request_mem_region_exclusive().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
+{
+ resource_size_t align;
+ int ret;
+
+ if (hbus->low_mmio_space) {
+ align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
+ ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
+ (u64)(u32)0xffffffff,
+ hbus->low_mmio_space,
+ align, false);
+ if (ret) {
+ dev_err(&hbus->hdev->device,
+ "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
+ hbus->low_mmio_space);
+ return ret;
+ }
+
+ /* Modify this resource to become a bridge window. */
+ hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
+ hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
+ pci_add_resource(&hbus->resources_for_children,
+ hbus->low_mmio_res);
+ }
+
+ if (hbus->high_mmio_space) {
+ align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
+ ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
+ 0x100000000, -1,
+ hbus->high_mmio_space, align,
+ false);
+ if (ret) {
+ dev_err(&hbus->hdev->device,
+ "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
+ hbus->high_mmio_space);
+ goto release_low_mmio;
+ }
+
+ /* Modify this resource to become a bridge window. */
+ hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
+ hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
+ pci_add_resource(&hbus->resources_for_children,
+ hbus->high_mmio_res);
+ }
+
+ return 0;
+
+release_low_mmio:
+ if (hbus->low_mmio_res) {
+ release_mem_region(hbus->low_mmio_res->start,
+ resource_size(hbus->low_mmio_res));
+ }
+
+ return ret;
+}
+
+/**
+ * hv_allocate_config_window() - Find MMIO space for PCI Config
+ * @hbus: Root PCI bus, as understood by this driver
+ *
+ * This function claims memory-mapped I/O space for accessing
+ * configuration space for the functions on this bus.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
+{
+ int ret;
+
+ /*
+ * Set up a region of MMIO space to use for accessing configuration
+ * space.
+ */
+ ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
+ PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
+ if (ret)
+ return ret;
+
+ /*
+ * vmbus_allocate_mmio() gets used for allocating both device endpoint
+ * resource claims (those which cannot be overlapped) and the ranges
+ * which are valid for the children of this bus, which are intended
+ * to be overlapped by those children. Set the flag on this claim
+ * meaning that this region can't be overlapped.
+ */
+
+ hbus->mem_config->flags |= IORESOURCE_BUSY;
+
+ return 0;
+}
+
+static void hv_free_config_window(struct hv_pcibus_device *hbus)
+{
+ release_mem_region(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
+}
+
+/**
+ * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_enter_d0(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_bus_d0_entry *d0_entry;
+ struct hv_pci_compl comp_pkt;
+ struct pci_packet *pkt;
+ int ret;
+
+ /*
+ * Tell the host that the bus is ready to use, and moved into the
+ * powered-on state. This includes telling the host which region
+ * of memory-mapped I/O space has been chosen for configuration space
+ * access.
+ */
+ pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ init_completion(&comp_pkt.host_event);
+ pkt->completion_func = hv_pci_generic_compl;
+ pkt->compl_ctxt = &comp_pkt;
+ d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry->message_type.message_type = PCI_BUS_D0ENTRY;
+ d0_entry->mmio_base = hbus->mem_config->start;
+
+ ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ goto exit;
+
+ wait_for_completion(&comp_pkt.host_event);
+
+ if (comp_pkt.completion_status < 0) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed D0 Entry with status %x\n",
+ comp_pkt.completion_status);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ ret = 0;
+
+exit:
+ kfree(pkt);
+ return ret;
+}
+
+/**
+ * hv_pci_query_relations() - Ask host to send list of child
+ * devices
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_query_relations(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_message message;
+ struct completion comp;
+ int ret;
+
+ /* Ask the host to send along the list of child devices */
+ init_completion(&comp);
+ if (cmpxchg(&hbus->survey_event, NULL, &comp))
+ return -ENOTEMPTY;
+
+ memset(&message, 0, sizeof(message));
+ message.message_type = PCI_QUERY_BUS_RELATIONS;
+
+ ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
+ 0, VM_PKT_DATA_INBAND, 0);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&comp);
+ return 0;
+}
+
+/**
+ * hv_send_resources_allocated() - Report local resource choices
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * The host OS is expecting to be sent a request as a message
+ * which contains all the resources that the device will use.
+ * The response contains those same resources, "translated",
+ * that is, the values that the hardware should use when it
+ * delivers an interrupt. (MMIO resources are
+ * used in local terms.) This is nice for Windows, and lines up
+ * with the FDO/PDO split, which doesn't exist in Linux. Linux
+ * is deeply expecting to scan an emulated PCI configuration
+ * space. So this message is sent here only to drive the state
+ * machine on the host forward.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_send_resources_allocated(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_resources_assigned *res_assigned;
+ struct hv_pci_compl comp_pkt;
+ struct hv_pci_dev *hpdev;
+ struct pci_packet *pkt;
+ u32 wslot;
+ int ret;
+
+ pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ ret = 0;
+
+ for (wslot = 0; wslot < 256; wslot++) {
+ hpdev = get_pcichild_wslot(hbus, wslot);
+ if (!hpdev)
+ continue;
+
+ memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned));
+ init_completion(&comp_pkt.host_event);
+ pkt->completion_func = hv_pci_generic_compl;
+ pkt->compl_ctxt = &comp_pkt;
+ pkt->message.message_type = PCI_RESOURCES_ASSIGNED;
+ res_assigned = (struct pci_resources_assigned *)&pkt->message;
+ res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+
+ put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+
+ ret = vmbus_sendpacket(
+ hdev->channel, &pkt->message,
+ sizeof(*res_assigned),
+ (unsigned long)pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ break;
+
+ wait_for_completion(&comp_pkt.host_event);
+
+ if (comp_pkt.completion_status < 0) {
+ ret = -EPROTO;
+ dev_err(&hdev->device,
+ "resource allocated returned 0x%x",
+ comp_pkt.completion_status);
+ break;
+ }
+ }
+
+ kfree(pkt);
+ return ret;
+}
+
+/**
+ * hv_send_resources_released() - Report local resources
+ * released
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_send_resources_released(struct hv_device *hdev)
+{
+ struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+ struct pci_child_message pkt;
+ struct hv_pci_dev *hpdev;
+ u32 wslot;
+ int ret;
+
+ for (wslot = 0; wslot < 256; wslot++) {
+ hpdev = get_pcichild_wslot(hbus, wslot);
+ if (!hpdev)
+ continue;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.message_type = PCI_RESOURCES_RELEASED;
+ pkt.wslot.slot = hpdev->desc.win_slot.slot;
+
+ put_pcichild(hpdev, hv_pcidev_ref_by_slot);
+
+ ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
+ VM_PKT_DATA_INBAND, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void get_hvpcibus(struct hv_pcibus_device *hbus)
+{
+ atomic_inc(&hbus->remove_lock);
+}
+
+static void put_hvpcibus(struct hv_pcibus_device *hbus)
+{
+ if (atomic_dec_and_test(&hbus->remove_lock))
+ complete(&hbus->remove_event);
+}
+
+/**
+ * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ * @dev_id: Identifies the device itself
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_probe(struct hv_device *hdev,
+ const struct hv_vmbus_device_id *dev_id)
+{
+ struct hv_pcibus_device *hbus;
+ int ret;
+
+ hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
+ if (!hbus)
+ return -ENOMEM;
+
+ /*
+ * The PCI bus "domain" is what is called "segment" in ACPI and
+ * other specs. Pull it from the instance ID, to get something
+ * unique. Bytes 8 and 9 are what is used in Windows guests, so
+ * do the same thing for consistency. Note that, since this code
+ * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee
+ * that (1) the only domain in use for something that looks like
+ * a physical PCI bus (which is actually emulated by the
+ * hypervisor) is domain 0 and (2) there will be no overlap
+ * between domains derived from these instance IDs in the same
+ * VM.
+ */
+ hbus->sysdata.domain = hdev->dev_instance.b[9] |
+ hdev->dev_instance.b[8] << 8;
+
+ hbus->hdev = hdev;
+ atomic_inc(&hbus->remove_lock);
+ INIT_LIST_HEAD(&hbus->children);
+ INIT_LIST_HEAD(&hbus->dr_list);
+ INIT_LIST_HEAD(&hbus->resources_for_children);
+ spin_lock_init(&hbus->config_lock);
+ spin_lock_init(&hbus->device_list_lock);
+ sema_init(&hbus->enum_sem, 1);
+ init_completion(&hbus->remove_event);
+
+ ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+ hv_pci_onchannelcallback, hbus);
+ if (ret)
+ goto free_bus;
+
+ hv_set_drvdata(hdev, hbus);
+
+ ret = hv_pci_protocol_negotiation(hdev);
+ if (ret)
+ goto close;
+
+ ret = hv_allocate_config_window(hbus);
+ if (ret)
+ goto close;
+
+ hbus->cfg_addr = ioremap(hbus->mem_config->start,
+ PCI_CONFIG_MMIO_LENGTH);
+ if (!hbus->cfg_addr) {
+ dev_err(&hdev->device,
+ "Unable to map a virtual address for config space\n");
+ ret = -ENOMEM;
+ goto free_config;
+ }
+
+ hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus);
+ if (!hbus->sysdata.fwnode) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ret = hv_pcie_init_irq_domain(hbus);
+ if (ret)
+ goto free_fwnode;
+
+ ret = hv_pci_query_relations(hdev);
+ if (ret)
+ goto free_irq_domain;
+
+ ret = hv_pci_enter_d0(hdev);
+ if (ret)
+ goto free_irq_domain;
+
+ ret = hv_pci_allocate_bridge_windows(hbus);
+ if (ret)
+ goto free_irq_domain;
+
+ ret = hv_send_resources_allocated(hdev);
+ if (ret)
+ goto free_windows;
+
+ prepopulate_bars(hbus);
+
+ hbus->state = hv_pcibus_probed;
+
+ ret = create_root_hv_pci_bus(hbus);
+ if (ret)
+ goto free_windows;
+
+ return 0;
+
+free_windows:
+ hv_pci_free_bridge_windows(hbus);
+free_irq_domain:
+ irq_domain_remove(hbus->irq_domain);
+free_fwnode:
+ irq_domain_free_fwnode(hbus->sysdata.fwnode);
+unmap:
+ iounmap(hbus->cfg_addr);
+free_config:
+ hv_free_config_window(hbus);
+close:
+ vmbus_close(hdev->channel);
+free_bus:
+ kfree(hbus);
+ return ret;
+}
+
+/**
+ * hv_pci_remove() - Remove routine for this VMBus channel
+ * @hdev: VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_remove(struct hv_device *hdev)
+{
+ int ret;
+ struct hv_pcibus_device *hbus;
+ union {
+ struct pci_packet teardown_packet;
+ u8 buffer[0x100];
+ } pkt;
+ struct pci_bus_relations relations;
+ struct hv_pci_compl comp_pkt;
+
+ hbus = hv_get_drvdata(hdev);
+
+ ret = hv_send_resources_released(hdev);
+ if (ret)
+ dev_err(&hdev->device,
+ "Couldn't send resources released packet(s)\n");
+
+ memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
+ init_completion(&comp_pkt.host_event);
+ pkt.teardown_packet.completion_func = hv_pci_generic_compl;
+ pkt.teardown_packet.compl_ctxt = &comp_pkt;
+ pkt.teardown_packet.message.message_type = PCI_BUS_D0EXIT;
+
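+	/* Ask the host to move the bus out of D0; wait up to 10s for the reply. */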
+ ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
+ sizeof(struct pci_message),
+ (unsigned long)&pkt.teardown_packet,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (!ret)
+ wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+
+ if (hbus->state == hv_pcibus_installed) {
+ /* Remove the bus from PCI's point of view. */
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(hbus->pci_bus);
+ pci_remove_root_bus(hbus->pci_bus);
+ pci_unlock_rescan_remove();
+ }
+
+ vmbus_close(hdev->channel);
+
+ /* Delete any children which might still exist. */
+ memset(&relations, 0, sizeof(relations));
+ hv_pci_devices_present(hbus, &relations);
+
+ iounmap(hbus->cfg_addr);
+ hv_free_config_window(hbus);
+ pci_free_resource_list(&hbus->resources_for_children);
+ hv_pci_free_bridge_windows(hbus);
+ irq_domain_remove(hbus->irq_domain);
+ irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ put_hvpcibus(hbus);
+ wait_for_completion(&hbus->remove_event);
+ kfree(hbus);
+ return 0;
+}
+
+static const struct hv_vmbus_device_id hv_pci_id_table[] = {
+ /* PCI Pass-through Class ID */
+ /* 44C4F61D-4444-4400-9D52-802E27EDE19F */
+ { HV_PCIE_GUID, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
+
+static struct hv_driver hv_pci_drv = {
+ .name = "hv_pci",
+ .id_table = hv_pci_id_table,
+ .probe = hv_pci_probe,
+ .remove = hv_pci_remove,
+};
+
+static void __exit exit_hv_pci_drv(void)
+{
+ vmbus_driver_unregister(&hv_pci_drv);
+}
+
+static int __init init_hv_pci_drv(void)
+{
+ return vmbus_driver_register(&hv_pci_drv);
+}
+
+module_init(init_hv_pci_drv);
+module_exit(exit_hv_pci_drv);
+
+MODULE_DESCRIPTION("Hyper-V PCI");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index fe600964fa50..2f817fa4c661 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -32,13 +32,18 @@
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
struct imx6_pcie {
- struct gpio_desc *reset_gpio;
+ int reset_gpio;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie;
struct pcie_port pp;
struct regmap *iomuxc_gpr;
void __iomem *mem_base;
+ u32 tx_deemph_gen1;
+ u32 tx_deemph_gen2_3p5db;
+ u32 tx_deemph_gen2_6db;
+ u32 tx_swing_full;
+ u32 tx_swing_low;
};
/* PCIe Root Complex registers (memory-mapped) */
@@ -202,6 +207,23 @@ static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
return 0;
}
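+/* Force the PHY through a reset by toggling the RX data/PLL overrides. */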
+static void imx6_pcie_reset_phy(struct pcie_port *pp)
+{
+ u32 tmp;
+
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+ tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+
+ usleep_range(2000, 3000);
+
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+ tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+}
+
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
unsigned int fsr, struct pt_regs *regs)
@@ -287,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
usleep_range(200, 500);
/* Some boards don't have PCIe reset GPIO. */
- if (imx6_pcie->reset_gpio) {
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
msleep(100);
- gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
}
return 0;
@@ -317,32 +339,32 @@ static void imx6_pcie_init_phy(struct pcie_port *pp)
IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx6_pcie->tx_deemph_gen1 << 0);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx6_pcie->tx_deemph_gen2_3p5db << 6);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx6_pcie->tx_deemph_gen2_6db << 12);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx6_pcie->tx_swing_full << 18);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx6_pcie->tx_swing_low << 25);
}
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
- unsigned int retries;
-
- for (retries = 0; retries < 200; retries++) {
- if (dw_pcie_link_up(pp))
- return 0;
- usleep_range(100, 1000);
- }
+ /* check if the link is up or not */
+ if (!dw_pcie_wait_for_link(pp))
+ return 0;
- dev_err(pp->dev, "phy link never came up\n");
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
- return -EINVAL;
+ return -ETIMEDOUT;
}
static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp)
@@ -390,8 +412,10 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
ret = imx6_pcie_wait_for_link(pp);
- if (ret)
- return ret;
+ if (ret) {
+ dev_info(pp->dev, "Link never came up\n");
+ goto err_reset_phy;
+ }
/* Allow Gen2 mode after the link is up. */
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
@@ -410,19 +434,28 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
ret = imx6_pcie_wait_for_speed_change(pp);
if (ret) {
dev_err(pp->dev, "Failed to bring link up!\n");
- return ret;
+ goto err_reset_phy;
}
/* Make sure link training is finished as well! */
ret = imx6_pcie_wait_for_link(pp);
if (ret) {
dev_err(pp->dev, "Failed to bring link up!\n");
- return ret;
+ goto err_reset_phy;
}
tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
+
return 0;
+
+err_reset_phy:
+ dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ imx6_pcie_reset_phy(pp);
+
+ return ret;
}
static void imx6_pcie_host_init(struct pcie_port *pp)
@@ -441,81 +474,10 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
dw_pcie_msi_init(pp);
}
-static void imx6_pcie_reset_phy(struct pcie_port *pp)
-{
- u32 tmp;
-
- pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
- tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
- PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
-
- usleep_range(2000, 3000);
-
- pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
- tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
- PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
-}
-
static int imx6_pcie_link_up(struct pcie_port *pp)
{
- u32 rc, debug_r0, rx_valid;
- int count = 5;
-
- /*
- * Test if the PHY reports that the link is up and also that the LTSSM
- * training finished. There are three possible states of the link when
- * this code is called:
- * 1) The link is DOWN (unlikely)
- * The link didn't come up yet for some reason. This usually means
- * we have a real problem somewhere. Reset the PHY and exit. This
- * state calls for inspection of the DEBUG registers.
- * 2) The link is UP, but still in LTSSM training
- * Wait for the training to finish, which should take a very short
- * time. If the training does not finish, we have a problem and we
- * need to inspect the DEBUG registers. If the training does finish,
- * the link is up and operating correctly.
- * 3) The link is UP and no longer in LTSSM training
- * The link is up and operating correctly.
- */
- while (1) {
- rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
- if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
- break;
- if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
- return 1;
- if (!count--)
- break;
- dev_dbg(pp->dev, "Link is up, but still in training\n");
- /*
- * Wait a little bit, then re-check if the link finished
- * the training.
- */
- usleep_range(1000, 2000);
- }
- /*
- * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
- * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
- * If (MAC/LTSSM.state == Recovery.RcvrLock)
- * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
- * to gen2 is stuck
- */
- pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
- debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
-
- if (rx_valid & PCIE_PHY_RX_ASIC_OUT_VALID)
- return 0;
-
- if ((debug_r0 & 0x3f) != 0x0d)
- return 0;
-
- dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
- dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
-
- imx6_pcie_reset_phy(pp);
-
- return 0;
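+	/* dw_pcie_wait_for_link() now handles the training wait; just report the raw link-up bit. */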
+ return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
+ PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}
static struct pcie_host_ops imx6_pcie_host_ops = {
@@ -561,7 +523,9 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
int ret;
imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
@@ -581,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ }
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
@@ -614,6 +585,27 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->iomuxc_gpr);
}
+ /* Grab PCIe PHY Tx Settings */
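+	/* The defaults below match the values previously hard-coded in imx6_pcie_init_phy(). */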
+	if (of_property_read_u32(np, "fsl,tx-deemph-gen1",
+				 &imx6_pcie->tx_deemph_gen1))
+		imx6_pcie->tx_deemph_gen1 = 0;
+
+	if (of_property_read_u32(np, "fsl,tx-deemph-gen2-3p5db",
+				 &imx6_pcie->tx_deemph_gen2_3p5db))
+		imx6_pcie->tx_deemph_gen2_3p5db = 0;
+
+	if (of_property_read_u32(np, "fsl,tx-deemph-gen2-6db",
+				 &imx6_pcie->tx_deemph_gen2_6db))
+		imx6_pcie->tx_deemph_gen2_6db = 20;
+
+	if (of_property_read_u32(np, "fsl,tx-swing-full",
+				 &imx6_pcie->tx_swing_full))
+		imx6_pcie->tx_swing_full = 127;
+
+	if (of_property_read_u32(np, "fsl,tx-swing-low",
+				 &imx6_pcie->tx_swing_low))
+		imx6_pcie->tx_swing_low = 127;
+
ret = imx6_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 0aa81bd3de12..b71f55bb0315 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -97,17 +97,15 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
return 0;
}
- ks_dw_pcie_initiate_link_train(ks_pcie);
/* check if the link is up or not */
- for (retries = 0; retries < 200; retries++) {
- if (dw_pcie_link_up(pp))
- return 0;
- usleep_range(100, 1000);
+ for (retries = 0; retries < 5; retries++) {
ks_dw_pcie_initiate_link_train(ks_pcie);
+ if (!dw_pcie_wait_for_link(pp))
+ return 0;
}
dev_err(pp->dev, "phy link never came up\n");
- return -EINVAL;
+ return -ETIMEDOUT;
}
static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
@@ -359,6 +357,9 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
/* initialize SerDes Phy if present */
phy = devm_phy_get(dev, "pcie-phy");
+ if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER)
+ return PTR_ERR(phy);
+
if (!IS_ERR_OR_NULL(phy)) {
ret = phy_init(phy);
if (ret < 0)
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index f39961bcf7aa..a21e229d95e0 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -208,6 +208,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
{ },
};
MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 30323114c53c..68d1f41b3cbf 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -281,6 +281,11 @@ struct tegra_pcie {
struct resource prefetch;
struct resource busn;
+ struct {
+ resource_size_t mem;
+ resource_size_t io;
+ } offset;
+
struct clk *pex_clk;
struct clk *afi_clk;
struct clk *pll_e;
@@ -295,7 +300,6 @@ struct tegra_pcie {
struct tegra_msi msi;
struct list_head ports;
- unsigned int num_ports;
u32 xbar_config;
struct regulator_bulk_data *supplies;
@@ -426,31 +430,38 @@ free:
return ERR_PTR(err);
}
-/*
- * Look up a virtual address mapping for the specified bus number. If no such
- * mapping exists, try to create one.
- */
-static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
- unsigned int busnr)
+static int tegra_pcie_add_bus(struct pci_bus *bus)
{
- struct tegra_pcie_bus *bus;
+ struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct tegra_pcie_bus *b;
- list_for_each_entry(bus, &pcie->buses, list)
- if (bus->nr == busnr)
- return (void __iomem *)bus->area->addr;
+ b = tegra_pcie_bus_alloc(pcie, bus->number);
+ if (IS_ERR(b))
+ return PTR_ERR(b);
- bus = tegra_pcie_bus_alloc(pcie, busnr);
- if (IS_ERR(bus))
- return NULL;
+ list_add_tail(&b->list, &pcie->buses);
- list_add_tail(&bus->list, &pcie->buses);
+ return 0;
+}
- return (void __iomem *)bus->area->addr;
+static void tegra_pcie_remove_bus(struct pci_bus *child)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(child->sysdata);
+ struct tegra_pcie_bus *bus, *tmp;
+
+ list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
+ if (bus->nr == child->number) {
+ vunmap(bus->area->addr);
+ list_del(&bus->list);
+ kfree(bus);
+ break;
+ }
+ }
}
-static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
- unsigned int devfn,
- int where)
+static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
{
struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
void __iomem *addr = NULL;
@@ -466,7 +477,12 @@ static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
}
}
} else {
- addr = tegra_pcie_bus_map(pcie, bus->number);
+ struct tegra_pcie_bus *b;
+
+ list_for_each_entry(b, &pcie->buses, list)
+ if (b->nr == bus->number)
+ addr = (void __iomem *)b->area->addr;
+
if (!addr) {
dev_err(pcie->dev,
"failed to map cfg. space for bus %u\n",
@@ -481,7 +497,9 @@ static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
}
static struct pci_ops tegra_pcie_ops = {
- .map_bus = tegra_pcie_conf_address,
+ .add_bus = tegra_pcie_add_bus,
+ .remove_bus = tegra_pcie_remove_bus,
+ .map_bus = tegra_pcie_map_bus,
.read = pci_generic_config_read32,
.write = pci_generic_config_write32,
};
@@ -598,6 +616,17 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
struct tegra_pcie *pcie = sys_to_pcie(sys);
int err;
+ sys->mem_offset = pcie->offset.mem;
+ sys->io_offset = pcie->offset.io;
+
+ err = devm_request_resource(pcie->dev, &pcie->all, &pcie->io);
+ if (err < 0)
+ return err;
+
+ err = devm_request_resource(pcie->dev, &ioport_resource, &pcie->pio);
+ if (err < 0)
+ return err;
+
err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
if (err < 0)
return err;
@@ -606,6 +635,7 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
if (err)
return err;
+ pci_add_resource_offset(&sys->resources, &pcie->pio, sys->io_offset);
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
@@ -741,7 +771,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_FPCI_BAR5);
/* map all upstream transactions as uncached */
- afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
@@ -1601,6 +1631,9 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
switch (res.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
+ /* Track the bus -> CPU I/O mapping offset. */
+ pcie->offset.io = res.start - range.pci_addr;
+
memcpy(&pcie->pio, &res, sizeof(res));
pcie->pio.name = np->full_name;
@@ -1621,6 +1654,14 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
break;
case IORESOURCE_MEM:
+ /*
+ * Track the bus -> CPU memory mapping offset. This
+ * assumes that the prefetchable and non-prefetchable
+ * regions will be the last of type IORESOURCE_MEM in
+ * the ranges property.
+	 */
+ pcie->offset.mem = res.start - range.pci_addr;
+
if (res.flags & IORESOURCE_PREFETCH) {
memcpy(&pcie->prefetch, &res, sizeof(res));
pcie->prefetch.name = "prefetchable";
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
new file mode 100644
index 000000000000..d71935cb2678
--- /dev/null
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -0,0 +1,403 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2015, 2016 Cavium, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/of_pci.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pci-host-common.h"
+
+/* Mapping is standard ECAM */
+static void __iomem *thunder_ecam_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct gen_pci *pci = bus->sysdata;
+ resource_size_t idx = bus->number - pci->cfg.bus_range->start;
+
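+	/* One config window per bus; devfn selects bits [19:12], the register bits [11:0]. */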
+ return pci->cfg.win[idx] + ((devfn << 12) | where);
+}
+
+static void set_val(u32 v, int where, int size, u32 *val)
+{
+ int shift = (where & 3) * 8;
+
+ pr_debug("set_val %04x: %08x\n", (unsigned)(where & ~3), v);
+ v >>= shift;
+ if (size == 1)
+ v &= 0xff;
+ else if (size == 2)
+ v &= 0xffff;
+ *val = v;
+}
+
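+/*
+ * Synthesize one 16-byte Enhanced Allocation entry from the live BAR
+ * registers: word 0 is the caller-supplied header @e0, words 1-3 are
+ * the Base-L, Offset-L and Base-H values derived from the BAR at @bar.
+ */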
+static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val)
+{
+ void __iomem *addr;
+ u32 v;
+
+	/* Entries are 16-byte aligned; bits [3:2] select the word within an entry */
+ int where_a = where & 0xc;
+
+ if (where_a == 0) {
+ set_val(e0, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0x4) {
+ addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ v = readl(addr);
+ v &= ~0xf;
+ v |= 2; /* EA entry-1. Base-L */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0x8) {
+ u32 barl_orig;
+ u32 barl_rb;
+
+ addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ barl_orig = readl(addr + 0);
+ writel(0xffffffff, addr + 0);
+ barl_rb = readl(addr + 0);
+ writel(barl_orig, addr + 0);
+ /* zeros in unsettable bits */
+ v = ~barl_rb & ~3;
+ v |= 0xc; /* EA entry-2. Offset-L */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xc) {
+ addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ v = readl(addr); /* EA entry-3. Base-H */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
+static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct gen_pci *pci = bus->sysdata;
+ int where_a = where & ~3;
+ void __iomem *addr;
+ u32 node_bits;
+ u32 v;
+
+ /* EA Base[63:32] may be missing some bits ... */
+ switch (where_a) {
+ case 0xa8:
+ case 0xbc:
+ case 0xd0:
+ case 0xe4:
+ break;
+ default:
+ return pci_generic_config_read(bus, devfn, where, size, val);
+ }
+
+ addr = bus->ops->map_bus(bus, devfn, where_a);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ v = readl(addr);
+
+ /*
+ * Bit 44 of the 64-bit Base must match the same bit in
+ * the config space access window. Since we are working with
+ * the high-order 32 bits, shift everything down by 32 bits.
+ */
+ node_bits = (pci->cfg.res.start >> 32) & (1 << 12);
+
+ v |= node_bits;
+ set_val(v, where, size, val);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 v;
+ u32 vendor_device;
+ u32 class_rev;
+ void __iomem *addr;
+ int cfg_type;
+ int where_a = where & ~3;
+
+ addr = bus->ops->map_bus(bus, devfn, 0xc);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ v = readl(addr);
+
+ /* Check for non type-00 header */
+ cfg_type = (v >> 16) & 0x7f;
+
+ addr = bus->ops->map_bus(bus, devfn, 8);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ class_rev = readl(addr);
+ if (class_rev == 0xffffffff)
+ goto no_emulation;
+
+ if ((class_rev & 0xff) >= 8) {
+ /* Pass-2 handling */
+ if (cfg_type)
+ goto no_emulation;
+ return thunder_ecam_p2_config_read(bus, devfn, where,
+ size, val);
+ }
+
+ /*
+ * All BARs have fixed addresses specified by the EA
+ * capability; they must return zero on read.
+ */
+ if (cfg_type == 0 &&
+ ((where >= 0x10 && where < 0x2c) ||
+ (where >= 0x1a4 && where < 0x1bc))) {
+ /* BAR or SR-IOV BAR */
+ *val = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ addr = bus->ops->map_bus(bus, devfn, 0);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ vendor_device = readl(addr);
+ if (vendor_device == 0xffffffff)
+ goto no_emulation;
+
+ pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n",
+ vendor_device & 0xffff, vendor_device >> 16, class_rev,
+ (unsigned) where, devfn);
+
+	/* Type-00 (endpoint) header */
+ if (cfg_type == 0) {
+ bool has_msix;
+ bool is_nic = (vendor_device == 0xa01e177d);
+ bool is_tns = (vendor_device == 0xa01f177d);
+
+ addr = bus->ops->map_bus(bus, devfn, 0x70);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ /* E_CAP */
+ v = readl(addr);
+ has_msix = (v & 0xff00) != 0;
+
+ if (!has_msix && where_a == 0x70) {
+ v |= 0xbc00; /* next capability is EA at 0xbc */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xb0) {
+ addr = bus->ops->map_bus(bus, devfn, where_a);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ v = readl(addr);
+ if (v & 0xff00)
+ pr_err("Bad MSIX cap header: %08x\n", v);
+ v |= 0xbc00; /* next capability is EA at 0xbc */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xbc) {
+ if (is_nic)
+ v = 0x40014; /* EA last in chain, 4 entries */
+ else if (is_tns)
+ v = 0x30014; /* EA last in chain, 3 entries */
+ else if (has_msix)
+ v = 0x20014; /* EA last in chain, 2 entries */
+ else
+ v = 0x10014; /* EA last in chain, 1 entry */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a >= 0xc0 && where_a < 0xd0)
+ /* EA entry-0. PP=0, BAR0 Size:3 */
+ return handle_ea_bar(0x80ff0003,
+ 0x10, bus, devfn, where,
+ size, val);
+ if (where_a >= 0xd0 && where_a < 0xe0 && has_msix)
+ /* EA entry-1. PP=0, BAR4 Size:3 */
+ return handle_ea_bar(0x80ff0043,
+ 0x20, bus, devfn, where,
+ size, val);
+ if (where_a >= 0xe0 && where_a < 0xf0 && is_tns)
+ /* EA entry-2. PP=0, BAR2, Size:3 */
+ return handle_ea_bar(0x80ff0023,
+ 0x18, bus, devfn, where,
+ size, val);
+ if (where_a >= 0xe0 && where_a < 0xf0 && is_nic)
+ /* EA entry-2. PP=4, VF_BAR0 (9), Size:3 */
+ return handle_ea_bar(0x80ff0493,
+ 0x1a4, bus, devfn, where,
+ size, val);
+ if (where_a >= 0xf0 && where_a < 0x100 && is_nic)
+ /* EA entry-3. PP=4, VF_BAR4 (d), Size:3 */
+ return handle_ea_bar(0x80ff04d3,
+ 0x1b4, bus, devfn, where,
+ size, val);
+ } else if (cfg_type == 1) {
+ bool is_rsl_bridge = devfn == 0x08;
+ bool is_rad_bridge = devfn == 0xa0;
+ bool is_zip_bridge = devfn == 0xa8;
+ bool is_dfa_bridge = devfn == 0xb0;
+ bool is_nic_bridge = devfn == 0x10;
+
+ if (where_a == 0x70) {
+ addr = bus->ops->map_bus(bus, devfn, where_a);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+ v = readl(addr);
+ if (v & 0xff00)
+ pr_err("Bad PCIe cap header: %08x\n", v);
+ v |= 0xbc00; /* next capability is EA at 0xbc */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xbc) {
+ if (is_nic_bridge)
+ v = 0x10014; /* EA last in chain, 1 entry */
+ else
+ v = 0x00014; /* EA last in chain, no entries */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xc0) {
+ if (is_rsl_bridge || is_nic_bridge)
+ v = 0x0101; /* subordinate:secondary = 1:1 */
+ else if (is_rad_bridge)
+ v = 0x0202; /* subordinate:secondary = 2:2 */
+ else if (is_zip_bridge)
+ v = 0x0303; /* subordinate:secondary = 3:3 */
+ else if (is_dfa_bridge)
+ v = 0x0404; /* subordinate:secondary = 4:4 */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xc4 && is_nic_bridge) {
+ /* Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4 */
+ v = 0x80ff0564;
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xc8 && is_nic_bridge) {
+ v = 0x00000002; /* Base-L 64-bit */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xcc && is_nic_bridge) {
+ v = 0xfffffffe; /* MaxOffset-L 64-bit */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xd0 && is_nic_bridge) {
+ v = 0x00008430; /* NIC Base-H */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ if (where_a == 0xd4 && is_nic_bridge) {
+ v = 0x0000000f; /* MaxOffset-H */
+ set_val(v, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ }
+no_emulation:
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ /*
+ * All BARs have fixed addresses; ignore BAR writes so they
+ * don't get corrupted.
+ */
+ if ((where >= 0x10 && where < 0x2c) ||
+ (where >= 0x1a4 && where < 0x1bc))
+ /* BAR or SR-IOV BAR */
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static struct gen_pci_cfg_bus_ops thunder_ecam_bus_ops = {
+ .bus_shift = 20,
+ .ops = {
+ .map_bus = thunder_ecam_map_bus,
+ .read = thunder_ecam_config_read,
+ .write = thunder_ecam_config_write,
+ }
+};
+
+static const struct of_device_id thunder_ecam_of_match[] = {
+ { .compatible = "cavium,pci-host-thunder-ecam",
+ .data = &thunder_ecam_bus_ops },
+
+ { },
+};
+MODULE_DEVICE_TABLE(of, thunder_ecam_of_match);
+
+static int thunder_ecam_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+
+ if (!pci)
+ return -ENOMEM;
+
+ of_id = of_match_node(thunder_ecam_of_match, dev->of_node);
+ pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
+
+ return pci_host_common_probe(pdev, pci);
+}
+
+static struct platform_driver thunder_ecam_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = thunder_ecam_of_match,
+ },
+ .probe = thunder_ecam_probe,
+};
+module_platform_driver(thunder_ecam_driver);
+
+MODULE_DESCRIPTION("Thunder ECAM PCI host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
new file mode 100644
index 000000000000..cabb92a514ac
--- /dev/null
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -0,0 +1,346 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2015 - 2016 Cavium, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+#include "pci-host-common.h"
+
+#define PEM_CFG_WR 0x28
+#define PEM_CFG_RD 0x30
+
+struct thunder_pem_pci {
+ struct gen_pci gen_pci;
+ u32 ea_entry[3];
+ void __iomem *pem_reg_base;
+};
+
+static void __iomem *thunder_pem_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct gen_pci *pci = bus->sysdata;
+ resource_size_t idx = bus->number - pci->cfg.bus_range->start;
+
+ return pci->cfg.win[idx] + ((devfn << 16) | where);
+}
+
+static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u64 read_val;
+ struct thunder_pem_pci *pem_pci;
+ struct gen_pci *pci = bus->sysdata;
+
+ pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci);
+
+ if (devfn != 0 || where >= 2048) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /*
+ * 32-bit accesses only. Write the address to the low order
+ * bits of PEM_CFG_RD, then trigger the read by reading back.
+ * The config data lands in the upper 32-bits of PEM_CFG_RD.
+ */
+ read_val = where & ~3ull;
+ writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+
+ /*
+ * The config space contains some garbage, fix it up. Also
+ * synthesize an EA capability for the BAR used by MSI-X.
+ */
+ switch (where & ~3) {
+ case 0x40:
+ read_val &= 0xffff00ff;
+ read_val |= 0x00007000; /* Skip MSI CAP */
+ break;
+ case 0x70: /* Express Cap */
+		/* PME interrupt on vector 2 */
+ read_val |= (2u << 25);
+ break;
+ case 0xb0: /* MSI-X Cap */
+ /* TableSize=4, Next Cap is EA */
+ read_val &= 0xc00000ff;
+ read_val |= 0x0003bc00;
+ break;
+ case 0xb4:
+ /* Table offset=0, BIR=0 */
+ read_val = 0x00000000;
+ break;
+ case 0xb8:
+ /* BPA offset=0xf0000, BIR=0 */
+ read_val = 0x000f0000;
+ break;
+ case 0xbc:
+ /* EA, 1 entry, no next Cap */
+ read_val = 0x00010014;
+ break;
+ case 0xc0:
+ /* DW2 for type-1 */
+ read_val = 0x00000000;
+ break;
+ case 0xc4:
+ /* Entry BEI=0, PP=0x00, SP=0xff, ES=3 */
+ read_val = 0x80ff0003;
+ break;
+ case 0xc8:
+ read_val = pem_pci->ea_entry[0];
+ break;
+ case 0xcc:
+ read_val = pem_pci->ea_entry[1];
+ break;
+ case 0xd0:
+ read_val = pem_pci->ea_entry[2];
+ break;
+ default:
+ break;
+ }
+ read_val >>= (8 * (where & 3));
+ switch (size) {
+ case 1:
+ read_val &= 0xff;
+ break;
+ case 2:
+ read_val &= 0xffff;
+ break;
+ default:
+ break;
+ }
+ *val = read_val;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct gen_pci *pci = bus->sysdata;
+
+ if (bus->number < pci->cfg.bus_range->start ||
+ bus->number > pci->cfg.bus_range->end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == pci->cfg.bus_range->start)
+ return thunder_pem_bridge_read(bus, devfn, where, size, val);
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+/*
+ * Some of the w1c_bits below also include read-only or non-writable
+ * reserved bits; this makes the code simpler and is OK because the
+ * bits are not affected by writing zeros to them.
+ */
+static u32 thunder_pem_bridge_w1c_bits(int where)
+{
+ u32 w1c_bits = 0;
+
+ switch (where & ~3) {
+ case 0x04: /* Command/Status */
+ case 0x1c: /* Base and I/O Limit/Secondary Status */
+ w1c_bits = 0xff000000;
+ break;
+ case 0x44: /* Power Management Control and Status */
+ w1c_bits = 0xfffffe00;
+ break;
+ case 0x78: /* Device Control/Device Status */
+ case 0x80: /* Link Control/Link Status */
+ case 0x88: /* Slot Control/Slot Status */
+ case 0x90: /* Root Status */
+ case 0xa0: /* Link Control 2 Registers/Link Status 2 */
+ w1c_bits = 0xffff0000;
+ break;
+ case 0x104: /* Uncorrectable Error Status */
+ case 0x110: /* Correctable Error Status */
+ case 0x130: /* Error Status */
+ case 0x160: /* Link Control 4 */
+ w1c_bits = 0xffffffff;
+ break;
+ default:
+ break;
+ }
+ return w1c_bits;
+}
+
+static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct gen_pci *pci = bus->sysdata;
+ struct thunder_pem_pci *pem_pci;
+ u64 write_val, read_val;
+ u32 mask = 0;
+
+ pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci);
+
+ if (devfn != 0 || where >= 2048)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * 32-bit accesses only. If the write is for a size smaller
+ * than 32-bits, we must first read the 32-bit value and merge
+ * in the desired bits and then write the whole 32-bits back
+ * out.
+ */
+ switch (size) {
+ case 1:
+ read_val = where & ~3ull;
+ writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ case 2:
+ read_val = where & ~3ull;
+ writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
+ read_val >>= 32;
+ mask = ~(0xffff << (8 * (where & 3)));
+ read_val &= mask;
+ val = (val & 0xffff) << (8 * (where & 3));
+ val |= (u32)read_val;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * By expanding the write width to 32 bits, we may
+ * inadvertently hit some W1C bits that were not intended to
+ * be written. Calculate the mask that must be applied to the
+ * data to be written to avoid these cases.
+ */
+ if (mask) {
+ u32 w1c_bits = thunder_pem_bridge_w1c_bits(where);
+
+ if (w1c_bits) {
+ mask &= w1c_bits;
+ val &= ~mask;
+ }
+ }
+
+ /*
+ * Low order bits are the config address, the high order 32
+ * bits are the data to be written.
+ */
+ write_val = where & ~3ull;
+ write_val |= (((u64)val) << 32);
+ writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct gen_pci *pci = bus->sysdata;
+
+ if (bus->number < pci->cfg.bus_range->start ||
+ bus->number > pci->cfg.bus_range->end)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * The first device on the bus is the PEM PCIe bridge.
+ * Special case its config access.
+ */
+ if (bus->number == pci->cfg.bus_range->start)
+ return thunder_pem_bridge_write(bus, devfn, where, size, val);
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static struct gen_pci_cfg_bus_ops thunder_pem_bus_ops = {
+ .bus_shift = 24,
+ .ops = {
+ .map_bus = thunder_pem_map_bus,
+ .read = thunder_pem_config_read,
+ .write = thunder_pem_config_write,
+ }
+};
+
+static const struct of_device_id thunder_pem_of_match[] = {
+ { .compatible = "cavium,pci-host-thunder-pem",
+ .data = &thunder_pem_bus_ops },
+
+ { },
+};
+MODULE_DEVICE_TABLE(of, thunder_pem_of_match);
+
+static int thunder_pem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+ resource_size_t bar4_start;
+ struct resource *res_pem;
+ struct thunder_pem_pci *pem_pci;
+
+ pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
+ if (!pem_pci)
+ return -ENOMEM;
+
+ of_id = of_match_node(thunder_pem_of_match, dev->of_node);
+ pem_pci->gen_pci.cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
+
+ /*
+ * The second register range is the PEM bridge to the PCIe
+ * bus. It has a different config access method than those
+ * devices behind the bridge.
+ */
+ res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_pem) {
+ dev_err(dev, "missing \"reg[1]\"property\n");
+ return -EINVAL;
+ }
+
+ pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
+ if (!pem_pci->pem_reg_base)
+ return -ENOMEM;
+
+ /*
+ * The MSI-X BAR for the PEM and AER interrupts is located at
+ * a fixed offset from the PEM register base. Generate a
+ * fragment of the synthesized Enhanced Allocation capability
+ * structure here for the BAR.
+ */
+ bar4_start = res_pem->start + 0xf00000;
+ pem_pci->ea_entry[0] = (u32)bar4_start | 2;
+ pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
+ pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
+
+ return pci_host_common_probe(pdev, &pem_pci->gen_pci);
+}
+
+static struct platform_driver thunder_pem_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = thunder_pem_of_match,
+ },
+ .probe = thunder_pem_probe,
+};
+module_platform_driver(thunder_pem_driver);
+
+MODULE_DESCRIPTION("Thunder PEM PCIe host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 99da549d5d06..dbac6fb3f0bd 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -40,6 +40,7 @@
#define P2A_INT_ENABLE 0x3070
#define P2A_INT_ENA_ALL 0xf
#define RP_LTSSM 0x3c64
+#define RP_LTSSM_MASK 0x1f
#define LTSSM_L0 0xf
/* TLP configuration type 0 and 1 */
@@ -140,7 +141,7 @@ static void tlp_write_tx(struct altera_pcie *pcie,
static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
{
- return !!(cra_readl(pcie, RP_LTSSM) & LTSSM_L0);
+	return (cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0;
}
static bool altera_pcie_valid_config(struct altera_pcie *pcie,
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
new file mode 100644
index 000000000000..b3500994d08a
--- /dev/null
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -0,0 +1,138 @@
+/*
+ * PCIe RC driver for Synopsys DesignWare Core
+ *
+ * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Authors: Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+struct dw_plat_pcie {
+ void __iomem *mem_base;
+ struct pcie_port pp;
+};
+
+static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static void dw_plat_pcie_host_init(struct pcie_port *pp)
+{
+ dw_pcie_setup_rc(pp);
+ dw_pcie_wait_for_link(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+}
+
+static struct pcie_host_ops dw_plat_pcie_host_ops = {
+ .host_init = dw_plat_pcie_host_init,
+};
+
+static int dw_plat_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (pp->msi_irq < 0)
+ return pp->msi_irq;
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ dw_plat_pcie_msi_irq_handler,
+ IRQF_SHARED, "dw-plat-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request MSI IRQ\n");
+ return ret;
+ }
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &dw_plat_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_plat_pcie_probe(struct platform_device *pdev)
+{
+ struct dw_plat_pcie *dw_plat_pcie;
+ struct pcie_port *pp;
+ struct resource *res; /* Resource from DT */
+ int ret;
+
+ dw_plat_pcie = devm_kzalloc(&pdev->dev, sizeof(*dw_plat_pcie),
+ GFP_KERNEL);
+ if (!dw_plat_pcie)
+ return -ENOMEM;
+
+ pp = &dw_plat_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ dw_plat_pcie->mem_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dw_plat_pcie->mem_base))
+ return PTR_ERR(dw_plat_pcie->mem_base);
+
+ pp->dbi_base = dw_plat_pcie->mem_base;
+
+ ret = dw_plat_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, dw_plat_pcie);
+ return 0;
+}
+
+static const struct of_device_id dw_plat_pcie_of_match[] = {
+ { .compatible = "snps,dw-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_plat_pcie_of_match);
+
+static struct platform_driver dw_plat_pcie_driver = {
+ .driver = {
+ .name = "dw-pcie",
+ .of_match_table = dw_plat_pcie_of_match,
+ },
+ .probe = dw_plat_pcie_probe,
+};
+
+module_platform_driver(dw_plat_pcie_driver);
+
+MODULE_AUTHOR("Joao Pinto <Joao.Pinto@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys PCIe host controller glue platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 21716827847a..a4cccd356304 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -22,6 +22,7 @@
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>
+#include <linux/delay.h>
#include "pcie-designware.h"
@@ -69,6 +70,11 @@
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
+/* PCIe Port Logic registers */
+#define PLR_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
+#define PCIE_PHY_DEBUG_R1_LINK_UP 0x00000010
+
static struct pci_ops dw_pcie_ops;
int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
@@ -380,12 +386,33 @@ static struct msi_controller dw_pcie_msi_chip = {
.teardown_irq = dw_msi_teardown_irq,
};
+int dw_pcie_wait_for_link(struct pcie_port *pp)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (dw_pcie_link_up(pp)) {
+ dev_info(pp->dev, "link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ dev_err(pp->dev, "phy link never came up\n");
+
+ return -ETIMEDOUT;
+}
+
int dw_pcie_link_up(struct pcie_port *pp)
{
+ u32 val;
+
if (pp->ops->link_up)
return pp->ops->link_up(pp);
- return 0;
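+	/* No platform callback: fall back to the common DWC debug register. */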
+ val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
+ return val & PCIE_PHY_DEBUG_R1_LINK_UP;
}
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
@@ -517,6 +544,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (pp->ops->host_init)
pp->ops->host_init(pp);
+ /*
+ * If the platform provides ->rd_other_conf, it means the platform
+ * uses its own address translation component rather than ATU, so
+ * we should not program the ATU here.
+ */
if (!pp->ops->rd_other_conf)
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_MEM, pp->mem_base,
@@ -551,13 +583,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
- }
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index 2356d29e8527..f437f9b5be04 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -22,6 +22,11 @@
#define MAX_MSI_IRQS 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
struct pcie_port {
struct device *dev;
u8 root_bus_nr;
@@ -76,6 +81,7 @@ int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val);
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
+int dw_pcie_wait_for_link(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index e845fba19632..f2f90c50f75d 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -116,8 +116,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
- struct device *dev = pcie->dev;
- unsigned int retries = 0;
u32 val;
if (dw_pcie_link_up(&pcie->pp))
@@ -128,15 +126,7 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
- do {
- if (dw_pcie_link_up(&pcie->pp))
- return 0;
- usleep_range(250, 1000);
- } while (retries < 200);
-
- dev_warn(dev, "phy link never came up\n");
-
- return -ETIMEDOUT;
+ return dw_pcie_wait_for_link(&pcie->pp);
}
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 4edb5181f4e2..35092188039b 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -390,9 +390,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
rcar_pcie_setup(&res, pcie);
- /* Do not reassign resources if probe only */
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
if (IS_ENABLED(CONFIG_PCI_MSI))
bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
@@ -408,13 +406,11 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
- }
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index a6cd8233e8c0..a4060b85ab23 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -13,7 +13,6 @@
*/
#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -149,7 +148,6 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
- unsigned int retries;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "link already up\n");
@@ -200,17 +198,7 @@ static int spear13xx_pcie_establish_link(struct pcie_port *pp)
| ((u32)1 << REG_TRANSLATION_ENABLE),
&app_reg->app_ctrl_0);
- /* check if the link is up or not */
- for (retries = 0; retries < 10; retries++) {
- if (dw_pcie_link_up(pp)) {
- dev_info(pp->dev, "link up\n");
- return 0;
- }
- mdelay(100);
- }
-
- dev_err(pp->dev, "link Fail\n");
- return -EINVAL;
+ return dw_pcie_wait_for_link(pp);
}
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
new file mode 100644
index 000000000000..5139e6443bbd
--- /dev/null
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -0,0 +1,881 @@
+/*
+ * PCIe host controller driver for NWL PCIe Bridge
+ * Based on pcie-xilinx.c, pci-tegra.c
+ *
+ * (C) Copyright 2014 - 2015, Xilinx, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/irqchip/chained_irq.h>
+
+/* Bridge core config registers */
+#define BRCFG_PCIE_RX0 0x00000000
+#define BRCFG_INTERRUPT 0x00000010
+#define BRCFG_PCIE_RX_MSG_FILTER 0x00000020
+
+/* Egress - Bridge translation registers */
+#define E_BREG_CAPABILITIES 0x00000200
+#define E_BREG_CONTROL 0x00000208
+#define E_BREG_BASE_LO 0x00000210
+#define E_BREG_BASE_HI 0x00000214
+#define E_ECAM_CAPABILITIES 0x00000220
+#define E_ECAM_CONTROL 0x00000228
+#define E_ECAM_BASE_LO 0x00000230
+#define E_ECAM_BASE_HI 0x00000234
+
+/* Ingress - address translations */
+#define I_MSII_CAPABILITIES 0x00000300
+#define I_MSII_CONTROL 0x00000308
+#define I_MSII_BASE_LO 0x00000310
+#define I_MSII_BASE_HI 0x00000314
+
+#define I_ISUB_CONTROL 0x000003E8
+#define SET_ISUB_CONTROL BIT(0)
+/* Received message FIFO - interrupt status registers */
+#define MSGF_MISC_STATUS 0x00000400
+#define MSGF_MISC_MASK 0x00000404
+#define MSGF_LEG_STATUS 0x00000420
+#define MSGF_LEG_MASK 0x00000424
+#define MSGF_MSI_STATUS_LO 0x00000440
+#define MSGF_MSI_STATUS_HI 0x00000444
+#define MSGF_MSI_MASK_LO 0x00000448
+#define MSGF_MSI_MASK_HI 0x0000044C
+
+/* Msg filter mask bits */
+#define CFG_ENABLE_PM_MSG_FWD BIT(1)
+#define CFG_ENABLE_INT_MSG_FWD BIT(2)
+#define CFG_ENABLE_ERR_MSG_FWD BIT(3)
+#define CFG_ENABLE_SLT_MSG_FWD BIT(5)
+#define CFG_ENABLE_VEN_MSG_FWD BIT(7)
+#define CFG_ENABLE_OTH_MSG_FWD BIT(13)
+#define CFG_ENABLE_VEN_MSG_EN BIT(14)
+#define CFG_ENABLE_VEN_MSG_VEN_INV BIT(15)
+#define CFG_ENABLE_VEN_MSG_VEN_ID GENMASK(31, 16)
+#define CFG_ENABLE_MSG_FILTER_MASK (CFG_ENABLE_PM_MSG_FWD | \
+ CFG_ENABLE_INT_MSG_FWD | \
+ CFG_ENABLE_ERR_MSG_FWD | \
+ CFG_ENABLE_SLT_MSG_FWD | \
+ CFG_ENABLE_VEN_MSG_FWD | \
+ CFG_ENABLE_OTH_MSG_FWD | \
+ CFG_ENABLE_VEN_MSG_EN | \
+ CFG_ENABLE_VEN_MSG_VEN_INV | \
+ CFG_ENABLE_VEN_MSG_VEN_ID)
+
+/* Misc interrupt status mask bits */
+#define MSGF_MISC_SR_RXMSG_AVAIL BIT(0)
+#define MSGF_MISC_SR_RXMSG_OVER BIT(1)
+#define MSGF_MISC_SR_SLAVE_ERR BIT(4)
+#define MSGF_MISC_SR_MASTER_ERR BIT(5)
+#define MSGF_MISC_SR_I_ADDR_ERR BIT(6)
+#define MSGF_MISC_SR_E_ADDR_ERR BIT(7)
+#define MSGF_MISC_SR_UR_DETECT BIT(20)
+
+#define MSGF_MISC_SR_PCIE_CORE GENMASK(18, 16)
+#define MSGF_MISC_SR_PCIE_CORE_ERR GENMASK(31, 22)
+
+#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
+ MSGF_MISC_SR_RXMSG_OVER | \
+ MSGF_MISC_SR_SLAVE_ERR | \
+ MSGF_MISC_SR_MASTER_ERR | \
+ MSGF_MISC_SR_I_ADDR_ERR | \
+ MSGF_MISC_SR_E_ADDR_ERR | \
+ MSGF_MISC_SR_UR_DETECT | \
+ MSGF_MISC_SR_PCIE_CORE | \
+ MSGF_MISC_SR_PCIE_CORE_ERR)
+
+/* Legacy interrupt status mask bits */
+#define MSGF_LEG_SR_INTA BIT(0)
+#define MSGF_LEG_SR_INTB BIT(1)
+#define MSGF_LEG_SR_INTC BIT(2)
+#define MSGF_LEG_SR_INTD BIT(3)
+#define MSGF_LEG_SR_MASKALL (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \
+ MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD)
+
+/* MSI interrupt status mask bits */
+#define MSGF_MSI_SR_LO_MASK BIT(0)
+#define MSGF_MSI_SR_HI_MASK BIT(0)
+
+#define MSII_PRESENT BIT(0)
+#define MSII_ENABLE BIT(0)
+#define MSII_STATUS_ENABLE BIT(15)
+
+/* Bridge config interrupt mask */
+#define BRCFG_INTERRUPT_MASK BIT(0)
+#define BREG_PRESENT BIT(0)
+#define BREG_ENABLE BIT(0)
+#define BREG_ENABLE_FORCE BIT(1)
+
+/* E_ECAM status mask bits */
+#define E_ECAM_PRESENT BIT(0)
+#define E_ECAM_CR_ENABLE BIT(0)
+#define E_ECAM_SIZE_LOC GENMASK(20, 16)
+#define E_ECAM_SIZE_SHIFT 16
+#define ECAM_BUS_LOC_SHIFT 20
+#define ECAM_DEV_LOC_SHIFT 12
+#define NWL_ECAM_VALUE_DEFAULT 12
+
+#define CFG_DMA_REG_BAR GENMASK(2, 0)
+
+#define INT_PCI_MSI_NR (2 * 32)
+#define INTX_NUM 4
+
+/* Reading the PS_LINKUP register */
+#define PS_LINKUP_OFFSET 0x00000238
+#define PCIE_PHY_LINKUP_BIT BIT(0)
+#define PHY_RDY_LINKUP_BIT BIT(1)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+struct nwl_msi { /* MSI information */
+ struct irq_domain *msi_domain;
+ unsigned long *bitmap;
+ struct irq_domain *dev_domain;
+ struct mutex lock; /* protect bitmap variable */
+ int irq_msi0;
+ int irq_msi1;
+};
+
+struct nwl_pcie {
+ struct device *dev;
+ void __iomem *breg_base;
+ void __iomem *pcireg_base;
+ void __iomem *ecam_base;
+ phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
+ phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
+ phys_addr_t phys_ecam_base; /* Physical Configuration Base */
+ u32 breg_size;
+ u32 pcie_reg_size;
+ u32 ecam_size;
+ int irq_intx;
+ int irq_misc;
+ u32 ecam_value;
+ u8 last_busno;
+ u8 root_busno;
+ struct nwl_msi msi;
+ struct irq_domain *legacy_irq_domain;
+};
+
+static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
+{
+ return readl(pcie->breg_base + off);
+}
+
+static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off)
+{
+ writel(val, pcie->breg_base + off);
+}
+
+static bool nwl_pcie_link_up(struct nwl_pcie *pcie)
+{
+	return !!(readl(pcie->pcireg_base + PS_LINKUP_OFFSET) &
+		  PCIE_PHY_LINKUP_BIT);
+}
+
+static bool nwl_phy_link_up(struct nwl_pcie *pcie)
+{
+	return !!(readl(pcie->pcireg_base + PS_LINKUP_OFFSET) &
+		  PHY_RDY_LINKUP_BIT);
+}
+
+static int nwl_wait_for_link(struct nwl_pcie *pcie)
+{
+ int retries;
+
+ /* check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (nwl_phy_link_up(pcie))
+ return 0;
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ dev_err(pcie->dev, "PHY link never came up\n");
+ return -ETIMEDOUT;
+}
+
+static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct nwl_pcie *pcie = bus->sysdata;
+
+ /* Check link before accessing downstream ports */
+ if (bus->number != pcie->root_busno) {
+ if (!nwl_pcie_link_up(pcie))
+ return false;
+ }
+
+	/* Only one device is allowed below each root port */
+ if (bus->number == pcie->root_busno && devfn > 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * nwl_pcie_map_bus - Get configuration base
+ *
+ * @bus: Bus structure of current bus
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the requested configuration space.
+ */
+static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct nwl_pcie *pcie = bus->sysdata;
+ int relbus;
+
+ if (!nwl_pcie_valid_device(bus, devfn))
+ return NULL;
+
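+	/* ECAM-style offset: bus[27:20], devfn[19:12], register[11:0]. */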
+ relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
+ (devfn << ECAM_DEV_LOC_SHIFT);
+
+ return pcie->ecam_base + relbus + where;
+}
+
+/* PCIe operations */
+static struct pci_ops nwl_pcie_ops = {
+ .map_bus = nwl_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
+{
+ struct nwl_pcie *pcie = data;
+ u32 misc_stat;
+
+ /* Checking for misc interrupts */
+ misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
+ MSGF_MISC_SR_MASKALL;
+ if (!misc_stat)
+ return IRQ_NONE;
+
+ if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
+ dev_err(pcie->dev, "Received Message FIFO Overflow\n");
+
+ if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
+ dev_err(pcie->dev, "Slave error\n");
+
+ if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
+ dev_err(pcie->dev, "Master error\n");
+
+	if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
+		dev_err(pcie->dev,
+			"Ingress address translation error\n");
+
+	if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
+		dev_err(pcie->dev,
+			"Egress address translation error\n");
+
+ if (misc_stat & MSGF_MISC_SR_PCIE_CORE_ERR)
+ dev_err(pcie->dev, "PCIe Core error\n");
+
+ /* Clear misc interrupt status */
+ nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void nwl_pcie_leg_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct nwl_pcie *pcie;
+ unsigned long status;
+ u32 bit;
+ u32 virq;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+
+ while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
+ MSGF_LEG_SR_MASKALL) != 0) {
+ for_each_set_bit(bit, &status, INTX_NUM) {
+ virq = irq_find_mapping(pcie->legacy_irq_domain,
+ bit + 1);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
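+/*
+ * The MSI status register is write-one-to-clear: each pending bit is
+ * acked before its handler is dispatched, and the outer while loop
+ * re-reads the register, so MSIs raised mid-loop are caught on the
+ * next pass rather than lost.
+ */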
+static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
+{
+ struct nwl_msi *msi;
+ unsigned long status;
+ u32 bit;
+ u32 virq;
+
+ msi = &pcie->msi;
+
+ while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ nwl_bridge_writel(pcie, 1 << bit, status_reg);
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+}
+
+static void nwl_pcie_msi_handler_high(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI);
+ chained_irq_exit(chip, desc);
+}
+
+static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
+
+ chained_irq_enter(chip, desc);
+ nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO);
+ chained_irq_exit(chip, desc);
+}
+
+static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops legacy_domain_ops = {
+ .map = nwl_legacy_map,
+};
+
+#ifdef CONFIG_PCI_MSI
+static struct irq_chip nwl_msi_irq_chip = {
+ .name = "nwl_pcie:msi",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+static struct msi_domain_info nwl_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &nwl_msi_irq_chip,
+};
+#endif
+
+static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t msi_addr = pcie->phys_pcie_reg_base;
+
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
+
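+/*
+ * The MSI target address is the bridge itself (see nwl_compose_msi_msg()
+ * above) and no per-vector CPU steering appears to be exposed, so
+ * affinity changes are simply rejected.
+ */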
+static int nwl_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip nwl_irq_chip = {
+ .name = "Xilinx MSI",
+ .irq_compose_msi_msg = nwl_compose_msi_msg,
+ .irq_set_affinity = nwl_msi_set_affinity,
+};
+
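+/*
+ * Allocate a contiguous run of hwirqs so that multi-message MSI
+ * (advertised via MSI_FLAG_MULTI_PCI_MSI above, which requires
+ * consecutive vectors) can be granted.
+ */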
+static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct nwl_pcie *pcie = domain->host_data;
+ struct nwl_msi *msi = &pcie->msi;
+ int bit;
+ int i;
+
+ mutex_lock(&msi->lock);
+ bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
+ nr_irqs, 0);
+ if (bit >= INT_PCI_MSI_NR) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ bitmap_set(msi->bitmap, bit, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+ mutex_unlock(&msi->lock);
+ return 0;
+}
+
+static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct nwl_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+ bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+ .alloc = nwl_irq_domain_alloc,
+ .free = nwl_irq_domain_free,
+};
+
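+/*
+ * Teardown runs in reverse order of setup: detach the chained handlers
+ * first so no further MSIs are dispatched, then remove the IRQ domains,
+ * and finally free the allocation bitmap.
+ */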
+static void nwl_msi_free_irq_domain(struct nwl_pcie *pcie)
+{
+ struct nwl_msi *msi = &pcie->msi;
+
+ if (msi->irq_msi0)
+ irq_set_chained_handler_and_data(msi->irq_msi0, NULL, NULL);
+ if (msi->irq_msi1)
+ irq_set_chained_handler_and_data(msi->irq_msi1, NULL, NULL);
+
+ if (msi->msi_domain)
+ irq_domain_remove(msi->msi_domain);
+ if (msi->dev_domain)
+ irq_domain_remove(msi->dev_domain);
+
+ kfree(msi->bitmap);
+ msi->bitmap = NULL;
+}
+
+static void nwl_pcie_free_irq_domain(struct nwl_pcie *pcie)
+{
+ int i;
+ u32 irq;
+
+ for (i = 0; i < INTX_NUM; i++) {
+ irq = irq_find_mapping(pcie->legacy_irq_domain, i + 1);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+ if (pcie->legacy_irq_domain)
+ irq_domain_remove(pcie->legacy_irq_domain);
+
+ nwl_msi_free_irq_domain(pcie);
+}
+
+static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
+{
+#ifdef CONFIG_PCI_MSI
+ struct fwnode_handle *fwnode = of_node_to_fwnode(pcie->dev->of_node);
+ struct nwl_msi *msi = &pcie->msi;
+
+ msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
+ &dev_msi_domain_ops, pcie);
+ if (!msi->dev_domain) {
+ dev_err(pcie->dev, "failed to create dev IRQ domain\n");
+ return -ENOMEM;
+ }
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &nwl_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(pcie->dev, "failed to create msi IRQ domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
+{
+ struct device_node *node = pcie->dev->of_node;
+ struct device_node *legacy_intc_node;
+
+ legacy_intc_node = of_get_next_child(node, NULL);
+ if (!legacy_intc_node) {
+ dev_err(pcie->dev, "No legacy intc node found\n");
+ return -EINVAL;
+ }
+
+ pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
+ INTX_NUM,
+ &legacy_domain_ops,
+ pcie);
+
+ if (!pcie->legacy_irq_domain) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return nwl_pcie_init_msi_irq_domain(pcie);
+}
+
+static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct nwl_msi *msi = &pcie->msi;
+ unsigned long base;
+ int ret;
+ int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);
+
+ mutex_init(&msi->lock);
+
+ msi->bitmap = kzalloc(size, GFP_KERNEL);
+ if (!msi->bitmap)
+ return -ENOMEM;
+
+ /* Get msi_1 IRQ number */
+ msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+ if (msi->irq_msi1 < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi1);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(msi->irq_msi1,
+ nwl_pcie_msi_handler_high, pcie);
+
+ /* Get msi_0 IRQ number */
+ msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+ if (msi->irq_msi0 < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ#%d\n", msi->irq_msi0);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ irq_set_chained_handler_and_data(msi->irq_msi0,
+ nwl_pcie_msi_handler_low, pcie);
+
+ /* Check for msii_present bit */
+ ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
+ if (!ret) {
+ dev_err(pcie->dev, "MSI not present\n");
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Enable MSII */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
+ MSII_ENABLE, I_MSII_CONTROL);
+
+ /* Enable MSII status */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) |
+ MSII_STATUS_ENABLE, I_MSII_CONTROL);
+
+ /* Program the MSI target address (the bridge's own register base) */
+ base = pcie->phys_pcie_reg_base;
+ nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO);
+ nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI);
+
+ /*
+ * For high range MSI interrupts: disable, clear any pending,
+ * and enable
+ */
+ nwl_bridge_writel(pcie, (u32)~MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);
+
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) &
+ MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI);
+
+ nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI);
+
+ /*
+ * For low range MSI interrupts: disable, clear any pending,
+ * and enable
+ */
+ nwl_bridge_writel(pcie, (u32)~MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);
+
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) &
+ MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO);
+
+ nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);
+
+ return 0;
+err:
+ kfree(msi->bitmap);
+ msi->bitmap = NULL;
+ return ret;
+}
+
+static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ u32 breg_val, ecam_val, first_busno = 0;
+ int err;
+
+ breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
+ if (!breg_val) {
+ dev_err(pcie->dev, "BREG is not present\n");
+ return -ENODEV;
+ }
+
+ /* Write bridge_off to breg base */
+ nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base),
+ E_BREG_BASE_LO);
+ nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base),
+ E_BREG_BASE_HI);
+
+ /* Enable BREG */
+ nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE,
+ E_BREG_CONTROL);
+
+ /* Disable DMA channel registers */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) |
+ CFG_DMA_REG_BAR, BRCFG_PCIE_RX0);
+
+ /* Enable Ingress subtractive decode translation */
+ nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL);
+
+ /* Enable filtering of received PCIe messages */
+ nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK,
+ BRCFG_PCIE_RX_MSG_FILTER);
+
+ err = nwl_wait_for_link(pcie);
+ if (err)
+ return err;
+
+ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT;
+ if (!ecam_val) {
+ dev_err(pcie->dev, "ECAM is not present\n");
+ return -ENODEV;
+ }
+
+ /* Enable ECAM */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
+ E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
+
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
+ (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
+ E_ECAM_CONTROL);
+
+ nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
+ E_ECAM_BASE_LO);
+ nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
+ E_ECAM_BASE_HI);
+
+ /* Get bus range */
+ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
+ pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
+ /* Write primary, secondary and subordinate bus numbers */
+ ecam_val = first_busno;
+ ecam_val |= (first_busno + 1) << 8;
+ ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
+ writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
+
+ if (nwl_pcie_link_up(pcie))
+ dev_info(pcie->dev, "Link is UP\n");
+ else
+ dev_info(pcie->dev, "Link is DOWN\n");
+
+ /* Get misc IRQ number */
+ pcie->irq_misc = platform_get_irq_byname(pdev, "misc");
+ if (pcie->irq_misc < 0) {
+ dev_err(&pdev->dev, "failed to get misc IRQ %d\n",
+ pcie->irq_misc);
+ return -EINVAL;
+ }
+
+ err = devm_request_irq(pcie->dev, pcie->irq_misc,
+ nwl_pcie_misc_handler, IRQF_SHARED,
+ "nwl_pcie:misc", pcie);
+ if (err) {
+ dev_err(pcie->dev, "fail to register misc IRQ#%d\n",
+ pcie->irq_misc);
+ return err;
+ }
+
+ /* Disable all misc interrupts */
+ nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
+
+ /* Clear pending misc interrupts */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) &
+ MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS);
+
+ /* Enable all misc interrupts */
+ nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
+
+ /* Disable all legacy interrupts */
+ nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
+
+ /* Clear pending legacy interrupts */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
+ MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
+
+ /* Enable all legacy interrupts */
+ nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
+
+ /* Enable the bridge config interrupt */
+ nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) |
+ BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT);
+
+ return 0;
+}
+
+static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct device_node *node = pcie->dev->of_node;
+ struct resource *res;
+ const char *type;
+
+ /* Check for device type */
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(pcie->dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ pcie->breg_base = devm_ioremap_resource(pcie->dev, res);
+ if (IS_ERR(pcie->breg_base))
+ return PTR_ERR(pcie->breg_base);
+ pcie->phys_breg_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg");
+ pcie->pcireg_base = devm_ioremap_resource(pcie->dev, res);
+ if (IS_ERR(pcie->pcireg_base))
+ return PTR_ERR(pcie->pcireg_base);
+ pcie->phys_pcie_reg_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ pcie->ecam_base = devm_ioremap_resource(pcie->dev, res);
+ if (IS_ERR(pcie->ecam_base))
+ return PTR_ERR(pcie->ecam_base);
+ pcie->phys_ecam_base = res->start;
+
+ /* Get intx IRQ number */
+ pcie->irq_intx = platform_get_irq_byname(pdev, "intx");
+ if (pcie->irq_intx < 0) {
+ dev_err(&pdev->dev, "failed to get intx IRQ %d\n",
+ pcie->irq_intx);
+ return -EINVAL;
+ }
+
+ irq_set_chained_handler_and_data(pcie->irq_intx,
+ nwl_pcie_leg_handler, pcie);
+
+ return 0;
+}
+
+static const struct of_device_id nwl_pcie_of_match[] = {
+ { .compatible = "xlnx,nwl-pcie-2.11", },
+ {}
+};
+
+static int nwl_pcie_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct nwl_pcie *pcie;
+ struct pci_bus *bus;
+ struct pci_bus *child;
+ int err;
+ resource_size_t iobase = 0;
+ LIST_HEAD(res);
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = &pdev->dev;
+ pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
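+ /*
+ * ecam_value is the encoded ECAM size that nwl_pcie_bridge_init()
+ * programs into E_ECAM_CONTROL.
+ */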
+
+ err = nwl_pcie_parse_dt(pcie, pdev);
+ if (err) {
+ dev_err(pcie->dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ err = nwl_pcie_bridge_init(pcie);
+ if (err) {
+ dev_err(pcie->dev, "HW Initalization failed\n");
+ return err;
+ }
+
+ err = of_pci_get_host_bridge_resources(node, 0, 0xff, &res, &iobase);
+ if (err) {
+ pr_err("Getting bridge resources failed\n");
+ return err;
+ }
+
+ err = nwl_pcie_init_irq_domain(pcie);
+ if (err) {
+ dev_err(pcie->dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ bus = pci_create_root_bus(&pdev->dev, pcie->root_busno,
+ &nwl_pcie_ops, pcie, &res);
+ if (!bus)
+ return -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = nwl_pcie_enable_msi(pcie, bus);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to enable MSI support: %d\n", err);
+ return err;
+ }
+ }
+ pci_scan_child_bus(bus);
+ pci_assign_unassigned_bus_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_bus_add_devices(bus);
+ platform_set_drvdata(pdev, pcie);
+ return 0;
+}
+
+static int nwl_pcie_remove(struct platform_device *pdev)
+{
+ struct nwl_pcie *pcie = platform_get_drvdata(pdev);
+
+ nwl_pcie_free_irq_domain(pcie);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct platform_driver nwl_pcie_driver = {
+ .driver = {
+ .name = "nwl-pcie",
+ .of_match_table = nwl_pcie_of_match,
+ },
+ .probe = nwl_pcie_probe,
+ .remove = nwl_pcie_remove,
+};
+module_platform_driver(nwl_pcie_driver);
+
+MODULE_AUTHOR("Xilinx, Inc");
+MODULE_DESCRIPTION("NWL PCIe driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 4cfa46360d12..65f0fe0c2eaf 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -94,9 +94,6 @@
/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS 128
-/* Number of Memory Resources */
-#define XILINX_MAX_NUM_RESOURCES 3
-
/**
* struct xilinx_pcie_port - PCIe port information
* @reg_base: IO Mapped Register Base
@@ -105,7 +102,6 @@
* @root_busno: Root Bus number
* @dev: Device pointer
* @irq_domain: IRQ domain pointer
- * @bus_range: Bus range
* @resources: Bus Resources
*/
struct xilinx_pcie_port {
@@ -115,17 +111,11 @@ struct xilinx_pcie_port {
u8 root_busno;
struct device *dev;
struct irq_domain *irq_domain;
- struct resource bus_range;
struct list_head resources;
};
static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
-static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
-{
- return sys->private_data;
-}
-
static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
{
return readl(port->reg_base + reg);
@@ -167,7 +157,7 @@ static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
*/
static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
- struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+ struct xilinx_pcie_port *port = bus->sysdata;
/* Check if link is up when trying to access downstream ports */
if (bus->number != port->root_busno)
@@ -200,7 +190,7 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+ struct xilinx_pcie_port *port = bus->sysdata;
int relbus;
if (!xilinx_pcie_valid_device(bus, devfn))
@@ -232,7 +222,7 @@ static void xilinx_pcie_destroy_msi(unsigned int irq)
if (!test_bit(irq, msi_irq_in_use)) {
msi = irq_get_msi_desc(irq);
- port = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
+ port = msi_desc_to_pci_sysdata(msi);
dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
} else {
clear_bit(irq, msi_irq_in_use);
@@ -281,7 +271,7 @@ static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
struct pci_dev *pdev,
struct msi_desc *desc)
{
- struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
+ struct xilinx_pcie_port *port = pdev->bus->sysdata;
unsigned int irq;
int hwirq;
struct msi_msg msg;
@@ -618,138 +608,6 @@ static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
}
/**
- * xilinx_pcie_setup - Setup memory resources
- * @nr: Bus number
- * @sys: Per controller structure
- *
- * Return: '1' on success and error value on failure
- */
-static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
-{
- struct xilinx_pcie_port *port = sys_to_pcie(sys);
-
- list_splice_init(&port->resources, &sys->resources);
-
- return 1;
-}
-
-/**
- * xilinx_pcie_scan_bus - Scan PCIe bus for devices
- * @nr: Bus number
- * @sys: Per controller structure
- *
- * Return: Valid Bus pointer on success and NULL on failure
- */
-static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
-{
- struct xilinx_pcie_port *port = sys_to_pcie(sys);
- struct pci_bus *bus;
-
- port->root_busno = sys->busnr;
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- bus = pci_scan_root_bus_msi(port->dev, sys->busnr,
- &xilinx_pcie_ops, sys,
- &sys->resources,
- &xilinx_pcie_msi_chip);
- else
- bus = pci_scan_root_bus(port->dev, sys->busnr,
- &xilinx_pcie_ops, sys, &sys->resources);
- return bus;
-}
-
-/**
- * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
- * @port: PCIe port information
- *
- * Return: '0' on success and error value on failure
- */
-static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
-{
- struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
- struct resource *mem;
- resource_size_t offset;
- struct of_pci_range_parser parser;
- struct of_pci_range range;
- struct resource_entry *win;
- int err = 0, mem_resno = 0;
-
- /* Get the ranges */
- if (of_pci_range_parser_init(&parser, node)) {
- dev_err(dev, "missing \"ranges\" property\n");
- return -EINVAL;
- }
-
- /* Parse the ranges and add the resources found to the list */
- for_each_of_pci_range(&parser, &range) {
-
- if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
- dev_err(dev, "Maximum memory resources exceeded\n");
- return -EINVAL;
- }
-
- mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
- if (!mem) {
- err = -ENOMEM;
- goto free_resources;
- }
-
- of_pci_range_to_resource(&range, node, mem);
-
- switch (mem->flags & IORESOURCE_TYPE_BITS) {
- case IORESOURCE_MEM:
- offset = range.cpu_addr - range.pci_addr;
- mem_resno++;
- break;
- default:
- err = -EINVAL;
- break;
- }
-
- if (err < 0) {
- dev_warn(dev, "Invalid resource found %pR\n", mem);
- continue;
- }
-
- err = request_resource(&iomem_resource, mem);
- if (err)
- goto free_resources;
-
- pci_add_resource_offset(&port->resources, mem, offset);
- }
-
- /* Get the bus range */
- if (of_pci_parse_bus_range(node, &port->bus_range)) {
- u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
- u8 last;
-
- last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
- XILINX_PCIE_BIR_ECAM_SZ_SHIFT;
-
- port->bus_range = (struct resource) {
- .name = node->name,
- .start = 0,
- .end = last,
- .flags = IORESOURCE_BUS,
- };
- }
-
- /* Register bus resource */
- pci_add_resource(&port->resources, &port->bus_range);
-
- return 0;
-
-free_resources:
- release_child_resources(&iomem_resource);
- resource_list_for_each_entry(win, &port->resources)
- devm_kfree(dev, win->res);
- pci_free_resource_list(&port->resources);
-
- return err;
-}
-
-/**
* xilinx_pcie_parse_dt - Parse Device tree
* @port: PCIe port information
*
@@ -800,9 +658,12 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
static int xilinx_pcie_probe(struct platform_device *pdev)
{
struct xilinx_pcie_port *port;
- struct hw_pci hw;
struct device *dev = &pdev->dev;
+ struct pci_bus *bus;
int err;
+ resource_size_t iobase = 0;
+ LIST_HEAD(res);
if (!dev->of_node)
return -ENODEV;
@@ -827,34 +688,28 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
return err;
}
- /*
- * Parse PCI ranges, configuration bus range and
- * request their resources
- */
- INIT_LIST_HEAD(&port->resources);
- err = xilinx_pcie_parse_and_add_res(port);
+ err = of_pci_get_host_bridge_resources(dev->of_node, 0, 0xff, &res,
+ &iobase);
if (err) {
- dev_err(dev, "Failed adding resources\n");
+ dev_err(dev, "Getting bridge resources failed\n");
return err;
}
-
- platform_set_drvdata(pdev, port);
-
- /* Register the device */
- memset(&hw, 0, sizeof(hw));
- hw = (struct hw_pci) {
- .nr_controllers = 1,
- .private_data = (void **)&port,
- .setup = xilinx_pcie_setup,
- .map_irq = of_irq_parse_and_map_pci,
- .scan = xilinx_pcie_scan_bus,
- .ops = &xilinx_pcie_ops,
- };
+ bus = pci_create_root_bus(&pdev->dev, 0,
+ &xilinx_pcie_ops, port, &res);
+ if (!bus)
+ return -ENOMEM;
#ifdef CONFIG_PCI_MSI
xilinx_pcie_msi_chip.dev = port->dev;
+ bus->msi = &xilinx_pcie_msi_chip;
#endif
- pci_common_init_dev(dev, &hw);
+ pci_scan_child_bus(bus);
+ pci_assign_unassigned_bus_resources(bus);
+#ifndef CONFIG_MICROBLAZE
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+#endif
+ pci_bus_add_devices(bus);
+ platform_set_drvdata(pdev, port);
return 0;
}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index eb5efaef06ea..50b8b7d54416 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -93,13 +93,17 @@ out_deconfigure:
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
+ struct pci_dev *pdev;
int rc;
if (!zpci_fn_configured(slot->zdev->state))
return -EIO;
- if (slot->zdev->pdev)
- pci_stop_and_remove_bus_device_locked(slot->zdev->pdev);
+ pdev = pci_get_slot(slot->zdev->bus, ZPCI_DEVFN);
+ if (pdev) {
+ pci_stop_and_remove_bus_device_locked(pdev);
+ pci_dev_put(pdev);
+ }
rc = zpci_disable_device(slot->zdev);
if (rc)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 31f31d460fc9..2194b447201d 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -113,7 +113,7 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
}
-static int virtfn_add(struct pci_dev *dev, int id, int reset)
+int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
{
int i;
int rc = -ENOMEM;
@@ -188,7 +188,7 @@ failed:
return rc;
}
-static void virtfn_remove(struct pci_dev *dev, int id, int reset)
+void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
{
char buf[VIRTFN_ID_LEN];
struct pci_dev *virtfn;
@@ -321,7 +321,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
}
for (i = 0; i < initial; i++) {
- rc = virtfn_add(dev, i, 0);
+ rc = pci_iov_add_virtfn(dev, i, 0);
if (rc)
goto failed;
}
@@ -333,7 +333,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
failed:
while (i--)
- virtfn_remove(dev, i, 0);
+ pci_iov_remove_virtfn(dev, i, 0);
pcibios_sriov_disable(dev);
err_pcibios:
@@ -359,7 +359,7 @@ static void sriov_disable(struct pci_dev *dev)
return;
for (i = 0; i < iov->num_VFs; i++)
- virtfn_remove(dev, i, 0);
+ pci_iov_remove_virtfn(dev, i, 0);
pcibios_sriov_disable(dev);
@@ -387,10 +387,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
struct resource *res;
struct pci_dev *pdev;
- if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END &&
- pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
- return -ENODEV;
-
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
if (ctrl & PCI_SRIOV_CTRL_VFE) {
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 0ae74d96ed85..51357377efbc 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -16,7 +16,7 @@
* the instance number and string from the type 41 record and exports
* it to sysfs.
*
- * Please see http://linux.dell.com/wiki/index.php/Oss/libnetdevname for more
+ * Please see http://linux.dell.com/files/biosdevname/ for more
* information.
*/
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 95d9e7bd933b..342b6918bbde 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -636,7 +636,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
u8 *data = (u8 *) buf;
/* Several chips lock up trying to read undefined config space */
- if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+ if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN))
size = dev->cfg_size;
else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
size = 128;
@@ -769,10 +769,12 @@ static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
- if (off > bin_attr->size)
- count = 0;
- else if (count > bin_attr->size - off)
- count = bin_attr->size - off;
+ if (bin_attr->size > 0) {
+ if (off > bin_attr->size)
+ count = 0;
+ else if (count > bin_attr->size - off)
+ count = bin_attr->size - off;
+ }
return pci_read_vpd(dev, off, count, buf);
}
@@ -783,10 +785,12 @@ static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
- if (off > bin_attr->size)
- count = 0;
- else if (count > bin_attr->size - off)
- count = bin_attr->size - off;
+ if (bin_attr->size > 0) {
+ if (off > bin_attr->size)
+ count = 0;
+ else if (count > bin_attr->size - off)
+ count = bin_attr->size - off;
+ }
return pci_write_vpd(dev, off, count, buf);
}
@@ -1134,33 +1138,36 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
/* allocate attribute structure, piggyback attribute name */
int name_len = write_combine ? 13 : 10;
struct bin_attribute *res_attr;
+ char *res_attr_name;
int retval;
res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
- if (res_attr) {
- char *res_attr_name = (char *)(res_attr + 1);
-
- sysfs_bin_attr_init(res_attr);
- if (write_combine) {
- pdev->res_attr_wc[num] = res_attr;
- sprintf(res_attr_name, "resource%d_wc", num);
- res_attr->mmap = pci_mmap_resource_wc;
- } else {
- pdev->res_attr[num] = res_attr;
- sprintf(res_attr_name, "resource%d", num);
- res_attr->mmap = pci_mmap_resource_uc;
- }
- if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
- res_attr->read = pci_read_resource_io;
- res_attr->write = pci_write_resource_io;
- }
- res_attr->attr.name = res_attr_name;
- res_attr->attr.mode = S_IRUSR | S_IWUSR;
- res_attr->size = pci_resource_len(pdev, num);
- res_attr->private = &pdev->resource[num];
- retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
- } else
- retval = -ENOMEM;
+ if (!res_attr)
+ return -ENOMEM;
+
+ res_attr_name = (char *)(res_attr + 1);
+
+ sysfs_bin_attr_init(res_attr);
+ if (write_combine) {
+ pdev->res_attr_wc[num] = res_attr;
+ sprintf(res_attr_name, "resource%d_wc", num);
+ res_attr->mmap = pci_mmap_resource_wc;
+ } else {
+ pdev->res_attr[num] = res_attr;
+ sprintf(res_attr_name, "resource%d", num);
+ res_attr->mmap = pci_mmap_resource_uc;
+ }
+ if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
+ res_attr->read = pci_read_resource_io;
+ res_attr->write = pci_write_resource_io;
+ }
+ res_attr->attr.name = res_attr_name;
+ res_attr->attr.mode = S_IRUSR | S_IWUSR;
+ res_attr->size = pci_resource_len(pdev, num);
+ res_attr->private = &pdev->resource[num];
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
+ if (retval)
+ kfree(res_attr);
return retval;
}
@@ -1319,7 +1326,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
return -ENOMEM;
sysfs_bin_attr_init(attr);
- attr->size = dev->vpd->len;
+ attr->size = 0;
attr->attr.name = "vpd";
attr->attr.mode = S_IRUSR | S_IWUSR;
attr->read = read_vpd_attr;
@@ -1356,7 +1363,7 @@ error:
int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
{
int retval;
- int rom_size = 0;
+ int rom_size;
struct bin_attribute *attr;
if (!sysfs_initialized)
@@ -1373,12 +1380,8 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
if (retval)
goto err_config_file;
- if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
- rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
- rom_size = 0x20000;
-
/* If the device has a ROM, try to expose it in sysfs. */
+ rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
if (rom_size) {
attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
if (!attr) {
@@ -1409,7 +1412,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
return 0;
err_rom_file:
- if (rom_size) {
+ if (pdev->rom_attr) {
sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
kfree(pdev->rom_attr);
pdev->rom_attr = NULL;
@@ -1447,8 +1450,6 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
*/
void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
{
- int rom_size = 0;
-
if (!sysfs_initialized)
return;
@@ -1461,18 +1462,13 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
pci_remove_resource_files(pdev);
- if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
- rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
- rom_size = 0x20000;
-
- if (rom_size && pdev->rom_attr) {
+ if (pdev->rom_attr) {
sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
kfree(pdev->rom_attr);
+ pdev->rom_attr = NULL;
}
pci_remove_firmware_label_files(pdev);
-
}
static int __init pci_sysfs_init(void)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f89db3af0607..25e0327d4429 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -25,7 +25,6 @@
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
-#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include <linux/aer.h>
#include "pci.h"
@@ -3386,18 +3385,6 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
-int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
-{
- return dma_set_max_seg_size(&dev->dev, size);
-}
-EXPORT_SYMBOL(pci_set_dma_max_seg_size);
-
-int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
-{
- return dma_set_seg_boundary(&dev->dev, mask);
-}
-EXPORT_SYMBOL(pci_set_dma_seg_boundary);
-
/**
* pci_wait_for_pending_transaction - waits for pending transaction
* @dev: the PCI device to operate on
@@ -3414,6 +3401,29 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+/*
+ * We should only need to wait 100ms after FLR, but some devices take longer.
+ * Wait for up to 1000ms for config space to return something other than -1.
+ * Intel IGD requires this when an LCD panel is attached. We read the 2nd
+ * dword because VFs don't implement the 1st dword.
+ */
+static void pci_flr_wait(struct pci_dev *dev)
+{
+ int i = 0;
+ u32 id;
+
+ do {
+ msleep(100);
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ } while (i++ < 10 && id == ~0);
+
+ if (id == ~0)
+ dev_warn(&dev->dev, "Failed to return from FLR\n");
+ else if (i > 1)
+ dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
+ (i - 1) * 100);
+}
+
static int pcie_flr(struct pci_dev *dev, int probe)
{
u32 cap;
@@ -3429,7 +3439,7 @@ static int pcie_flr(struct pci_dev *dev, int probe)
dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
- msleep(100);
+ pci_flr_wait(dev);
return 0;
}
@@ -3459,7 +3469,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
- msleep(100);
+ pci_flr_wait(dev);
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9a1660f592ef..a814bbb80fcb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -97,21 +97,22 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
- void (*release)(struct pci_dev *dev);
+ int (*set_size)(struct pci_dev *dev, size_t len);
};
struct pci_vpd {
- unsigned int len;
const struct pci_vpd_ops *ops;
struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
+ struct mutex lock;
+ unsigned int len;
+ u16 flag;
+ u8 cap;
+ u8 busy:1;
+ u8 valid:1;
};
-int pci_vpd_pci22_init(struct pci_dev *dev);
-static inline void pci_vpd_release(struct pci_dev *dev)
-{
- if (dev->vpd)
- dev->vpd->ops->release(dev);
-}
+int pci_vpd_init(struct pci_dev *dev);
+void pci_vpd_release(struct pci_dev *dev);
/* PCI /proc functions */
#ifdef CONFIG_PROC_FS
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index e294713c8143..72db7f4209ca 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -44,6 +44,7 @@ config PCIEASPM
/sys/module/pcie_aspm/parameters/policy
When in doubt, say Y.
+
config PCIEASPM_DEBUG
bool "Debug PCI Express ASPM"
depends on PCIEASPM
@@ -58,20 +59,20 @@ choice
depends on PCIEASPM
config PCIEASPM_DEFAULT
- bool "BIOS default"
+ bool "BIOS default"
depends on PCIEASPM
help
Use the BIOS defaults for PCI Express ASPM.
config PCIEASPM_POWERSAVE
- bool "Powersave"
+ bool "Powersave"
depends on PCIEASPM
help
Enable PCI Express ASPM L0s and L1 where possible, even if the
BIOS did not.
config PCIEASPM_PERFORMANCE
- bool "Performance"
+ bool "Performance"
depends on PCIEASPM
help
Disable PCI Express ASPM L0s and L1, even if the BIOS enabled them.
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 20db790465dd..db553dc22c8e 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -25,6 +25,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/stddef.h>
+#include <linux/device.h>
#include "aerdrv.h"
/* Override the existing corrected and uncorrected error masks */
@@ -124,16 +125,13 @@ static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
static struct pci_bus_ops *pci_bus_ops_pop(void)
{
unsigned long flags;
- struct pci_bus_ops *bus_ops = NULL;
+ struct pci_bus_ops *bus_ops;
spin_lock_irqsave(&inject_lock, flags);
- if (list_empty(&pci_bus_ops_list))
- bus_ops = NULL;
- else {
- struct list_head *lh = pci_bus_ops_list.next;
- list_del(lh);
- bus_ops = list_entry(lh, struct pci_bus_ops, list);
- }
+ bus_ops = list_first_entry_or_null(&pci_bus_ops_list,
+ struct pci_bus_ops, list);
+ if (bus_ops)
+ list_del(&bus_ops->list);
spin_unlock_irqrestore(&inject_lock, flags);
return bus_ops;
}
@@ -181,14 +179,16 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
return target;
}
-static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 *val)
+static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
u32 *sim;
struct aer_error *err;
unsigned long flags;
struct pci_ops *ops;
+ struct pci_ops *my_ops;
int domain;
+ int rv;
spin_lock_irqsave(&inject_lock, flags);
if (size != sizeof(u32))
@@ -208,19 +208,32 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
}
out:
ops = __find_pci_bus_ops(bus);
+ /*
+ * pci_lock must already be held, so we can directly
+ * manipulate bus->ops. Many config access functions,
+ * including pci_generic_config_read() require the original
+ * bus->ops be installed to function, so temporarily put them
+ * back.
+ */
+ my_ops = bus->ops;
+ bus->ops = ops;
+ rv = ops->read(bus, devfn, where, size, val);
+ bus->ops = my_ops;
spin_unlock_irqrestore(&inject_lock, flags);
- return ops->read(bus, devfn, where, size, val);
+ return rv;
}
-static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 val)
+static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
u32 *sim;
struct aer_error *err;
unsigned long flags;
int rw1cs;
struct pci_ops *ops;
+ struct pci_ops *my_ops;
int domain;
+ int rv;
spin_lock_irqsave(&inject_lock, flags);
if (size != sizeof(u32))
@@ -243,13 +256,24 @@ static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
}
out:
ops = __find_pci_bus_ops(bus);
+ /*
+ * pci_lock must already be held, so we can directly
+ * manipulate bus->ops. Many config access functions,
+ * including pci_generic_config_write() require the original
+ * bus->ops be installed to function, so temporarily put them
+ * back.
+ */
+ my_ops = bus->ops;
+ bus->ops = ops;
+ rv = ops->write(bus, devfn, where, size, val);
+ bus->ops = my_ops;
spin_unlock_irqrestore(&inject_lock, flags);
- return ops->write(bus, devfn, where, size, val);
+ return rv;
}
-static struct pci_ops pci_ops_aer = {
- .read = pci_read_aer,
- .write = pci_write_aer,
+static struct pci_ops aer_inj_pci_ops = {
+ .read = aer_inj_read_config,
+ .write = aer_inj_write_config,
};
static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
@@ -270,9 +294,9 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;
- ops = pci_bus_set_ops(bus, &pci_ops_aer);
+ ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
spin_lock_irqsave(&inject_lock, flags);
- if (ops == &pci_ops_aer)
+ if (ops == &aer_inj_pci_ops)
goto out;
pci_bus_ops_init(bus_ops, bus, ops);
list_add(&bus_ops->list, &pci_bus_ops_list);
@@ -334,13 +358,15 @@ static int aer_inject(struct aer_error_inj *einj)
return -ENODEV;
rpdev = pcie_find_root_port(dev);
if (!rpdev) {
+ dev_err(&dev->dev, "aer_inject: Root port not found\n");
ret = -ENODEV;
goto out_put;
}
pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos_cap_err) {
- ret = -EPERM;
+ dev_err(&dev->dev, "aer_inject: Device doesn't support AER\n");
+ ret = -EPROTONOSUPPORT;
goto out_put;
}
pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
@@ -350,7 +376,9 @@ static int aer_inject(struct aer_error_inj *einj)
rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
if (!rp_pos_cap_err) {
- ret = -EPERM;
+ dev_err(&rpdev->dev,
+ "aer_inject: Root port doesn't support AER\n");
+ ret = -EPROTONOSUPPORT;
goto out_put;
}
@@ -397,14 +425,16 @@ static int aer_inject(struct aer_error_inj *einj)
if (!aer_mask_override && einj->cor_status &&
!(einj->cor_status & ~cor_mask)) {
ret = -EINVAL;
- printk(KERN_WARNING "The correctable error(s) is masked by device\n");
+ dev_warn(&dev->dev,
+ "aer_inject: The correctable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
if (!aer_mask_override && einj->uncor_status &&
!(einj->uncor_status & ~uncor_mask)) {
ret = -EINVAL;
- printk(KERN_WARNING "The uncorrectable error(s) is masked by device\n");
+ dev_warn(&dev->dev,
+ "aer_inject: The uncorrectable error(s) is masked by device\n");
spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
@@ -457,13 +487,19 @@ static int aer_inject(struct aer_error_inj *einj)
if (find_aer_device(rpdev, &edev)) {
if (!get_service_data(edev)) {
- printk(KERN_WARNING "AER service is not initialized\n");
- ret = -EINVAL;
+ dev_warn(&edev->device,
+ "aer_inject: AER service is not initialized\n");
+ ret = -EPROTONOSUPPORT;
goto out_put;
}
+ dev_info(&edev->device,
+ "aer_inject: Injecting errors %08x/%08x into device %s\n",
+ einj->cor_status, einj->uncor_status, pci_name(dev));
aer_irq(-1, edev);
- } else
- ret = -EINVAL;
+ } else {
+ dev_err(&rpdev->dev, "aer_inject: AER device not found\n");
+ ret = -ENODEV;
+ }
out_put:
kfree(err_alloc);
kfree(rperr_alloc);
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 63fc63911295..1ae4c73e7a3c 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -396,7 +396,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
{
struct pcie_pme_service_data *data = get_service_data(srv);
struct pci_dev *port = srv->port;
- bool wakeup;
+ bool wakeup, wake_irq_enabled = false;
int ret;
if (device_may_wakeup(&port->dev)) {
@@ -409,11 +409,12 @@ static int pcie_pme_suspend(struct pcie_device *srv)
spin_lock_irq(&data->lock);
if (wakeup) {
ret = enable_irq_wake(srv->irq);
- data->suspend_level = PME_SUSPEND_WAKEUP;
+ if (ret == 0) {
+ data->suspend_level = PME_SUSPEND_WAKEUP;
+ wake_irq_enabled = true;
+ }
}
- if (!wakeup || ret) {
- struct pci_dev *port = srv->port;
-
+ if (!wake_irq_enabled) {
pcie_pme_interrupt_enable(port, false);
pcie_clear_root_pme_status(port);
data->suspend_level = PME_SUSPEND_NOIRQ;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6d7ab9bb0d5a..8004f67c57ec 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -15,7 +15,7 @@
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
-#include <asm-generic/pci-bridge.h>
+#include <linux/irqdomain.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -179,6 +179,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
u16 orig_cmd;
struct pci_bus_region region, inverted_region;
+ if (dev->non_compliant_bars)
+ return 0;
+
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
/* No printks while decoding is disabled! */
@@ -675,6 +678,20 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
if (!d)
d = pci_host_bridge_acpi_msi_domain(bus);
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+ /*
+ * If no IRQ domain was found via the OF tree, try looking it up
+ * directly through the fwnode_handle.
+ */
+ if (!d) {
+ struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);
+
+ if (fwnode)
+ d = irq_find_matching_fwnode(fwnode,
+ DOMAIN_BUS_PCI_MSI);
+ }
+#endif
+
return d;
}
@@ -758,6 +775,12 @@ add_dev:
pcibios_add_bus(child);
+ if (child->ops->add_bus) {
+ ret = child->ops->add_bus(child);
+ if (WARN_ON(ret < 0))
+ dev_err(&child->dev, "failed to add bus: %d\n", ret);
+ }
+
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(child);
@@ -1171,6 +1194,7 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
int pci_setup_device(struct pci_dev *dev)
{
u32 class;
+ u16 cmd;
u8 hdr_type;
int pos = 0;
struct pci_bus_region region;
@@ -1214,6 +1238,16 @@ int pci_setup_device(struct pci_dev *dev)
/* device class may be changed after fixup */
class = dev->class >> 8;
+ if (dev->non_compliant_bars) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+ dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
+ cmd &= ~PCI_COMMAND_IO;
+ cmd &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ }
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
@@ -1608,7 +1642,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_pm_init(dev);
/* Vital Product Data */
- pci_vpd_pci22_init(dev);
+ pci_vpd_init(dev);
/* Alternative Routing-ID Forwarding */
pci_configure_ari(dev);
@@ -1803,6 +1837,13 @@ static int only_one_child(struct pci_bus *bus)
return 0;
if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
return 1;
+
+ /*
+ * PCIe downstream ports are bridges that normally lead to only a
+ * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
+ * possible devices, not just device 0. See PCIe spec r3.0,
+ * sec 7.3.1.
+ */
if (parent->has_secondary_link &&
!pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
return 1;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 0575a1e026b4..8e678027b900 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -438,7 +438,7 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
u32 class = pdev->class;
/* Use "USB Device (not host controller)" class */
- pdev->class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe;
+ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
class, pdev->class);
}
@@ -2135,6 +2135,35 @@ static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
/*
+ * If a device follows the VPD format spec, the PCI core will not read or
+ * write past the VPD End Tag. But some vendors do not follow the VPD
+ * format spec, so we can't tell how much data is safe to access. Devices
+ * may behave unpredictably if we access too much. Blacklist these devices
+ * so we don't touch VPD at all.
+ */
+static void quirk_blacklist_vpd(struct pci_dev *dev)
+{
+ if (dev->vpd) {
+ dev->vpd->len = 0;
+ dev_warn(&dev->dev, FW_BUG "VPD access disabled\n");
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
+ quirk_blacklist_vpd);
+
+/*
* For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
* VPD end tag will hang the device. This problem was initially
* observed when a vpd entry was created in sysfs
@@ -3832,6 +3861,19 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
#endif
}
+static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Cavium devices matching this quirk do not perform peer-to-peer
+ * with other functions, allowing masking out these bits as if they
+ * were unimplemented in the ACS capability.
+ */
+ acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+
+ return acs_flags ? 0 : 1;
+}
+
/*
* Many Intel PCH root ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
@@ -3984,6 +4026,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
+ /* Cavium ThunderX */
+ { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
{ 0 }
};
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 8a280e9c2ad1..8982026637d5 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -7,7 +7,6 @@ static void pci_free_resources(struct pci_dev *dev)
{
int i;
- pci_cleanup_rom(dev);
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *res = dev->resource + i;
if (res->parent)
@@ -54,6 +53,10 @@ void pci_remove_bus(struct pci_bus *bus)
pci_bus_release_busn_res(bus);
up_write(&pci_bus_sem);
pci_remove_legacy_files(bus);
+
+ if (bus->ops->remove_bus)
+ bus->ops->remove_bus(bus);
+
pcibios_remove_bus(bus);
device_unregister(&bus->dev);
}
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 9eaca39ef38d..06663d391b39 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -24,13 +24,17 @@
*/
int pci_enable_rom(struct pci_dev *pdev)
{
- struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
+ struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
struct pci_bus_region region;
u32 rom_addr;
if (!res->flags)
return -1;
+ /* Nothing to enable if we're using a shadow copy in RAM */
+ if (res->flags & IORESOURCE_ROM_SHADOW)
+ return 0;
+
pcibios_resource_to_bus(pdev->bus, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
@@ -49,7 +53,12 @@ EXPORT_SYMBOL_GPL(pci_enable_rom);
*/
void pci_disable_rom(struct pci_dev *pdev)
{
+ struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
u32 rom_addr;
+
+ if (res->flags & IORESOURCE_ROM_SHADOW)
+ return;
+
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
@@ -119,43 +128,23 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
loff_t start;
void __iomem *rom;
- /*
- * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
- * memory map if the VGA enable bit of the Bridge Control register is
- * set for embedded VGA.
- */
- if (res->flags & IORESOURCE_ROM_SHADOW) {
- /* primary video rom always starts here */
- start = (loff_t)0xC0000;
- *size = 0x20000; /* cover C000:0 through E000:0 */
- } else {
- if (res->flags &
- (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
- *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- return (void __iomem *)(unsigned long)
- pci_resource_start(pdev, PCI_ROM_RESOURCE);
- } else {
- /* assign the ROM an address if it doesn't have one */
- if (res->parent == NULL &&
- pci_assign_resource(pdev, PCI_ROM_RESOURCE))
- return NULL;
- start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
- *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- if (*size == 0)
- return NULL;
-
- /* Enable ROM space decodes */
- if (pci_enable_rom(pdev))
- return NULL;
- }
- }
+ /* assign the ROM an address if it doesn't have one */
+ if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE))
+ return NULL;
+
+ start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
+ *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ if (*size == 0)
+ return NULL;
+
+ /* Enable ROM space decodes */
+ if (pci_enable_rom(pdev))
+ return NULL;
rom = ioremap(start, *size);
if (!rom) {
/* restore enable if ioremap fails */
- if (!(res->flags & (IORESOURCE_ROM_ENABLE |
- IORESOURCE_ROM_SHADOW |
- IORESOURCE_ROM_COPY)))
+ if (!(res->flags & IORESOURCE_ROM_ENABLE))
pci_disable_rom(pdev);
return NULL;
}
@@ -181,37 +170,15 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
- if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
- return;
-
iounmap(rom);
- /* Disable again before continuing, leave enabled if pci=rom */
- if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
+ /* Disable again before continuing */
+ if (!(res->flags & IORESOURCE_ROM_ENABLE))
pci_disable_rom(pdev);
}
EXPORT_SYMBOL(pci_unmap_rom);
/**
- * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy
- * @pdev: pointer to pci device struct
- *
- * Free the copied ROM if we allocated one.
- */
-void pci_cleanup_rom(struct pci_dev *pdev)
-{
- struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
-
- if (res->flags & IORESOURCE_ROM_COPY) {
- kfree((void *)(unsigned long)res->start);
- res->flags |= IORESOURCE_UNSET;
- res->flags &= ~IORESOURCE_ROM_COPY;
- res->start = 0;
- res->end = 0;
- }
-}
-
-/**
* pci_platform_rom - provides a pointer to any ROM image provided by the
* platform
* @pdev: pointer to pci device struct
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 7796d0a5befa..55641a39a3e9 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -25,7 +25,6 @@
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
-#include <asm-generic/pci-bridge.h>
#include "pci.h"
unsigned int pci_flags;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 604011e047d6..66c4d8f42233 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -276,6 +276,9 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
resource_size_t align, size;
int ret;
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return 0;
+
res->flags |= IORESOURCE_UNSET;
align = pci_resource_alignment(dev, res);
if (!align) {
@@ -321,6 +324,9 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
resource_size_t new_size;
int ret;
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return 0;
+
flags = res->flags;
res->flags |= IORESOURCE_UNSET;
if (!res->parent) {
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index bba1dcbb8075..8b0923fd76c6 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -36,10 +36,10 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/gpio.h>
#include <pcmcia/ss.h>
#include <pcmcia/cisreg.h>
-#include <asm/gpio.h>
#define SZ_1K 0x00000400
#define SZ_8K 0x00002000
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 4c2fa05b4589..944674ee3464 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -56,6 +56,7 @@ struct db1x_pcmcia_sock {
int stschg_irq; /* card-status-change irq */
int card_irq; /* card irq */
int eject_irq; /* db1200/pb1200 have these */
+ int insert_gpio; /* db1000 carddetect gpio */
#define BOARD_TYPE_DEFAULT 0 /* most boards */
#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
@@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
/* carddetect gpio: low-active */
static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
{
- return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+ return !gpio_get_value(sock->insert_gpio);
}
static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
@@ -457,9 +458,15 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
sock->card_irq = r ? r->start : 0;
- /* insert: irq which triggers on card insertion/ejection */
+ /* insert: irq which triggers on card insertion/ejection
+ * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here!
+ */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
sock->insert_irq = r ? r->start : -1;
+ if (sock->board_type == BOARD_TYPE_DEFAULT) {
+ sock->insert_gpio = r ? r->start : -1;
+ sock->insert_irq = r ? gpio_to_irq(r->start) : -1;
+ }
/* stschg: irq which trigger on card status change (optional) */
r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index a47dcd24a26a..33c5b8823367 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -17,7 +17,6 @@
#include <asm/mach-types.h>
-#include <asm/gpio.h>
#include <mach/vpac270.h>
#include "soc_common.h"
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 166637f2917c..32346b5a8a11 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -13,6 +13,7 @@
#include <linux/bitmap.h>
#include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
@@ -710,6 +711,93 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
return NOTIFY_OK;
}
+#ifdef CONFIG_CPU_PM
+static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
+{
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+ struct perf_event *event;
+ int idx;
+
+ for (idx = 0; idx < armpmu->num_events; idx++) {
+ /*
+ * If the counter is not used, skip it; there is no
+ * need to stop/restart it.
+ */
+ if (!test_bit(idx, hw_events->used_mask))
+ continue;
+
+ event = hw_events->events[idx];
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /*
+ * Stop and update the counter
+ */
+ armpmu_stop(event, PERF_EF_UPDATE);
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ /* Restore and enable the counter */
+ armpmu_start(event, PERF_EF_RELOAD);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+ void *v)
+{
+ struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+ int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+ return NOTIFY_DONE;
+
+ /*
+ * Always reset the PMU registers on power-up even if
+ * there are no events running.
+ */
+ if (cmd == CPU_PM_EXIT && armpmu->reset)
+ armpmu->reset(armpmu);
+
+ if (!enabled)
+ return NOTIFY_OK;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ armpmu->stop(armpmu);
+ cpu_pm_pmu_setup(armpmu, cmd);
+ break;
+ case CPU_PM_EXIT:
+ cpu_pm_pmu_setup(armpmu, cmd);
+ case CPU_PM_ENTER_FAILED:
+ armpmu->start(armpmu);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
+ return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
+}
+
+static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
+{
+ cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
+}
+#else
+static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
+static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+#endif
+
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int err;
@@ -725,6 +813,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
if (err)
goto out_hw_events;
+ err = cpu_pm_pmu_register(cpu_pmu);
+ if (err)
+ goto out_unregister;
+
for_each_possible_cpu(cpu) {
struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
raw_spin_lock_init(&events->pmu_lock);
@@ -746,6 +838,8 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
return 0;
+out_unregister:
+ unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
out_hw_events:
free_percpu(cpu_hw_events);
return err;
@@ -753,6 +847,7 @@ out_hw_events:
static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
+ cpu_pm_pmu_unregister(cpu_pmu);
unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
free_percpu(cpu_pmu->hw_events);
}
@@ -889,6 +984,15 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
init_fn = of_id->data;
+ pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+ "secure-reg-access");
+
+ /* arm64 systems boot only as non-secure */
+ if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
+ pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+ pmu->secure_access = false;
+ }
+
ret = of_pmu_irq_cfg(pmu);
if (!ret)
ret = init_fn(pmu);
@@ -898,7 +1002,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
}
if (ret) {
- pr_info("failed to probe PMU!\n");
+ pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
goto out_free;
}
@@ -918,7 +1022,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
out_destroy:
cpu_pmu_destroy(pmu);
out_free:
- pr_info("failed to register PMU devices!\n");
+ pr_info("%s: failed to register PMU devices!\n",
+ of_node_full_name(node));
kfree(pmu);
return ret;
}
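
The block above is an instance of the generic CPU PM notifier contract from <linux/cpu_pm.h>: per-CPU context may be lost across a low-power state, so state is saved/stopped on CPU_PM_ENTER and restored on CPU_PM_EXIT, while CPU_PM_ENTER_FAILED is handled like an exit because the CPU never actually lost power. A stripped-down sketch of the contract, independent of the PMU details:

    #include <linux/cpu_pm.h>
    #include <linux/notifier.h>

    static int my_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
                                void *v)
    {
        switch (cmd) {
        case CPU_PM_ENTER:          /* context may be lost: save/stop here */
            break;
        case CPU_PM_ENTER_FAILED:   /* entry aborted: CPU state survived */
        case CPU_PM_EXIT:           /* back from low power: restore/restart */
            break;
        default:
            return NOTIFY_DONE;
        }
        return NOTIFY_OK;
    }

    static struct notifier_block my_cpu_pm_nb = {
        .notifier_call = my_cpu_pm_notify,
    };

    /* cpu_pm_register_notifier(&my_cpu_pm_nb) at init,
     * cpu_pm_unregister_notifier(&my_cpu_pm_nb) at teardown. */
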
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 0124d17bd9fe..26566db09de0 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -32,7 +32,7 @@ config PHY_BERLIN_SATA
config ARMADA375_USBCLUSTER_PHY
def_bool y
depends on MACH_ARMADA_375 || COMPILE_TEST
- depends on OF
+ depends on OF && HAS_IOMEM
select GENERIC_PHY
config PHY_DM816X_USB
@@ -337,6 +337,20 @@ config PHY_ROCKCHIP_USB
help
Enable this to support the Rockchip USB 2.0 PHY.
+config PHY_ROCKCHIP_EMMC
+ tristate "Rockchip EMMC PHY Driver"
+ depends on ARCH_ROCKCHIP && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip EMMC PHY.
+
+config PHY_ROCKCHIP_DP
+ tristate "Rockchip Display Port PHY Driver"
+ depends on ARCH_ROCKCHIP && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip Display Port PHY.
+
config PHY_ST_SPEAR1310_MIPHY
tristate "ST SPEAR1310-MIPHY driver"
select GENERIC_PHY
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c80f09df3bb8..24596a96a887 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -37,6 +37,8 @@ phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2) += phy-s5pv210-usb2.o
obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
obj-$(CONFIG_PHY_ROCKCHIP_USB) += phy-rockchip-usb.o
+obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o
obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
diff --git a/drivers/phy/phy-dm816x-usb.c b/drivers/phy/phy-dm816x-usb.c
index b4bbef664d20..cbcce7cf0028 100644
--- a/drivers/phy/phy-dm816x-usb.c
+++ b/drivers/phy/phy-dm816x-usb.c
@@ -118,7 +118,7 @@ static const struct phy_ops ops = {
.owner = THIS_MODULE,
};
-static int dm816x_usb_phy_runtime_suspend(struct device *dev)
+static int __maybe_unused dm816x_usb_phy_runtime_suspend(struct device *dev)
{
struct dm816x_usb_phy *phy = dev_get_drvdata(dev);
unsigned int mask, val;
@@ -136,7 +136,7 @@ static int dm816x_usb_phy_runtime_suspend(struct device *dev)
return 0;
}
-static int dm816x_usb_phy_runtime_resume(struct device *dev)
+static int __maybe_unused dm816x_usb_phy_runtime_resume(struct device *dev)
{
struct dm816x_usb_phy *phy = dev_get_drvdata(dev);
unsigned int mask, val;
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index ef332ef4abc7..bc4f7dd821aa 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -74,20 +74,6 @@
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
-/******* HSUSB registers (original offset is +0x100) *******/
-#define HSUSB_LPSTS 0x02
-#define HSUSB_UGCTRL2 0x84
-
-/* Low Power Status register (LPSTS) */
-#define HSUSB_LPSTS_SUSPM 0x4000
-
-/* USB General control register 2 (UGCTRL2) */
-#define HSUSB_UGCTRL2_MASK 0x00000031 /* bit[31:6] should be 0 */
-#define HSUSB_UGCTRL2_USB0SEL 0x00000030
-#define HSUSB_UGCTRL2_USB0SEL_HOST 0x00000010
-#define HSUSB_UGCTRL2_USB0SEL_HS_USB 0x00000020
-#define HSUSB_UGCTRL2_USB0SEL_OTG 0x00000030
-
struct rcar_gen3_data {
void __iomem *base;
struct clk *clk;
@@ -95,8 +81,8 @@ struct rcar_gen3_data {
struct rcar_gen3_chan {
struct rcar_gen3_data usb2;
- struct rcar_gen3_data hsusb;
struct phy *phy;
+ bool has_otg;
};
static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
@@ -202,24 +188,15 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
{
struct rcar_gen3_chan *channel = phy_get_drvdata(p);
void __iomem *usb2_base = channel->usb2.base;
- void __iomem *hsusb_base = channel->hsusb.base;
- u32 val;
/* Initialize USB2 part */
writel(USB2_INT_ENABLE_INIT, usb2_base + USB2_INT_ENABLE);
writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET);
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
- /* Initialize HSUSB part */
- if (hsusb_base) {
- val = readl(hsusb_base + HSUSB_UGCTRL2);
- val = (val & ~HSUSB_UGCTRL2_USB0SEL) |
- HSUSB_UGCTRL2_USB0SEL_OTG;
- writel(val & HSUSB_UGCTRL2_MASK, hsusb_base + HSUSB_UGCTRL2);
-
- /* Initialize otg part */
+ /* Initialize otg part */
+ if (channel->has_otg)
rcar_gen3_init_otg(channel);
- }
return 0;
}
@@ -237,7 +214,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
{
struct rcar_gen3_chan *channel = phy_get_drvdata(p);
void __iomem *usb2_base = channel->usb2.base;
- void __iomem *hsusb_base = channel->hsusb.base;
u32 val;
val = readl(usb2_base + USB2_USBCTR);
@@ -246,33 +222,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
val &= ~USB2_USBCTR_PLL_RST;
writel(val, usb2_base + USB2_USBCTR);
- /*
- * TODO: To reduce power consuming, this driver should set the SUSPM
- * after the PHY detects ID pin as peripheral.
- */
- if (hsusb_base) {
- /* Power on HSUSB PHY */
- val = readw(hsusb_base + HSUSB_LPSTS);
- val |= HSUSB_LPSTS_SUSPM;
- writew(val, hsusb_base + HSUSB_LPSTS);
- }
-
- return 0;
-}
-
-static int rcar_gen3_phy_usb2_power_off(struct phy *p)
-{
- struct rcar_gen3_chan *channel = phy_get_drvdata(p);
- void __iomem *hsusb_base = channel->hsusb.base;
- u32 val;
-
- if (hsusb_base) {
- /* Power off HSUSB PHY */
- val = readw(hsusb_base + HSUSB_LPSTS);
- val &= ~HSUSB_LPSTS_SUSPM;
- writew(val, hsusb_base + HSUSB_LPSTS);
- }
-
return 0;
}
@@ -280,7 +229,6 @@ static struct phy_ops rcar_gen3_phy_usb2_ops = {
.init = rcar_gen3_phy_usb2_init,
.exit = rcar_gen3_phy_usb2_exit,
.power_on = rcar_gen3_phy_usb2_power_on,
- .power_off = rcar_gen3_phy_usb2_power_off,
.owner = THIS_MODULE,
};
@@ -313,6 +261,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
struct rcar_gen3_chan *channel;
struct phy_provider *provider;
struct resource *res;
+ int irq;
if (!dev->of_node) {
dev_err(dev, "This driver needs device tree\n");
@@ -323,29 +272,19 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
if (!channel)
return -ENOMEM;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usb2_host");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
channel->usb2.base = devm_ioremap_resource(dev, res);
if (IS_ERR(channel->usb2.base))
return PTR_ERR(channel->usb2.base);
- /* "hsusb" memory resource is optional */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsusb");
-
- /* To avoid error message by devm_ioremap_resource() */
- if (res) {
- int irq;
-
- channel->hsusb.base = devm_ioremap_resource(dev, res);
- if (IS_ERR(channel->hsusb.base))
- channel->hsusb.base = NULL;
- /* call request_irq for OTG */
- irq = platform_get_irq(pdev, 0);
- if (irq >= 0)
- irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
- IRQF_SHARED, dev_name(dev),
- channel);
+ /* call request_irq for OTG */
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0) {
+ irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
+ IRQF_SHARED, dev_name(dev), channel);
if (irq < 0)
dev_err(dev, "No irq handler (%d)\n", irq);
+ channel->has_otg = true;
}
/* devm_phy_create() will call pm_runtime_enable(dev); */
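
The rework above leans on platform_get_irq() returning a negative errno when the device tree describes no interrupt, which is what makes the OTG IRQ optional: only a successful lookup leads to devm_request_irq() and marks the channel as OTG-capable. A sketch of that optional-IRQ idiom (handler name hypothetical; unlike the patch, this version only reports success when the request itself succeeds):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    static bool request_optional_irq(struct platform_device *pdev, void *ctx)
    {
        int irq = platform_get_irq(pdev, 0);    /* negative if not described */

        if (irq < 0)
            return false;                       /* feature simply absent */

        return devm_request_irq(&pdev->dev, irq, my_irq_handler,
                                IRQF_SHARED, dev_name(&pdev->dev), ctx) == 0;
    }
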
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
new file mode 100644
index 000000000000..77e2d02e6bee
--- /dev/null
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -0,0 +1,151 @@
+/*
+ * Rockchip DP PHY driver
+ *
+ * Copyright (C) 2016 FuZhou Rockchip Co., Ltd.
+ * Author: Yakir Yang <ykk@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define GRF_SOC_CON12 0x0274
+
+#define GRF_EDP_REF_CLK_SEL_INTER_HIWORD_MASK BIT(20)
+#define GRF_EDP_REF_CLK_SEL_INTER BIT(4)
+
+#define GRF_EDP_PHY_SIDDQ_HIWORD_MASK BIT(21)
+#define GRF_EDP_PHY_SIDDQ_ON 0
+#define GRF_EDP_PHY_SIDDQ_OFF BIT(5)
+
+struct rockchip_dp_phy {
+ struct device *dev;
+ struct regmap *grf;
+ struct clk *phy_24m;
+};
+
+static int rockchip_set_phy_state(struct phy *phy, bool enable)
+{
+ struct rockchip_dp_phy *dp = phy_get_drvdata(phy);
+ int ret;
+
+ if (enable) {
+ ret = regmap_write(dp->grf, GRF_SOC_CON12,
+ GRF_EDP_PHY_SIDDQ_HIWORD_MASK |
+ GRF_EDP_PHY_SIDDQ_ON);
+ if (ret < 0) {
+ dev_err(dp->dev, "Can't enable PHY power %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dp->phy_24m);
+ } else {
+ clk_disable_unprepare(dp->phy_24m);
+
+ ret = regmap_write(dp->grf, GRF_SOC_CON12,
+ GRF_EDP_PHY_SIDDQ_HIWORD_MASK |
+ GRF_EDP_PHY_SIDDQ_OFF);
+ }
+
+ return ret;
+}
+
+static int rockchip_dp_phy_power_on(struct phy *phy)
+{
+ return rockchip_set_phy_state(phy, true);
+}
+
+static int rockchip_dp_phy_power_off(struct phy *phy)
+{
+ return rockchip_set_phy_state(phy, false);
+}
+
+static const struct phy_ops rockchip_dp_phy_ops = {
+ .power_on = rockchip_dp_phy_power_on,
+ .power_off = rockchip_dp_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int rockchip_dp_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct phy_provider *phy_provider;
+ struct rockchip_dp_phy *dp;
+ struct phy *phy;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dev = dev;
+
+ dp->phy_24m = devm_clk_get(dev, "24m");
+ if (IS_ERR(dp->phy_24m)) {
+ dev_err(dev, "cannot get clock 24m\n");
+ return PTR_ERR(dp->phy_24m);
+ }
+
+ ret = clk_set_rate(dp->phy_24m, 24000000);
+ if (ret < 0) {
+ dev_err(dp->dev, "cannot set clock phy_24m %d\n", ret);
+ return ret;
+ }
+
+ dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(dp->grf)) {
+ dev_err(dev, "rk3288-dp needs rockchip,grf property\n");
+ return PTR_ERR(dp->grf);
+ }
+
+ ret = regmap_write(dp->grf, GRF_SOC_CON12, GRF_EDP_REF_CLK_SEL_INTER |
+ GRF_EDP_REF_CLK_SEL_INTER_HIWORD_MASK);
+ if (ret != 0) {
+ dev_err(dp->dev, "Could not config GRF edp ref clk: %d\n", ret);
+ return ret;
+ }
+
+ phy = devm_phy_create(dev, np, &rockchip_dp_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create phy\n");
+ return PTR_ERR(phy);
+ }
+ phy_set_drvdata(phy, dp);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id rockchip_dp_phy_dt_ids[] = {
+ { .compatible = "rockchip,rk3288-dp-phy" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_dp_phy_dt_ids);
+
+static struct platform_driver rockchip_dp_phy_driver = {
+ .probe = rockchip_dp_phy_probe,
+ .driver = {
+ .name = "rockchip-dp-phy",
+ .of_match_table = rockchip_dp_phy_dt_ids,
+ },
+};
+
+module_platform_driver(rockchip_dp_phy_driver);
+
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip DP PHY driver");
+MODULE_LICENSE("GPL v2");
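
On the consumer side, a PHY registered through devm_of_phy_provider_register() is reached via the generic PHY framework: the display controller looks the PHY up by phandle and drives it with phy_power_on()/phy_power_off(), which end up in rockchip_dp_phy_ops above. A minimal consumer sketch; the device and the "dp" phy-name are assumptions, not taken from this patch:

    #include <linux/phy/phy.h>

    static int my_dp_bring_up(struct device *dev)
    {
        struct phy *phy;
        int ret;

        /* matches a phys = <...>; phy-names = "dp"; pair in the consumer node */
        phy = devm_phy_get(dev, "dp");
        if (IS_ERR(phy))
            return PTR_ERR(phy);

        ret = phy_power_on(phy);    /* -> rockchip_dp_phy_power_on() */
        if (ret)
            return ret;

        /* ... train and use the DP link ... */

        return phy_power_off(phy);  /* -> rockchip_dp_phy_power_off() */
    }
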
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
new file mode 100644
index 000000000000..887b4c27195f
--- /dev/null
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -0,0 +1,229 @@
+/*
+ * Rockchip emmc PHY driver
+ *
+ * Copyright (C) 2016 Shawn Lin <shawn.lin@rock-chips.com>
+ * Copyright (C) 2016 ROCKCHIP, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/*
+ * The upper 16 bits of this register are used for write protection:
+ * only if BIT(x + 16) is set to 1 can BIT(x) be written.
+ */
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
+/* Register definition */
+#define GRF_EMMCPHY_CON0 0x0
+#define GRF_EMMCPHY_CON1 0x4
+#define GRF_EMMCPHY_CON2 0x8
+#define GRF_EMMCPHY_CON3 0xc
+#define GRF_EMMCPHY_CON4 0x10
+#define GRF_EMMCPHY_CON5 0x14
+#define GRF_EMMCPHY_CON6 0x18
+#define GRF_EMMCPHY_STATUS 0x20
+
+#define PHYCTRL_PDB_MASK 0x1
+#define PHYCTRL_PDB_SHIFT 0x0
+#define PHYCTRL_PDB_PWR_ON 0x1
+#define PHYCTRL_PDB_PWR_OFF 0x0
+#define PHYCTRL_ENDLL_MASK 0x1
+#define PHYCTRL_ENDLL_SHIFT 0x1
+#define PHYCTRL_ENDLL_ENABLE 0x1
+#define PHYCTRL_ENDLL_DISABLE 0x0
+#define PHYCTRL_CALDONE_MASK 0x1
+#define PHYCTRL_CALDONE_SHIFT 0x6
+#define PHYCTRL_CALDONE_DONE 0x1
+#define PHYCTRL_CALDONE_GOING 0x0
+#define PHYCTRL_DLLRDY_MASK 0x1
+#define PHYCTRL_DLLRDY_SHIFT 0x5
+#define PHYCTRL_DLLRDY_DONE 0x1
+#define PHYCTRL_DLLRDY_GOING 0x0
+
+struct rockchip_emmc_phy {
+ unsigned int reg_offset;
+ struct regmap *reg_base;
+};
+
+static int rockchip_emmc_phy_power(struct rockchip_emmc_phy *rk_phy,
+ bool on_off)
+{
+ unsigned int caldone;
+ unsigned int dllrdy;
+
+ /*
+ * Keep phyctrl_pdb and phyctrl_endll low to allow
+ * initialization of CALIO state M/C DFFs
+ */
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON6,
+ HIWORD_UPDATE(PHYCTRL_PDB_PWR_OFF,
+ PHYCTRL_PDB_MASK,
+ PHYCTRL_PDB_SHIFT));
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON6,
+ HIWORD_UPDATE(PHYCTRL_ENDLL_DISABLE,
+ PHYCTRL_ENDLL_MASK,
+ PHYCTRL_ENDLL_SHIFT));
+
+ /* Power-off already finished above */
+ if (on_off == PHYCTRL_PDB_PWR_OFF)
+ return 0;
+
+ /*
+ * According to the user manual, the calpad calibration
+ * cycle takes more than 2us; no minimum value is recommended,
+ * so allow a little margin here.
+ */
+ udelay(3);
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON6,
+ HIWORD_UPDATE(PHYCTRL_PDB_PWR_ON,
+ PHYCTRL_PDB_MASK,
+ PHYCTRL_PDB_SHIFT));
+
+ /*
+ * According to the user manual, the driver must
+ * wait 5us for calpad busy trimming.
+ */
+ udelay(5);
+ regmap_read(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
+ &caldone);
+ caldone = (caldone >> PHYCTRL_CALDONE_SHIFT) & PHYCTRL_CALDONE_MASK;
+ if (caldone != PHYCTRL_CALDONE_DONE) {
+ pr_err("rockchip_emmc_phy_power: caldone timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ regmap_write(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_CON6,
+ HIWORD_UPDATE(PHYCTRL_ENDLL_ENABLE,
+ PHYCTRL_ENDLL_MASK,
+ PHYCTRL_ENDLL_SHIFT));
+ /*
+ * After enabling the analog DLL circuits, an extra 10.2us
+ * is needed for the DLL to be ready for work.
+ */
+ udelay(11);
+ regmap_read(rk_phy->reg_base,
+ rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
+ &dllrdy);
+ dllrdy = (dllrdy >> PHYCTRL_DLLRDY_SHIFT) & PHYCTRL_DLLRDY_MASK;
+ if (dllrdy != PHYCTRL_DLLRDY_DONE) {
+ pr_err("rockchip_emmc_phy_power: dllrdy timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int rockchip_emmc_phy_power_off(struct phy *phy)
+{
+ struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
+ int ret = 0;
+
+ /* Power down emmc phy analog blocks */
+ ret = rockchip_emmc_phy_power(rk_phy, PHYCTRL_PDB_PWR_OFF);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rockchip_emmc_phy_power_on(struct phy *phy)
+{
+ struct rockchip_emmc_phy *rk_phy = phy_get_drvdata(phy);
+ int ret = 0;
+
+ /* Power up emmc phy analog blocks */
+ ret = rockchip_emmc_phy_power(rk_phy, PHYCTRL_PDB_PWR_ON);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .power_on = rockchip_emmc_phy_power_on,
+ .power_off = rockchip_emmc_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int rockchip_emmc_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_emmc_phy *rk_phy;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ struct regmap *grf;
+ unsigned int reg_offset;
+
+ grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+ if (IS_ERR(grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return PTR_ERR(grf);
+ }
+
+ rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
+ if (!rk_phy)
+ return -ENOMEM;
+
+ if (of_property_read_u32(dev->of_node, "reg", &reg_offset)) {
+ dev_err(dev, "missing reg property in node %s\n",
+ dev->of_node->name);
+ return -EINVAL;
+ }
+
+ rk_phy->reg_offset = reg_offset;
+ rk_phy->reg_base = grf;
+
+ generic_phy = devm_phy_create(dev, dev->of_node, &ops);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, rk_phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id rockchip_emmc_phy_dt_ids[] = {
+ { .compatible = "rockchip,rk3399-emmc-phy" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_emmc_phy_dt_ids);
+
+static struct platform_driver rockchip_emmc_driver = {
+ .probe = rockchip_emmc_phy_probe,
+ .driver = {
+ .name = "rockchip-emmc-phy",
+ .of_match_table = rockchip_emmc_phy_dt_ids,
+ },
+};
+
+module_platform_driver(rockchip_emmc_driver);
+
+MODULE_AUTHOR("Shawn Lin <shawn.lin@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip EMMC PHY driver");
+MODULE_LICENSE("GPL v2");
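
The HIWORD_UPDATE() macro is the whole trick behind these GRF writes: bit x only latches when its write-enable twin, bit x+16, is set in the same 32-bit write, so every update carries the value in the low half-word and the mask in the high half-word, leaving all other bits untouched. A worked expansion for the power-up write above:

    #define HIWORD_UPDATE(val, mask, shift) \
        ((val) << (shift) | (mask) << ((shift) + 16))

    /*
     * HIWORD_UPDATE(PHYCTRL_PDB_PWR_ON, PHYCTRL_PDB_MASK, PHYCTRL_PDB_SHIFT)
     *   = (0x1 << 0x0) | (0x1 << (0x0 + 16))
     *   = 0x00010001
     *
     * low half-word:  new value for bit 0 (power the PHY up)
     * high half-word: write-enable for bit 0 only; bits whose enable
     *                 stays 0 are left unchanged by the GRF.
     */
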
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 33a80eba1cb4..f62d899063a3 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -30,21 +30,23 @@
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
-/*
- * The higher 16-bit of this register is used for write protection
- * only if BIT(13 + 16) set to 1 the BIT(13) can be written.
- */
-#define SIDDQ_WRITE_ENA BIT(29)
-#define SIDDQ_ON BIT(13)
-#define SIDDQ_OFF (0 << 13)
+static int enable_usb_uart;
+
+#define HIWORD_UPDATE(val, mask) \
+ ((val) | (mask) << 16)
+
+#define UOC_CON0_SIDDQ BIT(13)
struct rockchip_usb_phys {
int reg;
const char *pll_name;
};
+struct rockchip_usb_phy_base;
struct rockchip_usb_phy_pdata {
struct rockchip_usb_phys *phys;
+ int (*init_usb_uart)(struct regmap *grf);
+ int usb_uart_phy;
};
struct rockchip_usb_phy_base {
@@ -61,13 +63,15 @@ struct rockchip_usb_phy {
struct clk *clk480m;
struct clk_hw clk480m_hw;
struct phy *phy;
+ bool uart_enabled;
};
static int rockchip_usb_phy_power(struct rockchip_usb_phy *phy,
bool siddq)
{
- return regmap_write(phy->base->reg_base, phy->reg_offset,
- SIDDQ_WRITE_ENA | (siddq ? SIDDQ_ON : SIDDQ_OFF));
+ u32 val = HIWORD_UPDATE(siddq ? UOC_CON0_SIDDQ : 0, UOC_CON0_SIDDQ);
+
+ return regmap_write(phy->base->reg_base, phy->reg_offset, val);
}
static unsigned long rockchip_usb_phy480m_recalc_rate(struct clk_hw *hw,
@@ -108,7 +112,7 @@ static int rockchip_usb_phy480m_is_enabled(struct clk_hw *hw)
if (ret < 0)
return ret;
- return (val & SIDDQ_ON) ? 0 : 1;
+ return (val & UOC_CON0_SIDDQ) ? 0 : 1;
}
static const struct clk_ops rockchip_usb_phy480m_ops = {
@@ -122,6 +126,9 @@ static int rockchip_usb_phy_power_off(struct phy *_phy)
{
struct rockchip_usb_phy *phy = phy_get_drvdata(_phy);
+ if (phy->uart_enabled)
+ return -EBUSY;
+
clk_disable_unprepare(phy->clk480m);
return 0;
@@ -131,6 +138,9 @@ static int rockchip_usb_phy_power_on(struct phy *_phy)
{
struct rockchip_usb_phy *phy = phy_get_drvdata(_phy);
+ if (phy->uart_enabled)
+ return -EBUSY;
+
return clk_prepare_enable(phy->clk480m);
}
@@ -144,8 +154,10 @@ static void rockchip_usb_phy_action(void *data)
{
struct rockchip_usb_phy *rk_phy = data;
- of_clk_del_provider(rk_phy->np);
- clk_unregister(rk_phy->clk480m);
+ if (!rk_phy->uart_enabled) {
+ of_clk_del_provider(rk_phy->np);
+ clk_unregister(rk_phy->clk480m);
+ }
if (rk_phy->clk)
clk_put(rk_phy->clk);
@@ -194,30 +206,35 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
return -EINVAL;
}
- if (rk_phy->clk) {
- clk_name = __clk_get_name(rk_phy->clk);
- init.flags = 0;
- init.parent_names = &clk_name;
- init.num_parents = 1;
+ if (enable_usb_uart && base->pdata->usb_uart_phy == i) {
+ dev_dbg(base->dev, "phy%d used as uart output\n", i);
+ rk_phy->uart_enabled = true;
} else {
- init.flags = CLK_IS_ROOT;
- init.parent_names = NULL;
- init.num_parents = 0;
- }
+ if (rk_phy->clk) {
+ clk_name = __clk_get_name(rk_phy->clk);
+ init.flags = 0;
+ init.parent_names = &clk_name;
+ init.num_parents = 1;
+ } else {
+ init.flags = CLK_IS_ROOT;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ }
- init.ops = &rockchip_usb_phy480m_ops;
- rk_phy->clk480m_hw.init = &init;
+ init.ops = &rockchip_usb_phy480m_ops;
+ rk_phy->clk480m_hw.init = &init;
- rk_phy->clk480m = clk_register(base->dev, &rk_phy->clk480m_hw);
- if (IS_ERR(rk_phy->clk480m)) {
- err = PTR_ERR(rk_phy->clk480m);
- goto err_clk;
- }
+ rk_phy->clk480m = clk_register(base->dev, &rk_phy->clk480m_hw);
+ if (IS_ERR(rk_phy->clk480m)) {
+ err = PTR_ERR(rk_phy->clk480m);
+ goto err_clk;
+ }
- err = of_clk_add_provider(child, of_clk_src_simple_get,
- rk_phy->clk480m);
- if (err < 0)
- goto err_clk_prov;
+ err = of_clk_add_provider(child, of_clk_src_simple_get,
+ rk_phy->clk480m);
+ if (err < 0)
+ goto err_clk_prov;
+ }
err = devm_add_action(base->dev, rockchip_usb_phy_action, rk_phy);
if (err)
@@ -230,13 +247,21 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
}
phy_set_drvdata(rk_phy->phy, rk_phy);
- /* only power up usb phy when it use, so disable it when init*/
- return rockchip_usb_phy_power(rk_phy, 1);
+ /*
+ * When acting as a uart pipe, just keep the clock on; otherwise
+ * only power up the usb phy while it is in use, so disable it at init.
+ */
+ if (rk_phy->uart_enabled)
+ return clk_prepare_enable(rk_phy->clk);
+ else
+ return rockchip_usb_phy_power(rk_phy, 1);
err_devm_action:
- of_clk_del_provider(child);
+ if (!rk_phy->uart_enabled)
+ of_clk_del_provider(child);
err_clk_prov:
- clk_unregister(rk_phy->clk480m);
+ if (!rk_phy->uart_enabled)
+ clk_unregister(rk_phy->clk480m);
err_clk:
if (rk_phy->clk)
clk_put(rk_phy->clk);
@@ -259,6 +284,86 @@ static const struct rockchip_usb_phy_pdata rk3188_pdata = {
},
};
+#define RK3288_UOC0_CON0 0x320
+#define RK3288_UOC0_CON0_COMMON_ON_N BIT(0)
+#define RK3288_UOC0_CON0_DISABLE BIT(4)
+
+#define RK3288_UOC0_CON2 0x328
+#define RK3288_UOC0_CON2_SOFT_CON_SEL BIT(2)
+
+#define RK3288_UOC0_CON3 0x32c
+#define RK3288_UOC0_CON3_UTMI_SUSPENDN BIT(0)
+#define RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
+#define RK3288_UOC0_CON3_UTMI_OPMODE_MASK (3 << 1)
+#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
+#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
+#define RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
+#define RK3288_UOC0_CON3_BYPASSDMEN BIT(6)
+#define RK3288_UOC0_CON3_BYPASSSEL BIT(7)
+
+/*
+ * Enable the bypass of uart2 data through the otg usb phy.
+ * Original description in the TRM.
+ * 1. Disable the OTG block by setting OTGDISABLE0 to 1'b1.
+ * 2. Disable the pull-up resistance on the D+ line by setting
+ * OPMODE0[1:0] to 2'b01.
+ * 3. To ensure that the XO, Bias, and PLL blocks are powered down in Suspend
+ * mode, set COMMONONN to 1'b1.
+ * 4. Place the USB PHY in Suspend mode by setting SUSPENDM0 to 1'b0.
+ * 5. Set BYPASSSEL0 to 1'b1.
+ * 6. To transmit data, controls BYPASSDMEN0, and BYPASSDMDATA0.
+ * To receive data, monitor FSVPLUS0.
+ *
+ * The actual code in the vendor kernel does some things differently.
+ */
+static int __init rk3288_init_usb_uart(struct regmap *grf)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * COMMON_ON and DISABLE settings are described in the TRM,
+ * but were not present in the original code.
+ * Also disable the analog phy components to save power.
+ */
+ val = HIWORD_UPDATE(RK3288_UOC0_CON0_COMMON_ON_N
+ | RK3288_UOC0_CON0_DISABLE
+ | UOC_CON0_SIDDQ,
+ RK3288_UOC0_CON0_COMMON_ON_N
+ | RK3288_UOC0_CON0_DISABLE
+ | UOC_CON0_SIDDQ);
+ ret = regmap_write(grf, RK3288_UOC0_CON0, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(RK3288_UOC0_CON2_SOFT_CON_SEL,
+ RK3288_UOC0_CON2_SOFT_CON_SEL);
+ ret = regmap_write(grf, RK3288_UOC0_CON2, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING
+ | RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC
+ | RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED,
+ RK3288_UOC0_CON3_UTMI_SUSPENDN
+ | RK3288_UOC0_CON3_UTMI_OPMODE_MASK
+ | RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK
+ | RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED);
+ ret = regmap_write(grf, RK3288_UOC0_CON3, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(RK3288_UOC0_CON3_BYPASSSEL
+ | RK3288_UOC0_CON3_BYPASSDMEN,
+ RK3288_UOC0_CON3_BYPASSSEL
+ | RK3288_UOC0_CON3_BYPASSDMEN);
+ ret = regmap_write(grf, RK3288_UOC0_CON3, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct rockchip_usb_phy_pdata rk3288_pdata = {
.phys = (struct rockchip_usb_phys[]){
{ .reg = 0x320, .pll_name = "sclk_otgphy0_480m" },
@@ -266,6 +371,8 @@ static const struct rockchip_usb_phy_pdata rk3288_pdata = {
{ .reg = 0x348, .pll_name = "sclk_otgphy2_480m" },
{ /* sentinel */ }
},
+ .init_usb_uart = rk3288_init_usb_uart,
+ .usb_uart_phy = 0,
};
static int rockchip_usb_phy_probe(struct platform_device *pdev)
@@ -328,6 +435,60 @@ static struct platform_driver rockchip_usb_driver = {
module_platform_driver(rockchip_usb_driver);
+#ifndef MODULE
+static int __init rockchip_init_usb_uart(void)
+{
+ const struct of_device_id *match;
+ const struct rockchip_usb_phy_pdata *data;
+ struct device_node *np;
+ struct regmap *grf;
+ int ret;
+
+ if (!enable_usb_uart)
+ return 0;
+
+ np = of_find_matching_node_and_match(NULL, rockchip_usb_phy_dt_ids,
+ &match);
+ if (!np) {
+ pr_err("%s: failed to find usbphy node\n", __func__);
+ return -ENOTSUPP;
+ }
+
+ pr_debug("%s: using settings for %s\n", __func__, match->compatible);
+ data = match->data;
+
+ if (!data->init_usb_uart) {
+ pr_err("%s: usb-uart not available on %s\n",
+ __func__, match->compatible);
+ return -ENOTSUPP;
+ }
+
+ grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(grf)) {
+ pr_err("%s: Missing rockchip,grf property, %lu\n",
+ __func__, PTR_ERR(grf));
+ return PTR_ERR(grf);
+ }
+
+ ret = data->init_usb_uart(grf);
+ if (ret) {
+ pr_err("%s: could not init usb_uart, %d\n", __func__, ret);
+ enable_usb_uart = 0;
+ return ret;
+ }
+
+ return 0;
+}
+early_initcall(rockchip_init_usb_uart);
+
+static int __init rockchip_usb_uart(char *buf)
+{
+ enable_usb_uart = true;
+ return 0;
+}
+early_param("rockchip.usb_uart", rockchip_usb_uart);
+#endif
+
MODULE_AUTHOR("Yunzhi Li <lyz@rock-chips.com>");
MODULE_DESCRIPTION("Rockchip USB 2.0 PHY driver");
MODULE_LICENSE("GPL v2");
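
The #ifndef MODULE tail above pairs two boot-time hooks: early_param() records the presence of rockchip.usb_uart on the kernel command line, and early_initcall() then performs the one-shot GRF setup long before normal driver probing. A bare-bones sketch of the pairing, with hypothetical names:

    #include <linux/init.h>

    static int my_feature_requested;

    static int __init my_feature_setup(char *buf)
    {
        my_feature_requested = 1;   /* presence of the option is enough */
        return 0;
    }
    early_param("myboard.my_feature", my_feature_setup);

    static int __init my_feature_init(void)
    {
        if (!my_feature_requested)
            return 0;

        /* one-shot hardware setup goes here; runs before device probing */
        return 0;
    }
    early_initcall(my_feature_init);
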
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 840f3eae428b..6b6af6cba454 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -391,7 +391,7 @@ static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
}
-static int twl4030_usb_runtime_suspend(struct device *dev)
+static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
@@ -405,7 +405,7 @@ static int twl4030_usb_runtime_suspend(struct device *dev)
return 0;
}
-static int twl4030_usb_runtime_resume(struct device *dev)
+static int __maybe_unused twl4030_usb_runtime_resume(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
int res;
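
The __maybe_unused annotations here (and in the dm816x hunk earlier) exist because SET_RUNTIME_PM_OPS() compiles to nothing when CONFIG_PM is disabled, leaving the callbacks referenced by nobody; the attribute silences the resulting -Wunused-function warning without #ifdef clutter. The shape of the pairing, reduced to a sketch:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int __maybe_unused my_runtime_suspend(struct device *dev)
    {
        /* quiesce the hardware */
        return 0;
    }

    static int __maybe_unused my_runtime_resume(struct device *dev)
    {
        /* bring the hardware back up */
        return 0;
    }

    static const struct dev_pm_ops my_pm_ops = {
        SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
    };
    /* wired up via .driver.pm = &my_pm_ops in the driver definition */
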
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 99a4c10ed43f..fb8200b8e8ec 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -79,7 +79,7 @@ config PINCTRL_AT91PIO4
controller available on sama5d2 SoC.
config PINCTRL_AMD
- bool "AMD GPIO pin control"
+ tristate "AMD GPIO pin control"
depends on GPIOLIB
select GPIOLIB_IRQCHIP
select PINCONF
@@ -168,37 +168,6 @@ config PINCTRL_ST
select PINCONF
select GPIOLIB_IRQCHIP
-config PINCTRL_TEGRA
- bool
- select PINMUX
- select PINCONF
-
-config PINCTRL_TEGRA20
- bool
- select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA30
- bool
- select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA114
- bool
- select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA124
- bool
- select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA210
- bool
- select PINCTRL_TEGRA
-
-config PINCTRL_TEGRA_XUSB
- def_bool y if ARCH_TEGRA
- select GENERIC_PHY
- select PINCONF
- select PINMUX
-
config PINCTRL_TZ1090
bool "Toumaz Xenif TZ1090 pin control driver"
depends on SOC_TZ1090
@@ -238,6 +207,23 @@ config PINCTRL_PALMAS
open drain configuration for the Palmas series devices like
TPS65913, TPS80036 etc.
+config PINCTRL_PIC32
+ bool "Microchip PIC32 pin controller driver"
+ depends on OF
+ depends on MACH_PIC32
+ select PINMUX
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select OF_GPIO
+ help
+ This is the pin controller and gpio driver for Microchip PIC32
+ microcontrollers. This option is selected automatically when specific
+ machine and arch are selected to build.
+
+config PINCTRL_PIC32MZDA
+ def_bool y if PIC32MZDA
+ select PINCTRL_PIC32
+
config PINCTRL_ZYNQ
bool "Pinctrl driver for Xilinx Zynq"
depends on ARCH_ZYNQ
@@ -257,7 +243,9 @@ source "drivers/pinctrl/qcom/Kconfig"
source "drivers/pinctrl/samsung/Kconfig"
source "drivers/pinctrl/sh-pfc/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
+source "drivers/pinctrl/stm32/Kconfig"
source "drivers/pinctrl/sunxi/Kconfig"
+source "drivers/pinctrl/tegra/Kconfig"
source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index bf1b5ca5180b..e4bc1151e04f 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -18,17 +18,12 @@ obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
+obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
-obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
-obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
-obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
-obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
-obj-$(CONFIG_PINCTRL_TEGRA124) += pinctrl-tegra124.o
-obj-$(CONFIG_PINCTRL_TEGRA210) += pinctrl-tegra210.o
-obj-$(CONFIG_PINCTRL_TEGRA_XUSB) += pinctrl-tegra-xusb.o
+obj-$(CONFIG_PINCTRL_TEGRA) += tegra/
obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
@@ -46,12 +41,13 @@ obj-y += freescale/
obj-$(CONFIG_X86) += intel/
obj-$(CONFIG_PINCTRL_MVEBU) += mvebu/
obj-y += nomadik/
-obj-$(CONFIG_ARCH_PXA) += pxa/
+obj-$(CONFIG_PINCTRL_PXA) += pxa/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_PINCTRL_SAMSUNG) += samsung/
obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc/
obj-$(CONFIG_PINCTRL_SPEAR) += spear/
-obj-$(CONFIG_ARCH_SUNXI) += sunxi/
+obj-$(CONFIG_PINCTRL_STM32) += stm32/
+obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/
obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
-obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
+obj-$(CONFIG_PINCTRL_MTK) += mediatek/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 0f5997ceb494..08b1d93da9fe 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -779,7 +779,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
}
if (num_pulls) {
err = of_property_read_u32_index(np, "brcm,pull",
- (num_funcs > 1) ? i : 0, &pull);
+ (num_pulls > 1) ? i : 0, &pull);
if (err)
goto out;
err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
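
The one-character bcm2835 fix matters because "brcm,pull" may hold either a single value shared by every pin in the group or one value per pin; the per-pin index i is only valid in the second case, and the old code wrongly keyed that decision off the function count. The read pattern, as a sketch with a hypothetical property name:

    #include <linux/of.h>

    /*
     * Read the value for pin i from "my,prop": entry i when the property
     * lists one value per pin, entry 0 when a single value is shared.
     */
    static int read_pin_val(struct device_node *np, int num_vals, int i,
                            u32 *val)
    {
        return of_property_read_u32_index(np, "my,prop",
                                          (num_vals > 1) ? i : 0, val);
    }
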
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index 3b2ac8f771ed..d530ab4b9d85 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/ioport.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -98,11 +98,6 @@ struct iproc_gpio {
struct pinctrl_desc pctldesc;
};
-static inline struct iproc_gpio *to_iproc_gpio(struct gpio_chip *gc)
-{
- return container_of(gc, struct iproc_gpio, gc);
-}
-
/*
* Mapping from PINCONF pins to GPIO pins is 1-to-1
*/
@@ -147,7 +142,7 @@ static inline bool iproc_get_bit(struct iproc_gpio *chip, unsigned int reg,
static void iproc_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
int i, bit;
@@ -180,7 +175,7 @@ static void iproc_gpio_irq_handler(struct irq_desc *desc)
static void iproc_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
unsigned int offset = IPROC_GPIO_REG(gpio,
IPROC_GPIO_INT_CLR_OFFSET);
@@ -199,7 +194,7 @@ static void iproc_gpio_irq_ack(struct irq_data *d)
static void iproc_gpio_irq_set_mask(struct irq_data *d, bool unmask)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
iproc_set_bit(chip, IPROC_GPIO_INT_MSK_OFFSET, gpio, unmask);
@@ -208,7 +203,7 @@ static void iproc_gpio_irq_set_mask(struct irq_data *d, bool unmask)
static void iproc_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
@@ -219,7 +214,7 @@ static void iproc_gpio_irq_mask(struct irq_data *d)
static void iproc_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
@@ -230,7 +225,7 @@ static void iproc_gpio_irq_unmask(struct irq_data *d)
static int iproc_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = d->hwirq;
bool level_triggered = false;
bool dual_edge = false;
@@ -292,7 +287,7 @@ static struct irq_chip iproc_gpio_irq_chip = {
*/
static int iproc_gpio_request(struct gpio_chip *gc, unsigned offset)
{
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = gc->base + offset;
/* not all Iproc GPIO pins can be muxed individually */
@@ -304,7 +299,7 @@ static int iproc_gpio_request(struct gpio_chip *gc, unsigned offset)
static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
{
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned gpio = gc->base + offset;
if (!chip->pinmux_is_supported)
@@ -315,7 +310,7 @@ static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
static int iproc_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
{
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
@@ -330,7 +325,7 @@ static int iproc_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
static int iproc_gpio_direction_output(struct gpio_chip *gc, unsigned gpio,
int val)
{
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
@@ -345,7 +340,7 @@ static int iproc_gpio_direction_output(struct gpio_chip *gc, unsigned gpio,
static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
{
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
@@ -357,7 +352,7 @@ static void iproc_gpio_set(struct gpio_chip *gc, unsigned gpio, int val)
static int iproc_gpio_get(struct gpio_chip *gc, unsigned gpio)
{
- struct iproc_gpio *chip = to_iproc_gpio(gc);
+ struct iproc_gpio *chip = gpiochip_get_data(gc);
unsigned int offset = IPROC_GPIO_REG(gpio,
IPROC_GPIO_DATA_IN_OFFSET);
unsigned int shift = IPROC_GPIO_SHIFT(gpio);
@@ -706,7 +701,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
chip->pinmux_is_supported = of_property_read_bool(dev->of_node,
"gpio-ranges");
- ret = gpiochip_add(gc);
+ ret = gpiochip_add_data(gc, chip);
if (ret < 0) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
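
The whole iproc conversion is the stock gpiochip_add_data() migration: the driver hands its private structure to the GPIO core at registration and recovers it in every callback with gpiochip_get_data(), instead of relying on container_of() and on the gpio_chip being embedded at a known offset. The pattern in miniature:

    #include <linux/gpio/driver.h>

    struct my_gpio {
        struct gpio_chip gc;
        /* private state: registers, lock, ... */
    };

    static int my_gpio_get(struct gpio_chip *gc, unsigned int offset)
    {
        struct my_gpio *chip = gpiochip_get_data(gc);   /* was container_of() */

        /* read line 'offset' using chip's private state */
        return 0;
    }

    static int my_gpio_register(struct my_gpio *chip)
    {
        chip->gc.get = my_gpio_get;
        return gpiochip_add_data(&chip->gc, chip);      /* data stored by the core */
    }
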
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 2686a4450dfc..f67a8b7a4e18 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -481,18 +481,12 @@ int pinctrl_get_group_pins(struct pinctrl_dev *pctldev, const char *pin_group,
}
EXPORT_SYMBOL_GPL(pinctrl_get_group_pins);
-/**
- * pinctrl_find_gpio_range_from_pin() - locate the GPIO range for a pin
- * @pctldev: the pin controller device to look in
- * @pin: a controller-local number to find the range for
- */
struct pinctrl_gpio_range *
-pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
- unsigned int pin)
+pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev,
+ unsigned int pin)
{
struct pinctrl_gpio_range *range;
- mutex_lock(&pctldev->mutex);
/* Loop over the ranges */
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
/* Check if we're in the valid range */
@@ -500,15 +494,32 @@ pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
int a;
for (a = 0; a < range->npins; a++) {
if (range->pins[a] == pin)
- goto out;
+ return range;
}
} else if (pin >= range->pin_base &&
pin < range->pin_base + range->npins)
- goto out;
+ return range;
}
- range = NULL;
-out:
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pinctrl_find_gpio_range_from_pin_nolock);
+
+/**
+ * pinctrl_find_gpio_range_from_pin() - locate the GPIO range for a pin
+ * @pctldev: the pin controller device to look in
+ * @pin: a controller-local number to find the range for
+ */
+struct pinctrl_gpio_range *
+pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin)
+{
+ struct pinctrl_gpio_range *range;
+
+ mutex_lock(&pctldev->mutex);
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
mutex_unlock(&pctldev->mutex);
+
return range;
}
EXPORT_SYMBOL_GPL(pinctrl_find_gpio_range_from_pin);
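
Splitting pinctrl_find_gpio_range_from_pin() into a _nolock worker plus a thin locking wrapper is the usual answer when a new internal caller already holds pctldev->mutex: the worker does the search, the public entry point only manages the lock. The shape, boiled down:

    #include <linux/mutex.h>

    struct thing { int key; };

    static DEFINE_MUTEX(things_lock);

    /* caller must hold things_lock */
    static struct thing *find_thing_nolock(int key)
    {
        /* walk the protected list, return the match or NULL */
        return NULL;
    }

    struct thing *find_thing(int key)
    {
        struct thing *t;

        mutex_lock(&things_lock);
        t = find_thing_nolock(key);
        mutex_unlock(&things_lock);

        return t;
    }
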
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index b24ea846c867..ca08723b9ee1 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -182,6 +182,10 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
return radix_tree_lookup(&pctldev->pin_desc_tree, pin);
}
+extern struct pinctrl_gpio_range *
+pinctrl_find_gpio_range_from_pin_nolock(struct pinctrl_dev *pctldev,
+ unsigned int pin);
+
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
bool dup);
void pinctrl_unregister_map(struct pinctrl_map const *map);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index a5bb93987378..9cfa544072b5 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -24,6 +25,7 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/slab.h>
+#include <linux/regmap.h>
#include "../core.h"
#include "pinctrl-imx.h"
@@ -341,6 +343,31 @@ mux_pin:
return 0;
}
+static void imx_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset)
+{
+ struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct imx_pinctrl_soc_info *info = ipctl->info;
+ const struct imx_pin_reg *pin_reg;
+ u32 reg;
+
+ /*
+ * Only Vybrid has the input/output buffer enable flags (IBE/OBE)
+ * They are part of the shared mux/conf register.
+ */
+ if (!(info->flags & SHARE_MUX_CONF_REG))
+ return;
+
+ pin_reg = &info->pin_regs[offset];
+ if (pin_reg->mux_reg == -1)
+ return;
+
+ /* Clear IBE/OBE/PUE to disable the pin (Hi-Z) */
+ reg = readl(ipctl->base + pin_reg->mux_reg);
+ reg &= ~0x7;
+ writel(reg, ipctl->base + pin_reg->mux_reg);
+}
+
static int imx_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned offset, bool input)
{
@@ -377,6 +404,7 @@ static const struct pinmux_ops imx_pmx_ops = {
.get_function_groups = imx_pmx_get_groups,
.set_mux = imx_pmx_set,
.gpio_request_enable = imx_pmx_gpio_request_enable,
+ .gpio_disable_free = imx_pmx_gpio_disable_free,
.gpio_set_direction = imx_pmx_gpio_set_direction,
};
@@ -692,10 +720,12 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
int imx_pinctrl_probe(struct platform_device *pdev,
struct imx_pinctrl_soc_info *info)
{
+ struct regmap_config config = { .name = "gpr" };
struct device_node *dev_np = pdev->dev.of_node;
struct device_node *np;
struct imx_pinctrl *ipctl;
struct resource *res;
+ struct regmap *gpr;
int ret, i;
if (!info || !info->pins || !info->npins) {
@@ -704,6 +734,12 @@ int imx_pinctrl_probe(struct platform_device *pdev,
}
info->dev = &pdev->dev;
+ if (info->gpr_compatible) {
+ gpr = syscon_regmap_lookup_by_compatible(info->gpr_compatible);
+ if (!IS_ERR(gpr))
+ regmap_attach_dev(&pdev->dev, gpr, &config);
+ }
+
/* Create state holders etc for this driver */
ipctl = devm_kzalloc(&pdev->dev, sizeof(*ipctl), GFP_KERNEL);
if (!ipctl)
@@ -726,19 +762,18 @@ int imx_pinctrl_probe(struct platform_device *pdev,
if (of_property_read_bool(dev_np, "fsl,input-sel")) {
np = of_parse_phandle(dev_np, "fsl,input-sel", 0);
- if (np) {
- ipctl->input_sel_base = of_iomap(np, 0);
- if (IS_ERR(ipctl->input_sel_base)) {
- of_node_put(np);
- dev_err(&pdev->dev,
- "iomuxc input select base address not found\n");
- return PTR_ERR(ipctl->input_sel_base);
- }
- } else {
+ if (!np) {
dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n");
return -EINVAL;
}
+
+ ipctl->input_sel_base = of_iomap(np, 0);
of_node_put(np);
+ if (!ipctl->input_sel_base) {
+ dev_err(&pdev->dev,
+ "iomuxc input select base address not found\n");
+ return -ENOMEM;
+ }
}
imx_pinctrl_desc.name = dev_name(&pdev->dev);
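
The new gpr_compatible hook makes the iomuxc driver look up the GPR syscon and call regmap_attach_dev(), which ties the (named) regmap to the pinctrl device so it can later be retrieved through that device and is named after it in debugfs. The lookup-and-attach step in isolation; the compatible string below is only an example:

    #include <linux/mfd/syscon.h>
    #include <linux/regmap.h>

    static void attach_gpr(struct device *dev)
    {
        struct regmap_config config = { .name = "gpr" };
        struct regmap *gpr;

        gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
        if (!IS_ERR(gpr))
            regmap_attach_dev(dev, gpr, &config);   /* best effort, as in the patch */
    }
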
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 2a592f657c18..3b8bd81a39a4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -82,6 +82,7 @@ struct imx_pinctrl_soc_info {
struct imx_pmx_func *functions;
unsigned int nfunctions;
unsigned int flags;
+ const char *gpr_compatible;
};
#define SHARE_MUX_CONF_REG 0x1
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index 51b31df96273..8acc4d960cfa 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -389,6 +389,7 @@ static const struct pinctrl_pin_desc imx50_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx50_pinctrl_info = {
.pins = imx50_pinctrl_pads,
.npins = ARRAY_SIZE(imx50_pinctrl_pads),
+ .gpr_compatible = "fsl,imx50-iomuxc-gpr",
};
static const struct of_device_id imx50_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index 7344d340013c..d39dfd6a3a44 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -452,6 +452,7 @@ static const struct pinctrl_pin_desc imx53_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx53_pinctrl_info = {
.pins = imx53_pinctrl_pads,
.npins = ARRAY_SIZE(imx53_pinctrl_pads),
+ .gpr_compatible = "fsl,imx53-iomuxc-gpr",
};
static const struct of_device_id imx53_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 6805c678c3b2..5a2cdb0549ce 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -458,6 +458,7 @@ static const struct pinctrl_pin_desc imx6dl_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx6dl_pinctrl_info = {
.pins = imx6dl_pinctrl_pads,
.npins = ARRAY_SIZE(imx6dl_pinctrl_pads),
+ .gpr_compatible = "fsl,imx6q-iomuxc-gpr",
};
static const struct of_device_id imx6dl_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 4d1fcb861ac1..7d50a36b1086 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -464,6 +464,7 @@ static const struct pinctrl_pin_desc imx6q_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx6q_pinctrl_info = {
.pins = imx6q_pinctrl_pads,
.npins = ARRAY_SIZE(imx6q_pinctrl_pads),
+ .gpr_compatible = "fsl,imx6q-iomuxc-gpr",
};
static const struct of_device_id imx6q_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index 83fa5f19ae89..e27d17fdc69d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -364,6 +364,7 @@ static const struct pinctrl_pin_desc imx6sl_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx6sl_pinctrl_info = {
.pins = imx6sl_pinctrl_pads,
.npins = ARRAY_SIZE(imx6sl_pinctrl_pads),
+ .gpr_compatible = "fsl,imx6sl-iomuxc-gpr",
};
static const struct of_device_id imx6sl_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 0d78fe690818..117180c26c50 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -368,6 +368,7 @@ static const struct pinctrl_pin_desc imx6sx_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx6sx_pinctrl_info = {
.pins = imx6sx_pinctrl_pads,
.npins = ARRAY_SIZE(imx6sx_pinctrl_pads),
+ .gpr_compatible = "fsl,imx6sx-iomuxc-gpr",
};
static const struct of_device_id imx6sx_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index 08e75764e7be..78627c70c6ba 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -284,6 +284,7 @@ static const struct pinctrl_pin_desc imx6ul_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx6ul_pinctrl_info = {
.pins = imx6ul_pinctrl_pads,
.npins = ARRAY_SIZE(imx6ul_pinctrl_pads),
+ .gpr_compatible = "fsl,imx6ul-iomuxc-gpr",
};
static struct of_device_id imx6ul_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 16dc925117de..1c89613eb4b7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -359,6 +359,7 @@ static const struct pinctrl_pin_desc imx7d_lpsr_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info imx7d_pinctrl_info = {
.pins = imx7d_pinctrl_pads,
.npins = ARRAY_SIZE(imx7d_pinctrl_pads),
+ .gpr_compatible = "fsl,imx7d-iomuxc-gpr",
};
static struct imx_pinctrl_soc_info imx7d_lpsr_pinctrl_info = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index c0f5586218c4..6c2c816f8e5f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -11,13 +11,9 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/acpi.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
-#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
@@ -669,6 +665,35 @@ static void intel_gpio_irq_ack(struct irq_data *d)
spin_unlock(&pctrl->lock);
}
+static void intel_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct intel_community *community;
+ unsigned pin = irqd_to_hwirq(d);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+
+ community = intel_get_community(pctrl, pin);
+ if (community) {
+ unsigned padno = pin_to_padno(community, pin);
+ unsigned gpp_size = community->gpp_size;
+ unsigned gpp_offset = padno % gpp_size;
+ unsigned gpp = padno / gpp_size;
+ u32 value;
+
+ /* Clear interrupt status first to avoid unexpected interrupt */
+ writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+
+ value = readl(community->regs + community->ie_offset + gpp * 4);
+ value |= BIT(gpp_offset);
+ writel(value, community->regs + community->ie_offset + gpp * 4);
+ }
+
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -745,8 +770,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
value |= PADCFG0_RXINV;
} else if (type & IRQ_TYPE_EDGE_RISING) {
value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
- } else if (type & IRQ_TYPE_LEVEL_LOW) {
- value |= PADCFG0_RXINV;
+ } else if (type & IRQ_TYPE_LEVEL_MASK) {
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ value |= PADCFG0_RXINV;
} else {
value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
}
@@ -856,6 +882,7 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
static struct irq_chip intel_gpio_irqchip = {
.name = "intel-gpio",
+ .irq_enable = intel_gpio_irq_enable,
.irq_ack = intel_gpio_irq_ack,
.irq_mask = intel_gpio_irq_mask,
.irq_unmask = intel_gpio_irq_unmask,
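
The point of the new intel_gpio_irq_enable() is ordering: the latched status bit in GPI_IS is cleared before the enable bit is set, so an event that happened while the line was disabled cannot fire a spurious interrupt the moment it is enabled. The ordering as a standalone sketch, with a made-up layout of one write-1-to-clear status register and one enable register:

    #include <linux/bitops.h>
    #include <linux/io.h>

    static void my_irq_enable(void __iomem *status_reg, void __iomem *enable_reg,
                              unsigned int bit)
    {
        u32 val;

        writel(BIT(bit), status_reg);   /* ack any stale, latched event first */

        val = readl(enable_reg);
        val |= BIT(bit);                /* only then let new events through */
        writel(val, enable_reg);
    }
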
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 02f6f92df86c..4f0bc8a103f4 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -1,6 +1,6 @@
if ARCH_MEDIATEK || COMPILE_TEST
-config PINCTRL_MTK_COMMON
+config PINCTRL_MTK
bool
depends on OF
select PINMUX
@@ -9,17 +9,29 @@ config PINCTRL_MTK_COMMON
select OF_GPIO
# For ARMv7 SoCs
+config PINCTRL_MT2701
+ bool "Mediatek MT2701 pin control" if COMPILE_TEST && !MACH_MT2701
+ depends on OF
+ default MACH_MT2701
+ select PINCTRL_MTK
+
+config PINCTRL_MT7623
+ bool "Mediatek MT7623 pin control" if COMPILE_TEST && !MACH_MT7623
+ depends on OF
+ default MACH_MT7623
+ select PINCTRL_MTK
+
config PINCTRL_MT8135
bool "Mediatek MT8135 pin control" if COMPILE_TEST && !MACH_MT8135
depends on OF
default MACH_MT8135
- select PINCTRL_MTK_COMMON
+ select PINCTRL_MTK
config PINCTRL_MT8127
bool "Mediatek MT8127 pin control" if COMPILE_TEST && !MACH_MT8127
depends on OF
default MACH_MT8127
- select PINCTRL_MTK_COMMON
+ select PINCTRL_MTK
# For ARMv8 SoCs
config PINCTRL_MT8173
@@ -27,13 +39,13 @@ config PINCTRL_MT8173
depends on OF
depends on ARM64 || COMPILE_TEST
default ARM64 && ARCH_MEDIATEK
- select PINCTRL_MTK_COMMON
+ select PINCTRL_MTK
# For PMIC
config PINCTRL_MT6397
bool "Mediatek MT6397 pin control" if COMPILE_TEST && !MFD_MT6397
depends on OF
default MFD_MT6397
- select PINCTRL_MTK_COMMON
+ select PINCTRL_MTK
endif
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index eb923d64d387..3e3390a14716 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -1,8 +1,10 @@
# Core
-obj-$(CONFIG_PINCTRL_MTK_COMMON) += pinctrl-mtk-common.o
+obj-y += pinctrl-mtk-common.o
# SoC Drivers
-obj-$(CONFIG_PINCTRL_MT8135) += pinctrl-mt8135.o
-obj-$(CONFIG_PINCTRL_MT8127) += pinctrl-mt8127.o
-obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
-obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
+obj-$(CONFIG_PINCTRL_MT2701) += pinctrl-mt2701.o
+obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
+obj-$(CONFIG_PINCTRL_MT8135) += pinctrl-mt8135.o
+obj-$(CONFIG_PINCTRL_MT8127) += pinctrl-mt8127.o
+obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
+obj-$(CONFIG_PINCTRL_MT6397) += pinctrl-mt6397.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2701.c b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
new file mode 100644
index 000000000000..8d802fa7decd
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Biao Huang <biao.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt2701.h"
+
+/**
+ * struct mtk_spec_pinmux_set - For special pins' mode setting
+ * @pin: The pin number.
+ * @offset: The offset of extra setting register.
+ * @bit: The bit of extra setting register.
+ */
+struct mtk_spec_pinmux_set {
+ unsigned short pin;
+ unsigned short offset;
+ unsigned char bit;
+};
+
+#define MTK_PINMUX_SPEC(_pin, _offset, _bit) \
+ { \
+ .pin = _pin, \
+ .offset = _offset, \
+ .bit = _bit, \
+ }
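
The macro is plain designated-initializer shorthand for the struct above; for instance, the first mt2701_spec_pinmux entry further down expands to:

/* MTK_PINMUX_SPEC(22, 0xb10, 3) expands to: */
{
	.pin	= 22,		/* GPIO22 / EINT0 */
	.offset	= 0xb10,	/* extra mode-select register */
	.bit	= 3,		/* select bit within that register */
}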
+
+static const struct mtk_drv_group_desc mt2701_drv_grp[] = {
+ /* 0E4E8SR 4/8/12/16 */
+ MTK_DRV_GRP(4, 16, 1, 2, 4),
+ /* 0E2E4SR 2/4/6/8 */
+ MTK_DRV_GRP(2, 8, 1, 2, 2),
+ /* E8E4E2 2/4/6/8/10/12/14/16 */
+ MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
+
+static const struct mtk_pin_drv_grp mt2701_pin_drv[] = {
+ MTK_PIN_DRV_GRP(0, 0xf50, 0, 1),
+ MTK_PIN_DRV_GRP(1, 0xf50, 0, 1),
+ MTK_PIN_DRV_GRP(2, 0xf50, 0, 1),
+ MTK_PIN_DRV_GRP(3, 0xf50, 0, 1),
+ MTK_PIN_DRV_GRP(4, 0xf50, 0, 1),
+ MTK_PIN_DRV_GRP(5, 0xf50, 0, 1),
+ MTK_PIN_DRV_GRP(6, 0xf50, 0, 1),
+ MTK_PIN_DRV_GRP(7, 0xf50, 4, 1),
+ MTK_PIN_DRV_GRP(8, 0xf50, 4, 1),
+ MTK_PIN_DRV_GRP(9, 0xf50, 4, 1),
+ MTK_PIN_DRV_GRP(10, 0xf50, 8, 1),
+ MTK_PIN_DRV_GRP(11, 0xf50, 8, 1),
+ MTK_PIN_DRV_GRP(12, 0xf50, 8, 1),
+ MTK_PIN_DRV_GRP(13, 0xf50, 8, 1),
+ MTK_PIN_DRV_GRP(14, 0xf50, 12, 0),
+ MTK_PIN_DRV_GRP(15, 0xf50, 12, 0),
+ MTK_PIN_DRV_GRP(16, 0xf60, 0, 0),
+ MTK_PIN_DRV_GRP(17, 0xf60, 0, 0),
+ MTK_PIN_DRV_GRP(18, 0xf60, 4, 0),
+ MTK_PIN_DRV_GRP(19, 0xf60, 4, 0),
+ MTK_PIN_DRV_GRP(20, 0xf60, 4, 0),
+ MTK_PIN_DRV_GRP(21, 0xf60, 4, 0),
+ MTK_PIN_DRV_GRP(22, 0xf60, 8, 0),
+ MTK_PIN_DRV_GRP(23, 0xf60, 8, 0),
+ MTK_PIN_DRV_GRP(24, 0xf60, 8, 0),
+ MTK_PIN_DRV_GRP(25, 0xf60, 8, 0),
+ MTK_PIN_DRV_GRP(26, 0xf60, 8, 0),
+ MTK_PIN_DRV_GRP(27, 0xf60, 12, 0),
+ MTK_PIN_DRV_GRP(28, 0xf60, 12, 0),
+ MTK_PIN_DRV_GRP(29, 0xf60, 12, 0),
+ MTK_PIN_DRV_GRP(30, 0xf60, 0, 0),
+ MTK_PIN_DRV_GRP(31, 0xf60, 0, 0),
+ MTK_PIN_DRV_GRP(32, 0xf60, 0, 0),
+ MTK_PIN_DRV_GRP(33, 0xf70, 0, 0),
+ MTK_PIN_DRV_GRP(34, 0xf70, 0, 0),
+ MTK_PIN_DRV_GRP(35, 0xf70, 0, 0),
+ MTK_PIN_DRV_GRP(36, 0xf70, 0, 0),
+ MTK_PIN_DRV_GRP(37, 0xf70, 0, 0),
+ MTK_PIN_DRV_GRP(38, 0xf70, 4, 0),
+ MTK_PIN_DRV_GRP(39, 0xf70, 8, 1),
+ MTK_PIN_DRV_GRP(40, 0xf70, 8, 1),
+ MTK_PIN_DRV_GRP(41, 0xf70, 8, 1),
+ MTK_PIN_DRV_GRP(42, 0xf70, 8, 1),
+ MTK_PIN_DRV_GRP(43, 0xf70, 12, 0),
+ MTK_PIN_DRV_GRP(44, 0xf70, 12, 0),
+ MTK_PIN_DRV_GRP(45, 0xf70, 12, 0),
+ MTK_PIN_DRV_GRP(47, 0xf80, 0, 0),
+ MTK_PIN_DRV_GRP(48, 0xf80, 0, 0),
+ MTK_PIN_DRV_GRP(49, 0xf80, 4, 0),
+ MTK_PIN_DRV_GRP(50, 0xf70, 4, 0),
+ MTK_PIN_DRV_GRP(51, 0xf70, 4, 0),
+ MTK_PIN_DRV_GRP(52, 0xf70, 4, 0),
+ MTK_PIN_DRV_GRP(53, 0xf80, 12, 0),
+ MTK_PIN_DRV_GRP(54, 0xf80, 12, 0),
+ MTK_PIN_DRV_GRP(55, 0xf80, 12, 0),
+ MTK_PIN_DRV_GRP(56, 0xf80, 12, 0),
+ MTK_PIN_DRV_GRP(60, 0xf90, 8, 1),
+ MTK_PIN_DRV_GRP(61, 0xf90, 8, 1),
+ MTK_PIN_DRV_GRP(62, 0xf90, 8, 1),
+ MTK_PIN_DRV_GRP(63, 0xf90, 12, 1),
+ MTK_PIN_DRV_GRP(64, 0xf90, 12, 1),
+ MTK_PIN_DRV_GRP(65, 0xf90, 12, 1),
+ MTK_PIN_DRV_GRP(66, 0xfa0, 0, 1),
+ MTK_PIN_DRV_GRP(67, 0xfa0, 0, 1),
+ MTK_PIN_DRV_GRP(68, 0xfa0, 0, 1),
+ MTK_PIN_DRV_GRP(69, 0xfa0, 0, 1),
+ MTK_PIN_DRV_GRP(70, 0xfa0, 0, 1),
+ MTK_PIN_DRV_GRP(71, 0xfa0, 0, 1),
+ MTK_PIN_DRV_GRP(72, 0xf80, 4, 0),
+ MTK_PIN_DRV_GRP(73, 0xf80, 4, 0),
+ MTK_PIN_DRV_GRP(74, 0xf80, 4, 0),
+ MTK_PIN_DRV_GRP(85, 0xda0, 0, 2),
+ MTK_PIN_DRV_GRP(86, 0xd90, 0, 2),
+ MTK_PIN_DRV_GRP(87, 0xdb0, 0, 2),
+ MTK_PIN_DRV_GRP(88, 0xdb0, 0, 2),
+ MTK_PIN_DRV_GRP(89, 0xdb0, 0, 2),
+ MTK_PIN_DRV_GRP(90, 0xdb0, 0, 2),
+ MTK_PIN_DRV_GRP(105, 0xd40, 0, 2),
+ MTK_PIN_DRV_GRP(106, 0xd30, 0, 2),
+ MTK_PIN_DRV_GRP(107, 0xd50, 0, 2),
+ MTK_PIN_DRV_GRP(108, 0xd50, 0, 2),
+ MTK_PIN_DRV_GRP(109, 0xd50, 0, 2),
+ MTK_PIN_DRV_GRP(110, 0xd50, 0, 2),
+ MTK_PIN_DRV_GRP(111, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(112, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(113, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(114, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(115, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(116, 0xcd0, 0, 2),
+ MTK_PIN_DRV_GRP(117, 0xcc0, 0, 2),
+ MTK_PIN_DRV_GRP(118, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(119, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(120, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(121, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(126, 0xf80, 4, 0),
+ MTK_PIN_DRV_GRP(188, 0xf70, 4, 0),
+ MTK_PIN_DRV_GRP(189, 0xfe0, 8, 0),
+ MTK_PIN_DRV_GRP(190, 0xfe0, 8, 0),
+ MTK_PIN_DRV_GRP(191, 0xfe0, 8, 0),
+ MTK_PIN_DRV_GRP(192, 0xfe0, 8, 0),
+ MTK_PIN_DRV_GRP(193, 0xfe0, 8, 0),
+ MTK_PIN_DRV_GRP(194, 0xfe0, 12, 0),
+ MTK_PIN_DRV_GRP(195, 0xfe0, 12, 0),
+ MTK_PIN_DRV_GRP(196, 0xfe0, 12, 0),
+ MTK_PIN_DRV_GRP(197, 0xfe0, 12, 0),
+ MTK_PIN_DRV_GRP(198, 0xfe0, 12, 0),
+ MTK_PIN_DRV_GRP(199, 0xf50, 4, 1),
+ MTK_PIN_DRV_GRP(200, 0xfd0, 0, 0),
+ MTK_PIN_DRV_GRP(201, 0xfd0, 0, 0),
+ MTK_PIN_DRV_GRP(202, 0xfd0, 0, 0),
+ MTK_PIN_DRV_GRP(203, 0xfd0, 4, 0),
+ MTK_PIN_DRV_GRP(204, 0xfd0, 4, 0),
+ MTK_PIN_DRV_GRP(205, 0xfd0, 4, 0),
+ MTK_PIN_DRV_GRP(206, 0xfd0, 4, 0),
+ MTK_PIN_DRV_GRP(207, 0xfd0, 4, 0),
+ MTK_PIN_DRV_GRP(208, 0xfd0, 8, 0),
+ MTK_PIN_DRV_GRP(209, 0xfd0, 8, 0),
+ MTK_PIN_DRV_GRP(210, 0xfd0, 12, 1),
+ MTK_PIN_DRV_GRP(211, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(212, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(213, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(214, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(215, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(216, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(217, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(218, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(219, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(220, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(221, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(222, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(223, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(224, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(225, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(226, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(227, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(228, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(229, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(230, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(231, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(232, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(233, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(234, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(235, 0xff0, 0, 1),
+ MTK_PIN_DRV_GRP(236, 0xff0, 4, 0),
+ MTK_PIN_DRV_GRP(237, 0xff0, 4, 0),
+ MTK_PIN_DRV_GRP(238, 0xff0, 4, 0),
+ MTK_PIN_DRV_GRP(239, 0xff0, 4, 0),
+ MTK_PIN_DRV_GRP(240, 0xff0, 4, 0),
+ MTK_PIN_DRV_GRP(241, 0xff0, 4, 0),
+ MTK_PIN_DRV_GRP(242, 0xff0, 8, 0),
+ MTK_PIN_DRV_GRP(243, 0xff0, 8, 0),
+ MTK_PIN_DRV_GRP(248, 0xf00, 0, 0),
+ MTK_PIN_DRV_GRP(249, 0xfc0, 0, 2),
+ MTK_PIN_DRV_GRP(250, 0xfc0, 0, 2),
+ MTK_PIN_DRV_GRP(251, 0xfc0, 0, 2),
+ MTK_PIN_DRV_GRP(252, 0xfc0, 0, 2),
+ MTK_PIN_DRV_GRP(253, 0xfc0, 0, 2),
+ MTK_PIN_DRV_GRP(254, 0xfc0, 0, 2),
+ MTK_PIN_DRV_GRP(255, 0xfc0, 0, 2),
+ MTK_PIN_DRV_GRP(256, 0xfc0, 0, 2),
+ MTK_PIN_DRV_GRP(257, 0xce0, 0, 2),
+ MTK_PIN_DRV_GRP(258, 0xcb0, 0, 2),
+ MTK_PIN_DRV_GRP(259, 0xc90, 0, 2),
+ MTK_PIN_DRV_GRP(260, 0x3a0, 0, 2),
+ MTK_PIN_DRV_GRP(261, 0xd50, 0, 2),
+ MTK_PIN_DRV_GRP(262, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(263, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(264, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(265, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(266, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(267, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(268, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(269, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(270, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(271, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(272, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(273, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(274, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(275, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(276, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(277, 0xf00, 8, 0),
+ MTK_PIN_DRV_GRP(278, 0xf70, 8, 1),
+};
+
+static const struct mtk_pin_spec_pupd_set_samereg mt2701_spec_pupd[] = {
+ MTK_PIN_PUPD_SPEC_SR(111, 0xd00, 12, 13, 14), /* ms0 data7 */
+ MTK_PIN_PUPD_SPEC_SR(112, 0xd00, 8, 9, 10), /* ms0 data6 */
+ MTK_PIN_PUPD_SPEC_SR(113, 0xd00, 4, 5, 6), /* ms0 data5 */
+ MTK_PIN_PUPD_SPEC_SR(114, 0xd00, 0, 1, 2), /* ms0 data4 */
+ MTK_PIN_PUPD_SPEC_SR(115, 0xd10, 0, 1, 2), /* ms0 rstb */
+ MTK_PIN_PUPD_SPEC_SR(116, 0xcd0, 8, 9, 10), /* ms0 cmd */
+ MTK_PIN_PUPD_SPEC_SR(117, 0xcc0, 8, 9, 10), /* ms0 clk */
+ MTK_PIN_PUPD_SPEC_SR(118, 0xcf0, 12, 13, 14), /* ms0 data3 */
+ MTK_PIN_PUPD_SPEC_SR(119, 0xcf0, 8, 9, 10), /* ms0 data2 */
+ MTK_PIN_PUPD_SPEC_SR(120, 0xcf0, 4, 5, 6), /* ms0 data1 */
+ MTK_PIN_PUPD_SPEC_SR(121, 0xcf0, 0, 1, 2), /* ms0 data0 */
+
+ MTK_PIN_PUPD_SPEC_SR(105, 0xd40, 8, 9, 10), /* ms1 cmd */
+ MTK_PIN_PUPD_SPEC_SR(106, 0xd30, 8, 9, 10), /* ms1 clk */
+ MTK_PIN_PUPD_SPEC_SR(107, 0xd60, 0, 1, 2), /* ms1 dat0 */
+ MTK_PIN_PUPD_SPEC_SR(108, 0xd60, 10, 9, 8), /* ms1 dat1 */
+ MTK_PIN_PUPD_SPEC_SR(109, 0xd60, 4, 5, 6), /* ms1 dat2 */
+ MTK_PIN_PUPD_SPEC_SR(110, 0xc60, 12, 13, 14), /* ms1 dat3 */
+
+ MTK_PIN_PUPD_SPEC_SR(85, 0xda0, 8, 9, 10), /* ms2 cmd */
+ MTK_PIN_PUPD_SPEC_SR(86, 0xd90, 8, 9, 10), /* ms2 clk */
+ MTK_PIN_PUPD_SPEC_SR(87, 0xdc0, 0, 1, 2), /* ms2 dat0 */
+ MTK_PIN_PUPD_SPEC_SR(88, 0xdc0, 10, 9, 8), /* ms2 dat1 */
+ MTK_PIN_PUPD_SPEC_SR(89, 0xdc0, 4, 5, 6), /* ms2 dat2 */
+ MTK_PIN_PUPD_SPEC_SR(90, 0xdc0, 12, 13, 14), /* ms2 dat3 */
+
+ MTK_PIN_PUPD_SPEC_SR(249, 0x140, 0, 1, 2), /* ms0e rstb */
+ MTK_PIN_PUPD_SPEC_SR(250, 0x130, 12, 13, 14), /* ms0e dat7 */
+ MTK_PIN_PUPD_SPEC_SR(251, 0x130, 8, 9, 10), /* ms0e dat6 */
+ MTK_PIN_PUPD_SPEC_SR(252, 0x130, 4, 5, 6), /* ms0e dat5 */
+ MTK_PIN_PUPD_SPEC_SR(253, 0x130, 0, 1, 2), /* ms0e dat4 */
+ MTK_PIN_PUPD_SPEC_SR(254, 0xf40, 12, 13, 14), /* ms0e dat3 */
+ MTK_PIN_PUPD_SPEC_SR(255, 0xf40, 8, 9, 10), /* ms0e dat2 */
+ MTK_PIN_PUPD_SPEC_SR(256, 0xf40, 4, 5, 6), /* ms0e dat1 */
+ MTK_PIN_PUPD_SPEC_SR(257, 0xf40, 0, 1, 2), /* ms0e dat0 */
+ MTK_PIN_PUPD_SPEC_SR(258, 0xcb0, 8, 9, 10), /* ms0e cmd */
+ MTK_PIN_PUPD_SPEC_SR(259, 0xc90, 8, 9, 10), /* ms0e clk */
+ MTK_PIN_PUPD_SPEC_SR(261, 0x140, 8, 9, 10), /* ms1 ins */
+};
+
+static int mt2701_spec_pull_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, bool isup, unsigned int r1r0)
+{
+ return mtk_pctrl_spec_pull_set_samereg(regmap, mt2701_spec_pupd,
+ ARRAY_SIZE(mt2701_spec_pupd), pin, align, isup, r1r0);
+}
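
Each "samereg" row names a pull-enable bit plus the two resistor-select bits R1/R0 that sit in the same MSDC control register, so the common helper can program pull direction and strength with single register updates. A hedged consumer-side sketch, assuming the common code passes its port_align here and that the R1R0 constants from dt-bindings/pinctrl/mt65xx.h are used (error handling illustrative):

/* Ask for a pull-up with the R1R0 = 01 strength setting on ms0 data7
 * (pin 111); mt2701_spec_pull_set() finds the 0xd00 row in the table.
 */
err = mt2701_spec_pull_set(regmap, 111, pctl->devdata->port_align,
			   true, MTK_PUPD_SET_R1R0_01);
if (err)
	dev_dbg(pctl->dev, "pin 111 has no samereg pull entry\n");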
+
+static const struct mtk_pin_ies_smt_set mt2701_ies_set[] = {
+ MTK_PIN_IES_SMT_SPEC(0, 6, 0xb20, 0),
+ MTK_PIN_IES_SMT_SPEC(7, 9, 0xb20, 1),
+ MTK_PIN_IES_SMT_SPEC(10, 13, 0xb30, 3),
+ MTK_PIN_IES_SMT_SPEC(14, 15, 0xb30, 13),
+ MTK_PIN_IES_SMT_SPEC(16, 17, 0xb40, 7),
+ MTK_PIN_IES_SMT_SPEC(18, 21, 0xb40, 13),
+ MTK_PIN_IES_SMT_SPEC(22, 26, 0xb40, 13),
+ MTK_PIN_IES_SMT_SPEC(27, 29, 0xb40, 13),
+ MTK_PIN_IES_SMT_SPEC(30, 32, 0xb40, 7),
+ MTK_PIN_IES_SMT_SPEC(33, 37, 0xb40, 13),
+ MTK_PIN_IES_SMT_SPEC(38, 38, 0xb20, 13),
+ MTK_PIN_IES_SMT_SPEC(39, 42, 0xb40, 13),
+ MTK_PIN_IES_SMT_SPEC(43, 45, 0xb20, 10),
+ MTK_PIN_IES_SMT_SPEC(47, 48, 0xb20, 11),
+ MTK_PIN_IES_SMT_SPEC(49, 49, 0xb20, 12),
+ MTK_PIN_IES_SMT_SPEC(50, 52, 0xb20, 13),
+ MTK_PIN_IES_SMT_SPEC(53, 56, 0xb20, 14),
+ MTK_PIN_IES_SMT_SPEC(57, 58, 0xb20, 15),
+ MTK_PIN_IES_SMT_SPEC(59, 59, 0xb30, 10),
+ MTK_PIN_IES_SMT_SPEC(60, 62, 0xb30, 0),
+ MTK_PIN_IES_SMT_SPEC(63, 65, 0xb30, 1),
+ MTK_PIN_IES_SMT_SPEC(66, 71, 0xb30, 2),
+ MTK_PIN_IES_SMT_SPEC(72, 74, 0xb20, 12),
+ MTK_PIN_IES_SMT_SPEC(75, 76, 0xb30, 3),
+ MTK_PIN_IES_SMT_SPEC(77, 78, 0xb30, 4),
+ MTK_PIN_IES_SMT_SPEC(79, 82, 0xb30, 5),
+ MTK_PIN_IES_SMT_SPEC(83, 84, 0xb30, 2),
+ MTK_PIN_IES_SMT_SPEC(85, 85, 0xda0, 4),
+ MTK_PIN_IES_SMT_SPEC(86, 86, 0xd90, 4),
+ MTK_PIN_IES_SMT_SPEC(87, 90, 0xdb0, 4),
+ MTK_PIN_IES_SMT_SPEC(101, 104, 0xb30, 6),
+ MTK_PIN_IES_SMT_SPEC(105, 105, 0xd40, 4),
+ MTK_PIN_IES_SMT_SPEC(106, 106, 0xd30, 4),
+ MTK_PIN_IES_SMT_SPEC(107, 110, 0xd50, 4),
+ MTK_PIN_IES_SMT_SPEC(111, 115, 0xce0, 4),
+ MTK_PIN_IES_SMT_SPEC(116, 116, 0xcd0, 4),
+ MTK_PIN_IES_SMT_SPEC(117, 117, 0xcc0, 4),
+ MTK_PIN_IES_SMT_SPEC(118, 121, 0xce0, 4),
+ MTK_PIN_IES_SMT_SPEC(122, 125, 0xb30, 7),
+ MTK_PIN_IES_SMT_SPEC(126, 126, 0xb20, 12),
+ MTK_PIN_IES_SMT_SPEC(127, 142, 0xb30, 9),
+ MTK_PIN_IES_SMT_SPEC(143, 160, 0xb30, 10),
+ MTK_PIN_IES_SMT_SPEC(161, 168, 0xb30, 12),
+ MTK_PIN_IES_SMT_SPEC(169, 183, 0xb30, 10),
+ MTK_PIN_IES_SMT_SPEC(184, 186, 0xb30, 9),
+ MTK_PIN_IES_SMT_SPEC(187, 187, 0xb30, 14),
+ MTK_PIN_IES_SMT_SPEC(188, 188, 0xb20, 13),
+ MTK_PIN_IES_SMT_SPEC(189, 193, 0xb30, 15),
+ MTK_PIN_IES_SMT_SPEC(194, 198, 0xb40, 0),
+ MTK_PIN_IES_SMT_SPEC(199, 199, 0xb20, 1),
+ MTK_PIN_IES_SMT_SPEC(200, 202, 0xb40, 1),
+ MTK_PIN_IES_SMT_SPEC(203, 207, 0xb40, 2),
+ MTK_PIN_IES_SMT_SPEC(208, 209, 0xb40, 3),
+ MTK_PIN_IES_SMT_SPEC(210, 210, 0xb40, 4),
+ MTK_PIN_IES_SMT_SPEC(211, 235, 0xb40, 5),
+ MTK_PIN_IES_SMT_SPEC(236, 241, 0xb40, 6),
+ MTK_PIN_IES_SMT_SPEC(242, 243, 0xb40, 7),
+ MTK_PIN_IES_SMT_SPEC(244, 247, 0xb40, 8),
+ MTK_PIN_IES_SMT_SPEC(248, 248, 0xb40, 9),
+ MTK_PIN_IES_SMT_SPEC(249, 257, 0xfc0, 4),
+ MTK_PIN_IES_SMT_SPEC(258, 258, 0xcb0, 4),
+ MTK_PIN_IES_SMT_SPEC(259, 259, 0xc90, 4),
+ MTK_PIN_IES_SMT_SPEC(260, 260, 0x3a0, 4),
+ MTK_PIN_IES_SMT_SPEC(261, 261, 0xd50, 4),
+ MTK_PIN_IES_SMT_SPEC(262, 277, 0xb40, 12),
+ MTK_PIN_IES_SMT_SPEC(278, 278, 0xb40, 13),
+};
+
+static const struct mtk_pin_ies_smt_set mt2701_smt_set[] = {
+ MTK_PIN_IES_SMT_SPEC(0, 6, 0xb50, 0),
+ MTK_PIN_IES_SMT_SPEC(7, 9, 0xb50, 1),
+ MTK_PIN_IES_SMT_SPEC(10, 13, 0xb60, 3),
+ MTK_PIN_IES_SMT_SPEC(14, 15, 0xb60, 13),
+ MTK_PIN_IES_SMT_SPEC(16, 17, 0xb70, 7),
+ MTK_PIN_IES_SMT_SPEC(18, 21, 0xb70, 13),
+ MTK_PIN_IES_SMT_SPEC(22, 26, 0xb70, 13),
+ MTK_PIN_IES_SMT_SPEC(27, 29, 0xb70, 13),
+ MTK_PIN_IES_SMT_SPEC(30, 32, 0xb70, 7),
+ MTK_PIN_IES_SMT_SPEC(33, 37, 0xb70, 13),
+ MTK_PIN_IES_SMT_SPEC(38, 38, 0xb50, 13),
+ MTK_PIN_IES_SMT_SPEC(39, 42, 0xb70, 13),
+ MTK_PIN_IES_SMT_SPEC(43, 45, 0xb50, 10),
+ MTK_PIN_IES_SMT_SPEC(47, 48, 0xb50, 11),
+ MTK_PIN_IES_SMT_SPEC(49, 49, 0xb50, 12),
+ MTK_PIN_IES_SMT_SPEC(50, 52, 0xb50, 13),
+ MTK_PIN_IES_SMT_SPEC(53, 56, 0xb50, 14),
+ MTK_PIN_IES_SMT_SPEC(57, 58, 0xb50, 15),
+ MTK_PIN_IES_SMT_SPEC(59, 59, 0xb60, 10),
+ MTK_PIN_IES_SMT_SPEC(60, 62, 0xb60, 0),
+ MTK_PIN_IES_SMT_SPEC(63, 65, 0xb60, 1),
+ MTK_PIN_IES_SMT_SPEC(66, 71, 0xb60, 2),
+ MTK_PIN_IES_SMT_SPEC(72, 74, 0xb50, 12),
+ MTK_PIN_IES_SMT_SPEC(75, 76, 0xb60, 3),
+ MTK_PIN_IES_SMT_SPEC(77, 78, 0xb60, 4),
+ MTK_PIN_IES_SMT_SPEC(79, 82, 0xb60, 5),
+ MTK_PIN_IES_SMT_SPEC(83, 84, 0xb60, 2),
+ MTK_PIN_IES_SMT_SPEC(85, 85, 0xda0, 11),
+ MTK_PIN_IES_SMT_SPEC(86, 86, 0xd90, 11),
+ MTK_PIN_IES_SMT_SPEC(87, 87, 0xdc0, 3),
+ MTK_PIN_IES_SMT_SPEC(88, 88, 0xdc0, 7),
+ MTK_PIN_IES_SMT_SPEC(89, 89, 0xdc0, 11),
+ MTK_PIN_IES_SMT_SPEC(90, 90, 0xdc0, 15),
+ MTK_PIN_IES_SMT_SPEC(101, 104, 0xb60, 6),
+ MTK_PIN_IES_SMT_SPEC(105, 105, 0xd40, 11),
+ MTK_PIN_IES_SMT_SPEC(106, 106, 0xd30, 11),
+ MTK_PIN_IES_SMT_SPEC(107, 107, 0xd60, 3),
+ MTK_PIN_IES_SMT_SPEC(108, 108, 0xd60, 7),
+ MTK_PIN_IES_SMT_SPEC(109, 109, 0xd60, 11),
+ MTK_PIN_IES_SMT_SPEC(110, 110, 0xd60, 15),
+ MTK_PIN_IES_SMT_SPEC(111, 111, 0xd00, 15),
+ MTK_PIN_IES_SMT_SPEC(112, 112, 0xd00, 11),
+ MTK_PIN_IES_SMT_SPEC(113, 113, 0xd00, 7),
+ MTK_PIN_IES_SMT_SPEC(114, 114, 0xd00, 3),
+ MTK_PIN_IES_SMT_SPEC(115, 115, 0xd10, 3),
+ MTK_PIN_IES_SMT_SPEC(116, 116, 0xcd0, 11),
+ MTK_PIN_IES_SMT_SPEC(117, 117, 0xcc0, 11),
+ MTK_PIN_IES_SMT_SPEC(118, 118, 0xcf0, 15),
+ MTK_PIN_IES_SMT_SPEC(119, 119, 0xcf0, 11),
+ MTK_PIN_IES_SMT_SPEC(120, 120, 0xcf0, 7),
+ MTK_PIN_IES_SMT_SPEC(121, 121, 0xcf0, 3),
+ MTK_PIN_IES_SMT_SPEC(122, 125, 0xb60, 7),
+ MTK_PIN_IES_SMT_SPEC(126, 126, 0xb50, 12),
+ MTK_PIN_IES_SMT_SPEC(127, 142, 0xb60, 9),
+ MTK_PIN_IES_SMT_SPEC(143, 160, 0xb60, 10),
+ MTK_PIN_IES_SMT_SPEC(161, 168, 0xb60, 12),
+ MTK_PIN_IES_SMT_SPEC(169, 183, 0xb60, 10),
+ MTK_PIN_IES_SMT_SPEC(184, 186, 0xb60, 9),
+ MTK_PIN_IES_SMT_SPEC(187, 187, 0xb60, 14),
+ MTK_PIN_IES_SMT_SPEC(188, 188, 0xb50, 13),
+ MTK_PIN_IES_SMT_SPEC(189, 193, 0xb60, 15),
+ MTK_PIN_IES_SMT_SPEC(194, 198, 0xb70, 0),
+ MTK_PIN_IES_SMT_SPEC(199, 199, 0xb50, 1),
+ MTK_PIN_IES_SMT_SPEC(200, 202, 0xb70, 1),
+ MTK_PIN_IES_SMT_SPEC(203, 207, 0xb70, 2),
+ MTK_PIN_IES_SMT_SPEC(208, 209, 0xb70, 3),
+ MTK_PIN_IES_SMT_SPEC(210, 210, 0xb70, 4),
+ MTK_PIN_IES_SMT_SPEC(211, 235, 0xb70, 5),
+ MTK_PIN_IES_SMT_SPEC(236, 241, 0xb70, 6),
+ MTK_PIN_IES_SMT_SPEC(242, 243, 0xb70, 7),
+ MTK_PIN_IES_SMT_SPEC(244, 247, 0xb70, 8),
+ MTK_PIN_IES_SMT_SPEC(248, 248, 0xb70, 9),
+ MTK_PIN_IES_SMT_SPEC(249, 249, 0x140, 3),
+ MTK_PIN_IES_SMT_SPEC(250, 250, 0x130, 15),
+ MTK_PIN_IES_SMT_SPEC(251, 251, 0x130, 11),
+ MTK_PIN_IES_SMT_SPEC(252, 252, 0x130, 7),
+ MTK_PIN_IES_SMT_SPEC(253, 253, 0x130, 3),
+ MTK_PIN_IES_SMT_SPEC(254, 254, 0xf40, 15),
+ MTK_PIN_IES_SMT_SPEC(255, 255, 0xf40, 11),
+ MTK_PIN_IES_SMT_SPEC(256, 256, 0xf40, 7),
+ MTK_PIN_IES_SMT_SPEC(257, 257, 0xf40, 3),
+ MTK_PIN_IES_SMT_SPEC(258, 258, 0xcb0, 11),
+ MTK_PIN_IES_SMT_SPEC(259, 259, 0xc90, 11),
+ MTK_PIN_IES_SMT_SPEC(260, 260, 0x3a0, 11),
+ MTK_PIN_IES_SMT_SPEC(261, 261, 0x0b0, 3),
+ MTK_PIN_IES_SMT_SPEC(262, 277, 0xb70, 12),
+ MTK_PIN_IES_SMT_SPEC(278, 278, 0xb70, 13),
+};
+
+static int mt2701_ies_smt_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, int value, enum pin_config_param arg)
+{
+ if (arg == PIN_CONFIG_INPUT_ENABLE)
+ return mtk_pconf_spec_set_ies_smt_range(regmap, mt2701_ies_set,
+ ARRAY_SIZE(mt2701_ies_set), pin, align, value);
+ else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
+ return mtk_pconf_spec_set_ies_smt_range(regmap, mt2701_smt_set,
+ ARRAY_SIZE(mt2701_smt_set), pin, align, value);
+ return -EINVAL;
+}
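
The hook only understands the two parameters it has tables for; -EINVAL is the signal for the caller in pinctrl-mtk-common.c to fall back to the generic ies_offset/smt_offset registers. A hedged sketch of the contract:

/* Table hit: programmed via the range tables above, returns 0.
 * Miss (or any other parameter): returns -EINVAL so the common code can
 * fall back to the plain ies_offset/smt_offset path.
 */
err = mt2701_ies_smt_set(mtk_get_regmap(pctl, pin), pin,
			 pctl->devdata->port_align, 1,
			 PIN_CONFIG_INPUT_ENABLE);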
+
+static const struct mtk_spec_pinmux_set mt2701_spec_pinmux[] = {
+ MTK_PINMUX_SPEC(22, 0xb10, 3),
+ MTK_PINMUX_SPEC(23, 0xb10, 4),
+ MTK_PINMUX_SPEC(24, 0xb10, 5),
+ MTK_PINMUX_SPEC(29, 0xb10, 9),
+ MTK_PINMUX_SPEC(208, 0xb10, 7),
+ MTK_PINMUX_SPEC(209, 0xb10, 8),
+ MTK_PINMUX_SPEC(203, 0xf20, 0),
+ MTK_PINMUX_SPEC(204, 0xf20, 1),
+ MTK_PINMUX_SPEC(249, 0xef0, 0),
+ MTK_PINMUX_SPEC(250, 0xef0, 0),
+ MTK_PINMUX_SPEC(251, 0xef0, 0),
+ MTK_PINMUX_SPEC(252, 0xef0, 0),
+ MTK_PINMUX_SPEC(253, 0xef0, 0),
+ MTK_PINMUX_SPEC(254, 0xef0, 0),
+ MTK_PINMUX_SPEC(255, 0xef0, 0),
+ MTK_PINMUX_SPEC(256, 0xef0, 0),
+ MTK_PINMUX_SPEC(257, 0xef0, 0),
+ MTK_PINMUX_SPEC(258, 0xef0, 0),
+ MTK_PINMUX_SPEC(259, 0xef0, 0),
+ MTK_PINMUX_SPEC(260, 0xef0, 0),
+};
+
+static void mt2701_spec_pinmux_set(struct regmap *reg, unsigned int pin,
+ unsigned int mode)
+{
+ unsigned int i, value, mask;
+ unsigned int info_num = ARRAY_SIZE(mt2701_spec_pinmux);
+ unsigned int spec_flag;
+
+ for (i = 0; i < info_num; i++) {
+ if (pin == mt2701_spec_pinmux[i].pin)
+ break;
+ }
+
+ if (i == info_num)
+ return;
+
+ spec_flag = (mode >> 3);
+ mask = BIT(mt2701_spec_pinmux[i].bit);
+ if (!spec_flag)
+ value = mask;
+ else
+ value = 0;
+ regmap_update_bits(reg, mt2701_spec_pinmux[i].offset, mask, value);
+}
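
Worked example of the encoding: modes 0-7 go straight to the 3-bit mux field and leave the extra bit set (value = mask), while a mode with bit 3 set clears it to pick the alternate function. For pin 22, MTK_FUNCTION(10, "PCIE0_PERST_N") therefore means:

unsigned int mode = 10;			/* 0b1010, from MTK_FUNCTION(10, ...) */
unsigned int spec_flag = mode >> 3;	/* 1: clear bit 3 of reg 0xb10 ...     */
unsigned int mux_val = mode & 0x7;	/* 2: ... and write 2 to the mux field */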
+
+static void mt2701_spec_dir_set(unsigned int *reg_addr, unsigned int pin)
+{
+ if (pin > 175)
+ *reg_addr += 0x10;
+}
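
The direction-register fixup covers a hole in MT2701's register map. Assuming the common code's 16-pins-per-register layout, a worked example:

unsigned int pin = 176;
unsigned int reg_addr = (pin / 16) * 0x10;	/* 0x0b0 before the fixup */

mt2701_spec_dir_set(&reg_addr, pin);		/* pins above 175 skip a hole */
/* reg_addr == 0x0c0: the next physical direction register */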
+
+static const struct mtk_pinctrl_devdata mt2701_pinctrl_data = {
+ .pins = mtk_pins_mt2701,
+ .npins = ARRAY_SIZE(mtk_pins_mt2701),
+ .grp_desc = mt2701_drv_grp,
+ .n_grp_cls = ARRAY_SIZE(mt2701_drv_grp),
+ .pin_drv_grp = mt2701_pin_drv,
+ .n_pin_drv_grps = ARRAY_SIZE(mt2701_pin_drv),
+ .spec_pull_set = mt2701_spec_pull_set,
+ .spec_ies_smt_set = mt2701_ies_smt_set,
+ .spec_pinmux_set = mt2701_spec_pinmux_set,
+ .spec_dir_set = mt2701_spec_dir_set,
+ .dir_offset = 0x0000,
+ .pullen_offset = 0x0150,
+ .pullsel_offset = 0x0280,
+ .dout_offset = 0x0500,
+ .din_offset = 0x0630,
+ .pinmux_offset = 0x0760,
+ .type1_start = 280,
+ .type1_end = 280,
+ .port_shf = 4,
+ .port_mask = 0x1f,
+ .port_align = 4,
+ .eint_offsets = {
+ .name = "mt2701_eint",
+ .stat = 0x000,
+ .ack = 0x040,
+ .mask = 0x080,
+ .mask_set = 0x0c0,
+ .mask_clr = 0x100,
+ .sens = 0x140,
+ .sens_set = 0x180,
+ .sens_clr = 0x1c0,
+ .soft = 0x200,
+ .soft_set = 0x240,
+ .soft_clr = 0x280,
+ .pol = 0x300,
+ .pol_set = 0x340,
+ .pol_clr = 0x380,
+ .dom_en = 0x400,
+ .dbnc_ctrl = 0x500,
+ .dbnc_set = 0x600,
+ .dbnc_clr = 0x700,
+ .port_mask = 6,
+ .ports = 6,
+ },
+ .ap_num = 169,
+ .db_cnt = 16,
+};
+
+static int mt2701_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_pctrl_init(pdev, &mt2701_pinctrl_data, NULL);
+}
+
+static const struct of_device_id mt2701_pctrl_match[] = {
+ { .compatible = "mediatek,mt2701-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt2701_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+ .probe = mt2701_pinctrl_probe,
+ .driver = {
+ .name = "mediatek-mt2701-pinctrl",
+ .of_match_table = mt2701_pctrl_match,
+ .pm = &mtk_eint_pm_ops,
+ },
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+ return platform_driver_register(&mtk_pinctrl_driver);
+}
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6397.c b/drivers/pinctrl/mediatek/pinctrl-mt6397.c
index f9751ae28e32..6eccb85c02cd 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt6397.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt6397.c
@@ -12,7 +12,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -55,7 +55,6 @@ static const struct of_device_id mt6397_pctrl_match[] = {
{ .compatible = "mediatek,mt6397-pinctrl", },
{ }
};
-MODULE_DEVICE_TABLE(of, mt6397_pctrl_match);
static struct platform_driver mtk_pinctrl_driver = {
.probe = mt6397_pinctrl_probe,
@@ -69,9 +68,4 @@ static int __init mtk_pinctrl_init(void)
{
return platform_driver_register(&mtk_pinctrl_driver);
}
-
-module_init(mtk_pinctrl_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek MT6397 Pinctrl Driver");
-MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
+device_initcall(mtk_pinctrl_init);
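
These drivers were already effectively built-in only, so the module boilerplate was dead code; what changes is only the initcall bookkeeping. For built-in code module_init() is device_initcall(), so the MT6397 probe order is preserved, while the SoC pinctrl drivers stay at arch_initcall so their pins are ready before the drivers that depend on them. From include/linux/init.h:

/* Initcall levels relevant here (include/linux/init.h): */
#define arch_initcall(fn)	__define_initcall(fn, 3)
#define device_initcall(fn)	__define_initcall(fn, 6)
/* ... and for built-in code, module_init(x) is device_initcall(x). */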
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
new file mode 100644
index 000000000000..67895f8234e3
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2016 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
+
+#include "pinctrl-mtk-common.h"
+#include "pinctrl-mtk-mt7623.h"
+
+static const struct mtk_drv_group_desc mt7623_drv_grp[] = {
+ /* 0E4E8SR 4/8/12/16 */
+ MTK_DRV_GRP(4, 16, 1, 2, 4),
+ /* 0E2E4SR 2/4/6/8 */
+ MTK_DRV_GRP(2, 8, 1, 2, 2),
+ /* E8E4E2 2/4/6/8/10/12/14/16 */
+ MTK_DRV_GRP(2, 16, 0, 2, 2)
+};
+
+#define DRV_SEL0 0xf50
+#define DRV_SEL1 0xf60
+#define DRV_SEL2 0xf70
+#define DRV_SEL3 0xf80
+#define DRV_SEL4 0xf90
+#define DRV_SEL5 0xfa0
+#define DRV_SEL6 0xfb0
+#define DRV_SEL7 0xfe0
+#define DRV_SEL8 0xfd0
+#define DRV_SEL9 0xff0
+#define DRV_SEL10 0xf00
+
+#define MSDC0_CTRL0 0xcc0
+#define MSDC0_CTRL1 0xcd0
+#define MSDC0_CTRL2 0xce0
+#define MSDC0_CTRL3 0xcf0
+#define MSDC0_CTRL4 0xd00
+#define MSDC0_CTRL5 0xd10
+#define MSDC0_CTRL6 0xd20
+#define MSDC1_CTRL0 0xd30
+#define MSDC1_CTRL1 0xd40
+#define MSDC1_CTRL2 0xd50
+#define MSDC1_CTRL3 0xd60
+#define MSDC1_CTRL4 0xd70
+#define MSDC1_CTRL5 0xd80
+#define MSDC1_CTRL6 0xd90
+
+#define IES_EN0 0xb20
+#define IES_EN1 0xb30
+#define IES_EN2 0xb40
+
+#define SMT_EN0 0xb50
+#define SMT_EN1 0xb60
+#define SMT_EN2 0xb70
+
+static const struct mtk_pin_drv_grp mt7623_pin_drv[] = {
+ MTK_PIN_DRV_GRP(0, DRV_SEL0, 0, 1),
+ MTK_PIN_DRV_GRP(1, DRV_SEL0, 0, 1),
+ MTK_PIN_DRV_GRP(2, DRV_SEL0, 0, 1),
+ MTK_PIN_DRV_GRP(3, DRV_SEL0, 0, 1),
+ MTK_PIN_DRV_GRP(4, DRV_SEL0, 0, 1),
+ MTK_PIN_DRV_GRP(5, DRV_SEL0, 0, 1),
+ MTK_PIN_DRV_GRP(6, DRV_SEL0, 0, 1),
+ MTK_PIN_DRV_GRP(7, DRV_SEL0, 4, 1),
+ MTK_PIN_DRV_GRP(8, DRV_SEL0, 4, 1),
+ MTK_PIN_DRV_GRP(9, DRV_SEL0, 4, 1),
+ MTK_PIN_DRV_GRP(10, DRV_SEL0, 8, 1),
+ MTK_PIN_DRV_GRP(11, DRV_SEL0, 8, 1),
+ MTK_PIN_DRV_GRP(12, DRV_SEL0, 8, 1),
+ MTK_PIN_DRV_GRP(13, DRV_SEL0, 8, 1),
+ MTK_PIN_DRV_GRP(14, DRV_SEL0, 12, 0),
+ MTK_PIN_DRV_GRP(15, DRV_SEL0, 12, 0),
+ MTK_PIN_DRV_GRP(18, DRV_SEL1, 4, 0),
+ MTK_PIN_DRV_GRP(19, DRV_SEL1, 4, 0),
+ MTK_PIN_DRV_GRP(20, DRV_SEL1, 4, 0),
+ MTK_PIN_DRV_GRP(21, DRV_SEL1, 4, 0),
+ MTK_PIN_DRV_GRP(22, DRV_SEL1, 8, 0),
+ MTK_PIN_DRV_GRP(23, DRV_SEL1, 8, 0),
+ MTK_PIN_DRV_GRP(24, DRV_SEL1, 8, 0),
+ MTK_PIN_DRV_GRP(25, DRV_SEL1, 8, 0),
+ MTK_PIN_DRV_GRP(26, DRV_SEL1, 8, 0),
+ MTK_PIN_DRV_GRP(27, DRV_SEL1, 12, 0),
+ MTK_PIN_DRV_GRP(28, DRV_SEL1, 12, 0),
+ MTK_PIN_DRV_GRP(29, DRV_SEL1, 12, 0),
+ MTK_PIN_DRV_GRP(33, DRV_SEL2, 0, 0),
+ MTK_PIN_DRV_GRP(34, DRV_SEL2, 0, 0),
+ MTK_PIN_DRV_GRP(35, DRV_SEL2, 0, 0),
+ MTK_PIN_DRV_GRP(36, DRV_SEL2, 0, 0),
+ MTK_PIN_DRV_GRP(37, DRV_SEL2, 0, 0),
+ MTK_PIN_DRV_GRP(39, DRV_SEL2, 8, 1),
+ MTK_PIN_DRV_GRP(40, DRV_SEL2, 8, 1),
+ MTK_PIN_DRV_GRP(41, DRV_SEL2, 8, 1),
+ MTK_PIN_DRV_GRP(42, DRV_SEL2, 8, 1),
+ MTK_PIN_DRV_GRP(43, DRV_SEL2, 12, 0),
+ MTK_PIN_DRV_GRP(44, DRV_SEL2, 12, 0),
+ MTK_PIN_DRV_GRP(45, DRV_SEL2, 12, 0),
+ MTK_PIN_DRV_GRP(47, DRV_SEL3, 0, 0),
+ MTK_PIN_DRV_GRP(48, DRV_SEL3, 0, 0),
+ MTK_PIN_DRV_GRP(49, DRV_SEL3, 4, 0),
+ MTK_PIN_DRV_GRP(53, DRV_SEL3, 12, 0),
+ MTK_PIN_DRV_GRP(54, DRV_SEL3, 12, 0),
+ MTK_PIN_DRV_GRP(55, DRV_SEL3, 12, 0),
+ MTK_PIN_DRV_GRP(56, DRV_SEL3, 12, 0),
+ MTK_PIN_DRV_GRP(60, DRV_SEL4, 8, 1),
+ MTK_PIN_DRV_GRP(61, DRV_SEL4, 8, 1),
+ MTK_PIN_DRV_GRP(62, DRV_SEL4, 8, 1),
+ MTK_PIN_DRV_GRP(63, DRV_SEL4, 12, 1),
+ MTK_PIN_DRV_GRP(64, DRV_SEL4, 12, 1),
+ MTK_PIN_DRV_GRP(65, DRV_SEL4, 12, 1),
+ MTK_PIN_DRV_GRP(66, DRV_SEL5, 0, 1),
+ MTK_PIN_DRV_GRP(67, DRV_SEL5, 0, 1),
+ MTK_PIN_DRV_GRP(68, DRV_SEL5, 0, 1),
+ MTK_PIN_DRV_GRP(69, DRV_SEL5, 0, 1),
+ MTK_PIN_DRV_GRP(70, DRV_SEL5, 0, 1),
+ MTK_PIN_DRV_GRP(71, DRV_SEL5, 0, 1),
+ MTK_PIN_DRV_GRP(72, DRV_SEL3, 4, 0),
+ MTK_PIN_DRV_GRP(73, DRV_SEL3, 4, 0),
+ MTK_PIN_DRV_GRP(74, DRV_SEL3, 4, 0),
+ MTK_PIN_DRV_GRP(83, DRV_SEL5, 0, 1),
+ MTK_PIN_DRV_GRP(84, DRV_SEL5, 0, 1),
+ MTK_PIN_DRV_GRP(105, MSDC1_CTRL1, 0, 1),
+ MTK_PIN_DRV_GRP(106, MSDC1_CTRL0, 0, 1),
+ MTK_PIN_DRV_GRP(107, MSDC1_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(108, MSDC1_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(109, MSDC1_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(110, MSDC1_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(111, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(112, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(113, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(114, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(115, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(116, MSDC0_CTRL1, 0, 1),
+ MTK_PIN_DRV_GRP(117, MSDC0_CTRL0, 0, 1),
+ MTK_PIN_DRV_GRP(118, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(119, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(120, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(121, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(126, DRV_SEL3, 4, 0),
+ MTK_PIN_DRV_GRP(199, DRV_SEL0, 4, 1),
+ MTK_PIN_DRV_GRP(200, DRV_SEL8, 0, 0),
+ MTK_PIN_DRV_GRP(201, DRV_SEL8, 0, 0),
+ MTK_PIN_DRV_GRP(203, DRV_SEL8, 4, 0),
+ MTK_PIN_DRV_GRP(204, DRV_SEL8, 4, 0),
+ MTK_PIN_DRV_GRP(205, DRV_SEL8, 4, 0),
+ MTK_PIN_DRV_GRP(206, DRV_SEL8, 4, 0),
+ MTK_PIN_DRV_GRP(207, DRV_SEL8, 4, 0),
+ MTK_PIN_DRV_GRP(208, DRV_SEL8, 8, 0),
+ MTK_PIN_DRV_GRP(209, DRV_SEL8, 8, 0),
+ MTK_PIN_DRV_GRP(236, DRV_SEL9, 4, 0),
+ MTK_PIN_DRV_GRP(237, DRV_SEL9, 4, 0),
+ MTK_PIN_DRV_GRP(238, DRV_SEL9, 4, 0),
+ MTK_PIN_DRV_GRP(239, DRV_SEL9, 4, 0),
+ MTK_PIN_DRV_GRP(240, DRV_SEL9, 4, 0),
+ MTK_PIN_DRV_GRP(241, DRV_SEL9, 4, 0),
+ MTK_PIN_DRV_GRP(242, DRV_SEL9, 8, 0),
+ MTK_PIN_DRV_GRP(243, DRV_SEL9, 8, 0),
+ MTK_PIN_DRV_GRP(257, MSDC0_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(261, MSDC1_CTRL2, 0, 1),
+ MTK_PIN_DRV_GRP(262, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(263, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(264, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(265, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(266, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(267, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(268, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(269, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(270, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(271, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(272, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(274, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(275, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(276, DRV_SEL10, 8, 0),
+ MTK_PIN_DRV_GRP(278, DRV_SEL2, 8, 1),
+};
+
+static const struct mtk_pin_spec_pupd_set_samereg mt7623_spec_pupd[] = {
+ MTK_PIN_PUPD_SPEC_SR(105, MSDC1_CTRL1, 8, 9, 10),
+ MTK_PIN_PUPD_SPEC_SR(106, MSDC1_CTRL0, 8, 9, 10),
+ MTK_PIN_PUPD_SPEC_SR(107, MSDC1_CTRL3, 0, 1, 2),
+ MTK_PIN_PUPD_SPEC_SR(108, MSDC1_CTRL3, 4, 5, 6),
+ MTK_PIN_PUPD_SPEC_SR(109, MSDC1_CTRL3, 8, 9, 10),
+ MTK_PIN_PUPD_SPEC_SR(110, MSDC1_CTRL3, 12, 13, 14),
+ MTK_PIN_PUPD_SPEC_SR(111, MSDC0_CTRL4, 12, 13, 14),
+ MTK_PIN_PUPD_SPEC_SR(112, MSDC0_CTRL4, 8, 9, 10),
+ MTK_PIN_PUPD_SPEC_SR(113, MSDC0_CTRL4, 4, 5, 6),
+ MTK_PIN_PUPD_SPEC_SR(114, MSDC0_CTRL4, 0, 1, 2),
+ MTK_PIN_PUPD_SPEC_SR(115, MSDC0_CTRL5, 0, 1, 2),
+ MTK_PIN_PUPD_SPEC_SR(116, MSDC0_CTRL1, 8, 9, 10),
+ MTK_PIN_PUPD_SPEC_SR(117, MSDC0_CTRL0, 8, 9, 10),
+ MTK_PIN_PUPD_SPEC_SR(118, MSDC0_CTRL3, 12, 13, 14),
+ MTK_PIN_PUPD_SPEC_SR(119, MSDC0_CTRL3, 8, 9, 10),
+ MTK_PIN_PUPD_SPEC_SR(120, MSDC0_CTRL3, 4, 5, 6),
+ MTK_PIN_PUPD_SPEC_SR(121, MSDC0_CTRL3, 0, 1, 2),
+};
+
+static int mt7623_spec_pull_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, bool isup, unsigned int r1r0)
+{
+ return mtk_pctrl_spec_pull_set_samereg(regmap, mt7623_spec_pupd,
+ ARRAY_SIZE(mt7623_spec_pupd), pin, align, isup, r1r0);
+}
+
+static const struct mtk_pin_ies_smt_set mt7623_ies_set[] = {
+ MTK_PIN_IES_SMT_SPEC(0, 6, IES_EN0, 0),
+ MTK_PIN_IES_SMT_SPEC(7, 9, IES_EN0, 1),
+ MTK_PIN_IES_SMT_SPEC(10, 13, IES_EN0, 2),
+ MTK_PIN_IES_SMT_SPEC(14, 15, IES_EN0, 3),
+ MTK_PIN_IES_SMT_SPEC(18, 21, IES_EN0, 5),
+ MTK_PIN_IES_SMT_SPEC(22, 26, IES_EN0, 6),
+ MTK_PIN_IES_SMT_SPEC(27, 29, IES_EN0, 7),
+ MTK_PIN_IES_SMT_SPEC(33, 37, IES_EN0, 8),
+ MTK_PIN_IES_SMT_SPEC(39, 42, IES_EN0, 9),
+ MTK_PIN_IES_SMT_SPEC(43, 45, IES_EN0, 10),
+ MTK_PIN_IES_SMT_SPEC(47, 48, IES_EN0, 11),
+ MTK_PIN_IES_SMT_SPEC(49, 49, IES_EN0, 12),
+ MTK_PIN_IES_SMT_SPEC(53, 56, IES_EN0, 14),
+ MTK_PIN_IES_SMT_SPEC(60, 62, IES_EN1, 0),
+ MTK_PIN_IES_SMT_SPEC(63, 65, IES_EN1, 1),
+ MTK_PIN_IES_SMT_SPEC(66, 71, IES_EN1, 2),
+ MTK_PIN_IES_SMT_SPEC(72, 74, IES_EN0, 12),
+ MTK_PIN_IES_SMT_SPEC(75, 76, IES_EN1, 3),
+ MTK_PIN_IES_SMT_SPEC(83, 84, IES_EN1, 2),
+ MTK_PIN_IES_SMT_SPEC(105, 121, MSDC1_CTRL1, 4),
+ MTK_PIN_IES_SMT_SPEC(122, 125, IES_EN1, 7),
+ MTK_PIN_IES_SMT_SPEC(126, 126, IES_EN0, 12),
+ MTK_PIN_IES_SMT_SPEC(199, 201, IES_EN0, 1),
+ MTK_PIN_IES_SMT_SPEC(203, 207, IES_EN2, 2),
+ MTK_PIN_IES_SMT_SPEC(208, 209, IES_EN2, 3),
+ MTK_PIN_IES_SMT_SPEC(236, 241, IES_EN2, 6),
+ MTK_PIN_IES_SMT_SPEC(242, 243, IES_EN2, 7),
+ MTK_PIN_IES_SMT_SPEC(261, 261, MSDC1_CTRL2, 4),
+ MTK_PIN_IES_SMT_SPEC(262, 272, IES_EN2, 12),
+ MTK_PIN_IES_SMT_SPEC(274, 276, IES_EN2, 12),
+ MTK_PIN_IES_SMT_SPEC(278, 278, IES_EN2, 13),
+};
+
+static const struct mtk_pin_ies_smt_set mt7623_smt_set[] = {
+ MTK_PIN_IES_SMT_SPEC(0, 6, SMT_EN0, 0),
+ MTK_PIN_IES_SMT_SPEC(7, 9, SMT_EN0, 1),
+ MTK_PIN_IES_SMT_SPEC(10, 13, SMT_EN0, 2),
+ MTK_PIN_IES_SMT_SPEC(14, 15, SMT_EN0, 3),
+ MTK_PIN_IES_SMT_SPEC(18, 21, SMT_EN0, 5),
+ MTK_PIN_IES_SMT_SPEC(22, 26, SMT_EN0, 6),
+ MTK_PIN_IES_SMT_SPEC(27, 29, SMT_EN0, 7),
+ MTK_PIN_IES_SMT_SPEC(33, 37, SMT_EN0, 8),
+ MTK_PIN_IES_SMT_SPEC(39, 42, SMT_EN0, 9),
+ MTK_PIN_IES_SMT_SPEC(43, 45, SMT_EN0, 10),
+ MTK_PIN_IES_SMT_SPEC(47, 48, SMT_EN0, 11),
+ MTK_PIN_IES_SMT_SPEC(49, 49, SMT_EN0, 12),
+ MTK_PIN_IES_SMT_SPEC(53, 56, SMT_EN0, 14),
+ MTK_PIN_IES_SMT_SPEC(60, 62, SMT_EN1, 0),
+ MTK_PIN_IES_SMT_SPEC(63, 65, SMT_EN1, 1),
+ MTK_PIN_IES_SMT_SPEC(66, 71, SMT_EN1, 2),
+ MTK_PIN_IES_SMT_SPEC(72, 74, SMT_EN0, 12),
+ MTK_PIN_IES_SMT_SPEC(75, 76, SMT_EN1, 3),
+ MTK_PIN_IES_SMT_SPEC(83, 84, SMT_EN1, 2),
+ MTK_PIN_IES_SMT_SPEC(105, 106, MSDC1_CTRL1, 11),
+ MTK_PIN_IES_SMT_SPEC(107, 107, MSDC1_CTRL3, 3),
+ MTK_PIN_IES_SMT_SPEC(108, 108, MSDC1_CTRL3, 7),
+ MTK_PIN_IES_SMT_SPEC(109, 109, MSDC1_CTRL3, 11),
+ MTK_PIN_IES_SMT_SPEC(110, 111, MSDC1_CTRL3, 15),
+ MTK_PIN_IES_SMT_SPEC(112, 112, MSDC0_CTRL4, 11),
+ MTK_PIN_IES_SMT_SPEC(113, 113, MSDC0_CTRL4, 7),
+ MTK_PIN_IES_SMT_SPEC(114, 115, MSDC0_CTRL4, 3),
+ MTK_PIN_IES_SMT_SPEC(116, 117, MSDC0_CTRL1, 11),
+ MTK_PIN_IES_SMT_SPEC(118, 118, MSDC0_CTRL3, 15),
+ MTK_PIN_IES_SMT_SPEC(119, 119, MSDC0_CTRL3, 11),
+ MTK_PIN_IES_SMT_SPEC(120, 120, MSDC0_CTRL3, 7),
+ MTK_PIN_IES_SMT_SPEC(121, 121, MSDC0_CTRL3, 3),
+ MTK_PIN_IES_SMT_SPEC(122, 125, SMT_EN1, 7),
+ MTK_PIN_IES_SMT_SPEC(126, 126, SMT_EN0, 12),
+ MTK_PIN_IES_SMT_SPEC(199, 201, SMT_EN0, 1),
+ MTK_PIN_IES_SMT_SPEC(203, 207, SMT_EN2, 2),
+ MTK_PIN_IES_SMT_SPEC(208, 209, SMT_EN2, 3),
+ MTK_PIN_IES_SMT_SPEC(236, 241, SMT_EN2, 6),
+ MTK_PIN_IES_SMT_SPEC(242, 243, SMT_EN2, 7),
+ MTK_PIN_IES_SMT_SPEC(261, 261, MSDC1_CTRL6, 3),
+ MTK_PIN_IES_SMT_SPEC(262, 272, SMT_EN2, 12),
+ MTK_PIN_IES_SMT_SPEC(274, 276, SMT_EN2, 12),
+ MTK_PIN_IES_SMT_SPEC(278, 278, SMT_EN2, 13),
+};
+
+static int mt7623_ies_smt_set(struct regmap *regmap, unsigned int pin,
+ unsigned char align, int value, enum pin_config_param arg)
+{
+ if (arg == PIN_CONFIG_INPUT_ENABLE)
+ return mtk_pconf_spec_set_ies_smt_range(regmap, mt7623_ies_set,
+ ARRAY_SIZE(mt7623_ies_set), pin, align, value);
+ else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
+ return mtk_pconf_spec_set_ies_smt_range(regmap, mt7623_smt_set,
+ ARRAY_SIZE(mt7623_smt_set), pin, align, value);
+ return -EINVAL;
+}
+
+static const struct mtk_pinctrl_devdata mt7623_pinctrl_data = {
+ .pins = mtk_pins_mt7623,
+ .npins = ARRAY_SIZE(mtk_pins_mt7623),
+ .grp_desc = mt7623_drv_grp,
+ .n_grp_cls = ARRAY_SIZE(mt7623_drv_grp),
+ .pin_drv_grp = mt7623_pin_drv,
+ .n_pin_drv_grps = ARRAY_SIZE(mt7623_pin_drv),
+ .spec_pull_set = mt7623_spec_pull_set,
+ .spec_ies_smt_set = mt7623_ies_smt_set,
+ .dir_offset = 0x0000,
+ .pullen_offset = 0x0150,
+ .pullsel_offset = 0x0280,
+ .dout_offset = 0x0500,
+ .din_offset = 0x0630,
+ .pinmux_offset = 0x0760,
+ .type1_start = 280,
+ .type1_end = 280,
+ .port_shf = 4,
+ .port_mask = 0x1f,
+ .port_align = 4,
+ .eint_offsets = {
+ .name = "mt7623_eint",
+ .stat = 0x000,
+ .ack = 0x040,
+ .mask = 0x080,
+ .mask_set = 0x0c0,
+ .mask_clr = 0x100,
+ .sens = 0x140,
+ .sens_set = 0x180,
+ .sens_clr = 0x1c0,
+ .soft = 0x200,
+ .soft_set = 0x240,
+ .soft_clr = 0x280,
+ .pol = 0x300,
+ .pol_set = 0x340,
+ .pol_clr = 0x380,
+ .dom_en = 0x400,
+ .dbnc_ctrl = 0x500,
+ .dbnc_set = 0x600,
+ .dbnc_clr = 0x700,
+ .port_mask = 6,
+ .ports = 6,
+ },
+ .ap_num = 169,
+ .db_cnt = 16,
+};
+
+static int mt7623_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_pctrl_init(pdev, &mt7623_pinctrl_data, NULL);
+}
+
+static const struct of_device_id mt7623_pctrl_match[] = {
+ { .compatible = "mediatek,mt7623-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt7623_pctrl_match);
+
+static struct platform_driver mtk_pinctrl_driver = {
+ .probe = mt7623_pinctrl_probe,
+ .driver = {
+ .name = "mediatek-mt7623-pinctrl",
+ .of_match_table = mt7623_pctrl_match,
+ },
+};
+
+static int __init mtk_pinctrl_init(void)
+{
+ return platform_driver_register(&mtk_pinctrl_driver);
+}
+
+arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8127.c b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
index 98e0bebfdf92..d76491574841 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8127.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8127.c
@@ -13,7 +13,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -336,7 +336,6 @@ static const struct of_device_id mt8127_pctrl_match[] = {
{ .compatible = "mediatek,mt8127-pinctrl", },
{ }
};
-MODULE_DEVICE_TABLE(of, mt8127_pctrl_match);
static struct platform_driver mtk_pinctrl_driver = {
.probe = mt8127_pinctrl_probe,
@@ -350,9 +349,4 @@ static int __init mtk_pinctrl_init(void)
{
return platform_driver_register(&mtk_pinctrl_driver);
}
-
arch_initcall(mtk_pinctrl_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek MT8127 Pinctrl Driver");
-MODULE_AUTHOR("Yingjoe Chen <yingjoe.chen@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8135.c b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
index 1c153b860f36..d8c645f16f21 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8135.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8135.c
@@ -12,7 +12,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -351,7 +351,6 @@ static const struct of_device_id mt8135_pctrl_match[] = {
},
{ }
};
-MODULE_DEVICE_TABLE(of, mt8135_pctrl_match);
static struct platform_driver mtk_pinctrl_driver = {
.probe = mt8135_pinctrl_probe,
@@ -365,9 +364,4 @@ static int __init mtk_pinctrl_init(void)
{
return platform_driver_register(&mtk_pinctrl_driver);
}
-
arch_initcall(mtk_pinctrl_init);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MediaTek Pinctrl Driver");
-MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index a62514eb2129..8bfd427b9135 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -12,7 +12,7 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -378,7 +378,6 @@ static const struct of_device_id mt8173_pctrl_match[] = {
},
{ }
};
-MODULE_DEVICE_TABLE(of, mt8173_pctrl_match);
static struct platform_driver mtk_pinctrl_driver = {
.probe = mt8173_pinctrl_probe,
@@ -393,9 +392,4 @@ static int __init mtk_pinctrl_init(void)
{
return platform_driver_register(&mtk_pinctrl_driver);
}
-
arch_initcall(mtk_pinctrl_init);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek Pinctrl Driver");
-MODULE_AUTHOR("Hongzhou Yang <hongzhou.yang@mediatek.com>");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index e96e86d2e745..2bbe6f7964a7 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -43,10 +43,13 @@
#define MAX_GPIO_MODE_PER_REG 5
#define GPIO_MODE_BITS 3
+#define GPIO_MODE_PREFIX "GPIO"
static const char * const mtk_gpio_functions[] = {
"func0", "func1", "func2", "func3",
"func4", "func5", "func6", "func7",
+ "func8", "func9", "func10", "func11",
+ "func12", "func13", "func14", "func15",
};
/*
@@ -81,6 +84,9 @@ static int mtk_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
bit = BIT(offset & 0xf);
+ if (pctl->devdata->spec_dir_set)
+ pctl->devdata->spec_dir_set(&reg_addr, offset);
+
if (input)
/* Different SoC has different alignment offset. */
reg_addr = CLR_ADDR(reg_addr, pctl);
@@ -677,9 +683,14 @@ static int mtk_pmx_set_mode(struct pinctrl_dev *pctldev,
unsigned int mask = (1L << GPIO_MODE_BITS) - 1;
struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ if (pctl->devdata->spec_pinmux_set)
+ pctl->devdata->spec_pinmux_set(mtk_get_regmap(pctl, pin),
+ pin, mode);
+
reg_addr = ((pin / MAX_GPIO_MODE_PER_REG) << pctl->devdata->port_shf)
+ pctl->devdata->pinmux_offset;
+ mode &= mask;
bit = pin % MAX_GPIO_MODE_PER_REG;
mask <<= (GPIO_MODE_BITS * bit);
val = (mode << (GPIO_MODE_BITS * bit));
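
The new "mode &= mask" line is what makes the extended encoding safe: after the spec hook has consumed the high bits, only the low GPIO_MODE_BITS may reach the register. Worked numbers for pin 22, mode 10 (MAX_GPIO_MODE_PER_REG = 5, GPIO_MODE_BITS = 3):

unsigned int pin = 22, mode = 10;
unsigned int mask = (1 << 3) - 1;	/* 0x7 */
unsigned int bit = pin % 5;		/* 2: third 3-bit field in the register */
mode &= mask;				/* 10 -> 2, high flag bit stripped */
unsigned int val = mode << (3 * bit);	/* 2 << 6 = 0x80 */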
@@ -725,12 +736,48 @@ static int mtk_pmx_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
+static int mtk_pmx_find_gpio_mode(struct mtk_pinctrl *pctl,
+ unsigned offset)
+{
+ const struct mtk_desc_pin *pin = pctl->devdata->pins + offset;
+ const struct mtk_desc_function *func = pin->functions;
+
+ while (func && func->name) {
+ if (!strncmp(func->name, GPIO_MODE_PREFIX,
+ sizeof(GPIO_MODE_PREFIX)-1))
+ return func->muxval;
+ func++;
+ }
+ return -EINVAL;
+}
+
+static int mtk_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ int muxval;
+ struct mtk_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ muxval = mtk_pmx_find_gpio_mode(pctl, offset);
+
+ if (muxval < 0) {
+		dev_err(pctl->dev, "invalid gpio pin %u\n", offset);
+ return -EINVAL;
+ }
+
+ mtk_pmx_set_mode(pctldev, offset, muxval);
+ mtk_pconf_set_ies_smt(pctl, offset, 1, PIN_CONFIG_INPUT_ENABLE);
+
+ return 0;
+}
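
With .gpio_request_enable wired up, a consumer that requests one of these pads as a GPIO gets the mux and input-enable handled automatically; no dedicated pinctrl state is needed just to read the pad. A hedged consumer-side sketch (the "reset" name is a hypothetical DT property):

/* devm_gpiod_get() -> pinctrl_gpio_request() ends up in
 * mtk_pmx_gpio_request_enable(), which muxes the pin to its "GPIOn"
 * function and sets input-enable before the line is first used.
 */
struct gpio_desc *reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_IN);

if (IS_ERR(reset))
	return PTR_ERR(reset);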
+
static const struct pinmux_ops mtk_pmx_ops = {
.get_functions_count = mtk_pmx_get_funcs_cnt,
.get_function_name = mtk_pmx_get_func_name,
.get_function_groups = mtk_pmx_get_func_groups,
.set_mux = mtk_pmx_set_mux,
.gpio_set_direction = mtk_pmx_gpio_set_direction,
+ .gpio_request_enable = mtk_pmx_gpio_request_enable,
};
static int mtk_gpio_direction_input(struct gpio_chip *chip,
@@ -756,6 +803,10 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset;
bit = BIT(offset & 0xf);
+
+ if (pctl->devdata->spec_dir_set)
+ pctl->devdata->spec_dir_set(&reg_addr, offset);
+
regmap_read(pctl->regmap1, reg_addr, &read_val);
return !(read_val & bit);
}
@@ -814,6 +865,10 @@ static int mtk_pinctrl_irq_request_resources(struct irq_data *d)
/* set mux to INT mode */
mtk_pmx_set_mode(pctl->pctl_dev, pin->pin.number, pin->eint.eintmux);
+ /* set gpio direction to input */
+ mtk_pmx_gpio_set_direction(pctl->pctl_dev, NULL, pin->pin.number, true);
+ /* set input-enable */
+ mtk_pconf_set_ies_smt(pctl, pin->pin.number, 1, PIN_CONFIG_INPUT_ENABLE);
return 0;
}
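
The same idea applies on the interrupt side: requesting an EINT now muxes the pad to its INT function, forces the direction to input, and turns on input-enable from irq_request_resources, so a plain request_irq() suffices. An illustrative consumer call (handler and names are hypothetical):

/* All three pad preparations above run from irq_request_resources
 * when this request is made.
 */
err = devm_request_irq(dev, irq, example_handler, IRQF_TRIGGER_FALLING,
		       "example-eint", priv);
if (err)
	return err;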
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
index 55a534338931..8543bc478a1e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -209,7 +209,14 @@ struct mtk_eint_offsets {
* means when user set smt, input enable is set at the same time. So they
* also need special control. If special control is success, this should
* return 0, otherwise return non-zero value.
- *
+ * @spec_pinmux_set: In some cases, two pinmux functions share the same
+ * value in the same segment of the pinmux control register. If the user
+ * wants one of the two functions, an extra bit has to be set to select
+ * the right one.
+ * @spec_dir_set: In a few SoCs, the direction control registers are not
+ * arranged contiguously; they may be split into parts, so they need
+ * special direction handling.
+ *
* @dir_offset: The direction register offset.
* @pullen_offset: The pull-up/pull-down enable register offset.
* @pinmux_offset: The pinmux register offset.
@@ -234,6 +241,9 @@ struct mtk_pinctrl_devdata {
unsigned char align, bool isup, unsigned int arg);
int (*spec_ies_smt_set)(struct regmap *reg, unsigned int pin,
unsigned char align, int value, enum pin_config_param arg);
+ void (*spec_pinmux_set)(struct regmap *reg, unsigned int pin,
+ unsigned int mode);
+ void (*spec_dir_set)(unsigned int *reg_addr, unsigned int pin);
unsigned int dir_offset;
unsigned int ies_offset;
unsigned int smt_offset;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h
new file mode 100644
index 000000000000..f90642078c31
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h
@@ -0,0 +1,2323 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Biao Huang <biao.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_MTK_MT2701_H
+#define __PINCTRL_MTK_MT2701_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
+static const struct mtk_desc_pin mtk_pins_mt2701[] = {
+ MTK_PIN(
+ PINCTRL_PIN(0, "PWRAP_SPI0_MI"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 148),
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "PWRAP_SPIDO"),
+ MTK_FUNCTION(2, "PWRAP_SPIDI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(1, "PWRAP_SPI0_MO"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 149),
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "PWRAP_SPIDI"),
+ MTK_FUNCTION(2, "PWRAP_SPIDO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(2, "PWRAP_INT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 150),
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "PWRAP_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(3, "PWRAP_SPI0_CK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 151),
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "PWRAP_SPICK_I")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(4, "PWRAP_SPI0_CSN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 152),
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "PWRAP_SPICS_B_I")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(5, "PWRAP_SPI0_CK2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 153),
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "PWRAP_SPICK2_I"),
+ MTK_FUNCTION(5, "ANT_SEL1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(6, "PWRAP_SPI0_CSN2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 154),
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "PWRAP_SPICS2_B_I"),
+ MTK_FUNCTION(5, "ANT_SEL0"),
+ MTK_FUNCTION(7, "DBG_MON_A[0]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(7, "SPI1_CSN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 155),
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SPI1_CS"),
+ MTK_FUNCTION(4, "KCOL0"),
+ MTK_FUNCTION(7, "DBG_MON_B[12]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(8, "SPI1_MI"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 156),
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SPI1_MI"),
+ MTK_FUNCTION(2, "SPI1_MO"),
+ MTK_FUNCTION(4, "KCOL1"),
+ MTK_FUNCTION(7, "DBG_MON_B[13]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(9, "SPI1_MO"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 157),
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "SPI1_MO"),
+ MTK_FUNCTION(2, "SPI1_MI"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "KCOL2"),
+ MTK_FUNCTION(7, "DBG_MON_B[14]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(10, "RTC32K_CK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 158),
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(11, "WATCHDOG"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 159),
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(12, "SRCLKENA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 160),
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "SRCLKENA")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(13, "SRCLKENAI"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 161),
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "SRCLKENAI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(14, "URXD2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 162),
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "URXD2"),
+ MTK_FUNCTION(2, "UTXD2"),
+ MTK_FUNCTION(5, "SRCCLKENAI2"),
+ MTK_FUNCTION(7, "DBG_MON_B[30]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(15, "UTXD2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 163),
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "UTXD2"),
+ MTK_FUNCTION(2, "URXD2"),
+ MTK_FUNCTION(7, "DBG_MON_B[31]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(16, "I2S5_DATA_IN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 164),
+ MTK_FUNCTION(0, "GPIO16"),
+ MTK_FUNCTION(1, "I2S5_DATA_IN"),
+ MTK_FUNCTION(3, "PCM_RX"),
+ MTK_FUNCTION(4, "ANT_SEL4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(17, "I2S5_BCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 165),
+ MTK_FUNCTION(0, "GPIO17"),
+ MTK_FUNCTION(1, "I2S5_BCK"),
+ MTK_FUNCTION(3, "PCM_CLK0"),
+ MTK_FUNCTION(4, "ANT_SEL2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(18, "PCM_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 166),
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "PCM_CLK0"),
+ MTK_FUNCTION(2, "MRG_CLK"),
+ MTK_FUNCTION(4, "MM_TEST_CK"),
+ MTK_FUNCTION(5, "CONN_DSP_JCK"),
+ MTK_FUNCTION(6, "WCN_PCM_CLKO"),
+ MTK_FUNCTION(7, "DBG_MON_A[3]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(19, "PCM_SYNC"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 167),
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "PCM_SYNC"),
+ MTK_FUNCTION(2, "MRG_SYNC"),
+ MTK_FUNCTION(5, "CONN_DSP_JINTP"),
+ MTK_FUNCTION(6, "WCN_PCM_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_A[5]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(20, "PCM_RX"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "PCM_RX"),
+ MTK_FUNCTION(2, "MRG_RX"),
+ MTK_FUNCTION(3, "MRG_TX"),
+ MTK_FUNCTION(4, "PCM_TX"),
+ MTK_FUNCTION(5, "CONN_DSP_JDI"),
+ MTK_FUNCTION(6, "WCN_PCM_RX"),
+ MTK_FUNCTION(7, "DBG_MON_A[4]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(21, "PCM_TX"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "PCM_TX"),
+ MTK_FUNCTION(2, "MRG_TX"),
+ MTK_FUNCTION(3, "MRG_RX"),
+ MTK_FUNCTION(4, "PCM_RX"),
+ MTK_FUNCTION(5, "CONN_DSP_JMS"),
+ MTK_FUNCTION(6, "WCN_PCM_TX"),
+ MTK_FUNCTION(7, "DBG_MON_A[2]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(22, "EINT0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 0),
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "UCTS0"),
+ MTK_FUNCTION(3, "KCOL3"),
+ MTK_FUNCTION(4, "CONN_DSP_JDO"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_A[30]"),
+ MTK_FUNCTION(10, "PCIE0_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(23, "EINT1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 1),
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "URTS0"),
+ MTK_FUNCTION(3, "KCOL2"),
+ MTK_FUNCTION(4, "CONN_MCU_TDO"),
+ MTK_FUNCTION(5, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_A[29]"),
+ MTK_FUNCTION(10, "PCIE1_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(24, "EINT2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 2),
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(3, "KCOL1"),
+ MTK_FUNCTION(4, "CONN_MCU_DBGACK_N"),
+ MTK_FUNCTION(7, "DBG_MON_A[28]"),
+ MTK_FUNCTION(10, "PCIE2_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(25, "EINT3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 3),
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "URTS1"),
+ MTK_FUNCTION(3, "KCOL0"),
+ MTK_FUNCTION(4, "CONN_MCU_DBGI_N"),
+ MTK_FUNCTION(7, "DBG_MON_A[27]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(26, "EINT4"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 4),
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "UCTS3"),
+ MTK_FUNCTION(2, "DRV_VBUS_P1"),
+ MTK_FUNCTION(3, "KROW3"),
+ MTK_FUNCTION(4, "CONN_MCU_TCK0"),
+ MTK_FUNCTION(5, "CONN_MCU_AICE_JCKC"),
+ MTK_FUNCTION(6, "PCIE2_WAKE_N"),
+ MTK_FUNCTION(7, "DBG_MON_A[26]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(27, "EINT5"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 5),
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "URTS3"),
+ MTK_FUNCTION(2, "IDDIG_P1"),
+ MTK_FUNCTION(3, "KROW2"),
+ MTK_FUNCTION(4, "CONN_MCU_TDI"),
+ MTK_FUNCTION(6, "PCIE1_WAKE_N"),
+ MTK_FUNCTION(7, "DBG_MON_A[25]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(28, "EINT6"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 6),
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "DRV_VBUS"),
+ MTK_FUNCTION(3, "KROW1"),
+ MTK_FUNCTION(4, "CONN_MCU_TRST_B"),
+ MTK_FUNCTION(6, "PCIE0_WAKE_N"),
+ MTK_FUNCTION(7, "DBG_MON_A[24]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(29, "EINT7"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 7),
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "MSDC1_WP"),
+ MTK_FUNCTION(3, "KROW0"),
+ MTK_FUNCTION(4, "CONN_MCU_TMS"),
+ MTK_FUNCTION(5, "CONN_MCU_AICE_JMSC"),
+ MTK_FUNCTION(7, "DBG_MON_A[23]"),
+ MTK_FUNCTION(14, "PCIE2_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(30, "I2S5_LRCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 12),
+ MTK_FUNCTION(0, "GPIO30"),
+ MTK_FUNCTION(1, "I2S5_LRCK"),
+ MTK_FUNCTION(3, "PCM_SYNC"),
+ MTK_FUNCTION(4, "ANT_SEL1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(31, "I2S5_MCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 13),
+ MTK_FUNCTION(0, "GPIO31"),
+ MTK_FUNCTION(1, "I2S5_MCLK"),
+ MTK_FUNCTION(4, "ANT_SEL0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(32, "I2S5_DATA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 14),
+ MTK_FUNCTION(0, "GPIO32"),
+ MTK_FUNCTION(1, "I2S5_DATA"),
+ MTK_FUNCTION(2, "I2S5_DATA_BYPS"),
+ MTK_FUNCTION(3, "PCM_TX"),
+ MTK_FUNCTION(4, "ANT_SEL3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(33, "I2S1_DATA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 15),
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "I2S1_DATA"),
+ MTK_FUNCTION(2, "I2S1_DATA_BYPS"),
+ MTK_FUNCTION(3, "PCM_TX"),
+ MTK_FUNCTION(4, "IMG_TEST_CK"),
+ MTK_FUNCTION(5, "G1_RXD0"),
+ MTK_FUNCTION(6, "WCN_PCM_TX"),
+ MTK_FUNCTION(7, "DBG_MON_B[8]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(34, "I2S1_DATA_IN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 16),
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "I2S1_DATA_IN"),
+ MTK_FUNCTION(3, "PCM_RX"),
+ MTK_FUNCTION(4, "VDEC_TEST_CK"),
+ MTK_FUNCTION(5, "G1_RXD1"),
+ MTK_FUNCTION(6, "WCN_PCM_RX"),
+ MTK_FUNCTION(7, "DBG_MON_B[7]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(35, "I2S1_BCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 17),
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "I2S1_BCK"),
+ MTK_FUNCTION(3, "PCM_CLK0"),
+ MTK_FUNCTION(5, "G1_RXD2"),
+ MTK_FUNCTION(6, "WCN_PCM_CLKO"),
+ MTK_FUNCTION(7, "DBG_MON_B[9]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(36, "I2S1_LRCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 18),
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "I2S1_LRCK"),
+ MTK_FUNCTION(3, "PCM_SYNC"),
+ MTK_FUNCTION(5, "G1_RXD3"),
+ MTK_FUNCTION(6, "WCN_PCM_SYNC"),
+ MTK_FUNCTION(7, "DBG_MON_B[10]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(37, "I2S1_MCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 19),
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "I2S1_MCLK"),
+ MTK_FUNCTION(5, "G1_RXDV"),
+ MTK_FUNCTION(7, "DBG_MON_B[11]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(38, "I2S2_DATA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 20),
+ MTK_FUNCTION(0, "GPIO38"),
+ MTK_FUNCTION(2, "I2S2_DATA_BYPS"),
+ MTK_FUNCTION(3, "PCM_TX"),
+ MTK_FUNCTION(4, "DMIC_DAT0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(39, "JTMS"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 21),
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "JTMS"),
+ MTK_FUNCTION(2, "CONN_MCU_TMS"),
+ MTK_FUNCTION(3, "CONN_MCU_AICE_JMSC"),
+ MTK_FUNCTION(4, "DFD_TMS_XI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(40, "JTCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 22),
+ MTK_FUNCTION(0, "GPIO40"),
+ MTK_FUNCTION(1, "JTCK"),
+ MTK_FUNCTION(2, "CONN_MCU_TCK1"),
+ MTK_FUNCTION(3, "CONN_MCU_AICE_JCKC"),
+ MTK_FUNCTION(4, "DFD_TCK_XI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(41, "JTDI"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 23),
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "JTDI"),
+ MTK_FUNCTION(2, "CONN_MCU_TDI"),
+ MTK_FUNCTION(4, "DFD_TDI_XI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(42, "JTDO"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 24),
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "JTDO"),
+ MTK_FUNCTION(2, "CONN_MCU_TDO"),
+ MTK_FUNCTION(4, "DFD_TDO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(43, "NCLE"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 25),
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "NCLE"),
+ MTK_FUNCTION(2, "EXT_XCS2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(44, "NCEB1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 26),
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "NCEB1"),
+ MTK_FUNCTION(2, "IDDIG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(45, "NCEB0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 27),
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "NCEB0"),
+ MTK_FUNCTION(2, "DRV_VBUS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(46, "IR"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 28),
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "IR")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(47, "NREB"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 29),
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "NREB"),
+ MTK_FUNCTION(2, "IDDIG_P1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(48, "NRNB"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 30),
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "NRNB"),
+ MTK_FUNCTION(2, "DRV_VBUS_P1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(49, "I2S0_DATA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 31),
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "I2S0_DATA"),
+ MTK_FUNCTION(2, "I2S0_DATA_BYPS"),
+ MTK_FUNCTION(3, "PCM_TX"),
+ MTK_FUNCTION(6, "WCN_I2S_DO"),
+ MTK_FUNCTION(7, "DBG_MON_B[3]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(50, "I2S2_BCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 32),
+ MTK_FUNCTION(0, "GPIO50"),
+ MTK_FUNCTION(1, "I2S2_BCK"),
+ MTK_FUNCTION(3, "PCM_CLK0"),
+ MTK_FUNCTION(4, "DMIC_SCK1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(51, "I2S2_DATA_IN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 33),
+ MTK_FUNCTION(0, "GPIO51"),
+ MTK_FUNCTION(1, "I2S2_DATA_IN"),
+ MTK_FUNCTION(3, "PCM_RX"),
+ MTK_FUNCTION(4, "DMIC_SCK0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(52, "I2S2_LRCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 34),
+ MTK_FUNCTION(0, "GPIO52"),
+ MTK_FUNCTION(1, "I2S2_LRCK"),
+ MTK_FUNCTION(3, "PCM_SYNC"),
+ MTK_FUNCTION(4, "DMIC_DAT1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(53, "SPI0_CSN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 35),
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "SPI0_CS"),
+ MTK_FUNCTION(3, "SPDIF"),
+ MTK_FUNCTION(4, "ADC_CK"),
+ MTK_FUNCTION(5, "PWM1"),
+ MTK_FUNCTION(7, "DBG_MON_A[7]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(54, "SPI0_CK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 36),
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "SPI0_CK"),
+ MTK_FUNCTION(3, "SPDIF_IN1"),
+ MTK_FUNCTION(4, "ADC_DAT_IN"),
+ MTK_FUNCTION(7, "DBG_MON_A[10]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(55, "SPI0_MI"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 37),
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "SPI0_MI"),
+ MTK_FUNCTION(2, "SPI0_MO"),
+ MTK_FUNCTION(3, "MSDC1_WP"),
+ MTK_FUNCTION(4, "ADC_WS"),
+ MTK_FUNCTION(5, "PWM2"),
+ MTK_FUNCTION(7, "DBG_MON_A[8]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(56, "SPI0_MO"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 38),
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "SPI0_MO"),
+ MTK_FUNCTION(2, "SPI0_MI"),
+ MTK_FUNCTION(3, "SPDIF_IN0"),
+ MTK_FUNCTION(7, "DBG_MON_A[9]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(57, "SDA1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 39),
+ MTK_FUNCTION(0, "GPIO57"),
+ MTK_FUNCTION(1, "SDA1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(58, "SCL1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 40),
+ MTK_FUNCTION(0, "GPIO58"),
+ MTK_FUNCTION(1, "SCL1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(59, "RAMBUF_I_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO59"),
+ MTK_FUNCTION(1, "RAMBUF_I_CLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(60, "WB_RSTB"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 41),
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "WB_RSTB"),
+ MTK_FUNCTION(7, "DBG_MON_A[11]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(61, "F2W_DATA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 42),
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "F2W_DATA"),
+ MTK_FUNCTION(7, "DBG_MON_A[16]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(62, "F2W_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 43),
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "F2W_CK"),
+ MTK_FUNCTION(7, "DBG_MON_A[15]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(63, "WB_SCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 44),
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "WB_SCLK"),
+ MTK_FUNCTION(7, "DBG_MON_A[13]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(64, "WB_SDATA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 45),
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "WB_SDATA"),
+ MTK_FUNCTION(7, "DBG_MON_A[12]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(65, "WB_SEN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 46),
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "WB_SEN"),
+ MTK_FUNCTION(7, "DBG_MON_A[14]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(66, "WB_CRTL0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 47),
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "WB_CRTL0"),
+ MTK_FUNCTION(5, "DFD_NTRST_XI"),
+ MTK_FUNCTION(7, "DBG_MON_A[17]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(67, "WB_CRTL1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 48),
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "WB_CRTL1"),
+ MTK_FUNCTION(5, "DFD_TMS_XI"),
+ MTK_FUNCTION(7, "DBG_MON_A[18]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(68, "WB_CRTL2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 49),
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "WB_CRTL2"),
+ MTK_FUNCTION(5, "DFD_TCK_XI"),
+ MTK_FUNCTION(7, "DBG_MON_A[19]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(69, "WB_CRTL3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 50),
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "WB_CRTL3"),
+ MTK_FUNCTION(5, "DFD_TDI_XI"),
+ MTK_FUNCTION(7, "DBG_MON_A[20]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(70, "WB_CRTL4"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 51),
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "WB_CRTL4"),
+ MTK_FUNCTION(5, "DFD_TDO"),
+ MTK_FUNCTION(7, "DBG_MON_A[21]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(71, "WB_CRTL5"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 52),
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "WB_CRTL5"),
+ MTK_FUNCTION(7, "DBG_MON_A[22]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(72, "I2S0_DATA_IN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 53),
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "I2S0_DATA_IN"),
+ MTK_FUNCTION(3, "PCM_RX"),
+ MTK_FUNCTION(4, "PWM0"),
+ MTK_FUNCTION(5, "DISP_PWM"),
+ MTK_FUNCTION(6, "WCN_I2S_DI"),
+ MTK_FUNCTION(7, "DBG_MON_B[2]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(73, "I2S0_LRCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 54),
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "I2S0_LRCK"),
+ MTK_FUNCTION(3, "PCM_SYNC"),
+ MTK_FUNCTION(6, "WCN_I2S_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[5]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(74, "I2S0_BCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 55),
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "I2S0_BCK"),
+ MTK_FUNCTION(3, "PCM_CLK0"),
+ MTK_FUNCTION(6, "WCN_I2S_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[4]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(75, "SDA0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 56),
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "SDA0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(76, "SCL0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 57),
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "SCL0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(77, "SDA2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 58),
+ MTK_FUNCTION(0, "GPIO77"),
+ MTK_FUNCTION(1, "SDA2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(78, "SCL2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 59),
+ MTK_FUNCTION(0, "GPIO78"),
+ MTK_FUNCTION(1, "SCL2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(79, "URXD0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 60),
+ MTK_FUNCTION(0, "GPIO79"),
+ MTK_FUNCTION(1, "URXD0"),
+ MTK_FUNCTION(2, "UTXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(80, "UTXD0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 61),
+ MTK_FUNCTION(0, "GPIO80"),
+ MTK_FUNCTION(1, "UTXD0"),
+ MTK_FUNCTION(2, "URXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(81, "URXD1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 62),
+ MTK_FUNCTION(0, "GPIO81"),
+ MTK_FUNCTION(1, "URXD1"),
+ MTK_FUNCTION(2, "UTXD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(82, "UTXD1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 63),
+ MTK_FUNCTION(0, "GPIO82"),
+ MTK_FUNCTION(1, "UTXD1"),
+ MTK_FUNCTION(2, "URXD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(83, "LCM_RST"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 64),
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "LCM_RST"),
+ MTK_FUNCTION(2, "VDAC_CK_XI"),
+ MTK_FUNCTION(7, "DBG_MON_B[1]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(84, "DSI_TE"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 65),
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "DSI_TE"),
+ MTK_FUNCTION(7, "DBG_MON_B[0]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(85, "MSDC2_CMD"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 66),
+ MTK_FUNCTION(0, "GPIO85"),
+ MTK_FUNCTION(1, "MSDC2_CMD"),
+ MTK_FUNCTION(2, "ANT_SEL0"),
+ MTK_FUNCTION(3, "SDA1"),
+ MTK_FUNCTION(6, "I2SOUT_BCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(86, "MSDC2_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 67),
+ MTK_FUNCTION(0, "GPIO86"),
+ MTK_FUNCTION(1, "MSDC2_CLK"),
+ MTK_FUNCTION(2, "ANT_SEL1"),
+ MTK_FUNCTION(3, "SCL1"),
+ MTK_FUNCTION(6, "I2SOUT_LRCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(87, "MSDC2_DAT0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 68),
+ MTK_FUNCTION(0, "GPIO87"),
+ MTK_FUNCTION(1, "MSDC2_DAT0"),
+ MTK_FUNCTION(2, "ANT_SEL2"),
+ MTK_FUNCTION(5, "UTXD0"),
+ MTK_FUNCTION(6, "I2SOUT_DATA_OUT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(88, "MSDC2_DAT1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 71),
+ MTK_FUNCTION(0, "GPIO88"),
+ MTK_FUNCTION(1, "MSDC2_DAT1"),
+ MTK_FUNCTION(2, "ANT_SEL3"),
+ MTK_FUNCTION(3, "PWM0"),
+ MTK_FUNCTION(5, "URXD0"),
+ MTK_FUNCTION(6, "PWM1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(89, "MSDC2_DAT2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 72),
+ MTK_FUNCTION(0, "GPIO89"),
+ MTK_FUNCTION(1, "MSDC2_DAT2"),
+ MTK_FUNCTION(2, "ANT_SEL4"),
+ MTK_FUNCTION(3, "SDA2"),
+ MTK_FUNCTION(5, "UTXD1"),
+ MTK_FUNCTION(6, "PWM2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(90, "MSDC2_DAT3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 73),
+ MTK_FUNCTION(0, "GPIO90"),
+ MTK_FUNCTION(1, "MSDC2_DAT3"),
+ MTK_FUNCTION(2, "ANT_SEL5"),
+ MTK_FUNCTION(3, "SCL2"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "URXD1"),
+ MTK_FUNCTION(6, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(91, "TDN3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI91"),
+ MTK_FUNCTION(1, "TDN3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(92, "TDP3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI92"),
+ MTK_FUNCTION(1, "TDP3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(93, "TDN2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI93"),
+ MTK_FUNCTION(1, "TDN2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(94, "TDP2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI94"),
+ MTK_FUNCTION(1, "TDP2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(95, "TCN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI95"),
+ MTK_FUNCTION(1, "TCN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(96, "TCP"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI96"),
+ MTK_FUNCTION(1, "TCP")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(97, "TDN1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI97"),
+ MTK_FUNCTION(1, "TDN1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(98, "TDP1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI98"),
+ MTK_FUNCTION(1, "TDP1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(99, "TDN0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI99"),
+ MTK_FUNCTION(1, "TDN0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(100, "TDP0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPI100"),
+ MTK_FUNCTION(1, "TDP0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(101, "SPI2_CSN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 74),
+ MTK_FUNCTION(0, "GPIO101"),
+ MTK_FUNCTION(1, "SPI2_CS"),
+ MTK_FUNCTION(3, "SCL3"),
+ MTK_FUNCTION(4, "KROW0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(102, "SPI2_MI"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 75),
+ MTK_FUNCTION(0, "GPIO102"),
+ MTK_FUNCTION(1, "SPI2_MI"),
+ MTK_FUNCTION(2, "SPI2_MO"),
+ MTK_FUNCTION(3, "SDA3"),
+ MTK_FUNCTION(4, "KROW1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(103, "SPI2_MO"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 76),
+ MTK_FUNCTION(0, "GPIO103"),
+ MTK_FUNCTION(1, "SPI2_MO"),
+ MTK_FUNCTION(2, "SPI2_MI"),
+ MTK_FUNCTION(3, "SCL3"),
+ MTK_FUNCTION(4, "KROW2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(104, "SPI2_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 77),
+ MTK_FUNCTION(0, "GPIO104"),
+ MTK_FUNCTION(1, "SPI2_CK"),
+ MTK_FUNCTION(3, "SDA3"),
+ MTK_FUNCTION(4, "KROW3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(105, "MSDC1_CMD"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 78),
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(2, "ANT_SEL0"),
+ MTK_FUNCTION(3, "SDA1"),
+ MTK_FUNCTION(6, "I2SOUT_BCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[27]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(106, "MSDC1_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 79),
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(2, "ANT_SEL1"),
+ MTK_FUNCTION(3, "SCL1"),
+ MTK_FUNCTION(6, "I2SOUT_LRCK"),
+ MTK_FUNCTION(7, "DBG_MON_B[28]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(107, "MSDC1_DAT0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 80),
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(2, "ANT_SEL2"),
+ MTK_FUNCTION(5, "UTXD0"),
+ MTK_FUNCTION(6, "I2SOUT_DATA_OUT"),
+ MTK_FUNCTION(7, "DBG_MON_B[26]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(108, "MSDC1_DAT1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 81),
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(2, "ANT_SEL3"),
+ MTK_FUNCTION(3, "PWM0"),
+ MTK_FUNCTION(5, "URXD0"),
+ MTK_FUNCTION(6, "PWM1"),
+ MTK_FUNCTION(7, "DBG_MON_B[25]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(109, "MSDC1_DAT2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 82),
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(2, "ANT_SEL4"),
+ MTK_FUNCTION(3, "SDA2"),
+ MTK_FUNCTION(5, "UTXD1"),
+ MTK_FUNCTION(6, "PWM2"),
+ MTK_FUNCTION(7, "DBG_MON_B[24]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(110, "MSDC1_DAT3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 83),
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(2, "ANT_SEL5"),
+ MTK_FUNCTION(3, "SCL2"),
+ MTK_FUNCTION(4, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "URXD1"),
+ MTK_FUNCTION(6, "PWM3"),
+ MTK_FUNCTION(7, "DBG_MON_B[23]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(111, "MSDC0_DAT7"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 84),
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(4, "NLD7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(112, "MSDC0_DAT6"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 85),
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(4, "NLD6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(113, "MSDC0_DAT5"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 86),
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(4, "NLD5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(114, "MSDC0_DAT4"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 87),
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(4, "NLD4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(115, "MSDC0_RSTB"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 88),
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "MSDC0_RSTB"),
+ MTK_FUNCTION(4, "NLD8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(116, "MSDC0_CMD"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 89),
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "MSDC0_CMD"),
+ MTK_FUNCTION(4, "NALE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(117, "MSDC0_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 90),
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(4, "NWEB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(118, "MSDC0_DAT3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 91),
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(4, "NLD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(119, "MSDC0_DAT2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 92),
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(4, "NLD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(120, "MSDC0_DAT1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 93),
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(4, "NLD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(121, "MSDC0_DAT0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 94),
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(4, "NLD0"),
+ MTK_FUNCTION(5, "WATCHDOG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(122, "CEC"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 95),
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "CEC"),
+ MTK_FUNCTION(4, "SDA2"),
+ MTK_FUNCTION(5, "URXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(123, "HTPLG"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 96),
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "HTPLG"),
+ MTK_FUNCTION(4, "SCL2"),
+ MTK_FUNCTION(5, "UTXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(124, "HDMISCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 97),
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "HDMISCK"),
+ MTK_FUNCTION(4, "SDA1"),
+ MTK_FUNCTION(5, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(125, "HDMISD"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 98),
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "HDMISD"),
+ MTK_FUNCTION(4, "SCL1"),
+ MTK_FUNCTION(5, "PWM4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(126, "I2S0_MCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 99),
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "I2S0_MCLK"),
+ MTK_FUNCTION(6, "WCN_I2S_MCLK"),
+ MTK_FUNCTION(7, "DBG_MON_B[6]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(127, "RAMBUF_IDATA0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO127"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(128, "RAMBUF_IDATA1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO128"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(129, "RAMBUF_IDATA2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO129"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(130, "RAMBUF_IDATA3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO130"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(131, "RAMBUF_IDATA4"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO131"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(132, "RAMBUF_IDATA5"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO132"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(133, "RAMBUF_IDATA6"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO133"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(134, "RAMBUF_IDATA7"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO134"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(135, "RAMBUF_IDATA8"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO135"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(136, "RAMBUF_IDATA9"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO136"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA9")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(137, "RAMBUF_IDATA10"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO137"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA10")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(138, "RAMBUF_IDATA11"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO138"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA11")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(139, "RAMBUF_IDATA12"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO139"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA12")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(140, "RAMBUF_IDATA13"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO140"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA13")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(141, "RAMBUF_IDATA14"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO141"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA14")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(142, "RAMBUF_IDATA15"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO142"),
+ MTK_FUNCTION(1, "RAMBUF_IDATA15")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(143, "RAMBUF_ODATA0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO143"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(144, "RAMBUF_ODATA1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO144"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(145, "RAMBUF_ODATA2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO145"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(146, "RAMBUF_ODATA3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO146"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(147, "RAMBUF_ODATA4"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO147"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(148, "RAMBUF_ODATA5"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO148"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(149, "RAMBUF_ODATA6"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO149"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(150, "RAMBUF_ODATA7"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO150"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(151, "RAMBUF_ODATA8"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO151"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(152, "RAMBUF_ODATA9"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO152"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA9")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(153, "RAMBUF_ODATA10"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO153"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA10")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(154, "RAMBUF_ODATA11"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO154"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA11")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(155, "RAMBUF_ODATA12"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO155"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA12")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(156, "RAMBUF_ODATA13"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO156"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA13")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(157, "RAMBUF_ODATA14"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO157"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA14")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(158, "RAMBUF_ODATA15"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO158"),
+ MTK_FUNCTION(1, "RAMBUF_ODATA15")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(159, "RAMBUF_BE0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO159"),
+ MTK_FUNCTION(1, "RAMBUF_BE0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(160, "RAMBUF_BE1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO160"),
+ MTK_FUNCTION(1, "RAMBUF_BE1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(161, "AP2PT_INT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO161"),
+ MTK_FUNCTION(1, "AP2PT_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(162, "AP2PT_INT_CLR"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO162"),
+ MTK_FUNCTION(1, "AP2PT_INT_CLR")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(163, "PT2AP_INT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO163"),
+ MTK_FUNCTION(1, "PT2AP_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(164, "PT2AP_INT_CLR"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO164"),
+ MTK_FUNCTION(1, "PT2AP_INT_CLR")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(165, "AP2UP_INT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO165"),
+ MTK_FUNCTION(1, "AP2UP_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(166, "AP2UP_INT_CLR"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO166"),
+ MTK_FUNCTION(1, "AP2UP_INT_CLR")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(167, "UP2AP_INT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO167"),
+ MTK_FUNCTION(1, "UP2AP_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(168, "UP2AP_INT_CLR"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO168"),
+ MTK_FUNCTION(1, "UP2AP_INT_CLR")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(169, "RAMBUF_ADDR0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO169"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(170, "RAMBUF_ADDR1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO170"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(171, "RAMBUF_ADDR2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO171"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(172, "RAMBUF_ADDR3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO172"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(173, "RAMBUF_ADDR4"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO173"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(174, "RAMBUF_ADDR5"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO174"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(175, "RAMBUF_ADDR6"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO175"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(176, "RAMBUF_ADDR7"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO176"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(177, "RAMBUF_ADDR8"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO177"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(178, "RAMBUF_ADDR9"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO178"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR9")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(179, "RAMBUF_ADDR10"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO179"),
+ MTK_FUNCTION(1, "RAMBUF_ADDR10")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(180, "RAMBUF_RW"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO180"),
+ MTK_FUNCTION(1, "RAMBUF_RW")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(181, "RAMBUF_LAST"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO181"),
+ MTK_FUNCTION(1, "RAMBUF_LAST")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(182, "RAMBUF_HP"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO182"),
+ MTK_FUNCTION(1, "RAMBUF_HP")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(183, "RAMBUF_REQ"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO183"),
+ MTK_FUNCTION(1, "RAMBUF_REQ")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(184, "RAMBUF_ALE"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO184"),
+ MTK_FUNCTION(1, "RAMBUF_ALE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(185, "RAMBUF_DLE"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO185"),
+ MTK_FUNCTION(1, "RAMBUF_DLE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(186, "RAMBUF_WDLE"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO186"),
+ MTK_FUNCTION(1, "RAMBUF_WDLE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(187, "RAMBUF_O_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO187"),
+ MTK_FUNCTION(1, "RAMBUF_O_CLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(188, "I2S2_MCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 100),
+ MTK_FUNCTION(0, "GPIO188"),
+ MTK_FUNCTION(1, "I2S2_MCLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(189, "I2S3_DATA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 101),
+ MTK_FUNCTION(0, "GPIO189"),
+ MTK_FUNCTION(2, "I2S3_DATA_BYPS"),
+ MTK_FUNCTION(3, "PCM_TX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(190, "I2S3_DATA_IN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 102),
+ MTK_FUNCTION(0, "GPIO190"),
+ MTK_FUNCTION(1, "I2S3_DATA_IN"),
+ MTK_FUNCTION(3, "PCM_RX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(191, "I2S3_BCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 103),
+ MTK_FUNCTION(0, "GPIO191"),
+ MTK_FUNCTION(1, "I2S3_BCK"),
+ MTK_FUNCTION(3, "PCM_CLK0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(192, "I2S3_LRCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 104),
+ MTK_FUNCTION(0, "GPIO192"),
+ MTK_FUNCTION(1, "I2S3_LRCK"),
+ MTK_FUNCTION(3, "PCM_SYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(193, "I2S3_MCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 105),
+ MTK_FUNCTION(0, "GPIO193"),
+ MTK_FUNCTION(1, "I2S3_MCLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(194, "I2S4_DATA"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 106),
+ MTK_FUNCTION(0, "GPIO194"),
+ MTK_FUNCTION(1, "I2S4_DATA"),
+ MTK_FUNCTION(2, "I2S4_DATA_BYPS"),
+ MTK_FUNCTION(3, "PCM_TX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(195, "I2S4_DATA_IN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 107),
+ MTK_FUNCTION(0, "GPIO195"),
+ MTK_FUNCTION(1, "I2S4_DATA_IN"),
+ MTK_FUNCTION(3, "PCM_RX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(196, "I2S4_BCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 108),
+ MTK_FUNCTION(0, "GPIO196"),
+ MTK_FUNCTION(1, "I2S4_BCK"),
+ MTK_FUNCTION(3, "PCM_CLK0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(197, "I2S4_LRCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 109),
+ MTK_FUNCTION(0, "GPIO197"),
+ MTK_FUNCTION(1, "I2S4_LRCK"),
+ MTK_FUNCTION(3, "PCM_SYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(198, "I2S4_MCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 110),
+ MTK_FUNCTION(0, "GPIO198"),
+ MTK_FUNCTION(1, "I2S4_MCLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(199, "SPI1_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 111),
+ MTK_FUNCTION(0, "GPIO199"),
+ MTK_FUNCTION(1, "SPI1_CK"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(4, "KCOL3"),
+ MTK_FUNCTION(7, "DBG_MON_B[15]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(200, "SPDIF_OUT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 112),
+ MTK_FUNCTION(0, "GPIO200"),
+ MTK_FUNCTION(1, "SPDIF_OUT"),
+ MTK_FUNCTION(5, "G1_TXD3"),
+ MTK_FUNCTION(6, "URXD2"),
+ MTK_FUNCTION(7, "DBG_MON_B[16]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(201, "SPDIF_IN0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 113),
+ MTK_FUNCTION(0, "GPIO201"),
+ MTK_FUNCTION(1, "SPDIF_IN0"),
+ MTK_FUNCTION(5, "G1_TXEN"),
+ MTK_FUNCTION(6, "UTXD2"),
+ MTK_FUNCTION(7, "DBG_MON_B[17]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(202, "SPDIF_IN1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 114),
+ MTK_FUNCTION(0, "GPIO202"),
+ MTK_FUNCTION(1, "SPDIF_IN1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(203, "PWM0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 115),
+ MTK_FUNCTION(0, "GPIO203"),
+ MTK_FUNCTION(1, "PWM0"),
+ MTK_FUNCTION(2, "DISP_PWM"),
+ MTK_FUNCTION(5, "G1_TXD2"),
+ MTK_FUNCTION(7, "DBG_MON_B[18]"),
+ MTK_FUNCTION(9, "I2S2_DATA")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(204, "PWM1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 116),
+ MTK_FUNCTION(0, "GPIO204"),
+ MTK_FUNCTION(1, "PWM1"),
+ MTK_FUNCTION(2, "CLKM3"),
+ MTK_FUNCTION(5, "G1_TXD1"),
+ MTK_FUNCTION(7, "DBG_MON_B[19]"),
+ MTK_FUNCTION(9, "I2S3_DATA")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(205, "PWM2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 117),
+ MTK_FUNCTION(0, "GPIO205"),
+ MTK_FUNCTION(1, "PWM2"),
+ MTK_FUNCTION(2, "CLKM2"),
+ MTK_FUNCTION(5, "G1_TXD0"),
+ MTK_FUNCTION(7, "DBG_MON_B[20]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(206, "PWM3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 118),
+ MTK_FUNCTION(0, "GPIO206"),
+ MTK_FUNCTION(1, "PWM3"),
+ MTK_FUNCTION(2, "CLKM1"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "G1_TXC"),
+ MTK_FUNCTION(7, "DBG_MON_B[21]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(207, "PWM4"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 119),
+ MTK_FUNCTION(0, "GPIO207"),
+ MTK_FUNCTION(1, "PWM4"),
+ MTK_FUNCTION(2, "CLKM0"),
+ MTK_FUNCTION(3, "EXT_FRAME_SYNC"),
+ MTK_FUNCTION(5, "G1_RXC"),
+ MTK_FUNCTION(7, "DBG_MON_B[22]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(208, "AUD_EXT_CK1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 120),
+ MTK_FUNCTION(0, "GPIO208"),
+ MTK_FUNCTION(1, "AUD_EXT_CK1"),
+ MTK_FUNCTION(2, "PWM0"),
+ MTK_FUNCTION(4, "ANT_SEL5"),
+ MTK_FUNCTION(5, "DISP_PWM"),
+ MTK_FUNCTION(7, "DBG_MON_A[31]"),
+ MTK_FUNCTION(11, "PCIE0_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(209, "AUD_EXT_CK2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 121),
+ MTK_FUNCTION(0, "GPIO209"),
+ MTK_FUNCTION(1, "AUD_EXT_CK2"),
+ MTK_FUNCTION(2, "MSDC1_WP"),
+ MTK_FUNCTION(5, "PWM1"),
+ MTK_FUNCTION(7, "DBG_MON_A[32]"),
+ MTK_FUNCTION(11, "PCIE1_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(210, "AUD_CLOCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO210"),
+ MTK_FUNCTION(1, "AUD_CLOCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(211, "DVP_RESET"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO211"),
+ MTK_FUNCTION(1, "DVP_RESET")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(212, "DVP_CLOCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO212"),
+ MTK_FUNCTION(1, "DVP_CLOCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(213, "DVP_CS"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO213"),
+ MTK_FUNCTION(1, "DVP_CS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(214, "DVP_CK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO214"),
+ MTK_FUNCTION(1, "DVP_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(215, "DVP_DI"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO215"),
+ MTK_FUNCTION(1, "DVP_DI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(216, "DVP_DO"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO216"),
+ MTK_FUNCTION(1, "DVP_DO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(217, "AP_CS"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO217"),
+ MTK_FUNCTION(1, "AP_CS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(218, "AP_CK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO218"),
+ MTK_FUNCTION(1, "AP_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(219, "AP_DI"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO219"),
+ MTK_FUNCTION(1, "AP_DI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(220, "AP_DO"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO220"),
+ MTK_FUNCTION(1, "AP_DO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(221, "DVD_BCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO221"),
+ MTK_FUNCTION(1, "DVD_BCLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(222, "T8032_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO222"),
+ MTK_FUNCTION(1, "T8032_CLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(223, "AP_BCLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO223"),
+ MTK_FUNCTION(1, "AP_BCLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(224, "HOST_CS"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO224"),
+ MTK_FUNCTION(1, "HOST_CS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(225, "HOST_CK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO225"),
+ MTK_FUNCTION(1, "HOST_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(226, "HOST_DO0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO226"),
+ MTK_FUNCTION(1, "HOST_DO0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(227, "HOST_DO1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO227"),
+ MTK_FUNCTION(1, "HOST_DO1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(228, "SLV_CS"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO228"),
+ MTK_FUNCTION(1, "SLV_CS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(229, "SLV_CK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO229"),
+ MTK_FUNCTION(1, "SLV_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(230, "SLV_DI0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO230"),
+ MTK_FUNCTION(1, "SLV_DI0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(231, "SLV_DI1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO231"),
+ MTK_FUNCTION(1, "SLV_DI1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(232, "AP2DSP_INT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO232"),
+ MTK_FUNCTION(1, "AP2DSP_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(233, "AP2DSP_INT_CLR"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO233"),
+ MTK_FUNCTION(1, "AP2DSP_INT_CLR")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(234, "DSP2AP_INT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO234"),
+ MTK_FUNCTION(1, "DSP2AP_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(235, "DSP2AP_INT_CLR"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO235"),
+ MTK_FUNCTION(1, "DSP2AP_INT_CLR")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(236, "EXT_SDIO3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 122),
+ MTK_FUNCTION(0, "GPIO236"),
+ MTK_FUNCTION(1, "EXT_SDIO3"),
+ MTK_FUNCTION(2, "IDDIG"),
+ MTK_FUNCTION(7, "DBG_MON_A[1]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(237, "EXT_SDIO2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 123),
+ MTK_FUNCTION(0, "GPIO237"),
+ MTK_FUNCTION(1, "EXT_SDIO2"),
+ MTK_FUNCTION(2, "DRV_VBUS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(238, "EXT_SDIO1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 124),
+ MTK_FUNCTION(0, "GPIO238"),
+ MTK_FUNCTION(1, "EXT_SDIO1"),
+ MTK_FUNCTION(2, "IDDIG_P1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(239, "EXT_SDIO0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 125),
+ MTK_FUNCTION(0, "GPIO239"),
+ MTK_FUNCTION(1, "EXT_SDIO0"),
+ MTK_FUNCTION(2, "DRV_VBUS_P1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(240, "EXT_XCS"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 126),
+ MTK_FUNCTION(0, "GPIO240"),
+ MTK_FUNCTION(1, "EXT_XCS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(241, "EXT_SCK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 127),
+ MTK_FUNCTION(0, "GPIO241"),
+ MTK_FUNCTION(1, "EXT_SCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(242, "URTS2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 128),
+ MTK_FUNCTION(0, "GPIO242"),
+ MTK_FUNCTION(1, "URTS2"),
+ MTK_FUNCTION(2, "UTXD3"),
+ MTK_FUNCTION(3, "URXD3"),
+ MTK_FUNCTION(4, "SCL1"),
+ MTK_FUNCTION(7, "DBG_MON_B[32]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(243, "UCTS2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 129),
+ MTK_FUNCTION(0, "GPIO243"),
+ MTK_FUNCTION(1, "UCTS2"),
+ MTK_FUNCTION(2, "URXD3"),
+ MTK_FUNCTION(3, "UTXD3"),
+ MTK_FUNCTION(4, "SDA1"),
+ MTK_FUNCTION(7, "DBG_MON_A[6]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(244, "HDMI_SDA_RX"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 130),
+ MTK_FUNCTION(0, "GPIO244"),
+ MTK_FUNCTION(1, "HDMI_SDA_RX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(245, "HDMI_SCL_RX"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 131),
+ MTK_FUNCTION(0, "GPIO245"),
+ MTK_FUNCTION(1, "HDMI_SCL_RX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(246, "MHL_SENCE"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 132),
+ MTK_FUNCTION(0, "GPIO246")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(247, "HDMI_HPD_CBUS_RX"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 69),
+ MTK_FUNCTION(0, "GPIO247"),
+ MTK_FUNCTION(1, "HDMI_HPD_RX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(248, "HDMI_TESTOUTP_RX"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 133),
+ MTK_FUNCTION(0, "GPIO248"),
+ MTK_FUNCTION(1, "HDMI_TESTOUTP_RX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(249, "MSDC0E_RSTB"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 134),
+ MTK_FUNCTION(0, "GPIO249"),
+ MTK_FUNCTION(1, "MSDC0E_RSTB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(250, "MSDC0E_DAT7"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 135),
+ MTK_FUNCTION(0, "GPIO250"),
+ MTK_FUNCTION(1, "MSDC3_DAT7"),
+ MTK_FUNCTION(6, "PCIE0_CLKREQ_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(251, "MSDC0E_DAT6"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 136),
+ MTK_FUNCTION(0, "GPIO251"),
+ MTK_FUNCTION(1, "MSDC3_DAT6"),
+ MTK_FUNCTION(6, "PCIE0_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(252, "MSDC0E_DAT5"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 137),
+ MTK_FUNCTION(0, "GPIO252"),
+ MTK_FUNCTION(1, "MSDC3_DAT5"),
+ MTK_FUNCTION(6, "PCIE1_CLKREQ_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(253, "MSDC0E_DAT4"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 138),
+ MTK_FUNCTION(0, "GPIO253"),
+ MTK_FUNCTION(1, "MSDC3_DAT4"),
+ MTK_FUNCTION(6, "PCIE1_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(254, "MSDC0E_DAT3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 139),
+ MTK_FUNCTION(0, "GPIO254"),
+ MTK_FUNCTION(1, "MSDC3_DAT3"),
+ MTK_FUNCTION(6, "PCIE2_CLKREQ_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(255, "MSDC0E_DAT2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 140),
+ MTK_FUNCTION(0, "GPIO255"),
+ MTK_FUNCTION(1, "MSDC3_DAT2"),
+ MTK_FUNCTION(6, "PCIE2_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(256, "MSDC0E_DAT1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 141),
+ MTK_FUNCTION(0, "GPIO256"),
+ MTK_FUNCTION(1, "MSDC3_DAT1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(257, "MSDC0E_DAT0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 142),
+ MTK_FUNCTION(0, "GPIO257"),
+ MTK_FUNCTION(1, "MSDC3_DAT0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(258, "MSDC0E_CMD"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 143),
+ MTK_FUNCTION(0, "GPIO258"),
+ MTK_FUNCTION(1, "MSDC3_CMD")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(259, "MSDC0E_CLK"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 144),
+ MTK_FUNCTION(0, "GPIO259"),
+ MTK_FUNCTION(1, "MSDC3_CLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(260, "MSDC0E_DSL"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 145),
+ MTK_FUNCTION(0, "GPIO260"),
+ MTK_FUNCTION(1, "MSDC3_DSL")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(261, "MSDC1_INS"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 146),
+ MTK_FUNCTION(0, "GPIO261"),
+ MTK_FUNCTION(1, "MSDC1_INS"),
+ MTK_FUNCTION(7, "DBG_MON_B[29]")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(262, "G2_TXEN"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 8),
+ MTK_FUNCTION(0, "GPIO262"),
+ MTK_FUNCTION(1, "G2_TXEN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(263, "G2_TXD3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 9),
+ MTK_FUNCTION(0, "GPIO263"),
+ MTK_FUNCTION(1, "G2_TXD3"),
+ MTK_FUNCTION(6, "ANT_SEL5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(264, "G2_TXD2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 10),
+ MTK_FUNCTION(0, "GPIO264"),
+ MTK_FUNCTION(1, "G2_TXD2"),
+ MTK_FUNCTION(6, "ANT_SEL4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(265, "G2_TXD1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 11),
+ MTK_FUNCTION(0, "GPIO265"),
+ MTK_FUNCTION(1, "G2_TXD1"),
+ MTK_FUNCTION(6, "ANT_SEL3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(266, "G2_TXD0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO266"),
+ MTK_FUNCTION(1, "G2_TXD0"),
+ MTK_FUNCTION(6, "ANT_SEL2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(267, "G2_TXC"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO267"),
+ MTK_FUNCTION(1, "G2_TXC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(268, "G2_RXC"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO268"),
+ MTK_FUNCTION(1, "G2_RXC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(269, "G2_RXD0"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO269"),
+ MTK_FUNCTION(1, "G2_RXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(270, "G2_RXD1"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO270"),
+ MTK_FUNCTION(1, "G2_RXD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(271, "G2_RXD2"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO271"),
+ MTK_FUNCTION(1, "G2_RXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(272, "G2_RXD3"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO272"),
+ MTK_FUNCTION(1, "G2_RXD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(273, "ESW_INT"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 168),
+ MTK_FUNCTION(0, "GPIO273"),
+ MTK_FUNCTION(1, "ESW_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(274, "G2_RXDV"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO274"),
+ MTK_FUNCTION(1, "G2_RXDV")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(275, "MDC"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO275"),
+ MTK_FUNCTION(1, "MDC"),
+ MTK_FUNCTION(6, "ANT_SEL0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(276, "MDIO"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO276"),
+ MTK_FUNCTION(1, "MDIO"),
+ MTK_FUNCTION(6, "ANT_SEL1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(277, "ESW_RST"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO277"),
+ MTK_FUNCTION(1, "ESW_RST")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(278, "JTAG_RESET"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(0, 147),
+ MTK_FUNCTION(0, "GPIO278"),
+ MTK_FUNCTION(1, "JTAG_RESET")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(279, "USB3_RES_BOND"),
+ NULL, "mt2701",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO279"),
+ MTK_FUNCTION(1, "USB3_RES_BOND")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT2701_H */
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
new file mode 100644
index 000000000000..3472a76ad422
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
@@ -0,0 +1,1936 @@
+/*
+ * Copyright (c) 2016 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_MTK_MT7623_H
+#define __PINCTRL_MTK_MT7623_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include "pinctrl-mtk-common.h"
+
+static const struct mtk_desc_pin mtk_pins_mt7623[] = {
+ MTK_PIN(
+ PINCTRL_PIN(0, "PWRAP_SPI0_MI"),
+ "J20", "mt7623",
+ MTK_EINT_FUNCTION(0, 148),
+ MTK_FUNCTION(0, "GPIO0"),
+ MTK_FUNCTION(1, "PWRAP_SPIDO"),
+ MTK_FUNCTION(2, "PWRAP_SPIDI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(1, "PWRAP_SPI0_MO"),
+ "D10", "mt7623",
+ MTK_EINT_FUNCTION(0, 149),
+ MTK_FUNCTION(0, "GPIO1"),
+ MTK_FUNCTION(1, "PWRAP_SPIDI"),
+ MTK_FUNCTION(2, "PWRAP_SPIDO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(2, "PWRAP_INT"),
+ "E11", "mt7623",
+ MTK_EINT_FUNCTION(0, 150),
+ MTK_FUNCTION(0, "GPIO2"),
+ MTK_FUNCTION(1, "PWRAP_INT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(3, "PWRAP_SPI0_CK"),
+ "H12", "mt7623",
+ MTK_EINT_FUNCTION(0, 151),
+ MTK_FUNCTION(0, "GPIO3"),
+ MTK_FUNCTION(1, "PWRAP_SPICK_I")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(4, "PWRAP_SPI0_CSN"),
+ "E12", "mt7623",
+ MTK_EINT_FUNCTION(0, 152),
+ MTK_FUNCTION(0, "GPIO4"),
+ MTK_FUNCTION(1, "PWRAP_SPICS_B_I")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(5, "PWRAP_SPI0_CK2"),
+ "H11", "mt7623",
+ MTK_EINT_FUNCTION(0, 155),
+ MTK_FUNCTION(0, "GPIO5"),
+ MTK_FUNCTION(1, "PWRAP_SPICK2_I")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(6, "PWRAP_SPI0_CSN2"),
+ "G11", "mt7623",
+ MTK_EINT_FUNCTION(0, 156),
+ MTK_FUNCTION(0, "GPIO6"),
+ MTK_FUNCTION(1, "PWRAP_SPICS2_B_I")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(7, "SPI1_CSN"),
+ "G19", "mt7623",
+ MTK_EINT_FUNCTION(0, 153),
+ MTK_FUNCTION(0, "GPIO7"),
+ MTK_FUNCTION(1, "SPI1_CS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(8, "SPI1_MI"),
+ "F19", "mt7623",
+ MTK_EINT_FUNCTION(0, 154),
+ MTK_FUNCTION(0, "GPIO8"),
+ MTK_FUNCTION(1, "SPI1_MI"),
+ MTK_FUNCTION(2, "SPI1_MO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(9, "SPI1_MO"),
+ "G20", "mt7623",
+ MTK_EINT_FUNCTION(0, 157),
+ MTK_FUNCTION(0, "GPIO9"),
+ MTK_FUNCTION(1, "SPI1_MO"),
+ MTK_FUNCTION(2, "SPI1_MI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(10, "RTC32K_CK"),
+ "A13", "mt7623",
+ MTK_EINT_FUNCTION(0, 158),
+ MTK_FUNCTION(0, "GPIO10"),
+ MTK_FUNCTION(1, "RTC32K_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(11, "WATCHDOG"),
+ "D14", "mt7623",
+ MTK_EINT_FUNCTION(0, 159),
+ MTK_FUNCTION(0, "GPIO11"),
+ MTK_FUNCTION(1, "WATCHDOG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(12, "SRCLKENA"),
+ "C13", "mt7623",
+ MTK_EINT_FUNCTION(0, 160),
+ MTK_FUNCTION(0, "GPIO12"),
+ MTK_FUNCTION(1, "SRCLKENA")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(13, "SRCLKENAI"),
+ "B13", "mt7623",
+ MTK_EINT_FUNCTION(0, 161),
+ MTK_FUNCTION(0, "GPIO13"),
+ MTK_FUNCTION(1, "SRCLKENAI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(14, "GPIO14"),
+ "E18", "mt7623",
+ MTK_EINT_FUNCTION(0, 162),
+ MTK_FUNCTION(0, "GPIO14"),
+ MTK_FUNCTION(1, "URXD2"),
+ MTK_FUNCTION(2, "UTXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(15, "GPIO15"),
+ "E17", "mt7623",
+ MTK_EINT_FUNCTION(0, 163),
+ MTK_FUNCTION(0, "GPIO15"),
+ MTK_FUNCTION(1, "UTXD2"),
+ MTK_FUNCTION(2, "URXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(16, "GPIO16"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO16")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(17, "GPIO17"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO17")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(18, "PCM_CLK"),
+ "C19", "mt7623",
+ MTK_EINT_FUNCTION(0, 166),
+ MTK_FUNCTION(0, "GPIO18"),
+ MTK_FUNCTION(1, "PCM_CLK0"),
+ MTK_FUNCTION(6, "AP_PCM_CLKO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(19, "PCM_SYNC"),
+ "D19", "mt7623",
+ MTK_EINT_FUNCTION(0, 167),
+ MTK_FUNCTION(0, "GPIO19"),
+ MTK_FUNCTION(1, "PCM_SYNC"),
+ MTK_FUNCTION(6, "AP_PCM_SYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(20, "PCM_RX"),
+ "D18", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO20"),
+ MTK_FUNCTION(1, "PCM_RX"),
+ MTK_FUNCTION(4, "PCM_TX"),
+ MTK_FUNCTION(6, "AP_PCM_RX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(21, "PCM_TX"),
+ "C18", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO21"),
+ MTK_FUNCTION(1, "PCM_TX"),
+ MTK_FUNCTION(4, "PCM_RX"),
+ MTK_FUNCTION(6, "AP_PCM_TX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(22, "EINT0"),
+ "H15", "mt7623",
+ MTK_EINT_FUNCTION(0, 0),
+ MTK_FUNCTION(0, "GPIO22"),
+ MTK_FUNCTION(1, "UCTS0"),
+ MTK_FUNCTION(2, "PCIE0_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(23, "EINT1"),
+ "J16", "mt7623",
+ MTK_EINT_FUNCTION(0, 1),
+ MTK_FUNCTION(0, "GPIO23"),
+ MTK_FUNCTION(1, "URTS0"),
+ MTK_FUNCTION(2, "PCIE1_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(24, "EINT2"),
+ "H16", "mt7623",
+ MTK_EINT_FUNCTION(0, 2),
+ MTK_FUNCTION(0, "GPIO24"),
+ MTK_FUNCTION(1, "UCTS1"),
+ MTK_FUNCTION(2, "PCIE2_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(25, "EINT3"),
+ "K15", "mt7623",
+ MTK_EINT_FUNCTION(0, 3),
+ MTK_FUNCTION(0, "GPIO25"),
+ MTK_FUNCTION(1, "URTS1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(26, "EINT4"),
+ "G15", "mt7623",
+ MTK_EINT_FUNCTION(0, 4),
+ MTK_FUNCTION(0, "GPIO26"),
+ MTK_FUNCTION(1, "UCTS3"),
+ MTK_FUNCTION(6, "PCIE2_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(27, "EINT5"),
+ "F15", "mt7623",
+ MTK_EINT_FUNCTION(0, 5),
+ MTK_FUNCTION(0, "GPIO27"),
+ MTK_FUNCTION(1, "URTS3"),
+ MTK_FUNCTION(6, "PCIE1_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(28, "EINT6"),
+ "J15", "mt7623",
+ MTK_EINT_FUNCTION(0, 6),
+ MTK_FUNCTION(0, "GPIO28"),
+ MTK_FUNCTION(1, "DRV_VBUS"),
+ MTK_FUNCTION(6, "PCIE0_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(29, "EINT7"),
+ "E15", "mt7623",
+ MTK_EINT_FUNCTION(0, 7),
+ MTK_FUNCTION(0, "GPIO29"),
+ MTK_FUNCTION(1, "IDDIG"),
+ MTK_FUNCTION(2, "MSDC1_WP"),
+ MTK_FUNCTION(6, "PCIE2_PERST_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(30, "GPIO30"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO30")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(31, "GPIO31"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO31")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(32, "GPIO32"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO32")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(33, "I2S1_DATA"),
+ "Y18", "mt7623",
+ MTK_EINT_FUNCTION(0, 15),
+ MTK_FUNCTION(0, "GPIO33"),
+ MTK_FUNCTION(1, "I2S1_DATA"),
+ MTK_FUNCTION(3, "PCM_TX"),
+ MTK_FUNCTION(6, "AP_PCM_TX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(34, "I2S1_DATA_IN"),
+ "Y17", "mt7623",
+ MTK_EINT_FUNCTION(0, 16),
+ MTK_FUNCTION(0, "GPIO34"),
+ MTK_FUNCTION(1, "I2S1_DATA_IN"),
+ MTK_FUNCTION(3, "PCM_RX"),
+ MTK_FUNCTION(6, "AP_PCM_RX")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(35, "I2S1_BCK"),
+ "V17", "mt7623",
+ MTK_EINT_FUNCTION(0, 17),
+ MTK_FUNCTION(0, "GPIO35"),
+ MTK_FUNCTION(1, "I2S1_BCK"),
+ MTK_FUNCTION(3, "PCM_CLK0"),
+ MTK_FUNCTION(6, "AP_PCM_CLKO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(36, "I2S1_LRCK"),
+ "W17", "mt7623",
+ MTK_EINT_FUNCTION(0, 18),
+ MTK_FUNCTION(0, "GPIO36"),
+ MTK_FUNCTION(1, "I2S1_LRCK"),
+ MTK_FUNCTION(3, "PCM_SYNC"),
+ MTK_FUNCTION(6, "AP_PCM_SYNC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(37, "I2S1_MCLK"),
+ "AA18", "mt7623",
+ MTK_EINT_FUNCTION(0, 19),
+ MTK_FUNCTION(0, "GPIO37"),
+ MTK_FUNCTION(1, "I2S1_MCLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(38, "GPIO38"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO38")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(39, "JTMS"),
+ "G21", "mt7623",
+ MTK_EINT_FUNCTION(0, 21),
+ MTK_FUNCTION(0, "GPIO39"),
+ MTK_FUNCTION(1, "JTMS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(40, "GPIO40"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO40")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(41, "JTDI"),
+ "H22", "mt7623",
+ MTK_EINT_FUNCTION(0, 23),
+ MTK_FUNCTION(0, "GPIO41"),
+ MTK_FUNCTION(1, "JTDI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(42, "JTDO"),
+ "H21", "mt7623",
+ MTK_EINT_FUNCTION(0, 24),
+ MTK_FUNCTION(0, "GPIO42"),
+ MTK_FUNCTION(1, "JTDO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(43, "NCLE"),
+ "C7", "mt7623",
+ MTK_EINT_FUNCTION(0, 25),
+ MTK_FUNCTION(0, "GPIO43"),
+ MTK_FUNCTION(1, "NCLE"),
+ MTK_FUNCTION(2, "EXT_XCS2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(44, "NCEB1"),
+ "C6", "mt7623",
+ MTK_EINT_FUNCTION(0, 26),
+ MTK_FUNCTION(0, "GPIO44"),
+ MTK_FUNCTION(1, "NCEB1"),
+ MTK_FUNCTION(2, "IDDIG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(45, "NCEB0"),
+ "D7", "mt7623",
+ MTK_EINT_FUNCTION(0, 27),
+ MTK_FUNCTION(0, "GPIO45"),
+ MTK_FUNCTION(1, "NCEB0"),
+ MTK_FUNCTION(2, "DRV_VBUS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(46, "IR"),
+ "D15", "mt7623",
+ MTK_EINT_FUNCTION(0, 28),
+ MTK_FUNCTION(0, "GPIO46"),
+ MTK_FUNCTION(1, "IR")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(47, "NREB"),
+ "A6", "mt7623",
+ MTK_EINT_FUNCTION(0, 29),
+ MTK_FUNCTION(0, "GPIO47"),
+ MTK_FUNCTION(1, "NREB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(48, "NRNB"),
+ "B6", "mt7623",
+ MTK_EINT_FUNCTION(0, 30),
+ MTK_FUNCTION(0, "GPIO48"),
+ MTK_FUNCTION(1, "NRNB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(49, "I2S0_DATA"),
+ "AB18", "mt7623",
+ MTK_EINT_FUNCTION(0, 31),
+ MTK_FUNCTION(0, "GPIO49"),
+ MTK_FUNCTION(1, "I2S0_DATA"),
+ MTK_FUNCTION(3, "PCM_TX"),
+ MTK_FUNCTION(6, "AP_I2S_DO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(50, "GPIO50"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO50")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(51, "GPIO51"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO51")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(52, "GPIO52"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO52")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(53, "SPI0_CSN"),
+ "E7", "mt7623",
+ MTK_EINT_FUNCTION(0, 35),
+ MTK_FUNCTION(0, "GPIO53"),
+ MTK_FUNCTION(1, "SPI0_CS"),
+ MTK_FUNCTION(5, "PWM1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(54, "SPI0_CK"),
+ "F7", "mt7623",
+ MTK_EINT_FUNCTION(0, 36),
+ MTK_FUNCTION(0, "GPIO54"),
+ MTK_FUNCTION(1, "SPI0_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(55, "SPI0_MI"),
+ "E6", "mt7623",
+ MTK_EINT_FUNCTION(0, 37),
+ MTK_FUNCTION(0, "GPIO55"),
+ MTK_FUNCTION(1, "SPI0_MI"),
+ MTK_FUNCTION(2, "SPI0_MO"),
+ MTK_FUNCTION(3, "MSDC1_WP"),
+ MTK_FUNCTION(5, "PWM2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(56, "SPI0_MO"),
+ "G7", "mt7623",
+ MTK_EINT_FUNCTION(0, 38),
+ MTK_FUNCTION(0, "GPIO56"),
+ MTK_FUNCTION(1, "SPI0_MO"),
+ MTK_FUNCTION(2, "SPI0_MI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(57, "GPIO57"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO57")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(58, "GPIO58"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO58")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(59, "GPIO59"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO59")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(60, "WB_RSTB"),
+ "Y21", "mt7623",
+ MTK_EINT_FUNCTION(0, 41),
+ MTK_FUNCTION(0, "GPIO60"),
+ MTK_FUNCTION(1, "WB_RSTB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(61, "GPIO61"),
+ "AA21", "mt7623",
+ MTK_EINT_FUNCTION(0, 42),
+ MTK_FUNCTION(0, "GPIO61"),
+ MTK_FUNCTION(1, "TEST_FD")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(62, "GPIO62"),
+ "AB22", "mt7623",
+ MTK_EINT_FUNCTION(0, 43),
+ MTK_FUNCTION(0, "GPIO62"),
+ MTK_FUNCTION(1, "TEST_FC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(63, "WB_SCLK"),
+ "AC23", "mt7623",
+ MTK_EINT_FUNCTION(0, 44),
+ MTK_FUNCTION(0, "GPIO63"),
+ MTK_FUNCTION(1, "WB_SCLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(64, "WB_SDATA"),
+ "AB21", "mt7623",
+ MTK_EINT_FUNCTION(0, 45),
+ MTK_FUNCTION(0, "GPIO64"),
+ MTK_FUNCTION(1, "WB_SDATA")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(65, "WB_SEN"),
+ "AB24", "mt7623",
+ MTK_EINT_FUNCTION(0, 46),
+ MTK_FUNCTION(0, "GPIO65"),
+ MTK_FUNCTION(1, "WB_SEN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(66, "WB_CRTL0"),
+ "AB20", "mt7623",
+ MTK_EINT_FUNCTION(0, 47),
+ MTK_FUNCTION(0, "GPIO66"),
+ MTK_FUNCTION(1, "WB_CRTL0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(67, "WB_CRTL1"),
+ "AC20", "mt7623",
+ MTK_EINT_FUNCTION(0, 48),
+ MTK_FUNCTION(0, "GPIO67"),
+ MTK_FUNCTION(1, "WB_CRTL1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(68, "WB_CRTL2"),
+ "AB19", "mt7623",
+ MTK_EINT_FUNCTION(0, 49),
+ MTK_FUNCTION(0, "GPIO68"),
+ MTK_FUNCTION(1, "WB_CRTL2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(69, "WB_CRTL3"),
+ "AC19", "mt7623",
+ MTK_EINT_FUNCTION(0, 50),
+ MTK_FUNCTION(0, "GPIO69"),
+ MTK_FUNCTION(1, "WB_CRTL3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(70, "WB_CRTL4"),
+ "AD19", "mt7623",
+ MTK_EINT_FUNCTION(0, 51),
+ MTK_FUNCTION(0, "GPIO70"),
+ MTK_FUNCTION(1, "WB_CRTL4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(71, "WB_CRTL5"),
+ "AE19", "mt7623",
+ MTK_EINT_FUNCTION(0, 52),
+ MTK_FUNCTION(0, "GPIO71"),
+ MTK_FUNCTION(1, "WB_CRTL5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(72, "I2S0_DATA_IN"),
+ "AA20", "mt7623",
+ MTK_EINT_FUNCTION(0, 53),
+ MTK_FUNCTION(0, "GPIO72"),
+ MTK_FUNCTION(1, "I2S0_DATA_IN"),
+ MTK_FUNCTION(3, "PCM_RX"),
+ MTK_FUNCTION(4, "PWM0"),
+ MTK_FUNCTION(5, "DISP_PWM"),
+ MTK_FUNCTION(6, "AP_I2S_DI")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(73, "I2S0_LRCK"),
+ "Y20", "mt7623",
+ MTK_EINT_FUNCTION(0, 54),
+ MTK_FUNCTION(0, "GPIO73"),
+ MTK_FUNCTION(1, "I2S0_LRCK"),
+ MTK_FUNCTION(3, "PCM_SYNC"),
+ MTK_FUNCTION(6, "AP_I2S_LRCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(74, "I2S0_BCK"),
+ "Y19", "mt7623",
+ MTK_EINT_FUNCTION(0, 55),
+ MTK_FUNCTION(0, "GPIO74"),
+ MTK_FUNCTION(1, "I2S0_BCK"),
+ MTK_FUNCTION(3, "PCM_CLK0"),
+ MTK_FUNCTION(6, "AP_I2S_BCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(75, "SDA0"),
+ "K19", "mt7623",
+ MTK_EINT_FUNCTION(0, 56),
+ MTK_FUNCTION(0, "GPIO75"),
+ MTK_FUNCTION(1, "SDA0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(76, "SCL0"),
+ "K20", "mt7623",
+ MTK_EINT_FUNCTION(0, 57),
+ MTK_FUNCTION(0, "GPIO76"),
+ MTK_FUNCTION(1, "SCL0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(77, "GPIO77"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO77")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(78, "GPIO78"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO78")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(79, "GPIO79"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO79")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(80, "GPIO80"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO80")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(81, "GPIO81"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO81")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(82, "GPIO82"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO82")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(83, "LCM_RST"),
+ "V16", "mt7623",
+ MTK_EINT_FUNCTION(0, 64),
+ MTK_FUNCTION(0, "GPIO83"),
+ MTK_FUNCTION(1, "LCM_RST")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(84, "DSI_TE"),
+ "V14", "mt7623",
+ MTK_EINT_FUNCTION(0, 65),
+ MTK_FUNCTION(0, "GPIO84"),
+ MTK_FUNCTION(1, "DSI_TE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(85, "GPIO85"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO85")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(86, "GPIO86"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO86")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(87, "GPIO87"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO87")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(88, "GPIO88"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO88")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(89, "GPIO89"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO89")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(90, "GPIO90"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO90")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(91, "GPIO91"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO91")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(92, "GPIO92"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO92")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(93, "GPIO93"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO93")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(94, "GPIO94"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO94")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(95, "MIPI_TCN"),
+ "AB14", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO95"),
+ MTK_FUNCTION(1, "TCN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(96, "MIPI_TCP"),
+ "AC14", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO96"),
+ MTK_FUNCTION(1, "TCP")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(97, "MIPI_TDN1"),
+ "AE15", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO97"),
+ MTK_FUNCTION(1, "TDN1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(98, "MIPI_TDP1"),
+ "AD15", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO98"),
+ MTK_FUNCTION(1, "TDP1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(99, "MIPI_TDN0"),
+ "AB15", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO99"),
+ MTK_FUNCTION(1, "TDN0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(100, "MIPI_TDP0"),
+ "AC15", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO100"),
+ MTK_FUNCTION(1, "TDP0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(101, "GPIO101"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO101")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(102, "GPIO102"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO102")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(103, "GPIO103"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO103")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(104, "GPIO104"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO104")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(105, "MSDC1_CMD"),
+ "AD2", "mt7623",
+ MTK_EINT_FUNCTION(0, 78),
+ MTK_FUNCTION(0, "GPIO105"),
+ MTK_FUNCTION(1, "MSDC1_CMD"),
+ MTK_FUNCTION(3, "SDA1"),
+ MTK_FUNCTION(6, "I2SOUT_BCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(106, "MSDC1_CLK"),
+ "AD3", "mt7623",
+ MTK_EINT_FUNCTION(0, 79),
+ MTK_FUNCTION(0, "GPIO106"),
+ MTK_FUNCTION(1, "MSDC1_CLK"),
+ MTK_FUNCTION(3, "SCL1"),
+ MTK_FUNCTION(6, "I2SOUT_LRCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(107, "MSDC1_DAT0"),
+ "AE2", "mt7623",
+ MTK_EINT_FUNCTION(0, 80),
+ MTK_FUNCTION(0, "GPIO107"),
+ MTK_FUNCTION(1, "MSDC1_DAT0"),
+ MTK_FUNCTION(5, "UTXD0"),
+ MTK_FUNCTION(6, "I2SOUT_DATA_OUT")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(108, "MSDC1_DAT1"),
+ "AC1", "mt7623",
+ MTK_EINT_FUNCTION(0, 81),
+ MTK_FUNCTION(0, "GPIO108"),
+ MTK_FUNCTION(1, "MSDC1_DAT1"),
+ MTK_FUNCTION(3, "PWM0"),
+ MTK_FUNCTION(5, "URXD0"),
+ MTK_FUNCTION(6, "PWM1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(109, "MSDC1_DAT2"),
+ "AC3", "mt7623",
+ MTK_EINT_FUNCTION(0, 82),
+ MTK_FUNCTION(0, "GPIO109"),
+ MTK_FUNCTION(1, "MSDC1_DAT2"),
+ MTK_FUNCTION(3, "SDA2"),
+ MTK_FUNCTION(5, "UTXD1"),
+ MTK_FUNCTION(6, "PWM2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(110, "MSDC1_DAT3"),
+ "AC4", "mt7623",
+ MTK_EINT_FUNCTION(0, 83),
+ MTK_FUNCTION(0, "GPIO110"),
+ MTK_FUNCTION(1, "MSDC1_DAT3"),
+ MTK_FUNCTION(3, "SCL2"),
+ MTK_FUNCTION(5, "URXD1"),
+ MTK_FUNCTION(6, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(111, "MSDC0_DAT7"),
+ "A2", "mt7623",
+ MTK_EINT_FUNCTION(0, 84),
+ MTK_FUNCTION(0, "GPIO111"),
+ MTK_FUNCTION(1, "MSDC0_DAT7"),
+ MTK_FUNCTION(4, "NLD7")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(112, "MSDC0_DAT6"),
+ "B3", "mt7623",
+ MTK_EINT_FUNCTION(0, 85),
+ MTK_FUNCTION(0, "GPIO112"),
+ MTK_FUNCTION(1, "MSDC0_DAT6"),
+ MTK_FUNCTION(4, "NLD6")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(113, "MSDC0_DAT5"),
+ "C4", "mt7623",
+ MTK_EINT_FUNCTION(0, 86),
+ MTK_FUNCTION(0, "GPIO113"),
+ MTK_FUNCTION(1, "MSDC0_DAT5"),
+ MTK_FUNCTION(4, "NLD5")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(114, "MSDC0_DAT4"),
+ "A4", "mt7623",
+ MTK_EINT_FUNCTION(0, 87),
+ MTK_FUNCTION(0, "GPIO114"),
+ MTK_FUNCTION(1, "MSDC0_DAT4"),
+ MTK_FUNCTION(4, "NLD4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(115, "MSDC0_RSTB"),
+ "C5", "mt7623",
+ MTK_EINT_FUNCTION(0, 88),
+ MTK_FUNCTION(0, "GPIO115"),
+ MTK_FUNCTION(1, "MSDC0_RSTB"),
+ MTK_FUNCTION(4, "NLD8")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(116, "MSDC0_CMD"),
+ "D5", "mt7623",
+ MTK_EINT_FUNCTION(0, 89),
+ MTK_FUNCTION(0, "GPIO116"),
+ MTK_FUNCTION(1, "MSDC0_CMD"),
+ MTK_FUNCTION(4, "NALE")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(117, "MSDC0_CLK"),
+ "B1", "mt7623",
+ MTK_EINT_FUNCTION(0, 90),
+ MTK_FUNCTION(0, "GPIO117"),
+ MTK_FUNCTION(1, "MSDC0_CLK"),
+ MTK_FUNCTION(4, "NWEB")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(118, "MSDC0_DAT3"),
+ "D6", "mt7623",
+ MTK_EINT_FUNCTION(0, 91),
+ MTK_FUNCTION(0, "GPIO118"),
+ MTK_FUNCTION(1, "MSDC0_DAT3"),
+ MTK_FUNCTION(4, "NLD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(119, "MSDC0_DAT2"),
+ "B2", "mt7623",
+ MTK_EINT_FUNCTION(0, 92),
+ MTK_FUNCTION(0, "GPIO119"),
+ MTK_FUNCTION(1, "MSDC0_DAT2"),
+ MTK_FUNCTION(4, "NLD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(120, "MSDC0_DAT1"),
+ "A3", "mt7623",
+ MTK_EINT_FUNCTION(0, 93),
+ MTK_FUNCTION(0, "GPIO120"),
+ MTK_FUNCTION(1, "MSDC0_DAT1"),
+ MTK_FUNCTION(4, "NLD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(121, "MSDC0_DAT0"),
+ "B4", "mt7623",
+ MTK_EINT_FUNCTION(0, 94),
+ MTK_FUNCTION(0, "GPIO121"),
+ MTK_FUNCTION(1, "MSDC0_DAT0"),
+ MTK_FUNCTION(4, "NLD0"),
+ MTK_FUNCTION(5, "WATCHDOG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(122, "GPIO122"),
+ "H17", "mt7623",
+ MTK_EINT_FUNCTION(0, 95),
+ MTK_FUNCTION(0, "GPIO122"),
+ MTK_FUNCTION(1, "TEST"),
+ MTK_FUNCTION(4, "SDA2"),
+ MTK_FUNCTION(5, "URXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(123, "GPIO123"),
+ "F17", "mt7623",
+ MTK_EINT_FUNCTION(0, 96),
+ MTK_FUNCTION(0, "GPIO123"),
+ MTK_FUNCTION(1, "TEST"),
+ MTK_FUNCTION(4, "SCL2"),
+ MTK_FUNCTION(5, "UTXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(124, "GPIO124"),
+ "H18", "mt7623",
+ MTK_EINT_FUNCTION(0, 97),
+ MTK_FUNCTION(0, "GPIO124"),
+ MTK_FUNCTION(1, "TEST"),
+ MTK_FUNCTION(4, "SDA1"),
+ MTK_FUNCTION(5, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(125, "GPIO125"),
+ "G17", "mt7623",
+ MTK_EINT_FUNCTION(0, 98),
+ MTK_FUNCTION(0, "GPIO125"),
+ MTK_FUNCTION(1, "TEST"),
+ MTK_FUNCTION(4, "SCL1"),
+ MTK_FUNCTION(5, "PWM4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(126, "I2S0_MCLK"),
+ "AA19", "mt7623",
+ MTK_EINT_FUNCTION(0, 99),
+ MTK_FUNCTION(0, "GPIO126"),
+ MTK_FUNCTION(1, "I2S0_MCLK"),
+ MTK_FUNCTION(6, "AP_I2S_MCLK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(127, "GPIO127"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO127")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(128, "GPIO128"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO128")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(129, "GPIO129"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO129")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(130, "GPIO130"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO130")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(131, "GPIO131"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO131")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(132, "GPIO132"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO132")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(133, "GPIO133"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO133")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(134, "GPIO134"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO134")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(135, "GPIO135"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO135")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(136, "GPIO136"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO136")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(137, "GPIO137"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO137")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(138, "GPIO138"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO138")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(139, "GPIO139"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO139")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(140, "GPIO140"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO140")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(141, "GPIO141"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO141")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(142, "GPIO142"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO142")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(143, "GPIO143"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO143")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(144, "GPIO144"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO144")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(145, "GPIO145"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO145")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(146, "GPIO146"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO146")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(147, "GPIO147"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO147")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(148, "GPIO148"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO148")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(149, "GPIO149"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO149")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(150, "GPIO150"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO150")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(151, "GPIO151"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO151")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(152, "GPIO152"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO152")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(153, "GPIO153"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO153")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(154, "GPIO154"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO154")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(155, "GPIO155"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO155")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(156, "GPIO156"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO156")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(157, "GPIO157"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO157")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(158, "GPIO158"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO158")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(159, "GPIO159"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO159")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(160, "GPIO160"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO160")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(161, "GPIO161"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO161")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(162, "GPIO162"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO162")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(163, "GPIO163"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO163")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(164, "GPIO164"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO164")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(165, "GPIO165"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO165")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(166, "GPIO166"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO166")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(167, "GPIO167"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO167")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(168, "GPIO168"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO168")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(169, "GPIO169"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO169")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(170, "GPIO170"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO170")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(171, "GPIO171"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO171")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(172, "GPIO172"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO172")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(173, "GPIO173"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO173")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(174, "GPIO174"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO174")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(175, "GPIO175"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO175")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(176, "GPIO176"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO176")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(177, "GPIO177"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO177")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(178, "GPIO178"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO178")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(179, "GPIO179"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO179")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(180, "GPIO180"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO180")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(181, "GPIO181"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO181")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(182, "GPIO182"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO182")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(183, "GPIO183"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO183")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(184, "GPIO184"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO184")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(185, "GPIO185"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO185")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(186, "GPIO186"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO186")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(187, "GPIO187"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO187")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(188, "GPIO188"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO188")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(189, "GPIO189"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO189")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(190, "GPIO190"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO190")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(191, "GPIO191"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO191")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(192, "GPIO192"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO192")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(193, "GPIO193"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO193")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(194, "GPIO194"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO194")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(195, "GPIO195"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO195")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(196, "GPIO196"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO196")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(197, "GPIO197"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO197")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(198, "GPIO198"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO198")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(199, "SPI1_CK"),
+ "E19", "mt7623",
+ MTK_EINT_FUNCTION(0, 111),
+ MTK_FUNCTION(0, "GPIO199"),
+ MTK_FUNCTION(1, "SPI1_CK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(200, "URXD2"),
+ "K18", "mt7623",
+ MTK_EINT_FUNCTION(0, 112),
+ MTK_FUNCTION(0, "GPIO200"),
+ MTK_FUNCTION(6, "URXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(201, "UTXD2"),
+ "L18", "mt7623",
+ MTK_EINT_FUNCTION(0, 113),
+ MTK_FUNCTION(0, "GPIO201"),
+ MTK_FUNCTION(6, "UTXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(202, "GPIO202"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO202")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(203, "PWM0"),
+ "AA16", "mt7623",
+ MTK_EINT_FUNCTION(0, 115),
+ MTK_FUNCTION(0, "GPIO203"),
+ MTK_FUNCTION(1, "PWM0"),
+ MTK_FUNCTION(2, "DISP_PWM")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(204, "PWM1"),
+ "Y16", "mt7623",
+ MTK_EINT_FUNCTION(0, 116),
+ MTK_FUNCTION(0, "GPIO204"),
+ MTK_FUNCTION(1, "PWM1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(205, "PWM2"),
+ "AA15", "mt7623",
+ MTK_EINT_FUNCTION(0, 117),
+ MTK_FUNCTION(0, "GPIO205"),
+ MTK_FUNCTION(1, "PWM2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(206, "PWM3"),
+ "AA17", "mt7623",
+ MTK_EINT_FUNCTION(0, 118),
+ MTK_FUNCTION(0, "GPIO206"),
+ MTK_FUNCTION(1, "PWM3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(207, "PWM4"),
+ "Y15", "mt7623",
+ MTK_EINT_FUNCTION(0, 119),
+ MTK_FUNCTION(0, "GPIO207"),
+ MTK_FUNCTION(1, "PWM4")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(208, "AUD_EXT_CK1"),
+ "W14", "mt7623",
+ MTK_EINT_FUNCTION(0, 120),
+ MTK_FUNCTION(0, "GPIO208"),
+ MTK_FUNCTION(1, "AUD_EXT_CK1"),
+ MTK_FUNCTION(2, "PWM0"),
+ MTK_FUNCTION(3, "PCIE0_PERST_N"),
+ MTK_FUNCTION(5, "DISP_PWM")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(209, "AUD_EXT_CK2"),
+ "V15", "mt7623",
+ MTK_EINT_FUNCTION(0, 121),
+ MTK_FUNCTION(0, "GPIO209"),
+ MTK_FUNCTION(1, "AUD_EXT_CK2"),
+ MTK_FUNCTION(2, "MSDC1_WP"),
+ MTK_FUNCTION(3, "PCIE1_PERST_N"),
+ MTK_FUNCTION(5, "PWM1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(210, "GPIO210"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO210")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(211, "GPIO211"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO211")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(212, "GPIO212"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO212")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(213, "GPIO213"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO213")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(214, "GPIO214"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO214")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(215, "GPIO215"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO215")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(216, "GPIO216"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO216")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(217, "GPIO217"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO217")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(218, "GPIO218"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO218")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(219, "GPIO219"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO219")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(220, "GPIO220"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO220")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(221, "GPIO221"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO221")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(222, "GPIO222"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO222")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(223, "GPIO223"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO223")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(224, "GPIO224"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO224")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(225, "GPIO225"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO225")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(226, "GPIO226"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO226")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(227, "GPIO227"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO227")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(228, "GPIO228"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO228")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(229, "GPIO229"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO229")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(230, "GPIO230"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO230")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(231, "GPIO231"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO231")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(232, "GPIO232"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO232")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(233, "GPIO233"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO233")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(234, "GPIO234"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO234")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(235, "GPIO235"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO235")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(236, "EXT_SDIO3"),
+ "A8", "mt7623",
+ MTK_EINT_FUNCTION(0, 122),
+ MTK_FUNCTION(0, "GPIO236"),
+ MTK_FUNCTION(1, "EXT_SDIO3"),
+ MTK_FUNCTION(2, "IDDIG")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(237, "EXT_SDIO2"),
+ "D8", "mt7623",
+ MTK_EINT_FUNCTION(0, 123),
+ MTK_FUNCTION(0, "GPIO237"),
+ MTK_FUNCTION(1, "EXT_SDIO2"),
+ MTK_FUNCTION(2, "DRV_VBUS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(238, "EXT_SDIO1"),
+ "D9", "mt7623",
+ MTK_EINT_FUNCTION(0, 124),
+ MTK_FUNCTION(0, "GPIO238"),
+ MTK_FUNCTION(1, "EXT_SDIO1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(239, "EXT_SDIO0"),
+ "B8", "mt7623",
+ MTK_EINT_FUNCTION(0, 125),
+ MTK_FUNCTION(0, "GPIO239"),
+ MTK_FUNCTION(1, "EXT_SDIO0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(240, "EXT_XCS"),
+ "C9", "mt7623",
+ MTK_EINT_FUNCTION(0, 126),
+ MTK_FUNCTION(0, "GPIO240"),
+ MTK_FUNCTION(1, "EXT_XCS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(241, "EXT_SCK"),
+ "C8", "mt7623",
+ MTK_EINT_FUNCTION(0, 127),
+ MTK_FUNCTION(0, "GPIO241"),
+ MTK_FUNCTION(1, "EXT_SCK")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(242, "URTS2"),
+ "G18", "mt7623",
+ MTK_EINT_FUNCTION(0, 128),
+ MTK_FUNCTION(0, "GPIO242"),
+ MTK_FUNCTION(1, "URTS2"),
+ MTK_FUNCTION(2, "UTXD3"),
+ MTK_FUNCTION(3, "URXD3"),
+ MTK_FUNCTION(4, "SCL1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(243, "UCTS2"),
+ "H19", "mt7623",
+ MTK_EINT_FUNCTION(0, 129),
+ MTK_FUNCTION(0, "GPIO243"),
+ MTK_FUNCTION(1, "UCTS2"),
+ MTK_FUNCTION(2, "URXD3"),
+ MTK_FUNCTION(3, "UTXD3"),
+ MTK_FUNCTION(4, "SDA1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(244, "GPIO244"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO244")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(245, "GPIO245"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO245")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(246, "GPIO246"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO246")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(247, "GPIO247"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO247")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(248, "GPIO248"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO248")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(249, "GPIO249"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO249")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(250, "GPIO250"),
+ "A15", "mt7623",
+ MTK_EINT_FUNCTION(0, 135),
+ MTK_FUNCTION(0, "GPIO250"),
+ MTK_FUNCTION(1, "TEST_MD7"),
+ MTK_FUNCTION(6, "PCIE0_CLKREQ_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(251, "GPIO251"),
+ "B15", "mt7623",
+ MTK_EINT_FUNCTION(0, 136),
+ MTK_FUNCTION(0, "GPIO251"),
+ MTK_FUNCTION(1, "TEST_MD6"),
+ MTK_FUNCTION(6, "PCIE0_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(252, "GPIO252"),
+ "C16", "mt7623",
+ MTK_EINT_FUNCTION(0, 137),
+ MTK_FUNCTION(0, "GPIO252"),
+ MTK_FUNCTION(1, "TEST_MD5"),
+ MTK_FUNCTION(6, "PCIE1_CLKREQ_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(253, "GPIO253"),
+ "D17", "mt7623",
+ MTK_EINT_FUNCTION(0, 138),
+ MTK_FUNCTION(0, "GPIO253"),
+ MTK_FUNCTION(1, "TEST_MD4"),
+ MTK_FUNCTION(6, "PCIE1_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(254, "GPIO254"),
+ "D16", "mt7623",
+ MTK_EINT_FUNCTION(0, 139),
+ MTK_FUNCTION(0, "GPIO254"),
+ MTK_FUNCTION(1, "TEST_MD3"),
+ MTK_FUNCTION(6, "PCIE2_CLKREQ_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(255, "GPIO255"),
+ "C17", "mt7623",
+ MTK_EINT_FUNCTION(0, 140),
+ MTK_FUNCTION(0, "GPIO255"),
+ MTK_FUNCTION(1, "TEST_MD2"),
+ MTK_FUNCTION(6, "PCIE2_WAKE_N")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(256, "GPIO256"),
+ "B17", "mt7623",
+ MTK_EINT_FUNCTION(0, 141),
+ MTK_FUNCTION(0, "GPIO256"),
+ MTK_FUNCTION(1, "TEST_MD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(257, "GPIO257"),
+ "C15", "mt7623",
+ MTK_EINT_FUNCTION(0, 142),
+ MTK_FUNCTION(0, "GPIO257"),
+ MTK_FUNCTION(1, "TEST_MD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(258, "GPIO258"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO258")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(259, "GPIO259"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO259")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(260, "GPIO260"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO260")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(261, "MSDC1_INS"),
+ "AD1", "mt7623",
+ MTK_EINT_FUNCTION(0, 146),
+ MTK_FUNCTION(0, "GPIO261"),
+ MTK_FUNCTION(1, "MSDC1_INS")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(262, "G2_TXEN"),
+ "A23", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO262"),
+ MTK_FUNCTION(1, "G2_TXEN")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(263, "G2_TXD3"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO263"),
+ MTK_FUNCTION(1, "G2_TXD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(264, "G2_TXD2"),
+ "C24", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO264"),
+ MTK_FUNCTION(1, "G2_TXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(265, "G2_TXD1"),
+ "B25", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO265"),
+ MTK_FUNCTION(1, "G2_TXD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(266, "G2_TXD0"),
+ "A24", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO266"),
+ MTK_FUNCTION(1, "G2_TXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(267, "G2_TXCLK"),
+ "C23", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO267"),
+ MTK_FUNCTION(1, "G2_TXC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(268, "G2_RXCLK"),
+ "B23", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO268"),
+ MTK_FUNCTION(1, "G2_RXC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(269, "G2_RXD0"),
+ "D21", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO269"),
+ MTK_FUNCTION(1, "G2_RXD0")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(270, "G2_RXD1"),
+ "B22", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO270"),
+ MTK_FUNCTION(1, "G2_RXD1")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(271, "G2_RXD2"),
+ "A22", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO271"),
+ MTK_FUNCTION(1, "G2_RXD2")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(272, "G2_RXD3"),
+ "C22", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO272"),
+ MTK_FUNCTION(1, "G2_RXD3")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(273, "GPIO273"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO273")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(274, "G2_RXDV"),
+ "C21", "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO274"),
+ MTK_FUNCTION(1, "G2_RXDV")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(275, "G2_MDC"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO275"),
+ MTK_FUNCTION(1, "MDC")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(276, "G2_MDIO"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO276"),
+ MTK_FUNCTION(1, "MDIO")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(277, "GPIO277"),
+ NULL, "mt7623",
+ MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
+ MTK_FUNCTION(0, "GPIO277")
+ ),
+ MTK_PIN(
+ PINCTRL_PIN(278, "JTAG_RESET"),
+ "H20", "mt7623",
+ MTK_EINT_FUNCTION(0, 147),
+ MTK_FUNCTION(0, "GPIO278"),
+ MTK_FUNCTION(1, "JTAG_RESET")
+ ),
+};
+
+#endif /* __PINCTRL_MTK_MT7623_H */
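That closes the MT7623 pin table. Each MTK_PIN() entry above binds a pin number and signal name to a package ball (NULL when the pin is not bonded out), an owning chip string, an external-interrupt mapping (NO_EINT_SUPPORT means the pin cannot raise an EINT), and a list of mux modes, where mode 0 is always the plain-GPIO function. As a rough guide to what the macros describe, here is a sketch of the backing structures; the field and type names are assumptions for exposition, not the exact pinctrl-mtk-common.h definitions:

/*
 * Illustrative sketch only: field and type names are assumed,
 * not copied from pinctrl-mtk-common.h.
 */
#include <linux/pinctrl/pinctrl.h>	/* struct pinctrl_pin_desc */

struct mtk_desc_function {
	const char *name;	/* e.g. "JTMS"; mode 0 is always "GPIOxx" */
	unsigned char muxval;	/* mode number programmed into the mux field */
};

struct mtk_desc_eint {
	unsigned char eintmux;	/* mode that routes the pin to the EINT block */
	unsigned char eintnum;	/* line number inside the EINT controller,
				 * or NO_EINT_SUPPORT */
};

struct mtk_desc_pin {
	struct pinctrl_pin_desc pin;	/* number + name, from PINCTRL_PIN() */
	const char *ball;		/* package ball, e.g. "G21"; NULL when
					 * the pin is not bonded out */
	const char *chip;		/* "mt7623" */
	struct mtk_desc_eint eint;
	const struct mtk_desc_function *functions; /* zero-terminated list */
};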
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 50cab27c64d4..0bdb8fd3afd1 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -49,7 +49,6 @@
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pinctrl/pinconf-generic.h>
@@ -104,15 +103,13 @@ static int meson_get_domain_and_bank(struct meson_pinctrl *pc, unsigned int pin,
struct meson_bank **bank)
{
struct meson_domain *d;
- int i;
- for (i = 0; i < pc->data->num_domains; i++) {
- d = &pc->domains[i];
- if (pin >= d->data->pin_base &&
- pin < d->data->pin_base + d->data->num_pins) {
- *domain = d;
- return meson_get_bank(d, pin, bank);
- }
+ d = pc->domain;
+
+ if (pin >= d->data->pin_base &&
+ pin < d->data->pin_base + d->data->num_pins) {
+ *domain = d;
+ return meson_get_bank(d, pin, bank);
}
return -EINVAL;
@@ -204,7 +201,7 @@ static void meson_pmx_disable_other_groups(struct meson_pinctrl *pc,
for (j = 0; j < group->num_pins; j++) {
if (group->pins[j] == pin) {
/* We have found a group using the pin */
- domain = &pc->domains[group->domain];
+ domain = pc->domain;
regmap_update_bits(domain->reg_mux,
group->reg * 4,
BIT(group->bit), 0);
@@ -219,7 +216,7 @@ static int meson_pmx_set_mux(struct pinctrl_dev *pcdev, unsigned func_num,
struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
struct meson_pmx_func *func = &pc->data->funcs[func_num];
struct meson_pmx_group *group = &pc->data->groups[group_num];
- struct meson_domain *domain = &pc->domains[group->domain];
+ struct meson_domain *domain = pc->domain;
int i, ret = 0;
dev_dbg(pc->dev, "enable function %s, group %s\n", func->name,
@@ -537,76 +534,67 @@ static int meson_gpio_get(struct gpio_chip *chip, unsigned gpio)
static const struct of_device_id meson_pinctrl_dt_match[] = {
{
- .compatible = "amlogic,meson8-pinctrl",
- .data = &meson8_pinctrl_data,
+ .compatible = "amlogic,meson8-cbus-pinctrl",
+ .data = &meson8_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8b-cbus-pinctrl",
+ .data = &meson8b_cbus_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson8-aobus-pinctrl",
+ .data = &meson8_aobus_pinctrl_data,
},
{
- .compatible = "amlogic,meson8b-pinctrl",
- .data = &meson8b_pinctrl_data,
+ .compatible = "amlogic,meson8b-aobus-pinctrl",
+ .data = &meson8b_aobus_pinctrl_data,
},
{ },
};
-MODULE_DEVICE_TABLE(of, meson_pinctrl_dt_match);
static int meson_gpiolib_register(struct meson_pinctrl *pc)
{
struct meson_domain *domain;
- int i, ret;
+ int ret;
- for (i = 0; i < pc->data->num_domains; i++) {
- domain = &pc->domains[i];
-
- domain->chip.label = domain->data->name;
- domain->chip.parent = pc->dev;
- domain->chip.request = meson_gpio_request;
- domain->chip.free = meson_gpio_free;
- domain->chip.direction_input = meson_gpio_direction_input;
- domain->chip.direction_output = meson_gpio_direction_output;
- domain->chip.get = meson_gpio_get;
- domain->chip.set = meson_gpio_set;
- domain->chip.base = domain->data->pin_base;
- domain->chip.ngpio = domain->data->num_pins;
- domain->chip.can_sleep = false;
- domain->chip.of_node = domain->of_node;
- domain->chip.of_gpio_n_cells = 2;
-
- ret = gpiochip_add_data(&domain->chip, domain);
- if (ret) {
- dev_err(pc->dev, "can't add gpio chip %s\n",
- domain->data->name);
- goto fail;
- }
+ domain = pc->domain;
+
+ domain->chip.label = domain->data->name;
+ domain->chip.parent = pc->dev;
+ domain->chip.request = meson_gpio_request;
+ domain->chip.free = meson_gpio_free;
+ domain->chip.direction_input = meson_gpio_direction_input;
+ domain->chip.direction_output = meson_gpio_direction_output;
+ domain->chip.get = meson_gpio_get;
+ domain->chip.set = meson_gpio_set;
+ domain->chip.base = domain->data->pin_base;
+ domain->chip.ngpio = domain->data->num_pins;
+ domain->chip.can_sleep = false;
+ domain->chip.of_node = domain->of_node;
+ domain->chip.of_gpio_n_cells = 2;
+
+ ret = gpiochip_add_data(&domain->chip, domain);
+ if (ret) {
+ dev_err(pc->dev, "can't add gpio chip %s\n",
+ domain->data->name);
+ goto fail;
+ }
- ret = gpiochip_add_pin_range(&domain->chip, dev_name(pc->dev),
- 0, domain->data->pin_base,
- domain->chip.ngpio);
- if (ret) {
- dev_err(pc->dev, "can't add pin range\n");
- goto fail;
- }
+ ret = gpiochip_add_pin_range(&domain->chip, dev_name(pc->dev),
+ 0, domain->data->pin_base,
+ domain->chip.ngpio);
+ if (ret) {
+ dev_err(pc->dev, "can't add pin range\n");
+ goto fail;
}
return 0;
fail:
- for (i--; i >= 0; i--)
- gpiochip_remove(&pc->domains[i].chip);
+ gpiochip_remove(&pc->domain->chip);
return ret;
}
-static struct meson_domain_data *meson_get_domain_data(struct meson_pinctrl *pc,
- struct device_node *np)
-{
- int i;
-
- for (i = 0; i < pc->data->num_domains; i++) {
- if (!strcmp(np->name, pc->data->domain_data[i].name))
- return &pc->data->domain_data[i];
- }
-
- return NULL;
-}
-
static struct regmap_config meson_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -643,7 +631,7 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
{
struct device_node *np;
struct meson_domain *domain;
- int i = 0, num_domains = 0;
+ int num_domains = 0;
for_each_child_of_node(node, np) {
if (!of_find_property(np, "gpio-controller", NULL))
@@ -651,29 +639,22 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
num_domains++;
}
- if (num_domains != pc->data->num_domains) {
+ if (num_domains != 1) {
dev_err(pc->dev, "wrong number of subnodes\n");
return -EINVAL;
}
- pc->domains = devm_kzalloc(pc->dev, num_domains *
- sizeof(struct meson_domain), GFP_KERNEL);
- if (!pc->domains)
+ pc->domain = devm_kzalloc(pc->dev, sizeof(struct meson_domain), GFP_KERNEL);
+ if (!pc->domain)
return -ENOMEM;
+ domain = pc->domain;
+ domain->data = pc->data->domain_data;
+
for_each_child_of_node(node, np) {
if (!of_find_property(np, "gpio-controller", NULL))
continue;
- domain = &pc->domains[i];
-
- domain->data = meson_get_domain_data(pc, np);
- if (!domain->data) {
- dev_err(pc->dev, "domain data not found for node %s\n",
- np->name);
- return -ENODEV;
- }
-
domain->of_node = np;
domain->reg_mux = meson_map_resource(pc, np, "mux");
@@ -699,7 +680,7 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
return PTR_ERR(domain->reg_gpio);
}
- i++;
+ break;
}
return 0;
@@ -718,7 +699,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
pc->dev = dev;
match = of_match_node(meson_pinctrl_dt_match, pdev->dev.of_node);
- pc->data = (struct meson_pinctrl_data *)match->data;
+ pc->data = (struct meson_pinctrl_data *) match->data;
ret = meson_pinctrl_parse_dt(pc, pdev->dev.of_node);
if (ret)
@@ -754,8 +735,4 @@ static struct platform_driver meson_pinctrl_driver = {
.of_match_table = meson_pinctrl_dt_match,
},
};
-module_platform_driver(meson_pinctrl_driver);
-
-MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
-MODULE_DESCRIPTION("Amlogic Meson pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(meson_pinctrl_driver);
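The net effect of this refactoring on pinctrl-meson.c: a pinctrl instance no longer iterates an array of domains; each device-tree node, matched by one of the four new per-bus compatibles, probes its own instance that owns exactly one domain, and the driver is now built in rather than modular (hence the dropped module.h include and MODULE_* macros). A simplified sketch of the resulting probe flow, with error handling and the later gpiolib/pinctrl registration steps elided — not the literal driver code:

/* Sketch only: condensed from the hunks above, details elided. */
static int meson_pinctrl_probe_sketch(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct meson_pinctrl *pc;

	pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;
	pc->dev = &pdev->dev;

	/* One compatible, e.g. "amlogic,meson8-aobus-pinctrl", now maps
	 * to exactly one domain's meson_pinctrl_data... */
	match = of_match_node(meson_pinctrl_dt_match, pdev->dev.of_node);
	pc->data = (struct meson_pinctrl_data *)match->data;

	/* ...so parse_dt accepts exactly one gpio-controller child and
	 * fills the single pc->domain instead of an array. */
	return meson_pinctrl_parse_dt(pc, pdev->dev.of_node);
}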
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 0fe7d53849ce..9c93e0d494a3 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -34,7 +34,6 @@ struct meson_pmx_group {
bool is_gpio;
unsigned int reg;
unsigned int bit;
- unsigned int domain;
};
/**
@@ -144,7 +143,6 @@ struct meson_pinctrl_data {
unsigned int num_pins;
unsigned int num_groups;
unsigned int num_funcs;
- unsigned int num_domains;
};
struct meson_pinctrl {
@@ -152,7 +150,7 @@ struct meson_pinctrl {
struct pinctrl_dev *pcdev;
struct pinctrl_desc desc;
struct meson_pinctrl_data *data;
- struct meson_domain *domains;
+ struct meson_domain *domain;
};
#define PIN(x, b) (b + x)
@@ -164,7 +162,6 @@ struct meson_pinctrl {
.num_pins = ARRAY_SIZE(grp ## _pins), \
.reg = r, \
.bit = b, \
- .domain = 0, \
}
#define GPIO_GROUP(gpio, b) \
@@ -175,16 +172,6 @@ struct meson_pinctrl {
.is_gpio = true, \
}
-#define GROUP_AO(grp, r, b) \
- { \
- .name = #grp, \
- .pins = grp ## _pins, \
- .num_pins = ARRAY_SIZE(grp ## _pins), \
- .reg = r, \
- .bit = b, \
- .domain = 1, \
- }
-
#define FUNCTION(fn) \
{ \
.name = #fn, \
@@ -208,5 +195,7 @@ struct meson_pinctrl {
#define MESON_PIN(x, b) PINCTRL_PIN(PIN(x, b), #x)
-extern struct meson_pinctrl_data meson8_pinctrl_data;
-extern struct meson_pinctrl_data meson8b_pinctrl_data;
+extern struct meson_pinctrl_data meson8_cbus_pinctrl_data;
+extern struct meson_pinctrl_data meson8_aobus_pinctrl_data;
+extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data;
+extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data;
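With the .domain member gone from struct meson_pmx_group, GROUP() and the removed GROUP_AO() become identical, which is why the AO groups in the SoC files below can switch to plain GROUP(). For instance, GROUP(uart_tx_ao_a, 0, 12) now expands to:

{
	.name = "uart_tx_ao_a",
	.pins = uart_tx_ao_a_pins,
	.num_pins = ARRAY_SIZE(uart_tx_ao_a_pins),
	.reg = 0,
	.bit = 12,
	/* no .domain: the owning bus is implied by which per-SoC
	 * groups array the entry lives in */
}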
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 7b1cc91733ef..32de191e0807 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -16,7 +16,7 @@
#define AO_OFF 120
-static const struct pinctrl_pin_desc meson8_pins[] = {
+static const struct pinctrl_pin_desc meson8_cbus_pins[] = {
MESON_PIN(GPIOX_0, 0),
MESON_PIN(GPIOX_1, 0),
MESON_PIN(GPIOX_2, 0),
@@ -137,6 +137,9 @@ static const struct pinctrl_pin_desc meson8_pins[] = {
MESON_PIN(BOOT_16, 0),
MESON_PIN(BOOT_17, 0),
MESON_PIN(BOOT_18, 0),
+};
+
+static const struct pinctrl_pin_desc meson8_aobus_pins[] = {
MESON_PIN(GPIOAO_0, AO_OFF),
MESON_PIN(GPIOAO_1, AO_OFF),
MESON_PIN(GPIOAO_2, AO_OFF),
@@ -379,7 +382,7 @@ static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
-static struct meson_pmx_group meson8_groups[] = {
+static struct meson_pmx_group meson8_cbus_groups[] = {
GPIO_GROUP(GPIOX_0, 0),
GPIO_GROUP(GPIOX_1, 0),
GPIO_GROUP(GPIOX_2, 0),
@@ -474,22 +477,6 @@ static struct meson_pmx_group meson8_groups[] = {
GPIO_GROUP(GPIOZ_12, 0),
GPIO_GROUP(GPIOZ_13, 0),
GPIO_GROUP(GPIOZ_14, 0),
- GPIO_GROUP(GPIOAO_0, AO_OFF),
- GPIO_GROUP(GPIOAO_1, AO_OFF),
- GPIO_GROUP(GPIOAO_2, AO_OFF),
- GPIO_GROUP(GPIOAO_3, AO_OFF),
- GPIO_GROUP(GPIOAO_4, AO_OFF),
- GPIO_GROUP(GPIOAO_5, AO_OFF),
- GPIO_GROUP(GPIOAO_6, AO_OFF),
- GPIO_GROUP(GPIOAO_7, AO_OFF),
- GPIO_GROUP(GPIOAO_8, AO_OFF),
- GPIO_GROUP(GPIOAO_9, AO_OFF),
- GPIO_GROUP(GPIOAO_10, AO_OFF),
- GPIO_GROUP(GPIOAO_11, AO_OFF),
- GPIO_GROUP(GPIOAO_12, AO_OFF),
- GPIO_GROUP(GPIOAO_13, AO_OFF),
- GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
- GPIO_GROUP(GPIO_TEST_N, AO_OFF),
/* bank X */
GROUP(sd_d0_a, 8, 5),
@@ -675,26 +662,45 @@ static struct meson_pmx_group meson8_groups[] = {
GROUP(sdxc_d0_b, 2, 7),
GROUP(sdxc_clk_b, 2, 5),
GROUP(sdxc_cmd_b, 2, 4),
+};
+
+static struct meson_pmx_group meson8_aobus_groups[] = {
+ GPIO_GROUP(GPIOAO_0, AO_OFF),
+ GPIO_GROUP(GPIOAO_1, AO_OFF),
+ GPIO_GROUP(GPIOAO_2, AO_OFF),
+ GPIO_GROUP(GPIOAO_3, AO_OFF),
+ GPIO_GROUP(GPIOAO_4, AO_OFF),
+ GPIO_GROUP(GPIOAO_5, AO_OFF),
+ GPIO_GROUP(GPIOAO_6, AO_OFF),
+ GPIO_GROUP(GPIOAO_7, AO_OFF),
+ GPIO_GROUP(GPIOAO_8, AO_OFF),
+ GPIO_GROUP(GPIOAO_9, AO_OFF),
+ GPIO_GROUP(GPIOAO_10, AO_OFF),
+ GPIO_GROUP(GPIOAO_11, AO_OFF),
+ GPIO_GROUP(GPIOAO_12, AO_OFF),
+ GPIO_GROUP(GPIOAO_13, AO_OFF),
+ GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
+ GPIO_GROUP(GPIO_TEST_N, AO_OFF),
/* bank AO */
- GROUP_AO(uart_tx_ao_a, 0, 12),
- GROUP_AO(uart_rx_ao_a, 0, 11),
- GROUP_AO(uart_cts_ao_a, 0, 10),
- GROUP_AO(uart_rts_ao_a, 0, 9),
+ GROUP(uart_tx_ao_a, 0, 12),
+ GROUP(uart_rx_ao_a, 0, 11),
+ GROUP(uart_cts_ao_a, 0, 10),
+ GROUP(uart_rts_ao_a, 0, 9),
- GROUP_AO(remote_input, 0, 0),
+ GROUP(remote_input, 0, 0),
- GROUP_AO(i2c_slave_sck_ao, 0, 2),
- GROUP_AO(i2c_slave_sda_ao, 0, 1),
+ GROUP(i2c_slave_sck_ao, 0, 2),
+ GROUP(i2c_slave_sda_ao, 0, 1),
- GROUP_AO(uart_tx_ao_b0, 0, 26),
- GROUP_AO(uart_rx_ao_b0, 0, 25),
+ GROUP(uart_tx_ao_b0, 0, 26),
+ GROUP(uart_rx_ao_b0, 0, 25),
- GROUP_AO(uart_tx_ao_b1, 0, 24),
- GROUP_AO(uart_rx_ao_b1, 0, 23),
+ GROUP(uart_tx_ao_b1, 0, 24),
+ GROUP(uart_rx_ao_b1, 0, 23),
- GROUP_AO(i2c_mst_sck_ao, 0, 6),
- GROUP_AO(i2c_mst_sda_ao, 0, 5),
+ GROUP(i2c_mst_sck_ao, 0, 6),
+ GROUP(i2c_mst_sda_ao, 0, 5),
};
static const char * const gpio_groups[] = {
@@ -872,7 +878,7 @@ static const char * const i2c_mst_ao_groups[] = {
"i2c_mst_sck_ao", "i2c_mst_sda_ao"
};
-static struct meson_pmx_func meson8_functions[] = {
+static struct meson_pmx_func meson8_cbus_functions[] = {
FUNCTION(gpio),
FUNCTION(sd_a),
FUNCTION(sdxc_a),
@@ -899,6 +905,9 @@ static struct meson_pmx_func meson8_functions[] = {
FUNCTION(nor),
FUNCTION(sd_b),
FUNCTION(sdxc_b),
+};
+
+static struct meson_pmx_func meson8_aobus_functions[] = {
FUNCTION(uart_ao),
FUNCTION(remote),
FUNCTION(i2c_slave_ao),
@@ -906,7 +915,7 @@ static struct meson_pmx_func meson8_functions[] = {
FUNCTION(i2c_mst_ao),
};
-static struct meson_bank meson8_banks[] = {
+static struct meson_bank meson8_cbus_banks[] = {
/* name first last pullen pull dir out in */
BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_16, 0), 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
@@ -917,35 +926,43 @@ static struct meson_bank meson8_banks[] = {
BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
};
-static struct meson_bank meson8_ao_banks[] = {
+static struct meson_bank meson8_aobus_banks[] = {
/* name first last pullen pull dir out in */
BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-static struct meson_domain_data meson8_domain_data[] = {
- {
- .name = "banks",
- .banks = meson8_banks,
- .num_banks = ARRAY_SIZE(meson8_banks),
- .pin_base = 0,
- .num_pins = 120,
- },
- {
- .name = "ao-bank",
- .banks = meson8_ao_banks,
- .num_banks = ARRAY_SIZE(meson8_ao_banks),
- .pin_base = 120,
- .num_pins = 16,
- },
-};
-
-struct meson_pinctrl_data meson8_pinctrl_data = {
- .pins = meson8_pins,
- .groups = meson8_groups,
- .funcs = meson8_functions,
- .domain_data = meson8_domain_data,
- .num_pins = ARRAY_SIZE(meson8_pins),
- .num_groups = ARRAY_SIZE(meson8_groups),
- .num_funcs = ARRAY_SIZE(meson8_functions),
- .num_domains = ARRAY_SIZE(meson8_domain_data),
+static struct meson_domain_data meson8_cbus_domain_data = {
+ .name = "cbus-banks",
+ .banks = meson8_cbus_banks,
+ .num_banks = ARRAY_SIZE(meson8_cbus_banks),
+ .pin_base = 0,
+ .num_pins = 120,
+};
+
+static struct meson_domain_data meson8_aobus_domain_data = {
+ .name = "ao-bank",
+ .banks = meson8_aobus_banks,
+ .num_banks = ARRAY_SIZE(meson8_aobus_banks),
+ .pin_base = 120,
+ .num_pins = 16,
+};
+
+struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
+ .pins = meson8_cbus_pins,
+ .groups = meson8_cbus_groups,
+ .funcs = meson8_cbus_functions,
+ .domain_data = &meson8_cbus_domain_data,
+ .num_pins = ARRAY_SIZE(meson8_cbus_pins),
+ .num_groups = ARRAY_SIZE(meson8_cbus_groups),
+ .num_funcs = ARRAY_SIZE(meson8_cbus_functions),
+};
+
+struct meson_pinctrl_data meson8_aobus_pinctrl_data = {
+ .pins = meson8_aobus_pins,
+ .groups = meson8_aobus_groups,
+ .funcs = meson8_aobus_functions,
+ .domain_data = &meson8_aobus_domain_data,
+ .num_pins = ARRAY_SIZE(meson8_aobus_pins),
+ .num_groups = ARRAY_SIZE(meson8_aobus_groups),
+ .num_funcs = ARRAY_SIZE(meson8_aobus_functions),
};
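The AO_OFF arithmetic ties the two meson8 datasets together: PIN(x, b) is simply b + x (see pinctrl-meson.h above), so with AO_OFF = 120 the 16-pin aobus domain begins exactly where the 120-pin cbus domain ends, keeping global GPIO numbering contiguous across the two pinctrl instances. A standalone illustration of the arithmetic; the local indices 0 and 15 for GPIOAO_0 and GPIO_TEST_N are assumptions consistent with num_pins = 16:

#include <stdio.h>

#define PIN(x, b) ((b) + (x))	/* mirrors pinctrl-meson.h */
#define AO_OFF 120		/* meson8: cbus covers pins 0..119 */

int main(void)
{
	printf("GPIOAO_0    -> global pin %d\n", PIN(0, AO_OFF));  /* 120 */
	printf("GPIO_TEST_N -> global pin %d\n", PIN(15, AO_OFF)); /* 135 */
	return 0;
}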
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 9677807db364..a100bcf4b17f 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -17,7 +17,7 @@
#define AO_OFF 130
-static const struct pinctrl_pin_desc meson8b_pins[] = {
+static const struct pinctrl_pin_desc meson8b_cbus_pins[] = {
MESON_PIN(GPIOX_0, 0),
MESON_PIN(GPIOX_1, 0),
MESON_PIN(GPIOX_2, 0),
@@ -107,7 +107,9 @@ static const struct pinctrl_pin_desc meson8b_pins[] = {
MESON_PIN(DIF_3_N, 0),
MESON_PIN(DIF_4_P, 0),
MESON_PIN(DIF_4_N, 0),
+};
+static const struct pinctrl_pin_desc meson8b_aobus_pins[] = {
MESON_PIN(GPIOAO_0, AO_OFF),
MESON_PIN(GPIOAO_1, AO_OFF),
MESON_PIN(GPIOAO_2, AO_OFF),
@@ -346,7 +348,7 @@ static const unsigned int eth_ref_clk_pins[] = { PIN(DIF_3_N, 0) };
static const unsigned int eth_mdc_pins[] = { PIN(DIF_4_P, 0) };
static const unsigned int eth_mdio_en_pins[] = { PIN(DIF_4_N, 0) };
-static struct meson_pmx_group meson8b_groups[] = {
+static struct meson_pmx_group meson8b_cbus_groups[] = {
GPIO_GROUP(GPIOX_0, 0),
GPIO_GROUP(GPIOX_1, 0),
GPIO_GROUP(GPIOX_2, 0),
@@ -409,23 +411,6 @@ static struct meson_pmx_group meson8b_groups[] = {
GPIO_GROUP(DIF_4_P, 0),
GPIO_GROUP(DIF_4_N, 0),
- GPIO_GROUP(GPIOAO_0, AO_OFF),
- GPIO_GROUP(GPIOAO_1, AO_OFF),
- GPIO_GROUP(GPIOAO_2, AO_OFF),
- GPIO_GROUP(GPIOAO_3, AO_OFF),
- GPIO_GROUP(GPIOAO_4, AO_OFF),
- GPIO_GROUP(GPIOAO_5, AO_OFF),
- GPIO_GROUP(GPIOAO_6, AO_OFF),
- GPIO_GROUP(GPIOAO_7, AO_OFF),
- GPIO_GROUP(GPIOAO_8, AO_OFF),
- GPIO_GROUP(GPIOAO_9, AO_OFF),
- GPIO_GROUP(GPIOAO_10, AO_OFF),
- GPIO_GROUP(GPIOAO_11, AO_OFF),
- GPIO_GROUP(GPIOAO_12, AO_OFF),
- GPIO_GROUP(GPIOAO_13, AO_OFF),
- GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
- GPIO_GROUP(GPIO_TEST_N, AO_OFF),
-
/* bank X */
GROUP(sd_d0_a, 8, 5),
GROUP(sd_d1_a, 8, 4),
@@ -572,6 +557,37 @@ static struct meson_pmx_group meson8b_groups[] = {
GROUP(sdxc_clk_b, 2, 5),
GROUP(sdxc_cmd_b, 2, 4),
+ /* bank DIF */
+ GROUP(eth_rxd1, 6, 0),
+ GROUP(eth_rxd0, 6, 1),
+ GROUP(eth_rx_dv, 6, 2),
+ GROUP(eth_rx_clk, 6, 3),
+ GROUP(eth_txd0_1, 6, 4),
+ GROUP(eth_txd1_1, 6, 5),
+ GROUP(eth_tx_en, 6, 0),
+ GROUP(eth_ref_clk, 6, 8),
+ GROUP(eth_mdc, 6, 9),
+ GROUP(eth_mdio_en, 6, 10),
+};
+
+static struct meson_pmx_group meson8b_aobus_groups[] = {
+ GPIO_GROUP(GPIOAO_0, AO_OFF),
+ GPIO_GROUP(GPIOAO_1, AO_OFF),
+ GPIO_GROUP(GPIOAO_2, AO_OFF),
+ GPIO_GROUP(GPIOAO_3, AO_OFF),
+ GPIO_GROUP(GPIOAO_4, AO_OFF),
+ GPIO_GROUP(GPIOAO_5, AO_OFF),
+ GPIO_GROUP(GPIOAO_6, AO_OFF),
+ GPIO_GROUP(GPIOAO_7, AO_OFF),
+ GPIO_GROUP(GPIOAO_8, AO_OFF),
+ GPIO_GROUP(GPIOAO_9, AO_OFF),
+ GPIO_GROUP(GPIOAO_10, AO_OFF),
+ GPIO_GROUP(GPIOAO_11, AO_OFF),
+ GPIO_GROUP(GPIOAO_12, AO_OFF),
+ GPIO_GROUP(GPIOAO_13, AO_OFF),
+ GPIO_GROUP(GPIO_BSD_EN, AO_OFF),
+ GPIO_GROUP(GPIO_TEST_N, AO_OFF),
+
/* bank AO */
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
@@ -601,18 +617,6 @@ static struct meson_pmx_group meson8b_groups[] = {
GROUP(i2s_in_ch01, 0, 13),
GROUP(i2s_ao_clk_in, 0, 15),
GROUP(i2s_lr_clk_in, 0, 14),
-
- /* bank DIF */
- GROUP(eth_rxd1, 6, 0),
- GROUP(eth_rxd0, 6, 1),
- GROUP(eth_rx_dv, 6, 2),
- GROUP(eth_rx_clk, 6, 3),
- GROUP(eth_txd0_1, 6, 4),
- GROUP(eth_txd1_1, 6, 5),
- GROUP(eth_tx_en, 6, 0),
- GROUP(eth_ref_clk, 6, 8),
- GROUP(eth_mdc, 6, 9),
- GROUP(eth_mdio_en, 6, 10),
};
static const char * const gpio_groups[] = {
@@ -694,7 +698,10 @@ static const char * const i2c_c_groups[] = {
};
static const char * const hdmi_groups[] = {
- "hdmi_hpd", "hdmi_sda", "hdmi_scl", "hdmi_cec_0",
+ "hdmi_hpd", "hdmi_sda", "hdmi_scl", "hdmi_cec_0"
+};
+
+static const char * const hdmi_cec_groups[] = {
"hdmi_cec_1"
};
@@ -770,12 +777,20 @@ static const char * const i2c_mst_ao_groups[] = {
"i2c_mst_sck_ao", "i2c_mst_sda_ao"
};
-static const char * const clk_groups[] = {
- "clk_24m_out", "clk_32k_in_out"
+static const char * const clk_24m_groups[] = {
+ "clk_24m_out"
};
-static const char * const spdif_groups[] = {
- "spdif_out_1", "spdif_out_0"
+static const char * const clk_32k_groups[] = {
+ "clk_32k_in_out"
+};
+
+static const char * const spdif_0_groups[] = {
+ "spdif_out_0"
+};
+
+static const char * const spdif_1_groups[] = {
+ "spdif_out_1"
};
static const char * const i2s_groups[] = {
@@ -789,7 +804,11 @@ static const char * const pwm_b_groups[] = {
};
static const char * const pwm_c_groups[] = {
- "pwm_c0", "pwm_c1", "pwm_c2"
+ "pwm_c0", "pwm_c1"
+};
+
+static const char * const pwm_c_ao_groups[] = {
+ "pwm_c2"
};
static const char * const pwm_d_groups[] = {
@@ -814,7 +833,7 @@ static const char * const tsin_b_groups[] = {
"tsin_d0_b", "tsin_clk_b", "tsin_sop_b", "tsin_d_valid_b"
};
-static struct meson_pmx_func meson8b_functions[] = {
+static struct meson_pmx_func meson8b_cbus_functions[] = {
FUNCTION(gpio),
FUNCTION(sd_a),
FUNCTION(sdxc_a),
@@ -837,14 +856,7 @@ static struct meson_pmx_func meson8b_functions[] = {
FUNCTION(nor),
FUNCTION(sd_b),
FUNCTION(sdxc_b),
- FUNCTION(uart_ao),
- FUNCTION(remote),
- FUNCTION(i2c_slave_ao),
- FUNCTION(uart_ao_b),
- FUNCTION(i2c_mst_ao),
- FUNCTION(clk),
- FUNCTION(spdif),
- FUNCTION(i2s),
+ FUNCTION(spdif_0),
FUNCTION(pwm_b),
FUNCTION(pwm_c),
FUNCTION(pwm_d),
@@ -852,9 +864,23 @@ static struct meson_pmx_func meson8b_functions[] = {
FUNCTION(pwm_vs),
FUNCTION(tsin_a),
FUNCTION(tsin_b),
+ FUNCTION(clk_24m),
+};
+
+static struct meson_pmx_func meson8b_aobus_functions[] = {
+ FUNCTION(uart_ao),
+ FUNCTION(uart_ao_b),
+ FUNCTION(i2c_slave_ao),
+ FUNCTION(i2c_mst_ao),
+ FUNCTION(i2s),
+ FUNCTION(remote),
+ FUNCTION(clk_32k),
+ FUNCTION(pwm_c_ao),
+ FUNCTION(spdif_1),
+ FUNCTION(hdmi_cec),
};
-static struct meson_bank meson8b_banks[] = {
+static struct meson_bank meson8b_cbus_banks[] = {
/* name first last pullen pull dir out in */
BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_14, 0), 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
@@ -865,35 +891,43 @@ static struct meson_bank meson8b_banks[] = {
BANK("DIF", PIN(DIF_0_P, 0), PIN(DIF_4_N, 0), 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
};
-static struct meson_bank meson8b_ao_banks[] = {
+static struct meson_bank meson8b_aobus_banks[] = {
/* name first last pullen pull dir out in */
BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
-static struct meson_domain_data meson8b_domain_data[] = {
- {
- .name = "banks",
- .banks = meson8b_banks,
- .num_banks = ARRAY_SIZE(meson8b_banks),
- .pin_base = 0,
- .num_pins = 130,
- },
- {
- .name = "ao-bank",
- .banks = meson8b_ao_banks,
- .num_banks = ARRAY_SIZE(meson8b_ao_banks),
- .pin_base = 130,
- .num_pins = 16,
- },
-};
-
-struct meson_pinctrl_data meson8b_pinctrl_data = {
- .pins = meson8b_pins,
- .groups = meson8b_groups,
- .funcs = meson8b_functions,
- .domain_data = meson8b_domain_data,
- .num_pins = ARRAY_SIZE(meson8b_pins),
- .num_groups = ARRAY_SIZE(meson8b_groups),
- .num_funcs = ARRAY_SIZE(meson8b_functions),
- .num_domains = ARRAY_SIZE(meson8b_domain_data),
+static struct meson_domain_data meson8b_cbus_domain_data = {
+ .name = "cbus-banks",
+ .banks = meson8b_cbus_banks,
+ .num_banks = ARRAY_SIZE(meson8b_cbus_banks),
+ .pin_base = 0,
+ .num_pins = 130,
+};
+
+static struct meson_domain_data meson8b_aobus_domain_data = {
+ .name = "aobus-banks",
+ .banks = meson8b_aobus_banks,
+ .num_banks = ARRAY_SIZE(meson8b_aobus_banks),
+ .pin_base = 130,
+ .num_pins = 16,
+};
+
+struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
+ .pins = meson8b_cbus_pins,
+ .groups = meson8b_cbus_groups,
+ .funcs = meson8b_cbus_functions,
+ .domain_data = &meson8b_cbus_domain_data,
+ .num_pins = ARRAY_SIZE(meson8b_cbus_pins),
+ .num_groups = ARRAY_SIZE(meson8b_cbus_groups),
+ .num_funcs = ARRAY_SIZE(meson8b_cbus_functions),
+};
+
+struct meson_pinctrl_data meson8b_aobus_pinctrl_data = {
+ .pins = meson8b_aobus_pins,
+ .groups = meson8b_aobus_groups,
+ .funcs = meson8b_aobus_functions,
+ .domain_data = &meson8b_aobus_domain_data,
+ .num_pins = ARRAY_SIZE(meson8b_aobus_pins),
+ .num_groups = ARRAY_SIZE(meson8b_aobus_groups),
+ .num_funcs = ARRAY_SIZE(meson8b_aobus_functions),
};
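An illustrative aside (not part of this patch): with the single meson8b pinctrl data split into per-bus blobs, the shared probe code can select the right one through of_device_id match data. The compatible strings below are assumptions for illustration only; the actual bindings live outside this hunk.

/* Sketch only: binds each assumed compatible to its per-bus data blob. */
static const struct of_device_id meson8b_pinctrl_dt_match[] = {
	{
		.compatible = "amlogic,meson8b-cbus-pinctrl",
		.data = &meson8b_cbus_pinctrl_data,
	},
	{
		.compatible = "amlogic,meson8b-aobus-pinctrl",
		.data = &meson8b_aobus_pinctrl_data,
	},
	{ },
};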
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 587b222f12f3..e852048c4c04 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -287,6 +287,10 @@ static const unsigned i2c0_a_1_pins[] = { STN8815_PIN_D3, STN8815_PIN_D2 };
/* Altfunction B */
static const unsigned u1_b_1_pins[] = { STN8815_PIN_B16, STN8815_PIN_A16 };
static const unsigned i2cusb_b_1_pins[] = { STN8815_PIN_C21, STN8815_PIN_C20 };
+static const unsigned clcd_16_23_b_1_pins[] = { STN8815_PIN_AB6,
+ STN8815_PIN_AA6, STN8815_PIN_Y6, STN8815_PIN_Y5, STN8815_PIN_AA5,
+ STN8815_PIN_AB5, STN8815_PIN_AB4, STN8815_PIN_Y4 };
+
#define STN8815_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins, \
.npins = ARRAY_SIZE(a##_pins), .altsetting = b }
@@ -302,6 +306,7 @@ static const struct nmk_pingroup nmk_stn8815_groups[] = {
STN8815_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A),
STN8815_PIN_GROUP(u1_b_1, NMK_GPIO_ALT_B),
STN8815_PIN_GROUP(i2cusb_b_1, NMK_GPIO_ALT_B),
+ STN8815_PIN_GROUP(clcd_16_23_b_1, NMK_GPIO_ALT_B),
};
/* We use this macro to define the groups applicable to a function */
@@ -314,6 +319,7 @@ STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1");
STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
STN8815_FUNC_GROUPS(i2c0, "i2c0_a_1");
STN8815_FUNC_GROUPS(i2cusb, "i2cusb_b_1");
+STN8815_FUNC_GROUPS(clcd, "clcd_16_23_b_1");
#define FUNCTION(fname) \
{ \
@@ -329,6 +335,7 @@ static const struct nmk_function nmk_stn8815_functions[] = {
FUNCTION(i2c1),
FUNCTION(i2c0),
FUNCTION(i2cusb),
+ FUNCTION(clcd),
};
static const struct nmk_pinctrl_soc_data nmk_stn8815_soc = {
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352406108fa0..c8969dd49449 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
int val;
if (pull)
- pullidx = data_out ? 1 : 2;
+ pullidx = data_out ? 2 : 1;
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
gpio,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 657449431301..5c025f5b5048 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -753,8 +753,8 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
- if (IS_ERR(gpio_dev->base))
- return PTR_ERR(gpio_dev->base);
+ if (!gpio_dev->base)
+ return -ENOMEM;
irq_base = platform_get_irq(pdev, 0);
if (irq_base < 0) {
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index ee69db6ae1c7..4429312e848d 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -824,7 +824,7 @@ static struct pinctrl_desc atmel_pinctrl_desc = {
.pmxops = &atmel_pmxops,
};
-static int atmel_pctrl_suspend(struct device *dev)
+static int __maybe_unused atmel_pctrl_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
@@ -844,7 +844,7 @@ static int atmel_pctrl_suspend(struct device *dev)
return 0;
}
-static int atmel_pctrl_resume(struct device *dev)
+static int __maybe_unused atmel_pctrl_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct atmel_pioctrl *atmel_pioctrl = platform_get_drvdata(pdev);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index cf7788df0f95..741b39eaeb8b 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -127,7 +127,7 @@ struct u300_gpio_confdata {
}
/* Initial configuration */
-static const struct __initconst u300_gpio_confdata
+static const struct u300_gpio_confdata __initconst
bs335_gpio_config[U300_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
/* Port 0, pins 0-7 */
{
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index f0bebbe0682b..b1767f7e45d1 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -49,6 +49,18 @@
#define LPC18XX_SCU_FUNC_PER_PIN 8
+/* LPC18XX SCU pin interrupt select registers */
+#define LPC18XX_SCU_PINTSEL0 0xe00
+#define LPC18XX_SCU_PINTSEL1 0xe04
+#define LPC18XX_SCU_PINTSEL_VAL_MASK 0xff
+#define LPC18XX_SCU_PINTSEL_PORT_SHIFT 5
+#define LPC18XX_SCU_IRQ_PER_PINTSEL 4
+#define LPC18XX_GPIO_PINS_PER_PORT 32
+#define LPC18XX_GPIO_PIN_INT_MAX 8
+
+#define LPC18XX_SCU_PINTSEL_VAL(val, n) \
+ ((val) << (((n) % LPC18XX_SCU_IRQ_PER_PINTSEL) * 8))
+
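/*
 * Worked example (illustration only, using the constants above): pin
 * interrupt 5 lives in PINTSEL1 (5 / LPC18XX_SCU_IRQ_PER_PINTSEL == 1)
 * at byte lane 5 % LPC18XX_SCU_IRQ_PER_PINTSEL == 1, so its select
 * value is shifted up one byte:
 *
 *   LPC18XX_SCU_PINTSEL_VAL(0x25, 5) == 0x25 << 8 == 0x2500
 *
 * Hypothetical helper (not in the patch) showing the register/lane
 * arithmetic the driver code below performs inline:
 */
static inline void lpc18xx_pintsel_slot(unsigned int n, u32 *reg_off,
					u32 *shift)
{
	*reg_off = LPC18XX_SCU_PINTSEL0 +
		   (n / LPC18XX_SCU_IRQ_PER_PINTSEL) * sizeof(u32);
	*shift = (n % LPC18XX_SCU_IRQ_PER_PINTSEL) * BITS_PER_BYTE;
}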
/* LPC18xx pin types */
enum {
TYPE_ND, /* Normal-drive */
@@ -618,6 +630,25 @@ static const struct pinctrl_pin_desc lpc18xx_pins[] = {
LPC18XX_PIN(i2c0_sda, PIN_I2C0_SDA),
};
+/**
+ * enum lpc18xx_pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_GPIO_PIN_INT: route the GPIO to the GPIO pin interrupt
+ * controller.
+ */
+enum lpc18xx_pin_config_param {
+ PIN_CONFIG_GPIO_PIN_INT = PIN_CONFIG_END + 1,
+};
+
+static const struct pinconf_generic_params lpc18xx_params[] = {
+ {"nxp,gpio-pin-interrupt", PIN_CONFIG_GPIO_PIN_INT, 0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item lpc18xx_conf_items[ARRAY_SIZE(lpc18xx_params)] = {
+ PCONFDUMP(PIN_CONFIG_GPIO_PIN_INT, "gpio pin int", NULL, true),
+};
+#endif
+
static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg)
{
switch (param) {
@@ -693,7 +724,71 @@ static int lpc18xx_pconf_get_i2c0(enum pin_config_param param, int *arg, u32 reg
return 0;
}
-static int lpc18xx_pconf_get_pin(enum pin_config_param param, int *arg, u32 reg,
+static int lpc18xx_pin_to_gpio(struct pinctrl_dev *pctldev, unsigned pin)
+{
+ struct pinctrl_gpio_range *range;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+ if (!range)
+ return -EINVAL;
+
+ return pin - range->pin_base + range->base;
+}
+
+static int lpc18xx_get_pintsel(void __iomem *addr, u32 val, int *arg)
+{
+ u32 reg_val;
+ int i;
+
+ reg_val = readl(addr);
+ for (i = 0; i < LPC18XX_SCU_IRQ_PER_PINTSEL; i++) {
+ if ((reg_val & LPC18XX_SCU_PINTSEL_VAL_MASK) == val)
+ return 0;
+
+ reg_val >>= BITS_PER_BYTE;
+ *arg += 1;
+ }
+
+ return -EINVAL;
+}
+
+static u32 lpc18xx_gpio_to_pintsel_val(int gpio)
+{
+ unsigned int gpio_port, gpio_pin;
+
+ gpio_port = gpio / LPC18XX_GPIO_PINS_PER_PORT;
+ gpio_pin = gpio % LPC18XX_GPIO_PINS_PER_PORT;
+
+ return gpio_pin | (gpio_port << LPC18XX_SCU_PINTSEL_PORT_SHIFT);
+}
+
+static int lpc18xx_pconf_get_gpio_pin_int(struct pinctrl_dev *pctldev,
+ int *arg, unsigned pin)
+{
+ struct lpc18xx_scu_data *scu = pinctrl_dev_get_drvdata(pctldev);
+ int gpio, ret;
+ u32 val;
+
+ gpio = lpc18xx_pin_to_gpio(pctldev, pin);
+ if (gpio < 0)
+ return -ENOTSUPP;
+
+ val = lpc18xx_gpio_to_pintsel_val(gpio);
+
+ /*
+	 * Check if this pin has been enabled as an interrupt in either of the
+	 * two PINTSEL registers. *arg indicates the interrupt number (0-7).
+ */
+ *arg = 0;
+ ret = lpc18xx_get_pintsel(scu->base + LPC18XX_SCU_PINTSEL0, val, arg);
+ if (ret == 0)
+ return ret;
+
+ return lpc18xx_get_pintsel(scu->base + LPC18XX_SCU_PINTSEL1, val, arg);
+}
+
+static int lpc18xx_pconf_get_pin(struct pinctrl_dev *pctldev, unsigned param,
+ int *arg, u32 reg, unsigned pin,
struct lpc18xx_pin_caps *pin_cap)
{
switch (param) {
@@ -755,6 +850,9 @@ static int lpc18xx_pconf_get_pin(enum pin_config_param param, int *arg, u32 reg,
}
break;
+ case PIN_CONFIG_GPIO_PIN_INT:
+ return lpc18xx_pconf_get_gpio_pin_int(pctldev, arg, pin);
+
default:
return -ENOTSUPP;
}
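/*
 * Illustrative round trip (not part of the patch): GPIO 37 is GPIO1[5]
 * (37 / LPC18XX_GPIO_PINS_PER_PORT == 1, 37 % 32 == 5), so
 * lpc18xx_gpio_to_pintsel_val(37) returns 5 | (1 << 5) == 0x25.
 * lpc18xx_pconf_get_gpio_pin_int() then scans the four byte lanes of
 * PINTSEL0 and, failing that, PINTSEL1, incrementing *arg once per
 * lane, so a match leaves *arg holding the pin interrupt index (0-7).
 */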
@@ -794,7 +892,7 @@ static int lpc18xx_pconf_get(struct pinctrl_dev *pctldev, unsigned pin,
else if (pin_cap->type == TYPE_USB1)
ret = lpc18xx_pconf_get_usb1(param, &arg, reg);
else
- ret = lpc18xx_pconf_get_pin(param, &arg, reg, pin_cap);
+ ret = lpc18xx_pconf_get_pin(pctldev, param, &arg, reg, pin, pin_cap);
if (ret < 0)
return ret;
@@ -883,9 +981,34 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
return 0;
}
-static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
- enum pin_config_param param,
- u16 param_val, u32 *reg,
+static int lpc18xx_pconf_set_gpio_pin_int(struct pinctrl_dev *pctldev,
+ u16 param_val, unsigned pin)
+{
+ struct lpc18xx_scu_data *scu = pinctrl_dev_get_drvdata(pctldev);
+ u32 val, reg_val, reg_offset = LPC18XX_SCU_PINTSEL0;
+ int gpio;
+
+ if (param_val >= LPC18XX_GPIO_PIN_INT_MAX)
+ return -EINVAL;
+
+ gpio = lpc18xx_pin_to_gpio(pctldev, pin);
+ if (gpio < 0)
+ return -ENOTSUPP;
+
+ val = lpc18xx_gpio_to_pintsel_val(gpio);
+
+ reg_offset += (param_val / LPC18XX_SCU_IRQ_PER_PINTSEL) * sizeof(u32);
+
+ reg_val = readl(scu->base + reg_offset);
+ reg_val &= ~LPC18XX_SCU_PINTSEL_VAL(LPC18XX_SCU_PINTSEL_VAL_MASK, param_val);
+ reg_val |= LPC18XX_SCU_PINTSEL_VAL(val, param_val);
+ writel(reg_val, scu->base + reg_offset);
+
+ return 0;
+}
+
+static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, unsigned param,
+ u16 param_val, u32 *reg, unsigned pin,
struct lpc18xx_pin_caps *pin_cap)
{
switch (param) {
@@ -948,6 +1071,9 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
*reg |= param_val << LPC18XX_SCU_PIN_EHD_POS;
break;
+ case PIN_CONFIG_GPIO_PIN_INT:
+ return lpc18xx_pconf_set_gpio_pin_int(pctldev, param_val, pin);
+
default:
dev_err(pctldev->dev, "Property not supported\n");
return -ENOTSUPP;
@@ -982,7 +1108,7 @@ static int lpc18xx_pconf_set(struct pinctrl_dev *pctldev, unsigned pin,
else if (pin_cap->type == TYPE_USB1)
ret = lpc18xx_pconf_set_usb1(pctldev, param, param_val, &reg);
else
- ret = lpc18xx_pconf_set_pin(pctldev, param, param_val, &reg, pin_cap);
+ ret = lpc18xx_pconf_set_pin(pctldev, param, param_val, &reg, pin, pin_cap);
if (ret)
return ret;
@@ -1136,6 +1262,11 @@ static struct pinctrl_desc lpc18xx_scu_desc = {
.pctlops = &lpc18xx_pctl_ops,
.pmxops = &lpc18xx_pmx_ops,
.confops = &lpc18xx_pconf_ops,
+ .num_custom_params = ARRAY_SIZE(lpc18xx_params),
+ .custom_params = lpc18xx_params,
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = lpc18xx_conf_items,
+#endif
.owner = THIS_MODULE,
};
@@ -1170,9 +1301,8 @@ static int lpc18xx_create_group_func_map(struct device *dev,
u16 pins[ARRAY_SIZE(lpc18xx_pins)];
int func, ngroups, i;
- for (func = 0; func < FUNC_MAX; ngroups = 0, func++) {
-
- for (i = 0; i < ARRAY_SIZE(lpc18xx_pins); i++) {
+ for (func = 0; func < FUNC_MAX; func++) {
+ for (ngroups = 0, i = 0; i < ARRAY_SIZE(lpc18xx_pins); i++) {
if (lpc18xx_valid_pin_function(i, func))
pins[ngroups++] = i;
}
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
new file mode 100644
index 000000000000..0b07d4bdab95
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -0,0 +1,2312 @@
+/*
+ * PIC32 pinctrl driver
+ *
+ * Joshua Henderson, <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+#include "pinctrl-utils.h"
+#include "pinctrl-pic32.h"
+
+#define PINS_PER_BANK 16
+
+#define PIC32_CNCON_EDGE 11
+#define PIC32_CNCON_ON 15
+
+#define PIN_CONFIG_MICROCHIP_DIGITAL (PIN_CONFIG_END + 1)
+#define PIN_CONFIG_MICROCHIP_ANALOG (PIN_CONFIG_END + 2)
+
+static const struct pinconf_generic_params pic32_mpp_bindings[] = {
+ {"microchip,digital", PIN_CONFIG_MICROCHIP_DIGITAL, 0},
+ {"microchip,analog", PIN_CONFIG_MICROCHIP_ANALOG, 0},
+};
+
+#define GPIO_BANK_START(bank) ((bank) * PINS_PER_BANK)
+
+struct pic32_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+struct pic32_pin_group {
+ const char *name;
+ unsigned int pin;
+ struct pic32_desc_function *functions;
+};
+
+struct pic32_desc_function {
+ const char *name;
+ u32 muxreg;
+ u32 muxval;
+};
+
+struct pic32_gpio_bank {
+ void __iomem *reg_base;
+ struct gpio_chip gpio_chip;
+ struct irq_chip irq_chip;
+ struct clk *clk;
+};
+
+struct pic32_pinctrl {
+ void __iomem *reg_base;
+ struct device *dev;
+ struct pinctrl_dev *pctldev;
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ const struct pic32_function *functions;
+ unsigned int nfunctions;
+ const struct pic32_pin_group *groups;
+ unsigned int ngroups;
+ struct pic32_gpio_bank *gpio_banks;
+ unsigned int nbanks;
+ struct clk *clk;
+};
+
+static const struct pinctrl_pin_desc pic32_pins[] = {
+ PINCTRL_PIN(0, "A0"),
+ PINCTRL_PIN(1, "A1"),
+ PINCTRL_PIN(2, "A2"),
+ PINCTRL_PIN(3, "A3"),
+ PINCTRL_PIN(4, "A4"),
+ PINCTRL_PIN(5, "A5"),
+ PINCTRL_PIN(6, "A6"),
+ PINCTRL_PIN(7, "A7"),
+ PINCTRL_PIN(8, "A8"),
+ PINCTRL_PIN(9, "A9"),
+ PINCTRL_PIN(10, "A10"),
+ PINCTRL_PIN(11, "A11"),
+ PINCTRL_PIN(12, "A12"),
+ PINCTRL_PIN(13, "A13"),
+ PINCTRL_PIN(14, "A14"),
+ PINCTRL_PIN(15, "A15"),
+ PINCTRL_PIN(16, "B0"),
+ PINCTRL_PIN(17, "B1"),
+ PINCTRL_PIN(18, "B2"),
+ PINCTRL_PIN(19, "B3"),
+ PINCTRL_PIN(20, "B4"),
+ PINCTRL_PIN(21, "B5"),
+ PINCTRL_PIN(22, "B6"),
+ PINCTRL_PIN(23, "B7"),
+ PINCTRL_PIN(24, "B8"),
+ PINCTRL_PIN(25, "B9"),
+ PINCTRL_PIN(26, "B10"),
+ PINCTRL_PIN(27, "B11"),
+ PINCTRL_PIN(28, "B12"),
+ PINCTRL_PIN(29, "B13"),
+ PINCTRL_PIN(30, "B14"),
+ PINCTRL_PIN(31, "B15"),
+ PINCTRL_PIN(33, "C1"),
+ PINCTRL_PIN(34, "C2"),
+ PINCTRL_PIN(35, "C3"),
+ PINCTRL_PIN(36, "C4"),
+ PINCTRL_PIN(44, "C12"),
+ PINCTRL_PIN(45, "C13"),
+ PINCTRL_PIN(46, "C14"),
+ PINCTRL_PIN(47, "C15"),
+ PINCTRL_PIN(48, "D0"),
+ PINCTRL_PIN(49, "D1"),
+ PINCTRL_PIN(50, "D2"),
+ PINCTRL_PIN(51, "D3"),
+ PINCTRL_PIN(52, "D4"),
+ PINCTRL_PIN(53, "D5"),
+ PINCTRL_PIN(54, "D6"),
+ PINCTRL_PIN(55, "D7"),
+ PINCTRL_PIN(57, "D9"),
+ PINCTRL_PIN(58, "D10"),
+ PINCTRL_PIN(59, "D11"),
+ PINCTRL_PIN(60, "D12"),
+ PINCTRL_PIN(61, "D13"),
+ PINCTRL_PIN(62, "D14"),
+ PINCTRL_PIN(63, "D15"),
+ PINCTRL_PIN(64, "E0"),
+ PINCTRL_PIN(65, "E1"),
+ PINCTRL_PIN(66, "E2"),
+ PINCTRL_PIN(67, "E3"),
+ PINCTRL_PIN(68, "E4"),
+ PINCTRL_PIN(69, "E5"),
+ PINCTRL_PIN(70, "E6"),
+ PINCTRL_PIN(71, "E7"),
+ PINCTRL_PIN(72, "E8"),
+ PINCTRL_PIN(73, "E9"),
+ PINCTRL_PIN(80, "F0"),
+ PINCTRL_PIN(81, "F1"),
+ PINCTRL_PIN(82, "F2"),
+ PINCTRL_PIN(83, "F3"),
+ PINCTRL_PIN(84, "F4"),
+ PINCTRL_PIN(85, "F5"),
+ PINCTRL_PIN(88, "F8"),
+ PINCTRL_PIN(92, "F12"),
+ PINCTRL_PIN(93, "F13"),
+ PINCTRL_PIN(96, "G0"),
+ PINCTRL_PIN(97, "G1"),
+ PINCTRL_PIN(102, "G6"),
+ PINCTRL_PIN(103, "G7"),
+ PINCTRL_PIN(104, "G8"),
+ PINCTRL_PIN(105, "G9"),
+ PINCTRL_PIN(108, "G12"),
+ PINCTRL_PIN(109, "G13"),
+ PINCTRL_PIN(110, "G14"),
+ PINCTRL_PIN(111, "G15"),
+ PINCTRL_PIN(112, "H0"),
+ PINCTRL_PIN(113, "H1"),
+ PINCTRL_PIN(114, "H2"),
+ PINCTRL_PIN(115, "H3"),
+ PINCTRL_PIN(116, "H4"),
+ PINCTRL_PIN(117, "H5"),
+ PINCTRL_PIN(118, "H6"),
+ PINCTRL_PIN(119, "H7"),
+ PINCTRL_PIN(120, "H8"),
+ PINCTRL_PIN(121, "H9"),
+ PINCTRL_PIN(122, "H10"),
+ PINCTRL_PIN(123, "H11"),
+ PINCTRL_PIN(124, "H12"),
+ PINCTRL_PIN(125, "H13"),
+ PINCTRL_PIN(126, "H14"),
+ PINCTRL_PIN(127, "H15"),
+ PINCTRL_PIN(128, "J0"),
+ PINCTRL_PIN(129, "J1"),
+ PINCTRL_PIN(130, "J2"),
+ PINCTRL_PIN(131, "J3"),
+ PINCTRL_PIN(132, "J4"),
+ PINCTRL_PIN(133, "J5"),
+ PINCTRL_PIN(134, "J6"),
+ PINCTRL_PIN(135, "J7"),
+ PINCTRL_PIN(136, "J8"),
+ PINCTRL_PIN(137, "J9"),
+ PINCTRL_PIN(138, "J10"),
+ PINCTRL_PIN(139, "J11"),
+ PINCTRL_PIN(140, "J12"),
+ PINCTRL_PIN(141, "J13"),
+ PINCTRL_PIN(142, "J14"),
+ PINCTRL_PIN(143, "J15"),
+ PINCTRL_PIN(144, "K0"),
+ PINCTRL_PIN(145, "K1"),
+ PINCTRL_PIN(146, "K2"),
+ PINCTRL_PIN(147, "K3"),
+ PINCTRL_PIN(148, "K4"),
+ PINCTRL_PIN(149, "K5"),
+ PINCTRL_PIN(150, "K6"),
+ PINCTRL_PIN(151, "K7"),
+};
+
+static const char * const pic32_input0_group[] = {
+ "D2", "G8", "F4", "F1", "B9", "B10", "C14", "B5",
+ "C1", "D14", "G1", "A14", "D6",
+};
+
+static const char * const pic32_input1_group[] = {
+ "D3", "G7", "F5", "D11", "F0", "B1", "E5", "C13",
+ "B3", "C4", "G0", "A15", "D7",
+};
+
+static const char * const pic32_input2_group[] = {
+ "D9", "G6", "B8", "B15", "D4", "B0", "E3", "B7",
+ "F12", "D12", "F8", "C3", "E9",
+};
+
+static const char * const pic32_input3_group[] = {
+ "G9", "B14", "D0", "B6", "D5", "B2", "F3", "F13",
+ "F2", "C2", "E8",
+};
+
+static const char * const pic32_output0_group[] = {
+ "D2", "G8", "F4", "D10", "F1", "B9", "B10", "C14",
+ "B5", "C1", "D14", "G1", "A14", "D6",
+};
+
+static const char * const pic32_output0_1_group[] = {
+ "D2", "G8", "F4", "D10", "F1", "B9", "B10", "C14",
+ "B5", "C1", "D14", "G1", "A14", "D6",
+ "D3", "G7", "F5", "D11", "F0", "B1", "E5", "C13",
+ "B3", "C4", "D15", "G0", "A15", "D7",
+};
+
+static const char *const pic32_output1_group[] = {
+ "D3", "G7", "F5", "D11", "F0", "B1", "E5", "C13",
+ "B3", "C4", "D15", "G0", "A15", "D7",
+};
+
+static const char *const pic32_output1_3_group[] = {
+ "D3", "G7", "F5", "D11", "F0", "B1", "E5", "C13",
+ "B3", "C4", "D15", "G0", "A15", "D7",
+ "G9", "B14", "D0", "B6", "D5", "B2", "F3", "F13",
+ "C2", "E8", "F2",
+};
+
+static const char * const pic32_output2_group[] = {
+ "D9", "G6", "B8", "B15", "D4", "B0", "E3", "B7",
+ "F12", "D12", "F8", "C3", "E9",
+};
+
+static const char * const pic32_output2_3_group[] = {
+ "D9", "G6", "B8", "B15", "D4", "B0", "E3", "B7",
+ "F12", "D12", "F8", "C3", "E9",
+ "G9", "B14", "D0", "B6", "D5", "B2", "F3", "F13",
+ "C2", "E8", "F2",
+};
+
+static const char * const pic32_output3_group[] = {
+ "G9", "B14", "D0", "B6", "D5", "B2", "F3", "F13",
+ "C2", "E8", "F2",
+};
+
+#define FUNCTION(_name, _gr) \
+ { \
+ .name = #_name, \
+ .groups = pic32_##_gr##_group, \
+ .ngroups = ARRAY_SIZE(pic32_##_gr##_group), \
+ }
+
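/*
 * Expansion sketch (illustration only): FUNCTION(INT3, input0) below
 * becomes
 *
 *   { .name = "INT3", .groups = pic32_input0_group,
 *     .ngroups = ARRAY_SIZE(pic32_input0_group) }
 *
 * i.e. each selectable function names the pin groups it can be routed to.
 */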
+static const struct pic32_function pic32_functions[] = {
+ FUNCTION(INT3, input0),
+ FUNCTION(T2CK, input0),
+ FUNCTION(T6CK, input0),
+ FUNCTION(IC3, input0),
+ FUNCTION(IC7, input0),
+ FUNCTION(U1RX, input0),
+ FUNCTION(U2CTS, input0),
+ FUNCTION(U5RX, input0),
+ FUNCTION(U6CTS, input0),
+ FUNCTION(SDI1, input0),
+ FUNCTION(SDI3, input0),
+ FUNCTION(SDI5, input0),
+ FUNCTION(SS6IN, input0),
+ FUNCTION(REFCLKI1, input0),
+ FUNCTION(INT4, input1),
+ FUNCTION(T5CK, input1),
+ FUNCTION(T7CK, input1),
+ FUNCTION(IC4, input1),
+ FUNCTION(IC8, input1),
+ FUNCTION(U3RX, input1),
+ FUNCTION(U4CTS, input1),
+ FUNCTION(SDI2, input1),
+ FUNCTION(SDI4, input1),
+ FUNCTION(C1RX, input1),
+ FUNCTION(REFCLKI4, input1),
+ FUNCTION(INT2, input2),
+ FUNCTION(T3CK, input2),
+ FUNCTION(T8CK, input2),
+ FUNCTION(IC2, input2),
+ FUNCTION(IC5, input2),
+ FUNCTION(IC9, input2),
+ FUNCTION(U1CTS, input2),
+ FUNCTION(U2RX, input2),
+ FUNCTION(U5CTS, input2),
+ FUNCTION(SS1IN, input2),
+ FUNCTION(SS3IN, input2),
+ FUNCTION(SS4IN, input2),
+ FUNCTION(SS5IN, input2),
+ FUNCTION(C2RX, input2),
+ FUNCTION(INT1, input3),
+ FUNCTION(T4CK, input3),
+ FUNCTION(T9CK, input3),
+ FUNCTION(IC1, input3),
+ FUNCTION(IC6, input3),
+ FUNCTION(U3CTS, input3),
+ FUNCTION(U4RX, input3),
+ FUNCTION(U6RX, input3),
+ FUNCTION(SS2IN, input3),
+ FUNCTION(SDI6, input3),
+ FUNCTION(OCFA, input3),
+ FUNCTION(REFCLKI3, input3),
+ FUNCTION(U3TX, output0),
+ FUNCTION(U4RTS, output0),
+ FUNCTION(SDO1, output0_1),
+ FUNCTION(SDO2, output0_1),
+ FUNCTION(SDO3, output0_1),
+ FUNCTION(SDO5, output0_1),
+ FUNCTION(SS6OUT, output0),
+ FUNCTION(OC3, output0),
+ FUNCTION(OC6, output0),
+ FUNCTION(REFCLKO4, output0),
+ FUNCTION(C2OUT, output0),
+ FUNCTION(C1TX, output0),
+ FUNCTION(U1TX, output1),
+ FUNCTION(U2RTS, output1),
+ FUNCTION(U5TX, output1),
+ FUNCTION(U6RTS, output1),
+ FUNCTION(SDO4, output1_3),
+ FUNCTION(OC4, output1),
+ FUNCTION(OC7, output1),
+ FUNCTION(REFCLKO1, output1),
+ FUNCTION(U3RTS, output2),
+ FUNCTION(U4TX, output2),
+ FUNCTION(U6TX, output2_3),
+ FUNCTION(SS1OUT, output2),
+ FUNCTION(SS3OUT, output2),
+ FUNCTION(SS4OUT, output2),
+ FUNCTION(SS5OUT, output2),
+ FUNCTION(SDO6, output2_3),
+ FUNCTION(OC5, output2),
+ FUNCTION(OC8, output2),
+ FUNCTION(C1OUT, output2),
+ FUNCTION(REFCLKO3, output2),
+ FUNCTION(U1RTS, output3),
+ FUNCTION(U2TX, output3),
+ FUNCTION(U5RTS, output3),
+ FUNCTION(SS2OUT, output3),
+ FUNCTION(OC2, output3),
+ FUNCTION(OC1, output3),
+ FUNCTION(OC9, output3),
+ FUNCTION(C2TX, output3),
+};
+
+#define PIC32_PINCTRL_GROUP(_pin, _name, ...) \
+ { \
+ .name = #_name, \
+ .pin = _pin, \
+ .functions = (struct pic32_desc_function[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+#define PIC32_PINCTRL_FUNCTION(_name, _muxreg, _muxval) \
+ { \
+ .name = #_name, \
+ .muxreg = _muxreg, \
+ .muxval = _muxval, \
+ }
+
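/*
 * Expansion sketch (illustration only): each PIC32_PINCTRL_GROUP() entry
 * points .functions at an anonymous compound-literal array terminated by
 * the zeroed { } sentinel, e.g. PIC32_PINCTRL_GROUP(14, A14, ...) yields
 *
 *   { .name = "A14", .pin = 14,
 *     .functions = (struct pic32_desc_function[]){
 *         { .name = "INT3", .muxreg = INT3R, .muxval = 13 },
 *         ...,
 *         { } } }
 */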
+static const struct pic32_pin_group pic32_groups[] = {
+ PIC32_PINCTRL_GROUP(14, A14,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 13),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 13),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 13),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 13),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 13),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 13),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 13),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 13),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 13),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 13),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 13),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 13),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 13),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 13),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPA14R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPA14R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPA14R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPA14R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPA14R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPA14R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPA14R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPA14R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPA14R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPA14R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPA14R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPA14R, 15)),
+ PIC32_PINCTRL_GROUP(15, A15,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 13),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 13),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 13),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 13),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 13),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 13),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 13),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 13),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 13),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 13),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 13),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPA15R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPA15R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPA15R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPA15R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPA15R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPA15R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPA15R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPA15R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPA15R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPA15R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPA15R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPA15R, 15)),
+ PIC32_PINCTRL_GROUP(16, B0,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 5),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 5),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 5),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 5),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 5),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 5),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 5),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 5),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 5),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 5),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 5),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 5),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 5),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 5),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPB0R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPB0R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPB0R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPB0R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPB0R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPB0R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPB0R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPB0R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPB0R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPB0R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPB0R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPB0R, 15)),
+ PIC32_PINCTRL_GROUP(17, B1,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 5),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 5),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 5),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 5),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 5),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 5),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 5),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 5),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 5),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 5),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 5),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPB1R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPB1R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPB1R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPB1R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPB1R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPB1R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPB1R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPB1R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPB1R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPB1R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPB1R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPB1R, 15)),
+ PIC32_PINCTRL_GROUP(18, B2,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 7),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 7),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 7),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 7),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 7),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 7),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 7),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 7),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 7),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 7),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 7),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 7),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPB2R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPB2R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPB2R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPB2R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPB2R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPB2R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPB2R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPB2R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPB2R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPB2R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPB2R, 15)),
+ PIC32_PINCTRL_GROUP(19, B3,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 8),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 8),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 8),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 8),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 8),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 8),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 8),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 8),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 8),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 8),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 8),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPB3R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPB3R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPB3R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPB3R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPB3R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPB3R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPB3R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPB3R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPB3R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPB3R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPB3R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPB3R, 15)),
+ PIC32_PINCTRL_GROUP(21, B5,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 8),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 8),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 8),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 8),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 8),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 8),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 8),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 8),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 8),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 8),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 8),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 8),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 8),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 8),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPB5R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPB5R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPB5R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPB5R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPB5R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPB5R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPB5R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPB5R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPB5R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPB5R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPB5R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPB5R, 15)),
+ PIC32_PINCTRL_GROUP(22, B6,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 4),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 4),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 4),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 4),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 4),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 4),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 4),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 4),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 4),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 4),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 4),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 4),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPB6R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPB6R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPB6R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPB6R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPB6R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPB6R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPB6R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPB6R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPB6R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPB6R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPB6R, 15)),
+ PIC32_PINCTRL_GROUP(23, B7,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 7),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 7),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 7),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 7),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 7),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 7),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 7),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 7),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 7),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 7),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 7),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 7),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 7),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 7),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPB7R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPB7R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPB7R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPB7R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPB7R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPB7R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPB7R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPB7R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPB7R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPB7R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPB7R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPB7R, 15)),
+ PIC32_PINCTRL_GROUP(24, B8,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 2),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 2),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 2),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 2),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 2),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 2),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 2),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 2),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 2),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 2),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 2),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 2),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 2),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 2),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPB8R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPB8R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPB8R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPB8R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPB8R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPB8R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPB8R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPB8R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPB8R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPB8R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPB8R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPB8R, 15)),
+ PIC32_PINCTRL_GROUP(25, B9,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 5),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 5),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 5),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 5),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 5),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 5),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 5),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 5),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 5),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 5),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 5),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 5),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 5),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 5),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPB9R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPB9R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPB9R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPB9R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPB9R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPB9R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPB9R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPB9R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPB9R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPB9R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPB9R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPB9R, 15)),
+ PIC32_PINCTRL_GROUP(26, B10,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 6),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 6),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 6),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 6),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 6),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 6),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 6),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 6),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 6),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 6),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 6),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 6),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 6),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 6),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPB10R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPB10R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPB10R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPB10R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPB10R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPB10R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPB10R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPB10R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPB10R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPB10R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPB10R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPB10R, 15)),
+ PIC32_PINCTRL_GROUP(30, B14,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 2),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 2),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 2),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 2),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 2),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 2),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 2),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 2),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 2),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 2),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 2),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 2),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPB14R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPB14R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPB14R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPB14R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPB14R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPB14R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPB14R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPB14R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPB14R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPB14R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPB14R, 15)),
+ PIC32_PINCTRL_GROUP(31, B15,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 3),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 3),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 3),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 3),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 3),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 3),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 3),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 3),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 3),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 3),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 3),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 3),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 3),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 3),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPB15R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPB15R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPB15R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPB15R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPB15R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPB15R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPB15R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPB15R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPB15R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPB15R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPB15R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPB15R, 15)),
+ PIC32_PINCTRL_GROUP(33, C1,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 10),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 10),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 10),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 10),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 10),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 10),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 10),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 10),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 10),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 10),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 10),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 10),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 10),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 10),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPC1R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPC1R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPC1R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPC1R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPC1R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPC1R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPC1R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPC1R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPC1R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPC1R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPC1R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPC1R, 15)),
+ PIC32_PINCTRL_GROUP(34, C2,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 12),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 12),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 12),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 12),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 12),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 12),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 12),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 12),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 12),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 12),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 12),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPC2R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPC2R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPC2R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPC2R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPC2R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPC2R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPC2R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPC2R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPC2R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPC2R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPC2R, 15)),
+ PIC32_PINCTRL_GROUP(35, C3,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 12),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 12),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 12),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 12),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 12),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 12),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 12),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 12),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 12),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 12),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 12),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 12),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 12),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 12),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPC3R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPC3R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPC3R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPC3R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPC3R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPC3R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPC3R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPC3R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPC3R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPC3R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPC3R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPC3R, 15)),
+ PIC32_PINCTRL_GROUP(36, C4,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 10),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 10),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 10),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 10),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 10),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 10),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 10),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 10),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 10),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 10),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 10),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPC4R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPC4R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPC4R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPC4R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPC4R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPC4R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPC4R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPC4R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPC4R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPC4R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPC4R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPC4R, 15)),
+ PIC32_PINCTRL_GROUP(45, C13,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 7),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 7),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 7),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 7),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 7),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 7),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 7),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 7),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 7),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 7),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 7),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPC13R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPC13R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPC13R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPC13R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPC13R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPC13R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPC13R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPC13R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPC13R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPC13R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPC13R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPC13R, 15)),
+ PIC32_PINCTRL_GROUP(46, C14,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 7),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 7),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 7),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 7),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 7),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 7),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 7),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 7),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 7),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 7),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 7),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 7),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 7),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 7),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPC14R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPC14R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPC14R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPC14R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPC14R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPC14R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPC14R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPC14R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPC14R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPC14R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPC14R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPC14R, 15)),
+ PIC32_PINCTRL_GROUP(48, D0,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 3),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 3),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 3),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 3),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 3),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 3),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 3),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 3),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 3),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 3),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 3),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 3),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPD0R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPD0R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPD0R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPD0R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPD0R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPD0R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPD0R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPD0R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPD0R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPD0R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPD0R, 15)),
+ PIC32_PINCTRL_GROUP(50, D2,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 0),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 0),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 0),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 0),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 0),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 0),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 0),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 0),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 0),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 0),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 0),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 0),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 0),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 0),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPD2R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPD2R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPD2R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPD2R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPD2R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPD2R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPD2R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPD2R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPD2R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPD2R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPD2R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPD2R, 15)),
+ PIC32_PINCTRL_GROUP(51, D3,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 0),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 0),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 0),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 0),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 0),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 0),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 0),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 0),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 0),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 0),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 0),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPD3R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPD3R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPD3R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPD3R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPD3R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPD3R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPD3R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPD3R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPD3R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPD3R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPD3R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPD3R, 15)),
+ PIC32_PINCTRL_GROUP(52, D4,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 4),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 4),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 4),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 4),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 4),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 4),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 4),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 4),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 4),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 4),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 4),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 4),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 4),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 4),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPD4R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPD4R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPD4R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPD4R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPD4R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPD4R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPD4R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPD4R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPD4R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPD4R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPD4R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPD4R, 15)),
+ PIC32_PINCTRL_GROUP(53, D5,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 6),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 6),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 6),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 6),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 6),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 6),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 6),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 6),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 6),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 6),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 6),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 6),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPD5R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPD5R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPD5R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPD5R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPD5R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPD5R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPD5R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPD5R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPD5R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPD5R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPD5R, 15)),
+ PIC32_PINCTRL_GROUP(54, D6,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 14),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 14),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 14),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 14),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 14),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 14),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 14),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 14),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 14),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 14),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 14),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 14),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 14),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPD6R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPD6R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPD6R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPD6R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPD6R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPD6R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPD6R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPD6R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPD6R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPD6R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPD6R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPD6R, 15)),
+ PIC32_PINCTRL_GROUP(55, D7,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 14),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 14),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 14),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 14),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 14),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 14),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 14),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 14),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 14),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 14),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPD7R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPD7R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPD7R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPD7R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPD7R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPD7R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPD7R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPD7R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPD7R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPD7R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPD7R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPD7R, 15)),
+ PIC32_PINCTRL_GROUP(57, D9,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 0),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 0),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 0),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 0),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 0),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 0),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 0),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 0),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 0),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 0),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 0),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 0),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 0),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 0),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPD9R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPD9R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPD9R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPD9R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPD9R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPD9R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPD9R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPD9R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPD9R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPD9R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPD9R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPD9R, 15)),
+ PIC32_PINCTRL_GROUP(58, D10,
+ PIC32_PINCTRL_FUNCTION(U3TX, RPD10R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPD10R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPD10R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPD10R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPD10R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPD10R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPD10R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPD10R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPD10R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPD10R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPD10R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPD10R, 15)),
+ PIC32_PINCTRL_GROUP(59, D11,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 3),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 3),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 3),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 3),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 3),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 3),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 3),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 3),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 3),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 3),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 3),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPD11R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPD11R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPD11R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPD11R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPD11R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPD11R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPD11R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPD11R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPD11R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPD11R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPD11R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPD11R, 15)),
+ PIC32_PINCTRL_GROUP(60, D12,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 10),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 10),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 10),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 10),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 10),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 10),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 10),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 10),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 10),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 10),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 10),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 10),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 10),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 10),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPD12R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPD12R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPD12R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPD12R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPD12R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPD12R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPD12R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPD12R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPD12R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPD12R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPD12R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPD12R, 15)),
+ PIC32_PINCTRL_GROUP(62, D14,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 11),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 11),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 11),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 11),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 11),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 11),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 11),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 11),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 11),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 11),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 11),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 11),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 11),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 11),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPD14R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPD14R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPD14R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPD14R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPD14R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPD14R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPD14R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPD14R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPD14R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPD14R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPD14R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPD14R, 15)),
+ PIC32_PINCTRL_GROUP(63, D15,
+ PIC32_PINCTRL_FUNCTION(U1TX, RPD15R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPD15R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPD15R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPD15R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPD15R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPD15R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPD15R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPD15R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPD15R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPD15R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPD15R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPD15R, 15)),
+ PIC32_PINCTRL_GROUP(67, E3,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 6),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 6),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 6),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 6),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 6),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 6),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 6),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 6),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 6),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 6),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 6),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 6),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 6),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 6),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPE3R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPE3R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPE3R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPE3R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPE3R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPE3R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPE3R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPE3R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPE3R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPE3R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPE3R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPE3R, 15)),
+ PIC32_PINCTRL_GROUP(69, E5,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 6),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 6),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 6),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 6),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 6),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 6),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 6),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 6),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 6),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 6),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 6),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPE5R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPE5R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPE5R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPE5R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPE5R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPE5R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPE5R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPE5R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPE5R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPE5R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPE5R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPE5R, 15)),
+ PIC32_PINCTRL_GROUP(72, E8,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 13),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 13),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 13),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 13),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 13),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 13),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 13),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 13),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 13),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 13),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 13),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 13),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPE8R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPE8R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPE8R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPE8R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPE8R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPE8R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPE8R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPE8R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPE8R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPE8R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPE8R, 15)),
+ PIC32_PINCTRL_GROUP(73, E9,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 13),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 13),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 13),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 13),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 13),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 13),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 13),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 13),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 13),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 13),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 13),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 13),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 13),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 13),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPE9R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPE9R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPE9R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPE9R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPE9R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPE9R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPE9R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPE9R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPE9R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPE9R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPE9R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPE9R, 15)),
+ PIC32_PINCTRL_GROUP(80, F0,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 4),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 4),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 4),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 4),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 4),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 4),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 4),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 4),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 4),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 4),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 4),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPF0R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPF0R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPF0R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPF0R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPF0R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPF0R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPF0R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPF0R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPF0R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPF0R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPF0R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPF0R, 15)),
+ PIC32_PINCTRL_GROUP(81, F1,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 4),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 4),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 4),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 4),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 4),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 4),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 4),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 4),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 4),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 4),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 4),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 4),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 4),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 4),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPF1R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPF1R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPF1R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPF1R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPF1R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPF1R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPF1R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPF1R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPF1R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPF1R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPF1R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPF1R, 15)),
+ PIC32_PINCTRL_GROUP(82, F2,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 11),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 11),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 11),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 11),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 11),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 11),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 11),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 11),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 11),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 11),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 11),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 11),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPF2R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPF2R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPF2R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPF2R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPF2R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPF2R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPF2R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPF2R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPF2R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPF2R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPF2R, 15)),
+ PIC32_PINCTRL_GROUP(83, F3,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 8),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 8),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 8),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 8),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 8),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 8),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 8),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 8),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 8),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 8),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 8),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 8),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPF3R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPF3R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPF3R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPF3R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPF3R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPF3R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPF3R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPF3R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPF3R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPF3R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPF3R, 15)),
+ PIC32_PINCTRL_GROUP(84, F4,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 2),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 2),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 2),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 2),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 2),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 2),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 2),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 2),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 2),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 2),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 2),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 2),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 2),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 2),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPF4R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPF4R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPF4R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPF4R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPF4R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPF4R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPF4R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPF4R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPF4R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPF4R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPF4R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPF4R, 15)),
+ PIC32_PINCTRL_GROUP(85, F5,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 2),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 2),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 2),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 2),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 2),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 2),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 2),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 2),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 2),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 2),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 2),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPF5R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPF5R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPF5R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPF5R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPF5R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPF5R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPF5R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPF5R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPF5R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPF5R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPF5R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPF5R, 15)),
+ PIC32_PINCTRL_GROUP(88, F8,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 11),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 11),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 11),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 11),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 11),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 11),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 11),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 11),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 11),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 11),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 11),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 11),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 11),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 11),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPF8R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPF8R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPF8R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPF8R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPF8R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPF8R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPF8R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPF8R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPF8R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPF8R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPF8R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPF8R, 15)),
+ PIC32_PINCTRL_GROUP(92, F12,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 9),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 9),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 9),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 9),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 9),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 9),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 9),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 9),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 9),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 9),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 9),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 9),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 9),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 9),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPF12R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPF12R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPF12R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPF12R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPF12R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPF12R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPF12R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPF12R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPF12R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPF12R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPF12R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPF12R, 15)),
+ PIC32_PINCTRL_GROUP(93, F13,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 9),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 9),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 9),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 9),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 9),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 9),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 9),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 9),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 9),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 9),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 9),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 9),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPF13R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPF13R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPF13R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPF13R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPF13R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPF13R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPF13R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPF13R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPF13R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPF13R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPF13R, 15)),
+ PIC32_PINCTRL_GROUP(96, G0,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 12),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 12),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 12),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 12),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 12),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 12),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 12),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 12),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 12),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 12),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPG0R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPG0R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPG0R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPG0R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPG0R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPG0R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPG0R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPG0R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPG0R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPG0R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPG0R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPG0R, 15)),
+ PIC32_PINCTRL_GROUP(97, G1,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 12),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 12),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 12),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 12),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 12),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 12),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 12),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 12),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 12),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 12),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 12),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 12),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 12),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPG1R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPG1R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPG1R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPG1R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPG1R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPG1R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPG1R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPG1R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPG1R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPG1R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPG1R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPG1R, 15)),
+ PIC32_PINCTRL_GROUP(102, G6,
+ PIC32_PINCTRL_FUNCTION(INT2, INT2R, 1),
+ PIC32_PINCTRL_FUNCTION(T3CK, T3CKR, 1),
+ PIC32_PINCTRL_FUNCTION(T8CK, T8CKR, 1),
+ PIC32_PINCTRL_FUNCTION(IC2, IC2R, 1),
+ PIC32_PINCTRL_FUNCTION(IC5, IC5R, 1),
+ PIC32_PINCTRL_FUNCTION(IC9, IC9R, 1),
+ PIC32_PINCTRL_FUNCTION(U1CTS, U1CTSR, 1),
+ PIC32_PINCTRL_FUNCTION(U2RX, U2RXR, 1),
+ PIC32_PINCTRL_FUNCTION(U5CTS, U5CTSR, 1),
+ PIC32_PINCTRL_FUNCTION(SS1IN, SS1INR, 1),
+ PIC32_PINCTRL_FUNCTION(SS3IN, SS3INR, 1),
+ PIC32_PINCTRL_FUNCTION(SS4IN, SS4INR, 1),
+ PIC32_PINCTRL_FUNCTION(SS5IN, SS5INR, 1),
+ PIC32_PINCTRL_FUNCTION(C2RX, C2RXR, 1),
+ PIC32_PINCTRL_FUNCTION(U3RTS, RPG6R, 1),
+ PIC32_PINCTRL_FUNCTION(U4TX, RPG6R, 2),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPG6R, 4),
+ PIC32_PINCTRL_FUNCTION(SS1OUT, RPG6R, 5),
+ PIC32_PINCTRL_FUNCTION(SS3OUT, RPG6R, 7),
+ PIC32_PINCTRL_FUNCTION(SS4OUT, RPG6R, 8),
+ PIC32_PINCTRL_FUNCTION(SS5OUT, RPG6R, 9),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPG6R, 10),
+ PIC32_PINCTRL_FUNCTION(OC5, RPG6R, 11),
+ PIC32_PINCTRL_FUNCTION(OC8, RPG6R, 12),
+ PIC32_PINCTRL_FUNCTION(C1OUT, RPG6R, 14),
+ PIC32_PINCTRL_FUNCTION(REFCLKO3, RPG6R, 15)),
+ PIC32_PINCTRL_GROUP(103, G7,
+ PIC32_PINCTRL_FUNCTION(INT4, INT4R, 1),
+ PIC32_PINCTRL_FUNCTION(T5CK, T5CKR, 1),
+ PIC32_PINCTRL_FUNCTION(T7CK, T7CKR, 1),
+ PIC32_PINCTRL_FUNCTION(IC4, IC4R, 1),
+ PIC32_PINCTRL_FUNCTION(IC8, IC8R, 1),
+ PIC32_PINCTRL_FUNCTION(U3RX, U3RXR, 1),
+ PIC32_PINCTRL_FUNCTION(U4CTS, U4CTSR, 1),
+ PIC32_PINCTRL_FUNCTION(SDI2, SDI2R, 1),
+ PIC32_PINCTRL_FUNCTION(SDI4, SDI4R, 1),
+ PIC32_PINCTRL_FUNCTION(C1RX, C1RXR, 1),
+ PIC32_PINCTRL_FUNCTION(REFCLKI4, REFCLKI4R, 1),
+ PIC32_PINCTRL_FUNCTION(U1TX, RPG7R, 1),
+ PIC32_PINCTRL_FUNCTION(U2RTS, RPG7R, 2),
+ PIC32_PINCTRL_FUNCTION(U5TX, RPG7R, 3),
+ PIC32_PINCTRL_FUNCTION(U6RTS, RPG7R, 4),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPG7R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPG7R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPG7R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPG7R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPG7R, 9),
+ PIC32_PINCTRL_FUNCTION(OC4, RPG7R, 11),
+ PIC32_PINCTRL_FUNCTION(OC7, RPG7R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO1, RPG7R, 15)),
+ PIC32_PINCTRL_GROUP(104, G8,
+ PIC32_PINCTRL_FUNCTION(INT3, INT3R, 1),
+ PIC32_PINCTRL_FUNCTION(T2CK, T2CKR, 1),
+ PIC32_PINCTRL_FUNCTION(T6CK, T6CKR, 1),
+ PIC32_PINCTRL_FUNCTION(IC3, IC3R, 1),
+ PIC32_PINCTRL_FUNCTION(IC7, IC7R, 1),
+ PIC32_PINCTRL_FUNCTION(U1RX, U1RXR, 1),
+ PIC32_PINCTRL_FUNCTION(U2CTS, U2CTSR, 1),
+ PIC32_PINCTRL_FUNCTION(U5RX, U5RXR, 1),
+ PIC32_PINCTRL_FUNCTION(U6CTS, U6CTSR, 1),
+ PIC32_PINCTRL_FUNCTION(SDI1, SDI1R, 1),
+ PIC32_PINCTRL_FUNCTION(SDI3, SDI3R, 1),
+ PIC32_PINCTRL_FUNCTION(SDI5, SDI5R, 1),
+ PIC32_PINCTRL_FUNCTION(SS6IN, SS6INR, 1),
+ PIC32_PINCTRL_FUNCTION(REFCLKI1, REFCLKI1R, 1),
+ PIC32_PINCTRL_FUNCTION(U3TX, RPG8R, 1),
+ PIC32_PINCTRL_FUNCTION(U4RTS, RPG8R, 2),
+ PIC32_PINCTRL_FUNCTION(SDO1, RPG8R, 5),
+ PIC32_PINCTRL_FUNCTION(SDO2, RPG8R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO3, RPG8R, 7),
+ PIC32_PINCTRL_FUNCTION(SDO5, RPG8R, 9),
+ PIC32_PINCTRL_FUNCTION(SS6OUT, RPG8R, 10),
+ PIC32_PINCTRL_FUNCTION(OC3, RPG8R, 11),
+ PIC32_PINCTRL_FUNCTION(OC6, RPG8R, 12),
+ PIC32_PINCTRL_FUNCTION(REFCLKO4, RPG8R, 13),
+ PIC32_PINCTRL_FUNCTION(C2OUT, RPG8R, 14),
+ PIC32_PINCTRL_FUNCTION(C1TX, RPG8R, 15)),
+ PIC32_PINCTRL_GROUP(105, G9,
+ PIC32_PINCTRL_FUNCTION(INT1, INT1R, 1),
+ PIC32_PINCTRL_FUNCTION(T4CK, T4CKR, 1),
+ PIC32_PINCTRL_FUNCTION(T9CK, T9CKR, 1),
+ PIC32_PINCTRL_FUNCTION(IC1, IC1R, 1),
+ PIC32_PINCTRL_FUNCTION(IC6, IC6R, 1),
+ PIC32_PINCTRL_FUNCTION(U3CTS, U3CTSR, 1),
+ PIC32_PINCTRL_FUNCTION(U4RX, U4RXR, 1),
+ PIC32_PINCTRL_FUNCTION(U6RX, U6RXR, 1),
+ PIC32_PINCTRL_FUNCTION(SS2IN, SS2INR, 1),
+ PIC32_PINCTRL_FUNCTION(SDI6, SDI6R, 1),
+ PIC32_PINCTRL_FUNCTION(OCFA, OCFAR, 1),
+ PIC32_PINCTRL_FUNCTION(REFCLKI3, REFCLKI3R, 1),
+ PIC32_PINCTRL_FUNCTION(U1RTS, RPG9R, 1),
+ PIC32_PINCTRL_FUNCTION(U2TX, RPG9R, 2),
+ PIC32_PINCTRL_FUNCTION(U5RTS, RPG9R, 3),
+ PIC32_PINCTRL_FUNCTION(U6TX, RPG9R, 4),
+ PIC32_PINCTRL_FUNCTION(SS2OUT, RPG9R, 6),
+ PIC32_PINCTRL_FUNCTION(SDO4, RPG9R, 8),
+ PIC32_PINCTRL_FUNCTION(SDO6, RPG9R, 10),
+ PIC32_PINCTRL_FUNCTION(OC2, RPG9R, 11),
+ PIC32_PINCTRL_FUNCTION(OC1, RPG9R, 12),
+ PIC32_PINCTRL_FUNCTION(OC9, RPG9R, 13),
+ PIC32_PINCTRL_FUNCTION(C2TX, RPG9R, 15)),
+};
+
+static inline struct pic32_gpio_bank *irqd_to_bank(struct irq_data *d)
+{
+ return gpiochip_get_data(irq_data_get_irq_chip_data(d));
+}
+
+static inline struct pic32_gpio_bank *pctl_to_bank(struct pic32_pinctrl *pctl,
+ unsigned pin)
+{
+ return &pctl->gpio_banks[pin / PINS_PER_BANK];
+}
+
+static int pic32_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->ngroups;
+}
+
+static const char *pic32_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->groups[group].name;
+}
+
+static int pic32_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pctl->groups[group].pin;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops pic32_pinctrl_ops = {
+ .get_groups_count = pic32_pinctrl_get_groups_count,
+ .get_group_name = pic32_pinctrl_get_group_name,
+ .get_group_pins = pic32_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int pic32_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->nfunctions;
+}
+
+static const char *
+pic32_pinmux_get_function_name(struct pinctrl_dev *pctldev, unsigned func)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->functions[func].name;
+}
+
+static int pic32_pinmux_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned func,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->functions[func].groups;
+ *num_groups = pctl->functions[func].ngroups;
+
+ return 0;
+}
+
+static int pic32_pinmux_enable(struct pinctrl_dev *pctldev,
+ unsigned func, unsigned group)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ const struct pic32_pin_group *pg = &pctl->groups[group];
+ const struct pic32_function *pf = &pctl->functions[func];
+ const char *fname = pf->name;
+ struct pic32_desc_function *functions = pg->functions;
+
+ while (functions->name) {
+ if (!strcmp(functions->name, fname)) {
+ dev_dbg(pctl->dev,
+ "setting function %s reg 0x%x = %d\n",
+ fname, functions->muxreg, functions->muxval);
+
+ writel(functions->muxval, pctl->reg_base + functions->muxreg);
+
+ return 0;
+ }
+
+ functions++;
+ }
+
+ dev_err(pctl->dev, "cannot mux pin %u to function %u\n", group, func);
+
+ return -EINVAL;
+}
+
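Each PIC32_PINCTRL_FUNCTION entry in the tables above pairs a peripheral function with the PPS register to program and the selector value to write, so pic32_pinmux_enable() reduces every mux request to a single register write. A minimal sketch, using the U2TX entry for pin RG9 and the RPG9R offset from the header added below:

	/* Sketch only: route U2TX to remappable pin RG9 by writing its
	 * selector (2, per the group table above) to the RPG9R output
	 * PPS register.
	 */
	writel(2, pctl->reg_base + RPG9R);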
+static int pic32_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct pic32_gpio_bank *bank = gpiochip_get_data(range->gc);
+ u32 mask = BIT(offset - bank->gpio_chip.base);
+
+ dev_dbg(pctl->dev, "requesting gpio %d in bank %d with mask 0x%x\n",
+ offset, bank->gpio_chip.base, mask);
+
+ writel(mask, bank->reg_base + PIC32_CLR(ANSEL_REG));
+
+ return 0;
+}
+
+static int pic32_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(offset);
+
+ writel(mask, bank->reg_base + PIC32_SET(TRIS_REG));
+
+ return 0;
+}
+
+static int pic32_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+
+ return !!(readl(bank->reg_base + PORT_REG) & BIT(offset));
+}
+
+static void pic32_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(offset);
+
+ if (value)
+ writel(mask, bank->reg_base + PIC32_SET(PORT_REG));
+ else
+ writel(mask, bank->reg_base + PIC32_CLR(PORT_REG));
+}
+
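The get/set/direction helpers above never take a lock: the PIC32 PORT block exposes hardware set/clear alias registers, which the PIC32_SET()/PIC32_CLR() macros (defined earlier in this patch) select, so each update is a single atomic write rather than a read-modify-write cycle. A minimal sketch:

	/* Sketch only: drive pin 3 high, then low, with no RMW cycle. */
	writel(BIT(3), bank->reg_base + PIC32_SET(PORT_REG));
	writel(BIT(3), bank->reg_base + PIC32_CLR(PORT_REG));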
+static int pic32_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+ u32 mask = BIT(offset);
+
+ pic32_gpio_set(chip, offset, value);
+ writel(mask, bank->reg_base + PIC32_CLR(TRIS_REG));
+
+ return 0;
+}
+
+static int pic32_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset, bool input)
+{
+ struct gpio_chip *chip = range->gc;
+
+ if (input)
+ pic32_gpio_direction_input(chip, offset);
+ else
+ pic32_gpio_direction_output(chip, offset, 0);
+
+ return 0;
+}
+
+static const struct pinmux_ops pic32_pinmux_ops = {
+ .get_functions_count = pic32_pinmux_get_functions_count,
+ .get_function_name = pic32_pinmux_get_function_name,
+ .get_function_groups = pic32_pinmux_get_function_groups,
+ .set_mux = pic32_pinmux_enable,
+ .gpio_request_enable = pic32_gpio_request_enable,
+ .gpio_set_direction = pic32_gpio_set_direction,
+};
+
+static int pic32_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct pic32_gpio_bank *bank = pctl_to_bank(pctl, pin);
+ unsigned param = pinconf_to_config_param(*config);
+ u32 mask = BIT(pin - bank->gpio_chip.base);
+ u32 arg;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = !!(readl(bank->reg_base + CNPU_REG) & mask);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = !!(readl(bank->reg_base + CNPD_REG) & mask);
+ break;
+ case PIN_CONFIG_MICROCHIP_DIGITAL:
+ arg = !(readl(bank->reg_base + ANSEL_REG) & mask);
+ break;
+ case PIN_CONFIG_MICROCHIP_ANALOG:
+ arg = !!(readl(bank->reg_base + ANSEL_REG) & mask);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ arg = !!(readl(bank->reg_base + ODCU_REG) & mask);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ arg = !!(readl(bank->reg_base + TRIS_REG) & mask);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ arg = !(readl(bank->reg_base + TRIS_REG) & mask);
+ break;
+ default:
+ dev_err(pctl->dev, "Property %u not supported\n", param);
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int pic32_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *configs, unsigned num_configs)
+{
+ struct pic32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct pic32_gpio_bank *bank = pctl_to_bank(pctl, pin);
+ unsigned param;
+ u32 arg;
+ unsigned int i;
+ u32 offset = pin - bank->gpio_chip.base;
+ u32 mask = BIT(offset);
+
+ dev_dbg(pctl->dev, "setting pin %d bank %d mask 0x%x\n",
+ pin, bank->gpio_chip.base, mask);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ dev_dbg(pctl->dev, " pullup\n");
+			writel(mask, bank->reg_base + PIC32_SET(CNPU_REG));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ dev_dbg(pctl->dev, " pulldown\n");
+ writel(mask, bank->reg_base + PIC32_SET(CNPD_REG));
+ break;
+ case PIN_CONFIG_MICROCHIP_DIGITAL:
+ dev_dbg(pctl->dev, " digital\n");
+ writel(mask, bank->reg_base + PIC32_CLR(ANSEL_REG));
+ break;
+ case PIN_CONFIG_MICROCHIP_ANALOG:
+ dev_dbg(pctl->dev, " analog\n");
+ writel(mask, bank->reg_base + PIC32_SET(ANSEL_REG));
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ dev_dbg(pctl->dev, " opendrain\n");
+ writel(mask, bank->reg_base + PIC32_SET(ODCU_REG));
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ pic32_gpio_direction_input(&bank->gpio_chip, offset);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ pic32_gpio_direction_output(&bank->gpio_chip,
+ offset, arg);
+ break;
+ default:
+ dev_err(pctl->dev, "Property %u not supported\n",
+ param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
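These hooks are normally driven by the pinconf core rather than called directly; a minimal sketch of the packed-config format they consume, using the standard pinconf-generic packing helper:

	/* Sketch only: request a pull-up on one pin. pic32_pinconf_set()
	 * unpacks the parameter and writes the CNPU set-alias register.
	 */
	unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1);
	ret = pic32_pinconf_set(pctldev, pin, &cfg, 1);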
+static const struct pinconf_ops pic32_pinconf_ops = {
+ .pin_config_get = pic32_pinconf_get,
+ .pin_config_set = pic32_pinconf_set,
+ .is_generic = true,
+};
+
+static struct pinctrl_desc pic32_pinctrl_desc = {
+ .name = "pic32-pinctrl",
+ .pctlops = &pic32_pinctrl_ops,
+ .pmxops = &pic32_pinmux_ops,
+ .confops = &pic32_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static int pic32_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct pic32_gpio_bank *bank = gpiochip_get_data(chip);
+
+ return !!(readl(bank->reg_base + TRIS_REG) & BIT(offset));
+}
+
+static void pic32_gpio_irq_ack(struct irq_data *data)
+{
+ struct pic32_gpio_bank *bank = irqd_to_bank(data);
+
+ writel(0, bank->reg_base + CNF_REG);
+}
+
+static void pic32_gpio_irq_mask(struct irq_data *data)
+{
+ struct pic32_gpio_bank *bank = irqd_to_bank(data);
+
+ writel(BIT(PIC32_CNCON_ON), bank->reg_base + PIC32_CLR(CNCON_REG));
+}
+
+static void pic32_gpio_irq_unmask(struct irq_data *data)
+{
+ struct pic32_gpio_bank *bank = irqd_to_bank(data);
+
+ writel(BIT(PIC32_CNCON_ON), bank->reg_base + PIC32_SET(CNCON_REG));
+}
+
+static unsigned int pic32_gpio_irq_startup(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+
+ pic32_gpio_direction_input(chip, data->hwirq);
+ pic32_gpio_irq_unmask(data);
+
+ return 0;
+}
+
+static int pic32_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct pic32_gpio_bank *bank = irqd_to_bank(data);
+ u32 mask = BIT(data->hwirq);
+
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_RISING:
+ /* enable RISE */
+ writel(mask, bank->reg_base + PIC32_SET(CNEN_REG));
+ /* disable FALL */
+ writel(mask, bank->reg_base + PIC32_CLR(CNNE_REG));
+ /* enable EDGE */
+ writel(BIT(PIC32_CNCON_EDGE), bank->reg_base + PIC32_SET(CNCON_REG));
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ /* disable RISE */
+ writel(mask, bank->reg_base + PIC32_CLR(CNEN_REG));
+ /* enable FALL */
+ writel(mask, bank->reg_base + PIC32_SET(CNNE_REG));
+ /* enable EDGE */
+ writel(BIT(PIC32_CNCON_EDGE), bank->reg_base + PIC32_SET(CNCON_REG));
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ /* enable RISE */
+ writel(mask, bank->reg_base + PIC32_SET(CNEN_REG));
+ /* enable FALL */
+ writel(mask, bank->reg_base + PIC32_SET(CNNE_REG));
+ /* enable EDGE */
+ writel(BIT(PIC32_CNCON_EDGE), bank->reg_base + PIC32_SET(CNCON_REG));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_set_handler_locked(data, handle_edge_irq);
+
+ return 0;
+}
+
+static u32 pic32_gpio_get_pending(struct gpio_chip *gc, unsigned long status)
+{
+ struct pic32_gpio_bank *bank = gpiochip_get_data(gc);
+ u32 pending = 0;
+ u32 cnen_rise, cnne_fall;
+ u32 pin;
+
+ cnen_rise = readl(bank->reg_base + CNEN_REG);
+ cnne_fall = readl(bank->reg_base + CNNE_REG);
+
+ for_each_set_bit(pin, &status, BITS_PER_LONG) {
+ u32 mask = BIT(pin);
+
+		if ((mask & cnen_rise) || (mask & cnne_fall))
+ pending |= mask;
+ }
+
+ return pending;
+}
+
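As a short worked example of the filtering above: if CNEN enables rising-edge detection on pin 2 only and CNNE enables nothing, a raw status of BIT(2) | BIT(5) yields a pending mask of just BIT(2); pin 5's change notification is dropped because neither edge is enabled for it.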
+static void pic32_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct pic32_gpio_bank *bank = gpiochip_get_data(gc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long pending;
+ unsigned int pin;
+ u32 stat;
+
+ chained_irq_enter(chip, desc);
+
+ stat = readl(bank->reg_base + CNF_REG);
+ pending = pic32_gpio_get_pending(gc, stat);
+
+ for_each_set_bit(pin, &pending, BITS_PER_LONG)
+ generic_handle_irq(irq_linear_revmap(gc->irqdomain, pin));
+
+ chained_irq_exit(chip, desc);
+}
+
+#define GPIO_BANK(_bank, _npins) \
+ { \
+ .gpio_chip = { \
+ .label = "GPIO" #_bank, \
+ .request = gpiochip_generic_request, \
+ .free = gpiochip_generic_free, \
+ .get_direction = pic32_gpio_get_direction, \
+ .direction_input = pic32_gpio_direction_input, \
+ .direction_output = pic32_gpio_direction_output, \
+ .get = pic32_gpio_get, \
+ .set = pic32_gpio_set, \
+ .ngpio = _npins, \
+ .base = GPIO_BANK_START(_bank), \
+ .owner = THIS_MODULE, \
+ .can_sleep = 0, \
+ }, \
+ .irq_chip = { \
+ .name = "GPIO" #_bank, \
+ .irq_startup = pic32_gpio_irq_startup, \
+ .irq_ack = pic32_gpio_irq_ack, \
+ .irq_mask = pic32_gpio_irq_mask, \
+ .irq_unmask = pic32_gpio_irq_unmask, \
+ .irq_set_type = pic32_gpio_irq_set_type, \
+ }, \
+ }
+
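GPIO_BANK_START() is defined earlier in this patch; the mask arithmetic in pctl_to_bank() and the pinconf helpers only works if it maps a bank index linearly onto global GPIO numbers. A plausible definition, stated here purely as an assumption for illustration:

	/* Assumed for illustration only; the real macro lives elsewhere in
	 * the patch: bank N starts at GPIO number N * PINS_PER_BANK.
	 */
	#define GPIO_BANK_START(bank)	((bank) * PINS_PER_BANK)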
+static struct pic32_gpio_bank pic32_gpio_banks[] = {
+ GPIO_BANK(0, PINS_PER_BANK),
+ GPIO_BANK(1, PINS_PER_BANK),
+ GPIO_BANK(2, PINS_PER_BANK),
+ GPIO_BANK(3, PINS_PER_BANK),
+ GPIO_BANK(4, PINS_PER_BANK),
+ GPIO_BANK(5, PINS_PER_BANK),
+ GPIO_BANK(6, PINS_PER_BANK),
+ GPIO_BANK(7, PINS_PER_BANK),
+ GPIO_BANK(8, PINS_PER_BANK),
+ GPIO_BANK(9, PINS_PER_BANK),
+};
+
+static int pic32_pinctrl_probe(struct platform_device *pdev)
+{
+ struct pic32_pinctrl *pctl;
+ struct resource *res;
+ int ret;
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+ pctl->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, pctl);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pctl->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pctl->reg_base))
+ return PTR_ERR(pctl->reg_base);
+
+ pctl->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pctl->clk)) {
+ ret = PTR_ERR(pctl->clk);
+ dev_err(&pdev->dev, "clk get failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(pctl->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return ret;
+ }
+
+ pctl->pins = pic32_pins;
+ pctl->npins = ARRAY_SIZE(pic32_pins);
+ pctl->functions = pic32_functions;
+ pctl->nfunctions = ARRAY_SIZE(pic32_functions);
+ pctl->groups = pic32_groups;
+ pctl->ngroups = ARRAY_SIZE(pic32_groups);
+ pctl->gpio_banks = pic32_gpio_banks;
+ pctl->nbanks = ARRAY_SIZE(pic32_gpio_banks);
+
+ pic32_pinctrl_desc.pins = pctl->pins;
+ pic32_pinctrl_desc.npins = pctl->npins;
+ pic32_pinctrl_desc.custom_params = pic32_mpp_bindings;
+ pic32_pinctrl_desc.num_custom_params = ARRAY_SIZE(pic32_mpp_bindings);
+
+ pctl->pctldev = pinctrl_register(&pic32_pinctrl_desc, &pdev->dev, pctl);
+ if (IS_ERR(pctl->pctldev)) {
+ dev_err(&pdev->dev, "Failed to register pinctrl device\n");
+ return PTR_ERR(pctl->pctldev);
+ }
+
+ return 0;
+}
+
+static int pic32_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct pic32_gpio_bank *bank;
+ u32 id;
+ int irq, ret;
+ struct resource *res;
+
+ if (of_property_read_u32(np, "microchip,gpio-bank", &id)) {
+ dev_err(&pdev->dev, "microchip,gpio-bank property not found\n");
+ return -EINVAL;
+ }
+
+ if (id >= ARRAY_SIZE(pic32_gpio_banks)) {
+ dev_err(&pdev->dev, "invalid microchip,gpio-bank property\n");
+ return -EINVAL;
+ }
+
+ bank = &pic32_gpio_banks[id];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bank->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bank->reg_base))
+ return PTR_ERR(bank->reg_base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "irq get failed\n");
+ return irq;
+ }
+
+ bank->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(bank->clk)) {
+ ret = PTR_ERR(bank->clk);
+ dev_err(&pdev->dev, "clk get failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(bank->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return ret;
+ }
+
+ bank->gpio_chip.parent = &pdev->dev;
+ bank->gpio_chip.of_node = np;
+ ret = gpiochip_add_data(&bank->gpio_chip, bank);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add GPIO chip %u: %d\n",
+ id, ret);
+ return ret;
+ }
+
+ ret = gpiochip_irqchip_add(&bank->gpio_chip, &bank->irq_chip,
+ 0, handle_level_irq, IRQ_TYPE_NONE);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to add IRQ chip %u: %d\n",
+ id, ret);
+ gpiochip_remove(&bank->gpio_chip);
+ return ret;
+ }
+
+ gpiochip_set_chained_irqchip(&bank->gpio_chip, &bank->irq_chip,
+ irq, pic32_gpio_irq_handler);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_pinctrl_of_match[] = {
+ { .compatible = "microchip,pic32mzda-pinctrl", },
+ { },
+};
+
+static struct platform_driver pic32_pinctrl_driver = {
+ .driver = {
+ .name = "pic32-pinctrl",
+ .of_match_table = pic32_pinctrl_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pic32_pinctrl_probe,
+};
+
+static const struct of_device_id pic32_gpio_of_match[] = {
+ { .compatible = "microchip,pic32mzda-gpio", },
+ { },
+};
+
+static struct platform_driver pic32_gpio_driver = {
+ .driver = {
+ .name = "pic32-gpio",
+ .of_match_table = pic32_gpio_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pic32_gpio_probe,
+};
+
+static int __init pic32_gpio_register(void)
+{
+ return platform_driver_register(&pic32_gpio_driver);
+}
+arch_initcall(pic32_gpio_register);
+
+static int __init pic32_pinctrl_register(void)
+{
+ return platform_driver_register(&pic32_pinctrl_driver);
+}
+arch_initcall(pic32_pinctrl_register);
diff --git a/drivers/pinctrl/pinctrl-pic32.h b/drivers/pinctrl/pinctrl-pic32.h
new file mode 100644
index 000000000000..12826267dc96
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-pic32.h
@@ -0,0 +1,141 @@
+/*
+ * PIC32 pinctrl driver
+ *
+ * Joshua Henderson, <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef PINCTRL_PINCTRL_PIC32_H
+#define PINCTRL_PINCTRL_PIC32_H
+
+/* PORT Registers */
+#define ANSEL_REG 0x00
+#define TRIS_REG 0x10
+#define PORT_REG 0x20
+#define LAT_REG 0x30
+#define ODCU_REG 0x40
+#define CNPU_REG 0x50
+#define CNPD_REG 0x60
+#define CNCON_REG 0x70
+#define CNEN_REG 0x80
+#define CNSTAT_REG 0x90
+#define CNNE_REG 0xA0
+#define CNF_REG 0xB0
+
+/* Input PPS Registers */
+#define INT1R 0x04
+#define INT2R 0x08
+#define INT3R 0x0C
+#define INT4R 0x10
+#define T2CKR 0x18
+#define T3CKR 0x1C
+#define T4CKR 0x20
+#define T5CKR 0x24
+#define T6CKR 0x28
+#define T7CKR 0x2C
+#define T8CKR 0x30
+#define T9CKR 0x34
+#define IC1R 0x38
+#define IC2R 0x3C
+#define IC3R 0x40
+#define IC4R 0x44
+#define IC5R 0x48
+#define IC6R 0x4C
+#define IC7R 0x50
+#define IC8R 0x54
+#define IC9R 0x58
+#define OCFAR 0x60
+#define U1RXR 0x68
+#define U1CTSR 0x6C
+#define U2RXR 0x70
+#define U2CTSR 0x74
+#define U3RXR 0x78
+#define U3CTSR 0x7C
+#define U4RXR 0x80
+#define U4CTSR 0x84
+#define U5RXR 0x88
+#define U5CTSR 0x8C
+#define U6RXR 0x90
+#define U6CTSR 0x94
+#define SDI1R 0x9C
+#define SS1INR 0xA0
+#define SDI2R 0xA8
+#define SS2INR 0xAC
+#define SDI3R 0xB4
+#define SS3INR 0xB8
+#define SDI4R 0xC0
+#define SS4INR 0xC4
+#define SDI5R 0xCC
+#define SS5INR 0xD0
+#define SDI6R 0xD8
+#define SS6INR 0xDC
+#define C1RXR 0xE0
+#define C2RXR 0xE4
+#define REFCLKI1R 0xE8
+#define REFCLKI3R 0xF0
+#define REFCLKI4R 0xF4
+
+/* Output PPS Registers */
+#define RPA14R 0x138
+#define RPA15R 0x13C
+#define RPB0R 0x140
+#define RPB1R 0x144
+#define RPB2R 0x148
+#define RPB3R 0x14C
+#define RPB5R 0x154
+#define RPB6R 0x158
+#define RPB7R 0x15C
+#define RPB8R 0x160
+#define RPB9R 0x164
+#define RPB10R 0x168
+#define RPB14R 0x178
+#define RPB15R 0x17C
+#define RPC1R 0x184
+#define RPC2R 0x188
+#define RPC3R 0x18C
+#define RPC4R 0x190
+#define RPC13R 0x1B4
+#define RPC14R 0x1B8
+#define RPD0R 0x1C0
+#define RPD1R 0x1C4
+#define RPD2R 0x1C8
+#define RPD3R 0x1CC
+#define RPD4R 0x1D0
+#define RPD5R 0x1D4
+#define RPD6R 0x1D8
+#define RPD7R 0x1DC
+#define RPD9R 0x1E4
+#define RPD10R 0x1E8
+#define RPD11R 0x1EC
+#define RPD12R 0x1F0
+#define RPD14R 0x1F8
+#define RPD15R 0x1FC
+#define RPE3R 0x20C
+#define RPE5R 0x214
+#define RPE8R 0x220
+#define RPE9R 0x224
+#define RPF0R 0x240
+#define RPF1R 0x244
+#define RPF2R 0x248
+#define RPF3R 0x24C
+#define RPF4R 0x250
+#define RPF5R 0x254
+#define RPF8R 0x260
+#define RPF12R 0x270
+#define RPF13R 0x274
+#define RPG0R 0x280
+#define RPG1R 0x284
+#define RPG6R 0x298
+#define RPG7R 0x29C
+#define RPG8R 0x2A0
+#define RPG9R 0x2A4
+
+#endif /* PINCTRL_PINCTRL_PIC32_H */
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 856f736cb1a6..2673cd9d106e 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = {
"mfio83",
};
-static const char * const pistachio_sys_pll_lock_groups[] = {
+static const char * const pistachio_audio_pll_lock_groups[] = {
"mfio84",
};
-static const char * const pistachio_wifi_pll_lock_groups[] = {
+static const char * const pistachio_rpu_v_pll_lock_groups[] = {
"mfio85",
};
-static const char * const pistachio_bt_pll_lock_groups[] = {
+static const char * const pistachio_rpu_l_pll_lock_groups[] = {
"mfio86",
};
-static const char * const pistachio_rpu_v_pll_lock_groups[] = {
+static const char * const pistachio_sys_pll_lock_groups[] = {
"mfio87",
};
-static const char * const pistachio_rpu_l_pll_lock_groups[] = {
+static const char * const pistachio_wifi_pll_lock_groups[] = {
"mfio88",
};
-static const char * const pistachio_audio_pll_lock_groups[] = {
+static const char * const pistachio_bt_pll_lock_groups[] = {
"mfio89",
};
@@ -559,12 +559,12 @@ enum pistachio_mux_option {
PISTACHIO_FUNCTION_DREQ4,
PISTACHIO_FUNCTION_DREQ5,
PISTACHIO_FUNCTION_MIPS_PLL_LOCK,
+ PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
+ PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
+ PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
PISTACHIO_FUNCTION_SYS_PLL_LOCK,
PISTACHIO_FUNCTION_WIFI_PLL_LOCK,
PISTACHIO_FUNCTION_BT_PLL_LOCK,
- PISTACHIO_FUNCTION_RPU_V_PLL_LOCK,
- PISTACHIO_FUNCTION_RPU_L_PLL_LOCK,
- PISTACHIO_FUNCTION_AUDIO_PLL_LOCK,
PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND,
PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND,
@@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = {
FUNCTION(dreq4),
FUNCTION(dreq5),
FUNCTION(mips_pll_lock),
+ FUNCTION(audio_pll_lock),
+ FUNCTION(rpu_v_pll_lock),
+ FUNCTION(rpu_l_pll_lock),
FUNCTION(sys_pll_lock),
FUNCTION(wifi_pll_lock),
FUNCTION(bt_pll_lock),
- FUNCTION(rpu_v_pll_lock),
- FUNCTION(rpu_l_pll_lock),
- FUNCTION(audio_pll_lock),
FUNCTION(debug_raw_cca_ind),
FUNCTION(debug_ed_sec20_cca_ind),
FUNCTION(debug_ed_sec40_cca_ind),
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 183545a068ad..bf032b9b4c57 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -64,6 +64,7 @@ enum rockchip_pinctrl_type {
RK3188,
RK3288,
RK3368,
+ RK3399,
};
/**
@@ -86,6 +87,31 @@ struct rockchip_iomux {
};
/**
+ * Enum values index the rows of the rockchip_perpin_drv_list array.
+ */
+enum rockchip_pin_drv_type {
+ DRV_TYPE_IO_DEFAULT = 0,
+ DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_ONLY,
+ DRV_TYPE_IO_1V8_3V0_AUTO,
+ DRV_TYPE_IO_3V3_ONLY,
+ DRV_TYPE_MAX
+};
+
+/**
+ * @drv_type: drive-strength variant, one of enum rockchip_pin_drv_type
+ * @offset: if initialized to -1 it will be autocalculated; by specifying
+ *	    an initial offset value, the relevant source offset can be reset
+ *	    to a new value for autocalculating the following drive-strength
+ *	    registers. If the chip uses its own drv_calc_reg function to
+ *	    calculate register offsets instead, this field can be ignored.
+ */
+struct rockchip_drv {
+ enum rockchip_pin_drv_type drv_type;
+ int offset;
+};
+
+/**
* @reg_base: register base of the gpio bank
* @reg_pull: optional separate register for additional pull settings
* @clk: clock of the gpio bank
@@ -96,6 +122,7 @@ struct rockchip_iomux {
* @name: name of the bank
* @bank_num: number of the bank, to account for holes
* @iomux: array describing the 4 iomux sources of the bank
+ * @drv: array describing the 4 drive strength sources of the bank
* @valid: are all necessary informations present
* @of_node: dt node of this bank
* @drvdata: common pinctrl basedata
@@ -115,6 +142,7 @@ struct rockchip_pin_bank {
char *name;
u8 bank_num;
struct rockchip_iomux iomux[4];
+ struct rockchip_drv drv[4];
bool valid;
struct device_node *of_node;
struct rockchip_pinctrl *drvdata;
@@ -151,6 +179,47 @@ struct rockchip_pin_bank {
}, \
}
+#define PIN_BANK_DRV_FLAGS(id, pins, label, type0, type1, type2, type3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ }, \
+ .drv = { \
+ { .drv_type = type0, .offset = -1 }, \
+ { .drv_type = type1, .offset = -1 }, \
+ { .drv_type = type2, .offset = -1 }, \
+ { .drv_type = type3, .offset = -1 }, \
+ }, \
+ }
+
+#define PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(id, pins, label, iom0, iom1, \
+ iom2, iom3, drv0, drv1, drv2, \
+ drv3, offset0, offset1, \
+ offset2, offset3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .type = iom0, .offset = -1 }, \
+ { .type = iom1, .offset = -1 }, \
+ { .type = iom2, .offset = -1 }, \
+ { .type = iom3, .offset = -1 }, \
+ }, \
+ .drv = { \
+ { .drv_type = drv0, .offset = offset0 }, \
+ { .drv_type = drv1, .offset = offset1 }, \
+ { .drv_type = drv2, .offset = offset2 }, \
+ { .drv_type = drv3, .offset = offset3 }, \
+ }, \
+ }
+
/**
*/
struct rockchip_pin_ctrl {
@@ -161,6 +230,9 @@ struct rockchip_pin_ctrl {
enum rockchip_pinctrl_type type;
int grf_mux_offset;
int pmu_mux_offset;
+ int grf_drv_offset;
+ int pmu_drv_offset;
+
void (*pull_calc_reg)(struct rockchip_pin_bank *bank,
int pin_num, struct regmap **regmap,
int *reg, u8 *bit);
@@ -705,7 +777,68 @@ static void rk3368_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
}
}
-static int rockchip_perpin_drv_list[] = { 2, 4, 8, 12 };
+#define RK3399_PULL_GRF_OFFSET 0xe040
+#define RK3399_PULL_PMU_OFFSET 0x40
+#define RK3399_DRV_3BITS_PER_PIN 3
+
+static void rk3399_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+	/* Bank 0 (16 pins) and bank 1 (32 pins) are located in the PMU */
+ if ((bank->bank_num == 0) || (bank->bank_num == 1)) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3399_PULL_PMU_OFFSET;
+
+ *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3188_PULL_PINS_PER_REG;
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3399_PULL_GRF_OFFSET;
+
+ /* correct the offset, as we're starting with the 3rd bank */
+ *reg -= 0x20;
+ *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3188_PULL_PINS_PER_REG);
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+ }
+}
+
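A worked example, assuming the RK3188 pull constants defined earlier in this file (8 pins per register, 2 bits per pin, a 16-byte bank stride): bank 2, pin 7 takes the GRF branch, so reg = 0xe040 - 0x20 + 2 * 16 + (7 / 8) * 4 = 0xe040 and bit = (7 % 8) * 2 = 14.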
+static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ int drv_num = (pin_num / 8);
+
+	/* Bank 0 (16 pins) and bank 1 (32 pins) are located in the PMU */
+ if ((bank->bank_num == 0) || (bank->bank_num == 1))
+ *regmap = info->regmap_pmu;
+ else
+ *regmap = info->regmap_base;
+
+ *reg = bank->drv[drv_num].offset;
+ if ((bank->drv[drv_num].drv_type == DRV_TYPE_IO_1V8_3V0_AUTO) ||
+ (bank->drv[drv_num].drv_type == DRV_TYPE_IO_3V3_ONLY))
+ *bit = (pin_num % 8) * 3;
+ else
+ *bit = (pin_num % 8) * 2;
+}
+
+static int rockchip_perpin_drv_list[DRV_TYPE_MAX][8] = {
+ { 2, 4, 8, 12, -1, -1, -1, -1 },
+ { 3, 6, 9, 12, -1, -1, -1, -1 },
+ { 5, 10, 15, 20, -1, -1, -1, -1 },
+ { 4, 6, 8, 10, 12, 14, 16, 18 },
+ { 4, 7, 10, 13, 16, 19, 22, 26 }
+};
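The rows of this table are indexed by enum rockchip_pin_drv_type and the columns by the raw register field; entries are drive strengths in mA, with -1 marking field values that are invalid for a given pad type. As a worked example consistent with rk3399_calc_drv_reg_and_bit() above, pin 19 of a 3.3V-only pad uses bit = (19 % 8) * 3 = 9, and a field value of 3 selects:

	rockchip_perpin_drv_list[DRV_TYPE_IO_3V3_ONLY][3]	/* 13 mA */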
static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
int pin_num)
@@ -714,19 +847,74 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
struct rockchip_pin_ctrl *ctrl = info->ctrl;
struct regmap *regmap;
int reg, ret;
- u32 data;
+ u32 data, temp, rmask_bits;
u8 bit;
+ int drv_type = bank->drv[pin_num / 8].drv_type;
ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
+ switch (drv_type) {
+ case DRV_TYPE_IO_1V8_3V0_AUTO:
+ case DRV_TYPE_IO_3V3_ONLY:
+ rmask_bits = RK3399_DRV_3BITS_PER_PIN;
+ switch (bit) {
+ case 0 ... 12:
+ /* regular case, nothing to do */
+ break;
+ case 15:
+ /*
+ * drive-strength offset is special, as it is
+ * spread over 2 registers
+ */
+ ret = regmap_read(regmap, reg, &data);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(regmap, reg + 0x4, &temp);
+ if (ret)
+ return ret;
+
+ /*
+ * the bit data[15] contains bit 0 of the value
+ * while temp[1:0] contains bits 2 and 1
+ */
+ data >>= 15;
+ temp &= 0x3;
+ temp <<= 1;
+ data |= temp;
+
+ return rockchip_perpin_drv_list[drv_type][data];
+ case 18 ... 21:
+ /* setting fully enclosed in the second register */
+ reg += 4;
+ bit -= 16;
+ break;
+ default:
+ dev_err(info->dev, "unsupported bit: %d for pinctrl drive type: %d\n",
+ bit, drv_type);
+ return -EINVAL;
+ }
+
+ break;
+ case DRV_TYPE_IO_DEFAULT:
+ case DRV_TYPE_IO_1V8_OR_3V0:
+ case DRV_TYPE_IO_1V8_ONLY:
+ rmask_bits = RK3288_DRV_BITS_PER_PIN;
+ break;
+ default:
+ dev_err(info->dev, "unsupported pinctrl drive type: %d\n",
+ drv_type);
+ return -EINVAL;
+ }
+
ret = regmap_read(regmap, reg, &data);
if (ret)
return ret;
data >>= bit;
- data &= (1 << RK3288_DRV_BITS_PER_PIN) - 1;
+ data &= (1 << rmask_bits) - 1;
- return rockchip_perpin_drv_list[data];
+ return rockchip_perpin_drv_list[drv_type][data];
}
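For the split case above, a 3-bit value v = b2 b1 b0 keeps b0 in bit 15 of the first register and b2 b1 in bits [1:0] of the next one. For example, v = 5 (0b101) is stored as data[15] = 1 and (reg + 4)[1:0] = 0b10, and the read path reassembles it as (data >> 15) | ((temp & 0x3) << 1) = 1 | 4 = 5.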
static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
@@ -737,16 +925,23 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
struct regmap *regmap;
unsigned long flags;
int reg, ret, i;
- u32 data, rmask;
+ u32 data, rmask, rmask_bits, temp;
u8 bit;
+ int drv_type = bank->drv[pin_num / 8].drv_type;
+
+ dev_dbg(info->dev, "setting drive of GPIO%d-%d to %d\n",
+ bank->bank_num, pin_num, strength);
ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list); i++) {
- if (rockchip_perpin_drv_list[i] == strength) {
+ for (i = 0; i < ARRAY_SIZE(rockchip_perpin_drv_list[drv_type]); i++) {
+ if (rockchip_perpin_drv_list[drv_type][i] == strength) {
ret = i;
break;
+ } else if (rockchip_perpin_drv_list[drv_type][i] < 0) {
+ ret = rockchip_perpin_drv_list[drv_type][i];
+ break;
}
}
@@ -758,8 +953,64 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
spin_lock_irqsave(&bank->slock, flags);
+ switch (drv_type) {
+ case DRV_TYPE_IO_1V8_3V0_AUTO:
+ case DRV_TYPE_IO_3V3_ONLY:
+ rmask_bits = RK3399_DRV_3BITS_PER_PIN;
+ switch (bit) {
+ case 0 ... 12:
+ /* regular case, nothing to do */
+ break;
+ case 15:
+ /*
+ * drive-strength offset is special, as it is spread
+ * over 2 registers, the bit data[15] contains bit 0
+ * of the value while temp[1:0] contains bits 2 and 1
+ */
+ data = (ret & 0x1) << 15;
+ temp = (ret >> 0x1) & 0x3;
+
+ rmask = BIT(15) | BIT(31);
+ data |= BIT(31);
+ ret = regmap_update_bits(regmap, reg, rmask, data);
+ if (ret) {
+ spin_unlock_irqrestore(&bank->slock, flags);
+ return ret;
+ }
+
+ rmask = 0x3 | (0x3 << 16);
+ temp |= (0x3 << 16);
+ reg += 0x4;
+ ret = regmap_update_bits(regmap, reg, rmask, temp);
+
+ spin_unlock_irqrestore(&bank->slock, flags);
+ return ret;
+ case 18 ... 21:
+ /* setting fully enclosed in the second register */
+ reg += 4;
+ bit -= 16;
+ break;
+ default:
+ spin_unlock_irqrestore(&bank->slock, flags);
+ dev_err(info->dev, "unsupported bit: %d for pinctrl drive type: %d\n",
+ bit, drv_type);
+ return -EINVAL;
+ }
+ break;
+ case DRV_TYPE_IO_DEFAULT:
+ case DRV_TYPE_IO_1V8_OR_3V0:
+ case DRV_TYPE_IO_1V8_ONLY:
+ rmask_bits = RK3288_DRV_BITS_PER_PIN;
+ break;
+ default:
+ spin_unlock_irqrestore(&bank->slock, flags);
+ dev_err(info->dev, "unsupported pinctrl drive type: %d\n",
+ drv_type);
+ return -EINVAL;
+ }
+
/* enable the write to the equivalent lower bits */
- data = ((1 << RK3288_DRV_BITS_PER_PIN) - 1) << (bit + 16);
+ data = ((1 << rmask_bits) - 1) << (bit + 16);
rmask = data | (data >> 16);
data |= (ret << bit);
@@ -796,6 +1047,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3188:
case RK3288:
case RK3368:
+ case RK3399:
data >>= bit;
data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
@@ -852,6 +1104,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3188:
case RK3288:
case RK3368:
+ case RK3399:
spin_lock_irqsave(&bank->slock, flags);
/* enable the write to the equivalent lower bits */
@@ -1032,6 +1285,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3188:
case RK3288:
case RK3368:
+ case RK3399:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
}
@@ -1892,7 +2146,7 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
struct device_node *np;
struct rockchip_pin_ctrl *ctrl;
struct rockchip_pin_bank *bank;
- int grf_offs, pmu_offs, i, j;
+ int grf_offs, pmu_offs, drv_grf_offs, drv_pmu_offs, i, j;
match = of_match_node(rockchip_pinctrl_dt_match, node);
ctrl = (struct rockchip_pin_ctrl *)match->data;
@@ -1916,6 +2170,8 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
grf_offs = ctrl->grf_mux_offset;
pmu_offs = ctrl->pmu_mux_offset;
+ drv_pmu_offs = ctrl->pmu_drv_offset;
+ drv_grf_offs = ctrl->grf_drv_offset;
bank = ctrl->pin_banks;
for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
int bank_pins = 0;
@@ -1925,27 +2181,39 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
bank->pin_base = ctrl->nr_pins;
ctrl->nr_pins += bank->nr_pins;
- /* calculate iomux offsets */
+ /* calculate iomux and drv offsets */
for (j = 0; j < 4; j++) {
struct rockchip_iomux *iom = &bank->iomux[j];
+ struct rockchip_drv *drv = &bank->drv[j];
int inc;
if (bank_pins >= bank->nr_pins)
break;
- /* preset offset value, set new start value */
+ /* preset iomux offset value, set new start value */
if (iom->offset >= 0) {
if (iom->type & IOMUX_SOURCE_PMU)
pmu_offs = iom->offset;
else
grf_offs = iom->offset;
- } else { /* set current offset */
+ } else { /* set current iomux offset */
iom->offset = (iom->type & IOMUX_SOURCE_PMU) ?
pmu_offs : grf_offs;
}
- dev_dbg(d->dev, "bank %d, iomux %d has offset 0x%x\n",
- i, j, iom->offset);
+ /* preset drv offset value, set new start value */
+ if (drv->offset >= 0) {
+ if (iom->type & IOMUX_SOURCE_PMU)
+ drv_pmu_offs = drv->offset;
+ else
+ drv_grf_offs = drv->offset;
+ } else { /* set current drv offset */
+ drv->offset = (iom->type & IOMUX_SOURCE_PMU) ?
+ drv_pmu_offs : drv_grf_offs;
+ }
+
+ dev_dbg(d->dev, "bank %d, iomux %d has iom_offset 0x%x drv_offset 0x%x\n",
+ i, j, iom->offset, drv->offset);
/*
* Increase offset according to iomux width.
@@ -1957,6 +2225,21 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
else
grf_offs += inc;
+ /*
+ * Increase offset according to drv width.
+			 * 3-bit drive strengths are spread over two registers.
+ */
+ if ((drv->drv_type == DRV_TYPE_IO_1V8_3V0_AUTO) ||
+ (drv->drv_type == DRV_TYPE_IO_3V3_ONLY))
+ inc = 8;
+ else
+ inc = 4;
+
+ if (iom->type & IOMUX_SOURCE_PMU)
+ drv_pmu_offs += inc;
+ else
+ drv_grf_offs += inc;
+
bank_pins += 8;
}
}
@@ -2257,6 +2540,62 @@ static struct rockchip_pin_ctrl rk3368_pin_ctrl = {
.drv_calc_reg = rk3368_calc_drv_reg_and_bit,
};
+static struct rockchip_pin_bank rk3399_pin_banks[] = {
+ PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(0, 32, "gpio0", IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ DRV_TYPE_IO_1V8_ONLY,
+ DRV_TYPE_IO_1V8_ONLY,
+ DRV_TYPE_IO_DEFAULT,
+ DRV_TYPE_IO_DEFAULT,
+ 0x0,
+ 0x8,
+ -1,
+ -1
+ ),
+ PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(1, 32, "gpio1", IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_OR_3V0,
+ 0x20,
+ 0x28,
+ 0x30,
+ 0x38
+ ),
+ PIN_BANK_DRV_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_ONLY,
+ DRV_TYPE_IO_1V8_ONLY
+ ),
+ PIN_BANK_DRV_FLAGS(3, 32, "gpio3", DRV_TYPE_IO_3V3_ONLY,
+ DRV_TYPE_IO_3V3_ONLY,
+ DRV_TYPE_IO_3V3_ONLY,
+ DRV_TYPE_IO_1V8_OR_3V0
+ ),
+ PIN_BANK_DRV_FLAGS(4, 32, "gpio4", DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_3V0_AUTO,
+ DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_OR_3V0
+ ),
+};
+
+static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
+ .pin_banks = rk3399_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3399_pin_banks),
+ .label = "RK3399-GPIO",
+ .type = RK3399,
+ .grf_mux_offset = 0xe000,
+ .pmu_mux_offset = 0x0,
+ .grf_drv_offset = 0xe100,
+ .pmu_drv_offset = 0x80,
+ .pull_calc_reg = rk3399_calc_pull_reg_and_bit,
+ .drv_calc_reg = rk3399_calc_drv_reg_and_bit,
+};
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
{ .compatible = "rockchip,rk2928-pinctrl",
@@ -2275,6 +2614,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = (void *)&rk3288_pin_ctrl },
{ .compatible = "rockchip,rk3368-pinctrl",
.data = (void *)&rk3368_pin_ctrl },
+ { .compatible = "rockchip,rk3399-pinctrl",
+ .data = (void *)&rk3399_pin_ctrl },
{},
};
MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match);
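For reference, the drv-offset walk added above keeps a running offset per iomux and steps it by 8 bytes whenever a 3-bit drive-strength field spills into a second register, 4 bytes otherwise. A minimal userspace sketch of that walk (not from the patch; names abridged), assuming the gpio4 bank layout shown in rk3399_pin_banks:

#include <stdio.h>

enum drv_type { DRV_IO_1V8_OR_3V0, DRV_IO_1V8_3V0_AUTO, DRV_IO_3V3_ONLY };

int main(void)
{
	/* gpio4's four iomuxes, per rk3399_pin_banks above */
	enum drv_type drv[4] = { DRV_IO_1V8_OR_3V0, DRV_IO_1V8_3V0_AUTO,
				 DRV_IO_1V8_OR_3V0, DRV_IO_1V8_OR_3V0 };
	int grf_offs = 0xe100;	/* rk3399_pin_ctrl.grf_drv_offset */

	for (int j = 0; j < 4; j++) {
		printf("iomux %d: drv offset 0x%x\n", j, grf_offs);
		/* wide 3-bit types consume two registers */
		grf_offs += (drv[j] == DRV_IO_1V8_3V0_AUTO ||
			     drv[j] == DRV_IO_3V3_ONLY) ? 8 : 4;
	}
	return 0;	/* prints 0xe100, 0xe104, 0xe10c, 0xe110 */
}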
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index d24e5f1d1525..fb126d56ad40 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -255,6 +255,13 @@ static enum pin_config_param pcs_bias[] = {
};
/*
+ * This lock class tells lockdep that this single pinctrl's irqchip
+ * can be in a different category than its parent irqchips, so it
+ * won't report false recursion.
+ */
+static struct lock_class_key pcs_lock_class;
+
+/*
* REVISIT: Reads and writes could eventually use regmap or something
* generic. But at least on omaps, some mux registers are performance
* critical as they may need to be remuxed every time before and after
@@ -1713,6 +1720,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, pcs_soc);
irq_set_chip_and_handler(irq, &pcs->chip,
handle_level_irq);
+ irq_set_lockdep_class(irq, &pcs_lock_class);
irq_set_noprobe(irq);
return 0;
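The call added above gives each mapped interrupt its own lock class, so lockdep no longer sees taking the parent's and the child's irq_desc locks as recursion on a single class. A schematic fragment of the same pattern for any cascaded irqchip, not taken from this patch; all names are hypothetical, and the single-key irq_set_lockdep_class() signature is assumed as in kernels of this era:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct lock_class_key my_child_irq_lock_class;
static struct irq_chip my_irq_chip;

static int my_irqdomain_map(struct irq_domain *d, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &my_irq_chip, handle_level_irq);
	/* dedicated class: child descriptor locks no longer alias the
	 * parent irqchip's class in lockdep's eyes */
	irq_set_lockdep_class(irq, &my_child_irq_lock_class);
	irq_set_noprobe(irq);
	return 0;
}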
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index fac844a85cb4..cab66c64149f 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -985,6 +985,7 @@ static struct pinmux_ops st_pmxops = {
.get_function_groups = st_pmx_get_groups,
.set_mux = st_pmx_set_mux,
.gpio_set_direction = st_pmx_set_gpio_direction,
+ .strict = true,
};
/* Pinconf */
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 412c6b78140a..a13f2b6f6fc0 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1573,6 +1573,22 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val)
return 0;
}
+/*
+ * gpiolib gpiod_to_irq callback function.
+ * Returns the mapped IRQ (external interrupt) number for a given GPIO pin.
+ */
+static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent);
+ int i;
+
+ for (i = 0; i < info->num_exin; i++)
+ if (info->exin[i] == offset)
+ return ltq_eiu_get_irq(i);
+
+ return -1;
+}
+
static struct gpio_chip xway_chip = {
.label = "gpio-xway",
.direction_input = xway_gpio_dir_in,
@@ -1581,6 +1597,7 @@ static struct gpio_chip xway_chip = {
.set = xway_gpio_set,
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
+ .to_irq = xway_gpio_to_irq,
.base = -1,
};
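With .to_irq wired up, consumers can translate a pin into its EIU interrupt through gpiod_to_irq(). A hedged consumer-side sketch, not from the patch; the device, con-id "wake" and handler are made up for illustration:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static irqreturn_t my_wake_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_request_wake_irq(struct device *dev)
{
	struct gpio_desc *gpiod = devm_gpiod_get(dev, "wake", GPIOD_IN);
	int irq;

	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	irq = gpiod_to_irq(gpiod);	/* ends up in xway_gpio_to_irq() */
	if (irq < 0)
		return irq;		/* pin has no EIU mapping */

	return devm_request_irq(dev, irq, my_wake_handler,
				IRQF_TRIGGER_RISING, "my-wake", NULL);
}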
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index d57b5eca7b98..76f1abd71e31 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -590,7 +590,7 @@ static const char * const usb1_groups[] = {"usb1_0_grp"};
static const char * const mdio0_groups[] = {"mdio0_0_grp"};
static const char * const mdio1_groups[] = {"mdio1_0_grp"};
static const char * const qspi0_groups[] = {"qspi0_0_grp"};
-static const char * const qspi1_groups[] = {"qspi0_1_grp"};
+static const char * const qspi1_groups[] = {"qspi1_0_grp"};
static const char * const qspi_fbclk_groups[] = {"qspi_fbclk_grp"};
static const char * const qspi_cs1_groups[] = {"qspi_cs1_grp"};
static const char * const spi0_groups[] = {"spi0_0_grp", "spi0_1_grp",
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 29984b36926a..c223a9ef1fe1 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -334,7 +334,6 @@ int pinmux_map_to_setting(struct pinctrl_map const *map,
unsigned num_groups;
int ret;
const char *group;
- int i;
if (!pmxops) {
dev_err(pctldev->dev, "does not support mux function\n");
@@ -363,19 +362,13 @@ int pinmux_map_to_setting(struct pinctrl_map const *map,
return -EINVAL;
}
if (map->data.mux.group) {
- bool found = false;
group = map->data.mux.group;
- for (i = 0; i < num_groups; i++) {
- if (!strcmp(group, groups[i])) {
- found = true;
- break;
- }
- }
- if (!found) {
+ ret = match_string(groups, num_groups, group);
+ if (ret < 0) {
dev_err(pctldev->dev,
"invalid group \"%s\" for function \"%s\"\n",
group, map->data.mux.function);
- return -EINVAL;
+ return ret;
}
} else {
group = groups[0];
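match_string() returns the index of the first exact match or -EINVAL, so returning ret above preserves the old -EINVAL behaviour while dropping the open-coded loop. A simplified userspace model of those semantics (the kernel helper also supports a -1 "NULL-terminated" length, omitted here):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int match_string(const char * const *array, size_t n, const char *s)
{
	for (size_t i = 0; i < n; i++) {
		if (!array[i])
			break;		/* NULL entry terminates the scan */
		if (!strcmp(array[i], s))
			return (int)i;
	}
	return -EINVAL;
}

int main(void)
{
	const char * const groups[] = { "spi0_0_grp", "spi0_1_grp" };

	printf("%d\n", match_string(groups, 2, "spi0_1_grp"));	/* 1 */
	printf("%d\n", match_string(groups, 2, "bogus_grp"));	/* -22 */
	return 0;
}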
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index 216f227c6009..f553313bc2ef 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,7 +426,7 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
return 0;
}
-EXPORT_SYMBOL(pxa2xx_pinctrl_init);
+EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_init);
int pxa2xx_pinctrl_exit(struct platform_device *pdev)
{
@@ -435,3 +435,4 @@ int pxa2xx_pinctrl_exit(struct platform_device *pdev)
pinctrl_unregister(pctl->pctl_dev);
return 0;
}
+EXPORT_SYMBOL_GPL(pxa2xx_pinctrl_exit);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index eeac8cba8a21..67bc70dcda64 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -23,6 +23,14 @@ config PINCTRL_APQ8084
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm APQ8084 platform.
+config PINCTRL_IPQ4019
+ tristate "Qualcomm IPQ4019 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm IPQ4019 platform.
+
config PINCTRL_IPQ8064
tristate "Qualcomm IPQ8064 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index dfb50a9fe04a..c964a2c4b90a 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
+obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
new file mode 100644
index 000000000000..b68ae424cee2
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc ipq4019_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+};
+
+#define DECLARE_QCA_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_QCA_GPIO_PINS(0);
+DECLARE_QCA_GPIO_PINS(1);
+DECLARE_QCA_GPIO_PINS(2);
+DECLARE_QCA_GPIO_PINS(3);
+DECLARE_QCA_GPIO_PINS(4);
+DECLARE_QCA_GPIO_PINS(5);
+DECLARE_QCA_GPIO_PINS(6);
+DECLARE_QCA_GPIO_PINS(7);
+DECLARE_QCA_GPIO_PINS(8);
+DECLARE_QCA_GPIO_PINS(9);
+DECLARE_QCA_GPIO_PINS(10);
+DECLARE_QCA_GPIO_PINS(11);
+DECLARE_QCA_GPIO_PINS(12);
+DECLARE_QCA_GPIO_PINS(13);
+DECLARE_QCA_GPIO_PINS(14);
+DECLARE_QCA_GPIO_PINS(15);
+DECLARE_QCA_GPIO_PINS(16);
+DECLARE_QCA_GPIO_PINS(17);
+DECLARE_QCA_GPIO_PINS(18);
+DECLARE_QCA_GPIO_PINS(19);
+DECLARE_QCA_GPIO_PINS(20);
+DECLARE_QCA_GPIO_PINS(21);
+DECLARE_QCA_GPIO_PINS(22);
+DECLARE_QCA_GPIO_PINS(23);
+DECLARE_QCA_GPIO_PINS(24);
+DECLARE_QCA_GPIO_PINS(25);
+DECLARE_QCA_GPIO_PINS(26);
+DECLARE_QCA_GPIO_PINS(27);
+DECLARE_QCA_GPIO_PINS(28);
+DECLARE_QCA_GPIO_PINS(29);
+DECLARE_QCA_GPIO_PINS(30);
+DECLARE_QCA_GPIO_PINS(31);
+DECLARE_QCA_GPIO_PINS(32);
+DECLARE_QCA_GPIO_PINS(33);
+DECLARE_QCA_GPIO_PINS(34);
+DECLARE_QCA_GPIO_PINS(35);
+DECLARE_QCA_GPIO_PINS(36);
+DECLARE_QCA_GPIO_PINS(37);
+DECLARE_QCA_GPIO_PINS(38);
+DECLARE_QCA_GPIO_PINS(39);
+DECLARE_QCA_GPIO_PINS(40);
+DECLARE_QCA_GPIO_PINS(41);
+DECLARE_QCA_GPIO_PINS(42);
+DECLARE_QCA_GPIO_PINS(43);
+DECLARE_QCA_GPIO_PINS(44);
+DECLARE_QCA_GPIO_PINS(45);
+DECLARE_QCA_GPIO_PINS(46);
+DECLARE_QCA_GPIO_PINS(47);
+DECLARE_QCA_GPIO_PINS(48);
+DECLARE_QCA_GPIO_PINS(49);
+DECLARE_QCA_GPIO_PINS(50);
+DECLARE_QCA_GPIO_PINS(51);
+DECLARE_QCA_GPIO_PINS(52);
+DECLARE_QCA_GPIO_PINS(53);
+DECLARE_QCA_GPIO_PINS(54);
+DECLARE_QCA_GPIO_PINS(55);
+DECLARE_QCA_GPIO_PINS(56);
+DECLARE_QCA_GPIO_PINS(57);
+DECLARE_QCA_GPIO_PINS(58);
+DECLARE_QCA_GPIO_PINS(59);
+DECLARE_QCA_GPIO_PINS(60);
+DECLARE_QCA_GPIO_PINS(61);
+DECLARE_QCA_GPIO_PINS(62);
+DECLARE_QCA_GPIO_PINS(63);
+DECLARE_QCA_GPIO_PINS(64);
+DECLARE_QCA_GPIO_PINS(65);
+DECLARE_QCA_GPIO_PINS(66);
+DECLARE_QCA_GPIO_PINS(67);
+DECLARE_QCA_GPIO_PINS(68);
+DECLARE_QCA_GPIO_PINS(69);
+DECLARE_QCA_GPIO_PINS(70);
+DECLARE_QCA_GPIO_PINS(71);
+DECLARE_QCA_GPIO_PINS(72);
+DECLARE_QCA_GPIO_PINS(73);
+DECLARE_QCA_GPIO_PINS(74);
+DECLARE_QCA_GPIO_PINS(75);
+DECLARE_QCA_GPIO_PINS(76);
+DECLARE_QCA_GPIO_PINS(77);
+DECLARE_QCA_GPIO_PINS(78);
+DECLARE_QCA_GPIO_PINS(79);
+DECLARE_QCA_GPIO_PINS(80);
+DECLARE_QCA_GPIO_PINS(81);
+DECLARE_QCA_GPIO_PINS(82);
+DECLARE_QCA_GPIO_PINS(83);
+DECLARE_QCA_GPIO_PINS(84);
+DECLARE_QCA_GPIO_PINS(85);
+DECLARE_QCA_GPIO_PINS(86);
+DECLARE_QCA_GPIO_PINS(87);
+DECLARE_QCA_GPIO_PINS(88);
+DECLARE_QCA_GPIO_PINS(89);
+DECLARE_QCA_GPIO_PINS(90);
+DECLARE_QCA_GPIO_PINS(91);
+DECLARE_QCA_GPIO_PINS(92);
+DECLARE_QCA_GPIO_PINS(93);
+DECLARE_QCA_GPIO_PINS(94);
+DECLARE_QCA_GPIO_PINS(95);
+DECLARE_QCA_GPIO_PINS(96);
+DECLARE_QCA_GPIO_PINS(97);
+DECLARE_QCA_GPIO_PINS(98);
+DECLARE_QCA_GPIO_PINS(99);
+
+#define FUNCTION(fname) \
+ [qca_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ qca_mux_gpio, /* gpio mode */ \
+ qca_mux_##f1, \
+ qca_mux_##f2, \
+ qca_mux_##f3, \
+ qca_mux_##f4, \
+ qca_mux_##f5, \
+ qca_mux_##f6, \
+ qca_mux_##f7, \
+ qca_mux_##f8, \
+ qca_mux_##f9, \
+ qca_mux_##f10, \
+ qca_mux_##f11, \
+ qca_mux_##f12, \
+ qca_mux_##f13, \
+ qca_mux_##f14 \
+ }, \
+ .nfuncs = 15, \
+ .ctl_reg = 0x0 + 0x1000 * id, \
+ .io_reg = 0x4 + 0x1000 * id, \
+ .intr_cfg_reg = 0x8 + 0x1000 * id, \
+ .intr_status_reg = 0xc + 0x1000 * id, \
+ .intr_target_reg = 0x8 + 0x1000 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+
+enum ipq4019_functions {
+ qca_mux_gpio,
+ qca_mux_blsp_uart1,
+ qca_mux_blsp_i2c0,
+ qca_mux_blsp_i2c1,
+ qca_mux_blsp_uart0,
+ qca_mux_blsp_spi1,
+ qca_mux_blsp_spi0,
+ qca_mux_NA,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99",
+};
+
+static const char * const blsp_uart1_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const blsp_i2c0_groups[] = {
+ "gpio10", "gpio11", "gpio20", "gpio21", "gpio58", "gpio59",
+};
+static const char * const blsp_spi0_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15", "gpio45",
+ "gpio54", "gpio55", "gpio56", "gpio57",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio12", "gpio13", "gpio34", "gpio35",
+};
+static const char * const blsp_uart0_groups[] = {
+ "gpio16", "gpio17", "gpio60", "gpio61",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const struct msm_function ipq4019_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_i2c0),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_uart0),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi0),
+};
+
+static const struct msm_pingroup ipq4019_groups[] = {
+ PINGROUP(0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(5, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(6, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(7, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, blsp_uart1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(9, blsp_uart1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(10, blsp_uart1, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(11, blsp_uart1, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, blsp_spi0, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, blsp_spi0, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(20, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(23, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(24, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(25, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(26, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(27, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(28, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(29, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(30, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(34, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(35, blsp_i2c1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(37, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(39, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(40, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(41, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(42, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(43, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(44, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, NA, blsp_spi1, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(46, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(47, NA, blsp_spi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, NA, blsp_spi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, NA, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, NA, blsp_i2c0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, NA, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, NA, blsp_uart0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+};
+
+static const struct msm_pinctrl_soc_data ipq4019_pinctrl = {
+ .pins = ipq4019_pins,
+ .npins = ARRAY_SIZE(ipq4019_pins),
+ .functions = ipq4019_functions,
+ .nfunctions = ARRAY_SIZE(ipq4019_functions),
+ .groups = ipq4019_groups,
+ .ngroups = ARRAY_SIZE(ipq4019_groups),
+ .ngpios = 100,
+};
+
+static int ipq4019_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &ipq4019_pinctrl);
+}
+
+static const struct of_device_id ipq4019_pinctrl_of_match[] = {
+ { .compatible = "qcom,ipq4019-pinctrl", },
+ { },
+};
+
+static struct platform_driver ipq4019_pinctrl_driver = {
+ .driver = {
+ .name = "ipq4019-pinctrl",
+ .of_match_table = ipq4019_pinctrl_of_match,
+ },
+ .probe = ipq4019_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init ipq4019_pinctrl_init(void)
+{
+ return platform_driver_register(&ipq4019_pinctrl_driver);
+}
+arch_initcall(ipq4019_pinctrl_init);
+
+static void __exit ipq4019_pinctrl_exit(void)
+{
+ platform_driver_unregister(&ipq4019_pinctrl_driver);
+}
+module_exit(ipq4019_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm ipq4019 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, ipq4019_pinctrl_of_match);
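The PINGROUP() macro above lays the TLMM out as one 0x1000-byte register window per pin, with fixed offsets inside each window. A tiny userspace model of the resulting addresses, for illustration only:

#include <stdio.h>

int main(void)
{
	for (unsigned int pin = 0; pin < 3; pin++) {
		unsigned int base = 0x1000 * pin;

		printf("gpio%u: ctl=0x%05x io=0x%05x intr_cfg=0x%05x intr_status=0x%05x\n",
		       pin, base + 0x0, base + 0x4, base + 0x8, base + 0xc);
	}
	return 0;
}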
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 2f18323571a6..2a3e5490a483 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -117,6 +117,7 @@
* @output_enabled: Set to true if MPP output logic is enabled.
* @input_enabled: Set to true if MPP input buffer logic is enabled.
* @paired: Pin operates in paired mode
+ * @has_pullup: Set to true if the pin supports pull-up configuration
* @num_sources: Number of power-sources supported by this MPP.
* @power_source: Current power-source used.
* @amux_input: Set the source for analog input.
@@ -134,6 +135,7 @@ struct pmic_mpp_pad {
bool output_enabled;
bool input_enabled;
bool paired;
+ bool has_pullup;
unsigned int num_sources;
unsigned int power_source;
unsigned int amux_input;
@@ -477,11 +479,14 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
- val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT;
+ if (pad->has_pullup) {
+ val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT;
- ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_PULL_CTL, val);
- if (ret < 0)
- return ret;
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_PULL_CTL,
+ val);
+ if (ret < 0)
+ return ret;
+ }
val = pad->amux_input & PMIC_MPP_REG_AIN_ROUTE_MASK;
@@ -534,7 +539,8 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, " %-7s", pmic_mpp_functions[pad->function]);
seq_printf(s, " vin-%d", pad->power_source);
seq_printf(s, " %d", pad->aout_level);
- seq_printf(s, " %-8s", biases[pad->pullup]);
+ if (pad->has_pullup)
+ seq_printf(s, " %-8s", biases[pad->pullup]);
seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
if (pad->dtest)
seq_printf(s, " dtest%d", pad->dtest);
@@ -748,12 +754,16 @@ static int pmic_mpp_populate(struct pmic_mpp_state *state,
pad->power_source = val >> PMIC_MPP_REG_VIN_SHIFT;
pad->power_source &= PMIC_MPP_REG_VIN_MASK;
- val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL);
- if (val < 0)
- return val;
+ if (subtype != PMIC_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT &&
+ subtype != PMIC_MPP_SUBTYPE_ULT_4CH_NO_SINK) {
+ val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL);
+ if (val < 0)
+ return val;
- pad->pullup = val >> PMIC_MPP_REG_PULL_SHIFT;
- pad->pullup &= PMIC_MPP_REG_PULL_MASK;
+ pad->pullup = val >> PMIC_MPP_REG_PULL_SHIFT;
+ pad->pullup &= PMIC_MPP_REG_PULL_MASK;
+ pad->has_pullup = true;
+ }
val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AIN_CTL);
if (val < 0)
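The populate path above now reads DIG_PULL_CTL only on MPP subtypes that actually implement the register, recording the result in has_pullup so the config-set and debugfs paths skip it as well. A userspace model of that gate, subtype names abridged and hypothetical:

#include <stdbool.h>
#include <stdio.h>

enum mpp_subtype { MPP_4CH, MPP_ULT_4CH_NO_ANA_OUT, MPP_ULT_4CH_NO_SINK };

static bool mpp_has_pullup(enum mpp_subtype subtype)
{
	/* the ULT 4CH variants lack the pull-up control register */
	return subtype != MPP_ULT_4CH_NO_ANA_OUT &&
	       subtype != MPP_ULT_4CH_NO_SINK;
}

int main(void)
{
	printf("4CH: %d, ULT_NO_SINK: %d\n",
	       mpp_has_pullup(MPP_4CH), mpp_has_pullup(MPP_ULT_4CH_NO_SINK));
	return 0;	/* prints "4CH: 1, ULT_NO_SINK: 0" */
}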
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 35d6e95fa21f..415dd8023063 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -2,10 +2,9 @@
# Renesas SH and SH Mobile PINCTRL drivers
#
-if ARCH_SHMOBILE || SUPERH
+if ARCH_RENESAS || SUPERH
config PINCTRL_SH_PFC
- select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
select PINMUX
select PINCONF
select GENERIC_PINCONF
@@ -13,12 +12,12 @@ config PINCTRL_SH_PFC
help
This enables pin control drivers for SH and SH Mobile platforms
-config GPIO_SH_PFC
- bool "SuperH PFC GPIO support"
- depends on PINCTRL_SH_PFC && GPIOLIB
+config PINCTRL_SH_PFC_GPIO
+ select GPIOLIB
+ select PINCTRL_SH_PFC
+ bool
help
- This enables support for GPIOs within the SoC's pin function
- controller.
+ This enables pin control and GPIO drivers for SH/SH Mobile platforms.
config PINCTRL_PFC_EMEV2
def_bool y
@@ -28,12 +27,12 @@ config PINCTRL_PFC_EMEV2
config PINCTRL_PFC_R8A73A4
def_bool y
depends on ARCH_R8A73A4
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_R8A7740
def_bool y
depends on ARCH_R8A7740
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_R8A7778
def_bool y
@@ -73,79 +72,66 @@ config PINCTRL_PFC_R8A7795
config PINCTRL_PFC_SH7203
def_bool y
depends on CPU_SUBTYPE_SH7203
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7264
def_bool y
depends on CPU_SUBTYPE_SH7264
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7269
def_bool y
depends on CPU_SUBTYPE_SH7269
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH73A0
def_bool y
depends on ARCH_SH73A0
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
select REGULATOR
config PINCTRL_PFC_SH7720
def_bool y
depends on CPU_SUBTYPE_SH7720
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7722
def_bool y
depends on CPU_SUBTYPE_SH7722
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7723
def_bool y
depends on CPU_SUBTYPE_SH7723
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7724
def_bool y
depends on CPU_SUBTYPE_SH7724
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7734
def_bool y
depends on CPU_SUBTYPE_SH7734
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7757
def_bool y
depends on CPU_SUBTYPE_SH7757
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7785
def_bool y
depends on CPU_SUBTYPE_SH7785
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SH7786
def_bool y
depends on CPU_SUBTYPE_SH7786
- depends on GPIOLIB
- select PINCTRL_SH_PFC
+ select PINCTRL_SH_PFC_GPIO
config PINCTRL_PFC_SHX3
def_bool y
depends on CPU_SUBTYPE_SHX3
- depends on GPIOLIB
- select PINCTRL_SH_PFC
-
+ select PINCTRL_SH_PFC_GPIO
endif
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 173305fa3811..8a2c8710fc93 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -1,8 +1,5 @@
-sh-pfc-objs = core.o pinctrl.o
-ifeq ($(CONFIG_GPIO_SH_PFC),y)
-sh-pfc-objs += gpio.o
-endif
-obj-$(CONFIG_PINCTRL_SH_PFC) += sh-pfc.o
+obj-$(CONFIG_PINCTRL_SH_PFC) += core.o pinctrl.o
+obj-$(CONFIG_PINCTRL_SH_PFC_GPIO) += gpio.o
obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 181ea98a63b7..ee0c1f2567d9 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -1,5 +1,7 @@
/*
- * SuperH Pin Function Controller support.
+ * Pin Control and GPIO driver for SuperH Pin Function Controller.
+ *
+ * Authors: Magnus Damm, Paul Mundt, Laurent Pinchart
*
* Copyright (C) 2008 Magnus Damm
* Copyright (C) 2009 - 2012 Paul Mundt
@@ -17,7 +19,7 @@
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/machine.h>
@@ -503,7 +505,6 @@ static const struct of_device_id sh_pfc_of_table[] = {
#endif
{ },
};
-MODULE_DEVICE_TABLE(of, sh_pfc_of_table);
#endif
static int sh_pfc_probe(struct platform_device *pdev)
@@ -518,7 +519,7 @@ static int sh_pfc_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
if (np)
- info = of_match_device(sh_pfc_of_table, &pdev->dev)->data;
+ info = of_device_get_match_data(&pdev->dev);
else
#endif
info = platid ? (const void *)platid->driver_data : NULL;
@@ -545,7 +546,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
return ret;
}
- pinctrl_provide_dummies();
+ /* Enable dummy states for those platforms without pinctrl support */
+ if (!of_have_populated_dt())
+ pinctrl_provide_dummies();
ret = sh_pfc_init_ranges(pfc);
if (ret < 0)
@@ -558,7 +561,7 @@ static int sh_pfc_probe(struct platform_device *pdev)
if (unlikely(ret != 0))
return ret;
-#ifdef CONFIG_GPIO_SH_PFC
+#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
/*
* Then the GPIO chip
*/
@@ -584,7 +587,7 @@ static int sh_pfc_remove(struct platform_device *pdev)
{
struct sh_pfc *pfc = platform_get_drvdata(pdev);
-#ifdef CONFIG_GPIO_SH_PFC
+#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
sh_pfc_unregister_gpiochip(pfc);
#endif
sh_pfc_unregister_pinctrl(pfc);
@@ -632,7 +635,6 @@ static const struct platform_device_id sh_pfc_id_table[] = {
{ "sh-pfc", 0 },
{ },
};
-MODULE_DEVICE_TABLE(platform, sh_pfc_id_table);
static struct platform_driver sh_pfc_driver = {
.probe = sh_pfc_probe,
@@ -649,13 +651,3 @@ static int __init sh_pfc_init(void)
return platform_driver_register(&sh_pfc_driver);
}
postcore_initcall(sh_pfc_init);
-
-static void __exit sh_pfc_exit(void)
-{
- platform_driver_unregister(&sh_pfc_driver);
-}
-module_exit(sh_pfc_exit);
-
-MODULE_AUTHOR("Magnus Damm, Paul Mundt, Laurent Pinchart");
-MODULE_DESCRIPTION("Pin Control and GPIO driver for SuperH pin function controller");
-MODULE_LICENSE("GPL v2");
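of_device_get_match_data() used in the probe above returns the matched of_device_id's .data, or NULL when nothing matched, so the driver no longer dereferences the result of of_match_device() unchecked. A schematic fragment of the pattern with hypothetical driver names, not this driver's actual tables:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_info { int ngpios; };
static const struct my_info my_soc_info = { .ngpios = 32 };

static const struct of_device_id my_of_table[] = {
	{ .compatible = "vendor,my-pfc", .data = &my_soc_info },
	{ },
};

static int my_probe(struct platform_device *pdev)
{
	/* NULL on no OF match — no ->data deref through a NULL id */
	const struct my_info *info = of_device_get_match_data(&pdev->dev);

	if (!info)
		return -ENODEV;
	dev_info(&pdev->dev, "%d gpios\n", info->ngpios);
	return 0;
}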
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index ad09a670c2ff..411d0887ba19 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -561,82 +561,82 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(AVS2),
/* IPSR0 */
- PINMUX_IPSR_DATA(IP0_1_0, PRESETOUT),
- PINMUX_IPSR_DATA(IP0_1_0, PWM1),
+ PINMUX_IPSR_GPSR(IP0_1_0, PRESETOUT),
+ PINMUX_IPSR_GPSR(IP0_1_0, PWM1),
- PINMUX_IPSR_DATA(IP0_4_2, AUDATA0),
- PINMUX_IPSR_DATA(IP0_4_2, ARM_TRACEDATA_0),
+ PINMUX_IPSR_GPSR(IP0_4_2, AUDATA0),
+ PINMUX_IPSR_GPSR(IP0_4_2, ARM_TRACEDATA_0),
PINMUX_IPSR_MSEL(IP0_4_2, GPSCLK_C, SEL_GPS_C),
- PINMUX_IPSR_DATA(IP0_4_2, USB_OVC0),
- PINMUX_IPSR_DATA(IP0_4_2, TX2_E),
+ PINMUX_IPSR_GPSR(IP0_4_2, USB_OVC0),
+ PINMUX_IPSR_GPSR(IP0_4_2, TX2_E),
PINMUX_IPSR_MSEL(IP0_4_2, SDA2_B, SEL_I2C2_B),
- PINMUX_IPSR_DATA(IP0_7_5, AUDATA1),
- PINMUX_IPSR_DATA(IP0_7_5, ARM_TRACEDATA_1),
+ PINMUX_IPSR_GPSR(IP0_7_5, AUDATA1),
+ PINMUX_IPSR_GPSR(IP0_7_5, ARM_TRACEDATA_1),
PINMUX_IPSR_MSEL(IP0_7_5, GPSIN_C, SEL_GPS_C),
- PINMUX_IPSR_DATA(IP0_7_5, USB_OVC1),
+ PINMUX_IPSR_GPSR(IP0_7_5, USB_OVC1),
PINMUX_IPSR_MSEL(IP0_7_5, RX2_E, SEL_SCIF2_E),
PINMUX_IPSR_MSEL(IP0_7_5, SCL2_B, SEL_I2C2_B),
PINMUX_IPSR_MSEL(IP0_11_8, SD1_DAT2_A, SEL_SD1_A),
- PINMUX_IPSR_DATA(IP0_11_8, MMC_D2),
- PINMUX_IPSR_DATA(IP0_11_8, BS),
- PINMUX_IPSR_DATA(IP0_11_8, ATADIR0_A),
- PINMUX_IPSR_DATA(IP0_11_8, SDSELF_A),
- PINMUX_IPSR_DATA(IP0_11_8, PWM4_B),
+ PINMUX_IPSR_GPSR(IP0_11_8, MMC_D2),
+ PINMUX_IPSR_GPSR(IP0_11_8, BS),
+ PINMUX_IPSR_GPSR(IP0_11_8, ATADIR0_A),
+ PINMUX_IPSR_GPSR(IP0_11_8, SDSELF_A),
+ PINMUX_IPSR_GPSR(IP0_11_8, PWM4_B),
PINMUX_IPSR_MSEL(IP0_14_12, SD1_DAT3_A, SEL_SD1_A),
- PINMUX_IPSR_DATA(IP0_14_12, MMC_D3),
- PINMUX_IPSR_DATA(IP0_14_12, A0),
- PINMUX_IPSR_DATA(IP0_14_12, ATAG0_A),
+ PINMUX_IPSR_GPSR(IP0_14_12, MMC_D3),
+ PINMUX_IPSR_GPSR(IP0_14_12, A0),
+ PINMUX_IPSR_GPSR(IP0_14_12, ATAG0_A),
PINMUX_IPSR_MSEL(IP0_14_12, REMOCON_B, SEL_REMOCON_B),
- PINMUX_IPSR_DATA(IP0_15, A4),
- PINMUX_IPSR_DATA(IP0_16, A5),
- PINMUX_IPSR_DATA(IP0_17, A6),
- PINMUX_IPSR_DATA(IP0_18, A7),
- PINMUX_IPSR_DATA(IP0_19, A8),
- PINMUX_IPSR_DATA(IP0_20, A9),
- PINMUX_IPSR_DATA(IP0_21, A10),
- PINMUX_IPSR_DATA(IP0_22, A11),
- PINMUX_IPSR_DATA(IP0_23, A12),
- PINMUX_IPSR_DATA(IP0_24, A13),
- PINMUX_IPSR_DATA(IP0_25, A14),
- PINMUX_IPSR_DATA(IP0_26, A15),
- PINMUX_IPSR_DATA(IP0_27, A16),
- PINMUX_IPSR_DATA(IP0_28, A17),
- PINMUX_IPSR_DATA(IP0_29, A18),
- PINMUX_IPSR_DATA(IP0_30, A19),
+ PINMUX_IPSR_GPSR(IP0_15, A4),
+ PINMUX_IPSR_GPSR(IP0_16, A5),
+ PINMUX_IPSR_GPSR(IP0_17, A6),
+ PINMUX_IPSR_GPSR(IP0_18, A7),
+ PINMUX_IPSR_GPSR(IP0_19, A8),
+ PINMUX_IPSR_GPSR(IP0_20, A9),
+ PINMUX_IPSR_GPSR(IP0_21, A10),
+ PINMUX_IPSR_GPSR(IP0_22, A11),
+ PINMUX_IPSR_GPSR(IP0_23, A12),
+ PINMUX_IPSR_GPSR(IP0_24, A13),
+ PINMUX_IPSR_GPSR(IP0_25, A14),
+ PINMUX_IPSR_GPSR(IP0_26, A15),
+ PINMUX_IPSR_GPSR(IP0_27, A16),
+ PINMUX_IPSR_GPSR(IP0_28, A17),
+ PINMUX_IPSR_GPSR(IP0_29, A18),
+ PINMUX_IPSR_GPSR(IP0_30, A19),
/* IPSR1 */
- PINMUX_IPSR_DATA(IP1_0, A20),
+ PINMUX_IPSR_GPSR(IP1_0, A20),
PINMUX_IPSR_MSEL(IP1_0, HSPI_CS1_B, SEL_HSPI1_B),
- PINMUX_IPSR_DATA(IP1_1, A21),
+ PINMUX_IPSR_GPSR(IP1_1, A21),
PINMUX_IPSR_MSEL(IP1_1, HSPI_CLK1_B, SEL_HSPI1_B),
- PINMUX_IPSR_DATA(IP1_4_2, A22),
+ PINMUX_IPSR_GPSR(IP1_4_2, A22),
PINMUX_IPSR_MSEL(IP1_4_2, HRTS0_B, SEL_HSCIF0_B),
PINMUX_IPSR_MSEL(IP1_4_2, RX2_B, SEL_SCIF2_B),
PINMUX_IPSR_MSEL(IP1_4_2, DREQ2_A, SEL_DREQ2_A),
- PINMUX_IPSR_DATA(IP1_7_5, A23),
- PINMUX_IPSR_DATA(IP1_7_5, HTX0_B),
- PINMUX_IPSR_DATA(IP1_7_5, TX2_B),
- PINMUX_IPSR_DATA(IP1_7_5, DACK2_A),
+ PINMUX_IPSR_GPSR(IP1_7_5, A23),
+ PINMUX_IPSR_GPSR(IP1_7_5, HTX0_B),
+ PINMUX_IPSR_GPSR(IP1_7_5, TX2_B),
+ PINMUX_IPSR_GPSR(IP1_7_5, DACK2_A),
PINMUX_IPSR_MSEL(IP1_7_5, TS_SDEN0_A, SEL_TSIF0_A),
PINMUX_IPSR_MSEL(IP1_10_8, SD1_CD_A, SEL_SD1_A),
- PINMUX_IPSR_DATA(IP1_10_8, MMC_D6),
- PINMUX_IPSR_DATA(IP1_10_8, A24),
+ PINMUX_IPSR_GPSR(IP1_10_8, MMC_D6),
+ PINMUX_IPSR_GPSR(IP1_10_8, A24),
PINMUX_IPSR_MSEL(IP1_10_8, DREQ1_A, SEL_DREQ1_A),
PINMUX_IPSR_MSEL(IP1_10_8, HRX0_B, SEL_HSCIF0_B),
PINMUX_IPSR_MSEL(IP1_10_8, TS_SPSYNC0_A, SEL_TSIF0_A),
PINMUX_IPSR_MSEL(IP1_14_11, SD1_WP_A, SEL_SD1_A),
- PINMUX_IPSR_DATA(IP1_14_11, MMC_D7),
- PINMUX_IPSR_DATA(IP1_14_11, A25),
- PINMUX_IPSR_DATA(IP1_14_11, DACK1_A),
+ PINMUX_IPSR_GPSR(IP1_14_11, MMC_D7),
+ PINMUX_IPSR_GPSR(IP1_14_11, A25),
+ PINMUX_IPSR_GPSR(IP1_14_11, DACK1_A),
PINMUX_IPSR_MSEL(IP1_14_11, HCTS0_B, SEL_HSCIF0_B),
PINMUX_IPSR_MSEL(IP1_14_11, RX3_C, SEL_SCIF3_C),
PINMUX_IPSR_MSEL(IP1_14_11, TS_SDAT0_A, SEL_TSIF0_A),
@@ -654,54 +654,54 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_NOGM(IP1_20_18, SDA2_A, SEL_I2C2_A),
PINMUX_IPSR_NOGM(IP1_20_18, SCK2_B, SEL_SCIF2_B),
- PINMUX_IPSR_DATA(IP1_23_21, MMC_D5),
- PINMUX_IPSR_DATA(IP1_23_21, ATADIR0_B),
- PINMUX_IPSR_DATA(IP1_23_21, RD_WR),
+ PINMUX_IPSR_GPSR(IP1_23_21, MMC_D5),
+ PINMUX_IPSR_GPSR(IP1_23_21, ATADIR0_B),
+ PINMUX_IPSR_GPSR(IP1_23_21, RD_WR),
- PINMUX_IPSR_DATA(IP1_24, WE1),
- PINMUX_IPSR_DATA(IP1_24, ATAWR0_B),
+ PINMUX_IPSR_GPSR(IP1_24, WE1),
+ PINMUX_IPSR_GPSR(IP1_24, ATAWR0_B),
PINMUX_IPSR_MSEL(IP1_27_25, SSI_WS1_B, SEL_SSI1_B),
- PINMUX_IPSR_DATA(IP1_27_25, EX_CS0),
+ PINMUX_IPSR_GPSR(IP1_27_25, EX_CS0),
PINMUX_IPSR_MSEL(IP1_27_25, SCL2_A, SEL_I2C2_A),
- PINMUX_IPSR_DATA(IP1_27_25, TX3_C),
+ PINMUX_IPSR_GPSR(IP1_27_25, TX3_C),
PINMUX_IPSR_MSEL(IP1_27_25, TS_SCK0_A, SEL_TSIF0_A),
- PINMUX_IPSR_DATA(IP1_29_28, EX_CS1),
- PINMUX_IPSR_DATA(IP1_29_28, MMC_D4),
+ PINMUX_IPSR_GPSR(IP1_29_28, EX_CS1),
+ PINMUX_IPSR_GPSR(IP1_29_28, MMC_D4),
/* IPSR2 */
- PINMUX_IPSR_DATA(IP2_2_0, SD1_CLK_A),
- PINMUX_IPSR_DATA(IP2_2_0, MMC_CLK),
- PINMUX_IPSR_DATA(IP2_2_0, ATACS00),
- PINMUX_IPSR_DATA(IP2_2_0, EX_CS2),
+ PINMUX_IPSR_GPSR(IP2_2_0, SD1_CLK_A),
+ PINMUX_IPSR_GPSR(IP2_2_0, MMC_CLK),
+ PINMUX_IPSR_GPSR(IP2_2_0, ATACS00),
+ PINMUX_IPSR_GPSR(IP2_2_0, EX_CS2),
PINMUX_IPSR_MSEL(IP2_5_3, SD1_CMD_A, SEL_SD1_A),
- PINMUX_IPSR_DATA(IP2_5_3, MMC_CMD),
- PINMUX_IPSR_DATA(IP2_5_3, ATACS10),
- PINMUX_IPSR_DATA(IP2_5_3, EX_CS3),
+ PINMUX_IPSR_GPSR(IP2_5_3, MMC_CMD),
+ PINMUX_IPSR_GPSR(IP2_5_3, ATACS10),
+ PINMUX_IPSR_GPSR(IP2_5_3, EX_CS3),
PINMUX_IPSR_MSEL(IP2_8_6, SD1_DAT0_A, SEL_SD1_A),
- PINMUX_IPSR_DATA(IP2_8_6, MMC_D0),
- PINMUX_IPSR_DATA(IP2_8_6, ATARD0),
- PINMUX_IPSR_DATA(IP2_8_6, EX_CS4),
+ PINMUX_IPSR_GPSR(IP2_8_6, MMC_D0),
+ PINMUX_IPSR_GPSR(IP2_8_6, ATARD0),
+ PINMUX_IPSR_GPSR(IP2_8_6, EX_CS4),
PINMUX_IPSR_MSEL(IP2_8_6, EX_WAIT1_A, SEL_WAIT1_A),
PINMUX_IPSR_MSEL(IP2_11_9, SD1_DAT1_A, SEL_SD1_A),
- PINMUX_IPSR_DATA(IP2_11_9, MMC_D1),
- PINMUX_IPSR_DATA(IP2_11_9, ATAWR0_A),
- PINMUX_IPSR_DATA(IP2_11_9, EX_CS5),
+ PINMUX_IPSR_GPSR(IP2_11_9, MMC_D1),
+ PINMUX_IPSR_GPSR(IP2_11_9, ATAWR0_A),
+ PINMUX_IPSR_GPSR(IP2_11_9, EX_CS5),
PINMUX_IPSR_MSEL(IP2_11_9, EX_WAIT2_A, SEL_WAIT2_A),
PINMUX_IPSR_MSEL(IP2_13_12, DREQ0_A, SEL_DREQ0_A),
PINMUX_IPSR_MSEL(IP2_13_12, RX3_A, SEL_SCIF3_A),
- PINMUX_IPSR_DATA(IP2_16_14, DACK0),
- PINMUX_IPSR_DATA(IP2_16_14, TX3_A),
- PINMUX_IPSR_DATA(IP2_16_14, DRACK0),
+ PINMUX_IPSR_GPSR(IP2_16_14, DACK0),
+ PINMUX_IPSR_GPSR(IP2_16_14, TX3_A),
+ PINMUX_IPSR_GPSR(IP2_16_14, DRACK0),
- PINMUX_IPSR_DATA(IP2_17, EX_WAIT0),
- PINMUX_IPSR_DATA(IP2_17, PWM0_C),
+ PINMUX_IPSR_GPSR(IP2_17, EX_WAIT0),
+ PINMUX_IPSR_GPSR(IP2_17, PWM0_C),
PINMUX_IPSR_NOGP(IP2_18, D0),
PINMUX_IPSR_NOGP(IP2_19, D1),
@@ -716,33 +716,33 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_NOGP(IP2_28, D10),
PINMUX_IPSR_NOGP(IP2_29, D11),
- PINMUX_IPSR_DATA(IP2_30, RD_WR_B),
- PINMUX_IPSR_DATA(IP2_30, IRQ0),
+ PINMUX_IPSR_GPSR(IP2_30, RD_WR_B),
+ PINMUX_IPSR_GPSR(IP2_30, IRQ0),
- PINMUX_IPSR_DATA(IP2_31, MLB_CLK),
+ PINMUX_IPSR_GPSR(IP2_31, MLB_CLK),
PINMUX_IPSR_MSEL(IP2_31, IRQ1_A, SEL_IRQ1_A),
/* IPSR3 */
- PINMUX_IPSR_DATA(IP3_1_0, MLB_SIG),
+ PINMUX_IPSR_GPSR(IP3_1_0, MLB_SIG),
PINMUX_IPSR_MSEL(IP3_1_0, RX5_B, SEL_SCIF5_B),
PINMUX_IPSR_MSEL(IP3_1_0, SDA3_A, SEL_I2C3_A),
PINMUX_IPSR_MSEL(IP3_1_0, IRQ2_A, SEL_IRQ2_A),
- PINMUX_IPSR_DATA(IP3_4_2, MLB_DAT),
- PINMUX_IPSR_DATA(IP3_4_2, TX5_B),
+ PINMUX_IPSR_GPSR(IP3_4_2, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP3_4_2, TX5_B),
PINMUX_IPSR_MSEL(IP3_4_2, SCL3_A, SEL_I2C3_A),
PINMUX_IPSR_MSEL(IP3_4_2, IRQ3_A, SEL_IRQ3_A),
- PINMUX_IPSR_DATA(IP3_4_2, SDSELF_B),
+ PINMUX_IPSR_GPSR(IP3_4_2, SDSELF_B),
PINMUX_IPSR_MSEL(IP3_7_5, SD1_CMD_B, SEL_SD1_B),
- PINMUX_IPSR_DATA(IP3_7_5, SCIF_CLK),
- PINMUX_IPSR_DATA(IP3_7_5, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_GPSR(IP3_7_5, SCIF_CLK),
+ PINMUX_IPSR_GPSR(IP3_7_5, AUDIO_CLKOUT_B),
PINMUX_IPSR_MSEL(IP3_7_5, CAN_CLK_B, SEL_CANCLK_B),
PINMUX_IPSR_MSEL(IP3_7_5, SDA3_B, SEL_I2C3_B),
- PINMUX_IPSR_DATA(IP3_9_8, SD1_CLK_B),
- PINMUX_IPSR_DATA(IP3_9_8, HTX0_A),
- PINMUX_IPSR_DATA(IP3_9_8, TX0_A),
+ PINMUX_IPSR_GPSR(IP3_9_8, SD1_CLK_B),
+ PINMUX_IPSR_GPSR(IP3_9_8, HTX0_A),
+ PINMUX_IPSR_GPSR(IP3_9_8, TX0_A),
PINMUX_IPSR_MSEL(IP3_12_10, SD1_DAT0_B, SEL_SD1_B),
PINMUX_IPSR_MSEL(IP3_12_10, HRX0_A, SEL_HSCIF0_A),
@@ -750,513 +750,513 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP3_15_13, SD1_DAT1_B, SEL_SD1_B),
PINMUX_IPSR_MSEL(IP3_15_13, HSCK0, SEL_HSCIF0_A),
- PINMUX_IPSR_DATA(IP3_15_13, SCK0),
+ PINMUX_IPSR_GPSR(IP3_15_13, SCK0),
PINMUX_IPSR_MSEL(IP3_15_13, SCL3_B, SEL_I2C3_B),
PINMUX_IPSR_MSEL(IP3_18_16, SD1_DAT2_B, SEL_SD1_B),
PINMUX_IPSR_MSEL(IP3_18_16, HCTS0_A, SEL_HSCIF0_A),
- PINMUX_IPSR_DATA(IP3_18_16, CTS0),
+ PINMUX_IPSR_GPSR(IP3_18_16, CTS0),
PINMUX_IPSR_MSEL(IP3_20_19, SD1_DAT3_B, SEL_SD1_B),
PINMUX_IPSR_MSEL(IP3_20_19, HRTS0_A, SEL_HSCIF0_A),
- PINMUX_IPSR_DATA(IP3_20_19, RTS0),
+ PINMUX_IPSR_GPSR(IP3_20_19, RTS0),
- PINMUX_IPSR_DATA(IP3_23_21, SSI_SCK4),
- PINMUX_IPSR_DATA(IP3_23_21, DU0_DR0),
- PINMUX_IPSR_DATA(IP3_23_21, LCDOUT0),
- PINMUX_IPSR_DATA(IP3_23_21, AUDATA2),
- PINMUX_IPSR_DATA(IP3_23_21, ARM_TRACEDATA_2),
+ PINMUX_IPSR_GPSR(IP3_23_21, SSI_SCK4),
+ PINMUX_IPSR_GPSR(IP3_23_21, DU0_DR0),
+ PINMUX_IPSR_GPSR(IP3_23_21, LCDOUT0),
+ PINMUX_IPSR_GPSR(IP3_23_21, AUDATA2),
+ PINMUX_IPSR_GPSR(IP3_23_21, ARM_TRACEDATA_2),
PINMUX_IPSR_MSEL(IP3_23_21, SDA3_C, SEL_I2C3_C),
- PINMUX_IPSR_DATA(IP3_23_21, ADICHS1),
+ PINMUX_IPSR_GPSR(IP3_23_21, ADICHS1),
PINMUX_IPSR_MSEL(IP3_23_21, TS_SDEN0_B, SEL_TSIF0_B),
- PINMUX_IPSR_DATA(IP3_26_24, SSI_WS4),
- PINMUX_IPSR_DATA(IP3_26_24, DU0_DR1),
- PINMUX_IPSR_DATA(IP3_26_24, LCDOUT1),
- PINMUX_IPSR_DATA(IP3_26_24, AUDATA3),
- PINMUX_IPSR_DATA(IP3_26_24, ARM_TRACEDATA_3),
+ PINMUX_IPSR_GPSR(IP3_26_24, SSI_WS4),
+ PINMUX_IPSR_GPSR(IP3_26_24, DU0_DR1),
+ PINMUX_IPSR_GPSR(IP3_26_24, LCDOUT1),
+ PINMUX_IPSR_GPSR(IP3_26_24, AUDATA3),
+ PINMUX_IPSR_GPSR(IP3_26_24, ARM_TRACEDATA_3),
PINMUX_IPSR_MSEL(IP3_26_24, SCL3_C, SEL_I2C3_C),
- PINMUX_IPSR_DATA(IP3_26_24, ADICHS2),
+ PINMUX_IPSR_GPSR(IP3_26_24, ADICHS2),
PINMUX_IPSR_MSEL(IP3_26_24, TS_SPSYNC0_B, SEL_TSIF0_B),
- PINMUX_IPSR_DATA(IP3_27, DU0_DR2),
- PINMUX_IPSR_DATA(IP3_27, LCDOUT2),
+ PINMUX_IPSR_GPSR(IP3_27, DU0_DR2),
+ PINMUX_IPSR_GPSR(IP3_27, LCDOUT2),
- PINMUX_IPSR_DATA(IP3_28, DU0_DR3),
- PINMUX_IPSR_DATA(IP3_28, LCDOUT3),
+ PINMUX_IPSR_GPSR(IP3_28, DU0_DR3),
+ PINMUX_IPSR_GPSR(IP3_28, LCDOUT3),
- PINMUX_IPSR_DATA(IP3_29, DU0_DR4),
- PINMUX_IPSR_DATA(IP3_29, LCDOUT4),
+ PINMUX_IPSR_GPSR(IP3_29, DU0_DR4),
+ PINMUX_IPSR_GPSR(IP3_29, LCDOUT4),
- PINMUX_IPSR_DATA(IP3_30, DU0_DR5),
- PINMUX_IPSR_DATA(IP3_30, LCDOUT5),
+ PINMUX_IPSR_GPSR(IP3_30, DU0_DR5),
+ PINMUX_IPSR_GPSR(IP3_30, LCDOUT5),
- PINMUX_IPSR_DATA(IP3_31, DU0_DR6),
- PINMUX_IPSR_DATA(IP3_31, LCDOUT6),
+ PINMUX_IPSR_GPSR(IP3_31, DU0_DR6),
+ PINMUX_IPSR_GPSR(IP3_31, LCDOUT6),
/* IPSR4 */
- PINMUX_IPSR_DATA(IP4_0, DU0_DR7),
- PINMUX_IPSR_DATA(IP4_0, LCDOUT7),
-
- PINMUX_IPSR_DATA(IP4_3_1, DU0_DG0),
- PINMUX_IPSR_DATA(IP4_3_1, LCDOUT8),
- PINMUX_IPSR_DATA(IP4_3_1, AUDATA4),
- PINMUX_IPSR_DATA(IP4_3_1, ARM_TRACEDATA_4),
- PINMUX_IPSR_DATA(IP4_3_1, TX1_D),
- PINMUX_IPSR_DATA(IP4_3_1, CAN0_TX_A),
- PINMUX_IPSR_DATA(IP4_3_1, ADICHS0),
-
- PINMUX_IPSR_DATA(IP4_6_4, DU0_DG1),
- PINMUX_IPSR_DATA(IP4_6_4, LCDOUT9),
- PINMUX_IPSR_DATA(IP4_6_4, AUDATA5),
- PINMUX_IPSR_DATA(IP4_6_4, ARM_TRACEDATA_5),
+ PINMUX_IPSR_GPSR(IP4_0, DU0_DR7),
+ PINMUX_IPSR_GPSR(IP4_0, LCDOUT7),
+
+ PINMUX_IPSR_GPSR(IP4_3_1, DU0_DG0),
+ PINMUX_IPSR_GPSR(IP4_3_1, LCDOUT8),
+ PINMUX_IPSR_GPSR(IP4_3_1, AUDATA4),
+ PINMUX_IPSR_GPSR(IP4_3_1, ARM_TRACEDATA_4),
+ PINMUX_IPSR_GPSR(IP4_3_1, TX1_D),
+ PINMUX_IPSR_GPSR(IP4_3_1, CAN0_TX_A),
+ PINMUX_IPSR_GPSR(IP4_3_1, ADICHS0),
+
+ PINMUX_IPSR_GPSR(IP4_6_4, DU0_DG1),
+ PINMUX_IPSR_GPSR(IP4_6_4, LCDOUT9),
+ PINMUX_IPSR_GPSR(IP4_6_4, AUDATA5),
+ PINMUX_IPSR_GPSR(IP4_6_4, ARM_TRACEDATA_5),
PINMUX_IPSR_MSEL(IP4_6_4, RX1_D, SEL_SCIF1_D),
PINMUX_IPSR_MSEL(IP4_6_4, CAN0_RX_A, SEL_CAN0_A),
- PINMUX_IPSR_DATA(IP4_6_4, ADIDATA),
+ PINMUX_IPSR_GPSR(IP4_6_4, ADIDATA),
- PINMUX_IPSR_DATA(IP4_7, DU0_DG2),
- PINMUX_IPSR_DATA(IP4_7, LCDOUT10),
+ PINMUX_IPSR_GPSR(IP4_7, DU0_DG2),
+ PINMUX_IPSR_GPSR(IP4_7, LCDOUT10),
- PINMUX_IPSR_DATA(IP4_8, DU0_DG3),
- PINMUX_IPSR_DATA(IP4_8, LCDOUT11),
+ PINMUX_IPSR_GPSR(IP4_8, DU0_DG3),
+ PINMUX_IPSR_GPSR(IP4_8, LCDOUT11),
- PINMUX_IPSR_DATA(IP4_10_9, DU0_DG4),
- PINMUX_IPSR_DATA(IP4_10_9, LCDOUT12),
+ PINMUX_IPSR_GPSR(IP4_10_9, DU0_DG4),
+ PINMUX_IPSR_GPSR(IP4_10_9, LCDOUT12),
PINMUX_IPSR_MSEL(IP4_10_9, RX0_B, SEL_SCIF0_B),
- PINMUX_IPSR_DATA(IP4_12_11, DU0_DG5),
- PINMUX_IPSR_DATA(IP4_12_11, LCDOUT13),
- PINMUX_IPSR_DATA(IP4_12_11, TX0_B),
+ PINMUX_IPSR_GPSR(IP4_12_11, DU0_DG5),
+ PINMUX_IPSR_GPSR(IP4_12_11, LCDOUT13),
+ PINMUX_IPSR_GPSR(IP4_12_11, TX0_B),
- PINMUX_IPSR_DATA(IP4_14_13, DU0_DG6),
- PINMUX_IPSR_DATA(IP4_14_13, LCDOUT14),
+ PINMUX_IPSR_GPSR(IP4_14_13, DU0_DG6),
+ PINMUX_IPSR_GPSR(IP4_14_13, LCDOUT14),
PINMUX_IPSR_MSEL(IP4_14_13, RX4_A, SEL_SCIF4_A),
- PINMUX_IPSR_DATA(IP4_16_15, DU0_DG7),
- PINMUX_IPSR_DATA(IP4_16_15, LCDOUT15),
- PINMUX_IPSR_DATA(IP4_16_15, TX4_A),
+ PINMUX_IPSR_GPSR(IP4_16_15, DU0_DG7),
+ PINMUX_IPSR_GPSR(IP4_16_15, LCDOUT15),
+ PINMUX_IPSR_GPSR(IP4_16_15, TX4_A),
PINMUX_IPSR_MSEL(IP4_20_17, SSI_SCK2_B, SEL_SSI2_B),
PINMUX_DATA(VI0_R0_B_MARK, FN_IP4_20_17, FN_VI0_R0_B, FN_SEL_VI0_B), /* see sel_vi0 */
PINMUX_DATA(VI0_R0_D_MARK, FN_IP4_20_17, FN_VI0_R0_B, FN_SEL_VI0_D), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP4_20_17, DU0_DB0),
- PINMUX_IPSR_DATA(IP4_20_17, LCDOUT16),
- PINMUX_IPSR_DATA(IP4_20_17, AUDATA6),
- PINMUX_IPSR_DATA(IP4_20_17, ARM_TRACEDATA_6),
+ PINMUX_IPSR_GPSR(IP4_20_17, DU0_DB0),
+ PINMUX_IPSR_GPSR(IP4_20_17, LCDOUT16),
+ PINMUX_IPSR_GPSR(IP4_20_17, AUDATA6),
+ PINMUX_IPSR_GPSR(IP4_20_17, ARM_TRACEDATA_6),
PINMUX_IPSR_MSEL(IP4_20_17, GPSCLK_A, SEL_GPS_A),
- PINMUX_IPSR_DATA(IP4_20_17, PWM0_A),
- PINMUX_IPSR_DATA(IP4_20_17, ADICLK),
+ PINMUX_IPSR_GPSR(IP4_20_17, PWM0_A),
+ PINMUX_IPSR_GPSR(IP4_20_17, ADICLK),
PINMUX_IPSR_MSEL(IP4_20_17, TS_SDAT0_B, SEL_TSIF0_B),
- PINMUX_IPSR_DATA(IP4_24_21, AUDIO_CLKC),
+ PINMUX_IPSR_GPSR(IP4_24_21, AUDIO_CLKC),
PINMUX_DATA(VI0_R1_B_MARK, FN_IP4_24_21, FN_VI0_R1_B, FN_SEL_VI0_B), /* see sel_vi0 */
PINMUX_DATA(VI0_R1_D_MARK, FN_IP4_24_21, FN_VI0_R1_B, FN_SEL_VI0_D), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP4_24_21, DU0_DB1),
- PINMUX_IPSR_DATA(IP4_24_21, LCDOUT17),
- PINMUX_IPSR_DATA(IP4_24_21, AUDATA7),
- PINMUX_IPSR_DATA(IP4_24_21, ARM_TRACEDATA_7),
+ PINMUX_IPSR_GPSR(IP4_24_21, DU0_DB1),
+ PINMUX_IPSR_GPSR(IP4_24_21, LCDOUT17),
+ PINMUX_IPSR_GPSR(IP4_24_21, AUDATA7),
+ PINMUX_IPSR_GPSR(IP4_24_21, ARM_TRACEDATA_7),
PINMUX_IPSR_MSEL(IP4_24_21, GPSIN_A, SEL_GPS_A),
- PINMUX_IPSR_DATA(IP4_24_21, ADICS_SAMP),
+ PINMUX_IPSR_GPSR(IP4_24_21, ADICS_SAMP),
PINMUX_IPSR_MSEL(IP4_24_21, TS_SCK0_B, SEL_TSIF0_B),
PINMUX_DATA(VI0_R2_B_MARK, FN_IP4_26_25, FN_VI0_R2_B, FN_SEL_VI0_B), /* see sel_vi0 */
PINMUX_DATA(VI0_R2_D_MARK, FN_IP4_26_25, FN_VI0_R2_B, FN_SEL_VI0_D), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP4_26_25, DU0_DB2),
- PINMUX_IPSR_DATA(IP4_26_25, LCDOUT18),
+ PINMUX_IPSR_GPSR(IP4_26_25, DU0_DB2),
+ PINMUX_IPSR_GPSR(IP4_26_25, LCDOUT18),
PINMUX_IPSR_MSEL(IP4_28_27, VI0_R3_B, SEL_VI0_B),
- PINMUX_IPSR_DATA(IP4_28_27, DU0_DB3),
- PINMUX_IPSR_DATA(IP4_28_27, LCDOUT19),
+ PINMUX_IPSR_GPSR(IP4_28_27, DU0_DB3),
+ PINMUX_IPSR_GPSR(IP4_28_27, LCDOUT19),
PINMUX_DATA(VI0_R4_B_MARK, FN_IP4_30_29, FN_VI0_R4_B, FN_SEL_VI0_B), /* see sel_vi0 */
PINMUX_DATA(VI0_R4_D_MARK, FN_IP4_30_29, FN_VI0_R4_B, FN_SEL_VI0_D), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP4_30_29, DU0_DB4),
- PINMUX_IPSR_DATA(IP4_30_29, LCDOUT20),
+ PINMUX_IPSR_GPSR(IP4_30_29, DU0_DB4),
+ PINMUX_IPSR_GPSR(IP4_30_29, LCDOUT20),
/* IPSR5 */
PINMUX_DATA(VI0_R5_B_MARK, FN_IP5_1_0, FN_VI0_R5_B, FN_SEL_VI0_B), /* see sel_vi0 */
PINMUX_DATA(VI0_R5_D_MARK, FN_IP5_1_0, FN_VI0_R5_B, FN_SEL_VI0_D), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP5_1_0, DU0_DB5),
- PINMUX_IPSR_DATA(IP5_1_0, LCDOUT21),
+ PINMUX_IPSR_GPSR(IP5_1_0, DU0_DB5),
+ PINMUX_IPSR_GPSR(IP5_1_0, LCDOUT21),
PINMUX_IPSR_MSEL(IP5_3_2, VI1_DATA10_B, SEL_VI1_B),
- PINMUX_IPSR_DATA(IP5_3_2, DU0_DB6),
- PINMUX_IPSR_DATA(IP5_3_2, LCDOUT22),
+ PINMUX_IPSR_GPSR(IP5_3_2, DU0_DB6),
+ PINMUX_IPSR_GPSR(IP5_3_2, LCDOUT22),
PINMUX_IPSR_MSEL(IP5_5_4, VI1_DATA11_B, SEL_VI1_B),
- PINMUX_IPSR_DATA(IP5_5_4, DU0_DB7),
- PINMUX_IPSR_DATA(IP5_5_4, LCDOUT23),
+ PINMUX_IPSR_GPSR(IP5_5_4, DU0_DB7),
+ PINMUX_IPSR_GPSR(IP5_5_4, LCDOUT23),
- PINMUX_IPSR_DATA(IP5_6, DU0_DOTCLKIN),
- PINMUX_IPSR_DATA(IP5_6, QSTVA_QVS),
+ PINMUX_IPSR_GPSR(IP5_6, DU0_DOTCLKIN),
+ PINMUX_IPSR_GPSR(IP5_6, QSTVA_QVS),
- PINMUX_IPSR_DATA(IP5_7, DU0_DOTCLKO_UT0),
- PINMUX_IPSR_DATA(IP5_7, QCLK),
+ PINMUX_IPSR_GPSR(IP5_7, DU0_DOTCLKO_UT0),
+ PINMUX_IPSR_GPSR(IP5_7, QCLK),
- PINMUX_IPSR_DATA(IP5_9_8, DU0_DOTCLKO_UT1),
- PINMUX_IPSR_DATA(IP5_9_8, QSTVB_QVE),
- PINMUX_IPSR_DATA(IP5_9_8, AUDIO_CLKOUT_A),
+ PINMUX_IPSR_GPSR(IP5_9_8, DU0_DOTCLKO_UT1),
+ PINMUX_IPSR_GPSR(IP5_9_8, QSTVB_QVE),
+ PINMUX_IPSR_GPSR(IP5_9_8, AUDIO_CLKOUT_A),
PINMUX_IPSR_MSEL(IP5_9_8, REMOCON_C, SEL_REMOCON_C),
PINMUX_IPSR_MSEL(IP5_11_10, SSI_WS2_B, SEL_SSI2_B),
- PINMUX_IPSR_DATA(IP5_11_10, DU0_EXHSYNC_DU0_HSYNC),
- PINMUX_IPSR_DATA(IP5_11_10, QSTH_QHS),
+ PINMUX_IPSR_GPSR(IP5_11_10, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_GPSR(IP5_11_10, QSTH_QHS),
- PINMUX_IPSR_DATA(IP5_12, DU0_EXVSYNC_DU0_VSYNC),
- PINMUX_IPSR_DATA(IP5_12, QSTB_QHE),
+ PINMUX_IPSR_GPSR(IP5_12, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_GPSR(IP5_12, QSTB_QHE),
- PINMUX_IPSR_DATA(IP5_14_13, DU0_EXODDF_DU0_ODDF_DISP_CDE),
- PINMUX_IPSR_DATA(IP5_14_13, QCPV_QDE),
+ PINMUX_IPSR_GPSR(IP5_14_13, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP5_14_13, QCPV_QDE),
PINMUX_IPSR_MSEL(IP5_14_13, FMCLK_D, SEL_FM_D),
PINMUX_IPSR_MSEL(IP5_17_15, SSI_SCK1_A, SEL_SSI1_A),
- PINMUX_IPSR_DATA(IP5_17_15, DU0_DISP),
- PINMUX_IPSR_DATA(IP5_17_15, QPOLA),
- PINMUX_IPSR_DATA(IP5_17_15, AUDCK),
- PINMUX_IPSR_DATA(IP5_17_15, ARM_TRACECLK),
- PINMUX_IPSR_DATA(IP5_17_15, BPFCLK_D),
+ PINMUX_IPSR_GPSR(IP5_17_15, DU0_DISP),
+ PINMUX_IPSR_GPSR(IP5_17_15, QPOLA),
+ PINMUX_IPSR_GPSR(IP5_17_15, AUDCK),
+ PINMUX_IPSR_GPSR(IP5_17_15, ARM_TRACECLK),
+ PINMUX_IPSR_GPSR(IP5_17_15, BPFCLK_D),
PINMUX_IPSR_MSEL(IP5_20_18, SSI_WS1_A, SEL_SSI1_A),
- PINMUX_IPSR_DATA(IP5_20_18, DU0_CDE),
- PINMUX_IPSR_DATA(IP5_20_18, QPOLB),
- PINMUX_IPSR_DATA(IP5_20_18, AUDSYNC),
- PINMUX_IPSR_DATA(IP5_20_18, ARM_TRACECTL),
+ PINMUX_IPSR_GPSR(IP5_20_18, DU0_CDE),
+ PINMUX_IPSR_GPSR(IP5_20_18, QPOLB),
+ PINMUX_IPSR_GPSR(IP5_20_18, AUDSYNC),
+ PINMUX_IPSR_GPSR(IP5_20_18, ARM_TRACECTL),
PINMUX_IPSR_MSEL(IP5_20_18, FMIN_D, SEL_FM_D),
PINMUX_IPSR_MSEL(IP5_22_21, SD1_CD_B, SEL_SD1_B),
- PINMUX_IPSR_DATA(IP5_22_21, SSI_SCK78),
+ PINMUX_IPSR_GPSR(IP5_22_21, SSI_SCK78),
PINMUX_IPSR_MSEL(IP5_22_21, HSPI_RX0_B, SEL_HSPI0_B),
- PINMUX_IPSR_DATA(IP5_22_21, TX1_B),
+ PINMUX_IPSR_GPSR(IP5_22_21, TX1_B),
PINMUX_IPSR_MSEL(IP5_25_23, SD1_WP_B, SEL_SD1_B),
- PINMUX_IPSR_DATA(IP5_25_23, SSI_WS78),
+ PINMUX_IPSR_GPSR(IP5_25_23, SSI_WS78),
PINMUX_IPSR_MSEL(IP5_25_23, HSPI_CLK0_B, SEL_HSPI0_B),
PINMUX_IPSR_MSEL(IP5_25_23, RX1_B, SEL_SCIF1_B),
PINMUX_IPSR_MSEL(IP5_25_23, CAN_CLK_D, SEL_CANCLK_D),
- PINMUX_IPSR_DATA(IP5_28_26, SSI_SDATA8),
+ PINMUX_IPSR_GPSR(IP5_28_26, SSI_SDATA8),
PINMUX_IPSR_MSEL(IP5_28_26, SSI_SCK2_A, SEL_SSI2_A),
PINMUX_IPSR_MSEL(IP5_28_26, HSPI_CS0_B, SEL_HSPI0_B),
- PINMUX_IPSR_DATA(IP5_28_26, TX2_A),
- PINMUX_IPSR_DATA(IP5_28_26, CAN0_TX_B),
+ PINMUX_IPSR_GPSR(IP5_28_26, TX2_A),
+ PINMUX_IPSR_GPSR(IP5_28_26, CAN0_TX_B),
- PINMUX_IPSR_DATA(IP5_30_29, SSI_SDATA7),
- PINMUX_IPSR_DATA(IP5_30_29, HSPI_TX0_B),
+ PINMUX_IPSR_GPSR(IP5_30_29, SSI_SDATA7),
+ PINMUX_IPSR_GPSR(IP5_30_29, HSPI_TX0_B),
PINMUX_IPSR_MSEL(IP5_30_29, RX2_A, SEL_SCIF2_A),
PINMUX_IPSR_MSEL(IP5_30_29, CAN0_RX_B, SEL_CAN0_B),
/* IPSR6 */
- PINMUX_IPSR_DATA(IP6_1_0, SSI_SCK6),
+ PINMUX_IPSR_GPSR(IP6_1_0, SSI_SCK6),
PINMUX_IPSR_MSEL(IP6_1_0, HSPI_RX2_A, SEL_HSPI2_A),
PINMUX_IPSR_MSEL(IP6_1_0, FMCLK_B, SEL_FM_B),
- PINMUX_IPSR_DATA(IP6_1_0, CAN1_TX_B),
+ PINMUX_IPSR_GPSR(IP6_1_0, CAN1_TX_B),
- PINMUX_IPSR_DATA(IP6_4_2, SSI_WS6),
+ PINMUX_IPSR_GPSR(IP6_4_2, SSI_WS6),
PINMUX_IPSR_MSEL(IP6_4_2, HSPI_CLK2_A, SEL_HSPI2_A),
- PINMUX_IPSR_DATA(IP6_4_2, BPFCLK_B),
+ PINMUX_IPSR_GPSR(IP6_4_2, BPFCLK_B),
PINMUX_IPSR_MSEL(IP6_4_2, CAN1_RX_B, SEL_CAN1_B),
- PINMUX_IPSR_DATA(IP6_6_5, SSI_SDATA6),
- PINMUX_IPSR_DATA(IP6_6_5, HSPI_TX2_A),
+ PINMUX_IPSR_GPSR(IP6_6_5, SSI_SDATA6),
+ PINMUX_IPSR_GPSR(IP6_6_5, HSPI_TX2_A),
PINMUX_IPSR_MSEL(IP6_6_5, FMIN_B, SEL_FM_B),
- PINMUX_IPSR_DATA(IP6_7, SSI_SCK5),
+ PINMUX_IPSR_GPSR(IP6_7, SSI_SCK5),
PINMUX_IPSR_MSEL(IP6_7, RX4_C, SEL_SCIF4_C),
- PINMUX_IPSR_DATA(IP6_8, SSI_WS5),
- PINMUX_IPSR_DATA(IP6_8, TX4_C),
+ PINMUX_IPSR_GPSR(IP6_8, SSI_WS5),
+ PINMUX_IPSR_GPSR(IP6_8, TX4_C),
- PINMUX_IPSR_DATA(IP6_9, SSI_SDATA5),
+ PINMUX_IPSR_GPSR(IP6_9, SSI_SDATA5),
PINMUX_IPSR_MSEL(IP6_9, RX0_D, SEL_SCIF0_D),
- PINMUX_IPSR_DATA(IP6_10, SSI_WS34),
- PINMUX_IPSR_DATA(IP6_10, ARM_TRACEDATA_8),
+ PINMUX_IPSR_GPSR(IP6_10, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP6_10, ARM_TRACEDATA_8),
- PINMUX_IPSR_DATA(IP6_12_11, SSI_SDATA4),
+ PINMUX_IPSR_GPSR(IP6_12_11, SSI_SDATA4),
PINMUX_IPSR_MSEL(IP6_12_11, SSI_WS2_A, SEL_SSI2_A),
- PINMUX_IPSR_DATA(IP6_12_11, ARM_TRACEDATA_9),
+ PINMUX_IPSR_GPSR(IP6_12_11, ARM_TRACEDATA_9),
- PINMUX_IPSR_DATA(IP6_13, SSI_SDATA3),
- PINMUX_IPSR_DATA(IP6_13, ARM_TRACEDATA_10),
+ PINMUX_IPSR_GPSR(IP6_13, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP6_13, ARM_TRACEDATA_10),
- PINMUX_IPSR_DATA(IP6_15_14, SSI_SCK012),
- PINMUX_IPSR_DATA(IP6_15_14, ARM_TRACEDATA_11),
- PINMUX_IPSR_DATA(IP6_15_14, TX0_D),
+ PINMUX_IPSR_GPSR(IP6_15_14, SSI_SCK012),
+ PINMUX_IPSR_GPSR(IP6_15_14, ARM_TRACEDATA_11),
+ PINMUX_IPSR_GPSR(IP6_15_14, TX0_D),
- PINMUX_IPSR_DATA(IP6_16, SSI_WS012),
- PINMUX_IPSR_DATA(IP6_16, ARM_TRACEDATA_12),
+ PINMUX_IPSR_GPSR(IP6_16, SSI_WS012),
+ PINMUX_IPSR_GPSR(IP6_16, ARM_TRACEDATA_12),
- PINMUX_IPSR_DATA(IP6_18_17, SSI_SDATA2),
+ PINMUX_IPSR_GPSR(IP6_18_17, SSI_SDATA2),
PINMUX_IPSR_MSEL(IP6_18_17, HSPI_CS2_A, SEL_HSPI2_A),
- PINMUX_IPSR_DATA(IP6_18_17, ARM_TRACEDATA_13),
+ PINMUX_IPSR_GPSR(IP6_18_17, ARM_TRACEDATA_13),
PINMUX_IPSR_MSEL(IP6_18_17, SDA1_A, SEL_I2C1_A),
- PINMUX_IPSR_DATA(IP6_20_19, SSI_SDATA1),
- PINMUX_IPSR_DATA(IP6_20_19, ARM_TRACEDATA_14),
+ PINMUX_IPSR_GPSR(IP6_20_19, SSI_SDATA1),
+ PINMUX_IPSR_GPSR(IP6_20_19, ARM_TRACEDATA_14),
PINMUX_IPSR_MSEL(IP6_20_19, SCL1_A, SEL_I2C1_A),
PINMUX_IPSR_MSEL(IP6_20_19, SCK2_A, SEL_SCIF2_A),
- PINMUX_IPSR_DATA(IP6_21, SSI_SDATA0),
- PINMUX_IPSR_DATA(IP6_21, ARM_TRACEDATA_15),
+ PINMUX_IPSR_GPSR(IP6_21, SSI_SDATA0),
+ PINMUX_IPSR_GPSR(IP6_21, ARM_TRACEDATA_15),
- PINMUX_IPSR_DATA(IP6_23_22, SD0_CLK),
- PINMUX_IPSR_DATA(IP6_23_22, SUB_TDO),
+ PINMUX_IPSR_GPSR(IP6_23_22, SD0_CLK),
+ PINMUX_IPSR_GPSR(IP6_23_22, SUB_TDO),
- PINMUX_IPSR_DATA(IP6_25_24, SD0_CMD),
- PINMUX_IPSR_DATA(IP6_25_24, SUB_TRST),
+ PINMUX_IPSR_GPSR(IP6_25_24, SD0_CMD),
+ PINMUX_IPSR_GPSR(IP6_25_24, SUB_TRST),
- PINMUX_IPSR_DATA(IP6_27_26, SD0_DAT0),
- PINMUX_IPSR_DATA(IP6_27_26, SUB_TMS),
+ PINMUX_IPSR_GPSR(IP6_27_26, SD0_DAT0),
+ PINMUX_IPSR_GPSR(IP6_27_26, SUB_TMS),
- PINMUX_IPSR_DATA(IP6_29_28, SD0_DAT1),
- PINMUX_IPSR_DATA(IP6_29_28, SUB_TCK),
+ PINMUX_IPSR_GPSR(IP6_29_28, SD0_DAT1),
+ PINMUX_IPSR_GPSR(IP6_29_28, SUB_TCK),
- PINMUX_IPSR_DATA(IP6_31_30, SD0_DAT2),
- PINMUX_IPSR_DATA(IP6_31_30, SUB_TDI),
+ PINMUX_IPSR_GPSR(IP6_31_30, SD0_DAT2),
+ PINMUX_IPSR_GPSR(IP6_31_30, SUB_TDI),
/* IPSR7 */
- PINMUX_IPSR_DATA(IP7_1_0, SD0_DAT3),
+ PINMUX_IPSR_GPSR(IP7_1_0, SD0_DAT3),
PINMUX_IPSR_MSEL(IP7_1_0, IRQ1_B, SEL_IRQ1_B),
- PINMUX_IPSR_DATA(IP7_3_2, SD0_CD),
- PINMUX_IPSR_DATA(IP7_3_2, TX5_A),
+ PINMUX_IPSR_GPSR(IP7_3_2, SD0_CD),
+ PINMUX_IPSR_GPSR(IP7_3_2, TX5_A),
- PINMUX_IPSR_DATA(IP7_5_4, SD0_WP),
+ PINMUX_IPSR_GPSR(IP7_5_4, SD0_WP),
PINMUX_IPSR_MSEL(IP7_5_4, RX5_A, SEL_SCIF5_A),
- PINMUX_IPSR_DATA(IP7_8_6, VI1_CLKENB),
+ PINMUX_IPSR_GPSR(IP7_8_6, VI1_CLKENB),
PINMUX_IPSR_MSEL(IP7_8_6, HSPI_CLK0_A, SEL_HSPI0_A),
- PINMUX_IPSR_DATA(IP7_8_6, HTX1_A),
+ PINMUX_IPSR_GPSR(IP7_8_6, HTX1_A),
PINMUX_IPSR_MSEL(IP7_8_6, RTS1_C, SEL_SCIF1_C),
- PINMUX_IPSR_DATA(IP7_11_9, VI1_FIELD),
+ PINMUX_IPSR_GPSR(IP7_11_9, VI1_FIELD),
PINMUX_IPSR_MSEL(IP7_11_9, HSPI_CS0_A, SEL_HSPI0_A),
PINMUX_IPSR_MSEL(IP7_11_9, HRX1_A, SEL_HSCIF1_A),
PINMUX_IPSR_MSEL(IP7_11_9, SCK1_C, SEL_SCIF1_C),
- PINMUX_IPSR_DATA(IP7_14_12, VI1_HSYNC),
+ PINMUX_IPSR_GPSR(IP7_14_12, VI1_HSYNC),
PINMUX_IPSR_MSEL(IP7_14_12, HSPI_RX0_A, SEL_HSPI0_A),
PINMUX_IPSR_MSEL(IP7_14_12, HRTS1_A, SEL_HSCIF1_A),
PINMUX_IPSR_MSEL(IP7_14_12, FMCLK_A, SEL_FM_A),
PINMUX_IPSR_MSEL(IP7_14_12, RX1_C, SEL_SCIF1_C),
- PINMUX_IPSR_DATA(IP7_17_15, VI1_VSYNC),
- PINMUX_IPSR_DATA(IP7_17_15, HSPI_TX0),
+ PINMUX_IPSR_GPSR(IP7_17_15, VI1_VSYNC),
+ PINMUX_IPSR_GPSR(IP7_17_15, HSPI_TX0),
PINMUX_IPSR_MSEL(IP7_17_15, HCTS1_A, SEL_HSCIF1_A),
- PINMUX_IPSR_DATA(IP7_17_15, BPFCLK_A),
- PINMUX_IPSR_DATA(IP7_17_15, TX1_C),
+ PINMUX_IPSR_GPSR(IP7_17_15, BPFCLK_A),
+ PINMUX_IPSR_GPSR(IP7_17_15, TX1_C),
- PINMUX_IPSR_DATA(IP7_20_18, TCLK0),
+ PINMUX_IPSR_GPSR(IP7_20_18, TCLK0),
PINMUX_IPSR_MSEL(IP7_20_18, HSCK1_A, SEL_HSCIF1_A),
PINMUX_IPSR_MSEL(IP7_20_18, FMIN_A, SEL_FM_A),
PINMUX_IPSR_MSEL(IP7_20_18, IRQ2_C, SEL_IRQ2_C),
PINMUX_IPSR_MSEL(IP7_20_18, CTS1_C, SEL_SCIF1_C),
- PINMUX_IPSR_DATA(IP7_20_18, SPEEDIN),
+ PINMUX_IPSR_GPSR(IP7_20_18, SPEEDIN),
- PINMUX_IPSR_DATA(IP7_21, VI0_CLK),
+ PINMUX_IPSR_GPSR(IP7_21, VI0_CLK),
PINMUX_IPSR_MSEL(IP7_21, CAN_CLK_A, SEL_CANCLK_A),
- PINMUX_IPSR_DATA(IP7_24_22, VI0_CLKENB),
+ PINMUX_IPSR_GPSR(IP7_24_22, VI0_CLKENB),
PINMUX_IPSR_MSEL(IP7_24_22, SD2_DAT2_B, SEL_SD2_B),
- PINMUX_IPSR_DATA(IP7_24_22, VI1_DATA0),
- PINMUX_IPSR_DATA(IP7_24_22, DU1_DG6),
+ PINMUX_IPSR_GPSR(IP7_24_22, VI1_DATA0),
+ PINMUX_IPSR_GPSR(IP7_24_22, DU1_DG6),
PINMUX_IPSR_MSEL(IP7_24_22, HSPI_RX1_A, SEL_HSPI1_A),
PINMUX_IPSR_MSEL(IP7_24_22, RX4_B, SEL_SCIF4_B),
- PINMUX_IPSR_DATA(IP7_28_25, VI0_FIELD),
+ PINMUX_IPSR_GPSR(IP7_28_25, VI0_FIELD),
PINMUX_IPSR_MSEL(IP7_28_25, SD2_DAT3_B, SEL_SD2_B),
PINMUX_DATA(VI0_R3_C_MARK, FN_IP7_28_25, FN_VI0_R3_C, FN_SEL_VI0_C), /* see sel_vi0 */
PINMUX_DATA(VI0_R3_D_MARK, FN_IP7_28_25, FN_VI0_R3_C, FN_SEL_VI0_D), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP7_28_25, VI1_DATA1),
- PINMUX_IPSR_DATA(IP7_28_25, DU1_DG7),
+ PINMUX_IPSR_GPSR(IP7_28_25, VI1_DATA1),
+ PINMUX_IPSR_GPSR(IP7_28_25, DU1_DG7),
PINMUX_IPSR_MSEL(IP7_28_25, HSPI_CLK1_A, SEL_HSPI1_A),
- PINMUX_IPSR_DATA(IP7_28_25, TX4_B),
+ PINMUX_IPSR_GPSR(IP7_28_25, TX4_B),
- PINMUX_IPSR_DATA(IP7_31_29, VI0_HSYNC),
+ PINMUX_IPSR_GPSR(IP7_31_29, VI0_HSYNC),
PINMUX_IPSR_MSEL(IP7_31_29, SD2_CD_B, SEL_SD2_B),
- PINMUX_IPSR_DATA(IP7_31_29, VI1_DATA2),
- PINMUX_IPSR_DATA(IP7_31_29, DU1_DR2),
+ PINMUX_IPSR_GPSR(IP7_31_29, VI1_DATA2),
+ PINMUX_IPSR_GPSR(IP7_31_29, DU1_DR2),
PINMUX_IPSR_MSEL(IP7_31_29, HSPI_CS1_A, SEL_HSPI1_A),
PINMUX_IPSR_MSEL(IP7_31_29, RX3_B, SEL_SCIF3_B),
/* IPSR8 */
- PINMUX_IPSR_DATA(IP8_2_0, VI0_VSYNC),
+ PINMUX_IPSR_GPSR(IP8_2_0, VI0_VSYNC),
PINMUX_IPSR_MSEL(IP8_2_0, SD2_WP_B, SEL_SD2_B),
- PINMUX_IPSR_DATA(IP8_2_0, VI1_DATA3),
- PINMUX_IPSR_DATA(IP8_2_0, DU1_DR3),
- PINMUX_IPSR_DATA(IP8_2_0, HSPI_TX1_A),
- PINMUX_IPSR_DATA(IP8_2_0, TX3_B),
+ PINMUX_IPSR_GPSR(IP8_2_0, VI1_DATA3),
+ PINMUX_IPSR_GPSR(IP8_2_0, DU1_DR3),
+ PINMUX_IPSR_GPSR(IP8_2_0, HSPI_TX1_A),
+ PINMUX_IPSR_GPSR(IP8_2_0, TX3_B),
- PINMUX_IPSR_DATA(IP8_5_3, VI0_DATA0_VI0_B0),
- PINMUX_IPSR_DATA(IP8_5_3, DU1_DG2),
+ PINMUX_IPSR_GPSR(IP8_5_3, VI0_DATA0_VI0_B0),
+ PINMUX_IPSR_GPSR(IP8_5_3, DU1_DG2),
PINMUX_IPSR_MSEL(IP8_5_3, IRQ2_B, SEL_IRQ2_B),
PINMUX_IPSR_MSEL(IP8_5_3, RX3_D, SEL_SCIF3_D),
- PINMUX_IPSR_DATA(IP8_8_6, VI0_DATA1_VI0_B1),
- PINMUX_IPSR_DATA(IP8_8_6, DU1_DG3),
+ PINMUX_IPSR_GPSR(IP8_8_6, VI0_DATA1_VI0_B1),
+ PINMUX_IPSR_GPSR(IP8_8_6, DU1_DG3),
PINMUX_IPSR_MSEL(IP8_8_6, IRQ3_B, SEL_IRQ3_B),
- PINMUX_IPSR_DATA(IP8_8_6, TX3_D),
+ PINMUX_IPSR_GPSR(IP8_8_6, TX3_D),
- PINMUX_IPSR_DATA(IP8_10_9, VI0_DATA2_VI0_B2),
- PINMUX_IPSR_DATA(IP8_10_9, DU1_DG4),
+ PINMUX_IPSR_GPSR(IP8_10_9, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_GPSR(IP8_10_9, DU1_DG4),
PINMUX_IPSR_MSEL(IP8_10_9, RX0_C, SEL_SCIF0_C),
- PINMUX_IPSR_DATA(IP8_13_11, VI0_DATA3_VI0_B3),
- PINMUX_IPSR_DATA(IP8_13_11, DU1_DG5),
- PINMUX_IPSR_DATA(IP8_13_11, TX1_A),
- PINMUX_IPSR_DATA(IP8_13_11, TX0_C),
+ PINMUX_IPSR_GPSR(IP8_13_11, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_GPSR(IP8_13_11, DU1_DG5),
+ PINMUX_IPSR_GPSR(IP8_13_11, TX1_A),
+ PINMUX_IPSR_GPSR(IP8_13_11, TX0_C),
- PINMUX_IPSR_DATA(IP8_15_14, VI0_DATA4_VI0_B4),
- PINMUX_IPSR_DATA(IP8_15_14, DU1_DB2),
+ PINMUX_IPSR_GPSR(IP8_15_14, VI0_DATA4_VI0_B4),
+ PINMUX_IPSR_GPSR(IP8_15_14, DU1_DB2),
PINMUX_IPSR_MSEL(IP8_15_14, RX1_A, SEL_SCIF1_A),
- PINMUX_IPSR_DATA(IP8_18_16, VI0_DATA5_VI0_B5),
- PINMUX_IPSR_DATA(IP8_18_16, DU1_DB3),
+ PINMUX_IPSR_GPSR(IP8_18_16, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_GPSR(IP8_18_16, DU1_DB3),
PINMUX_IPSR_MSEL(IP8_18_16, SCK1_A, SEL_SCIF1_A),
- PINMUX_IPSR_DATA(IP8_18_16, PWM4),
+ PINMUX_IPSR_GPSR(IP8_18_16, PWM4),
PINMUX_IPSR_MSEL(IP8_18_16, HSCK1_B, SEL_HSCIF1_B),
- PINMUX_IPSR_DATA(IP8_21_19, VI0_DATA6_VI0_G0),
- PINMUX_IPSR_DATA(IP8_21_19, DU1_DB4),
+ PINMUX_IPSR_GPSR(IP8_21_19, VI0_DATA6_VI0_G0),
+ PINMUX_IPSR_GPSR(IP8_21_19, DU1_DB4),
PINMUX_IPSR_MSEL(IP8_21_19, CTS1_A, SEL_SCIF1_A),
- PINMUX_IPSR_DATA(IP8_21_19, PWM5),
+ PINMUX_IPSR_GPSR(IP8_21_19, PWM5),
- PINMUX_IPSR_DATA(IP8_23_22, VI0_DATA7_VI0_G1),
- PINMUX_IPSR_DATA(IP8_23_22, DU1_DB5),
+ PINMUX_IPSR_GPSR(IP8_23_22, VI0_DATA7_VI0_G1),
+ PINMUX_IPSR_GPSR(IP8_23_22, DU1_DB5),
PINMUX_IPSR_MSEL(IP8_23_22, RTS1_A, SEL_SCIF1_A),
- PINMUX_IPSR_DATA(IP8_26_24, VI0_G2),
- PINMUX_IPSR_DATA(IP8_26_24, SD2_CLK_B),
- PINMUX_IPSR_DATA(IP8_26_24, VI1_DATA4),
- PINMUX_IPSR_DATA(IP8_26_24, DU1_DR4),
- PINMUX_IPSR_DATA(IP8_26_24, HTX1_B),
+ PINMUX_IPSR_GPSR(IP8_26_24, VI0_G2),
+ PINMUX_IPSR_GPSR(IP8_26_24, SD2_CLK_B),
+ PINMUX_IPSR_GPSR(IP8_26_24, VI1_DATA4),
+ PINMUX_IPSR_GPSR(IP8_26_24, DU1_DR4),
+ PINMUX_IPSR_GPSR(IP8_26_24, HTX1_B),
- PINMUX_IPSR_DATA(IP8_29_27, VI0_G3),
+ PINMUX_IPSR_GPSR(IP8_29_27, VI0_G3),
PINMUX_IPSR_MSEL(IP8_29_27, SD2_CMD_B, SEL_SD2_B),
- PINMUX_IPSR_DATA(IP8_29_27, VI1_DATA5),
- PINMUX_IPSR_DATA(IP8_29_27, DU1_DR5),
+ PINMUX_IPSR_GPSR(IP8_29_27, VI1_DATA5),
+ PINMUX_IPSR_GPSR(IP8_29_27, DU1_DR5),
PINMUX_IPSR_MSEL(IP8_29_27, HRX1_B, SEL_HSCIF1_B),
/* IPSR9 */
- PINMUX_IPSR_DATA(IP9_2_0, VI0_G4),
+ PINMUX_IPSR_GPSR(IP9_2_0, VI0_G4),
PINMUX_IPSR_MSEL(IP9_2_0, SD2_DAT0_B, SEL_SD2_B),
- PINMUX_IPSR_DATA(IP9_2_0, VI1_DATA6),
- PINMUX_IPSR_DATA(IP9_2_0, DU1_DR6),
+ PINMUX_IPSR_GPSR(IP9_2_0, VI1_DATA6),
+ PINMUX_IPSR_GPSR(IP9_2_0, DU1_DR6),
PINMUX_IPSR_MSEL(IP9_2_0, HRTS1_B, SEL_HSCIF1_B),
- PINMUX_IPSR_DATA(IP9_5_3, VI0_G5),
+ PINMUX_IPSR_GPSR(IP9_5_3, VI0_G5),
PINMUX_IPSR_MSEL(IP9_5_3, SD2_DAT1_B, SEL_SD2_B),
- PINMUX_IPSR_DATA(IP9_5_3, VI1_DATA7),
- PINMUX_IPSR_DATA(IP9_5_3, DU1_DR7),
+ PINMUX_IPSR_GPSR(IP9_5_3, VI1_DATA7),
+ PINMUX_IPSR_GPSR(IP9_5_3, DU1_DR7),
PINMUX_IPSR_MSEL(IP9_5_3, HCTS1_B, SEL_HSCIF1_B),
PINMUX_DATA(VI0_R0_A_MARK, FN_IP9_8_6, FN_VI0_R0_A, FN_SEL_VI0_A), /* see sel_vi0 */
PINMUX_DATA(VI0_R0_C_MARK, FN_IP9_8_6, FN_VI0_R0_A, FN_SEL_VI0_C), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP9_8_6, VI1_CLK),
- PINMUX_IPSR_DATA(IP9_8_6, ETH_REF_CLK),
- PINMUX_IPSR_DATA(IP9_8_6, DU1_DOTCLKIN),
+ PINMUX_IPSR_GPSR(IP9_8_6, VI1_CLK),
+ PINMUX_IPSR_GPSR(IP9_8_6, ETH_REF_CLK),
+ PINMUX_IPSR_GPSR(IP9_8_6, DU1_DOTCLKIN),
PINMUX_DATA(VI0_R1_A_MARK, FN_IP9_11_9, FN_VI0_R1_A, FN_SEL_VI0_A), /* see sel_vi0 */
PINMUX_DATA(VI0_R1_C_MARK, FN_IP9_11_9, FN_VI0_R1_A, FN_SEL_VI0_C), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP9_11_9, VI1_DATA8),
- PINMUX_IPSR_DATA(IP9_11_9, DU1_DB6),
- PINMUX_IPSR_DATA(IP9_11_9, ETH_TXD0),
- PINMUX_IPSR_DATA(IP9_11_9, PWM2),
- PINMUX_IPSR_DATA(IP9_11_9, TCLK1),
+ PINMUX_IPSR_GPSR(IP9_11_9, VI1_DATA8),
+ PINMUX_IPSR_GPSR(IP9_11_9, DU1_DB6),
+ PINMUX_IPSR_GPSR(IP9_11_9, ETH_TXD0),
+ PINMUX_IPSR_GPSR(IP9_11_9, PWM2),
+ PINMUX_IPSR_GPSR(IP9_11_9, TCLK1),
PINMUX_DATA(VI0_R2_A_MARK, FN_IP9_14_12, FN_VI0_R2_A, FN_SEL_VI0_A), /* see sel_vi0 */
PINMUX_DATA(VI0_R2_C_MARK, FN_IP9_14_12, FN_VI0_R2_A, FN_SEL_VI0_C), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP9_14_12, VI1_DATA9),
- PINMUX_IPSR_DATA(IP9_14_12, DU1_DB7),
- PINMUX_IPSR_DATA(IP9_14_12, ETH_TXD1),
- PINMUX_IPSR_DATA(IP9_14_12, PWM3),
+ PINMUX_IPSR_GPSR(IP9_14_12, VI1_DATA9),
+ PINMUX_IPSR_GPSR(IP9_14_12, DU1_DB7),
+ PINMUX_IPSR_GPSR(IP9_14_12, ETH_TXD1),
+ PINMUX_IPSR_GPSR(IP9_14_12, PWM3),
PINMUX_IPSR_MSEL(IP9_17_15, VI0_R3_A, SEL_VI0_A),
- PINMUX_IPSR_DATA(IP9_17_15, ETH_CRS_DV),
- PINMUX_IPSR_DATA(IP9_17_15, IECLK),
+ PINMUX_IPSR_GPSR(IP9_17_15, ETH_CRS_DV),
+ PINMUX_IPSR_GPSR(IP9_17_15, IECLK),
PINMUX_IPSR_MSEL(IP9_17_15, SCK2_C, SEL_SCIF2_C),
PINMUX_DATA(VI0_R4_A_MARK, FN_IP9_20_18, FN_VI0_R4_A, FN_SEL_VI0_A), /* see sel_vi0 */
PINMUX_DATA(VI0_R3_C_MARK, FN_IP9_20_18, FN_VI0_R4_A, FN_SEL_VI0_C), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP9_20_18, ETH_TX_EN),
- PINMUX_IPSR_DATA(IP9_20_18, IETX),
- PINMUX_IPSR_DATA(IP9_20_18, TX2_C),
+ PINMUX_IPSR_GPSR(IP9_20_18, ETH_TX_EN),
+ PINMUX_IPSR_GPSR(IP9_20_18, IETX),
+ PINMUX_IPSR_GPSR(IP9_20_18, TX2_C),
PINMUX_DATA(VI0_R5_A_MARK, FN_IP9_23_21, FN_VI0_R5_A, FN_SEL_VI0_A), /* see sel_vi0 */
PINMUX_DATA(VI0_R5_C_MARK, FN_IP9_23_21, FN_VI0_R5_A, FN_SEL_VI0_C), /* see sel_vi0 */
- PINMUX_IPSR_DATA(IP9_23_21, ETH_RX_ER),
+ PINMUX_IPSR_GPSR(IP9_23_21, ETH_RX_ER),
PINMUX_IPSR_MSEL(IP9_23_21, FMCLK_C, SEL_FM_C),
- PINMUX_IPSR_DATA(IP9_23_21, IERX),
+ PINMUX_IPSR_GPSR(IP9_23_21, IERX),
PINMUX_IPSR_MSEL(IP9_23_21, RX2_C, SEL_SCIF2_C),
PINMUX_IPSR_MSEL(IP9_26_24, VI1_DATA10_A, SEL_VI1_A),
- PINMUX_IPSR_DATA(IP9_26_24, DU1_DOTCLKOUT),
- PINMUX_IPSR_DATA(IP9_26_24, ETH_RXD0),
- PINMUX_IPSR_DATA(IP9_26_24, BPFCLK_C),
- PINMUX_IPSR_DATA(IP9_26_24, TX2_D),
+ PINMUX_IPSR_GPSR(IP9_26_24, DU1_DOTCLKOUT),
+ PINMUX_IPSR_GPSR(IP9_26_24, ETH_RXD0),
+ PINMUX_IPSR_GPSR(IP9_26_24, BPFCLK_C),
+ PINMUX_IPSR_GPSR(IP9_26_24, TX2_D),
PINMUX_IPSR_MSEL(IP9_26_24, SDA2_C, SEL_I2C2_C),
PINMUX_IPSR_MSEL(IP9_29_27, VI1_DATA11_A, SEL_VI1_A),
- PINMUX_IPSR_DATA(IP9_29_27, DU1_EXHSYNC_DU1_HSYNC),
- PINMUX_IPSR_DATA(IP9_29_27, ETH_RXD1),
+ PINMUX_IPSR_GPSR(IP9_29_27, DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_IPSR_GPSR(IP9_29_27, ETH_RXD1),
PINMUX_IPSR_MSEL(IP9_29_27, FMIN_C, SEL_FM_C),
PINMUX_IPSR_MSEL(IP9_29_27, RX2_D, SEL_SCIF2_D),
PINMUX_IPSR_MSEL(IP9_29_27, SCL2_C, SEL_I2C2_C),
/* IPSR10 */
- PINMUX_IPSR_DATA(IP10_2_0, SD2_CLK_A),
- PINMUX_IPSR_DATA(IP10_2_0, DU1_EXVSYNC_DU1_VSYNC),
- PINMUX_IPSR_DATA(IP10_2_0, ATARD1),
- PINMUX_IPSR_DATA(IP10_2_0, ETH_MDC),
+ PINMUX_IPSR_GPSR(IP10_2_0, SD2_CLK_A),
+ PINMUX_IPSR_GPSR(IP10_2_0, DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_IPSR_GPSR(IP10_2_0, ATARD1),
+ PINMUX_IPSR_GPSR(IP10_2_0, ETH_MDC),
PINMUX_IPSR_MSEL(IP10_2_0, SDA1_B, SEL_I2C1_B),
PINMUX_IPSR_MSEL(IP10_5_3, SD2_CMD_A, SEL_SD2_A),
- PINMUX_IPSR_DATA(IP10_5_3, DU1_EXODDF_DU1_ODDF_DISP_CDE),
- PINMUX_IPSR_DATA(IP10_5_3, ATAWR1),
- PINMUX_IPSR_DATA(IP10_5_3, ETH_MDIO),
+ PINMUX_IPSR_GPSR(IP10_5_3, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP10_5_3, ATAWR1),
+ PINMUX_IPSR_GPSR(IP10_5_3, ETH_MDIO),
PINMUX_IPSR_MSEL(IP10_5_3, SCL1_B, SEL_I2C1_B),
PINMUX_IPSR_MSEL(IP10_8_6, SD2_DAT0_A, SEL_SD2_A),
- PINMUX_IPSR_DATA(IP10_8_6, DU1_DISP),
- PINMUX_IPSR_DATA(IP10_8_6, ATACS01),
+ PINMUX_IPSR_GPSR(IP10_8_6, DU1_DISP),
+ PINMUX_IPSR_GPSR(IP10_8_6, ATACS01),
PINMUX_IPSR_MSEL(IP10_8_6, DREQ1_B, SEL_DREQ1_B),
- PINMUX_IPSR_DATA(IP10_8_6, ETH_LINK),
+ PINMUX_IPSR_GPSR(IP10_8_6, ETH_LINK),
PINMUX_IPSR_MSEL(IP10_8_6, CAN1_RX_A, SEL_CAN1_A),
PINMUX_IPSR_MSEL(IP10_12_9, SD2_DAT1_A, SEL_SD2_A),
- PINMUX_IPSR_DATA(IP10_12_9, DU1_CDE),
- PINMUX_IPSR_DATA(IP10_12_9, ATACS11),
- PINMUX_IPSR_DATA(IP10_12_9, DACK1_B),
- PINMUX_IPSR_DATA(IP10_12_9, ETH_MAGIC),
- PINMUX_IPSR_DATA(IP10_12_9, CAN1_TX_A),
- PINMUX_IPSR_DATA(IP10_12_9, PWM6),
+ PINMUX_IPSR_GPSR(IP10_12_9, DU1_CDE),
+ PINMUX_IPSR_GPSR(IP10_12_9, ATACS11),
+ PINMUX_IPSR_GPSR(IP10_12_9, DACK1_B),
+ PINMUX_IPSR_GPSR(IP10_12_9, ETH_MAGIC),
+ PINMUX_IPSR_GPSR(IP10_12_9, CAN1_TX_A),
+ PINMUX_IPSR_GPSR(IP10_12_9, PWM6),
PINMUX_IPSR_MSEL(IP10_15_13, SD2_DAT2_A, SEL_SD2_A),
- PINMUX_IPSR_DATA(IP10_15_13, VI1_DATA12),
+ PINMUX_IPSR_GPSR(IP10_15_13, VI1_DATA12),
PINMUX_IPSR_MSEL(IP10_15_13, DREQ2_B, SEL_DREQ2_B),
- PINMUX_IPSR_DATA(IP10_15_13, ATADIR1),
+ PINMUX_IPSR_GPSR(IP10_15_13, ATADIR1),
PINMUX_IPSR_MSEL(IP10_15_13, HSPI_CLK2_B, SEL_HSPI2_B),
PINMUX_IPSR_MSEL(IP10_15_13, GPSCLK_B, SEL_GPS_B),
PINMUX_IPSR_MSEL(IP10_18_16, SD2_DAT3_A, SEL_SD2_A),
- PINMUX_IPSR_DATA(IP10_18_16, VI1_DATA13),
- PINMUX_IPSR_DATA(IP10_18_16, DACK2_B),
- PINMUX_IPSR_DATA(IP10_18_16, ATAG1),
+ PINMUX_IPSR_GPSR(IP10_18_16, VI1_DATA13),
+ PINMUX_IPSR_GPSR(IP10_18_16, DACK2_B),
+ PINMUX_IPSR_GPSR(IP10_18_16, ATAG1),
PINMUX_IPSR_MSEL(IP10_18_16, HSPI_CS2_B, SEL_HSPI2_B),
PINMUX_IPSR_MSEL(IP10_18_16, GPSIN_B, SEL_GPS_B),
PINMUX_IPSR_MSEL(IP10_21_19, SD2_CD_A, SEL_SD2_A),
- PINMUX_IPSR_DATA(IP10_21_19, VI1_DATA14),
+ PINMUX_IPSR_GPSR(IP10_21_19, VI1_DATA14),
PINMUX_IPSR_MSEL(IP10_21_19, EX_WAIT1_B, SEL_WAIT1_B),
PINMUX_IPSR_MSEL(IP10_21_19, DREQ0_B, SEL_DREQ0_B),
PINMUX_IPSR_MSEL(IP10_21_19, HSPI_RX2_B, SEL_HSPI2_B),
PINMUX_IPSR_MSEL(IP10_21_19, REMOCON_A, SEL_REMOCON_A),
PINMUX_IPSR_MSEL(IP10_24_22, SD2_WP_A, SEL_SD2_A),
- PINMUX_IPSR_DATA(IP10_24_22, VI1_DATA15),
+ PINMUX_IPSR_GPSR(IP10_24_22, VI1_DATA15),
PINMUX_IPSR_MSEL(IP10_24_22, EX_WAIT2_B, SEL_WAIT2_B),
- PINMUX_IPSR_DATA(IP10_24_22, DACK0_B),
- PINMUX_IPSR_DATA(IP10_24_22, HSPI_TX2_B),
+ PINMUX_IPSR_GPSR(IP10_24_22, DACK0_B),
+ PINMUX_IPSR_GPSR(IP10_24_22, HSPI_TX2_B),
PINMUX_IPSR_MSEL(IP10_24_22, CAN_CLK_C, SEL_CANCLK_C),
};
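
[Note: every hunk in this patch applies the same mechanical conversion:
each PINMUX_IPSR_DATA(ipsr, fn) entry becomes PINMUX_IPSR_GPSR(ipsr, fn)
with identical arguments, so the generated pinmux tables are unchanged.
As a minimal sketch of why the rename is purely textual, both spellings
expand to the same PINMUX_DATA() record. The definitions below are
modeled on drivers/pinctrl/sh-pfc/sh_pfc.h and are an illustration only,
not part of this commit; the exact macro bodies in the target tree may
differ.

	/* New name; expands to the same table entry as the old macro */
	#define PINMUX_IPSR_GPSR(ipsr, fn)	\
		PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)

	/* Old name, shown only for the 1:1 correspondence */
	#define PINMUX_IPSR_DATA(ipsr, fn)	\
		PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
]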
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index bd17eccb6a89..5bef934f823d 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -611,577 +611,577 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(USB_PENC0),
PINMUX_SINGLE(USB_PENC1),
- PINMUX_IPSR_DATA(IP0_2_0, USB_PENC2),
+ PINMUX_IPSR_GPSR(IP0_2_0, USB_PENC2),
PINMUX_IPSR_MSEL(IP0_2_0, SCK0, SEL_SCIF0_0),
- PINMUX_IPSR_DATA(IP0_2_0, PWM1),
+ PINMUX_IPSR_GPSR(IP0_2_0, PWM1),
PINMUX_IPSR_MSEL(IP0_2_0, PWMFSW0, SEL_PWMFSW_0),
PINMUX_IPSR_MSEL(IP0_2_0, SCIF_CLK, SEL_SCIF_0),
PINMUX_IPSR_MSEL(IP0_2_0, TCLK0_C, SEL_TMU0_2),
- PINMUX_IPSR_DATA(IP0_5_3, BS),
- PINMUX_IPSR_DATA(IP0_5_3, SD1_DAT2),
- PINMUX_IPSR_DATA(IP0_5_3, MMC0_D2),
- PINMUX_IPSR_DATA(IP0_5_3, FD2),
- PINMUX_IPSR_DATA(IP0_5_3, ATADIR0),
- PINMUX_IPSR_DATA(IP0_5_3, SDSELF),
+ PINMUX_IPSR_GPSR(IP0_5_3, BS),
+ PINMUX_IPSR_GPSR(IP0_5_3, SD1_DAT2),
+ PINMUX_IPSR_GPSR(IP0_5_3, MMC0_D2),
+ PINMUX_IPSR_GPSR(IP0_5_3, FD2),
+ PINMUX_IPSR_GPSR(IP0_5_3, ATADIR0),
+ PINMUX_IPSR_GPSR(IP0_5_3, SDSELF),
PINMUX_IPSR_MSEL(IP0_5_3, HCTS1, SEL_HSCIF1_0),
- PINMUX_IPSR_DATA(IP0_5_3, TX4_C),
- PINMUX_IPSR_DATA(IP0_7_6, A0),
- PINMUX_IPSR_DATA(IP0_7_6, SD1_DAT3),
- PINMUX_IPSR_DATA(IP0_7_6, MMC0_D3),
- PINMUX_IPSR_DATA(IP0_7_6, FD3),
- PINMUX_IPSR_DATA(IP0_9_8, A20),
- PINMUX_IPSR_DATA(IP0_9_8, TX5_D),
- PINMUX_IPSR_DATA(IP0_9_8, HSPI_TX2_B),
- PINMUX_IPSR_DATA(IP0_11_10, A21),
+ PINMUX_IPSR_GPSR(IP0_5_3, TX4_C),
+ PINMUX_IPSR_GPSR(IP0_7_6, A0),
+ PINMUX_IPSR_GPSR(IP0_7_6, SD1_DAT3),
+ PINMUX_IPSR_GPSR(IP0_7_6, MMC0_D3),
+ PINMUX_IPSR_GPSR(IP0_7_6, FD3),
+ PINMUX_IPSR_GPSR(IP0_9_8, A20),
+ PINMUX_IPSR_GPSR(IP0_9_8, TX5_D),
+ PINMUX_IPSR_GPSR(IP0_9_8, HSPI_TX2_B),
+ PINMUX_IPSR_GPSR(IP0_11_10, A21),
PINMUX_IPSR_MSEL(IP0_11_10, SCK5_D, SEL_SCIF5_3),
PINMUX_IPSR_MSEL(IP0_11_10, HSPI_CLK2_B, SEL_HSPI2_1),
- PINMUX_IPSR_DATA(IP0_13_12, A22),
+ PINMUX_IPSR_GPSR(IP0_13_12, A22),
PINMUX_IPSR_MSEL(IP0_13_12, RX5_D, SEL_SCIF5_3),
PINMUX_IPSR_MSEL(IP0_13_12, HSPI_RX2_B, SEL_HSPI2_1),
- PINMUX_IPSR_DATA(IP0_13_12, VI1_R0),
- PINMUX_IPSR_DATA(IP0_15_14, A23),
- PINMUX_IPSR_DATA(IP0_15_14, FCLE),
+ PINMUX_IPSR_GPSR(IP0_13_12, VI1_R0),
+ PINMUX_IPSR_GPSR(IP0_15_14, A23),
+ PINMUX_IPSR_GPSR(IP0_15_14, FCLE),
PINMUX_IPSR_MSEL(IP0_15_14, HSPI_CLK2, SEL_HSPI2_0),
- PINMUX_IPSR_DATA(IP0_15_14, VI1_R1),
- PINMUX_IPSR_DATA(IP0_18_16, A24),
- PINMUX_IPSR_DATA(IP0_18_16, SD1_CD),
- PINMUX_IPSR_DATA(IP0_18_16, MMC0_D4),
- PINMUX_IPSR_DATA(IP0_18_16, FD4),
+ PINMUX_IPSR_GPSR(IP0_15_14, VI1_R1),
+ PINMUX_IPSR_GPSR(IP0_18_16, A24),
+ PINMUX_IPSR_GPSR(IP0_18_16, SD1_CD),
+ PINMUX_IPSR_GPSR(IP0_18_16, MMC0_D4),
+ PINMUX_IPSR_GPSR(IP0_18_16, FD4),
PINMUX_IPSR_MSEL(IP0_18_16, HSPI_CS2, SEL_HSPI2_0),
- PINMUX_IPSR_DATA(IP0_18_16, VI1_R2),
+ PINMUX_IPSR_GPSR(IP0_18_16, VI1_R2),
PINMUX_IPSR_MSEL(IP0_18_16, SSI_WS78_B, SEL_SSI7_1),
- PINMUX_IPSR_DATA(IP0_22_19, A25),
- PINMUX_IPSR_DATA(IP0_22_19, SD1_WP),
- PINMUX_IPSR_DATA(IP0_22_19, MMC0_D5),
- PINMUX_IPSR_DATA(IP0_22_19, FD5),
+ PINMUX_IPSR_GPSR(IP0_22_19, A25),
+ PINMUX_IPSR_GPSR(IP0_22_19, SD1_WP),
+ PINMUX_IPSR_GPSR(IP0_22_19, MMC0_D5),
+ PINMUX_IPSR_GPSR(IP0_22_19, FD5),
PINMUX_IPSR_MSEL(IP0_22_19, HSPI_RX2, SEL_HSPI2_0),
- PINMUX_IPSR_DATA(IP0_22_19, VI1_R3),
- PINMUX_IPSR_DATA(IP0_22_19, TX5_B),
+ PINMUX_IPSR_GPSR(IP0_22_19, VI1_R3),
+ PINMUX_IPSR_GPSR(IP0_22_19, TX5_B),
PINMUX_IPSR_MSEL(IP0_22_19, SSI_SDATA7_B, SEL_SSI7_1),
PINMUX_IPSR_MSEL(IP0_22_19, CTS0_B, SEL_SCIF0_1),
- PINMUX_IPSR_DATA(IP0_24_23, CLKOUT),
- PINMUX_IPSR_DATA(IP0_24_23, TX3C_IRDA_TX_C),
- PINMUX_IPSR_DATA(IP0_24_23, PWM0_B),
- PINMUX_IPSR_DATA(IP0_25, CS0),
+ PINMUX_IPSR_GPSR(IP0_24_23, CLKOUT),
+ PINMUX_IPSR_GPSR(IP0_24_23, TX3C_IRDA_TX_C),
+ PINMUX_IPSR_GPSR(IP0_24_23, PWM0_B),
+ PINMUX_IPSR_GPSR(IP0_25, CS0),
PINMUX_IPSR_MSEL(IP0_25, HSPI_CS2_B, SEL_HSPI2_1),
- PINMUX_IPSR_DATA(IP0_27_26, CS1_A26),
- PINMUX_IPSR_DATA(IP0_27_26, HSPI_TX2),
- PINMUX_IPSR_DATA(IP0_27_26, SDSELF_B),
- PINMUX_IPSR_DATA(IP0_30_28, RD_WR),
- PINMUX_IPSR_DATA(IP0_30_28, FWE),
- PINMUX_IPSR_DATA(IP0_30_28, ATAG0),
- PINMUX_IPSR_DATA(IP0_30_28, VI1_R7),
+ PINMUX_IPSR_GPSR(IP0_27_26, CS1_A26),
+ PINMUX_IPSR_GPSR(IP0_27_26, HSPI_TX2),
+ PINMUX_IPSR_GPSR(IP0_27_26, SDSELF_B),
+ PINMUX_IPSR_GPSR(IP0_30_28, RD_WR),
+ PINMUX_IPSR_GPSR(IP0_30_28, FWE),
+ PINMUX_IPSR_GPSR(IP0_30_28, ATAG0),
+ PINMUX_IPSR_GPSR(IP0_30_28, VI1_R7),
PINMUX_IPSR_MSEL(IP0_30_28, HRTS1, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP0_30_28, RX4_C, SEL_SCIF4_2),
- PINMUX_IPSR_DATA(IP1_1_0, EX_CS0),
+ PINMUX_IPSR_GPSR(IP1_1_0, EX_CS0),
PINMUX_IPSR_MSEL(IP1_1_0, RX3_C_IRDA_RX_C, SEL_SCIF3_2),
- PINMUX_IPSR_DATA(IP1_1_0, MMC0_D6),
- PINMUX_IPSR_DATA(IP1_1_0, FD6),
- PINMUX_IPSR_DATA(IP1_3_2, EX_CS1),
- PINMUX_IPSR_DATA(IP1_3_2, MMC0_D7),
- PINMUX_IPSR_DATA(IP1_3_2, FD7),
- PINMUX_IPSR_DATA(IP1_6_4, EX_CS2),
- PINMUX_IPSR_DATA(IP1_6_4, SD1_CLK),
- PINMUX_IPSR_DATA(IP1_6_4, MMC0_CLK),
- PINMUX_IPSR_DATA(IP1_6_4, FALE),
- PINMUX_IPSR_DATA(IP1_6_4, ATACS00),
- PINMUX_IPSR_DATA(IP1_10_7, EX_CS3),
- PINMUX_IPSR_DATA(IP1_10_7, SD1_CMD),
- PINMUX_IPSR_DATA(IP1_10_7, MMC0_CMD),
- PINMUX_IPSR_DATA(IP1_10_7, FRE),
- PINMUX_IPSR_DATA(IP1_10_7, ATACS10),
- PINMUX_IPSR_DATA(IP1_10_7, VI1_R4),
+ PINMUX_IPSR_GPSR(IP1_1_0, MMC0_D6),
+ PINMUX_IPSR_GPSR(IP1_1_0, FD6),
+ PINMUX_IPSR_GPSR(IP1_3_2, EX_CS1),
+ PINMUX_IPSR_GPSR(IP1_3_2, MMC0_D7),
+ PINMUX_IPSR_GPSR(IP1_3_2, FD7),
+ PINMUX_IPSR_GPSR(IP1_6_4, EX_CS2),
+ PINMUX_IPSR_GPSR(IP1_6_4, SD1_CLK),
+ PINMUX_IPSR_GPSR(IP1_6_4, MMC0_CLK),
+ PINMUX_IPSR_GPSR(IP1_6_4, FALE),
+ PINMUX_IPSR_GPSR(IP1_6_4, ATACS00),
+ PINMUX_IPSR_GPSR(IP1_10_7, EX_CS3),
+ PINMUX_IPSR_GPSR(IP1_10_7, SD1_CMD),
+ PINMUX_IPSR_GPSR(IP1_10_7, MMC0_CMD),
+ PINMUX_IPSR_GPSR(IP1_10_7, FRE),
+ PINMUX_IPSR_GPSR(IP1_10_7, ATACS10),
+ PINMUX_IPSR_GPSR(IP1_10_7, VI1_R4),
PINMUX_IPSR_MSEL(IP1_10_7, RX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP1_10_7, HSCK1, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP1_10_7, SSI_SDATA8_B, SEL_SSI8_1),
PINMUX_IPSR_MSEL(IP1_10_7, RTS0_B_TANS_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP1_10_7, SSI_SDATA9, SEL_SSI9_0),
- PINMUX_IPSR_DATA(IP1_14_11, EX_CS4),
- PINMUX_IPSR_DATA(IP1_14_11, SD1_DAT0),
- PINMUX_IPSR_DATA(IP1_14_11, MMC0_D0),
- PINMUX_IPSR_DATA(IP1_14_11, FD0),
- PINMUX_IPSR_DATA(IP1_14_11, ATARD0),
- PINMUX_IPSR_DATA(IP1_14_11, VI1_R5),
+ PINMUX_IPSR_GPSR(IP1_14_11, EX_CS4),
+ PINMUX_IPSR_GPSR(IP1_14_11, SD1_DAT0),
+ PINMUX_IPSR_GPSR(IP1_14_11, MMC0_D0),
+ PINMUX_IPSR_GPSR(IP1_14_11, FD0),
+ PINMUX_IPSR_GPSR(IP1_14_11, ATARD0),
+ PINMUX_IPSR_GPSR(IP1_14_11, VI1_R5),
PINMUX_IPSR_MSEL(IP1_14_11, SCK5_B, SEL_SCIF5_1),
- PINMUX_IPSR_DATA(IP1_14_11, HTX1),
- PINMUX_IPSR_DATA(IP1_14_11, TX2_E),
- PINMUX_IPSR_DATA(IP1_14_11, TX0_B),
+ PINMUX_IPSR_GPSR(IP1_14_11, HTX1),
+ PINMUX_IPSR_GPSR(IP1_14_11, TX2_E),
+ PINMUX_IPSR_GPSR(IP1_14_11, TX0_B),
PINMUX_IPSR_MSEL(IP1_14_11, SSI_SCK9, SEL_SSI9_0),
- PINMUX_IPSR_DATA(IP1_18_15, EX_CS5),
- PINMUX_IPSR_DATA(IP1_18_15, SD1_DAT1),
- PINMUX_IPSR_DATA(IP1_18_15, MMC0_D1),
- PINMUX_IPSR_DATA(IP1_18_15, FD1),
- PINMUX_IPSR_DATA(IP1_18_15, ATAWR0),
- PINMUX_IPSR_DATA(IP1_18_15, VI1_R6),
+ PINMUX_IPSR_GPSR(IP1_18_15, EX_CS5),
+ PINMUX_IPSR_GPSR(IP1_18_15, SD1_DAT1),
+ PINMUX_IPSR_GPSR(IP1_18_15, MMC0_D1),
+ PINMUX_IPSR_GPSR(IP1_18_15, FD1),
+ PINMUX_IPSR_GPSR(IP1_18_15, ATAWR0),
+ PINMUX_IPSR_GPSR(IP1_18_15, VI1_R6),
PINMUX_IPSR_MSEL(IP1_18_15, HRX1, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP1_18_15, RX2_E, SEL_SCIF2_4),
PINMUX_IPSR_MSEL(IP1_18_15, RX0_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP1_18_15, SSI_WS9, SEL_SSI9_0),
- PINMUX_IPSR_DATA(IP1_20_19, MLB_CLK),
- PINMUX_IPSR_DATA(IP1_20_19, PWM2),
+ PINMUX_IPSR_GPSR(IP1_20_19, MLB_CLK),
+ PINMUX_IPSR_GPSR(IP1_20_19, PWM2),
PINMUX_IPSR_MSEL(IP1_20_19, SCK4, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP1_22_21, MLB_SIG),
- PINMUX_IPSR_DATA(IP1_22_21, PWM3),
- PINMUX_IPSR_DATA(IP1_22_21, TX4),
- PINMUX_IPSR_DATA(IP1_24_23, MLB_DAT),
- PINMUX_IPSR_DATA(IP1_24_23, PWM4),
+ PINMUX_IPSR_GPSR(IP1_22_21, MLB_SIG),
+ PINMUX_IPSR_GPSR(IP1_22_21, PWM3),
+ PINMUX_IPSR_GPSR(IP1_22_21, TX4),
+ PINMUX_IPSR_GPSR(IP1_24_23, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP1_24_23, PWM4),
PINMUX_IPSR_MSEL(IP1_24_23, RX4, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP1_28_25, HTX0),
- PINMUX_IPSR_DATA(IP1_28_25, TX1),
- PINMUX_IPSR_DATA(IP1_28_25, SDATA),
+ PINMUX_IPSR_GPSR(IP1_28_25, HTX0),
+ PINMUX_IPSR_GPSR(IP1_28_25, TX1),
+ PINMUX_IPSR_GPSR(IP1_28_25, SDATA),
PINMUX_IPSR_MSEL(IP1_28_25, CTS0_C, SEL_SCIF0_2),
- PINMUX_IPSR_DATA(IP1_28_25, SUB_TCK),
- PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE2),
- PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE10),
- PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE18),
- PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE26),
- PINMUX_IPSR_DATA(IP1_28_25, CC5_STATE34),
+ PINMUX_IPSR_GPSR(IP1_28_25, SUB_TCK),
+ PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE2),
+ PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE10),
+ PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE18),
+ PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE26),
+ PINMUX_IPSR_GPSR(IP1_28_25, CC5_STATE34),
PINMUX_IPSR_MSEL(IP2_3_0, HRX0, SEL_HSCIF0_0),
PINMUX_IPSR_MSEL(IP2_3_0, RX1, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP2_3_0, SCKZ),
+ PINMUX_IPSR_GPSR(IP2_3_0, SCKZ),
PINMUX_IPSR_MSEL(IP2_3_0, RTS0_C_TANS_C, SEL_SCIF0_2),
- PINMUX_IPSR_DATA(IP2_3_0, SUB_TDI),
- PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE3),
- PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE11),
- PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE19),
- PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE27),
- PINMUX_IPSR_DATA(IP2_3_0, CC5_STATE35),
+ PINMUX_IPSR_GPSR(IP2_3_0, SUB_TDI),
+ PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE3),
+ PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE11),
+ PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE19),
+ PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE27),
+ PINMUX_IPSR_GPSR(IP2_3_0, CC5_STATE35),
PINMUX_IPSR_MSEL(IP2_7_4, HSCK0, SEL_HSCIF0_0),
PINMUX_IPSR_MSEL(IP2_7_4, SCK1, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP2_7_4, MTS),
- PINMUX_IPSR_DATA(IP2_7_4, PWM5),
+ PINMUX_IPSR_GPSR(IP2_7_4, MTS),
+ PINMUX_IPSR_GPSR(IP2_7_4, PWM5),
PINMUX_IPSR_MSEL(IP2_7_4, SCK0_C, SEL_SCIF0_2),
PINMUX_IPSR_MSEL(IP2_7_4, SSI_SDATA9_B, SEL_SSI9_1),
- PINMUX_IPSR_DATA(IP2_7_4, SUB_TDO),
- PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE0),
- PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE8),
- PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE16),
- PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE24),
- PINMUX_IPSR_DATA(IP2_7_4, CC5_STATE32),
+ PINMUX_IPSR_GPSR(IP2_7_4, SUB_TDO),
+ PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE0),
+ PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE8),
+ PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE16),
+ PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE24),
+ PINMUX_IPSR_GPSR(IP2_7_4, CC5_STATE32),
PINMUX_IPSR_MSEL(IP2_11_8, HCTS0, SEL_HSCIF0_0),
PINMUX_IPSR_MSEL(IP2_11_8, CTS1, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP2_11_8, STM),
- PINMUX_IPSR_DATA(IP2_11_8, PWM0_D),
+ PINMUX_IPSR_GPSR(IP2_11_8, STM),
+ PINMUX_IPSR_GPSR(IP2_11_8, PWM0_D),
PINMUX_IPSR_MSEL(IP2_11_8, RX0_C, SEL_SCIF0_2),
PINMUX_IPSR_MSEL(IP2_11_8, SCIF_CLK_C, SEL_SCIF_2),
- PINMUX_IPSR_DATA(IP2_11_8, SUB_TRST),
+ PINMUX_IPSR_GPSR(IP2_11_8, SUB_TRST),
PINMUX_IPSR_MSEL(IP2_11_8, TCLK1_B, SEL_TMU1_1),
- PINMUX_IPSR_DATA(IP2_11_8, CC5_OSCOUT),
+ PINMUX_IPSR_GPSR(IP2_11_8, CC5_OSCOUT),
PINMUX_IPSR_MSEL(IP2_15_12, HRTS0, SEL_HSCIF0_0),
PINMUX_IPSR_MSEL(IP2_15_12, RTS1_TANS, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP2_15_12, MDATA),
- PINMUX_IPSR_DATA(IP2_15_12, TX0_C),
- PINMUX_IPSR_DATA(IP2_15_12, SUB_TMS),
- PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE1),
- PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE9),
- PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE17),
- PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE25),
- PINMUX_IPSR_DATA(IP2_15_12, CC5_STATE33),
- PINMUX_IPSR_DATA(IP2_18_16, DU0_DR0),
- PINMUX_IPSR_DATA(IP2_18_16, LCDOUT0),
+ PINMUX_IPSR_GPSR(IP2_15_12, MDATA),
+ PINMUX_IPSR_GPSR(IP2_15_12, TX0_C),
+ PINMUX_IPSR_GPSR(IP2_15_12, SUB_TMS),
+ PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE1),
+ PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE9),
+ PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE17),
+ PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE25),
+ PINMUX_IPSR_GPSR(IP2_15_12, CC5_STATE33),
+ PINMUX_IPSR_GPSR(IP2_18_16, DU0_DR0),
+ PINMUX_IPSR_GPSR(IP2_18_16, LCDOUT0),
PINMUX_IPSR_MSEL(IP2_18_16, DREQ0, SEL_EXBUS0_0),
PINMUX_IPSR_MSEL(IP2_18_16, GPS_CLK_B, SEL_GPS_1),
- PINMUX_IPSR_DATA(IP2_18_16, AUDATA0),
- PINMUX_IPSR_DATA(IP2_18_16, TX5_C),
- PINMUX_IPSR_DATA(IP2_21_19, DU0_DR1),
- PINMUX_IPSR_DATA(IP2_21_19, LCDOUT1),
- PINMUX_IPSR_DATA(IP2_21_19, DACK0),
- PINMUX_IPSR_DATA(IP2_21_19, DRACK0),
+ PINMUX_IPSR_GPSR(IP2_18_16, AUDATA0),
+ PINMUX_IPSR_GPSR(IP2_18_16, TX5_C),
+ PINMUX_IPSR_GPSR(IP2_21_19, DU0_DR1),
+ PINMUX_IPSR_GPSR(IP2_21_19, LCDOUT1),
+ PINMUX_IPSR_GPSR(IP2_21_19, DACK0),
+ PINMUX_IPSR_GPSR(IP2_21_19, DRACK0),
PINMUX_IPSR_MSEL(IP2_21_19, GPS_SIGN_B, SEL_GPS_1),
- PINMUX_IPSR_DATA(IP2_21_19, AUDATA1),
+ PINMUX_IPSR_GPSR(IP2_21_19, AUDATA1),
PINMUX_IPSR_MSEL(IP2_21_19, RX5_C, SEL_SCIF5_2),
- PINMUX_IPSR_DATA(IP2_22, DU0_DR2),
- PINMUX_IPSR_DATA(IP2_22, LCDOUT2),
- PINMUX_IPSR_DATA(IP2_23, DU0_DR3),
- PINMUX_IPSR_DATA(IP2_23, LCDOUT3),
- PINMUX_IPSR_DATA(IP2_24, DU0_DR4),
- PINMUX_IPSR_DATA(IP2_24, LCDOUT4),
- PINMUX_IPSR_DATA(IP2_25, DU0_DR5),
- PINMUX_IPSR_DATA(IP2_25, LCDOUT5),
- PINMUX_IPSR_DATA(IP2_26, DU0_DR6),
- PINMUX_IPSR_DATA(IP2_26, LCDOUT6),
- PINMUX_IPSR_DATA(IP2_27, DU0_DR7),
- PINMUX_IPSR_DATA(IP2_27, LCDOUT7),
- PINMUX_IPSR_DATA(IP2_30_28, DU0_DG0),
- PINMUX_IPSR_DATA(IP2_30_28, LCDOUT8),
+ PINMUX_IPSR_GPSR(IP2_22, DU0_DR2),
+ PINMUX_IPSR_GPSR(IP2_22, LCDOUT2),
+ PINMUX_IPSR_GPSR(IP2_23, DU0_DR3),
+ PINMUX_IPSR_GPSR(IP2_23, LCDOUT3),
+ PINMUX_IPSR_GPSR(IP2_24, DU0_DR4),
+ PINMUX_IPSR_GPSR(IP2_24, LCDOUT4),
+ PINMUX_IPSR_GPSR(IP2_25, DU0_DR5),
+ PINMUX_IPSR_GPSR(IP2_25, LCDOUT5),
+ PINMUX_IPSR_GPSR(IP2_26, DU0_DR6),
+ PINMUX_IPSR_GPSR(IP2_26, LCDOUT6),
+ PINMUX_IPSR_GPSR(IP2_27, DU0_DR7),
+ PINMUX_IPSR_GPSR(IP2_27, LCDOUT7),
+ PINMUX_IPSR_GPSR(IP2_30_28, DU0_DG0),
+ PINMUX_IPSR_GPSR(IP2_30_28, LCDOUT8),
PINMUX_IPSR_MSEL(IP2_30_28, DREQ1, SEL_EXBUS1_0),
PINMUX_IPSR_MSEL(IP2_30_28, SCL2, SEL_I2C2_0),
- PINMUX_IPSR_DATA(IP2_30_28, AUDATA2),
+ PINMUX_IPSR_GPSR(IP2_30_28, AUDATA2),
- PINMUX_IPSR_DATA(IP3_2_0, DU0_DG1),
- PINMUX_IPSR_DATA(IP3_2_0, LCDOUT9),
- PINMUX_IPSR_DATA(IP3_2_0, DACK1),
+ PINMUX_IPSR_GPSR(IP3_2_0, DU0_DG1),
+ PINMUX_IPSR_GPSR(IP3_2_0, LCDOUT9),
+ PINMUX_IPSR_GPSR(IP3_2_0, DACK1),
PINMUX_IPSR_MSEL(IP3_2_0, SDA2, SEL_I2C2_0),
- PINMUX_IPSR_DATA(IP3_2_0, AUDATA3),
- PINMUX_IPSR_DATA(IP3_3, DU0_DG2),
- PINMUX_IPSR_DATA(IP3_3, LCDOUT10),
- PINMUX_IPSR_DATA(IP3_4, DU0_DG3),
- PINMUX_IPSR_DATA(IP3_4, LCDOUT11),
- PINMUX_IPSR_DATA(IP3_5, DU0_DG4),
- PINMUX_IPSR_DATA(IP3_5, LCDOUT12),
- PINMUX_IPSR_DATA(IP3_6, DU0_DG5),
- PINMUX_IPSR_DATA(IP3_6, LCDOUT13),
- PINMUX_IPSR_DATA(IP3_7, DU0_DG6),
- PINMUX_IPSR_DATA(IP3_7, LCDOUT14),
- PINMUX_IPSR_DATA(IP3_8, DU0_DG7),
- PINMUX_IPSR_DATA(IP3_8, LCDOUT15),
- PINMUX_IPSR_DATA(IP3_11_9, DU0_DB0),
- PINMUX_IPSR_DATA(IP3_11_9, LCDOUT16),
- PINMUX_IPSR_DATA(IP3_11_9, EX_WAIT1),
+ PINMUX_IPSR_GPSR(IP3_2_0, AUDATA3),
+ PINMUX_IPSR_GPSR(IP3_3, DU0_DG2),
+ PINMUX_IPSR_GPSR(IP3_3, LCDOUT10),
+ PINMUX_IPSR_GPSR(IP3_4, DU0_DG3),
+ PINMUX_IPSR_GPSR(IP3_4, LCDOUT11),
+ PINMUX_IPSR_GPSR(IP3_5, DU0_DG4),
+ PINMUX_IPSR_GPSR(IP3_5, LCDOUT12),
+ PINMUX_IPSR_GPSR(IP3_6, DU0_DG5),
+ PINMUX_IPSR_GPSR(IP3_6, LCDOUT13),
+ PINMUX_IPSR_GPSR(IP3_7, DU0_DG6),
+ PINMUX_IPSR_GPSR(IP3_7, LCDOUT14),
+ PINMUX_IPSR_GPSR(IP3_8, DU0_DG7),
+ PINMUX_IPSR_GPSR(IP3_8, LCDOUT15),
+ PINMUX_IPSR_GPSR(IP3_11_9, DU0_DB0),
+ PINMUX_IPSR_GPSR(IP3_11_9, LCDOUT16),
+ PINMUX_IPSR_GPSR(IP3_11_9, EX_WAIT1),
PINMUX_IPSR_MSEL(IP3_11_9, SCL1, SEL_I2C1_0),
PINMUX_IPSR_MSEL(IP3_11_9, TCLK1, SEL_TMU1_0),
- PINMUX_IPSR_DATA(IP3_11_9, AUDATA4),
- PINMUX_IPSR_DATA(IP3_14_12, DU0_DB1),
- PINMUX_IPSR_DATA(IP3_14_12, LCDOUT17),
- PINMUX_IPSR_DATA(IP3_14_12, EX_WAIT2),
+ PINMUX_IPSR_GPSR(IP3_11_9, AUDATA4),
+ PINMUX_IPSR_GPSR(IP3_14_12, DU0_DB1),
+ PINMUX_IPSR_GPSR(IP3_14_12, LCDOUT17),
+ PINMUX_IPSR_GPSR(IP3_14_12, EX_WAIT2),
PINMUX_IPSR_MSEL(IP3_14_12, SDA1, SEL_I2C1_0),
PINMUX_IPSR_MSEL(IP3_14_12, GPS_MAG_B, SEL_GPS_1),
- PINMUX_IPSR_DATA(IP3_14_12, AUDATA5),
+ PINMUX_IPSR_GPSR(IP3_14_12, AUDATA5),
PINMUX_IPSR_MSEL(IP3_14_12, SCK5_C, SEL_SCIF5_2),
- PINMUX_IPSR_DATA(IP3_15, DU0_DB2),
- PINMUX_IPSR_DATA(IP3_15, LCDOUT18),
- PINMUX_IPSR_DATA(IP3_16, DU0_DB3),
- PINMUX_IPSR_DATA(IP3_16, LCDOUT19),
- PINMUX_IPSR_DATA(IP3_17, DU0_DB4),
- PINMUX_IPSR_DATA(IP3_17, LCDOUT20),
- PINMUX_IPSR_DATA(IP3_18, DU0_DB5),
- PINMUX_IPSR_DATA(IP3_18, LCDOUT21),
- PINMUX_IPSR_DATA(IP3_19, DU0_DB6),
- PINMUX_IPSR_DATA(IP3_19, LCDOUT22),
- PINMUX_IPSR_DATA(IP3_20, DU0_DB7),
- PINMUX_IPSR_DATA(IP3_20, LCDOUT23),
- PINMUX_IPSR_DATA(IP3_22_21, DU0_DOTCLKIN),
- PINMUX_IPSR_DATA(IP3_22_21, QSTVA_QVS),
- PINMUX_IPSR_DATA(IP3_22_21, TX3_D_IRDA_TX_D),
+ PINMUX_IPSR_GPSR(IP3_15, DU0_DB2),
+ PINMUX_IPSR_GPSR(IP3_15, LCDOUT18),
+ PINMUX_IPSR_GPSR(IP3_16, DU0_DB3),
+ PINMUX_IPSR_GPSR(IP3_16, LCDOUT19),
+ PINMUX_IPSR_GPSR(IP3_17, DU0_DB4),
+ PINMUX_IPSR_GPSR(IP3_17, LCDOUT20),
+ PINMUX_IPSR_GPSR(IP3_18, DU0_DB5),
+ PINMUX_IPSR_GPSR(IP3_18, LCDOUT21),
+ PINMUX_IPSR_GPSR(IP3_19, DU0_DB6),
+ PINMUX_IPSR_GPSR(IP3_19, LCDOUT22),
+ PINMUX_IPSR_GPSR(IP3_20, DU0_DB7),
+ PINMUX_IPSR_GPSR(IP3_20, LCDOUT23),
+ PINMUX_IPSR_GPSR(IP3_22_21, DU0_DOTCLKIN),
+ PINMUX_IPSR_GPSR(IP3_22_21, QSTVA_QVS),
+ PINMUX_IPSR_GPSR(IP3_22_21, TX3_D_IRDA_TX_D),
PINMUX_IPSR_MSEL(IP3_22_21, SCL3_B, SEL_I2C3_1),
- PINMUX_IPSR_DATA(IP3_23, DU0_DOTCLKOUT0),
- PINMUX_IPSR_DATA(IP3_23, QCLK),
- PINMUX_IPSR_DATA(IP3_26_24, DU0_DOTCLKOUT1),
- PINMUX_IPSR_DATA(IP3_26_24, QSTVB_QVE),
+ PINMUX_IPSR_GPSR(IP3_23, DU0_DOTCLKOUT0),
+ PINMUX_IPSR_GPSR(IP3_23, QCLK),
+ PINMUX_IPSR_GPSR(IP3_26_24, DU0_DOTCLKOUT1),
+ PINMUX_IPSR_GPSR(IP3_26_24, QSTVB_QVE),
PINMUX_IPSR_MSEL(IP3_26_24, RX3_D_IRDA_RX_D, SEL_SCIF3_3),
PINMUX_IPSR_MSEL(IP3_26_24, SDA3_B, SEL_I2C3_1),
PINMUX_IPSR_MSEL(IP3_26_24, SDA2_C, SEL_I2C2_2),
- PINMUX_IPSR_DATA(IP3_26_24, DACK0_B),
- PINMUX_IPSR_DATA(IP3_26_24, DRACK0_B),
- PINMUX_IPSR_DATA(IP3_27, DU0_EXHSYNC_DU0_HSYNC),
- PINMUX_IPSR_DATA(IP3_27, QSTH_QHS),
- PINMUX_IPSR_DATA(IP3_28, DU0_EXVSYNC_DU0_VSYNC),
- PINMUX_IPSR_DATA(IP3_28, QSTB_QHE),
- PINMUX_IPSR_DATA(IP3_31_29, DU0_EXODDF_DU0_ODDF_DISP_CDE),
- PINMUX_IPSR_DATA(IP3_31_29, QCPV_QDE),
- PINMUX_IPSR_DATA(IP3_31_29, CAN1_TX),
- PINMUX_IPSR_DATA(IP3_31_29, TX2_C),
+ PINMUX_IPSR_GPSR(IP3_26_24, DACK0_B),
+ PINMUX_IPSR_GPSR(IP3_26_24, DRACK0_B),
+ PINMUX_IPSR_GPSR(IP3_27, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_GPSR(IP3_27, QSTH_QHS),
+ PINMUX_IPSR_GPSR(IP3_28, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_GPSR(IP3_28, QSTB_QHE),
+ PINMUX_IPSR_GPSR(IP3_31_29, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP3_31_29, QCPV_QDE),
+ PINMUX_IPSR_GPSR(IP3_31_29, CAN1_TX),
+ PINMUX_IPSR_GPSR(IP3_31_29, TX2_C),
PINMUX_IPSR_MSEL(IP3_31_29, SCL2_C, SEL_I2C2_2),
- PINMUX_IPSR_DATA(IP3_31_29, REMOCON),
+ PINMUX_IPSR_GPSR(IP3_31_29, REMOCON),
- PINMUX_IPSR_DATA(IP4_1_0, DU0_DISP),
- PINMUX_IPSR_DATA(IP4_1_0, QPOLA),
+ PINMUX_IPSR_GPSR(IP4_1_0, DU0_DISP),
+ PINMUX_IPSR_GPSR(IP4_1_0, QPOLA),
PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_C, SEL_CANCLK_2),
PINMUX_IPSR_MSEL(IP4_1_0, SCK2_C, SEL_SCIF2_2),
- PINMUX_IPSR_DATA(IP4_4_2, DU0_CDE),
- PINMUX_IPSR_DATA(IP4_4_2, QPOLB),
- PINMUX_IPSR_DATA(IP4_4_2, CAN1_RX),
+ PINMUX_IPSR_GPSR(IP4_4_2, DU0_CDE),
+ PINMUX_IPSR_GPSR(IP4_4_2, QPOLB),
+ PINMUX_IPSR_GPSR(IP4_4_2, CAN1_RX),
PINMUX_IPSR_MSEL(IP4_4_2, RX2_C, SEL_SCIF2_2),
PINMUX_IPSR_MSEL(IP4_4_2, DREQ0_B, SEL_EXBUS0_1),
PINMUX_IPSR_MSEL(IP4_4_2, SSI_SCK78_B, SEL_SSI7_1),
PINMUX_IPSR_MSEL(IP4_4_2, SCK0_B, SEL_SCIF0_1),
- PINMUX_IPSR_DATA(IP4_7_5, DU1_DR0),
- PINMUX_IPSR_DATA(IP4_7_5, VI2_DATA0_VI2_B0),
- PINMUX_IPSR_DATA(IP4_7_5, PWM6),
- PINMUX_IPSR_DATA(IP4_7_5, SD3_CLK),
- PINMUX_IPSR_DATA(IP4_7_5, TX3_E_IRDA_TX_E),
- PINMUX_IPSR_DATA(IP4_7_5, AUDCK),
+ PINMUX_IPSR_GPSR(IP4_7_5, DU1_DR0),
+ PINMUX_IPSR_GPSR(IP4_7_5, VI2_DATA0_VI2_B0),
+ PINMUX_IPSR_GPSR(IP4_7_5, PWM6),
+ PINMUX_IPSR_GPSR(IP4_7_5, SD3_CLK),
+ PINMUX_IPSR_GPSR(IP4_7_5, TX3_E_IRDA_TX_E),
+ PINMUX_IPSR_GPSR(IP4_7_5, AUDCK),
PINMUX_IPSR_MSEL(IP4_7_5, PWMFSW0_B, SEL_PWMFSW_1),
- PINMUX_IPSR_DATA(IP4_10_8, DU1_DR1),
- PINMUX_IPSR_DATA(IP4_10_8, VI2_DATA1_VI2_B1),
- PINMUX_IPSR_DATA(IP4_10_8, PWM0),
- PINMUX_IPSR_DATA(IP4_10_8, SD3_CMD),
+ PINMUX_IPSR_GPSR(IP4_10_8, DU1_DR1),
+ PINMUX_IPSR_GPSR(IP4_10_8, VI2_DATA1_VI2_B1),
+ PINMUX_IPSR_GPSR(IP4_10_8, PWM0),
+ PINMUX_IPSR_GPSR(IP4_10_8, SD3_CMD),
PINMUX_IPSR_MSEL(IP4_10_8, RX3_E_IRDA_RX_E, SEL_SCIF3_4),
- PINMUX_IPSR_DATA(IP4_10_8, AUDSYNC),
+ PINMUX_IPSR_GPSR(IP4_10_8, AUDSYNC),
PINMUX_IPSR_MSEL(IP4_10_8, CTS0_D, SEL_SCIF0_3),
- PINMUX_IPSR_DATA(IP4_11, DU1_DR2),
- PINMUX_IPSR_DATA(IP4_11, VI2_G0),
- PINMUX_IPSR_DATA(IP4_12, DU1_DR3),
- PINMUX_IPSR_DATA(IP4_12, VI2_G1),
- PINMUX_IPSR_DATA(IP4_13, DU1_DR4),
- PINMUX_IPSR_DATA(IP4_13, VI2_G2),
- PINMUX_IPSR_DATA(IP4_14, DU1_DR5),
- PINMUX_IPSR_DATA(IP4_14, VI2_G3),
- PINMUX_IPSR_DATA(IP4_15, DU1_DR6),
- PINMUX_IPSR_DATA(IP4_15, VI2_G4),
- PINMUX_IPSR_DATA(IP4_16, DU1_DR7),
- PINMUX_IPSR_DATA(IP4_16, VI2_G5),
- PINMUX_IPSR_DATA(IP4_19_17, DU1_DG0),
- PINMUX_IPSR_DATA(IP4_19_17, VI2_DATA2_VI2_B2),
+ PINMUX_IPSR_GPSR(IP4_11, DU1_DR2),
+ PINMUX_IPSR_GPSR(IP4_11, VI2_G0),
+ PINMUX_IPSR_GPSR(IP4_12, DU1_DR3),
+ PINMUX_IPSR_GPSR(IP4_12, VI2_G1),
+ PINMUX_IPSR_GPSR(IP4_13, DU1_DR4),
+ PINMUX_IPSR_GPSR(IP4_13, VI2_G2),
+ PINMUX_IPSR_GPSR(IP4_14, DU1_DR5),
+ PINMUX_IPSR_GPSR(IP4_14, VI2_G3),
+ PINMUX_IPSR_GPSR(IP4_15, DU1_DR6),
+ PINMUX_IPSR_GPSR(IP4_15, VI2_G4),
+ PINMUX_IPSR_GPSR(IP4_16, DU1_DR7),
+ PINMUX_IPSR_GPSR(IP4_16, VI2_G5),
+ PINMUX_IPSR_GPSR(IP4_19_17, DU1_DG0),
+ PINMUX_IPSR_GPSR(IP4_19_17, VI2_DATA2_VI2_B2),
PINMUX_IPSR_MSEL(IP4_19_17, SCL1_B, SEL_I2C1_1),
- PINMUX_IPSR_DATA(IP4_19_17, SD3_DAT2),
+ PINMUX_IPSR_GPSR(IP4_19_17, SD3_DAT2),
PINMUX_IPSR_MSEL(IP4_19_17, SCK3_E, SEL_SCIF3_4),
- PINMUX_IPSR_DATA(IP4_19_17, AUDATA6),
- PINMUX_IPSR_DATA(IP4_19_17, TX0_D),
- PINMUX_IPSR_DATA(IP4_22_20, DU1_DG1),
- PINMUX_IPSR_DATA(IP4_22_20, VI2_DATA3_VI2_B3),
+ PINMUX_IPSR_GPSR(IP4_19_17, AUDATA6),
+ PINMUX_IPSR_GPSR(IP4_19_17, TX0_D),
+ PINMUX_IPSR_GPSR(IP4_22_20, DU1_DG1),
+ PINMUX_IPSR_GPSR(IP4_22_20, VI2_DATA3_VI2_B3),
PINMUX_IPSR_MSEL(IP4_22_20, SDA1_B, SEL_I2C1_1),
- PINMUX_IPSR_DATA(IP4_22_20, SD3_DAT3),
+ PINMUX_IPSR_GPSR(IP4_22_20, SD3_DAT3),
PINMUX_IPSR_MSEL(IP4_22_20, SCK5, SEL_SCIF5_0),
- PINMUX_IPSR_DATA(IP4_22_20, AUDATA7),
+ PINMUX_IPSR_GPSR(IP4_22_20, AUDATA7),
PINMUX_IPSR_MSEL(IP4_22_20, RX0_D, SEL_SCIF0_3),
- PINMUX_IPSR_DATA(IP4_23, DU1_DG2),
- PINMUX_IPSR_DATA(IP4_23, VI2_G6),
- PINMUX_IPSR_DATA(IP4_24, DU1_DG3),
- PINMUX_IPSR_DATA(IP4_24, VI2_G7),
- PINMUX_IPSR_DATA(IP4_25, DU1_DG4),
- PINMUX_IPSR_DATA(IP4_25, VI2_R0),
- PINMUX_IPSR_DATA(IP4_26, DU1_DG5),
- PINMUX_IPSR_DATA(IP4_26, VI2_R1),
- PINMUX_IPSR_DATA(IP4_27, DU1_DG6),
- PINMUX_IPSR_DATA(IP4_27, VI2_R2),
- PINMUX_IPSR_DATA(IP4_28, DU1_DG7),
- PINMUX_IPSR_DATA(IP4_28, VI2_R3),
- PINMUX_IPSR_DATA(IP4_31_29, DU1_DB0),
- PINMUX_IPSR_DATA(IP4_31_29, VI2_DATA4_VI2_B4),
+ PINMUX_IPSR_GPSR(IP4_23, DU1_DG2),
+ PINMUX_IPSR_GPSR(IP4_23, VI2_G6),
+ PINMUX_IPSR_GPSR(IP4_24, DU1_DG3),
+ PINMUX_IPSR_GPSR(IP4_24, VI2_G7),
+ PINMUX_IPSR_GPSR(IP4_25, DU1_DG4),
+ PINMUX_IPSR_GPSR(IP4_25, VI2_R0),
+ PINMUX_IPSR_GPSR(IP4_26, DU1_DG5),
+ PINMUX_IPSR_GPSR(IP4_26, VI2_R1),
+ PINMUX_IPSR_GPSR(IP4_27, DU1_DG6),
+ PINMUX_IPSR_GPSR(IP4_27, VI2_R2),
+ PINMUX_IPSR_GPSR(IP4_28, DU1_DG7),
+ PINMUX_IPSR_GPSR(IP4_28, VI2_R3),
+ PINMUX_IPSR_GPSR(IP4_31_29, DU1_DB0),
+ PINMUX_IPSR_GPSR(IP4_31_29, VI2_DATA4_VI2_B4),
PINMUX_IPSR_MSEL(IP4_31_29, SCL2_B, SEL_I2C2_1),
- PINMUX_IPSR_DATA(IP4_31_29, SD3_DAT0),
- PINMUX_IPSR_DATA(IP4_31_29, TX5),
+ PINMUX_IPSR_GPSR(IP4_31_29, SD3_DAT0),
+ PINMUX_IPSR_GPSR(IP4_31_29, TX5),
PINMUX_IPSR_MSEL(IP4_31_29, SCK0_D, SEL_SCIF0_3),
- PINMUX_IPSR_DATA(IP5_2_0, DU1_DB1),
- PINMUX_IPSR_DATA(IP5_2_0, VI2_DATA5_VI2_B5),
+ PINMUX_IPSR_GPSR(IP5_2_0, DU1_DB1),
+ PINMUX_IPSR_GPSR(IP5_2_0, VI2_DATA5_VI2_B5),
PINMUX_IPSR_MSEL(IP5_2_0, SDA2_B, SEL_I2C2_1),
- PINMUX_IPSR_DATA(IP5_2_0, SD3_DAT1),
+ PINMUX_IPSR_GPSR(IP5_2_0, SD3_DAT1),
PINMUX_IPSR_MSEL(IP5_2_0, RX5, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP5_2_0, RTS0_D_TANS_D, SEL_SCIF0_3),
- PINMUX_IPSR_DATA(IP5_3, DU1_DB2),
- PINMUX_IPSR_DATA(IP5_3, VI2_R4),
- PINMUX_IPSR_DATA(IP5_4, DU1_DB3),
- PINMUX_IPSR_DATA(IP5_4, VI2_R5),
- PINMUX_IPSR_DATA(IP5_5, DU1_DB4),
- PINMUX_IPSR_DATA(IP5_5, VI2_R6),
- PINMUX_IPSR_DATA(IP5_6, DU1_DB5),
- PINMUX_IPSR_DATA(IP5_6, VI2_R7),
- PINMUX_IPSR_DATA(IP5_7, DU1_DB6),
+ PINMUX_IPSR_GPSR(IP5_3, DU1_DB2),
+ PINMUX_IPSR_GPSR(IP5_3, VI2_R4),
+ PINMUX_IPSR_GPSR(IP5_4, DU1_DB3),
+ PINMUX_IPSR_GPSR(IP5_4, VI2_R5),
+ PINMUX_IPSR_GPSR(IP5_5, DU1_DB4),
+ PINMUX_IPSR_GPSR(IP5_5, VI2_R6),
+ PINMUX_IPSR_GPSR(IP5_6, DU1_DB5),
+ PINMUX_IPSR_GPSR(IP5_6, VI2_R7),
+ PINMUX_IPSR_GPSR(IP5_7, DU1_DB6),
PINMUX_IPSR_MSEL(IP5_7, SCL2_D, SEL_I2C2_3),
- PINMUX_IPSR_DATA(IP5_8, DU1_DB7),
+ PINMUX_IPSR_GPSR(IP5_8, DU1_DB7),
PINMUX_IPSR_MSEL(IP5_8, SDA2_D, SEL_I2C2_3),
- PINMUX_IPSR_DATA(IP5_10_9, DU1_DOTCLKIN),
- PINMUX_IPSR_DATA(IP5_10_9, VI2_CLKENB),
+ PINMUX_IPSR_GPSR(IP5_10_9, DU1_DOTCLKIN),
+ PINMUX_IPSR_GPSR(IP5_10_9, VI2_CLKENB),
PINMUX_IPSR_MSEL(IP5_10_9, HSPI_CS1, SEL_HSPI1_0),
PINMUX_IPSR_MSEL(IP5_10_9, SCL1_D, SEL_I2C1_3),
- PINMUX_IPSR_DATA(IP5_12_11, DU1_DOTCLKOUT),
- PINMUX_IPSR_DATA(IP5_12_11, VI2_FIELD),
+ PINMUX_IPSR_GPSR(IP5_12_11, DU1_DOTCLKOUT),
+ PINMUX_IPSR_GPSR(IP5_12_11, VI2_FIELD),
PINMUX_IPSR_MSEL(IP5_12_11, SDA1_D, SEL_I2C1_3),
- PINMUX_IPSR_DATA(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC),
- PINMUX_IPSR_DATA(IP5_14_13, VI2_HSYNC),
- PINMUX_IPSR_DATA(IP5_14_13, VI3_HSYNC),
- PINMUX_IPSR_DATA(IP5_16_15, DU1_EXVSYNC_DU1_VSYNC),
- PINMUX_IPSR_DATA(IP5_16_15, VI2_VSYNC),
- PINMUX_IPSR_DATA(IP5_16_15, VI3_VSYNC),
- PINMUX_IPSR_DATA(IP5_20_17, DU1_EXODDF_DU1_ODDF_DISP_CDE),
- PINMUX_IPSR_DATA(IP5_20_17, VI2_CLK),
- PINMUX_IPSR_DATA(IP5_20_17, TX3_B_IRDA_TX_B),
- PINMUX_IPSR_DATA(IP5_20_17, SD3_CD),
- PINMUX_IPSR_DATA(IP5_20_17, HSPI_TX1),
- PINMUX_IPSR_DATA(IP5_20_17, VI1_CLKENB),
- PINMUX_IPSR_DATA(IP5_20_17, VI3_CLKENB),
- PINMUX_IPSR_DATA(IP5_20_17, AUDIO_CLKC),
- PINMUX_IPSR_DATA(IP5_20_17, TX2_D),
- PINMUX_IPSR_DATA(IP5_20_17, SPEEDIN),
+ PINMUX_IPSR_GPSR(IP5_14_13, DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_IPSR_GPSR(IP5_14_13, VI2_HSYNC),
+ PINMUX_IPSR_GPSR(IP5_14_13, VI3_HSYNC),
+ PINMUX_IPSR_GPSR(IP5_16_15, DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_IPSR_GPSR(IP5_16_15, VI2_VSYNC),
+ PINMUX_IPSR_GPSR(IP5_16_15, VI3_VSYNC),
+ PINMUX_IPSR_GPSR(IP5_20_17, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP5_20_17, VI2_CLK),
+ PINMUX_IPSR_GPSR(IP5_20_17, TX3_B_IRDA_TX_B),
+ PINMUX_IPSR_GPSR(IP5_20_17, SD3_CD),
+ PINMUX_IPSR_GPSR(IP5_20_17, HSPI_TX1),
+ PINMUX_IPSR_GPSR(IP5_20_17, VI1_CLKENB),
+ PINMUX_IPSR_GPSR(IP5_20_17, VI3_CLKENB),
+ PINMUX_IPSR_GPSR(IP5_20_17, AUDIO_CLKC),
+ PINMUX_IPSR_GPSR(IP5_20_17, TX2_D),
+ PINMUX_IPSR_GPSR(IP5_20_17, SPEEDIN),
PINMUX_IPSR_MSEL(IP5_20_17, GPS_SIGN_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP5_23_21, DU1_DISP),
- PINMUX_IPSR_DATA(IP5_23_21, VI2_DATA6_VI2_B6),
+ PINMUX_IPSR_GPSR(IP5_23_21, DU1_DISP),
+ PINMUX_IPSR_GPSR(IP5_23_21, VI2_DATA6_VI2_B6),
PINMUX_IPSR_MSEL(IP5_23_21, TCLK0, SEL_TMU0_0),
- PINMUX_IPSR_DATA(IP5_23_21, QSTVA_B_QVS_B),
+ PINMUX_IPSR_GPSR(IP5_23_21, QSTVA_B_QVS_B),
PINMUX_IPSR_MSEL(IP5_23_21, HSPI_CLK1, SEL_HSPI1_0),
PINMUX_IPSR_MSEL(IP5_23_21, SCK2_D, SEL_SCIF2_3),
- PINMUX_IPSR_DATA(IP5_23_21, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_GPSR(IP5_23_21, AUDIO_CLKOUT_B),
PINMUX_IPSR_MSEL(IP5_23_21, GPS_MAG_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP5_27_24, DU1_CDE),
- PINMUX_IPSR_DATA(IP5_27_24, VI2_DATA7_VI2_B7),
+ PINMUX_IPSR_GPSR(IP5_27_24, DU1_CDE),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI2_DATA7_VI2_B7),
PINMUX_IPSR_MSEL(IP5_27_24, RX3_B_IRDA_RX_B, SEL_SCIF3_1),
- PINMUX_IPSR_DATA(IP5_27_24, SD3_WP),
+ PINMUX_IPSR_GPSR(IP5_27_24, SD3_WP),
PINMUX_IPSR_MSEL(IP5_27_24, HSPI_RX1, SEL_HSPI1_0),
- PINMUX_IPSR_DATA(IP5_27_24, VI1_FIELD),
- PINMUX_IPSR_DATA(IP5_27_24, VI3_FIELD),
- PINMUX_IPSR_DATA(IP5_27_24, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI1_FIELD),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI3_FIELD),
+ PINMUX_IPSR_GPSR(IP5_27_24, AUDIO_CLKOUT),
PINMUX_IPSR_MSEL(IP5_27_24, RX2_D, SEL_SCIF2_3),
PINMUX_IPSR_MSEL(IP5_27_24, GPS_CLK_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP5_27_24, GPS_CLK_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP5_28, AUDIO_CLKA),
- PINMUX_IPSR_DATA(IP5_28, CAN_TXCLK),
- PINMUX_IPSR_DATA(IP5_30_29, AUDIO_CLKB),
- PINMUX_IPSR_DATA(IP5_30_29, USB_OVC2),
- PINMUX_IPSR_DATA(IP5_30_29, CAN_DEBUGOUT0),
- PINMUX_IPSR_DATA(IP5_30_29, MOUT0),
-
- PINMUX_IPSR_DATA(IP6_1_0, SSI_SCK0129),
- PINMUX_IPSR_DATA(IP6_1_0, CAN_DEBUGOUT1),
- PINMUX_IPSR_DATA(IP6_1_0, MOUT1),
- PINMUX_IPSR_DATA(IP6_3_2, SSI_WS0129),
- PINMUX_IPSR_DATA(IP6_3_2, CAN_DEBUGOUT2),
- PINMUX_IPSR_DATA(IP6_3_2, MOUT2),
- PINMUX_IPSR_DATA(IP6_5_4, SSI_SDATA0),
- PINMUX_IPSR_DATA(IP6_5_4, CAN_DEBUGOUT3),
- PINMUX_IPSR_DATA(IP6_5_4, MOUT5),
- PINMUX_IPSR_DATA(IP6_7_6, SSI_SDATA1),
- PINMUX_IPSR_DATA(IP6_7_6, CAN_DEBUGOUT4),
- PINMUX_IPSR_DATA(IP6_7_6, MOUT6),
- PINMUX_IPSR_DATA(IP6_8, SSI_SDATA2),
- PINMUX_IPSR_DATA(IP6_8, CAN_DEBUGOUT5),
- PINMUX_IPSR_DATA(IP6_11_9, SSI_SCK34),
- PINMUX_IPSR_DATA(IP6_11_9, CAN_DEBUGOUT6),
- PINMUX_IPSR_DATA(IP6_11_9, CAN0_TX_B),
+ PINMUX_IPSR_GPSR(IP5_28, AUDIO_CLKA),
+ PINMUX_IPSR_GPSR(IP5_28, CAN_TXCLK),
+ PINMUX_IPSR_GPSR(IP5_30_29, AUDIO_CLKB),
+ PINMUX_IPSR_GPSR(IP5_30_29, USB_OVC2),
+ PINMUX_IPSR_GPSR(IP5_30_29, CAN_DEBUGOUT0),
+ PINMUX_IPSR_GPSR(IP5_30_29, MOUT0),
+
+ PINMUX_IPSR_GPSR(IP6_1_0, SSI_SCK0129),
+ PINMUX_IPSR_GPSR(IP6_1_0, CAN_DEBUGOUT1),
+ PINMUX_IPSR_GPSR(IP6_1_0, MOUT1),
+ PINMUX_IPSR_GPSR(IP6_3_2, SSI_WS0129),
+ PINMUX_IPSR_GPSR(IP6_3_2, CAN_DEBUGOUT2),
+ PINMUX_IPSR_GPSR(IP6_3_2, MOUT2),
+ PINMUX_IPSR_GPSR(IP6_5_4, SSI_SDATA0),
+ PINMUX_IPSR_GPSR(IP6_5_4, CAN_DEBUGOUT3),
+ PINMUX_IPSR_GPSR(IP6_5_4, MOUT5),
+ PINMUX_IPSR_GPSR(IP6_7_6, SSI_SDATA1),
+ PINMUX_IPSR_GPSR(IP6_7_6, CAN_DEBUGOUT4),
+ PINMUX_IPSR_GPSR(IP6_7_6, MOUT6),
+ PINMUX_IPSR_GPSR(IP6_8, SSI_SDATA2),
+ PINMUX_IPSR_GPSR(IP6_8, CAN_DEBUGOUT5),
+ PINMUX_IPSR_GPSR(IP6_11_9, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP6_11_9, CAN_DEBUGOUT6),
+ PINMUX_IPSR_GPSR(IP6_11_9, CAN0_TX_B),
PINMUX_IPSR_MSEL(IP6_11_9, IERX, SEL_IE_0),
PINMUX_IPSR_MSEL(IP6_11_9, SSI_SCK9_C, SEL_SSI9_2),
- PINMUX_IPSR_DATA(IP6_14_12, SSI_WS34),
- PINMUX_IPSR_DATA(IP6_14_12, CAN_DEBUGOUT7),
+ PINMUX_IPSR_GPSR(IP6_14_12, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP6_14_12, CAN_DEBUGOUT7),
PINMUX_IPSR_MSEL(IP6_14_12, CAN0_RX_B, SEL_CAN0_1),
- PINMUX_IPSR_DATA(IP6_14_12, IETX),
+ PINMUX_IPSR_GPSR(IP6_14_12, IETX),
PINMUX_IPSR_MSEL(IP6_14_12, SSI_WS9_C, SEL_SSI9_2),
- PINMUX_IPSR_DATA(IP6_17_15, SSI_SDATA3),
- PINMUX_IPSR_DATA(IP6_17_15, PWM0_C),
- PINMUX_IPSR_DATA(IP6_17_15, CAN_DEBUGOUT8),
+ PINMUX_IPSR_GPSR(IP6_17_15, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP6_17_15, PWM0_C),
+ PINMUX_IPSR_GPSR(IP6_17_15, CAN_DEBUGOUT8),
PINMUX_IPSR_MSEL(IP6_17_15, CAN_CLK_B, SEL_CANCLK_1),
PINMUX_IPSR_MSEL(IP6_17_15, IECLK, SEL_IE_0),
PINMUX_IPSR_MSEL(IP6_17_15, SCIF_CLK_B, SEL_SCIF_1),
PINMUX_IPSR_MSEL(IP6_17_15, TCLK0_B, SEL_TMU0_1),
- PINMUX_IPSR_DATA(IP6_19_18, SSI_SDATA4),
- PINMUX_IPSR_DATA(IP6_19_18, CAN_DEBUGOUT9),
+ PINMUX_IPSR_GPSR(IP6_19_18, SSI_SDATA4),
+ PINMUX_IPSR_GPSR(IP6_19_18, CAN_DEBUGOUT9),
PINMUX_IPSR_MSEL(IP6_19_18, SSI_SDATA9_C, SEL_SSI9_2),
- PINMUX_IPSR_DATA(IP6_22_20, SSI_SCK5),
- PINMUX_IPSR_DATA(IP6_22_20, ADICLK),
- PINMUX_IPSR_DATA(IP6_22_20, CAN_DEBUGOUT10),
+ PINMUX_IPSR_GPSR(IP6_22_20, SSI_SCK5),
+ PINMUX_IPSR_GPSR(IP6_22_20, ADICLK),
+ PINMUX_IPSR_GPSR(IP6_22_20, CAN_DEBUGOUT10),
PINMUX_IPSR_MSEL(IP6_22_20, SCK3, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP6_22_20, TCLK0_D, SEL_TMU0_3),
- PINMUX_IPSR_DATA(IP6_24_23, SSI_WS5),
+ PINMUX_IPSR_GPSR(IP6_24_23, SSI_WS5),
PINMUX_IPSR_MSEL(IP6_24_23, ADICS_SAMP, SEL_ADI_0),
- PINMUX_IPSR_DATA(IP6_24_23, CAN_DEBUGOUT11),
- PINMUX_IPSR_DATA(IP6_24_23, TX3_IRDA_TX),
- PINMUX_IPSR_DATA(IP6_26_25, SSI_SDATA5),
+ PINMUX_IPSR_GPSR(IP6_24_23, CAN_DEBUGOUT11),
+ PINMUX_IPSR_GPSR(IP6_24_23, TX3_IRDA_TX),
+ PINMUX_IPSR_GPSR(IP6_26_25, SSI_SDATA5),
PINMUX_IPSR_MSEL(IP6_26_25, ADIDATA, SEL_ADI_0),
- PINMUX_IPSR_DATA(IP6_26_25, CAN_DEBUGOUT12),
+ PINMUX_IPSR_GPSR(IP6_26_25, CAN_DEBUGOUT12),
PINMUX_IPSR_MSEL(IP6_26_25, RX3_IRDA_RX, SEL_SCIF3_0),
- PINMUX_IPSR_DATA(IP6_30_29, SSI_SCK6),
- PINMUX_IPSR_DATA(IP6_30_29, ADICHS0),
- PINMUX_IPSR_DATA(IP6_30_29, CAN0_TX),
+ PINMUX_IPSR_GPSR(IP6_30_29, SSI_SCK6),
+ PINMUX_IPSR_GPSR(IP6_30_29, ADICHS0),
+ PINMUX_IPSR_GPSR(IP6_30_29, CAN0_TX),
PINMUX_IPSR_MSEL(IP6_30_29, IERX_B, SEL_IE_1),
- PINMUX_IPSR_DATA(IP7_1_0, SSI_WS6),
- PINMUX_IPSR_DATA(IP7_1_0, ADICHS1),
+ PINMUX_IPSR_GPSR(IP7_1_0, SSI_WS6),
+ PINMUX_IPSR_GPSR(IP7_1_0, ADICHS1),
PINMUX_IPSR_MSEL(IP7_1_0, CAN0_RX, SEL_CAN0_0),
- PINMUX_IPSR_DATA(IP7_1_0, IETX_B),
- PINMUX_IPSR_DATA(IP7_3_2, SSI_SDATA6),
- PINMUX_IPSR_DATA(IP7_3_2, ADICHS2),
+ PINMUX_IPSR_GPSR(IP7_1_0, IETX_B),
+ PINMUX_IPSR_GPSR(IP7_3_2, SSI_SDATA6),
+ PINMUX_IPSR_GPSR(IP7_3_2, ADICHS2),
PINMUX_IPSR_MSEL(IP7_3_2, CAN_CLK, SEL_CANCLK_0),
PINMUX_IPSR_MSEL(IP7_3_2, IECLK_B, SEL_IE_1),
PINMUX_IPSR_MSEL(IP7_6_4, SSI_SCK78, SEL_SSI7_0),
- PINMUX_IPSR_DATA(IP7_6_4, CAN_DEBUGOUT13),
+ PINMUX_IPSR_GPSR(IP7_6_4, CAN_DEBUGOUT13),
PINMUX_IPSR_MSEL(IP7_6_4, IRQ0_B, SEL_INT0_1),
PINMUX_IPSR_MSEL(IP7_6_4, SSI_SCK9_B, SEL_SSI9_1),
PINMUX_IPSR_MSEL(IP7_6_4, HSPI_CLK1_C, SEL_HSPI1_2),
PINMUX_IPSR_MSEL(IP7_9_7, SSI_WS78, SEL_SSI7_0),
- PINMUX_IPSR_DATA(IP7_9_7, CAN_DEBUGOUT14),
+ PINMUX_IPSR_GPSR(IP7_9_7, CAN_DEBUGOUT14),
PINMUX_IPSR_MSEL(IP7_9_7, IRQ1_B, SEL_INT1_1),
PINMUX_IPSR_MSEL(IP7_9_7, SSI_WS9_B, SEL_SSI9_1),
PINMUX_IPSR_MSEL(IP7_9_7, HSPI_CS1_C, SEL_HSPI1_2),
PINMUX_IPSR_MSEL(IP7_12_10, SSI_SDATA7, SEL_SSI7_0),
- PINMUX_IPSR_DATA(IP7_12_10, CAN_DEBUGOUT15),
+ PINMUX_IPSR_GPSR(IP7_12_10, CAN_DEBUGOUT15),
PINMUX_IPSR_MSEL(IP7_12_10, IRQ2_B, SEL_INT2_1),
PINMUX_IPSR_MSEL(IP7_12_10, TCLK1_C, SEL_TMU1_2),
- PINMUX_IPSR_DATA(IP7_12_10, HSPI_TX1_C),
+ PINMUX_IPSR_GPSR(IP7_12_10, HSPI_TX1_C),
PINMUX_IPSR_MSEL(IP7_14_13, SSI_SDATA8, SEL_SSI8_0),
- PINMUX_IPSR_DATA(IP7_14_13, VSP),
+ PINMUX_IPSR_GPSR(IP7_14_13, VSP),
PINMUX_IPSR_MSEL(IP7_14_13, IRQ3_B, SEL_INT3_1),
PINMUX_IPSR_MSEL(IP7_14_13, HSPI_RX1_C, SEL_HSPI1_2),
- PINMUX_IPSR_DATA(IP7_16_15, SD0_CLK),
- PINMUX_IPSR_DATA(IP7_16_15, ATACS01),
+ PINMUX_IPSR_GPSR(IP7_16_15, SD0_CLK),
+ PINMUX_IPSR_GPSR(IP7_16_15, ATACS01),
PINMUX_IPSR_MSEL(IP7_16_15, SCK1_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP7_18_17, SD0_CMD),
- PINMUX_IPSR_DATA(IP7_18_17, ATACS11),
- PINMUX_IPSR_DATA(IP7_18_17, TX1_B),
- PINMUX_IPSR_DATA(IP7_18_17, CC5_TDO),
- PINMUX_IPSR_DATA(IP7_20_19, SD0_DAT0),
- PINMUX_IPSR_DATA(IP7_20_19, ATADIR1),
+ PINMUX_IPSR_GPSR(IP7_18_17, SD0_CMD),
+ PINMUX_IPSR_GPSR(IP7_18_17, ATACS11),
+ PINMUX_IPSR_GPSR(IP7_18_17, TX1_B),
+ PINMUX_IPSR_GPSR(IP7_18_17, CC5_TDO),
+ PINMUX_IPSR_GPSR(IP7_20_19, SD0_DAT0),
+ PINMUX_IPSR_GPSR(IP7_20_19, ATADIR1),
PINMUX_IPSR_MSEL(IP7_20_19, RX1_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP7_20_19, CC5_TRST),
- PINMUX_IPSR_DATA(IP7_22_21, SD0_DAT1),
- PINMUX_IPSR_DATA(IP7_22_21, ATAG1),
+ PINMUX_IPSR_GPSR(IP7_20_19, CC5_TRST),
+ PINMUX_IPSR_GPSR(IP7_22_21, SD0_DAT1),
+ PINMUX_IPSR_GPSR(IP7_22_21, ATAG1),
PINMUX_IPSR_MSEL(IP7_22_21, SCK2_B, SEL_SCIF2_1),
- PINMUX_IPSR_DATA(IP7_22_21, CC5_TMS),
- PINMUX_IPSR_DATA(IP7_24_23, SD0_DAT2),
- PINMUX_IPSR_DATA(IP7_24_23, ATARD1),
- PINMUX_IPSR_DATA(IP7_24_23, TX2_B),
- PINMUX_IPSR_DATA(IP7_24_23, CC5_TCK),
- PINMUX_IPSR_DATA(IP7_26_25, SD0_DAT3),
- PINMUX_IPSR_DATA(IP7_26_25, ATAWR1),
+ PINMUX_IPSR_GPSR(IP7_22_21, CC5_TMS),
+ PINMUX_IPSR_GPSR(IP7_24_23, SD0_DAT2),
+ PINMUX_IPSR_GPSR(IP7_24_23, ATARD1),
+ PINMUX_IPSR_GPSR(IP7_24_23, TX2_B),
+ PINMUX_IPSR_GPSR(IP7_24_23, CC5_TCK),
+ PINMUX_IPSR_GPSR(IP7_26_25, SD0_DAT3),
+ PINMUX_IPSR_GPSR(IP7_26_25, ATAWR1),
PINMUX_IPSR_MSEL(IP7_26_25, RX2_B, SEL_SCIF2_1),
- PINMUX_IPSR_DATA(IP7_26_25, CC5_TDI),
- PINMUX_IPSR_DATA(IP7_28_27, SD0_CD),
+ PINMUX_IPSR_GPSR(IP7_26_25, CC5_TDI),
+ PINMUX_IPSR_GPSR(IP7_28_27, SD0_CD),
PINMUX_IPSR_MSEL(IP7_28_27, DREQ2, SEL_EXBUS2_0),
PINMUX_IPSR_MSEL(IP7_28_27, RTS1_B_TANS_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP7_30_29, SD0_WP),
- PINMUX_IPSR_DATA(IP7_30_29, DACK2),
+ PINMUX_IPSR_GPSR(IP7_30_29, SD0_WP),
+ PINMUX_IPSR_GPSR(IP7_30_29, DACK2),
PINMUX_IPSR_MSEL(IP7_30_29, CTS1_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP8_3_0, HSPI_CLK0),
+ PINMUX_IPSR_GPSR(IP8_3_0, HSPI_CLK0),
PINMUX_IPSR_MSEL(IP8_3_0, CTS0, SEL_SCIF0_0),
- PINMUX_IPSR_DATA(IP8_3_0, USB_OVC0),
- PINMUX_IPSR_DATA(IP8_3_0, AD_CLK),
- PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE4),
- PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE12),
- PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE20),
- PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE28),
- PINMUX_IPSR_DATA(IP8_3_0, CC5_STATE36),
- PINMUX_IPSR_DATA(IP8_7_4, HSPI_CS0),
+ PINMUX_IPSR_GPSR(IP8_3_0, USB_OVC0),
+ PINMUX_IPSR_GPSR(IP8_3_0, AD_CLK),
+ PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE4),
+ PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE12),
+ PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE20),
+ PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE28),
+ PINMUX_IPSR_GPSR(IP8_3_0, CC5_STATE36),
+ PINMUX_IPSR_GPSR(IP8_7_4, HSPI_CS0),
PINMUX_IPSR_MSEL(IP8_7_4, RTS0_TANS, SEL_SCIF0_0),
- PINMUX_IPSR_DATA(IP8_7_4, USB_OVC1),
- PINMUX_IPSR_DATA(IP8_7_4, AD_DI),
- PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE5),
- PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE13),
- PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE21),
- PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE29),
- PINMUX_IPSR_DATA(IP8_7_4, CC5_STATE37),
- PINMUX_IPSR_DATA(IP8_11_8, HSPI_TX0),
- PINMUX_IPSR_DATA(IP8_11_8, TX0),
- PINMUX_IPSR_DATA(IP8_11_8, CAN_DEBUG_HW_TRIGGER),
- PINMUX_IPSR_DATA(IP8_11_8, AD_DO),
- PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE6),
- PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE14),
- PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE22),
- PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE30),
- PINMUX_IPSR_DATA(IP8_11_8, CC5_STATE38),
- PINMUX_IPSR_DATA(IP8_15_12, HSPI_RX0),
+ PINMUX_IPSR_GPSR(IP8_7_4, USB_OVC1),
+ PINMUX_IPSR_GPSR(IP8_7_4, AD_DI),
+ PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE5),
+ PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE13),
+ PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE21),
+ PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE29),
+ PINMUX_IPSR_GPSR(IP8_7_4, CC5_STATE37),
+ PINMUX_IPSR_GPSR(IP8_11_8, HSPI_TX0),
+ PINMUX_IPSR_GPSR(IP8_11_8, TX0),
+ PINMUX_IPSR_GPSR(IP8_11_8, CAN_DEBUG_HW_TRIGGER),
+ PINMUX_IPSR_GPSR(IP8_11_8, AD_DO),
+ PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE6),
+ PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE14),
+ PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE22),
+ PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE30),
+ PINMUX_IPSR_GPSR(IP8_11_8, CC5_STATE38),
+ PINMUX_IPSR_GPSR(IP8_15_12, HSPI_RX0),
PINMUX_IPSR_MSEL(IP8_15_12, RX0, SEL_SCIF0_0),
- PINMUX_IPSR_DATA(IP8_15_12, CAN_STEP0),
- PINMUX_IPSR_DATA(IP8_15_12, AD_NCS),
- PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE7),
- PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE15),
- PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE23),
- PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE31),
- PINMUX_IPSR_DATA(IP8_15_12, CC5_STATE39),
- PINMUX_IPSR_DATA(IP8_17_16, FMCLK),
- PINMUX_IPSR_DATA(IP8_17_16, RDS_CLK),
- PINMUX_IPSR_DATA(IP8_17_16, PCMOE),
- PINMUX_IPSR_DATA(IP8_18, BPFCLK),
- PINMUX_IPSR_DATA(IP8_18, PCMWE),
- PINMUX_IPSR_DATA(IP8_19, FMIN),
- PINMUX_IPSR_DATA(IP8_19, RDS_DATA),
- PINMUX_IPSR_DATA(IP8_20, VI0_CLK),
- PINMUX_IPSR_DATA(IP8_20, MMC1_CLK),
- PINMUX_IPSR_DATA(IP8_22_21, VI0_CLKENB),
- PINMUX_IPSR_DATA(IP8_22_21, TX1_C),
- PINMUX_IPSR_DATA(IP8_22_21, HTX1_B),
- PINMUX_IPSR_DATA(IP8_22_21, MT1_SYNC),
- PINMUX_IPSR_DATA(IP8_24_23, VI0_FIELD),
+ PINMUX_IPSR_GPSR(IP8_15_12, CAN_STEP0),
+ PINMUX_IPSR_GPSR(IP8_15_12, AD_NCS),
+ PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE7),
+ PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE15),
+ PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE23),
+ PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE31),
+ PINMUX_IPSR_GPSR(IP8_15_12, CC5_STATE39),
+ PINMUX_IPSR_GPSR(IP8_17_16, FMCLK),
+ PINMUX_IPSR_GPSR(IP8_17_16, RDS_CLK),
+ PINMUX_IPSR_GPSR(IP8_17_16, PCMOE),
+ PINMUX_IPSR_GPSR(IP8_18, BPFCLK),
+ PINMUX_IPSR_GPSR(IP8_18, PCMWE),
+ PINMUX_IPSR_GPSR(IP8_19, FMIN),
+ PINMUX_IPSR_GPSR(IP8_19, RDS_DATA),
+ PINMUX_IPSR_GPSR(IP8_20, VI0_CLK),
+ PINMUX_IPSR_GPSR(IP8_20, MMC1_CLK),
+ PINMUX_IPSR_GPSR(IP8_22_21, VI0_CLKENB),
+ PINMUX_IPSR_GPSR(IP8_22_21, TX1_C),
+ PINMUX_IPSR_GPSR(IP8_22_21, HTX1_B),
+ PINMUX_IPSR_GPSR(IP8_22_21, MT1_SYNC),
+ PINMUX_IPSR_GPSR(IP8_24_23, VI0_FIELD),
PINMUX_IPSR_MSEL(IP8_24_23, RX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP8_24_23, HRX1_B, SEL_HSCIF1_1),
- PINMUX_IPSR_DATA(IP8_27_25, VI0_HSYNC),
+ PINMUX_IPSR_GPSR(IP8_27_25, VI0_HSYNC),
PINMUX_IPSR_MSEL(IP8_27_25, VI0_DATA0_B_VI0_B0_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP8_27_25, CTS1_C, SEL_SCIF1_2),
- PINMUX_IPSR_DATA(IP8_27_25, TX4_D),
- PINMUX_IPSR_DATA(IP8_27_25, MMC1_CMD),
+ PINMUX_IPSR_GPSR(IP8_27_25, TX4_D),
+ PINMUX_IPSR_GPSR(IP8_27_25, MMC1_CMD),
PINMUX_IPSR_MSEL(IP8_27_25, HSCK1_B, SEL_HSCIF1_1),
- PINMUX_IPSR_DATA(IP8_30_28, VI0_VSYNC),
+ PINMUX_IPSR_GPSR(IP8_30_28, VI0_VSYNC),
PINMUX_IPSR_MSEL(IP8_30_28, VI0_DATA1_B_VI0_B1_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP8_30_28, RTS1_C_TANS_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP8_30_28, RX4_D, SEL_SCIF4_3),
@@ -1189,216 +1189,216 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP9_1_0, VI0_DATA0_VI0_B0, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP9_1_0, HRTS1_B, SEL_HSCIF1_1),
- PINMUX_IPSR_DATA(IP9_1_0, MT1_VCXO),
+ PINMUX_IPSR_GPSR(IP9_1_0, MT1_VCXO),
PINMUX_IPSR_MSEL(IP9_3_2, VI0_DATA1_VI0_B1, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP9_3_2, HCTS1_B, SEL_HSCIF1_1),
- PINMUX_IPSR_DATA(IP9_3_2, MT1_PWM),
- PINMUX_IPSR_DATA(IP9_4, VI0_DATA2_VI0_B2),
- PINMUX_IPSR_DATA(IP9_4, MMC1_D0),
- PINMUX_IPSR_DATA(IP9_5, VI0_DATA3_VI0_B3),
- PINMUX_IPSR_DATA(IP9_5, MMC1_D1),
- PINMUX_IPSR_DATA(IP9_6, VI0_DATA4_VI0_B4),
- PINMUX_IPSR_DATA(IP9_6, MMC1_D2),
- PINMUX_IPSR_DATA(IP9_7, VI0_DATA5_VI0_B5),
- PINMUX_IPSR_DATA(IP9_7, MMC1_D3),
- PINMUX_IPSR_DATA(IP9_9_8, VI0_DATA6_VI0_B6),
- PINMUX_IPSR_DATA(IP9_9_8, MMC1_D4),
- PINMUX_IPSR_DATA(IP9_9_8, ARM_TRACEDATA_0),
- PINMUX_IPSR_DATA(IP9_11_10, VI0_DATA7_VI0_B7),
- PINMUX_IPSR_DATA(IP9_11_10, MMC1_D5),
- PINMUX_IPSR_DATA(IP9_11_10, ARM_TRACEDATA_1),
- PINMUX_IPSR_DATA(IP9_13_12, VI0_G0),
+ PINMUX_IPSR_GPSR(IP9_3_2, MT1_PWM),
+ PINMUX_IPSR_GPSR(IP9_4, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_GPSR(IP9_4, MMC1_D0),
+ PINMUX_IPSR_GPSR(IP9_5, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_GPSR(IP9_5, MMC1_D1),
+ PINMUX_IPSR_GPSR(IP9_6, VI0_DATA4_VI0_B4),
+ PINMUX_IPSR_GPSR(IP9_6, MMC1_D2),
+ PINMUX_IPSR_GPSR(IP9_7, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_GPSR(IP9_7, MMC1_D3),
+ PINMUX_IPSR_GPSR(IP9_9_8, VI0_DATA6_VI0_B6),
+ PINMUX_IPSR_GPSR(IP9_9_8, MMC1_D4),
+ PINMUX_IPSR_GPSR(IP9_9_8, ARM_TRACEDATA_0),
+ PINMUX_IPSR_GPSR(IP9_11_10, VI0_DATA7_VI0_B7),
+ PINMUX_IPSR_GPSR(IP9_11_10, MMC1_D5),
+ PINMUX_IPSR_GPSR(IP9_11_10, ARM_TRACEDATA_1),
+ PINMUX_IPSR_GPSR(IP9_13_12, VI0_G0),
PINMUX_IPSR_MSEL(IP9_13_12, SSI_SCK78_C, SEL_SSI7_2),
PINMUX_IPSR_MSEL(IP9_13_12, IRQ0, SEL_INT0_0),
- PINMUX_IPSR_DATA(IP9_13_12, ARM_TRACEDATA_2),
- PINMUX_IPSR_DATA(IP9_15_14, VI0_G1),
+ PINMUX_IPSR_GPSR(IP9_13_12, ARM_TRACEDATA_2),
+ PINMUX_IPSR_GPSR(IP9_15_14, VI0_G1),
PINMUX_IPSR_MSEL(IP9_15_14, SSI_WS78_C, SEL_SSI7_2),
PINMUX_IPSR_MSEL(IP9_15_14, IRQ1, SEL_INT1_0),
- PINMUX_IPSR_DATA(IP9_15_14, ARM_TRACEDATA_3),
- PINMUX_IPSR_DATA(IP9_18_16, VI0_G2),
- PINMUX_IPSR_DATA(IP9_18_16, ETH_TXD1),
- PINMUX_IPSR_DATA(IP9_18_16, MMC1_D6),
- PINMUX_IPSR_DATA(IP9_18_16, ARM_TRACEDATA_4),
- PINMUX_IPSR_DATA(IP9_18_16, TS_SPSYNC0),
- PINMUX_IPSR_DATA(IP9_21_19, VI0_G3),
- PINMUX_IPSR_DATA(IP9_21_19, ETH_CRS_DV),
- PINMUX_IPSR_DATA(IP9_21_19, MMC1_D7),
- PINMUX_IPSR_DATA(IP9_21_19, ARM_TRACEDATA_5),
- PINMUX_IPSR_DATA(IP9_21_19, TS_SDAT0),
- PINMUX_IPSR_DATA(IP9_23_22, VI0_G4),
- PINMUX_IPSR_DATA(IP9_23_22, ETH_TX_EN),
+ PINMUX_IPSR_GPSR(IP9_15_14, ARM_TRACEDATA_3),
+ PINMUX_IPSR_GPSR(IP9_18_16, VI0_G2),
+ PINMUX_IPSR_GPSR(IP9_18_16, ETH_TXD1),
+ PINMUX_IPSR_GPSR(IP9_18_16, MMC1_D6),
+ PINMUX_IPSR_GPSR(IP9_18_16, ARM_TRACEDATA_4),
+ PINMUX_IPSR_GPSR(IP9_18_16, TS_SPSYNC0),
+ PINMUX_IPSR_GPSR(IP9_21_19, VI0_G3),
+ PINMUX_IPSR_GPSR(IP9_21_19, ETH_CRS_DV),
+ PINMUX_IPSR_GPSR(IP9_21_19, MMC1_D7),
+ PINMUX_IPSR_GPSR(IP9_21_19, ARM_TRACEDATA_5),
+ PINMUX_IPSR_GPSR(IP9_21_19, TS_SDAT0),
+ PINMUX_IPSR_GPSR(IP9_23_22, VI0_G4),
+ PINMUX_IPSR_GPSR(IP9_23_22, ETH_TX_EN),
PINMUX_IPSR_MSEL(IP9_23_22, SD2_DAT0_B, SEL_SD2_1),
- PINMUX_IPSR_DATA(IP9_23_22, ARM_TRACEDATA_6),
- PINMUX_IPSR_DATA(IP9_25_24, VI0_G5),
- PINMUX_IPSR_DATA(IP9_25_24, ETH_RX_ER),
+ PINMUX_IPSR_GPSR(IP9_23_22, ARM_TRACEDATA_6),
+ PINMUX_IPSR_GPSR(IP9_25_24, VI0_G5),
+ PINMUX_IPSR_GPSR(IP9_25_24, ETH_RX_ER),
PINMUX_IPSR_MSEL(IP9_25_24, SD2_DAT1_B, SEL_SD2_1),
- PINMUX_IPSR_DATA(IP9_25_24, ARM_TRACEDATA_7),
- PINMUX_IPSR_DATA(IP9_27_26, VI0_G6),
- PINMUX_IPSR_DATA(IP9_27_26, ETH_RXD0),
+ PINMUX_IPSR_GPSR(IP9_25_24, ARM_TRACEDATA_7),
+ PINMUX_IPSR_GPSR(IP9_27_26, VI0_G6),
+ PINMUX_IPSR_GPSR(IP9_27_26, ETH_RXD0),
PINMUX_IPSR_MSEL(IP9_27_26, SD2_DAT2_B, SEL_SD2_1),
- PINMUX_IPSR_DATA(IP9_27_26, ARM_TRACEDATA_8),
- PINMUX_IPSR_DATA(IP9_29_28, VI0_G7),
- PINMUX_IPSR_DATA(IP9_29_28, ETH_RXD1),
+ PINMUX_IPSR_GPSR(IP9_27_26, ARM_TRACEDATA_8),
+ PINMUX_IPSR_GPSR(IP9_29_28, VI0_G7),
+ PINMUX_IPSR_GPSR(IP9_29_28, ETH_RXD1),
PINMUX_IPSR_MSEL(IP9_29_28, SD2_DAT3_B, SEL_SD2_1),
- PINMUX_IPSR_DATA(IP9_29_28, ARM_TRACEDATA_9),
+ PINMUX_IPSR_GPSR(IP9_29_28, ARM_TRACEDATA_9),
- PINMUX_IPSR_DATA(IP10_2_0, VI0_R0),
+ PINMUX_IPSR_GPSR(IP10_2_0, VI0_R0),
PINMUX_IPSR_MSEL(IP10_2_0, SSI_SDATA7_C, SEL_SSI7_2),
PINMUX_IPSR_MSEL(IP10_2_0, SCK1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP10_2_0, DREQ1_B, SEL_EXBUS1_0),
- PINMUX_IPSR_DATA(IP10_2_0, ARM_TRACEDATA_10),
+ PINMUX_IPSR_GPSR(IP10_2_0, ARM_TRACEDATA_10),
PINMUX_IPSR_MSEL(IP10_2_0, DREQ0_C, SEL_EXBUS0_2),
- PINMUX_IPSR_DATA(IP10_5_3, VI0_R1),
+ PINMUX_IPSR_GPSR(IP10_5_3, VI0_R1),
PINMUX_IPSR_MSEL(IP10_5_3, SSI_SDATA8_C, SEL_SSI8_2),
- PINMUX_IPSR_DATA(IP10_5_3, DACK1_B),
- PINMUX_IPSR_DATA(IP10_5_3, ARM_TRACEDATA_11),
- PINMUX_IPSR_DATA(IP10_5_3, DACK0_C),
- PINMUX_IPSR_DATA(IP10_5_3, DRACK0_C),
- PINMUX_IPSR_DATA(IP10_8_6, VI0_R2),
- PINMUX_IPSR_DATA(IP10_8_6, ETH_LINK),
- PINMUX_IPSR_DATA(IP10_8_6, SD2_CLK_B),
+ PINMUX_IPSR_GPSR(IP10_5_3, DACK1_B),
+ PINMUX_IPSR_GPSR(IP10_5_3, ARM_TRACEDATA_11),
+ PINMUX_IPSR_GPSR(IP10_5_3, DACK0_C),
+ PINMUX_IPSR_GPSR(IP10_5_3, DRACK0_C),
+ PINMUX_IPSR_GPSR(IP10_8_6, VI0_R2),
+ PINMUX_IPSR_GPSR(IP10_8_6, ETH_LINK),
+ PINMUX_IPSR_GPSR(IP10_8_6, SD2_CLK_B),
PINMUX_IPSR_MSEL(IP10_8_6, IRQ2, SEL_INT2_0),
- PINMUX_IPSR_DATA(IP10_8_6, ARM_TRACEDATA_12),
- PINMUX_IPSR_DATA(IP10_11_9, VI0_R3),
- PINMUX_IPSR_DATA(IP10_11_9, ETH_MAGIC),
+ PINMUX_IPSR_GPSR(IP10_8_6, ARM_TRACEDATA_12),
+ PINMUX_IPSR_GPSR(IP10_11_9, VI0_R3),
+ PINMUX_IPSR_GPSR(IP10_11_9, ETH_MAGIC),
PINMUX_IPSR_MSEL(IP10_11_9, SD2_CMD_B, SEL_SD2_1),
PINMUX_IPSR_MSEL(IP10_11_9, IRQ3, SEL_INT3_0),
- PINMUX_IPSR_DATA(IP10_11_9, ARM_TRACEDATA_13),
- PINMUX_IPSR_DATA(IP10_14_12, VI0_R4),
- PINMUX_IPSR_DATA(IP10_14_12, ETH_REFCLK),
+ PINMUX_IPSR_GPSR(IP10_11_9, ARM_TRACEDATA_13),
+ PINMUX_IPSR_GPSR(IP10_14_12, VI0_R4),
+ PINMUX_IPSR_GPSR(IP10_14_12, ETH_REFCLK),
PINMUX_IPSR_MSEL(IP10_14_12, SD2_CD_B, SEL_SD2_1),
PINMUX_IPSR_MSEL(IP10_14_12, HSPI_CLK1_B, SEL_HSPI1_1),
- PINMUX_IPSR_DATA(IP10_14_12, ARM_TRACEDATA_14),
- PINMUX_IPSR_DATA(IP10_14_12, MT1_CLK),
- PINMUX_IPSR_DATA(IP10_14_12, TS_SCK0),
- PINMUX_IPSR_DATA(IP10_17_15, VI0_R5),
- PINMUX_IPSR_DATA(IP10_17_15, ETH_TXD0),
+ PINMUX_IPSR_GPSR(IP10_14_12, ARM_TRACEDATA_14),
+ PINMUX_IPSR_GPSR(IP10_14_12, MT1_CLK),
+ PINMUX_IPSR_GPSR(IP10_14_12, TS_SCK0),
+ PINMUX_IPSR_GPSR(IP10_17_15, VI0_R5),
+ PINMUX_IPSR_GPSR(IP10_17_15, ETH_TXD0),
PINMUX_IPSR_MSEL(IP10_17_15, SD2_WP_B, SEL_SD2_1),
PINMUX_IPSR_MSEL(IP10_17_15, HSPI_CS1_B, SEL_HSPI1_1),
- PINMUX_IPSR_DATA(IP10_17_15, ARM_TRACEDATA_15),
- PINMUX_IPSR_DATA(IP10_17_15, MT1_D),
- PINMUX_IPSR_DATA(IP10_17_15, TS_SDEN0),
- PINMUX_IPSR_DATA(IP10_20_18, VI0_R6),
- PINMUX_IPSR_DATA(IP10_20_18, ETH_MDC),
+ PINMUX_IPSR_GPSR(IP10_17_15, ARM_TRACEDATA_15),
+ PINMUX_IPSR_GPSR(IP10_17_15, MT1_D),
+ PINMUX_IPSR_GPSR(IP10_17_15, TS_SDEN0),
+ PINMUX_IPSR_GPSR(IP10_20_18, VI0_R6),
+ PINMUX_IPSR_GPSR(IP10_20_18, ETH_MDC),
PINMUX_IPSR_MSEL(IP10_20_18, DREQ2_C, SEL_EXBUS2_2),
- PINMUX_IPSR_DATA(IP10_20_18, HSPI_TX1_B),
- PINMUX_IPSR_DATA(IP10_20_18, TRACECLK),
- PINMUX_IPSR_DATA(IP10_20_18, MT1_BEN),
+ PINMUX_IPSR_GPSR(IP10_20_18, HSPI_TX1_B),
+ PINMUX_IPSR_GPSR(IP10_20_18, TRACECLK),
+ PINMUX_IPSR_GPSR(IP10_20_18, MT1_BEN),
PINMUX_IPSR_MSEL(IP10_20_18, PWMFSW0_D, SEL_PWMFSW_3),
- PINMUX_IPSR_DATA(IP10_23_21, VI0_R7),
- PINMUX_IPSR_DATA(IP10_23_21, ETH_MDIO),
- PINMUX_IPSR_DATA(IP10_23_21, DACK2_C),
+ PINMUX_IPSR_GPSR(IP10_23_21, VI0_R7),
+ PINMUX_IPSR_GPSR(IP10_23_21, ETH_MDIO),
+ PINMUX_IPSR_GPSR(IP10_23_21, DACK2_C),
PINMUX_IPSR_MSEL(IP10_23_21, HSPI_RX1_B, SEL_HSPI1_1),
PINMUX_IPSR_MSEL(IP10_23_21, SCIF_CLK_D, SEL_SCIF_3),
- PINMUX_IPSR_DATA(IP10_23_21, TRACECTL),
- PINMUX_IPSR_DATA(IP10_23_21, MT1_PEN),
- PINMUX_IPSR_DATA(IP10_25_24, VI1_CLK),
+ PINMUX_IPSR_GPSR(IP10_23_21, TRACECTL),
+ PINMUX_IPSR_GPSR(IP10_23_21, MT1_PEN),
+ PINMUX_IPSR_GPSR(IP10_25_24, VI1_CLK),
PINMUX_IPSR_MSEL(IP10_25_24, SIM_D, SEL_SIM_0),
PINMUX_IPSR_MSEL(IP10_25_24, SDA3, SEL_I2C3_0),
- PINMUX_IPSR_DATA(IP10_28_26, VI1_HSYNC),
- PINMUX_IPSR_DATA(IP10_28_26, VI3_CLK),
- PINMUX_IPSR_DATA(IP10_28_26, SSI_SCK4),
+ PINMUX_IPSR_GPSR(IP10_28_26, VI1_HSYNC),
+ PINMUX_IPSR_GPSR(IP10_28_26, VI3_CLK),
+ PINMUX_IPSR_GPSR(IP10_28_26, SSI_SCK4),
PINMUX_IPSR_MSEL(IP10_28_26, GPS_SIGN_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP10_28_26, PWMFSW0_E, SEL_PWMFSW_4),
- PINMUX_IPSR_DATA(IP10_31_29, VI1_VSYNC),
- PINMUX_IPSR_DATA(IP10_31_29, AUDIO_CLKOUT_C),
- PINMUX_IPSR_DATA(IP10_31_29, SSI_WS4),
- PINMUX_IPSR_DATA(IP10_31_29, SIM_CLK),
+ PINMUX_IPSR_GPSR(IP10_31_29, VI1_VSYNC),
+ PINMUX_IPSR_GPSR(IP10_31_29, AUDIO_CLKOUT_C),
+ PINMUX_IPSR_GPSR(IP10_31_29, SSI_WS4),
+ PINMUX_IPSR_GPSR(IP10_31_29, SIM_CLK),
PINMUX_IPSR_MSEL(IP10_31_29, GPS_MAG_C, SEL_GPS_2),
- PINMUX_IPSR_DATA(IP10_31_29, SPV_TRST),
+ PINMUX_IPSR_GPSR(IP10_31_29, SPV_TRST),
PINMUX_IPSR_MSEL(IP10_31_29, SCL3, SEL_I2C3_0),
- PINMUX_IPSR_DATA(IP11_2_0, VI1_DATA0_VI1_B0),
+ PINMUX_IPSR_GPSR(IP11_2_0, VI1_DATA0_VI1_B0),
PINMUX_IPSR_MSEL(IP11_2_0, SD2_DAT0, SEL_SD2_0),
- PINMUX_IPSR_DATA(IP11_2_0, SIM_RST),
- PINMUX_IPSR_DATA(IP11_2_0, SPV_TCK),
- PINMUX_IPSR_DATA(IP11_2_0, ADICLK_B),
- PINMUX_IPSR_DATA(IP11_5_3, VI1_DATA1_VI1_B1),
+ PINMUX_IPSR_GPSR(IP11_2_0, SIM_RST),
+ PINMUX_IPSR_GPSR(IP11_2_0, SPV_TCK),
+ PINMUX_IPSR_GPSR(IP11_2_0, ADICLK_B),
+ PINMUX_IPSR_GPSR(IP11_5_3, VI1_DATA1_VI1_B1),
PINMUX_IPSR_MSEL(IP11_5_3, SD2_DAT1, SEL_SD2_0),
- PINMUX_IPSR_DATA(IP11_5_3, MT0_CLK),
- PINMUX_IPSR_DATA(IP11_5_3, SPV_TMS),
+ PINMUX_IPSR_GPSR(IP11_5_3, MT0_CLK),
+ PINMUX_IPSR_GPSR(IP11_5_3, SPV_TMS),
PINMUX_IPSR_MSEL(IP11_5_3, ADICS_B_SAMP_B, SEL_ADI_1),
- PINMUX_IPSR_DATA(IP11_8_6, VI1_DATA2_VI1_B2),
+ PINMUX_IPSR_GPSR(IP11_8_6, VI1_DATA2_VI1_B2),
PINMUX_IPSR_MSEL(IP11_8_6, SD2_DAT2, SEL_SD2_0),
- PINMUX_IPSR_DATA(IP11_8_6, MT0_D),
- PINMUX_IPSR_DATA(IP11_8_6, SPVTDI),
+ PINMUX_IPSR_GPSR(IP11_8_6, MT0_D),
+ PINMUX_IPSR_GPSR(IP11_8_6, SPVTDI),
PINMUX_IPSR_MSEL(IP11_8_6, ADIDATA_B, SEL_ADI_1),
- PINMUX_IPSR_DATA(IP11_11_9, VI1_DATA3_VI1_B3),
+ PINMUX_IPSR_GPSR(IP11_11_9, VI1_DATA3_VI1_B3),
PINMUX_IPSR_MSEL(IP11_11_9, SD2_DAT3, SEL_SD2_0),
- PINMUX_IPSR_DATA(IP11_11_9, MT0_BEN),
- PINMUX_IPSR_DATA(IP11_11_9, SPV_TDO),
- PINMUX_IPSR_DATA(IP11_11_9, ADICHS0_B),
- PINMUX_IPSR_DATA(IP11_14_12, VI1_DATA4_VI1_B4),
- PINMUX_IPSR_DATA(IP11_14_12, SD2_CLK),
- PINMUX_IPSR_DATA(IP11_14_12, MT0_PEN),
- PINMUX_IPSR_DATA(IP11_14_12, SPA_TRST),
+ PINMUX_IPSR_GPSR(IP11_11_9, MT0_BEN),
+ PINMUX_IPSR_GPSR(IP11_11_9, SPV_TDO),
+ PINMUX_IPSR_GPSR(IP11_11_9, ADICHS0_B),
+ PINMUX_IPSR_GPSR(IP11_14_12, VI1_DATA4_VI1_B4),
+ PINMUX_IPSR_GPSR(IP11_14_12, SD2_CLK),
+ PINMUX_IPSR_GPSR(IP11_14_12, MT0_PEN),
+ PINMUX_IPSR_GPSR(IP11_14_12, SPA_TRST),
PINMUX_IPSR_MSEL(IP11_14_12, HSPI_CLK1_D, SEL_HSPI1_3),
- PINMUX_IPSR_DATA(IP11_14_12, ADICHS1_B),
- PINMUX_IPSR_DATA(IP11_17_15, VI1_DATA5_VI1_B5),
+ PINMUX_IPSR_GPSR(IP11_14_12, ADICHS1_B),
+ PINMUX_IPSR_GPSR(IP11_17_15, VI1_DATA5_VI1_B5),
PINMUX_IPSR_MSEL(IP11_17_15, SD2_CMD, SEL_SD2_0),
- PINMUX_IPSR_DATA(IP11_17_15, MT0_SYNC),
- PINMUX_IPSR_DATA(IP11_17_15, SPA_TCK),
+ PINMUX_IPSR_GPSR(IP11_17_15, MT0_SYNC),
+ PINMUX_IPSR_GPSR(IP11_17_15, SPA_TCK),
PINMUX_IPSR_MSEL(IP11_17_15, HSPI_CS1_D, SEL_HSPI1_3),
- PINMUX_IPSR_DATA(IP11_17_15, ADICHS2_B),
- PINMUX_IPSR_DATA(IP11_20_18, VI1_DATA6_VI1_B6),
+ PINMUX_IPSR_GPSR(IP11_17_15, ADICHS2_B),
+ PINMUX_IPSR_GPSR(IP11_20_18, VI1_DATA6_VI1_B6),
PINMUX_IPSR_MSEL(IP11_20_18, SD2_CD, SEL_SD2_0),
- PINMUX_IPSR_DATA(IP11_20_18, MT0_VCXO),
- PINMUX_IPSR_DATA(IP11_20_18, SPA_TMS),
- PINMUX_IPSR_DATA(IP11_20_18, HSPI_TX1_D),
- PINMUX_IPSR_DATA(IP11_23_21, VI1_DATA7_VI1_B7),
+ PINMUX_IPSR_GPSR(IP11_20_18, MT0_VCXO),
+ PINMUX_IPSR_GPSR(IP11_20_18, SPA_TMS),
+ PINMUX_IPSR_GPSR(IP11_20_18, HSPI_TX1_D),
+ PINMUX_IPSR_GPSR(IP11_23_21, VI1_DATA7_VI1_B7),
PINMUX_IPSR_MSEL(IP11_23_21, SD2_WP, SEL_SD2_0),
- PINMUX_IPSR_DATA(IP11_23_21, MT0_PWM),
- PINMUX_IPSR_DATA(IP11_23_21, SPA_TDI),
+ PINMUX_IPSR_GPSR(IP11_23_21, MT0_PWM),
+ PINMUX_IPSR_GPSR(IP11_23_21, SPA_TDI),
PINMUX_IPSR_MSEL(IP11_23_21, HSPI_RX1_D, SEL_HSPI1_3),
- PINMUX_IPSR_DATA(IP11_26_24, VI1_G0),
- PINMUX_IPSR_DATA(IP11_26_24, VI3_DATA0),
- PINMUX_IPSR_DATA(IP11_26_24, TS_SCK1),
+ PINMUX_IPSR_GPSR(IP11_26_24, VI1_G0),
+ PINMUX_IPSR_GPSR(IP11_26_24, VI3_DATA0),
+ PINMUX_IPSR_GPSR(IP11_26_24, TS_SCK1),
PINMUX_IPSR_MSEL(IP11_26_24, DREQ2_B, SEL_EXBUS2_1),
- PINMUX_IPSR_DATA(IP11_26_24, TX2),
- PINMUX_IPSR_DATA(IP11_26_24, SPA_TDO),
+ PINMUX_IPSR_GPSR(IP11_26_24, TX2),
+ PINMUX_IPSR_GPSR(IP11_26_24, SPA_TDO),
PINMUX_IPSR_MSEL(IP11_26_24, HCTS0_B, SEL_HSCIF0_1),
- PINMUX_IPSR_DATA(IP11_29_27, VI1_G1),
- PINMUX_IPSR_DATA(IP11_29_27, VI3_DATA1),
- PINMUX_IPSR_DATA(IP11_29_27, SSI_SCK1),
- PINMUX_IPSR_DATA(IP11_29_27, TS_SDEN1),
- PINMUX_IPSR_DATA(IP11_29_27, DACK2_B),
+ PINMUX_IPSR_GPSR(IP11_29_27, VI1_G1),
+ PINMUX_IPSR_GPSR(IP11_29_27, VI3_DATA1),
+ PINMUX_IPSR_GPSR(IP11_29_27, SSI_SCK1),
+ PINMUX_IPSR_GPSR(IP11_29_27, TS_SDEN1),
+ PINMUX_IPSR_GPSR(IP11_29_27, DACK2_B),
PINMUX_IPSR_MSEL(IP11_29_27, RX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP11_29_27, HRTS0_B, SEL_HSCIF0_1),
- PINMUX_IPSR_DATA(IP12_2_0, VI1_G2),
- PINMUX_IPSR_DATA(IP12_2_0, VI3_DATA2),
- PINMUX_IPSR_DATA(IP12_2_0, SSI_WS1),
- PINMUX_IPSR_DATA(IP12_2_0, TS_SPSYNC1),
+ PINMUX_IPSR_GPSR(IP12_2_0, VI1_G2),
+ PINMUX_IPSR_GPSR(IP12_2_0, VI3_DATA2),
+ PINMUX_IPSR_GPSR(IP12_2_0, SSI_WS1),
+ PINMUX_IPSR_GPSR(IP12_2_0, TS_SPSYNC1),
PINMUX_IPSR_MSEL(IP12_2_0, SCK2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP12_2_0, HSCK0_B, SEL_HSCIF0_1),
- PINMUX_IPSR_DATA(IP12_5_3, VI1_G3),
- PINMUX_IPSR_DATA(IP12_5_3, VI3_DATA3),
- PINMUX_IPSR_DATA(IP12_5_3, SSI_SCK2),
- PINMUX_IPSR_DATA(IP12_5_3, TS_SDAT1),
+ PINMUX_IPSR_GPSR(IP12_5_3, VI1_G3),
+ PINMUX_IPSR_GPSR(IP12_5_3, VI3_DATA3),
+ PINMUX_IPSR_GPSR(IP12_5_3, SSI_SCK2),
+ PINMUX_IPSR_GPSR(IP12_5_3, TS_SDAT1),
PINMUX_IPSR_MSEL(IP12_5_3, SCL1_C, SEL_I2C1_2),
- PINMUX_IPSR_DATA(IP12_5_3, HTX0_B),
- PINMUX_IPSR_DATA(IP12_8_6, VI1_G4),
- PINMUX_IPSR_DATA(IP12_8_6, VI3_DATA4),
- PINMUX_IPSR_DATA(IP12_8_6, SSI_WS2),
+ PINMUX_IPSR_GPSR(IP12_5_3, HTX0_B),
+ PINMUX_IPSR_GPSR(IP12_8_6, VI1_G4),
+ PINMUX_IPSR_GPSR(IP12_8_6, VI3_DATA4),
+ PINMUX_IPSR_GPSR(IP12_8_6, SSI_WS2),
PINMUX_IPSR_MSEL(IP12_8_6, SDA1_C, SEL_I2C1_2),
- PINMUX_IPSR_DATA(IP12_8_6, SIM_RST_B),
+ PINMUX_IPSR_GPSR(IP12_8_6, SIM_RST_B),
PINMUX_IPSR_MSEL(IP12_8_6, HRX0_B, SEL_HSCIF0_1),
- PINMUX_IPSR_DATA(IP12_11_9, VI1_G5),
- PINMUX_IPSR_DATA(IP12_11_9, VI3_DATA5),
+ PINMUX_IPSR_GPSR(IP12_11_9, VI1_G5),
+ PINMUX_IPSR_GPSR(IP12_11_9, VI3_DATA5),
PINMUX_IPSR_MSEL(IP12_11_9, GPS_CLK, SEL_GPS_0),
- PINMUX_IPSR_DATA(IP12_11_9, FSE),
- PINMUX_IPSR_DATA(IP12_11_9, TX4_B),
+ PINMUX_IPSR_GPSR(IP12_11_9, FSE),
+ PINMUX_IPSR_GPSR(IP12_11_9, TX4_B),
PINMUX_IPSR_MSEL(IP12_11_9, SIM_D_B, SEL_SIM_1),
- PINMUX_IPSR_DATA(IP12_14_12, VI1_G6),
- PINMUX_IPSR_DATA(IP12_14_12, VI3_DATA6),
+ PINMUX_IPSR_GPSR(IP12_14_12, VI1_G6),
+ PINMUX_IPSR_GPSR(IP12_14_12, VI3_DATA6),
PINMUX_IPSR_MSEL(IP12_14_12, GPS_SIGN, SEL_GPS_0),
- PINMUX_IPSR_DATA(IP12_14_12, FRB),
+ PINMUX_IPSR_GPSR(IP12_14_12, FRB),
PINMUX_IPSR_MSEL(IP12_14_12, RX4_B, SEL_SCIF4_1),
- PINMUX_IPSR_DATA(IP12_14_12, SIM_CLK_B),
- PINMUX_IPSR_DATA(IP12_17_15, VI1_G7),
- PINMUX_IPSR_DATA(IP12_17_15, VI3_DATA7),
+ PINMUX_IPSR_GPSR(IP12_14_12, SIM_CLK_B),
+ PINMUX_IPSR_GPSR(IP12_17_15, VI1_G7),
+ PINMUX_IPSR_GPSR(IP12_17_15, VI3_DATA7),
PINMUX_IPSR_MSEL(IP12_17_15, GPS_MAG, SEL_GPS_0),
- PINMUX_IPSR_DATA(IP12_17_15, FCE),
+ PINMUX_IPSR_GPSR(IP12_17_15, FCE),
PINMUX_IPSR_MSEL(IP12_17_15, SCK4_B, SEL_SCIF4_1),
};
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index a8b629bc7a55..0f4d48f9400b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -799,47 +799,47 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(DU_DOTCLKIN0),
PINMUX_SINGLE(DU_DOTCLKIN2),
- PINMUX_IPSR_DATA(IP0_2_0, D0),
+ PINMUX_IPSR_GPSR(IP0_2_0, D0),
PINMUX_IPSR_MSEL(IP0_2_0, MSIOF3_SCK_B, SEL_SOF3_1),
PINMUX_IPSR_MSEL(IP0_2_0, VI3_DATA0, SEL_VI3_0),
PINMUX_IPSR_MSEL(IP0_2_0, VI0_G4, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP0_2_0, VI0_G4_B, SEL_VI0_1),
- PINMUX_IPSR_DATA(IP0_5_3, D1),
+ PINMUX_IPSR_GPSR(IP0_5_3, D1),
PINMUX_IPSR_MSEL(IP0_5_3, MSIOF3_SYNC_B, SEL_SOF3_1),
PINMUX_IPSR_MSEL(IP0_5_3, VI3_DATA1, SEL_VI3_0),
PINMUX_IPSR_MSEL(IP0_5_3, VI0_G5, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP0_5_3, VI0_G5_B, SEL_VI0_1),
- PINMUX_IPSR_DATA(IP0_8_6, D2),
+ PINMUX_IPSR_GPSR(IP0_8_6, D2),
PINMUX_IPSR_MSEL(IP0_8_6, MSIOF3_RXD_B, SEL_SOF3_1),
PINMUX_IPSR_MSEL(IP0_8_6, VI3_DATA2, SEL_VI3_0),
PINMUX_IPSR_MSEL(IP0_8_6, VI0_G6, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP0_8_6, VI0_G6_B, SEL_VI0_1),
- PINMUX_IPSR_DATA(IP0_11_9, D3),
+ PINMUX_IPSR_GPSR(IP0_11_9, D3),
PINMUX_IPSR_MSEL(IP0_11_9, MSIOF3_TXD_B, SEL_SOF3_1),
PINMUX_IPSR_MSEL(IP0_11_9, VI3_DATA3, SEL_VI3_0),
PINMUX_IPSR_MSEL(IP0_11_9, VI0_G7, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP0_11_9, VI0_G7_B, SEL_VI0_1),
- PINMUX_IPSR_DATA(IP0_15_12, D4),
+ PINMUX_IPSR_GPSR(IP0_15_12, D4),
PINMUX_IPSR_MSEL(IP0_15_12, SCIFB1_RXD_F, SEL_SCIFB1_5),
PINMUX_IPSR_MSEL(IP0_15_12, SCIFB0_RXD_C, SEL_SCIFB_2),
PINMUX_IPSR_MSEL(IP0_15_12, VI3_DATA4, SEL_VI3_0),
PINMUX_IPSR_MSEL(IP0_15_12, VI0_R0, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP0_15_12, VI0_R0_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP0_15_12, RX0_B, SEL_SCIF0_1),
- PINMUX_IPSR_DATA(IP0_19_16, D5),
+ PINMUX_IPSR_GPSR(IP0_19_16, D5),
PINMUX_IPSR_MSEL(IP0_19_16, SCIFB1_TXD_F, SEL_SCIFB1_5),
PINMUX_IPSR_MSEL(IP0_19_16, SCIFB0_TXD_C, SEL_SCIFB_2),
PINMUX_IPSR_MSEL(IP0_19_16, VI3_DATA5, SEL_VI3_0),
PINMUX_IPSR_MSEL(IP0_19_16, VI0_R1, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP0_19_16, VI0_R1_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP0_19_16, TX0_B, SEL_SCIF0_1),
- PINMUX_IPSR_DATA(IP0_22_20, D6),
+ PINMUX_IPSR_GPSR(IP0_22_20, D6),
PINMUX_IPSR_MSEL(IP0_22_20, IIC2_SCL_C, SEL_IIC2_2),
PINMUX_IPSR_MSEL(IP0_22_20, VI3_DATA6, SEL_VI3_0),
PINMUX_IPSR_MSEL(IP0_22_20, VI0_R2, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP0_22_20, VI0_R2_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP0_22_20, I2C2_SCL_C, SEL_I2C2_2),
- PINMUX_IPSR_DATA(IP0_26_23, D7),
+ PINMUX_IPSR_GPSR(IP0_26_23, D7),
PINMUX_IPSR_MSEL(IP0_26_23, AD_DI_B, SEL_ADI_1),
PINMUX_IPSR_MSEL(IP0_26_23, IIC2_SDA_C, SEL_IIC2_2),
PINMUX_IPSR_MSEL(IP0_26_23, VI3_DATA7, SEL_VI3_0),
@@ -847,81 +847,81 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP0_26_23, VI0_R3_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP0_26_23, I2C2_SDA_C, SEL_I2C2_2),
PINMUX_IPSR_MSEL(IP0_26_23, TCLK1, SEL_TMU1_0),
- PINMUX_IPSR_DATA(IP0_30_27, D8),
+ PINMUX_IPSR_GPSR(IP0_30_27, D8),
PINMUX_IPSR_MSEL(IP0_30_27, SCIFA1_SCK_C, SEL_SCIFA1_2),
- PINMUX_IPSR_DATA(IP0_30_27, AVB_TXD0),
+ PINMUX_IPSR_GPSR(IP0_30_27, AVB_TXD0),
PINMUX_IPSR_MSEL(IP0_30_27, VI0_G0, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP0_30_27, VI0_G0_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP0_30_27, VI2_DATA0_VI2_B0, SEL_VI2_0),
- PINMUX_IPSR_DATA(IP1_3_0, D9),
+ PINMUX_IPSR_GPSR(IP1_3_0, D9),
PINMUX_IPSR_MSEL(IP1_3_0, SCIFA1_RXD_C, SEL_SCIFA1_2),
- PINMUX_IPSR_DATA(IP1_3_0, AVB_TXD1),
+ PINMUX_IPSR_GPSR(IP1_3_0, AVB_TXD1),
PINMUX_IPSR_MSEL(IP1_3_0, VI0_G1, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP1_3_0, VI0_G1_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP1_3_0, VI2_DATA1_VI2_B1, SEL_VI2_0),
- PINMUX_IPSR_DATA(IP1_7_4, D10),
+ PINMUX_IPSR_GPSR(IP1_7_4, D10),
PINMUX_IPSR_MSEL(IP1_7_4, SCIFA1_TXD_C, SEL_SCIFA1_2),
- PINMUX_IPSR_DATA(IP1_7_4, AVB_TXD2),
+ PINMUX_IPSR_GPSR(IP1_7_4, AVB_TXD2),
PINMUX_IPSR_MSEL(IP1_7_4, VI0_G2, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP1_7_4, VI0_G2_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP1_7_4, VI2_DATA2_VI2_B2, SEL_VI2_0),
- PINMUX_IPSR_DATA(IP1_11_8, D11),
+ PINMUX_IPSR_GPSR(IP1_11_8, D11),
PINMUX_IPSR_MSEL(IP1_11_8, SCIFA1_CTS_N_C, SEL_SCIFA1_2),
- PINMUX_IPSR_DATA(IP1_11_8, AVB_TXD3),
+ PINMUX_IPSR_GPSR(IP1_11_8, AVB_TXD3),
PINMUX_IPSR_MSEL(IP1_11_8, VI0_G3, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP1_11_8, VI0_G3_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP1_11_8, VI2_DATA3_VI2_B3, SEL_VI2_0),
- PINMUX_IPSR_DATA(IP1_14_12, D12),
+ PINMUX_IPSR_GPSR(IP1_14_12, D12),
PINMUX_IPSR_MSEL(IP1_14_12, SCIFA1_RTS_N_C, SEL_SCIFA1_2),
- PINMUX_IPSR_DATA(IP1_14_12, AVB_TXD4),
+ PINMUX_IPSR_GPSR(IP1_14_12, AVB_TXD4),
PINMUX_IPSR_MSEL(IP1_14_12, VI0_HSYNC_N, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP1_14_12, VI0_HSYNC_N_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP1_14_12, VI2_DATA4_VI2_B4, SEL_VI2_0),
- PINMUX_IPSR_DATA(IP1_17_15, D13),
- PINMUX_IPSR_DATA(IP1_17_15, AVB_TXD5),
+ PINMUX_IPSR_GPSR(IP1_17_15, D13),
+ PINMUX_IPSR_GPSR(IP1_17_15, AVB_TXD5),
PINMUX_IPSR_MSEL(IP1_17_15, VI0_VSYNC_N, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP1_17_15, VI0_VSYNC_N_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP1_17_15, VI2_DATA5_VI2_B5, SEL_VI2_0),
- PINMUX_IPSR_DATA(IP1_21_18, D14),
+ PINMUX_IPSR_GPSR(IP1_21_18, D14),
PINMUX_IPSR_MSEL(IP1_21_18, SCIFB1_RXD_C, SEL_SCIFB1_2),
- PINMUX_IPSR_DATA(IP1_21_18, AVB_TXD6),
+ PINMUX_IPSR_GPSR(IP1_21_18, AVB_TXD6),
PINMUX_IPSR_MSEL(IP1_21_18, RX1_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP1_21_18, VI0_CLKENB, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP1_21_18, VI0_CLKENB_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP1_21_18, VI2_DATA6_VI2_B6, SEL_VI2_0),
- PINMUX_IPSR_DATA(IP1_25_22, D15),
+ PINMUX_IPSR_GPSR(IP1_25_22, D15),
PINMUX_IPSR_MSEL(IP1_25_22, SCIFB1_TXD_C, SEL_SCIFB1_2),
- PINMUX_IPSR_DATA(IP1_25_22, AVB_TXD7),
+ PINMUX_IPSR_GPSR(IP1_25_22, AVB_TXD7),
PINMUX_IPSR_MSEL(IP1_25_22, TX1_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP1_25_22, VI0_FIELD, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP1_25_22, VI0_FIELD_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP1_25_22, VI2_DATA7_VI2_B7, SEL_VI2_0),
- PINMUX_IPSR_DATA(IP1_27_26, A0),
- PINMUX_IPSR_DATA(IP1_27_26, PWM3),
- PINMUX_IPSR_DATA(IP1_29_28, A1),
- PINMUX_IPSR_DATA(IP1_29_28, PWM4),
+ PINMUX_IPSR_GPSR(IP1_27_26, A0),
+ PINMUX_IPSR_GPSR(IP1_27_26, PWM3),
+ PINMUX_IPSR_GPSR(IP1_29_28, A1),
+ PINMUX_IPSR_GPSR(IP1_29_28, PWM4),
- PINMUX_IPSR_DATA(IP2_2_0, A2),
- PINMUX_IPSR_DATA(IP2_2_0, PWM5),
+ PINMUX_IPSR_GPSR(IP2_2_0, A2),
+ PINMUX_IPSR_GPSR(IP2_2_0, PWM5),
PINMUX_IPSR_MSEL(IP2_2_0, MSIOF1_SS1_B, SEL_SOF1_1),
- PINMUX_IPSR_DATA(IP2_5_3, A3),
- PINMUX_IPSR_DATA(IP2_5_3, PWM6),
+ PINMUX_IPSR_GPSR(IP2_5_3, A3),
+ PINMUX_IPSR_GPSR(IP2_5_3, PWM6),
PINMUX_IPSR_MSEL(IP2_5_3, MSIOF1_SS2_B, SEL_SOF1_1),
- PINMUX_IPSR_DATA(IP2_8_6, A4),
+ PINMUX_IPSR_GPSR(IP2_8_6, A4),
PINMUX_IPSR_MSEL(IP2_8_6, MSIOF1_TXD_B, SEL_SOF1_1),
- PINMUX_IPSR_DATA(IP2_8_6, TPU0TO0),
- PINMUX_IPSR_DATA(IP2_11_9, A5),
+ PINMUX_IPSR_GPSR(IP2_8_6, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP2_11_9, A5),
PINMUX_IPSR_MSEL(IP2_11_9, SCIFA1_TXD_B, SEL_SCIFA1_1),
- PINMUX_IPSR_DATA(IP2_11_9, TPU0TO1),
- PINMUX_IPSR_DATA(IP2_14_12, A6),
+ PINMUX_IPSR_GPSR(IP2_11_9, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP2_14_12, A6),
PINMUX_IPSR_MSEL(IP2_14_12, SCIFA1_RTS_N_B, SEL_SCIFA1_1),
- PINMUX_IPSR_DATA(IP2_14_12, TPU0TO2),
- PINMUX_IPSR_DATA(IP2_17_15, A7),
+ PINMUX_IPSR_GPSR(IP2_14_12, TPU0TO2),
+ PINMUX_IPSR_GPSR(IP2_17_15, A7),
PINMUX_IPSR_MSEL(IP2_17_15, SCIFA1_SCK_B, SEL_SCIFA1_1),
- PINMUX_IPSR_DATA(IP2_17_15, AUDIO_CLKOUT_B),
- PINMUX_IPSR_DATA(IP2_17_15, TPU0TO3),
- PINMUX_IPSR_DATA(IP2_21_18, A8),
+ PINMUX_IPSR_GPSR(IP2_17_15, AUDIO_CLKOUT_B),
+ PINMUX_IPSR_GPSR(IP2_17_15, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP2_21_18, A8),
PINMUX_IPSR_MSEL(IP2_21_18, SCIFA1_RXD_B, SEL_SCIFA1_1),
PINMUX_IPSR_MSEL(IP2_21_18, SSI_SCK5_B, SEL_SSI5_1),
PINMUX_IPSR_MSEL(IP2_21_18, VI0_R4, SEL_VI0_0),
@@ -929,7 +929,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP2_21_18, SCIFB2_RXD_C, SEL_SCIFB2_2),
PINMUX_IPSR_MSEL(IP2_21_18, RX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP2_21_18, VI2_DATA0_VI2_B0_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP2_25_22, A9),
+ PINMUX_IPSR_GPSR(IP2_25_22, A9),
PINMUX_IPSR_MSEL(IP2_25_22, SCIFA1_CTS_N_B, SEL_SCIFA1_1),
PINMUX_IPSR_MSEL(IP2_25_22, SSI_WS5_B, SEL_SSI5_1),
PINMUX_IPSR_MSEL(IP2_25_22, VI0_R5, SEL_VI0_0),
@@ -937,392 +937,392 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP2_25_22, SCIFB2_TXD_C, SEL_SCIFB2_2),
PINMUX_IPSR_MSEL(IP2_25_22, TX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP2_25_22, VI2_DATA1_VI2_B1_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP2_28_26, A10),
+ PINMUX_IPSR_GPSR(IP2_28_26, A10),
PINMUX_IPSR_MSEL(IP2_28_26, SSI_SDATA5_B, SEL_SSI5_1),
- PINMUX_IPSR_DATA(IP2_28_26, MSIOF2_SYNC),
+ PINMUX_IPSR_GPSR(IP2_28_26, MSIOF2_SYNC),
PINMUX_IPSR_MSEL(IP2_28_26, VI0_R6, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP2_28_26, VI0_R6_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP2_28_26, VI2_DATA2_VI2_B2_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP3_3_0, A11),
+ PINMUX_IPSR_GPSR(IP3_3_0, A11),
PINMUX_IPSR_MSEL(IP3_3_0, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
- PINMUX_IPSR_DATA(IP3_3_0, MSIOF2_SCK),
+ PINMUX_IPSR_GPSR(IP3_3_0, MSIOF2_SCK),
PINMUX_IPSR_MSEL(IP3_3_0, VI1_R0, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP3_3_0, VI1_R0_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP3_3_0, VI2_G0),
+ PINMUX_IPSR_GPSR(IP3_3_0, VI2_G0),
PINMUX_IPSR_MSEL(IP3_3_0, VI2_DATA3_VI2_B3_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP3_7_4, A12),
+ PINMUX_IPSR_GPSR(IP3_7_4, A12),
PINMUX_IPSR_MSEL(IP3_7_4, SCIFB2_RXD_B, SEL_SCIFB2_1),
- PINMUX_IPSR_DATA(IP3_7_4, MSIOF2_TXD),
+ PINMUX_IPSR_GPSR(IP3_7_4, MSIOF2_TXD),
PINMUX_IPSR_MSEL(IP3_7_4, VI1_R1, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP3_7_4, VI1_R1_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP3_7_4, VI2_G1),
+ PINMUX_IPSR_GPSR(IP3_7_4, VI2_G1),
PINMUX_IPSR_MSEL(IP3_7_4, VI2_DATA4_VI2_B4_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP3_11_8, A13),
+ PINMUX_IPSR_GPSR(IP3_11_8, A13),
PINMUX_IPSR_MSEL(IP3_11_8, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
- PINMUX_IPSR_DATA(IP3_11_8, EX_WAIT2),
- PINMUX_IPSR_DATA(IP3_11_8, MSIOF2_RXD),
+ PINMUX_IPSR_GPSR(IP3_11_8, EX_WAIT2),
+ PINMUX_IPSR_GPSR(IP3_11_8, MSIOF2_RXD),
PINMUX_IPSR_MSEL(IP3_11_8, VI1_R2, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP3_11_8, VI1_R2_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP3_11_8, VI2_G2),
+ PINMUX_IPSR_GPSR(IP3_11_8, VI2_G2),
PINMUX_IPSR_MSEL(IP3_11_8, VI2_DATA5_VI2_B5_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP3_14_12, A14),
+ PINMUX_IPSR_GPSR(IP3_14_12, A14),
PINMUX_IPSR_MSEL(IP3_14_12, SCIFB2_TXD_B, SEL_SCIFB2_1),
- PINMUX_IPSR_DATA(IP3_14_12, ATACS11_N),
- PINMUX_IPSR_DATA(IP3_14_12, MSIOF2_SS1),
- PINMUX_IPSR_DATA(IP3_17_15, A15),
+ PINMUX_IPSR_GPSR(IP3_14_12, ATACS11_N),
+ PINMUX_IPSR_GPSR(IP3_14_12, MSIOF2_SS1),
+ PINMUX_IPSR_GPSR(IP3_17_15, A15),
PINMUX_IPSR_MSEL(IP3_17_15, SCIFB2_SCK_B, SEL_SCIFB2_1),
- PINMUX_IPSR_DATA(IP3_17_15, ATARD1_N),
- PINMUX_IPSR_DATA(IP3_17_15, MSIOF2_SS2),
- PINMUX_IPSR_DATA(IP3_19_18, A16),
- PINMUX_IPSR_DATA(IP3_19_18, ATAWR1_N),
- PINMUX_IPSR_DATA(IP3_22_20, A17),
+ PINMUX_IPSR_GPSR(IP3_17_15, ATARD1_N),
+ PINMUX_IPSR_GPSR(IP3_17_15, MSIOF2_SS2),
+ PINMUX_IPSR_GPSR(IP3_19_18, A16),
+ PINMUX_IPSR_GPSR(IP3_19_18, ATAWR1_N),
+ PINMUX_IPSR_GPSR(IP3_22_20, A17),
PINMUX_IPSR_MSEL(IP3_22_20, AD_DO_B, SEL_ADI_1),
- PINMUX_IPSR_DATA(IP3_22_20, ATADIR1_N),
- PINMUX_IPSR_DATA(IP3_25_23, A18),
+ PINMUX_IPSR_GPSR(IP3_22_20, ATADIR1_N),
+ PINMUX_IPSR_GPSR(IP3_25_23, A18),
PINMUX_IPSR_MSEL(IP3_25_23, AD_CLK_B, SEL_ADI_1),
- PINMUX_IPSR_DATA(IP3_25_23, ATAG1_N),
- PINMUX_IPSR_DATA(IP3_28_26, A19),
+ PINMUX_IPSR_GPSR(IP3_25_23, ATAG1_N),
+ PINMUX_IPSR_GPSR(IP3_28_26, A19),
PINMUX_IPSR_MSEL(IP3_28_26, AD_NCS_N_B, SEL_ADI_1),
- PINMUX_IPSR_DATA(IP3_28_26, ATACS01_N),
+ PINMUX_IPSR_GPSR(IP3_28_26, ATACS01_N),
PINMUX_IPSR_MSEL(IP3_28_26, EX_WAIT0_B, SEL_LBS_1),
- PINMUX_IPSR_DATA(IP3_31_29, A20),
- PINMUX_IPSR_DATA(IP3_31_29, SPCLK),
+ PINMUX_IPSR_GPSR(IP3_31_29, A20),
+ PINMUX_IPSR_GPSR(IP3_31_29, SPCLK),
PINMUX_IPSR_MSEL(IP3_31_29, VI1_R3, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP3_31_29, VI1_R3_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP3_31_29, VI2_G4),
+ PINMUX_IPSR_GPSR(IP3_31_29, VI2_G4),
- PINMUX_IPSR_DATA(IP4_2_0, A21),
- PINMUX_IPSR_DATA(IP4_2_0, MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP4_2_0, A21),
+ PINMUX_IPSR_GPSR(IP4_2_0, MOSI_IO0),
PINMUX_IPSR_MSEL(IP4_2_0, VI1_R4, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_2_0, VI1_R4_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP4_2_0, VI2_G5),
- PINMUX_IPSR_DATA(IP4_5_3, A22),
- PINMUX_IPSR_DATA(IP4_5_3, MISO_IO1),
+ PINMUX_IPSR_GPSR(IP4_2_0, VI2_G5),
+ PINMUX_IPSR_GPSR(IP4_5_3, A22),
+ PINMUX_IPSR_GPSR(IP4_5_3, MISO_IO1),
PINMUX_IPSR_MSEL(IP4_5_3, VI1_R5, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_5_3, VI1_R5_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP4_5_3, VI2_G6),
- PINMUX_IPSR_DATA(IP4_8_6, A23),
- PINMUX_IPSR_DATA(IP4_8_6, IO2),
+ PINMUX_IPSR_GPSR(IP4_5_3, VI2_G6),
+ PINMUX_IPSR_GPSR(IP4_8_6, A23),
+ PINMUX_IPSR_GPSR(IP4_8_6, IO2),
PINMUX_IPSR_MSEL(IP4_8_6, VI1_G7, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_8_6, VI1_G7_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP4_8_6, VI2_G7),
- PINMUX_IPSR_DATA(IP4_11_9, A24),
- PINMUX_IPSR_DATA(IP4_11_9, IO3),
+ PINMUX_IPSR_GPSR(IP4_8_6, VI2_G7),
+ PINMUX_IPSR_GPSR(IP4_11_9, A24),
+ PINMUX_IPSR_GPSR(IP4_11_9, IO3),
PINMUX_IPSR_MSEL(IP4_11_9, VI1_R7, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_11_9, VI1_R7_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP4_11_9, VI2_CLKENB, SEL_VI2_0),
PINMUX_IPSR_MSEL(IP4_11_9, VI2_CLKENB_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP4_14_12, A25),
- PINMUX_IPSR_DATA(IP4_14_12, SSL),
+ PINMUX_IPSR_GPSR(IP4_14_12, A25),
+ PINMUX_IPSR_GPSR(IP4_14_12, SSL),
PINMUX_IPSR_MSEL(IP4_14_12, VI1_G6, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_14_12, VI1_G6_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP4_14_12, VI2_FIELD, SEL_VI2_0),
PINMUX_IPSR_MSEL(IP4_14_12, VI2_FIELD_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP4_17_15, CS0_N),
+ PINMUX_IPSR_GPSR(IP4_17_15, CS0_N),
PINMUX_IPSR_MSEL(IP4_17_15, VI1_R6, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_17_15, VI1_R6_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP4_17_15, VI2_G3),
+ PINMUX_IPSR_GPSR(IP4_17_15, VI2_G3),
PINMUX_IPSR_MSEL(IP4_17_15, MSIOF0_SS2_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP4_20_18, CS1_N_A26),
- PINMUX_IPSR_DATA(IP4_20_18, SPEEDIN),
+ PINMUX_IPSR_GPSR(IP4_20_18, CS1_N_A26),
+ PINMUX_IPSR_GPSR(IP4_20_18, SPEEDIN),
PINMUX_IPSR_MSEL(IP4_20_18, VI0_R7, SEL_VI0_0),
PINMUX_IPSR_MSEL(IP4_20_18, VI0_R7_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP4_20_18, VI2_CLK, SEL_VI2_0),
PINMUX_IPSR_MSEL(IP4_20_18, VI2_CLK_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP4_23_21, EX_CS0_N),
+ PINMUX_IPSR_GPSR(IP4_23_21, EX_CS0_N),
PINMUX_IPSR_MSEL(IP4_23_21, HRX1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP4_23_21, VI1_G5, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_23_21, VI1_G5_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP4_23_21, VI2_R0),
+ PINMUX_IPSR_GPSR(IP4_23_21, VI2_R0),
PINMUX_IPSR_MSEL(IP4_23_21, HTX0_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP4_23_21, MSIOF0_SS1_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP4_26_24, EX_CS1_N),
- PINMUX_IPSR_DATA(IP4_26_24, GPS_CLK),
+ PINMUX_IPSR_GPSR(IP4_26_24, EX_CS1_N),
+ PINMUX_IPSR_GPSR(IP4_26_24, GPS_CLK),
PINMUX_IPSR_MSEL(IP4_26_24, HCTS1_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP4_26_24, VI1_FIELD, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_26_24, VI1_FIELD_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP4_26_24, VI2_R1),
- PINMUX_IPSR_DATA(IP4_29_27, EX_CS2_N),
- PINMUX_IPSR_DATA(IP4_29_27, GPS_SIGN),
+ PINMUX_IPSR_GPSR(IP4_26_24, VI2_R1),
+ PINMUX_IPSR_GPSR(IP4_29_27, EX_CS2_N),
+ PINMUX_IPSR_GPSR(IP4_29_27, GPS_SIGN),
PINMUX_IPSR_MSEL(IP4_29_27, HRTS1_N_B, SEL_HSCIF1_1),
- PINMUX_IPSR_DATA(IP4_29_27, VI3_CLKENB),
+ PINMUX_IPSR_GPSR(IP4_29_27, VI3_CLKENB),
PINMUX_IPSR_MSEL(IP4_29_27, VI1_G0, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP4_29_27, VI1_G0_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP4_29_27, VI2_R2),
+ PINMUX_IPSR_GPSR(IP4_29_27, VI2_R2),
- PINMUX_IPSR_DATA(IP5_2_0, EX_CS3_N),
- PINMUX_IPSR_DATA(IP5_2_0, GPS_MAG),
- PINMUX_IPSR_DATA(IP5_2_0, VI3_FIELD),
+ PINMUX_IPSR_GPSR(IP5_2_0, EX_CS3_N),
+ PINMUX_IPSR_GPSR(IP5_2_0, GPS_MAG),
+ PINMUX_IPSR_GPSR(IP5_2_0, VI3_FIELD),
PINMUX_IPSR_MSEL(IP5_2_0, VI1_G1, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP5_2_0, VI1_G1_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP5_2_0, VI2_R3),
- PINMUX_IPSR_DATA(IP5_5_3, EX_CS4_N),
+ PINMUX_IPSR_GPSR(IP5_2_0, VI2_R3),
+ PINMUX_IPSR_GPSR(IP5_5_3, EX_CS4_N),
PINMUX_IPSR_MSEL(IP5_5_3, MSIOF1_SCK_B, SEL_SOF1_1),
- PINMUX_IPSR_DATA(IP5_5_3, VI3_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP5_5_3, VI3_HSYNC_N),
PINMUX_IPSR_MSEL(IP5_5_3, VI2_HSYNC_N, SEL_VI2_0),
PINMUX_IPSR_MSEL(IP5_5_3, IIC1_SCL, SEL_IIC1_0),
PINMUX_IPSR_MSEL(IP5_5_3, VI2_HSYNC_N_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP5_5_3, INTC_EN0_N),
+ PINMUX_IPSR_GPSR(IP5_5_3, INTC_EN0_N),
PINMUX_IPSR_MSEL(IP5_5_3, I2C1_SCL, SEL_I2C1_0),
- PINMUX_IPSR_DATA(IP5_9_6, EX_CS5_N),
+ PINMUX_IPSR_GPSR(IP5_9_6, EX_CS5_N),
PINMUX_IPSR_MSEL(IP5_9_6, CAN0_RX, SEL_CAN0_0),
PINMUX_IPSR_MSEL(IP5_9_6, MSIOF1_RXD_B, SEL_SOF1_1),
- PINMUX_IPSR_DATA(IP5_9_6, VI3_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP5_9_6, VI3_VSYNC_N),
PINMUX_IPSR_MSEL(IP5_9_6, VI1_G2, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP5_9_6, VI1_G2_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP5_9_6, VI2_R4),
+ PINMUX_IPSR_GPSR(IP5_9_6, VI2_R4),
PINMUX_IPSR_MSEL(IP5_9_6, IIC1_SDA, SEL_IIC1_0),
- PINMUX_IPSR_DATA(IP5_9_6, INTC_EN1_N),
+ PINMUX_IPSR_GPSR(IP5_9_6, INTC_EN1_N),
PINMUX_IPSR_MSEL(IP5_9_6, I2C1_SDA, SEL_I2C1_0),
- PINMUX_IPSR_DATA(IP5_12_10, BS_N),
+ PINMUX_IPSR_GPSR(IP5_12_10, BS_N),
PINMUX_IPSR_MSEL(IP5_12_10, IETX, SEL_IEB_0),
PINMUX_IPSR_MSEL(IP5_12_10, HTX1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP5_12_10, CAN1_TX, SEL_CAN1_0),
- PINMUX_IPSR_DATA(IP5_12_10, DRACK0),
+ PINMUX_IPSR_GPSR(IP5_12_10, DRACK0),
PINMUX_IPSR_MSEL(IP5_12_10, IETX_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP5_14_13, RD_N),
+ PINMUX_IPSR_GPSR(IP5_14_13, RD_N),
PINMUX_IPSR_MSEL(IP5_14_13, CAN0_TX, SEL_CAN0_0),
PINMUX_IPSR_MSEL(IP5_14_13, SCIFA0_SCK_B, SEL_SCFA_1),
- PINMUX_IPSR_DATA(IP5_17_15, RD_WR_N),
+ PINMUX_IPSR_GPSR(IP5_17_15, RD_WR_N),
PINMUX_IPSR_MSEL(IP5_17_15, VI1_G3, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP5_17_15, VI1_G3_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP5_17_15, VI2_R5),
+ PINMUX_IPSR_GPSR(IP5_17_15, VI2_R5),
PINMUX_IPSR_MSEL(IP5_17_15, SCIFA0_RXD_B, SEL_SCFA_1),
- PINMUX_IPSR_DATA(IP5_17_15, INTC_IRQ4_N),
- PINMUX_IPSR_DATA(IP5_20_18, WE0_N),
+ PINMUX_IPSR_GPSR(IP5_17_15, INTC_IRQ4_N),
+ PINMUX_IPSR_GPSR(IP5_20_18, WE0_N),
PINMUX_IPSR_MSEL(IP5_20_18, IECLK, SEL_IEB_0),
PINMUX_IPSR_MSEL(IP5_20_18, CAN_CLK, SEL_CANCLK_0),
PINMUX_IPSR_MSEL(IP5_20_18, VI2_VSYNC_N, SEL_VI2_0),
PINMUX_IPSR_MSEL(IP5_20_18, SCIFA0_TXD_B, SEL_SCFA_1),
PINMUX_IPSR_MSEL(IP5_20_18, VI2_VSYNC_N_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP5_23_21, WE1_N),
+ PINMUX_IPSR_GPSR(IP5_23_21, WE1_N),
PINMUX_IPSR_MSEL(IP5_23_21, IERX, SEL_IEB_0),
PINMUX_IPSR_MSEL(IP5_23_21, CAN1_RX, SEL_CAN1_0),
PINMUX_IPSR_MSEL(IP5_23_21, VI1_G4, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP5_23_21, VI1_G4_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP5_23_21, VI2_R6),
+ PINMUX_IPSR_GPSR(IP5_23_21, VI2_R6),
PINMUX_IPSR_MSEL(IP5_23_21, SCIFA0_CTS_N_B, SEL_SCFA_1),
PINMUX_IPSR_MSEL(IP5_23_21, IERX_C, SEL_IEB_2),
PINMUX_IPSR_MSEL(IP5_26_24, EX_WAIT0, SEL_LBS_0),
- PINMUX_IPSR_DATA(IP5_26_24, IRQ3),
- PINMUX_IPSR_DATA(IP5_26_24, INTC_IRQ3_N),
+ PINMUX_IPSR_GPSR(IP5_26_24, IRQ3),
+ PINMUX_IPSR_GPSR(IP5_26_24, INTC_IRQ3_N),
PINMUX_IPSR_MSEL(IP5_26_24, VI3_CLK, SEL_VI3_0),
PINMUX_IPSR_MSEL(IP5_26_24, SCIFA0_RTS_N_B, SEL_SCFA_1),
PINMUX_IPSR_MSEL(IP5_26_24, HRX0_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP5_26_24, MSIOF0_SCK_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP5_29_27, DREQ0_N),
+ PINMUX_IPSR_GPSR(IP5_29_27, DREQ0_N),
PINMUX_IPSR_MSEL(IP5_29_27, VI1_HSYNC_N, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP5_29_27, VI1_HSYNC_N_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP5_29_27, VI2_R7),
+ PINMUX_IPSR_GPSR(IP5_29_27, VI2_R7),
PINMUX_IPSR_MSEL(IP5_29_27, SSI_SCK78_C, SEL_SSI7_2),
PINMUX_IPSR_MSEL(IP5_29_27, SSI_WS78_B, SEL_SSI7_1),
- PINMUX_IPSR_DATA(IP6_2_0, DACK0),
- PINMUX_IPSR_DATA(IP6_2_0, IRQ0),
- PINMUX_IPSR_DATA(IP6_2_0, INTC_IRQ0_N),
+ PINMUX_IPSR_GPSR(IP6_2_0, DACK0),
+ PINMUX_IPSR_GPSR(IP6_2_0, IRQ0),
+ PINMUX_IPSR_GPSR(IP6_2_0, INTC_IRQ0_N),
PINMUX_IPSR_MSEL(IP6_2_0, SSI_SCK6_B, SEL_SSI6_1),
PINMUX_IPSR_MSEL(IP6_2_0, VI1_VSYNC_N, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP6_2_0, VI1_VSYNC_N_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP6_2_0, SSI_WS78_C, SEL_SSI7_2),
- PINMUX_IPSR_DATA(IP6_5_3, DREQ1_N),
+ PINMUX_IPSR_GPSR(IP6_5_3, DREQ1_N),
PINMUX_IPSR_MSEL(IP6_5_3, VI1_CLKENB, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP6_5_3, VI1_CLKENB_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP6_5_3, SSI_SDATA7_C, SEL_SSI7_2),
PINMUX_IPSR_MSEL(IP6_5_3, SSI_SCK78_B, SEL_SSI7_1),
- PINMUX_IPSR_DATA(IP6_8_6, DACK1),
- PINMUX_IPSR_DATA(IP6_8_6, IRQ1),
- PINMUX_IPSR_DATA(IP6_8_6, INTC_IRQ1_N),
+ PINMUX_IPSR_GPSR(IP6_8_6, DACK1),
+ PINMUX_IPSR_GPSR(IP6_8_6, IRQ1),
+ PINMUX_IPSR_GPSR(IP6_8_6, INTC_IRQ1_N),
PINMUX_IPSR_MSEL(IP6_8_6, SSI_WS6_B, SEL_SSI6_1),
PINMUX_IPSR_MSEL(IP6_8_6, SSI_SDATA8_C, SEL_SSI8_2),
- PINMUX_IPSR_DATA(IP6_10_9, DREQ2_N),
+ PINMUX_IPSR_GPSR(IP6_10_9, DREQ2_N),
PINMUX_IPSR_MSEL(IP6_10_9, HSCK1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP6_10_9, HCTS0_N_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP6_10_9, MSIOF0_TXD_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP6_13_11, DACK2),
- PINMUX_IPSR_DATA(IP6_13_11, IRQ2),
- PINMUX_IPSR_DATA(IP6_13_11, INTC_IRQ2_N),
+ PINMUX_IPSR_GPSR(IP6_13_11, DACK2),
+ PINMUX_IPSR_GPSR(IP6_13_11, IRQ2),
+ PINMUX_IPSR_GPSR(IP6_13_11, INTC_IRQ2_N),
PINMUX_IPSR_MSEL(IP6_13_11, SSI_SDATA6_B, SEL_SSI6_1),
PINMUX_IPSR_MSEL(IP6_13_11, HRTS0_N_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP6_13_11, MSIOF0_RXD_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP6_16_14, ETH_CRS_DV),
+ PINMUX_IPSR_GPSR(IP6_16_14, ETH_CRS_DV),
PINMUX_IPSR_MSEL(IP6_16_14, STP_ISCLK_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP6_16_14, TS_SDEN0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP6_16_14, GLO_Q0_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP6_16_14, IIC2_SCL_E, SEL_IIC2_4),
PINMUX_IPSR_MSEL(IP6_16_14, I2C2_SCL_E, SEL_I2C2_4),
- PINMUX_IPSR_DATA(IP6_19_17, ETH_RX_ER),
+ PINMUX_IPSR_GPSR(IP6_19_17, ETH_RX_ER),
PINMUX_IPSR_MSEL(IP6_19_17, STP_ISD_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP6_19_17, TS_SPSYNC0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP6_19_17, GLO_Q1_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP6_19_17, IIC2_SDA_E, SEL_IIC2_4),
PINMUX_IPSR_MSEL(IP6_19_17, I2C2_SDA_E, SEL_I2C2_4),
- PINMUX_IPSR_DATA(IP6_22_20, ETH_RXD0),
+ PINMUX_IPSR_GPSR(IP6_22_20, ETH_RXD0),
PINMUX_IPSR_MSEL(IP6_22_20, STP_ISEN_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP6_22_20, TS_SDAT0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP6_22_20, GLO_I0_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP6_22_20, SCIFB1_SCK_G, SEL_SCIFB1_6),
PINMUX_IPSR_MSEL(IP6_22_20, SCK1_E, SEL_SCIF1_4),
- PINMUX_IPSR_DATA(IP6_25_23, ETH_RXD1),
+ PINMUX_IPSR_GPSR(IP6_25_23, ETH_RXD1),
PINMUX_IPSR_MSEL(IP6_25_23, HRX0_E, SEL_HSCIF0_4),
PINMUX_IPSR_MSEL(IP6_25_23, STP_ISSYNC_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP6_25_23, TS_SCK0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP6_25_23, GLO_I1_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP6_25_23, SCIFB1_RXD_G, SEL_SCIFB1_6),
PINMUX_IPSR_MSEL(IP6_25_23, RX1_E, SEL_SCIF1_4),
- PINMUX_IPSR_DATA(IP6_28_26, ETH_LINK),
+ PINMUX_IPSR_GPSR(IP6_28_26, ETH_LINK),
PINMUX_IPSR_MSEL(IP6_28_26, HTX0_E, SEL_HSCIF0_4),
PINMUX_IPSR_MSEL(IP6_28_26, STP_IVCXO27_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP6_28_26, SCIFB1_TXD_G, SEL_SCIFB1_6),
PINMUX_IPSR_MSEL(IP6_28_26, TX1_E, SEL_SCIF1_4),
- PINMUX_IPSR_DATA(IP6_31_29, ETH_REF_CLK),
+ PINMUX_IPSR_GPSR(IP6_31_29, ETH_REF_CLK),
PINMUX_IPSR_MSEL(IP6_31_29, HCTS0_N_E, SEL_HSCIF0_4),
PINMUX_IPSR_MSEL(IP6_31_29, STP_IVCXO27_1_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP6_31_29, HRX0_F, SEL_HSCIF0_5),
- PINMUX_IPSR_DATA(IP7_2_0, ETH_MDIO),
+ PINMUX_IPSR_GPSR(IP7_2_0, ETH_MDIO),
PINMUX_IPSR_MSEL(IP7_2_0, HRTS0_N_E, SEL_HSCIF0_4),
PINMUX_IPSR_MSEL(IP7_2_0, SIM0_D_C, SEL_SIM_2),
PINMUX_IPSR_MSEL(IP7_2_0, HCTS0_N_F, SEL_HSCIF0_5),
- PINMUX_IPSR_DATA(IP7_5_3, ETH_TXD1),
+ PINMUX_IPSR_GPSR(IP7_5_3, ETH_TXD1),
PINMUX_IPSR_MSEL(IP7_5_3, HTX0_F, SEL_HSCIF0_5),
PINMUX_IPSR_MSEL(IP7_5_3, BPFCLK_G, SEL_FM_6),
- PINMUX_IPSR_DATA(IP7_7_6, ETH_TX_EN),
+ PINMUX_IPSR_GPSR(IP7_7_6, ETH_TX_EN),
PINMUX_IPSR_MSEL(IP7_7_6, SIM0_CLK_C, SEL_SIM_2),
PINMUX_IPSR_MSEL(IP7_7_6, HRTS0_N_F, SEL_HSCIF0_5),
- PINMUX_IPSR_DATA(IP7_9_8, ETH_MAGIC),
+ PINMUX_IPSR_GPSR(IP7_9_8, ETH_MAGIC),
PINMUX_IPSR_MSEL(IP7_9_8, SIM0_RST_C, SEL_SIM_2),
- PINMUX_IPSR_DATA(IP7_12_10, ETH_TXD0),
+ PINMUX_IPSR_GPSR(IP7_12_10, ETH_TXD0),
PINMUX_IPSR_MSEL(IP7_12_10, STP_ISCLK_1_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP7_12_10, TS_SDEN1_C, SEL_TSIF1_2),
PINMUX_IPSR_MSEL(IP7_12_10, GLO_SCLK_C, SEL_GPS_2),
- PINMUX_IPSR_DATA(IP7_15_13, ETH_MDC),
+ PINMUX_IPSR_GPSR(IP7_15_13, ETH_MDC),
PINMUX_IPSR_MSEL(IP7_15_13, STP_ISD_1_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP7_15_13, TS_SPSYNC1_C, SEL_TSIF1_2),
PINMUX_IPSR_MSEL(IP7_15_13, GLO_SDATA_C, SEL_GPS_2),
- PINMUX_IPSR_DATA(IP7_18_16, PWM0),
+ PINMUX_IPSR_GPSR(IP7_18_16, PWM0),
PINMUX_IPSR_MSEL(IP7_18_16, SCIFA2_SCK_C, SEL_SCIFA2_2),
PINMUX_IPSR_MSEL(IP7_18_16, STP_ISEN_1_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP7_18_16, TS_SDAT1_C, SEL_TSIF1_2),
PINMUX_IPSR_MSEL(IP7_18_16, GLO_SS_C, SEL_GPS_2),
- PINMUX_IPSR_DATA(IP7_21_19, PWM1),
+ PINMUX_IPSR_GPSR(IP7_21_19, PWM1),
PINMUX_IPSR_MSEL(IP7_21_19, SCIFA2_TXD_C, SEL_SCIFA2_2),
PINMUX_IPSR_MSEL(IP7_21_19, STP_ISSYNC_1_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP7_21_19, TS_SCK1_C, SEL_TSIF1_2),
PINMUX_IPSR_MSEL(IP7_21_19, GLO_RFON_C, SEL_GPS_2),
- PINMUX_IPSR_DATA(IP7_21_19, PCMOE_N),
- PINMUX_IPSR_DATA(IP7_24_22, PWM2),
- PINMUX_IPSR_DATA(IP7_24_22, PWMFSW0),
+ PINMUX_IPSR_GPSR(IP7_21_19, PCMOE_N),
+ PINMUX_IPSR_GPSR(IP7_24_22, PWM2),
+ PINMUX_IPSR_GPSR(IP7_24_22, PWMFSW0),
PINMUX_IPSR_MSEL(IP7_24_22, SCIFA2_RXD_C, SEL_SCIFA2_2),
- PINMUX_IPSR_DATA(IP7_24_22, PCMWE_N),
+ PINMUX_IPSR_GPSR(IP7_24_22, PCMWE_N),
PINMUX_IPSR_MSEL(IP7_24_22, IECLK_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP7_26_25, DU_DOTCLKIN1),
- PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKC),
- PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKOUT_C),
+ PINMUX_IPSR_GPSR(IP7_26_25, DU_DOTCLKIN1),
+ PINMUX_IPSR_GPSR(IP7_26_25, AUDIO_CLKC),
+ PINMUX_IPSR_GPSR(IP7_26_25, AUDIO_CLKOUT_C),
PINMUX_IPSR_MSEL(IP7_28_27, VI0_CLK, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP7_28_27, ATACS00_N),
- PINMUX_IPSR_DATA(IP7_28_27, AVB_RXD1),
+ PINMUX_IPSR_GPSR(IP7_28_27, ATACS00_N),
+ PINMUX_IPSR_GPSR(IP7_28_27, AVB_RXD1),
PINMUX_IPSR_MSEL(IP7_30_29, VI0_DATA0_VI0_B0, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP7_30_29, ATACS10_N),
- PINMUX_IPSR_DATA(IP7_30_29, AVB_RXD2),
+ PINMUX_IPSR_GPSR(IP7_30_29, ATACS10_N),
+ PINMUX_IPSR_GPSR(IP7_30_29, AVB_RXD2),
PINMUX_IPSR_MSEL(IP8_1_0, VI0_DATA1_VI0_B1, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP8_1_0, ATARD0_N),
- PINMUX_IPSR_DATA(IP8_1_0, AVB_RXD3),
+ PINMUX_IPSR_GPSR(IP8_1_0, ATARD0_N),
+ PINMUX_IPSR_GPSR(IP8_1_0, AVB_RXD3),
PINMUX_IPSR_MSEL(IP8_3_2, VI0_DATA2_VI0_B2, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP8_3_2, ATAWR0_N),
- PINMUX_IPSR_DATA(IP8_3_2, AVB_RXD4),
+ PINMUX_IPSR_GPSR(IP8_3_2, ATAWR0_N),
+ PINMUX_IPSR_GPSR(IP8_3_2, AVB_RXD4),
PINMUX_IPSR_MSEL(IP8_5_4, VI0_DATA3_VI0_B3, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP8_5_4, ATADIR0_N),
- PINMUX_IPSR_DATA(IP8_5_4, AVB_RXD5),
+ PINMUX_IPSR_GPSR(IP8_5_4, ATADIR0_N),
+ PINMUX_IPSR_GPSR(IP8_5_4, AVB_RXD5),
PINMUX_IPSR_MSEL(IP8_7_6, VI0_DATA4_VI0_B4, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP8_7_6, ATAG0_N),
- PINMUX_IPSR_DATA(IP8_7_6, AVB_RXD6),
+ PINMUX_IPSR_GPSR(IP8_7_6, ATAG0_N),
+ PINMUX_IPSR_GPSR(IP8_7_6, AVB_RXD6),
PINMUX_IPSR_MSEL(IP8_9_8, VI0_DATA5_VI0_B5, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP8_9_8, EX_WAIT1),
- PINMUX_IPSR_DATA(IP8_9_8, AVB_RXD7),
+ PINMUX_IPSR_GPSR(IP8_9_8, EX_WAIT1),
+ PINMUX_IPSR_GPSR(IP8_9_8, AVB_RXD7),
PINMUX_IPSR_MSEL(IP8_11_10, VI0_DATA6_VI0_B6, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP8_11_10, AVB_RX_ER),
+ PINMUX_IPSR_GPSR(IP8_11_10, AVB_RX_ER),
PINMUX_IPSR_MSEL(IP8_13_12, VI0_DATA7_VI0_B7, SEL_VI0_0),
- PINMUX_IPSR_DATA(IP8_13_12, AVB_RX_CLK),
+ PINMUX_IPSR_GPSR(IP8_13_12, AVB_RX_CLK),
PINMUX_IPSR_MSEL(IP8_15_14, VI1_CLK, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP8_15_14, AVB_RX_DV),
+ PINMUX_IPSR_GPSR(IP8_15_14, AVB_RX_DV),
PINMUX_IPSR_MSEL(IP8_17_16, VI1_DATA0_VI1_B0, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP8_17_16, SCIFA1_SCK_D, SEL_SCIFA1_3),
- PINMUX_IPSR_DATA(IP8_17_16, AVB_CRS),
+ PINMUX_IPSR_GPSR(IP8_17_16, AVB_CRS),
PINMUX_IPSR_MSEL(IP8_19_18, VI1_DATA1_VI1_B1, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP8_19_18, SCIFA1_RXD_D, SEL_SCIFA1_3),
- PINMUX_IPSR_DATA(IP8_19_18, AVB_MDC),
+ PINMUX_IPSR_GPSR(IP8_19_18, AVB_MDC),
PINMUX_IPSR_MSEL(IP8_21_20, VI1_DATA2_VI1_B2, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP8_21_20, SCIFA1_TXD_D, SEL_SCIFA1_3),
- PINMUX_IPSR_DATA(IP8_21_20, AVB_MDIO),
+ PINMUX_IPSR_GPSR(IP8_21_20, AVB_MDIO),
PINMUX_IPSR_MSEL(IP8_23_22, VI1_DATA3_VI1_B3, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP8_23_22, SCIFA1_CTS_N_D, SEL_SCIFA1_3),
- PINMUX_IPSR_DATA(IP8_23_22, AVB_GTX_CLK),
+ PINMUX_IPSR_GPSR(IP8_23_22, AVB_GTX_CLK),
PINMUX_IPSR_MSEL(IP8_25_24, VI1_DATA4_VI1_B4, SEL_VI1_0),
PINMUX_IPSR_MSEL(IP8_25_24, SCIFA1_RTS_N_D, SEL_SCIFA1_3),
- PINMUX_IPSR_DATA(IP8_25_24, AVB_MAGIC),
+ PINMUX_IPSR_GPSR(IP8_25_24, AVB_MAGIC),
PINMUX_IPSR_MSEL(IP8_26, VI1_DATA5_VI1_B5, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP8_26, AVB_PHY_INT),
+ PINMUX_IPSR_GPSR(IP8_26, AVB_PHY_INT),
PINMUX_IPSR_MSEL(IP8_27, VI1_DATA6_VI1_B6, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP8_27, AVB_GTXREFCLK),
- PINMUX_IPSR_DATA(IP8_28, SD0_CLK),
+ PINMUX_IPSR_GPSR(IP8_27, AVB_GTXREFCLK),
+ PINMUX_IPSR_GPSR(IP8_28, SD0_CLK),
PINMUX_IPSR_MSEL(IP8_28, VI1_DATA0_VI1_B0_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP8_30_29, SD0_CMD),
+ PINMUX_IPSR_GPSR(IP8_30_29, SD0_CMD),
PINMUX_IPSR_MSEL(IP8_30_29, SCIFB1_SCK_B, SEL_SCIFB1_1),
PINMUX_IPSR_MSEL(IP8_30_29, VI1_DATA1_VI1_B1_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP9_1_0, SD0_DAT0),
+ PINMUX_IPSR_GPSR(IP9_1_0, SD0_DAT0),
PINMUX_IPSR_MSEL(IP9_1_0, SCIFB1_RXD_B, SEL_SCIFB1_1),
PINMUX_IPSR_MSEL(IP9_1_0, VI1_DATA2_VI1_B2_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP9_3_2, SD0_DAT1),
+ PINMUX_IPSR_GPSR(IP9_3_2, SD0_DAT1),
PINMUX_IPSR_MSEL(IP9_3_2, SCIFB1_TXD_B, SEL_SCIFB1_1),
PINMUX_IPSR_MSEL(IP9_3_2, VI1_DATA3_VI1_B3_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP9_5_4, SD0_DAT2),
+ PINMUX_IPSR_GPSR(IP9_5_4, SD0_DAT2),
PINMUX_IPSR_MSEL(IP9_5_4, SCIFB1_CTS_N_B, SEL_SCIFB1_1),
PINMUX_IPSR_MSEL(IP9_5_4, VI1_DATA4_VI1_B4_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP9_7_6, SD0_DAT3),
+ PINMUX_IPSR_GPSR(IP9_7_6, SD0_DAT3),
PINMUX_IPSR_MSEL(IP9_7_6, SCIFB1_RTS_N_B, SEL_SCIFB1_1),
PINMUX_IPSR_MSEL(IP9_7_6, VI1_DATA5_VI1_B5_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP9_11_8, SD0_CD),
- PINMUX_IPSR_DATA(IP9_11_8, MMC0_D6),
+ PINMUX_IPSR_GPSR(IP9_11_8, SD0_CD),
+ PINMUX_IPSR_GPSR(IP9_11_8, MMC0_D6),
PINMUX_IPSR_MSEL(IP9_11_8, TS_SDEN0_B, SEL_TSIF0_1),
- PINMUX_IPSR_DATA(IP9_11_8, USB0_EXTP),
+ PINMUX_IPSR_GPSR(IP9_11_8, USB0_EXTP),
PINMUX_IPSR_MSEL(IP9_11_8, GLO_SCLK, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP9_11_8, VI1_DATA6_VI1_B6_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP9_11_8, IIC1_SCL_B, SEL_IIC1_1),
PINMUX_IPSR_MSEL(IP9_11_8, I2C1_SCL_B, SEL_I2C1_1),
PINMUX_IPSR_MSEL(IP9_11_8, VI2_DATA6_VI2_B6_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP9_15_12, SD0_WP),
- PINMUX_IPSR_DATA(IP9_15_12, MMC0_D7),
+ PINMUX_IPSR_GPSR(IP9_15_12, SD0_WP),
+ PINMUX_IPSR_GPSR(IP9_15_12, MMC0_D7),
PINMUX_IPSR_MSEL(IP9_15_12, TS_SPSYNC0_B, SEL_TSIF0_1),
- PINMUX_IPSR_DATA(IP9_15_12, USB0_IDIN),
+ PINMUX_IPSR_GPSR(IP9_15_12, USB0_IDIN),
PINMUX_IPSR_MSEL(IP9_15_12, GLO_SDATA, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP9_15_12, VI1_DATA7_VI1_B7_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP9_15_12, IIC1_SDA_B, SEL_IIC1_1),
PINMUX_IPSR_MSEL(IP9_15_12, I2C1_SDA_B, SEL_I2C1_1),
PINMUX_IPSR_MSEL(IP9_15_12, VI2_DATA7_VI2_B7_B, SEL_VI2_1),
- PINMUX_IPSR_DATA(IP9_17_16, SD1_CLK),
- PINMUX_IPSR_DATA(IP9_17_16, AVB_TX_EN),
- PINMUX_IPSR_DATA(IP9_19_18, SD1_CMD),
- PINMUX_IPSR_DATA(IP9_19_18, AVB_TX_ER),
+ PINMUX_IPSR_GPSR(IP9_17_16, SD1_CLK),
+ PINMUX_IPSR_GPSR(IP9_17_16, AVB_TX_EN),
+ PINMUX_IPSR_GPSR(IP9_19_18, SD1_CMD),
+ PINMUX_IPSR_GPSR(IP9_19_18, AVB_TX_ER),
PINMUX_IPSR_MSEL(IP9_19_18, SCIFB0_SCK_B, SEL_SCIFB_1),
- PINMUX_IPSR_DATA(IP9_21_20, SD1_DAT0),
- PINMUX_IPSR_DATA(IP9_21_20, AVB_TX_CLK),
+ PINMUX_IPSR_GPSR(IP9_21_20, SD1_DAT0),
+ PINMUX_IPSR_GPSR(IP9_21_20, AVB_TX_CLK),
PINMUX_IPSR_MSEL(IP9_21_20, SCIFB0_RXD_B, SEL_SCIFB_1),
- PINMUX_IPSR_DATA(IP9_23_22, SD1_DAT1),
- PINMUX_IPSR_DATA(IP9_23_22, AVB_LINK),
+ PINMUX_IPSR_GPSR(IP9_23_22, SD1_DAT1),
+ PINMUX_IPSR_GPSR(IP9_23_22, AVB_LINK),
PINMUX_IPSR_MSEL(IP9_23_22, SCIFB0_TXD_B, SEL_SCIFB_1),
- PINMUX_IPSR_DATA(IP9_25_24, SD1_DAT2),
- PINMUX_IPSR_DATA(IP9_25_24, AVB_COL),
+ PINMUX_IPSR_GPSR(IP9_25_24, SD1_DAT2),
+ PINMUX_IPSR_GPSR(IP9_25_24, AVB_COL),
PINMUX_IPSR_MSEL(IP9_25_24, SCIFB0_CTS_N_B, SEL_SCIFB_1),
- PINMUX_IPSR_DATA(IP9_27_26, SD1_DAT3),
- PINMUX_IPSR_DATA(IP9_27_26, AVB_RXD0),
+ PINMUX_IPSR_GPSR(IP9_27_26, SD1_DAT3),
+ PINMUX_IPSR_GPSR(IP9_27_26, AVB_RXD0),
PINMUX_IPSR_MSEL(IP9_27_26, SCIFB0_RTS_N_B, SEL_SCIFB_1),
- PINMUX_IPSR_DATA(IP9_31_28, SD1_CD),
- PINMUX_IPSR_DATA(IP9_31_28, MMC1_D6),
+ PINMUX_IPSR_GPSR(IP9_31_28, SD1_CD),
+ PINMUX_IPSR_GPSR(IP9_31_28, MMC1_D6),
PINMUX_IPSR_MSEL(IP9_31_28, TS_SDEN1, SEL_TSIF1_0),
- PINMUX_IPSR_DATA(IP9_31_28, USB1_EXTP),
+ PINMUX_IPSR_GPSR(IP9_31_28, USB1_EXTP),
PINMUX_IPSR_MSEL(IP9_31_28, GLO_SS, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP9_31_28, VI0_CLK_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP9_31_28, IIC2_SCL_D, SEL_IIC2_3),
@@ -1330,24 +1330,24 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP9_31_28, SIM0_CLK_B, SEL_SIM_1),
PINMUX_IPSR_MSEL(IP9_31_28, VI3_CLK_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP10_3_0, SD1_WP),
- PINMUX_IPSR_DATA(IP10_3_0, MMC1_D7),
+ PINMUX_IPSR_GPSR(IP10_3_0, SD1_WP),
+ PINMUX_IPSR_GPSR(IP10_3_0, MMC1_D7),
PINMUX_IPSR_MSEL(IP10_3_0, TS_SPSYNC1, SEL_TSIF1_0),
- PINMUX_IPSR_DATA(IP10_3_0, USB1_IDIN),
+ PINMUX_IPSR_GPSR(IP10_3_0, USB1_IDIN),
PINMUX_IPSR_MSEL(IP10_3_0, GLO_RFON, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP10_3_0, VI1_CLK_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP10_3_0, IIC2_SDA_D, SEL_IIC2_3),
PINMUX_IPSR_MSEL(IP10_3_0, I2C2_SDA_D, SEL_I2C2_3),
PINMUX_IPSR_MSEL(IP10_3_0, SIM0_D_B, SEL_SIM_1),
- PINMUX_IPSR_DATA(IP10_6_4, SD2_CLK),
- PINMUX_IPSR_DATA(IP10_6_4, MMC0_CLK),
+ PINMUX_IPSR_GPSR(IP10_6_4, SD2_CLK),
+ PINMUX_IPSR_GPSR(IP10_6_4, MMC0_CLK),
PINMUX_IPSR_MSEL(IP10_6_4, SIM0_CLK, SEL_SIM_0),
PINMUX_IPSR_MSEL(IP10_6_4, VI0_DATA0_VI0_B0_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP10_6_4, TS_SDEN0_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP10_6_4, GLO_SCLK_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_6_4, VI3_DATA0_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP10_10_7, SD2_CMD),
- PINMUX_IPSR_DATA(IP10_10_7, MMC0_CMD),
+ PINMUX_IPSR_GPSR(IP10_10_7, SD2_CMD),
+ PINMUX_IPSR_GPSR(IP10_10_7, MMC0_CMD),
PINMUX_IPSR_MSEL(IP10_10_7, SIM0_D, SEL_SIM_0),
PINMUX_IPSR_MSEL(IP10_10_7, VI0_DATA1_VI0_B1_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP10_10_7, SCIFB1_SCK_E, SEL_SCIFB1_4),
@@ -1355,8 +1355,8 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_10_7, TS_SPSYNC0_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP10_10_7, GLO_SDATA_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_10_7, VI3_DATA1_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP10_14_11, SD2_DAT0),
- PINMUX_IPSR_DATA(IP10_14_11, MMC0_D0),
+ PINMUX_IPSR_GPSR(IP10_14_11, SD2_DAT0),
+ PINMUX_IPSR_GPSR(IP10_14_11, MMC0_D0),
PINMUX_IPSR_MSEL(IP10_14_11, FMCLK_B, SEL_FM_1),
PINMUX_IPSR_MSEL(IP10_14_11, VI0_DATA2_VI0_B2_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP10_14_11, SCIFB1_RXD_E, SEL_SCIFB1_4),
@@ -1364,8 +1364,8 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_14_11, TS_SDAT0_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP10_14_11, GLO_SS_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_14_11, VI3_DATA2_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP10_18_15, SD2_DAT1),
- PINMUX_IPSR_DATA(IP10_18_15, MMC0_D1),
+ PINMUX_IPSR_GPSR(IP10_18_15, SD2_DAT1),
+ PINMUX_IPSR_GPSR(IP10_18_15, MMC0_D1),
PINMUX_IPSR_MSEL(IP10_18_15, FMIN_B, SEL_FM_1),
PINMUX_IPSR_MSEL(IP10_18_15, VI0_DATA3_VI0_B3_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP10_18_15, SCIFB1_TXD_E, SEL_SCIFB1_4),
@@ -1373,26 +1373,26 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_18_15, TS_SCK0_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP10_18_15, GLO_RFON_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_18_15, VI3_DATA3_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP10_22_19, SD2_DAT2),
- PINMUX_IPSR_DATA(IP10_22_19, MMC0_D2),
+ PINMUX_IPSR_GPSR(IP10_22_19, SD2_DAT2),
+ PINMUX_IPSR_GPSR(IP10_22_19, MMC0_D2),
PINMUX_IPSR_MSEL(IP10_22_19, BPFCLK_B, SEL_FM_1),
PINMUX_IPSR_MSEL(IP10_22_19, VI0_DATA4_VI0_B4_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP10_22_19, HRX0_D, SEL_HSCIF0_3),
PINMUX_IPSR_MSEL(IP10_22_19, TS_SDEN1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP10_22_19, GLO_Q0_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_22_19, VI3_DATA4_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP10_25_23, SD2_DAT3),
- PINMUX_IPSR_DATA(IP10_25_23, MMC0_D3),
+ PINMUX_IPSR_GPSR(IP10_25_23, SD2_DAT3),
+ PINMUX_IPSR_GPSR(IP10_25_23, MMC0_D3),
PINMUX_IPSR_MSEL(IP10_25_23, SIM0_RST, SEL_SIM_0),
PINMUX_IPSR_MSEL(IP10_25_23, VI0_DATA5_VI0_B5_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP10_25_23, HTX0_D, SEL_HSCIF0_3),
PINMUX_IPSR_MSEL(IP10_25_23, TS_SPSYNC1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP10_25_23, GLO_Q1_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_25_23, VI3_DATA5_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP10_29_26, SD2_CD),
- PINMUX_IPSR_DATA(IP10_29_26, MMC0_D4),
+ PINMUX_IPSR_GPSR(IP10_29_26, SD2_CD),
+ PINMUX_IPSR_GPSR(IP10_29_26, MMC0_D4),
PINMUX_IPSR_MSEL(IP10_29_26, TS_SDAT0_B, SEL_TSIF0_1),
- PINMUX_IPSR_DATA(IP10_29_26, USB2_EXTP),
+ PINMUX_IPSR_GPSR(IP10_29_26, USB2_EXTP),
PINMUX_IPSR_MSEL(IP10_29_26, GLO_I0, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP10_29_26, VI0_DATA6_VI0_B6_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP10_29_26, HCTS0_N_D, SEL_HSCIF0_3),
@@ -1400,164 +1400,164 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_29_26, GLO_I0_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_29_26, VI3_DATA6_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP11_3_0, SD2_WP),
- PINMUX_IPSR_DATA(IP11_3_0, MMC0_D5),
+ PINMUX_IPSR_GPSR(IP11_3_0, SD2_WP),
+ PINMUX_IPSR_GPSR(IP11_3_0, MMC0_D5),
PINMUX_IPSR_MSEL(IP11_3_0, TS_SCK0_B, SEL_TSIF0_1),
- PINMUX_IPSR_DATA(IP11_3_0, USB2_IDIN),
+ PINMUX_IPSR_GPSR(IP11_3_0, USB2_IDIN),
PINMUX_IPSR_MSEL(IP11_3_0, GLO_I1, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP11_3_0, VI0_DATA7_VI0_B7_B, SEL_VI0_1),
PINMUX_IPSR_MSEL(IP11_3_0, HRTS0_N_D, SEL_HSCIF0_3),
PINMUX_IPSR_MSEL(IP11_3_0, TS_SCK1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP11_3_0, GLO_I1_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP11_3_0, VI3_DATA7_B, SEL_VI3_1),
- PINMUX_IPSR_DATA(IP11_4, SD3_CLK),
- PINMUX_IPSR_DATA(IP11_4, MMC1_CLK),
- PINMUX_IPSR_DATA(IP11_6_5, SD3_CMD),
- PINMUX_IPSR_DATA(IP11_6_5, MMC1_CMD),
- PINMUX_IPSR_DATA(IP11_6_5, MTS_N),
- PINMUX_IPSR_DATA(IP11_8_7, SD3_DAT0),
- PINMUX_IPSR_DATA(IP11_8_7, MMC1_D0),
- PINMUX_IPSR_DATA(IP11_8_7, STM_N),
- PINMUX_IPSR_DATA(IP11_10_9, SD3_DAT1),
- PINMUX_IPSR_DATA(IP11_10_9, MMC1_D1),
- PINMUX_IPSR_DATA(IP11_10_9, MDATA),
- PINMUX_IPSR_DATA(IP11_12_11, SD3_DAT2),
- PINMUX_IPSR_DATA(IP11_12_11, MMC1_D2),
- PINMUX_IPSR_DATA(IP11_12_11, SDATA),
- PINMUX_IPSR_DATA(IP11_14_13, SD3_DAT3),
- PINMUX_IPSR_DATA(IP11_14_13, MMC1_D3),
- PINMUX_IPSR_DATA(IP11_14_13, SCKZ),
- PINMUX_IPSR_DATA(IP11_17_15, SD3_CD),
- PINMUX_IPSR_DATA(IP11_17_15, MMC1_D4),
+ PINMUX_IPSR_GPSR(IP11_4, SD3_CLK),
+ PINMUX_IPSR_GPSR(IP11_4, MMC1_CLK),
+ PINMUX_IPSR_GPSR(IP11_6_5, SD3_CMD),
+ PINMUX_IPSR_GPSR(IP11_6_5, MMC1_CMD),
+ PINMUX_IPSR_GPSR(IP11_6_5, MTS_N),
+ PINMUX_IPSR_GPSR(IP11_8_7, SD3_DAT0),
+ PINMUX_IPSR_GPSR(IP11_8_7, MMC1_D0),
+ PINMUX_IPSR_GPSR(IP11_8_7, STM_N),
+ PINMUX_IPSR_GPSR(IP11_10_9, SD3_DAT1),
+ PINMUX_IPSR_GPSR(IP11_10_9, MMC1_D1),
+ PINMUX_IPSR_GPSR(IP11_10_9, MDATA),
+ PINMUX_IPSR_GPSR(IP11_12_11, SD3_DAT2),
+ PINMUX_IPSR_GPSR(IP11_12_11, MMC1_D2),
+ PINMUX_IPSR_GPSR(IP11_12_11, SDATA),
+ PINMUX_IPSR_GPSR(IP11_14_13, SD3_DAT3),
+ PINMUX_IPSR_GPSR(IP11_14_13, MMC1_D3),
+ PINMUX_IPSR_GPSR(IP11_14_13, SCKZ),
+ PINMUX_IPSR_GPSR(IP11_17_15, SD3_CD),
+ PINMUX_IPSR_GPSR(IP11_17_15, MMC1_D4),
PINMUX_IPSR_MSEL(IP11_17_15, TS_SDAT1, SEL_TSIF1_0),
- PINMUX_IPSR_DATA(IP11_17_15, VSP),
+ PINMUX_IPSR_GPSR(IP11_17_15, VSP),
PINMUX_IPSR_MSEL(IP11_17_15, GLO_Q0, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP11_17_15, SIM0_RST_B, SEL_SIM_1),
- PINMUX_IPSR_DATA(IP11_21_18, SD3_WP),
- PINMUX_IPSR_DATA(IP11_21_18, MMC1_D5),
+ PINMUX_IPSR_GPSR(IP11_21_18, SD3_WP),
+ PINMUX_IPSR_GPSR(IP11_21_18, MMC1_D5),
PINMUX_IPSR_MSEL(IP11_21_18, TS_SCK1, SEL_TSIF1_0),
PINMUX_IPSR_MSEL(IP11_21_18, GLO_Q1, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP11_21_18, FMIN_C, SEL_FM_2),
PINMUX_IPSR_MSEL(IP11_21_18, FMIN_E, SEL_FM_4),
PINMUX_IPSR_MSEL(IP11_21_18, FMIN_F, SEL_FM_5),
- PINMUX_IPSR_DATA(IP11_23_22, MLB_CLK),
+ PINMUX_IPSR_GPSR(IP11_23_22, MLB_CLK),
PINMUX_IPSR_MSEL(IP11_23_22, IIC2_SCL_B, SEL_IIC2_1),
PINMUX_IPSR_MSEL(IP11_23_22, I2C2_SCL_B, SEL_I2C2_1),
- PINMUX_IPSR_DATA(IP11_26_24, MLB_SIG),
+ PINMUX_IPSR_GPSR(IP11_26_24, MLB_SIG),
PINMUX_IPSR_MSEL(IP11_26_24, SCIFB1_RXD_D, SEL_SCIFB1_3),
PINMUX_IPSR_MSEL(IP11_26_24, RX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP11_26_24, IIC2_SDA_B, SEL_IIC2_1),
PINMUX_IPSR_MSEL(IP11_26_24, I2C2_SDA_B, SEL_I2C2_1),
- PINMUX_IPSR_DATA(IP11_29_27, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP11_29_27, MLB_DAT),
PINMUX_IPSR_MSEL(IP11_29_27, SCIFB1_TXD_D, SEL_SCIFB1_3),
PINMUX_IPSR_MSEL(IP11_29_27, TX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP11_29_27, BPFCLK_C, SEL_FM_2),
- PINMUX_IPSR_DATA(IP11_31_30, SSI_SCK0129),
+ PINMUX_IPSR_GPSR(IP11_31_30, SSI_SCK0129),
PINMUX_IPSR_MSEL(IP11_31_30, CAN_CLK_B, SEL_CANCLK_1),
- PINMUX_IPSR_DATA(IP11_31_30, MOUT0),
+ PINMUX_IPSR_GPSR(IP11_31_30, MOUT0),
- PINMUX_IPSR_DATA(IP12_1_0, SSI_WS0129),
+ PINMUX_IPSR_GPSR(IP12_1_0, SSI_WS0129),
PINMUX_IPSR_MSEL(IP12_1_0, CAN0_TX_B, SEL_CAN0_1),
- PINMUX_IPSR_DATA(IP12_1_0, MOUT1),
- PINMUX_IPSR_DATA(IP12_3_2, SSI_SDATA0),
+ PINMUX_IPSR_GPSR(IP12_1_0, MOUT1),
+ PINMUX_IPSR_GPSR(IP12_3_2, SSI_SDATA0),
PINMUX_IPSR_MSEL(IP12_3_2, CAN0_RX_B, SEL_CAN0_1),
- PINMUX_IPSR_DATA(IP12_3_2, MOUT2),
- PINMUX_IPSR_DATA(IP12_5_4, SSI_SDATA1),
+ PINMUX_IPSR_GPSR(IP12_3_2, MOUT2),
+ PINMUX_IPSR_GPSR(IP12_5_4, SSI_SDATA1),
PINMUX_IPSR_MSEL(IP12_5_4, CAN1_TX_B, SEL_CAN1_1),
- PINMUX_IPSR_DATA(IP12_5_4, MOUT5),
- PINMUX_IPSR_DATA(IP12_7_6, SSI_SDATA2),
+ PINMUX_IPSR_GPSR(IP12_5_4, MOUT5),
+ PINMUX_IPSR_GPSR(IP12_7_6, SSI_SDATA2),
PINMUX_IPSR_MSEL(IP12_7_6, CAN1_RX_B, SEL_CAN1_1),
- PINMUX_IPSR_DATA(IP12_7_6, SSI_SCK1),
- PINMUX_IPSR_DATA(IP12_7_6, MOUT6),
- PINMUX_IPSR_DATA(IP12_10_8, SSI_SCK34),
- PINMUX_IPSR_DATA(IP12_10_8, STP_OPWM_0),
+ PINMUX_IPSR_GPSR(IP12_7_6, SSI_SCK1),
+ PINMUX_IPSR_GPSR(IP12_7_6, MOUT6),
+ PINMUX_IPSR_GPSR(IP12_10_8, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP12_10_8, STP_OPWM_0),
PINMUX_IPSR_MSEL(IP12_10_8, SCIFB0_SCK, SEL_SCIFB_0),
PINMUX_IPSR_MSEL(IP12_10_8, MSIOF1_SCK, SEL_SOF1_0),
- PINMUX_IPSR_DATA(IP12_10_8, CAN_DEBUG_HW_TRIGGER),
- PINMUX_IPSR_DATA(IP12_13_11, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP12_10_8, CAN_DEBUG_HW_TRIGGER),
+ PINMUX_IPSR_GPSR(IP12_13_11, SSI_WS34),
PINMUX_IPSR_MSEL(IP12_13_11, STP_IVCXO27_0, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP12_13_11, SCIFB0_RXD, SEL_SCIFB_0),
- PINMUX_IPSR_DATA(IP12_13_11, MSIOF1_SYNC),
- PINMUX_IPSR_DATA(IP12_13_11, CAN_STEP0),
- PINMUX_IPSR_DATA(IP12_16_14, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP12_13_11, MSIOF1_SYNC),
+ PINMUX_IPSR_GPSR(IP12_13_11, CAN_STEP0),
+ PINMUX_IPSR_GPSR(IP12_16_14, SSI_SDATA3),
PINMUX_IPSR_MSEL(IP12_16_14, STP_ISCLK_0, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP12_16_14, SCIFB0_TXD, SEL_SCIFB_0),
PINMUX_IPSR_MSEL(IP12_16_14, MSIOF1_SS1, SEL_SOF1_0),
- PINMUX_IPSR_DATA(IP12_16_14, CAN_TXCLK),
- PINMUX_IPSR_DATA(IP12_19_17, SSI_SCK4),
+ PINMUX_IPSR_GPSR(IP12_16_14, CAN_TXCLK),
+ PINMUX_IPSR_GPSR(IP12_19_17, SSI_SCK4),
PINMUX_IPSR_MSEL(IP12_19_17, STP_ISD_0, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP12_19_17, SCIFB0_CTS_N, SEL_SCIFB_0),
PINMUX_IPSR_MSEL(IP12_19_17, MSIOF1_SS2, SEL_SOF1_0),
PINMUX_IPSR_MSEL(IP12_19_17, SSI_SCK5_C, SEL_SSI5_2),
- PINMUX_IPSR_DATA(IP12_19_17, CAN_DEBUGOUT0),
- PINMUX_IPSR_DATA(IP12_22_20, SSI_WS4),
+ PINMUX_IPSR_GPSR(IP12_19_17, CAN_DEBUGOUT0),
+ PINMUX_IPSR_GPSR(IP12_22_20, SSI_WS4),
PINMUX_IPSR_MSEL(IP12_22_20, STP_ISEN_0, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP12_22_20, SCIFB0_RTS_N, SEL_SCIFB_0),
PINMUX_IPSR_MSEL(IP12_22_20, MSIOF1_TXD, SEL_SOF1_0),
PINMUX_IPSR_MSEL(IP12_22_20, SSI_WS5_C, SEL_SSI5_2),
- PINMUX_IPSR_DATA(IP12_22_20, CAN_DEBUGOUT1),
- PINMUX_IPSR_DATA(IP12_24_23, SSI_SDATA4),
+ PINMUX_IPSR_GPSR(IP12_22_20, CAN_DEBUGOUT1),
+ PINMUX_IPSR_GPSR(IP12_24_23, SSI_SDATA4),
PINMUX_IPSR_MSEL(IP12_24_23, STP_ISSYNC_0, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP12_24_23, MSIOF1_RXD, SEL_SOF1_0),
- PINMUX_IPSR_DATA(IP12_24_23, CAN_DEBUGOUT2),
+ PINMUX_IPSR_GPSR(IP12_24_23, CAN_DEBUGOUT2),
PINMUX_IPSR_MSEL(IP12_27_25, SSI_SCK5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP12_27_25, SCIFB1_SCK, SEL_SCIFB1_0),
PINMUX_IPSR_MSEL(IP12_27_25, IERX_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP12_27_25, DU2_EXHSYNC_DU2_HSYNC),
- PINMUX_IPSR_DATA(IP12_27_25, QSTH_QHS),
- PINMUX_IPSR_DATA(IP12_27_25, CAN_DEBUGOUT3),
+ PINMUX_IPSR_GPSR(IP12_27_25, DU2_EXHSYNC_DU2_HSYNC),
+ PINMUX_IPSR_GPSR(IP12_27_25, QSTH_QHS),
+ PINMUX_IPSR_GPSR(IP12_27_25, CAN_DEBUGOUT3),
PINMUX_IPSR_MSEL(IP12_30_28, SSI_WS5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP12_30_28, SCIFB1_RXD, SEL_SCIFB1_0),
PINMUX_IPSR_MSEL(IP12_30_28, IECLK_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP12_30_28, DU2_EXVSYNC_DU2_VSYNC),
- PINMUX_IPSR_DATA(IP12_30_28, QSTB_QHE),
- PINMUX_IPSR_DATA(IP12_30_28, CAN_DEBUGOUT4),
+ PINMUX_IPSR_GPSR(IP12_30_28, DU2_EXVSYNC_DU2_VSYNC),
+ PINMUX_IPSR_GPSR(IP12_30_28, QSTB_QHE),
+ PINMUX_IPSR_GPSR(IP12_30_28, CAN_DEBUGOUT4),
PINMUX_IPSR_MSEL(IP13_2_0, SSI_SDATA5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP13_2_0, SCIFB1_TXD, SEL_SCIFB1_0),
PINMUX_IPSR_MSEL(IP13_2_0, IETX_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP13_2_0, DU2_DR2),
- PINMUX_IPSR_DATA(IP13_2_0, LCDOUT2),
- PINMUX_IPSR_DATA(IP13_2_0, CAN_DEBUGOUT5),
+ PINMUX_IPSR_GPSR(IP13_2_0, DU2_DR2),
+ PINMUX_IPSR_GPSR(IP13_2_0, LCDOUT2),
+ PINMUX_IPSR_GPSR(IP13_2_0, CAN_DEBUGOUT5),
PINMUX_IPSR_MSEL(IP13_6_3, SSI_SCK6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP13_6_3, SCIFB1_CTS_N, SEL_SCIFB1_0),
PINMUX_IPSR_MSEL(IP13_6_3, BPFCLK_D, SEL_FM_3),
- PINMUX_IPSR_DATA(IP13_6_3, DU2_DR3),
- PINMUX_IPSR_DATA(IP13_6_3, LCDOUT3),
- PINMUX_IPSR_DATA(IP13_6_3, CAN_DEBUGOUT6),
+ PINMUX_IPSR_GPSR(IP13_6_3, DU2_DR3),
+ PINMUX_IPSR_GPSR(IP13_6_3, LCDOUT3),
+ PINMUX_IPSR_GPSR(IP13_6_3, CAN_DEBUGOUT6),
PINMUX_IPSR_MSEL(IP13_6_3, BPFCLK_F, SEL_FM_5),
PINMUX_IPSR_MSEL(IP13_9_7, SSI_WS6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP13_9_7, SCIFB1_RTS_N, SEL_SCIFB1_0),
PINMUX_IPSR_MSEL(IP13_9_7, CAN0_TX_D, SEL_CAN0_3),
- PINMUX_IPSR_DATA(IP13_9_7, DU2_DR4),
- PINMUX_IPSR_DATA(IP13_9_7, LCDOUT4),
- PINMUX_IPSR_DATA(IP13_9_7, CAN_DEBUGOUT7),
+ PINMUX_IPSR_GPSR(IP13_9_7, DU2_DR4),
+ PINMUX_IPSR_GPSR(IP13_9_7, LCDOUT4),
+ PINMUX_IPSR_GPSR(IP13_9_7, CAN_DEBUGOUT7),
PINMUX_IPSR_MSEL(IP13_12_10, SSI_SDATA6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP13_12_10, FMIN_D, SEL_FM_3),
- PINMUX_IPSR_DATA(IP13_12_10, DU2_DR5),
- PINMUX_IPSR_DATA(IP13_12_10, LCDOUT5),
- PINMUX_IPSR_DATA(IP13_12_10, CAN_DEBUGOUT8),
+ PINMUX_IPSR_GPSR(IP13_12_10, DU2_DR5),
+ PINMUX_IPSR_GPSR(IP13_12_10, LCDOUT5),
+ PINMUX_IPSR_GPSR(IP13_12_10, CAN_DEBUGOUT8),
PINMUX_IPSR_MSEL(IP13_15_13, SSI_SCK78, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP13_15_13, STP_IVCXO27_1, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP13_15_13, SCK1, SEL_SCIF1_0),
PINMUX_IPSR_MSEL(IP13_15_13, SCIFA1_SCK, SEL_SCIFA1_0),
- PINMUX_IPSR_DATA(IP13_15_13, DU2_DR6),
- PINMUX_IPSR_DATA(IP13_15_13, LCDOUT6),
- PINMUX_IPSR_DATA(IP13_15_13, CAN_DEBUGOUT9),
+ PINMUX_IPSR_GPSR(IP13_15_13, DU2_DR6),
+ PINMUX_IPSR_GPSR(IP13_15_13, LCDOUT6),
+ PINMUX_IPSR_GPSR(IP13_15_13, CAN_DEBUGOUT9),
PINMUX_IPSR_MSEL(IP13_18_16, SSI_WS78, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP13_18_16, STP_ISCLK_1, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP13_18_16, SCIFB2_SCK, SEL_SCIFB2_0),
- PINMUX_IPSR_DATA(IP13_18_16, SCIFA2_CTS_N),
- PINMUX_IPSR_DATA(IP13_18_16, DU2_DR7),
- PINMUX_IPSR_DATA(IP13_18_16, LCDOUT7),
- PINMUX_IPSR_DATA(IP13_18_16, CAN_DEBUGOUT10),
+ PINMUX_IPSR_GPSR(IP13_18_16, SCIFA2_CTS_N),
+ PINMUX_IPSR_GPSR(IP13_18_16, DU2_DR7),
+ PINMUX_IPSR_GPSR(IP13_18_16, LCDOUT7),
+ PINMUX_IPSR_GPSR(IP13_18_16, CAN_DEBUGOUT10),
PINMUX_IPSR_MSEL(IP13_22_19, SSI_SDATA7, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP13_22_19, STP_ISD_1, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP13_22_19, SCIFB2_RXD, SEL_SCIFB2_0),
- PINMUX_IPSR_DATA(IP13_22_19, SCIFA2_RTS_N),
- PINMUX_IPSR_DATA(IP13_22_19, TCLK2),
- PINMUX_IPSR_DATA(IP13_22_19, QSTVA_QVS),
- PINMUX_IPSR_DATA(IP13_22_19, CAN_DEBUGOUT11),
+ PINMUX_IPSR_GPSR(IP13_22_19, SCIFA2_RTS_N),
+ PINMUX_IPSR_GPSR(IP13_22_19, TCLK2),
+ PINMUX_IPSR_GPSR(IP13_22_19, QSTVA_QVS),
+ PINMUX_IPSR_GPSR(IP13_22_19, CAN_DEBUGOUT11),
PINMUX_IPSR_MSEL(IP13_22_19, BPFCLK_E, SEL_FM_4),
PINMUX_IPSR_MSEL(IP13_22_19, SSI_SDATA7_B, SEL_SSI7_1),
PINMUX_IPSR_MSEL(IP13_22_19, FMIN_G, SEL_FM_6),
@@ -1565,161 +1565,161 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_25_23, STP_ISEN_1, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP13_25_23, SCIFB2_TXD, SEL_SCIFB2_0),
PINMUX_IPSR_MSEL(IP13_25_23, CAN0_TX_C, SEL_CAN0_2),
- PINMUX_IPSR_DATA(IP13_25_23, CAN_DEBUGOUT12),
+ PINMUX_IPSR_GPSR(IP13_25_23, CAN_DEBUGOUT12),
PINMUX_IPSR_MSEL(IP13_25_23, SSI_SDATA8_B, SEL_SSI8_1),
- PINMUX_IPSR_DATA(IP13_28_26, SSI_SDATA9),
+ PINMUX_IPSR_GPSR(IP13_28_26, SSI_SDATA9),
PINMUX_IPSR_MSEL(IP13_28_26, STP_ISSYNC_1, SEL_SSP_0),
PINMUX_IPSR_MSEL(IP13_28_26, SCIFB2_CTS_N, SEL_SCIFB2_0),
- PINMUX_IPSR_DATA(IP13_28_26, SSI_WS1),
+ PINMUX_IPSR_GPSR(IP13_28_26, SSI_WS1),
PINMUX_IPSR_MSEL(IP13_28_26, SSI_SDATA5_C, SEL_SSI5_2),
- PINMUX_IPSR_DATA(IP13_28_26, CAN_DEBUGOUT13),
- PINMUX_IPSR_DATA(IP13_30_29, AUDIO_CLKA),
+ PINMUX_IPSR_GPSR(IP13_28_26, CAN_DEBUGOUT13),
+ PINMUX_IPSR_GPSR(IP13_30_29, AUDIO_CLKA),
PINMUX_IPSR_MSEL(IP13_30_29, SCIFB2_RTS_N, SEL_SCIFB2_0),
- PINMUX_IPSR_DATA(IP13_30_29, CAN_DEBUGOUT14),
- PINMUX_IPSR_DATA(IP14_2_0, AUDIO_CLKB),
+ PINMUX_IPSR_GPSR(IP13_30_29, CAN_DEBUGOUT14),
+ PINMUX_IPSR_GPSR(IP14_2_0, AUDIO_CLKB),
PINMUX_IPSR_MSEL(IP14_2_0, SCIF_CLK, SEL_SCIFCLK_0),
PINMUX_IPSR_MSEL(IP14_2_0, CAN0_RX_D, SEL_CAN0_3),
- PINMUX_IPSR_DATA(IP14_2_0, DVC_MUTE),
+ PINMUX_IPSR_GPSR(IP14_2_0, DVC_MUTE),
PINMUX_IPSR_MSEL(IP14_2_0, CAN0_RX_C, SEL_CAN0_2),
- PINMUX_IPSR_DATA(IP14_2_0, CAN_DEBUGOUT15),
- PINMUX_IPSR_DATA(IP14_2_0, REMOCON),
+ PINMUX_IPSR_GPSR(IP14_2_0, CAN_DEBUGOUT15),
+ PINMUX_IPSR_GPSR(IP14_2_0, REMOCON),
PINMUX_IPSR_MSEL(IP14_5_3, SCIFA0_SCK, SEL_SCFA_0),
PINMUX_IPSR_MSEL(IP14_5_3, HSCK1, SEL_HSCIF1_0),
- PINMUX_IPSR_DATA(IP14_5_3, SCK0),
- PINMUX_IPSR_DATA(IP14_5_3, MSIOF3_SS2),
- PINMUX_IPSR_DATA(IP14_5_3, DU2_DG2),
- PINMUX_IPSR_DATA(IP14_5_3, LCDOUT10),
+ PINMUX_IPSR_GPSR(IP14_5_3, SCK0),
+ PINMUX_IPSR_GPSR(IP14_5_3, MSIOF3_SS2),
+ PINMUX_IPSR_GPSR(IP14_5_3, DU2_DG2),
+ PINMUX_IPSR_GPSR(IP14_5_3, LCDOUT10),
PINMUX_IPSR_MSEL(IP14_5_3, IIC1_SDA_C, SEL_IIC1_2),
PINMUX_IPSR_MSEL(IP14_5_3, I2C1_SDA_C, SEL_I2C1_2),
PINMUX_IPSR_MSEL(IP14_8_6, SCIFA0_RXD, SEL_SCFA_0),
PINMUX_IPSR_MSEL(IP14_8_6, HRX1, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP14_8_6, RX0, SEL_SCIF0_0),
- PINMUX_IPSR_DATA(IP14_8_6, DU2_DR0),
- PINMUX_IPSR_DATA(IP14_8_6, LCDOUT0),
+ PINMUX_IPSR_GPSR(IP14_8_6, DU2_DR0),
+ PINMUX_IPSR_GPSR(IP14_8_6, LCDOUT0),
PINMUX_IPSR_MSEL(IP14_11_9, SCIFA0_TXD, SEL_SCFA_0),
PINMUX_IPSR_MSEL(IP14_11_9, HTX1, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP14_11_9, TX0, SEL_SCIF0_0),
- PINMUX_IPSR_DATA(IP14_11_9, DU2_DR1),
- PINMUX_IPSR_DATA(IP14_11_9, LCDOUT1),
+ PINMUX_IPSR_GPSR(IP14_11_9, DU2_DR1),
+ PINMUX_IPSR_GPSR(IP14_11_9, LCDOUT1),
PINMUX_IPSR_MSEL(IP14_15_12, SCIFA0_CTS_N, SEL_SCFA_0),
PINMUX_IPSR_MSEL(IP14_15_12, HCTS1_N, SEL_HSCIF1_0),
- PINMUX_IPSR_DATA(IP14_15_12, CTS0_N),
+ PINMUX_IPSR_GPSR(IP14_15_12, CTS0_N),
PINMUX_IPSR_MSEL(IP14_15_12, MSIOF3_SYNC, SEL_SOF3_0),
- PINMUX_IPSR_DATA(IP14_15_12, DU2_DG3),
- PINMUX_IPSR_DATA(IP14_15_12, LCDOUT11),
- PINMUX_IPSR_DATA(IP14_15_12, PWM0_B),
+ PINMUX_IPSR_GPSR(IP14_15_12, DU2_DG3),
+ PINMUX_IPSR_GPSR(IP14_15_12, LCDOUT11),
+ PINMUX_IPSR_GPSR(IP14_15_12, PWM0_B),
PINMUX_IPSR_MSEL(IP14_15_12, IIC1_SCL_C, SEL_IIC1_2),
PINMUX_IPSR_MSEL(IP14_15_12, I2C1_SCL_C, SEL_I2C1_2),
PINMUX_IPSR_MSEL(IP14_18_16, SCIFA0_RTS_N, SEL_SCFA_0),
PINMUX_IPSR_MSEL(IP14_18_16, HRTS1_N, SEL_HSCIF1_0),
- PINMUX_IPSR_DATA(IP14_18_16, RTS0_N),
- PINMUX_IPSR_DATA(IP14_18_16, MSIOF3_SS1),
- PINMUX_IPSR_DATA(IP14_18_16, DU2_DG0),
- PINMUX_IPSR_DATA(IP14_18_16, LCDOUT8),
- PINMUX_IPSR_DATA(IP14_18_16, PWM1_B),
+ PINMUX_IPSR_GPSR(IP14_18_16, RTS0_N),
+ PINMUX_IPSR_GPSR(IP14_18_16, MSIOF3_SS1),
+ PINMUX_IPSR_GPSR(IP14_18_16, DU2_DG0),
+ PINMUX_IPSR_GPSR(IP14_18_16, LCDOUT8),
+ PINMUX_IPSR_GPSR(IP14_18_16, PWM1_B),
PINMUX_IPSR_MSEL(IP14_21_19, SCIFA1_RXD, SEL_SCIFA1_0),
PINMUX_IPSR_MSEL(IP14_21_19, AD_DI, SEL_ADI_0),
PINMUX_IPSR_MSEL(IP14_21_19, RX1, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP14_21_19, DU2_EXODDF_DU2_ODDF_DISP_CDE),
- PINMUX_IPSR_DATA(IP14_21_19, QCPV_QDE),
+ PINMUX_IPSR_GPSR(IP14_21_19, DU2_EXODDF_DU2_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP14_21_19, QCPV_QDE),
PINMUX_IPSR_MSEL(IP14_24_22, SCIFA1_TXD, SEL_SCIFA1_0),
PINMUX_IPSR_MSEL(IP14_24_22, AD_DO, SEL_ADI_0),
PINMUX_IPSR_MSEL(IP14_24_22, TX1, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP14_24_22, DU2_DG1),
- PINMUX_IPSR_DATA(IP14_24_22, LCDOUT9),
+ PINMUX_IPSR_GPSR(IP14_24_22, DU2_DG1),
+ PINMUX_IPSR_GPSR(IP14_24_22, LCDOUT9),
PINMUX_IPSR_MSEL(IP14_27_25, SCIFA1_CTS_N, SEL_SCIFA1_0),
PINMUX_IPSR_MSEL(IP14_27_25, AD_CLK, SEL_ADI_0),
- PINMUX_IPSR_DATA(IP14_27_25, CTS1_N),
+ PINMUX_IPSR_GPSR(IP14_27_25, CTS1_N),
PINMUX_IPSR_MSEL(IP14_27_25, MSIOF3_RXD, SEL_SOF3_0),
- PINMUX_IPSR_DATA(IP14_27_25, DU0_DOTCLKOUT),
- PINMUX_IPSR_DATA(IP14_27_25, QCLK),
+ PINMUX_IPSR_GPSR(IP14_27_25, DU0_DOTCLKOUT),
+ PINMUX_IPSR_GPSR(IP14_27_25, QCLK),
PINMUX_IPSR_MSEL(IP14_30_28, SCIFA1_RTS_N, SEL_SCIFA1_0),
PINMUX_IPSR_MSEL(IP14_30_28, AD_NCS_N, SEL_ADI_0),
- PINMUX_IPSR_DATA(IP14_30_28, RTS1_N),
+ PINMUX_IPSR_GPSR(IP14_30_28, RTS1_N),
PINMUX_IPSR_MSEL(IP14_30_28, MSIOF3_TXD, SEL_SOF3_0),
- PINMUX_IPSR_DATA(IP14_30_28, DU1_DOTCLKOUT),
- PINMUX_IPSR_DATA(IP14_30_28, QSTVB_QVE),
+ PINMUX_IPSR_GPSR(IP14_30_28, DU1_DOTCLKOUT),
+ PINMUX_IPSR_GPSR(IP14_30_28, QSTVB_QVE),
PINMUX_IPSR_MSEL(IP14_30_28, HRTS0_N_C, SEL_HSCIF0_2),
PINMUX_IPSR_MSEL(IP15_2_0, SCIFA2_SCK, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP15_2_0, FMCLK, SEL_FM_0),
- PINMUX_IPSR_DATA(IP15_2_0, SCK2),
+ PINMUX_IPSR_GPSR(IP15_2_0, SCK2),
PINMUX_IPSR_MSEL(IP15_2_0, MSIOF3_SCK, SEL_SOF3_0),
- PINMUX_IPSR_DATA(IP15_2_0, DU2_DG7),
- PINMUX_IPSR_DATA(IP15_2_0, LCDOUT15),
+ PINMUX_IPSR_GPSR(IP15_2_0, DU2_DG7),
+ PINMUX_IPSR_GPSR(IP15_2_0, LCDOUT15),
PINMUX_IPSR_MSEL(IP15_2_0, SCIF_CLK_B, SEL_SCIFCLK_1),
PINMUX_IPSR_MSEL(IP15_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP15_5_3, FMIN, SEL_FM_0),
PINMUX_IPSR_MSEL(IP15_5_3, TX2, SEL_SCIF2_0),
- PINMUX_IPSR_DATA(IP15_5_3, DU2_DB0),
- PINMUX_IPSR_DATA(IP15_5_3, LCDOUT16),
+ PINMUX_IPSR_GPSR(IP15_5_3, DU2_DB0),
+ PINMUX_IPSR_GPSR(IP15_5_3, LCDOUT16),
PINMUX_IPSR_MSEL(IP15_5_3, IIC2_SCL, SEL_IIC2_0),
PINMUX_IPSR_MSEL(IP15_5_3, I2C2_SCL, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP15_8_6, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP15_8_6, BPFCLK, SEL_FM_0),
PINMUX_IPSR_MSEL(IP15_8_6, RX2, SEL_SCIF2_0),
- PINMUX_IPSR_DATA(IP15_8_6, DU2_DB1),
- PINMUX_IPSR_DATA(IP15_8_6, LCDOUT17),
+ PINMUX_IPSR_GPSR(IP15_8_6, DU2_DB1),
+ PINMUX_IPSR_GPSR(IP15_8_6, LCDOUT17),
PINMUX_IPSR_MSEL(IP15_8_6, IIC2_SDA, SEL_IIC2_0),
PINMUX_IPSR_MSEL(IP15_8_6, I2C2_SDA, SEL_I2C2_0),
- PINMUX_IPSR_DATA(IP15_11_9, HSCK0),
+ PINMUX_IPSR_GPSR(IP15_11_9, HSCK0),
PINMUX_IPSR_MSEL(IP15_11_9, TS_SDEN0, SEL_TSIF0_0),
- PINMUX_IPSR_DATA(IP15_11_9, DU2_DG4),
- PINMUX_IPSR_DATA(IP15_11_9, LCDOUT12),
+ PINMUX_IPSR_GPSR(IP15_11_9, DU2_DG4),
+ PINMUX_IPSR_GPSR(IP15_11_9, LCDOUT12),
PINMUX_IPSR_MSEL(IP15_11_9, HCTS0_N_C, SEL_HSCIF0_2),
PINMUX_IPSR_MSEL(IP15_13_12, HRX0, SEL_HSCIF0_0),
- PINMUX_IPSR_DATA(IP15_13_12, DU2_DB2),
- PINMUX_IPSR_DATA(IP15_13_12, LCDOUT18),
+ PINMUX_IPSR_GPSR(IP15_13_12, DU2_DB2),
+ PINMUX_IPSR_GPSR(IP15_13_12, LCDOUT18),
PINMUX_IPSR_MSEL(IP15_15_14, HTX0, SEL_HSCIF0_0),
- PINMUX_IPSR_DATA(IP15_15_14, DU2_DB3),
- PINMUX_IPSR_DATA(IP15_15_14, LCDOUT19),
+ PINMUX_IPSR_GPSR(IP15_15_14, DU2_DB3),
+ PINMUX_IPSR_GPSR(IP15_15_14, LCDOUT19),
PINMUX_IPSR_MSEL(IP15_17_16, HCTS0_N, SEL_HSCIF0_0),
- PINMUX_IPSR_DATA(IP15_17_16, SSI_SCK9),
- PINMUX_IPSR_DATA(IP15_17_16, DU2_DB4),
- PINMUX_IPSR_DATA(IP15_17_16, LCDOUT20),
+ PINMUX_IPSR_GPSR(IP15_17_16, SSI_SCK9),
+ PINMUX_IPSR_GPSR(IP15_17_16, DU2_DB4),
+ PINMUX_IPSR_GPSR(IP15_17_16, LCDOUT20),
PINMUX_IPSR_MSEL(IP15_19_18, HRTS0_N, SEL_HSCIF0_0),
- PINMUX_IPSR_DATA(IP15_19_18, SSI_WS9),
- PINMUX_IPSR_DATA(IP15_19_18, DU2_DB5),
- PINMUX_IPSR_DATA(IP15_19_18, LCDOUT21),
+ PINMUX_IPSR_GPSR(IP15_19_18, SSI_WS9),
+ PINMUX_IPSR_GPSR(IP15_19_18, DU2_DB5),
+ PINMUX_IPSR_GPSR(IP15_19_18, LCDOUT21),
PINMUX_IPSR_MSEL(IP15_22_20, MSIOF0_SCK, SEL_SOF0_0),
PINMUX_IPSR_MSEL(IP15_22_20, TS_SDAT0, SEL_TSIF0_0),
- PINMUX_IPSR_DATA(IP15_22_20, ADICLK),
- PINMUX_IPSR_DATA(IP15_22_20, DU2_DB6),
- PINMUX_IPSR_DATA(IP15_22_20, LCDOUT22),
- PINMUX_IPSR_DATA(IP15_25_23, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP15_22_20, ADICLK),
+ PINMUX_IPSR_GPSR(IP15_22_20, DU2_DB6),
+ PINMUX_IPSR_GPSR(IP15_22_20, LCDOUT22),
+ PINMUX_IPSR_GPSR(IP15_25_23, MSIOF0_SYNC),
PINMUX_IPSR_MSEL(IP15_25_23, TS_SCK0, SEL_TSIF0_0),
- PINMUX_IPSR_DATA(IP15_25_23, SSI_SCK2),
- PINMUX_IPSR_DATA(IP15_25_23, ADIDATA),
- PINMUX_IPSR_DATA(IP15_25_23, DU2_DB7),
- PINMUX_IPSR_DATA(IP15_25_23, LCDOUT23),
+ PINMUX_IPSR_GPSR(IP15_25_23, SSI_SCK2),
+ PINMUX_IPSR_GPSR(IP15_25_23, ADIDATA),
+ PINMUX_IPSR_GPSR(IP15_25_23, DU2_DB7),
+ PINMUX_IPSR_GPSR(IP15_25_23, LCDOUT23),
PINMUX_IPSR_MSEL(IP15_25_23, HRX0_C, SEL_SCIFA2_1),
PINMUX_IPSR_MSEL(IP15_27_26, MSIOF0_SS1, SEL_SOF0_0),
- PINMUX_IPSR_DATA(IP15_27_26, ADICHS0),
- PINMUX_IPSR_DATA(IP15_27_26, DU2_DG5),
- PINMUX_IPSR_DATA(IP15_27_26, LCDOUT13),
+ PINMUX_IPSR_GPSR(IP15_27_26, ADICHS0),
+ PINMUX_IPSR_GPSR(IP15_27_26, DU2_DG5),
+ PINMUX_IPSR_GPSR(IP15_27_26, LCDOUT13),
PINMUX_IPSR_MSEL(IP15_29_28, MSIOF0_TXD, SEL_SOF0_0),
- PINMUX_IPSR_DATA(IP15_29_28, ADICHS1),
- PINMUX_IPSR_DATA(IP15_29_28, DU2_DG6),
- PINMUX_IPSR_DATA(IP15_29_28, LCDOUT14),
+ PINMUX_IPSR_GPSR(IP15_29_28, ADICHS1),
+ PINMUX_IPSR_GPSR(IP15_29_28, DU2_DG6),
+ PINMUX_IPSR_GPSR(IP15_29_28, LCDOUT14),
PINMUX_IPSR_MSEL(IP16_2_0, MSIOF0_SS2, SEL_SOF0_0),
- PINMUX_IPSR_DATA(IP16_2_0, AUDIO_CLKOUT),
- PINMUX_IPSR_DATA(IP16_2_0, ADICHS2),
- PINMUX_IPSR_DATA(IP16_2_0, DU2_DISP),
- PINMUX_IPSR_DATA(IP16_2_0, QPOLA),
+ PINMUX_IPSR_GPSR(IP16_2_0, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP16_2_0, ADICHS2),
+ PINMUX_IPSR_GPSR(IP16_2_0, DU2_DISP),
+ PINMUX_IPSR_GPSR(IP16_2_0, QPOLA),
PINMUX_IPSR_MSEL(IP16_2_0, HTX0_C, SEL_HSCIF0_2),
PINMUX_IPSR_MSEL(IP16_2_0, SCIFA2_TXD_B, SEL_SCIFA2_1),
PINMUX_IPSR_MSEL(IP16_5_3, MSIOF0_RXD, SEL_SOF0_0),
PINMUX_IPSR_MSEL(IP16_5_3, TS_SPSYNC0, SEL_TSIF0_0),
- PINMUX_IPSR_DATA(IP16_5_3, SSI_WS2),
- PINMUX_IPSR_DATA(IP16_5_3, ADICS_SAMP),
- PINMUX_IPSR_DATA(IP16_5_3, DU2_CDE),
- PINMUX_IPSR_DATA(IP16_5_3, QPOLB),
+ PINMUX_IPSR_GPSR(IP16_5_3, SSI_WS2),
+ PINMUX_IPSR_GPSR(IP16_5_3, ADICS_SAMP),
+ PINMUX_IPSR_GPSR(IP16_5_3, DU2_CDE),
+ PINMUX_IPSR_GPSR(IP16_5_3, QPOLB),
PINMUX_IPSR_MSEL(IP16_5_3, SCIFA2_RXD_B, SEL_HSCIF0_2),
- PINMUX_IPSR_DATA(IP16_6, USB1_PWEN),
- PINMUX_IPSR_DATA(IP16_6, AUDIO_CLKOUT_D),
- PINMUX_IPSR_DATA(IP16_7, USB1_OVC),
+ PINMUX_IPSR_GPSR(IP16_6, USB1_PWEN),
+ PINMUX_IPSR_GPSR(IP16_6, AUDIO_CLKOUT_D),
+ PINMUX_IPSR_GPSR(IP16_7, USB1_OVC),
PINMUX_IPSR_MSEL(IP16_7, TCLK1_B, SEL_TMU1_1),
PINMUX_DATA(IIC0_SCL_MARK, FN_SEL_IIC0_0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 4cfbb94ad5d0..01abbd5b4e49 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -792,171 +792,171 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(SD1_CLK),
/* IPSR0 */
- PINMUX_IPSR_DATA(IP0_0, D0),
- PINMUX_IPSR_DATA(IP0_1, D1),
- PINMUX_IPSR_DATA(IP0_2, D2),
- PINMUX_IPSR_DATA(IP0_3, D3),
- PINMUX_IPSR_DATA(IP0_4, D4),
- PINMUX_IPSR_DATA(IP0_5, D5),
- PINMUX_IPSR_DATA(IP0_6, D6),
- PINMUX_IPSR_DATA(IP0_7, D7),
- PINMUX_IPSR_DATA(IP0_8, D8),
- PINMUX_IPSR_DATA(IP0_9, D9),
- PINMUX_IPSR_DATA(IP0_10, D10),
- PINMUX_IPSR_DATA(IP0_11, D11),
- PINMUX_IPSR_DATA(IP0_12, D12),
- PINMUX_IPSR_DATA(IP0_13, D13),
- PINMUX_IPSR_DATA(IP0_14, D14),
- PINMUX_IPSR_DATA(IP0_15, D15),
- PINMUX_IPSR_DATA(IP0_18_16, A0),
+ PINMUX_IPSR_GPSR(IP0_0, D0),
+ PINMUX_IPSR_GPSR(IP0_1, D1),
+ PINMUX_IPSR_GPSR(IP0_2, D2),
+ PINMUX_IPSR_GPSR(IP0_3, D3),
+ PINMUX_IPSR_GPSR(IP0_4, D4),
+ PINMUX_IPSR_GPSR(IP0_5, D5),
+ PINMUX_IPSR_GPSR(IP0_6, D6),
+ PINMUX_IPSR_GPSR(IP0_7, D7),
+ PINMUX_IPSR_GPSR(IP0_8, D8),
+ PINMUX_IPSR_GPSR(IP0_9, D9),
+ PINMUX_IPSR_GPSR(IP0_10, D10),
+ PINMUX_IPSR_GPSR(IP0_11, D11),
+ PINMUX_IPSR_GPSR(IP0_12, D12),
+ PINMUX_IPSR_GPSR(IP0_13, D13),
+ PINMUX_IPSR_GPSR(IP0_14, D14),
+ PINMUX_IPSR_GPSR(IP0_15, D15),
+ PINMUX_IPSR_GPSR(IP0_18_16, A0),
PINMUX_IPSR_MSEL(IP0_18_16, ATAWR0_N_C, SEL_LBS_2),
PINMUX_IPSR_MSEL(IP0_18_16, MSIOF0_SCK_B, SEL_SOF0_1),
PINMUX_IPSR_MSEL(IP0_18_16, SCL0_C, SEL_IIC0_2),
- PINMUX_IPSR_DATA(IP0_18_16, PWM2_B),
- PINMUX_IPSR_DATA(IP0_20_19, A1),
+ PINMUX_IPSR_GPSR(IP0_18_16, PWM2_B),
+ PINMUX_IPSR_GPSR(IP0_20_19, A1),
PINMUX_IPSR_MSEL(IP0_20_19, MSIOF0_SYNC_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP0_22_21, A2),
+ PINMUX_IPSR_GPSR(IP0_22_21, A2),
PINMUX_IPSR_MSEL(IP0_22_21, MSIOF0_SS1_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP0_24_23, A3),
+ PINMUX_IPSR_GPSR(IP0_24_23, A3),
PINMUX_IPSR_MSEL(IP0_24_23, MSIOF0_SS2_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP0_26_25, A4),
+ PINMUX_IPSR_GPSR(IP0_26_25, A4),
PINMUX_IPSR_MSEL(IP0_26_25, MSIOF0_TXD_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP0_28_27, A5),
+ PINMUX_IPSR_GPSR(IP0_28_27, A5),
PINMUX_IPSR_MSEL(IP0_28_27, MSIOF0_RXD_B, SEL_SOF0_1),
- PINMUX_IPSR_DATA(IP0_30_29, A6),
+ PINMUX_IPSR_GPSR(IP0_30_29, A6),
PINMUX_IPSR_MSEL(IP0_30_29, MSIOF1_SCK, SEL_SOF1_0),
/* IPSR1 */
- PINMUX_IPSR_DATA(IP1_1_0, A7),
+ PINMUX_IPSR_GPSR(IP1_1_0, A7),
PINMUX_IPSR_MSEL(IP1_1_0, MSIOF1_SYNC, SEL_SOF1_0),
- PINMUX_IPSR_DATA(IP1_3_2, A8),
+ PINMUX_IPSR_GPSR(IP1_3_2, A8),
PINMUX_IPSR_MSEL(IP1_3_2, MSIOF1_SS1, SEL_SOF1_0),
PINMUX_IPSR_MSEL(IP1_3_2, SCL0, SEL_IIC0_0),
- PINMUX_IPSR_DATA(IP1_5_4, A9),
+ PINMUX_IPSR_GPSR(IP1_5_4, A9),
PINMUX_IPSR_MSEL(IP1_5_4, MSIOF1_SS2, SEL_SOF1_0),
PINMUX_IPSR_MSEL(IP1_5_4, SDA0, SEL_IIC0_0),
- PINMUX_IPSR_DATA(IP1_7_6, A10),
+ PINMUX_IPSR_GPSR(IP1_7_6, A10),
PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD, SEL_SOF1_0),
PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD_D, SEL_SOF1_3),
- PINMUX_IPSR_DATA(IP1_10_8, A11),
+ PINMUX_IPSR_GPSR(IP1_10_8, A11),
PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD, SEL_SOF1_0),
PINMUX_IPSR_MSEL(IP1_10_8, SCL3_D, SEL_IIC3_3),
PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD_D, SEL_SOF1_3),
- PINMUX_IPSR_DATA(IP1_13_11, A12),
+ PINMUX_IPSR_GPSR(IP1_13_11, A12),
PINMUX_IPSR_MSEL(IP1_13_11, FMCLK, SEL_FM_0),
PINMUX_IPSR_MSEL(IP1_13_11, SDA3_D, SEL_IIC3_3),
PINMUX_IPSR_MSEL(IP1_13_11, MSIOF1_SCK_D, SEL_SOF1_3),
- PINMUX_IPSR_DATA(IP1_16_14, A13),
+ PINMUX_IPSR_GPSR(IP1_16_14, A13),
PINMUX_IPSR_MSEL(IP1_16_14, ATAG0_N_C, SEL_LBS_2),
PINMUX_IPSR_MSEL(IP1_16_14, BPFCLK, SEL_FM_0),
PINMUX_IPSR_MSEL(IP1_16_14, MSIOF1_SS1_D, SEL_SOF1_3),
- PINMUX_IPSR_DATA(IP1_19_17, A14),
+ PINMUX_IPSR_GPSR(IP1_19_17, A14),
PINMUX_IPSR_MSEL(IP1_19_17, ATADIR0_N_C, SEL_LBS_2),
PINMUX_IPSR_MSEL(IP1_19_17, FMIN, SEL_FM_0),
PINMUX_IPSR_MSEL(IP1_19_17, FMIN_C, SEL_FM_2),
PINMUX_IPSR_MSEL(IP1_19_17, MSIOF1_SYNC_D, SEL_SOF1_3),
- PINMUX_IPSR_DATA(IP1_22_20, A15),
+ PINMUX_IPSR_GPSR(IP1_22_20, A15),
PINMUX_IPSR_MSEL(IP1_22_20, BPFCLK_C, SEL_FM_2),
- PINMUX_IPSR_DATA(IP1_25_23, A16),
+ PINMUX_IPSR_GPSR(IP1_25_23, A16),
PINMUX_IPSR_MSEL(IP1_25_23, DREQ2_B, SEL_LBS_1),
PINMUX_IPSR_MSEL(IP1_25_23, FMCLK_C, SEL_FM_2),
PINMUX_IPSR_MSEL(IP1_25_23, SCIFA1_SCK_B, SEL_SCIFA1_1),
- PINMUX_IPSR_DATA(IP1_28_26, A17),
+ PINMUX_IPSR_GPSR(IP1_28_26, A17),
PINMUX_IPSR_MSEL(IP1_28_26, DACK2_B, SEL_LBS_1),
PINMUX_IPSR_MSEL(IP1_28_26, SDA0_C, SEL_IIC0_2),
- PINMUX_IPSR_DATA(IP1_31_29, A18),
+ PINMUX_IPSR_GPSR(IP1_31_29, A18),
PINMUX_IPSR_MSEL(IP1_31_29, DREQ1, SEL_LBS_0),
PINMUX_IPSR_MSEL(IP1_31_29, SCIFA1_RXD_C, SEL_SCIFA1_2),
PINMUX_IPSR_MSEL(IP1_31_29, SCIFB1_RXD_C, SEL_SCIFB1_2),
/* IPSR2 */
- PINMUX_IPSR_DATA(IP2_2_0, A19),
- PINMUX_IPSR_DATA(IP2_2_0, DACK1),
+ PINMUX_IPSR_GPSR(IP2_2_0, A19),
+ PINMUX_IPSR_GPSR(IP2_2_0, DACK1),
PINMUX_IPSR_MSEL(IP2_2_0, SCIFA1_TXD_C, SEL_SCIFA1_2),
PINMUX_IPSR_MSEL(IP2_2_0, SCIFB1_TXD_C, SEL_SCIFB1_2),
PINMUX_IPSR_MSEL(IP2_2_0, SCIFB1_SCK_B, SEL_SCIFB1_1),
- PINMUX_IPSR_DATA(IP2_2_0, A20),
+ PINMUX_IPSR_GPSR(IP2_2_0, A20),
PINMUX_IPSR_MSEL(IP2_4_3, SPCLK, SEL_QSP_0),
- PINMUX_IPSR_DATA(IP2_6_5, A21),
+ PINMUX_IPSR_GPSR(IP2_6_5, A21),
PINMUX_IPSR_MSEL(IP2_6_5, ATAWR0_N_B, SEL_LBS_1),
PINMUX_IPSR_MSEL(IP2_6_5, MOSI_IO0, SEL_QSP_0),
- PINMUX_IPSR_DATA(IP2_9_7, A22),
+ PINMUX_IPSR_GPSR(IP2_9_7, A22),
PINMUX_IPSR_MSEL(IP2_9_7, MISO_IO1, SEL_QSP_0),
PINMUX_IPSR_MSEL(IP2_9_7, FMCLK_B, SEL_FM_1),
PINMUX_IPSR_MSEL(IP2_9_7, TX0, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP2_9_7, SCIFA0_TXD, SEL_SCFA_0),
- PINMUX_IPSR_DATA(IP2_12_10, A23),
+ PINMUX_IPSR_GPSR(IP2_12_10, A23),
PINMUX_IPSR_MSEL(IP2_12_10, IO2, SEL_QSP_0),
PINMUX_IPSR_MSEL(IP2_12_10, BPFCLK_B, SEL_FM_1),
PINMUX_IPSR_MSEL(IP2_12_10, RX0, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP2_12_10, SCIFA0_RXD, SEL_SCFA_0),
- PINMUX_IPSR_DATA(IP2_15_13, A24),
+ PINMUX_IPSR_GPSR(IP2_15_13, A24),
PINMUX_IPSR_MSEL(IP2_15_13, DREQ2, SEL_LBS_0),
PINMUX_IPSR_MSEL(IP2_15_13, IO3, SEL_QSP_0),
PINMUX_IPSR_MSEL(IP2_15_13, TX1, SEL_SCIF1_0),
PINMUX_IPSR_MSEL(IP2_15_13, SCIFA1_TXD, SEL_SCIFA1_0),
- PINMUX_IPSR_DATA(IP2_18_16, A25),
+ PINMUX_IPSR_GPSR(IP2_18_16, A25),
PINMUX_IPSR_MSEL(IP2_18_16, DACK2, SEL_LBS_0),
PINMUX_IPSR_MSEL(IP2_18_16, SSL, SEL_QSP_0),
PINMUX_IPSR_MSEL(IP2_18_16, DREQ1_C, SEL_LBS_2),
PINMUX_IPSR_MSEL(IP2_18_16, RX1, SEL_SCIF1_0),
PINMUX_IPSR_MSEL(IP2_18_16, SCIFA1_RXD, SEL_SCIFA1_0),
- PINMUX_IPSR_DATA(IP2_20_19, CS0_N),
+ PINMUX_IPSR_GPSR(IP2_20_19, CS0_N),
PINMUX_IPSR_MSEL(IP2_20_19, ATAG0_N_B, SEL_LBS_1),
PINMUX_IPSR_MSEL(IP2_20_19, SCL1, SEL_IIC1_0),
- PINMUX_IPSR_DATA(IP2_22_21, CS1_N_A26),
+ PINMUX_IPSR_GPSR(IP2_22_21, CS1_N_A26),
PINMUX_IPSR_MSEL(IP2_22_21, ATADIR0_N_B, SEL_LBS_1),
PINMUX_IPSR_MSEL(IP2_22_21, SDA1, SEL_IIC1_0),
- PINMUX_IPSR_DATA(IP2_24_23, EX_CS1_N),
+ PINMUX_IPSR_GPSR(IP2_24_23, EX_CS1_N),
PINMUX_IPSR_MSEL(IP2_24_23, MSIOF2_SCK, SEL_SOF2_0),
- PINMUX_IPSR_DATA(IP2_26_25, EX_CS2_N),
+ PINMUX_IPSR_GPSR(IP2_26_25, EX_CS2_N),
PINMUX_IPSR_MSEL(IP2_26_25, ATAWR0_N, SEL_LBS_0),
PINMUX_IPSR_MSEL(IP2_26_25, MSIOF2_SYNC, SEL_SOF2_0),
- PINMUX_IPSR_DATA(IP2_29_27, EX_CS3_N),
+ PINMUX_IPSR_GPSR(IP2_29_27, EX_CS3_N),
PINMUX_IPSR_MSEL(IP2_29_27, ATADIR0_N, SEL_LBS_0),
PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_TXD, SEL_SOF2_0),
PINMUX_IPSR_MSEL(IP2_29_27, ATAG0_N, SEL_LBS_0),
- PINMUX_IPSR_DATA(IP2_29_27, EX_WAIT1),
+ PINMUX_IPSR_GPSR(IP2_29_27, EX_WAIT1),
/* IPSR3 */
- PINMUX_IPSR_DATA(IP3_2_0, EX_CS4_N),
+ PINMUX_IPSR_GPSR(IP3_2_0, EX_CS4_N),
PINMUX_IPSR_MSEL(IP3_2_0, ATARD0_N, SEL_LBS_0),
PINMUX_IPSR_MSEL(IP3_2_0, MSIOF2_RXD, SEL_SOF2_0),
- PINMUX_IPSR_DATA(IP3_2_0, EX_WAIT2),
- PINMUX_IPSR_DATA(IP3_5_3, EX_CS5_N),
- PINMUX_IPSR_DATA(IP3_5_3, ATACS00_N),
+ PINMUX_IPSR_GPSR(IP3_2_0, EX_WAIT2),
+ PINMUX_IPSR_GPSR(IP3_5_3, EX_CS5_N),
+ PINMUX_IPSR_GPSR(IP3_5_3, ATACS00_N),
PINMUX_IPSR_MSEL(IP3_5_3, MSIOF2_SS1, SEL_SOF2_0),
PINMUX_IPSR_MSEL(IP3_5_3, HRX1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP3_5_3, SCIFB1_RXD_B, SEL_SCIFB1_1),
- PINMUX_IPSR_DATA(IP3_5_3, PWM1),
- PINMUX_IPSR_DATA(IP3_5_3, TPU_TO1),
- PINMUX_IPSR_DATA(IP3_8_6, BS_N),
- PINMUX_IPSR_DATA(IP3_8_6, ATACS10_N),
+ PINMUX_IPSR_GPSR(IP3_5_3, PWM1),
+ PINMUX_IPSR_GPSR(IP3_5_3, TPU_TO1),
+ PINMUX_IPSR_GPSR(IP3_8_6, BS_N),
+ PINMUX_IPSR_GPSR(IP3_8_6, ATACS10_N),
PINMUX_IPSR_MSEL(IP3_8_6, MSIOF2_SS2, SEL_SOF2_0),
PINMUX_IPSR_MSEL(IP3_8_6, HTX1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP3_8_6, SCIFB1_TXD_B, SEL_SCIFB1_1),
- PINMUX_IPSR_DATA(IP3_8_6, PWM2),
- PINMUX_IPSR_DATA(IP3_8_6, TPU_TO2),
- PINMUX_IPSR_DATA(IP3_11_9, RD_WR_N),
+ PINMUX_IPSR_GPSR(IP3_8_6, PWM2),
+ PINMUX_IPSR_GPSR(IP3_8_6, TPU_TO2),
+ PINMUX_IPSR_GPSR(IP3_11_9, RD_WR_N),
PINMUX_IPSR_MSEL(IP3_11_9, HRX2_B, SEL_HSCIF2_1),
PINMUX_IPSR_MSEL(IP3_11_9, FMIN_B, SEL_FM_1),
PINMUX_IPSR_MSEL(IP3_11_9, SCIFB0_RXD_B, SEL_SCIFB_1),
PINMUX_IPSR_MSEL(IP3_11_9, DREQ1_D, SEL_LBS_1),
- PINMUX_IPSR_DATA(IP3_13_12, WE0_N),
+ PINMUX_IPSR_GPSR(IP3_13_12, WE0_N),
PINMUX_IPSR_MSEL(IP3_13_12, HCTS2_N_B, SEL_HSCIF2_1),
PINMUX_IPSR_MSEL(IP3_13_12, SCIFB0_TXD_B, SEL_SCIFB_1),
- PINMUX_IPSR_DATA(IP3_15_14, WE1_N),
+ PINMUX_IPSR_GPSR(IP3_15_14, WE1_N),
PINMUX_IPSR_MSEL(IP3_15_14, ATARD0_N_B, SEL_LBS_1),
PINMUX_IPSR_MSEL(IP3_15_14, HTX2_B, SEL_HSCIF2_1),
PINMUX_IPSR_MSEL(IP3_15_14, SCIFB0_RTS_N_B, SEL_SCIFB_1),
- PINMUX_IPSR_DATA(IP3_17_16, EX_WAIT0),
+ PINMUX_IPSR_GPSR(IP3_17_16, EX_WAIT0),
PINMUX_IPSR_MSEL(IP3_17_16, HRTS2_N_B, SEL_HSCIF2_1),
PINMUX_IPSR_MSEL(IP3_17_16, SCIFB0_CTS_N_B, SEL_SCIFB_1),
- PINMUX_IPSR_DATA(IP3_19_18, DREQ0),
- PINMUX_IPSR_DATA(IP3_19_18, PWM3),
- PINMUX_IPSR_DATA(IP3_19_18, TPU_TO3),
- PINMUX_IPSR_DATA(IP3_21_20, DACK0),
- PINMUX_IPSR_DATA(IP3_21_20, DRACK0),
+ PINMUX_IPSR_GPSR(IP3_19_18, DREQ0),
+ PINMUX_IPSR_GPSR(IP3_19_18, PWM3),
+ PINMUX_IPSR_GPSR(IP3_19_18, TPU_TO3),
+ PINMUX_IPSR_GPSR(IP3_21_20, DACK0),
+ PINMUX_IPSR_GPSR(IP3_21_20, DRACK0),
PINMUX_IPSR_MSEL(IP3_21_20, REMOCON, SEL_RCN_0),
PINMUX_IPSR_MSEL(IP3_24_22, SPEEDIN, SEL_RSP_0),
PINMUX_IPSR_MSEL(IP3_24_22, HSCK0_C, SEL_HSCIF0_2),
@@ -995,61 +995,61 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP4_9_8, SDA1_B, SEL_IIC1_1),
PINMUX_IPSR_MSEL(IP4_9_8, SDA8_B, SEL_IIC8_1),
PINMUX_IPSR_MSEL(IP4_9_8, MSIOF2_RXD_C, SEL_SOF2_2),
- PINMUX_IPSR_DATA(IP4_12_10, SSI_SCK2),
+ PINMUX_IPSR_GPSR(IP4_12_10, SSI_SCK2),
PINMUX_IPSR_MSEL(IP4_12_10, SCL2, SEL_IIC2_0),
PINMUX_IPSR_MSEL(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP4_15_13, SSI_WS2),
+ PINMUX_IPSR_GPSR(IP4_15_13, SSI_WS2),
PINMUX_IPSR_MSEL(IP4_15_13, SDA2, SEL_IIC2_0),
PINMUX_IPSR_MSEL(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_15_13, RX2_E, SEL_SCIF2_4),
PINMUX_IPSR_MSEL(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP4_18_16, SSI_SDATA2),
+ PINMUX_IPSR_GPSR(IP4_18_16, SSI_SDATA2),
PINMUX_IPSR_MSEL(IP4_18_16, GPS_MAG_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_18_16, TX2_E, SEL_SCIF2_4),
- PINMUX_IPSR_DATA(IP4_19, SSI_SCK34),
- PINMUX_IPSR_DATA(IP4_20, SSI_WS34),
- PINMUX_IPSR_DATA(IP4_21, SSI_SDATA3),
- PINMUX_IPSR_DATA(IP4_23_22, SSI_SCK4),
+ PINMUX_IPSR_GPSR(IP4_19, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP4_20, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP4_21, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP4_23_22, SSI_SCK4),
PINMUX_IPSR_MSEL(IP4_23_22, GLO_SS_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP4_25_24, SSI_WS4),
+ PINMUX_IPSR_GPSR(IP4_25_24, SSI_WS4),
PINMUX_IPSR_MSEL(IP4_25_24, GLO_RFON_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP4_27_26, SSI_SDATA4),
+ PINMUX_IPSR_GPSR(IP4_27_26, SSI_SDATA4),
PINMUX_IPSR_MSEL(IP4_27_26, MSIOF2_SCK_D, SEL_SOF2_3),
- PINMUX_IPSR_DATA(IP4_30_28, SSI_SCK5),
+ PINMUX_IPSR_GPSR(IP4_30_28, SSI_SCK5),
PINMUX_IPSR_MSEL(IP4_30_28, MSIOF1_SCK_C, SEL_SOF1_2),
PINMUX_IPSR_MSEL(IP4_30_28, TS_SDATA0, SEL_TSIF0_0),
PINMUX_IPSR_MSEL(IP4_30_28, GLO_I0, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP4_30_28, MSIOF2_SYNC_D, SEL_SOF2_3),
- PINMUX_IPSR_DATA(IP4_30_28, VI1_R2_B),
+ PINMUX_IPSR_GPSR(IP4_30_28, VI1_R2_B),
/* IPSR5 */
- PINMUX_IPSR_DATA(IP5_2_0, SSI_WS5),
+ PINMUX_IPSR_GPSR(IP5_2_0, SSI_WS5),
PINMUX_IPSR_MSEL(IP5_2_0, MSIOF1_SYNC_C, SEL_SOF1_2),
PINMUX_IPSR_MSEL(IP5_2_0, TS_SCK0, SEL_TSIF0_0),
PINMUX_IPSR_MSEL(IP5_2_0, GLO_I1, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP5_2_0, MSIOF2_TXD_D, SEL_SOF2_3),
- PINMUX_IPSR_DATA(IP5_2_0, VI1_R3_B),
- PINMUX_IPSR_DATA(IP5_5_3, SSI_SDATA5),
+ PINMUX_IPSR_GPSR(IP5_2_0, VI1_R3_B),
+ PINMUX_IPSR_GPSR(IP5_5_3, SSI_SDATA5),
PINMUX_IPSR_MSEL(IP5_5_3, MSIOF1_TXD_C, SEL_SOF1_2),
PINMUX_IPSR_MSEL(IP5_5_3, TS_SDEN0, SEL_TSIF0_0),
PINMUX_IPSR_MSEL(IP5_5_3, GLO_Q0, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP5_5_3, MSIOF2_SS1_D, SEL_SOF2_3),
- PINMUX_IPSR_DATA(IP5_5_3, VI1_R4_B),
- PINMUX_IPSR_DATA(IP5_8_6, SSI_SCK6),
+ PINMUX_IPSR_GPSR(IP5_5_3, VI1_R4_B),
+ PINMUX_IPSR_GPSR(IP5_8_6, SSI_SCK6),
PINMUX_IPSR_MSEL(IP5_8_6, MSIOF1_RXD_C, SEL_SOF1_2),
PINMUX_IPSR_MSEL(IP5_8_6, TS_SPSYNC0, SEL_TSIF0_0),
PINMUX_IPSR_MSEL(IP5_8_6, GLO_Q1, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP5_8_6, MSIOF2_RXD_D, SEL_SOF2_3),
- PINMUX_IPSR_DATA(IP5_8_6, VI1_R5_B),
- PINMUX_IPSR_DATA(IP5_11_9, SSI_WS6),
+ PINMUX_IPSR_GPSR(IP5_8_6, VI1_R5_B),
+ PINMUX_IPSR_GPSR(IP5_11_9, SSI_WS6),
PINMUX_IPSR_MSEL(IP5_11_9, GLO_SCLK, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP5_11_9, MSIOF2_SS2_D, SEL_SOF2_3),
- PINMUX_IPSR_DATA(IP5_11_9, VI1_R6_B),
- PINMUX_IPSR_DATA(IP5_14_12, SSI_SDATA6),
+ PINMUX_IPSR_GPSR(IP5_11_9, VI1_R6_B),
+ PINMUX_IPSR_GPSR(IP5_14_12, SSI_SDATA6),
PINMUX_IPSR_MSEL(IP5_14_12, STP_IVCXO27_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP5_14_12, GLO_SDATA, SEL_GPS_0),
- PINMUX_IPSR_DATA(IP5_14_12, VI1_R7_B),
+ PINMUX_IPSR_GPSR(IP5_14_12, VI1_R7_B),
PINMUX_IPSR_MSEL(IP5_16_15, SSI_SCK78, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP5_16_15, STP_ISCLK_0_B, SEL_SSP_1),
PINMUX_IPSR_MSEL(IP5_16_15, GLO_SS, SEL_GPS_0),
@@ -1080,307 +1080,307 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP6_2_0, MSIOF1_SCK_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK, SEL_SCIF_0),
PINMUX_IPSR_MSEL(IP6_2_0, BPFCLK_E, SEL_FM_4),
- PINMUX_IPSR_DATA(IP6_5_3, AUDIO_CLKC),
+ PINMUX_IPSR_GPSR(IP6_5_3, AUDIO_CLKC),
PINMUX_IPSR_MSEL(IP6_5_3, SCIFB0_SCK_C, SEL_SCIFB_2),
PINMUX_IPSR_MSEL(IP6_5_3, MSIOF1_SYNC_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_5_3, RX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP6_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
- PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP6_7_6, AUDIO_CLKOUT),
PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
- PINMUX_IPSR_DATA(IP6_9_8, IRQ0),
+ PINMUX_IPSR_GPSR(IP6_9_8, IRQ0),
PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
- PINMUX_IPSR_DATA(IP6_9_8, INTC_IRQ0_N),
- PINMUX_IPSR_DATA(IP6_11_10, IRQ1),
+ PINMUX_IPSR_GPSR(IP6_9_8, INTC_IRQ0_N),
+ PINMUX_IPSR_GPSR(IP6_11_10, IRQ1),
PINMUX_IPSR_MSEL(IP6_11_10, SCIFB1_SCK_C, SEL_SCIFB1_2),
- PINMUX_IPSR_DATA(IP6_11_10, INTC_IRQ1_N),
- PINMUX_IPSR_DATA(IP6_13_12, IRQ2),
+ PINMUX_IPSR_GPSR(IP6_11_10, INTC_IRQ1_N),
+ PINMUX_IPSR_GPSR(IP6_13_12, IRQ2),
PINMUX_IPSR_MSEL(IP6_13_12, SCIFB1_TXD_D, SEL_SCIFB1_3),
- PINMUX_IPSR_DATA(IP6_13_12, INTC_IRQ2_N),
- PINMUX_IPSR_DATA(IP6_15_14, IRQ3),
+ PINMUX_IPSR_GPSR(IP6_13_12, INTC_IRQ2_N),
+ PINMUX_IPSR_GPSR(IP6_15_14, IRQ3),
PINMUX_IPSR_MSEL(IP6_15_14, SCL4_C, SEL_IIC4_2),
PINMUX_IPSR_MSEL(IP6_15_14, MSIOF2_TXD_E, SEL_SOF2_4),
- PINMUX_IPSR_DATA(IP6_15_14, INTC_IRQ4_N),
- PINMUX_IPSR_DATA(IP6_18_16, IRQ4),
+ PINMUX_IPSR_GPSR(IP6_15_14, INTC_IRQ4_N),
+ PINMUX_IPSR_GPSR(IP6_18_16, IRQ4),
PINMUX_IPSR_MSEL(IP6_18_16, HRX1_C, SEL_HSCIF1_2),
PINMUX_IPSR_MSEL(IP6_18_16, SDA4_C, SEL_IIC4_2),
PINMUX_IPSR_MSEL(IP6_18_16, MSIOF2_RXD_E, SEL_SOF2_4),
- PINMUX_IPSR_DATA(IP6_18_16, INTC_IRQ4_N),
- PINMUX_IPSR_DATA(IP6_20_19, IRQ5),
+ PINMUX_IPSR_GPSR(IP6_18_16, INTC_IRQ4_N),
+ PINMUX_IPSR_GPSR(IP6_20_19, IRQ5),
PINMUX_IPSR_MSEL(IP6_20_19, HTX1_C, SEL_HSCIF1_2),
PINMUX_IPSR_MSEL(IP6_20_19, SCL1_E, SEL_IIC1_4),
PINMUX_IPSR_MSEL(IP6_20_19, MSIOF2_SCK_E, SEL_SOF2_4),
- PINMUX_IPSR_DATA(IP6_23_21, IRQ6),
+ PINMUX_IPSR_GPSR(IP6_23_21, IRQ6),
PINMUX_IPSR_MSEL(IP6_23_21, HSCK1_C, SEL_HSCIF1_2),
PINMUX_IPSR_MSEL(IP6_23_21, MSIOF1_SS2_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_23_21, SDA1_E, SEL_IIC1_4),
PINMUX_IPSR_MSEL(IP6_23_21, MSIOF2_SYNC_E, SEL_SOF2_4),
- PINMUX_IPSR_DATA(IP6_26_24, IRQ7),
+ PINMUX_IPSR_GPSR(IP6_26_24, IRQ7),
PINMUX_IPSR_MSEL(IP6_26_24, HCTS1_N_C, SEL_HSCIF1_2),
PINMUX_IPSR_MSEL(IP6_26_24, MSIOF1_TXD_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_26_24, GPS_CLK_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP6_26_24, GPS_CLK_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP6_29_27, IRQ8),
+ PINMUX_IPSR_GPSR(IP6_29_27, IRQ8),
PINMUX_IPSR_MSEL(IP6_29_27, HRTS1_N_C, SEL_HSCIF1_2),
PINMUX_IPSR_MSEL(IP6_29_27, MSIOF1_RXD_B, SEL_SOF1_1),
PINMUX_IPSR_MSEL(IP6_29_27, GPS_SIGN_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP6_29_27, GPS_SIGN_D, SEL_GPS_3),
/* IPSR7 */
- PINMUX_IPSR_DATA(IP7_2_0, IRQ9),
+ PINMUX_IPSR_GPSR(IP7_2_0, IRQ9),
PINMUX_IPSR_MSEL(IP7_2_0, DU1_DOTCLKIN_B, SEL_DIS_1),
PINMUX_IPSR_MSEL(IP7_2_0, CAN_CLK_D, SEL_CANCLK_3),
PINMUX_IPSR_MSEL(IP7_2_0, GPS_MAG_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP7_2_0, SCIF_CLK_B, SEL_SCIF_1),
PINMUX_IPSR_MSEL(IP7_2_0, GPS_MAG_D, SEL_GPS_3),
- PINMUX_IPSR_DATA(IP7_5_3, DU1_DR0),
- PINMUX_IPSR_DATA(IP7_5_3, LCDOUT0),
+ PINMUX_IPSR_GPSR(IP7_5_3, DU1_DR0),
+ PINMUX_IPSR_GPSR(IP7_5_3, LCDOUT0),
PINMUX_IPSR_MSEL(IP7_5_3, VI1_DATA0_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP7_5_3, TX0_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP7_5_3, SCIFA0_TXD_B, SEL_SCFA_1),
PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_SOF2_1),
- PINMUX_IPSR_DATA(IP7_8_6, DU1_DR1),
- PINMUX_IPSR_DATA(IP7_8_6, LCDOUT1),
+ PINMUX_IPSR_GPSR(IP7_8_6, DU1_DR1),
+ PINMUX_IPSR_GPSR(IP7_8_6, LCDOUT1),
PINMUX_IPSR_MSEL(IP7_8_6, VI1_DATA1_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP7_8_6, RX0_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP7_8_6, SCIFA0_RXD_B, SEL_SCFA_1),
PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_SOF2_1),
- PINMUX_IPSR_DATA(IP7_10_9, DU1_DR2),
- PINMUX_IPSR_DATA(IP7_10_9, LCDOUT2),
+ PINMUX_IPSR_GPSR(IP7_10_9, DU1_DR2),
+ PINMUX_IPSR_GPSR(IP7_10_9, LCDOUT2),
PINMUX_IPSR_MSEL(IP7_10_9, SSI_SCK0129_B, SEL_SSI0_1),
- PINMUX_IPSR_DATA(IP7_12_11, DU1_DR3),
- PINMUX_IPSR_DATA(IP7_12_11, LCDOUT3),
+ PINMUX_IPSR_GPSR(IP7_12_11, DU1_DR3),
+ PINMUX_IPSR_GPSR(IP7_12_11, LCDOUT3),
PINMUX_IPSR_MSEL(IP7_12_11, SSI_WS0129_B, SEL_SSI0_1),
- PINMUX_IPSR_DATA(IP7_14_13, DU1_DR4),
- PINMUX_IPSR_DATA(IP7_14_13, LCDOUT4),
+ PINMUX_IPSR_GPSR(IP7_14_13, DU1_DR4),
+ PINMUX_IPSR_GPSR(IP7_14_13, LCDOUT4),
PINMUX_IPSR_MSEL(IP7_14_13, SSI_SDATA0_B, SEL_SSI0_1),
- PINMUX_IPSR_DATA(IP7_16_15, DU1_DR5),
- PINMUX_IPSR_DATA(IP7_16_15, LCDOUT5),
+ PINMUX_IPSR_GPSR(IP7_16_15, DU1_DR5),
+ PINMUX_IPSR_GPSR(IP7_16_15, LCDOUT5),
PINMUX_IPSR_MSEL(IP7_16_15, SSI_SCK1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP7_18_17, DU1_DR6),
- PINMUX_IPSR_DATA(IP7_18_17, LCDOUT6),
+ PINMUX_IPSR_GPSR(IP7_18_17, DU1_DR6),
+ PINMUX_IPSR_GPSR(IP7_18_17, LCDOUT6),
PINMUX_IPSR_MSEL(IP7_18_17, SSI_WS1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP7_20_19, DU1_DR7),
- PINMUX_IPSR_DATA(IP7_20_19, LCDOUT7),
+ PINMUX_IPSR_GPSR(IP7_20_19, DU1_DR7),
+ PINMUX_IPSR_GPSR(IP7_20_19, LCDOUT7),
PINMUX_IPSR_MSEL(IP7_20_19, SSI_SDATA1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP7_23_21, DU1_DG0),
- PINMUX_IPSR_DATA(IP7_23_21, LCDOUT8),
+ PINMUX_IPSR_GPSR(IP7_23_21, DU1_DG0),
+ PINMUX_IPSR_GPSR(IP7_23_21, LCDOUT8),
PINMUX_IPSR_MSEL(IP7_23_21, VI1_DATA2_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP7_23_21, TX1_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP7_23_21, SCIFA1_TXD_B, SEL_SCIFA1_1),
PINMUX_IPSR_MSEL(IP7_23_21, MSIOF2_SS1_B, SEL_SOF2_1),
- PINMUX_IPSR_DATA(IP7_26_24, DU1_DG1),
- PINMUX_IPSR_DATA(IP7_26_24, LCDOUT9),
+ PINMUX_IPSR_GPSR(IP7_26_24, DU1_DG1),
+ PINMUX_IPSR_GPSR(IP7_26_24, LCDOUT9),
PINMUX_IPSR_MSEL(IP7_26_24, VI1_DATA3_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP7_26_24, RX1_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP7_26_24, SCIFA1_RXD_B, SEL_SCIFA1_1),
PINMUX_IPSR_MSEL(IP7_26_24, MSIOF2_SS2_B, SEL_SOF2_1),
- PINMUX_IPSR_DATA(IP7_29_27, DU1_DG2),
- PINMUX_IPSR_DATA(IP7_29_27, LCDOUT10),
+ PINMUX_IPSR_GPSR(IP7_29_27, DU1_DG2),
+ PINMUX_IPSR_GPSR(IP7_29_27, LCDOUT10),
PINMUX_IPSR_MSEL(IP7_29_27, VI1_DATA4_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP7_29_27, SCIF1_SCK_B),
+ PINMUX_IPSR_GPSR(IP7_29_27, SCIF1_SCK_B),
PINMUX_IPSR_MSEL(IP7_29_27, SCIFA1_SCK, SEL_SCIFA1_0),
PINMUX_IPSR_MSEL(IP7_29_27, SSI_SCK78_B, SEL_SSI7_1),
/* IPSR8 */
- PINMUX_IPSR_DATA(IP8_2_0, DU1_DG3),
- PINMUX_IPSR_DATA(IP8_2_0, LCDOUT11),
+ PINMUX_IPSR_GPSR(IP8_2_0, DU1_DG3),
+ PINMUX_IPSR_GPSR(IP8_2_0, LCDOUT11),
PINMUX_IPSR_MSEL(IP8_2_0, VI1_DATA5_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP8_2_0, SSI_WS78_B, SEL_SSI7_1),
- PINMUX_IPSR_DATA(IP8_5_3, DU1_DG4),
- PINMUX_IPSR_DATA(IP8_5_3, LCDOUT12),
+ PINMUX_IPSR_GPSR(IP8_5_3, DU1_DG4),
+ PINMUX_IPSR_GPSR(IP8_5_3, LCDOUT12),
PINMUX_IPSR_MSEL(IP8_5_3, VI1_DATA6_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP8_5_3, HRX0_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP8_5_3, SCIFB2_RXD_B, SEL_SCIFB2_1),
PINMUX_IPSR_MSEL(IP8_5_3, SSI_SDATA7_B, SEL_SSI7_1),
- PINMUX_IPSR_DATA(IP8_8_6, DU1_DG5),
- PINMUX_IPSR_DATA(IP8_8_6, LCDOUT13),
+ PINMUX_IPSR_GPSR(IP8_8_6, DU1_DG5),
+ PINMUX_IPSR_GPSR(IP8_8_6, LCDOUT13),
PINMUX_IPSR_MSEL(IP8_8_6, VI1_DATA7_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP8_8_6, HCTS0_N_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP8_8_6, SCIFB2_TXD_B, SEL_SCIFB2_1),
PINMUX_IPSR_MSEL(IP8_8_6, SSI_SDATA8_B, SEL_SSI8_1),
- PINMUX_IPSR_DATA(IP8_11_9, DU1_DG6),
- PINMUX_IPSR_DATA(IP8_11_9, LCDOUT14),
+ PINMUX_IPSR_GPSR(IP8_11_9, DU1_DG6),
+ PINMUX_IPSR_GPSR(IP8_11_9, LCDOUT14),
PINMUX_IPSR_MSEL(IP8_11_9, HRTS0_N_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP8_11_9, SCIFB2_CTS_N_B, SEL_SCIFB2_1),
PINMUX_IPSR_MSEL(IP8_11_9, SSI_SCK9_B, SEL_SSI9_1),
- PINMUX_IPSR_DATA(IP8_14_12, DU1_DG7),
- PINMUX_IPSR_DATA(IP8_14_12, LCDOUT15),
+ PINMUX_IPSR_GPSR(IP8_14_12, DU1_DG7),
+ PINMUX_IPSR_GPSR(IP8_14_12, LCDOUT15),
PINMUX_IPSR_MSEL(IP8_14_12, HTX0_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP8_14_12, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
PINMUX_IPSR_MSEL(IP8_14_12, SSI_WS9_B, SEL_SSI9_1),
- PINMUX_IPSR_DATA(IP8_17_15, DU1_DB0),
- PINMUX_IPSR_DATA(IP8_17_15, LCDOUT16),
+ PINMUX_IPSR_GPSR(IP8_17_15, DU1_DB0),
+ PINMUX_IPSR_GPSR(IP8_17_15, LCDOUT16),
PINMUX_IPSR_MSEL(IP8_17_15, VI1_CLK_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP8_17_15, TX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP8_17_15, SCIFA2_TXD_B, SEL_SCIFA2_1),
PINMUX_IPSR_MSEL(IP8_17_15, MSIOF2_TXD_B, SEL_SOF2_1),
- PINMUX_IPSR_DATA(IP8_20_18, DU1_DB1),
- PINMUX_IPSR_DATA(IP8_20_18, LCDOUT17),
+ PINMUX_IPSR_GPSR(IP8_20_18, DU1_DB1),
+ PINMUX_IPSR_GPSR(IP8_20_18, LCDOUT17),
PINMUX_IPSR_MSEL(IP8_20_18, VI1_HSYNC_N_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP8_20_18, RX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP8_20_18, SCIFA2_RXD_B, SEL_SCIFA2_1),
PINMUX_IPSR_MSEL(IP8_20_18, MSIOF2_RXD_B, SEL_SOF2_1),
- PINMUX_IPSR_DATA(IP8_23_21, DU1_DB2),
- PINMUX_IPSR_DATA(IP8_23_21, LCDOUT18),
+ PINMUX_IPSR_GPSR(IP8_23_21, DU1_DB2),
+ PINMUX_IPSR_GPSR(IP8_23_21, LCDOUT18),
PINMUX_IPSR_MSEL(IP8_23_21, VI1_VSYNC_N_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP8_23_21, SCIF2_SCK_B),
+ PINMUX_IPSR_GPSR(IP8_23_21, SCIF2_SCK_B),
PINMUX_IPSR_MSEL(IP8_23_21, SCIFA2_SCK, SEL_SCIFA2_1),
PINMUX_IPSR_MSEL(IP8_23_21, SSI_SDATA9_B, SEL_SSI9_1),
- PINMUX_IPSR_DATA(IP8_25_24, DU1_DB3),
- PINMUX_IPSR_DATA(IP8_25_24, LCDOUT19),
+ PINMUX_IPSR_GPSR(IP8_25_24, DU1_DB3),
+ PINMUX_IPSR_GPSR(IP8_25_24, LCDOUT19),
PINMUX_IPSR_MSEL(IP8_25_24, VI1_CLKENB_B, SEL_VI1_1),
- PINMUX_IPSR_DATA(IP8_27_26, DU1_DB4),
- PINMUX_IPSR_DATA(IP8_27_26, LCDOUT20),
+ PINMUX_IPSR_GPSR(IP8_27_26, DU1_DB4),
+ PINMUX_IPSR_GPSR(IP8_27_26, LCDOUT20),
PINMUX_IPSR_MSEL(IP8_27_26, VI1_FIELD_B, SEL_VI1_1),
PINMUX_IPSR_MSEL(IP8_27_26, CAN1_RX, SEL_CAN1_0),
- PINMUX_IPSR_DATA(IP8_30_28, DU1_DB5),
- PINMUX_IPSR_DATA(IP8_30_28, LCDOUT21),
+ PINMUX_IPSR_GPSR(IP8_30_28, DU1_DB5),
+ PINMUX_IPSR_GPSR(IP8_30_28, LCDOUT21),
PINMUX_IPSR_MSEL(IP8_30_28, TX3, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP8_30_28, SCIFA3_TXD, SEL_SCIFA3_0),
PINMUX_IPSR_MSEL(IP8_30_28, CAN1_TX, SEL_CAN1_0),
/* IPSR9 */
- PINMUX_IPSR_DATA(IP9_2_0, DU1_DB6),
- PINMUX_IPSR_DATA(IP9_2_0, LCDOUT22),
+ PINMUX_IPSR_GPSR(IP9_2_0, DU1_DB6),
+ PINMUX_IPSR_GPSR(IP9_2_0, LCDOUT22),
PINMUX_IPSR_MSEL(IP9_2_0, SCL3_C, SEL_IIC3_2),
PINMUX_IPSR_MSEL(IP9_2_0, RX3, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP9_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
- PINMUX_IPSR_DATA(IP9_5_3, DU1_DB7),
- PINMUX_IPSR_DATA(IP9_5_3, LCDOUT23),
+ PINMUX_IPSR_GPSR(IP9_5_3, DU1_DB7),
+ PINMUX_IPSR_GPSR(IP9_5_3, LCDOUT23),
PINMUX_IPSR_MSEL(IP9_5_3, SDA3_C, SEL_IIC3_2),
PINMUX_IPSR_MSEL(IP9_5_3, SCIF3_SCK, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP9_5_3, SCIFA3_SCK, SEL_SCIFA3_0),
PINMUX_IPSR_MSEL(IP9_6, DU1_DOTCLKIN, SEL_DIS_0),
- PINMUX_IPSR_DATA(IP9_6, QSTVA_QVS),
- PINMUX_IPSR_DATA(IP9_7, DU1_DOTCLKOUT0),
- PINMUX_IPSR_DATA(IP9_7, QCLK),
- PINMUX_IPSR_DATA(IP9_10_8, DU1_DOTCLKOUT1),
- PINMUX_IPSR_DATA(IP9_10_8, QSTVB_QVE),
+ PINMUX_IPSR_GPSR(IP9_6, QSTVA_QVS),
+ PINMUX_IPSR_GPSR(IP9_7, DU1_DOTCLKOUT0),
+ PINMUX_IPSR_GPSR(IP9_7, QCLK),
+ PINMUX_IPSR_GPSR(IP9_10_8, DU1_DOTCLKOUT1),
+ PINMUX_IPSR_GPSR(IP9_10_8, QSTVB_QVE),
PINMUX_IPSR_MSEL(IP9_10_8, CAN0_TX, SEL_CAN0_0),
PINMUX_IPSR_MSEL(IP9_10_8, TX3_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP9_10_8, SCL2_B, SEL_IIC2_1),
- PINMUX_IPSR_DATA(IP9_10_8, PWM4),
- PINMUX_IPSR_DATA(IP9_11, DU1_EXHSYNC_DU1_HSYNC),
- PINMUX_IPSR_DATA(IP9_11, QSTH_QHS),
- PINMUX_IPSR_DATA(IP9_12, DU1_EXVSYNC_DU1_VSYNC),
- PINMUX_IPSR_DATA(IP9_12, QSTB_QHE),
- PINMUX_IPSR_DATA(IP9_15_13, DU1_EXODDF_DU1_ODDF_DISP_CDE),
- PINMUX_IPSR_DATA(IP9_15_13, QCPV_QDE),
+ PINMUX_IPSR_GPSR(IP9_10_8, PWM4),
+ PINMUX_IPSR_GPSR(IP9_11, DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_IPSR_GPSR(IP9_11, QSTH_QHS),
+ PINMUX_IPSR_GPSR(IP9_12, DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_IPSR_GPSR(IP9_12, QSTB_QHE),
+ PINMUX_IPSR_GPSR(IP9_15_13, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP9_15_13, QCPV_QDE),
PINMUX_IPSR_MSEL(IP9_15_13, CAN0_RX, SEL_CAN0_0),
PINMUX_IPSR_MSEL(IP9_15_13, RX3_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP9_15_13, SDA2_B, SEL_IIC2_1),
- PINMUX_IPSR_DATA(IP9_16, DU1_DISP),
- PINMUX_IPSR_DATA(IP9_16, QPOLA),
- PINMUX_IPSR_DATA(IP9_18_17, DU1_CDE),
- PINMUX_IPSR_DATA(IP9_18_17, QPOLB),
- PINMUX_IPSR_DATA(IP9_18_17, PWM4_B),
- PINMUX_IPSR_DATA(IP9_20_19, VI0_CLKENB),
+ PINMUX_IPSR_GPSR(IP9_16, DU1_DISP),
+ PINMUX_IPSR_GPSR(IP9_16, QPOLA),
+ PINMUX_IPSR_GPSR(IP9_18_17, DU1_CDE),
+ PINMUX_IPSR_GPSR(IP9_18_17, QPOLB),
+ PINMUX_IPSR_GPSR(IP9_18_17, PWM4_B),
+ PINMUX_IPSR_GPSR(IP9_20_19, VI0_CLKENB),
PINMUX_IPSR_MSEL(IP9_20_19, TX4, SEL_SCIF4_0),
PINMUX_IPSR_MSEL(IP9_20_19, SCIFA4_TXD, SEL_SCIFA4_0),
PINMUX_IPSR_MSEL(IP9_20_19, TS_SDATA0_D, SEL_TSIF0_3),
- PINMUX_IPSR_DATA(IP9_22_21, VI0_FIELD),
+ PINMUX_IPSR_GPSR(IP9_22_21, VI0_FIELD),
PINMUX_IPSR_MSEL(IP9_22_21, RX4, SEL_SCIF4_0),
PINMUX_IPSR_MSEL(IP9_22_21, SCIFA4_RXD, SEL_SCIFA4_0),
PINMUX_IPSR_MSEL(IP9_22_21, TS_SCK0_D, SEL_TSIF0_3),
- PINMUX_IPSR_DATA(IP9_24_23, VI0_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP9_24_23, VI0_HSYNC_N),
PINMUX_IPSR_MSEL(IP9_24_23, TX5, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP9_24_23, SCIFA5_TXD, SEL_SCIFA5_0),
PINMUX_IPSR_MSEL(IP9_24_23, TS_SDEN0_D, SEL_TSIF0_3),
- PINMUX_IPSR_DATA(IP9_26_25, VI0_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP9_26_25, VI0_VSYNC_N),
PINMUX_IPSR_MSEL(IP9_26_25, RX5, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP9_26_25, SCIFA5_RXD, SEL_SCIFA5_0),
PINMUX_IPSR_MSEL(IP9_26_25, TS_SPSYNC0_D, SEL_TSIF0_3),
- PINMUX_IPSR_DATA(IP9_28_27, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_GPSR(IP9_28_27, VI0_DATA3_VI0_B3),
PINMUX_IPSR_MSEL(IP9_28_27, SCIF3_SCK_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP9_28_27, SCIFA3_SCK_B, SEL_SCIFA3_1),
- PINMUX_IPSR_DATA(IP9_31_29, VI0_G0),
+ PINMUX_IPSR_GPSR(IP9_31_29, VI0_G0),
PINMUX_IPSR_MSEL(IP9_31_29, SCL8, SEL_IIC8_0),
PINMUX_IPSR_MSEL(IP9_31_29, STP_IVCXO27_0_C, SEL_SSP_2),
PINMUX_IPSR_MSEL(IP9_31_29, SCL4, SEL_IIC4_0),
PINMUX_IPSR_MSEL(IP9_31_29, HCTS2_N, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP9_31_29, SCIFB2_CTS_N, SEL_SCIFB2_0),
- PINMUX_IPSR_DATA(IP9_31_29, ATAWR1_N),
+ PINMUX_IPSR_GPSR(IP9_31_29, ATAWR1_N),
/* IPSR10 */
- PINMUX_IPSR_DATA(IP10_2_0, VI0_G1),
+ PINMUX_IPSR_GPSR(IP10_2_0, VI0_G1),
PINMUX_IPSR_MSEL(IP10_2_0, SDA8, SEL_IIC8_0),
PINMUX_IPSR_MSEL(IP10_2_0, STP_ISCLK_0_C, SEL_SSP_2),
PINMUX_IPSR_MSEL(IP10_2_0, SDA4, SEL_IIC4_0),
PINMUX_IPSR_MSEL(IP10_2_0, HRTS2_N, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP10_2_0, SCIFB2_RTS_N, SEL_SCIFB2_0),
- PINMUX_IPSR_DATA(IP10_2_0, ATADIR1_N),
- PINMUX_IPSR_DATA(IP10_5_3, VI0_G2),
- PINMUX_IPSR_DATA(IP10_5_3, VI2_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP10_2_0, ATADIR1_N),
+ PINMUX_IPSR_GPSR(IP10_5_3, VI0_G2),
+ PINMUX_IPSR_GPSR(IP10_5_3, VI2_HSYNC_N),
PINMUX_IPSR_MSEL(IP10_5_3, STP_ISD_0_C, SEL_SSP_2),
PINMUX_IPSR_MSEL(IP10_5_3, SCL3_B, SEL_IIC3_1),
PINMUX_IPSR_MSEL(IP10_5_3, HSCK2, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP10_5_3, SCIFB2_SCK, SEL_SCIFB2_0),
- PINMUX_IPSR_DATA(IP10_5_3, ATARD1_N),
- PINMUX_IPSR_DATA(IP10_8_6, VI0_G3),
- PINMUX_IPSR_DATA(IP10_8_6, VI2_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP10_5_3, ATARD1_N),
+ PINMUX_IPSR_GPSR(IP10_8_6, VI0_G3),
+ PINMUX_IPSR_GPSR(IP10_8_6, VI2_VSYNC_N),
PINMUX_IPSR_MSEL(IP10_8_6, STP_ISEN_0_C, SEL_SSP_2),
PINMUX_IPSR_MSEL(IP10_8_6, SDA3_B, SEL_IIC3_1),
PINMUX_IPSR_MSEL(IP10_8_6, HRX2, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP10_8_6, SCIFB2_RXD, SEL_SCIFB2_0),
- PINMUX_IPSR_DATA(IP10_8_6, ATACS01_N),
- PINMUX_IPSR_DATA(IP10_11_9, VI0_G4),
- PINMUX_IPSR_DATA(IP10_11_9, VI2_CLKENB),
+ PINMUX_IPSR_GPSR(IP10_8_6, ATACS01_N),
+ PINMUX_IPSR_GPSR(IP10_11_9, VI0_G4),
+ PINMUX_IPSR_GPSR(IP10_11_9, VI2_CLKENB),
PINMUX_IPSR_MSEL(IP10_11_9, STP_ISSYNC_0_C, SEL_SSP_2),
PINMUX_IPSR_MSEL(IP10_11_9, HTX2, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP10_11_9, SCIFB2_TXD, SEL_SCIFB2_0),
PINMUX_IPSR_MSEL(IP10_11_9, SCIFB0_SCK_D, SEL_SCIFB_3),
- PINMUX_IPSR_DATA(IP10_14_12, VI0_G5),
- PINMUX_IPSR_DATA(IP10_14_12, VI2_FIELD),
+ PINMUX_IPSR_GPSR(IP10_14_12, VI0_G5),
+ PINMUX_IPSR_GPSR(IP10_14_12, VI2_FIELD),
PINMUX_IPSR_MSEL(IP10_14_12, STP_OPWM_0_C, SEL_SSP_2),
PINMUX_IPSR_MSEL(IP10_14_12, FMCLK_D, SEL_FM_3),
PINMUX_IPSR_MSEL(IP10_14_12, CAN0_TX_E, SEL_CAN0_4),
PINMUX_IPSR_MSEL(IP10_14_12, HTX1_D, SEL_HSCIF1_3),
PINMUX_IPSR_MSEL(IP10_14_12, SCIFB0_TXD_D, SEL_SCIFB_3),
- PINMUX_IPSR_DATA(IP10_16_15, VI0_G6),
- PINMUX_IPSR_DATA(IP10_16_15, VI2_CLK),
+ PINMUX_IPSR_GPSR(IP10_16_15, VI0_G6),
+ PINMUX_IPSR_GPSR(IP10_16_15, VI2_CLK),
PINMUX_IPSR_MSEL(IP10_16_15, BPFCLK_D, SEL_FM_3),
- PINMUX_IPSR_DATA(IP10_18_17, VI0_G7),
- PINMUX_IPSR_DATA(IP10_18_17, VI2_DATA0),
+ PINMUX_IPSR_GPSR(IP10_18_17, VI0_G7),
+ PINMUX_IPSR_GPSR(IP10_18_17, VI2_DATA0),
PINMUX_IPSR_MSEL(IP10_18_17, FMIN_D, SEL_FM_3),
- PINMUX_IPSR_DATA(IP10_21_19, VI0_R0),
- PINMUX_IPSR_DATA(IP10_21_19, VI2_DATA1),
+ PINMUX_IPSR_GPSR(IP10_21_19, VI0_R0),
+ PINMUX_IPSR_GPSR(IP10_21_19, VI2_DATA1),
PINMUX_IPSR_MSEL(IP10_21_19, GLO_I0_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_21_19, TS_SDATA0_C, SEL_TSIF0_2),
- PINMUX_IPSR_DATA(IP10_21_19, ATACS11_N),
- PINMUX_IPSR_DATA(IP10_24_22, VI0_R1),
- PINMUX_IPSR_DATA(IP10_24_22, VI2_DATA2),
+ PINMUX_IPSR_GPSR(IP10_21_19, ATACS11_N),
+ PINMUX_IPSR_GPSR(IP10_24_22, VI0_R1),
+ PINMUX_IPSR_GPSR(IP10_24_22, VI2_DATA2),
PINMUX_IPSR_MSEL(IP10_24_22, GLO_I1_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_24_22, TS_SCK0_C, SEL_TSIF0_2),
- PINMUX_IPSR_DATA(IP10_24_22, ATAG1_N),
- PINMUX_IPSR_DATA(IP10_26_25, VI0_R2),
- PINMUX_IPSR_DATA(IP10_26_25, VI2_DATA3),
+ PINMUX_IPSR_GPSR(IP10_24_22, ATAG1_N),
+ PINMUX_IPSR_GPSR(IP10_26_25, VI0_R2),
+ PINMUX_IPSR_GPSR(IP10_26_25, VI2_DATA3),
PINMUX_IPSR_MSEL(IP10_26_25, GLO_Q0_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_26_25, TS_SDEN0_C, SEL_TSIF0_2),
- PINMUX_IPSR_DATA(IP10_28_27, VI0_R3),
- PINMUX_IPSR_DATA(IP10_28_27, VI2_DATA4),
+ PINMUX_IPSR_GPSR(IP10_28_27, VI0_R3),
+ PINMUX_IPSR_GPSR(IP10_28_27, VI2_DATA4),
PINMUX_IPSR_MSEL(IP10_28_27, GLO_Q1_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_28_27, TS_SPSYNC0_C, SEL_TSIF0_2),
- PINMUX_IPSR_DATA(IP10_31_29, VI0_R4),
- PINMUX_IPSR_DATA(IP10_31_29, VI2_DATA5),
+ PINMUX_IPSR_GPSR(IP10_31_29, VI0_R4),
+ PINMUX_IPSR_GPSR(IP10_31_29, VI2_DATA5),
PINMUX_IPSR_MSEL(IP10_31_29, GLO_SCLK_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_31_29, TX0_C, SEL_SCIF0_2),
PINMUX_IPSR_MSEL(IP10_31_29, SCL1_D, SEL_IIC1_3),
/* IPSR11 */
- PINMUX_IPSR_DATA(IP11_2_0, VI0_R5),
- PINMUX_IPSR_DATA(IP11_2_0, VI2_DATA6),
+ PINMUX_IPSR_GPSR(IP11_2_0, VI0_R5),
+ PINMUX_IPSR_GPSR(IP11_2_0, VI2_DATA6),
PINMUX_IPSR_MSEL(IP11_2_0, GLO_SDATA_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP11_2_0, RX0_C, SEL_SCIF0_2),
PINMUX_IPSR_MSEL(IP11_2_0, SDA1_D, SEL_IIC1_3),
- PINMUX_IPSR_DATA(IP11_5_3, VI0_R6),
- PINMUX_IPSR_DATA(IP11_5_3, VI2_DATA7),
+ PINMUX_IPSR_GPSR(IP11_5_3, VI0_R6),
+ PINMUX_IPSR_GPSR(IP11_5_3, VI2_DATA7),
PINMUX_IPSR_MSEL(IP11_5_3, GLO_SS_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP11_5_3, TX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP11_5_3, SCL4_B, SEL_IIC4_1),
- PINMUX_IPSR_DATA(IP11_8_6, VI0_R7),
+ PINMUX_IPSR_GPSR(IP11_8_6, VI0_R7),
PINMUX_IPSR_MSEL(IP11_8_6, GLO_RFON_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP11_8_6, RX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP11_8_6, CAN0_RX_E, SEL_CAN0_4),
@@ -1388,180 +1388,180 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_8_6, HRX1_D, SEL_HSCIF1_3),
PINMUX_IPSR_MSEL(IP11_8_6, SCIFB0_RXD_D, SEL_SCIFB_3),
PINMUX_IPSR_MSEL(IP11_11_9, VI1_HSYNC_N, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_11_9, AVB_RXD0),
+ PINMUX_IPSR_GPSR(IP11_11_9, AVB_RXD0),
PINMUX_IPSR_MSEL(IP11_11_9, TS_SDATA0_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP11_11_9, TX4_B, SEL_SCIF4_1),
PINMUX_IPSR_MSEL(IP11_11_9, SCIFA4_TXD_B, SEL_SCIFA4_1),
PINMUX_IPSR_MSEL(IP11_14_12, VI1_VSYNC_N, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_14_12, AVB_RXD1),
+ PINMUX_IPSR_GPSR(IP11_14_12, AVB_RXD1),
PINMUX_IPSR_MSEL(IP11_14_12, TS_SCK0_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP11_14_12, RX4_B, SEL_SCIF4_1),
PINMUX_IPSR_MSEL(IP11_14_12, SCIFA4_RXD_B, SEL_SCIFA4_1),
PINMUX_IPSR_MSEL(IP11_16_15, VI1_CLKENB, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_16_15, AVB_RXD2),
+ PINMUX_IPSR_GPSR(IP11_16_15, AVB_RXD2),
PINMUX_IPSR_MSEL(IP11_16_15, TS_SDEN0_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP11_18_17, VI1_FIELD, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_18_17, AVB_RXD3),
+ PINMUX_IPSR_GPSR(IP11_18_17, AVB_RXD3),
PINMUX_IPSR_MSEL(IP11_18_17, TS_SPSYNC0_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP11_19, VI1_CLK, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_19, AVB_RXD4),
+ PINMUX_IPSR_GPSR(IP11_19, AVB_RXD4),
PINMUX_IPSR_MSEL(IP11_20, VI1_DATA0, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_20, AVB_RXD5),
+ PINMUX_IPSR_GPSR(IP11_20, AVB_RXD5),
PINMUX_IPSR_MSEL(IP11_21, VI1_DATA1, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_21, AVB_RXD6),
+ PINMUX_IPSR_GPSR(IP11_21, AVB_RXD6),
PINMUX_IPSR_MSEL(IP11_22, VI1_DATA2, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_22, AVB_RXD7),
+ PINMUX_IPSR_GPSR(IP11_22, AVB_RXD7),
PINMUX_IPSR_MSEL(IP11_23, VI1_DATA3, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_23, AVB_RX_ER),
+ PINMUX_IPSR_GPSR(IP11_23, AVB_RX_ER),
PINMUX_IPSR_MSEL(IP11_24, VI1_DATA4, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_24, AVB_MDIO),
+ PINMUX_IPSR_GPSR(IP11_24, AVB_MDIO),
PINMUX_IPSR_MSEL(IP11_25, VI1_DATA5, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_25, AVB_RX_DV),
+ PINMUX_IPSR_GPSR(IP11_25, AVB_RX_DV),
PINMUX_IPSR_MSEL(IP11_26, VI1_DATA6, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_26, AVB_MAGIC),
+ PINMUX_IPSR_GPSR(IP11_26, AVB_MAGIC),
PINMUX_IPSR_MSEL(IP11_27, VI1_DATA7, SEL_VI1_0),
- PINMUX_IPSR_DATA(IP11_27, AVB_MDC),
- PINMUX_IPSR_DATA(IP11_29_28, ETH_MDIO),
- PINMUX_IPSR_DATA(IP11_29_28, AVB_RX_CLK),
+ PINMUX_IPSR_GPSR(IP11_27, AVB_MDC),
+ PINMUX_IPSR_GPSR(IP11_29_28, ETH_MDIO),
+ PINMUX_IPSR_GPSR(IP11_29_28, AVB_RX_CLK),
PINMUX_IPSR_MSEL(IP11_29_28, SCL2_C, SEL_IIC2_2),
- PINMUX_IPSR_DATA(IP11_31_30, ETH_CRS_DV),
- PINMUX_IPSR_DATA(IP11_31_30, AVB_LINK),
+ PINMUX_IPSR_GPSR(IP11_31_30, ETH_CRS_DV),
+ PINMUX_IPSR_GPSR(IP11_31_30, AVB_LINK),
PINMUX_IPSR_MSEL(IP11_31_30, SDA2_C, SEL_IIC2_2),
/* IPSR12 */
- PINMUX_IPSR_DATA(IP12_1_0, ETH_RX_ER),
- PINMUX_IPSR_DATA(IP12_1_0, AVB_CRS),
+ PINMUX_IPSR_GPSR(IP12_1_0, ETH_RX_ER),
+ PINMUX_IPSR_GPSR(IP12_1_0, AVB_CRS),
PINMUX_IPSR_MSEL(IP12_1_0, SCL3, SEL_IIC3_0),
PINMUX_IPSR_MSEL(IP12_1_0, SCL7, SEL_IIC7_0),
- PINMUX_IPSR_DATA(IP12_3_2, ETH_RXD0),
- PINMUX_IPSR_DATA(IP12_3_2, AVB_PHY_INT),
+ PINMUX_IPSR_GPSR(IP12_3_2, ETH_RXD0),
+ PINMUX_IPSR_GPSR(IP12_3_2, AVB_PHY_INT),
PINMUX_IPSR_MSEL(IP12_3_2, SDA3, SEL_IIC3_0),
PINMUX_IPSR_MSEL(IP12_3_2, SDA7, SEL_IIC7_0),
- PINMUX_IPSR_DATA(IP12_6_4, ETH_RXD1),
- PINMUX_IPSR_DATA(IP12_6_4, AVB_GTXREFCLK),
+ PINMUX_IPSR_GPSR(IP12_6_4, ETH_RXD1),
+ PINMUX_IPSR_GPSR(IP12_6_4, AVB_GTXREFCLK),
PINMUX_IPSR_MSEL(IP12_6_4, CAN0_TX_C, SEL_CAN0_2),
PINMUX_IPSR_MSEL(IP12_6_4, SCL2_D, SEL_IIC2_3),
PINMUX_IPSR_MSEL(IP12_6_4, MSIOF1_RXD_E, SEL_SOF1_4),
- PINMUX_IPSR_DATA(IP12_9_7, ETH_LINK),
- PINMUX_IPSR_DATA(IP12_9_7, AVB_TXD0),
+ PINMUX_IPSR_GPSR(IP12_9_7, ETH_LINK),
+ PINMUX_IPSR_GPSR(IP12_9_7, AVB_TXD0),
PINMUX_IPSR_MSEL(IP12_9_7, CAN0_RX_C, SEL_CAN0_2),
PINMUX_IPSR_MSEL(IP12_9_7, SDA2_D, SEL_IIC2_3),
PINMUX_IPSR_MSEL(IP12_9_7, MSIOF1_SCK_E, SEL_SOF1_4),
- PINMUX_IPSR_DATA(IP12_12_10, ETH_REFCLK),
- PINMUX_IPSR_DATA(IP12_12_10, AVB_TXD1),
+ PINMUX_IPSR_GPSR(IP12_12_10, ETH_REFCLK),
+ PINMUX_IPSR_GPSR(IP12_12_10, AVB_TXD1),
PINMUX_IPSR_MSEL(IP12_12_10, SCIFA3_RXD_B, SEL_SCIFA3_1),
PINMUX_IPSR_MSEL(IP12_12_10, CAN1_RX_C, SEL_CAN1_2),
PINMUX_IPSR_MSEL(IP12_12_10, MSIOF1_SYNC_E, SEL_SOF1_4),
- PINMUX_IPSR_DATA(IP12_15_13, ETH_TXD1),
- PINMUX_IPSR_DATA(IP12_15_13, AVB_TXD2),
+ PINMUX_IPSR_GPSR(IP12_15_13, ETH_TXD1),
+ PINMUX_IPSR_GPSR(IP12_15_13, AVB_TXD2),
PINMUX_IPSR_MSEL(IP12_15_13, SCIFA3_TXD_B, SEL_SCIFA3_1),
PINMUX_IPSR_MSEL(IP12_15_13, CAN1_TX_C, SEL_CAN1_2),
PINMUX_IPSR_MSEL(IP12_15_13, MSIOF1_TXD_E, SEL_SOF1_4),
- PINMUX_IPSR_DATA(IP12_17_16, ETH_TX_EN),
- PINMUX_IPSR_DATA(IP12_17_16, AVB_TXD3),
+ PINMUX_IPSR_GPSR(IP12_17_16, ETH_TX_EN),
+ PINMUX_IPSR_GPSR(IP12_17_16, AVB_TXD3),
PINMUX_IPSR_MSEL(IP12_17_16, TCLK1_B, SEL_TMU1_0),
PINMUX_IPSR_MSEL(IP12_17_16, CAN_CLK_B, SEL_CANCLK_1),
- PINMUX_IPSR_DATA(IP12_19_18, ETH_MAGIC),
- PINMUX_IPSR_DATA(IP12_19_18, AVB_TXD4),
+ PINMUX_IPSR_GPSR(IP12_19_18, ETH_MAGIC),
+ PINMUX_IPSR_GPSR(IP12_19_18, AVB_TXD4),
PINMUX_IPSR_MSEL(IP12_19_18, IETX_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP12_21_20, ETH_TXD0),
- PINMUX_IPSR_DATA(IP12_21_20, AVB_TXD5),
+ PINMUX_IPSR_GPSR(IP12_21_20, ETH_TXD0),
+ PINMUX_IPSR_GPSR(IP12_21_20, AVB_TXD5),
PINMUX_IPSR_MSEL(IP12_21_20, IECLK_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP12_23_22, ETH_MDC),
- PINMUX_IPSR_DATA(IP12_23_22, AVB_TXD6),
+ PINMUX_IPSR_GPSR(IP12_23_22, ETH_MDC),
+ PINMUX_IPSR_GPSR(IP12_23_22, AVB_TXD6),
PINMUX_IPSR_MSEL(IP12_23_22, IERX_C, SEL_IEB_2),
PINMUX_IPSR_MSEL(IP12_26_24, STP_IVCXO27_0, SEL_SSP_0),
- PINMUX_IPSR_DATA(IP12_26_24, AVB_TXD7),
+ PINMUX_IPSR_GPSR(IP12_26_24, AVB_TXD7),
PINMUX_IPSR_MSEL(IP12_26_24, SCIFB2_TXD_D, SEL_SCIFB2_3),
PINMUX_IPSR_MSEL(IP12_26_24, ADIDATA_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP12_26_24, MSIOF0_SYNC_C, SEL_SOF0_2),
PINMUX_IPSR_MSEL(IP12_29_27, STP_ISCLK_0, SEL_SSP_0),
- PINMUX_IPSR_DATA(IP12_29_27, AVB_TX_EN),
+ PINMUX_IPSR_GPSR(IP12_29_27, AVB_TX_EN),
PINMUX_IPSR_MSEL(IP12_29_27, SCIFB2_RXD_D, SEL_SCIFB2_3),
PINMUX_IPSR_MSEL(IP12_29_27, ADICS_SAMP_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP12_29_27, MSIOF0_SCK_C, SEL_SOF0_2),
/* IPSR13 */
PINMUX_IPSR_MSEL(IP13_2_0, STP_ISD_0, SEL_SSP_0),
- PINMUX_IPSR_DATA(IP13_2_0, AVB_TX_ER),
+ PINMUX_IPSR_GPSR(IP13_2_0, AVB_TX_ER),
PINMUX_IPSR_MSEL(IP13_2_0, SCIFB2_SCK_C, SEL_SCIFB2_2),
PINMUX_IPSR_MSEL(IP13_2_0, ADICLK_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP13_2_0, MSIOF0_SS1_C, SEL_SOF0_2),
PINMUX_IPSR_MSEL(IP13_4_3, STP_ISEN_0, SEL_SSP_0),
- PINMUX_IPSR_DATA(IP13_4_3, AVB_TX_CLK),
+ PINMUX_IPSR_GPSR(IP13_4_3, AVB_TX_CLK),
PINMUX_IPSR_MSEL(IP13_4_3, ADICHS0_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP13_4_3, MSIOF0_SS2_C, SEL_SOF0_2),
PINMUX_IPSR_MSEL(IP13_6_5, STP_ISSYNC_0, SEL_SSP_0),
- PINMUX_IPSR_DATA(IP13_6_5, AVB_COL),
+ PINMUX_IPSR_GPSR(IP13_6_5, AVB_COL),
PINMUX_IPSR_MSEL(IP13_6_5, ADICHS1_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP13_6_5, MSIOF0_RXD_C, SEL_SOF0_2),
PINMUX_IPSR_MSEL(IP13_9_7, STP_OPWM_0, SEL_SSP_0),
- PINMUX_IPSR_DATA(IP13_9_7, AVB_GTX_CLK),
- PINMUX_IPSR_DATA(IP13_9_7, PWM0_B),
+ PINMUX_IPSR_GPSR(IP13_9_7, AVB_GTX_CLK),
+ PINMUX_IPSR_GPSR(IP13_9_7, PWM0_B),
PINMUX_IPSR_MSEL(IP13_9_7, ADICHS2_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP13_9_7, MSIOF0_TXD_C, SEL_SOF0_2),
- PINMUX_IPSR_DATA(IP13_10, SD0_CLK),
+ PINMUX_IPSR_GPSR(IP13_10, SD0_CLK),
PINMUX_IPSR_MSEL(IP13_10, SPCLK_B, SEL_QSP_1),
- PINMUX_IPSR_DATA(IP13_11, SD0_CMD),
+ PINMUX_IPSR_GPSR(IP13_11, SD0_CMD),
PINMUX_IPSR_MSEL(IP13_11, MOSI_IO0_B, SEL_QSP_1),
- PINMUX_IPSR_DATA(IP13_12, SD0_DATA0),
+ PINMUX_IPSR_GPSR(IP13_12, SD0_DATA0),
PINMUX_IPSR_MSEL(IP13_12, MISO_IO1_B, SEL_QSP_1),
- PINMUX_IPSR_DATA(IP13_13, SD0_DATA1),
+ PINMUX_IPSR_GPSR(IP13_13, SD0_DATA1),
PINMUX_IPSR_MSEL(IP13_13, IO2_B, SEL_QSP_1),
- PINMUX_IPSR_DATA(IP13_14, SD0_DATA2),
+ PINMUX_IPSR_GPSR(IP13_14, SD0_DATA2),
PINMUX_IPSR_MSEL(IP13_14, IO3_B, SEL_QSP_1),
- PINMUX_IPSR_DATA(IP13_15, SD0_DATA3),
+ PINMUX_IPSR_GPSR(IP13_15, SD0_DATA3),
PINMUX_IPSR_MSEL(IP13_15, SSL_B, SEL_QSP_1),
- PINMUX_IPSR_DATA(IP13_18_16, SD0_CD),
+ PINMUX_IPSR_GPSR(IP13_18_16, SD0_CD),
PINMUX_IPSR_MSEL(IP13_18_16, MMC_D6_B, SEL_MMC_1),
PINMUX_IPSR_MSEL(IP13_18_16, SIM0_RST_B, SEL_SIM_1),
PINMUX_IPSR_MSEL(IP13_18_16, CAN0_RX_F, SEL_CAN0_5),
PINMUX_IPSR_MSEL(IP13_18_16, SCIFA5_TXD_B, SEL_SCIFA5_1),
PINMUX_IPSR_MSEL(IP13_18_16, TX3_C, SEL_SCIF3_2),
- PINMUX_IPSR_DATA(IP13_21_19, SD0_WP),
+ PINMUX_IPSR_GPSR(IP13_21_19, SD0_WP),
PINMUX_IPSR_MSEL(IP13_21_19, MMC_D7_B, SEL_MMC_1),
PINMUX_IPSR_MSEL(IP13_21_19, SIM0_D_B, SEL_SIM_1),
PINMUX_IPSR_MSEL(IP13_21_19, CAN0_TX_F, SEL_CAN0_5),
PINMUX_IPSR_MSEL(IP13_21_19, SCIFA5_RXD_B, SEL_SCIFA5_1),
PINMUX_IPSR_MSEL(IP13_21_19, RX3_C, SEL_SCIF3_2),
- PINMUX_IPSR_DATA(IP13_22, SD1_CMD),
+ PINMUX_IPSR_GPSR(IP13_22, SD1_CMD),
PINMUX_IPSR_MSEL(IP13_22, REMOCON_B, SEL_RCN_1),
- PINMUX_IPSR_DATA(IP13_24_23, SD1_DATA0),
+ PINMUX_IPSR_GPSR(IP13_24_23, SD1_DATA0),
PINMUX_IPSR_MSEL(IP13_24_23, SPEEDIN_B, SEL_RSP_1),
- PINMUX_IPSR_DATA(IP13_25, SD1_DATA1),
+ PINMUX_IPSR_GPSR(IP13_25, SD1_DATA1),
PINMUX_IPSR_MSEL(IP13_25, IETX_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP13_26, SD1_DATA2),
+ PINMUX_IPSR_GPSR(IP13_26, SD1_DATA2),
PINMUX_IPSR_MSEL(IP13_26, IECLK_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP13_27, SD1_DATA3),
+ PINMUX_IPSR_GPSR(IP13_27, SD1_DATA3),
PINMUX_IPSR_MSEL(IP13_27, IERX_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP13_30_28, SD1_CD),
- PINMUX_IPSR_DATA(IP13_30_28, PWM0),
- PINMUX_IPSR_DATA(IP13_30_28, TPU_TO0),
+ PINMUX_IPSR_GPSR(IP13_30_28, SD1_CD),
+ PINMUX_IPSR_GPSR(IP13_30_28, PWM0),
+ PINMUX_IPSR_GPSR(IP13_30_28, TPU_TO0),
PINMUX_IPSR_MSEL(IP13_30_28, SCL1_C, SEL_IIC1_2),
/* IPSR14 */
- PINMUX_IPSR_DATA(IP14_1_0, SD1_WP),
- PINMUX_IPSR_DATA(IP14_1_0, PWM1_B),
+ PINMUX_IPSR_GPSR(IP14_1_0, SD1_WP),
+ PINMUX_IPSR_GPSR(IP14_1_0, PWM1_B),
PINMUX_IPSR_MSEL(IP14_1_0, SDA1_C, SEL_IIC1_2),
- PINMUX_IPSR_DATA(IP14_2, SD2_CLK),
- PINMUX_IPSR_DATA(IP14_2, MMC_CLK),
- PINMUX_IPSR_DATA(IP14_3, SD2_CMD),
- PINMUX_IPSR_DATA(IP14_3, MMC_CMD),
- PINMUX_IPSR_DATA(IP14_4, SD2_DATA0),
- PINMUX_IPSR_DATA(IP14_4, MMC_D0),
- PINMUX_IPSR_DATA(IP14_5, SD2_DATA1),
- PINMUX_IPSR_DATA(IP14_5, MMC_D1),
- PINMUX_IPSR_DATA(IP14_6, SD2_DATA2),
- PINMUX_IPSR_DATA(IP14_6, MMC_D2),
- PINMUX_IPSR_DATA(IP14_7, SD2_DATA3),
- PINMUX_IPSR_DATA(IP14_7, MMC_D3),
- PINMUX_IPSR_DATA(IP14_10_8, SD2_CD),
- PINMUX_IPSR_DATA(IP14_10_8, MMC_D4),
+ PINMUX_IPSR_GPSR(IP14_2, SD2_CLK),
+ PINMUX_IPSR_GPSR(IP14_2, MMC_CLK),
+ PINMUX_IPSR_GPSR(IP14_3, SD2_CMD),
+ PINMUX_IPSR_GPSR(IP14_3, MMC_CMD),
+ PINMUX_IPSR_GPSR(IP14_4, SD2_DATA0),
+ PINMUX_IPSR_GPSR(IP14_4, MMC_D0),
+ PINMUX_IPSR_GPSR(IP14_5, SD2_DATA1),
+ PINMUX_IPSR_GPSR(IP14_5, MMC_D1),
+ PINMUX_IPSR_GPSR(IP14_6, SD2_DATA2),
+ PINMUX_IPSR_GPSR(IP14_6, MMC_D2),
+ PINMUX_IPSR_GPSR(IP14_7, SD2_DATA3),
+ PINMUX_IPSR_GPSR(IP14_7, MMC_D3),
+ PINMUX_IPSR_GPSR(IP14_10_8, SD2_CD),
+ PINMUX_IPSR_GPSR(IP14_10_8, MMC_D4),
PINMUX_IPSR_MSEL(IP14_10_8, SCL8_C, SEL_IIC8_2),
PINMUX_IPSR_MSEL(IP14_10_8, TX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP14_10_8, SCIFA5_TXD_C, SEL_SCIFA5_2),
- PINMUX_IPSR_DATA(IP14_13_11, SD2_WP),
- PINMUX_IPSR_DATA(IP14_13_11, MMC_D5),
+ PINMUX_IPSR_GPSR(IP14_13_11, SD2_WP),
+ PINMUX_IPSR_GPSR(IP14_13_11, MMC_D5),
PINMUX_IPSR_MSEL(IP14_13_11, SDA8_C, SEL_IIC8_2),
PINMUX_IPSR_MSEL(IP14_13_11, RX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP14_13_11, SCIFA5_RXD_C, SEL_SCIFA5_2),
@@ -1569,40 +1569,40 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_16_14, RX2_C, SEL_SCIF2_2),
PINMUX_IPSR_MSEL(IP14_16_14, ADIDATA, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP14_16_14, VI1_CLK_C, SEL_VI1_2),
- PINMUX_IPSR_DATA(IP14_16_14, VI1_G0_B),
+ PINMUX_IPSR_GPSR(IP14_16_14, VI1_G0_B),
PINMUX_IPSR_MSEL(IP14_19_17, MSIOF0_SYNC, SEL_SOF0_0),
PINMUX_IPSR_MSEL(IP14_19_17, TX2_C, SEL_SCIF2_2),
PINMUX_IPSR_MSEL(IP14_19_17, ADICS_SAMP, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP14_19_17, VI1_CLKENB_C, SEL_VI1_2),
- PINMUX_IPSR_DATA(IP14_19_17, VI1_G1_B),
+ PINMUX_IPSR_GPSR(IP14_19_17, VI1_G1_B),
PINMUX_IPSR_MSEL(IP14_22_20, MSIOF0_TXD, SEL_SOF0_0),
PINMUX_IPSR_MSEL(IP14_22_20, ADICLK, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP14_22_20, VI1_FIELD_C, SEL_VI1_2),
- PINMUX_IPSR_DATA(IP14_22_20, VI1_G2_B),
+ PINMUX_IPSR_GPSR(IP14_22_20, VI1_G2_B),
PINMUX_IPSR_MSEL(IP14_25_23, MSIOF0_RXD, SEL_SOF0_0),
PINMUX_IPSR_MSEL(IP14_25_23, ADICHS0, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP14_25_23, VI1_DATA0_C, SEL_VI1_2),
- PINMUX_IPSR_DATA(IP14_25_23, VI1_G3_B),
+ PINMUX_IPSR_GPSR(IP14_25_23, VI1_G3_B),
PINMUX_IPSR_MSEL(IP14_28_26, MSIOF0_SS1, SEL_SOF0_0),
PINMUX_IPSR_MSEL(IP14_28_26, MMC_D6, SEL_MMC_0),
PINMUX_IPSR_MSEL(IP14_28_26, ADICHS1, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP14_28_26, TX0_E, SEL_SCIF0_4),
PINMUX_IPSR_MSEL(IP14_28_26, VI1_HSYNC_N_C, SEL_VI1_2),
PINMUX_IPSR_MSEL(IP14_28_26, SCL7_C, SEL_IIC7_2),
- PINMUX_IPSR_DATA(IP14_28_26, VI1_G4_B),
+ PINMUX_IPSR_GPSR(IP14_28_26, VI1_G4_B),
PINMUX_IPSR_MSEL(IP14_31_29, MSIOF0_SS2, SEL_SOF0_0),
PINMUX_IPSR_MSEL(IP14_31_29, MMC_D7, SEL_MMC_0),
PINMUX_IPSR_MSEL(IP14_31_29, ADICHS2, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP14_31_29, RX0_E, SEL_SCIF0_4),
PINMUX_IPSR_MSEL(IP14_31_29, VI1_VSYNC_N_C, SEL_VI1_2),
PINMUX_IPSR_MSEL(IP14_31_29, SDA7_C, SEL_IIC7_2),
- PINMUX_IPSR_DATA(IP14_31_29, VI1_G5_B),
+ PINMUX_IPSR_GPSR(IP14_31_29, VI1_G5_B),
/* IPSR15 */
PINMUX_IPSR_MSEL(IP15_1_0, SIM0_RST, SEL_SIM_0),
PINMUX_IPSR_MSEL(IP15_1_0, IETX, SEL_IEB_0),
PINMUX_IPSR_MSEL(IP15_1_0, CAN1_TX_D, SEL_CAN1_3),
- PINMUX_IPSR_DATA(IP15_3_2, SIM0_CLK),
+ PINMUX_IPSR_GPSR(IP15_3_2, SIM0_CLK),
PINMUX_IPSR_MSEL(IP15_3_2, IECLK, SEL_IEB_0),
PINMUX_IPSR_MSEL(IP15_3_2, CAN_CLK_C, SEL_CANCLK_2),
PINMUX_IPSR_MSEL(IP15_5_4, SIM0_D, SEL_SIM_0),
@@ -1611,19 +1611,19 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP15_8_6, GPS_CLK, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP15_8_6, DU1_DOTCLKIN_C, SEL_DIS_2),
PINMUX_IPSR_MSEL(IP15_8_6, AUDIO_CLKB_B, SEL_ADG_1),
- PINMUX_IPSR_DATA(IP15_8_6, PWM5_B),
+ PINMUX_IPSR_GPSR(IP15_8_6, PWM5_B),
PINMUX_IPSR_MSEL(IP15_8_6, SCIFA3_TXD_C, SEL_SCIFA3_2),
PINMUX_IPSR_MSEL(IP15_11_9, GPS_SIGN, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP15_11_9, TX4_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP15_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
- PINMUX_IPSR_DATA(IP15_11_9, PWM5),
- PINMUX_IPSR_DATA(IP15_11_9, VI1_G6_B),
+ PINMUX_IPSR_GPSR(IP15_11_9, PWM5),
+ PINMUX_IPSR_GPSR(IP15_11_9, VI1_G6_B),
PINMUX_IPSR_MSEL(IP15_11_9, SCIFA3_RXD_C, SEL_SCIFA3_2),
PINMUX_IPSR_MSEL(IP15_14_12, GPS_MAG, SEL_GPS_0),
PINMUX_IPSR_MSEL(IP15_14_12, RX4_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP15_14_12, SCIFA4_RXD_C, SEL_SCIFA4_2),
- PINMUX_IPSR_DATA(IP15_14_12, PWM6),
- PINMUX_IPSR_DATA(IP15_14_12, VI1_G7_B),
+ PINMUX_IPSR_GPSR(IP15_14_12, PWM6),
+ PINMUX_IPSR_GPSR(IP15_14_12, VI1_G7_B),
PINMUX_IPSR_MSEL(IP15_14_12, SCIFA3_SCK_C, SEL_SCIFA3_2),
PINMUX_IPSR_MSEL(IP15_17_15, HCTS0_N, SEL_HSCIF0_0),
PINMUX_IPSR_MSEL(IP15_17_15, SCIFB0_CTS_N, SEL_SCIFB_0),
@@ -1638,7 +1638,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP15_23_21, SCIFB0_SCK, SEL_SCIFB_0),
PINMUX_IPSR_MSEL(IP15_23_21, GLO_Q0_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP15_23_21, CAN_CLK, SEL_CANCLK_0),
- PINMUX_IPSR_DATA(IP15_23_21, TCLK2),
+ PINMUX_IPSR_GPSR(IP15_23_21, TCLK2),
PINMUX_IPSR_MSEL(IP15_23_21, VI1_DATA3_C, SEL_VI1_2),
PINMUX_IPSR_MSEL(IP15_26_24, HRX0, SEL_HSCIF0_0),
PINMUX_IPSR_MSEL(IP15_26_24, SCIFB0_RXD, SEL_SCIFB_0),
@@ -1654,25 +1654,25 @@ static const u16 pinmux_data[] = {
/* IPSR16 */
PINMUX_IPSR_MSEL(IP16_2_0, HRX1, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP16_2_0, SCIFB1_RXD, SEL_SCIFB1_0),
- PINMUX_IPSR_DATA(IP16_2_0, VI1_R0_B),
+ PINMUX_IPSR_GPSR(IP16_2_0, VI1_R0_B),
PINMUX_IPSR_MSEL(IP16_2_0, GLO_SDATA_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP16_2_0, VI1_DATA6_C, SEL_VI1_2),
PINMUX_IPSR_MSEL(IP16_5_3, HTX1, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP16_5_3, SCIFB1_TXD, SEL_SCIFB1_0),
- PINMUX_IPSR_DATA(IP16_5_3, VI1_R1_B),
+ PINMUX_IPSR_GPSR(IP16_5_3, VI1_R1_B),
PINMUX_IPSR_MSEL(IP16_5_3, GLO_SS_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP16_5_3, VI1_DATA7_C, SEL_VI1_2),
PINMUX_IPSR_MSEL(IP16_7_6, HSCK1, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP16_7_6, SCIFB1_SCK, SEL_SCIFB1_0),
- PINMUX_IPSR_DATA(IP16_7_6, MLB_CLK),
+ PINMUX_IPSR_GPSR(IP16_7_6, MLB_CLK),
PINMUX_IPSR_MSEL(IP16_7_6, GLO_RFON_C, SEL_GPS_2),
PINMUX_IPSR_MSEL(IP16_9_8, HCTS1_N, SEL_HSCIF1_0),
- PINMUX_IPSR_DATA(IP16_9_8, SCIFB1_CTS_N),
- PINMUX_IPSR_DATA(IP16_9_8, MLB_SIG),
+ PINMUX_IPSR_GPSR(IP16_9_8, SCIFB1_CTS_N),
+ PINMUX_IPSR_GPSR(IP16_9_8, MLB_SIG),
PINMUX_IPSR_MSEL(IP16_9_8, CAN1_TX_B, SEL_CAN1_1),
PINMUX_IPSR_MSEL(IP16_11_10, HRTS1_N, SEL_HSCIF1_0),
- PINMUX_IPSR_DATA(IP16_11_10, SCIFB1_RTS_N),
- PINMUX_IPSR_DATA(IP16_11_10, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP16_11_10, SCIFB1_RTS_N),
+ PINMUX_IPSR_GPSR(IP16_11_10, MLB_DAT),
PINMUX_IPSR_MSEL(IP16_11_10, CAN1_RX_B, SEL_CAN1_1),
};
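
The hunks above and below perform a purely mechanical rename:
PINMUX_IPSR_DATA() becomes PINMUX_IPSR_GPSR(), making explicit that these
function selections are routed through a GPSR (GPIO/peripheral function
select) register in addition to the IPSR field, while PINMUX_IPSR_MSEL()
entries, which carry an extra MOD_SEL argument, are left untouched. As a
minimal sketch of the two macro shapes, assuming the expansions in
drivers/pinctrl/sh-pfc/sh_pfc.h keep the form they had before the rename
(the bodies below are illustrative assumptions and should be checked against
the tree):

	/* Function chosen by an IPSR field; pin gated by GPSR only. */
	#define PINMUX_IPSR_GPSR(ipsr, fn) \
		PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)

	/* As above, plus a MOD_SEL (module select) register field. */
	#define PINMUX_IPSR_MSEL(ipsr, fn, msel) \
		PINMUX_DATA(fn##_MARK, FN_##msel, FN_##ipsr, FN_##fn)

Since only the macro name changes, the generated pinmux_data[] tables should
be identical before and after this conversion.
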
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index 3718c7846bfd..38912cff597b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -1,9 +1,9 @@
/*
* r8a7794 processor support - PFC hardware block.
*
- * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc., <source@cogentembedded.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -623,848 +623,848 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(SD1_DATA3),
/* IPSR0 */
- PINMUX_IPSR_DATA(IP0_0, SD1_CD),
+ PINMUX_IPSR_GPSR(IP0_0, SD1_CD),
PINMUX_IPSR_MSEL(IP0_0, CAN0_RX, SEL_CAN0_0),
- PINMUX_IPSR_DATA(IP0_9_8, SD1_WP),
- PINMUX_IPSR_DATA(IP0_9_8, IRQ7),
+ PINMUX_IPSR_GPSR(IP0_9_8, SD1_WP),
+ PINMUX_IPSR_GPSR(IP0_9_8, IRQ7),
PINMUX_IPSR_MSEL(IP0_9_8, CAN0_TX, SEL_CAN0_0),
- PINMUX_IPSR_DATA(IP0_10, MMC_CLK),
- PINMUX_IPSR_DATA(IP0_10, SD2_CLK),
- PINMUX_IPSR_DATA(IP0_11, MMC_CMD),
- PINMUX_IPSR_DATA(IP0_11, SD2_CMD),
- PINMUX_IPSR_DATA(IP0_12, MMC_D0),
- PINMUX_IPSR_DATA(IP0_12, SD2_DATA0),
- PINMUX_IPSR_DATA(IP0_13, MMC_D1),
- PINMUX_IPSR_DATA(IP0_13, SD2_DATA1),
- PINMUX_IPSR_DATA(IP0_14, MMC_D2),
- PINMUX_IPSR_DATA(IP0_14, SD2_DATA2),
- PINMUX_IPSR_DATA(IP0_15, MMC_D3),
- PINMUX_IPSR_DATA(IP0_15, SD2_DATA3),
- PINMUX_IPSR_DATA(IP0_16, MMC_D4),
- PINMUX_IPSR_DATA(IP0_16, SD2_CD),
- PINMUX_IPSR_DATA(IP0_17, MMC_D5),
- PINMUX_IPSR_DATA(IP0_17, SD2_WP),
- PINMUX_IPSR_DATA(IP0_19_18, MMC_D6),
+ PINMUX_IPSR_GPSR(IP0_10, MMC_CLK),
+ PINMUX_IPSR_GPSR(IP0_10, SD2_CLK),
+ PINMUX_IPSR_GPSR(IP0_11, MMC_CMD),
+ PINMUX_IPSR_GPSR(IP0_11, SD2_CMD),
+ PINMUX_IPSR_GPSR(IP0_12, MMC_D0),
+ PINMUX_IPSR_GPSR(IP0_12, SD2_DATA0),
+ PINMUX_IPSR_GPSR(IP0_13, MMC_D1),
+ PINMUX_IPSR_GPSR(IP0_13, SD2_DATA1),
+ PINMUX_IPSR_GPSR(IP0_14, MMC_D2),
+ PINMUX_IPSR_GPSR(IP0_14, SD2_DATA2),
+ PINMUX_IPSR_GPSR(IP0_15, MMC_D3),
+ PINMUX_IPSR_GPSR(IP0_15, SD2_DATA3),
+ PINMUX_IPSR_GPSR(IP0_16, MMC_D4),
+ PINMUX_IPSR_GPSR(IP0_16, SD2_CD),
+ PINMUX_IPSR_GPSR(IP0_17, MMC_D5),
+ PINMUX_IPSR_GPSR(IP0_17, SD2_WP),
+ PINMUX_IPSR_GPSR(IP0_19_18, MMC_D6),
PINMUX_IPSR_MSEL(IP0_19_18, SCIF0_RXD, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP0_19_18, I2C2_SCL_B, SEL_I2C02_1),
PINMUX_IPSR_MSEL(IP0_19_18, CAN1_RX, SEL_CAN1_0),
- PINMUX_IPSR_DATA(IP0_21_20, MMC_D7),
+ PINMUX_IPSR_GPSR(IP0_21_20, MMC_D7),
PINMUX_IPSR_MSEL(IP0_21_20, SCIF0_TXD, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP0_21_20, I2C2_SDA_B, SEL_I2C02_1),
PINMUX_IPSR_MSEL(IP0_21_20, CAN1_TX, SEL_CAN1_0),
- PINMUX_IPSR_DATA(IP0_23_22, D0),
+ PINMUX_IPSR_GPSR(IP0_23_22, D0),
PINMUX_IPSR_MSEL(IP0_23_22, SCIFA3_SCK_B, SEL_SCIFA3_1),
- PINMUX_IPSR_DATA(IP0_23_22, IRQ4),
- PINMUX_IPSR_DATA(IP0_24, D1),
+ PINMUX_IPSR_GPSR(IP0_23_22, IRQ4),
+ PINMUX_IPSR_GPSR(IP0_24, D1),
PINMUX_IPSR_MSEL(IP0_24, SCIFA3_RXD_B, SEL_SCIFA3_1),
- PINMUX_IPSR_DATA(IP0_25, D2),
+ PINMUX_IPSR_GPSR(IP0_25, D2),
PINMUX_IPSR_MSEL(IP0_25, SCIFA3_TXD_B, SEL_SCIFA3_1),
- PINMUX_IPSR_DATA(IP0_27_26, D3),
+ PINMUX_IPSR_GPSR(IP0_27_26, D3),
PINMUX_IPSR_MSEL(IP0_27_26, I2C3_SCL_B, SEL_I2C03_1),
PINMUX_IPSR_MSEL(IP0_27_26, SCIF5_RXD_B, SEL_SCIF5_1),
- PINMUX_IPSR_DATA(IP0_29_28, D4),
+ PINMUX_IPSR_GPSR(IP0_29_28, D4),
PINMUX_IPSR_MSEL(IP0_29_28, I2C3_SDA_B, SEL_I2C03_1),
PINMUX_IPSR_MSEL(IP0_29_28, SCIF5_TXD_B, SEL_SCIF5_1),
- PINMUX_IPSR_DATA(IP0_31_30, D5),
+ PINMUX_IPSR_GPSR(IP0_31_30, D5),
PINMUX_IPSR_MSEL(IP0_31_30, SCIF4_RXD_B, SEL_SCIF4_1),
PINMUX_IPSR_MSEL(IP0_31_30, I2C0_SCL_D, SEL_I2C00_3),
/* IPSR1 */
- PINMUX_IPSR_DATA(IP1_1_0, D6),
+ PINMUX_IPSR_GPSR(IP1_1_0, D6),
PINMUX_IPSR_MSEL(IP1_1_0, SCIF4_TXD_B, SEL_SCIF4_1),
PINMUX_IPSR_MSEL(IP1_1_0, I2C0_SDA_D, SEL_I2C00_3),
- PINMUX_IPSR_DATA(IP1_3_2, D7),
- PINMUX_IPSR_DATA(IP1_3_2, IRQ3),
+ PINMUX_IPSR_GPSR(IP1_3_2, D7),
+ PINMUX_IPSR_GPSR(IP1_3_2, IRQ3),
PINMUX_IPSR_MSEL(IP1_3_2, TCLK1, SEL_TMU_0),
- PINMUX_IPSR_DATA(IP1_3_2, PWM6_B),
- PINMUX_IPSR_DATA(IP1_5_4, D8),
- PINMUX_IPSR_DATA(IP1_5_4, HSCIF2_HRX),
+ PINMUX_IPSR_GPSR(IP1_3_2, PWM6_B),
+ PINMUX_IPSR_GPSR(IP1_5_4, D8),
+ PINMUX_IPSR_GPSR(IP1_5_4, HSCIF2_HRX),
PINMUX_IPSR_MSEL(IP1_5_4, I2C1_SCL_B, SEL_I2C01_1),
- PINMUX_IPSR_DATA(IP1_7_6, D9),
- PINMUX_IPSR_DATA(IP1_7_6, HSCIF2_HTX),
+ PINMUX_IPSR_GPSR(IP1_7_6, D9),
+ PINMUX_IPSR_GPSR(IP1_7_6, HSCIF2_HTX),
PINMUX_IPSR_MSEL(IP1_7_6, I2C1_SDA_B, SEL_I2C01_1),
- PINMUX_IPSR_DATA(IP1_10_8, D10),
- PINMUX_IPSR_DATA(IP1_10_8, HSCIF2_HSCK),
+ PINMUX_IPSR_GPSR(IP1_10_8, D10),
+ PINMUX_IPSR_GPSR(IP1_10_8, HSCIF2_HSCK),
PINMUX_IPSR_MSEL(IP1_10_8, SCIF1_SCK_C, SEL_SCIF1_2),
- PINMUX_IPSR_DATA(IP1_10_8, IRQ6),
- PINMUX_IPSR_DATA(IP1_10_8, PWM5_C),
- PINMUX_IPSR_DATA(IP1_12_11, D11),
- PINMUX_IPSR_DATA(IP1_12_11, HSCIF2_HCTS_N),
+ PINMUX_IPSR_GPSR(IP1_10_8, IRQ6),
+ PINMUX_IPSR_GPSR(IP1_10_8, PWM5_C),
+ PINMUX_IPSR_GPSR(IP1_12_11, D11),
+ PINMUX_IPSR_GPSR(IP1_12_11, HSCIF2_HCTS_N),
PINMUX_IPSR_MSEL(IP1_12_11, SCIF1_RXD_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP1_12_11, I2C1_SCL_D, SEL_I2C01_3),
- PINMUX_IPSR_DATA(IP1_14_13, D12),
- PINMUX_IPSR_DATA(IP1_14_13, HSCIF2_HRTS_N),
+ PINMUX_IPSR_GPSR(IP1_14_13, D12),
+ PINMUX_IPSR_GPSR(IP1_14_13, HSCIF2_HRTS_N),
PINMUX_IPSR_MSEL(IP1_14_13, SCIF1_TXD_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP1_14_13, I2C1_SDA_D, SEL_I2C01_3),
- PINMUX_IPSR_DATA(IP1_17_15, D13),
+ PINMUX_IPSR_GPSR(IP1_17_15, D13),
PINMUX_IPSR_MSEL(IP1_17_15, SCIFA1_SCK, SEL_SCIFA1_0),
- PINMUX_IPSR_DATA(IP1_17_15, TANS1),
- PINMUX_IPSR_DATA(IP1_17_15, PWM2_C),
+ PINMUX_IPSR_GPSR(IP1_17_15, TANS1),
+ PINMUX_IPSR_GPSR(IP1_17_15, PWM2_C),
PINMUX_IPSR_MSEL(IP1_17_15, TCLK2_B, SEL_TMU_1),
- PINMUX_IPSR_DATA(IP1_19_18, D14),
+ PINMUX_IPSR_GPSR(IP1_19_18, D14),
PINMUX_IPSR_MSEL(IP1_19_18, SCIFA1_RXD, SEL_SCIFA1_0),
PINMUX_IPSR_MSEL(IP1_19_18, IIC0_SCL_B, SEL_IIC00_1),
- PINMUX_IPSR_DATA(IP1_21_20, D15),
+ PINMUX_IPSR_GPSR(IP1_21_20, D15),
PINMUX_IPSR_MSEL(IP1_21_20, SCIFA1_TXD, SEL_SCIFA1_0),
PINMUX_IPSR_MSEL(IP1_21_20, IIC0_SDA_B, SEL_IIC00_1),
- PINMUX_IPSR_DATA(IP1_23_22, A0),
- PINMUX_IPSR_DATA(IP1_23_22, SCIFB1_SCK),
- PINMUX_IPSR_DATA(IP1_23_22, PWM3_B),
- PINMUX_IPSR_DATA(IP1_24, A1),
- PINMUX_IPSR_DATA(IP1_24, SCIFB1_TXD),
- PINMUX_IPSR_DATA(IP1_26, A3),
- PINMUX_IPSR_DATA(IP1_26, SCIFB0_SCK),
- PINMUX_IPSR_DATA(IP1_27, A4),
- PINMUX_IPSR_DATA(IP1_27, SCIFB0_TXD),
- PINMUX_IPSR_DATA(IP1_29_28, A5),
- PINMUX_IPSR_DATA(IP1_29_28, SCIFB0_RXD),
- PINMUX_IPSR_DATA(IP1_29_28, PWM4_B),
- PINMUX_IPSR_DATA(IP1_29_28, TPUTO3_C),
- PINMUX_IPSR_DATA(IP1_31_30, A6),
- PINMUX_IPSR_DATA(IP1_31_30, SCIFB0_CTS_N),
+ PINMUX_IPSR_GPSR(IP1_23_22, A0),
+ PINMUX_IPSR_GPSR(IP1_23_22, SCIFB1_SCK),
+ PINMUX_IPSR_GPSR(IP1_23_22, PWM3_B),
+ PINMUX_IPSR_GPSR(IP1_24, A1),
+ PINMUX_IPSR_GPSR(IP1_24, SCIFB1_TXD),
+ PINMUX_IPSR_GPSR(IP1_26, A3),
+ PINMUX_IPSR_GPSR(IP1_26, SCIFB0_SCK),
+ PINMUX_IPSR_GPSR(IP1_27, A4),
+ PINMUX_IPSR_GPSR(IP1_27, SCIFB0_TXD),
+ PINMUX_IPSR_GPSR(IP1_29_28, A5),
+ PINMUX_IPSR_GPSR(IP1_29_28, SCIFB0_RXD),
+ PINMUX_IPSR_GPSR(IP1_29_28, PWM4_B),
+ PINMUX_IPSR_GPSR(IP1_29_28, TPUTO3_C),
+ PINMUX_IPSR_GPSR(IP1_31_30, A6),
+ PINMUX_IPSR_GPSR(IP1_31_30, SCIFB0_CTS_N),
PINMUX_IPSR_MSEL(IP1_31_30, SCIFA4_RXD_B, SEL_SCIFA4_1),
- PINMUX_IPSR_DATA(IP1_31_30, TPUTO2_C),
+ PINMUX_IPSR_GPSR(IP1_31_30, TPUTO2_C),
/* IPSR2 */
- PINMUX_IPSR_DATA(IP2_1_0, A7),
- PINMUX_IPSR_DATA(IP2_1_0, SCIFB0_RTS_N),
+ PINMUX_IPSR_GPSR(IP2_1_0, A7),
+ PINMUX_IPSR_GPSR(IP2_1_0, SCIFB0_RTS_N),
PINMUX_IPSR_MSEL(IP2_1_0, SCIFA4_TXD_B, SEL_SCIFA4_1),
- PINMUX_IPSR_DATA(IP2_3_2, A8),
+ PINMUX_IPSR_GPSR(IP2_3_2, A8),
PINMUX_IPSR_MSEL(IP2_3_2, MSIOF1_RXD, SEL_MSI1_0),
PINMUX_IPSR_MSEL(IP2_3_2, SCIFA0_RXD_B, SEL_SCIFA0_1),
- PINMUX_IPSR_DATA(IP2_5_4, A9),
+ PINMUX_IPSR_GPSR(IP2_5_4, A9),
PINMUX_IPSR_MSEL(IP2_5_4, MSIOF1_TXD, SEL_MSI1_0),
PINMUX_IPSR_MSEL(IP2_5_4, SCIFA0_TXD_B, SEL_SCIFA0_1),
- PINMUX_IPSR_DATA(IP2_7_6, A10),
+ PINMUX_IPSR_GPSR(IP2_7_6, A10),
PINMUX_IPSR_MSEL(IP2_7_6, MSIOF1_SCK, SEL_MSI1_0),
PINMUX_IPSR_MSEL(IP2_7_6, IIC1_SCL_B, SEL_IIC01_1),
- PINMUX_IPSR_DATA(IP2_9_8, A11),
+ PINMUX_IPSR_GPSR(IP2_9_8, A11),
PINMUX_IPSR_MSEL(IP2_9_8, MSIOF1_SYNC, SEL_MSI1_0),
PINMUX_IPSR_MSEL(IP2_9_8, IIC1_SDA_B, SEL_IIC01_1),
- PINMUX_IPSR_DATA(IP2_11_10, A12),
+ PINMUX_IPSR_GPSR(IP2_11_10, A12),
PINMUX_IPSR_MSEL(IP2_11_10, MSIOF1_SS1, SEL_MSI1_0),
PINMUX_IPSR_MSEL(IP2_11_10, SCIFA5_RXD_B, SEL_SCIFA5_1),
- PINMUX_IPSR_DATA(IP2_13_12, A13),
+ PINMUX_IPSR_GPSR(IP2_13_12, A13),
PINMUX_IPSR_MSEL(IP2_13_12, MSIOF1_SS2, SEL_MSI1_0),
PINMUX_IPSR_MSEL(IP2_13_12, SCIFA5_TXD_B, SEL_SCIFA5_1),
- PINMUX_IPSR_DATA(IP2_15_14, A14),
+ PINMUX_IPSR_GPSR(IP2_15_14, A14),
PINMUX_IPSR_MSEL(IP2_15_14, MSIOF2_RXD, SEL_MSI2_0),
PINMUX_IPSR_MSEL(IP2_15_14, HSCIF0_HRX_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP2_15_14, DREQ1_N, SEL_LBS_0),
- PINMUX_IPSR_DATA(IP2_17_16, A15),
+ PINMUX_IPSR_GPSR(IP2_17_16, A15),
PINMUX_IPSR_MSEL(IP2_17_16, MSIOF2_TXD, SEL_MSI2_0),
PINMUX_IPSR_MSEL(IP2_17_16, HSCIF0_HTX_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP2_17_16, DACK1, SEL_LBS_0),
- PINMUX_IPSR_DATA(IP2_20_18, A16),
+ PINMUX_IPSR_GPSR(IP2_20_18, A16),
PINMUX_IPSR_MSEL(IP2_20_18, MSIOF2_SCK, SEL_MSI2_0),
PINMUX_IPSR_MSEL(IP2_20_18, HSCIF0_HSCK_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP2_20_18, SPEEDIN, SEL_RSP_0),
PINMUX_IPSR_MSEL(IP2_20_18, VSP, SEL_SPDM_0),
PINMUX_IPSR_MSEL(IP2_20_18, CAN_CLK_C, SEL_CAN_2),
- PINMUX_IPSR_DATA(IP2_20_18, TPUTO2_B),
- PINMUX_IPSR_DATA(IP2_23_21, A17),
+ PINMUX_IPSR_GPSR(IP2_20_18, TPUTO2_B),
+ PINMUX_IPSR_GPSR(IP2_23_21, A17),
PINMUX_IPSR_MSEL(IP2_23_21, MSIOF2_SYNC, SEL_MSI2_0),
PINMUX_IPSR_MSEL(IP2_23_21, SCIF4_RXD_E, SEL_SCIF4_4),
PINMUX_IPSR_MSEL(IP2_23_21, CAN1_RX_B, SEL_CAN1_1),
PINMUX_IPSR_MSEL(IP2_23_21, AVB_AVTP_CAPTURE_B, SEL_AVB_1),
- PINMUX_IPSR_DATA(IP2_26_24, A18),
+ PINMUX_IPSR_GPSR(IP2_26_24, A18),
PINMUX_IPSR_MSEL(IP2_26_24, MSIOF2_SS1, SEL_MSI2_0),
PINMUX_IPSR_MSEL(IP2_26_24, SCIF4_TXD_E, SEL_SCIF4_4),
PINMUX_IPSR_MSEL(IP2_26_24, CAN1_TX_B, SEL_CAN1_1),
PINMUX_IPSR_MSEL(IP2_26_24, AVB_AVTP_MATCH_B, SEL_AVB_1),
- PINMUX_IPSR_DATA(IP2_29_27, A19),
+ PINMUX_IPSR_GPSR(IP2_29_27, A19),
PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_SS2, SEL_MSI2_0),
- PINMUX_IPSR_DATA(IP2_29_27, PWM4),
- PINMUX_IPSR_DATA(IP2_29_27, TPUTO2),
- PINMUX_IPSR_DATA(IP2_29_27, MOUT0),
- PINMUX_IPSR_DATA(IP2_31_30, A20),
- PINMUX_IPSR_DATA(IP2_31_30, SPCLK),
- PINMUX_IPSR_DATA(IP2_29_27, MOUT1),
+ PINMUX_IPSR_GPSR(IP2_29_27, PWM4),
+ PINMUX_IPSR_GPSR(IP2_29_27, TPUTO2),
+ PINMUX_IPSR_GPSR(IP2_29_27, MOUT0),
+ PINMUX_IPSR_GPSR(IP2_31_30, A20),
+ PINMUX_IPSR_GPSR(IP2_31_30, SPCLK),
+ PINMUX_IPSR_GPSR(IP2_29_27, MOUT1),
/* IPSR3 */
- PINMUX_IPSR_DATA(IP3_1_0, A21),
- PINMUX_IPSR_DATA(IP3_1_0, MOSI_IO0),
- PINMUX_IPSR_DATA(IP3_1_0, MOUT2),
- PINMUX_IPSR_DATA(IP3_3_2, A22),
- PINMUX_IPSR_DATA(IP3_3_2, MISO_IO1),
- PINMUX_IPSR_DATA(IP3_3_2, MOUT5),
- PINMUX_IPSR_DATA(IP3_3_2, ATADIR1_N),
- PINMUX_IPSR_DATA(IP3_5_4, A23),
- PINMUX_IPSR_DATA(IP3_5_4, IO2),
- PINMUX_IPSR_DATA(IP3_5_4, MOUT6),
- PINMUX_IPSR_DATA(IP3_5_4, ATAWR1_N),
- PINMUX_IPSR_DATA(IP3_7_6, A24),
- PINMUX_IPSR_DATA(IP3_7_6, IO3),
- PINMUX_IPSR_DATA(IP3_7_6, EX_WAIT2),
- PINMUX_IPSR_DATA(IP3_9_8, A25),
- PINMUX_IPSR_DATA(IP3_9_8, SSL),
- PINMUX_IPSR_DATA(IP3_9_8, ATARD1_N),
- PINMUX_IPSR_DATA(IP3_10, CS0_N),
- PINMUX_IPSR_DATA(IP3_10, VI1_DATA8),
- PINMUX_IPSR_DATA(IP3_11, CS1_N_A26),
- PINMUX_IPSR_DATA(IP3_11, VI1_DATA9),
- PINMUX_IPSR_DATA(IP3_12, EX_CS0_N),
- PINMUX_IPSR_DATA(IP3_12, VI1_DATA10),
- PINMUX_IPSR_DATA(IP3_14_13, EX_CS1_N),
- PINMUX_IPSR_DATA(IP3_14_13, TPUTO3_B),
- PINMUX_IPSR_DATA(IP3_14_13, SCIFB2_RXD),
- PINMUX_IPSR_DATA(IP3_14_13, VI1_DATA11),
- PINMUX_IPSR_DATA(IP3_17_15, EX_CS2_N),
- PINMUX_IPSR_DATA(IP3_17_15, PWM0),
+ PINMUX_IPSR_GPSR(IP3_1_0, A21),
+ PINMUX_IPSR_GPSR(IP3_1_0, MOSI_IO0),
+ PINMUX_IPSR_GPSR(IP3_1_0, MOUT2),
+ PINMUX_IPSR_GPSR(IP3_3_2, A22),
+ PINMUX_IPSR_GPSR(IP3_3_2, MISO_IO1),
+ PINMUX_IPSR_GPSR(IP3_3_2, MOUT5),
+ PINMUX_IPSR_GPSR(IP3_3_2, ATADIR1_N),
+ PINMUX_IPSR_GPSR(IP3_5_4, A23),
+ PINMUX_IPSR_GPSR(IP3_5_4, IO2),
+ PINMUX_IPSR_GPSR(IP3_5_4, MOUT6),
+ PINMUX_IPSR_GPSR(IP3_5_4, ATAWR1_N),
+ PINMUX_IPSR_GPSR(IP3_7_6, A24),
+ PINMUX_IPSR_GPSR(IP3_7_6, IO3),
+ PINMUX_IPSR_GPSR(IP3_7_6, EX_WAIT2),
+ PINMUX_IPSR_GPSR(IP3_9_8, A25),
+ PINMUX_IPSR_GPSR(IP3_9_8, SSL),
+ PINMUX_IPSR_GPSR(IP3_9_8, ATARD1_N),
+ PINMUX_IPSR_GPSR(IP3_10, CS0_N),
+ PINMUX_IPSR_GPSR(IP3_10, VI1_DATA8),
+ PINMUX_IPSR_GPSR(IP3_11, CS1_N_A26),
+ PINMUX_IPSR_GPSR(IP3_11, VI1_DATA9),
+ PINMUX_IPSR_GPSR(IP3_12, EX_CS0_N),
+ PINMUX_IPSR_GPSR(IP3_12, VI1_DATA10),
+ PINMUX_IPSR_GPSR(IP3_14_13, EX_CS1_N),
+ PINMUX_IPSR_GPSR(IP3_14_13, TPUTO3_B),
+ PINMUX_IPSR_GPSR(IP3_14_13, SCIFB2_RXD),
+ PINMUX_IPSR_GPSR(IP3_14_13, VI1_DATA11),
+ PINMUX_IPSR_GPSR(IP3_17_15, EX_CS2_N),
+ PINMUX_IPSR_GPSR(IP3_17_15, PWM0),
PINMUX_IPSR_MSEL(IP3_17_15, SCIF4_RXD_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP3_17_15, TS_SDATA_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP3_17_15, RIF0_SYNC, SEL_DR0_0),
- PINMUX_IPSR_DATA(IP3_17_15, TPUTO3),
- PINMUX_IPSR_DATA(IP3_17_15, SCIFB2_TXD),
+ PINMUX_IPSR_GPSR(IP3_17_15, TPUTO3),
+ PINMUX_IPSR_GPSR(IP3_17_15, SCIFB2_TXD),
PINMUX_IPSR_MSEL(IP3_17_15, SDATA_B, SEL_FSN_1),
- PINMUX_IPSR_DATA(IP3_20_18, EX_CS3_N),
+ PINMUX_IPSR_GPSR(IP3_20_18, EX_CS3_N),
PINMUX_IPSR_MSEL(IP3_20_18, SCIFA2_SCK, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP3_20_18, SCIF4_TXD_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP3_20_18, TS_SCK_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP3_20_18, RIF0_CLK, SEL_DR0_0),
PINMUX_IPSR_MSEL(IP3_20_18, BPFCLK, SEL_DARC_0),
- PINMUX_IPSR_DATA(IP3_20_18, SCIFB2_SCK),
+ PINMUX_IPSR_GPSR(IP3_20_18, SCIFB2_SCK),
PINMUX_IPSR_MSEL(IP3_20_18, MDATA_B, SEL_FSN_1),
- PINMUX_IPSR_DATA(IP3_23_21, EX_CS4_N),
+ PINMUX_IPSR_GPSR(IP3_23_21, EX_CS4_N),
PINMUX_IPSR_MSEL(IP3_23_21, SCIFA2_RXD, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP3_23_21, I2C2_SCL_E, SEL_I2C02_4),
PINMUX_IPSR_MSEL(IP3_23_21, TS_SDEN_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP3_23_21, RIF0_D0, SEL_DR0_0),
PINMUX_IPSR_MSEL(IP3_23_21, FMCLK, SEL_DARC_0),
- PINMUX_IPSR_DATA(IP3_23_21, SCIFB2_CTS_N),
+ PINMUX_IPSR_GPSR(IP3_23_21, SCIFB2_CTS_N),
PINMUX_IPSR_MSEL(IP3_23_21, SCKZ_B, SEL_FSN_1),
- PINMUX_IPSR_DATA(IP3_26_24, EX_CS5_N),
+ PINMUX_IPSR_GPSR(IP3_26_24, EX_CS5_N),
PINMUX_IPSR_MSEL(IP3_26_24, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP3_26_24, I2C2_SDA_E, SEL_I2C02_4),
PINMUX_IPSR_MSEL(IP3_26_24, TS_SPSYNC_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP3_26_24, RIF0_D1, SEL_DR1_0),
PINMUX_IPSR_MSEL(IP3_26_24, FMIN, SEL_DARC_0),
- PINMUX_IPSR_DATA(IP3_26_24, SCIFB2_RTS_N),
+ PINMUX_IPSR_GPSR(IP3_26_24, SCIFB2_RTS_N),
PINMUX_IPSR_MSEL(IP3_26_24, STM_N_B, SEL_FSN_1),
- PINMUX_IPSR_DATA(IP3_29_27, BS_N),
- PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
- PINMUX_IPSR_DATA(IP3_29_27, PWM1_C),
- PINMUX_IPSR_DATA(IP3_29_27, TPUTO0_C),
- PINMUX_IPSR_DATA(IP3_29_27, ATACS01_N),
+ PINMUX_IPSR_GPSR(IP3_29_27, BS_N),
+ PINMUX_IPSR_GPSR(IP3_29_27, DRACK0),
+ PINMUX_IPSR_GPSR(IP3_29_27, PWM1_C),
+ PINMUX_IPSR_GPSR(IP3_29_27, TPUTO0_C),
+ PINMUX_IPSR_GPSR(IP3_29_27, ATACS01_N),
PINMUX_IPSR_MSEL(IP3_29_27, MTS_N_B, SEL_FSN_1),
- PINMUX_IPSR_DATA(IP3_30, RD_N),
- PINMUX_IPSR_DATA(IP3_30, ATACS11_N),
- PINMUX_IPSR_DATA(IP3_31, RD_WR_N),
- PINMUX_IPSR_DATA(IP3_31, ATAG1_N),
+ PINMUX_IPSR_GPSR(IP3_30, RD_N),
+ PINMUX_IPSR_GPSR(IP3_30, ATACS11_N),
+ PINMUX_IPSR_GPSR(IP3_31, RD_WR_N),
+ PINMUX_IPSR_GPSR(IP3_31, ATAG1_N),
/* IPSR4 */
- PINMUX_IPSR_DATA(IP4_1_0, EX_WAIT0),
+ PINMUX_IPSR_GPSR(IP4_1_0, EX_WAIT0),
PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_B, SEL_CAN_1),
PINMUX_IPSR_MSEL(IP4_1_0, SCIF_CLK, SEL_SCIF0_0),
- PINMUX_IPSR_DATA(IP4_1_0, PWMFSW0),
- PINMUX_IPSR_DATA(IP4_4_2, DU0_DR0),
- PINMUX_IPSR_DATA(IP4_4_2, LCDOUT16),
+ PINMUX_IPSR_GPSR(IP4_1_0, PWMFSW0),
+ PINMUX_IPSR_GPSR(IP4_4_2, DU0_DR0),
+ PINMUX_IPSR_GPSR(IP4_4_2, LCDOUT16),
PINMUX_IPSR_MSEL(IP4_4_2, SCIF5_RXD_C, SEL_SCIF5_2),
PINMUX_IPSR_MSEL(IP4_4_2, I2C2_SCL_D, SEL_I2C02_3),
- PINMUX_IPSR_DATA(IP4_4_2, CC50_STATE0),
- PINMUX_IPSR_DATA(IP4_7_5, DU0_DR1),
- PINMUX_IPSR_DATA(IP4_7_5, LCDOUT17),
+ PINMUX_IPSR_GPSR(IP4_4_2, CC50_STATE0),
+ PINMUX_IPSR_GPSR(IP4_7_5, DU0_DR1),
+ PINMUX_IPSR_GPSR(IP4_7_5, LCDOUT17),
PINMUX_IPSR_MSEL(IP4_7_5, SCIF5_TXD_C, SEL_SCIF5_2),
PINMUX_IPSR_MSEL(IP4_7_5, I2C2_SDA_D, SEL_I2C02_3),
- PINMUX_IPSR_DATA(IP4_9_8, CC50_STATE1),
- PINMUX_IPSR_DATA(IP4_9_8, DU0_DR2),
- PINMUX_IPSR_DATA(IP4_9_8, LCDOUT18),
- PINMUX_IPSR_DATA(IP4_9_8, CC50_STATE2),
- PINMUX_IPSR_DATA(IP4_11_10, DU0_DR3),
- PINMUX_IPSR_DATA(IP4_11_10, LCDOUT19),
- PINMUX_IPSR_DATA(IP4_11_10, CC50_STATE3),
- PINMUX_IPSR_DATA(IP4_13_12, DU0_DR4),
- PINMUX_IPSR_DATA(IP4_13_12, LCDOUT20),
- PINMUX_IPSR_DATA(IP4_13_12, CC50_STATE4),
- PINMUX_IPSR_DATA(IP4_15_14, DU0_DR5),
- PINMUX_IPSR_DATA(IP4_15_14, LCDOUT21),
- PINMUX_IPSR_DATA(IP4_15_14, CC50_STATE5),
- PINMUX_IPSR_DATA(IP4_17_16, DU0_DR6),
- PINMUX_IPSR_DATA(IP4_17_16, LCDOUT22),
- PINMUX_IPSR_DATA(IP4_17_16, CC50_STATE6),
- PINMUX_IPSR_DATA(IP4_19_18, DU0_DR7),
- PINMUX_IPSR_DATA(IP4_19_18, LCDOUT23),
- PINMUX_IPSR_DATA(IP4_19_18, CC50_STATE7),
- PINMUX_IPSR_DATA(IP4_22_20, DU0_DG0),
- PINMUX_IPSR_DATA(IP4_22_20, LCDOUT8),
+ PINMUX_IPSR_GPSR(IP4_9_8, CC50_STATE1),
+ PINMUX_IPSR_GPSR(IP4_9_8, DU0_DR2),
+ PINMUX_IPSR_GPSR(IP4_9_8, LCDOUT18),
+ PINMUX_IPSR_GPSR(IP4_9_8, CC50_STATE2),
+ PINMUX_IPSR_GPSR(IP4_11_10, DU0_DR3),
+ PINMUX_IPSR_GPSR(IP4_11_10, LCDOUT19),
+ PINMUX_IPSR_GPSR(IP4_11_10, CC50_STATE3),
+ PINMUX_IPSR_GPSR(IP4_13_12, DU0_DR4),
+ PINMUX_IPSR_GPSR(IP4_13_12, LCDOUT20),
+ PINMUX_IPSR_GPSR(IP4_13_12, CC50_STATE4),
+ PINMUX_IPSR_GPSR(IP4_15_14, DU0_DR5),
+ PINMUX_IPSR_GPSR(IP4_15_14, LCDOUT21),
+ PINMUX_IPSR_GPSR(IP4_15_14, CC50_STATE5),
+ PINMUX_IPSR_GPSR(IP4_17_16, DU0_DR6),
+ PINMUX_IPSR_GPSR(IP4_17_16, LCDOUT22),
+ PINMUX_IPSR_GPSR(IP4_17_16, CC50_STATE6),
+ PINMUX_IPSR_GPSR(IP4_19_18, DU0_DR7),
+ PINMUX_IPSR_GPSR(IP4_19_18, LCDOUT23),
+ PINMUX_IPSR_GPSR(IP4_19_18, CC50_STATE7),
+ PINMUX_IPSR_GPSR(IP4_22_20, DU0_DG0),
+ PINMUX_IPSR_GPSR(IP4_22_20, LCDOUT8),
PINMUX_IPSR_MSEL(IP4_22_20, SCIFA0_RXD_C, SEL_SCIFA0_2),
PINMUX_IPSR_MSEL(IP4_22_20, I2C3_SCL_D, SEL_I2C03_3),
- PINMUX_IPSR_DATA(IP4_22_20, CC50_STATE8),
- PINMUX_IPSR_DATA(IP4_25_23, DU0_DG1),
- PINMUX_IPSR_DATA(IP4_25_23, LCDOUT9),
+ PINMUX_IPSR_GPSR(IP4_22_20, CC50_STATE8),
+ PINMUX_IPSR_GPSR(IP4_25_23, DU0_DG1),
+ PINMUX_IPSR_GPSR(IP4_25_23, LCDOUT9),
PINMUX_IPSR_MSEL(IP4_25_23, SCIFA0_TXD_C, SEL_SCIFA0_2),
PINMUX_IPSR_MSEL(IP4_25_23, I2C3_SDA_D, SEL_I2C03_3),
- PINMUX_IPSR_DATA(IP4_25_23, CC50_STATE9),
- PINMUX_IPSR_DATA(IP4_27_26, DU0_DG2),
- PINMUX_IPSR_DATA(IP4_27_26, LCDOUT10),
- PINMUX_IPSR_DATA(IP4_27_26, CC50_STATE10),
- PINMUX_IPSR_DATA(IP4_29_28, DU0_DG3),
- PINMUX_IPSR_DATA(IP4_29_28, LCDOUT11),
- PINMUX_IPSR_DATA(IP4_29_28, CC50_STATE11),
- PINMUX_IPSR_DATA(IP4_31_30, DU0_DG4),
- PINMUX_IPSR_DATA(IP4_31_30, LCDOUT12),
- PINMUX_IPSR_DATA(IP4_31_30, CC50_STATE12),
+ PINMUX_IPSR_GPSR(IP4_25_23, CC50_STATE9),
+ PINMUX_IPSR_GPSR(IP4_27_26, DU0_DG2),
+ PINMUX_IPSR_GPSR(IP4_27_26, LCDOUT10),
+ PINMUX_IPSR_GPSR(IP4_27_26, CC50_STATE10),
+ PINMUX_IPSR_GPSR(IP4_29_28, DU0_DG3),
+ PINMUX_IPSR_GPSR(IP4_29_28, LCDOUT11),
+ PINMUX_IPSR_GPSR(IP4_29_28, CC50_STATE11),
+ PINMUX_IPSR_GPSR(IP4_31_30, DU0_DG4),
+ PINMUX_IPSR_GPSR(IP4_31_30, LCDOUT12),
+ PINMUX_IPSR_GPSR(IP4_31_30, CC50_STATE12),
/* IPSR5 */
- PINMUX_IPSR_DATA(IP5_1_0, DU0_DG5),
- PINMUX_IPSR_DATA(IP5_1_0, LCDOUT13),
- PINMUX_IPSR_DATA(IP5_1_0, CC50_STATE13),
- PINMUX_IPSR_DATA(IP5_3_2, DU0_DG6),
- PINMUX_IPSR_DATA(IP5_3_2, LCDOUT14),
- PINMUX_IPSR_DATA(IP5_3_2, CC50_STATE14),
- PINMUX_IPSR_DATA(IP5_5_4, DU0_DG7),
- PINMUX_IPSR_DATA(IP5_5_4, LCDOUT15),
- PINMUX_IPSR_DATA(IP5_5_4, CC50_STATE15),
- PINMUX_IPSR_DATA(IP5_8_6, DU0_DB0),
- PINMUX_IPSR_DATA(IP5_8_6, LCDOUT0),
+ PINMUX_IPSR_GPSR(IP5_1_0, DU0_DG5),
+ PINMUX_IPSR_GPSR(IP5_1_0, LCDOUT13),
+ PINMUX_IPSR_GPSR(IP5_1_0, CC50_STATE13),
+ PINMUX_IPSR_GPSR(IP5_3_2, DU0_DG6),
+ PINMUX_IPSR_GPSR(IP5_3_2, LCDOUT14),
+ PINMUX_IPSR_GPSR(IP5_3_2, CC50_STATE14),
+ PINMUX_IPSR_GPSR(IP5_5_4, DU0_DG7),
+ PINMUX_IPSR_GPSR(IP5_5_4, LCDOUT15),
+ PINMUX_IPSR_GPSR(IP5_5_4, CC50_STATE15),
+ PINMUX_IPSR_GPSR(IP5_8_6, DU0_DB0),
+ PINMUX_IPSR_GPSR(IP5_8_6, LCDOUT0),
PINMUX_IPSR_MSEL(IP5_8_6, SCIFA4_RXD_C, SEL_SCIFA4_2),
PINMUX_IPSR_MSEL(IP5_8_6, I2C4_SCL_D, SEL_I2C04_3),
PINMUX_IPSR_MSEL(IP7_8_6, CAN0_RX_C, SEL_CAN0_2),
- PINMUX_IPSR_DATA(IP5_8_6, CC50_STATE16),
- PINMUX_IPSR_DATA(IP5_11_9, DU0_DB1),
- PINMUX_IPSR_DATA(IP5_11_9, LCDOUT1),
+ PINMUX_IPSR_GPSR(IP5_8_6, CC50_STATE16),
+ PINMUX_IPSR_GPSR(IP5_11_9, DU0_DB1),
+ PINMUX_IPSR_GPSR(IP5_11_9, LCDOUT1),
PINMUX_IPSR_MSEL(IP5_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
PINMUX_IPSR_MSEL(IP5_11_9, I2C4_SDA_D, SEL_I2C04_3),
PINMUX_IPSR_MSEL(IP5_11_9, CAN0_TX_C, SEL_CAN0_2),
- PINMUX_IPSR_DATA(IP5_11_9, CC50_STATE17),
- PINMUX_IPSR_DATA(IP5_13_12, DU0_DB2),
- PINMUX_IPSR_DATA(IP5_13_12, LCDOUT2),
- PINMUX_IPSR_DATA(IP5_13_12, CC50_STATE18),
- PINMUX_IPSR_DATA(IP5_15_14, DU0_DB3),
- PINMUX_IPSR_DATA(IP5_15_14, LCDOUT3),
- PINMUX_IPSR_DATA(IP5_15_14, CC50_STATE19),
- PINMUX_IPSR_DATA(IP5_17_16, DU0_DB4),
- PINMUX_IPSR_DATA(IP5_17_16, LCDOUT4),
- PINMUX_IPSR_DATA(IP5_17_16, CC50_STATE20),
- PINMUX_IPSR_DATA(IP5_19_18, DU0_DB5),
- PINMUX_IPSR_DATA(IP5_19_18, LCDOUT5),
- PINMUX_IPSR_DATA(IP5_19_18, CC50_STATE21),
- PINMUX_IPSR_DATA(IP5_21_20, DU0_DB6),
- PINMUX_IPSR_DATA(IP5_21_20, LCDOUT6),
- PINMUX_IPSR_DATA(IP5_21_20, CC50_STATE22),
- PINMUX_IPSR_DATA(IP5_23_22, DU0_DB7),
- PINMUX_IPSR_DATA(IP5_23_22, LCDOUT7),
- PINMUX_IPSR_DATA(IP5_23_22, CC50_STATE23),
- PINMUX_IPSR_DATA(IP5_25_24, DU0_DOTCLKIN),
- PINMUX_IPSR_DATA(IP5_25_24, QSTVA_QVS),
- PINMUX_IPSR_DATA(IP5_25_24, CC50_STATE24),
- PINMUX_IPSR_DATA(IP5_27_26, DU0_DOTCLKOUT0),
- PINMUX_IPSR_DATA(IP5_27_26, QCLK),
- PINMUX_IPSR_DATA(IP5_27_26, CC50_STATE25),
- PINMUX_IPSR_DATA(IP5_29_28, DU0_DOTCLKOUT1),
- PINMUX_IPSR_DATA(IP5_29_28, QSTVB_QVE),
- PINMUX_IPSR_DATA(IP5_29_28, CC50_STATE26),
- PINMUX_IPSR_DATA(IP5_31_30, DU0_EXHSYNC_DU0_HSYNC),
- PINMUX_IPSR_DATA(IP5_31_30, QSTH_QHS),
- PINMUX_IPSR_DATA(IP5_31_30, CC50_STATE27),
+ PINMUX_IPSR_GPSR(IP5_11_9, CC50_STATE17),
+ PINMUX_IPSR_GPSR(IP5_13_12, DU0_DB2),
+ PINMUX_IPSR_GPSR(IP5_13_12, LCDOUT2),
+ PINMUX_IPSR_GPSR(IP5_13_12, CC50_STATE18),
+ PINMUX_IPSR_GPSR(IP5_15_14, DU0_DB3),
+ PINMUX_IPSR_GPSR(IP5_15_14, LCDOUT3),
+ PINMUX_IPSR_GPSR(IP5_15_14, CC50_STATE19),
+ PINMUX_IPSR_GPSR(IP5_17_16, DU0_DB4),
+ PINMUX_IPSR_GPSR(IP5_17_16, LCDOUT4),
+ PINMUX_IPSR_GPSR(IP5_17_16, CC50_STATE20),
+ PINMUX_IPSR_GPSR(IP5_19_18, DU0_DB5),
+ PINMUX_IPSR_GPSR(IP5_19_18, LCDOUT5),
+ PINMUX_IPSR_GPSR(IP5_19_18, CC50_STATE21),
+ PINMUX_IPSR_GPSR(IP5_21_20, DU0_DB6),
+ PINMUX_IPSR_GPSR(IP5_21_20, LCDOUT6),
+ PINMUX_IPSR_GPSR(IP5_21_20, CC50_STATE22),
+ PINMUX_IPSR_GPSR(IP5_23_22, DU0_DB7),
+ PINMUX_IPSR_GPSR(IP5_23_22, LCDOUT7),
+ PINMUX_IPSR_GPSR(IP5_23_22, CC50_STATE23),
+ PINMUX_IPSR_GPSR(IP5_25_24, DU0_DOTCLKIN),
+ PINMUX_IPSR_GPSR(IP5_25_24, QSTVA_QVS),
+ PINMUX_IPSR_GPSR(IP5_25_24, CC50_STATE24),
+ PINMUX_IPSR_GPSR(IP5_27_26, DU0_DOTCLKOUT0),
+ PINMUX_IPSR_GPSR(IP5_27_26, QCLK),
+ PINMUX_IPSR_GPSR(IP5_27_26, CC50_STATE25),
+ PINMUX_IPSR_GPSR(IP5_29_28, DU0_DOTCLKOUT1),
+ PINMUX_IPSR_GPSR(IP5_29_28, QSTVB_QVE),
+ PINMUX_IPSR_GPSR(IP5_29_28, CC50_STATE26),
+ PINMUX_IPSR_GPSR(IP5_31_30, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_GPSR(IP5_31_30, QSTH_QHS),
+ PINMUX_IPSR_GPSR(IP5_31_30, CC50_STATE27),
/* IPSR6 */
- PINMUX_IPSR_DATA(IP6_1_0, DU0_EXVSYNC_DU0_VSYNC),
- PINMUX_IPSR_DATA(IP6_1_0, QSTB_QHE),
- PINMUX_IPSR_DATA(IP6_1_0, CC50_STATE28),
- PINMUX_IPSR_DATA(IP6_3_2, DU0_EXODDF_DU0_ODDF_DISP_CDE),
- PINMUX_IPSR_DATA(IP6_3_2, QCPV_QDE),
- PINMUX_IPSR_DATA(IP6_3_2, CC50_STATE29),
- PINMUX_IPSR_DATA(IP6_5_4, DU0_DISP),
- PINMUX_IPSR_DATA(IP6_5_4, QPOLA),
- PINMUX_IPSR_DATA(IP6_5_4, CC50_STATE30),
- PINMUX_IPSR_DATA(IP6_7_6, DU0_CDE),
- PINMUX_IPSR_DATA(IP6_7_6, QPOLB),
- PINMUX_IPSR_DATA(IP6_7_6, CC50_STATE31),
- PINMUX_IPSR_DATA(IP6_8, VI0_CLK),
- PINMUX_IPSR_DATA(IP6_8, AVB_RX_CLK),
- PINMUX_IPSR_DATA(IP6_9, VI0_DATA0_VI0_B0),
- PINMUX_IPSR_DATA(IP6_9, AVB_RX_DV),
- PINMUX_IPSR_DATA(IP6_10, VI0_DATA1_VI0_B1),
- PINMUX_IPSR_DATA(IP6_10, AVB_RXD0),
- PINMUX_IPSR_DATA(IP6_11, VI0_DATA2_VI0_B2),
- PINMUX_IPSR_DATA(IP6_11, AVB_RXD1),
- PINMUX_IPSR_DATA(IP6_12, VI0_DATA3_VI0_B3),
- PINMUX_IPSR_DATA(IP6_12, AVB_RXD2),
- PINMUX_IPSR_DATA(IP6_13, VI0_DATA4_VI0_B4),
- PINMUX_IPSR_DATA(IP6_13, AVB_RXD3),
- PINMUX_IPSR_DATA(IP6_14, VI0_DATA5_VI0_B5),
- PINMUX_IPSR_DATA(IP6_14, AVB_RXD4),
- PINMUX_IPSR_DATA(IP6_15, VI0_DATA6_VI0_B6),
- PINMUX_IPSR_DATA(IP6_15, AVB_RXD5),
- PINMUX_IPSR_DATA(IP6_16, VI0_DATA7_VI0_B7),
- PINMUX_IPSR_DATA(IP6_16, AVB_RXD6),
- PINMUX_IPSR_DATA(IP6_19_17, VI0_CLKENB),
+ PINMUX_IPSR_GPSR(IP6_1_0, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_GPSR(IP6_1_0, QSTB_QHE),
+ PINMUX_IPSR_GPSR(IP6_1_0, CC50_STATE28),
+ PINMUX_IPSR_GPSR(IP6_3_2, DU0_EXODDF_DU0_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP6_3_2, QCPV_QDE),
+ PINMUX_IPSR_GPSR(IP6_3_2, CC50_STATE29),
+ PINMUX_IPSR_GPSR(IP6_5_4, DU0_DISP),
+ PINMUX_IPSR_GPSR(IP6_5_4, QPOLA),
+ PINMUX_IPSR_GPSR(IP6_5_4, CC50_STATE30),
+ PINMUX_IPSR_GPSR(IP6_7_6, DU0_CDE),
+ PINMUX_IPSR_GPSR(IP6_7_6, QPOLB),
+ PINMUX_IPSR_GPSR(IP6_7_6, CC50_STATE31),
+ PINMUX_IPSR_GPSR(IP6_8, VI0_CLK),
+ PINMUX_IPSR_GPSR(IP6_8, AVB_RX_CLK),
+ PINMUX_IPSR_GPSR(IP6_9, VI0_DATA0_VI0_B0),
+ PINMUX_IPSR_GPSR(IP6_9, AVB_RX_DV),
+ PINMUX_IPSR_GPSR(IP6_10, VI0_DATA1_VI0_B1),
+ PINMUX_IPSR_GPSR(IP6_10, AVB_RXD0),
+ PINMUX_IPSR_GPSR(IP6_11, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_GPSR(IP6_11, AVB_RXD1),
+ PINMUX_IPSR_GPSR(IP6_12, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_GPSR(IP6_12, AVB_RXD2),
+ PINMUX_IPSR_GPSR(IP6_13, VI0_DATA4_VI0_B4),
+ PINMUX_IPSR_GPSR(IP6_13, AVB_RXD3),
+ PINMUX_IPSR_GPSR(IP6_14, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_GPSR(IP6_14, AVB_RXD4),
+ PINMUX_IPSR_GPSR(IP6_15, VI0_DATA6_VI0_B6),
+ PINMUX_IPSR_GPSR(IP6_15, AVB_RXD5),
+ PINMUX_IPSR_GPSR(IP6_16, VI0_DATA7_VI0_B7),
+ PINMUX_IPSR_GPSR(IP6_16, AVB_RXD6),
+ PINMUX_IPSR_GPSR(IP6_19_17, VI0_CLKENB),
PINMUX_IPSR_MSEL(IP6_19_17, I2C3_SCL, SEL_I2C03_0),
PINMUX_IPSR_MSEL(IP6_19_17, SCIFA5_RXD_C, SEL_SCIFA5_2),
PINMUX_IPSR_MSEL(IP6_19_17, IETX_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP6_19_17, AVB_RXD7),
- PINMUX_IPSR_DATA(IP6_22_20, VI0_FIELD),
+ PINMUX_IPSR_GPSR(IP6_19_17, AVB_RXD7),
+ PINMUX_IPSR_GPSR(IP6_22_20, VI0_FIELD),
PINMUX_IPSR_MSEL(IP6_22_20, I2C3_SDA, SEL_I2C03_0),
PINMUX_IPSR_MSEL(IP6_22_20, SCIFA5_TXD_C, SEL_SCIFA5_2),
PINMUX_IPSR_MSEL(IP6_22_20, IECLK_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP6_22_20, AVB_RX_ER),
- PINMUX_IPSR_DATA(IP6_25_23, VI0_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP6_22_20, AVB_RX_ER),
+ PINMUX_IPSR_GPSR(IP6_25_23, VI0_HSYNC_N),
PINMUX_IPSR_MSEL(IP6_25_23, SCIF0_RXD_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP6_25_23, I2C0_SCL_C, SEL_I2C00_2),
PINMUX_IPSR_MSEL(IP6_25_23, IERX_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP6_25_23, AVB_COL),
- PINMUX_IPSR_DATA(IP6_28_26, VI0_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP6_25_23, AVB_COL),
+ PINMUX_IPSR_GPSR(IP6_28_26, VI0_VSYNC_N),
PINMUX_IPSR_MSEL(IP6_28_26, SCIF0_TXD_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP6_28_26, I2C0_SDA_C, SEL_I2C00_2),
PINMUX_IPSR_MSEL(IP6_28_26, AUDIO_CLKOUT_B, SEL_ADG_1),
- PINMUX_IPSR_DATA(IP6_28_26, AVB_TX_EN),
+ PINMUX_IPSR_GPSR(IP6_28_26, AVB_TX_EN),
PINMUX_IPSR_MSEL(IP6_31_29, ETH_MDIO, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP6_31_29, VI0_G0),
+ PINMUX_IPSR_GPSR(IP6_31_29, VI0_G0),
PINMUX_IPSR_MSEL(IP6_31_29, MSIOF2_RXD_B, SEL_MSI2_1),
PINMUX_IPSR_MSEL(IP6_31_29, IIC0_SCL_D, SEL_IIC00_3),
- PINMUX_IPSR_DATA(IP6_31_29, AVB_TX_CLK),
+ PINMUX_IPSR_GPSR(IP6_31_29, AVB_TX_CLK),
PINMUX_IPSR_MSEL(IP6_31_29, ADIDATA, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP6_31_29, AD_DI, SEL_ADI_0),
/* IPSR7 */
PINMUX_IPSR_MSEL(IP7_2_0, ETH_CRS_DV, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_2_0, VI0_G1),
+ PINMUX_IPSR_GPSR(IP7_2_0, VI0_G1),
PINMUX_IPSR_MSEL(IP7_2_0, MSIOF2_TXD_B, SEL_MSI2_1),
PINMUX_IPSR_MSEL(IP7_2_0, IIC0_SDA_D, SEL_IIC00_3),
- PINMUX_IPSR_DATA(IP7_2_0, AVB_TXD0),
+ PINMUX_IPSR_GPSR(IP7_2_0, AVB_TXD0),
PINMUX_IPSR_MSEL(IP7_2_0, ADICS_SAMP, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP7_2_0, AD_DO, SEL_ADI_0),
PINMUX_IPSR_MSEL(IP7_5_3, ETH_RX_ER, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_5_3, VI0_G2),
+ PINMUX_IPSR_GPSR(IP7_5_3, VI0_G2),
PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_MSI2_1),
PINMUX_IPSR_MSEL(IP7_5_3, CAN0_RX_B, SEL_CAN0_1),
- PINMUX_IPSR_DATA(IP7_5_3, AVB_TXD1),
+ PINMUX_IPSR_GPSR(IP7_5_3, AVB_TXD1),
PINMUX_IPSR_MSEL(IP7_5_3, ADICLK, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP7_5_3, AD_CLK, SEL_ADI_0),
PINMUX_IPSR_MSEL(IP7_8_6, ETH_RXD0, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_8_6, VI0_G3),
+ PINMUX_IPSR_GPSR(IP7_8_6, VI0_G3),
PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_MSI2_1),
PINMUX_IPSR_MSEL(IP7_8_6, CAN0_TX_B, SEL_CAN0_1),
- PINMUX_IPSR_DATA(IP7_8_6, AVB_TXD2),
+ PINMUX_IPSR_GPSR(IP7_8_6, AVB_TXD2),
PINMUX_IPSR_MSEL(IP7_8_6, ADICHS0, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP7_8_6, AD_NCS_N, SEL_ADI_0),
PINMUX_IPSR_MSEL(IP7_11_9, ETH_RXD1, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_11_9, VI0_G4),
+ PINMUX_IPSR_GPSR(IP7_11_9, VI0_G4),
PINMUX_IPSR_MSEL(IP7_11_9, MSIOF2_SS1_B, SEL_MSI2_1),
PINMUX_IPSR_MSEL(IP7_11_9, SCIF4_RXD_D, SEL_SCIF4_3),
- PINMUX_IPSR_DATA(IP7_11_9, AVB_TXD3),
+ PINMUX_IPSR_GPSR(IP7_11_9, AVB_TXD3),
PINMUX_IPSR_MSEL(IP7_11_9, ADICHS1, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP7_14_12, ETH_LINK, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_14_12, VI0_G5),
+ PINMUX_IPSR_GPSR(IP7_14_12, VI0_G5),
PINMUX_IPSR_MSEL(IP7_14_12, MSIOF2_SS2_B, SEL_MSI2_1),
PINMUX_IPSR_MSEL(IP7_14_12, SCIF4_TXD_D, SEL_SCIF4_3),
- PINMUX_IPSR_DATA(IP7_14_12, AVB_TXD4),
+ PINMUX_IPSR_GPSR(IP7_14_12, AVB_TXD4),
PINMUX_IPSR_MSEL(IP7_14_12, ADICHS2, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP7_17_15, ETH_REFCLK, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_17_15, VI0_G6),
+ PINMUX_IPSR_GPSR(IP7_17_15, VI0_G6),
PINMUX_IPSR_MSEL(IP7_17_15, SCIF2_SCK_C, SEL_SCIF2_2),
- PINMUX_IPSR_DATA(IP7_17_15, AVB_TXD5),
+ PINMUX_IPSR_GPSR(IP7_17_15, AVB_TXD5),
PINMUX_IPSR_MSEL(IP7_17_15, SSI_SCK5_B, SEL_SSI5_1),
PINMUX_IPSR_MSEL(IP7_20_18, ETH_TXD1, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_20_18, VI0_G7),
+ PINMUX_IPSR_GPSR(IP7_20_18, VI0_G7),
PINMUX_IPSR_MSEL(IP7_20_18, SCIF2_RXD_C, SEL_SCIF2_2),
PINMUX_IPSR_MSEL(IP7_20_18, IIC1_SCL_D, SEL_IIC01_3),
- PINMUX_IPSR_DATA(IP7_20_18, AVB_TXD6),
+ PINMUX_IPSR_GPSR(IP7_20_18, AVB_TXD6),
PINMUX_IPSR_MSEL(IP7_20_18, SSI_WS5_B, SEL_SSI5_1),
PINMUX_IPSR_MSEL(IP7_23_21, ETH_TX_EN, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_23_21, VI0_R0),
+ PINMUX_IPSR_GPSR(IP7_23_21, VI0_R0),
PINMUX_IPSR_MSEL(IP7_23_21, SCIF2_TXD_C, SEL_SCIF2_2),
PINMUX_IPSR_MSEL(IP7_23_21, IIC1_SDA_D, SEL_IIC01_3),
- PINMUX_IPSR_DATA(IP7_23_21, AVB_TXD7),
+ PINMUX_IPSR_GPSR(IP7_23_21, AVB_TXD7),
PINMUX_IPSR_MSEL(IP7_23_21, SSI_SDATA5_B, SEL_SSI5_1),
PINMUX_IPSR_MSEL(IP7_26_24, ETH_MAGIC, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_26_24, VI0_R1),
+ PINMUX_IPSR_GPSR(IP7_26_24, VI0_R1),
PINMUX_IPSR_MSEL(IP7_26_24, SCIF3_SCK_B, SEL_SCIF3_1),
- PINMUX_IPSR_DATA(IP7_26_24, AVB_TX_ER),
+ PINMUX_IPSR_GPSR(IP7_26_24, AVB_TX_ER),
PINMUX_IPSR_MSEL(IP7_26_24, SSI_SCK6_B, SEL_SSI6_1),
PINMUX_IPSR_MSEL(IP7_29_27, ETH_TXD0, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP7_29_27, VI0_R2),
+ PINMUX_IPSR_GPSR(IP7_29_27, VI0_R2),
PINMUX_IPSR_MSEL(IP7_29_27, SCIF3_RXD_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP7_29_27, I2C4_SCL_E, SEL_I2C04_4),
- PINMUX_IPSR_DATA(IP7_29_27, AVB_GTX_CLK),
+ PINMUX_IPSR_GPSR(IP7_29_27, AVB_GTX_CLK),
PINMUX_IPSR_MSEL(IP7_29_27, SSI_WS6_B, SEL_SSI6_1),
- PINMUX_IPSR_DATA(IP7_31, DREQ0_N),
- PINMUX_IPSR_DATA(IP7_31, SCIFB1_RXD),
+ PINMUX_IPSR_GPSR(IP7_31, DREQ0_N),
+ PINMUX_IPSR_GPSR(IP7_31, SCIFB1_RXD),
/* IPSR8 */
PINMUX_IPSR_MSEL(IP8_2_0, ETH_MDC, SEL_ETH_0),
- PINMUX_IPSR_DATA(IP8_2_0, VI0_R3),
+ PINMUX_IPSR_GPSR(IP8_2_0, VI0_R3),
PINMUX_IPSR_MSEL(IP8_2_0, SCIF3_TXD_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP8_2_0, I2C4_SDA_E, SEL_I2C04_4),
- PINMUX_IPSR_DATA(IP8_2_0, AVB_MDC),
+ PINMUX_IPSR_GPSR(IP8_2_0, AVB_MDC),
PINMUX_IPSR_MSEL(IP8_2_0, SSI_SDATA6_B, SEL_SSI6_1),
PINMUX_IPSR_MSEL(IP8_5_3, HSCIF0_HRX, SEL_HSCIF0_0),
- PINMUX_IPSR_DATA(IP8_5_3, VI0_R4),
+ PINMUX_IPSR_GPSR(IP8_5_3, VI0_R4),
PINMUX_IPSR_MSEL(IP8_5_3, I2C1_SCL_C, SEL_I2C01_2),
PINMUX_IPSR_MSEL(IP8_5_3, AUDIO_CLKA_B, SEL_ADG_1),
- PINMUX_IPSR_DATA(IP8_5_3, AVB_MDIO),
+ PINMUX_IPSR_GPSR(IP8_5_3, AVB_MDIO),
PINMUX_IPSR_MSEL(IP8_5_3, SSI_SCK78_B, SEL_SSI7_1),
PINMUX_IPSR_MSEL(IP8_8_6, HSCIF0_HTX, SEL_HSCIF0_0),
- PINMUX_IPSR_DATA(IP8_8_6, VI0_R5),
+ PINMUX_IPSR_GPSR(IP8_8_6, VI0_R5),
PINMUX_IPSR_MSEL(IP8_8_6, I2C1_SDA_C, SEL_I2C01_2),
PINMUX_IPSR_MSEL(IP8_8_6, AUDIO_CLKB_B, SEL_ADG_1),
- PINMUX_IPSR_DATA(IP8_5_3, AVB_LINK),
+ PINMUX_IPSR_GPSR(IP8_5_3, AVB_LINK),
PINMUX_IPSR_MSEL(IP8_8_6, SSI_WS78_B, SEL_SSI7_1),
- PINMUX_IPSR_DATA(IP8_11_9, HSCIF0_HCTS_N),
- PINMUX_IPSR_DATA(IP8_11_9, VI0_R6),
+ PINMUX_IPSR_GPSR(IP8_11_9, HSCIF0_HCTS_N),
+ PINMUX_IPSR_GPSR(IP8_11_9, VI0_R6),
PINMUX_IPSR_MSEL(IP8_11_9, SCIF0_RXD_D, SEL_SCIF0_3),
PINMUX_IPSR_MSEL(IP8_11_9, I2C0_SCL_E, SEL_I2C00_4),
- PINMUX_IPSR_DATA(IP8_11_9, AVB_MAGIC),
+ PINMUX_IPSR_GPSR(IP8_11_9, AVB_MAGIC),
PINMUX_IPSR_MSEL(IP8_11_9, SSI_SDATA7_B, SEL_SSI7_1),
- PINMUX_IPSR_DATA(IP8_14_12, HSCIF0_HRTS_N),
- PINMUX_IPSR_DATA(IP8_14_12, VI0_R7),
+ PINMUX_IPSR_GPSR(IP8_14_12, HSCIF0_HRTS_N),
+ PINMUX_IPSR_GPSR(IP8_14_12, VI0_R7),
PINMUX_IPSR_MSEL(IP8_14_12, SCIF0_TXD_D, SEL_SCIF0_3),
PINMUX_IPSR_MSEL(IP8_14_12, I2C0_SDA_E, SEL_I2C00_4),
- PINMUX_IPSR_DATA(IP8_14_12, AVB_PHY_INT),
+ PINMUX_IPSR_GPSR(IP8_14_12, AVB_PHY_INT),
PINMUX_IPSR_MSEL(IP8_14_12, SSI_SDATA8_B, SEL_SSI8_1),
PINMUX_IPSR_MSEL(IP8_16_15, HSCIF0_HSCK, SEL_HSCIF0_0),
PINMUX_IPSR_MSEL(IP8_16_15, SCIF_CLK_B, SEL_SCIF0_1),
- PINMUX_IPSR_DATA(IP8_16_15, AVB_CRS),
+ PINMUX_IPSR_GPSR(IP8_16_15, AVB_CRS),
PINMUX_IPSR_MSEL(IP8_16_15, AUDIO_CLKC_B, SEL_ADG_1),
PINMUX_IPSR_MSEL(IP8_19_17, I2C0_SCL, SEL_I2C00_0),
PINMUX_IPSR_MSEL(IP8_19_17, SCIF0_RXD_C, SEL_SCIF0_2),
- PINMUX_IPSR_DATA(IP8_19_17, PWM5),
+ PINMUX_IPSR_GPSR(IP8_19_17, PWM5),
PINMUX_IPSR_MSEL(IP8_19_17, TCLK1_B, SEL_TMU_1),
- PINMUX_IPSR_DATA(IP8_19_17, AVB_GTXREFCLK),
+ PINMUX_IPSR_GPSR(IP8_19_17, AVB_GTXREFCLK),
PINMUX_IPSR_MSEL(IP8_19_17, CAN1_RX_D, SEL_CAN1_3),
- PINMUX_IPSR_DATA(IP8_19_17, TPUTO0_B),
+ PINMUX_IPSR_GPSR(IP8_19_17, TPUTO0_B),
PINMUX_IPSR_MSEL(IP8_22_20, I2C0_SDA, SEL_I2C00_0),
PINMUX_IPSR_MSEL(IP8_22_20, SCIF0_TXD_C, SEL_SCIF0_2),
- PINMUX_IPSR_DATA(IP8_22_20, TPUTO0),
+ PINMUX_IPSR_GPSR(IP8_22_20, TPUTO0),
PINMUX_IPSR_MSEL(IP8_22_20, CAN_CLK, SEL_CAN_0),
- PINMUX_IPSR_DATA(IP8_22_20, DVC_MUTE),
+ PINMUX_IPSR_GPSR(IP8_22_20, DVC_MUTE),
PINMUX_IPSR_MSEL(IP8_22_20, CAN1_TX_D, SEL_CAN1_3),
PINMUX_IPSR_MSEL(IP8_25_23, I2C1_SCL, SEL_I2C01_0),
PINMUX_IPSR_MSEL(IP8_25_23, SCIF4_RXD, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP8_25_23, PWM5_B),
- PINMUX_IPSR_DATA(IP8_25_23, DU1_DR0),
+ PINMUX_IPSR_GPSR(IP8_25_23, PWM5_B),
+ PINMUX_IPSR_GPSR(IP8_25_23, DU1_DR0),
PINMUX_IPSR_MSEL(IP8_25_23, RIF1_SYNC_B, SEL_DR2_1),
PINMUX_IPSR_MSEL(IP8_25_23, TS_SDATA_D, SEL_TSIF0_3),
- PINMUX_IPSR_DATA(IP8_25_23, TPUTO1_B),
+ PINMUX_IPSR_GPSR(IP8_25_23, TPUTO1_B),
PINMUX_IPSR_MSEL(IP8_28_26, I2C1_SDA, SEL_I2C01_0),
PINMUX_IPSR_MSEL(IP8_28_26, SCIF4_TXD, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP8_28_26, IRQ5),
- PINMUX_IPSR_DATA(IP8_28_26, DU1_DR1),
+ PINMUX_IPSR_GPSR(IP8_28_26, IRQ5),
+ PINMUX_IPSR_GPSR(IP8_28_26, DU1_DR1),
PINMUX_IPSR_MSEL(IP8_28_26, RIF1_CLK_B, SEL_DR2_1),
PINMUX_IPSR_MSEL(IP8_28_26, TS_SCK_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP8_28_26, BPFCLK_C, SEL_DARC_2),
- PINMUX_IPSR_DATA(IP8_31_29, MSIOF0_RXD),
+ PINMUX_IPSR_GPSR(IP8_31_29, MSIOF0_RXD),
PINMUX_IPSR_MSEL(IP8_31_29, SCIF5_RXD, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP8_31_29, I2C2_SCL_C, SEL_I2C02_2),
- PINMUX_IPSR_DATA(IP8_31_29, DU1_DR2),
+ PINMUX_IPSR_GPSR(IP8_31_29, DU1_DR2),
PINMUX_IPSR_MSEL(IP8_31_29, RIF1_D0_B, SEL_DR2_1),
PINMUX_IPSR_MSEL(IP8_31_29, TS_SDEN_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP8_31_29, FMCLK_C, SEL_DARC_2),
PINMUX_IPSR_MSEL(IP8_31_29, RDS_CLK, SEL_RDS_0),
/* IPSR9 */
- PINMUX_IPSR_DATA(IP9_2_0, MSIOF0_TXD),
+ PINMUX_IPSR_GPSR(IP9_2_0, MSIOF0_TXD),
PINMUX_IPSR_MSEL(IP9_2_0, SCIF5_TXD, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP9_2_0, I2C2_SDA_C, SEL_I2C02_2),
- PINMUX_IPSR_DATA(IP9_2_0, DU1_DR3),
+ PINMUX_IPSR_GPSR(IP9_2_0, DU1_DR3),
PINMUX_IPSR_MSEL(IP9_2_0, RIF1_D1_B, SEL_DR3_1),
PINMUX_IPSR_MSEL(IP9_2_0, TS_SPSYNC_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP9_2_0, FMIN_C, SEL_DARC_2),
PINMUX_IPSR_MSEL(IP9_2_0, RDS_DATA, SEL_RDS_0),
- PINMUX_IPSR_DATA(IP9_5_3, MSIOF0_SCK),
- PINMUX_IPSR_DATA(IP9_5_3, IRQ0),
+ PINMUX_IPSR_GPSR(IP9_5_3, MSIOF0_SCK),
+ PINMUX_IPSR_GPSR(IP9_5_3, IRQ0),
PINMUX_IPSR_MSEL(IP9_5_3, TS_SDATA, SEL_TSIF0_0),
- PINMUX_IPSR_DATA(IP9_5_3, DU1_DR4),
+ PINMUX_IPSR_GPSR(IP9_5_3, DU1_DR4),
PINMUX_IPSR_MSEL(IP9_5_3, RIF1_SYNC, SEL_DR2_0),
- PINMUX_IPSR_DATA(IP9_5_3, TPUTO1_C),
- PINMUX_IPSR_DATA(IP9_8_6, MSIOF0_SYNC),
- PINMUX_IPSR_DATA(IP9_8_6, PWM1),
+ PINMUX_IPSR_GPSR(IP9_5_3, TPUTO1_C),
+ PINMUX_IPSR_GPSR(IP9_8_6, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP9_8_6, PWM1),
PINMUX_IPSR_MSEL(IP9_8_6, TS_SCK, SEL_TSIF0_0),
- PINMUX_IPSR_DATA(IP9_8_6, DU1_DR5),
+ PINMUX_IPSR_GPSR(IP9_8_6, DU1_DR5),
PINMUX_IPSR_MSEL(IP9_8_6, RIF1_CLK, SEL_DR2_0),
PINMUX_IPSR_MSEL(IP9_8_6, BPFCLK_B, SEL_DARC_1),
- PINMUX_IPSR_DATA(IP9_11_9, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP9_11_9, MSIOF0_SS1),
PINMUX_IPSR_MSEL(IP9_11_9, SCIFA0_RXD, SEL_SCIFA0_0),
PINMUX_IPSR_MSEL(IP9_11_9, TS_SDEN, SEL_TSIF0_0),
- PINMUX_IPSR_DATA(IP9_11_9, DU1_DR6),
+ PINMUX_IPSR_GPSR(IP9_11_9, DU1_DR6),
PINMUX_IPSR_MSEL(IP9_11_9, RIF1_D0, SEL_DR2_0),
PINMUX_IPSR_MSEL(IP9_11_9, FMCLK_B, SEL_DARC_1),
PINMUX_IPSR_MSEL(IP9_11_9, RDS_CLK_B, SEL_RDS_1),
- PINMUX_IPSR_DATA(IP9_14_12, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP9_14_12, MSIOF0_SS2),
PINMUX_IPSR_MSEL(IP9_14_12, SCIFA0_TXD, SEL_SCIFA0_0),
PINMUX_IPSR_MSEL(IP9_14_12, TS_SPSYNC, SEL_TSIF0_0),
- PINMUX_IPSR_DATA(IP9_14_12, DU1_DR7),
+ PINMUX_IPSR_GPSR(IP9_14_12, DU1_DR7),
PINMUX_IPSR_MSEL(IP9_14_12, RIF1_D1, SEL_DR3_0),
PINMUX_IPSR_MSEL(IP9_14_12, FMIN_B, SEL_DARC_1),
PINMUX_IPSR_MSEL(IP9_14_12, RDS_DATA_B, SEL_RDS_1),
PINMUX_IPSR_MSEL(IP9_16_15, HSCIF1_HRX, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP9_16_15, I2C4_SCL, SEL_I2C04_0),
- PINMUX_IPSR_DATA(IP9_16_15, PWM6),
- PINMUX_IPSR_DATA(IP9_16_15, DU1_DG0),
+ PINMUX_IPSR_GPSR(IP9_16_15, PWM6),
+ PINMUX_IPSR_GPSR(IP9_16_15, DU1_DG0),
PINMUX_IPSR_MSEL(IP9_18_17, HSCIF1_HTX, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP9_18_17, I2C4_SDA, SEL_I2C04_0),
- PINMUX_IPSR_DATA(IP9_18_17, TPUTO1),
- PINMUX_IPSR_DATA(IP9_18_17, DU1_DG1),
- PINMUX_IPSR_DATA(IP9_21_19, HSCIF1_HSCK),
- PINMUX_IPSR_DATA(IP9_21_19, PWM2),
+ PINMUX_IPSR_GPSR(IP9_18_17, TPUTO1),
+ PINMUX_IPSR_GPSR(IP9_18_17, DU1_DG1),
+ PINMUX_IPSR_GPSR(IP9_21_19, HSCIF1_HSCK),
+ PINMUX_IPSR_GPSR(IP9_21_19, PWM2),
PINMUX_IPSR_MSEL(IP9_21_19, IETX, SEL_IEB_0),
- PINMUX_IPSR_DATA(IP9_21_19, DU1_DG2),
+ PINMUX_IPSR_GPSR(IP9_21_19, DU1_DG2),
PINMUX_IPSR_MSEL(IP9_21_19, REMOCON_B, SEL_RCN_1),
PINMUX_IPSR_MSEL(IP9_21_19, SPEEDIN_B, SEL_RSP_1),
PINMUX_IPSR_MSEL(IP9_21_19, VSP_B, SEL_SPDM_1),
PINMUX_IPSR_MSEL(IP9_24_22, HSCIF1_HCTS_N, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP9_24_22, SCIFA4_RXD, SEL_SCIFA4_0),
PINMUX_IPSR_MSEL(IP9_24_22, IECLK, SEL_IEB_0),
- PINMUX_IPSR_DATA(IP9_24_22, DU1_DG3),
+ PINMUX_IPSR_GPSR(IP9_24_22, DU1_DG3),
PINMUX_IPSR_MSEL(IP9_24_22, SSI_SCK1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP9_24_22, CAN_DEBUG_HW_TRIGGER),
- PINMUX_IPSR_DATA(IP9_24_22, CC50_STATE32),
+ PINMUX_IPSR_GPSR(IP9_24_22, CAN_DEBUG_HW_TRIGGER),
+ PINMUX_IPSR_GPSR(IP9_24_22, CC50_STATE32),
PINMUX_IPSR_MSEL(IP9_27_25, HSCIF1_HRTS_N, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP9_27_25, SCIFA4_TXD, SEL_SCIFA4_0),
PINMUX_IPSR_MSEL(IP9_27_25, IERX, SEL_IEB_0),
- PINMUX_IPSR_DATA(IP9_27_25, DU1_DG4),
+ PINMUX_IPSR_GPSR(IP9_27_25, DU1_DG4),
PINMUX_IPSR_MSEL(IP9_27_25, SSI_WS1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP9_27_25, CAN_STEP0),
- PINMUX_IPSR_DATA(IP9_27_25, CC50_STATE33),
+ PINMUX_IPSR_GPSR(IP9_27_25, CAN_STEP0),
+ PINMUX_IPSR_GPSR(IP9_27_25, CC50_STATE33),
PINMUX_IPSR_MSEL(IP9_30_28, SCIF1_SCK, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP9_30_28, PWM3),
+ PINMUX_IPSR_GPSR(IP9_30_28, PWM3),
PINMUX_IPSR_MSEL(IP9_30_28, TCLK2, SEL_TMU_0),
- PINMUX_IPSR_DATA(IP9_30_28, DU1_DG5),
+ PINMUX_IPSR_GPSR(IP9_30_28, DU1_DG5),
PINMUX_IPSR_MSEL(IP9_30_28, SSI_SDATA1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP9_30_28, CAN_TXCLK),
- PINMUX_IPSR_DATA(IP9_30_28, CC50_STATE34),
+ PINMUX_IPSR_GPSR(IP9_30_28, CAN_TXCLK),
+ PINMUX_IPSR_GPSR(IP9_30_28, CC50_STATE34),
/* IPSR10 */
PINMUX_IPSR_MSEL(IP10_2_0, SCIF1_RXD, SEL_SCIF1_0),
PINMUX_IPSR_MSEL(IP10_2_0, IIC0_SCL, SEL_IIC00_0),
- PINMUX_IPSR_DATA(IP10_2_0, DU1_DG6),
+ PINMUX_IPSR_GPSR(IP10_2_0, DU1_DG6),
PINMUX_IPSR_MSEL(IP10_2_0, SSI_SCK2_B, SEL_SSI2_1),
- PINMUX_IPSR_DATA(IP10_2_0, CAN_DEBUGOUT0),
- PINMUX_IPSR_DATA(IP10_2_0, CC50_STATE35),
+ PINMUX_IPSR_GPSR(IP10_2_0, CAN_DEBUGOUT0),
+ PINMUX_IPSR_GPSR(IP10_2_0, CC50_STATE35),
PINMUX_IPSR_MSEL(IP10_5_3, SCIF1_TXD, SEL_SCIF1_0),
PINMUX_IPSR_MSEL(IP10_5_3, IIC0_SDA, SEL_IIC00_0),
- PINMUX_IPSR_DATA(IP10_5_3, DU1_DG7),
+ PINMUX_IPSR_GPSR(IP10_5_3, DU1_DG7),
PINMUX_IPSR_MSEL(IP10_5_3, SSI_WS2_B, SEL_SSI2_1),
- PINMUX_IPSR_DATA(IP10_5_3, CAN_DEBUGOUT1),
- PINMUX_IPSR_DATA(IP10_5_3, CC50_STATE36),
+ PINMUX_IPSR_GPSR(IP10_5_3, CAN_DEBUGOUT1),
+ PINMUX_IPSR_GPSR(IP10_5_3, CC50_STATE36),
PINMUX_IPSR_MSEL(IP10_8_6, SCIF2_RXD, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP10_8_6, IIC1_SCL, SEL_IIC01_0),
- PINMUX_IPSR_DATA(IP10_8_6, DU1_DB0),
+ PINMUX_IPSR_GPSR(IP10_8_6, DU1_DB0),
PINMUX_IPSR_MSEL(IP10_8_6, SSI_SDATA2_B, SEL_SSI2_1),
- PINMUX_IPSR_DATA(IP10_8_6, USB0_EXTLP),
- PINMUX_IPSR_DATA(IP10_8_6, CAN_DEBUGOUT2),
- PINMUX_IPSR_DATA(IP10_8_6, CC50_STATE37),
+ PINMUX_IPSR_GPSR(IP10_8_6, USB0_EXTLP),
+ PINMUX_IPSR_GPSR(IP10_8_6, CAN_DEBUGOUT2),
+ PINMUX_IPSR_GPSR(IP10_8_6, CC50_STATE37),
PINMUX_IPSR_MSEL(IP10_11_9, SCIF2_TXD, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP10_11_9, IIC1_SDA, SEL_IIC01_0),
- PINMUX_IPSR_DATA(IP10_11_9, DU1_DB1),
+ PINMUX_IPSR_GPSR(IP10_11_9, DU1_DB1),
PINMUX_IPSR_MSEL(IP10_11_9, SSI_SCK9_B, SEL_SSI9_1),
- PINMUX_IPSR_DATA(IP10_11_9, USB0_OVC1),
- PINMUX_IPSR_DATA(IP10_11_9, CAN_DEBUGOUT3),
- PINMUX_IPSR_DATA(IP10_11_9, CC50_STATE38),
+ PINMUX_IPSR_GPSR(IP10_11_9, USB0_OVC1),
+ PINMUX_IPSR_GPSR(IP10_11_9, CAN_DEBUGOUT3),
+ PINMUX_IPSR_GPSR(IP10_11_9, CC50_STATE38),
PINMUX_IPSR_MSEL(IP10_14_12, SCIF2_SCK, SEL_SCIF2_0),
- PINMUX_IPSR_DATA(IP10_14_12, IRQ1),
- PINMUX_IPSR_DATA(IP10_14_12, DU1_DB2),
+ PINMUX_IPSR_GPSR(IP10_14_12, IRQ1),
+ PINMUX_IPSR_GPSR(IP10_14_12, DU1_DB2),
PINMUX_IPSR_MSEL(IP10_14_12, SSI_WS9_B, SEL_SSI9_1),
- PINMUX_IPSR_DATA(IP10_14_12, USB0_IDIN),
- PINMUX_IPSR_DATA(IP10_14_12, CAN_DEBUGOUT4),
- PINMUX_IPSR_DATA(IP10_14_12, CC50_STATE39),
+ PINMUX_IPSR_GPSR(IP10_14_12, USB0_IDIN),
+ PINMUX_IPSR_GPSR(IP10_14_12, CAN_DEBUGOUT4),
+ PINMUX_IPSR_GPSR(IP10_14_12, CC50_STATE39),
PINMUX_IPSR_MSEL(IP10_17_15, SCIF3_SCK, SEL_SCIF3_0),
- PINMUX_IPSR_DATA(IP10_17_15, IRQ2),
+ PINMUX_IPSR_GPSR(IP10_17_15, IRQ2),
PINMUX_IPSR_MSEL(IP10_17_15, BPFCLK_D, SEL_DARC_3),
- PINMUX_IPSR_DATA(IP10_17_15, DU1_DB3),
+ PINMUX_IPSR_GPSR(IP10_17_15, DU1_DB3),
PINMUX_IPSR_MSEL(IP10_17_15, SSI_SDATA9_B, SEL_SSI9_1),
- PINMUX_IPSR_DATA(IP10_17_15, TANS2),
- PINMUX_IPSR_DATA(IP10_17_15, CAN_DEBUGOUT5),
- PINMUX_IPSR_DATA(IP10_17_15, CC50_OSCOUT),
+ PINMUX_IPSR_GPSR(IP10_17_15, TANS2),
+ PINMUX_IPSR_GPSR(IP10_17_15, CAN_DEBUGOUT5),
+ PINMUX_IPSR_GPSR(IP10_17_15, CC50_OSCOUT),
PINMUX_IPSR_MSEL(IP10_20_18, SCIF3_RXD, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP10_20_18, I2C1_SCL_E, SEL_I2C01_4),
PINMUX_IPSR_MSEL(IP10_20_18, FMCLK_D, SEL_DARC_3),
- PINMUX_IPSR_DATA(IP10_20_18, DU1_DB4),
+ PINMUX_IPSR_GPSR(IP10_20_18, DU1_DB4),
PINMUX_IPSR_MSEL(IP10_20_18, AUDIO_CLKA_C, SEL_ADG_2),
PINMUX_IPSR_MSEL(IP10_20_18, SSI_SCK4_B, SEL_SSI4_1),
- PINMUX_IPSR_DATA(IP10_20_18, CAN_DEBUGOUT6),
+ PINMUX_IPSR_GPSR(IP10_20_18, CAN_DEBUGOUT6),
PINMUX_IPSR_MSEL(IP10_20_18, RDS_CLK_C, SEL_RDS_2),
PINMUX_IPSR_MSEL(IP10_23_21, SCIF3_TXD, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP10_23_21, I2C1_SDA_E, SEL_I2C01_4),
PINMUX_IPSR_MSEL(IP10_23_21, FMIN_D, SEL_DARC_3),
- PINMUX_IPSR_DATA(IP10_23_21, DU1_DB5),
+ PINMUX_IPSR_GPSR(IP10_23_21, DU1_DB5),
PINMUX_IPSR_MSEL(IP10_23_21, AUDIO_CLKB_C, SEL_ADG_2),
PINMUX_IPSR_MSEL(IP10_23_21, SSI_WS4_B, SEL_SSI4_1),
- PINMUX_IPSR_DATA(IP10_23_21, CAN_DEBUGOUT7),
+ PINMUX_IPSR_GPSR(IP10_23_21, CAN_DEBUGOUT7),
PINMUX_IPSR_MSEL(IP10_23_21, RDS_DATA_C, SEL_RDS_2),
PINMUX_IPSR_MSEL(IP10_26_24, I2C2_SCL, SEL_I2C02_0),
PINMUX_IPSR_MSEL(IP10_26_24, SCIFA5_RXD, SEL_SCIFA5_0),
- PINMUX_IPSR_DATA(IP10_26_24, DU1_DB6),
+ PINMUX_IPSR_GPSR(IP10_26_24, DU1_DB6),
PINMUX_IPSR_MSEL(IP10_26_24, AUDIO_CLKC_C, SEL_ADG_2),
PINMUX_IPSR_MSEL(IP10_26_24, SSI_SDATA4_B, SEL_SSI4_1),
- PINMUX_IPSR_DATA(IP10_26_24, CAN_DEBUGOUT8),
+ PINMUX_IPSR_GPSR(IP10_26_24, CAN_DEBUGOUT8),
PINMUX_IPSR_MSEL(IP10_29_27, I2C2_SDA, SEL_I2C02_0),
PINMUX_IPSR_MSEL(IP10_29_27, SCIFA5_TXD, SEL_SCIFA5_0),
- PINMUX_IPSR_DATA(IP10_29_27, DU1_DB7),
+ PINMUX_IPSR_GPSR(IP10_29_27, DU1_DB7),
PINMUX_IPSR_MSEL(IP10_29_27, AUDIO_CLKOUT_C, SEL_ADG_2),
- PINMUX_IPSR_DATA(IP10_29_27, CAN_DEBUGOUT9),
+ PINMUX_IPSR_GPSR(IP10_29_27, CAN_DEBUGOUT9),
PINMUX_IPSR_MSEL(IP10_31_30, SSI_SCK5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP10_31_30, SCIFA3_SCK, SEL_SCIFA3_0),
- PINMUX_IPSR_DATA(IP10_31_30, DU1_DOTCLKIN),
- PINMUX_IPSR_DATA(IP10_31_30, CAN_DEBUGOUT10),
+ PINMUX_IPSR_GPSR(IP10_31_30, DU1_DOTCLKIN),
+ PINMUX_IPSR_GPSR(IP10_31_30, CAN_DEBUGOUT10),
/* IPSR11 */
PINMUX_IPSR_MSEL(IP11_2_0, SSI_WS5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP11_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
PINMUX_IPSR_MSEL(IP11_2_0, I2C3_SCL_C, SEL_I2C03_2),
- PINMUX_IPSR_DATA(IP11_2_0, DU1_DOTCLKOUT0),
- PINMUX_IPSR_DATA(IP11_2_0, CAN_DEBUGOUT11),
+ PINMUX_IPSR_GPSR(IP11_2_0, DU1_DOTCLKOUT0),
+ PINMUX_IPSR_GPSR(IP11_2_0, CAN_DEBUGOUT11),
PINMUX_IPSR_MSEL(IP11_5_3, SSI_SDATA5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP11_5_3, SCIFA3_TXD, SEL_SCIFA3_0),
PINMUX_IPSR_MSEL(IP11_5_3, I2C3_SDA_C, SEL_I2C03_2),
- PINMUX_IPSR_DATA(IP11_5_3, DU1_DOTCLKOUT1),
- PINMUX_IPSR_DATA(IP11_5_3, CAN_DEBUGOUT12),
+ PINMUX_IPSR_GPSR(IP11_5_3, DU1_DOTCLKOUT1),
+ PINMUX_IPSR_GPSR(IP11_5_3, CAN_DEBUGOUT12),
PINMUX_IPSR_MSEL(IP11_7_6, SSI_SCK6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP11_7_6, SCIFA1_SCK_B, SEL_SCIFA1_1),
- PINMUX_IPSR_DATA(IP11_7_6, DU1_EXHSYNC_DU1_HSYNC),
- PINMUX_IPSR_DATA(IP11_7_6, CAN_DEBUGOUT13),
+ PINMUX_IPSR_GPSR(IP11_7_6, DU1_EXHSYNC_DU1_HSYNC),
+ PINMUX_IPSR_GPSR(IP11_7_6, CAN_DEBUGOUT13),
PINMUX_IPSR_MSEL(IP11_10_8, SSI_WS6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP11_10_8, SCIFA1_RXD_B, SEL_SCIFA1_1),
PINMUX_IPSR_MSEL(IP11_10_8, I2C4_SCL_C, SEL_I2C04_2),
- PINMUX_IPSR_DATA(IP11_10_8, DU1_EXVSYNC_DU1_VSYNC),
- PINMUX_IPSR_DATA(IP11_10_8, CAN_DEBUGOUT14),
+ PINMUX_IPSR_GPSR(IP11_10_8, DU1_EXVSYNC_DU1_VSYNC),
+ PINMUX_IPSR_GPSR(IP11_10_8, CAN_DEBUGOUT14),
PINMUX_IPSR_MSEL(IP11_13_11, SSI_SDATA6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP11_13_11, SCIFA1_TXD_B, SEL_SCIFA1_1),
PINMUX_IPSR_MSEL(IP11_13_11, I2C4_SDA_C, SEL_I2C04_2),
- PINMUX_IPSR_DATA(IP11_13_11, DU1_EXODDF_DU1_ODDF_DISP_CDE),
- PINMUX_IPSR_DATA(IP11_13_11, CAN_DEBUGOUT15),
+ PINMUX_IPSR_GPSR(IP11_13_11, DU1_EXODDF_DU1_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP11_13_11, CAN_DEBUGOUT15),
PINMUX_IPSR_MSEL(IP11_15_14, SSI_SCK78, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP11_15_14, SCIFA2_SCK_B, SEL_SCIFA2_1),
PINMUX_IPSR_MSEL(IP11_15_14, IIC0_SDA_C, SEL_IIC00_2),
- PINMUX_IPSR_DATA(IP11_15_14, DU1_DISP),
+ PINMUX_IPSR_GPSR(IP11_15_14, DU1_DISP),
PINMUX_IPSR_MSEL(IP11_17_16, SSI_WS78, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP11_17_16, SCIFA2_RXD_B, SEL_SCIFA2_1),
PINMUX_IPSR_MSEL(IP11_17_16, IIC0_SCL_C, SEL_IIC00_2),
- PINMUX_IPSR_DATA(IP11_17_16, DU1_CDE),
+ PINMUX_IPSR_GPSR(IP11_17_16, DU1_CDE),
PINMUX_IPSR_MSEL(IP11_20_18, SSI_SDATA7, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP11_20_18, SCIFA2_TXD_B, SEL_SCIFA2_1),
- PINMUX_IPSR_DATA(IP11_20_18, IRQ8),
+ PINMUX_IPSR_GPSR(IP11_20_18, IRQ8),
PINMUX_IPSR_MSEL(IP11_20_18, AUDIO_CLKA_D, SEL_ADG_3),
PINMUX_IPSR_MSEL(IP11_20_18, CAN_CLK_D, SEL_CAN_3),
- PINMUX_IPSR_DATA(IP11_20_18, PCMOE_N),
- PINMUX_IPSR_DATA(IP11_23_21, SSI_SCK0129),
+ PINMUX_IPSR_GPSR(IP11_20_18, PCMOE_N),
+ PINMUX_IPSR_GPSR(IP11_23_21, SSI_SCK0129),
PINMUX_IPSR_MSEL(IP11_23_21, MSIOF1_RXD_B, SEL_MSI1_1),
PINMUX_IPSR_MSEL(IP11_23_21, SCIF5_RXD_D, SEL_SCIF5_3),
PINMUX_IPSR_MSEL(IP11_23_21, ADIDATA_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP11_23_21, AD_DI_B, SEL_ADI_1),
- PINMUX_IPSR_DATA(IP11_23_21, PCMWE_N),
- PINMUX_IPSR_DATA(IP11_26_24, SSI_WS0129),
+ PINMUX_IPSR_GPSR(IP11_23_21, PCMWE_N),
+ PINMUX_IPSR_GPSR(IP11_26_24, SSI_WS0129),
PINMUX_IPSR_MSEL(IP11_26_24, MSIOF1_TXD_B, SEL_MSI1_1),
PINMUX_IPSR_MSEL(IP11_26_24, SCIF5_TXD_D, SEL_SCIF5_3),
PINMUX_IPSR_MSEL(IP11_26_24, ADICS_SAMP_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP11_26_24, AD_DO_B, SEL_ADI_1),
- PINMUX_IPSR_DATA(IP11_29_27, SSI_SDATA0),
+ PINMUX_IPSR_GPSR(IP11_29_27, SSI_SDATA0),
PINMUX_IPSR_MSEL(IP11_29_27, MSIOF1_SCK_B, SEL_MSI1_1),
- PINMUX_IPSR_DATA(IP11_29_27, PWM0_B),
+ PINMUX_IPSR_GPSR(IP11_29_27, PWM0_B),
PINMUX_IPSR_MSEL(IP11_29_27, ADICLK_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP11_29_27, AD_CLK_B, SEL_ADI_1),
/* IPSR12 */
- PINMUX_IPSR_DATA(IP12_2_0, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP12_2_0, SSI_SCK34),
PINMUX_IPSR_MSEL(IP12_2_0, MSIOF1_SYNC_B, SEL_MSI1_1),
PINMUX_IPSR_MSEL(IP12_2_0, SCIFA1_SCK_C, SEL_SCIFA1_2),
PINMUX_IPSR_MSEL(IP12_2_0, ADICHS0_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP12_2_0, AD_NCS_N_B, SEL_ADI_1),
PINMUX_IPSR_MSEL(IP12_2_0, DREQ1_N_B, SEL_LBS_1),
- PINMUX_IPSR_DATA(IP12_5_3, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP12_5_3, SSI_WS34),
PINMUX_IPSR_MSEL(IP12_5_3, MSIOF1_SS1_B, SEL_MSI1_1),
PINMUX_IPSR_MSEL(IP12_5_3, SCIFA1_RXD_C, SEL_SCIFA1_2),
PINMUX_IPSR_MSEL(IP12_5_3, ADICHS1_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP12_5_3, CAN1_RX_C, SEL_CAN1_2),
PINMUX_IPSR_MSEL(IP12_5_3, DACK1_B, SEL_LBS_1),
- PINMUX_IPSR_DATA(IP12_8_6, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP12_8_6, SSI_SDATA3),
PINMUX_IPSR_MSEL(IP12_8_6, MSIOF1_SS2_B, SEL_MSI1_1),
PINMUX_IPSR_MSEL(IP12_8_6, SCIFA1_TXD_C, SEL_SCIFA1_2),
PINMUX_IPSR_MSEL(IP12_8_6, ADICHS2_B, SEL_RAD_1),
PINMUX_IPSR_MSEL(IP12_8_6, CAN1_TX_C, SEL_CAN1_2),
- PINMUX_IPSR_DATA(IP12_8_6, DREQ2_N),
+ PINMUX_IPSR_GPSR(IP12_8_6, DREQ2_N),
PINMUX_IPSR_MSEL(IP12_10_9, SSI_SCK4, SEL_SSI4_0),
- PINMUX_IPSR_DATA(IP12_10_9, MLB_CLK),
+ PINMUX_IPSR_GPSR(IP12_10_9, MLB_CLK),
PINMUX_IPSR_MSEL(IP12_10_9, IETX_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP12_10_9, IRD_TX),
+ PINMUX_IPSR_GPSR(IP12_10_9, IRD_TX),
PINMUX_IPSR_MSEL(IP12_12_11, SSI_WS4, SEL_SSI4_0),
- PINMUX_IPSR_DATA(IP12_12_11, MLB_SIG),
+ PINMUX_IPSR_GPSR(IP12_12_11, MLB_SIG),
PINMUX_IPSR_MSEL(IP12_12_11, IECLK_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP12_12_11, IRD_RX),
+ PINMUX_IPSR_GPSR(IP12_12_11, IRD_RX),
PINMUX_IPSR_MSEL(IP12_14_13, SSI_SDATA4, SEL_SSI4_0),
- PINMUX_IPSR_DATA(IP12_14_13, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP12_14_13, MLB_DAT),
PINMUX_IPSR_MSEL(IP12_14_13, IERX_B, SEL_IEB_1),
- PINMUX_IPSR_DATA(IP12_14_13, IRD_SCK),
+ PINMUX_IPSR_GPSR(IP12_14_13, IRD_SCK),
PINMUX_IPSR_MSEL(IP12_17_15, SSI_SDATA8, SEL_SSI8_0),
PINMUX_IPSR_MSEL(IP12_17_15, SCIF1_SCK_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP12_17_15, PWM1_B),
- PINMUX_IPSR_DATA(IP12_17_15, IRQ9),
+ PINMUX_IPSR_GPSR(IP12_17_15, PWM1_B),
+ PINMUX_IPSR_GPSR(IP12_17_15, IRQ9),
PINMUX_IPSR_MSEL(IP12_17_15, REMOCON, SEL_RCN_0),
- PINMUX_IPSR_DATA(IP12_17_15, DACK2),
+ PINMUX_IPSR_GPSR(IP12_17_15, DACK2),
PINMUX_IPSR_MSEL(IP12_17_15, ETH_MDIO_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_20_18, SSI_SCK1, SEL_SSI1_0),
PINMUX_IPSR_MSEL(IP12_20_18, SCIF1_RXD_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP12_20_18, IIC1_SCL_C, SEL_IIC01_2),
- PINMUX_IPSR_DATA(IP12_20_18, VI1_CLK),
+ PINMUX_IPSR_GPSR(IP12_20_18, VI1_CLK),
PINMUX_IPSR_MSEL(IP12_20_18, CAN0_RX_D, SEL_CAN0_3),
PINMUX_IPSR_MSEL(IP12_20_18, AVB_AVTP_CAPTURE, SEL_AVB_0),
PINMUX_IPSR_MSEL(IP12_20_18, ETH_CRS_DV_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_23_21, SSI_WS1, SEL_SSI1_0),
PINMUX_IPSR_MSEL(IP12_23_21, SCIF1_TXD_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP12_23_21, IIC1_SDA_C, SEL_IIC01_2),
- PINMUX_IPSR_DATA(IP12_23_21, VI1_DATA0),
+ PINMUX_IPSR_GPSR(IP12_23_21, VI1_DATA0),
PINMUX_IPSR_MSEL(IP12_23_21, CAN0_TX_D, SEL_CAN0_3),
PINMUX_IPSR_MSEL(IP12_23_21, AVB_AVTP_MATCH, SEL_AVB_0),
PINMUX_IPSR_MSEL(IP12_23_21, ETH_RX_ER_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_26_24, SSI_SDATA1, SEL_SSI1_0),
PINMUX_IPSR_MSEL(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1),
- PINMUX_IPSR_DATA(IP12_26_24, VI1_DATA1),
+ PINMUX_IPSR_GPSR(IP12_26_24, VI1_DATA1),
PINMUX_IPSR_MSEL(IP12_26_24, SDATA, SEL_FSN_0),
- PINMUX_IPSR_DATA(IP12_26_24, ATAG0_N),
+ PINMUX_IPSR_GPSR(IP12_26_24, ATAG0_N),
PINMUX_IPSR_MSEL(IP12_26_24, ETH_RXD0_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_29_27, SSI_SCK2, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1),
- PINMUX_IPSR_DATA(IP12_29_27, VI1_DATA2),
+ PINMUX_IPSR_GPSR(IP12_29_27, VI1_DATA2),
PINMUX_IPSR_MSEL(IP12_29_27, MDATA, SEL_FSN_0),
- PINMUX_IPSR_DATA(IP12_29_27, ATAWR0_N),
+ PINMUX_IPSR_GPSR(IP12_29_27, ATAWR0_N),
PINMUX_IPSR_MSEL(IP12_29_27, ETH_RXD1_B, SEL_ETH_1),
/* IPSR13 */
PINMUX_IPSR_MSEL(IP13_2_0, SSI_WS2, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP13_2_0, HSCIF1_HCTS_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP13_2_0, SCIFA0_RXD_D, SEL_SCIFA0_3),
- PINMUX_IPSR_DATA(IP13_2_0, VI1_DATA3),
+ PINMUX_IPSR_GPSR(IP13_2_0, VI1_DATA3),
PINMUX_IPSR_MSEL(IP13_2_0, SCKZ, SEL_FSN_0),
- PINMUX_IPSR_DATA(IP13_2_0, ATACS00_N),
+ PINMUX_IPSR_GPSR(IP13_2_0, ATACS00_N),
PINMUX_IPSR_MSEL(IP13_2_0, ETH_LINK_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_5_3, SSI_SDATA2, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP13_5_3, HSCIF1_HRTS_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP13_5_3, SCIFA0_TXD_D, SEL_SCIFA0_3),
- PINMUX_IPSR_DATA(IP13_5_3, VI1_DATA4),
+ PINMUX_IPSR_GPSR(IP13_5_3, VI1_DATA4),
PINMUX_IPSR_MSEL(IP13_5_3, STM_N, SEL_FSN_0),
- PINMUX_IPSR_DATA(IP13_5_3, ATACS10_N),
+ PINMUX_IPSR_GPSR(IP13_5_3, ATACS10_N),
PINMUX_IPSR_MSEL(IP13_5_3, ETH_REFCLK_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_8_6, SSI_SCK9, SEL_SSI9_0),
PINMUX_IPSR_MSEL(IP13_8_6, SCIF2_SCK_B, SEL_SCIF2_1),
- PINMUX_IPSR_DATA(IP13_8_6, PWM2_B),
- PINMUX_IPSR_DATA(IP13_8_6, VI1_DATA5),
+ PINMUX_IPSR_GPSR(IP13_8_6, PWM2_B),
+ PINMUX_IPSR_GPSR(IP13_8_6, VI1_DATA5),
PINMUX_IPSR_MSEL(IP13_8_6, MTS_N, SEL_FSN_0),
- PINMUX_IPSR_DATA(IP13_8_6, EX_WAIT1),
+ PINMUX_IPSR_GPSR(IP13_8_6, EX_WAIT1),
PINMUX_IPSR_MSEL(IP13_8_6, ETH_TXD1_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_11_9, SSI_WS9, SEL_SSI9_0),
PINMUX_IPSR_MSEL(IP13_11_9, SCIF2_RXD_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP13_11_9, I2C3_SCL_E, SEL_I2C03_4),
- PINMUX_IPSR_DATA(IP13_11_9, VI1_DATA6),
- PINMUX_IPSR_DATA(IP13_11_9, ATARD0_N),
+ PINMUX_IPSR_GPSR(IP13_11_9, VI1_DATA6),
+ PINMUX_IPSR_GPSR(IP13_11_9, ATARD0_N),
PINMUX_IPSR_MSEL(IP13_11_9, ETH_TX_EN_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_14_12, SSI_SDATA9, SEL_SSI9_0),
PINMUX_IPSR_MSEL(IP13_14_12, SCIF2_TXD_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP13_14_12, I2C3_SDA_E, SEL_I2C03_4),
- PINMUX_IPSR_DATA(IP13_14_12, VI1_DATA7),
- PINMUX_IPSR_DATA(IP13_14_12, ATADIR0_N),
+ PINMUX_IPSR_GPSR(IP13_14_12, VI1_DATA7),
+ PINMUX_IPSR_GPSR(IP13_14_12, ATADIR0_N),
PINMUX_IPSR_MSEL(IP13_14_12, ETH_MAGIC_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_17_15, AUDIO_CLKA, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP13_17_15, I2C0_SCL_B, SEL_I2C00_1),
PINMUX_IPSR_MSEL(IP13_17_15, SCIFA4_RXD_D, SEL_SCIFA4_3),
- PINMUX_IPSR_DATA(IP13_17_15, VI1_CLKENB),
+ PINMUX_IPSR_GPSR(IP13_17_15, VI1_CLKENB),
PINMUX_IPSR_MSEL(IP13_17_15, TS_SDATA_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP13_17_15, RIF0_SYNC_B, SEL_DR0_1),
PINMUX_IPSR_MSEL(IP13_17_15, ETH_TXD0_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_20_18, AUDIO_CLKB, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP13_20_18, I2C0_SDA_B, SEL_I2C00_1),
PINMUX_IPSR_MSEL(IP13_20_18, SCIFA4_TXD_D, SEL_SCIFA4_3),
- PINMUX_IPSR_DATA(IP13_20_18, VI1_FIELD),
+ PINMUX_IPSR_GPSR(IP13_20_18, VI1_FIELD),
PINMUX_IPSR_MSEL(IP13_20_18, TS_SCK_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP13_20_18, RIF0_CLK_B, SEL_DR0_1),
PINMUX_IPSR_MSEL(IP13_20_18, BPFCLK_E, SEL_DARC_4),
@@ -1472,7 +1472,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_23_21, AUDIO_CLKC, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP13_23_21, I2C4_SCL_B, SEL_I2C04_1),
PINMUX_IPSR_MSEL(IP13_23_21, SCIFA5_RXD_D, SEL_SCIFA5_3),
- PINMUX_IPSR_DATA(IP13_23_21, VI1_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP13_23_21, VI1_HSYNC_N),
PINMUX_IPSR_MSEL(IP13_23_21, TS_SDEN_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP13_23_21, RIF0_D0_B, SEL_DR0_1),
PINMUX_IPSR_MSEL(IP13_23_21, FMCLK_E, SEL_DARC_4),
@@ -1480,7 +1480,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_26_24, AUDIO_CLKOUT, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP13_26_24, I2C4_SDA_B, SEL_I2C04_1),
PINMUX_IPSR_MSEL(IP13_26_24, SCIFA5_TXD_D, SEL_SCIFA5_3),
- PINMUX_IPSR_DATA(IP13_26_24, VI1_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP13_26_24, VI1_VSYNC_N),
PINMUX_IPSR_MSEL(IP13_26_24, TS_SPSYNC_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP13_26_24, RIF0_D1_B, SEL_DR1_1),
PINMUX_IPSR_MSEL(IP13_26_24, FMIN_E, SEL_DARC_4),
@@ -1491,6 +1491,197 @@ static const struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+/* - Audio Clock ------------------------------------------------------------ */
+static const unsigned int audio_clka_pins[] = {
+ /* CLKA */
+ RCAR_GP_PIN(5, 20),
+};
+static const unsigned int audio_clka_mux[] = {
+ AUDIO_CLKA_MARK,
+};
+static const unsigned int audio_clka_b_pins[] = {
+ /* CLKA */
+ RCAR_GP_PIN(3, 25),
+};
+static const unsigned int audio_clka_b_mux[] = {
+ AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clka_c_pins[] = {
+ /* CLKA */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int audio_clka_c_mux[] = {
+ AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clka_d_pins[] = {
+ /* CLKA */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clka_d_mux[] = {
+ AUDIO_CLKA_D_MARK,
+};
+static const unsigned int audio_clkb_pins[] = {
+ /* CLKB */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkb_mux[] = {
+ AUDIO_CLKB_MARK,
+};
+static const unsigned int audio_clkb_b_pins[] = {
+ /* CLKB */
+ RCAR_GP_PIN(3, 26),
+};
+static const unsigned int audio_clkb_b_mux[] = {
+ AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clkb_c_pins[] = {
+ /* CLKB */
+ RCAR_GP_PIN(4, 21),
+};
+static const unsigned int audio_clkb_c_mux[] = {
+ AUDIO_CLKB_C_MARK,
+};
+static const unsigned int audio_clkc_pins[] = {
+ /* CLKC */
+ RCAR_GP_PIN(5, 22),
+};
+static const unsigned int audio_clkc_mux[] = {
+ AUDIO_CLKC_MARK,
+};
+static const unsigned int audio_clkc_b_pins[] = {
+ /* CLKC */
+ RCAR_GP_PIN(3, 29),
+};
+static const unsigned int audio_clkc_b_mux[] = {
+ AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkc_c_pins[] = {
+ /* CLKC */
+ RCAR_GP_PIN(4, 22),
+};
+static const unsigned int audio_clkc_c_mux[] = {
+ AUDIO_CLKC_C_MARK,
+};
+static const unsigned int audio_clkout_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 23),
+};
+static const unsigned int audio_clkout_mux[] = {
+ AUDIO_CLKOUT_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(3, 12),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+ AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(4, 23),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+ AUDIO_CLKOUT_C_MARK,
+};
+/* - AVB -------------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
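+	/* LINK */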
+ RCAR_GP_PIN(3, 26),
+};
+static const unsigned int avb_link_mux[] = {
+ AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
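+	/* MAGIC */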
+ RCAR_GP_PIN(3, 27),
+};
+static const unsigned int avb_magic_mux[] = {
+ AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
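+	/* PHY_INT */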
+ RCAR_GP_PIN(3, 28),
+};
+static const unsigned int avb_phy_int_mux[] = {
+ AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdio_pins[] = {
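+	/* MDC, MDIO */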
+ RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int avb_mdio_mux[] = {
+ AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
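+	/* TXD0..TXD3, RXD0..RXD3, RX_ER, RX_CLK, RX_DV, CRS, TX_EN, TX_ER, TX_CLK, COL */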
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
+ RCAR_GP_PIN(3, 17),
+
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
+ RCAR_GP_PIN(3, 5),
+
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+ RCAR_GP_PIN(3, 29), RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 22),
+ RCAR_GP_PIN(3, 13), RCAR_GP_PIN(3, 11),
+};
+static const unsigned int avb_mii_mux[] = {
+ AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK,
+
+ AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+ AVB_RXD3_MARK,
+
+ AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK,
+ AVB_CRS_MARK, AVB_TX_EN_MARK, AVB_TX_ER_MARK,
+ AVB_TX_CLK_MARK, AVB_COL_MARK,
+};
+static const unsigned int avb_gmii_pins[] = {
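+	/* TXD0..TXD7, RXD0..RXD7, RX_ER, RX_CLK, RX_DV, CRS, GTX_CLK, GTXREFCLK, TX_EN, TX_ER, TX_CLK, COL */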
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16),
+ RCAR_GP_PIN(3, 17), RCAR_GP_PIN(3, 18), RCAR_GP_PIN(3, 19),
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 21),
+
+ RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4),
+ RCAR_GP_PIN(3, 5), RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9),
+
+ RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1),
+ RCAR_GP_PIN(3, 29), RCAR_GP_PIN(3, 23), RCAR_GP_PIN(3, 30),
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 13),
+ RCAR_GP_PIN(3, 11),
+};
+static const unsigned int avb_gmii_mux[] = {
+ AVB_TXD0_MARK, AVB_TXD1_MARK, AVB_TXD2_MARK,
+ AVB_TXD3_MARK, AVB_TXD4_MARK, AVB_TXD5_MARK,
+ AVB_TXD6_MARK, AVB_TXD7_MARK,
+
+ AVB_RXD0_MARK, AVB_RXD1_MARK, AVB_RXD2_MARK,
+ AVB_RXD3_MARK, AVB_RXD4_MARK, AVB_RXD5_MARK,
+ AVB_RXD6_MARK, AVB_RXD7_MARK,
+
+ AVB_RX_ER_MARK, AVB_RX_CLK_MARK, AVB_RX_DV_MARK,
+ AVB_CRS_MARK, AVB_GTX_CLK_MARK, AVB_GTXREFCLK_MARK,
+ AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK,
+ AVB_COL_MARK,
+};
+static const unsigned int avb_avtp_capture_pins[] = {
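+	/* AVTP_CAPTURE */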
+ RCAR_GP_PIN(5, 11),
+};
+static const unsigned int avb_avtp_capture_mux[] = {
+ AVB_AVTP_CAPTURE_MARK,
+};
+static const unsigned int avb_avtp_match_pins[] = {
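+	/* AVTP_MATCH */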
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int avb_avtp_match_mux[] = {
+ AVB_AVTP_MATCH_MARK,
+};
+static const unsigned int avb_avtp_capture_b_pins[] = {
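+	/* AVTP_CAPTURE_B */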
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int avb_avtp_capture_b_mux[] = {
+ AVB_AVTP_CAPTURE_B_MARK,
+};
+static const unsigned int avb_avtp_match_b_pins[] = {
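+	/* AVTP_MATCH_B */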
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int avb_avtp_match_b_mux[] = {
+ AVB_AVTP_MATCH_B_MARK,
+};
/* - ETH -------------------------------------------------------------------- */
static const unsigned int eth_link_pins[] = {
/* LINK */
@@ -2751,6 +2942,245 @@ static const unsigned int sdhi2_wp_pins[] = {
static const unsigned int sdhi2_wp_mux[] = {
SD2_WP_MARK,
};
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+ /* SDATA0 */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int ssi0_data_mux[] = {
+ SSI_SDATA0_MARK,
+};
+static const unsigned int ssi0129_ctrl_pins[] = {
+ /* SCK0129, WS0129 */
+ RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int ssi0129_ctrl_mux[] = {
+ SSI_SCK0129_MARK, SSI_WS0129_MARK,
+};
+static const unsigned int ssi1_data_pins[] = {
+ /* SDATA1 */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi1_data_mux[] = {
+ SSI_SDATA1_MARK,
+};
+static const unsigned int ssi1_ctrl_pins[] = {
+ /* SCK1, WS1 */
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_ctrl_mux[] = {
+ SSI_SCK1_MARK, SSI_WS1_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+ /* SDATA1 */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+ SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+ /* SCK1, WS1 */
+ RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 12),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+ SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_pins[] = {
+ /* SDATA2 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi2_data_mux[] = {
+ SSI_SDATA2_MARK,
+};
+static const unsigned int ssi2_ctrl_pins[] = {
+ /* SCK2, WS2 */
+ RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 15),
+};
+static const unsigned int ssi2_ctrl_mux[] = {
+ SSI_SCK2_MARK, SSI_WS2_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+ /* SDATA2 */
+ RCAR_GP_PIN(4, 16),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+ SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+ /* SCK2, WS2 */
+ RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 15),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+ SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA3 */
+ RCAR_GP_PIN(5, 6),
+};
+static const unsigned int ssi3_data_mux[] = {
+	SSI_SDATA3_MARK,
+};
+static const unsigned int ssi34_ctrl_pins[] = {
+ /* SCK34, WS34 */
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 5),
+};
+static const unsigned int ssi34_ctrl_mux[] = {
+ SSI_SCK34_MARK, SSI_WS34_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+ /* SDATA4 */
+ RCAR_GP_PIN(5, 9),
+};
+static const unsigned int ssi4_data_mux[] = {
+ SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+ /* SCK4, WS4 */
+ RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+ SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi4_data_b_pins[] = {
+ /* SDATA4 */
+ RCAR_GP_PIN(4, 22),
+};
+static const unsigned int ssi4_data_b_mux[] = {
+ SSI_SDATA4_B_MARK,
+};
+static const unsigned int ssi4_ctrl_b_pins[] = {
+ /* SCK4, WS4 */
+ RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 21),
+};
+static const unsigned int ssi4_ctrl_b_mux[] = {
+ SSI_SCK4_B_MARK, SSI_WS4_B_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+ /* SDATA5 */
+ RCAR_GP_PIN(4, 26),
+};
+static const unsigned int ssi5_data_mux[] = {
+ SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+ /* SCK5, WS5 */
+ RCAR_GP_PIN(4, 24), RCAR_GP_PIN(4, 25),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+ SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi5_data_b_pins[] = {
+ /* SDATA5 */
+ RCAR_GP_PIN(3, 21),
+};
+static const unsigned int ssi5_data_b_mux[] = {
+ SSI_SDATA5_B_MARK,
+};
+static const unsigned int ssi5_ctrl_b_pins[] = {
+ /* SCK5, WS5 */
+ RCAR_GP_PIN(3, 19), RCAR_GP_PIN(3, 20),
+};
+static const unsigned int ssi5_ctrl_b_mux[] = {
+ SSI_SCK5_B_MARK, SSI_WS5_B_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+ /* SDATA6 */
+ RCAR_GP_PIN(4, 29),
+};
+static const unsigned int ssi6_data_mux[] = {
+ SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+ /* SCK6, WS6 */
+ RCAR_GP_PIN(4, 27), RCAR_GP_PIN(4, 28),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+ SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi6_data_b_pins[] = {
+ /* SDATA6 */
+ RCAR_GP_PIN(3, 24),
+};
+static const unsigned int ssi6_data_b_mux[] = {
+ SSI_SDATA6_B_MARK,
+};
+static const unsigned int ssi6_ctrl_b_pins[] = {
+ /* SCK6, WS6 */
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 23),
+};
+static const unsigned int ssi6_ctrl_b_mux[] = {
+ SSI_SCK6_B_MARK, SSI_WS6_B_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+ /* SDATA7 */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int ssi7_data_mux[] = {
+ SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+ /* SCK78, WS78 */
+ RCAR_GP_PIN(4, 30), RCAR_GP_PIN(4, 31),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+ SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi7_data_b_pins[] = {
+ /* SDATA7 */
+ RCAR_GP_PIN(3, 27),
+};
+static const unsigned int ssi7_data_b_mux[] = {
+ SSI_SDATA7_B_MARK,
+};
+static const unsigned int ssi78_ctrl_b_pins[] = {
+ /* SCK78, WS78 */
+ RCAR_GP_PIN(3, 25), RCAR_GP_PIN(3, 26),
+};
+static const unsigned int ssi78_ctrl_b_mux[] = {
+ SSI_SCK78_B_MARK, SSI_WS78_B_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+ /* SDATA8 */
+ RCAR_GP_PIN(5, 10),
+};
+static const unsigned int ssi8_data_mux[] = {
+ SSI_SDATA8_MARK,
+};
+static const unsigned int ssi8_data_b_pins[] = {
+ /* SDATA8 */
+ RCAR_GP_PIN(3, 28),
+};
+static const unsigned int ssi8_data_b_mux[] = {
+ SSI_SDATA8_B_MARK,
+};
+static const unsigned int ssi9_data_pins[] = {
+ /* SDATA9 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int ssi9_data_mux[] = {
+ SSI_SDATA9_MARK,
+};
+static const unsigned int ssi9_ctrl_pins[] = {
+ /* SCK9, WS9 */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 18),
+};
+static const unsigned int ssi9_ctrl_mux[] = {
+ SSI_SCK9_MARK, SSI_WS9_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+ /* SDATA9 */
+ RCAR_GP_PIN(4, 19),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+ SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+ /* SCK9, WS9 */
+ RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 18),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+ SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
RCAR_GP_PIN(5, 24), /* PWEN */
@@ -2911,6 +3341,29 @@ static const unsigned int vin1_clk_mux[] = {
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clka),
+ SH_PFC_PIN_GROUP(audio_clka_b),
+ SH_PFC_PIN_GROUP(audio_clka_c),
+ SH_PFC_PIN_GROUP(audio_clka_d),
+ SH_PFC_PIN_GROUP(audio_clkb),
+ SH_PFC_PIN_GROUP(audio_clkb_b),
+ SH_PFC_PIN_GROUP(audio_clkb_c),
+ SH_PFC_PIN_GROUP(audio_clkc),
+ SH_PFC_PIN_GROUP(audio_clkc_b),
+ SH_PFC_PIN_GROUP(audio_clkc_c),
+ SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_gmii),
+ SH_PFC_PIN_GROUP(avb_avtp_capture),
+ SH_PFC_PIN_GROUP(avb_avtp_match),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(eth_link),
SH_PFC_PIN_GROUP(eth_magic),
SH_PFC_PIN_GROUP(eth_mdio),
@@ -3084,6 +3537,40 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi2_ctrl),
SH_PFC_PIN_GROUP(sdhi2_cd),
SH_PFC_PIN_GROUP(sdhi2_wp),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi0129_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data),
+ SH_PFC_PIN_GROUP(ssi1_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data),
+ SH_PFC_PIN_GROUP(ssi2_ctrl),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data_b),
+ SH_PFC_PIN_GROUP(ssi4_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data_b),
+ SH_PFC_PIN_GROUP(ssi5_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data_b),
+ SH_PFC_PIN_GROUP(ssi6_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data_b),
+ SH_PFC_PIN_GROUP(ssi78_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi8_data_b),
+ SH_PFC_PIN_GROUP(ssi9_data),
+ SH_PFC_PIN_GROUP(ssi9_ctrl),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
SH_PFC_PIN_GROUP(usb0),
SH_PFC_PIN_GROUP(usb1),
VIN_DATA_PIN_GROUP(vin0_data, 24),
@@ -3106,6 +3593,35 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(vin1_clk),
};
+static const char * const audio_clk_groups[] = {
+ "audio_clka",
+ "audio_clka_b",
+ "audio_clka_c",
+ "audio_clka_d",
+ "audio_clkb",
+ "audio_clkb_b",
+ "audio_clkb_c",
+ "audio_clkc",
+ "audio_clkc_b",
+ "audio_clkc_c",
+ "audio_clkout",
+ "audio_clkout_b",
+ "audio_clkout_c",
+};
+
+static const char * const avb_groups[] = {
+ "avb_link",
+ "avb_magic",
+ "avb_phy_int",
+ "avb_mdio",
+ "avb_mii",
+ "avb_gmii",
+ "avb_avtp_capture",
+ "avb_avtp_match",
+ "avb_avtp_capture_b",
+ "avb_avtp_match_b",
+};
+
static const char * const eth_groups[] = {
"eth_link",
"eth_magic",
@@ -3381,6 +3897,43 @@ static const char * const sdhi2_groups[] = {
"sdhi2_wp",
};
+static const char * const ssi_groups[] = {
+ "ssi0_data",
+ "ssi0129_ctrl",
+ "ssi1_data",
+ "ssi1_ctrl",
+ "ssi1_data_b",
+ "ssi1_ctrl_b",
+ "ssi2_data",
+ "ssi2_ctrl",
+ "ssi2_data_b",
+ "ssi2_ctrl_b",
+ "ssi3_data",
+ "ssi34_ctrl",
+ "ssi4_data",
+ "ssi4_ctrl",
+ "ssi4_data_b",
+ "ssi4_ctrl_b",
+ "ssi5_data",
+ "ssi5_ctrl",
+ "ssi5_data_b",
+ "ssi5_ctrl_b",
+ "ssi6_data",
+ "ssi6_ctrl",
+ "ssi6_data_b",
+ "ssi6_ctrl_b",
+ "ssi7_data",
+ "ssi78_ctrl",
+ "ssi7_data_b",
+ "ssi78_ctrl_b",
+ "ssi8_data",
+ "ssi8_data_b",
+ "ssi9_data",
+ "ssi9_ctrl",
+ "ssi9_data_b",
+ "ssi9_ctrl_b",
+};
+
static const char * const usb0_groups[] = {
"usb0",
};
@@ -3414,6 +3967,8 @@ static const char * const vin1_groups[] = {
};
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(eth),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
@@ -3448,6 +4003,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi0),
SH_PFC_FUNCTION(sdhi1),
SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(ssi),
SH_PFC_FUNCTION(usb0),
SH_PFC_FUNCTION(usb1),
SH_PFC_FUNCTION(vin0),
@@ -3974,6 +4530,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU0_DISP, FN_QPOLA, FN_CC50_STATE30, 0,
/* IP6_3_2 [2] */
FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CC50_STATE29,
+ 0,
/* IP6_1_0 [2] */
FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, FN_CC50_STATE28, 0, }
},
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index ce4f5cdb0579..5979dabc02fa 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -189,8 +189,8 @@
#define GPSR6_4 F_(SSI_SDATA2_A, IP14_7_4)
#define GPSR6_3 F_(SSI_SDATA1_A, IP14_3_0)
#define GPSR6_2 F_(SSI_SDATA0, IP13_31_28)
-#define GPSR6_1 F_(SSI_WS0129, IP13_27_24)
-#define GPSR6_0 F_(SSI_SCK0129, IP13_23_20)
+#define GPSR6_1 F_(SSI_WS01239, IP13_27_24)
+#define GPSR6_0 F_(SSI_SCK01239, IP13_23_20)
/* GPSR7 */
#define GPSR7_3 FM(HDMI1_CEC)
@@ -315,8 +315,8 @@
#define IP13_11_8 FM(MLB_CLK) F_(0, 0) FM(MSIOF1_SCK_F) F_(0, 0) FM(SCL1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP13_15_12 FM(MLB_SIG) FM(RX1_B) FM(MSIOF1_SYNC_F) F_(0, 0) FM(SDA1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP13_19_16 FM(MLB_DAT) FM(TX1_B) FM(MSIOF1_RXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_23_20 FM(SSI_SCK0129) F_(0, 0) FM(MSIOF1_TXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP13_27_24 FM(SSI_WS0129) F_(0, 0) FM(MSIOF1_SS1_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_23_20 FM(SSI_SCK01239) F_(0, 0) FM(MSIOF1_TXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP13_27_24 FM(SSI_WS01239) F_(0, 0) FM(MSIOF1_SS1_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP13_31_28 FM(SSI_SDATA0) F_(0, 0) FM(MSIOF1_SS2_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_3_0 FM(SSI_SDATA1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_7_4 FM(SSI_SDATA2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(SSI_SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -478,7 +478,6 @@ FM(IP16_31_28) IP16_31_28
#define MOD_SEL2_31 FM(I2C_SEL_5_0) FM(I2C_SEL_5_1)
#define MOD_SEL2_30 FM(I2C_SEL_3_0) FM(I2C_SEL_3_1)
#define MOD_SEL2_29 FM(I2C_SEL_0_0) FM(I2C_SEL_0_1)
-#define MOD_SEL2_2_1 FM(SEL_VSP_0) FM(SEL_VSP_1) FM(SEL_VSP_2) FM(SEL_VSP_3)
#define MOD_SEL2_0 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
#define PINMUX_MOD_SELS\
@@ -512,7 +511,7 @@ MOD_SEL0_7_6 \
MOD_SEL0_5_4 MOD_SEL1_5 \
MOD_SEL1_4 \
MOD_SEL0_3 MOD_SEL1_3 \
-MOD_SEL0_2_1 MOD_SEL1_2 MOD_SEL2_2_1 \
+MOD_SEL0_2_1 MOD_SEL1_2 \
MOD_SEL1_1 \
MOD_SEL1_0 MOD_SEL2_0
@@ -569,18 +568,18 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(SSI_WS5),
/* IPSR0 */
- PINMUX_IPSR_DATA(IP0_3_0, AVB_MDC),
+ PINMUX_IPSR_GPSR(IP0_3_0, AVB_MDC),
PINMUX_IPSR_MSEL(IP0_3_0, MSIOF2_SS2_C, SEL_MSIOF2_2),
- PINMUX_IPSR_DATA(IP0_7_4, AVB_MAGIC),
+ PINMUX_IPSR_GPSR(IP0_7_4, AVB_MAGIC),
PINMUX_IPSR_MSEL(IP0_7_4, MSIOF2_SS1_C, SEL_MSIOF2_2),
PINMUX_IPSR_MSEL(IP0_7_4, SCK4_A, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP0_11_8, AVB_PHY_INT),
+ PINMUX_IPSR_GPSR(IP0_11_8, AVB_PHY_INT),
PINMUX_IPSR_MSEL(IP0_11_8, MSIOF2_SYNC_C, SEL_MSIOF2_2),
PINMUX_IPSR_MSEL(IP0_11_8, RX4_A, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP0_15_12, AVB_LINK),
+ PINMUX_IPSR_GPSR(IP0_15_12, AVB_LINK),
PINMUX_IPSR_MSEL(IP0_15_12, MSIOF2_SCK_C, SEL_MSIOF2_2),
PINMUX_IPSR_MSEL(IP0_15_12, TX4_A, SEL_SCIF4_0),
@@ -592,126 +591,126 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP0_23_20, MSIOF2_TXD_C, SEL_MSIOF2_2),
PINMUX_IPSR_MSEL(IP0_23_20, RTS4_N_TANS_A, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP0_27_24, IRQ0),
- PINMUX_IPSR_DATA(IP0_27_24, QPOLB),
- PINMUX_IPSR_DATA(IP0_27_24, DU_CDE),
+ PINMUX_IPSR_GPSR(IP0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0_27_24, QPOLB),
+ PINMUX_IPSR_GPSR(IP0_27_24, DU_CDE),
PINMUX_IPSR_MSEL(IP0_27_24, VI4_DATA0_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP0_27_24, CAN0_TX_B, SEL_RCAN0_1),
PINMUX_IPSR_MSEL(IP0_27_24, CANFD0_TX_B, SEL_CANFD0_1),
- PINMUX_IPSR_DATA(IP0_31_28, IRQ1),
- PINMUX_IPSR_DATA(IP0_31_28, QPOLA),
- PINMUX_IPSR_DATA(IP0_31_28, DU_DISP),
+ PINMUX_IPSR_GPSR(IP0_31_28, IRQ1),
+ PINMUX_IPSR_GPSR(IP0_31_28, QPOLA),
+ PINMUX_IPSR_GPSR(IP0_31_28, DU_DISP),
PINMUX_IPSR_MSEL(IP0_31_28, VI4_DATA1_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP0_31_28, CAN0_RX_B, SEL_RCAN0_1),
PINMUX_IPSR_MSEL(IP0_31_28, CANFD0_RX_B, SEL_CANFD0_1),
/* IPSR1 */
- PINMUX_IPSR_DATA(IP1_3_0, IRQ2),
- PINMUX_IPSR_DATA(IP1_3_0, QCPV_QDE),
- PINMUX_IPSR_DATA(IP1_3_0, DU_EXODDF_DU_ODDF_DISP_CDE),
+ PINMUX_IPSR_GPSR(IP1_3_0, IRQ2),
+ PINMUX_IPSR_GPSR(IP1_3_0, QCPV_QDE),
+ PINMUX_IPSR_GPSR(IP1_3_0, DU_EXODDF_DU_ODDF_DISP_CDE),
PINMUX_IPSR_MSEL(IP1_3_0, VI4_DATA2_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP1_3_0, PWM3_B, SEL_PWM3_1),
- PINMUX_IPSR_DATA(IP1_7_4, IRQ3),
- PINMUX_IPSR_DATA(IP1_7_4, QSTVB_QVE),
- PINMUX_IPSR_DATA(IP1_7_4, A25),
- PINMUX_IPSR_DATA(IP1_7_4, DU_DOTCLKOUT1),
+ PINMUX_IPSR_GPSR(IP1_7_4, IRQ3),
+ PINMUX_IPSR_GPSR(IP1_7_4, QSTVB_QVE),
+ PINMUX_IPSR_GPSR(IP1_7_4, A25),
+ PINMUX_IPSR_GPSR(IP1_7_4, DU_DOTCLKOUT1),
PINMUX_IPSR_MSEL(IP1_7_4, VI4_DATA3_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP1_7_4, PWM4_B, SEL_PWM4_1),
- PINMUX_IPSR_DATA(IP1_11_8, IRQ4),
- PINMUX_IPSR_DATA(IP1_11_8, QSTH_QHS),
- PINMUX_IPSR_DATA(IP1_11_8, A24),
- PINMUX_IPSR_DATA(IP1_11_8, DU_EXHSYNC_DU_HSYNC),
+ PINMUX_IPSR_GPSR(IP1_11_8, IRQ4),
+ PINMUX_IPSR_GPSR(IP1_11_8, QSTH_QHS),
+ PINMUX_IPSR_GPSR(IP1_11_8, A24),
+ PINMUX_IPSR_GPSR(IP1_11_8, DU_EXHSYNC_DU_HSYNC),
PINMUX_IPSR_MSEL(IP1_11_8, VI4_DATA4_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP1_11_8, PWM5_B, SEL_PWM5_1),
- PINMUX_IPSR_DATA(IP1_15_12, IRQ5),
- PINMUX_IPSR_DATA(IP1_15_12, QSTB_QHE),
- PINMUX_IPSR_DATA(IP1_15_12, A23),
- PINMUX_IPSR_DATA(IP1_15_12, DU_EXVSYNC_DU_VSYNC),
+ PINMUX_IPSR_GPSR(IP1_15_12, IRQ5),
+ PINMUX_IPSR_GPSR(IP1_15_12, QSTB_QHE),
+ PINMUX_IPSR_GPSR(IP1_15_12, A23),
+ PINMUX_IPSR_GPSR(IP1_15_12, DU_EXVSYNC_DU_VSYNC),
PINMUX_IPSR_MSEL(IP1_15_12, VI4_DATA5_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP1_15_12, PWM6_B, SEL_PWM6_1),
- PINMUX_IPSR_DATA(IP1_19_16, PWM0),
- PINMUX_IPSR_DATA(IP1_19_16, AVB_AVTP_PPS),
- PINMUX_IPSR_DATA(IP1_19_16, A22),
+ PINMUX_IPSR_GPSR(IP1_19_16, PWM0),
+ PINMUX_IPSR_GPSR(IP1_19_16, AVB_AVTP_PPS),
+ PINMUX_IPSR_GPSR(IP1_19_16, A22),
PINMUX_IPSR_MSEL(IP1_19_16, VI4_DATA6_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP1_19_16, IECLK_B, SEL_IEBUS_1),
PINMUX_IPSR_MSEL(IP1_23_20, PWM1_A, SEL_PWM1_0),
- PINMUX_IPSR_DATA(IP1_23_20, A21),
+ PINMUX_IPSR_GPSR(IP1_23_20, A21),
PINMUX_IPSR_MSEL(IP1_23_20, HRX3_D, SEL_HSCIF3_3),
PINMUX_IPSR_MSEL(IP1_23_20, VI4_DATA7_B, SEL_VIN4_1),
PINMUX_IPSR_MSEL(IP1_23_20, IERX_B, SEL_IEBUS_1),
PINMUX_IPSR_MSEL(IP1_27_24, PWM2_A, SEL_PWM2_0),
- PINMUX_IPSR_DATA(IP1_27_24, A20),
+ PINMUX_IPSR_GPSR(IP1_27_24, A20),
PINMUX_IPSR_MSEL(IP1_27_24, HTX3_D, SEL_HSCIF3_3),
PINMUX_IPSR_MSEL(IP1_27_24, IETX_B, SEL_IEBUS_1),
- PINMUX_IPSR_DATA(IP1_31_28, A0),
- PINMUX_IPSR_DATA(IP1_31_28, LCDOUT16),
+ PINMUX_IPSR_GPSR(IP1_31_28, A0),
+ PINMUX_IPSR_GPSR(IP1_31_28, LCDOUT16),
PINMUX_IPSR_MSEL(IP1_31_28, MSIOF3_SYNC_B, SEL_MSIOF3_1),
- PINMUX_IPSR_DATA(IP1_31_28, VI4_DATA8),
- PINMUX_IPSR_DATA(IP1_31_28, DU_DB0),
+ PINMUX_IPSR_GPSR(IP1_31_28, VI4_DATA8),
+ PINMUX_IPSR_GPSR(IP1_31_28, DU_DB0),
PINMUX_IPSR_MSEL(IP1_31_28, PWM3_A, SEL_PWM3_0),
/* IPSR2 */
- PINMUX_IPSR_DATA(IP2_3_0, A1),
- PINMUX_IPSR_DATA(IP2_3_0, LCDOUT17),
+ PINMUX_IPSR_GPSR(IP2_3_0, A1),
+ PINMUX_IPSR_GPSR(IP2_3_0, LCDOUT17),
PINMUX_IPSR_MSEL(IP2_3_0, MSIOF3_TXD_B, SEL_MSIOF3_1),
- PINMUX_IPSR_DATA(IP2_3_0, VI4_DATA9),
- PINMUX_IPSR_DATA(IP2_3_0, DU_DB1),
+ PINMUX_IPSR_GPSR(IP2_3_0, VI4_DATA9),
+ PINMUX_IPSR_GPSR(IP2_3_0, DU_DB1),
PINMUX_IPSR_MSEL(IP2_3_0, PWM4_A, SEL_PWM4_0),
- PINMUX_IPSR_DATA(IP2_7_4, A2),
- PINMUX_IPSR_DATA(IP2_7_4, LCDOUT18),
+ PINMUX_IPSR_GPSR(IP2_7_4, A2),
+ PINMUX_IPSR_GPSR(IP2_7_4, LCDOUT18),
PINMUX_IPSR_MSEL(IP2_7_4, MSIOF3_SCK_B, SEL_MSIOF3_1),
- PINMUX_IPSR_DATA(IP2_7_4, VI4_DATA10),
- PINMUX_IPSR_DATA(IP2_7_4, DU_DB2),
+ PINMUX_IPSR_GPSR(IP2_7_4, VI4_DATA10),
+ PINMUX_IPSR_GPSR(IP2_7_4, DU_DB2),
PINMUX_IPSR_MSEL(IP2_7_4, PWM5_A, SEL_PWM5_0),
- PINMUX_IPSR_DATA(IP2_11_8, A3),
- PINMUX_IPSR_DATA(IP2_11_8, LCDOUT19),
+ PINMUX_IPSR_GPSR(IP2_11_8, A3),
+ PINMUX_IPSR_GPSR(IP2_11_8, LCDOUT19),
PINMUX_IPSR_MSEL(IP2_11_8, MSIOF3_RXD_B, SEL_MSIOF3_1),
- PINMUX_IPSR_DATA(IP2_11_8, VI4_DATA11),
- PINMUX_IPSR_DATA(IP2_11_8, DU_DB3),
+ PINMUX_IPSR_GPSR(IP2_11_8, VI4_DATA11),
+ PINMUX_IPSR_GPSR(IP2_11_8, DU_DB3),
PINMUX_IPSR_MSEL(IP2_11_8, PWM6_A, SEL_PWM6_0),
- PINMUX_IPSR_DATA(IP2_15_12, A4),
- PINMUX_IPSR_DATA(IP2_15_12, LCDOUT20),
+ PINMUX_IPSR_GPSR(IP2_15_12, A4),
+ PINMUX_IPSR_GPSR(IP2_15_12, LCDOUT20),
PINMUX_IPSR_MSEL(IP2_15_12, MSIOF3_SS1_B, SEL_MSIOF3_1),
- PINMUX_IPSR_DATA(IP2_15_12, VI4_DATA12),
- PINMUX_IPSR_DATA(IP2_15_12, VI5_DATA12),
- PINMUX_IPSR_DATA(IP2_15_12, DU_DB4),
+ PINMUX_IPSR_GPSR(IP2_15_12, VI4_DATA12),
+ PINMUX_IPSR_GPSR(IP2_15_12, VI5_DATA12),
+ PINMUX_IPSR_GPSR(IP2_15_12, DU_DB4),
- PINMUX_IPSR_DATA(IP2_19_16, A5),
- PINMUX_IPSR_DATA(IP2_19_16, LCDOUT21),
+ PINMUX_IPSR_GPSR(IP2_19_16, A5),
+ PINMUX_IPSR_GPSR(IP2_19_16, LCDOUT21),
PINMUX_IPSR_MSEL(IP2_19_16, MSIOF3_SS2_B, SEL_MSIOF3_1),
PINMUX_IPSR_MSEL(IP2_19_16, SCK4_B, SEL_SCIF4_1),
- PINMUX_IPSR_DATA(IP2_19_16, VI4_DATA13),
- PINMUX_IPSR_DATA(IP2_19_16, VI5_DATA13),
- PINMUX_IPSR_DATA(IP2_19_16, DU_DB5),
+ PINMUX_IPSR_GPSR(IP2_19_16, VI4_DATA13),
+ PINMUX_IPSR_GPSR(IP2_19_16, VI5_DATA13),
+ PINMUX_IPSR_GPSR(IP2_19_16, DU_DB5),
- PINMUX_IPSR_DATA(IP2_23_20, A6),
- PINMUX_IPSR_DATA(IP2_23_20, LCDOUT22),
+ PINMUX_IPSR_GPSR(IP2_23_20, A6),
+ PINMUX_IPSR_GPSR(IP2_23_20, LCDOUT22),
PINMUX_IPSR_MSEL(IP2_23_20, MSIOF2_SS1_A, SEL_MSIOF2_0),
PINMUX_IPSR_MSEL(IP2_23_20, RX4_B, SEL_SCIF4_1),
- PINMUX_IPSR_DATA(IP2_23_20, VI4_DATA14),
- PINMUX_IPSR_DATA(IP2_23_20, VI5_DATA14),
- PINMUX_IPSR_DATA(IP2_23_20, DU_DB6),
+ PINMUX_IPSR_GPSR(IP2_23_20, VI4_DATA14),
+ PINMUX_IPSR_GPSR(IP2_23_20, VI5_DATA14),
+ PINMUX_IPSR_GPSR(IP2_23_20, DU_DB6),
- PINMUX_IPSR_DATA(IP2_27_24, A7),
- PINMUX_IPSR_DATA(IP2_27_24, LCDOUT23),
+ PINMUX_IPSR_GPSR(IP2_27_24, A7),
+ PINMUX_IPSR_GPSR(IP2_27_24, LCDOUT23),
PINMUX_IPSR_MSEL(IP2_27_24, MSIOF2_SS2_A, SEL_MSIOF2_0),
PINMUX_IPSR_MSEL(IP2_27_24, TX4_B, SEL_SCIF4_1),
- PINMUX_IPSR_DATA(IP2_27_24, VI4_DATA15),
- PINMUX_IPSR_DATA(IP2_27_24, VI5_DATA15),
- PINMUX_IPSR_DATA(IP2_27_24, DU_DB7),
+ PINMUX_IPSR_GPSR(IP2_27_24, VI4_DATA15),
+ PINMUX_IPSR_GPSR(IP2_27_24, VI5_DATA15),
+ PINMUX_IPSR_GPSR(IP2_27_24, DU_DB7),
- PINMUX_IPSR_DATA(IP2_31_28, A8),
+ PINMUX_IPSR_GPSR(IP2_31_28, A8),
PINMUX_IPSR_MSEL(IP2_31_28, RX3_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP2_31_28, MSIOF2_SYNC_A, SEL_MSIOF2_0),
PINMUX_IPSR_MSEL(IP2_31_28, HRX4_B, SEL_HSCIF4_1),
@@ -720,99 +719,99 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP2_31_28, PWM1_B, SEL_PWM1_1),
/* IPSR3 */
- PINMUX_IPSR_DATA(IP3_3_0, A9),
+ PINMUX_IPSR_GPSR(IP3_3_0, A9),
PINMUX_IPSR_MSEL(IP3_3_0, MSIOF2_SCK_A, SEL_MSIOF2_0),
PINMUX_IPSR_MSEL(IP3_3_0, CTS4_N_B, SEL_SCIF4_1),
- PINMUX_IPSR_DATA(IP3_3_0, VI5_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP3_3_0, VI5_VSYNC_N),
- PINMUX_IPSR_DATA(IP3_7_4, A10),
+ PINMUX_IPSR_GPSR(IP3_7_4, A10),
PINMUX_IPSR_MSEL(IP3_7_4, MSIOF2_RXD_A, SEL_MSIOF2_0),
PINMUX_IPSR_MSEL(IP3_7_4, RTS4_N_TANS_B, SEL_SCIF4_1),
- PINMUX_IPSR_DATA(IP3_7_4, VI5_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP3_7_4, VI5_HSYNC_N),
- PINMUX_IPSR_DATA(IP3_11_8, A11),
+ PINMUX_IPSR_GPSR(IP3_11_8, A11),
PINMUX_IPSR_MSEL(IP3_11_8, TX3_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP3_11_8, MSIOF2_TXD_A, SEL_MSIOF2_0),
PINMUX_IPSR_MSEL(IP3_11_8, HTX4_B, SEL_HSCIF4_1),
- PINMUX_IPSR_DATA(IP3_11_8, HSCK4),
- PINMUX_IPSR_DATA(IP3_11_8, VI5_FIELD),
+ PINMUX_IPSR_GPSR(IP3_11_8, HSCK4),
+ PINMUX_IPSR_GPSR(IP3_11_8, VI5_FIELD),
PINMUX_IPSR_MSEL(IP3_11_8, SCL6_A, SEL_I2C6_0),
PINMUX_IPSR_MSEL(IP3_11_8, AVB_AVTP_CAPTURE_B, SEL_ETHERAVB_1),
PINMUX_IPSR_MSEL(IP3_11_8, PWM2_B, SEL_PWM2_1),
- PINMUX_IPSR_DATA(IP3_15_12, A12),
- PINMUX_IPSR_DATA(IP3_15_12, LCDOUT12),
+ PINMUX_IPSR_GPSR(IP3_15_12, A12),
+ PINMUX_IPSR_GPSR(IP3_15_12, LCDOUT12),
PINMUX_IPSR_MSEL(IP3_15_12, MSIOF3_SCK_C, SEL_MSIOF3_2),
PINMUX_IPSR_MSEL(IP3_15_12, HRX4_A, SEL_HSCIF4_0),
- PINMUX_IPSR_DATA(IP3_15_12, VI5_DATA8),
- PINMUX_IPSR_DATA(IP3_15_12, DU_DG4),
+ PINMUX_IPSR_GPSR(IP3_15_12, VI5_DATA8),
+ PINMUX_IPSR_GPSR(IP3_15_12, DU_DG4),
- PINMUX_IPSR_DATA(IP3_19_16, A13),
- PINMUX_IPSR_DATA(IP3_19_16, LCDOUT13),
+ PINMUX_IPSR_GPSR(IP3_19_16, A13),
+ PINMUX_IPSR_GPSR(IP3_19_16, LCDOUT13),
PINMUX_IPSR_MSEL(IP3_19_16, MSIOF3_SYNC_C, SEL_MSIOF3_2),
PINMUX_IPSR_MSEL(IP3_19_16, HTX4_A, SEL_HSCIF4_0),
- PINMUX_IPSR_DATA(IP3_19_16, VI5_DATA9),
- PINMUX_IPSR_DATA(IP3_19_16, DU_DG5),
+ PINMUX_IPSR_GPSR(IP3_19_16, VI5_DATA9),
+ PINMUX_IPSR_GPSR(IP3_19_16, DU_DG5),
- PINMUX_IPSR_DATA(IP3_23_20, A14),
- PINMUX_IPSR_DATA(IP3_23_20, LCDOUT14),
+ PINMUX_IPSR_GPSR(IP3_23_20, A14),
+ PINMUX_IPSR_GPSR(IP3_23_20, LCDOUT14),
PINMUX_IPSR_MSEL(IP3_23_20, MSIOF3_RXD_C, SEL_MSIOF3_2),
- PINMUX_IPSR_DATA(IP3_23_20, HCTS4_N),
- PINMUX_IPSR_DATA(IP3_23_20, VI5_DATA10),
- PINMUX_IPSR_DATA(IP3_23_20, DU_DG6),
+ PINMUX_IPSR_GPSR(IP3_23_20, HCTS4_N),
+ PINMUX_IPSR_GPSR(IP3_23_20, VI5_DATA10),
+ PINMUX_IPSR_GPSR(IP3_23_20, DU_DG6),
- PINMUX_IPSR_DATA(IP3_27_24, A15),
- PINMUX_IPSR_DATA(IP3_27_24, LCDOUT15),
+ PINMUX_IPSR_GPSR(IP3_27_24, A15),
+ PINMUX_IPSR_GPSR(IP3_27_24, LCDOUT15),
PINMUX_IPSR_MSEL(IP3_27_24, MSIOF3_TXD_C, SEL_MSIOF3_2),
- PINMUX_IPSR_DATA(IP3_27_24, HRTS4_N),
- PINMUX_IPSR_DATA(IP3_27_24, VI5_DATA11),
- PINMUX_IPSR_DATA(IP3_27_24, DU_DG7),
+ PINMUX_IPSR_GPSR(IP3_27_24, HRTS4_N),
+ PINMUX_IPSR_GPSR(IP3_27_24, VI5_DATA11),
+ PINMUX_IPSR_GPSR(IP3_27_24, DU_DG7),
- PINMUX_IPSR_DATA(IP3_31_28, A16),
- PINMUX_IPSR_DATA(IP3_31_28, LCDOUT8),
- PINMUX_IPSR_DATA(IP3_31_28, VI4_FIELD),
- PINMUX_IPSR_DATA(IP3_31_28, DU_DG0),
+ PINMUX_IPSR_GPSR(IP3_31_28, A16),
+ PINMUX_IPSR_GPSR(IP3_31_28, LCDOUT8),
+ PINMUX_IPSR_GPSR(IP3_31_28, VI4_FIELD),
+ PINMUX_IPSR_GPSR(IP3_31_28, DU_DG0),
/* IPSR4 */
- PINMUX_IPSR_DATA(IP4_3_0, A17),
- PINMUX_IPSR_DATA(IP4_3_0, LCDOUT9),
- PINMUX_IPSR_DATA(IP4_3_0, VI4_VSYNC_N),
- PINMUX_IPSR_DATA(IP4_3_0, DU_DG1),
-
- PINMUX_IPSR_DATA(IP4_7_4, A18),
- PINMUX_IPSR_DATA(IP4_7_4, LCDOUT10),
- PINMUX_IPSR_DATA(IP4_7_4, VI4_HSYNC_N),
- PINMUX_IPSR_DATA(IP4_7_4, DU_DG2),
-
- PINMUX_IPSR_DATA(IP4_11_8, A19),
- PINMUX_IPSR_DATA(IP4_11_8, LCDOUT11),
- PINMUX_IPSR_DATA(IP4_11_8, VI4_CLKENB),
- PINMUX_IPSR_DATA(IP4_11_8, DU_DG3),
-
- PINMUX_IPSR_DATA(IP4_15_12, CS0_N),
- PINMUX_IPSR_DATA(IP4_15_12, VI5_CLKENB),
-
- PINMUX_IPSR_DATA(IP4_19_16, CS1_N_A26),
- PINMUX_IPSR_DATA(IP4_19_16, VI5_CLK),
+ PINMUX_IPSR_GPSR(IP4_3_0, A17),
+ PINMUX_IPSR_GPSR(IP4_3_0, LCDOUT9),
+ PINMUX_IPSR_GPSR(IP4_3_0, VI4_VSYNC_N),
+ PINMUX_IPSR_GPSR(IP4_3_0, DU_DG1),
+
+ PINMUX_IPSR_GPSR(IP4_7_4, A18),
+ PINMUX_IPSR_GPSR(IP4_7_4, LCDOUT10),
+ PINMUX_IPSR_GPSR(IP4_7_4, VI4_HSYNC_N),
+ PINMUX_IPSR_GPSR(IP4_7_4, DU_DG2),
+
+ PINMUX_IPSR_GPSR(IP4_11_8, A19),
+ PINMUX_IPSR_GPSR(IP4_11_8, LCDOUT11),
+ PINMUX_IPSR_GPSR(IP4_11_8, VI4_CLKENB),
+ PINMUX_IPSR_GPSR(IP4_11_8, DU_DG3),
+
+ PINMUX_IPSR_GPSR(IP4_15_12, CS0_N),
+ PINMUX_IPSR_GPSR(IP4_15_12, VI5_CLKENB),
+
+ PINMUX_IPSR_GPSR(IP4_19_16, CS1_N_A26),
+ PINMUX_IPSR_GPSR(IP4_19_16, VI5_CLK),
PINMUX_IPSR_MSEL(IP4_19_16, EX_WAIT0_B, SEL_LBSC_1),
- PINMUX_IPSR_DATA(IP4_23_20, BS_N),
- PINMUX_IPSR_DATA(IP4_23_20, QSTVA_QVS),
+ PINMUX_IPSR_GPSR(IP4_23_20, BS_N),
+ PINMUX_IPSR_GPSR(IP4_23_20, QSTVA_QVS),
PINMUX_IPSR_MSEL(IP4_23_20, MSIOF3_SCK_D, SEL_MSIOF3_3),
- PINMUX_IPSR_DATA(IP4_23_20, SCK3),
- PINMUX_IPSR_DATA(IP4_23_20, HSCK3),
- PINMUX_IPSR_DATA(IP4_23_20, CAN1_TX),
- PINMUX_IPSR_DATA(IP4_23_20, CANFD1_TX),
+ PINMUX_IPSR_GPSR(IP4_23_20, SCK3),
+ PINMUX_IPSR_GPSR(IP4_23_20, HSCK3),
+ PINMUX_IPSR_GPSR(IP4_23_20, CAN1_TX),
+ PINMUX_IPSR_GPSR(IP4_23_20, CANFD1_TX),
PINMUX_IPSR_MSEL(IP4_23_20, IETX_A, SEL_IEBUS_0),
- PINMUX_IPSR_DATA(IP4_27_24, RD_N),
+ PINMUX_IPSR_GPSR(IP4_27_24, RD_N),
PINMUX_IPSR_MSEL(IP4_27_24, MSIOF3_SYNC_D, SEL_MSIOF3_3),
PINMUX_IPSR_MSEL(IP4_27_24, RX3_A, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP4_27_24, HRX3_A, SEL_HSCIF3_0),
PINMUX_IPSR_MSEL(IP4_27_24, CAN0_TX_A, SEL_RCAN0_0),
PINMUX_IPSR_MSEL(IP4_27_24, CANFD0_TX_A, SEL_CANFD0_0),
- PINMUX_IPSR_DATA(IP4_31_28, RD_WR_N),
+ PINMUX_IPSR_GPSR(IP4_31_28, RD_WR_N),
PINMUX_IPSR_MSEL(IP4_31_28, MSIOF3_RXD_D, SEL_MSIOF3_3),
PINMUX_IPSR_MSEL(IP4_31_28, TX3_A, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP4_31_28, HTX3_A, SEL_HSCIF3_0),
@@ -820,236 +819,236 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP4_31_28, CANFD0_RX_A, SEL_CANFD0_0),
/* IPSR5 */
- PINMUX_IPSR_DATA(IP5_3_0, WE0_N),
+ PINMUX_IPSR_GPSR(IP5_3_0, WE0_N),
PINMUX_IPSR_MSEL(IP5_3_0, MSIOF3_TXD_D, SEL_MSIOF3_3),
- PINMUX_IPSR_DATA(IP5_3_0, CTS3_N),
- PINMUX_IPSR_DATA(IP5_3_0, HCTS3_N),
+ PINMUX_IPSR_GPSR(IP5_3_0, CTS3_N),
+ PINMUX_IPSR_GPSR(IP5_3_0, HCTS3_N),
PINMUX_IPSR_MSEL(IP5_3_0, SCL6_B, SEL_I2C6_1),
- PINMUX_IPSR_DATA(IP5_3_0, CAN_CLK),
+ PINMUX_IPSR_GPSR(IP5_3_0, CAN_CLK),
PINMUX_IPSR_MSEL(IP5_3_0, IECLK_A, SEL_IEBUS_0),
- PINMUX_IPSR_DATA(IP5_7_4, WE1_N),
+ PINMUX_IPSR_GPSR(IP5_7_4, WE1_N),
PINMUX_IPSR_MSEL(IP5_7_4, MSIOF3_SS1_D, SEL_MSIOF3_3),
- PINMUX_IPSR_DATA(IP5_7_4, RTS3_N_TANS),
- PINMUX_IPSR_DATA(IP5_7_4, HRTS3_N),
+ PINMUX_IPSR_GPSR(IP5_7_4, RTS3_N_TANS),
+ PINMUX_IPSR_GPSR(IP5_7_4, HRTS3_N),
PINMUX_IPSR_MSEL(IP5_7_4, SDA6_B, SEL_I2C6_1),
- PINMUX_IPSR_DATA(IP5_7_4, CAN1_RX),
- PINMUX_IPSR_DATA(IP5_7_4, CANFD1_RX),
+ PINMUX_IPSR_GPSR(IP5_7_4, CAN1_RX),
+ PINMUX_IPSR_GPSR(IP5_7_4, CANFD1_RX),
PINMUX_IPSR_MSEL(IP5_7_4, IERX_A, SEL_IEBUS_0),
PINMUX_IPSR_MSEL(IP5_11_8, EX_WAIT0_A, SEL_LBSC_0),
- PINMUX_IPSR_DATA(IP5_11_8, QCLK),
- PINMUX_IPSR_DATA(IP5_11_8, VI4_CLK),
- PINMUX_IPSR_DATA(IP5_11_8, DU_DOTCLKOUT0),
+ PINMUX_IPSR_GPSR(IP5_11_8, QCLK),
+ PINMUX_IPSR_GPSR(IP5_11_8, VI4_CLK),
+ PINMUX_IPSR_GPSR(IP5_11_8, DU_DOTCLKOUT0),
- PINMUX_IPSR_DATA(IP5_15_12, D0),
+ PINMUX_IPSR_GPSR(IP5_15_12, D0),
PINMUX_IPSR_MSEL(IP5_15_12, MSIOF2_SS1_B, SEL_MSIOF2_1),
PINMUX_IPSR_MSEL(IP5_15_12, MSIOF3_SCK_A, SEL_MSIOF3_0),
- PINMUX_IPSR_DATA(IP5_15_12, VI4_DATA16),
- PINMUX_IPSR_DATA(IP5_15_12, VI5_DATA0),
+ PINMUX_IPSR_GPSR(IP5_15_12, VI4_DATA16),
+ PINMUX_IPSR_GPSR(IP5_15_12, VI5_DATA0),
- PINMUX_IPSR_DATA(IP5_19_16, D1),
+ PINMUX_IPSR_GPSR(IP5_19_16, D1),
PINMUX_IPSR_MSEL(IP5_19_16, MSIOF2_SS2_B, SEL_MSIOF2_1),
PINMUX_IPSR_MSEL(IP5_19_16, MSIOF3_SYNC_A, SEL_MSIOF3_0),
- PINMUX_IPSR_DATA(IP5_19_16, VI4_DATA17),
- PINMUX_IPSR_DATA(IP5_19_16, VI5_DATA1),
+ PINMUX_IPSR_GPSR(IP5_19_16, VI4_DATA17),
+ PINMUX_IPSR_GPSR(IP5_19_16, VI5_DATA1),
- PINMUX_IPSR_DATA(IP5_23_20, D2),
+ PINMUX_IPSR_GPSR(IP5_23_20, D2),
PINMUX_IPSR_MSEL(IP5_23_20, MSIOF3_RXD_A, SEL_MSIOF3_0),
- PINMUX_IPSR_DATA(IP5_23_20, VI4_DATA18),
- PINMUX_IPSR_DATA(IP5_23_20, VI5_DATA2),
+ PINMUX_IPSR_GPSR(IP5_23_20, VI4_DATA18),
+ PINMUX_IPSR_GPSR(IP5_23_20, VI5_DATA2),
- PINMUX_IPSR_DATA(IP5_27_24, D3),
+ PINMUX_IPSR_GPSR(IP5_27_24, D3),
PINMUX_IPSR_MSEL(IP5_27_24, MSIOF3_TXD_A, SEL_MSIOF3_0),
- PINMUX_IPSR_DATA(IP5_27_24, VI4_DATA19),
- PINMUX_IPSR_DATA(IP5_27_24, VI5_DATA3),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI4_DATA19),
+ PINMUX_IPSR_GPSR(IP5_27_24, VI5_DATA3),
- PINMUX_IPSR_DATA(IP5_31_28, D4),
+ PINMUX_IPSR_GPSR(IP5_31_28, D4),
PINMUX_IPSR_MSEL(IP5_31_28, MSIOF2_SCK_B, SEL_MSIOF2_1),
- PINMUX_IPSR_DATA(IP5_31_28, VI4_DATA20),
- PINMUX_IPSR_DATA(IP5_31_28, VI5_DATA4),
+ PINMUX_IPSR_GPSR(IP5_31_28, VI4_DATA20),
+ PINMUX_IPSR_GPSR(IP5_31_28, VI5_DATA4),
/* IPSR6 */
- PINMUX_IPSR_DATA(IP6_3_0, D5),
+ PINMUX_IPSR_GPSR(IP6_3_0, D5),
PINMUX_IPSR_MSEL(IP6_3_0, MSIOF2_SYNC_B, SEL_MSIOF2_1),
- PINMUX_IPSR_DATA(IP6_3_0, VI4_DATA21),
- PINMUX_IPSR_DATA(IP6_3_0, VI5_DATA5),
+ PINMUX_IPSR_GPSR(IP6_3_0, VI4_DATA21),
+ PINMUX_IPSR_GPSR(IP6_3_0, VI5_DATA5),
- PINMUX_IPSR_DATA(IP6_7_4, D6),
+ PINMUX_IPSR_GPSR(IP6_7_4, D6),
PINMUX_IPSR_MSEL(IP6_7_4, MSIOF2_RXD_B, SEL_MSIOF2_1),
- PINMUX_IPSR_DATA(IP6_7_4, VI4_DATA22),
- PINMUX_IPSR_DATA(IP6_7_4, VI5_DATA6),
+ PINMUX_IPSR_GPSR(IP6_7_4, VI4_DATA22),
+ PINMUX_IPSR_GPSR(IP6_7_4, VI5_DATA6),
- PINMUX_IPSR_DATA(IP6_11_8, D7),
+ PINMUX_IPSR_GPSR(IP6_11_8, D7),
PINMUX_IPSR_MSEL(IP6_11_8, MSIOF2_TXD_B, SEL_MSIOF2_1),
- PINMUX_IPSR_DATA(IP6_11_8, VI4_DATA23),
- PINMUX_IPSR_DATA(IP6_11_8, VI5_DATA7),
+ PINMUX_IPSR_GPSR(IP6_11_8, VI4_DATA23),
+ PINMUX_IPSR_GPSR(IP6_11_8, VI5_DATA7),
- PINMUX_IPSR_DATA(IP6_15_12, D8),
- PINMUX_IPSR_DATA(IP6_15_12, LCDOUT0),
+ PINMUX_IPSR_GPSR(IP6_15_12, D8),
+ PINMUX_IPSR_GPSR(IP6_15_12, LCDOUT0),
PINMUX_IPSR_MSEL(IP6_15_12, MSIOF2_SCK_D, SEL_MSIOF2_3),
PINMUX_IPSR_MSEL(IP6_15_12, SCK4_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP6_15_12, VI4_DATA0_A, SEL_VIN4_0),
- PINMUX_IPSR_DATA(IP6_15_12, DU_DR0),
+ PINMUX_IPSR_GPSR(IP6_15_12, DU_DR0),
- PINMUX_IPSR_DATA(IP6_19_16, D9),
- PINMUX_IPSR_DATA(IP6_19_16, LCDOUT1),
+ PINMUX_IPSR_GPSR(IP6_19_16, D9),
+ PINMUX_IPSR_GPSR(IP6_19_16, LCDOUT1),
PINMUX_IPSR_MSEL(IP6_19_16, MSIOF2_SYNC_D, SEL_MSIOF2_3),
PINMUX_IPSR_MSEL(IP6_19_16, VI4_DATA1_A, SEL_VIN4_0),
- PINMUX_IPSR_DATA(IP6_19_16, DU_DR1),
+ PINMUX_IPSR_GPSR(IP6_19_16, DU_DR1),
- PINMUX_IPSR_DATA(IP6_23_20, D10),
- PINMUX_IPSR_DATA(IP6_23_20, LCDOUT2),
+ PINMUX_IPSR_GPSR(IP6_23_20, D10),
+ PINMUX_IPSR_GPSR(IP6_23_20, LCDOUT2),
PINMUX_IPSR_MSEL(IP6_23_20, MSIOF2_RXD_D, SEL_MSIOF2_3),
PINMUX_IPSR_MSEL(IP6_23_20, HRX3_B, SEL_HSCIF3_1),
PINMUX_IPSR_MSEL(IP6_23_20, VI4_DATA2_A, SEL_VIN4_0),
PINMUX_IPSR_MSEL(IP6_23_20, CTS4_N_C, SEL_SCIF4_2),
- PINMUX_IPSR_DATA(IP6_23_20, DU_DR2),
+ PINMUX_IPSR_GPSR(IP6_23_20, DU_DR2),
- PINMUX_IPSR_DATA(IP6_27_24, D11),
- PINMUX_IPSR_DATA(IP6_27_24, LCDOUT3),
+ PINMUX_IPSR_GPSR(IP6_27_24, D11),
+ PINMUX_IPSR_GPSR(IP6_27_24, LCDOUT3),
PINMUX_IPSR_MSEL(IP6_27_24, MSIOF2_TXD_D, SEL_MSIOF2_3),
PINMUX_IPSR_MSEL(IP6_27_24, HTX3_B, SEL_HSCIF3_1),
PINMUX_IPSR_MSEL(IP6_27_24, VI4_DATA3_A, SEL_VIN4_0),
PINMUX_IPSR_MSEL(IP6_27_24, RTS4_N_TANS_C, SEL_SCIF4_2),
- PINMUX_IPSR_DATA(IP6_27_24, DU_DR3),
+ PINMUX_IPSR_GPSR(IP6_27_24, DU_DR3),
- PINMUX_IPSR_DATA(IP6_31_28, D12),
- PINMUX_IPSR_DATA(IP6_31_28, LCDOUT4),
+ PINMUX_IPSR_GPSR(IP6_31_28, D12),
+ PINMUX_IPSR_GPSR(IP6_31_28, LCDOUT4),
PINMUX_IPSR_MSEL(IP6_31_28, MSIOF2_SS1_D, SEL_MSIOF2_3),
PINMUX_IPSR_MSEL(IP6_31_28, RX4_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP6_31_28, VI4_DATA4_A, SEL_VIN4_0),
- PINMUX_IPSR_DATA(IP6_31_28, DU_DR4),
+ PINMUX_IPSR_GPSR(IP6_31_28, DU_DR4),
/* IPSR7 */
- PINMUX_IPSR_DATA(IP7_3_0, D13),
- PINMUX_IPSR_DATA(IP7_3_0, LCDOUT5),
+ PINMUX_IPSR_GPSR(IP7_3_0, D13),
+ PINMUX_IPSR_GPSR(IP7_3_0, LCDOUT5),
PINMUX_IPSR_MSEL(IP7_3_0, MSIOF2_SS2_D, SEL_MSIOF2_3),
PINMUX_IPSR_MSEL(IP7_3_0, TX4_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP7_3_0, VI4_DATA5_A, SEL_VIN4_0),
- PINMUX_IPSR_DATA(IP7_3_0, DU_DR5),
+ PINMUX_IPSR_GPSR(IP7_3_0, DU_DR5),
- PINMUX_IPSR_DATA(IP7_7_4, D14),
- PINMUX_IPSR_DATA(IP7_7_4, LCDOUT6),
+ PINMUX_IPSR_GPSR(IP7_7_4, D14),
+ PINMUX_IPSR_GPSR(IP7_7_4, LCDOUT6),
PINMUX_IPSR_MSEL(IP7_7_4, MSIOF3_SS1_A, SEL_MSIOF3_0),
PINMUX_IPSR_MSEL(IP7_7_4, HRX3_C, SEL_HSCIF3_2),
PINMUX_IPSR_MSEL(IP7_7_4, VI4_DATA6_A, SEL_VIN4_0),
- PINMUX_IPSR_DATA(IP7_7_4, DU_DR6),
+ PINMUX_IPSR_GPSR(IP7_7_4, DU_DR6),
PINMUX_IPSR_MSEL(IP7_7_4, SCL6_C, SEL_I2C6_2),
- PINMUX_IPSR_DATA(IP7_11_8, D15),
- PINMUX_IPSR_DATA(IP7_11_8, LCDOUT7),
+ PINMUX_IPSR_GPSR(IP7_11_8, D15),
+ PINMUX_IPSR_GPSR(IP7_11_8, LCDOUT7),
PINMUX_IPSR_MSEL(IP7_11_8, MSIOF3_SS2_A, SEL_MSIOF3_0),
PINMUX_IPSR_MSEL(IP7_11_8, HTX3_C, SEL_HSCIF3_2),
PINMUX_IPSR_MSEL(IP7_11_8, VI4_DATA7_A, SEL_VIN4_0),
- PINMUX_IPSR_DATA(IP7_11_8, DU_DR7),
+ PINMUX_IPSR_GPSR(IP7_11_8, DU_DR7),
PINMUX_IPSR_MSEL(IP7_11_8, SDA6_C, SEL_I2C6_2),
- PINMUX_IPSR_DATA(IP7_15_12, FSCLKST),
+ PINMUX_IPSR_GPSR(IP7_15_12, FSCLKST),
- PINMUX_IPSR_DATA(IP7_19_16, SD0_CLK),
+ PINMUX_IPSR_GPSR(IP7_19_16, SD0_CLK),
PINMUX_IPSR_MSEL(IP7_19_16, MSIOF1_SCK_E, SEL_MSIOF1_4),
PINMUX_IPSR_MSEL(IP7_19_16, STP_OPWM_0_B, SEL_SSP1_0_1),
- PINMUX_IPSR_DATA(IP7_23_20, SD0_CMD),
+ PINMUX_IPSR_GPSR(IP7_23_20, SD0_CMD),
PINMUX_IPSR_MSEL(IP7_23_20, MSIOF1_SYNC_E, SEL_MSIOF1_4),
PINMUX_IPSR_MSEL(IP7_23_20, STP_IVCXO27_0_B, SEL_SSP1_0_1),
- PINMUX_IPSR_DATA(IP7_27_24, SD0_DAT0),
+ PINMUX_IPSR_GPSR(IP7_27_24, SD0_DAT0),
PINMUX_IPSR_MSEL(IP7_27_24, MSIOF1_RXD_E, SEL_MSIOF1_4),
PINMUX_IPSR_MSEL(IP7_27_24, TS_SCK0_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP7_27_24, STP_ISCLK_0_B, SEL_SSP1_0_1),
- PINMUX_IPSR_DATA(IP7_31_28, SD0_DAT1),
+ PINMUX_IPSR_GPSR(IP7_31_28, SD0_DAT1),
PINMUX_IPSR_MSEL(IP7_31_28, MSIOF1_TXD_E, SEL_MSIOF1_4),
PINMUX_IPSR_MSEL(IP7_31_28, TS_SPSYNC0_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP7_31_28, STP_ISSYNC_0_B, SEL_SSP1_0_1),
/* IPSR8 */
- PINMUX_IPSR_DATA(IP8_3_0, SD0_DAT2),
+ PINMUX_IPSR_GPSR(IP8_3_0, SD0_DAT2),
PINMUX_IPSR_MSEL(IP8_3_0, MSIOF1_SS1_E, SEL_MSIOF1_4),
PINMUX_IPSR_MSEL(IP8_3_0, TS_SDAT0_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP8_3_0, STP_ISD_0_B, SEL_SSP1_0_1),
- PINMUX_IPSR_DATA(IP8_7_4, SD0_DAT3),
+ PINMUX_IPSR_GPSR(IP8_7_4, SD0_DAT3),
PINMUX_IPSR_MSEL(IP8_7_4, MSIOF1_SS2_E, SEL_MSIOF1_4),
PINMUX_IPSR_MSEL(IP8_7_4, TS_SDEN0_B, SEL_TSIF0_1),
PINMUX_IPSR_MSEL(IP8_7_4, STP_ISEN_0_B, SEL_SSP1_0_1),
- PINMUX_IPSR_DATA(IP8_11_8, SD1_CLK),
+ PINMUX_IPSR_GPSR(IP8_11_8, SD1_CLK),
PINMUX_IPSR_MSEL(IP8_11_8, MSIOF1_SCK_G, SEL_MSIOF1_6),
PINMUX_IPSR_MSEL(IP8_11_8, SIM0_CLK_A, SEL_SIMCARD_0),
- PINMUX_IPSR_DATA(IP8_15_12, SD1_CMD),
+ PINMUX_IPSR_GPSR(IP8_15_12, SD1_CMD),
PINMUX_IPSR_MSEL(IP8_15_12, MSIOF1_SYNC_G, SEL_MSIOF1_6),
PINMUX_IPSR_MSEL(IP8_15_12, SIM0_D_A, SEL_SIMCARD_0),
PINMUX_IPSR_MSEL(IP8_15_12, STP_IVCXO27_1_B, SEL_SSP1_1_1),
- PINMUX_IPSR_DATA(IP8_19_16, SD1_DAT0),
- PINMUX_IPSR_DATA(IP8_19_16, SD2_DAT4),
+ PINMUX_IPSR_GPSR(IP8_19_16, SD1_DAT0),
+ PINMUX_IPSR_GPSR(IP8_19_16, SD2_DAT4),
PINMUX_IPSR_MSEL(IP8_19_16, MSIOF1_RXD_G, SEL_MSIOF1_6),
PINMUX_IPSR_MSEL(IP8_19_16, TS_SCK1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_19_16, STP_ISCLK_1_B, SEL_SSP1_1_1),
- PINMUX_IPSR_DATA(IP8_23_20, SD1_DAT1),
- PINMUX_IPSR_DATA(IP8_23_20, SD2_DAT5),
+ PINMUX_IPSR_GPSR(IP8_23_20, SD1_DAT1),
+ PINMUX_IPSR_GPSR(IP8_23_20, SD2_DAT5),
PINMUX_IPSR_MSEL(IP8_23_20, MSIOF1_TXD_G, SEL_MSIOF1_6),
PINMUX_IPSR_MSEL(IP8_23_20, TS_SPSYNC1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_23_20, STP_ISSYNC_1_B, SEL_SSP1_1_1),
- PINMUX_IPSR_DATA(IP8_27_24, SD1_DAT2),
- PINMUX_IPSR_DATA(IP8_27_24, SD2_DAT6),
+ PINMUX_IPSR_GPSR(IP8_27_24, SD1_DAT2),
+ PINMUX_IPSR_GPSR(IP8_27_24, SD2_DAT6),
PINMUX_IPSR_MSEL(IP8_27_24, MSIOF1_SS1_G, SEL_MSIOF1_6),
PINMUX_IPSR_MSEL(IP8_27_24, TS_SDAT1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_27_24, STP_ISD_1_B, SEL_SSP1_1_1),
- PINMUX_IPSR_DATA(IP8_31_28, SD1_DAT3),
- PINMUX_IPSR_DATA(IP8_31_28, SD2_DAT7),
+ PINMUX_IPSR_GPSR(IP8_31_28, SD1_DAT3),
+ PINMUX_IPSR_GPSR(IP8_31_28, SD2_DAT7),
PINMUX_IPSR_MSEL(IP8_31_28, MSIOF1_SS2_G, SEL_MSIOF1_6),
PINMUX_IPSR_MSEL(IP8_31_28, TS_SDEN1_B, SEL_TSIF1_1),
PINMUX_IPSR_MSEL(IP8_31_28, STP_ISEN_1_B, SEL_SSP1_1_1),
/* IPSR9 */
- PINMUX_IPSR_DATA(IP9_3_0, SD2_CLK),
+ PINMUX_IPSR_GPSR(IP9_3_0, SD2_CLK),
- PINMUX_IPSR_DATA(IP9_7_4, SD2_DAT0),
+ PINMUX_IPSR_GPSR(IP9_7_4, SD2_DAT0),
- PINMUX_IPSR_DATA(IP9_11_8, SD2_DAT1),
+ PINMUX_IPSR_GPSR(IP9_11_8, SD2_DAT1),
- PINMUX_IPSR_DATA(IP9_15_12, SD2_DAT2),
+ PINMUX_IPSR_GPSR(IP9_15_12, SD2_DAT2),
- PINMUX_IPSR_DATA(IP9_19_16, SD2_DAT3),
+ PINMUX_IPSR_GPSR(IP9_19_16, SD2_DAT3),
- PINMUX_IPSR_DATA(IP9_23_20, SD2_DS),
+ PINMUX_IPSR_GPSR(IP9_23_20, SD2_DS),
PINMUX_IPSR_MSEL(IP9_23_20, SATA_DEVSLP_B, SEL_SATA_1),
- PINMUX_IPSR_DATA(IP9_27_24, SD3_DAT4),
+ PINMUX_IPSR_GPSR(IP9_27_24, SD3_DAT4),
PINMUX_IPSR_MSEL(IP9_27_24, SD2_CD_A, SEL_SDHI2_0),
- PINMUX_IPSR_DATA(IP9_31_28, SD3_DAT5),
+ PINMUX_IPSR_GPSR(IP9_31_28, SD3_DAT5),
PINMUX_IPSR_MSEL(IP9_31_28, SD2_WP_A, SEL_SDHI2_0),
/* IPSR10 */
- PINMUX_IPSR_DATA(IP10_3_0, SD3_DAT6),
- PINMUX_IPSR_DATA(IP10_3_0, SD3_CD),
+ PINMUX_IPSR_GPSR(IP10_3_0, SD3_DAT6),
+ PINMUX_IPSR_GPSR(IP10_3_0, SD3_CD),
- PINMUX_IPSR_DATA(IP10_7_4, SD3_DAT7),
- PINMUX_IPSR_DATA(IP10_7_4, SD3_WP),
+ PINMUX_IPSR_GPSR(IP10_7_4, SD3_DAT7),
+ PINMUX_IPSR_GPSR(IP10_7_4, SD3_WP),
- PINMUX_IPSR_DATA(IP10_11_8, SD0_CD),
+ PINMUX_IPSR_GPSR(IP10_11_8, SD0_CD),
PINMUX_IPSR_MSEL(IP10_11_8, SCL2_B, SEL_I2C2_1),
PINMUX_IPSR_MSEL(IP10_11_8, SIM0_RST_A, SEL_SIMCARD_0),
- PINMUX_IPSR_DATA(IP10_15_12, SD0_WP),
+ PINMUX_IPSR_GPSR(IP10_15_12, SD0_WP),
PINMUX_IPSR_MSEL(IP10_15_12, SDA2_B, SEL_I2C2_1),
- PINMUX_IPSR_DATA(IP10_19_16, SD1_CD),
+ PINMUX_IPSR_GPSR(IP10_19_16, SD1_CD),
PINMUX_IPSR_MSEL(IP10_19_16, SIM0_CLK_B, SEL_SIMCARD_1),
- PINMUX_IPSR_DATA(IP10_23_20, SD1_WP),
+ PINMUX_IPSR_GPSR(IP10_23_20, SD1_WP),
PINMUX_IPSR_MSEL(IP10_23_20, SIM0_D_B, SEL_SIMCARD_1),
- PINMUX_IPSR_DATA(IP10_27_24, SCK0),
+ PINMUX_IPSR_GPSR(IP10_27_24, SCK0),
PINMUX_IPSR_MSEL(IP10_27_24, HSCK1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP10_27_24, MSIOF1_SS2_B, SEL_MSIOF1_1),
PINMUX_IPSR_MSEL(IP10_27_24, AUDIO_CLKC_B, SEL_ADG_1),
@@ -1057,38 +1056,38 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_27_24, SIM0_RST_B, SEL_SIMCARD_1),
PINMUX_IPSR_MSEL(IP10_27_24, STP_OPWM_0_C, SEL_SSP1_0_2),
PINMUX_IPSR_MSEL(IP10_27_24, RIF0_CLK_B, SEL_DRIF0_1),
- PINMUX_IPSR_DATA(IP10_27_24, ADICHS2),
+ PINMUX_IPSR_GPSR(IP10_27_24, ADICHS2),
- PINMUX_IPSR_DATA(IP10_31_28, RX0),
+ PINMUX_IPSR_GPSR(IP10_31_28, RX0),
PINMUX_IPSR_MSEL(IP10_31_28, HRX1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP10_31_28, TS_SCK0_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP10_31_28, STP_ISCLK_0_C, SEL_SSP1_0_2),
PINMUX_IPSR_MSEL(IP10_31_28, RIF0_D0_B, SEL_DRIF0_1),
/* IPSR11 */
- PINMUX_IPSR_DATA(IP11_3_0, TX0),
+ PINMUX_IPSR_GPSR(IP11_3_0, TX0),
PINMUX_IPSR_MSEL(IP11_3_0, HTX1_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP11_3_0, TS_SPSYNC0_C, SEL_TSIF0_2),
PINMUX_IPSR_MSEL(IP11_3_0, STP_ISSYNC_0_C, SEL_SSP1_0_2),
PINMUX_IPSR_MSEL(IP11_3_0, RIF0_D1_B, SEL_DRIF0_1),
- PINMUX_IPSR_DATA(IP11_7_4, CTS0_N),
+ PINMUX_IPSR_GPSR(IP11_7_4, CTS0_N),
PINMUX_IPSR_MSEL(IP11_7_4, HCTS1_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP11_7_4, MSIOF1_SYNC_B, SEL_MSIOF1_1),
PINMUX_IPSR_MSEL(IP11_7_4, TS_SPSYNC1_C, SEL_TSIF1_2),
PINMUX_IPSR_MSEL(IP11_7_4, STP_ISSYNC_1_C, SEL_SSP1_1_2),
PINMUX_IPSR_MSEL(IP11_7_4, RIF1_SYNC_B, SEL_DRIF1_1),
PINMUX_IPSR_MSEL(IP11_7_4, AUDIO_CLKOUT_C, SEL_ADG_2),
- PINMUX_IPSR_DATA(IP11_7_4, ADICS_SAMP),
+ PINMUX_IPSR_GPSR(IP11_7_4, ADICS_SAMP),
- PINMUX_IPSR_DATA(IP11_11_8, RTS0_N_TANS),
+ PINMUX_IPSR_GPSR(IP11_11_8, RTS0_N_TANS),
PINMUX_IPSR_MSEL(IP11_11_8, HRTS1_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP11_11_8, MSIOF1_SS1_B, SEL_MSIOF1_1),
PINMUX_IPSR_MSEL(IP11_11_8, AUDIO_CLKA_B, SEL_ADG_1),
PINMUX_IPSR_MSEL(IP11_11_8, SCL2_A, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP11_11_8, STP_IVCXO27_1_C, SEL_SSP1_1_2),
PINMUX_IPSR_MSEL(IP11_11_8, RIF0_SYNC_B, SEL_DRIF0_1),
- PINMUX_IPSR_DATA(IP11_11_8, ADICHS1),
+ PINMUX_IPSR_GPSR(IP11_11_8, ADICHS1),
PINMUX_IPSR_MSEL(IP11_15_12, RX1_A, SEL_SCIF1_0),
PINMUX_IPSR_MSEL(IP11_15_12, HRX1_A, SEL_HSCIF1_0),
@@ -1102,29 +1101,29 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP11_19_16, STP_ISEN_0_C, SEL_SSP1_0_2),
PINMUX_IPSR_MSEL(IP11_19_16, RIF1_D0_C, SEL_DRIF1_2),
- PINMUX_IPSR_DATA(IP11_23_20, CTS1_N),
+ PINMUX_IPSR_GPSR(IP11_23_20, CTS1_N),
PINMUX_IPSR_MSEL(IP11_23_20, HCTS1_N_A, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP11_23_20, MSIOF1_RXD_B, SEL_MSIOF1_1),
PINMUX_IPSR_MSEL(IP11_23_20, TS_SDEN1_C, SEL_TSIF1_2),
PINMUX_IPSR_MSEL(IP11_23_20, STP_ISEN_1_C, SEL_SSP1_1_2),
PINMUX_IPSR_MSEL(IP11_23_20, RIF1_D0_B, SEL_DRIF1_1),
- PINMUX_IPSR_DATA(IP11_23_20, ADIDATA),
+ PINMUX_IPSR_GPSR(IP11_23_20, ADIDATA),
- PINMUX_IPSR_DATA(IP11_27_24, RTS1_N_TANS),
+ PINMUX_IPSR_GPSR(IP11_27_24, RTS1_N_TANS),
PINMUX_IPSR_MSEL(IP11_27_24, HRTS1_N_A, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP11_27_24, MSIOF1_TXD_B, SEL_MSIOF1_1),
PINMUX_IPSR_MSEL(IP11_27_24, TS_SDAT1_C, SEL_TSIF1_2),
PINMUX_IPSR_MSEL(IP11_27_24, STP_ISD_1_C, SEL_SSP1_1_2),
PINMUX_IPSR_MSEL(IP11_27_24, RIF1_D1_B, SEL_DRIF1_1),
- PINMUX_IPSR_DATA(IP11_27_24, ADICHS0),
+ PINMUX_IPSR_GPSR(IP11_27_24, ADICHS0),
- PINMUX_IPSR_DATA(IP11_31_28, SCK2),
+ PINMUX_IPSR_GPSR(IP11_31_28, SCK2),
PINMUX_IPSR_MSEL(IP11_31_28, SCIF_CLK_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP11_31_28, MSIOF1_SCK_B, SEL_MSIOF1_1),
PINMUX_IPSR_MSEL(IP11_31_28, TS_SCK1_C, SEL_TSIF1_2),
PINMUX_IPSR_MSEL(IP11_31_28, STP_ISCLK_1_C, SEL_SSP1_1_2),
PINMUX_IPSR_MSEL(IP11_31_28, RIF1_CLK_B, SEL_DRIF1_1),
- PINMUX_IPSR_DATA(IP11_31_28, ADICLK),
+ PINMUX_IPSR_GPSR(IP11_31_28, ADICLK),
/* IPSR12 */
PINMUX_IPSR_MSEL(IP12_3_0, TX2_A, SEL_SCIF2_0),
@@ -1141,7 +1140,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP12_7_4, RIF1_SYNC_C, SEL_DRIF1_2),
PINMUX_IPSR_MSEL(IP12_7_4, FSO_CFE_1_B, SEL_FSO_1),
- PINMUX_IPSR_DATA(IP12_11_8, HSCK0),
+ PINMUX_IPSR_GPSR(IP12_11_8, HSCK0),
PINMUX_IPSR_MSEL(IP12_11_8, MSIOF1_SCK_D, SEL_MSIOF1_3),
PINMUX_IPSR_MSEL(IP12_11_8, AUDIO_CLKB_A, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP12_11_8, SSI_SDATA1_B, SEL_SSI_1),
@@ -1149,21 +1148,21 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP12_11_8, STP_ISCLK_0_D, SEL_SSP1_0_3),
PINMUX_IPSR_MSEL(IP12_11_8, RIF0_CLK_C, SEL_DRIF0_2),
- PINMUX_IPSR_DATA(IP12_15_12, HRX0),
+ PINMUX_IPSR_GPSR(IP12_15_12, HRX0),
PINMUX_IPSR_MSEL(IP12_15_12, MSIOF1_RXD_D, SEL_MSIOF1_3),
PINMUX_IPSR_MSEL(IP12_15_12, SSI_SDATA2_B, SEL_SSI_1),
PINMUX_IPSR_MSEL(IP12_15_12, TS_SDEN0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP12_15_12, STP_ISEN_0_D, SEL_SSP1_0_3),
PINMUX_IPSR_MSEL(IP12_15_12, RIF0_D0_C, SEL_DRIF0_2),
- PINMUX_IPSR_DATA(IP12_19_16, HTX0),
+ PINMUX_IPSR_GPSR(IP12_19_16, HTX0),
PINMUX_IPSR_MSEL(IP12_19_16, MSIOF1_TXD_D, SEL_MSIOF1_3),
PINMUX_IPSR_MSEL(IP12_19_16, SSI_SDATA9_B, SEL_SSI_1),
PINMUX_IPSR_MSEL(IP12_19_16, TS_SDAT0_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP12_19_16, STP_ISD_0_D, SEL_SSP1_0_3),
PINMUX_IPSR_MSEL(IP12_19_16, RIF0_D1_C, SEL_DRIF0_2),
- PINMUX_IPSR_DATA(IP12_23_20, HCTS0_N),
+ PINMUX_IPSR_GPSR(IP12_23_20, HCTS0_N),
PINMUX_IPSR_MSEL(IP12_23_20, RX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP12_23_20, MSIOF1_SYNC_D, SEL_MSIOF1_3),
PINMUX_IPSR_MSEL(IP12_23_20, SSI_SCK9_A, SEL_SSI_0),
@@ -1172,7 +1171,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP12_23_20, RIF0_SYNC_C, SEL_DRIF0_2),
PINMUX_IPSR_MSEL(IP12_23_20, AUDIO_CLKOUT1_A, SEL_ADG_0),
- PINMUX_IPSR_DATA(IP12_27_24, HRTS0_N),
+ PINMUX_IPSR_GPSR(IP12_27_24, HRTS0_N),
PINMUX_IPSR_MSEL(IP12_27_24, TX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP12_27_24, MSIOF1_SS1_D, SEL_MSIOF1_3),
PINMUX_IPSR_MSEL(IP12_27_24, SSI_WS9_A, SEL_SSI_0),
@@ -1180,20 +1179,20 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP12_27_24, BPFCLK_A, SEL_FM_0),
PINMUX_IPSR_MSEL(IP12_27_24, AUDIO_CLKOUT2_A, SEL_ADG_0),
- PINMUX_IPSR_DATA(IP12_31_28, MSIOF0_SYNC),
+ PINMUX_IPSR_GPSR(IP12_31_28, MSIOF0_SYNC),
PINMUX_IPSR_MSEL(IP12_31_28, AUDIO_CLKOUT_A, SEL_ADG_0),
/* IPSR13 */
- PINMUX_IPSR_DATA(IP13_3_0, MSIOF0_SS1),
- PINMUX_IPSR_DATA(IP13_3_0, RX5),
+ PINMUX_IPSR_GPSR(IP13_3_0, MSIOF0_SS1),
+ PINMUX_IPSR_GPSR(IP13_3_0, RX5),
PINMUX_IPSR_MSEL(IP13_3_0, AUDIO_CLKA_C, SEL_ADG_2),
PINMUX_IPSR_MSEL(IP13_3_0, SSI_SCK2_A, SEL_SSI_0),
PINMUX_IPSR_MSEL(IP13_3_0, STP_IVCXO27_0_C, SEL_SSP1_0_2),
PINMUX_IPSR_MSEL(IP13_3_0, AUDIO_CLKOUT3_A, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP13_3_0, TCLK1_B, SEL_TIMER_TMU_1),
- PINMUX_IPSR_DATA(IP13_7_4, MSIOF0_SS2),
- PINMUX_IPSR_DATA(IP13_7_4, TX5),
+ PINMUX_IPSR_GPSR(IP13_7_4, MSIOF0_SS2),
+ PINMUX_IPSR_GPSR(IP13_7_4, TX5),
PINMUX_IPSR_MSEL(IP13_7_4, MSIOF1_SS2_D, SEL_MSIOF1_3),
PINMUX_IPSR_MSEL(IP13_7_4, AUDIO_CLKC_A, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP13_7_4, SSI_WS2_A, SEL_SSI_0),
@@ -1201,26 +1200,26 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_7_4, AUDIO_CLKOUT_D, SEL_ADG_3),
PINMUX_IPSR_MSEL(IP13_7_4, SPEEDIN_B, SEL_SPEED_PULSE_1),
- PINMUX_IPSR_DATA(IP13_11_8, MLB_CLK),
+ PINMUX_IPSR_GPSR(IP13_11_8, MLB_CLK),
PINMUX_IPSR_MSEL(IP13_11_8, MSIOF1_SCK_F, SEL_MSIOF1_5),
PINMUX_IPSR_MSEL(IP13_11_8, SCL1_B, SEL_I2C1_1),
- PINMUX_IPSR_DATA(IP13_15_12, MLB_SIG),
+ PINMUX_IPSR_GPSR(IP13_15_12, MLB_SIG),
PINMUX_IPSR_MSEL(IP13_15_12, RX1_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP13_15_12, MSIOF1_SYNC_F, SEL_MSIOF1_5),
PINMUX_IPSR_MSEL(IP13_15_12, SDA1_B, SEL_I2C1_1),
- PINMUX_IPSR_DATA(IP13_19_16, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP13_19_16, MLB_DAT),
PINMUX_IPSR_MSEL(IP13_19_16, TX1_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP13_19_16, MSIOF1_RXD_F, SEL_MSIOF1_5),
- PINMUX_IPSR_DATA(IP13_23_20, SSI_SCK0129),
+ PINMUX_IPSR_GPSR(IP13_23_20, SSI_SCK01239),
PINMUX_IPSR_MSEL(IP13_23_20, MSIOF1_TXD_F, SEL_MSIOF1_5),
- PINMUX_IPSR_DATA(IP13_27_24, SSI_WS0129),
+ PINMUX_IPSR_GPSR(IP13_27_24, SSI_WS01239),
PINMUX_IPSR_MSEL(IP13_27_24, MSIOF1_SS1_F, SEL_MSIOF1_5),
- PINMUX_IPSR_DATA(IP13_31_28, SSI_SDATA0),
+ PINMUX_IPSR_GPSR(IP13_31_28, SSI_SDATA0),
PINMUX_IPSR_MSEL(IP13_31_28, MSIOF1_SS2_F, SEL_MSIOF1_5),
/* IPSR14 */
@@ -1229,16 +1228,16 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_7_4, SSI_SDATA2_A, SEL_SSI_0),
PINMUX_IPSR_MSEL(IP14_7_4, SSI_SCK1_B, SEL_SSI_1),
- PINMUX_IPSR_DATA(IP14_11_8, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP14_11_8, SSI_SCK34),
PINMUX_IPSR_MSEL(IP14_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP14_11_8, STP_OPWM_0_A, SEL_SSP1_0_0),
- PINMUX_IPSR_DATA(IP14_15_12, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP14_15_12, SSI_WS34),
PINMUX_IPSR_MSEL(IP14_15_12, HCTS2_N_A, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP14_15_12, MSIOF1_SS2_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP14_15_12, STP_IVCXO27_0_A, SEL_SSP1_0_0),
- PINMUX_IPSR_DATA(IP14_19_16, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP14_19_16, SSI_SDATA3),
PINMUX_IPSR_MSEL(IP14_19_16, HRTS2_N_A, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP14_19_16, MSIOF1_TXD_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP14_19_16, TS_SCK0_A, SEL_TSIF0_0),
@@ -1246,7 +1245,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_19_16, RIF0_D1_A, SEL_DRIF0_0),
PINMUX_IPSR_MSEL(IP14_19_16, RIF2_D0_A, SEL_DRIF2_0),
- PINMUX_IPSR_DATA(IP14_23_20, SSI_SCK4),
+ PINMUX_IPSR_GPSR(IP14_23_20, SSI_SCK4),
PINMUX_IPSR_MSEL(IP14_23_20, HRX2_A, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP14_23_20, MSIOF1_SCK_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP14_23_20, TS_SDAT0_A, SEL_TSIF0_0),
@@ -1254,7 +1253,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_23_20, RIF0_CLK_A, SEL_DRIF0_0),
PINMUX_IPSR_MSEL(IP14_23_20, RIF2_CLK_A, SEL_DRIF2_0),
- PINMUX_IPSR_DATA(IP14_27_24, SSI_WS4),
+ PINMUX_IPSR_GPSR(IP14_27_24, SSI_WS4),
PINMUX_IPSR_MSEL(IP14_27_24, HTX2_A, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP14_27_24, MSIOF1_SYNC_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP14_27_24, TS_SDEN0_A, SEL_TSIF0_0),
@@ -1262,7 +1261,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_27_24, RIF0_SYNC_A, SEL_DRIF0_0),
PINMUX_IPSR_MSEL(IP14_27_24, RIF2_SYNC_A, SEL_DRIF2_0),
- PINMUX_IPSR_DATA(IP14_31_28, SSI_SDATA4),
+ PINMUX_IPSR_GPSR(IP14_31_28, SSI_SDATA4),
PINMUX_IPSR_MSEL(IP14_31_28, HSCK2_A, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP14_31_28, MSIOF1_RXD_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP14_31_28, TS_SPSYNC0_A, SEL_TSIF0_0),
@@ -1271,19 +1270,19 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_31_28, RIF2_D1_A, SEL_DRIF2_0),
/* IPSR15 */
- PINMUX_IPSR_DATA(IP15_3_0, SSI_SCK6),
- PINMUX_IPSR_DATA(IP15_3_0, USB2_PWEN),
+ PINMUX_IPSR_GPSR(IP15_3_0, SSI_SCK6),
+ PINMUX_IPSR_GPSR(IP15_3_0, USB2_PWEN),
PINMUX_IPSR_MSEL(IP15_3_0, SIM0_RST_D, SEL_SIMCARD_3),
- PINMUX_IPSR_DATA(IP15_7_4, SSI_WS6),
- PINMUX_IPSR_DATA(IP15_7_4, USB2_OVC),
+ PINMUX_IPSR_GPSR(IP15_7_4, SSI_WS6),
+ PINMUX_IPSR_GPSR(IP15_7_4, USB2_OVC),
PINMUX_IPSR_MSEL(IP15_7_4, SIM0_D_D, SEL_SIMCARD_3),
- PINMUX_IPSR_DATA(IP15_11_8, SSI_SDATA6),
+ PINMUX_IPSR_GPSR(IP15_11_8, SSI_SDATA6),
PINMUX_IPSR_MSEL(IP15_11_8, SIM0_CLK_D, SEL_SIMCARD_3),
PINMUX_IPSR_MSEL(IP15_11_8, SATA_DEVSLP_A, SEL_SATA_0),
- PINMUX_IPSR_DATA(IP15_15_12, SSI_SCK78),
+ PINMUX_IPSR_GPSR(IP15_15_12, SSI_SCK78),
PINMUX_IPSR_MSEL(IP15_15_12, HRX2_B, SEL_HSCIF2_1),
PINMUX_IPSR_MSEL(IP15_15_12, MSIOF1_SCK_C, SEL_MSIOF1_2),
PINMUX_IPSR_MSEL(IP15_15_12, TS_SCK1_A, SEL_TSIF1_0),
@@ -1291,7 +1290,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP15_15_12, RIF1_CLK_A, SEL_DRIF1_0),
PINMUX_IPSR_MSEL(IP15_15_12, RIF3_CLK_A, SEL_DRIF3_0),
- PINMUX_IPSR_DATA(IP15_19_16, SSI_WS78),
+ PINMUX_IPSR_GPSR(IP15_19_16, SSI_WS78),
PINMUX_IPSR_MSEL(IP15_19_16, HTX2_B, SEL_HSCIF2_1),
PINMUX_IPSR_MSEL(IP15_19_16, MSIOF1_SYNC_C, SEL_MSIOF1_2),
PINMUX_IPSR_MSEL(IP15_19_16, TS_SDAT1_A, SEL_TSIF1_0),
@@ -1299,7 +1298,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP15_19_16, RIF1_SYNC_A, SEL_DRIF1_0),
PINMUX_IPSR_MSEL(IP15_19_16, RIF3_SYNC_A, SEL_DRIF3_0),
- PINMUX_IPSR_DATA(IP15_23_20, SSI_SDATA7),
+ PINMUX_IPSR_GPSR(IP15_23_20, SSI_SDATA7),
PINMUX_IPSR_MSEL(IP15_23_20, HCTS2_N_B, SEL_HSCIF2_1),
PINMUX_IPSR_MSEL(IP15_23_20, MSIOF1_RXD_C, SEL_MSIOF1_2),
PINMUX_IPSR_MSEL(IP15_23_20, TS_SDEN1_A, SEL_TSIF1_0),
@@ -1308,7 +1307,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP15_23_20, RIF3_D0_A, SEL_DRIF3_0),
PINMUX_IPSR_MSEL(IP15_23_20, TCLK2_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_DATA(IP15_27_24, SSI_SDATA8),
+ PINMUX_IPSR_GPSR(IP15_27_24, SSI_SDATA8),
PINMUX_IPSR_MSEL(IP15_27_24, HRTS2_N_B, SEL_HSCIF2_1),
PINMUX_IPSR_MSEL(IP15_27_24, MSIOF1_TXD_C, SEL_MSIOF1_2),
PINMUX_IPSR_MSEL(IP15_27_24, TS_SPSYNC1_A, SEL_TSIF1_0),
@@ -1321,13 +1320,13 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP15_31_28, MSIOF1_SS1_C, SEL_MSIOF1_2),
PINMUX_IPSR_MSEL(IP15_31_28, HSCK1_A, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP15_31_28, SSI_WS1_B, SEL_SSI_1),
- PINMUX_IPSR_DATA(IP15_31_28, SCK1),
+ PINMUX_IPSR_GPSR(IP15_31_28, SCK1),
PINMUX_IPSR_MSEL(IP15_31_28, STP_IVCXO27_1_A, SEL_SSP1_1_0),
- PINMUX_IPSR_DATA(IP15_31_28, SCK5),
+ PINMUX_IPSR_GPSR(IP15_31_28, SCK5),
/* IPSR16 */
PINMUX_IPSR_MSEL(IP16_3_0, AUDIO_CLKA_A, SEL_ADG_0),
- PINMUX_IPSR_DATA(IP16_3_0, CC5_OSCOUT),
+ PINMUX_IPSR_GPSR(IP16_3_0, CC5_OSCOUT),
PINMUX_IPSR_MSEL(IP16_7_4, AUDIO_CLKB_B, SEL_ADG_1),
PINMUX_IPSR_MSEL(IP16_7_4, SCIF_CLK_A, SEL_SCIF1_0),
@@ -1335,20 +1334,20 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_7_4, REMOCON_A, SEL_REMOCON_0),
PINMUX_IPSR_MSEL(IP16_7_4, TCLK1_A, SEL_TIMER_TMU_0),
- PINMUX_IPSR_DATA(IP16_11_8, USB0_PWEN),
+ PINMUX_IPSR_GPSR(IP16_11_8, USB0_PWEN),
PINMUX_IPSR_MSEL(IP16_11_8, SIM0_RST_C, SEL_SIMCARD_2),
PINMUX_IPSR_MSEL(IP16_11_8, TS_SCK1_D, SEL_TSIF1_3),
PINMUX_IPSR_MSEL(IP16_11_8, STP_ISCLK_1_D, SEL_SSP1_1_3),
PINMUX_IPSR_MSEL(IP16_11_8, BPFCLK_B, SEL_FM_1),
PINMUX_IPSR_MSEL(IP16_11_8, RIF3_CLK_B, SEL_DRIF3_1),
- PINMUX_IPSR_DATA(IP16_15_12, USB0_OVC),
+ PINMUX_IPSR_GPSR(IP16_15_12, USB0_OVC),
- PINMUX_IPSR_MSEL(IP16_11_8, SIM0_D_C, SEL_SIMCARD_2),
- PINMUX_IPSR_MSEL(IP16_11_8, TS_SDAT1_D, SEL_TSIF1_3),
- PINMUX_IPSR_MSEL(IP16_11_8, STP_ISD_1_D, SEL_SSP1_1_3),
- PINMUX_IPSR_MSEL(IP16_11_8, RIF3_SYNC_B, SEL_DRIF3_1),
+ PINMUX_IPSR_MSEL(IP16_15_12, SIM0_D_C, SEL_SIMCARD_2),
+ PINMUX_IPSR_MSEL(IP16_15_12, TS_SDAT1_D, SEL_TSIF1_3),
+ PINMUX_IPSR_MSEL(IP16_15_12, STP_ISD_1_D, SEL_SSP1_1_3),
+ PINMUX_IPSR_MSEL(IP16_15_12, RIF3_SYNC_B, SEL_DRIF3_1),
- PINMUX_IPSR_DATA(IP16_19_16, USB1_PWEN),
+ PINMUX_IPSR_GPSR(IP16_19_16, USB1_PWEN),
PINMUX_IPSR_MSEL(IP16_19_16, SIM0_CLK_C, SEL_SIMCARD_2),
PINMUX_IPSR_MSEL(IP16_19_16, SSI_SCK1_A, SEL_SSI_0),
PINMUX_IPSR_MSEL(IP16_19_16, TS_SCK0_E, SEL_TSIF0_4),
@@ -1357,7 +1356,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_19_16, RIF2_CLK_B, SEL_DRIF2_1),
PINMUX_IPSR_MSEL(IP16_19_16, SPEEDIN_A, SEL_SPEED_PULSE_0),
- PINMUX_IPSR_DATA(IP16_23_20, USB1_OVC),
+ PINMUX_IPSR_GPSR(IP16_23_20, USB1_OVC),
PINMUX_IPSR_MSEL(IP16_23_20, MSIOF1_SS2_C, SEL_MSIOF1_2),
PINMUX_IPSR_MSEL(IP16_23_20, SSI_WS1_A, SEL_SSI_0),
PINMUX_IPSR_MSEL(IP16_23_20, TS_SDAT0_E, SEL_TSIF0_4),
@@ -1366,7 +1365,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_23_20, RIF2_SYNC_B, SEL_DRIF2_1),
PINMUX_IPSR_MSEL(IP16_23_20, REMOCON_B, SEL_REMOCON_1),
- PINMUX_IPSR_DATA(IP16_27_24, USB30_PWEN),
+ PINMUX_IPSR_GPSR(IP16_27_24, USB30_PWEN),
PINMUX_IPSR_MSEL(IP16_27_24, AUDIO_CLKOUT_B, SEL_ADG_1),
PINMUX_IPSR_MSEL(IP16_27_24, SSI_SCK2_B, SEL_SSI_1),
PINMUX_IPSR_MSEL(IP16_27_24, TS_SDEN1_D, SEL_TSIF1_3),
@@ -1374,9 +1373,9 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_27_24, STP_OPWM_0_E, SEL_SSP1_0_4),
PINMUX_IPSR_MSEL(IP16_27_24, RIF3_D0_B, SEL_DRIF3_1),
PINMUX_IPSR_MSEL(IP16_27_24, TCLK2_B, SEL_TIMER_TMU_1),
- PINMUX_IPSR_DATA(IP16_27_24, TPU0TO0),
+ PINMUX_IPSR_GPSR(IP16_27_24, TPU0TO0),
- PINMUX_IPSR_DATA(IP16_31_28, USB30_OVC),
+ PINMUX_IPSR_GPSR(IP16_31_28, USB30_OVC),
PINMUX_IPSR_MSEL(IP16_31_28, AUDIO_CLKOUT1_B, SEL_ADG_1),
PINMUX_IPSR_MSEL(IP16_31_28, SSI_WS2_B, SEL_SSI_1),
PINMUX_IPSR_MSEL(IP16_31_28, TS_SPSYNC1_D, SEL_TSIF1_3),
@@ -1384,24 +1383,24 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP16_31_28, STP_IVCXO27_0_E, SEL_SSP1_0_4),
PINMUX_IPSR_MSEL(IP16_31_28, RIF3_D1_B, SEL_DRIF3_1),
PINMUX_IPSR_MSEL(IP16_31_28, FSO_TOE_B, SEL_FSO_1),
- PINMUX_IPSR_DATA(IP16_31_28, TPU0TO1),
+ PINMUX_IPSR_GPSR(IP16_31_28, TPU0TO1),
/* IPSR17 */
- PINMUX_IPSR_DATA(IP17_3_0, USB31_PWEN),
+ PINMUX_IPSR_GPSR(IP17_3_0, USB31_PWEN),
PINMUX_IPSR_MSEL(IP17_3_0, AUDIO_CLKOUT2_B, SEL_ADG_1),
PINMUX_IPSR_MSEL(IP17_3_0, SSI_SCK9_B, SEL_SSI_1),
PINMUX_IPSR_MSEL(IP17_3_0, TS_SDEN0_E, SEL_TSIF0_4),
PINMUX_IPSR_MSEL(IP17_3_0, STP_ISEN_0_E, SEL_SSP1_0_4),
PINMUX_IPSR_MSEL(IP17_3_0, RIF2_D0_B, SEL_DRIF2_1),
- PINMUX_IPSR_DATA(IP17_3_0, TPU0TO2),
+ PINMUX_IPSR_GPSR(IP17_3_0, TPU0TO2),
- PINMUX_IPSR_DATA(IP17_7_4, USB31_OVC),
+ PINMUX_IPSR_GPSR(IP17_7_4, USB31_OVC),
PINMUX_IPSR_MSEL(IP17_7_4, AUDIO_CLKOUT3_B, SEL_ADG_1),
PINMUX_IPSR_MSEL(IP17_7_4, SSI_WS9_B, SEL_SSI_1),
PINMUX_IPSR_MSEL(IP17_7_4, TS_SPSYNC0_E, SEL_TSIF0_4),
PINMUX_IPSR_MSEL(IP17_7_4, STP_ISSYNC_0_E, SEL_SSP1_0_4),
PINMUX_IPSR_MSEL(IP17_7_4, RIF2_D1_B, SEL_DRIF2_1),
- PINMUX_IPSR_DATA(IP17_7_4, TPU0TO3),
+ PINMUX_IPSR_GPSR(IP17_7_4, TPU0TO3),
/* I2C */
PINMUX_IPSR_NOGP(0, I2C_SEL_0_1),
@@ -1600,6 +1599,61 @@ static const unsigned int avb_avtp_capture_b_mux[] = {
AVB_AVTP_CAPTURE_B_MARK,
};
+/* - CAN ------------------------------------------------------------------ */
+static const unsigned int can0_data_a_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int can0_data_a_mux[] = {
+ CAN0_TX_A_MARK, CAN0_RX_A_MARK,
+};
+static const unsigned int can0_data_b_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int can0_data_b_mux[] = {
+ CAN0_TX_B_MARK, CAN0_RX_B_MARK,
+};
+static const unsigned int can1_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 26),
+};
+static const unsigned int can1_data_mux[] = {
+ CAN1_TX_MARK, CAN1_RX_MARK,
+};
+
+/* - CAN Clock -------------------------------------------------------------- */
+static const unsigned int can_clk_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+/* - CAN FD --------------------------------------------------------------- */
+static const unsigned int canfd0_data_a_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 24),
+};
+static const unsigned int canfd0_data_a_mux[] = {
+ CANFD0_TX_A_MARK, CANFD0_RX_A_MARK,
+};
+static const unsigned int canfd0_data_b_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int canfd0_data_b_mux[] = {
+ CANFD0_TX_B_MARK, CANFD0_RX_B_MARK,
+};
+static const unsigned int canfd1_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(1, 22), RCAR_GP_PIN(1, 26),
+};
+static const unsigned int canfd1_data_mux[] = {
+ CANFD1_TX_MARK, CANFD1_RX_MARK,
+};
+
/* - HSCIF0 ----------------------------------------------------------------- */
static const unsigned int hscif0_data_pins[] = {
/* RX, TX */
@@ -1836,6 +1890,50 @@ static const unsigned int i2c6_c_mux[] = {
SDA6_C_MARK, SCL6_C_MARK,
};
+/* - INTC-EX ---------------------------------------------------------------- */
+static const unsigned int intc_ex_irq0_pins[] = {
+ /* IRQ0 */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int intc_ex_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_ex_irq1_pins[] = {
+ /* IRQ1 */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int intc_ex_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_ex_irq2_pins[] = {
+ /* IRQ2 */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int intc_ex_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_ex_irq3_pins[] = {
+ /* IRQ3 */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int intc_ex_irq3_mux[] = {
+ IRQ3_MARK,
+};
+static const unsigned int intc_ex_irq4_pins[] = {
+ /* IRQ4 */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int intc_ex_irq4_mux[] = {
+ IRQ4_MARK,
+};
+static const unsigned int intc_ex_irq5_pins[] = {
+ /* IRQ5 */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int intc_ex_irq5_mux[] = {
+ IRQ5_MARK,
+};
+
/* - MSIOF0 ----------------------------------------------------------------- */
static const unsigned int msiof0_clk_pins[] = {
/* SCK */
@@ -2492,6 +2590,105 @@ static const unsigned int msiof3_rxd_d_mux[] = {
MSIOF3_RXD_D_MARK,
};
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int pwm0_mux[] = {
+ PWM0_MARK,
+};
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int pwm4_a_mux[] = {
+ PWM4_A_MARK,
+};
+static const unsigned int pwm4_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int pwm4_b_mux[] = {
+ PWM4_B_MARK,
+};
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int pwm5_a_mux[] = {
+ PWM5_A_MARK,
+};
+static const unsigned int pwm5_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int pwm5_b_mux[] = {
+ PWM5_B_MARK,
+};
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int pwm6_a_mux[] = {
+ PWM6_A_MARK,
+};
+static const unsigned int pwm6_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int pwm6_b_mux[] = {
+ PWM6_B_MARK,
+};
+
/* - SATA --------------------------------------------------------------------*/
static const unsigned int sata0_devslp_a_pins[] = {
/* DEVSLP */
@@ -2926,7 +3123,7 @@ static const unsigned int ssi01239_ctrl_pins[] = {
RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
};
static const unsigned int ssi01239_ctrl_mux[] = {
- SSI_SCK0129_MARK, SSI_WS0129_MARK,
+ SSI_SCK01239_MARK, SSI_WS01239_MARK,
};
static const unsigned int ssi1_data_a_pins[] = {
/* SDATA */
@@ -3090,6 +3287,31 @@ static const unsigned int ssi9_ctrl_b_mux[] = {
SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
};
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25),
+};
+static const unsigned int usb0_mux[] = {
+ USB0_PWEN_MARK, USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int usb1_mux[] = {
+ USB1_PWEN_MARK, USB1_OVC_MARK,
+};
+/* - USB2 ------------------------------------------------------------------- */
+static const unsigned int usb2_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int usb2_mux[] = {
+ USB2_PWEN_MARK, USB2_OVC_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
SH_PFC_PIN_GROUP(audio_clk_a_b),
@@ -3117,6 +3339,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(can0_data_a),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(canfd0_data_a),
+ SH_PFC_PIN_GROUP(canfd0_data_b),
+ SH_PFC_PIN_GROUP(canfd1_data),
SH_PFC_PIN_GROUP(hscif0_data),
SH_PFC_PIN_GROUP(hscif0_clk),
SH_PFC_PIN_GROUP(hscif0_ctrl),
@@ -3149,6 +3378,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(i2c6_a),
SH_PFC_PIN_GROUP(i2c6_b),
SH_PFC_PIN_GROUP(i2c6_c),
+ SH_PFC_PIN_GROUP(intc_ex_irq0),
+ SH_PFC_PIN_GROUP(intc_ex_irq1),
+ SH_PFC_PIN_GROUP(intc_ex_irq2),
+ SH_PFC_PIN_GROUP(intc_ex_irq3),
+ SH_PFC_PIN_GROUP(intc_ex_irq4),
+ SH_PFC_PIN_GROUP(intc_ex_irq5),
SH_PFC_PIN_GROUP(msiof0_clk),
SH_PFC_PIN_GROUP(msiof0_sync),
SH_PFC_PIN_GROUP(msiof0_ss1),
@@ -3242,6 +3477,19 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(msiof3_ss1_d),
SH_PFC_PIN_GROUP(msiof3_txd_d),
SH_PFC_PIN_GROUP(msiof3_rxd_d),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
SH_PFC_PIN_GROUP(sata0_devslp_a),
SH_PFC_PIN_GROUP(sata0_devslp_b),
SH_PFC_PIN_GROUP(scif0_data),
@@ -3322,6 +3570,9 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(ssi9_data_b),
SH_PFC_PIN_GROUP(ssi9_ctrl_a),
SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb2),
};
static const char * const audio_clk_groups[] = {
@@ -3356,6 +3607,28 @@ static const char * const avb_groups[] = {
"avb_avtp_capture_b",
};
+static const char * const can0_groups[] = {
+ "can0_data_a",
+ "can0_data_b",
+};
+
+static const char * const can1_groups[] = {
+ "can1_data",
+};
+
+static const char * const can_clk_groups[] = {
+ "can_clk",
+};
+
+static const char * const canfd0_groups[] = {
+ "canfd0_data_a",
+ "canfd0_data_b",
+};
+
+static const char * const canfd1_groups[] = {
+ "canfd1_data",
+};
+
static const char * const hscif0_groups[] = {
"hscif0_data",
"hscif0_clk",
@@ -3412,6 +3685,15 @@ static const char * const i2c6_groups[] = {
"i2c6_c",
};
+static const char * const intc_ex_groups[] = {
+ "intc_ex_irq0",
+ "intc_ex_irq1",
+ "intc_ex_irq2",
+ "intc_ex_irq3",
+ "intc_ex_irq4",
+ "intc_ex_irq5",
+};
+
static const char * const msiof0_groups[] = {
"msiof0_clk",
"msiof0_sync",
@@ -3517,6 +3799,40 @@ static const char * const msiof3_groups[] = {
"msiof3_rxd_d",
};
+static const char * const pwm0_groups[] = {
+ "pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4_a",
+ "pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5_a",
+ "pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6_a",
+ "pwm6_b",
+};
+
static const char * const sata0_groups[] = {
"sata0_devslp_a",
"sata0_devslp_b",
@@ -3636,9 +3952,26 @@ static const char * const ssi_groups[] = {
"ssi9_ctrl_b",
};
+static const char * const usb0_groups[] = {
+ "usb0",
+};
+
+static const char * const usb1_groups[] = {
+ "usb1",
+};
+
+static const char * const usb2_groups[] = {
+ "usb2",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(can_clk),
+ SH_PFC_FUNCTION(canfd0),
+ SH_PFC_FUNCTION(canfd1),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
SH_PFC_FUNCTION(hscif2),
@@ -3647,10 +3980,18 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
SH_PFC_FUNCTION(i2c6),
+ SH_PFC_FUNCTION(intc_ex),
SH_PFC_FUNCTION(msiof0),
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
SH_PFC_FUNCTION(sata0),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
@@ -3664,6 +4005,9 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi2),
SH_PFC_FUNCTION(sdhi3),
SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb2),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4213,7 +4557,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* RESERVED 3 */
0, 0,
- MOD_SEL2_2_1
+ /* RESERVED 2, 1 */
+ 0, 0, 0, 0,
MOD_SEL2_0 }
},
{ },
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index b0b328b3130b..6502e676d368 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -591,547 +591,547 @@ static const u16 pinmux_data[] = {
PINMUX_SINGLE(IRQ3_B),
/* IPSR0 */
- PINMUX_IPSR_DATA(IP0_1_0, A0),
- PINMUX_IPSR_DATA(IP0_1_0, ST0_CLKIN),
+ PINMUX_IPSR_GPSR(IP0_1_0, A0),
+ PINMUX_IPSR_GPSR(IP0_1_0, ST0_CLKIN),
PINMUX_IPSR_MSEL(IP0_1_0, LCD_DATA0_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_1_0, TCLKA_C, SEL_MTU2_CLK_1),
- PINMUX_IPSR_DATA(IP0_3_2, A1),
- PINMUX_IPSR_DATA(IP0_3_2, ST0_REQ),
+ PINMUX_IPSR_GPSR(IP0_3_2, A1),
+ PINMUX_IPSR_GPSR(IP0_3_2, ST0_REQ),
PINMUX_IPSR_MSEL(IP0_3_2, LCD_DATA1_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_3_2, TCLKB_C, SEL_MTU2_CLK_1),
- PINMUX_IPSR_DATA(IP0_5_4, A2),
- PINMUX_IPSR_DATA(IP0_5_4, ST0_SYC),
+ PINMUX_IPSR_GPSR(IP0_5_4, A2),
+ PINMUX_IPSR_GPSR(IP0_5_4, ST0_SYC),
PINMUX_IPSR_MSEL(IP0_5_4, LCD_DATA2_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_5_4, TCLKC_C, SEL_MTU2_CLK_1),
- PINMUX_IPSR_DATA(IP0_7_6, A3),
- PINMUX_IPSR_DATA(IP0_7_6, ST0_VLD),
+ PINMUX_IPSR_GPSR(IP0_7_6, A3),
+ PINMUX_IPSR_GPSR(IP0_7_6, ST0_VLD),
PINMUX_IPSR_MSEL(IP0_7_6, LCD_DATA3_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_7_6, TCLKD_C, SEL_MTU2_CLK_1),
- PINMUX_IPSR_DATA(IP0_9_8, A4),
- PINMUX_IPSR_DATA(IP0_9_8, ST0_D0),
+ PINMUX_IPSR_GPSR(IP0_9_8, A4),
+ PINMUX_IPSR_GPSR(IP0_9_8, ST0_D0),
PINMUX_IPSR_MSEL(IP0_9_8, LCD_DATA4_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_9_8, TIOC0A_C, SEL_MTU2_CH0_1),
- PINMUX_IPSR_DATA(IP0_11_10, A5),
- PINMUX_IPSR_DATA(IP0_11_10, ST0_D1),
+ PINMUX_IPSR_GPSR(IP0_11_10, A5),
+ PINMUX_IPSR_GPSR(IP0_11_10, ST0_D1),
PINMUX_IPSR_MSEL(IP0_11_10, LCD_DATA5_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_11_10, TIOC0B_C, SEL_MTU2_CH0_1),
- PINMUX_IPSR_DATA(IP0_13_12, A6),
- PINMUX_IPSR_DATA(IP0_13_12, ST0_D2),
+ PINMUX_IPSR_GPSR(IP0_13_12, A6),
+ PINMUX_IPSR_GPSR(IP0_13_12, ST0_D2),
PINMUX_IPSR_MSEL(IP0_13_12, LCD_DATA6_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_13_12, TIOC0C_C, SEL_MTU2_CH0_1),
- PINMUX_IPSR_DATA(IP0_15_14, A7),
- PINMUX_IPSR_DATA(IP0_15_14, ST0_D3),
+ PINMUX_IPSR_GPSR(IP0_15_14, A7),
+ PINMUX_IPSR_GPSR(IP0_15_14, ST0_D3),
PINMUX_IPSR_MSEL(IP0_15_14, LCD_DATA7_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_15_14, TIOC0D_C, SEL_MTU2_CH0_1),
- PINMUX_IPSR_DATA(IP0_17_16, A8),
- PINMUX_IPSR_DATA(IP0_17_16, ST0_D4),
+ PINMUX_IPSR_GPSR(IP0_17_16, A8),
+ PINMUX_IPSR_GPSR(IP0_17_16, ST0_D4),
PINMUX_IPSR_MSEL(IP0_17_16, LCD_DATA8_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_17_16, TIOC1A_C, SEL_MTU2_CH1_2),
- PINMUX_IPSR_DATA(IP0_19_18, A9),
- PINMUX_IPSR_DATA(IP0_19_18, ST0_D5),
+ PINMUX_IPSR_GPSR(IP0_19_18, A9),
+ PINMUX_IPSR_GPSR(IP0_19_18, ST0_D5),
PINMUX_IPSR_MSEL(IP0_19_18, LCD_DATA9_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_19_18, TIOC1B_C, SEL_MTU2_CH1_2),
- PINMUX_IPSR_DATA(IP0_21_20, A10),
- PINMUX_IPSR_DATA(IP0_21_20, ST0_D6),
+ PINMUX_IPSR_GPSR(IP0_21_20, A10),
+ PINMUX_IPSR_GPSR(IP0_21_20, ST0_D6),
PINMUX_IPSR_MSEL(IP0_21_20, LCD_DATA10_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_21_20, TIOC2A_C, SEL_MTU2_CH2_2),
- PINMUX_IPSR_DATA(IP0_23_22, A11),
- PINMUX_IPSR_DATA(IP0_23_22, ST0_D7),
+ PINMUX_IPSR_GPSR(IP0_23_22, A11),
+ PINMUX_IPSR_GPSR(IP0_23_22, ST0_D7),
PINMUX_IPSR_MSEL(IP0_23_22, LCD_DATA11_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_23_22, TIOC2B_C, SEL_MTU2_CH2_2),
- PINMUX_IPSR_DATA(IP0_25_24, A12),
+ PINMUX_IPSR_GPSR(IP0_25_24, A12),
PINMUX_IPSR_MSEL(IP0_25_24, LCD_DATA12_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_25_24, TIOC3A_C, SEL_MTU2_CH3_1),
- PINMUX_IPSR_DATA(IP0_27_26, A13),
+ PINMUX_IPSR_GPSR(IP0_27_26, A13),
PINMUX_IPSR_MSEL(IP0_27_26, LCD_DATA13_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_27_26, TIOC3B_C, SEL_MTU2_CH3_1),
- PINMUX_IPSR_DATA(IP0_29_28, A14),
+ PINMUX_IPSR_GPSR(IP0_29_28, A14),
PINMUX_IPSR_MSEL(IP0_29_28, LCD_DATA14_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_29_28, TIOC3C_C, SEL_MTU2_CH3_1),
- PINMUX_IPSR_DATA(IP0_31_30, A15),
- PINMUX_IPSR_DATA(IP0_31_30, ST0_VCO_CLKIN),
+ PINMUX_IPSR_GPSR(IP0_31_30, A15),
+ PINMUX_IPSR_GPSR(IP0_31_30, ST0_VCO_CLKIN),
PINMUX_IPSR_MSEL(IP0_31_30, LCD_DATA15_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP0_31_30, TIOC3D_C, SEL_MTU2_CH3_1),
/* IPSR1 */
- PINMUX_IPSR_DATA(IP1_1_0, A16),
- PINMUX_IPSR_DATA(IP1_1_0, ST0_PWM),
+ PINMUX_IPSR_GPSR(IP1_1_0, A16),
+ PINMUX_IPSR_GPSR(IP1_1_0, ST0_PWM),
PINMUX_IPSR_MSEL(IP1_1_0, LCD_DON_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP1_1_0, TIOC4A_C, SEL_MTU2_CH4_1),
- PINMUX_IPSR_DATA(IP1_3_2, A17),
- PINMUX_IPSR_DATA(IP1_3_2, ST1_VCO_CLKIN),
+ PINMUX_IPSR_GPSR(IP1_3_2, A17),
+ PINMUX_IPSR_GPSR(IP1_3_2, ST1_VCO_CLKIN),
PINMUX_IPSR_MSEL(IP1_3_2, LCD_CL1_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP1_3_2, TIOC4B_C, SEL_MTU2_CH4_1),
- PINMUX_IPSR_DATA(IP1_5_4, A18),
- PINMUX_IPSR_DATA(IP1_5_4, ST1_PWM),
+ PINMUX_IPSR_GPSR(IP1_5_4, A18),
+ PINMUX_IPSR_GPSR(IP1_5_4, ST1_PWM),
PINMUX_IPSR_MSEL(IP1_5_4, LCD_CL2_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP1_5_4, TIOC4C_C, SEL_MTU2_CH4_1),
- PINMUX_IPSR_DATA(IP1_7_6, A19),
- PINMUX_IPSR_DATA(IP1_7_6, ST1_CLKIN),
+ PINMUX_IPSR_GPSR(IP1_7_6, A19),
+ PINMUX_IPSR_GPSR(IP1_7_6, ST1_CLKIN),
PINMUX_IPSR_MSEL(IP1_7_6, LCD_CLK_A, SEL_LCDC_0),
PINMUX_IPSR_MSEL(IP1_7_6, TIOC4D_C, SEL_MTU2_CH4_1),
- PINMUX_IPSR_DATA(IP1_9_8, A20),
- PINMUX_IPSR_DATA(IP1_9_8, ST1_REQ),
+ PINMUX_IPSR_GPSR(IP1_9_8, A20),
+ PINMUX_IPSR_GPSR(IP1_9_8, ST1_REQ),
PINMUX_IPSR_MSEL(IP1_9_8, LCD_FLM_A, SEL_LCDC_0),
- PINMUX_IPSR_DATA(IP1_11_10, A21),
- PINMUX_IPSR_DATA(IP1_11_10, ST1_SYC),
+ PINMUX_IPSR_GPSR(IP1_11_10, A21),
+ PINMUX_IPSR_GPSR(IP1_11_10, ST1_SYC),
PINMUX_IPSR_MSEL(IP1_11_10, LCD_VCPWC_A, SEL_LCDC_0),
- PINMUX_IPSR_DATA(IP1_13_12, A22),
- PINMUX_IPSR_DATA(IP1_13_12, ST1_VLD),
+ PINMUX_IPSR_GPSR(IP1_13_12, A22),
+ PINMUX_IPSR_GPSR(IP1_13_12, ST1_VLD),
PINMUX_IPSR_MSEL(IP1_13_12, LCD_VEPWC_A, SEL_LCDC_0),
- PINMUX_IPSR_DATA(IP1_15_14, A23),
- PINMUX_IPSR_DATA(IP1_15_14, ST1_D0),
+ PINMUX_IPSR_GPSR(IP1_15_14, A23),
+ PINMUX_IPSR_GPSR(IP1_15_14, ST1_D0),
PINMUX_IPSR_MSEL(IP1_15_14, LCD_M_DISP_A, SEL_LCDC_0),
- PINMUX_IPSR_DATA(IP1_17_16, A24),
+ PINMUX_IPSR_GPSR(IP1_17_16, A24),
PINMUX_IPSR_MSEL(IP1_17_16, RX2_D, SEL_SCIF2_3),
- PINMUX_IPSR_DATA(IP1_17_16, ST1_D1),
+ PINMUX_IPSR_GPSR(IP1_17_16, ST1_D1),
- PINMUX_IPSR_DATA(IP1_19_18, A25),
+ PINMUX_IPSR_GPSR(IP1_19_18, A25),
- PINMUX_IPSR_MSEL(IP1_17_16, RX2_D, SEL_SCIF2_3),
- PINMUX_IPSR_DATA(IP1_17_16, ST1_D2),
+ PINMUX_IPSR_MSEL(IP1_19_18, TX2_D, SEL_SCIF2_3),
+ PINMUX_IPSR_GPSR(IP1_19_18, ST1_D2),
- PINMUX_IPSR_DATA(IP1_22_20, D0),
+ PINMUX_IPSR_GPSR(IP1_22_20, D0),
PINMUX_IPSR_MSEL(IP1_22_20, SD0_DAT0_A, SEL_SDHI0_0),
PINMUX_IPSR_MSEL(IP1_22_20, MMC_D0_A, SEL_MMC_0),
- PINMUX_IPSR_DATA(IP1_22_20, ST1_D3),
+ PINMUX_IPSR_GPSR(IP1_22_20, ST1_D3),
PINMUX_IPSR_MSEL(IP1_22_20, FD0_A, SEL_FLCTL_0),
- PINMUX_IPSR_DATA(IP1_25_23, D1),
+ PINMUX_IPSR_GPSR(IP1_25_23, D1),
- PINMUX_IPSR_MSEL(IP1_25_23, SD0_DAT0_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MSEL(IP1_25_23, SD0_DAT1_A, SEL_SDHI0_0),
PINMUX_IPSR_MSEL(IP1_25_23, MMC_D1_A, SEL_MMC_0),
- PINMUX_IPSR_DATA(IP1_25_23, ST1_D4),
+ PINMUX_IPSR_GPSR(IP1_25_23, ST1_D4),
PINMUX_IPSR_MSEL(IP1_25_23, FD1_A, SEL_FLCTL_0),
- PINMUX_IPSR_DATA(IP1_28_26, D2),
+ PINMUX_IPSR_GPSR(IP1_28_26, D2),
- PINMUX_IPSR_MSEL(IP1_28_26, SD0_DAT0_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MSEL(IP1_28_26, SD0_DAT2_A, SEL_SDHI0_0),
PINMUX_IPSR_MSEL(IP1_28_26, MMC_D2_A, SEL_MMC_0),
- PINMUX_IPSR_DATA(IP1_28_26, ST1_D5),
+ PINMUX_IPSR_GPSR(IP1_28_26, ST1_D5),
PINMUX_IPSR_MSEL(IP1_28_26, FD2_A, SEL_FLCTL_0),
- PINMUX_IPSR_DATA(IP1_31_29, D3),
+ PINMUX_IPSR_GPSR(IP1_31_29, D3),
- PINMUX_IPSR_MSEL(IP1_31_29, SD0_DAT0_A, SEL_SDHI0_0),
+ PINMUX_IPSR_MSEL(IP1_31_29, SD0_DAT3_A, SEL_SDHI0_0),
PINMUX_IPSR_MSEL(IP1_31_29, MMC_D3_A, SEL_MMC_0),
- PINMUX_IPSR_DATA(IP1_31_29, ST1_D6),
+ PINMUX_IPSR_GPSR(IP1_31_29, ST1_D6),
PINMUX_IPSR_MSEL(IP1_31_29, FD3_A, SEL_FLCTL_0),
/* IPSR2 */
- PINMUX_IPSR_DATA(IP2_2_0, D4),
+ PINMUX_IPSR_GPSR(IP2_2_0, D4),
PINMUX_IPSR_MSEL(IP2_2_0, SD0_CD_A, SEL_SDHI0_0),
PINMUX_IPSR_MSEL(IP2_2_0, MMC_D4_A, SEL_MMC_0),
- PINMUX_IPSR_DATA(IP2_2_0, ST1_D7),
+ PINMUX_IPSR_GPSR(IP2_2_0, ST1_D7),
PINMUX_IPSR_MSEL(IP2_2_0, FD4_A, SEL_FLCTL_0),
- PINMUX_IPSR_DATA(IP2_4_3, D5),
+ PINMUX_IPSR_GPSR(IP2_4_3, D5),
PINMUX_IPSR_MSEL(IP2_4_3, SD0_WP_A, SEL_SDHI0_0),
PINMUX_IPSR_MSEL(IP2_4_3, MMC_D5_A, SEL_MMC_0),
PINMUX_IPSR_MSEL(IP2_4_3, FD5_A, SEL_FLCTL_0),
- PINMUX_IPSR_DATA(IP2_7_5, D6),
+ PINMUX_IPSR_GPSR(IP2_7_5, D6),
PINMUX_IPSR_MSEL(IP2_7_5, RSPI_RSPCK_A, SEL_RSPI_0),
PINMUX_IPSR_MSEL(IP2_7_5, MMC_D6_A, SEL_MMC_0),
PINMUX_IPSR_MSEL(IP2_7_5, QSPCLK_A, SEL_RQSPI_0),
PINMUX_IPSR_MSEL(IP2_7_5, FD6_A, SEL_FLCTL_0),
- PINMUX_IPSR_DATA(IP2_10_8, D7),
+ PINMUX_IPSR_GPSR(IP2_10_8, D7),
PINMUX_IPSR_MSEL(IP2_10_8, RSPI_SSL_A, SEL_RSPI_0),
PINMUX_IPSR_MSEL(IP2_10_8, MMC_D7_A, SEL_MMC_0),
PINMUX_IPSR_MSEL(IP2_10_8, QSSL_A, SEL_RQSPI_0),
PINMUX_IPSR_MSEL(IP2_10_8, FD7_A, SEL_FLCTL_0),
- PINMUX_IPSR_DATA(IP2_13_11, D8),
+ PINMUX_IPSR_GPSR(IP2_13_11, D8),
PINMUX_IPSR_MSEL(IP2_13_11, SD0_CLK_A, SEL_SDHI0_0),
PINMUX_IPSR_MSEL(IP2_13_11, MMC_CLK_A, SEL_MMC_0),
PINMUX_IPSR_MSEL(IP2_13_11, QIO2_A, SEL_RQSPI_0),
PINMUX_IPSR_MSEL(IP2_13_11, FCE_A, SEL_FLCTL_0),
PINMUX_IPSR_MSEL(IP2_13_11, ET0_GTX_CLK_B, SEL_ET0_1),
- PINMUX_IPSR_DATA(IP2_16_14, D9),
+ PINMUX_IPSR_GPSR(IP2_16_14, D9),
PINMUX_IPSR_MSEL(IP2_16_14, SD0_CMD_A, SEL_SDHI0_0),
PINMUX_IPSR_MSEL(IP2_16_14, MMC_CMD_A, SEL_MMC_0),
PINMUX_IPSR_MSEL(IP2_16_14, QIO3_A, SEL_RQSPI_0),
PINMUX_IPSR_MSEL(IP2_16_14, FCLE_A, SEL_FLCTL_0),
PINMUX_IPSR_MSEL(IP2_16_14, ET0_ETXD1_B, SEL_ET0_1),
- PINMUX_IPSR_DATA(IP2_19_17, D10),
+ PINMUX_IPSR_GPSR(IP2_19_17, D10),
PINMUX_IPSR_MSEL(IP2_19_17, RSPI_MOSI_A, SEL_RSPI_0),
PINMUX_IPSR_MSEL(IP2_19_17, QMO_QIO0_A, SEL_RQSPI_0),
PINMUX_IPSR_MSEL(IP2_19_17, FALE_A, SEL_FLCTL_0),
PINMUX_IPSR_MSEL(IP2_19_17, ET0_ETXD2_B, SEL_ET0_1),
- PINMUX_IPSR_DATA(IP2_22_20, D11),
+ PINMUX_IPSR_GPSR(IP2_22_20, D11),
PINMUX_IPSR_MSEL(IP2_22_20, RSPI_MISO_A, SEL_RSPI_0),
PINMUX_IPSR_MSEL(IP2_22_20, QMI_QIO1_A, SEL_RQSPI_0),
PINMUX_IPSR_MSEL(IP2_22_20, FRE_A, SEL_FLCTL_0),
- PINMUX_IPSR_DATA(IP2_24_23, D12),
+ PINMUX_IPSR_GPSR(IP2_24_23, D12),
PINMUX_IPSR_MSEL(IP2_24_23, FWE_A, SEL_FLCTL_0),
PINMUX_IPSR_MSEL(IP2_24_23, ET0_ETXD5_B, SEL_ET0_1),
- PINMUX_IPSR_DATA(IP2_27_25, D13),
+ PINMUX_IPSR_GPSR(IP2_27_25, D13),
PINMUX_IPSR_MSEL(IP2_27_25, RX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP2_27_25, FRB_A, SEL_FLCTL_0),
PINMUX_IPSR_MSEL(IP2_27_25, ET0_ETXD6_B, SEL_ET0_1),
- PINMUX_IPSR_DATA(IP2_30_28, D14),
+ PINMUX_IPSR_GPSR(IP2_30_28, D14),
PINMUX_IPSR_MSEL(IP2_30_28, TX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MSEL(IP2_30_28, FSE_A, SEL_FLCTL_0),
PINMUX_IPSR_MSEL(IP2_30_28, ET0_TX_CLK_B, SEL_ET0_1),
/* IPSR3 */
- PINMUX_IPSR_DATA(IP3_1_0, D15),
+ PINMUX_IPSR_GPSR(IP3_1_0, D15),
PINMUX_IPSR_MSEL(IP3_1_0, SCK2_B, SEL_SCIF2_1),
- PINMUX_IPSR_DATA(IP3_2, CS1_A26),
+ PINMUX_IPSR_GPSR(IP3_2, CS1_A26),
PINMUX_IPSR_MSEL(IP3_2, QIO3_B, SEL_RQSPI_1),
- PINMUX_IPSR_DATA(IP3_5_3, EX_CS1),
+ PINMUX_IPSR_GPSR(IP3_5_3, EX_CS1),
- PINMUX_IPSR_MSEL(IP3_5_3, RX3_B, SEL_SCIF2_1),
+ PINMUX_IPSR_MSEL(IP3_5_3, RX3_B, SEL_SCIF3_1),
- PINMUX_IPSR_DATA(IP3_5_3, ATACS0),
+ PINMUX_IPSR_GPSR(IP3_5_3, ATACS0),
PINMUX_IPSR_MSEL(IP3_5_3, QIO2_B, SEL_RQSPI_1),
- PINMUX_IPSR_DATA(IP3_5_3, ET0_ETXD0),
+ PINMUX_IPSR_GPSR(IP3_5_3, ET0_ETXD0),
- PINMUX_IPSR_DATA(IP3_8_6, EX_CS2),
+ PINMUX_IPSR_GPSR(IP3_8_6, EX_CS2),
PINMUX_IPSR_MSEL(IP3_8_6, TX3_B, SEL_SCIF3_1),
- PINMUX_IPSR_DATA(IP3_8_6, ATACS1),
+ PINMUX_IPSR_GPSR(IP3_8_6, ATACS1),
PINMUX_IPSR_MSEL(IP3_8_6, QSPCLK_B, SEL_RQSPI_1),
PINMUX_IPSR_MSEL(IP3_8_6, ET0_GTX_CLK_A, SEL_ET0_0),
- PINMUX_IPSR_DATA(IP3_11_9, EX_CS3),
+ PINMUX_IPSR_GPSR(IP3_11_9, EX_CS3),
PINMUX_IPSR_MSEL(IP3_11_9, SD1_CD_A, SEL_SDHI1_0),
- PINMUX_IPSR_DATA(IP3_11_9, ATARD),
+ PINMUX_IPSR_GPSR(IP3_11_9, ATARD),
PINMUX_IPSR_MSEL(IP3_11_9, QMO_QIO0_B, SEL_RQSPI_1),
PINMUX_IPSR_MSEL(IP3_11_9, ET0_ETXD1_A, SEL_ET0_0),
- PINMUX_IPSR_DATA(IP3_14_12, EX_CS4),
+ PINMUX_IPSR_GPSR(IP3_14_12, EX_CS4),
PINMUX_IPSR_MSEL(IP3_14_12, SD1_WP_A, SEL_SDHI1_0),
- PINMUX_IPSR_DATA(IP3_14_12, ATAWR),
+ PINMUX_IPSR_GPSR(IP3_14_12, ATAWR),
PINMUX_IPSR_MSEL(IP3_14_12, QMI_QIO1_B, SEL_RQSPI_1),
PINMUX_IPSR_MSEL(IP3_14_12, ET0_ETXD2_A, SEL_ET0_0),
- PINMUX_IPSR_DATA(IP3_17_15, EX_CS5),
+ PINMUX_IPSR_GPSR(IP3_17_15, EX_CS5),
PINMUX_IPSR_MSEL(IP3_17_15, SD1_CMD_A, SEL_SDHI1_0),
- PINMUX_IPSR_DATA(IP3_17_15, ATADIR),
+ PINMUX_IPSR_GPSR(IP3_17_15, ATADIR),
PINMUX_IPSR_MSEL(IP3_17_15, QSSL_B, SEL_RQSPI_1),
PINMUX_IPSR_MSEL(IP3_17_15, ET0_ETXD3_A, SEL_ET0_0),
- PINMUX_IPSR_DATA(IP3_19_18, RD_WR),
- PINMUX_IPSR_DATA(IP3_19_18, TCLK0),
+ PINMUX_IPSR_GPSR(IP3_19_18, RD_WR),
+ PINMUX_IPSR_GPSR(IP3_19_18, TCLK0),
PINMUX_IPSR_MSEL(IP3_19_18, CAN_CLK_B, SEL_RCAN_CLK_1),
- PINMUX_IPSR_DATA(IP3_19_18, ET0_ETXD4),
+ PINMUX_IPSR_GPSR(IP3_19_18, ET0_ETXD4),
- PINMUX_IPSR_DATA(IP3_20, EX_WAIT0),
+ PINMUX_IPSR_GPSR(IP3_20, EX_WAIT0),
PINMUX_IPSR_MSEL(IP3_20, TCLK1_B, SEL_TMU_1),
- PINMUX_IPSR_DATA(IP3_23_21, EX_WAIT1),
+ PINMUX_IPSR_GPSR(IP3_23_21, EX_WAIT1),
PINMUX_IPSR_MSEL(IP3_23_21, SD1_DAT0_A, SEL_SDHI1_0),
- PINMUX_IPSR_DATA(IP3_23_21, DREQ2),
+ PINMUX_IPSR_GPSR(IP3_23_21, DREQ2),
PINMUX_IPSR_MSEL(IP3_23_21, CAN1_TX_C, SEL_RCAN1_2),
PINMUX_IPSR_MSEL(IP3_23_21, ET0_LINK_C, SEL_ET0_CTL_2),
PINMUX_IPSR_MSEL(IP3_23_21, ET0_ETXD5_A, SEL_ET0_0),
- PINMUX_IPSR_DATA(IP3_26_24, EX_WAIT2),
+ PINMUX_IPSR_GPSR(IP3_26_24, EX_WAIT2),
PINMUX_IPSR_MSEL(IP3_26_24, SD1_DAT1_A, SEL_SDHI1_0),
- PINMUX_IPSR_DATA(IP3_26_24, DACK2),
+ PINMUX_IPSR_GPSR(IP3_26_24, DACK2),
PINMUX_IPSR_MSEL(IP3_26_24, CAN1_RX_C, SEL_RCAN1_2),
PINMUX_IPSR_MSEL(IP3_26_24, ET0_MAGIC_C, SEL_ET0_CTL_2),
PINMUX_IPSR_MSEL(IP3_26_24, ET0_ETXD6_A, SEL_ET0_0),
- PINMUX_IPSR_DATA(IP3_29_27, DRACK0),
+ PINMUX_IPSR_GPSR(IP3_29_27, DRACK0),
PINMUX_IPSR_MSEL(IP3_29_27, SD1_DAT2_A, SEL_SDHI1_0),
- PINMUX_IPSR_DATA(IP3_29_27, ATAG),
+ PINMUX_IPSR_GPSR(IP3_29_27, ATAG),
PINMUX_IPSR_MSEL(IP3_29_27, TCLK1_A, SEL_TMU_0),
- PINMUX_IPSR_DATA(IP3_29_27, ET0_ETXD7),
+ PINMUX_IPSR_GPSR(IP3_29_27, ET0_ETXD7),
/* IPSR4 */
PINMUX_IPSR_MSEL(IP4_2_0, HCTS0_A, SEL_HSCIF_0),
PINMUX_IPSR_MSEL(IP4_2_0, CTS1_A, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP4_2_0, VI0_FIELD),
+ PINMUX_IPSR_GPSR(IP4_2_0, VI0_FIELD),
PINMUX_IPSR_MSEL(IP4_2_0, RMII0_RXD1_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP4_2_0, ET0_ERXD7),
+ PINMUX_IPSR_GPSR(IP4_2_0, ET0_ERXD7),
PINMUX_IPSR_MSEL(IP4_5_3, HRTS0_A, SEL_HSCIF_0),
PINMUX_IPSR_MSEL(IP4_5_3, RTS1_A, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP4_5_3, VI0_HSYNC),
+ PINMUX_IPSR_GPSR(IP4_5_3, VI0_HSYNC),
PINMUX_IPSR_MSEL(IP4_5_3, RMII0_TXD_EN_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP4_5_3, ET0_RX_DV),
+ PINMUX_IPSR_GPSR(IP4_5_3, ET0_RX_DV),
PINMUX_IPSR_MSEL(IP4_8_6, HSCK0_A, SEL_HSCIF_0),
PINMUX_IPSR_MSEL(IP4_8_6, SCK1_A, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP4_8_6, VI0_VSYNC),
+ PINMUX_IPSR_GPSR(IP4_8_6, VI0_VSYNC),
PINMUX_IPSR_MSEL(IP4_8_6, RMII0_RX_ER_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP4_8_6, ET0_RX_ER),
+ PINMUX_IPSR_GPSR(IP4_8_6, ET0_RX_ER),
PINMUX_IPSR_MSEL(IP4_11_9, HRX0_A, SEL_HSCIF_0),
PINMUX_IPSR_MSEL(IP4_11_9, RX1_A, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP4_11_9, VI0_DATA0_VI0_B0),
+ PINMUX_IPSR_GPSR(IP4_11_9, VI0_DATA0_VI0_B0),
PINMUX_IPSR_MSEL(IP4_11_9, RMII0_CRS_DV_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP4_11_9, ET0_CRS),
+ PINMUX_IPSR_GPSR(IP4_11_9, ET0_CRS),
PINMUX_IPSR_MSEL(IP4_14_12, HTX0_A, SEL_HSCIF_0),
PINMUX_IPSR_MSEL(IP4_14_12, TX1_A, SEL_SCIF1_0),
- PINMUX_IPSR_DATA(IP4_14_12, VI0_DATA1_VI0_B1),
+ PINMUX_IPSR_GPSR(IP4_14_12, VI0_DATA1_VI0_B1),
PINMUX_IPSR_MSEL(IP4_14_12, RMII0_MDC_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP4_14_12, ET0_COL),
+ PINMUX_IPSR_GPSR(IP4_14_12, ET0_COL),
PINMUX_IPSR_MSEL(IP4_17_15, CTS0_B, SEL_SCIF0_1),
- PINMUX_IPSR_DATA(IP4_17_15, VI0_DATA2_VI0_B2),
+ PINMUX_IPSR_GPSR(IP4_17_15, VI0_DATA2_VI0_B2),
PINMUX_IPSR_MSEL(IP4_17_15, RMII0_MDIO_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP4_17_15, ET0_MDC),
+ PINMUX_IPSR_GPSR(IP4_17_15, ET0_MDC),
PINMUX_IPSR_MSEL(IP4_19_18, RTS0_B, SEL_SCIF0_1),
- PINMUX_IPSR_DATA(IP4_19_18, VI0_DATA3_VI0_B3),
+ PINMUX_IPSR_GPSR(IP4_19_18, VI0_DATA3_VI0_B3),
PINMUX_IPSR_MSEL(IP4_19_18, ET0_MDIO_A, SEL_ET0_0),
PINMUX_IPSR_MSEL(IP4_21_20, SCK1_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP4_21_20, VI0_DATA4_VI0_B4),
+ PINMUX_IPSR_GPSR(IP4_21_20, VI0_DATA4_VI0_B4),
PINMUX_IPSR_MSEL(IP4_21_20, ET0_LINK_A, SEL_ET0_CTL_0),
PINMUX_IPSR_MSEL(IP4_23_22, RX1_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP4_23_22, VI0_DATA5_VI0_B5),
+ PINMUX_IPSR_GPSR(IP4_23_22, VI0_DATA5_VI0_B5),
PINMUX_IPSR_MSEL(IP4_23_22, ET0_MAGIC_A, SEL_ET0_CTL_0),
PINMUX_IPSR_MSEL(IP4_25_24, TX1_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP4_25_24, VI0_DATA6_VI0_G0),
+ PINMUX_IPSR_GPSR(IP4_25_24, VI0_DATA6_VI0_G0),
PINMUX_IPSR_MSEL(IP4_25_24, ET0_PHY_INT_A, SEL_ET0_CTL_0),
PINMUX_IPSR_MSEL(IP4_27_26, CTS1_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP4_27_26, VI0_DATA7_VI0_G1),
+ PINMUX_IPSR_GPSR(IP4_27_26, VI0_DATA7_VI0_G1),
PINMUX_IPSR_MSEL(IP4_29_28, RTS1_B, SEL_SCIF1_1),
- PINMUX_IPSR_DATA(IP4_29_28, VI0_G2),
+ PINMUX_IPSR_GPSR(IP4_29_28, VI0_G2),
PINMUX_IPSR_MSEL(IP4_31_30, SCK2_A, SEL_SCIF2_0),
- PINMUX_IPSR_DATA(IP4_31_30, VI0_G3),
+ PINMUX_IPSR_GPSR(IP4_31_30, VI0_G3),
/* IPSR5 */
PINMUX_IPSR_MSEL(IP5_2_0, SD2_CLK_A, SEL_SDHI2_0),
PINMUX_IPSR_MSEL(IP5_2_0, RX2_A, SEL_SCIF2_0),
- PINMUX_IPSR_DATA(IP5_2_0, VI0_G4),
+ PINMUX_IPSR_GPSR(IP5_2_0, VI0_G4),
PINMUX_IPSR_MSEL(IP5_2_0, ET0_RX_CLK_B, SEL_ET0_1),
PINMUX_IPSR_MSEL(IP5_5_3, SD2_CMD_A, SEL_SDHI2_0),
PINMUX_IPSR_MSEL(IP5_5_3, TX2_A, SEL_SCIF2_0),
- PINMUX_IPSR_DATA(IP5_5_3, VI0_G5),
+ PINMUX_IPSR_GPSR(IP5_5_3, VI0_G5),
PINMUX_IPSR_MSEL(IP5_5_3, ET0_ERXD2_B, SEL_ET0_1),
PINMUX_IPSR_MSEL(IP5_8_6, SD2_DAT0_A, SEL_SDHI2_0),
PINMUX_IPSR_MSEL(IP5_8_6, RX3_A, SEL_SCIF3_0),
- PINMUX_IPSR_DATA(IP4_8_6, VI0_R0),
- PINMUX_IPSR_MSEL(IP4_8_6, ET0_ERXD2_B, SEL_ET0_1),
+ PINMUX_IPSR_GPSR(IP5_8_6, VI0_R0),
+ PINMUX_IPSR_MSEL(IP5_8_6, ET0_ERXD3_B, SEL_ET0_1),
PINMUX_IPSR_MSEL(IP5_11_9, SD2_DAT1_A, SEL_SDHI2_0),
PINMUX_IPSR_MSEL(IP5_11_9, TX3_A, SEL_SCIF3_0),
- PINMUX_IPSR_DATA(IP5_11_9, VI0_R1),
+ PINMUX_IPSR_GPSR(IP5_11_9, VI0_R1),
PINMUX_IPSR_MSEL(IP5_11_9, ET0_MDIO_B, SEL_ET0_1),
PINMUX_IPSR_MSEL(IP5_14_12, SD2_DAT2_A, SEL_SDHI2_0),
PINMUX_IPSR_MSEL(IP5_14_12, RX4_A, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP5_14_12, VI0_R2),
+ PINMUX_IPSR_GPSR(IP5_14_12, VI0_R2),
PINMUX_IPSR_MSEL(IP5_14_12, ET0_LINK_B, SEL_ET0_CTL_1),
PINMUX_IPSR_MSEL(IP5_17_15, SD2_DAT3_A, SEL_SDHI2_0),
PINMUX_IPSR_MSEL(IP5_17_15, TX4_A, SEL_SCIF4_0),
- PINMUX_IPSR_DATA(IP5_17_15, VI0_R3),
+ PINMUX_IPSR_GPSR(IP5_17_15, VI0_R3),
PINMUX_IPSR_MSEL(IP5_17_15, ET0_MAGIC_B, SEL_ET0_CTL_1),
PINMUX_IPSR_MSEL(IP5_20_18, SD2_CD_A, SEL_SDHI2_0),
PINMUX_IPSR_MSEL(IP5_20_18, RX5_A, SEL_SCIF5_0),
- PINMUX_IPSR_DATA(IP5_20_18, VI0_R4),
+ PINMUX_IPSR_GPSR(IP5_20_18, VI0_R4),
PINMUX_IPSR_MSEL(IP5_20_18, ET0_PHY_INT_B, SEL_ET0_CTL_1),
PINMUX_IPSR_MSEL(IP5_22_21, SD2_WP_A, SEL_SDHI2_0),
PINMUX_IPSR_MSEL(IP5_22_21, TX5_A, SEL_SCIF5_0),
- PINMUX_IPSR_DATA(IP5_22_21, VI0_R5),
+ PINMUX_IPSR_GPSR(IP5_22_21, VI0_R5),
- PINMUX_IPSR_DATA(IP5_24_23, REF125CK),
- PINMUX_IPSR_DATA(IP5_24_23, ADTRG),
+ PINMUX_IPSR_GPSR(IP5_24_23, REF125CK),
+ PINMUX_IPSR_GPSR(IP5_24_23, ADTRG),
PINMUX_IPSR_MSEL(IP5_24_23, RX5_C, SEL_SCIF5_2),
- PINMUX_IPSR_DATA(IP5_26_25, REF50CK),
+ PINMUX_IPSR_GPSR(IP5_26_25, REF50CK),
PINMUX_IPSR_MSEL(IP5_26_25, CTS1_E, SEL_SCIF1_3),
PINMUX_IPSR_MSEL(IP5_26_25, HCTS0_D, SEL_HSCIF_3),
/* IPSR6 */
- PINMUX_IPSR_DATA(IP6_2_0, DU0_DR0),
+ PINMUX_IPSR_GPSR(IP6_2_0, DU0_DR0),
PINMUX_IPSR_MSEL(IP6_2_0, SCIF_CLK_B, SEL_SCIF_CLK_1),
PINMUX_IPSR_MSEL(IP6_2_0, HRX0_D, SEL_HSCIF_3),
PINMUX_IPSR_MSEL(IP6_2_0, IETX_A, SEL_IEBUS_0),
PINMUX_IPSR_MSEL(IP6_2_0, TCLKA_A, SEL_MTU2_CLK_0),
- PINMUX_IPSR_DATA(IP6_2_0, HIFD00),
+ PINMUX_IPSR_GPSR(IP6_2_0, HIFD00),
- PINMUX_IPSR_DATA(IP6_5_3, DU0_DR1),
+ PINMUX_IPSR_GPSR(IP6_5_3, DU0_DR1),
PINMUX_IPSR_MSEL(IP6_5_3, SCK0_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP6_5_3, HTX0_D, SEL_HSCIF_3),
PINMUX_IPSR_MSEL(IP6_5_3, IERX_A, SEL_IEBUS_0),
PINMUX_IPSR_MSEL(IP6_5_3, TCLKB_A, SEL_MTU2_CLK_0),
- PINMUX_IPSR_DATA(IP6_5_3, HIFD01),
+ PINMUX_IPSR_GPSR(IP6_5_3, HIFD01),
- PINMUX_IPSR_DATA(IP6_7_6, DU0_DR2),
+ PINMUX_IPSR_GPSR(IP6_7_6, DU0_DR2),
PINMUX_IPSR_MSEL(IP6_7_6, RX0_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP6_7_6, TCLKC_A, SEL_MTU2_CLK_0),
- PINMUX_IPSR_DATA(IP6_7_6, HIFD02),
+ PINMUX_IPSR_GPSR(IP6_7_6, HIFD02),
- PINMUX_IPSR_DATA(IP6_9_8, DU0_DR3),
+ PINMUX_IPSR_GPSR(IP6_9_8, DU0_DR3),
PINMUX_IPSR_MSEL(IP6_9_8, TX0_B, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP6_9_8, TCLKD_A, SEL_MTU2_CLK_0),
- PINMUX_IPSR_DATA(IP6_9_8, HIFD03),
+ PINMUX_IPSR_GPSR(IP6_9_8, HIFD03),
- PINMUX_IPSR_DATA(IP6_11_10, DU0_DR4),
+ PINMUX_IPSR_GPSR(IP6_11_10, DU0_DR4),
PINMUX_IPSR_MSEL(IP6_11_10, CTS0_C, SEL_SCIF0_2),
PINMUX_IPSR_MSEL(IP6_11_10, TIOC0A_A, SEL_MTU2_CH0_0),
- PINMUX_IPSR_DATA(IP6_11_10, HIFD04),
+ PINMUX_IPSR_GPSR(IP6_11_10, HIFD04),
- PINMUX_IPSR_DATA(IP6_13_12, DU0_DR5),
+ PINMUX_IPSR_GPSR(IP6_13_12, DU0_DR5),
PINMUX_IPSR_MSEL(IP6_13_12, RTS0_C, SEL_SCIF0_1),
PINMUX_IPSR_MSEL(IP6_13_12, TIOC0B_A, SEL_MTU2_CH0_0),
- PINMUX_IPSR_DATA(IP6_13_12, HIFD05),
+ PINMUX_IPSR_GPSR(IP6_13_12, HIFD05),
- PINMUX_IPSR_DATA(IP6_15_14, DU0_DR6),
+ PINMUX_IPSR_GPSR(IP6_15_14, DU0_DR6),
PINMUX_IPSR_MSEL(IP6_15_14, SCK1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP6_15_14, TIOC0C_A, SEL_MTU2_CH0_0),
- PINMUX_IPSR_DATA(IP6_15_14, HIFD06),
+ PINMUX_IPSR_GPSR(IP6_15_14, HIFD06),
- PINMUX_IPSR_DATA(IP6_17_16, DU0_DR7),
+ PINMUX_IPSR_GPSR(IP6_17_16, DU0_DR7),
PINMUX_IPSR_MSEL(IP6_17_16, RX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP6_17_16, TIOC0D_A, SEL_MTU2_CH0_0),
- PINMUX_IPSR_DATA(IP6_17_16, HIFD07),
+ PINMUX_IPSR_GPSR(IP6_17_16, HIFD07),
- PINMUX_IPSR_DATA(IP6_20_18, DU0_DG0),
+ PINMUX_IPSR_GPSR(IP6_20_18, DU0_DG0),
PINMUX_IPSR_MSEL(IP6_20_18, TX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP6_20_18, HSCK0_D, SEL_HSCIF_3),
PINMUX_IPSR_MSEL(IP6_20_18, IECLK_A, SEL_IEBUS_0),
PINMUX_IPSR_MSEL(IP6_20_18, TIOC1A_A, SEL_MTU2_CH1_0),
- PINMUX_IPSR_DATA(IP6_20_18, HIFD08),
+ PINMUX_IPSR_GPSR(IP6_20_18, HIFD08),
- PINMUX_IPSR_DATA(IP6_23_21, DU0_DG1),
+ PINMUX_IPSR_GPSR(IP6_23_21, DU0_DG1),
PINMUX_IPSR_MSEL(IP6_23_21, CTS1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP6_23_21, HRTS0_D, SEL_HSCIF_3),
PINMUX_IPSR_MSEL(IP6_23_21, TIOC1B_A, SEL_MTU2_CH1_0),
- PINMUX_IPSR_DATA(IP6_23_21, HIFD09),
+ PINMUX_IPSR_GPSR(IP6_23_21, HIFD09),
/* IPSR7 */
- PINMUX_IPSR_DATA(IP7_2_0, DU0_DG2),
+ PINMUX_IPSR_GPSR(IP7_2_0, DU0_DG2),
PINMUX_IPSR_MSEL(IP7_2_0, RTS1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP7_2_0, RMII0_MDC_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_2_0, TIOC2A_A, SEL_MTU2_CH2_0),
- PINMUX_IPSR_DATA(IP7_2_0, HIFD10),
+ PINMUX_IPSR_GPSR(IP7_2_0, HIFD10),
- PINMUX_IPSR_DATA(IP7_5_3, DU0_DG3),
+ PINMUX_IPSR_GPSR(IP7_5_3, DU0_DG3),
PINMUX_IPSR_MSEL(IP7_5_3, SCK2_C, SEL_SCIF2_2),
PINMUX_IPSR_MSEL(IP7_5_3, RMII0_MDIO_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_5_3, TIOC2B_A, SEL_MTU2_CH2_0),
- PINMUX_IPSR_DATA(IP7_5_3, HIFD11),
+ PINMUX_IPSR_GPSR(IP7_5_3, HIFD11),
- PINMUX_IPSR_DATA(IP7_8_6, DU0_DG4),
+ PINMUX_IPSR_GPSR(IP7_8_6, DU0_DG4),
PINMUX_IPSR_MSEL(IP7_8_6, RX2_C, SEL_SCIF2_2),
PINMUX_IPSR_MSEL(IP7_8_6, RMII0_CRS_DV_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_8_6, TIOC3A_A, SEL_MTU2_CH3_0),
- PINMUX_IPSR_DATA(IP7_8_6, HIFD12),
+ PINMUX_IPSR_GPSR(IP7_8_6, HIFD12),
- PINMUX_IPSR_DATA(IP7_11_9, DU0_DG5),
+ PINMUX_IPSR_GPSR(IP7_11_9, DU0_DG5),
PINMUX_IPSR_MSEL(IP7_11_9, TX2_C, SEL_SCIF2_2),
PINMUX_IPSR_MSEL(IP7_11_9, RMII0_RX_ER_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_11_9, TIOC3B_A, SEL_MTU2_CH3_0),
- PINMUX_IPSR_DATA(IP7_11_9, HIFD13),
+ PINMUX_IPSR_GPSR(IP7_11_9, HIFD13),
- PINMUX_IPSR_DATA(IP7_14_12, DU0_DG6),
+ PINMUX_IPSR_GPSR(IP7_14_12, DU0_DG6),
PINMUX_IPSR_MSEL(IP7_14_12, RX3_C, SEL_SCIF3_2),
PINMUX_IPSR_MSEL(IP7_14_12, RMII0_RXD0_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_14_12, TIOC3C_A, SEL_MTU2_CH3_0),
- PINMUX_IPSR_DATA(IP7_14_12, HIFD14),
+ PINMUX_IPSR_GPSR(IP7_14_12, HIFD14),
- PINMUX_IPSR_DATA(IP7_17_15, DU0_DG7),
+ PINMUX_IPSR_GPSR(IP7_17_15, DU0_DG7),
PINMUX_IPSR_MSEL(IP7_17_15, TX3_C, SEL_SCIF3_2),
PINMUX_IPSR_MSEL(IP7_17_15, RMII0_RXD1_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_17_15, TIOC3D_A, SEL_MTU2_CH3_0),
- PINMUX_IPSR_DATA(IP7_17_15, HIFD15),
+ PINMUX_IPSR_GPSR(IP7_17_15, HIFD15),
- PINMUX_IPSR_DATA(IP7_20_18, DU0_DB0),
+ PINMUX_IPSR_GPSR(IP7_20_18, DU0_DB0),
PINMUX_IPSR_MSEL(IP7_20_18, RX4_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP7_20_18, RMII0_TXD_EN_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_20_18, TIOC4A_A, SEL_MTU2_CH4_0),
- PINMUX_IPSR_DATA(IP7_20_18, HIFCS),
+ PINMUX_IPSR_GPSR(IP7_20_18, HIFCS),
- PINMUX_IPSR_DATA(IP7_23_21, DU0_DB1),
+ PINMUX_IPSR_GPSR(IP7_23_21, DU0_DB1),
PINMUX_IPSR_MSEL(IP7_23_21, TX4_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP7_23_21, RMII0_TXD0_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_23_21, TIOC4B_A, SEL_MTU2_CH4_0),
- PINMUX_IPSR_DATA(IP7_23_21, HIFWR),
+ PINMUX_IPSR_GPSR(IP7_23_21, HIFWR),
- PINMUX_IPSR_DATA(IP7_26_24, DU0_DB2),
+ PINMUX_IPSR_GPSR(IP7_26_24, DU0_DB2),
PINMUX_IPSR_MSEL(IP7_26_24, RX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP7_26_24, RMII0_TXD1_B, SEL_RMII_1),
PINMUX_IPSR_MSEL(IP7_26_24, TIOC4C_A, SEL_MTU2_CH4_0),
- PINMUX_IPSR_DATA(IP7_28_27, DU0_DB3),
+ PINMUX_IPSR_GPSR(IP7_28_27, DU0_DB3),
PINMUX_IPSR_MSEL(IP7_28_27, TX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP7_28_27, TIOC4D_A, SEL_MTU2_CH4_0),
- PINMUX_IPSR_DATA(IP7_28_27, HIFRD),
+ PINMUX_IPSR_GPSR(IP7_28_27, HIFRD),
- PINMUX_IPSR_DATA(IP7_30_29, DU0_DB4),
- PINMUX_IPSR_DATA(IP7_30_29, HIFINT),
+ PINMUX_IPSR_GPSR(IP7_30_29, DU0_DB4),
+ PINMUX_IPSR_GPSR(IP7_30_29, HIFINT),
/* IPSR8 */
- PINMUX_IPSR_DATA(IP8_1_0, DU0_DB5),
- PINMUX_IPSR_DATA(IP8_1_0, HIFDREQ),
+ PINMUX_IPSR_GPSR(IP8_1_0, DU0_DB5),
+ PINMUX_IPSR_GPSR(IP8_1_0, HIFDREQ),
- PINMUX_IPSR_DATA(IP8_3_2, DU0_DB6),
- PINMUX_IPSR_DATA(IP8_3_2, HIFRDY),
+ PINMUX_IPSR_GPSR(IP8_3_2, DU0_DB6),
+ PINMUX_IPSR_GPSR(IP8_3_2, HIFRDY),
- PINMUX_IPSR_DATA(IP8_5_4, DU0_DB7),
+ PINMUX_IPSR_GPSR(IP8_5_4, DU0_DB7),
PINMUX_IPSR_MSEL(IP8_5_4, SSI_SCK0_B, SEL_SSI0_1),
PINMUX_IPSR_MSEL(IP8_5_4, HIFEBL_B, SEL_HIF_1),
- PINMUX_IPSR_DATA(IP8_7_6, DU0_DOTCLKIN),
+ PINMUX_IPSR_GPSR(IP8_7_6, DU0_DOTCLKIN),
PINMUX_IPSR_MSEL(IP8_7_6, HSPI_CS0_C, SEL_HSPI_2),
PINMUX_IPSR_MSEL(IP8_7_6, SSI_WS0_B, SEL_SSI0_1),
- PINMUX_IPSR_DATA(IP8_9_8, DU0_DOTCLKOUT),
+ PINMUX_IPSR_GPSR(IP8_9_8, DU0_DOTCLKOUT),
PINMUX_IPSR_MSEL(IP8_9_8, HSPI_CLK0_C, SEL_HSPI_2),
PINMUX_IPSR_MSEL(IP8_9_8, SSI_SDATA0_B, SEL_SSI0_1),
- PINMUX_IPSR_DATA(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC),
+ PINMUX_IPSR_GPSR(IP8_11_10, DU0_EXHSYNC_DU0_HSYNC),
PINMUX_IPSR_MSEL(IP8_11_10, HSPI_TX0_C, SEL_HSPI_2),
PINMUX_IPSR_MSEL(IP8_11_10, SSI_SCK1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC),
+ PINMUX_IPSR_GPSR(IP8_13_12, DU0_EXVSYNC_DU0_VSYNC),
PINMUX_IPSR_MSEL(IP8_13_12, HSPI_RX0_C, SEL_HSPI_2),
PINMUX_IPSR_MSEL(IP8_13_12, SSI_WS1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP8_15_14, DU0_EXODDF_DU0_ODDF),
+ PINMUX_IPSR_GPSR(IP8_15_14, DU0_EXODDF_DU0_ODDF),
PINMUX_IPSR_MSEL(IP8_15_14, CAN0_RX_B, SEL_RCAN0_1),
PINMUX_IPSR_MSEL(IP8_15_14, HSCK0_B, SEL_HSCIF_1),
PINMUX_IPSR_MSEL(IP8_15_14, SSI_SDATA1_B, SEL_SSI1_1),
- PINMUX_IPSR_DATA(IP8_17_16, DU0_DISP),
+ PINMUX_IPSR_GPSR(IP8_17_16, DU0_DISP),
PINMUX_IPSR_MSEL(IP8_17_16, CAN0_TX_B, SEL_RCAN0_1),
PINMUX_IPSR_MSEL(IP8_17_16, HRX0_B, SEL_HSCIF_1),
PINMUX_IPSR_MSEL(IP8_17_16, AUDIO_CLKA_B, SEL_AUDIO_CLKA_1),
- PINMUX_IPSR_DATA(IP8_19_18, DU0_CDE),
+ PINMUX_IPSR_GPSR(IP8_19_18, DU0_CDE),
PINMUX_IPSR_MSEL(IP8_19_18, HTX0_B, SEL_HSCIF_1),
PINMUX_IPSR_MSEL(IP8_19_18, AUDIO_CLKB_B, SEL_AUDIO_CLKB_1),
PINMUX_IPSR_MSEL(IP8_19_18, LCD_VCPWC_B, SEL_LCDC_1),
@@ -1139,12 +1139,12 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP8_22_20, IRQ0_A, SEL_INTC_0),
PINMUX_IPSR_MSEL(IP8_22_20, HSPI_TX_B, SEL_HSPI_1),
PINMUX_IPSR_MSEL(IP8_22_20, RX3_E, SEL_SCIF3_4),
- PINMUX_IPSR_DATA(IP8_22_20, ET0_ERXD0),
+ PINMUX_IPSR_GPSR(IP8_22_20, ET0_ERXD0),
PINMUX_IPSR_MSEL(IP8_25_23, IRQ1_A, SEL_INTC_0),
PINMUX_IPSR_MSEL(IP8_25_23, HSPI_RX_B, SEL_HSPI_1),
PINMUX_IPSR_MSEL(IP8_25_23, TX3_E, SEL_SCIF3_4),
- PINMUX_IPSR_DATA(IP8_25_23, ET0_ERXD1),
+ PINMUX_IPSR_GPSR(IP8_25_23, ET0_ERXD1),
PINMUX_IPSR_MSEL(IP8_27_26, IRQ2_A, SEL_INTC_0),
PINMUX_IPSR_MSEL(IP8_27_26, CTS0_A, SEL_SCIF0_0),
@@ -1220,26 +1220,26 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP9_29_28, LCD_DATA14_B, SEL_LCDC_1),
/* IPSR10 */
- PINMUX_IPSR_DATA(IP10_2_0, SSI_SCK23),
+ PINMUX_IPSR_GPSR(IP10_2_0, SSI_SCK23),
PINMUX_IPSR_MSEL(IP10_2_0, VI1_4_B, SEL_VIN1_1),
PINMUX_IPSR_MSEL(IP10_2_0, RX1_D, SEL_SCIF1_3),
PINMUX_IPSR_MSEL(IP10_2_0, FCLE_B, SEL_FLCTL_1),
PINMUX_IPSR_MSEL(IP10_2_0, LCD_DATA15_B, SEL_LCDC_1),
- PINMUX_IPSR_DATA(IP10_5_3, SSI_WS23),
+ PINMUX_IPSR_GPSR(IP10_5_3, SSI_WS23),
PINMUX_IPSR_MSEL(IP10_5_3, VI1_5_B, SEL_VIN1_1),
PINMUX_IPSR_MSEL(IP10_5_3, TX1_D, SEL_SCIF1_3),
PINMUX_IPSR_MSEL(IP10_5_3, HSCK0_C, SEL_HSCIF_2),
PINMUX_IPSR_MSEL(IP10_5_3, FALE_B, SEL_FLCTL_1),
PINMUX_IPSR_MSEL(IP10_5_3, LCD_DON_B, SEL_LCDC_1),
- PINMUX_IPSR_DATA(IP10_8_6, SSI_SDATA2),
+ PINMUX_IPSR_GPSR(IP10_8_6, SSI_SDATA2),
PINMUX_IPSR_MSEL(IP10_8_6, VI1_6_B, SEL_VIN1_1),
PINMUX_IPSR_MSEL(IP10_8_6, HRX0_C, SEL_HSCIF_2),
PINMUX_IPSR_MSEL(IP10_8_6, FRE_B, SEL_FLCTL_1),
PINMUX_IPSR_MSEL(IP10_8_6, LCD_CL1_B, SEL_LCDC_1),
- PINMUX_IPSR_DATA(IP10_11_9, SSI_SDATA3),
+ PINMUX_IPSR_GPSR(IP10_11_9, SSI_SDATA3),
PINMUX_IPSR_MSEL(IP10_11_9, VI1_7_B, SEL_VIN1_1),
PINMUX_IPSR_MSEL(IP10_11_9, HTX0_C, SEL_HSCIF_2),
PINMUX_IPSR_MSEL(IP10_11_9, FWE_B, SEL_FLCTL_1),
@@ -1254,13 +1254,13 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_15, AUDIO_CLKB_A, SEL_AUDIO_CLKB_0),
PINMUX_IPSR_MSEL(IP10_15, LCD_CLK_B, SEL_LCDC_1),
- PINMUX_IPSR_DATA(IP10_18_16, AUDIO_CLKC),
+ PINMUX_IPSR_GPSR(IP10_18_16, AUDIO_CLKC),
PINMUX_IPSR_MSEL(IP10_18_16, SCK1_E, SEL_SCIF1_4),
PINMUX_IPSR_MSEL(IP10_18_16, HCTS0_C, SEL_HSCIF_2),
PINMUX_IPSR_MSEL(IP10_18_16, FRB_B, SEL_FLCTL_1),
PINMUX_IPSR_MSEL(IP10_18_16, LCD_VEPWC_B, SEL_LCDC_1),
- PINMUX_IPSR_DATA(IP10_21_19, AUDIO_CLKOUT),
+ PINMUX_IPSR_GPSR(IP10_21_19, AUDIO_CLKOUT),
PINMUX_IPSR_MSEL(IP10_21_19, TX1_E, SEL_SCIF1_4),
PINMUX_IPSR_MSEL(IP10_21_19, HRTS0_C, SEL_HSCIF_2),
PINMUX_IPSR_MSEL(IP10_21_19, FSE_B, SEL_FLCTL_1),
@@ -1271,85 +1271,85 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_24_23, CAN0_TX_A, SEL_RCAN0_0),
PINMUX_IPSR_MSEL(IP10_24_23, TX4_D, SEL_SCIF4_3),
- PINMUX_IPSR_DATA(IP10_24_23, MLB_CLK),
+ PINMUX_IPSR_GPSR(IP10_24_23, MLB_CLK),
PINMUX_IPSR_MSEL(IP10_25, CAN1_RX_A, SEL_RCAN1_0),
PINMUX_IPSR_MSEL(IP10_25, IRQ1_B, SEL_INTC_1),
PINMUX_IPSR_MSEL(IP10_27_26, CAN0_RX_A, SEL_RCAN0_0),
PINMUX_IPSR_MSEL(IP10_27_26, IRQ0_B, SEL_INTC_1),
- PINMUX_IPSR_DATA(IP10_27_26, MLB_SIG),
+ PINMUX_IPSR_GPSR(IP10_27_26, MLB_SIG),
PINMUX_IPSR_MSEL(IP10_29_28, CAN1_TX_A, SEL_RCAN1_0),
- PINMUX_IPSR_MSEL(IP10_29_28, TX5_C, SEL_SCIF1_2),
+ PINMUX_IPSR_MSEL(IP10_29_28, TX5_C, SEL_SCIF5_2),
- PINMUX_IPSR_DATA(IP10_29_28, MLB_DAT),
+ PINMUX_IPSR_GPSR(IP10_29_28, MLB_DAT),
/* IPSR11 */
- PINMUX_IPSR_DATA(IP11_0, SCL1),
+ PINMUX_IPSR_GPSR(IP11_0, SCL1),
PINMUX_IPSR_MSEL(IP11_0, SCIF_CLK_C, SEL_SCIF_CLK_2),
- PINMUX_IPSR_DATA(IP11_1, SDA1),
+ PINMUX_IPSR_GPSR(IP11_1, SDA1),
- PINMUX_IPSR_MSEL(IP11_0, RX1_E, SEL_SCIF1_4),
+ PINMUX_IPSR_MSEL(IP11_1, RX1_E, SEL_SCIF1_4),
- PINMUX_IPSR_DATA(IP11_2, SDA0),
+ PINMUX_IPSR_GPSR(IP11_2, SDA0),
PINMUX_IPSR_MSEL(IP11_2, HIFEBL_A, SEL_HIF_0),
- PINMUX_IPSR_DATA(IP11_3, SDSELF),
+ PINMUX_IPSR_GPSR(IP11_3, SDSELF),
PINMUX_IPSR_MSEL(IP11_3, RTS1_E, SEL_SCIF1_3),
PINMUX_IPSR_MSEL(IP11_6_4, SCIF_CLK_A, SEL_SCIF_CLK_0),
PINMUX_IPSR_MSEL(IP11_6_4, HSPI_CLK_A, SEL_HSPI_0),
- PINMUX_IPSR_DATA(IP11_6_4, VI0_CLK),
+ PINMUX_IPSR_GPSR(IP11_6_4, VI0_CLK),
PINMUX_IPSR_MSEL(IP11_6_4, RMII0_TXD0_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP11_6_4, ET0_ERXD4),
+ PINMUX_IPSR_GPSR(IP11_6_4, ET0_ERXD4),
PINMUX_IPSR_MSEL(IP11_9_7, SCK0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_9_7, HSPI_CS_A, SEL_HSPI_0),
- PINMUX_IPSR_DATA(IP11_9_7, VI0_CLKENB),
+ PINMUX_IPSR_GPSR(IP11_9_7, VI0_CLKENB),
PINMUX_IPSR_MSEL(IP11_9_7, RMII0_TXD1_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP11_9_7, ET0_ERXD5),
+ PINMUX_IPSR_GPSR(IP11_9_7, ET0_ERXD5),
PINMUX_IPSR_MSEL(IP11_11_10, RX0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_11_10, HSPI_RX_A, SEL_HSPI_0),
PINMUX_IPSR_MSEL(IP11_11_10, RMII0_RXD0_A, SEL_RMII_0),
- PINMUX_IPSR_DATA(IP11_11_10, ET0_ERXD6),
+ PINMUX_IPSR_GPSR(IP11_11_10, ET0_ERXD6),
PINMUX_IPSR_MSEL(IP11_12, TX0_A, SEL_SCIF0_0),
PINMUX_IPSR_MSEL(IP11_12, HSPI_TX_A, SEL_HSPI_0),
- PINMUX_IPSR_DATA(IP11_15_13, PENC1),
+ PINMUX_IPSR_GPSR(IP11_15_13, PENC1),
PINMUX_IPSR_MSEL(IP11_15_13, TX3_D, SEL_SCIF3_3),
PINMUX_IPSR_MSEL(IP11_15_13, CAN1_TX_B, SEL_RCAN1_1),
PINMUX_IPSR_MSEL(IP11_15_13, TX5_D, SEL_SCIF5_3),
PINMUX_IPSR_MSEL(IP11_15_13, IETX_B, SEL_IEBUS_1),
- PINMUX_IPSR_DATA(IP11_18_16, USB_OVC1),
+ PINMUX_IPSR_GPSR(IP11_18_16, USB_OVC1),
PINMUX_IPSR_MSEL(IP11_18_16, RX3_D, SEL_SCIF3_3),
PINMUX_IPSR_MSEL(IP11_18_16, CAN1_RX_B, SEL_RCAN1_1),
PINMUX_IPSR_MSEL(IP11_18_16, RX5_D, SEL_SCIF5_3),
PINMUX_IPSR_MSEL(IP11_18_16, IERX_B, SEL_IEBUS_1),
- PINMUX_IPSR_DATA(IP11_20_19, DREQ0),
+ PINMUX_IPSR_GPSR(IP11_20_19, DREQ0),
PINMUX_IPSR_MSEL(IP11_20_19, SD1_CLK_A, SEL_SDHI1_0),
- PINMUX_IPSR_DATA(IP11_20_19, ET0_TX_EN),
+ PINMUX_IPSR_GPSR(IP11_20_19, ET0_TX_EN),
- PINMUX_IPSR_DATA(IP11_22_21, DACK0),
+ PINMUX_IPSR_GPSR(IP11_22_21, DACK0),
PINMUX_IPSR_MSEL(IP11_22_21, SD1_DAT3_A, SEL_SDHI1_0),
- PINMUX_IPSR_DATA(IP11_22_21, ET0_TX_ER),
+ PINMUX_IPSR_GPSR(IP11_22_21, ET0_TX_ER),
- PINMUX_IPSR_DATA(IP11_25_23, DREQ1),
+ PINMUX_IPSR_GPSR(IP11_25_23, DREQ1),
PINMUX_IPSR_MSEL(IP11_25_23, HSPI_CLK_B, SEL_HSPI_1),
PINMUX_IPSR_MSEL(IP11_25_23, RX4_B, SEL_SCIF4_1),
PINMUX_IPSR_MSEL(IP11_25_23, ET0_PHY_INT_C, SEL_ET0_CTL_0),
PINMUX_IPSR_MSEL(IP11_25_23, ET0_TX_CLK_A, SEL_ET0_0),
- PINMUX_IPSR_DATA(IP11_27_26, DACK1),
+ PINMUX_IPSR_GPSR(IP11_27_26, DACK1),
PINMUX_IPSR_MSEL(IP11_27_26, HSPI_CS_B, SEL_HSPI_1),
PINMUX_IPSR_MSEL(IP11_27_26, TX4_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP11_27_26, ET0_RX_CLK_A, SEL_ET0_0),
- PINMUX_IPSR_DATA(IP11_28, PRESETOUT),
- PINMUX_IPSR_DATA(IP11_28, ST_CLKOUT),
+ PINMUX_IPSR_GPSR(IP11_28, PRESETOUT),
+ PINMUX_IPSR_GPSR(IP11_28, ST_CLKOUT),
};
static const struct sh_pfc_pin pinmux_pins[] = {
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 2123ab49d6a5..a490834e2089 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -100,10 +100,31 @@ struct pinmux_cfg_reg {
const u8 *var_field_width;
};
+/*
+ * Describe a config register consisting of several fields of the same width
+ * - name: Register name (unused, for documentation purposes only)
+ * - r: Physical register address
+ * - r_width: Width of the register (in bits)
+ * - f_width: Width of the fixed-width register fields (in bits)
+ * This macro must be followed by initialization data: For each register field
+ * (from left to right, i.e. MSB to LSB), 2^f_width enum IDs must be specified,
+ * one for each possible combination of the register field bit values.
+ */
#define PINMUX_CFG_REG(name, r, r_width, f_width) \
.reg = r, .reg_width = r_width, .field_width = f_width, \
.enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)])
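A minimal usage sketch (register name, address and enum IDs are invented
for illustration, not taken from any SoC): a 4-bit register split into
2-bit fields needs (4 / 2) * (1 << 2) = 8 enum IDs, four per field, MSB
field first:

	{ PINMUX_CFG_REG("XCR0", 0xfffc0000, 4, 2) {
		/* bits 3..2 */ X0_FN_A, X0_FN_B, X0_FN_C, X0_FN_D,
		/* bits 1..0 */ X1_FN_A, X1_FN_B, X1_FN_C, X1_FN_D }
	},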
+/*
+ * Describe a config register consisting of several fields of different widths
+ * - name: Register name (unused, for documentation purposes only)
+ * - r: Physical register address
+ * - r_width: Width of the register (in bits)
+ * - var_fw0, var_fwn...: List of widths of the register fields (in bits),
+ * from left to right (i.e. MSB to LSB)
+ * This macro must be followed by initialization data: For each register field
+ * (from left to right, i.e. MSB to LSB), 2^var_fwi enum IDs must be specified,
+ * one for each possible combination of the register field bit values.
+ */
#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
.reg = r, .reg_width = r_width, \
.var_field_width = (const u8 [r_width]) \
@@ -116,6 +137,14 @@ struct pinmux_data_reg {
const u16 *enum_ids;
};
+/*
+ * Describe a data register
+ * - name: Register name (unused, for documentation purposes only)
+ * - r: Physical register address
+ * - r_width: Width of the register (in bits)
+ * This macro must be followed by initialization data: For each register bit
+ * (from left to right, i.e. MSB to LSB), one enum ID must be specified.
+ */
#define PINMUX_DATA_REG(name, r, r_width) \
.reg = r, .reg_width = r_width, \
.enum_ids = (const u16 [r_width]) \
@@ -124,6 +153,10 @@ struct pinmux_irq {
const short *gpios;
};
+/*
+ * Describe the mapping from GPIOs to a single IRQ
+ * - ids...: List of GPIOs that are mapped to the same IRQ
+ */
#define PINMUX_IRQ(ids...) \
{ .gpios = (const short []) { ids, -1 } }
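Usage sketch (GPIO numbers invented for illustration); note that the
macro appends the -1 list terminator itself:

	PINMUX_IRQ(4, 5),	/* .gpios = (const short []) { 4, 5, -1 } */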
@@ -185,18 +218,65 @@ struct sh_pfc_soc_info {
* sh_pfc_soc_info pinmux_data array macros
*/
+/*
+ * Describe generic pinmux data
+ * - data_or_mark: *_DATA or *_MARK enum ID
+ * - ids...: List of enum IDs to associate with data_or_mark
+ */
#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
-#define PINMUX_IPSR_NOGP(ispr, fn) \
+/*
+ * Describe a pinmux configuration without GPIO function that needs
+ * configuration in a Peripheral Function Select Register (IPSR)
+ * - ipsr: IPSR field (unused, for documentation purposes only)
+ * - fn: Function name, referring to a field in the IPSR
+ */
+#define PINMUX_IPSR_NOGP(ipsr, fn) \
PINMUX_DATA(fn##_MARK, FN_##fn)
-#define PINMUX_IPSR_DATA(ipsr, fn) \
+
+/*
+ * Describe a pinmux configuration with GPIO function that needs configuration
+ * in both a Peripheral Function Select Register (IPSR) and in a
+ * GPIO/Peripheral Function Select Register (GPSR)
+ * - ipsr: IPSR field
+ * - fn: Function name, also referring to the IPSR field
+ */
+#define PINMUX_IPSR_GPSR(ipsr, fn) \
PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr)
-#define PINMUX_IPSR_NOGM(ispr, fn, ms) \
- PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ms)
-#define PINMUX_IPSR_NOFN(ipsr, fn, ms) \
- PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##ms)
-#define PINMUX_IPSR_MSEL(ipsr, fn, ms) \
- PINMUX_DATA(fn##_MARK, FN_##ms, FN_##ipsr, FN_##fn)
+
+/*
+ * Describe a pinmux configuration without GPIO function that needs
+ * configuration in a Peripheral Function Select Register (IPSR), and where the
+ * pinmux function has a representation in a Module Select Register (MOD_SEL).
+ * - ipsr: IPSR field (unused, for documentation purposes only)
+ * - fn: Function name, also referring to the IPSR field
+ * - msel: Module selector
+ */
+#define PINMUX_IPSR_NOGM(ipsr, fn, msel) \
+ PINMUX_DATA(fn##_MARK, FN_##fn, FN_##msel)
+
+/*
+ * Describe a pinmux configuration with GPIO function where the pinmux function
+ * has no representation in a Peripheral Function Select Register (IPSR), but
+ * instead solely depends on a group selection.
+ * - gpsr: GPSR field
+ * - fn: Function name, also referring to the GPSR field
+ * - gsel: Group selector
+ */
+#define PINMUX_IPSR_NOFN(gpsr, fn, gsel) \
+ PINMUX_DATA(fn##_MARK, FN_##gpsr, FN_##gsel)
+
+/*
+ * Describe a pinmux configuration with GPIO function that needs configuration
+ * in both a Peripheral Function Select Register (IPSR) and a GPIO/Peripheral
+ * Function Select Register (GPSR), and where the pinmux function has a
+ * representation in a Module Select Register (MOD_SEL).
+ * - ipsr: IPSR field
+ * - fn: Function name, also referring to the IPSR field
+ * - msel: Module selector
+ */
+#define PINMUX_IPSR_MSEL(ipsr, fn, msel) \
+ PINMUX_DATA(fn##_MARK, FN_##msel, FN_##ipsr, FN_##fn)
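For reference, an expansion sketch with invented names (IP0_3_0, FOO,
SEL_FOO_0) showing how the two most common macros flatten into the
pinmux_data[] stream, given that PINMUX_DATA() appends a 0 terminator:

	PINMUX_IPSR_GPSR(IP0_3_0, FOO)
		=> FOO_MARK, FN_FOO, FN_IP0_3_0, 0
	PINMUX_IPSR_MSEL(IP0_3_0, FOO, SEL_FOO_0)
		=> FOO_MARK, FN_SEL_FOO_0, FN_IP0_3_0, FN_FOO, 0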
/*
* Describe a pinmux configuration for a single-function pin with GPIO
@@ -381,7 +461,7 @@ struct sh_pfc_soc_info {
PINMUX_GPIO_FN(GPIO_FN_##str, PINMUX_FN_BASE, str##_MARK)
/*
- * PORTnCR macro
+ * PORTnCR helper macro for SH-Mobile/R-Mobile
*/
#define PORTCR(nr, reg) \
{ \
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index beb024c31a5d..3d233fc3448a 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -338,7 +338,6 @@ struct atlas7_pinctrl_data {
#define ATLAS7_GPIO_CTL_DATAIN_MASK BIT(7)
struct atlas7_gpio_bank {
- struct pinctrl_dev *pctldev;
int id;
int irq;
void __iomem *base;
@@ -6070,7 +6069,6 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
}
for (idx = 0; idx < nbank; idx++) {
- struct gpio_pin_range *pin_range;
struct atlas7_gpio_bank *bank;
bank = &a7gc->banks[idx];
@@ -6088,22 +6086,6 @@ static int atlas7_gpio_probe(struct platform_device *pdev)
gpiochip_set_chained_irqchip(chip, &atlas7_gpio_irq_chip,
bank->irq, atlas7_gpio_handle_irq);
-
- /* Records gpio_pin_range to a7gc */
- list_for_each_entry(pin_range, &chip->pin_ranges, node) {
- struct pinctrl_gpio_range *range;
-
- range = &pin_range->range;
- if (range->id == NGPIO_OF_BANK * idx) {
- bank->gpio_offset = range->id;
- bank->ngpio = range->npins;
- bank->gpio_pins = range->pins;
- bank->pctldev = pin_range->pctldev;
- break;
- }
- }
-
- BUG_ON(!bank->pctldev);
}
platform_set_drvdata(pdev, a7gc);
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
new file mode 100644
index 000000000000..0f28841b2332
--- /dev/null
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -0,0 +1,16 @@
+if ARCH_STM32 || COMPILE_TEST
+
+config PINCTRL_STM32
+ bool
+ depends on OF
+ select PINMUX
+ select GENERIC_PINCONF
+ select GPIOLIB
+
+config PINCTRL_STM32F429
+ bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429
+ depends on OF
+ default MACH_STM32F429
+ select PINCTRL_STM32
+
+endif
diff --git a/drivers/pinctrl/stm32/Makefile b/drivers/pinctrl/stm32/Makefile
new file mode 100644
index 000000000000..fc17d4238845
--- /dev/null
+++ b/drivers/pinctrl/stm32/Makefile
@@ -0,0 +1,5 @@
+# Core
+obj-$(CONFIG_PINCTRL_STM32) += pinctrl-stm32.o
+
+# SoC Drivers
+obj-$(CONFIG_PINCTRL_STM32F429) += pinctrl-stm32f429.o
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
new file mode 100644
index 000000000000..8deb566ed4cd
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Heavily based on Mediatek's pinctrl driver
+ */
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-stm32.h"
+
+#define STM32_GPIO_MODER 0x00
+#define STM32_GPIO_TYPER 0x04
+#define STM32_GPIO_SPEEDR 0x08
+#define STM32_GPIO_PUPDR 0x0c
+#define STM32_GPIO_IDR 0x10
+#define STM32_GPIO_ODR 0x14
+#define STM32_GPIO_BSRR 0x18
+#define STM32_GPIO_LCKR 0x1c
+#define STM32_GPIO_AFRL 0x20
+#define STM32_GPIO_AFRH 0x24
+
+#define STM32_GPIO_PINS_PER_BANK 16
+
+#define gpio_range_to_bank(chip) \
+ container_of(chip, struct stm32_gpio_bank, range)
+
+static const char * const stm32_gpio_functions[] = {
+ "gpio", "af0", "af1",
+ "af2", "af3", "af4",
+ "af5", "af6", "af7",
+ "af8", "af9", "af10",
+ "af11", "af12", "af13",
+ "af14", "af15", "analog",
+};
+
+struct stm32_pinctrl_group {
+ const char *name;
+ unsigned long config;
+ unsigned pin;
+};
+
+struct stm32_gpio_bank {
+ void __iomem *base;
+ struct clk *clk;
+ spinlock_t lock;
+ struct gpio_chip gpio_chip;
+ struct pinctrl_gpio_range range;
+};
+
+struct stm32_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctl_dev;
+ struct pinctrl_desc pctl_desc;
+ struct stm32_pinctrl_group *groups;
+ unsigned ngroups;
+ const char **grp_names;
+ struct stm32_gpio_bank *banks;
+ unsigned nbanks;
+ const struct stm32_pinctrl_match_data *match_data;
+};
+
+static inline int stm32_gpio_pin(int gpio)
+{
+ return gpio % STM32_GPIO_PINS_PER_BANK;
+}
+
+static inline u32 stm32_gpio_get_mode(u32 function)
+{
+ switch (function) {
+ case STM32_PIN_GPIO:
+ return 0;
+ case STM32_PIN_AF(0) ... STM32_PIN_AF(15):
+ return 2;
+ case STM32_PIN_ANALOG:
+ return 3;
+ }
+
+ return 0;
+}
+
+static inline u32 stm32_gpio_get_alt(u32 function)
+{
+ switch (function) {
+ case STM32_PIN_GPIO:
+ return 0;
+ case STM32_PIN_AF(0) ... STM32_PIN_AF(15):
+ return function - 1;
+ case STM32_PIN_ANALOG:
+ return 0;
+ }
+
+ return 0;
+}
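A worked example of the two helpers above: pinmux function 6 is
STM32_PIN_AF(5), so stm32_gpio_get_mode(6) returns 2 (alternate-function
mode in MODER) and stm32_gpio_get_alt(6) returns 5 (the AFR field
value). GPIO (0) and analog (17) leave alt at 0, since AFR is ignored in
those modes.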
+
+/* GPIO functions */
+
+static inline void __stm32_gpio_set(struct stm32_gpio_bank *bank,
+ unsigned offset, int value)
+{
+ if (!value)
+ offset += STM32_GPIO_PINS_PER_BANK;
+
+ clk_enable(bank->clk);
+
+ writel_relaxed(BIT(offset), bank->base + STM32_GPIO_BSRR);
+
+ clk_disable(bank->clk);
+}
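For context (STM32 GPIO hardware behaviour, not spelled out in the
patch): BSRR bits 0-15 atomically set the corresponding output bit and
bits 16-31 atomically clear it, which is why the clear path shifts the
offset by STM32_GPIO_PINS_PER_BANK (16) and no read-modify-write, and
hence no lock, is needed here.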
+
+static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+ int ret;
+
+ clk_enable(bank->clk);
+
+ ret = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+
+ clk_disable(bank->clk);
+
+ return ret;
+}
+
+static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+ __stm32_gpio_set(bank, offset, value);
+}
+
+static int stm32_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int stm32_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+ __stm32_gpio_set(bank, offset, value);
+ pinctrl_gpio_direction_output(chip->base + offset);
+
+ return 0;
+}
+
+static struct gpio_chip stm32_gpio_template = {
+ .request = stm32_gpio_request,
+ .free = stm32_gpio_free,
+ .get = stm32_gpio_get,
+ .set = stm32_gpio_set,
+ .direction_input = stm32_gpio_direction_input,
+ .direction_output = stm32_gpio_direction_output,
+};
+
+/* Pinctrl functions */
+
+static struct stm32_pinctrl_group *
+stm32_pctrl_find_group_by_pin(struct stm32_pinctrl *pctl, u32 pin)
+{
+ int i;
+
+ for (i = 0; i < pctl->ngroups; i++) {
+ struct stm32_pinctrl_group *grp = pctl->groups + i;
+
+ if (grp->pin == pin)
+ return grp;
+ }
+
+ return NULL;
+}
+
+static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl,
+ u32 pin_num, u32 fnum)
+{
+ int i;
+
+ for (i = 0; i < pctl->match_data->npins; i++) {
+ const struct stm32_desc_pin *pin = pctl->match_data->pins + i;
+ const struct stm32_desc_function *func = pin->functions;
+
+ if (pin->pin.number != pin_num)
+ continue;
+
+ while (func && func->name) {
+ if (func->num == fnum)
+ return true;
+ func++;
+ }
+
+ break;
+ }
+
+ return false;
+}
+
+static int stm32_pctrl_dt_node_to_map_func(struct stm32_pinctrl *pctl,
+ u32 pin, u32 fnum, struct stm32_pinctrl_group *grp,
+ struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps)
+{
+ if (*num_maps == *reserved_maps)
+ return -ENOSPC;
+
+ (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)[*num_maps].data.mux.group = grp->name;
+
+ if (!stm32_pctrl_is_function_valid(pctl, pin, fnum)) {
+ dev_err(pctl->dev, "invalid function %d on pin %d .\n",
+ fnum, pin);
+ return -EINVAL;
+ }
+
+ (*map)[*num_maps].data.mux.function = stm32_gpio_functions[fnum];
+ (*num_maps)++;
+
+ return 0;
+}
+
+static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *node,
+ struct pinctrl_map **map,
+ unsigned *reserved_maps,
+ unsigned *num_maps)
+{
+ struct stm32_pinctrl *pctl;
+ struct stm32_pinctrl_group *grp;
+ struct property *pins;
+ u32 pinfunc, pin, func;
+ unsigned long *configs;
+ unsigned int num_configs;
+ bool has_config = false;
+ unsigned reserve = 0;
+ int num_pins, num_funcs, maps_per_pin, i, err;
+
+ pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ pins = of_find_property(node, "pinmux", NULL);
+ if (!pins) {
+ dev_err(pctl->dev, "missing pins property in node %s .\n",
+ node->name);
+ return -EINVAL;
+ }
+
+ err = pinconf_generic_parse_dt_config(node, pctldev, &configs,
+ &num_configs);
+ if (err)
+ return err;
+
+ if (num_configs)
+ has_config = true;
+
+ num_pins = pins->length / sizeof(u32);
+ num_funcs = num_pins;
+ maps_per_pin = 0;
+ if (num_funcs)
+ maps_per_pin++;
+ if (has_config && num_pins >= 1)
+ maps_per_pin++;
+
+ if (!num_pins || !maps_per_pin)
+ return -EINVAL;
+
+ reserve = num_pins * maps_per_pin;
+
+ err = pinctrl_utils_reserve_map(pctldev, map,
+ reserved_maps, num_maps, reserve);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_pins; i++) {
+ err = of_property_read_u32_index(node, "pinmux",
+ i, &pinfunc);
+ if (err)
+ return err;
+
+ pin = STM32_GET_PIN_NO(pinfunc);
+ func = STM32_GET_PIN_FUNC(pinfunc);
+
+ if (pin >= pctl->match_data->npins) {
+ dev_err(pctl->dev, "invalid pin number.\n");
+ return -EINVAL;
+ }
+
+ if (!stm32_pctrl_is_function_valid(pctl, pin, func)) {
+ dev_err(pctl->dev, "invalid function.\n");
+ return -EINVAL;
+ }
+
+ grp = stm32_pctrl_find_group_by_pin(pctl, pin);
+ if (!grp) {
+ dev_err(pctl->dev, "unable to match pin %d to group\n",
+ pin);
+ return -EINVAL;
+ }
+
+ err = stm32_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map,
+ reserved_maps, num_maps);
+ if (err)
+ return err;
+
+ if (has_config) {
+ err = pinctrl_utils_add_map_configs(pctldev, map,
+ reserved_maps, num_maps, grp->name,
+ configs, num_configs,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
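Spelling out the reservation arithmetic above: each pin contributes one
mux map, plus one config map when the node carries generic pinconf
properties, so for a hypothetical node with pinmux = <A B C> plus a bias
property the parser reserves 3 * 2 = 6 pinctrl_map entries.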
+
+static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ struct device_node *np;
+ unsigned reserved_maps;
+ int ret;
+
+ *map = NULL;
+ *num_maps = 0;
+ reserved_maps = 0;
+
+ for_each_child_of_node(np_config, np) {
+ ret = stm32_pctrl_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps, num_maps);
+ if (ret < 0) {
+ pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int stm32_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->ngroups;
+}
+
+static const char *stm32_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->groups[group].name;
+}
+
+static int stm32_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
+{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = (unsigned *)&pctl->groups[group].pin;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const struct pinctrl_ops stm32_pctrl_ops = {
+ .dt_node_to_map = stm32_pctrl_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+ .get_groups_count = stm32_pctrl_get_groups_count,
+ .get_group_name = stm32_pctrl_get_group_name,
+ .get_group_pins = stm32_pctrl_get_group_pins,
+};
+
+/* Pinmux functions */
+
+static int stm32_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(stm32_gpio_functions);
+}
+
+static const char *stm32_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ return stm32_gpio_functions[selector];
+}
+
+static int stm32_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->grp_names;
+ *num_groups = pctl->ngroups;
+
+ return 0;
+}
+
+static void stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
+ int pin, u32 mode, u32 alt)
+{
+ u32 val;
+ int alt_shift = (pin % 8) * 4;
+ int alt_offset = STM32_GPIO_AFRL + (pin / 8) * 4;
+ unsigned long flags;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl_relaxed(bank->base + alt_offset);
+ val &= ~GENMASK(alt_shift + 3, alt_shift);
+ val |= (alt << alt_shift);
+ writel_relaxed(val, bank->base + alt_offset);
+
+ val = readl_relaxed(bank->base + STM32_GPIO_MODER);
+ val &= ~GENMASK(pin * 2 + 1, pin * 2);
+ val |= mode << (pin * 2);
+ writel_relaxed(val, bank->base + STM32_GPIO_MODER);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+}
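A worked instance of the field arithmetic above (pin number invented for
illustration): for pin 9, the 4-bit AFR field lives at offset
STM32_GPIO_AFRL + (9 / 8) * 4 = 0x24 (AFRH), shift (9 % 8) * 4 = 4, and
the 2-bit MODER field occupies bits 19:18, i.e. GENMASK(9 * 2 + 1, 9 * 2).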
+
+static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned function,
+ unsigned group)
+{
+ bool ret;
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_pinctrl_group *g = pctl->groups + group;
+ struct pinctrl_gpio_range *range;
+ struct stm32_gpio_bank *bank;
+ u32 mode, alt;
+ int pin;
+
+ ret = stm32_pctrl_is_function_valid(pctl, g->pin, function);
+ if (!ret) {
+ dev_err(pctl->dev, "invalid function %d on group %d .\n",
+ function, group);
+ return -EINVAL;
+ }
+
+ range = pinctrl_find_gpio_range_from_pin(pctldev, g->pin);
+ bank = gpio_range_to_bank(range);
+ pin = stm32_gpio_pin(g->pin);
+
+ mode = stm32_gpio_get_mode(function);
+ alt = stm32_gpio_get_alt(function);
+
+ stm32_pmx_set_mode(bank, pin, mode, alt);
+
+ return 0;
+}
+
+static int stm32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned gpio,
+ bool input)
+{
+ struct stm32_gpio_bank *bank = gpio_range_to_bank(range);
+ int pin = stm32_gpio_pin(gpio);
+
+ stm32_pmx_set_mode(bank, pin, !input, 0);
+
+ return 0;
+}
+
+static const struct pinmux_ops stm32_pmx_ops = {
+ .get_functions_count = stm32_pmx_get_funcs_cnt,
+ .get_function_name = stm32_pmx_get_func_name,
+ .get_function_groups = stm32_pmx_get_func_groups,
+ .set_mux = stm32_pmx_set_mux,
+ .gpio_set_direction = stm32_pmx_gpio_set_direction,
+};
+
+/* Pinconf functions */
+
+static void stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
+ unsigned offset, u32 drive)
+{
+ unsigned long flags;
+ u32 val;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl_relaxed(bank->base + STM32_GPIO_TYPER);
+ val &= ~BIT(offset);
+ val |= drive << offset;
+ writel_relaxed(val, bank->base + STM32_GPIO_TYPER);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+}
+
+static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
+ unsigned offset, u32 speed)
+{
+ unsigned long flags;
+ u32 val;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl_relaxed(bank->base + STM32_GPIO_SPEEDR);
+ val &= ~GENMASK(offset * 2 + 1, offset * 2);
+ val |= speed << (offset * 2);
+ writel_relaxed(val, bank->base + STM32_GPIO_SPEEDR);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+}
+
+static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
+ unsigned offset, u32 bias)
+{
+ unsigned long flags;
+ u32 val;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl_relaxed(bank->base + STM32_GPIO_PUPDR);
+ val &= ~GENMASK(offset * 2 + 1, offset * 2);
+ val |= bias << (offset * 2);
+ writel_relaxed(val, bank->base + STM32_GPIO_PUPDR);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+}
+
+static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
+ unsigned int pin, enum pin_config_param param,
+ enum pin_config_param arg)
+{
+ struct pinctrl_gpio_range *range;
+ struct stm32_gpio_bank *bank;
+ int offset, ret = 0;
+
+ range = pinctrl_find_gpio_range_from_pin(pctldev, pin);
+ bank = gpio_range_to_bank(range);
+ offset = stm32_gpio_pin(pin);
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ stm32_pconf_set_driving(bank, offset, 0);
+ break;
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ stm32_pconf_set_driving(bank, offset, 1);
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ stm32_pconf_set_speed(bank, offset, arg);
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ stm32_pconf_set_bias(bank, offset, 0);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ stm32_pconf_set_bias(bank, offset, 1);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ stm32_pconf_set_bias(bank, offset, 2);
+ break;
+ case PIN_CONFIG_OUTPUT:
+ __stm32_gpio_set(bank, offset, arg);
+ ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
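The raw values written above follow the STM32 GPIO register encodings
(hardware facts, noted here for reference): TYPER 0 = push-pull,
1 = open-drain; PUPDR 0 = no pull, 1 = pull-up, 2 = pull-down; SPEEDR
takes the 2-bit output-speed value directly from the generic "slew-rate"
pinconf argument.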
+
+static int stm32_pconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned group,
+ unsigned long *config)
+{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *config = pctl->groups[group].config;
+
+ return 0;
+}
+
+static int stm32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
+ unsigned long *configs, unsigned num_configs)
+{
+ struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct stm32_pinctrl_group *g = &pctl->groups[group];
+ int i, ret;
+
+ for (i = 0; i < num_configs; i++) {
+ ret = stm32_pconf_parse_conf(pctldev, g->pin,
+ pinconf_to_config_param(configs[i]),
+ pinconf_to_config_argument(configs[i]));
+ if (ret < 0)
+ return ret;
+
+ g->config = configs[i];
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops stm32_pconf_ops = {
+ .pin_config_group_get = stm32_pconf_group_get,
+ .pin_config_group_set = stm32_pconf_group_set,
+};
+
+static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
+ struct device_node *np)
+{
+ int bank_nr = pctl->nbanks;
+ struct stm32_gpio_bank *bank = &pctl->banks[bank_nr];
+ struct pinctrl_gpio_range *range = &bank->range;
+ struct device *dev = pctl->dev;
+ struct resource res;
+ struct reset_control *rstc;
+ int err, npins;
+
+ rstc = of_reset_control_get(np, NULL);
+ if (!IS_ERR(rstc))
+ reset_control_deassert(rstc);
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ bank->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(bank->base))
+ return PTR_ERR(bank->base);
+
+ bank->clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(bank->clk)) {
+ dev_err(dev, "failed to get clk (%ld)\n", PTR_ERR(bank->clk));
+ return PTR_ERR(bank->clk);
+ }
+
+ err = clk_prepare(bank->clk);
+ if (err) {
+ dev_err(dev, "failed to prepare clk (%d)\n", err);
+ return err;
+ }
+
+ npins = pctl->match_data->npins;
+ npins -= bank_nr * STM32_GPIO_PINS_PER_BANK;
+ if (npins < 0)
+ return -EINVAL;
+ else if (npins > STM32_GPIO_PINS_PER_BANK)
+ npins = STM32_GPIO_PINS_PER_BANK;
+
+ bank->gpio_chip = stm32_gpio_template;
+ bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
+ bank->gpio_chip.ngpio = npins;
+ bank->gpio_chip.of_node = np;
+ bank->gpio_chip.parent = dev;
+ spin_lock_init(&bank->lock);
+
+ of_property_read_string(np, "st,bank-name", &range->name);
+ bank->gpio_chip.label = range->name;
+
+ range->id = bank_nr;
+ range->pin_base = range->base = range->id * STM32_GPIO_PINS_PER_BANK;
+ range->npins = bank->gpio_chip.ngpio;
+ range->gc = &bank->gpio_chip;
+ err = gpiochip_add_data(&bank->gpio_chip, bank);
+ if (err) {
+ dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr);
+ return err;
+ }
+
+ dev_info(dev, "%s bank added\n", range->name);
+ return 0;
+}
+
+static int stm32_pctrl_build_state(struct platform_device *pdev)
+{
+ struct stm32_pinctrl *pctl = platform_get_drvdata(pdev);
+ int i;
+
+ pctl->ngroups = pctl->match_data->npins;
+
+ /* Allocate groups */
+ pctl->groups = devm_kcalloc(&pdev->dev, pctl->ngroups,
+ sizeof(*pctl->groups), GFP_KERNEL);
+ if (!pctl->groups)
+ return -ENOMEM;
+
+ /* We assume that one pin is one group, use pin name as group name. */
+ pctl->grp_names = devm_kcalloc(&pdev->dev, pctl->ngroups,
+ sizeof(*pctl->grp_names), GFP_KERNEL);
+ if (!pctl->grp_names)
+ return -ENOMEM;
+
+ for (i = 0; i < pctl->match_data->npins; i++) {
+ const struct stm32_desc_pin *pin = pctl->match_data->pins + i;
+ struct stm32_pinctrl_group *group = pctl->groups + i;
+
+ group->name = pin->pin.name;
+ group->pin = pin->pin.number;
+
+ pctl->grp_names[i] = pin->pin.name;
+ }
+
+ return 0;
+}
+
+int stm32_pctl_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct stm32_pinctrl *pctl;
+ struct pinctrl_pin_desc *pins;
+ int i, ret, banks = 0;
+
+ if (!np)
+ return -EINVAL;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match || !match->data)
+ return -EINVAL;
+
+ if (!of_find_property(np, "pins-are-numbered", NULL)) {
+ dev_err(dev, "only support pins-are-numbered format\n");
+ return -EINVAL;
+ }
+
+ pctl = devm_kzalloc(dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pctl);
+
+ pctl->dev = dev;
+ pctl->match_data = match->data;
+ ret = stm32_pctrl_build_state(pdev);
+ if (ret) {
+ dev_err(dev, "build state failed: %d\n", ret);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, child)
+ if (of_property_read_bool(child, "gpio-controller"))
+ banks++;
+
+ if (!banks) {
+ dev_err(dev, "at least one GPIO bank is required\n");
+ return -EINVAL;
+ }
+
+ pctl->banks = devm_kcalloc(dev, banks, sizeof(*pctl->banks),
+ GFP_KERNEL);
+ if (!pctl->banks)
+ return -ENOMEM;
+
+ for_each_child_of_node(np, child) {
+ if (of_property_read_bool(child, "gpio-controller")) {
+ ret = stm32_gpiolib_register_bank(pctl, child);
+ if (ret)
+ return ret;
+
+ pctl->nbanks++;
+ }
+ }
+
+ pins = devm_kcalloc(&pdev->dev, pctl->match_data->npins, sizeof(*pins),
+ GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < pctl->match_data->npins; i++)
+ pins[i] = pctl->match_data->pins[i].pin;
+
+ pctl->pctl_desc.name = dev_name(&pdev->dev);
+ pctl->pctl_desc.owner = THIS_MODULE;
+ pctl->pctl_desc.pins = pins;
+ pctl->pctl_desc.npins = pctl->match_data->npins;
+ pctl->pctl_desc.confops = &stm32_pconf_ops;
+ pctl->pctl_desc.pctlops = &stm32_pctrl_ops;
+ pctl->pctl_desc.pmxops = &stm32_pmx_ops;
+ pctl->dev = &pdev->dev;
+
+ pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
+ if (!pctl->pctl_dev) {
+ dev_err(&pdev->dev, "Failed pinctrl registration\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < pctl->nbanks; i++)
+ pinctrl_add_gpio_range(pctl->pctl_dev, &pctl->banks[i].range);
+
+ dev_info(dev, "Pinctrl STM32 initialized\n");
+
+ return 0;
+}
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
new file mode 100644
index 000000000000..35ebc94c01e4
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#ifndef __PINCTRL_STM32_H
+#define __PINCTRL_STM32_H
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#define STM32_PIN_NO(x) ((x) << 8)
+#define STM32_GET_PIN_NO(x) ((x) >> 8)
+#define STM32_GET_PIN_FUNC(x) ((x) & 0xff)
+
+#define STM32_PIN_GPIO 0
+#define STM32_PIN_AF(x) ((x) + 1)
+#define STM32_PIN_ANALOG (STM32_PIN_AF(15) + 1)
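An encoding sketch (values invented for illustration): a device-tree
"pinmux" cell packs (pin << 8) | function, so

	STM32_PIN_NO(3) | STM32_PIN_AF(7)	/* == 0x308 */

unpacks via STM32_GET_PIN_NO() to pin 3 (PA3) and via
STM32_GET_PIN_FUNC() to function 8, i.e. AF7 given that STM32_PIN_AF(x)
is x + 1.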
+
+struct stm32_desc_function {
+ const char *name;
+ const unsigned char num;
+};
+
+struct stm32_desc_pin {
+ struct pinctrl_pin_desc pin;
+ const struct stm32_desc_function *functions;
+};
+
+#define STM32_PIN(_pin, ...) \
+ { \
+ .pin = _pin, \
+ .functions = (struct stm32_desc_function[]){ \
+ __VA_ARGS__, { } }, \
+ }
+
+#define STM32_FUNCTION(_num, _name) \
+ { \
+ .num = _num, \
+ .name = _name, \
+ }
+
+struct stm32_pinctrl_match_data {
+ const struct stm32_desc_pin *pins;
+ const unsigned int npins;
+};
+
+int stm32_pctl_probe(struct platform_device *pdev);
+
+#endif /* __PINCTRL_STM32_H */
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32f429.c b/drivers/pinctrl/stm32/pinctrl-stm32f429.c
new file mode 100644
index 000000000000..e9b15dc0654b
--- /dev/null
+++ b/drivers/pinctrl/stm32/pinctrl-stm32f429.c
@@ -0,0 +1,1591 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-stm32.h"
+
+static const struct stm32_desc_pin stm32f429_pins[] = {
+ STM32_PIN(
+ PINCTRL_PIN(0, "PA0"),
+ STM32_FUNCTION(0, "GPIOA0"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(3, "TIM5_CH1"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(8, "USART2_CTS"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(12, "ETH_MII_CRS"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(1, "PA1"),
+ STM32_FUNCTION(0, "GPIOA1"),
+ STM32_FUNCTION(2, "TIM2_CH2"),
+ STM32_FUNCTION(3, "TIM5_CH2"),
+ STM32_FUNCTION(8, "USART2_RTS"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(12, "ETH_MII_RX_CLK ETH_RMII_REF_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(2, "PA2"),
+ STM32_FUNCTION(0, "GPIOA2"),
+ STM32_FUNCTION(2, "TIM2_CH3"),
+ STM32_FUNCTION(3, "TIM5_CH3"),
+ STM32_FUNCTION(4, "TIM9_CH1"),
+ STM32_FUNCTION(8, "USART2_TX"),
+ STM32_FUNCTION(12, "ETH_MDIO"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(3, "PA3"),
+ STM32_FUNCTION(0, "GPIOA3"),
+ STM32_FUNCTION(2, "TIM2_CH4"),
+ STM32_FUNCTION(3, "TIM5_CH4"),
+ STM32_FUNCTION(4, "TIM9_CH2"),
+ STM32_FUNCTION(8, "USART2_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D0"),
+ STM32_FUNCTION(12, "ETH_MII_COL"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(4, "PA4"),
+ STM32_FUNCTION(0, "GPIOA4"),
+ STM32_FUNCTION(6, "SPI1_NSS"),
+ STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+ STM32_FUNCTION(8, "USART2_CK"),
+ STM32_FUNCTION(13, "OTG_HS_SOF"),
+ STM32_FUNCTION(14, "DCMI_HSYNC"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(5, "PA5"),
+ STM32_FUNCTION(0, "GPIOA5"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(6, "SPI1_SCK"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_CK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(6, "PA6"),
+ STM32_FUNCTION(0, "GPIOA6"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(6, "SPI1_MISO"),
+ STM32_FUNCTION(10, "TIM13_CH1"),
+ STM32_FUNCTION(14, "DCMI_PIXCLK"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(7, "PA7"),
+ STM32_FUNCTION(0, "GPIOA7"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(6, "SPI1_MOSI"),
+ STM32_FUNCTION(10, "TIM14_CH1"),
+ STM32_FUNCTION(12, "ETH_MII_RX_DV ETH_RMII_CRS_DV"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(8, "PA8"),
+ STM32_FUNCTION(0, "GPIOA8"),
+ STM32_FUNCTION(1, "MCO1"),
+ STM32_FUNCTION(2, "TIM1_CH1"),
+ STM32_FUNCTION(5, "I2C3_SCL"),
+ STM32_FUNCTION(8, "USART1_CK"),
+ STM32_FUNCTION(11, "OTG_FS_SOF"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(9, "PA9"),
+ STM32_FUNCTION(0, "GPIOA9"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(5, "I2C3_SMBA"),
+ STM32_FUNCTION(8, "USART1_TX"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(10, "PA10"),
+ STM32_FUNCTION(0, "GPIOA10"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(8, "USART1_RX"),
+ STM32_FUNCTION(11, "OTG_FS_ID"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(11, "PA11"),
+ STM32_FUNCTION(0, "GPIOA11"),
+ STM32_FUNCTION(2, "TIM1_CH4"),
+ STM32_FUNCTION(8, "USART1_CTS"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(11, "OTG_FS_DM"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(12, "PA12"),
+ STM32_FUNCTION(0, "GPIOA12"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(8, "USART1_RTS"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(11, "OTG_FS_DP"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(13, "PA13"),
+ STM32_FUNCTION(0, "GPIOA13"),
+ STM32_FUNCTION(1, "JTMS SWDIO"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(14, "PA14"),
+ STM32_FUNCTION(0, "GPIOA14"),
+ STM32_FUNCTION(1, "JTCK SWCLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(15, "PA15"),
+ STM32_FUNCTION(0, "GPIOA15"),
+ STM32_FUNCTION(1, "JTDI"),
+ STM32_FUNCTION(2, "TIM2_CH1 TIM2_ETR"),
+ STM32_FUNCTION(6, "SPI1_NSS"),
+ STM32_FUNCTION(7, "SPI3_NSS I2S3_WS"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(16, "PB0"),
+ STM32_FUNCTION(0, "GPIOB0"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(3, "TIM3_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(10, "LCD_R3"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D1"),
+ STM32_FUNCTION(12, "ETH_MII_RXD2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(17, "PB1"),
+ STM32_FUNCTION(0, "GPIOB1"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(3, "TIM3_CH4"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(10, "LCD_R6"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D2"),
+ STM32_FUNCTION(12, "ETH_MII_RXD3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(18, "PB2"),
+ STM32_FUNCTION(0, "GPIOB2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(19, "PB3"),
+ STM32_FUNCTION(0, "GPIOB3"),
+ STM32_FUNCTION(1, "JTDO TRACESWO"),
+ STM32_FUNCTION(2, "TIM2_CH2"),
+ STM32_FUNCTION(6, "SPI1_SCK"),
+ STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(20, "PB4"),
+ STM32_FUNCTION(0, "GPIOB4"),
+ STM32_FUNCTION(1, "NJTRST"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(6, "SPI1_MISO"),
+ STM32_FUNCTION(7, "SPI3_MISO"),
+ STM32_FUNCTION(8, "I2S3EXT_SD"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(21, "PB5"),
+ STM32_FUNCTION(0, "GPIOB5"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(5, "I2C1_SMBA"),
+ STM32_FUNCTION(6, "SPI1_MOSI"),
+ STM32_FUNCTION(7, "SPI3_MOSI I2S3_SD"),
+ STM32_FUNCTION(10, "CAN2_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D7"),
+ STM32_FUNCTION(12, "ETH_PPS_OUT"),
+ STM32_FUNCTION(13, "FMC_SDCKE1"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(22, "PB6"),
+ STM32_FUNCTION(0, "GPIOB6"),
+ STM32_FUNCTION(3, "TIM4_CH1"),
+ STM32_FUNCTION(5, "I2C1_SCL"),
+ STM32_FUNCTION(8, "USART1_TX"),
+ STM32_FUNCTION(10, "CAN2_TX"),
+ STM32_FUNCTION(13, "FMC_SDNE1"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(23, "PB7"),
+ STM32_FUNCTION(0, "GPIOB7"),
+ STM32_FUNCTION(3, "TIM4_CH2"),
+ STM32_FUNCTION(5, "I2C1_SDA"),
+ STM32_FUNCTION(8, "USART1_RX"),
+ STM32_FUNCTION(13, "FMC_NL"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(24, "PB8"),
+ STM32_FUNCTION(0, "GPIOB8"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(4, "TIM10_CH1"),
+ STM32_FUNCTION(5, "I2C1_SCL"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(12, "ETH_MII_TXD3"),
+ STM32_FUNCTION(13, "SDIO_D4"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(25, "PB9"),
+ STM32_FUNCTION(0, "GPIOB9"),
+ STM32_FUNCTION(3, "TIM4_CH4"),
+ STM32_FUNCTION(4, "TIM11_CH1"),
+ STM32_FUNCTION(5, "I2C1_SDA"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(13, "SDIO_D5"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(26, "PB10"),
+ STM32_FUNCTION(0, "GPIOB10"),
+ STM32_FUNCTION(2, "TIM2_CH3"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D3"),
+ STM32_FUNCTION(12, "ETH_MII_RX_ER"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(27, "PB11"),
+ STM32_FUNCTION(0, "GPIOB11"),
+ STM32_FUNCTION(2, "TIM2_CH4"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D4"),
+ STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(28, "PB12"),
+ STM32_FUNCTION(0, "GPIOB12"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(10, "CAN2_RX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D5"),
+ STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"),
+ STM32_FUNCTION(13, "OTG_HS_ID"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(29, "PB13"),
+ STM32_FUNCTION(0, "GPIOB13"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART3_CTS"),
+ STM32_FUNCTION(10, "CAN2_TX"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_D6"),
+ STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(30, "PB14"),
+ STM32_FUNCTION(0, "GPIOB14"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(6, "SPI2_MISO"),
+ STM32_FUNCTION(7, "I2S2EXT_SD"),
+ STM32_FUNCTION(8, "USART3_RTS"),
+ STM32_FUNCTION(10, "TIM12_CH1"),
+ STM32_FUNCTION(13, "OTG_HS_DM"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(31, "PB15"),
+ STM32_FUNCTION(0, "GPIOB15"),
+ STM32_FUNCTION(1, "RTC_REFIN"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+ STM32_FUNCTION(10, "TIM12_CH2"),
+ STM32_FUNCTION(13, "OTG_HS_DP"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(32, "PC0"),
+ STM32_FUNCTION(0, "GPIOC0"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_STP"),
+ STM32_FUNCTION(13, "FMC_SDNWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(33, "PC1"),
+ STM32_FUNCTION(0, "GPIOC1"),
+ STM32_FUNCTION(12, "ETH_MDC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(34, "PC2"),
+ STM32_FUNCTION(0, "GPIOC2"),
+ STM32_FUNCTION(6, "SPI2_MISO"),
+ STM32_FUNCTION(7, "I2S2EXT_SD"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"),
+ STM32_FUNCTION(12, "ETH_MII_TXD2"),
+ STM32_FUNCTION(13, "FMC_SDNE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(35, "PC3"),
+ STM32_FUNCTION(0, "GPIOC3"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"),
+ STM32_FUNCTION(12, "ETH_MII_TX_CLK"),
+ STM32_FUNCTION(13, "FMC_SDCKE0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(36, "PC4"),
+ STM32_FUNCTION(0, "GPIOC4"),
+ STM32_FUNCTION(12, "ETH_MII_RXD0 ETH_RMII_RXD0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(37, "PC5"),
+ STM32_FUNCTION(0, "GPIOC5"),
+ STM32_FUNCTION(12, "ETH_MII_RXD1 ETH_RMII_RXD1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(38, "PC6"),
+ STM32_FUNCTION(0, "GPIOC6"),
+ STM32_FUNCTION(3, "TIM3_CH1"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(6, "I2S2_MCK"),
+ STM32_FUNCTION(9, "USART6_TX"),
+ STM32_FUNCTION(13, "SDIO_D6"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(39, "PC7"),
+ STM32_FUNCTION(0, "GPIOC7"),
+ STM32_FUNCTION(3, "TIM3_CH2"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(7, "I2S3_MCK"),
+ STM32_FUNCTION(9, "USART6_RX"),
+ STM32_FUNCTION(13, "SDIO_D7"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(40, "PC8"),
+ STM32_FUNCTION(0, "GPIOC8"),
+ STM32_FUNCTION(3, "TIM3_CH3"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(9, "USART6_CK"),
+ STM32_FUNCTION(13, "SDIO_D0"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(41, "PC9"),
+ STM32_FUNCTION(0, "GPIOC9"),
+ STM32_FUNCTION(1, "MCO2"),
+ STM32_FUNCTION(3, "TIM3_CH4"),
+ STM32_FUNCTION(4, "TIM8_CH4"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(6, "I2S_CKIN"),
+ STM32_FUNCTION(13, "SDIO_D1"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(42, "PC10"),
+ STM32_FUNCTION(0, "GPIOC10"),
+ STM32_FUNCTION(7, "SPI3_SCK I2S3_CK"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(9, "UART4_TX"),
+ STM32_FUNCTION(13, "SDIO_D2"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(43, "PC11"),
+ STM32_FUNCTION(0, "GPIOC11"),
+ STM32_FUNCTION(6, "I2S3EXT_SD"),
+ STM32_FUNCTION(7, "SPI3_MISO"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(9, "UART4_RX"),
+ STM32_FUNCTION(13, "SDIO_D3"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(44, "PC12"),
+ STM32_FUNCTION(0, "GPIOC12"),
+ STM32_FUNCTION(7, "SPI3_MOSI I2S3_SD"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(9, "UART5_TX"),
+ STM32_FUNCTION(13, "SDIO_CK"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(45, "PC13"),
+ STM32_FUNCTION(0, "GPIOC13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(46, "PC14"),
+ STM32_FUNCTION(0, "GPIOC14"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(47, "PC15"),
+ STM32_FUNCTION(0, "GPIOC15"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(48, "PD0"),
+ STM32_FUNCTION(0, "GPIOD0"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(13, "FMC_D2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(49, "PD1"),
+ STM32_FUNCTION(0, "GPIOD1"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(13, "FMC_D3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(50, "PD2"),
+ STM32_FUNCTION(0, "GPIOD2"),
+ STM32_FUNCTION(3, "TIM3_ETR"),
+ STM32_FUNCTION(9, "UART5_RX"),
+ STM32_FUNCTION(13, "SDIO_CMD"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(51, "PD3"),
+ STM32_FUNCTION(0, "GPIOD3"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(8, "USART2_CTS"),
+ STM32_FUNCTION(13, "FMC_CLK"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(52, "PD4"),
+ STM32_FUNCTION(0, "GPIOD4"),
+ STM32_FUNCTION(8, "USART2_RTS"),
+ STM32_FUNCTION(13, "FMC_NOE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(53, "PD5"),
+ STM32_FUNCTION(0, "GPIOD5"),
+ STM32_FUNCTION(8, "USART2_TX"),
+ STM32_FUNCTION(13, "FMC_NWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(54, "PD6"),
+ STM32_FUNCTION(0, "GPIOD6"),
+ STM32_FUNCTION(6, "SPI3_MOSI I2S3_SD"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(8, "USART2_RX"),
+ STM32_FUNCTION(13, "FMC_NWAIT"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(55, "PD7"),
+ STM32_FUNCTION(0, "GPIOD7"),
+ STM32_FUNCTION(8, "USART2_CK"),
+ STM32_FUNCTION(13, "FMC_NE1 FMC_NCE2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(56, "PD8"),
+ STM32_FUNCTION(0, "GPIOD8"),
+ STM32_FUNCTION(8, "USART3_TX"),
+ STM32_FUNCTION(13, "FMC_D13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(57, "PD9"),
+ STM32_FUNCTION(0, "GPIOD9"),
+ STM32_FUNCTION(8, "USART3_RX"),
+ STM32_FUNCTION(13, "FMC_D14"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(58, "PD10"),
+ STM32_FUNCTION(0, "GPIOD10"),
+ STM32_FUNCTION(8, "USART3_CK"),
+ STM32_FUNCTION(13, "FMC_D15"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(59, "PD11"),
+ STM32_FUNCTION(0, "GPIOD11"),
+ STM32_FUNCTION(8, "USART3_CTS"),
+ STM32_FUNCTION(13, "FMC_A16"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(60, "PD12"),
+ STM32_FUNCTION(0, "GPIOD12"),
+ STM32_FUNCTION(3, "TIM4_CH1"),
+ STM32_FUNCTION(8, "USART3_RTS"),
+ STM32_FUNCTION(13, "FMC_A17"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(61, "PD13"),
+ STM32_FUNCTION(0, "GPIOD13"),
+ STM32_FUNCTION(3, "TIM4_CH2"),
+ STM32_FUNCTION(13, "FMC_A18"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(62, "PD14"),
+ STM32_FUNCTION(0, "GPIOD14"),
+ STM32_FUNCTION(3, "TIM4_CH3"),
+ STM32_FUNCTION(13, "FMC_D0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(63, "PD15"),
+ STM32_FUNCTION(0, "GPIOD15"),
+ STM32_FUNCTION(3, "TIM4_CH4"),
+ STM32_FUNCTION(13, "FMC_D1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(64, "PE0"),
+ STM32_FUNCTION(0, "GPIOE0"),
+ STM32_FUNCTION(3, "TIM4_ETR"),
+ STM32_FUNCTION(9, "UART8_RX"),
+ STM32_FUNCTION(13, "FMC_NBL0"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(65, "PE1"),
+ STM32_FUNCTION(0, "GPIOE1"),
+ STM32_FUNCTION(9, "UART8_TX"),
+ STM32_FUNCTION(13, "FMC_NBL1"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(66, "PE2"),
+ STM32_FUNCTION(0, "GPIOE2"),
+ STM32_FUNCTION(1, "TRACECLK"),
+ STM32_FUNCTION(6, "SPI4_SCK"),
+ STM32_FUNCTION(7, "SAI1_MCLK_A"),
+ STM32_FUNCTION(12, "ETH_MII_TXD3"),
+ STM32_FUNCTION(13, "FMC_A23"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(67, "PE3"),
+ STM32_FUNCTION(0, "GPIOE3"),
+ STM32_FUNCTION(1, "TRACED0"),
+ STM32_FUNCTION(7, "SAI1_SD_B"),
+ STM32_FUNCTION(13, "FMC_A19"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(68, "PE4"),
+ STM32_FUNCTION(0, "GPIOE4"),
+ STM32_FUNCTION(1, "TRACED1"),
+ STM32_FUNCTION(6, "SPI4_NSS"),
+ STM32_FUNCTION(7, "SAI1_FS_A"),
+ STM32_FUNCTION(13, "FMC_A20"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(69, "PE5"),
+ STM32_FUNCTION(0, "GPIOE5"),
+ STM32_FUNCTION(1, "TRACED2"),
+ STM32_FUNCTION(4, "TIM9_CH1"),
+ STM32_FUNCTION(6, "SPI4_MISO"),
+ STM32_FUNCTION(7, "SAI1_SCK_A"),
+ STM32_FUNCTION(13, "FMC_A21"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(70, "PE6"),
+ STM32_FUNCTION(0, "GPIOE6"),
+ STM32_FUNCTION(1, "TRACED3"),
+ STM32_FUNCTION(4, "TIM9_CH2"),
+ STM32_FUNCTION(6, "SPI4_MOSI"),
+ STM32_FUNCTION(7, "SAI1_SD_A"),
+ STM32_FUNCTION(13, "FMC_A22"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_G1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(71, "PE7"),
+ STM32_FUNCTION(0, "GPIOE7"),
+ STM32_FUNCTION(2, "TIM1_ETR"),
+ STM32_FUNCTION(9, "UART7_RX"),
+ STM32_FUNCTION(13, "FMC_D4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(72, "PE8"),
+ STM32_FUNCTION(0, "GPIOE8"),
+ STM32_FUNCTION(2, "TIM1_CH1N"),
+ STM32_FUNCTION(9, "UART7_TX"),
+ STM32_FUNCTION(13, "FMC_D5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(73, "PE9"),
+ STM32_FUNCTION(0, "GPIOE9"),
+ STM32_FUNCTION(2, "TIM1_CH1"),
+ STM32_FUNCTION(13, "FMC_D6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(74, "PE10"),
+ STM32_FUNCTION(0, "GPIOE10"),
+ STM32_FUNCTION(2, "TIM1_CH2N"),
+ STM32_FUNCTION(13, "FMC_D7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(75, "PE11"),
+ STM32_FUNCTION(0, "GPIOE11"),
+ STM32_FUNCTION(2, "TIM1_CH2"),
+ STM32_FUNCTION(6, "SPI4_NSS"),
+ STM32_FUNCTION(13, "FMC_D8"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(76, "PE12"),
+ STM32_FUNCTION(0, "GPIOE12"),
+ STM32_FUNCTION(2, "TIM1_CH3N"),
+ STM32_FUNCTION(6, "SPI4_SCK"),
+ STM32_FUNCTION(13, "FMC_D9"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(77, "PE13"),
+ STM32_FUNCTION(0, "GPIOE13"),
+ STM32_FUNCTION(2, "TIM1_CH3"),
+ STM32_FUNCTION(6, "SPI4_MISO"),
+ STM32_FUNCTION(13, "FMC_D10"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(78, "PE14"),
+ STM32_FUNCTION(0, "GPIOE14"),
+ STM32_FUNCTION(2, "TIM1_CH4"),
+ STM32_FUNCTION(6, "SPI4_MOSI"),
+ STM32_FUNCTION(13, "FMC_D11"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(79, "PE15"),
+ STM32_FUNCTION(0, "GPIOE15"),
+ STM32_FUNCTION(2, "TIM1_BKIN"),
+ STM32_FUNCTION(13, "FMC_D12"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(80, "PF0"),
+ STM32_FUNCTION(0, "GPIOF0"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(13, "FMC_A0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(81, "PF1"),
+ STM32_FUNCTION(0, "GPIOF1"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(13, "FMC_A1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(82, "PF2"),
+ STM32_FUNCTION(0, "GPIOF2"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(13, "FMC_A2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(83, "PF3"),
+ STM32_FUNCTION(0, "GPIOF3"),
+ STM32_FUNCTION(13, "FMC_A3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(84, "PF4"),
+ STM32_FUNCTION(0, "GPIOF4"),
+ STM32_FUNCTION(13, "FMC_A4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(85, "PF5"),
+ STM32_FUNCTION(0, "GPIOF5"),
+ STM32_FUNCTION(13, "FMC_A5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(86, "PF6"),
+ STM32_FUNCTION(0, "GPIOF6"),
+ STM32_FUNCTION(4, "TIM10_CH1"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(7, "SAI1_SD_B"),
+ STM32_FUNCTION(9, "UART7_RX"),
+ STM32_FUNCTION(13, "FMC_NIORD"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(87, "PF7"),
+ STM32_FUNCTION(0, "GPIOF7"),
+ STM32_FUNCTION(4, "TIM11_CH1"),
+ STM32_FUNCTION(6, "SPI5_SCK"),
+ STM32_FUNCTION(7, "SAI1_MCLK_B"),
+ STM32_FUNCTION(9, "UART7_TX"),
+ STM32_FUNCTION(13, "FMC_NREG"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(88, "PF8"),
+ STM32_FUNCTION(0, "GPIOF8"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(7, "SAI1_SCK_B"),
+ STM32_FUNCTION(10, "TIM13_CH1"),
+ STM32_FUNCTION(13, "FMC_NIOWR"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(89, "PF9"),
+ STM32_FUNCTION(0, "GPIOF9"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(7, "SAI1_FS_B"),
+ STM32_FUNCTION(10, "TIM14_CH1"),
+ STM32_FUNCTION(13, "FMC_CD"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(90, "PF10"),
+ STM32_FUNCTION(0, "GPIOF10"),
+ STM32_FUNCTION(13, "FMC_INTR"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(91, "PF11"),
+ STM32_FUNCTION(0, "GPIOF11"),
+ STM32_FUNCTION(6, "SPI5_MOSI"),
+ STM32_FUNCTION(13, "FMC_SDNRAS"),
+ STM32_FUNCTION(14, "DCMI_D12"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(92, "PF12"),
+ STM32_FUNCTION(0, "GPIOF12"),
+ STM32_FUNCTION(13, "FMC_A6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(93, "PF13"),
+ STM32_FUNCTION(0, "GPIOF13"),
+ STM32_FUNCTION(13, "FMC_A7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(94, "PF14"),
+ STM32_FUNCTION(0, "GPIOF14"),
+ STM32_FUNCTION(13, "FMC_A8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(95, "PF15"),
+ STM32_FUNCTION(0, "GPIOF15"),
+ STM32_FUNCTION(13, "FMC_A9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(96, "PG0"),
+ STM32_FUNCTION(0, "GPIOG0"),
+ STM32_FUNCTION(13, "FMC_A10"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(97, "PG1"),
+ STM32_FUNCTION(0, "GPIOG1"),
+ STM32_FUNCTION(13, "FMC_A11"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(98, "PG2"),
+ STM32_FUNCTION(0, "GPIOG2"),
+ STM32_FUNCTION(13, "FMC_A12"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(99, "PG3"),
+ STM32_FUNCTION(0, "GPIOG3"),
+ STM32_FUNCTION(13, "FMC_A13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(100, "PG4"),
+ STM32_FUNCTION(0, "GPIOG4"),
+ STM32_FUNCTION(13, "FMC_A14 FMC_BA0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(101, "PG5"),
+ STM32_FUNCTION(0, "GPIOG5"),
+ STM32_FUNCTION(13, "FMC_A15 FMC_BA1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(102, "PG6"),
+ STM32_FUNCTION(0, "GPIOG6"),
+ STM32_FUNCTION(13, "FMC_INT2"),
+ STM32_FUNCTION(14, "DCMI_D12"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(103, "PG7"),
+ STM32_FUNCTION(0, "GPIOG7"),
+ STM32_FUNCTION(9, "USART6_CK"),
+ STM32_FUNCTION(13, "FMC_INT3"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(104, "PG8"),
+ STM32_FUNCTION(0, "GPIOG8"),
+ STM32_FUNCTION(6, "SPI6_NSS"),
+ STM32_FUNCTION(9, "USART6_RTS"),
+ STM32_FUNCTION(12, "ETH_PPS_OUT"),
+ STM32_FUNCTION(13, "FMC_SDCLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(105, "PG9"),
+ STM32_FUNCTION(0, "GPIOG9"),
+ STM32_FUNCTION(9, "USART6_RX"),
+ STM32_FUNCTION(13, "FMC_NE2 FMC_NCE3"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(106, "PG10"),
+ STM32_FUNCTION(0, "GPIOG10"),
+ STM32_FUNCTION(10, "LCD_G3"),
+ STM32_FUNCTION(13, "FMC_NCE4_1 FMC_NE3"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(107, "PG11"),
+ STM32_FUNCTION(0, "GPIOG11"),
+ STM32_FUNCTION(12, "ETH_MII_TX_EN ETH_RMII_TX_EN"),
+ STM32_FUNCTION(13, "FMC_NCE4_2"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(108, "PG12"),
+ STM32_FUNCTION(0, "GPIOG12"),
+ STM32_FUNCTION(6, "SPI6_MISO"),
+ STM32_FUNCTION(9, "USART6_RTS"),
+ STM32_FUNCTION(10, "LCD_B4"),
+ STM32_FUNCTION(13, "FMC_NE4"),
+ STM32_FUNCTION(15, "LCD_B1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(109, "PG13"),
+ STM32_FUNCTION(0, "GPIOG13"),
+ STM32_FUNCTION(6, "SPI6_SCK"),
+ STM32_FUNCTION(9, "USART6_CTS"),
+ STM32_FUNCTION(12, "ETH_MII_TXD0 ETH_RMII_TXD0"),
+ STM32_FUNCTION(13, "FMC_A24"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(110, "PG14"),
+ STM32_FUNCTION(0, "GPIOG14"),
+ STM32_FUNCTION(6, "SPI6_MOSI"),
+ STM32_FUNCTION(9, "USART6_TX"),
+ STM32_FUNCTION(12, "ETH_MII_TXD1 ETH_RMII_TXD1"),
+ STM32_FUNCTION(13, "FMC_A25"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(111, "PG15"),
+ STM32_FUNCTION(0, "GPIOG15"),
+ STM32_FUNCTION(9, "USART6_CTS"),
+ STM32_FUNCTION(13, "FMC_SDNCAS"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(112, "PH0"),
+ STM32_FUNCTION(0, "GPIOH0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(113, "PH1"),
+ STM32_FUNCTION(0, "GPIOH1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(114, "PH2"),
+ STM32_FUNCTION(0, "GPIOH2"),
+ STM32_FUNCTION(12, "ETH_MII_CRS"),
+ STM32_FUNCTION(13, "FMC_SDCKE0"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(115, "PH3"),
+ STM32_FUNCTION(0, "GPIOH3"),
+ STM32_FUNCTION(12, "ETH_MII_COL"),
+ STM32_FUNCTION(13, "FMC_SDNE0"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(116, "PH4"),
+ STM32_FUNCTION(0, "GPIOH4"),
+ STM32_FUNCTION(5, "I2C2_SCL"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_NXT"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(117, "PH5"),
+ STM32_FUNCTION(0, "GPIOH5"),
+ STM32_FUNCTION(5, "I2C2_SDA"),
+ STM32_FUNCTION(6, "SPI5_NSS"),
+ STM32_FUNCTION(13, "FMC_SDNWE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(118, "PH6"),
+ STM32_FUNCTION(0, "GPIOH6"),
+ STM32_FUNCTION(5, "I2C2_SMBA"),
+ STM32_FUNCTION(6, "SPI5_SCK"),
+ STM32_FUNCTION(10, "TIM12_CH1"),
+ STM32_FUNCTION(12, "ETH_MII_RXD2"),
+ STM32_FUNCTION(13, "FMC_SDNE1"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(119, "PH7"),
+ STM32_FUNCTION(0, "GPIOH7"),
+ STM32_FUNCTION(5, "I2C3_SCL"),
+ STM32_FUNCTION(6, "SPI5_MISO"),
+ STM32_FUNCTION(12, "ETH_MII_RXD3"),
+ STM32_FUNCTION(13, "FMC_SDCKE1"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(120, "PH8"),
+ STM32_FUNCTION(0, "GPIOH8"),
+ STM32_FUNCTION(5, "I2C3_SDA"),
+ STM32_FUNCTION(13, "FMC_D16"),
+ STM32_FUNCTION(14, "DCMI_HSYNC"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(121, "PH9"),
+ STM32_FUNCTION(0, "GPIOH9"),
+ STM32_FUNCTION(5, "I2C3_SMBA"),
+ STM32_FUNCTION(10, "TIM12_CH2"),
+ STM32_FUNCTION(13, "FMC_D17"),
+ STM32_FUNCTION(14, "DCMI_D0"),
+ STM32_FUNCTION(15, "LCD_R3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(122, "PH10"),
+ STM32_FUNCTION(0, "GPIOH10"),
+ STM32_FUNCTION(3, "TIM5_CH1"),
+ STM32_FUNCTION(13, "FMC_D18"),
+ STM32_FUNCTION(14, "DCMI_D1"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(123, "PH11"),
+ STM32_FUNCTION(0, "GPIOH11"),
+ STM32_FUNCTION(3, "TIM5_CH2"),
+ STM32_FUNCTION(13, "FMC_D19"),
+ STM32_FUNCTION(14, "DCMI_D2"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(124, "PH12"),
+ STM32_FUNCTION(0, "GPIOH12"),
+ STM32_FUNCTION(3, "TIM5_CH3"),
+ STM32_FUNCTION(13, "FMC_D20"),
+ STM32_FUNCTION(14, "DCMI_D3"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(125, "PH13"),
+ STM32_FUNCTION(0, "GPIOH13"),
+ STM32_FUNCTION(4, "TIM8_CH1N"),
+ STM32_FUNCTION(10, "CAN1_TX"),
+ STM32_FUNCTION(13, "FMC_D21"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(126, "PH14"),
+ STM32_FUNCTION(0, "GPIOH14"),
+ STM32_FUNCTION(4, "TIM8_CH2N"),
+ STM32_FUNCTION(13, "FMC_D22"),
+ STM32_FUNCTION(14, "DCMI_D4"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(127, "PH15"),
+ STM32_FUNCTION(0, "GPIOH15"),
+ STM32_FUNCTION(4, "TIM8_CH3N"),
+ STM32_FUNCTION(13, "FMC_D23"),
+ STM32_FUNCTION(14, "DCMI_D11"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(128, "PI0"),
+ STM32_FUNCTION(0, "GPIOI0"),
+ STM32_FUNCTION(3, "TIM5_CH4"),
+ STM32_FUNCTION(6, "SPI2_NSS I2S2_WS"),
+ STM32_FUNCTION(13, "FMC_D24"),
+ STM32_FUNCTION(14, "DCMI_D13"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(129, "PI1"),
+ STM32_FUNCTION(0, "GPIOI1"),
+ STM32_FUNCTION(6, "SPI2_SCK I2S2_CK"),
+ STM32_FUNCTION(13, "FMC_D25"),
+ STM32_FUNCTION(14, "DCMI_D8"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(130, "PI2"),
+ STM32_FUNCTION(0, "GPIOI2"),
+ STM32_FUNCTION(4, "TIM8_CH4"),
+ STM32_FUNCTION(6, "SPI2_MISO"),
+ STM32_FUNCTION(7, "I2S2EXT_SD"),
+ STM32_FUNCTION(13, "FMC_D26"),
+ STM32_FUNCTION(14, "DCMI_D9"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(131, "PI3"),
+ STM32_FUNCTION(0, "GPIOI3"),
+ STM32_FUNCTION(4, "TIM8_ETR"),
+ STM32_FUNCTION(6, "SPI2_MOSI I2S2_SD"),
+ STM32_FUNCTION(13, "FMC_D27"),
+ STM32_FUNCTION(14, "DCMI_D10"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(132, "PI4"),
+ STM32_FUNCTION(0, "GPIOI4"),
+ STM32_FUNCTION(4, "TIM8_BKIN"),
+ STM32_FUNCTION(13, "FMC_NBL2"),
+ STM32_FUNCTION(14, "DCMI_D5"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(133, "PI5"),
+ STM32_FUNCTION(0, "GPIOI5"),
+ STM32_FUNCTION(4, "TIM8_CH1"),
+ STM32_FUNCTION(13, "FMC_NBL3"),
+ STM32_FUNCTION(14, "DCMI_VSYNC"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(134, "PI6"),
+ STM32_FUNCTION(0, "GPIOI6"),
+ STM32_FUNCTION(4, "TIM8_CH2"),
+ STM32_FUNCTION(13, "FMC_D28"),
+ STM32_FUNCTION(14, "DCMI_D6"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(135, "PI7"),
+ STM32_FUNCTION(0, "GPIOI7"),
+ STM32_FUNCTION(4, "TIM8_CH3"),
+ STM32_FUNCTION(13, "FMC_D29"),
+ STM32_FUNCTION(14, "DCMI_D7"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(136, "PI8"),
+ STM32_FUNCTION(0, "GPIOI8"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(137, "PI9"),
+ STM32_FUNCTION(0, "GPIOI9"),
+ STM32_FUNCTION(10, "CAN1_RX"),
+ STM32_FUNCTION(13, "FMC_D30"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(138, "PI10"),
+ STM32_FUNCTION(0, "GPIOI10"),
+ STM32_FUNCTION(12, "ETH_MII_RX_ER"),
+ STM32_FUNCTION(13, "FMC_D31"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(139, "PI11"),
+ STM32_FUNCTION(0, "GPIOI11"),
+ STM32_FUNCTION(11, "OTG_HS_ULPI_DIR"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(140, "PI12"),
+ STM32_FUNCTION(0, "GPIOI12"),
+ STM32_FUNCTION(15, "LCD_HSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(141, "PI13"),
+ STM32_FUNCTION(0, "GPIOI13"),
+ STM32_FUNCTION(15, "LCD_VSYNC"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(142, "PI14"),
+ STM32_FUNCTION(0, "GPIOI14"),
+ STM32_FUNCTION(15, "LCD_CLK"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(143, "PI15"),
+ STM32_FUNCTION(0, "GPIOI15"),
+ STM32_FUNCTION(15, "LCD_R0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(144, "PJ0"),
+ STM32_FUNCTION(0, "GPIOJ0"),
+ STM32_FUNCTION(15, "LCD_R1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(145, "PJ1"),
+ STM32_FUNCTION(0, "GPIOJ1"),
+ STM32_FUNCTION(15, "LCD_R2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(146, "PJ2"),
+ STM32_FUNCTION(0, "GPIOJ2"),
+ STM32_FUNCTION(15, "LCD_R3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(147, "PJ3"),
+ STM32_FUNCTION(0, "GPIOJ3"),
+ STM32_FUNCTION(15, "LCD_R4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(148, "PJ4"),
+ STM32_FUNCTION(0, "GPIOJ4"),
+ STM32_FUNCTION(15, "LCD_R5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(149, "PJ5"),
+ STM32_FUNCTION(0, "GPIOJ5"),
+ STM32_FUNCTION(15, "LCD_R6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(150, "PJ6"),
+ STM32_FUNCTION(0, "GPIOJ6"),
+ STM32_FUNCTION(15, "LCD_R7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(151, "PJ7"),
+ STM32_FUNCTION(0, "GPIOJ7"),
+ STM32_FUNCTION(15, "LCD_G0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(152, "PJ8"),
+ STM32_FUNCTION(0, "GPIOJ8"),
+ STM32_FUNCTION(15, "LCD_G1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(153, "PJ9"),
+ STM32_FUNCTION(0, "GPIOJ9"),
+ STM32_FUNCTION(15, "LCD_G2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(154, "PJ10"),
+ STM32_FUNCTION(0, "GPIOJ10"),
+ STM32_FUNCTION(15, "LCD_G3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(155, "PJ11"),
+ STM32_FUNCTION(0, "GPIOJ11"),
+ STM32_FUNCTION(15, "LCD_G4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(156, "PJ12"),
+ STM32_FUNCTION(0, "GPIOJ12"),
+ STM32_FUNCTION(15, "LCD_B0"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(157, "PJ13"),
+ STM32_FUNCTION(0, "GPIOJ13"),
+ STM32_FUNCTION(15, "LCD_B1"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(158, "PJ14"),
+ STM32_FUNCTION(0, "GPIOJ14"),
+ STM32_FUNCTION(15, "LCD_B2"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(159, "PJ15"),
+ STM32_FUNCTION(0, "GPIOJ15"),
+ STM32_FUNCTION(15, "LCD_B3"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(160, "PK0"),
+ STM32_FUNCTION(0, "GPIOK0"),
+ STM32_FUNCTION(15, "LCD_G5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(161, "PK1"),
+ STM32_FUNCTION(0, "GPIOK1"),
+ STM32_FUNCTION(15, "LCD_G6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(162, "PK2"),
+ STM32_FUNCTION(0, "GPIOK2"),
+ STM32_FUNCTION(15, "LCD_G7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(163, "PK3"),
+ STM32_FUNCTION(0, "GPIOK3"),
+ STM32_FUNCTION(15, "LCD_B4"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(164, "PK4"),
+ STM32_FUNCTION(0, "GPIOK4"),
+ STM32_FUNCTION(15, "LCD_B5"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(165, "PK5"),
+ STM32_FUNCTION(0, "GPIOK5"),
+ STM32_FUNCTION(15, "LCD_B6"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(166, "PK6"),
+ STM32_FUNCTION(0, "GPIOK6"),
+ STM32_FUNCTION(15, "LCD_B7"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+ STM32_PIN(
+ PINCTRL_PIN(167, "PK7"),
+ STM32_FUNCTION(0, "GPIOK7"),
+ STM32_FUNCTION(15, "LCD_DE"),
+ STM32_FUNCTION(16, "EVENTOUT"),
+ STM32_FUNCTION(17, "ANALOG")
+ ),
+};
+
+static struct stm32_pinctrl_match_data stm32f429_match_data = {
+ .pins = stm32f429_pins,
+ .npins = ARRAY_SIZE(stm32f429_pins),
+};
+
+static const struct of_device_id stm32f429_pctrl_match[] = {
+ {
+ .compatible = "st,stm32f429-pinctrl",
+ .data = &stm32f429_match_data,
+ },
+ { }
+};
+
+static struct platform_driver stm32f429_pinctrl_driver = {
+ .probe = stm32_pctl_probe,
+ .driver = {
+ .name = "stm32f429-pinctrl",
+ .of_match_table = stm32f429_pctrl_match,
+ },
+};
+
+static int __init stm32f429_pinctrl_init(void)
+{
+ return platform_driver_register(&stm32f429_pinctrl_driver);
+}
+device_initcall(stm32f429_pinctrl_init);
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index f8dbc8bec0e1..aaf075b972f5 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -1,67 +1,75 @@
if ARCH_SUNXI
-config PINCTRL_SUNXI_COMMON
+config PINCTRL_SUNXI
bool
select PINMUX
select GENERIC_PINCONF
config PINCTRL_SUN4I_A10
def_bool MACH_SUN4I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN5I_A10S
def_bool MACH_SUN5I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN5I_A13
def_bool MACH_SUN5I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN6I_A31
def_bool MACH_SUN6I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN6I_A31S
def_bool MACH_SUN6I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN6I_A31_R
def_bool MACH_SUN6I
depends on RESET_CONTROLLER
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN7I_A20
def_bool MACH_SUN7I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN8I_A23
def_bool MACH_SUN8I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN8I_A33
def_bool MACH_SUN8I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN8I_A83T
def_bool MACH_SUN8I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN8I_A23_R
def_bool MACH_SUN8I
depends on RESET_CONTROLLER
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN8I_H3
def_bool MACH_SUN8I
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN8I_H3_R
+ def_bool MACH_SUN8I
-	select PINCTRL_SUNXI_COMMON
+	select PINCTRL_SUNXI
config PINCTRL_SUN9I_A80
def_bool MACH_SUN9I
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
config PINCTRL_SUN9I_A80_R
def_bool MACH_SUN9I
depends on RESET_CONTROLLER
- select PINCTRL_SUNXI_COMMON
+ select PINCTRL_SUNXI
+
+config PINCTRL_SUN50I_A64
+ bool
+ select PINCTRL_SUNXI
endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index ef82f22bb9ef..2d8b64e222e0 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -1,5 +1,5 @@
# Core
-obj-$(CONFIG_PINCTRL_SUNXI_COMMON) += pinctrl-sunxi.o
+obj-y += pinctrl-sunxi.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_SUN4I_A10) += pinctrl-sun4i-a10.o
@@ -12,7 +12,9 @@ obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o
obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o
obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o
obj-$(CONFIG_PINCTRL_SUN8I_A33) += pinctrl-sun8i-a33.o
+obj-$(CONFIG_PINCTRL_SUN50I_A64) += pinctrl-sun50i-a64.o
obj-$(CONFIG_PINCTRL_SUN8I_A83T) += pinctrl-sun8i-a83t.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
+obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
obj-$(CONFIG_PINCTRL_SUN9I_A80) += pinctrl-sun9i-a80.o
obj-$(CONFIG_PINCTRL_SUN9I_A80_R) += pinctrl-sun9i-a80-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
new file mode 100644
index 000000000000..4f2a726bbaeb
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a64.c
@@ -0,0 +1,607 @@
+/*
+ * Allwinner A64 SoC pinctrl driver.
+ *
+ * Copyright (C) 2016 - ARM Ltd.
+ * Author: Andre Przywara <andre.przywara@arm.com>
+ *
+ * Based on pinctrl-sun7i-a20.c, which is:
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
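+/*
+ * Pin map for the A64 main PIO controller, ports B to H. The
+ * "Hole" comments mark gaps in the consecutive pin numbering
+ * between banks.
+ */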
+static const struct sunxi_desc_pin a64_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x4, "jtag"), /* MS0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x4, "jtag"), /* CK0 */
+ SUNXI_FUNCTION(0x5, "sim"), /* VCCEN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x4, "jtag"), /* DO0 */
+ SUNXI_FUNCTION(0x5, "sim"), /* VPPEN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x4, "jtag"), /* DI0 */
+ SUNXI_FUNCTION(0x5, "sim"), /* VPPPP */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "aif2"), /* SYNC */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* SYNC */
+ SUNXI_FUNCTION(0x5, "sim"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "aif2"), /* BCLK */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION(0x5, "sim"), /* DATA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "aif2"), /* DOUT */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* DOUT */
+ SUNXI_FUNCTION(0x5, "sim"), /* RST */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "aif2"), /* DIN */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* DIN */
+ SUNXI_FUNCTION(0x5, "sim"), /* DET */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart0"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "uart0"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* EINT9 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NWE */
+ SUNXI_FUNCTION(0x4, "spi0")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NALE */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* DS */
+ SUNXI_FUNCTION(0x4, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
+ SUNXI_FUNCTION(0x4, "spi0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCE1 */
+ SUNXI_FUNCTION(0x4, "spi0")), /* CS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NCE0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NRE# */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NRB0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NRB1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQS */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CS */
+ SUNXI_FUNCTION(0x5, "ccir")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x4, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x5, "ccir")), /* DE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* TX */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x5, "ccir")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* RX */
+ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x5, "ccir")), /* VSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* RTS */
+ SUNXI_FUNCTION(0x5, "ccir")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "uart4"), /* CTS */
+ SUNXI_FUNCTION(0x5, "ccir")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x5, "ccir")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x5, "ccir")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x4, "emac"), /* ERXD3 */
+ SUNXI_FUNCTION(0x5, "ccir")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x4, "emac"), /* ERXD2 */
+ SUNXI_FUNCTION(0x5, "ccir")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x4, "emac")), /* ERXD1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x4, "emac")), /* ERXD0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VP0 */
+ SUNXI_FUNCTION(0x4, "emac")), /* ERXCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VN0 */
+ SUNXI_FUNCTION(0x4, "emac")), /* ERXCTL */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VP1 */
+ SUNXI_FUNCTION(0x4, "emac")), /* ENULL */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VN1 */
+ SUNXI_FUNCTION(0x4, "emac"), /* ETXD3 */
+ SUNXI_FUNCTION(0x5, "ccir")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VP2 */
+ SUNXI_FUNCTION(0x4, "emac"), /* ETXD2 */
+ SUNXI_FUNCTION(0x5, "ccir")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VN2 */
+ SUNXI_FUNCTION(0x4, "emac")), /* ETXD1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VPC */
+ SUNXI_FUNCTION(0x4, "emac")), /* ETXD0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VNC */
+ SUNXI_FUNCTION(0x4, "emac")), /* ETXCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VP3 */
+ SUNXI_FUNCTION(0x4, "emac")), /* ETXCTL */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "lvds0"), /* VN3 */
+ SUNXI_FUNCTION(0x4, "emac")), /* ECLKIN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm"), /* PWM0 */
+ SUNXI_FUNCTION(0x4, "emac")), /* EMDC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x4, "emac")), /* EMDIO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* PCK */
+ SUNXI_FUNCTION(0x4, "ts0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* CK */
+ SUNXI_FUNCTION(0x4, "ts0")), /* ERR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* HSYNC */
+ SUNXI_FUNCTION(0x4, "ts0")), /* SYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* VSYNC */
+ SUNXI_FUNCTION(0x4, "ts0")), /* DVLD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* D0 */
+ SUNXI_FUNCTION(0x4, "ts0")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* D1 */
+ SUNXI_FUNCTION(0x4, "ts0")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* D2 */
+ SUNXI_FUNCTION(0x4, "ts0")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "ts0")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* D4 */
+ SUNXI_FUNCTION(0x4, "ts0")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* D5 */
+ SUNXI_FUNCTION(0x4, "ts0")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* D6 */
+ SUNXI_FUNCTION(0x4, "ts0")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0"), /* D7 */
+ SUNXI_FUNCTION(0x4, "ts0")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi0")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pll"), /* LOCK_DBG */
+ SUNXI_FUNCTION(0x3, "i2c2")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2c2")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* MSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DI1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DO1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* CK1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)), /* EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 8)), /* EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 9)), /* EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "aif3"), /* SYNC */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 10)), /* EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "aif3"), /* BCLK */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 11)), /* EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "aif3"), /* DOUT */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* DOUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 12)), /* EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "aif3"), /* DIN */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* DIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 13)), /* EINT13 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spdif"), /* OUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mic"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mic"), /* DATA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* EINT11 */
+};
+
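+/* EINTs are spread over three IRQ banks: port B (0), G (1) and H (2). */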
+static const struct sunxi_pinctrl_desc a64_pinctrl_data = {
+ .pins = a64_pins,
+ .npins = ARRAY_SIZE(a64_pins),
+ .irq_banks = 3,
+};
+
+static int a64_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &a64_pinctrl_data);
+}
+
+static const struct of_device_id a64_pinctrl_match[] = {
+ { .compatible = "allwinner,sun50i-a64-pinctrl", },
+ {}
+};
+
+static struct platform_driver a64_pinctrl_driver = {
+ .probe = a64_pinctrl_probe,
+ .driver = {
+ .name = "sun50i-a64-pinctrl",
+ .of_match_table = a64_pinctrl_match,
+ },
+};
+builtin_platform_driver(a64_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
index cf1ce0c02600..435ad30f45db 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
@@ -343,26 +343,22 @@ static const struct sunxi_desc_pin sun7i_a20_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCE4 */
- SUNXI_FUNCTION(0x3, "spi2"), /* CS0 */
- SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
+ SUNXI_FUNCTION(0x3, "spi2")), /* CS0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 20),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCE5 */
- SUNXI_FUNCTION(0x3, "spi2"), /* CLK */
- SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */
+ SUNXI_FUNCTION(0x3, "spi2")), /* CLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 21),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCE6 */
- SUNXI_FUNCTION(0x3, "spi2"), /* MOSI */
- SUNXI_FUNCTION_IRQ(0x6, 14)), /* EINT14 */
+ SUNXI_FUNCTION(0x3, "spi2")), /* MOSI */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 22),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "nand0"), /* NCE7 */
- SUNXI_FUNCTION(0x3, "spi2"), /* MISO */
- SUNXI_FUNCTION_IRQ(0x6, 15)), /* EINT15 */
+ SUNXI_FUNCTION(0x3, "spi2")), /* MISO */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 23),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -960,65 +956,65 @@ static const struct sunxi_desc_pin sun7i_a20_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* CS0 */
SUNXI_FUNCTION(0x3, "uart5"), /* TX */
- SUNXI_FUNCTION_IRQ(0x5, 22)), /* EINT22 */
+ SUNXI_FUNCTION_IRQ(0x6, 22)), /* EINT22 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* CLK */
SUNXI_FUNCTION(0x3, "uart5"), /* RX */
- SUNXI_FUNCTION_IRQ(0x5, 23)), /* EINT23 */
+ SUNXI_FUNCTION_IRQ(0x6, 23)), /* EINT23 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
SUNXI_FUNCTION(0x3, "uart6"), /* TX */
SUNXI_FUNCTION(0x4, "clk_out_a"), /* CLK_OUT_A */
- SUNXI_FUNCTION_IRQ(0x5, 24)), /* EINT24 */
+ SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
SUNXI_FUNCTION(0x3, "uart6"), /* RX */
SUNXI_FUNCTION(0x4, "clk_out_b"), /* CLK_OUT_B */
- SUNXI_FUNCTION_IRQ(0x5, 25)), /* EINT25 */
+ SUNXI_FUNCTION_IRQ(0x6, 25)), /* EINT25 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* CS1 */
SUNXI_FUNCTION(0x3, "ps2"), /* SCK1 */
SUNXI_FUNCTION(0x4, "timer4"), /* TCLKIN0 */
- SUNXI_FUNCTION_IRQ(0x5, 26)), /* EINT26 */
+ SUNXI_FUNCTION_IRQ(0x6, 26)), /* EINT26 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
SUNXI_FUNCTION(0x3, "ps2"), /* SDA1 */
SUNXI_FUNCTION(0x4, "timer5"), /* TCLKIN1 */
- SUNXI_FUNCTION_IRQ(0x5, 27)), /* EINT27 */
+ SUNXI_FUNCTION_IRQ(0x6, 27)), /* EINT27 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
SUNXI_FUNCTION(0x3, "uart2"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x5, 28)), /* EINT28 */
+ SUNXI_FUNCTION_IRQ(0x6, 28)), /* EINT28 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
SUNXI_FUNCTION(0x3, "uart2"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x5, 29)), /* EINT29 */
+ SUNXI_FUNCTION_IRQ(0x6, 29)), /* EINT29 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
SUNXI_FUNCTION(0x3, "uart2"), /* TX */
- SUNXI_FUNCTION_IRQ(0x5, 30)), /* EINT30 */
+ SUNXI_FUNCTION_IRQ(0x6, 30)), /* EINT30 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
SUNXI_FUNCTION(0x3, "uart2"), /* RX */
- SUNXI_FUNCTION_IRQ(0x5, 31)), /* EINT31 */
+ SUNXI_FUNCTION_IRQ(0x6, 31)), /* EINT31 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 20),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 00265f0435a7..8b381d69df86 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = {
.pins = sun8i_a33_pins,
.npins = ARRAY_SIZE(sun8i_a33_pins),
.irq_banks = 2,
+	.irq_bank_base = 1,	/* first IRQ bank lives at register bank 1 */
};
static int sun8i_a33_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
new file mode 100644
index 000000000000..686ec212120b
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c
@@ -0,0 +1,116 @@
+/*
+ * Allwinner H3 SoC R_PIO pinctrl driver.
+ *
+ * Copyright (C) 2016 Krzysztof Adamski <k@japko.eu>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
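+/*
+ * The R_PIO controller handles the PL bank only: twelve pins
+ * muxing the s_twi, s_uart, s_jtag, s_pwm and s_cir_rx functions,
+ * with an external interrupt available on every pin.
+ */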
+static const struct sunxi_desc_pin sun8i_h3_r_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_twi"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PL_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_twi"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PL_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PL_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PL_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PL_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PL_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PL_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PL_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PL_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PL_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_pwm"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PL_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_cir_rx"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PL_EINT11 */
+};
+
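+/*
+ * Pins are numbered from PL_BASE rather than zero. Reading a pin
+ * that is muxed as an interrupt requires temporarily switching it
+ * back to the input function, hence irq_read_needs_mux.
+ */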
+static const struct sunxi_pinctrl_desc sun8i_h3_r_pinctrl_data = {
+ .pins = sun8i_h3_r_pins,
+ .npins = ARRAY_SIZE(sun8i_h3_r_pins),
+ .irq_banks = 1,
+ .pin_base = PL_BASE,
+	.irq_read_needs_mux = true,
+};
+
+static int sun8i_h3_r_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun8i_h3_r_pinctrl_data);
+}
+
+static const struct of_device_id sun8i_h3_r_pinctrl_match[] = {
+ { .compatible = "allwinner,sun8i-h3-r-pinctrl", },
+ {}
+};
+
+static struct platform_driver sun8i_h3_r_pinctrl_driver = {
+ .probe = sun8i_h3_r_pinctrl_probe,
+ .driver = {
+ .name = "sun8i-h3-r-pinctrl",
+ .of_match_table = sun8i_h3_r_pinctrl_match,
+ },
+};
+builtin_platform_driver(sun8i_h3_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
index 42547ffa20a8..92a873f73697 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c
@@ -9,7 +9,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -164,7 +164,6 @@ static const struct of_device_id sun9i_a80_r_pinctrl_match[] = {
{ .compatible = "allwinner,sun9i-a80-r-pinctrl", },
{}
};
-MODULE_DEVICE_TABLE(of, sun9i_a80_r_pinctrl_match);
static struct platform_driver sun9i_a80_r_pinctrl_driver = {
.probe = sun9i_a80_r_pinctrl_probe,
@@ -174,8 +173,4 @@ static struct platform_driver sun9i_a80_r_pinctrl_driver = {
.of_match_table = sun9i_a80_r_pinctrl_match,
},
};
-module_platform_driver(sun9i_a80_r_pinctrl_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner A80 R_PIO pinctrl driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sun9i_a80_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 7a2465f5e71e..3b017dbd289c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -15,7 +15,7 @@
#include <linux/gpio/driver.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -457,17 +457,18 @@ static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
u32 reg = sunxi_data_reg(offset);
u8 index = sunxi_data_offset(offset);
- u32 set_mux = pctl->desc->irq_read_needs_mux &&
- test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
+ bool set_mux = pctl->desc->irq_read_needs_mux &&
+ gpiochip_line_is_irq(chip, offset);
+	u32 pin = offset + chip->base;	/* chip offset to global pin number */
u32 val;
if (set_mux)
- sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_INPUT);
+ sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_INPUT);
val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
if (set_mux)
- sunxi_pmx_set(pctl->pctl_dev, offset, SUN4I_FUNC_IRQ);
+ sunxi_pmx_set(pctl->pctl_dev, pin, SUN4I_FUNC_IRQ);
return !!val;
}
@@ -578,7 +579,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_cfg_reg(d->hwirq);
+ u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
unsigned long flags;
u32 regval;
@@ -625,7 +626,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
static void sunxi_pinctrl_irq_ack(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 status_reg = sunxi_irq_status_reg(d->hwirq);
+ u32 status_reg = sunxi_irq_status_reg(d->hwirq,
+ pctl->desc->irq_bank_base);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
/* Clear the IRQ */
@@ -635,7 +637,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d)
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@@ -652,7 +654,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
+ u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
@@ -744,7 +746,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
if (bank == pctl->desc->irq_banks)
return;
- reg = sunxi_irq_status_reg_from_bank(bank);
+ reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base);
val = readl(pctl->membase + reg);
if (val) {
@@ -1023,9 +1025,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
for (i = 0; i < pctl->desc->irq_banks; i++) {
/* Mask and clear all IRQs before registering a handler */
- writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+ writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
writel(0xffffffff,
- pctl->membase + sunxi_irq_status_reg_from_bank(i));
+ pctl->membase + sunxi_irq_status_reg_from_bank(i,
+ pctl->desc->irq_bank_base));
irq_set_chained_handler_and_data(pctl->irq[i],
sunxi_pinctrl_irq_handler,
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e248e81a0f9e..0afce1ab12d0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc {
int npins;
unsigned pin_base;
unsigned irq_banks;
+ unsigned irq_bank_base;
bool irq_read_needs_mux;
};
@@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin)
return pin_num * PULL_PINS_BITS;
}
-static inline u32 sunxi_irq_cfg_reg(u16 irq)
+static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
- return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
+ return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}
static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
return irq_num * IRQ_CFG_IRQ_BITS;
}
-static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base)
{
- return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+ return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
-static inline u32 sunxi_irq_ctrl_reg(u16 irq)
+static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
- return sunxi_irq_ctrl_reg_from_bank(bank);
+ return sunxi_irq_ctrl_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
return irq_num * IRQ_CTRL_IRQ_BITS;
}
-static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base)
{
- return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+ return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE;
}
-static inline u32 sunxi_irq_status_reg(u16 irq)
+static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base)
{
u8 bank = irq / IRQ_PER_BANK;
- return sunxi_irq_status_reg_from_bank(bank);
+ return sunxi_irq_status_reg_from_bank(bank, bank_base);
}
static inline u32 sunxi_irq_status_offset(u16 irq)
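The new irq_bank_base field covers SoCs whose first IRQ-capable GPIO bank does not sit in the first interrupt register window, shifting every cfg/ctrl/status lookup by whole IRQ_MEM_SIZE windows. A standalone sketch of the arithmetic, assuming the register constants defined earlier in this header (IRQ_CFG_REG = 0x200, IRQ_MEM_SIZE = 0x20, IRQ_PER_BANK = 32, eight 4-bit configs per 32-bit register):

#include <stdint.h>
#include <stdio.h>

#define IRQ_CFG_REG		0x200
#define IRQ_MEM_SIZE		0x20
#define IRQ_PER_BANK		32
#define IRQ_CFG_IRQ_PER_REG	8

static uint32_t irq_cfg_reg(uint16_t irq, unsigned int bank_base)
{
	uint8_t bank = irq / IRQ_PER_BANK;
	uint8_t reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;

	/* bank_base shifts the lookup by whole register windows */
	return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg;
}

int main(void)
{
	/* hwirq 0: 0x200 with bank_base 0, 0x220 when the first
	 * IRQ bank occupies the second register window */
	printf("0x%03x 0x%03x\n",
	       (unsigned int)irq_cfg_reg(0, 0),
	       (unsigned int)irq_cfg_reg(0, 1));
	return 0;
}

The ctrl and status helpers follow the same pattern, so a single per-SoC value in sunxi_pinctrl_desc adjusts all three register families at once.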
diff --git a/drivers/pinctrl/tegra/Kconfig b/drivers/pinctrl/tegra/Kconfig
new file mode 100644
index 000000000000..24e20cc08d5b
--- /dev/null
+++ b/drivers/pinctrl/tegra/Kconfig
@@ -0,0 +1,30 @@
+config PINCTRL_TEGRA
+ bool
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_TEGRA20
+ bool
+ select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA30
+ bool
+ select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA114
+ bool
+ select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA124
+ bool
+ select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA210
+ bool
+ select PINCTRL_TEGRA
+
+config PINCTRL_TEGRA_XUSB
+ def_bool y if ARCH_TEGRA
+ select GENERIC_PHY
+ select PINCONF
+ select PINMUX
diff --git a/drivers/pinctrl/tegra/Makefile b/drivers/pinctrl/tegra/Makefile
new file mode 100644
index 000000000000..a927379b6794
--- /dev/null
+++ b/drivers/pinctrl/tegra/Makefile
@@ -0,0 +1,7 @@
+obj-y += pinctrl-tegra.o
+obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
+obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
+obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
+obj-$(CONFIG_PINCTRL_TEGRA124) += pinctrl-tegra124.o
+obj-$(CONFIG_PINCTRL_TEGRA210) += pinctrl-tegra210.o
+obj-$(CONFIG_PINCTRL_TEGRA_XUSB) += pinctrl-tegra-xusb.o
diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index bd3aa5a4fd6d..2f06029c9405 100644
--- a/drivers/pinctrl/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -24,8 +24,8 @@
#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
-#include "core.h"
-#include "pinctrl-utils.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
#define XUSB_PADCTL_ELPG_PROGRAM 0x01c
#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN (1 << 26)
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 9da4da219a07..49388822c0e9 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -30,9 +30,9 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/slab.h>
-#include "core.h"
+#include "../core.h"
+#include "../pinctrl-utils.h"
#include "pinctrl-tegra.h"
-#include "pinctrl-utils.h"
struct tegra_pmx {
struct device *dev;
diff --git a/drivers/pinctrl/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index 1615db7e3a4b..1615db7e3a4b 100644
--- a/drivers/pinctrl/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
index 05e49d5137ab..05e49d5137ab 100644
--- a/drivers/pinctrl/pinctrl-tegra114.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
index 7cd44c7c296d..7cd44c7c296d 100644
--- a/drivers/pinctrl/pinctrl-tegra124.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c
diff --git a/drivers/pinctrl/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index 4833db4433d9..4833db4433d9 100644
--- a/drivers/pinctrl/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
diff --git a/drivers/pinctrl/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
index 252b464901c0..252b464901c0 100644
--- a/drivers/pinctrl/pinctrl-tegra210.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c
diff --git a/drivers/pinctrl/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
index 47b2fd8bb2e9..47b2fd8bb2e9 100644
--- a/drivers/pinctrl/pinctrl-tegra30.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
index 7abd614dc383..0b40ded5738f 100644
--- a/drivers/pinctrl/uniphier/Kconfig
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -1,6 +1,6 @@
menuconfig PINCTRL_UNIPHIER
bool "UniPhier SoC pinctrl drivers"
- depends on ARCH_UNIPHIER
+ depends on ARCH_UNIPHIER || COMPILE_TEST
depends on OF && MFD_SYSCON
default y
select PINMUX
@@ -8,27 +8,27 @@ menuconfig PINCTRL_UNIPHIER
if PINCTRL_UNIPHIER
-config PINCTRL_UNIPHIER_PH1_LD4
+config PINCTRL_UNIPHIER_LD4
tristate "UniPhier PH1-LD4 SoC pinctrl driver"
default y
-config PINCTRL_UNIPHIER_PH1_PRO4
+config PINCTRL_UNIPHIER_PRO4
tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
default y
-config PINCTRL_UNIPHIER_PH1_SLD8
+config PINCTRL_UNIPHIER_SLD8
tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
default y
-config PINCTRL_UNIPHIER_PH1_PRO5
+config PINCTRL_UNIPHIER_PRO5
tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
default y
-config PINCTRL_UNIPHIER_PROXSTREAM2
+config PINCTRL_UNIPHIER_PXS2
tristate "UniPhier ProXstream2 SoC pinctrl driver"
default y
-config PINCTRL_UNIPHIER_PH1_LD6B
+config PINCTRL_UNIPHIER_LD6B
tristate "UniPhier PH1-LD6b SoC pinctrl driver"
default y
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
index e7ce9670306c..3b8f9ee0bb6f 100644
--- a/drivers/pinctrl/uniphier/Makefile
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -1,8 +1,8 @@
-obj-y += pinctrl-uniphier-core.o
+obj-y += pinctrl-uniphier-core.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4) += pinctrl-ph1-ld4.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4) += pinctrl-ph1-pro4.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_SLD8) += pinctrl-ph1-sld8.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO5) += pinctrl-ph1-pro5.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PROXSTREAM2) += pinctrl-proxstream2.o
-obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD6B) += pinctrl-ph1-ld6b.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_LD4) += pinctrl-uniphier-ld4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PRO4) += pinctrl-uniphier-pro4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_SLD8) += pinctrl-uniphier-sld8.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PRO5) += pinctrl-uniphier-pro5.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PXS2) += pinctrl-uniphier-pxs2.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_LD6B) += pinctrl-uniphier-ld6b.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
index a7056dccfa53..a7056dccfa53 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
index 1824831bb4da..1824831bb4da 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index ec8e92dfaf8c..ec8e92dfaf8c 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
index e3d648eae85a..e3d648eae85a 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
index bc00d7591c59..bc00d7591c59 100644
--- a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
index c3700a33a5da..c3700a33a5da 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 0adccbf5c83f..c11db8bceea1 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -4,8 +4,7 @@ endif
if MIPS
source "drivers/platform/mips/Kconfig"
endif
-if GOLDFISH
+
source "drivers/platform/goldfish/Kconfig"
-endif
source "drivers/platform/chrome/Kconfig"
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 635ef25cc722..fefbb8370da0 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -1,5 +1,24 @@
+menuconfig GOLDFISH
+ bool "Platform support for Goldfish virtual devices"
+ depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
+ depends on HAS_IOMEM
+ ---help---
+ Say Y here to get to see options for the Goldfish virtual platform.
+ This option alone does not add any kernel code.
+
+	  Unless you are building for the Android Goldfish emulator, say N here.
+
+if GOLDFISH
+
+config GOLDFISH_BUS
+ bool "Goldfish platform bus"
+ ---help---
+ This is a virtual bus to host Goldfish Android Virtual Devices.
+
config GOLDFISH_PIPE
tristate "Goldfish virtual device for QEMU pipes"
---help---
This is a virtual device to drive the QEMU pipe interface used by
the Goldfish Android Virtual Device.
+
+endif # GOLDFISH
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index a0022395eee9..d3487125838c 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -1,5 +1,5 @@
#
# Makefile for Goldfish platform specific drivers
#
-obj-$(CONFIG_GOLDFISH) += pdev_bus.o
+obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o
obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index e7a29e2750c6..07462d79d040 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -2,6 +2,7 @@
* Copyright (C) 2011 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -57,6 +58,9 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/acpi.h>
/*
* IMPORTANT: The following constants must match the ones used and defined
@@ -75,6 +79,7 @@
#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */
+#define PIPE_REG_VERSION 0x24 /* read: device version */
/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN 1 /* open new channel */
@@ -90,12 +95,6 @@
#define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing
is possible */
-
-/* The following commands are related to read operations, they must be
- * listed in the same order than the corresponding write ones, since we
- * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
- * in goldfish_pipe_read_write() below.
- */
#define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading
* is possible */
@@ -130,6 +129,7 @@ struct goldfish_pipe_dev {
unsigned char __iomem *base;
struct access_params *aps;
int irq;
+ u32 version;
};
static struct goldfish_pipe_dev pipe_dev[1];
@@ -217,17 +217,16 @@ static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
static int setup_access_params_addr(struct platform_device *pdev,
struct goldfish_pipe_dev *dev)
{
- u64 paddr;
+ dma_addr_t dma_handle;
struct access_params *aps;
- aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
+ aps = dmam_alloc_coherent(&pdev->dev, sizeof(struct access_params),
+ &dma_handle, GFP_KERNEL);
if (!aps)
- return -1;
+ return -ENOMEM;
- /* FIXME */
- paddr = __pa(aps);
- writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
- writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);
+ writel(upper_32_bits(dma_handle), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
+ writel(lower_32_bits(dma_handle), dev->base + PIPE_REG_PARAMS_ADDR_LOW);
if (valid_batchbuffer_addr(dev, aps)) {
dev->aps = aps;
@@ -263,19 +262,14 @@ static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
return 0;
}
-/* This function is used for both reading from and writing to a given
- * pipe.
- */
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
- size_t bufflen, int is_write)
+ size_t bufflen, int is_write)
{
unsigned long irq_flags;
struct goldfish_pipe *pipe = filp->private_data;
struct goldfish_pipe_dev *dev = pipe->dev;
- const int cmd_offset = is_write ? 0
- : (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
unsigned long address, address_end;
- int ret = 0;
+ int count = 0, ret = -EINVAL;
/* If the emulator already closed the pipe, no need to go further */
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -298,79 +292,106 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
address_end = address + bufflen;
while (address < address_end) {
- unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
- unsigned long next = page_end < address_end ? page_end
- : address_end;
- unsigned long avail = next - address;
+ unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
+ unsigned long next = page_end < address_end ? page_end
+ : address_end;
+ unsigned long avail = next - address;
int status, wakeBit;
+ struct page *page;
+
+ /* Either vaddr or paddr depending on the device version */
+ unsigned long xaddr;
+
+ /*
+	 * We grab the pages on a page-by-page basis: user space may
+	 * hand us a potentially huge buffer, but if the read returns
+	 * only a small amount there is no need to pin that much
+	 * memory to the process.
+ */
+ down_read(&current->mm->mmap_sem);
+ ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (ret < 0)
+ break;
- /* Ensure that the corresponding page is properly mapped */
- /* FIXME: this isn't safe or sufficient - use get_user_pages */
- if (is_write) {
- char c;
- /* Ensure that the page is mapped and readable */
- if (__get_user(c, (char __user *)address)) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
+ if (dev->version) {
+ /* Device version 1 or newer (qemu-android) expects the
+ * physical address.
+ */
+ xaddr = page_to_phys(page) | (address & ~PAGE_MASK);
} else {
- /* Ensure that the page is mapped and writable */
- if (__put_user(0, (char __user *)address)) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
+ /* Device version 0 (classic emulator) expects the
+ * virtual address.
+ */
+ xaddr = address;
}
/* Now, try to transfer the bytes in the current page */
spin_lock_irqsave(&dev->lock, irq_flags);
- if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
- address, avail, pipe, &status)) {
+ if (access_with_param(dev,
+ is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
+ xaddr, avail, pipe, &status)) {
gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
dev->base + PIPE_REG_CHANNEL_HIGH);
writel(avail, dev->base + PIPE_REG_SIZE);
- gf_write_ptr((void *)address,
+ gf_write_ptr((void *)xaddr,
dev->base + PIPE_REG_ADDRESS,
dev->base + PIPE_REG_ADDRESS_HIGH);
- writel(CMD_WRITE_BUFFER + cmd_offset,
+ writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
dev->base + PIPE_REG_COMMAND);
status = readl(dev->base + PIPE_REG_STATUS);
}
spin_unlock_irqrestore(&dev->lock, irq_flags);
+ if (status > 0 && !is_write)
+ set_page_dirty(page);
+ put_page(page);
+
if (status > 0) { /* Correct transfer */
- ret += status;
+ count += status;
address += status;
continue;
- }
-
- if (status == 0) /* EOF */
+ } else if (status == 0) { /* EOF */
+ ret = 0;
break;
-
- /* An error occured. If we already transfered stuff, just
- * return with its count. We expect the next call to return
- * an error code */
- if (ret > 0)
+ } else if (status < 0 && count > 0) {
+ /*
+ * An error occurred and we already transferred
+ * something on one of the previous pages.
+			 * Just return what we already copied and log the
+			 * error.
+			 *
+			 * Note: this seems like an incorrect approach, but
+			 * we cannot change it until we check whether any
+			 * user-space ABI relies on this behavior.
+ */
+ if (status != PIPE_ERROR_AGAIN)
+ pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
+ status, is_write ? "write" : "read");
+ ret = 0;
break;
+ }
- /* If the error is not PIPE_ERROR_AGAIN, or if we are not in
- * non-blocking mode, just return the error code.
- */
+ /*
+ * If the error is not PIPE_ERROR_AGAIN, or if we are not in
+ * non-blocking mode, just return the error code.
+ */
if (status != PIPE_ERROR_AGAIN ||
(filp->f_flags & O_NONBLOCK) != 0) {
ret = goldfish_pipe_error_convert(status);
break;
}
- /* We will have to wait until more data/space is available.
- * First, mark the pipe as waiting for a specific wake signal.
- */
+ /*
+ * The backend blocked the read/write, wait until the backend
+ * tells us it's ready to process more data.
+ */
wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
set_bit(wakeBit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
- goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
+ goldfish_cmd(pipe,
+ is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);
/* Unlock the pipe, then wait for the wake signal */
mutex_unlock(&pipe->lock);
@@ -388,12 +409,13 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
/* Try to re-acquire the lock */
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
-
- /* Try the transfer again */
- continue;
}
mutex_unlock(&pipe->lock);
- return ret;
+
+ if (ret < 0)
+ return ret;
+ else
+ return count;
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
@@ -446,10 +468,11 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
unsigned long irq_flags;
int count = 0;
- /* We're going to read from the emulator a list of (channel,flags)
- * pairs corresponding to the wake events that occured on each
- * blocked pipe (i.e. channel).
- */
+ /*
+ * We're going to read from the emulator a list of (channel,flags)
+ * pairs corresponding to the wake events that occurred on each
+ * blocked pipe (i.e. channel).
+ */
spin_lock_irqsave(&dev->lock, irq_flags);
for (;;) {
/* First read the channel, 0 means the end of the list */
@@ -600,6 +623,12 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
goto error;
}
setup_access_params_addr(pdev, dev);
+
+ /* Although the pipe device in the classic Android emulator does not
+ * recognize the 'version' register, it won't treat this as an error
+ * either and will simply return 0, which is fine.
+ */
+ dev->version = readl(dev->base + PIPE_REG_VERSION);
return 0;
error:
@@ -615,11 +644,26 @@ static int goldfish_pipe_remove(struct platform_device *pdev)
return 0;
}
+static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
+ { "GFSH0003", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
+
+static const struct of_device_id goldfish_pipe_of_match[] = {
+ { .compatible = "google,android-pipe", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
+
static struct platform_driver goldfish_pipe = {
.probe = goldfish_pipe_probe,
.remove = goldfish_pipe_remove,
.driver = {
- .name = "goldfish_pipe"
+ .name = "goldfish_pipe",
+ .owner = THIS_MODULE,
+ .of_match_table = goldfish_pipe_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
}
};
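The rewritten transfer loop drops the old __get_user()/__put_user() page-touching trick in favor of real page pinning, and hands the emulator whichever address form the version register (read once at probe) calls for. Condensed into one hypothetical helper, this is only a sketch: error paths, locking, and page release are elided, and the six-argument get_user_pages() form is the one current at the time of this diff:

static int pipe_pin_and_translate(struct goldfish_pipe_dev *dev,
				  unsigned long address, int is_write,
				  struct page **page, unsigned long *xaddr)
{
	int ret;

	/* Pin exactly one user page; it must be writable when we
	 * read from the pipe into user memory. */
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(address, 1, !is_write, 0, page, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret < 0)
		return ret;

	if (dev->version)
		/* v1+ (qemu-android) expects a physical address */
		*xaddr = page_to_phys(*page) | (address & ~PAGE_MASK);
	else
		/* v0 (classic emulator) expects the virtual address */
		*xaddr = address;

	return 0;
}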
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 69f93a576e45..ed2004be13cf 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -91,10 +91,21 @@ config ASUS_LAPTOP
If you have an ACPI-compatible ASUS laptop, say Y or M here.
+config DELL_SMBIOS
+ tristate "Dell SMBIOS Support"
+ depends on DCDBAS
+ default n
+ ---help---
+ This module provides common functions for kernel modules using
+ Dell SMBIOS.
+
+ If you have a Dell laptop, say Y or M here.
+
config DELL_LAPTOP
tristate "Dell Laptop Extras"
depends on X86
- depends on DCDBAS
+ depends on DELL_SMBIOS
+ depends on DMI
depends on BACKLIGHT_CLASS_DEVICE
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on RFKILL || RFKILL = n
@@ -110,8 +121,10 @@ config DELL_LAPTOP
config DELL_WMI
tristate "Dell WMI extras"
depends on ACPI_WMI
+ depends on DMI
depends on INPUT
depends on ACPI_VIDEO || ACPI_VIDEO = n
+ depends on DELL_SMBIOS
select INPUT_SPARSEKMAP
---help---
Say Y here if you want to support WMI-based hotkeys on Dell laptops.
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 40574e7390f3..448443c3baba 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
+obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o
obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
obj-$(CONFIG_DELL_WMI) += dell-wmi.o
obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index 1e1e59423889..005629447b0c 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -33,6 +33,9 @@
#define WMAX_METHOD_BRIGHTNESS 0x3
#define WMAX_METHOD_ZONE_CONTROL 0x4
#define WMAX_METHOD_HDMI_CABLE 0x5
+#define WMAX_METHOD_AMPLIFIER_CABLE 0x6
+#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
+#define WMAX_METHOD_DEEP_SLEEP_STATUS 0x0C
MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
MODULE_DESCRIPTION("Alienware special feature control");
@@ -60,6 +63,8 @@ enum WMAX_CONTROL_STATES {
struct quirk_entry {
u8 num_zones;
u8 hdmi_mux;
+ u8 amplifier;
+ u8 deepslp;
};
static struct quirk_entry *quirks;
@@ -67,16 +72,43 @@ static struct quirk_entry *quirks;
static struct quirk_entry quirk_unknown = {
.num_zones = 2,
.hdmi_mux = 0,
+ .amplifier = 0,
+ .deepslp = 0,
};
-static struct quirk_entry quirk_x51_family = {
+static struct quirk_entry quirk_x51_r1_r2 = {
.num_zones = 3,
- .hdmi_mux = 0.
+ .hdmi_mux = 0,
+ .amplifier = 0,
+ .deepslp = 0,
+};
+
+static struct quirk_entry quirk_x51_r3 = {
+ .num_zones = 4,
+ .hdmi_mux = 0,
+ .amplifier = 1,
+ .deepslp = 0,
};
static struct quirk_entry quirk_asm100 = {
.num_zones = 2,
.hdmi_mux = 1,
+ .amplifier = 0,
+ .deepslp = 0,
+};
+
+static struct quirk_entry quirk_asm200 = {
+ .num_zones = 2,
+ .hdmi_mux = 1,
+ .amplifier = 0,
+ .deepslp = 1,
+};
+
+static struct quirk_entry quirk_asm201 = {
+ .num_zones = 2,
+ .hdmi_mux = 1,
+ .amplifier = 1,
+ .deepslp = 1,
};
static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -88,12 +120,12 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
static const struct dmi_system_id alienware_quirks[] __initconst = {
{
.callback = dmi_matched,
- .ident = "Alienware X51 R1",
+ .ident = "Alienware X51 R3",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R3"),
},
- .driver_data = &quirk_x51_family,
+ .driver_data = &quirk_x51_r3,
},
{
.callback = dmi_matched,
@@ -102,17 +134,44 @@ static const struct dmi_system_id alienware_quirks[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
},
- .driver_data = &quirk_x51_family,
+ .driver_data = &quirk_x51_r1_r2,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware X51 R1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+ },
+ .driver_data = &quirk_x51_r1_r2,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
+ },
+ .driver_data = &quirk_asm100,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Alienware ASM200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM200"),
+ },
+ .driver_data = &quirk_asm200,
},
{
- .callback = dmi_matched,
- .ident = "Alienware ASM100",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
- },
- .driver_data = &quirk_asm100,
- },
+ .callback = dmi_matched,
+ .ident = "Alienware ASM201",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ASM201"),
+ },
+ .driver_data = &quirk_asm201,
+ },
{}
};
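For reference, the reworked quirk table is consumed through the usual DMI pattern: dmi_check_system() fires dmi_matched() for the first entry whose DMI_MATCH fields all match, and the callback stashes that entry's driver_data in the global quirks pointer. A hypothetical condensation of the init-time flow:

static int __init sketch_detect_quirks(void)
{
	dmi_check_system(alienware_quirks);
	if (!quirks)
		quirks = &quirk_unknown;	/* safe defaults */

	/* Only models whose quirk entry opts in grow the new
	 * sysfs groups added below. */
	if (quirks->amplifier > 0)
		pr_debug("alienware-wmi: exposing amplifier status\n");
	if (quirks->deepslp > 0)
		pr_debug("alienware-wmi: exposing deep sleep control\n");
	return 0;
}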
@@ -133,7 +192,7 @@ struct wmax_brightness_args {
u32 percentage;
};
-struct hdmi_args {
+struct wmax_basic_args {
u8 arg;
};
@@ -170,7 +229,7 @@ static u8 global_brightness;
/*
* Helpers used for zone control
-*/
+ */
static int parse_rgb(const char *buf, struct platform_zone *zone)
{
long unsigned int rgb;
@@ -210,7 +269,7 @@ static struct platform_zone *match_zone(struct device_attribute *attr)
/*
* Individual RGB zone control
-*/
+ */
static int alienware_update_led(struct platform_zone *zone)
{
int method_id;
@@ -218,16 +277,16 @@ static int alienware_update_led(struct platform_zone *zone)
char *guid;
struct acpi_buffer input;
struct legacy_led_args legacy_args;
- struct wmax_led_args wmax_args;
+ struct wmax_led_args wmax_basic_args;
if (interface == WMAX) {
- wmax_args.led_mask = 1 << zone->location;
- wmax_args.colors = zone->colors;
- wmax_args.state = lighting_control_state;
+ wmax_basic_args.led_mask = 1 << zone->location;
+ wmax_basic_args.colors = zone->colors;
+ wmax_basic_args.state = lighting_control_state;
guid = WMAX_CONTROL_GUID;
method_id = WMAX_METHOD_ZONE_CONTROL;
- input.length = (acpi_size) sizeof(wmax_args);
- input.pointer = &wmax_args;
+ input.length = (acpi_size) sizeof(wmax_basic_args);
+ input.pointer = &wmax_basic_args;
} else {
legacy_args.colors = zone->colors;
legacy_args.brightness = global_brightness;
@@ -283,7 +342,7 @@ static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
/*
* LED Brightness (Global)
-*/
+ */
static int wmax_brightness(int brightness)
{
acpi_status status;
@@ -327,7 +386,7 @@ static struct led_classdev global_led = {
/*
* Lighting control state device attribute (Global)
-*/
+ */
static ssize_t show_control_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -435,11 +494,7 @@ static void alienware_zone_exit(struct platform_device *dev)
kfree(zone_attrs);
}
-/*
- The HDMI mux sysfs node indicates the status of the HDMI input mux.
- It can toggle between standard system GPU output and HDMI input.
-*/
-static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
+static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
u32 command, int *out_data)
{
acpi_status status;
@@ -467,16 +522,20 @@ static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
}
+/*
+ * The HDMI mux sysfs node indicates the status of the HDMI input mux.
+ * It can toggle between standard system GPU output and HDMI input.
+ */
static ssize_t show_hdmi_cable(struct device *dev,
struct device_attribute *attr, char *buf)
{
acpi_status status;
u32 out_data;
- struct hdmi_args in_args = {
+ struct wmax_basic_args in_args = {
.arg = 0,
};
status =
- alienware_hdmi_command(&in_args, WMAX_METHOD_HDMI_CABLE,
+ alienware_wmax_command(&in_args, WMAX_METHOD_HDMI_CABLE,
(u32 *) &out_data);
if (ACPI_SUCCESS(status)) {
if (out_data == 0)
@@ -495,11 +554,11 @@ static ssize_t show_hdmi_source(struct device *dev,
{
acpi_status status;
u32 out_data;
- struct hdmi_args in_args = {
+ struct wmax_basic_args in_args = {
.arg = 0,
};
status =
- alienware_hdmi_command(&in_args, WMAX_METHOD_HDMI_STATUS,
+ alienware_wmax_command(&in_args, WMAX_METHOD_HDMI_STATUS,
(u32 *) &out_data);
if (ACPI_SUCCESS(status)) {
@@ -519,7 +578,7 @@ static ssize_t toggle_hdmi_source(struct device *dev,
const char *buf, size_t count)
{
acpi_status status;
- struct hdmi_args args;
+ struct wmax_basic_args args;
if (strcmp(buf, "gpu\n") == 0)
args.arg = 1;
else if (strcmp(buf, "input\n") == 0)
@@ -528,7 +587,7 @@ static ssize_t toggle_hdmi_source(struct device *dev,
args.arg = 3;
pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
- status = alienware_hdmi_command(&args, WMAX_METHOD_HDMI_SOURCE, NULL);
+ status = alienware_wmax_command(&args, WMAX_METHOD_HDMI_SOURCE, NULL);
if (ACPI_FAILURE(status))
pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
@@ -563,11 +622,144 @@ static int create_hdmi(struct platform_device *dev)
ret = sysfs_create_group(&dev->dev.kobj, &hdmi_attribute_group);
if (ret)
- goto error_create_hdmi;
- return 0;
+ remove_hdmi(dev);
+ return ret;
+}
-error_create_hdmi:
- remove_hdmi(dev);
+/*
+ * Alienware GFX amplifier support
+ * - Currently supports reading cable status
+ * - Leaving expansion room to possibly support dock/undock events later
+ */
+static ssize_t show_amplifier_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ u32 out_data;
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ status =
+ alienware_wmax_command(&in_args, WMAX_METHOD_AMPLIFIER_CABLE,
+ (u32 *) &out_data);
+ if (ACPI_SUCCESS(status)) {
+ if (out_data == 0)
+ return scnprintf(buf, PAGE_SIZE,
+ "[unconnected] connected unknown\n");
+ else if (out_data == 1)
+ return scnprintf(buf, PAGE_SIZE,
+ "unconnected [connected] unknown\n");
+ }
+ pr_err("alienware-wmi: unknown amplifier cable status: %d\n", status);
+ return scnprintf(buf, PAGE_SIZE, "unconnected connected [unknown]\n");
+}
+
+static DEVICE_ATTR(status, S_IRUGO, show_amplifier_status, NULL);
+
+static struct attribute *amplifier_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static struct attribute_group amplifier_attribute_group = {
+ .name = "amplifier",
+ .attrs = amplifier_attrs,
+};
+
+static void remove_amplifier(struct platform_device *dev)
+{
+ if (quirks->amplifier > 0)
+ sysfs_remove_group(&dev->dev.kobj, &amplifier_attribute_group);
+}
+
+static int create_amplifier(struct platform_device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &amplifier_attribute_group);
+ if (ret)
+ remove_amplifier(dev);
+ return ret;
+}
+
+/*
+ * Deep Sleep Control support
+ * - Modifies BIOS setting for deep sleep control allowing extra wakeup events
+ */
+static ssize_t show_deepsleep_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ u32 out_data;
+ struct wmax_basic_args in_args = {
+ .arg = 0,
+ };
+ status = alienware_wmax_command(&in_args, WMAX_METHOD_DEEP_SLEEP_STATUS,
+ (u32 *) &out_data);
+ if (ACPI_SUCCESS(status)) {
+ if (out_data == 0)
+ return scnprintf(buf, PAGE_SIZE,
+ "[disabled] s5 s5_s4\n");
+ else if (out_data == 1)
+ return scnprintf(buf, PAGE_SIZE,
+ "disabled [s5] s5_s4\n");
+ else if (out_data == 2)
+ return scnprintf(buf, PAGE_SIZE,
+ "disabled s5 [s5_s4]\n");
+ }
+ pr_err("alienware-wmi: unknown deep sleep status: %d\n", status);
+ return scnprintf(buf, PAGE_SIZE, "disabled s5 s5_s4 [unknown]\n");
+}
+
+static ssize_t toggle_deepsleep(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ acpi_status status;
+ struct wmax_basic_args args;
+
+ if (strcmp(buf, "disabled\n") == 0)
+ args.arg = 0;
+ else if (strcmp(buf, "s5\n") == 0)
+ args.arg = 1;
+ else
+ args.arg = 2;
+ pr_debug("alienware-wmi: setting deep sleep to %d : %s", args.arg, buf);
+
+ status = alienware_wmax_command(&args, WMAX_METHOD_DEEP_SLEEP_CONTROL,
+ NULL);
+
+ if (ACPI_FAILURE(status))
+ pr_err("alienware-wmi: deep sleep control failed: results: %u\n",
+ status);
+ return count;
+}
+
+static DEVICE_ATTR(deepsleep, S_IRUGO | S_IWUSR, show_deepsleep_status, toggle_deepsleep);
+
+static struct attribute *deepsleep_attrs[] = {
+ &dev_attr_deepsleep.attr,
+ NULL,
+};
+
+static struct attribute_group deepsleep_attribute_group = {
+ .name = "deepsleep",
+ .attrs = deepsleep_attrs,
+};
+
+static void remove_deepsleep(struct platform_device *dev)
+{
+ if (quirks->deepslp > 0)
+ sysfs_remove_group(&dev->dev.kobj, &deepsleep_attribute_group);
+}
+
+static int create_deepsleep(struct platform_device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &deepsleep_attribute_group);
+ if (ret)
+ remove_deepsleep(dev);
return ret;
}
@@ -606,6 +798,18 @@ static int __init alienware_wmi_init(void)
goto fail_prep_hdmi;
}
+ if (quirks->amplifier > 0) {
+ ret = create_amplifier(platform_device);
+ if (ret)
+ goto fail_prep_amplifier;
+ }
+
+ if (quirks->deepslp > 0) {
+ ret = create_deepsleep(platform_device);
+ if (ret)
+ goto fail_prep_deepsleep;
+ }
+
ret = alienware_zone_init(platform_device);
if (ret)
goto fail_prep_zones;
@@ -614,6 +818,8 @@ static int __init alienware_wmi_init(void)
fail_prep_zones:
alienware_zone_exit(platform_device);
+fail_prep_deepsleep:
+fail_prep_amplifier:
fail_prep_hdmi:
platform_device_del(platform_device);
fail_platform_device2:
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 131fee2b093e..091ca7ada8fc 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -272,6 +272,15 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X75VD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X75VD"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. 1015E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index aaeeae81e3a9..2c2f02b2e08a 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -28,12 +28,11 @@
#include <linux/acpi.h>
#include <linux/mm.h>
#include <linux/i8042.h>
-#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <acpi/video.h>
-#include "../../firmware/dcdbas.h"
#include "dell-rbtn.h"
+#include "dell-smbios.h"
#define BRIGHTNESS_TOKEN 0x7d
#define KBD_LED_OFF_TOKEN 0x01E1
@@ -44,33 +43,6 @@
#define KBD_LED_AUTO_75_TOKEN 0x02EC
#define KBD_LED_AUTO_100_TOKEN 0x02F6
-/* This structure will be modified by the firmware when we enter
- * system management mode, hence the volatiles */
-
-struct calling_interface_buffer {
- u16 class;
- u16 select;
- volatile u32 input[4];
- volatile u32 output[4];
-} __packed;
-
-struct calling_interface_token {
- u16 tokenID;
- u16 location;
- union {
- u16 value;
- u16 stringlength;
- };
-};
-
-struct calling_interface_structure {
- struct dmi_header header;
- u16 cmdIOAddress;
- u8 cmdIOCode;
- u32 supportedCmds;
- struct calling_interface_token tokens[];
-} __packed;
-
struct quirk_entry {
u8 touchpad_led;
@@ -103,11 +75,6 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
.kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
};
-static int da_command_address;
-static int da_command_code;
-static int da_num_tokens;
-static struct calling_interface_token *da_tokens;
-
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
@@ -306,126 +273,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
{ }
};
-static struct calling_interface_buffer *buffer;
-static DEFINE_MUTEX(buffer_mutex);
-
-static void clear_buffer(void)
-{
- memset(buffer, 0, sizeof(struct calling_interface_buffer));
-}
-
-static void get_buffer(void)
-{
- mutex_lock(&buffer_mutex);
- clear_buffer();
-}
-
-static void release_buffer(void)
-{
- mutex_unlock(&buffer_mutex);
-}
-
-static void __init parse_da_table(const struct dmi_header *dm)
-{
- /* Final token is a terminator, so we don't want to copy it */
- int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
- struct calling_interface_token *new_da_tokens;
- struct calling_interface_structure *table =
- container_of(dm, struct calling_interface_structure, header);
-
- /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least
- 6 bytes of entry */
-
- if (dm->length < 17)
- return;
-
- da_command_address = table->cmdIOAddress;
- da_command_code = table->cmdIOCode;
-
- new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
- sizeof(struct calling_interface_token),
- GFP_KERNEL);
-
- if (!new_da_tokens)
- return;
- da_tokens = new_da_tokens;
-
- memcpy(da_tokens+da_num_tokens, table->tokens,
- sizeof(struct calling_interface_token) * tokens);
-
- da_num_tokens += tokens;
-}
-
-static void __init find_tokens(const struct dmi_header *dm, void *dummy)
-{
- switch (dm->type) {
- case 0xd4: /* Indexed IO */
- case 0xd5: /* Protected Area Type 1 */
- case 0xd6: /* Protected Area Type 2 */
- break;
- case 0xda: /* Calling interface */
- parse_da_table(dm);
- break;
- }
-}
-
-static int find_token_id(int tokenid)
-{
- int i;
-
- for (i = 0; i < da_num_tokens; i++) {
- if (da_tokens[i].tokenID == tokenid)
- return i;
- }
-
- return -1;
-}
-
-static int find_token_location(int tokenid)
-{
- int id;
-
- id = find_token_id(tokenid);
- if (id == -1)
- return -1;
-
- return da_tokens[id].location;
-}
-
-static struct calling_interface_buffer *
-dell_send_request(struct calling_interface_buffer *buffer, int class,
- int select)
-{
- struct smi_cmd command;
-
- command.magic = SMI_CMD_MAGIC;
- command.command_address = da_command_address;
- command.command_code = da_command_code;
- command.ebx = virt_to_phys(buffer);
- command.ecx = 0x42534931;
-
- buffer->class = class;
- buffer->select = select;
-
- dcdbas_smi_request(&command);
-
- return buffer;
-}
-
-static inline int dell_smi_error(int value)
-{
- switch (value) {
- case 0: /* Completed successfully */
- return 0;
- case -1: /* Completed with error */
- return -EIO;
- case -2: /* Function not supported */
- return -ENXIO;
- default: /* Unknown error */
- return -EINVAL;
- }
-}
-
/*
* Derived from information in smbios-wireless-ctl:
*
@@ -548,6 +395,7 @@ static inline int dell_smi_error(int value)
static int dell_rfkill_set(void *data, bool blocked)
{
+ struct calling_interface_buffer *buffer;
int disable = blocked ? 1 : 0;
unsigned long radio = (unsigned long)data;
int hwswitch_bit = (unsigned long)data - 1;
@@ -555,19 +403,19 @@ static int dell_rfkill_set(void *data, bool blocked)
int status;
int ret;
- get_buffer();
+ buffer = dell_smbios_get_buffer();
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
status = buffer->output[1];
if (ret != 0)
goto out;
- clear_buffer();
+ dell_smbios_clear_buffer();
buffer->input[0] = 0x2;
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
hwswitch = buffer->output[1];
@@ -577,27 +425,28 @@ static int dell_rfkill_set(void *data, bool blocked)
(status & BIT(0)) && !(status & BIT(16)))
disable = 1;
- clear_buffer();
+ dell_smbios_clear_buffer();
buffer->input[0] = (1 | (radio<<8) | (disable << 16));
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
out:
- release_buffer();
- return dell_smi_error(ret);
+ dell_smbios_release_buffer();
+ return dell_smbios_error(ret);
}
/* Must be called with the buffer held */
static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
- int status)
+ int status,
+ struct calling_interface_buffer *buffer)
{
if (status & BIT(0)) {
/* Has hw-switch, sync sw_state to BIOS */
int block = rfkill_blocked(rfkill);
- clear_buffer();
+ dell_smbios_clear_buffer();
buffer->input[0] = (1 | (radio << 8) | (block << 16));
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
} else {
/* No hw-switch, sync BIOS state to sw_state */
rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16)));
@@ -613,30 +462,31 @@ static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
static void dell_rfkill_query(struct rfkill *rfkill, void *data)
{
+ struct calling_interface_buffer *buffer;
int radio = ((unsigned long)data & 0xF);
int hwswitch;
int status;
int ret;
- get_buffer();
+ buffer = dell_smbios_get_buffer();
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
status = buffer->output[1];
if (ret != 0 || !(status & BIT(0))) {
- release_buffer();
+ dell_smbios_release_buffer();
return;
}
- clear_buffer();
+ dell_smbios_clear_buffer();
buffer->input[0] = 0x2;
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
hwswitch = buffer->output[1];
- release_buffer();
+ dell_smbios_release_buffer();
if (ret != 0)
return;
@@ -653,25 +503,26 @@ static struct dentry *dell_laptop_dir;
static int dell_debugfs_show(struct seq_file *s, void *data)
{
+ struct calling_interface_buffer *buffer;
int hwswitch_state;
int hwswitch_ret;
int status;
int ret;
- get_buffer();
+ buffer = dell_smbios_get_buffer();
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
status = buffer->output[1];
- clear_buffer();
+ dell_smbios_clear_buffer();
buffer->input[0] = 0x2;
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
hwswitch_ret = buffer->output[0];
hwswitch_state = buffer->output[1];
- release_buffer();
+ dell_smbios_release_buffer();
seq_printf(s, "return:\t%d\n", ret);
seq_printf(s, "status:\t0x%X\n", status);
@@ -752,23 +603,24 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
+ struct calling_interface_buffer *buffer;
int hwswitch = 0;
int status;
int ret;
- get_buffer();
+ buffer = dell_smbios_get_buffer();
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
status = buffer->output[1];
if (ret != 0)
goto out;
- clear_buffer();
+ dell_smbios_clear_buffer();
buffer->input[0] = 0x2;
- dell_send_request(buffer, 17, 11);
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
if (ret == 0 && (status & BIT(0)))
@@ -776,20 +628,21 @@ static void dell_update_rfkill(struct work_struct *ignored)
if (wifi_rfkill) {
dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
- dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
+ dell_rfkill_update_sw_state(wifi_rfkill, 1, status, buffer);
}
if (bluetooth_rfkill) {
dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
hwswitch);
- dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
+ dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status,
+ buffer);
}
if (wwan_rfkill) {
dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
- dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
+ dell_rfkill_update_sw_state(wwan_rfkill, 3, status, buffer);
}
out:
- release_buffer();
+ dell_smbios_release_buffer();
}
static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
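With the SMBIOS buffer now owned by dell-smbios, helpers such as dell_rfkill_update_sw_state() take it as an explicit parameter; the "must be called with the buffer held" comment means the caller has to sit between dell_smbios_get_buffer() and dell_smbios_release_buffer(). A condensed sketch of a caller honoring that contract:

static void sketch_push_sw_state(struct rfkill *rfkill, int radio)
{
	struct calling_interface_buffer *buffer;
	int status;

	buffer = dell_smbios_get_buffer();	/* takes the mutex */
	dell_smbios_send_request(17, 11);	/* wireless info call */
	status = buffer->output[1];

	if (buffer->output[0] == 0)
		dell_rfkill_update_sw_state(rfkill, radio, status, buffer);

	dell_smbios_release_buffer();		/* drops the mutex */
}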
@@ -833,6 +686,7 @@ static struct notifier_block dell_laptop_rbtn_notifier = {
static int __init dell_setup_rfkill(void)
{
+ struct calling_interface_buffer *buffer;
int status, ret, whitelisted;
const char *product;
@@ -848,11 +702,11 @@ static int __init dell_setup_rfkill(void)
if (!force_rfkill && !whitelisted)
return 0;
- get_buffer();
- dell_send_request(buffer, 17, 11);
+ buffer = dell_smbios_get_buffer();
+ dell_smbios_send_request(17, 11);
ret = buffer->output[0];
status = buffer->output[1];
- release_buffer();
+ dell_smbios_release_buffer();
/* dell wireless info smbios call is not supported */
if (ret != 0)
@@ -1005,51 +859,53 @@ static void dell_cleanup_rfkill(void)
static int dell_send_intensity(struct backlight_device *bd)
{
- int token;
+ struct calling_interface_buffer *buffer;
+ struct calling_interface_token *token;
int ret;
- token = find_token_location(BRIGHTNESS_TOKEN);
- if (token == -1)
+ token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
+ if (!token)
return -ENODEV;
- get_buffer();
- buffer->input[0] = token;
+ buffer = dell_smbios_get_buffer();
+ buffer->input[0] = token->location;
buffer->input[1] = bd->props.brightness;
if (power_supply_is_system_supplied() > 0)
- dell_send_request(buffer, 1, 2);
+ dell_smbios_send_request(1, 2);
else
- dell_send_request(buffer, 1, 1);
+ dell_smbios_send_request(1, 1);
- ret = dell_smi_error(buffer->output[0]);
+ ret = dell_smbios_error(buffer->output[0]);
- release_buffer();
+ dell_smbios_release_buffer();
return ret;
}
static int dell_get_intensity(struct backlight_device *bd)
{
- int token;
+ struct calling_interface_buffer *buffer;
+ struct calling_interface_token *token;
int ret;
- token = find_token_location(BRIGHTNESS_TOKEN);
- if (token == -1)
+ token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
+ if (!token)
return -ENODEV;
- get_buffer();
- buffer->input[0] = token;
+ buffer = dell_smbios_get_buffer();
+ buffer->input[0] = token->location;
if (power_supply_is_system_supplied() > 0)
- dell_send_request(buffer, 0, 2);
+ dell_smbios_send_request(0, 2);
else
- dell_send_request(buffer, 0, 1);
+ dell_smbios_send_request(0, 1);
if (buffer->output[0])
- ret = dell_smi_error(buffer->output[0]);
+ ret = dell_smbios_error(buffer->output[0]);
else
ret = buffer->output[1];
- release_buffer();
+ dell_smbios_release_buffer();
return ret;
}
@@ -1293,17 +1149,18 @@ static bool kbd_led_present;
static int kbd_get_info(struct kbd_info *info)
{
+ struct calling_interface_buffer *buffer;
u8 units;
int ret;
- get_buffer();
+ buffer = dell_smbios_get_buffer();
buffer->input[0] = 0x0;
- dell_send_request(buffer, 4, 11);
+ dell_smbios_send_request(4, 11);
ret = buffer->output[0];
if (ret) {
- ret = dell_smi_error(ret);
+ ret = dell_smbios_error(ret);
goto out;
}
@@ -1323,7 +1180,7 @@ static int kbd_get_info(struct kbd_info *info)
info->days = (buffer->output[3] >> 24) & 0xFF;
out:
- release_buffer();
+ dell_smbios_release_buffer();
return ret;
}
@@ -1382,16 +1239,17 @@ static int kbd_set_level(struct kbd_state *state, u8 level)
static int kbd_get_state(struct kbd_state *state)
{
+ struct calling_interface_buffer *buffer;
int ret;
- get_buffer();
+ buffer = dell_smbios_get_buffer();
buffer->input[0] = 0x1;
- dell_send_request(buffer, 4, 11);
+ dell_smbios_send_request(4, 11);
ret = buffer->output[0];
if (ret) {
- ret = dell_smi_error(ret);
+ ret = dell_smbios_error(ret);
goto out;
}
@@ -1407,15 +1265,16 @@ static int kbd_get_state(struct kbd_state *state)
state->level = (buffer->output[2] >> 16) & 0xFF;
out:
- release_buffer();
+ dell_smbios_release_buffer();
return ret;
}
static int kbd_set_state(struct kbd_state *state)
{
+ struct calling_interface_buffer *buffer;
int ret;
- get_buffer();
+ buffer = dell_smbios_get_buffer();
buffer->input[0] = 0x2;
buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
buffer->input[1] |= (state->triggers & 0xFF) << 16;
@@ -1423,11 +1282,11 @@ static int kbd_set_state(struct kbd_state *state)
buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
buffer->input[2] = state->als_setting & 0xFF;
buffer->input[2] |= (state->level & 0xFF) << 16;
- dell_send_request(buffer, 4, 11);
+ dell_smbios_send_request(4, 11);
ret = buffer->output[0];
- release_buffer();
+ dell_smbios_release_buffer();
- return dell_smi_error(ret);
+ return dell_smbios_error(ret);
}
static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
@@ -1452,50 +1311,52 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
static int kbd_set_token_bit(u8 bit)
{
- int id;
+ struct calling_interface_buffer *buffer;
+ struct calling_interface_token *token;
int ret;
if (bit >= ARRAY_SIZE(kbd_tokens))
return -EINVAL;
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
+ token = dell_smbios_find_token(kbd_tokens[bit]);
+ if (!token)
return -EINVAL;
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- buffer->input[1] = da_tokens[id].value;
- dell_send_request(buffer, 1, 0);
+ buffer = dell_smbios_get_buffer();
+ buffer->input[0] = token->location;
+ buffer->input[1] = token->value;
+ dell_smbios_send_request(1, 0);
ret = buffer->output[0];
- release_buffer();
+ dell_smbios_release_buffer();
- return dell_smi_error(ret);
+ return dell_smbios_error(ret);
}
static int kbd_get_token_bit(u8 bit)
{
- int id;
+ struct calling_interface_buffer *buffer;
+ struct calling_interface_token *token;
int ret;
int val;
if (bit >= ARRAY_SIZE(kbd_tokens))
return -EINVAL;
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
+ token = dell_smbios_find_token(kbd_tokens[bit]);
+ if (!token)
return -EINVAL;
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- dell_send_request(buffer, 0, 0);
+ buffer = dell_smbios_get_buffer();
+ buffer->input[0] = token->location;
+ dell_smbios_send_request(0, 0);
ret = buffer->output[0];
val = buffer->output[1];
- release_buffer();
+ dell_smbios_release_buffer();
if (ret)
- return dell_smi_error(ret);
+ return dell_smbios_error(ret);
- return (val == da_tokens[id].value);
+ return (val == token->value);
}
static int kbd_get_first_active_token_bit(void)
@@ -1597,7 +1458,7 @@ static inline void kbd_init_tokens(void)
int i;
for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
- if (find_token_id(kbd_tokens[i]) != -1)
+ if (dell_smbios_find_token(kbd_tokens[i]))
kbd_token_bits |= BIT(i);
}
@@ -2111,8 +1972,9 @@ static void kbd_led_exit(void)
static int __init dell_init(void)
{
+ struct calling_interface_buffer *buffer;
+ struct calling_interface_token *token;
int max_intensity = 0;
- int token;
int ret;
if (!dmi_check_system(dell_device_table))
@@ -2122,13 +1984,6 @@ static int __init dell_init(void)
/* find if this machine support other functions */
dmi_check_system(dell_quirks);
- dmi_walk(find_tokens, NULL);
-
- if (!da_tokens) {
- pr_info("Unable to find dmi tokens\n");
- return -ENODEV;
- }
-
ret = platform_driver_register(&platform_driver);
if (ret)
goto fail_platform_driver;
@@ -2141,16 +1996,6 @@ static int __init dell_init(void)
if (ret)
goto fail_platform_device2;
- /*
- * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
- * is passed to SMI handler.
- */
- buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
- if (!buffer) {
- ret = -ENOMEM;
- goto fail_buffer;
- }
-
ret = dell_setup_rfkill();
if (ret) {
@@ -2171,14 +2016,14 @@ static int __init dell_init(void)
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
- token = find_token_location(BRIGHTNESS_TOKEN);
- if (token != -1) {
- get_buffer();
- buffer->input[0] = token;
- dell_send_request(buffer, 0, 2);
+ token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
+ if (token) {
+ buffer = dell_smbios_get_buffer();
+ buffer->input[0] = token->location;
+ dell_smbios_send_request(0, 2);
if (buffer->output[0] == 0)
max_intensity = buffer->output[3];
- release_buffer();
+ dell_smbios_release_buffer();
}
if (max_intensity) {
@@ -2208,15 +2053,12 @@ static int __init dell_init(void)
fail_backlight:
dell_cleanup_rfkill();
fail_rfkill:
- free_page((unsigned long)buffer);
-fail_buffer:
platform_device_del(platform_device);
fail_platform_device2:
platform_device_put(platform_device);
fail_platform_device1:
platform_driver_unregister(&platform_driver);
fail_platform_driver:
- kfree(da_tokens);
return ret;
}
@@ -2232,8 +2074,6 @@ static void __exit dell_exit(void)
platform_device_unregister(platform_device);
platform_driver_unregister(&platform_driver);
}
- kfree(da_tokens);
- free_page((unsigned long)buffer);
}
/* dell-rbtn.c driver export functions which will not work correctly (and could
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index cd410e392550..b51a2008d782 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -217,6 +217,21 @@ static void rbtn_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id rbtn_ids[] = {
{ "DELRBTN", 0 },
{ "DELLABCE", 0 },
+
+ /*
+ * This driver can also handle the "DELLABC6" device that
+ * appears on the XPS 13 9350, but that device is disabled
+ * by the DSDT unless booted with acpi_osi="!Windows 2012"
+ * acpi_osi="!Windows 2013". Even if we boot that and bind
+ * the driver, we seem to have inconsistent behavior in
+ * which NetworkManager can get out of sync with the rfkill
+ * state.
+ *
+ * On the XPS 13 9350 and similar laptops, we're not supposed to
+ * use DELLABC6 at all. Instead, we handle the rfkill button
+ * via the intel-hid driver.
+ */
+
{ "", 0 },
};
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios.c
new file mode 100644
index 000000000000..d2412ab097da
--- /dev/null
+++ b/drivers/platform/x86/dell-smbios.c
@@ -0,0 +1,193 @@
+/*
+ * Common functions for kernel modules using Dell SMBIOS
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include "../../firmware/dcdbas.h"
+#include "dell-smbios.h"
+
+struct calling_interface_structure {
+ struct dmi_header header;
+ u16 cmdIOAddress;
+ u8 cmdIOCode;
+ u32 supportedCmds;
+ struct calling_interface_token tokens[];
+} __packed;
+
+static struct calling_interface_buffer *buffer;
+static DEFINE_MUTEX(buffer_mutex);
+
+static int da_command_address;
+static int da_command_code;
+static int da_num_tokens;
+static struct calling_interface_token *da_tokens;
+
+int dell_smbios_error(int value)
+{
+ switch (value) {
+ case 0: /* Completed successfully */
+ return 0;
+ case -1: /* Completed with error */
+ return -EIO;
+ case -2: /* Function not supported */
+ return -ENXIO;
+ default: /* Unknown error */
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(dell_smbios_error);
+
+struct calling_interface_buffer *dell_smbios_get_buffer(void)
+{
+ mutex_lock(&buffer_mutex);
+ dell_smbios_clear_buffer();
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(dell_smbios_get_buffer);
+
+void dell_smbios_clear_buffer(void)
+{
+ memset(buffer, 0, sizeof(struct calling_interface_buffer));
+}
+EXPORT_SYMBOL_GPL(dell_smbios_clear_buffer);
+
+void dell_smbios_release_buffer(void)
+{
+ mutex_unlock(&buffer_mutex);
+}
+EXPORT_SYMBOL_GPL(dell_smbios_release_buffer);
+
+void dell_smbios_send_request(int class, int select)
+{
+ struct smi_cmd command;
+
+ command.magic = SMI_CMD_MAGIC;
+ command.command_address = da_command_address;
+ command.command_code = da_command_code;
+ command.ebx = virt_to_phys(buffer);
+ command.ecx = 0x42534931;
+
+ buffer->class = class;
+ buffer->select = select;
+
+ dcdbas_smi_request(&command);
+}
+EXPORT_SYMBOL_GPL(dell_smbios_send_request);
+
+struct calling_interface_token *dell_smbios_find_token(int tokenid)
+{
+ int i;
+
+ for (i = 0; i < da_num_tokens; i++) {
+ if (da_tokens[i].tokenID == tokenid)
+ return &da_tokens[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dell_smbios_find_token);
+
+static void __init parse_da_table(const struct dmi_header *dm)
+{
+ /* Final token is a terminator, so we don't want to copy it */
+ int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1;
+ struct calling_interface_token *new_da_tokens;
+ struct calling_interface_structure *table =
+ container_of(dm, struct calling_interface_structure, header);
+
+ /*
+ * 4 bytes of table header, plus 7 bytes of Dell header,
+ * plus at least 6 bytes of entry.
+ */
+
+ if (dm->length < 17)
+ return;
+
+ da_command_address = table->cmdIOAddress;
+ da_command_code = table->cmdIOCode;
+
+ new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) *
+ sizeof(struct calling_interface_token),
+ GFP_KERNEL);
+
+ if (!new_da_tokens)
+ return;
+ da_tokens = new_da_tokens;
+
+ memcpy(da_tokens+da_num_tokens, table->tokens,
+ sizeof(struct calling_interface_token) * tokens);
+
+ da_num_tokens += tokens;
+}
+
+static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0xd4: /* Indexed IO */
+ case 0xd5: /* Protected Area Type 1 */
+ case 0xd6: /* Protected Area Type 2 */
+ break;
+ case 0xda: /* Calling interface */
+ parse_da_table(dm);
+ break;
+ }
+}
+
+static int __init dell_smbios_init(void)
+{
+ int ret;
+
+ dmi_walk(find_tokens, NULL);
+
+ if (!da_tokens) {
+ pr_info("Unable to find dmi tokens\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
+ * is passed to SMI handler.
+ */
+ buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto fail_buffer;
+ }
+
+ return 0;
+
+fail_buffer:
+ kfree(da_tokens);
+ return ret;
+}
+
+static void __exit dell_smbios_exit(void)
+{
+ kfree(da_tokens);
+ free_page((unsigned long)buffer);
+}
+
+subsys_initcall(dell_smbios_init);
+module_exit(dell_smbios_exit);
+
+MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
new file mode 100644
index 000000000000..ec7d40ae5e6e
--- /dev/null
+++ b/drivers/platform/x86/dell-smbios.h
@@ -0,0 +1,46 @@
+/*
+ * Common functions for kernel modules using Dell SMBIOS
+ *
+ * Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DELL_SMBIOS_H_
+#define _DELL_SMBIOS_H_
+
+/* This structure will be modified by the firmware when we enter
+ * system management mode, hence the volatiles */
+
+struct calling_interface_buffer {
+ u16 class;
+ u16 select;
+ volatile u32 input[4];
+ volatile u32 output[4];
+} __packed;
+
+struct calling_interface_token {
+ u16 tokenID;
+ u16 location;
+ union {
+ u16 value;
+ u16 stringlength;
+ };
+};
+
+int dell_smbios_error(int value);
+
+struct calling_interface_buffer *dell_smbios_get_buffer(void);
+void dell_smbios_clear_buffer(void);
+void dell_smbios_release_buffer(void);
+void dell_smbios_send_request(int class, int select);
+
+struct calling_interface_token *dell_smbios_find_token(int tokenid);
+#endif
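
As a minimal sketch of the intended calling convention (not part of the patch itself): find the token, fill the shared buffer under the lock, issue the SMI, and translate the result. The class/select pair (0, 2) and the output word consumed mirror the dell-laptop brightness hunk at the top of this patch; BRIGHTNESS_TOKEN is defined by dell-laptop, and other requests use different class/select values per Dell's documentation.

	static int example_query_max_brightness(int *max)
	{
		struct calling_interface_token *token;
		struct calling_interface_buffer *buffer;
		int ret;

		token = dell_smbios_find_token(BRIGHTNESS_TOKEN);
		if (!token)
			return -ENODEV;

		/* Takes buffer_mutex and zeroes the shared SMI buffer. */
		buffer = dell_smbios_get_buffer();
		buffer->input[0] = token->location;
		dell_smbios_send_request(0, 2);
		ret = dell_smbios_error(buffer->output[0]);
		if (!ret)
			*max = buffer->output[3];
		/* Drops buffer_mutex; the buffer contents are now stale. */
		dell_smbios_release_buffer();

		return ret;
	}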
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 368e193c2741..15c6f1191aec 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -37,6 +37,7 @@
#include <linux/string.h>
#include <linux/dmi.h>
#include <acpi/video.h>
+#include "dell-smbios.h"
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
@@ -47,10 +48,37 @@ MODULE_LICENSE("GPL");
#define DELL_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492"
static u32 dell_wmi_interface_version;
+static bool wmi_requires_smbios_request;
MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
MODULE_ALIAS("wmi:"DELL_DESCRIPTOR_GUID);
+static int __init dmi_matched(const struct dmi_system_id *dmi)
+{
+ wmi_requires_smbios_request = 1;
+ return 1;
+}
+
+static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = {
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron M5110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
+ },
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V131",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+ },
+ },
+ { }
+};
+
/*
* Certain keys are flagged as KE_IGNORE. All of these are either
* notifications (rather than requests for change) or are also sent
@@ -90,8 +118,11 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = {
{ KE_IGNORE, 0xe020, { KEY_MUTE } },
- /* Shortcut and audio panel keys */
- { KE_IGNORE, 0xe025, { KEY_RESERVED } },
+ /* Dell Instant Launch key */
+ { KE_KEY, 0xe025, { KEY_PROG4 } },
+ { KE_KEY, 0xe029, { KEY_PROG4 } },
+
+ /* Audio panel key */
{ KE_IGNORE, 0xe026, { KEY_RESERVED } },
{ KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } },
@@ -120,7 +151,10 @@ struct dell_bios_hotkey_table {
};
-static const struct dell_bios_hotkey_table *dell_bios_hotkey_table;
+struct dell_dmi_results {
+ int err;
+ struct key_entry *keymap;
+};
/* Uninitialized entries here are KEY_RESERVED == 0. */
static const u16 bios_to_linux_keycode[256] __initconst = {
@@ -166,6 +200,30 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
[255] = KEY_PROG3,
};
+/*
+ * These are applied if the 0xB2 DMI hotkey table is present and doesn't
+ * override them.
+ */
+static const struct key_entry dell_wmi_extra_keymap[] __initconst = {
+ /* Fn-lock */
+ { KE_IGNORE, 0x151, { KEY_RESERVED } },
+
+ /* Change keyboard illumination */
+ { KE_IGNORE, 0x152, { KEY_KBDILLUMTOGGLE } },
+
+ /*
+ * Radio disable (notify only -- there is no model for which the
+ * WMI event is supposed to trigger an action).
+ */
+ { KE_IGNORE, 0x153, { KEY_RFKILL } },
+
+ /* RGB keyboard backlight control */
+ { KE_IGNORE, 0x154, { KEY_RESERVED } },
+
+ /* Stealth mode toggle */
+ { KE_IGNORE, 0x155, { KEY_RESERVED } },
+};
+
static struct input_dev *dell_wmi_input_dev;
static void dell_wmi_process_key(int reported_key)
@@ -188,6 +246,9 @@ static void dell_wmi_process_key(int reported_key)
acpi_video_handles_brightness_key_presses())
return;
+ if (reported_key == 0xe025 && !wmi_requires_smbios_request)
+ return;
+
sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true);
}
@@ -337,20 +398,60 @@ static void dell_wmi_notify(u32 value, void *context)
kfree(obj);
}
-static const struct key_entry * __init dell_wmi_prepare_new_keymap(void)
+static bool have_scancode(u32 scancode, const struct key_entry *keymap, int len)
{
- int hotkey_num = (dell_bios_hotkey_table->header.length - 4) /
- sizeof(struct dell_bios_keymap_entry);
- struct key_entry *keymap;
int i;
- keymap = kcalloc(hotkey_num + 1, sizeof(struct key_entry), GFP_KERNEL);
- if (!keymap)
- return NULL;
+ for (i = 0; i < len; i++)
+ if (keymap[i].code == scancode)
+ return true;
+
+ return false;
+}
+
+static void __init handle_dmi_entry(const struct dmi_header *dm,
+                                    void *opaque)
+{
+ struct dell_dmi_results *results = opaque;
+ struct dell_bios_hotkey_table *table;
+ int hotkey_num, i, pos = 0;
+ struct key_entry *keymap;
+ int num_bios_keys;
+
+ if (results->err || results->keymap)
+ return; /* We already found the hotkey table. */
+
+ if (dm->type != 0xb2)
+ return;
+
+ table = container_of(dm, struct dell_bios_hotkey_table, header);
+
+ hotkey_num = (table->header.length -
+ sizeof(struct dell_bios_hotkey_table)) /
+ sizeof(struct dell_bios_keymap_entry);
+ if (hotkey_num < 1) {
+ /*
+ * Historically, dell-wmi would ignore a DMI entry of
+ * fewer than 7 bytes. Sizes between 4 and 8 bytes are
+ * nonsensical (both the header and all entries are 4
+ * bytes), so we approximate the old behavior by
+ * ignoring tables with fewer than one entry.
+ */
+ return;
+ }
+
+ keymap = kcalloc(hotkey_num + ARRAY_SIZE(dell_wmi_extra_keymap) + 1,
+ sizeof(struct key_entry), GFP_KERNEL);
+ if (!keymap) {
+ results->err = -ENOMEM;
+ return;
+ }
for (i = 0; i < hotkey_num; i++) {
const struct dell_bios_keymap_entry *bios_entry =
- &dell_bios_hotkey_table->keymap[i];
+ &table->keymap[i];
/* Uninitialized entries are 0 aka KEY_RESERVED. */
u16 keycode = (bios_entry->keycode <
@@ -370,20 +471,39 @@ static const struct key_entry * __init dell_wmi_prepare_new_keymap(void)
}
if (keycode == KEY_KBDILLUMTOGGLE)
- keymap[i].type = KE_IGNORE;
+ keymap[pos].type = KE_IGNORE;
else
- keymap[i].type = KE_KEY;
- keymap[i].code = bios_entry->scancode;
- keymap[i].keycode = keycode;
+ keymap[pos].type = KE_KEY;
+ keymap[pos].code = bios_entry->scancode;
+ keymap[pos].keycode = keycode;
+
+ pos++;
+ }
+
+ num_bios_keys = pos;
+
+ for (i = 0; i < ARRAY_SIZE(dell_wmi_extra_keymap); i++) {
+ const struct key_entry *entry = &dell_wmi_extra_keymap[i];
+
+ /*
+ * Check if we've already found this scancode. This takes
+ * quadratic time, but it doesn't matter unless the list
+ * of extra keys gets very long.
+ */
+ if (!have_scancode(entry->code, keymap, num_bios_keys)) {
+ keymap[pos] = *entry;
+ pos++;
+ }
}
- keymap[hotkey_num].type = KE_END;
+ keymap[pos].type = KE_END;
- return keymap;
+ results->keymap = keymap;
}
static int __init dell_wmi_input_setup(void)
{
+ struct dell_dmi_results dmi_results = {};
int err;
dell_wmi_input_dev = input_allocate_device();
@@ -394,20 +514,31 @@ static int __init dell_wmi_input_setup(void)
dell_wmi_input_dev->phys = "wmi/input0";
dell_wmi_input_dev->id.bustype = BUS_HOST;
- if (dell_new_hk_type) {
- const struct key_entry *keymap = dell_wmi_prepare_new_keymap();
- if (!keymap) {
- err = -ENOMEM;
- goto err_free_dev;
- }
+ if (dmi_walk(handle_dmi_entry, &dmi_results)) {
+ /*
+ * Historically, dell-wmi ignored dmi_walk errors. A failure
+ * is certainly surprising, but it probably just indicates
+ * a very old laptop.
+ */
+ pr_warn("no DMI; using the old-style hotkey interface\n");
+ }
- err = sparse_keymap_setup(dell_wmi_input_dev, keymap, NULL);
+ if (dmi_results.err) {
+ err = dmi_results.err;
+ goto err_free_dev;
+ }
+
+ if (dmi_results.keymap) {
+ dell_new_hk_type = true;
+
+ err = sparse_keymap_setup(dell_wmi_input_dev,
+ dmi_results.keymap, NULL);
/*
* Sparse keymap library makes a copy of keymap so we
* don't need the original one that was allocated.
*/
- kfree(keymap);
+ kfree(dmi_results.keymap);
} else {
err = sparse_keymap_setup(dell_wmi_input_dev,
dell_wmi_legacy_keymap, NULL);
@@ -434,15 +565,6 @@ static void dell_wmi_input_destroy(void)
input_unregister_device(dell_wmi_input_dev);
}
-static void __init find_hk_type(const struct dmi_header *dm, void *dummy)
-{
- if (dm->type == 0xb2 && dm->length > 6) {
- dell_new_hk_type = true;
- dell_bios_hotkey_table =
- container_of(dm, struct dell_bios_hotkey_table, header);
- }
-}
-
/*
* Descriptor buffer is 128 byte long and contains:
*
@@ -509,6 +631,38 @@ static int __init dell_wmi_check_descriptor_buffer(void)
return 0;
}
+/*
+ * According to Dell SMBIOS documentation:
+ *
+ * 17 3 Application Program Registration
+ *
+ * cbArg1 Application ID 1 = 0x00010000
+ * cbArg2 Application ID 2
+ * QUICKSET/DCP = 0x51534554 "QSET"
+ * ALS Driver = 0x416c7353 "AlsS"
+ * Latitude ON = 0x4c6f6e52 "LonR"
+ * cbArg3 Application version or revision number
+ * cbArg4 0 = Unregister application
+ * 1 = Register application
+ * cbRes1 Standard return codes (0, -1, -2)
+ */
+
+static int dell_wmi_events_set_enabled(bool enable)
+{
+ struct calling_interface_buffer *buffer;
+ int ret;
+
+ buffer = dell_smbios_get_buffer();
+ buffer->input[0] = 0x10000;
+ buffer->input[1] = 0x51534554;
+ buffer->input[3] = enable;
+ dell_smbios_send_request(17, 3);
+ ret = buffer->output[0];
+ dell_smbios_release_buffer();
+
+ return dell_smbios_error(ret);
+}
+
static int __init dell_wmi_init(void)
{
int err;
@@ -524,8 +678,6 @@ static int __init dell_wmi_init(void)
if (err)
return err;
- dmi_walk(find_hk_type, NULL);
-
err = dell_wmi_input_setup();
if (err)
return err;
@@ -538,12 +690,26 @@ static int __init dell_wmi_init(void)
return -ENODEV;
}
+ dmi_check_system(dell_wmi_smbios_list);
+
+ if (wmi_requires_smbios_request) {
+ err = dell_wmi_events_set_enabled(true);
+ if (err) {
+ pr_err("Failed to enable WMI events\n");
+ wmi_remove_notify_handler(DELL_EVENT_GUID);
+ dell_wmi_input_destroy();
+ return err;
+ }
+ }
+
return 0;
}
module_init(dell_wmi_init);
static void __exit dell_wmi_exit(void)
{
+ if (wmi_requires_smbios_request)
+ dell_wmi_events_set_enabled(false);
wmi_remove_notify_handler(DELL_EVENT_GUID);
dell_wmi_input_destroy();
}
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 1c62caff93fd..ffc84cc7b1c7 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -114,6 +114,7 @@
#define KEY2_CODE 0x411
#define KEY3_CODE 0x412
#define KEY4_CODE 0x413
+#define KEY5_CODE 0x420
#define MAX_HOTKEY_RINGBUFFER_SIZE 100
#define RINGBUFFERSIZE 40
@@ -149,7 +150,7 @@ struct fujitsu_t {
char phys[32];
struct backlight_device *bl_device;
struct platform_device *pf_device;
- int keycode1, keycode2, keycode3, keycode4;
+ int keycode1, keycode2, keycode3, keycode4, keycode5;
unsigned int max_brightness;
unsigned int brightness_changed;
@@ -823,6 +824,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
set_bit(fujitsu->keycode2, input->keybit);
set_bit(fujitsu->keycode3, input->keybit);
set_bit(fujitsu->keycode4, input->keybit);
+ set_bit(fujitsu->keycode5, input->keybit);
set_bit(KEY_UNKNOWN, input->keybit);
error = input_register_device(input);
@@ -962,6 +964,9 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
case KEY4_CODE:
keycode = fujitsu->keycode4;
break;
+ case KEY5_CODE:
+ keycode = fujitsu->keycode5;
+ break;
case 0:
keycode = 0;
break;
@@ -1072,6 +1077,7 @@ static int __init fujitsu_init(void)
fujitsu->keycode2 = KEY_PROG2;
fujitsu->keycode3 = KEY_PROG3;
fujitsu->keycode4 = KEY_PROG4;
+ fujitsu->keycode5 = KEY_RFKILL;
dmi_check_system(fujitsu_dmi_table);
result = acpi_bus_register_driver(&acpi_fujitsu_driver);
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index fb4dd7b3ee71..6f145f2d004d 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -157,7 +157,6 @@ static struct platform_device *hp_wmi_platform_dev;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
-static struct rfkill *gps_rfkill;
struct rfkill2_device {
u8 id;
@@ -613,10 +612,6 @@ static void hp_wmi_notify(u32 value, void *context)
rfkill_set_states(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN),
hp_wmi_get_hw_state(HPWMI_WWAN));
- if (gps_rfkill)
- rfkill_set_states(gps_rfkill,
- hp_wmi_get_sw_state(HPWMI_GPS),
- hp_wmi_get_hw_state(HPWMI_GPS));
break;
case HPWMI_CPU_BATTERY_THROTTLE:
pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
@@ -746,7 +741,7 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
(void *) HPWMI_BLUETOOTH);
if (!bluetooth_rfkill) {
err = -ENOMEM;
- goto register_wifi_error;
+ goto register_bluetooth_error;
}
rfkill_init_sw_state(bluetooth_rfkill,
hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
@@ -764,7 +759,7 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
(void *) HPWMI_WWAN);
if (!wwan_rfkill) {
err = -ENOMEM;
- goto register_bluetooth_error;
+ goto register_wwan_error;
}
rfkill_init_sw_state(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN));
@@ -775,35 +770,13 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
goto register_wwan_error;
}
- if (wireless & 0x8) {
- gps_rfkill = rfkill_alloc("hp-gps", &device->dev,
- RFKILL_TYPE_GPS,
- &hp_wmi_rfkill_ops,
- (void *) HPWMI_GPS);
- if (!gps_rfkill) {
- err = -ENOMEM;
- goto register_wwan_error;
- }
- rfkill_init_sw_state(gps_rfkill,
- hp_wmi_get_sw_state(HPWMI_GPS));
- rfkill_set_hw_state(gps_rfkill,
- hp_wmi_get_hw_state(HPWMI_GPS));
- err = rfkill_register(gps_rfkill);
- if (err)
- goto register_gps_error;
- }
-
return 0;
-register_gps_error:
- rfkill_destroy(gps_rfkill);
- gps_rfkill = NULL;
- if (bluetooth_rfkill)
- rfkill_unregister(bluetooth_rfkill);
+
register_wwan_error:
rfkill_destroy(wwan_rfkill);
wwan_rfkill = NULL;
- if (gps_rfkill)
- rfkill_unregister(gps_rfkill);
+ if (bluetooth_rfkill)
+ rfkill_unregister(bluetooth_rfkill);
register_bluetooth_error:
rfkill_destroy(bluetooth_rfkill);
bluetooth_rfkill = NULL;
@@ -907,7 +880,6 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
wifi_rfkill = NULL;
bluetooth_rfkill = NULL;
wwan_rfkill = NULL;
- gps_rfkill = NULL;
rfkill2_count = 0;
if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
@@ -960,10 +932,6 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
rfkill_unregister(wwan_rfkill);
rfkill_destroy(wwan_rfkill);
}
- if (gps_rfkill) {
- rfkill_unregister(gps_rfkill);
- rfkill_destroy(gps_rfkill);
- }
return 0;
}
@@ -999,10 +967,6 @@ static int hp_wmi_resume_handler(struct device *device)
rfkill_set_states(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN),
hp_wmi_get_hw_state(HPWMI_WWAN));
- if (gps_rfkill)
- rfkill_set_states(gps_rfkill,
- hp_wmi_get_sw_state(HPWMI_GPS),
- hp_wmi_get_hw_state(HPWMI_GPS));
return 0;
}
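
The relabeled error paths above restore the usual kernel unwind idiom: each failure jumps to a label that tears down only what was already set up, in reverse order, so the label names now match the step that failed. A minimal sketch of the pattern, with illustrative function names (not from the driver):

	static int setup_all(void)
	{
		int err;

		err = setup_wifi();
		if (err)
			return err;

		err = setup_bluetooth();
		if (err)
			goto err_wifi;

		err = setup_wwan();
		if (err)
			goto err_bluetooth;

		return 0;

	err_bluetooth:
		teardown_bluetooth();
	err_wifi:
		teardown_wifi();
		return err;
	}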
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 10ce6cba4455..09356684c32f 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
arg0.integer.value = reg;
status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
*ret = lret;
- return (status != AE_OK) ? -EINVAL : 0;
+ return 0;
}
/**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
DEFINE_CONV(normal, 1, 2, 3);
DEFINE_CONV(y_inverted, 1, -2, 3);
DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
DEFINE_CONV(z_inverted, 1, 2, -3);
DEFINE_CONV(xy_swap, 2, 1, 3);
DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+ AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d78ee151c9e4..be3bc2f4edd4 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -865,6 +865,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo ideapad Y700-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
+ },
+ },
+ {
+ .ident = "Lenovo ideapad Y700 Touch-15ISK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
+ },
+ },
+ {
.ident = "Lenovo ideapad Y700-17ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index e20f23e04c24..a818db6aa08f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
}
static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+ .freeze = intel_hid_pl_suspend_handler,
+ .restore = intel_hid_pl_resume_handler,
.suspend = intel_hid_pl_suspend_handler,
.resume = intel_hid_pl_resume_handler,
};
@@ -180,8 +182,7 @@ static int intel_hid_probe(struct platform_device *device)
return -ENODEV;
}
- priv = devm_kzalloc(&device->dev,
- sizeof(struct intel_hid_priv *), GFP_KERNEL);
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 092519e37de6..6f497e80c9df 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -67,7 +67,8 @@
/* exported resources from IFWI */
#define PLAT_RESOURCE_IPC_INDEX 0
#define PLAT_RESOURCE_IPC_SIZE 0x1000
-#define PLAT_RESOURCE_GCR_SIZE 0x1000
+#define PLAT_RESOURCE_GCR_OFFSET 0x1008
+#define PLAT_RESOURCE_GCR_SIZE 0x4
#define PLAT_RESOURCE_BIOS_DATA_INDEX 1
#define PLAT_RESOURCE_BIOS_IFACE_INDEX 2
#define PLAT_RESOURCE_TELEM_SSRAM_INDEX 3
@@ -686,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %pR\n", res);
- /* This is index 0 to cover BIOS data register */
punit_res = punit_res_array;
+ /* This is index 0 to cover BIOS data register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_DATA_INDEX);
if (!res) {
@@ -697,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
*punit_res = *res;
dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
+ /* This is index 1 to cover BIOS interface register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_IFACE_INDEX);
if (!res) {
dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
return -ENXIO;
}
- /* This is index 1 to cover BIOS interface register */
*++punit_res = *res;
dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
+ /* This is index 2 to cover ISP data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
}
- /* This is index 2 to cover ISP data register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
+ /* This is index 3 to cover ISP interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
}
- /* This is index 3 to cover ISP interface register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
+ /* This is index 4 to cover GTD data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_DATA_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
}
- /* This is index 4 to cover GTD data register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
+ /* This is index 5 to cover GTD interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_IFACE_INDEX);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
- return -ENXIO;
+ ++punit_res;
+ if (res) {
+ *punit_res = *res;
+ dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
}
- /* This is index 5 to cover GTD interface register */
- *++punit_res = *res;
- dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_IPC_INDEX);
@@ -766,7 +763,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
}
ipcdev.ipc_base = addr;
- ipcdev.gcr_base = res->start + size;
+ ipcdev.gcr_base = res->start + PLAT_RESOURCE_GCR_OFFSET;
ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
dev_info(&pdev->dev, "ipc res: %pR\n", res);
@@ -824,7 +821,8 @@ static int ipc_plat_probe(struct platform_device *pdev)
goto err_device;
}
- if (request_irq(ipcdev.irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
+ if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND,
+ "intel_pmc_ipc", &ipcdev)) {
dev_err(&pdev->dev, "Failed to request irq\n");
ret = -EBUSY;
goto err_irq;
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index bd875409a02d..a47a41fc10ad 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
struct resource *res;
void __iomem *addr;
+ /*
+ * The following resources are required
+ * - BIOS_IPC BASE_DATA
+ * - BIOS_IPC BASE_IFACE
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
return PTR_ERR(addr);
punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
+ /*
+ * The following resources are optional
+ * - ISPDRIVER_IPC BASE_DATA
+ * - ISPDRIVER_IPC BASE_IFACE
+ * - GTDRIVER_IPC BASE_DATA
+ * - GTDRIVER_IPC BASE_IFACE
+ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- addr = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(addr))
- return PTR_ERR(addr);
- punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+ if (res) {
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (!IS_ERR(addr))
+ punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+ }
return 0;
}
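
The four optional BARs above follow one pattern: look up the resource and map it only if the platform actually provided it. A sketch of a helper that captures the pattern (hypothetical name, not part of the patch):

	static void __iomem *punit_map_optional(struct platform_device *pdev,
						int index)
	{
		struct resource *res;
		void __iomem *addr;

		/* Optional BAR: absence is not an error. */
		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		if (!res)
			return NULL;

		addr = devm_ioremap_resource(&pdev->dev, res);
		return IS_ERR(addr) ? NULL : addr;
	}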
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index f94b730540e2..e81daff65f62 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -24,7 +24,6 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sfi.h>
-#include <linux/module.h>
#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
@@ -611,28 +610,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
}
-/**
- * ipc_remove - remove a bound IPC device
- * @pdev: PCI device
- *
- * In practice the SCU is not removable but this function is also
- * called for each device on a module unload or cleanup which is the
- * path that will get used.
- *
- * Free up the mappings and release the PCI resources
- */
-static void ipc_remove(struct pci_dev *pdev)
-{
- struct intel_scu_ipc_dev *scu = pci_get_drvdata(pdev);
-
- mutex_lock(&ipclock);
- scu->dev = NULL;
- mutex_unlock(&ipclock);
-
- iounmap(scu->i2c_base);
- intel_scu_devices_destroy();
-}
-
static const struct pci_device_id pci_ids[] = {
{
PCI_VDEVICE(INTEL, PCI_DEVICE_ID_LINCROFT),
@@ -650,17 +627,13 @@ static const struct pci_device_id pci_ids[] = {
0,
}
};
-MODULE_DEVICE_TABLE(pci, pci_ids);
static struct pci_driver ipc_driver = {
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
.name = "intel_scu_ipc",
.id_table = pci_ids,
.probe = ipc_probe,
- .remove = ipc_remove,
};
-
-module_pci_driver(ipc_driver);
-
-MODULE_AUTHOR("Sreedhara DS <sreedhara.ds@intel.com>");
-MODULE_DESCRIPTION("Intel SCU IPC driver");
-MODULE_LICENSE("GPL");
+builtin_pci_driver(ipc_driver);
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index f97019b0106f..781bd10ca7ac 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
{
u32 telem_ctrl = 0;
- int ret;
+ int ret = 0;
mutex_lock(&(telm_conf->telem_lock));
if (ioss_period) {
@@ -1030,8 +1030,19 @@ static int telemetry_plt_set_trace_verbosity(enum telemetry_unit telem_unit,
switch (telem_unit) {
case TELEM_PSS:
ret = intel_punit_ipc_command(
+ IPC_PUNIT_BIOS_READ_TELE_TRACE_CTRL,
+ 0, 0, NULL, &temp);
+ if (ret) {
+ pr_err("PSS TRACE_CTRL Read Failed\n");
+ goto out;
+ }
+
+ TELEM_CLEAR_VERBOSITY_BITS(temp);
+ TELEM_SET_VERBOSITY_BITS(temp, verbosity);
+
+ ret = intel_punit_ipc_command(
IPC_PUNIT_BIOS_WRITE_TELE_TRACE_CTRL,
- 0, 0, &verbosity, NULL);
+ 0, 0, &temp, NULL);
if (ret) {
pr_err("PSS TRACE_CTRL Verbosity Set Failed\n");
goto out;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a268a7abf8ab..9255ff3ee81a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6653,18 +6653,16 @@ static void __init tpacpi_detect_brightness_capabilities(void)
switch (b) {
case 16:
bright_maxlvl = 15;
- pr_info("detected a 16-level brightness capable ThinkPad\n");
break;
case 8:
case 0:
bright_maxlvl = 7;
- pr_info("detected a 8-level brightness capable ThinkPad\n");
break;
default:
- pr_info("Unsupported brightness interface\n");
tp_features.bright_unkfw = 1;
bright_maxlvl = b - 1;
}
+ pr_debug("detected %u brightness levels\n", bright_maxlvl + 1);
}
static int __init brightness_init(struct ibm_init_struct *iibm)
@@ -7974,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
fan_update_desired_level(s);
mutex_unlock(&fan_mutex);
+ if (rc)
+ return rc;
if (status)
*status = s;
- return rc;
+ return 0;
}
static int fan_get_speed(unsigned int *speed)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 73833079bac8..df1f1a76a862 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
@@ -117,6 +118,7 @@ MODULE_LICENSE("GPL");
#define HCI_LCD_BRIGHTNESS 0x002a
#define HCI_WIRELESS 0x0056
#define HCI_ACCELEROMETER 0x006d
+#define HCI_COOLING_METHOD 0x007f
#define HCI_KBD_ILLUMINATION 0x0095
#define HCI_ECO_MODE 0x0097
#define HCI_ACCELEROMETER2 0x00a6
@@ -186,6 +188,7 @@ struct toshiba_acpi_dev {
int usbsc_bat_level;
int usbsc_mode_base;
int hotkey_event_type;
+ int max_cooling_method;
unsigned int illumination_supported:1;
unsigned int video_supported:1;
@@ -205,6 +208,7 @@ struct toshiba_acpi_dev {
unsigned int panel_power_on_supported:1;
unsigned int usb_three_supported:1;
unsigned int wwan_supported:1;
+ unsigned int cooling_method_supported:1;
unsigned int sysfs_created:1;
unsigned int special_functions;
@@ -217,6 +221,10 @@ struct toshiba_acpi_dev {
static struct toshiba_acpi_dev *toshiba_acpi;
+static bool disable_hotkeys;
+module_param(disable_hotkeys, bool, 0444);
+MODULE_PARM_DESC(disable_hotkeys, "Disable hotkey activation");
+
static const struct acpi_device_id toshiba_device_ids[] = {
{"TOS6200", 0},
{"TOS6207", 0},
@@ -1194,6 +1202,53 @@ static int toshiba_wwan_set(struct toshiba_acpi_dev *dev, u32 state)
return out[0] == TOS_SUCCESS ? 0 : -EIO;
}
+/* Cooling Method */
+static void toshiba_cooling_method_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { HCI_GET, HCI_COOLING_METHOD, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ dev->cooling_method_supported = 0;
+ dev->max_cooling_method = 0;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status)) {
+ pr_err("ACPI call to get Cooling Method failed\n");
+ return;
+ }
+
+ if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+ return;
+
+ dev->cooling_method_supported = 1;
+ dev->max_cooling_method = out[3];
+}
+
+static int toshiba_cooling_method_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+ u32 result = hci_read(dev, HCI_COOLING_METHOD, state);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Cooling Method failed\n");
+
+ if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+static int toshiba_cooling_method_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+ u32 result = hci_write(dev, HCI_COOLING_METHOD, state);
+
+ if (result == TOS_FAILURE)
+ pr_err("ACPI call to get Cooling Method failed\n");
+
+ if (result == TOS_NOT_SUPPORTED)
+ return -ENODEV;
+
+ return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
/* Transflective Backlight */
static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 *status)
{
@@ -2239,6 +2294,54 @@ static ssize_t usb_three_store(struct device *dev,
}
static DEVICE_ATTR_RW(usb_three);
+static ssize_t cooling_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ u32 state;
+ int ret;
+
+ ret = toshiba_cooling_method_get(toshiba, &state);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d %d\n", state, toshiba->max_cooling_method);
+}
+
+static ssize_t cooling_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+ int state;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ /*
+ * Check for supported values
+ * Depending on the laptop model, some only support these two:
+ * 0 - Maximum Performance
+ * 1 - Battery Optimized
+ *
+ * While some others support all three methods:
+ * 0 - Maximum Performance
+ * 1 - Performance
+ * 2 - Battery Optimized
+ */
+ if (state < 0 || state > toshiba->max_cooling_method)
+ return -EINVAL;
+
+ ret = toshiba_cooling_method_set(toshiba, state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(cooling_method);
+
static struct attribute *toshiba_attributes[] = {
&dev_attr_version.attr,
&dev_attr_fan.attr,
@@ -2255,6 +2358,7 @@ static struct attribute *toshiba_attributes[] = {
&dev_attr_kbd_function_keys.attr,
&dev_attr_panel_power_on.attr,
&dev_attr_usb_three.attr,
+ &dev_attr_cooling_method.attr,
NULL,
};
@@ -2289,6 +2393,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
exists = (drv->panel_power_on_supported) ? true : false;
else if (attr == &dev_attr_usb_three.attr)
exists = (drv->usb_three_supported) ? true : false;
+ else if (attr == &dev_attr_cooling_method.attr)
+ exists = (drv->cooling_method_supported) ? true : false;
return exists ? attr->mode : 0;
}
@@ -2591,6 +2697,11 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
acpi_handle ec_handle;
int error;
+ if (disable_hotkeys) {
+ pr_info("Hotkeys disabled by module parameter\n");
+ return 0;
+ }
+
if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) {
pr_info("WMI event detected, hotkeys will not be monitored\n");
return 0;
@@ -2779,6 +2890,8 @@ static void print_supported_features(struct toshiba_acpi_dev *dev)
pr_cont(" usb3");
if (dev->wwan_supported)
pr_cont(" wwan");
+ if (dev->cooling_method_supported)
+ pr_cont(" cooling-method");
pr_cont("\n");
}
@@ -2963,6 +3076,8 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
if (dev->wwan_supported)
toshiba_acpi_setup_wwan_rfkill(dev);
+ toshiba_cooling_method_available(dev);
+
print_supported_features(dev);
ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 05796495be0e..4b717c699313 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -252,6 +252,10 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
break;
+ case ACPI_RESOURCE_TYPE_SERIAL_BUS:
+ /* serial bus connections (I2C/SPI/UART) are not pnp */
+ break;
+
default:
dev_warn(&dev->dev, "unknown resource type %d in _CRS\n",
res->type);
diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
index 297e72dc70e6..2b82e44d9027 100644
--- a/drivers/power/88pm860x_charger.c
+++ b/drivers/power/88pm860x_charger.c
@@ -435,7 +435,7 @@ static irqreturn_t pm860x_temp_handler(int irq, void *data)
psy = power_supply_get_by_name(pm860x_supplied_to[0]);
if (!psy)
- goto out;
+ return IRQ_HANDLED;
ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &temp);
if (ret)
goto out;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 1ddd13cc0c07..421770ddafa3 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -75,6 +75,13 @@ config BATTERY_88PM860X
help
Say Y here to enable battery monitor for Marvell 88PM860x chip.
+config BATTERY_ACT8945A
+ tristate "Active-semi ACT8945A charger driver"
+ depends on MFD_ACT8945A || COMPILE_TEST
+ help
+ Say Y here to enable support for power supply provided by
+ Active-semi ActivePath ACT8945A charger.
+
config BATTERY_DS2760
tristate "DS2760 battery driver (HP iPAQ & others)"
depends on W1 && W1_SLAVE_DS2760
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 0e4eab55f8d7..e46b75d448a5 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
obj-$(CONFIG_TEST_POWER) += test_power.o
obj-$(CONFIG_BATTERY_88PM860X) += 88pm860x_battery.o
+obj-$(CONFIG_BATTERY_ACT8945A) += act8945a_charger.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index 8f8044e1acf3..bf2e5dd301e7 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -906,26 +906,21 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
{
struct power_supply *psy;
- struct power_supply *ext;
+ struct power_supply *ext = dev_get_drvdata(dev);
+ const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_btemp *di;
union power_supply_propval ret;
- int i, j;
- bool psy_found = false;
+ int j;
psy = (struct power_supply *)data;
- ext = dev_get_drvdata(dev);
di = power_supply_get_drvdata(psy);
/*
* For all psy where the name of your driver
* appears in any supplied_to
*/
- for (i = 0; i < ext->num_supplicants; i++) {
- if (!strcmp(ext->supplied_to[i], psy->desc->name))
- psy_found = true;
- }
-
- if (!psy_found)
+ j = match_string(supplicants, ext->num_supplicants, psy->desc->name);
+ if (j < 0)
return 0;
/* Go through all properties for the psy */
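
This hunk and the following three replace the open-coded supplied_to scan with match_string(), which returns the index of the first exact match in a string array, or -EINVAL when nothing matches. A small sketch of its semantics (names illustrative):

	static int example_match(void)
	{
		static const char * const names[] = {
			"ab8500_btemp", "ab8500_fg", "ab8500_chargalg"
		};

		/* Returns 1 here; returns -EINVAL if the string is absent. */
		return match_string(names, ARRAY_SIZE(names), "ab8500_fg");
	}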
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index e388171f4e58..30de5d42b26a 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -1929,11 +1929,11 @@ static int ab8540_charger_usb_pre_chg_enable(struct ux500_charger *charger,
static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
{
struct power_supply *psy;
- struct power_supply *ext;
+ struct power_supply *ext = dev_get_drvdata(dev);
+ const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_charger *di;
union power_supply_propval ret;
- int i, j;
- bool psy_found = false;
+ int j;
struct ux500_charger *usb_chg;
usb_chg = (struct ux500_charger *)data;
@@ -1941,15 +1941,9 @@ static int ab8500_charger_get_ext_psy_data(struct device *dev, void *data)
di = to_ab8500_charger_usb_device_info(usb_chg);
- ext = dev_get_drvdata(dev);
-
/* For all psy where the driver name appears in any supplied_to */
- for (i = 0; i < ext->num_supplicants; i++) {
- if (!strcmp(ext->supplied_to[i], psy->desc->name))
- psy_found = true;
- }
-
- if (!psy_found)
+ j = match_string(supplicants, ext->num_supplicants, psy->desc->name);
+ if (j < 0)
return 0;
/* Go through all properties for the psy */
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 3830dade5d69..5a36cf88578a 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2168,26 +2168,21 @@ static int ab8500_fg_get_property(struct power_supply *psy,
static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
{
struct power_supply *psy;
- struct power_supply *ext;
+ struct power_supply *ext = dev_get_drvdata(dev);
+ const char **supplicants = (const char **)ext->supplied_to;
struct ab8500_fg *di;
union power_supply_propval ret;
- int i, j;
- bool psy_found = false;
+ int j;
psy = (struct power_supply *)data;
- ext = dev_get_drvdata(dev);
di = power_supply_get_drvdata(psy);
/*
* For all psy where the name of your driver
* appears in any supplied_to
*/
- for (i = 0; i < ext->num_supplicants; i++) {
- if (!strcmp(ext->supplied_to[i], psy->desc->name))
- psy_found = true;
- }
-
- if (!psy_found)
+ j = match_string(supplicants, ext->num_supplicants, psy->desc->name);
+ if (j < 0)
return 0;
/* Go through all properties for the psy */
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index 541f702e0451..d9104b1ab7cf 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/abx500_chargalg.c
@@ -975,22 +975,18 @@ static void handle_maxim_chg_curr(struct abx500_chargalg *di)
static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
{
struct power_supply *psy;
- struct power_supply *ext;
+ struct power_supply *ext = dev_get_drvdata(dev);
+ const char **supplicants = (const char **)ext->supplied_to;
struct abx500_chargalg *di;
union power_supply_propval ret;
- int i, j;
- bool psy_found = false;
+ int j;
bool capacity_updated = false;
psy = (struct power_supply *)data;
- ext = dev_get_drvdata(dev);
di = power_supply_get_drvdata(psy);
/* For all psy where the driver name appears in any supplied_to */
- for (i = 0; i < ext->num_supplicants; i++) {
- if (!strcmp(ext->supplied_to[i], psy->desc->name))
- psy_found = true;
- }
- if (!psy_found)
+ j = match_string(supplicants, ext->num_supplicants, psy->desc->name);
+ if (j < 0)
return 0;
/*
diff --git a/drivers/power/act8945a_charger.c b/drivers/power/act8945a_charger.c
new file mode 100644
index 000000000000..b5c00e45741e
--- /dev/null
+++ b/drivers/power/act8945a_charger.c
@@ -0,0 +1,359 @@
+/*
+ * Power supply driver for the Active-semi ACT8945A PMIC
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Wenyou Yang <wenyou.yang@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+
+static const char *act8945a_charger_model = "ACT8945A";
+static const char *act8945a_charger_manufacturer = "Active-semi";
+
+/**
+ * ACT8945A Charger Register Map
+ */
+
+/* 0x70: Reserved */
+#define ACT8945A_APCH_CFG 0x71
+#define ACT8945A_APCH_STATUS 0x78
+#define ACT8945A_APCH_CTRL 0x79
+#define ACT8945A_APCH_STATE 0x7A
+
+/* ACT8945A_APCH_CFG */
+#define APCH_CFG_OVPSET (0x3 << 0)
+#define APCH_CFG_OVPSET_6V6 (0x0 << 0)
+#define APCH_CFG_OVPSET_7V (0x1 << 0)
+#define APCH_CFG_OVPSET_7V5 (0x2 << 0)
+#define APCH_CFG_OVPSET_8V (0x3 << 0)
+#define APCH_CFG_PRETIMO (0x3 << 2)
+#define APCH_CFG_PRETIMO_40_MIN (0x0 << 2)
+#define APCH_CFG_PRETIMO_60_MIN (0x1 << 2)
+#define APCH_CFG_PRETIMO_80_MIN (0x2 << 2)
+#define APCH_CFG_PRETIMO_DISABLED (0x3 << 2)
+#define APCH_CFG_TOTTIMO (0x3 << 4)
+#define APCH_CFG_TOTTIMO_3_HOUR (0x0 << 4)
+#define APCH_CFG_TOTTIMO_4_HOUR (0x1 << 4)
+#define APCH_CFG_TOTTIMO_5_HOUR (0x2 << 4)
+#define APCH_CFG_TOTTIMO_DISABLED (0x3 << 4)
+#define APCH_CFG_SUSCHG (0x1 << 7)
+
+#define APCH_STATUS_CHGDAT BIT(0)
+#define APCH_STATUS_INDAT BIT(1)
+#define APCH_STATUS_TEMPDAT BIT(2)
+#define APCH_STATUS_TIMRDAT BIT(3)
+#define APCH_STATUS_CHGSTAT BIT(4)
+#define APCH_STATUS_INSTAT BIT(5)
+#define APCH_STATUS_TEMPSTAT BIT(6)
+#define APCH_STATUS_TIMRSTAT BIT(7)
+
+#define APCH_CTRL_CHGEOCOUT BIT(0)
+#define APCH_CTRL_INDIS BIT(1)
+#define APCH_CTRL_TEMPOUT BIT(2)
+#define APCH_CTRL_TIMRPRE BIT(3)
+#define APCH_CTRL_CHGEOCIN BIT(4)
+#define APCH_CTRL_INCON BIT(5)
+#define APCH_CTRL_TEMPIN BIT(6)
+#define APCH_CTRL_TIMRTOT BIT(7)
+
+#define APCH_STATE_ACINSTAT (0x1 << 1)
+#define APCH_STATE_CSTATE (0x3 << 4)
+#define APCH_STATE_CSTATE_SHIFT 4
+#define APCH_STATE_CSTATE_DISABLED 0x00
+#define APCH_STATE_CSTATE_EOC 0x01
+#define APCH_STATE_CSTATE_FAST 0x02
+#define APCH_STATE_CSTATE_PRE 0x03
+
+struct act8945a_charger {
+ struct regmap *regmap;
+ bool battery_temperature;
+};
+
+static int act8945a_get_charger_state(struct regmap *regmap, int *val)
+{
+ int ret;
+ unsigned int status, state;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
+ if (ret < 0)
+ return ret;
+
+ state &= APCH_STATE_CSTATE;
+ state >>= APCH_STATE_CSTATE_SHIFT;
+
+ if (state == APCH_STATE_CSTATE_EOC) {
+ if (status & APCH_STATUS_CHGDAT)
+ *val = POWER_SUPPLY_STATUS_FULL;
+ else
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ } else if ((state == APCH_STATE_CSTATE_FAST) ||
+ (state == APCH_STATE_CSTATE_PRE)) {
+ *val = POWER_SUPPLY_STATUS_CHARGING;
+ } else {
+ *val = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ }
+
+ return 0;
+}
+
+static int act8945a_get_charge_type(struct regmap *regmap, int *val)
+{
+ int ret;
+ unsigned int state;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATE, &state);
+ if (ret < 0)
+ return ret;
+
+ state &= APCH_STATE_CSTATE;
+ state >>= APCH_STATE_CSTATE_SHIFT;
+
+ switch (state) {
+ case APCH_STATE_CSTATE_PRE:
+ *val = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case APCH_STATE_CSTATE_FAST:
+ *val = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ break;
+ case APCH_STATE_CSTATE_EOC:
+ case APCH_STATE_CSTATE_DISABLED:
+ default:
+ *val = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ }
+
+ return 0;
+}
+
+static int act8945a_get_battery_health(struct act8945a_charger *charger,
+ struct regmap *regmap, int *val)
+{
+ int ret;
+ unsigned int status;
+
+ ret = regmap_read(regmap, ACT8945A_APCH_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ if (charger->battery_temperature && !(status & APCH_STATUS_TEMPDAT))
+ *val = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (!(status & APCH_STATUS_INDAT))
+ *val = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (status & APCH_STATUS_TIMRDAT)
+ *val = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE;
+ else
+ *val = POWER_SUPPLY_HEALTH_GOOD;
+
+ return 0;
+}
+
+static enum power_supply_property act8945a_charger_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER
+};
+
+static int act8945a_charger_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct act8945a_charger *charger = power_supply_get_drvdata(psy);
+ struct regmap *regmap = charger->regmap;
+ int ret = 0;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = act8945a_get_charger_state(regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ ret = act8945a_get_charge_type(regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = act8945a_get_battery_health(charger,
+ regmap, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = act8945a_charger_model;
+ break;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = act8945a_charger_manufacturer;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct power_supply_desc act8945a_charger_desc = {
+ .name = "act8945a-charger",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .get_property = act8945a_charger_get_property,
+ .properties = act8945a_charger_props,
+ .num_properties = ARRAY_SIZE(act8945a_charger_props),
+};
+
+#define DEFAULT_TOTAL_TIME_OUT 3
+#define DEFAULT_PRE_TIME_OUT 40
+#define DEFAULT_INPUT_OVP_THRESHOLD 6600
+
+static int act8945a_charger_config(struct device *dev,
+ struct act8945a_charger *charger)
+{
+ struct device_node *np = dev->of_node;
+ enum of_gpio_flags flags;
+ struct regmap *regmap = charger->regmap;
+
+ u32 total_time_out;
+ u32 pre_time_out;
+ u32 input_voltage_threshold;
+ int chglev_pin;
+
+ unsigned int value = 0;
+
+ if (!np) {
+ dev_err(dev, "no charger of node\n");
+ return -EINVAL;
+ }
+
+ charger->battery_temperature = of_property_read_bool(np,
+ "active-semi,check-battery-temperature");
+
+ chglev_pin = of_get_named_gpio_flags(np,
+ "active-semi,chglev-gpios", 0, &flags);
+
+ if (gpio_is_valid(chglev_pin)) {
+ gpio_set_value(chglev_pin,
+ ((flags == OF_GPIO_ACTIVE_LOW) ? 0 : 1));
+ }
+
+ if (of_property_read_u32(np,
+ "active-semi,input-voltage-threshold-microvolt",
+ &input_voltage_threshold))
+ input_voltage_threshold = DEFAULT_INPUT_OVP_THRESHOLD;
+
+ if (of_property_read_u32(np,
+ "active-semi,precondition-timeout",
+ &pre_time_out))
+ pre_time_out = DEFAULT_PRE_TIME_OUT;
+
+ if (of_property_read_u32(np, "active-semi,total-timeout",
+ &total_time_out))
+ total_time_out = DEFAULT_TOTAL_TIME_OUT;
+
+ switch (input_voltage_threshold) {
+ case 8000:
+ value |= APCH_CFG_OVPSET_8V;
+ break;
+ case 7500:
+ value |= APCH_CFG_OVPSET_7V5;
+ break;
+ case 7000:
+ value |= APCH_CFG_OVPSET_7V;
+ break;
+ case 6600:
+ default:
+ value |= APCH_CFG_OVPSET_6V6;
+ break;
+ }
+
+ switch (pre_time_out) {
+ case 60:
+ value |= APCH_CFG_PRETIMO_60_MIN;
+ break;
+ case 80:
+ value |= APCH_CFG_PRETIMO_80_MIN;
+ break;
+ case 0:
+ value |= APCH_CFG_PRETIMO_DISABLED;
+ break;
+ case 40:
+ default:
+ value |= APCH_CFG_PRETIMO_40_MIN;
+ break;
+ }
+
+ switch (total_time_out) {
+ case 4:
+ value |= APCH_CFG_TOTTIMO_4_HOUR;
+ break;
+ case 5:
+ value |= APCH_CFG_TOTTIMO_5_HOUR;
+ break;
+ case 0:
+ value |= APCH_CFG_TOTTIMO_DISABLED;
+ break;
+ case 3:
+ default:
+ value |= APCH_CFG_TOTTIMO_3_HOUR;
+ break;
+ }
+
+ return regmap_write(regmap, ACT8945A_APCH_CFG, value);
+}
+
+static int act8945a_charger_probe(struct platform_device *pdev)
+{
+ struct act8945a_charger *charger;
+ struct power_supply *psy;
+ struct power_supply_config psy_cfg = {};
+ int ret;
+
+ charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
+ if (!charger)
+ return -ENOMEM;
+
+ charger->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!charger->regmap) {
+ dev_err(&pdev->dev, "Parent did not provide regmap\n");
+ return -EINVAL;
+ }
+
+ ret = act8945a_charger_config(pdev->dev.parent, charger);
+ if (ret)
+ return ret;
+
+ psy_cfg.of_node = pdev->dev.parent->of_node;
+ psy_cfg.drv_data = charger;
+
+ psy = devm_power_supply_register(&pdev->dev,
+ &act8945a_charger_desc,
+ &psy_cfg);
+ if (IS_ERR(psy)) {
+ dev_err(&pdev->dev, "failed to register power supply\n");
+ return PTR_ERR(psy);
+ }
+
+ return 0;
+}
+
+static struct platform_driver act8945a_charger_driver = {
+ .driver = {
+ .name = "act8945a-charger",
+ },
+ .probe = act8945a_charger_probe,
+};
+module_platform_driver(act8945a_charger_driver);
+
+MODULE_DESCRIPTION("Active-semi ACT8945A ActivePath charger driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 80994566a1c8..8986382718dd 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -47,6 +47,10 @@
#define RK3368_SOC_CON15_FLASH0 BIT(14)
#define RK3368_SOC_FLASH_SUPPLY_NUM 2
+#define RK3399_PMUGRF_CON0 0x180
+#define RK3399_PMUGRF_CON0_VSEL BIT(8)
+#define RK3399_PMUGRF_VSEL_SUPPLY_NUM 9
+
struct rockchip_iodomain;
/**
@@ -181,6 +185,25 @@ static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
}
+static void rk3399_pmu_iodomain_init(struct rockchip_iodomain *iod)
+{
+ int ret;
+ u32 val;
+
+ /* if no pmu io supply we should leave things alone */
+ if (!iod->supplies[RK3399_PMUGRF_VSEL_SUPPLY_NUM].reg)
+ return;
+
+ /*
+ * set pmu io iodomain to also use this framework
+ * instead of a special gpio.
+ */
+ val = RK3399_PMUGRF_CON0_VSEL | (RK3399_PMUGRF_CON0_VSEL << 16);
+ ret = regmap_write(iod->grf, RK3399_PMUGRF_CON0, val);
+ if (ret < 0)
+ dev_warn(iod->dev, "couldn't update pmu io iodomain ctrl\n");
+}
+
/*
* On the rk3188 the io-domains are handled by a shared register with the
* lower 8 bits being still being continuing drive-strength settings.
@@ -252,6 +275,33 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = {
},
};
+static const struct rockchip_iodomain_soc_data soc_data_rk3399 = {
+ .grf_offset = 0xe640,
+ .supply_names = {
+ "bt656", /* APIO2_VDD */
+ "audio", /* APIO5_VDD */
+ "sdmmc", /* SDMMC0_VDD */
+ "gpio1830", /* APIO4_VDD */
+ },
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = {
+ .grf_offset = 0x180,
+ .supply_names = {
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "pmu1830", /* PMUIO2_VDD */
+ },
+ .init = rk3399_pmu_iodomain_init,
+};
+
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
@@ -269,6 +319,14 @@ static const struct of_device_id rockchip_iodomain_match[] = {
.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
.data = (void *)&soc_data_rk3368_pmu
},
+ {
+ .compatible = "rockchip,rk3399-io-voltage-domain",
+ .data = (void *)&soc_data_rk3399
+ },
+ {
+ .compatible = "rockchip,rk3399-pmu-io-voltage-domain",
+ .data = (void *)&soc_data_rk3399_pmu
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
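
The write in rk3399_pmu_iodomain_init() uses the Rockchip GRF convention: the upper 16 bits of the register are a write-enable mask for the lower 16, so a field can be updated without a read-modify-write cycle. That is why val is built as RK3399_PMUGRF_CON0_VSEL | (RK3399_PMUGRF_CON0_VSEL << 16). A sketch of the idiom (hypothetical helper name):

	static int grf_update_bits(struct regmap *grf, u32 reg, u16 mask, u16 bits)
	{
		/* The high halfword selects which low-halfword bits take effect. */
		return regmap_write(grf, reg, ((u32)mask << 16) | bits);
	}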
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index 27e89536689a..73e2f0b79dd4 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -1759,6 +1759,7 @@ static const struct i2c_device_id bq2415x_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, bq2415x_i2c_id_table);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id bq2415x_i2c_acpi_match[] = {
{ "BQ2415X", BQUNKNOWN },
{ "BQ241500", BQ24150 },
@@ -1776,10 +1777,31 @@ static const struct acpi_device_id bq2415x_i2c_acpi_match[] = {
{},
};
MODULE_DEVICE_TABLE(acpi, bq2415x_i2c_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id bq2415x_of_match_table[] = {
+ { .compatible = "ti,bq24150" },
+ { .compatible = "ti,bq24150a" },
+ { .compatible = "ti,bq24151" },
+ { .compatible = "ti,bq24151a" },
+ { .compatible = "ti,bq24152" },
+ { .compatible = "ti,bq24153" },
+ { .compatible = "ti,bq24153a" },
+ { .compatible = "ti,bq24155" },
+ { .compatible = "ti,bq24156" },
+ { .compatible = "ti,bq24156a" },
+ { .compatible = "ti,bq24157s" },
+ { .compatible = "ti,bq24158" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bq2415x_of_match_table);
+#endif
static struct i2c_driver bq2415x_driver = {
.driver = {
.name = "bq2415x-charger",
+ .of_match_table = of_match_ptr(bq2415x_of_match_table),
.acpi_match_table = ACPI_PTR(bq2415x_i2c_acpi_match),
},
.probe = bq2415x_probe,
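[Editor's note] The #ifdef guards around the two ID tables pair with of_match_ptr()/ACPI_PTR() in the driver struct: when the corresponding subsystem is configured out, the macro folds to NULL and the table would otherwise be defined but unreferenced, triggering an unused-variable warning. Simplified from the real definition in <linux/of.h> (ACPI_PTR in <linux/acpi.h> has the same shape):

    #ifdef CONFIG_OF
    #define of_match_ptr(ptr)	(ptr)
    #else
    #define of_match_ptr(ptr)	NULL
    #endif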
diff --git a/drivers/power/bq24735-charger.c b/drivers/power/bq24735-charger.c
index eb2b3689de97..fa454c19ce17 100644
--- a/drivers/power/bq24735-charger.c
+++ b/drivers/power/bq24735-charger.c
@@ -48,6 +48,8 @@ struct bq24735 {
struct power_supply_desc charger_desc;
struct i2c_client *client;
struct bq24735_platform *pdata;
+ struct mutex lock;
+ bool charging;
};
static inline struct bq24735 *to_bq24735(struct power_supply *psy)
@@ -56,9 +58,23 @@ static inline struct bq24735 *to_bq24735(struct power_supply *psy)
}
static enum power_supply_property bq24735_charger_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
};
+static int bq24735_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static inline int bq24735_write_word(struct i2c_client *client, u8 reg,
u16 value)
{
@@ -90,6 +106,9 @@ static int bq24735_update_word(struct i2c_client *client, u8 reg,
static inline int bq24735_enable_charging(struct bq24735 *charger)
{
+ if (charger->pdata->ext_control)
+ return 0;
+
return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
BQ24735_CHG_OPT_CHARGE_DISABLE,
~BQ24735_CHG_OPT_CHARGE_DISABLE);
@@ -97,6 +116,9 @@ static inline int bq24735_enable_charging(struct bq24735 *charger)
static inline int bq24735_disable_charging(struct bq24735 *charger)
{
+ if (charger->pdata->ext_control)
+ return 0;
+
return bq24735_update_word(charger->client, BQ24735_CHG_OPT,
BQ24735_CHG_OPT_CHARGE_DISABLE,
BQ24735_CHG_OPT_CHARGE_DISABLE);
@@ -108,6 +130,9 @@ static int bq24735_config_charger(struct bq24735 *charger)
int ret;
u16 value;
+ if (pdata->ext_control)
+ return 0;
+
if (pdata->charge_current) {
value = pdata->charge_current & BQ24735_CHARGE_CURRENT_MASK;
@@ -174,16 +199,30 @@ static bool bq24735_charger_is_present(struct bq24735 *charger)
return false;
}
+static int bq24735_charger_is_charging(struct bq24735 *charger)
+{
+ int ret = bq24735_read_word(charger->client, BQ24735_CHG_OPT);
+
+ if (ret < 0)
+ return ret;
+
+ return !(ret & BQ24735_CHG_OPT_CHARGE_DISABLE);
+}
+
static irqreturn_t bq24735_charger_isr(int irq, void *devid)
{
struct power_supply *psy = devid;
struct bq24735 *charger = to_bq24735(psy);
- if (bq24735_charger_is_present(charger))
+ mutex_lock(&charger->lock);
+
+ if (charger->charging && bq24735_charger_is_present(charger))
bq24735_enable_charging(charger);
else
bq24735_disable_charging(charger);
+ mutex_unlock(&charger->lock);
+
power_supply_changed(psy);
return IRQ_HANDLED;
@@ -199,6 +238,19 @@ static int bq24735_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
val->intval = bq24735_charger_is_present(charger) ? 1 : 0;
break;
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (bq24735_charger_is_charging(charger)) {
+ case 1:
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 0:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ }
+ break;
default:
return -EINVAL;
}
@@ -206,6 +258,46 @@ static int bq24735_charger_get_property(struct power_supply *psy,
return 0;
}
+static int bq24735_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct bq24735 *charger = to_bq24735(psy);
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ switch (val->intval) {
+ case POWER_SUPPLY_STATUS_CHARGING:
+ mutex_lock(&charger->lock);
+ charger->charging = true;
+ ret = bq24735_enable_charging(charger);
+ mutex_unlock(&charger->lock);
+ if (ret)
+ return ret;
+ bq24735_config_charger(charger);
+ break;
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ case POWER_SUPPLY_STATUS_NOT_CHARGING:
+ mutex_lock(&charger->lock);
+ charger->charging = false;
+ ret = bq24735_disable_charging(charger);
+ mutex_unlock(&charger->lock);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ power_supply_changed(psy);
+ break;
+ default:
+ return -EPERM;
+ }
+
+ return 0;
+}
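[Editor's note] This handler is reached through the power-supply core; the sysfs store path invokes it on behalf of userspace the same way. A minimal sketch of a kernel-side caller, assuming the standard power_supply_set_property() entry point (the wrapper below is illustrative, not taken from an in-tree user):

    /* Hypothetical caller: ask the charger to stop charging. */
    static int example_stop_charging(struct power_supply *psy)
    {
    	union power_supply_propval val = {
    		.intval = POWER_SUPPLY_STATUS_NOT_CHARGING,
    	};

    	return power_supply_set_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
    }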
+
static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client)
{
struct bq24735_platform *pdata;
@@ -239,6 +331,8 @@ static struct bq24735_platform *bq24735_parse_dt_data(struct i2c_client *client)
if (!ret)
pdata->input_current = val;
+ pdata->ext_control = of_property_read_bool(np, "ti,external-control");
+
return pdata;
}
@@ -255,6 +349,8 @@ static int bq24735_charger_probe(struct i2c_client *client,
if (!charger)
return -ENOMEM;
+ mutex_init(&charger->lock);
+ charger->charging = true;
charger->pdata = client->dev.platform_data;
if (IS_ENABLED(CONFIG_OF) && !charger->pdata && client->dev.of_node)
@@ -285,6 +381,9 @@ static int bq24735_charger_probe(struct i2c_client *client,
supply_desc->properties = bq24735_charger_properties;
supply_desc->num_properties = ARRAY_SIZE(bq24735_charger_properties);
supply_desc->get_property = bq24735_charger_get_property;
+ supply_desc->set_property = bq24735_charger_set_property;
+ supply_desc->property_is_writeable =
+ bq24735_charger_property_is_writeable;
psy_cfg.supplied_to = charger->pdata->supplied_to;
psy_cfg.num_supplicants = charger->pdata->num_supplicants;
@@ -293,27 +392,6 @@ static int bq24735_charger_probe(struct i2c_client *client,
i2c_set_clientdata(client, charger);
- ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
- ret);
- return ret;
- } else if (ret != 0x0040) {
- dev_err(&client->dev,
- "manufacturer id mismatch. 0x0040 != 0x%04x\n", ret);
- return -ENODEV;
- }
-
- ret = bq24735_read_word(client, BQ24735_DEVICE_ID);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to read device id : %d\n", ret);
- return ret;
- } else if (ret != 0x000B) {
- dev_err(&client->dev,
- "device id mismatch. 0x000b != 0x%04x\n", ret);
- return -ENODEV;
- }
-
if (gpio_is_valid(charger->pdata->status_gpio)) {
ret = devm_gpio_request(&client->dev,
charger->pdata->status_gpio,
@@ -327,6 +405,30 @@ static int bq24735_charger_probe(struct i2c_client *client,
charger->pdata->status_gpio_valid = !ret;
}
+ if (!charger->pdata->status_gpio_valid
+ || bq24735_charger_is_present(charger)) {
+ ret = bq24735_read_word(client, BQ24735_MANUFACTURER_ID);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
+ ret);
+ return ret;
+ } else if (ret != 0x0040) {
+ dev_err(&client->dev,
+ "manufacturer id mismatch. 0x0040 != 0x%04x\n", ret);
+ return -ENODEV;
+ }
+
+ ret = bq24735_read_word(client, BQ24735_DEVICE_ID);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read device id : %d\n", ret);
+ return ret;
+ } else if (ret != 0x000B) {
+ dev_err(&client->dev,
+ "device id mismatch. 0x000b != 0x%04x\n", ret);
+ return -ENODEV;
+ }
+ }
+
ret = bq24735_config_charger(charger);
if (ret < 0) {
dev_err(&client->dev, "failed in configuring charger");
diff --git a/drivers/power/bq27xxx_battery.c b/drivers/power/bq27xxx_battery.c
index 6b027a418943..45f6ebf88df6 100644
--- a/drivers/power/bq27xxx_battery.c
+++ b/drivers/power/bq27xxx_battery.c
@@ -46,6 +46,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/power/bq27xxx_battery.h>
@@ -1090,16 +1091,27 @@ static const struct platform_device_id bq27xxx_battery_platform_id_table[] = {
};
MODULE_DEVICE_TABLE(platform, bq27xxx_battery_platform_id_table);
+#ifdef CONFIG_OF
+static const struct of_device_id bq27xxx_battery_platform_of_match_table[] = {
+ { .compatible = "ti,bq27000" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bq27xxx_battery_platform_of_match_table);
+#endif
+
static struct platform_driver bq27xxx_battery_platform_driver = {
.probe = bq27xxx_battery_platform_probe,
.remove = bq27xxx_battery_platform_remove,
.driver = {
.name = "bq27000-battery",
+ .of_match_table = of_match_ptr(bq27xxx_battery_platform_of_match_table),
},
.id_table = bq27xxx_battery_platform_id_table,
};
module_platform_driver(bq27xxx_battery_platform_driver);
+MODULE_ALIAS("platform:bq27000-battery");
+
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("BQ27xxx battery monitor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index 8eafc6f0df88..b8f8d3ade31b 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -166,9 +166,33 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, bq27xxx_i2c_id_table);
+#ifdef CONFIG_OF
+static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
+ { .compatible = "ti,bq27200" },
+ { .compatible = "ti,bq27210" },
+ { .compatible = "ti,bq27500" },
+ { .compatible = "ti,bq27510" },
+ { .compatible = "ti,bq27520" },
+ { .compatible = "ti,bq27530" },
+ { .compatible = "ti,bq27531" },
+ { .compatible = "ti,bq27541" },
+ { .compatible = "ti,bq27542" },
+ { .compatible = "ti,bq27546" },
+ { .compatible = "ti,bq27742" },
+ { .compatible = "ti,bq27545" },
+ { .compatible = "ti,bq27421" },
+ { .compatible = "ti,bq27425" },
+ { .compatible = "ti,bq27441" },
+ { .compatible = "ti,bq27621" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bq27xxx_battery_i2c_of_match_table);
+#endif
+
static struct i2c_driver bq27xxx_battery_i2c_driver = {
.driver = {
.name = "bq27xxx-battery",
+ .of_match_table = of_match_ptr(bq27xxx_battery_i2c_of_match_table),
},
.probe = bq27xxx_battery_i2c_probe,
.remove = bq27xxx_battery_i2c_remove,
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 1ea5d1aa268b..e664ca7c0afd 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -2020,27 +2020,6 @@ static void __exit charger_manager_cleanup(void)
module_exit(charger_manager_cleanup);
/**
- * find_power_supply - find the associated power_supply of charger
- * @cm: the Charger Manager representing the battery
- * @psy: pointer to instance of charger's power_supply
- */
-static bool find_power_supply(struct charger_manager *cm,
- struct power_supply *psy)
-{
- int i;
- bool found = false;
-
- for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
- if (!strcmp(psy->desc->name, cm->desc->psy_charger_stat[i])) {
- found = true;
- break;
- }
- }
-
- return found;
-}
-
-/**
* cm_notify_event - charger driver notify Charger Manager of charger event
* @psy: pointer to instance of charger's power_supply
* @type: type of charger event
@@ -2057,9 +2036,11 @@ void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
- found_power_supply = find_power_supply(cm, psy);
- if (found_power_supply)
+ if (match_string(cm->desc->psy_charger_stat, -1,
+ psy->desc->name) >= 0) {
+ found_power_supply = true;
break;
+ }
}
mutex_unlock(&cm_list_mtx);
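[Editor's note] match_string() scans a NULL-terminated array when passed a size of -1 and returns the matching index or -EINVAL, which is why a plain >= 0 test can replace the removed helper. The semantics in miniature (array contents illustrative):

    static const char * const names[] = { "main", "usb", NULL };

    /* match_string(names, -1, "usb") == 1        (found at index 1) */
    /* match_string(names, -1, "adc") == -EINVAL  (no match)         */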
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 8a971b3dbe58..3a0bc608d4b5 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -26,7 +26,6 @@
static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
static struct work_struct bat_work;
static struct ucb1x00 *ucb;
-static int wakeup_enabled;
struct collie_bat {
int status;
@@ -291,6 +290,8 @@ static struct gpio collie_batt_gpios[] = {
};
#ifdef CONFIG_PM
+static int wakeup_enabled;
+
static int collie_bat_suspend(struct ucb1x00_dev *dev)
{
/* flush all pending status updates */
diff --git a/drivers/power/goldfish_battery.c b/drivers/power/goldfish_battery.c
index a50bb988c69a..f5c525e4482a 100644
--- a/drivers/power/goldfish_battery.c
+++ b/drivers/power/goldfish_battery.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/acpi.h>
struct goldfish_battery_data {
void __iomem *reg_base;
@@ -227,11 +228,25 @@ static int goldfish_battery_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id goldfish_battery_of_match[] = {
+ { .compatible = "google,goldfish-battery", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_battery_of_match);
+
+static const struct acpi_device_id goldfish_battery_acpi_match[] = {
+ { "GFSH0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_battery_acpi_match);
+
static struct platform_driver goldfish_battery_device = {
.probe = goldfish_battery_probe,
.remove = goldfish_battery_remove,
.driver = {
- .name = "goldfish-battery"
+ .name = "goldfish-battery",
+ .of_match_table = goldfish_battery_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_battery_acpi_match),
}
};
module_platform_driver(goldfish_battery_device);
diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
index f03014ea1dc4..3f314b1a30d7 100644
--- a/drivers/power/ipaq_micro_battery.c
+++ b/drivers/power/ipaq_micro_battery.c
@@ -281,7 +281,7 @@ static int micro_batt_remove(struct platform_device *pdev)
return 0;
}
-static int micro_batt_suspend(struct device *dev)
+static int __maybe_unused micro_batt_suspend(struct device *dev)
{
struct micro_battery *mb = dev_get_drvdata(dev);
@@ -289,7 +289,7 @@ static int micro_batt_suspend(struct device *dev)
return 0;
}
-static int micro_batt_resume(struct device *dev)
+static int __maybe_unused micro_batt_resume(struct device *dev)
{
struct micro_battery *mb = dev_get_drvdata(dev);
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 46a292aa182d..4cd6899b961e 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -411,8 +411,10 @@ static int isp1704_charger_probe(struct platform_device *pdev)
if (np) {
int gpio = of_get_named_gpio(np, "nxp,enable-gpio", 0);
- if (gpio < 0)
+ if (gpio < 0) {
+ dev_err(&pdev->dev, "missing DT GPIO nxp,enable-gpio\n");
return gpio;
+ }
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct isp1704_charger_data), GFP_KERNEL);
@@ -422,8 +424,10 @@ static int isp1704_charger_probe(struct platform_device *pdev)
ret = devm_gpio_request_one(&pdev->dev, pdata->enable_gpio,
GPIOF_OUT_INIT_HIGH, "isp1704_reset");
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "gpio request failed\n");
goto fail0;
+ }
}
if (!pdata) {
@@ -443,6 +447,7 @@ static int isp1704_charger_probe(struct platform_device *pdev)
if (IS_ERR(isp->phy)) {
ret = PTR_ERR(isp->phy);
+ dev_err(&pdev->dev, "usb_get_phy failed\n");
goto fail0;
}
@@ -452,8 +457,10 @@ static int isp1704_charger_probe(struct platform_device *pdev)
isp1704_charger_set_power(isp, 1);
ret = isp1704_test_ulpi(isp);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(&pdev->dev, "isp1704_test_ulpi failed\n");
goto fail1;
+ }
isp->psy_desc.name = "isp1704";
isp->psy_desc.type = POWER_SUPPLY_TYPE_USB;
@@ -466,6 +473,7 @@ static int isp1704_charger_probe(struct platform_device *pdev)
isp->psy = power_supply_register(isp->dev, &isp->psy_desc, &psy_cfg);
if (IS_ERR(isp->psy)) {
ret = PTR_ERR(isp->psy);
+ dev_err(&pdev->dev, "power_supply_register failed\n");
goto fail1;
}
@@ -478,8 +486,10 @@ static int isp1704_charger_probe(struct platform_device *pdev)
isp->nb.notifier_call = isp1704_notifier_call;
ret = usb_register_notifier(isp->phy, &isp->nb);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "usb_register_notifier failed\n");
goto fail2;
+ }
dev_info(isp->dev, "registered with product id %s\n", isp->model);
@@ -526,6 +536,7 @@ static int isp1704_charger_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id omap_isp1704_of_match[] = {
{ .compatible = "nxp,isp1704", },
+ { .compatible = "nxp,isp1707", },
{},
};
MODULE_DEVICE_TABLE(of, omap_isp1704_of_match);
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index abdfc21ec13f..88f04f4d1a70 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -208,7 +208,7 @@ static void jz_battery_update(struct jz_battery *jz_battery)
}
voltage = jz_battery_read_voltage(jz_battery);
- if (abs(voltage - jz_battery->voltage) < 50000) {
+ if (voltage >= 0 && abs(voltage - jz_battery->voltage) > 50000) {
jz_battery->voltage = voltage;
has_changed = true;
}
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
index f5a48fd68b01..7321b727d484 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/lp8788-charger.c
@@ -455,7 +455,7 @@ static void lp8788_charger_event(struct work_struct *work)
static bool lp8788_find_irq_id(struct lp8788_charger *pchg, int virq, int *id)
{
- bool found;
+ bool found = false;
int i;
for (i = 0; i < pchg->num_irqs; i++) {
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index 8f9bd1d0eeb6..fb62ed3fc38c 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -911,11 +911,7 @@ static struct pm2xxx_irq pm2xxx_charger_irq[] = {
{"PM2XXX_IRQ_INT", pm2xxx_irq_int},
};
-#ifdef CONFIG_PM
-
-#ifdef CONFIG_PM_SLEEP
-
-static int pm2xxx_wall_charger_resume(struct device *dev)
+static int __maybe_unused pm2xxx_wall_charger_resume(struct device *dev)
{
struct i2c_client *i2c_client = to_i2c_client(dev);
struct pm2xxx_charger *pm2;
@@ -931,7 +927,7 @@ static int pm2xxx_wall_charger_resume(struct device *dev)
return 0;
}
-static int pm2xxx_wall_charger_suspend(struct device *dev)
+static int __maybe_unused pm2xxx_wall_charger_suspend(struct device *dev)
{
struct i2c_client *i2c_client = to_i2c_client(dev);
struct pm2xxx_charger *pm2;
@@ -949,9 +945,7 @@ static int pm2xxx_wall_charger_suspend(struct device *dev)
return 0;
}
-#endif
-
-static int pm2xxx_runtime_suspend(struct device *dev)
+static int __maybe_unused pm2xxx_runtime_suspend(struct device *dev)
{
struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
struct pm2xxx_charger *pm2;
@@ -962,7 +956,7 @@ static int pm2xxx_runtime_suspend(struct device *dev)
return 0;
}
-static int pm2xxx_runtime_resume(struct device *dev)
+static int __maybe_unused pm2xxx_runtime_resume(struct device *dev)
{
struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
struct pm2xxx_charger *pm2;
@@ -975,15 +969,11 @@ static int pm2xxx_runtime_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops pm2xxx_pm_ops = {
+static const struct dev_pm_ops pm2xxx_pm_ops __maybe_unused = {
SET_SYSTEM_SLEEP_PM_OPS(pm2xxx_wall_charger_suspend,
pm2xxx_wall_charger_resume)
SET_RUNTIME_PM_OPS(pm2xxx_runtime_suspend, pm2xxx_runtime_resume, NULL)
};
-#define PM2XXX_PM_OPS (&pm2xxx_pm_ops)
-#else
-#define PM2XXX_PM_OPS NULL
-#endif
static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
const struct i2c_device_id *id)
@@ -1244,7 +1234,7 @@ static struct i2c_driver pm2xxx_charger_driver = {
.remove = pm2xxx_wall_charger_remove,
.driver = {
.name = "pm2xxx-wall_charger",
- .pm = PM2XXX_PM_OPS,
+ .pm = IS_ENABLED(CONFIG_PM) ? &pm2xxx_pm_ops : NULL,
},
.id_table = pm2xxx_id,
};
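[Editor's note] The conversion above is the standard recipe for dropping nested CONFIG_PM #ifdef blocks: each callback is marked __maybe_unused so the compiler silently discards it when SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() expand to nothing, and IS_ENABLED(CONFIG_PM) selects the ops table at compile time. The same shape in miniature (names hypothetical):

    static int __maybe_unused foo_suspend(struct device *dev)
    {
    	/* quiesce the device here */
    	return 0;
    }

    static const struct dev_pm_ops foo_pm_ops __maybe_unused = {
    	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, NULL)
    };

    /* in the driver struct: .pm = IS_ENABLED(CONFIG_PM) ? &foo_pm_ops : NULL, */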
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index ed2d7fd0c734..80fed98832f9 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -45,7 +45,8 @@ static ssize_t power_supply_show_property(struct device *dev,
char *buf) {
static char *type_text[] = {
"Unknown", "Battery", "UPS", "Mains", "USB",
- "USB_DCP", "USB_CDP", "USB_ACA"
+ "USB_DCP", "USB_CDP", "USB_ACA", "USB_C",
+ "USB_PD", "USB_PD_DRP"
};
static char *status_text[] = {
"Unknown", "Charging", "Discharging", "Not charging", "Full"
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 1131cf75acc6..0a6408a39c66 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -148,6 +148,7 @@ config POWER_RESET_KEYSTONE
config POWER_RESET_SYSCON
bool "Generic SYSCON regmap reset driver"
depends on OF
+ depends on HAS_IOMEM
select MFD_SYSCON
help
Reboot support for generic SYSCON mapped register reset.
@@ -155,6 +156,7 @@ config POWER_RESET_SYSCON
config POWER_RESET_SYSCON_POWEROFF
bool "Generic SYSCON regmap poweroff driver"
depends on OF
+ depends on HAS_IOMEM
select MFD_SYSCON
help
Poweroff support for generic SYSCON mapped register poweroff.
diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c
index b208073c887d..06d34ab47df5 100644
--- a/drivers/power/reset/arm-versatile-reboot.c
+++ b/drivers/power/reset/arm-versatile-reboot.c
@@ -18,8 +18,8 @@
#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
#define INTEGRATOR_CM_CTRL_RESET (1 << 3)
-#define REALVIEW_SYS_LOCK_OFFSET 0x20
-#define REALVIEW_SYS_RESETCTL_OFFSET 0x40
+#define VERSATILE_SYS_LOCK_OFFSET 0x20
+#define VERSATILE_SYS_RESETCTL_OFFSET 0x40
/* Magic unlocking token used on all Versatile boards */
#define VERSATILE_LOCK_VAL 0xA05F
@@ -29,6 +29,7 @@
*/
enum versatile_reboot {
INTEGRATOR_REBOOT_CM,
+ VERSATILE_REBOOT_CM,
REALVIEW_REBOOT_EB,
REALVIEW_REBOOT_PB1176,
REALVIEW_REBOOT_PB11MP,
@@ -46,6 +47,10 @@ static const struct of_device_id versatile_reboot_of_match[] = {
.data = (void *)INTEGRATOR_REBOOT_CM
},
{
+ .compatible = "arm,core-module-versatile",
+ .data = (void *)VERSATILE_REBOOT_CM,
+ },
+ {
.compatible = "arm,realview-eb-syscon",
.data = (void *)REALVIEW_REBOOT_EB,
},
@@ -82,33 +87,43 @@ static int versatile_reboot(struct notifier_block *this, unsigned long mode,
INTEGRATOR_CM_CTRL_RESET,
INTEGRATOR_CM_CTRL_RESET);
break;
+ case VERSATILE_REBOOT_CM:
+ regmap_write(syscon_regmap, VERSATILE_SYS_LOCK_OFFSET,
+ VERSATILE_LOCK_VAL);
+ regmap_update_bits(syscon_regmap,
+ VERSATILE_SYS_RESETCTL_OFFSET,
+ 0x0107,
+ 0x0105);
+ regmap_write(syscon_regmap, VERSATILE_SYS_LOCK_OFFSET,
+ 0);
+ break;
case REALVIEW_REBOOT_EB:
- regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
+ regmap_write(syscon_regmap, VERSATILE_SYS_LOCK_OFFSET,
VERSATILE_LOCK_VAL);
regmap_write(syscon_regmap,
- REALVIEW_SYS_RESETCTL_OFFSET, 0x0008);
+ VERSATILE_SYS_RESETCTL_OFFSET, 0x0008);
break;
case REALVIEW_REBOOT_PB1176:
- regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
+ regmap_write(syscon_regmap, VERSATILE_SYS_LOCK_OFFSET,
VERSATILE_LOCK_VAL);
regmap_write(syscon_regmap,
- REALVIEW_SYS_RESETCTL_OFFSET, 0x0100);
+ VERSATILE_SYS_RESETCTL_OFFSET, 0x0100);
break;
case REALVIEW_REBOOT_PB11MP:
case REALVIEW_REBOOT_PBA8:
- regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
+ regmap_write(syscon_regmap, VERSATILE_SYS_LOCK_OFFSET,
VERSATILE_LOCK_VAL);
- regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET,
+ regmap_write(syscon_regmap, VERSATILE_SYS_RESETCTL_OFFSET,
0x0000);
- regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET,
+ regmap_write(syscon_regmap, VERSATILE_SYS_RESETCTL_OFFSET,
0x0004);
break;
case REALVIEW_REBOOT_PBX:
- regmap_write(syscon_regmap, REALVIEW_SYS_LOCK_OFFSET,
+ regmap_write(syscon_regmap, VERSATILE_SYS_LOCK_OFFSET,
VERSATILE_LOCK_VAL);
- regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET,
+ regmap_write(syscon_regmap, VERSATILE_SYS_RESETCTL_OFFSET,
0x00f0);
- regmap_write(syscon_regmap, REALVIEW_SYS_RESETCTL_OFFSET,
+ regmap_write(syscon_regmap, VERSATILE_SYS_RESETCTL_OFFSET,
0x00f4);
break;
}
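[Editor's note] Every branch above follows the same unlock-then-write sequence against the syscon regmap. Condensed into a hypothetical helper for clarity (note that only the new core-module path re-locks afterwards; the RealView paths leave the register unlocked, as before):

    static void versatile_unlocked_write(struct regmap *map, u32 reg, u32 val)
    {
    	/* the magic token opens the protected register window */
    	regmap_write(map, VERSATILE_SYS_LOCK_OFFSET, VERSATILE_LOCK_VAL);
    	regmap_write(map, reg, val);
    }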
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 6c592dc71aee..8fad0a7044d3 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -133,6 +133,12 @@ struct rapl_domain_data {
unsigned long timestamp;
};
+struct msrl_action {
+ u32 msr_no;
+ u64 clear_mask;
+ u64 set_mask;
+ int err;
+};
#define DOMAIN_STATE_INACTIVE BIT(0)
#define DOMAIN_STATE_POWER_LIMIT_SET BIT(1)
@@ -149,6 +155,7 @@ struct rapl_power_limit {
static const char pl1_name[] = "long_term";
static const char pl2_name[] = "short_term";
+struct rapl_package;
struct rapl_domain {
const char *name;
enum rapl_domain_type id;
@@ -159,7 +166,7 @@ struct rapl_domain {
u64 attr_map; /* track capabilities */
unsigned int state;
unsigned int domain_energy_unit;
- int package_id;
+ struct rapl_package *rp;
};
#define power_zone_to_rapl_domain(_zone) \
container_of(_zone, struct rapl_domain, power_zone)
@@ -184,6 +191,7 @@ struct rapl_package {
* notify interrupt enable status.
*/
struct list_head plist;
+ int lead_cpu; /* one active cpu per package for access */
};
struct rapl_defaults {
@@ -231,10 +239,10 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
static int rapl_write_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim,
unsigned long long value);
-static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
+static u64 rapl_unit_xlate(struct rapl_domain *rd,
enum unit_type type, u64 value,
int to_raw);
-static void package_power_limit_irq_save(int package_id);
+static void package_power_limit_irq_save(struct rapl_package *rp);
static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
@@ -260,20 +268,6 @@ static struct rapl_package *find_package_by_id(int id)
return NULL;
}
-/* caller to ensure CPU hotplug lock is held */
-static int find_active_cpu_on_package(int package_id)
-{
- int i;
-
- for_each_online_cpu(i) {
- if (topology_physical_package_id(i) == package_id)
- return i;
- }
- /* all CPUs on this package are offline */
-
- return -ENODEV;
-}
-
/* caller must hold cpu hotplug lock */
static void rapl_cleanup_data(void)
{
@@ -312,25 +306,19 @@ static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
{
struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
- *energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+ *energy = rapl_unit_xlate(rd, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
return 0;
}
static int release_zone(struct powercap_zone *power_zone)
{
struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
- struct rapl_package *rp;
+ struct rapl_package *rp = rd->rp;
/* package zone is the last zone of a package; we can free
* memory here since all children have been unregistered.
*/
if (rd->id == RAPL_DOMAIN_PACKAGE) {
- rp = find_package_by_id(rd->package_id);
- if (!rp) {
- dev_warn(&power_zone->dev, "no package id %s\n",
- rd->name);
- return -ENODEV;
- }
kfree(rd);
rp->domains = NULL;
}
@@ -432,11 +420,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int id,
get_online_cpus();
rd = power_zone_to_rapl_domain(power_zone);
- rp = find_package_by_id(rd->package_id);
- if (!rp) {
- ret = -ENODEV;
- goto set_exit;
- }
+ rp = rd->rp;
if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
@@ -456,7 +440,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int id,
ret = -EINVAL;
}
if (!ret)
- package_power_limit_irq_save(rd->package_id);
+ package_power_limit_irq_save(rp);
set_exit:
put_online_cpus();
return ret;
@@ -655,24 +639,19 @@ static void rapl_init_domains(struct rapl_package *rp)
break;
}
if (mask) {
- rd->package_id = rp->id;
+ rd->rp = rp;
rd++;
}
}
}
-static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
- enum unit_type type, u64 value,
- int to_raw)
+static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
+ u64 value, int to_raw)
{
u64 units = 1;
- struct rapl_package *rp;
+ struct rapl_package *rp = rd->rp;
u64 scale = 1;
- rp = find_package_by_id(package);
- if (!rp)
- return value;
-
switch (type) {
case POWER_UNIT:
units = rp->power_unit;
@@ -769,10 +748,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
msr = rd->msrs[rp->id];
if (!msr)
return -EINVAL;
- /* use physical package id to look up active cpus */
- cpu = find_active_cpu_on_package(rd->package_id);
- if (cpu < 0)
- return cpu;
+
+ cpu = rd->rp->lead_cpu;
/* special-case package domain, which uses a different bit*/
if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
@@ -793,42 +770,66 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
final = value & rp->mask;
final = final >> rp->shift;
if (xlate)
- *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
+ *data = rapl_unit_xlate(rd, rp->unit, final, 0);
else
*data = final;
return 0;
}
+
+static int msrl_update_safe(u32 msr_no, u64 clear_mask, u64 set_mask)
+{
+ int err;
+ u64 val;
+
+ err = rdmsrl_safe(msr_no, &val);
+ if (err)
+ goto out;
+
+ val &= ~clear_mask;
+ val |= set_mask;
+
+ err = wrmsrl_safe(msr_no, val);
+
+out:
+ return err;
+}
+
+static void msrl_update_func(void *info)
+{
+ struct msrl_action *ma = info;
+
+ ma->err = msrl_update_safe(ma->msr_no, ma->clear_mask, ma->set_mask);
+}
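[Editor's note] The _safe MSR accessors above operate on the CPU they run on, which is why the arguments are bundled into struct msrl_action and shipped to the package's lead CPU with smp_call_function_single(). An illustrative caller (the MSR and masks are example values; wait=1 guarantees ma.err is valid on return):

    struct msrl_action ma = {
    	.msr_no     = MSR_PKG_POWER_LIMIT,	/* example MSR */
    	.clear_mask = 0x7fffULL,		/* example field mask */
    	.set_mask   = 0x1234ULL,		/* example field value */
    };
    int err = smp_call_function_single(target_cpu, msrl_update_func, &ma, 1);

    if (!err)
    	err = ma.err;	/* rdmsrl_safe/wrmsrl_safe result from the target CPU */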
+
/* Similar use of primitive info in the read counterpart */
static int rapl_write_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim,
unsigned long long value)
{
- u64 msr_val;
- u32 msr;
struct rapl_primitive_info *rp = &rpi[prim];
int cpu;
+ u64 bits;
+ struct msrl_action ma;
+ int ret;
- cpu = find_active_cpu_on_package(rd->package_id);
- if (cpu < 0)
- return cpu;
- msr = rd->msrs[rp->id];
- if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
- dev_dbg(&rd->power_zone.dev,
- "failed to read msr 0x%x on cpu %d\n", msr, cpu);
- return -EIO;
- }
- value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
- msr_val &= ~rp->mask;
- msr_val |= value << rp->shift;
- if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
- dev_dbg(&rd->power_zone.dev,
- "failed to write msr 0x%x on cpu %d\n", msr, cpu);
- return -EIO;
- }
+ cpu = rd->rp->lead_cpu;
+ bits = rapl_unit_xlate(rd, rp->unit, value, 1);
+ bits <<= rp->shift;
+ memset(&ma, 0, sizeof(ma));
- return 0;
+ ma.msr_no = rd->msrs[rp->id];
+ ma.clear_mask = rp->mask;
+ ma.set_mask = bits;
+
+ ret = smp_call_function_single(cpu, msrl_update_func, &ma, 1);
+ if (ret)
+ WARN_ON_ONCE(ret);
+ else
+ ret = ma.err;
+
+ return ret;
}
/*
@@ -893,6 +894,21 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
return 0;
}
+static void power_limit_irq_save_cpu(void *info)
+{
+ u32 l, h = 0;
+ struct rapl_package *rp = (struct rapl_package *)info;
+
+ /* save the state of PLN irq mask bit before disabling it */
+ rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+ if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+ rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+ rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+ }
+ l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+ wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
/* REVISIT:
* When package power limit is set artificially low by RAPL, LVT
@@ -904,61 +920,40 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
* to do by adding an atomic notifier.
*/
-static void package_power_limit_irq_save(int package_id)
+static void package_power_limit_irq_save(struct rapl_package *rp)
{
- u32 l, h = 0;
- int cpu;
- struct rapl_package *rp;
-
- rp = find_package_by_id(package_id);
- if (!rp)
- return;
-
if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
return;
- cpu = find_active_cpu_on_package(package_id);
- if (cpu < 0)
- return;
- /* save the state of PLN irq mask bit before disabling it */
- rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
- if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
- rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
- rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
- }
- l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
- wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1);
}
-/* restore per package power limit interrupt enable state */
-static void package_power_limit_irq_restore(int package_id)
+static void power_limit_irq_restore_cpu(void *info)
{
- u32 l, h;
- int cpu;
- struct rapl_package *rp;
+ u32 l, h = 0;
+ struct rapl_package *rp = (struct rapl_package *)info;
- rp = find_package_by_id(package_id);
- if (!rp)
- return;
+ rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
- if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
- return;
+ if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+ l |= PACKAGE_THERM_INT_PLN_ENABLE;
+ else
+ l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+ wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
- cpu = find_active_cpu_on_package(package_id);
- if (cpu < 0)
+/* restore per package power limit interrupt enable state */
+static void package_power_limit_irq_restore(struct rapl_package *rp)
+{
+ if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
return;
/* irq enable state not saved, nothing to restore */
if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
return;
- rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
-
- if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
- l |= PACKAGE_THERM_INT_PLN_ENABLE;
- else
- l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
- wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+ smp_call_function_single(rp->lead_cpu, power_limit_irq_restore_cpu, rp, 1);
}
static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
@@ -1096,6 +1091,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+ RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
@@ -1141,7 +1137,7 @@ static int rapl_unregister_powercap(void)
* hotplug lock held
*/
list_for_each_entry(rp, &rapl_packages, plist) {
- package_power_limit_irq_restore(rp->id);
+ package_power_limit_irq_restore(rp);
for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
rd++) {
@@ -1392,7 +1388,8 @@ static int rapl_detect_topology(void)
/* add the new package to the list */
new_package->id = phy_package_id;
new_package->nr_cpus = 1;
-
+ /* use the first active cpu of the package for MSR access */
+ new_package->lead_cpu = i;
/* check if the package contains valid domains */
if (rapl_detect_domains(new_package, i) ||
rapl_defaults->check_unit(new_package, i)) {
@@ -1448,6 +1445,8 @@ static int rapl_add_package(int cpu)
/* add the new package to the list */
rp->id = phy_package_id;
rp->nr_cpus = 1;
+ rp->lead_cpu = cpu;
+
/* check if the package contains valid domains */
if (rapl_detect_domains(rp, cpu) ||
rapl_defaults->check_unit(rp, cpu)) {
@@ -1480,6 +1479,7 @@ static int rapl_cpu_callback(struct notifier_block *nfb,
unsigned long cpu = (unsigned long)hcpu;
int phy_package_id;
struct rapl_package *rp;
+ int lead_cpu;
phy_package_id = topology_physical_package_id(cpu);
switch (action) {
@@ -1500,6 +1500,15 @@ static int rapl_cpu_callback(struct notifier_block *nfb,
break;
if (--rp->nr_cpus == 0)
rapl_remove_package(rp);
+ else if (cpu == rp->lead_cpu) {
+ /* choose another active cpu in the package */
+ lead_cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+ if (lead_cpu < nr_cpu_ids)
+ rp->lead_cpu = lead_cpu;
+ else /* should never go here */
+ pr_err("no active cpu available for package %d\n",
+ phy_package_id);
+ }
}
return NOTIFY_OK;
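[Editor's note] cpumask_any_but() returns an arbitrary online CPU from the mask excluding the named one, or a value >= nr_cpu_ids when none remains; since nr_cpus was just decremented and found nonzero, a sibling must still be online, which is why the error branch should be unreachable. The handover logic in isolation (hypothetical helper):

    static int pick_new_lead_cpu(const struct cpumask *pkg_mask, int dying_cpu)
    {
    	int cpu = cpumask_any_but(pkg_mask, dying_cpu);

    	return cpu < nr_cpu_ids ? cpu : -ENODEV;	/* -ENODEV: package empty */
    }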
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index da7bae991552..579fd65299a0 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -22,6 +22,7 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include "ptp_private.h"
@@ -120,11 +121,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
struct ptp_clock_caps caps;
struct ptp_clock_request req;
struct ptp_sys_offset *sysoff = NULL;
+ struct ptp_sys_offset_precise precise_offset;
struct ptp_pin_desc pd;
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock_info *ops = ptp->info;
struct ptp_clock_time *pct;
struct timespec64 ts;
+ struct system_device_crosststamp xtstamp;
int enable, err = 0;
unsigned int i, pin_index;
@@ -138,6 +141,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
caps.n_per_out = ptp->info->n_per_out;
caps.pps = ptp->info->pps;
caps.n_pins = ptp->info->n_pins;
+ caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
err = -EFAULT;
break;
@@ -180,6 +184,29 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = ops->enable(ops, &req, enable);
break;
+ case PTP_SYS_OFFSET_PRECISE:
+ if (!ptp->info->getcrosststamp) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+ err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
+ if (err)
+ break;
+
+ ts = ktime_to_timespec64(xtstamp.device);
+ precise_offset.device.sec = ts.tv_sec;
+ precise_offset.device.nsec = ts.tv_nsec;
+ ts = ktime_to_timespec64(xtstamp.sys_realtime);
+ precise_offset.sys_realtime.sec = ts.tv_sec;
+ precise_offset.sys_realtime.nsec = ts.tv_nsec;
+ ts = ktime_to_timespec64(xtstamp.sys_monoraw);
+ precise_offset.sys_monoraw.sec = ts.tv_sec;
+ precise_offset.sys_monoraw.nsec = ts.tv_nsec;
+ if (copy_to_user((void __user *)arg, &precise_offset,
+ sizeof(precise_offset)))
+ err = -EFAULT;
+ break;
+
case PTP_SYS_OFFSET:
sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
if (!sysoff) {
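[Editor's note] From userspace the new request is reached through the PTP character device. A hedged usage sketch (/dev/ptp0 is an example path; the ioctl fails with EOPNOTSUPP unless the underlying driver implements getcrosststamp):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    int read_precise_offset(struct ptp_sys_offset_precise *off)
    {
    	int fd = open("/dev/ptp0", O_RDWR);
    	int ret;

    	if (fd < 0)
    		return -1;
    	ret = ioctl(fd, PTP_SYS_OFFSET_PRECISE, off);
    	close(fd);
    	/* on success off->device, off->sys_realtime and off->sys_monoraw
    	 * hold one coherent triple of timestamps */
    	return ret;
    }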
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 8cf0dae78555..c182efc62c7b 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -316,7 +316,7 @@ config PWM_RCAR
config PWM_RENESAS_TPU
tristate "Renesas TPU PWM support"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_IOMEM
help
This driver exposes the Timer Pulse Unit (TPU) PWM controller found
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 423ce087cd9c..5d5adee16886 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
p->base = devm_ioremap_resource(&pdev->dev, res);
- if (!p->base) {
- ret = -ENOMEM;
+ if (IS_ERR(p->base)) {
+ ret = PTR_ERR(p->base);
goto out_clk;
}
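[Editor's note] devm_ioremap_resource() never returns NULL; failures come back as an ERR_PTR-encoded errno, so the old NULL test could never fire, and the fix also propagates the real error code instead of a blanket -ENOMEM. The canonical shape:

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
    	return PTR_ERR(base);	/* e.g. -EBUSY, -EINVAL, -ENOMEM */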
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 7225ac6b3df5..fad968eb75f6 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -392,7 +392,7 @@ static const struct regmap_config fsl_pwm_regmap_config = {
.max_register = FTM_PWMLOAD,
.volatile_reg = fsl_pwm_volatile_reg,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_FLAT,
};
static int fsl_pwm_probe(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 8a029f9bc18c..2fb30deee345 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -237,6 +237,11 @@ static int img_pwm_probe(struct platform_device *pdev)
}
clk_rate = clk_get_rate(pwm->pwm_clk);
+ if (!clk_rate) {
+ dev_err(&pdev->dev, "pwm clock has no frequency\n");
+ ret = -EINVAL;
+ goto disable_pwmclk;
+ }
/* The maximum input clock divider is 512 */
val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 9163085101bc..9861fed4e67d 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -360,6 +360,11 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
}
lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk);
+ if (!lpc18xx_pwm->clk_rate) {
+ dev_err(&pdev->dev, "pwm clock has no frequency\n");
+ ret = -EINVAL;
+ goto disable_pwmclk;
+ }
mutex_init(&lpc18xx_pwm->res_lock);
mutex_init(&lpc18xx_pwm->period_lock);
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 826634ec0d5c..b7e6ecba7d5c 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -31,6 +31,7 @@
#include <linux/time.h>
#define DM_TIMER_LOAD_MIN 0xfffffffe
+#define DM_TIMER_MAX 0xffffffff
struct pwm_omap_dmtimer_chip {
struct pwm_chip chip;
@@ -46,13 +47,9 @@ to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
}
-static int pwm_omap_dmtimer_calc_value(unsigned long clk_rate, int ns)
+static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
{
- u64 c = (u64)clk_rate * ns;
-
- do_div(c, NSEC_PER_SEC);
-
- return DM_TIMER_LOAD_MIN - c;
+ return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
}
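[Editor's note] A worked example of the new conversion (numbers illustrative): with clk_rate = 32768 Hz and period_ns = 100000 (100 us),

    cycles = DIV_ROUND_CLOSEST_ULL(32768ULL * 100000, NSEC_PER_SEC)
           = DIV_ROUND_CLOSEST_ULL(3276800000, 1000000000)
           = 3	/* 3.2768 rounds to 3 */

Unlike the old do_div() truncation, DIV_ROUND_CLOSEST_ULL rounds to the nearest cycle (2.5 becomes 3), halving the worst-case quantization error, and the helper now returns a plain cycle count rather than a value pre-subtracted from DM_TIMER_LOAD_MIN.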
static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
@@ -99,12 +96,14 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
int duty_ns, int period_ns)
{
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
- int load_value, match_value;
+ u32 period_cycles, duty_cycles;
+ u32 load_value, match_value;
struct clk *fclk;
unsigned long clk_rate;
bool timer_active;
- dev_dbg(chip->dev, "duty cycle: %d, period %d\n", duty_ns, period_ns);
+ dev_dbg(chip->dev, "requested duty cycle: %d ns, period: %d ns\n",
+ duty_ns, period_ns);
mutex_lock(&omap->mutex);
if (duty_ns == pwm_get_duty_cycle(pwm) &&
@@ -117,15 +116,13 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
fclk = omap->pdata->get_fclk(omap->dm_timer);
if (!fclk) {
dev_err(chip->dev, "invalid pmtimer fclk\n");
- mutex_unlock(&omap->mutex);
- return -EINVAL;
+ goto err_einval;
}
clk_rate = clk_get_rate(fclk);
if (!clk_rate) {
dev_err(chip->dev, "invalid pmtimer fclk rate\n");
- mutex_unlock(&omap->mutex);
- return -EINVAL;
+ goto err_einval;
}
dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
@@ -133,11 +130,51 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
/*
* Calculate the appropriate load and match values based on the
* specified period and duty cycle. The load value determines the
- * cycle time and the match value determines the duty cycle.
+ * period time and the match value determines the duty time.
+ *
+ * The period lasts for (DM_TIMER_MAX-load_value+1) clock cycles.
+ * Similarly, the active time lasts (match_value-load_value+1) cycles.
+ * The non-active time is the remainder: (DM_TIMER_MAX-match_value)
+ * clock cycles.
+ *
+ * NOTE: It is required that: load_value <= match_value < DM_TIMER_MAX
+ *
+ * References:
+ * OMAP4430/60/70 TRM sections 22.2.4.10 and 22.2.4.11
+ * AM335x Sitara TRM sections 20.1.3.5 and 20.1.3.6
*/
- load_value = pwm_omap_dmtimer_calc_value(clk_rate, period_ns);
- match_value = pwm_omap_dmtimer_calc_value(clk_rate,
- period_ns - duty_ns);
+ period_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, period_ns);
+ duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
+
+ if (period_cycles < 2) {
+ dev_info(chip->dev,
+ "period %d ns too short for clock rate %lu Hz\n",
+ period_ns, clk_rate);
+ goto err_einval;
+ }
+
+ if (duty_cycles < 1) {
+ dev_dbg(chip->dev,
+ "duty cycle %d ns is too short for clock rate %lu Hz\n",
+ duty_ns, clk_rate);
+ dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
+ duty_cycles = 1;
+ } else if (duty_cycles >= period_cycles) {
+ dev_dbg(chip->dev,
+ "duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
+ duty_ns, period_ns, clk_rate);
+ dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
+ duty_cycles = period_cycles - 1;
+ }
+
+ dev_dbg(chip->dev, "effective duty cycle: %lld ns, period: %lld ns\n",
+ DIV_ROUND_CLOSEST_ULL((u64)NSEC_PER_SEC * duty_cycles,
+ clk_rate),
+ DIV_ROUND_CLOSEST_ULL((u64)NSEC_PER_SEC * period_cycles,
+ clk_rate));
+
+ load_value = (DM_TIMER_MAX - period_cycles) + 1;
+ match_value = load_value + duty_cycles - 1;
/*
* We MUST stop the associated dual-mode timer before attempting to
@@ -166,6 +203,11 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
mutex_unlock(&omap->mutex);
return 0;
+
+err_einval:
+ mutex_unlock(&omap->mutex);
+
+ return -EINVAL;
}
static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index 3e3be57e9a1a..b5a10d3c92c7 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -67,6 +67,14 @@ config RAPIDIO_ENUM_BASIC
endchoice
+config RAPIDIO_MPORT_CDEV
+ tristate "RapidIO /dev mport device driver"
+ depends on RAPIDIO
+ help
+ This option includes a generic RapidIO mport device driver which
+ allows user space applications to perform RapidIO-specific
+ operations through a selected RapidIO mport.
+
menu "RapidIO Switch drivers"
depends on RAPIDIO
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 9432c494cf57..927dbf89592b 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_mport.o
tsi721_mport-y := tsi721.o
tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o
+obj-$(CONFIG_RAPIDIO_MPORT_CDEV) += rio_mport_cdev.o
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
new file mode 100644
index 000000000000..5d4d91846357
--- /dev/null
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -0,0 +1,2720 @@
+/*
+ * RapidIO mport character device
+ *
+ * Copyright 2014-2015 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ * Copyright 2014-2015 Prodrive Technologies
+ * Andre van Herk <andre.van.herk@prodrive-technologies.com>
+ * Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Aurelien Jacquiot <a-jacquiot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/cdev.h>
+#include <linux/ioctl.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/net.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/kfifo.h>
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+
+#include <linux/dma-mapping.h>
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+#include <linux/dmaengine.h>
+#endif
+
+#include <linux/rio.h>
+#include <linux/rio_ids.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_mport_cdev.h>
+
+#include "../rio.h"
+
+#define DRV_NAME "rio_mport"
+#define DRV_PREFIX DRV_NAME ": "
+#define DEV_NAME "rio_mport"
+#define DRV_VERSION "1.0.0"
+
+/* Debug output filtering masks */
+enum {
+ DBG_NONE = 0,
+ DBG_INIT = BIT(0), /* driver init */
+ DBG_EXIT = BIT(1), /* driver exit */
+ DBG_MPORT = BIT(2), /* mport add/remove */
+ DBG_RDEV = BIT(3), /* RapidIO device add/remove */
+ DBG_DMA = BIT(4), /* DMA transfer messages */
+ DBG_MMAP = BIT(5), /* mapping messages */
+ DBG_IBW = BIT(6), /* inbound window */
+ DBG_EVENT = BIT(7), /* event handling messages */
+ DBG_OBW = BIT(8), /* outbound window messages */
+ DBG_DBELL = BIT(9), /* doorbell messages */
+ DBG_ALL = ~0,
+};
+
+#ifdef DEBUG
+#define rmcd_debug(level, fmt, arg...) \
+ do { \
+ if (DBG_##level & dbg_level) \
+ pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
+ } while (0)
+#else
+#define rmcd_debug(level, fmt, arg...) \
+ no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
+#endif
+
+#define rmcd_warn(fmt, arg...) \
+ pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)
+
+#define rmcd_error(fmt, arg...) \
+ pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
+
+MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
+MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
+MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
+MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
+MODULE_DESCRIPTION("RapidIO mport character device driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int dma_timeout = 3000; /* DMA transfer timeout in msec */
+module_param(dma_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");
+
+#ifdef DEBUG
+static u32 dbg_level = DBG_NONE;
+module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
+MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
+#endif
+
+/*
+ * An internal DMA coherent buffer
+ */
+struct mport_dma_buf {
+ void *ib_base;
+ dma_addr_t ib_phys;
+ u32 ib_size;
+ u64 ib_rio_base;
+ bool ib_map;
+ struct file *filp;
+};
+
+/*
+ * Internal memory mapping structure
+ */
+enum rio_mport_map_dir {
+ MAP_INBOUND,
+ MAP_OUTBOUND,
+ MAP_DMA,
+};
+
+struct rio_mport_mapping {
+ struct list_head node;
+ struct mport_dev *md;
+ enum rio_mport_map_dir dir;
+ u32 rioid;
+ u64 rio_addr;
+ dma_addr_t phys_addr; /* for mmap */
+ void *virt_addr; /* kernel address, for dma_free_coherent */
+ u64 size;
+ struct kref ref; /* refcount of vmas sharing the mapping */
+ struct file *filp;
+};
+
+struct rio_mport_dma_map {
+ int valid;
+ uint64_t length;
+ void *vaddr;
+ dma_addr_t paddr;
+};
+
+#define MPORT_MAX_DMA_BUFS 16
+#define MPORT_EVENT_DEPTH 10
+
+/*
+ * mport_dev - driver-specific structure that represents an mport device
+ * @active mport device status flag
+ * @node list node to maintain list of registered mports
+ * @cdev character device
+ * @dev associated device object
+ * @mport associated subsystem's master port device object
+ * @buf_mutex lock for buffer handling
+ * @file_mutex - lock for open files list
+ * @file_list - list of open files on given mport
+ * @properties properties of this mport
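+ * @doorbells list of inbound doorbell filters
+ * @db_lock lock for the doorbell filter list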
+ * @portwrites queue of inbound portwrites
+ * @pw_lock lock for port write queue
+ * @mappings queue for memory mappings
+ * @dma_chan DMA channels associated with this device
+ * @dma_ref kref on the default DMA channel
+ * @comp completion used when releasing the default DMA channel
+ */
+struct mport_dev {
+ atomic_t active;
+ struct list_head node;
+ struct cdev cdev;
+ struct device dev;
+ struct rio_mport *mport;
+ struct mutex buf_mutex;
+ struct mutex file_mutex;
+ struct list_head file_list;
+ struct rio_mport_properties properties;
+ struct list_head doorbells;
+ spinlock_t db_lock;
+ struct list_head portwrites;
+ spinlock_t pw_lock;
+ struct list_head mappings;
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ struct dma_chan *dma_chan;
+ struct kref dma_ref;
+ struct completion comp;
+#endif
+};
+
+/*
+ * mport_cdev_priv - data structure specific to individual file object
+ * associated with an open device
+ * @md master port character device object
+ * @async_queue - asynchronous notification queue
+ * @list - file objects tracking list
+ * @db_filters inbound doorbell filters for this descriptor
+ * @pw_filters portwrite filters for this descriptor
+ * @event_fifo event fifo for this descriptor
+ * @event_rx_wait wait queue for this descriptor
+ * @fifo_lock lock for event_fifo
+ * @event_mask event mask for this descriptor
+ * @dmach DMA engine channel allocated for specific file object
+ */
+struct mport_cdev_priv {
+ struct mport_dev *md;
+ struct fasync_struct *async_queue;
+ struct list_head list;
+ struct list_head db_filters;
+ struct list_head pw_filters;
+ struct kfifo event_fifo;
+ wait_queue_head_t event_rx_wait;
+ spinlock_t fifo_lock;
+ unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ struct dma_chan *dmach;
+ struct list_head async_list;
+ struct list_head pend_list;
+ spinlock_t req_lock;
+ struct mutex dma_lock;
+ struct kref dma_ref;
+ struct completion comp;
+#endif
+};
+
+/*
+ * rio_mport_pw_filter - structure to describe a portwrite filter
+ * @md_node node in mport device's list
+ * @priv_node node in private file object's list
+ * @priv reference to private data
+ * @filter actual portwrite filter
+ */
+struct rio_mport_pw_filter {
+ struct list_head md_node;
+ struct list_head priv_node;
+ struct mport_cdev_priv *priv;
+ struct rio_pw_filter filter;
+};
+
+/*
+ * rio_mport_db_filter - structure to describe a doorbell filter
+ * @data_node reference to device node
+ * @priv_node node in private data
+ * @priv reference to private data
+ * @filter actual doorbell filter
+ */
+struct rio_mport_db_filter {
+ struct list_head data_node;
+ struct list_head priv_node;
+ struct mport_cdev_priv *priv;
+ struct rio_doorbell_filter filter;
+};
+
+static LIST_HEAD(mport_devs);
+static DEFINE_MUTEX(mport_devs_lock);
+
+#if (0) /* used by commented out portion of poll function : FIXME */
+static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
+#endif
+
+static struct class *dev_class;
+static dev_t dev_number;
+
+static struct workqueue_struct *dma_wq;
+
+static void mport_release_mapping(struct kref *ref);
+
+static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
+ int local)
+{
+ struct rio_mport *mport = priv->md->mport;
+ struct rio_mport_maint_io maint_io;
+ u32 *buffer;
+ u32 offset;
+ size_t length;
+ int ret, i;
+
+ if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
+ return -EFAULT;
+
+ if ((maint_io.offset % 4) ||
+ (maint_io.length == 0) || (maint_io.length % 4))
+ return -EINVAL;
+
+ buffer = vmalloc(maint_io.length);
+ if (buffer == NULL)
+ return -ENOMEM;
+ length = maint_io.length/sizeof(u32);
+ offset = maint_io.offset;
+
+ for (i = 0; i < length; i++) {
+ if (local)
+ ret = __rio_local_read_config_32(mport,
+ offset, &buffer[i]);
+ else
+ ret = rio_mport_read_config_32(mport, maint_io.rioid,
+ maint_io.hopcount, offset, &buffer[i]);
+ if (ret)
+ goto out;
+
+ offset += 4;
+ }
+
+ if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length)))
+ ret = -EFAULT;
+out:
+ vfree(buffer);
+ return ret;
+}
+
+static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
+ int local)
+{
+ struct rio_mport *mport = priv->md->mport;
+ struct rio_mport_maint_io maint_io;
+ u32 *buffer;
+ u32 offset;
+ size_t length;
+ int ret = -EINVAL, i;
+
+ if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
+ return -EFAULT;
+
+ if ((maint_io.offset % 4) ||
+ (maint_io.length == 0) || (maint_io.length % 4))
+ return -EINVAL;
+
+ buffer = vmalloc(maint_io.length);
+ if (buffer == NULL)
+ return -ENOMEM;
+ length = maint_io.length;
+
+ if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ offset = maint_io.offset;
+ length /= sizeof(u32);
+
+ for (i = 0; i < length; i++) {
+ if (local)
+ ret = __rio_local_write_config_32(mport,
+ offset, buffer[i]);
+ else
+ ret = rio_mport_write_config_32(mport, maint_io.rioid,
+ maint_io.hopcount,
+ offset, buffer[i]);
+ if (ret)
+ goto out;
+
+ offset += 4;
+ }
+
+out:
+ vfree(buffer);
+ return ret;
+}
+
+
+/*
+ * Inbound/outbound memory mapping functions
+ */
+static int
+rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
+ u32 rioid, u64 raddr, u32 size,
+ dma_addr_t *paddr)
+{
+ struct rio_mport *mport = md->mport;
+ struct rio_mport_mapping *map;
+ int ret;
+
+ rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
+
+ map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+ if (map == NULL)
+ return -ENOMEM;
+
+ ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
+ if (ret < 0)
+ goto err_map_outb;
+
+ map->dir = MAP_OUTBOUND;
+ map->rioid = rioid;
+ map->rio_addr = raddr;
+ map->size = size;
+ map->phys_addr = *paddr;
+ map->filp = filp;
+ map->md = md;
+ kref_init(&map->ref);
+ list_add_tail(&map->node, &md->mappings);
+ return 0;
+err_map_outb:
+ kfree(map);
+ return ret;
+}
+
+static int
+rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
+ u32 rioid, u64 raddr, u32 size,
+ dma_addr_t *paddr)
+{
+ struct rio_mport_mapping *map;
+ int err = -ENOMEM;
+
+ mutex_lock(&md->buf_mutex);
+ list_for_each_entry(map, &md->mappings, node) {
+ if (map->dir != MAP_OUTBOUND)
+ continue;
+ if (rioid == map->rioid &&
+ raddr == map->rio_addr && size == map->size) {
+ *paddr = map->phys_addr;
+ err = 0;
+ break;
+ } else if (rioid == map->rioid &&
+ raddr < (map->rio_addr + map->size - 1) &&
+ (raddr + size) > map->rio_addr) {
+ err = -EBUSY;
+ break;
+ }
+ }
+
+ /* If not found, create new */
+ if (err == -ENOMEM)
+ err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
+ size, paddr);
+ mutex_unlock(&md->buf_mutex);
+ return err;
+}
+
+static int rio_mport_obw_map(struct file *filp, void __user *arg)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *data = priv->md;
+ struct rio_mmap map;
+ dma_addr_t paddr;
+ int ret;
+
+ if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+ return -EFAULT;
+
+ rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
+ map.rioid, map.rio_addr, map.length);
+
+ ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
+ map.rio_addr, map.length, &paddr);
+ if (ret < 0) {
+ rmcd_error("Failed to set OBW err= %d", ret);
+ return ret;
+ }
+
+ map.handle = paddr;
+
+ if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap))))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
+ *
+ * @filp: file pointer associated with the call
+ * @arg: buffer handle returned by the allocation routine
+ */
+static int rio_mport_obw_free(struct file *filp, void __user *arg)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *md = priv->md;
+ u64 handle;
+ struct rio_mport_mapping *map, *_map;
+
+ if (!md->mport->ops->unmap_outb)
+ return -EPROTONOSUPPORT;
+
+ if (copy_from_user(&handle, arg, sizeof(u64)))
+ return -EFAULT;
+
+ rmcd_debug(OBW, "h=0x%llx", handle);
+
+ mutex_lock(&md->buf_mutex);
+ list_for_each_entry_safe(map, _map, &md->mappings, node) {
+ if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
+ if (map->filp == filp) {
+ rmcd_debug(OBW, "kref_put h=0x%llx", handle);
+ map->filp = NULL;
+ kref_put(&map->ref, mport_release_mapping);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&md->buf_mutex);
+
+ return 0;
+}
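+
+/*
+ * Illustrative user-space sketch (not part of this driver): map an
+ * outbound window with the ioctls above and access it through mmap() of
+ * the returned handle (see mport_cdev_mmap() below). Names follow the
+ * uapi used in this file; values are assumptions.
+ *
+ *	struct rio_mmap map = { .rioid = destid,
+ *				.rio_addr = raddr, .length = size };
+ *	ioctl(fd, RIO_MAP_OUTBOUND, &map);
+ *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, map.handle);
+ *	...
+ *	munmap(p, size);
+ *	ioctl(fd, RIO_UNMAP_OUTBOUND, &map.handle);
+ */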
+
+/*
+ * maint_hdid_set() - Set the host Device ID
+ * @priv: driver private data
+ * @arg: Device Id
+ */
+static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
+{
+ struct mport_dev *md = priv->md;
+ uint16_t hdid;
+
+ if (copy_from_user(&hdid, arg, sizeof(uint16_t)))
+ return -EFAULT;
+
+ md->mport->host_deviceid = hdid;
+ md->properties.hdid = hdid;
+ rio_local_set_device_id(md->mport, hdid);
+
+ rmcd_debug(MPORT, "Set host device Id to %d", hdid);
+
+ return 0;
+}
+
+/*
+ * maint_comptag_set() - Set the host Component Tag
+ * @priv: driver private data
+ * @arg: Component Tag
+ */
+static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
+{
+ struct mport_dev *md = priv->md;
+ uint32_t comptag;
+
+ if (copy_from_user(&comptag, arg, sizeof(uint32_t)))
+ return -EFAULT;
+
+ rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
+
+ rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);
+
+ return 0;
+}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct mport_dma_req {
+ struct list_head node;
+ struct file *filp;
+ struct mport_cdev_priv *priv;
+ enum rio_transfer_sync sync;
+ struct sg_table sgt;
+ struct page **page_list;
+ unsigned int nr_pages;
+ struct rio_mport_mapping *map;
+ struct dma_chan *dmach;
+ enum dma_data_direction dir;
+ dma_cookie_t cookie;
+ enum dma_status status;
+ struct completion req_comp;
+};
+
+struct mport_faf_work {
+ struct work_struct work;
+ struct mport_dma_req *req;
+};
+
+static void mport_release_def_dma(struct kref *dma_ref)
+{
+ struct mport_dev *md =
+ container_of(dma_ref, struct mport_dev, dma_ref);
+
+ rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
+ rio_release_dma(md->dma_chan);
+ md->dma_chan = NULL;
+}
+
+static void mport_release_dma(struct kref *dma_ref)
+{
+ struct mport_cdev_priv *priv =
+ container_of(dma_ref, struct mport_cdev_priv, dma_ref);
+
+ rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
+ complete(&priv->comp);
+}
+
+static void dma_req_free(struct mport_dma_req *req)
+{
+ struct mport_cdev_priv *priv = req->priv;
+ unsigned int i;
+
+ dma_unmap_sg(req->dmach->device->dev,
+ req->sgt.sgl, req->sgt.nents, req->dir);
+ sg_free_table(&req->sgt);
+ if (req->page_list) {
+ for (i = 0; i < req->nr_pages; i++)
+ put_page(req->page_list[i]);
+ kfree(req->page_list);
+ }
+
+ if (req->map) {
+ mutex_lock(&req->map->md->buf_mutex);
+ kref_put(&req->map->ref, mport_release_mapping);
+ mutex_unlock(&req->map->md->buf_mutex);
+ }
+
+ kref_put(&priv->dma_ref, mport_release_dma);
+
+ kfree(req);
+}
+
+static void dma_xfer_callback(void *param)
+{
+ struct mport_dma_req *req = (struct mport_dma_req *)param;
+ struct mport_cdev_priv *priv = req->priv;
+
+ req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
+ NULL, NULL);
+ complete(&req->req_comp);
+}
+
+static void dma_faf_cleanup(struct work_struct *_work)
+{
+ struct mport_faf_work *work = container_of(_work,
+ struct mport_faf_work, work);
+ struct mport_dma_req *req = work->req;
+
+ dma_req_free(req);
+ kfree(work);
+}
+
+static void dma_faf_callback(void *param)
+{
+ struct mport_dma_req *req = (struct mport_dma_req *)param;
+ struct mport_faf_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, dma_faf_cleanup);
+ work->req = req;
+ queue_work(dma_wq, &work->work);
+}
+
+/*
+ * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
+ * transfer object.
+ * Returns a pointer to the DMA transaction descriptor allocated by the DMA
+ * driver on success, or NULL/ERR_PTR on failure. The caller must test a
+ * non-NULL return value with the IS_ERR macro.
+ */
+static struct dma_async_tx_descriptor
+*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
+ struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
+ enum dma_ctrl_flags flags)
+{
+ struct rio_dma_data tx_data;
+
+ tx_data.sg = sgt->sgl;
+ tx_data.sg_len = nents;
+ tx_data.rio_addr_u = 0;
+ tx_data.rio_addr = transfer->rio_addr;
+ if (dir == DMA_MEM_TO_DEV) {
+ switch (transfer->method) {
+ case RIO_EXCHANGE_NWRITE:
+ tx_data.wr_type = RDW_ALL_NWRITE;
+ break;
+ case RIO_EXCHANGE_NWRITE_R_ALL:
+ tx_data.wr_type = RDW_ALL_NWRITE_R;
+ break;
+ case RIO_EXCHANGE_NWRITE_R:
+ tx_data.wr_type = RDW_LAST_NWRITE_R;
+ break;
+ case RIO_EXCHANGE_DEFAULT:
+ tx_data.wr_type = RDW_DEFAULT;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
+}
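+
+/*
+ * Callers must handle both failure conventions noted above, e.g.:
+ *
+ *	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir, flags);
+ *	if (!tx)
+ *		ret = -EIO;		// no descriptor available
+ *	else if (IS_ERR(tx))
+ *		ret = PTR_ERR(tx);	// explicit error from DMA driver
+ *
+ * do_dma_request() below follows this pattern.
+ */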
+
+/* Request the DMA channel associated with this mport device.
+ * A DMA channel is requested for every new process that opens the given
+ * mport. If a new DMA channel is not available, fall back to the default
+ * channel, which is the first DMA channel opened on the mport device.
+ */
+static int get_dma_channel(struct mport_cdev_priv *priv)
+{
+ mutex_lock(&priv->dma_lock);
+ if (!priv->dmach) {
+ priv->dmach = rio_request_mport_dma(priv->md->mport);
+ if (!priv->dmach) {
+ /* Use default DMA channel if available */
+ if (priv->md->dma_chan) {
+ priv->dmach = priv->md->dma_chan;
+ kref_get(&priv->md->dma_ref);
+ } else {
+ rmcd_error("Failed to get DMA channel");
+ mutex_unlock(&priv->dma_lock);
+ return -ENODEV;
+ }
+ } else if (!priv->md->dma_chan) {
+ /* Register default DMA channel if we do not have one */
+ priv->md->dma_chan = priv->dmach;
+ kref_init(&priv->md->dma_ref);
+ rmcd_debug(DMA, "Register DMA_chan %d as default",
+ priv->dmach->chan_id);
+ }
+
+ kref_init(&priv->dma_ref);
+ init_completion(&priv->comp);
+ }
+
+ kref_get(&priv->dma_ref);
+ mutex_unlock(&priv->dma_lock);
+ return 0;
+}
+
+static void put_dma_channel(struct mport_cdev_priv *priv)
+{
+ kref_put(&priv->dma_ref, mport_release_dma);
+}
+
+/*
+ * DMA transfer functions
+ */
+static int do_dma_request(struct mport_dma_req *req,
+ struct rio_transfer_io *xfer,
+ enum rio_transfer_sync sync, int nents)
+{
+ struct mport_cdev_priv *priv;
+ struct sg_table *sgt;
+ struct dma_chan *chan;
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+ unsigned long tmo = msecs_to_jiffies(dma_timeout);
+ enum dma_transfer_direction dir;
+ long wret;
+ int ret = 0;
+
+ priv = req->priv;
+ sgt = &req->sgt;
+
+ chan = priv->dmach;
+ dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+
+ rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
+ current->comm, task_pid_nr(current),
+ dev_name(&chan->dev->device),
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
+
+ /* Initialize DMA transaction request */
+ tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+
+ if (!tx) {
+ rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
+ xfer->rio_addr, xfer->length);
+ ret = -EIO;
+ goto err_out;
+ } else if (IS_ERR(tx)) {
+ ret = PTR_ERR(tx);
+ rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
+ xfer->rio_addr, xfer->length);
+ goto err_out;
+ }
+
+ if (sync == RIO_TRANSFER_FAF)
+ tx->callback = dma_faf_callback;
+ else
+ tx->callback = dma_xfer_callback;
+ tx->callback_param = req;
+
+ req->dmach = chan;
+ req->sync = sync;
+ req->status = DMA_IN_PROGRESS;
+ init_completion(&req->req_comp);
+
+ cookie = dmaengine_submit(tx);
+ req->cookie = cookie;
+
+ rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
+
+ if (dma_submit_error(cookie)) {
+ rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
+ cookie, xfer->rio_addr, xfer->length);
+ ret = -EIO;
+ goto err_out;
+ }
+
+ dma_async_issue_pending(chan);
+
+ if (sync == RIO_TRANSFER_ASYNC) {
+ spin_lock(&priv->req_lock);
+ list_add_tail(&req->node, &priv->async_list);
+ spin_unlock(&priv->req_lock);
+ return cookie;
+ } else if (sync == RIO_TRANSFER_FAF)
+ return 0;
+
+ wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
+
+ if (wret == 0) {
+ /* Timeout on wait occurred */
+ rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
+ current->comm, task_pid_nr(current),
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
+ return -ETIMEDOUT;
+ } else if (wret == -ERESTARTSYS) {
+		/* wait_for_completion() was interrupted by a signal, but the
+		 * DMA transfer may still be in progress
+		 */
+ rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
+ current->comm, task_pid_nr(current),
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
+ return -EINTR;
+ }
+
+ if (req->status != DMA_COMPLETE) {
+ /* DMA transaction completion was signaled with error */
+ rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
+ current->comm, task_pid_nr(current),
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
+ cookie, req->status, ret);
+ ret = -EIO;
+ }
+
+err_out:
+ return ret;
+}
+
+/*
+ * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
+ * the remote RapidIO device
+ * @filp: file pointer associated with the call
+ * @transfer_mode: DMA transfer mode
+ * @sync: synchronization mode
+ * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
+ * DMA_DEV_TO_MEM = read)
+ * @xfer: data transfer descriptor structure
+ */
+static int
+rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
+ enum rio_transfer_sync sync, enum dma_data_direction dir,
+ struct rio_transfer_io *xfer)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ unsigned long nr_pages = 0;
+ struct page **page_list = NULL;
+ struct mport_dma_req *req;
+ struct mport_dev *md = priv->md;
+ struct dma_chan *chan;
+ int i, ret;
+ int nents;
+
+ if (xfer->length == 0)
+ return -EINVAL;
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = get_dma_channel(priv);
+ if (ret) {
+ kfree(req);
+ return ret;
+ }
+
+	/*
+	 * If parameter loc_addr != NULL, we are transferring data from/to
+	 * a data buffer allocated in user space: pin the user-space buffer
+	 * pages in memory and build an SG table for the DMA transfer request.
+	 *
+	 * Otherwise (loc_addr == NULL) a contiguous kernel-space buffer is
+	 * used for DMA data transfers: build a single-entry SG table using
+	 * the offset within the internal buffer specified by the handle
+	 * parameter. (Both options are shown in the sketch after
+	 * rio_mport_transfer_ioctl() below.)
+	 */
+ if (xfer->loc_addr) {
+ unsigned long offset;
+ long pinned;
+
+ offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK;
+ nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
+
+ page_list = kmalloc_array(nr_pages,
+ sizeof(*page_list), GFP_KERNEL);
+ if (page_list == NULL) {
+ ret = -ENOMEM;
+ goto err_req;
+ }
+
+ down_read(&current->mm->mmap_sem);
+ pinned = get_user_pages(
+ (unsigned long)xfer->loc_addr & PAGE_MASK,
+ nr_pages, dir == DMA_FROM_DEVICE, 0,
+ page_list, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (pinned != nr_pages) {
+ if (pinned < 0) {
+ rmcd_error("get_user_pages err=%ld", pinned);
+ nr_pages = 0;
+ } else
+ rmcd_error("pinned %ld out of %ld pages",
+ pinned, nr_pages);
+ ret = -EFAULT;
+ goto err_pg;
+ }
+
+ ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
+ offset, xfer->length, GFP_KERNEL);
+ if (ret) {
+ rmcd_error("sg_alloc_table failed with err=%d", ret);
+ goto err_pg;
+ }
+
+ req->page_list = page_list;
+ req->nr_pages = nr_pages;
+ } else {
+ dma_addr_t baddr;
+ struct rio_mport_mapping *map;
+
+ baddr = (dma_addr_t)xfer->handle;
+
+ mutex_lock(&md->buf_mutex);
+ list_for_each_entry(map, &md->mappings, node) {
+ if (baddr >= map->phys_addr &&
+ baddr < (map->phys_addr + map->size)) {
+ kref_get(&map->ref);
+ req->map = map;
+ break;
+ }
+ }
+ mutex_unlock(&md->buf_mutex);
+
+ if (req->map == NULL) {
+ ret = -ENOMEM;
+ goto err_req;
+ }
+
+ if (xfer->length + xfer->offset > map->size) {
+ ret = -EINVAL;
+ goto err_req;
+ }
+
+ ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
+ if (unlikely(ret)) {
+ rmcd_error("sg_alloc_table failed for internal buf");
+ goto err_req;
+ }
+
+ sg_set_buf(req->sgt.sgl,
+ map->virt_addr + (baddr - map->phys_addr) +
+ xfer->offset, xfer->length);
+ }
+
+ req->dir = dir;
+ req->filp = filp;
+ req->priv = priv;
+ chan = priv->dmach;
+
+ nents = dma_map_sg(chan->device->dev,
+ req->sgt.sgl, req->sgt.nents, dir);
+	if (nents == 0) {
+		/* dma_map_sg() reports failure by returning 0, not -EFAULT */
+		rmcd_error("Failed to map SG list");
+		sg_free_table(&req->sgt);
+		ret = -EFAULT;
+		goto err_pg;
+	}
+
+ ret = do_dma_request(req, xfer, sync, nents);
+
+ if (ret >= 0) {
+ if (sync == RIO_TRANSFER_SYNC)
+ goto sync_out;
+ return ret; /* return ASYNC cookie */
+ }
+
+ if (ret == -ETIMEDOUT || ret == -EINTR) {
+ /*
+ * This can happen only in case of SYNC transfer.
+ * Do not free unfinished request structure immediately.
+ * Place it into pending list and deal with it later
+ */
+ spin_lock(&priv->req_lock);
+ list_add_tail(&req->node, &priv->pend_list);
+ spin_unlock(&priv->req_lock);
+ return ret;
+ }
+
+ rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
+sync_out:
+ dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
+ sg_free_table(&req->sgt);
+err_pg:
+ if (page_list) {
+ for (i = 0; i < nr_pages; i++)
+ put_page(page_list[i]);
+ kfree(page_list);
+ }
+err_req:
+ if (req->map) {
+ mutex_lock(&md->buf_mutex);
+ kref_put(&req->map->ref, mport_release_mapping);
+ mutex_unlock(&md->buf_mutex);
+ }
+ put_dma_channel(priv);
+ kfree(req);
+ return ret;
+}
+
+static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct rio_transaction transaction;
+ struct rio_transfer_io *transfer;
+ enum dma_data_direction dir;
+ int i, ret = 0;
+
+ if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
+ return -EFAULT;
+
+ if (transaction.count != 1)
+ return -EINVAL;
+
+ if ((transaction.transfer_mode &
+ priv->md->properties.transfer_mode) == 0)
+ return -ENODEV;
+
+ transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io));
+ if (!transfer)
+ return -ENOMEM;
+
+ if (unlikely(copy_from_user(transfer, transaction.block,
+ transaction.count * sizeof(struct rio_transfer_io)))) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ for (i = 0; i < transaction.count && ret == 0; i++)
+ ret = rio_dma_transfer(filp, transaction.transfer_mode,
+ transaction.sync, dir, &transfer[i]);
+
+ if (unlikely(copy_to_user(transaction.block, transfer,
+ transaction.count * sizeof(struct rio_transfer_io))))
+ ret = -EFAULT;
+
+out_free:
+ vfree(transfer);
+
+ return ret;
+}
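+
+/*
+ * Illustrative user-space sketch (not part of this driver): a synchronous
+ * write using RIO_TRANSFER, with either of the two buffer options handled
+ * by rio_dma_transfer() above. Names follow the uapi used in this file;
+ * RIO_TRANSFER_DIR_WRITE is assumed as the counterpart of the _READ value
+ * referenced above, and variable values are assumptions.
+ *
+ *	struct rio_transfer_io xfer = {
+ *		.rioid = destid, .rio_addr = raddr, .length = len,
+ *		.loc_addr = (uintptr_t)user_buf,
+ *		.method = RIO_EXCHANGE_DEFAULT,
+ *	};
+ *	(or set .handle/.offset instead of .loc_addr to use a kernel
+ *	 buffer obtained via RIO_ALLOC_DMA)
+ *	struct rio_transaction tx = {
+ *		.block = &xfer, .count = 1,
+ *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
+ *		.sync = RIO_TRANSFER_SYNC, .dir = RIO_TRANSFER_DIR_WRITE,
+ *	};
+ *	ioctl(fd, RIO_TRANSFER, &tx);
+ */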
+
+static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
+{
+ struct mport_cdev_priv *priv;
+ struct mport_dev *md;
+ struct rio_async_tx_wait w_param;
+ struct mport_dma_req *req;
+ dma_cookie_t cookie;
+ unsigned long tmo;
+ long wret;
+ int found = 0;
+ int ret;
+
+ priv = (struct mport_cdev_priv *)filp->private_data;
+ md = priv->md;
+
+ if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
+ return -EFAULT;
+
+ cookie = w_param.token;
+ if (w_param.timeout)
+ tmo = msecs_to_jiffies(w_param.timeout);
+ else /* Use default DMA timeout */
+ tmo = msecs_to_jiffies(dma_timeout);
+
+ spin_lock(&priv->req_lock);
+ list_for_each_entry(req, &priv->async_list, node) {
+ if (req->cookie == cookie) {
+ list_del(&req->node);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&priv->req_lock);
+
+ if (!found)
+ return -EAGAIN;
+
+ wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
+
+ if (wret == 0) {
+ /* Timeout on wait occurred */
+ rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
+ current->comm, task_pid_nr(current),
+ (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
+ ret = -ETIMEDOUT;
+ goto err_tmo;
+ } else if (wret == -ERESTARTSYS) {
+ /* Wait_for_completion was interrupted by a signal but DMA may
+ * be still in progress
+ */
+ rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
+ current->comm, task_pid_nr(current),
+ (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
+ ret = -EINTR;
+ goto err_tmo;
+ }
+
+ if (req->status != DMA_COMPLETE) {
+ /* DMA transaction completion signaled with transfer error */
+ rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
+ current->comm, task_pid_nr(current),
+ (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
+ req->status);
+ ret = -EIO;
+ } else
+ ret = 0;
+
+ if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
+ dma_req_free(req);
+
+ return ret;
+
+err_tmo:
+ /* Return request back into async queue */
+ spin_lock(&priv->req_lock);
+ list_add_tail(&req->node, &priv->async_list);
+ spin_unlock(&priv->req_lock);
+ return ret;
+}
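+
+/*
+ * Illustrative user-space sketch (not part of this driver): an async
+ * transfer returns a cookie from RIO_TRANSFER (sync = RIO_TRANSFER_ASYNC)
+ * that is later passed as the token to RIO_WAIT_FOR_ASYNC handled above.
+ *
+ *	tx.sync = RIO_TRANSFER_ASYNC;
+ *	int cookie = ioctl(fd, RIO_TRANSFER, &tx);
+ *	struct rio_async_tx_wait w = { .token = cookie, .timeout = 0 };
+ *	ioctl(fd, RIO_WAIT_FOR_ASYNC, &w);	// timeout 0 = driver default
+ */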
+
+static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
+ uint64_t size, struct rio_mport_mapping **mapping)
+{
+ struct rio_mport_mapping *map;
+
+ map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+ if (map == NULL)
+ return -ENOMEM;
+
+ map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
+ &map->phys_addr, GFP_KERNEL);
+ if (map->virt_addr == NULL) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->dir = MAP_DMA;
+ map->size = size;
+ map->filp = filp;
+ map->md = md;
+ kref_init(&map->ref);
+ mutex_lock(&md->buf_mutex);
+ list_add_tail(&map->node, &md->mappings);
+ mutex_unlock(&md->buf_mutex);
+ *mapping = map;
+
+ return 0;
+}
+
+static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *md = priv->md;
+ struct rio_dma_mem map;
+ struct rio_mport_mapping *mapping = NULL;
+ int ret;
+
+ if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem))))
+ return -EFAULT;
+
+ ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
+ if (ret)
+ return ret;
+
+ map.dma_handle = mapping->phys_addr;
+
+ if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) {
+ mutex_lock(&md->buf_mutex);
+ kref_put(&mapping->ref, mport_release_mapping);
+ mutex_unlock(&md->buf_mutex);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int rio_mport_free_dma(struct file *filp, void __user *arg)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *md = priv->md;
+ u64 handle;
+ int ret = -EFAULT;
+ struct rio_mport_mapping *map, *_map;
+
+ if (copy_from_user(&handle, arg, sizeof(u64)))
+ return -EFAULT;
+ rmcd_debug(EXIT, "filp=%p", filp);
+
+ mutex_lock(&md->buf_mutex);
+ list_for_each_entry_safe(map, _map, &md->mappings, node) {
+ if (map->dir == MAP_DMA && map->phys_addr == handle &&
+ map->filp == filp) {
+ kref_put(&map->ref, mport_release_mapping);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&md->buf_mutex);
+
+ if (ret == -EFAULT) {
+ rmcd_debug(DMA, "ERR no matching mapping");
+ return ret;
+ }
+
+ return 0;
+}
+#else
+static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
+{
+ return -ENODEV;
+}
+
+static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
+{
+ return -ENODEV;
+}
+
+static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
+{
+ return -ENODEV;
+}
+
+static int rio_mport_free_dma(struct file *filp, void __user *arg)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+/*
+ * Inbound memory mapping functions
+ */
+
+static int
+rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
+ u64 raddr, u32 size,
+ struct rio_mport_mapping **mapping)
+{
+ struct rio_mport *mport = md->mport;
+ struct rio_mport_mapping *map;
+ int ret;
+
+ map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+ if (map == NULL)
+ return -ENOMEM;
+
+ map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
+ &map->phys_addr, GFP_KERNEL);
+ if (map->virt_addr == NULL) {
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ if (raddr == RIO_MAP_ANY_ADDR)
+ raddr = map->phys_addr;
+ ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0);
+ if (ret < 0)
+ goto err_map_inb;
+
+ map->dir = MAP_INBOUND;
+ map->rio_addr = raddr;
+ map->size = size;
+ map->filp = filp;
+ map->md = md;
+ kref_init(&map->ref);
+ mutex_lock(&md->buf_mutex);
+ list_add_tail(&map->node, &md->mappings);
+ mutex_unlock(&md->buf_mutex);
+ *mapping = map;
+ return 0;
+
+err_map_inb:
+ dma_free_coherent(mport->dev.parent, size,
+ map->virt_addr, map->phys_addr);
+err_dma_alloc:
+ kfree(map);
+ return ret;
+}
+
+static int
+rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
+ u64 raddr, u32 size,
+ struct rio_mport_mapping **mapping)
+{
+ struct rio_mport_mapping *map;
+ int err = -ENOMEM;
+
+ if (raddr == RIO_MAP_ANY_ADDR)
+ goto get_new;
+
+ mutex_lock(&md->buf_mutex);
+ list_for_each_entry(map, &md->mappings, node) {
+ if (map->dir != MAP_INBOUND)
+ continue;
+ if (raddr == map->rio_addr && size == map->size) {
+ /* allow exact match only */
+ *mapping = map;
+ err = 0;
+ break;
+ } else if (raddr < (map->rio_addr + map->size - 1) &&
+ (raddr + size) > map->rio_addr) {
+ err = -EBUSY;
+ break;
+ }
+ }
+ mutex_unlock(&md->buf_mutex);
+
+ if (err != -ENOMEM)
+ return err;
+get_new:
+ /* not found, create new */
+ return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
+}
+
+static int rio_mport_map_inbound(struct file *filp, void __user *arg)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *md = priv->md;
+ struct rio_mmap map;
+ struct rio_mport_mapping *mapping = NULL;
+ int ret;
+
+ if (!md->mport->ops->map_inb)
+ return -EPROTONOSUPPORT;
+ if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+ return -EFAULT;
+
+ rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
+
+ ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
+ map.length, &mapping);
+ if (ret)
+ return ret;
+
+ map.handle = mapping->phys_addr;
+ map.rio_addr = mapping->rio_addr;
+
+ if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) {
+ /* Delete mapping if it was created by this request */
+ if (ret == 0 && mapping->filp == filp) {
+ mutex_lock(&md->buf_mutex);
+ kref_put(&mapping->ref, mport_release_mapping);
+ mutex_unlock(&md->buf_mutex);
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * rio_mport_inbound_free() - unmap from RapidIO address space and free
+ * previously allocated inbound DMA coherent buffer
+ * @filp: file pointer associated with the call
+ * @arg: buffer handle returned by the allocation routine
+ */
+static int rio_mport_inbound_free(struct file *filp, void __user *arg)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *md = priv->md;
+ u64 handle;
+ struct rio_mport_mapping *map, *_map;
+
+ rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
+
+ if (!md->mport->ops->unmap_inb)
+ return -EPROTONOSUPPORT;
+
+ if (copy_from_user(&handle, arg, sizeof(u64)))
+ return -EFAULT;
+
+ mutex_lock(&md->buf_mutex);
+ list_for_each_entry_safe(map, _map, &md->mappings, node) {
+ if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
+ if (map->filp == filp) {
+ map->filp = NULL;
+ kref_put(&map->ref, mport_release_mapping);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&md->buf_mutex);
+
+ return 0;
+}
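+
+/*
+ * Illustrative user-space sketch (not part of this driver): request an
+ * inbound window (RIO_MAP_ANY_ADDR lets the driver choose the RapidIO
+ * address) and mmap() the backing buffer. Names follow the uapi used in
+ * this file.
+ *
+ *	struct rio_mmap map = { .rio_addr = RIO_MAP_ANY_ADDR,
+ *				.length = size };
+ *	ioctl(fd, RIO_MAP_INBOUND, &map);
+ *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, map.handle);
+ *	// remote peers may now target map.rio_addr
+ *	ioctl(fd, RIO_UNMAP_INBOUND, &map.handle);
+ */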
+
+/*
+ * maint_port_idx_get() - Get the port index of the mport instance
+ * @priv: driver private data
+ * @arg: port index
+ */
+static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
+{
+ struct mport_dev *md = priv->md;
+ uint32_t port_idx = md->mport->index;
+
+ rmcd_debug(MPORT, "port_index=%d", port_idx);
+
+ if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int rio_mport_add_event(struct mport_cdev_priv *priv,
+ struct rio_event *event)
+{
+ int overflow;
+
+ if (!(priv->event_mask & event->header))
+ return -EACCES;
+
+ spin_lock(&priv->fifo_lock);
+ overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
+ || kfifo_in(&priv->event_fifo, (unsigned char *)event,
+ sizeof(*event)) != sizeof(*event);
+ spin_unlock(&priv->fifo_lock);
+
+ wake_up_interruptible(&priv->event_rx_wait);
+
+ if (overflow) {
+ dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
+ u16 src, u16 dst, u16 info)
+{
+ struct mport_dev *data = dev_id;
+ struct mport_cdev_priv *priv;
+ struct rio_mport_db_filter *db_filter;
+ struct rio_event event;
+ int handled;
+
+ event.header = RIO_DOORBELL;
+ event.u.doorbell.rioid = src;
+ event.u.doorbell.payload = info;
+
+ handled = 0;
+ spin_lock(&data->db_lock);
+ list_for_each_entry(db_filter, &data->doorbells, data_node) {
+ if (((db_filter->filter.rioid == 0xffffffff ||
+ db_filter->filter.rioid == src)) &&
+ info >= db_filter->filter.low &&
+ info <= db_filter->filter.high) {
+ priv = db_filter->priv;
+ rio_mport_add_event(priv, &event);
+ handled = 1;
+ }
+ }
+ spin_unlock(&data->db_lock);
+
+ if (!handled)
+ dev_warn(&data->dev,
+ "%s: spurious DB received from 0x%x, info=0x%04x\n",
+ __func__, src, info);
+}
+
+static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
+ void __user *arg)
+{
+ struct mport_dev *md = priv->md;
+ struct rio_mport_db_filter *db_filter;
+ struct rio_doorbell_filter filter;
+ unsigned long flags;
+ int ret;
+
+ if (copy_from_user(&filter, arg, sizeof(filter)))
+ return -EFAULT;
+
+ if (filter.low > filter.high)
+ return -EINVAL;
+
+ ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
+ rio_mport_doorbell_handler);
+ if (ret) {
+ rmcd_error("%s failed to register IBDB, err=%d",
+ dev_name(&md->dev), ret);
+ return ret;
+ }
+
+ db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
+ if (db_filter == NULL) {
+ rio_release_inb_dbell(md->mport, filter.low, filter.high);
+ return -ENOMEM;
+ }
+
+ db_filter->filter = filter;
+ db_filter->priv = priv;
+ spin_lock_irqsave(&md->db_lock, flags);
+ list_add_tail(&db_filter->priv_node, &priv->db_filters);
+ list_add_tail(&db_filter->data_node, &md->doorbells);
+ spin_unlock_irqrestore(&md->db_lock, flags);
+
+ return 0;
+}
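+
+/*
+ * Illustrative user-space sketch (not part of this driver): enable a
+ * doorbell range and receive matching doorbells as rio_event records via
+ * read() (see mport_read() below). Names follow the uapi used in this
+ * file; 0xffffffff matches any source, as in the handler above.
+ *
+ *	struct rio_doorbell_filter f = { .rioid = 0xffffffff,
+ *					 .low = 0, .high = 0xffff };
+ *	ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &f);
+ *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL);
+ *	struct rio_event ev;
+ *	read(fd, &ev, sizeof(ev));	// ev.u.doorbell.rioid / .payload
+ */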
+
+static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
+{
+ list_del(&db_filter->data_node);
+ list_del(&db_filter->priv_node);
+ kfree(db_filter);
+}
+
+static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
+ void __user *arg)
+{
+ struct rio_mport_db_filter *db_filter;
+ struct rio_doorbell_filter filter;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (copy_from_user(&filter, arg, sizeof(filter)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&priv->md->db_lock, flags);
+ list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
+ if (db_filter->filter.rioid == filter.rioid &&
+ db_filter->filter.low == filter.low &&
+ db_filter->filter.high == filter.high) {
+ rio_mport_delete_db_filter(db_filter);
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->md->db_lock, flags);
+
+ if (!ret)
+ rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);
+
+ return ret;
+}
+
+static int rio_mport_match_pw(union rio_pw_msg *msg,
+ struct rio_pw_filter *filter)
+{
+ if ((msg->em.comptag & filter->mask) < filter->low ||
+ (msg->em.comptag & filter->mask) > filter->high)
+ return 0;
+ return 1;
+}
+
+static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
+ union rio_pw_msg *msg, int step)
+{
+ struct mport_dev *md = context;
+ struct mport_cdev_priv *priv;
+ struct rio_mport_pw_filter *pw_filter;
+ struct rio_event event;
+ int handled;
+
+ event.header = RIO_PORTWRITE;
+ memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);
+
+ handled = 0;
+ spin_lock(&md->pw_lock);
+ list_for_each_entry(pw_filter, &md->portwrites, md_node) {
+ if (rio_mport_match_pw(msg, &pw_filter->filter)) {
+ priv = pw_filter->priv;
+ rio_mport_add_event(priv, &event);
+ handled = 1;
+ }
+ }
+ spin_unlock(&md->pw_lock);
+
+ if (!handled) {
+ printk_ratelimited(KERN_WARNING DRV_NAME
+ ": mport%d received spurious PW from 0x%08x\n",
+ mport->id, msg->em.comptag);
+ }
+
+ return 0;
+}
+
+static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
+ void __user *arg)
+{
+ struct mport_dev *md = priv->md;
+ struct rio_mport_pw_filter *pw_filter;
+ struct rio_pw_filter filter;
+ unsigned long flags;
+ int hadd = 0;
+
+ if (copy_from_user(&filter, arg, sizeof(filter)))
+ return -EFAULT;
+
+ pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
+ if (pw_filter == NULL)
+ return -ENOMEM;
+
+ pw_filter->filter = filter;
+ pw_filter->priv = priv;
+ spin_lock_irqsave(&md->pw_lock, flags);
+ if (list_empty(&md->portwrites))
+ hadd = 1;
+ list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
+ list_add_tail(&pw_filter->md_node, &md->portwrites);
+ spin_unlock_irqrestore(&md->pw_lock, flags);
+
+ if (hadd) {
+ int ret;
+
+ ret = rio_add_mport_pw_handler(md->mport, md,
+ rio_mport_pw_handler);
+ if (ret) {
+ dev_err(&md->dev,
+ "%s: failed to add IB_PW handler, err=%d\n",
+ __func__, ret);
+ return ret;
+ }
+ rio_pw_enable(md->mport, 1);
+ }
+
+ return 0;
+}
+
+static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
+{
+ list_del(&pw_filter->md_node);
+ list_del(&pw_filter->priv_node);
+ kfree(pw_filter);
+}
+
+static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
+ struct rio_pw_filter *b)
+{
+ if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
+ return 1;
+ return 0;
+}
+
+static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
+ void __user *arg)
+{
+ struct mport_dev *md = priv->md;
+ struct rio_mport_pw_filter *pw_filter;
+ struct rio_pw_filter filter;
+ unsigned long flags;
+ int ret = -EINVAL;
+ int hdel = 0;
+
+ if (copy_from_user(&filter, arg, sizeof(filter)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&md->pw_lock, flags);
+ list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
+ if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
+ rio_mport_delete_pw_filter(pw_filter);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (list_empty(&md->portwrites))
+ hdel = 1;
+ spin_unlock_irqrestore(&md->pw_lock, flags);
+
+ if (hdel) {
+ rio_del_mport_pw_handler(md->mport, priv->md,
+ rio_mport_pw_handler);
+ rio_pw_enable(md->mport, 0);
+ }
+
+ return ret;
+}
+
+/*
+ * rio_release_dev - release routine for kernel RIO device object
+ * @dev: kernel device object associated with a RIO device structure
+ *
+ * Frees the RIO device struct associated with the kernel device object.
+ */
+static void rio_release_dev(struct device *dev)
+{
+ struct rio_dev *rdev;
+
+ rdev = to_rio_dev(dev);
+ pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
+ kfree(rdev);
+}
+
+static void rio_release_net(struct device *dev)
+{
+ struct rio_net *net;
+
+ net = to_rio_net(dev);
+ rmcd_debug(RDEV, "net_%d", net->id);
+ kfree(net);
+}
+
+/*
+ * rio_mport_add_riodev - creates a kernel RIO device object
+ *
+ * Allocates a RIO device data structure and initializes required fields based
+ * on device's configuration space contents.
+ * If the device has switch capabilities, then a switch-specific portion is
+ * allocated and configured.
+ */
+static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+ void __user *arg)
+{
+ struct mport_dev *md = priv->md;
+ struct rio_rdev_info dev_info;
+ struct rio_dev *rdev;
+ struct rio_switch *rswitch = NULL;
+ struct rio_mport *mport;
+ size_t size;
+ u32 rval;
+ u32 swpinfo = 0;
+ u16 destid;
+ u8 hopcount;
+ int err;
+
+ if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
+ return -EFAULT;
+
+ rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
+ dev_info.comptag, dev_info.destid, dev_info.hopcount);
+
+ if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
+ rmcd_debug(RDEV, "device %s already exists", dev_info.name);
+ return -EEXIST;
+ }
+
+ size = sizeof(struct rio_dev);
+ mport = md->mport;
+ destid = (u16)dev_info.destid;
+ hopcount = (u8)dev_info.hopcount;
+
+ if (rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_PEF_CAR, &rval))
+ return -EIO;
+
+ if (rval & RIO_PEF_SWITCH) {
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &swpinfo);
+ size += (RIO_GET_TOTAL_PORTS(swpinfo) *
+ sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+ }
+
+ rdev = kzalloc(size, GFP_KERNEL);
+ if (rdev == NULL)
+ return -ENOMEM;
+
+ if (mport->net == NULL) {
+ struct rio_net *net;
+
+ net = rio_alloc_net(mport);
+ if (!net) {
+ err = -ENOMEM;
+ rmcd_debug(RDEV, "failed to allocate net object");
+ goto cleanup;
+ }
+
+ net->id = mport->id;
+ net->hport = mport;
+ dev_set_name(&net->dev, "rnet_%d", net->id);
+ net->dev.parent = &mport->dev;
+ net->dev.release = rio_release_net;
+ err = rio_add_net(net);
+ if (err) {
+ rmcd_debug(RDEV, "failed to register net, err=%d", err);
+ kfree(net);
+ goto cleanup;
+ }
+ }
+
+ rdev->net = mport->net;
+ rdev->pef = rval;
+ rdev->swpinfo = swpinfo;
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_DEV_ID_CAR, &rval);
+ rdev->did = rval >> 16;
+ rdev->vid = rval & 0xffff;
+ rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
+ &rdev->device_rev);
+ rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
+ &rval);
+ rdev->asm_did = rval >> 16;
+ rdev->asm_vid = rval & 0xffff;
+ rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
+ &rval);
+ rdev->asm_rev = rval >> 16;
+
+ if (rdev->pef & RIO_PEF_EXT_FEATURES) {
+ rdev->efptr = rval & 0xffff;
+ rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
+ hopcount);
+
+ rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
+ hopcount, RIO_EFB_ERR_MGMNT);
+ }
+
+ rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
+ &rdev->src_ops);
+ rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
+ &rdev->dst_ops);
+
+ rdev->comp_tag = dev_info.comptag;
+ rdev->destid = destid;
+	/* hopcount is stored as specified by the caller, regardless of EP or SW */
+ rdev->hopcount = hopcount;
+
+ if (rdev->pef & RIO_PEF_SWITCH) {
+ rswitch = rdev->rswitch;
+ rswitch->route_table = NULL;
+ }
+
+ if (strlen(dev_info.name))
+ dev_set_name(&rdev->dev, "%s", dev_info.name);
+ else if (rdev->pef & RIO_PEF_SWITCH)
+ dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
+ rdev->comp_tag & RIO_CTAG_UDEVID);
+ else
+ dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
+ rdev->comp_tag & RIO_CTAG_UDEVID);
+
+ INIT_LIST_HEAD(&rdev->net_list);
+ rdev->dev.parent = &mport->net->dev;
+ rio_attach_device(rdev);
+ rdev->dev.release = rio_release_dev;
+
+ if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
+ rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
+ 0, 0xffff);
+ err = rio_add_device(rdev);
+ if (err)
+ goto cleanup;
+ rio_dev_get(rdev);
+
+ return 0;
+cleanup:
+ kfree(rdev);
+ return err;
+}
+
+static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
+{
+ struct rio_rdev_info dev_info;
+ struct rio_dev *rdev = NULL;
+ struct device *dev;
+ struct rio_mport *mport;
+ struct rio_net *net;
+
+ if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
+ return -EFAULT;
+
+ mport = priv->md->mport;
+
+ /* If device name is specified, removal by name has priority */
+ if (strlen(dev_info.name)) {
+ dev = bus_find_device_by_name(&rio_bus_type, NULL,
+ dev_info.name);
+ if (dev)
+ rdev = to_rio_dev(dev);
+ } else {
+ do {
+ rdev = rio_get_comptag(dev_info.comptag, rdev);
+ if (rdev && rdev->dev.parent == &mport->net->dev &&
+ rdev->destid == (u16)dev_info.destid &&
+ rdev->hopcount == (u8)dev_info.hopcount)
+ break;
+ } while (rdev);
+ }
+
+ if (!rdev) {
+ rmcd_debug(RDEV,
+ "device name:%s ct:0x%x did:0x%x hc:0x%x not found",
+ dev_info.name, dev_info.comptag, dev_info.destid,
+ dev_info.hopcount);
+ return -ENODEV;
+ }
+
+ net = rdev->net;
+ rio_dev_put(rdev);
+ rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);
+
+ if (list_empty(&net->devices)) {
+ rio_free_net(net);
+ mport->net = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Mport cdev management
+ */
+
+/*
+ * mport_cdev_open() - Open character device (mport)
+ */
+static int mport_cdev_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ int minor = iminor(inode);
+ struct mport_dev *chdev;
+ struct mport_cdev_priv *priv;
+
+ /* Test for valid device */
+ if (minor >= RIO_MAX_MPORTS) {
+ rmcd_error("Invalid minor device number");
+ return -EINVAL;
+ }
+
+ chdev = container_of(inode->i_cdev, struct mport_dev, cdev);
+
+ rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);
+
+ if (atomic_read(&chdev->active) == 0)
+ return -ENODEV;
+
+ get_device(&chdev->dev);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ put_device(&chdev->dev);
+ return -ENOMEM;
+ }
+
+ priv->md = chdev;
+
+ mutex_lock(&chdev->file_mutex);
+ list_add_tail(&priv->list, &chdev->file_list);
+ mutex_unlock(&chdev->file_mutex);
+
+ INIT_LIST_HEAD(&priv->db_filters);
+ INIT_LIST_HEAD(&priv->pw_filters);
+ spin_lock_init(&priv->fifo_lock);
+ init_waitqueue_head(&priv->event_rx_wait);
+ ret = kfifo_alloc(&priv->event_fifo,
+ sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
+ GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
+ ret = -ENOMEM;
+ goto err_fifo;
+ }
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ INIT_LIST_HEAD(&priv->async_list);
+ INIT_LIST_HEAD(&priv->pend_list);
+ spin_lock_init(&priv->req_lock);
+ mutex_init(&priv->dma_lock);
+#endif
+
+ filp->private_data = priv;
+ goto out;
+err_fifo:
+ kfree(priv);
+out:
+ return ret;
+}
+
+static int mport_cdev_fasync(int fd, struct file *filp, int mode)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+
+ return fasync_helper(fd, filp, mode, &priv->async_queue);
+}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+static void mport_cdev_release_dma(struct file *filp)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *md;
+ struct mport_dma_req *req, *req_next;
+ unsigned long tmo = msecs_to_jiffies(dma_timeout);
+ long wret;
+ LIST_HEAD(list);
+
+ rmcd_debug(EXIT, "from filp=%p %s(%d)",
+ filp, current->comm, task_pid_nr(current));
+
+ if (!priv->dmach) {
+ rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
+ return;
+ }
+
+ md = priv->md;
+
+ flush_workqueue(dma_wq);
+
+ spin_lock(&priv->req_lock);
+ if (!list_empty(&priv->async_list)) {
+ rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
+ filp, current->comm, task_pid_nr(current));
+ list_splice_init(&priv->async_list, &list);
+ }
+ spin_unlock(&priv->req_lock);
+
+ if (!list_empty(&list)) {
+ rmcd_debug(EXIT, "temp list not empty");
+ list_for_each_entry_safe(req, req_next, &list, node) {
+ rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
+ req->filp, req->cookie,
+ completion_done(&req->req_comp)?"yes":"no");
+ list_del(&req->node);
+ dma_req_free(req);
+ }
+ }
+
+ if (!list_empty(&priv->pend_list)) {
+ rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
+ filp, current->comm, task_pid_nr(current));
+ list_for_each_entry_safe(req,
+ req_next, &priv->pend_list, node) {
+ rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
+ req->filp, req->cookie,
+ completion_done(&req->req_comp)?"yes":"no");
+ list_del(&req->node);
+ dma_req_free(req);
+ }
+ }
+
+ put_dma_channel(priv);
+ wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
+
+ if (wret <= 0) {
+ rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
+ current->comm, task_pid_nr(current), wret);
+ }
+
+ spin_lock(&priv->req_lock);
+
+ if (!list_empty(&priv->pend_list)) {
+ rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
+ filp, current->comm, task_pid_nr(current));
+ }
+
+ spin_unlock(&priv->req_lock);
+
+ if (priv->dmach != priv->md->dma_chan) {
+ rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
+ filp, current->comm, task_pid_nr(current));
+ rio_release_dma(priv->dmach);
+ } else {
+ rmcd_debug(EXIT, "Adjust default DMA channel refcount");
+ kref_put(&md->dma_ref, mport_release_def_dma);
+ }
+
+ priv->dmach = NULL;
+}
+#else
+#define mport_cdev_release_dma(filp) do {} while (0)
+#endif
+
+/*
+ * mport_cdev_release() - Release character device
+ */
+static int mport_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *chdev;
+ struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
+ struct rio_mport_db_filter *db_filter, *db_filter_next;
+ struct rio_mport_mapping *map, *_map;
+ unsigned long flags;
+
+ rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
+
+ chdev = priv->md;
+ mport_cdev_release_dma(filp);
+
+ priv->event_mask = 0;
+
+ spin_lock_irqsave(&chdev->pw_lock, flags);
+ if (!list_empty(&priv->pw_filters)) {
+ list_for_each_entry_safe(pw_filter, pw_filter_next,
+ &priv->pw_filters, priv_node)
+ rio_mport_delete_pw_filter(pw_filter);
+ }
+ spin_unlock_irqrestore(&chdev->pw_lock, flags);
+
+ spin_lock_irqsave(&chdev->db_lock, flags);
+ list_for_each_entry_safe(db_filter, db_filter_next,
+ &priv->db_filters, priv_node) {
+ rio_mport_delete_db_filter(db_filter);
+ }
+ spin_unlock_irqrestore(&chdev->db_lock, flags);
+
+ kfifo_free(&priv->event_fifo);
+
+ mutex_lock(&chdev->buf_mutex);
+ list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
+ if (map->filp == filp) {
+ rmcd_debug(EXIT, "release mapping %p filp=%p",
+ map->virt_addr, filp);
+ kref_put(&map->ref, mport_release_mapping);
+ }
+ }
+ mutex_unlock(&chdev->buf_mutex);
+
+ mport_cdev_fasync(-1, filp, 0);
+ filp->private_data = NULL;
+ mutex_lock(&chdev->file_mutex);
+ list_del(&priv->list);
+ mutex_unlock(&chdev->file_mutex);
+ put_device(&chdev->dev);
+ kfree(priv);
+ return 0;
+}
+
+/*
+ * mport_cdev_ioctl() - IOCTLs for character device
+ */
+static long mport_cdev_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = -EINVAL;
+ struct mport_cdev_priv *data = filp->private_data;
+ struct mport_dev *md = data->md;
+
+ if (atomic_read(&md->active) == 0)
+ return -ENODEV;
+
+ switch (cmd) {
+ case RIO_MPORT_MAINT_READ_LOCAL:
+ return rio_mport_maint_rd(data, (void __user *)arg, 1);
+ case RIO_MPORT_MAINT_WRITE_LOCAL:
+ return rio_mport_maint_wr(data, (void __user *)arg, 1);
+ case RIO_MPORT_MAINT_READ_REMOTE:
+ return rio_mport_maint_rd(data, (void __user *)arg, 0);
+ case RIO_MPORT_MAINT_WRITE_REMOTE:
+ return rio_mport_maint_wr(data, (void __user *)arg, 0);
+ case RIO_MPORT_MAINT_HDID_SET:
+ return maint_hdid_set(data, (void __user *)arg);
+ case RIO_MPORT_MAINT_COMPTAG_SET:
+ return maint_comptag_set(data, (void __user *)arg);
+ case RIO_MPORT_MAINT_PORT_IDX_GET:
+ return maint_port_idx_get(data, (void __user *)arg);
+ case RIO_MPORT_GET_PROPERTIES:
+ md->properties.hdid = md->mport->host_deviceid;
+ if (copy_to_user((void __user *)arg, &(data->md->properties),
+ sizeof(data->md->properties)))
+ return -EFAULT;
+ return 0;
+ case RIO_ENABLE_DOORBELL_RANGE:
+ return rio_mport_add_db_filter(data, (void __user *)arg);
+ case RIO_DISABLE_DOORBELL_RANGE:
+ return rio_mport_remove_db_filter(data, (void __user *)arg);
+ case RIO_ENABLE_PORTWRITE_RANGE:
+ return rio_mport_add_pw_filter(data, (void __user *)arg);
+ case RIO_DISABLE_PORTWRITE_RANGE:
+ return rio_mport_remove_pw_filter(data, (void __user *)arg);
+ case RIO_SET_EVENT_MASK:
+ data->event_mask = arg;
+ return 0;
+ case RIO_GET_EVENT_MASK:
+ if (copy_to_user((void __user *)arg, &data->event_mask,
+ sizeof(data->event_mask)))
+ return -EFAULT;
+ return 0;
+ case RIO_MAP_OUTBOUND:
+ return rio_mport_obw_map(filp, (void __user *)arg);
+ case RIO_MAP_INBOUND:
+ return rio_mport_map_inbound(filp, (void __user *)arg);
+ case RIO_UNMAP_OUTBOUND:
+ return rio_mport_obw_free(filp, (void __user *)arg);
+ case RIO_UNMAP_INBOUND:
+ return rio_mport_inbound_free(filp, (void __user *)arg);
+ case RIO_ALLOC_DMA:
+ return rio_mport_alloc_dma(filp, (void __user *)arg);
+ case RIO_FREE_DMA:
+ return rio_mport_free_dma(filp, (void __user *)arg);
+ case RIO_WAIT_FOR_ASYNC:
+ return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
+ case RIO_TRANSFER:
+ return rio_mport_transfer_ioctl(filp, (void __user *)arg);
+ case RIO_DEV_ADD:
+ return rio_mport_add_riodev(data, (void __user *)arg);
+ case RIO_DEV_DEL:
+ return rio_mport_del_riodev(data, (void __user *)arg);
+ default:
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * mport_release_mapping - free mapping resources and info structure
+ * @ref: a pointer to the kref within struct rio_mport_mapping
+ *
+ * NOTE: Shall be called while holding buf_mutex.
+ */
+static void mport_release_mapping(struct kref *ref)
+{
+ struct rio_mport_mapping *map =
+ container_of(ref, struct rio_mport_mapping, ref);
+ struct rio_mport *mport = map->md->mport;
+
+ rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
+ map->dir, map->virt_addr,
+ &map->phys_addr, mport->name);
+
+ list_del(&map->node);
+
+ switch (map->dir) {
+ case MAP_INBOUND:
+ rio_unmap_inb_region(mport, map->phys_addr);
+ case MAP_DMA:
+ dma_free_coherent(mport->dev.parent, map->size,
+ map->virt_addr, map->phys_addr);
+ break;
+ case MAP_OUTBOUND:
+ rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
+ break;
+ }
+ kfree(map);
+}
+
+static void mport_mm_open(struct vm_area_struct *vma)
+{
+ struct rio_mport_mapping *map = vma->vm_private_data;
+
+	rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
+ kref_get(&map->ref);
+}
+
+static void mport_mm_close(struct vm_area_struct *vma)
+{
+ struct rio_mport_mapping *map = vma->vm_private_data;
+
+	rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
+ mutex_lock(&map->md->buf_mutex);
+ kref_put(&map->ref, mport_release_mapping);
+ mutex_unlock(&map->md->buf_mutex);
+}
+
+static const struct vm_operations_struct vm_ops = {
+ .open = mport_mm_open,
+ .close = mport_mm_close,
+};
+
+static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct mport_dev *md;
+ size_t size = vma->vm_end - vma->vm_start;
+ dma_addr_t baddr;
+ unsigned long offset;
+ int found = 0, ret;
+ struct rio_mport_mapping *map;
+
+ rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
+ (unsigned int)size, vma->vm_pgoff);
+
+ md = priv->md;
+ baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
+
+ mutex_lock(&md->buf_mutex);
+ list_for_each_entry(map, &md->mappings, node) {
+ if (baddr >= map->phys_addr &&
+ baddr < (map->phys_addr + map->size)) {
+ found = 1;
+ break;
+ }
+ }
+ mutex_unlock(&md->buf_mutex);
+
+ if (!found)
+ return -ENOMEM;
+
+ offset = baddr - map->phys_addr;
+
+ if (size + offset > map->size)
+ return -EINVAL;
+
+ vma->vm_pgoff = offset >> PAGE_SHIFT;
+ rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
+
+ if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
+ ret = dma_mmap_coherent(md->mport->dev.parent, vma,
+ map->virt_addr, map->phys_addr, map->size);
+ else if (map->dir == MAP_OUTBOUND) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ ret = vm_iomap_memory(vma, map->phys_addr, map->size);
+ } else {
+ rmcd_error("Attempt to mmap unsupported mapping type");
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ vma->vm_private_data = map;
+ vma->vm_ops = &vm_ops;
+ mport_mm_open(vma);
+ } else {
+ rmcd_error("MMAP exit with err=%d", ret);
+ }
+
+ return ret;
+}
+
+static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+
+ poll_wait(filp, &priv->event_rx_wait, wait);
+ if (kfifo_len(&priv->event_fifo))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ int copied;
+ ssize_t ret;
+
+ if (!count)
+ return 0;
+
+ if (kfifo_is_empty(&priv->event_fifo) &&
+ (filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ if (count % sizeof(struct rio_event))
+ return -EINVAL;
+
+ ret = wait_event_interruptible(priv->event_rx_wait,
+ kfifo_len(&priv->event_fifo) != 0);
+ if (ret)
+ return ret;
+
+	while (ret < count) {
+		if (kfifo_to_user(&priv->event_fifo, buf,
+			sizeof(struct rio_event), &copied))
+			return -EFAULT;
+		if (!copied)
+			break;	/* fifo drained, do not spin forever */
+		ret += copied;
+		buf += copied;
+	}
+
+ return ret;
+}
+
+static ssize_t mport_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mport_cdev_priv *priv = filp->private_data;
+ struct rio_mport *mport = priv->md->mport;
+ struct rio_event event;
+ int len, ret;
+
+ if (!count)
+ return 0;
+
+ if (count % sizeof(event))
+ return -EINVAL;
+
+ len = 0;
+ while ((count - len) >= (int)sizeof(event)) {
+ if (copy_from_user(&event, buf, sizeof(event)))
+ return -EFAULT;
+
+ if (event.header != RIO_DOORBELL)
+ return -EINVAL;
+
+ ret = rio_mport_send_doorbell(mport,
+ (u16)event.u.doorbell.rioid,
+ event.u.doorbell.payload);
+ if (ret < 0)
+ return ret;
+
+ len += sizeof(event);
+ buf += sizeof(event);
+ }
+
+ return len;
+}
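+
+/*
+ * Illustrative user-space sketch (not part of this driver): send an
+ * outbound doorbell by writing a RIO_DOORBELL event to the device file,
+ * as handled by mport_write() above.
+ *
+ *	struct rio_event ev = { .header = RIO_DOORBELL };
+ *	ev.u.doorbell.rioid = destid;
+ *	ev.u.doorbell.payload = info;
+ *	write(fd, &ev, sizeof(ev));
+ */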
+
+static const struct file_operations mport_fops = {
+ .owner = THIS_MODULE,
+ .open = mport_cdev_open,
+ .release = mport_cdev_release,
+ .poll = mport_cdev_poll,
+ .read = mport_read,
+ .write = mport_write,
+ .mmap = mport_cdev_mmap,
+ .fasync = mport_cdev_fasync,
+ .unlocked_ioctl = mport_cdev_ioctl
+};
+
+/*
+ * Character device management
+ */
+
+static void mport_device_release(struct device *dev)
+{
+ struct mport_dev *md;
+
+ rmcd_debug(EXIT, "%s", dev_name(dev));
+ md = container_of(dev, struct mport_dev, dev);
+ kfree(md);
+}
+
+/*
+ * mport_cdev_add() - Create mport_dev from rio_mport
+ * @mport: RapidIO master port
+ */
+static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
+{
+ int ret = 0;
+ struct mport_dev *md;
+ struct rio_mport_attr attr;
+
+ md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL);
+ if (!md) {
+		rmcd_error("Unable to allocate a device object");
+ return NULL;
+ }
+
+ md->mport = mport;
+ mutex_init(&md->buf_mutex);
+ mutex_init(&md->file_mutex);
+ INIT_LIST_HEAD(&md->file_list);
+ cdev_init(&md->cdev, &mport_fops);
+ md->cdev.owner = THIS_MODULE;
+ ret = cdev_add(&md->cdev, MKDEV(MAJOR(dev_number), mport->id), 1);
+ if (ret < 0) {
+ kfree(md);
+ rmcd_error("Unable to register a device, err=%d", ret);
+ return NULL;
+ }
+
+ md->dev.devt = md->cdev.dev;
+ md->dev.class = dev_class;
+ md->dev.parent = &mport->dev;
+ md->dev.release = mport_device_release;
+ dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
+ atomic_set(&md->active, 1);
+
+ ret = device_register(&md->dev);
+ if (ret) {
+ rmcd_error("Failed to register mport %d (err=%d)",
+ mport->id, ret);
+ goto err_cdev;
+ }
+
+ get_device(&md->dev);
+
+ INIT_LIST_HEAD(&md->doorbells);
+ spin_lock_init(&md->db_lock);
+ INIT_LIST_HEAD(&md->portwrites);
+ spin_lock_init(&md->pw_lock);
+ INIT_LIST_HEAD(&md->mappings);
+
+ md->properties.id = mport->id;
+ md->properties.sys_size = mport->sys_size;
+ md->properties.hdid = mport->host_deviceid;
+ md->properties.index = mport->index;
+
+	/* The transfer_mode property will be returned through the mport
+	 * query interface.
+	 */
+#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */
+ md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
+#else
+ md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
+#endif
+ ret = rio_query_mport(mport, &attr);
+ if (!ret) {
+ md->properties.flags = attr.flags;
+ md->properties.link_speed = attr.link_speed;
+ md->properties.link_width = attr.link_width;
+ md->properties.dma_max_sge = attr.dma_max_sge;
+ md->properties.dma_max_size = attr.dma_max_size;
+ md->properties.dma_align = attr.dma_align;
+ md->properties.cap_sys_size = 0;
+ md->properties.cap_transfer_mode = 0;
+ md->properties.cap_addr_size = 0;
+ } else
+ pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
+ mport->name, MAJOR(dev_number), mport->id);
+
+ mutex_lock(&mport_devs_lock);
+ list_add_tail(&md->node, &mport_devs);
+ mutex_unlock(&mport_devs_lock);
+
+ pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
+ mport->name, MAJOR(dev_number), mport->id);
+
+ return md;
+
+err_cdev:
+ cdev_del(&md->cdev);
+ kfree(md);
+ return NULL;
+}
+
+/*
+ * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
+ * associated DMA channels.
+ */
+static void mport_cdev_terminate_dma(struct mport_dev *md)
+{
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ struct mport_cdev_priv *client;
+
+ rmcd_debug(DMA, "%s", dev_name(&md->dev));
+
+ mutex_lock(&md->file_mutex);
+ list_for_each_entry(client, &md->file_list, list) {
+ if (client->dmach) {
+ dmaengine_terminate_all(client->dmach);
+ rio_release_dma(client->dmach);
+ }
+ }
+ mutex_unlock(&md->file_mutex);
+
+ if (md->dma_chan) {
+ dmaengine_terminate_all(md->dma_chan);
+ rio_release_dma(md->dma_chan);
+ md->dma_chan = NULL;
+ }
+#endif
+}
+
+/*
+ * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
+ * mport_cdev files.
+ */
+static int mport_cdev_kill_fasync(struct mport_dev *md)
+{
+ unsigned int files = 0;
+ struct mport_cdev_priv *client;
+
+ mutex_lock(&md->file_mutex);
+ list_for_each_entry(client, &md->file_list, list) {
+ if (client->async_queue)
+ kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
+ files++;
+ }
+ mutex_unlock(&md->file_mutex);
+ return files;
+}
+
+/*
+ * mport_cdev_remove() - Remove mport character device
+ * @dev: Mport device to remove
+ */
+static void mport_cdev_remove(struct mport_dev *md)
+{
+ struct rio_mport_mapping *map, *_map;
+
+ rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
+ atomic_set(&md->active, 0);
+ mport_cdev_terminate_dma(md);
+ rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
+ cdev_del(&(md->cdev));
+ mport_cdev_kill_fasync(md);
+
+ flush_workqueue(dma_wq);
+
+ /* TODO: do we need to give clients some time to close file
+ * descriptors? Simple wait for XX, or kref?
+ */
+
+ /*
+ * Release DMA buffers allocated for the mport device.
+ * Disable associated inbound Rapidio requests mapping if applicable.
+ */
+ mutex_lock(&md->buf_mutex);
+ list_for_each_entry_safe(map, _map, &md->mappings, node) {
+ kref_put(&map->ref, mport_release_mapping);
+ }
+ mutex_unlock(&md->buf_mutex);
+
+ if (!list_empty(&md->mappings))
+ rmcd_warn("WARNING: %s pending mappings on removal",
+ md->mport->name);
+
+ rio_release_inb_dbell(md->mport, 0, 0x0fff);
+
+ device_unregister(&md->dev);
+ put_device(&md->dev);
+}
+
+/*
+ * RIO rio_mport_interface driver
+ */
+
+/*
+ * mport_add_mport() - Add rio_mport from LDM device struct
+ * @dev: Linux device model struct
+ * @class_intf: Linux class_interface
+ */
+static int mport_add_mport(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct rio_mport *mport = NULL;
+ struct mport_dev *chdev = NULL;
+
+ mport = to_rio_mport(dev);
+ if (!mport)
+ return -ENODEV;
+
+ chdev = mport_cdev_add(mport);
+ if (!chdev)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * mport_remove_mport() - Remove rio_mport from global list
+ * TODO remove device from global mport_dev list
+ */
+static void mport_remove_mport(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct rio_mport *mport = NULL;
+ struct mport_dev *chdev;
+ int found = 0;
+
+ mport = to_rio_mport(dev);
+ rmcd_debug(EXIT, "Remove %s", mport->name);
+
+ mutex_lock(&mport_devs_lock);
+ list_for_each_entry(chdev, &mport_devs, node) {
+ if (chdev->mport->id == mport->id) {
+ atomic_set(&chdev->active, 0);
+ list_del(&chdev->node);
+ found = 1;
+ break;
+ }
+ }
+ mutex_unlock(&mport_devs_lock);
+
+ if (found)
+ mport_cdev_remove(chdev);
+}
+
+/* the rio_mport_interface is used to handle local mport devices */
+static struct class_interface rio_mport_interface __refdata = {
+ .class = &rio_mport_class,
+ .add_dev = mport_add_mport,
+ .remove_dev = mport_remove_mport,
+};
+
+/*
+ * Linux kernel module
+ */
+
+/*
+ * mport_init - Driver module loading
+ */
+static int __init mport_init(void)
+{
+ int ret;
+
+ /* Create device class needed by udev */
+ dev_class = class_create(THIS_MODULE, DRV_NAME);
+	if (IS_ERR(dev_class)) {
+		/* class_create() returns ERR_PTR on failure, never NULL */
+		rmcd_error("Unable to create " DRV_NAME " class");
+		return PTR_ERR(dev_class);
+ }
+
+ ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
+ if (ret < 0)
+ goto err_chr;
+
+ rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));
+
+ /* Register to rio_mport_interface */
+ ret = class_interface_register(&rio_mport_interface);
+ if (ret) {
+ rmcd_error("class_interface_register() failed, err=%d", ret);
+ goto err_cli;
+ }
+
+ dma_wq = create_singlethread_workqueue("dma_wq");
+ if (!dma_wq) {
+ rmcd_error("failed to create DMA work queue");
+ ret = -ENOMEM;
+ goto err_wq;
+ }
+
+ return 0;
+
+err_wq:
+ class_interface_unregister(&rio_mport_interface);
+err_cli:
+ unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
+err_chr:
+ class_destroy(dev_class);
+ return ret;
+}
+
+/*
+ * mport_exit - Driver module unloading
+ */
+static void __exit mport_exit(void)
+{
+ class_interface_unregister(&rio_mport_interface);
+ class_destroy(dev_class);
+ unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
+ destroy_workqueue(dma_wq);
+}
+
+module_init(mport_init);
+module_exit(mport_exit);
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index eeca70ddbf61..b5b455614f8a 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -36,7 +36,11 @@
#include "tsi721.h"
-#define DEBUG_PW /* Inbound Port-Write debugging */
+#ifdef DEBUG
+u32 dbg_level = DBG_INIT | DBG_EXIT;
+module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(dbg_level, "Debugging output level (default: init and exit messages)");
+#endif
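+/*
+ * The mask can be changed at runtime via sysfs, e.g. (illustrative; the
+ * path follows the module name):
+ *   echo 0x3f > /sys/module/tsi721/parameters/dbg_level
+ */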
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
@@ -143,9 +147,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
& TSI721_DMAC_STS_RUN) {
udelay(1);
if (++i >= 5000000) {
- dev_dbg(&priv->pdev->dev,
- "%s : DMA[%d] read timeout ch_status=%x\n",
- __func__, priv->mdma.ch_id, ch_stat);
+ tsi_debug(MAINT, &priv->pdev->dev,
+ "DMA[%d] read timeout ch_status=%x",
+ priv->mdma.ch_id, ch_stat);
if (!do_wr)
*data = 0xffffffff;
err = -EIO;
@@ -157,10 +161,12 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
/* If DMA operation aborted due to error,
* reinitialize DMA channel
*/
- dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n",
- __func__, ch_stat);
- dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
- do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
+ tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x",
+ ch_stat);
+ tsi_debug(MAINT, &priv->pdev->dev,
+ "OP=%d : destid=%x hc=%x off=%x",
+ do_wr ? MAINT_WR : MAINT_RD,
+ destid, hopcount, offset);
iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
udelay(10);
@@ -236,16 +242,15 @@ static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
/**
* tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
- * @mport: RapidIO master port structure
+ * @priv: tsi721 device private structure
*
* Handles inbound port-write interrupts. Copies PW message from an internal
* buffer into PW message FIFO and schedules deferred routine to process
* queued messages.
*/
static int
-tsi721_pw_handler(struct rio_mport *mport)
+tsi721_pw_handler(struct tsi721_device *priv)
{
- struct tsi721_device *priv = mport->priv;
u32 pw_stat;
u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];
@@ -283,30 +288,15 @@ static void tsi721_pw_dpc(struct work_struct *work)
{
struct tsi721_device *priv = container_of(work, struct tsi721_device,
pw_work);
- u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message
- buffer for RIO layer */
+ union rio_pw_msg pwmsg;
/*
* Process port-write messages
*/
- while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer,
+ while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg,
TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
- /* Process one message */
-#ifdef DEBUG_PW
- {
- u32 i;
- pr_debug("%s : Port-Write Message:", __func__);
- for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) {
- pr_debug("0x%02x: %08x %08x %08x %08x", i*4,
- msg_buffer[i], msg_buffer[i + 1],
- msg_buffer[i + 2], msg_buffer[i + 3]);
- i += 4;
- }
- pr_debug("\n");
- }
-#endif
/* Pass the port-write message to RIO core for processing */
- rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
+ rio_inb_pwrite_handler(&priv->mport, &pwmsg);
}
}
@@ -354,8 +344,8 @@ static int tsi721_dsend(struct rio_mport *mport, int index,
offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
(destid << 2);
- dev_dbg(&priv->pdev->dev,
- "Send Doorbell 0x%04x to destID 0x%x\n", data, destid);
+ tsi_debug(DBELL, &priv->pdev->dev,
+ "Send Doorbell 0x%04x to destID 0x%x", data, destid);
iowrite16be(data, priv->odb_base + offset);
return 0;
@@ -363,16 +353,15 @@ static int tsi721_dsend(struct rio_mport *mport, int index,
/**
* tsi721_dbell_handler - Tsi721 doorbell interrupt handler
- * @mport: RapidIO master port structure
+ * @priv: tsi721 device-specific data structure
*
* Handles inbound doorbell interrupts. Copies doorbell entry from an internal
* buffer into DB message FIFO and schedules deferred routine to process
* queued DBs.
*/
static int
-tsi721_dbell_handler(struct rio_mport *mport)
+tsi721_dbell_handler(struct tsi721_device *priv)
{
- struct tsi721_device *priv = mport->priv;
u32 regval;
/* Disable IDB interrupts */
@@ -404,7 +393,7 @@ static void tsi721_db_dpc(struct work_struct *work)
/*
* Process queued inbound doorbells
*/
- mport = priv->mport;
+ mport = &priv->mport;
wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
@@ -430,10 +419,10 @@ static void tsi721_db_dpc(struct work_struct *work)
dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
} else {
- dev_dbg(&priv->pdev->dev,
- "spurious inb doorbell, sid %2.2x tid %2.2x"
- " info %4.4x\n", DBELL_SID(idb.bytes),
- DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
+ tsi_debug(DBELL, &priv->pdev->dev,
+ "spurious IDB sid %2.2x tid %2.2x info %4.4x",
+ DBELL_SID(idb.bytes), DBELL_TID(idb.bytes),
+ DBELL_INF(idb.bytes));
}
wr_ptr = ioread32(priv->regs +
@@ -457,15 +446,14 @@ static void tsi721_db_dpc(struct work_struct *work)
/**
* tsi721_irqhandler - Tsi721 interrupt handler
* @irq: Linux interrupt number
- * @ptr: Pointer to interrupt-specific data (mport structure)
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
* interrupt events and calls an event-specific handler(s).
*/
static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
{
- struct rio_mport *mport = (struct rio_mport *)ptr;
- struct tsi721_device *priv = mport->priv;
+ struct tsi721_device *priv = (struct tsi721_device *)ptr;
u32 dev_int;
u32 dev_ch_int;
u32 intval;
@@ -488,10 +476,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
intval = ioread32(priv->regs +
TSI721_SR_CHINT(IDB_QUEUE));
if (intval & TSI721_SR_CHINT_IDBQRCV)
- tsi721_dbell_handler(mport);
+ tsi721_dbell_handler(priv);
else
- dev_info(&priv->pdev->dev,
- "Unsupported SR_CH_INT %x\n", intval);
+ tsi_info(&priv->pdev->dev,
+ "Unsupported SR_CH_INT %x", intval);
/* Clear interrupts */
iowrite32(intval,
@@ -545,7 +533,7 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
/* Service SRIO MAC interrupts */
intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
- tsi721_pw_handler(mport);
+ tsi721_pw_handler(priv);
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
@@ -553,8 +541,8 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
int ch;
if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
- dev_dbg(&priv->pdev->dev,
- "IRQ from DMA channel 0x%08x\n", dev_ch_int);
+ tsi_debug(DMA, &priv->pdev->dev,
+ "IRQ from DMA channel 0x%08x", dev_ch_int);
for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
@@ -613,13 +601,13 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
/**
* tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
* @irq: Linux interrupt number
- * @ptr: Pointer to interrupt-specific data (mport structure)
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles outbound messaging interrupts signaled using MSI-X.
*/
static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
{
- struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
+ struct tsi721_device *priv = (struct tsi721_device *)ptr;
int mbox;
mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
@@ -630,13 +618,13 @@ static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
/**
* tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
* @irq: Linux interrupt number
- * @ptr: Pointer to interrupt-specific data (mport structure)
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles inbound messaging interrupts signaled using MSI-X.
*/
static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
{
- struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
+ struct tsi721_device *priv = (struct tsi721_device *)ptr;
int mbox;
mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
@@ -647,19 +635,19 @@ static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
/**
* tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
* @irq: Linux interrupt number
- * @ptr: Pointer to interrupt-specific data (mport structure)
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles Tsi721 interrupts from SRIO MAC.
*/
static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
{
- struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
+ struct tsi721_device *priv = (struct tsi721_device *)ptr;
u32 srio_int;
/* Service SRIO MAC interrupts */
srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
- tsi721_pw_handler((struct rio_mport *)ptr);
+ tsi721_pw_handler(priv);
return IRQ_HANDLED;
}
@@ -667,7 +655,7 @@ static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
/**
* tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
* @irq: Linux interrupt number
- * @ptr: Pointer to interrupt-specific data (mport structure)
+ * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
*
* Handles Tsi721 interrupts from SR2PC Channel.
* NOTE: At this moment services only one SR2PC channel associated with inbound
@@ -675,13 +663,13 @@ static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
*/
static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
{
- struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
+ struct tsi721_device *priv = (struct tsi721_device *)ptr;
u32 sr_ch_int;
/* Service Inbound DB interrupt from SR2PC channel */
sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
- tsi721_dbell_handler((struct rio_mport *)ptr);
+ tsi721_dbell_handler(priv);
/* Clear interrupts */
iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
@@ -693,32 +681,31 @@ static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
/**
* tsi721_request_msix - register interrupt service for MSI-X mode.
- * @mport: RapidIO master port structure
+ * @priv: tsi721 device-specific data structure
*
* Registers MSI-X interrupt service routines for interrupts that are active
* immediately after mport initialization. Messaging interrupt service routines
* should be registered during corresponding open requests.
*/
-static int tsi721_request_msix(struct rio_mport *mport)
+static int tsi721_request_msix(struct tsi721_device *priv)
{
- struct tsi721_device *priv = mport->priv;
int err = 0;
err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
tsi721_sr2pc_ch_msix, 0,
- priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport);
+ priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv);
if (err)
- goto out;
+ return err;
err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
tsi721_srio_msix, 0,
- priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport);
- if (err)
- free_irq(
- priv->msix[TSI721_VECT_IDB].vector,
- (void *)mport);
-out:
- return err;
+ priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv);
+ if (err) {
+ free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
+ return err;
+ }
+
+ return 0;
}
/**
@@ -770,8 +757,8 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries));
if (err) {
- dev_err(&priv->pdev->dev,
- "Failed to enable MSI-X (err=%d)\n", err);
+ tsi_err(&priv->pdev->dev,
+ "Failed to enable MSI-X (err=%d)", err);
return err;
}
@@ -831,27 +818,209 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
}
#endif /* CONFIG_PCI_MSI */
-static int tsi721_request_irq(struct rio_mport *mport)
+static int tsi721_request_irq(struct tsi721_device *priv)
{
- struct tsi721_device *priv = mport->priv;
int err;
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX)
- err = tsi721_request_msix(mport);
+ err = tsi721_request_msix(priv);
else
#endif
err = request_irq(priv->pdev->irq, tsi721_irqhandler,
(priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
- DRV_NAME, (void *)mport);
+ DRV_NAME, (void *)priv);
if (err)
- dev_err(&priv->pdev->dev,
- "Unable to allocate interrupt, Error: %d\n", err);
+ tsi_err(&priv->pdev->dev,
+ "Unable to allocate interrupt, err=%d", err);
return err;
}
+static void tsi721_free_irq(struct tsi721_device *priv)
+{
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
+ free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv);
+ } else
+#endif
+ free_irq(priv->pdev->irq, (void *)priv);
+}
+
+static int
+tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar,
+ u32 size, int *win_id)
+{
+ u64 win_base;
+ u64 bar_base;
+ u64 bar_end;
+ u32 align;
+ struct tsi721_ob_win *win;
+ struct tsi721_ob_win *new_win = NULL;
+ int new_win_idx = -1;
+ int i = 0;
+
+ bar_base = pbar->base;
+ bar_end = bar_base + pbar->size;
+ win_base = bar_base;
+ align = size/TSI721_PC2SR_ZONES;
+
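+ /*
+ * Scan all outbound windows for a free slot; if the candidate base
+ * overlaps an active window, advance the base past that window and
+ * restart the scan (the outer loop re-enters when the inner for
+ * breaks out early).
+ */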
+ while (i < TSI721_OBWIN_NUM) {
+ for (i = 0; i < TSI721_OBWIN_NUM; i++) {
+ if (!priv->ob_win[i].active) {
+ if (new_win == NULL) {
+ new_win = &priv->ob_win[i];
+ new_win_idx = i;
+ }
+ continue;
+ }
+
+ /*
+ * If this window belongs to the current BAR check it
+ * for overlap
+ */
+ win = &priv->ob_win[i];
+
+ if (win->base >= bar_base && win->base < bar_end) {
+ if (win_base < (win->base + win->size) &&
+ (win_base + size) > win->base) {
+ /* Overlap detected */
+ win_base = win->base + win->size;
+ win_base = ALIGN(win_base, align);
+ break;
+ }
+ }
+ }
+ }
+
+ if (win_base + size > bar_end)
+ return -ENOMEM;
+
+ if (!new_win) {
+ tsi_err(&priv->pdev->dev, "OBW count tracking failed");
+ return -EIO;
+ }
+
+ new_win->active = true;
+ new_win->base = win_base;
+ new_win->size = size;
+ new_win->pbar = pbar;
+ priv->obwin_cnt--;
+ pbar->free -= size;
+ *win_id = new_win_idx;
+ return 0;
+}
+
+static int tsi721_map_outb_win(struct rio_mport *mport, u16 destid, u64 rstart,
+ u32 size, u32 flags, dma_addr_t *laddr)
+{
+ struct tsi721_device *priv = mport->priv;
+ int i;
+ struct tsi721_obw_bar *pbar;
+ struct tsi721_ob_win *ob_win;
+ int obw = -1;
+ u32 rval;
+ u64 rio_addr;
+ u32 zsize;
+ int ret = -ENOMEM;
+
+ tsi_debug(OBW, &priv->pdev->dev,
+ "did=%d ra=0x%llx sz=0x%x", destid, rstart, size);
+
+ if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1)))
+ return -EINVAL;
+
+ if (priv->obwin_cnt == 0)
+ return -EBUSY;
+
+ for (i = 0; i < 2; i++) {
+ if (priv->p2r_bar[i].free >= size) {
+ pbar = &priv->p2r_bar[i];
+ ret = tsi721_obw_alloc(priv, pbar, size, &obw);
+ if (!ret)
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ WARN_ON(obw == -1);
+ ob_win = &priv->ob_win[obw];
+ ob_win->destid = destid;
+ ob_win->rstart = rstart;
+ tsi_debug(OBW, &priv->pdev->dev,
+ "allocated OBW%d @%llx", obw, ob_win->base);
+
+ /*
+ * Configure Outbound Window
+ */
+
+ zsize = size/TSI721_PC2SR_ZONES;
+ rio_addr = rstart;
+
+ /*
+ * Program Address Translation Zones:
+ * This implementation uses all 8 zones associated with each window.
+ */
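+ /*
+ * Each zone translates size/8 of the window: LUT_DATA0..2 stage the
+ * RapidIO address, operation enables and destID, and a write to
+ * ZONE_SEL with the GO bit set commits the entry (the poll below
+ * waits for the previous commit to complete).
+ */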
+ for (i = 0; i < TSI721_PC2SR_ZONES; i++) {
+
+ while (ioread32(priv->regs + TSI721_ZONE_SEL) &
+ TSI721_ZONE_SEL_GO) {
+ udelay(1);
+ }
+
+ rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) |
+ TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR;
+ iowrite32(rval, priv->regs + TSI721_LUT_DATA0);
+ rval = (u32)(rio_addr >> 32);
+ iowrite32(rval, priv->regs + TSI721_LUT_DATA1);
+ rval = destid;
+ iowrite32(rval, priv->regs + TSI721_LUT_DATA2);
+
+ rval = TSI721_ZONE_SEL_GO | (obw << 3) | i;
+ iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
+
+ rio_addr += zsize;
+ }
+
+ iowrite32(TSI721_OBWIN_SIZE(size) << 8,
+ priv->regs + TSI721_OBWINSZ(obw));
+ iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw));
+ iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN,
+ priv->regs + TSI721_OBWINLB(obw));
+
+ *laddr = ob_win->base;
+ return 0;
+}
+
+static void tsi721_unmap_outb_win(struct rio_mport *mport,
+ u16 destid, u64 rstart)
+{
+ struct tsi721_device *priv = mport->priv;
+ struct tsi721_ob_win *ob_win;
+ int i;
+
+ tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart);
+
+ for (i = 0; i < TSI721_OBWIN_NUM; i++) {
+ ob_win = &priv->ob_win[i];
+
+ if (ob_win->active &&
+ ob_win->destid == destid && ob_win->rstart == rstart) {
+ tsi_debug(OBW, &priv->pdev->dev,
+ "free OBW%d @%llx", i, ob_win->base);
+ ob_win->active = false;
+ iowrite32(0, priv->regs + TSI721_OBWINLB(i));
+ ob_win->pbar->free += ob_win->size;
+ priv->obwin_cnt++;
+ break;
+ }
+ }
+}
+
/**
* tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
* translation regions.
@@ -861,11 +1030,41 @@ static int tsi721_request_irq(struct rio_mport *mport)
*/
static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
{
- int i;
+ int i, z;
+ u32 rval;
/* Disable all PC2SR translation windows */
for (i = 0; i < TSI721_OBWIN_NUM; i++)
iowrite32(0, priv->regs + TSI721_OBWINLB(i));
+
+ /* Initialize zone lookup tables to avoid ECC errors on reads */
+ iowrite32(0, priv->regs + TSI721_LUT_DATA0);
+ iowrite32(0, priv->regs + TSI721_LUT_DATA1);
+ iowrite32(0, priv->regs + TSI721_LUT_DATA2);
+
+ for (i = 0; i < TSI721_OBWIN_NUM; i++) {
+ for (z = 0; z < TSI721_PC2SR_ZONES; z++) {
+ while (ioread32(priv->regs + TSI721_ZONE_SEL) &
+ TSI721_ZONE_SEL_GO) {
+ udelay(1);
+ }
+ rval = TSI721_ZONE_SEL_GO | (i << 3) | z;
+ iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
+ }
+ }
+
+ if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) {
+ priv->obwin_cnt = 0;
+ return;
+ }
+
+ priv->p2r_bar[0].free = priv->p2r_bar[0].size;
+ priv->p2r_bar[1].free = priv->p2r_bar[1].size;
+
+ for (i = 0; i < TSI721_OBWIN_NUM; i++)
+ priv->ob_win[i].active = false;
+
+ priv->obwin_cnt = TSI721_OBWIN_NUM;
}
/**
@@ -885,45 +1084,148 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
u64 rstart, u32 size, u32 flags)
{
struct tsi721_device *priv = mport->priv;
- int i;
+ int i, avail = -1;
u32 regval;
+ struct tsi721_ib_win *ib_win;
+ bool direct = (lstart == rstart);
+ u64 ibw_size;
+ dma_addr_t loc_start;
+ u64 ibw_start;
+ struct tsi721_ib_win_mapping *map = NULL;
+ int ret = -EBUSY;
+
+ if (direct) {
+ /* Calculate minimal acceptable window size and base address */
+
+ ibw_size = roundup_pow_of_two(size);
+ ibw_start = lstart & ~(ibw_size - 1);
+
+ tsi_debug(IBW, &priv->pdev->dev,
+ "Direct (RIO_0x%llx -> PCIe_0x%pad), size=0x%x, ibw_start = 0x%llx",
+ rstart, &lstart, size, ibw_start);
+
+ while ((lstart + size) > (ibw_start + ibw_size)) {
+ ibw_size *= 2;
+ ibw_start = lstart & ~(ibw_size - 1);
+ if (ibw_size > 0x80000000) { /* Limit max size to 2GB */
+ return -EBUSY;
+ }
+ }
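+ /*
+ * Worked example (illustrative): lstart=0x3000, size=0x2000 starts
+ * as a 0x2000 window @ 0x2000; since 0x5000 > 0x4000 it doubles to
+ * 0x4000 @ 0x0 and again to 0x8000 @ 0x0, which covers the request.
+ */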
- if (!is_power_of_2(size) || size < 0x1000 ||
- ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
- return -EINVAL;
+ loc_start = ibw_start;
+
+ map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC);
+ if (map == NULL)
+ return -ENOMEM;
+
+ } else {
+ tsi_debug(IBW, &priv->pdev->dev,
+ "Translated (RIO_0x%llx -> PCIe_0x%pad), size=0x%x",
+ rstart, &lstart, size);
+
+ if (!is_power_of_2(size) || size < 0x1000 ||
+ ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
+ return -EINVAL;
+ if (priv->ibwin_cnt == 0)
+ return -EBUSY;
+ ibw_start = rstart;
+ ibw_size = size;
+ loc_start = lstart;
+ }
- /* Search for free inbound translation window */
+ /*
+ * Scan for overlapping with active regions and mark the first available
+ * IB window at the same time.
+ */
for (i = 0; i < TSI721_IBWIN_NUM; i++) {
- regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
- if (!(regval & TSI721_IBWIN_LB_WEN))
+ ib_win = &priv->ib_win[i];
+
+ if (!ib_win->active) {
+ if (avail == -1) {
+ avail = i;
+ ret = 0;
+ }
+ } else if (ibw_start < (ib_win->rstart + ib_win->size) &&
+ (ibw_start + ibw_size) > ib_win->rstart) {
+ /* Return an error if either mapping involves address translation */
+ if (!direct || ib_win->xlat) {
+ ret = -EFAULT;
+ break;
+ }
+
+ /*
+ * Direct mappings usually are larger than originally
+ * requested fragments - check if this new request fits
+ * into it.
+ */
+ if (rstart >= ib_win->rstart &&
+ (rstart + size) <= (ib_win->rstart +
+ ib_win->size)) {
+ /* We are in - no further mapping required */
+ map->lstart = lstart;
+ list_add_tail(&map->node, &ib_win->mappings);
+ return 0;
+ }
+
+ ret = -EFAULT;
break;
+ }
}
- if (i >= TSI721_IBWIN_NUM) {
- dev_err(&priv->pdev->dev,
- "Unable to find free inbound window\n");
- return -EBUSY;
+ if (ret)
+ goto out;
+ i = avail;
+
+ /* Sanity check: available IB window must be disabled at this point */
+ regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
+ if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ib_win = &priv->ib_win[i];
+ ib_win->active = true;
+ ib_win->rstart = ibw_start;
+ ib_win->lstart = loc_start;
+ ib_win->size = ibw_size;
+ ib_win->xlat = (lstart != rstart);
+ INIT_LIST_HEAD(&ib_win->mappings);
+
+ /*
+ * When a direct IBW mapping is larger than the requested region,
+ * multiple local memory blocks may be served by the same IBW. To
+ * handle this situation we maintain a list of "clients" per IBW.
+ */
+ if (direct) {
+ map->lstart = lstart;
+ list_add_tail(&map->node, &ib_win->mappings);
}
- iowrite32(TSI721_IBWIN_SIZE(size) << 8,
+ iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8,
priv->regs + TSI721_IBWIN_SZ(i));
- iowrite32(((u64)lstart >> 32), priv->regs + TSI721_IBWIN_TUA(i));
- iowrite32(((u64)lstart & TSI721_IBWIN_TLA_ADD),
+ iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i));
+ iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD),
priv->regs + TSI721_IBWIN_TLA(i));
- iowrite32(rstart >> 32, priv->regs + TSI721_IBWIN_UB(i));
- iowrite32((rstart & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
+ iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i));
+ iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
priv->regs + TSI721_IBWIN_LB(i));
- dev_dbg(&priv->pdev->dev,
- "Configured IBWIN%d mapping (RIO_0x%llx -> PCIe_0x%llx)\n",
- i, rstart, (unsigned long long)lstart);
+
+ priv->ibwin_cnt--;
+
+ tsi_debug(IBW, &priv->pdev->dev,
+ "Configured IBWIN%d (RIO_0x%llx -> PCIe_0x%pad), size=0x%llx",
+ i, ibw_start, &loc_start, ibw_size);
return 0;
+out:
+ kfree(map);
+ return ret;
}
/**
- * fsl_rio_unmap_inb_mem -- Unmapping inbound memory region.
+ * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region.
* @mport: RapidIO master port
* @lstart: Local memory space start address.
*/
@@ -931,25 +1233,56 @@ static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
dma_addr_t lstart)
{
struct tsi721_device *priv = mport->priv;
+ struct tsi721_ib_win *ib_win;
int i;
- u64 addr;
- u32 regval;
+
+ tsi_debug(IBW, &priv->pdev->dev,
+ "Unmap IBW mapped to PCIe_0x%pad", &lstart);
/* Search for matching active inbound translation window */
for (i = 0; i < TSI721_IBWIN_NUM; i++) {
- regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
- if (regval & TSI721_IBWIN_LB_WEN) {
- regval = ioread32(priv->regs + TSI721_IBWIN_TUA(i));
- addr = (u64)regval << 32;
- regval = ioread32(priv->regs + TSI721_IBWIN_TLA(i));
- addr |= regval & TSI721_IBWIN_TLA_ADD;
-
- if (addr == (u64)lstart) {
- iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
- break;
+ ib_win = &priv->ib_win[i];
+
+ /* Address-translating IBWs must be an exact match */
+ if (!ib_win->active ||
+ (ib_win->xlat && lstart != ib_win->lstart))
+ continue;
+
+ if (lstart >= ib_win->lstart &&
+ lstart < (ib_win->lstart + ib_win->size)) {
+
+ if (!ib_win->xlat) {
+ struct tsi721_ib_win_mapping *map;
+ int found = 0;
+
+ list_for_each_entry(map,
+ &ib_win->mappings, node) {
+ if (map->lstart == lstart) {
+ list_del(&map->node);
+ kfree(map);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ continue;
+
+ if (!list_empty(&ib_win->mappings))
+ break;
}
+
+ tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i);
+ iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
+ ib_win->active = false;
+ priv->ibwin_cnt++;
+ break;
}
}
+
+ if (i == TSI721_IBWIN_NUM)
+ tsi_debug(IBW, &priv->pdev->dev,
+ "IB window mapped to %pad not found", &lstart);
}
/**
@@ -966,6 +1299,27 @@ static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
/* Disable all SR2PC inbound windows */
for (i = 0; i < TSI721_IBWIN_NUM; i++)
iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
+ priv->ibwin_cnt = TSI721_IBWIN_NUM;
+}
+
+/*
+ * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe)
+ * translation regions.
+ * @priv: pointer to tsi721 device private data
+ */
+static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv)
+{
+ struct tsi721_ib_win *ib_win;
+ int i;
+
+ /* Disable all active SR2PC inbound windows */
+ for (i = 0; i < TSI721_IBWIN_NUM; i++) {
+ ib_win = &priv->ib_win[i];
+ if (ib_win->active) {
+ iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
+ ib_win->active = false;
+ }
+ }
}
/**
@@ -982,7 +1336,7 @@ static int tsi721_port_write_init(struct tsi721_device *priv)
spin_lock_init(&priv->pw_fifo_lock);
if (kfifo_alloc(&priv->pw_fifo,
TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
- dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n");
+ tsi_err(&priv->pdev->dev, "PW FIFO allocation failed");
return -ENOMEM;
}
@@ -991,6 +1345,11 @@ static int tsi721_port_write_init(struct tsi721_device *priv)
return 0;
}
+static void tsi721_port_write_free(struct tsi721_device *priv)
+{
+ kfifo_free(&priv->pw_fifo);
+}
+
static int tsi721_doorbell_init(struct tsi721_device *priv)
{
/* Outbound Doorbells do not require any setup.
@@ -1009,8 +1368,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
if (!priv->idb_base)
return -ENOMEM;
- dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
- priv->idb_base, (unsigned long long)priv->idb_dma);
+ tsi_debug(DBELL, &priv->pdev->dev,
+ "Allocated IDB buffer @ %p (phys = %pad)",
+ priv->idb_base, &priv->idb_dma);
iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
@@ -1056,9 +1416,8 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
int bd_num = 2;
void __iomem *regs;
- dev_dbg(&priv->pdev->dev,
- "Init Block DMA Engine for Maintenance requests, CH%d\n",
- TSI721_DMACH_MAINT);
+ tsi_debug(MAINT, &priv->pdev->dev,
+ "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT);
/*
* Initialize DMA channel for maintenance requests
@@ -1078,8 +1437,8 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
priv->mdma.bd_phys = bd_phys;
priv->mdma.bd_base = bd_ptr;
- dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
- bd_ptr, (unsigned long long)bd_phys);
+ tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)",
+ bd_ptr, &bd_phys);
/* Allocate space for descriptor status FIFO */
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
@@ -1101,9 +1460,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
priv->mdma.sts_base = sts_ptr;
priv->mdma.sts_size = sts_size;
- dev_dbg(&priv->pdev->dev,
- "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
- sts_ptr, (unsigned long long)sts_phys, sts_size);
+ tsi_debug(MAINT, &priv->pdev->dev,
+ "desc status FIFO @ %p (phys = %pad) size=0x%x",
+ sts_ptr, &sts_phys, sts_size);
/* Initialize DMA descriptors ring */
bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
@@ -1304,11 +1663,14 @@ tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
struct tsi721_device *priv = mport->priv;
struct tsi721_omsg_desc *desc;
u32 tx_slot;
+ unsigned long flags;
if (!priv->omsg_init[mbox] ||
len > TSI721_MSG_MAX_SIZE || len < 8)
return -EINVAL;
+ spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags);
+
tx_slot = priv->omsg_ring[mbox].tx_slot;
/* Copy message into transfer buffer */
@@ -1320,9 +1682,11 @@ tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
/* Build descriptor associated with buffer */
desc = priv->omsg_ring[mbox].omd_base;
desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
+#ifdef TSI721_OMSG_DESC_INT
+ /* Request IOF_DONE interrupt generation for every 4th frame in queue */
if (tx_slot % 4 == 0)
desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
-
+#endif
desc[tx_slot].msg_info =
cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
(0xe << 12) | (len & 0xff8));
@@ -1348,6 +1712,8 @@ tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+ spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags);
+
return 0;
}
@@ -1361,20 +1727,23 @@ tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
{
u32 omsg_int;
+ struct rio_mport *mport = &priv->mport;
+ void *dev_id = NULL;
+ u32 tx_slot = 0xffffffff;
+ int do_callback = 0;
spin_lock(&priv->omsg_ring[ch].lock);
omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
- dev_info(&priv->pdev->dev,
- "OB MBOX%d: Status FIFO is full\n", ch);
+ tsi_info(&priv->pdev->dev,
+ "OB MBOX%d: Status FIFO is full", ch);
if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
u32 srd_ptr;
u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
int i, j;
- u32 tx_slot;
/*
* Find last successfully processed descriptor
@@ -1402,7 +1771,7 @@ static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
priv->omsg_ring[ch].sts_rdptr = srd_ptr;
iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
- if (!priv->mport->outb_msg[ch].mcback)
+ if (!mport->outb_msg[ch].mcback)
goto no_sts_update;
/* Inform upper layer about transfer completion */
@@ -1424,14 +1793,19 @@ static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
goto no_sts_update;
}
+ if (tx_slot >= priv->omsg_ring[ch].size)
+ tsi_debug(OMSG, &priv->pdev->dev,
+ "OB_MSG tx_slot=%x > size=%x",
+ tx_slot, priv->omsg_ring[ch].size);
+ WARN_ON(tx_slot >= priv->omsg_ring[ch].size);
+
/* Move slot index to the next message to be sent */
++tx_slot;
if (tx_slot == priv->omsg_ring[ch].size)
tx_slot = 0;
- BUG_ON(tx_slot >= priv->omsg_ring[ch].size);
- priv->mport->outb_msg[ch].mcback(priv->mport,
- priv->omsg_ring[ch].dev_id, ch,
- tx_slot);
+
+ dev_id = priv->omsg_ring[ch].dev_id;
+ do_callback = 1;
}
no_sts_update:
@@ -1442,20 +1816,20 @@ no_sts_update:
* reinitialize OB MSG channel
*/
- dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n",
- ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
+ tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x",
+ ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
iowrite32(TSI721_OBDMAC_INT_ERROR,
priv->regs + TSI721_OBDMAC_INT(ch));
- iowrite32(TSI721_OBDMAC_CTL_INIT,
+ iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
priv->regs + TSI721_OBDMAC_CTL(ch));
ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
/* Inform upper level to clear all pending tx slots */
- if (priv->mport->outb_msg[ch].mcback)
- priv->mport->outb_msg[ch].mcback(priv->mport,
- priv->omsg_ring[ch].dev_id, ch,
- priv->omsg_ring[ch].tx_slot);
+ dev_id = priv->omsg_ring[ch].dev_id;
+ tx_slot = priv->omsg_ring[ch].tx_slot;
+ do_callback = 1;
+
/* Synch tx_slot tracking */
iowrite32(priv->omsg_ring[ch].tx_slot,
priv->regs + TSI721_OBDMAC_DRDCNT(ch));
@@ -1477,6 +1851,9 @@ no_sts_update:
}
spin_unlock(&priv->omsg_ring[ch].lock);
+
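+ /*
+ * Invoke the client callback outside of the ring lock: the callback
+ * typically queues the next message via tsi721_add_outb_message(),
+ * which now takes the same lock.
+ */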
+ if (mport->outb_msg[ch].mcback && do_callback)
+ mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot);
}
/**
@@ -1514,9 +1891,8 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
&priv->omsg_ring[mbox].omq_phys[i],
GFP_KERNEL);
if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
- dev_dbg(&priv->pdev->dev,
- "Unable to allocate OB MSG data buffer for"
- " MBOX%d\n", mbox);
+ tsi_debug(OMSG, &priv->pdev->dev,
+ "ENOMEM for OB_MSG_%d data buffer", mbox);
rc = -ENOMEM;
goto out_buf;
}
@@ -1528,9 +1904,8 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
(entries + 1) * sizeof(struct tsi721_omsg_desc),
&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
if (priv->omsg_ring[mbox].omd_base == NULL) {
- dev_dbg(&priv->pdev->dev,
- "Unable to allocate OB MSG descriptor memory "
- "for MBOX%d\n", mbox);
+ tsi_debug(OMSG, &priv->pdev->dev,
+ "ENOMEM for OB_MSG_%d descriptor memory", mbox);
rc = -ENOMEM;
goto out_buf;
}
@@ -1544,9 +1919,8 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
sizeof(struct tsi721_dma_sts),
&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
if (priv->omsg_ring[mbox].sts_base == NULL) {
- dev_dbg(&priv->pdev->dev,
- "Unable to allocate OB MSG descriptor status FIFO "
- "for MBOX%d\n", mbox);
+ tsi_debug(OMSG, &priv->pdev->dev,
+ "ENOMEM for OB_MSG_%d status FIFO", mbox);
rc = -ENOMEM;
goto out_desc;
}
@@ -1575,32 +1949,28 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
+ int idx = TSI721_VECT_OMB0_DONE + mbox;
+
/* Request interrupt service if we are in MSI-X mode */
- rc = request_irq(
- priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
- tsi721_omsg_msix, 0,
- priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name,
- (void *)mport);
+ rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
+ priv->msix[idx].irq_name, (void *)priv);
if (rc) {
- dev_dbg(&priv->pdev->dev,
- "Unable to allocate MSI-X interrupt for "
- "OBOX%d-DONE\n", mbox);
+ tsi_debug(OMSG, &priv->pdev->dev,
+ "Unable to get MSI-X IRQ for OBOX%d-DONE",
+ mbox);
goto out_stat;
}
- rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
- tsi721_omsg_msix, 0,
- priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name,
- (void *)mport);
+ idx = TSI721_VECT_OMB0_INT + mbox;
+ rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
+ priv->msix[idx].irq_name, (void *)priv);
if (rc) {
- dev_dbg(&priv->pdev->dev,
- "Unable to allocate MSI-X interrupt for "
- "MBOX%d-INT\n", mbox);
- free_irq(
- priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
- (void *)mport);
+ tsi_debug(OMSG, &priv->pdev->dev,
+ "Unable to get MSI-X IRQ for MBOX%d-INT", mbox);
+ idx = TSI721_VECT_OMB0_DONE + mbox;
+ free_irq(priv->msix[idx].vector, (void *)priv);
goto out_stat;
}
}
@@ -1621,7 +1991,8 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
mb();
/* Initialize Outbound Message engine */
- iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox));
+ iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
+ priv->regs + TSI721_OBDMAC_CTL(mbox));
ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
udelay(10);
@@ -1684,9 +2055,9 @@ static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
- (void *)mport);
+ (void *)priv);
free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
- (void *)mport);
+ (void *)priv);
}
#endif /* CONFIG_PCI_MSI */
@@ -1731,30 +2102,28 @@ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
{
u32 mbox = ch - 4;
u32 imsg_int;
+ struct rio_mport *mport = &priv->mport;
spin_lock(&priv->imsg_ring[mbox].lock);
imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
if (imsg_int & TSI721_IBDMAC_INT_SRTO)
- dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n",
- mbox);
+ tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox);
if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
- dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n",
- mbox);
+ tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox);
if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
- dev_info(&priv->pdev->dev,
- "IB MBOX%d IB free queue low\n", mbox);
+ tsi_info(&priv->pdev->dev, "IB MBOX%d IB free queue low", mbox);
/* Clear IB channel interrupts */
iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
/* If an IB Msg is received notify the upper layer */
if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
- priv->mport->inb_msg[mbox].mcback)
- priv->mport->inb_msg[mbox].mcback(priv->mport,
+ mport->inb_msg[mbox].mcback)
+ mport->inb_msg[mbox].mcback(mport,
priv->imsg_ring[mbox].dev_id, mbox, -1);
if (!(priv->flags & TSI721_USING_MSIX)) {
@@ -1810,8 +2179,8 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
GFP_KERNEL);
if (priv->imsg_ring[mbox].buf_base == NULL) {
- dev_err(&priv->pdev->dev,
- "Failed to allocate buffers for IB MBOX%d\n", mbox);
+ tsi_err(&priv->pdev->dev,
+ "Failed to allocate buffers for IB MBOX%d", mbox);
rc = -ENOMEM;
goto out;
}
@@ -1824,8 +2193,8 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
GFP_KERNEL);
if (priv->imsg_ring[mbox].imfq_base == NULL) {
- dev_err(&priv->pdev->dev,
- "Failed to allocate free queue for IB MBOX%d\n", mbox);
+ tsi_err(&priv->pdev->dev,
+ "Failed to allocate free queue for IB MBOX%d", mbox);
rc = -ENOMEM;
goto out_buf;
}
@@ -1837,8 +2206,8 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
&priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);
if (priv->imsg_ring[mbox].imd_base == NULL) {
- dev_err(&priv->pdev->dev,
- "Failed to allocate descriptor memory for IB MBOX%d\n",
+ tsi_err(&priv->pdev->dev,
+ "Failed to allocate descriptor memory for IB MBOX%d",
mbox);
rc = -ENOMEM;
goto out_dma;
@@ -1859,7 +2228,7 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
* once when first inbound mailbox is requested.
*/
if (!(priv->flags & TSI721_IMSGID_SET)) {
- iowrite32((u32)priv->mport->host_deviceid,
+ iowrite32((u32)priv->mport.host_deviceid,
priv->regs + TSI721_IB_DEVID);
priv->flags |= TSI721_IMSGID_SET;
}
@@ -1890,31 +2259,29 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
+ int idx = TSI721_VECT_IMB0_RCV + mbox;
+
/* Request interrupt service if we are in MSI-X mode */
- rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
- tsi721_imsg_msix, 0,
- priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name,
- (void *)mport);
+ rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
+ priv->msix[idx].irq_name, (void *)priv);
if (rc) {
- dev_dbg(&priv->pdev->dev,
- "Unable to allocate MSI-X interrupt for "
- "IBOX%d-DONE\n", mbox);
+ tsi_debug(IMSG, &priv->pdev->dev,
+ "Unable to get MSI-X IRQ for IBOX%d-DONE",
+ mbox);
goto out_desc;
}
- rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
- tsi721_imsg_msix, 0,
- priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name,
- (void *)mport);
+ idx = TSI721_VECT_IMB0_INT + mbox;
+ rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
+ priv->msix[idx].irq_name, (void *)priv);
if (rc) {
- dev_dbg(&priv->pdev->dev,
- "Unable to allocate MSI-X interrupt for "
- "IBOX%d-INT\n", mbox);
+ tsi_debug(IMSG, &priv->pdev->dev,
+ "Unable to get MSI-X IRQ for IBOX%d-INT", mbox);
free_irq(
priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
- (void *)mport);
+ (void *)priv);
goto out_desc;
}
}
@@ -1985,9 +2352,9 @@ static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
- (void *)mport);
+ (void *)priv);
free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
- (void *)mport);
+ (void *)priv);
}
#endif /* CONFIG_PCI_MSI */
@@ -2034,8 +2401,8 @@ static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
rx_slot = priv->imsg_ring[mbox].rx_slot;
if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
- dev_err(&priv->pdev->dev,
- "Error adding inbound buffer %d, buffer exists\n",
+ tsi_err(&priv->pdev->dev,
+ "Error adding inbound buffer %d, buffer exists",
rx_slot);
rc = -EINVAL;
goto out;
@@ -2153,6 +2520,39 @@ static int tsi721_messages_init(struct tsi721_device *priv)
}
/**
+ * tsi721_query_mport - Fetch Tsi721 mport attributes
+ * @mport: Master port to query
+ * @attr: mport attributes structure to fill in
+ *
+ * Reports port link status, speed and width, and DMA engine capabilities.
+ * Always returns 0.
+ */
+static int tsi721_query_mport(struct rio_mport *mport,
+ struct rio_mport_attr *attr)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 rval;
+
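+ /* Port 0 CSRs live at the 0x100 extended-features offset (phys_efptr) */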
+ rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_ERR_STS_CSR(0)));
+ if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
+ rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL2_CSR(0)));
+ attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
+ rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL_CSR(0)));
+ attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
+ } else
+ attr->link_speed = RIO_LINK_DOWN;
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG;
+ attr->dma_max_sge = 0;
+ attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT;
+ attr->dma_align = 0;
+#else
+ attr->flags = 0;
+#endif
+ return 0;
+}
+
+/**
* tsi721_disable_ints - disables all device interrupts
* @priv: pointer to tsi721 private data
*/
@@ -2203,6 +2603,34 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
}
+static struct rio_ops tsi721_rio_ops = {
+ .lcread = tsi721_lcread,
+ .lcwrite = tsi721_lcwrite,
+ .cread = tsi721_cread_dma,
+ .cwrite = tsi721_cwrite_dma,
+ .dsend = tsi721_dsend,
+ .open_inb_mbox = tsi721_open_inb_mbox,
+ .close_inb_mbox = tsi721_close_inb_mbox,
+ .open_outb_mbox = tsi721_open_outb_mbox,
+ .close_outb_mbox = tsi721_close_outb_mbox,
+ .add_outb_message = tsi721_add_outb_message,
+ .add_inb_buffer = tsi721_add_inb_buffer,
+ .get_inb_message = tsi721_get_inb_message,
+ .map_inb = tsi721_rio_map_inb_mem,
+ .unmap_inb = tsi721_rio_unmap_inb_mem,
+ .pwenable = tsi721_pw_enable,
+ .query_mport = tsi721_query_mport,
+ .map_outb = tsi721_map_outb_win,
+ .unmap_outb = tsi721_unmap_outb_win,
+};
+
+static void tsi721_mport_release(struct device *dev)
+{
+ struct rio_mport *mport = to_rio_mport(dev);
+
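+ /* mport is embedded in tsi721_device; memory is freed in tsi721_remove() */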
+ tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id);
+}
+
/**
* tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
* @priv: pointer to tsi721 private data
@@ -2213,46 +2641,20 @@ static int tsi721_setup_mport(struct tsi721_device *priv)
{
struct pci_dev *pdev = priv->pdev;
int err = 0;
- struct rio_ops *ops;
-
- struct rio_mport *mport;
+ struct rio_mport *mport = &priv->mport;
- ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
- if (!ops) {
- dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n");
- return -ENOMEM;
- }
-
- ops->lcread = tsi721_lcread;
- ops->lcwrite = tsi721_lcwrite;
- ops->cread = tsi721_cread_dma;
- ops->cwrite = tsi721_cwrite_dma;
- ops->dsend = tsi721_dsend;
- ops->open_inb_mbox = tsi721_open_inb_mbox;
- ops->close_inb_mbox = tsi721_close_inb_mbox;
- ops->open_outb_mbox = tsi721_open_outb_mbox;
- ops->close_outb_mbox = tsi721_close_outb_mbox;
- ops->add_outb_message = tsi721_add_outb_message;
- ops->add_inb_buffer = tsi721_add_inb_buffer;
- ops->get_inb_message = tsi721_get_inb_message;
- ops->map_inb = tsi721_rio_map_inb_mem;
- ops->unmap_inb = tsi721_rio_unmap_inb_mem;
-
- mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
- if (!mport) {
- kfree(ops);
- dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n");
- return -ENOMEM;
- }
+ err = rio_mport_initialize(mport);
+ if (err)
+ return err;
- mport->ops = ops;
+ mport->ops = &tsi721_rio_ops;
mport->index = 0;
mport->sys_size = 0; /* small system */
mport->phy_type = RIO_PHY_SERIAL;
mport->priv = (void *)priv;
mport->phys_efptr = 0x100;
mport->dev.parent = &pdev->dev;
- priv->mport = mport;
+ mport->dev.release = tsi721_mport_release;
INIT_LIST_HEAD(&mport->dbells);
@@ -2270,31 +2672,28 @@ static int tsi721_setup_mport(struct tsi721_device *priv)
else if (!pci_enable_msi(pdev))
priv->flags |= TSI721_USING_MSI;
else
- dev_info(&pdev->dev,
- "MSI/MSI-X is not available. Using legacy INTx.\n");
+ tsi_debug(MPORT, &pdev->dev,
+ "MSI/MSI-X is not available. Using legacy INTx.");
#endif /* CONFIG_PCI_MSI */
- err = tsi721_request_irq(mport);
+ err = tsi721_request_irq(priv);
- if (!err) {
- tsi721_interrupts_init(priv);
- ops->pwenable = tsi721_pw_enable;
- } else {
- dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
- "vector %02X err=0x%x\n", pdev->irq, err);
- goto err_exit;
+ if (err) {
+ tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)",
+ pdev->irq, err);
+ return err;
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
- tsi721_register_dma(priv);
+ err = tsi721_register_dma(priv);
+ if (err)
+ goto err_exit;
#endif
/* Enable SRIO link */
iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
TSI721_DEVCTL_SRBOOT_CMPL,
priv->regs + TSI721_DEVCTL);
- rio_register_mport(mport);
-
if (mport->host_deviceid >= 0)
iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
RIO_PORT_GEN_DISCOVERED,
@@ -2302,11 +2701,16 @@ static int tsi721_setup_mport(struct tsi721_device *priv)
else
iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
+ err = rio_register_mport(mport);
+ if (err) {
+ tsi721_unregister_dma(priv);
+ goto err_exit;
+ }
+
return 0;
err_exit:
- kfree(mport);
- kfree(ops);
+ tsi721_free_irq(priv);
return err;
}
@@ -2317,15 +2721,14 @@ static int tsi721_probe(struct pci_dev *pdev,
int err;
priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
- if (priv == NULL) {
- dev_err(&pdev->dev, "Failed to allocate memory for device\n");
+ if (!priv) {
err = -ENOMEM;
goto err_exit;
}
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ tsi_err(&pdev->dev, "Failed to enable PCI device");
goto err_clean;
}
@@ -2333,13 +2736,12 @@ static int tsi721_probe(struct pci_dev *pdev,
#ifdef DEBUG
{
- int i;
- for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
- dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
- i, (unsigned long long)pci_resource_start(pdev, i),
- (unsigned long)pci_resource_len(pdev, i),
- pci_resource_flags(pdev, i));
- }
+ int i;
+
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ tsi_debug(INIT, &pdev->dev, "res%d %pR",
+ i, &pdev->resource[i]);
+ }
}
#endif
/*
@@ -2350,8 +2752,7 @@ static int tsi721_probe(struct pci_dev *pdev,
if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
- dev_err(&pdev->dev,
- "Missing or misconfigured CSR BAR0, aborting.\n");
+ tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0");
err = -ENODEV;
goto err_disable_pdev;
}
@@ -2360,8 +2761,7 @@ static int tsi721_probe(struct pci_dev *pdev,
if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
- dev_err(&pdev->dev,
- "Missing or misconfigured Doorbell BAR1, aborting.\n");
+ tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1");
err = -ENODEV;
goto err_disable_pdev;
}
@@ -2373,20 +2773,32 @@ static int tsi721_probe(struct pci_dev *pdev,
* It may be a good idea to keep them disabled using HW configuration
* to save PCI memory space.
*/
- if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) &&
- (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) {
- dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n");
+
+ priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0;
+
+ if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) {
+ if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH)
+ tsi_debug(INIT, &pdev->dev,
+ "Prefetchable OBW BAR2 will not be used");
+ else {
+ priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2);
+ priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2);
+ }
}
- if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) &&
- (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) {
- dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n");
+ if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) {
+ if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH)
+ tsi_debug(INIT, &pdev->dev,
+ "Prefetchable OBW BAR4 will not be used");
+ else {
+ priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4);
+ priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4);
+ }
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- dev_err(&pdev->dev, "Cannot obtain PCI resources, "
- "aborting.\n");
+ tsi_err(&pdev->dev, "Unable to obtain PCI resources");
goto err_disable_pdev;
}
@@ -2394,16 +2806,14 @@ static int tsi721_probe(struct pci_dev *pdev,
priv->regs = pci_ioremap_bar(pdev, BAR_0);
if (!priv->regs) {
- dev_err(&pdev->dev,
- "Unable to map device registers space, aborting\n");
+ tsi_err(&pdev->dev, "Unable to map device registers space");
err = -ENOMEM;
goto err_free_res;
}
priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
if (!priv->odb_base) {
- dev_err(&pdev->dev,
- "Unable to map outbound doorbells space, aborting\n");
+ tsi_err(&pdev->dev, "Unable to map outbound doorbells space");
err = -ENOMEM;
goto err_unmap_bars;
}
@@ -2412,25 +2822,23 @@ static int tsi721_probe(struct pci_dev *pdev,
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_info(&pdev->dev, "Unable to set DMA mask\n");
+ tsi_err(&pdev->dev, "Unable to set DMA mask");
goto err_unmap_bars;
}
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
+ tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
} else {
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err)
- dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
+ tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
}
BUG_ON(!pci_is_pcie(pdev));
- /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ /* Clear "no snoop" and "relaxed ordering" bits. */
pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
- PCI_EXP_DEVCTL_NOSNOOP_EN,
- PCI_EXP_DEVCTL_READRQ_512B);
+ PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
/* Adjust PCIe completion timeout. */
pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
@@ -2452,7 +2860,7 @@ static int tsi721_probe(struct pci_dev *pdev,
tsi721_init_sr2pc_mapping(priv);
if (tsi721_bdma_maint_init(priv)) {
- dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
+ tsi_err(&pdev->dev, "BDMA initialization failed");
err = -ENOMEM;
goto err_unmap_bars;
}
@@ -2471,9 +2879,13 @@ static int tsi721_probe(struct pci_dev *pdev,
if (err)
goto err_free_consistent;
+ pci_set_drvdata(pdev, priv);
+ tsi721_interrupts_init(priv);
+
return 0;
err_free_consistent:
+ tsi721_port_write_free(priv);
tsi721_doorbell_free(priv);
err_free_bdma:
tsi721_bdma_maint_free(priv);
@@ -2493,6 +2905,53 @@ err_exit:
return err;
}
+static void tsi721_remove(struct pci_dev *pdev)
+{
+ struct tsi721_device *priv = pci_get_drvdata(pdev);
+
+ tsi_debug(EXIT, &pdev->dev, "enter");
+
+ tsi721_disable_ints(priv);
+ tsi721_free_irq(priv);
+ flush_scheduled_work();
+ rio_unregister_mport(&priv->mport);
+
+ tsi721_unregister_dma(priv);
+ tsi721_bdma_maint_free(priv);
+ tsi721_doorbell_free(priv);
+ tsi721_port_write_free(priv);
+ tsi721_close_sr2pc_mapping(priv);
+
+ if (priv->regs)
+ iounmap(priv->regs);
+ if (priv->odb_base)
+ iounmap(priv->odb_base);
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX)
+ pci_disable_msix(priv->pdev);
+ else if (priv->flags & TSI721_USING_MSI)
+ pci_disable_msi(priv->pdev);
+#endif
+ pci_release_regions(pdev);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ kfree(priv);
+ tsi_debug(EXIT, &pdev->dev, "exit");
+}
+
+static void tsi721_shutdown(struct pci_dev *pdev)
+{
+ struct tsi721_device *priv = pci_get_drvdata(pdev);
+
+ tsi_debug(EXIT, &pdev->dev, "enter");
+
+ tsi721_disable_ints(priv);
+ tsi721_dma_stop_all(priv);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
static const struct pci_device_id tsi721_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
{ 0, } /* terminate list */
@@ -2504,14 +2963,11 @@ static struct pci_driver tsi721_driver = {
.name = "tsi721",
.id_table = tsi721_pci_tbl,
.probe = tsi721_probe,
+ .remove = tsi721_remove,
+ .shutdown = tsi721_shutdown,
};
-static int __init tsi721_init(void)
-{
- return pci_register_driver(&tsi721_driver);
-}
-
-device_initcall(tsi721_init);
+module_pci_driver(tsi721_driver);
MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver");
MODULE_AUTHOR("Integrated Device Technology, Inc.");
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 9d2502543ef6..5456dbddc929 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -21,6 +21,46 @@
#ifndef __TSI721_H
#define __TSI721_H
+/* Debug output filtering masks */
+enum {
+ DBG_NONE = 0,
+ DBG_INIT = BIT(0), /* driver init */
+ DBG_EXIT = BIT(1), /* driver exit */
+ DBG_MPORT = BIT(2), /* mport add/remove */
+ DBG_MAINT = BIT(3), /* maintenance ops messages */
+ DBG_DMA = BIT(4), /* DMA transfer messages */
+ DBG_DMAV = BIT(5), /* verbose DMA transfer messages */
+ DBG_IBW = BIT(6), /* inbound window */
+ DBG_EVENT = BIT(7), /* event handling messages */
+ DBG_OBW = BIT(8), /* outbound window messages */
+ DBG_DBELL = BIT(9), /* doorbell messages */
+ DBG_OMSG = BIT(10), /* outbound messaging */
+ DBG_IMSG = BIT(11), /* inbound messaging */
+ DBG_ALL = ~0,
+};
+
+#ifdef DEBUG
+extern u32 dbg_level;
+
+#define tsi_debug(level, dev, fmt, arg...) \
+ do { \
+ if (DBG_##level & dbg_level) \
+ dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \
+ } while (0)
+#else
+#define tsi_debug(level, dev, fmt, arg...) \
+ no_printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##arg)
+#endif
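+
+/*
+ * Example (illustrative): with dbg_level set to DBG_DMA, only
+ * tsi_debug(DMA, ...) calls produce output, e.g.:
+ *
+ *   tsi_debug(DMA, &priv->pdev->dev,
+ *             "IRQ from DMA channel 0x%08x", dev_ch_int);
+ */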
+
+#define tsi_info(dev, fmt, arg...) \
+ dev_info(dev, "%s: " fmt "\n", __func__, ##arg)
+
+#define tsi_warn(dev, fmt, arg...) \
+ dev_warn(dev, "%s: WARNING " fmt "\n", __func__, ##arg)
+
+#define tsi_err(dev, fmt, arg...) \
+ dev_err(dev, "%s: ERROR " fmt "\n", __func__, ##arg)
+
#define DRV_NAME "tsi721"
#define DEFAULT_HOPCOUNT 0xff
@@ -674,7 +714,7 @@ struct tsi721_bdma_chan {
struct dma_chan dchan;
struct tsi721_tx_desc *tx_desc;
spinlock_t lock;
- struct list_head active_list;
+ struct tsi721_tx_desc *active_tx;
struct list_head queue;
struct list_head free_list;
struct tasklet_struct tasklet;
@@ -808,9 +848,38 @@ struct msix_irq {
};
#endif /* CONFIG_PCI_MSI */
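+/* One client mapping tracked within a shared direct-mapped inbound window */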
+struct tsi721_ib_win_mapping {
+ struct list_head node;
+ dma_addr_t lstart;
+};
+
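+/* Inbound (SRIO->PCIe) translation window state */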
+struct tsi721_ib_win {
+ u64 rstart;
+ u32 size;
+ dma_addr_t lstart;
+ bool active;
+ bool xlat;
+ struct list_head mappings;
+};
+
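+/* PCIe BAR that backs outbound (PCIe->SRIO) windows */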
+struct tsi721_obw_bar {
+ u64 base;
+ u64 size;
+ u64 free;
+};
+
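+/* Outbound (PCIe->SRIO) translation window state */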
+struct tsi721_ob_win {
+ u64 base;
+ u32 size;
+ u16 destid;
+ u64 rstart;
+ bool active;
+ struct tsi721_obw_bar *pbar;
+};
+
struct tsi721_device {
struct pci_dev *pdev;
- struct rio_mport *mport;
+ struct rio_mport mport;
u32 flags;
void __iomem *regs;
#ifdef CONFIG_PCI_MSI
@@ -843,11 +912,25 @@ struct tsi721_device {
/* Outbound Messaging */
int omsg_init[TSI721_OMSG_CHNUM];
struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
+
+ /* Inbound Mapping Windows */
+ struct tsi721_ib_win ib_win[TSI721_IBWIN_NUM];
+ int ibwin_cnt;
+
+ /* Outbound Mapping Windows */
+ struct tsi721_obw_bar p2r_bar[2];
+ struct tsi721_ob_win ob_win[TSI721_OBWIN_NUM];
+ int obwin_cnt;
};
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
extern int tsi721_register_dma(struct tsi721_device *priv);
+extern void tsi721_unregister_dma(struct tsi721_device *priv);
+extern void tsi721_dma_stop_all(struct tsi721_device *priv);
+#else
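+/* No-op stubs when RapidIO DMA engine support is not built in */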
+#define tsi721_dma_stop_all(priv) do {} while (0)
+#define tsi721_unregister_dma(priv) do {} while (0)
#endif
#endif
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 47295940a868..155cae1e62de 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -30,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
+#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"
@@ -63,14 +64,6 @@ struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
return container_of(txd, struct tsi721_tx_desc, txd);
}
-static inline
-struct tsi721_tx_desc *tsi721_dma_first_active(
- struct tsi721_bdma_chan *bdma_chan)
-{
- return list_first_entry(&bdma_chan->active_list,
- struct tsi721_tx_desc, desc_node);
-}
-
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
struct tsi721_dma_desc *bd_ptr;
@@ -83,7 +76,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif
- dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);
/*
* Allocate space for DMA descriptors
@@ -91,7 +84,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
*/
bd_ptr = dma_zalloc_coherent(dev,
(bd_num + 1) * sizeof(struct tsi721_dma_desc),
- &bd_phys, GFP_KERNEL);
+ &bd_phys, GFP_ATOMIC);
if (!bd_ptr)
return -ENOMEM;
@@ -99,8 +92,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
bdma_chan->bd_phys = bd_phys;
bdma_chan->bd_base = bd_ptr;
- dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
- bd_ptr, (unsigned long long)bd_phys);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+ "DMAC%d descriptors @ %p (phys = %pad)",
+ bdma_chan->id, bd_ptr, &bd_phys);
/* Allocate space for descriptor status FIFO */
sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
@@ -108,7 +102,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
sts_size = roundup_pow_of_two(sts_size);
sts_ptr = dma_zalloc_coherent(dev,
sts_size * sizeof(struct tsi721_dma_sts),
- &sts_phys, GFP_KERNEL);
+ &sts_phys, GFP_ATOMIC);
if (!sts_ptr) {
/* Free space allocated for DMA descriptors */
dma_free_coherent(dev,
@@ -122,9 +116,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
bdma_chan->sts_base = sts_ptr;
bdma_chan->sts_size = sts_size;
- dev_dbg(dev,
- "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
- sts_ptr, (unsigned long long)sts_phys, sts_size);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+ "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
+ bdma_chan->id, sts_ptr, &sts_phys, sts_size);
/* Initialize DMA descriptors ring using added link descriptor */
bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
@@ -163,8 +157,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
priv->msix[idx].irq_name, (void *)bdma_chan);
if (rc) {
- dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
- bdma_chan->id);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+ "Unable to get MSI-X for DMAC%d-DONE",
+ bdma_chan->id);
goto err_out;
}
@@ -174,8 +169,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
priv->msix[idx].irq_name, (void *)bdma_chan);
if (rc) {
- dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
- bdma_chan->id);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+ "Unable to get MSI-X for DMAC%d-INT",
+ bdma_chan->id);
free_irq(
priv->msix[TSI721_VECT_DMA0_DONE +
bdma_chan->id].vector,
@@ -286,7 +282,7 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
/* Disable BDMA channel interrupts */
iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
if (bdma_chan->active)
- tasklet_schedule(&bdma_chan->tasklet);
+ tasklet_hi_schedule(&bdma_chan->tasklet);
}
#ifdef CONFIG_PCI_MSI
@@ -301,7 +297,8 @@ static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
struct tsi721_bdma_chan *bdma_chan = ptr;
- tsi721_bdma_handler(bdma_chan);
+ if (bdma_chan->active)
+ tasklet_hi_schedule(&bdma_chan->tasklet);
return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
@@ -310,20 +307,22 @@ static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
if (!tsi721_dma_is_idle(bdma_chan)) {
- dev_err(bdma_chan->dchan.device->dev,
- "BUG: Attempt to start non-idle channel\n");
+ tsi_err(&bdma_chan->dchan.dev->device,
+ "DMAC%d Attempt to start non-idle channel",
+ bdma_chan->id);
return;
}
if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
- dev_err(bdma_chan->dchan.device->dev,
- "BUG: Attempt to start DMA with no BDs ready\n");
+ tsi_err(&bdma_chan->dchan.dev->device,
+ "DMAC%d Attempt to start DMA with no BDs ready %d",
+ bdma_chan->id, task_pid_nr(current));
return;
}
- dev_dbg(bdma_chan->dchan.device->dev,
- "%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
- bdma_chan->wr_count_next);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
+ bdma_chan->id, bdma_chan->wr_count_next,
+ task_pid_nr(current));
iowrite32(bdma_chan->wr_count_next,
bdma_chan->regs + TSI721_DMAC_DWRCNT);
@@ -425,10 +424,11 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
struct tsi721_dma_desc *bd_ptr = NULL;
u32 idx, rd_idx;
u32 add_count = 0;
+ struct device *ch_dev = &dchan->dev->device;
if (!tsi721_dma_is_idle(bdma_chan)) {
- dev_err(bdma_chan->dchan.device->dev,
- "BUG: Attempt to use non-idle channel\n");
+ tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
+ bdma_chan->id);
return -EIO;
}
@@ -439,7 +439,7 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
rio_addr = desc->rio_addr;
next_addr = -1;
bcount = 0;
- sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;
+ sys_size = dma_to_mport(dchan->device)->sys_size;
rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
rd_idx %= (bdma_chan->bd_num + 1);
@@ -451,18 +451,18 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
add_count++;
}
- dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
- __func__, rd_idx, idx);
+ tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
+ bdma_chan->id, rd_idx, idx);
for_each_sg(desc->sg, sg, desc->sg_len, i) {
- dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
- i, desc->sg_len,
+ tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
+ bdma_chan->id, i, desc->sg_len,
(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));
if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
- dev_err(dchan->device->dev,
- "%s: SG entry %d is too large\n", __func__, i);
+ tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
+ bdma_chan->id, i);
err = -EINVAL;
break;
}
@@ -479,17 +479,16 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
} else if (next_addr != -1) {
/* Finalize descriptor using total byte count value */
tsi721_desc_fill_end(bd_ptr, bcount, 0);
- dev_dbg(dchan->device->dev,
- "%s: prev desc final len: %d\n",
- __func__, bcount);
+ tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
+ bdma_chan->id, bcount);
}
desc->rio_addr = rio_addr;
if (i && idx == rd_idx) {
- dev_dbg(dchan->device->dev,
- "%s: HW descriptor ring is full @ %d\n",
- __func__, i);
+ tsi_debug(DMAV, ch_dev,
+ "DMAC%d HW descriptor ring is full @ %d",
+ bdma_chan->id, i);
desc->sg = sg;
desc->sg_len -= i;
break;
@@ -498,13 +497,12 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
if (err) {
- dev_err(dchan->device->dev,
- "Failed to build desc: err=%d\n", err);
+ tsi_err(ch_dev, "Failed to build desc: err=%d", err);
break;
}
- dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
- bd_ptr, desc->destid, desc->rio_addr);
+ tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
+ bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);
next_addr = sg_dma_address(sg);
bcount = sg_dma_len(sg);
@@ -519,8 +517,9 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
entry_done:
if (sg_is_last(sg)) {
tsi721_desc_fill_end(bd_ptr, bcount, 0);
- dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n",
- __func__, bcount);
+ tsi_debug(DMAV, ch_dev,
+ "DMAC%d last desc final len: %d",
+ bdma_chan->id, bcount);
desc->sg_len = 0;
} else {
rio_addr += sg_dma_len(sg);
@@ -534,35 +533,43 @@ entry_done:
return err;
}
-static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
{
- struct tsi721_tx_desc *desc;
int err;
- dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);
+
+ if (!tsi721_dma_is_idle(bdma_chan))
+ return;
/*
- * If there are any new transactions in the queue add them
- * into the processing list
- */
- if (!list_empty(&bdma_chan->queue))
- list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+ * If there is no data transfer in progress, fetch new descriptor from
+ * the pending queue.
+ */
+
+ if (desc == NULL && bdma_chan->active_tx == NULL &&
+ !list_empty(&bdma_chan->queue)) {
+ desc = list_first_entry(&bdma_chan->queue,
+ struct tsi721_tx_desc, desc_node);
+ list_del_init((&desc->desc_node));
+ bdma_chan->active_tx = desc;
+ }
- /* Start new transaction (if available) */
- if (!list_empty(&bdma_chan->active_list)) {
- desc = tsi721_dma_first_active(bdma_chan);
+ if (desc) {
err = tsi721_submit_sg(desc);
if (!err)
tsi721_start_dma(bdma_chan);
else {
tsi721_dma_tx_err(bdma_chan, desc);
- dev_dbg(bdma_chan->dchan.device->dev,
- "ERR: tsi721_submit_sg failed with err=%d\n",
- err);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device,
+ "DMAC%d ERR: tsi721_submit_sg failed with err=%d",
+ bdma_chan->id, err);
}
}
- dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
+ bdma_chan->id);
}
static void tsi721_dma_tasklet(unsigned long data)
@@ -571,22 +578,84 @@ static void tsi721_dma_tasklet(unsigned long data)
u32 dmac_int, dmac_sts;
dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
- dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
- __func__, bdma_chan->id, dmac_int);
+ tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
+ bdma_chan->id, dmac_int);
/* Clear channel interrupts */
iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
if (dmac_int & TSI721_DMAC_INT_ERR) {
+ int i = 10000;
+ struct tsi721_tx_desc *desc;
+
+ desc = bdma_chan->active_tx;
dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
- dev_err(bdma_chan->dchan.device->dev,
- "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
- __func__, bdma_chan->id, dmac_sts);
+ tsi_err(&bdma_chan->dchan.dev->device,
+ "DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
+ bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);
+
+ /* Re-initialize DMA channel if possible */
+
+ if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
+ goto err_out;
+
+ tsi721_clr_stat(bdma_chan);
+
+ spin_lock(&bdma_chan->lock);
+
+ /* Put DMA channel into init state */
+ iowrite32(TSI721_DMAC_CTL_INIT,
+ bdma_chan->regs + TSI721_DMAC_CTL);
+ do {
+ udelay(1);
+ dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ i--;
+ } while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);
+
+ if (dmac_sts & TSI721_DMAC_STS_ABORT) {
+ tsi_err(&bdma_chan->dchan.dev->device,
+ "Failed to re-initiate DMAC%d", bdma_chan->id);
+ spin_unlock(&bdma_chan->lock);
+ goto err_out;
+ }
+
+ /* Setup DMA descriptor pointers */
+ iowrite32(((u64)bdma_chan->bd_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DPTRH);
+ iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+ /* Setup descriptor status FIFO */
+ iowrite32(((u64)bdma_chan->sts_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DSBH);
+ iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DSBL);
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
+ bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+ /* Clear interrupt bits */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+ bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+ bdma_chan->sts_rdptr = 0;
+ udelay(10);
+
+ desc = bdma_chan->active_tx;
+ desc->status = DMA_ERROR;
+ dma_cookie_complete(&desc->txd);
+ list_add(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->active_tx = NULL;
+ if (bdma_chan->active)
+ tsi721_advance_work(bdma_chan, NULL);
+ spin_unlock(&bdma_chan->lock);
}
if (dmac_int & TSI721_DMAC_INT_STFULL) {
- dev_err(bdma_chan->dchan.device->dev,
- "%s: DMAC%d descriptor status FIFO is full\n",
- __func__, bdma_chan->id);
+ tsi_err(&bdma_chan->dchan.dev->device,
+ "DMAC%d descriptor status FIFO is full",
+ bdma_chan->id);
}
if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
@@ -594,7 +663,7 @@ static void tsi721_dma_tasklet(unsigned long data)
tsi721_clr_stat(bdma_chan);
spin_lock(&bdma_chan->lock);
- desc = tsi721_dma_first_active(bdma_chan);
+ desc = bdma_chan->active_tx;
if (desc->sg_len == 0) {
dma_async_tx_callback callback = NULL;
@@ -606,17 +675,21 @@ static void tsi721_dma_tasklet(unsigned long data)
callback = desc->txd.callback;
param = desc->txd.callback_param;
}
- list_move(&desc->desc_node, &bdma_chan->free_list);
+ list_add(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->active_tx = NULL;
+ if (bdma_chan->active)
+ tsi721_advance_work(bdma_chan, NULL);
spin_unlock(&bdma_chan->lock);
if (callback)
callback(param);
- spin_lock(&bdma_chan->lock);
+ } else {
+ if (bdma_chan->active)
+ tsi721_advance_work(bdma_chan,
+ bdma_chan->active_tx);
+ spin_unlock(&bdma_chan->lock);
}
-
- tsi721_advance_work(bdma_chan);
- spin_unlock(&bdma_chan->lock);
}
-
+err_out:
/* Re-Enable BDMA channel interrupts */
iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}
@@ -629,8 +702,9 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
/* Check if the descriptor is detached from any lists */
if (!list_empty(&desc->desc_node)) {
- dev_err(bdma_chan->dchan.device->dev,
- "%s: wrong state of descriptor %p\n", __func__, txd);
+ tsi_err(&bdma_chan->dchan.dev->device,
+ "DMAC%d wrong state of descriptor %p",
+ bdma_chan->id, txd);
return -EIO;
}
@@ -655,25 +729,25 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
struct tsi721_tx_desc *desc = NULL;
int i;
- dev_dbg(dchan->device->dev, "%s: for channel %d\n",
- __func__, bdma_chan->id);
+ tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
if (bdma_chan->bd_base)
return TSI721_DMA_TX_QUEUE_SZ;
/* Initialize BDMA channel */
if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
- dev_err(dchan->device->dev, "Unable to initialize data DMA"
- " channel %d, aborting\n", bdma_chan->id);
+ tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
+ bdma_chan->id);
return -ENODEV;
}
/* Allocate queue of transaction descriptors */
desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!desc) {
- dev_err(dchan->device->dev,
- "Failed to allocate logical descriptors\n");
+ tsi_err(&dchan->dev->device,
+ "DMAC%d Failed to allocate logical descriptors",
+ bdma_chan->id);
tsi721_bdma_ch_free(bdma_chan);
return -ENOMEM;
}
@@ -714,15 +788,11 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
- dev_dbg(dchan->device->dev, "%s: for channel %d\n",
- __func__, bdma_chan->id);
+ tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
if (bdma_chan->bd_base == NULL)
return;
- BUG_ON(!list_empty(&bdma_chan->active_list));
- BUG_ON(!list_empty(&bdma_chan->queue));
-
tsi721_bdma_interrupt_enable(bdma_chan, 0);
bdma_chan->active = false;
tsi721_sync_dma_irq(bdma_chan);
@@ -736,20 +806,26 @@ static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- return dma_cookie_status(dchan, cookie, txstate);
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ enum dma_status status;
+
+ spin_lock_bh(&bdma_chan->lock);
+ status = dma_cookie_status(dchan, cookie, txstate);
+ spin_unlock_bh(&bdma_chan->lock);
+ return status;
}
static void tsi721_issue_pending(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
- dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);
+ tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
+ spin_lock_bh(&bdma_chan->lock);
if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
- spin_lock_bh(&bdma_chan->lock);
- tsi721_advance_work(bdma_chan);
- spin_unlock_bh(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan, NULL);
}
+ spin_unlock_bh(&bdma_chan->lock);
}
static
@@ -759,18 +835,19 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
void *tinfo)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
- struct tsi721_tx_desc *desc, *_d;
+ struct tsi721_tx_desc *desc;
struct rio_dma_ext *rext = tinfo;
enum dma_rtype rtype;
struct dma_async_tx_descriptor *txd = NULL;
if (!sgl || !sg_len) {
- dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
- return NULL;
+ tsi_err(&dchan->dev->device, "DMAC%d No SG list",
+ bdma_chan->id);
+ return ERR_PTR(-EINVAL);
}
- dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
- (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
+ tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
if (dir == DMA_DEV_TO_MEM)
rtype = NREAD;
@@ -788,30 +865,36 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
break;
}
} else {
- dev_err(dchan->device->dev,
- "%s: Unsupported DMA direction option\n", __func__);
- return NULL;
+ tsi_err(&dchan->dev->device,
+ "DMAC%d Unsupported DMA direction option",
+ bdma_chan->id);
+ return ERR_PTR(-EINVAL);
}
spin_lock_bh(&bdma_chan->lock);
- list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
- if (async_tx_test_ack(&desc->txd)) {
- list_del_init(&desc->desc_node);
- desc->destid = rext->destid;
- desc->rio_addr = rext->rio_addr;
- desc->rio_addr_u = 0;
- desc->rtype = rtype;
- desc->sg_len = sg_len;
- desc->sg = sgl;
- txd = &desc->txd;
- txd->flags = flags;
- break;
- }
+ if (!list_empty(&bdma_chan->free_list)) {
+ desc = list_first_entry(&bdma_chan->free_list,
+ struct tsi721_tx_desc, desc_node);
+ list_del_init(&desc->desc_node);
+ desc->destid = rext->destid;
+ desc->rio_addr = rext->rio_addr;
+ desc->rio_addr_u = 0;
+ desc->rtype = rtype;
+ desc->sg_len = sg_len;
+ desc->sg = sgl;
+ txd = &desc->txd;
+ txd->flags = flags;
}
spin_unlock_bh(&bdma_chan->lock);
+ if (!txd) {
+ tsi_debug(DMA, &dchan->dev->device,
+ "DMAC%d free TXD is not available", bdma_chan->id);
+ return ERR_PTR(-EBUSY);
+ }
+
return txd;
}
@@ -819,16 +902,18 @@ static int tsi721_terminate_all(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
struct tsi721_tx_desc *desc, *_d;
- u32 dmac_int;
LIST_HEAD(list);
- dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+ tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
spin_lock_bh(&bdma_chan->lock);
bdma_chan->active = false;
- if (!tsi721_dma_is_idle(bdma_chan)) {
+ while (!tsi721_dma_is_idle(bdma_chan)) {
+
+ udelay(5);
+#if (0)
/* make sure to stop the transfer */
iowrite32(TSI721_DMAC_CTL_SUSP,
bdma_chan->regs + TSI721_DMAC_CTL);
@@ -837,9 +922,11 @@ static int tsi721_terminate_all(struct dma_chan *dchan)
do {
dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
+#endif
}
- list_splice_init(&bdma_chan->active_list, &list);
+ if (bdma_chan->active_tx)
+ list_add(&bdma_chan->active_tx->desc_node, &list);
list_splice_init(&bdma_chan->queue, &list);
list_for_each_entry_safe(desc, _d, &list, desc_node)
@@ -850,12 +937,42 @@ static int tsi721_terminate_all(struct dma_chan *dchan)
return 0;
}
+static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (!bdma_chan->active)
+ return;
+ spin_lock_bh(&bdma_chan->lock);
+ if (!tsi721_dma_is_idle(bdma_chan)) {
+ int timeout = 100000;
+
+ /* stop the transfer in progress */
+ iowrite32(TSI721_DMAC_CTL_SUSP,
+ bdma_chan->regs + TSI721_DMAC_CTL);
+
+ /* Wait until DMA channel stops */
+ while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
+ udelay(1);
+ }
+
+ spin_unlock_bh(&bdma_chan->lock);
+}
+
+void tsi721_dma_stop_all(struct tsi721_device *priv)
+{
+ int i;
+
+ for (i = 0; i < TSI721_DMA_MAXCH; i++) {
+ if (i != TSI721_DMACH_MAINT)
+ tsi721_dma_stop(&priv->bdma[i]);
+ }
+}
+
int tsi721_register_dma(struct tsi721_device *priv)
{
int i;
int nr_channels = 0;
int err;
- struct rio_mport *mport = priv->mport;
+ struct rio_mport *mport = &priv->mport;
INIT_LIST_HEAD(&mport->dma.channels);
@@ -875,7 +992,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
spin_lock_init(&bdma_chan->lock);
- INIT_LIST_HEAD(&bdma_chan->active_list);
+ bdma_chan->active_tx = NULL;
INIT_LIST_HEAD(&bdma_chan->queue);
INIT_LIST_HEAD(&bdma_chan->free_list);
@@ -901,7 +1018,33 @@ int tsi721_register_dma(struct tsi721_device *priv)
err = dma_async_device_register(&mport->dma);
if (err)
- dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
+ tsi_err(&priv->pdev->dev, "Failed to register DMA device");
return err;
}
+
+void tsi721_unregister_dma(struct tsi721_device *priv)
+{
+ struct rio_mport *mport = &priv->mport;
+ struct dma_chan *chan, *_c;
+ struct tsi721_bdma_chan *bdma_chan;
+
+ tsi721_dma_stop_all(priv);
+ dma_async_device_unregister(&mport->dma);
+
+ list_for_each_entry_safe(chan, _c, &mport->dma.channels,
+ device_node) {
+ bdma_chan = to_tsi721_chan(chan);
+ if (bdma_chan->active) {
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+ bdma_chan->active = false;
+ tsi721_sync_dma_irq(bdma_chan);
+ tasklet_kill(&bdma_chan->tasklet);
+ INIT_LIST_HEAD(&bdma_chan->free_list);
+ kfree(bdma_chan->tx_desc);
+ tsi721_bdma_ch_free(bdma_chan);
+ }
+
+ list_del(&chan->device_node);
+ }
+}
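
Note the behavioral change in tsi721_prep_rio_sg(): it now returns ERR_PTR(-EINVAL) or ERR_PTR(-EBUSY) instead of NULL, so clients that only test for a NULL descriptor would treat failures as success. A hedged client-side sketch (the wrapper is hypothetical; it assumes the prep op is reached through device_prep_slave_sg with struct rio_dma_ext passed as the context argument, matching this function's tinfo parameter):

/* Hypothetical RapidIO DMA client updated for the ERR_PTR() convention. */
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/rio.h>

static int example_rio_dma_write(struct dma_chan *dchan,
				 struct scatterlist *sgl, unsigned int sg_len,
				 struct rio_dma_ext *rext)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
						  DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT, rext);
	/* IS_ERR(), not a NULL test: -EBUSY means retry after completions */
	if (IS_ERR_OR_NULL(txd))
		return txd ? PTR_ERR(txd) : -EIO;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return cookie;

	dma_async_issue_pending(dchan);
	return 0;
}
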
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index f301f059bb85..128350f4d17a 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -131,6 +131,17 @@ static int rio_device_remove(struct device *dev)
return 0;
}
+static void rio_device_shutdown(struct device *dev)
+{
+ struct rio_dev *rdev = to_rio_dev(dev);
+ struct rio_driver *rdrv = rdev->driver;
+
+ dev_dbg(dev, "RIO: %s\n", __func__);
+
+ if (rdrv && rdrv->shutdown)
+ rdrv->shutdown(rdev);
+}
+
/**
* rio_register_driver - register a new RIO driver
* @rdrv: the RIO driver structure to register
@@ -229,6 +240,7 @@ struct bus_type rio_bus_type = {
.bus_groups = rio_bus_groups,
.probe = rio_device_probe,
.remove = rio_device_remove,
+ .shutdown = rio_device_shutdown,
.uevent = rio_uevent,
};
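
An endpoint driver opts into the new hook by filling the shutdown member of struct rio_driver (assumed to be added to the structure elsewhere in this series). A minimal sketch with placeholder callbacks:

/* Hypothetical endpoint driver wiring up the per-driver shutdown hook. */
#include <linux/rio.h>
#include <linux/rio_drv.h>

static int my_ep_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	return 0;			/* placeholder */
}

static void my_ep_remove(struct rio_dev *rdev)
{
	/* placeholder */
}

static void my_ep_shutdown(struct rio_dev *rdev)
{
	/* Quiesce outstanding DMA/doorbell traffic; reached via the
	 * bus-level rio_device_shutdown() above.
	 */
	dev_dbg(&rdev->dev, "quiescing before shutdown\n");
}

static struct rio_driver my_ep_driver = {
	.name     = "my_ep",
	/* .id_table omitted for brevity */
	.probe    = my_ep_probe,
	.remove   = my_ep_remove,
	.shutdown = my_ep_shutdown,
};
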
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index d6a126c17c03..a63a380809d1 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -39,6 +39,13 @@
static void rio_init_em(struct rio_dev *rdev);
+struct rio_id_table {
+ u16 start; /* logical minimal id */
+ u32 max; /* max number of IDs in table */
+ spinlock_t lock;
+ unsigned long table[0];
+};
+
static int next_destid = 0;
static int next_comptag = 1;
@@ -62,7 +69,7 @@ static int rio_mport_phys_table[] = {
static u16 rio_destid_alloc(struct rio_net *net)
{
int destid;
- struct rio_id_table *idtab = &net->destid_table;
+ struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;
spin_lock(&idtab->lock);
destid = find_first_zero_bit(idtab->table, idtab->max);
@@ -88,7 +95,7 @@ static u16 rio_destid_alloc(struct rio_net *net)
static int rio_destid_reserve(struct rio_net *net, u16 destid)
{
int oldbit;
- struct rio_id_table *idtab = &net->destid_table;
+ struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;
destid -= idtab->start;
spin_lock(&idtab->lock);
@@ -106,7 +113,7 @@ static int rio_destid_reserve(struct rio_net *net, u16 destid)
*/
static void rio_destid_free(struct rio_net *net, u16 destid)
{
- struct rio_id_table *idtab = &net->destid_table;
+ struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;
destid -= idtab->start;
spin_lock(&idtab->lock);
@@ -121,7 +128,7 @@ static void rio_destid_free(struct rio_net *net, u16 destid)
static u16 rio_destid_first(struct rio_net *net)
{
int destid;
- struct rio_id_table *idtab = &net->destid_table;
+ struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;
spin_lock(&idtab->lock);
destid = find_first_bit(idtab->table, idtab->max);
@@ -141,7 +148,7 @@ static u16 rio_destid_first(struct rio_net *net)
static u16 rio_destid_next(struct rio_net *net, u16 from)
{
int destid;
- struct rio_id_table *idtab = &net->destid_table;
+ struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data;
spin_lock(&idtab->lock);
destid = find_next_bit(idtab->table, idtab->max, from);
@@ -187,19 +194,6 @@ static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u
}
/**
- * rio_local_set_device_id - Set the base/extended device id for a port
- * @port: RIO master port
- * @did: Device ID value to be written
- *
- * Writes the base/extended device id from a device.
- */
-static void rio_local_set_device_id(struct rio_mport *port, u16 did)
-{
- rio_local_write_config_32(port, RIO_DID_CSR, RIO_SET_DID(port->sys_size,
- did));
-}
-
-/**
* rio_clear_locks- Release all host locks and signal enumeration complete
* @net: RIO network to run on
*
@@ -449,9 +443,6 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
if (do_enum)
rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0);
-
- list_add_tail(&rswitch->node, &net->switches);
-
} else {
if (do_enum)
/*Enable Input Output Port (transmitter receiver)*/
@@ -461,13 +452,9 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
rdev->comp_tag & RIO_CTAG_UDEVID);
}
- rdev->dev.parent = &port->dev;
+ rdev->dev.parent = &net->dev;
rio_attach_device(rdev);
-
- device_initialize(&rdev->dev);
rdev->dev.release = rio_release_dev;
- rio_dev_get(rdev);
-
rdev->dma_mask = DMA_BIT_MASK(32);
rdev->dev.dma_mask = &rdev->dma_mask;
rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
@@ -480,6 +467,8 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
if (ret)
goto cleanup;
+ rio_dev_get(rdev);
+
return rdev;
cleanup:
@@ -621,8 +610,6 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
rdev = rio_setup_device(net, port, RIO_ANY_DESTID(port->sys_size),
hopcount, 1);
if (rdev) {
- /* Add device to the global and bus/net specific list. */
- list_add_tail(&rdev->net_list, &net->devices);
rdev->prev = prev;
if (prev && rio_is_switch(prev))
prev->rswitch->nextdev[prev_port] = rdev;
@@ -778,8 +765,6 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
/* Setup new RIO device */
if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) {
- /* Add device to the global and bus/net specific list. */
- list_add_tail(&rdev->net_list, &net->devices);
rdev->prev = prev;
if (prev && rio_is_switch(prev))
prev->rswitch->nextdev[prev_port] = rdev;
@@ -864,50 +849,71 @@ static int rio_mport_is_active(struct rio_mport *port)
return result & RIO_PORT_N_ERR_STS_PORT_OK;
}
-/**
- * rio_alloc_net- Allocate and configure a new RIO network
- * @port: Master port associated with the RIO network
+static void rio_scan_release_net(struct rio_net *net)
+{
+ pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id);
+ kfree(net->enum_data);
+}
+
+static void rio_scan_release_dev(struct device *dev)
+{
+ struct rio_net *net;
+
+ net = to_rio_net(dev);
+ pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id);
+ kfree(net);
+}
+
+/*
+ * rio_scan_alloc_net - Allocate and configure a new RIO network
+ * @mport: Master port associated with the RIO network
* @do_enum: Enumeration/Discovery mode flag
* @start: logical minimal start id for new net
*
- * Allocates a RIO network structure, initializes per-network
- * list heads, and adds the associated master port to the
- * network list of associated master ports. Returns a
- * RIO network pointer on success or %NULL on failure.
+ * Allocates a new RIO network structure and initializes enumerator-specific
+ * part of it (if required).
+ * Returns a RIO network pointer on success or %NULL on failure.
*/
-static struct rio_net *rio_alloc_net(struct rio_mport *port,
- int do_enum, u16 start)
+static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport,
+ int do_enum, u16 start)
{
struct rio_net *net;
- net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
+ net = rio_alloc_net(mport);
+
if (net && do_enum) {
- net->destid_table.table = kcalloc(
- BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)),
- sizeof(long),
- GFP_KERNEL);
+ struct rio_id_table *idtab;
+ size_t size;
+
+ size = sizeof(struct rio_id_table) +
+ BITS_TO_LONGS(
+ RIO_MAX_ROUTE_ENTRIES(mport->sys_size)
+ ) * sizeof(long);
+
+ idtab = kzalloc(size, GFP_KERNEL);
- if (net->destid_table.table == NULL) {
+ if (idtab == NULL) {
pr_err("RIO: failed to allocate destID table\n");
- kfree(net);
+ rio_free_net(net);
net = NULL;
} else {
- net->destid_table.start = start;
- net->destid_table.max =
- RIO_MAX_ROUTE_ENTRIES(port->sys_size);
- spin_lock_init(&net->destid_table.lock);
+ net->enum_data = idtab;
+ net->release = rio_scan_release_net;
+ idtab->start = start;
+ idtab->max = RIO_MAX_ROUTE_ENTRIES(mport->sys_size);
+ spin_lock_init(&idtab->lock);
}
}
if (net) {
- INIT_LIST_HEAD(&net->node);
- INIT_LIST_HEAD(&net->devices);
- INIT_LIST_HEAD(&net->switches);
- INIT_LIST_HEAD(&net->mports);
- list_add_tail(&port->nnode, &net->mports);
- net->hport = port;
- net->id = port->id;
+ net->id = mport->id;
+ net->hport = mport;
+ dev_set_name(&net->dev, "rnet_%d", net->id);
+ net->dev.parent = &mport->dev;
+ net->dev.release = rio_scan_release_dev;
+ rio_add_net(net);
}
+
return net;
}
@@ -968,17 +974,6 @@ static void rio_init_em(struct rio_dev *rdev)
}
/**
- * rio_pw_enable - Enables/disables port-write handling by a master port
- * @port: Master port associated with port-write handling
- * @enable: 1=enable, 0=disable
- */
-static void rio_pw_enable(struct rio_mport *port, int enable)
-{
- if (port->ops->pwenable)
- port->ops->pwenable(port, enable);
-}
-
-/**
 * rio_enum_mport - Start enumeration through a master port
* @mport: Master port to send transactions
* @flags: Enumeration control flags
@@ -1016,7 +1011,7 @@ static int rio_enum_mport(struct rio_mport *mport, u32 flags)
/* If master port has an active link, allocate net and enum peers */
if (rio_mport_is_active(mport)) {
- net = rio_alloc_net(mport, 1, 0);
+ net = rio_scan_alloc_net(mport, 1, 0);
if (!net) {
printk(KERN_ERR "RIO: failed to allocate new net\n");
rc = -ENOMEM;
@@ -1133,7 +1128,7 @@ static int rio_disc_mport(struct rio_mport *mport, u32 flags)
enum_done:
pr_debug("RIO: ... enumeration done\n");
- net = rio_alloc_net(mport, 0, 0);
+ net = rio_scan_alloc_net(mport, 0, 0);
if (!net) {
printk(KERN_ERR "RIO: Failed to allocate new net\n");
goto bail;
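
The enum_data/release pair that replaces the embedded destid_table is a general hook: any enumerator can hang private state off the net and have rio_free_net() call back to dispose of it. A minimal sketch of the same pattern; everything but rio_alloc_net()/rio_add_net()/rio_free_net() and to_rio_net() is hypothetical, and the error path mirrors rio_scan_alloc_net() above:

#include <linux/slab.h>
#include <linux/rio.h>

struct my_enum_state {
	u16 next_destid;		/* assumed enumerator bookkeeping */
};

static void my_release_net(struct rio_net *net)
{
	kfree(net->enum_data);		/* invoked from rio_free_net() */
}

static void my_release_dev(struct device *dev)
{
	kfree(to_rio_net(dev));		/* frees the net structure itself */
}

static struct rio_net *my_alloc_net(struct rio_mport *mport)
{
	struct rio_net *net = rio_alloc_net(mport);

	if (!net)
		return NULL;

	net->enum_data = kzalloc(sizeof(struct my_enum_state), GFP_KERNEL);
	if (!net->enum_data) {
		rio_free_net(net);
		return NULL;
	}
	net->release = my_release_net;
	net->id = mport->id;
	net->hport = mport;
	dev_set_name(&net->dev, "rnet_%d", net->id);
	net->dev.parent = &mport->dev;
	net->dev.release = my_release_dev;
	rio_add_net(net);		/* return value ignored, as above */
	return net;
}
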
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index d7b87c64b7cd..0dcaa660cba1 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -30,6 +30,20 @@
#include "rio.h"
+/*
+ * struct rio_pwrite - RIO portwrite event
+ * @node: Node in list of port-write events
+ * @pwcback: Port-write event callback
+ * @context: Handler-specific context to pass on event
+ */
+struct rio_pwrite {
+ struct list_head node;
+
+ int (*pwcback)(struct rio_mport *mport, void *context,
+ union rio_pw_msg *msg, int step);
+ void *context;
+};
+
MODULE_DESCRIPTION("RapidIO Subsystem Core");
MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
@@ -42,6 +56,7 @@ MODULE_PARM_DESC(hdid,
"Destination ID assignment to local RapidIO controllers");
static LIST_HEAD(rio_devices);
+static LIST_HEAD(rio_nets);
static DEFINE_SPINLOCK(rio_global_list_lock);
static LIST_HEAD(rio_mports);
@@ -68,6 +83,89 @@ u16 rio_local_get_device_id(struct rio_mport *port)
}
/**
+ * rio_query_mport - Query mport device attributes
+ * @port: mport device to query
+ * @mport_attr: mport attributes data structure
+ *
+ * Returns attributes of specified mport through the
+ * pointer to attributes data structure.
+ */
+int rio_query_mport(struct rio_mport *port,
+ struct rio_mport_attr *mport_attr)
+{
+ if (!port->ops->query_mport)
+ return -ENODATA;
+ return port->ops->query_mport(port, mport_attr);
+}
+EXPORT_SYMBOL(rio_query_mport);
+
+/**
+ * rio_alloc_net - Allocate and initialize a new RIO network data structure
+ * @mport: Master port associated with the RIO network
+ *
+ * Allocates a RIO network structure, initializes per-network
+ * list heads, and adds the associated master port to the
+ * network list of associated master ports. Returns a
+ * RIO network pointer on success or %NULL on failure.
+ */
+struct rio_net *rio_alloc_net(struct rio_mport *mport)
+{
+ struct rio_net *net;
+
+ net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
+ if (net) {
+ INIT_LIST_HEAD(&net->node);
+ INIT_LIST_HEAD(&net->devices);
+ INIT_LIST_HEAD(&net->switches);
+ INIT_LIST_HEAD(&net->mports);
+ mport->net = net;
+ }
+ return net;
+}
+EXPORT_SYMBOL_GPL(rio_alloc_net);
+
+int rio_add_net(struct rio_net *net)
+{
+ int err;
+
+ err = device_register(&net->dev);
+ if (err)
+ return err;
+ spin_lock(&rio_global_list_lock);
+ list_add_tail(&net->node, &rio_nets);
+ spin_unlock(&rio_global_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_add_net);
+
+void rio_free_net(struct rio_net *net)
+{
+ spin_lock(&rio_global_list_lock);
+ if (!list_empty(&net->node))
+ list_del(&net->node);
+ spin_unlock(&rio_global_list_lock);
+ if (net->release)
+ net->release(net);
+ device_unregister(&net->dev);
+}
+EXPORT_SYMBOL_GPL(rio_free_net);
+
+/**
+ * rio_local_set_device_id - Set the base/extended device id for a port
+ * @port: RIO master port
+ * @did: Device ID value to be written
+ *
+ * Writes the base/extended device id to the local master port.
+ */
+void rio_local_set_device_id(struct rio_mport *port, u16 did)
+{
+ rio_local_write_config_32(port, RIO_DID_CSR,
+ RIO_SET_DID(port->sys_size, did));
+}
+EXPORT_SYMBOL_GPL(rio_local_set_device_id);
+
+/**
 * rio_add_device - Adds a RIO device to the device model
* @rdev: RIO device
*
@@ -79,12 +177,19 @@ int rio_add_device(struct rio_dev *rdev)
{
int err;
- err = device_add(&rdev->dev);
+ atomic_set(&rdev->state, RIO_DEVICE_RUNNING);
+ err = device_register(&rdev->dev);
if (err)
return err;
spin_lock(&rio_global_list_lock);
list_add_tail(&rdev->global_list, &rio_devices);
+ if (rdev->net) {
+ list_add_tail(&rdev->net_list, &rdev->net->devices);
+ if (rdev->pef & RIO_PEF_SWITCH)
+ list_add_tail(&rdev->rswitch->node,
+ &rdev->net->switches);
+ }
spin_unlock(&rio_global_list_lock);
rio_create_sysfs_dev_files(rdev);
@@ -93,6 +198,33 @@ int rio_add_device(struct rio_dev *rdev)
}
EXPORT_SYMBOL_GPL(rio_add_device);
+/*
+ * rio_del_device - removes a RIO device from the device model
+ * @rdev: RIO device
+ * @state: device state to set during removal process
+ *
+ * Removes the RIO device from the kernel device list and the subsystem's
+ * device list.
+ * Clears sysfs entries for the removed device.
+ */
+void rio_del_device(struct rio_dev *rdev, enum rio_device_state state)
+{
+ pr_debug("RIO: %s: removing %s\n", __func__, rio_name(rdev));
+ atomic_set(&rdev->state, state);
+ spin_lock(&rio_global_list_lock);
+ list_del(&rdev->global_list);
+ if (rdev->net) {
+ list_del(&rdev->net_list);
+ if (rdev->pef & RIO_PEF_SWITCH) {
+ list_del(&rdev->rswitch->node);
+ kfree(rdev->rswitch->route_table);
+ }
+ }
+ spin_unlock(&rio_global_list_lock);
+ rio_remove_sysfs_dev_files(rdev);
+ device_unregister(&rdev->dev);
+}
+EXPORT_SYMBOL_GPL(rio_del_device);
+
/**
* rio_request_inb_mbox - request inbound mailbox service
* @mport: RIO master port from which to allocate the mailbox resource
@@ -117,7 +249,7 @@ int rio_request_inb_mbox(struct rio_mport *mport,
if (mport->ops->open_inb_mbox == NULL)
goto out;
- res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_mbox_res(res, mbox, mbox);
@@ -185,7 +317,7 @@ int rio_request_outb_mbox(struct rio_mport *mport,
if (mport->ops->open_outb_mbox == NULL)
goto out;
- res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_mbox_res(res, mbox, mbox);
@@ -258,7 +390,9 @@ rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
dbell->dinb = dinb;
dbell->dev_id = dev_id;
+ mutex_lock(&mport->lock);
list_add_tail(&dbell->node, &mport->dbells);
+ mutex_unlock(&mport->lock);
out:
return rc;
@@ -285,7 +419,7 @@ int rio_request_inb_dbell(struct rio_mport *mport,
{
int rc = 0;
- struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_dbell_res(res, start, end);
@@ -322,12 +456,15 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
int rc = 0, found = 0;
struct rio_dbell *dbell;
+ mutex_lock(&mport->lock);
list_for_each_entry(dbell, &mport->dbells, node) {
if ((dbell->res->start == start) && (dbell->res->end == end)) {
+ list_del(&dbell->node);
found = 1;
break;
}
}
+ mutex_unlock(&mport->lock);
/* If we can't find an exact match, fail */
if (!found) {
@@ -335,9 +472,6 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
goto out;
}
- /* Delete from list */
- list_del(&dbell->node);
-
/* Release the doorbell resource */
rc = release_resource(dbell->res);
@@ -360,7 +494,7 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
u16 end)
{
- struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (res) {
rio_init_dbell_res(res, start, end);
@@ -394,7 +528,71 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
}
/**
- * rio_request_inb_pwrite - request inbound port-write message service
+ * rio_add_mport_pw_handler - add port-write message handler into the list
+ * of mport specific pw handlers
+ * @mport: RIO master port to bind the portwrite callback
+ * @context: Handler specific context to pass on event
+ * @pwcback: Callback to execute when portwrite is received
+ *
+ * Returns 0 if the request has been satisfied.
+ */
+int rio_add_mport_pw_handler(struct rio_mport *mport, void *context,
+ int (*pwcback)(struct rio_mport *mport,
+ void *context, union rio_pw_msg *msg, int step))
+{
+ int rc = 0;
+ struct rio_pwrite *pwrite;
+
+ pwrite = kzalloc(sizeof(struct rio_pwrite), GFP_KERNEL);
+ if (!pwrite) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ pwrite->pwcback = pwcback;
+ pwrite->context = context;
+ mutex_lock(&mport->lock);
+ list_add_tail(&pwrite->node, &mport->pwrites);
+ mutex_unlock(&mport->lock);
+out:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(rio_add_mport_pw_handler);
+
+/**
+ * rio_del_mport_pw_handler - remove port-write message handler from the list
+ * of mport specific pw handlers
+ * @mport: RIO master port to bind the portwrite callback
+ * @context: Registered handler specific context to pass on event
+ * @pwcback: Registered callback function
+ *
+ * Returns 0 if the request has been satisfied.
+ */
+int rio_del_mport_pw_handler(struct rio_mport *mport, void *context,
+ int (*pwcback)(struct rio_mport *mport,
+ void *context, union rio_pw_msg *msg, int step))
+{
+ int rc = -EINVAL;
+ struct rio_pwrite *pwrite;
+
+ mutex_lock(&mport->lock);
+ list_for_each_entry(pwrite, &mport->pwrites, node) {
+ if (pwrite->pwcback == pwcback && pwrite->context == context) {
+ list_del(&pwrite->node);
+ kfree(pwrite);
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&mport->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(rio_del_mport_pw_handler);
+
+/**
+ * rio_request_inb_pwrite - request inbound port-write message service for
+ * specific RapidIO device
* @rdev: RIO device to which register inbound port-write callback routine
* @pwcback: Callback routine to execute when port-write is received
*
@@ -419,6 +617,7 @@ EXPORT_SYMBOL_GPL(rio_request_inb_pwrite);
/**
* rio_release_inb_pwrite - release inbound port-write message service
+ * associated with specific RapidIO device
* @rdev: RIO device which registered for inbound port-write callback
*
* Removes callback from the rio_dev structure. Returns 0 if the request
@@ -440,6 +639,24 @@ int rio_release_inb_pwrite(struct rio_dev *rdev)
EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
/**
+ * rio_pw_enable - Enables/disables port-write handling by a master port
+ * @mport: Master port associated with port-write handling
+ * @enable: 1=enable, 0=disable
+ */
+void rio_pw_enable(struct rio_mport *mport, int enable)
+{
+ if (mport->ops->pwenable) {
+ mutex_lock(&mport->lock);
+
+ if ((enable && ++mport->pwe_refcnt == 1) ||
+ (!enable && mport->pwe_refcnt && --mport->pwe_refcnt == 0))
+ mport->ops->pwenable(mport, enable);
+ mutex_unlock(&mport->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(rio_pw_enable);
+
+/**
* rio_map_inb_region -- Map inbound memory region.
* @mport: Master port.
* @local: physical address of memory region to be mapped
@@ -483,6 +700,56 @@ void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart)
EXPORT_SYMBOL_GPL(rio_unmap_inb_region);
/**
+ * rio_map_outb_region -- Map outbound memory region.
+ * @mport: Master port.
+ * @destid: destination id window points to
+ * @rbase: RIO base address window translates to
+ * @size: Size of the memory region
+ * @rflags: Flags for mapping.
+ * @local: physical address of memory region mapped
+ *
+ * Return: 0 -- Success.
+ *
+ * This function will create the mapping from RIO space to local memory.
+ */
+int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase,
+ u32 size, u32 rflags, dma_addr_t *local)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!mport->ops->map_outb)
+ return -ENODEV;
+
+ spin_lock_irqsave(&rio_mmap_lock, flags);
+ rc = mport->ops->map_outb(mport, destid, rbase, size,
+ rflags, local);
+ spin_unlock_irqrestore(&rio_mmap_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(rio_map_outb_region);
+
+/**
+ * rio_unmap_outb_region -- Unmap the outbound memory region
+ * @mport: Master port
+ * @destid: destination id mapping points to
+ * @rstart: RIO base address window translates to
+ */
+void rio_unmap_outb_region(struct rio_mport *mport, u16 destid, u64 rstart)
+{
+ unsigned long flags;
+
+ if (!mport->ops->unmap_outb)
+ return;
+
+ spin_lock_irqsave(&rio_mmap_lock, flags);
+ mport->ops->unmap_outb(mport, destid, rstart);
+ spin_unlock_irqrestore(&rio_mmap_lock, flags);
+}
+EXPORT_SYMBOL_GPL(rio_unmap_outb_region);
+
+/**
* rio_mport_get_physefb - Helper function that returns register offset
* for Physical Layer Extended Features Block.
* @port: Master port to issue transaction
@@ -864,52 +1131,66 @@ rd_err:
}
/**
- * rio_inb_pwrite_handler - process inbound port-write message
+ * rio_inb_pwrite_handler - inbound port-write message handler
+ * @mport: mport device associated with port-write
* @pw_msg: pointer to inbound port-write message
*
* Processes an inbound port-write message. Returns 0 if the request
* has been satisfied.
*/
-int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
+int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
{
struct rio_dev *rdev;
u32 err_status, em_perrdet, em_ltlerrdet;
int rc, portnum;
-
- rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
- if (rdev == NULL) {
- /* Device removed or enumeration error */
- pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
- __func__, pw_msg->em.comptag);
- return -EIO;
- }
-
- pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
+ struct rio_pwrite *pwrite;
#ifdef DEBUG_PW
{
- u32 i;
- for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) {
+ u32 i;
+
+ pr_debug("%s: PW to mport_%d:\n", __func__, mport->id);
+ for (i = 0; i < RIO_PW_MSG_SIZE / sizeof(u32); i = i + 4) {
pr_debug("0x%02x: %08x %08x %08x %08x\n",
- i*4, pw_msg->raw[i], pw_msg->raw[i + 1],
- pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
- i += 4;
- }
+ i * 4, pw_msg->raw[i], pw_msg->raw[i + 1],
+ pw_msg->raw[i + 2], pw_msg->raw[i + 3]);
+ }
}
#endif
- /* Call an external service function (if such is registered
- * for this device). This may be the service for endpoints that send
- * device-specific port-write messages. End-point messages expected
- * to be handled completely by EP specific device driver.
+ rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
+ if (rdev) {
+ pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev));
+ } else {
+ pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
+ __func__, pw_msg->em.comptag);
+ }
+
+ /* Call a device-specific handler (if it is registered for the device).
+ * This may be the service for endpoints that send device-specific
+ * port-write messages. End-point messages are expected to be handled
+ * completely by an EP-specific device driver.
* For switches rc==0 signals that no standard processing required.
*/
- if (rdev->pwcback != NULL) {
+ if (rdev && rdev->pwcback) {
rc = rdev->pwcback(rdev, pw_msg, 0);
if (rc == 0)
return 0;
}
+ mutex_lock(&mport->lock);
+ list_for_each_entry(pwrite, &mport->pwrites, node)
+ pwrite->pwcback(mport, pwrite->context, pw_msg, 0);
+ mutex_unlock(&mport->lock);
+
+ if (!rdev)
+ return 0;
+
+ /*
+ * FIXME: The code below stays as it was before for now until we decide
+ * how to do default PW handling in combination with per-mport callbacks
+ */
+
portnum = pw_msg->em.is_port & 0xFF;
/* Check if device and route to it are functional:
@@ -1909,32 +2190,31 @@ static int rio_get_hdid(int index)
return hdid[index];
}
-int rio_register_mport(struct rio_mport *port)
+int rio_mport_initialize(struct rio_mport *mport)
{
- struct rio_scan_node *scan = NULL;
- int res = 0;
-
if (next_portid >= RIO_MAX_MPORTS) {
pr_err("RIO: reached specified max number of mports\n");
- return 1;
+ return -ENODEV;
}
- port->id = next_portid++;
- port->host_deviceid = rio_get_hdid(port->id);
- port->nscan = NULL;
+ atomic_set(&mport->state, RIO_DEVICE_INITIALIZING);
+ mport->id = next_portid++;
+ mport->host_deviceid = rio_get_hdid(mport->id);
+ mport->nscan = NULL;
+ mutex_init(&mport->lock);
+ mport->pwe_refcnt = 0;
+ INIT_LIST_HEAD(&mport->pwrites);
- dev_set_name(&port->dev, "rapidio%d", port->id);
- port->dev.class = &rio_mport_class;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rio_mport_initialize);
- res = device_register(&port->dev);
- if (res)
- dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
- port->id, res);
- else
- dev_dbg(&port->dev, "RIO: mport%d registered\n", port->id);
+int rio_register_mport(struct rio_mport *port)
+{
+ struct rio_scan_node *scan = NULL;
+ int res = 0;
mutex_lock(&rio_mport_list_lock);
- list_add_tail(&port->node, &rio_mports);
/*
* Check if there are any registered enumeration/discovery operations
@@ -1948,12 +2228,73 @@ int rio_register_mport(struct rio_mport *port)
break;
}
}
+
+ list_add_tail(&port->node, &rio_mports);
mutex_unlock(&rio_mport_list_lock);
+ dev_set_name(&port->dev, "rapidio%d", port->id);
+ port->dev.class = &rio_mport_class;
+ atomic_set(&port->state, RIO_DEVICE_RUNNING);
+
+ res = device_register(&port->dev);
+ if (res)
+ dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
+ port->id, res);
+ else
+ dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(rio_register_mport);
+
+static int rio_mport_cleanup_callback(struct device *dev, void *data)
+{
+ struct rio_dev *rdev = to_rio_dev(dev);
+
+ if (dev->bus == &rio_bus_type)
+ rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);
+ return 0;
+}
+
+static int rio_net_remove_children(struct rio_net *net)
+{
+ /*
+ * Unregister all RapidIO devices residing on this net (this will
+ * invoke notification of registered subsystem interfaces as well).
+ */
+ device_for_each_child(&net->dev, NULL, rio_mport_cleanup_callback);
+ return 0;
+}
+
+int rio_unregister_mport(struct rio_mport *port)
+{
pr_debug("RIO: %s %s id=%d\n", __func__, port->name, port->id);
+
+ /* Transition mport to the SHUTDOWN state */
+ if (atomic_cmpxchg(&port->state,
+ RIO_DEVICE_RUNNING,
+ RIO_DEVICE_SHUTDOWN) != RIO_DEVICE_RUNNING) {
+ pr_err("RIO: %s unexpected state transition for mport %s\n",
+ __func__, port->name);
+ }
+
+ if (port->net && port->net->hport == port) {
+ rio_net_remove_children(port->net);
+ rio_free_net(port->net);
+ }
+
+ /*
+ * Unregister all RapidIO devices attached to this mport (this will
+ * invoke notification of registered subsystem interfaces as well).
+ */
+ mutex_lock(&rio_mport_list_lock);
+ list_del(&port->node);
+ mutex_unlock(&rio_mport_list_lock);
+ device_unregister(&port->dev);
+
return 0;
}
-EXPORT_SYMBOL_GPL(rio_register_mport);
+EXPORT_SYMBOL_GPL(rio_unregister_mport);
EXPORT_SYMBOL_GPL(rio_local_get_device_id);
EXPORT_SYMBOL_GPL(rio_get_device);
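
Putting the new mport-level pieces together: a consumer registers a callback with rio_add_mport_pw_handler(), takes a reference on port-write delivery through rio_pw_enable(), and tears down in reverse order; rio_inb_pwrite_handler() then fans each message out to all registered handlers. A hedged sketch (the handler body and wrapper names are hypothetical, and the declarations are assumed to be exported via the RapidIO headers):

#include <linux/rio.h>
#include <linux/rio_drv.h>

static int my_pw_handler(struct rio_mport *mport, void *context,
			 union rio_pw_msg *msg, int step)
{
	/* The fan-out in rio_inb_pwrite_handler() currently ignores the
	 * return value; context is whatever was registered below.
	 */
	pr_debug("PW on mport_%d comptag 0x%08x\n",
		 mport->id, msg->em.comptag);
	return 0;
}

static int my_attach(struct rio_mport *mport, void *ctx)
{
	int rc = rio_add_mport_pw_handler(mport, ctx, my_pw_handler);

	if (rc)
		return rc;
	rio_pw_enable(mport, 1);	/* refcounted; enables on 0 -> 1 */
	return 0;
}

static void my_detach(struct rio_mport *mport, void *ctx)
{
	rio_pw_enable(mport, 0);	/* disables when refcount hits 0 */
	rio_del_mport_pw_handler(mport, ctx, my_pw_handler);
}
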
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index 2d0550e08ea2..625d09add001 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -28,6 +28,7 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
u8 hopcount);
extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
+extern void rio_remove_sysfs_dev_files(struct rio_dev *rdev);
extern int rio_lock_device(struct rio_mport *port, u16 destid,
u8 hopcount, int wait_ms);
extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount);
@@ -38,7 +39,11 @@ extern int rio_route_get_entry(struct rio_dev *rdev, u16 table,
extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock);
extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
+extern struct rio_net *rio_alloc_net(struct rio_mport *mport);
+extern int rio_add_net(struct rio_net *net);
+extern void rio_free_net(struct rio_net *net);
extern int rio_add_device(struct rio_dev *rdev);
+extern void rio_del_device(struct rio_dev *rdev, enum rio_device_state state);
extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u8 port_num);
extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 8155e80dd3f8..c77dc08b1202 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -78,6 +78,15 @@ config REGULATOR_ACT8865
	  This driver controls an active-semi act8865 voltage output
regulator via I2C bus.
+config REGULATOR_ACT8945A
+ tristate "Active-semi ACT8945A voltage regulator"
+ depends on MFD_ACT8945A
+ help
+	  This driver controls an active-semi ACT8945A voltage regulator
+	  via I2C bus. The ACT8945A features three step-down DC/DC converters
+	  and four low-dropout linear regulators, along with an ActivePath
+	  battery charger.
+
config REGULATOR_AD5398
tristate "Analog Devices AD5398/AD5821 regulators"
depends on I2C
@@ -261,6 +270,14 @@ config REGULATOR_HI6421
21 general purpose LDOs, 3 dedicated LDOs, and 5 BUCKs. All
of them come with support to either ECO (idle) or sleep mode.
+config REGULATOR_HI655X
+ tristate "Hisilicon HI655X PMIC regulators support"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on MFD_HI655X_PMIC && OF
+ help
+ This driver provides support for the voltage regulators of the
+ Hisilicon Hi655x PMIC device.
+
config REGULATOR_ISL9305
tristate "Intersil ISL9305 regulator"
depends on I2C
@@ -343,6 +360,15 @@ config REGULATOR_MAX1586
regulator via I2C bus. The provided regulator is suitable
for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX77620
+ tristate "Maxim 77620/MAX20024 voltage regulator"
+ depends on MFD_MAX77620
+ help
+	  This driver controls the Maxim MAX77620 voltage output regulator
+	  via I2C bus. The provided regulators are suitable for Tegra
+	  chips to control step-down DC-DC converters and LDOs. Say Y here
+	  to enable the regulator driver.
+
config REGULATOR_MAX8649
tristate "Maxim 8649 voltage regulator"
depends on I2C
@@ -762,7 +788,7 @@ config REGULATOR_TPS65910
config REGULATOR_TPS65912
tristate "TI TPS65912 Power regulator"
- depends on (MFD_TPS65912_I2C || MFD_TPS65912_SPI)
+ depends on MFD_TPS65912
help
This driver supports TPS65912 voltage regulator chip.
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 980b1943fa81..61bfbb9d4a0c 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o
+obj-$(CONFIG_REGULATOR_ACT8945A) += act8945a-regulator.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
@@ -34,6 +35,7 @@ obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
obj-$(CONFIG_REGULATOR_HI6421) += hi6421-regulator.o
+obj-$(CONFIG_REGULATOR_HI655X) += hi655x-regulator.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_ISL9305) += isl9305.o
obj-$(CONFIG_REGULATOR_LM363X) += lm363x-regulator.o
@@ -46,6 +48,7 @@ obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
+obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
@@ -54,9 +57,9 @@ obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
obj-$(CONFIG_REGULATOR_MAX8973) += max8973-regulator.o
obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
-obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
+obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
-obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
+obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
@@ -98,7 +101,7 @@ obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
-obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress.o
+obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index f8d4cd3d1397..000d566e32a4 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -218,7 +218,7 @@ static const struct regulator_desc act8600_regulators[] = {
.ops = &act8865_ldo_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = 1,
- .fixed_uV = 1800000,
+ .fixed_uV = 3300000,
.enable_reg = ACT8600_LDO910_CTRL,
.enable_mask = ACT8865_ENA,
.owner = THIS_MODULE,
@@ -369,7 +369,7 @@ static int act8865_pdata_from_dt(struct device *dev,
for (i = 0; i < num_matches; i++) {
regulator->id = i;
regulator->name = matches[i].name;
- regulator->platform_data = matches[i].init_data;
+ regulator->init_data = matches[i].init_data;
of_node[i] = matches[i].of_node;
regulator++;
}
@@ -396,7 +396,7 @@ static struct regulator_init_data
for (i = 0; i < pdata->num_regulators; i++) {
if (pdata->regulators[i].id == id)
- return pdata->regulators[i].platform_data;
+ return pdata->regulators[i].init_data;
}
return NULL;
@@ -415,7 +415,7 @@ static void act8865_power_off(void)
static int act8865_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
- static const struct regulator_desc *regulators;
+ const struct regulator_desc *regulators;
struct act8865_platform_data pdata_of, *pdata;
struct device *dev = &client->dev;
struct device_node **of_node;
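
The act8865 changes above rename the per-regulator platform-data member from platform_data to init_data, matching what it actually carries, so legacy board files need the same one-line change. A hedged sketch, assuming struct act8865_regulator_data in include/linux/regulator/act8865.h was renamed accordingly:

/* Hypothetical board file after the .platform_data -> .init_data rename. */
#include <linux/kernel.h>
#include <linux/regulator/act8865.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data board_dcdc1_data = {
	.constraints = {
		.min_uV = 1200000,
		.max_uV = 1200000,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
};

static struct act8865_regulator_data board_act8865_regulators[] = {
	{
		.id = ACT8865_ID_DCDC1,
		.name = "vdd_1v2",
		.init_data = &board_dcdc1_data,	/* was .platform_data */
	},
};

static struct act8865_platform_data board_act8865_pdata = {
	.regulators	= board_act8865_regulators,
	.num_regulators	= ARRAY_SIZE(board_act8865_regulators),
};
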
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
new file mode 100644
index 000000000000..441864b9fece
--- /dev/null
+++ b/drivers/regulator/act8945a-regulator.c
@@ -0,0 +1,165 @@
+/*
+ * Voltage regulation driver for active-semi ACT8945A PMIC
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Wenyou Yang <wenyou.yang@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+/**
+ * ACT8945A Global Register Map.
+ */
+#define ACT8945A_SYS_MODE 0x00
+#define ACT8945A_SYS_CTRL 0x01
+#define ACT8945A_DCDC1_VSET1 0x20
+#define ACT8945A_DCDC1_VSET2 0x21
+#define ACT8945A_DCDC1_CTRL 0x22
+#define ACT8945A_DCDC2_VSET1 0x30
+#define ACT8945A_DCDC2_VSET2 0x31
+#define ACT8945A_DCDC2_CTRL 0x32
+#define ACT8945A_DCDC3_VSET1 0x40
+#define ACT8945A_DCDC3_VSET2 0x41
+#define ACT8945A_DCDC3_CTRL 0x42
+#define ACT8945A_LDO1_VSET 0x50
+#define ACT8945A_LDO1_CTRL 0x51
+#define ACT8945A_LDO2_VSET 0x54
+#define ACT8945A_LDO2_CTRL 0x55
+#define ACT8945A_LDO3_VSET 0x60
+#define ACT8945A_LDO3_CTRL 0x61
+#define ACT8945A_LDO4_VSET 0x64
+#define ACT8945A_LDO4_CTRL 0x65
+
+/**
+ * Field Definitions.
+ */
+#define ACT8945A_ENA 0x80 /* ON - [7] */
+#define ACT8945A_VSEL_MASK 0x3F /* VSET - [5:0] */
+
+/**
+ * ACT8945A Voltage Number
+ */
+#define ACT8945A_VOLTAGE_NUM 64
+
+enum {
+ ACT8945A_ID_DCDC1,
+ ACT8945A_ID_DCDC2,
+ ACT8945A_ID_DCDC3,
+ ACT8945A_ID_LDO1,
+ ACT8945A_ID_LDO2,
+ ACT8945A_ID_LDO3,
+ ACT8945A_ID_LDO4,
+ ACT8945A_REG_NUM,
+};
+
+static const struct regulator_linear_range act8945a_voltage_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
+ REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
+ REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
+};
+
+static struct regulator_ops act8945a_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+};
+
+#define ACT89xx_REG(_name, _family, _id, _vsel_reg, _supply) \
+ [_family##_ID_##_id] = { \
+ .name = _name, \
+ .supply_name = _supply, \
+ .of_match = of_match_ptr("REG_"#_id), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = _family##_ID_##_id, \
+ .type = REGULATOR_VOLTAGE, \
+ .ops = &act8945a_ops, \
+ .n_voltages = ACT8945A_VOLTAGE_NUM, \
+ .linear_ranges = act8945a_voltage_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(act8945a_voltage_ranges), \
+ .vsel_reg = _family##_##_id##_##_vsel_reg, \
+ .vsel_mask = ACT8945A_VSEL_MASK, \
+ .enable_reg = _family##_##_id##_CTRL, \
+ .enable_mask = ACT8945A_ENA, \
+ .owner = THIS_MODULE, \
+ }
+
+static const struct regulator_desc act8945a_regulators[] = {
+ ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET1, "vp1"),
+ ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET1, "vp2"),
+ ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET1, "vp3"),
+ ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"),
+ ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"),
+ ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"),
+ ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"),
+};
+
+static const struct regulator_desc act8945a_alt_regulators[] = {
+ ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET2, "vp1"),
+ ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET2, "vp2"),
+ ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET2, "vp3"),
+ ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"),
+ ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"),
+ ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"),
+ ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"),
+};
+
+static int act8945a_pmic_probe(struct platform_device *pdev)
+{
+ struct regulator_config config = { };
+ const struct regulator_desc *regulators;
+ struct regulator_dev *rdev;
+ int i, num_regulators;
+ bool voltage_select;
+
+ voltage_select = of_property_read_bool(pdev->dev.parent->of_node,
+ "active-semi,vsel-high");
+
+ if (voltage_select) {
+ regulators = act8945a_alt_regulators;
+ num_regulators = ARRAY_SIZE(act8945a_alt_regulators);
+ } else {
+ regulators = act8945a_regulators;
+ num_regulators = ARRAY_SIZE(act8945a_regulators);
+ }
+
+ config.dev = &pdev->dev;
+ config.dev->of_node = pdev->dev.parent->of_node;
+ for (i = 0; i < num_regulators; i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i], &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev,
+ "failed to register %s regulator\n",
+ regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver act8945a_pmic_driver = {
+ .driver = {
+ .name = "act8945a-regulator",
+ },
+ .probe = act8945a_pmic_probe,
+};
+module_platform_driver(act8945a_pmic_driver);
+
+MODULE_DESCRIPTION("Active-semi ACT8945A voltage regulator driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL");
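
The three REGULATOR_LINEAR_RANGE entries above split the 6-bit VSET field into 25 mV, 50 mV and 100 mV segments. A rough standalone model (plain C; `struct linear_range` here is a local stand-in, not the kernel type) of how a selector resolves against that table, the way regulator_list_voltage_linear_range() would:

#include <stdio.h>

struct linear_range { unsigned min_uV, min_sel, max_sel, step_uV; };

static const struct linear_range ranges[] = {
	{  600000,  0, 23,  25000 },
	{ 1200000, 24, 47,  50000 },
	{ 2400000, 48, 63, 100000 },
};

static int sel_to_uV(unsigned sel)
{
	unsigned i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (sel >= ranges[i].min_sel && sel <= ranges[i].max_sel)
			return ranges[i].min_uV +
			       (sel - ranges[i].min_sel) * ranges[i].step_uV;
	return -1;	/* selector out of range */
}

int main(void)
{
	/* expected: 600000 1200000 3900000 */
	printf("%d %d %d\n", sel_to_uV(0), sel_to_uV(24), sel_to_uV(63));
	return 0;
}
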
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index ea50a886ba63..8b0f788a9bbb 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -58,10 +58,12 @@ static int ad5398_write_reg(struct i2c_client *client, const unsigned short data
val = cpu_to_be16(data);
ret = i2c_master_send(client, (char *)&val, 2);
- if (ret < 0)
+ if (ret != 2) {
dev_err(&client->dev, "I2C write error\n");
+ return ret < 0 ? ret : -EIO;
+ }
- return ret;
+ return 0;
}
static int ad5398_get_current_limit(struct regulator_dev *rdev)
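
The fix relies on the i2c_master_send() convention of returning the number of bytes transferred or a negative errno, so a short write has to be converted into an error explicitly instead of being passed up as a positive count. A userspace sketch of that calling convention (fake_send() is a stand-in for the bus transfer):

#include <errno.h>
#include <stdio.h>

static int fake_send(const char *buf, int len)
{
	(void)buf;
	return len - 1;		/* simulate a short write */
}

static int write_reg(const char *buf, int len)
{
	int ret = fake_send(buf, len);

	if (ret != len) {
		fprintf(stderr, "I2C write error\n");
		return ret < 0 ? ret : -EIO;	/* short write -> -EIO */
	}
	return 0;
}

int main(void)
{
	printf("write_reg -> %d (-EIO is %d)\n", write_reg("ab", 2), -EIO);
	return 0;
}
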
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f2e1a39ce0f3..40cd894e4df5 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -39,7 +39,7 @@
#define AXP_DESC_IO(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask, _enable_val, _disable_val) \
[_family##_##_id] = { \
- .name = #_id, \
+ .name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
@@ -61,7 +61,7 @@
#define AXP_DESC(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask) \
[_family##_##_id] = { \
- .name = #_id, \
+ .name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
@@ -78,21 +78,15 @@
.ops = &axp20x_ops, \
}
-#define AXP_DESC_SW(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
- _vmask, _ereg, _emask) \
+#define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask) \
[_family##_##_id] = { \
- .name = #_id, \
+ .name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = _family##_##_id, \
- .n_voltages = (((_max) - (_min)) / (_step) + 1), \
.owner = THIS_MODULE, \
- .min_uV = (_min) * 1000, \
- .uV_step = (_step) * 1000, \
- .vsel_reg = (_vreg), \
- .vsel_mask = (_vmask), \
.enable_reg = (_ereg), \
.enable_mask = (_emask), \
.ops = &axp20x_ops_sw, \
@@ -100,7 +94,7 @@
#define AXP_DESC_FIXED(_family, _id, _match, _supply, _volt) \
[_family##_##_id] = { \
- .name = #_id, \
+ .name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
@@ -112,39 +106,34 @@
.ops = &axp20x_ops_fixed \
}
-#define AXP_DESC_TABLE(_family, _id, _match, _supply, _table, _vreg, _vmask, \
- _ereg, _emask) \
+#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages, \
+ _vreg, _vmask, _ereg, _emask) \
[_family##_##_id] = { \
- .name = #_id, \
+ .name = (_match), \
.supply_name = (_supply), \
.of_match = of_match_ptr(_match), \
.regulators_node = of_match_ptr("regulators"), \
.type = REGULATOR_VOLTAGE, \
.id = _family##_##_id, \
- .n_voltages = ARRAY_SIZE(_table), \
+ .n_voltages = (_n_voltages), \
.owner = THIS_MODULE, \
.vsel_reg = (_vreg), \
.vsel_mask = (_vmask), \
.enable_reg = (_ereg), \
.enable_mask = (_emask), \
- .volt_table = (_table), \
- .ops = &axp20x_ops_table, \
+ .linear_ranges = (_ranges), \
+ .n_linear_ranges = ARRAY_SIZE(_ranges), \
+ .ops = &axp20x_ops_range, \
}
-static const int axp20x_ldo4_data[] = { 1250000, 1300000, 1400000, 1500000, 1600000,
- 1700000, 1800000, 1900000, 2000000, 2500000,
- 2700000, 2800000, 3000000, 3100000, 3200000,
- 3300000 };
-
static struct regulator_ops axp20x_ops_fixed = {
.list_voltage = regulator_list_voltage_linear,
};
-static struct regulator_ops axp20x_ops_table = {
+static struct regulator_ops axp20x_ops_range = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_table,
- .map_voltage = regulator_map_voltage_ascend,
+ .list_voltage = regulator_list_voltage_linear_range,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -160,13 +149,17 @@ static struct regulator_ops axp20x_ops = {
};
static struct regulator_ops axp20x_ops_sw = {
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .list_voltage = regulator_list_voltage_linear,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
};
+static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1250000, 0x0, 0x0, 0),
+ REGULATOR_LINEAR_RANGE(1300000, 0x1, 0x8, 100000),
+ REGULATOR_LINEAR_RANGE(2500000, 0x9, 0xf, 100000),
+};
+
static const struct regulator_desc axp20x_regulators[] = {
AXP_DESC(AXP20X, DCDC2, "dcdc2", "vin2", 700, 2275, 25,
AXP20X_DCDC2_V_OUT, 0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
@@ -177,8 +170,9 @@ static const struct regulator_desc axp20x_regulators[] = {
AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25,
AXP20X_LDO3_V_OUT, 0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
- AXP_DESC_TABLE(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
- AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
+ AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_ranges,
+ 16, AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL,
+ 0x08),
AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
@@ -196,8 +190,8 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
/* secondary switchable output of DCDC1 */
- AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL, 1600, 3400, 100,
- AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
+ AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL, AXP22X_PWR_OUT_CTRL2,
+ BIT(7)),
/* LDO regulator internally chained to DCDC5 */
AXP_DESC(AXP22X, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
AXP22X_DC5LDO_V_OUT, 0x7, AXP22X_PWR_OUT_CTRL1, BIT(0)),
@@ -244,6 +238,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
step = 75;
break;
case AXP221_ID:
+ case AXP223_ID:
min = 1800;
max = 4050;
def = 3000;
@@ -322,6 +317,7 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work
break;
case AXP221_ID:
+ case AXP223_ID:
if (id < AXP22X_DCDC1 || id > AXP22X_DCDC5)
return -EINVAL;
@@ -360,6 +356,7 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
nregulators = AXP20X_REG_ID_MAX;
break;
case AXP221_ID:
+ case AXP223_ID:
regulators = axp22x_regulators;
nregulators = AXP22X_REG_ID_MAX;
break;
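
Converting LDO4 from a voltage table to linear ranges keeps the same register encoding but describes it piecewise. A standalone enumeration of the map the new axp20x_ldo4_ranges define, assuming the usual REGULATOR_LINEAR_RANGE semantics (note the step from 2.0 V at selector 0x8 to 2.5 V at 0x9, which a single linear range could not express):

#include <stdio.h>

struct range { unsigned min_uV, min_sel, max_sel, step_uV; };

static const struct range ldo4[] = {
	{ 1250000, 0x0, 0x0,      0 },
	{ 1300000, 0x1, 0x8, 100000 },
	{ 2500000, 0x9, 0xf, 100000 },
};

int main(void)
{
	unsigned sel, i;

	for (sel = 0; sel <= 0xf; sel++)
		for (i = 0; i < sizeof(ldo4) / sizeof(ldo4[0]); i++)
			if (sel >= ldo4[i].min_sel && sel <= ldo4[i].max_sel)
				printf("sel 0x%x -> %u uV\n", sel,
				       ldo4[i].min_uV +
				       (sel - ldo4[i].min_sel) * ldo4[i].step_uV);
	return 0;
}
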
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 744c9889f88d..e0b764284773 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1057,18 +1057,18 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = machine_constraints_voltage(rdev, rdev->constraints);
if (ret != 0)
- goto out;
+ return ret;
ret = machine_constraints_current(rdev, rdev->constraints);
if (ret != 0)
- goto out;
+ return ret;
if (rdev->constraints->ilim_uA && ops->set_input_current_limit) {
ret = ops->set_input_current_limit(rdev,
rdev->constraints->ilim_uA);
if (ret < 0) {
rdev_err(rdev, "failed to set input limit\n");
- goto out;
+ return ret;
}
}
@@ -1077,21 +1077,20 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
- goto out;
+ return ret;
}
}
if (rdev->constraints->initial_mode) {
if (!ops->set_mode) {
rdev_err(rdev, "no set_mode operation\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
if (ret < 0) {
rdev_err(rdev, "failed to set initial mode: %d\n", ret);
- goto out;
+ return ret;
}
}
@@ -1102,7 +1101,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = _regulator_do_enable(rdev);
if (ret < 0 && ret != -EINVAL) {
rdev_err(rdev, "failed to enable\n");
- goto out;
+ return ret;
}
}
@@ -1111,7 +1110,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
if (ret < 0) {
rdev_err(rdev, "failed to set ramp_delay\n");
- goto out;
+ return ret;
}
}
@@ -1119,7 +1118,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->set_pull_down(rdev);
if (ret < 0) {
rdev_err(rdev, "failed to set pull down\n");
- goto out;
+ return ret;
}
}
@@ -1127,7 +1126,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->set_soft_start(rdev);
if (ret < 0) {
rdev_err(rdev, "failed to set soft start\n");
- goto out;
+ return ret;
}
}
@@ -1136,16 +1135,23 @@ static int set_machine_constraints(struct regulator_dev *rdev,
ret = ops->set_over_current_protection(rdev);
if (ret < 0) {
rdev_err(rdev, "failed to set over current protection\n");
- goto out;
+ return ret;
+ }
+ }
+
+ if (rdev->constraints->active_discharge && ops->set_active_discharge) {
+ bool ad_state = (rdev->constraints->active_discharge ==
+ REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false;
+
+ ret = ops->set_active_discharge(rdev, ad_state);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to set active discharge\n");
+ return ret;
}
}
print_constraints(rdev);
return 0;
-out:
- kfree(rdev->constraints);
- rdev->constraints = NULL;
- return ret;
}
/**
@@ -3918,6 +3935,16 @@ regulator_register(const struct regulator_desc *regulator_desc,
goto clean;
}
+ if ((config->ena_gpio || config->ena_gpio_initialized) &&
+ gpio_is_valid(config->ena_gpio)) {
+ ret = regulator_ena_gpio_request(rdev, config);
+ if (ret != 0) {
+ rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
+ config->ena_gpio, ret);
+ goto clean;
+ }
+ }
+
/* register with sysfs */
rdev->dev.class = &regulator_class;
rdev->dev.parent = dev;
@@ -3926,21 +3953,11 @@ regulator_register(const struct regulator_desc *regulator_desc,
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
- goto clean;
+ goto wash;
}
dev_set_drvdata(&rdev->dev, rdev);
- if ((config->ena_gpio || config->ena_gpio_initialized) &&
- gpio_is_valid(config->ena_gpio)) {
- ret = regulator_ena_gpio_request(rdev, config);
- if (ret != 0) {
- rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
- config->ena_gpio, ret);
- goto wash;
- }
- }
-
/* set regulator constraints */
if (init_data)
constraints = &init_data->constraints;
@@ -3979,13 +3996,13 @@ unset_supplies:
scrub:
regulator_ena_gpio_free(rdev);
- kfree(rdev->constraints);
-wash:
device_unregister(&rdev->dev);
/* device core frees rdev */
rdev = ERR_PTR(ret);
goto out;
+wash:
+ regulator_ena_gpio_free(rdev);
clean:
kfree(rdev);
rdev = ERR_PTR(ret);
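
Requesting the enable GPIO before device_register() is what forces the new label ordering: `wash` releases only the GPIO because the device never went live, while `scrub` still tears down a registered device. A minimal standalone sketch of this acquire-in-order, release-in-reverse unwind (label names mirror the patch; the helpers are hypothetical):

#include <stdio.h>

static int acquire_gpio(void)  { puts("gpio requested");  return 0; }
static void release_gpio(void) { puts("gpio freed"); }
static int register_dev(void)  { puts("register failed"); return -1; }

static int do_register(void)
{
	int ret;

	ret = acquire_gpio();
	if (ret)
		goto clean;

	ret = register_dev();
	if (ret)
		goto wash;	/* device not live: free only the GPIO */

	return 0;

wash:
	release_gpio();
clean:
	return ret;
}

int main(void)
{
	printf("do_register -> %d\n", do_register());
	return 0;
}
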
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 8b3cc9f0cd64..01c0e3709b66 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -132,6 +132,8 @@ static irqreturn_t da9210_irq_handler(int irq, void *data)
if (error < 0)
goto error_i2c;
+ mutex_lock(&chip->rdev->mutex);
+
if (val & DA9210_E_OVCURR) {
regulator_notifier_call_chain(chip->rdev,
REGULATOR_EVENT_OVER_CURRENT,
@@ -155,6 +157,9 @@ static irqreturn_t da9210_irq_handler(int irq, void *data)
NULL);
handled |= DA9210_E_VMAX;
}
+
+ mutex_unlock(&chip->rdev->mutex);
+
if (handled) {
/* Clear handled events */
error = regmap_write(chip->regmap, DA9210_REG_EVENT_B, handled);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 4940e8287df6..2cb5cc311610 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -114,6 +114,22 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
return 0;
}
+static int fan53555_set_suspend_enable(struct regulator_dev *rdev)
+{
+ struct fan53555_device_info *di = rdev_get_drvdata(rdev);
+
+ return regmap_update_bits(di->regmap, di->sleep_reg,
+ VSEL_BUCK_EN, VSEL_BUCK_EN);
+}
+
+static int fan53555_set_suspend_disable(struct regulator_dev *rdev)
+{
+ struct fan53555_device_info *di = rdev_get_drvdata(rdev);
+
+ return regmap_update_bits(di->regmap, di->sleep_reg,
+ VSEL_BUCK_EN, 0);
+}
+
static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct fan53555_device_info *di = rdev_get_drvdata(rdev);
@@ -192,6 +208,8 @@ static struct regulator_ops fan53555_regulator_ops = {
.set_mode = fan53555_set_mode,
.get_mode = fan53555_get_mode,
.set_ramp_delay = fan53555_set_ramp,
+ .set_suspend_enable = fan53555_set_suspend_enable,
+ .set_suspend_disable = fan53555_set_suspend_disable,
};
static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
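
Both new suspend ops are thin regmap_update_bits() wrappers that flip a single enable bit in the sleep-mode VSEL register. A userspace model of the read-modify-write they perform (the bit position and register contents below are illustrative, not taken from the FAN53555 datasheet):

#include <stdio.h>

#define VSEL_BUCK_EN (1u << 7)	/* assumption: enable bit in the VSEL reg */

static unsigned sleep_reg = 0x2a;	/* pretend register contents */

static void update_bits(unsigned *reg, unsigned mask, unsigned val)
{
	*reg = (*reg & ~mask) | (val & mask);	/* only masked bits change */
}

int main(void)
{
	update_bits(&sleep_reg, VSEL_BUCK_EN, VSEL_BUCK_EN);	/* suspend-enable */
	printf("enabled:  0x%02x\n", sleep_reg);
	update_bits(&sleep_reg, VSEL_BUCK_EN, 0);		/* suspend-disable */
	printf("disabled: 0x%02x\n", sleep_reg);
	return 0;
}
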
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 7bba8b747f30..a8718e98674a 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -283,8 +283,10 @@ static int gpio_regulator_probe(struct platform_device *pdev)
drvdata->nr_gpios = config->nr_gpios;
ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
if (ret) {
- dev_err(&pdev->dev,
- "Could not obtain regulator setting GPIOs: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Could not obtain regulator setting GPIOs: %d\n",
+ ret);
goto err_memstate;
}
}
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index 3bbb32680a94..b1e32e7482e9 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -465,3 +465,26 @@ int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
return 0;
}
EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
+
+/**
+ * regulator_set_active_discharge_regmap - Default set_active_discharge()
+ * using regmap
+ *
+ * @rdev: device to operate on.
+ * @enable: state to set, 0 to disable and 1 to enable.
+ */
+int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
+ bool enable)
+{
+ unsigned int val;
+
+ if (enable)
+ val = rdev->desc->active_discharge_on;
+ else
+ val = rdev->desc->active_discharge_off;
+
+ return regmap_update_bits(rdev->regmap,
+ rdev->desc->active_discharge_reg,
+ rdev->desc->active_discharge_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_set_active_discharge_regmap);
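
A standalone model of the helper's logic: the descriptor carries the register, the field mask and the encoded on/off values, and the helper only chooses between them before a masked write. All numeric values below are made up for illustration:

#include <stdio.h>

struct desc { unsigned reg, mask, on, off; };

static unsigned regs[0x100];	/* simulated register file */

static int set_active_discharge(const struct desc *d, int enable)
{
	unsigned val = enable ? d->on : d->off;

	regs[d->reg] = (regs[d->reg] & ~d->mask) | (val & d->mask);
	return 0;
}

int main(void)
{
	/* hypothetical field: bit 6 of register 0x22 */
	struct desc d = { .reg = 0x22, .mask = 0x40, .on = 0x40, .off = 0 };

	set_active_discharge(&d, 1);
	printf("reg 0x22 = 0x%02x\n", regs[0x22]);
	return 0;
}
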
diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c
new file mode 100644
index 000000000000..aca18466f522
--- /dev/null
+++ b/drivers/regulator/hi655x-regulator.c
@@ -0,0 +1,227 @@
+/*
+ * Device driver for regulators in Hi655x IC
+ *
+ * Copyright (c) 2016 Hisilicon.
+ *
+ * Authors:
+ * Chen Feng <puck.chen@hisilicon.com>
+ * Fei Wang <w.f@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/hi655x-pmic.h>
+
+struct hi655x_regulator {
+ unsigned int disable_reg;
+ unsigned int status_reg;
+ unsigned int ctrl_regs;
+ unsigned int ctrl_mask;
+ struct regulator_desc rdesc;
+};
+
+/* LDO7 & LDO10 */
+static const unsigned int ldo7_voltages[] = {
+ 1800000, 1850000, 2850000, 2900000,
+ 3000000, 3100000, 3200000, 3300000,
+};
+
+static const unsigned int ldo19_voltages[] = {
+ 1800000, 1850000, 1900000, 1750000,
+ 2800000, 2850000, 2900000, 3000000,
+};
+
+static const unsigned int ldo22_voltages[] = {
+ 900000, 1000000, 1050000, 1100000,
+ 1150000, 1175000, 1185000, 1200000,
+};
+
+enum hi655x_regulator_id {
+ HI655X_LDO0,
+ HI655X_LDO1,
+ HI655X_LDO2,
+ HI655X_LDO3,
+ HI655X_LDO4,
+ HI655X_LDO5,
+ HI655X_LDO6,
+ HI655X_LDO7,
+ HI655X_LDO8,
+ HI655X_LDO9,
+ HI655X_LDO10,
+ HI655X_LDO11,
+ HI655X_LDO12,
+ HI655X_LDO13,
+ HI655X_LDO14,
+ HI655X_LDO15,
+ HI655X_LDO16,
+ HI655X_LDO17,
+ HI655X_LDO18,
+ HI655X_LDO19,
+ HI655X_LDO20,
+ HI655X_LDO21,
+ HI655X_LDO22,
+};
+
+static int hi655x_is_enabled(struct regulator_dev *rdev)
+{
+ unsigned int value = 0;
+
+ struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+
+ regmap_read(rdev->regmap, regulator->status_reg, &value);
+ return (value & BIT(regulator->ctrl_mask));
+}
+
+static int hi655x_disable(struct regulator_dev *rdev)
+{
+ int ret = 0;
+
+ struct hi655x_regulator *regulator = rdev_get_drvdata(rdev);
+
+ ret = regmap_write(rdev->regmap, regulator->disable_reg,
+ BIT(regulator->ctrl_mask));
+ return ret;
+}
+
+static struct regulator_ops hi655x_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = hi655x_disable,
+ .is_enabled = hi655x_is_enabled,
+ .list_voltage = regulator_list_voltage_table,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static struct regulator_ops hi655x_ldo_linear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = hi655x_disable,
+ .is_enabled = hi655x_is_enabled,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+#define HI655X_LDO(_ID, vreg, vmask, ereg, dreg, \
+ sreg, cmask, vtable) { \
+ .rdesc = { \
+ .name = #_ID, \
+ .of_match = of_match_ptr(#_ID), \
+ .ops = &hi655x_regulator_ops, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI655X_##_ID, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(vtable), \
+ .volt_table = vtable, \
+ .vsel_reg = HI655X_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI655X_BUS_ADDR(ereg), \
+ .enable_mask = BIT(cmask), \
+ }, \
+ .disable_reg = HI655X_BUS_ADDR(dreg), \
+ .status_reg = HI655X_BUS_ADDR(sreg), \
+ .ctrl_mask = cmask, \
+}
+
+#define HI655X_LDO_LINEAR(_ID, vreg, vmask, ereg, dreg, \
+ sreg, cmask, minv, nvolt, vstep) { \
+ .rdesc = { \
+ .name = #_ID, \
+ .of_match = of_match_ptr(#_ID), \
+ .ops = &hi655x_ldo_linear_ops, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = HI655X_##_ID, \
+ .owner = THIS_MODULE, \
+ .min_uV = minv, \
+ .n_voltages = nvolt, \
+ .uV_step = vstep, \
+ .vsel_reg = HI655X_BUS_ADDR(vreg), \
+ .vsel_mask = vmask, \
+ .enable_reg = HI655X_BUS_ADDR(ereg), \
+ .enable_mask = BIT(cmask), \
+ }, \
+ .disable_reg = HI655X_BUS_ADDR(dreg), \
+ .status_reg = HI655X_BUS_ADDR(sreg), \
+ .ctrl_mask = cmask, \
+}
+
+static struct hi655x_regulator regulators[] = {
+ HI655X_LDO_LINEAR(LDO2, 0x72, 0x07, 0x29, 0x2a, 0x2b, 0x01,
+ 2500000, 8, 100000),
+ HI655X_LDO(LDO7, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x06, ldo7_voltages),
+ HI655X_LDO(LDO10, 0x78, 0x07, 0x29, 0x2a, 0x2b, 0x01, ldo7_voltages),
+ HI655X_LDO_LINEAR(LDO13, 0x7e, 0x07, 0x2c, 0x2d, 0x2e, 0x04,
+ 1600000, 8, 50000),
+ HI655X_LDO_LINEAR(LDO14, 0x7f, 0x07, 0x2c, 0x2d, 0x2e, 0x05,
+ 2500000, 8, 100000),
+ HI655X_LDO_LINEAR(LDO15, 0x80, 0x07, 0x2c, 0x2d, 0x2e, 0x06,
+ 1600000, 8, 50000),
+ HI655X_LDO_LINEAR(LDO17, 0x82, 0x07, 0x2f, 0x30, 0x31, 0x00,
+ 2500000, 8, 100000),
+ HI655X_LDO(LDO19, 0x84, 0x07, 0x2f, 0x30, 0x31, 0x02, ldo19_voltages),
+ HI655X_LDO_LINEAR(LDO21, 0x86, 0x07, 0x2f, 0x30, 0x31, 0x04,
+ 1650000, 8, 50000),
+ HI655X_LDO(LDO22, 0x87, 0x07, 0x2f, 0x30, 0x31, 0x05, ldo22_voltages),
+};
+
+static int hi655x_regulator_probe(struct platform_device *pdev)
+{
+ unsigned int i;
+ struct hi655x_regulator *regulator;
+ struct hi655x_pmic *pmic;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ pmic = dev_get_drvdata(pdev->dev.parent);
+ if (!pmic) {
+ dev_err(&pdev->dev, "no pmic in the regulator parent node\n");
+ return -ENODEV;
+ }
+
+ regulator = devm_kzalloc(&pdev->dev, sizeof(*regulator), GFP_KERNEL);
+ if (!regulator)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, regulator);
+
+ config.dev = pdev->dev.parent;
+ config.regmap = pmic->regmap;
+ config.driver_data = regulator;
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev,
+ &regulators[i].rdesc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+				regulators[i].rdesc.name);
+ return PTR_ERR(rdev);
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver hi655x_regulator_driver = {
+ .driver = {
+ .name = "hi655x-regulator",
+ },
+ .probe = hi655x_regulator_probe,
+};
+module_platform_driver(hi655x_regulator_driver);
+
+MODULE_AUTHOR("Chen Feng <puck.chen@hisilicon.com>");
+MODULE_DESCRIPTION("Hisilicon Hi655x regulator driver");
+MODULE_LICENSE("GPL v2");
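
The driver mixes the two selector mappings the regulator core supports: HI655X_LDO uses an explicit volt_table for rails with irregular steps, while HI655X_LDO_LINEAR computes min_uV + sel * uV_step. A small standalone comparison using the LDO7 table and the LDO2 linear parameters from above:

#include <stdio.h>

static const unsigned ldo7_voltages[] = {
	1800000, 1850000, 2850000, 2900000,
	3000000, 3100000, 3200000, 3300000,
};

static unsigned table_uV(unsigned sel)  { return ldo7_voltages[sel & 7]; }
static unsigned linear_uV(unsigned sel) { return 2500000 + (sel & 7) * 100000; }

int main(void)
{
	unsigned sel;

	for (sel = 0; sel < 8; sel++)
		printf("sel %u: table %u uV, linear (LDO2) %u uV\n",
		       sel, table_uV(sel), linear_uV(sel));
	return 0;
}
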
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 19d758486553..38992112fd6e 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -15,6 +15,7 @@
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/delay.h>
#include <linux/regulator/lp872x.h>
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
@@ -738,10 +739,8 @@ static int lp872x_init_dvs(struct lp872x *lp)
goto set_default_dvs_mode;
gpio = dvs->gpio;
- if (!gpio_is_valid(gpio)) {
- dev_warn(lp->dev, "invalid gpio: %d\n", gpio);
+ if (!gpio_is_valid(gpio))
goto set_default_dvs_mode;
- }
pinstate = dvs->init_state;
ret = devm_gpio_request_one(lp->dev, gpio, pinstate, "LP872X DVS");
@@ -759,6 +758,33 @@ set_default_dvs_mode:
default_dvs_mode[lp->chipid]);
}
+static int lp872x_hw_enable(struct lp872x *lp)
+{
+ int ret, gpio;
+
+ if (!lp->pdata)
+ return -EINVAL;
+
+ gpio = lp->pdata->enable_gpio;
+ if (!gpio_is_valid(gpio))
+ return 0;
+
+ /* Always set enable GPIO high. */
+ ret = devm_gpio_request_one(lp->dev, gpio, GPIOF_OUT_INIT_HIGH, "LP872X EN");
+ if (ret) {
+ dev_err(lp->dev, "gpio request err: %d\n", ret);
+ return ret;
+ }
+
+ /* Each chip has a different enable delay. */
+ if (lp->chipid == LP8720)
+ usleep_range(LP8720_ENABLE_DELAY, 1.5 * LP8720_ENABLE_DELAY);
+ else
+ usleep_range(LP8725_ENABLE_DELAY, 1.5 * LP8725_ENABLE_DELAY);
+
+ return 0;
+}
+
static int lp872x_config(struct lp872x *lp)
{
struct lp872x_platform_data *pdata = lp->pdata;
@@ -877,6 +903,8 @@ static struct lp872x_platform_data
of_property_read_u8(np, "ti,dvs-state", &dvs_state);
pdata->dvs->init_state = dvs_state ? DVS_HIGH : DVS_LOW;
+ pdata->enable_gpio = of_get_named_gpio(np, "enable-gpios", 0);
+
if (of_get_child_count(np) == 0)
goto out;
@@ -950,6 +978,10 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
lp->chipid = id->driver_data;
i2c_set_clientdata(cl, lp);
+ ret = lp872x_hw_enable(lp);
+ if (ret)
+ return ret;
+
ret = lp872x_config(lp);
if (ret)
return ret;
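
The enable path sleeps with a (min, 1.5 * min) window, the usual usleep_range() idiom of giving the scheduler slack above the datasheet minimum. A trivial sketch of computing that window with integer math (the delay value is hypothetical, not the LP8720 figure):

#include <stdio.h>

static void sleep_range(unsigned min_us, unsigned max_us)
{
	printf("sleep between %u and %u us\n", min_us, max_us);
}

int main(void)
{
	unsigned enable_delay = 200;	/* hypothetical enable delay, in us */

	sleep_range(enable_delay, enable_delay * 3 / 2);
	return 0;
}
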
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 972c386b2690..47bef328fb58 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -520,12 +520,15 @@ static int ltc3589_probe(struct i2c_client *client,
}
}
- ret = devm_request_threaded_irq(dev, client->irq, NULL, ltc3589_isr,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- client->name, ltc3589);
- if (ret) {
- dev_err(dev, "Failed to request IRQ: %d\n", ret);
- return ret;
+ if (client->irq) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ ltc3589_isr,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, ltc3589);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ: %d\n", ret);
+ return ret;
+ }
}
return 0;
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
new file mode 100644
index 000000000000..73a3356a5c19
--- /dev/null
+++ b/drivers/regulator/max77620-regulator.c
@@ -0,0 +1,813 @@
+/*
+ * Maxim MAX77620 Regulator driver
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Mallikarjun Kasoju <mkasoju@nvidia.com>
+ * Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/mfd/max77620.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define max77620_rails(_name) "max77620-"#_name
+
+/* Power Mode */
+#define MAX77620_POWER_MODE_NORMAL 3
+#define MAX77620_POWER_MODE_LPM 2
+#define MAX77620_POWER_MODE_GLPM 1
+#define MAX77620_POWER_MODE_DISABLE 0
+
+/* SD Slew Rate */
+#define MAX77620_SD_SR_13_75 0
+#define MAX77620_SD_SR_27_5 1
+#define MAX77620_SD_SR_55 2
+#define MAX77620_SD_SR_100 3
+
+enum max77620_regulators {
+ MAX77620_REGULATOR_ID_SD0,
+ MAX77620_REGULATOR_ID_SD1,
+ MAX77620_REGULATOR_ID_SD2,
+ MAX77620_REGULATOR_ID_SD3,
+ MAX77620_REGULATOR_ID_SD4,
+ MAX77620_REGULATOR_ID_LDO0,
+ MAX77620_REGULATOR_ID_LDO1,
+ MAX77620_REGULATOR_ID_LDO2,
+ MAX77620_REGULATOR_ID_LDO3,
+ MAX77620_REGULATOR_ID_LDO4,
+ MAX77620_REGULATOR_ID_LDO5,
+ MAX77620_REGULATOR_ID_LDO6,
+ MAX77620_REGULATOR_ID_LDO7,
+ MAX77620_REGULATOR_ID_LDO8,
+ MAX77620_NUM_REGS,
+};
+
+/* Regulator types */
+enum max77620_regulator_type {
+ MAX77620_REGULATOR_TYPE_SD,
+ MAX77620_REGULATOR_TYPE_LDO_N,
+ MAX77620_REGULATOR_TYPE_LDO_P,
+};
+
+struct max77620_regulator_info {
+ u8 type;
+ u8 fps_addr;
+ u8 volt_addr;
+ u8 cfg_addr;
+ u8 power_mode_mask;
+ u8 power_mode_shift;
+ u8 remote_sense_addr;
+ u8 remote_sense_mask;
+ struct regulator_desc desc;
+};
+
+struct max77620_regulator_pdata {
+ struct regulator_init_data *reg_idata;
+ int active_fps_src;
+ int active_fps_pd_slot;
+ int active_fps_pu_slot;
+ int suspend_fps_src;
+ int suspend_fps_pd_slot;
+ int suspend_fps_pu_slot;
+ int current_mode;
+};
+
+struct max77620_regulator {
+ struct device *dev;
+ struct regmap *rmap;
+ struct max77620_regulator_info *rinfo[MAX77620_NUM_REGS];
+ struct max77620_regulator_pdata reg_pdata[MAX77620_NUM_REGS];
+ int enable_power_mode[MAX77620_NUM_REGS];
+ int current_power_mode[MAX77620_NUM_REGS];
+ int active_fps_src[MAX77620_NUM_REGS];
+};
+
+#define fps_src_name(fps_src) \
+ (fps_src == MAX77620_FPS_SRC_0 ? "FPS_SRC_0" : \
+ fps_src == MAX77620_FPS_SRC_1 ? "FPS_SRC_1" : \
+ fps_src == MAX77620_FPS_SRC_2 ? "FPS_SRC_2" : "FPS_SRC_NONE")
+
+static int max77620_regulator_get_fps_src(struct max77620_regulator *pmic,
+ int id)
+{
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(pmic->rmap, rinfo->fps_addr, &val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Reg 0x%02x read failed %d\n",
+ rinfo->fps_addr, ret);
+ return ret;
+ }
+
+ return (val & MAX77620_FPS_SRC_MASK) >> MAX77620_FPS_SRC_SHIFT;
+}
+
+static int max77620_regulator_set_fps_src(struct max77620_regulator *pmic,
+ int fps_src, int id)
+{
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ unsigned int val;
+ int ret;
+
+ switch (fps_src) {
+ case MAX77620_FPS_SRC_0:
+ case MAX77620_FPS_SRC_1:
+ case MAX77620_FPS_SRC_2:
+ case MAX77620_FPS_SRC_NONE:
+ break;
+
+ case MAX77620_FPS_SRC_DEF:
+ ret = regmap_read(pmic->rmap, rinfo->fps_addr, &val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Reg 0x%02x read failed %d\n",
+ rinfo->fps_addr, ret);
+ return ret;
+ }
+ ret = (val & MAX77620_FPS_SRC_MASK) >> MAX77620_FPS_SRC_SHIFT;
+ pmic->active_fps_src[id] = ret;
+ return 0;
+
+ default:
+ dev_err(pmic->dev, "Invalid FPS %d for regulator %d\n",
+ fps_src, id);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(pmic->rmap, rinfo->fps_addr,
+ MAX77620_FPS_SRC_MASK,
+ fps_src << MAX77620_FPS_SRC_SHIFT);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Reg 0x%02x update failed %d\n",
+ rinfo->fps_addr, ret);
+ return ret;
+ }
+ pmic->active_fps_src[id] = fps_src;
+
+ return 0;
+}
+
+static int max77620_regulator_set_fps_slots(struct max77620_regulator *pmic,
+ int id, bool is_suspend)
+{
+ struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ unsigned int val = 0;
+ unsigned int mask = 0;
+ int pu = rpdata->active_fps_pu_slot;
+ int pd = rpdata->active_fps_pd_slot;
+ int ret = 0;
+
+ if (is_suspend) {
+ pu = rpdata->suspend_fps_pu_slot;
+ pd = rpdata->suspend_fps_pd_slot;
+ }
+
+ /* FPS power up period setting */
+ if (pu >= 0) {
+ val |= (pu << MAX77620_FPS_PU_PERIOD_SHIFT);
+ mask |= MAX77620_FPS_PU_PERIOD_MASK;
+ }
+
+ /* FPS power down period setting */
+ if (pd >= 0) {
+ val |= (pd << MAX77620_FPS_PD_PERIOD_SHIFT);
+ mask |= MAX77620_FPS_PD_PERIOD_MASK;
+ }
+
+ if (mask) {
+ ret = regmap_update_bits(pmic->rmap, rinfo->fps_addr,
+ mask, val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
+ rinfo->fps_addr, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int max77620_regulator_set_power_mode(struct max77620_regulator *pmic,
+ int power_mode, int id)
+{
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ u8 mask = rinfo->power_mode_mask;
+ u8 shift = rinfo->power_mode_shift;
+ u8 addr;
+ int ret;
+
+ switch (rinfo->type) {
+ case MAX77620_REGULATOR_TYPE_SD:
+ addr = rinfo->cfg_addr;
+ break;
+ default:
+ addr = rinfo->volt_addr;
+ break;
+ }
+
+ ret = regmap_update_bits(pmic->rmap, addr, mask, power_mode << shift);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Regulator %d mode set failed: %d\n",
+ id, ret);
+ return ret;
+ }
+ pmic->current_power_mode[id] = power_mode;
+
+ return ret;
+}
+
+static int max77620_regulator_get_power_mode(struct max77620_regulator *pmic,
+ int id)
+{
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ unsigned int val, addr;
+ u8 mask = rinfo->power_mode_mask;
+ u8 shift = rinfo->power_mode_shift;
+ int ret;
+
+ switch (rinfo->type) {
+ case MAX77620_REGULATOR_TYPE_SD:
+ addr = rinfo->cfg_addr;
+ break;
+ default:
+ addr = rinfo->volt_addr;
+ break;
+ }
+
+ ret = regmap_read(pmic->rmap, addr, &val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Regulator %d: Reg 0x%02x read failed: %d\n",
+ id, addr, ret);
+ return ret;
+ }
+
+ return (val & mask) >> shift;
+}
+
+static int max77620_read_slew_rate(struct max77620_regulator *pmic, int id)
+{
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ unsigned int rval;
+ int slew_rate;
+ int ret;
+
+ ret = regmap_read(pmic->rmap, rinfo->cfg_addr, &rval);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Register 0x%02x read failed: %d\n",
+ rinfo->cfg_addr, ret);
+ return ret;
+ }
+
+ switch (rinfo->type) {
+ case MAX77620_REGULATOR_TYPE_SD:
+ slew_rate = (rval >> MAX77620_SD_SR_SHIFT) & 0x3;
+ switch (slew_rate) {
+ case 0:
+ slew_rate = 13750;
+ break;
+ case 1:
+ slew_rate = 27500;
+ break;
+ case 2:
+ slew_rate = 55000;
+ break;
+ case 3:
+ slew_rate = 100000;
+ break;
+ }
+ rinfo->desc.ramp_delay = slew_rate;
+ break;
+ default:
+ slew_rate = rval & 0x1;
+ switch (slew_rate) {
+ case 0:
+ slew_rate = 100000;
+ break;
+ case 1:
+ slew_rate = 5000;
+ break;
+ }
+ rinfo->desc.ramp_delay = slew_rate;
+ break;
+ }
+
+ return 0;
+}
+
+static int max77620_init_pmic(struct max77620_regulator *pmic, int id)
+{
+ struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
+ int ret;
+
+ /* Update power mode */
+ ret = max77620_regulator_get_power_mode(pmic, id);
+ if (ret < 0)
+ return ret;
+
+ pmic->current_power_mode[id] = ret;
+ pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
+
+ if (rpdata->active_fps_src == MAX77620_FPS_SRC_DEF) {
+ ret = max77620_regulator_get_fps_src(pmic, id);
+ if (ret < 0)
+ return ret;
+ rpdata->active_fps_src = ret;
+ }
+
+	/* If the rail is externally controlled via FPS, keep it always enabled. */
+ if (rpdata->active_fps_src == MAX77620_FPS_SRC_NONE) {
+ ret = max77620_regulator_set_power_mode(pmic,
+ pmic->enable_power_mode[id], id);
+ if (ret < 0)
+ return ret;
+ } else {
+ if (pmic->current_power_mode[id] !=
+ pmic->enable_power_mode[id]) {
+ ret = max77620_regulator_set_power_mode(pmic,
+ pmic->enable_power_mode[id], id);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ ret = max77620_regulator_set_fps_src(pmic, rpdata->active_fps_src, id);
+ if (ret < 0)
+ return ret;
+
+ ret = max77620_regulator_set_fps_slots(pmic, id, false);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int max77620_regulator_enable(struct regulator_dev *rdev)
+{
+ struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+
+ if (pmic->active_fps_src[id] != MAX77620_FPS_SRC_NONE)
+ return 0;
+
+ return max77620_regulator_set_power_mode(pmic,
+ pmic->enable_power_mode[id], id);
+}
+
+static int max77620_regulator_disable(struct regulator_dev *rdev)
+{
+ struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+
+ if (pmic->active_fps_src[id] != MAX77620_FPS_SRC_NONE)
+ return 0;
+
+ return max77620_regulator_set_power_mode(pmic,
+ MAX77620_POWER_MODE_DISABLE, id);
+}
+
+static int max77620_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret = 1;
+
+ if (pmic->active_fps_src[id] != MAX77620_FPS_SRC_NONE)
+ return 1;
+
+ ret = max77620_regulator_get_power_mode(pmic, id);
+ if (ret < 0)
+ return ret;
+
+ if (ret != MAX77620_POWER_MODE_DISABLE)
+ return 1;
+
+ return 0;
+}
+
+static int max77620_regulator_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
+ bool fpwm = false;
+ int power_mode;
+ int ret;
+ u8 val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ fpwm = true;
+ power_mode = MAX77620_POWER_MODE_NORMAL;
+ break;
+
+ case REGULATOR_MODE_NORMAL:
+ power_mode = MAX77620_POWER_MODE_NORMAL;
+ break;
+
+ case REGULATOR_MODE_IDLE:
+ power_mode = MAX77620_POWER_MODE_LPM;
+ break;
+
+ default:
+ dev_err(pmic->dev, "Regulator %d mode %d is invalid\n",
+ id, mode);
+ return -EINVAL;
+ }
+
+ if (rinfo->type != MAX77620_REGULATOR_TYPE_SD)
+ goto skip_fpwm;
+
+ val = (fpwm) ? MAX77620_SD_FPWM_MASK : 0;
+ ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr,
+ MAX77620_SD_FPWM_MASK, val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
+ rinfo->cfg_addr, ret);
+ return ret;
+ }
+ rpdata->current_mode = mode;
+
+skip_fpwm:
+ ret = max77620_regulator_set_power_mode(pmic, power_mode, id);
+ if (ret < 0)
+ return ret;
+
+ pmic->enable_power_mode[id] = power_mode;
+
+ return 0;
+}
+
+static unsigned int max77620_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ int fpwm = 0;
+ int ret;
+ int pm_mode, reg_mode;
+ unsigned int val;
+
+ ret = max77620_regulator_get_power_mode(pmic, id);
+ if (ret < 0)
+ return 0;
+
+ pm_mode = ret;
+
+ if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
+ ret = regmap_read(pmic->rmap, rinfo->cfg_addr, &val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Reg 0x%02x read failed: %d\n",
+ rinfo->cfg_addr, ret);
+ return ret;
+ }
+ fpwm = !!(val & MAX77620_SD_FPWM_MASK);
+ }
+
+ switch (pm_mode) {
+ case MAX77620_POWER_MODE_NORMAL:
+ case MAX77620_POWER_MODE_DISABLE:
+ if (fpwm)
+ reg_mode = REGULATOR_MODE_FAST;
+ else
+ reg_mode = REGULATOR_MODE_NORMAL;
+ break;
+ case MAX77620_POWER_MODE_LPM:
+ case MAX77620_POWER_MODE_GLPM:
+ reg_mode = REGULATOR_MODE_IDLE;
+ break;
+ default:
+ return 0;
+ }
+
+ return reg_mode;
+}
+
+static int max77620_regulator_set_ramp_delay(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ int ret, val;
+ u8 mask;
+
+ if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
+ if (ramp_delay <= 13750)
+ val = 0;
+ else if (ramp_delay <= 27500)
+ val = 1;
+ else if (ramp_delay <= 55000)
+ val = 2;
+ else
+ val = 3;
+ val <<= MAX77620_SD_SR_SHIFT;
+ mask = MAX77620_SD_SR_MASK;
+ } else {
+ if (ramp_delay <= 5000)
+ val = 1;
+ else
+ val = 0;
+ mask = MAX77620_LDO_SLEW_RATE_MASK;
+ }
+
+ ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, mask, val);
+ if (ret < 0)
+ dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
+ rinfo->cfg_addr, ret);
+
+ return ret;
+}
+
+static int max77620_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct max77620_regulator *pmic = config->driver_data;
+ struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[desc->id];
+ u32 pval;
+ int ret;
+
+ ret = of_property_read_u32(np, "maxim,active-fps-source", &pval);
+ rpdata->active_fps_src = (!ret) ? pval : MAX77620_FPS_SRC_DEF;
+
+ ret = of_property_read_u32(np, "maxim,active-fps-power-up-slot", &pval);
+ rpdata->active_fps_pu_slot = (!ret) ? pval : -1;
+
+ ret = of_property_read_u32(
+ np, "maxim,active-fps-power-down-slot", &pval);
+ rpdata->active_fps_pd_slot = (!ret) ? pval : -1;
+
+ ret = of_property_read_u32(np, "maxim,suspend-fps-source", &pval);
+ rpdata->suspend_fps_src = (!ret) ? pval : -1;
+
+ ret = of_property_read_u32(
+ np, "maxim,suspend-fps-power-up-slot", &pval);
+ rpdata->suspend_fps_pu_slot = (!ret) ? pval : -1;
+
+ ret = of_property_read_u32(
+ np, "maxim,suspend-fps-power-down-slot", &pval);
+ rpdata->suspend_fps_pd_slot = (!ret) ? pval : -1;
+
+ return max77620_init_pmic(pmic, desc->id);
+}
+
+static struct regulator_ops max77620_regulator_ops = {
+ .is_enabled = max77620_regulator_is_enabled,
+ .enable = max77620_regulator_enable,
+ .disable = max77620_regulator_disable,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_mode = max77620_regulator_set_mode,
+ .get_mode = max77620_regulator_get_mode,
+ .set_ramp_delay = max77620_regulator_set_ramp_delay,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+#define MAX77620_SD_CNF2_ROVS_EN_NONE 0
+#define RAIL_SD(_id, _name, _sname, _volt_mask, _min_uV, _max_uV, \
+ _step_uV, _rs_add, _rs_mask) \
+ [MAX77620_REGULATOR_ID_##_id] = { \
+ .type = MAX77620_REGULATOR_TYPE_SD, \
+ .volt_addr = MAX77620_REG_##_id, \
+ .cfg_addr = MAX77620_REG_##_id##_CFG, \
+ .fps_addr = MAX77620_REG_FPS_##_id, \
+ .remote_sense_addr = _rs_add, \
+ .remote_sense_mask = MAX77620_SD_CNF2_ROVS_EN_##_rs_mask, \
+ .power_mode_mask = MAX77620_SD_POWER_MODE_MASK, \
+ .power_mode_shift = MAX77620_SD_POWER_MODE_SHIFT, \
+ .desc = { \
+ .name = max77620_rails(_name), \
+ .of_match = of_match_ptr(#_name), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .of_parse_cb = max77620_of_parse_cb, \
+ .supply_name = _sname, \
+ .id = MAX77620_REGULATOR_ID_##_id, \
+ .ops = &max77620_regulator_ops, \
+ .n_voltages = ((_max_uV - _min_uV) / _step_uV) + 1, \
+ .min_uV = _min_uV, \
+ .uV_step = _step_uV, \
+ .enable_time = 500, \
+ .vsel_mask = MAX77620_##_volt_mask##_VOLT_MASK, \
+ .vsel_reg = MAX77620_REG_##_id, \
+ .active_discharge_off = 0, \
+ .active_discharge_on = MAX77620_SD_CFG1_ADE_ENABLE, \
+ .active_discharge_mask = MAX77620_SD_CFG1_ADE_MASK, \
+ .active_discharge_reg = MAX77620_REG_##_id##_CFG, \
+ .type = REGULATOR_VOLTAGE, \
+ }, \
+ }
+
+#define RAIL_LDO(_id, _name, _sname, _type, _min_uV, _max_uV, _step_uV) \
+ [MAX77620_REGULATOR_ID_##_id] = { \
+ .type = MAX77620_REGULATOR_TYPE_LDO_##_type, \
+ .volt_addr = MAX77620_REG_##_id##_CFG, \
+ .cfg_addr = MAX77620_REG_##_id##_CFG2, \
+ .fps_addr = MAX77620_REG_FPS_##_id, \
+ .remote_sense_addr = 0xFF, \
+ .power_mode_mask = MAX77620_LDO_POWER_MODE_MASK, \
+ .power_mode_shift = MAX77620_LDO_POWER_MODE_SHIFT, \
+ .desc = { \
+ .name = max77620_rails(_name), \
+ .of_match = of_match_ptr(#_name), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .of_parse_cb = max77620_of_parse_cb, \
+ .supply_name = _sname, \
+ .id = MAX77620_REGULATOR_ID_##_id, \
+ .ops = &max77620_regulator_ops, \
+ .n_voltages = ((_max_uV - _min_uV) / _step_uV) + 1, \
+ .min_uV = _min_uV, \
+ .uV_step = _step_uV, \
+ .enable_time = 500, \
+ .vsel_mask = MAX77620_LDO_VOLT_MASK, \
+ .vsel_reg = MAX77620_REG_##_id##_CFG, \
+ .active_discharge_off = 0, \
+ .active_discharge_on = MAX77620_LDO_CFG2_ADE_ENABLE, \
+ .active_discharge_mask = MAX77620_LDO_CFG2_ADE_MASK, \
+ .active_discharge_reg = MAX77620_REG_##_id##_CFG2, \
+ .type = REGULATOR_VOLTAGE, \
+ }, \
+ }
+
+static struct max77620_regulator_info max77620_regs_info[MAX77620_NUM_REGS] = {
+ RAIL_SD(SD0, sd0, "in-sd0", SD0, 600000, 1400000, 12500, 0x22, SD0),
+ RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
+ RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+ RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+ RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+
+ RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
+ RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
+ RAIL_LDO(LDO2, ldo2, "in-ldo2", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO3, ldo3, "in-ldo3-5", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO4, ldo4, "in-ldo4-6", P, 800000, 1587500, 12500),
+ RAIL_LDO(LDO5, ldo5, "in-ldo3-5", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO6, ldo6, "in-ldo4-6", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO7, ldo7, "in-ldo7-8", N, 800000, 3950000, 50000),
+ RAIL_LDO(LDO8, ldo8, "in-ldo7-8", N, 800000, 3950000, 50000),
+};
+
+static struct max77620_regulator_info max20024_regs_info[MAX77620_NUM_REGS] = {
+ RAIL_SD(SD0, sd0, "in-sd0", SD0, 800000, 1587500, 12500, 0x22, SD0),
+ RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 3387500, 12500, 0x22, SD1),
+ RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+ RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+ RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
+
+ RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
+ RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
+ RAIL_LDO(LDO2, ldo2, "in-ldo2", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO3, ldo3, "in-ldo3-5", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO4, ldo4, "in-ldo4-6", P, 800000, 1587500, 12500),
+ RAIL_LDO(LDO5, ldo5, "in-ldo3-5", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO6, ldo6, "in-ldo4-6", P, 800000, 3950000, 50000),
+ RAIL_LDO(LDO7, ldo7, "in-ldo7-8", N, 800000, 3950000, 50000),
+ RAIL_LDO(LDO8, ldo8, "in-ldo7-8", N, 800000, 3950000, 50000),
+};
+
+static int max77620_regulator_probe(struct platform_device *pdev)
+{
+ struct max77620_chip *max77620_chip = dev_get_drvdata(pdev->dev.parent);
+ struct max77620_regulator_info *rinfo;
+ struct device *dev = &pdev->dev;
+ struct regulator_config config = { };
+ struct max77620_regulator *pmic;
+ int ret = 0;
+ int id;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pmic);
+ pmic->dev = dev;
+ pmic->rmap = max77620_chip->rmap;
+ if (!dev->of_node)
+ dev->of_node = pdev->dev.parent->of_node;
+
+ switch (max77620_chip->chip_id) {
+ case MAX77620:
+ rinfo = max77620_regs_info;
+ break;
+ default:
+ rinfo = max20024_regs_info;
+ break;
+ }
+
+ config.regmap = pmic->rmap;
+ config.dev = dev;
+ config.driver_data = pmic;
+
+ for (id = 0; id < MAX77620_NUM_REGS; id++) {
+ struct regulator_dev *rdev;
+ struct regulator_desc *rdesc;
+
+ if ((max77620_chip->chip_id == MAX77620) &&
+ (id == MAX77620_REGULATOR_ID_SD4))
+ continue;
+
+ rdesc = &rinfo[id].desc;
+ pmic->rinfo[id] = &max77620_regs_info[id];
+ pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL;
+
+ ret = max77620_read_slew_rate(pmic, id);
+ if (ret < 0)
+ return ret;
+
+ rdev = devm_regulator_register(dev, rdesc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "Regulator registration %s failed: %d\n",
+ rdesc->name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max77620_regulator_suspend(struct device *dev)
+{
+ struct max77620_regulator *pmic = dev_get_drvdata(dev);
+ struct max77620_regulator_pdata *reg_pdata;
+ int id;
+
+ for (id = 0; id < MAX77620_NUM_REGS; id++) {
+ reg_pdata = &pmic->reg_pdata[id];
+
+ max77620_regulator_set_fps_slots(pmic, id, true);
+ if (reg_pdata->suspend_fps_src < 0)
+ continue;
+
+ max77620_regulator_set_fps_src(pmic, reg_pdata->suspend_fps_src,
+ id);
+ }
+
+ return 0;
+}
+
+static int max77620_regulator_resume(struct device *dev)
+{
+ struct max77620_regulator *pmic = dev_get_drvdata(dev);
+ struct max77620_regulator_pdata *reg_pdata;
+ int id;
+
+ for (id = 0; id < MAX77620_NUM_REGS; id++) {
+ reg_pdata = &pmic->reg_pdata[id];
+
+ max77620_regulator_set_fps_slots(pmic, id, false);
+ if (reg_pdata->active_fps_src < 0)
+ continue;
+ max77620_regulator_set_fps_src(pmic, reg_pdata->active_fps_src,
+ id);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops max77620_regulator_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(max77620_regulator_suspend,
+ max77620_regulator_resume)
+};
+
+static const struct platform_device_id max77620_regulator_devtype[] = {
+ { .name = "max77620-pmic", },
+ { .name = "max20024-pmic", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, max77620_regulator_devtype);
+
+static struct platform_driver max77620_regulator_driver = {
+ .probe = max77620_regulator_probe,
+ .id_table = max77620_regulator_devtype,
+ .driver = {
+ .name = "max77620-pmic",
+ .pm = &max77620_regulator_pm_ops,
+ },
+};
+
+module_platform_driver(max77620_regulator_driver);
+
+MODULE_DESCRIPTION("MAX77620/MAX20024 regulator driver");
+MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
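
max77620_regulator_set_ramp_delay() rounds the requested SD ramp up to the nearest supported slew rate and encodes it as the 2-bit SR field. A standalone model of that quantisation (thresholds taken from the driver above; the encoding is otherwise an assumption):

#include <stdio.h>

static unsigned encode_sd_ramp(int ramp_uV_per_us)
{
	if (ramp_uV_per_us <= 13750)
		return 0;	/* 13.75 mV/us */
	else if (ramp_uV_per_us <= 27500)
		return 1;	/* 27.5 mV/us */
	else if (ramp_uV_per_us <= 55000)
		return 2;	/* 55 mV/us */
	return 3;		/* 100 mV/us */
}

int main(void)
{
	printf("27500 -> %u, 60000 -> %u\n",
	       encode_sd_ramp(27500), encode_sd_ramp(60000));
	return 0;
}
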
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686-regulator.c
index 17ccf365a9c0..17ccf365a9c0 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686-regulator.c
diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802-regulator.c
index c07ee13bd470..c07ee13bd470 100644
--- a/drivers/regulator/max77802.c
+++ b/drivers/regulator/max77802-regulator.c
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
index a5b2f4762677..17a5b6c2d6a9 100644
--- a/drivers/regulator/mt6397-regulator.c
+++ b/drivers/regulator/mt6397-regulator.c
@@ -317,11 +317,25 @@ static int mt6397_regulator_probe(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id mt6397_platform_ids[] = {
+ {"mt6397-regulator", 0},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6397_platform_ids);
+
+static const struct of_device_id mt6397_of_match[] = {
+ { .compatible = "mediatek,mt6397-regulator", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt6397_of_match);
+
static struct platform_driver mt6397_regulator_driver = {
.driver = {
.name = "mt6397-regulator",
+ .of_match_table = of_match_ptr(mt6397_of_match),
},
.probe = mt6397_regulator_probe,
+ .id_table = mt6397_platform_ids,
};
module_platform_driver(mt6397_regulator_driver);
@@ -329,4 +343,3 @@ module_platform_driver(mt6397_regulator_driver);
MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:mt6397-regulator");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 499e437c7e91..6b0aa80b22fd 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -28,7 +28,6 @@ static void of_get_regulation_constraints(struct device_node *np,
struct regulator_init_data **init_data,
const struct regulator_desc *desc)
{
- const __be32 *min_uV, *max_uV;
struct regulation_constraints *constraints = &(*init_data)->constraints;
struct regulator_state *suspend_state;
struct device_node *suspend_np;
@@ -37,18 +36,18 @@ static void of_get_regulation_constraints(struct device_node *np,
constraints->name = of_get_property(np, "regulator-name", NULL);
- min_uV = of_get_property(np, "regulator-min-microvolt", NULL);
- if (min_uV)
- constraints->min_uV = be32_to_cpu(*min_uV);
- max_uV = of_get_property(np, "regulator-max-microvolt", NULL);
- if (max_uV)
- constraints->max_uV = be32_to_cpu(*max_uV);
+ if (!of_property_read_u32(np, "regulator-min-microvolt", &pval))
+ constraints->min_uV = pval;
+
+ if (!of_property_read_u32(np, "regulator-max-microvolt", &pval))
+ constraints->max_uV = pval;
/* Voltage change possible? */
if (constraints->min_uV != constraints->max_uV)
constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
/* Only one voltage? Then make sure it's set. */
- if (min_uV && max_uV && constraints->min_uV == constraints->max_uV)
+ if (constraints->min_uV && constraints->max_uV &&
+ constraints->min_uV == constraints->max_uV)
constraints->apply_uV = true;
if (!of_property_read_u32(np, "regulator-microvolt-offset", &pval))
@@ -93,6 +92,12 @@ static void of_get_regulation_constraints(struct device_node *np,
constraints->soft_start = of_property_read_bool(np,
"regulator-soft-start");
+ ret = of_property_read_u32(np, "regulator-active-discharge", &pval);
+ if (!ret) {
+ constraints->active_discharge =
+ (pval) ? REGULATOR_ACTIVE_DISCHARGE_ENABLE :
+ REGULATOR_ACTIVE_DISCHARGE_DISABLE;
+ }
if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) {
if (desc && desc->of_map_mode) {
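
The conversion replaces open-coded of_get_property() plus be32_to_cpu() with of_property_read_u32(), which folds the presence check and the endian swap into one call. A userspace model of the difference (DT property values are big-endian, hence the ntohl(); read_u32() is a stand-in, not the kernel API):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int read_u32(const void *prop, uint32_t *out)
{
	if (!prop)
		return -1;	/* property absent */
	*out = ntohl(*(const uint32_t *)prop);	/* big-endian -> host */
	return 0;
}

int main(void)
{
	uint32_t raw = htonl(1800000), val;	/* e.g. regulator-min-microvolt */

	if (!read_u32(&raw, &val))
		printf("min_uV = %u\n", val);
	return 0;
}
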
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index 094376c8de4b..c448b727f5f8 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -285,8 +285,8 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
}
}
- err = regmap_update_bits(chip->regmap, PV88060_REG_EVENT_A,
- PV88060_E_VDD_FLT, PV88060_E_VDD_FLT);
+ err = regmap_write(chip->regmap, PV88060_REG_EVENT_A,
+ PV88060_E_VDD_FLT);
if (err < 0)
goto error_i2c;
@@ -302,8 +302,8 @@ static irqreturn_t pv88060_irq_handler(int irq, void *data)
}
}
- err = regmap_update_bits(chip->regmap, PV88060_REG_EVENT_A,
- PV88060_E_OVER_TEMP, PV88060_E_OVER_TEMP);
+ err = regmap_write(chip->regmap, PV88060_REG_EVENT_A,
+ PV88060_E_OVER_TEMP);
if (err < 0)
goto error_i2c;
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index ac15f31b5fe0..0057c6740d6f 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -283,8 +283,8 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
}
}
- err = regmap_update_bits(chip->regmap, PV88090_REG_EVENT_A,
- PV88090_E_VDD_FLT, PV88090_E_VDD_FLT);
+ err = regmap_write(chip->regmap, PV88090_REG_EVENT_A,
+ PV88090_E_VDD_FLT);
if (err < 0)
goto error_i2c;
@@ -300,8 +300,8 @@ static irqreturn_t pv88090_irq_handler(int irq, void *data)
}
}
- err = regmap_update_bits(chip->regmap, PV88090_REG_EVENT_A,
- PV88090_E_OVER_TEMP, PV88090_E_OVER_TEMP);
+ err = regmap_write(chip->regmap, PV88090_REG_EVENT_A,
+ PV88090_E_OVER_TEMP);
if (err < 0)
goto error_i2c;
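
Both PV88060 and PV88090 event registers are write-one-to-clear, which is why the fix swaps regmap_update_bits() for regmap_write(): the read-modify-write would read back every pending event bit and acknowledge events the handler never processed. A standalone model of the two behaviours against a simulated W1C register:

#include <stdio.h>

static unsigned event_a = 0x05;	/* two events pending */

static void w1c_write(unsigned val) { event_a &= ~val; }	/* W1C semantics */

static void w1c_update_bits(unsigned mask, unsigned val)
{
	unsigned tmp = event_a;	/* read back picks up ALL pending bits */

	w1c_write((tmp & ~mask) | (val & mask));
}

int main(void)
{
	w1c_write(0x01);			/* acks only event 0 */
	printf("after write:       0x%02x\n", event_a);	/* 0x04 */

	event_a = 0x05;
	w1c_update_bits(0x01, 0x01);		/* acks event 2 as well */
	printf("after update_bits: 0x%02x\n", event_a);	/* 0x00 */
	return 0;
}
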
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 3aca067b9901..4689d62f4841 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -27,6 +27,13 @@ struct pwm_regulator_data {
/* Voltage table */
struct pwm_voltages *duty_cycle_table;
+
+ /* regulator descriptor */
+ struct regulator_desc desc;
+
+ /* Regulator ops */
+ struct regulator_ops ops;
+
int state;
/* Continuous voltage */
@@ -115,7 +122,7 @@ static int pwm_voltage_to_duty_cycle_percentage(struct regulator_dev *rdev, int
int max_uV = rdev->constraints->max_uV;
int diff = max_uV - min_uV;
- return 100 - (((req_uV * 100) - (min_uV * 100)) / diff);
+ return ((req_uV * 100) - (min_uV * 100)) / diff;
}
static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
@@ -212,8 +219,10 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
}
drvdata->duty_cycle_table = duty_cycle_table;
- pwm_regulator_desc.ops = &pwm_regulator_voltage_table_ops;
- pwm_regulator_desc.n_voltages = length / sizeof(*duty_cycle_table);
+ memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops,
+ sizeof(drvdata->ops));
+ drvdata->desc.ops = &drvdata->ops;
+ drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table);
return 0;
}
@@ -221,8 +230,10 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
static int pwm_regulator_init_continuous(struct platform_device *pdev,
struct pwm_regulator_data *drvdata)
{
- pwm_regulator_desc.ops = &pwm_regulator_voltage_continuous_ops;
- pwm_regulator_desc.continuous_voltage_range = true;
+ memcpy(&drvdata->ops, &pwm_regulator_voltage_continuous_ops,
+ sizeof(drvdata->ops));
+ drvdata->desc.ops = &drvdata->ops;
+ drvdata->desc.continuous_voltage_range = true;
return 0;
}
@@ -245,6 +256,8 @@ static int pwm_regulator_probe(struct platform_device *pdev)
if (!drvdata)
return -ENOMEM;
+ memcpy(&drvdata->desc, &pwm_regulator_desc, sizeof(drvdata->desc));
+
if (of_find_property(np, "voltage-table", NULL))
ret = pwm_regulator_init_table(pdev, drvdata);
else
@@ -253,7 +266,7 @@ static int pwm_regulator_probe(struct platform_device *pdev)
return ret;
init_data = of_get_regulator_init_data(&pdev->dev, np,
- &pwm_regulator_desc);
+ &drvdata->desc);
if (!init_data)
return -ENOMEM;
@@ -269,10 +282,10 @@ static int pwm_regulator_probe(struct platform_device *pdev)
}
regulator = devm_regulator_register(&pdev->dev,
- &pwm_regulator_desc, &config);
+ &drvdata->desc, &config);
if (IS_ERR(regulator)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
- pwm_regulator_desc.name);
+ drvdata->desc.name);
return PTR_ERR(regulator);
}
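The pwm-regulator rework stops mutating the file-scope pwm_regulator_desc: each probe now copies the descriptor template and the relevant ops into its own drvdata, so a table-mode instance and a continuous-mode instance can coexist without overwriting each other's ops and n_voltages. The duty-cycle helper is also un-inverted; with min_uV = 1000000, max_uV = 2000000 and a 1250000 uV request it now returns (1250000*100 - 1000000*100) / 1000000 = 25, where the old code handed back 100 - 25 = 75. A sketch of the per-instance copy, with hypothetical names:

    struct foo_drvdata *dd;

    dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
    if (!dd)
        return -ENOMEM;

    /* Start from the shared, read-only template... */
    memcpy(&dd->desc, &foo_desc_template, sizeof(dd->desc));
    memcpy(&dd->ops, &foo_table_ops, sizeof(dd->ops));
    /* ...and point this instance's desc at its private ops copy. */
    dd->desc.ops = &dd->ops;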
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 3242ffc0cb25..d24e2c783dc5 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -38,7 +38,6 @@
/* The highest number of possible regulators for supported devices. */
#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
struct s2mps11_info {
- unsigned int rdev_num;
int ramp_delay2;
int ramp_delay34;
int ramp_delay5;
@@ -54,7 +53,10 @@ struct s2mps11_info {
*/
DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
- /* Array of size rdev_num with GPIO-s for external sleep control */
+ /*
+ * Array (size: number of regulators) with GPIO-s for external
+ * sleep control.
+ */
int *ext_control_gpio;
};
@@ -819,7 +821,8 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
}
static int s2mps11_pmic_dt_parse(struct platform_device *pdev,
- struct of_regulator_match *rdata, struct s2mps11_info *s2mps11)
+ struct of_regulator_match *rdata, struct s2mps11_info *s2mps11,
+ unsigned int rdev_num)
{
struct device_node *reg_np;
@@ -829,7 +832,7 @@ static int s2mps11_pmic_dt_parse(struct platform_device *pdev,
return -EINVAL;
}
- of_regulator_match(&pdev->dev, reg_np, rdata, s2mps11->rdev_num);
+ of_regulator_match(&pdev->dev, reg_np, rdata, rdev_num);
if (s2mps11->dev_type == S2MPS14X)
s2mps14_pmic_dt_parse_ext_control_gpio(pdev, rdata, s2mps11);
@@ -1077,6 +1080,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
struct of_regulator_match *rdata = NULL;
struct regulator_config config = { };
struct s2mps11_info *s2mps11;
+ unsigned int rdev_num = 0;
int i, ret = 0;
const struct regulator_desc *regulators;
@@ -1088,28 +1092,29 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
s2mps11->dev_type = platform_get_device_id(pdev)->driver_data;
switch (s2mps11->dev_type) {
case S2MPS11X:
- s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
+ rdev_num = ARRAY_SIZE(s2mps11_regulators);
regulators = s2mps11_regulators;
- BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps11_regulators));
break;
case S2MPS13X:
- s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
+ rdev_num = ARRAY_SIZE(s2mps13_regulators);
regulators = s2mps13_regulators;
- BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps13_regulators));
break;
case S2MPS14X:
- s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
+ rdev_num = ARRAY_SIZE(s2mps14_regulators);
regulators = s2mps14_regulators;
- BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps14_regulators));
break;
case S2MPS15X:
- s2mps11->rdev_num = ARRAY_SIZE(s2mps15_regulators);
+ rdev_num = ARRAY_SIZE(s2mps15_regulators);
regulators = s2mps15_regulators;
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mps15_regulators));
break;
case S2MPU02:
- s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
+ rdev_num = ARRAY_SIZE(s2mpu02_regulators);
regulators = s2mpu02_regulators;
- BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+ BUILD_BUG_ON(S2MPS_REGULATOR_MAX < ARRAY_SIZE(s2mpu02_regulators));
break;
default:
dev_err(&pdev->dev, "Invalid device type: %u\n",
@@ -1118,7 +1123,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
}
s2mps11->ext_control_gpio = devm_kmalloc(&pdev->dev,
- sizeof(*s2mps11->ext_control_gpio) * s2mps11->rdev_num,
+ sizeof(*s2mps11->ext_control_gpio) * rdev_num,
GFP_KERNEL);
if (!s2mps11->ext_control_gpio)
return -ENOMEM;
@@ -1126,7 +1131,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
* 0 is a valid GPIO so initialize all GPIO-s to negative value
* to indicate that external control won't be used for this regulator.
*/
- for (i = 0; i < s2mps11->rdev_num; i++)
+ for (i = 0; i < rdev_num; i++)
s2mps11->ext_control_gpio[i] = -EINVAL;
if (!iodev->dev->of_node) {
@@ -1140,14 +1145,14 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
}
}
- rdata = kzalloc(sizeof(*rdata) * s2mps11->rdev_num, GFP_KERNEL);
+ rdata = kzalloc(sizeof(*rdata) * rdev_num, GFP_KERNEL);
if (!rdata)
return -ENOMEM;
- for (i = 0; i < s2mps11->rdev_num; i++)
+ for (i = 0; i < rdev_num; i++)
rdata[i].name = regulators[i].name;
- ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11);
+ ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11, rdev_num);
if (ret)
goto out;
@@ -1159,7 +1164,7 @@ common_reg:
config.driver_data = s2mps11;
config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
config.ena_gpio_initialized = true;
- for (i = 0; i < s2mps11->rdev_num; i++) {
+ for (i = 0; i < rdev_num; i++) {
struct regulator_dev *regulator;
if (pdata) {
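Making rdev_num a probe-local (and passing it into the DT parse helper) also lets every BUILD_BUG_ON() take an ARRAY_SIZE() expression directly, which is a guaranteed compile-time constant, unlike the struct member the old code relied on the optimizer to fold. BUILD_BUG_ON(cond) breaks the build when cond is true, so the guard costs nothing at runtime. A generic sketch:

    #define MAX_SLOTS 8
    static const int table[] = { 1, 2, 3 };

    /* Fails to compile if table ever outgrows the fixed-size storage
     * (here: the suspend_state bitmap sized S2MPS_REGULATOR_MAX). */
    BUILD_BUG_ON(MAX_SLOTS < ARRAY_SIZE(table));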
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 58f5d3b8e981..27343e1c43ef 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
}
}
- if (i < s5m8767->num_regulators)
- *enable_ctrl =
- s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+ if (i >= s5m8767->num_regulators)
+ return -EINVAL;
+
+ *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
return 0;
}
@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
else
regulators[id].vsel_mask = 0xff;
- s5m8767_get_register(s5m8767, id, &enable_reg,
+ ret = s5m8767_get_register(s5m8767, id, &enable_reg,
&enable_val);
+ if (ret) {
+ dev_err(s5m8767->dev, "error reading registers\n");
+ return ret;
+ }
regulators[id].enable_reg = enable_reg;
regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
regulators[id].enable_val = enable_val;
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 9503d5481a52..a4921a70da55 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -1,541 +1,168 @@
/*
- * tps65912.c -- TI tps65912
+ * Regulator driver for TI TPS65912x PMICs
*
- * Copyright 2011 Texas Instruments Inc.
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*
- * Author: Margarita Olaya Cabrera <magi@slimlogic.co.uk>
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
*
- * This driver is based on wm8350 implementation.
+ * Based on the TPS65218 driver and the previous TPS65912 driver by
+ * Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/mfd/tps65912.h>
-
-/* DCDC's */
-#define TPS65912_REG_DCDC1 0
-#define TPS65912_REG_DCDC2 1
-#define TPS65912_REG_DCDC3 2
-#define TPS65912_REG_DCDC4 3
-
-/* LDOs */
-#define TPS65912_REG_LDO1 4
-#define TPS65912_REG_LDO2 5
-#define TPS65912_REG_LDO3 6
-#define TPS65912_REG_LDO4 7
-#define TPS65912_REG_LDO5 8
-#define TPS65912_REG_LDO6 9
-#define TPS65912_REG_LDO7 10
-#define TPS65912_REG_LDO8 11
-#define TPS65912_REG_LDO9 12
-#define TPS65912_REG_LDO10 13
-
-/* Number of step-down converters available */
-#define TPS65912_NUM_DCDC 4
-
-/* Number of LDO voltage regulators available */
-#define TPS65912_NUM_LDO 10
-/* Number of total regulators available */
-#define TPS65912_NUM_REGULATOR (TPS65912_NUM_DCDC + TPS65912_NUM_LDO)
-
-#define TPS65912_REG_ENABLED 0x80
-#define OP_SELREG_MASK 0x40
-#define OP_SELREG_SHIFT 6
-
-struct tps_info {
- const char *name;
-};
+#include <linux/mfd/tps65912.h>
-static struct tps_info tps65912_regs[] = {
- {
- .name = "DCDC1",
- },
- {
- .name = "DCDC2",
- },
- {
- .name = "DCDC3",
- },
- {
- .name = "DCDC4",
- },
- {
- .name = "LDO1",
- },
- {
- .name = "LDO2",
- },
- {
- .name = "LDO3",
- },
- {
- .name = "LDO4",
- },
- {
- .name = "LDO5",
- },
- {
- .name = "LDO6",
- },
- {
- .name = "LDO7",
- },
- {
- .name = "LDO8",
- },
- {
- .name = "LDO9",
- },
- {
- .name = "LDO10",
- },
-};
+enum tps65912_regulators { DCDC1, DCDC2, DCDC3, DCDC4, LDO1, LDO2, LDO3,
+ LDO4, LDO5, LDO6, LDO7, LDO8, LDO9, LDO10 };
+
+#define TPS65912_REGULATOR(_name, _id, _of_match, _ops, _vr, _er, _lr) \
+ [_id] = { \
+ .name = _name, \
+ .of_match = _of_match, \
+ .regulators_node = "regulators", \
+ .id = _id, \
+ .ops = &_ops, \
+ .n_voltages = 64, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = 0x3f, \
+ .enable_reg = _er, \
+ .enable_mask = BIT(7), \
+ .volt_table = NULL, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = ARRAY_SIZE(_lr), \
+ }
-struct tps65912_reg {
- struct regulator_desc desc[TPS65912_NUM_REGULATOR];
- struct tps65912 *mfd;
- struct regulator_dev *rdev[TPS65912_NUM_REGULATOR];
- struct tps_info *info[TPS65912_NUM_REGULATOR];
- /* for read/write access */
- struct mutex io_lock;
- int mode;
- int (*get_ctrl_reg)(int);
- int dcdc_range[TPS65912_NUM_DCDC];
- int pwm_mode_reg;
- int eco_reg;
+static const struct regulator_linear_range tps65912_dcdc_ranges[] = {
+ REGULATOR_LINEAR_RANGE(500000, 0x0, 0x3f, 50000),
};
static const struct regulator_linear_range tps65912_ldo_ranges[] = {
- REGULATOR_LINEAR_RANGE(800000, 0, 32, 25000),
- REGULATOR_LINEAR_RANGE(1650000, 33, 60, 50000),
- REGULATOR_LINEAR_RANGE(3100000, 61, 63, 100000),
+ REGULATOR_LINEAR_RANGE(800000, 0x0, 0x20, 25000),
+ REGULATOR_LINEAR_RANGE(1650000, 0x21, 0x3c, 50000),
+ REGULATOR_LINEAR_RANGE(3100000, 0x3d, 0x3f, 100000),
};
-static int tps65912_get_range(struct tps65912_reg *pmic, int id)
-{
- struct tps65912 *mfd = pmic->mfd;
- int range;
-
- switch (id) {
- case TPS65912_REG_DCDC1:
- range = tps65912_reg_read(mfd, TPS65912_DCDC1_LIMIT);
- break;
- case TPS65912_REG_DCDC2:
- range = tps65912_reg_read(mfd, TPS65912_DCDC2_LIMIT);
- break;
- case TPS65912_REG_DCDC3:
- range = tps65912_reg_read(mfd, TPS65912_DCDC3_LIMIT);
- break;
- case TPS65912_REG_DCDC4:
- range = tps65912_reg_read(mfd, TPS65912_DCDC4_LIMIT);
- break;
- default:
- return 0;
- }
-
- if (range >= 0)
- range = (range & DCDC_LIMIT_RANGE_MASK)
- >> DCDC_LIMIT_RANGE_SHIFT;
-
- pmic->dcdc_range[id] = range;
- return range;
-}
-
-static unsigned long tps65912_vsel_to_uv_range0(u8 vsel)
-{
- unsigned long uv;
-
- uv = ((vsel * 12500) + 500000);
- return uv;
-}
-
-static unsigned long tps65912_vsel_to_uv_range1(u8 vsel)
-{
- unsigned long uv;
-
- uv = ((vsel * 12500) + 700000);
- return uv;
-}
-
-static unsigned long tps65912_vsel_to_uv_range2(u8 vsel)
-{
- unsigned long uv;
-
- uv = ((vsel * 25000) + 500000);
- return uv;
-}
-
-static unsigned long tps65912_vsel_to_uv_range3(u8 vsel)
-{
- unsigned long uv;
-
- if (vsel == 0x3f)
- uv = 3800000;
- else
- uv = ((vsel * 50000) + 500000);
-
- return uv;
-}
-
-static int tps65912_get_ctrl_register(int id)
-{
- if (id >= TPS65912_REG_DCDC1 && id <= TPS65912_REG_LDO4)
- return id * 3 + TPS65912_DCDC1_AVS;
- else if (id >= TPS65912_REG_LDO5 && id <= TPS65912_REG_LDO10)
- return id - TPS65912_REG_LDO5 + TPS65912_LDO5;
- else
- return -EINVAL;
-}
-
-static int tps65912_get_sel_register(struct tps65912_reg *pmic, int id)
-{
- struct tps65912 *mfd = pmic->mfd;
- int opvsel;
- u8 reg = 0;
-
- if (id >= TPS65912_REG_DCDC1 && id <= TPS65912_REG_LDO4) {
- opvsel = tps65912_reg_read(mfd, id * 3 + TPS65912_DCDC1_OP);
- if (opvsel & OP_SELREG_MASK)
- reg = id * 3 + TPS65912_DCDC1_AVS;
- else
- reg = id * 3 + TPS65912_DCDC1_OP;
- } else if (id >= TPS65912_REG_LDO5 && id <= TPS65912_REG_LDO10) {
- reg = id - TPS65912_REG_LDO5 + TPS65912_LDO5;
- } else {
- return -EINVAL;
- }
-
- return reg;
-}
-
-static int tps65912_get_mode_regiters(struct tps65912_reg *pmic, int id)
-{
- switch (id) {
- case TPS65912_REG_DCDC1:
- pmic->pwm_mode_reg = TPS65912_DCDC1_CTRL;
- pmic->eco_reg = TPS65912_DCDC1_AVS;
- break;
- case TPS65912_REG_DCDC2:
- pmic->pwm_mode_reg = TPS65912_DCDC2_CTRL;
- pmic->eco_reg = TPS65912_DCDC2_AVS;
- break;
- case TPS65912_REG_DCDC3:
- pmic->pwm_mode_reg = TPS65912_DCDC3_CTRL;
- pmic->eco_reg = TPS65912_DCDC3_AVS;
- break;
- case TPS65912_REG_DCDC4:
- pmic->pwm_mode_reg = TPS65912_DCDC4_CTRL;
- pmic->eco_reg = TPS65912_DCDC4_AVS;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tps65912_reg_is_enabled(struct regulator_dev *dev)
-{
- struct tps65912_reg *pmic = rdev_get_drvdata(dev);
- struct tps65912 *mfd = pmic->mfd;
- int reg, value, id = rdev_get_id(dev);
-
- if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
- return -EINVAL;
-
- reg = pmic->get_ctrl_reg(id);
- if (reg < 0)
- return reg;
-
- value = tps65912_reg_read(mfd, reg);
- if (value < 0)
- return value;
-
- return value & TPS65912_REG_ENABLED;
-}
-
-static int tps65912_reg_enable(struct regulator_dev *dev)
-{
- struct tps65912_reg *pmic = rdev_get_drvdata(dev);
- struct tps65912 *mfd = pmic->mfd;
- int id = rdev_get_id(dev);
- int reg;
-
- if (id < TPS65912_REG_DCDC1 || id > TPS65912_REG_LDO10)
- return -EINVAL;
-
- reg = pmic->get_ctrl_reg(id);
- if (reg < 0)
- return reg;
-
- return tps65912_set_bits(mfd, reg, TPS65912_REG_ENABLED);
-}
-
-static int tps65912_reg_disable(struct regulator_dev *dev)
-{
- struct tps65912_reg *pmic = rdev_get_drvdata(dev);
- struct tps65912 *mfd = pmic->mfd;
- int id = rdev_get_id(dev), reg;
-
- reg = pmic->get_ctrl_reg(id);
- if (reg < 0)
- return reg;
-
- return tps65912_clear_bits(mfd, reg, TPS65912_REG_ENABLED);
-}
-
-static int tps65912_set_mode(struct regulator_dev *dev, unsigned int mode)
-{
- struct tps65912_reg *pmic = rdev_get_drvdata(dev);
- struct tps65912 *mfd = pmic->mfd;
- int pwm_mode, eco, id = rdev_get_id(dev);
-
- tps65912_get_mode_regiters(pmic, id);
-
- pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
- eco = tps65912_reg_read(mfd, pmic->eco_reg);
-
- pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
- eco &= DCDC_AVS_ECO_MASK;
-
- switch (mode) {
- case REGULATOR_MODE_FAST:
- /* Verify if mode alredy set */
- if (pwm_mode && !eco)
- break;
- tps65912_set_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
- tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
- break;
- case REGULATOR_MODE_NORMAL:
- case REGULATOR_MODE_IDLE:
- if (!pwm_mode && !eco)
- break;
- tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
- tps65912_clear_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
- break;
- case REGULATOR_MODE_STANDBY:
- if (!pwm_mode && eco)
- break;
- tps65912_clear_bits(mfd, pmic->pwm_mode_reg, DCDCCTRL_DCDC_MODE_MASK);
- tps65912_set_bits(mfd, pmic->eco_reg, DCDC_AVS_ECO_MASK);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static unsigned int tps65912_get_mode(struct regulator_dev *dev)
-{
- struct tps65912_reg *pmic = rdev_get_drvdata(dev);
- struct tps65912 *mfd = pmic->mfd;
- int pwm_mode, eco, mode = 0, id = rdev_get_id(dev);
-
- tps65912_get_mode_regiters(pmic, id);
-
- pwm_mode = tps65912_reg_read(mfd, pmic->pwm_mode_reg);
- eco = tps65912_reg_read(mfd, pmic->eco_reg);
-
- pwm_mode &= DCDCCTRL_DCDC_MODE_MASK;
- eco &= DCDC_AVS_ECO_MASK;
-
- if (pwm_mode && !eco)
- mode = REGULATOR_MODE_FAST;
- else if (!pwm_mode && !eco)
- mode = REGULATOR_MODE_NORMAL;
- else if (!pwm_mode && eco)
- mode = REGULATOR_MODE_STANDBY;
-
- return mode;
-}
-
-static int tps65912_list_voltage(struct regulator_dev *dev, unsigned selector)
-{
- struct tps65912_reg *pmic = rdev_get_drvdata(dev);
- int range, voltage = 0, id = rdev_get_id(dev);
-
- if (id > TPS65912_REG_DCDC4)
- return -EINVAL;
-
- range = pmic->dcdc_range[id];
-
- switch (range) {
- case 0:
- /* 0.5 - 1.2875V in 12.5mV steps */
- voltage = tps65912_vsel_to_uv_range0(selector);
- break;
- case 1:
- /* 0.7 - 1.4875V in 12.5mV steps */
- voltage = tps65912_vsel_to_uv_range1(selector);
- break;
- case 2:
- /* 0.5 - 2.075V in 25mV steps */
- voltage = tps65912_vsel_to_uv_range2(selector);
- break;
- case 3:
- /* 0.5 - 3.8V in 50mV steps */
- voltage = tps65912_vsel_to_uv_range3(selector);
- break;
- }
- return voltage;
-}
-
-static int tps65912_get_voltage_sel(struct regulator_dev *dev)
-{
- struct tps65912_reg *pmic = rdev_get_drvdata(dev);
- struct tps65912 *mfd = pmic->mfd;
- int id = rdev_get_id(dev);
- int reg, vsel;
-
- reg = tps65912_get_sel_register(pmic, id);
- if (reg < 0)
- return reg;
-
- vsel = tps65912_reg_read(mfd, reg);
- vsel &= 0x3F;
-
- return vsel;
-}
-
-static int tps65912_set_voltage_sel(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps65912_reg *pmic = rdev_get_drvdata(dev);
- struct tps65912 *mfd = pmic->mfd;
- int id = rdev_get_id(dev);
- int value;
- u8 reg;
-
- reg = tps65912_get_sel_register(pmic, id);
- value = tps65912_reg_read(mfd, reg);
- value &= 0xC0;
- return tps65912_reg_write(mfd, reg, selector | value);
-}
-
/* Operations permitted on DCDCx */
static struct regulator_ops tps65912_ops_dcdc = {
- .is_enabled = tps65912_reg_is_enabled,
- .enable = tps65912_reg_enable,
- .disable = tps65912_reg_disable,
- .set_mode = tps65912_set_mode,
- .get_mode = tps65912_get_mode,
- .get_voltage_sel = tps65912_get_voltage_sel,
- .set_voltage_sel = tps65912_set_voltage_sel,
- .list_voltage = tps65912_list_voltage,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
};
/* Operations permitted on LDOx */
static struct regulator_ops tps65912_ops_ldo = {
- .is_enabled = tps65912_reg_is_enabled,
- .enable = tps65912_reg_enable,
- .disable = tps65912_reg_disable,
- .get_voltage_sel = tps65912_get_voltage_sel,
- .set_voltage_sel = tps65912_set_voltage_sel,
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_desc regulators[] = {
+ TPS65912_REGULATOR("DCDC1", DCDC1, "dcdc1", tps65912_ops_dcdc,
+ TPS65912_DCDC1_OP, TPS65912_DCDC1_CTRL,
+ tps65912_dcdc_ranges),
+ TPS65912_REGULATOR("DCDC2", DCDC2, "dcdc2", tps65912_ops_dcdc,
+ TPS65912_DCDC2_OP, TPS65912_DCDC2_CTRL,
+ tps65912_dcdc_ranges),
+ TPS65912_REGULATOR("DCDC3", DCDC3, "dcdc3", tps65912_ops_dcdc,
+ TPS65912_DCDC3_OP, TPS65912_DCDC3_CTRL,
+ tps65912_dcdc_ranges),
+ TPS65912_REGULATOR("DCDC4", DCDC4, "dcdc4", tps65912_ops_dcdc,
+ TPS65912_DCDC4_OP, TPS65912_DCDC4_CTRL,
+ tps65912_dcdc_ranges),
+ TPS65912_REGULATOR("LDO1", LDO1, "ldo1", tps65912_ops_ldo,
+ TPS65912_LDO1_OP, TPS65912_LDO1_AVS,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO2", LDO2, "ldo2", tps65912_ops_ldo,
+ TPS65912_LDO2_OP, TPS65912_LDO2_AVS,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO3", LDO3, "ldo3", tps65912_ops_ldo,
+ TPS65912_LDO3_OP, TPS65912_LDO3_AVS,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO4", LDO4, "ldo4", tps65912_ops_ldo,
+ TPS65912_LDO4_OP, TPS65912_LDO4_AVS,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO5", LDO5, "ldo5", tps65912_ops_ldo,
+ TPS65912_LDO5, TPS65912_LDO5,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO6", LDO6, "ldo6", tps65912_ops_ldo,
+ TPS65912_LDO6, TPS65912_LDO6,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO7", LDO7, "ldo7", tps65912_ops_ldo,
+ TPS65912_LDO7, TPS65912_LDO7,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO8", LDO8, "ldo8", tps65912_ops_ldo,
+ TPS65912_LDO8, TPS65912_LDO8,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO9", LDO9, "ldo9", tps65912_ops_ldo,
+ TPS65912_LDO9, TPS65912_LDO9,
+ tps65912_ldo_ranges),
+ TPS65912_REGULATOR("LDO10", LDO10, "ldo10", tps65912_ops_ldo,
+ TPS65912_LDO10, TPS65912_LDO10,
+ tps65912_ldo_ranges),
};
-static int tps65912_probe(struct platform_device *pdev)
+static int tps65912_regulator_probe(struct platform_device *pdev)
{
- struct tps65912 *tps65912 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65912 *tps = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
- struct tps_info *info;
- struct regulator_init_data *reg_data;
struct regulator_dev *rdev;
- struct tps65912_reg *pmic;
- struct tps65912_board *pmic_plat_data;
int i;
- pmic_plat_data = dev_get_platdata(tps65912->dev);
- if (!pmic_plat_data)
- return -EINVAL;
+ platform_set_drvdata(pdev, tps);
- reg_data = pmic_plat_data->tps65912_pmic_init_data;
+ config.dev = &pdev->dev;
+ config.driver_data = tps;
+ config.dev->of_node = tps->dev->of_node;
+ config.regmap = tps->regmap;
- pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
- if (!pmic)
- return -ENOMEM;
-
- mutex_init(&pmic->io_lock);
- pmic->mfd = tps65912;
- platform_set_drvdata(pdev, pmic);
-
- pmic->get_ctrl_reg = &tps65912_get_ctrl_register;
- info = tps65912_regs;
-
- for (i = 0; i < TPS65912_NUM_REGULATOR; i++, info++, reg_data++) {
- int range = 0;
- /* Register the regulators */
- pmic->info[i] = info;
-
- pmic->desc[i].name = info->name;
- pmic->desc[i].id = i;
- pmic->desc[i].n_voltages = 64;
- if (i > TPS65912_REG_DCDC4) {
- pmic->desc[i].ops = &tps65912_ops_ldo;
- pmic->desc[i].linear_ranges = tps65912_ldo_ranges;
- pmic->desc[i].n_linear_ranges =
- ARRAY_SIZE(tps65912_ldo_ranges);
- } else {
- pmic->desc[i].ops = &tps65912_ops_dcdc;
- }
- pmic->desc[i].type = REGULATOR_VOLTAGE;
- pmic->desc[i].owner = THIS_MODULE;
- range = tps65912_get_range(pmic, i);
-
- config.dev = tps65912->dev;
- config.init_data = reg_data;
- config.driver_data = pmic;
-
- rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
&config);
if (IS_ERR(rdev)) {
- dev_err(tps65912->dev,
- "failed to register %s regulator\n",
+ dev_err(tps->dev, "failed to register %s regulator\n",
pdev->name);
return PTR_ERR(rdev);
}
-
- /* Save regulator for cleanup */
- pmic->rdev[i] = rdev;
}
+
return 0;
}
-static struct platform_driver tps65912_driver = {
+static const struct platform_device_id tps65912_regulator_id_table[] = {
+ { "tps65912-regulator", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps65912_regulator_id_table);
+
+static struct platform_driver tps65912_regulator_driver = {
.driver = {
- .name = "tps65912-pmic",
+ .name = "tps65912-regulator",
},
- .probe = tps65912_probe,
+ .probe = tps65912_regulator_probe,
+ .id_table = tps65912_regulator_id_table,
};
+module_platform_driver(tps65912_regulator_driver);
-static int __init tps65912_init(void)
-{
- return platform_driver_register(&tps65912_driver);
-}
-subsys_initcall(tps65912_init);
-
-static void __exit tps65912_cleanup(void)
-{
- platform_driver_unregister(&tps65912_driver);
-}
-module_exit(tps65912_cleanup);
-
-MODULE_AUTHOR("Margarita Olaya Cabrera <magi@slimlogic.co.uk>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
MODULE_DESCRIPTION("TPS65912 voltage regulator driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps65912-pmic");
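The rewritten driver leans on the regmap helpers plus linear-range tables: regulator_list_voltage_linear_range() finds the range containing a selector and computes min_uV + (sel - min_sel) * uV_step. A worked decode against the tps65912_ldo_ranges table above (a sketch; the numbers follow directly from the table):

    /* The range starting at 1650000 uV covers selectors 0x21..0x3c in
     * 50000 uV steps, so: */
    unsigned int sel = 0x25;
    int uV = 1650000 + (sel - 0x21) * 50000;    /* = 1850000 uV */

The hex rewrite of the LDO table is value-for-value identical to the old decimal one (0x20 == 32, 0x21 == 33, ...); only the radix changed, to line up with the 6-bit vsel_mask.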
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress-regulator.c
index c810cbbd463f..c810cbbd463f 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress-regulator.c
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 28c711f0ac6b..72e97d7a5209 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -77,4 +77,13 @@ config DA8XX_REMOTEPROC
It's safe to say n here if you're not interested in multimedia
offloading.
+config ST_REMOTEPROC
+ tristate "ST remoteproc support"
+ depends on ARCH_STI
+ select REMOTEPROC
+ help
+ Say y here to support ST's adjunct processors via the remote
+ processor framework.
+ This can be either built-in or a loadable module.
+
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 81b04d1e2e58..279cb2edc880 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
+obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9e03d158f411..3d7d58a109d8 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -823,8 +823,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/* look for the resource table */
table = rproc_find_rsc_table(rproc, fw, &tablesz);
- if (!table)
+ if (!table) {
+ dev_err(dev, "Failed to find resource table\n");
goto clean_up;
+ }
/* Verify that resource table in loaded fw is unchanged */
if (rproc->table_csum != crc32(0, table, tablesz)) {
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 916af5096f57..74a120b6e206 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -88,8 +88,42 @@ static ssize_t rproc_state_read(struct file *filp, char __user *userbuf,
return simple_read_from_buffer(userbuf, count, ppos, buf, i);
}
+static ssize_t rproc_state_write(struct file *filp, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ char buf[10];
+ int ret;
+
+ if (count > sizeof(buf) || count <= 0)
+ return -EINVAL;
+
+ ret = copy_from_user(buf, userbuf, count);
+ if (ret)
+ return -EFAULT;
+
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+
+ if (!strncmp(buf, "start", count)) {
+ ret = rproc_boot(rproc);
+ if (ret) {
+ dev_err(&rproc->dev, "Boot failed: %d\n", ret);
+ return ret;
+ }
+ } else if (!strncmp(buf, "stop", count)) {
+ rproc_shutdown(rproc);
+ } else {
+ dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
static const struct file_operations rproc_state_ops = {
.read = rproc_state_read,
+ .write = rproc_state_write,
.open = simple_open,
.llseek = generic_file_llseek,
};
@@ -157,7 +191,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
int ret;
if (count < 1 || count > sizeof(buf))
- return count;
+ return -EINVAL;
ret = copy_from_user(buf, user_buf, count);
if (ret)
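With rproc_state_write() hooked into rproc_state_ops, the debugfs state entry becomes writable and accepts the literal strings "start" and "stop". A minimal userspace sketch (the path assumes debugfs mounted at /sys/kernel/debug and the first registered instance):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/sys/kernel/debug/remoteproc/remoteproc0/state",
                      O_WRONLY);
        if (fd < 0)
            return 1;
        /* Writing "stop" shuts the same processor down again. */
        write(fd, "start", 5);
        close(fd);
        return 0;
    }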
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
new file mode 100644
index 000000000000..6f056caa8a56
--- /dev/null
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -0,0 +1,297 @@
+/*
+ * ST's Remote Processor Control Driver
+ *
+ * Copyright (C) 2015 STMicroelectronics - All Rights Reserved
+ *
+ * Author: Ludovic Barre <ludovic.barre@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+
+struct st_rproc_config {
+ bool sw_reset;
+ bool pwr_reset;
+ unsigned long bootaddr_mask;
+};
+
+struct st_rproc {
+ struct st_rproc_config *config;
+ struct reset_control *sw_reset;
+ struct reset_control *pwr_reset;
+ struct clk *clk;
+ u32 clk_rate;
+ struct regmap *boot_base;
+ u32 boot_offset;
+};
+
+static int st_rproc_start(struct rproc *rproc)
+{
+ struct st_rproc *ddata = rproc->priv;
+ int err;
+
+ regmap_update_bits(ddata->boot_base, ddata->boot_offset,
+ ddata->config->bootaddr_mask, rproc->bootaddr);
+
+ err = clk_enable(ddata->clk);
+ if (err) {
+ dev_err(&rproc->dev, "Failed to enable clock\n");
+ return err;
+ }
+
+ if (ddata->config->sw_reset) {
+ err = reset_control_deassert(ddata->sw_reset);
+ if (err) {
+ dev_err(&rproc->dev, "Failed to deassert S/W Reset\n");
+ goto sw_reset_fail;
+ }
+ }
+
+ if (ddata->config->pwr_reset) {
+ err = reset_control_deassert(ddata->pwr_reset);
+ if (err) {
+ dev_err(&rproc->dev, "Failed to deassert Power Reset\n");
+ goto pwr_reset_fail;
+ }
+ }
+
+ dev_info(&rproc->dev, "Started from 0x%x\n", rproc->bootaddr);
+
+ return 0;
+
+pwr_reset_fail:
+	if (ddata->config->sw_reset)
+ reset_control_assert(ddata->sw_reset);
+sw_reset_fail:
+ clk_disable(ddata->clk);
+
+ return err;
+}
+
+static int st_rproc_stop(struct rproc *rproc)
+{
+ struct st_rproc *ddata = rproc->priv;
+ int sw_err = 0, pwr_err = 0;
+
+ if (ddata->config->sw_reset) {
+ sw_err = reset_control_assert(ddata->sw_reset);
+ if (sw_err)
+ dev_err(&rproc->dev, "Failed to assert S/W Reset\n");
+ }
+
+ if (ddata->config->pwr_reset) {
+ pwr_err = reset_control_assert(ddata->pwr_reset);
+ if (pwr_err)
+ dev_err(&rproc->dev, "Failed to assert Power Reset\n");
+ }
+
+ clk_disable(ddata->clk);
+
+ return sw_err ?: pwr_err;
+}
+
+static struct rproc_ops st_rproc_ops = {
+ .start = st_rproc_start,
+ .stop = st_rproc_stop,
+};
+
+/*
+ * Fetch state of the processor: 0 is off, 1 is on.
+ */
+static int st_rproc_state(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct st_rproc *ddata = rproc->priv;
+ int reset_sw = 0, reset_pwr = 0;
+
+ if (ddata->config->sw_reset)
+ reset_sw = reset_control_status(ddata->sw_reset);
+
+ if (ddata->config->pwr_reset)
+ reset_pwr = reset_control_status(ddata->pwr_reset);
+
+ if (reset_sw < 0 || reset_pwr < 0)
+ return -EINVAL;
+
+ return !reset_sw && !reset_pwr;
+}
+
+static const struct st_rproc_config st40_rproc_cfg = {
+ .sw_reset = true,
+ .pwr_reset = true,
+ .bootaddr_mask = GENMASK(28, 1),
+};
+
+static const struct st_rproc_config st231_rproc_cfg = {
+ .sw_reset = true,
+ .pwr_reset = false,
+ .bootaddr_mask = GENMASK(31, 6),
+};
+
+static const struct of_device_id st_rproc_match[] = {
+ { .compatible = "st,st40-rproc", .data = &st40_rproc_cfg },
+ { .compatible = "st,st231-rproc", .data = &st231_rproc_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, st_rproc_match);
+
+static int st_rproc_parse_dt(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct st_rproc *ddata = rproc->priv;
+ struct device_node *np = dev->of_node;
+ int err;
+
+ if (ddata->config->sw_reset) {
+ ddata->sw_reset = devm_reset_control_get(dev, "sw_reset");
+ if (IS_ERR(ddata->sw_reset)) {
+ dev_err(dev, "Failed to get S/W Reset\n");
+ return PTR_ERR(ddata->sw_reset);
+ }
+ }
+
+ if (ddata->config->pwr_reset) {
+ ddata->pwr_reset = devm_reset_control_get(dev, "pwr_reset");
+ if (IS_ERR(ddata->pwr_reset)) {
+ dev_err(dev, "Failed to get Power Reset\n");
+ return PTR_ERR(ddata->pwr_reset);
+ }
+ }
+
+ ddata->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddata->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(ddata->clk);
+ }
+
+ err = of_property_read_u32(np, "clock-frequency", &ddata->clk_rate);
+ if (err) {
+ dev_err(dev, "failed to get clock frequency\n");
+ return err;
+ }
+
+ ddata->boot_base = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
+ if (IS_ERR(ddata->boot_base)) {
+ dev_err(dev, "Boot base not found\n");
+ return PTR_ERR(ddata->boot_base);
+ }
+
+ err = of_property_read_u32_index(np, "st,syscfg", 1,
+ &ddata->boot_offset);
+ if (err) {
+ dev_err(dev, "Boot offset not found\n");
+ return -EINVAL;
+ }
+
+ err = of_reserved_mem_device_init(dev);
+ if (err) {
+ dev_err(dev, "Failed to obtain shared memory\n");
+ return err;
+ }
+
+ err = clk_prepare(ddata->clk);
+ if (err)
+		dev_err(dev, "failed to prepare clock\n");
+
+ return err;
+}
+
+static int st_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ struct st_rproc *ddata;
+ struct device_node *np = dev->of_node;
+ struct rproc *rproc;
+ int enabled;
+ int ret;
+
+ match = of_match_device(st_rproc_match, dev);
+ if (!match || !match->data) {
+ dev_err(dev, "No device match found\n");
+ return -ENODEV;
+ }
+
+ rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ ddata = rproc->priv;
+ ddata->config = (struct st_rproc_config *)match->data;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = st_rproc_parse_dt(pdev);
+ if (ret)
+ goto free_rproc;
+
+	enabled = st_rproc_state(pdev);
+	if (enabled < 0) {
+		ret = enabled;
+		goto free_rproc;
+	}
+
+ if (enabled) {
+ atomic_inc(&rproc->power);
+ rproc->state = RPROC_RUNNING;
+ } else {
+ clk_set_rate(ddata->clk, ddata->clk_rate);
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto free_rproc;
+
+ return 0;
+
+free_rproc:
+ rproc_put(rproc);
+ return ret;
+}
+
+static int st_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct st_rproc *ddata = rproc->priv;
+
+ rproc_del(rproc);
+
+ clk_disable_unprepare(ddata->clk);
+
+ of_reserved_mem_device_release(&pdev->dev);
+
+ rproc_put(rproc);
+
+ return 0;
+}
+
+static struct platform_driver st_rproc_driver = {
+ .probe = st_rproc_probe,
+ .remove = st_rproc_remove,
+ .driver = {
+ .name = "st-rproc",
+ .of_match_table = of_match_ptr(st_rproc_match),
+ },
+};
+module_platform_driver(st_rproc_driver);
+
+MODULE_DESCRIPTION("ST Remote Processor Control Driver");
+MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index edf81819cce1..02d271d101b4 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -122,6 +122,7 @@ static const struct of_device_id wkup_m3_rproc_of_match[] = {
{ .compatible = "ti,am4372-wkup-m3", },
{},
};
+MODULE_DEVICE_TABLE(of, wkup_m3_rproc_of_match);
static int wkup_m3_rproc_probe(struct platform_device *pdev)
{
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 4d7178e46afa..a1fc8eda79f3 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -2,6 +2,7 @@ obj-y += core.o
obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o
obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
+obj-$(CONFIG_MACH_PISTACHIO) += reset-pistachio.o
obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
obj-$(CONFIG_ARCH_STI) += sti/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 87376638948d..f15f150b79da 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -45,9 +45,6 @@ struct reset_control {
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
- if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
- return -EINVAL;
-
if (reset_spec->args[0] >= rcdev->nr_resets)
return -EINVAL;
@@ -152,7 +149,7 @@ EXPORT_SYMBOL_GPL(reset_control_status);
struct reset_control *of_reset_control_get_by_index(struct device_node *node,
int index)
{
- struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER);
+ struct reset_control *rstc;
struct reset_controller_dev *r, *rcdev;
struct of_phandle_args args;
int rstc_id;
@@ -178,6 +175,11 @@ struct reset_control *of_reset_control_get_by_index(struct device_node *node,
return ERR_PTR(-EPROBE_DEFER);
}
+ if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
+ mutex_unlock(&reset_controller_list_mutex);
+ return ERR_PTR(-EINVAL);
+ }
+
rstc_id = rcdev->of_xlate(rcdev, &args);
if (rstc_id < 0) {
mutex_unlock(&reset_controller_list_mutex);
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 7787a9b1cc67..8f55fd4a2630 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -57,7 +57,7 @@ static int hi6220_reset_deassert(struct reset_controller_dev *rc_dev,
return 0;
}
-static struct reset_control_ops hi6220_reset_ops = {
+static const struct reset_control_ops hi6220_reset_ops = {
.assert = hi6220_reset_assert,
.deassert = hi6220_reset_deassert,
};
@@ -83,9 +83,7 @@ static int hi6220_reset_probe(struct platform_device *pdev)
data->rc_dev.ops = &hi6220_reset_ops;
data->rc_dev.of_node = pdev->dev.of_node;
- reset_controller_register(&data->rc_dev);
-
- return 0;
+ return reset_controller_register(&data->rc_dev);
}
static const struct of_device_id hi6220_reset_match[] = {
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index 692fc890e94b..ccb940a8d9fb 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -70,7 +70,7 @@ static int ath79_reset_status(struct reset_controller_dev *rcdev,
return !!(val & BIT(id));
}
-static struct reset_control_ops ath79_reset_ops = {
+static const struct reset_control_ops ath79_reset_ops = {
.assert = ath79_reset_assert,
.deassert = ath79_reset_deassert,
.status = ath79_reset_status,
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
index 970b1ad60293..369f3917fd8e 100644
--- a/drivers/reset/reset-berlin.c
+++ b/drivers/reset/reset-berlin.c
@@ -46,7 +46,7 @@ static int berlin_reset_reset(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops berlin_reset_ops = {
+static const struct reset_control_ops berlin_reset_ops = {
.reset = berlin_reset_reset,
};
@@ -55,9 +55,6 @@ static int berlin_reset_xlate(struct reset_controller_dev *rcdev,
{
unsigned offset, bit;
- if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
- return -EINVAL;
-
offset = reset_spec->args[0];
bit = reset_spec->args[1];
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 70922e9ac27f..3b8a4f5a1ff6 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -136,7 +136,7 @@ static int lpc18xx_rgu_status(struct reset_controller_dev *rcdev,
return !(readl(rc->base + offset) & bit);
}
-static struct reset_control_ops lpc18xx_rgu_ops = {
+static const struct reset_control_ops lpc18xx_rgu_ops = {
.reset = lpc18xx_rgu_reset,
.assert = lpc18xx_rgu_assert,
.deassert = lpc18xx_rgu_deassert,
diff --git a/drivers/reset/reset-pistachio.c b/drivers/reset/reset-pistachio.c
new file mode 100644
index 000000000000..72a97a15a4c8
--- /dev/null
+++ b/drivers/reset/reset-pistachio.c
@@ -0,0 +1,154 @@
+/*
+ * Pistachio SoC Reset Controller driver
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ *
+ * Author: Damien Horsley <Damien.Horsley@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include <dt-bindings/reset/pistachio-resets.h>
+
+#define PISTACHIO_SOFT_RESET 0
+
+struct pistachio_reset_data {
+ struct reset_controller_dev rcdev;
+ struct regmap *periph_regs;
+};
+
+static inline int pistachio_reset_shift(unsigned long id)
+{
+ switch (id) {
+ case PISTACHIO_RESET_I2C0:
+ case PISTACHIO_RESET_I2C1:
+ case PISTACHIO_RESET_I2C2:
+ case PISTACHIO_RESET_I2C3:
+ case PISTACHIO_RESET_I2S_IN:
+ case PISTACHIO_RESET_PRL_OUT:
+ case PISTACHIO_RESET_SPDIF_OUT:
+ case PISTACHIO_RESET_SPI:
+ case PISTACHIO_RESET_PWM_PDM:
+ case PISTACHIO_RESET_UART0:
+ case PISTACHIO_RESET_UART1:
+ case PISTACHIO_RESET_QSPI:
+ case PISTACHIO_RESET_MDC:
+ case PISTACHIO_RESET_SDHOST:
+ case PISTACHIO_RESET_ETHERNET:
+ case PISTACHIO_RESET_IR:
+ case PISTACHIO_RESET_HASH:
+ case PISTACHIO_RESET_TIMER:
+ return id;
+ case PISTACHIO_RESET_I2S_OUT:
+ case PISTACHIO_RESET_SPDIF_IN:
+ case PISTACHIO_RESET_EVT:
+ return id + 6;
+ case PISTACHIO_RESET_USB_H:
+ case PISTACHIO_RESET_USB_PR:
+ case PISTACHIO_RESET_USB_PHY_PR:
+ case PISTACHIO_RESET_USB_PHY_PON:
+ return id + 7;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pistachio_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct pistachio_reset_data *rd;
+ u32 mask;
+ int shift;
+
+ rd = container_of(rcdev, struct pistachio_reset_data, rcdev);
+ shift = pistachio_reset_shift(id);
+ if (shift < 0)
+ return shift;
+ mask = BIT(shift);
+
+ return regmap_update_bits(rd->periph_regs, PISTACHIO_SOFT_RESET,
+ mask, mask);
+}
+
+static int pistachio_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct pistachio_reset_data *rd;
+ u32 mask;
+ int shift;
+
+ rd = container_of(rcdev, struct pistachio_reset_data, rcdev);
+ shift = pistachio_reset_shift(id);
+ if (shift < 0)
+ return shift;
+ mask = BIT(shift);
+
+ return regmap_update_bits(rd->periph_regs, PISTACHIO_SOFT_RESET,
+ mask, 0);
+}
+
+static const struct reset_control_ops pistachio_reset_ops = {
+ .assert = pistachio_reset_assert,
+ .deassert = pistachio_reset_deassert,
+};
+
+static int pistachio_reset_probe(struct platform_device *pdev)
+{
+ struct pistachio_reset_data *rd;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+
+ rd = devm_kzalloc(dev, sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+	platform_set_drvdata(pdev, rd);
+
+ rd->periph_regs = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(rd->periph_regs))
+ return PTR_ERR(rd->periph_regs);
+
+ rd->rcdev.owner = THIS_MODULE;
+ rd->rcdev.nr_resets = PISTACHIO_RESET_MAX + 1;
+ rd->rcdev.ops = &pistachio_reset_ops;
+ rd->rcdev.of_node = np;
+
+ return reset_controller_register(&rd->rcdev);
+}
+
+static int pistachio_reset_remove(struct platform_device *pdev)
+{
+ struct pistachio_reset_data *data = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(&data->rcdev);
+
+ return 0;
+}
+
+static const struct of_device_id pistachio_reset_dt_ids[] = {
+ { .compatible = "img,pistachio-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, pistachio_reset_dt_ids);
+
+static struct platform_driver pistachio_reset_driver = {
+ .probe = pistachio_reset_probe,
+ .remove = pistachio_reset_remove,
+ .driver = {
+ .name = "pistachio-reset",
+ .of_match_table = pistachio_reset_dt_ids,
+ },
+};
+module_platform_driver(pistachio_reset_driver);
+
+MODULE_AUTHOR("Damien Horsley <Damien.Horsley@imgtec.com>");
+MODULE_DESCRIPTION("Pistachio Reset Controller Driver");
+MODULE_LICENSE("GPL v2");
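reset-pistachio implements only .assert and .deassert, which is all the usual consumer sequence needs. A hedged sketch of the consumer side (device pointer, reset name, and delay are illustrative; needs <linux/reset.h> and <linux/delay.h>):

    struct reset_control *rst;

    rst = devm_reset_control_get(&pdev->dev, "periph");
    if (IS_ERR(rst))
        return PTR_ERR(rst);

    /* Hold the block in reset, then release it to start clean. */
    reset_control_assert(rst);
    usleep_range(10, 20);
    reset_control_deassert(rst);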
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index b7d773d9248c..cd05a7032b17 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -90,7 +90,7 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
return !(reg & BIT(offset));
}
-static struct reset_control_ops socfpga_reset_ops = {
+static const struct reset_control_ops socfpga_reset_ops = {
.assert = socfpga_reset_assert,
.deassert = socfpga_reset_deassert,
.status = socfpga_reset_status,
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 8d41a18da17f..677f86555212 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -70,7 +70,7 @@ static int sunxi_reset_deassert(struct reset_controller_dev *rcdev,
return 0;
}
-static struct reset_control_ops sunxi_reset_ops = {
+static const struct reset_control_ops sunxi_reset_ops = {
.assert = sunxi_reset_assert,
.deassert = sunxi_reset_deassert,
};
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
index c6b3cd8b40ad..a7e87bc45885 100644
--- a/drivers/reset/reset-zynq.c
+++ b/drivers/reset/reset-zynq.c
@@ -86,7 +86,7 @@ static int zynq_reset_status(struct reset_controller_dev *rcdev,
return !!(reg & BIT(offset));
}
-static struct reset_control_ops zynq_reset_ops = {
+static const struct reset_control_ops zynq_reset_ops = {
.assert = zynq_reset_assert,
.deassert = zynq_reset_deassert,
.status = zynq_reset_status,
diff --git a/drivers/reset/sti/reset-syscfg.c b/drivers/reset/sti/reset-syscfg.c
index 1600cc7557f5..9bd57a5eee72 100644
--- a/drivers/reset/sti/reset-syscfg.c
+++ b/drivers/reset/sti/reset-syscfg.c
@@ -134,7 +134,7 @@ static int syscfg_reset_status(struct reset_controller_dev *rcdev,
return rst->active_low ? !ret_val : !!ret_val;
}
-static struct reset_control_ops syscfg_reset_ops = {
+static const struct reset_control_ops syscfg_reset_ops = {
.reset = syscfg_reset_dev,
.assert = syscfg_reset_assert,
.deassert = syscfg_reset_deassert,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 376322f71fd5..3e84315c6f12 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -140,7 +140,6 @@ config RTC_DRV_TEST
will be called rtc-test.
comment "I2C RTC drivers"
- depends on I2C
if I2C
@@ -212,6 +211,15 @@ config RTC_DRV_DS1307
This driver can also be built as a module. If so, the module
will be called rtc-ds1307.
+config RTC_DRV_DS1307_HWMON
+ bool "HWMON support for rtc-ds1307"
+ depends on RTC_DRV_DS1307 && HWMON
+ depends on !(RTC_DRV_DS1307=y && HWMON=m)
+ default y
+ help
+ Say Y here if you want to expose temperature sensor data on
+	  rtc-ds1307 (only DS3231).
+
config RTC_DRV_DS1374
tristate "Dallas/Maxim DS1374"
help
@@ -239,16 +247,6 @@ config RTC_DRV_DS1672
This driver can also be built as a module. If so, the module
will be called rtc-ds1672.
-config RTC_DRV_DS3232
- tristate "Dallas/Maxim DS3232"
- help
- If you say yes here you get support for Dallas Semiconductor
- DS3232 real-time clock chips. If an interrupt is associated
- with the device, the alarm functionality is supported.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-ds3232.
-
config RTC_DRV_HYM8563
tristate "Haoyu Microelectronics HYM8563"
depends on OF
@@ -317,10 +315,10 @@ config RTC_DRV_MAX8997
config RTC_DRV_MAX77686
tristate "Maxim MAX77686"
- depends on MFD_MAX77686
+ depends on MFD_MAX77686 || MFD_MAX77620
help
If you say yes here you will get support for the
- RTC of Maxim MAX77686 PMIC.
+ RTC of Maxim MAX77686/MAX77620/MAX77802 PMIC.
This driver can also be built as a module. If so, the module
will be called rtc-max77686.
@@ -335,16 +333,6 @@ config RTC_DRV_RK808
This driver can also be built as a module. If so, the module
will be called rk808-rtc.
-config RTC_DRV_MAX77802
- tristate "Maxim 77802 RTC"
- depends on MFD_MAX77686
- help
- If you say yes here you will get support for the
- RTC of Maxim MAX77802 PMIC.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-max77802.
-
config RTC_DRV_RS5C372
tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
help
@@ -391,25 +379,6 @@ config RTC_DRV_X1205
This driver can also be built as a module. If so, the module
will be called rtc-x1205.
-config RTC_DRV_PALMAS
- tristate "TI Palmas RTC driver"
- depends on MFD_PALMAS
- help
- If you say yes here you get support for the RTC of TI PALMA series PMIC
- chips.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-palma.
-
-config RTC_DRV_PCF2127
- tristate "NXP PCF2127"
- help
- If you say yes here you get support for the NXP PCF2127/29 RTC
- chips.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-pcf2127.
-
config RTC_DRV_PCF8523
tristate "NXP PCF8523"
help
@@ -419,6 +388,14 @@ config RTC_DRV_PCF8523
This driver can also be built as a module. If so, the module
will be called rtc-pcf8523.
+config RTC_DRV_PCF85063
+ tristate "NXP PCF85063"
+ help
+	  If you say yes here you get support for the PCF85063 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf85063.
+
config RTC_DRV_PCF8563
tristate "Philips PCF8563/Epson RTC8564"
help
@@ -429,14 +406,6 @@ config RTC_DRV_PCF8563
This driver can also be built as a module. If so, the module
will be called rtc-pcf8563.
-config RTC_DRV_PCF85063
- tristate "nxp PCF85063"
- help
- If you say yes here you get support for the PCF85063 RTC chip
-
- This driver can also be built as a module. If so, the module
- will be called rtc-pcf85063.
-
config RTC_DRV_PCF8583
tristate "Philips PCF8583"
help
@@ -501,6 +470,16 @@ config RTC_DRV_TWL4030
This driver can also be built as a module. If so, the module
will be called rtc-twl.
+config RTC_DRV_PALMAS
+ tristate "TI Palmas RTC driver"
+ depends on MFD_PALMAS
+ help
+	  If you say yes here you get support for the RTC of TI Palmas series PMIC
+	  chips.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called rtc-palmas.
+
config RTC_DRV_TPS6586X
tristate "TI TPS6586X RTC driver"
depends on MFD_TPS6586X
@@ -595,14 +574,23 @@ config RTC_DRV_EM3027
will be called rtc-em3027.
config RTC_DRV_RV3029C2
- tristate "Micro Crystal RTC"
+ tristate "Micro Crystal RV3029"
help
If you say yes here you get support for the Micro Crystal
- RV3029-C2 RTC chips.
+ RV3029 RTC chips.
This driver can also be built as a module. If so, the module
will be called rtc-rv3029c2.
+config RTC_DRV_RV3029_HWMON
+ bool "HWMON support for RV3029"
+ depends on RTC_DRV_RV3029C2 && HWMON
+ depends on !(RTC_DRV_RV3029C2=y && HWMON=m)
+ default y
+ help
+ Say Y here if you want to expose temperature sensor data on
+ rtc-rv3029.
+
config RTC_DRV_RV8803
tristate "Micro Crystal RV8803"
help
@@ -691,15 +679,6 @@ config RTC_DRV_DS1390
This driver can also be built as a module. If so, the module
will be called rtc-ds1390.
-config RTC_DRV_MAX6902
- tristate "Maxim MAX6902"
- help
- If you say yes here you will get support for the
- Maxim MAX6902 SPI RTC chip.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-max6902.
-
config RTC_DRV_R9701
tristate "Epson RTC-9701JE"
help
@@ -709,6 +688,23 @@ config RTC_DRV_R9701
This driver can also be built as a module. If so, the module
will be called rtc-r9701.
+config RTC_DRV_RX4581
+ tristate "Epson RX-4581"
+ help
+ If you say yes here you will get support for the Epson RX-4581.
+
+	  This driver can also be built as a module. If so, the module
+ will be called rtc-rx4581.
+
+config RTC_DRV_RX6110
+ tristate "Epson RX-6110"
+ select REGMAP_SPI
+ help
+	  If you say yes here you will get support for the Epson RX-6110.
+
+	  This driver can also be built as a module. If so, the module
+ will be called rtc-rx6110.
+
config RTC_DRV_RS5C348
tristate "Ricoh RS5C348A/B"
help
@@ -718,14 +714,14 @@ config RTC_DRV_RS5C348
This driver can also be built as a module. If so, the module
will be called rtc-rs5c348.
-config RTC_DRV_DS3234
- tristate "Maxim/Dallas DS3234"
+config RTC_DRV_MAX6902
+ tristate "Maxim MAX6902"
help
- If you say yes here you get support for the
- Maxim/Dallas DS3234 SPI RTC chip.
+ If you say yes here you will get support for the
+ Maxim MAX6902 SPI RTC chip.
This driver can also be built as a module. If so, the module
- will be called rtc-ds3234.
+ will be called rtc-max6902.
config RTC_DRV_PCF2123
tristate "NXP PCF2123"
@@ -736,14 +732,6 @@ config RTC_DRV_PCF2123
This driver can also be built as a module. If so, the module
will be called rtc-pcf2123.
-config RTC_DRV_RX4581
- tristate "Epson RX-4581"
- help
- If you say yes here you will get support for the Epson RX-4581.
-
- This driver can also be built as a module. If so the module
- will be called rtc-rx4581.
-
config RTC_DRV_MCP795
tristate "Microchip MCP795"
help
@@ -754,6 +742,41 @@ config RTC_DRV_MCP795
endif # SPI_MASTER
+#
+# Helper to resolve issues with configs that have SPI enabled but I2C
+# modular. See SND_SOC_I2C_AND_SPI for more information
+#
+config RTC_I2C_AND_SPI
+ tristate
+ default m if I2C=m
+ default y if I2C=y
+ default y if SPI_MASTER=y
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
+
+comment "SPI and I2C RTC drivers"
+
+config RTC_DRV_DS3232
+ tristate "Dallas/Maxim DS3232/DS3234"
+ depends on RTC_I2C_AND_SPI
+ help
+ If you say yes here you get support for Dallas Semiconductor
+ DS3232 and DS3234 real-time clock chips. If an interrupt is associated
+ with the device, the alarm functionality is supported.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-ds3232.
+
+config RTC_DRV_PCF2127
+ tristate "NXP PCF2127"
+ depends on RTC_I2C_AND_SPI
+ help
+ If you say yes here you get support for the NXP PCF2127/29 RTC
+ chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf2127.
+
comment "Platform RTC drivers"
# this 'CMOS' RTC driver is arch dependent because <asm-generic/rtc.h>
@@ -1087,7 +1110,7 @@ config RTC_DRV_WM8350
config RTC_DRV_SPEAR
tristate "SPEAR ST RTC"
- depends on PLAT_SPEAR
+ depends on PLAT_SPEAR || COMPILE_TEST
default y
help
If you say Y here you will get support for the RTC found on
@@ -1119,7 +1142,7 @@ config RTC_DRV_AB8500
config RTC_DRV_NUC900
tristate "NUC910/NUC920 RTC driver"
- depends on ARCH_W90X900
+ depends on ARCH_W90X900 || COMPILE_TEST
help
If you say yes here you get support for the RTC subsystem of the
NUC910/NUC920 used in embedded systems.
@@ -1144,9 +1167,19 @@ config RTC_DRV_ZYNQMP
comment "on-CPU RTC drivers"
+config RTC_DRV_ASM9260
+ tristate "Alphascale asm9260 RTC"
+ depends on MACH_ASM9260
+ help
+ If you say yes here you get support for the RTC on the
+ Alphascale asm9260 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-asm9260.
+
config RTC_DRV_DAVINCI
tristate "TI DaVinci RTC"
- depends on ARCH_DAVINCI_DM365
+ depends on ARCH_DAVINCI_DM365 || COMPILE_TEST
help
If you say yes here you get support for the RTC on the
DaVinci platforms (DM365).
@@ -1156,7 +1189,7 @@ config RTC_DRV_DAVINCI
config RTC_DRV_DIGICOLOR
tristate "Conexant Digicolor RTC"
- depends on ARCH_DIGICOLOR
+ depends on ARCH_DIGICOLOR || COMPILE_TEST
help
If you say yes here you get support for the RTC on Conexant
Digicolor platforms. This currently includes the CX92755 SoC.
@@ -1175,7 +1208,7 @@ config RTC_DRV_IMXDI
config RTC_DRV_OMAP
tristate "TI OMAP Real Time Clock"
- depends on ARCH_OMAP || ARCH_DAVINCI
+ depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
help
Say "yes" here to support the on chip real time clock
present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx.
@@ -1192,7 +1225,7 @@ config HAVE_S3C_RTC
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
- depends on ARCH_S3C64XX || HAVE_S3C_RTC
+ depends on ARCH_S3C64XX || HAVE_S3C_RTC || COMPILE_TEST
help
RTC (Realtime Clock) driver for the clock inbuilt into the
Samsung S3C24XX series of SoCs. This can provide periodic
@@ -1208,7 +1241,7 @@ config RTC_DRV_S3C
config RTC_DRV_EP93XX
tristate "Cirrus Logic EP93XX"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
help
If you say yes here you get support for the
RTC embedded in the Cirrus Logic EP93XX processors.
@@ -1238,7 +1271,7 @@ config RTC_DRV_SH
config RTC_DRV_VR41XX
tristate "NEC VR41XX"
- depends on CPU_VR41XX
+ depends on CPU_VR41XX || COMPILE_TEST
help
If you say Y here you will get access to the real time clock
built into your NEC VR41XX CPU.
@@ -1268,14 +1301,14 @@ config RTC_DRV_PL031
config RTC_DRV_AT32AP700X
tristate "AT32AP700X series RTC"
- depends on PLATFORM_AT32AP
+ depends on PLATFORM_AT32AP || COMPILE_TEST
help
Driver for the internal RTC (Realtime Clock) on Atmel AVR32
AT32AP700x family processors.
config RTC_DRV_AT91RM9200
tristate "AT91RM9200 or some AT91SAM9 RTC"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
help
Driver for the internal RTC (Realtime Clock) module found on
Atmel AT91RM9200's and some AT91SAM9 chips. On AT91SAM9 chips
@@ -1283,7 +1316,7 @@ config RTC_DRV_AT91RM9200
config RTC_DRV_AT91SAM9
tristate "AT91SAM9 RTT as RTC"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
select MFD_SYSCON
help
Some AT91SAM9 SoCs provide an RTT (Real Time Timer) block which
@@ -1325,17 +1358,17 @@ config RTC_DRV_GENERIC
tristate "Generic RTC support"
# Please consider writing a new RTC driver instead of using the generic
# RTC abstraction
- depends on PARISC || M68K || PPC || SUPERH32
+ depends on PARISC || M68K || PPC || SUPERH32 || COMPILE_TEST
help
Say Y or M here to enable RTC support on systems using the generic
RTC abstraction. If you do not know what you are doing, you should
just say Y.
config RTC_DRV_PXA
- tristate "PXA27x/PXA3xx"
- depends on ARCH_PXA
- select RTC_DRV_SA1100
- help
+ tristate "PXA27x/PXA3xx"
+ depends on ARCH_PXA
+ select RTC_DRV_SA1100
+ help
If you say Y here you will get access to the real time clock
built into your PXA27x or PXA3xx CPU. This RTC is actually 2 RTCs
consisting of an SA1100 compatible RTC and the extended PXA RTC.
@@ -1345,7 +1378,7 @@ config RTC_DRV_PXA
config RTC_DRV_VT8500
tristate "VIA/WonderMedia 85xx SoC RTC"
- depends on ARCH_VT8500
+ depends on ARCH_VT8500 || COMPILE_TEST
help
If you say Y here you will get access to the real time clock
built into your VIA VT8500 SoC or its relatives.
@@ -1360,14 +1393,15 @@ config RTC_DRV_SUN4V
config RTC_DRV_SUN6I
tristate "Allwinner A31 RTC"
- depends on MACH_SUN6I || MACH_SUN8I
+ default MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
+ depends on ARCH_SUNXI
help
- If you say Y here you will get support for the RTC found on
- Allwinner A31.
+ If you say Y here you will get support for the RTC found in
+ some Allwinner SoCs like the A31 or the A64.
config RTC_DRV_SUNXI
tristate "Allwinner sun4i/sun7i RTC"
- depends on MACH_SUN4I || MACH_SUN7I
+ depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
help
If you say Y here you will get support for the RTC found on
Allwinner A10/A20.
@@ -1388,7 +1422,7 @@ config RTC_DRV_TX4939
config RTC_DRV_MV
tristate "Marvell SoC RTC"
- depends on ARCH_DOVE || ARCH_MVEBU
+ depends on ARCH_DOVE || ARCH_MVEBU || COMPILE_TEST
help
If you say yes here you will get support for the in-chip RTC
that can be found in some of Marvell's SoC devices, such as
@@ -1399,7 +1433,7 @@ config RTC_DRV_MV
config RTC_DRV_ARMADA38X
tristate "Armada 38x Marvell SoC RTC"
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU || COMPILE_TEST
help
If you say yes here you will get support for the in-chip RTC
that can be found in the Armada 38x Marvell's SoC device
@@ -1429,7 +1463,7 @@ config RTC_DRV_PS3
config RTC_DRV_COH901331
tristate "ST-Ericsson COH 901 331 RTC"
- depends on ARCH_U300
+ depends on ARCH_U300 || COMPILE_TEST
help
If you say Y here you will get access to ST-Ericsson
COH 901 331 RTC clock found in some ST-Ericsson Mobile
@@ -1441,7 +1475,7 @@ config RTC_DRV_COH901331
config RTC_DRV_STMP
tristate "Freescale STMP3xxx/i.MX23/i.MX28 RTC"
- depends on ARCH_MXS
+ depends on ARCH_MXS || COMPILE_TEST
select STMP_DEVICE
help
If you say yes here you will get support for the onboard
@@ -1476,7 +1510,7 @@ config RTC_DRV_MPC5121
config RTC_DRV_JZ4740
tristate "Ingenic JZ4740 SoC"
- depends on MACH_JZ4740
+ depends on MACH_JZ4740 || COMPILE_TEST
help
If you say yes here you get support for the Ingenic JZ4740 SoC RTC
controller.
@@ -1497,7 +1531,7 @@ config RTC_DRV_LPC24XX
so, the module will be called rtc-lpc24xx.
config RTC_DRV_LPC32XX
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
tristate "NXP LPC32XX RTC"
help
This enables support for the NXP RTC in the LPC32XX
@@ -1507,7 +1541,7 @@ config RTC_DRV_LPC32XX
config RTC_DRV_PM8XXX
tristate "Qualcomm PMIC8XXX RTC"
- depends on MFD_PM8XXX || MFD_SPMI_PMIC
+ depends on MFD_PM8XXX || MFD_SPMI_PMIC || COMPILE_TEST
help
If you say yes here you get support for the
Qualcomm PMIC8XXX RTC.
@@ -1517,7 +1551,7 @@ config RTC_DRV_PM8XXX
config RTC_DRV_TEGRA
tristate "NVIDIA Tegra Internal RTC driver"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
help
If you say yes here you get support for the
Tegra 200 series internal RTC module.
@@ -1603,7 +1637,7 @@ config RTC_DRV_MOXART
config RTC_DRV_MT6397
tristate "Mediatek Real Time Clock driver"
- depends on MFD_MT6397 || COMPILE_TEST
+ depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
help
This selects the Mediatek(R) RTC driver. RTC is part of Mediatek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
@@ -1622,6 +1656,16 @@ config RTC_DRV_XGENE
This driver can also be built as a module, if so, the module
will be called "rtc-xgene".
+config RTC_DRV_PIC32
+ tristate "Microchip PIC32 RTC"
+ depends on MACH_PIC32
+ default y
+ help
+ If you say yes here you get support for the PIC32 RTC module.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pic32.
+
comment "HID Sensor RTC drivers"
config RTC_DRV_HID_SENSOR_TIME
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 62d61b26ca7e..ea2833723fa9 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o
obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o
obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o
obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o
+obj-$(CONFIG_RTC_DRV_ASM9260) += rtc-asm9260.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
@@ -59,7 +60,6 @@ obj-$(CONFIG_RTC_DRV_DS1685_FAMILY) += rtc-ds1685.o
obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
obj-$(CONFIG_RTC_DRV_DS2404) += rtc-ds2404.o
obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o
-obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o
obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o
obj-$(CONFIG_RTC_DRV_EM3027) += rtc-em3027.o
obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
@@ -86,7 +86,6 @@ obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX6902) += rtc-max6902.o
obj-$(CONFIG_RTC_DRV_MAX77686) += rtc-max77686.o
-obj-$(CONFIG_RTC_DRV_MAX77802) += rtc-max77802.o
obj-$(CONFIG_RTC_DRV_MAX8907) += rtc-max8907.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
obj-$(CONFIG_RTC_DRV_MAX8997) += rtc-max8997.o
@@ -112,6 +111,7 @@ obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
+obj-$(CONFIG_RTC_DRV_PIC32) += rtc-pic32.o
obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
@@ -128,6 +128,7 @@ obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
obj-$(CONFIG_RTC_DRV_RV8803) += rtc-rv8803.o
obj-$(CONFIG_RTC_DRV_RX4581) += rtc-rx4581.o
+obj-$(CONFIG_RTC_DRV_RX6110) += rtc-rx6110.o
obj-$(CONFIG_RTC_DRV_RX8010) += rtc-rx8010.o
obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o
obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index de86578bcd6d..74fd9746aeca 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -361,17 +361,4 @@ static int __init rtc_init(void)
rtc_dev_init();
return 0;
}
-
-static void __exit rtc_exit(void)
-{
- rtc_dev_exit();
- class_destroy(rtc_class);
- ida_destroy(&rtc_ida);
-}
-
subsys_initcall(rtc_init);
-module_exit(rtc_exit);
-
-MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
-MODULE_DESCRIPTION("RTC class support");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 5836751b8203..9ef5f6f89f98 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -939,4 +939,58 @@ void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
mutex_unlock(&rtc->ops_lock);
}
+/**
+ * rtc_read_offset - Read the amount of rtc offset in parts per billion
+ * @rtc: rtc device to be used
+ * @offset: pointer used to return the offset in parts per billion
+ *
+ * See rtc_set_offset() for details on the offset semantics.
+ *
+ * Kernel interface to read an rtc clock offset.
+ * Returns 0 on success, or a negative number on error.
+ * If read_offset() is not implemented for the rtc, returns -EINVAL.
+ */
+int rtc_read_offset(struct rtc_device *rtc, long *offset)
+{
+ int ret;
+
+ if (!rtc->ops)
+ return -ENODEV;
+
+ if (!rtc->ops->read_offset)
+ return -EINVAL;
+
+ mutex_lock(&rtc->ops_lock);
+ ret = rtc->ops->read_offset(rtc->dev.parent, offset);
+ mutex_unlock(&rtc->ops_lock);
+ return ret;
+}
+/**
+ * rtc_set_offset - Adjusts the duration of the average second
+ * @rtc: rtc device to be used
+ * @offset: the offset in parts per billion
+ *
+ * Some RTCs allow an adjustment to the average duration of a second
+ * to compensate for differences in the actual clock rate due to
+ * temperature, the crystal, capacitor, etc.
+ *
+ * Kernel interface to adjust an rtc clock offset.
+ * Returns 0 on success, or a negative number on error.
+ * If the rtc offset is not settable (or not implemented), returns -EINVAL.
+ */
+int rtc_set_offset(struct rtc_device *rtc, long offset)
+{
+ int ret;
+
+ if (!rtc->ops)
+ return -ENODEV;
+
+ if (!rtc->ops->set_offset)
+ return -EINVAL;
+
+ mutex_lock(&rtc->ops_lock);
+ ret = rtc->ops->set_offset(rtc->dev.parent, offset);
+ mutex_unlock(&rtc->ops_lock);
+ return ret;
+}
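To make the new interface concrete, here is a minimal, hypothetical driver hookup (illustrative sketch only — the foo_* names are invented, not part of this patch; only the ops signatures are implied by the calls above). For scale, an offset of 11574 ppb shifts the clock by roughly one second per day (86400 s * 11574e-9 ~= 1 s):

    /* Sketch: wiring the new offset hooks into a driver's ops. */
    static int foo_rtc_read_offset(struct device *dev, long *offset)
    {
    	/* e.g. convert a hardware trim register into parts per billion */
    	*offset = 0;
    	return 0;
    }

    static int foo_rtc_set_offset(struct device *dev, long offset)
    {
    	/* e.g. range-check 'offset' (ppb) and program the trim register */
    	return 0;
    }

    static const struct rtc_class_ops foo_rtc_ops = {
    	.read_offset	= foo_rtc_read_offset,
    	.set_offset	= foo_rtc_set_offset,
    };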
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index d41bbcd653f6..ba0d61934d35 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -49,7 +49,20 @@
#define ABX8XX_REG_CD_TIMER_CTL 0x18
+#define ABX8XX_REG_OSC 0x1c
+#define ABX8XX_OSC_FOS BIT(3)
+#define ABX8XX_OSC_BOS BIT(4)
+#define ABX8XX_OSC_ACAL_512 BIT(5)
+#define ABX8XX_OSC_ACAL_1024 BIT(6)
+
+#define ABX8XX_OSC_OSEL BIT(7)
+
+#define ABX8XX_REG_OSS 0x1d
+#define ABX8XX_OSS_OF BIT(1)
+#define ABX8XX_OSS_OMODE BIT(4)
+
#define ABX8XX_REG_CFG_KEY 0x1f
+#define ABX8XX_CFG_KEY_OSC 0xa1
#define ABX8XX_CFG_KEY_MISC 0x9d
#define ABX8XX_REG_ID0 0x28
@@ -81,6 +94,20 @@ static struct abx80x_cap abx80x_caps[] = {
[ABX80X] = {.pn = 0}
};
+static int abx80x_is_rc_mode(struct i2c_client *client)
+{
+ int flags = 0;
+
+ flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS);
+ if (flags < 0) {
+ dev_err(&client->dev,
+ "Failed to read autocalibration attribute\n");
+ return flags;
+ }
+
+ return (flags & ABX8XX_OSS_OMODE) ? 1 : 0;
+}
+
static int abx80x_enable_trickle_charger(struct i2c_client *client,
u8 trickle_cfg)
{
@@ -112,7 +139,23 @@ static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char buf[8];
- int err;
+ int err, flags, rc_mode = 0;
+
+ /* Read the Oscillator Failure only in XT mode */
+ rc_mode = abx80x_is_rc_mode(client);
+ if (rc_mode < 0)
+ return rc_mode;
+
+ if (!rc_mode) {
+ flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS);
+ if (flags < 0)
+ return flags;
+
+ if (flags & ABX8XX_OSS_OF) {
+ dev_err(dev, "Oscillator failure, data is invalid.\n");
+ return -EINVAL;
+ }
+ }
err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH,
sizeof(buf), buf);
@@ -140,7 +183,7 @@ static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char buf[8];
- int err;
+ int err, flags;
if (tm->tm_year < 100)
return -EINVAL;
@@ -161,6 +204,18 @@ static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
return -EIO;
}
+ /* Clear the OF bit of Oscillator Status Register */
+ flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSS);
+ if (flags < 0)
+ return flags;
+
+ err = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSS,
+ flags & ~ABX8XX_OSS_OF);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write oscillator status register\n");
+ return err;
+ }
+
return 0;
}
@@ -248,6 +303,174 @@ static int abx80x_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+static int abx80x_rtc_set_autocalibration(struct device *dev,
+ int autocalibration)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int retval, flags = 0;
+
+ if ((autocalibration != 0) && (autocalibration != 1024) &&
+ (autocalibration != 512)) {
+ dev_err(dev, "autocalibration value outside permitted range\n");
+ return -EINVAL;
+ }
+
+ flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC);
+ if (flags < 0)
+ return flags;
+
+ if (autocalibration == 0) {
+ flags &= ~(ABX8XX_OSC_ACAL_512 | ABX8XX_OSC_ACAL_1024);
+ } else if (autocalibration == 1024) {
+ /* 1024 autocalibration is 0x10 */
+ flags |= ABX8XX_OSC_ACAL_1024;
+ flags &= ~(ABX8XX_OSC_ACAL_512);
+ } else {
+ /* 512 autocalibration is 0x11 */
+ flags |= (ABX8XX_OSC_ACAL_1024 | ABX8XX_OSC_ACAL_512);
+ }
+
+ /* Unlock write access to Oscillator Control Register */
+ retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
+ ABX8XX_CFG_KEY_OSC);
+ if (retval < 0) {
+ dev_err(dev, "Failed to write CONFIG_KEY register\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSC, flags);
+
+ return retval;
+}
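For reference, the two ACAL bits manipulated above pair up as follows (a sketch of the encoding implied by the code, not quoted from the datasheet):

    /*
     * ACAL_1024 (bit 6) : ACAL_512 (bit 5)
     *   0 0 -> autocalibration disabled
     *   1 0 -> autocalibrate every 1024 seconds
     *   1 1 -> autocalibrate every 512 seconds
     */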
+
+static int abx80x_rtc_get_autocalibration(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int flags = 0, autocalibration;
+
+ flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC);
+ if (flags < 0)
+ return flags;
+
+ if (flags & ABX8XX_OSC_ACAL_512)
+ autocalibration = 512;
+ else if (flags & ABX8XX_OSC_ACAL_1024)
+ autocalibration = 1024;
+ else
+ autocalibration = 0;
+
+ return autocalibration;
+}
+
+static ssize_t autocalibration_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int retval;
+ unsigned long autocalibration = 0;
+
+ retval = kstrtoul(buf, 10, &autocalibration);
+ if (retval < 0) {
+ dev_err(dev, "Failed to store RTC autocalibration attribute\n");
+ return -EINVAL;
+ }
+
+ retval = abx80x_rtc_set_autocalibration(dev, autocalibration);
+
+ return retval ? retval : count;
+}
+
+static ssize_t autocalibration_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int autocalibration = 0;
+
+ autocalibration = abx80x_rtc_get_autocalibration(dev);
+ if (autocalibration < 0) {
+ dev_err(dev, "Failed to read RTC autocalibration\n");
+ sprintf(buf, "0\n");
+ return autocalibration;
+ }
+
+ return sprintf(buf, "%d\n", autocalibration);
+}
+
+static DEVICE_ATTR_RW(autocalibration);
+
+static ssize_t oscillator_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int retval, flags, rc_mode = 0;
+
+ if (strncmp(buf, "rc", 2) == 0) {
+ rc_mode = 1;
+ } else if (strncmp(buf, "xtal", 4) == 0) {
+ rc_mode = 0;
+ } else {
+ dev_err(dev, "Oscillator selection value outside permitted ones\n");
+ return -EINVAL;
+ }
+
+ flags = i2c_smbus_read_byte_data(client, ABX8XX_REG_OSC);
+ if (flags < 0)
+ return flags;
+
+ if (rc_mode == 0)
+ flags &= ~(ABX8XX_OSC_OSEL);
+ else
+ flags |= (ABX8XX_OSC_OSEL);
+
+ /* Unlock write access on Oscillator Control register */
+ retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
+ ABX8XX_CFG_KEY_OSC);
+ if (retval < 0) {
+ dev_err(dev, "Failed to write CONFIG_KEY register\n");
+ return retval;
+ }
+
+ retval = i2c_smbus_write_byte_data(client, ABX8XX_REG_OSC, flags);
+ if (retval < 0) {
+ dev_err(dev, "Failed to write Oscillator Control register\n");
+ return retval;
+ }
+
+ return retval ? retval : count;
+}
+
+static ssize_t oscillator_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int rc_mode = 0;
+ struct i2c_client *client = to_i2c_client(dev);
+
+ rc_mode = abx80x_is_rc_mode(client);
+
+ if (rc_mode < 0) {
+ dev_err(dev, "Failed to read RTC oscillator selection\n");
+ sprintf(buf, "\n");
+ return rc_mode;
+ }
+
+ if (rc_mode)
+ return sprintf(buf, "rc\n");
+ else
+ return sprintf(buf, "xtal\n");
+}
+
+static DEVICE_ATTR_RW(oscillator);
+
+static struct attribute *rtc_calib_attrs[] = {
+ &dev_attr_autocalibration.attr,
+ &dev_attr_oscillator.attr,
+ NULL,
+};
+
+static const struct attribute_group rtc_calib_attr_group = {
+ .attrs = rtc_calib_attrs,
+};
+
static int abx80x_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -303,6 +526,13 @@ static int abx80x_dt_trickle_cfg(struct device_node *np)
return (trickle_cfg | i);
}
+static void rtc_calib_remove_sysfs_group(void *_dev)
+{
+ struct device *dev = _dev;
+
+ sysfs_remove_group(&dev->kobj, &rtc_calib_attr_group);
+}
+
static int abx80x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -405,6 +635,24 @@ static int abx80x_probe(struct i2c_client *client,
}
}
+ /* Export sysfs entries */
+ err = sysfs_create_group(&client->dev.kobj, &rtc_calib_attr_group);
+ if (err) {
+ dev_err(&client->dev, "Failed to create sysfs group: %d\n",
+ err);
+ return err;
+ }
+
+ err = devm_add_action(&client->dev, rtc_calib_remove_sysfs_group,
+ &client->dev);
+ if (err) {
+ rtc_calib_remove_sysfs_group(&client->dev);
+ dev_err(&client->dev,
+ "Failed to add sysfs cleanup action: %d\n",
+ err);
+ return err;
+ }
+
return 0;
}
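The two-step sequence above (create the sysfs group, then register a devm action whose failure path tears the group down by hand) collapses into one call with devm_add_action_or_reset(), where that helper is available — an assumption; this patch does not use it:

    	/* Sketch only: the helper runs the cleanup itself on failure. */
    	err = devm_add_action_or_reset(&client->dev,
    				       rtc_calib_remove_sysfs_group,
    				       &client->dev);
    	if (err)
    		return err;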
diff --git a/drivers/rtc/rtc-as3722.c b/drivers/rtc/rtc-as3722.c
index 56cc5821118b..6ef0c887e6ca 100644
--- a/drivers/rtc/rtc-as3722.c
+++ b/drivers/rtc/rtc-as3722.c
@@ -210,7 +210,7 @@ static int as3722_rtc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "RTC interrupt %d\n", as3722_rtc->alarm_irq);
ret = devm_request_threaded_irq(&pdev->dev, as3722_rtc->alarm_irq, NULL,
- as3722_alarm_irq, IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ as3722_alarm_irq, IRQF_ONESHOT,
"rtc-alarm", as3722_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
new file mode 100644
index 000000000000..355fdb97a006
--- /dev/null
+++ b/drivers/rtc/rtc-asm9260.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2016 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+
+/* Miscellaneous registers */
+/* Interrupt Location Register */
+#define HW_ILR 0x00
+#define BM_RTCALF BIT(1)
+#define BM_RTCCIF BIT(0)
+
+/* Clock Control Register */
+#define HW_CCR 0x08
+/* Calibration counter disable */
+#define BM_CCALOFF BIT(4)
+/* Reset internal oscillator divider */
+#define BM_CTCRST BIT(1)
+/* Clock Enable */
+#define BM_CLKEN BIT(0)
+
+/* Counter Increment Interrupt Register */
+#define HW_CIIR 0x0C
+#define BM_CIIR_IMYEAR BIT(7)
+#define BM_CIIR_IMMON BIT(6)
+#define BM_CIIR_IMDOY BIT(5)
+#define BM_CIIR_IMDOW BIT(4)
+#define BM_CIIR_IMDOM BIT(3)
+#define BM_CIIR_IMHOUR BIT(2)
+#define BM_CIIR_IMMIN BIT(1)
+#define BM_CIIR_IMSEC BIT(0)
+
+/* Alarm Mask Register */
+#define HW_AMR 0x10
+#define BM_AMR_IMYEAR BIT(7)
+#define BM_AMR_IMMON BIT(6)
+#define BM_AMR_IMDOY BIT(5)
+#define BM_AMR_IMDOW BIT(4)
+#define BM_AMR_IMDOM BIT(3)
+#define BM_AMR_IMHOUR BIT(2)
+#define BM_AMR_IMMIN BIT(1)
+#define BM_AMR_IMSEC BIT(0)
+#define BM_AMR_OFF 0xff
+
+/* Consolidated time registers */
+#define HW_CTIME0 0x14
+#define BM_CTIME0_DOW_S 24
+#define BM_CTIME0_DOW_M 0x7
+#define BM_CTIME0_HOUR_S 16
+#define BM_CTIME0_HOUR_M 0x1f
+#define BM_CTIME0_MIN_S 8
+#define BM_CTIME0_MIN_M 0x3f
+#define BM_CTIME0_SEC_S 0
+#define BM_CTIME0_SEC_M 0x3f
+
+#define HW_CTIME1 0x18
+#define BM_CTIME1_YEAR_S 16
+#define BM_CTIME1_YEAR_M 0xfff
+#define BM_CTIME1_MON_S 8
+#define BM_CTIME1_MON_M 0xf
+#define BM_CTIME1_DOM_S 0
+#define BM_CTIME1_DOM_M 0x1f
+
+#define HW_CTIME2 0x1C
+#define BM_CTIME2_DOY_S 0
+#define BM_CTIME2_DOY_M 0xfff
+
+/* Time counter registers */
+#define HW_SEC 0x20
+#define HW_MIN 0x24
+#define HW_HOUR 0x28
+#define HW_DOM 0x2C
+#define HW_DOW 0x30
+#define HW_DOY 0x34
+#define HW_MONTH 0x38
+#define HW_YEAR 0x3C
+
+#define HW_CALIBRATION 0x40
+#define BM_CALDIR_BACK BIT(17)
+#define BM_CALVAL_M 0x1ffff
+
+/* General purpose registers */
+#define HW_GPREG0 0x44
+#define HW_GPREG1 0x48
+#define HW_GPREG2 0x4C
+#define HW_GPREG3 0x50
+#define HW_GPREG4 0x54
+
+/* Alarm register group */
+#define HW_ALSEC 0x60
+#define HW_ALMIN 0x64
+#define HW_ALHOUR 0x68
+#define HW_ALDOM 0x6C
+#define HW_ALDOW 0x70
+#define HW_ALDOY 0x74
+#define HW_ALMON 0x78
+#define HW_ALYEAR 0x7C
+
+struct asm9260_rtc_priv {
+ struct device *dev;
+ void __iomem *iobase;
+ struct rtc_device *rtc;
+ struct clk *clk;
+ /* io lock */
+ spinlock_t lock;
+};
+
+static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
+{
+ struct asm9260_rtc_priv *priv = dev_id;
+ u32 isr;
+ unsigned long events = 0;
+
+ isr = ioread32(priv->iobase + HW_CIIR);
+ if (!isr)
+ return IRQ_NONE;
+
+ iowrite32(0, priv->iobase + HW_CIIR);
+
+ events |= RTC_AF | RTC_IRQF;
+
+ rtc_update_irq(priv->rtc, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+ u32 ctime0, ctime1, ctime2;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&priv->lock, irq_flags);
+ ctime0 = ioread32(priv->iobase + HW_CTIME0);
+ ctime1 = ioread32(priv->iobase + HW_CTIME1);
+ ctime2 = ioread32(priv->iobase + HW_CTIME2);
+
+ if (ctime1 != ioread32(priv->iobase + HW_CTIME1)) {
+ /*
+ * The counters rolled over while we were reading them. A single
+ * re-read is now safe.
+ */
+ ctime0 = ioread32(priv->iobase + HW_CTIME0);
+ ctime1 = ioread32(priv->iobase + HW_CTIME1);
+ ctime2 = ioread32(priv->iobase + HW_CTIME2);
+ }
+ spin_unlock_irqrestore(&priv->lock, irq_flags);
+
+ tm->tm_sec = (ctime0 >> BM_CTIME0_SEC_S) & BM_CTIME0_SEC_M;
+ tm->tm_min = (ctime0 >> BM_CTIME0_MIN_S) & BM_CTIME0_MIN_M;
+ tm->tm_hour = (ctime0 >> BM_CTIME0_HOUR_S) & BM_CTIME0_HOUR_M;
+ tm->tm_wday = (ctime0 >> BM_CTIME0_DOW_S) & BM_CTIME0_DOW_M;
+
+ tm->tm_mday = (ctime1 >> BM_CTIME1_DOM_S) & BM_CTIME1_DOM_M;
+ tm->tm_mon = (ctime1 >> BM_CTIME1_MON_S) & BM_CTIME1_MON_M;
+ tm->tm_year = (ctime1 >> BM_CTIME1_YEAR_S) & BM_CTIME1_YEAR_M;
+
+ tm->tm_yday = (ctime2 >> BM_CTIME2_DOY_S) & BM_CTIME2_DOY_M;
+
+ return 0;
+}
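The single bounded re-read above works because CTIME1 changes at most once per second, so a second rollover cannot occur within the same read. The unbounded, generic form of the same split-counter pattern looks like this (illustrative sketch, not code from this patch):

    /* Generic consistent read of a split counter (sketch). */
    static u32 read_consistent(void __iomem *base, u32 *lo_out)
    {
    	u32 hi, lo;

    	do {
    		hi = ioread32(base + 0x4);	/* "high" word, rolls over last */
    		lo = ioread32(base + 0x0);	/* "low" word */
    	} while (hi != ioread32(base + 0x4));	/* retry if it rolled over */

    	*lo_out = lo;
    	return hi;
    }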
+
+static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&priv->lock, irq_flags);
+ /*
+ * Zero the SEC counter first so it cannot roll the other counters
+ * over mid-update; the real value is written at the end of the
+ * sequence.
+ */
+ iowrite32(0, priv->iobase + HW_SEC);
+
+ iowrite32(tm->tm_year, priv->iobase + HW_YEAR);
+ iowrite32(tm->tm_mon, priv->iobase + HW_MONTH);
+ iowrite32(tm->tm_mday, priv->iobase + HW_DOM);
+ iowrite32(tm->tm_wday, priv->iobase + HW_DOW);
+ iowrite32(tm->tm_yday, priv->iobase + HW_DOY);
+ iowrite32(tm->tm_hour, priv->iobase + HW_HOUR);
+ iowrite32(tm->tm_min, priv->iobase + HW_MIN);
+ iowrite32(tm->tm_sec, priv->iobase + HW_SEC);
+ spin_unlock_irqrestore(&priv->lock, irq_flags);
+
+ return 0;
+}
+
+static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&priv->lock, irq_flags);
+ alrm->time.tm_year = ioread32(priv->iobase + HW_ALYEAR);
+ alrm->time.tm_mon = ioread32(priv->iobase + HW_ALMON);
+ alrm->time.tm_mday = ioread32(priv->iobase + HW_ALDOM);
+ alrm->time.tm_wday = ioread32(priv->iobase + HW_ALDOW);
+ alrm->time.tm_yday = ioread32(priv->iobase + HW_ALDOY);
+ alrm->time.tm_hour = ioread32(priv->iobase + HW_ALHOUR);
+ alrm->time.tm_min = ioread32(priv->iobase + HW_ALMIN);
+ alrm->time.tm_sec = ioread32(priv->iobase + HW_ALSEC);
+
+ alrm->enabled = ioread32(priv->iobase + HW_AMR) ? 1 : 0;
+ alrm->pending = ioread32(priv->iobase + HW_CIIR) ? 1 : 0;
+ spin_unlock_irqrestore(&priv->lock, irq_flags);
+
+ return rtc_valid_tm(&alrm->time);
+}
+
+static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&priv->lock, irq_flags);
+ iowrite32(alrm->time.tm_year, priv->iobase + HW_ALYEAR);
+ iowrite32(alrm->time.tm_mon, priv->iobase + HW_ALMON);
+ iowrite32(alrm->time.tm_mday, priv->iobase + HW_ALDOM);
+ iowrite32(alrm->time.tm_wday, priv->iobase + HW_ALDOW);
+ iowrite32(alrm->time.tm_yday, priv->iobase + HW_ALDOY);
+ iowrite32(alrm->time.tm_hour, priv->iobase + HW_ALHOUR);
+ iowrite32(alrm->time.tm_min, priv->iobase + HW_ALMIN);
+ iowrite32(alrm->time.tm_sec, priv->iobase + HW_ALSEC);
+
+ iowrite32(alrm->enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR);
+ spin_unlock_irqrestore(&priv->lock, irq_flags);
+
+ return 0;
+}
+
+static int asm9260_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
+
+ iowrite32(enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR);
+ return 0;
+}
+
+static const struct rtc_class_ops asm9260_rtc_ops = {
+ .read_time = asm9260_rtc_read_time,
+ .set_time = asm9260_rtc_set_time,
+ .read_alarm = asm9260_rtc_read_alarm,
+ .set_alarm = asm9260_rtc_set_alarm,
+ .alarm_irq_enable = asm9260_alarm_irq_enable,
+};
+
+static int asm9260_rtc_probe(struct platform_device *pdev)
+{
+ struct asm9260_rtc_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq_alarm, ret;
+ u32 ccr;
+
+ priv = devm_kzalloc(dev, sizeof(struct asm9260_rtc_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ platform_set_drvdata(pdev, priv);
+
+ irq_alarm = platform_get_irq(pdev, 0);
+ if (irq_alarm < 0) {
+ dev_err(dev, "No alarm IRQ resource defined\n");
+ return irq_alarm;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->iobase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->iobase))
+ return PTR_ERR(priv->iobase);
+
+ priv->clk = devm_clk_get(dev, "ahb");
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clk!\n");
+ return ret;
+ }
+
+ ccr = ioread32(priv->iobase + HW_CCR);
+ /* if dev is not enabled, reset it */
+ if ((ccr & (BM_CLKEN | BM_CTCRST)) != BM_CLKEN) {
+ iowrite32(BM_CTCRST, priv->iobase + HW_CCR);
+ ccr = 0;
+ }
+
+ iowrite32(BM_CLKEN | ccr, priv->iobase + HW_CCR);
+ iowrite32(0, priv->iobase + HW_CIIR);
+ iowrite32(BM_AMR_OFF, priv->iobase + HW_AMR);
+
+ priv->rtc = devm_rtc_device_register(dev, dev_name(dev),
+ &asm9260_rtc_ops, THIS_MODULE);
+ if (IS_ERR(priv->rtc)) {
+ ret = PTR_ERR(priv->rtc);
+ dev_err(dev, "Failed to register RTC device: %d\n", ret);
+ goto err_return;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq_alarm, NULL,
+ asm9260_rtc_irq, IRQF_ONESHOT,
+ dev_name(dev), priv);
+ if (ret < 0) {
+ dev_err(dev, "can't get irq %i, err %d\n",
+ irq_alarm, ret);
+ goto err_return;
+ }
+
+ return 0;
+
+err_return:
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int asm9260_rtc_remove(struct platform_device *pdev)
+{
+ struct asm9260_rtc_priv *priv = platform_get_drvdata(pdev);
+
+ /* Disable alarm matching */
+ iowrite32(BM_AMR_OFF, priv->iobase + HW_AMR);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static const struct of_device_id asm9260_dt_ids[] = {
+ { .compatible = "alphascale,asm9260-rtc", },
+ {}
+};
+
+static struct platform_driver asm9260_rtc_driver = {
+ .probe = asm9260_rtc_probe,
+ .remove = asm9260_rtc_remove,
+ .driver = {
+ .name = "asm9260-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = asm9260_dt_ids,
+ },
+};
+
+module_platform_driver(asm9260_rtc_driver);
+
+MODULE_AUTHOR("Oleksij Rempel <linux@rempel-privat.de>");
+MODULE_DESCRIPTION("Alphascale asm9260 SoC Realtime Clock Driver (RTC)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index f39691eea736..8e41c4613e51 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -532,7 +532,7 @@ ds1305_nvram_read(struct file *filp, struct kobject *kobj,
struct spi_transfer x[2];
int status;
- spi = container_of(kobj, struct spi_device, dev.kobj);
+ spi = to_spi_device(kobj_to_dev(kobj));
addr = DS1305_NVRAM + off;
msg_init(&m, x, &addr, count, NULL, buf);
@@ -554,7 +554,7 @@ ds1305_nvram_write(struct file *filp, struct kobject *kobj,
struct spi_transfer x[2];
int status;
- spi = container_of(kobj, struct spi_device, dev.kobj);
+ spi = to_spi_device(kobj_to_dev(kobj));
addr = (DS1305_WRITE | DS1305_NVRAM) + off;
msg_init(&m, x, &addr, count, buf, NULL);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cf685f67b391..ecb7dbae9be9 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -19,6 +19,9 @@
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/clk-provider.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -89,6 +92,7 @@ enum ds_type {
# define DS1340_BIT_OSF 0x80
#define DS1337_REG_STATUS 0x0f
# define DS1337_BIT_OSF 0x80
+# define DS3231_BIT_EN32KHZ 0x08
# define DS1337_BIT_A2I 0x02
# define DS1337_BIT_A1I 0x01
#define DS1339_REG_ALARM1_SECS 0x07
@@ -118,6 +122,9 @@ struct ds1307 {
u8 length, u8 *values);
s32 (*write_block_data)(const struct i2c_client *client, u8 command,
u8 length, const u8 *values);
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw clks[2];
+#endif
};
struct chip_desc {
@@ -842,6 +849,378 @@ out:
return;
}
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_RTC_DRV_DS1307_HWMON
+
+/*
+ * Temperature sensor support for ds3231 devices.
+ */
+
+#define DS3231_REG_TEMPERATURE 0x11
+
+/*
+ * A user-initiated temperature conversion is not started by this function,
+ * so the temperature is updated once every 64 seconds.
+ */
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
+{
+ struct ds1307 *ds1307 = dev_get_drvdata(dev);
+ u8 temp_buf[2];
+ s16 temp;
+ int ret;
+
+ ret = ds1307->read_block_data(ds1307->client, DS3231_REG_TEMPERATURE,
+ sizeof(temp_buf), temp_buf);
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(temp_buf))
+ return -EIO;
+
+ /*
+ * Temperature is represented as a 10-bit code with a resolution of
+ * 0.25 degrees Celsius and encoded in two's complement format.
+ */
+ temp = (temp_buf[0] << 8) | temp_buf[1];
+ temp >>= 6;
+ *mC = temp * 250;
+
+ return 0;
+}
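A worked example of the decoding above (values chosen for illustration):

    /*
     * temp_buf = {0x19, 0x40}:
     *   temp = (s16)0x1940 = 6464;  6464 >> 6 = 101;  101 * 250 = 25250 mC
     * and for a negative reading, temp_buf = {0xff, 0x40}:
     *   temp = (s16)0xff40 = -192;  -192 >> 6 = -3;   -3 * 250 = -750 mC
     */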
+
+static ssize_t ds3231_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ s32 temp;
+
+ ret = ds3231_hwmon_read_temp(dev, &temp);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", temp);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ds3231_hwmon_show_temp,
+ NULL, 0);
+
+static struct attribute *ds3231_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ds3231_hwmon);
+
+static void ds1307_hwmon_register(struct ds1307 *ds1307)
+{
+ struct device *dev;
+
+ if (ds1307->type != ds_3231)
+ return;
+
+ dev = devm_hwmon_device_register_with_groups(&ds1307->client->dev,
+ ds1307->client->name,
+ ds1307, ds3231_hwmon_groups);
+ if (IS_ERR(dev)) {
+ dev_warn(&ds1307->client->dev,
+ "unable to register hwmon device %ld\n", PTR_ERR(dev));
+ }
+}
+
+#else
+
+static void ds1307_hwmon_register(struct ds1307 *ds1307)
+{
+}
+
+#endif /* CONFIG_RTC_DRV_DS1307_HWMON */
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Square-wave output support for DS3231
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/DS3231.pdf
+ */
+#ifdef CONFIG_COMMON_CLK
+
+enum {
+ DS3231_CLK_SQW = 0,
+ DS3231_CLK_32KHZ,
+};
+
+#define clk_sqw_to_ds1307(clk) \
+ container_of(clk, struct ds1307, clks[DS3231_CLK_SQW])
+#define clk_32khz_to_ds1307(clk) \
+ container_of(clk, struct ds1307, clks[DS3231_CLK_32KHZ])
+
+static int ds3231_clk_sqw_rates[] = {
+ 1,
+ 1024,
+ 4096,
+ 8192,
+};
+
+static int ds1337_write_control(struct ds1307 *ds1307, u8 mask, u8 value)
+{
+ struct i2c_client *client = ds1307->client;
+ struct mutex *lock = &ds1307->rtc->ops_lock;
+ int control;
+ int ret;
+
+ mutex_lock(lock);
+
+ control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+ if (control < 0) {
+ ret = control;
+ goto out;
+ }
+
+ control &= ~mask;
+ control |= value;
+
+ ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control);
+out:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static unsigned long ds3231_clk_sqw_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+ int control;
+ int rate_sel = 0;
+
+ control = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_CONTROL);
+ if (control < 0)
+ return control;
+ if (control & DS1337_BIT_RS1)
+ rate_sel += 1;
+ if (control & DS1337_BIT_RS2)
+ rate_sel += 2;
+
+ return ds3231_clk_sqw_rates[rate_sel];
+}
+
+static long ds3231_clk_sqw_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(ds3231_clk_sqw_rates) - 1; i >= 0; i--) {
+ if (ds3231_clk_sqw_rates[i] <= rate)
+ return ds3231_clk_sqw_rates[i];
+ }
+
+ return 0;
+}
+
+static int ds3231_clk_sqw_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+ int control = 0;
+ int rate_sel;
+
+ for (rate_sel = 0; rate_sel < ARRAY_SIZE(ds3231_clk_sqw_rates);
+ rate_sel++) {
+ if (ds3231_clk_sqw_rates[rate_sel] == rate)
+ break;
+ }
+
+ if (rate_sel == ARRAY_SIZE(ds3231_clk_sqw_rates))
+ return -EINVAL;
+
+ if (rate_sel & 1)
+ control |= DS1337_BIT_RS1;
+ if (rate_sel & 2)
+ control |= DS1337_BIT_RS2;
+
+ return ds1337_write_control(ds1307, DS1337_BIT_RS1 | DS1337_BIT_RS2,
+ control);
+}
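The index arithmetic in recalc_rate and set_rate above implies this RS2:RS1 mapping (a sketch of the encoding, consistent with ds3231_clk_sqw_rates[]):

    /*
     * RS2 RS1 -> output rate:
     *   0   0  -> 1 Hz
     *   0   1  -> 1024 Hz
     *   1   0  -> 4096 Hz
     *   1   1  -> 8192 Hz
     * e.g. a requested rate of 8192 gives rate_sel == 3, setting both bits.
     */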
+
+static int ds3231_clk_sqw_prepare(struct clk_hw *hw)
+{
+ struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+
+ return ds1337_write_control(ds1307, DS1337_BIT_INTCN, 0);
+}
+
+static void ds3231_clk_sqw_unprepare(struct clk_hw *hw)
+{
+ struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+
+ ds1337_write_control(ds1307, DS1337_BIT_INTCN, DS1337_BIT_INTCN);
+}
+
+static int ds3231_clk_sqw_is_prepared(struct clk_hw *hw)
+{
+ struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw);
+ int control;
+
+ control = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_CONTROL);
+ if (control < 0)
+ return control;
+
+ return !(control & DS1337_BIT_INTCN);
+}
+
+static const struct clk_ops ds3231_clk_sqw_ops = {
+ .prepare = ds3231_clk_sqw_prepare,
+ .unprepare = ds3231_clk_sqw_unprepare,
+ .is_prepared = ds3231_clk_sqw_is_prepared,
+ .recalc_rate = ds3231_clk_sqw_recalc_rate,
+ .round_rate = ds3231_clk_sqw_round_rate,
+ .set_rate = ds3231_clk_sqw_set_rate,
+};
+
+static unsigned long ds3231_clk_32khz_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return 32768;
+}
+
+static int ds3231_clk_32khz_control(struct ds1307 *ds1307, bool enable)
+{
+ struct i2c_client *client = ds1307->client;
+ struct mutex *lock = &ds1307->rtc->ops_lock;
+ int status;
+ int ret;
+
+ mutex_lock(lock);
+
+ status = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS);
+ if (status < 0) {
+ ret = status;
+ goto out;
+ }
+
+ if (enable)
+ status |= DS3231_BIT_EN32KHZ;
+ else
+ status &= ~DS3231_BIT_EN32KHZ;
+
+ ret = i2c_smbus_write_byte_data(client, DS1337_REG_STATUS, status);
+out:
+ mutex_unlock(lock);
+
+ return ret;
+}
+
+static int ds3231_clk_32khz_prepare(struct clk_hw *hw)
+{
+ struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
+
+ return ds3231_clk_32khz_control(ds1307, true);
+}
+
+static void ds3231_clk_32khz_unprepare(struct clk_hw *hw)
+{
+ struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
+
+ ds3231_clk_32khz_control(ds1307, false);
+}
+
+static int ds3231_clk_32khz_is_prepared(struct clk_hw *hw)
+{
+ struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw);
+ int status;
+
+ status = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_STATUS);
+ if (status < 0)
+ return status;
+
+ return !!(status & DS3231_BIT_EN32KHZ);
+}
+
+static const struct clk_ops ds3231_clk_32khz_ops = {
+ .prepare = ds3231_clk_32khz_prepare,
+ .unprepare = ds3231_clk_32khz_unprepare,
+ .is_prepared = ds3231_clk_32khz_is_prepared,
+ .recalc_rate = ds3231_clk_32khz_recalc_rate,
+};
+
+static struct clk_init_data ds3231_clks_init[] = {
+ [DS3231_CLK_SQW] = {
+ .name = "ds3231_clk_sqw",
+ .ops = &ds3231_clk_sqw_ops,
+ .flags = CLK_IS_ROOT,
+ },
+ [DS3231_CLK_32KHZ] = {
+ .name = "ds3231_clk_32khz",
+ .ops = &ds3231_clk_32khz_ops,
+ .flags = CLK_IS_ROOT,
+ },
+};
+
+static int ds3231_clks_register(struct ds1307 *ds1307)
+{
+ struct i2c_client *client = ds1307->client;
+ struct device_node *node = client->dev.of_node;
+ struct clk_onecell_data *onecell;
+ int i;
+
+ onecell = devm_kzalloc(&client->dev, sizeof(*onecell), GFP_KERNEL);
+ if (!onecell)
+ return -ENOMEM;
+
+ onecell->clk_num = ARRAY_SIZE(ds3231_clks_init);
+ onecell->clks = devm_kcalloc(&client->dev, onecell->clk_num,
+ sizeof(onecell->clks[0]), GFP_KERNEL);
+ if (!onecell->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ds3231_clks_init); i++) {
+ struct clk_init_data init = ds3231_clks_init[i];
+
+ /*
+ * Interrupt signal due to alarm conditions and square-wave
+ * output share the same pin, so don't initialize both.
+ */
+ if (i == DS3231_CLK_SQW && test_bit(HAS_ALARM, &ds1307->flags))
+ continue;
+
+ /* optional override of the clockname */
+ of_property_read_string_index(node, "clock-output-names", i,
+ &init.name);
+ ds1307->clks[i].init = &init;
+
+ onecell->clks[i] = devm_clk_register(&client->dev,
+ &ds1307->clks[i]);
+ if (IS_ERR(onecell->clks[i]))
+ return PTR_ERR(onecell->clks[i]);
+ }
+
+ if (!node)
+ return 0;
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, onecell);
+
+ return 0;
+}
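Since the provider registered above resolves through of_clk_src_onecell_get, a consumer references the clocks by index; a hypothetical consumer looks like this (sketch only — node names and the DT binding shape are assumptions, not part of this patch):

    	/* Consumer .dts side (assumed): clocks = <&rtc 0>;  (DS3231_CLK_SQW) */
    	struct clk *sqw = of_clk_get(consumer_np, 0);

    	if (!IS_ERR(sqw)) {
    		clk_prepare_enable(sqw);	/* clears INTCN: enables the SQW pin */
    		clk_set_rate(sqw, 4096);	/* programs RS2:RS1 */
    	}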
+
+static void ds1307_clks_register(struct ds1307 *ds1307)
+{
+ int ret;
+
+ if (ds1307->type != ds_3231)
+ return;
+
+ ret = ds3231_clks_register(ds1307);
+ if (ret) {
+ dev_warn(&ds1307->client->dev,
+ "unable to register clock device %d\n", ret);
+ }
+}
+
+#else
+
+static void ds1307_clks_register(struct ds1307 *ds1307)
+{
+}
+
+#endif /* CONFIG_COMMON_CLK */
+
static int ds1307_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -851,6 +1230,7 @@ static int ds1307_probe(struct i2c_client *client,
struct chip_desc *chip = &chips[id->driver_data];
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
bool want_irq = false;
+ bool ds1307_can_wakeup_device = false;
unsigned char *buf;
struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev);
irq_handler_t irq_handler = ds1307_irq;
@@ -898,6 +1278,20 @@ static int ds1307_probe(struct i2c_client *client,
ds1307->write_block_data = ds1307_write_block_data;
}
+#ifdef CONFIG_OF
+/*
+ * For devices with no IRQ directly connected to the SoC, the RTC chip
+ * can be forced as a wakeup source by stating that explicitly in
+ * the device's .dts file using the "wakeup-source" boolean property.
+ * If the "wakeup-source" property is set, don't request an IRQ.
+ * This will guarantee the 'wakealarm' sysfs entry is available on the device,
+ * if supported by the RTC.
+ */
+ if (of_property_read_bool(client->dev.of_node, "wakeup-source"))
+ ds1307_can_wakeup_device = true;
+#endif
+
switch (ds1307->type) {
case ds_1337:
case ds_1339:
@@ -916,11 +1310,13 @@ static int ds1307_probe(struct i2c_client *client,
ds1307->regs[0] &= ~DS1337_BIT_nEOSC;
/*
- * Using IRQ? Disable the square wave and both alarms.
+ * Using IRQ or defined as wakeup-source?
+ * Disable the square wave and both alarms.
* For some variants, be sure alarms can trigger when we're
* running on Vbackup (BBSQI/BBSQW)
*/
- if (ds1307->client->irq > 0 && chip->alarm) {
+ if (chip->alarm && (ds1307->client->irq > 0 ||
+ ds1307_can_wakeup_device)) {
ds1307->regs[0] |= DS1337_BIT_INTCN
| bbsqi_bitpos[ds1307->type];
ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE);
@@ -1135,6 +1531,14 @@ read_rtc:
return PTR_ERR(ds1307->rtc);
}
+ if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
+ /* Disable request for an IRQ */
+ want_irq = false;
+ dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
+ /* We cannot support UIE mode if we do not have an IRQ line */
+ ds1307->rtc->uie_unsupported = 1;
+ }
+
if (want_irq) {
err = devm_request_threaded_irq(&client->dev,
client->irq, NULL, irq_handler,
@@ -1182,6 +1586,9 @@ read_rtc:
}
}
+ ds1307_hwmon_register(ds1307);
+ ds1307_clks_register(ds1307);
+
return 0;
exit:
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 535050fc5e9f..1e6cfc84b1f6 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
* Only use this where you are certain another lock will not be held.
*/
static inline void
-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
+ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
{
- spin_lock_irqsave(&rtc->lock, flags);
+ spin_lock_irqsave(&rtc->lock, *flags);
ds1685_rtc_switch_to_bank1(rtc);
}
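The pointer change above matters because spin_lock_irqsave() is a macro that assigns the saved IRQ state to its 'flags' argument; with pass-by-value only the callee's copy ever received it. A sketch of the bug being fixed (illustrative, not code from this patch):

    static void broken_begin(spinlock_t *lock, unsigned long flags)
    {
    	spin_lock_irqsave(lock, flags);	/* writes the local copy only */
    }
    /* ...the caller's own 'flags' is never written, so its matching
     * spin_unlock_irqrestore(lock, flags) restores stale IRQ state. */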
@@ -1300,7 +1300,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
{
struct ds1685_priv *rtc = dev_get_drvdata(dev);
u8 reg = 0, bit = 0, tmp;
- unsigned long flags = 0;
+ unsigned long flags;
long int val = 0;
const struct ds1685_rtc_ctrl_regs *reg_info =
ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
@@ -1321,7 +1321,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
bit = reg_info->bit;
/* Safe to spinlock during a write. */
- ds1685_rtc_begin_ctrl_access(rtc, flags);
+ ds1685_rtc_begin_ctrl_access(rtc, &flags);
tmp = rtc->read(rtc, reg);
rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
ds1685_rtc_end_ctrl_access(rtc, flags);
@@ -2161,6 +2161,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
/* Check for valid RTC data, else, spin forever. */
if (unlikely(!pdev)) {
pr_emerg("platform device data not available, spinning forever ...\n");
+ while (1);
unreachable();
} else {
/* Get the rtc data. */
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 4e99ace66f74..7edc889729c5 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,19 +1,15 @@
/*
- * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
+ * RTC client/driver for the Maxim/Dallas DS3232/DS3234 Real-Time Clock
*
* Copyright (C) 2009-2011 Freescale Semiconductor.
* Author: Jack Lan <jack.lan@freescale.com>
+ * Copyright (C) 2008 MIMOMax Wireless Ltd.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
-/*
- * It would be more efficient to use i2c msgs/i2c_transfer directly but, as
- * recommened in .../Documentation/i2c/writing-clients section
- * "Sending and receiving", using SMBus level communication is preferred.
- */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -21,10 +17,11 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/spi/spi.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
-#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/regmap.h>
#define DS3232_REG_SECONDS 0x00
#define DS3232_REG_MINUTES 0x01
@@ -50,39 +47,33 @@
# define DS3232_REG_SR_A1F 0x01
struct ds3232 {
- struct i2c_client *client;
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
struct rtc_device *rtc;
- struct work_struct work;
- /* The mutex protects alarm operations, and prevents a race
- * between the enable_irq() in the workqueue and the free_irq()
- * in the remove function.
- */
- struct mutex mutex;
bool suspended;
- int exiting;
};
-static struct i2c_driver ds3232_driver;
-
-static int ds3232_check_rtc_status(struct i2c_client *client)
+static int ds3232_check_rtc_status(struct device *dev)
{
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
int ret = 0;
int control, stat;
- stat = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
- if (stat < 0)
- return stat;
+ ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
+ if (ret)
+ return ret;
if (stat & DS3232_REG_SR_OSF)
- dev_warn(&client->dev,
+ dev_warn(dev,
"oscillator discontinuity flagged, "
"time unreliable\n");
stat &= ~(DS3232_REG_SR_OSF | DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
- ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
- if (ret < 0)
+ ret = regmap_write(ds3232->regmap, DS3232_REG_SR, stat);
+ if (ret)
return ret;
/* If the alarm is pending, clear it before requesting
@@ -90,31 +81,28 @@ static int ds3232_check_rtc_status(struct i2c_client *client)
* before everything is initialized.
*/
- control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
- if (control < 0)
- return control;
+ ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+ if (ret)
+ return ret;
control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
control |= DS3232_REG_CR_INTCN;
- return i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+ return regmap_write(ds3232->regmap, DS3232_REG_CR, control);
}
static int ds3232_read_time(struct device *dev, struct rtc_time *time)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
int ret;
u8 buf[7];
unsigned int year, month, day, hour, minute, second;
unsigned int week, twelve_hr, am_pm;
unsigned int century, add_century = 0;
- ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_SECONDS, 7, buf);
-
- if (ret < 0)
+ ret = regmap_bulk_read(ds3232->regmap, DS3232_REG_SECONDS, buf, 7);
+ if (ret)
return ret;
- if (ret < 7)
- return -EIO;
second = buf[0];
minute = buf[1];
@@ -159,7 +147,7 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
static int ds3232_set_time(struct device *dev, struct rtc_time *time)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
u8 buf[7];
/* Extract time from rtc_time and load into ds3232*/
@@ -179,8 +167,7 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
buf[6] = bin2bcd(time->tm_year);
}
- return i2c_smbus_write_i2c_block_data(client,
- DS3232_REG_SECONDS, 7, buf);
+ return regmap_bulk_write(ds3232->regmap, DS3232_REG_SECONDS, buf, 7);
}
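The bin2bcd()/bcd2bin() calls in the two hunks above exist because the DS3232 register file stores every time field as packed BCD, two decimal nibbles per byte. A worked example (sketch):

    /*
     *   bin2bcd(59)   == 0x59   (tens nibble 5, units nibble 9)
     *   bcd2bin(0x47) == 47
     */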
/*
@@ -190,24 +177,19 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
*/
static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
int control, stat;
int ret;
u8 buf[4];
- mutex_lock(&ds3232->mutex);
-
- ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
- if (ret < 0)
+ ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
+ if (ret)
goto out;
- stat = ret;
- ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
- if (ret < 0)
+ ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+ if (ret)
goto out;
- control = ret;
- ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
- if (ret < 0)
+ ret = regmap_bulk_read(ds3232->regmap, DS3232_REG_ALARM1, buf, 4);
+ if (ret)
goto out;
alarm->time.tm_sec = bcd2bin(buf[0] & 0x7F);
@@ -226,7 +208,6 @@ static int ds3232_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
ret = 0;
out:
- mutex_unlock(&ds3232->mutex);
return ret;
}
@@ -236,166 +217,129 @@ out:
*/
static int ds3232_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
int control, stat;
int ret;
u8 buf[4];
- if (client->irq <= 0)
+ if (ds3232->irq <= 0)
return -EINVAL;
- mutex_lock(&ds3232->mutex);
-
buf[0] = bin2bcd(alarm->time.tm_sec);
buf[1] = bin2bcd(alarm->time.tm_min);
buf[2] = bin2bcd(alarm->time.tm_hour);
buf[3] = bin2bcd(alarm->time.tm_mday);
/* clear alarm interrupt enable bit */
- ret = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
- if (ret < 0)
+ ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+ if (ret)
goto out;
- control = ret;
control &= ~(DS3232_REG_CR_A1IE | DS3232_REG_CR_A2IE);
- ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
- if (ret < 0)
+ ret = regmap_write(ds3232->regmap, DS3232_REG_CR, control);
+ if (ret)
goto out;
/* clear any pending alarm flag */
- ret = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
- if (ret < 0)
+ ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
+ if (ret)
goto out;
- stat = ret;
stat &= ~(DS3232_REG_SR_A1F | DS3232_REG_SR_A2F);
- ret = i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
- if (ret < 0)
+ ret = regmap_write(ds3232->regmap, DS3232_REG_SR, stat);
+ if (ret)
goto out;
- ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
+ ret = regmap_bulk_write(ds3232->regmap, DS3232_REG_ALARM1, buf, 4);
+ if (ret)
+ goto out;
if (alarm->enabled) {
control |= DS3232_REG_CR_A1IE;
- ret = i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+ ret = regmap_write(ds3232->regmap, DS3232_REG_CR, control);
}
out:
- mutex_unlock(&ds3232->mutex);
return ret;
}
-static void ds3232_update_alarm(struct i2c_client *client)
+static int ds3232_update_alarm(struct device *dev, unsigned int enabled)
{
- struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
int control;
int ret;
- u8 buf[4];
-
- mutex_lock(&ds3232->mutex);
-
- ret = i2c_smbus_read_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
- if (ret < 0)
- goto unlock;
-
- buf[0] = bcd2bin(buf[0]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
- 0x80 : buf[0];
- buf[1] = bcd2bin(buf[1]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
- 0x80 : buf[1];
- buf[2] = bcd2bin(buf[2]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
- 0x80 : buf[2];
- buf[3] = bcd2bin(buf[3]) < 0 || (ds3232->rtc->irq_data & RTC_UF) ?
- 0x80 : buf[3];
-
- ret = i2c_smbus_write_i2c_block_data(client, DS3232_REG_ALARM1, 4, buf);
- if (ret < 0)
- goto unlock;
- control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
- if (control < 0)
- goto unlock;
+ ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+ if (ret)
+ return ret;
- if (ds3232->rtc->irq_data & (RTC_AF | RTC_UF))
+ if (enabled)
/* enable alarm1 interrupt */
control |= DS3232_REG_CR_A1IE;
else
/* disable alarm1 interrupt */
control &= ~(DS3232_REG_CR_A1IE);
- i2c_smbus_write_byte_data(client, DS3232_REG_CR, control);
+ ret = regmap_write(ds3232->regmap, DS3232_REG_CR, control);
-unlock:
- mutex_unlock(&ds3232->mutex);
+ return ret;
}
static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds3232 *ds3232 = i2c_get_clientdata(client);
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
- if (client->irq <= 0)
+ if (ds3232->irq <= 0)
return -EINVAL;
- if (enabled)
- ds3232->rtc->irq_data |= RTC_AF;
- else
- ds3232->rtc->irq_data &= ~RTC_AF;
-
- ds3232_update_alarm(client);
- return 0;
+ return ds3232_update_alarm(dev, enabled);
}
static irqreturn_t ds3232_irq(int irq, void *dev_id)
{
- struct i2c_client *client = dev_id;
- struct ds3232 *ds3232 = i2c_get_clientdata(client);
-
- disable_irq_nosync(irq);
-
- /*
- * If rtc as a wakeup source, can't schedule the work
- * at system resume flow, because at this time the i2c bus
- * has not been resumed.
- */
- if (!ds3232->suspended)
- schedule_work(&ds3232->work);
-
- return IRQ_HANDLED;
-}
-
-static void ds3232_work(struct work_struct *work)
-{
- struct ds3232 *ds3232 = container_of(work, struct ds3232, work);
- struct i2c_client *client = ds3232->client;
+ struct device *dev = dev_id;
+ struct ds3232 *ds3232 = dev_get_drvdata(dev);
+ struct mutex *lock = &ds3232->rtc->ops_lock;
+ int ret;
int stat, control;
- mutex_lock(&ds3232->mutex);
+ mutex_lock(lock);
- stat = i2c_smbus_read_byte_data(client, DS3232_REG_SR);
- if (stat < 0)
+ ret = regmap_read(ds3232->regmap, DS3232_REG_SR, &stat);
+ if (ret)
goto unlock;
if (stat & DS3232_REG_SR_A1F) {
- control = i2c_smbus_read_byte_data(client, DS3232_REG_CR);
- if (control < 0) {
- pr_warn("Read Control Register error - Disable IRQ%d\n",
- client->irq);
+ ret = regmap_read(ds3232->regmap, DS3232_REG_CR, &control);
+ if (ret) {
+ dev_warn(ds3232->dev,
+ "Read Control Register error %d\n", ret);
} else {
/* disable alarm1 interrupt */
control &= ~(DS3232_REG_CR_A1IE);
- i2c_smbus_write_byte_data(client, DS3232_REG_CR,
- control);
+ ret = regmap_write(ds3232->regmap, DS3232_REG_CR,
+ control);
+ if (ret) {
+ dev_warn(ds3232->dev,
+ "Write Control Register error %d\n",
+ ret);
+ goto unlock;
+ }
/* clear the alarm pend flag */
stat &= ~DS3232_REG_SR_A1F;
- i2c_smbus_write_byte_data(client, DS3232_REG_SR, stat);
+ ret = regmap_write(ds3232->regmap, DS3232_REG_SR, stat);
+ if (ret) {
+ dev_warn(ds3232->dev,
+ "Write Status Register error %d\n",
+ ret);
+ goto unlock;
+ }
rtc_update_irq(ds3232->rtc, 1, RTC_AF | RTC_IRQF);
-
- if (!ds3232->exiting)
- enable_irq(client->irq);
}
}
unlock:
- mutex_unlock(&ds3232->mutex);
+ mutex_unlock(lock);
+
+ return IRQ_HANDLED;
}
static const struct rtc_class_ops ds3232_rtc_ops = {
@@ -406,67 +350,50 @@ static const struct rtc_class_ops ds3232_rtc_ops = {
.alarm_irq_enable = ds3232_alarm_irq_enable,
};
-static int ds3232_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name)
{
struct ds3232 *ds3232;
int ret;
- ds3232 = devm_kzalloc(&client->dev, sizeof(struct ds3232), GFP_KERNEL);
+ ds3232 = devm_kzalloc(dev, sizeof(*ds3232), GFP_KERNEL);
if (!ds3232)
return -ENOMEM;
- ds3232->client = client;
- i2c_set_clientdata(client, ds3232);
-
- INIT_WORK(&ds3232->work, ds3232_work);
- mutex_init(&ds3232->mutex);
+ ds3232->regmap = regmap;
+ ds3232->irq = irq;
+ ds3232->dev = dev;
+ dev_set_drvdata(dev, ds3232);
- ret = ds3232_check_rtc_status(client);
+ ret = ds3232_check_rtc_status(dev);
if (ret)
return ret;
- if (client->irq > 0) {
- ret = devm_request_irq(&client->dev, client->irq, ds3232_irq,
- IRQF_SHARED, "ds3232", client);
+ if (ds3232->irq > 0) {
+ ret = devm_request_threaded_irq(dev, ds3232->irq, NULL,
+ ds3232_irq,
+ IRQF_SHARED | IRQF_ONESHOT,
+ name, dev);
if (ret) {
- dev_err(&client->dev, "unable to request IRQ\n");
- }
- device_init_wakeup(&client->dev, 1);
- }
- ds3232->rtc = devm_rtc_device_register(&client->dev, client->name,
- &ds3232_rtc_ops, THIS_MODULE);
- return PTR_ERR_OR_ZERO(ds3232->rtc);
-}
-
-static int ds3232_remove(struct i2c_client *client)
-{
- struct ds3232 *ds3232 = i2c_get_clientdata(client);
-
- if (client->irq > 0) {
- mutex_lock(&ds3232->mutex);
- ds3232->exiting = 1;
- mutex_unlock(&ds3232->mutex);
-
- devm_free_irq(&client->dev, client->irq, client);
- cancel_work_sync(&ds3232->work);
+ ds3232->irq = 0;
+ dev_err(dev, "unable to request IRQ\n");
+ } else
+ device_init_wakeup(dev, 1);
}
+ ds3232->rtc = devm_rtc_device_register(dev, name, &ds3232_rtc_ops,
+ THIS_MODULE);
- return 0;
+ return PTR_ERR_OR_ZERO(ds3232->rtc);
}
#ifdef CONFIG_PM_SLEEP
static int ds3232_suspend(struct device *dev)
{
struct ds3232 *ds3232 = dev_get_drvdata(dev);
- struct i2c_client *client = to_i2c_client(dev);
- if (device_can_wakeup(dev)) {
- ds3232->suspended = true;
- if (irq_set_irq_wake(client->irq, 1)) {
+ if (device_may_wakeup(dev)) {
+ if (enable_irq_wake(ds3232->irq))
dev_warn_once(dev, "Cannot set wakeup source\n");
- ds3232->suspended = false;
- }
}
return 0;
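
Annotation (not part of the patch): the probe hunk above completes the move from the old workqueue scheme to a threaded interrupt, and it leans on a genirq rule worth spelling out. When the primary handler is NULL, request_threaded_irq() rejects the request with -EINVAL unless IRQF_ONESHOT is set, because the core must keep the line masked until the thread returns. That masking is exactly what the deleted code emulated by hand, re-enabling the line from the work function only when not exiting. A minimal sketch of the pattern, using the identifiers from the patch:

	/* Sketch only: no hard handler, so IRQF_ONESHOT is mandatory.
	 * The core keeps the line masked until ds3232_irq() returns. */
	ret = devm_request_threaded_irq(dev, ds3232->irq,
					NULL,		/* hard handler */
					ds3232_irq,	/* thread, may sleep */
					IRQF_SHARED | IRQF_ONESHOT,
					name, dev);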
@@ -475,16 +402,9 @@ static int ds3232_suspend(struct device *dev)
static int ds3232_resume(struct device *dev)
{
struct ds3232 *ds3232 = dev_get_drvdata(dev);
- struct i2c_client *client = to_i2c_client(dev);
- if (ds3232->suspended) {
- ds3232->suspended = false;
-
- /* Clear the hardware alarm pend flag */
- schedule_work(&ds3232->work);
-
- irq_set_irq_wake(client->irq, 0);
- }
+ if (device_may_wakeup(dev))
+ disable_irq_wake(ds3232->irq);
return 0;
}
@@ -494,6 +414,27 @@ static const struct dev_pm_ops ds3232_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ds3232_suspend, ds3232_resume)
};
+#if IS_ENABLED(CONFIG_I2C)
+
+static int ds3232_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+
+ regmap = devm_regmap_init_i2c(client, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "%s: regmap allocation failed: %ld\n",
+ __func__, PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return ds3232_probe(&client->dev, regmap, client->irq, client->name);
+}
+
static const struct i2c_device_id ds3232_id[] = {
{ "ds3232", 0 },
{ }
@@ -505,13 +446,162 @@ static struct i2c_driver ds3232_driver = {
.name = "rtc-ds3232",
.pm = &ds3232_pm_ops,
},
- .probe = ds3232_probe,
- .remove = ds3232_remove,
+ .probe = ds3232_i2c_probe,
.id_table = ds3232_id,
};
-module_i2c_driver(ds3232_driver);
+static int ds3232_register_driver(void)
+{
+ return i2c_add_driver(&ds3232_driver);
+}
+
+static void ds3232_unregister_driver(void)
+{
+ i2c_del_driver(&ds3232_driver);
+}
+
+#else
+
+static int ds3232_register_driver(void)
+{
+ return 0;
+}
+
+static void ds3232_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static int ds3234_probe(struct spi_device *spi)
+{
+ int res;
+ unsigned int tmp;
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .write_flag_mask = 0x80,
+ };
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
+ __func__, PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ spi->mode = SPI_MODE_3;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ res = regmap_read(regmap, DS3232_REG_SECONDS, &tmp);
+ if (res)
+ return res;
+
+ /* Control settings
+ *
+ * CONTROL_REG
+ * BIT 7 6 5 4 3 2 1 0
+ * EOSC BBSQW CONV RS2 RS1 INTCN A2IE A1IE
+ *
+ * 0 0 0 1 1 1 0 0
+ *
+ * CONTROL_STAT_REG
+ * BIT 7 6 5 4 3 2 1 0
+ * OSF BB32kHz CRATE1 CRATE0 EN32kHz BSY A2F A1F
+ *
+ * 1 0 0 0 1 0 0 0
+ */
+ res = regmap_read(regmap, DS3232_REG_CR, &tmp);
+ if (res)
+ return res;
+ res = regmap_write(regmap, DS3232_REG_CR, tmp & 0x1c);
+ if (res)
+ return res;
+
+ res = regmap_read(regmap, DS3232_REG_SR, &tmp);
+ if (res)
+ return res;
+ res = regmap_write(regmap, DS3232_REG_SR, tmp & 0x88);
+ if (res)
+ return res;
+
+ /* Print our settings */
+ res = regmap_read(regmap, DS3232_REG_CR, &tmp);
+ if (res)
+ return res;
+ dev_info(&spi->dev, "Control Reg: 0x%02x\n", tmp);
+
+ res = regmap_read(regmap, DS3232_REG_SR, &tmp);
+ if (res)
+ return res;
+ dev_info(&spi->dev, "Ctrl/Stat Reg: 0x%02x\n", tmp);
+
+ return ds3232_probe(&spi->dev, regmap, spi->irq, "ds3234");
+}
+
+static struct spi_driver ds3234_driver = {
+ .driver = {
+ .name = "ds3234",
+ },
+ .probe = ds3234_probe,
+};
+
+static int ds3234_register_driver(void)
+{
+ return spi_register_driver(&ds3234_driver);
+}
+
+static void ds3234_unregister_driver(void)
+{
+ spi_unregister_driver(&ds3234_driver);
+}
+
+#else
+
+static int ds3234_register_driver(void)
+{
+ return 0;
+}
+
+static void ds3234_unregister_driver(void)
+{
+}
+
+#endif
+
+static int __init ds323x_init(void)
+{
+ int ret;
+
+ ret = ds3232_register_driver();
+ if (ret) {
+ pr_err("Failed to register ds3232 driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = ds3234_register_driver();
+ if (ret) {
+ pr_err("Failed to register ds3234 driver: %d\n", ret);
+ ds3232_unregister_driver();
+ }
+
+ return ret;
+}
+module_init(ds323x_init)
+
+static void __exit ds323x_exit(void)
+{
+ ds3234_unregister_driver();
+ ds3232_unregister_driver();
+}
+module_exit(ds323x_exit)
MODULE_AUTHOR("Srikanth Srinivasan <srikanth.srinivasan@freescale.com>");
-MODULE_DESCRIPTION("Maxim/Dallas DS3232 RTC Driver");
+MODULE_AUTHOR("Dennis Aberilla <denzzzhome@yahoo.com>");
+MODULE_DESCRIPTION("Maxim/Dallas DS3232/DS3234 RTC Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:ds3234");
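
Annotation (not part of the patch): with that, rtc-ds3232.c serves both chips. Every register access in the shared core goes through regmap, so ds3232_probe() never touches an i2c_client or spi_device, and the paired IS_ENABLED(CONFIG_I2C)/IS_ENABLED(CONFIG_SPI_MASTER) stanzas let one object file build with either bus (or both) enabled, the stubs keeping ds323x_init() unconditional. The only bus-specific knowledge left sits in the regmap_config initializers, where write_flag_mask = 0x80 tells regmap to set the address MSB on SPI writes, as the DS3234 requires. A sketch of why a core helper stays bus-agnostic, assuming the structures from the patch:

	/* Sketch: identical on both buses; regmap applies the SPI
	 * write flag transparently when the config asks for it. */
	static int ds3232_read_status(struct ds3232 *ds3232, unsigned int *stat)
	{
		return regmap_read(ds3232->regmap, DS3232_REG_SR, stat);
	}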
diff --git a/drivers/rtc/rtc-ds3234.c b/drivers/rtc/rtc-ds3234.c
deleted file mode 100644
index 570ab28fc354..000000000000
--- a/drivers/rtc/rtc-ds3234.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/* rtc-ds3234.c
- *
- * Driver for Dallas Semiconductor (DS3234) SPI RTC with Integrated Crystal
- * and SRAM.
- *
- * Copyright (C) 2008 MIMOMax Wireless Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/rtc.h>
-#include <linux/spi/spi.h>
-#include <linux/bcd.h>
-
-#define DS3234_REG_SECONDS 0x00
-#define DS3234_REG_MINUTES 0x01
-#define DS3234_REG_HOURS 0x02
-#define DS3234_REG_DAY 0x03
-#define DS3234_REG_DATE 0x04
-#define DS3234_REG_MONTH 0x05
-#define DS3234_REG_YEAR 0x06
-#define DS3234_REG_CENTURY (1 << 7) /* Bit 7 of the Month register */
-
-#define DS3234_REG_CONTROL 0x0E
-#define DS3234_REG_CONT_STAT 0x0F
-
-static int ds3234_set_reg(struct device *dev, unsigned char address,
- unsigned char data)
-{
- struct spi_device *spi = to_spi_device(dev);
- unsigned char buf[2];
-
- /* MSB must be '1' to indicate write */
- buf[0] = address | 0x80;
- buf[1] = data;
-
- return spi_write_then_read(spi, buf, 2, NULL, 0);
-}
-
-static int ds3234_get_reg(struct device *dev, unsigned char address,
- unsigned char *data)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- *data = address & 0x7f;
-
- return spi_write_then_read(spi, data, 1, data, 1);
-}
-
-static int ds3234_read_time(struct device *dev, struct rtc_time *dt)
-{
- int err;
- unsigned char buf[8];
- struct spi_device *spi = to_spi_device(dev);
-
- buf[0] = 0x00; /* Start address */
-
- err = spi_write_then_read(spi, buf, 1, buf, 8);
- if (err != 0)
- return err;
-
- /* Seconds, Minutes, Hours, Day, Date, Month, Year */
- dt->tm_sec = bcd2bin(buf[0]);
- dt->tm_min = bcd2bin(buf[1]);
- dt->tm_hour = bcd2bin(buf[2] & 0x3f);
- dt->tm_wday = bcd2bin(buf[3]) - 1; /* 0 = Sun */
- dt->tm_mday = bcd2bin(buf[4]);
- dt->tm_mon = bcd2bin(buf[5] & 0x1f) - 1; /* 0 = Jan */
- dt->tm_year = bcd2bin(buf[6] & 0xff) + 100; /* Assume 20YY */
-
- return rtc_valid_tm(dt);
-}
-
-static int ds3234_set_time(struct device *dev, struct rtc_time *dt)
-{
- ds3234_set_reg(dev, DS3234_REG_SECONDS, bin2bcd(dt->tm_sec));
- ds3234_set_reg(dev, DS3234_REG_MINUTES, bin2bcd(dt->tm_min));
- ds3234_set_reg(dev, DS3234_REG_HOURS, bin2bcd(dt->tm_hour) & 0x3f);
-
- /* 0 = Sun */
- ds3234_set_reg(dev, DS3234_REG_DAY, bin2bcd(dt->tm_wday + 1));
- ds3234_set_reg(dev, DS3234_REG_DATE, bin2bcd(dt->tm_mday));
-
- /* 0 = Jan */
- ds3234_set_reg(dev, DS3234_REG_MONTH, bin2bcd(dt->tm_mon + 1));
-
- /* Assume 20YY although we just want to make sure not to go negative. */
- if (dt->tm_year > 100)
- dt->tm_year -= 100;
-
- ds3234_set_reg(dev, DS3234_REG_YEAR, bin2bcd(dt->tm_year));
-
- return 0;
-}
-
-static const struct rtc_class_ops ds3234_rtc_ops = {
- .read_time = ds3234_read_time,
- .set_time = ds3234_set_time,
-};
-
-static int ds3234_probe(struct spi_device *spi)
-{
- struct rtc_device *rtc;
- unsigned char tmp;
- int res;
-
- spi->mode = SPI_MODE_3;
- spi->bits_per_word = 8;
- spi_setup(spi);
-
- res = ds3234_get_reg(&spi->dev, DS3234_REG_SECONDS, &tmp);
- if (res != 0)
- return res;
-
- /* Control settings
- *
- * CONTROL_REG
- * BIT 7 6 5 4 3 2 1 0
- * EOSC BBSQW CONV RS2 RS1 INTCN A2IE A1IE
- *
- * 0 0 0 1 1 1 0 0
- *
- * CONTROL_STAT_REG
- * BIT 7 6 5 4 3 2 1 0
- * OSF BB32kHz CRATE1 CRATE0 EN32kHz BSY A2F A1F
- *
- * 1 0 0 0 1 0 0 0
- */
- ds3234_get_reg(&spi->dev, DS3234_REG_CONTROL, &tmp);
- ds3234_set_reg(&spi->dev, DS3234_REG_CONTROL, tmp & 0x1c);
-
- ds3234_get_reg(&spi->dev, DS3234_REG_CONT_STAT, &tmp);
- ds3234_set_reg(&spi->dev, DS3234_REG_CONT_STAT, tmp & 0x88);
-
- /* Print our settings */
- ds3234_get_reg(&spi->dev, DS3234_REG_CONTROL, &tmp);
- dev_info(&spi->dev, "Control Reg: 0x%02x\n", tmp);
-
- ds3234_get_reg(&spi->dev, DS3234_REG_CONT_STAT, &tmp);
- dev_info(&spi->dev, "Ctrl/Stat Reg: 0x%02x\n", tmp);
-
- rtc = devm_rtc_device_register(&spi->dev, "ds3234",
- &ds3234_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- spi_set_drvdata(spi, rtc);
-
- return 0;
-}
-
-static struct spi_driver ds3234_driver = {
- .driver = {
- .name = "ds3234",
- },
- .probe = ds3234_probe,
-};
-
-module_spi_driver(ds3234_driver);
-
-MODULE_DESCRIPTION("DS3234 SPI RTC driver");
-MODULE_AUTHOR("Dennis Aberilla <denzzzhome@yahoo.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:ds3234");
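
Annotation (not part of the patch): the removed helpers above are precisely what the regmap configuration replaces; ds3234_set_reg() OR'ed 0x80 into the address byte to mark a write and ds3234_get_reg() masked it off for reads. The equivalence, as a sketch:

	/* Removed, open-coded: flag the write by hand. */
	buf[0] = address | 0x80;	/* MSB = 1 means write */
	buf[1] = data;
	ret = spi_write_then_read(spi, buf, 2, NULL, 0);

	/* Replacement via regmap: the flag comes from
	 * config.write_flag_mask = 0x80, set once at init. */
	ret = regmap_write(regmap, address, data);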
diff --git a/drivers/rtc/rtc-generic.c b/drivers/rtc/rtc-generic.c
index e782ebd719b2..d726c6aa96a8 100644
--- a/drivers/rtc/rtc-generic.c
+++ b/drivers/rtc/rtc-generic.c
@@ -9,6 +9,8 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
+#if defined(CONFIG_M68K) || defined(CONFIG_PARISC) || \
+ defined(CONFIG_PPC) || defined(CONFIG_SUPERH32)
#include <asm/rtc.h>
static int generic_get_time(struct device *dev, struct rtc_time *tm)
@@ -33,13 +35,21 @@ static const struct rtc_class_ops generic_rtc_ops = {
.read_time = generic_get_time,
.set_time = generic_set_time,
};
+#else
+#define generic_rtc_ops *(struct rtc_class_ops*)NULL
+#endif
static int __init generic_rtc_probe(struct platform_device *dev)
{
struct rtc_device *rtc;
+ const struct rtc_class_ops *ops;
+
+ ops = dev_get_platdata(&dev->dev);
+ if (!ops)
+ ops = &generic_rtc_ops;
rtc = devm_rtc_device_register(&dev->dev, "rtc-generic",
- &generic_rtc_ops, THIS_MODULE);
+ ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
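
Annotation (not part of the patch): rtc-generic thus becomes buildable everywhere. Platform code may now pass its own rtc_class_ops as platform data, and only M68K/PARISC/PPC/SUPERH32 keep the <asm/rtc.h> fallback. A hypothetical registration, with the board helpers assumed rather than taken from this patch:

	/* Hypothetical board code: the two callbacks are assumed to
	 * exist and wrap whatever clock access the platform has. */
	static const struct rtc_class_ops board_rtc_ops = {
		.read_time = board_rtc_read_time,
		.set_time  = board_rtc_set_time,
	};

	static int __init board_rtc_init(void)
	{
		struct platform_device *pdev;

		pdev = platform_device_register_data(NULL, "rtc-generic", -1,
						     &board_rtc_ops,
						     sizeof(board_rtc_ops));
		return PTR_ERR_OR_ZERO(pdev);
	}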
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 097325d96db5..b1b4746a0eab 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
* it does not seem to carry it over a subsequent write/read.
* So we'll limit ourself to 100 years, starting at 2000 for now.
*/
- buf[6] = tm->tm_year - 100;
+ buf[6] = bin2bcd(tm->tm_year - 100);
/*
* CTL1 only contains TEST-mode bits apart from stop,
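
Annotation (not part of the patch): the hym8563 change above is a one-line fix, but a worked value shows the failure mode. The chip's year register holds two BCD digits, and every other field in buf[] already goes through bin2bcd(). For 2045, tm_year is 145, so the driver must write bin2bcd(45) = 0x45; the old code wrote the raw binary 45 (0x2d), which is not valid BCD and reads back as a corrupted year.

	/* 2045 -> tm_year == 145 -> the two BCD digits "45" */
	buf[6] = bin2bcd(tm->tm_year - 100);	/* 0x45, not 0x2d */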
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index a82937e2f824..d107a8e72a7d 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -176,7 +176,13 @@ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
bin2bcd(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
buf[M41T80_REG_MON] =
bin2bcd(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
+
/* assume 20YY not 19YY */
+ if (tm->tm_year < 100 || tm->tm_year > 199) {
+ dev_err(&client->dev, "Year must be between 2000 and 2099. It's %d.\n",
+ tm->tm_year + 1900);
+ return -EINVAL;
+ }
buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year % 100);
if (i2c_transfer(client->adapter, msgs, 1) != 1) {
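
Annotation (not part of the patch): the new m41t80 check makes the long-standing "assume 20YY" comment enforceable. A single BCD year register can only represent 2000-2099, i.e. tm_year 100-199; without the check, tm_year % 100 silently aliases out-of-range years, so 1999 (tm_year 99) and 2099 (tm_year 199) both land on register value 0x99 and a caller setting 1999 would read back 2099.

	/* tm_year is years since 1900; one BCD register => 2000..2099 */
	if (tm->tm_year < 100 || tm->tm_year > 199)
		return -EINVAL;		/* 1999 would read back as 2099 */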
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 7184a0eda793..182fdd00e290 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -1,5 +1,5 @@
/*
- * RTC driver for Maxim MAX77686
+ * RTC driver for Maxim MAX77686 and MAX77802
*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
*
@@ -12,8 +12,7 @@
*
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
+#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/rtc.h>
#include <linux/delay.h>
@@ -24,24 +23,38 @@
#include <linux/irqdomain.h>
#include <linux/regmap.h>
+#define MAX77686_I2C_ADDR_RTC (0x0C >> 1)
+#define MAX77620_I2C_ADDR_RTC 0x68
+#define MAX77686_INVALID_I2C_ADDR (-1)
+
+/* Define non existing register */
+#define MAX77686_INVALID_REG (-1)
+
/* RTC Control Register */
#define BCD_EN_SHIFT 0
-#define BCD_EN_MASK (1 << BCD_EN_SHIFT)
+#define BCD_EN_MASK BIT(BCD_EN_SHIFT)
#define MODEL24_SHIFT 1
-#define MODEL24_MASK (1 << MODEL24_SHIFT)
+#define MODEL24_MASK BIT(MODEL24_SHIFT)
/* RTC Update Register1 */
#define RTC_UDR_SHIFT 0
-#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT)
+#define RTC_UDR_MASK BIT(RTC_UDR_SHIFT)
#define RTC_RBUDR_SHIFT 4
-#define RTC_RBUDR_MASK (1 << RTC_RBUDR_SHIFT)
+#define RTC_RBUDR_MASK BIT(RTC_RBUDR_SHIFT)
/* RTC Hour register */
#define HOUR_PM_SHIFT 6
-#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
+#define HOUR_PM_MASK BIT(HOUR_PM_SHIFT)
/* RTC Alarm Enable */
#define ALARM_ENABLE_SHIFT 7
-#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+#define ALARM_ENABLE_MASK BIT(ALARM_ENABLE_SHIFT)
-#define MAX77686_RTC_UPDATE_DELAY 16
+#define REG_RTC_NONE 0xdeadbeef
+
+/*
+ * MAX77802 has a separate register (RTCAE1) for alarm enable, instead
+ * of using one bit in each of the RTC{SEC,MIN,HOUR,DAY,MONTH,YEAR,DATE}
+ * registers as is done in the MAX77686.
+ */
+#define MAX77802_ALARM_ENABLE_VALUE 0x77
enum {
RTC_SEC = 0,
@@ -54,15 +67,38 @@ enum {
RTC_NR_TIME
};
+struct max77686_rtc_driver_data {
+ /* Minimum usecs needed for a RTC update */
+ unsigned long delay;
+ /* Mask used to read RTC registers value */
+ u8 mask;
+ /* Registers offset to I2C addresses map */
+ const unsigned int *map;
+ /* Has a separate alarm enable register? */
+ bool alarm_enable_reg;
+ /* I2C address for RTC block */
+ int rtc_i2c_addr;
+ /* RTC interrupt via platform resource */
+ bool rtc_irq_from_platform;
+ /* Pending alarm status register */
+ int alarm_pending_status_reg;
+ /* RTC IRQ CHIP for regmap */
+ const struct regmap_irq_chip *rtc_irq_chip;
+};
+
struct max77686_rtc_info {
struct device *dev;
- struct max77686_dev *max77686;
struct i2c_client *rtc;
struct rtc_device *rtc_dev;
struct mutex lock;
struct regmap *regmap;
+ struct regmap *rtc_regmap;
+
+ const struct max77686_rtc_driver_data *drv_data;
+ struct regmap_irq_chip_data *rtc_irq_data;
+ int rtc_irq;
int virq;
int rtc_24hr_mode;
};
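
Annotation (not part of the patch): the two structs above carry the whole variant story. Register layout, update delay, field width and alarm scheme become data in max77686_rtc_driver_data, and the info struct simply points at the chosen table, so the helpers below indirect through it instead of branching on chip type. Condensed to a sketch:

	/* Sketch of the pattern used throughout the rewritten helpers. */
	ret = regmap_read(info->rtc_regmap,
			  info->drv_data->map[REG_RTC_SEC], &val);	/* layout */
	tm->tm_sec = val & info->drv_data->mask;			/* width */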
@@ -72,29 +108,190 @@ enum MAX77686_RTC_OP {
MAX77686_RTC_READ,
};
+/* These are not registers but just offsets that are mapped to addresses */
+enum max77686_rtc_reg_offset {
+ REG_RTC_CONTROLM = 0,
+ REG_RTC_CONTROL,
+ REG_RTC_UPDATE0,
+ REG_WTSR_SMPL_CNTL,
+ REG_RTC_SEC,
+ REG_RTC_MIN,
+ REG_RTC_HOUR,
+ REG_RTC_WEEKDAY,
+ REG_RTC_MONTH,
+ REG_RTC_YEAR,
+ REG_RTC_DATE,
+ REG_ALARM1_SEC,
+ REG_ALARM1_MIN,
+ REG_ALARM1_HOUR,
+ REG_ALARM1_WEEKDAY,
+ REG_ALARM1_MONTH,
+ REG_ALARM1_YEAR,
+ REG_ALARM1_DATE,
+ REG_ALARM2_SEC,
+ REG_ALARM2_MIN,
+ REG_ALARM2_HOUR,
+ REG_ALARM2_WEEKDAY,
+ REG_ALARM2_MONTH,
+ REG_ALARM2_YEAR,
+ REG_ALARM2_DATE,
+ REG_RTC_AE1,
+ REG_RTC_END,
+};
+
+/* Maps RTC registers offset to the MAX77686 register addresses */
+static const unsigned int max77686_map[REG_RTC_END] = {
+ [REG_RTC_CONTROLM] = MAX77686_RTC_CONTROLM,
+ [REG_RTC_CONTROL] = MAX77686_RTC_CONTROL,
+ [REG_RTC_UPDATE0] = MAX77686_RTC_UPDATE0,
+ [REG_WTSR_SMPL_CNTL] = MAX77686_WTSR_SMPL_CNTL,
+ [REG_RTC_SEC] = MAX77686_RTC_SEC,
+ [REG_RTC_MIN] = MAX77686_RTC_MIN,
+ [REG_RTC_HOUR] = MAX77686_RTC_HOUR,
+ [REG_RTC_WEEKDAY] = MAX77686_RTC_WEEKDAY,
+ [REG_RTC_MONTH] = MAX77686_RTC_MONTH,
+ [REG_RTC_YEAR] = MAX77686_RTC_YEAR,
+ [REG_RTC_DATE] = MAX77686_RTC_DATE,
+ [REG_ALARM1_SEC] = MAX77686_ALARM1_SEC,
+ [REG_ALARM1_MIN] = MAX77686_ALARM1_MIN,
+ [REG_ALARM1_HOUR] = MAX77686_ALARM1_HOUR,
+ [REG_ALARM1_WEEKDAY] = MAX77686_ALARM1_WEEKDAY,
+ [REG_ALARM1_MONTH] = MAX77686_ALARM1_MONTH,
+ [REG_ALARM1_YEAR] = MAX77686_ALARM1_YEAR,
+ [REG_ALARM1_DATE] = MAX77686_ALARM1_DATE,
+ [REG_ALARM2_SEC] = MAX77686_ALARM2_SEC,
+ [REG_ALARM2_MIN] = MAX77686_ALARM2_MIN,
+ [REG_ALARM2_HOUR] = MAX77686_ALARM2_HOUR,
+ [REG_ALARM2_WEEKDAY] = MAX77686_ALARM2_WEEKDAY,
+ [REG_ALARM2_MONTH] = MAX77686_ALARM2_MONTH,
+ [REG_ALARM2_YEAR] = MAX77686_ALARM2_YEAR,
+ [REG_ALARM2_DATE] = MAX77686_ALARM2_DATE,
+ [REG_RTC_AE1] = REG_RTC_NONE,
+};
+
+static const struct regmap_irq max77686_rtc_irqs[] = {
+ /* RTC interrupts */
+ REGMAP_IRQ_REG(0, 0, MAX77686_RTCINT_RTC60S_MSK),
+ REGMAP_IRQ_REG(1, 0, MAX77686_RTCINT_RTCA1_MSK),
+ REGMAP_IRQ_REG(2, 0, MAX77686_RTCINT_RTCA2_MSK),
+ REGMAP_IRQ_REG(3, 0, MAX77686_RTCINT_SMPL_MSK),
+ REGMAP_IRQ_REG(4, 0, MAX77686_RTCINT_RTC1S_MSK),
+ REGMAP_IRQ_REG(5, 0, MAX77686_RTCINT_WTSR_MSK),
+};
+
+static const struct regmap_irq_chip max77686_rtc_irq_chip = {
+ .name = "max77686-rtc",
+ .status_base = MAX77686_RTC_INT,
+ .mask_base = MAX77686_RTC_INTM,
+ .num_regs = 1,
+ .irqs = max77686_rtc_irqs,
+ .num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
+};
+
+static const struct max77686_rtc_driver_data max77686_drv_data = {
+ .delay = 16000,
+ .mask = 0x7f,
+ .map = max77686_map,
+ .alarm_enable_reg = false,
+ .rtc_irq_from_platform = false,
+ .alarm_pending_status_reg = MAX77686_REG_STATUS2,
+ .rtc_i2c_addr = MAX77686_I2C_ADDR_RTC,
+ .rtc_irq_chip = &max77686_rtc_irq_chip,
+};
+
+static const struct max77686_rtc_driver_data max77620_drv_data = {
+ .delay = 16000,
+ .mask = 0x7f,
+ .map = max77686_map,
+ .alarm_enable_reg = false,
+ .rtc_irq_from_platform = true,
+ .alarm_pending_status_reg = MAX77686_INVALID_REG,
+ .rtc_i2c_addr = MAX77620_I2C_ADDR_RTC,
+ .rtc_irq_chip = &max77686_rtc_irq_chip,
+};
+
+static const unsigned int max77802_map[REG_RTC_END] = {
+ [REG_RTC_CONTROLM] = MAX77802_RTC_CONTROLM,
+ [REG_RTC_CONTROL] = MAX77802_RTC_CONTROL,
+ [REG_RTC_UPDATE0] = MAX77802_RTC_UPDATE0,
+ [REG_WTSR_SMPL_CNTL] = MAX77802_WTSR_SMPL_CNTL,
+ [REG_RTC_SEC] = MAX77802_RTC_SEC,
+ [REG_RTC_MIN] = MAX77802_RTC_MIN,
+ [REG_RTC_HOUR] = MAX77802_RTC_HOUR,
+ [REG_RTC_WEEKDAY] = MAX77802_RTC_WEEKDAY,
+ [REG_RTC_MONTH] = MAX77802_RTC_MONTH,
+ [REG_RTC_YEAR] = MAX77802_RTC_YEAR,
+ [REG_RTC_DATE] = MAX77802_RTC_DATE,
+ [REG_ALARM1_SEC] = MAX77802_ALARM1_SEC,
+ [REG_ALARM1_MIN] = MAX77802_ALARM1_MIN,
+ [REG_ALARM1_HOUR] = MAX77802_ALARM1_HOUR,
+ [REG_ALARM1_WEEKDAY] = MAX77802_ALARM1_WEEKDAY,
+ [REG_ALARM1_MONTH] = MAX77802_ALARM1_MONTH,
+ [REG_ALARM1_YEAR] = MAX77802_ALARM1_YEAR,
+ [REG_ALARM1_DATE] = MAX77802_ALARM1_DATE,
+ [REG_ALARM2_SEC] = MAX77802_ALARM2_SEC,
+ [REG_ALARM2_MIN] = MAX77802_ALARM2_MIN,
+ [REG_ALARM2_HOUR] = MAX77802_ALARM2_HOUR,
+ [REG_ALARM2_WEEKDAY] = MAX77802_ALARM2_WEEKDAY,
+ [REG_ALARM2_MONTH] = MAX77802_ALARM2_MONTH,
+ [REG_ALARM2_YEAR] = MAX77802_ALARM2_YEAR,
+ [REG_ALARM2_DATE] = MAX77802_ALARM2_DATE,
+ [REG_RTC_AE1] = MAX77802_RTC_AE1,
+};
+
+static const struct regmap_irq_chip max77802_rtc_irq_chip = {
+ .name = "max77802-rtc",
+ .status_base = MAX77802_RTC_INT,
+ .mask_base = MAX77802_RTC_INTM,
+ .num_regs = 1,
+ .irqs = max77686_rtc_irqs, /* same masks as 77686 */
+ .num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
+};
+
+static const struct max77686_rtc_driver_data max77802_drv_data = {
+ .delay = 200,
+ .mask = 0xff,
+ .map = max77802_map,
+ .alarm_enable_reg = true,
+ .rtc_irq_from_platform = false,
+ .alarm_pending_status_reg = MAX77686_REG_STATUS2,
+ .rtc_i2c_addr = MAX77686_INVALID_I2C_ADDR,
+ .rtc_irq_chip = &max77802_rtc_irq_chip,
+};
+
static void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
- int rtc_24hr_mode)
+ struct max77686_rtc_info *info)
{
- tm->tm_sec = data[RTC_SEC] & 0x7f;
- tm->tm_min = data[RTC_MIN] & 0x7f;
- if (rtc_24hr_mode)
+ u8 mask = info->drv_data->mask;
+
+ tm->tm_sec = data[RTC_SEC] & mask;
+ tm->tm_min = data[RTC_MIN] & mask;
+ if (info->rtc_24hr_mode) {
tm->tm_hour = data[RTC_HOUR] & 0x1f;
- else {
+ } else {
tm->tm_hour = data[RTC_HOUR] & 0x0f;
if (data[RTC_HOUR] & HOUR_PM_MASK)
tm->tm_hour += 12;
}
/* Only a single bit is set in data[], so fls() would be equivalent */
- tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0x7f) - 1;
+ tm->tm_wday = ffs(data[RTC_WEEKDAY] & mask) - 1;
tm->tm_mday = data[RTC_DATE] & 0x1f;
tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
- tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;
+ tm->tm_year = data[RTC_YEAR] & mask;
tm->tm_yday = 0;
tm->tm_isdst = 0;
+
+ /*
+ * MAX77686 uses 1 bit from sec/min/hour/etc RTC registers and the
+ * year values are just 0..99 so add 100 to support up to 2099.
+ */
+ if (!info->drv_data->alarm_enable_reg)
+ tm->tm_year += 100;
}
-static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data,
+ struct max77686_rtc_info *info)
{
data[RTC_SEC] = tm->tm_sec;
data[RTC_MIN] = tm->tm_min;
@@ -102,35 +299,44 @@ static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
data[RTC_WEEKDAY] = 1 << tm->tm_wday;
data[RTC_DATE] = tm->tm_mday;
data[RTC_MONTH] = tm->tm_mon + 1;
+
+ if (info->drv_data->alarm_enable_reg) {
+ data[RTC_YEAR] = tm->tm_year;
+ return 0;
+ }
+
data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
if (tm->tm_year < 100) {
- pr_warn("RTC cannot handle the year %d. Assume it's 2000.\n",
+ dev_err(info->dev, "RTC cannot handle the year %d.\n",
1900 + tm->tm_year);
return -EINVAL;
}
+
return 0;
}
static int max77686_rtc_update(struct max77686_rtc_info *info,
- enum MAX77686_RTC_OP op)
+ enum MAX77686_RTC_OP op)
{
int ret;
unsigned int data;
+ unsigned long delay = info->drv_data->delay;
if (op == MAX77686_RTC_WRITE)
data = 1 << RTC_UDR_SHIFT;
else
data = 1 << RTC_RBUDR_SHIFT;
- ret = regmap_update_bits(info->max77686->rtc_regmap,
- MAX77686_RTC_UPDATE0, data, data);
+ ret = regmap_update_bits(info->rtc_regmap,
+ info->drv_data->map[REG_RTC_UPDATE0],
+ data, data);
if (ret < 0)
- dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
- __func__, ret, data);
+ dev_err(info->dev, "Fail to write update reg(ret=%d, data=0x%x)\n",
+ ret, data);
else {
- /* Minimum 16ms delay required before RTC update. */
- msleep(MAX77686_RTC_UPDATE_DELAY);
+ /* Minimum delay required before RTC update. */
+ usleep_range(delay, delay * 2);
}
return ret;
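
Annotation (not part of the patch): two details in this hunk travel together. The delay becomes per-variant data (16000 us for MAX77686/77620, 200 us for MAX77802), and msleep() becomes usleep_range(). That switch is the documented practice for waits in the roughly 10 us to 20 ms range: msleep() rounds up to jiffy granularity, so a nominal 200 us wait could stretch to several milliseconds, while usleep_range() uses hrtimers and its upper bound lets the scheduler coalesce wakeups.

	/* delay comes from the variant table; the x2 upper bound is
	 * slack for timer coalescing, not extra required wait. */
	usleep_range(delay, delay * 2);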
@@ -148,14 +354,15 @@ static int max77686_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (ret < 0)
goto out;
- ret = regmap_bulk_read(info->max77686->rtc_regmap,
- MAX77686_RTC_SEC, data, RTC_NR_TIME);
+ ret = regmap_bulk_read(info->rtc_regmap,
+ info->drv_data->map[REG_RTC_SEC],
+ data, ARRAY_SIZE(data));
if (ret < 0) {
- dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__, ret);
+ dev_err(info->dev, "Fail to read time reg(%d)\n", ret);
goto out;
}
- max77686_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
+ max77686_rtc_data_to_tm(data, tm, info);
ret = rtc_valid_tm(tm);
@@ -170,17 +377,17 @@ static int max77686_rtc_set_time(struct device *dev, struct rtc_time *tm)
u8 data[RTC_NR_TIME];
int ret;
- ret = max77686_rtc_tm_to_data(tm, data);
+ ret = max77686_rtc_tm_to_data(tm, data, info);
if (ret < 0)
return ret;
mutex_lock(&info->lock);
- ret = regmap_bulk_write(info->max77686->rtc_regmap,
- MAX77686_RTC_SEC, data, RTC_NR_TIME);
+ ret = regmap_bulk_write(info->rtc_regmap,
+ info->drv_data->map[REG_RTC_SEC],
+ data, ARRAY_SIZE(data));
if (ret < 0) {
- dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
- ret);
+ dev_err(info->dev, "Fail to write time reg(%d)\n", ret);
goto out;
}
@@ -196,6 +403,7 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct max77686_rtc_info *info = dev_get_drvdata(dev);
u8 data[RTC_NR_TIME];
unsigned int val;
+ const unsigned int *map = info->drv_data->map;
int i, ret;
mutex_lock(&info->lock);
@@ -204,29 +412,53 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret < 0)
goto out;
- ret = regmap_bulk_read(info->max77686->rtc_regmap,
- MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
+ data, ARRAY_SIZE(data));
if (ret < 0) {
- dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
- __func__, __LINE__, ret);
+ dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
goto out;
}
- max77686_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+ max77686_rtc_data_to_tm(data, &alrm->time, info);
alrm->enabled = 0;
- for (i = 0; i < RTC_NR_TIME; i++) {
- if (data[i] & ALARM_ENABLE_MASK) {
+
+ if (info->drv_data->alarm_enable_reg) {
+ if (map[REG_RTC_AE1] == REG_RTC_NONE) {
+ ret = -EINVAL;
+ dev_err(info->dev,
+ "alarm enable register not set(%d)\n", ret);
+ goto out;
+ }
+
+ ret = regmap_read(info->rtc_regmap, map[REG_RTC_AE1], &val);
+ if (ret < 0) {
+ dev_err(info->dev,
+ "fail to read alarm enable(%d)\n", ret);
+ goto out;
+ }
+
+ if (val)
alrm->enabled = 1;
- break;
+ } else {
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ if (data[i] & ALARM_ENABLE_MASK) {
+ alrm->enabled = 1;
+ break;
+ }
}
}
alrm->pending = 0;
- ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
+
+ if (info->drv_data->alarm_pending_status_reg == MAX77686_INVALID_REG)
+ goto out;
+
+ ret = regmap_read(info->regmap,
+ info->drv_data->alarm_pending_status_reg, &val);
if (ret < 0) {
- dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
- __func__, __LINE__, ret);
+ dev_err(info->dev,
+ "Fail to read alarm pending status reg(%d)\n", ret);
goto out;
}
@@ -235,7 +467,7 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
out:
mutex_unlock(&info->lock);
- return 0;
+ return ret;
}
static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
@@ -243,6 +475,7 @@ static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
u8 data[RTC_NR_TIME];
int ret, i;
struct rtc_time tm;
+ const unsigned int *map = info->drv_data->map;
if (!mutex_is_locked(&info->lock))
dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
@@ -251,24 +484,34 @@ static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
if (ret < 0)
goto out;
- ret = regmap_bulk_read(info->max77686->rtc_regmap,
- MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
- __func__, ret);
- goto out;
- }
+ if (info->drv_data->alarm_enable_reg) {
+ if (map[REG_RTC_AE1] == REG_RTC_NONE) {
+ ret = -EINVAL;
+ dev_err(info->dev,
+ "alarm enable register not set(%d)\n", ret);
+ goto out;
+ }
+
+ ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1], 0);
+ } else {
+ ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
+ data, ARRAY_SIZE(data));
+ if (ret < 0) {
+ dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
+ goto out;
+ }
- max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+ max77686_rtc_data_to_tm(data, &tm, info);
- for (i = 0; i < RTC_NR_TIME; i++)
- data[i] &= ~ALARM_ENABLE_MASK;
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ data[i] &= ~ALARM_ENABLE_MASK;
+
+ ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC],
+ data, ARRAY_SIZE(data));
+ }
- ret = regmap_bulk_write(info->max77686->rtc_regmap,
- MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
if (ret < 0) {
- dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
- __func__, ret);
+ dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
goto out;
}
@@ -282,6 +525,7 @@ static int max77686_rtc_start_alarm(struct max77686_rtc_info *info)
u8 data[RTC_NR_TIME];
int ret;
struct rtc_time tm;
+ const unsigned int *map = info->drv_data->map;
if (!mutex_is_locked(&info->lock))
dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
@@ -290,32 +534,36 @@ static int max77686_rtc_start_alarm(struct max77686_rtc_info *info)
if (ret < 0)
goto out;
- ret = regmap_bulk_read(info->max77686->rtc_regmap,
- MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
- __func__, ret);
- goto out;
+ if (info->drv_data->alarm_enable_reg) {
+ ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1],
+ MAX77802_ALARM_ENABLE_VALUE);
+ } else {
+ ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC],
+ data, ARRAY_SIZE(data));
+ if (ret < 0) {
+ dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
+ goto out;
+ }
+
+ max77686_rtc_data_to_tm(data, &tm, info);
+
+ data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
+ data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+ if (data[RTC_MONTH] & 0xf)
+ data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_YEAR] & info->drv_data->mask)
+ data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
+ if (data[RTC_DATE] & 0x1f)
+ data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+
+ ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC],
+ data, ARRAY_SIZE(data));
}
- max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
-
- data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
- data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
- data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
- data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
- if (data[RTC_MONTH] & 0xf)
- data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
- if (data[RTC_YEAR] & 0x7f)
- data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
- if (data[RTC_DATE] & 0x1f)
- data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
-
- ret = regmap_bulk_write(info->max77686->rtc_regmap,
- MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
if (ret < 0) {
- dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
- __func__, ret);
+ dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
goto out;
}
@@ -330,7 +578,7 @@ static int max77686_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u8 data[RTC_NR_TIME];
int ret;
- ret = max77686_rtc_tm_to_data(&alrm->time, data);
+ ret = max77686_rtc_tm_to_data(&alrm->time, data, info);
if (ret < 0)
return ret;
@@ -340,12 +588,12 @@ static int max77686_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret < 0)
goto out;
- ret = regmap_bulk_write(info->max77686->rtc_regmap,
- MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+ ret = regmap_bulk_write(info->rtc_regmap,
+ info->drv_data->map[REG_ALARM1_SEC],
+ data, ARRAY_SIZE(data));
if (ret < 0) {
- dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
- __func__, ret);
+ dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
goto out;
}
@@ -361,7 +609,7 @@ out:
}
static int max77686_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enabled)
+ unsigned int enabled)
{
struct max77686_rtc_info *info = dev_get_drvdata(dev);
int ret;
@@ -380,7 +628,7 @@ static irqreturn_t max77686_rtc_alarm_irq(int irq, void *data)
{
struct max77686_rtc_info *info = data;
- dev_info(info->dev, "%s:irq(%d)\n", __func__, irq);
+ dev_dbg(info->dev, "RTC alarm IRQ: %d\n", irq);
rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
@@ -406,10 +654,11 @@ static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
info->rtc_24hr_mode = 1;
- ret = regmap_bulk_write(info->max77686->rtc_regmap, MAX77686_RTC_CONTROLM, data, 2);
+ ret = regmap_bulk_write(info->rtc_regmap,
+ info->drv_data->map[REG_RTC_CONTROLM],
+ data, ARRAY_SIZE(data));
if (ret < 0) {
- dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
- __func__, ret);
+ dev_err(info->dev, "Fail to write controlm reg(%d)\n", ret);
return ret;
}
@@ -417,28 +666,97 @@ static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
return ret;
}
+static const struct regmap_config max77686_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int max77686_init_rtc_regmap(struct max77686_rtc_info *info)
+{
+ struct device *parent = info->dev->parent;
+ struct i2c_client *parent_i2c = to_i2c_client(parent);
+ int ret;
+
+ if (info->drv_data->rtc_irq_from_platform) {
+ struct platform_device *pdev = to_platform_device(info->dev);
+
+ info->rtc_irq = platform_get_irq(pdev, 0);
+ if (info->rtc_irq < 0) {
+ dev_err(info->dev, "Failed to get rtc interrupts: %d\n",
+ info->rtc_irq);
+ return info->rtc_irq;
+ }
+ } else {
+ info->rtc_irq = parent_i2c->irq;
+ }
+
+ info->regmap = dev_get_regmap(parent, NULL);
+ if (!info->regmap) {
+ dev_err(info->dev, "Failed to get rtc regmap\n");
+ return -ENODEV;
+ }
+
+ if (info->drv_data->rtc_i2c_addr == MAX77686_INVALID_I2C_ADDR) {
+ info->rtc_regmap = info->regmap;
+ goto add_rtc_irq;
+ }
+
+ info->rtc = i2c_new_dummy(parent_i2c->adapter,
+ info->drv_data->rtc_i2c_addr);
+ if (!info->rtc) {
+ dev_err(info->dev, "Failed to allocate I2C device for RTC\n");
+ return -ENODEV;
+ }
+
+ info->rtc_regmap = devm_regmap_init_i2c(info->rtc,
+ &max77686_rtc_regmap_config);
+ if (IS_ERR(info->rtc_regmap)) {
+ ret = PTR_ERR(info->rtc_regmap);
+ dev_err(info->dev, "Failed to allocate RTC regmap: %d\n", ret);
+ goto err_unregister_i2c;
+ }
+
+add_rtc_irq:
+ ret = regmap_add_irq_chip(info->rtc_regmap, info->rtc_irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_SHARED, 0, info->drv_data->rtc_irq_chip,
+ &info->rtc_irq_data);
+ if (ret < 0) {
+ dev_err(info->dev, "Failed to add RTC irq chip: %d\n", ret);
+ goto err_unregister_i2c;
+ }
+
+ return 0;
+
+err_unregister_i2c:
+ if (info->rtc)
+ i2c_unregister_device(info->rtc);
+ return ret;
+}
+
static int max77686_rtc_probe(struct platform_device *pdev)
{
- struct max77686_dev *max77686 = dev_get_drvdata(pdev->dev.parent);
struct max77686_rtc_info *info;
+ const struct platform_device_id *id = platform_get_device_id(pdev);
int ret;
- dev_info(&pdev->dev, "%s\n", __func__);
-
info = devm_kzalloc(&pdev->dev, sizeof(struct max77686_rtc_info),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
mutex_init(&info->lock);
info->dev = &pdev->dev;
- info->max77686 = max77686;
- info->rtc = max77686->rtc;
+ info->drv_data = (const struct max77686_rtc_driver_data *)
+ id->driver_data;
+
+ ret = max77686_init_rtc_regmap(info);
+ if (ret < 0)
+ return ret;
platform_set_drvdata(pdev, info);
ret = max77686_rtc_init_reg(info);
-
if (ret < 0) {
dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
goto err_rtc;
@@ -446,7 +764,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max77686-rtc",
+ info->rtc_dev = devm_rtc_device_register(&pdev->dev, id->name,
&max77686_rtc_ops, THIS_MODULE);
if (IS_ERR(info->rtc_dev)) {
@@ -457,29 +775,43 @@ static int max77686_rtc_probe(struct platform_device *pdev)
goto err_rtc;
}
- if (!max77686->rtc_irq_data) {
- ret = -EINVAL;
- dev_err(&pdev->dev, "%s: no RTC regmap IRQ chip\n", __func__);
- goto err_rtc;
- }
-
- info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
+ info->virq = regmap_irq_get_virq(info->rtc_irq_data,
MAX77686_RTCIRQ_RTCA1);
- if (!info->virq) {
+ if (info->virq <= 0) {
ret = -ENXIO;
goto err_rtc;
}
- ret = devm_request_threaded_irq(&pdev->dev, info->virq, NULL,
- max77686_rtc_alarm_irq, 0, "rtc-alarm1", info);
- if (ret < 0)
+ ret = request_threaded_irq(info->virq, NULL, max77686_rtc_alarm_irq, 0,
+ "rtc-alarm1", info);
+ if (ret < 0) {
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
info->virq, ret);
+ goto err_rtc;
+ }
+
+ return 0;
err_rtc:
+ regmap_del_irq_chip(info->rtc_irq, info->rtc_irq_data);
+ if (info->rtc)
+ i2c_unregister_device(info->rtc);
+
return ret;
}
+static int max77686_rtc_remove(struct platform_device *pdev)
+{
+ struct max77686_rtc_info *info = platform_get_drvdata(pdev);
+
+ free_irq(info->virq, info);
+ regmap_del_irq_chip(info->rtc_irq, info->rtc_irq_data);
+ if (info->rtc)
+ i2c_unregister_device(info->rtc);
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int max77686_rtc_suspend(struct device *dev)
{
@@ -508,7 +840,9 @@ static SIMPLE_DEV_PM_OPS(max77686_rtc_pm_ops,
max77686_rtc_suspend, max77686_rtc_resume);
static const struct platform_device_id rtc_id[] = {
- { "max77686-rtc", 0 },
+ { "max77686-rtc", .driver_data = (kernel_ulong_t)&max77686_drv_data, },
+ { "max77802-rtc", .driver_data = (kernel_ulong_t)&max77802_drv_data, },
+ { "max77620-rtc", .driver_data = (kernel_ulong_t)&max77620_drv_data, },
{},
};
MODULE_DEVICE_TABLE(platform, rtc_id);
@@ -519,6 +853,7 @@ static struct platform_driver max77686_rtc_driver = {
.pm = &max77686_rtc_pm_ops,
},
.probe = max77686_rtc_probe,
+ .remove = max77686_rtc_remove,
.id_table = rtc_id,
};
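
Annotation (not part of the patch): because probe now keys everything off platform_get_device_id(), supporting a further chip of this family is essentially a table exercise: describe its layout in a new max77686_rtc_driver_data and add one rtc_id[] entry. Purely hypothetical illustration:

	/* Hypothetical: a future variant slots in without code changes. */
	{ "max778xx-rtc", .driver_data = (kernel_ulong_t)&max778xx_drv_data, },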
diff --git a/drivers/rtc/rtc-max77802.c b/drivers/rtc/rtc-max77802.c
deleted file mode 100644
index 82ffcc5a5345..000000000000
--- a/drivers/rtc/rtc-max77802.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * RTC driver for Maxim MAX77802
- *
- * Copyright (C) 2013 Google, Inc
- *
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- *
- * based on rtc-max8997.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/rtc.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/max77686-private.h>
-#include <linux/irqdomain.h>
-#include <linux/regmap.h>
-
-/* RTC Control Register */
-#define BCD_EN_SHIFT 0
-#define BCD_EN_MASK (1 << BCD_EN_SHIFT)
-#define MODEL24_SHIFT 1
-#define MODEL24_MASK (1 << MODEL24_SHIFT)
-/* RTC Update Register1 */
-#define RTC_UDR_SHIFT 0
-#define RTC_UDR_MASK (1 << RTC_UDR_SHIFT)
-#define RTC_RBUDR_SHIFT 4
-#define RTC_RBUDR_MASK (1 << RTC_RBUDR_SHIFT)
-/* RTC Hour register */
-#define HOUR_PM_SHIFT 6
-#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
-/* RTC Alarm Enable */
-#define ALARM_ENABLE_SHIFT 7
-#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
-
-/* For the RTCAE1 register, we write this value to enable the alarm */
-#define ALARM_ENABLE_VALUE 0x77
-
-#define MAX77802_RTC_UPDATE_DELAY_US 200
-
-enum {
- RTC_SEC = 0,
- RTC_MIN,
- RTC_HOUR,
- RTC_WEEKDAY,
- RTC_MONTH,
- RTC_YEAR,
- RTC_DATE,
- RTC_NR_TIME
-};
-
-struct max77802_rtc_info {
- struct device *dev;
- struct max77686_dev *max77802;
- struct i2c_client *rtc;
- struct rtc_device *rtc_dev;
- struct mutex lock;
-
- struct regmap *regmap;
-
- int virq;
- int rtc_24hr_mode;
-};
-
-enum MAX77802_RTC_OP {
- MAX77802_RTC_WRITE,
- MAX77802_RTC_READ,
-};
-
-static void max77802_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
- int rtc_24hr_mode)
-{
- tm->tm_sec = data[RTC_SEC] & 0xff;
- tm->tm_min = data[RTC_MIN] & 0xff;
- if (rtc_24hr_mode)
- tm->tm_hour = data[RTC_HOUR] & 0x1f;
- else {
- tm->tm_hour = data[RTC_HOUR] & 0x0f;
- if (data[RTC_HOUR] & HOUR_PM_MASK)
- tm->tm_hour += 12;
- }
-
- /* Only a single bit is set in data[], so fls() would be equivalent */
- tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0xff) - 1;
- tm->tm_mday = data[RTC_DATE] & 0x1f;
- tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
-
- tm->tm_year = data[RTC_YEAR] & 0xff;
- tm->tm_yday = 0;
- tm->tm_isdst = 0;
-}
-
-static int max77802_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
-{
- data[RTC_SEC] = tm->tm_sec;
- data[RTC_MIN] = tm->tm_min;
- data[RTC_HOUR] = tm->tm_hour;
- data[RTC_WEEKDAY] = 1 << tm->tm_wday;
- data[RTC_DATE] = tm->tm_mday;
- data[RTC_MONTH] = tm->tm_mon + 1;
- data[RTC_YEAR] = tm->tm_year;
-
- return 0;
-}
-
-static int max77802_rtc_update(struct max77802_rtc_info *info,
- enum MAX77802_RTC_OP op)
-{
- int ret;
- unsigned int data;
-
- if (op == MAX77802_RTC_WRITE)
- data = 1 << RTC_UDR_SHIFT;
- else
- data = 1 << RTC_RBUDR_SHIFT;
-
- ret = regmap_update_bits(info->max77802->regmap,
- MAX77802_RTC_UPDATE0, data, data);
- if (ret < 0)
- dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
- __func__, ret, data);
- else {
- /* Minimum delay required before RTC update. */
- usleep_range(MAX77802_RTC_UPDATE_DELAY_US,
- MAX77802_RTC_UPDATE_DELAY_US * 2);
- }
-
- return ret;
-}
-
-static int max77802_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct max77802_rtc_info *info = dev_get_drvdata(dev);
- u8 data[RTC_NR_TIME];
- int ret;
-
- mutex_lock(&info->lock);
-
- ret = max77802_rtc_update(info, MAX77802_RTC_READ);
- if (ret < 0)
- goto out;
-
- ret = regmap_bulk_read(info->max77802->regmap,
- MAX77802_RTC_SEC, data, RTC_NR_TIME);
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__,
- ret);
- goto out;
- }
-
- max77802_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
-
- ret = rtc_valid_tm(tm);
-
-out:
- mutex_unlock(&info->lock);
- return ret;
-}
-
-static int max77802_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- struct max77802_rtc_info *info = dev_get_drvdata(dev);
- u8 data[RTC_NR_TIME];
- int ret;
-
- ret = max77802_rtc_tm_to_data(tm, data);
- if (ret < 0)
- return ret;
-
- mutex_lock(&info->lock);
-
- ret = regmap_bulk_write(info->max77802->regmap,
- MAX77802_RTC_SEC, data, RTC_NR_TIME);
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
- ret);
- goto out;
- }
-
- ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-
-out:
- mutex_unlock(&info->lock);
- return ret;
-}
-
-static int max77802_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct max77802_rtc_info *info = dev_get_drvdata(dev);
- u8 data[RTC_NR_TIME];
- unsigned int val;
- int ret;
-
- mutex_lock(&info->lock);
-
- ret = max77802_rtc_update(info, MAX77802_RTC_READ);
- if (ret < 0)
- goto out;
-
- ret = regmap_bulk_read(info->max77802->regmap,
- MAX77802_ALARM1_SEC, data, RTC_NR_TIME);
- if (ret < 0) {
- dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
- __func__, __LINE__, ret);
- goto out;
- }
-
- max77802_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
-
- alrm->enabled = 0;
- ret = regmap_read(info->max77802->regmap,
- MAX77802_RTC_AE1, &val);
- if (ret < 0) {
- dev_err(info->dev, "%s:%d fail to read alarm enable(%d)\n",
- __func__, __LINE__, ret);
- goto out;
- }
- if (val)
- alrm->enabled = 1;
-
- alrm->pending = 0;
- ret = regmap_read(info->max77802->regmap, MAX77802_REG_STATUS2, &val);
- if (ret < 0) {
- dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
- __func__, __LINE__, ret);
- goto out;
- }
-
- if (val & (1 << 2)) /* RTCA1 */
- alrm->pending = 1;
-
-out:
- mutex_unlock(&info->lock);
- return 0;
-}
-
-static int max77802_rtc_stop_alarm(struct max77802_rtc_info *info)
-{
- int ret;
-
- if (!mutex_is_locked(&info->lock))
- dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
-
- ret = max77802_rtc_update(info, MAX77802_RTC_READ);
- if (ret < 0)
- goto out;
-
- ret = regmap_write(info->max77802->regmap,
- MAX77802_RTC_AE1, 0);
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
- __func__, ret);
- goto out;
- }
-
- ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-out:
- return ret;
-}
-
-static int max77802_rtc_start_alarm(struct max77802_rtc_info *info)
-{
- int ret;
-
- if (!mutex_is_locked(&info->lock))
- dev_warn(info->dev, "%s: should have mutex locked\n",
- __func__);
-
- ret = max77802_rtc_update(info, MAX77802_RTC_READ);
- if (ret < 0)
- goto out;
-
- ret = regmap_write(info->max77802->regmap,
- MAX77802_RTC_AE1,
- ALARM_ENABLE_VALUE);
-
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
- __func__, ret);
- goto out;
- }
-
- ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-out:
- return ret;
-}
-
-static int max77802_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct max77802_rtc_info *info = dev_get_drvdata(dev);
- u8 data[RTC_NR_TIME];
- int ret;
-
- ret = max77802_rtc_tm_to_data(&alrm->time, data);
- if (ret < 0)
- return ret;
-
- mutex_lock(&info->lock);
-
- ret = max77802_rtc_stop_alarm(info);
- if (ret < 0)
- goto out;
-
- ret = regmap_bulk_write(info->max77802->regmap,
- MAX77802_ALARM1_SEC, data, RTC_NR_TIME);
-
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
- __func__, ret);
- goto out;
- }
-
- ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
- if (ret < 0)
- goto out;
-
- if (alrm->enabled)
- ret = max77802_rtc_start_alarm(info);
-out:
- mutex_unlock(&info->lock);
- return ret;
-}
-
-static int max77802_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- struct max77802_rtc_info *info = dev_get_drvdata(dev);
- int ret;
-
- mutex_lock(&info->lock);
- if (enabled)
- ret = max77802_rtc_start_alarm(info);
- else
- ret = max77802_rtc_stop_alarm(info);
- mutex_unlock(&info->lock);
-
- return ret;
-}
-
-static irqreturn_t max77802_rtc_alarm_irq(int irq, void *data)
-{
- struct max77802_rtc_info *info = data;
-
- dev_dbg(info->dev, "%s:irq(%d)\n", __func__, irq);
-
- rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
-
- return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops max77802_rtc_ops = {
- .read_time = max77802_rtc_read_time,
- .set_time = max77802_rtc_set_time,
- .read_alarm = max77802_rtc_read_alarm,
- .set_alarm = max77802_rtc_set_alarm,
- .alarm_irq_enable = max77802_rtc_alarm_irq_enable,
-};
-
-static int max77802_rtc_init_reg(struct max77802_rtc_info *info)
-{
- u8 data[2];
- int ret;
-
- max77802_rtc_update(info, MAX77802_RTC_READ);
-
- /* Set RTC control register : Binary mode, 24hour mdoe */
- data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
- data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
-
- info->rtc_24hr_mode = 1;
-
- ret = regmap_bulk_write(info->max77802->regmap,
- MAX77802_RTC_CONTROLM, data, ARRAY_SIZE(data));
- if (ret < 0) {
- dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
- __func__, ret);
- return ret;
- }
-
- ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
- return ret;
-}
-
-static int max77802_rtc_probe(struct platform_device *pdev)
-{
- struct max77686_dev *max77802 = dev_get_drvdata(pdev->dev.parent);
- struct max77802_rtc_info *info;
- int ret;
-
- dev_dbg(&pdev->dev, "%s\n", __func__);
-
- info = devm_kzalloc(&pdev->dev, sizeof(struct max77802_rtc_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- mutex_init(&info->lock);
- info->dev = &pdev->dev;
- info->max77802 = max77802;
- info->rtc = max77802->i2c;
-
- platform_set_drvdata(pdev, info);
-
- ret = max77802_rtc_init_reg(info);
-
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
- return ret;
- }
-
- device_init_wakeup(&pdev->dev, 1);
-
- info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max77802-rtc",
- &max77802_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(info->rtc_dev)) {
- ret = PTR_ERR(info->rtc_dev);
- dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
- if (ret == 0)
- ret = -EINVAL;
- return ret;
- }
-
- if (!max77802->rtc_irq_data) {
- dev_err(&pdev->dev, "No RTC regmap IRQ chip\n");
- return -EINVAL;
- }
-
- info->virq = regmap_irq_get_virq(max77802->rtc_irq_data,
- MAX77686_RTCIRQ_RTCA1);
-
- if (info->virq <= 0) {
- dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
- MAX77686_RTCIRQ_RTCA1);
- return -EINVAL;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, info->virq, NULL,
- max77802_rtc_alarm_irq, 0, "rtc-alarm1",
- info);
- if (ret < 0)
- dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
- info->virq, ret);
-
- return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int max77802_rtc_suspend(struct device *dev)
-{
- if (device_may_wakeup(dev)) {
- struct max77802_rtc_info *info = dev_get_drvdata(dev);
-
- return enable_irq_wake(info->virq);
- }
-
- return 0;
-}
-
-static int max77802_rtc_resume(struct device *dev)
-{
- if (device_may_wakeup(dev)) {
- struct max77802_rtc_info *info = dev_get_drvdata(dev);
-
- return disable_irq_wake(info->virq);
- }
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(max77802_rtc_pm_ops,
- max77802_rtc_suspend, max77802_rtc_resume);
-
-static const struct platform_device_id rtc_id[] = {
- { "max77802-rtc", 0 },
- {},
-};
-MODULE_DEVICE_TABLE(platform, rtc_id);
-
-static struct platform_driver max77802_rtc_driver = {
- .driver = {
- .name = "max77802-rtc",
- .pm = &max77802_rtc_pm_ops,
- },
- .probe = max77802_rtc_probe,
- .id_table = rtc_id,
-};
-
-module_platform_driver(max77802_rtc_driver);
-
-MODULE_DESCRIPTION("Maxim MAX77802 RTC driver");
-MODULE_AUTHOR("Simon Glass <sjg@chromium.org>");
-MODULE_LICENSE("GPL");
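
Annotation (not part of the patch): nothing functional is lost with this deletion. The MAX77802's distinguishing features, the dedicated RTCAE1 alarm-enable register and the full 8-bit field mask, live on in rtc-max77686.c as the alarm_enable_reg and mask fields of max77802_drv_data, and the 0x77 magic reappears there as MAX77802_ALARM_ENABLE_VALUE. In the merged driver, arming the alarm on this chip reduces to a single write:

	/* From the merged driver's start_alarm path. */
	ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1],
			   MAX77802_ALARM_ENABLE_VALUE);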
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 1c91ce8a6d75..025bb33b9cd2 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -20,6 +20,7 @@
#include <linux/printk.h>
#include <linux/spi/spi.h>
#include <linux/rtc.h>
+#include <linux/of.h>
/* MCP795 Instructions, see datasheet table 3-1 */
#define MCP795_EEREAD 0x03
@@ -183,9 +184,18 @@ static int mcp795_probe(struct spi_device *spi)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id mcp795_of_match[] = {
+ { .compatible = "maxim,mcp795" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mcp795_of_match);
+#endif
+
static struct spi_driver mcp795_driver = {
.driver = {
.name = "rtc-mcp795",
+ .of_match_table = of_match_ptr(mcp795_of_match),
},
.probe = mcp795_probe,
};
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 06a5c52b292f..44f622c3e048 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -419,4 +419,3 @@ module_platform_driver(mtk_rtc_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tianping Fang <tianping.fang@mediatek.com>");
MODULE_DESCRIPTION("RTC Driver for MediaTek MT6397 PMIC");
-MODULE_ALIAS("platform:mt6397-rtc");
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 7ea2c471feca..6080e0edef63 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -311,8 +311,7 @@ static int palmas_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, palmas_rtc->irq, NULL,
palmas_rtc_interrupt,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT |
- IRQF_EARLY_RESUME,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
dev_name(&pdev->dev), palmas_rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ request failed, err = %d\n", ret);
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index ea8a31c91641..da27738b1242 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -48,6 +48,7 @@
#define DRV_VERSION "0.6"
+/* REGISTERS */
#define PCF2123_REG_CTRL1 (0x00) /* Control Register 1 */
#define PCF2123_REG_CTRL2 (0x01) /* Control Register 2 */
#define PCF2123_REG_SC (0x02) /* datetime */
@@ -57,10 +58,54 @@
#define PCF2123_REG_DW (0x06)
#define PCF2123_REG_MO (0x07)
#define PCF2123_REG_YR (0x08)
+#define PCF2123_REG_ALRM_MN (0x09) /* Alarm Registers */
+#define PCF2123_REG_ALRM_HR (0x0a)
+#define PCF2123_REG_ALRM_DM (0x0b)
+#define PCF2123_REG_ALRM_DW (0x0c)
+#define PCF2123_REG_OFFSET (0x0d) /* Clock Rate Offset Register */
+#define PCF2123_REG_TMR_CLKOUT (0x0e) /* Timer Registers */
+#define PCF2123_REG_CTDWN_TMR (0x0f)
+
+/* PCF2123_REG_CTRL1 BITS */
+#define CTRL1_CLEAR (0) /* Clear */
+#define CTRL1_CORR_INT BIT(1) /* Correction irq enable */
+#define CTRL1_12_HOUR BIT(2) /* 12 hour time */
+#define CTRL1_SW_RESET (BIT(3) | BIT(4) | BIT(6)) /* Software reset */
+#define CTRL1_STOP BIT(5) /* Stop the clock */
+#define CTRL1_EXT_TEST BIT(7) /* External clock test mode */
+
+/* PCF2123_REG_CTRL2 BITS */
+#define CTRL2_TIE BIT(0) /* Countdown timer irq enable */
+#define CTRL2_AIE BIT(1) /* Alarm irq enable */
+#define CTRL2_TF BIT(2) /* Countdown timer flag */
+#define CTRL2_AF BIT(3) /* Alarm flag */
+#define CTRL2_TI_TP BIT(4) /* Irq pin generates pulse */
+#define CTRL2_MSF BIT(5) /* Minute or second irq flag */
+#define CTRL2_SI BIT(6) /* Second irq enable */
+#define CTRL2_MI BIT(7) /* Minute irq enable */
+
+/* PCF2123_REG_SC BITS */
+#define OSC_HAS_STOPPED BIT(7) /* Clock has been stopped */
+
+/* PCF2123_REG_ALRM_XX BITS */
+#define ALRM_ENABLE BIT(7) /* MN, HR, DM, or DW alarm enable */
+
+/* PCF2123_REG_TMR_CLKOUT BITS */
+#define CD_TMR_4096KHZ (0) /* 4096 KHz countdown timer */
+#define CD_TMR_64HZ (1) /* 64 Hz countdown timer */
+#define CD_TMR_1HZ (2) /* 1 Hz countdown timer */
+#define CD_TMR_60th_HZ (3) /* 60th Hz countdown timer */
+#define CD_TMR_TE BIT(3) /* Countdown timer enable */
+
+/* PCF2123_REG_OFFSET BITS */
+#define OFFSET_SIGN_BIT BIT(6) /* 2's complement sign bit */
+#define OFFSET_COARSE BIT(7) /* Coarse mode offset */
+#define OFFSET_STEP (2170) /* Offset step in parts per billion */
+
+/* READ/WRITE ADDRESS BITS */
+#define PCF2123_WRITE BIT(4)
+#define PCF2123_READ (BIT(4) | BIT(7))
-#define PCF2123_SUBADDR (1 << 4)
-#define PCF2123_WRITE ((0 << 7) | PCF2123_SUBADDR)
-#define PCF2123_READ ((1 << 7) | PCF2123_SUBADDR)
static struct spi_driver pcf2123_driver;
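
Annotation (not part of the patch): the named bits above are worth checking against the magic numbers they replace; they come out bit-for-bit identical:

	/* CTRL1_SW_RESET = BIT(3)|BIT(4)|BIT(6) = 0x08+0x10+0x40 = 0x58
	 * CTRL1_STOP     = BIT(5)                               = 0x20
	 * PCF2123_WRITE  = BIT(4)                                = 0x10
	 * PCF2123_READ   = BIT(4)|BIT(7)                         = 0x90
	 * matching the raw 0x58/0x20 control writes and the old
	 * (x << 7) | PCF2123_SUBADDR encoding removed further down. */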
@@ -84,12 +129,44 @@ static inline void pcf2123_delay_trec(void)
ndelay(30);
}
+static int pcf2123_read(struct device *dev, u8 reg, u8 *rxbuf, size_t size)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ reg |= PCF2123_READ;
+ ret = spi_write_then_read(spi, &reg, 1, rxbuf, size);
+ pcf2123_delay_trec();
+
+ return ret;
+}
+
+static int pcf2123_write(struct device *dev, u8 *txbuf, size_t size)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ int ret;
+
+ txbuf[0] |= PCF2123_WRITE;
+ ret = spi_write(spi, txbuf, size);
+ pcf2123_delay_trec();
+
+ return ret;
+}
+
+static int pcf2123_write_reg(struct device *dev, u8 reg, u8 val)
+{
+ u8 txbuf[2];
+
+ txbuf[0] = reg;
+ txbuf[1] = val;
+ return pcf2123_write(dev, txbuf, sizeof(txbuf));
+}
+
static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
char *buffer)
{
- struct spi_device *spi = to_spi_device(dev);
struct pcf2123_sysfs_reg *r;
- u8 txbuf[1], rxbuf[1];
+ u8 rxbuf[1];
unsigned long reg;
int ret;
@@ -99,19 +176,16 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- txbuf[0] = PCF2123_READ | reg;
- ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1);
+ ret = pcf2123_read(dev, reg, rxbuf, 1);
if (ret < 0)
return -EIO;
- pcf2123_delay_trec();
+
return sprintf(buffer, "0x%x\n", rxbuf[0]);
}
static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
const char *buffer, size_t count) {
- struct spi_device *spi = to_spi_device(dev);
struct pcf2123_sysfs_reg *r;
- u8 txbuf[2];
unsigned long reg;
unsigned long val;
@@ -127,27 +201,78 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- txbuf[0] = PCF2123_WRITE | reg;
- txbuf[1] = val;
- ret = spi_write(spi, txbuf, sizeof(txbuf));
+	ret = pcf2123_write_reg(dev, reg, val);
if (ret < 0)
return -EIO;
- pcf2123_delay_trec();
return count;
}
+static int pcf2123_read_offset(struct device *dev, long *offset)
+{
+ int ret;
+ s8 reg;
+
+ ret = pcf2123_read(dev, PCF2123_REG_OFFSET, &reg, 1);
+ if (ret < 0)
+ return ret;
+
+ if (reg & OFFSET_COARSE)
+ reg <<= 1; /* multiply by 2 and sign extend */
+ else
+ reg |= (reg & OFFSET_SIGN_BIT) << 1; /* sign extend only */
+
+ *offset = ((long)reg) * OFFSET_STEP;
+
+ return 0;
+}
+
+/*
+ * The offset register is a 7 bit signed value with a coarse bit in bit 7.
+ * The main difference between the two modes is that the normal offset
+ * adjusts the first second of n minutes every other hour, with 61, 62
+ * and 63 being shoved into the 60th minute.
+ * The coarse adjustment does the same, but every hour.
+ * The two modes overlap, with every even normal offset value also
+ * expressible as a coarse offset; for those overlapping values,
+ * despite its name, the coarse offset is the better fit.
+ */
+static int pcf2123_set_offset(struct device *dev, long offset)
+{
+ s8 reg;
+
+ if (offset > OFFSET_STEP * 127)
+ reg = 127;
+ else if (offset < OFFSET_STEP * -128)
+ reg = -128;
+ else
+ reg = (s8)((offset + (OFFSET_STEP >> 1)) / OFFSET_STEP);
+
+ /* choose fine offset only for odd values in the normal range */
+ if (reg & 1 && reg <= 63 && reg >= -64) {
+ /* Normal offset. Clear the coarse bit */
+ reg &= ~OFFSET_COARSE;
+ } else {
+ /* Coarse offset. Divide by 2 and set the coarse bit */
+ reg >>= 1;
+ reg |= OFFSET_COARSE;
+ }
+
+ return pcf2123_write_reg(dev, PCF2123_REG_OFFSET, reg);
+}
+
static int pcf2123_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct spi_device *spi = to_spi_device(dev);
- u8 txbuf[1], rxbuf[7];
+ u8 rxbuf[7];
int ret;
- txbuf[0] = PCF2123_READ | PCF2123_REG_SC;
- ret = spi_write_then_read(spi, txbuf, sizeof(txbuf),
- rxbuf, sizeof(rxbuf));
+ ret = pcf2123_read(dev, PCF2123_REG_SC, rxbuf, sizeof(rxbuf));
if (ret < 0)
return ret;
- pcf2123_delay_trec();
+
+ if (rxbuf[0] & OSC_HAS_STOPPED) {
+ dev_info(dev, "clock was stopped. Time is not valid\n");
+ return -EINVAL;
+ }
tm->tm_sec = bcd2bin(rxbuf[0] & 0x7F);
tm->tm_min = bcd2bin(rxbuf[1] & 0x7F);
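
Annotation (not part of the patch): a worked example of the offset encoding above, with request values assumed for illustration. Asking for +10000 ppb gives reg = (10000 + 1085) / 2170 = 5, which is odd and within [-64, 63], so fine mode is chosen and the applied correction is 5 * 2170 = 10850 ppb. Asking for +300000 ppb clamps to 127, which is outside the fine range, so it is halved to 63 with OFFSET_COARSE set; read_offset() then shifts the coarse value left and reports 126 * 2170 = 273420 ppb.

	long requested = 10000;	/* ppb, assumed example value */
	s8 reg = (s8)((requested + (OFFSET_STEP >> 1)) / OFFSET_STEP); /* 5 */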
@@ -170,7 +295,6 @@ static int pcf2123_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct spi_device *spi = to_spi_device(dev);
u8 txbuf[8];
int ret;
@@ -181,15 +305,12 @@ static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
/* Stop the counter first */
- txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
- txbuf[1] = 0x20;
- ret = spi_write(spi, txbuf, 2);
+ ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_STOP);
if (ret < 0)
return ret;
- pcf2123_delay_trec();
/* Set the new time */
- txbuf[0] = PCF2123_WRITE | PCF2123_REG_SC;
+ txbuf[0] = PCF2123_REG_SC;
txbuf[1] = bin2bcd(tm->tm_sec & 0x7F);
txbuf[2] = bin2bcd(tm->tm_min & 0x7F);
txbuf[3] = bin2bcd(tm->tm_hour & 0x3F);
@@ -198,18 +319,48 @@ static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
txbuf[6] = bin2bcd((tm->tm_mon + 1) & 0x1F); /* rtc mn 1-12 */
txbuf[7] = bin2bcd(tm->tm_year < 100 ? tm->tm_year : tm->tm_year - 100);
- ret = spi_write(spi, txbuf, sizeof(txbuf));
+ ret = pcf2123_write(dev, txbuf, sizeof(txbuf));
if (ret < 0)
return ret;
- pcf2123_delay_trec();
/* Start the counter */
- txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
- txbuf[1] = 0x00;
- ret = spi_write(spi, txbuf, 2);
+ ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_CLEAR);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int pcf2123_reset(struct device *dev)
+{
+ int ret;
+ u8 rxbuf[2];
+
+ ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_SW_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Stop the counter */
+ dev_dbg(dev, "stopping RTC\n");
+ ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_STOP);
+ if (ret < 0)
+ return ret;
+
+ /* See if the counter was actually stopped */
+ dev_dbg(dev, "checking for presence of RTC\n");
+ ret = pcf2123_read(dev, PCF2123_REG_CTRL1, rxbuf, sizeof(rxbuf));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "received data from RTC (0x%02X 0x%02X)\n",
+ rxbuf[0], rxbuf[1]);
+ if (!(rxbuf[0] & CTRL1_STOP))
+ return -ENODEV;
+
+ /* Start the counter */
+ ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_CLEAR);
if (ret < 0)
return ret;
- pcf2123_delay_trec();
return 0;
}
@@ -217,13 +368,16 @@ static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm)
static const struct rtc_class_ops pcf2123_rtc_ops = {
.read_time = pcf2123_rtc_read_time,
.set_time = pcf2123_rtc_set_time,
+ .read_offset = pcf2123_read_offset,
+ .set_offset = pcf2123_set_offset,
};
static int pcf2123_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
+ struct rtc_time tm;
struct pcf2123_plat_data *pdata;
- u8 txbuf[2], rxbuf[2];
int ret, i;
pdata = devm_kzalloc(&spi->dev, sizeof(struct pcf2123_plat_data),
@@ -232,56 +386,19 @@ static int pcf2123_probe(struct spi_device *spi)
return -ENOMEM;
spi->dev.platform_data = pdata;
- /* Send a software reset command */
- txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
- txbuf[1] = 0x58;
- dev_dbg(&spi->dev, "resetting RTC (0x%02X 0x%02X)\n",
- txbuf[0], txbuf[1]);
- ret = spi_write(spi, txbuf, 2 * sizeof(u8));
- if (ret < 0)
- goto kfree_exit;
- pcf2123_delay_trec();
-
- /* Stop the counter */
- txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
- txbuf[1] = 0x20;
- dev_dbg(&spi->dev, "stopping RTC (0x%02X 0x%02X)\n",
- txbuf[0], txbuf[1]);
- ret = spi_write(spi, txbuf, 2 * sizeof(u8));
- if (ret < 0)
- goto kfree_exit;
- pcf2123_delay_trec();
-
- /* See if the counter was actually stopped */
- txbuf[0] = PCF2123_READ | PCF2123_REG_CTRL1;
- dev_dbg(&spi->dev, "checking for presence of RTC (0x%02X)\n",
- txbuf[0]);
- ret = spi_write_then_read(spi, txbuf, 1 * sizeof(u8),
- rxbuf, 2 * sizeof(u8));
- dev_dbg(&spi->dev, "received data from RTC (0x%02X 0x%02X)\n",
- rxbuf[0], rxbuf[1]);
- if (ret < 0)
- goto kfree_exit;
- pcf2123_delay_trec();
-
- if (!(rxbuf[0] & 0x20)) {
- dev_err(&spi->dev, "chip not found\n");
- ret = -ENODEV;
- goto kfree_exit;
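+ /*
+ * Try a time read first and reset the chip only if it fails, so a
+ * correctly running clock is not stopped on every probe.
+ */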
+ ret = pcf2123_rtc_read_time(&spi->dev, &tm);
+ if (ret < 0) {
+ ret = pcf2123_reset(&spi->dev);
+ if (ret < 0) {
+ dev_err(&spi->dev, "chip not found\n");
+ goto kfree_exit;
+ }
}
dev_info(&spi->dev, "chip found, driver version " DRV_VERSION "\n");
dev_info(&spi->dev, "spiclk %u KHz.\n",
(spi->max_speed_hz + 500) / 1000);
- /* Start the counter */
- txbuf[0] = PCF2123_WRITE | PCF2123_REG_CTRL1;
- txbuf[1] = 0x00;
- ret = spi_write(spi, txbuf, sizeof(txbuf));
- if (ret < 0)
- goto kfree_exit;
- pcf2123_delay_trec();
-
/* Finalize the initialization */
rtc = devm_rtc_device_register(&spi->dev, pcf2123_driver.driver.name,
&pcf2123_rtc_ops, THIS_MODULE);
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 629bfdf8c745..2bfdf638b673 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -1,12 +1,12 @@
/*
- * An I2C driver for the NXP PCF2127 RTC
+ * An I2C and SPI driver for the NXP PCF2127/29 RTC
* Copyright 2013 Til-Technologies
*
* Author: Renaud Cerrato <r.cerrato@til-technologies.fr>
*
* based on the other drivers in this same directory.
*
- * http://www.nxp.com/documents/data_sheet/PCF2127AT.pdf
+ * Datasheet: http://cache.nxp.com/documents/data_sheet/PCF2127.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -14,11 +14,13 @@
*/
#include <linux/i2c.h>
+#include <linux/spi/spi.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/regmap.h>
#define PCF2127_REG_CTRL1 (0x00) /* Control Register 1 */
#define PCF2127_REG_CTRL2 (0x01) /* Control Register 2 */
@@ -36,29 +38,30 @@
#define PCF2127_OSF BIT(7) /* Oscillator Fail flag */
-static struct i2c_driver pcf2127_driver;
-
struct pcf2127 {
struct rtc_device *rtc;
+ struct regmap *regmap;
};
/*
* In the routines that deal directly with the pcf2127 hardware, we use
* rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
*/
-static int pcf2127_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- unsigned char buf[10] = { PCF2127_REG_CTRL1 };
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ unsigned char buf[10];
+ int ret;
- /* read registers */
- if (i2c_master_send(client, buf, 1) != 1 ||
- i2c_master_recv(client, buf, sizeof(buf)) != sizeof(buf)) {
- dev_err(&client->dev, "%s: read error\n", __func__);
- return -EIO;
+ ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL1, buf,
+ sizeof(buf));
+ if (ret) {
+ dev_err(dev, "%s: read error\n", __func__);
+ return ret;
}
if (buf[PCF2127_REG_CTRL3] & PCF2127_REG_CTRL3_BLF)
- dev_info(&client->dev,
+ dev_info(dev,
"low voltage detected, check/replace RTC battery.\n");
if (buf[PCF2127_REG_SC] & PCF2127_OSF) {
@@ -66,12 +69,12 @@ static int pcf2127_get_datetime(struct i2c_client *client, struct rtc_time *tm)
* no need clear the flag here,
* it will be cleared once the new date is saved
*/
- dev_warn(&client->dev,
+ dev_warn(dev,
"oscillator stop detected, date/time is not reliable\n");
return -EINVAL;
}
- dev_dbg(&client->dev,
+ dev_dbg(dev,
"%s: raw data is cr1=%02x, cr2=%02x, cr3=%02x, "
"sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
@@ -91,7 +94,7 @@ static int pcf2127_get_datetime(struct i2c_client *client, struct rtc_time *tm)
if (tm->tm_year < 70)
tm->tm_year += 100; /* assume we are in 1970...2069 */
- dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
@@ -100,20 +103,18 @@ static int pcf2127_get_datetime(struct i2c_client *client, struct rtc_time *tm)
return rtc_valid_tm(tm);
}
-static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- unsigned char buf[8];
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ unsigned char buf[7];
int i = 0, err;
- dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
+ dev_dbg(dev, "%s: secs=%d, mins=%d, hours=%d, "
"mday=%d, mon=%d, year=%d, wday=%d\n",
__func__,
tm->tm_sec, tm->tm_min, tm->tm_hour,
tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
- /* start register address */
- buf[i++] = PCF2127_REG_SC;
-
/* hours, minutes and seconds */
buf[i++] = bin2bcd(tm->tm_sec); /* this will also clear OSF flag */
buf[i++] = bin2bcd(tm->tm_min);
@@ -128,11 +129,11 @@ static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[i++] = bin2bcd(tm->tm_year % 100);
/* write register's data */
- err = i2c_master_send(client, buf, i);
- if (err != i) {
- dev_err(&client->dev,
+ err = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_SC, buf, i);
+ if (err) {
+ dev_err(dev,
"%s: err=%d", __func__, err);
- return -EIO;
+ return err;
}
return 0;
@@ -142,26 +143,17 @@ static int pcf2127_set_datetime(struct i2c_client *client, struct rtc_time *tm)
static int pcf2127_rtc_ioctl(struct device *dev,
unsigned int cmd, unsigned long arg)
{
- struct i2c_client *client = to_i2c_client(dev);
- unsigned char buf = PCF2127_REG_CTRL3;
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
int touser;
int ret;
switch (cmd) {
case RTC_VL_READ:
- ret = i2c_master_send(client, &buf, 1);
- if (!ret)
- ret = -EIO;
- if (ret < 0)
- return ret;
-
- ret = i2c_master_recv(client, &buf, 1);
- if (!ret)
- ret = -EIO;
- if (ret < 0)
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL3, &touser);
+ if (ret)
return ret;
- touser = buf & PCF2127_REG_CTRL3_BLF ? 1 : 0;
+ touser = touser & PCF2127_REG_CTRL3_BLF ? 1 : 0;
if (copy_to_user((void __user *)arg, &touser, sizeof(int)))
return -EFAULT;
@@ -174,71 +166,270 @@ static int pcf2127_rtc_ioctl(struct device *dev,
#define pcf2127_rtc_ioctl NULL
#endif
-static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- return pcf2127_get_datetime(to_i2c_client(dev), tm);
-}
-
-static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- return pcf2127_set_datetime(to_i2c_client(dev), tm);
-}
-
static const struct rtc_class_ops pcf2127_rtc_ops = {
.ioctl = pcf2127_rtc_ioctl,
.read_time = pcf2127_rtc_read_time,
.set_time = pcf2127_rtc_set_time,
};
-static int pcf2127_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pcf2127_probe(struct device *dev, struct regmap *regmap,
+ const char *name)
{
struct pcf2127 *pcf2127;
- dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(dev, "%s\n", __func__);
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
- pcf2127 = devm_kzalloc(&client->dev, sizeof(struct pcf2127),
- GFP_KERNEL);
+ pcf2127 = devm_kzalloc(dev, sizeof(*pcf2127), GFP_KERNEL);
if (!pcf2127)
return -ENOMEM;
- i2c_set_clientdata(client, pcf2127);
+ pcf2127->regmap = regmap;
+
+ dev_set_drvdata(dev, pcf2127);
- pcf2127->rtc = devm_rtc_device_register(&client->dev,
- pcf2127_driver.driver.name,
- &pcf2127_rtc_ops, THIS_MODULE);
+ pcf2127->rtc = devm_rtc_device_register(dev, name, &pcf2127_rtc_ops,
+ THIS_MODULE);
return PTR_ERR_OR_ZERO(pcf2127->rtc);
}
-static const struct i2c_device_id pcf2127_id[] = {
- { "pcf2127", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, pcf2127_id);
-
#ifdef CONFIG_OF
static const struct of_device_id pcf2127_of_match[] = {
{ .compatible = "nxp,pcf2127" },
+ { .compatible = "nxp,pcf2129" },
{}
};
MODULE_DEVICE_TABLE(of, pcf2127_of_match);
#endif
-static struct i2c_driver pcf2127_driver = {
+#if IS_ENABLED(CONFIG_I2C)
+
+static int pcf2127_i2c_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_master_send(client, data, count);
+ if (ret != count)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int pcf2127_i2c_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+ void *buf;
+
+ if (WARN_ON(reg_size != 1))
+ return -EINVAL;
+
+ buf = kmalloc(val_size + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, reg, 1);
+ memcpy(buf + 1, val, val_size);
+
+ ret = i2c_master_send(client, buf, val_size + 1);
+ kfree(buf);
+
+ if (ret != val_size + 1)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int pcf2127_i2c_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+ if (WARN_ON(reg_size != 1))
+ return -EINVAL;
+
+ ret = i2c_master_send(client, reg, 1);
+ if (ret != 1)
+ return ret < 0 ? ret : -EIO;
+
+ ret = i2c_master_recv(client, val, val_size);
+ if (ret != val_size)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+/*
+ * A custom regmap_bus is needed here instead of regmap_init_i2c()
+ * because the chip requires a STOP condition between setting the
+ * register address and reading the register data back.
+ */
+static const struct regmap_bus pcf2127_i2c_regmap = {
+ .write = pcf2127_i2c_write,
+ .gather_write = pcf2127_i2c_gather_write,
+ .read = pcf2127_i2c_read,
+};
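+
+/*
+ * Sketch (illustrative, not driver code) of what a one-byte
+ * regmap_read() becomes on this bus: the register address goes out in
+ * its own transfer, the bus is released with a STOP, and only then is
+ * the value read back:
+ *
+ *	u8 reg = PCF2127_REG_CTRL3, val;
+ *
+ *	i2c_master_send(client, &reg, 1);	(address phase, ends in STOP)
+ *	i2c_master_recv(client, &val, 1);	(separate read transfer)
+ *
+ * regmap_init_i2c() would instead issue a repeated start between the
+ * two phases, which, per the note above, this chip does not handle for
+ * register reads.
+ */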
+
+static struct i2c_driver pcf2127_i2c_driver;
+
+static int pcf2127_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap,
+ &client->dev, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "%s: regmap allocation failed: %ld\n",
+ __func__, PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return pcf2127_probe(&client->dev, regmap,
+ pcf2127_i2c_driver.driver.name);
+}
+
+static const struct i2c_device_id pcf2127_i2c_id[] = {
+ { "pcf2127", 0 },
+ { "pcf2129", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
+
+static struct i2c_driver pcf2127_i2c_driver = {
+ .driver = {
+ .name = "rtc-pcf2127-i2c",
+ .of_match_table = of_match_ptr(pcf2127_of_match),
+ },
+ .probe = pcf2127_i2c_probe,
+ .id_table = pcf2127_i2c_id,
+};
+
+static int pcf2127_i2c_register_driver(void)
+{
+ return i2c_add_driver(&pcf2127_i2c_driver);
+}
+
+static void pcf2127_i2c_unregister_driver(void)
+{
+ i2c_del_driver(&pcf2127_i2c_driver);
+}
+
+#else
+
+static int pcf2127_i2c_register_driver(void)
+{
+ return 0;
+}
+
+static void pcf2127_i2c_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static struct spi_driver pcf2127_spi_driver;
+
+static int pcf2127_spi_probe(struct spi_device *spi)
+{
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = 0xa0,
+ .write_flag_mask = 0x20,
+ };
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
+ __func__, PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return pcf2127_probe(&spi->dev, regmap, pcf2127_spi_driver.driver.name);
+}
+
+static const struct spi_device_id pcf2127_spi_id[] = {
+ { "pcf2127", 0 },
+ { "pcf2129", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, pcf2127_spi_id);
+
+static struct spi_driver pcf2127_spi_driver = {
.driver = {
- .name = "rtc-pcf2127",
+ .name = "rtc-pcf2127-spi",
.of_match_table = of_match_ptr(pcf2127_of_match),
},
- .probe = pcf2127_probe,
- .id_table = pcf2127_id,
+ .probe = pcf2127_spi_probe,
+ .id_table = pcf2127_spi_id,
};
-module_i2c_driver(pcf2127_driver);
+static int pcf2127_spi_register_driver(void)
+{
+ return spi_register_driver(&pcf2127_spi_driver);
+}
+
+static void pcf2127_spi_unregister_driver(void)
+{
+ spi_unregister_driver(&pcf2127_spi_driver);
+}
+
+#else
+
+static int pcf2127_spi_register_driver(void)
+{
+ return 0;
+}
+
+static void pcf2127_spi_unregister_driver(void)
+{
+}
+
+#endif
+
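+/*
+ * With both an I2C and an SPI driver living in one module,
+ * module_i2c_driver() can no longer be used; register both buses by
+ * hand and unwind the I2C registration if the SPI one fails.
+ */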
+static int __init pcf2127_init(void)
+{
+ int ret;
+
+ ret = pcf2127_i2c_register_driver();
+ if (ret) {
+ pr_err("Failed to register pcf2127 i2c driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = pcf2127_spi_register_driver();
+ if (ret) {
+ pr_err("Failed to register pcf2127 spi driver: %d\n", ret);
+ pcf2127_i2c_unregister_driver();
+ }
+
+ return ret;
+}
+module_init(pcf2127_init)
+
+static void __exit pcf2127_exit(void)
+{
+ pcf2127_spi_unregister_driver();
+ pcf2127_i2c_unregister_driver();
+}
+module_exit(pcf2127_exit)
MODULE_AUTHOR("Renaud Cerrato <r.cerrato@til-technologies.fr>");
-MODULE_DESCRIPTION("NXP PCF2127 RTC driver");
+MODULE_DESCRIPTION("NXP PCF2127/29 RTC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 63334cbeca41..e8ddbb359d11 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -16,12 +16,12 @@
#include <linux/rtc.h>
#include <linux/module.h>
-#define DRV_VERSION "0.0.1"
-
#define PCF85063_REG_CTRL1 0x00 /* status */
+#define PCF85063_REG_CTRL1_STOP BIT(5)
#define PCF85063_REG_CTRL2 0x01
#define PCF85063_REG_SC 0x04 /* datetime */
+#define PCF85063_REG_SC_OS 0x80
#define PCF85063_REG_MN 0x05
#define PCF85063_REG_HR 0x06
#define PCF85063_REG_DM 0x07
@@ -29,15 +29,31 @@
#define PCF85063_REG_MO 0x09
#define PCF85063_REG_YR 0x0A
-#define PCF85063_MO_C 0x80 /* century */
-
static struct i2c_driver pcf85063_driver;
-struct pcf85063 {
- struct rtc_device *rtc;
- int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
- int voltage_low; /* indicates if a low_voltage was detected */
-};
+static int pcf85063_stop_clock(struct i2c_client *client, u8 *ctrl1)
+{
+ s32 ret;
+
+ ret = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failing to stop the clock\n");
+ return -EIO;
+ }
+
+ /* stop the clock and remember the full CTRL1 value */
+ ret |= PCF85063_REG_CTRL1_STOP;
+ *ctrl1 = ret;
+
+ ret = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, *ctrl1);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to stop the clock\n");
+ return -EIO;
+ }
+
+ return 0;
+}
/*
* In the routines that deal directly with the pcf85063 hardware, we use
@@ -45,81 +61,85 @@ struct pcf85063 {
*/
static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
{
- struct pcf85063 *pcf85063 = i2c_get_clientdata(client);
- unsigned char buf[13] = { PCF85063_REG_CTRL1 };
- struct i2c_msg msgs[] = {
- {/* setup read ptr */
- .addr = client->addr,
- .len = 1,
- .buf = buf
- },
- {/* read status + date */
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = 13,
- .buf = buf
- },
- };
-
- /* read registers */
- if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
- dev_err(&client->dev, "%s: read error\n", __func__);
+ int rc;
+ u8 regs[7];
+
+ /*
+ * While a read is in progress, the time/date registers are frozen and
+ * not updated until the access finishes. To avoid losing a second
+ * event, the access must complete within one second, so read all
+ * time/date registers in a single transfer.
+ */
+ rc = i2c_smbus_read_i2c_block_data(client, PCF85063_REG_SC,
+ sizeof(regs), regs);
+ if (rc != sizeof(regs)) {
+ dev_err(&client->dev, "date/time register read error\n");
return -EIO;
}
- tm->tm_sec = bcd2bin(buf[PCF85063_REG_SC] & 0x7F);
- tm->tm_min = bcd2bin(buf[PCF85063_REG_MN] & 0x7F);
- tm->tm_hour = bcd2bin(buf[PCF85063_REG_HR] & 0x3F); /* rtc hr 0-23 */
- tm->tm_mday = bcd2bin(buf[PCF85063_REG_DM] & 0x3F);
- tm->tm_wday = buf[PCF85063_REG_DW] & 0x07;
- tm->tm_mon = bcd2bin(buf[PCF85063_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
- tm->tm_year = bcd2bin(buf[PCF85063_REG_YR]);
+ /* if the clock has lost its power it makes no sense to use its time */
+ if (regs[0] & PCF85063_REG_SC_OS) {
+ dev_warn(&client->dev, "Power loss detected, invalid time\n");
+ return -EINVAL;
+ }
+
+ tm->tm_sec = bcd2bin(regs[0] & 0x7F);
+ tm->tm_min = bcd2bin(regs[1] & 0x7F);
+ tm->tm_hour = bcd2bin(regs[2] & 0x3F); /* rtc hr 0-23 */
+ tm->tm_mday = bcd2bin(regs[3] & 0x3F);
+ tm->tm_wday = regs[4] & 0x07;
+ tm->tm_mon = bcd2bin(regs[5] & 0x1F) - 1; /* rtc mn 1-12 */
+ tm->tm_year = bcd2bin(regs[6]);
if (tm->tm_year < 70)
tm->tm_year += 100; /* assume we are in 1970...2069 */
- /* detect the polarity heuristically. see note above. */
- pcf85063->c_polarity = (buf[PCF85063_REG_MO] & PCF85063_MO_C) ?
- (tm->tm_year >= 100) : (tm->tm_year < 100);
return rtc_valid_tm(tm);
}
static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
- int i = 0, err = 0;
- unsigned char buf[11];
+ int rc;
+ u8 regs[8];
- /* Control & status */
- buf[PCF85063_REG_CTRL1] = 0;
- buf[PCF85063_REG_CTRL2] = 5;
+ /*
+ * To set the time accurately, reset the divider chain and keep it in
+ * reset until all time/date registers have been written.
+ */
+ rc = pcf85063_stop_clock(client, &regs[7]);
+ if (rc != 0)
+ return rc;
/* hours, minutes and seconds */
- buf[PCF85063_REG_SC] = bin2bcd(tm->tm_sec) & 0x7F;
+ regs[0] = bin2bcd(tm->tm_sec) & 0x7F; /* clear OS flag */
- buf[PCF85063_REG_MN] = bin2bcd(tm->tm_min);
- buf[PCF85063_REG_HR] = bin2bcd(tm->tm_hour);
+ regs[1] = bin2bcd(tm->tm_min);
+ regs[2] = bin2bcd(tm->tm_hour);
/* Day of month, 1 - 31 */
- buf[PCF85063_REG_DM] = bin2bcd(tm->tm_mday);
+ regs[3] = bin2bcd(tm->tm_mday);
/* Day, 0 - 6 */
- buf[PCF85063_REG_DW] = tm->tm_wday & 0x07;
+ regs[4] = tm->tm_wday & 0x07;
/* month, 1 - 12 */
- buf[PCF85063_REG_MO] = bin2bcd(tm->tm_mon + 1);
+ regs[5] = bin2bcd(tm->tm_mon + 1);
/* year and century */
- buf[PCF85063_REG_YR] = bin2bcd(tm->tm_year % 100);
-
- /* write register's data */
- for (i = 0; i < sizeof(buf); i++) {
- unsigned char data[2] = { i, buf[i] };
-
- err = i2c_master_send(client, data, sizeof(data));
- if (err != sizeof(data)) {
- dev_err(&client->dev, "%s: err=%d addr=%02x, data=%02x\n",
- __func__, err, data[0], data[1]);
- return -EIO;
- }
+ regs[6] = bin2bcd(tm->tm_year % 100);
+
+ /*
+ * After all time/date registers are written, the 'address auto
+ * increment' feature wraps around to register CTRL1, which is then
+ * rewritten to re-enable the clock divider chain.
+ */
+ regs[7] &= ~PCF85063_REG_CTRL1_STOP;
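+
+ /*
+ * Concretely (per the register definitions above): the address
+ * pointer auto-increments from PCF85063_REG_SC (0x04) through
+ * PCF85063_REG_YR (0x0A) and then wraps to 0x00, so regs[0..6]
+ * fill the time/date registers and regs[7] lands back in CTRL1
+ * with the STOP bit cleared.
+ */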
+
+ /* write all registers at once */
+ rc = i2c_smbus_write_i2c_block_data(client, PCF85063_REG_SC,
+ sizeof(regs), regs);
+ if (rc < 0) {
+ dev_err(&client->dev, "date/time register write error\n");
+ return rc;
}
return 0;
@@ -143,27 +163,18 @@ static const struct rtc_class_ops pcf85063_rtc_ops = {
static int pcf85063_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pcf85063 *pcf85063;
+ struct rtc_device *rtc;
dev_dbg(&client->dev, "%s\n", __func__);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- pcf85063 = devm_kzalloc(&client->dev, sizeof(struct pcf85063),
- GFP_KERNEL);
- if (!pcf85063)
- return -ENOMEM;
-
- dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
-
- i2c_set_clientdata(client, pcf85063);
-
- pcf85063->rtc = devm_rtc_device_register(&client->dev,
- pcf85063_driver.driver.name,
- &pcf85063_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_device_register(&client->dev,
+ pcf85063_driver.driver.name,
+ &pcf85063_rtc_ops, THIS_MODULE);
- return PTR_ERR_OR_ZERO(pcf85063->rtc);
+ return PTR_ERR_OR_ZERO(rtc);
}
static const struct i2c_device_id pcf85063_id[] = {
@@ -194,4 +205,3 @@ module_i2c_driver(pcf85063_driver);
MODULE_AUTHOR("Søren Andersen <san@rosetechnology.dk>");
MODULE_DESCRIPTION("PCF85063 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 988566caaaa6..28c48b3c1946 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -178,28 +178,8 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (err < 0)
return err;
- if (regs[0] & REG_SECONDS_OS) {
- /*
- * If the oscillator was stopped, try to clear the flag. Upon
- * power-up the flag is always set, but if we cannot clear it
- * the oscillator isn't running properly for some reason. The
- * sensible thing therefore is to return an error, signalling
- * that the clock cannot be assumed to be correct.
- */
-
- regs[0] &= ~REG_SECONDS_OS;
-
- err = pcf8523_write(client, REG_SECONDS, regs[0]);
- if (err < 0)
- return err;
-
- err = pcf8523_read(client, REG_SECONDS, &regs[0]);
- if (err < 0)
- return err;
-
- if (regs[0] & REG_SECONDS_OS)
- return -EAGAIN;
- }
+ if (regs[0] & REG_SECONDS_OS)
+ return -EINVAL;
tm->tm_sec = bcd2bin(regs[0] & 0x7f);
tm->tm_min = bcd2bin(regs[1] & 0x7f);
@@ -235,6 +215,7 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
return err;
regs[0] = REG_SECONDS;
+ /* This will purposely overwrite REG_SECONDS_OS */
regs[1] = bin2bcd(tm->tm_sec);
regs[2] = bin2bcd(tm->tm_min);
regs[3] = bin2bcd(tm->tm_hour);
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
new file mode 100644
index 000000000000..64e1e4578492
--- /dev/null
+++ b/drivers/rtc/rtc-pic32.c
@@ -0,0 +1,411 @@
+/*
+ * PIC32 RTC driver
+ *
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+#define PIC32_RTCCON 0x00
+#define PIC32_RTCCON_ON BIT(15)
+#define PIC32_RTCCON_SIDL BIT(13)
+#define PIC32_RTCCON_RTCCLKSEL (3 << 9)
+#define PIC32_RTCCON_RTCCLKON BIT(6)
+#define PIC32_RTCCON_RTCWREN BIT(3)
+#define PIC32_RTCCON_RTCSYNC BIT(2)
+#define PIC32_RTCCON_HALFSEC BIT(1)
+#define PIC32_RTCCON_RTCOE BIT(0)
+
+#define PIC32_RTCALRM 0x10
+#define PIC32_RTCALRM_ALRMEN BIT(15)
+#define PIC32_RTCALRM_CHIME BIT(14)
+#define PIC32_RTCALRM_PIV BIT(13)
+#define PIC32_RTCALRM_ALARMSYNC BIT(12)
+#define PIC32_RTCALRM_AMASK 0x0F00
+#define PIC32_RTCALRM_ARPT 0xFF
+
+#define PIC32_RTCHOUR 0x23
+#define PIC32_RTCMIN 0x22
+#define PIC32_RTCSEC 0x21
+#define PIC32_RTCYEAR 0x33
+#define PIC32_RTCMON 0x32
+#define PIC32_RTCDAY 0x31
+
+#define PIC32_ALRMTIME 0x40
+#define PIC32_ALRMDATE 0x50
+
+#define PIC32_ALRMHOUR 0x43
+#define PIC32_ALRMMIN 0x42
+#define PIC32_ALRMSEC 0x41
+#define PIC32_ALRMYEAR 0x53
+#define PIC32_ALRMMON 0x52
+#define PIC32_ALRMDAY 0x51
+
+struct pic32_rtc_dev {
+ struct rtc_device *rtc;
+ void __iomem *reg_base;
+ struct clk *clk;
+ spinlock_t alarm_lock;
+ int alarm_irq;
+ bool alarm_clk_enabled;
+};
+
+static void pic32_rtc_alarm_clk_enable(struct pic32_rtc_dev *pdata,
+ bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->alarm_lock, flags);
+ if (enable) {
+ if (!pdata->alarm_clk_enabled) {
+ clk_enable(pdata->clk);
+ pdata->alarm_clk_enabled = true;
+ }
+ } else {
+ if (pdata->alarm_clk_enabled) {
+ clk_disable(pdata->clk);
+ pdata->alarm_clk_enabled = false;
+ }
+ }
+ spin_unlock_irqrestore(&pdata->alarm_lock, flags);
+}
+
+static irqreturn_t pic32_rtc_alarmirq(int irq, void *id)
+{
+ struct pic32_rtc_dev *pdata = (struct pic32_rtc_dev *)id;
+
+ clk_enable(pdata->clk);
+ rtc_update_irq(pdata->rtc, 1, RTC_AF | RTC_IRQF);
+ clk_disable(pdata->clk);
+
+ pic32_rtc_alarm_clk_enable(pdata, false);
+
+ return IRQ_HANDLED;
+}
+
+static int pic32_rtc_setaie(struct device *dev, unsigned int enabled)
+{
+ struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+ void __iomem *base = pdata->reg_base;
+
+ clk_enable(pdata->clk);
+
+ writel(PIC32_RTCALRM_ALRMEN,
+ base + (enabled ? PIC32_SET(PIC32_RTCALRM) :
+ PIC32_CLR(PIC32_RTCALRM)));
+
+ clk_disable(pdata->clk);
+
+ pic32_rtc_alarm_clk_enable(pdata, enabled);
+
+ return 0;
+}
+
+static int pic32_rtc_setfreq(struct device *dev, int freq)
+{
+ struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+ void __iomem *base = pdata->reg_base;
+
+ clk_enable(pdata->clk);
+
+ writel(PIC32_RTCALRM_AMASK, base + PIC32_CLR(PIC32_RTCALRM));
+ writel(freq << 8, base + PIC32_SET(PIC32_RTCALRM));
+ writel(PIC32_RTCALRM_CHIME, base + PIC32_SET(PIC32_RTCALRM));
+
+ clk_disable(pdata->clk);
+
+ return 0;
+}
+
+static int pic32_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+ void __iomem *base = pdata->reg_base;
+ unsigned int tries = 0;
+
+ clk_enable(pdata->clk);
+
+ do {
+ rtc_tm->tm_hour = readb(base + PIC32_RTCHOUR);
+ rtc_tm->tm_min = readb(base + PIC32_RTCMIN);
+ rtc_tm->tm_mon = readb(base + PIC32_RTCMON);
+ rtc_tm->tm_mday = readb(base + PIC32_RTCDAY);
+ rtc_tm->tm_year = readb(base + PIC32_RTCYEAR);
+ rtc_tm->tm_sec = readb(base + PIC32_RTCSEC);
+
+ /*
+ * The only way to work out whether the system was mid-update
+ * when we read it is to check the second counter, and if it
+ * is zero, then we re-try the entire read.
+ */
+ tries += 1;
+ } while (rtc_tm->tm_sec == 0 && tries < 2);
+
+ rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
+ rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
+ rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
+ rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
+ rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon) - 1;
+ rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
+
+ rtc_tm->tm_year += 100;
+
+ dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n",
+ 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+ rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
+
+ clk_disable(pdata->clk);
+ return rtc_valid_tm(rtc_tm);
+}
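+
+/*
+ * Example of the race the retry loop guards against (illustrative):
+ * if midnight rolls over between the earlier reads and the final
+ * RTCSEC read, the snapshot holds hour=23, min=59 but sec=0; since
+ * tm_sec is 0, the loop re-reads once and picks up a consistent
+ * post-rollover set of values.
+ */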
+
+static int pic32_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+ void __iomem *base = pdata->reg_base;
+ int year = tm->tm_year - 100;
+
+ dev_dbg(dev, "set time %04d.%02d.%02d %02d:%02d:%02d\n",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ if (year < 0 || year >= 100) {
+ dev_err(dev, "rtc only supports 100 years\n");
+ return -EINVAL;
+ }
+
+ clk_enable(pdata->clk);
+ writeb(bin2bcd(tm->tm_sec), base + PIC32_RTCSEC);
+ writeb(bin2bcd(tm->tm_min), base + PIC32_RTCMIN);
+ writeb(bin2bcd(tm->tm_hour), base + PIC32_RTCHOUR);
+ writeb(bin2bcd(tm->tm_mday), base + PIC32_RTCDAY);
+ writeb(bin2bcd(tm->tm_mon + 1), base + PIC32_RTCMON);
+ writeb(bin2bcd(year), base + PIC32_RTCYEAR);
+ clk_disable(pdata->clk);
+
+ return 0;
+}
+
+static int pic32_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+ struct rtc_time *alm_tm = &alrm->time;
+ void __iomem *base = pdata->reg_base;
+ unsigned int alm_en;
+
+ clk_enable(pdata->clk);
+ alm_tm->tm_sec = readb(base + PIC32_ALRMSEC);
+ alm_tm->tm_min = readb(base + PIC32_ALRMMIN);
+ alm_tm->tm_hour = readb(base + PIC32_ALRMHOUR);
+ alm_tm->tm_mon = readb(base + PIC32_ALRMMON);
+ alm_tm->tm_mday = readb(base + PIC32_ALRMDAY);
+ alm_tm->tm_year = readb(base + PIC32_ALRMYEAR);
+
+ alm_en = readb(base + PIC32_RTCALRM);
+
+ alrm->enabled = (alm_en & PIC32_RTCALRM_ALRMEN) ? 1 : 0;
+
+ dev_dbg(dev, "getalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
+ alm_en,
+ 1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+ alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
+
+ alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
+ alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
+ alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
+ alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
+ alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon) - 1;
+ alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
+
+ clk_disable(pdata->clk);
+ return 0;
+}
+
+static int pic32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alrm->time;
+ void __iomem *base = pdata->reg_base;
+
+ clk_enable(pdata->clk);
+ dev_dbg(dev, "setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
+ alrm->enabled,
+ 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ writel(0x00, base + PIC32_ALRMTIME);
+ writel(0x00, base + PIC32_ALRMDATE);
+
+ pic32_rtc_setaie(dev, alrm->enabled);
+
+ clk_disable(pdata->clk);
+ return 0;
+}
+
+static int pic32_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ struct pic32_rtc_dev *pdata = dev_get_drvdata(dev);
+ void __iomem *base = pdata->reg_base;
+ unsigned int repeat;
+
+ clk_enable(pdata->clk);
+
+ repeat = readw(base + PIC32_RTCALRM);
+ repeat &= PIC32_RTCALRM_ARPT;
+ seq_printf(seq, "periodic_IRQ\t: %s\n", repeat ? "yes" : "no");
+
+ clk_disable(pdata->clk);
+ return 0;
+}
+
+static const struct rtc_class_ops pic32_rtcops = {
+ .read_time = pic32_rtc_gettime,
+ .set_time = pic32_rtc_settime,
+ .read_alarm = pic32_rtc_getalarm,
+ .set_alarm = pic32_rtc_setalarm,
+ .proc = pic32_rtc_proc,
+ .alarm_irq_enable = pic32_rtc_setaie,
+};
+
+static void pic32_rtc_enable(struct pic32_rtc_dev *pdata, int en)
+{
+ void __iomem *base = pdata->reg_base;
+
+ if (!base)
+ return;
+
+ clk_enable(pdata->clk);
+ if (!en) {
+ writel(PIC32_RTCCON_ON, base + PIC32_CLR(PIC32_RTCCON));
+ } else {
+ pic32_syskey_unlock();
+
+ writel(PIC32_RTCCON_RTCWREN, base + PIC32_SET(PIC32_RTCCON));
+ writel(3 << 9, base + PIC32_CLR(PIC32_RTCCON));
+
+ if (!(readl(base + PIC32_RTCCON) & PIC32_RTCCON_ON))
+ writel(PIC32_RTCCON_ON, base + PIC32_SET(PIC32_RTCCON));
+ }
+ clk_disable(pdata->clk);
+}
+
+static int pic32_rtc_remove(struct platform_device *pdev)
+{
+ struct pic32_rtc_dev *pdata = platform_get_drvdata(pdev);
+
+ pic32_rtc_setaie(&pdev->dev, 0);
+ clk_unprepare(pdata->clk);
+ pdata->clk = NULL;
+
+ return 0;
+}
+
+static int pic32_rtc_probe(struct platform_device *pdev)
+{
+ struct pic32_rtc_dev *pdata;
+ struct resource *res;
+ int ret;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pdata);
+
+ pdata->alarm_irq = platform_get_irq(pdev, 0);
+ if (pdata->alarm_irq < 0) {
+ dev_err(&pdev->dev, "no irq for alarm\n");
+ return pdata->alarm_irq;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pdata->reg_base))
+ return PTR_ERR(pdata->reg_base);
+
+ pdata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdata->clk)) {
+ dev_err(&pdev->dev, "failed to find rtc clock source\n");
+ ret = PTR_ERR(pdata->clk);
+ pdata->clk = NULL;
+ return ret;
+ }
+
+ spin_lock_init(&pdata->alarm_lock);
+
+ clk_prepare_enable(pdata->clk);
+
+ pic32_rtc_enable(pdata, 1);
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &pic32_rtcops,
+ THIS_MODULE);
+ if (IS_ERR(pdata->rtc)) {
+ ret = PTR_ERR(pdata->rtc);
+ goto err_nortc;
+ }
+
+ pdata->rtc->max_user_freq = 128;
+
+ pic32_rtc_setfreq(&pdev->dev, 1);
+ ret = devm_request_irq(&pdev->dev, pdata->alarm_irq,
+ pic32_rtc_alarmirq, 0,
+ dev_name(&pdev->dev), pdata);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "IRQ %d error %d\n", pdata->alarm_irq, ret);
+ goto err_nortc;
+ }
+
+ clk_disable(pdata->clk);
+
+ return 0;
+
+err_nortc:
+ pic32_rtc_enable(pdata, 0);
+ clk_disable_unprepare(pdata->clk);
+
+ return ret;
+}
+
+static const struct of_device_id pic32_rtc_dt_ids[] = {
+ { .compatible = "microchip,pic32mzda-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_rtc_dt_ids);
+
+static struct platform_driver pic32_rtc_driver = {
+ .probe = pic32_rtc_probe,
+ .remove = pic32_rtc_remove,
+ .driver = {
+ .name = "pic32-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pic32_rtc_dt_ids),
+ },
+};
+module_platform_driver(pic32_rtc_driver);
+
+MODULE_DESCRIPTION("Microchip PIC32 RTC Driver");
+MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index e9ac5a43be1a..d0cbf08040cd 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -1,7 +1,8 @@
/*
- * Micro Crystal RV-3029C2 rtc class driver
+ * Micro Crystal RV-3029 rtc class driver
*
* Author: Gregory Hermant <gregory.hermant@calao-systems.com>
+ * Michael Buesch <m@bues.ch>
*
* based on previously existing rtc class drivers
*
@@ -9,89 +10,120 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * NOTE: Currently this driver only supports the bare minimum for read
- * and write the RTC and alarms. The extra features provided by this chip
- * (trickle charger, eeprom, T° compensation) are unavailable.
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
/* Register map */
/* control section */
-#define RV3029C2_ONOFF_CTRL 0x00
-#define RV3029C2_IRQ_CTRL 0x01
-#define RV3029C2_IRQ_CTRL_AIE (1 << 0)
-#define RV3029C2_IRQ_FLAGS 0x02
-#define RV3029C2_IRQ_FLAGS_AF (1 << 0)
-#define RV3029C2_STATUS 0x03
-#define RV3029C2_STATUS_VLOW1 (1 << 2)
-#define RV3029C2_STATUS_VLOW2 (1 << 3)
-#define RV3029C2_STATUS_SR (1 << 4)
-#define RV3029C2_STATUS_PON (1 << 5)
-#define RV3029C2_STATUS_EEBUSY (1 << 7)
-#define RV3029C2_RST_CTRL 0x04
-#define RV3029C2_CONTROL_SECTION_LEN 0x05
+#define RV3029_ONOFF_CTRL 0x00
+#define RV3029_ONOFF_CTRL_WE BIT(0)
+#define RV3029_ONOFF_CTRL_TE BIT(1)
+#define RV3029_ONOFF_CTRL_TAR BIT(2)
+#define RV3029_ONOFF_CTRL_EERE BIT(3)
+#define RV3029_ONOFF_CTRL_SRON BIT(4)
+#define RV3029_ONOFF_CTRL_TD0 BIT(5)
+#define RV3029_ONOFF_CTRL_TD1 BIT(6)
+#define RV3029_ONOFF_CTRL_CLKINT BIT(7)
+#define RV3029_IRQ_CTRL 0x01
+#define RV3029_IRQ_CTRL_AIE BIT(0)
+#define RV3029_IRQ_CTRL_TIE BIT(1)
+#define RV3029_IRQ_CTRL_V1IE BIT(2)
+#define RV3029_IRQ_CTRL_V2IE BIT(3)
+#define RV3029_IRQ_CTRL_SRIE BIT(4)
+#define RV3029_IRQ_FLAGS 0x02
+#define RV3029_IRQ_FLAGS_AF BIT(0)
+#define RV3029_IRQ_FLAGS_TF BIT(1)
+#define RV3029_IRQ_FLAGS_V1IF BIT(2)
+#define RV3029_IRQ_FLAGS_V2IF BIT(3)
+#define RV3029_IRQ_FLAGS_SRF BIT(4)
+#define RV3029_STATUS 0x03
+#define RV3029_STATUS_VLOW1 BIT(2)
+#define RV3029_STATUS_VLOW2 BIT(3)
+#define RV3029_STATUS_SR BIT(4)
+#define RV3029_STATUS_PON BIT(5)
+#define RV3029_STATUS_EEBUSY BIT(7)
+#define RV3029_RST_CTRL 0x04
+#define RV3029_RST_CTRL_SYSR BIT(4)
+#define RV3029_CONTROL_SECTION_LEN 0x05
/* watch section */
-#define RV3029C2_W_SEC 0x08
-#define RV3029C2_W_MINUTES 0x09
-#define RV3029C2_W_HOURS 0x0A
-#define RV3029C2_REG_HR_12_24 (1<<6) /* 24h/12h mode */
-#define RV3029C2_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */
-#define RV3029C2_W_DATE 0x0B
-#define RV3029C2_W_DAYS 0x0C
-#define RV3029C2_W_MONTHS 0x0D
-#define RV3029C2_W_YEARS 0x0E
-#define RV3029C2_WATCH_SECTION_LEN 0x07
+#define RV3029_W_SEC 0x08
+#define RV3029_W_MINUTES 0x09
+#define RV3029_W_HOURS 0x0A
+#define RV3029_REG_HR_12_24 BIT(6) /* 24h/12h mode */
+#define RV3029_REG_HR_PM BIT(5) /* PM/AM bit in 12h mode */
+#define RV3029_W_DATE 0x0B
+#define RV3029_W_DAYS 0x0C
+#define RV3029_W_MONTHS 0x0D
+#define RV3029_W_YEARS 0x0E
+#define RV3029_WATCH_SECTION_LEN 0x07
/* alarm section */
-#define RV3029C2_A_SC 0x10
-#define RV3029C2_A_MN 0x11
-#define RV3029C2_A_HR 0x12
-#define RV3029C2_A_DT 0x13
-#define RV3029C2_A_DW 0x14
-#define RV3029C2_A_MO 0x15
-#define RV3029C2_A_YR 0x16
-#define RV3029C2_ALARM_SECTION_LEN 0x07
+#define RV3029_A_SC 0x10
+#define RV3029_A_MN 0x11
+#define RV3029_A_HR 0x12
+#define RV3029_A_DT 0x13
+#define RV3029_A_DW 0x14
+#define RV3029_A_MO 0x15
+#define RV3029_A_YR 0x16
+#define RV3029_ALARM_SECTION_LEN 0x07
/* timer section */
-#define RV3029C2_TIMER_LOW 0x18
-#define RV3029C2_TIMER_HIGH 0x19
+#define RV3029_TIMER_LOW 0x18
+#define RV3029_TIMER_HIGH 0x19
/* temperature section */
-#define RV3029C2_TEMP_PAGE 0x20
+#define RV3029_TEMP_PAGE 0x20
/* eeprom data section */
-#define RV3029C2_E2P_EEDATA1 0x28
-#define RV3029C2_E2P_EEDATA2 0x29
+#define RV3029_E2P_EEDATA1 0x28
+#define RV3029_E2P_EEDATA2 0x29
+#define RV3029_E2PDATA_SECTION_LEN 0x02
/* eeprom control section */
-#define RV3029C2_CONTROL_E2P_EECTRL 0x30
-#define RV3029C2_TRICKLE_1K (1<<0) /* 1K resistance */
-#define RV3029C2_TRICKLE_5K (1<<1) /* 5K resistance */
-#define RV3029C2_TRICKLE_20K (1<<2) /* 20K resistance */
-#define RV3029C2_TRICKLE_80K (1<<3) /* 80K resistance */
-#define RV3029C2_CONTROL_E2P_XTALOFFSET 0x31
-#define RV3029C2_CONTROL_E2P_QCOEF 0x32
-#define RV3029C2_CONTROL_E2P_TURNOVER 0x33
+#define RV3029_CONTROL_E2P_EECTRL 0x30
+#define RV3029_EECTRL_THP BIT(0) /* temp scan interval */
+#define RV3029_EECTRL_THE BIT(1) /* thermometer enable */
+#define RV3029_EECTRL_FD0 BIT(2) /* CLKOUT */
+#define RV3029_EECTRL_FD1 BIT(3) /* CLKOUT */
+#define RV3029_TRICKLE_1K BIT(4) /* 1.5K resistance */
+#define RV3029_TRICKLE_5K BIT(5) /* 5K resistance */
+#define RV3029_TRICKLE_20K BIT(6) /* 20K resistance */
+#define RV3029_TRICKLE_80K BIT(7) /* 80K resistance */
+#define RV3029_TRICKLE_MASK (RV3029_TRICKLE_1K |\
+ RV3029_TRICKLE_5K |\
+ RV3029_TRICKLE_20K |\
+ RV3029_TRICKLE_80K)
+#define RV3029_TRICKLE_SHIFT 4
+#define RV3029_CONTROL_E2P_XOFFS 0x31 /* XTAL offset */
+#define RV3029_CONTROL_E2P_XOFFS_SIGN BIT(7) /* Sign: 1->pos, 0->neg */
+#define RV3029_CONTROL_E2P_QCOEF 0x32 /* XTAL temp drift coef */
+#define RV3029_CONTROL_E2P_TURNOVER 0x33 /* XTAL turnover temp (in *C) */
+#define RV3029_CONTROL_E2P_TOV_MASK 0x3F /* XTAL turnover temp mask */
/* user ram section */
-#define RV3029C2_USR1_RAM_PAGE 0x38
-#define RV3029C2_USR1_SECTION_LEN 0x04
-#define RV3029C2_USR2_RAM_PAGE 0x3C
-#define RV3029C2_USR2_SECTION_LEN 0x04
+#define RV3029_USR1_RAM_PAGE 0x38
+#define RV3029_USR1_SECTION_LEN 0x04
+#define RV3029_USR2_RAM_PAGE 0x3C
+#define RV3029_USR2_SECTION_LEN 0x04
static int
-rv3029c2_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
- unsigned len)
+rv3029_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
+ unsigned len)
{
int ret;
- if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
- (reg + len > RV3029C2_USR1_RAM_PAGE + 8))
+ if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
+ (reg + len > RV3029_USR1_RAM_PAGE + 8))
return -EINVAL;
ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf);
@@ -103,20 +135,38 @@ rv3029c2_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
}
static int
-rv3029c2_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
- unsigned len)
+rv3029_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
+ unsigned len)
{
- if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
- (reg + len > RV3029C2_USR1_RAM_PAGE + 8))
+ if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
+ (reg + len > RV3029_USR1_RAM_PAGE + 8))
return -EINVAL;
return i2c_smbus_write_i2c_block_data(client, reg, len, buf);
}
static int
-rv3029c2_i2c_get_sr(struct i2c_client *client, u8 *buf)
+rv3029_i2c_update_bits(struct i2c_client *client, u8 reg, u8 mask, u8 set)
+{
+ u8 buf;
+ int ret;
+
+ ret = rv3029_i2c_read_regs(client, reg, &buf, 1);
+ if (ret < 0)
+ return ret;
+ buf &= ~mask;
+ buf |= set & mask;
+ ret = rv3029_i2c_write_regs(client, reg, &buf, 1);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+rv3029_i2c_get_sr(struct i2c_client *client, u8 *buf)
{
- int ret = rv3029c2_i2c_read_regs(client, RV3029C2_STATUS, buf, 1);
+ int ret = rv3029_i2c_read_regs(client, RV3029_STATUS, buf, 1);
if (ret < 0)
return -EIO;
@@ -125,83 +175,224 @@ rv3029c2_i2c_get_sr(struct i2c_client *client, u8 *buf)
}
static int
-rv3029c2_i2c_set_sr(struct i2c_client *client, u8 val)
+rv3029_i2c_set_sr(struct i2c_client *client, u8 val)
{
u8 buf[1];
int sr;
buf[0] = val;
- sr = rv3029c2_i2c_write_regs(client, RV3029C2_STATUS, buf, 1);
+ sr = rv3029_i2c_write_regs(client, RV3029_STATUS, buf, 1);
dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
if (sr < 0)
return -EIO;
return 0;
}
+static int rv3029_eeprom_busywait(struct i2c_client *client)
+{
+ int i, ret;
+ u8 sr;
+
+ for (i = 100; i > 0; i--) {
+ ret = rv3029_i2c_get_sr(client, &sr);
+ if (ret < 0)
+ break;
+ if (!(sr & RV3029_STATUS_EEBUSY))
+ break;
+ usleep_range(1000, 10000);
+ }
+ if (i <= 0) {
+ dev_err(&client->dev, "EEPROM busy wait timeout.\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int rv3029_eeprom_exit(struct i2c_client *client)
+{
+ /* Re-enable eeprom refresh */
+ return rv3029_i2c_update_bits(client, RV3029_ONOFF_CTRL,
+ RV3029_ONOFF_CTRL_EERE,
+ RV3029_ONOFF_CTRL_EERE);
+}
+
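+/*
+ * EEPROM access protocol: verify the supply voltage is in range,
+ * disable the automatic EEPROM refresh (EERE) and wait for any
+ * in-flight EEPROM operation to finish; rv3029_eeprom_exit()
+ * re-enables the refresh afterwards.
+ */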
+static int rv3029_eeprom_enter(struct i2c_client *client)
+{
+ int ret;
+ u8 sr;
+
+ /* Check whether we are in the allowed voltage range. */
+ ret = rv3029_i2c_get_sr(client, &sr);
+ if (ret < 0)
+ return ret;
+ if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
+ /* Clear the bits and retry once, in case a brown-out
+ * occurred during early startup.
+ */
+ sr &= ~RV3029_STATUS_VLOW1;
+ sr &= ~RV3029_STATUS_VLOW2;
+ ret = rv3029_i2c_set_sr(client, sr);
+ if (ret < 0)
+ return ret;
+ usleep_range(1000, 10000);
+ ret = rv3029_i2c_get_sr(client, &sr);
+ if (ret < 0)
+ return ret;
+ if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
+ dev_err(&client->dev,
+ "Supply voltage is too low to safely access the EEPROM.\n");
+ return -ENODEV;
+ }
+ }
+
+ /* Disable eeprom refresh. */
+ ret = rv3029_i2c_update_bits(client, RV3029_ONOFF_CTRL,
+ RV3029_ONOFF_CTRL_EERE, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for any previous eeprom accesses to finish. */
+ ret = rv3029_eeprom_busywait(client);
+ if (ret < 0)
+ rv3029_eeprom_exit(client);
+
+ return ret;
+}
+
+static int rv3029_eeprom_read(struct i2c_client *client, u8 reg,
+ u8 buf[], size_t len)
+{
+ int ret, err;
+
+ err = rv3029_eeprom_enter(client);
+ if (err < 0)
+ return err;
+
+ ret = rv3029_i2c_read_regs(client, reg, buf, len);
+
+ err = rv3029_eeprom_exit(client);
+ if (err < 0)
+ return err;
+
+ return ret;
+}
+
+static int rv3029_eeprom_write(struct i2c_client *client, u8 reg,
+ u8 const buf[], size_t len)
+{
+ int ret, err;
+ size_t i;
+ u8 tmp;
+
+ err = rv3029_eeprom_enter(client);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < len; i++, reg++) {
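+ /* Only write bytes that actually change, to limit EEPROM wear. */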
+ ret = rv3029_i2c_read_regs(client, reg, &tmp, 1);
+ if (ret < 0)
+ break;
+ if (tmp != buf[i]) {
+ ret = rv3029_i2c_write_regs(client, reg, &buf[i], 1);
+ if (ret < 0)
+ break;
+ }
+ ret = rv3029_eeprom_busywait(client);
+ if (ret < 0)
+ break;
+ }
+
+ err = rv3029_eeprom_exit(client);
+ if (err < 0)
+ return err;
+
+ return ret;
+}
+
+static int rv3029_eeprom_update_bits(struct i2c_client *client,
+ u8 reg, u8 mask, u8 set)
+{
+ u8 buf;
+ int ret;
+
+ ret = rv3029_eeprom_read(client, reg, &buf, 1);
+ if (ret < 0)
+ return ret;
+ buf &= ~mask;
+ buf |= set & mask;
+ ret = rv3029_eeprom_write(client, reg, &buf, 1);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int
-rv3029c2_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
+rv3029_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
{
u8 buf[1];
int ret;
- u8 regs[RV3029C2_WATCH_SECTION_LEN] = { 0, };
+ u8 regs[RV3029_WATCH_SECTION_LEN] = { 0, };
- ret = rv3029c2_i2c_get_sr(client, buf);
+ ret = rv3029_i2c_get_sr(client, buf);
if (ret < 0) {
dev_err(&client->dev, "%s: reading SR failed\n", __func__);
return -EIO;
}
- ret = rv3029c2_i2c_read_regs(client, RV3029C2_W_SEC , regs,
- RV3029C2_WATCH_SECTION_LEN);
+ ret = rv3029_i2c_read_regs(client, RV3029_W_SEC, regs,
+ RV3029_WATCH_SECTION_LEN);
if (ret < 0) {
dev_err(&client->dev, "%s: reading RTC section failed\n",
__func__);
return ret;
}
- tm->tm_sec = bcd2bin(regs[RV3029C2_W_SEC-RV3029C2_W_SEC]);
- tm->tm_min = bcd2bin(regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC]);
+ tm->tm_sec = bcd2bin(regs[RV3029_W_SEC-RV3029_W_SEC]);
+ tm->tm_min = bcd2bin(regs[RV3029_W_MINUTES-RV3029_W_SEC]);
/* HR field has a more complex interpretation */
{
- const u8 _hr = regs[RV3029C2_W_HOURS-RV3029C2_W_SEC];
- if (_hr & RV3029C2_REG_HR_12_24) {
+ const u8 _hr = regs[RV3029_W_HOURS-RV3029_W_SEC];
+
+ if (_hr & RV3029_REG_HR_12_24) {
/* 12h format */
tm->tm_hour = bcd2bin(_hr & 0x1f);
- if (_hr & RV3029C2_REG_HR_PM) /* PM flag set */
+ if (_hr & RV3029_REG_HR_PM) /* PM flag set */
tm->tm_hour += 12;
} else /* 24h format */
tm->tm_hour = bcd2bin(_hr & 0x3f);
}
- tm->tm_mday = bcd2bin(regs[RV3029C2_W_DATE-RV3029C2_W_SEC]);
- tm->tm_mon = bcd2bin(regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC]) - 1;
- tm->tm_year = bcd2bin(regs[RV3029C2_W_YEARS-RV3029C2_W_SEC]) + 100;
- tm->tm_wday = bcd2bin(regs[RV3029C2_W_DAYS-RV3029C2_W_SEC]) - 1;
+ tm->tm_mday = bcd2bin(regs[RV3029_W_DATE-RV3029_W_SEC]);
+ tm->tm_mon = bcd2bin(regs[RV3029_W_MONTHS-RV3029_W_SEC]) - 1;
+ tm->tm_year = bcd2bin(regs[RV3029_W_YEARS-RV3029_W_SEC]) + 100;
+ tm->tm_wday = bcd2bin(regs[RV3029_W_DAYS-RV3029_W_SEC]) - 1;
return 0;
}
-static int rv3029c2_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int rv3029_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- return rv3029c2_i2c_read_time(to_i2c_client(dev), tm);
+ return rv3029_i2c_read_time(to_i2c_client(dev), tm);
}
static int
-rv3029c2_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
+rv3029_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
{
struct rtc_time *const tm = &alarm->time;
int ret;
u8 regs[8];
- ret = rv3029c2_i2c_get_sr(client, regs);
+ ret = rv3029_i2c_get_sr(client, regs);
if (ret < 0) {
dev_err(&client->dev, "%s: reading SR failed\n", __func__);
return -EIO;
}
- ret = rv3029c2_i2c_read_regs(client, RV3029C2_A_SC, regs,
- RV3029C2_ALARM_SECTION_LEN);
+ ret = rv3029_i2c_read_regs(client, RV3029_A_SC, regs,
+ RV3029_ALARM_SECTION_LEN);
if (ret < 0) {
dev_err(&client->dev, "%s: reading alarm section failed\n",
@@ -209,51 +400,42 @@ rv3029c2_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
return ret;
}
- tm->tm_sec = bcd2bin(regs[RV3029C2_A_SC-RV3029C2_A_SC] & 0x7f);
- tm->tm_min = bcd2bin(regs[RV3029C2_A_MN-RV3029C2_A_SC] & 0x7f);
- tm->tm_hour = bcd2bin(regs[RV3029C2_A_HR-RV3029C2_A_SC] & 0x3f);
- tm->tm_mday = bcd2bin(regs[RV3029C2_A_DT-RV3029C2_A_SC] & 0x3f);
- tm->tm_mon = bcd2bin(regs[RV3029C2_A_MO-RV3029C2_A_SC] & 0x1f) - 1;
- tm->tm_year = bcd2bin(regs[RV3029C2_A_YR-RV3029C2_A_SC] & 0x7f) + 100;
- tm->tm_wday = bcd2bin(regs[RV3029C2_A_DW-RV3029C2_A_SC] & 0x07) - 1;
+ tm->tm_sec = bcd2bin(regs[RV3029_A_SC-RV3029_A_SC] & 0x7f);
+ tm->tm_min = bcd2bin(regs[RV3029_A_MN-RV3029_A_SC] & 0x7f);
+ tm->tm_hour = bcd2bin(regs[RV3029_A_HR-RV3029_A_SC] & 0x3f);
+ tm->tm_mday = bcd2bin(regs[RV3029_A_DT-RV3029_A_SC] & 0x3f);
+ tm->tm_mon = bcd2bin(regs[RV3029_A_MO-RV3029_A_SC] & 0x1f) - 1;
+ tm->tm_year = bcd2bin(regs[RV3029_A_YR-RV3029_A_SC] & 0x7f) + 100;
+ tm->tm_wday = bcd2bin(regs[RV3029_A_DW-RV3029_A_SC] & 0x07) - 1;
return 0;
}
static int
-rv3029c2_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+rv3029_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- return rv3029c2_i2c_read_alarm(to_i2c_client(dev), alarm);
+ return rv3029_i2c_read_alarm(to_i2c_client(dev), alarm);
}
-static int rv3029c2_rtc_i2c_alarm_set_irq(struct i2c_client *client,
+static int rv3029_rtc_i2c_alarm_set_irq(struct i2c_client *client,
int enable)
{
int ret;
- u8 buf[1];
-
- /* enable AIE irq */
- ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
- if (ret < 0) {
- dev_err(&client->dev, "can't read INT reg\n");
- return ret;
- }
- if (enable)
- buf[0] |= RV3029C2_IRQ_CTRL_AIE;
- else
- buf[0] &= ~RV3029C2_IRQ_CTRL_AIE;
- ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
+ /* enable/disable AIE irq */
+ ret = rv3029_i2c_update_bits(client, RV3029_IRQ_CTRL,
+ RV3029_IRQ_CTRL_AIE,
+ (enable ? RV3029_IRQ_CTRL_AIE : 0));
if (ret < 0) {
- dev_err(&client->dev, "can't set INT reg\n");
+ dev_err(&client->dev, "can't update INT reg\n");
return ret;
}
return 0;
}
-static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
- struct rtc_wkalrm *alarm)
+static int rv3029_rtc_i2c_set_alarm(struct i2c_client *client,
+ struct rtc_wkalrm *alarm)
{
struct rtc_time *const tm = &alarm->time;
int ret;
@@ -267,50 +449,41 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
if (tm->tm_year < 100)
return -EINVAL;
- ret = rv3029c2_i2c_get_sr(client, regs);
+ ret = rv3029_i2c_get_sr(client, regs);
if (ret < 0) {
dev_err(&client->dev, "%s: reading SR failed\n", __func__);
return -EIO;
}
- regs[RV3029C2_A_SC-RV3029C2_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
- regs[RV3029C2_A_MN-RV3029C2_A_SC] = bin2bcd(tm->tm_min & 0x7f);
- regs[RV3029C2_A_HR-RV3029C2_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
- regs[RV3029C2_A_DT-RV3029C2_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
- regs[RV3029C2_A_MO-RV3029C2_A_SC] = bin2bcd((tm->tm_mon & 0x1f) - 1);
- regs[RV3029C2_A_DW-RV3029C2_A_SC] = bin2bcd((tm->tm_wday & 7) - 1);
- regs[RV3029C2_A_YR-RV3029C2_A_SC] = bin2bcd((tm->tm_year & 0x7f) - 100);
-
- ret = rv3029c2_i2c_write_regs(client, RV3029C2_A_SC, regs,
- RV3029C2_ALARM_SECTION_LEN);
+ regs[RV3029_A_SC-RV3029_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
+ regs[RV3029_A_MN-RV3029_A_SC] = bin2bcd(tm->tm_min & 0x7f);
+ regs[RV3029_A_HR-RV3029_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
+ regs[RV3029_A_DT-RV3029_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
+ regs[RV3029_A_MO-RV3029_A_SC] = bin2bcd((tm->tm_mon & 0x1f) - 1);
+ regs[RV3029_A_DW-RV3029_A_SC] = bin2bcd((tm->tm_wday & 7) - 1);
+ regs[RV3029_A_YR-RV3029_A_SC] = bin2bcd((tm->tm_year & 0x7f) - 100);
+
+ ret = rv3029_i2c_write_regs(client, RV3029_A_SC, regs,
+ RV3029_ALARM_SECTION_LEN);
if (ret < 0)
return ret;
if (alarm->enabled) {
- u8 buf[1];
-
/* clear AF flag */
- ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_FLAGS,
- buf, 1);
- if (ret < 0) {
- dev_err(&client->dev, "can't read alarm flag\n");
- return ret;
- }
- buf[0] &= ~RV3029C2_IRQ_FLAGS_AF;
- ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_FLAGS,
- buf, 1);
+ ret = rv3029_i2c_update_bits(client, RV3029_IRQ_FLAGS,
+ RV3029_IRQ_FLAGS_AF, 0);
if (ret < 0) {
- dev_err(&client->dev, "can't set alarm flag\n");
+ dev_err(&client->dev, "can't clear alarm flag\n");
return ret;
}
/* enable AIE irq */
- ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
+ ret = rv3029_rtc_i2c_alarm_set_irq(client, 1);
if (ret)
return ret;
dev_dbg(&client->dev, "alarm IRQ armed\n");
} else {
/* disable AIE irq */
- ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
+ ret = rv3029_rtc_i2c_alarm_set_irq(client, 0);
if (ret)
return ret;
@@ -320,13 +493,13 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
return 0;
}
-static int rv3029c2_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+static int rv3029_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- return rv3029c2_rtc_i2c_set_alarm(to_i2c_client(dev), alarm);
+ return rv3029_rtc_i2c_set_alarm(to_i2c_client(dev), alarm);
}
static int
-rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
+rv3029_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
{
u8 regs[8];
int ret;
@@ -339,26 +512,26 @@ rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
if (tm->tm_year < 100)
return -EINVAL;
- regs[RV3029C2_W_SEC-RV3029C2_W_SEC] = bin2bcd(tm->tm_sec);
- regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC] = bin2bcd(tm->tm_min);
- regs[RV3029C2_W_HOURS-RV3029C2_W_SEC] = bin2bcd(tm->tm_hour);
- regs[RV3029C2_W_DATE-RV3029C2_W_SEC] = bin2bcd(tm->tm_mday);
- regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC] = bin2bcd(tm->tm_mon+1);
- regs[RV3029C2_W_DAYS-RV3029C2_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
- regs[RV3029C2_W_YEARS-RV3029C2_W_SEC] = bin2bcd(tm->tm_year - 100);
+ regs[RV3029_W_SEC-RV3029_W_SEC] = bin2bcd(tm->tm_sec);
+ regs[RV3029_W_MINUTES-RV3029_W_SEC] = bin2bcd(tm->tm_min);
+ regs[RV3029_W_HOURS-RV3029_W_SEC] = bin2bcd(tm->tm_hour);
+ regs[RV3029_W_DATE-RV3029_W_SEC] = bin2bcd(tm->tm_mday);
+ regs[RV3029_W_MONTHS-RV3029_W_SEC] = bin2bcd(tm->tm_mon+1);
+ regs[RV3029_W_DAYS-RV3029_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
+ regs[RV3029_W_YEARS-RV3029_W_SEC] = bin2bcd(tm->tm_year - 100);
- ret = rv3029c2_i2c_write_regs(client, RV3029C2_W_SEC, regs,
- RV3029C2_WATCH_SECTION_LEN);
+ ret = rv3029_i2c_write_regs(client, RV3029_W_SEC, regs,
+ RV3029_WATCH_SECTION_LEN);
if (ret < 0)
return ret;
- ret = rv3029c2_i2c_get_sr(client, regs);
+ ret = rv3029_i2c_get_sr(client, regs);
if (ret < 0) {
dev_err(&client->dev, "%s: reading SR failed\n", __func__);
return ret;
}
/* clear PON bit */
- ret = rv3029c2_i2c_set_sr(client, (regs[0] & ~RV3029C2_STATUS_PON));
+ ret = rv3029_i2c_set_sr(client, (regs[0] & ~RV3029_STATUS_PON));
if (ret < 0) {
dev_err(&client->dev, "%s: writing SR failed\n", __func__);
return ret;
@@ -367,26 +540,238 @@ rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
return 0;
}
-static int rv3029c2_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int rv3029_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- return rv3029c2_i2c_set_time(to_i2c_client(dev), tm);
+ return rv3029_i2c_set_time(to_i2c_client(dev), tm);
}
-static const struct rtc_class_ops rv3029c2_rtc_ops = {
- .read_time = rv3029c2_rtc_read_time,
- .set_time = rv3029c2_rtc_set_time,
- .read_alarm = rv3029c2_rtc_read_alarm,
- .set_alarm = rv3029c2_rtc_set_alarm,
+static const struct rv3029_trickle_tab_elem {
+ u32 r; /* resistance in ohms */
+ u8 conf; /* trickle config bits */
+} rv3029_trickle_tab[] = {
+ {
+ .r = 1076,
+ .conf = RV3029_TRICKLE_1K | RV3029_TRICKLE_5K |
+ RV3029_TRICKLE_20K | RV3029_TRICKLE_80K,
+ }, {
+ .r = 1091,
+ .conf = RV3029_TRICKLE_1K | RV3029_TRICKLE_5K |
+ RV3029_TRICKLE_20K,
+ }, {
+ .r = 1137,
+ .conf = RV3029_TRICKLE_1K | RV3029_TRICKLE_5K |
+ RV3029_TRICKLE_80K,
+ }, {
+ .r = 1154,
+ .conf = RV3029_TRICKLE_1K | RV3029_TRICKLE_5K,
+ }, {
+ .r = 1371,
+ .conf = RV3029_TRICKLE_1K | RV3029_TRICKLE_20K |
+ RV3029_TRICKLE_80K,
+ }, {
+ .r = 1395,
+ .conf = RV3029_TRICKLE_1K | RV3029_TRICKLE_20K,
+ }, {
+ .r = 1472,
+ .conf = RV3029_TRICKLE_1K | RV3029_TRICKLE_80K,
+ }, {
+ .r = 1500,
+ .conf = RV3029_TRICKLE_1K,
+ }, {
+ .r = 3810,
+ .conf = RV3029_TRICKLE_5K | RV3029_TRICKLE_20K |
+ RV3029_TRICKLE_80K,
+ }, {
+ .r = 4000,
+ .conf = RV3029_TRICKLE_5K | RV3029_TRICKLE_20K,
+ }, {
+ .r = 4706,
+ .conf = RV3029_TRICKLE_5K | RV3029_TRICKLE_80K,
+ }, {
+ .r = 5000,
+ .conf = RV3029_TRICKLE_5K,
+ }, {
+ .r = 16000,
+ .conf = RV3029_TRICKLE_20K | RV3029_TRICKLE_80K,
+ }, {
+ .r = 20000,
+ .conf = RV3029_TRICKLE_20K,
+ }, {
+ .r = 80000,
+ .conf = RV3029_TRICKLE_80K,
+ },
};
-static struct i2c_device_id rv3029c2_id[] = {
+static void rv3029_trickle_config(struct i2c_client *client)
+{
+ struct device_node *of_node = client->dev.of_node;
+ const struct rv3029_trickle_tab_elem *elem;
+ int i, err;
+ u32 ohms;
+ u8 trickle_set_bits;
+
+ if (!of_node)
+ return;
+
+ /* Configure the trickle charger. */
+ err = of_property_read_u32(of_node, "trickle-resistor-ohms", &ohms);
+ if (err) {
+ /* Disable trickle charger. */
+ trickle_set_bits = 0;
+ } else {
+ /* Enable trickle charger. */
+ for (i = 0; i < ARRAY_SIZE(rv3029_trickle_tab); i++) {
+ elem = &rv3029_trickle_tab[i];
+ if (elem->r >= ohms)
+ break;
+ }
+ trickle_set_bits = elem->conf;
+ dev_info(&client->dev,
+ "Trickle charger enabled at %d ohms resistance.\n",
+ elem->r);
+ }
+ err = rv3029_eeprom_update_bits(client, RV3029_CONTROL_E2P_EECTRL,
+ RV3029_TRICKLE_MASK,
+ trickle_set_bits);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "Failed to update trickle charger config\n");
+ }
+}
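
The lookup above selects the first table entry whose resistance is greater than or equal to the requested value; if the request exceeds every entry, the last (80 kΩ) configuration is used. A minimal standalone sketch of the same selection logic, with a reduced table and config byte values assumed purely for illustration:

#include <stdio.h>

/* resistance -> config pairs; a subset of rv3029_trickle_tab, with
 * config byte values assumed purely for illustration */
static const struct {
	unsigned int r;
	unsigned char conf;
} tab[] = {
	{ 1500,  0x01 },
	{ 5000,  0x02 },
	{ 20000, 0x04 },
	{ 80000, 0x08 },
};

/* first entry with r >= ohms; clamp to the last entry otherwise */
static unsigned char pick_trickle_conf(unsigned int ohms)
{
	unsigned int i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]) - 1; i++)
		if (tab[i].r >= ohms)
			break;
	return tab[i].conf;
}

int main(void)
{
	printf("%#x\n", pick_trickle_conf(4000));  /* 0x02: 5k entry */
	printf("%#x\n", pick_trickle_conf(90000)); /* 0x08: clamped  */
	return 0;
}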
+
+#ifdef CONFIG_RTC_DRV_RV3029_HWMON
+
+static int rv3029_read_temp(struct i2c_client *client, int *temp_mC)
+{
+ int ret;
+ u8 temp;
+
+ ret = rv3029_i2c_read_regs(client, RV3029_TEMP_PAGE, &temp, 1);
+ if (ret < 0)
+ return ret;
+
+ *temp_mC = ((int)temp - 60) * 1000;
+
+ return 0;
+}
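
The temperature register holds an unsigned byte offset by -60 °C, so the conversion above is a straight linear mapping to millidegrees. A few worked values, following the formula in rv3029_read_temp:

/* raw =  60 -> ( 60 - 60) * 1000 =      0 m°C (  0 °C) */
/* raw =  88 -> ( 88 - 60) * 1000 =  28000 m°C ( 28 °C) */
/* raw =   0 -> (  0 - 60) * 1000 = -60000 m°C (-60 °C) */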
+
+static ssize_t rv3029_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = dev_get_drvdata(dev);
+ int ret, temp_mC;
+
+ ret = rv3029_read_temp(client, &temp_mC);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", temp_mC);
+}
+
+static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = dev_get_drvdata(dev);
+ unsigned long interval_ms;
+ int ret;
+ u8 th_set_bits = 0;
+
+ ret = kstrtoul(buf, 10, &interval_ms);
+ if (ret < 0)
+ return ret;
+
+ if (interval_ms != 0) {
+ th_set_bits |= RV3029_EECTRL_THE;
+ if (interval_ms >= 16000)
+ th_set_bits |= RV3029_EECTRL_THP;
+ }
+ ret = rv3029_eeprom_update_bits(client, RV3029_CONTROL_E2P_EECTRL,
+ RV3029_EECTRL_THE | RV3029_EECTRL_THP,
+ th_set_bits);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t rv3029_hwmon_show_update_interval(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = dev_get_drvdata(dev);
+ int ret, interval_ms;
+ u8 eectrl;
+
+ ret = rv3029_eeprom_read(client, RV3029_CONTROL_E2P_EECTRL,
+ &eectrl, 1);
+ if (ret < 0)
+ return ret;
+
+ if (eectrl & RV3029_EECTRL_THE) {
+ if (eectrl & RV3029_EECTRL_THP)
+ interval_ms = 16000;
+ else
+ interval_ms = 1000;
+ } else {
+ interval_ms = 0;
+ }
+
+ return sprintf(buf, "%d\n", interval_ms);
+}
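
Together, the store and show handlers above expose only three effective thermometer rates: 0 (disabled), 1000 ms (THE set) and 16000 ms (THE and THP set); any other nonzero request is rounded down to one of the two hardware rates. A standalone sketch of the mapping in both directions (pure logic, no I2C; the bit values are assumptions for illustration):

#include <stdio.h>

#define EECTRL_THE 0x01 /* thermometer enable (assumed bit value) */
#define EECTRL_THP 0x02 /* 16 s sample period (assumed bit value) */

static unsigned char interval_to_bits(unsigned long ms)
{
	unsigned char bits = 0;

	if (ms) {
		bits |= EECTRL_THE;
		if (ms >= 16000)
			bits |= EECTRL_THP;
	}
	return bits;
}

static int bits_to_interval(unsigned char eectrl)
{
	if (!(eectrl & EECTRL_THE))
		return 0;
	return (eectrl & EECTRL_THP) ? 16000 : 1000;
}

int main(void)
{
	/* a 5000 ms request rounds down to the 1000 ms hardware rate */
	printf("%d\n", bits_to_interval(interval_to_bits(5000)));
	return 0;
}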
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, rv3029_hwmon_show_temp,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO,
+ rv3029_hwmon_show_update_interval,
+ rv3029_hwmon_set_update_interval, 0);
+
+static struct attribute *rv3029_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_update_interval.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(rv3029_hwmon);
+
+static void rv3029_hwmon_register(struct i2c_client *client)
+{
+ struct device *hwmon_dev;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(
+ &client->dev, client->name, client, rv3029_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_warn(&client->dev,
+ "unable to register hwmon device %ld\n",
+ PTR_ERR(hwmon_dev));
+ }
+}
+
+#else /* CONFIG_RTC_DRV_RV3029_HWMON */
+
+static void rv3029_hwmon_register(struct i2c_client *client)
+{
+}
+
+#endif /* CONFIG_RTC_DRV_RV3029_HWMON */
+
+static const struct rtc_class_ops rv3029_rtc_ops = {
+ .read_time = rv3029_rtc_read_time,
+ .set_time = rv3029_rtc_set_time,
+ .read_alarm = rv3029_rtc_read_alarm,
+ .set_alarm = rv3029_rtc_set_alarm,
+};
+
+static struct i2c_device_id rv3029_id[] = {
+ { "rv3029", 0 },
{ "rv3029c2", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, rv3029c2_id);
+MODULE_DEVICE_TABLE(i2c, rv3029_id);
-static int rv3029c2_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rv3029_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct rtc_device *rtc;
int rc = 0;
@@ -395,14 +780,17 @@ static int rv3029c2_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL))
return -ENODEV;
- rc = rv3029c2_i2c_get_sr(client, buf);
+ rc = rv3029_i2c_get_sr(client, buf);
if (rc < 0) {
dev_err(&client->dev, "reading status failed\n");
return rc;
}
+ rv3029_trickle_config(client);
+ rv3029_hwmon_register(client);
+
rtc = devm_rtc_device_register(&client->dev, client->name,
- &rv3029c2_rtc_ops, THIS_MODULE);
+ &rv3029_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
@@ -412,16 +800,17 @@ static int rv3029c2_probe(struct i2c_client *client,
return 0;
}
-static struct i2c_driver rv3029c2_driver = {
+static struct i2c_driver rv3029_driver = {
.driver = {
.name = "rtc-rv3029c2",
},
- .probe = rv3029c2_probe,
- .id_table = rv3029c2_id,
+ .probe = rv3029_probe,
+ .id_table = rv3029_id,
};
-module_i2c_driver(rv3029c2_driver);
+module_i2c_driver(rv3029_driver);
MODULE_AUTHOR("Gregory Hermant <gregory.hermant@calao-systems.com>");
-MODULE_DESCRIPTION("Micro Crystal RV3029C2 RTC driver");
+MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
+MODULE_DESCRIPTION("Micro Crystal RV3029 RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 7155c0816aa6..f623038e586e 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -52,7 +52,7 @@
struct rv8803_data {
struct i2c_client *client;
struct rtc_device *rtc;
- spinlock_t flags_lock;
+ struct mutex flags_lock;
u8 ctrl;
};
@@ -61,13 +61,16 @@ static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
struct i2c_client *client = dev_id;
struct rv8803_data *rv8803 = i2c_get_clientdata(client);
unsigned long events = 0;
- int flags;
+ int flags, try = 0;
- spin_lock(&rv8803->flags_lock);
+ mutex_lock(&rv8803->flags_lock);
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ do {
+ flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ try++;
+ } while ((flags == -ENXIO) && (try < 3));
if (flags <= 0) {
- spin_unlock(&rv8803->flags_lock);
+ mutex_unlock(&rv8803->flags_lock);
return IRQ_NONE;
}
@@ -102,7 +105,7 @@ static irqreturn_t rv8803_handle_irq(int irq, void *dev_id)
rv8803->ctrl);
}
- spin_unlock(&rv8803->flags_lock);
+ mutex_unlock(&rv8803->flags_lock);
return IRQ_HANDLED;
}
@@ -155,7 +158,6 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
struct rv8803_data *rv8803 = dev_get_drvdata(dev);
u8 date[7];
int flags, ret;
- unsigned long irqflags;
if ((tm->tm_year < 100) || (tm->tm_year > 199))
return -EINVAL;
@@ -173,18 +175,18 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm)
if (ret < 0)
return ret;
- spin_lock_irqsave(&rv8803->flags_lock, irqflags);
+ mutex_lock(&rv8803->flags_lock);
flags = i2c_smbus_read_byte_data(rv8803->client, RV8803_FLAG);
if (flags < 0) {
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
return flags;
}
ret = i2c_smbus_write_byte_data(rv8803->client, RV8803_FLAG,
flags & ~RV8803_FLAG_V2F);
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
return ret;
}
@@ -226,7 +228,6 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u8 alarmvals[3];
u8 ctrl[2];
int ret, err;
- unsigned long irqflags;
/* The alarm has no seconds, round up to nearest minute */
if (alrm->time.tm_sec) {
@@ -236,11 +237,11 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
rtc_time64_to_tm(alarm_time, &alrm->time);
}
- spin_lock_irqsave(&rv8803->flags_lock, irqflags);
+ mutex_lock(&rv8803->flags_lock);
ret = i2c_smbus_read_i2c_block_data(client, RV8803_FLAG, 2, ctrl);
if (ret != 2) {
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
return ret < 0 ? ret : -EIO;
}
@@ -253,14 +254,14 @@ static int rv8803_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
err = i2c_smbus_write_byte_data(rv8803->client, RV8803_CTRL,
rv8803->ctrl);
if (err) {
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
return err;
}
}
ctrl[1] &= ~RV8803_FLAG_AF;
err = i2c_smbus_write_byte_data(rv8803->client, RV8803_FLAG, ctrl[1]);
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
if (err)
return err;
@@ -289,7 +290,6 @@ static int rv8803_alarm_irq_enable(struct device *dev, unsigned int enabled)
struct i2c_client *client = to_i2c_client(dev);
struct rv8803_data *rv8803 = dev_get_drvdata(dev);
int ctrl, flags, err;
- unsigned long irqflags;
ctrl = rv8803->ctrl;
@@ -305,15 +305,15 @@ static int rv8803_alarm_irq_enable(struct device *dev, unsigned int enabled)
ctrl &= ~RV8803_CTRL_AIE;
}
- spin_lock_irqsave(&rv8803->flags_lock, irqflags);
+ mutex_lock(&rv8803->flags_lock);
flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
if (flags < 0) {
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
return flags;
}
flags &= ~(RV8803_FLAG_AF | RV8803_FLAG_UF);
err = i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
if (err)
return err;
@@ -333,7 +333,6 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
struct i2c_client *client = to_i2c_client(dev);
struct rv8803_data *rv8803 = dev_get_drvdata(dev);
int flags, ret = 0;
- unsigned long irqflags;
switch (cmd) {
case RTC_VL_READ:
@@ -355,16 +354,16 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
return 0;
case RTC_VL_CLR:
- spin_lock_irqsave(&rv8803->flags_lock, irqflags);
+ mutex_lock(&rv8803->flags_lock);
flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
if (flags < 0) {
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
return flags;
}
flags &= ~(RV8803_FLAG_V1F | RV8803_FLAG_V2F);
ret = i2c_smbus_write_byte_data(client, RV8803_FLAG, flags);
- spin_unlock_irqrestore(&rv8803->flags_lock, irqflags);
+ mutex_unlock(&rv8803->flags_lock);
if (ret < 0)
return ret;
@@ -428,7 +427,7 @@ static int rv8803_probe(struct i2c_client *client,
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct rv8803_data *rv8803;
- int err, flags;
+ int err, flags, try = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_I2C_BLOCK)) {
@@ -441,10 +440,20 @@ static int rv8803_probe(struct i2c_client *client,
if (!rv8803)
return -ENOMEM;
+ mutex_init(&rv8803->flags_lock);
rv8803->client = client;
i2c_set_clientdata(client, rv8803);
- flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ /*
+ * There is a 60µs window where the RTC may not reply on the I2C bus.
+ * In that case the transfer is not ACKed, so retry the read a few
+ * times before giving up.
+ */
+ do {
+ flags = i2c_smbus_read_byte_data(client, RV8803_FLAG);
+ try++;
+ } while ((flags == -ENXIO) && (try < 3));
+
if (flags < 0)
return flags;
@@ -479,8 +488,12 @@ static int rv8803_probe(struct i2c_client *client,
return PTR_ERR(rv8803->rtc);
}
- err = i2c_smbus_write_byte_data(rv8803->client, RV8803_EXT,
- RV8803_EXT_WADA);
+ try = 0;
+ do {
+ err = i2c_smbus_write_byte_data(rv8803->client, RV8803_EXT,
+ RV8803_EXT_WADA);
+ try++;
+ } while ((err == -ENXIO) && (try < 3));
if (err)
return err;
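
The same three-try loop on -ENXIO now appears in the interrupt handler and twice in probe. A hedged refactoring sketch that centralizes the retry as a driver-local helper (the helper name is hypothetical and not part of this patch; it assumes the driver's existing <linux/i2c.h> context):

/* retry SMBus reads that hit the RV8803's ~60µs no-ACK window */
static int rv8803_read_reg_retry(const struct i2c_client *client, u8 reg)
{
	int try, ret;

	for (try = 0; try < 3; try++) {
		ret = i2c_smbus_read_byte_data(client, reg);
		if (ret != -ENXIO)
			break;
	}
	return ret;
}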
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
new file mode 100644
index 000000000000..bbad00b233bc
--- /dev/null
+++ b/drivers/rtc/rtc-rx6110.c
@@ -0,0 +1,402 @@
+/*
+ * Driver for the Epson RTC module RX-6110 SA
+ *
+ * Copyright(C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ * Copyright(C) SEIKO EPSON CORPORATION 2013. All rights reserved.
+ *
+ * This driver software is distributed as is, without any warranty of any kind,
+ * either express or implied as further specified in the GNU Public License.
+ * This software may be used and distributed according to the terms of the GNU
+ * Public License, version 2 as published by the Free Software Foundation.
+ * See the file COPYING in the main directory of this archive for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+
+/* RX-6110 Register definitions */
+#define RX6110_REG_SEC 0x10
+#define RX6110_REG_MIN 0x11
+#define RX6110_REG_HOUR 0x12
+#define RX6110_REG_WDAY 0x13
+#define RX6110_REG_MDAY 0x14
+#define RX6110_REG_MONTH 0x15
+#define RX6110_REG_YEAR 0x16
+#define RX6110_REG_RES1 0x17
+#define RX6110_REG_ALMIN 0x18
+#define RX6110_REG_ALHOUR 0x19
+#define RX6110_REG_ALWDAY 0x1A
+#define RX6110_REG_TCOUNT0 0x1B
+#define RX6110_REG_TCOUNT1 0x1C
+#define RX6110_REG_EXT 0x1D
+#define RX6110_REG_FLAG 0x1E
+#define RX6110_REG_CTRL 0x1F
+#define RX6110_REG_USER0 0x20
+#define RX6110_REG_USER1 0x21
+#define RX6110_REG_USER2 0x22
+#define RX6110_REG_USER3 0x23
+#define RX6110_REG_USER4 0x24
+#define RX6110_REG_USER5 0x25
+#define RX6110_REG_USER6 0x26
+#define RX6110_REG_USER7 0x27
+#define RX6110_REG_USER8 0x28
+#define RX6110_REG_USER9 0x29
+#define RX6110_REG_USERA 0x2A
+#define RX6110_REG_USERB 0x2B
+#define RX6110_REG_USERC 0x2C
+#define RX6110_REG_USERD 0x2D
+#define RX6110_REG_USERE 0x2E
+#define RX6110_REG_USERF 0x2F
+#define RX6110_REG_RES2 0x30
+#define RX6110_REG_RES3 0x31
+#define RX6110_REG_IRQ 0x32
+
+#define RX6110_BIT_ALARM_EN BIT(7)
+
+/* Extension Register (1Dh) bit positions */
+#define RX6110_BIT_EXT_TSEL0 BIT(0)
+#define RX6110_BIT_EXT_TSEL1 BIT(1)
+#define RX6110_BIT_EXT_TSEL2 BIT(2)
+#define RX6110_BIT_EXT_WADA BIT(3)
+#define RX6110_BIT_EXT_TE BIT(4)
+#define RX6110_BIT_EXT_USEL BIT(5)
+#define RX6110_BIT_EXT_FSEL0 BIT(6)
+#define RX6110_BIT_EXT_FSEL1 BIT(7)
+
+/* Flag Register (1Eh) bit positions */
+#define RX6110_BIT_FLAG_VLF BIT(1)
+#define RX6110_BIT_FLAG_AF BIT(3)
+#define RX6110_BIT_FLAG_TF BIT(4)
+#define RX6110_BIT_FLAG_UF BIT(5)
+
+/* Control Register (1Fh) bit positions */
+#define RX6110_BIT_CTRL_TBKE BIT(0)
+#define RX6110_BIT_CTRL_TBKON BIT(1)
+#define RX6110_BIT_CTRL_TSTP BIT(2)
+#define RX6110_BIT_CTRL_AIE BIT(3)
+#define RX6110_BIT_CTRL_TIE BIT(4)
+#define RX6110_BIT_CTRL_UIE BIT(5)
+#define RX6110_BIT_CTRL_STOP BIT(6)
+#define RX6110_BIT_CTRL_TEST BIT(7)
+
+enum {
+ RTC_SEC = 0,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WDAY,
+ RTC_MDAY,
+ RTC_MONTH,
+ RTC_YEAR,
+ RTC_NR_TIME
+};
+
+#define RX6110_DRIVER_NAME "rx6110"
+
+struct rx6110_data {
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+};
+
+/**
+ * rx6110_rtc_tm_to_data - convert rtc_time to native time encoding
+ *
+ * @tm: holds date and time
+ * @data: holds the encoding in rx6110 native form
+ */
+static int rx6110_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+{
+ pr_debug("%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ /*
+ * The year in the RTC is a value between 0 and 99.
+ * Assume that this represents the current century
+ * and disregard all other values.
+ */
+ if (tm->tm_year < 100 || tm->tm_year >= 200)
+ return -EINVAL;
+
+ data[RTC_SEC] = bin2bcd(tm->tm_sec);
+ data[RTC_MIN] = bin2bcd(tm->tm_min);
+ data[RTC_HOUR] = bin2bcd(tm->tm_hour);
+ data[RTC_WDAY] = BIT(bin2bcd(tm->tm_wday));
+ data[RTC_MDAY] = bin2bcd(tm->tm_mday);
+ data[RTC_MONTH] = bin2bcd(tm->tm_mon + 1);
+ data[RTC_YEAR] = bin2bcd(tm->tm_year % 100);
+
+ return 0;
+}
+
+/**
+ * rx6110_data_to_rtc_tm - convert native time encoding to rtc_time
+ *
+ * @data: holds the encoding in rx6110 native form
+ * @tm: holds date and time
+ */
+static int rx6110_data_to_rtc_tm(u8 *data, struct rtc_time *tm)
+{
+ tm->tm_sec = bcd2bin(data[RTC_SEC] & 0x7f);
+ tm->tm_min = bcd2bin(data[RTC_MIN] & 0x7f);
+ /* only 24-hour clock */
+ tm->tm_hour = bcd2bin(data[RTC_HOUR] & 0x3f);
+ /* the day-of-week register is one-hot; ffs() is 1-based */
+ tm->tm_wday = ffs(data[RTC_WDAY] & 0x7f) - 1;
+ tm->tm_mday = bcd2bin(data[RTC_MDAY] & 0x3f);
+ tm->tm_mon = bcd2bin(data[RTC_MONTH] & 0x1f) - 1;
+ tm->tm_year = bcd2bin(data[RTC_YEAR]) + 100;
+
+ pr_debug("%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ /*
+ * The year in the RTC is a value between 0 and 99.
+ * Assume that this represents the current century
+ * and disregard all other values.
+ */
+ if (tm->tm_year < 100 || tm->tm_year >= 200)
+ return -EINVAL;
+
+ return 0;
+}
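
The weekday is stored one-hot, so the set bit's position is the day number; with the 1-based ffs() corrected above, encode and decode round-trip. For Wednesday (tm_wday == 3):

/* encode: BIT(3) == 0x08 is written to RX6110_REG_WDAY */
/* decode: ffs(0x08) == 4, and 4 - 1 == 3 == tm_wday    */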
+
+/**
+ * rx6110_set_time - set the current time in the rx6110 registers
+ *
+ * @dev: the rtc device in use
+ * @tm: holds date and time
+ *
+ * BUG: the hardware assumes that every year divisible by 4 is a leap
+ * year. The next time this goes wrong is in 2100, which will not be a
+ * leap year.
+ *
+ * Note: If STOP is not set/cleared, the clock will start when the seconds
+ * register is written
+ *
+ */
+static int rx6110_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rx6110_data *rx6110 = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int ret;
+
+ ret = rx6110_rtc_tm_to_data(tm, data);
+ if (ret < 0)
+ return ret;
+
+ /* set STOP bit before changing clock/calendar */
+ ret = regmap_update_bits(rx6110->regmap, RX6110_REG_CTRL,
+ RX6110_BIT_CTRL_STOP, RX6110_BIT_CTRL_STOP);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(rx6110->regmap, RX6110_REG_SEC, data,
+ RTC_NR_TIME);
+ if (ret)
+ return ret;
+
+ /* The time in the RTC is valid. Be sure to have VLF cleared. */
+ ret = regmap_update_bits(rx6110->regmap, RX6110_REG_FLAG,
+ RX6110_BIT_FLAG_VLF, 0);
+ if (ret)
+ return ret;
+
+ /* clear STOP bit after changing clock/calendar */
+ ret = regmap_update_bits(rx6110->regmap, RX6110_REG_CTRL,
+ RX6110_BIT_CTRL_STOP, 0);
+
+ return ret;
+}
+
+/**
+ * rx6110_get_time - get the current time from the rx6110 registers
+ * @dev: the rtc device in use
+ * @tm: holds date and time
+ */
+static int rx6110_get_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rx6110_data *rx6110 = dev_get_drvdata(dev);
+ u8 data[RTC_NR_TIME];
+ int flags;
+ int ret;
+
+ ret = regmap_read(rx6110->regmap, RX6110_REG_FLAG, &flags);
+ if (ret)
+ return -EINVAL;
+
+ /* check for VLF Flag (set at power-on) */
+ if (flags & RX6110_BIT_FLAG_VLF) {
+ dev_warn(dev, "Voltage low, data is invalid.\n");
+ return -EINVAL;
+ }
+
+ /* read the time/date registers */
+ ret = regmap_bulk_read(rx6110->regmap, RX6110_REG_SEC, data,
+ RTC_NR_TIME);
+ if (ret)
+ return ret;
+
+ ret = rx6110_data_to_rtc_tm(data, tm);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "%s: date %ds %dm %dh %dmd %dm %dy\n", __func__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ return rtc_valid_tm(tm);
+}
+
+static const struct reg_sequence rx6110_default_regs[] = {
+ { RX6110_REG_RES1, 0xB8 },
+ { RX6110_REG_RES2, 0x00 },
+ { RX6110_REG_RES3, 0x10 },
+ { RX6110_REG_IRQ, 0x00 },
+ { RX6110_REG_ALMIN, 0x00 },
+ { RX6110_REG_ALHOUR, 0x00 },
+ { RX6110_REG_ALWDAY, 0x00 },
+};
+
+/**
+ * rx6110_init - initialize the rx6110 registers
+ *
+ * @rx6110: pointer to the rx6110 struct in use
+ *
+ */
+static int rx6110_init(struct rx6110_data *rx6110)
+{
+ struct rtc_device *rtc = rx6110->rtc;
+ int flags;
+ int ret;
+
+ ret = regmap_update_bits(rx6110->regmap, RX6110_REG_EXT,
+ RX6110_BIT_EXT_TE, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_register_patch(rx6110->regmap, rx6110_default_regs,
+ ARRAY_SIZE(rx6110_default_regs));
+ if (ret)
+ return ret;
+
+ ret = regmap_read(rx6110->regmap, RX6110_REG_FLAG, &flags);
+ if (ret)
+ return ret;
+
+ /* check for VLF Flag (set at power-on) */
+ if (flags & RX6110_BIT_FLAG_VLF)
+ dev_warn(&rtc->dev, "Voltage low, data loss detected.\n");
+
+ /* check for Alarm Flag */
+ if (flags & RX6110_BIT_FLAG_AF)
+ dev_warn(&rtc->dev, "An alarm may have been missed.\n");
+
+ /* check for Periodic Timer Flag */
+ if (flags & RX6110_BIT_FLAG_TF)
+ dev_warn(&rtc->dev, "Periodic timer was detected\n");
+
+ /* check for Update Timer Flag */
+ if (flags & RX6110_BIT_FLAG_UF)
+ dev_warn(&rtc->dev, "Update timer was detected\n");
+
+ /* clear all flags BUT VLF */
+ ret = regmap_update_bits(rx6110->regmap, RX6110_REG_FLAG,
+ RX6110_BIT_FLAG_AF |
+ RX6110_BIT_FLAG_UF |
+ RX6110_BIT_FLAG_TF,
+ 0);
+
+ return ret;
+}
+
+static struct rtc_class_ops rx6110_rtc_ops = {
+ .read_time = rx6110_get_time,
+ .set_time = rx6110_set_time,
+};
+
+static struct regmap_config regmap_spi_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RX6110_REG_IRQ,
+ .read_flag_mask = 0x80,
+};
+
+/**
+ * rx6110_probe - initialize rtc driver
+ * @spi: pointer to spi device
+ */
+static int rx6110_probe(struct spi_device *spi)
+{
+ struct rx6110_data *rx6110;
+ int err;
+
+ if ((spi->bits_per_word && spi->bits_per_word != 8) ||
+ (spi->max_speed_hz > 2000000) ||
+ (spi->mode != (SPI_CS_HIGH | SPI_CPOL | SPI_CPHA))) {
+ dev_warn(&spi->dev, "SPI settings: bits_per_word: %d, max_speed_hz: %d, mode: %xh\n",
+ spi->bits_per_word, spi->max_speed_hz, spi->mode);
+ dev_warn(&spi->dev, "driving device in an unsupported mode");
+ }
+
+ rx6110 = devm_kzalloc(&spi->dev, sizeof(*rx6110), GFP_KERNEL);
+ if (!rx6110)
+ return -ENOMEM;
+
+ rx6110->regmap = devm_regmap_init_spi(spi, &regmap_spi_config);
+ if (IS_ERR(rx6110->regmap)) {
+ dev_err(&spi->dev, "regmap init failed for rtc rx6110\n");
+ return PTR_ERR(rx6110->regmap);
+ }
+
+ spi_set_drvdata(spi, rx6110);
+
+ rx6110->rtc = devm_rtc_device_register(&spi->dev,
+ RX6110_DRIVER_NAME,
+ &rx6110_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rx6110->rtc))
+ return PTR_ERR(rx6110->rtc);
+
+ err = rx6110_init(rx6110);
+ if (err)
+ return err;
+
+ rx6110->rtc->max_user_freq = 1;
+
+ return 0;
+}
+
+static int rx6110_remove(struct spi_device *spi)
+{
+ return 0;
+}
+
+static const struct spi_device_id rx6110_id[] = {
+ { "rx6110", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, rx6110_id);
+
+static struct spi_driver rx6110_driver = {
+ .driver = {
+ .name = RX6110_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = rx6110_probe,
+ .remove = rx6110_remove,
+ .id_table = rx6110_id,
+};
+
+module_spi_driver(rx6110_driver);
+
+MODULE_AUTHOR("Val Krutov <val.krutov@erd.epson.com>");
+MODULE_DESCRIPTION("RX-6110 SA RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index bd911bafb809..9f105efbc546 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -7,7 +7,7 @@
* All rights reserved.
*
* Modified by fengjh at rising.com.cn
- * <http://lists.lm-sensors.org/mailman/listinfo/lm-sensors>
+ * <lm-sensors@lm-sensors.org>
* 2006.11
*
* Code cleanup by Sergei Poselenov, <sposelenov@emcraft.com>
@@ -65,7 +65,6 @@
static const struct i2c_device_id rx8025_id[] = {
{ "rx8025", 0 },
- { "rv8803", 1 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rx8025_id);
@@ -147,8 +146,10 @@ static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
struct rx8025_data *rx8025 = i2c_get_clientdata(client);
+ struct mutex *lock = &rx8025->rtc->ops_lock;
int status;
+ mutex_lock(lock);
status = rx8025_read_reg(client, RX8025_REG_CTRL2);
if (status < 0)
goto out;
@@ -173,6 +174,8 @@ static irqreturn_t rx8025_handle_irq(int irq, void *dev_id)
}
out:
+ mutex_unlock(lock);
+
return IRQ_HANDLED;
}
@@ -341,7 +344,17 @@ static int rx8025_set_alarm(struct device *dev, struct rtc_wkalrm *t)
if (client->irq <= 0)
return -EINVAL;
- /* Hardware alarm precision is 1 minute! */
+ /*
+ * Hardware alarm precision is 1 minute!
+ * round up to nearest minute
+ */
+ if (t->time.tm_sec) {
+ time64_t alarm_time = rtc_tm_to_time64(&t->time);
+
+ alarm_time += 60 - t->time.tm_sec;
+ rtc_time64_to_tm(alarm_time, &t->time);
+ }
+
ald[0] = bin2bcd(t->time.tm_min);
if (rx8025->ctrl1 & RX8025_BIT_CTRL1_1224)
ald[1] = bin2bcd(t->time.tm_hour);
@@ -539,8 +552,9 @@ static int rx8025_probe(struct i2c_client *client,
if (client->irq > 0) {
dev_info(&client->dev, "IRQ %d supplied\n", client->irq);
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- rx8025_handle_irq, 0, "rx8025",
- client);
+ rx8025_handle_irq,
+ IRQF_ONESHOT,
+ "rx8025", client);
if (err) {
dev_err(&client->dev, "unable to request IRQ, alarms disabled\n");
client->irq = 0;
@@ -549,6 +563,9 @@ static int rx8025_probe(struct i2c_client *client,
rx8025->rtc->max_user_freq = 1;
+ /* the rx8025 alarm only has minute accuracy */
+ rx8025->rtc->uie_unsupported = 1;
+
err = rx8025_sysfs_register(&client->dev);
return err;
}
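
Since the alarm registers have no seconds field, any request with nonzero seconds is rounded up to the next whole minute before being programmed, and UIE (1 Hz update interrupts) is marked unsupported. A worked example of the rounding added above:

/* requested 10:15:30 -> alarm_time += 60 - 30 -> fires at 10:16:00 */
/* requested 10:15:00 -> tm_sec == 0, left unchanged at 10:15:00    */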
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index ffb860d18701..d01ad7e8078e 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -501,18 +501,27 @@ static int s3c_rtc_probe(struct platform_device *pdev)
info->rtc_clk = devm_clk_get(&pdev->dev, "rtc");
if (IS_ERR(info->rtc_clk)) {
- dev_err(&pdev->dev, "failed to find rtc clock\n");
- return PTR_ERR(info->rtc_clk);
+ ret = PTR_ERR(info->rtc_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to find rtc clock\n");
+ else
+ dev_dbg(&pdev->dev, "probe deferred due to missing rtc clk\n");
+ return ret;
}
clk_prepare_enable(info->rtc_clk);
if (info->data->needs_src_clk) {
info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
if (IS_ERR(info->rtc_src_clk)) {
- dev_err(&pdev->dev,
- "failed to find rtc source clock\n");
+ ret = PTR_ERR(info->rtc_src_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to find rtc source clock\n");
+ else
+ dev_dbg(&pdev->dev,
+ "probe deferred due to missing rtc src clk\n");
clk_disable_unprepare(info->rtc_clk);
- return PTR_ERR(info->rtc_src_clk);
+ return ret;
}
clk_prepare_enable(info->rtc_src_clk);
}
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index 7407d7394bb4..0477678d968f 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -216,7 +216,7 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
* Read RTC_UDR_CON register and wait till UDR field is cleared.
* This indicates that time/alarm update ended.
*/
-static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
+static int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
{
int ret, retry = UDR_READ_RETRY_CNT;
unsigned int data;
@@ -231,7 +231,7 @@ static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
return ret;
}
-static inline int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
+static int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
struct rtc_wkalrm *alarm)
{
int ret;
@@ -264,7 +264,7 @@ static inline int s5m_check_peding_alarm_interrupt(struct s5m_rtc_info *info,
return 0;
}
-static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
+static int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
{
int ret;
unsigned int data;
@@ -288,7 +288,7 @@ static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
return ret;
}
-static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
+static int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
{
int ret;
unsigned int data;
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 463e286064ab..63b9fb1318c2 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -218,6 +218,34 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(wakealarm);
+static ssize_t
+offset_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ ssize_t retval;
+ long offset;
+
+ retval = rtc_read_offset(to_rtc_device(dev), &offset);
+ if (retval == 0)
+ retval = sprintf(buf, "%ld\n", offset);
+
+ return retval;
+}
+
+static ssize_t
+offset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ ssize_t retval;
+ long offset;
+
+ retval = kstrtol(buf, 10, &offset);
+ if (retval == 0)
+ retval = rtc_set_offset(to_rtc_device(dev), offset);
+
+ return (retval < 0) ? retval : n;
+}
+static DEVICE_ATTR_RW(offset);
+
static struct attribute *rtc_attrs[] = {
&dev_attr_name.attr,
&dev_attr_date.attr,
@@ -226,6 +254,7 @@ static struct attribute *rtc_attrs[] = {
&dev_attr_max_user_freq.attr,
&dev_attr_hctosys.attr,
&dev_attr_wakealarm.attr,
+ &dev_attr_offset.attr,
NULL,
};
@@ -249,9 +278,13 @@ static umode_t rtc_attr_is_visible(struct kobject *kobj,
struct rtc_device *rtc = to_rtc_device(dev);
umode_t mode = attr->mode;
- if (attr == &dev_attr_wakealarm.attr)
+ if (attr == &dev_attr_wakealarm.attr) {
if (!rtc_does_wakealarm(rtc))
mode = 0;
+ } else if (attr == &dev_attr_offset.attr) {
+ if (!rtc->ops->set_offset)
+ mode = 0;
+ }
return mode;
}
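
With the attribute wired up and gated on set_offset support, the offset is reachable from user space through the rtc class directory. A minimal sketch, assuming a device registered as rtc0 whose driver implements set_offset (units are driver-specific; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/sys/class/rtc/rtc0/offset", O_RDWR);

	if (fd < 0)
		return 1;
	write(fd, "1000", strlen("1000"));	/* store a new offset */
	lseek(fd, 0, SEEK_SET);
	n = read(fd, buf, sizeof(buf) - 1);	/* read it back */
	if (n > 0) {
		buf[n] = '\0';
		printf("offset: %s", buf);
	}
	close(fd);
	return 0;
}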
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 3b6ce80a769c..e404faac6851 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -286,7 +286,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
tps6586x_rtc_irq,
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
dev_name(&pdev->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "request IRQ(%d) failed with ret %d\n",
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index f42aa2b2dcba..5a3d53caa485 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -268,7 +268,7 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
}
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- tps65910_rtc_interrupt, IRQF_TRIGGER_LOW | IRQF_EARLY_RESUME,
+ tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
dev_name(&pdev->dev), &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
diff --git a/drivers/rtc/rtc-tps80031.c b/drivers/rtc/rtc-tps80031.c
index 27e254cde715..737f26eb284a 100644
--- a/drivers/rtc/rtc-tps80031.c
+++ b/drivers/rtc/rtc-tps80031.c
@@ -287,7 +287,7 @@ static int tps80031_rtc_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
tps80031_rtc_irq,
- IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ IRQF_ONESHOT,
dev_name(&pdev->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "request IRQ:%d failed, err = %d\n",
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index f64c282275b3..e1b86bb01062 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
}
static const struct rtc_class_ops vr41xx_rtc_ops = {
- .release = vr41xx_rtc_release,
- .ioctl = vr41xx_rtc_ioctl,
- .read_time = vr41xx_rtc_read_time,
- .set_time = vr41xx_rtc_set_time,
- .read_alarm = vr41xx_rtc_read_alarm,
- .set_alarm = vr41xx_rtc_set_alarm,
+ .release = vr41xx_rtc_release,
+ .ioctl = vr41xx_rtc_ioctl,
+ .read_time = vr41xx_rtc_read_time,
+ .set_time = vr41xx_rtc_set_time,
+ .read_alarm = vr41xx_rtc_read_alarm,
+ .set_alarm = vr41xx_rtc_set_alarm,
+ .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
};
static int rtc_probe(struct platform_device *pdev)
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 286782c60da4..1e560188dd13 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -185,14 +185,12 @@ static void _free_lcu(struct alias_lcu *lcu)
*/
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
unsigned long flags;
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
struct dasd_uid uid;
- private = (struct dasd_eckd_private *) device->private;
-
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(&uid);
@@ -244,14 +242,13 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
*/
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
unsigned long flags;
struct alias_lcu *lcu;
struct alias_server *server;
int was_pending;
struct dasd_uid uid;
- private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
/* nothing to do if already disconnected */
if (!lcu)
@@ -316,31 +313,21 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
struct dasd_device *pos)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
struct alias_pav_group *group;
struct dasd_uid uid;
- unsigned long flags;
- private = (struct dasd_eckd_private *) device->private;
-
- /* only lock if not already locked */
- if (device != pos)
- spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
- CDEV_NESTED_SECOND);
+ spin_lock(get_ccwdev_lock(device->cdev));
private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
private->uid.base_unit_addr =
lcu->uac->unit[private->uid.real_unit_addr].base_ua;
uid = private->uid;
-
- if (device != pos)
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-
+ spin_unlock(get_ccwdev_lock(device->cdev));
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
list_move(&device->alias_list, &lcu->active_devices);
return 0;
}
-
group = _find_group(lcu, &uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
@@ -370,10 +357,9 @@ static int _add_device_to_lcu(struct alias_lcu *lcu,
static void _remove_device_from_lcu(struct alias_lcu *lcu,
struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
struct alias_pav_group *group;
- private = (struct dasd_eckd_private *) device->private;
list_move(&device->alias_list, &lcu->inactive_devices);
group = private->pavgroup;
if (!group)
@@ -487,13 +473,13 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
alias_list) {
list_move(&device->alias_list, &lcu->active_devices);
- private = (struct dasd_eckd_private *) device->private;
+ private = device->private;
private->pavgroup = NULL;
}
list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
alias_list) {
list_move(&device->alias_list, &lcu->active_devices);
- private = (struct dasd_eckd_private *) device->private;
+ private = device->private;
private->pavgroup = NULL;
}
list_del(&pavgroup->group);
@@ -505,10 +491,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
if (rc)
return rc;
- /* need to take cdev lock before lcu lock */
- spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
- CDEV_NESTED_FIRST);
- spin_lock(&lcu->lock);
+ spin_lock_irqsave(&lcu->lock, flags);
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
@@ -527,8 +510,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
alias_list) {
_add_device_to_lcu(lcu, device, refdev);
}
- spin_unlock(&lcu->lock);
- spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
+ spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
@@ -608,18 +590,14 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
int dasd_alias_add_device(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
struct alias_lcu *lcu;
unsigned long flags;
int rc;
- private = (struct dasd_eckd_private *) device->private;
lcu = private->lcu;
rc = 0;
-
- /* need to take cdev lock before lcu lock */
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- spin_lock(&lcu->lock);
+ spin_lock_irqsave(&lcu->lock, flags);
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
@@ -629,27 +607,24 @@ int dasd_alias_add_device(struct dasd_device *device)
list_move(&device->alias_list, &lcu->active_devices);
_schedule_lcu_update(lcu, device);
}
- spin_unlock(&lcu->lock);
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ spin_unlock_irqrestore(&lcu->lock, flags);
return rc;
}
int dasd_alias_update_add_device(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
- private = (struct dasd_eckd_private *) device->private;
+ struct dasd_eckd_private *private = device->private;
+
private->lcu->flags |= UPDATE_PENDING;
return dasd_alias_add_device(device);
}
int dasd_alias_remove_device(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
- struct alias_lcu *lcu;
+ struct dasd_eckd_private *private = device->private;
+ struct alias_lcu *lcu = private->lcu;
unsigned long flags;
- private = (struct dasd_eckd_private *) device->private;
- lcu = private->lcu;
/* nothing to do if already removed */
if (!lcu)
return 0;
@@ -661,16 +636,12 @@ int dasd_alias_remove_device(struct dasd_device *device)
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
-
+ struct dasd_eckd_private *alias_priv, *private = base_device->private;
+ struct alias_pav_group *group = private->pavgroup;
+ struct alias_lcu *lcu = private->lcu;
struct dasd_device *alias_device;
- struct alias_pav_group *group;
- struct alias_lcu *lcu;
- struct dasd_eckd_private *private, *alias_priv;
unsigned long flags;
- private = (struct dasd_eckd_private *) base_device->private;
- group = private->pavgroup;
- lcu = private->lcu;
if (!group || !lcu)
return NULL;
if (lcu->pav == NO_PAV ||
@@ -706,7 +677,7 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
group->next = list_first_entry(&alias_device->alias_list,
struct dasd_device, alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
- alias_priv = (struct dasd_eckd_private *) alias_device->private;
+ alias_priv = alias_device->private;
if ((alias_priv->count < private->count) && !alias_device->stopped &&
!test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
return alias_device;
@@ -754,30 +725,19 @@ static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device;
struct dasd_eckd_private *private;
- unsigned long flags;
/* active and inactive list can contain alias as well as base devices */
list_for_each_entry(device, &lcu->active_devices, alias_list) {
- private = (struct dasd_eckd_private *) device->private;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- if (private->uid.type != UA_BASE_DEVICE) {
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
- flags);
+ private = device->private;
+ if (private->uid.type != UA_BASE_DEVICE)
continue;
- }
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
- private = (struct dasd_eckd_private *) device->private;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- if (private->uid.type != UA_BASE_DEVICE) {
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
- flags);
+ private = device->private;
+ if (private->uid.type != UA_BASE_DEVICE)
continue;
- }
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
@@ -812,7 +772,7 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
spin_lock_irqsave(&lcu->lock, flags);
list_for_each_entry_safe(device, temp, &lcu->active_devices,
alias_list) {
- private = (struct dasd_eckd_private *) device->private;
+ private = device->private;
if (private->uid.type == UA_BASE_DEVICE)
continue;
list_move(&device->alias_list, &active);
@@ -834,45 +794,39 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
if (device == list_first_entry(&active,
struct dasd_device, alias_list)) {
list_move(&device->alias_list, &lcu->active_devices);
- private = (struct dasd_eckd_private *) device->private;
+ private = device->private;
private->pavgroup = NULL;
}
}
spin_unlock_irqrestore(&lcu->lock, flags);
}
-static void __stop_device_on_lcu(struct dasd_device *device,
- struct dasd_device *pos)
-{
- /* If pos == device then device is already locked! */
- if (pos == device) {
- dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
- return;
- }
- spin_lock(get_ccwdev_lock(pos->cdev));
- dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
- spin_unlock(get_ccwdev_lock(pos->cdev));
-}
-
-/*
- * This function is called in interrupt context, so the
- * cdev lock for device is already locked!
- */
-static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
- struct dasd_device *device)
+static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
- struct dasd_device *pos;
+ struct dasd_device *device;
- list_for_each_entry(pos, &lcu->active_devices, alias_list)
- __stop_device_on_lcu(device, pos);
- list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
- __stop_device_on_lcu(device, pos);
+ list_for_each_entry(device, &lcu->active_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
- list_for_each_entry(pos, &pavgroup->baselist, alias_list)
- __stop_device_on_lcu(device, pos);
- list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
- __stop_device_on_lcu(device, pos);
+ list_for_each_entry(device, &pavgroup->baselist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
+ list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
+ spin_lock(get_ccwdev_lock(device->cdev));
+ dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
+ spin_unlock(get_ccwdev_lock(device->cdev));
+ }
}
}
@@ -880,32 +834,27 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device;
- unsigned long flags;
list_for_each_entry(device, &lcu->active_devices, alias_list) {
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ spin_unlock(get_ccwdev_lock(device->cdev));
}
-
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+ spin_unlock(get_ccwdev_lock(device->cdev));
}
-
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
- flags);
+ spin_unlock(get_ccwdev_lock(device->cdev));
}
list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
- flags);
+ spin_unlock(get_ccwdev_lock(device->cdev));
}
}
}
@@ -942,42 +891,22 @@ static void summary_unit_check_handling_work(struct work_struct *work)
spin_unlock_irqrestore(&lcu->lock, flags);
}
-/*
- * note: this will be called from int handler context (cdev locked)
- */
-void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
- struct irb *irb)
+void dasd_alias_handle_summary_unit_check(struct work_struct *work)
{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ suc_work);
+ struct dasd_eckd_private *private = device->private;
struct alias_lcu *lcu;
- char reason;
- struct dasd_eckd_private *private;
- char *sense;
-
- private = (struct dasd_eckd_private *) device->private;
-
- sense = dasd_get_sense(irb);
- if (sense) {
- reason = sense[8];
- DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
- "eckd handle summary unit check: reason", reason);
- } else {
- DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "eckd handle summary unit check:"
- " no reason code available");
- return;
- }
+ unsigned long flags;
lcu = private->lcu;
if (!lcu) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device not ready to handle summary"
" unit check (no lcu structure)");
- return;
+ goto out;
}
- spin_lock(&lcu->lock);
- _stop_all_devices_on_lcu(lcu, device);
- /* prepare for lcu_update */
- private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+ spin_lock_irqsave(&lcu->lock, flags);
/* If this device is about to be removed just return and wait for
* the next interrupt on a different device
*/
@@ -985,21 +914,26 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device is in offline processing,"
" don't do summary unit check handling");
- spin_unlock(&lcu->lock);
- return;
+ goto out_unlock;
}
if (lcu->suc_data.device) {
/* already scheduled or running */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"previous instance of summary unit check worker"
" still pending");
- spin_unlock(&lcu->lock);
- return ;
+ goto out_unlock;
}
- lcu->suc_data.reason = reason;
+ _stop_all_devices_on_lcu(lcu);
+ /* prepare for lcu_update */
+ lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
+ lcu->suc_data.reason = private->suc_reason;
lcu->suc_data.device = device;
dasd_get_device(device);
- spin_unlock(&lcu->lock);
if (!schedule_work(&lcu->suc_data.worker))
dasd_put_device(device);
+out_unlock:
+ spin_unlock_irqrestore(&lcu->lock, flags);
+out:
+ clear_bit(DASD_FLAG_SUC, &device->flags);
+ dasd_put_device(device);
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 8286f742436b..2f18f61092b5 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -214,8 +214,8 @@ dasd_feature_list(char *str, char **endp)
else if (len == 8 && !strncmp(str, "failfast", 8))
features |= DASD_FEATURE_FAILFAST;
else {
- pr_warning("%*s is not a supported device option\n",
- len, str);
+ pr_warn("%*s is not a supported device option\n",
+ len, str);
rc = -EINVAL;
}
str += len;
@@ -224,8 +224,7 @@ dasd_feature_list(char *str, char **endp)
str++;
}
if (*str != ')') {
- pr_warning("A closing parenthesis ')' is missing in the "
- "dasd= parameter\n");
+ pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
rc = -EINVAL;
} else
str++;
@@ -348,8 +347,7 @@ dasd_parse_range( char *parsestring ) {
return str + 1;
if (*str == '\0')
return str;
- pr_warning("The dasd= parameter value %s has an invalid ending\n",
- str);
+ pr_warn("The dasd= parameter value %s has an invalid ending\n", str);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 277b5c8c825c..5667146c6a0a 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -104,12 +104,10 @@ static inline int
mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
blocknum_t offset, blocknum_t *end_block)
{
- struct dasd_diag_private *private;
- struct dasd_diag_init_io *iib;
+ struct dasd_diag_private *private = device->private;
+ struct dasd_diag_init_io *iib = &private->iib;
int rc;
- private = (struct dasd_diag_private *) device->private;
- iib = &private->iib;
memset(iib, 0, sizeof (struct dasd_diag_init_io));
iib->dev_nr = private->dev_id.devno;
@@ -130,12 +128,10 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
static inline int
mdsk_term_io(struct dasd_device * device)
{
- struct dasd_diag_private *private;
- struct dasd_diag_init_io *iib;
+ struct dasd_diag_private *private = device->private;
+ struct dasd_diag_init_io *iib = &private->iib;
int rc;
- private = (struct dasd_diag_private *) device->private;
- iib = &private->iib;
memset(iib, 0, sizeof (struct dasd_diag_init_io));
iib->dev_nr = private->dev_id.devno;
rc = dia250(iib, TERM_BIO);
@@ -153,14 +149,13 @@ dasd_diag_erp(struct dasd_device *device)
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
if (rc == 4) {
if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
- pr_warning("%s: The access mode of a DIAG device "
- "changed to read-only\n",
- dev_name(&device->cdev->dev));
+ pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
+ dev_name(&device->cdev->dev));
rc = 0;
}
if (rc)
- pr_warning("%s: DIAG ERP failed with "
- "rc=%d\n", dev_name(&device->cdev->dev), rc);
+ pr_warn("%s: DIAG ERP failed with rc=%d\n",
+ dev_name(&device->cdev->dev), rc);
}
/* Start a given request at the device. Return zero on success, non-zero
@@ -180,8 +175,8 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
- private = (struct dasd_diag_private *) device->private;
- dreq = (struct dasd_diag_req *) cqr->data;
+ private = device->private;
+ dreq = cqr->data;
private->iob.dev_nr = private->dev_id.devno;
private->iob.key = 0;
@@ -320,18 +315,17 @@ static void dasd_ext_handler(struct ext_code ext_code,
static int
dasd_diag_check_device(struct dasd_device *device)
{
- struct dasd_block *block;
- struct dasd_diag_private *private;
+ struct dasd_diag_private *private = device->private;
struct dasd_diag_characteristics *rdc_data;
- struct dasd_diag_bio bio;
struct vtoc_cms_label *label;
- blocknum_t end_block;
+ struct dasd_block *block;
+ struct dasd_diag_bio bio;
unsigned int sb, bsize;
+ blocknum_t end_block;
int rc;
- private = (struct dasd_diag_private *) device->private;
if (private == NULL) {
- private = kzalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
if (private == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Allocating memory for private DASD data "
@@ -339,7 +333,7 @@ dasd_diag_check_device(struct dasd_device *device)
return -ENOMEM;
}
ccw_device_get_id(device->cdev, &private->dev_id);
- device->private = (void *) private;
+ device->private = private;
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
@@ -353,7 +347,7 @@ dasd_diag_check_device(struct dasd_device *device)
block->base = device;
/* Read Device Characteristics */
- rdc_data = (void *) &(private->rdc_data);
+ rdc_data = &private->rdc_data;
rdc_data->dev_nr = private->dev_id.devno;
rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
@@ -377,9 +371,9 @@ dasd_diag_check_device(struct dasd_device *device)
private->pt_block = 2;
break;
default:
- pr_warning("%s: Device type %d is not supported "
- "in DIAG mode\n", dev_name(&device->cdev->dev),
- private->rdc_data.vdev_class);
+ pr_warn("%s: Device type %d is not supported in DIAG mode\n",
+ dev_name(&device->cdev->dev),
+ private->rdc_data.vdev_class);
rc = -EOPNOTSUPP;
goto out;
}
@@ -420,8 +414,8 @@ dasd_diag_check_device(struct dasd_device *device)
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(&private->iob, RW_BIO);
if (rc == 3) {
- pr_warning("%s: A 64-bit DIAG call failed\n",
- dev_name(&device->cdev->dev));
+ pr_warn("%s: A 64-bit DIAG call failed\n",
+ dev_name(&device->cdev->dev));
rc = -EOPNOTSUPP;
goto out_label;
}
@@ -430,9 +424,8 @@ dasd_diag_check_device(struct dasd_device *device)
break;
}
if (bsize > PAGE_SIZE) {
- pr_warning("%s: Accessing the DASD failed because of an "
- "incorrect format (rc=%d)\n",
- dev_name(&device->cdev->dev), rc);
+ pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
+ dev_name(&device->cdev->dev), rc);
rc = -EIO;
goto out_label;
}
@@ -450,8 +443,8 @@ dasd_diag_check_device(struct dasd_device *device)
block->s2b_shift++;
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
if (rc && (rc != 4)) {
- pr_warning("%s: DIAG initialization failed with rc=%d\n",
- dev_name(&device->cdev->dev), rc);
+ pr_warn("%s: DIAG initialization failed with rc=%d\n",
+ dev_name(&device->cdev->dev), rc);
rc = -EIO;
} else {
if (rc == 4)
@@ -601,16 +594,14 @@ static int
dasd_diag_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
- struct dasd_diag_private *private;
+ struct dasd_diag_private *private = device->private;
- private = (struct dasd_diag_private *) device->private;
info->label_block = (unsigned int) private->pt_block;
info->FBA_layout = 1;
info->format = DASD_FORMAT_LDL;
- info->characteristics_size = sizeof (struct dasd_diag_characteristics);
- memcpy(info->characteristics,
- &((struct dasd_diag_private *) device->private)->rdc_data,
- sizeof (struct dasd_diag_characteristics));
+ info->characteristics_size = sizeof(private->rdc_data);
+ memcpy(info->characteristics, &private->rdc_data,
+ sizeof(private->rdc_data));
info->confdata_size = 0;
return 0;
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 9083247f55a8..c1b4ae55e129 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -212,10 +212,9 @@ check_XRC (struct ccw1 *de_ccw,
struct DE_eckd_data *data,
struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
int rc;
- private = (struct dasd_eckd_private *) device->private;
if (!private->rdc_data.facilities.XRC_supported)
return 0;
@@ -237,13 +236,11 @@ static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
unsigned int totrk, int cmd, struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
u32 begcyl, endcyl;
u16 heads, beghead, endhead;
int rc = 0;
- private = (struct dasd_eckd_private *) device->private;
-
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
@@ -322,10 +319,9 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
int rc;
- private = (struct dasd_eckd_private *) device->private;
if (!private->rdc_data.facilities.XRC_supported)
return 0;
@@ -346,12 +342,10 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
struct dasd_device *device, unsigned int reclen,
unsigned int tlf)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
int sector;
int dn, d;
- private = (struct dasd_eckd_private *) device->private;
-
memset(data, 0, sizeof(*data));
sector = 0;
if (rec_on_trk) {
@@ -488,8 +482,8 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
u16 heads, beghead, endhead;
int rc = 0;
- basepriv = (struct dasd_eckd_private *) basedev->private;
- startpriv = (struct dasd_eckd_private *) startdev->private;
+ basepriv = basedev->private;
+ startpriv = startdev->private;
dedata = &pfxdata->define_extent;
lredata = &pfxdata->locate_record;
@@ -631,12 +625,10 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
unsigned int rec_on_trk, int no_rec, int cmd,
struct dasd_device * device, int reclen)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
int sector;
int dn, d;
- private = (struct dasd_eckd_private *) device->private;
-
DBF_DEV_EVENT(DBF_INFO, device,
"Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
trk, rec_on_trk, no_rec, cmd, reclen);
@@ -800,10 +792,9 @@ static void create_uid(struct dasd_eckd_private *private)
*/
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
unsigned long flags;
- private = (struct dasd_eckd_private *) device->private;
if (!private)
return -ENODEV;
if (!private->ned || !private->gneq)
@@ -816,11 +807,10 @@ static int dasd_eckd_generate_uid(struct dasd_device *device)
static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
unsigned long flags;
- if (device->private) {
- private = (struct dasd_eckd_private *)device->private;
+ if (private) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
*uid = private->uid;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@@ -1034,10 +1024,9 @@ static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
int i;
- private = (struct dasd_eckd_private *) device->private;
private->conf_data = NULL;
private->conf_len = 0;
for (i = 0; i < 8; i++) {
@@ -1058,7 +1047,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
struct dasd_uid *uid;
char print_path_uid[60], print_device_uid[60];
- private = (struct dasd_eckd_private *) device->private;
+ private = device->private;
path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
conf_data_saved = 0;
@@ -1191,11 +1180,10 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
int mdc;
u32 fcx_max_data;
- private = (struct dasd_eckd_private *) device->private;
if (private->fcx_max_data) {
mdc = ccw_device_get_mdc(device->cdev, lpm);
if ((mdc < 0)) {
@@ -1221,15 +1209,10 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
static int rebuild_device_uid(struct dasd_device *device,
struct path_verification_work_data *data)
{
- struct dasd_eckd_private *private;
- struct dasd_path *path_data;
- __u8 lpm, opm;
- int rc;
-
- rc = -ENODEV;
- private = (struct dasd_eckd_private *) device->private;
- path_data = &device->path_data;
- opm = device->path_data.opm;
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_path *path_data = &device->path_data;
+ __u8 lpm, opm = path_data->opm;
+ int rc = -ENODEV;
for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & opm))
@@ -1463,14 +1446,13 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
static int dasd_eckd_read_features(struct dasd_device *device)
{
+ struct dasd_eckd_private *private = device->private;
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_features *features;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
- struct dasd_eckd_private *private;
- private = (struct dasd_eckd_private *) device->private;
memset(&private->features, 0, sizeof(struct dasd_rssd_features));
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
@@ -1605,11 +1587,9 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
static int dasd_eckd_validate_server(struct dasd_device *device,
unsigned long flags)
{
- int rc;
- struct dasd_eckd_private *private;
- int enable_pav;
+ struct dasd_eckd_private *private = device->private;
+ int enable_pav, rc;
- private = (struct dasd_eckd_private *) device->private;
if (private->uid.type == UA_BASE_PAV_ALIAS ||
private->uid.type == UA_HYPER_PAV_ALIAS)
return 0;
@@ -1662,14 +1642,13 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
static u32 get_fcx_max_data(struct dasd_device *device)
{
- int tpm, mdc;
+ struct dasd_eckd_private *private = device->private;
int fcx_in_css, fcx_in_gneq, fcx_in_features;
- struct dasd_eckd_private *private;
+ int tpm, mdc;
if (dasd_nofcx)
return 0;
/* is transport mode supported? */
- private = (struct dasd_eckd_private *) device->private;
fcx_in_css = css_general_characteristics.fcx;
fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
fcx_in_features = private->features.feature[40] & 0x80;
@@ -1694,7 +1673,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
struct dasd_block *block;
struct dasd_uid temp_uid;
int rc, i;
@@ -1703,6 +1682,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
/* setup work queue for validate server */
INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+ /* setup work queue for summary unit check */
+ INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
if (!ccw_device_is_pathgroup(device->cdev)) {
dev_warn(&device->cdev->dev,
@@ -1713,7 +1694,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
dev_info(&device->cdev->dev,
"The DASD is not operating in multipath mode\n");
}
- private = (struct dasd_eckd_private *) device->private;
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private) {
@@ -1722,7 +1702,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
"failed\n");
return -ENOMEM;
}
- device->private = (void *) private;
+ device->private = private;
} else {
memset(private, 0, sizeof(*private));
}
@@ -1837,10 +1817,9 @@ out_err1:
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
int i;
- private = (struct dasd_eckd_private *) device->private;
dasd_alias_disconnect_device_from_lcu(device);
private->ned = NULL;
private->sneq = NULL;
@@ -1863,7 +1842,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
struct eckd_count *count_data;
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
@@ -1871,8 +1850,6 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
int cplength, datasize;
int i;
- private = (struct dasd_eckd_private *) device->private;
-
cplength = 8;
datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
@@ -1946,11 +1923,9 @@ static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
void *data)
{
- struct dasd_eckd_private *private;
- struct dasd_device *device;
+ struct dasd_device *device = init_cqr->startdev;
+ struct dasd_eckd_private *private = device->private;
- device = init_cqr->startdev;
- private = (struct dasd_eckd_private *) device->private;
private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
dasd_sfree_request(init_cqr, device);
dasd_kick_device(device);
@@ -1977,15 +1952,13 @@ static int dasd_eckd_start_analysis(struct dasd_block *block)
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
- struct dasd_device *device;
- struct dasd_eckd_private *private;
+ struct dasd_device *device = block->base;
+ struct dasd_eckd_private *private = device->private;
struct eckd_count *count_area;
unsigned int sb, blk_per_trk;
int status, i;
struct dasd_ccw_req *init_cqr;
- device = block->base;
- private = (struct dasd_eckd_private *) device->private;
status = private->init_cqr_status;
private->init_cqr_status = -1;
if (status == INIT_CQR_ERROR) {
@@ -2083,9 +2056,8 @@ raw:
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = block->base->private;
- private = (struct dasd_eckd_private *) block->base->private;
if (private->init_cqr_status < 0)
return dasd_eckd_start_analysis(block);
else
@@ -2112,9 +2084,8 @@ static int dasd_eckd_basic_to_known(struct dasd_device *device)
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = block->base->private;
- private = (struct dasd_eckd_private *) block->base->private;
if (dasd_check_blocksize(block->bp_block) == 0) {
geo->sectors = recs_per_track(&private->rdc_data,
0, block->bp_block);
@@ -2151,8 +2122,8 @@ dasd_eckd_build_format(struct dasd_device *base,
if (!startdev)
startdev = base;
- start_priv = (struct dasd_eckd_private *) startdev->private;
- base_priv = (struct dasd_eckd_private *) base->private;
+ start_priv = startdev->private;
+ base_priv = base->private;
rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
@@ -2349,14 +2320,14 @@ dasd_eckd_build_format(struct dasd_device *base,
* when formatting CDL
*/
if ((intensity & 0x08) &&
- fdata->start_unit == 0) {
+ address.cyl == 0 && address.head == 0) {
if (i < 3) {
ect->kl = 4;
ect->dl = sizes_trk0[i] - 4;
}
}
if ((intensity & 0x08) &&
- fdata->start_unit == 1) {
+ address.cyl == 0 && address.head == 1) {
ect->kl = 44;
ect->dl = LABEL_SIZE - 44;
}
@@ -2386,23 +2357,24 @@ dasd_eckd_build_format(struct dasd_device *base,
return fcp;
}
-static int
-dasd_eckd_format_device(struct dasd_device *base,
- struct format_data_t *fdata,
- int enable_pav)
+/*
+ * Wrapper function to build a CCW request depending on input data
+ */
+static struct dasd_ccw_req *
+dasd_eckd_format_build_ccw_req(struct dasd_device *base,
+ struct format_data_t *fdata, int enable_pav)
{
- struct dasd_ccw_req *cqr, *n;
- struct dasd_block *block;
- struct dasd_eckd_private *private;
- struct list_head format_queue;
- struct dasd_device *device;
- int old_stop, format_step;
- int step, rc = 0, sleep_rc;
+ return dasd_eckd_build_format(base, fdata, enable_pav);
+}
- block = base->block;
- private = (struct dasd_eckd_private *) base->private;
+/*
+ * Sanity checks on format_data
+ */
+static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
+ struct format_data_t *fdata)
+{
+ struct dasd_eckd_private *private = base->private;
- /* Sanity checks. */
if (fdata->start_unit >=
(private->real_cyl * private->rdc_data.trk_per_cyl)) {
dev_warn(&base->cdev->dev,
@@ -2429,75 +2401,98 @@ dasd_eckd_format_device(struct dasd_device *base,
fdata->blksize);
return -EINVAL;
}
+ return 0;
+}
+
+/*
+ * Process format_data originating from an IOCTL
+ */
+static int dasd_eckd_format_process_data(struct dasd_device *base,
+ struct format_data_t *fdata,
+ int enable_pav)
+{
+ struct dasd_eckd_private *private = base->private;
+ struct dasd_ccw_req *cqr, *n;
+ struct list_head format_queue;
+ struct dasd_device *device;
+ int old_start, old_stop, format_step;
+ int step, retry;
+ int rc;
+
+ rc = dasd_eckd_format_sanity_checks(base, fdata);
+ if (rc)
+ return rc;
INIT_LIST_HEAD(&format_queue);
+ old_start = fdata->start_unit;
old_stop = fdata->stop_unit;
- while (fdata->start_unit <= 1) {
- fdata->stop_unit = fdata->start_unit;
- cqr = dasd_eckd_build_format(base, fdata, enable_pav);
- list_add(&cqr->blocklist, &format_queue);
- fdata->stop_unit = old_stop;
- fdata->start_unit++;
-
- if (fdata->start_unit > fdata->stop_unit)
- goto sleep;
- }
+ format_step = DASD_CQR_MAX_CCW / recs_per_track(&private->rdc_data, 0,
+ fdata->blksize);
+ do {
+ retry = 0;
+ while (fdata->start_unit <= old_stop) {
+ step = fdata->stop_unit - fdata->start_unit + 1;
+ if (step > format_step) {
+ fdata->stop_unit =
+ fdata->start_unit + format_step - 1;
+ }
-retry:
- format_step = 255 / recs_per_track(&private->rdc_data, 0,
- fdata->blksize);
- while (fdata->start_unit <= old_stop) {
- step = fdata->stop_unit - fdata->start_unit + 1;
- if (step > format_step)
- fdata->stop_unit = fdata->start_unit + format_step - 1;
+ cqr = dasd_eckd_format_build_ccw_req(base, fdata,
+ enable_pav);
+ if (IS_ERR(cqr)) {
+ rc = PTR_ERR(cqr);
+ if (rc == -ENOMEM) {
+ if (list_empty(&format_queue))
+ goto out;
+ /*
+ * not enough memory available: start the
+ * queued requests and retry after they
+ * have finished
+ */
+ retry = 1;
+ break;
+ }
+ goto out_err;
+ }
+ list_add_tail(&cqr->blocklist, &format_queue);
- cqr = dasd_eckd_build_format(base, fdata, enable_pav);
- if (IS_ERR(cqr)) {
- if (PTR_ERR(cqr) == -ENOMEM) {
- /*
- * not enough memory available
- * go to out and start requests
- * retry after first requests were finished
- */
- fdata->stop_unit = old_stop;
- goto sleep;
- } else
- return PTR_ERR(cqr);
+ fdata->start_unit = fdata->stop_unit + 1;
+ fdata->stop_unit = old_stop;
}
- list_add(&cqr->blocklist, &format_queue);
- fdata->start_unit = fdata->stop_unit + 1;
- fdata->stop_unit = old_stop;
- }
+ rc = dasd_sleep_on_queue(&format_queue);
-sleep:
- sleep_rc = dasd_sleep_on_queue(&format_queue);
+out_err:
+ list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
+ device = cqr->startdev;
+ private = device->private;
+ if (cqr->status == DASD_CQR_FAILED)
+ rc = -EIO;
+ list_del_init(&cqr->blocklist);
+ dasd_sfree_request(cqr, device);
+ private->count--;
+ }
- list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
- device = cqr->startdev;
- private = (struct dasd_eckd_private *) device->private;
- if (cqr->status == DASD_CQR_FAILED)
- rc = -EIO;
- list_del_init(&cqr->blocklist);
- dasd_sfree_request(cqr, device);
- private->count--;
- }
+ if (rc)
+ goto out;
- if (sleep_rc)
- return sleep_rc;
+ } while (retry);
- /*
- * in case of ENOMEM we need to retry after
- * first requests are finished
- */
- if (fdata->start_unit <= fdata->stop_unit)
- goto retry;
+out:
+ fdata->start_unit = old_start;
+ fdata->stop_unit = old_stop;
return rc;
}
+static int dasd_eckd_format_device(struct dasd_device *base,
+ struct format_data_t *fdata, int enable_pav)
+{
+ return dasd_eckd_format_process_data(base, fdata, enable_pav);
+}
+
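Taken together, the hunks above split the old monolithic format routine into sanity checks, a CCW-request builder, and a chunking loop that walks the track range in steps small enough for a single CQR, queues the requests, and retries once memory pressure (-ENOMEM) clears. A minimal sketch of just the chunking arithmetic, with build_one_request() as a hypothetical stand-in for dasd_eckd_format_build_ccw_req():

    /* Hedged sketch, not driver code: walk [first, last] in steps that
     * keep each request at or below 255 CCWs; blk_per_trk is the number
     * of records, and therefore CCWs, needed per track. */
    static void split_format(int first, int last, int blk_per_trk)
    {
            int step = 255 / blk_per_trk;   /* whole tracks per request */
            int trk, end;

            for (trk = first; trk <= last; trk += step) {
                    end = trk + step - 1;
                    if (end > last)
                            end = last;
                    build_one_request(trk, end);    /* hypothetical */
            }
    }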
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
if (cqr->retries < 0) {
@@ -2543,9 +2538,8 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
{
char mask;
char *sense = NULL;
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
- private = (struct dasd_eckd_private *) device->private;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((scsw_dstat(&irb->scsw) & mask) == mask) {
@@ -2557,14 +2551,6 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
device->state == DASD_STATE_ONLINE &&
!test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
- /*
- * the state change could be caused by an alias
- * reassignment remove device from alias handling
- * to prevent new requests from being scheduled on
- * the wrong alias device
- */
- dasd_alias_remove_device(device);
-
/* schedule worker to reload device */
dasd_reload_device(device);
}
@@ -2579,7 +2565,27 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
/* summary unit check */
if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
(scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
- dasd_alias_handle_summary_unit_check(device, irb);
+ if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "eckd suc: device already notified");
+ return;
+ }
+ sense = dasd_get_sense(irb);
+ if (!sense) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "eckd suc: no reason code available");
+ clear_bit(DASD_FLAG_SUC, &device->flags);
+ return;
+ }
+ private->suc_reason = sense[8];
+ DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
+ "eckd handle summary unit check: reason",
+ private->suc_reason);
+ dasd_get_device(device);
+ if (!schedule_work(&device->suc_work))
+ dasd_put_device(device);
+
return;
}
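The summary-unit-check branch defers the real work to the suc_work worker while guarding against double handling: a flag bit admits only one notification at a time, and a device reference is taken before scheduling so the worker can safely drop it. Since schedule_work() returns false when the work item is already queued, the extra reference is released on that path. The pattern in isolation, with hypothetical names (MY_FLAG_PENDING, get_ref(), put_ref()):

    if (test_and_set_bit(MY_FLAG_PENDING, &dev->flags))
            return;                         /* already notified */
    get_ref(dev);                           /* keep dev alive for the worker */
    if (!schedule_work(&dev->work))         /* false: already queued */
            put_ref(dev);                   /* drop the unneeded reference */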
@@ -2634,7 +2640,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_device *basedev;
basedev = block->base;
- private = (struct dasd_eckd_private *) basedev->private;
+ private = basedev->private;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_MT;
else if (rq_data_dir(req) == WRITE)
@@ -2990,8 +2996,8 @@ static int prepare_itcw(struct itcw *itcw,
/* setup prefix data */
- basepriv = (struct dasd_eckd_private *) basedev->private;
- startpriv = (struct dasd_eckd_private *) startdev->private;
+ basepriv = basedev->private;
+ startpriv = startdev->private;
dedata = &pfxdata.define_extent;
lredata = &pfxdata.locate_record;
@@ -3278,7 +3284,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
struct dasd_ccw_req *cqr;
basedev = block->base;
- private = (struct dasd_eckd_private *) basedev->private;
+ private = basedev->private;
/* Calculate number of blocks/records per track. */
blksize = block->bp_block;
@@ -3503,7 +3509,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (!dasd_page_cache)
goto out;
- private = (struct dasd_eckd_private *) cqr->block->base->private;
+ private = cqr->block->base->private;
blksize = cqr->block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
@@ -3587,7 +3593,7 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
- private = (struct dasd_eckd_private *) startdev->private;
+ private = startdev->private;
if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
return ERR_PTR(-EBUSY);
@@ -3610,7 +3616,7 @@ static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
- private = (struct dasd_eckd_private *) cqr->memdev->private;
+ private = cqr->memdev->private;
private->count--;
spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
return dasd_eckd_free_cp(cqr, req);
@@ -3620,15 +3626,14 @@ static int
dasd_eckd_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
- private = (struct dasd_eckd_private *) device->private;
info->label_block = 2;
info->FBA_layout = private->uses_cdl ? 0 : 1;
info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
- info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
+ info->characteristics_size = sizeof(private->rdc_data);
memcpy(info->characteristics, &private->rdc_data,
- sizeof(struct dasd_eckd_characteristics));
+ sizeof(private->rdc_data));
info->confdata_size = min((unsigned long)private->conf_len,
sizeof(info->configuration_data));
memcpy(info->configuration_data, private->conf_data,
@@ -3941,8 +3946,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
- struct dasd_eckd_private *private =
- (struct dasd_eckd_private *)device->private;
+ struct dasd_eckd_private *private = device->private;
struct attrib_data_t attrib = private->attrib;
int rc;
@@ -3966,8 +3970,7 @@ dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
- struct dasd_eckd_private *private =
- (struct dasd_eckd_private *)device->private;
+ struct dasd_eckd_private *private = device->private;
struct attrib_data_t attrib;
if (!capable(CAP_SYS_ADMIN))
@@ -4430,15 +4433,13 @@ static int dasd_eckd_pm_freeze(struct dasd_device *device)
static int dasd_eckd_restore_device(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
struct dasd_eckd_characteristics temp_rdc_data;
int rc;
struct dasd_uid temp_uid;
unsigned long flags;
unsigned long cqr_flags = 0;
- private = (struct dasd_eckd_private *) device->private;
-
/* Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc) {
@@ -4502,13 +4503,17 @@ out_err:
static int dasd_eckd_reload_device(struct dasd_device *device)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
int rc, old_base;
char print_uid[60];
struct dasd_uid uid;
unsigned long flags;
- private = (struct dasd_eckd_private *) device->private;
+ /*
+ * remove device from alias handling to prevent new requests
+ * from being scheduled on the wrong alias device
+ */
+ dasd_alias_remove_device(device);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
old_base = private->uid.base_unit_addr;
@@ -4556,12 +4561,10 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
{
struct dasd_rssd_messages *message_buf;
struct dasd_psf_prssd_data *prssdp;
- struct dasd_eckd_private *private;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
- private = (struct dasd_eckd_private *) device->private;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_messages)),
@@ -4686,11 +4689,10 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
__u8 lpum,
struct dasd_cuir_message *cuir)
{
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private = device->private;
struct dasd_conf_data *conf_data;
int path, pos;
- private = (struct dasd_eckd_private *) device->private;
if (cuir->record_selector == 0)
goto out;
for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
@@ -4715,9 +4717,9 @@ out:
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
+ struct dasd_eckd_private *private = device->private;
struct dasd_conf_data *ref_conf_data;
unsigned long bitmask = 0, mask = 0;
- struct dasd_eckd_private *private;
struct dasd_conf_data *conf_data;
unsigned int pos, path;
char *ref_gneq, *gneq;
@@ -4730,7 +4732,6 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
!(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
return lpum;
- private = (struct dasd_eckd_private *) device->private;
/* get reference conf data */
ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
/* reference ned is determined by ned_map field */
@@ -4829,14 +4830,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
struct subchannel_id sch_id,
struct dasd_cuir_message *cuir)
{
+ struct dasd_eckd_private *private = device->private;
struct alias_pav_group *pavgroup, *tempgroup;
- struct dasd_eckd_private *private;
struct dasd_device *dev, *n;
unsigned long paths = 0;
unsigned long flags;
int tbcpm;
- private = (struct dasd_eckd_private *) device->private;
/* active devices */
list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
alias_list) {
@@ -4892,13 +4892,12 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
struct subchannel_id sch_id,
struct dasd_cuir_message *cuir)
{
+ struct dasd_eckd_private *private = device->private;
struct alias_pav_group *pavgroup, *tempgroup;
- struct dasd_eckd_private *private;
struct dasd_device *dev, *n;
unsigned long paths = 0;
int tbcpm;
- private = (struct dasd_eckd_private *) device->private;
/*
* the path may have been added through a generic path event before
* only trigger path verification if the path is not already in use
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index f8f91ee652d3..6d9a6d3517cd 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -525,6 +525,7 @@ struct dasd_eckd_private {
int count;
u32 fcx_max_data;
+ char suc_reason;
};
@@ -534,7 +535,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
int dasd_alias_add_device(struct dasd_device *);
int dasd_alias_remove_device(struct dasd_device *);
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
-void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
+void dasd_alias_handle_summary_unit_check(struct work_struct *);
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
void dasd_alias_lcu_setup_complete(struct dasd_device *);
void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index c9262e78938b..d7b5b550364b 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -125,13 +125,11 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
- struct dasd_block *block;
- struct dasd_fba_private *private;
+ struct dasd_fba_private *private = device->private;
struct ccw_device *cdev = device->cdev;
- int rc;
- int readonly;
+ struct dasd_block *block;
+ int readonly, rc;
- private = (struct dasd_fba_private *) device->private;
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private) {
@@ -140,7 +138,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
"data failed\n");
return -ENOMEM;
}
- device->private = (void *) private;
+ device->private = private;
} else {
memset(private, 0, sizeof(*private));
}
@@ -192,10 +190,9 @@ dasd_fba_check_characteristics(struct dasd_device *device)
static int dasd_fba_do_analysis(struct dasd_block *block)
{
- struct dasd_fba_private *private;
+ struct dasd_fba_private *private = block->base->private;
int sb, rc;
- private = (struct dasd_fba_private *) block->base->private;
rc = dasd_check_blocksize(private->rdc_data.blk_size);
if (rc) {
DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
@@ -254,7 +251,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
struct dasd_block *block,
struct request *req)
{
- struct dasd_fba_private *private;
+ struct dasd_fba_private *private = block->base->private;
unsigned long *idaws;
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
@@ -267,7 +264,6 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
unsigned int blksize, off;
unsigned char cmd;
- private = (struct dasd_fba_private *) block->base->private;
if (rq_data_dir(req) == READ) {
cmd = DASD_FBA_CCW_READ;
} else if (rq_data_dir(req) == WRITE) {
@@ -379,7 +375,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
- struct dasd_fba_private *private;
+ struct dasd_fba_private *private = cqr->block->base->private;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
@@ -389,7 +385,6 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
if (!dasd_page_cache)
goto out;
- private = (struct dasd_fba_private *) cqr->block->base->private;
blksize = cqr->block->bp_block;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
@@ -436,13 +431,14 @@ static int
dasd_fba_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
+ struct dasd_fba_private *private = device->private;
+
info->label_block = 1;
info->FBA_layout = 1;
info->format = DASD_FORMAT_LDL;
- info->characteristics_size = sizeof(struct dasd_fba_characteristics);
- memcpy(info->characteristics,
- &((struct dasd_fba_private *) device->private)->rdc_data,
- sizeof (struct dasd_fba_characteristics));
+ info->characteristics_size = sizeof(private->rdc_data);
+ memcpy(info->characteristics, &private->rdc_data,
+ sizeof(private->rdc_data));
info->confdata_size = 0;
return 0;
}
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index ef1d9fb06cab..31d544a87ba9 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -178,8 +178,8 @@ int dasd_gendisk_init(void)
/* Register to static dasd major 94 */
rc = register_blkdev(DASD_MAJOR, "dasd");
if (rc != 0) {
- pr_warning("Registering the device driver with major number "
- "%d failed\n", DASD_MAJOR);
+ pr_warn("Registering the device driver with major number %d failed\n",
+ DASD_MAJOR);
return rc;
}
return 0;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 4aed5ed70836..0f0add932e7a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -241,6 +241,13 @@ struct dasd_ccw_req {
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
/*
+ * A single CQR can only contain a maximum of 255 CCWs. It is limited by
+ * the count field of the locate record and locate record extended
+ * commands, which is only one byte wide.
+ */
+#define DASD_CQR_MAX_CCW 255
+
+/*
* Unique identifier for dasd device.
*/
#define UA_NOT_CONFIGURED 0x00
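The limit is easy to verify: the locate record count field is one byte, so a chained program can address at most 255 records. As a worked example, assuming 12 records per track (4 KB blocks on a 3390), one request covers 255 / 12 = 21 whole tracks, which is exactly the format_step computed in dasd_eckd_format_process_data():

    /* Worked example; 12 records/track assumes 4 KB blocks on a 3390. */
    int tracks_per_cqr = DASD_CQR_MAX_CCW / 12;     /* = 21 */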
@@ -438,7 +445,7 @@ struct dasd_device {
/* Device discipline stuff. */
struct dasd_discipline *discipline;
struct dasd_discipline *base_discipline;
- char *private;
+ void *private;
struct dasd_path path_data;
/* Device state and target state. */
@@ -463,6 +470,7 @@ struct dasd_device {
struct work_struct restore_device;
struct work_struct reload_device;
struct work_struct kick_validate;
+ struct work_struct suc_work;
struct timer_list timer;
debug_info_t *debug_area;
@@ -535,6 +543,7 @@ struct dasd_attention_data {
#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
#define DASD_FLAG_ABORTALL 12 /* Abort all noretry requests */
#define DASD_FLAG_PATH_VERIFY 13 /* Path verification worker running */
+#define DASD_FLAG_SUC 14 /* unhandled summary unit check */
#define DASD_SLEEPON_START_TAG ((void *) 1)
#define DASD_SLEEPON_END_TAG ((void *) 2)
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 02837d0ad942..90f30cc31561 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -203,9 +203,7 @@ static int
dasd_format(struct dasd_block *block, struct format_data_t *fdata)
{
struct dasd_device *base;
- int enable_pav = 1;
- int rc, retries;
- int start, stop;
+ int rc;
base = block->base;
if (base->discipline->format_device == NULL)
@@ -233,30 +231,11 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
bdput(bdev);
}
- retries = 255;
- /* backup start- and endtrack for retries */
- start = fdata->start_unit;
- stop = fdata->stop_unit;
- do {
- rc = base->discipline->format_device(base, fdata, enable_pav);
- if (rc) {
- if (rc == -EAGAIN) {
- retries--;
- /* disable PAV in case of errors */
- enable_pav = 0;
- fdata->start_unit = start;
- fdata->stop_unit = stop;
- } else
- return rc;
- } else
- /* success */
- break;
- } while (retries);
-
- if (!retries)
- return -EIO;
- else
- return 0;
+ rc = base->discipline->format_device(base, fdata, 1);
+ if (rc == -EAGAIN)
+ rc = base->discipline->format_device(base, fdata, 0);
+
+ return rc;
}
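The retry policy collapses to two attempts because the discipline code now restores fdata->start_unit and fdata->stop_unit itself: one pass with PAV enabled, then on -EAGAIN a single fallback with PAV disabled, replacing the old loop of up to 255 retries with manual range restoration. In sketch form:

    rc = format_device(base, fdata, 1);     /* first try with PAV */
    if (rc == -EAGAIN)                      /* PAV trouble: retry without */
            rc = format_device(base, fdata, 0);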
/*
@@ -286,9 +265,8 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
return -EFAULT;
}
if (bdev != bdev->bd_contains) {
- pr_warning("%s: The specified DASD is a partition and cannot "
- "be formatted\n",
- dev_name(&base->cdev->dev));
+ pr_warn("%s: The specified DASD is a partition and cannot be formatted\n",
+ dev_name(&base->cdev->dev));
dasd_put_device(base);
return -EINVAL;
}
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index aa7bb2d1da81..bad7a196bf84 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -322,13 +322,12 @@ static ssize_t dasd_stats_proc_write(struct file *file,
return user_len;
out_parse_error:
rc = -EINVAL;
- pr_warning("%s is not a supported value for /proc/dasd/statistics\n",
- str);
+ pr_warn("%s is not a supported value for /proc/dasd/statistics\n", str);
out_error:
vfree(buffer);
return rc;
#else
- pr_warning("/proc/dasd/statistics: is not activated in this kernel\n");
+ pr_warn("/proc/dasd/statistics: is not activated in this kernel\n");
return user_len;
#endif /* CONFIG_DASD_PROFILE */
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ce7b70181740..b83908670a9a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -738,15 +738,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
dev_info = dcssblk_get_device_by_name(local_buf);
if (dev_info == NULL) {
up_write(&dcssblk_devices_sem);
- pr_warning("Device %s cannot be removed because it is not a "
- "known device\n", local_buf);
+ pr_warn("Device %s cannot be removed because it is not a known device\n",
+ local_buf);
rc = -ENODEV;
goto out_buf;
}
if (atomic_read(&dev_info->use_count) != 0) {
up_write(&dcssblk_devices_sem);
- pr_warning("Device %s cannot be removed while it is in "
- "use\n", local_buf);
+ pr_warn("Device %s cannot be removed while it is in use\n",
+ local_buf);
rc = -EBUSY;
goto out_buf;
}
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
blk_cleanup_queue(dev_info->dcssblk_queue);
dev_info->gd->queue = NULL;
put_disk(dev_info->gd);
- device_unregister(&dev_info->dev);
/* unload all related segments */
list_for_each_entry(entry, &dev_info->seg_list, lh)
segment_unload(entry->segment_name);
- put_device(&dev_info->dev);
up_write(&dcssblk_devices_sem);
+ device_unregister(&dev_info->dev);
+ put_device(&dev_info->dev);
+
rc = count;
out_buf:
kfree(local_buf);
@@ -850,9 +851,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
case SEG_TYPE_SC:
/* cannot write to these segments */
if (bio_data_dir(bio) == WRITE) {
- pr_warning("Writing to %s failed because it "
- "is a read-only device\n",
- dev_name(&dev_info->dev));
+ pr_warn("Writing to %s failed because it is a read-only device\n",
+ dev_name(&dev_info->dev));
goto fail;
}
}
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 75d9896deccb..e6f54d3b8969 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
if (req->cmd_type != REQ_TYPE_FS) {
blk_start_request(req);
blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
- blk_end_request_all(req, -EIO);
+ __blk_end_request_all(req, -EIO);
continue;
}
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 7d82bbcb12df..e7e078b3c7e6 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -643,7 +643,6 @@ static void raw3215_shutdown(struct raw3215_info *raw)
if ((raw->flags & RAW3215_WORKING) ||
raw->queued_write != NULL ||
raw->queued_read != NULL) {
- raw->port.flags |= ASYNC_CLOSING;
add_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -651,7 +650,7 @@ static void raw3215_shutdown(struct raw3215_info *raw)
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
remove_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_RUNNING);
- raw->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING);
+ raw->port.flags &= ~ASYNC_INITIALIZED;
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index fc94bfdceb95..ebdeaa53182d 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -257,7 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path,
memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
- pr_warning("The read queue for monitor data is full\n");
+ pr_warn("The read queue for monitor data is full\n");
monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
}
monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
@@ -342,8 +342,8 @@ static int mon_close(struct inode *inode, struct file *filp)
if (monpriv->path) {
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
- pr_warning("Disconnecting the z/VM *MONITOR system "
- "service failed with rc=%i\n", rc);
+ pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
+ rc);
iucv_path_free(monpriv->path);
}
@@ -469,8 +469,8 @@ static int monreader_freeze(struct device *dev)
if (monpriv->path) {
rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
- pr_warning("Disconnecting the z/VM *MONITOR system "
- "service failed with rc=%i\n", rc);
+ pr_warn("Disconnecting the z/VM *MONITOR system service failed with rc=%i\n",
+ rc);
iucv_path_free(monpriv->path);
}
atomic_set(&monpriv->iucv_severed, 0);
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 806239c2cf2f..d3947ea3e351 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -67,8 +67,8 @@ int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
/* Check response. */
if (request->status != SCLP_REQ_DONE) {
- pr_warning("sync request failed (cmd=0x%08x, "
- "status=0x%02x)\n", cmd, request->status);
+ pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
+ cmd, request->status);
rc = -EIO;
}
out:
@@ -122,8 +122,8 @@ int sclp_get_core_info(struct sclp_core_info *info)
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
- pr_warning("readcpuinfo failed (response=0x%04x)\n",
- sccb->header.response_code);
+ pr_warn("readcpuinfo failed (response=0x%04x)\n",
+ sccb->header.response_code);
rc = -EIO;
goto out;
}
@@ -160,9 +160,8 @@ static int do_core_configure(sclp_cmdw_t cmd)
case 0x0120:
break;
default:
- pr_warning("configure cpu failed (cmd=0x%08x, "
- "response=0x%04x)\n", cmd,
- sccb->header.response_code);
+ pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
+ cmd, sccb->header.response_code);
rc = -EIO;
break;
}
@@ -230,9 +229,8 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
case 0x0120:
break;
default:
- pr_warning("assign storage failed (cmd=0x%08x, "
- "response=0x%04x, rn=0x%04x)\n", cmd,
- sccb->header.response_code, rn);
+ pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
+ cmd, sccb->header.response_code, rn);
rc = -EIO;
break;
}
@@ -675,9 +673,8 @@ static int do_chp_configure(sclp_cmdw_t cmd)
case 0x0450:
break;
default:
- pr_warning("configure channel-path failed "
- "(cmd=0x%08x, response=0x%04x)\n", cmd,
- sccb->header.response_code);
+ pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
+ cmd, sccb->header.response_code);
rc = -EIO;
break;
}
@@ -744,8 +741,8 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
- pr_warning("read channel-path info failed "
- "(response=0x%04x)\n", sccb->header.response_code);
+ pr_warn("read channel-path info failed (response=0x%04x)\n",
+ sccb->header.response_code);
rc = -EIO;
goto out;
}
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 2acea809e2ac..f344e5bd2d9f 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -154,16 +154,14 @@ static int cpi_req(void)
wait_for_completion(&completion);
if (req->status != SCLP_REQ_DONE) {
- pr_warning("request failed (status=0x%02x)\n",
- req->status);
+ pr_warn("request failed (status=0x%02x)\n", req->status);
rc = -EIO;
goto out_free_req;
}
response = ((struct cpi_sccb *) req->sccb)->header.response_code;
if (response != 0x0020) {
- pr_warning("request failed with response code 0x%x\n",
- response);
+ pr_warn("request failed with response code 0x%x\n", response);
rc = -EIO;
}
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f3b5123faf08..3c379da2eef8 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -699,8 +699,8 @@ tape_generic_remove(struct ccw_device *cdev)
*/
DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
device->cdev_id);
- pr_warning("%s: A tape unit was detached while in "
- "use\n", dev_name(&device->cdev->dev));
+ pr_warn("%s: A tape unit was detached while in use\n",
+ dev_name(&device->cdev->dev));
tape_state_set(device, TS_NOT_OPER);
__tape_discard_requests(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 799c1524c779..e883063c7258 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -343,8 +343,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
if (ret)
- pr_warning("vmlogrdr: failed to start "
- "recording automatically\n");
+ pr_warn("vmlogrdr: failed to start recording automatically\n");
}
/* create connection to the system service */
@@ -396,8 +395,7 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp)
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
if (ret)
- pr_warning("vmlogrdr: failed to stop "
- "recording automatically\n");
+ pr_warn("vmlogrdr: failed to stop recording automatically\n");
}
logptr->dev_in_use = 0;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 20314aad7ab7..9082476b51db 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -51,9 +51,8 @@ static int blacklist_range(range_action action, unsigned int from_ssid,
{
if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
if (msgtrigger)
- pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
- "range for cio_ignore\n", from_ssid, from,
- to_ssid, to);
+ pr_warn("0.%x.%04x to 0.%x.%04x is not a valid range for cio_ignore\n",
+ from_ssid, from, to_ssid, to);
return 1;
}
@@ -140,8 +139,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
rc = 0;
out:
if (rc && msgtrigger)
- pr_warning("%s is not a valid device for the cio_ignore "
- "kernel parameter\n", str);
+ pr_warn("%s is not a valid device for the cio_ignore kernel parameter\n",
+ str);
return rc;
}
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 79f59915f71b..2782100b2c07 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -333,13 +333,12 @@ void ccw_request_timeout(struct ccw_device *cdev)
for (chp = 0; chp < 8; chp++) {
if ((0x80 >> chp) & sch->schib.pmcw.lpum)
- pr_warning("%s: No interrupt was received within %lus "
- "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
- dev_name(&cdev->dev), req->timeout / HZ,
- scsw_cstat(&sch->schib.scsw),
- scsw_dstat(&sch->schib.scsw),
- sch->schid.cssid,
- sch->schib.pmcw.chpid[chp]);
+ pr_warn("%s: No interrupt was received within %lus (CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
+ dev_name(&cdev->dev), req->timeout / HZ,
+ scsw_cstat(&sch->schib.scsw),
+ scsw_dstat(&sch->schib.scsw),
+ sch->schid.cssid,
+ sch->schib.pmcw.chpid[chp]);
}
if (!ccwreq_next_path(cdev)) {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 39a8ae54e9c1..de6fccc13124 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -656,7 +656,7 @@ struct subchannel *cio_probe_console(void)
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
- pr_warning("No CCW console was found\n");
+ pr_warn("No CCW console was found\n");
return ERR_PTR(-ENODEV);
}
init_subchannel_id(&schid);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6aae68412802..7ada078ffdd0 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -364,11 +364,11 @@ int ccw_device_set_offline(struct ccw_device *cdev)
cdev->private->state == DEV_STATE_DISCONNECTED));
/* Inform the user if set offline failed. */
if (cdev->private->state == DEV_STATE_BOXED) {
- pr_warning("%s: The device entered boxed state while "
- "being set offline\n", dev_name(&cdev->dev));
+ pr_warn("%s: The device entered boxed state while being set offline\n",
+ dev_name(&cdev->dev));
} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
- pr_warning("%s: The device stopped operating while "
- "being set offline\n", dev_name(&cdev->dev));
+ pr_warn("%s: The device stopped operating while being set offline\n",
+ dev_name(&cdev->dev));
}
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
@@ -429,13 +429,11 @@ int ccw_device_set_online(struct ccw_device *cdev)
spin_unlock_irq(cdev->ccwlock);
/* Inform the user that set online failed. */
if (cdev->private->state == DEV_STATE_BOXED) {
- pr_warning("%s: Setting the device online failed "
- "because it is boxed\n",
- dev_name(&cdev->dev));
+ pr_warn("%s: Setting the device online failed because it is boxed\n",
+ dev_name(&cdev->dev));
} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
- pr_warning("%s: Setting the device online failed "
- "because it is not operational\n",
- dev_name(&cdev->dev));
+ pr_warn("%s: Setting the device online failed because it is not operational\n",
+ dev_name(&cdev->dev));
}
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
@@ -619,9 +617,8 @@ initiate_logging(struct device *dev, struct device_attribute *attr,
rc = chsc_siosl(sch->schid);
if (rc < 0) {
- pr_warning("Logging for subchannel 0.%x.%04x failed with "
- "errno=%d\n",
- sch->schid.ssid, sch->schid.sch_no, rc);
+ pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, rc);
return rc;
}
pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 2f5b518b0e78..251db0a02e73 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1761,8 +1761,8 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
lcs_schedule_recovery(card);
break;
case LCS_CMD_STOPLAN:
- pr_warning("Stoplan for %s initiated by LGW.\n",
- card->dev->name);
+ pr_warn("Stoplan for %s initiated by LGW\n",
+ card->dev->name);
if (card->dev)
netif_carrier_off(card->dev);
break;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7c8c68c26540..ac544330daeb 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3624,7 +3624,7 @@ static int qeth_l3_register_notifiers(void)
return rc;
}
#else
- pr_warning("There is no IPv6 support for the layer 3 discipline\n");
+ pr_warn("There is no IPv6 support for the layer 3 discipline\n");
#endif
return 0;
}
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index bf2d1300a957..8688ad4c825f 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -342,13 +342,14 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
ccw->count = sizeof(*thinint_area);
ccw->cda = (__u32)(unsigned long) thinint_area;
} else {
+ /* payload is the address of the indicators */
indicatorp = kmalloc(sizeof(&vcdev->indicators),
GFP_DMA | GFP_KERNEL);
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->count = sizeof(vcdev->indicators);
+ ccw->count = sizeof(&vcdev->indicators);
ccw->cda = (__u32)(unsigned long) indicatorp;
}
/* Deregister indicators from host. */
@@ -656,7 +657,10 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
}
ret = -ENOMEM;
- /* We need a data area under 2G to communicate. */
+ /*
+ * We need a data area under 2G to communicate. Our payload is
+ * the address of the indicators.
+ */
indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
if (!indicatorp)
goto out;
@@ -672,7 +676,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
vcdev->indicators = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
- ccw->count = sizeof(vcdev->indicators);
+ ccw->count = sizeof(&vcdev->indicators);
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
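The sizeof changes follow from the comments added above: the buffer handed to the channel carries the guest address of the indicator field, not the field itself, so both the allocation size and ccw->count are the size of a pointer. On 64-bit s390 the two sizes happen to coincide at 8 bytes, which is why the old spelling worked by accident. A hedged illustration:

    unsigned long indicators;               /* the indicator word itself */
    unsigned long *payload = &indicators;   /* what the CCW transfers */
    size_t count = sizeof(&indicators);     /* pointer size, not field size */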
@@ -683,7 +687,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
vcdev->indicators2 = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
- ccw->count = sizeof(vcdev->indicators2);
+ ccw->count = sizeof(&vcdev->indicators2);
ccw->cda = (__u32)(unsigned long) indicatorp;
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
@@ -945,8 +949,7 @@ static struct virtio_config_ops virtio_ccw_config_ops = {
static void virtio_ccw_release_dev(struct device *_d)
{
- struct virtio_device *dev = container_of(_d, struct virtio_device,
- dev);
+ struct virtio_device *dev = dev_to_virtio(_d);
struct virtio_ccw_device *vcdev = to_vc_device(dev);
kfree(vcdev->status);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e2f31c93717d..e80768f8e579 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -596,6 +596,7 @@ config XEN_SCSI_FRONTEND
config HYPERV_STORAGE
tristate "Microsoft Hyper-V virtual storage driver"
depends on SCSI && HYPERV
+ depends on m || SCSI_FC_ATTRS != m
default HYPERV
help
Select this option to enable the Hyper-V virtual storage driver.
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index d72867257346..3eff2a69fe08 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -760,7 +760,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
struct NCR5380_cmd *ncmd;
struct scsi_cmnd *cmd;
- if (list_empty(&hostdata->autosense)) {
+ if (hostdata->sensing || list_empty(&hostdata->autosense)) {
list_for_each_entry(ncmd, &hostdata->unissued, list) {
cmd = NCR5380_to_scmd(ncmd);
dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
@@ -793,7 +793,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- if (hostdata->sensing) {
+ if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
list_add(&ncmd->list, &hostdata->autosense);
hostdata->sensing = NULL;
@@ -815,15 +815,17 @@ static void NCR5380_main(struct work_struct *work)
struct NCR5380_hostdata *hostdata =
container_of(work, struct NCR5380_hostdata, main_task);
struct Scsi_Host *instance = hostdata->host;
- struct scsi_cmnd *cmd;
int done;
do {
done = 1;
spin_lock_irq(&hostdata->lock);
- while (!hostdata->connected &&
- (cmd = dequeue_next_cmd(instance))) {
+ while (!hostdata->connected && !hostdata->selecting) {
+ struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
+
+ if (!cmd)
+ break;
dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
@@ -840,8 +842,7 @@ static void NCR5380_main(struct work_struct *work)
* entire unit.
*/
- cmd = NCR5380_select(instance, cmd);
- if (!cmd) {
+ if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
} else {
dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
@@ -1056,6 +1057,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Reselection interrupt */
goto out;
}
+ if (!hostdata->selecting) {
+ /* Command was aborted */
+ NCR5380_write(MODE_REG, MR_BASE);
+ goto out;
+ }
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
shost_printk(KERN_ERR, instance,
@@ -1759,9 +1765,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
unsigned char msgout = NOP;
int sink = 0;
int len;
-#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
int transfersize;
-#endif
unsigned char *data;
unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
struct scsi_cmnd *cmd;
@@ -1798,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
do_abort(instance);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
+ hostdata->connected = NULL;
return;
#endif
case PHASE_DATAIN:
@@ -1847,20 +1852,23 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sink = 1;
do_abort(instance);
cmd->result = DID_ERROR << 16;
- complete_cmd(instance, cmd);
/* XXX - need to source or sink data here, as appropriate */
} else
cmd->SCp.this_residual -= transfersize - len;
} else
#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
{
- spin_unlock_irq(&hostdata->lock);
- NCR5380_transfer_pio(instance, &phase,
- (int *)&cmd->SCp.this_residual,
+ /* Break up transfer into 3 ms chunks,
+ * presuming 6 accesses per handshake.
+ */
+ transfersize = min((unsigned long)cmd->SCp.this_residual,
+ hostdata->accesses_per_ms / 2);
+ len = transfersize;
+ NCR5380_transfer_pio(instance, &phase, &len,
(unsigned char **)&cmd->SCp.ptr);
- spin_lock_irq(&hostdata->lock);
+ cmd->SCp.this_residual -= transfersize - len;
}
- break;
+ return;
case PHASE_MSGIN:
len = 1;
data = &tmp;
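The min() bound in the PIO branch encodes a time budget rather than a magic number: with roughly six register accesses per handshaked byte, accesses_per_ms / 6 bytes move per millisecond, so a 3 ms slice is 3 * accesses_per_ms / 6 = accesses_per_ms / 2 bytes. A worked example with an assumed calibration value:

    /* Assuming a measured 2000 register accesses per ms (hypothetical): */
    unsigned long accesses_per_ms = 2000;
    unsigned long chunk = accesses_per_ms / 2;      /* ~1000 bytes per 3 ms */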
@@ -2292,14 +2300,17 @@ static bool list_del_cmd(struct list_head *haystack,
* [disconnected -> connected ->]...
* [autosense -> connected ->] done
*
- * If cmd is unissued then just remove it.
- * If cmd is disconnected, try to select the target.
- * If cmd is connected, try to send an abort message.
- * If cmd is waiting for autosense, give it a chance to complete but check
- * that it isn't left connected.
* If cmd was not found at all then presumably it has already been completed,
* in which case return SUCCESS to try to avoid further EH measures.
+ *
* If the command has not completed yet, we must not fail to find it.
+ * We have no option but to forget the aborted command (even if it still
+ * lacks sense data). The mid-layer may re-issue a command that is in error
+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
+ * this driver are such that a command can appear on one queue only.
+ *
+ * The lock protects driver data structures, but EH handlers also use it
+ * to serialize their own execution and prevent their own re-entry.
*/
static int NCR5380_abort(struct scsi_cmnd *cmd)
@@ -2322,6 +2333,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
+ goto out;
}
if (hostdata->selecting == cmd) {
@@ -2336,58 +2348,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->disconnected, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from disconnected list\n", cmd);
- cmd->result = DID_ERROR << 16;
- if (!hostdata->connected)
- NCR5380_select(instance, cmd);
- if (hostdata->connected != cmd) {
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
+ /* Can't call NCR5380_select() and send ABORT because that
+ * means releasing the lock. Need a bus reset.
+ */
+ set_host_byte(cmd, DID_ERROR);
+ complete_cmd(instance, cmd);
+ result = FAILED;
+ goto out;
}
if (hostdata->connected == cmd) {
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
- if (do_abort(instance)) {
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
- set_host_byte(cmd, DID_ABORT);
#ifdef REAL_DMA
hostdata->dma_len = 0;
#endif
- if (cmd->cmnd[0] == REQUEST_SENSE)
- complete_cmd(instance, cmd);
- else {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
- /* Perform autosense for this command */
- list_add(&ncmd->list, &hostdata->autosense);
- }
- }
-
- if (list_find_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: found %p on sense queue\n", cmd);
- spin_unlock_irqrestore(&hostdata->lock, flags);
- queue_work(hostdata->work_q, &hostdata->main_task);
- msleep(1000);
- spin_lock_irqsave(&hostdata->lock, flags);
- if (list_del_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ABORT);
- complete_cmd(instance, cmd);
- goto out;
- }
- }
-
- if (hostdata->connected == cmd) {
- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- hostdata->connected = NULL;
if (do_abort(instance)) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
@@ -2395,9 +2370,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
goto out;
}
set_host_byte(cmd, DID_ABORT);
-#ifdef REAL_DMA
- hostdata->dma_len = 0;
-#endif
+ complete_cmd(instance, cmd);
+ goto out;
+ }
+
+ if (list_del_cmd(&hostdata->autosense, cmd)) {
+ dsprintk(NDEBUG_ABORT, instance,
+ "abort: removed %p from sense queue\n", cmd);
+ set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
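After the rewrite, NCR5380_abort() finds the command on exactly one queue, checked in lifecycle order, and never re-queues it: an unissued or autosense entry is completed directly, a disconnected entry is failed (re-selecting it would mean dropping the lock), and a connected command gets an ABORT message on the bus. A hedged, compressed sketch of the dispatch, with finish() and abort_on_bus() as hypothetical helpers:

    if (list_del_cmd(&hostdata->unissued, cmd))          /* never started */
            finish(cmd, DID_ABORT);
    else if (list_del_cmd(&hostdata->disconnected, cmd)) /* needs bus reset */
            finish(cmd, DID_ERROR);
    else if (hostdata->connected == cmd)                 /* active on the bus */
            abort_on_bus(instance, cmd);
    else if (list_del_cmd(&hostdata->autosense, cmd))    /* sense data lost */
            finish(cmd, DID_ERROR);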
@@ -2450,7 +2430,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* commands!
*/
- hostdata->selecting = NULL;
+ if (list_del_cmd(&hostdata->unissued, cmd)) {
+ cmd->result = DID_RESET << 16;
+ cmd->scsi_done(cmd);
+ }
+
+ if (hostdata->selecting) {
+ hostdata->selecting->result = DID_RESET << 16;
+ complete_cmd(instance, hostdata->selecting);
+ hostdata->selecting = NULL;
+ }
list_for_each_entry(ncmd, &hostdata->disconnected, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2458,6 +2447,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->disconnected);
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2465,6 +2455,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->autosense);
if (hostdata->connected) {
set_host_byte(hostdata->connected, DID_RESET);
@@ -2472,12 +2463,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
hostdata->connected = NULL;
}
- if (hostdata->sensing) {
- set_host_byte(hostdata->connected, DID_RESET);
- complete_cmd(instance, hostdata->sensing);
- hostdata->sensing = NULL;
- }
-
for (i = 0; i < 8; ++i)
hostdata->busy[i] = 0;
#ifdef REAL_DMA
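
A minimal userspace sketch (not the driver's code; types and names are illustrative) of the invariant the reworked NCR5380 error handling relies on: a command lives on at most one queue, so an abort can probe each queue in turn with a list_del_cmd()-style helper that removes the command only if found and reports which state it was in.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

struct cmd {
	int id;
	struct list_head list;	/* on at most one queue at a time */
};

/* Remove cmd from haystack if present; true if it was found there. */
static bool list_del_cmd(struct list_head *haystack, struct cmd *c)
{
	struct list_head *pos;

	for (pos = haystack->next; pos != haystack; pos = pos->next) {
		if (pos == &c->list) {
			pos->prev->next = pos->next;
			pos->next->prev = pos->prev;
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct list_head unissued, disconnected;
	struct cmd c = { .id = 7 };

	list_init(&unissued);
	list_init(&disconnected);
	list_add_tail(&c.list, &unissued);

	/* Abort path: try each queue in turn; only one probe can succeed. */
	if (list_del_cmd(&unissued, &c))
		printf("cmd %d removed from issue queue\n", c.id);
	else if (list_del_cmd(&disconnected, &c))
		printf("cmd %d removed from disconnected list\n", c.id);
	return 0;
}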
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index e4c243748a97..7dfd0fa27255 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -323,7 +323,6 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
return 0;
}
scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
@@ -331,7 +330,6 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
if (unlikely(!device || !scsi_device_online(device))) {
dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
return 0;
}
return 1;
@@ -541,7 +539,6 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
@@ -557,7 +554,8 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext)
return -ENOMEM;
aac_fib_init(cmd_fibcontext);
@@ -586,7 +584,6 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
aac_fib_complete(cmd_fibcontext);
- aac_fib_free(cmd_fibcontext);
return -1;
}
@@ -1024,7 +1021,6 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
@@ -1040,7 +1036,8 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
- if (!(cmd_fibcontext = aac_fib_alloc(dev)))
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext)
return -ENOMEM;
aac_fib_init(cmd_fibcontext);
@@ -1068,7 +1065,6 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
aac_fib_complete(cmd_fibcontext);
- aac_fib_free(cmd_fibcontext);
return -1;
}
@@ -1869,7 +1865,6 @@ static void io_callback(void *context, struct fib * fibptr)
break;
}
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
@@ -1954,7 +1949,8 @@ static int aac_read(struct scsi_cmnd * scsicmd)
/*
* Allocate and initialize a Fib
*/
- if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext) {
printk(KERN_WARNING "aac_read: fib allocation failed\n");
return -1;
}
@@ -2051,7 +2047,8 @@ static int aac_write(struct scsi_cmnd * scsicmd)
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
- if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext) {
/* FIB temporarily unavailable, not catastrophic failure */
/* scsicmd->result = DID_ERROR << 16;
@@ -2285,7 +2282,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
/*
* Allocate and initialize a Fib
*/
- cmd_fibcontext = aac_fib_alloc(aac);
+ cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
if (!cmd_fibcontext)
return SCSI_MLQUEUE_HOST_BUSY;
@@ -3157,7 +3154,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
aac_fib_complete(fibptr);
- aac_fib_free(fibptr);
scsicmd->scsi_done(scsicmd);
}
@@ -3187,9 +3183,10 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
/*
* Allocate and initialize a Fib then setup a BlockWrite command
*/
- if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
+ cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+ if (!cmd_fibcontext)
return -1;
- }
+
status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
/*
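
A minimal sketch of the idea behind the aac_fib_alloc_tag() conversion above, assuming a simplified fib context type (not the driver's real structures): the adapter pre-allocates one fib per possible command, and the block layer's per-command tag is a direct index into that array, so "allocation" becomes an O(1) lookup that cannot fail and needs no lock.

#include <stdio.h>

#define CAN_QUEUE 4

struct fib_ctx {
	int xfer_state;		/* must be cleared at the start of each I/O */
	void *callback;
	void *callback_data;
};

static struct fib_ctx fibs[CAN_QUEUE];

static struct fib_ctx *fib_alloc_tag(int tag)
{
	struct fib_ctx *f = &fibs[tag];

	/* Null out fields that must be zero at the start of each I/O. */
	f->xfer_state = 0;
	f->callback = NULL;
	f->callback_data = NULL;
	return f;
}

int main(void)
{
	int tag;

	for (tag = 0; tag < CAN_QUEUE; tag++)
		printf("tag %d -> fib %p\n", tag, (void *)fib_alloc_tag(tag));
	return 0;
}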
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 074878b55a0b..efa493cf1bc6 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -62,7 +62,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 41010
+# define AAC_DRIVER_BUILD 41052
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -94,6 +94,13 @@ enum {
#define aac_phys_to_logical(x) ((x)+1)
#define aac_logical_to_phys(x) ((x)?(x)-1:0)
+/*
+ * These macros are for keeping track of
+ * character device state.
+ */
+#define AAC_CHARDEV_UNREGISTERED (-1)
+#define AAC_CHARDEV_NEEDS_REINIT (-2)
+
/* #define AAC_DETAILED_STATUS_INFO */
struct diskparm
@@ -944,6 +951,7 @@ struct fib {
*/
struct list_head fiblink;
void *data;
+ u32 vector_no;
struct hw_fib *hw_fib_va; /* Actual shared object */
dma_addr_t hw_fib_pa; /* physical address of hw_fib*/
};
@@ -1123,6 +1131,7 @@ struct aac_dev
struct fib *free_fib;
spinlock_t fib_lock;
+ struct mutex ioctl_mutex;
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
@@ -1234,6 +1243,7 @@ struct aac_dev
struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
u8 adapter_shutdown;
+ u32 handle_pci_error;
};
#define aac_adapter_interrupt(dev) \
@@ -2113,7 +2123,9 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
+void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
+struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd);
int aac_fib_setup(struct aac_dev *dev);
void aac_fib_map_free(struct aac_dev *dev);
void aac_fib_free(struct fib * context);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 54195a117f72..4b3bb52b5108 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -855,13 +855,20 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
int status;
+ mutex_lock(&dev->ioctl_mutex);
+
+ if (dev->adapter_shutdown) {
+ status = -EACCES;
+ goto cleanup;
+ }
+
/*
* HBA gets first crack
*/
status = aac_dev_ioctl(dev, cmd, arg);
if (status != -ENOTTY)
- return status;
+ goto cleanup;
switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
@@ -890,6 +897,10 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
status = -ENOTTY;
break;
}
+
+cleanup:
+ mutex_unlock(&dev->ioctl_mutex);
+
return status;
}
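
A minimal pthreads model of the ioctl_mutex gating added above (userspace stand-in, not the kernel code): every ioctl takes the mutex and then checks the shutdown flag, while the shutdown path sets the flag under the same mutex, so once shutdown is flagged no new ioctl can reach a dying adapter.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ioctl_mutex = PTHREAD_MUTEX_INITIALIZER;
static int adapter_shutdown;

static int do_ioctl(int cmd)
{
	int status;

	pthread_mutex_lock(&ioctl_mutex);
	if (adapter_shutdown) {
		status = -EACCES;	/* refuse, adapter is going away */
		goto cleanup;
	}
	status = cmd;			/* stand-in for the real dispatch */
cleanup:
	pthread_mutex_unlock(&ioctl_mutex);
	return status;
}

static void send_shutdown(void)
{
	pthread_mutex_lock(&ioctl_mutex);
	adapter_shutdown = 1;		/* in-flight ioctls have drained */
	pthread_mutex_unlock(&ioctl_mutex);
}

int main(void)
{
	printf("before shutdown: %d\n", do_ioctl(42));
	send_shutdown();
	printf("after shutdown: %d\n", do_ioctl(42));
	return 0;
}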
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 0e954e37f0b5..2b4e75380ae6 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -212,8 +212,11 @@ int aac_send_shutdown(struct aac_dev * dev)
return -ENOMEM;
aac_fib_init(fibctx);
- cmd = (struct aac_close *) fib_data(fibctx);
+ mutex_lock(&dev->ioctl_mutex);
+ dev->adapter_shutdown = 1;
+ mutex_unlock(&dev->ioctl_mutex);
+ cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xfffffffe);
@@ -229,7 +232,6 @@ int aac_send_shutdown(struct aac_dev * dev)
/* FIB should be freed only after getting the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibctx);
- dev->adapter_shutdown = 1;
if ((dev->pdev->device == PMC_DEVICE_S7 ||
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) &&
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1f90fe849c9..511bbc575062 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
- pci_free_consistent(dev->pdev,
- dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
- dev->hw_fib_va, dev->hw_fib_pa);
+ if (dev->hw_fib_va && dev->max_fib_size) {
+ pci_free_consistent(dev->pdev,
+ (dev->max_fib_size *
+ (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
+ dev->hw_fib_va, dev->hw_fib_pa);
+ }
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
+void aac_fib_vector_assign(struct aac_dev *dev)
+{
+ u32 i = 0;
+ u32 vector = 1;
+ struct fib *fibptr = NULL;
+
+ for (i = 0, fibptr = &dev->fibs[i];
+ i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ i++, fibptr++) {
+ if ((dev->max_msix == 1) ||
+ (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
+ - dev->vector_cap))) {
+ fibptr->vector_no = 0;
+ } else {
+ fibptr->vector_no = vector;
+ vector++;
+ if (vector == dev->max_msix)
+ vector = 1;
+ }
+ }
+}
+
/**
* aac_fib_setup - setup the fibs
* @dev: Adapter to set up
@@ -137,6 +162,7 @@ int aac_fib_setup(struct aac_dev * dev)
i++, fibptr++)
{
fibptr->flags = 0;
+ fibptr->size = sizeof(struct fib);
fibptr->dev = dev;
fibptr->hw_fib_va = hw_fib;
fibptr->data = (void *) fibptr->hw_fib_va->data;
@@ -151,18 +177,49 @@ int aac_fib_setup(struct aac_dev * dev)
hw_fib_pa = hw_fib_pa +
dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
}
+
+ /*
+ * Assign vector numbers to fibs
+ */
+ aac_fib_vector_assign(dev);
+
/*
* Add the fib chain to the free list
*/
dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
/*
- * Enable this to debug out of queue space
- */
- dev->free_fib = &dev->fibs[0];
+ * Set 8 fibs aside for management tools
+ */
+ dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
return 0;
}
/**
+ * aac_fib_alloc_tag - allocate a fib using tags
+ * @dev: Adapter to allocate the fib for
+ *
+ * Allocate a fib from the adapter fib pool using tags
+ * from the blk layer.
+ */
+
+struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+{
+ struct fib *fibptr;
+
+ fibptr = &dev->fibs[scmd->request->tag];
+ /*
+ * Null out fields that depend on being zero at the start of
+ * each I/O
+ */
+ fibptr->hw_fib_va->header.XferState = 0;
+ fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+ fibptr->callback_data = NULL;
+ fibptr->callback = NULL;
+
+ return fibptr;
+}
+
+/**
* aac_fib_alloc - allocate a fib
* @dev: Adapter to allocate the fib for
*
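
The aac_fib_vector_assign() arithmetic above, reproduced as a runnable userspace sketch so the distribution is easy to see (constants are illustrative): normal I/O fibs receive MSI-X vectors 1..max_msix-1 round-robin, while the trailing vector_cap fibs and the single-vector case fall back to vector 0.

#include <stdio.h>

#define CAN_QUEUE	8
#define NUM_MGT_FIB	2
#define TOTAL_FIBS	(CAN_QUEUE + NUM_MGT_FIB)

int main(void)
{
	unsigned int max_msix = 3, vector_cap = NUM_MGT_FIB;
	unsigned int i, vector = 1;

	for (i = 0; i < TOTAL_FIBS; i++) {
		unsigned int vector_no;

		if (max_msix == 1 || i > (TOTAL_FIBS - 1) - vector_cap) {
			vector_no = 0;	/* reserved/management fibs */
		} else {
			vector_no = vector++;
			if (vector == max_msix)
				vector = 1;	/* wrap, skipping vector 0 */
		}
		printf("fib %2u -> vector %u\n", i, vector_no);
	}
	return 0;
}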
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index da9d9936e995..d677b52860ae 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -394,7 +394,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
fib->callback(fib->callback_data, fib);
} else {
aac_fib_complete(fib);
- aac_fib_free(fib);
}
} else {
unsigned long flagv;
@@ -416,7 +415,6 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
fib->done = 0;
spin_unlock_irqrestore(&fib->event_lock, flagv);
aac_fib_complete(fib);
- aac_fib_free(fib);
}
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 76eaa38ffd6e..ff6caab8cc8b 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -38,6 +38,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
+#include <linux/aer.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
@@ -79,7 +80,7 @@ MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
static DEFINE_MUTEX(aac_mutex);
static LIST_HEAD(aac_devices);
-static int aac_cfg_major = -1;
+static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
/*
@@ -451,9 +452,12 @@ static int aac_slave_configure(struct scsi_device *sdev)
else if (depth < 2)
depth = 2;
scsi_change_queue_depth(sdev, depth);
- } else
+ } else {
scsi_change_queue_depth(sdev, 1);
+ sdev->tagged_supported = 1;
+ }
+
return 0;
}
@@ -700,23 +704,18 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
static long aac_cfg_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- int ret;
- struct aac_dev *aac;
- aac = (struct aac_dev *)file->private_data;
- if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown)
+ struct aac_dev *aac = (struct aac_dev *)file->private_data;
+
+ if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- mutex_lock(&aac_mutex);
- ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
- mutex_unlock(&aac_mutex);
- return ret;
+ return aac_do_ioctl(aac, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
{
long ret;
- mutex_lock(&aac_mutex);
switch (cmd) {
case FSACTL_MINIPORT_REV_CHECK:
case FSACTL_SENDFIB:
@@ -750,7 +749,6 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long
ret = -ENOIOCTLCMD;
break;
}
- mutex_unlock(&aac_mutex);
return ret;
}
@@ -1075,6 +1073,8 @@ static void __aac_shutdown(struct aac_dev * aac)
int i;
int cpu;
+ aac_send_shutdown(aac);
+
if (aac->aif_thread) {
int i;
/* Clear out events first */
@@ -1086,7 +1086,6 @@ static void __aac_shutdown(struct aac_dev * aac)
}
kthread_stop(aac->thread);
}
- aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
cpu = cpumask_first(cpu_online_mask);
if (aac->pdev->device == PMC_DEVICE_S6 ||
@@ -1120,6 +1119,13 @@ static void __aac_shutdown(struct aac_dev * aac)
else if (aac->max_msix > 1)
pci_disable_msix(aac->pdev);
}
+static void aac_init_char(void)
+{
+ aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
+ if (aac_cfg_major < 0) {
+ pr_err("aacraid: unable to register \"aac\" device.\n");
+ }
+}
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -1132,6 +1138,12 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
u64 dmamask;
extern int aac_sync_mode;
+ /*
+ * Only series 7 needs freset.
+ */
+ if (pdev->device == PMC_DEVICE_S7)
+ pdev->needs_freset = 1;
+
list_for_each_entry(aac, &aac_devices, entry) {
if (aac->id > unique_id)
break;
@@ -1171,6 +1183,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_cmd_len = 16;
shost->use_cmd_list = 1;
+ if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
+ aac_init_char();
+
aac = (struct aac_dev *)shost->hostdata;
aac->base_start = pci_resource_start(pdev, 0);
aac->scsi_host_ptr = shost;
@@ -1185,6 +1200,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_host;
spin_lock_init(&aac->fib_lock);
+ mutex_init(&aac->ioctl_mutex);
/*
* Map in the registers from the adapter.
*/
@@ -1296,6 +1312,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_deinit;
scsi_scan_host(shost);
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
return 0;
out_deinit:
@@ -1317,7 +1336,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return error;
}
-#if (defined(CONFIG_PM))
static void aac_release_resources(struct aac_dev *aac)
{
int i;
@@ -1404,14 +1422,26 @@ static int aac_acquire_resources(struct aac_dev *dev)
aac_adapter_enable_int(dev);
- if (!dev->sync_mode)
+ /* max msix may change after EEH
+ * Re-assign vectors to fibs
+ */
+ aac_fib_vector_assign(dev);
+
+ if (!dev->sync_mode) {
+ /* After EEH recovery or suspend resume, max_msix count
+ * may change, therefore updating in init as well.
+ */
aac_adapter_start(dev);
+ dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+ }
return 0;
error_iounmap:
return -1;
}
+
+#if (defined(CONFIG_PM))
static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
{
@@ -1495,10 +1525,142 @@ static void aac_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
if (list_empty(&aac_devices)) {
unregister_chrdev(aac_cfg_major, "aac");
- aac_cfg_major = -1;
+ aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
}
}
+static void aac_flush_ios(struct aac_dev *aac)
+{
+ int i;
+ struct scsi_cmnd *cmd;
+
+ for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
+ cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
+ if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
+ scsi_dma_unmap(cmd);
+
+ if (aac->handle_pci_error)
+ cmd->result = DID_NO_CONNECT << 16;
+ else
+ cmd->result = DID_RESET << 16;
+
+ cmd->scsi_done(cmd);
+ }
+ }
+}
+
+static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state error)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct aac_dev *aac = shost_priv(shost);
+
+ dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);
+
+ switch (error) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ aac->handle_pci_error = 1;
+
+ scsi_block_requests(aac->scsi_host_ptr);
+ aac_flush_ios(aac);
+ aac_release_resources(aac);
+
+ pci_disable_pcie_error_reporting(pdev);
+ aac_adapter_ioremap(aac, 0);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ aac->handle_pci_error = 1;
+
+ aac_flush_ios(aac);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
+{
+ dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
+ pci_restore_state(pdev);
+ if (pci_enable_device(pdev)) {
+ dev_warn(&pdev->dev,
+ "aacraid: failed to enable slave\n");
+ goto fail_device;
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
+ goto fail_device;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+
+fail_device:
+ dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+
+static void aac_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct scsi_device *sdev = NULL;
+ struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ if (aac_adapter_ioremap(aac, aac->base_size)) {
+
+ dev_err(&pdev->dev, "aacraid: ioremap failed\n");
+ /* remap failed, go back ... */
+ aac->comm_interface = AAC_COMM_PRODUCER;
+ if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
+ dev_warn(&pdev->dev,
+ "aacraid: unable to map adapter.\n");
+
+ return;
+ }
+ }
+
+ msleep(10000);
+
+ aac_acquire_resources(aac);
+
+ /*
+ * reset this flag to unblock ioctl() as it was set
+ * at aac_send_shutdown() to block ioctls from the upper layer
+ */
+ aac->adapter_shutdown = 0;
+ aac->handle_pci_error = 0;
+
+ shost_for_each_device(sdev, shost)
+ if (sdev->sdev_state == SDEV_OFFLINE)
+ sdev->sdev_state = SDEV_RUNNING;
+ scsi_unblock_requests(aac->scsi_host_ptr);
+ scsi_scan_host(aac->scsi_host_ptr);
+ pci_save_state(pdev);
+
+ dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
+}
+
+static struct pci_error_handlers aac_pci_err_handler = {
+ .error_detected = aac_pci_error_detected,
+ .mmio_enabled = aac_pci_mmio_enabled,
+ .slot_reset = aac_pci_slot_reset,
+ .resume = aac_pci_resume,
+};
+
static struct pci_driver aac_pci_driver = {
.name = AAC_DRIVERNAME,
.id_table = aac_pci_tbl,
@@ -1509,6 +1671,7 @@ static struct pci_driver aac_pci_driver = {
.resume = aac_resume,
#endif
.shutdown = aac_shutdown,
+ .err_handler = &aac_pci_err_handler,
};
static int __init aac_init(void)
@@ -1522,11 +1685,8 @@ static int __init aac_init(void)
if (error < 0)
return error;
- aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
- if (aac_cfg_major < 0) {
- printk(KERN_WARNING
- "aacraid: unable to register \"aac\" device.\n");
- }
+ aac_init_char();
+
return 0;
}
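
A userspace model of the AER recovery sequence the new aacraid handlers plug into (a sketch only; the enum mirrors pci_ers_result_t in spirit, and do_recovery() stands in for the PCI core): error_detected runs first, then slot_reset if a reset was requested, then resume once the device is usable again.

#include <stdio.h>

enum ers_result { ERS_CAN_RECOVER, ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT };

static enum ers_result error_detected(int frozen)
{
	if (!frozen)
		return ERS_CAN_RECOVER;
	puts("error_detected: flush I/O, release resources");
	return ERS_NEED_RESET;
}

static enum ers_result slot_reset(void)
{
	puts("slot_reset: restore config space, re-enable device");
	return ERS_RECOVERED;
}

static void resume(void)
{
	puts("resume: remap, reacquire IRQs, unblock and rescan host");
}

static void do_recovery(int frozen)
{
	enum ers_result r = error_detected(frozen);

	if (r == ERS_NEED_RESET)
		r = slot_reset();
	if (r == ERS_RECOVERED)
		resume();
	else if (r == ERS_DISCONNECT)
		puts("device lost: fail outstanding commands");
}

int main(void)
{
	do_recovery(1);
	return 0;
}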
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2aa34ea8ceb1..bc0203f3d243 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -156,8 +156,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
break;
if (dev->msi_enabled && dev->max_msix > 1)
atomic_dec(&dev->rrq_outstanding[vector_no]);
- aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
dev->host_rrq[index++] = 0;
+ aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
if (index == (vector_no + 1) * dev->vector_cap)
index = vector_no * dev->vector_cap;
dev->host_rrq_idx[vector_no] = index;
@@ -452,36 +452,20 @@ static int aac_src_deliver_message(struct fib *fib)
#endif
u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
+ u16 vector_no;
atomic_inc(&q->numpending);
if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
dev->max_msix > 1) {
- u_int16_t vector_no, first_choice = 0xffff;
-
- vector_no = dev->fibs_pushed_no % dev->max_msix;
- do {
- vector_no += 1;
- if (vector_no == dev->max_msix)
- vector_no = 1;
- if (atomic_read(&dev->rrq_outstanding[vector_no]) <
- dev->vector_cap)
- break;
- if (0xffff == first_choice)
- first_choice = vector_no;
- else if (vector_no == first_choice)
- break;
- } while (1);
- if (vector_no == first_choice)
- vector_no = 0;
- atomic_inc(&dev->rrq_outstanding[vector_no]);
- if (dev->fibs_pushed_no == 0xffffffff)
- dev->fibs_pushed_no = 0;
- else
- dev->fibs_pushed_no++;
+ vector_no = fib->vector_no;
fib->hw_fib_va->header.Handle += (vector_no << 16);
+ } else {
+ vector_no = 0;
}
+ atomic_inc(&dev->rrq_outstanding[vector_no]);
+
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
/* Calculate the amount to the fibsize bits */
fibsize = (hdr_size + 127) / 128 - 1;
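
A small sketch of why precomputing fib->vector_no simplifies the delivery path above (illustrative types, not the driver's): the removed code searched for a vector with spare capacity on every submission, whereas the new code just reads the value assigned at setup time and bumps the per-vector outstanding count.

#include <stdio.h>

#define MAX_MSIX 4

static int rrq_outstanding[MAX_MSIX];

struct fib { unsigned int vector_no; };

static void deliver(struct fib *f, int msi_enabled)
{
	unsigned int vector_no = msi_enabled ? f->vector_no : 0;

	rrq_outstanding[vector_no]++;	/* a single atomic_inc in the driver */
	printf("delivered on vector %u (outstanding %d)\n",
	       vector_no, rrq_outstanding[vector_no]);
}

int main(void)
{
	struct fib a = { .vector_no = 1 }, b = { .vector_no = 2 };

	deliver(&a, 1);
	deliver(&b, 1);
	deliver(&a, 0);	/* non-MSI path always uses vector 0 */
	return 0;
}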
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 5b8b2937a3fe..7db448ec8beb 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -403,6 +403,9 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
if (!cptr)
return SCSI_MLQUEUE_HOST_BUSY;
+ } else {
+ sg_count = 0;
+ cptr = NULL;
}
/* Use the outgoing mailboxes in a round-robin fashion, because this
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index b846a4683562..fc6a83188c1e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1336,6 +1336,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
case AHC_DEV_Q_TAGGED:
scsi_change_queue_depth(sdev,
dev->openings + dev->active);
+ break;
default:
/*
* We allow the OS to queue 2 untagged transactions to
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index deaaf84989cd..12b88294d667 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -677,7 +677,8 @@ int round_period(unsigned int period)
* Copyright: Copyright (c) 1996 John Shifflett, GeoLog Consulting
*/
static
-unsigned char calc_sync_xfer(unsigned int period, unsigned int offset)
+unsigned char __maybe_unused calc_sync_xfer(unsigned int period,
+ unsigned int offset)
{
return sync_xfer_table[round_period(period)].reg_value |
((offset < SDTR_SIZE) ? offset : SDTR_SIZE);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index decdc71b6b86..24388795ee9a 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -98,6 +98,7 @@ static int level_mask = LOG_ERROR;
module_param(level_mask, int, 0644);
+#ifndef MODULE
static int __init fas216_log_setup(char *str)
{
char *s;
@@ -138,6 +139,7 @@ static int __init fas216_log_setup(char *str)
}
__setup("fas216_logging=", fas216_log_setup);
+#endif
static inline unsigned char fas216_readb(FAS216_Info *info, unsigned int reg)
{
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index e65478651ca9..389825ba5d96 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -862,7 +862,7 @@ static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
struct NCR5380_cmd *ncmd;
struct scsi_cmnd *cmd;
- if (list_empty(&hostdata->autosense)) {
+ if (hostdata->sensing || list_empty(&hostdata->autosense)) {
list_for_each_entry(ncmd, &hostdata->unissued, list) {
cmd = NCR5380_to_scmd(ncmd);
dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
@@ -901,7 +901,7 @@ static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
struct NCR5380_hostdata *hostdata = shost_priv(instance);
struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- if (hostdata->sensing) {
+ if (hostdata->sensing == cmd) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
list_add(&ncmd->list, &hostdata->autosense);
hostdata->sensing = NULL;
@@ -923,7 +923,6 @@ static void NCR5380_main(struct work_struct *work)
struct NCR5380_hostdata *hostdata =
container_of(work, struct NCR5380_hostdata, main_task);
struct Scsi_Host *instance = hostdata->host;
- struct scsi_cmnd *cmd;
int done;
/*
@@ -936,8 +935,11 @@ static void NCR5380_main(struct work_struct *work)
done = 1;
spin_lock_irq(&hostdata->lock);
- while (!hostdata->connected &&
- (cmd = dequeue_next_cmd(instance))) {
+ while (!hostdata->connected && !hostdata->selecting) {
+ struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
+
+ if (!cmd)
+ break;
dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
@@ -960,8 +962,7 @@ static void NCR5380_main(struct work_struct *work)
#ifdef SUPPORT_TAGS
cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
#endif
- cmd = NCR5380_select(instance, cmd);
- if (!cmd) {
+ if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
maybe_release_dma_irq(instance);
} else {
@@ -1257,6 +1258,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/* Reselection interrupt */
goto out;
}
+ if (!hostdata->selecting) {
+ /* Command was aborted */
+ NCR5380_write(MODE_REG, MR_BASE);
+ goto out;
+ }
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
shost_printk(KERN_ERR, instance,
@@ -1838,9 +1844,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
unsigned char msgout = NOP;
int sink = 0;
int len;
-#if defined(REAL_DMA)
int transfersize;
-#endif
unsigned char *data;
unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
struct scsi_cmnd *cmd;
@@ -1909,6 +1913,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
do_abort(instance);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
+ hostdata->connected = NULL;
return;
#endif
case PHASE_DATAIN:
@@ -1966,7 +1971,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
sink = 1;
do_abort(instance);
cmd->result = DID_ERROR << 16;
- complete_cmd(instance, cmd);
/* XXX - need to source or sink data here, as appropriate */
} else {
#ifdef REAL_DMA
@@ -1983,18 +1987,22 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
} else
#endif /* defined(REAL_DMA) */
{
- spin_unlock_irq(&hostdata->lock);
- NCR5380_transfer_pio(instance, &phase,
- (int *)&cmd->SCp.this_residual,
+ /* Break up transfer into 3 ms chunks,
+ * presuming 6 accesses per handshake.
+ */
+ transfersize = min((unsigned long)cmd->SCp.this_residual,
+ hostdata->accesses_per_ms / 2);
+ len = transfersize;
+ NCR5380_transfer_pio(instance, &phase, &len,
(unsigned char **)&cmd->SCp.ptr);
- spin_lock_irq(&hostdata->lock);
+ cmd->SCp.this_residual -= transfersize - len;
}
#if defined(CONFIG_SUN3) && defined(REAL_DMA)
/* if we had intended to dma that command clear it */
if (sun3_dma_setup_done == cmd)
sun3_dma_setup_done = NULL;
#endif
- break;
+ return;
case PHASE_MSGIN:
len = 1;
data = &tmp;
@@ -2487,14 +2495,17 @@ static bool list_del_cmd(struct list_head *haystack,
* [disconnected -> connected ->]...
* [autosense -> connected ->] done
*
- * If cmd is unissued then just remove it.
- * If cmd is disconnected, try to select the target.
- * If cmd is connected, try to send an abort message.
- * If cmd is waiting for autosense, give it a chance to complete but check
- * that it isn't left connected.
* If cmd was not found at all then presumably it has already been completed,
* in which case return SUCCESS to try to avoid further EH measures.
+ *
* If the command has not completed yet, we must not fail to find it.
+ * We have no option but to forget the aborted command (even if it still
+ * lacks sense data). The mid-layer may re-issue a command that is in error
+ * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
+ * this driver are such that a command can appear on one queue only.
+ *
+ * The lock protects driver data structures, but EH handlers also use it
+ * to serialize their own execution and prevent their own re-entry.
*/
static int NCR5380_abort(struct scsi_cmnd *cmd)
@@ -2517,6 +2528,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
"abort: removed %p from issue queue\n", cmd);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
+ goto out;
}
if (hostdata->selecting == cmd) {
@@ -2531,58 +2543,21 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->disconnected, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from disconnected list\n", cmd);
- cmd->result = DID_ERROR << 16;
- if (!hostdata->connected)
- NCR5380_select(instance, cmd);
- if (hostdata->connected != cmd) {
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
+ /* Can't call NCR5380_select() and send ABORT because that
+ * means releasing the lock. Need a bus reset.
+ */
+ set_host_byte(cmd, DID_ERROR);
+ complete_cmd(instance, cmd);
+ result = FAILED;
+ goto out;
}
if (hostdata->connected == cmd) {
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
- if (do_abort(instance)) {
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
- set_host_byte(cmd, DID_ABORT);
#ifdef REAL_DMA
hostdata->dma_len = 0;
#endif
- if (cmd->cmnd[0] == REQUEST_SENSE)
- complete_cmd(instance, cmd);
- else {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
- /* Perform autosense for this command */
- list_add(&ncmd->list, &hostdata->autosense);
- }
- }
-
- if (list_find_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: found %p on sense queue\n", cmd);
- spin_unlock_irqrestore(&hostdata->lock, flags);
- queue_work(hostdata->work_q, &hostdata->main_task);
- msleep(1000);
- spin_lock_irqsave(&hostdata->lock, flags);
- if (list_del_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ABORT);
- complete_cmd(instance, cmd);
- goto out;
- }
- }
-
- if (hostdata->connected == cmd) {
- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- hostdata->connected = NULL;
if (do_abort(instance)) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
@@ -2590,9 +2565,14 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
goto out;
}
set_host_byte(cmd, DID_ABORT);
-#ifdef REAL_DMA
- hostdata->dma_len = 0;
-#endif
+ complete_cmd(instance, cmd);
+ goto out;
+ }
+
+ if (list_del_cmd(&hostdata->autosense, cmd)) {
+ dsprintk(NDEBUG_ABORT, instance,
+ "abort: removed %p from sense queue\n", cmd);
+ set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
@@ -2646,7 +2626,16 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
* commands!
*/
- hostdata->selecting = NULL;
+ if (list_del_cmd(&hostdata->unissued, cmd)) {
+ cmd->result = DID_RESET << 16;
+ cmd->scsi_done(cmd);
+ }
+
+ if (hostdata->selecting) {
+ hostdata->selecting->result = DID_RESET << 16;
+ complete_cmd(instance, hostdata->selecting);
+ hostdata->selecting = NULL;
+ }
list_for_each_entry(ncmd, &hostdata->disconnected, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2654,6 +2643,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->disconnected);
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
@@ -2661,6 +2651,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
+ INIT_LIST_HEAD(&hostdata->autosense);
if (hostdata->connected) {
set_host_byte(hostdata->connected, DID_RESET);
@@ -2668,12 +2659,6 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
hostdata->connected = NULL;
}
- if (hostdata->sensing) {
- set_host_byte(hostdata->connected, DID_RESET);
- complete_cmd(instance, hostdata->sensing);
- hostdata->sensing = NULL;
- }
-
#ifdef SUPPORT_TAGS
free_all_tags(hostdata);
#endif
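
The 3 ms PIO chunking added to the information-transfer loop above, shown in isolation as a runnable sketch (the accesses_per_ms value is an assumed stand-in for the rate measured at init): assuming roughly 6 register accesses per handshaked byte, accesses_per_ms / 2 bytes take about 3 ms, so each pass moves at most that much and returns, letting the driver re-check the bus under its lock.

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long accesses_per_ms = 12000;	/* measured at init, say */
	long this_residual = 65536;		/* bytes left in this buffer */

	while (this_residual > 0) {
		unsigned long transfersize =
			min_ul((unsigned long)this_residual,
			       accesses_per_ms / 2);
		long len = 0;	/* bytes NOT transferred (e.g. phase change) */

		/* NCR5380_transfer_pio() would run here for ~3 ms */
		this_residual -= transfersize - len;
		printf("moved %lu bytes, %ld remaining\n",
		       transfersize - len, this_residual);
	}
	return 0;
}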
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a41c6432f444..ee5ace873535 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -42,7 +42,7 @@ struct be_queue_info {
u16 id;
u16 tail, head;
bool created;
- atomic_t used; /* Number of valid elements in the queue */
+ u16 used; /* Number of valid elements in the queue */
};
static inline u32 MODULO(u16 val, u16 limit)
@@ -110,10 +110,9 @@ struct be_mcc_obj {
};
struct beiscsi_mcc_tag_state {
-#define MCC_TAG_STATE_COMPLETED 0x00
-#define MCC_TAG_STATE_RUNNING 0x01
-#define MCC_TAG_STATE_TIMEOUT 0x02
- uint8_t tag_state;
+ unsigned long tag_state;
+#define MCC_TAG_STATE_RUNNING 1
+#define MCC_TAG_STATE_TIMEOUT 2
struct be_dma_mem tag_mem_state;
};
@@ -124,7 +123,7 @@ struct be_ctrl_info {
struct pci_dev *pdev;
/* Mbox used for cmd request/response */
- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
@@ -133,11 +132,10 @@ struct be_ctrl_info {
/* MCC Rings */
struct be_mcc_obj mcc_obj;
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
- spinlock_t mcc_cq_lock;
wait_queue_head_t mcc_wait[MAX_MCC_CMD + 1];
unsigned int mcc_tag[MAX_MCC_CMD];
- unsigned int mcc_numtag[MAX_MCC_CMD + 1];
+ unsigned int mcc_tag_status[MAX_MCC_CMD + 1];
unsigned short mcc_alloc_index;
unsigned short mcc_free_index;
unsigned int mcc_tag_available;
@@ -147,6 +145,12 @@ struct be_ctrl_info {
#include "be_cmds.h"
+/* WRB index mask for MCC_Q_LEN queue entries */
+#define MCC_Q_WRB_IDX_MASK CQE_STATUS_WRB_MASK
+#define MCC_Q_WRB_IDX_SHIFT CQE_STATUS_WRB_SHIFT
+/* TAG is from 1...MAX_MCC_CMD, MASK includes MAX_MCC_CMD */
+#define MCC_Q_CMD_TAG_MASK ((MAX_MCC_CMD << 1) - 1)
+
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
#define mcc_timeout 120000 /* 12s timeout */
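
A single-threaded model of the tag_state rework in this be2iscsi series (the kernel uses atomic set_bit/test_bit on an unsigned long; the helpers here are simplified for clarity): RUNNING and TIMEOUT become independent bits in one word instead of a single enum-like byte, so the completion path can test "was this ever posted?" and "did the waiter give up?" separately.

#include <stdio.h>

#define MCC_TAG_STATE_RUNNING 1
#define MCC_TAG_STATE_TIMEOUT 2

static unsigned long tag_state;

static void set_bit_(int nr, unsigned long *w)   { *w |= 1UL << nr; }
static void clear_bit_(int nr, unsigned long *w) { *w &= ~(1UL << nr); }
static int  test_bit_(int nr, unsigned long *w)  { return (*w >> nr) & 1; }

int main(void)
{
	set_bit_(MCC_TAG_STATE_RUNNING, &tag_state);	/* cmd posted */
	set_bit_(MCC_TAG_STATE_TIMEOUT, &tag_state);	/* waiter timed out */

	/* completion arrives late: both facts are still visible */
	if (test_bit_(MCC_TAG_STATE_RUNNING, &tag_state) &&
	    test_bit_(MCC_TAG_STATE_TIMEOUT, &tag_state))
		puts("late completion: free tag and DMA memory now");

	clear_bit_(MCC_TAG_STATE_RUNNING, &tag_state);
	return 0;
}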
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 2778089b01a5..a55eaeea37e7 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -104,24 +104,16 @@ int be_chk_reset_complete(struct beiscsi_hba *phba)
return 0;
}
-void be_mcc_notify(struct beiscsi_hba *phba)
-{
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- u32 val = 0;
-
- val |= mccq->id & DB_MCCQ_RING_ID_MASK;
- val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
- iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
-}
-
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
unsigned int tag = 0;
+ spin_lock(&phba->ctrl.mcc_lock);
if (phba->ctrl.mcc_tag_available) {
tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
- phba->ctrl.mcc_numtag[tag] = 0;
+ phba->ctrl.mcc_tag_status[tag] = 0;
+ phba->ctrl.ptag_state[tag].tag_state = 0;
}
if (tag) {
phba->ctrl.mcc_tag_available--;
@@ -130,11 +122,89 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
else
phba->ctrl.mcc_alloc_index++;
}
+ spin_unlock(&phba->ctrl.mcc_lock);
return tag;
}
+struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
+ unsigned int *ref_tag)
+{
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ struct be_mcc_wrb *wrb = NULL;
+ unsigned int tag;
+
+ spin_lock_bh(&phba->ctrl.mcc_lock);
+ if (mccq->used == mccq->len) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
+ mccq->used, phba->ctrl.mcc_tag_available);
+ goto alloc_failed;
+ }
+
+ if (!phba->ctrl.mcc_tag_available)
+ goto alloc_failed;
+
+ tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
+ if (!tag) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
+ phba->ctrl.mcc_tag_available,
+ phba->ctrl.mcc_alloc_index);
+ goto alloc_failed;
+ }
+
+ /* return this tag for further reference */
+ *ref_tag = tag;
+ phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
+ phba->ctrl.mcc_tag_status[tag] = 0;
+ phba->ctrl.ptag_state[tag].tag_state = 0;
+ phba->ctrl.mcc_tag_available--;
+ if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
+ phba->ctrl.mcc_alloc_index = 0;
+ else
+ phba->ctrl.mcc_alloc_index++;
+
+ wrb = queue_head_node(mccq);
+ memset(wrb, 0, sizeof(*wrb));
+ wrb->tag0 = tag;
+ wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
+ queue_head_inc(mccq);
+ mccq->used++;
+
+alloc_failed:
+ spin_unlock_bh(&phba->ctrl.mcc_lock);
+ return wrb;
+}
+
+void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
+{
+ struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+
+ spin_lock_bh(&ctrl->mcc_lock);
+ tag = tag & MCC_Q_CMD_TAG_MASK;
+ ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
+ if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
+ ctrl->mcc_free_index = 0;
+ else
+ ctrl->mcc_free_index++;
+ ctrl->mcc_tag_available++;
+ mccq->used--;
+ spin_unlock_bh(&ctrl->mcc_lock);
+}
+
+/**
+ * beiscsi_fail_session(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ **/
+void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
+{
+ iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
/*
- * beiscsi_mccq_compl()- Wait for completion of MBX
+ * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
* @phba: Driver private structure
* @tag: Tag for the MBX Command
* @wrb: the WRB used for the MBX Command
@@ -146,43 +216,40 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
* Success: 0
* Failure: Non-Zero
**/
-int beiscsi_mccq_compl(struct beiscsi_hba *phba,
- uint32_t tag, struct be_mcc_wrb **wrb,
- struct be_dma_mem *mbx_cmd_mem)
+int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem)
{
int rc = 0;
- uint32_t mcc_tag_response;
+ uint32_t mcc_tag_status;
uint16_t status = 0, addl_status = 0, wrb_num = 0;
struct be_mcc_wrb *temp_wrb;
struct be_cmd_req_hdr *mbx_hdr;
struct be_cmd_resp_hdr *mbx_resp_hdr;
struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- if (beiscsi_error(phba)) {
- free_mcc_tag(&phba->ctrl, tag);
+ if (beiscsi_error(phba))
return -EPERM;
- }
-
- /* Set MBX Tag state to Active */
- spin_lock(&phba->ctrl.mbox_lock);
- phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING;
- spin_unlock(&phba->ctrl.mbox_lock);
/* wait for the mccq completion */
rc = wait_event_interruptible_timeout(
phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag],
+ phba->ctrl.mcc_tag_status[tag],
msecs_to_jiffies(
BEISCSI_HOST_MBX_TIMEOUT));
-
+ /**
+ * If the MBOX cmd timeout expired, the tag and resources allocated
+ * for the cmd are not freed until FW returns the completion.
+ */
if (rc <= 0) {
struct be_dma_mem *tag_mem;
- /* Set MBX Tag state to timeout */
- spin_lock(&phba->ctrl.mbox_lock);
- phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT;
- spin_unlock(&phba->ctrl.mbox_lock);
- /* Store resource addr to be freed later */
+ /**
+ * PCI/DMA memory allocated and posted in non-embedded mode
+ * will have mbx_cmd_mem != NULL.
+ * Save virtual and bus addresses for the command so that it
+ * can be freed later.
+ **/
tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
if (mbx_cmd_mem) {
tag_mem->size = mbx_cmd_mem->size;
@@ -191,28 +258,28 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
} else
tag_mem->size = 0;
+ /* first make tag_mem_state visible to all */
+ wmb();
+ set_bit(MCC_TAG_STATE_TIMEOUT,
+ &phba->ctrl.ptag_state[tag].tag_state);
+
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
BEISCSI_LOG_CONFIG,
"BC_%d : MBX Cmd Completion timed out\n");
return -EBUSY;
- } else {
- rc = 0;
- /* Set MBX Tag state to completed */
- spin_lock(&phba->ctrl.mbox_lock);
- phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
- spin_unlock(&phba->ctrl.mbox_lock);
}
- mcc_tag_response = phba->ctrl.mcc_numtag[tag];
- status = (mcc_tag_response & CQE_STATUS_MASK);
- addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
+ rc = 0;
+ mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
+ status = (mcc_tag_status & CQE_STATUS_MASK);
+ addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
CQE_STATUS_ADDL_SHIFT);
if (mbx_cmd_mem) {
mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
} else {
- wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
+ wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
CQE_STATUS_WRB_SHIFT;
temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
mbx_hdr = embedded_payload(temp_wrb);
@@ -231,7 +298,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
mbx_hdr->subsystem,
mbx_hdr->opcode,
status, addl_status);
-
+ rc = -EIO;
if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
beiscsi_log(phba, KERN_WARNING,
@@ -241,70 +308,16 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,
"Resp_Len : %d Actual_Resp_Len : %d\n",
mbx_resp_hdr->response_length,
mbx_resp_hdr->actual_resp_len);
-
rc = -EAGAIN;
- goto release_mcc_tag;
}
- rc = -EIO;
}
-release_mcc_tag:
- /* Release the MCC entry */
- free_mcc_tag(&phba->ctrl, tag);
-
+ free_mcc_wrb(&phba->ctrl, tag);
return rc;
}
-void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
-{
- spin_lock(&ctrl->mbox_lock);
- tag = tag & 0x000000FF;
- ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
- if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
- ctrl->mcc_free_index = 0;
- else
- ctrl->mcc_free_index++;
- ctrl->mcc_tag_available++;
- spin_unlock(&ctrl->mbox_lock);
-}
-
-bool is_link_state_evt(u32 trailer)
-{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_LINK_STATE);
-}
-
-static bool is_iscsi_evt(u32 trailer)
-{
- return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_ISCSI;
-}
-
-static int iscsi_evt_type(u32 trailer)
-{
- return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
-}
-
-static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
-{
- if (compl->flags != 0) {
- compl->flags = le32_to_cpu(compl->flags);
- WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
- return true;
- } else
- return false;
-}
-
-static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
-{
- compl->flags = 0;
-}
-
/*
- * be_mcc_compl_process()- Check the MBX comapletion status
+ * beiscsi_process_mbox_compl()- Check the MBX completion status
* @ctrl: Function specific MBX data structure
* @compl: Completion status of MBX Command
*
@@ -314,8 +327,8 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
* Success: Zero
* Failure: Non-Zero
**/
-static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
- struct be_mcc_compl *compl)
+static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl)
{
u16 compl_status, extd_status;
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
@@ -323,206 +336,228 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
struct be_cmd_resp_hdr *resp_hdr;
- be_dws_le_to_cpu(compl, 4);
-
- compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
- if (compl_status != MCC_STATUS_SUCCESS) {
- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
-
+ /**
+ * To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new
+ * entry is little endian)
+ */
+ if (!compl->flags) {
beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : error in cmd completion: "
- "Subsystem : %d Opcode : %d "
- "status(compl/extd)=%d/%d\n",
- hdr->subsystem, hdr->opcode,
- compl_status, extd_status);
-
- if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
- resp_hdr = (struct be_cmd_resp_hdr *) hdr;
- if (resp_hdr->response_length)
- return 0;
- }
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : BMBX busy, no completion\n");
return -EBUSY;
}
- return 0;
-}
-
-int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
- struct be_mcc_compl *compl)
-{
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
- u16 compl_status, extd_status;
- unsigned short tag;
+ compl->flags = le32_to_cpu(compl->flags);
+ WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ /**
+ * Just swap the status to host endian;
+ * mcc tag is opaquely copied from mcc_wrb.
+ */
be_dws_le_to_cpu(compl, 4);
-
compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
- /* The ctrl.mcc_numtag[tag] is filled with
- * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
- * [7:0] = compl_status
- */
- tag = (compl->tag0 & 0x000000FF);
+ CQE_STATUS_COMPL_MASK;
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
+ CQE_STATUS_EXTD_MASK;
+ /* Need to reset the entire word that houses the valid bit */
+ compl->flags = 0;
- ctrl->mcc_numtag[tag] = 0x80000000;
- ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);
- ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
- ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
+ if (compl_status == MCC_STATUS_SUCCESS)
+ return 0;
- if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) {
- wake_up_interruptible(&ctrl->mcc_wait[tag]);
- } else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) {
- struct be_dma_mem *tag_mem;
- tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
+ hdr->subsystem, hdr->opcode, compl_status, extd_status);
- beiscsi_log(phba, KERN_WARNING,
- BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
- BEISCSI_LOG_CONFIG,
- "BC_%d : MBX Completion for timeout Command "
- "from FW\n");
- /* Check if memory needs to be freed */
- if (tag_mem->size)
- pci_free_consistent(ctrl->pdev, tag_mem->size,
- tag_mem->va, tag_mem->dma);
-
- /* Change tag state */
- spin_lock(&phba->ctrl.mbox_lock);
- ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED;
- spin_unlock(&phba->ctrl.mbox_lock);
-
- /* Free MCC Tag */
- free_mcc_tag(ctrl, tag);
+ if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+ /* if status is insufficient buffer, check the length */
+ resp_hdr = (struct be_cmd_resp_hdr *) hdr;
+ if (resp_hdr->response_length)
+ return 0;
}
-
- return 0;
+ return -EINVAL;
}
-static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
+static void beiscsi_process_async_link(struct beiscsi_hba *phba,
+ struct be_mcc_compl *compl)
{
- struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
- struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
+ struct be_async_event_link_state *evt;
+
+ evt = (struct be_async_event_link_state *)compl;
- if (be_mcc_compl_is_new(compl)) {
- queue_tail_inc(mcc_cq);
- return compl;
+ phba->port_speed = evt->port_speed;
+ /**
+ * Check logical link status in ASYNC event.
+ * This has been newly introduced in SKH-R Firmware 10.0.338.45.
+ **/
+ if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
+ phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+ phba->get_boot = BE_GET_BOOT_RETRIES;
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : Link Up on Port %d tag 0x%x\n",
+ evt->physical_port, evt->event_tag);
+ } else {
+ phba->state = BE_ADAPTER_LINK_DOWN;
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : Link Down on Port %d tag 0x%x\n",
+ evt->physical_port, evt->event_tag);
+ iscsi_host_for_each_session(phba->shost,
+ beiscsi_fail_session);
}
- return NULL;
}
-/**
- * be2iscsi_fail_session(): Closing session with appropriate error
- * @cls_session: ptr to session
- *
- * Depending on adapter state appropriate error flag is passed.
- **/
-void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
+static char *beiscsi_port_misconf_event_msg[] = {
+ "Physical Link is functional.",
+ "Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
+ "Optics of two types installed - Remove one optic or install matching pair of optics.",
+ "Incompatible optics - Replace with compatible optics for card to function.",
+ "Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
+ "Uncertified optics - Replace with Avago Certified optics to enable link operation."
+};
+
+static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
+ struct be_mcc_compl *compl)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct beiscsi_hba *phba = iscsi_host_priv(shost);
- uint32_t iscsi_err_flag;
+ struct be_async_event_sli *async_sli;
+ u8 evt_type, state, old_state, le;
+ char *sev = KERN_WARNING;
+ char *msg = NULL;
+
+ evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
+ evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;
+
+ /* processing only MISCONFIGURED physical port event */
+ if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
+ return;
+
+ async_sli = (struct be_async_event_sli *)compl;
+ state = async_sli->event_data1 >>
+ (phba->fw_config.phys_port * 8) & 0xff;
+ le = async_sli->event_data2 >>
+ (phba->fw_config.phys_port * 8) & 0xff;
+
+ old_state = phba->optic_state;
+ phba->optic_state = state;
+
+ if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
+ /* fw is reporting a state we don't know, log and return */
+ __beiscsi_log(phba, KERN_ERR,
+ "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
+ phba->port_name, async_sli->event_data1);
+ return;
+ }
- if (phba->state & BE_ADAPTER_STATE_SHUTDOWN)
- iscsi_err_flag = ISCSI_ERR_INVALID_HOST;
- else
- iscsi_err_flag = ISCSI_ERR_CONN_FAILED;
+ if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
+ /* log link effect for unqualified-4, uncertified-5 optics */
+ if (state > 3)
+ msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
+ " Link is non-operational." :
+ " Link is operational.";
+ /* 1 - info */
+ if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
+ sev = KERN_INFO;
+ /* 2 - error */
+ if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
+ sev = KERN_ERR;
+ }
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+ if (old_state != phba->optic_state)
+ __beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
+ phba->port_name,
+ beiscsi_port_misconf_event_msg[state],
+ !msg ? "" : msg);
}
-void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
- struct be_async_event_link_state *evt)
+void beiscsi_process_async_event(struct beiscsi_hba *phba,
+ struct be_mcc_compl *compl)
{
- if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) ||
- ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
- (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) {
- phba->state = BE_ADAPTER_LINK_DOWN;
-
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
- "BC_%d : Link Down on Port %d\n",
- evt->physical_port);
-
- iscsi_host_for_each_session(phba->shost,
- be2iscsi_fail_session);
- } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
- ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
- (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
- phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
+ char *sev = KERN_INFO;
+ u8 evt_code;
+
+ /* interpret flags as an async trailer */
+ evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
+ evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
+ switch (evt_code) {
+ case ASYNC_EVENT_CODE_LINK_STATE:
+ beiscsi_process_async_link(phba, compl);
+ break;
+ case ASYNC_EVENT_CODE_ISCSI:
+ phba->state |= BE_ADAPTER_CHECK_BOOT;
phba->get_boot = BE_GET_BOOT_RETRIES;
-
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
- "BC_%d : Link UP on Port %d\n",
- evt->physical_port);
+ sev = KERN_ERR;
+ break;
+ case ASYNC_EVENT_CODE_SLI:
+ beiscsi_process_async_sli(phba, compl);
+ break;
+ default:
+ /* event not registered */
+ sev = KERN_ERR;
}
+
+ beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
+ evt_code, compl->status, compl->flags);
}
-int beiscsi_process_mcc(struct beiscsi_hba *phba)
+int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl)
{
- struct be_mcc_compl *compl;
- int num = 0, status = 0;
- struct be_ctrl_info *ctrl = &phba->ctrl;
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ u16 compl_status, extd_status;
+ struct be_dma_mem *tag_mem;
+ unsigned int tag, wrb_idx;
- spin_lock_bh(&phba->ctrl.mcc_cq_lock);
- while ((compl = be_mcc_compl_get(phba))) {
- if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
- /* Interpret flags as an async trailer */
- if (is_link_state_evt(compl->flags))
- /* Interpret compl as a async link evt */
- beiscsi_async_link_state_process(phba,
- (struct be_async_event_link_state *) compl);
- else if (is_iscsi_evt(compl->flags)) {
- switch (iscsi_evt_type(compl->flags)) {
- case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
- case ASYNC_EVENT_NEW_ISCSI_CONN:
- case ASYNC_EVENT_NEW_TCP_CONN:
- phba->state |= BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = BE_GET_BOOT_RETRIES;
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG |
- BEISCSI_LOG_MBOX,
- "BC_%d : Async iscsi Event,"
- " flags handled = 0x%08x\n",
- compl->flags);
- break;
- default:
- phba->state |= BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = BE_GET_BOOT_RETRIES;
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG |
- BEISCSI_LOG_MBOX,
- "BC_%d : Unsupported Async"
- " Event, flags = 0x%08x\n",
- compl->flags);
- }
- } else
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG |
- BEISCSI_LOG_MBOX,
- "BC_%d : Unsupported Async Event, flags"
- " = 0x%08x\n", compl->flags);
-
- } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- status = be_mcc_compl_process(ctrl, compl);
- atomic_dec(&phba->ctrl.mcc_obj.q.used);
- }
- be_mcc_compl_use(compl);
- num++;
+ be_dws_le_to_cpu(compl, 4);
+ tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
+ wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;
+
+ if (!test_bit(MCC_TAG_STATE_RUNNING,
+ &ctrl->ptag_state[tag].tag_state)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
+ BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX cmd completed but not posted\n");
+ return 0;
}
- if (num)
- hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);
+ if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
+ beiscsi_log(phba, KERN_WARNING,
+ BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Completion for timeout Command from FW\n");
+ /**
+ * Check for the size before freeing resource.
+ * Only for non-embedded cmd, PCI resource is allocated.
+ **/
+ tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+ if (tag_mem->size)
+ pci_free_consistent(ctrl->pdev, tag_mem->size,
+ tag_mem->va, tag_mem->dma);
+ free_mcc_wrb(ctrl, tag);
+ return 0;
+ }
- spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
- return status;
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ /* The ctrl.mcc_tag_status[tag] is filled with
+ * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
+ * [7:0] = compl_status
+ */
+ ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
+ ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
+ ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
+ CQE_STATUS_ADDL_MASK;
+ ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);
+
+ /* write ordering forced in wake_up_interruptible */
+ clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
+ wake_up_interruptible(&ctrl->mcc_wait[tag]);
+ return 0;
}
/*
- * be_mcc_wait_compl()- Wait for MBX completion
+ * be_mcc_compl_poll()- Wait for MBX completion
* @phba: driver private structure
*
* Wait till no more pending mcc requests are present
@@ -532,50 +567,57 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
* Failure: Non-Zero
*
**/
-static int be_mcc_wait_compl(struct beiscsi_hba *phba)
+int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
{
- int i, status;
+ struct be_ctrl_info *ctrl = &phba->ctrl;
+ int i;
+
+ if (!test_bit(MCC_TAG_STATE_RUNNING,
+ &ctrl->ptag_state[tag].tag_state)) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d: tag %u state not running\n", tag);
+ return 0;
+ }
for (i = 0; i < mcc_timeout; i++) {
if (beiscsi_error(phba))
return -EIO;
- status = beiscsi_process_mcc(phba);
- if (status)
- return status;
-
- if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
+ beiscsi_process_mcc_cq(phba);
+ /* in polling mode the poller releases the wrb and tag */
+ if (!test_bit(MCC_TAG_STATE_RUNNING,
+ &ctrl->ptag_state[tag].tag_state)) {
+ free_mcc_wrb(ctrl, tag);
break;
+ }
udelay(100);
}
- if (i == mcc_timeout) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : FW Timed Out\n");
- phba->fw_timeout = true;
- beiscsi_ue_detect(phba);
- return -EBUSY;
- }
- return 0;
+
+ if (i < mcc_timeout)
+ return 0;
+
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
+ return -EBUSY;
}
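/*
 * Sketch of the allocate/notify/poll sequence the new API implies, as seen
 * in callers like be_cmd_set_vlan; the command-specific WRB setup is elided
 * and the error handling is simplified (the driver's own command-issue
 * paths return tags, not errnos).
 */
static int example_issue_and_poll(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb;
	unsigned int tag;

	if (mutex_lock_interruptible(&ctrl->mbox_lock))
		return -EINTR;
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return -EBUSY;
	}
	/* ... prepare WRB header and payload for the command here ... */
	be_mcc_notify(phba, tag);	/* marks tag RUNNING, rings doorbell */
	mutex_unlock(&ctrl->mbox_lock);

	return be_mcc_compl_poll(phba, tag);
}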
-/*
- * be_mcc_notify_wait()- Notify and wait for Compl
- * @phba: driver private structure
- *
- * Notify MCC requests and wait for completion
- *
- * return
- * Success: 0
- * Failure: Non-Zero
- **/
-int be_mcc_notify_wait(struct beiscsi_hba *phba)
+void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
- be_mcc_notify(phba);
- return be_mcc_wait_compl(phba);
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ u32 val = 0;
+
+ set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+ /* make request available for DMA */
+ wmb();
+ iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
/*
- * be_mbox_db_ready_wait()- Check ready status
+ * be_mbox_db_ready_poll()- Check ready status
* @ctrl: Function specific MBX data structure
*
* Check for the ready status of FW to send BMBX
@@ -585,49 +627,45 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)
* Success: 0
* Failure: Non-Zero
**/
-static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
+static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
-#define BEISCSI_MBX_RDY_BIT_TIMEOUT 4000 /* 4sec */
+ /* wait 30s for generic non-flash MBOX operation */
+#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
unsigned long timeout;
- bool read_flag = false;
- int ret = 0, i;
u32 ready;
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q);
- if (beiscsi_error(phba))
- return -EIO;
+ /*
+ * This BMBX busy-wait path is used during init only.
+ * Commands executed during init normally complete well within 5s,
+ * so the 30s bound above is a generous upper limit.
+ */
+ timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
+ do {
+ if (beiscsi_error(phba))
+ return -EIO;
- timeout = jiffies + (HZ * 110);
+ ready = ioread32(db);
+ if (ready == 0xffffffff)
+ return -EIO;
- do {
- for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) {
- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
- if (ready) {
- read_flag = true;
- break;
- }
- mdelay(1);
- }
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
+ if (ready)
+ return 0;
- if (!read_flag) {
- wait_event_timeout(rdybit_check_q,
- (read_flag != true),
- HZ * 5);
- }
- } while ((time_before(jiffies, timeout)) && !read_flag);
+ if (time_after(jiffies, timeout))
+ break;
+ msleep(20);
+ } while (!ready);
- if (!read_flag) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : FW Timed Out\n");
- phba->fw_timeout = true;
- beiscsi_ue_detect(phba);
- ret = -EBUSY;
- }
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BC_%d : FW Timed Out\n");
+
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
- return ret;
+ return -EBUSY;
}
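/*
 * The wait above is the standard jiffies-bounded readiness poll; the same
 * shape in isolation (register, mask and interval here are examples, not
 * driver definitions):
 */
static int example_poll_ready(void __iomem *reg, u32 mask, unsigned int to_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(to_ms);
	u32 val;

	do {
		val = ioread32(reg);
		if (val == 0xffffffff)		/* PCI error / device gone */
			return -EIO;
		if (val & mask)
			return 0;
		if (time_after(jiffies, timeout))
			return -EBUSY;
		msleep(20);
	} while (1);
}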
/*
@@ -648,10 +686,8 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
struct be_mcc_mailbox *mbox = mbox_mem->va;
- struct be_mcc_compl *compl = &mbox->compl;
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
- status = be_mbox_db_ready_wait(ctrl);
+ status = be_mbox_db_ready_poll(ctrl);
if (status)
return status;
@@ -660,7 +696,7 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
iowrite32(val, db);
- status = be_mbox_db_ready_wait(ctrl);
+ status = be_mbox_db_ready_poll(ctrl);
if (status)
return status;
@@ -670,81 +706,15 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
val |= (u32) (mbox_mem->dma >> 4) << 2;
iowrite32(val, db);
- status = be_mbox_db_ready_wait(ctrl);
+ status = be_mbox_db_ready_poll(ctrl);
if (status)
return status;
- if (be_mcc_compl_is_new(compl)) {
- status = be_mcc_compl_process(ctrl, &mbox->compl);
- be_mcc_compl_use(compl);
- if (status) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : After be_mcc_compl_process\n");
-
- return status;
- }
- } else {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : Invalid Mailbox Completion\n");
-
- return -EBUSY;
- }
- return 0;
-}
-
-/*
- * Insert the mailbox address into the doorbell in two steps
- * Polls on the mbox doorbell till a command completion (or a timeout) occurs
- */
-static int be_mbox_notify_wait(struct beiscsi_hba *phba)
-{
- int status;
- u32 val = 0;
- void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
- struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
- struct be_mcc_mailbox *mbox = mbox_mem->va;
- struct be_mcc_compl *compl = &mbox->compl;
- struct be_ctrl_info *ctrl = &phba->ctrl;
-
- status = be_mbox_db_ready_wait(ctrl);
- if (status)
- return status;
-
- val |= MPU_MAILBOX_DB_HI_MASK;
- /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
- val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
- iowrite32(val, db);
-
- /* wait for ready to be set */
- status = be_mbox_db_ready_wait(ctrl);
- if (status != 0)
- return status;
-
- val = 0;
- /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
- val |= (u32)(mbox_mem->dma >> 4) << 2;
- iowrite32(val, db);
+ /* RDY is set; small delay before CQE read. */
+ udelay(1);
- status = be_mbox_db_ready_wait(ctrl);
- if (status != 0)
- return status;
-
- /* A cq entry has been made now */
- if (be_mcc_compl_is_new(compl)) {
- status = be_mcc_compl_process(ctrl, &mbox->compl);
- be_mcc_compl_use(compl);
- if (status)
- return status;
- } else {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : invalid mailbox completion\n");
-
- return -EBUSY;
- }
- return 0;
+ status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
+ return status;
}
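/*
 * The two doorbell writes above split the 16-byte aligned BMBX DMA address;
 * a condensed sketch of the same split (mask name as used in this driver,
 * helper name illustrative):
 */
static void example_bmbx_doorbell(void __iomem *db, dma_addr_t dma)
{
	u32 val;

	/* step 1: address bits 63:34 into doorbell bits 31:2, HI flag set */
	val = MPU_MAILBOX_DB_HI_MASK | ((upper_32_bits(dma) >> 2) << 2);
	iowrite32(val, db);

	/* step 2: address bits 33:4 into doorbell bits 31:2, HI flag clear */
	val = (u32)(dma >> 4) << 2;
	iowrite32(val, db);
}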
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
@@ -809,21 +779,6 @@ struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
-struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
-{
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
- struct be_mcc_wrb *wrb;
-
- WARN_ON(atomic_read(&mccq->used) >= mccq->len);
- wrb = queue_head_node(mccq);
- memset(wrb, 0, sizeof(*wrb));
- wrb->tag0 = (mccq->head & 0x000000FF) << 16;
- queue_head_inc(mccq);
- atomic_inc(&mccq->used);
- return wrb;
-}
-
-
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay)
{
@@ -833,7 +788,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -860,7 +815,7 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
eq->id = le16_to_cpu(resp->eq_id);
eq->created = true;
}
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -881,7 +836,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
int status;
u8 *endian_check;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
endian_check = (u8 *) wrb;
@@ -900,7 +855,7 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : be_cmd_fw_initialize Failed\n");
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -921,7 +876,7 @@ int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
int status;
u8 *endian_check;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
endian_check = (u8 *) wrb;
@@ -941,7 +896,7 @@ int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BC_%d : be_cmd_fw_uninit Failed\n");
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -957,7 +912,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
void *ctxt = &req->context;
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1007,7 +962,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
"BC_%d : In be_cmd_cq_create, status=ox%08x\n",
status);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1025,13 +980,13 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_req_mcc_create *req;
+ struct be_cmd_req_mcc_create_ext *req;
struct be_dma_mem *q_mem = &mccq->dma_mem;
struct be_ctrl_info *ctrl;
void *ctxt;
int status;
- spin_lock(&phba->ctrl.mbox_lock);
+ mutex_lock(&phba->ctrl.mbox_lock);
ctrl = &phba->ctrl;
wrb = wrb_from_mbox(&ctrl->mbox_mem);
memset(wrb, 0, sizeof(*wrb));
@@ -1041,9 +996,12 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+ req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
+ req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
+ req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;
AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
PCI_FUNC(phba->pcidev->devfn));
@@ -1056,13 +1014,13 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
- status = be_mbox_notify_wait(phba);
+ status = be_mbox_notify(ctrl);
if (!status) {
struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
mccq->id = le16_to_cpu(resp->id);
mccq->created = true;
}
- spin_unlock(&phba->ctrl.mbox_lock);
+ mutex_unlock(&phba->ctrl.mbox_lock);
return status;
}
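/*
 * The MCC_CREATE_EXT request subscribes to async events by bitmap; the
 * composition above is equivalent to this sketch (BIT() from linux/bits.h):
 */
static u32 example_async_evt_bitmap(void)
{
	return BIT(ASYNC_EVENT_CODE_LINK_STATE) |	/* link up/down */
	       BIT(ASYNC_EVENT_CODE_ISCSI) |		/* boot/conn events */
	       BIT(ASYNC_EVENT_CODE_SLI);		/* SLI events */
}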
@@ -1080,7 +1038,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
"BC_%d : In beiscsi_cmd_q_destroy "
"queue_type : %d\n", queue_type);
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1110,7 +1068,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
break;
default:
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
BUG();
return -ENXIO;
}
@@ -1120,7 +1078,7 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
status = be_mbox_notify(ctrl);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1155,7 +1113,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
void *ctxt = &req->context;
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1227,7 +1185,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
defq_ring->doorbell_offset = resp->doorbell_offset;
}
}
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1255,7 +1213,7 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1286,7 +1244,7 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
pwrb_context->doorbell_offset = resp->doorbell_offset;
}
}
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1297,7 +1255,7 @@ int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
struct be_post_template_pages_req *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1310,7 +1268,7 @@ int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
status = be_mbox_notify(ctrl);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1320,7 +1278,7 @@ int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
struct be_remove_template_pages_req *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1331,7 +1289,7 @@ int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
status = be_mbox_notify(ctrl);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1350,7 +1308,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
if (num_pages == 0xff)
num_pages = 1;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
do {
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1379,7 +1337,7 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
}
} while (num_pages > 0);
error:
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
if (status != 0)
beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
return status;
@@ -1392,15 +1350,15 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
struct be_post_sgl_pages_req *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
- status = be_mbox_notify_wait(phba);
+ status = be_mbox_notify(ctrl);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -1417,21 +1375,20 @@ int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
int be_cmd_set_vlan(struct beiscsi_hba *phba,
uint16_t vlan_tag)
{
- unsigned int tag = 0;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_set_vlan_req *req;
struct be_ctrl_info *ctrl = &phba->ctrl;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return 0;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
@@ -1440,8 +1397,8 @@ int be_cmd_set_vlan(struct beiscsi_hba *phba,
req->interface_hndl = phba->interface_handle;
req->vlan_priority = vlan_tag;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 4bfca355fbe4..deeb951e6874 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -58,15 +58,16 @@ struct be_mcc_wrb {
#define MCC_STATUS_ILLEGAL_FIELD 0x3
#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
-#define CQE_STATUS_COMPL_MASK 0xFFFF
-#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK 0xFFFF
-#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */
+#define CQE_STATUS_COMPL_MASK 0xFFFF
+#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
+#define CQE_STATUS_EXTD_MASK 0xFFFF
+#define CQE_STATUS_EXTD_SHIFT 16 /* bits 31 - 16 */
#define CQE_STATUS_ADDL_MASK 0xFF00
-#define CQE_STATUS_MASK 0xFF
-#define CQE_STATUS_ADDL_SHIFT 0x08
+#define CQE_STATUS_ADDL_SHIFT 8
+#define CQE_STATUS_MASK 0xFF
#define CQE_STATUS_WRB_MASK 0xFF0000
#define CQE_STATUS_WRB_SHIFT 16
+
#define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000)
#define BEISCSI_FW_MBX_TIMEOUT 100
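/*
 * With the corrected EXTD shift above, a completion's 32-bit status word
 * splits as in this sketch (mirrors the decode in the completion handler):
 */
static inline void example_split_cqe_status(u32 status, u16 *compl, u16 *extd)
{
	*compl = (status >> CQE_STATUS_COMPL_SHIFT) & CQE_STATUS_COMPL_MASK;
	*extd = (status >> CQE_STATUS_EXTD_SHIFT) & CQE_STATUS_EXTD_MASK;
}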
@@ -119,13 +120,22 @@ struct be_mcc_compl {
#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
#define ASYNC_EVENT_CODE_LINK_STATE 0x1
#define ASYNC_EVENT_CODE_ISCSI 0x4
+#define ASYNC_EVENT_CODE_SLI 0x11
#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
-#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xF
+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
+
+/* iSCSI events */
#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4
#define ASYNC_EVENT_NEW_ISCSI_CONN 0x5
#define ASYNC_EVENT_NEW_TCP_CONN 0x7
+/* SLI events */
+#define ASYNC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
+#define ASYNC_SLI_LINK_EFFECT_VALID(le) (le & 0x80)
+#define ASYNC_SLI_LINK_EFFECT_SEV(le) ((le >> 1) & 0x03)
+#define ASYNC_SLI_LINK_EFFECT_STATE(le) (le & 0x01)
+
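/*
 * Sketch of consuming the link-effect byte with the accessors above; that
 * the byte is taken from event_data1 of the misconfigured-port event is an
 * assumption here, as is the helper name.
 */
static void example_sli_link_effect(u8 le)
{
	if (!ASYNC_SLI_LINK_EFFECT_VALID(le))
		return;
	pr_info("severity %d, link %s\n",
		ASYNC_SLI_LINK_EFFECT_SEV(le),
		ASYNC_SLI_LINK_EFFECT_STATE(le) ? "up" : "down");
}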
struct be_async_event_trailer {
u32 code;
};
@@ -133,7 +143,6 @@ struct be_async_event_trailer {
enum {
ASYNC_EVENT_LINK_DOWN = 0x0,
ASYNC_EVENT_LINK_UP = 0x1,
- ASYNC_EVENT_LOGICAL = 0x2
};
/**
@@ -143,16 +152,39 @@ enum {
struct be_async_event_link_state {
u8 physical_port;
u8 port_link_status;
+/**
+ * ASYNC_EVENT_LINK_DOWN 0x0
+ * ASYNC_EVENT_LINK_UP 0x1
+ * ASYNC_EVENT_LINK_LOGICAL_DOWN 0x2
+ * ASYNC_EVENT_LINK_LOGICAL_UP 0x3
+ */
+#define BE_ASYNC_LINK_UP_MASK 0x01
u8 port_duplex;
u8 port_speed;
-#define BEISCSI_PHY_LINK_FAULT_NONE 0x00
-#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01
-#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02
+/* BE2ISCSI_LINK_SPEED_ZERO 0x00 - no link */
+#define BE2ISCSI_LINK_SPEED_10MBPS 0x01
+#define BE2ISCSI_LINK_SPEED_100MBPS 0x02
+#define BE2ISCSI_LINK_SPEED_1GBPS 0x03
+#define BE2ISCSI_LINK_SPEED_10GBPS 0x04
+#define BE2ISCSI_LINK_SPEED_25GBPS 0x06
+#define BE2ISCSI_LINK_SPEED_40GBPS 0x07
u8 port_fault;
- u8 rsvd0[7];
+ u8 event_reason;
+ u16 qos_link_speed;
+ u32 event_tag;
struct be_async_event_trailer trailer;
} __packed;
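/*
 * With the logical link states folded in above, bit 0 alone distinguishes
 * up from down; a one-line sketch:
 */
static inline bool example_link_is_up(struct be_async_event_link_state *evt)
{
	/* 0x1 (up) and 0x3 (logical up) both have bit 0 set */
	return evt->port_link_status & BE_ASYNC_LINK_UP_MASK;
}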
+/**
+ * When the async trailer carries an SLI event, mcc_compl is
+ * interpreted as follows:
+ */
+struct be_async_event_sli {
+ u32 event_data1;
+ u32 event_data2;
+ u32 reserved;
+ u32 trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_compl compl;
@@ -172,6 +204,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_MCC_CREATE_EXT 90
#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS 24
#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS 25
#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
@@ -183,6 +216,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_EQ_DESTROY 55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
#define OPCODE_COMMON_FUNCTION_RESET 61
+#define OPCODE_COMMON_GET_PORT_NAME 77
/**
* LIST of opcodes that are common between Initiator and Target
@@ -587,10 +621,11 @@ struct amap_mcc_context {
u8 rsvd2[32];
} __packed;
-struct be_cmd_req_mcc_create {
+struct be_cmd_req_mcc_create_ext {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u16 rsvd0;
+ u32 async_evt_bitmap;
u8 context[sizeof(struct amap_mcc_context) / 8];
struct phys_addr pages[8];
} __packed;
@@ -653,20 +688,6 @@ struct be_cmd_req_modify_eq_delay {
/******************** Get MAC ADDR *******************/
-#define ETH_ALEN 6
-
-struct be_cmd_get_nic_conf_req {
- struct be_cmd_req_hdr hdr;
- u32 nic_port_count;
- u32 speed;
- u32 max_speed;
- u32 link_state;
- u32 max_frame_size;
- u16 size_of_structure;
- u8 mac_address[ETH_ALEN];
- u32 rsvd[23];
-};
-
struct be_cmd_get_nic_conf_resp {
struct be_cmd_resp_hdr hdr;
u32 nic_port_count;
@@ -675,9 +696,8 @@ struct be_cmd_get_nic_conf_resp {
u32 link_state;
u32 max_frame_size;
u16 size_of_structure;
- u8 mac_address[6];
- u32 rsvd[23];
-};
+ u8 mac_address[ETH_ALEN];
+} __packed;
#define BEISCSI_ALIAS_LEN 32
@@ -689,29 +709,6 @@ struct be_cmd_hba_name {
u8 initiator_alias[BEISCSI_ALIAS_LEN];
} __packed;
-struct be_cmd_ntwk_link_status_req {
- struct be_cmd_req_hdr hdr;
- u32 rsvd0;
-} __packed;
-
-/*** Port Speed Values ***/
-#define BE2ISCSI_LINK_SPEED_ZERO 0x00
-#define BE2ISCSI_LINK_SPEED_10MBPS 0x01
-#define BE2ISCSI_LINK_SPEED_100MBPS 0x02
-#define BE2ISCSI_LINK_SPEED_1GBPS 0x03
-#define BE2ISCSI_LINK_SPEED_10GBPS 0x04
-struct be_cmd_ntwk_link_status_resp {
- struct be_cmd_resp_hdr hdr;
- u8 phys_port;
- u8 mac_duplex;
- u8 mac_speed;
- u8 mac_fault;
- u8 mgmt_mac_duplex;
- u8 mgmt_mac_speed;
- u16 qos_link_speed;
- u32 logical_link_speed;
-} __packed;
-
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay);
@@ -730,28 +727,28 @@ int be_poll_mcc(struct be_ctrl_info *ctrl);
int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
-unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
-void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
int num);
-int beiscsi_mccq_compl(struct beiscsi_hba *phba,
- uint32_t tag, struct be_mcc_wrb **wrb,
- struct be_dma_mem *mbx_cmd_mem);
+int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ struct be_dma_mem *mbx_cmd_mem);
-/*ISCSI Functuions */
+/* iSCSI Functions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
-struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba);
-int be_mcc_notify_wait(struct beiscsi_hba *phba);
-void be_mcc_notify(struct beiscsi_hba *phba);
-unsigned int alloc_mcc_tag(struct beiscsi_hba *phba);
-void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
- struct be_async_event_link_state *evt);
-int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
- struct be_mcc_compl *compl);
+int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag);
+void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag);
+struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
+ unsigned int *ref_tag);
+void beiscsi_process_async_event(struct beiscsi_hba *phba,
+ struct be_mcc_compl *compl);
+int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
+ struct be_mcc_compl *compl);
+
int be_mbox_notify(struct be_ctrl_info *ctrl);
@@ -777,8 +774,6 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
struct hwi_wrb_context *pwrb_context,
uint8_t ulp_num);
-bool is_link_state_evt(u32 trailer);
-
/* Configuration Functions */
int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
@@ -1137,6 +1132,21 @@ struct be_cmd_get_all_if_id_req {
u32 if_hndl_list[1];
} __packed;
+struct be_cmd_get_port_name {
+ union {
+ struct be_cmd_req_hdr req_hdr;
+ struct be_cmd_resp_hdr resp_hdr;
+ } h;
+ union {
+ struct {
+ u32 reserved;
+ } req;
+ struct {
+ u32 port_names;
+ } resp;
+ } p;
+} __packed;
+
#define ISCSI_OPCODE_SCSI_DATA_OUT 5
#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
#define OPCODE_COMMON_MODIFY_EQ_DELAY 41
@@ -1367,5 +1377,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
u8 subsystem, u8 opcode, int cmd_len);
-void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);
+void beiscsi_fail_session(struct iscsi_cls_session *cls_session);
#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 022e87b62e40..09f89a3eaa87 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -367,13 +367,14 @@ beiscsi_set_vlan_tag(struct Scsi_Host *shost,
struct iscsi_iface_param_info *iface_param)
{
struct beiscsi_hba *phba = iscsi_host_priv(shost);
- int ret = 0;
+ int ret;
/* Get the Interface Handle */
- if (mgmt_get_all_if_id(phba)) {
+ ret = mgmt_get_all_if_id(phba);
+ if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Getting Interface Handle Failed\n");
- return -EIO;
+ return ret;
}
switch (iface_param->param) {
@@ -465,6 +466,10 @@ beiscsi_set_ipv6(struct Scsi_Host *shost,
ret = mgmt_set_ip(phba, iface_param, NULL,
ISCSI_BOOTPROTO_STATIC);
break;
+ case ISCSI_NET_PARAM_VLAN_ENABLED:
+ case ISCSI_NET_PARAM_VLAN_TAG:
+ ret = beiscsi_set_vlan_tag(shost, iface_param);
+ break;
default:
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : Param %d not supported\n",
@@ -730,7 +735,7 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
return -EBUSY;
}
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -753,7 +758,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?
+ ihost->port_state = (phba->state & BE_ADAPTER_LINK_UP) ?
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
}
@@ -761,34 +766,13 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
* beiscsi_get_port_speed - Get the Port Speed from Adapter
* @shost : pointer to scsi_host structure
*
- * returns Success/Failure
*/
-static int beiscsi_get_port_speed(struct Scsi_Host *shost)
+static void beiscsi_get_port_speed(struct Scsi_Host *shost)
{
- int rc;
- unsigned int tag;
- struct be_mcc_wrb *wrb;
- struct be_cmd_ntwk_link_status_resp *resp;
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- tag = be_cmd_get_port_speed(phba);
- if (!tag) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Getting Port Speed Failed\n");
-
- return -EBUSY;
- }
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
- if (rc) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : Port Speed MBX Failed\n");
- return rc;
- }
- resp = embedded_payload(wrb);
-
- switch (resp->mac_speed) {
+ switch (phba->port_speed) {
case BE2ISCSI_LINK_SPEED_10MBPS:
ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;
break;
@@ -801,10 +785,15 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
case BE2ISCSI_LINK_SPEED_10GBPS:
ihost->port_speed = ISCSI_PORT_SPEED_10GBPS;
break;
+ case BE2ISCSI_LINK_SPEED_25GBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_25GBPS;
+ break;
+ case BE2ISCSI_LINK_SPEED_40GBPS:
+ ihost->port_speed = ISCSI_PORT_SPEED_40GBPS;
+ break;
default:
ihost->port_speed = ISCSI_PORT_SPEED_UNKNOWN;
}
- return 0;
}
/**
@@ -854,12 +843,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
status = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
break;
case ISCSI_HOST_PARAM_PORT_SPEED:
- status = beiscsi_get_port_speed(shost);
- if (status) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Retreiving Port Speed Failed\n");
- return status;
- }
+ beiscsi_get_port_speed(shost);
status = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
break;
default:
@@ -1159,7 +1143,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
return -EAGAIN;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -1293,7 +1277,7 @@ static void beiscsi_flush_cq(struct beiscsi_hba *phba)
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
irq_poll_disable(&pbe_eq->iopoll);
- beiscsi_process_cq(pbe_eq);
+ beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
irq_poll_enable(&pbe_eq->iopoll);
}
}
@@ -1318,7 +1302,7 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
ret = -EAGAIN;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
/* Flush the CQ entries */
beiscsi_flush_cq(phba);
@@ -1393,7 +1377,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_ep->ep_cid);
}
- beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
free_ep:
msleep(BEISCSI_LOGOUT_SYNC_DELAY);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index cb9072a841be..f05e7737107d 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -29,6 +29,7 @@
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
+#include <linux/irq_poll.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
@@ -285,7 +286,7 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
return FAILED;
}
- rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (rc != -EBUSY)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -366,7 +367,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
return FAILED;
}
- rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (rc != -EBUSY)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -727,9 +728,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
- spin_lock_init(&ctrl->mbox_lock);
+ mutex_init(&ctrl->mbox_lock);
spin_lock_init(&phba->ctrl.mcc_lock);
- spin_lock_init(&phba->ctrl.mcc_cq_lock);
return status;
}
@@ -895,31 +895,17 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
struct beiscsi_hba *phba;
- struct be_eq_entry *eqe = NULL;
struct be_queue_info *eq;
- struct be_queue_info *cq;
- unsigned int num_eq_processed;
struct be_eq_obj *pbe_eq;
pbe_eq = dev_id;
eq = &pbe_eq->q;
- cq = pbe_eq->cq;
- eqe = queue_tail_node(eq);
phba = pbe_eq->phba;
- num_eq_processed = 0;
- while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
- & EQE_VALID_MASK) {
- irq_poll_sched(&pbe_eq->iopoll);
- AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
- queue_tail_inc(eq);
- eqe = queue_tail_node(eq);
- num_eq_processed++;
- }
-
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+ /* disable interrupt till iopoll completes */
+ hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
+ irq_poll_sched(&pbe_eq->iopoll);
return IRQ_HANDLED;
}
@@ -996,6 +982,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
return IRQ_NONE;
}
+
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
struct pci_dev *pcidev = phba->pcidev;
@@ -1070,7 +1057,7 @@ free_msix_irqs:
void hwi_ring_cq_db(struct beiscsi_hba *phba,
unsigned int id, unsigned int num_processed,
- unsigned char rearm, unsigned char event)
+ unsigned char rearm)
{
u32 val = 0;
@@ -1145,6 +1132,7 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ spin_lock_bh(&phba->io_sgl_lock);
if (phba->io_sgl_hndl_avbl) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In alloc_io_sgl_handle,"
@@ -1162,12 +1150,14 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
phba->io_sgl_alloc_index++;
} else
psgl_handle = NULL;
+ spin_unlock_bh(&phba->io_sgl_lock);
return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
+ spin_lock_bh(&phba->io_sgl_lock);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In free_,io_sgl_free_index=%d\n",
phba->io_sgl_free_index);
@@ -1182,6 +1172,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"value there=%p\n", phba->io_sgl_free_index,
phba->io_sgl_hndl_base
[phba->io_sgl_free_index]);
+ spin_unlock_bh(&phba->io_sgl_lock);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -1190,6 +1181,25 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->io_sgl_free_index = 0;
else
phba->io_sgl_free_index++;
+ spin_unlock_bh(&phba->io_sgl_lock);
+}
+
+static inline struct wrb_handle *
+beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
+ unsigned int wrbs_per_cxn)
+{
+ struct wrb_handle *pwrb_handle;
+
+ spin_lock_bh(&pwrb_context->wrb_lock);
+ pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
+ pwrb_context->wrb_handles_available--;
+ if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
+ pwrb_context->alloc_index = 0;
+ else
+ pwrb_context->alloc_index++;
+ spin_unlock_bh(&pwrb_context->wrb_lock);
+
+ return pwrb_handle;
}
/**
@@ -1201,30 +1211,32 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
* This happens under session_lock until submission to chip
*/
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
- struct hwi_wrb_context **pcontext)
+ struct hwi_wrb_context **pcontext)
{
struct hwi_wrb_context *pwrb_context;
struct hwi_controller *phwi_ctrlr;
- struct wrb_handle *pwrb_handle;
uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
phwi_ctrlr = phba->phwi_ctrlr;
pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
- if (pwrb_context->wrb_handles_available >= 2) {
- pwrb_handle = pwrb_context->pwrb_handle_base[
- pwrb_context->alloc_index];
- pwrb_context->wrb_handles_available--;
- if (pwrb_context->alloc_index ==
- (phba->params.wrbs_per_cxn - 1))
- pwrb_context->alloc_index = 0;
- else
- pwrb_context->alloc_index++;
+ /* return the context address */
+ *pcontext = pwrb_context;
+ return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
+}
- /* Return the context address */
- *pcontext = pwrb_context;
- } else
- pwrb_handle = NULL;
- return pwrb_handle;
+static inline void
+beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
+ struct wrb_handle *pwrb_handle,
+ unsigned int wrbs_per_cxn)
+{
+ spin_lock_bh(&pwrb_context->wrb_lock);
+ pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
+ pwrb_context->wrb_handles_available++;
+ if (pwrb_context->free_index == (wrbs_per_cxn - 1))
+ pwrb_context->free_index = 0;
+ else
+ pwrb_context->free_index++;
+ spin_unlock_bh(&pwrb_context->wrb_lock);
}
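/*
 * get/put above treat the handle array as a fixed-size ring; the index
 * advance both sides use is equivalent to this sketch:
 */
static inline unsigned int example_ring_advance(unsigned int idx,
						unsigned int ring_size)
{
	return (idx == ring_size - 1) ? 0 : idx + 1;
}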
/**
@@ -1239,13 +1251,9 @@ static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
struct wrb_handle *pwrb_handle)
{
- pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
- pwrb_context->wrb_handles_available++;
- if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
- pwrb_context->free_index = 0;
- else
- pwrb_context->free_index++;
-
+ beiscsi_put_wrb_handle(pwrb_context,
+ pwrb_handle,
+ phba->params.wrbs_per_cxn);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
"BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
@@ -1258,6 +1266,7 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ spin_lock_bh(&phba->mgmt_sgl_lock);
if (phba->eh_sgl_hndl_avbl) {
psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1275,13 +1284,14 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
phba->eh_sgl_alloc_index++;
} else
psgl_handle = NULL;
+ spin_unlock_bh(&phba->mgmt_sgl_lock);
return psgl_handle;
}
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
-
+ spin_lock_bh(&phba->mgmt_sgl_lock);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BM_%d : In free_mgmt_sgl_handle,"
"eh_sgl_free_index=%d\n",
@@ -1296,6 +1306,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"BM_%d : Double Free in eh SGL ,"
"eh_sgl_free_index=%d\n",
phba->eh_sgl_free_index);
+ spin_unlock_bh(&phba->mgmt_sgl_lock);
return;
}
phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1305,6 +1316,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->eh_sgl_free_index = 0;
else
phba->eh_sgl_free_index++;
+ spin_unlock_bh(&phba->mgmt_sgl_lock);
}
static void
@@ -2029,7 +2041,7 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
phwi_ctrlr, cri_index));
}
-static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
+void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
struct be_queue_info *mcc_cq;
struct be_mcc_compl *mcc_compl;
@@ -2039,31 +2051,15 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
mcc_compl = queue_tail_node(mcc_cq);
mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
-
if (num_processed >= 32) {
hwi_ring_cq_db(phba, mcc_cq->id,
- num_processed, 0, 0);
+ num_processed, 0);
num_processed = 0;
}
if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
- /* Interpret flags as an async trailer */
- if (is_link_state_evt(mcc_compl->flags))
- /* Interpret compl as a async link evt */
- beiscsi_async_link_state_process(phba,
- (struct be_async_event_link_state *) mcc_compl);
- else {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
- "BM_%d : Unsupported Async Event, flags"
- " = 0x%08x\n",
- mcc_compl->flags);
- if (phba->state & BE_ADAPTER_LINK_UP) {
- phba->state |= BE_ADAPTER_CHECK_BOOT;
- phba->get_boot = BE_GET_BOOT_RETRIES;
- }
- }
+ beiscsi_process_async_event(phba, mcc_compl);
} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
- atomic_dec(&phba->ctrl.mcc_obj.q.used);
+ beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
}
mcc_compl->flags = 0;
@@ -2074,24 +2070,24 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
}
if (num_processed > 0)
- hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
-
+ hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}
/**
* beiscsi_process_cq()- Process the Completion Queue
* @pbe_eq: Event Q on which the Completion has come
+ * @budget: Max number of events to be processed
*
* return
* Number of Completion Entries processed.
**/
-unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
struct be_queue_info *cq;
struct sol_cqe *sol;
struct dmsg_cqe *dmsg;
+ unsigned int total = 0;
unsigned int num_processed = 0;
- unsigned int tot_nump = 0;
unsigned short code = 0, cid = 0;
uint16_t cri_index = 0;
struct beiscsi_conn *beiscsi_conn;
@@ -2142,12 +2138,12 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
beiscsi_ep = ep->dd_data;
beiscsi_conn = beiscsi_ep->conn;
- if (num_processed >= 32) {
- hwi_ring_cq_db(phba, cq->id,
- num_processed, 0, 0);
- tot_nump += num_processed;
+ /* replenish cq */
+ if (num_processed == 32) {
+ hwi_ring_cq_db(phba, cq->id, 32, 0);
num_processed = 0;
}
+ total++;
switch (code) {
case SOL_CMD_COMPLETE:
@@ -2192,7 +2188,13 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
"BM_%d : Ignoring %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
break;
+ case CXN_KILLED_HDR_DIGEST_ERR:
case SOL_CMD_KILLED_DATA_DIGEST_ERR:
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+ "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
+ break;
case CMD_KILLED_INVALID_STATSN_RCVD:
case CMD_KILLED_INVALID_R2T_RCVD:
case CMD_CXN_KILLED_LUN_INVALID:
@@ -2218,7 +2220,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
case CXN_KILLED_BURST_LEN_MISMATCH:
case CXN_KILLED_AHS_RCVD:
- case CXN_KILLED_HDR_DIGEST_ERR:
case CXN_KILLED_UNKNOWN_HDR:
case CXN_KILLED_STALE_ITT_TTT_RCVD:
case CXN_KILLED_INVALID_ITT_TTT_RCVD:
@@ -2253,13 +2254,12 @@ proc_next_cqe:
queue_tail_inc(cq);
sol = queue_tail_node(cq);
num_processed++;
+ if (total == budget)
+ break;
}
- if (num_processed > 0) {
- tot_nump += num_processed;
- hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
- }
- return tot_nump;
+ hwi_ring_cq_db(phba, cq->id, num_processed, 1);
+ return total;
}
void beiscsi_process_all_cqs(struct work_struct *work)
@@ -2279,14 +2279,14 @@ void beiscsi_process_all_cqs(struct work_struct *work)
spin_lock_irqsave(&phba->isr_lock, flags);
pbe_eq->todo_mcc_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
- beiscsi_process_mcc_isr(phba);
+ beiscsi_process_mcc_cq(phba);
}
if (pbe_eq->todo_cq) {
spin_lock_irqsave(&phba->isr_lock, flags);
pbe_eq->todo_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
- beiscsi_process_cq(pbe_eq);
+ beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
}
/* rearm EQ for further interrupts */
@@ -2295,20 +2295,36 @@ void beiscsi_process_all_cqs(struct work_struct *work)
static int be_iopoll(struct irq_poll *iop, int budget)
{
- unsigned int ret;
+ unsigned int ret, num_eq_processed;
struct beiscsi_hba *phba;
struct be_eq_obj *pbe_eq;
+ struct be_eq_entry *eqe = NULL;
+ struct be_queue_info *eq;
+ num_eq_processed = 0;
pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
- ret = beiscsi_process_cq(pbe_eq);
+ phba = pbe_eq->phba;
+ eq = &pbe_eq->q;
+ eqe = queue_tail_node(eq);
+
+ while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
+ EQE_VALID_MASK) {
+ AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+ queue_tail_inc(eq);
+ eqe = queue_tail_node(eq);
+ num_eq_processed++;
+ }
+
+ hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+
+ ret = beiscsi_process_cq(pbe_eq, budget);
pbe_eq->cq_count += ret;
if (ret < budget) {
- phba = pbe_eq->phba;
irq_poll_complete(iop);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : rearm pbe_eq->q.id =%d\n",
- pbe_eq->q.id);
+ "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
+ pbe_eq->q.id, ret);
hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}
return ret;
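/*
 * be_iopoll() now follows the irq_poll contract: drain the EQ, process up
 * to @budget CQEs, and complete/re-arm only when under budget. Skeleton of
 * that contract (example_drain_cq is a hypothetical stand-in for the CQ
 * processing step):
 */
static int example_drain_cq(struct irq_poll *iop, int budget); /* hypothetical */

static int example_iopoll(struct irq_poll *iop, int budget)
{
	int done = example_drain_cq(iop, budget);

	if (done < budget) {
		irq_poll_complete(iop);
		/* re-enable/re-arm the interrupt source only here */
	}
	return done;	/* returning budget keeps the poller scheduled */
}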
@@ -2502,7 +2518,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
* @pwrb: ptr to the WRB entry
* @task: iscsi task which is to be executed
**/
-static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
+static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
struct iscsi_sge *psgl;
struct beiscsi_io_task *io_task = task->dd_data;
@@ -2534,6 +2550,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
task->data,
task->data_count,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(phba->pcidev,
+ io_task->mtask_addr))
+ return -ENOMEM;
io_task->mtask_data_count = task->data_count;
} else
io_task->mtask_addr = 0;
@@ -2578,6 +2597,7 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
}
AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+ return 0;
}
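/*
 * The mapping-error check added above is the mandatory DMA API pattern;
 * a generic sketch (helper name illustrative):
 */
static int example_map_single(struct pci_dev *pdev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *handle))
		return -ENOMEM;	/* never hand an unchecked handle to HW */
	return 0;
}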
/**
@@ -2706,8 +2726,10 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
phba->params.cxns_per_ctrl,
GFP_KERNEL);
- if (!phwi_ctrlr->wrb_context)
+ if (!phwi_ctrlr->wrb_context) {
+ kfree(phba->phwi_ctrlr);
return -ENOMEM;
+ }
phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
GFP_KERNEL);
@@ -2904,6 +2926,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
}
num_cxn_wrbh--;
}
+ spin_lock_init(&pwrb_context->wrb_lock);
}
idx = 0;
for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
@@ -3866,6 +3889,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
phwi_context->min_eqd = 0;
phwi_context->cur_eqd = 0;
be_cmd_fw_initialize(&phba->ctrl);
+ /* set optic state to unknown */
+ phba->optic_state = 0xff;
status = beiscsi_create_eqs(phba, phwi_context);
if (status != 0) {
@@ -4384,7 +4409,7 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
goto boot_freemem;
}
- ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
+ ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -4468,6 +4493,7 @@ put_shost:
scsi_host_put(phba->shost);
free_kset:
iscsi_boot_destroy_kset(phba->boot_kset);
+ phba->boot_kset = NULL;
return -ENOMEM;
}
@@ -4607,11 +4633,9 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
}
if (io_task->psgl_handle) {
- spin_lock_bh(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba,
io_task->psgl_handle);
io_task->psgl_handle = NULL;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
}
if (io_task->mtask_addr) {
@@ -4657,9 +4681,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
}
if (io_task->psgl_handle) {
- spin_lock(&phba->io_sgl_lock);
free_io_sgl_handle(phba, io_task->psgl_handle);
- spin_unlock(&phba->io_sgl_lock);
io_task->psgl_handle = NULL;
}
@@ -4714,6 +4736,20 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
iowrite32(doorbell, phba->db_va +
beiscsi_conn->doorbell_offset);
+
+ /*
+ * There is no completion for CONTEXT_UPDATE. The completion of the
+ * next WRB posted guarantees that FW has processed and DMA'd it.
+ * Use beiscsi_put_wrb_handle to return it to the pool, which ensures
+ * the WRB is zeroed or reused only after wrbs_per_cxn other WRBs.
+ */
+ beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
+ phba->params.wrbs_per_cxn);
+ beiscsi_log(phba, KERN_INFO,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
+ pwrb_handle, pwrb_context->free_index,
+ pwrb_context->wrb_handles_available);
}
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
@@ -4761,9 +4797,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->pwrb_handle = NULL;
if (task->sc) {
- spin_lock(&phba->io_sgl_lock);
io_task->psgl_handle = alloc_io_sgl_handle(phba);
- spin_unlock(&phba->io_sgl_lock);
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
@@ -4788,10 +4822,8 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
beiscsi_conn->task = task;
if (!beiscsi_conn->login_in_progress) {
- spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = (struct sgl_handle *)
alloc_mgmt_sgl_handle(phba);
- spin_unlock(&phba->mgmt_sgl_lock);
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
@@ -4830,9 +4862,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
beiscsi_conn->plogin_wrb_handle;
}
} else {
- spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
- spin_unlock(&phba->mgmt_sgl_lock);
if (!io_task->psgl_handle) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO |
@@ -4867,15 +4897,11 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
return 0;
free_io_hndls:
- spin_lock(&phba->io_sgl_lock);
free_io_sgl_handle(phba, io_task->psgl_handle);
- spin_unlock(&phba->io_sgl_lock);
goto free_hndls;
free_mgmt_hndls:
- spin_lock(&phba->mgmt_sgl_lock);
free_mgmt_sgl_handle(phba, io_task->psgl_handle);
io_task->psgl_handle = NULL;
- spin_unlock(&phba->mgmt_sgl_lock);
free_hndls:
phwi_ctrlr = phba->phwi_ctrlr;
cri_index = BE_GET_CRI_FROM_CID(
@@ -4903,7 +4929,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
pwrb = io_task->pwrb_handle->pwrb;
- io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
io_task->bhs_len = sizeof(struct be_cmd_bhs);
if (writedir) {
@@ -4964,7 +4989,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
unsigned int doorbell = 0;
pwrb = io_task->pwrb_handle->pwrb;
- io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
io_task->bhs_len = sizeof(struct be_cmd_bhs);
if (writedir) {
@@ -5023,6 +5047,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
unsigned int doorbell = 0;
unsigned int cid;
unsigned int pwrb_typeoffset = 0;
+ int ret = 0;
cid = beiscsi_conn->beiscsi_conn_cid;
pwrb = io_task->pwrb_handle->pwrb;
@@ -5071,7 +5096,7 @@ static int beiscsi_mtask(struct iscsi_task *task)
case ISCSI_OP_LOGIN:
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_NOOP_OUT:
if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
@@ -5091,19 +5116,19 @@ static int beiscsi_mtask(struct iscsi_task *task)
AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
dmsg, pwrb, 0);
}
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_TEXT:
ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_LOGOUT:
ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
- hwi_write_buffer(pwrb, task);
+ ret = hwi_write_buffer(pwrb, task);
break;
default:
@@ -5114,6 +5139,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
return -EINVAL;
}
+ if (ret)
+ return ret;
+
/* Set the task type */
io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
@@ -5132,23 +5160,21 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
{
struct beiscsi_io_task *io_task = task->dd_data;
struct scsi_cmnd *sc = task->sc;
- struct beiscsi_hba *phba = NULL;
+ struct beiscsi_hba *phba;
struct scatterlist *sg;
int num_sg;
unsigned int writedir = 0, xferlen = 0;
- phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
+ if (!io_task->conn->login_in_progress)
+ task->hdr->exp_statsn = 0;
if (!sc)
return beiscsi_mtask(task);
io_task->scsi_cmnd = sc;
num_sg = scsi_dma_map(sc);
+ phba = io_task->conn->phba;
if (num_sg < 0) {
- struct iscsi_conn *conn = task->conn;
- struct beiscsi_hba *phba = NULL;
-
- phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
"BM_%d : scsi_dma_map Failed "
@@ -5211,12 +5237,13 @@ static int beiscsi_bsg_request(struct bsg_job *job)
rc = wait_event_interruptible_timeout(
phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag],
+ phba->ctrl.mcc_tag_status[tag],
msecs_to_jiffies(
BEISCSI_HOST_MBX_TIMEOUT));
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- free_mcc_tag(&phba->ctrl, tag);
+ extd_status = (phba->ctrl.mcc_tag_status[tag] &
+ CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
+ status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
+ free_mcc_wrb(&phba->ctrl, tag);
resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt,
@@ -5277,15 +5304,12 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
if (phba->msix_enabled) {
for (i = 0; i <= phba->num_cpus; i++) {
msix_vec = phba->msix_entries[i].vector;
- synchronize_irq(msix_vec);
free_irq(msix_vec, &phwi_context->be_eq[i]);
kfree(phba->msi_name[i]);
}
} else
- if (phba->pcidev->irq) {
- synchronize_irq(phba->pcidev->irq);
+ if (phba->pcidev->irq)
free_irq(phba->pcidev->irq, phba);
- }
pci_disable_msix(phba->pcidev);
cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
@@ -5313,7 +5337,6 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
static void beiscsi_remove(struct pci_dev *pcidev)
{
-
struct beiscsi_hba *phba = NULL;
phba = pci_get_drvdata(pcidev);
@@ -5323,9 +5346,9 @@ static void beiscsi_remove(struct pci_dev *pcidev)
}
beiscsi_destroy_def_ifaces(phba);
- beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
iscsi_boot_destroy_kset(phba->boot_kset);
iscsi_host_remove(phba->shost);
+ beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
pci_disable_pcie_error_reporting(pcidev);
@@ -5334,23 +5357,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
pci_disable_device(pcidev);
}
-static void beiscsi_shutdown(struct pci_dev *pcidev)
-{
-
- struct beiscsi_hba *phba = NULL;
-
- phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
- if (!phba) {
- dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
- return;
- }
-
- phba->state = BE_ADAPTER_STATE_SHUTDOWN;
- iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session);
- beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
- pci_disable_device(pcidev);
-}
-
static void beiscsi_msix_enable(struct beiscsi_hba *phba)
{
int i, status;
@@ -5413,7 +5419,7 @@ static void be_eqd_update(struct beiscsi_hba *phba)
if (num) {
tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
if (tag)
- beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
}
}
@@ -5564,11 +5570,17 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
phba->shost->max_id = phba->params.cxns_per_ctrl;
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = hwi_init_controller(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : beiscsi_eeh_resume -"
+ "Failed to initialize beiscsi_hba.\n");
+ goto ret_err;
+ }
for (i = 0; i < MAX_MCC_CMD; i++) {
init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
phba->ctrl.mcc_tag[i] = i + 1;
- phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_status[i + 1] = 0;
phba->ctrl.mcc_tag_available++;
}
@@ -5670,6 +5682,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
goto hba_free;
}
+ /*
+ * FUNCTION_RESET should clean up any stale info in FW for this fn
+ */
ret = beiscsi_cmd_reset_function(phba);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5693,6 +5708,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
"BM_%d : Error getting fw config\n");
goto free_port;
}
+ mgmt_get_port_name(&phba->ctrl, phba);
+ beiscsi_get_params(phba);
if (enable_msix)
find_num_cpus(phba);
@@ -5710,7 +5727,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
}
phba->shost->max_id = phba->params.cxns_per_ctrl;
- beiscsi_get_params(phba);
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
if (ret < 0) {
@@ -5723,7 +5739,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
for (i = 0; i < MAX_MCC_CMD; i++) {
init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
phba->ctrl.mcc_tag[i] = i + 1;
- phba->ctrl.mcc_numtag[i + 1] = 0;
+ phba->ctrl.mcc_tag_status[i + 1] = 0;
phba->ctrl.mcc_tag_available++;
memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
sizeof(struct be_dma_mem));
@@ -5857,7 +5873,6 @@ static struct pci_driver beiscsi_pci_driver = {
.name = DRV_NAME,
.probe = beiscsi_dev_probe,
.remove = beiscsi_remove,
- .shutdown = beiscsi_shutdown,
.id_table = beiscsi_pci_id_table,
.err_handler = &beiscsi_eeh_handlers
};
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 5c67c0732241..30a4606d9a3b 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -36,7 +36,7 @@
#include <scsi/scsi_transport_iscsi.h>
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "10.6.0.1"
+#define BUILD_STR "11.0.0.0"
-#define BE_NAME "Emulex OneConnect" \
-"Open-iSCSI Driver version" BUILD_STR
+#define BE_NAME "Emulex OneConnect " \
+"Open-iSCSI Driver version " BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
@@ -63,6 +63,7 @@
#define BE2_SGE 32
#define BE2_DEFPDU_HDR_SZ 64
#define BE2_DEFPDU_DATA_SZ 8192
+#define BE2_MAX_NUM_CQ_PROC 512
#define MAX_CPUS 64
#define BEISCSI_MAX_NUM_CPUS 7
@@ -103,8 +104,7 @@
#define BE_ADAPTER_LINK_UP 0x001
#define BE_ADAPTER_LINK_DOWN 0x002
#define BE_ADAPTER_PCI_ERR 0x004
-#define BE_ADAPTER_STATE_SHUTDOWN 0x008
-#define BE_ADAPTER_CHECK_BOOT 0x010
+#define BE_ADAPTER_CHECK_BOOT 0x008
#define BEISCSI_CLEAN_UNLOAD 0x01
@@ -304,6 +304,7 @@ struct invalidate_command_table {
#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
(phwi_ctrlr->wrb_context[cri].ulp_num)
struct hwi_wrb_context {
+ spinlock_t wrb_lock;
struct list_head wrb_handle_list;
struct list_head wrb_handle_drvr_list;
struct wrb_handle **pwrb_handle_base;
@@ -398,7 +399,9 @@ struct beiscsi_hba {
* group together since they are used most frequently
* for cid to cri conversion
*/
+#define BEISCSI_PHYS_PORT_MAX 4
unsigned int phys_port;
+ /* valid values of phys_port id are 0, 1, 2, 3 */
unsigned int eqid_count;
unsigned int cqid_count;
unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT];
@@ -416,6 +419,7 @@ struct beiscsi_hba {
} fw_config;
unsigned int state;
+ u8 optic_state;
int get_boot;
bool fw_timeout;
bool ue_detected;
@@ -423,6 +427,8 @@ struct beiscsi_hba {
bool mac_addr_set;
u8 mac_address[ETH_ALEN];
+ u8 port_name;
+ u8 port_speed;
char fw_ver_str[BEISCSI_VER_STRLEN];
char wq_name[20];
struct workqueue_struct *wq; /* The actual work queue */
@@ -845,9 +851,10 @@ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
void hwi_ring_cq_db(struct beiscsi_hba *phba,
unsigned int id, unsigned int num_processed,
- unsigned char rearm, unsigned char event);
+ unsigned char rearm);
-unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq);
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget);
+void beiscsi_process_mcc_cq(struct beiscsi_hba *phba);
static inline bool beiscsi_error(struct beiscsi_hba *phba)
{
@@ -1074,12 +1081,14 @@ struct hwi_context_memory {
#define BEISCSI_LOG_CONFIG 0x0020 /* CONFIG Code Path */
#define BEISCSI_LOG_ISCSI 0x0040 /* SCSI/iSCSI Protocol related Logs */
+#define __beiscsi_log(phba, level, fmt, arg...) \
+ shost_printk(level, phba->shost, fmt, __LINE__, ##arg)
+
#define beiscsi_log(phba, level, mask, fmt, arg...) \
do { \
uint32_t log_value = phba->attr_log_enable; \
if (((mask) & log_value) || (level[1] <= '3')) \
- shost_printk(level, phba->shost, \
- fmt, __LINE__, ##arg); \
-} while (0)
+ __beiscsi_log(phba, level, fmt, ##arg); \
+} while (0)
#endif
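The reworked macro still gates output on two independent tests: the per-adapter mask in attr_log_enable, and the raw severity digit inside the level string (level[1] <= '3' keeps KERN_ERR and worse unconditional). A minimal user-space sketch of the same gating, with hypothetical mask values standing in for the BEISCSI_LOG_* constants and printf standing in for shost_printk:

#include <stdio.h>

#define LOG_INIT   0x0001                       /* stand-in for BEISCSI_LOG_INIT   */
#define LOG_CONFIG 0x0020                       /* stand-in for BEISCSI_LOG_CONFIG */

static unsigned int log_enable = LOG_CONFIG;    /* attr_log_enable analogue */

#define demo_log(level, mask, fmt, ...)                         \
do {                                                            \
        if (((mask) & log_enable) || ((level)[1] <= '3'))       \
                printf("%s " fmt, (level), ##__VA_ARGS__);      \
} while (0)

int main(void)
{
        demo_log("<6>", LOG_INIT,   "suppressed: INFO and mask bit clear\n");
        demo_log("<6>", LOG_CONFIG, "printed: mask bit set\n");
        demo_log("<3>", LOG_INIT,   "printed: severity <= 3 always logs\n");
        return 0;
}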
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index aea3e6b9477d..83926e221f1e 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -161,20 +161,17 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_req_modify_eq_delay *req;
- unsigned int tag = 0;
+ unsigned int tag;
int i;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
-
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
@@ -187,8 +184,8 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
cpu_to_le32(set_eqd[i].delay_multiplier);
}
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -209,22 +206,20 @@ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_reopen_session_req *req;
- unsigned int tag = 0;
+ unsigned int tag;
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In bescsi_get_boot_target\n");
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
@@ -234,8 +229,8 @@ unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
req->reopen_type = reopen_type;
req->session_handle = sess_handle;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -244,29 +239,27 @@ unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct be_cmd_get_boot_target_req *req;
- unsigned int tag = 0;
+ unsigned int tag;
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In bescsi_get_boot_target\n");
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
sizeof(struct be_cmd_get_boot_target_resp));
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -276,7 +269,7 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
- unsigned int tag = 0;
+ unsigned int tag;
struct be_cmd_get_session_req *req;
struct be_cmd_get_session_resp *resp;
struct be_sge *sge;
@@ -285,22 +278,17 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In beiscsi_get_session_info\n");
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
nonemb_cmd->size = sizeof(*resp);
req = nonemb_cmd->va;
memset(req, 0, sizeof(*req));
- wrb = wrb_from_mccq(phba);
sge = nonembedded_sgl(wrb);
- wrb->tag0 |= tag;
-
-
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
@@ -310,12 +298,54 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd->size);
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
/**
+ * mgmt_get_port_name()- Get port name for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the alphanumeric character assigned to this function's port.
+ *
+ **/
+int mgmt_get_port_name(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba)
+{
+ int ret = 0;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_get_port_name *ioctl;
+
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ memset(wrb, 0, sizeof(*wrb));
+ ioctl = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+ be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PORT_NAME,
+ EMBED_MBX_MAX_PAYLOAD_SIZE);
+ ret = be_mbox_notify(ctrl);
+ phba->port_name = 0;
+ if (!ret) {
+ phba->port_name = ioctl->p.resp.port_names >>
+ (phba->fw_config.phys_port * 8) & 0xff;
+ } else {
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
+ ret, ioctl->h.resp_hdr.status);
+ }
+
+ if (phba->port_name == 0)
+ phba->port_name = '?';
+
+ mutex_unlock(&ctrl->mbox_lock);
+ return ret;
+}
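The firmware reply packs one ASCII name byte per physical port into port_names, and the expression above selects the byte for this function's port (shift binds tighter than the & mask, so no extra parentheses are needed). A standalone check of that byte-select expression, with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical response: ports 0..3 named 'A'..'D', one byte each */
        uint32_t port_names = 'A' | 'B' << 8 | 'C' << 16 | 'D' << 24;
        unsigned int phys_port = 2;     /* valid ids are 0..BEISCSI_PHYS_PORT_MAX-1 */

        /* same shape as the driver expression */
        uint8_t name = port_names >> (phys_port * 8) & 0xff;

        printf("port %u -> '%c'\n", phys_port, name);   /* port 2 -> 'C' */
        return 0;
}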
+
+/**
* mgmt_get_fw_config()- Get the FW config for the function
* @ctrl: ptr to Ctrl Info
* @phba: ptr to the dev priv structure
@@ -331,91 +361,147 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_fw_cfg *req = embedded_payload(wrb);
- int status = 0;
+ struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
+ uint32_t cid_count, icd_count;
+ int status = -EINVAL;
+ uint8_t ulp_num = 0;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
+ be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
EMBED_MBX_MAX_PAYLOAD_SIZE);
- status = be_mbox_notify(ctrl);
- if (!status) {
- uint8_t ulp_num = 0;
- struct be_fw_cfg *pfw_cfg;
- pfw_cfg = req;
- if (!is_chip_be2_be3r(phba)) {
- phba->fw_config.eqid_count = pfw_cfg->eqid_count;
- phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+ if (be_mbox_notify(ctrl)) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : Failed in mgmt_get_fw_config\n");
+ goto fail_init;
+ }
- beiscsi_log(phba, KERN_INFO,
- BEISCSI_LOG_INIT,
- "BG_%d : EQ_Count : %d CQ_Count : %d\n",
- phba->fw_config.eqid_count,
+ /* FW response formats depend on port id */
+ phba->fw_config.phys_port = pfw_cfg->phys_port;
+ if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid physical port id %d\n",
+ phba->fw_config.phys_port);
+ goto fail_init;
+ }
+
+ /* populate and check FW config against min and max values */
+ if (!is_chip_be2_be3r(phba)) {
+ phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+ phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+ if (phba->fw_config.eqid_count == 0 ||
+ phba->fw_config.eqid_count > 2048) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid EQ count %d\n",
+ phba->fw_config.eqid_count);
+ goto fail_init;
+ }
+ if (phba->fw_config.cqid_count == 0 ||
+ phba->fw_config.cqid_count > 4096) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : invalid CQ count %d\n",
phba->fw_config.cqid_count);
+ goto fail_init;
}
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+ phba->fw_config.eqid_count,
+ phba->fw_config.cqid_count);
+ }
- for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
- if (pfw_cfg->ulp[ulp_num].ulp_mode &
- BEISCSI_ULP_ISCSI_INI_MODE)
- set_bit(ulp_num,
- &phba->fw_config.ulp_supported);
-
- phba->fw_config.phys_port = pfw_cfg->phys_port;
- for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
- if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
- phba->fw_config.iscsi_cid_start[ulp_num] =
- pfw_cfg->ulp[ulp_num].sq_base;
- phba->fw_config.iscsi_cid_count[ulp_num] =
- pfw_cfg->ulp[ulp_num].sq_count;
-
- phba->fw_config.iscsi_icd_start[ulp_num] =
- pfw_cfg->ulp[ulp_num].icd_base;
- phba->fw_config.iscsi_icd_count[ulp_num] =
- pfw_cfg->ulp[ulp_num].icd_count;
-
- phba->fw_config.iscsi_chain_start[ulp_num] =
- pfw_cfg->chain_icd[ulp_num].chain_base;
- phba->fw_config.iscsi_chain_count[ulp_num] =
- pfw_cfg->chain_icd[ulp_num].chain_count;
-
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : Function loaded on ULP : %d\n"
- "\tiscsi_cid_count : %d\n"
- "\tiscsi_cid_start : %d\n"
- "\t iscsi_icd_count : %d\n"
- "\t iscsi_icd_start : %d\n",
- ulp_num,
- phba->fw_config.
- iscsi_cid_count[ulp_num],
- phba->fw_config.
- iscsi_cid_start[ulp_num],
- phba->fw_config.
- iscsi_icd_count[ulp_num],
- phba->fw_config.
- iscsi_icd_start[ulp_num]);
- }
+ /**
+ * Check which ULPs have the iSCSI protocol loaded and set the
+ * bit for each of them. This flag is checked throughout the
+ * code to determine the ULPs on which the iSCSI protocol is
+ * loaded.
+ **/
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+ if (pfw_cfg->ulp[ulp_num].ulp_mode &
+ BEISCSI_ULP_ISCSI_INI_MODE) {
+ set_bit(ulp_num, &phba->fw_config.ulp_supported);
+
+ /* Get the CID, ICD and Chain count for each ULP */
+ phba->fw_config.iscsi_cid_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_base;
+ phba->fw_config.iscsi_cid_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].sq_count;
+
+ phba->fw_config.iscsi_icd_start[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_base;
+ phba->fw_config.iscsi_icd_count[ulp_num] =
+ pfw_cfg->ulp[ulp_num].icd_count;
+
+ phba->fw_config.iscsi_chain_start[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_base;
+ phba->fw_config.iscsi_chain_count[ulp_num] =
+ pfw_cfg->chain_icd[ulp_num].chain_count;
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : Function loaded on ULP : %d\n"
+ "\tiscsi_cid_count : %d\n"
+ "\tiscsi_cid_start : %d\n"
+ "\t iscsi_icd_count : %d\n"
+ "\t iscsi_icd_start : %d\n",
+ ulp_num,
+ phba->fw_config.
+ iscsi_cid_count[ulp_num],
+ phba->fw_config.
+ iscsi_cid_start[ulp_num],
+ phba->fw_config.
+ iscsi_icd_count[ulp_num],
+ phba->fw_config.
+ iscsi_icd_start[ulp_num]);
}
+ }
- phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
- BEISCSI_FUNC_DUA_MODE);
+ if (phba->fw_config.ulp_supported == 0) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
+ pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
+ pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
+ goto fail_init;
+ }
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
- "BG_%d : DUA Mode : 0x%x\n",
- phba->fw_config.dual_ulp_aware);
+ /**
+ * ICD is shared among ULPs. Use icd_count of any one loaded ULP
+ **/
+ for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+ if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+ break;
+ icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+ if (icd_count == 0 || icd_count > 65536) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BG_%d: invalid ICD count %d\n", icd_count);
+ goto fail_init;
+ }
- } else {
+ cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+ BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+ if (cid_count == 0 || cid_count > 4096) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : Failed in mgmt_get_fw_config\n");
- status = -EINVAL;
+ "BG_%d: invalid CID count %d\n", cid_count);
+ goto fail_init;
}
- spin_unlock(&ctrl->mbox_lock);
+ /**
+ * Check whether the FW is dual-ULP aware, i.e. it can handle
+ * either of the protocols.
+ */
+ phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+ BEISCSI_FUNC_DUA_MODE);
+
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+ "BG_%d : DUA Mode : 0x%x\n",
+ phba->fw_config.dual_ulp_aware);
+
+ /* all set, continue using this FW config */
+ status = 0;
+fail_init:
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -440,7 +526,7 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
req = nonemb_cmd.va;
memset(req, 0, sizeof(*req));
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
@@ -470,7 +556,7 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
} else
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BG_%d : Failed in mgmt_check_supported_fw\n");
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
@@ -501,8 +587,9 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
req->region = region;
req->sector = sector;
req->offset = offset;
- spin_lock(&ctrl->mbox_lock);
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return 0;
switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
case BEISCSI_WRITE_FLASH:
offset = sector * sector_size + offset;
@@ -521,28 +608,26 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
"BG_%d : Unsupported cmd = 0x%x\n\n",
bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return -ENOSYS;
}
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
mcc_sge = nonembedded_sgl(wrb);
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
job->request_payload.sg_cnt);
mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
mcc_sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
mcc_sge->len = cpu_to_le32(nonemb_cmd->size);
- wrb->tag0 |= tag;
- be_mcc_notify(phba);
+ be_mcc_notify(phba, tag);
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -558,12 +643,19 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- struct iscsi_cleanup_req *req = embedded_payload(wrb);
- int status = 0;
+ struct be_mcc_wrb *wrb;
+ struct iscsi_cleanup_req *req;
+ unsigned int tag;
+ int status;
- spin_lock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return -EBUSY;
+ }
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
@@ -572,11 +664,12 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
- status = be_mcc_notify_wait(phba);
+ be_mcc_notify(phba, tag);
+ status = be_mcc_compl_poll(phba, tag);
if (status)
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
"BG_%d : mgmt_epfw_cleanup , FAILED\n");
- spin_unlock(&ctrl->mbox_lock);
+ mutex_unlock(&ctrl->mbox_lock);
return status;
}
@@ -590,20 +683,18 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
struct be_mcc_wrb *wrb;
struct be_sge *sge;
struct invalidate_commands_params_in *req;
- unsigned int i, tag = 0;
+ unsigned int i, tag;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
req = nonemb_cmd->va;
memset(req, 0, sizeof(*req));
- wrb = wrb_from_mccq(phba);
sge = nonembedded_sgl(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -621,8 +712,8 @@ unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd->size);
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -637,16 +728,14 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct iscsi_invalidate_connection_params_in *req;
unsigned int tag = 0;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
- wrb->tag0 |= tag;
- req = embedded_payload(wrb);
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
@@ -658,8 +747,8 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
else
req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
req->save_cfg = savecfg_flag;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -669,25 +758,23 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb;
struct tcp_upload_params_in *req;
- unsigned int tag = 0;
+ unsigned int tag;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
- req = embedded_payload(wrb);
- wrb->tag0 |= tag;
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
req->id = (unsigned short)cid;
req->upload_type = (unsigned char)upload_flag;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -722,6 +809,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
unsigned short cid = beiscsi_ep->ep_cid;
struct be_sge *sge;
+ if (dst_addr->sa_family != PF_INET && dst_addr->sa_family != PF_INET6) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+ "BG_%d : unknown addr family %d\n",
+ dst_addr->sa_family);
+ return -EINVAL;
+ }
+
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -732,18 +826,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
ptemplate_address = &template_address;
ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address);
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return 0;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
- sge = nonembedded_sgl(wrb);
+ sge = nonembedded_sgl(wrb);
req = nonemb_cmd->va;
memset(req, 0, sizeof(*req));
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -760,7 +853,8 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
beiscsi_ep->ip_type = BE2_IPV4;
- } else if (dst_addr->sa_family == PF_INET6) {
+ } else {
+ /* else it's the PF_INET6 family */
req->ip_address.ip_type = BE2_IPV6;
memcpy(&req->ip_address.addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
@@ -769,14 +863,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
memcpy(&beiscsi_ep->dst6_addr,
&daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
beiscsi_ep->ip_type = BE2_IPV6;
- } else{
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BG_%d : unknown addr family %d\n",
- dst_addr->sa_family);
- spin_unlock(&ctrl->mbox_lock);
- free_mcc_tag(&phba->ctrl, tag);
- return -EINVAL;
-
}
req->cid = cid;
i = phba->nxt_cqid++;
@@ -801,35 +887,45 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
req->tcp_window_scale_count = 2;
}
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb);
- struct be_cmd_get_all_if_id_req *pbe_allid = req;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_get_all_if_id_req *req;
+ struct be_cmd_get_all_if_id_req *pbe_allid;
+ unsigned int tag;
int status = 0;
- memset(wrb, 0, sizeof(*wrb));
-
- spin_lock(&ctrl->mbox_lock);
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return -EINTR;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return -ENOMEM;
+ }
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
sizeof(*req));
- status = be_mbox_notify(ctrl);
- if (!status)
- phba->interface_handle = pbe_allid->if_hndl_list[0];
- else {
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
+
+ status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
+ if (status) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed in mgmt_get_all_if_id\n");
+ return -EBUSY;
}
- spin_unlock(&ctrl->mbox_lock);
+
+ pbe_allid = embedded_payload(wrb);
+ phba->interface_handle = pbe_allid->if_hndl_list[0];
return status;
}
@@ -852,27 +948,24 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
unsigned int tag;
int rc = 0;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
rc = -ENOMEM;
goto free_cmd;
}
- wrb = wrb_from_mccq(phba);
- wrb->tag0 |= tag;
sge = nonembedded_sgl(wrb);
-
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
sge->len = cpu_to_le32(nonemb_cmd->size);
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
- rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, nonemb_cmd);
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
@@ -1003,8 +1096,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
uint32_t ip_type;
int rc;
- if (mgmt_get_all_if_id(phba))
- return -EIO;
+ rc = mgmt_get_all_if_id(phba);
+ if (rc)
+ return rc;
ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
BE2_IPV6 : BE2_IPV4 ;
@@ -1173,8 +1267,9 @@ int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
int rc;
- if (mgmt_get_all_if_id(phba))
- return -EIO;
+ rc = mgmt_get_all_if_id(phba);
+ if (rc)
+ return rc;
do {
rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
@@ -1245,55 +1340,27 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,
unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
{
- unsigned int tag = 0;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_hba_name *req;
struct be_ctrl_info *ctrl = &phba->ctrl;
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
+ if (mutex_lock_interruptible(&ctrl->mbox_lock))
+ return 0;
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
+ return 0;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_CFG_GET_HBA_NAME,
sizeof(*req));
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
- return tag;
-}
-
-unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba)
-{
- unsigned int tag = 0;
- struct be_mcc_wrb *wrb;
- struct be_cmd_ntwk_link_status_req *req;
- struct be_ctrl_info *ctrl = &phba->ctrl;
-
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
- return tag;
- }
-
- wrb = wrb_from_mccq(phba);
- req = embedded_payload(wrb);
- wrb->tag0 |= tag;
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
- sizeof(*req));
-
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
return tag;
}
@@ -1330,7 +1397,7 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
return -EAGAIN;
}
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -1364,7 +1431,7 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
return -EAGAIN;
}
- rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -1406,7 +1473,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
return -EBUSY;
}
- rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
@@ -1749,19 +1816,17 @@ int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : In bescsi_logout_fwboot_sess\n");
- spin_lock(&ctrl->mbox_lock);
- tag = alloc_mcc_tag(phba);
- if (!tag) {
- spin_unlock(&ctrl->mbox_lock);
+ mutex_lock(&ctrl->mbox_lock);
+ wrb = alloc_mcc_wrb(phba, &tag);
+ if (!wrb) {
+ mutex_unlock(&ctrl->mbox_lock);
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
"BG_%d : MBX Tag Failure\n");
return -EINVAL;
}
- wrb = wrb_from_mccq(phba);
req = embedded_payload(wrb);
- wrb->tag0 |= tag;
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
@@ -1769,10 +1834,10 @@ int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
/* Set the session handle */
req->session_handle = fw_sess_handle;
- be_mcc_notify(phba);
- spin_unlock(&ctrl->mbox_lock);
+ be_mcc_notify(phba, tag);
+ mutex_unlock(&ctrl->mbox_lock);
- rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index c1dbb690ee27..f3a48a04b2ca 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -268,6 +268,8 @@ struct beiscsi_endpoint {
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
struct beiscsi_hba *phba);
+int mgmt_get_port_name(struct be_ctrl_info *ctrl,
+ struct beiscsi_hba *phba);
unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
struct beiscsi_endpoint *beiscsi_ep,
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 2ea0db4b62a7..7209afad82f7 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -91,6 +91,25 @@ static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
+void
+__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
+{
+ int tail = trcm->tail;
+ struct bfa_trc_s *trc = &trcm->trc[tail];
+
+ if (trcm->stopped)
+ return;
+
+ trc->fileno = (u16) fileno;
+ trc->line = (u16) line;
+ trc->data.u64 = data;
+ trc->timestamp = BFA_TRC_TS(trcm);
+
+ trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
+ if (trcm->tail == trcm->head)
+ trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
+}
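__bfa_trc now lives out of line, but the ring discipline is unchanged: BFA_TRC_MAX is a power of two, so masking with BFA_TRC_MAX - 1 wraps the tail cheaply, and advancing head on a collision drops the oldest record instead of blocking. A compact user-space model of the same wrap-and-overwrite behaviour:

#include <stdio.h>

#define RING_SIZE 8                     /* must be a power of two */

static int buf[RING_SIZE];
static int head, tail;

static void push(int v)
{
        buf[tail] = v;
        tail = (tail + 1) & (RING_SIZE - 1);
        if (tail == head)               /* full: overwrite oldest */
                head = (head + 1) & (RING_SIZE - 1);
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                push(i);
        /* entries 0..2 were overwritten; 3..9 remain */
        for (int i = head; i != tail; i = (i + 1) & (RING_SIZE - 1))
                printf("%d ", buf[i]);
        printf("\n");
        return 0;
}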
+
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index da9cf655be26..df6760ca0911 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -108,44 +108,11 @@ bfa_trc_stop(struct bfa_trc_mod_s *trcm)
trcm->stopped = 1;
}
-static inline void
-__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
-{
- int tail = trcm->tail;
- struct bfa_trc_s *trc = &trcm->trc[tail];
-
- if (trcm->stopped)
- return;
-
- trc->fileno = (u16) fileno;
- trc->line = (u16) line;
- trc->data.u64 = data;
- trc->timestamp = BFA_TRC_TS(trcm);
-
- trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
- if (trcm->tail == trcm->head)
- trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-}
-
+void
+__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data);
-static inline void
-__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
-{
- int tail = trcm->tail;
- struct bfa_trc_s *trc = &trcm->trc[tail];
-
- if (trcm->stopped)
- return;
-
- trc->fileno = (u16) fileno;
- trc->line = (u16) line;
- trc->data.u32.u32 = data;
- trc->timestamp = BFA_TRC_TS(trcm);
-
- trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
- if (trcm->tail == trcm->head)
- trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
-}
+void
+__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data);
#define bfa_sm_fault(__mod, __event) do { \
bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 251e2ff8ff5f..a1ada4a31c97 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -2803,7 +2803,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 67405c628864..d7029ea5d319 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -97,6 +97,15 @@ static void __exit bnx2fc_mod_exit(void);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging,
+ "Option to enable extended logging,\n"
+ "\t\tDefault is 0 - no logging.\n"
+ "\t\t0x01 - SCSI cmd error, cleanup.\n"
+ "\t\t0x02 - Session setup, cleanup, etc.\n"
+ "\t\t0x04 - lport events, link, mtu, etc.\n"
+ "\t\t0x08 - ELS logs.\n"
+ "\t\t0x10 - fcoe L2 fame related logs.\n"
+ "\t\t0xff - LOG all messages.");
static int bnx2fc_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0002caf687dd..2230dab67ca5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1104,8 +1104,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct bnx2fc_cmd *io_req;
struct fc_lport *lport;
struct bnx2fc_rport *tgt;
- int rc = FAILED;
-
+ int rc;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
@@ -1114,7 +1113,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
lport = shost_priv(sc_cmd->device->host);
if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
printk(KERN_ERR PFX "eh_abort: link not ready\n");
- return rc;
+ return FAILED;
}
tgt = (struct bnx2fc_rport *)&rp[1];
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 804806e1cbb4..339f6b7f4803 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
@@ -158,7 +159,6 @@ static struct scsi_transport_template *cxgb4i_stt;
* open/close/abort and data send/receive.
*/
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK 0x3FFU
#define MAX_IMM_TX_PKT_LEN 256
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
index 22dd8d670e4a..2fd9c76fc21c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.h
@@ -25,21 +25,4 @@
#define T5_ISS_VALID (1 << 18)
-struct ulptx_idata {
- __be32 cmd_more;
- __be32 len;
-};
-
-struct cpl_rx_data_ddp {
- union opcode_tid ot;
- __be16 urg;
- __be16 len;
- __be32 seq;
- union {
- __be32 nxt_seq;
- __be32 ddp_report;
- };
- __be32 ulp_crc;
- __be32 ddpvld;
-};
#endif /* __CXGB4I_H__ */
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 5ada9268a450..6e6815545a71 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -34,7 +34,6 @@ extern const struct file_operations cxlflash_cxl_fops;
sectors
*/
-#define NUM_RRQ_ENTRY 16 /* for master issued cmds */
#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
/* AFU command retry limit */
@@ -48,9 +47,12 @@ extern const struct file_operations cxlflash_cxl_fops;
index derivation
*/
-#define CXLFLASH_MAX_CMDS 16
+#define CXLFLASH_MAX_CMDS 256
#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
+/* RRQ for master issued cmds */
+#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
+
static inline void check_sizes(void)
{
@@ -106,7 +108,6 @@ struct cxlflash_cfg {
atomic_t scan_host_needed;
struct cxl_afu *cxl_afu;
- struct pci_dev *parent_dev;
atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
@@ -149,7 +150,7 @@ struct afu_cmd {
struct afu {
/* Stuff requiring alignment go first. */
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */
+ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
/*
* Command & data for AFU commands.
*/
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index f6d90ce8f3b7..8fb9643fe6e3 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd)
atomic64_set(&afu->room, room);
if (room)
goto write_rrin;
- udelay(nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
pr_err("%s: no cmd_room to send reset\n", __func__);
@@ -303,7 +303,7 @@ write_rrin:
if (rrin != 0x1)
break;
/* Double delay each time */
- udelay(2 << nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
}
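Replacing udelay(nretry) and udelay(2 << nretry) with a uniform udelay(1 << nretry) makes every retry loop back off exponentially rather than linearly. A quick standalone comparison of the cumulative wait, assuming ten retries (the actual MC_ROOM_RETRY_CNT value is an assumption here):

#include <stdio.h>

#define RETRY_CNT 10    /* assumed; stands in for MC_ROOM_RETRY_CNT */

int main(void)
{
        unsigned long linear = 0, expo = 0;

        for (int nretry = 0; nretry < RETRY_CNT; nretry++) {
                linear += nretry;               /* old: udelay(nretry)      */
                expo   += 1UL << nretry;        /* new: udelay(1 << nretry) */
        }
        /* 45 us total vs 1023 us total: the doubling delays give slow
         * hardware a realistic window to free up cmd_room */
        printf("linear=%lu us, exponential=%lu us\n", linear, expo);
        return 0;
}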
@@ -338,7 +338,7 @@ retry:
atomic64_set(&afu->room, room);
if (room)
goto write_ioarrin;
- udelay(nretry);
+ udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
dev_err(dev, "%s: no cmd_room to send 0x%X\n",
@@ -352,7 +352,7 @@ retry:
* afu->room.
*/
if (nretry++ < MC_ROOM_RETRY_CNT) {
- udelay(nretry);
+ udelay(1 << nretry);
goto retry;
}
@@ -683,28 +683,23 @@ static void stop_afu(struct cxlflash_cfg *cfg)
}
/**
- * term_mc() - terminates the master context
+ * term_intr() - disables all AFU interrupts
* @cfg: Internal structure associated with the host.
* @level: Depth of allocation, where to begin waterfall tear down.
*
* Safe to call with AFU/MC in partially allocated/initialized state.
*/
-static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
+static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
- int rc = 0;
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
if (!afu || !cfg->mcctx) {
- dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
- __func__);
+ dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
return;
}
switch (level) {
- case UNDO_START:
- rc = cxl_stop_context(cfg->mcctx);
- BUG_ON(rc);
case UNMAP_THREE:
cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
case UNMAP_TWO:
@@ -713,9 +708,34 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
case FREE_IRQ:
cxl_free_afu_irqs(cfg->mcctx);
- case RELEASE_CONTEXT:
- cfg->mcctx = NULL;
+ /* fall through */
+ case UNDO_NOOP:
+ /* No action required */
+ break;
+ }
+}
+
+/**
+ * term_mc() - terminates the master context
+ * @cfg: Internal structure associated with the host.
+ * @level: Depth of allocation, where to begin waterfall tear down.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_mc(struct cxlflash_cfg *cfg)
+{
+ int rc = 0;
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+
+ if (!afu || !cfg->mcctx) {
+ dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
+ return;
}
+
+ rc = cxl_stop_context(cfg->mcctx);
+ WARN_ON(rc);
+ cfg->mcctx = NULL;
}
/**
@@ -726,11 +746,21 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
*/
static void term_afu(struct cxlflash_cfg *cfg)
{
- term_mc(cfg, UNDO_START);
-
+ /*
+ * Tear down is carefully orchestrated to ensure
+ * no interrupts can come in when the problem state
+ * area is unmapped.
+ *
+ * 1) Disable all AFU interrupts
+ * 2) Unmap the problem state area
+ * 3) Stop the master context
+ */
+ term_intr(cfg, UNMAP_THREE);
if (cfg->afu)
stop_afu(cfg);
+ term_mc(cfg);
+
pr_debug("%s: returning\n", __func__);
}
@@ -767,7 +797,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
cancel_work_sync(&cfg->work_q);
term_afu(cfg);
case INIT_STATE_PCI:
- pci_release_regions(cfg->dev);
pci_disable_device(pdev);
case INIT_STATE_NONE:
free_mem(cfg);
@@ -840,15 +869,6 @@ static int init_pci(struct cxlflash_cfg *cfg)
struct pci_dev *pdev = cfg->dev;
int rc = 0;
- cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
- rc = pci_request_regions(pdev, CXLFLASH_NAME);
- if (rc < 0) {
- dev_err(&pdev->dev,
- "%s: Couldn't register memory range of registers\n",
- __func__);
- goto out;
- }
-
rc = pci_enable_device(pdev);
if (rc || pci_channel_offline(pdev)) {
if (pci_channel_offline(pdev)) {
@@ -860,55 +880,13 @@ static int init_pci(struct cxlflash_cfg *cfg)
dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
__func__);
cxlflash_wait_for_pci_err_recovery(cfg);
- goto out_release_regions;
- }
- }
-
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc < 0) {
- dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
- __func__);
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- if (rc < 0) {
- dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
- __func__);
- goto out_disable;
- }
-
- pci_set_master(pdev);
-
- if (pci_channel_offline(pdev)) {
- cxlflash_wait_for_pci_err_recovery(cfg);
- if (pci_channel_offline(pdev)) {
- rc = -EIO;
- goto out_msi_disable;
+ goto out;
}
}
- rc = pci_save_state(pdev);
-
- if (rc != PCIBIOS_SUCCESSFUL) {
- dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
- __func__);
- rc = -EIO;
- goto cleanup_nolog;
- }
-
out:
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
-
-cleanup_nolog:
-out_msi_disable:
- cxlflash_wait_for_pci_err_recovery(cfg);
-out_disable:
- pci_disable_device(pdev);
-out_release_regions:
- pci_release_regions(pdev);
- goto out;
-
}
/**
@@ -1407,7 +1385,7 @@ static int start_context(struct cxlflash_cfg *cfg)
*/
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
- struct pci_dev *dev = cfg->parent_dev;
+ struct pci_dev *dev = cfg->dev;
int rc = 0;
int ro_start, ro_size, i, j, k;
ssize_t vpd_size;
@@ -1416,7 +1394,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
/* Get the VPD data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+ vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
@@ -1649,41 +1627,24 @@ static int start_afu(struct cxlflash_cfg *cfg)
}
/**
- * init_mc() - create and register as the master context
+ * init_intr() - setup interrupt handlers for the master context
* @cfg: Internal structure associated with the host.
*
* Return: 0 on success, -errno on failure
*/
-static int init_mc(struct cxlflash_cfg *cfg)
+static enum undo_level init_intr(struct cxlflash_cfg *cfg,
+ struct cxl_context *ctx)
{
- struct cxl_context *ctx;
- struct device *dev = &cfg->dev->dev;
struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
int rc = 0;
- enum undo_level level;
-
- ctx = cxl_get_context(cfg->dev);
- if (unlikely(!ctx))
- return -ENOMEM;
- cfg->mcctx = ctx;
-
- /* Set it up as a master with the CXL */
- cxl_set_master(ctx);
-
- /* During initialization reset the AFU to start from a clean slate */
- rc = cxl_afu_reset(cfg->mcctx);
- if (unlikely(rc)) {
- dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
- __func__, rc);
- level = RELEASE_CONTEXT;
- goto out;
- }
+ enum undo_level level = UNDO_NOOP;
rc = cxl_allocate_afu_irqs(ctx, 3);
if (unlikely(rc)) {
dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
__func__, rc);
- level = RELEASE_CONTEXT;
+ level = UNDO_NOOP;
goto out;
}
@@ -1713,8 +1674,47 @@ static int init_mc(struct cxlflash_cfg *cfg)
level = UNMAP_TWO;
goto out;
}
+out:
+ return level;
+}
- rc = 0;
+/**
+ * init_mc() - create and register as the master context
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_mc(struct cxlflash_cfg *cfg)
+{
+ struct cxl_context *ctx;
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+ enum undo_level level;
+
+ ctx = cxl_get_context(cfg->dev);
+ if (unlikely(!ctx)) {
+ rc = -ENOMEM;
+ goto ret;
+ }
+ cfg->mcctx = ctx;
+
+ /* Set it up as a master with the CXL */
+ cxl_set_master(ctx);
+
+ /* During initialization reset the AFU to start from a clean slate */
+ rc = cxl_afu_reset(cfg->mcctx);
+ if (unlikely(rc)) {
+ dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
+ __func__, rc);
+ goto ret;
+ }
+
+ level = init_intr(cfg, ctx);
+ if (unlikely(level)) {
+ dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
/* This performs the equivalent of the CXL_IOCTL_START_WORK.
* The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
@@ -1730,7 +1730,7 @@ ret:
pr_debug("%s: returning rc=%d\n", __func__, rc);
return rc;
out:
- term_mc(cfg, level);
+ term_intr(cfg, level);
goto ret;
}
@@ -1803,7 +1803,8 @@ out:
err2:
kref_put(&afu->mapcount, afu_unmap);
err1:
- term_mc(cfg, UNDO_START);
+ term_intr(cfg, UNMAP_THREE);
+ term_mc(cfg);
goto out;
}
@@ -2149,6 +2150,16 @@ static ssize_t lun_mode_store(struct device *dev,
rc = kstrtouint(buf, 10, &lun_mode);
if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
afu->internal_lun = lun_mode;
+
+ /*
+ * When configured for internal LUN, there is only one channel,
+ * channel number 0; otherwise there are two (the default).
+ */
+ if (afu->internal_lun)
+ shost->max_channel = 0;
+ else
+ shost->max_channel = NUM_FC_PORTS - 1;
+
afu_reset(cfg);
scsi_scan_host(cfg->host);
}
@@ -2295,7 +2306,7 @@ static struct scsi_host_template driver_template = {
.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
.change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = 16,
+ .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
.this_id = -1,
.sg_tablesize = SG_NONE, /* No scatter gather support */
@@ -2392,7 +2403,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
{
struct Scsi_Host *host;
struct cxlflash_cfg *cfg = NULL;
- struct device *phys_dev;
struct dev_dependent_vals *ddv;
int rc = 0;
@@ -2458,19 +2468,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
- /*
- * Use the special service provided to look up the physical
- * PCI device, since we are called on the probe of the virtual
- * PCI host bus (vphb)
- */
- phys_dev = cxl_get_phys_dev(pdev);
- if (!dev_is_pci(phys_dev)) {
- dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
- rc = -ENODEV;
- goto out_remove;
- }
- cfg->parent_dev = to_pci_dev(phys_dev);
-
cfg->cxl_afu = cxl_pci_to_afu(pdev);
rc = init_pci(cfg);
@@ -2544,8 +2541,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
if (unlikely(rc))
dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
__func__, rc);
- term_mc(cfg, UNDO_START);
- stop_afu(cfg);
+ term_afu(cfg);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
cfg->state = STATE_FAILTERM;
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 0faed422c7f4..eb9d8f730b38 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -79,12 +79,11 @@
#define WWPN_BUF_LEN (WWPN_LEN + 1)
enum undo_level {
- RELEASE_CONTEXT = 0,
+ UNDO_NOOP = 0,
FREE_IRQ,
UNMAP_ONE,
UNMAP_TWO,
- UNMAP_THREE,
- UNDO_START
+ UNMAP_THREE
};
struct dev_dependent_vals {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index f4020dbb55c3..d8a5cb3cd2bd 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -709,27 +709,32 @@ int cxlflash_disk_release(struct scsi_device *sdev,
* @cfg: Internal structure associated with the host.
* @ctxi: Context to release.
*
- * Note that the rht_lun member of the context was cut from a single
- * allocation when the context was created and therefore does not need
- * to be explicitly freed. Also note that we conditionally check for the
- * existence of the context control map before clearing the RHT registers
- * and context capabilities because it is possible to destroy a context
- * while the context is in the error state (previous mapping was removed
- * [so we don't have to worry about clearing] and context is waiting for
- * a new mapping).
+ * This routine is safe to call with a non-initialized context
+ * and tolerates being called with the context's mutex held (the
+ * mutex will be unlocked if necessary before freeing). Also note that the
+ * routine conditionally checks for the existence of the context control
+ * map before clearing the RHT registers and context capabilities because
+ * it is possible to destroy a context while the context is in the error
+ * state (previous mapping was removed [so there is no need to worry about
+ * clearing] and context is waiting for a new mapping).
*/
static void destroy_context(struct cxlflash_cfg *cfg,
struct ctx_info *ctxi)
{
struct afu *afu = cfg->afu;
- WARN_ON(!list_empty(&ctxi->luns));
+ if (ctxi->initialized) {
+ WARN_ON(!list_empty(&ctxi->luns));
- /* Clear RHT registers and drop all capabilities for this context */
- if (afu->afu_map && ctxi->ctrl_map) {
- writeq_be(0, &ctxi->ctrl_map->rht_start);
- writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
- writeq_be(0, &ctxi->ctrl_map->ctx_cap);
+ /* Clear RHT registers and drop all capabilities for context */
+ if (afu->afu_map && ctxi->ctrl_map) {
+ writeq_be(0, &ctxi->ctrl_map->rht_start);
+ writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
+ writeq_be(0, &ctxi->ctrl_map->ctx_cap);
+ }
+
+ if (mutex_is_locked(&ctxi->mutex))
+ mutex_unlock(&ctxi->mutex);
}
/* Free memory associated with context */
@@ -742,23 +747,12 @@ static void destroy_context(struct cxlflash_cfg *cfg,
/**
* create_context() - allocates and initializes a context
* @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained CXL context reference.
- * @ctxid: Previously obtained process element associated with CXL context.
- * @adap_fd: Previously obtained adapter fd associated with CXL context.
- * @file: Previously obtained file associated with CXL context.
- * @perms: User-specified permissions.
- *
- * The context's mutex is locked when an allocated context is returned.
*
* Return: Allocated context on success, NULL on failure
*/
-static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
- struct cxl_context *ctx, int ctxid,
- int adap_fd, struct file *file,
- u32 perms)
+static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
struct ctx_info *ctxi = NULL;
struct llun_info **lli = NULL;
u8 *ws = NULL;
@@ -781,28 +775,49 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
ctxi->rht_lun = lli;
ctxi->rht_needs_ws = ws;
ctxi->rht_start = rhte;
- ctxi->rht_perms = perms;
+out:
+ return ctxi;
+
+err:
+ kfree(ws);
+ kfree(lli);
+ kfree(ctxi);
+ ctxi = NULL;
+ goto out;
+}
+
+/**
+ * init_context() - initializes a previously allocated context
+ * @ctxi: Previously allocated context
+ * @cfg: Internal structure associated with the host.
+ * @ctx: Previously obtained CXL context reference.
+ * @ctxid: Previously obtained process element associated with CXL context.
+ * @adap_fd: Previously obtained adapter fd associated with CXL context.
+ * @file: Previously obtained file associated with CXL context.
+ * @perms: User-specified permissions.
+ *
+ * Upon return, the context is marked as initialized and the context's mutex
+ * is locked.
+ */
+static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
+ struct cxl_context *ctx, int ctxid, int adap_fd,
+ struct file *file, u32 perms)
+{
+ struct afu *afu = cfg->afu;
+ ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
ctxi->lfd = adap_fd;
ctxi->pid = current->tgid; /* tgid = pid */
ctxi->ctx = ctx;
ctxi->file = file;
+ ctxi->initialized = true;
mutex_init(&ctxi->mutex);
INIT_LIST_HEAD(&ctxi->luns);
INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
mutex_lock(&ctxi->mutex);
-out:
- return ctxi;
-
-err:
- kfree(ws);
- kfree(lli);
- kfree(ctxi);
- ctxi = NULL;
- goto out;
}
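Splitting create_context() (allocation only) from init_context() (wiring, with the initialized flag set last) is what lets the attach error path funnel everything through one destroy_context() call regardless of how far setup got. A generic sketch of the two-phase pattern, under hypothetical names:

#include <stdbool.h>
#include <stdlib.h>

struct obj {
        bool initialized;       /* set only after full wiring */
        void *resource;
};

static struct obj *obj_create(void)
{
        return calloc(1, sizeof(struct obj));   /* allocation only */
}

static void obj_init(struct obj *o, void *resource)
{
        o->resource = resource;
        o->initialized = true;                  /* last step */
}

static void obj_destroy(struct obj *o)
{
        if (!o)
                return;
        if (o->initialized) {
                /* undo only what obj_init() established */
        }
        free(o);        /* safe whether or not init ran */
}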
/**
@@ -1300,9 +1315,9 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
u32 perms;
int ctxid = -1;
u64 rctxid = 0UL;
- struct file *file;
+ struct file *file = NULL;
- struct cxl_context *ctx;
+ struct cxl_context *ctx = NULL;
int fd = -1;
@@ -1356,7 +1371,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
if (unlikely(!lun_access)) {
dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
rc = -ENOMEM;
- goto err0;
+ goto err;
}
lun_access->lli = lli;
@@ -1371,53 +1386,56 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto out_attach;
}
+ ctxi = create_context(cfg);
+ if (unlikely(!ctxi)) {
+ dev_err(dev, "%s: Failed to create context! (%d)\n",
+ __func__, ctxid);
+ goto err;
+ }
+
ctx = cxl_dev_context_init(cfg->dev);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
rc = -ENODEV;
- goto err1;
+ goto err;
+ }
+
+ work = &ctxi->work;
+ work->num_interrupts = attach->num_interrupts;
+ work->flags = CXL_START_WORK_NUM_IRQS;
+
+ rc = cxl_start_work(ctx, work);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: Could not start context rc=%d\n",
+ __func__, rc);
+ goto err;
}
ctxid = cxl_process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
- goto err2;
+ goto err;
}
file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err2;
+ goto err;
}
/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
perms = SISL_RHT_PERM(attach->hdr.flags + 1);
- ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
- if (unlikely(!ctxi)) {
- dev_err(dev, "%s: Failed to create context! (%d)\n",
- __func__, ctxid);
- goto err3;
- }
-
- work = &ctxi->work;
- work->num_interrupts = attach->num_interrupts;
- work->flags = CXL_START_WORK_NUM_IRQS;
-
- rc = cxl_start_work(ctx, work);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
- goto err4;
- }
+ /* Context mutex is locked upon return */
+ init_context(ctxi, cfg, ctx, ctxid, fd, file, perms);
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
- goto err5;
+ goto err;
}
/*
@@ -1453,13 +1471,14 @@ out:
__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
return rc;
-err5:
- cxl_stop_context(ctx);
-err4:
- put_context(ctxi);
- destroy_context(cfg, ctxi);
- ctxi = NULL;
-err3:
+err:
+ /* Cleanup CXL context; okay to 'stop' even if it was not started */
+ if (!IS_ERR_OR_NULL(ctx)) {
+ cxl_stop_context(ctx);
+ cxl_release_context(ctx);
+ ctx = NULL;
+ }
+
/*
* Here, we're overriding the fops with a dummy all-NULL fops because
* fput() calls the release fop, which will cause us to mistakenly
@@ -1467,15 +1486,21 @@ err3:
* to that routine (cxlflash_cxl_release) we should try to fix the
* issue here.
*/
- file->f_op = &null_fops;
- fput(file);
- put_unused_fd(fd);
- fd = -1;
-err2:
- cxl_release_context(ctx);
-err1:
+ if (fd > 0) {
+ file->f_op = &null_fops;
+ fput(file);
+ put_unused_fd(fd);
+ fd = -1;
+ file = NULL;
+ }
+
+ /* Cleanup our context; safe to call even with mutex locked */
+ if (ctxi) {
+ destroy_context(cfg, ctxi);
+ ctxi = NULL;
+ }
+
kfree(lun_access);
-err0:
scsi_device_put(sdev);
goto out;
}
@@ -1507,24 +1532,24 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
goto out;
}
+ rc = cxl_start_work(ctx, &ctxi->work);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: Could not start context rc=%d\n",
+ __func__, rc);
+ goto err1;
+ }
+
ctxid = cxl_process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
rc = -EPERM;
- goto err1;
+ goto err2;
}
file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
- goto err1;
- }
-
- rc = cxl_start_work(ctx, &ctxi->work);
- if (unlikely(rc)) {
- dev_dbg(dev, "%s: Could not start context rc=%d\n",
- __func__, rc);
goto err2;
}
@@ -1569,10 +1594,10 @@ out:
return rc;
err3:
- cxl_stop_context(ctx);
-err2:
fput(file);
put_unused_fd(fd);
+err2:
+ cxl_stop_context(ctx);
err1:
cxl_release_context(ctx);
goto out;
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index bede574bcd77..5f9a091fda95 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -102,6 +102,7 @@ struct ctx_info {
u64 ctxid;
int lfd;
pid_t pid;
+ bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index e5647d59224f..0b331c9c0a8f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -13,13 +13,13 @@ menuconfig SCSI_DH
config SCSI_DH_RDAC
tristate "LSI RDAC Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have a LSI RDAC select y. Otherwise, say N.
config SCSI_DH_HP_SW
tristate "HP/COMPAQ MSA Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have a HP/COMPAQ MSA device that requires START_STOP to
be sent to start it and cannot upgrade the firmware then select y.
@@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
config SCSI_DH_EMC
tristate "EMC CLARiiON Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
If you have an EMC CLARiiON select y. Otherwise, say N.
config SCSI_DH_ALUA
tristate "SPC-3 ALUA Device Handler"
- depends on SCSI_DH
+ depends on SCSI_DH && SCSI
help
SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
Access (ALUA).
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 5a328bf81836..8eaed0522aa3 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -24,20 +24,13 @@
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
+#include <scsi/scsi_proto.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#define ALUA_DH_NAME "alua"
-#define ALUA_DH_VER "1.3"
-
-#define TPGS_STATE_OPTIMIZED 0x0
-#define TPGS_STATE_NONOPTIMIZED 0x1
-#define TPGS_STATE_STANDBY 0x2
-#define TPGS_STATE_UNAVAILABLE 0x3
-#define TPGS_STATE_LBA_DEPENDENT 0x4
-#define TPGS_STATE_OFFLINE 0xe
-#define TPGS_STATE_TRANSITIONING 0xf
+#define ALUA_DH_VER "2.0"
#define TPGS_SUPPORT_NONE 0x00
#define TPGS_SUPPORT_OPTIMIZED 0x01
@@ -56,27 +49,62 @@
#define TPGS_MODE_IMPLICIT 0x1
#define TPGS_MODE_EXPLICIT 0x2
-#define ALUA_INQUIRY_SIZE 36
+#define ALUA_RTPG_SIZE 128
#define ALUA_FAILOVER_TIMEOUT 60
#define ALUA_FAILOVER_RETRIES 5
+#define ALUA_RTPG_DELAY_MSECS 5
/* device handler flags */
-#define ALUA_OPTIMIZE_STPG 1
-#define ALUA_RTPG_EXT_HDR_UNSUPP 2
+#define ALUA_OPTIMIZE_STPG 0x01
+#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02
+#define ALUA_SYNC_STPG 0x04
+/* State machine flags */
+#define ALUA_PG_RUN_RTPG 0x10
+#define ALUA_PG_RUN_STPG 0x20
+#define ALUA_PG_RUNNING 0x40
-struct alua_dh_data {
+static uint optimize_stpg;
+module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
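Since optimize_stpg is declared with S_IRUGO|S_IWUSR, it should also be visible and writable at runtime under /sys/module/scsi_dh_alua/parameters/optimize_stpg. Note, though, that the value is only copied into pg->flags when a port group is first allocated (see alua_alloc_pg() further down), so a runtime change affects newly discovered port groups; existing ones can still be adjusted per device through the set_params handler.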
+
+static LIST_HEAD(port_group_list);
+static DEFINE_SPINLOCK(port_group_lock);
+static struct workqueue_struct *kaluad_wq;
+static struct workqueue_struct *kaluad_sync_wq;
+
+struct alua_port_group {
+ struct kref kref;
+ struct rcu_head rcu;
+ struct list_head node;
+ struct list_head dh_list;
+ unsigned char device_id_str[256];
+ int device_id_len;
int group_id;
- int rel_port;
int tpgs;
int state;
int pref;
unsigned flags; /* used for optimizing STPG */
- unsigned char inq[ALUA_INQUIRY_SIZE];
- unsigned char *buff;
- int bufflen;
unsigned char transition_tmo;
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ unsigned long expiry;
+ unsigned long interval;
+ struct delayed_work rtpg_work;
+ spinlock_t lock;
+ struct list_head rtpg_list;
+ struct scsi_device *rtpg_sdev;
+};
+
+struct alua_dh_data {
+ struct list_head node;
+ struct alua_port_group *pg;
+ int group_id;
+ spinlock_t pg_lock;
struct scsi_device *sdev;
+ int init_error;
+ struct mutex init_mutex;
+};
+
+struct alua_queue_data {
+ struct list_head entry;
activate_complete callback_fn;
void *callback_data;
};
@@ -84,179 +112,160 @@ struct alua_dh_data {
#define ALUA_POLICY_SWITCH_CURRENT 0
#define ALUA_POLICY_SWITCH_ALL 1
-static char print_alua_state(int);
+static void alua_rtpg_work(struct work_struct *work);
+static void alua_rtpg_queue(struct alua_port_group *pg,
+ struct scsi_device *sdev,
+ struct alua_queue_data *qdata, bool force);
+static void alua_check(struct scsi_device *sdev, bool force);
-static int realloc_buffer(struct alua_dh_data *h, unsigned len)
+static void release_port_group(struct kref *kref)
{
- if (h->buff && h->buff != h->inq)
- kfree(h->buff);
-
- h->buff = kmalloc(len, GFP_NOIO);
- if (!h->buff) {
- h->buff = h->inq;
- h->bufflen = ALUA_INQUIRY_SIZE;
- return 1;
- }
- h->bufflen = len;
- return 0;
-}
-
-static struct request *get_alua_req(struct scsi_device *sdev,
- void *buffer, unsigned buflen, int rw)
-{
- struct request *rq;
- struct request_queue *q = sdev->request_queue;
-
- rq = blk_get_request(q, rw, GFP_NOIO);
-
- if (IS_ERR(rq)) {
- sdev_printk(KERN_INFO, sdev,
- "%s: blk_get_request failed\n", __func__);
- return NULL;
- }
- blk_rq_set_block_pc(rq);
-
- if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
- blk_put_request(rq);
- sdev_printk(KERN_INFO, sdev,
- "%s: blk_rq_map_kern failed\n", __func__);
- return NULL;
- }
-
- rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
- rq->retries = ALUA_FAILOVER_RETRIES;
- rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ;
-
- return rq;
+ struct alua_port_group *pg;
+
+ pg = container_of(kref, struct alua_port_group, kref);
+ if (pg->rtpg_sdev)
+ flush_delayed_work(&pg->rtpg_work);
+ spin_lock(&port_group_lock);
+ list_del(&pg->node);
+ spin_unlock(&port_group_lock);
+ kfree_rcu(pg, rcu);
}
/*
* submit_rtpg - Issue a REPORT TARGET GROUP STATES command
* @sdev: sdev the command should be sent to
*/
-static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
+ int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
- struct request *rq;
- int err = 0;
-
- rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
- if (!rq) {
- err = DRIVER_BUSY << 24;
- goto done;
- }
+ u8 cdb[COMMAND_SIZE(MAINTENANCE_IN)];
+ int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
/* Prepare the command. */
- rq->cmd[0] = MAINTENANCE_IN;
- if (!(h->flags & ALUA_RTPG_EXT_HDR_UNSUPP))
- rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
+ memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_IN));
+ cdb[0] = MAINTENANCE_IN;
+ if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
+ cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
else
- rq->cmd[1] = MI_REPORT_TARGET_PGS;
- put_unaligned_be32(h->bufflen, &rq->cmd[6]);
- rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);
-
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
-
- blk_execute_rq(rq->q, NULL, rq, 1);
- if (rq->errors)
- err = rq->errors;
- blk_put_request(rq);
-done:
- return err;
+ cdb[1] = MI_REPORT_TARGET_PGS;
+ put_unaligned_be32(bufflen, &cdb[6]);
+
+ return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
+ buff, bufflen, sshdr,
+ ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, NULL, req_flags);
}
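For reference, submit_rtpg() builds a 12-byte MAINTENANCE IN CDB: byte 0 carries the opcode, byte 1 the REPORT TARGET PORT GROUPS service action (optionally OR-ed with the extended-header format bit), and bytes 6..9 the big-endian allocation length. A standalone sketch of that layout, with constants mirroring the SPC values used above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { MAINTENANCE_IN = 0xa3, MI_REPORT_TARGET_PGS = 0x0a };

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

int main(void)
{
	uint8_t cdb[12];

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = MAINTENANCE_IN;
	cdb[1] = MI_REPORT_TARGET_PGS;
	put_be32(128, &cdb[6]);		/* allocation length == bufflen */
	for (int i = 0; i < 12; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return 0;
}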
/*
- * stpg_endio - Evaluate SET TARGET GROUP STATES
- * @sdev: the device to be evaluated
- * @state: the new target group state
+ * submit_stpg - Issue a SET TARGET PORT GROUP command
*
- * Evaluate a SET TARGET GROUP STATES command response.
+ * Currently we're only setting the current target port group state
+ * to 'active/optimized' and letting the array firmware figure out
+ * the states of the remaining groups.
*/
-static void stpg_endio(struct request *req, int error)
+static int submit_stpg(struct scsi_device *sdev, int group_id,
+ struct scsi_sense_hdr *sshdr)
{
- struct alua_dh_data *h = req->end_io_data;
- struct scsi_sense_hdr sense_hdr;
- unsigned err = SCSI_DH_OK;
+ u8 cdb[COMMAND_SIZE(MAINTENANCE_OUT)];
+ unsigned char stpg_data[8];
+ int stpg_len = 8;
+ int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
- if (host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE) {
- err = SCSI_DH_IO;
- goto done;
+ /* Prepare the data buffer */
+ memset(stpg_data, 0, stpg_len);
+ stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
+ put_unaligned_be16(group_id, &stpg_data[6]);
+
+ /* Prepare the command. */
+ memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_OUT));
+ cdb[0] = MAINTENANCE_OUT;
+ cdb[1] = MO_SET_TARGET_PGS;
+ put_unaligned_be32(stpg_len, &cdb[6]);
+
+ return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
+ stpg_data, stpg_len,
+ sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, NULL, req_flags);
+}
+
+struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
+ int group_id)
+{
+ struct alua_port_group *pg;
+
+ list_for_each_entry(pg, &port_group_list, node) {
+ if (pg->group_id != group_id)
+ continue;
+ if (pg->device_id_len != id_size)
+ continue;
+ if (strncmp(pg->device_id_str, id_str, id_size))
+ continue;
+ if (!kref_get_unless_zero(&pg->kref))
+ continue;
+ return pg;
}
- if (scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
- &sense_hdr)) {
- if (sense_hdr.sense_key == NOT_READY &&
- sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
- /* ALUA state transition already in progress */
- err = SCSI_DH_OK;
- goto done;
- }
- if (sense_hdr.sense_key == UNIT_ATTENTION) {
- err = SCSI_DH_RETRY;
- goto done;
- }
- sdev_printk(KERN_INFO, h->sdev, "%s: stpg failed\n",
- ALUA_DH_NAME);
- scsi_print_sense_hdr(h->sdev, ALUA_DH_NAME, &sense_hdr);
- err = SCSI_DH_IO;
- } else if (error)
- err = SCSI_DH_IO;
-
- if (err == SCSI_DH_OK) {
- h->state = TPGS_STATE_OPTIMIZED;
- sdev_printk(KERN_INFO, h->sdev,
- "%s: port group %02x switched to state %c\n",
- ALUA_DH_NAME, h->group_id,
- print_alua_state(h->state));
- }
-done:
- req->end_io_data = NULL;
- __blk_put_request(req->q, req);
- if (h->callback_fn) {
- h->callback_fn(h->callback_data, err);
- h->callback_fn = h->callback_data = NULL;
- }
- return;
+ return NULL;
}
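alua_find_get_pg() only returns a group on which kref_get_unless_zero() succeeded, so a lookup can never resurrect a group whose last reference is already gone and whose release_port_group() is in flight. A small userspace sketch of that rule using C11 atomics (illustrative, not the kernel's kref implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int ref; };

/* Take a reference only if the count is still non-zero; zero means
 * the object is already being torn down. */
static bool get_unless_zero(struct obj *o)
{
	int v = atomic_load(&o->ref);

	while (v != 0)
		if (atomic_compare_exchange_weak(&o->ref, &v, v + 1))
			return true;
	return false;
}

int main(void)
{
	struct obj live = { .ref = 1 }, dead = { .ref = 0 };

	printf("%d %d\n", get_unless_zero(&live), get_unless_zero(&dead));
	return 0;
}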
/*
- * submit_stpg - Issue a SET TARGET GROUP STATES command
+ * alua_alloc_pg - Allocate a new port_group structure
+ * @sdev: scsi device
+ * @h: alua device_handler data
+ * @group_id: port group id
*
- * Currently we're only setting the current target port group state
- * to 'active/optimized' and let the array firmware figure out
- * the states of the remaining groups.
+ * Allocate a new port_group structure for a given
+ * device.
*/
-static unsigned submit_stpg(struct alua_dh_data *h)
+struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
+ int group_id, int tpgs)
{
- struct request *rq;
- int stpg_len = 8;
- struct scsi_device *sdev = h->sdev;
+ struct alua_port_group *pg, *tmp_pg;
- /* Prepare the data buffer */
- memset(h->buff, 0, stpg_len);
- h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
- put_unaligned_be16(h->group_id, &h->buff[6]);
-
- rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
- if (!rq)
- return SCSI_DH_RES_TEMP_UNAVAIL;
+ pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
+ if (!pg)
+ return ERR_PTR(-ENOMEM);
- /* Prepare the command. */
- rq->cmd[0] = MAINTENANCE_OUT;
- rq->cmd[1] = MO_SET_TARGET_PGS;
- put_unaligned_be32(stpg_len, &rq->cmd[6]);
- rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);
+ pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
+ sizeof(pg->device_id_str));
+ if (pg->device_id_len <= 0) {
+ /*
+ * Internal error: TPGS supported but no device
+ * identification found. Disable ALUA support.
+ */
+ kfree(pg);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: No device descriptors found\n",
+ ALUA_DH_NAME);
+ return ERR_PTR(-ENXIO);
+ }
+ pg->group_id = group_id;
+ pg->tpgs = tpgs;
+ pg->state = SCSI_ACCESS_STATE_OPTIMAL;
+ if (optimize_stpg)
+ pg->flags |= ALUA_OPTIMIZE_STPG;
+ kref_init(&pg->kref);
+ INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
+ INIT_LIST_HEAD(&pg->rtpg_list);
+ INIT_LIST_HEAD(&pg->node);
+ INIT_LIST_HEAD(&pg->dh_list);
+ spin_lock_init(&pg->lock);
+
+ spin_lock(&port_group_lock);
+ tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
+ group_id);
+ if (tmp_pg) {
+ spin_unlock(&port_group_lock);
+ kfree(pg);
+ return tmp_pg;
+ }
- rq->sense = h->sense;
- memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
- rq->sense_len = 0;
- rq->end_io_data = h;
+ list_add(&pg->node, &port_group_list);
+ spin_unlock(&port_group_lock);
- blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
- return SCSI_DH_OK;
+ return pg;
}
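Note the ordering in alua_alloc_pg(): the kzalloc(GFP_KERNEL) deliberately happens before port_group_lock is taken, since a sleeping allocation is not allowed under a spinlock. The function therefore allocates optimistically, re-runs the lookup under the lock, and frees its own copy if another thread inserted an equivalent group in the meantime.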
/*
@@ -318,9 +327,13 @@ static int alua_check_tpgs(struct scsi_device *sdev)
* Extract the relative target port and the target port group
* descriptor from the list of identifiers.
*/
-static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h)
+static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
+ int tpgs)
{
int rel_port = -1, group_id;
+ struct alua_port_group *pg, *old_pg = NULL;
+ bool pg_updated = false;
+ unsigned long flags;
group_id = scsi_vpd_tpg_id(sdev, &rel_port);
if (group_id < 0) {
@@ -334,32 +347,63 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h)
ALUA_DH_NAME);
return SCSI_DH_DEV_UNSUPP;
}
- h->state = TPGS_STATE_OPTIMIZED;
- h->group_id = group_id;
+ pg = alua_alloc_pg(sdev, group_id, tpgs);
+ if (IS_ERR(pg)) {
+ if (PTR_ERR(pg) == -ENOMEM)
+ return SCSI_DH_NOMEM;
+ return SCSI_DH_DEV_UNSUPP;
+ }
sdev_printk(KERN_INFO, sdev,
- "%s: port group %02x rel port %02x\n",
- ALUA_DH_NAME, h->group_id, h->rel_port);
+ "%s: device %s port group %x rel port %x\n",
+ ALUA_DH_NAME, pg->device_id_str, group_id, rel_port);
+
+ /* Check for existing port group references */
+ spin_lock(&h->pg_lock);
+ old_pg = h->pg;
+ if (old_pg != pg) {
+ /* port group has changed. Update to new port group */
+ if (h->pg) {
+ spin_lock_irqsave(&old_pg->lock, flags);
+ list_del_rcu(&h->node);
+ spin_unlock_irqrestore(&old_pg->lock, flags);
+ }
+ rcu_assign_pointer(h->pg, pg);
+ pg_updated = true;
+ }
- return 0;
+ spin_lock_irqsave(&pg->lock, flags);
+ if (sdev->synchronous_alua)
+ pg->flags |= ALUA_SYNC_STPG;
+ if (pg_updated)
+ list_add_rcu(&h->node, &pg->dh_list);
+ spin_unlock_irqrestore(&pg->lock, flags);
+
+ alua_rtpg_queue(h->pg, sdev, NULL, true);
+ spin_unlock(&h->pg_lock);
+
+ if (old_pg)
+ kref_put(&old_pg->kref, release_port_group);
+
+ return SCSI_DH_OK;
}
-static char print_alua_state(int state)
+static char print_alua_state(unsigned char state)
{
switch (state) {
- case TPGS_STATE_OPTIMIZED:
+ case SCSI_ACCESS_STATE_OPTIMAL:
return 'A';
- case TPGS_STATE_NONOPTIMIZED:
+ case SCSI_ACCESS_STATE_ACTIVE:
return 'N';
- case TPGS_STATE_STANDBY:
+ case SCSI_ACCESS_STATE_STANDBY:
return 'S';
- case TPGS_STATE_UNAVAILABLE:
+ case SCSI_ACCESS_STATE_UNAVAILABLE:
return 'U';
- case TPGS_STATE_LBA_DEPENDENT:
+ case SCSI_ACCESS_STATE_LBA:
return 'L';
- case TPGS_STATE_OFFLINE:
+ case SCSI_ACCESS_STATE_OFFLINE:
return 'O';
- case TPGS_STATE_TRANSITIONING:
+ case SCSI_ACCESS_STATE_TRANSITIONING:
return 'T';
default:
return 'X';
@@ -371,18 +415,24 @@ static int alua_check_sense(struct scsi_device *sdev,
{
switch (sense_hdr->sense_key) {
case NOT_READY:
- if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a)
+ if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
/*
* LUN Not Accessible - ALUA state transition
*/
- return ADD_TO_MLQUEUE;
+ alua_check(sdev, false);
+ return NEEDS_RETRY;
+ }
break;
case UNIT_ATTENTION:
- if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
+ if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
/*
- * Power On, Reset, or Bus Device Reset, just retry.
+ * Power On, Reset, or Bus Device Reset.
+ * Might have obscured a state transition,
+ * so schedule a recheck.
*/
+ alua_check(sdev, true);
return ADD_TO_MLQUEUE;
+ }
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
/*
* Device internal reset
@@ -393,16 +443,20 @@ static int alua_check_sense(struct scsi_device *sdev,
* Mode Parameters Changed
*/
return ADD_TO_MLQUEUE;
- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
/*
* ALUA state changed
*/
+ alua_check(sdev, true);
return ADD_TO_MLQUEUE;
- if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07)
+ }
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
/*
* Implicit ALUA state transition failed
*/
+ alua_check(sdev, true);
return ADD_TO_MLQUEUE;
+ }
if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
/*
* Inquiry data has changed
@@ -422,38 +476,71 @@ static int alua_check_sense(struct scsi_device *sdev,
}
/*
+ * alua_tur - Send a TEST UNIT READY
+ * @sdev: device to which the TEST UNIT READY command should be sent
+ *
+ * Send a TEST UNIT READY to @sdev to figure out the device state
+ * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING,
+ * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise.
+ */
+static int alua_tur(struct scsi_device *sdev)
+{
+ struct scsi_sense_hdr sense_hdr;
+ int retval;
+
+ retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, &sense_hdr);
+ if (sense_hdr.sense_key == NOT_READY &&
+ sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
+ return SCSI_DH_RETRY;
+ else if (retval)
+ return SCSI_DH_IO;
+ else
+ return SCSI_DH_OK;
+}
+
+/*
* alua_rtpg - Evaluate REPORT TARGET GROUP STATES
* @sdev: the device to be evaluated.
- * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state
*
* Evaluate the Target Port Group State.
* Returns SCSI_DH_DEV_OFFLINED if the path is
* found to be unusable.
*/
-static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)
+static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
struct scsi_sense_hdr sense_hdr;
- int len, k, off, valid_states = 0;
- unsigned char *ucp;
+ struct alua_port_group *tmp_pg;
+ int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
+ unsigned char *desc, *buff;
unsigned err, retval;
- unsigned long expiry, interval = 0;
unsigned int tpg_desc_tbl_off;
unsigned char orig_transition_tmo;
+ unsigned long flags;
- if (!h->transition_tmo)
- expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
- else
- expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);
+ if (!pg->expiry) {
+ unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
+
+ if (pg->transition_tmo)
+ transition_tmo = pg->transition_tmo * HZ;
+
+ pg->expiry = round_jiffies_up(jiffies + transition_tmo);
+ }
+
+ buff = kzalloc(bufflen, GFP_KERNEL);
+ if (!buff)
+ return SCSI_DH_DEV_TEMP_BUSY;
retry:
- retval = submit_rtpg(sdev, h);
+ retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
+
if (retval) {
- if (!scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
- &sense_hdr)) {
+ if (!scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
ALUA_DH_NAME, retval);
- if (driver_byte(retval) == DRIVER_BUSY)
+ kfree(buff);
+ if (driver_byte(retval) == DRIVER_ERROR)
return SCSI_DH_DEV_TEMP_BUSY;
return SCSI_DH_IO;
}
@@ -466,10 +553,10 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_
* The retry without rtpg_ext_hdr_req set
* handles this.
*/
- if (!(h->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
+ if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
sense_hdr.sense_key == ILLEGAL_REQUEST &&
sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
- h->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
+ pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
goto retry;
}
/*
@@ -481,65 +568,96 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_
err = SCSI_DH_RETRY;
else if (sense_hdr.sense_key == UNIT_ATTENTION)
err = SCSI_DH_RETRY;
- if (err == SCSI_DH_RETRY && time_before(jiffies, expiry)) {
+ if (err == SCSI_DH_RETRY &&
+ pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
ALUA_DH_NAME);
scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
- goto retry;
+ return err;
}
sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
ALUA_DH_NAME);
scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
+ kfree(buff);
+ pg->expiry = 0;
return SCSI_DH_IO;
}
- len = get_unaligned_be32(&h->buff[0]) + 4;
+ len = get_unaligned_be32(&buff[0]) + 4;
- if (len > h->bufflen) {
+ if (len > bufflen) {
/* Resubmit with the correct length */
- if (realloc_buffer(h, len)) {
+ kfree(buff);
+ bufflen = len;
+ buff = kmalloc(bufflen, GFP_KERNEL);
+ if (!buff) {
sdev_printk(KERN_WARNING, sdev,
"%s: kmalloc buffer failed\n", __func__);
/* Temporary failure, bypass */
+ pg->expiry = 0;
return SCSI_DH_DEV_TEMP_BUSY;
}
goto retry;
}
- orig_transition_tmo = h->transition_tmo;
- if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
- h->transition_tmo = h->buff[5];
+ orig_transition_tmo = pg->transition_tmo;
+ if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
+ pg->transition_tmo = buff[5];
else
- h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
+ pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;
- if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {
+ if (orig_transition_tmo != pg->transition_tmo) {
sdev_printk(KERN_INFO, sdev,
"%s: transition timeout set to %d seconds\n",
- ALUA_DH_NAME, h->transition_tmo);
- expiry = jiffies + h->transition_tmo * HZ;
+ ALUA_DH_NAME, pg->transition_tmo);
+ pg->expiry = jiffies + pg->transition_tmo * HZ;
}
- if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
+ if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
tpg_desc_tbl_off = 8;
else
tpg_desc_tbl_off = 4;
- for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
+ for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
k < len;
- k += off, ucp += off) {
-
- if (h->group_id == get_unaligned_be16(&ucp[2])) {
- h->state = ucp[0] & 0x0f;
- h->pref = ucp[0] >> 7;
- valid_states = ucp[1];
+ k += off, desc += off) {
+ u16 group_id = get_unaligned_be16(&desc[2]);
+
+ spin_lock_irqsave(&port_group_lock, flags);
+ tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
+ group_id);
+ spin_unlock_irqrestore(&port_group_lock, flags);
+ if (tmp_pg) {
+ if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
+ if ((tmp_pg == pg) ||
+ !(tmp_pg->flags & ALUA_PG_RUNNING)) {
+ struct alua_dh_data *h;
+
+ tmp_pg->state = desc[0] & 0x0f;
+ tmp_pg->pref = desc[0] >> 7;
+ rcu_read_lock();
+ list_for_each_entry_rcu(h,
+ &tmp_pg->dh_list, node) {
+ /* h->sdev should always be valid */
+ BUG_ON(!h->sdev);
+ h->sdev->access_state = desc[0];
+ }
+ rcu_read_unlock();
+ }
+ if (tmp_pg == pg)
+ valid_states = desc[1];
+ spin_unlock_irqrestore(&tmp_pg->lock, flags);
+ }
+ kref_put(&tmp_pg->kref, release_port_group);
}
- off = 8 + (ucp[7] * 4);
+ off = 8 + (desc[7] * 4);
}
+ spin_lock_irqsave(&pg->lock, flags);
sdev_printk(KERN_INFO, sdev,
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
- ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
- h->pref ? "preferred" : "non-preferred",
+ ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
+ pg->pref ? "preferred" : "non-preferred",
valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
@@ -548,36 +666,236 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_
valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
- switch (h->state) {
- case TPGS_STATE_TRANSITIONING:
- if (wait_for_transition) {
- if (time_before(jiffies, expiry)) {
- /* State transition, retry */
- interval += 2000;
- msleep(interval);
- goto retry;
- }
+ switch (pg->state) {
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+ if (time_before(jiffies, pg->expiry)) {
+ /* State transition, retry */
+ pg->interval = 2;
err = SCSI_DH_RETRY;
} else {
- err = SCSI_DH_OK;
- }
+ struct alua_dh_data *h;
- /* Transitioning time exceeded, set port to standby */
- h->state = TPGS_STATE_STANDBY;
+ /* Transitioning time exceeded, set port to standby */
+ err = SCSI_DH_IO;
+ pg->state = SCSI_ACCESS_STATE_STANDBY;
+ pg->expiry = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &pg->dh_list, node) {
+ BUG_ON(!h->sdev);
+ h->sdev->access_state =
+ (pg->state & SCSI_ACCESS_STATE_MASK);
+ if (pg->pref)
+ h->sdev->access_state |=
+ SCSI_ACCESS_STATE_PREFERRED;
+ }
+ rcu_read_unlock();
+ }
break;
- case TPGS_STATE_OFFLINE:
+ case SCSI_ACCESS_STATE_OFFLINE:
/* Path unusable */
err = SCSI_DH_DEV_OFFLINED;
+ pg->expiry = 0;
break;
default:
/* Usable path if active */
err = SCSI_DH_OK;
+ pg->expiry = 0;
break;
}
+ spin_unlock_irqrestore(&pg->lock, flags);
+ kfree(buff);
return err;
}
/*
+ * alua_stpg - Issue a SET TARGET PORT GROUP command
+ *
+ * Issue a SET TARGET PORT GROUP command and evaluate the
+ * response. Returns SCSI_DH_RETRY per default to trigger
+ * a re-evaluation of the target group state or SCSI_DH_OK
+ * if no further action needs to be taken.
+ */
+static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
+{
+ int retval;
+ struct scsi_sense_hdr sense_hdr;
+
+ if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
+ /* Only implicit ALUA supported, retry */
+ return SCSI_DH_RETRY;
+ }
+ switch (pg->state) {
+ case SCSI_ACCESS_STATE_OPTIMAL:
+ return SCSI_DH_OK;
+ case SCSI_ACCESS_STATE_ACTIVE:
+ if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
+ !pg->pref &&
+ (pg->tpgs & TPGS_MODE_IMPLICIT))
+ return SCSI_DH_OK;
+ break;
+ case SCSI_ACCESS_STATE_STANDBY:
+ case SCSI_ACCESS_STATE_UNAVAILABLE:
+ break;
+ case SCSI_ACCESS_STATE_OFFLINE:
+ return SCSI_DH_IO;
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+ break;
+ default:
+ sdev_printk(KERN_INFO, sdev,
+ "%s: stpg failed, unhandled TPGS state %d",
+ ALUA_DH_NAME, pg->state);
+ return SCSI_DH_NOSYS;
+ }
+ retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
+
+ if (retval) {
+ if (!scsi_sense_valid(&sense_hdr)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: stpg failed, result %d",
+ ALUA_DH_NAME, retval);
+ if (driver_byte(retval) == DRIVER_ERROR)
+ return SCSI_DH_DEV_TEMP_BUSY;
+ } else {
+ sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
+ ALUA_DH_NAME);
+ scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
+ }
+ }
+ /* Retry RTPG */
+ return SCSI_DH_RETRY;
+}
+
+static void alua_rtpg_work(struct work_struct *work)
+{
+ struct alua_port_group *pg =
+ container_of(work, struct alua_port_group, rtpg_work.work);
+ struct scsi_device *sdev;
+ LIST_HEAD(qdata_list);
+ int err = SCSI_DH_OK;
+ struct alua_queue_data *qdata, *tmp;
+ unsigned long flags;
+ struct workqueue_struct *alua_wq = kaluad_wq;
+
+ spin_lock_irqsave(&pg->lock, flags);
+ sdev = pg->rtpg_sdev;
+ if (!sdev) {
+ WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
+ WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
+ spin_unlock_irqrestore(&pg->lock, flags);
+ return;
+ }
+ if (pg->flags & ALUA_SYNC_STPG)
+ alua_wq = kaluad_sync_wq;
+ pg->flags |= ALUA_PG_RUNNING;
+ if (pg->flags & ALUA_PG_RUN_RTPG) {
+ int state = pg->state;
+
+ pg->flags &= ~ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
+ if (alua_tur(sdev) == SCSI_DH_RETRY) {
+ spin_lock_irqsave(&pg->lock, flags);
+ pg->flags &= ~ALUA_PG_RUNNING;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(alua_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+ return;
+ }
+ /* Send RTPG on failure or if TUR indicates SUCCESS */
+ }
+ err = alua_rtpg(sdev, pg);
+ spin_lock_irqsave(&pg->lock, flags);
+ if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+ pg->flags &= ~ALUA_PG_RUNNING;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(alua_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+ return;
+ }
+ if (err != SCSI_DH_OK)
+ pg->flags &= ~ALUA_PG_RUN_STPG;
+ }
+ if (pg->flags & ALUA_PG_RUN_STPG) {
+ pg->flags &= ~ALUA_PG_RUN_STPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ err = alua_stpg(sdev, pg);
+ spin_lock_irqsave(&pg->lock, flags);
+ if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ pg->interval = 0;
+ pg->flags &= ~ALUA_PG_RUNNING;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(alua_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+ return;
+ }
+ }
+
+ list_splice_init(&pg->rtpg_list, &qdata_list);
+ pg->rtpg_sdev = NULL;
+ spin_unlock_irqrestore(&pg->lock, flags);
+
+ list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
+ list_del(&qdata->entry);
+ if (qdata->callback_fn)
+ qdata->callback_fn(qdata->callback_data, err);
+ kfree(qdata);
+ }
+ spin_lock_irqsave(&pg->lock, flags);
+ pg->flags &= ~ALUA_PG_RUNNING;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ scsi_device_put(sdev);
+ kref_put(&pg->kref, release_port_group);
+}
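The three ALUA_PG_* bits above form a small state machine: ALUA_PG_RUN_RTPG and ALUA_PG_RUN_STPG are requests latched under pg->lock, while ALUA_PG_RUNNING marks an active worker instance. Each requeue path clears RUNNING and re-sets the request bit before queue_delayed_work(), so a request that arrives while the worker is temporarily off the lock is not lost but simply re-arms the next pass.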
+
+static void alua_rtpg_queue(struct alua_port_group *pg,
+ struct scsi_device *sdev,
+ struct alua_queue_data *qdata, bool force)
+{
+ int start_queue = 0;
+ unsigned long flags;
+ struct workqueue_struct *alua_wq = kaluad_wq;
+
+ if (!pg)
+ return;
+
+ spin_lock_irqsave(&pg->lock, flags);
+ if (qdata) {
+ list_add_tail(&qdata->entry, &pg->rtpg_list);
+ pg->flags |= ALUA_PG_RUN_STPG;
+ force = true;
+ }
+ if (pg->rtpg_sdev == NULL) {
+ pg->interval = 0;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ kref_get(&pg->kref);
+ pg->rtpg_sdev = sdev;
+ scsi_device_get(sdev);
+ start_queue = 1;
+ } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ /* Do not queue if the worker is already running */
+ if (!(pg->flags & ALUA_PG_RUNNING)) {
+ kref_get(&pg->kref);
+ start_queue = 1;
+ }
+ }
+
+ if (pg->flags & ALUA_SYNC_STPG)
+ alua_wq = kaluad_sync_wq;
+ spin_unlock_irqrestore(&pg->lock, flags);
+
+ if (start_queue &&
+ !queue_delayed_work(alua_wq, &pg->rtpg_work,
+ msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
+ scsi_device_put(sdev);
+ kref_put(&pg->kref, release_port_group);
+ }
+}
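The references taken in alua_rtpg_queue() (a pg kref, plus a scsi_device reference when rtpg_sdev is claimed) are handed to the worker, which drops them at the end of alua_rtpg_work(); if queue_delayed_work() reports the work as already pending, they are dropped on the spot, keeping the counts balanced.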
+
+/*
* alua_initialize - Initialize ALUA state
* @sdev: the device to be initialized
*
@@ -586,21 +904,14 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_
*/
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
- int err = SCSI_DH_DEV_UNSUPP;
-
- h->tpgs = alua_check_tpgs(sdev);
- if (h->tpgs == TPGS_MODE_NONE)
- goto out;
-
- err = alua_check_vpd(sdev, h);
- if (err != SCSI_DH_OK)
- goto out;
-
- err = alua_rtpg(sdev, h, 0);
- if (err != SCSI_DH_OK)
- goto out;
-
-out:
+ int err = SCSI_DH_DEV_UNSUPP, tpgs;
+
+ mutex_lock(&h->init_mutex);
+ tpgs = alua_check_tpgs(sdev);
+ if (tpgs != TPGS_MODE_NONE)
+ err = alua_check_vpd(sdev, h, tpgs);
+ h->init_error = err;
+ mutex_unlock(&h->init_mutex);
return err;
}
/*
@@ -615,9 +926,11 @@ out:
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group __rcu *pg = NULL;
unsigned int optimize = 0, argc;
const char *p = params;
int result = SCSI_DH_OK;
+ unsigned long flags;
if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
return -EINVAL;
@@ -627,18 +940,23 @@ static int alua_set_params(struct scsi_device *sdev, const char *params)
if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
return -EINVAL;
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (!pg) {
+ rcu_read_unlock();
+ return -ENXIO;
+ }
+ spin_lock_irqsave(&pg->lock, flags);
if (optimize)
- h->flags |= ALUA_OPTIMIZE_STPG;
+ pg->flags |= ALUA_OPTIMIZE_STPG;
else
- h->flags &= ~ALUA_OPTIMIZE_STPG;
+ pg->flags &= ~ALUA_OPTIMIZE_STPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ rcu_read_unlock();
return result;
}
-static uint optimize_stpg;
-module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
-
/*
* alua_activate - activate a path
* @sdev: device on the path to be activated
@@ -654,48 +972,33 @@ static int alua_activate(struct scsi_device *sdev,
{
struct alua_dh_data *h = sdev->handler_data;
int err = SCSI_DH_OK;
- int stpg = 0;
+ struct alua_queue_data *qdata;
+ struct alua_port_group __rcu *pg;
- err = alua_rtpg(sdev, h, 1);
- if (err != SCSI_DH_OK)
+ qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
+ if (!qdata) {
+ err = SCSI_DH_RES_TEMP_UNAVAIL;
goto out;
-
- if (optimize_stpg)
- h->flags |= ALUA_OPTIMIZE_STPG;
-
- if (h->tpgs & TPGS_MODE_EXPLICIT) {
- switch (h->state) {
- case TPGS_STATE_NONOPTIMIZED:
- stpg = 1;
- if ((h->flags & ALUA_OPTIMIZE_STPG) &&
- (!h->pref) &&
- (h->tpgs & TPGS_MODE_IMPLICIT))
- stpg = 0;
- break;
- case TPGS_STATE_STANDBY:
- case TPGS_STATE_UNAVAILABLE:
- stpg = 1;
- break;
- case TPGS_STATE_OFFLINE:
- err = SCSI_DH_IO;
- break;
- case TPGS_STATE_TRANSITIONING:
- err = SCSI_DH_RETRY;
- break;
- default:
- break;
- }
}
-
- if (stpg) {
- h->callback_fn = fn;
- h->callback_data = data;
- err = submit_stpg(h);
- if (err == SCSI_DH_OK)
- return 0;
- h->callback_fn = h->callback_data = NULL;
+ qdata->callback_fn = fn;
+ qdata->callback_data = data;
+
+ mutex_lock(&h->init_mutex);
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (!pg || !kref_get_unless_zero(&pg->kref)) {
+ rcu_read_unlock();
+ kfree(qdata);
+ err = h->init_error;
+ mutex_unlock(&h->init_mutex);
+ goto out;
}
+ fn = NULL;
+ rcu_read_unlock();
+ mutex_unlock(&h->init_mutex);
+ alua_rtpg_queue(pg, sdev, qdata, true);
+ kref_put(&pg->kref, release_port_group);
out:
if (fn)
fn(data, err);
@@ -703,6 +1006,29 @@ out:
}
/*
+ * alua_check - check path status
+ * @sdev: device on the path to be checked
+ *
+ * Check the device status
+ */
+static void alua_check(struct scsi_device *sdev, bool force)
+{
+ struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group *pg;
+
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (!pg || !kref_get_unless_zero(&pg->kref)) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
+ alua_rtpg_queue(pg, sdev, NULL, force);
+ kref_put(&pg->kref, release_port_group);
+}
+
+/*
* alua_prep_fn - request callback
*
* Fail I/O to all paths not in state
@@ -711,13 +1037,20 @@ out:
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct alua_dh_data *h = sdev->handler_data;
+ struct alua_port_group __rcu *pg;
+ unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
int ret = BLKPREP_OK;
- if (h->state == TPGS_STATE_TRANSITIONING)
+ rcu_read_lock();
+ pg = rcu_dereference(h->pg);
+ if (pg)
+ state = pg->state;
+ rcu_read_unlock();
+ if (state == SCSI_ACCESS_STATE_TRANSITIONING)
ret = BLKPREP_DEFER;
- else if (h->state != TPGS_STATE_OPTIMIZED &&
- h->state != TPGS_STATE_NONOPTIMIZED &&
- h->state != TPGS_STATE_LBA_DEPENDENT) {
+ else if (state != SCSI_ACCESS_STATE_OPTIMAL &&
+ state != SCSI_ACCESS_STATE_ACTIVE &&
+ state != SCSI_ACCESS_STATE_LBA) {
ret = BLKPREP_KILL;
req->cmd_flags |= REQ_QUIET;
}
@@ -725,6 +1058,13 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
}
+static void alua_rescan(struct scsi_device *sdev)
+{
+ struct alua_dh_data *h = sdev->handler_data;
+
+ alua_initialize(sdev, h);
+}
+
/*
* alua_bus_attach - Attach device handler
* @sdev: device to be attached to
@@ -732,20 +1072,21 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
static int alua_bus_attach(struct scsi_device *sdev)
{
struct alua_dh_data *h;
- int err;
+ int err, ret = -EINVAL;
h = kzalloc(sizeof(*h) , GFP_KERNEL);
if (!h)
return -ENOMEM;
- h->tpgs = TPGS_MODE_UNINITIALIZED;
- h->state = TPGS_STATE_OPTIMIZED;
- h->group_id = -1;
- h->rel_port = -1;
- h->buff = h->inq;
- h->bufflen = ALUA_INQUIRY_SIZE;
+ spin_lock_init(&h->pg_lock);
+ rcu_assign_pointer(h->pg, NULL);
+ h->init_error = SCSI_DH_OK;
h->sdev = sdev;
+ INIT_LIST_HEAD(&h->node);
+ mutex_init(&h->init_mutex);
err = alua_initialize(sdev, h);
+ if (err == SCSI_DH_NOMEM)
+ ret = -ENOMEM;
if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
goto failed;
@@ -753,7 +1094,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
return 0;
failed:
kfree(h);
- return -EINVAL;
+ return ret;
}
/*
@@ -763,9 +1104,19 @@ failed:
static void alua_bus_detach(struct scsi_device *sdev)
{
struct alua_dh_data *h = sdev->handler_data;
-
- if (h->buff && h->inq != h->buff)
- kfree(h->buff);
+ struct alua_port_group *pg;
+
+ spin_lock(&h->pg_lock);
+ pg = h->pg;
+ rcu_assign_pointer(h->pg, NULL);
+ h->sdev = NULL;
+ spin_unlock(&h->pg_lock);
+ if (pg) {
+ spin_lock_irq(&pg->lock);
+ list_del_rcu(&h->node);
+ spin_unlock_irq(&pg->lock);
+ kref_put(&pg->kref, release_port_group);
+ }
sdev->handler_data = NULL;
kfree(h);
}
@@ -778,6 +1129,7 @@ static struct scsi_device_handler alua_dh = {
.prep_fn = alua_prep_fn,
.check_sense = alua_check_sense,
.activate = alua_activate,
+ .rescan = alua_rescan,
.set_params = alua_set_params,
};
@@ -785,16 +1137,31 @@ static int __init alua_init(void)
{
int r;
+ kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
+ if (!kaluad_wq) {
+ /* Temporary failure, bypass */
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
+ kaluad_sync_wq = create_workqueue("kaluad_sync");
+ if (!kaluad_sync_wq) {
+ destroy_workqueue(kaluad_wq);
+ return SCSI_DH_DEV_TEMP_BUSY;
+ }
r = scsi_register_device_handler(&alua_dh);
- if (r != 0)
+ if (r != 0) {
printk(KERN_ERR "%s: Failed to register scsi device handler",
ALUA_DH_NAME);
+ destroy_workqueue(kaluad_sync_wq);
+ destroy_workqueue(kaluad_wq);
+ }
return r;
}
static void __exit alua_exit(void)
{
scsi_unregister_device_handler(&alua_dh);
+ destroy_workqueue(kaluad_sync_wq);
+ destroy_workqueue(kaluad_wq);
}
module_init(alua_init);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e6fb97cb12f4..375d81850f15 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -199,7 +199,12 @@ static int parse_sp_info_reply(struct scsi_device *sdev,
csdev->lun_state = csdev->buffer[4];
csdev->current_sp = csdev->buffer[8];
csdev->port = csdev->buffer[7];
-
+ if (csdev->lun_state == CLARIION_LUN_OWNED)
+ sdev->access_state = SCSI_ACCESS_STATE_OPTIMAL;
+ else
+ sdev->access_state = SCSI_ACCESS_STATE_STANDBY;
+ if (csdev->default_sp == csdev->current_sp)
+ sdev->access_state |= SCSI_ACCESS_STATE_PREFERRED;
out:
return err;
}
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 93880ed6291c..06fbd0b0c68a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -165,6 +165,7 @@ struct rdac_controller {
struct work_struct ms_work;
struct scsi_device *ms_sdev;
struct list_head ms_head;
+ struct list_head dh_list;
};
struct c2_inquiry {
@@ -181,7 +182,9 @@ struct c2_inquiry {
};
struct rdac_dh_data {
+ struct list_head node;
struct rdac_controller *ctlr;
+ struct scsi_device *sdev;
#define UNINITIALIZED_LUN (1 << 8)
unsigned lun;
@@ -392,6 +395,7 @@ static struct rdac_controller *get_controller(int index, char *array_name,
INIT_WORK(&ctlr->ms_work, send_mode_select);
INIT_LIST_HEAD(&ctlr->ms_head);
list_add(&ctlr->node, &ctlr_list);
+ INIT_LIST_HEAD(&ctlr->dh_list);
return ctlr;
}
@@ -455,7 +459,8 @@ static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
- int err;
+ int err, access_state;
+ struct rdac_dh_data *tmp;
struct c9_inquiry *inqp;
h->state = RDAC_STATE_ACTIVE;
@@ -471,19 +476,31 @@ static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
h->mode = RDAC_MODE; /* LUN in RDAC mode */
/* Update ownership */
- if (inqp->avte_cvp & 0x1)
+ if (inqp->avte_cvp & 0x1) {
h->lun_state = RDAC_LUN_OWNED;
- else {
+ access_state = SCSI_ACCESS_STATE_OPTIMAL;
+ } else {
h->lun_state = RDAC_LUN_UNOWNED;
- if (h->mode == RDAC_MODE)
+ if (h->mode == RDAC_MODE) {
h->state = RDAC_STATE_PASSIVE;
+ access_state = SCSI_ACCESS_STATE_STANDBY;
+ } else
+ access_state = SCSI_ACCESS_STATE_ACTIVE;
}
/* Update path prio */
- if (inqp->path_prio & 0x1)
+ if (inqp->path_prio & 0x1) {
h->preferred = RDAC_PREFERRED;
- else
+ access_state |= SCSI_ACCESS_STATE_PREFERRED;
+ } else
h->preferred = RDAC_NON_PREFERRED;
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp, &h->ctlr->dh_list, node) {
+ /* tmp->sdev should always be valid */
+ BUG_ON(!tmp->sdev);
+ tmp->sdev->access_state = access_state;
+ }
+ rcu_read_unlock();
}
return err;
@@ -508,6 +525,10 @@ static int initialize_controller(struct scsi_device *sdev,
h->ctlr = get_controller(index, array_name, array_id, sdev);
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
+ else {
+ list_add_rcu(&h->node, &h->ctlr->dh_list);
+ h->sdev = sdev;
+ }
spin_unlock(&list_lock);
}
return err;
@@ -829,8 +850,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
flush_workqueue(kmpath_rdacd);
spin_lock(&list_lock);
- if (h->ctlr)
+ if (h->ctlr) {
+ list_del_rcu(&h->node);
+ h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller);
+ }
spin_unlock(&list_lock);
sdev->handler_data = NULL;
kfree(h);
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index d4cda5e9600e..21c8d210c456 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -180,11 +180,14 @@ static u8 adpt_read_blink_led(adpt_hba* host)
*============================================================================
*/
+#ifdef MODULE
static struct pci_device_id dptids[] = {
{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, }
};
+#endif
+
MODULE_DEVICE_TABLE(pci,dptids);
static int adpt_detect(struct scsi_host_template* sht)
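The new #ifdef MODULE guard silences an unused-variable warning: MODULE_DEVICE_TABLE() expands to nothing for built-in code, and since this driver probes its PCI devices by hand rather than through pci_register_driver(), nothing else references dptids in the built-in case. The fdomain hunk further down applies the same trick.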
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index baf913047b48..3e8483410f61 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1360,14 +1360,15 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
if (ioctl->header.channel == 0xFF) {
a = (struct esas2r_adapter *)hostdata;
} else {
- a = esas2r_adapters[ioctl->header.channel];
- if (ioctl->header.channel >= MAX_ADAPTERS || (a == NULL)) {
+ if (ioctl->header.channel >= MAX_ADAPTERS ||
+ esas2r_adapters[ioctl->header.channel] == NULL) {
ioctl->header.return_code = IOCTL_BAD_CHANNEL;
esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
kfree(ioctl);
return -ENOTSUPP;
}
+ a = esas2r_adapters[ioctl->header.channel];
}
switch (cmd) {
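The reorder above fixes an out-of-bounds read: esas2r_adapters[] was indexed with the user-supplied channel before the MAX_ADAPTERS bound was checked. The pattern, reduced to a runnable sketch (names hypothetical):

#include <stdio.h>

#define MAX_ADAPTERS 32

static void *adapters[MAX_ADAPTERS];

/* Validate the index first, then dereference -- the pre-patch code
 * read adapters[channel] before checking channel < MAX_ADAPTERS. */
static void *lookup(unsigned int channel)
{
	if (channel >= MAX_ADAPTERS)
		return NULL;
	return adapters[channel];
}

int main(void)
{
	printf("%p\n", lookup(1000));	/* NULL, instead of an OOB read */
	return 0;
}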
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 34a1b1f333b4..3e83d485f743 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1118,7 +1118,8 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
* If this is the first validated FCF, note the time and
* set a timer to trigger selection.
*/
- if (mtu_valid && !fip->sel_fcf && fcoe_ctlr_fcf_usable(fcf)) {
+ if (mtu_valid && !fip->sel_fcf && !fip->sel_time &&
+ fcoe_ctlr_fcf_usable(fcf)) {
fip->sel_time = jiffies +
msecs_to_jiffies(FCOE_CTLR_START_DELAY);
if (!timer_pending(&fip->timer) ||
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index d7597c08fa11..641c60e8fda3 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -93,36 +93,40 @@ static struct notifier_block libfcoe_notifier = {
int fcoe_link_speed_update(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_get_netdev(lport);
- struct ethtool_cmd ecmd;
+ struct ethtool_link_ksettings ecmd;
- if (!__ethtool_get_settings(netdev, &ecmd)) {
+ if (!__ethtool_get_link_ksettings(netdev, &ecmd)) {
lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT |
FC_PORTSPEED_10GBIT |
FC_PORTSPEED_20GBIT |
FC_PORTSPEED_40GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full |
- SUPPORTED_1000baseKX_Full))
+ if (ecmd.link_modes.supported[0] & (
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseKX_Full))
lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & (SUPPORTED_10000baseT_Full |
- SUPPORTED_10000baseKX4_Full |
- SUPPORTED_10000baseKR_Full |
- SUPPORTED_10000baseR_FEC))
+ if (ecmd.link_modes.supported[0] & (
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseR_FEC))
lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
- if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full |
- SUPPORTED_20000baseKR2_Full))
+ if (ecmd.link_modes.supported[0] & (
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
- if (ecmd.supported & (SUPPORTED_40000baseKR4_Full |
- SUPPORTED_40000baseCR4_Full |
- SUPPORTED_40000baseSR4_Full |
- SUPPORTED_40000baseLR4_Full))
+ if (ecmd.link_modes.supported[0] & (
+ SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_40000baseCR4_Full |
+ SUPPORTED_40000baseSR4_Full |
+ SUPPORTED_40000baseLR4_Full))
lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
- switch (ethtool_cmd_speed(&ecmd)) {
+ switch (ecmd.base.speed) {
case SPEED_1000:
lport->link_speed = FC_PORTSPEED_1GBIT;
break;
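The conversion above reflects the ethtool API change: ethtool_cmd.supported was a single u32, while link_ksettings keeps link modes in an array of unsigned long bitmaps, of which only word 0 carries the legacy SUPPORTED_* bits tested here. A standalone sketch of that bitmap indexing (illustrative, not the kernel helper):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* test_bit-style lookup into a long-array bitmap */
static bool bitmap_test(const unsigned long *map, unsigned int bit)
{
	return map[bit / (sizeof(long) * CHAR_BIT)] &
	       (1UL << (bit % (sizeof(long) * CHAR_BIT)));
}

int main(void)
{
	unsigned long supported[2] = { 0 };

	supported[0] |= 1UL << 5;	/* e.g. the 1000baseT/Full bit */
	printf("%d\n", bitmap_test(supported, 5));
	return 0;
}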
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index eefe14d453db..b87ab38a4530 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1768,7 +1768,7 @@ struct scsi_host_template fdomain_driver_template = {
};
#ifndef PCMCIA
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && defined(MODULE)
static struct pci_device_id fdomain_pci_tbl[] = {
{ PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 266b909fe854..f3032ca5051b 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -958,23 +958,22 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
default:
- shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
- fnic_fcpio_status_to_str(hdr_status));
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
}
- if (hdr_status != FCPIO_SUCCESS) {
- atomic64_inc(&fnic_stats->io_stats.io_failures);
- shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
- fnic_fcpio_status_to_str(hdr_status));
- }
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
CMD_FLAGS(sc) |= FNIC_IO_DONE;
spin_unlock_irqrestore(io_lock, flags);
+ if (hdr_status != FCPIO_SUCCESS) {
+ atomic64_inc(&fnic_stats->io_stats.io_failures);
+ shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
+ fnic_fcpio_status_to_str(hdr_status));
+ }
+
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
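Moving the two shost_printk() calls after spin_unlock_irqrestore() shortens the io_lock critical section: console output can be slow (serial consoles especially), and holding a spinlock with interrupts disabled across it stalls the completion path. Nothing the messages print is protected by io_lock, so the move is safe.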
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 71e138044379..0a767740bf02 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2838,7 +2838,6 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
u16 idx, gdth_evt_data *evt)
{
gdth_evt_str *e;
- struct timeval tv;
/* no GDTH_LOCK_HA() ! */
TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
@@ -2854,8 +2853,7 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
!strcmp((char *)&ebuffer[elastidx].event_data.event_string,
(char *)&evt->event_string)))) {
e = &ebuffer[elastidx];
- do_gettimeofday(&tv);
- e->last_stamp = tv.tv_sec;
+ e->last_stamp = (u32)ktime_get_real_seconds();
++e->same_count;
} else {
if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
@@ -2871,8 +2869,7 @@ static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
e = &ebuffer[elastidx];
e->event_source = source;
e->event_idx = idx;
- do_gettimeofday(&tv);
- e->first_stamp = e->last_stamp = tv.tv_sec;
+ e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
e->same_count = 1;
e->event_data = *evt;
e->application = 0;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index e66e997992e3..be609db66807 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -148,7 +148,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
gdth_cmd_str *gdtcmd;
gdth_evt_str *estr;
char hrec[161];
- struct timeval tv;
char *buf;
gdth_dskstat_str *pds;
@@ -540,8 +539,14 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
if (estr->event_data.eu.driver.ionode == ha->hanum &&
estr->event_source == ES_ASYNC) {
gdth_log_event(&estr->event_data, hrec);
- do_gettimeofday(&tv);
- sec = (int)(tv.tv_sec - estr->first_stamp);
+
+ /*
+ * Elapsed seconds subtraction with unsigned operands is
+ * safe from wrap around in year 2106. Executes as:
+ * operand a + (2's complement operand b) + 1
+ */
+
+ sec = (int)((u32)ktime_get_real_seconds() - estr->first_stamp);
if (sec < 0) sec = 0;
seq_printf(m," date- %02d:%02d:%02d\t%s\n",
sec/3600, sec%3600/60, sec%60, hrec);
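The comment above can be made concrete: with 32-bit unsigned operands the subtraction is performed modulo 2^32, so the elapsed value stays correct even when ktime_get_real_seconds() has wrapped past the u32 stamp. A short demonstration (values illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t then = 0xfffffff0u;	/* shortly before the wrap */
	uint32_t now  = 0x00000010u;	/* shortly after the wrap  */

	printf("%u\n", now - then);	/* prints 32, not a huge number */
	return 0;
}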
diff --git a/drivers/scsi/hisi_sas/Makefile b/drivers/scsi/hisi_sas/Makefile
index 3e70eae81343..c6d3a1b5fcb9 100644
--- a/drivers/scsi/hisi_sas/Makefile
+++ b/drivers/scsi/hisi_sas/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_main.o
-obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o
+obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o hisi_sas_v2_hw.o
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 5af2e4187f01..29e89f340b64 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -12,22 +12,24 @@
#ifndef _HISI_SAS_H_
#define _HISI_SAS_H_
+#include <linux/acpi.h>
#include <linux/dmapool.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
+#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
-#define DRV_VERSION "v1.0"
+#define DRV_VERSION "v1.3"
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
#define HISI_SAS_QUEUE_SLOTS 512
-#define HISI_SAS_MAX_ITCT_ENTRIES 4096
+#define HISI_SAS_MAX_ITCT_ENTRIES 2048
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
-#define HISI_SAS_COMMAND_ENTRIES 8192
#define HISI_SAS_STATUS_BUF_SZ \
(sizeof(struct hisi_sas_err_record) + 1024)
@@ -36,6 +38,11 @@
#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
#define HISI_SAS_MAX_SMP_RESP_SZ 1028
+#define HISI_SAS_MAX_STP_RESP_SZ 28
+
+#define DEV_IS_EXPANDER(type) \
+ ((type == SAS_EDGE_EXPANDER_DEVICE) || \
+ (type == SAS_FANOUT_EXPANDER_DEVICE))
struct hisi_hba;
@@ -105,6 +112,7 @@ struct hisi_sas_slot {
int cmplt_queue;
int cmplt_queue_slot;
int idx;
+ int abort;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
void *status_buffer;
@@ -113,6 +121,7 @@ struct hisi_sas_slot {
dma_addr_t command_table_dma;
struct hisi_sas_sge_page *sge_page;
dma_addr_t sge_page_dma;
+ struct work_struct abort_slot;
};
struct hisi_sas_tmf_task {
@@ -132,6 +141,8 @@ struct hisi_sas_hw {
struct hisi_sas_tmf_task *tmf);
int (*prep_smp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
+ int (*prep_stp)(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot);
int (*slot_complete)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int abort);
void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -140,6 +151,7 @@ struct hisi_sas_hw {
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
+ int max_command_entries;
int complete_hdr_size;
};
@@ -244,18 +256,7 @@ struct hisi_sas_itct {
__le64 sas_addr;
__le64 qw2;
__le64 qw3;
- __le64 qw4;
- __le64 qw_sata_ncq0_3;
- __le64 qw_sata_ncq7_4;
- __le64 qw_sata_ncq11_8;
- __le64 qw_sata_ncq15_12;
- __le64 qw_sata_ncq19_16;
- __le64 qw_sata_ncq23_20;
- __le64 qw_sata_ncq27_24;
- __le64 qw_sata_ncq31_28;
- __le64 qw_non_ncq_iptt;
- __le64 qw_rsvd0;
- __le64 qw_rsvd1;
+ __le64 qw4_15[12];
};
struct hisi_sas_iost {
@@ -266,17 +267,7 @@ struct hisi_sas_iost {
};
struct hisi_sas_err_record {
- /* dw0 */
- __le32 dma_err_type;
-
- /* dw1 */
- __le32 trans_tx_fail_type;
-
- /* dw2 */
- __le32 trans_rx_fail_type;
-
- /* dw3 */
- u32 rsvd;
+ u32 data[4];
};
struct hisi_sas_initial_fis {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 99b1950d751c..097ab4f27a6b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -12,13 +12,12 @@
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"
-#define DEV_IS_EXPANDER(type) \
- ((type == SAS_EDGE_EXPANDER_DEVICE) || \
- (type == SAS_FANOUT_EXPANDER_DEVICE))
-
#define DEV_IS_GONE(dev) \
((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
+static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
+ u8 *lun, struct hisi_sas_tmf_task *tmf);
+
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
return device->port->ha->lldd_ha;
@@ -111,6 +110,50 @@ static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf);
}
+static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ return hisi_hba->hw->prep_stp(hisi_hba, slot);
+}
+
+/*
+ * This function will issue an abort TMF regardless of whether the
+ * task is in the sdev or not. Then it will do the task completion
+ * cleanup and callbacks.
+ */
+static void hisi_sas_slot_abort(struct work_struct *work)
+{
+ struct hisi_sas_slot *abort_slot =
+ container_of(work, struct hisi_sas_slot, abort_slot);
+ struct sas_task *task = abort_slot->task;
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
+ struct scsi_cmnd *cmnd = task->uldd_task;
+ struct hisi_sas_tmf_task tmf_task;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct scsi_lun lun;
+ struct device *dev = &hisi_hba->pdev->dev;
+ int tag = abort_slot->idx;
+
+ if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
+ dev_err(dev, "cannot abort slot for non-ssp task\n");
+ goto out;
+ }
+
+ int_to_scsilun(cmnd->device->lun, &lun);
+ tmf_task.tmf = TMF_ABORT_TASK;
+ tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+ hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
+out:
+ /* Do cleanup for this task */
+ hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
+ if (task->task_done)
+ task->task_done(task);
+ if (sas_dev && sas_dev->running_req)
+ sas_dev->running_req--;
+}
+
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
int is_tmf, struct hisi_sas_tmf_task *tmf,
int *pass)
@@ -204,6 +247,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
slot->task = task;
slot->port = port;
task->lldd_task = slot;
+ INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
GFP_ATOMIC,
@@ -234,6 +278,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ rc = hisi_sas_task_prep_ata(hisi_hba, slot);
+ break;
default:
dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
task->task_proto);
@@ -407,6 +453,19 @@ static int hisi_sas_dev_found(struct domain_device *device)
return 0;
}
+static int hisi_sas_slave_configure(struct scsi_device *sdev)
+{
+ struct domain_device *dev = sdev_to_domain_dev(sdev);
+ int ret = sas_slave_configure(sdev);
+
+ if (ret)
+ return ret;
+ if (!dev_is_sata(dev))
+ sas_change_queue_depth(sdev, 64);
+
+ return 0;
+}
+
static void hisi_sas_scan_start(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
@@ -657,7 +716,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
- task->task_status.stat == SAM_STAT_GOOD) {
+ task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
res = TMF_RESP_FUNC_COMPLETE;
break;
}
@@ -944,7 +1003,7 @@ static struct scsi_host_template hisi_sas_sht = {
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
- .slave_configure = sas_slave_configure,
+ .slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
@@ -977,9 +1036,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
- int i, s;
struct platform_device *pdev = hisi_hba->pdev;
struct device *dev = &pdev->dev;
+ int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
spin_lock_init(&hisi_hba->lock);
for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -1039,13 +1098,13 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
memset(hisi_hba->itct, 0, s);
- hisi_hba->slot_info = devm_kcalloc(dev, HISI_SAS_COMMAND_ENTRIES,
+ hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
sizeof(struct hisi_sas_slot),
GFP_KERNEL);
if (!hisi_hba->slot_info)
goto err_out;
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
+ s = max_command_entries * sizeof(struct hisi_sas_iost);
hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
GFP_KERNEL);
if (!hisi_hba->iost)
@@ -1053,7 +1112,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
memset(hisi_hba->iost, 0, s);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->breakpoint)
@@ -1061,7 +1120,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
memset(hisi_hba->breakpoint, 0, s);
- hisi_hba->slot_index_count = HISI_SAS_COMMAND_ENTRIES;
+ hisi_hba->slot_index_count = max_command_entries;
s = hisi_hba->slot_index_count / sizeof(unsigned long);
hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
if (!hisi_hba->slot_index_tags)
@@ -1079,7 +1138,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
goto err_out;
memset(hisi_hba->initial_fis, 0, s);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
@@ -1102,7 +1161,7 @@ err_out:
static void hisi_sas_free(struct hisi_hba *hisi_hba)
{
struct device *dev = &hisi_hba->pdev->dev;
- int i, s;
+ int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
for (i = 0; i < hisi_hba->queue_count; i++) {
s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
@@ -1127,12 +1186,12 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
dma_free_coherent(dev, s,
hisi_hba->itct, hisi_hba->itct_dma);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_iost);
+ s = max_command_entries * sizeof(struct hisi_sas_iost);
if (hisi_hba->iost)
dma_free_coherent(dev, s,
hisi_hba->iost, hisi_hba->iost_dma);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint);
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
if (hisi_hba->breakpoint)
dma_free_coherent(dev, s,
hisi_hba->breakpoint,
@@ -1145,7 +1204,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
hisi_hba->initial_fis,
hisi_hba->initial_fis_dma);
- s = HISI_SAS_COMMAND_ENTRIES * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
if (hisi_hba->sata_breakpoint)
dma_free_coherent(dev, s,
hisi_hba->sata_breakpoint,
@@ -1163,7 +1222,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
struct hisi_hba *hisi_hba;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
- struct property *sas_addr_prop;
shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
if (!shost)
@@ -1177,27 +1235,34 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
init_timer(&hisi_hba->timer);
- sas_addr_prop = of_find_property(np, "sas-addr", NULL);
- if (!sas_addr_prop || (sas_addr_prop->length != SAS_ADDR_SIZE))
+ if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
+ SAS_ADDR_SIZE))
goto err_out;
- memcpy(hisi_hba->sas_addr, sas_addr_prop->value, SAS_ADDR_SIZE);
- if (of_property_read_u32(np, "ctrl-reset-reg",
- &hisi_hba->ctrl_reset_reg))
- goto err_out;
+ if (np) {
+ hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
+ "hisilicon,sas-syscon");
+ if (IS_ERR(hisi_hba->ctrl))
+ goto err_out;
- if (of_property_read_u32(np, "ctrl-reset-sts-reg",
- &hisi_hba->ctrl_reset_sts_reg))
- goto err_out;
+ if (device_property_read_u32(dev, "ctrl-reset-reg",
+ &hisi_hba->ctrl_reset_reg))
+ goto err_out;
- if (of_property_read_u32(np, "ctrl-clock-ena-reg",
- &hisi_hba->ctrl_clock_ena_reg))
- goto err_out;
+ if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
+ &hisi_hba->ctrl_reset_sts_reg))
+ goto err_out;
- if (of_property_read_u32(np, "phy-count", &hisi_hba->n_phy))
+ if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
+ &hisi_hba->ctrl_clock_ena_reg))
+ goto err_out;
+ }
+
+ if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
goto err_out;
- if (of_property_read_u32(np, "queue-count", &hisi_hba->queue_count))
+ if (device_property_read_u32(dev, "queue-count",
+ &hisi_hba->queue_count))
goto err_out;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1205,11 +1270,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (IS_ERR(hisi_hba->regs))
goto err_out;
- hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(
- np, "hisilicon,sas-syscon");
- if (IS_ERR(hisi_hba->ctrl))
- goto err_out;
-
if (hisi_sas_alloc(hisi_hba, shost)) {
hisi_sas_free(hisi_hba);
goto err_out;
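
The hunk above replaces the of_* property reads with device_property_read_u32()/device_property_read_u8_array(), which is what lets one probe path serve both DT and ACPI: the unified property API transparently consults either the OF node or the ACPI _DSD data. A minimal sketch of the idiom, assuming a hypothetical "vendor,queue-count" property; only device_property_read_u32() and the platform_device boilerplate are real kernel APIs:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Sketch only: "vendor,queue-count" is a made-up property name. */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	u32 queue_count;

	/* Resolves from the DT node or from ACPI _DSD, whichever exists */
	if (device_property_read_u32(dev, "vendor,queue-count",
				     &queue_count))
		return -ENODEV;

	dev_info(dev, "queue-count = %u\n", queue_count);
	return 0;
}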
@@ -1277,8 +1337,8 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
- shost->can_queue = HISI_SAS_COMMAND_ENTRIES;
- shost->cmd_per_lun = HISI_SAS_COMMAND_ENTRIES;
+ shost->can_queue = hisi_hba->hw->max_command_entries;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
sha->sas_ha_name = DRV_NAME;
sha->dev = &hisi_hba->pdev->dev;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index eea24d7531cf..1abbc2e162df 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -288,6 +288,20 @@ struct hisi_sas_complete_v1_hdr {
__le32 data;
};
+struct hisi_sas_err_record_v1 {
+ /* dw0 */
+ __le32 dma_err_type;
+
+ /* dw1 */
+ __le32 trans_tx_fail_type;
+
+ /* dw2 */
+ __le32 trans_rx_fail_type;
+
+ /* dw3 */
+ u32 rsvd;
+};
+
enum {
HISI_SAS_PHY_BCAST_ACK = 0,
HISI_SAS_PHY_SL_PHY_ENABLED,
@@ -392,6 +406,8 @@ enum {
TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */
};
+#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192
+
#define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS)
#define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES)
#define HISI_SAS_FATAL_INT_NR (2)
@@ -607,31 +623,42 @@ static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
return -EIO;
}
- /* Apply reset and disable clock */
- /* clk disable reg is offset by +4 bytes from clk enable reg */
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
- RESET_VALUE);
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
- RESET_VALUE);
- msleep(1);
- regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
- if (RESET_VALUE != (val & RESET_VALUE)) {
- dev_err(dev, "Reset failed\n");
- return -EIO;
- }
+ if (ACPI_HANDLE(dev)) {
+ acpi_status s;
- /* De-reset and enable clock */
- /* deassert rst reg is offset by +4 bytes from assert reg */
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
- RESET_VALUE);
- regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
- RESET_VALUE);
- msleep(1);
- regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
- if (val & RESET_VALUE) {
- dev_err(dev, "De-reset failed\n");
- return -EIO;
- }
+ s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
+ if (ACPI_FAILURE(s)) {
+ dev_err(dev, "Reset failed\n");
+ return -EIO;
+ }
+ } else if (hisi_hba->ctrl) {
+ /* Apply reset and disable clock */
+ /* clk disable reg is offset by +4 bytes from clk enable reg */
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
+ RESET_VALUE);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
+ RESET_VALUE);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+ if (RESET_VALUE != (val & RESET_VALUE)) {
+ dev_err(dev, "Reset failed\n");
+ return -EIO;
+ }
+
+ /* De-reset and enable clock */
+ /* deassert rst reg is offset by +4 bytes from assert reg */
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
+ RESET_VALUE);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
+ RESET_VALUE);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+ if (val & RESET_VALUE) {
+ dev_err(dev, "De-reset failed\n");
+ return -EIO;
+ }
+ } else
+ dev_warn(dev, "no reset method\n");
return 0;
}
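
The rewritten reset path above tries an ACPI _RST method first and only falls back to the syscon regmap sequence, which relies on a board convention: the de-assert register (and the clock-disable register) sits 4 bytes after its assert (clock-enable) counterpart. A minimal sketch of that pulse under the same assumptions; only regmap_write()/regmap_read() are real APIs, the function and parameter names are illustrative:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/types.h>

static int example_syscon_pulse_reset(struct regmap *ctrl, u32 rst_reg,
				      u32 sts_reg, u32 mask)
{
	u32 val;

	regmap_write(ctrl, rst_reg, mask);	/* assert reset */
	msleep(1);
	regmap_read(ctrl, sts_reg, &val);
	if ((val & mask) != mask)
		return -EIO;			/* reset never latched */

	regmap_write(ctrl, rst_reg + 4, mask);	/* de-assert at reg + 4 */
	msleep(1);
	regmap_read(ctrl, sts_reg, &val);
	return (val & mask) ? -EIO : 0;		/* status bits must clear */
}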
@@ -1096,7 +1123,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct task_status_struct *ts = &task->task_status;
- struct hisi_sas_err_record *err_record = slot->status_buffer;
+ struct hisi_sas_err_record_v1 *err_record = slot->status_buffer;
struct device *dev = &hisi_hba->pdev->dev;
switch (task->task_proto) {
@@ -1185,6 +1212,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
ts->stat = SAS_NAK_R_ERR;
break;
}
+ case TRANS_TX_CREDIT_TIMEOUT_ERR:
+ case TRANS_TX_CLOSE_NORMAL_ERR:
+ {
+ /* This will request a retry */
+ ts->stat = SAS_QUEUE_FULL;
+ slot->abort = 1;
+ break;
+ }
default:
{
ts->stat = SAM_STAT_CHECK_CONDITION;
@@ -1220,7 +1255,6 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
struct domain_device *device;
enum exec_status sts;
struct hisi_sas_complete_v1_hdr *complete_queue =
- (struct hisi_sas_complete_v1_hdr *)
hisi_hba->complete_hdr[slot->cmplt_queue];
struct hisi_sas_complete_v1_hdr *complete_hdr;
u32 cmplt_hdr_data;
@@ -1293,6 +1327,11 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
!(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
slot_err_v1_hw(hisi_hba, task, slot);
+ if (unlikely(slot->abort)) {
+ queue_work(hisi_hba->wq, &slot->abort_slot);
+ /* immediately return and do not complete */
+ return ts->stat;
+ }
goto out;
}
@@ -1796,6 +1835,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
+ .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
};
@@ -1815,12 +1855,20 @@ static const struct of_device_id sas_v1_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sas_v1_of_match);
+static const struct acpi_device_id sas_v1_acpi_match[] = {
+ { "HISI0161", 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match);
+
static struct platform_driver hisi_sas_v1_driver = {
.probe = hisi_sas_v1_probe,
.remove = hisi_sas_v1_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sas_v1_of_match,
+ .acpi_match_table = ACPI_PTR(sas_v1_acpi_match),
},
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
new file mode 100644
index 000000000000..b7337476454b
--- /dev/null
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -0,0 +1,2214 @@
+/*
+ * Copyright (c) 2016 Linaro Ltd.
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include "hisi_sas.h"
+#define DRV_NAME "hisi_sas_v2_hw"
+
+/* global registers need init */
+#define DLVRY_QUEUE_ENABLE 0x0
+#define IOST_BASE_ADDR_LO 0x8
+#define IOST_BASE_ADDR_HI 0xc
+#define ITCT_BASE_ADDR_LO 0x10
+#define ITCT_BASE_ADDR_HI 0x14
+#define IO_BROKEN_MSG_ADDR_LO 0x18
+#define IO_BROKEN_MSG_ADDR_HI 0x1c
+#define PHY_CONTEXT 0x20
+#define PHY_STATE 0x24
+#define PHY_PORT_NUM_MA 0x28
+#define PORT_STATE 0x2c
+#define PORT_STATE_PHY8_PORT_NUM_OFF 16
+#define PORT_STATE_PHY8_PORT_NUM_MSK (0xf << PORT_STATE_PHY8_PORT_NUM_OFF)
+#define PORT_STATE_PHY8_CONN_RATE_OFF 20
+#define PORT_STATE_PHY8_CONN_RATE_MSK (0xf << PORT_STATE_PHY8_CONN_RATE_OFF)
+#define PHY_CONN_RATE 0x30
+#define HGC_TRANS_TASK_CNT_LIMIT 0x38
+#define AXI_AHB_CLK_CFG 0x3c
+#define ITCT_CLR 0x44
+#define ITCT_CLR_EN_OFF 16
+#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
+#define ITCT_DEV_OFF 0
+#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
+#define AXI_USER1 0x48
+#define AXI_USER2 0x4c
+#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
+#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
+#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
+#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
+#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
+#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
+#define HGC_GET_ITV_TIME 0x90
+#define DEVICE_MSG_WORK_MODE 0x94
+#define OPENA_WT_CONTI_TIME 0x9c
+#define I_T_NEXUS_LOSS_TIME 0xa0
+#define MAX_CON_TIME_LIMIT_TIME 0xa4
+#define BUS_INACTIVE_LIMIT_TIME 0xa8
+#define REJECT_TO_OPEN_LIMIT_TIME 0xac
+#define CFG_AGING_TIME 0xbc
+#define HGC_DFX_CFG2 0xc0
+#define HGC_IOMB_PROC1_STATUS 0x104
+#define CFG_1US_TIMER_TRSH 0xcc
+#define HGC_INVLD_DQE_INFO 0x148
+#define HGC_INVLD_DQE_INFO_FB_CH0_OFF 9
+#define HGC_INVLD_DQE_INFO_FB_CH0_MSK (0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
+#define HGC_INVLD_DQE_INFO_FB_CH3_OFF 18
+#define INT_COAL_EN 0x19c
+#define OQ_INT_COAL_TIME 0x1a0
+#define OQ_INT_COAL_CNT 0x1a4
+#define ENT_INT_COAL_TIME 0x1a8
+#define ENT_INT_COAL_CNT 0x1ac
+#define OQ_INT_SRC 0x1b0
+#define OQ_INT_SRC_MSK 0x1b4
+#define ENT_INT_SRC1 0x1b8
+#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
+#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
+#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
+#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
+#define ENT_INT_SRC2 0x1bc
+#define ENT_INT_SRC3 0x1c0
+#define ENT_INT_SRC3_ITC_INT_OFF 15
+#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC_MSK1 0x1c4
+#define ENT_INT_SRC_MSK2 0x1c8
+#define ENT_INT_SRC_MSK3 0x1cc
+#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
+#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR_MSK 0x1ec
+#define HGC_ERR_STAT_EN 0x238
+#define DLVRY_Q_0_BASE_ADDR_LO 0x260
+#define DLVRY_Q_0_BASE_ADDR_HI 0x264
+#define DLVRY_Q_0_DEPTH 0x268
+#define DLVRY_Q_0_WR_PTR 0x26c
+#define DLVRY_Q_0_RD_PTR 0x270
+#define HYPER_STREAM_ID_EN_CFG 0xc80
+#define OQ0_INT_SRC_MSK 0xc90
+#define COMPL_Q_0_BASE_ADDR_LO 0x4e0
+#define COMPL_Q_0_BASE_ADDR_HI 0x4e4
+#define COMPL_Q_0_DEPTH 0x4e8
+#define COMPL_Q_0_WR_PTR 0x4ec
+#define COMPL_Q_0_RD_PTR 0x4f0
+
+/* phy registers need init */
+#define PORT_BASE (0x2000)
+
+#define PHY_CFG (PORT_BASE + 0x0)
+#define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
+#define PHY_CFG_ENA_OFF 0
+#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
+#define PHY_CFG_DC_OPT_OFF 2
+#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
+#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
+#define PROG_PHY_LINK_RATE_MAX_OFF 0
+#define PROG_PHY_LINK_RATE_MAX_MSK (0xff << PROG_PHY_LINK_RATE_MAX_OFF)
+#define PHY_CTRL (PORT_BASE + 0x14)
+#define PHY_CTRL_RESET_OFF 0
+#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
+#define SAS_PHY_CTRL (PORT_BASE + 0x20)
+#define SL_CFG (PORT_BASE + 0x84)
+#define PHY_PCN (PORT_BASE + 0x44)
+#define SL_TOUT_CFG (PORT_BASE + 0x8c)
+#define SL_CONTROL (PORT_BASE + 0x94)
+#define SL_CONTROL_NOTIFY_EN_OFF 0
+#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
+#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
+#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
+#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
+#define TX_ID_DWORD3 (PORT_BASE + 0xa8)
+#define TX_ID_DWORD4 (PORT_BASE + 0xac)
+#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
+#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
+#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
+#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
+#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
+#define RX_IDAF_DWORD3 (PORT_BASE + 0xd0)
+#define RX_IDAF_DWORD4 (PORT_BASE + 0xd4)
+#define RX_IDAF_DWORD5 (PORT_BASE + 0xd8)
+#define RX_IDAF_DWORD6 (PORT_BASE + 0xdc)
+#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
+#define DONE_RECEIVED_TIME (PORT_BASE + 0x11c)
+#define CHL_INT0 (PORT_BASE + 0x1b4)
+#define CHL_INT0_HOTPLUG_TOUT_OFF 0
+#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
+#define CHL_INT0_SL_RX_BCST_ACK_OFF 1
+#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
+#define CHL_INT0_SL_PHY_ENABLE_OFF 2
+#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
+#define CHL_INT0_NOT_RDY_OFF 4
+#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
+#define CHL_INT0_PHY_RDY_OFF 5
+#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
+#define CHL_INT1 (PORT_BASE + 0x1b8)
+#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
+#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
+#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
+#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
+#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
+#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
+#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
+#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
+#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
+#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
+#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
+#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
+#define DMA_TX_STATUS (PORT_BASE + 0x2d0)
+#define DMA_TX_STATUS_BUSY_OFF 0
+#define DMA_TX_STATUS_BUSY_MSK (0x1 << DMA_TX_STATUS_BUSY_OFF)
+#define DMA_RX_STATUS (PORT_BASE + 0x2e8)
+#define DMA_RX_STATUS_BUSY_OFF 0
+#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
+
+#define AXI_CFG (0x5100)
+#define AM_CFG_MAX_TRANS (0x5010)
+#define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014)
+
+/* HW dma structures */
+/* Delivery queue header */
+/* dw0 */
+#define CMD_HDR_RESP_REPORT_OFF 5
+#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
+#define CMD_HDR_TLR_CTRL_OFF 6
+#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PORT_OFF 18
+#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
+#define CMD_HDR_PRIORITY_OFF 27
+#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
+#define CMD_HDR_CMD_OFF 29
+#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
+/* dw1 */
+#define CMD_HDR_DIR_OFF 5
+#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
+#define CMD_HDR_RESET_OFF 7
+#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
+#define CMD_HDR_VDTL_OFF 10
+#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
+#define CMD_HDR_FRAME_TYPE_OFF 11
+#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
+#define CMD_HDR_DEV_ID_OFF 16
+#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
+/* dw2 */
+#define CMD_HDR_CFL_OFF 0
+#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
+#define CMD_HDR_NCQ_TAG_OFF 10
+#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
+#define CMD_HDR_MRFL_OFF 15
+#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
+#define CMD_HDR_SG_MOD_OFF 24
+#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
+#define CMD_HDR_FIRST_BURST_OFF 26
+#define CMD_HDR_FIRST_BURST_MSK (0x1 << CMD_HDR_FIRST_BURST_OFF)
+/* dw3 */
+#define CMD_HDR_IPTT_OFF 0
+#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
+/* dw6 */
+#define CMD_HDR_DIF_SGL_LEN_OFF 0
+#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
+#define CMD_HDR_DATA_SGL_LEN_OFF 16
+#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+
+/* Completion header */
+/* dw0 */
+#define CMPLT_HDR_RSPNS_XFRD_OFF 10
+#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
+#define CMPLT_HDR_ERX_OFF 12
+#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
+/* dw1 */
+#define CMPLT_HDR_IPTT_OFF 0
+#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
+#define CMPLT_HDR_DEV_ID_OFF 16
+#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
+
+/* ITCT header */
+/* qw0 */
+#define ITCT_HDR_DEV_TYPE_OFF 0
+#define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
+#define ITCT_HDR_VALID_OFF 2
+#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
+#define ITCT_HDR_MCR_OFF 5
+#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
+#define ITCT_HDR_VLN_OFF 9
+#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
+#define ITCT_HDR_PORT_ID_OFF 28
+#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
+/* qw2 */
+#define ITCT_HDR_INLT_OFF 0
+#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
+#define ITCT_HDR_BITLT_OFF 16
+#define ITCT_HDR_BITLT_MSK (0xffffULL << ITCT_HDR_BITLT_OFF)
+#define ITCT_HDR_MCTLT_OFF 32
+#define ITCT_HDR_MCTLT_MSK (0xffffULL << ITCT_HDR_MCTLT_OFF)
+#define ITCT_HDR_RTOLT_OFF 48
+#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+
+struct hisi_sas_complete_v2_hdr {
+ __le32 dw0;
+ __le32 dw1;
+ __le32 act;
+ __le32 dw3;
+};
+
+struct hisi_sas_err_record_v2 {
+ /* dw0 */
+ __le32 trans_tx_fail_type;
+
+ /* dw1 */
+ __le32 trans_rx_fail_type;
+
+ /* dw2 */
+ __le16 dma_tx_err_type;
+ __le16 sipc_rx_err_type;
+
+ /* dw3 */
+ __le32 dma_rx_err_type;
+};
+
+enum {
+ HISI_SAS_PHY_PHY_UPDOWN,
+ HISI_SAS_PHY_CHNL_INT,
+ HISI_SAS_PHY_INT_NR
+};
+
+enum {
+ TRANS_TX_FAIL_BASE = 0x0, /* dw0 */
+ TRANS_RX_FAIL_BASE = 0x100, /* dw1 */
+ DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */
+ SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16 */
+ DMA_RX_ERR_BASE = 0x400, /* dw3 */
+
+ /* trans tx */
+ TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */
+ TRANS_TX_ERR_PHY_NOT_ENABLE, /* 0x1 */
+ TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, /* 0x2 */
+ TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, /* 0x3 */
+ TRANS_TX_OPEN_CNX_ERR_BY_OTHER, /* 0x4 */
+ RESERVED0, /* 0x5 */
+ TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, /* 0x6 */
+ TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, /* 0x7 */
+ TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, /* 0x8 */
+ TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, /* 0x9 */
+ TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, /* 0xa */
+ TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, /* 0xb */
+ TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, /* 0xc */
+ TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, /* 0xd */
+ TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, /* 0xe */
+ TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, /* 0xf */
+ TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, /* 0x10 */
+ TRANS_TX_ERR_FRAME_TXED, /* 0x11 */
+ TRANS_TX_ERR_WITH_BREAK_TIMEOUT, /* 0x12 */
+ TRANS_TX_ERR_WITH_BREAK_REQUEST, /* 0x13 */
+ TRANS_TX_ERR_WITH_BREAK_RECEVIED, /* 0x14 */
+ TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, /* 0x15 */
+ TRANS_TX_ERR_WITH_CLOSE_NORMAL, /* 0x16 for ssp */
+ TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, /* 0x17 */
+ TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x18 */
+ TRANS_TX_ERR_WITH_CLOSE_COMINIT, /* 0x19 */
+ TRANS_TX_ERR_WITH_NAK_RECEVIED, /* 0x1a for ssp */
+ TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, /* 0x1b for ssp */
+ /* IO_TX_ERR_WITH_R_ERR_RECEVIED, 0x1b for sata/stp */
+ TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, /* 0x1c for ssp */
+ /* IO_RX_ERR_WITH_SATA_DEVICE_LOST, 0x1c for sata/stp */
+ TRANS_TX_ERR_WITH_IPTT_CONFLICT, /* 0x1d for ssp/smp */
+ TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, /* 0x1e */
+ /* IO_TX_ERR_WITH_SYNC_RXD, 0x1e for sata/stp */
+ TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */
+
+ /* trans rx */
+ TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */
+ TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */
+ /* IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, 0x102 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */
+ /* IO_ERR_WITH_RXFIS_TX_SYNCP, 0x105 for sata/stp */
+ TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp */
+ TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */
+ TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */
+ TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */
+ TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */
+ RESERVED1, /* 0x10b */
+ TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */
+ TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */
+ TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */
+ TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */
+ TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */
+ TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */
+ /* IO_RX_ERR_WITH_FIS_TOO_SHORT, 0x111 for sata/stp */
+ TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp */
+ /* IO_RX_ERR_WITH_FIS_TOO_LONG, 0x112 for sata/stp */
+ TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */
+ /* IO_RX_ERR_WITH_SATA_DEVICE_LOST, 0x113 for sata */
+ RESERVED2, /* 0x114 */
+ RESERVED3, /* 0x115 */
+ RESERVED4, /* 0x116 */
+ RESERVED5, /* 0x117 */
+ TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */
+ TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */
+ TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */
+ RESERVED6, /* 0x11b */
+ RESERVED7, /* 0x11c */
+ RESERVED8, /* 0x11d */
+ RESERVED9, /* 0x11e */
+ TRANS_RX_R_ERR, /* 0x11f */
+
+ /* dma tx */
+ DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */
+ DMA_TX_DIF_APP_ERR, /* 0x201 */
+ DMA_TX_DIF_RPP_ERR, /* 0x202 */
+ DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */
+ DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */
+ DMA_TX_UNEXP_XFER_ERR, /* 0x205 */
+ DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */
+ DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */
+ DMA_TX_XFER_OFFSET_ERR, /* 0x208 */
+ DMA_TX_RAM_ECC_ERR, /* 0x209 */
+ DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */
+
+ /* sipc rx */
+ SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */
+ SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */
+ SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */
+ SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */
+ SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */
+ SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */
+ SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */
+ SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */
+ SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */
+ SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */
+ SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */
+
+ /* dma rx */
+ DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */
+ DMA_RX_DIF_APP_ERR, /* 0x401 */
+ DMA_RX_DIF_RPP_ERR, /* 0x402 */
+ DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */
+ DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */
+ DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */
+ DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */
+ DMA_RX_DATA_OFFSET_ERR, /* 0x407 */
+ RESERVED10, /* 0x408 */
+ DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */
+ DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */
+ DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */
+ DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */
+ DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */
+ DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */
+ DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */
+ DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */
+ DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */
+ DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */
+ DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */
+ DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */
+ DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */
+ DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */
+ DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */
+ DMA_RX_RAM_ECC_ERR, /* 0x418 */
+ DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */
+};
+
+#define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096
+
+#define DIR_NO_DATA 0
+#define DIR_TO_INI 1
+#define DIR_TO_DEVICE 2
+#define DIR_RESERVED 3
+
+#define SATA_PROTOCOL_NONDATA 0x1
+#define SATA_PROTOCOL_PIO 0x2
+#define SATA_PROTOCOL_DMA 0x4
+#define SATA_PROTOCOL_FPDMA 0x8
+#define SATA_PROTOCOL_ATAPI 0x10
+
+static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ return readl(regs);
+}
+
+static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ return readl_relaxed(regs);
+}
+
+static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ writel(val, regs);
+}
+
+static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
+ u32 off, u32 val)
+{
+ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+ writel(val, regs);
+}
+
+static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
+ int phy_no, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+ return readl(regs);
+}
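
The phy accessors above resolve a per-phy register as regs + 0x400 * phy_no + off, where the per-phy offsets defined earlier already include PORT_BASE (0x2000); in other words each phy owns a 0x400-byte register window. A standalone worked example of the arithmetic, with values copied from the defines above:

#include <stdio.h>

#define PORT_BASE 0x2000u
#define PHY_CFG   (PORT_BASE + 0x0)
#define SL_CFG    (PORT_BASE + 0x84)

/* Mirrors the offset computation in hisi_sas_phy_read32()/write32() */
static unsigned int phy_reg_addr(int phy_no, unsigned int off)
{
	return (0x400u * phy_no) + off;
}

int main(void)
{
	/* phy 3's PHY_CFG lives at 0x2000 + 3 * 0x400 = 0x2c00 */
	printf("PHY_CFG for phy 3: 0x%x\n", phy_reg_addr(3, PHY_CFG));
	printf("SL_CFG for phy 5: 0x%x\n", phy_reg_addr(5, SL_CFG));
	return 0;
}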
+
+static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg &= ~PHY_CFG_DC_OPT_MSK;
+ cfg |= 1 << PHY_CFG_DC_OPT_OFF;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct sas_identify_frame identify_frame;
+ u32 *identify_buffer;
+
+ memset(&identify_frame, 0, sizeof(identify_frame));
+ identify_frame.dev_type = SAS_END_DEVICE;
+ identify_frame.frame_type = 0;
+ identify_frame._un1 = 1;
+ identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
+ identify_frame.target_bits = SAS_PROTOCOL_NONE;
+ memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+ memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+ identify_frame.phy_id = phy_no;
+ identify_buffer = (u32 *)(&identify_frame);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
+ __swab32(identify_buffer[0]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
+ identify_buffer[2]);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
+ identify_buffer[1]);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
+ identify_buffer[4]);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
+ identify_buffer[3]);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
+ __swab32(identify_buffer[5]));
+}
+
+static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ config_id_frame_v2_hw(hisi_hba, i);
+}
+
+static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
+{
+ struct domain_device *device = sas_dev->sas_device;
+ struct device *dev = &hisi_hba->pdev->dev;
+ u64 qw0, device_id = sas_dev->device_id;
+ struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
+ struct domain_device *parent_dev = device->parent;
+ struct hisi_sas_port *port = device->port->lldd_port;
+
+ memset(itct, 0, sizeof(*itct));
+
+ /* qw0 */
+ qw0 = 0;
+ switch (sas_dev->dev_type) {
+ case SAS_END_DEVICE:
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
+ break;
+ case SAS_SATA_DEV:
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
+ else
+ qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
+ break;
+ default:
+ dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
+ sas_dev->dev_type);
+ }
+
+ qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
+ (device->max_linkrate << ITCT_HDR_MCR_OFF) |
+ (1 << ITCT_HDR_VLN_OFF) |
+ (port->id << ITCT_HDR_PORT_ID_OFF));
+ itct->qw0 = cpu_to_le64(qw0);
+
+ /* qw1 */
+ memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = __swab64(itct->sas_addr);
+
+ /* qw2 */
+ itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
+ (0xff00ULL << ITCT_HDR_BITLT_OFF) |
+ (0xff00ULL << ITCT_HDR_MCTLT_OFF) |
+ (0xff00ULL << ITCT_HDR_RTOLT_OFF));
+}
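
setup_itct_v2_hw() builds qw0 with the file's _OFF/_MSK convention: shift a field value up by its _OFF to pack it, then mask with _MSK and shift back down to unpack it. A standalone sketch reusing the ITCT qw0 defines from above; the sample field values are made up, and the constants are widened to unsigned long long here so the shifts and 64-bit masking are well-defined in a hosted program:

#include <stdio.h>
#include <stdint.h>

#define ITCT_HDR_DEV_TYPE_OFF	0
#define ITCT_HDR_DEV_TYPE_MSK	(0x3ull << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF	2
#define ITCT_HDR_VALID_MSK	(0x1ull << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_PORT_ID_OFF	28
#define ITCT_HDR_PORT_ID_MSK	(0xfull << ITCT_HDR_PORT_ID_OFF)

int main(void)
{
	uint64_t qw0 = 0;

	/* pack: device type 1, valid bit set, port id 5 (sample values) */
	qw0 |= 1ull << ITCT_HDR_DEV_TYPE_OFF;
	qw0 |= 1ull << ITCT_HDR_VALID_OFF;
	qw0 |= 5ull << ITCT_HDR_PORT_ID_OFF;

	/* unpack: mask, then shift back down */
	printf("port id = %llu\n",
	       (unsigned long long)((qw0 & ITCT_HDR_PORT_ID_MSK) >>
				    ITCT_HDR_PORT_ID_OFF));
	return 0;
}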
+
+static void free_device_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
+{
+ u64 qw0, dev_id = sas_dev->device_id;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
+ u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ int i;
+
+ /* clear the itct interrupt state */
+ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ ENT_INT_SRC3_ITC_INT_MSK);
+
+ /* clear the itct interrupt */
+ for (i = 0; i < 2; i++) {
+ /* clear the itct table */
+ reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+ hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
+
+ udelay(10);
+ reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
+ dev_dbg(dev, "got clear ITCT done interrupt\n");
+
+ /* invalidate the itct state */
+ qw0 = le64_to_cpu(itct->qw0);
+ qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ ENT_INT_SRC3_ITC_INT_MSK);
+ hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
+ hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
+
+ /* clear the itct */
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ break;
+ }
+ }
+}
+
+static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int i, reset_val;
+ u32 val;
+ unsigned long end_time;
+ struct device *dev = &hisi_hba->pdev->dev;
+
+ /* The mask needs to be set depending on the number of phys */
+ if (hisi_hba->n_phy == 9)
+ reset_val = 0x1fffff;
+ else
+ reset_val = 0x7ffff;
+
+ /* Disable all of the DQ */
+ for (i = 0; i < HISI_SAS_MAX_QUEUES; i++)
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+
+ /* Disable all of the PHYs */
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ u32 phy_cfg = hisi_sas_phy_read32(hisi_hba, i, PHY_CFG);
+
+ phy_cfg &= ~PHY_CTRL_RESET_MSK;
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CFG, phy_cfg);
+ }
+ udelay(50);
+
+ /* Ensure DMA tx & rx idle */
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ u32 dma_tx_status, dma_rx_status;
+
+ end_time = jiffies + msecs_to_jiffies(1000);
+
+ while (1) {
+ dma_tx_status = hisi_sas_phy_read32(hisi_hba, i,
+ DMA_TX_STATUS);
+ dma_rx_status = hisi_sas_phy_read32(hisi_hba, i,
+ DMA_RX_STATUS);
+
+ if (!(dma_tx_status & DMA_TX_STATUS_BUSY_MSK) &&
+ !(dma_rx_status & DMA_RX_STATUS_BUSY_MSK))
+ break;
+
+ msleep(20);
+ if (time_after(jiffies, end_time))
+ return -EIO;
+ }
+ }
+
+ /* Ensure axi bus idle */
+ end_time = jiffies + msecs_to_jiffies(1000);
+ while (1) {
+ u32 axi_status =
+ hisi_sas_read32(hisi_hba, AXI_CFG);
+
+ if (axi_status == 0)
+ break;
+
+ msleep(20);
+ if (time_after(jiffies, end_time))
+ return -EIO;
+ }
+
+ /* reset and disable clock */
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg,
+ reset_val);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg + 4,
+ reset_val);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg, &val);
+ if (reset_val != (val & reset_val)) {
+ dev_err(dev, "SAS reset fail.\n");
+ return -EIO;
+ }
+
+ /* De-reset and enable clock */
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_reset_reg + 4,
+ reset_val);
+ regmap_write(hisi_hba->ctrl, hisi_hba->ctrl_clock_ena_reg,
+ reset_val);
+ msleep(1);
+ regmap_read(hisi_hba->ctrl, hisi_hba->ctrl_reset_sts_reg,
+ &val);
+ if (val & reset_val) {
+ dev_err(dev, "SAS de-reset fail.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct device_node *np = dev->of_node;
+ int i;
+
+ /* Global registers init */
+
+ /* Deal with am-max-transmissions quirk */
+ if (of_get_property(np, "hip06-sas-v2-quirk-amt", NULL)) {
+ hisi_sas_write32(hisi_hba, AM_CFG_MAX_TRANS, 0x2020);
+ hisi_sas_write32(hisi_hba, AM_CFG_SINGLE_PORT_MAX_TRANS,
+ 0x2020);
+ } /* Else, use defaults -> do nothing */
+
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+ (u32)((1ULL << hisi_hba->queue_count) - 1));
+ hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000);
+ hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000);
+ hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+ hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
+ hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
+ hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20);
+ hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
+ hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+ hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+
+ hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
+ hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
+ hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
+ hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
+ hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc);
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+ }
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ /* Delivery queue */
+ hisi_sas_write32(hisi_hba,
+ DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
+ upper_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
+ lower_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
+ HISI_SAS_QUEUE_SLOTS);
+
+ /* Completion queue */
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
+ upper_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
+ lower_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
+ HISI_SAS_QUEUE_SLOTS);
+ }
+
+ /* itct */
+ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
+ lower_32_bits(hisi_hba->itct_dma));
+
+ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
+ upper_32_bits(hisi_hba->itct_dma));
+
+ /* iost */
+ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
+ lower_32_bits(hisi_hba->iost_dma));
+
+ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
+ upper_32_bits(hisi_hba->iost_dma));
+
+ /* breakpoint */
+ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
+ lower_32_bits(hisi_hba->breakpoint_dma));
+
+ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
+ upper_32_bits(hisi_hba->breakpoint_dma));
+
+ /* SATA broken msg */
+ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
+ lower_32_bits(hisi_hba->sata_breakpoint_dma));
+
+ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
+ upper_32_bits(hisi_hba->sata_breakpoint_dma));
+
+ /* SATA initial fis */
+ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
+ lower_32_bits(hisi_hba->initial_fis_dma));
+
+ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
+ upper_32_bits(hisi_hba->initial_fis_dma));
+}
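
The per-queue programming above leans on a fixed layout: every delivery and completion queue register sits 0x14 bytes after its queue-0 counterpart, which is the (i * 0x14) term in the writes. A quick standalone check of where the registers land, with offsets copied from the defines above:

#include <stdio.h>

#define DLVRY_Q_0_WR_PTR  0x26c
#define COMPL_Q_0_RD_PTR  0x4f0

int main(void)
{
	int q;

	/* each queue's registers are queue-0's address plus q * 0x14 */
	for (q = 0; q < 4; q++)
		printf("queue %d: DQ wr ptr @ 0x%x, CQ rd ptr @ 0x%x\n",
		       q, DLVRY_Q_0_WR_PTR + q * 0x14,
		       COMPL_Q_0_RD_PTR + q * 0x14);
	return 0;
}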
+
+static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ int rc;
+
+ rc = reset_hw_v2_hw(hisi_hba);
+ if (rc) {
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ return rc;
+ }
+
+ msleep(100);
+ init_reg_v2_hw(hisi_hba);
+
+ init_id_frame_v2_hw(hisi_hba);
+
+ return 0;
+}
+
+static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg |= PHY_CFG_ENA_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg &= ~PHY_CFG_ENA_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ config_id_frame_v2_hw(hisi_hba, phy_no);
+ config_phy_opt_mode_v2_hw(hisi_hba, phy_no);
+ enable_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ disable_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ stop_phy_v2_hw(hisi_hba, phy_no);
+ msleep(100);
+ start_phy_v2_hw(hisi_hba, phy_no);
+}
+
+static void start_phys_v2_hw(unsigned long data)
+{
+ struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ int i;
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ start_phy_v2_hw(hisi_hba, i);
+}
+
+static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+ struct timer_list *timer = &hisi_hba->timer;
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a);
+ hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
+ }
+
+ setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
+ mod_timer(timer, jiffies + HZ);
+}
+
+static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 sl_control;
+
+ sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+ msleep(1);
+ sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+}
+
+static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
+{
+ int i, bitmap = 0;
+ u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+
+ for (i = 0; i < (hisi_hba->n_phy < 9 ? hisi_hba->n_phy : 8); i++)
+ if (phy_state & 1 << i)
+ if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ bitmap |= 1 << i;
+
+ if (hisi_hba->n_phy == 9) {
+ u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+ if (phy_state & 1 << 8)
+ if (((port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+ PORT_STATE_PHY8_PORT_NUM_OFF) == port_id)
+ bitmap |= 1 << 9;
+ }
+
+ return bitmap;
+}
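
get_wideport_bitmap_v2_hw() works because PHY_PORT_NUM_MA packs one 4-bit port number per phy (phy i's port id occupies bits 4i..4i+3), with phy 8 handled separately via PORT_STATE. A standalone sketch of the nibble extraction, using made-up register values:

#include <stdio.h>

int main(void)
{
	/* made-up sample: phy0 -> port0, phy1 -> port1, phy2 -> port2, ... */
	unsigned int phy_port_num_ma = 0x00003210;
	unsigned int phy_state = 0xf;	/* phys 0-3 up (made-up) */
	int port_id = 2, bitmap = 0, i;

	for (i = 0; i < 8; i++)
		if ((phy_state & (1 << i)) &&
		    ((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
			bitmap |= 1 << i;

	printf("phys on port %d: bitmap = 0x%x\n", port_id, bitmap);
	return 0;
}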
+
+/**
+ * This function allocates slots across all delivery queues to load
+ * balance: slots are taken from the queues in a round-robin fashion.
+ *
+ * The call path to this function, up to and including the write of the
+ * delivery queue write pointer, must be safe from interruption.
+ */
+static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 r, w;
+ int queue = hisi_hba->queue;
+
+ while (1) {
+ w = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_WR_PTR + (queue * 0x14));
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
+ if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+ queue = (queue + 1) % hisi_hba->queue_count;
+ if (queue == hisi_hba->queue) {
+ dev_warn(dev, "could not find free slot\n");
+ return -EAGAIN;
+ }
+ continue;
+ }
+ break;
+ }
+ hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+ *q = queue;
+ *s = w;
+ return 0;
+}
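
The full test in get_free_slot_v2_hw() is the classic one-slot-reserved ring-buffer check: with read pointer r and write pointer w, the queue is declared full when r == (w + 1) % slots, so one slot is always sacrificed to distinguish full from empty. A standalone sketch of that invariant; QUEUE_SLOTS is a made-up stand-in for HISI_SAS_QUEUE_SLOTS:

#include <stdio.h>

#define QUEUE_SLOTS 8

/* full: advancing the write pointer once would collide with the reader */
static int ring_full(unsigned int r, unsigned int w)
{
	return r == (w + 1) % QUEUE_SLOTS;
}

static int ring_empty(unsigned int r, unsigned int w)
{
	return r == w;
}

int main(void)
{
	printf("r=0 w=0: empty=%d full=%d\n", ring_empty(0, 0), ring_full(0, 0));
	printf("r=0 w=7: empty=%d full=%d\n", ring_empty(0, 7), ring_full(0, 7));
	return 0;
}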
+
+static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
+{
+ int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
+ int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+ ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
+}
+
+static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_cmd_hdr *hdr,
+ struct scatterlist *scatter,
+ int n_elem)
+{
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct scatterlist *sg;
+ int i;
+
+ if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
+ dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ n_elem);
+ return -EINVAL;
+ }
+
+ slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
+ &slot->sge_page_dma);
+ if (!slot->sge_page)
+ return -ENOMEM;
+
+ for_each_sg(scatter, sg, n_elem, i) {
+ struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+
+ entry->addr = cpu_to_le64(sg_dma_address(sg));
+ entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
+ entry->data_len = cpu_to_le32(sg_dma_len(sg));
+ entry->data_off = 0;
+ }
+
+ hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+
+ hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+
+ return 0;
+}
+
+static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct domain_device *device = task->dev;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_port *port = slot->port;
+ struct scatterlist *sg_req, *sg_resp;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ dma_addr_t req_dma_addr;
+ unsigned int req_len, resp_len;
+ int elem, rc;
+
+ /*
+ * DMA-map SMP request, response buffers
+ */
+ /* req */
+ sg_req = &task->smp_task.smp_req;
+ elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
+ if (!elem)
+ return -ENOMEM;
+ req_len = sg_dma_len(sg_req);
+ req_dma_addr = sg_dma_address(sg_req);
+
+ /* resp */
+ sg_resp = &task->smp_task.smp_resp;
+ elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
+ if (!elem) {
+ rc = -ENOMEM;
+ goto err_out_req;
+ }
+ resp_len = sg_dma_len(sg_resp);
+ if ((req_len & 0x3) || (resp_len & 0x3)) {
+ rc = -EINVAL;
+ goto err_out_resp;
+ }
+
+ /* create header */
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
+ (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
+ (2 << CMD_HDR_CMD_OFF)); /* smp */
+
+ /* map itct entry */
+ hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
+ (1 << CMD_HDR_FRAME_TYPE_OFF) |
+ (DIR_NO_DATA << CMD_HDR_DIR_OFF));
+
+ /* dw2 */
+ hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
+ (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
+ CMD_HDR_MRFL_OFF));
+
+ hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
+
+ hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
+ hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+ return 0;
+
+err_out_resp:
+ dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
+err_out_req:
+ dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ return rc;
+}
+
+static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot, int is_tmf,
+ struct hisi_sas_tmf_task *tmf)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_port *port = slot->port;
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+ int has_data = 0, rc, priority = is_tmf;
+ u8 *buf_cmd;
+ u32 dw1 = 0, dw2 = 0;
+
+ hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
+ (2 << CMD_HDR_TLR_CTRL_OFF) |
+ (port->id << CMD_HDR_PORT_OFF) |
+ (priority << CMD_HDR_PRIORITY_OFF) |
+ (1 << CMD_HDR_CMD_OFF)); /* ssp */
+
+ dw1 = 1 << CMD_HDR_VDTL_OFF;
+ if (is_tmf) {
+ dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
+ } else {
+ dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
+ switch (scsi_cmnd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+ break;
+ case DMA_FROM_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+ break;
+ default:
+ dw1 &= ~CMD_HDR_DIR_MSK;
+ }
+ }
+
+ /* map itct entry */
+ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+ hdr->dw1 = cpu_to_le32(dw1);
+
+ dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
+ + 3) / 4) << CMD_HDR_CFL_OFF) |
+ ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
+ (2 << CMD_HDR_SG_MOD_OFF);
+ hdr->dw2 = cpu_to_le32(dw2);
+
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ if (has_data) {
+ rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+ slot->n_elem);
+ if (rc)
+ return rc;
+ }
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+ hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
+ hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+ buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
+
+ memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+ if (!is_tmf) {
+ buf_cmd[9] = task->ssp_task.task_attr |
+ (task->ssp_task.task_prio << 3);
+ memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
+ task->ssp_task.cmd->cmd_len);
+ } else {
+ buf_cmd[10] = tmf->tmf;
+ switch (tmf->tmf) {
+ case TMF_ABORT_TASK:
+ case TMF_QUERY_TASK:
+ buf_cmd[12] =
+ (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+ buf_cmd[13] =
+ tmf->tag_of_task_to_be_managed & 0xff;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ struct hisi_sas_slot *slot)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
+ struct dev_to_host_fis *d2h = slot->status_buffer +
+ sizeof(struct hisi_sas_err_record);
+
+ resp->frame_len = sizeof(struct dev_to_host_fis);
+ memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
+
+ ts->buf_valid_size = sizeof(*resp);
+}
+
+/* by default, task resp is complete */
+static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
+ struct sas_task *task,
+ struct hisi_sas_slot *slot)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
+ u32 trans_tx_fail_type = le32_to_cpu(err_record->trans_tx_fail_type);
+ u32 trans_rx_fail_type = le32_to_cpu(err_record->trans_rx_fail_type);
+ u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
+ u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
+ u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
+ int error = -1;
+
+ if (dma_rx_err_type) {
+ error = ffs(dma_rx_err_type)
+ - 1 + DMA_RX_ERR_BASE;
+ } else if (sipc_rx_err_type) {
+ error = ffs(sipc_rx_err_type)
+ - 1 + SIPC_RX_ERR_BASE;
+ } else if (dma_tx_err_type) {
+ error = ffs(dma_tx_err_type)
+ - 1 + DMA_TX_ERR_BASE;
+ } else if (trans_rx_fail_type) {
+ error = ffs(trans_rx_fail_type)
+ - 1 + TRANS_RX_FAIL_BASE;
+ } else if (trans_tx_fail_type) {
+ error = ffs(trans_tx_fail_type)
+ - 1 + TRANS_TX_FAIL_BASE;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ {
+ switch (error) {
+ case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_NO_DEST;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_EPROTO;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_CONN_RATE;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_BAD_DEST;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
+ {
+ /* not sure */
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ }
+ case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
+ {
+ ts->stat = SAS_PHY_DOWN;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
+ {
+ ts->stat = SAS_OPEN_TO;
+ break;
+ }
+ case DMA_RX_DATA_LEN_OVERFLOW:
+ {
+ ts->stat = SAS_DATA_OVERRUN;
+ ts->residual = 0;
+ break;
+ }
+ case DMA_RX_DATA_LEN_UNDERFLOW:
+ case SIPC_RX_DATA_UNDERFLOW_ERR:
+ {
+ ts->residual = trans_tx_fail_type;
+ ts->stat = SAS_DATA_UNDERRUN;
+ break;
+ }
+ case TRANS_TX_ERR_FRAME_TXED:
+ {
+ /* This will request a retry */
+ ts->stat = SAS_QUEUE_FULL;
+ slot->abort = 1;
+ break;
+ }
+ case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
+ case TRANS_TX_ERR_PHY_NOT_ENABLE:
+ case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
+ case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
+ case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
+ case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
+ case TRANS_TX_ERR_WITH_BREAK_REQUEST:
+ case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
+ case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_TX_ERR_WITH_NAK_RECEVIED:
+ case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
+ case TRANS_TX_ERR_WITH_IPTT_CONFLICT:
+ case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
+ case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR:
+ case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
+ case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
+ case TRANS_RX_ERR_WITH_BREAK_TIMEOUT:
+ case TRANS_RX_ERR_WITH_BREAK_REQUEST:
+ case TRANS_RX_ERR_WITH_BREAK_RECEVIED:
+ case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+ case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_RX_ERR_WITH_DATA_LEN0:
+ case TRANS_RX_ERR_WITH_BAD_HASH:
+ case TRANS_RX_XRDY_WLEN_ZERO_ERR:
+ case TRANS_RX_SSP_FRM_LEN_ERR:
+ case TRANS_RX_ERR_WITH_BAD_FRM_TYPE:
+ case DMA_TX_UNEXP_XFER_ERR:
+ case DMA_TX_UNEXP_RETRANS_ERR:
+ case DMA_TX_XFER_LEN_OVERFLOW:
+ case DMA_TX_XFER_OFFSET_ERR:
+ case DMA_RX_DATA_OFFSET_ERR:
+ case DMA_RX_UNEXP_NORM_RESP_ERR:
+ case DMA_RX_UNEXP_RDFRAME_ERR:
+ case DMA_RX_UNKNOWN_FRM_ERR:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_UNKNOWN;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ break;
+ case SAS_PROTOCOL_SMP:
+ ts->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ {
+ switch (error) {
+ case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER:
+ case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED:
+ case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION:
+ {
+ ts->resp = SAS_TASK_UNDELIVERED;
+ ts->stat = SAS_DEV_NO_RESPONSE;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED:
+ case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED:
+ case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION:
+ case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD:
+ case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION:
+ case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION:
+ case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ }
+ case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT:
+ {
+ ts->stat = SAS_OPEN_TO;
+ break;
+ }
+ case DMA_RX_DATA_LEN_OVERFLOW:
+ {
+ ts->stat = SAS_DATA_OVERRUN;
+ break;
+ }
+ case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS:
+ case TRANS_TX_ERR_PHY_NOT_ENABLE:
+ case TRANS_TX_OPEN_CNX_ERR_BY_OTHER:
+ case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT:
+ case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED:
+ case TRANS_TX_ERR_WITH_BREAK_TIMEOUT:
+ case TRANS_TX_ERR_WITH_BREAK_REQUEST:
+ case TRANS_TX_ERR_WITH_BREAK_RECEVIED:
+ case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_TX_ERR_WITH_NAK_RECEVIED:
+ case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT:
+ case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT:
+ case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT:
+ case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR:
+ case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM:
+ case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR:
+ case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR:
+ case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN:
+ case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP:
+ case TRANS_RX_ERR_WITH_CLOSE_NORMAL:
+ case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE:
+ case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT:
+ case TRANS_RX_ERR_WITH_CLOSE_COMINIT:
+ case TRANS_RX_ERR_WITH_DATA_LEN0:
+ case TRANS_RX_ERR_WITH_BAD_HASH:
+ case TRANS_RX_XRDY_WLEN_ZERO_ERR:
+ case TRANS_RX_SSP_FRM_LEN_ERR:
+ case SIPC_RX_FIS_STATUS_ERR_BIT_VLD:
+ case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR:
+ case SIPC_RX_FIS_STATUS_BSY_BIT_ERR:
+ case SIPC_RX_WRSETUP_LEN_ODD_ERR:
+ case SIPC_RX_WRSETUP_LEN_ZERO_ERR:
+ case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR:
+ case SIPC_RX_SATA_UNEXP_FIS_ERR:
+ case DMA_RX_SATA_FRAME_TYPE_ERR:
+ case DMA_RX_UNEXP_RDFRAME_ERR:
+ case DMA_RX_PIO_DATA_LEN_ERR:
+ case DMA_RX_RDSETUP_STATUS_ERR:
+ case DMA_RX_RDSETUP_STATUS_DRQ_ERR:
+ case DMA_RX_RDSETUP_STATUS_BSY_ERR:
+ case DMA_RX_RDSETUP_LEN_ODD_ERR:
+ case DMA_RX_RDSETUP_LEN_ZERO_ERR:
+ case DMA_RX_RDSETUP_LEN_OVER_ERR:
+ case DMA_RX_RDSETUP_OFFSET_ERR:
+ case DMA_RX_RDSETUP_ACTIVE_ERR:
+ case DMA_RX_RDSETUP_ESTATUS_ERR:
+ case DMA_RX_UNKNOWN_FRM_ERR:
+ {
+ ts->stat = SAS_OPEN_REJECT;
+ break;
+ }
+ default:
+ {
+ ts->stat = SAS_PROTO_RESPONSE;
+ break;
+ }
+ }
+ sata_done_v2_hw(hisi_hba, task, slot);
+ }
+ break;
+ default:
+ break;
+ }
+}
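
The ffs() chain at the top of slot_err_v2_hw() folds the five hardware error fields into the single code space declared in the enum above: the first set bit of a field (ffs() is 1-based, hence the -1) is added to that field's base, and the fields are tested from dma_rx down to trans_tx so the higher-numbered source takes priority. A standalone sketch of the same decode with made-up field values; ffs() here is the userspace one from <strings.h>:

#include <stdio.h>
#include <strings.h>

#define TRANS_TX_FAIL_BASE 0x000
#define TRANS_RX_FAIL_BASE 0x100
#define DMA_TX_ERR_BASE    0x200
#define SIPC_RX_ERR_BASE   0x300
#define DMA_RX_ERR_BASE    0x400

static int decode_error(unsigned int trans_tx, unsigned int trans_rx,
			unsigned int dma_tx, unsigned int sipc_rx,
			unsigned int dma_rx)
{
	if (dma_rx)
		return ffs(dma_rx) - 1 + DMA_RX_ERR_BASE;
	if (sipc_rx)
		return ffs(sipc_rx) - 1 + SIPC_RX_ERR_BASE;
	if (dma_tx)
		return ffs(dma_tx) - 1 + DMA_TX_ERR_BASE;
	if (trans_rx)
		return ffs(trans_rx) - 1 + TRANS_RX_FAIL_BASE;
	if (trans_tx)
		return ffs(trans_tx) - 1 + TRANS_TX_FAIL_BASE;
	return -1;
}

int main(void)
{
	/* dma_rx bit 5 wins over trans_tx: 0x405 is DMA_RX_DATA_LEN_OVERFLOW
	 * in the enum above */
	printf("error = 0x%x\n", decode_error(0x10, 0, 0, 0, 1 << 5));
	return 0;
}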
+
+static int
+slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
+ int abort)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_device *sas_dev;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct task_status_struct *ts;
+ struct domain_device *device;
+ enum exec_status sts;
+ struct hisi_sas_complete_v2_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v2_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+
+ if (unlikely(!task || !task->lldd_task || !task->dev))
+ return -EINVAL;
+
+ ts = &task->task_status;
+ device = task->dev;
+ sas_dev = device->lldd_dev;
+
+ task->task_state_flags &=
+ ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+
+ memset(ts, 0, sizeof(*ts));
+ ts->resp = SAS_TASK_COMPLETE;
+
+ if (unlikely(!sas_dev || abort)) {
+ if (!sas_dev)
+ dev_dbg(dev, "slot complete: port has no device\n");
+ ts->stat = SAS_PHY_DOWN;
+ goto out;
+ }
+
+ if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
+ (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
+ slot_err_v2_hw(hisi_hba, task, slot);
+ if (unlikely(slot->abort)) {
+ queue_work(hisi_hba->wq, &slot->abort_slot);
+ /* immediately return and do not complete */
+ return ts->stat;
+ }
+ goto out;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ {
+ struct ssp_response_iu *iu = slot->status_buffer +
+ sizeof(struct hisi_sas_err_record);
+
+ sas_ssp_task_response(dev, task, iu);
+ break;
+ }
+ case SAS_PROTOCOL_SMP:
+ {
+ struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+ void *to;
+
+ ts->stat = SAM_STAT_GOOD;
+ to = kmap_atomic(sg_page(sg_resp));
+
+ dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ memcpy(to + sg_resp->offset,
+ slot->status_buffer +
+ sizeof(struct hisi_sas_err_record),
+ sg_dma_len(sg_resp));
+ kunmap_atomic(to);
+ break;
+ }
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ {
+ ts->stat = SAM_STAT_GOOD;
+ sata_done_v2_hw(hisi_hba, task, slot);
+ break;
+ }
+ default:
+ ts->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ }
+
+ if (!slot->port->port_attached) {
+		dev_err(dev, "slot complete: port %d has been removed\n",
+ slot->port->sas_port.id);
+ ts->stat = SAS_PHY_DOWN;
+ }
+
+out:
+ if (sas_dev && sas_dev->running_req)
+ sas_dev->running_req--;
+
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ sts = ts->stat;
+
+ if (task->task_done)
+ task->task_done(task);
+
+ return sts;
+}
+
+static u8 get_ata_protocol(u8 cmd, int direction)
+{
+ switch (cmd) {
+ case ATA_CMD_FPDMA_WRITE:
+ case ATA_CMD_FPDMA_READ:
+ return SATA_PROTOCOL_FPDMA;
+
+ case ATA_CMD_ID_ATA:
+ case ATA_CMD_PMP_READ:
+ case ATA_CMD_READ_LOG_EXT:
+ case ATA_CMD_PIO_READ:
+ case ATA_CMD_PIO_READ_EXT:
+ case ATA_CMD_PMP_WRITE:
+ case ATA_CMD_WRITE_LOG_EXT:
+ case ATA_CMD_PIO_WRITE:
+ case ATA_CMD_PIO_WRITE_EXT:
+ return SATA_PROTOCOL_PIO;
+
+ case ATA_CMD_READ:
+ case ATA_CMD_READ_EXT:
+ case ATA_CMD_READ_LOG_DMA_EXT:
+ case ATA_CMD_WRITE:
+ case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_QUEUED:
+ case ATA_CMD_WRITE_LOG_DMA_EXT:
+ return SATA_PROTOCOL_DMA;
+
+ case ATA_CMD_DOWNLOAD_MICRO:
+ case ATA_CMD_DEV_RESET:
+ case ATA_CMD_CHK_POWER:
+ case ATA_CMD_FLUSH:
+ case ATA_CMD_FLUSH_EXT:
+ case ATA_CMD_VERIFY:
+ case ATA_CMD_VERIFY_EXT:
+ case ATA_CMD_SET_FEATURES:
+ case ATA_CMD_STANDBY:
+ case ATA_CMD_STANDBYNOW1:
+ return SATA_PROTOCOL_NONDATA;
+ default:
+ if (direction == DMA_NONE)
+ return SATA_PROTOCOL_NONDATA;
+ return SATA_PROTOCOL_PIO;
+ }
+}
+
+static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct domain_device *device = task->dev;
+ struct domain_device *parent_dev = device->parent;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct hisi_sas_port *port = device->port->lldd_port;
+ u8 *buf_cmd;
+	int has_data = 0, rc = 0;
+	u32 dw1 = 0, dw2 = 0, hdr_tag = 0;
+
+ /* create header */
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
+ else
+ hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+
+ /* dw1 */
+ switch (task->data_dir) {
+ case DMA_TO_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+ break;
+ case DMA_FROM_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+ break;
+ default:
+ dw1 &= ~CMD_HDR_DIR_MSK;
+ }
+
+	if (!task->ata_task.fis.command)
+ dw1 |= 1 << CMD_HDR_RESET_OFF;
+
+ dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
+ << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+ hdr->dw1 = cpu_to_le32(dw1);
+
+ /* dw2 */
+ if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
+ task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
+ }
+
+ dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
+ 2 << CMD_HDR_SG_MOD_OFF;
+ hdr->dw2 = cpu_to_le32(dw2);
+
+ /* dw3 */
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ if (has_data) {
+ rc = prep_prd_sge_v2_hw(hisi_hba, slot, hdr, task->scatter,
+ slot->n_elem);
+ if (rc)
+ return rc;
+ }
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+ hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
+ hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+
+ buf_cmd = slot->command_table;
+
+ if (likely(!task->ata_task.device_control_reg_update))
+ task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+ /* fill in command FIS */
+ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+
+ return 0;
+}
+
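A note on the NCQ path in prep_ata_v2_hw() above: the "hdr_tag << 3" places
the queue tag in bits 7:3 of the FIS count field, which is where FPDMA
QUEUED commands carry it; the same tag also goes into dw2 for the controller.
A minimal standalone sketch of the count-field encoding (variable names are
illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t sector_count = 0;
	uint32_t hdr_tag = 5;			/* NCQ tag, 0..31 */

	sector_count |= (uint8_t)(hdr_tag << 3);
	printf("count field: 0x%02x (tag in bits 7:3)\n", sector_count);
	return 0;
}
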
+static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ int i, res = 0;
+ u32 context, port_id, link_rate, hard_phy_linkrate;
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
+ struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
+
+ /* Check for SATA dev */
+ context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
+ if (context & (1 << phy_no))
+ goto end;
+
+ if (phy_no == 8) {
+ u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+ port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+ PORT_STATE_PHY8_PORT_NUM_OFF;
+ link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
+ PORT_STATE_PHY8_CONN_RATE_OFF;
+ } else {
+ port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ port_id = (port_id >> (4 * phy_no)) & 0xf;
+ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+ link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ }
+
+ if (port_id == 0xf) {
+ dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+
+ for (i = 0; i < 6; i++) {
+ u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
+ RX_IDAF_DWORD0 + (i * 4));
+ frame_rcvd[i] = __swab32(idaf);
+ }
+
+ /* Get the linkrates */
+ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+ link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ sas_phy->linkrate = link_rate;
+ hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ HARD_PHY_LINKRATE);
+ phy->maximum_linkrate = hard_phy_linkrate & 0xf;
+ phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
+
+ sas_phy->oob_mode = SAS_OOB_MODE;
+ memcpy(sas_phy->attached_sas_addr, &id->sas_addr, SAS_ADDR_SIZE);
+ dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ phy->port_id = port_id;
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ phy->phy_type |= PORT_TYPE_SAS;
+ phy->phy_attached = 1;
+ phy->identify.device_type = id->dev_type;
+ phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
+ if (phy->identify.device_type == SAS_END_DEVICE)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SSP;
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SMP;
+ queue_work(hisi_hba->wq, &phy->phyup_ws);
+
+end:
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_PHY_ENABLE_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
+
+ return res;
+}
+
+static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ int res = 0;
+ u32 phy_cfg, phy_state;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
+
+ phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+
+ hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
+
+ return res;
+}
+
+static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_msk;
+ int phy_no = 0;
+ irqreturn_t res = IRQ_HANDLED;
+
+ irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO)
+ >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff;
+ while (irq_msk) {
+ if (irq_msk & 1) {
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+
+			if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK) {
+				/* phy up */
+				if (phy_up_v2_hw(phy_no, hisi_hba)) {
+					res = IRQ_NONE;
+					goto end;
+				}
+			}
+
+			if (irq_value & CHL_INT0_NOT_RDY_MSK) {
+				/* phy down */
+				if (phy_down_v2_hw(phy_no, hisi_hba)) {
+					res = IRQ_NONE;
+					goto end;
+				}
+			}
+ }
+ irq_msk >>= 1;
+ phy_no++;
+ }
+
+end:
+ return res;
+}
+
+static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+ unsigned long flags;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_RX_BCST_ACK_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+}
+
+static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ struct device *dev = &hisi_hba->pdev->dev;
+ u32 ent_msk, ent_tmp, irq_msk;
+ int phy_no = 0;
+
+ ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ ent_tmp = ent_msk;
+ ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
+
+ irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >>
+ HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
+
+ while (irq_msk) {
+ if (irq_msk & (1 << phy_no)) {
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if (irq_value1) {
+ if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
+ CHL_INT1_DMAC_TX_ECC_ERR_MSK))
+				panic("%s: DMAC RX/TX ECC error! (0x%x)",
+ dev_name(dev), irq_value1);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
+
+ if (irq_value2)
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+
+ if (irq_value0) {
+ if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ phy_bcast_v2_hw(phy_no, hisi_hba);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
+ }
+ }
+ irq_msk &= ~(1 << phy_no);
+ phy_no++;
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+{
+ struct hisi_sas_cq *cq = p;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ struct hisi_sas_slot *slot;
+ struct hisi_sas_itct *itct;
+ struct hisi_sas_complete_v2_hdr *complete_queue;
+ u32 irq_value, rd_point, wr_point, dev_id;
+ int queue = cq->id;
+
+ complete_queue = hisi_hba->complete_hdr[queue];
+ irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
+
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+
+ rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR +
+ (0x14 * queue));
+ wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
+ (0x14 * queue));
+
+ while (rd_point != wr_point) {
+ struct hisi_sas_complete_v2_hdr *complete_hdr;
+ int iptt;
+
+ complete_hdr = &complete_queue[rd_point];
+
+ /* Check for NCQ completion */
+ if (complete_hdr->act) {
+ u32 act_tmp = complete_hdr->act;
+ int ncq_tag_count = ffs(act_tmp);
+
+ dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
+ CMPLT_HDR_DEV_ID_OFF;
+ itct = &hisi_hba->itct[dev_id];
+
+ /* The NCQ tags are held in the itct header */
+ while (ncq_tag_count) {
+ __le64 *ncq_tag = &itct->qw4_15[0];
+
+ ncq_tag_count -= 1;
+ iptt = (ncq_tag[ncq_tag_count / 5]
+ >> (ncq_tag_count % 5) * 12) & 0xfff;
+
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v2_hw(hisi_hba, slot, 0);
+
+ act_tmp &= ~(1 << ncq_tag_count);
+ ncq_tag_count = ffs(act_tmp);
+ }
+ } else {
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v2_hw(hisi_hba, slot, 0);
+ }
+
+ if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
+ rd_point = 0;
+ }
+
+ /* update rd_point */
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+ return IRQ_HANDLED;
+}
+
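The NCQ branch of cq_interrupt_v2_hw() above relies on the ITCT layout: five
12-bit IPTTs are packed into each 64-bit qword of qw4_15[], and the tag to
unpack is the set-bit position pulled from the ACT bitmap. A minimal
user-space sketch of that unpacking, using local stand-in names rather than
the driver's structures:

/* Model of the "ncq_tag[count / 5] >> (count % 5) * 12" extraction above. */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int iptt_for_tag(const uint64_t *qw4_15, int ncq_tag)
{
	/* qword 'ncq_tag / 5' holds the tag, in 12-bit lane 'ncq_tag % 5' */
	return (qw4_15[ncq_tag / 5] >> ((ncq_tag % 5) * 12)) & 0xfff;
}

int main(void)
{
	uint64_t qw[12] = { 0 };
	uint32_t act = (1u << 0) | (1u << 7);	/* NCQ tags 0 and 7 active */

	qw[0] |= 0x123;					/* tag 0 -> IPTT 0x123 */
	qw[1] |= (uint64_t)0x456 << ((7 % 5) * 12);	/* tag 7 -> IPTT 0x456 */

	while (act) {
		int tag = ffs(act) - 1;

		printf("tag %d -> iptt 0x%03x\n", tag, iptt_for_tag(qw, tag));
		act &= ~(1u << tag);
	}
	return 0;
}
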
+static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
+{
+ struct hisi_sas_phy *phy = p;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_initial_fis *initial_fis;
+ struct dev_to_host_fis *fis;
+ u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
+ irqreturn_t res = IRQ_HANDLED;
+ u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+ int phy_no;
+
+ phy_no = sas_phy->id;
+ initial_fis = &hisi_hba->initial_fis[phy_no];
+ fis = &initial_fis->fis;
+
+ ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no);
+
+ ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1);
+ ent_tmp = ent_int;
+ ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
+ if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
+ dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
+ res = IRQ_NONE;
+ goto end;
+ }
+
+ if (unlikely(phy_no == 8)) {
+ u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+
+ port_id = (port_state & PORT_STATE_PHY8_PORT_NUM_MSK) >>
+ PORT_STATE_PHY8_PORT_NUM_OFF;
+ link_rate = (port_state & PORT_STATE_PHY8_CONN_RATE_MSK) >>
+ PORT_STATE_PHY8_CONN_RATE_OFF;
+ } else {
+ port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ port_id = (port_id >> (4 * phy_no)) & 0xf;
+ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+ link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+ }
+
+ if (port_id == 0xf) {
+ dev_err(dev, "sata int: phy%d invalid portid\n", phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+
+ sas_phy->linkrate = link_rate;
+ hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ HARD_PHY_LINKRATE);
+ phy->maximum_linkrate = hard_phy_linkrate & 0xf;
+ phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
+
+ sas_phy->oob_mode = SATA_OOB_MODE;
+ /* Make up some unique SAS address */
+ attached_sas_addr[0] = 0x50;
+ attached_sas_addr[7] = phy_no;
+ memcpy(sas_phy->attached_sas_addr, attached_sas_addr, SAS_ADDR_SIZE);
+ memcpy(sas_phy->frame_rcvd, fis, sizeof(struct dev_to_host_fis));
+ dev_info(dev, "sata int phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+ phy->port_id = port_id;
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->phy_attached = 1;
+ phy->identify.device_type = SAS_SATA_DEV;
+ phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+ queue_work(hisi_hba->wq, &phy->phyup_ws);
+
+end:
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
+
+ return res;
+}
+
+static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
+ int_phy_updown_v2_hw,
+ int_chnl_int_v2_hw,
+};
+
+/*
+ * The hip06 chipset has a limitation that requires us to map
+ * all mbigen interrupts, even those that are not used.
+ */
+static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->pdev;
+ struct device *dev = &pdev->dev;
+ int i, irq, rc, irq_map[128];
+
+ for (i = 0; i < 128; i++)
+ irq_map[i] = platform_get_irq(pdev, i);
+
+ for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
+ int idx = i;
+
+ irq = irq_map[idx + 1]; /* Phy up/down is irq1 */
+		if (irq < 0) {
+			dev_err(dev, "irq init: failed to map phy interrupt %d\n",
+				idx);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
+ DRV_NAME " phy", hisi_hba);
+ if (rc) {
+			dev_err(dev,
+				"irq init: could not request phy interrupt %d, rc=%d\n",
+				irq, rc);
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[i];
+ int idx = i + 72; /* First SATA interrupt is irq72 */
+
+ irq = irq_map[idx];
+		if (irq < 0) {
+			dev_err(dev, "irq init: failed to map sata interrupt %d\n",
+				idx);
+ return -ENOENT;
+ }
+
+ rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
+ DRV_NAME " sata", phy);
+ if (rc) {
+			dev_err(dev,
+				"irq init: could not request sata interrupt %d, rc=%d\n",
+				irq, rc);
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ int idx = i + 96; /* First cq interrupt is irq96 */
+
+ irq = irq_map[idx];
+		if (irq < 0) {
+ dev_err(dev,
+ "irq init: could not map cq interrupt %d\n",
+ idx);
+ return -ENOENT;
+ }
+ rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0,
+ DRV_NAME " cq", &hisi_hba->cq[i]);
+ if (rc) {
+ dev_err(dev,
+ "irq init: could not request cq interrupt %d, rc=%d\n",
+ irq, rc);
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
+{
+ int rc;
+
+ rc = hw_init_v2_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ rc = interrupt_init_v2_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ phys_init_v2_hw(hisi_hba);
+
+ return 0;
+}
+
+static const struct hisi_sas_hw hisi_sas_v2_hw = {
+ .hw_init = hisi_sas_v2_init,
+ .setup_itct = setup_itct_v2_hw,
+ .sl_notify = sl_notify_v2_hw,
+ .get_wideport_bitmap = get_wideport_bitmap_v2_hw,
+ .free_device = free_device_v2_hw,
+ .prep_smp = prep_smp_v2_hw,
+ .prep_ssp = prep_ssp_v2_hw,
+ .prep_stp = prep_ata_v2_hw,
+ .get_free_slot = get_free_slot_v2_hw,
+ .start_delivery = start_delivery_v2_hw,
+ .slot_complete = slot_complete_v2_hw,
+ .phy_enable = enable_phy_v2_hw,
+ .phy_disable = disable_phy_v2_hw,
+ .phy_hard_reset = phy_hard_reset_v2_hw,
+ .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
+ .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
+};
+
+static int hisi_sas_v2_probe(struct platform_device *pdev)
+{
+ return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
+}
+
+static int hisi_sas_v2_remove(struct platform_device *pdev)
+{
+ return hisi_sas_remove(pdev);
+}
+
+static const struct of_device_id sas_v2_of_match[] = {
+ { .compatible = "hisilicon,hip06-sas-v2",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sas_v2_of_match);
+
+static struct platform_driver hisi_sas_v2_driver = {
+ .probe = hisi_sas_v2_probe,
+ .remove = hisi_sas_v2_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = sas_v2_of_match,
+ },
+};
+
+module_platform_driver(hisi_sas_v2_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
+MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 82ac1cd818ac..1547bd93c70b 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -33,7 +33,7 @@
#include <linux/transport_class.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-
+#include <linux/idr.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -42,7 +42,7 @@
#include "scsi_logging.h"
-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
+static DEFINE_IDA(host_index_ida);
static void scsi_host_cls_release(struct device *dev)
@@ -250,6 +250,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto out_destroy_freelist;
+ /*
+ * Increase usage count temporarily here so that calling
+	 * scsi_autopm_put_host() will trigger runtime idle if nothing
+	 * else is preventing the device from suspending.
+ */
+ pm_runtime_get_noresume(&shost->shost_gendev);
pm_runtime_set_active(&shost->shost_gendev);
pm_runtime_enable(&shost->shost_gendev);
device_enable_async_suspend(&shost->shost_gendev);
@@ -290,6 +296,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto out_destroy_host;
scsi_proc_host_add(shost);
+ scsi_autopm_put_host(shost);
return error;
out_destroy_host:
@@ -355,6 +362,8 @@ static void scsi_host_dev_release(struct device *dev)
kfree(shost->shost_data);
+ ida_simple_remove(&host_index_ida, shost->host_no);
+
if (parent)
put_device(parent);
kfree(shost);
@@ -388,6 +397,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
gfp_t gfp_mask = GFP_KERNEL;
+ int index;
if (sht->unchecked_isa_dma && privsize)
gfp_mask |= __GFP_DMA;
@@ -406,11 +416,11 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- /*
- * subtract one because we increment first then return, but we need to
- * know what the next host number was before increment
- */
- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
+ index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
+ if (index < 0)
+ goto fail_kfree;
+ shost->host_no = index;
+
shost->dma_channel = 0xff;
/* These three are default values which can be overridden */
@@ -495,7 +505,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost_printk(KERN_WARNING, shost,
"error handler thread failed to spawn, error = %ld\n",
PTR_ERR(shost->ehandler));
- goto fail_kfree;
+ goto fail_index_remove;
}
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
@@ -511,6 +521,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
fail_kthread:
kthread_stop(shost->ehandler);
+ fail_index_remove:
+ ida_simple_remove(&host_index_ida, shost->host_no);
fail_kfree:
kfree(shost);
return NULL;
@@ -606,6 +618,7 @@ int scsi_init_hosts(void)
void scsi_exit_hosts(void)
{
class_unregister(&shost_class);
+ ida_destroy(&host_index_ida);
}
int scsi_is_host_device(const struct device *dev)
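The hosts.c hunks above replace the monotonically increasing
scsi_host_next_hn counter with an IDA, so host numbers are recycled once
scsi_host_dev_release() runs instead of growing without bound. A condensed
sketch of the allocate/free pairing the patch adopts, assuming kernel
context; the names are hypothetical:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_alloc_index(void)
{
	/* returns the lowest free index, or -ENOMEM/-ENOSPC */
	return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
}

static void example_free_index(int index)
{
	ida_simple_remove(&example_ida, index);
}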
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 38ce0e308fbe..5be944c8b71c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
@@ -12,7 +13,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * Questions/Comments/Bugfixes to storagedev@pmcs.com
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
*
*/
@@ -809,7 +810,8 @@ static ssize_t path_info_show(struct device *dev,
PAGE_SIZE - output_len,
"PORT: %.2s ",
phys_connector);
- if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
+ if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
+ hdev->expose_device) {
if (box == 0 || box == 0xFF) {
output_len += scnprintf(buf + output_len,
PAGE_SIZE - output_len,
@@ -1166,6 +1168,7 @@ static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
snprintf(label, LABEL_SIZE, "enclosure");
break;
case TYPE_DISK:
+ case TYPE_ZBC:
if (dev->external)
snprintf(label, LABEL_SIZE, "external");
else if (!is_logical_dev_addr_mode(dev->scsi3addr))
@@ -1636,6 +1639,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
continue;
-		if (dev[j]->devtype != TYPE_DISK)
+		if (dev[j]->devtype != TYPE_DISK &&
+		    dev[j]->devtype != TYPE_ZBC)
			continue;
if (is_logical_device(dev[j]))
continue;
if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
@@ -1681,6 +1686,8 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
continue;
-		if (dev[i]->devtype != TYPE_DISK)
+		if (dev[i]->devtype != TYPE_DISK &&
+		    dev[i]->devtype != TYPE_ZBC)
			continue;
if (!is_logical_device(dev[i]))
continue;
@@ -3208,8 +3215,10 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
- if (bmic_device_index == 0xFF00)
+ if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
+ rc = IO_OK;
goto out;
+ }
bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
if (!bssbp)
@@ -3657,18 +3666,6 @@ static int hpsa_device_supports_aborts(struct ctlr_info *h,
return rc;
}
-static void sanitize_inquiry_string(unsigned char *s, int len)
-{
- bool terminated = false;
-
- for (; len > 0; (--len, ++s)) {
- if (*s == 0)
- terminated = true;
- if (terminated || *s < 0x20 || *s > 0x7e)
- *s = ' ';
- }
-}
-
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
@@ -3699,8 +3696,8 @@ static int hpsa_update_device_info(struct ctlr_info *h,
goto bail_out;
}
- sanitize_inquiry_string(&inq_buff[8], 8);
- sanitize_inquiry_string(&inq_buff[16], 16);
+ scsi_sanitize_inquiry_string(&inq_buff[8], 8);
+ scsi_sanitize_inquiry_string(&inq_buff[16], 16);
this_device->devtype = (inq_buff[0] & 0x1f);
memcpy(this_device->scsi3addr, scsi3addr, 8);
@@ -3713,7 +3710,8 @@ static int hpsa_update_device_info(struct ctlr_info *h,
hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
sizeof(this_device->device_id));
- if (this_device->devtype == TYPE_DISK &&
+ if ((this_device->devtype == TYPE_DISK ||
+ this_device->devtype == TYPE_ZBC) &&
is_logical_dev_addr_mode(scsi3addr)) {
int volume_offline;
@@ -4181,6 +4179,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
ncurrent++;
break;
case TYPE_DISK:
+ case TYPE_ZBC:
if (this_device->physical_device) {
/* The disk is in HBA mode. */
/* Never use RAID mapper in HBA mode. */
@@ -4197,7 +4196,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
ncurrent++;
break;
case TYPE_ENCLOSURE:
- hpsa_get_enclosure_info(h, lunaddrbytes,
+ if (!this_device->external)
+ hpsa_get_enclosure_info(h, lunaddrbytes,
physdev_list, phys_dev_index,
this_device);
ncurrent++;
@@ -4970,6 +4970,8 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
return IO_ACCEL_INELIGIBLE;
c->phys_disk = dev->phys_disk[map_index];
+ if (!c->phys_disk)
+ return IO_ACCEL_INELIGIBLE;
disk_handle = dd[map_index].ioaccel_handle;
disk_block = le64_to_cpu(map->disk_starting_blk) +
@@ -5835,7 +5837,7 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
}
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
- unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
+ struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
{
/*
 * ioaccelerator mode 2 commands should be aborted via the
@@ -5844,14 +5846,16 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
* Change abort to physical device reset when abort TMF is unsupported.
*/
if (abort->cmd_type == CMD_IOACCEL2) {
- if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
+ if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
+ dev->physical_device)
return hpsa_send_abort_ioaccel2(h, abort,
reply_queue);
else
- return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
+ return hpsa_send_reset_as_abort_ioaccel2(h,
+ dev->scsi3addr,
abort, reply_queue);
}
- return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
+ return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
}
/* Find out which reply queue a command was meant to return on */
@@ -5989,7 +5993,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
cmd_free(h, abort);
return FAILED;
}
- rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
+ rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
atomic_inc(&h->abort_cmds_available);
wake_up_all(&h->abort_cmd_wait_queue);
if (rc != 0) {
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index fdd39fc0b199..d06bb7417e36 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
@@ -12,7 +13,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * Questions/Comments/Bugfixes to storagedev@pmcs.com
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
*
*/
#ifndef HPSA_H
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 6a919ada96b3..a5be153d92d4 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
*
@@ -12,7 +13,7 @@
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more details.
*
- * Questions/Comments/Bugfixes to storagedev@pmcs.com
+ * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
*
*/
#ifndef HPSA_CMD_H
@@ -289,7 +290,7 @@ struct SenseSubsystem_info {
#define BMIC_IDENTIFY_CONTROLLER 0x11
#define BMIC_SET_DIAG_OPTIONS 0xF4
#define BMIC_SENSE_DIAG_OPTIONS 0xF5
-#define HPSA_DIAG_OPTS_DISABLE_RLD_CACHING 0x40000000
+#define HPSA_DIAG_OPTS_DISABLE_RLD_CACHING 0x80000000
#define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66
#define BMIC_SENSE_STORAGE_BOX_PARAMS 0x65
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 6aa317c303e2..fc523c3e5019 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2636,7 +2636,8 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
struct ibmvfc_target *tgt;
ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
- " node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name,
+ " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
+ be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
ibmvfc_get_link_state(crq->link_state));
switch (be64_to_cpu(crq->event)) {
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index adfef9db6f1e..d9534ee6ef52 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -182,7 +182,7 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
spin_lock_irqsave(&queue->lock, flags);
crq = &queue->msgs[queue->cur];
- if (crq->valid & 0x80) {
+ if (crq->valid != VIOSRP_CRQ_FREE) {
if (++queue->cur == queue->size)
queue->cur = 0;
@@ -231,7 +231,7 @@ static void ibmvscsi_task(void *data)
/* Pull all the valid messages off the CRQ */
while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
ibmvscsi_handle_crq(crq, hostdata);
- crq->valid = 0x00;
+ crq->valid = VIOSRP_CRQ_FREE;
}
vio_enable_interrupts(vdev);
@@ -239,7 +239,7 @@ static void ibmvscsi_task(void *data)
if (crq != NULL) {
vio_disable_interrupts(vdev);
ibmvscsi_handle_crq(crq, hostdata);
- crq->valid = 0x00;
+ crq->valid = VIOSRP_CRQ_FREE;
} else {
done = 1;
}
@@ -248,25 +248,23 @@ static void ibmvscsi_task(void *data)
static void gather_partition_info(void)
{
- struct device_node *rootdn;
-
const char *ppartition_name;
const __be32 *p_number_ptr;
/* Retrieve information about this partition */
- rootdn = of_find_node_by_path("/");
- if (!rootdn) {
+ if (!of_root)
return;
- }
- ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
+ of_node_get(of_root);
+
+ ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
if (ppartition_name)
strncpy(partition_name, ppartition_name,
sizeof(partition_name));
- p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
+ p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
if (p_number_ptr)
partition_number = of_read_number(p_number_ptr, 1);
- of_node_put(rootdn);
+ of_node_put(of_root);
}
static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
@@ -283,8 +281,8 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
hostdata->madapter_info.partition_number =
cpu_to_be32(partition_number);
- hostdata->madapter_info.mad_version = cpu_to_be32(1);
- hostdata->madapter_info.os_type = cpu_to_be32(2);
+ hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1);
+ hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX);
}
/**
@@ -316,7 +314,7 @@ static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
- if (rc == 2) {
+ if (rc == H_CLOSED) {
/* Adapter is good, but other end is not ready */
dev_warn(hostdata->dev, "Partner adapter not ready\n");
} else if (rc != 0) {
@@ -366,7 +364,7 @@ static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
rc = ibmvscsi_reset_crq_queue(queue,
hostdata);
- if (rc == 2) {
+ if (rc == H_CLOSED) {
/* Adapter is good, but other end is not ready */
dev_warn(hostdata->dev, "Partner adapter not ready\n");
retrc = 0;
@@ -474,7 +472,7 @@ static int initialize_event_pool(struct event_pool *pool,
struct srp_event_struct *evt = &pool->events[i];
memset(&evt->crq, 0x00, sizeof(evt->crq));
atomic_set(&evt->free, 1);
- evt->crq.valid = 0x80;
+ evt->crq.valid = VIOSRP_CRQ_CMD_RSP;
evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
sizeof(*evt->xfer_iu) * i);
@@ -1398,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
hostdata->host->max_sectors =
be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
- if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 &&
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX &&
strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
hostdata->madapter_info.srp_version);
@@ -1407,7 +1405,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
}
- if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) {
+ if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) {
enable_fast_fail(hostdata);
return;
}
@@ -1767,9 +1765,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
struct srp_event_struct *evt_struct =
(__force struct srp_event_struct *)crq->IU_data_ptr;
switch (crq->valid) {
- case 0xC0: /* initialization */
+ case VIOSRP_CRQ_INIT_RSP: /* initialization */
switch (crq->format) {
- case 0x01: /* Initialization message */
+ case VIOSRP_CRQ_INIT: /* Initialization message */
dev_info(hostdata->dev, "partner initialized\n");
/* Send back a response */
rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
@@ -1781,7 +1779,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
}
break;
- case 0x02: /* Initialization response */
+ case VIOSRP_CRQ_INIT_COMPLETE: /* Initialization response */
dev_info(hostdata->dev, "partner initialization complete\n");
/* Now login */
@@ -1791,7 +1789,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
}
return;
- case 0xFF: /* Hypervisor telling us the connection is closed */
+ case VIOSRP_CRQ_XPORT_EVENT: /* Hypervisor telling us the connection is closed */
scsi_block_requests(hostdata->host);
atomic_set(&hostdata->request_limit, 0);
if (crq->format == 0x06) {
@@ -1807,7 +1805,7 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
ibmvscsi_reset_host(hostdata);
}
return;
- case 0x80: /* real payload */
+ case VIOSRP_CRQ_CMD_RSP: /* real payload */
break;
default:
dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
@@ -1855,62 +1853,6 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
}
/**
- * ibmvscsi_get_host_config: Send the command to the server to get host
- * configuration data. The data is opaque to us.
- */
-static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
- unsigned char *buffer, int length)
-{
- struct viosrp_host_config *host_config;
- struct srp_event_struct *evt_struct;
- unsigned long flags;
- dma_addr_t addr;
- int rc;
-
- evt_struct = get_event_struct(&hostdata->pool);
- if (!evt_struct) {
- dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
- return -1;
- }
-
- init_event_struct(evt_struct,
- sync_completion,
- VIOSRP_MAD_FORMAT,
- info_timeout);
-
- host_config = &evt_struct->iu.mad.host_config;
-
- /* The transport length field is only 16-bit */
- length = min(0xffff, length);
-
- /* Set up a lun reset SRP command */
- memset(host_config, 0x00, sizeof(*host_config));
- host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE);
- host_config->common.length = cpu_to_be16(length);
- addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(hostdata->dev, addr)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(hostdata->dev,
- "dma_mapping error getting host config\n");
- free_event_struct(&hostdata->pool, evt_struct);
- return -1;
- }
-
- host_config->buffer = cpu_to_be64(addr);
-
- init_completion(&evt_struct->comp);
- spin_lock_irqsave(hostdata->host->host_lock, flags);
- rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
- spin_unlock_irqrestore(hostdata->host->host_lock, flags);
- if (rc == 0)
- wait_for_completion(&evt_struct->comp);
- dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
-
- return rc;
-}
-
-/**
* ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
* @sdev: struct scsi_device device to configure
*
@@ -2041,7 +1983,7 @@ static ssize_t show_host_partition_number(struct device *dev,
int len;
len = snprintf(buf, PAGE_SIZE, "%d\n",
- hostdata->madapter_info.partition_number);
+ be32_to_cpu(hostdata->madapter_info.partition_number));
return len;
}
@@ -2061,7 +2003,7 @@ static ssize_t show_host_mad_version(struct device *dev,
int len;
len = snprintf(buf, PAGE_SIZE, "%d\n",
- hostdata->madapter_info.mad_version);
+ be32_to_cpu(hostdata->madapter_info.mad_version));
return len;
}
@@ -2080,7 +2022,8 @@ static ssize_t show_host_os_type(struct device *dev,
struct ibmvscsi_host_data *hostdata = shost_priv(shost);
int len;
- len = snprintf(buf, PAGE_SIZE, "%d\n", hostdata->madapter_info.os_type);
+ len = snprintf(buf, PAGE_SIZE, "%d\n",
+ be32_to_cpu(hostdata->madapter_info.os_type));
return len;
}
@@ -2095,21 +2038,14 @@ static struct device_attribute ibmvscsi_host_os_type = {
static ssize_t show_host_config(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct ibmvscsi_host_data *hostdata = shost_priv(shost);
-
- /* returns null-terminated host config data */
- if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
- return strlen(buf);
- else
- return 0;
+ return 0;
}
static struct device_attribute ibmvscsi_host_config = {
.attr = {
- .name = "config",
- .mode = S_IRUGO,
- },
+ .name = "config",
+ .mode = S_IRUGO,
+ },
.show = show_host_config,
};
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 116243087622..c1ab8a4c3161 100644
--- a/drivers/scsi/ibmvscsi/viosrp.h
+++ b/drivers/scsi/ibmvscsi/viosrp.h
@@ -51,13 +51,25 @@ union srp_iu {
u8 reserved[SRP_MAX_IU_LEN];
};
+enum viosrp_crq_headers {
+ VIOSRP_CRQ_FREE = 0x00,
+ VIOSRP_CRQ_CMD_RSP = 0x80,
+ VIOSRP_CRQ_INIT_RSP = 0xC0,
+ VIOSRP_CRQ_XPORT_EVENT = 0xFF
+};
+
+enum viosrp_crq_init_formats {
+ VIOSRP_CRQ_INIT = 0x01,
+ VIOSRP_CRQ_INIT_COMPLETE = 0x02
+};
+
enum viosrp_crq_formats {
VIOSRP_SRP_FORMAT = 0x01,
VIOSRP_MAD_FORMAT = 0x02,
VIOSRP_OS400_FORMAT = 0x03,
VIOSRP_AIX_FORMAT = 0x04,
- VIOSRP_LINUX_FORMAT = 0x06,
- VIOSRP_INLINE_FORMAT = 0x07
+ VIOSRP_LINUX_FORMAT = 0x05,
+ VIOSRP_INLINE_FORMAT = 0x06
};
enum viosrp_crq_status {
@@ -87,7 +99,6 @@ enum viosrp_mad_types {
VIOSRP_EMPTY_IU_TYPE = 0x01,
VIOSRP_ERROR_LOG_TYPE = 0x02,
VIOSRP_ADAPTER_INFO_TYPE = 0x03,
- VIOSRP_HOST_CONFIG_TYPE = 0x04,
VIOSRP_CAPABILITIES_TYPE = 0x05,
VIOSRP_ENABLE_FAST_FAIL = 0x08,
};
@@ -153,11 +164,6 @@ struct viosrp_adapter_info {
__be64 buffer;
};
-struct viosrp_host_config {
- struct mad_common common;
- __be64 buffer;
-};
-
struct viosrp_fast_fail {
struct mad_common common;
};
@@ -195,7 +201,6 @@ union mad_iu {
struct viosrp_empty_iu empty_iu;
struct viosrp_error_log error_log;
struct viosrp_adapter_info adapter_info;
- struct viosrp_host_config host_config;
struct viosrp_fast_fail fast_fail;
struct viosrp_capabilities capabilities;
};
@@ -209,7 +214,10 @@ struct mad_adapter_info_data {
char srp_version[8];
char partition_name[96];
__be32 partition_number;
+#define SRP_MAD_VERSION_1 1
__be32 mad_version;
+#define SRP_MAD_OS_LINUX 2
+#define SRP_MAD_OS_AIX 3
__be32 os_type;
__be32 port_max_txu[8]; /* per-port maximum transfer */
};
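The show_host_* fixes earlier in this patch add be32_to_cpu() because the
mad_adapter_info_data fields above are stored big-endian; printed raw on a
little-endian host they come out byte-swapped. A standalone illustration,
using ntohl() as a stand-in for be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl()/ntohl() */

int main(void)
{
	uint32_t os_type_be = htonl(2);	/* SRP_MAD_OS_LINUX, wire order */

	/* on little-endian: raw prints 33554432, swapped prints 2 */
	printf("raw: %u, swapped: %u\n", os_type_be, ntohl(os_type_be));
	return 0;
}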
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index f8b88fa78e62..9164ce1249c1 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -77,9 +77,10 @@ static void imm_wakeup(void *ref)
spin_lock_irqsave(&arbitration_lock, flags);
if (dev->wanted) {
- parport_claim(dev->dev);
- got_it(dev);
- dev->wanted = 0;
+ if (parport_claim(dev->dev) == 0) {
+ got_it(dev);
+ dev->wanted = 0;
+ }
}
spin_unlock_irqrestore(&arbitration_lock, flags);
}
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 680bf6f0ce76..8f0ea97cf31f 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -166,6 +166,7 @@ static struct attribute_group iscsi_boot_target_attr_group = {
iscsi_boot_rd_attr(eth_index, index, ISCSI_BOOT_ETH_INDEX);
iscsi_boot_rd_attr(eth_flags, flags, ISCSI_BOOT_ETH_FLAGS);
iscsi_boot_rd_attr(eth_ip, ip-addr, ISCSI_BOOT_ETH_IP_ADDR);
+iscsi_boot_rd_attr(eth_prefix, prefix-len, ISCSI_BOOT_ETH_PREFIX_LEN);
iscsi_boot_rd_attr(eth_subnet, subnet-mask, ISCSI_BOOT_ETH_SUBNET_MASK);
iscsi_boot_rd_attr(eth_origin, origin, ISCSI_BOOT_ETH_ORIGIN);
iscsi_boot_rd_attr(eth_gateway, gateway, ISCSI_BOOT_ETH_GATEWAY);
@@ -181,6 +182,7 @@ static struct attribute *ethernet_attrs[] = {
&iscsi_boot_attr_eth_index.attr,
&iscsi_boot_attr_eth_flags.attr,
&iscsi_boot_attr_eth_ip.attr,
+ &iscsi_boot_attr_eth_prefix.attr,
&iscsi_boot_attr_eth_subnet.attr,
&iscsi_boot_attr_eth_origin.attr,
&iscsi_boot_attr_eth_gateway.attr,
@@ -208,6 +210,9 @@ static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
else if (attr == &iscsi_boot_attr_eth_ip.attr)
return boot_kobj->is_visible(boot_kobj->data,
ISCSI_BOOT_ETH_IP_ADDR);
+ else if (attr == &iscsi_boot_attr_eth_prefix.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ETH_PREFIX_LEN);
else if (attr == &iscsi_boot_attr_eth_subnet.attr)
return boot_kobj->is_visible(boot_kobj->data,
ISCSI_BOOT_ETH_SUBNET_MASK);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0b8af186e707..2e4c82f8329c 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -26,12 +26,12 @@
* Zhenyu Wang
*/
+#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/blkdev.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
@@ -428,7 +428,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
* sufficient room.
*/
if (conn->hdrdgst_en) {
- iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
+ iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
hdr + hdrlen);
hdrlen += ISCSI_DIGEST_SIZE;
}
@@ -454,7 +454,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct hash_desc *tx_hash = NULL;
+ struct ahash_request *tx_hash = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -467,7 +467,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = &tcp_sw_conn->tx_hash;
+ tx_hash = tcp_sw_conn->tx_hash;
return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
sg, count, offset, len,
@@ -480,7 +480,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
- struct hash_desc *tx_hash = NULL;
+ struct ahash_request *tx_hash = NULL;
unsigned int hdr_spec_len;
ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -492,7 +492,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
if (conn->datadgst_en)
- tx_hash = &tcp_sw_conn->tx_hash;
+ tx_hash = tcp_sw_conn->tx_hash;
iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
data, len, NULL, tx_hash);
@@ -543,6 +543,7 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct crypto_ahash *tfm;
cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
conn_idx);
@@ -552,23 +553,28 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
tcp_conn = conn->dd_data;
tcp_sw_conn = tcp_conn->dd_data;
- tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- tcp_sw_conn->tx_hash.flags = 0;
- if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
+ tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
goto free_conn;
- tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- tcp_sw_conn->rx_hash.flags = 0;
- if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
- goto free_tx_tfm;
- tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
+ tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!tcp_sw_conn->tx_hash)
+ goto free_tfm;
+ ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
+
+ tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!tcp_sw_conn->rx_hash)
+ goto free_tx_hash;
+ ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
+
+ tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
return cls_conn;
-free_tx_tfm:
- crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+free_tx_hash:
+ ahash_request_free(tcp_sw_conn->tx_hash);
+free_tfm:
+ crypto_free_ahash(tfm);
free_conn:
iscsi_conn_printk(KERN_ERR, conn,
"Could not create connection due to crc32c "
@@ -607,10 +613,14 @@ static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
iscsi_sw_tcp_release_conn(conn);
- if (tcp_sw_conn->tx_hash.tfm)
- crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
- if (tcp_sw_conn->rx_hash.tfm)
- crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
+ ahash_request_free(tcp_sw_conn->rx_hash);
+ if (tcp_sw_conn->tx_hash) {
+ struct crypto_ahash *tfm;
+
+ tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
+ ahash_request_free(tcp_sw_conn->tx_hash);
+ crypto_free_ahash(tfm);
+ }
iscsi_tcp_conn_teardown(cls_conn);
}
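The iscsi_tcp conversion above moves from the removed hash_desc interface to
ahash requests. A condensed sketch of the whole lifecycle, built from the
calls visible in these hunks; kernel context assumed, the helper name is
hypothetical and error handling is trimmed:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int crc32c_digest_example(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int rc;

	/* CRYPTO_ALG_ASYNC in the mask selects a synchronous implementation */
	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}
	ahash_request_set_callback(req, 0, NULL, NULL);

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, out, len);
	rc = crypto_ahash_digest(req);	/* init + update + final in one call */

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return rc;
}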
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index f42ecb238af5..06d42d00a323 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -45,8 +45,8 @@ struct iscsi_sw_tcp_conn {
void (*old_write_space)(struct sock *);
/* data and header digests */
- struct hash_desc tx_hash; /* CRC32C (Tx) */
- struct hash_desc rx_hash; /* CRC32C (Rx) */
+ struct ahash_request *tx_hash; /* CRC32C (Tx) */
+ struct ahash_request *rx_hash; /* CRC32C (Rx) */
/* MIB custom statistics */
uint32_t sendpage_failures_cnt;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 60cb6dc3c6f0..63a1d69ff515 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -26,13 +26,13 @@
* Zhenyu Wang
*/
+#include <crypto/hash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/blkdev.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
@@ -214,7 +214,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
} else
sg_init_one(&sg, segment->data + segment->copied,
copied);
- crypto_hash_update(segment->hash, &sg, copied);
+ ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
+ crypto_ahash_update(segment->hash);
}
segment->copied += copied;
@@ -260,7 +261,9 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
* is completely handled in hdr done function.
*/
if (segment->hash) {
- crypto_hash_final(segment->hash, segment->digest);
+ ahash_request_set_crypt(segment->hash, NULL,
+ segment->digest, 0);
+ crypto_ahash_final(segment->hash);
iscsi_tcp_segment_splice_digest(segment,
recv ? segment->recv_digest : segment->digest);
return 0;
@@ -310,13 +313,14 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
}
inline void
-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
- unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
+ size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
{
struct scatterlist sg;
sg_init_one(&sg, hdr, hdrlen);
- crypto_hash_digest(hash, &sg, hdrlen, digest);
+ ahash_request_set_crypt(hash, &sg, digest, hdrlen);
+ crypto_ahash_digest(hash);
}
EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
@@ -341,7 +345,7 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
*/
static inline void
__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+ iscsi_segment_done_fn_t *done, struct ahash_request *hash)
{
memset(segment, 0, sizeof(*segment));
segment->total_size = size;
@@ -349,14 +353,14 @@ __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
if (hash) {
segment->hash = hash;
- crypto_hash_init(hash);
+ crypto_ahash_init(hash);
}
}
inline void
iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
size_t size, iscsi_segment_done_fn_t *done,
- struct hash_desc *hash)
+ struct ahash_request *hash)
{
__iscsi_segment_init(segment, size, done, hash);
segment->data = data;
@@ -368,7 +372,8 @@ inline int
iscsi_segment_seek_sg(struct iscsi_segment *segment,
struct scatterlist *sg_list, unsigned int sg_count,
unsigned int offset, size_t size,
- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+ iscsi_segment_done_fn_t *done,
+ struct ahash_request *hash)
{
struct scatterlist *sg;
unsigned int i;
@@ -431,7 +436,7 @@ static void
iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
{
struct iscsi_conn *conn = tcp_conn->iscsi_conn;
- struct hash_desc *rx_hash = NULL;
+ struct ahash_request *rx_hash = NULL;
if (conn->datadgst_en &&
!(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
@@ -686,7 +691,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
if (tcp_conn->in.datalen) {
struct iscsi_tcp_task *tcp_task = task->dd_data;
- struct hash_desc *rx_hash = NULL;
+ struct ahash_request *rx_hash = NULL;
struct scsi_data_buffer *sdb = scsi_in(task->sc);
/*
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 25aa9b98d53a..a63542bac153 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1054,11 +1054,11 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
{
struct dentry *dent = file->f_path.dentry;
struct lpfc_hba *phba = file->private_data;
- char dstbuf[32];
+ char dstbuf[33];
uint64_t tmp = 0;
int size;
- memset(dstbuf, 0, 32);
+ memset(dstbuf, 0, 33);
size = (nbytes < 32) ? nbytes : 32;
if (copy_from_user(dstbuf, buf, size))
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index c37d72effbff..25b5dcd1a5c8 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -25,6 +25,7 @@
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
+#include <linux/lockdep.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -1314,6 +1315,8 @@ __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
{
struct lpfc_fcf_pri *fcf_pri;
+ lockdep_assert_held(&phba->hbalock);
+
fcf_pri = &phba->fcf.fcf_pri[fcf_index];
fcf_pri->fcf_rec.fcf_index = fcf_index;
/* FCF record priority */
@@ -1398,6 +1401,8 @@ __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
struct fcf_record *new_fcf_record, uint32_t addr_mode,
uint16_t vlan_id, uint32_t flag)
{
+ lockdep_assert_held(&phba->hbalock);
+
/* Copy the fields from the HBA's FCF record */
lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
/* Update other fields of driver FCF record */
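The lockdep_assert_held() annotations added in the hunks above turn an
implicit convention (double-underscore helpers run with phba->hbalock held)
into something the kernel can check: with CONFIG_LOCKDEP enabled, calling
such a helper without the lock produces a warning. A minimal sketch of the
pattern; kernel context assumed, the struct and function names are
hypothetical:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct example_hw {
	spinlock_t lock;
	int state;		/* guarded by 'lock' */
};

static void __example_update(struct example_hw *hw)
{
	lockdep_assert_held(&hw->lock);	/* splats if caller forgot the lock */
	hw->state++;
}

static void example_update(struct example_hw *hw)
{
	spin_lock_irq(&hw->lock);
	__example_update(hw);
	spin_unlock_irq(&hw->lock);
}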
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a544366a367e..f57d02c3b6cf 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2860,7 +2860,7 @@ lpfc_online(struct lpfc_hba *phba)
}
vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
+ if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
struct Scsi_Host *shost;
shost = lpfc_shost_from_vport(vports[i]);
@@ -2877,7 +2877,8 @@ lpfc_online(struct lpfc_hba *phba)
}
spin_unlock_irq(shost->host_lock);
}
- lpfc_destroy_vport_work_array(phba, vports);
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
lpfc_unblock_mgmt_io(phba);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 152b3c8a5428..3bd0be6277b3 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4139,23 +4139,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
- if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
- spin_lock_irqsave(&phba->hbalock, flags);
- lpfc_cmd->pCmd = NULL;
- spin_unlock_irqrestore(&phba->hbalock, flags);
-
- /*
- * If there is a thread waiting for command completion
- * wake up the thread.
- */
- spin_lock_irqsave(shost->host_lock, flags);
- if (lpfc_cmd->waitq)
- wake_up(lpfc_cmd->waitq);
- spin_unlock_irqrestore(shost->host_lock, flags);
- lpfc_release_scsi_buf(phba, lpfc_cmd);
- return;
- }
-
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_cmd->pCmd = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 92dfd6a5178c..2207726b88ee 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/lockdep.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -576,6 +577,8 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
struct lpfc_iocbq * iocbq = NULL;
+ lockdep_assert_held(&phba->hbalock);
+
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
if (iocbq)
phba->iocb_cnt++;
@@ -797,6 +800,7 @@ int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
+ lockdep_assert_held(&phba->hbalock);
if (!ndlp)
return 0;
if (!ndlp->active_rrqs_xri_bitmap)
@@ -914,6 +918,8 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
struct lpfc_nodelist *ndlp;
int found = 0;
+ lockdep_assert_held(&phba->hbalock);
+
if (piocbq->iocb_flag & LPFC_IO_FCP) {
lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
ndlp = lpfc_cmd->rdata->pnode;
@@ -1003,6 +1009,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
unsigned long iflag = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ lockdep_assert_held(&phba->hbalock);
+
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
@@ -1058,6 +1066,7 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+ lockdep_assert_held(&phba->hbalock);
/*
* Clean all volatile data fields, preserve iotag and node struct.
@@ -1080,6 +1089,8 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
+ lockdep_assert_held(&phba->hbalock);
+
phba->__lpfc_sli_release_iocbq(phba, iocbq);
phba->iocb_cnt--;
}
@@ -1310,6 +1321,8 @@ static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
+ lockdep_assert_held(&phba->hbalock);
+
list_add_tail(&piocb->list, &pring->txcmplq);
piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
@@ -1344,6 +1357,8 @@ lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_iocbq *cmd_iocb;
+ lockdep_assert_held(&phba->hbalock);
+
list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
return cmd_iocb;
}
@@ -1367,6 +1382,9 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
+
+ lockdep_assert_held(&phba->hbalock);
+
if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
(++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
pring->sli.sli3.next_cmdidx = 0;
@@ -1497,6 +1515,7 @@ static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
+ lockdep_assert_held(&phba->hbalock);
/*
* Set up an iotag
*/
@@ -1606,6 +1625,8 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
IOCB_t *iocb;
struct lpfc_iocbq *nextiocb;
+ lockdep_assert_held(&phba->hbalock);
+
/*
* Check to see if:
* (a) there is anything on the txq to send
@@ -1647,6 +1668,8 @@ lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
struct hbq_s *hbqp = &phba->hbqs[hbqno];
+ lockdep_assert_held(&phba->hbalock);
+
if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
++hbqp->next_hbqPutIdx >= hbqp->entry_count)
hbqp->next_hbqPutIdx = 0;
@@ -1747,6 +1770,7 @@ static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
+ lockdep_assert_held(&phba->hbalock);
return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
@@ -1768,6 +1792,7 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
struct lpfc_hbq_entry *hbqe;
dma_addr_t physaddr = hbq_buf->dbuf.phys;
+ lockdep_assert_held(&phba->hbalock);
/* Get next HBQ entry slot to use */
hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
if (hbqe) {
@@ -1808,6 +1833,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
struct lpfc_rqe hrqe;
struct lpfc_rqe drqe;
+ lockdep_assert_held(&phba->hbalock);
hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
@@ -1986,6 +2012,8 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
struct hbq_dmabuf *hbq_buf;
uint32_t hbqno;
+ lockdep_assert_held(&phba->hbalock);
+
hbqno = tag >> 16;
if (hbqno >= LPFC_MAX_HBQS)
return NULL;
@@ -2647,6 +2675,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
{
struct lpfc_iocbq *cmd_iocb = NULL;
uint16_t iotag;
+ lockdep_assert_held(&phba->hbalock);
iotag = prspiocb->iocb.ulpIoTag;
@@ -2685,6 +2714,7 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
{
struct lpfc_iocbq *cmd_iocb;
+ lockdep_assert_held(&phba->hbalock);
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
@@ -3799,6 +3829,8 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
int i;
uint8_t hdrtype;
+ lockdep_assert_held(&phba->hbalock);
+
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
if (hdrtype != 0x80 ||
(FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
@@ -7861,6 +7893,7 @@ void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
+ lockdep_assert_held(&phba->hbalock);
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
}
@@ -7888,6 +7921,8 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
struct lpfc_iocbq * nextiocb;
+ lockdep_assert_held(&phba->hbalock);
+
nextiocb = lpfc_sli_ringtx_get(phba, pring);
if (!nextiocb) {
nextiocb = *piocb;
@@ -7927,6 +7962,8 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
IOCB_t *iocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+ lockdep_assert_held(&phba->hbalock);
+
if (piocb->iocb_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
@@ -8642,6 +8679,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_queue *wq;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+ lockdep_assert_held(&phba->hbalock);
+
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
@@ -9752,6 +9791,8 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
int retval;
unsigned long iflags;
+ lockdep_assert_held(&phba->hbalock);
+
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
@@ -9854,6 +9895,8 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
int retval = IOCB_ERROR;
IOCB_t *icmd = NULL;
+ lockdep_assert_held(&phba->hbalock);
+
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
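The lockdep_assert_held(&phba->hbalock) annotations added throughout lpfc_sli.c document the locking contract of the double-underscore helpers: they must be entered with hbalock already held, and with CONFIG_LOCKDEP enabled a violation now produces an immediate splat instead of a silent race. A minimal sketch of the idiom, with hypothetical function names:

	/* Sketch; example_op()/__example_op() are hypothetical names. */
	static void __example_op(struct lpfc_hba *phba)
	{
		lockdep_assert_held(&phba->hbalock);	/* compiles away without lockdep */
		/* ... manipulate ring state guarded by hbalock ... */
	}

	static void example_op(struct lpfc_hba *phba)
	{
		unsigned long flags;

		spin_lock_irqsave(&phba->hbalock, flags);
		__example_op(phba);
		spin_unlock_irqrestore(&phba->hbalock, flags);
	}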
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 141226631429..a6682c508c4c 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -18,11 +18,11 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#include <asm/macio.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index c0f7c8ce54aa..fce414a2cd76 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.808.16.00-rc1"
-#define MEGASAS_RELDATE "Oct. 8, 2015"
+#define MEGASAS_VERSION "06.810.09.00-rc1"
+#define MEGASAS_RELDATE "Jan. 28, 2016"
/*
* Device IDs
@@ -152,6 +152,7 @@
#define MFI_RESET_FLAGS MFI_INIT_READY| \
MFI_INIT_MFIMODE| \
MFI_INIT_ABORT
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
/*
* MFI frame flags
@@ -170,6 +171,7 @@
/* Driver internal */
#define DRV_DCMD_POLLED_MODE 0x1
+#define DRV_DCMD_SKIP_REFIRE 0x2
/*
* Definition for cmd_status
@@ -214,6 +216,7 @@
#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100
#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600)
+#define MR_DCMD_PD_GET_INFO 0x02020000
/*
* Global functions
@@ -390,6 +393,7 @@ enum MR_EVT_ARGS {
#define SGE_BUFFER_SIZE 4096
+#define MEGASAS_CLUSTER_ID_SIZE 16
/*
* define constants for device list query options
*/
@@ -434,6 +438,257 @@ enum MR_PD_STATE {
MR_PD_STATE_SYSTEM = 0x40,
};
+union MR_PD_REF {
+ struct {
+ u16 deviceId;
+ u16 seqNum;
+ } mrPdRef;
+ u32 ref;
+};
+
+/*
+ * define the DDF Type bit structure
+ */
+union MR_PD_DDF_TYPE {
+ struct {
+ union {
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u16 forcedPDGUID:1;
+ u16 inVD:1;
+ u16 isGlobalSpare:1;
+ u16 isSpare:1;
+ u16 isForeign:1;
+ u16 reserved:7;
+ u16 intf:4;
+#else
+ u16 intf:4;
+ u16 reserved:7;
+ u16 isForeign:1;
+ u16 isSpare:1;
+ u16 isGlobalSpare:1;
+ u16 inVD:1;
+ u16 forcedPDGUID:1;
+#endif
+ } pdType;
+ u16 type;
+ };
+ u16 reserved;
+ } ddf;
+ struct {
+ u32 reserved;
+ } nonDisk;
+ u32 type;
+} __packed;
+
+/*
+ * defines the progress structure
+ */
+union MR_PROGRESS {
+ struct {
+ u16 progress;
+ union {
+ u16 elapsedSecs;
+ u16 elapsedSecsForLastPercent;
+ };
+ } mrProgress;
+ u32 w;
+} __packed;
+
+/*
+ * defines the physical drive progress structure
+ */
+struct MR_PD_PROGRESS {
+ struct {
+#ifndef MFI_BIG_ENDIAN
+ u32 rbld:1;
+ u32 patrol:1;
+ u32 clear:1;
+ u32 copyBack:1;
+ u32 erase:1;
+ u32 locate:1;
+ u32 reserved:26;
+#else
+ u32 reserved:26;
+ u32 locate:1;
+ u32 erase:1;
+ u32 copyBack:1;
+ u32 clear:1;
+ u32 patrol:1;
+ u32 rbld:1;
+#endif
+ } active;
+ union MR_PROGRESS rbld;
+ union MR_PROGRESS patrol;
+ union {
+ union MR_PROGRESS clear;
+ union MR_PROGRESS erase;
+ };
+
+ struct {
+#ifndef MFI_BIG_ENDIAN
+ u32 rbld:1;
+ u32 patrol:1;
+ u32 clear:1;
+ u32 copyBack:1;
+ u32 erase:1;
+ u32 reserved:27;
+#else
+ u32 reserved:27;
+ u32 erase:1;
+ u32 copyBack:1;
+ u32 clear:1;
+ u32 patrol:1;
+ u32 rbld:1;
+#endif
+ } pause;
+
+ union MR_PROGRESS reserved[3];
+} __packed;
+
+struct MR_PD_INFO {
+ union MR_PD_REF ref;
+ u8 inquiryData[96];
+ u8 vpdPage83[64];
+ u8 notSupported;
+ u8 scsiDevType;
+
+ union {
+ u8 connectedPortBitmap;
+ u8 connectedPortNumbers;
+ };
+
+ u8 deviceSpeed;
+ u32 mediaErrCount;
+ u32 otherErrCount;
+ u32 predFailCount;
+ u32 lastPredFailEventSeqNum;
+
+ u16 fwState;
+ u8 disabledForRemoval;
+ u8 linkSpeed;
+ union MR_PD_DDF_TYPE state;
+
+ struct {
+ u8 count;
+#ifndef __BIG_ENDIAN_BITFIELD
+ u8 isPathBroken:4;
+ u8 reserved3:3;
+ u8 widePortCapable:1;
+#else
+ u8 widePortCapable:1;
+ u8 reserved3:3;
+ u8 isPathBroken:4;
+#endif
+
+ u8 connectorIndex[2];
+ u8 reserved[4];
+ u64 sasAddr[2];
+ u8 reserved2[16];
+ } pathInfo;
+
+ u64 rawSize;
+ u64 nonCoercedSize;
+ u64 coercedSize;
+ u16 enclDeviceId;
+ u8 enclIndex;
+
+ union {
+ u8 slotNumber;
+ u8 enclConnectorIndex;
+ };
+
+ struct MR_PD_PROGRESS progInfo;
+ u8 badBlockTableFull;
+ u8 unusableInCurrentConfig;
+ u8 vpdPage83Ext[64];
+ u8 powerState;
+ u8 enclPosition;
+ u32 allowedOps;
+ u16 copyBackPartnerId;
+ u16 enclPartnerDeviceId;
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u16 fdeCapable:1;
+ u16 fdeEnabled:1;
+ u16 secured:1;
+ u16 locked:1;
+ u16 foreign:1;
+ u16 needsEKM:1;
+ u16 reserved:10;
+#else
+ u16 reserved:10;
+ u16 needsEKM:1;
+ u16 foreign:1;
+ u16 locked:1;
+ u16 secured:1;
+ u16 fdeEnabled:1;
+ u16 fdeCapable:1;
+#endif
+ } security;
+ u8 mediaType;
+ u8 notCertified;
+ u8 bridgeVendor[8];
+ u8 bridgeProductIdentification[16];
+ u8 bridgeProductRevisionLevel[4];
+ u8 satBridgeExists;
+
+ u8 interfaceType;
+ u8 temperature;
+ u8 emulatedBlockSize;
+ u16 userDataBlockSize;
+ u16 reserved2;
+
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u32 piType:3;
+ u32 piFormatted:1;
+ u32 piEligible:1;
+ u32 NCQ:1;
+ u32 WCE:1;
+ u32 commissionedSpare:1;
+ u32 emergencySpare:1;
+ u32 ineligibleForSSCD:1;
+ u32 ineligibleForLd:1;
+ u32 useSSEraseType:1;
+ u32 wceUnchanged:1;
+ u32 supportScsiUnmap:1;
+ u32 reserved:18;
+#else
+ u32 reserved:18;
+ u32 supportScsiUnmap:1;
+ u32 wceUnchanged:1;
+ u32 useSSEraseType:1;
+ u32 ineligibleForLd:1;
+ u32 ineligibleForSSCD:1;
+ u32 emergencySpare:1;
+ u32 commissionedSpare:1;
+ u32 WCE:1;
+ u32 NCQ:1;
+ u32 piEligible:1;
+ u32 piFormatted:1;
+ u32 piType:3;
+#endif
+ } properties;
+
+ u64 shieldDiagCompletionTime;
+ u8 shieldCounter;
+
+ u8 linkSpeedOther;
+ u8 reserved4[2];
+
+ struct {
+#ifndef __BIG_ENDIAN_BITFIELD
+ u32 bbmErrCountSupported:1;
+ u32 bbmErrCount:31;
+#else
+ u32 bbmErrCount:31;
+ u32 bbmErrCountSupported:1;
+#endif
+ } bbmErr;
+
+ u8 reserved1[512-428];
+} __packed;
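These firmware-ABI structures spell out every bitfield block twice so the in-memory layout matches the little-endian wire format on both host byte orders (note that MR_PD_PROGRESS keys off MFI_BIG_ENDIAN while the rest uses __BIG_ENDIAN_BITFIELD). A condensed sketch of the idiom, with a hypothetical field name:

	/* Sketch only; example_bits is hypothetical. */
	union example_bits {
		struct {
	#ifndef __BIG_ENDIAN_BITFIELD
			u32 ready:1;		/* occupies bit 0 of the LE dword */
			u32 reserved:31;
	#else
			u32 reserved:31;	/* declaration order reversed ... */
			u32 ready:1;		/* ... so 'ready' is still LE bit 0 */
	#endif
		} bits;
		u32 word;			/* raw view for le32_to_cpu() et al. */
	} __packed;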
/*
* defines the physical drive address structure
@@ -473,6 +728,7 @@ struct megasas_pd_list {
u16 tid;
u8 driveType;
u8 driveState;
+ u8 interface;
} __packed;
/*
@@ -972,7 +1228,8 @@ struct megasas_ctrl_info {
*/
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:26;
+ u32 reserved:25;
+ u32 passive:1;
u32 premiumFeatureMismatch:1;
u32 ctrlPropIncompatible:1;
u32 fwVersionMismatch:1;
@@ -986,11 +1243,12 @@ struct megasas_ctrl_info {
u32 fwVersionMismatch:1;
u32 ctrlPropIncompatible:1;
u32 premiumFeatureMismatch:1;
- u32 reserved:26;
+ u32 passive:1;
+ u32 reserved:25;
#endif
} cluster;
- char clusterId[16]; /*7D4h */
+ char clusterId[MEGASAS_CLUSTER_ID_SIZE]; /*0x7D4 */
struct {
u8 maxVFsSupported; /*0x7E4*/
u8 numVFsEnabled; /*0x7E5*/
@@ -1083,6 +1341,8 @@ struct megasas_ctrl_info {
#define VD_EXT_DEBUG 0
+#define SCAN_PD_CHANNEL 0x1
+#define SCAN_VD_CHANNEL 0x2
enum MR_SCSI_CMD_TYPE {
READ_WRITE_LDIO = 0,
@@ -1091,6 +1351,17 @@ enum MR_SCSI_CMD_TYPE {
NON_READ_WRITE_SYSPDIO = 3,
};
+enum DCMD_TIMEOUT_ACTION {
+ INITIATE_OCR = 0,
+ KILL_ADAPTER = 1,
+ IGNORE_TIMEOUT = 2,
+};
+
+enum FW_BOOT_CONTEXT {
+ PROBE_CONTEXT = 0,
+ OCR_CONTEXT = 1,
+};
+
/* Frame Type */
#define IO_FRAME 0
#define PTHRU_FRAME 1
@@ -1137,6 +1408,7 @@ enum MR_SCSI_CMD_TYPE {
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
+#define MFI_IO_TIMEOUT_SECS 180
#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF (5 * HZ)
#define MEGASAS_OCR_SETTLE_TIME_VF (1000 * 30)
#define MEGASAS_ROUTINE_WAIT_TIME_VF 300
@@ -1154,6 +1426,7 @@ enum MR_SCSI_CMD_TYPE {
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET 0X003FC000
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT 14
#define MR_MAX_MSIX_REG_ARRAY 16
+#define MR_RDPQ_MODE_OFFSET 0X00800000
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -1193,8 +1466,9 @@ struct megasas_register_set {
u32 outbound_scratch_pad ; /*00B0h*/
u32 outbound_scratch_pad_2; /*00B4h*/
+ u32 outbound_scratch_pad_3; /*00B8h*/
- u32 reserved_4[2]; /*00B8h*/
+ u32 reserved_4; /*00BCh*/
u32 inbound_low_queue_port ; /*00C0h*/
@@ -1266,7 +1540,10 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:23;
+ u32 reserved:20;
+ u32 support_qd_throttling:1;
+ u32 support_fp_rlbypass:1;
+ u32 support_vfid_in_ioframe:1;
u32 support_ext_io_size:1;
u32 support_ext_queue_depth:1;
u32 security_protocol_cmds_fw:1;
@@ -1286,7 +1563,10 @@ typedef union _MFI_CAPABILITIES {
u32 security_protocol_cmds_fw:1;
u32 support_ext_queue_depth:1;
u32 support_ext_io_size:1;
- u32 reserved:23;
+ u32 support_vfid_in_ioframe:1;
+ u32 support_fp_rlbypass:1;
+ u32 support_qd_throttling:1;
+ u32 reserved:20;
#endif
} mfi_capabilities;
__le32 reg;
@@ -1511,6 +1791,15 @@ union megasas_frame {
u8 raw_bytes[64];
};
+/**
+ * struct MR_PRIV_DEVICE - sdev private hostdata
+ * @is_tm_capable: firmware managed tm_capable flag
+ * @tm_busy: TM request is in progress
+ */
+struct MR_PRIV_DEVICE {
+ bool is_tm_capable;
+ bool tm_busy;
+};
struct megasas_cmd;
union megasas_evt_class_locale {
@@ -1700,6 +1989,19 @@ struct MR_DRV_SYSTEM_INFO {
u8 reserved[1980];
};
+enum MR_PD_TYPE {
+ UNKNOWN_DRIVE = 0,
+ PARALLEL_SCSI = 1,
+ SAS_PD = 2,
+ SATA_PD = 3,
+ FC_PD = 4,
+};
+
+/* JBOD Queue depth definitions */
+#define MEGASAS_SATA_QD 32
+#define MEGASAS_SAS_QD 64
+#define MEGASAS_DEFAULT_PD_QD 64
+
struct megasas_instance {
__le32 *producer;
@@ -1714,6 +2016,8 @@ struct megasas_instance {
dma_addr_t vf_affiliation_111_h;
struct MR_CTRL_HB_HOST_MEM *hb_host_mem;
dma_addr_t hb_host_mem_h;
+ struct MR_PD_INFO *pd_info;
+ dma_addr_t pd_info_h;
__le32 *reply_queue;
dma_addr_t reply_queue_h;
@@ -1745,6 +2049,8 @@ struct megasas_instance {
u16 max_fw_cmds;
u16 max_mfi_cmds;
u16 max_scsi_cmds;
+ u16 ldio_threshold;
+ u16 cur_can_queue;
u32 max_sectors_per_req;
struct megasas_aen_event *ev;
@@ -1762,7 +2068,7 @@ struct megasas_instance {
struct megasas_evt_detail *evt_detail;
dma_addr_t evt_detail_h;
struct megasas_cmd *aen_cmd;
- struct mutex aen_mutex;
+ struct mutex hba_mutex;
struct semaphore ioctl_sem;
struct Scsi_Host *host;
@@ -1775,6 +2081,7 @@ struct megasas_instance {
u32 fw_support_ieee;
atomic_t fw_outstanding;
+ atomic_t ldio_outstanding;
atomic_t fw_reset_no_pci_access;
struct megasas_instance_template *instancet;
@@ -1790,14 +2097,14 @@ struct megasas_instance {
u8 UnevenSpanSupport;
u8 supportmax256vd;
- u8 allow_fw_scan;
+ u8 pd_list_not_supported;
u16 fw_supported_vd_count;
u16 fw_supported_pd_count;
u16 drv_supported_vd_count;
u16 drv_supported_pd_count;
- u8 adprecovery;
+ atomic_t adprecovery;
unsigned long last_time;
u32 mfiStatus;
u32 last_seq_num;
@@ -1822,11 +2129,14 @@ struct megasas_instance {
char skip_heartbeat_timer_del;
u8 requestorId;
char PlasmaFW111;
- char mpio;
+ char clusterId[MEGASAS_CLUSTER_ID_SIZE];
+ u8 peerIsPresent;
+ u8 passive;
u16 throttlequeuedepth;
u8 mask_interrupts;
u16 max_chain_frame_sz;
u8 is_imr;
+ u8 is_rdpq;
bool dev_handle;
};
struct MR_LD_VF_MAP {
@@ -1916,7 +2226,7 @@ struct megasas_instance_template {
u32 (*init_adapter)(struct megasas_instance *);
u32 (*build_and_issue_cmd) (struct megasas_instance *,
struct scsi_cmnd *);
- void (*issue_dcmd) (struct megasas_instance *instance,
+ int (*issue_dcmd)(struct megasas_instance *instance,
struct megasas_cmd *cmd);
};
@@ -2014,6 +2324,19 @@ struct megasas_mgmt_info {
int max_index;
};
+enum MEGASAS_OCR_CAUSE {
+ FW_FAULT_OCR = 0,
+ SCSIIO_TIMEOUT_OCR = 1,
+ MFI_IO_TIMEOUT_OCR = 2,
+};
+
+enum DCMD_RETURN_STATUS {
+ DCMD_SUCCESS = 0,
+ DCMD_TIMEOUT = 1,
+ DCMD_FAILED = 2,
+ DCMD_NOT_FIRED = 3,
+};
+
u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
@@ -2051,4 +2374,8 @@ void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
int megasas_cmd_type(struct scsi_cmnd *cmd);
void megasas_setup_jbod_map(struct megasas_instance *instance);
+void megasas_update_sdev_properties(struct scsi_device *sdev);
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason);
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd);
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
#endif /*LSI_MEGARAID_SAS_H */
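The new DCMD_RETURN_STATUS codes change the caller contract for every DCMD issuer touched below: DCMD_TIMEOUT means firmware may still own the frame, so the command must not be returned to the pool. A hedged sketch of the consumption pattern the .c changes follow (example_issue() is a hypothetical wrapper around calls that do exist in this patch):

	static int example_issue(struct megasas_instance *instance,
				 struct megasas_cmd *cmd)
	{
		int ret;

		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
		switch (ret) {
		case DCMD_SUCCESS:
			/* reply payload in the DMA buffer is now valid */
			break;
		case DCMD_TIMEOUT:
			/* firmware may still own the frame: escalate via
			 * dcmd_timeout_ocr_possible(), do NOT free the cmd
			 */
			break;
		default:	/* DCMD_FAILED or DCMD_NOT_FIRED */
			break;
		}
		if (ret != DCMD_TIMEOUT)
			megasas_return_cmd(instance, cmd);
		return ret;
	}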
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 97a1c1c33b05..e6ebc7ae2df1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -83,7 +83,7 @@ module_param(throttlequeuedepth, int, S_IRUGO);
MODULE_PARM_DESC(throttlequeuedepth,
"Adapter queue depth when throttled due to I/O timeout. Default: 16");
-int resetwaittime = MEGASAS_RESET_WAIT_TIME;
+unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
"before resetting adapter. Default: 180");
@@ -92,6 +92,18 @@ int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
+int rdpq_enable = 1;
+module_param(rdpq_enable, int, S_IRUGO);
+MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
+
+unsigned int dual_qdepth_disable;
+module_param(dual_qdepth_disable, int, S_IRUGO);
+MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
+
+unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
+module_param(scmd_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
@@ -104,6 +116,8 @@ static int megasas_ld_list_query(struct megasas_instance *instance,
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
u32 seq_num, u32 class_locale_word);
+static int
+megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
/*
* PCI ID table for all supported controllers
*/
@@ -189,18 +203,18 @@ int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
int seconds);
void megasas_reset_reply_desc(struct megasas_instance *instance);
-int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
int initial);
int megasas_check_mpio_paths(struct megasas_instance *instance,
struct scsi_cmnd *scmd);
-void
+int
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
instance->instancet->fire_cmd(instance,
cmd->frame_phys_addr, 0, instance->reg_set);
+ return 0;
}
/**
@@ -473,7 +487,7 @@ static int
megasas_check_reset_xscale(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
(le32_to_cpu(*instance->consumer) ==
MEGASAS_ADPRESET_INPROG_SIGN))
return 1;
@@ -609,7 +623,7 @@ static int
megasas_check_reset_ppc(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
return 1;
return 0;
@@ -735,6 +749,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
&(regs)->inbound_high_queue_port);
writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
&(regs)->inbound_low_queue_port);
+ mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
}
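The mmiowb() slipped in before the unlock is the classic fix for spinlock-ordered MMIO: on platforms with relaxed I/O ordering it keeps the two queue-port writes from being observed after another CPU acquires hba_lock and posts its own frame. The shape of the pattern, restated as a self-contained sketch of the hunk above:

	static void example_fire(struct megasas_instance *instance,
				 dma_addr_t frame_phys_addr, u32 frame_count,
				 struct megasas_register_set __iomem *regs)
	{
		unsigned long flags;

		spin_lock_irqsave(&instance->hba_lock, flags);
		writel(upper_32_bits(frame_phys_addr),
		       &regs->inbound_high_queue_port);
		writel((lower_32_bits(frame_phys_addr) | (frame_count << 1)) | 1,
		       &regs->inbound_low_queue_port);
		mmiowb();	/* commit MMIO before another CPU can take the lock */
		spin_unlock_irqrestore(&instance->hba_lock, flags);
	}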
@@ -746,7 +761,7 @@ static int
megasas_check_reset_skinny(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL)
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
return 1;
return 0;
@@ -940,9 +955,8 @@ static int
megasas_check_reset_gen2(struct megasas_instance *instance,
struct megasas_register_set __iomem *regs)
{
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
return 1;
- }
return 0;
}
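A recurring transformation in this patch is adprecovery moving from a u8 guarded by hba_lock to an atomic_t, so hot paths can sample the recovery state without taking the spinlock. Roughly, assuming the field change shown in megaraid_sas.h above (example_queue_gate() is a hypothetical caller):

	/* Old pattern: take hba_lock just to read one byte.
	 * New pattern used throughout: a single lock-free read.
	 */
	static int example_queue_gate(struct megasas_instance *instance)
	{
		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
			return SCSI_MLQUEUE_HOST_BUSY;
		return 0;
	}
	/* writers flip state with atomic_set(&instance->adprecovery, new_state); */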
@@ -983,25 +997,20 @@ extern struct megasas_instance_template megasas_instance_template_fusion;
int
megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
- int seconds;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
- frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
+ frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
- /*
- * Issue the frame using inbound queue port
- */
- instance->instancet->issue_dcmd(instance, cmd);
+ if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+ (instance->instancet->issue_dcmd(instance, cmd))) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
+ }
- /*
- * Wait for cmd_status to change
- */
- if (instance->requestorId)
- seconds = MEGASAS_ROUTINE_WAIT_TIME_VF;
- else
- seconds = MFI_POLL_TIMEOUT_SECS;
- return wait_and_poll(instance, cmd, seconds);
+ return wait_and_poll(instance, cmd, instance->requestorId ?
+ MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
}
/**
@@ -1019,21 +1028,29 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, int timeout)
{
int ret = 0;
-
cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
- instance->instancet->issue_dcmd(instance, cmd);
+ if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+ (instance->instancet->issue_dcmd(instance, cmd))) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
+ }
+
if (timeout) {
ret = wait_event_timeout(instance->int_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
- if (!ret)
- return 1;
+ if (!ret) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
+ __func__, __LINE__);
+ return DCMD_TIMEOUT;
+ }
} else
wait_event(instance->int_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
return (cmd->cmd_status_drv == MFI_STAT_OK) ?
- 0 : 1;
+ DCMD_SUCCESS : DCMD_FAILED;
}
/**
@@ -1077,15 +1094,20 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
cmd->sync_cmd = 1;
cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
- instance->instancet->issue_dcmd(instance, cmd);
+ if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
+ (instance->instancet->issue_dcmd(instance, cmd))) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
+ }
if (timeout) {
ret = wait_event_timeout(instance->abort_cmd_wait_q,
cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
if (!ret) {
- dev_err(&instance->pdev->dev, "Command timedout"
- "from %s\n", __func__);
- return 1;
+ dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
+ __func__, __LINE__);
+ return DCMD_TIMEOUT;
}
} else
wait_event(instance->abort_cmd_wait_q,
@@ -1094,7 +1116,8 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
cmd->sync_cmd = 0;
megasas_return_cmd(instance, cmd);
- return 0;
+ return (cmd->cmd_status_drv == MFI_STAT_OK) ?
+ DCMD_SUCCESS : DCMD_FAILED;
}
/**
@@ -1621,7 +1644,7 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
return 0;
out_return_cmd:
megasas_return_cmd(instance, cmd);
- return 1;
+ return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1634,7 +1657,7 @@ static int
megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
struct megasas_instance *instance;
- unsigned long flags;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
instance = (struct megasas_instance *)
scmd->device->host->hostdata;
@@ -1648,35 +1671,38 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (instance->issuepend_done == 0)
return SCSI_MLQUEUE_HOST_BUSY;
- spin_lock_irqsave(&instance->hba_lock, flags);
/* Check for an mpio path and adjust behavior */
- if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
if (megasas_check_mpio_paths(instance, scmd) ==
(DID_RESET << 16)) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
return SCSI_MLQUEUE_HOST_BUSY;
} else {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
}
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
return 0;
}
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- return SCSI_MLQUEUE_HOST_BUSY;
+ mr_device_priv_data = scmd->device->hostdata;
+ if (!mr_device_priv_data) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
}
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (mr_device_priv_data->tm_busy)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
scmd->result = 0;
@@ -1699,12 +1725,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
break;
}
- if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
- dev_err(&instance->pdev->dev, "Err returned from build_and_issue_cmd\n");
- return SCSI_MLQUEUE_HOST_BUSY;
- }
-
- return 0;
+ return instance->instancet->build_and_issue_cmd(instance, scmd);
out_done:
scmd->scsi_done(scmd);
@@ -1726,27 +1747,39 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
}
/*
-* megasas_set_dma_alignment - Set DMA alignment for PI enabled VD
+* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
*
* @sdev: OS provided scsi device
*
* Returns void
*/
-static void megasas_set_dma_alignment(struct scsi_device *sdev)
+void megasas_update_sdev_properties(struct scsi_device *sdev)
{
+ u16 pd_index = 0;
u32 device_id, ld;
struct megasas_instance *instance;
struct fusion_context *fusion;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
struct MR_LD_RAID *raid;
struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
instance = megasas_lookup_instance(sdev->host->host_no);
fusion = instance->ctrl_context;
+ mr_device_priv_data = sdev->hostdata;
if (!fusion)
return;
- if (sdev->channel >= MEGASAS_MAX_PD_CHANNELS) {
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
+ instance->use_seqnum_jbod_fp) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ pd_sync = (void *)fusion->pd_seq_sync
+ [(instance->pd_seq_map_id - 1) & 1];
+ mr_device_priv_data->is_tm_capable =
+ pd_sync->seq[pd_index].capability.tmCapable;
+ } else {
device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+ sdev->id;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -1754,17 +1787,58 @@ static void megasas_set_dma_alignment(struct scsi_device *sdev)
raid = MR_LdRaidGet(ld, local_map_ptr);
if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
- blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+ blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+ mr_device_priv_data->is_tm_capable =
+ raid->capability.tmCapable;
}
}
+static void megasas_set_device_queue_depth(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+ int ret = DCMD_FAILED;
+ struct megasas_instance *instance;
+
+ instance = megasas_lookup_instance(sdev->host->host_no);
+
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+
+ if (instance->pd_info) {
+ mutex_lock(&instance->hba_mutex);
+ ret = megasas_get_pd_info(instance, pd_index);
+ mutex_unlock(&instance->hba_mutex);
+ }
+
+ if (ret != DCMD_SUCCESS)
+ return;
+
+ if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+
+ switch (instance->pd_list[pd_index].interface) {
+ case SAS_PD:
+ scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
+ break;
+
+ case SATA_PD:
+ scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
+ break;
+
+ default:
+ scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
+ }
+ }
+ }
+}
+
+
static int megasas_slave_configure(struct scsi_device *sdev)
{
u16 pd_index = 0;
struct megasas_instance *instance;
instance = megasas_lookup_instance(sdev->host->host_no);
- if (instance->allow_fw_scan) {
+ if (instance->pd_list_not_supported) {
if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
sdev->type == TYPE_DISK) {
pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
@@ -1774,12 +1848,14 @@ static int megasas_slave_configure(struct scsi_device *sdev)
return -ENXIO;
}
}
- megasas_set_dma_alignment(sdev);
+ megasas_set_device_queue_depth(sdev);
+ megasas_update_sdev_properties(sdev);
+
/*
* The RAID firmware may require extended timeouts.
*/
blk_queue_rq_timeout(sdev->request_queue,
- MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+ scmd_timeout * HZ);
return 0;
}
@@ -1788,6 +1864,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
{
u16 pd_index = 0;
struct megasas_instance *instance ;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
instance = megasas_lookup_instance(sdev->host->host_no);
if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
@@ -1797,15 +1874,29 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
pd_index =
(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
sdev->id;
- if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
+ if ((instance->pd_list_not_supported ||
+ instance->pd_list[pd_index].driveState ==
MR_PD_STATE_SYSTEM)) {
- return 0;
+ goto scan_target;
}
return -ENXIO;
}
+
+scan_target:
+ mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
+ GFP_KERNEL);
+ if (!mr_device_priv_data)
+ return -ENOMEM;
+ sdev->hostdata = mr_device_priv_data;
return 0;
}
+static void megasas_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
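With slave_alloc now allocating MR_PRIV_DEVICE and slave_destroy freeing it, per-device flags such as tm_busy are safe to read from the queuecommand fast path for the sdev's whole lifetime. A sketch of a consumer, assuming only the fields declared in the header above (example_tm_busy() is hypothetical; the real check sits inline in megasas_queue_command()):

	static bool example_tm_busy(struct scsi_cmnd *scmd)
	{
		struct MR_PRIV_DEVICE *priv = scmd->device->hostdata;

		/* hostdata is NULL only before slave_alloc succeeds */
		return priv && priv->tm_busy;
	}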
/*
* megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
* kill adapter
@@ -1845,7 +1936,7 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
void megaraid_sas_kill_hba(struct megasas_instance *instance)
{
/* Set critical error to block I/O & ioctls in case caller didn't */
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
+ atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
/* Wait 1 second to ensure IO or ioctls in build have posted */
msleep(1000);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -1854,7 +1945,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
/* Flush */
readl(&instance->reg_set->doorbell);
- if (instance->mpio && instance->requestorId)
+ if (instance->requestorId && instance->peerIsPresent)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
} else {
writel(MFI_STOP_ADP,
@@ -1883,7 +1974,7 @@ megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
spin_lock_irqsave(instance->host->host_lock, flags);
instance->flag &= ~MEGASAS_FW_BUSY;
- instance->host->can_queue = instance->max_scsi_cmds;
+ instance->host->can_queue = instance->cur_can_queue;
spin_unlock_irqrestore(instance->host->host_lock, flags);
}
}
@@ -1905,7 +1996,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
unsigned long flags;
/* If we have already declared adapter dead, donot complete cmds */
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
spin_lock_irqsave(&instance->completion_lock, flags);
@@ -1974,7 +2065,7 @@ void megasas_do_ocr(struct megasas_instance *instance)
*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
}
instance->instancet->disable_intr(instance);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
instance->issuepend_done = 0;
atomic_set(&instance->fw_outstanding, 0);
@@ -2054,9 +2145,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
- megasas_issue_blocked_cmd(instance, cmd, 0);
-
- if (dcmd->cmd_status) {
+ if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
" failed with status 0x%x for scsi%d\n",
dcmd->cmd_status, instance->host->host_no);
@@ -2166,9 +2255,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
- megasas_issue_blocked_cmd(instance, cmd, 0);
- if (dcmd->cmd_status) {
+ if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
" failed with status 0x%x for scsi%d\n",
dcmd->cmd_status, instance->host->host_no);
@@ -2373,21 +2461,21 @@ void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
*/
static int megasas_wait_for_outstanding(struct megasas_instance *instance)
{
- int i;
+ int i, sl, outstanding;
u32 reset_index;
u32 wait_time = MEGASAS_RESET_WAIT_TIME;
- u8 adprecovery;
unsigned long flags;
struct list_head clist_local;
struct megasas_cmd *reset_cmd;
u32 fw_state;
- u8 kill_adapter_flag;
- spin_lock_irqsave(&instance->hba_lock, flags);
- adprecovery = instance->adprecovery;
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
+ __func__, __LINE__);
+ return FAILED;
+ }
- if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
INIT_LIST_HEAD(&clist_local);
spin_lock_irqsave(&instance->hba_lock, flags);
@@ -2398,18 +2486,13 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
for (i = 0; i < wait_time; i++) {
msleep(1000);
- spin_lock_irqsave(&instance->hba_lock, flags);
- adprecovery = instance->adprecovery;
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- if (adprecovery == MEGASAS_HBA_OPERATIONAL)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
break;
}
- if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
- spin_lock_irqsave(&instance->hba_lock, flags);
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
- spin_unlock_irqrestore(&instance->hba_lock, flags);
+ atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
return FAILED;
}
@@ -2447,7 +2530,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
}
for (i = 0; i < resetwaittime; i++) {
- int outstanding = atomic_read(&instance->fw_outstanding);
+ outstanding = atomic_read(&instance->fw_outstanding);
if (!outstanding)
break;
@@ -2466,67 +2549,60 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
}
i = 0;
- kill_adapter_flag = 0;
+ outstanding = atomic_read(&instance->fw_outstanding);
+ fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+
+ if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
+ goto no_outstanding;
+
+ if (instance->disableOnlineCtrlReset)
+ goto kill_hba_and_failed;
do {
- fw_state = instance->instancet->read_fw_status_reg(
- instance->reg_set) & MFI_STATE_MASK;
- if ((fw_state == MFI_STATE_FAULT) &&
- (instance->disableOnlineCtrlReset == 0)) {
- if (i == 3) {
- kill_adapter_flag = 2;
- break;
- }
+ if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
+ dev_info(&instance->pdev->dev,
+ "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
+ __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
+ if (i == 3)
+ goto kill_hba_and_failed;
megasas_do_ocr(instance);
- kill_adapter_flag = 1;
- /* wait for 1 secs to let FW finish the pending cmds */
- msleep(1000);
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
+ dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
+ __func__, __LINE__);
+ return FAILED;
+ }
+ dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
+ __func__, __LINE__);
+
+ for (sl = 0; sl < 10; sl++)
+ msleep(500);
+
+ outstanding = atomic_read(&instance->fw_outstanding);
+
+ fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+ if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
+ goto no_outstanding;
}
i++;
} while (i <= 3);
- if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) {
- if (instance->disableOnlineCtrlReset == 0) {
- megasas_do_ocr(instance);
+no_outstanding:
- /* wait for 5 secs to let FW finish the pending cmds */
- for (i = 0; i < wait_time; i++) {
- int outstanding =
- atomic_read(&instance->fw_outstanding);
- if (!outstanding)
- return SUCCESS;
- msleep(1000);
- }
- }
- }
+ dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
+ __func__, __LINE__);
+ return SUCCESS;
- if (atomic_read(&instance->fw_outstanding) ||
- (kill_adapter_flag == 2)) {
- dev_notice(&instance->pdev->dev, "pending cmds after reset\n");
- /*
- * Send signal to FW to stop processing any pending cmds.
- * The controller will be taken offline by the OS now.
- */
- if ((instance->pdev->device ==
- PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device ==
- PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
- writel(MFI_STOP_ADP,
- &instance->reg_set->doorbell);
- } else {
- writel(MFI_STOP_ADP,
- &instance->reg_set->inbound_doorbell);
- }
- megasas_dump_pending_frames(instance);
- spin_lock_irqsave(&instance->hba_lock, flags);
- instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- return FAILED;
- }
+kill_hba_and_failed:
- dev_notice(&instance->pdev->dev, "no pending cmds after reset\n");
+ /* Reset not supported, kill adapter */
+ dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
+ " disableOnlineCtrlReset %d fw_outstanding %d \n",
+ __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
+ atomic_read(&instance->fw_outstanding));
+ megasas_dump_pending_frames(instance);
+ megaraid_sas_kill_hba(instance);
- return SUCCESS;
+ return FAILED;
}
/**
@@ -2547,7 +2623,7 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
scmd->cmnd[0], scmd->retries);
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
return FAILED;
}
@@ -2575,7 +2651,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
unsigned long flags;
if (time_after(jiffies, scmd->jiffies_at_alloc +
- (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
+ (scmd_timeout * 2) * HZ)) {
return BLK_EH_NOT_HANDLED;
}
@@ -2851,6 +2927,16 @@ megasas_page_size_show(struct device *cdev,
return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
}
+static ssize_t
+megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
+}
+
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -2859,12 +2945,15 @@ static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
megasas_fw_crash_state_show, megasas_fw_crash_state_store);
static DEVICE_ATTR(page_size, S_IRUGO,
megasas_page_size_show, NULL);
+static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
+ megasas_ldio_outstanding_show, NULL);
struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
&dev_attr_fw_crash_buffer,
&dev_attr_fw_crash_state,
&dev_attr_page_size,
+ &dev_attr_ldio_outstanding,
NULL,
};
@@ -2878,6 +2967,7 @@ static struct scsi_host_template megasas_template = {
.proc_name = "megaraid_sas",
.slave_configure = megasas_slave_configure,
.slave_alloc = megasas_slave_alloc,
+ .slave_destroy = megasas_slave_destroy,
.queuecommand = megasas_queue_command,
.eh_device_reset_handler = megasas_reset_device,
.eh_bus_reset_handler = megasas_reset_bus_host,
@@ -3277,13 +3367,13 @@ process_fw_state_change_wq(struct work_struct *work)
u32 wait;
unsigned long flags;
- if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
- instance->adprecovery);
+ atomic_read(&instance->adprecovery));
return ;
}
- if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
dev_notice(&instance->pdev->dev, "FW detected to be in fault"
"state, restarting it...\n");
@@ -3326,7 +3416,7 @@ process_fw_state_change_wq(struct work_struct *work)
megasas_issue_init_mfi(instance);
spin_lock_irqsave(&instance->hba_lock, flags);
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
spin_unlock_irqrestore(&instance->hba_lock, flags);
instance->instancet->enable_intr(instance);
@@ -3391,14 +3481,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
instance->instancet->disable_intr(instance);
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
instance->issuepend_done = 0;
atomic_set(&instance->fw_outstanding, 0);
megasas_internal_reset_defer_cmds(instance);
dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
- fw_state, instance->adprecovery);
+ fw_state, atomic_read(&instance->adprecovery));
schedule_work(&instance->work_init);
return IRQ_HANDLED;
@@ -3852,6 +3942,92 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
}
/*
+ * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
+ * @instance: Adapter soft state
+ *
+ * Returns INITIATE_OCR only for Fusion adapters when driver load/unload is
+ * not in progress and FW is not under OCR; else KILL_ADAPTER or IGNORE_TIMEOUT.
+ */
+inline int
+dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
+
+ if (!instance->ctrl_context)
+ return KILL_ADAPTER;
+ else if (instance->unload ||
+ test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
+ return IGNORE_TIMEOUT;
+ else
+ return INITIATE_OCR;
+}
+
+static int
+megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
+{
+ int ret;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(instance->pd_info, 0, sizeof(*instance->pd_info));
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->mbox.s[0] = cpu_to_le16(device_id);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
+ dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ switch (ret) {
+ case DCMD_SUCCESS:
+ instance->pd_list[device_id].interface =
+ instance->pd_info->state.ddf.pdType.intf;
+ break;
+
+ case DCMD_TIMEOUT:
+
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+
+ break;
+ }
+
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
+
+ return ret;
+}
+/*
* megasas_get_pd_list_info - Returns FW's pd_list structure
* @instance: Adapter soft state
* @pd_list: pd_list structure
@@ -3906,42 +4082,78 @@ megasas_get_pd_list(struct megasas_instance *instance)
if (instance->ctrl_context && !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- /*
- * the following function will get the instance PD LIST.
- */
+ switch (ret) {
+ case DCMD_FAILED:
+ dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
+ "failed/not supported by firmware\n");
+
+ if (instance->ctrl_context)
+ megaraid_sas_kill_hba(instance);
+ else
+ instance->pd_list_not_supported = 1;
+ break;
+ case DCMD_TIMEOUT:
+
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ /*
+ * DCMD failed from AEN path.
+ * AEN path already hold reset_mutex to avoid PCI access
+ * while OCR is in progress.
+ */
+ mutex_unlock(&instance->reset_mutex);
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+
+ break;
- pd_addr = ci->addr;
+ case DCMD_SUCCESS:
+ pd_addr = ci->addr;
- if (ret == 0 &&
- (le32_to_cpu(ci->count) <
- (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
+ if ((le32_to_cpu(ci->count) >
+ (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
+ break;
memset(instance->local_pd_list, 0,
- MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
+ MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
-
instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
- le16_to_cpu(pd_addr->deviceId);
+ le16_to_cpu(pd_addr->deviceId);
instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
- pd_addr->scsiDevType;
+ pd_addr->scsiDevType;
instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
- MR_PD_STATE_SYSTEM;
+ MR_PD_STATE_SYSTEM;
pd_addr++;
}
+
memcpy(instance->pd_list, instance->local_pd_list,
sizeof(instance->pd_list));
+ break;
+
}
pci_free_consistent(instance->pdev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
ci, ci_h);
- megasas_return_cmd(instance, cmd);
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4002,33 +4214,63 @@ megasas_get_ld_list(struct megasas_instance *instance)
if (instance->ctrl_context && !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
-
ld_count = le32_to_cpu(ci->ldCount);
- /* the following function will get the instance PD LIST */
+ switch (ret) {
+ case DCMD_FAILED:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case DCMD_TIMEOUT:
+
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ /*
+ * DCMD failed from AEN path.
+ * AEN path already hold reset_mutex to avoid PCI access
+ * while OCR is in progress.
+ */
+ mutex_unlock(&instance->reset_mutex);
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+
+ break;
+
+ case DCMD_SUCCESS:
+ if (ld_count > instance->fw_supported_vd_count)
+ break;
- if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
for (ld_index = 0; ld_index < ld_count; ld_index++) {
if (ci->ldList[ld_index].state != 0) {
ids = ci->ldList[ld_index].ref.targetId;
- instance->ld_ids[ids] =
- ci->ldList[ld_index].ref.targetId;
+ instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
}
}
+
+ break;
}
- pci_free_consistent(instance->pdev,
- sizeof(struct MR_LD_LIST),
- ci,
- ci_h);
+ pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
+
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
- megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4090,26 +4332,61 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd->pad_0 = 0;
if (instance->ctrl_context && !instance->mask_interrupts)
- ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- tgtid_count = le32_to_cpu(ci->count);
+ switch (ret) {
+ case DCMD_FAILED:
+ dev_info(&instance->pdev->dev,
+ "DCMD not supported by firmware - %s %d\n",
+ __func__, __LINE__);
+ ret = megasas_get_ld_list(instance);
+ break;
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ /*
+ * DCMD failed from AEN path.
+ * AEN path already hold reset_mutex to avoid PCI access
+ * while OCR is in progress.
+ */
+ mutex_unlock(&instance->reset_mutex);
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+
+ break;
+ case DCMD_SUCCESS:
+ tgtid_count = le32_to_cpu(ci->count);
+
+ if ((tgtid_count > (instance->fw_supported_vd_count)))
+ break;
- if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
ids = ci->targetId[ld_index];
instance->ld_ids[ids] = ci->targetId[ld_index];
}
+ break;
}
pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
- ci, ci_h);
+ ci, ci_h);
- megasas_return_cmd(instance, cmd);
+ if (ret != DCMD_TIMEOUT)
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -4223,38 +4500,73 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
dcmd->mbox.b[0] = 1;
if (instance->ctrl_context && !instance->mask_interrupts)
- ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- if (!ret) {
+ switch (ret) {
+ case DCMD_SUCCESS:
memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
+ /* Save required controller information in
+ * CPU endianness format.
+ */
le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+
+ /* Update the latest Ext VD info.
+ * From the init path, store current firmware details.
+ * From the OCR path, detect any firmware property changes
+ * in case of a firmware upgrade without system reboot.
+ */
megasas_update_ext_vd_details(instance);
instance->use_seqnum_jbod_fp =
ctrl_info->adapterOperations3.useSeqNumJbodFP;
+
+ /* Check whether controller is iMR or MR */
instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
dev_info(&instance->pdev->dev,
- "controller type\t: %s(%dMB)\n",
- instance->is_imr ? "iMR" : "MR",
- le16_to_cpu(ctrl_info->memory_size));
+ "controller type\t: %s(%dMB)\n",
+ instance->is_imr ? "iMR" : "MR",
+ le16_to_cpu(ctrl_info->memory_size));
+
instance->disableOnlineCtrlReset =
ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
- dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
- instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
instance->secure_jbod_support =
ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
+ instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
instance->secure_jbod_support ? "Yes" : "No");
+ break;
+
+ case DCMD_TIMEOUT:
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+ case DCMD_FAILED:
+ megaraid_sas_kill_hba(instance);
+ break;
+
}
pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
ci, ci_h);
megasas_return_cmd(instance, cmd);
+
+
return ret;
}
@@ -4304,12 +4616,28 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
if (instance->ctrl_context && !instance->mask_interrupts)
- ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- megasas_return_cmd(instance, cmd);
+ if (ret == DCMD_TIMEOUT) {
+ switch (dcmd_timeout_ocr_possible(instance)) {
+ case INITIATE_OCR:
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ megasas_reset_fusion(instance->host,
+ MFI_IO_TIMEOUT_OCR);
+ break;
+ case KILL_ADAPTER:
+ megaraid_sas_kill_hba(instance);
+ break;
+ case IGNORE_TIMEOUT:
+ dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
+ __func__, __LINE__);
+ break;
+ }
+ } else
+ megasas_return_cmd(instance, cmd);
+
return ret;
}
@@ -4426,6 +4754,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
}
+ instance->cur_can_queue = instance->max_scsi_cmds;
/*
* Create a pool of commands
*/
@@ -4712,7 +5041,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
case PCI_DEVICE_ID_DELL_PERC5:
default:
instance->instancet = &megasas_instance_template_xscale;
- instance->allow_fw_scan = 1;
break;
}
@@ -4756,6 +5084,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msix_vectors = ((scratch_pad_2
& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+ if (rdpq_enable)
+ instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
+ 1 : 0;
fw_msix_count = instance->msix_vectors;
/* Save 1-15 reply post index address to local memory
* Index 0 is already saved from reg offset
@@ -4792,6 +5123,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
dev_info(&instance->pdev->dev,
"current msix/online cpus\t: (%d/%d)\n",
instance->msix_vectors, (unsigned int)num_online_cpus());
+ dev_info(&instance->pdev->dev,
+ "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
@@ -4856,7 +5189,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
- instance->mpio = ctrl_info->adapterOperations2.mpio;
+ instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
+ instance->passive = ctrl_info->cluster.passive;
+ memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
instance->UnevenSpanSupport =
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
@@ -4932,6 +5267,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->throttlequeuedepth =
MEGASAS_THROTTLE_QUEUE_DEPTH;
+ if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
+ resetwaittime = MEGASAS_RESET_WAIT_TIME;
+
+ if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
+ scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
/* Launch SR-IOV heartbeat timer */
if (instance->requestorId) {
@@ -5035,10 +5375,8 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
- if (megasas_issue_blocked_cmd(instance, cmd, 30))
- dev_err(&instance->pdev->dev, "Command timedout"
- "from %s\n", __func__);
- else {
+ if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
+ DCMD_SUCCESS) {
/*
* Copy the data back into callers buffer
*/
@@ -5047,7 +5385,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
eli->clear_seq_num = el_info->clear_seq_num;
eli->shutdown_seq_num = el_info->shutdown_seq_num;
eli->boot_seq_num = el_info->boot_seq_num;
- }
+ } else
+ dev_err(&instance->pdev->dev, "DCMD failed "
+ "from %s\n", __func__);
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
@@ -5262,6 +5602,8 @@ static int megasas_io_attach(struct megasas_instance *instance)
if (instance->ctrl_context) {
host->hostt->eh_device_reset_handler = NULL;
host->hostt->eh_bus_reset_handler = NULL;
+ host->hostt->eh_target_reset_handler = megasas_reset_target_fusion;
+ host->hostt->eh_abort_handler = megasas_task_abort_fusion;
}
/*
@@ -5447,7 +5789,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->flag_ieee = 0;
instance->ev = NULL;
instance->issuepend_done = 1;
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
instance->is_imr = 0;
instance->evt_detail = pci_alloc_consistent(pdev,
@@ -5461,6 +5803,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
goto fail_alloc_dma_buf;
}
+ instance->pd_info = pci_alloc_consistent(pdev,
+ sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+
+ if (!instance->pd_info)
+ dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+
/*
* Initialize locks and queues
*/
@@ -5476,8 +5824,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->aen_mutex);
mutex_init(&instance->reset_mutex);
+ mutex_init(&instance->hba_mutex);
/*
* Initialize PCI related and misc parameters
@@ -5592,6 +5940,10 @@ fail_alloc_dma_buf:
instance->evt_detail,
instance->evt_detail_h);
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info,
+ instance->pd_info_h);
if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
@@ -5616,7 +5968,7 @@ static void megasas_flush_cache(struct megasas_instance *instance)
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
cmd = megasas_get_cmd(instance);
@@ -5638,9 +5990,12 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
- if (megasas_issue_blocked_cmd(instance, cmd, 30))
- dev_err(&instance->pdev->dev, "Command timedout"
- " from %s\n", __func__);
+ if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
+ != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev,
+ "return from %s %d\n", __func__, __LINE__);
+ return;
+ }
megasas_return_cmd(instance, cmd);
}
@@ -5656,7 +6011,7 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return;
cmd = megasas_get_cmd(instance);
@@ -5666,13 +6021,13 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
if (instance->aen_cmd)
megasas_issue_blocked_abort_cmd(instance,
- instance->aen_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+ instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
if (instance->map_update_cmd)
megasas_issue_blocked_abort_cmd(instance,
- instance->map_update_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+ instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
if (instance->jbod_seq_cmd)
megasas_issue_blocked_abort_cmd(instance,
- instance->jbod_seq_cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
+ instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
dcmd = &cmd->frame->dcmd;
@@ -5687,9 +6042,12 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dcmd->data_xfer_len = 0;
dcmd->opcode = cpu_to_le32(opcode);
- if (megasas_issue_blocked_cmd(instance, cmd, 30))
- dev_err(&instance->pdev->dev, "Command timedout"
- "from %s\n", __func__);
+ if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
+ != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev,
+ "return from %s %d\n", __func__, __LINE__);
+ return;
+ }
megasas_return_cmd(instance, cmd);
}
@@ -5847,6 +6205,10 @@ fail_init_mfi:
instance->evt_detail,
instance->evt_detail_h);
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info,
+ instance->pd_info_h);
if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
@@ -5941,11 +6303,11 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (fusion->ld_drv_map[i])
free_pages((ulong)fusion->ld_drv_map[i],
fusion->drv_map_pages);
- if (fusion->pd_seq_sync)
- dma_free_coherent(&instance->pdev->dev,
- pd_seq_map_sz,
- fusion->pd_seq_sync[i],
- fusion->pd_seq_phys[i]);
+ if (fusion->pd_seq_sync[i])
+ dma_free_coherent(&instance->pdev->dev,
+ pd_seq_map_sz,
+ fusion->pd_seq_sync[i],
+ fusion->pd_seq_phys[i]);
}
free_pages((ulong)instance->ctrl_context,
instance->ctrl_context_pages);
@@ -5965,6 +6327,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail, instance->evt_detail_h);
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info,
+ instance->pd_info_h);
if (instance->vf_affiliation)
pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6090,7 +6456,7 @@ static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
for (i = 0; i < megasas_mgmt_info.max_index; i++) {
local_instance = megasas_mgmt_info.instance[i];
if (local_instance && local_instance->crash_dump_drv_support) {
- if ((local_instance->adprecovery ==
+ if ((atomic_read(&local_instance->adprecovery) ==
MEGASAS_HBA_OPERATIONAL) &&
!megasas_set_crash_dump_params(local_instance,
crash_support)) {
@@ -6227,7 +6593,15 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* cmd to the SCSI mid-layer
*/
cmd->sync_cmd = 1;
- megasas_issue_blocked_cmd(instance, cmd, 0);
+ if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
+ cmd->sync_cmd = 0;
+ dev_err(&instance->pdev->dev,
+ "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
+ __func__, __LINE__, cmd->frame->dcmd.opcode,
+ cmd->cmd_status_drv);
+ return -EBUSY;
+ }
+
cmd->sync_cmd = 0;
if (instance->unload == 1) {
@@ -6282,12 +6656,13 @@ out:
}
for (i = 0; i < ioc->sge_count; i++) {
- if (kbuff_arr[i])
+ if (kbuff_arr[i]) {
dma_free_coherent(&instance->pdev->dev,
le32_to_cpu(kern_sge32[i].length),
kbuff_arr[i],
le32_to_cpu(kern_sge32[i].phys_addr));
kbuff_arr[i] = NULL;
+ }
}
megasas_return_cmd(instance, cmd);
@@ -6330,7 +6705,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_err(&instance->pdev->dev, "Controller in crit error\n");
error = -ENODEV;
goto out_kfree_ioc;
@@ -6349,7 +6724,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
for (i = 0; i < wait_time; i++) {
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
break;
}
@@ -6364,7 +6739,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
}
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
dev_err(&instance->pdev->dev, "timed out while"
@@ -6406,7 +6781,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
if (!instance)
return -ENODEV;
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
return -ENODEV;
}
@@ -6417,7 +6792,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
for (i = 0; i < wait_time; i++) {
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock,
flags);
break;
@@ -6434,7 +6809,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
}
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
dev_err(&instance->pdev->dev, "timed out while waiting"
"for HBA to recover\n");
@@ -6442,10 +6817,10 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
}
spin_unlock_irqrestore(&instance->hba_lock, flags);
- mutex_lock(&instance->aen_mutex);
+ mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
aen.class_locale_word);
- mutex_unlock(&instance->aen_mutex);
+ mutex_unlock(&instance->reset_mutex);
return error;
}
@@ -6476,9 +6851,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
int i;
int error = 0;
compat_uptr_t ptr;
- unsigned long local_raw_ptr;
u32 local_sense_off;
u32 local_sense_len;
+ u32 user_sense_off;
if (clear_user(ioc, sizeof(*ioc)))
return -EFAULT;
@@ -6496,17 +6871,16 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
* sense_len is not null, so prepare the 64bit value under
* the same condition.
*/
- if (get_user(local_raw_ptr, ioc->frame.raw) ||
- get_user(local_sense_off, &ioc->sense_off) ||
- get_user(local_sense_len, &ioc->sense_len))
+ if (get_user(local_sense_off, &ioc->sense_off) ||
+ get_user(local_sense_len, &ioc->sense_len) ||
+ get_user(user_sense_off, &cioc->sense_off))
return -EFAULT;
-
if (local_sense_len) {
void __user **sense_ioc_ptr =
- (void __user **)((u8*)local_raw_ptr + local_sense_off);
+ (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
compat_uptr_t *sense_cioc_ptr =
- (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
if (get_user(ptr, sense_cioc_ptr) ||
put_user(compat_ptr(ptr), sense_ioc_ptr))
return -EFAULT;
@@ -6647,6 +7021,7 @@ megasas_aen_polling(struct work_struct *work)
int i, j, doscan = 0;
u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
int error;
+ u8 dcmd_ret = DCMD_SUCCESS;
if (!instance) {
printk(KERN_ERR "invalid instance!\n");
@@ -6659,16 +7034,7 @@ megasas_aen_polling(struct work_struct *work)
wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
/* Don't run the event workqueue thread if OCR is running */
- for (i = 0; i < wait_time; i++) {
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
- break;
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "%s waiting for "
- "controller reset to finish for scsi%d\n",
- __func__, instance->host->host_no);
- }
- msleep(1000);
- }
+ mutex_lock(&instance->reset_mutex);
instance->ev = NULL;
host = instance->host;
@@ -6676,212 +7042,127 @@ megasas_aen_polling(struct work_struct *work)
megasas_decode_evt(instance);
switch (le32_to_cpu(instance->evt_detail->code)) {
- case MR_EVT_PD_INSERTED:
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- pd_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, i, j, 0);
-
- if (instance->pd_list[pd_index].driveState
- == MR_PD_STATE_SYSTEM) {
- if (!sdev1)
- scsi_add_device(host, i, j, 0);
-
- if (sdev1)
- scsi_device_put(sdev1);
- }
- }
- }
- }
- doscan = 0;
- break;
+ case MR_EVT_PD_INSERTED:
case MR_EVT_PD_REMOVED:
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- pd_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, i, j, 0);
-
- if (instance->pd_list[pd_index].driveState
- == MR_PD_STATE_SYSTEM) {
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
- }
- }
- }
- }
- doscan = 0;
+ dcmd_ret = megasas_get_pd_list(instance);
+ if (dcmd_ret == DCMD_SUCCESS)
+ doscan = SCAN_PD_CHANNEL;
break;
case MR_EVT_LD_OFFLINE:
case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
- if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
-
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
- }
- }
- }
- doscan = 0;
- }
- break;
case MR_EVT_LD_CREATED:
if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0;
- j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
-
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- }
- }
- doscan = 0;
- }
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+ if (dcmd_ret == DCMD_SUCCESS)
+ doscan = SCAN_VD_CHANNEL;
+
break;
+
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
case MR_EVT_FOREIGN_CFG_IMPORTED:
case MR_EVT_LD_STATE_CHANGE:
- doscan = 1;
+ dcmd_ret = megasas_get_pd_list(instance);
+
+ if (dcmd_ret != DCMD_SUCCESS)
+ break;
+
+ if (!instance->requestorId ||
+ (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
+ dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
+
+ if (dcmd_ret != DCMD_SUCCESS)
+ break;
+
+ doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
+ dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
+ instance->host->host_no);
break;
+
case MR_EVT_CTRL_PROP_CHANGED:
- megasas_get_ctrl_info(instance);
- break;
+ dcmd_ret = megasas_get_ctrl_info(instance);
+ break;
default:
doscan = 0;
break;
}
} else {
dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
+ mutex_unlock(&instance->reset_mutex);
kfree(ev);
return;
}
- if (doscan) {
- dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
- instance->host->host_no);
- if (megasas_get_pd_list(instance) == 0) {
- for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
- pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
- sdev1 = scsi_device_lookup(host, i, j, 0);
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- if (!sdev1) {
- scsi_add_device(host, i, j, 0);
- }
- if (sdev1)
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ mutex_unlock(&instance->reset_mutex);
+
+ if (doscan & SCAN_PD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
+ sdev1 = scsi_device_lookup(host, i, j, 0);
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
+ if (!sdev1)
+ scsi_add_device(host, i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
}
}
}
}
+ }
- if (!instance->requestorId ||
- megasas_get_ld_vf_affiliation(instance, 0)) {
- if (megasas_ld_list_query(instance,
- MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
- megasas_get_ld_list(instance);
- for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
- for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL;
- j++) {
- ld_index =
- (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
-
- sdev1 = scsi_device_lookup(host,
- MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- if (instance->ld_ids[ld_index]
- != 0xff) {
- if (!sdev1)
- scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
- else
- scsi_device_put(sdev1);
- } else {
- if (sdev1) {
- scsi_remove_device(sdev1);
- scsi_device_put(sdev1);
- }
+ if (doscan & SCAN_VD_CHANNEL) {
+ for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
+ for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
+ ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
+ sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ if (instance->ld_ids[ld_index] != 0xff) {
+ if (!sdev1)
+ scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
+ else
+ scsi_device_put(sdev1);
+ } else {
+ if (sdev1) {
+ scsi_remove_device(sdev1);
+ scsi_device_put(sdev1);
}
}
}
}
}
- if (instance->aen_cmd != NULL) {
- kfree(ev);
- return ;
- }
-
- seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+ if (dcmd_ret == DCMD_SUCCESS)
+ seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
+ else
+ seq_num = instance->last_seq_num;
/* Register AEN with FW for latest sequence number plus 1 */
class_locale.members.reserved = 0;
class_locale.members.locale = MR_EVT_LOCALE_ALL;
class_locale.members.class = MR_EVT_CLASS_DEBUG;
- mutex_lock(&instance->aen_mutex);
+
+ if (instance->aen_cmd != NULL) {
+ kfree(ev);
+ return;
+ }
+
+ mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, seq_num,
class_locale.word);
- mutex_unlock(&instance->aen_mutex);
-
if (error)
- dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
+ dev_err(&instance->pdev->dev,
+ "register aen failed error %x\n", error);
+ mutex_unlock(&instance->reset_mutex);
kfree(ev);
}
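megasas_aen_polling() now reduces every hotplug event to a doscan bitmask (SCAN_PD_CHANNEL, SCAN_VD_CHANNEL) while holding reset_mutex, then performs the actual add/remove outside the lock. Both scan loops apply the same per-target reconcile step; a hedged sketch of that step (reconcile_target() is an illustrative name, not a driver function):

	/* Present in the firmware's view -> ensure a scsi_device exists;
	 * absent -> remove any stale scsi_device. */
	static void reconcile_target(struct Scsi_Host *host, int channel,
				     int id, bool fw_present)
	{
		struct scsi_device *sdev = scsi_device_lookup(host, channel, id, 0);

		if (fw_present) {
			if (!sdev)
				scsi_add_device(host, channel, id, 0);
			else
				scsi_device_put(sdev);
		} else if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		}
	}

On a DCMD failure the AEN is re-registered with last_seq_num instead of seq_num + 1, so no event is skipped while the controller recovers.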
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 741509b35617..e413113c86ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1020,6 +1020,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
/* assume this IO needs the full row - we'll adjust if not true */
regSize = stripSize;
+ io_info->do_fp_rlbypass = raid->capability.fpBypassRegionLock;
+
/* Check if we can send this I/O via FastPath */
if (raid->capability.fpCapable) {
if (isRead)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 8d630a552b07..98a848bdfdc2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -91,7 +91,10 @@ void megasas_start_timer(struct megasas_instance *instance,
struct timer_list *timer,
void *fn, unsigned long interval);
extern struct megasas_mgmt_info megasas_mgmt_info;
-extern int resetwaittime;
+extern unsigned int resetwaittime;
+extern unsigned int dual_qdepth_disable;
+static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
+static void megasas_free_reply_fusion(struct megasas_instance *instance);
@@ -201,58 +204,72 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
&instance->reg_set->inbound_low_queue_port);
writel(le32_to_cpu(req_desc->u.high),
&instance->reg_set->inbound_high_queue_port);
+ mmiowb();
spin_unlock_irqrestore(&instance->hba_lock, flags);
#endif
}
-
/**
- * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
- * @instance: Adapter soft state
+ * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
+ * @instance: Adapter soft state
+ * @fw_boot_context: Whether this function is called during probe or after OCR
+ *
+ * This function is only for fusion controllers.
+ * Update host->can_queue if the firmware has downgraded the maximum
+ * supported commands. The firmware upgrade case is skipped because the
+ * underlying firmware has more resources than it exposes to the OS.
+ *
*/
-static void megasas_teardown_frame_pool_fusion(
- struct megasas_instance *instance)
+static void
+megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
{
- int i;
- struct fusion_context *fusion = instance->ctrl_context;
-
- u16 max_cmd = instance->max_fw_cmds;
+ u16 cur_max_fw_cmds = 0;
+ u16 ldio_threshold = 0;
+ struct megasas_register_set __iomem *reg_set;
- struct megasas_cmd_fusion *cmd;
+ reg_set = instance->reg_set;
- if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
- dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
- "sense pool : %p\n", fusion->sg_dma_pool,
- fusion->sense_dma_pool);
- return;
- }
+ cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
- /*
- * Return all frames to pool
- */
- for (i = 0; i < max_cmd; i++) {
+ if (dual_qdepth_disable || !cur_max_fw_cmds)
+ cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
+ else
+ ldio_threshold =
+ (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
+
+ dev_info(&instance->pdev->dev,
+ "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
+ cur_max_fw_cmds, ldio_threshold);
+
+ if (fw_boot_context == OCR_CONTEXT) {
+ cur_max_fw_cmds = cur_max_fw_cmds - 1;
+ if (cur_max_fw_cmds <= instance->max_fw_cmds) {
+ instance->cur_can_queue =
+ cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ instance->host->can_queue = instance->cur_can_queue;
+ instance->ldio_threshold = ldio_threshold;
+ }
+ } else {
+ instance->max_fw_cmds = cur_max_fw_cmds;
+ instance->ldio_threshold = ldio_threshold;
- cmd = fusion->cmd_list[i];
+ if (!instance->is_rdpq)
+ instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
- if (cmd->sg_frame)
- pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
- cmd->sg_frame_phys_addr);
+ /*
+ * Reduce the max supported cmds by 1. This is to ensure that the
+ * reply_q_sz (1 more than the max cmd that driver may send)
+ * does not exceed max cmds that the FW can support
+ */
+ instance->max_fw_cmds = instance->max_fw_cmds - 1;
- if (cmd->sense)
- pci_pool_free(fusion->sense_dma_pool, cmd->sense,
- cmd->sense_phys_addr);
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ instance->cur_can_queue = instance->max_scsi_cmds;
}
-
- /*
- * Now destroy the pool itself
- */
- pci_pool_destroy(fusion->sg_dma_pool);
- pci_pool_destroy(fusion->sense_dma_pool);
-
- fusion->sg_dma_pool = NULL;
- fusion->sense_dma_pool = NULL;
}
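Worked example of the queue-depth arithmetic (illustrative numbers; the constants MEGASAS_FUSION_INTERNAL_CMDS = 5 and MEGASAS_FUSION_IOCTL_CMDS = 3 are assumptions about this driver revision): if scratch pad 3 reports 1024 extended commands and the FW status register reports 928, then ldio_threshold = 928 - 3 = 925. On the probe path, max_fw_cmds = 1024 - 1 = 1023 and max_scsi_cmds = cur_can_queue = 1023 - (5 + 3) = 1015. On the OCR path the recomputed budget is applied only when it does not exceed the current max_fw_cmds, so a firmware upgrade can never raise can_queue on a live host.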
-
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
* @instance: Adapter soft state
@@ -262,55 +279,65 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
{
int i;
struct fusion_context *fusion = instance->ctrl_context;
+ struct megasas_cmd_fusion *cmd;
- u32 max_cmds, req_sz, reply_sz, io_frames_sz;
+ /* SG, Sense */
+ for (i = 0; i < instance->max_fw_cmds; i++) {
+ cmd = fusion->cmd_list[i];
+ if (cmd) {
+ if (cmd->sg_frame)
+ pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+ if (cmd->sense)
+ pci_pool_free(fusion->sense_dma_pool, cmd->sense,
+ cmd->sense_phys_addr);
+ }
+ }
+ if (fusion->sg_dma_pool) {
+ pci_pool_destroy(fusion->sg_dma_pool);
+ fusion->sg_dma_pool = NULL;
+ }
+ if (fusion->sense_dma_pool) {
+ pci_pool_destroy(fusion->sense_dma_pool);
+ fusion->sense_dma_pool = NULL;
+ }
- req_sz = fusion->request_alloc_sz;
- reply_sz = fusion->reply_alloc_sz;
- io_frames_sz = fusion->io_frames_alloc_sz;
- max_cmds = instance->max_fw_cmds;
+ /* Reply Frame, Desc*/
+ if (instance->is_rdpq)
+ megasas_free_rdpq_fusion(instance);
+ else
+ megasas_free_reply_fusion(instance);
- /* Free descriptors and request Frames memory */
+ /* Request Frame, Desc*/
if (fusion->req_frames_desc)
- dma_free_coherent(&instance->pdev->dev, req_sz,
- fusion->req_frames_desc,
- fusion->req_frames_desc_phys);
-
- if (fusion->reply_frames_desc) {
- pci_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc,
- fusion->reply_frames_desc_phys);
- pci_pool_destroy(fusion->reply_frames_desc_pool);
- }
-
- if (fusion->io_request_frames) {
+ dma_free_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz, fusion->req_frames_desc,
+ fusion->req_frames_desc_phys);
+ if (fusion->io_request_frames)
pci_pool_free(fusion->io_request_frames_pool,
- fusion->io_request_frames,
- fusion->io_request_frames_phys);
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ if (fusion->io_request_frames_pool) {
pci_pool_destroy(fusion->io_request_frames_pool);
+ fusion->io_request_frames_pool = NULL;
}
- /* Free the Fusion frame pool */
- megasas_teardown_frame_pool_fusion(instance);
- /* Free all the commands in the cmd_list */
- for (i = 0; i < max_cmds; i++)
+ /* cmd_list */
+ for (i = 0; i < instance->max_fw_cmds; i++)
kfree(fusion->cmd_list[i]);
- /* Free the cmd_list buffer itself */
kfree(fusion->cmd_list);
- fusion->cmd_list = NULL;
-
}
/**
- * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
+ * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames
* @instance: Adapter soft state
*
*/
-static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
+static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
{
int i;
u32 max_cmd;
@@ -321,25 +348,17 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
max_cmd = instance->max_fw_cmds;
- /*
- * Use DMA pool facility provided by PCI layer
- */
-
- fusion->sg_dma_pool = pci_pool_create("sg_pool_fusion", instance->pdev,
- instance->max_chain_frame_sz,
- 4, 0);
- if (!fusion->sg_dma_pool) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
- return -ENOMEM;
- }
- fusion->sense_dma_pool = pci_pool_create("sense pool fusion",
- instance->pdev,
- SCSI_SENSE_BUFFERSIZE, 64, 0);
+ fusion->sg_dma_pool =
+ pci_pool_create("mr_sg", instance->pdev,
+ instance->max_chain_frame_sz, 4, 0);
+ /* SCSI_SENSE_BUFFERSIZE = 96 bytes */
+ fusion->sense_dma_pool =
+ pci_pool_create("mr_sense", instance->pdev,
+ SCSI_SENSE_BUFFERSIZE, 64, 0);
- if (!fusion->sense_dma_pool) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
- pci_pool_destroy(fusion->sg_dma_pool);
- fusion->sg_dma_pool = NULL;
+ if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
@@ -347,160 +366,280 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
* Allocate and attach a frame to each of the commands in cmd_list
*/
for (i = 0; i < max_cmd; i++) {
-
cmd = fusion->cmd_list[i];
-
cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
- GFP_KERNEL,
- &cmd->sg_frame_phys_addr);
+ GFP_KERNEL, &cmd->sg_frame_phys_addr);
cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- /*
- * megasas_teardown_frame_pool_fusion() takes care of freeing
- * whatever has been allocated
- */
+ GFP_KERNEL, &cmd->sense_phys_addr);
if (!cmd->sg_frame || !cmd->sense) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
- megasas_teardown_frame_pool_fusion(instance);
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
}
return 0;
}
-/**
- * megasas_alloc_cmds_fusion - Allocates the command packets
- * @instance: Adapter soft state
- *
- *
- * Each frame has a 32-bit field called context. This context is used to get
- * back the megasas_cmd_fusion from the frame when a frame gets completed
- * In this driver, the 32 bit values are the indices into an array cmd_list.
- * This array is used only to look up the megasas_cmd_fusion given the context.
- * The free commands themselves are maintained in a linked list called cmd_pool.
- *
- * cmds are formed in the io_request and sg_frame members of the
- * megasas_cmd_fusion. The context field is used to get a request descriptor
- * and is used as SMID of the cmd.
- * SMID value range is from 1 to max_fw_cmds.
- */
int
-megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
- int i, j, count;
- u32 max_cmd, io_frames_sz;
+ u32 max_cmd, i;
struct fusion_context *fusion;
- struct megasas_cmd_fusion *cmd;
- union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
- u32 offset;
- dma_addr_t io_req_base_phys;
- u8 *io_req_base;
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
+ /*
+ * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
+ * Allocate the dynamic array first and then allocate individual
+ * commands.
+ */
+ fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
+ GFP_KERNEL);
+ if (!fusion->cmd_list) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_cmd; i++) {
+ fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
+ GFP_KERNEL);
+ if (!fusion->cmd_list[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+int
+megasas_alloc_request_fusion(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
fusion->req_frames_desc =
dma_alloc_coherent(&instance->pdev->dev,
- fusion->request_alloc_sz,
- &fusion->req_frames_desc_phys, GFP_KERNEL);
-
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
if (!fusion->req_frames_desc) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "request_frames\n");
- goto fail_req_desc;
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
+ fusion->io_request_frames_pool =
+ pci_pool_create("mr_ioreq", instance->pdev,
+ fusion->io_frames_alloc_sz, 16, 0);
+
+ if (!fusion->io_request_frames_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->io_request_frames =
+ pci_pool_alloc(fusion->io_request_frames_pool,
+ GFP_KERNEL, &fusion->io_request_frames_phys);
+ if (!fusion->io_request_frames) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int
+megasas_alloc_reply_fusion(struct megasas_instance *instance)
+{
+ int i, count;
+ struct fusion_context *fusion;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ fusion = instance->ctrl_context;
+
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
fusion->reply_frames_desc_pool =
- pci_pool_create("reply_frames pool", instance->pdev,
+ pci_pool_create("mr_reply", instance->pdev,
fusion->reply_alloc_sz * count, 16, 0);
if (!fusion->reply_frames_desc_pool) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "reply_frame pool\n");
- goto fail_reply_desc;
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
- fusion->reply_frames_desc =
- pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
- &fusion->reply_frames_desc_phys);
- if (!fusion->reply_frames_desc) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "reply_frame pool\n");
- pci_pool_destroy(fusion->reply_frames_desc_pool);
- goto fail_reply_desc;
+ fusion->reply_frames_desc[0] =
+ pci_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
+ if (!fusion->reply_frames_desc[0]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
-
- reply_desc = fusion->reply_frames_desc;
+ reply_desc = fusion->reply_frames_desc[0];
for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = cpu_to_le64(ULLONG_MAX);
- io_frames_sz = fusion->io_frames_alloc_sz;
+ /* This is not RDPQ mode, but the driver still populates the
+ * reply_frames_desc array so the ISR path can use the same
+ * MSI-x index in both modes.
+ */
+ for (i = 0; i < (count - 1); i++)
+ fusion->reply_frames_desc[i + 1] =
+ fusion->reply_frames_desc[i] +
+ (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
+
+ return 0;
+}
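Whether RDPQ or not, the end state is the same: fusion->reply_frames_desc[] holds one descriptor-array base per MSI-x vector, so the ISR indexes reply queues uniformly. In the non-RDPQ case above the per-vector bases are slices of a single contiguous pool allocation; the pointer arithmetic is equivalent to this sketch (assuming reply_alloc_sz is a whole number of descriptors, as the driver sizes it):

	union MPI2_REPLY_DESCRIPTORS_UNION *base = fusion->reply_frames_desc[0];
	size_t per_queue = fusion->reply_alloc_sz /
			   sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
	int q;

	/* Queue q begins per_queue descriptors after queue q - 1. */
	for (q = 1; q < count; q++)
		fusion->reply_frames_desc[q] = base + q * per_queue;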
- fusion->io_request_frames_pool =
- pci_pool_create("io_request_frames pool", instance->pdev,
- fusion->io_frames_alloc_sz, 16, 0);
+int
+megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
+{
+ int i, j, count;
+ struct fusion_context *fusion;
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
- if (!fusion->io_request_frames_pool) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "io_request_frame pool\n");
- goto fail_io_frames;
+ fusion = instance->ctrl_context;
+
+ fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
+ sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
+ &fusion->rdpq_phys);
+ if (!fusion->rdpq_virt) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
- fusion->io_request_frames =
- pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
- &fusion->io_request_frames_phys);
- if (!fusion->io_request_frames) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "io_request_frames frames\n");
- pci_pool_destroy(fusion->io_request_frames_pool);
- goto fail_io_frames;
+ memset(fusion->rdpq_virt, 0,
+ sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
+ count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ fusion->reply_frames_desc_pool = pci_pool_create("mr_rdpq",
+ instance->pdev, fusion->reply_alloc_sz, 16, 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
}
- /*
- * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
- * Allocate the dynamic array first and then allocate individual
- * commands.
- */
- fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
- * max_cmd, GFP_KERNEL);
+ for (i = 0; i < count; i++) {
+ fusion->reply_frames_desc[i] =
+ pci_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
+ if (!fusion->reply_frames_desc[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
- if (!fusion->cmd_list) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
- "memory for cmd_list_fusion\n");
- goto fail_cmd_list;
+ fusion->rdpq_virt[i].RDPQBaseAddress =
+ fusion->reply_frames_desc_phys[i];
+
+ reply_desc = fusion->reply_frames_desc[i];
+ for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
}
+ return 0;
+}
- max_cmd = instance->max_fw_cmds;
- for (i = 0; i < max_cmd; i++) {
- fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
- GFP_KERNEL);
- if (!fusion->cmd_list[i]) {
- dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
+static void
+megasas_free_rdpq_fusion(struct megasas_instance *instance)
+{
- for (j = 0; j < i; j++)
- kfree(fusion->cmd_list[j]);
+ int i;
+ struct fusion_context *fusion;
- kfree(fusion->cmd_list);
- fusion->cmd_list = NULL;
- goto fail_cmd_list;
- }
+ fusion = instance->ctrl_context;
+
+ for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
+ if (fusion->reply_frames_desc[i])
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[i],
+ fusion->reply_frames_desc_phys[i]);
}
- /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */
- io_req_base = fusion->io_request_frames +
- MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
- io_req_base_phys = fusion->io_request_frames_phys +
- MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ if (fusion->reply_frames_desc_pool)
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+
+ if (fusion->rdpq_virt)
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
+ fusion->rdpq_virt, fusion->rdpq_phys);
+}
+
+static void
+megasas_free_reply_fusion(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if (fusion->reply_frames_desc[0])
+ pci_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[0],
+ fusion->reply_frames_desc_phys[0]);
+
+ if (fusion->reply_frames_desc_pool)
+ pci_pool_destroy(fusion->reply_frames_desc_pool);
+}
+
+/**
+ * megasas_alloc_cmds_fusion - Allocates the command packets
+ * @instance: Adapter soft state
+ *
+ *
+ * Each frame has a 32-bit field called context. This context is used to get
+ * back the megasas_cmd_fusion from the frame when a frame gets completed
+ * In this driver, the 32 bit values are the indices into an array cmd_list.
+ * This array is used only to look up the megasas_cmd_fusion given the context.
+ * The free commands themselves are maintained in a linked list called cmd_pool.
+ *
+ * cmds are formed in the io_request and sg_frame members of the
+ * megasas_cmd_fusion. The context field is used to get a request descriptor
+ * and is used as SMID of the cmd.
+ * SMID value range is from 1 to max_fw_cmds.
+ */
+int
+megasas_alloc_cmds_fusion(struct megasas_instance *instance)
+{
+ int i;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *cmd;
+ u32 offset;
+ dma_addr_t io_req_base_phys;
+ u8 *io_req_base;
+
+ fusion = instance->ctrl_context;
+
+ if (megasas_alloc_cmdlist_fusion(instance))
+ goto fail_exit;
+
+ if (megasas_alloc_request_fusion(instance))
+ goto fail_exit;
+
+ if (instance->is_rdpq) {
+ if (megasas_alloc_rdpq_fusion(instance))
+ goto fail_exit;
+ } else if (megasas_alloc_reply_fusion(instance))
+ goto fail_exit;
+
+ /* The first 256 bytes (SMID 0) are not used. Don't add them to the cmd list */
+ io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
+ io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
/*
* Add all the commands to command pool (fusion->cmd_pool)
*/
/* SMID 0 is reserved. Set SMID/index from 1 */
- for (i = 0; i < max_cmd; i++) {
+ for (i = 0; i < instance->max_fw_cmds; i++) {
cmd = fusion->cmd_list[i];
offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
@@ -518,35 +657,13 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
cmd->io_request_phys_addr = io_req_base_phys + offset;
}
- /*
- * Create a frame pool and assign one frame to each cmd
- */
- if (megasas_create_frame_pool_fusion(instance)) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
- megasas_free_cmds_fusion(instance);
- goto fail_req_desc;
- }
+ if (megasas_create_sg_sense_fusion(instance))
+ goto fail_exit;
return 0;
-fail_cmd_list:
- pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
- fusion->io_request_frames_phys);
- pci_pool_destroy(fusion->io_request_frames_pool);
-fail_io_frames:
- dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
- fusion->reply_frames_desc,
- fusion->reply_frames_desc_phys);
- pci_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc,
- fusion->reply_frames_desc_phys);
- pci_pool_destroy(fusion->reply_frames_desc_pool);
-
-fail_reply_desc:
- dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
- fusion->req_frames_desc,
- fusion->req_frames_desc_phys);
-fail_req_desc:
+fail_exit:
+ megasas_free_cmds_fusion(instance);
return -ENOMEM;
}
@@ -576,11 +693,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
msleep(20);
}
- if (frame_hdr->cmd_status == 0xff)
- return -ETIME;
-
- return (frame_hdr->cmd_status == MFI_STAT_OK) ?
- 0 : 1;
+ if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
+ return DCMD_TIMEOUT;
+ else if (frame_hdr->cmd_status == MFI_STAT_OK)
+ return DCMD_SUCCESS;
+ else
+ return DCMD_FAILED;
}
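wait_and_poll() now reports the same tri-state convention as megasas_issue_blocked_cmd(): MFI_STAT_INVALID_STATUS (0xFF, meaning the firmware never wrote a status back) maps to DCMD_TIMEOUT, MFI_STAT_OK to DCMD_SUCCESS, and anything else to DCMD_FAILED. A hedged caller-side sketch, reusing the illustrative dcmd_handle_timeout() helper from earlier:

	ret = (instance->ctrl_context && !instance->mask_interrupts) ?
		megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) :
		megasas_issue_polled(instance, cmd);

	switch (ret) {
	case DCMD_SUCCESS:
		/* consume the DMA'd payload */
		break;
	case DCMD_TIMEOUT:
		dcmd_handle_timeout(instance, cmd);	/* sketch from above */
		break;
	case DCMD_FAILED:
		megaraid_sas_kill_hba(instance);
		break;
	}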
/**
@@ -593,16 +711,17 @@ int
megasas_ioc_init_fusion(struct megasas_instance *instance)
{
struct megasas_init_frame *init_frame;
- struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
+ struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
dma_addr_t ioc_init_handle;
struct megasas_cmd *cmd;
- u8 ret;
+ u8 ret, cur_rdpq_mode;
struct fusion_context *fusion;
union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
int i;
struct megasas_header *frame_hdr;
const char *sys_info;
MFI_CAPABILITIES *drv_ops;
+ u32 scratch_pad_2;
fusion = instance->ctrl_context;
@@ -614,6 +733,18 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
goto fail_get_cmd;
}
+ scratch_pad_2 = readl
+ (&instance->reg_set->outbound_scratch_pad_2);
+
+ cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+
+ if (instance->is_rdpq && !cur_rdpq_mode) {
+ dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
+ " from RDPQ mode to non RDPQ mode\n");
+ ret = 1;
+ goto fail_fw_init;
+ }
+
IOCInitMessage =
dma_alloc_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -635,7 +766,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
- IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
+ IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
+ cpu_to_le64(fusion->rdpq_phys) :
+ cpu_to_le64(fusion->reply_frames_desc_phys[0]);
+ IOCInitMessage->MsgFlags = instance->is_rdpq ?
+ MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
init_frame = (struct megasas_init_frame *)cmd->frame;
@@ -665,6 +800,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
drv_ops->mfi_capabilities.support_ext_io_size = 1;
+ drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
+ if (!dual_qdepth_disable)
+ drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
+
+ drv_ops->mfi_capabilities.support_qd_throttling = 1;
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -784,7 +924,8 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
/* Below code is only for non pended DCMD */
if (instance->ctrl_context && !instance->mask_interrupts)
- ret = megasas_issue_blocked_cmd(instance, cmd, 60);
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -795,7 +936,10 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
ret = -EINVAL;
}
- if (!ret)
+ if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ megaraid_sas_kill_hba(instance);
+
+ if (ret == DCMD_SUCCESS)
instance->pd_seq_map_id++;
megasas_return_cmd(instance, cmd);
@@ -875,10 +1019,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
if (instance->ctrl_context && !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
- MEGASAS_BLOCKED_CMD_TIMEOUT);
+ MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
+ if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ megaraid_sas_kill_hba(instance);
+
megasas_return_cmd(instance, cmd);
return ret;
@@ -1072,12 +1219,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
reg_set = instance->reg_set;
- /*
- * Get various operational parameters from status register
- */
- instance->max_fw_cmds =
- instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
- instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
+ megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
/*
* Reduce the max supported cmds by 1. This is to ensure that the
@@ -1658,7 +1800,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
local_map_ptr, start_lba_lo);
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
+ (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (fusion->adapter_type == INVADER_SERIES) {
if (io_request->RaidContext.regLockFlags ==
@@ -1702,8 +1844,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
if (fusion->adapter_type == INVADER_SERIES) {
- if (io_request->RaidContext.regLockFlags ==
- REGION_TYPE_UNUSED)
+ if (io_info.do_fp_rlbypass ||
+ (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -1791,7 +1933,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
/* build request descriptor */
cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
cmd->request_desc->SCSIIO.DevHandle = devHandle;
@@ -1897,7 +2039,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
}
cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
}
@@ -2035,13 +2177,21 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
fusion = instance->ctrl_context;
+ if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
+ instance->ldio_threshold &&
+ (atomic_inc_return(&instance->ldio_outstanding) >
+ instance->ldio_threshold)) {
+ atomic_dec(&instance->ldio_outstanding);
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+
cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
index = cmd->index;
req_desc = megasas_get_request_descriptor(instance, index-1);
if (!req_desc)
- return 1;
+ return SCSI_MLQUEUE_HOST_BUSY;
req_desc->Words = 0;
cmd->request_desc = req_desc;
@@ -2050,7 +2200,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
megasas_return_cmd_fusion(instance, cmd);
dev_err(&instance->pdev->dev, "Error building command\n");
cmd->request_desc = NULL;
- return 1;
+ return SCSI_MLQUEUE_HOST_BUSY;
}
req_desc = cmd->request_desc;
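This queuecommand hunk is the consumer side of the dual queue depth negotiated in megasas_fusion_update_can_queue(): read/write LD I/O is counted in ldio_outstanding and bounced back with SCSI_MLQUEUE_DEVICE_BUSY once it exceeds ldio_threshold, while the completion path later in this patch decrements the counter. atomic_inc_return() makes the check-then-count racefree. A condensed sketch of both halves (the helper name is illustrative):

	/* Submit side: returns true when the caller should push back. */
	static bool ldio_over_threshold(struct megasas_instance *instance,
					struct scsi_cmnd *scmd)
	{
		if (megasas_cmd_type(scmd) != READ_WRITE_LDIO ||
		    !instance->ldio_threshold)
			return false;
		if (atomic_inc_return(&instance->ldio_outstanding) >
		    instance->ldio_threshold) {
			atomic_dec(&instance->ldio_outstanding); /* undo */
			return true;	/* -> SCSI_MLQUEUE_DEVICE_BUSY */
		}
		return false;
	}

	/* Completion side (see complete_cmd_fusion() below):
	 *	if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
	 *		atomic_dec(&instance->ldio_outstanding);
	 */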
@@ -2092,16 +2242,16 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
struct LD_LOAD_BALANCE_INFO *lbinfo;
int threshold_reply_count = 0;
struct scsi_cmnd *scmd_local = NULL;
+ struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
+ struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
fusion = instance->ctrl_context;
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
return IRQ_HANDLED;
- desc = fusion->reply_frames_desc;
- desc += ((MSIxIndex * fusion->reply_alloc_sz)/
- sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
- fusion->last_reply_idx[MSIxIndex];
+ desc = fusion->reply_frames_desc[MSIxIndex] +
+ fusion->last_reply_idx[MSIxIndex];
reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
@@ -2133,6 +2283,16 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
extStatus = scsi_io_req->RaidContext.exStatus;
switch (scsi_io_req->Function) {
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
+ cmd_fusion->io_request;
+ mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
+ &mr_tm_req->TmRequest;
+ dev_dbg(&instance->pdev->dev, "TM completion: "
+ "type: 0x%x TaskMID: 0x%x\n",
+ mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
+ complete(&cmd_fusion->done);
+ break;
case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
/* Update load balancing info */
device_id = MEGASAS_DEV_INDEX(scmd_local);
@@ -2155,6 +2315,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
map_cmd_status(cmd_fusion, status, extStatus);
scsi_io_req->RaidContext.status = 0;
scsi_io_req->RaidContext.exStatus = 0;
+ if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+ atomic_dec(&instance->ldio_outstanding);
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
scmd_local->scsi_done(scmd_local);
@@ -2186,9 +2348,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
/* Get the next reply descriptor */
if (!fusion->last_reply_idx[MSIxIndex])
- desc = fusion->reply_frames_desc +
- ((MSIxIndex * fusion->reply_alloc_sz)/
- sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
+ desc = fusion->reply_frames_desc[MSIxIndex];
else
desc++;
@@ -2254,7 +2414,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
/* If we have already declared adapter dead, donot complete cmds */
spin_lock_irqsave(&instance->hba_lock, flags);
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
return;
}
@@ -2411,7 +2571,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
* @cmd: mfi cmd pointer
*
*/
-void
+int
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
struct megasas_cmd *cmd)
{
@@ -2419,10 +2579,13 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
req_desc = build_mpt_cmd(instance, cmd);
if (!req_desc) {
- dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
- return;
+ dev_info(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return DCMD_NOT_FIRED;
}
+
megasas_fire_cmd_fusion(instance, req_desc);
+ return DCMD_SUCCESS;
}
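Turning megasas_issue_dcmd_fusion() into an int lets callers distinguish "never handed to the firmware" from a timeout: the ioctl path earlier in this patch converts DCMD_NOT_FIRED into -EBUSY instead of sleeping on a command that was never queued. A hedged sketch of the resulting contract inside a blocked issue (the wait itself is elided):

	cmd->sync_cmd = 1;
	if (instance->instancet->issue_dcmd(instance, cmd) == DCMD_NOT_FIRED) {
		cmd->sync_cmd = 0;
		return DCMD_NOT_FIRED;	/* ioctl path maps this to -EBUSY */
	}
	/* ...otherwise wait for completion, honouring the timeout... */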
/**
@@ -2583,7 +2746,7 @@ megasas_check_reset_fusion(struct megasas_instance *instance,
/* This function waits for outstanding commands on fusion to complete */
int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
- int iotimeout, int *convert)
+ int reason, int *convert)
{
int i, outstanding, retval = 0, hb_seconds_missed = 0;
u32 fw_state;
@@ -2599,14 +2762,22 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
retval = 1;
goto out;
}
+
+ if (reason == MFI_IO_TIMEOUT_OCR) {
+ dev_info(&instance->pdev->dev,
+ "MFI IO is timed out, initiating OCR\n");
+ retval = 1;
+ goto out;
+ }
+
/* If SR-IOV VF mode & heartbeat timeout, don't wait */
- if (instance->requestorId && !iotimeout) {
+ if (instance->requestorId && !reason) {
retval = 1;
goto out;
}
/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
- if (instance->requestorId && iotimeout) {
+ if (instance->requestorId && reason) {
if (instance->hb_host_mem->HB.fwCounter !=
instance->hb_host_mem->HB.driverCounter) {
instance->hb_host_mem->HB.driverCounter =
@@ -2655,17 +2826,18 @@ out:
void megasas_reset_reply_desc(struct megasas_instance *instance)
{
- int i, count;
+ int i, j, count;
struct fusion_context *fusion;
union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
fusion = instance->ctrl_context;
count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
- for (i = 0 ; i < count ; i++)
+ for (i = 0 ; i < count ; i++) {
fusion->last_reply_idx[i] = 0;
- reply_desc = fusion->reply_frames_desc;
- for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
- reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ reply_desc = fusion->reply_frames_desc[i];
+ for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ }
}
/*
@@ -2680,6 +2852,7 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u16 smid;
+ bool refire_cmd = 0;
fusion = instance->ctrl_context;
@@ -2695,42 +2868,500 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
continue;
req_desc = megasas_get_request_descriptor
(instance, smid - 1);
- if (req_desc && ((cmd_mfi->frame->dcmd.opcode !=
+ refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
(cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))))
+ cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
+ && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+ if (refire_cmd)
megasas_fire_cmd_fusion(instance, req_desc);
else
megasas_return_cmd(instance, cmd_mfi);
}
}
+/*
+ * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
+ * @instance: per adapter struct
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ *
+ * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED
+ */
+
+static int megasas_track_scsiio(struct megasas_instance *instance,
+ int id, int channel)
+{
+ int i, found = 0;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+ fusion = instance->ctrl_context;
+
+ for (i = 0 ; i < instance->max_scsi_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd &&
+ (cmd_fusion->scmd->device->id == id &&
+ cmd_fusion->scmd->device->channel == channel)) {
+ dev_info(&instance->pdev->dev,
+ "SCSI commands pending to target"
+ "channel %d id %d \tSMID: 0x%x\n",
+ channel, id, cmd_fusion->index);
+ scsi_print_command(cmd_fusion->scmd);
+ found = 1;
+ break;
+ }
+ }
+
+ return found ? FAILED : SUCCESS;
+}
+
+/**
+ * megasas_tm_response_code - translation of device response code
+ * @instance: per adapter object
+ * @mpi_reply: MPI reply returned by firmware
+ *
+ * Return nothing.
+ */
+static void
+megasas_tm_response_code(struct megasas_instance *instance,
+ struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
+{
+ char *desc;
+
+ switch (mpi_reply->ResponseCode) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case 0xA:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
+ mpi_reply->ResponseCode, desc);
+ dev_dbg(&instance->pdev->dev,
+ "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
+ " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
+ mpi_reply->TerminationCount, mpi_reply->DevHandle,
+ mpi_reply->Function, mpi_reply->TaskType,
+ mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
+}
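These response codes surface through the new error-handling entry points registered in megasas_io_attach() earlier in this patch (megasas_task_abort_fusion and megasas_reset_target_fusion). A hedged sketch of how an abort handler drives megasas_issue_tm(); the device-handle lookup and reset_mutex handling are elided, and the 1-based SMID derivation is an assumption about this driver revision:

	static int abort_one_task(struct megasas_instance *instance,
				  struct scsi_cmnd *scmd, u16 devhandle)
	{
		u16 smid = scmd->request->tag + 1;	/* SMID 0 is reserved */

		return megasas_issue_tm(instance, devhandle,
					scmd->device->channel,
					scmd->device->id, smid,
					MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
	}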
+
+/**
+ * megasas_issue_tm - main routine for sending tm requests
+ * @instance: per adapter struct
+ * @device_handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in megaraid_sas_fusion.c)
+ * @smid_task: smid assigned to the task
+ * Context: user
+ *
+ * MegaRaid uses the MPT interface for Task Management requests.
+ * A generic API for sending task management requests to firmware.
+ *
+ * Return SUCCESS or FAILED.
+ */
+static int
+megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
+ uint channel, uint id, u16 smid_task, u8 type)
+{
+ struct MR_TASK_MANAGE_REQUEST *mr_request;
+ struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
+ unsigned long timeleft;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct megasas_cmd *cmd_mfi;
+ union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ struct fusion_context *fusion;
+ struct megasas_cmd_fusion *scsi_lookup;
+ int rc;
+ struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
+
+ fusion = instance->ctrl_context;
+
+ cmd_mfi = megasas_get_cmd(instance);
+
+ if (!cmd_mfi) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cmd_fusion = megasas_get_cmd_fusion(instance,
+ instance->max_scsi_cmds + cmd_mfi->index);
+
+ /* Save the smid. To be used for returning the cmd */
+ cmd_mfi->context.smid = cmd_fusion->index;
+
+ req_desc = megasas_get_request_descriptor(instance,
+ (cmd_fusion->index - 1));
+ if (!req_desc) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ megasas_return_cmd(instance, cmd_mfi);
+ return -ENOMEM;
+ }
+
+ cmd_fusion->request_desc = req_desc;
+ req_desc->Words = 0;
+
+ scsi_lookup = fusion->cmd_list[smid_task - 1];
+
+ mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
+ memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
+ mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(device_handle);
+ mpi_request->TaskType = type;
+ mpi_request->TaskMID = cpu_to_le16(smid_task);
+ mpi_request->LUN[1] = 0;
+
+ req_desc = cmd_fusion->request_desc;
+ req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
+ req_desc->HighPriority.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ req_desc->HighPriority.MSIxIndex = 0;
+ req_desc->HighPriority.LMID = 0;
+ req_desc->HighPriority.Reserved1 = 0;
+
+ if (channel < MEGASAS_MAX_PD_CHANNELS)
+ mr_request->tmReqFlags.isTMForPD = 1;
+ else
+ mr_request->tmReqFlags.isTMForLD = 1;
+
+ init_completion(&cmd_fusion->done);
+ megasas_fire_cmd_fusion(instance, req_desc);
+
+ timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
+
+ if (!timeleft) {
+ dev_err(&instance->pdev->dev,
+ "task mgmt type 0x%x timed out\n", type);
+ cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
+ mutex_unlock(&instance->reset_mutex);
+ rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
+ mutex_lock(&instance->reset_mutex);
+ return rc;
+ }
+
+ mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
+ megasas_tm_response_code(instance, mpi_reply);
+
+ megasas_return_cmd(instance, cmd_mfi);
+ rc = SUCCESS;
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ if (scsi_lookup->scmd == NULL)
+ break;
+ else {
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+ megasas_complete_cmd_dpc_fusion
+ ((unsigned long)instance);
+ instance->instancet->enable_intr(instance);
+ if (scsi_lookup->scmd == NULL)
+ break;
+ }
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
+ break;
+ instance->instancet->disable_intr(instance);
+ msleep(1000);
+ megasas_complete_cmd_dpc_fusion
+ ((unsigned long)instance);
+ rc = megasas_track_scsiio(instance, id, channel);
+ instance->instancet->enable_intr(instance);
+
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ return rc;
+
+}
+
+/*
+ * megasas_fusion_smid_lookup : Look for fusion command corresponding to SCSI command
+ * @scmd: SCSI command object
+ *
+ * Returns a non-zero SMID if the command is found among outstanding commands
+ */
+static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
+{
+ int i, ret = 0;
+ struct megasas_instance *instance;
+ struct megasas_cmd_fusion *cmd_fusion;
+ struct fusion_context *fusion;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ fusion = instance->ctrl_context;
+
+ for (i = 0; i < instance->max_scsi_cmds; i++) {
+ cmd_fusion = fusion->cmd_list[i];
+ if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
+ scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
+ " SMID: %d\n", cmd_fusion->index);
+ ret = cmd_fusion->index;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * megasas_get_tm_devhandle - Get devhandle for TM request
+ * @sdev: OS provided scsi device
+ *
+ * Returns: devhandle/targetID of SCSI device
+ */
+static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
+{
+ u16 pd_index = 0;
+ u32 device_id;
+ struct megasas_instance *instance;
+ struct fusion_context *fusion;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+ u16 devhandle = (u16)ULONG_MAX;
+
+ instance = (struct megasas_instance *)sdev->host->hostdata;
+ fusion = instance->ctrl_context;
+
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+ if (instance->use_seqnum_jbod_fp) {
+ pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
+ pd_sync = (void *)fusion->pd_seq_sync
+ [(instance->pd_seq_map_id - 1) & 1];
+ devhandle = pd_sync->seq[pd_index].devHandle;
+ } else
+ sdev_printk(KERN_ERR, sdev, "Firmware exposes tmCapable"
+ " without JBOD MAP support from %s %d\n", __func__, __LINE__);
+ } else {
+ device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
+ + sdev->id;
+ devhandle = device_id;
+ }
+
+ return devhandle;
+}
+
+/*
+ * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
+ * @scmd : pointer to scsi command object
+ *
+ * Returns SUCCESS if the command was aborted, else FAILED
+ */
+
+int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
+{
+ struct megasas_instance *instance;
+ u16 smid, devhandle;
+ struct fusion_context *fusion;
+ int ret;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ mr_device_priv_data = scmd->device->hostdata;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ fusion = instance->ctrl_context;
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
+ "SCSI host:%d\n", instance->host->host_no);
+ ret = FAILED;
+ return ret;
+ }
+
+ if (!mr_device_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device, "device has been deleted! "
+ "scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ ret = SUCCESS;
+ goto out;
+ }
+
+ if (!mr_device_priv_data->is_tm_capable) {
+ ret = FAILED;
+ goto out;
+ }
+
+ mutex_lock(&instance->reset_mutex);
+
+ smid = megasas_fusion_smid_lookup(scmd);
+
+ if (!smid) {
+ ret = SUCCESS;
+ scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
+ " issued is not found in oustanding commands\n");
+ mutex_unlock(&instance->reset_mutex);
+ goto out;
+ }
+
+ devhandle = megasas_get_tm_devhandle(scmd->device);
+
+ if (devhandle == (u16)ULONG_MAX) {
+ ret = SUCCESS;
+ sdev_printk(KERN_INFO, scmd->device,
+ "task abort issued for invalid devhandle\n");
+ mutex_unlock(&instance->reset_mutex);
+ goto out;
+ }
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
+ scmd, devhandle);
+
+ mr_device_priv_data->tm_busy = 1;
+ ret = megasas_issue_tm(instance, devhandle,
+ scmd->device->channel, scmd->device->id, smid,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
+ mr_device_priv_data->tm_busy = 0;
+
+ mutex_unlock(&instance->reset_mutex);
+out:
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return ret;
+}
+
+/*
+ * megasas_reset_target_fusion : target reset function for fusion adapters
+ * @scmd: SCSI command pointer
+ *
+ * Returns SUCCESS if all commands associated with the target are aborted, else FAILED
+ */
+
+int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
+{
+
+ struct megasas_instance *instance;
+ int ret = FAILED;
+ u16 devhandle;
+ struct fusion_context *fusion;
+ struct MR_PRIV_DEVICE *mr_device_priv_data;
+ mr_device_priv_data = scmd->device->hostdata;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+ fusion = instance->ctrl_context;
+
+ if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+ dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
+ "SCSI host:%d\n", instance->host->host_no);
+ ret = FAILED;
+ return ret;
+ }
+
+ if (!mr_device_priv_data) {
+ sdev_printk(KERN_INFO, scmd->device, "device has been deleted! "
+ "scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ ret = SUCCESS;
+ goto out;
+ }
+
+ if (!mr_device_priv_data->is_tm_capable) {
+ ret = FAILED;
+ goto out;
+ }
+
+ mutex_lock(&instance->reset_mutex);
+ devhandle = megasas_get_tm_devhandle(scmd->device);
+
+ if (devhandle == (u16)ULONG_MAX) {
+ ret = SUCCESS;
+ sdev_printk(KERN_INFO, scmd->device,
+ "target reset issued for invalid devhandle\n");
+ mutex_unlock(&instance->reset_mutex);
+ goto out;
+ }
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
+ scmd, devhandle);
+ mr_device_priv_data->tm_busy = 1;
+ ret = megasas_issue_tm(instance, devhandle,
+ scmd->device->channel, scmd->device->id, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
+ mr_device_priv_data->tm_busy = 0;
+ mutex_unlock(&instance->reset_mutex);
+out:
+ scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
+ (ret == SUCCESS) ? "SUCCESS" : "FAILED");
+
+ return ret;
+}
+
+/*SRIOV get other instance in cluster if any*/
+struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
+{
+ int i;
+
+ for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
+ if (megasas_mgmt_info.instance[i] &&
+ (megasas_mgmt_info.instance[i] != instance) &&
+ megasas_mgmt_info.instance[i]->requestorId &&
+ megasas_mgmt_info.instance[i]->peerIsPresent &&
+ (memcmp((megasas_mgmt_info.instance[i]->clusterId),
+ instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
+ return megasas_mgmt_info.instance[i];
+ }
+ return NULL;
+}
+
/* Check for a second path that is currently UP */
int megasas_check_mpio_paths(struct megasas_instance *instance,
struct scsi_cmnd *scmd)
{
- int i, j, retval = (DID_RESET << 16);
-
- if (instance->mpio && instance->requestorId) {
- for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
- for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
- if (megasas_mgmt_info.instance[i] &&
- (megasas_mgmt_info.instance[i] != instance) &&
- megasas_mgmt_info.instance[i]->mpio &&
- megasas_mgmt_info.instance[i]->requestorId
- &&
- (megasas_mgmt_info.instance[i]->ld_ids[j]
- == scmd->device->id)) {
- retval = (DID_NO_CONNECT << 16);
- goto out;
- }
+ struct megasas_instance *peer_instance = NULL;
+ int retval = (DID_RESET << 16);
+
+ if (instance->peerIsPresent) {
+ peer_instance = megasas_get_peer_instance(instance);
+ if ((peer_instance) &&
+ (atomic_read(&peer_instance->adprecovery) ==
+ MEGASAS_HBA_OPERATIONAL))
+ retval = (DID_NO_CONNECT << 16);
}
-out:
return retval;
}
/* Core fusion reset function */
-int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
+int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
{
int retval = SUCCESS, i, convert = 0;
struct megasas_instance *instance;
@@ -2739,13 +3370,14 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
u32 abs_state, status_reg, reset_adapter;
u32 io_timeout_in_crash_mode = 0;
struct scsi_cmnd *scmd_local = NULL;
+ struct scsi_device *sdev;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
mutex_lock(&instance->reset_mutex);
- if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
dev_warn(&instance->pdev->dev, "Hardware critical error, "
"returning FAILED for scsi%d.\n",
instance->host->host_no);
@@ -2757,10 +3389,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
/* IO timeout detected, forcibly put FW in FAULT state */
if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
- instance->crash_dump_app_support && iotimeout) {
- dev_info(&instance->pdev->dev, "IO timeout is detected, "
+ instance->crash_dump_app_support && reason) {
+ dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
"forcibly FAULT Firmware\n");
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
status_reg = readl(&instance->reg_set->doorbell);
writel(status_reg | MFI_STATE_FORCE_OCR,
&instance->reg_set->doorbell);
@@ -2772,10 +3404,10 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
dev_dbg(&instance->pdev->dev, "waiting for [%d] "
"seconds for crash dump collection and OCR "
"to be done\n", (io_timeout_in_crash_mode * 3));
- } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
(io_timeout_in_crash_mode < 80));
- if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
dev_info(&instance->pdev->dev, "OCR done for IO "
"timeout case\n");
retval = SUCCESS;
@@ -2792,18 +3424,18 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
- instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
instance->instancet->disable_intr(instance);
msleep(1000);
/* First try waiting for commands to complete */
- if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
+ if (megasas_wait_for_outstanding_fusion(instance, reason,
&convert)) {
- instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
dev_warn(&instance->pdev->dev, "resetting fusion "
"adapter scsi%d.\n", instance->host->host_no);
if (convert)
- iotimeout = 0;
+ reason = 0;
/* Now return commands back to the OS */
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
@@ -2813,6 +3445,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
scmd_local->result =
megasas_check_mpio_paths(instance,
scmd_local);
+ if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+ atomic_dec(&instance->ldio_outstanding);
megasas_return_cmd_fusion(instance, cmd_fusion);
scsi_dma_unmap(scmd_local);
scmd_local->scsi_done(scmd_local);
@@ -2837,55 +3471,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
}
/* Let SR-IOV VF & PF sync up if there was a HB failure */
- if (instance->requestorId && !iotimeout) {
+ if (instance->requestorId && !reason) {
msleep(MEGASAS_OCR_SETTLE_TIME_VF);
- /* Look for a late HB update after VF settle time */
- if (abs_state == MFI_STATE_OPERATIONAL &&
- (instance->hb_host_mem->HB.fwCounter !=
- instance->hb_host_mem->HB.driverCounter)) {
- instance->hb_host_mem->HB.driverCounter =
- instance->hb_host_mem->HB.fwCounter;
- dev_warn(&instance->pdev->dev, "SR-IOV:"
- "Late FW heartbeat update for "
- "scsi%d.\n",
- instance->host->host_no);
- } else {
- /* In VF mode, first poll for FW ready */
- for (i = 0;
- i < (MEGASAS_RESET_WAIT_TIME * 1000);
- i += 20) {
- status_reg =
- instance->instancet->
- read_fw_status_reg(
- instance->reg_set);
- abs_state = status_reg &
- MFI_STATE_MASK;
- if (abs_state == MFI_STATE_READY) {
- dev_warn(&instance->pdev->dev,
- "SR-IOV: FW was found"
- "to be in ready state "
- "for scsi%d.\n",
- instance->host->host_no);
- break;
- }
- msleep(20);
- }
- if (abs_state != MFI_STATE_READY) {
- dev_warn(&instance->pdev->dev, "SR-IOV: "
- "FW not in ready state after %d"
- " seconds for scsi%d, status_reg = "
- "0x%x.\n",
- MEGASAS_RESET_WAIT_TIME,
- instance->host->host_no,
- status_reg);
- megaraid_sas_kill_hba(instance);
- instance->skip_heartbeat_timer_del = 1;
- instance->adprecovery =
- MEGASAS_HW_CRITICAL_ERROR;
- retval = FAILED;
- goto out;
- }
- }
+ goto transition_to_ready;
}
/* Now try to reset the chip */
@@ -2894,23 +3482,28 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
if (instance->instancet->adp_reset
(instance, instance->reg_set))
continue;
-
+transition_to_ready:
/* Wait for FW to become ready */
if (megasas_transition_to_ready(instance, 1)) {
- dev_warn(&instance->pdev->dev, "Failed to "
- "transition controller to ready "
- "for scsi%d.\n",
- instance->host->host_no);
- continue;
+ dev_warn(&instance->pdev->dev,
+ "Failed to transition controller to ready for "
+ "scsi%d.\n", instance->host->host_no);
+ if (instance->requestorId && !reason)
+ goto fail_kill_adapter;
+ else
+ continue;
}
-
megasas_reset_reply_desc(instance);
+ megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
+
if (megasas_ioc_init_fusion(instance)) {
dev_warn(&instance->pdev->dev,
- "megasas_ioc_init_fusion() failed!"
- " for scsi%d\n",
- instance->host->host_no);
- continue;
+ "megasas_ioc_init_fusion() failed! for "
+ "scsi%d\n", instance->host->host_no);
+ if (instance->requestorId && !reason)
+ goto fail_kill_adapter;
+ else
+ continue;
}
megasas_refire_mgmt_cmd(instance);
@@ -2932,10 +3525,13 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
megasas_setup_jbod_map(instance);
+ shost_for_each_device(sdev, shost)
+ megasas_update_sdev_properties(sdev);
+
clear_bit(MEGASAS_FUSION_IN_RESET,
&instance->reset_flags);
instance->instancet->enable_intr(instance);
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
/* Restart SR-IOV heartbeat */
if (instance->requestorId) {
@@ -2964,6 +3560,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
retval = SUCCESS;
goto out;
}
+fail_kill_adapter:
/* Reset failed, kill the adapter */
dev_warn(&instance->pdev->dev, "Reset failed, killing "
"adapter scsi%d.\n", instance->host->host_no);
@@ -2980,7 +3577,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
}
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance);
- instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
}
out:
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 473005c99b44..80eaee22f5bc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -176,7 +176,9 @@ enum REGION_TYPE {
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
-#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
+#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
@@ -277,6 +279,100 @@ union MPI2_SCSI_IO_CDB_UNION {
struct MPI2_SGE_SIMPLE_UNION SGE;
};
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/*SCSI Task Management Request Message */
+struct MPI2_SCSI_TASK_MANAGE_REQUEST {
+ u16 DevHandle; /*0x00 */
+ u8 ChainOffset; /*0x02 */
+ u8 Function; /*0x03 */
+ u8 Reserved1; /*0x04 */
+ u8 TaskType; /*0x05 */
+ u8 Reserved2; /*0x06 */
+ u8 MsgFlags; /*0x07 */
+ u8 VP_ID; /*0x08 */
+ u8 VF_ID; /*0x09 */
+ u16 Reserved3; /*0x0A */
+ u8 LUN[8]; /*0x0C */
+ u32 Reserved4[7]; /*0x14 */
+ u16 TaskMID; /*0x30 */
+ u16 Reserved5; /*0x32 */
+};
+
+
+/*SCSI Task Management Reply Message */
+struct MPI2_SCSI_TASK_MANAGE_REPLY {
+ u16 DevHandle; /*0x00 */
+ u8 MsgLength; /*0x02 */
+ u8 Function; /*0x03 */
+ u8 ResponseCode; /*0x04 */
+ u8 TaskType; /*0x05 */
+ u8 Reserved1; /*0x06 */
+ u8 MsgFlags; /*0x07 */
+ u8 VP_ID; /*0x08 */
+ u8 VF_ID; /*0x09 */
+ u16 Reserved2; /*0x0A */
+ u16 Reserved3; /*0x0C */
+ u16 IOCStatus; /*0x0E */
+ u32 IOCLogInfo; /*0x10 */
+ u32 TerminationCount; /*0x14 */
+ u32 ResponseInfo; /*0x18 */
+};
+
+struct MR_TM_REQUEST {
+ char request[128];
+};
+
+struct MR_TM_REPLY {
+ char reply[128];
+};
+
+/* SCSI Task Management Request Message */
+struct MR_TASK_MANAGE_REQUEST {
+ /* To be cast to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
+ struct MR_TM_REQUEST TmRequest;
+ union {
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved1:30;
+ u32 isTMForPD:1;
+ u32 isTMForLD:1;
+#else
+ u32 isTMForLD:1;
+ u32 isTMForPD:1;
+ u32 reserved1:30;
+#endif
+ u32 reserved2;
+ } tmReqFlags;
+ struct MR_TM_REPLY TMReply;
+ };
+};
+
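A minimal sketch (not from the patch) of what the "cast" comment above means in practice: the driver fills the 128-byte TmRequest buffer through an MPI2_SCSI_TASK_MANAGE_REQUEST view, exactly as megasas_issue_tm() does in the C file earlier in this diff. fill_tm_abort is an illustrative name, not a driver symbol:

	static void fill_tm_abort(struct MR_TASK_MANAGE_REQUEST *mr_request,
				  u16 devhandle, u16 smid_of_task)
	{
		struct MPI2_SCSI_TASK_MANAGE_REQUEST *tm =
			(struct MPI2_SCSI_TASK_MANAGE_REQUEST *)&mr_request->TmRequest;

		memset(mr_request, 0, sizeof(*mr_request));
		tm->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
		tm->DevHandle = cpu_to_le16(devhandle);
		tm->TaskMID = cpu_to_le16(smid_of_task);
	}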
+/* TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/* ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
/*
* RAID SCSI IO Request Message
* Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
@@ -547,7 +643,9 @@ struct MR_SPAN_BLOCK_INFO {
struct MR_LD_RAID {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved4:7;
+ u32 reserved4:5;
+ u32 fpBypassRegionLock:1;
+ u32 tmCapable:1;
u32 fpNonRWCapable:1;
u32 fpReadAcrossStripe:1;
u32 fpWriteAcrossStripe:1;
@@ -569,7 +667,9 @@ struct MR_LD_RAID {
u32 fpWriteAcrossStripe:1;
u32 fpReadAcrossStripe:1;
u32 fpNonRWCapable:1;
- u32 reserved4:7;
+ u32 tmCapable:1;
+ u32 fpBypassRegionLock:1;
+ u32 reserved4:5;
#endif
} capability;
__le32 reserved6;
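With the new bits, logical-drive TM support can be tested straight off the RAID map entry. A one-line hedged sketch (the helper name is assumed, not a driver symbol):

	static bool ld_supports_tm(const struct MR_LD_RAID *raid)
	{
		return raid->capability.tmCapable;
	}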
@@ -639,7 +739,7 @@ struct IO_REQUEST_INFO {
u8 fpOkForIo;
u8 IoforUnevenSpan;
u8 start_span;
- u8 reserved;
+ u8 do_fp_rlbypass;
u64 start_row;
u8 span_arm; /* span[7:5], arm[4:0] */
u8 pd_after_lb;
@@ -694,6 +794,7 @@ struct megasas_cmd_fusion {
u32 sync_cmd_idx;
u32 index;
u8 pd_r1_lb;
+ struct completion done;
};
struct LD_LOAD_BALANCE_INFO {
@@ -807,9 +908,18 @@ struct MR_FW_RAID_MAP_EXT {
* * define MR_PD_CFG_SEQ structure for system PDs
* */
struct MR_PD_CFG_SEQ {
- __le16 seqNum;
- __le16 devHandle;
- u8 reserved[4];
+ u16 seqNum;
+ u16 devHandle;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:7;
+ u8 tmCapable:1;
+#else
+ u8 tmCapable:1;
+ u8 reserved:7;
+#endif
+ } capability;
+ u8 reserved[3];
} __packed;
struct MR_PD_CFG_SEQ_NUM_SYNC {
@@ -818,6 +928,12 @@ struct MR_PD_CFG_SEQ_NUM_SYNC {
struct MR_PD_CFG_SEQ seq[1];
} __packed;
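The per-PD capability bit rides along with the devhandle in the JBOD sequence map, so the same indexing used by megasas_get_tm_devhandle() above applies. A hedged sketch (pd_supports_tm is an assumed name):

	static bool pd_supports_tm(struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync,
				   u16 pd_index)
	{
		return pd_sync->seq[pd_index].capability.tmCapable;
	}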
+struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+ u64 RDPQBaseAddress;
+ u32 Reserved1;
+ u32 Reserved2;
+};
+
struct fusion_context {
struct megasas_cmd_fusion **cmd_list;
dma_addr_t req_frames_desc_phys;
@@ -830,8 +946,8 @@ struct fusion_context {
struct dma_pool *sg_dma_pool;
struct dma_pool *sense_dma_pool;
- dma_addr_t reply_frames_desc_phys;
- union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
+ dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
+ union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
struct dma_pool *reply_frames_desc_pool;
u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
@@ -841,6 +957,8 @@ struct fusion_context {
u32 reply_alloc_sz;
u32 io_frames_alloc_sz;
+ struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
+ dma_addr_t rdpq_phys;
u16 max_sge_in_main_msg;
u16 max_sge_in_chain;
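The new rdpq_virt array plus the per-queue reply_frames_desc_phys[] addresses imply that IOC INIT now hands the firmware one reply descriptor post queue base address per MSI-x vector instead of a single queue. A hedged sketch of that population step, assuming msix_count queues are in use (the helper name is illustrative):

	static void megasas_fill_rdpq(struct fusion_context *fusion, int msix_count)
	{
		int i;

		for (i = 0; i < msix_count; i++) {
			/* One entry per reply queue; firmware reads this
			 * array through the address passed in IOC INIT. */
			fusion->rdpq_virt[i].RDPQBaseAddress =
				cpu_to_le64(fusion->reply_frames_desc_phys[i]);
			fusion->rdpq_virt[i].Reserved1 = 0;
			fusion->rdpq_virt[i].Reserved2 = 0;
		}
	}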
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 555367f00228..1753e42826dd 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -29,6 +29,7 @@
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
+#include <linux/pci.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
@@ -38,7 +39,6 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
#include <asm/macio.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index ec27ad2d186f..dfad5b8c1890 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.35
+ * mpi2.h Version: 02.00.39
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -92,6 +92,14 @@
* 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT.
* 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT
* 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-14 02.00.36 Updated copyright information.
+ * Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-16-15 02.00.37 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Scratchpad registers to
+ * MPI2_SYSTEM_INTERFACE_REGS.
+ * Added MPI2_DIAG_SBR_RELOAD.
+ * 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -124,8 +132,14 @@
MPI25_VERSION_MINOR)
#define MPI2_VERSION_02_05 (0x0205)
+/*minor version for MPI v2.6 compatible products */
+#define MPI26_VERSION_MINOR (0x06)
+#define MPI26_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI26_VERSION_MINOR)
+#define MPI2_VERSION_02_06 (0x0206)
+
/*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x23)
+#define MPI2_HEADER_VERSION_UNIT (0x27)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -179,10 +193,12 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
U32 HCBSize; /*0x74 */
U32 HCBAddressLow; /*0x78 */
U32 HCBAddressHigh; /*0x7C */
- U32 Reserved6[16]; /*0x80 */
+ U32 Reserved6[12]; /*0x80 */
+ U32 Scratchpad[4]; /*0xB0 */
U32 RequestDescriptorPostLow; /*0xC0 */
U32 RequestDescriptorPostHigh; /*0xC4 */
- U32 Reserved7[14]; /*0xC8 */
+ U32 AtomicRequestDescriptorPost;/*0xC8 */
+ U32 Reserved7[13]; /*0xCC */
} MPI2_SYSTEM_INTERFACE_REGS,
*PTR_MPI2_SYSTEM_INTERFACE_REGS,
Mpi2SystemInterfaceRegs_t,
@@ -224,6 +240,8 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
*/
#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+#define MPI2_DIAG_SBR_RELOAD (0x00002000)
+
#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
@@ -298,10 +316,19 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
/*
- *Offsets for the Request Queue
+ *Offsets for the Scratchpad registers
+ */
+#define MPI26_SCRATCHPAD0_OFFSET (0x000000B0)
+#define MPI26_SCRATCHPAD1_OFFSET (0x000000B4)
+#define MPI26_SCRATCHPAD2_OFFSET (0x000000B8)
+#define MPI26_SCRATCHPAD3_OFFSET (0x000000BC)
+
+/*
+ *Offsets for the Request Descriptor Post Queue
*/
#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+#define MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET (0x000000C8)
/*Hard Reset delay timings */
#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
@@ -329,7 +356,8 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
*pMpi2DefaultRequestDescriptor_t;
/*defines for the RequestFlags field */
-#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x1E)
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_RSHIFT (1)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
@@ -337,7 +365,7 @@ typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
-#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
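The mask widens from 0x0E to 0x1E so the MPI v2.6 type encodings fit while bit 0 stays reserved for the IOC FIFO marker. A hedged sketch of a type test built only from the defines above:

	static bool is_high_priority_desc(u8 request_flags)
	{
		return (request_flags & MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK) ==
		       MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	}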
/*High Priority Request Descriptor */
typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
@@ -408,6 +436,33 @@ typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
Mpi2RequestDescriptorUnion_t,
*pMpi2RequestDescriptorUnion_t;
+/*Atomic Request Descriptors */
+
+/*
+ * All Atomic Request Descriptors have the same format, so the following
+ * structure is used for all Atomic Request Descriptors:
+ * Atomic Default Request Descriptor
+ * Atomic High Priority Request Descriptor
+ * Atomic SCSI IO Request Descriptor
+ * Atomic SCSI Target Request Descriptor
+ * Atomic RAID Accelerator Request Descriptor
+ * Atomic Fast Path SCSI IO Request Descriptor
+ */
+
+/*Atomic Request Descriptor */
+typedef struct _MPI26_ATOMIC_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /* 0x00 */
+ U8 MSIxIndex; /* 0x01 */
+ U16 SMID; /* 0x02 */
+} MPI26_ATOMIC_REQUEST_DESCRIPTOR,
+ *PTR_MPI26_ATOMIC_REQUEST_DESCRIPTOR,
+ Mpi26AtomicRequestDescriptor_t,
+ *pMpi26AtomicRequestDescriptor_t;
+
+/*for the RequestFlags field, use the same
+ *defines as MPI2_DEFAULT_REQUEST_DESCRIPTOR
+ */
+
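Because every atomic descriptor shares this four-byte layout, a request can be posted with one 32-bit write to AtomicRequestDescriptorPost, with no lock needed to pair Low/High register writes. A hedged sketch, assuming mmio is the ioremap()ed system interface base and <linux/io.h> is in scope:

	static void post_atomic_descriptor(void __iomem *mmio, u8 request_flags,
					   u8 msix_index, u16 smid)
	{
		Mpi26AtomicRequestDescriptor_t desc;

		desc.RequestFlags = request_flags; /* e.g. MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO */
		desc.MSIxIndex = msix_index;
		desc.SMID = cpu_to_le16(smid);

		/* The whole descriptor is exactly 32 bits wide. */
		writel(*(u32 *)&desc,
		       mmio + MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET);
	}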
/*Reply Descriptors */
/*Default Reply Descriptor */
@@ -548,6 +603,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18)
#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A)
#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B)
+#define MPI2_FUNCTION_IO_UNIT_CONTROL (0x1B)
#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C)
#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D)
#define MPI2_FUNCTION_DIAG_RELEASE (0x1E)
@@ -587,6 +643,7 @@ typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+#define MPI2_IOCSTATUS_INSUFFICIENT_POWER (0x000A)
/****************************************************************************
* Config IOCStatus values
@@ -1045,7 +1102,7 @@ typedef union _MPI2_IEEE_SGE_CHAIN_UNION {
Mpi2IeeeSgeChainUnion_t,
*pMpi2IeeeSgeChainUnion_t;
-/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */
+/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 and later */
typedef struct _MPI25_IEEE_SGE_CHAIN64 {
U64 Address;
U32 Length;
@@ -1098,6 +1155,11 @@ typedef union _MPI25_SGE_IO_UNION {
#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+/*Next Segment Format */
+
+#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
+#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
+
/*Data Location Address Space */
#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
@@ -1108,6 +1170,7 @@ typedef union _MPI25_SGE_IO_UNION {
#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
(MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR)
+#define MPI26_IEEE_SGE_FLAGS_IOCCTL_ADDR (0x02)
/****************************************************************************
* IEEE SGE operation Macros
@@ -1166,6 +1229,7 @@ typedef union _MPI2_SGE_IO_UNION {
#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI26_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
/*values for SGL Type subfield */
#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 581fdb375db5..9cf09bf7c4a8 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.29
+ * mpi2_cnfg.h Version: 02.00.33
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -178,7 +178,14 @@
* 01-08-14 02.00.28 Added more defines for the BiosOptions field of
* MPI2_CONFIG_PAGE_BIOS_1.
* 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and
- * more defines for the BiosOptions field..
+ * more defines for the BiosOptions field.
+ * 11-18-14 02.00.30 Updated copyright information.
+ * Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG.
+ * Added AdapterOrderAux fields to BIOS Page 3.
+ * 03-16-15 02.00.31 Updated for MPI v2.6.
+ * Added new SAS Phy Event codes
+ * 05-25-15 02.00.33 Added more defines for the BiosOptions field of
+ * MPI2_CONFIG_PAGE_BIOS_1.
* --------------------------------------------------------------------------
*/
@@ -355,7 +362,6 @@ typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
-
/****************************************************************************
* Configuration messages
****************************************************************************/
@@ -457,8 +463,17 @@ typedef struct _MPI2_CONFIG_REPLY {
#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094)
#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095)
-
-
+/* MPI v2.6 SAS Products */
+#define MPI26_MFGPAGE_DEVID_SAS3216 (0x00C9)
+#define MPI26_MFGPAGE_DEVID_SAS3224 (0x00C4)
+#define MPI26_MFGPAGE_DEVID_SAS3316_1 (0x00C5)
+#define MPI26_MFGPAGE_DEVID_SAS3316_2 (0x00C6)
+#define MPI26_MFGPAGE_DEVID_SAS3316_3 (0x00C7)
+#define MPI26_MFGPAGE_DEVID_SAS3316_4 (0x00C8)
+#define MPI26_MFGPAGE_DEVID_SAS3324_1 (0x00C0)
+#define MPI26_MFGPAGE_DEVID_SAS3324_2 (0x00C1)
+#define MPI26_MFGPAGE_DEVID_SAS3324_3 (0x00C2)
+#define MPI26_MFGPAGE_DEVID_SAS3324_4 (0x00C3)
/*Manufacturing Page 0 */
@@ -941,8 +956,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
U8
BoardTemperatureUnits; /*0x16 */
U8 Reserved3; /*0x17 */
- U32 Reserved4; /* 0x18 */
- U32 Reserved5; /* 0x1C */
+ U32 BoardPowerRequirement; /*0x18 */
+ U32 PCISlotPowerAllocation; /*0x1C */
U32 Reserved6; /* 0x20 */
U32 Reserved7; /* 0x24 */
} MPI2_CONFIG_PAGE_IO_UNIT_7,
@@ -1151,6 +1166,62 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01)
+/* IO Unit Page 11 (for MPI v2.6 and later) */
+
+typedef struct _MPI26_IOUNIT11_SPINUP_GROUP {
+ U8 MaxTargetSpinup; /* 0x00 */
+ U8 SpinupDelay; /* 0x01 */
+ U8 SpinupFlags; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
+} MPI26_IOUNIT11_SPINUP_GROUP,
+ *PTR_MPI26_IOUNIT11_SPINUP_GROUP,
+ Mpi26IOUnit11SpinupGroup_t,
+ *pMpi26IOUnit11SpinupGroup_t;
+
+/* defines for IO Unit Page 11 SpinupFlags */
+#define MPI26_IOUNITPAGE11_SPINUP_DISABLE_FLAG (0x01)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * four and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI26_IOUNITPAGE11_PHY_MAX
+#define MPI26_IOUNITPAGE11_PHY_MAX (4)
+#endif
+
+typedef struct _MPI26_CONFIG_PAGE_IO_UNIT_11 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ MPI26_IOUNIT11_SPINUP_GROUP SpinupGroupParameters[4]; /*0x08 */
+ U32 Reserved2; /*0x18 */
+ U32 Reserved3; /*0x1C */
+ U32 Reserved4; /*0x20 */
+ U8 BootDeviceWaitTime; /*0x24 */
+ U8 Reserved5; /*0x25 */
+ U16 Reserved6; /*0x26 */
+ U8 NumPhys; /*0x28 */
+ U8 PEInitialSpinupDelay; /*0x29 */
+ U8 PEReplyDelay; /*0x2A */
+ U8 Flags; /*0x2B */
+ U8 PHY[MPI26_IOUNITPAGE11_PHY_MAX];/*0x2C */
+} MPI26_CONFIG_PAGE_IO_UNIT_11,
+ *PTR_MPI26_CONFIG_PAGE_IO_UNIT_11,
+ Mpi26IOUnitPage11_t,
+ *pMpi26IOUnitPage11_t;
+
+#define MPI26_IOUNITPAGE11_PAGEVERSION (0x00)
+
+/* defines for Flags field */
+#define MPI26_IOUNITPAGE11_FLAGS_AUTO_PORTENABLE (0x01)
+
+/* defines for PHY field */
+#define MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK (0x03)
+
+
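Each byte of the PHY[] array selects one of the four spinup groups. A hedged sketch of the lookup (helper name assumed; callers should also bound-check phy_num against NumPhys, per the comment above):

	static MPI26_IOUNIT11_SPINUP_GROUP *
	phy_spinup_group(MPI26_CONFIG_PAGE_IO_UNIT_11 *page, u8 phy_num)
	{
		u8 group = page->PHY[phy_num] &
			   MPI26_IOUNITPAGE11_PHY_SPINUP_GROUP_MASK;

		return &page->SpinupGroupParameters[group];
	}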
/****************************************************************************
* IOC Config Pages
@@ -1343,6 +1414,10 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
#define MPI2_BIOSPAGE1_PAGEVERSION (0x07)
/*values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_BOOT_LIST_ADD_ALT_BOOT_DEVICE (0x00008000)
+#define MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG (0x00004000)
+
#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000)
#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800)
@@ -1492,6 +1567,8 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_2 {
/*BIOS Page 3 */
+#define MPI2_BIOSPAGE3_NUM_ADAPTER (4)
+
typedef struct _MPI2_ADAPTER_INFO {
U8 PciBusNumber; /*0x00 */
U8 PciDeviceAndFunctionNumber; /*0x01 */
@@ -1502,17 +1579,26 @@ typedef struct _MPI2_ADAPTER_INFO {
#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+typedef struct _MPI2_ADAPTER_ORDER_AUX {
+ U64 WWID; /* 0x00 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_ADAPTER_ORDER_AUX, *PTR_MPI2_ADAPTER_ORDER_AUX,
+ Mpi2AdapterOrderAux_t, *pMpi2AdapterOrderAux_t;
+
+
typedef struct _MPI2_CONFIG_PAGE_BIOS_3 {
MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
U32 GlobalFlags; /*0x04 */
U32 BiosVersion; /*0x08 */
- MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */
+ MPI2_ADAPTER_INFO AdapterOrder[MPI2_BIOSPAGE3_NUM_ADAPTER];
U32 Reserved1; /*0x1C */
+ MPI2_ADAPTER_ORDER_AUX AdapterOrderAux[MPI2_BIOSPAGE3_NUM_ADAPTER];
} MPI2_CONFIG_PAGE_BIOS_3,
*PTR_MPI2_CONFIG_PAGE_BIOS_3,
Mpi2BiosPage3_t, *pMpi2BiosPage3_t;
-#define MPI2_BIOSPAGE3_PAGEVERSION (0x00)
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x01)
/*values for BIOS Page 3 GlobalFlags */
#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
@@ -2006,6 +2092,8 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 {
#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
/*values for SAS IO Unit Page 0 PhyFlags */
+#define MPI2_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI2_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
@@ -2108,6 +2196,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_DA_PERSIST_CONNECT (0x0100)
#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
@@ -2125,6 +2214,8 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
/*values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI2_SASIOUNIT1_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
@@ -2144,7 +2235,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
*SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
-/*SAS IO Unit Page 4 */
+/*SAS IO Unit Page 4 (for MPI v2.5 and earlier) */
typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP {
U8 MaxTargetSpinup; /*0x00 */
@@ -2715,6 +2806,7 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_PERSIST_CAPABLE (0x0004)
#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
@@ -2922,6 +3014,19 @@ typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
+/*Following codes are product specific and in MPI v2.6 and later */
+#define MPI2_SASPHY3_EVENT_CODE_LCARB_WAIT_TIME (0xD3)
+#define MPI2_SASPHY3_EVENT_CODE_RCVD_CONN_RESP_WAIT_TIME (0xD4)
+#define MPI2_SASPHY3_EVENT_CODE_LCCONN_TIME (0xD5)
+#define MPI2_SASPHY3_EVENT_CODE_SSP_TX_START_TRANSMIT (0xD6)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_TX_START (0xD7)
+#define MPI2_SASPHY3_EVENT_CODE_SMP_TX_START_TRANSMT (0xD8)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_BREAK_CONN (0xD9)
+#define MPI2_SASPHY3_EVENT_CODE_SSP_RX_START_RECEIVE (0xDA)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_RX_START_RECEIVE (0xDB)
+#define MPI2_SASPHY3_EVENT_CODE_SMP_RX_START_RECEIVE (0xDC)
+
+
/*values for the CounterType field */
#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index 068c98efd742..c38f624b859d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_init.h
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.15
+ * mpi2_init.h Version: 02.00.17
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -46,6 +46,11 @@
* 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
* 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
* replacing the Reserved4 field.
+ * 11-18-14 02.00.16 Updated copyright information.
+ * 03-16-15 02.00.17 Updated for MPI v2.6.
+ * Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH.
+ * Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and
+ * MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF.
* --------------------------------------------------------------------------
*/
@@ -128,6 +133,7 @@ typedef struct _MPI2_SCSI_IO_REQUEST {
#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
+#define MPI26_SCSIIO_MSGFLAGS_IOCCTL_SENSE_ADDR (0x08)
/*SCSI IO SGLFlags bits */
@@ -228,7 +234,7 @@ typedef union _MPI25_SCSI_IO_CDB_UNION {
} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION,
Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t;
-/*MPI v2.5 SCSI IO Request Message */
+/*MPI v2.5/2.6 SCSI IO Request Message */
typedef struct _MPI25_SCSI_IO_REQUEST {
U16 DevHandle; /*0x00 */
U8 ChainOffset; /*0x02 */
@@ -302,12 +308,14 @@ typedef struct _MPI25_SCSI_IO_REQUEST {
#define MPI25_SCSIIO_NUM_SGLOFFSETS (4)
/*defines for the IoFlags field */
-#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
-#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
-#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
+#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
+#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+#define MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH (0x2000)
#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI26_SCSIIO_IOFLAGS_PORT_REQUEST (0x0400)
#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
/*MPI v2.5 defines for the EEDPFlags bits */
@@ -512,6 +520,7 @@ typedef struct _MPI2_SEP_REQUEST {
#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
/*SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF (0x00080000)
#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
@@ -547,6 +556,7 @@ typedef struct _MPI2_SEP_REPLY {
Mpi2SepReply_t, *pMpi2SepReply_t;
/*SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x00080000)
#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index d7598cc4bb8e..cf510ed91924 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_ioc.h
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.24
+ * mpi2_ioc.h Version: 02.00.26
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -133,6 +133,10 @@
* Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
* Added Encrypted Hash Extended Image.
* 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
+ * 11-18-14 02.00.25 Updated copyright information.
+ * 03-16-15 02.00.26 Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
+ * MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
+ * Added MPI26_CTRL_OP_SHUTDOWN.
* --------------------------------------------------------------------------
*/
@@ -165,7 +169,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
U16 HeaderVersion; /*0x0E */
U32 Reserved5; /*0x10 */
U16 Reserved6; /*0x14 */
- U8 Reserved7; /*0x16 */
+ U8 HostPageSize; /*0x16 */
U8 HostMSIxVectors; /*0x17 */
U16 Reserved8; /*0x18 */
U16 SystemRequestFrameSize; /*0x1A */
@@ -289,7 +293,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
U16 MaxDevHandle; /*0x38 */
U16 MaxPersistentEntries; /*0x3A */
U16 MinDevHandle; /*0x3C */
- U16 Reserved4; /*0x3E */
+ U8 CurrentHostPageSize; /* 0x3E */
+ U8 Reserved4; /* 0x3F */
} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
@@ -326,6 +331,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
@@ -343,8 +349,8 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
/*ProtocolFlags */
-#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
/****************************************************************************
* PortFacts message
@@ -1247,6 +1253,7 @@ typedef struct _MPI2_FW_UPLOAD_REQUEST {
#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09)
#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D)
/*MPI v2.0 FWUpload TransactionContext Element */
typedef struct _MPI2_FW_UPLOAD_TCSGE {
@@ -1328,7 +1335,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
U32 Reserved54; /*0x54 */
U32 Reserved58; /*0x58 */
U32 Reserved5C; /*0x5C */
- U32 Reserved60; /*0x60 */
+ U32 BootFlags; /*0x60 */
U32 FirmwareVersionNameWhat; /*0x64 */
U8 FirmwareVersionName[32]; /*0x68 */
U32 VendorNameWhat; /*0x88 */
@@ -1354,18 +1361,22 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+#define MPI26_FW_HEADER_SIGNATURE (0xEB000000)
/*Signature0 field */
#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+#define MPI26_FW_HEADER_SIGNATURE0 (0x5AEAA55A)
/*Signature1 field */
#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+#define MPI26_FW_HEADER_SIGNATURE1 (0xA55AEAA5)
/*Signature2 field */
#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+#define MPI26_FW_HEADER_SIGNATURE2 (0x5AA55AEA)
/*defines for using the ProductID field */
#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
@@ -1381,6 +1392,8 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+#define MPI26_FW_HEADER_PID_FAMILY_3324_SAS (0x0028)
+#define MPI26_FW_HEADER_PID_FAMILY_3516_SAS (0x0031)
/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
@@ -1388,6 +1401,7 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+#define MPI26_FW_HEADER_BOOTFLAGS_OFFSET (0x60)
#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
@@ -1493,7 +1507,9 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA {
#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
#define MPI2_FLASH_REGION_MEGARAID (0x09)
-#define MPI2_FLASH_REGION_INIT (0x0A)
+#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
+#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
+#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
/*ImageRevision */
#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
@@ -1619,7 +1635,6 @@ typedef struct _MPI25_ENCRYPTED_HASH_DATA {
Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
-
/****************************************************************************
* PowerManagementControl message
****************************************************************************/
@@ -1726,4 +1741,90 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY {
} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t;
+/****************************************************************************
+* IO Unit Control messages (MPI v2.6 and later only.)
+****************************************************************************/
+
+/* IO Unit Control Request Message */
+typedef struct _MPI26_IOUNIT_CONTROL_REQUEST {
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 ChainOffset; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 IOCParameter; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U8 PhyNum; /* 0x0E */
+ U8 PrimFlags; /* 0x0F */
+ U32 Primitive; /* 0x10 */
+ U8 LookupMethod; /* 0x14 */
+ U8 Reserved5; /* 0x15 */
+ U16 SlotNumber; /* 0x16 */
+ U64 LookupAddress; /* 0x18 */
+ U32 IOCParameterValue; /* 0x20 */
+ U32 Reserved7; /* 0x24 */
+ U32 Reserved8; /* 0x28 */
+} MPI26_IOUNIT_CONTROL_REQUEST,
+ *PTR_MPI26_IOUNIT_CONTROL_REQUEST,
+ Mpi26IoUnitControlRequest_t,
+ *pMpi26IoUnitControlRequest_t;
+
+/* values for the Operation field */
+#define MPI26_CTRL_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06)
+#define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07)
+#define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A)
+#define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D)
+#define MPI26_CTRL_OP_LOOKUP_MAPPING (0x0E)
+#define MPI26_CTRL_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI26_CTRL_OP_ENABLE_FP_DEVICE (0x10)
+#define MPI26_CTRL_OP_DISABLE_FP_DEVICE (0x11)
+#define MPI26_CTRL_OP_ENABLE_FP_ALL (0x12)
+#define MPI26_CTRL_OP_DISABLE_FP_ALL (0x13)
+#define MPI26_CTRL_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI26_CTRL_OP_DEV_DISABLE_NCQ (0x15)
+#define MPI26_CTRL_OP_SHUTDOWN (0x16)
+#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17)
+#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18)
+#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19)
+#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
+/* values for the PrimFlags field */
+#define MPI26_CTRL_PRIMFLAGS_SINGLE (0x08)
+#define MPI26_CTRL_PRIMFLAGS_TRIPLE (0x02)
+#define MPI26_CTRL_PRIMFLAGS_REDUNDANT (0x01)
+
+/* values for the LookupMethod field */
+#define MPI26_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
+#define MPI26_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
+#define MPI26_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
+
+
+/* IO Unit Control Reply Message */
+typedef struct _MPI26_IOUNIT_CONTROL_REPLY {
+ U8 Operation; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U8 MsgLength; /* 0x02 */
+ U8 Function; /* 0x03 */
+ U16 DevHandle; /* 0x04 */
+ U8 IOCParameter; /* 0x06 */
+ U8 MsgFlags; /* 0x07 */
+ U8 VP_ID; /* 0x08 */
+ U8 VF_ID; /* 0x09 */
+ U16 Reserved3; /* 0x0A */
+ U16 Reserved4; /* 0x0C */
+ U16 IOCStatus; /* 0x0E */
+ U32 IOCLogInfo; /* 0x10 */
+} MPI26_IOUNIT_CONTROL_REPLY,
+ *PTR_MPI26_IOUNIT_CONTROL_REPLY,
+ Mpi26IoUnitControlReply_t,
+ *pMpi26IoUnitControlReply_t;
+
+
#endif
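Not from the patch: a minimal hedged sketch of filling the new MPI v2.6 request, using only the defines introduced above. Note that MPI2_FUNCTION_IO_UNIT_CONTROL reuses the old SAS IO Unit Control function code (0x1B):

	static void build_remove_device_request(Mpi26IoUnitControlRequest_t *req,
						u16 devhandle)
	{
		memset(req, 0, sizeof(*req));
		req->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
		req->Operation = MPI26_CTRL_OP_REMOVE_DEVICE;
		req->DevHandle = cpu_to_le16(devhandle);
	}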
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index 13d93ca029d5..1c0eeeeb5eaf 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2014 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_raid.h
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.10
+ * mpi2_raid.h Version: 02.00.11
*
* Version History
* ---------------
@@ -31,6 +31,7 @@
* 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
* Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
* 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
+ * 11-18-14 02.00.11 Updated copyright information.
* --------------------------------------------------------------------------
*/
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index 156e30543a2f..c10c2c02a945 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2015 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_sas.h
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2_sas.h Version: 02.00.08
+ * mpi2_sas.h Version: 02.00.10
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -32,6 +32,9 @@
* Passthrough Request message.
* 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete
* for anything newer than MPI v2.0.
+ * 11-18-14 02.00.09 Updated copyright information.
+ * 03-16-15 02.00.10 Updated for MPI v2.6.
+ * Added MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA.
* --------------------------------------------------------------------------
*/
@@ -183,6 +186,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST {
/*values for PassthroughFlags field */
#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_FPDMA (0x0040)
#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
@@ -216,6 +220,8 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REPLY {
/****************************************************************************
* SAS IO Unit Control messages
+* (MPI v2.5 and earlier only.
+* Replaced by IO Unit Control messages in MPI v2.6 and later.)
****************************************************************************/
/*SAS IO Unit Control Request Message */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 1629e5bce7e1..5f9289a1166f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2014 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_tool.h
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.12
+ * mpi2_tool.h Version: 02.00.13
*
* Version History
* ---------------
@@ -34,6 +34,7 @@
* it uses MPI Chain SGE as well as MPI Simple SGE.
* 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
* 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
+ * 11-18-14 02.00.13 Updated copyright information.
* --------------------------------------------------------------------------
*/
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
index 99ab093602e8..92a81abc2c31 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2014 LSI Corporation.
+ * Copyright 2000-2014 Avago Technologies. All rights reserved.
*
*
* Name: mpi2_type.h
* Title: MPI basic type definitions
* Creation Date: August 16, 2006
*
- * mpi2_type.h Version: 02.00.00
+ * mpi2_type.h Version: 02.00.01
*
* Version History
* ---------------
@@ -14,6 +14,7 @@
* Date Version Description
* -------- -------- ------------------------------------------------------
* 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 11-18-14 02.00.01 Updated copyright information.
* --------------------------------------------------------------------------
*/
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 83658acddd58..8c44b9c424af 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -83,6 +83,10 @@ static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+static int smp_affinity_enable = 1;
+module_param(smp_affinity_enable, int, S_IRUGO);
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
+
static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
@@ -395,6 +399,9 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
desc = "insufficient resources";
break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
+ desc = "insufficient power";
+ break;
case MPI2_IOCSTATUS_INVALID_FIELD:
desc = "invalid field";
break;
@@ -772,7 +779,7 @@ mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
- return 1;
+ return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
return 1;
@@ -803,6 +810,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
Mpi2EventNotificationReply_t *mpi_reply;
Mpi2EventAckRequest_t *ack_request;
u16 smid;
+ struct _event_ack_list *delayed_event_ack;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (!mpi_reply)
@@ -816,8 +824,18 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
goto out;
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
+ GFP_ATOMIC);
+ if (!delayed_event_ack)
+ goto out;
+ INIT_LIST_HEAD(&delayed_event_ack->list);
+ delayed_event_ack->Event = mpi_reply->Event;
+ delayed_event_ack->EventContext = mpi_reply->EventContext;
+ list_add_tail(&delayed_event_ack->list,
+ &ioc->delayed_event_ack_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED: EVENT ACK: event (0x%04x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->Event)));
goto out;
}
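/*
 * A minimal standalone sketch of the defer-and-replay pattern the hunk
 * above introduces, assuming only <linux/list.h> and <linux/slab.h>;
 * slot_available() and submit() are hypothetical stand-ins for the
 * driver's smid allocation and request posting, not its real API.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct deferred_ack {
	struct list_head list;
	u16 event;
	u32 context;
};

extern bool slot_available(void);	/* hypothetical: a free smid exists */
extern int submit(u16 event, u32 ctx);	/* hypothetical: post the ack now */

static int ack_or_defer(struct list_head *pending, u16 event, u32 ctx)
{
	struct deferred_ack *d;

	if (slot_available())
		return submit(event, ctx);

	/* GFP_ATOMIC: this path runs in interrupt context, as above */
	d = kzalloc(sizeof(*d), GFP_ATOMIC);
	if (!d)
		return -ENOMEM;
	d->event = event;
	d->context = ctx;
	/* replayed once a request slot is released */
	list_add_tail(&d->list, pending);
	return 0;
}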
@@ -1348,6 +1366,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
+
_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}
@@ -1797,9 +1816,10 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
- irq_set_affinity_hint(reply_q->vector, NULL);
- free_cpumask_var(reply_q->affinity_hint);
- synchronize_irq(reply_q->vector);
+ if (smp_affinity_enable) {
+ irq_set_affinity_hint(reply_q->vector, NULL);
+ free_cpumask_var(reply_q->affinity_hint);
+ }
free_irq(reply_q->vector, reply_q);
kfree(reply_q);
}
@@ -1829,9 +1849,12 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
reply_q->msix_index = index;
reply_q->vector = vector;
- if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
- return -ENOMEM;
- cpumask_clear(reply_q->affinity_hint);
+ if (smp_affinity_enable) {
+ if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
+ kfree(reply_q);
+ return -ENOMEM;
+ }
+ }
atomic_set(&reply_q->busy, 0);
if (ioc->msix_enable)
@@ -1845,6 +1868,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
if (r) {
pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
reply_q->name, vector);
+ free_cpumask_var(reply_q->affinity_hint);
kfree(reply_q);
return -EBUSY;
}
@@ -1894,16 +1918,17 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
for (i = 0 ; i < group ; i++) {
ioc->cpu_msix_table[cpu] = index;
- cpumask_or(reply_q->affinity_hint,
+ if (smp_affinity_enable)
+ cpumask_or(reply_q->affinity_hint,
reply_q->affinity_hint, get_cpu_mask(cpu));
cpu = cpumask_next(cpu, cpu_online_mask);
}
-
- if (irq_set_affinity_hint(reply_q->vector,
+ if (smp_affinity_enable)
+ if (irq_set_affinity_hint(reply_q->vector,
reply_q->affinity_hint))
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "error setting affinity hint for irq vector %d\n",
- ioc->name, reply_q->vector));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "Err setting affinity hint to irq vector %d\n",
+ ioc->name, reply_q->vector));
index++;
}
}
@@ -1961,6 +1986,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
} else if (max_msix_vectors == 0)
goto try_ioapic;
+ if (ioc->msix_vector_count < ioc->cpu_count)
+ smp_affinity_enable = 0;
+
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
GFP_KERNEL);
if (!entries) {
@@ -2231,6 +2259,12 @@ mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
}
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
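/*
 * Illustrative sketch of how a cpu -> reply-queue table like the
 * cpu_msix_table consulted above can be populated; assumes
 * nr_queues >= 1 and is not the driver's actual assignment logic
 * (see _base_assign_reply_queues earlier in this patch).
 */
#include <linux/cpumask.h>
#include <linux/types.h>

static void fill_cpu_msix_table(u8 *table, unsigned int nr_queues)
{
	unsigned int cpu, q = 0;

	for_each_online_cpu(cpu) {
		table[cpu] = q;			/* route completions near the submitter */
		q = (q + 1) % nr_queues;	/* round-robin across reply queues */
	}
}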
/**
* mpt3sas_base_get_smid - obtain a free smid from internal queue
* @ioc: per adapter object
@@ -2291,6 +2325,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
request->scmd = scmd;
request->cb_idx = cb_idx;
smid = request->smid;
+ request->msix_io = _base_get_msix_index(ioc);
list_del(&request->tracker_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return smid;
@@ -2413,12 +2448,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
}
#endif
-static inline u8
-_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
-{
- return ioc->cpu_msix_table[raw_smp_processor_id()];
-}
-
/**
* mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
* @ioc: per adapter object
@@ -2472,18 +2501,19 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
* @ioc: per adapter object
* @smid: system request message index
- *
+ * @msix_task: MSI-X index of the I/O in case of a task abort, else 0.
* Return nothing.
*/
void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 msix_task)
{
Mpi2RequestDescriptorUnion_t descriptor;
u64 *request = (u64 *)&descriptor;
descriptor.HighPriority.RequestFlags =
MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
- descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.MSIxIndex = msix_task;
descriptor.HighPriority.SMID = cpu_to_le16(smid);
descriptor.HighPriority.LMID = 0;
descriptor.HighPriority.Reserved1 = 0;
@@ -3185,20 +3215,35 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
}
ioc->shost->sg_tablesize = sg_tablesize;
- ioc->hi_priority_depth = facts->HighPriorityCredit;
- ioc->internal_depth = ioc->hi_priority_depth + (5);
+ ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
+ (facts->RequestCredit / 4));
+ if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
+ if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
+ INTERNAL_SCSIIO_CMDS_COUNT)) {
+ pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
+ Credits, it has just %d number of credits\n",
+ ioc->name, facts->RequestCredit);
+ return -ENOMEM;
+ }
+ ioc->internal_depth = 10;
+ }
+
+ ioc->hi_priority_depth = ioc->internal_depth - (5);
/* command line tunables for max controller queue depth */
if (max_queue_depth != -1 && max_queue_depth != 0) {
max_request_credit = min_t(u16, max_queue_depth +
- ioc->hi_priority_depth + ioc->internal_depth,
- facts->RequestCredit);
+ ioc->internal_depth, facts->RequestCredit);
if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
max_request_credit = MAX_HBA_QUEUE_DEPTH;
} else
max_request_credit = min_t(u16, facts->RequestCredit,
MAX_HBA_QUEUE_DEPTH);
- ioc->hba_queue_depth = max_request_credit;
+ /* Firmware maintains additional facts->HighPriorityCredit number of
+ * credits for HiPriority Request messages, so hba queue depth will be
+ * sum of max_request_credit and high priority queue depth.
+ */
+ ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
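/*
 * Worked example of the sizing above (illustrative numbers only):
 *   facts->RequestCredit      = 1024
 *   facts->HighPriorityCredit = 100
 *   internal_depth    = min(100 + 5, 1024 / 4) = 105
 *   hi_priority_depth = 105 - 5                = 100
 *   hba_queue_depth   = max_request_credit + 100
 */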
/* request frame size */
ioc->request_sz = facts->IOCRequestFrameSize * 4;
@@ -3206,6 +3251,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/* reply frame size */
ioc->reply_sz = facts->ReplyFrameSize * 4;
+ /* chain segment size */
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ if (facts->IOCMaxChainSegmentSize)
+ ioc->chain_segment_sz =
+ facts->IOCMaxChainSegmentSize *
+ MAX_CHAIN_ELEMT_SZ;
+ else
+ /* set to 128 bytes if IOCMaxChainSegmentSize is zero */
+ ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
+ MAX_CHAIN_ELEMT_SZ;
+ } else
+ ioc->chain_segment_sz = ioc->request_sz;
+
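/*
 * Example of the chain sizing above (illustrative): with
 * MAX_CHAIN_ELEMT_SZ = 16, a reported IOCMaxChainSegmentSize of 32
 * gives a 512-byte chain segment, while a reported 0 falls back to
 * DEFAULT_NUM_FWCHAIN_ELEMTS * 16 = 128 bytes.
 */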
/* calculate the max scatter element size */
sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
@@ -3217,7 +3275,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->max_sges_in_main_message = max_sge_elements/sge_size;
/* now do the same for a chain buffer */
- max_sge_elements = ioc->request_sz - sge_size;
+ max_sge_elements = ioc->chain_segment_sz - sge_size;
ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
/*
@@ -3245,7 +3303,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->reply_post_queue_depth += 16 -
(ioc->reply_post_queue_depth % 16);
-
if (ioc->reply_post_queue_depth >
facts->MaxReplyDescriptorPostQueueDepth) {
ioc->reply_post_queue_depth =
@@ -3327,7 +3384,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/* set the scsi host can_queue depth
* with some internal commands that could be outstanding
*/
- ioc->shost->can_queue = ioc->scsiio_depth;
+ ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"scsi host: can_queue depth (%d)\n",
ioc->name, ioc->shost->can_queue));
@@ -3354,8 +3411,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
goto out;
- retry_sz += 64;
- ioc->hba_queue_depth = max_request_credit - retry_sz;
+ retry_sz = 64;
+ ioc->hba_queue_depth -= retry_sz;
+ _base_release_memory_pools(ioc);
goto retry_allocation;
}
@@ -3410,7 +3468,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
goto out;
}
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
- ioc->request_sz, 16, 0);
+ ioc->chain_segment_sz, 16, 0);
if (!ioc->chain_dma_pool) {
pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
ioc->name);
@@ -3424,13 +3482,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
ioc->chain_depth = i;
goto chain_done;
}
- total_sz += ioc->request_sz;
+ total_sz += ioc->chain_segment_sz;
}
chain_done:
dinitprintk(ioc, pr_info(MPT3SAS_FMT
"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->chain_depth, ioc->request_sz,
- ((ioc->chain_depth * ioc->request_sz))/1024));
+ ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
+ ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
/* initialize hi-priority queue smid's */
ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
@@ -4291,6 +4349,10 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
facts->IOCRequestFrameSize =
le16_to_cpu(mpi_reply.IOCRequestFrameSize);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ facts->IOCMaxChainSegmentSize =
+ le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
+ }
facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
ioc->shost->max_id = -1;
@@ -4968,15 +5030,16 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
- int r, i;
+ int r, i, index;
unsigned long flags;
u32 reply_address;
u16 smid;
struct _tr_list *delayed_tr, *delayed_tr_next;
+ struct _sc_list *delayed_sc, *delayed_sc_next;
+ struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
u8 hide_flag;
struct adapter_reply_queue *reply_q;
- long reply_post_free;
- u32 reply_post_free_sz, index = 0;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -4995,6 +5058,18 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
kfree(delayed_tr);
}
+ list_for_each_entry_safe(delayed_sc, delayed_sc_next,
+ &ioc->delayed_sc_list, list) {
+ list_del(&delayed_sc->list);
+ kfree(delayed_sc);
+ }
+
+ list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
+ &ioc->delayed_event_ack_list, list) {
+ list_del(&delayed_event_ack->list);
+ kfree(delayed_event_ack);
+ }
+
/* initialize the scsi lookup free list */
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
INIT_LIST_HEAD(&ioc->free_list);
@@ -5048,27 +5123,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
_base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
- reply_post_free_sz = ioc->reply_post_queue_depth *
- sizeof(Mpi2DefaultReplyDescriptor_t);
- reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+ index = 0;
+ reply_post_free_contig = ioc->reply_post[0].reply_post_free;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ /*
+ * If RDPQ is enabled, switch to the next allocation.
+ * Otherwise advance within the contiguous region.
+ */
+ if (ioc->rdpq_array_enable) {
+ reply_q->reply_post_free =
+ ioc->reply_post[index++].reply_post_free;
+ } else {
+ reply_q->reply_post_free = reply_post_free_contig;
+ reply_post_free_contig += ioc->reply_post_queue_depth;
+ }
+
reply_q->reply_post_host_index = 0;
- reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
- reply_post_free;
for (i = 0; i < ioc->reply_post_queue_depth; i++)
reply_q->reply_post_free[i].Words =
cpu_to_le64(ULLONG_MAX);
if (!_base_is_controller_msix_enabled(ioc))
goto skip_init_reply_post_free_queue;
- /*
- * If RDPQ is enabled, switch to the next allocation.
- * Otherwise advance within the contiguous region.
- */
- if (ioc->rdpq_array_enable)
- reply_post_free = (long)
- ioc->reply_post[++index].reply_post_free;
- else
- reply_post_free += reply_post_free_sz;
}
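/*
 * Layout handled by the rewritten loop above (illustrative):
 *
 *   rdpq_array_enable set:   reply_post[0], reply_post[1], ...
 *                            (one DMA allocation per reply queue)
 *   rdpq_array_enable clear: [ q0 descriptors ][ q1 descriptors ] ...
 *                            (single allocation; the pointer is bumped
 *                             by reply_post_queue_depth per queue)
 */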
skip_init_reply_post_free_queue:
@@ -5226,6 +5301,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->build_zero_len_sge = &_base_build_zero_len_sge;
break;
case MPI25_VERSION:
+ case MPI26_VERSION:
/*
* In SAS3.0,
* SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 5ad271efbd45..32580b514b18 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "09.102.00.00"
-#define MPT3SAS_MAJOR_VERSION 9
-#define MPT3SAS_MINOR_VERSION 102
+#define MPT3SAS_DRIVER_VERSION "12.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 12
+#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -122,11 +122,16 @@
#define NO_SLEEP 0
#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
+/* reserved for issuing internally framed scsi io cmds */
+#define INTERNAL_SCSIIO_CMDS_COUNT 3
#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit */
#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF
+#define MAX_CHAIN_ELEMT_SZ 16
+#define DEFAULT_NUM_FWCHAIN_ELEMTS 8
+
/*
* reset phases
*/
@@ -398,6 +403,7 @@ struct MPT3SAS_DEVICE {
u8 configured_lun;
u8 block;
u8 tlr_snoop_check;
+ u8 ignore_delay_remove;
};
#define MPT3_CMD_NOT_USED 0x8000 /* free */
@@ -643,6 +649,7 @@ struct chain_tracker {
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
* @tracker_list: list of free request (ioc->free_list)
+ * @msix_io: MSI-X index captured at I/O submission time
*/
struct scsiio_tracker {
u16 smid;
@@ -651,6 +658,7 @@ struct scsiio_tracker {
u8 direct_io;
struct list_head chain_list;
struct list_head tracker_list;
+ u16 msix_io;
};
/**
@@ -676,6 +684,25 @@ struct _tr_list {
u16 state;
};
+/**
+ * struct _sc_list - delayed SAS_IO_UNIT_CONTROL message list
+ * @handle: device handle
+ */
+struct _sc_list {
+ struct list_head list;
+ u16 handle;
+};
+
+/**
+ * struct _event_ack_list - delayed event acknowledgment list
+ * @Event: Event ID
+ * @EventContext: used to track the event uniquely
+ */
+struct _event_ack_list {
+ struct list_head list;
+ u16 Event;
+ u32 EventContext;
+};
/**
* struct adapter_reply_queue - the reply queue struct
@@ -737,7 +764,7 @@ struct mpt3sas_facts {
u32 IOCCapabilities;
union mpi3_version_union FWVersion;
u16 IOCRequestFrameSize;
- u16 Reserved3;
+ u16 IOCMaxChainSegmentSize;
u16 MaxInitiators;
u16 MaxTargets;
u16 MaxSasExpanders;
@@ -884,6 +911,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @max_sges_in_chain_message: number sg elements per chain
* @chains_needed_per_io: max chains per io
* @chain_depth: total chains allocated
+ * @chain_segment_sz: chain segment size in bytes; bounds how many
+ * SGEs fit in a single chain buffer
* @hi_priority_smid:
* @hi_priority:
* @hi_priority_dma:
@@ -921,6 +950,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @replyPostRegisterIndex: index of next position in Reply Desc Post Queue
* @delayed_tr_list: target reset link list
* @delayed_tr_volume_list: volume target reset link list
+ * @delayed_sc_list: delayed SAS_IO_UNIT_CONTROL message list
+ * @delayed_event_ack_list: delayed Event ACK message list
* @temp_sensors_count: flag to carry the number of temperature sensors
* @pci_access_mutex: Mutex to synchronize ioctl,sysfs show path and
* pci resource handling. PCI resource freeing will lead to free
@@ -1089,6 +1120,7 @@ struct MPT3SAS_ADAPTER {
u16 max_sges_in_chain_message;
u16 chains_needed_per_io;
u32 chain_depth;
+ u16 chain_segment_sz;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -1142,6 +1174,8 @@ struct MPT3SAS_ADAPTER {
struct list_head delayed_tr_list;
struct list_head delayed_tr_volume_list;
+ struct list_head delayed_sc_list;
+ struct list_head delayed_event_ack_list;
u8 temp_sensors_count;
struct mutex pci_access_mutex;
@@ -1213,7 +1247,8 @@ void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 msix_task);
void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void mpt3sas_base_initialize_callback_handler(void);
u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
@@ -1259,6 +1294,8 @@ void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address);
+u8 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
struct _sas_node *mpt3sas_scsih_expander_find_by_handle(
struct MPT3SAS_ADAPTER *ioc, u16 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index d8366b056b70..7d00f09666b6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -401,7 +401,8 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
Mpi2EventNotificationReply_t *mpi_reply;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ if (mpi_reply)
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
return 1;
}
@@ -410,7 +411,7 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
* @ioc: per adapter object
* @iocpp: The ioc pointer is returned in this.
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
- * MPI25_VERSION for mpt3ctl ioctl device.
+ * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*
* Return (-1) means error, else ioc_number.
*/
@@ -419,6 +420,7 @@ _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
int mpi_version)
{
struct MPT3SAS_ADAPTER *ioc;
+ int version = 0;
/* global ioc lock to protect controller on list operations */
spin_lock(&gioc_lock);
list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
@@ -427,8 +429,21 @@ _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
/* Check whether this ioctl command is from right
* ioctl device or not, if not continue the search.
*/
- if (ioc->hba_mpi_version_belonged != mpi_version)
- continue;
+ version = ioc->hba_mpi_version_belonged;
+ /* MPI25_VERSION and MPI26_VERSION use the same ioctl
+ * device.
+ */
+ if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
+ if ((version == MPI25_VERSION) ||
+ (version == MPI26_VERSION))
+ goto out;
+ else
+ continue;
+ } else {
+ if (version != mpi_version)
+ continue;
+ }
+out:
spin_unlock(&gioc_lock);
*iocpp = ioc;
return ioc_number;
@@ -817,7 +832,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
tm_request->DevHandle));
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
data_in_dma, data_in_sz);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
break;
}
case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -1053,6 +1068,7 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
break;
case MPI25_VERSION:
+ case MPI26_VERSION:
karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
break;
@@ -2203,7 +2219,7 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
* @arg - user space data buffer
* @compat - handles 32 bit applications in 64bit os
* @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
- * MPI25_VERSION for mpt3ctl ioctl device.
+ * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
*/
static long
_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
@@ -2341,10 +2357,12 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long ret;
- /* pass MPI25_VERSION value, to indicate that this ioctl cmd
+ /* pass MPI25_VERSION | MPI26_VERSION value,
+ * to indicate that this ioctl cmd
* came from mpt3ctl ioctl device.
*/
- ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI25_VERSION);
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
+ MPI25_VERSION | MPI26_VERSION);
return ret;
}
@@ -2379,7 +2397,8 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
{
long ret;
- ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI25_VERSION);
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
+ MPI25_VERSION | MPI26_VERSION);
return ret;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 9ab77b06434d..e0e4920d0fa6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1589,10 +1589,16 @@ scsih_get_resync(struct device *dev)
percent_complete = 0;
out:
- if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
- if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+ break;
+ }
}
/**
@@ -1650,10 +1656,15 @@ scsih_get_state(struct device *dev)
break;
}
out:
- if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
raid_set_state(mpt2sas_raid_template, dev, state);
- if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
raid_set_state(mpt3sas_raid_template, dev, state);
+ break;
+ }
}
/**
@@ -1682,12 +1693,17 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
break;
}
- if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+ switch (ioc->hba_mpi_version_belonged) {
+ case MPI2_VERSION:
raid_set_level(mpt2sas_raid_template,
- &sdev->sdev_gendev, level);
- if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+ &sdev->sdev_gendev, level);
+ break;
+ case MPI25_VERSION:
+ case MPI26_VERSION:
raid_set_level(mpt3sas_raid_template,
- &sdev->sdev_gendev, level);
+ &sdev->sdev_gendev, level);
+ break;
+ }
}
@@ -1937,7 +1953,15 @@ scsih_slave_configure(struct scsi_device *sdev)
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
ssp_target = 1;
- ds = "SSP";
+ if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SEP) {
+ sdev_printk(KERN_WARNING, sdev,
+ "set ignore_delay_remove for handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ sas_device_priv_data->ignore_delay_remove = 1;
+ ds = "SES";
+ } else
+ ds = "SSP";
} else {
qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
@@ -2193,6 +2217,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
unsigned long timeleft;
struct scsiio_tracker *scsi_lookup = NULL;
int rc;
+ u16 msix_task = 0;
if (m_type == TM_MUTEX_ON)
mutex_lock(&ioc->tm_cmds.mutex);
@@ -2256,7 +2281,12 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
+ (scsi_lookup->msix_io < ioc->reply_queue_count))
+ msix_task = scsi_lookup->msix_io;
+ else
+ msix_task = 0;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -2383,7 +2413,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
(unsigned long long)
sas_device->enclosure_logical_id,
sas_device->slot);
- if (sas_device->connector_name)
+ if (sas_device->connector_name[0] != '\0')
starget_printk(KERN_INFO, starget,
"enclosure level(0x%04x),connector name(%s)\n",
sas_device->enclosure_level,
@@ -2927,6 +2957,12 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
continue;
if (sas_device_priv_data->block)
continue;
+ if (sas_device_priv_data->ignore_delay_remove) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip device_block for SES handle(0x%04x)\n",
+ __func__, sas_device_priv_data->sas_target->handle);
+ continue;
+ }
_scsih_internal_device_block(sdev, sas_device_priv_data);
}
}
@@ -2959,6 +2995,12 @@ _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
continue;
if (sas_device->pend_sas_rphy_add)
continue;
+ if (sas_device_priv_data->ignore_delay_remove) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s skip device_block for SES handle(0x%04x)\n",
+ __func__, sas_device_priv_data->sas_target->handle);
+ continue;
+ }
_scsih_internal_device_block(sdev, sas_device_priv_data);
}
@@ -3118,7 +3160,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
" slot(%d)\n", ioc->name, (unsigned long long)
sas_device->enclosure_logical_id,
sas_device->slot));
- if (sas_device->connector_name)
+ if (sas_device->connector_name[0] != '\0')
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"setting delete flag: enclosure level(0x%04x),"
" connector name( %s)\n", ioc->name,
@@ -3151,7 +3193,7 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
out:
@@ -3186,6 +3228,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
Mpi2SasIoUnitControlRequest_t *mpi_request;
u16 smid_sas_ctrl;
u32 ioc_state;
+ struct _sc_list *delayed_sc;
if (ioc->remove_host) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3228,9 +3271,16 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- return 1;
+ delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
+ if (!delayed_sc)
+ return _scsih_check_for_pending_tm(ioc, smid);
+ INIT_LIST_HEAD(&delayed_sc->list);
+ delayed_sc->handle = mpi_request_tm->DevHandle;
+ list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:sc:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return _scsih_check_for_pending_tm(ioc, smid);
}
dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3281,7 +3331,7 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
}
- return 1;
+ return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}
/**
@@ -3332,7 +3382,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
- mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
}
/**
@@ -3388,6 +3438,142 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
return _scsih_check_for_pending_tm(ioc, smid);
}
+/**
+ * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @event: Event ID
+ * @event_context: used to track events uniquely
+ *
+ * Context - processed in interrupt context.
+ */
+void
+_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
+ u32 event_context)
+{
+ Mpi2EventAckRequest_t *ack_request;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
+
+ /* Without releasing the smid, just update the
+ * callback index and reuse the same smid for
+ * processing this delayed request.
+ */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
+ ioc->name, le16_to_cpu(event), smid,
+ ioc->base_cb_idx));
+ ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = event;
+ ack_request->EventContext = event_context;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ mpt3sas_base_put_smid_default(ioc, smid);
+}
+
+/**
+ * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
+ * sas_io_unit_ctrl messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Context - processed in interrupt context.
+ */
+void
+_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid, u16 handle)
+ {
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u32 ioc_state;
+ int i = smid - ioc->internal_smid;
+ unsigned long flags;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed\n",
+ ioc->name, __func__));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery\n",
+ ioc->name, __func__));
+ return;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational\n",
+ ioc->name, __func__));
+ return;
+ }
+
+ /* Without releasing the smid, just update the
+ * callback index and reuse the same smid for
+ * processing this delayed request.
+ */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, le16_to_cpu(handle), smid,
+ ioc->tm_sas_control_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+ mpi_request->DevHandle = handle;
+ mpt3sas_base_put_smid_default(ioc, smid);
+}
+
+/**
+ * _scsih_check_for_pending_internal_cmds - check for pending internal messages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Context: Executed in interrupt context
+ *
+ * This will check the delayed internal message lists and process the
+ * next pending request.
+ *
+ * Return 1 if the message frame should be freed by _base_interrupt,
+ * or 0 if the frame's smid was reused here for a delayed request.
+ */
+u8
+mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct _sc_list *delayed_sc;
+ struct _event_ack_list *delayed_event_ack;
+
+ if (!list_empty(&ioc->delayed_event_ack_list)) {
+ delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
+ struct _event_ack_list, list);
+ _scsih_issue_delayed_event_ack(ioc, smid,
+ delayed_event_ack->Event, delayed_event_ack->EventContext);
+ list_del(&delayed_event_ack->list);
+ kfree(delayed_event_ack);
+ return 0;
+ }
+
+ if (!list_empty(&ioc->delayed_sc_list)) {
+ delayed_sc = list_entry(ioc->delayed_sc_list.next,
+ struct _sc_list, list);
+ _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
+ delayed_sc->handle);
+ list_del(&delayed_sc->list);
+ kfree(delayed_sc);
+ return 0;
+ }
+ return 1;
+}
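/*
 * Caller contract implied by the kernel-doc above (illustrative): a
 * callback returning 1 lets _base_interrupt release the message frame,
 * while returning 0 signals that the smid was re-armed here for a
 * delayed request and must not be freed by the caller.
 */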
/**
* _scsih_check_for_pending_tm - check for pending task management
@@ -4084,6 +4270,9 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
desc_ioc_state = "eedp app tag error";
break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
+ desc_ioc_state = "insufficient power";
+ break;
default:
desc_ioc_state = "unknown";
break;
@@ -4609,6 +4798,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
case MPI2_IOCSTATUS_INVALID_STATE:
case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
default:
scmd->result = DID_SOFT_ERROR << 16;
break;
@@ -8391,7 +8581,8 @@ static struct raid_function_template mpt3sas_raid_functions = {
* @pdev: PCI device struct
*
* return MPI2_VERSION for SAS 2.0 HBA devices,
- * MPI25_VERSION for SAS 3.0 HBA devices.
+ * MPI25_VERSION for SAS 3.0 HBA devices, and
+ * MPI26_VERSION for Cutlass & Intruder SAS 3.0 HBA devices
*/
u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
@@ -8423,6 +8614,17 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI25_MFGPAGE_DEVID_SAS3108_5:
case MPI25_MFGPAGE_DEVID_SAS3108_6:
return MPI25_VERSION;
+ case MPI26_MFGPAGE_DEVID_SAS3216:
+ case MPI26_MFGPAGE_DEVID_SAS3224:
+ case MPI26_MFGPAGE_DEVID_SAS3316_1:
+ case MPI26_MFGPAGE_DEVID_SAS3316_2:
+ case MPI26_MFGPAGE_DEVID_SAS3316_3:
+ case MPI26_MFGPAGE_DEVID_SAS3316_4:
+ case MPI26_MFGPAGE_DEVID_SAS3324_1:
+ case MPI26_MFGPAGE_DEVID_SAS3324_2:
+ case MPI26_MFGPAGE_DEVID_SAS3324_3:
+ case MPI26_MFGPAGE_DEVID_SAS3324_4:
+ return MPI26_VERSION;
}
return 0;
}
@@ -8456,7 +8658,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
* for other generation HBA's return with -ENODEV
*/
- if ((hbas_to_enumerate == 2) && (hba_mpi_version != MPI25_VERSION))
+ if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
+ || hba_mpi_version == MPI26_VERSION)))
return -ENODEV;
switch (hba_mpi_version) {
@@ -8478,6 +8681,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
break;
case MPI25_VERSION:
+ case MPI26_VERSION:
/* Use mpt3sas driver host template for SAS 3.0 HBA's */
shost = scsi_host_alloc(&mpt3sas_driver_template,
sizeof(struct MPT3SAS_ADAPTER));
@@ -8488,7 +8692,9 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->hba_mpi_version_belonged = hba_mpi_version;
ioc->id = mpt3_ids++;
sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
- if (pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION)
+ if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
+ pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
+ (ioc->hba_mpi_version_belonged == MPI26_VERSION))
ioc->msix96_vector = 1;
break;
default:
@@ -8533,6 +8739,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->raid_device_list);
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
INIT_LIST_HEAD(&ioc->delayed_tr_list);
+ INIT_LIST_HEAD(&ioc->delayed_sc_list);
+ INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
INIT_LIST_HEAD(&ioc->reply_queue_list);
@@ -8866,6 +9074,28 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
PCI_ANY_ID, PCI_ANY_ID },
+ /* Cutlass ~ 3216 and 3224 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Intruder ~ 3316 and 3324 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index ca36d7ea0964..6a84b82d71bb 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1418,7 +1418,6 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
u32 ioc_state;
unsigned long timeleft;
void *psge;
- u32 sgl_flags;
u8 issue_reset = 0;
void *data_out = NULL;
dma_addr_t data_out_dma;
@@ -1507,24 +1506,10 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
cpu_to_le16(sizeof(struct phy_error_log_request));
psge = &mpi_request->SGL;
- /* WRITE sgel first */
- sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
- MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
- sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- ioc->base_add_sg_single(psge, sgl_flags |
- sizeof(struct phy_control_request), data_out_dma);
-
- /* incr sgel */
- psge += ioc->sge_size;
-
- /* READ sgel last */
- sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
- MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
- MPI2_SGE_FLAGS_END_OF_LIST);
- sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
- ioc->base_add_sg_single(psge, sgl_flags |
- sizeof(struct phy_control_reply), data_out_dma +
- sizeof(struct phy_control_request));
+ ioc->build_sg(ioc, psge, data_out_dma,
+ sizeof(struct phy_control_request),
+ data_out_dma + sizeof(struct phy_control_request),
+ sizeof(struct phy_control_reply));
dtransportprintk(ioc, pr_info(MPT3SAS_FMT
"phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
@@ -1615,7 +1600,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
SMP_PHY_CONTROL_LINK_RESET);
/* handle hba phys */
- memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
mpi_request.Operation = hard_reset ?
MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 02360de6b7e0..39285070f3b5 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2629,7 +2629,7 @@ static void mvumi_shutdown(struct pci_dev *pdev)
mvumi_flush_cache(mhba);
}
-static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mvumi_hba *mhba = NULL;
@@ -2648,7 +2648,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int mvumi_resume(struct pci_dev *pdev)
+static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
{
int ret;
struct mvumi_hba *mhba = NULL;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index d8a2b5185f56..3b11aad03752 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -2006,9 +2006,8 @@ EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps);
*/
void osd_set_caps(struct osd_cdb *cdb, const void *caps)
{
- bool is_ver1 = true;
/* NOTE: They start at same address */
- memcpy(&cdb->v1.caps, caps, is_ver1 ? OSDv1_CAP_LEN : OSD_CAP_LEN);
+ memcpy(&cdb->v1.caps, caps, OSDv1_CAP_LEN);
}
bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused)
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index ee00e27ba396..f6ad579280d4 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -37,6 +37,7 @@ typedef struct {
unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */
unsigned int failed:1; /* Failure flag */
unsigned wanted:1; /* Parport sharing busy flag */
+ unsigned int dev_no; /* Device number */
wait_queue_head_t *waiting;
struct Scsi_Host *host;
struct list_head list;
@@ -985,15 +986,40 @@ static struct scsi_host_template ppa_template = {
static LIST_HEAD(ppa_hosts);
+/*
+ * Finds the first available device number that can be allotted to the
+ * new ppa device and returns the address of the previous node so that
+ * we can insert after it and keep the list in ascending order.
+ */
+
+static inline ppa_struct *find_parent(void)
+{
+ ppa_struct *dev, *par = NULL;
+ unsigned int cnt = 0;
+
+ if (list_empty(&ppa_hosts))
+ return NULL;
+
+ list_for_each_entry(dev, &ppa_hosts, list) {
+ if (dev->dev_no != cnt)
+ return par;
+ cnt++;
+ par = dev;
+ }
+
+ return par;
+}
+
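/*
 * Worked example for find_parent() above (illustrative): with devices
 * numbered 0, 1 and 3 on ppa_hosts, the scan stops at cnt == 2 and
 * returns the node numbered 1, so __ppa_attach() below assigns
 * dev_no = 2, reusing the hole in the numbering.
 */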
static int __ppa_attach(struct parport *pb)
{
struct Scsi_Host *host;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting);
DEFINE_WAIT(wait);
- ppa_struct *dev;
+ ppa_struct *dev, *temp;
int ports;
int modes, ppb, ppb_hi;
int err = -ENOMEM;
+ struct pardev_cb ppa_cb;
dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL);
if (!dev)
@@ -1002,8 +1028,15 @@ static int __ppa_attach(struct parport *pb)
dev->mode = PPA_AUTODETECT;
dev->recon_tmo = PPA_RECON_TMO;
init_waitqueue_head(&waiting);
- dev->dev = parport_register_device(pb, "ppa", NULL, ppa_wakeup,
- NULL, 0, dev);
+ temp = find_parent();
+ if (temp)
+ dev->dev_no = temp->dev_no + 1;
+
+ memset(&ppa_cb, 0, sizeof(ppa_cb));
+ ppa_cb.private = dev;
+ ppa_cb.wakeup = ppa_wakeup;
+
+ dev->dev = parport_register_dev_model(pb, "ppa", &ppa_cb, dev->dev_no);
if (!dev->dev)
goto out;
@@ -1110,9 +1143,10 @@ static void ppa_detach(struct parport *pb)
}
static struct parport_driver ppa_driver = {
- .name = "ppa",
- .attach = ppa_attach,
- .detach = ppa_detach,
+ .name = "ppa",
+ .match_port = ppa_attach,
+ .detach = ppa_detach,
+ .devmodel = true,
};
static int __init ppa_driver_init(void)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6992ebc50c87..4dc06a13cab8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -272,8 +272,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
iter = (uint32_t *)buf;
chksum = 0;
- for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
- chksum += le32_to_cpu(*iter++);
+ for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
+ chksum += le32_to_cpu(*iter);
chksum = ~chksum + 1;
*iter = cpu_to_le32(chksum);
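/*
 * Two's-complement checksum illustration (illustrative values): if the
 * first n-1 words sum to 0x1234, the stored word is
 * ~0x1234 + 1 == 0xFFFFEDCC, so summing all n words yields 0 mod 2^32.
 */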
} else {
@@ -562,6 +562,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ uint32_t faddr;
if (unlikely(pci_channel_offline(ha->pdev)))
return -EAGAIN;
@@ -569,9 +570,16 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EINVAL;
- if (IS_NOCACHE_VPD_TYPE(ha))
- ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
+ if (IS_NOCACHE_VPD_TYPE(ha)) {
+ faddr = ha->flt_region_vpd << 2;
+
+ if (IS_QLA27XX(ha) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_vpd_sec << 2;
+
+ ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
ha->vpd_size);
+ }
return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
@@ -1909,7 +1917,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_reset_active(vha))
goto done;
- stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
+ stats = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
if (stats == NULL) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
@@ -1957,7 +1966,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
do_div(pfc_host_stat->seconds_since_last_reset, HZ);
done_free:
- dma_pool_free(ha->s_dma_pool, stats, stats_dma);
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
+ stats, stats_dma);
done:
return pfc_host_stat;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c26acde797f0..392c147d5793 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2107,6 +2107,195 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
}
static int
+qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_flash_update_caps cap;
+
+ if (!(IS_QLA27XX(ha)))
+ return -EPERM;
+
+ memset(&cap, 0, sizeof(cap));
+ cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
+ (uint64_t)ha->fw_attributes_ext[0] << 32 |
+ (uint64_t)ha->fw_attributes_h << 16 |
+ (uint64_t)ha->fw_attributes;
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(cap);
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
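/*
 * 64-bit capability word assembled above (illustrative layout):
 *   bits 63..48  fw_attributes_ext[1]
 *   bits 47..32  fw_attributes_ext[0]
 *   bits 31..16  fw_attributes_h
 *   bits 15..0   fw_attributes
 */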
+static int
+qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ uint64_t online_fw_attr = 0;
+ struct qla_flash_update_caps cap;
+
+ if (!(IS_QLA27XX(ha)))
+ return -EPERM;
+
+ memset(&cap, 0, sizeof(cap));
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
+
+ online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
+ (uint64_t)ha->fw_attributes_ext[0] << 32 |
+ (uint64_t)ha->fw_attributes_h << 16 |
+ (uint64_t)ha->fw_attributes;
+
+ if (online_fw_attr != cap.capabilities) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_INVALID_PARAM;
+ return -EINVAL;
+ }
+
+ if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_INVALID_PARAM;
+ return -EINVAL;
+ }
+
+ bsg_job->reply->reply_payload_rcv_len = 0;
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+ EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
+static int
+qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_bbcr_data bbcr;
+ uint16_t loop_id, topo, sw_cap;
+ uint8_t domain, area, al_pa, state;
+ int rval;
+
+ if (!(IS_QLA27XX(ha)))
+ return -EPERM;
+
+ memset(&bbcr, 0, sizeof(bbcr));
+
+ if (vha->flags.bbcr_enable)
+ bbcr.status = QLA_BBCR_STATUS_ENABLED;
+ else
+ bbcr.status = QLA_BBCR_STATUS_DISABLED;
+
+ if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
+ rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
+ &area, &domain, &topo, &sw_cap);
+ if (rval != QLA_SUCCESS) {
+ bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
+ bbcr.state = QLA_BBCR_STATE_OFFLINE;
+ bbcr.mbx1 = loop_id;
+ goto done;
+ }
+
+ state = (vha->bbcr >> 12) & 0x1;
+
+ if (state) {
+ bbcr.state = QLA_BBCR_STATE_OFFLINE;
+ bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
+ } else {
+ bbcr.state = QLA_BBCR_STATE_ONLINE;
+ bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
+ }
+
+ bbcr.configured_bbscn = vha->bbcr & 0xf;
+ }
+
+done:
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+ return 0;
+}
+
+static int
+qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = bsg_job->shost;
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct link_statistics *stats = NULL;
+ dma_addr_t stats_dma;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ goto done;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto done;
+
+ if (qla2x00_reset_active(vha))
+ goto done;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ goto done;
+
+ stats = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct link_statistics), &stats_dma, GFP_KERNEL);
+ if (!stats) {
+ ql_log(ql_log_warn, vha, 0x70e2,
+ "Failed to allocate memory for stats.\n");
+ goto done;
+ }
+
+ memset(stats, 0, sizeof(struct link_statistics));
+
+ rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
+
+ if (rval != QLA_SUCCESS)
+ goto done_free;
+
+ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
+ (uint8_t *)stats, sizeof(struct link_statistics));
+
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, stats, sizeof(struct link_statistics));
+ bsg_job->reply->reply_payload_rcv_len = sizeof(struct link_statistics);
+
+ bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_job->reply->result = DID_OK << 16;
+ bsg_job->job_done(bsg_job);
+
+done_free:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
+ stats, stats_dma);
+done:
+ return rval;
+}
+
+static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -2161,6 +2350,18 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
case QL_VND_SERDES_OP_EX:
return qla8044_serdes_op(bsg_job);
+ case QL_VND_GET_FLASH_UPDATE_CAPS:
+ return qla27xx_get_flash_upd_cap(bsg_job);
+
+ case QL_VND_SET_FLASH_UPDATE_CAPS:
+ return qla27xx_set_flash_upd_cap(bsg_job);
+
+ case QL_VND_GET_BBCR_DATA:
+ return qla27xx_get_bbcr_data(bsg_job);
+
+ case QL_VND_GET_PRIV_STATS:
+ return qla2x00_get_priv_stats(bsg_job);
+
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index d38f9efa56fa..c80192d45536 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -25,6 +25,10 @@
#define QL_VND_FX00_MGMT_CMD 0x12
#define QL_VND_SERDES_OP 0x13
#define QL_VND_SERDES_OP_EX 0x14
+#define QL_VND_GET_FLASH_UPDATE_CAPS 0x15
+#define QL_VND_SET_FLASH_UPDATE_CAPS 0x16
+#define QL_VND_GET_BBCR_DATA 0x17
+#define QL_VND_GET_PRIV_STATS 0x18
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -232,4 +236,34 @@ struct qla_serdes_reg_ex {
uint32_t val;
} __packed;
+struct qla_flash_update_caps {
+ uint64_t capabilities;
+ uint32_t outage_duration;
+ uint8_t reserved[20];
+} __packed;
+
+/* BB_CR Status */
+#define QLA_BBCR_STATUS_DISABLED 0
+#define QLA_BBCR_STATUS_ENABLED 1
+#define QLA_BBCR_STATUS_UNKNOWN 2
+
+/* BB_CR State */
+#define QLA_BBCR_STATE_OFFLINE 0
+#define QLA_BBCR_STATE_ONLINE 1
+
+/* BB_CR Offline Reason Code */
+#define QLA_BBCR_REASON_PORT_SPEED 1
+#define QLA_BBCR_REASON_PEER_PORT 2
+#define QLA_BBCR_REASON_SWITCH 3
+#define QLA_BBCR_REASON_LOGIN_REJECT 4
+
+struct qla_bbcr_data {
+ uint8_t status; /* 0 - disabled, 1 - enabled, 2 - unknown */
+ uint8_t state; /* 1 - online, 0 - offline */
+ uint8_t configured_bbscn; /* 0-15 */
+ uint8_t negotiated_bbscn; /* 0-15 */
+ uint8_t offline_reason_code;
+ uint16_t mbx1; /* Port state */
+ uint8_t reserved[9];
+} __packed;
#endif
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cd0d94ea7f74..b64c504ff12f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,7 +11,7 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x017f | 0x0146 |
+ * | Module Init and Probe | 0x018f | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e-0x0170 |
* | Mailbox commands | 0x1192 | |
@@ -27,11 +27,12 @@
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
* | Async Events | 0x5089 | 0x502b-0x502f |
+ * | | | 0x505e |
* | | | 0x5084,0x5075 |
* | | | 0x503d,0x5044 |
* | | | 0x507b,0x505f |
* | Timer Routines | 0x6012 | |
- * | User Space Interactions | 0x70e65 | 0x7018,0x702e |
+ * | User Space Interactions | 0x70e3 | 0x7018,0x702e |
* | | | 0x7020,0x7024 |
* | | | 0x7039,0x7045 |
* | | | 0x7073-0x7075 |
@@ -293,8 +294,8 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
WRT_REG_DWORD(&reg->iobase_addr, iobase);
dmp_reg = &reg->iobase_window;
- while (count--)
- *buf++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for ( ; count--; dmp_reg++)
+ *buf++ = htonl(RD_REG_DWORD(dmp_reg));
return buf;
}
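/*
 * Rationale for hoisting the increment above (hedged): if RD_REG_DWORD()
 * is, or ever becomes, a macro that expands its argument more than once,
 * e.g.
 *
 *   #define RD_TWICE(p) (readl(p) ? readl(p) : 0)
 *
 * then "RD_TWICE(dmp_reg++)" would advance the pointer twice per call;
 * keeping the increment in the for-clause sidesteps that hazard.
 */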
@@ -456,8 +457,8 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
{
uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
- while (count--)
- *buf++ = htons(RD_REG_WORD(dmp_reg++));
+ for ( ; count--; dmp_reg++)
+ *buf++ = htons(RD_REG_WORD(dmp_reg));
}
static inline void *
@@ -732,16 +733,18 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
dmp_reg = &reg->u.isp2300.req_q_in;
- for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
- fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
+ cnt++, dmp_reg++)
+ fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
dmp_reg = &reg->u.isp2300.mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
+ cnt++, dmp_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
WRT_REG_WORD(&reg->ctrl_status, 0x40);
qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
@@ -751,8 +754,9 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
WRT_REG_WORD(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
+ cnt++, dmp_reg++)
+ fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
WRT_REG_WORD(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
@@ -895,25 +899,25 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
}
if (rval == QLA_SUCCESS) {
dmp_reg = &reg->flash_address;
- for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
- fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
+ fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
dmp_reg = &reg->u.isp2100.mailbox0;
- for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+ for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
if (cnt == 8)
dmp_reg = &reg->u_end.isp2200.mailbox8;
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
}
dmp_reg = &reg->u.isp2100.unused_2[0];
- for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
- fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
+ fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
WRT_REG_WORD(&reg->ctrl_status, 0x00);
dmp_reg = &reg->risc_hw;
- for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
- fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
+ fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
WRT_REG_WORD(&reg->pcr, 0x2000);
qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
@@ -1095,8 +1099,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1128,8 +1132,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1167,20 +1171,20 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1358,8 +1362,10 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
@@ -1368,8 +1374,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1417,8 +1423,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1481,20 +1487,20 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -1679,8 +1685,10 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
@@ -1689,8 +1697,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
@@ -1738,8 +1746,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -1802,20 +1810,20 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
@@ -2022,8 +2030,10 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
RD_REG_DWORD(&reg->iobase_addr);
WRT_REG_DWORD(&reg->iobase_window, 0x01);
dmp_reg = &reg->iobase_c4;
- fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
- fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
+ fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
+ fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
+ dmp_reg++;
fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
@@ -2032,8 +2042,8 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Host interface registers. */
dmp_reg = &reg->flash_addr;
- for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
- fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
+ fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
/* Disable interrupts. */
WRT_REG_DWORD(&reg->ictrl, 0);
@@ -2081,8 +2091,8 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */
mbx_reg = &reg->mailbox0;
- for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
- fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
+ for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
+ fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */
iter_reg = fw->xseq_gp_reg;
@@ -2177,20 +2187,20 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
iter_reg = fw->req0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->resp0_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
iter_reg = fw->req1_dma_reg;
iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
dmp_reg = &reg->iobase_q;
- for (cnt = 0; cnt < 7; cnt++)
- *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
+ for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
+ *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
/* Transmit DMA registers. */
iter_reg = fw->xmt0_dma_reg;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9872f3429e53..47f8b9b49bac 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1060,6 +1060,12 @@ struct mbx_cmd_32 {
#define FSTATE_FATAL_ERROR 4
#define FSTATE_LOOP_BACK_CONN 5
+#define QLA27XX_IMG_STATUS_VER_MAJOR 0x01
+#define QLA27XX_IMG_STATUS_VER_MINOR 0x00
+#define QLA27XX_IMG_STATUS_SIGN 0xFACEFADE
+#define QLA27XX_PRIMARY_IMAGE 1
+#define QLA27XX_SECONDARY_IMAGE 2
+
/*
* Port Database structure definition
* Little endian except where noted.
@@ -1248,13 +1254,41 @@ struct link_statistics {
uint32_t inval_xmit_word_cnt;
uint32_t inval_crc_cnt;
uint32_t lip_cnt;
- uint32_t unused1[0x1a];
+ uint32_t link_up_cnt;
+ uint32_t link_down_loop_init_tmo;
+ uint32_t link_down_los;
+ uint32_t link_down_loss_rcv_clk;
+ uint32_t reserved0[5];
+ uint32_t port_cfg_chg;
+ uint32_t reserved1[11];
+ uint32_t rsp_q_full;
+ uint32_t atio_q_full;
+ uint32_t drop_ae;
+ uint32_t els_proto_err;
+ uint32_t reserved2;
uint32_t tx_frames;
uint32_t rx_frames;
uint32_t discarded_frames;
uint32_t dropped_frames;
- uint32_t unused2[1];
+ uint32_t reserved3;
uint32_t nos_rcvd;
+ uint32_t reserved4[4];
+ uint32_t tx_prjt;
+ uint32_t rcv_exfail;
+ uint32_t rcv_abts;
+ uint32_t seq_frm_miss;
+ uint32_t corr_err;
+ uint32_t mb_rqst;
+ uint32_t nport_full;
+ uint32_t eofa;
+ uint32_t reserved5;
+ uint32_t fpm_recv_word_cnt_lo;
+ uint32_t fpm_recv_word_cnt_hi;
+ uint32_t fpm_disc_word_cnt_lo;
+ uint32_t fpm_disc_word_cnt_hi;
+ uint32_t fpm_xmit_word_cnt_lo;
+ uint32_t fpm_xmit_word_cnt_hi;
+ uint32_t reserved6[70];
};
/*
@@ -2929,6 +2963,7 @@ struct qlt_hw_data {
uint8_t tgt_node_name[WWN_SIZE];
+ struct dentry *dfs_tgt_sess;
struct list_head q_full_list;
uint32_t num_pend_cmds;
uint32_t num_qfull_cmds_alloc;
@@ -3433,14 +3468,20 @@ struct qla_hw_data {
uint32_t flt_region_flt;
uint32_t flt_region_fdt;
uint32_t flt_region_boot;
+ uint32_t flt_region_boot_sec;
uint32_t flt_region_fw;
+ uint32_t flt_region_fw_sec;
uint32_t flt_region_vpd_nvram;
uint32_t flt_region_vpd;
+ uint32_t flt_region_vpd_sec;
uint32_t flt_region_nvram;
uint32_t flt_region_npiv_conf;
uint32_t flt_region_gold_fw;
uint32_t flt_region_fcp_prio;
uint32_t flt_region_bootload;
+ uint32_t flt_region_img_status_pri;
+ uint32_t flt_region_img_status_sec;
+ uint8_t active_image;
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -3571,6 +3612,7 @@ typedef struct scsi_qla_host {
uint32_t delete_progress:1;
uint32_t fw_tgt_reported:1;
+ uint32_t bbcr_enable:1;
} flags;
atomic_t loop_state;
@@ -3703,8 +3745,19 @@ typedef struct scsi_qla_host {
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
struct qla_tgt_counters tgt_counters;
+ uint16_t bbcr;
} scsi_qla_host_t;
+struct qla27xx_image_status {
+ uint8_t image_status_mask;
+ uint16_t generation_number;
+ uint8_t reserved[3];
+ uint8_t ver_minor;
+ uint8_t ver_major;
+ uint32_t checksum;
+ uint32_t signature;
+} __packed;
+
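
[Editorial aside] qla27xx_find_valid_image() (added in qla_init.c below) reads this block from flash as sizeof(struct qla27xx_image_status) / sizeof(uint32_t) little-endian words, so the layout must stay a whole number of dwords; as defined it is 16 bytes (1+2+3+1+1+4+4). A compile-time guard along these lines, not part of the patch, would catch accidental layout drift, e.g. at the top of that function:

	BUILD_BUG_ON(sizeof(struct qla27xx_image_status) != 16);
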
#define SET_VP_IDX 1
#define SET_AL_PA 2
#define RESET_VP_IDX 3
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index cd8b96a4b0dd..34272fde8a5b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -13,6 +13,47 @@ static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;
static int
+qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
+{
+ scsi_qla_host_t *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ struct qla_tgt_sess *sess = NULL;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+ seq_printf(s, "%s\n",vha->host_str);
+ if (tgt) {
+ seq_printf(s, "Port ID Port Name Handle\n");
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ seq_printf(s, "%02x:%02x:%02x %8phC %d\n",
+ sess->s_id.b.domain, sess->s_id.b.area,
+ sess->s_id.b.al_pa, sess->port_name,
+ sess->loop_id);
+ }
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ }
+
+ return 0;
+}
+
+static int
+qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *vha = inode->i_private;
+ return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
+}
+
+static const struct file_operations dfs_tgt_sess_ops = {
+ .open = qla2x00_dfs_tgt_sess_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
@@ -248,6 +289,15 @@ create_nodes:
"Unable to create debugfs fce node.\n");
goto out;
}
+
+ ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
+ S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+ if (!ha->tgt.dfs_tgt_sess) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to create debugFS tgt_sess node.\n");
+ goto out;
+ }
+
out:
return 0;
}
@@ -257,6 +307,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ if (ha->tgt.dfs_tgt_sess) {
+ debugfs_remove(ha->tgt.dfs_tgt_sess);
+ ha->tgt.dfs_tgt_sess = NULL;
+ }
+
if (ha->dfs_fw_resource_cnt) {
debugfs_remove(ha->dfs_fw_resource_cnt);
ha->dfs_fw_resource_cnt = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 42bb357bf56b..4c0f3a774799 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1288,7 +1288,9 @@ struct vp_rpt_id_entry_24xx {
uint8_t vp_idx_map[16];
- uint8_t reserved_4[32];
+ uint8_t reserved_4[28];
+ uint16_t bbcr;
+ uint8_t reserved_5[6];
};
#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */
@@ -1393,6 +1395,16 @@ struct qla_flt_header {
#define FLT_REG_FCOE_NVRAM_0 0xAA
#define FLT_REG_FCOE_NVRAM_1 0xAC
+/* 27xx */
+#define FLT_REG_IMG_PRI_27XX 0x95
+#define FLT_REG_IMG_SEC_27XX 0x96
+#define FLT_REG_FW_SEC_27XX 0x02
+#define FLT_REG_BOOTLOAD_SEC_27XX 0x09
+#define FLT_REG_VPD_SEC_27XX_0 0x50
+#define FLT_REG_VPD_SEC_27XX_1 0x52
+#define FLT_REG_VPD_SEC_27XX_2 0xD8
+#define FLT_REG_VPD_SEC_27XX_3 0xDA
+
struct qla_flt_region {
uint32_t code;
uint32_t size;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0103e468e357..fe943772fe7b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -90,6 +90,7 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
extern int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
+extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
/*
* Global Data in qla_os.c source file.
@@ -121,6 +122,7 @@ extern int ql2xmdcapmask;
extern int ql2xmdenable;
extern int ql2xexlogins;
extern int ql2xexchoffld;
+extern int ql2xfwholdabts;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 692a7570b5e1..c56cdb35f3ed 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -157,8 +157,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
+ }
ql_dbg(ql_dbg_disc, vha, 0x2072,
"Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
@@ -2062,6 +2066,10 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
if (IS_P3P_TYPE(ha))
return;
+ /* Hold status IOCBs until ABTS response received. */
+ if (ql2xfwholdabts)
+ ha->fw_options[3] |= BIT_12;
+
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
@@ -2844,7 +2852,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (nv->login_timeout < 4)
nv->login_timeout = 4;
ha->login_timeout = nv->login_timeout;
- icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
@@ -5122,8 +5129,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
dptr = (uint32_t *)nv;
ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
ha->nvram_size);
- for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
- chksum += le32_to_cpu(*dptr++);
+ for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+ chksum += le32_to_cpu(*dptr);
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
"Contents of NVRAM\n");
@@ -5274,7 +5281,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
if (le16_to_cpu(nv->login_timeout) < 4)
nv->login_timeout = cpu_to_le16(4);
ha->login_timeout = le16_to_cpu(nv->login_timeout);
- icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
@@ -5346,6 +5352,93 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
return (rval);
}
+uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+{
+ struct qla27xx_image_status pri_image_status, sec_image_status;
+ uint8_t valid_pri_image, valid_sec_image;
+ uint32_t *wptr;
+ uint32_t cnt, chksum, size;
+ struct qla_hw_data *ha = vha->hw;
+
+ valid_pri_image = valid_sec_image = 1;
+ ha->active_image = 0;
+ size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t);
+
+ if (!ha->flt_region_img_status_pri) {
+ valid_pri_image = 0;
+ goto check_sec_image;
+ }
+
+ qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
+ ha->flt_region_img_status_pri, size);
+
+ if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "Primary image signature (0x%x) not valid\n",
+ pri_image_status.signature);
+ valid_pri_image = 0;
+ goto check_sec_image;
+ }
+
+ wptr = (uint32_t *)(&pri_image_status);
+ cnt = size;
+
+ for (chksum = 0; cnt--; wptr++)
+ chksum += le32_to_cpu(*wptr);
+ if (chksum) {
+ ql_dbg(ql_dbg_init, vha, 0x018c,
+ "Checksum validation failed for primary image (0x%x)\n",
+ chksum);
+ valid_pri_image = 0;
+ }
+
+check_sec_image:
+ if (!ha->flt_region_img_status_sec) {
+ valid_sec_image = 0;
+ goto check_valid_image;
+ }
+
+ qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+ ha->flt_region_img_status_sec, size);
+
+ if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
+ ql_dbg(ql_dbg_init, vha, 0x018d,
+ "Secondary image signature(0x%x) not valid\n",
+ sec_image_status.signature);
+ valid_sec_image = 0;
+ goto check_valid_image;
+ }
+
+ wptr = (uint32_t *)(&sec_image_status);
+ cnt = size;
+ for (chksum = 0; cnt--; wptr++)
+ chksum += le32_to_cpu(*wptr);
+ if (chksum) {
+ ql_dbg(ql_dbg_init, vha, 0x018e,
+ "Checksum validation failed for secondary image (0x%x)\n",
+ chksum);
+ valid_sec_image = 0;
+ }
+
+check_valid_image:
+ if (valid_pri_image && (pri_image_status.image_status_mask & 0x1))
+ ha->active_image = QLA27XX_PRIMARY_IMAGE;
+ if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) {
+ if (!ha->active_image ||
+ pri_image_status.generation_number <
+ sec_image_status.generation_number)
+ ha->active_image = QLA27XX_SECONDARY_IMAGE;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n",
+ ha->active_image == 0 ? "default bootld and fw" :
+ ha->active_image == QLA27XX_PRIMARY_IMAGE ? "primary" :
+ ha->active_image == QLA27XX_SECONDARY_IMAGE ? "secondary" :
+ "Invalid");
+
+ return ha->active_image;
+}
+
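
[Editorial aside] The selection rule above, restated as a pure function (the helper name and parameters are illustrative, not from the driver): the secondary image wins only when it is valid and enabled and either no primary image was chosen or the primary carries an older generation number.

static uint8_t pick_active_image(int pri_ok, uint16_t pri_gen,
				 int sec_ok, uint16_t sec_gen)
{
	uint8_t active = 0;			/* default boot/fw image */

	if (pri_ok)
		active = QLA27XX_PRIMARY_IMAGE;
	if (sec_ok && (!active || pri_gen < sec_gen))
		active = QLA27XX_SECONDARY_IMAGE;

	return active;
}
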
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
uint32_t faddr)
@@ -5368,6 +5461,10 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
dcode = (uint32_t *)req->ring;
*srisc_addr = 0;
+ if (IS_QLA27XX(ha) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_fw_sec;
+
/* Validate firmware image by checking version. */
qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
for (i = 0; i < 4; i++)
@@ -6068,8 +6165,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
ha->nvram_size);
dptr = (uint32_t *)nv;
- for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
- chksum += le32_to_cpu(*dptr++);
+ for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+ chksum += le32_to_cpu(*dptr);
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
"Contents of NVRAM:\n");
@@ -6231,7 +6328,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (le16_to_cpu(nv->login_timeout) < 4)
nv->login_timeout = cpu_to_le16(4);
ha->login_timeout = le16_to_cpu(nv->login_timeout);
- icb->login_timeout = nv->login_timeout;
/* Set minimum RATOV to 100 tenths of a second. */
ha->r_a_tov = 100;
@@ -6413,12 +6509,17 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ /* Hold status IOCBs until ABTS response received. */
+ if (ql2xfwholdabts)
+ ha->fw_options[3] |= BIT_12;
+
if (!ql2xetsenable)
- return;
+ goto out;
/* Enable ETS Burst. */
memset(ha->fw_options, 0, sizeof(ha->fw_options));
ha->fw_options[2] |= BIT_9;
+out:
qla2x00_set_fw_options(vha, ha->fw_options);
}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index a6b7f1588aa4..edc48f3b8230 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -87,8 +87,8 @@ host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
__le32 *odest = (__le32 *) dst;
uint32_t iter = bsize >> 2;
- for (; iter ; iter--)
- *odest++ = cpu_to_le32(*isrc++);
+ for ( ; iter--; isrc++)
+ *odest++ = cpu_to_le32(*isrc);
}
static inline void
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 4af95479a9db..5649c200d37c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -934,10 +934,6 @@ skip_rio:
break;
global_port_update:
- /* Port unavailable. */
- ql_log(ql_log_warn, vha, 0x505e,
- "Link is offline.\n");
-
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 87e6758302f6..968b84613096 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1349,6 +1349,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
if (IS_FWI2_CAPABLE(vha->hw))
mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
+ if (IS_QLA27XX(vha->hw))
+ mcp->in_mb |= MBX_15;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
@@ -1400,6 +1402,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
wwn_to_u64(vha->port_name));
}
}
+
+ if (IS_QLA27XX(vha->hw))
+ vha->bbcr = mcp->mb[15];
}
return rval;
@@ -2754,7 +2759,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *siter, *diter, dwords;
+ uint32_t *iter, dwords;
struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
@@ -2794,10 +2799,11 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
/* Copy over data -- firmware data is LE. */
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
"Done %s.\n", __func__);
- dwords = offsetof(struct link_statistics, unused1) / 4;
- siter = diter = &stats->link_fail_cnt;
- while (dwords--)
- *diter++ = le32_to_cpu(*siter++);
+ dwords = offsetof(struct link_statistics,
+ link_up_cnt) / 4;
+ iter = &stats->link_fail_cnt;
+ for ( ; dwords--; iter++)
+ le32_to_cpus(iter);
}
} else {
/* Failed. */
@@ -2814,7 +2820,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
int rval;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- uint32_t *siter, *diter, dwords;
+ uint32_t *iter, dwords;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
@@ -2843,9 +2849,9 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
"Done %s.\n", __func__);
/* Copy over data -- firmware data is LE. */
dwords = sizeof(struct link_statistics) / 4;
- siter = diter = &stats->link_fail_cnt;
- while (dwords--)
- *diter++ = le32_to_cpu(*siter++);
+ iter = &stats->link_fail_cnt;
+ for ( ; dwords--; iter++)
+ le32_to_cpus(iter);
}
} else {
/* Failed. */
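
[Editorial aside] The copy loops above switch from two cursors feeding le32_to_cpu(), which returns a converted value, to a single cursor and le32_to_cpus(), which converts in place through the pointer. A userspace sketch of the distinction (demo_* stand-ins, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

static uint32_t demo_le32_to_cpu(uint32_t v)
{
	const union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };

	if (probe.b[0])			/* little-endian host: identity */
		return v;
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

static void demo_le32_to_cpus(uint32_t *p)
{
	*p = demo_le32_to_cpu(*p);	/* in-place variant */
}

int main(void)
{
	uint32_t stats[2] = { 0x01020304, 0x0a0b0c0d };
	uint32_t *iter = stats;
	int dwords = 2;

	for ( ; dwords--; iter++)	/* same shape as the new loops */
		demo_le32_to_cpus(iter);
	printf("%08x %08x\n", stats[0], stats[1]);
	return 0;
}
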
@@ -3612,6 +3618,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
+ /* buffer to buffer credit flag */
+ vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0;
+
/* FA-WWN is only for physical port */
if (!vp_idx) {
void *wwpn = ha->init_cb->port_name;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f6c7ce35b542..7c0b60ca158f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -233,6 +233,13 @@ MODULE_PARM_DESC(ql2xexchoffld,
"Number of exchanges to offload. "
"0 (Default)- Disabled.");
+int ql2xfwholdabts = 0;
+module_param(ql2xfwholdabts, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xfwholdabts,
+ "Allow FW to hold status IOCB until ABTS rsp received. "
+ "0 (Default) Do not set fw option. "
+ "1 - Set fw option to hold ABTS.");
+
/*
* SCSI host template entry points
*/
@@ -2216,6 +2223,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2271:
@@ -2223,6 +2231,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
case PCI_DEVICE_ID_QLOGIC_ISP2261:
@@ -2230,6 +2239,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
ha->device_type |= DT_ZIO_SUPPORTED;
ha->device_type |= DT_FWI2;
ha->device_type |= DT_IIDMA;
+ ha->device_type |= DT_T10_PI;
ha->fw_srisc_address = RISC_START_ADDRESS_2400;
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3272ed5bbcc7..5e9392316425 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -610,8 +610,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
wptr = (uint16_t *)req->ring;
cnt = sizeof(struct qla_flt_location) >> 1;
- for (chksum = 0; cnt; cnt--)
- chksum += le16_to_cpu(*wptr++);
+ for (chksum = 0; cnt--; wptr++)
+ chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_log(ql_log_fatal, vha, 0x0045,
"Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
@@ -702,8 +702,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
}
cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
- for (chksum = 0; cnt; cnt--)
- chksum += le16_to_cpu(*wptr++);
+ for (chksum = 0; cnt--; wptr++)
+ chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_log(ql_log_fatal, vha, 0x0048,
"Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
@@ -846,6 +846,38 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
if (ha->port_no == 1)
ha->flt_region_nvram = start;
break;
+ case FLT_REG_IMG_PRI_27XX:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_img_status_pri = start;
+ break;
+ case FLT_REG_IMG_SEC_27XX:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_img_status_sec = start;
+ break;
+ case FLT_REG_FW_SEC_27XX:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_fw_sec = start;
+ break;
+ case FLT_REG_BOOTLOAD_SEC_27XX:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_boot_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_0:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_1:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_2:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_3:
+ if (IS_QLA27XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
}
}
goto done;
@@ -898,9 +930,8 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
fdt->sig[3] != 'D')
goto no_flash_data;
- for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
- cnt++)
- chksum += le16_to_cpu(*wptr++);
+ for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++)
+ chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_dbg(ql_dbg_init, vha, 0x004c,
"Inconsistent FDT detected:"
@@ -995,7 +1026,8 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
} else {
- ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr++);
+ ha->fcoe_dev_init_timeout = le32_to_cpu(*wptr);
+ wptr++;
ha->fcoe_reset_timeout = le32_to_cpu(*wptr);
}
ql_dbg(ql_dbg_init, vha, 0x004e,
@@ -1072,10 +1104,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, (uint8_t *)data,
ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
- cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
- sizeof(struct qla_npiv_entry)) >> 1;
- for (wptr = data, chksum = 0; cnt; cnt--)
- chksum += le16_to_cpu(*wptr++);
+ cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1;
+ for (wptr = data, chksum = 0; cnt--; wptr++)
+ chksum += le16_to_cpu(*wptr);
if (chksum) {
ql_dbg(ql_dbg_user, vha, 0x7092,
"Inconsistent NPIV-Config "
@@ -2989,6 +3020,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
uint8_t code_type, last_image;
int i;
struct qla_hw_data *ha = vha->hw;
+ uint32_t faddr = 0;
+
+ pcihdr = pcids = 0;
if (IS_P3P_TYPE(ha))
return ret;
@@ -3002,9 +3036,11 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
-
- /* Begin with first PCI expansion ROM header. */
pcihdr = ha->flt_region_boot << 2;
+ if (IS_QLA27XX(ha) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ pcihdr = ha->flt_region_boot_sec << 2;
+
last_image = 1;
do {
/* Verify PCI expansion ROM header. */
@@ -3077,8 +3113,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
/* Read firmware image information. */
memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
dcode = mbuf;
+ faddr = ha->flt_region_fw;
+ if (IS_QLA27XX(ha) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_fw_sec;
- qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
+ qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
for (i = 0; i < 4; i++)
dcode[i] = be32_to_cpu(dcode[i]);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index ee967becd257..8a44d1541eb4 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -641,7 +641,8 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
struct scsi_qla_host *vha = sess->vha;
- vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+ if (sess->se_sess)
+ vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
if (!list_empty(&sess->del_list_entry))
list_del_init(&sess->del_list_entry);
@@ -856,8 +857,12 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"Timeout: sess %p about to be deleted\n",
sess);
- ha->tgt.tgt_ops->shutdown_sess(sess);
- ha->tgt.tgt_ops->put_sess(sess);
+ if (sess->se_sess) {
+ ha->tgt.tgt_ops->shutdown_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ } else {
+ qlt_unreg_sess(sess);
+ }
} else {
schedule_delayed_work(&tgt->sess_del_work,
sess->expires - elapsed);
@@ -879,7 +884,6 @@ static struct qla_tgt_sess *qlt_create_sess(
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess;
unsigned long flags;
- unsigned char be_sid[3];
/* Check to avoid double sessions */
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -905,6 +909,14 @@ static struct qla_tgt_sess *qlt_create_sess(
if (sess->deleted)
qlt_undelete_sess(sess);
+ if (!sess->se_sess) {
+ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+ &sess->port_name[0], sess) < 0) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return NULL;
+ }
+ }
+
kref_get(&sess->se_sess->sess_kref);
ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
(fcport->flags & FCF_CONF_COMP_SUPPORTED));
@@ -948,26 +960,6 @@ static struct qla_tgt_sess *qlt_create_sess(
"Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
sess, vha->vha_tgt.qla_tgt);
- be_sid[0] = sess->s_id.b.domain;
- be_sid[1] = sess->s_id.b.area;
- be_sid[2] = sess->s_id.b.al_pa;
- /*
- * Determine if this fc_port->port_name is allowed to access
- * target mode using explict NodeACLs+MappedLUNs, or using
- * TPG demo mode. If this is successful a target mode FC nexus
- * is created.
- */
- if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
- &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
- kfree(sess);
- return NULL;
- }
- /*
- * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
- * access across ->tgt.sess_lock reaquire.
- */
- kref_get(&sess->se_sess->sess_kref);
-
sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
@@ -985,6 +977,23 @@ static struct qla_tgt_sess *qlt_create_sess(
fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
+ /*
+ * Determine if this fc_port->port_name is allowed to access
+ * target mode using explicit NodeACLs+MappedLUNs, or using
+ * TPG demo mode. If this is successful a target mode FC nexus
+ * is created.
+ */
+ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+ &fcport->port_name[0], sess) < 0) {
+ return NULL;
+ } else {
+ /*
+ * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+ * access across ->tgt.sess_lock reacquire.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
return sess;
}
@@ -1872,15 +1881,17 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
else
vha->req->cnt = vha->req->length -
(vha->req->ring_index - cnt);
- }
- if (unlikely(vha->req->cnt < (req_cnt + 2))) {
- ql_dbg(ql_dbg_io, vha, 0x305a,
- "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
- vha->vp_idx, vha->req->ring_index,
- vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
- return -EAGAIN;
+ if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+ ql_dbg(ql_dbg_io, vha, 0x305a,
+ "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
+ vha->vp_idx, vha->req->ring_index,
+ vha->req->cnt, req_cnt, cnt, cnt_in,
+ vha->req->length);
+ return -EAGAIN;
+ }
}
+
vha->req->cnt -= req_cnt;
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 22a6a767fe07..d857feeb6514 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -731,7 +731,7 @@ struct qla_tgt_func_tmpl {
void (*free_session)(struct qla_tgt_sess *);
int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
- void *, uint8_t *, uint16_t);
+ struct qla_tgt_sess *);
void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
const uint16_t);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 6d31faa8c57b..0bc93fa46dae 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.26-k"
+#define QLA2XXX_VERSION "8.07.00.33-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 1808a01cfb7e..c1461d225f08 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1406,6 +1406,39 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
transport_deregister_session(sess->se_sess);
}
+static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *p)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ struct qla_hw_data *ha = lport->qla_vha->hw;
+ struct se_node_acl *se_nacl = se_sess->se_node_acl;
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ struct qla_tgt_sess *qlat_sess = p;
+ uint16_t loop_id = qlat_sess->loop_id;
+ unsigned long flags;
+ unsigned char be_sid[3];
+
+ be_sid[0] = qlat_sess->s_id.b.domain;
+ be_sid[1] = qlat_sess->s_id.b.area;
+ be_sid[2] = qlat_sess->s_id.b.al_pa;
+
+ /*
+ * And now setup se_nacl and session pointers into HW lport internal
+ * mappings for fabric S_ID and LOOP_ID.
+ */
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl,
+ se_sess, qlat_sess, be_sid);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl,
+ se_sess, qlat_sess, loop_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ return 0;
+}
+
/*
* Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
* to locate struct se_node_acl
@@ -1413,20 +1446,13 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
static int tcm_qla2xxx_check_initiator_node_acl(
scsi_qla_host_t *vha,
unsigned char *fc_wwpn,
- void *qla_tgt_sess,
- uint8_t *s_id,
- uint16_t loop_id)
+ struct qla_tgt_sess *qlat_sess)
{
struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
struct tcm_qla2xxx_tpg *tpg;
- struct tcm_qla2xxx_nacl *nacl;
- struct se_portal_group *se_tpg;
- struct se_node_acl *se_nacl;
struct se_session *se_sess;
- struct qla_tgt_sess *sess = qla_tgt_sess;
unsigned char port_name[36];
- unsigned long flags;
int num_tags = (ha->cur_fw_xcb_count) ? ha->cur_fw_xcb_count :
TCM_QLA2XXX_DEFAULT_TAGS;
@@ -1444,15 +1470,6 @@ static int tcm_qla2xxx_check_initiator_node_acl(
pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
return -EINVAL;
}
- se_tpg = &tpg->se_tpg;
-
- se_sess = transport_init_session_tags(num_tags,
- sizeof(struct qla_tgt_cmd),
- TARGET_PROT_ALL);
- if (IS_ERR(se_sess)) {
- pr_err("Unable to initialize struct se_session\n");
- return PTR_ERR(se_sess);
- }
/*
* Format the FCP Initiator port_name into colon separated values to
* match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
@@ -1463,28 +1480,12 @@ static int tcm_qla2xxx_check_initiator_node_acl(
* Locate our struct se_node_acl either from an explicit NodeACL created
* via ConfigFS, or via running in TPG demo mode.
*/
- se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
- port_name);
- if (!se_sess->se_node_acl) {
- transport_free_session(se_sess);
- return -EINVAL;
- }
- se_nacl = se_sess->se_node_acl;
- nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
- /*
- * And now setup the new se_nacl and session pointers into our HW lport
- * mappings for fabric S_ID and LOOP_ID.
- */
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
- qla_tgt_sess, s_id);
- tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
- qla_tgt_sess, loop_id);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- /*
- * Finally register the new FC Nexus with TCM
- */
- transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+ se_sess = target_alloc_session(&tpg->se_tpg, num_tags,
+ sizeof(struct qla_tgt_cmd),
+ TARGET_PROT_ALL, port_name,
+ qlat_sess, tcm_qla2xxx_session_cb);
+ if (IS_ERR(se_sess))
+ return PTR_ERR(se_sess);
return 0;
}
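
[Editorial aside] target_alloc_session() (used above) allocates the se_session, resolves the NodeACL from the supplied port_name, invokes the callback with the driver-private pointer and, as the deleted error paths suggest, unwinds everything itself on failure. The callback signature below is taken from this patch's tcm_qla2xxx_session_cb(); the struct and body are illustrative only:

struct my_priv {			/* hypothetical private data */
	struct se_session *se_sess;
};

static int example_session_cb(struct se_portal_group *se_tpg,
			      struct se_session *se_sess, void *p)
{
	struct my_priv *priv = p;

	priv->se_sess = se_sess;	/* publish the new nexus */
	return 0;			/* non-zero aborts the allocation */
}
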
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 5d4f8e67fb25..638f72c5ab05 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -46,11 +46,13 @@ int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
ret_val = qla4_83xx_set_win_base(ha, addr);
- if (ret_val == QLA_SUCCESS)
+ if (ret_val == QLA_SUCCESS) {
*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
- else
+ } else {
+ *data = 0xffffffff;
ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
__func__, addr);
+ }
return ret_val;
}
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 676385ff28ef..69bfc0a1aea3 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -710,7 +710,7 @@ static int qpti_map_regs(struct qlogicpti *qpti)
"PTI Qlogic/ISP");
if (!qpti->qregs) {
printk("PTI: Qlogic/ISP registers are unmappable\n");
- return -1;
+ return -ENODEV;
}
if (qpti->is_pti) {
qpti->sreg = of_ioremap(&op->resource[0], (16 * 4096),
@@ -718,7 +718,7 @@ static int qpti_map_regs(struct qlogicpti *qpti)
"PTI Qlogic/ISP statreg");
if (!qpti->sreg) {
printk("PTI: Qlogic/ISP status register is unmappable\n");
- return -1;
+ return -ENODEV;
}
}
return 0;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b1bf42b93fcc..1deb6adc411f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev)
int pg83_supported = 0;
unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL;
- if (sdev->skip_vpd_pages)
+ if (!scsi_device_supports_vpd(sdev))
return;
+
retry_pg0:
vpd_buf = kmalloc(vpd_len, GFP_KERNEL);
if (!vpd_buf)
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index c126966130ab..ce79de822e46 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -278,8 +278,16 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
ucp[3] = 0;
put_unaligned_be64(info, &ucp[4]);
} else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
+ /*
+ * Only set the 'VALID' bit if we can represent the value
+ * correctly; otherwise just fill out the lower bytes and
+ * clear the 'VALID' flag.
+ */
+ if (info <= 0xffffffffUL)
+ buf[0] |= 0x80;
+ else
+ buf[0] &= 0x7f;
+ put_unaligned_be32((u32)info, &buf[3]);
}
return 0;
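
[Editorial aside] Fixed-format sense data (response codes 0x70/0x71) only carries a 4-byte INFORMATION field, while the descriptor format handled in the first branch carries the full 8 bytes; the hunk above therefore sets the VALID bit only when the value is exactly representable. A userspace restatement of the rule:

#include <stdint.h>
#include <stdio.h>

static void set_fixed_sense_info(uint8_t *buf, uint64_t info)
{
	if (info <= 0xffffffffULL)
		buf[0] |= 0x80;		/* VALID: INFORMATION is exact */
	else
		buf[0] &= 0x7f;		/* low 32 bits stored, flag cleared */

	buf[3] = info >> 24;		/* big-endian 32-bit INFORMATION */
	buf[4] = info >> 16;
	buf[5] = info >> 8;
	buf[6] = info;
}

int main(void)
{
	uint8_t sense[18] = { 0x70 };

	set_fixed_sense_info(sense, 0x12345678ULL);
	printf("small info: VALID=%d\n", !!(sense[0] & 0x80));	/* 1 */

	set_fixed_sense_info(sense, 0x123456789ULL);
	printf("large info: VALID=%d\n", !!(sense[0] & 0x80));	/* 0 */
	return 0;
}
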
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index bbfbfd9e5aa3..3408578b08d6 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -220,6 +220,8 @@ static struct {
{"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
+ {"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA},
+ {"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA},
{"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8c6e31874171..8106515d1df8 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2700,6 +2700,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
envp[idx++] = "SDEV_MEDIA_CHANGE=1";
break;
case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+ scsi_rescan_device(&sdev->sdev_gendev);
envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
break;
case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 459abe1dcc87..b44c1bb687a2 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -139,6 +139,16 @@ static int scsi_bus_resume_common(struct device *dev,
else
fn = NULL;
+ /*
+ * Forcibly set runtime PM status of request queue to "active" to
+ * make sure we can again get requests from the queue (see also
+ * blk_pm_peek_request()).
+ *
+ * The resume hook will correct runtime PM status of the disk.
+ */
+ if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
+ blk_set_runtime_active(to_scsi_device(dev)->request_queue);
+
if (fn) {
async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index 6266a5d73d0f..e659912498bd 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -4,7 +4,7 @@
#define SAS_HOST_ATTRS 0
#define SAS_PHY_ATTRS 17
#define SAS_PORT_ATTRS 1
-#define SAS_RPORT_ATTRS 7
+#define SAS_RPORT_ATTRS 8
#define SAS_END_DEV_ATTRS 5
#define SAS_EXPANDER_ATTRS 7
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6a820668d442..97074c91e328 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -43,6 +43,7 @@
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dh.h>
#include <scsi/scsi_eh.h>
#include "scsi_priv.h"
@@ -518,7 +519,8 @@ void scsi_target_reap(struct scsi_target *starget)
}
/**
- * sanitize_inquiry_string - remove non-graphical chars from an INQUIRY result string
+ * scsi_sanitize_inquiry_string - remove non-graphical chars from an
+ * INQUIRY result string
* @s: INQUIRY result string to sanitize
* @len: length of the string
*
@@ -531,7 +533,7 @@ void scsi_target_reap(struct scsi_target *starget)
* string terminator, so all the following characters are set to
* spaces.
**/
-static void sanitize_inquiry_string(unsigned char *s, int len)
+void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
int terminated = 0;
@@ -542,6 +544,7 @@ static void sanitize_inquiry_string(unsigned char *s, int len)
*s = ' ';
}
}
+EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
/**
* scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
@@ -627,9 +630,9 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
if (result == 0) {
- sanitize_inquiry_string(&inq_result[8], 8);
- sanitize_inquiry_string(&inq_result[16], 16);
- sanitize_inquiry_string(&inq_result[32], 4);
+ scsi_sanitize_inquiry_string(&inq_result[8], 8);
+ scsi_sanitize_inquiry_string(&inq_result[16], 16);
+ scsi_sanitize_inquiry_string(&inq_result[32], 4);
response_len = inq_result[4] + 5;
if (response_len > 255)
@@ -962,6 +965,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_NO_DIF)
sdev->no_dif = 1;
+ if (*bflags & BLIST_SYNC_ALUA)
+ sdev->synchronous_alua = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
@@ -1519,9 +1525,14 @@ EXPORT_SYMBOL(scsi_add_device);
void scsi_rescan_device(struct device *dev)
{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
device_lock(dev);
- scsi_attach_vpd(to_scsi_device(dev));
+ scsi_attach_vpd(sdev);
+
+ if (sdev->handler && sdev->handler->rescan)
+ sdev->handler->rescan(sdev);
if (dev->driver && try_module_get(dev->driver->owner)) {
struct scsi_driver *drv = to_scsi_driver(dev->driver);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 00bc7218a7f8..2b642b145be1 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -81,6 +81,35 @@ const char *scsi_host_state_name(enum scsi_host_state state)
return name;
}
+#ifdef CONFIG_SCSI_DH
+static const struct {
+ unsigned char value;
+ char *name;
+} sdev_access_states[] = {
+ { SCSI_ACCESS_STATE_OPTIMAL, "active/optimized" },
+ { SCSI_ACCESS_STATE_ACTIVE, "active/non-optimized" },
+ { SCSI_ACCESS_STATE_STANDBY, "standby" },
+ { SCSI_ACCESS_STATE_UNAVAILABLE, "unavailable" },
+ { SCSI_ACCESS_STATE_LBA, "lba-dependent" },
+ { SCSI_ACCESS_STATE_OFFLINE, "offline" },
+ { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" },
+};
+
+static const char *scsi_access_state_name(unsigned char state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(sdev_access_states); i++) {
+ if (sdev_access_states[i].value == state) {
+ name = sdev_access_states[i].name;
+ break;
+ }
+ }
+ return name;
+}
+#endif
+
static int check_set(unsigned long long *val, char *src)
{
char *last;
@@ -199,7 +228,7 @@ show_shost_state(struct device *dev, struct device_attribute *attr, char *buf)
}
/* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */
-struct device_attribute dev_attr_hstate =
+static struct device_attribute dev_attr_hstate =
__ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state);
static ssize_t
@@ -374,7 +403,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
NULL
};
-struct attribute_group scsi_shost_attr_group = {
+static struct attribute_group scsi_shost_attr_group = {
.attrs = scsi_sysfs_shost_attrs,
};
@@ -973,6 +1002,43 @@ sdev_store_dh_state(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(dh_state, S_IRUGO | S_IWUSR, sdev_show_dh_state,
sdev_store_dh_state);
+
+static ssize_t
+sdev_show_access_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ unsigned char access_state;
+ const char *access_state_name;
+
+ if (!sdev->handler)
+ return -EINVAL;
+
+ access_state = (sdev->access_state & SCSI_ACCESS_STATE_MASK);
+ access_state_name = scsi_access_state_name(access_state);
+
+ return sprintf(buf, "%s\n",
+ access_state_name ? access_state_name : "unknown");
+}
+static DEVICE_ATTR(access_state, S_IRUGO, sdev_show_access_state, NULL);
+
+static ssize_t
+sdev_show_preferred_path(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (!sdev->handler)
+ return -EINVAL;
+
+ if (sdev->access_state & SCSI_ACCESS_STATE_PREFERRED)
+ return sprintf(buf, "1\n");
+ else
+ return sprintf(buf, "0\n");
+}
+static DEVICE_ATTR(preferred_path, S_IRUGO, sdev_show_preferred_path, NULL);
#endif
static ssize_t
@@ -1020,9 +1086,33 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
!sdev->host->hostt->change_queue_depth)
return 0;
+#ifdef CONFIG_SCSI_DH
+ if (attr == &dev_attr_access_state.attr &&
+ !sdev->handler)
+ return 0;
+ if (attr == &dev_attr_preferred_path.attr &&
+ !sdev->handler)
+ return 0;
+#endif
return attr->mode;
}
+static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
+ struct bin_attribute *attr, int i)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
+ return 0;
+
+ if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
+ return 0;
+
+ return S_IRUGO;
+}
+
/* Default template for device attributes. May NOT be modified */
static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_device_blocked.attr,
@@ -1047,6 +1137,8 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_wwid.attr,
#ifdef CONFIG_SCSI_DH
&dev_attr_dh_state.attr,
+ &dev_attr_access_state.attr,
+ &dev_attr_preferred_path.attr,
#endif
&dev_attr_queue_ramp_up_period.attr,
REF_EVT(media_change),
@@ -1068,6 +1160,7 @@ static struct attribute_group scsi_sdev_attr_group = {
.attrs = scsi_sdev_attrs,
.bin_attrs = scsi_sdev_bin_attrs,
.is_visible = scsi_sdev_attr_is_visible,
+ .is_bin_visible = scsi_sdev_bin_attr_is_visible,
};
static const struct attribute_group *scsi_sdev_attr_groups[] = {
@@ -1129,13 +1222,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
scsi_autopm_get_device(sdev);
- error = device_add(&sdev->sdev_gendev);
- if (error) {
- sdev_printk(KERN_INFO, sdev,
- "failed to add device: %d\n", error);
- return error;
- }
-
error = scsi_dh_add_device(sdev);
if (error)
/*
@@ -1144,6 +1230,14 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
sdev_printk(KERN_INFO, sdev,
"failed to add device handler: %d\n", error);
+ error = device_add(&sdev->sdev_gendev);
+ if (error) {
+ sdev_printk(KERN_INFO, sdev,
+ "failed to add device: %d\n", error);
+ scsi_dh_remove_device(sdev);
+ return error;
+ }
+
device_enable_async_suspend(&sdev->sdev_dev);
error = device_add(&sdev->sdev_dev);
if (error) {
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e4b3d8f4fd85..441481623fb9 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -4308,6 +4308,8 @@ static const struct {
{ISCSI_PORT_SPEED_100MBPS, "100 Mbps" },
{ISCSI_PORT_SPEED_1GBPS, "1 Gbps" },
{ISCSI_PORT_SPEED_10GBPS, "10 Gbps" },
+ {ISCSI_PORT_SPEED_25GBPS, "25 Gbps" },
+ {ISCSI_PORT_SPEED_40GBPS, "40 Gbps" },
};
char *iscsi_get_port_speed_name(struct Scsi_Host *shost)
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 80520e2f0fa2..b6f958193dad 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1286,6 +1286,7 @@ sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols);
sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
unsigned long long);
sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
+sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%u\n", u32);
/* only need 8 bytes of data plus header (4 or 8) */
#define BUF_SIZE 64
@@ -1886,6 +1887,7 @@ sas_attach_transport(struct sas_function_template *ft)
SETUP_RPORT_ATTRIBUTE(rphy_device_type);
SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
+ SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id);
SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier,
get_enclosure_identifier);
SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d749da765df1..f52b74cf8d1e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -648,7 +648,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
*/
if (sdkp->lbprz) {
q->limits.discard_alignment = 0;
- q->limits.discard_granularity = 1;
+ q->limits.discard_granularity = logical_block_size;
} else {
q->limits.discard_alignment = sdkp->unmap_alignment *
logical_block_size;
@@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdp = sdkp->device;
struct Scsi_Host *host = sdp->host;
+ sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
int diskinfo[4];
/* default to most commonly used values */
- diskinfo[0] = 0x40; /* 1 << 6 */
- diskinfo[1] = 0x20; /* 1 << 5 */
- diskinfo[2] = sdkp->capacity >> 11;
-
+ diskinfo[0] = 0x40; /* 1 << 6 */
+ diskinfo[1] = 0x20; /* 1 << 5 */
+ diskinfo[2] = capacity >> 11;
+
/* override with calculated, extended default, or driver values */
if (host->hostt->bios_param)
- host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
+ host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
else
- scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
+ scsicam_bios_param(bdev, capacity, diskinfo);
geo->heads = diskinfo[0];
geo->sectors = diskinfo[1];
@@ -2337,14 +2338,6 @@ got_data:
if (sdkp->capacity > 0xffffffff)
sdp->use_16_for_rw = 1;
- /* Rescale capacity to 512-byte units */
- if (sector_size == 4096)
- sdkp->capacity <<= 3;
- else if (sector_size == 2048)
- sdkp->capacity <<= 2;
- else if (sector_size == 1024)
- sdkp->capacity <<= 1;
-
blk_queue_physical_block_size(sdp->request_queue,
sdkp->physical_block_size);
sdkp->device->sector_size = sector_size;
@@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->ws10 = 1;
}
-static int sd_try_extended_inquiry(struct scsi_device *sdp)
-{
- /* Attempt VPD inquiry if the device blacklist explicitly calls
- * for it.
- */
- if (sdp->try_vpd_pages)
- return 1;
- /*
- * Although VPD inquiries can go to SCSI-2 type devices,
- * some USB ones crash on receiving them, and the pages
- * we currently ask for are for SPC-3 and beyond
- */
- if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages)
- return 1;
- return 0;
-}
-
-static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks)
-{
- return blocks << (ilog2(sdev->sector_size) - 9);
-}
-
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
- if (sd_try_extended_inquiry(sdp)) {
+ if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
@@ -2891,7 +2862,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+ sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
rw_max = q->limits.io_opt =
sdkp->opt_xfer_blocks * sdp->sector_size;
else
@@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
/* Combine with controller limits */
q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
- set_capacity(disk, sdkp->capacity);
+ set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
kfree(buffer);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5f2a84aff29f..654630bb7d0e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -65,7 +65,7 @@ struct scsi_disk {
struct device dev;
struct gendisk *disk;
atomic_t openers;
- sector_t capacity; /* size in 512-byte sectors */
+ sector_t capacity; /* size in logical blocks */
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
@@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
return 0;
}
+static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
+{
+ return blocks << (ilog2(sdev->sector_size) - 9);
+}
+
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
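
With sdkp->capacity now kept in logical blocks, the logical_to_sectors() helper
above supplies the conversion to the 512-byte units the block layer expects: a
left shift by ilog2(sector_size) - 9, i.e. multiplication by sector_size / 512.
A standalone check of that arithmetic (userspace sketch; like the helper, it
assumes power-of-two sector sizes, and it uses the GCC/Clang ctz builtin in
place of ilog2):

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as logical_to_sectors(): blocks * (sector_size / 512). */
static uint64_t to_512b_sectors(uint64_t blocks, unsigned int sector_size)
{
    unsigned int shift = __builtin_ctz(sector_size) - 9; /* ilog2(size) - 9 */

    return blocks << shift;
}

int main(void)
{
    assert(to_512b_sectors(100, 512) == 100);  /* no scaling */
    assert(to_512b_sectors(100, 4096) == 800); /* x8, same byte count */
    return 0;
}
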
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 5e820674432c..ae7d9bdf409c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
else
hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
hp->dxfer_len = mxsize;
- if (hp->dxfer_direction == SG_DXFER_TO_DEV)
+ if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
+ (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
hp->dxferp = (char __user *)buf + cmd_size;
else
hp->dxferp = NULL;
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index 3b3b56f4a830..82ed99848378 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -176,8 +176,7 @@ static struct eisa_device_id sim710_eisa_ids[] = {
};
MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids);
-static __init int
-sim710_eisa_probe(struct device *dev)
+static int sim710_eisa_probe(struct device *dev)
{
struct eisa_device *edev = to_eisa_device(dev);
unsigned long io_addr = edev->base_addr;
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index aebe75320ed3..ab0e06b0b4ff 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -75,7 +75,7 @@ snic_ver_enc(const char *s)
continue;
}
- if (i > 4 || !isdigit(c))
+ if (i > 3 || !isdigit(c))
goto end;
v[i] = v[i] * 10 + (c - '0');
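
The snic bound change from i > 4 to i > 3 matters because the encoder packs a
dotted version string into a four-element array: with the old bound, a fifth
component would have written one element past the end. A simplified standalone
model of the parse loop (the driver's version handles further details):

#include <assert.h>
#include <ctype.h>

/* Model of snic_ver_enc(): pack "a.b.c.d" into v[4]; reject a 5th field. */
static int ver_enc(const char *s, int v[4])
{
    int i = 0;

    for (; *s; s++) {
        if (*s == '.') {
            i++;
            continue;
        }
        if (i > 3 || !isdigit((unsigned char)*s))
            return -1;              /* too many fields, or junk */
        v[i] = v[i] * 10 + (*s - '0');
    }
    return 0;
}

int main(void)
{
    int a[4] = {0}, b[4] = {0};

    assert(ver_enc("1.2.3.4", a) == 0 && a[3] == 4);
    assert(ver_enc("1.2.3.4.5", b) == -1); /* would have overflowed v[] */
    return 0;
}
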
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 2e522951b619..dbf1882cfbac 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -9,7 +9,7 @@
Steve Hirsch, Andreas Koppenh"ofer, Michael Leodolter, Eyal Lebedinsky,
Michael Schaefer, J"org Weule, and Eric Youngdale.
- Copyright 1992 - 2010 Kai Makisara
+ Copyright 1992 - 2016 Kai Makisara
email Kai.Makisara@kolumbus.fi
Some small formal changes - aeb, 950809
@@ -17,7 +17,7 @@
Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
*/
-static const char *verstr = "20101219";
+static const char *verstr = "20160209";
#include <linux/module.h>
@@ -3296,7 +3296,10 @@ static int switch_partition(struct scsi_tape *STp)
#define PP_OFF_RESERVED 7
#define PP_BIT_IDP 0x20
+#define PP_BIT_FDP 0x80
#define PP_MSK_PSUM_MB 0x10
+#define PP_MSK_PSUM_UNITS 0x18
+#define PP_MSK_POFM 0x04
/* Get the number of partitions on the tape. As a side effect reads the
mode page into the tape buffer. */
@@ -3322,6 +3325,29 @@ static int nbr_partitions(struct scsi_tape *STp)
}
+static int format_medium(struct scsi_tape *STp, int format)
+{
+ int result = 0;
+ int timeout = STp->long_timeout;
+ unsigned char scmd[MAX_COMMAND_SIZE];
+ struct st_request *SRpnt;
+
+ memset(scmd, 0, MAX_COMMAND_SIZE);
+ scmd[0] = FORMAT_UNIT;
+ scmd[2] = format;
+ if (STp->immediate) {
+ scmd[1] |= 1; /* Don't wait for completion */
+ timeout = STp->device->request_queue->rq_timeout;
+ }
+ DEBC_printk(STp, "Sending FORMAT MEDIUM\n");
+ SRpnt = st_do_scsi(NULL, STp, scmd, 0, DMA_NONE,
+ timeout, MAX_RETRIES, 1);
+ if (!SRpnt)
+ result = STp->buffer->syscall_result;
+ return result;
+}
+
+
/* Partition the tape into two partitions if size > 0 or one partition if
size == 0.
@@ -3340,11 +3366,16 @@ static int nbr_partitions(struct scsi_tape *STp)
and 10 when 1 partition is defined (information from Eric Lee Green). This
is acceptable also to some other old drives and enforced if the first partition
size field is used for the first additional partition size.
+
+ For drives that advertise SCSI-3 or newer, use the SSC-3 methods.
*/
static int partition_tape(struct scsi_tape *STp, int size)
{
int result;
+ int target_partition;
+ bool scsi3 = STp->device->scsi_level >= SCSI_3, needs_format = false;
int pgo, psd_cnt, psdo;
+ int psum = PP_MSK_PSUM_MB, units = 0;
unsigned char *bp;
result = read_mode_page(STp, PART_PAGE, 0);
@@ -3352,6 +3383,12 @@ static int partition_tape(struct scsi_tape *STp, int size)
DEBC_printk(STp, "Can't read partition mode page.\n");
return result;
}
+ target_partition = 1;
+ if (size < 0) {
+ target_partition = 0;
+ size = -size;
+ }
+
/* The mode page is in the buffer. Let's modify it and write it. */
bp = (STp->buffer)->b_data;
pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH];
@@ -3359,9 +3396,52 @@ static int partition_tape(struct scsi_tape *STp, int size)
bp[pgo + MP_OFF_PAGE_LENGTH] + 2);
psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2;
+
+ if (scsi3) {
+ needs_format = (bp[pgo + PP_OFF_FLAGS] & PP_MSK_POFM) != 0;
+ if (needs_format && size == 0) {
+ /* No need to write the mode page when clearing
+ * partitioning
+ */
+ DEBC_printk(STp, "Formatting tape with one partition.\n");
+ result = format_medium(STp, 0);
+ goto out;
+ }
+ if (needs_format) /* Leave the old value for HP DATs claiming SCSI_3 */
+ psd_cnt = 2;
+ if ((bp[pgo + PP_OFF_FLAGS] & PP_MSK_PSUM_UNITS) == PP_MSK_PSUM_UNITS) {
+ /* Use units scaling for large partitions if the device
+ * suggests it and no precision lost. Required for IBM
+ * TS1140/50 drives that don't support MB units.
+ */
+ if (size >= 1000 && (size % 1000) == 0) {
+ size /= 1000;
+ psum = PP_MSK_PSUM_UNITS;
+ units = 9; /* GB */
+ }
+ }
+ /* Try it anyway if too large to specify in MB */
+ if (psum == PP_MSK_PSUM_MB && size >= 65534) {
+ size /= 1000;
+ psum = PP_MSK_PSUM_UNITS;
+ units = 9; /* GB */
+ }
+ }
+
+ if (size >= 65535 || /* Does not fit into two bytes */
+ (target_partition == 0 && psd_cnt < 2)) {
+ result = -EINVAL;
+ goto out;
+ }
+
psdo = pgo + PART_PAGE_FIXED_LENGTH;
- if (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS]) {
- bp[psdo] = bp[psdo + 1] = 0xff; /* Rest of the tape */
+ /* The second condition is for HP DDS which use only one partition size
+ * descriptor
+ */
+ if (target_partition > 0 &&
+ (psd_cnt > bp[pgo + PP_OFF_MAX_ADD_PARTS] ||
+ bp[pgo + PP_OFF_MAX_ADD_PARTS] != 1)) {
+ bp[psdo] = bp[psdo + 1] = 0xff; /* Rest to partition 0 */
psdo += 2;
}
memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2);
@@ -3370,7 +3450,7 @@ static int partition_tape(struct scsi_tape *STp, int size)
psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS],
bp[pgo + PP_OFF_NBR_ADD_PARTS]);
- if (size <= 0) {
+ if (size == 0) {
bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0;
if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS])
bp[pgo + MP_OFF_PAGE_LENGTH] = 6;
@@ -3378,22 +3458,37 @@ static int partition_tape(struct scsi_tape *STp, int size)
} else {
bp[psdo] = (size >> 8) & 0xff;
bp[psdo + 1] = size & 0xff;
+ if (target_partition == 0)
+ bp[psdo + 2] = bp[psdo + 3] = 0xff;
bp[pgo + 3] = 1;
if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8)
bp[pgo + MP_OFF_PAGE_LENGTH] = 8;
- DEBC_printk(STp, "Formatting tape with two partitions "
- "(1 = %d MB).\n", size);
+ DEBC_printk(STp,
+ "Formatting tape with two partitions (%i = %d MB).\n",
+ target_partition, units > 0 ? size * 1000 : size);
}
bp[pgo + PP_OFF_PART_UNITS] = 0;
bp[pgo + PP_OFF_RESERVED] = 0;
- bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | PP_MSK_PSUM_MB;
+ if (size != 1 || units != 0) {
+ bp[pgo + PP_OFF_FLAGS] = PP_BIT_IDP | psum |
+ (bp[pgo + PP_OFF_FLAGS] & 0x07);
+ bp[pgo + PP_OFF_PART_UNITS] = units;
+ } else
+ bp[pgo + PP_OFF_FLAGS] = PP_BIT_FDP |
+ (bp[pgo + PP_OFF_FLAGS] & 0x1f);
+ bp[pgo + MP_OFF_PAGE_LENGTH] = 6 + psd_cnt * 2;
result = write_mode_page(STp, PART_PAGE, 1);
+
+ if (!result && needs_format)
+ result = format_medium(STp, 1);
+
if (result) {
st_printk(KERN_INFO, STp, "Partitioning of tape failed.\n");
result = (-EIO);
}
+out:
return result;
}
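
The PSUM juggling above exists because the partition size descriptor is a
16-bit field: sizes are normally expressed in megabytes, and anything that will
not fit (or that the drive prefers scaled) is divided by 1000 and tagged with a
10^9 units exponent, i.e. gigabytes. The rescaling decision in isolation, as a
userspace model of the logic:

#include <assert.h>

/* Model of the PSUM rescaling in partition_tape(): sizes start in MB and
 * must fit a 16-bit field; fall back to 10^9-byte units when they don't. */
struct psum { int size; int units; };  /* units: 0 = MB, 9 = 10^9 bytes */

static struct psum scale_partition_size(int size_mb)
{
    struct psum p = { size_mb, 0 };

    if (p.size >= 65534) {  /* will not fit in two bytes as MB */
        p.size /= 1000;
        p.units = 9;        /* now expressed in GB */
    }
    return p;
}

int main(void)
{
    assert(scale_partition_size(1000).units == 0);    /* 1000 MB stays MB */
    assert(scale_partition_size(200000).size == 200); /* 200000 MB -> 200 GB */
    return 0;
}
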
@@ -3570,8 +3665,13 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
retval = (-EINVAL);
goto out;
}
- if ((i = st_int_ioctl(STp, MTREW, 0)) < 0 ||
- (i = partition_tape(STp, mtc.mt_count)) < 0) {
+ i = do_load_unload(STp, file, 1);
+ if (i < 0) {
+ retval = i;
+ goto out;
+ }
+ i = partition_tape(STp, mtc.mt_count);
+ if (i < 0) {
retval = i;
goto out;
}
@@ -3581,7 +3681,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
STp->ps[i].last_block_valid = 0;
}
STp->partition = STp->new_partition = 0;
- STp->nbr_partitions = 1; /* Bad guess ?-) */
+ STp->nbr_partitions = mtc.mt_count != 0 ? 2 : 1;
STps->drv_block = STps->drv_file = 0;
retval = 0;
goto out;
@@ -4817,8 +4917,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
/* Try to fault in all of the necessary pages */
/* rw==READ means read from drive, write into memory area */
res = get_user_pages_unlocked(
- current,
- current->mm,
uaddr,
nr_pages,
rw == READ,
@@ -4843,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
out_unmap:
if (res > 0) {
for (j=0; j < res; j++)
- page_cache_release(pages[j]);
+ put_page(pages[j]);
res = 0;
}
kfree(pages);
@@ -4865,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
/* FIXME: cache flush missing for rw==READ
* FIXME: call the correct reference counting function
*/
- page_cache_release(page);
+ put_page(page);
}
kfree(STbp->mapped_pages);
STbp->mapped_pages = NULL;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 2de28d7a0b04..5b23175a584c 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1,7 +1,7 @@
/*
* SuperTrak EX Series Storage Controller driver for Linux
*
- * Copyright (C) 2005-2009 Promise Technology Inc.
+ * Copyright (C) 2005-2015 Promise Technology Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -38,11 +38,11 @@
#include <scsi/scsi_eh.h>
#define DRV_NAME "stex"
-#define ST_DRIVER_VERSION "4.6.0000.4"
-#define ST_VER_MAJOR 4
-#define ST_VER_MINOR 6
-#define ST_OEM 0
-#define ST_BUILD_VER 4
+#define ST_DRIVER_VERSION "5.00.0000.01"
+#define ST_VER_MAJOR 5
+#define ST_VER_MINOR 00
+#define ST_OEM 0000
+#define ST_BUILD_VER 01
enum {
/* MU register offset */
@@ -84,6 +84,8 @@ enum {
MU_STATE_STARTED = 2,
MU_STATE_RESETTING = 3,
MU_STATE_FAILED = 4,
+ MU_STATE_STOP = 5,
+ MU_STATE_NOCONNECT = 6,
MU_MAX_DELAY = 120,
MU_HANDSHAKE_SIGNATURE = 0x55aaaa55,
@@ -165,6 +167,14 @@ enum {
ST_ADDITIONAL_MEM = 0x200000,
ST_ADDITIONAL_MEM_MIN = 0x80000,
+ PMIC_SHUTDOWN = 0x0D,
+ PMIC_RESUME = 0x10,
+ ST_IGNORED = -1,
+ ST_NOTHANDLED = 7,
+ ST_S3 = 3,
+ ST_S4 = 4,
+ ST_S5 = 5,
+ ST_S6 = 6,
};
struct st_sgitem {
@@ -328,6 +338,7 @@ struct st_hba {
u16 rq_count;
u16 rq_size;
u16 sts_count;
+ u8 supports_pm;
};
struct st_card_info {
@@ -536,6 +547,27 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
readl(hba->mmio_base + YH2I_REQ); /* flush */
}
+static void return_abnormal_state(struct st_hba *hba, int status)
+{
+ struct st_ccb *ccb;
+ unsigned long flags;
+ u16 tag;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ for (tag = 0; tag < hba->host->can_queue; tag++) {
+ ccb = &hba->ccb[tag];
+ if (ccb->req == NULL)
+ continue;
+ ccb->req = NULL;
+ if (ccb->cmd) {
+ scsi_dma_unmap(ccb->cmd);
+ ccb->cmd->result = status << 16;
+ ccb->cmd->scsi_done(ccb->cmd);
+ ccb->cmd = NULL;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
static int
stex_slave_config(struct scsi_device *sdev)
{
@@ -559,8 +591,12 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
id = cmd->device->id;
lun = cmd->device->lun;
hba = (struct st_hba *) &host->hostdata[0];
-
- if (unlikely(hba->mu_status == MU_STATE_RESETTING))
+ if (hba->mu_status == MU_STATE_NOCONNECT) {
+ cmd->result = DID_NO_CONNECT;
+ done(cmd);
+ return 0;
+ }
+ if (unlikely(hba->mu_status != MU_STATE_STARTED))
return SCSI_MLQUEUE_HOST_BUSY;
switch (cmd->cmnd[0]) {
@@ -1259,10 +1295,8 @@ static void stex_ss_reset(struct st_hba *hba)
static int stex_do_reset(struct st_hba *hba)
{
- struct st_ccb *ccb;
unsigned long flags;
unsigned int mu_status = MU_STATE_RESETTING;
- u16 tag;
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->mu_status == MU_STATE_STARTING) {
@@ -1296,20 +1330,8 @@ static int stex_do_reset(struct st_hba *hba)
else if (hba->cardtype == st_yel)
stex_ss_reset(hba);
- spin_lock_irqsave(hba->host->host_lock, flags);
- for (tag = 0; tag < hba->host->can_queue; tag++) {
- ccb = &hba->ccb[tag];
- if (ccb->req == NULL)
- continue;
- ccb->req = NULL;
- if (ccb->cmd) {
- scsi_dma_unmap(ccb->cmd);
- ccb->cmd->result = DID_RESET << 16;
- ccb->cmd->scsi_done(ccb->cmd);
- ccb->cmd = NULL;
- }
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return_abnormal_state(hba, DID_RESET);
if (stex_handshake(hba) == 0)
return 0;
@@ -1560,6 +1582,25 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hba->cardtype = (unsigned int) id->driver_data;
ci = &stex_card_info[hba->cardtype];
+ switch (id->subdevice) {
+ case 0x4221:
+ case 0x4222:
+ case 0x4223:
+ case 0x4224:
+ case 0x4225:
+ case 0x4226:
+ case 0x4227:
+ case 0x4261:
+ case 0x4262:
+ case 0x4263:
+ case 0x4264:
+ case 0x4265:
+ break;
+ default:
+ if (hba->cardtype == st_yel)
+ hba->supports_pm = 1;
+ }
+
sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
if (hba->cardtype == st_yel)
sts_offset += (ci->sts_count+1) * sizeof(u32);
@@ -1685,7 +1726,7 @@ out_disable:
return err;
}
-static void stex_hba_stop(struct st_hba *hba)
+static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
{
struct req_msg *req;
struct st_msg_header *msg_h;
@@ -1694,6 +1735,15 @@ static void stex_hba_stop(struct st_hba *hba)
u16 tag = 0;
spin_lock_irqsave(hba->host->host_lock, flags);
+
+ if (hba->cardtype == st_yel && hba->supports_pm == 1)
+ {
+ if(st_sleep_mic == ST_NOTHANDLED)
+ {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return;
+ }
+ }
req = hba->alloc_rq(hba);
if (hba->cardtype == st_yel) {
msg_h = (struct st_msg_header *)req - 1;
@@ -1701,11 +1751,18 @@ static void stex_hba_stop(struct st_hba *hba)
} else
memset(req, 0, hba->rq_size);
- if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
+ if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel)
+ && st_sleep_mic == ST_IGNORED) {
req->cdb[0] = MGT_CMD;
req->cdb[1] = MGT_CMD_SIGNATURE;
req->cdb[2] = CTLR_CONFIG_CMD;
req->cdb[3] = CTLR_SHUTDOWN;
+ } else if (hba->cardtype == st_yel && st_sleep_mic != ST_IGNORED) {
+ req->cdb[0] = MGT_CMD;
+ req->cdb[1] = MGT_CMD_SIGNATURE;
+ req->cdb[2] = CTLR_CONFIG_CMD;
+ req->cdb[3] = PMIC_SHUTDOWN;
+ req->cdb[4] = st_sleep_mic;
} else {
req->cdb[0] = CONTROLLER_CMD;
req->cdb[1] = CTLR_POWER_STATE_CHANGE;
@@ -1725,10 +1782,12 @@ static void stex_hba_stop(struct st_hba *hba)
while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
hba->ccb[tag].req_type = 0;
+ hba->mu_status = MU_STATE_STOP;
return;
}
msleep(1);
}
+ hba->mu_status = MU_STATE_STOP;
}
static void stex_hba_free(struct st_hba *hba)
@@ -1751,9 +1810,11 @@ static void stex_remove(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
+ hba->mu_status = MU_STATE_NOCONNECT;
+ return_abnormal_state(hba, DID_NO_CONNECT);
scsi_remove_host(hba->host);
- stex_hba_stop(hba);
+ scsi_block_requests(hba->host);
stex_hba_free(hba);
@@ -1766,9 +1827,43 @@ static void stex_shutdown(struct pci_dev *pdev)
{
struct st_hba *hba = pci_get_drvdata(pdev);
- stex_hba_stop(hba);
+ if (hba->supports_pm == 0)
+ stex_hba_stop(hba, ST_IGNORED);
+ else
+ stex_hba_stop(hba, ST_S5);
+}
+
+static int stex_choice_sleep_mic(pm_message_t state)
+{
+ switch (state.event) {
+ case PM_EVENT_SUSPEND:
+ return ST_S3;
+ case PM_EVENT_HIBERNATE:
+ return ST_S4;
+ default:
+ return ST_NOTHANDLED;
+ }
+}
+
+static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct st_hba *hba = pci_get_drvdata(pdev);
+
+ if (hba->cardtype == st_yel && hba->supports_pm == 1)
+ stex_hba_stop(hba, stex_choice_sleep_mic(state));
+ else
+ stex_hba_stop(hba, ST_IGNORED);
+ return 0;
}
+static int stex_resume(struct pci_dev *pdev)
+{
+ struct st_hba *hba = pci_get_drvdata(pdev);
+
+ hba->mu_status = MU_STATE_STARTING;
+ stex_handshake(hba);
+ return 0;
+}
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
static struct pci_driver stex_pci_driver = {
@@ -1777,6 +1872,8 @@ static struct pci_driver stex_pci_driver = {
.probe = stex_probe,
.remove = stex_remove,
.shutdown = stex_shutdown,
+ .suspend = stex_suspend,
+ .resume = stex_resume,
};
static int __init stex_init(void)
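
Taking stock of the stex changes: return_abnormal_state() factors out the
completion sweep that stex_do_reset() used to perform inline, so the reset path
(DID_RESET) and the remove path (DID_NO_CONNECT) now drain outstanding commands
identically. A lock-free userspace model of the sweep; the driver runs it under
host_lock and also unmaps DMA per command:

#include <assert.h>
#include <stddef.h>

#define DID_NO_CONNECT  0x01    /* host-byte codes as in scsi.h */
#define DID_RESET       0x08

struct ccb { void *req; int result; };

/* Walk every tag; complete any outstanding command with the given
 * host byte placed in bits 16..23 of the result. */
static void return_abnormal_state(struct ccb *ccb, int n, int status)
{
    int tag;

    for (tag = 0; tag < n; tag++) {
        if (!ccb[tag].req)
            continue;
        ccb[tag].req = NULL;
        ccb[tag].result = status << 16;
    }
}

int main(void)
{
    int dummy;
    struct ccb ccbs[2] = { { &dummy, 0 }, { NULL, 0 } };

    return_abnormal_state(ccbs, 2, DID_RESET);
    assert(ccbs[0].req == NULL && ccbs[0].result == (DID_RESET << 16));
    return 0;
}
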
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 292c04eec9ad..3ddcabb790a8 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -914,8 +914,9 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
do_work = true;
process_err_fn = storvsc_remove_lun;
break;
- case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
- if ((asc == 0x2a) && (ascq == 0x9)) {
+ case SRB_STATUS_ABORTED:
+ if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID &&
+ (asc == 0x2a) && (ascq == 0x9)) {
do_work = true;
process_err_fn = storvsc_device_scan;
/*
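
The storvsc fix hinges on srb_status being a packed byte: a status code plus
flag bits such as SRB_STATUS_AUTOSENSE_VALID. Matching on the OR of the two as
a single switch case missed aborts whose sense data was absent, so the patch
matches on the code and tests the flag separately. A sketch of that
decomposition (flag values as in the driver; the 0x3f code mask here is an
illustrative assumption):

#include <assert.h>

#define SRB_STATUS_ABORTED          0x02
#define SRB_STATUS_AUTOSENSE_VALID  0x80
#define SRB_STATUS(status)          ((status) & 0x3f) /* strip flag bits */

int main(void)
{
    unsigned char with_sense = SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID;
    unsigned char without_sense = SRB_STATUS_ABORTED;

    /* Both are aborts; only one carries valid sense data. */
    assert(SRB_STATUS(with_sense) == SRB_STATUS_ABORTED);
    assert(SRB_STATUS(without_sense) == SRB_STATUS_ABORTED);
    assert(with_sense & SRB_STATUS_AUTOSENSE_VALID);
    assert(!(without_sense & SRB_STATUS_AUTOSENSE_VALID));
    return 0;
}
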
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 5f4530744e0a..097894a1fab5 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -37,6 +37,7 @@ config SCSI_UFSHCD
depends on SCSI && SCSI_DMA
select PM_DEVFREQ
select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select NLS
---help---
This selects the support for UFS devices in Linux, say Y and make
sure that you know the name of your UFS host adapter (the card
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 4f38d008bfb4..3aedf73f1131 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,8 +16,8 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-
#include <linux/phy/phy-qcom-ufs.h>
+
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
@@ -58,6 +58,12 @@ static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
len * 4, false);
}
+static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
+ char *prefix, void *priv)
+{
+ ufs_qcom_dump_regs(hba, offset, len, prefix);
+}
+
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
int err = 0;
@@ -106,9 +112,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
if (!host->is_lane_clks_enabled)
return;
- clk_disable_unprepare(host->tx_l1_sync_clk);
+ if (host->hba->lanes_per_direction > 1)
+ clk_disable_unprepare(host->tx_l1_sync_clk);
clk_disable_unprepare(host->tx_l0_sync_clk);
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ if (host->hba->lanes_per_direction > 1)
+ clk_disable_unprepare(host->rx_l1_sync_clk);
clk_disable_unprepare(host->rx_l0_sync_clk);
host->is_lane_clks_enabled = false;
@@ -132,21 +140,24 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
if (err)
goto disable_rx_l0;
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
- host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
+ if (host->hba->lanes_per_direction > 1) {
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+ host->rx_l1_sync_clk);
+ if (err)
+ goto disable_tx_l0;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
- host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ host->tx_l1_sync_clk);
+ if (err)
+ goto disable_rx_l1;
+ }
host->is_lane_clks_enabled = true;
goto out;
disable_rx_l1:
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ if (host->hba->lanes_per_direction > 1)
+ clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
@@ -170,14 +181,16 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
if (err)
goto out;
- err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk);
+ /* In case of single lane per direction, don't read lane1 clocks */
+ if (host->hba->lanes_per_direction > 1) {
+ err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+ &host->rx_l1_sync_clk);
+ if (err)
+ goto out;
+ err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+ &host->tx_l1_sync_clk);
+ }
out:
return err;
}
@@ -267,9 +280,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
if (ret) {
- dev_err(hba->dev,
- "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
- __func__, ret);
+ dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+ __func__, ret);
goto out;
}
@@ -519,6 +531,18 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
150);
+ /*
+ * Some UFS devices (and maybe the host) have issues if LCC is
+ * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
+ * before link startup which will make sure that both host
+ * and device TX LCC are disabled once link startup is
+ * completed.
+ */
+ if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
+ 0);
+
break;
case POST_CHANGE:
ufs_qcom_link_startup_post_change(hba);
@@ -962,6 +986,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
goto out;
}
+ /* enable the device ref clock before changing to HS mode */
+ if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+ ufshcd_is_hs_mode(dev_req_params))
+ ufs_qcom_dev_ref_clk_ctrl(host, true);
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
@@ -989,6 +1017,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
ufs_qcom_update_bus_bw_vote(host);
+
+ /* disable the device ref clock if entered PWM mode */
+ if (ufshcd_is_hs_mode(&hba->pwr_info) &&
+ !ufshcd_is_hs_mode(dev_req_params))
+ ufs_qcom_dev_ref_clk_ctrl(host, false);
break;
default:
ret = -EINVAL;
@@ -1090,6 +1123,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
ufs_qcom_phy_disable_iface_clk(host->generic_phy);
goto out;
}
+ /* enable the device ref clock for HS mode */
+ if (ufshcd_is_hs_mode(&hba->pwr_info))
+ ufs_qcom_dev_ref_clk_ctrl(host, true);
vote = host->bus_vote.saved_vote;
if (vote == host->bus_vote.min_bw_vote)
ufs_qcom_update_bus_bw_vote(host);
@@ -1367,6 +1403,74 @@ out:
return err;
}
+static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
+ void *priv, void (*print_fn)(struct ufs_hba *hba,
+ int offset, int num_regs, char *str, void *priv))
+{
+ u32 reg;
+ struct ufs_qcom_host *host;
+
+ if (unlikely(!hba)) {
+ pr_err("%s: hba is NULL\n", __func__);
+ return;
+ }
+ if (unlikely(!print_fn)) {
+ dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
+ return;
+ }
+
+ host = ufshcd_get_variant(hba);
+ if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
+ return;
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+ print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
+
+ reg = ufshcd_readl(hba, REG_UFS_CFG1);
+ reg |= UFS_BIT(17);
+ ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+ print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+ print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+ print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
+
+ ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+ print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+ print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+ print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+ print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+ print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+ print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+ print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+}
+
+static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
+{
+ if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
+ else
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+}
+
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
/* provide a legal default configuration */
@@ -1475,6 +1579,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
ufshcd_rmwl(host->hba, mask,
(u32)host->testbus.select_minor << offset,
reg);
+ ufs_qcom_enable_test_bus(host);
ufshcd_release(host->hba);
pm_runtime_put_sync(host->hba->dev);
@@ -1491,8 +1596,10 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
"HCI Vendor Specific Registers ");
+ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
ufs_qcom_testbus_read(hba);
}
+
/**
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1537,7 +1644,7 @@ static int ufs_qcom_probe(struct platform_device *pdev)
* ufs_qcom_remove - set driver_data of the device to NULL
* @pdev: pointer to platform device handle
*
- * Always return 0
+ * Always returns 0
*/
static int ufs_qcom_remove(struct platform_device *pdev)
{
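
A small idiom worth noting in the ufs-qcom changes: ufs_qcom_print_hw_debug_reg_all()
accepts a print callback that carries an opaque priv pointer, and
ufs_qcom_dump_regs_wrapper() adapts the existing, narrower dump helper to that
richer signature. The same adapter pattern in miniature (userspace sketch,
hypothetical names):

#include <stdio.h>

/* Callback type with an opaque context, as the register dumper expects. */
typedef void (*print_fn)(int offset, int len, const char *prefix, void *priv);

/* An existing helper with a narrower signature... */
static void dump_regs(int offset, int len, const char *prefix)
{
    printf("%s: %d regs at 0x%x\n", prefix, len, offset);
}

/* ...and the adapter that simply discards the unused context. */
static void dump_regs_wrapper(int offset, int len, const char *prefix,
                              void *priv)
{
    (void)priv;
    dump_regs(offset, len, prefix);
}

static void print_all(print_fn fn, void *priv)
{
    fn(0x100, 44, "UFS_UFS_DBG_RD_REG_OCSC", priv);
    fn(0x200, 32, "UFS_UFS_DBG_RD_EDTL_RAM", priv);
}

int main(void)
{
    print_all(dump_regs_wrapper, NULL);
    return 0;
}
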
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 36249b35f858..a19307a57ce2 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -241,6 +241,15 @@ struct ufs_qcom_host {
struct ufs_qcom_testbus testbus;
};
+static inline u32
+ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
+{
+ if (host->hw_ver.major <= 0x02)
+ return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
+
+ return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
+}
+
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 42c459a9d3fe..b291fa6ed2ad 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -43,6 +43,7 @@
#define GENERAL_UPIU_REQUEST_SIZE 32
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
+#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
@@ -170,7 +171,7 @@ enum ufs_desc_max_size {
* of descriptor header.
*/
QUERY_DESC_STRING_MAX_SIZE = 0xFE,
- QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44,
+ QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
QUERY_DESC_POWER_MAX_SIZE = 0x62,
QUERY_DESC_RFU_MAX_SIZE = 0x00,
};
@@ -195,6 +196,37 @@ enum unit_desc_param {
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
};
+/* Device descriptor parameters offsets in bytes */
+enum device_desc_param {
+ DEVICE_DESC_PARAM_LEN = 0x0,
+ DEVICE_DESC_PARAM_TYPE = 0x1,
+ DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
+ DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
+ DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
+ DEVICE_DESC_PARAM_PRTCL = 0x5,
+ DEVICE_DESC_PARAM_NUM_LU = 0x6,
+ DEVICE_DESC_PARAM_NUM_WLU = 0x7,
+ DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
+ DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
+ DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
+ DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
+ DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
+ DEVICE_DESC_PARAM_SEC_LU = 0xD,
+ DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
+ DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
+ DEVICE_DESC_PARAM_SPEC_VER = 0x10,
+ DEVICE_DESC_PARAM_MANF_DATE = 0x12,
+ DEVICE_DESC_PARAM_MANF_NAME = 0x14,
+ DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
+ DEVICE_DESC_PARAM_SN = 0x16,
+ DEVICE_DESC_PARAM_OEM_ID = 0x17,
+ DEVICE_DESC_PARAM_MANF_ID = 0x18,
+ DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
+ DEVICE_DESC_PARAM_UD_LEN = 0x1B,
+ DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
+ DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
+};
+
/*
* Logical Unit Write Protect
* 00h: LU not write protected
@@ -469,6 +501,7 @@ struct ufs_vreg {
struct regulator *reg;
const char *name;
bool enabled;
+ bool unused;
int min_uV;
int max_uV;
int min_uA;
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
new file mode 100644
index 000000000000..ee4ab85e2801
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UFS_QUIRKS_H_
+#define _UFS_QUIRKS_H_
+
+/* return true if s1 is a prefix of s2 */
+#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
+
+#define UFS_ANY_VENDOR 0xFFFF
+#define UFS_ANY_MODEL "ANY_MODEL"
+
+#define MAX_MODEL_LEN 16
+
+#define UFS_VENDOR_TOSHIBA 0x198
+#define UFS_VENDOR_SAMSUNG 0x1CE
+
+/**
+ * ufs_device_info - ufs device details
+ * @wmanufacturerid: card details
+ * @model: card model
+ */
+struct ufs_device_info {
+ u16 wmanufacturerid;
+ char model[MAX_MODEL_LEN + 1];
+};
+
+/**
+ * ufs_dev_fix - ufs device quirk info
+ * @card: ufs card details
+ * @quirk: device quirk
+ */
+struct ufs_dev_fix {
+ struct ufs_device_info card;
+ unsigned int quirk;
+};
+
+#define END_FIX { { 0 }, 0 }
+
+/* add specific device quirk */
+#define UFS_FIX(_vendor, _model, _quirk) \
+ { \
+ .card.wmanufacturerid = (_vendor),\
+ .card.model = (_model), \
+ .quirk = (_quirk), \
+ }
+
+/*
+ * If UFS device is having issue in processing LCC (Line Control
+ * Command) coming from UFS host controller then enable this quirk.
+ * When this quirk is enabled, host controller driver should disable
+ * the LCC transmission on UFS host controller (by clearing
+ * TX_LCC_ENABLE attribute of host to 0).
+ */
+#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)
+
+/*
+ * Some UFS devices don't need VCCQ rail for device operations. Enabling this
+ * quirk for such devices will make sure that VCCQ rail is not voted.
+ */
+#define UFS_DEVICE_NO_VCCQ (1 << 1)
+
+/*
+ * Some vendors' UFS devices send back-to-back NACs for the DL data frames,
+ * causing the host controller to raise the DFES error status. Sometimes
+ * such UFS devices send back-to-back NACs without waiting for a new
+ * retransmitted DL frame from the host, and in such cases the host UniPro
+ * may go into a bad state without raising the DFES error interrupt. If
+ * this happens, all pending commands would time out only after their
+ * respective SW command timeouts (which are generally too large).
+ *
+ * We can workaround such device behaviour like this:
+ * - As soon as SW sees the DL NAC error, it should schedule the error handler
+ * - Error handler would sleep for 50ms to see if there are any fatal errors
+ * raised by UFS controller.
+ * - If there are fatal errors then SW does normal error recovery.
+ * - If there are no fatal errors then SW sends the NOP command to device
+ * to check if link is alive.
+ * - If NOP command times out, SW does normal error recovery
+ * - If NOP command succeed, skip the error handling.
+ *
+ * If DL NAC error is seen multiple times with some vendor's UFS devices then
+ * enable this quirk to initiate quick error recovery and also silence related
+ * error logs to reduce spamming of kernel logs.
+ */
+#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
+
+/*
+ * Some UFS devices may not work properly after resume if the link was kept
+ * in off state during suspend. Enabling this quirk will not allow the
+ * link to be kept in off state during suspend.
+ */
+#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3)
+
+/*
+ * Few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
+ * 600us which may not be enough for reliable hibern8 exit hardware sequence
+ * from UFS device.
+ * To workaround this issue, host should set its PA_TACTIVATE time to 1ms even
+ * if device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms.
+ */
+#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
+
+/*
+ * Some UFS memory devices may have really low read/write throughput in
+ * FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is
+ * never enabled for such devices.
+ */
+#define UFS_DEVICE_NO_FASTAUTO (1 << 5)
+
+/*
+ * It seems some UFS devices may keep drawing more than sleep current
+ * (at least for 500us) from UFS rails (especially from VCCQ rail).
+ * To avoid this situation, add 2ms delay before putting these UFS
+ * rails in LPM mode.
+ */
+#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
+
+struct ufs_hba;
+void ufs_advertise_fixup_device(struct ufs_hba *hba);
+
+static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+
+ END_FIX
+};
+#endif /* UFS_QUIRKS_H_ */
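
The ufs_fixups table is consumed by ufs_advertise_fixup_device(), whose body is
not part of this diff; the expected behaviour is a scan that matches on
manufacturer id and model prefix and ORs every matching row's quirk bits
together, which is why the same vendor can legitimately appear in several rows.
A standalone sketch of that matching, under those assumptions:

#include <assert.h>
#include <string.h>

#define STR_PRFX_EQUAL(s1, s2) (!strncmp(s1, s2, strlen(s1)))
#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL  "ANY_MODEL"

struct fix { unsigned int vendor; const char *model; unsigned int quirk; };

/* OR together the quirks of every table row matching this device. */
static unsigned int match_quirks(const struct fix *t, int n,
                                 unsigned int vendor, const char *model)
{
    unsigned int quirks = 0;
    int i;

    for (i = 0; i < n; i++)
        if ((t[i].vendor == vendor || t[i].vendor == UFS_ANY_VENDOR) &&
            (STR_PRFX_EQUAL(t[i].model, model) ||
             !strcmp(t[i].model, UFS_ANY_MODEL)))
            quirks |= t[i].quirk;
    return quirks;
}

int main(void)
{
    const struct fix table[] = {
        { 0x198, UFS_ANY_MODEL,     1 << 6 }, /* any Toshiba: LPM delay  */
        { 0x198, "THGLF2G9C8KBADG", 1 << 4 }, /* one model: PA_TACTIVATE */
    };

    assert(match_quirks(table, 2, 0x198, "THGLF2G9C8KBADG rev1")
           == ((1 << 6) | (1 << 4)));
    return 0;
}
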
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index d2a7b127b05c..718f12e09885 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -40,6 +40,8 @@
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
+
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
int ret = 0;
@@ -277,6 +279,21 @@ void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ int ret;
+
+ ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+ &hba->lanes_per_direction);
+ if (ret) {
+ dev_dbg(hba->dev,
+ "%s: failed to read lanes-per-direction, ret=%d\n",
+ __func__, ret);
+ hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+ }
+}
+
/**
* ufshcd_pltfrm_init - probe routine of the driver
* @pdev: pointer to Platform device handle
@@ -331,6 +348,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
+ ufshcd_init_lanes_per_dir(hba);
+
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err(dev, "Initialization failed\n");
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 85cd2564c157..f8fa72c31a9d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,7 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.c
* Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -39,8 +39,10 @@
#include <linux/async.h>
#include <linux/devfreq.h>
-
+#include <linux/nls.h>
+#include <linux/of.h>
#include "ufshcd.h"
+#include "ufs_quirks.h"
#include "unipro.h"
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
@@ -58,13 +60,25 @@
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */
+/*
+ * Query request timeout for fDeviceInit flag
+ * fDeviceInit query response time for some devices is so large that the
+ * default QUERY_REQ_TIMEOUT may not be enough for such devices.
+ */
+#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
+/* maximum number of retries for a general UIC command */
+#define UFS_UIC_COMMAND_RETRIES 3
+
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
@@ -92,7 +106,7 @@ static u32 ufs_query_desc_max_size[] = {
QUERY_DESC_INTERCONNECT_MAX_SIZE,
QUERY_DESC_STRING_MAX_SIZE,
QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_GEOMETRY_MAZ_SIZE,
+ QUERY_DESC_GEOMETRY_MAX_SIZE,
QUERY_DESC_POWER_MAX_SIZE,
QUERY_DESC_RFU_MAX_SIZE,
};
@@ -119,9 +133,11 @@ enum {
/* UFSHCD UIC layer error flags */
enum {
UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
- UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
- UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
- UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+ UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+ UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+ UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};
/* Interrupt configuration options */
@@ -181,6 +197,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -190,6 +207,10 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
+{
+ return tag >= 0 && tag < hba->nutrs;
+}
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
@@ -215,6 +236,16 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
}
}
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+ if (!val)
+ return;
+
+ if (*val < 0x20 || *val > 0x7e)
+ *val = ' ';
+}
+
/*
* ufshcd_wait_for_register - wait for register value to change
* @hba - per-adapter interface
@@ -223,11 +254,13 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
* @val - wait condition
* @interval_us - polling interval in microsecs
* @timeout_ms - timeout in millisecs
+ * @can_sleep - perform sleep or just spin
*
* Returns -ETIMEDOUT on error, zero on success
*/
-static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us, unsigned long timeout_ms)
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms, bool can_sleep)
{
int err = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -236,9 +269,10 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
val = val & mask;
while ((ufshcd_readl(hba, reg) & mask) != val) {
- /* wakeup within 50us of expiry */
- usleep_range(interval_us, interval_us + 50);
-
+ if (can_sleep)
+ usleep_range(interval_us, interval_us + 50);
+ else
+ udelay(interval_us);
if (time_after(jiffies, timeout)) {
if ((ufshcd_readl(hba, reg) & mask) != val)
err = -ETIMEDOUT;
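
The polling loop above bounds itself with time_after(), which stays correct
even if the jiffies counter wraps during the wait, because the comparison is
done through signed subtraction rather than a direct less-than. The trick in
isolation:

#include <assert.h>

/* Model of the kernel's time_after(a, b): true if a is later than b,
 * correct across counter wraparound thanks to signed subtraction. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long near_wrap = (unsigned long)-10; /* 10 ticks before wrap */

    assert(time_after(near_wrap + 20, near_wrap)); /* wrapped, still later */
    assert(!time_after(near_wrap, near_wrap + 20));
    return 0;
}
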
@@ -360,6 +394,16 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
}
/**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+ __clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
@@ -374,11 +418,9 @@ static inline int ufshcd_get_lists_status(u32 reg)
* 1 UTRLRDY
* 2 UTMRLRDY
* 3 UCRDY
- * 4 HEI
- * 5 DEI
- * 6-7 reserved
+ * 4-7 reserved
*/
- return (((reg) & (0xFF)) >> 1) ^ (0x07);
+ return ((reg & 0xFF) >> 1) ^ 0x07;
}
/**
@@ -528,6 +570,34 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+ /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ return UFS_UNIPRO_VER_1_41;
+ else
+ return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+ /*
+ * If both host and device support UniPro ver1.6 or later, PA layer
+ * parameters tuning happens during link startup itself.
+ *
+ * We can manually tune PA layer parameters if either host or device
+ * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+ * logic simple, we will only do manual tuning if local unipro version
+ * doesn't support ver1.6 or later.
+ */
+ if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+ return true;
+ else
+ return false;
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -582,6 +652,11 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+ }
+
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
@@ -697,7 +772,8 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done)
+ || hba->active_uic_cmd || hba->uic_async_done
+ || ufshcd_eh_in_progress(hba))
return;
hba->clk_gating.state = REQ_CLKS_OFF;
@@ -953,13 +1029,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
*
* Identical to ufshcd_send_uic_cmd() expect mutex. Must be called
* with mutex held and host_lock locked.
* Returns 0 only if success.
*/
static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ bool completion)
{
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
@@ -967,7 +1045,8 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
return -EIO;
}
- init_completion(&uic_cmd->done);
+ if (completion)
+ init_completion(&uic_cmd->done);
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
@@ -992,7 +1071,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
@@ -1035,6 +1114,7 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
cpu_to_le32(lower_32_bits(sg->dma_address));
prd_table[i].upper_addr =
cpu_to_le32(upper_32_bits(sg->dma_address));
+ prd_table[i].reserved = 0;
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@ -1117,7 +1197,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
/* Transfer request descriptor header fields */
req_desc->header.dword_0 = cpu_to_le32(dword_0);
-
+ /* dword_1 is reserved, hence it is set to 0 */
+ req_desc->header.dword_1 = 0;
/*
* assigning invalid value for command status. Controller
* updates OCS on command completion, with the command
@@ -1125,6 +1206,10 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
*/
req_desc->header.dword_2 =
cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ /* dword_3 is reserved, hence it is set to 0 */
+ req_desc->header.dword_3 = 0;
+
+ req_desc->prd_table_length = 0;
}
/**
@@ -1137,6 +1222,7 @@ static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ unsigned short cdb_len;
/* command descriptor fields */
ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -1151,8 +1237,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
ucd_req_ptr->sc.exp_data_transfer_len =
cpu_to_be32(lrbp->cmd->sdb.length);
- memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
- (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -1189,6 +1278,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(descp, query->descriptor, len);
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -1201,6 +1291,11 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
ucd_req_ptr->header.dword_0 =
UPIU_HEADER_DWORD(
UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+ /* clear rest of the fields of basic header */
+ ucd_req_ptr->header.dword_1 = 0;
+ ucd_req_ptr->header.dword_2 = 0;
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -1293,6 +1388,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba = shost_priv(host);
tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
spin_lock_irqsave(hba->host->host_lock, flags);
switch (hba->ufshcd_state) {
@@ -1312,6 +1413,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
cmd->scsi_done(cmd);
goto out_unlock;
}
+
+ /* if error handling is in progress, don't issue commands */
+ if (ufshcd_eh_in_progress(hba)) {
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ goto out_unlock;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* acquire the tag to make sure device cmds don't use it */
@@ -1396,7 +1504,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
*/
err = ufshcd_wait_for_register(hba,
REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000);
+ mask, ~mask, 1000, 1000, true);
return err;
}
@@ -1475,9 +1583,17 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
if (!time_left) {
err = -ETIMEDOUT;
+ dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
+ __func__, lrbp->task_tag);
if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
- /* sucessfully cleared the command, retry if needed */
+ /* successfully cleared the command, retry if needed */
err = -EAGAIN;
+ /*
+ * in case of an error, after clearing the doorbell,
+ * we also need to clear the outstanding_request
+ * field in hba
+ */
+ ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
}
return err;
@@ -1555,6 +1671,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1591,6 +1709,29 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
(*request)->upiu_req.selector = selector;
}
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+ int ret;
+ int retries;
+
+ for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+ ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ if (ret)
+ dev_dbg(hba->dev,
+ "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+ __func__, opcode, idn, ret, retries);
+ return ret;
+}
+
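
ufshcd_query_flag_retry() and the attribute/descriptor retry wrappers added
further below all share one shape: retry the single fallible call up to
QUERY_REQ_RETRIES times, log quietly per attempt, and complain loudly once at
the end. That shape, reduced to a userspace sketch:

#include <assert.h>
#include <stdio.h>

#define QUERY_REQ_RETRIES 10

/* Retry a fallible call a bounded number of times; log loudly only
 * after the final failure, as the *_retry() wrappers do. */
static int query_with_retries(int (*query)(void *), void *arg)
{
    int ret = -1;
    int retries;

    for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
        ret = query(arg);
        if (!ret)
            break;                  /* success: stop retrying */
        fprintf(stderr, "attempt %d failed: %d\n", retries, ret);
    }
    if (ret)
        fprintf(stderr, "giving up after %d retries\n", QUERY_REQ_RETRIES);
    return ret;
}

/* Toy query that succeeds on its third attempt. */
static int flaky(void *arg)
{
    int *calls = arg;

    return ++(*calls) < 3 ? -5 : 0; /* fail twice, then succeed */
}

int main(void)
{
    int calls = 0;

    assert(query_with_retries(flaky, &calls) == 0 && calls == 3);
    return 0;
}
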
/**
* ufshcd_query_flag() - API function for sending flag query requests
* hba: per-adapter instance
@@ -1600,12 +1741,13 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
*
* Returns 0 for success, non-zero in case of failure
*/
-static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, index = 0, selector = 0;
+ int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
@@ -1638,7 +1780,10 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
+ timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
dev_err(hba->dev,
@@ -1722,20 +1867,43 @@ out:
}
/**
- * ufshcd_query_descriptor - API function for sending descriptor requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * desc_buf: the buffer that contains the descriptor
- * buf_len: length parameter passed to the device
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
*
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
- */
-static int ufshcd_query_descriptor(struct ufs_hba *hba,
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val)
+{
+ int ret = 0;
+ u32 retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ ret = ufshcd_query_attr(hba, opcode, idn, index,
+ selector, attr_val);
+ if (ret)
+ dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, idn %d, failed with error %d after %d retires\n",
+ __func__, idn, ret, QUERY_REQ_RETRIES);
+ return ret;
+}
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
u8 selector, u8 *desc_buf, int *buf_len)
{
@@ -1800,6 +1968,39 @@ out:
}
/**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor
+ * requests
+ * hba: per-adapter instance
+ * opcode: attribute opcode
+ * idn: attribute idn to access
+ * index: index field
+ * selector: selector field
+ * desc_buf: the buffer that contains the descriptor
+ * buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum desc_idn idn, u8 index,
+ u8 selector, u8 *desc_buf, int *buf_len)
+{
+ int err;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+ selector, desc_buf, buf_len);
+ if (!err || err == -EINVAL)
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
+
+/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -1841,9 +2042,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
return -ENOMEM;
}
- ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0, desc_buf,
- &buff_len);
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, desc_buf,
+ &buff_len);
if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
(desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
@@ -1881,6 +2082,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
}
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+EXPORT_SYMBOL(ufshcd_read_device_desc);
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+ u32 size, bool ascii)
+{
+ int err = 0;
+
+ err = ufshcd_read_desc(hba,
+ QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+ if (err) {
+ dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+ __func__, QUERY_REQ_RETRIES, err);
+ goto out;
+ }
+
+ if (ascii) {
+ int desc_len;
+ int ascii_len;
+ int i;
+ char *buff_ascii;
+
+ desc_len = buf[0];
+ /* remove header and divide by 2 to move from UTF16 to UTF8 */
+ ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+ if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+ dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+ if (!buff_ascii) {
+ err = -ENOMEM;
+ goto out_free_buff;
+ }
+
+ /*
+ * the descriptor contains a string in UTF-16 format;
+ * we need to convert it to UTF-8 so it can be displayed
+ */
+ utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+ desc_len - QUERY_DESC_HDR_SIZE,
+ UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ascii_len; i++)
+ ufshcd_remove_non_printable(&buff_ascii[i]);
+
+ memset(buf + QUERY_DESC_HDR_SIZE, 0,
+ size - QUERY_DESC_HDR_SIZE);
+ memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+ buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+out_free_buff:
+ kfree(buff_ascii);
+ }
+out:
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_read_string_desc);
+
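The ASCII path above sizes its scratch buffer as (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1: each UTF-16 code unit shrinks to at most one byte for this use, plus one byte for the terminator, and any byte that is not printable ASCII is then scrubbed to a space. A rough userspace analog, assuming BMP-only input and hand-rolling the conversion the driver delegates to utf16s_to_utf8s(); the function name and test data are invented:

    #include <stdio.h>
    #include <ctype.h>

    /* Convert big-endian UTF-16 code units to bytes, replacing anything
     * outside printable ASCII with a space. */
    static void utf16be_to_print_ascii(const unsigned char *in, int in_bytes,
                                       char *out, int out_len)
    {
        int i, o = 0;

        for (i = 0; i + 1 < in_bytes && o < out_len - 1; i += 2) {
            unsigned int cu = (in[i] << 8) | in[i + 1];  /* big endian */
            out[o++] = (cu < 0x80 && isprint(cu)) ? (char)cu : ' ';
        }
        out[o] = '\0';
    }

    int main(void)
    {
        /* "UFS" followed by one non-ASCII code point */
        const unsigned char raw[] = { 0x00,'U', 0x00,'F', 0x00,'S', 0x30,0x42 };
        char buf[8];

        utf16be_to_print_ascii(raw, sizeof(raw), buf, sizeof(buf));
        printf("'%s'\n", buf);    /* prints 'UFS ' */
        return 0;
    }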
/**
* ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
* @hba: Pointer to adapter instance
@@ -2128,6 +2405,7 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
};
const char *set = action[!!peer];
int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
@@ -2135,10 +2413,18 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
uic_cmd.argument3 = mib_val;
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret)
- dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ } while (ret && peer && --retries);
+
+ if (!retries)
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES);
return ret;
}
@@ -2163,6 +2449,7 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
};
const char *get = action[!!peer];
int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
struct ufs_pa_layer_attr orig_pwr_info;
struct ufs_pa_layer_attr temp_pwr_info;
bool pwr_mode_change = false;
@@ -2193,14 +2480,19 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
uic_cmd.argument1 = attr_sel;
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret) {
- dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
- get, UIC_GET_ATTR_ID(attr_sel), ret);
- goto out;
- }
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ } while (ret && peer && --retries);
+
+ if (!retries)
+ dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+ get, UIC_GET_ATTR_ID(attr_sel), UFS_UIC_COMMAND_RETRIES);
- if (mib_val)
+ if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@ -2233,6 +2525,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
unsigned long flags;
u8 status;
int ret;
+ bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
init_completion(&uic_async_done);
@@ -2240,15 +2533,17 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->uic_async_done = &uic_async_done;
- ret = __ufshcd_send_uic_cmd(hba, cmd);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (ret) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
- cmd->command, cmd->argument3, ret);
- goto out;
+ if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+ /*
+ * Make sure UIC command completion interrupt is disabled before
+ * issuing UIC command.
+ */
+ wmb();
+ reenable_intr = true;
}
- ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -2274,7 +2569,10 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
}
out:
spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
+ if (reenable_intr)
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
@@ -2315,13 +2613,65 @@ out:
return ret;
}
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+static int ufshcd_link_recovery(struct ufs_hba *hba)
{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufshcd_state = UFSHCD_STATE_RESET;
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ ret = ufshcd_host_reset_and_restore(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ret)
+ dev_err(hba->dev, "%s: link recovery failed, err %d",
+ __func__, ret);
+
+ return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret;
struct uic_command uic_cmd = {0};
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+ __func__, ret);
+
+ /*
+ * If link recovery fails then return error so that the caller
+ * doesn't retry the hibern8 enter again.
+ */
+ if (ufshcd_link_recovery(hba))
+ ret = -ENOLINK;
+ }
+
+ return ret;
+}
- return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret = 0, retries;
+
+ for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+ ret = __ufshcd_uic_hibern8_enter(hba);
+ if (!ret || ret == -ENOLINK)
+ goto out;
+ }
+out:
+ return ret;
}
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
@@ -2332,8 +2682,9 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
if (ret) {
- ufshcd_set_link_off(hba);
- ret = ufshcd_host_reset_and_restore(hba);
+ dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
+ __func__, ret);
+ ret = ufshcd_link_recovery(hba);
}
return ret;
@@ -2513,17 +2864,12 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
- int i, retries, err = 0;
+ int i;
+ int err;
bool flag_res = 1;
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- /* Set the fDeviceInit flag */
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
- if (!err || err == -ETIMEDOUT)
- break;
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
- }
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
if (err) {
dev_err(hba->dev,
"%s setting fDeviceInit flag failed with error %d\n",
@@ -2531,18 +2877,11 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
goto out;
}
- /* poll for max. 100 iterations for fDeviceInit flag to clear */
- for (i = 0; i < 100 && !err && flag_res; i++) {
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- err = ufshcd_query_flag(hba,
- UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
- if (!err || err == -ETIMEDOUT)
- break;
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
- err);
- }
- }
+ /* poll for max. 1000 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 1000 && !err && flag_res; i++)
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+
if (err)
dev_err(hba->dev,
"%s reading fDeviceInit flag failed with error %d\n",
@@ -2563,7 +2902,7 @@ out:
* To bring UFS host controller to operational state,
* 1. Enable required interrupts
* 2. Configure interrupt aggregation
- * 3. Program UTRL and UTMRL base addres
+ * 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
* Returns 0 on success, non-zero value on failure
@@ -2593,8 +2932,13 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
+ * Make sure base address and interrupt setup are updated before
+ * enabling the run/stop registers below.
+ */
+ wmb();
+
+ /*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
- * DEI, HEI bits must be 0
*/
reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
if (!(ufshcd_get_lists_status(reg))) {
@@ -2611,6 +2955,23 @@ out:
}
/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+ int err;
+
+ ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+ CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+ 10, 1, can_sleep);
+ if (err)
+ dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
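ufshcd_hba_stop() now confirms the disable actually latched by polling REG_CONTROLLER_ENABLE through ufshcd_wait_for_register() rather than sleeping a fixed interval. A hedged sketch of that poll-with-deadline idiom; the real helper reads MMIO through ufshcd_readl() and chooses between sleeping and spinning via can_sleep:

    #include <stdio.h>
    #include <time.h>

    /* Poll until (*reg & mask) == val or timeout_ms elapses. */
    static int wait_for_flag(volatile unsigned int *reg, unsigned int mask,
                             unsigned int val, long timeout_ms)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        while ((*reg & mask) != val) {
            clock_gettime(CLOCK_MONOTONIC, &now);
            if ((now.tv_sec - start.tv_sec) * 1000 +
                (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
                return -1;    /* timed out */
        }
        return 0;
    }

    int main(void)
    {
        volatile unsigned int ctrl = 0;    /* pretend the controller cleared it */

        printf("%d\n", wait_for_flag(&ctrl, 0x1, 0x0, 10));    /* 0 */
        return 0;
    }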
+/**
* ufshcd_hba_enable - initialize the controller
* @hba: per adapter instance
*
@@ -2630,18 +2991,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
* development and testing of this driver. msleep can be changed to
* mdelay and retry count can be reduced based on the controller.
*/
- if (!ufshcd_is_hba_active(hba)) {
-
+ if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
- ufshcd_hba_stop(hba);
-
- /*
- * This delay is based on the testing done with UFS host
- * controller FPGA. The delay can be changed based on the
- * host controller used.
- */
- msleep(5);
- }
+ ufshcd_hba_stop(hba, true);
/* UniPro link is disabled at this point */
ufshcd_set_link_off(hba);
@@ -3090,7 +3442,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
scsi_status = result & MASK_SCSI_STATUS;
result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
- if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ /*
+ * Currently we only support BKOPs exception
+ * events, so we can ignore a BKOPs exception event
+ * during power management callbacks. A BKOPs exception
+ * event is not expected to be raised in the runtime suspend
+ * callback, as that path allows urgent bkops.
+ * During system suspend we forcefully disable bkops
+ * anyway, and if urgent bkops is needed it will be
+ * re-enabled on system resume. A long term
+ * solution could be to abort the system suspend if the
+ * UFS device needs urgent BKOPs.
+ */
+ if (!hba->pm_op_in_progress &&
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
schedule_work(&hba->eeh_work);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
@@ -3149,31 +3514,18 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
}
/**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
+ * @completed_reqs: requests to complete
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- unsigned long completed_reqs;
- u32 tr_doorbell;
int result;
int index;
- /* Resetting interrupt aggregation counters first and reading the
- * DOOR_BELL afterward allows us to handle all the completed requests.
- * In order to prevent other interrupts starvation the DB is read once
- * after reset. The down side of this solution is the possibility of
- * false interrupt if device completes another request after resetting
- * aggregation and before reading the DB.
- */
- if (ufshcd_is_intr_aggr_allowed(hba))
- ufshcd_reset_intr_aggr(hba);
-
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
cmd = lrbp->cmd;
@@ -3203,6 +3555,31 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
}
/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+ unsigned long completed_reqs;
+ u32 tr_doorbell;
+
+ /* Resetting interrupt aggregation counters first and reading the
+ * DOOR_BELL afterward allows us to handle all the completed requests.
+ * In order to prevent other interrupts starvation the DB is read once
+ * after reset. The down side of this solution is the possibility of
+ * false interrupt if device completes another request after resetting
+ * aggregation and before reading the DB.
+ */
+ if (ufshcd_is_intr_aggr_allowed(hba))
+ ufshcd_reset_intr_aggr(hba);
+
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+}
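The refactor above separates deriving the completion set from processing it. The derivation is pure bit algebra: any tag that is set in outstanding_reqs but already cleared in the doorbell register has completed, which is exactly the XOR of the two snapshots (valid only while the host lock keeps new submissions out). A small demonstration with made-up register values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long outstanding = 0x2Dul;    /* tags 0, 2, 3, 5 in flight */
        unsigned long doorbell = 0x24ul;       /* hardware still owns 2 and 5 */
        unsigned long completed = doorbell ^ outstanding;
        int tag;

        for (tag = 0; tag < 8; tag++)          /* for_each_set_bit() analog */
            if (completed & (1ul << tag))
                printf("tag %d completed\n", tag);    /* prints 0 and 3 */
        return 0;
    }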
+
+/**
* ufshcd_disable_ee - disable exception event
* @hba: per-adapter instance
* @mask: exception event to disable
@@ -3222,7 +3599,7 @@ static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
val = hba->ee_ctrl_mask & ~mask;
val &= 0xFFFF; /* 2 bytes */
- err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
if (!err)
hba->ee_ctrl_mask &= ~mask;
@@ -3250,7 +3627,7 @@ static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
val = hba->ee_ctrl_mask | mask;
val &= 0xFFFF; /* 2 bytes */
- err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
if (!err)
hba->ee_ctrl_mask |= mask;
@@ -3276,7 +3653,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
if (hba->auto_bkops_enabled)
goto out;
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
QUERY_FLAG_IDN_BKOPS_EN, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to enable bkops %d\n",
@@ -3325,7 +3702,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
goto out;
}
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
QUERY_FLAG_IDN_BKOPS_EN, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to disable bkops %d\n",
@@ -3356,7 +3733,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
- return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
@@ -3414,15 +3791,52 @@ out:
*/
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
- return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+ return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
- return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+ int err;
+ u32 curr_status = 0;
+
+ if (hba->is_urgent_bkops_lvl_checked)
+ goto enable_auto_bkops;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * We are seeing that some devices are raising the urgent bkops
+ * exception events even when the BKOPS status doesn't indicate performance
+ * impacted or critical. Handle these devices by determining their urgent
+ * bkops status at runtime.
+ */
+ if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+ dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+ __func__, curr_status);
+ /* update the current status as the urgent bkops level */
+ hba->urgent_bkops_lvl = curr_status;
+ hba->is_urgent_bkops_lvl_checked = true;
+ }
+
+enable_auto_bkops:
+ err = ufshcd_enable_auto_bkops(hba);
+out:
+ if (err < 0)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+}
+
/**
* ufshcd_exception_event_handler - handle exceptions raised by device
* @work: pointer to work data
@@ -3446,17 +3860,95 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
}
status &= hba->ee_ctrl_mask;
- if (status & MASK_EE_URGENT_BKOPS) {
- err = ufshcd_urgent_bkops(hba);
- if (err < 0)
- dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
- __func__, err);
- }
+
+ if (status & MASK_EE_URGENT_BKOPS)
+ ufshcd_bkops_exception_event_handler(hba);
+
out:
pm_runtime_put_sync(hba->dev);
return;
}
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - Checks whether error handling is required
+ * to recover from the DL NAC errors.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool err_handling = true;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around
+ * device fatal errors and/or DL NAC & REPLAY timeout errors.
+ */
+ if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+ goto out;
+
+ if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
+ goto out;
+
+ if ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+ int err;
+ /*
+ * wait for 50ms to see if we can get any other errors or not.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ msleep(50);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /*
+ * now check whether we have received any severe errors other
+ * than the DL NAC error
+ */
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
+ goto out;
+
+ /*
+ * As DL NAC is the only error received so far, send out NOP
+ * command to confirm if link is still active or not.
+ * - If we don't get any response then do error recovery.
+ * - If we get response then clear the DL NAC error bit.
+ */
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ err = ufshcd_verify_dev_init(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ if (err)
+ goto out;
+
+ /* Link seems to be alive hence ignore the DL NAC errors */
+ if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+ hba->saved_err &= ~UIC_ERROR;
+ /* clear NAC error */
+ hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ if (!hba->saved_uic_err) {
+ err_handling = false;
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return err_handling;
+}
+
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
* @work: pointer to work structure
@@ -3469,6 +3961,7 @@ static void ufshcd_err_handler(struct work_struct *work)
u32 err_tm = 0;
int err = 0;
int tag;
+ bool needs_reset = false;
hba = container_of(work, struct ufs_hba, eh_work);
@@ -3476,40 +3969,86 @@ static void ufshcd_err_handler(struct work_struct *work)
ufshcd_hold(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
goto out;
- }
hba->ufshcd_state = UFSHCD_STATE_RESET;
ufshcd_set_eh_in_progress(hba);
/* Complete requests that have door-bell cleared by h/w */
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_complete_requests(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ bool ret;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
+ ret = ufshcd_quirk_dl_nac_errors(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!ret)
+ goto skip_err_handling;
+ }
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+ needs_reset = true;
+
+ /*
+ * if host reset is required then skip clearing the pending
+ * transfers forcefully because they will automatically get
+ * cleared after link startup.
+ */
+ if (needs_reset)
+ goto skip_pending_xfer_clear;
+
+ /* release lock as clear command might sleep */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
- if (ufshcd_clear_cmd(hba, tag))
- err_xfer |= 1 << tag;
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+ if (ufshcd_clear_cmd(hba, tag)) {
+ err_xfer = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
/* Clear pending task management requests */
- for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
- if (ufshcd_clear_tm_cmd(hba, tag))
- err_tm |= 1 << tag;
+ for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+ if (ufshcd_clear_tm_cmd(hba, tag)) {
+ err_tm = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
- /* Complete the requests that are cleared by s/w */
+lock_skip_pending_xfer_clear:
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Complete the requests that are cleared by s/w */
+ ufshcd_complete_requests(hba);
+
+ if (err_xfer || err_tm)
+ needs_reset = true;
+
+skip_pending_xfer_clear:
/* Fatal errors need reset */
- if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+ if (needs_reset) {
+ unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
+
+ /*
+ * ufshcd_reset_and_restore() does the link reinitialization
+ * which will need at least one empty doorbell slot to send the
+ * device management commands (NOP and query commands).
+ * If no slot is empty at this moment then free up the last
+ * slot forcefully.
+ */
+ if (hba->outstanding_reqs == max_doorbells)
+ __ufshcd_transfer_req_compl(hba,
+ (1UL << (hba->nutrs - 1)));
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
if (err) {
dev_err(hba->dev, "%s: reset and restore failed\n",
__func__);
@@ -3523,9 +4062,19 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->saved_err = 0;
hba->saved_uic_err = 0;
}
+
+skip_err_handling:
+ if (!needs_reset) {
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ if (hba->saved_err || hba->saved_uic_err)
+ dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+ __func__, hba->saved_err, hba->saved_uic_err);
+ }
+
ufshcd_clear_eh_in_progress(hba);
out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
scsi_unblock_requests(hba->host);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
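Before resetting, the handler must guarantee ufshcd_reset_and_restore() a free doorbell slot for its NOP and query device-management commands, so when every slot is busy it forcibly completes the last one. The mask arithmetic, shown standalone with an invented slot count:

    #include <stdio.h>

    int main(void)
    {
        int nutrs = 16;    /* slot count invented for the demo */
        unsigned long max_doorbells = (1UL << nutrs) - 1;
        unsigned long outstanding = max_doorbells;    /* every slot busy */

        if (outstanding == max_doorbells) {
            unsigned long last_slot = 1UL << (nutrs - 1);

            outstanding &= ~last_slot;    /* force-complete the last slot */
            printf("freed tag %d, outstanding now %#lx\n",
                   nutrs - 1, outstanding);    /* 0x7fff */
        }
        return 0;
    }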
@@ -3543,6 +4092,14 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
@@ -3580,15 +4137,18 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
}
if (queue_eh_work) {
+ /*
+ * update the transfer error masks to sticky bits; do this
+ * irrespective of the current ufshcd_state.
+ */
+ hba->saved_err |= hba->errors;
+ hba->saved_uic_err |= hba->uic_error;
+
/* handle fatal errors only when link is functional */
if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
/* block commands from scsi mid-layer */
scsi_block_requests(hba->host);
- /* transfer error masks to sticky bits */
- hba->saved_err |= hba->errors;
- hba->saved_uic_err |= hba->uic_error;
-
hba->ufshcd_state = UFSHCD_STATE_ERROR;
schedule_work(&hba->eh_work);
}
@@ -3645,16 +4205,20 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
- u32 intr_status;
+ u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status) {
+ if (intr_status)
ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- ufshcd_sl_intr(hba, intr_status);
+
+ if (enabled_intr_status) {
+ ufshcd_sl_intr(hba, enabled_intr_status);
retval = IRQ_HANDLED;
}
spin_unlock(hba->host->host_lock);
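The interrupt handler now acknowledges every latched status bit but services only the sources that are currently enabled, so a spurious or shared-line interrupt with no enabled cause returns IRQ_NONE. The filter is a single mask, sketched with invented register values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int intr_status = 0x0301;    /* raw latched causes */
        unsigned int intr_enable = 0x0001;    /* only one source enabled */
        unsigned int enabled = intr_status & intr_enable;

        /* ack all latched bits, but handle (and claim the IRQ) only
         * the enabled subset */
        printf("ack %#x, handle %#x -> %s\n", intr_status, enabled,
               enabled ? "IRQ_HANDLED" : "IRQ_NONE");
        return 0;
    }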
@@ -3677,7 +4241,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
- mask, 0, 1000, 1000);
+ mask, 0, 1000, 1000, true);
out:
return err;
}
@@ -3740,6 +4304,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
+
+ /* Make sure descriptors are ready before ringing the task doorbell */
+ wmb();
+
ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
spin_unlock_irqrestore(host->host_lock, flags);
@@ -3845,13 +4413,23 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
host = cmd->device->host;
hba = shost_priv(host);
tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
ufshcd_hold(hba, false);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* If command is already aborted/completed, return SUCCESS */
- if (!(test_bit(tag, &hba->outstanding_reqs)))
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
+ dev_err(hba->dev,
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+ __func__, tag, hba->outstanding_reqs, reg);
goto out;
+ }
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!(reg & (1 << tag))) {
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
@@ -3905,7 +4483,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
scsi_dma_unmap(cmd);
spin_lock_irqsave(host->host_lock, flags);
- __clear_bit(tag, &hba->outstanding_reqs);
+ ufshcd_outstanding_req_clear(hba, tag);
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
@@ -3945,7 +4523,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
/* Reset the host controller */
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_hba_stop(hba);
+ ufshcd_hba_stop(hba, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_hba_enable(hba);
@@ -4155,9 +4733,9 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -4232,6 +4810,164 @@ out:
return ret;
}
+static int ufs_get_device_info(struct ufs_hba *hba,
+ struct ufs_device_info *card_data)
+{
+ int err;
+ u8 model_index;
+ u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
+ u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+
+ err = ufshcd_read_device_desc(hba, desc_buf,
+ QUERY_DESC_DEVICE_MAX_SIZE);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * getting vendor (manufacturerID) and Bank Index in big endian
+ * format
+ */
+ card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+ err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+ QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+ strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+
+ /* Null terminate the model string */
+ card_data->model[MAX_MODEL_LEN] = '\0';
+
+out:
+ return err;
+}
+
+void ufs_advertise_fixup_device(struct ufs_hba *hba)
+{
+ int err;
+ struct ufs_dev_fix *f;
+ struct ufs_device_info card_data;
+
+ card_data.wmanufacturerid = 0;
+
+ err = ufs_get_device_info(hba, &card_data);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+ __func__, err);
+ return;
+ }
+
+ for (f = ufs_fixups; f->quirk; f++) {
+ if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
+ (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
+ (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
+ !strcmp(f->card.model, UFS_ANY_MODEL)))
+ hba->dev_quirks |= f->quirk;
+ }
+}
+
+/**
+ * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_TActivate parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
+ * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
+ * the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_min_activatetime);
+ if (ret)
+ goto out;
+
+ /* make sure proper unit conversion is applied */
+ tuned_pa_tactivate =
+ ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
+ / PA_TACTIVATE_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ tuned_pa_tactivate);
+
+out:
+ return ret;
+}
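The calculation above is a unit conversion: RX_MIN_ACTIVATETIME_CAPABILITY counts 100 us steps (RX_MIN_ACTIVATETIME_UNIT_US in the unipro.h additions later in this patch) while PA_TACTIVATE counts 10 us steps (PA_TACTIVATE_TIME_UNIT_US), so the peer value is scaled by 100/10. ufshcd_tune_pa_hibern8time() below applies the same idea with 100 us units on both sides. A worked example with an invented peer value:

    #include <stdio.h>

    /* Unit sizes taken from the unipro.h additions in this patch */
    #define RX_MIN_ACTIVATETIME_UNIT_US 100
    #define PA_TACTIVATE_TIME_UNIT_US 10

    int main(void)
    {
        unsigned int peer_rx_min_activatetime = 2;    /* example value */
        unsigned int tuned = (peer_rx_min_activatetime *
                              RX_MIN_ACTIVATETIME_UNIT_US) /
                             PA_TACTIVATE_TIME_UNIT_US;

        printf("PA_TActivate = %u (i.e. %u us)\n", tuned,
               tuned * PA_TACTIVATE_TIME_UNIT_US);    /* 20 -> 200 us */
        return 0;
    }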
+
+/**
+ * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
+ * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
+ * This optimal value can help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
+ u32 max_hibern8_time, tuned_pa_hibern8time;
+
+ ret = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+ &local_tx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ max_hibern8_time = max(local_tx_hibern8_time_cap,
+ peer_rx_hibern8_time_cap);
+ /* make sure proper unit conversion is applied */
+ tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
+ / PA_HIBERN8_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ tuned_pa_hibern8time);
+out:
+ return ret;
+}
+
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+{
+ if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+ ufshcd_tune_pa_tactivate(hba);
+ ufshcd_tune_pa_hibern8time(hba);
+ }
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
+ /* set 1ms timeout for PA_TACTIVATE */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -4248,6 +4984,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_init_pwr_info(hba);
+ /* set the default level for urgent bkops */
+ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
+ hba->is_urgent_bkops_lvl_checked = false;
+
/* UniPro link is active now */
ufshcd_set_link_active(hba);
@@ -4259,10 +4999,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
+ ufs_advertise_fixup_device(hba);
+ ufshcd_tune_unipro_params(hba);
+
+ ret = ufshcd_set_vccq_rail_unused(hba,
+ (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
+ if (ret)
+ goto out;
+
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
hba->wlun_dev_clr_ua = true;
if (ufshcd_get_max_pwr_mode(hba)) {
@@ -4276,6 +5023,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
__func__, ret);
}
+ /* set the state as operational after switching to desired gear */
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -4285,8 +5034,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* clear any previous UFS device information */
memset(&hba->dev_info, 0, sizeof(hba->dev_info));
- if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
hba->dev_info.f_power_on_wp_en = flag;
if (!hba->is_init_prefetch)
@@ -4332,6 +5081,41 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
ufshcd_probe_hba(hba);
}
+static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int index;
+ bool found = false;
+
+ if (!scmd || !scmd->device || !scmd->device->host)
+ return BLK_EH_NOT_HANDLED;
+
+ host = scmd->device->host;
+ hba = shost_priv(host);
+ if (!hba)
+ return BLK_EH_NOT_HANDLED;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[index].cmd == scmd) {
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ /*
+ * Bypass SCSI error handling and reset the block layer timer if this
+ * SCSI command was not actually dispatched to UFS driver, otherwise
+ * let SCSI layer handle the error as usual.
+ */
+ return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
+}
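ufshcd_eh_timed_out() decides whether a timed-out SCSI command was ever handed to the hardware by scanning the outstanding-request bitmap for a matching stored command pointer; only a dispatched command goes through normal SCSI error handling. The same search, reduced to a standalone bitmap-plus-array scan with invented types:

    #include <stdio.h>
    #include <stdbool.h>

    struct fake_lrb { void *cmd; };

    static bool was_dispatched(const struct fake_lrb *lrb,
                               unsigned long outstanding, int nutrs, void *cmd)
    {
        int i;

        for (i = 0; i < nutrs; i++)    /* for_each_set_bit() analog */
            if ((outstanding & (1ul << i)) && lrb[i].cmd == cmd)
                return true;
        return false;
    }

    int main(void)
    {
        int token = 0;
        struct fake_lrb lrb[4] = { { NULL }, { &token }, { NULL }, { NULL } };

        printf("%d\n", was_dispatched(lrb, 0x2ul, 4, &token));    /* 1 */
        return 0;
    }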
+
static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
@@ -4344,6 +5128,7 @@ static struct scsi_host_template ufshcd_driver_template = {
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
+ .eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
@@ -4372,13 +5157,24 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
- return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg,
+ UFS_VREG_LPM_LOAD_UA);
}
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
- return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
@@ -4413,7 +5209,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || vreg->enabled)
+ if (!vreg)
+ goto out;
+ else if (vreg->enabled || vreg->unused)
goto out;
ret = ufshcd_config_vreg(dev, vreg, true);
@@ -4433,7 +5231,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || !vreg->enabled)
+ if (!vreg)
+ goto out;
+ else if (!vreg->enabled || vreg->unused)
goto out;
ret = regulator_disable(vreg->reg);
@@ -4539,6 +5339,36 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
return 0;
}
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
+{
+ int ret = 0;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+ else if (!info->vccq)
+ goto out;
+
+ if (unused) {
+ /* shut off the rail here */
+ ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
+ /*
+ * Mark this rail as no longer used, so it doesn't get enabled
+ * later by mistake
+ */
+ if (!ret)
+ info->vccq->unused = true;
+ } else {
+ /*
+ * the rail should already have been enabled, so just make sure
+ * that the unused flag is cleared.
+ */
+ info->vccq->unused = false;
+ }
+out:
+ return ret;
+}
+
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
bool skip_ref_clk)
{
@@ -4858,10 +5688,20 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
(!check_for_bkops || (check_for_bkops &&
!hba->auto_bkops_enabled))) {
/*
+ * Let's make sure that the link is in low power mode; we currently
+ * do this by putting the link in Hibern8. Another way to
+ * put the link in low power mode is to send the DME end point
+ * reset to the device and then send the DME reset command to the local
+ * UniPro. But putting the link in Hibern8 is much faster.
+ */
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret)
+ goto out;
+ /*
* Change controller state to "reset state" which
* should also put the link in off/reset state
*/
- ufshcd_hba_stop(hba);
+ ufshcd_hba_stop(hba, true);
/*
* TODO: Check if we need any delay to make sure that
* controller is reset
@@ -4876,6 +5716,16 @@ out:
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
/*
+ * It seems some UFS devices may keep drawing more than sleep current
+ * (at least for 500us) from UFS rails (especially from the VCCQ rail).
+ * To avoid this situation, add a 2ms delay before putting these UFS
+ * rails in LPM mode.
+ */
+ if (!ufshcd_is_link_active(hba) &&
+ hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
+ usleep_range(2000, 2100);
+
+ /*
* If UFS device is either in UFS_Sleep turn off VCC rail to save some
* power.
*
@@ -5337,7 +6187,7 @@ void ufshcd_remove(struct ufs_hba *hba)
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
- ufshcd_hba_stop(hba);
+ ufshcd_hba_stop(hba, true);
scsi_host_put(hba->host);
@@ -5601,6 +6451,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
init_waitqueue_head(&hba->dev_cmd.tag_wq);
ufshcd_init_clk_gating(hba);
+
+ /*
+ * In order to avoid any spurious interrupt immediately after
+ * registering UFS controller interrupt handler, clear any pending UFS
+ * interrupt status and disable all the UFS interrupts.
+ */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+ REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+ /*
+ * Make sure that UFS interrupts are disabled and any pending interrupt
+ * status is cleared before registering UFS interrupt handler.
+ */
+ mb();
+
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 2570d9477b37..4bb65669f052 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -3,6 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.h
* Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -53,6 +54,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
+#include "unipro.h"
#include <asm/irq.h>
#include <asm/byteorder.h>
@@ -177,7 +179,7 @@ struct ufshcd_lrb {
};
/**
- * struct ufs_query - holds relevent data structures for query request
+ * struct ufs_query - holds relevant data structures for query request
* @request: request upiu and function
* @descriptor: buffer for sending/receiving descriptor
* @response: response upiu and response
@@ -382,6 +384,9 @@ struct ufs_init_prefetch {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -469,6 +474,9 @@ struct ufs_hba {
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
+ /* Device deviations from standard UFS device spec. */
+ unsigned int dev_quirks;
+
wait_queue_head_t tm_wq;
wait_queue_head_t tm_tag_wq;
unsigned long tm_condition;
@@ -508,6 +516,8 @@ struct ufs_hba {
bool wlun_dev_clr_ua;
+ /* Number of lanes available (1 or 2) for Rx/Tx */
+ u32 lanes_per_direction;
struct ufs_pa_layer_attr pwr_info;
struct ufs_pwr_mode_info max_pwr_info;
@@ -532,6 +542,9 @@ struct ufs_hba {
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
bool is_sys_suspended;
+
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -587,15 +600,9 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
void ufshcd_remove(struct ufs_hba *);
-
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
-}
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms, bool can_sleep);
static inline void check_upiu_size(void)
{
@@ -681,8 +688,27 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
+
+static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+{
+ return (pwr_info->pwr_rx == FAST_MODE ||
+ pwr_info->pwr_rx == FASTAUTO_MODE) &&
+ (pwr_info->pwr_tx == FAST_MODE ||
+ pwr_info->pwr_tx == FASTAUTO_MODE);
+}
+
+#define ASCII_STD true
+
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+ u32 size, bool ascii);
+
+/* Expose Query-Request API */
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res);
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 0ae0967aaed8..4cb1cc63f1a1 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -92,6 +92,7 @@ enum {
UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
+ UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
};
/*
@@ -170,6 +171,8 @@ enum {
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
+#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
+#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
@@ -209,6 +212,7 @@ enum {
/* GenSelectorIndex calculation macros for M-PHY attributes */
#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
+#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
((sel) & 0xFFFF))
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 816a8a46efb8..e2854e45f8d3 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -15,6 +15,7 @@
/*
* M-TX Configuration Attributes
*/
+#define TX_HIBERN8TIME_CAPABILITY 0x000F
#define TX_MODE 0x0021
#define TX_HSRATE_SERIES 0x0022
#define TX_HSGEAR 0x0023
@@ -48,8 +49,12 @@
#define RX_ENTER_HIBERN8 0x00A7
#define RX_BYPASS_8B10B_ENABLE 0x00A8
#define RX_TERMINATION_FORCE_ENABLE 0x0089
+#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
+#define RX_HIBERN8TIME_CAPABILITY 0x0092
#define is_mphy_tx_attr(attr) (attr < RX_MODE)
+#define RX_MIN_ACTIVATETIME_UNIT_US 100
+#define HIBERN8TIME_UNIT_US 100
/*
* PHY Adpater attributes
*/
@@ -70,6 +75,7 @@
#define PA_MAXRXSPEEDFAST 0x1541
#define PA_MAXRXSPEEDSLOW 0x1542
#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_LOCAL_TX_LCC_ENABLE 0x155E
#define PA_TXSPEEDFAST 0x1565
#define PA_TXSPEEDSLOW 0x1566
#define PA_REMOTEVERINFO 0x15A0
@@ -110,6 +116,12 @@
#define PA_STALLNOCONFIGTIME 0x15A3
#define PA_SAVECONFIGTIME 0x15A4
+#define PA_TACTIVATE_TIME_UNIT_US 10
+#define PA_HIBERN8_TIME_UNIT_US 100
+
+/* PHY Adapter Protocol Constants */
+#define PA_MAXDATALANES 4
+
/* PA power modes */
enum {
FAST_MODE = 1,
@@ -143,6 +155,16 @@ enum ufs_hs_gear_tag {
UFS_HS_G3, /* HS Gear 3 */
};
+enum ufs_unipro_ver {
+ UFS_UNIPRO_VER_RESERVED = 0,
+ UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
+ UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
+ UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
+ UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
+ /* UniPro version field mask in PA_LOCALVERINFO */
+ UFS_UNIPRO_VER_MASK = 0xF,
+};
+
/*
* Data Link Layer Attributes
*/
diff --git a/drivers/sh/superhyway/superhyway.c b/drivers/sh/superhyway/superhyway.c
index 2d9e7f3d5611..bb1fb7712134 100644
--- a/drivers/sh/superhyway/superhyway.c
+++ b/drivers/sh/superhyway/superhyway.c
@@ -66,7 +66,7 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *sdev,
superhyway_read_vcr(dev, base, &dev->vcr);
if (!dev->resource) {
- dev->resource = kmalloc(sizeof(struct resource), GFP_KERNEL);
+ dev->resource = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!dev->resource) {
kfree(dev);
return -ENOMEM;
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 88260205a261..cb58ef0d9b2c 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -6,6 +6,7 @@ source "drivers/soc/fsl/qe/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/rockchip/Kconfig"
+source "drivers/soc/samsung/Kconfig"
source "drivers/soc/sunxi/Kconfig"
source "drivers/soc/tegra/Kconfig"
source "drivers/soc/ti/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 2afdc74f7491..5ade71306ee1 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -10,6 +10,7 @@ obj-y += fsl/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_QCOM) += qcom/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_SOC_TI) += ti/
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index aa5c11acf212..65845712571c 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -63,7 +63,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
struct qe_pio_regs __iomem *regs = mm_gc->regs;
u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
- return in_be32(&regs->cpdata) & pin_mask;
+ return !!(in_be32(&regs->cpdata) & pin_mask);
}
static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 419fa5b7be4d..41eff805a904 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -103,6 +103,39 @@ out_muram:
}
/*
+ * cpm_muram_alloc_common - cpm_muram_alloc common code
+ * @size: number of bytes to allocate
+ * @algo: algorithm for alloc.
+ * @data: data for genalloc's algorithm.
+ *
+ * This function returns an offset into the muram area.
+ */
+static unsigned long cpm_muram_alloc_common(unsigned long size,
+ genpool_algo_t algo, void *data)
+{
+ struct muram_block *entry;
+ unsigned long start;
+
+ start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+ if (!start)
+ goto out2;
+ start = start - GENPOOL_OFFSET;
+ memset_io(cpm_muram_addr(start), 0, size);
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ goto out1;
+ entry->start = start;
+ entry->size = size;
+ list_add(&entry->head, &muram_block_list);
+
+ return start;
+out1:
+ gen_pool_free(muram_pool, start, size);
+out2:
+ return (unsigned long)-ENOMEM;
+}
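Besides moving cpm_muram_alloc_common() above its first caller and making it static, the rewrite switches the bookkeeping kmalloc() to GFP_ATOMIC, presumably because the allocator can now be reached from atomic context, and keeps the two-label goto unwind: a failure after the pool reservation releases only the reservation. A generic sketch of that unwind shape, with all names invented and malloc() standing in for gen_pool_alloc_algo():

    #include <stdio.h>
    #include <stdlib.h>

    static long alloc_with_record(size_t size)
    {
        void *block, *record;

        block = malloc(size);           /* the primary reservation */
        if (!block)
            goto out_fail;

        record = malloc(sizeof(size));  /* bookkeeping entry */
        if (!record)
            goto out_free_block;

        /* ... link the record into a tracking list here ... */
        free(record);                   /* demo only: release immediately */
        free(block);
        return 0;

    out_free_block:
        free(block);                    /* undo only what succeeded */
    out_fail:
        return -1;
    }

    int main(void)
    {
        printf("%ld\n", alloc_with_record(64));    /* 0 on success */
        return 0;
    }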
+
+/*
* cpm_muram_alloc - allocate the requested size worth of multi-user ram
* @size: number of bytes to allocate
* @align: requested alignment, in bytes
@@ -175,39 +208,6 @@ unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
-/*
- * cpm_muram_alloc_common - cpm_muram_alloc common code
- * @size: number of bytes to allocate
- * @algo: algorithm for alloc.
- * @data: data for genalloc's algorithm.
- *
- * This function returns an offset into the muram area.
- */
-unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo,
- void *data)
-{
- struct muram_block *entry;
- unsigned long start;
-
- start = gen_pool_alloc_algo(muram_pool, size, algo, data);
- if (!start)
- goto out2;
- start = start - GENPOOL_OFFSET;
- memset_io(cpm_muram_addr(start), 0, size);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto out1;
- entry->start = start;
- entry->size = size;
- list_add(&entry->head, &muram_block_list);
-
- return start;
-out1:
- gen_pool_free(muram_pool, start, size);
-out2:
- return (unsigned long)-ENOMEM;
-}
-
/**
* cpm_muram_addr - turn a muram offset into a virtual address
* @offset: muram offset to convert
diff --git a/drivers/soc/fsl/qe/qe_ic.c b/drivers/soc/fsl/qe/qe_ic.c
index b77d01ff8330..ec2ca864b0c5 100644
--- a/drivers/soc/fsl/qe/qe_ic.c
+++ b/drivers/soc/fsl/qe/qe_ic.c
@@ -259,6 +259,11 @@ static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
struct qe_ic *qe_ic = h->host_data;
struct irq_chip *chip;
+ if (hw >= ARRAY_SIZE(qe_ic_info)) {
+ pr_err("%s: Invalid hw irq number for QEIC\n", __func__);
+ return -EINVAL;
+ }
+
if (qe_ic_info[hw].mask == 0) {
printk(KERN_ERR "Can't map reserved IRQ\n");
return -EINVAL;
@@ -407,7 +412,8 @@ int qe_ic_set_priority(unsigned int virq, unsigned int priority)
if (priority > 8 || priority == 0)
return -EINVAL;
- if (src > 127)
+ if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
+ "%s: Invalid hw irq number for QEIC\n", __func__))
return -EINVAL;
if (qe_ic_info[src].pri_reg == 0)
return -EINVAL;
@@ -436,6 +442,9 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
if (priority > 2 || priority == 0)
return -EINVAL;
+ if (WARN_ONCE(src >= ARRAY_SIZE(qe_ic_info),
+ "%s: Invalid hw irq number for QEIC\n", __func__))
+ return -EINVAL;
switch (qe_ic_info[src].pri_reg) {
case QEIC_CIPZCC:
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 105597a885cb..0d9b19a78d27 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -60,6 +60,15 @@
#define PWRAP_MAN_CMD_OP_OUTD (0x9 << 8)
#define PWRAP_MAN_CMD_OP_OUTQ (0xa << 8)
+/* macro for Watch Dog Timer Source */
+#define PWRAP_WDT_SRC_EN_STAUPD_TRIG (1 << 25)
+#define PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE (1 << 20)
+#define PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE (1 << 6)
+#define PWRAP_WDT_SRC_MASK_ALL 0xffffffff
+#define PWRAP_WDT_SRC_MASK_NO_STAUPD ~(PWRAP_WDT_SRC_EN_STAUPD_TRIG | \
+ PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \
+ PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE)
+
/* macro for slave device wrapper registers */
#define PWRAP_DEW_BASE 0xbc00
#define PWRAP_DEW_EVENT_OUT_EN (PWRAP_DEW_BASE + 0x0)
@@ -412,6 +421,20 @@ static bool pwrap_is_fsm_vldclr(struct pmic_wrapper *wrp)
return PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR;
}
+/*
+ * A timeout is sometimes caused by the last read command
+ * failing because the pmic wrap could not get FSM_VLDCLR
+ * in time after finishing WACS2_CMD. This leaves the state machine
+ * stuck on FSM_VLDCLR and causes a timeout the next time.
+ * Check the status of the FSM and clear the vldclr to recover from the
+ * error.
+ */
+static inline void pwrap_leave_fsm_vldclr(struct pmic_wrapper *wrp)
+{
+ if (pwrap_is_fsm_vldclr(wrp))
+ pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+}
+
static bool pwrap_is_sync_idle(struct pmic_wrapper *wrp)
{
return pwrap_readl(wrp, PWRAP_WACS2_RDATA) & PWRAP_STATE_SYNC_IDLE0;
@@ -445,8 +468,10 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
int ret;
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
- if (ret)
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
return ret;
+ }
pwrap_writel(wrp, (1 << 31) | ((adr >> 1) << 16) | wdata,
PWRAP_WACS2_CMD);
@@ -459,8 +484,10 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
int ret;
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
- if (ret)
+ if (ret) {
+ pwrap_leave_fsm_vldclr(wrp);
return ret;
+ }
pwrap_writel(wrp, (adr >> 1) << 16, PWRAP_WACS2_CMD);
@@ -804,7 +831,7 @@ MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl);
static int pwrap_probe(struct platform_device *pdev)
{
- int ret, irq;
+ int ret, irq, wdt_src;
struct pmic_wrapper *wrp;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
@@ -894,7 +921,13 @@ static int pwrap_probe(struct platform_device *pdev)
/* Initialize watchdog, may not be done by the bootloader */
pwrap_writel(wrp, 0xf, PWRAP_WDT_UNIT);
- pwrap_writel(wrp, 0xffffffff, PWRAP_WDT_SRC_EN);
+ /*
+ * STAUPD is not used on the mt8173 platform,
+ * so the STAUPD bits of WDT_SRC should be turned off
+ */
+ wdt_src = pwrap_is_mt8173(wrp) ?
+ PWRAP_WDT_SRC_MASK_NO_STAUPD : PWRAP_WDT_SRC_MASK_ALL;
+ pwrap_writel(wrp, wdt_src, PWRAP_WDT_SRC_EN);
pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
pwrap_writel(wrp, ~((1 << 31) | (1 << 1)), PWRAP_INT_EN);
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 0221387e5e27..57e781c71e67 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -76,7 +76,7 @@ struct scp_domain_data {
bool active_wakeup;
};
-static const struct scp_domain_data scp_domain_data[] __initconst = {
+static const struct scp_domain_data scp_domain_data[] = {
[MT8173_POWER_DOMAIN_VDEC] = {
.name = "vdec",
.sta_mask = PWR_STATUS_VDEC,
@@ -174,12 +174,7 @@ struct scp_domain {
struct generic_pm_domain genpd;
struct scp *scp;
struct clk *clk[MAX_CLKS];
- u32 sta_mask;
- void __iomem *ctl_addr;
- u32 sram_pdn_bits;
- u32 sram_pdn_ack_bits;
- u32 bus_prot_mask;
- bool active_wakeup;
+ const struct scp_domain_data *data;
struct regulator *supply;
};
@@ -195,8 +190,9 @@ static int scpsys_domain_is_on(struct scp_domain *scpd)
{
struct scp *scp = scpd->scp;
- u32 status = readl(scp->base + SPM_PWR_STATUS) & scpd->sta_mask;
- u32 status2 = readl(scp->base + SPM_PWR_STATUS_2ND) & scpd->sta_mask;
+ u32 status = readl(scp->base + SPM_PWR_STATUS) & scpd->data->sta_mask;
+ u32 status2 = readl(scp->base + SPM_PWR_STATUS_2ND) &
+ scpd->data->sta_mask;
/*
* A domain is on when both status bits are set. If only one is set
@@ -217,8 +213,8 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
struct scp *scp = scpd->scp;
unsigned long timeout;
bool expired;
- void __iomem *ctl_addr = scpd->ctl_addr;
- u32 sram_pdn_ack = scpd->sram_pdn_ack_bits;
+ void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
+ u32 sram_pdn_ack = scpd->data->sram_pdn_ack_bits;
u32 val;
int ret;
int i;
@@ -273,7 +269,7 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
val |= PWR_RST_B_BIT;
writel(val, ctl_addr);
- val &= ~scpd->sram_pdn_bits;
+ val &= ~scpd->data->sram_pdn_bits;
writel(val, ctl_addr);
/* wait until SRAM_PDN_ACK all 0 */
@@ -292,9 +288,9 @@ static int scpsys_power_on(struct generic_pm_domain *genpd)
expired = true;
}
- if (scpd->bus_prot_mask) {
+ if (scpd->data->bus_prot_mask) {
ret = mtk_infracfg_clear_bus_protection(scp->infracfg,
- scpd->bus_prot_mask);
+ scpd->data->bus_prot_mask);
if (ret)
goto err_pwr_ack;
}
@@ -321,21 +317,21 @@ static int scpsys_power_off(struct generic_pm_domain *genpd)
struct scp *scp = scpd->scp;
unsigned long timeout;
bool expired;
- void __iomem *ctl_addr = scpd->ctl_addr;
- u32 pdn_ack = scpd->sram_pdn_ack_bits;
+ void __iomem *ctl_addr = scp->base + scpd->data->ctl_offs;
+ u32 pdn_ack = scpd->data->sram_pdn_ack_bits;
u32 val;
int ret;
int i;
- if (scpd->bus_prot_mask) {
+ if (scpd->data->bus_prot_mask) {
ret = mtk_infracfg_set_bus_protection(scp->infracfg,
- scpd->bus_prot_mask);
+ scpd->data->bus_prot_mask);
if (ret)
goto out;
}
val = readl(ctl_addr);
- val |= scpd->sram_pdn_bits;
+ val |= scpd->data->sram_pdn_bits;
writel(val, ctl_addr);
/* wait until SRAM_PDN_ACK all 1 */
@@ -409,10 +405,10 @@ static bool scpsys_active_wakeup(struct device *dev)
genpd = pd_to_genpd(dev->pm_domain);
scpd = container_of(genpd, struct scp_domain, genpd);
- return scpd->active_wakeup;
+ return scpd->data->active_wakeup;
}
-static int __init scpsys_probe(struct platform_device *pdev)
+static int scpsys_probe(struct platform_device *pdev)
{
struct genpd_onecell_data *pd_data;
struct resource *res;
@@ -485,12 +481,7 @@ static int __init scpsys_probe(struct platform_device *pdev)
pd_data->domains[i] = genpd;
scpd->scp = scp;
- scpd->sta_mask = data->sta_mask;
- scpd->ctl_addr = scp->base + data->ctl_offs;
- scpd->sram_pdn_bits = data->sram_pdn_bits;
- scpd->sram_pdn_ack_bits = data->sram_pdn_ack_bits;
- scpd->bus_prot_mask = data->bus_prot_mask;
- scpd->active_wakeup = data->active_wakeup;
+ scpd->data = data;
for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++)
scpd->clk[j] = clk[data->clk_id[j]];
@@ -500,14 +491,13 @@ static int __init scpsys_probe(struct platform_device *pdev)
genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
/*
- * Initially turn on all domains to make the domains usable
- * with !CONFIG_PM and to get the hardware in sync with the
- * software. The unused domains will be switched off during
- * late_init time.
+	 * With CONFIG_PM disabled, turn on all domains to make the
+	 * hardware usable.
*/
- genpd->power_on(genpd);
+ if (!IS_ENABLED(CONFIG_PM))
+ genpd->power_on(genpd);
- pm_genpd_init(genpd, NULL, false);
+ pm_genpd_init(genpd, NULL, true);
}
/*
@@ -542,10 +532,12 @@ static const struct of_device_id of_scpsys_match_tbl[] = {
};
static struct platform_driver scpsys_drv = {
+ .probe = scpsys_probe,
.driver = {
.name = "mtk-scpsys",
+ .suppress_bind_attrs = true,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_scpsys_match_tbl),
},
};
-builtin_platform_driver_probe(scpsys_drv, scpsys_probe);
+builtin_platform_driver(scpsys_drv);
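The scpsys rework relies on the third argument of pm_genpd_init(): passing true registers each domain as initially off and lets the genpd core power it up on first use, while the explicit power_on() call remains only for !CONFIG_PM builds, where nothing else would ever enable the hardware. A hedged sketch of that registration step:

#include <linux/pm_domain.h>

static void register_domain(struct generic_pm_domain *genpd)
{
	/*
	 * Without CONFIG_PM nothing will power the domain on later,
	 * so do it once here.
	 */
	if (!IS_ENABLED(CONFIG_PM))
		genpd->power_on(genpd);

	/* Third argument 'true': register the domain as initially off. */
	pm_genpd_init(genpd, NULL, true);
}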
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 534c58937a56..43155e1f97b9 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <dt-bindings/power/rk3288-power.h>
+#include <dt-bindings/power/rk3368-power.h>
struct rockchip_domain_info {
int pwr_mask;
@@ -75,6 +76,9 @@ struct rockchip_pmu {
#define DOMAIN_RK3288(pwr, status, req) \
DOMAIN(pwr, status, req, req, (req) + 16)
+#define DOMAIN_RK3368(pwr, status, req) \
+ DOMAIN(pwr, status, req, (req) + 16, req)
+
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
@@ -419,6 +423,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
if (error) {
dev_err(dev, "failed to handle node %s: %d\n",
node->name, error);
+ of_node_put(node);
goto err_out;
}
}
@@ -444,6 +449,14 @@ static const struct rockchip_domain_info rk3288_pm_domains[] = {
[RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2),
};
+static const struct rockchip_domain_info rk3368_pm_domains[] = {
+ [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6),
+ [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8),
+ [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7),
+ [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2),
+ [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2),
+};
+
static const struct rockchip_pmu_info rk3288_pmu = {
.pwr_offset = 0x08,
.status_offset = 0x0c,
@@ -461,11 +474,32 @@ static const struct rockchip_pmu_info rk3288_pmu = {
.domain_info = rk3288_pm_domains,
};
+static const struct rockchip_pmu_info rk3368_pmu = {
+ .pwr_offset = 0x0c,
+ .status_offset = 0x10,
+ .req_offset = 0x3c,
+ .idle_offset = 0x40,
+ .ack_offset = 0x40,
+
+ .core_pwrcnt_offset = 0x48,
+ .gpu_pwrcnt_offset = 0x50,
+
+ .core_power_transition_time = 24,
+ .gpu_power_transition_time = 24,
+
+ .num_domains = ARRAY_SIZE(rk3368_pm_domains),
+ .domain_info = rk3368_pm_domains,
+};
+
static const struct of_device_id rockchip_pm_domain_dt_match[] = {
{
.compatible = "rockchip,rk3288-power-controller",
.data = (void *)&rk3288_pmu,
},
+ {
+ .compatible = "rockchip,rk3368-power-controller",
+ .data = (void *)&rk3368_pmu,
+ },
{ /* sentinel */ },
};
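The added of_node_put() matters because for_each_available_child_of_node() holds a reference on the child it hands out and only drops it when advancing to the next iteration; leaving the loop early would otherwise leak that reference. A minimal sketch of the pattern, assuming a hypothetical handle_node():

#include <linux/of.h>

static int handle_node(struct device_node *node);	/* hypothetical */

static int probe_children(struct device_node *parent)
{
	struct device_node *node;
	int error;

	for_each_available_child_of_node(parent, node) {
		error = handle_node(node);
		if (error) {
			/*
			 * The iterator will not run again to drop this
			 * child's reference, so drop it here.
			 */
			of_node_put(node);
			return error;
		}
	}
	return 0;
}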
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
new file mode 100644
index 000000000000..d7fc123006a3
--- /dev/null
+++ b/drivers/soc/samsung/Kconfig
@@ -0,0 +1,13 @@
+#
+# SAMSUNG SoC drivers
+#
+menuconfig SOC_SAMSUNG
+ bool "Samsung SoC driver support" if COMPILE_TEST
+
+if SOC_SAMSUNG
+
+config EXYNOS_PMU
+ bool "Exynos PMU controller driver" if COMPILE_TEST
+ depends on (ARM && ARCH_EXYNOS) || ((ARM || ARM64) && COMPILE_TEST)
+
+endif
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
new file mode 100644
index 000000000000..f64ac4d80564
--- /dev/null
+++ b/drivers/soc/samsung/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o exynos3250-pmu.o exynos4-pmu.o \
+ exynos5250-pmu.o exynos5420-pmu.o
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
new file mode 100644
index 000000000000..0acdfd82e751
--- /dev/null
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS - CPU PMU(Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+struct exynos_pmu_context {
+ struct device *dev;
+ const struct exynos_pmu_data *pmu_data;
+};
+
+void __iomem *pmu_base_addr;
+static struct exynos_pmu_context *pmu_context;
+
+void pmu_raw_writel(u32 val, u32 offset)
+{
+ writel_relaxed(val, pmu_base_addr + offset);
+}
+
+u32 pmu_raw_readl(u32 offset)
+{
+ return readl_relaxed(pmu_base_addr + offset);
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
+{
+ unsigned int i;
+ const struct exynos_pmu_data *pmu_data;
+
+ if (!pmu_context)
+ return;
+
+ pmu_data = pmu_context->pmu_data;
+
+ if (pmu_data->powerdown_conf)
+ pmu_data->powerdown_conf(mode);
+
+ if (pmu_data->pmu_config) {
+ for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
+ pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
+ pmu_data->pmu_config[i].offset);
+ }
+
+ if (pmu_data->powerdown_conf_extra)
+ pmu_data->powerdown_conf_extra(mode);
+
+ if (pmu_data->pmu_config_extra) {
+ for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
+ pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
+ pmu_data->pmu_config_extra[i].offset);
+ }
+}
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static const struct of_device_id exynos_pmu_of_device_ids[] = {
+ {
+ .compatible = "samsung,exynos3250-pmu",
+ .data = &exynos3250_pmu_data,
+ }, {
+ .compatible = "samsung,exynos4210-pmu",
+ .data = &exynos4210_pmu_data,
+ }, {
+ .compatible = "samsung,exynos4212-pmu",
+ .data = &exynos4212_pmu_data,
+ }, {
+ .compatible = "samsung,exynos4412-pmu",
+ .data = &exynos4412_pmu_data,
+ }, {
+ .compatible = "samsung,exynos5250-pmu",
+ .data = &exynos5250_pmu_data,
+ }, {
+ .compatible = "samsung,exynos5420-pmu",
+ .data = &exynos5420_pmu_data,
+ },
+	{ /* sentinel */ },
+};
+
+static int exynos_pmu_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmu_base_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pmu_base_addr))
+ return PTR_ERR(pmu_base_addr);
+
+ pmu_context = devm_kzalloc(&pdev->dev,
+ sizeof(struct exynos_pmu_context),
+ GFP_KERNEL);
+ if (!pmu_context) {
+ dev_err(dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+ pmu_context->dev = dev;
+
+ match = of_match_node(exynos_pmu_of_device_ids, dev->of_node);
+
+ pmu_context->pmu_data = match->data;
+
+ if (pmu_context->pmu_data->pmu_init)
+ pmu_context->pmu_data->pmu_init();
+
+ platform_set_drvdata(pdev, pmu_context);
+
+ dev_dbg(dev, "Exynos PMU Driver probe done\n");
+ return 0;
+}
+
+static struct platform_driver exynos_pmu_driver = {
+ .driver = {
+ .name = "exynos-pmu",
+ .of_match_table = exynos_pmu_of_device_ids,
+ },
+ .probe = exynos_pmu_probe,
+};
+
+static int __init exynos_pmu_init(void)
+{
+ return platform_driver_register(&exynos_pmu_driver);
+}
+postcore_initcall(exynos_pmu_init);
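exynos_sys_powerdown_conf() is table-driven: each row pairs a register offset with one value per low-power mode, and a PMU_TABLE_END sentinel terminates the walk. A reduced sketch of the same loop, with hypothetical offsets and a stub register write:

enum mode { MODE_AFTR, MODE_LPA, MODE_SLEEP, NUM_MODES };

struct pmu_conf {
	unsigned int offset;
	unsigned char val[NUM_MODES];
};

#define TABLE_END (-1U)

static const struct pmu_conf table[] = {
	{ 0x1000, { 0x0, 0x0, 0x2 } },	/* hypothetical offsets */
	{ 0x1004, { 0x1, 0x1, 0x0 } },
	{ TABLE_END, },
};

static void raw_write(unsigned int val, unsigned int offset);	/* stub */

static void apply_mode(enum mode mode)
{
	unsigned int i;

	/* One register write per row, until the sentinel offset. */
	for (i = 0; table[i].offset != TABLE_END; i++)
		raw_write(table[i].val[mode], table[i].offset);
}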
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
new file mode 100644
index 000000000000..a469e366fead
--- /dev/null
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Header for EXYNOS PMU Driver support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_PMU_H
+#define __EXYNOS_PMU_H
+
+#include <linux/io.h>
+
+#define PMU_TABLE_END (-1U)
+
+struct exynos_pmu_conf {
+ unsigned int offset;
+ u8 val[NUM_SYS_POWERDOWN];
+};
+
+struct exynos_pmu_data {
+ const struct exynos_pmu_conf *pmu_config;
+ const struct exynos_pmu_conf *pmu_config_extra;
+
+ void (*pmu_init)(void);
+ void (*powerdown_conf)(enum sys_powerdown);
+ void (*powerdown_conf_extra)(enum sys_powerdown);
+};
+
+extern void __iomem *pmu_base_addr;
+/* list of all exported SoC specific data */
+extern const struct exynos_pmu_data exynos3250_pmu_data;
+extern const struct exynos_pmu_data exynos4210_pmu_data;
+extern const struct exynos_pmu_data exynos4212_pmu_data;
+extern const struct exynos_pmu_data exynos4412_pmu_data;
+extern const struct exynos_pmu_data exynos5250_pmu_data;
+extern const struct exynos_pmu_data exynos5420_pmu_data;
+
+extern void pmu_raw_writel(u32 val, u32 offset);
+extern u32 pmu_raw_readl(u32 offset);
+#endif /* __EXYNOS_PMU_H */
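The header exposes one const exynos_pmu_data per SoC so the driver can select the right one through the .data pointer of its of_device_id table at probe time. A hedged sketch of that compatible-string dispatch, with made-up names:

#include <linux/of.h>

struct soc_data {
	void (*init)(void);
};

static const struct soc_data soc_a_data;	/* per-SoC blobs */
static const struct soc_data soc_b_data;

static const struct of_device_id ids[] = {
	{ .compatible = "vendor,soc-a", .data = &soc_a_data },
	{ .compatible = "vendor,soc-b", .data = &soc_b_data },
	{ /* sentinel */ },
};

static void init_from_node(struct device_node *np)
{
	const struct of_device_id *match = of_match_node(ids, np);
	const struct soc_data *data = match->data;

	if (data->init)
		data->init();
}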
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
new file mode 100644
index 000000000000..20b3ab8aa790
--- /dev/null
+++ b/drivers/soc/samsung/exynos3250-pmu.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS3250 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static struct exynos_pmu_conf exynos3250_pmu_config[] = {
+	/* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } } */
+ { EXYNOS3_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS3_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS3_ARM_L2_SYS_PWR_REG, { 0x0, 0x0, 0x3} },
+ { EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS3_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS3_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_TOP_PWR_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS3_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS3_CAM_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_LCD0_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_MAUDIO_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos3250_list_feed[] = {
+ EXYNOS3_ARM_CORE_OPTION(0),
+ EXYNOS3_ARM_CORE_OPTION(1),
+ EXYNOS3_ARM_CORE_OPTION(2),
+ EXYNOS3_ARM_CORE_OPTION(3),
+ EXYNOS3_ARM_COMMON_OPTION,
+ EXYNOS3_TOP_PWR_OPTION,
+ EXYNOS3_CORE_TOP_PWR_OPTION,
+ S5P_CAM_OPTION,
+ S5P_MFC_OPTION,
+ S5P_G3D_OPTION,
+ S5P_LCD0_OPTION,
+ S5P_ISP_OPTION,
+};
+
+static void exynos3250_powerdown_conf_extra(enum sys_powerdown mode)
+{
+ unsigned int i;
+ unsigned int tmp;
+
+ /* Enable only SC_FEEDBACK */
+ for (i = 0; i < ARRAY_SIZE(exynos3250_list_feed); i++) {
+ tmp = pmu_raw_readl(exynos3250_list_feed[i]);
+ tmp &= ~(EXYNOS3_OPTION_USE_SC_COUNTER);
+ tmp |= EXYNOS3_OPTION_USE_SC_FEEDBACK;
+ pmu_raw_writel(tmp, exynos3250_list_feed[i]);
+ }
+
+ if (mode != SYS_SLEEP)
+ return;
+
+ pmu_raw_writel(XUSBXTI_DURATION, EXYNOS3_XUSBXTI_DURATION);
+ pmu_raw_writel(XXTI_DURATION, EXYNOS3_XXTI_DURATION);
+ pmu_raw_writel(EXT_REGULATOR_DURATION, EXYNOS3_EXT_REGULATOR_DURATION);
+ pmu_raw_writel(EXT_REGULATOR_COREBLK_DURATION,
+ EXYNOS3_EXT_REGULATOR_COREBLK_DURATION);
+}
+
+static void exynos3250_pmu_init(void)
+{
+ unsigned int value;
+
+	/*
+	 * Prevent the L2 memory system from issuing new bus requests:
+	 * when the core status is power-down, the L2 power-down bit
+	 * should be set to '1'.
+	 */
+ value = pmu_raw_readl(EXYNOS3_ARM_COMMON_OPTION);
+ value |= EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS3_ARM_COMMON_OPTION);
+
+	/* Enable USE_STANDBY_WFI for all cores */
+ pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+	/* Set the PSHOLD port to output high */
+ value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+ value |= S5P_PS_HOLD_OUTPUT_HIGH;
+ pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+
+	/* Enable the signal for the PSHOLD port */
+ value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+ value |= S5P_PS_HOLD_EN;
+ pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+}
+
+const struct exynos_pmu_data exynos3250_pmu_data = {
+ .pmu_config = exynos3250_pmu_config,
+ .pmu_init = exynos3250_pmu_init,
+ .powerdown_conf_extra = exynos3250_powerdown_conf_extra,
+};
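exynos3250_powerdown_conf_extra() applies the standard read-modify-write idiom to a list of option registers: read, clear the counter-mode bit, set the feedback-mode bit, write back. The idiom in isolation, with hypothetical bit names and stub register accessors:

#define OPT_USE_COUNTER		(1 << 0)	/* hypothetical bits */
#define OPT_USE_FEEDBACK	(1 << 1)

static unsigned int raw_read(unsigned int offset);		/* stubs */
static void raw_write(unsigned int val, unsigned int offset);

static void select_feedback(const unsigned int *regs, unsigned int n)
{
	unsigned int i, tmp;

	for (i = 0; i < n; i++) {
		tmp = raw_read(regs[i]);
		tmp &= ~OPT_USE_COUNTER;	/* clear counter mode */
		tmp |= OPT_USE_FEEDBACK;	/* select feedback mode */
		raw_write(tmp, regs[i]);
	}
}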
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
new file mode 100644
index 000000000000..bc4fa73bed11
--- /dev/null
+++ b/drivers/soc/samsung/exynos4-pmu.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS4 - CPU PMU(Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
+	/* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+ { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE1_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE1, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL1, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_COMMON_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_L2_0_LOWPWR, { 0x2, 0x2, 0x3 } },
+ { S5P_L2_1_LOWPWR, { 0x2, 0x2, 0x3 } },
+ { S5P_CMU_ACLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_APLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_MPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_VPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_EPLL_SYSCLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPSALIVE_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_CAM_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_TV_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_MFC_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_G3D_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD0_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD1_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_CAM_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_TV_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_MFC_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_G3D_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_LCD0_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_LCD1_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPS_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_TOP_BUS_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_LOGIC_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_ONENAND_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_MODIMIF_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_G2D_ACP_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_USBOTG_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSMMC_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_CSSYS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SECSS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_PCIE_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SATA_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_DRAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_UART_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ISOLATION_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ALV_SEL_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_XUSBXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_XXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_EXT_REGULATOR_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_GPIO_MODE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CAM_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_TV_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MFC_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_G3D_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD0_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD1_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MAUDIO_LOWPWR, { 0x7, 0x7, 0x0 } },
+ { S5P_GPS_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
+static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
+ { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE1_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE1, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL1, { 0x0, 0x0, 0x0 } },
+ { S5P_ISP_ARM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_COMMON_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_L2_0_LOWPWR, { 0x0, 0x0, 0x3 } },
+	/* the XXX_OPTION registers should be set in a different field */
+ { S5P_ARM_L2_0_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_L2_1_LOWPWR, { 0x0, 0x0, 0x3 } },
+ { S5P_ARM_L2_1_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_CMU_ACLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_DRAM_FREQ_DOWN_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_DDRPHY_DLLOFF_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_LPDDR_PHY_DLL_LOCK_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_CMU_ACLKSTOP_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SCLKSTOP_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_COREBLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_APLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_MPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_VPLL_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_EPLL_SYSCLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_MPLLUSER_SYSCLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_GPSALIVE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_CAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_TV_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_MFC_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_G3D_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_LCD0_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_CLKSTOP_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_CAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_TV_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_MFC_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_G3D_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_LCD0_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_RESET_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_CMU_RESET_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_TOP_BUS_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_TOP_BUS_COREBLK_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_TOP_RETENTION_COREBLK_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_TOP_PWR_COREBLK_LOWPWR, { 0x3, 0x0, 0x3 } },
+ { S5P_LOGIC_RESET_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_OSCCLK_GATE_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_LOGIC_RESET_COREBLK_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_OSCCLK_GATE_COREBLK_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_ONENAND_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_ONENAND_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_HSI_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSI_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_G2D_ACP_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_G2D_ACP_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_USBOTG_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_USBOTG_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_HSMMC_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_HSMMC_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_CSSYS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_CSSYS_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_SECSS_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_SECSS_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_ROTATOR_MEM_LOWPWR, { 0x3, 0x0, 0x0 } },
+ { S5P_ROTATOR_MEM_OPTION, { 0x10, 0x10, 0x0 } },
+ { S5P_PAD_RETENTION_DRAM_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_UART_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_MMCB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIA_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_EBIB_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ISOLATION_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_ISOLATION_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_PAD_RETENTION_ALV_SEL_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_XUSBXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_XXTI_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_EXT_REGULATOR_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_GPIO_MODE_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_GPIO_MODE_MAUDIO_LOWPWR, { 0x1, 0x1, 0x0 } },
+ { S5P_TOP_ASB_RESET_LOWPWR, { 0x1, 0x1, 0x1 } },
+ { S5P_TOP_ASB_ISOLATION_LOWPWR, { 0x1, 0x0, 0x1 } },
+ { S5P_CAM_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_TV_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MFC_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_G3D_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_LCD0_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_ISP_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_MAUDIO_LOWPWR, { 0x7, 0x7, 0x0 } },
+ { S5P_GPS_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_GPS_ALIVE_LOWPWR, { 0x7, 0x0, 0x0 } },
+ { S5P_CMU_SYSCLK_ISP_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { S5P_CMU_SYSCLK_GPS_LOWPWR, { 0x1, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
+static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
+ { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE3_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE3, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL3, { 0x0, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
+const struct exynos_pmu_data exynos4210_pmu_data = {
+ .pmu_config = exynos4210_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4212_pmu_data = {
+ .pmu_config = exynos4x12_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4412_pmu_data = {
+ .pmu_config = exynos4x12_pmu_config,
+ .pmu_config_extra = exynos4412_pmu_config,
+};
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
new file mode 100644
index 000000000000..3fac42561964
--- /dev/null
+++ b/drivers/soc/samsung/exynos5250-pmu.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS5250 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos5250_pmu_config[] = {
+	/* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_FSYS_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
+ { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
+ { EXYNOS5_ARM_L2_OPTION, { 0x10, 0x10, 0x0 } },
+ { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_USBOTG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_G2D_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_USBDRD_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SDMMC_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_CSSYS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SECSS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_JPEG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_JPEG_MEM_OPTION, { 0x10, 0x10, 0x0} },
+ { EXYNOS5_HSI_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_SATA_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5_list_both_cnt_feed[] = {
+ EXYNOS5_ARM_CORE0_OPTION,
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_ARM_COMMON_OPTION,
+ EXYNOS5_GSCL_OPTION,
+ EXYNOS5_ISP_OPTION,
+ EXYNOS5_MFC_OPTION,
+ EXYNOS5_G3D_OPTION,
+ EXYNOS5_DISP1_OPTION,
+ EXYNOS5_MAU_OPTION,
+ EXYNOS5_TOP_PWR_OPTION,
+ EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+static unsigned int const exynos5_list_disable_wfi_wfe[] = {
+ EXYNOS5_ARM_CORE1_OPTION,
+ EXYNOS5_FSYS_ARM_OPTION,
+ EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5250_pmu_init(void)
+{
+	unsigned int value;
+
+	/*
+	 * When SYS_WDTRESET is set, the watchdog timer reset request
+	 * is ignored by the power management unit.
+	 */
+ value = pmu_raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
+ value &= ~EXYNOS5_SYS_WDTRESET;
+ pmu_raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);
+
+ value = pmu_raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
+ value &= ~EXYNOS5_SYS_WDTRESET;
+ pmu_raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);
+}
+
+static void exynos5_powerdown_conf(enum sys_powerdown mode)
+{
+ unsigned int i;
+ unsigned int tmp;
+
+	/* Enable both SC_FEEDBACK and SC_COUNTER */
+ for (i = 0; i < ARRAY_SIZE(exynos5_list_both_cnt_feed); i++) {
+ tmp = pmu_raw_readl(exynos5_list_both_cnt_feed[i]);
+ tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+ EXYNOS5_USE_SC_COUNTER);
+ pmu_raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+ }
+
+	/* Enable the SKIP_DEACTIVATE_ACEACP_IN_PWDN bitfield */
+ tmp = pmu_raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+ tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+	/* Disable WFI/WFE on the XXX_OPTION registers */
+ for (i = 0; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe); i++) {
+ tmp = pmu_raw_readl(exynos5_list_disable_wfi_wfe[i]);
+ tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+ EXYNOS5_OPTION_USE_STANDBYWFI);
+ pmu_raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
+ }
+}
+
+const struct exynos_pmu_data exynos5250_pmu_data = {
+ .pmu_config = exynos5250_pmu_config,
+ .pmu_init = exynos5250_pmu_init,
+ .powerdown_conf = exynos5_powerdown_conf,
+};
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
new file mode 100644
index 000000000000..b962fb6a5d22
--- /dev/null
+++ b/drivers/soc/samsung/exynos5420-pmu.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS5420 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pm.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include <asm/cputype.h>
+
+#include "exynos-pmu.h"
+
+static struct exynos_pmu_conf exynos5420_pmu_config[] = {
+	/* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+ { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_ARM_CORE2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_ARM_CORE3_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_CORE3_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_KFC_L2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+ { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x3, 0x0} },
+ { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
+ { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
+ { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5420_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
+ { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+ { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+ { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
+ { EXYNOS5420_G2D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_MSC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_FSYS_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_FSYS2_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_PSGEN_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_PERIC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5420_WCORE_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+ { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5420_list_disable_pmu_reg[] = {
+ EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,
+ EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,
+ EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,
+};
+
+static void exynos5420_powerdown_conf(enum sys_powerdown mode)
+{
+ u32 this_cluster;
+
+ this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
+
+	/*
+	 * Write the cluster ID to the IROM register to ensure that we
+	 * wake up on the current cluster.
+	 */
+ pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
+}
+
+static void exynos5420_pmu_init(void)
+{
+ unsigned int value;
+ int i;
+
+ /*
+ * Set the CMU_RESET, CMU_SYSCLK and CMU_CLKSTOP registers
+ * for local power blocks to Low initially as per Table 8-4:
+ * "System-Level Power-Down Configuration Registers".
+ */
+ for (i = 0; i < ARRAY_SIZE(exynos5420_list_disable_pmu_reg); i++)
+ pmu_raw_writel(0, exynos5420_list_disable_pmu_reg[i]);
+
+	/* Enable USE_STANDBY_WFI for all cores */
+ pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+ value = pmu_raw_readl(EXYNOS_L2_OPTION(0));
+ value &= ~EXYNOS5_USE_RETENTION;
+ pmu_raw_writel(value, EXYNOS_L2_OPTION(0));
+
+ value = pmu_raw_readl(EXYNOS_L2_OPTION(1));
+ value &= ~EXYNOS5_USE_RETENTION;
+ pmu_raw_writel(value, EXYNOS_L2_OPTION(1));
+
+ /*
+ * If L2_COMMON is turned off, clocks related to ATB async
+ * bridge are gated. Thus, when ISP power is gated, LPI
+ * may get stuck.
+ */
+ value = pmu_raw_readl(EXYNOS5420_LPI_MASK);
+ value |= EXYNOS5420_ATB_ISP_ARM;
+ pmu_raw_writel(value, EXYNOS5420_LPI_MASK);
+
+ value = pmu_raw_readl(EXYNOS5420_LPI_MASK1);
+ value |= EXYNOS5420_ATB_KFC;
+ pmu_raw_writel(value, EXYNOS5420_LPI_MASK1);
+
+ /* Prevent issue of new bus request from L2 memory */
+ value = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION);
+ value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS5420_ARM_COMMON_OPTION);
+
+ value = pmu_raw_readl(EXYNOS5420_KFC_COMMON_OPTION);
+ value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+ pmu_raw_writel(value, EXYNOS5420_KFC_COMMON_OPTION);
+
+ /* This setting is to reduce suspend/resume time */
+ pmu_raw_writel(DUR_WAIT_RESET, EXYNOS5420_LOGIC_RESET_DURATION3);
+
+	/* Serialize CPU wakeup of the Eagle cores */
+ pmu_raw_writel(SPREAD_ENABLE, EXYNOS5420_ARM_INTR_SPREAD_ENABLE);
+
+ pmu_raw_writel(SPREAD_USE_STANDWFI,
+ EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI);
+
+ pmu_raw_writel(0x1, EXYNOS5420_UP_SCHEDULER);
+
+ pr_info("EXYNOS5420 PMU initialized\n");
+}
+
+const struct exynos_pmu_data exynos5420_pmu_data = {
+ .pmu_config = exynos5420_pmu_config,
+ .pmu_init = exynos5420_pmu_init,
+ .powerdown_conf = exynos5420_powerdown_conf,
+};
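exynos5420_powerdown_conf() identifies the running cluster by extracting affinity level 1 from the ARM MPIDR register; on big.LITTLE parts this level distinguishes the A15 and A7 clusters. A short sketch of the extraction (the destination IROM register is SoC-specific):

#include <linux/types.h>
#include <asm/cputype.h>	/* read_cpuid_mpidr(), MPIDR_AFFINITY_LEVEL() */

static u32 current_cluster(void)
{
	/* Affinity level 1 of MPIDR is the cluster ID on big.LITTLE. */
	return MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
}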
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index bc52670c8f4b..99e354c8f53f 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -117,7 +117,7 @@ static int sunxi_sram_show(struct seq_file *s, void *data)
val = readl(base + sram_data->reg);
val >>= sram_data->offset;
- val &= sram_data->width;
+ val &= GENMASK(sram_data->width - 1, 0);
for (func = sram_data->func; func->func; func++) {
seq_printf(s, "\t\t%s%c\n", func->func,
@@ -208,7 +208,8 @@ int sunxi_sram_claim(struct device *dev)
return -EBUSY;
}
- mask = GENMASK(sram_data->offset + sram_data->width, sram_data->offset);
+ mask = GENMASK(sram_data->offset + sram_data->width - 1,
+ sram_data->offset);
val = readl(base + sram_data->reg);
val &= ~mask;
writel(val | ((device << sram_data->offset) & mask),
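Both sunxi hunks fix an off-by-one: GENMASK(h, l) covers bits h down to l inclusive, so a field of `width` bits starting at `offset` spans GENMASK(offset + width - 1, offset). A small worked check of the corrected mask:

#include <linux/bitops.h>	/* GENMASK */

/* A 3-bit field at bit offset 4 occupies bits 6..4. */
#define FIELD_OFFSET	4
#define FIELD_WIDTH	3
#define FIELD_MASK	GENMASK(FIELD_OFFSET + FIELD_WIDTH - 1, FIELD_OFFSET)
/* FIELD_MASK == 0x70; GENMASK(offset + width, offset) would be 0xf0. */

static unsigned int field_get(unsigned int reg)
{
	return (reg & FIELD_MASK) >> FIELD_OFFSET;
}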
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
index 6ff936cacb70..905b974d1bdc 100644
--- a/drivers/soc/ti/knav_qmss.h
+++ b/drivers/soc/ti/knav_qmss.h
@@ -93,13 +93,13 @@ struct knav_reg_pdsp_regs {
struct knav_reg_acc_command {
u32 command;
u32 queue_mask;
- u32 list_phys;
+ u32 list_dma;
u32 queue_num;
u32 timer_config;
};
struct knav_link_ram_block {
- dma_addr_t phys;
+ dma_addr_t dma;
void *virt;
size_t size;
};
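Renaming phys/list_phys to dma/list_dma is more than cosmetic: these values come from the DMA API and are device-visible addresses that may differ from CPU physical addresses (behind an IOMMU, for instance), so dma_addr_t is the honest type. A sketch of the allocation that produces such a pair, mirroring the dmam_alloc_coherent() call used later in the patch:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct link_ram_block {
	dma_addr_t dma;		/* device-visible address from the DMA API */
	void *virt;		/* CPU-visible mapping */
	size_t size;		/* number of 8-byte entries */
};

static int alloc_block(struct device *dev, struct link_ram_block *blk)
{
	blk->virt = dmam_alloc_coherent(dev, 8 * blk->size, &blk->dma,
					GFP_KERNEL);
	return blk->virt ? 0 : -ENOMEM;
}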
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
index d2d48f2802bc..0612ebae0a09 100644
--- a/drivers/soc/ti/knav_qmss_acc.c
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -122,8 +122,8 @@ static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
channel = acc->channel;
list_dma = acc->list_dma[acc->list_index];
list_cpu = acc->list_cpu[acc->list_index];
- dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %x\n",
- channel, acc->list_index, list_cpu, list_dma);
+ dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, dma %pad\n",
+ channel, acc->list_index, list_cpu, &list_dma);
if (atomic_read(&acc->retrigger_count)) {
atomic_dec(&acc->retrigger_count);
__knav_acc_notify(range, acc);
@@ -297,12 +297,12 @@ knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
u32 result;
dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
- cmd->command, cmd->queue_mask, cmd->list_phys,
+ cmd->command, cmd->queue_mask, cmd->list_dma,
cmd->queue_num, cmd->timer_config);
writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
- writel_relaxed(cmd->list_phys, &pdsp->acc_command->list_phys);
+ writel_relaxed(cmd->list_dma, &pdsp->acc_command->list_dma);
writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
writel_relaxed(cmd->command, &pdsp->acc_command->command);
@@ -337,7 +337,7 @@ static void knav_acc_setup_cmd(struct knav_device *kdev,
memset(cmd, 0, sizeof(*cmd));
cmd->command = acc->channel;
cmd->queue_mask = queue_mask;
- cmd->list_phys = acc->list_dma[0];
+ cmd->list_dma = (u32)acc->list_dma[0];
cmd->queue_num = info->list_entries << 16;
cmd->queue_num |= queue_base;
@@ -591,8 +591,8 @@ int knav_init_acc_range(struct knav_device *kdev,
acc->list_cpu[1] = list_mem + list_size;
acc->list_dma[0] = list_dma;
acc->list_dma[1] = list_dma + list_size;
- dev_dbg(kdev->dev, "%s: channel %d, phys %08x, virt %8p\n",
- acc->name, acc->channel, list_dma, list_mem);
+ dev_dbg(kdev->dev, "%s: channel %d, dma %pad, virt %8p\n",
+ acc->name, acc->channel, &list_dma, list_mem);
}
range->ops = &knav_acc_range_ops;
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 8c03a80b482d..b73e3534f67b 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1023,9 +1023,9 @@ static void knav_queue_setup_region(struct knav_device *kdev,
list_add(&pool->region_inst, &region->pools);
dev_dbg(kdev->dev,
- "region %s (%d): size:%d, link:%d@%d, phys:%08x-%08x, virt:%p-%p\n",
+ "region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
region->name, id, region->desc_size, region->num_desc,
- region->link_index, region->dma_start, region->dma_end,
+ region->link_index, &region->dma_start, &region->dma_end,
region->virt_start, region->virt_end);
hw_desc_size = (region->desc_size / 16) - 1;
@@ -1033,7 +1033,7 @@ static void knav_queue_setup_region(struct knav_device *kdev,
for_each_qmgr(kdev, qmgr) {
regs = qmgr->reg_region + id;
- writel_relaxed(region->dma_start, &regs->base);
+ writel_relaxed((u32)region->dma_start, &regs->base);
writel_relaxed(region->link_index, &regs->start_index);
writel_relaxed(hw_desc_size << 16 | hw_num_desc,
&regs->size_count);
@@ -1145,14 +1145,14 @@ static int knav_get_link_ram(struct knav_device *kdev,
* queue_base specified => using internal or onchip
* link ram WARNING - we do not "reserve" this block
*/
- block->phys = (dma_addr_t)temp[0];
+ block->dma = (dma_addr_t)temp[0];
block->virt = NULL;
block->size = temp[1];
} else {
block->size = temp[1];
/* queue_base not specific => allocate requested size */
block->virt = dmam_alloc_coherent(kdev->dev,
- 8 * block->size, &block->phys,
+ 8 * block->size, &block->dma,
GFP_KERNEL);
if (!block->virt) {
dev_err(kdev->dev, "failed to alloc linkram\n");
@@ -1172,18 +1172,18 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev)
for_each_qmgr(kdev, qmgr) {
block = &kdev->link_rams[0];
- dev_dbg(kdev->dev, "linkram0: phys:%x, virt:%p, size:%x\n",
- block->phys, block->virt, block->size);
- writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base0);
+ dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
+ &block->dma, block->virt, block->size);
+ writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);
block++;
if (!block->size)
continue;
- dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n",
- block->phys, block->virt, block->size);
- writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base1);
+ dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
+ &block->dma, block->virt, block->size);
+ writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
}
return 0;
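For the else-branch in knav_get_link_ram() above: dmam_alloc_coherent() returns a CPU virtual address and fills in the matching device-visible DMA address, and the managed (dmam_) variant is freed automatically on driver detach. A hedged sketch of the same pattern, with invented names and the 8-bytes-per-entry sizing taken from the hunk:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static int example_alloc_link_ram(struct device *dev, size_t entries)
    {
            dma_addr_t dma;
            void *virt;

            /* 8 bytes per link-ram entry, as in knav_get_link_ram() above. */
            virt = dmam_alloc_coherent(dev, 8 * entries, &dma, GFP_KERNEL);
            if (!virt)
                    return -ENOMEM;

            /* `virt` is for the CPU; `dma` is what the queue manager is told. */
            dev_dbg(dev, "linkram: dma:%pad, virt:%p, size:%zx\n",
                    &dma, virt, 8 * entries);
            return 0;
    }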
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 77064160dd76..9d8c84bb1544 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -75,11 +75,26 @@ config SPI_ATMEL
This selects a driver for the Atmel SPI Controller, present on
many AT32 (AVR32) and AT91 (ARM) chips.
+config SPI_AU1550
+ tristate "Au1550/Au1200/Au1300 SPI Controller"
+ depends on MIPS_ALCHEMY
+ select SPI_BITBANG
+ help
+ If you say yes to this option, support will be included for the
+ PSC SPI controller found on Au1550, Au1200 and Au1300 series.
+
+config SPI_AXI_SPI_ENGINE
+ tristate "Analog Devices AXI SPI Engine controller"
+ depends on HAS_IOMEM
+ help
+ This enables support for the Analog Devices AXI SPI Engine SPI controller.
+ It is part of the SPI Engine framework that is used in some Analog Devices
+ reference designs for FPGAs.
+
config SPI_BCM2835
tristate "BCM2835 SPI controller"
depends on GPIOLIB
depends on ARCH_BCM2835 || COMPILE_TEST
- depends on GPIOLIB
help
This selects a driver for the Broadcom BCM2835 SPI master.
@@ -90,8 +105,7 @@ config SPI_BCM2835
config SPI_BCM2835AUX
tristate "BCM2835 SPI auxiliary controller"
- depends on ARCH_BCM2835 || COMPILE_TEST
- depends on GPIOLIB
+ depends on (ARCH_BCM2835 && GPIOLIB) || COMPILE_TEST
help
This selects a driver for the Broadcom BCM2835 SPI aux master.
@@ -118,14 +132,6 @@ config SPI_BFIN_SPORT
help
Enable support for a SPI bus via the Blackfin SPORT peripheral.
-config SPI_AU1550
- tristate "Au1550/Au1200/Au1300 SPI Controller"
- depends on MIPS_ALCHEMY
- select SPI_BITBANG
- help
- If you say yes to this option, support will be included for the
- PSC SPI controller found on Au1550, Au1200 and Au1300 series.
-
config SPI_BCM53XX
tristate "Broadcom BCM53xx SPI controller"
depends on ARCH_BCM_5301X
@@ -197,6 +203,23 @@ config SPI_DAVINCI
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
+config SPI_DESIGNWARE
+ tristate "DesignWare SPI controller core support"
+ help
+	  General driver for the SPI controller core from DesignWare.
+
+config SPI_DW_PCI
+ tristate "PCI interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE && PCI
+
+config SPI_DW_MID_DMA
+ bool "DMA support for DW SPI controller on Intel MID platform"
+ depends on SPI_DW_PCI && DW_DMAC_PCI
+
+config SPI_DW_MMIO
+ tristate "Memory-mapped io interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE
+
config SPI_DLN2
tristate "Diolan DLN-2 USB SPI adapter"
depends on MFD_DLN2
@@ -271,6 +294,16 @@ config SPI_LM70_LLP
which interfaces to an LM70 temperature sensor using
a parallel port.
+config SPI_LP8841_RTC
+ tristate "ICP DAS LP-8841 SPI Controller for RTC"
+ depends on MACH_PXA27X_DT || COMPILE_TEST
+ help
+	  This driver provides an SPI master device to drive the Maxim
+	  DS-1302 real-time clock.
+
+ Say N here unless you plan to run the kernel on an ICP DAS
+ LP-8x4x industrial computer.
+
config SPI_MPC52xx
tristate "Freescale MPC52xx SPI (non-PSC) controller support"
depends on PPC_MPC52xx
@@ -346,6 +379,13 @@ config SPI_MT65XX
say Y or M here.If you are not sure, say N.
SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
+config SPI_NUC900
+ tristate "Nuvoton NUC900 series SPI"
+ depends on ARCH_W90X900
+ select SPI_BITBANG
+ help
+ SPI driver for Nuvoton NUC900 series ARM SoCs
+
config SPI_OC_TINY
tristate "OpenCores tiny SPI"
depends on GPIOLIB || COMPILE_TEST
@@ -415,10 +455,6 @@ config SPI_PPC4xx
help
This selects a driver for the PPC4xx SPI Controller.
-config SPI_PXA2XX_DMA
- def_bool y
- depends on SPI_PXA2XX
-
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
depends on (ARCH_PXA || PCI || ACPI)
@@ -451,7 +487,7 @@ config SPI_RB4XX
config SPI_RSPI
tristate "Renesas RSPI/QSPI controller"
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
help
SPI driver for Renesas RSPI and QSPI blocks.
@@ -501,7 +537,7 @@ config SPI_SC18IS602
config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
depends on HAVE_CLK && HAS_DMA
- depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
help
SPI driver for SuperH and SH Mobile MSIOF blocks.
@@ -520,7 +556,7 @@ config SPI_SH_SCI
config SPI_SH_HSPI
tristate "SuperH HSPI controller"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
SPI driver for SuperH HSPI blocks.
@@ -647,34 +683,10 @@ config SPI_ZYNQMP_GQSPI
help
Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
-config SPI_NUC900
- tristate "Nuvoton NUC900 series SPI"
- depends on ARCH_W90X900
- select SPI_BITBANG
- help
- SPI driver for Nuvoton NUC900 series ARM SoCs
-
#
# Add new SPI master controllers in alphabetical order above this line
#
-config SPI_DESIGNWARE
- tristate "DesignWare SPI controller core support"
- help
- general driver for SPI controller core from DesignWare
-
-config SPI_DW_PCI
- tristate "PCI interface driver for DW SPI core"
- depends on SPI_DESIGNWARE && PCI
-
-config SPI_DW_MID_DMA
- bool "DMA support for DW SPI controller on Intel MID platform"
- depends on SPI_DW_PCI && DW_DMAC_PCI
-
-config SPI_DW_MMIO
- tristate "Memory-mapped io interface driver for DW SPI core"
- depends on SPI_DESIGNWARE
-
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8991ffce6e12..fbb255c5a608 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+obj-$(CONFIG_SPI_AXI_SPI_ENGINE) += spi-axi-spi-engine.o
obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
@@ -62,8 +64,7 @@ obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
-spi-pxa2xx-platform-objs := spi-pxa2xx.o
-spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
+spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
new file mode 100644
index 000000000000..c968ab210a51
--- /dev/null
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -0,0 +1,591 @@
+/*
+ * SPI-Engine SPI controller driver
+ * Copyright 2015 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
+#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
+#define SPI_ENGINE_VERSION_PATCH(x) (x & 0xff)
+
+#define SPI_ENGINE_REG_VERSION 0x00
+
+#define SPI_ENGINE_REG_RESET 0x40
+
+#define SPI_ENGINE_REG_INT_ENABLE 0x80
+#define SPI_ENGINE_REG_INT_PENDING 0x84
+#define SPI_ENGINE_REG_INT_SOURCE 0x88
+
+#define SPI_ENGINE_REG_SYNC_ID 0xc0
+
+#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
+#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
+#define SPI_ENGINE_REG_SDI_FIFO_LEVEL 0xd8
+
+#define SPI_ENGINE_REG_CMD_FIFO 0xe0
+#define SPI_ENGINE_REG_SDO_DATA_FIFO 0xe4
+#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
+#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
+
+#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
+#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
+#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
+#define SPI_ENGINE_INT_SYNC BIT(3)
+
+#define SPI_ENGINE_CONFIG_CPHA BIT(0)
+#define SPI_ENGINE_CONFIG_CPOL BIT(1)
+#define SPI_ENGINE_CONFIG_3WIRE BIT(2)
+
+#define SPI_ENGINE_INST_TRANSFER 0x0
+#define SPI_ENGINE_INST_ASSERT 0x1
+#define SPI_ENGINE_INST_WRITE 0x2
+#define SPI_ENGINE_INST_MISC 0x3
+
+#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
+#define SPI_ENGINE_CMD_REG_CONFIG 0x1
+
+#define SPI_ENGINE_MISC_SYNC 0x0
+#define SPI_ENGINE_MISC_SLEEP 0x1
+
+#define SPI_ENGINE_TRANSFER_WRITE 0x1
+#define SPI_ENGINE_TRANSFER_READ 0x2
+
+#define SPI_ENGINE_CMD(inst, arg1, arg2) \
+ (((inst) << 12) | ((arg1) << 8) | (arg2))
+
+#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
+#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
+#define SPI_ENGINE_CMD_WRITE(reg, val) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
+#define SPI_ENGINE_CMD_SLEEP(delay) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
+#define SPI_ENGINE_CMD_SYNC(id) \
+ SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
+
+struct spi_engine_program {
+ unsigned int length;
+ uint16_t instructions[];
+};
+
+struct spi_engine {
+ struct clk *clk;
+ struct clk *ref_clk;
+
+ spinlock_t lock;
+
+ void __iomem *base;
+
+ struct spi_message *msg;
+ struct spi_engine_program *p;
+ unsigned cmd_length;
+ const uint16_t *cmd_buf;
+
+ struct spi_transfer *tx_xfer;
+ unsigned int tx_length;
+ const uint8_t *tx_buf;
+
+ struct spi_transfer *rx_xfer;
+ unsigned int rx_length;
+ uint8_t *rx_buf;
+
+ unsigned int sync_id;
+ unsigned int completed_id;
+
+ unsigned int int_enable;
+};
+
+static void spi_engine_program_add_cmd(struct spi_engine_program *p,
+ bool dry, uint16_t cmd)
+{
+ if (!dry)
+ p->instructions[p->length] = cmd;
+ p->length++;
+}
+
+static unsigned int spi_engine_get_config(struct spi_device *spi)
+{
+ unsigned int config = 0;
+
+ if (spi->mode & SPI_CPOL)
+ config |= SPI_ENGINE_CONFIG_CPOL;
+ if (spi->mode & SPI_CPHA)
+ config |= SPI_ENGINE_CONFIG_CPHA;
+ if (spi->mode & SPI_3WIRE)
+ config |= SPI_ENGINE_CONFIG_3WIRE;
+
+ return config;
+}
+
+static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
+ struct spi_device *spi, struct spi_transfer *xfer)
+{
+ unsigned int clk_div;
+
+ clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
+ xfer->speed_hz * 2);
+ if (clk_div > 255)
+ clk_div = 255;
+ else if (clk_div > 0)
+ clk_div -= 1;
+
+ return clk_div;
+}
+
+static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
+ struct spi_transfer *xfer)
+{
+ unsigned int len = xfer->len;
+
+ while (len) {
+ unsigned int n = min(len, 256U);
+ unsigned int flags = 0;
+
+ if (xfer->tx_buf)
+ flags |= SPI_ENGINE_TRANSFER_WRITE;
+ if (xfer->rx_buf)
+ flags |= SPI_ENGINE_TRANSFER_READ;
+
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
+ len -= n;
+ }
+}
+
+static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
+ struct spi_engine *spi_engine, unsigned int clk_div, unsigned int delay)
+{
+ unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
+ unsigned int t;
+
+ if (delay == 0)
+ return;
+
+ t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
+ while (t) {
+ unsigned int n = min(t, 256U);
+
+ spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
+ t -= n;
+ }
+}
+
+static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
+ struct spi_device *spi, bool assert)
+{
+ unsigned int mask = 0xff;
+
+ if (assert)
+ mask ^= BIT(spi->chip_select);
+
+ spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
+}
+
+static int spi_engine_compile_message(struct spi_engine *spi_engine,
+ struct spi_message *msg, bool dry, struct spi_engine_program *p)
+{
+ struct spi_device *spi = msg->spi;
+ struct spi_transfer *xfer;
+ int clk_div, new_clk_div;
+ bool cs_change = true;
+
+ clk_div = -1;
+
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
+ spi_engine_get_config(spi)));
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
+ if (new_clk_div != clk_div) {
+ clk_div = new_clk_div;
+ spi_engine_program_add_cmd(p, dry,
+ SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
+ clk_div));
+ }
+
+ if (cs_change)
+ spi_engine_gen_cs(p, dry, spi, true);
+
+ spi_engine_gen_xfer(p, dry, xfer);
+ spi_engine_gen_sleep(p, dry, spi_engine, clk_div,
+ xfer->delay_usecs);
+
+ cs_change = xfer->cs_change;
+ if (list_is_last(&xfer->transfer_list, &msg->transfers))
+ cs_change = !cs_change;
+
+ if (cs_change)
+ spi_engine_gen_cs(p, dry, spi, false);
+ }
+
+ return 0;
+}
+
+static void spi_engine_xfer_next(struct spi_engine *spi_engine,
+ struct spi_transfer **_xfer)
+{
+ struct spi_message *msg = spi_engine->msg;
+ struct spi_transfer *xfer = *_xfer;
+
+ if (!xfer) {
+ xfer = list_first_entry(&msg->transfers,
+ struct spi_transfer, transfer_list);
+ } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+ xfer = NULL;
+ } else {
+ xfer = list_next_entry(xfer, transfer_list);
+ }
+
+ *_xfer = xfer;
+}
+
+static void spi_engine_tx_next(struct spi_engine *spi_engine)
+{
+ struct spi_transfer *xfer = spi_engine->tx_xfer;
+
+ do {
+ spi_engine_xfer_next(spi_engine, &xfer);
+ } while (xfer && !xfer->tx_buf);
+
+ spi_engine->tx_xfer = xfer;
+ if (xfer) {
+ spi_engine->tx_length = xfer->len;
+ spi_engine->tx_buf = xfer->tx_buf;
+ } else {
+ spi_engine->tx_buf = NULL;
+ }
+}
+
+static void spi_engine_rx_next(struct spi_engine *spi_engine)
+{
+ struct spi_transfer *xfer = spi_engine->rx_xfer;
+
+ do {
+ spi_engine_xfer_next(spi_engine, &xfer);
+ } while (xfer && !xfer->rx_buf);
+
+ spi_engine->rx_xfer = xfer;
+ if (xfer) {
+ spi_engine->rx_length = xfer->len;
+ spi_engine->rx_buf = xfer->rx_buf;
+ } else {
+ spi_engine->rx_buf = NULL;
+ }
+}
+
+static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
+{
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
+ unsigned int n, m, i;
+ const uint16_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
+ while (n && spi_engine->cmd_length) {
+ m = min(n, spi_engine->cmd_length);
+ buf = spi_engine->cmd_buf;
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ spi_engine->cmd_buf += m;
+ spi_engine->cmd_length -= m;
+ n -= m;
+ }
+
+ return spi_engine->cmd_length != 0;
+}
+
+static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
+{
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
+ unsigned int n, m, i;
+ const uint8_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
+ while (n && spi_engine->tx_length) {
+ m = min(n, spi_engine->tx_length);
+ buf = spi_engine->tx_buf;
+ for (i = 0; i < m; i++)
+ writel_relaxed(buf[i], addr);
+ spi_engine->tx_buf += m;
+ spi_engine->tx_length -= m;
+ n -= m;
+ if (spi_engine->tx_length == 0)
+ spi_engine_tx_next(spi_engine);
+ }
+
+ return spi_engine->tx_length != 0;
+}
+
+static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
+{
+ void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
+ unsigned int n, m, i;
+ uint8_t *buf;
+
+ n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
+ while (n && spi_engine->rx_length) {
+ m = min(n, spi_engine->rx_length);
+ buf = spi_engine->rx_buf;
+ for (i = 0; i < m; i++)
+ buf[i] = readl_relaxed(addr);
+ spi_engine->rx_buf += m;
+ spi_engine->rx_length -= m;
+ n -= m;
+ if (spi_engine->rx_length == 0)
+ spi_engine_rx_next(spi_engine);
+ }
+
+ return spi_engine->rx_length != 0;
+}
+
+static irqreturn_t spi_engine_irq(int irq, void *devid)
+{
+ struct spi_master *master = devid;
+ struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ unsigned int disable_int = 0;
+ unsigned int pending;
+
+ pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+
+ if (pending & SPI_ENGINE_INT_SYNC) {
+ writel_relaxed(SPI_ENGINE_INT_SYNC,
+ spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ spi_engine->completed_id = readl_relaxed(
+ spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
+ }
+
+ spin_lock(&spi_engine->lock);
+
+ if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
+ if (!spi_engine_write_cmd_fifo(spi_engine))
+ disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+ }
+
+ if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
+ if (!spi_engine_write_tx_fifo(spi_engine))
+ disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+ }
+
+ if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
+ if (!spi_engine_read_rx_fifo(spi_engine))
+ disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+ }
+
+ if (pending & SPI_ENGINE_INT_SYNC) {
+ if (spi_engine->msg &&
+ spi_engine->completed_id == spi_engine->sync_id) {
+ struct spi_message *msg = spi_engine->msg;
+
+ kfree(spi_engine->p);
+ msg->status = 0;
+ msg->actual_length = msg->frame_length;
+ spi_engine->msg = NULL;
+ spi_finalize_current_message(master);
+ disable_int |= SPI_ENGINE_INT_SYNC;
+ }
+ }
+
+ if (disable_int) {
+ spi_engine->int_enable &= ~disable_int;
+ writel_relaxed(spi_engine->int_enable,
+ spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ }
+
+ spin_unlock(&spi_engine->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_engine_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_engine_program p_dry, *p;
+ struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ unsigned int int_enable = 0;
+ unsigned long flags;
+ size_t size;
+
+ p_dry.length = 0;
+ spi_engine_compile_message(spi_engine, msg, true, &p_dry);
+
+ size = sizeof(*p->instructions) * (p_dry.length + 1);
+ p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ spi_engine_compile_message(spi_engine, msg, false, p);
+
+ spin_lock_irqsave(&spi_engine->lock, flags);
+ spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
+ spi_engine_program_add_cmd(p, false,
+ SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
+
+ spi_engine->msg = msg;
+ spi_engine->p = p;
+
+ spi_engine->cmd_buf = p->instructions;
+ spi_engine->cmd_length = p->length;
+ if (spi_engine_write_cmd_fifo(spi_engine))
+ int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+
+ spi_engine_tx_next(spi_engine);
+ if (spi_engine_write_tx_fifo(spi_engine))
+ int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+
+ spi_engine_rx_next(spi_engine);
+ if (spi_engine->rx_length != 0)
+ int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+
+ int_enable |= SPI_ENGINE_INT_SYNC;
+
+ writel_relaxed(int_enable,
+ spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ spi_engine->int_enable = int_enable;
+ spin_unlock_irqrestore(&spi_engine->lock, flags);
+
+ return 0;
+}
+
+static int spi_engine_probe(struct platform_device *pdev)
+{
+ struct spi_engine *spi_engine;
+ struct spi_master *master;
+ unsigned int version;
+ struct resource *res;
+ int irq;
+ int ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
+ if (!spi_engine)
+ return -ENOMEM;
+
+ master = spi_alloc_master(&pdev->dev, 0);
+ if (!master)
+ return -ENOMEM;
+
+ spi_master_set_devdata(master, spi_engine);
+
+ spin_lock_init(&spi_engine->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi_engine->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi_engine->base)) {
+ ret = PTR_ERR(spi_engine->base);
+ goto err_put_master;
+ }
+
+ version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
+ if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
+ dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+ SPI_ENGINE_VERSION_MAJOR(version),
+ SPI_ENGINE_VERSION_MINOR(version),
+ SPI_ENGINE_VERSION_PATCH(version));
+		ret = -ENODEV;
+		goto err_put_master;
+ }
+
+ spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(spi_engine->clk)) {
+ ret = PTR_ERR(spi_engine->clk);
+ goto err_put_master;
+ }
+
+ spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
+ if (IS_ERR(spi_engine->ref_clk)) {
+ ret = PTR_ERR(spi_engine->ref_clk);
+ goto err_put_master;
+ }
+
+ ret = clk_prepare_enable(spi_engine->clk);
+ if (ret)
+ goto err_put_master;
+
+ ret = clk_prepare_enable(spi_engine->ref_clk);
+ if (ret)
+ goto err_clk_disable;
+
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
+ writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+
+ ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
+ if (ret)
+ goto err_ref_clk_disable;
+
+ master->dev.parent = &pdev->dev;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
+ master->transfer_one_message = spi_engine_transfer_one_message;
+ master->num_chipselect = 8;
+
+ ret = spi_register_master(master);
+ if (ret)
+ goto err_free_irq;
+
+ platform_set_drvdata(pdev, master);
+
+ return 0;
+err_free_irq:
+ free_irq(irq, master);
+err_ref_clk_disable:
+ clk_disable_unprepare(spi_engine->ref_clk);
+err_clk_disable:
+ clk_disable_unprepare(spi_engine->clk);
+err_put_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int spi_engine_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ int irq = platform_get_irq(pdev, 0);
+
+ spi_unregister_master(master);
+
+ free_irq(irq, master);
+
+ writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+ writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+ writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+
+ clk_disable_unprepare(spi_engine->ref_clk);
+ clk_disable_unprepare(spi_engine->clk);
+
+ return 0;
+}
+
+static const struct of_device_id spi_engine_match_table[] = {
+ { .compatible = "adi,axi-spi-engine-1.00.a" },
+ { },
+};
+
+static struct platform_driver spi_engine_driver = {
+ .probe = spi_engine_probe,
+ .remove = spi_engine_remove,
+ .driver = {
+ .name = "spi-engine",
+ .of_match_table = spi_engine_match_table,
+ },
+};
+module_platform_driver(spi_engine_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
+MODULE_LICENSE("GPL");
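Each SPI Engine instruction is one 16-bit word: opcode in bits 15:12, first argument in bits 11:8, second argument in bits 7:0, exactly as SPI_ENGINE_CMD() packs them. A standalone sketch of the encoding, with the macro values copied from the file above:

    #include <stdint.h>
    #include <stdio.h>

    /* Values copied from the driver above. */
    #define SPI_ENGINE_INST_TRANSFER   0x0
    #define SPI_ENGINE_TRANSFER_WRITE  0x1
    #define SPI_ENGINE_TRANSFER_READ   0x2

    #define SPI_ENGINE_CMD(inst, arg1, arg2) \
            (((inst) << 12) | ((arg1) << 8) | (arg2))

    int main(void)
    {
            /* A full-duplex transfer of 256 bytes: the 8-bit length field
             * stores n - 1, which is why spi_engine_gen_xfer() splits
             * longer transfers into 256-byte chunks. */
            uint16_t cmd = SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER,
                                          SPI_ENGINE_TRANSFER_WRITE |
                                          SPI_ENGINE_TRANSFER_READ,
                                          256 - 1);

            printf("cmd = 0x%04x\n", cmd); /* 0x03ff */
            return 0;
    }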
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index cf04960cc3e6..f35cc10772f6 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -727,11 +727,6 @@ static int bcm2835_spi_setup(struct spi_device *spi)
spi->chip_select, spi->cs_gpio, err);
return err;
}
- /* the implementation of pinctrl-bcm2835 currently does not
- * set the GPIO value when using gpio_direction_output
- * so we are setting it here explicitly
- */
- gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
return 0;
}
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index ecc73c0a97cf..7428091d3f5b 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -64,9 +64,9 @@
#define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH 0x00004000
#define BCM2835_AUX_SPI_CNTL0_DOUTHOLD 0x00003000
#define BCM2835_AUX_SPI_CNTL0_ENABLE 0x00000800
-#define BCM2835_AUX_SPI_CNTL0_CPHA_IN 0x00000400
+#define BCM2835_AUX_SPI_CNTL0_IN_RISING 0x00000400
#define BCM2835_AUX_SPI_CNTL0_CLEARFIFO 0x00000200
-#define BCM2835_AUX_SPI_CNTL0_CPHA_OUT 0x00000100
+#define BCM2835_AUX_SPI_CNTL0_OUT_RISING 0x00000100
#define BCM2835_AUX_SPI_CNTL0_CPOL 0x00000080
#define BCM2835_AUX_SPI_CNTL0_MSBF_OUT 0x00000040
#define BCM2835_AUX_SPI_CNTL0_SHIFTLEN 0x0000003F
@@ -92,9 +92,6 @@
#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30
#define BCM2835_AUX_SPI_POLLING_JIFFIES 2
-#define BCM2835_AUX_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
- | SPI_NO_CS)
-
struct bcm2835aux_spi {
void __iomem *regs;
struct clk *clk;
@@ -212,9 +209,15 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
- /* and if rx_len is 0 then wake up completion and disable spi */
+ if (!bs->tx_len) {
+ /* disable tx fifo empty interrupt */
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
+ BCM2835_AUX_SPI_CNTL1_IDLE);
+ }
+
+ /* and if rx_len is 0 then disable interrupts and wake up completion */
if (!bs->rx_len) {
- bcm2835aux_spi_reset_hw(bs);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
complete(&master->xfer_completion);
}
@@ -307,9 +310,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
}
}
- /* Transfer complete - reset SPI HW */
- bcm2835aux_spi_reset_hw(bs);
-
/* and return without waiting for completion */
return 0;
}
@@ -330,10 +330,6 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
* resulting (potentially) in more interrupts when transferring
* more than 12 bytes
*/
- bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
- BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
- BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
- bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
/* set clock */
spi_hz = tfr->speed_hz;
@@ -348,17 +344,13 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
} else { /* the slowest we can go */
speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
}
+ /* mask out old speed from previous spi_transfer */
+ bs->cntl[0] &= ~(BCM2835_AUX_SPI_CNTL0_SPEED);
+ /* set the new speed */
bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;
spi_used_hz = clk_hz / (2 * (speed + 1));
- /* handle all the modes */
- if (spi->mode & SPI_CPOL)
- bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
- if (spi->mode & SPI_CPHA)
- bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPHA_OUT |
- BCM2835_AUX_SPI_CNTL0_CPHA_IN;
-
/* set transmit buffers and length */
bs->tx_buf = tfr->tx_buf;
bs->rx_buf = tfr->rx_buf;
@@ -382,6 +374,40 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
}
+static int bcm2835aux_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
+ BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
+ BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
+ bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
+
+ /* handle all the modes */
+ if (spi->mode & SPI_CPOL) {
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_OUT_RISING;
+ } else {
+ bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_IN_RISING;
+ }
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
+ bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
+
+ return 0;
+}
+
+static int bcm2835aux_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+
+ bcm2835aux_spi_reset_hw(bs);
+
+ return 0;
+}
+
static void bcm2835aux_spi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
@@ -405,11 +431,13 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, master);
- master->mode_bits = BCM2835_AUX_SPI_MODE_BITS;
+ master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->num_chipselect = -1;
master->transfer_one = bcm2835aux_spi_transfer_one;
master->handle_err = bcm2835aux_spi_handle_err;
+ master->prepare_message = bcm2835aux_spi_prepare_message;
+ master->unprepare_message = bcm2835aux_spi_unprepare_message;
master->dev.of_node = pdev->dev.of_node;
bs = spi_master_get_devdata(master);
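The bcm2835aux hunks move mode programming and the final controller reset out of the per-transfer path and into the SPI core's per-message hooks. A minimal sketch of that pairing, assuming nothing beyond the core's prepare_message()/unprepare_message() contract (names invented):

    #include <linux/spi/spi.h>

    static int example_prepare_message(struct spi_master *master,
                                       struct spi_message *msg)
    {
            /* Called once per message, before its first transfer: program
             * mode/clock bits here so they hold for the whole message. */
            return 0;
    }

    static int example_unprepare_message(struct spi_master *master,
                                         struct spi_message *msg)
    {
            /* Called once per message, after its last transfer: resetting
             * the controller here, not per transfer, keeps CS asserted
             * between the transfers of a single message. */
            return 0;
    }

    static void example_register_hooks(struct spi_master *master)
    {
            master->prepare_message = example_prepare_message;
            master->unprepare_message = example_unprepare_message;
    }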
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 9185f6c08459..e31971f91475 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -89,10 +89,10 @@ static void mid_spi_dma_exit(struct dw_spi *dws)
if (!dws->dma_inited)
return;
- dmaengine_terminate_all(dws->txchan);
+ dmaengine_terminate_sync(dws->txchan);
dma_release_channel(dws->txchan);
- dmaengine_terminate_all(dws->rxchan);
+ dmaengine_terminate_sync(dws->rxchan);
dma_release_channel(dws->rxchan);
}
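dmaengine_terminate_all() is deprecated in favour of an explicit pair: dmaengine_terminate_async() is safe in atomic context but may return before the channel is quiescent, while dmaengine_terminate_sync() also waits for running callbacks and therefore must be able to sleep. Release paths like mid_spi_dma_exit() can sleep, so _sync fits there. A hedged sketch of the teardown order:

    #include <linux/dmaengine.h>

    static void example_dma_exit(struct dma_chan *chan)
    {
            /* _sync stops the channel and also waits for any running
             * completion callback, so it may sleep; from atomic context
             * (e.g. the pxa2xx IRQ-handler hunk below) use _async. */
            dmaengine_terminate_sync(chan);
            /* Only hand the channel back once nothing can still run. */
            dma_release_channel(chan);
    }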
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index a6d7029a85ac..447497e9124c 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -47,11 +47,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
/* Get basic io resource and map it */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "no mem resource?\n");
- return -EINVAL;
- }
-
dws->regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(dws->regs)) {
dev_err(&pdev->dev, "SPI region map failed\n");
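The deleted check is redundant: devm_ioremap_resource() validates its resource argument itself and returns an ERR_PTR, with its own error message, when the resource is missing. A minimal sketch of the resulting probe idiom:

    #include <linux/device.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static void __iomem *example_map(struct platform_device *pdev)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            /* No NULL check needed: devm_ioremap_resource() validates
             * res itself and returns an ERR_PTR on failure. */
            return devm_ioremap_resource(&pdev->dev, res);
    }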
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c688efa95e29..50769078e72e 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -56,7 +56,6 @@
/* The maximum bytes that a sdma BD can transfer.*/
#define MAX_SDMA_BD_BYTES (1 << 15)
-#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
struct spi_imx_config {
unsigned int speed_hz;
unsigned int bpw;
@@ -86,12 +85,18 @@ struct spi_imx_devtype_data {
struct spi_imx_data {
struct spi_bitbang bitbang;
+ struct device *dev;
struct completion xfer_done;
void __iomem *base;
+ unsigned long base_phys;
+
struct clk *clk_per;
struct clk *clk_ipg;
unsigned long spi_clk;
+ unsigned int spi_bus_clk;
+
+ unsigned int bytes_per_word;
unsigned int count;
void (*tx)(struct spi_imx_data *);
@@ -101,8 +106,6 @@ struct spi_imx_data {
unsigned int txfifo; /* number of words pushed in tx FIFO */
/* DMA */
- unsigned int dma_is_inited;
- unsigned int dma_finished;
bool usedma;
u32 wml;
struct completion dma_rx_completion;
@@ -199,15 +202,39 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
return 7;
}
+static int spi_imx_bytes_per_word(const int bpw)
+{
+ return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
+}
+
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ unsigned int bpw;
+
+ if (!master->dma_rx)
+ return false;
+
+ if (!transfer)
+ return false;
+
+ bpw = transfer->bits_per_word;
+ if (!bpw)
+ bpw = spi->bits_per_word;
+
+ bpw = spi_imx_bytes_per_word(bpw);
+
+ if (bpw != 1 && bpw != 2 && bpw != 4)
+ return false;
+
+ if (transfer->len < spi_imx->wml * bpw)
+ return false;
- if (spi_imx->dma_is_inited && transfer->len >= spi_imx->wml &&
- (transfer->len % spi_imx->wml) == 0)
- return true;
- return false;
+ if (transfer->len % (spi_imx->wml * bpw))
+ return false;
+
+ return true;
}
#define MX51_ECSPI_CTRL 0x08
@@ -232,16 +259,13 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_INT_RREN (1 << 3)
#define MX51_ECSPI_DMA 0x14
-#define MX51_ECSPI_DMA_TX_WML_OFFSET 0
-#define MX51_ECSPI_DMA_TX_WML_MASK 0x3F
-#define MX51_ECSPI_DMA_RX_WML_OFFSET 16
-#define MX51_ECSPI_DMA_RX_WML_MASK (0x3F << 16)
-#define MX51_ECSPI_DMA_RXT_WML_OFFSET 24
-#define MX51_ECSPI_DMA_RXT_WML_MASK (0x3F << 24)
+#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
+#define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
+#define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
-#define MX51_ECSPI_DMA_TEDEN_OFFSET 7
-#define MX51_ECSPI_DMA_RXDEN_OFFSET 23
-#define MX51_ECSPI_DMA_RXTDEN_OFFSET 31
+#define MX51_ECSPI_DMA_TEDEN (1 << 7)
+#define MX51_ECSPI_DMA_RXDEN (1 << 23)
+#define MX51_ECSPI_DMA_RXTDEN (1 << 31)
#define MX51_ECSPI_STAT 0x18
#define MX51_ECSPI_STAT_RR (1 << 3)
@@ -250,14 +274,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_TESTREG_LBC BIT(31)
/* MX51 eCSPI */
-static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
- unsigned int *fres)
+static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
+ unsigned int fspi, unsigned int *fres)
{
/*
* there are two 4-bit dividers, the pre-divider divides by
* $pre, the post-divider by 2^$post
*/
unsigned int pre, post;
+ unsigned int fin = spi_imx->spi_clk;
if (unlikely(fspi > fin))
return 0;
@@ -270,14 +295,14 @@ static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
post = max(4U, post) - 4;
if (unlikely(post > 0xf)) {
- pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
- __func__, fspi, fin);
+ dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
+ fspi, fin);
return 0xff;
}
pre = DIV_ROUND_UP(fin, fspi << post) - 1;
- pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
+ dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
__func__, fin, fspi, post, pre);
/* Resulting frequency for the SCLK line. */
@@ -302,23 +327,19 @@ static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int
static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
- u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ u32 reg;
- if (!spi_imx->usedma)
- reg |= MX51_ECSPI_CTRL_XCH;
- else if (!spi_imx->dma_finished)
- reg |= MX51_ECSPI_CTRL_SMC;
- else
- reg &= ~MX51_ECSPI_CTRL_SMC;
+ reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ reg |= MX51_ECSPI_CTRL_XCH;
writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
- u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
- u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
u32 clk = config->speed_hz, delay, reg;
+ u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
/*
* The hardware seems to have a race condition when changing modes. The
@@ -330,7 +351,8 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/* set clock speed */
- ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx, config->speed_hz, &clk);
+ spi_imx->spi_bus_clk = clk;
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
@@ -356,6 +378,9 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
else
cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+ if (spi_imx->usedma)
+ ctrl |= MX51_ECSPI_CTRL_SMC;
+
/* CTRL register always go first to bring out controller from reset */
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
@@ -389,22 +414,12 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
* Configure the DMA register: setup the watermark
* and enable DMA request.
*/
- if (spi_imx->dma_is_inited) {
- dma = readl(spi_imx->base + MX51_ECSPI_DMA);
-
- rx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
- tx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
- rxt_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
- dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
- & ~MX51_ECSPI_DMA_RX_WML_MASK
- & ~MX51_ECSPI_DMA_RXT_WML_MASK)
- | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
- |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
- |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
- |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);
-
- writel(dma, spi_imx->base + MX51_ECSPI_DMA);
- }
+
+ writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
+ MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
+ MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
+ MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
+ MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
return 0;
}
@@ -784,11 +799,63 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int spi_imx_dma_configure(struct spi_master *master,
+ int bytes_per_word)
+{
+ int ret;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config rx = {}, tx = {};
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ if (bytes_per_word == spi_imx->bytes_per_word)
+ /* Same as last time */
+ return 0;
+
+ switch (bytes_per_word) {
+ case 4:
+ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ case 2:
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 1:
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tx.direction = DMA_MEM_TO_DEV;
+ tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
+ tx.dst_addr_width = buswidth;
+ tx.dst_maxburst = spi_imx->wml;
+ ret = dmaengine_slave_config(master->dma_tx, &tx);
+ if (ret) {
+ dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
+ return ret;
+ }
+
+ rx.direction = DMA_DEV_TO_MEM;
+ rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
+ rx.src_addr_width = buswidth;
+ rx.src_maxburst = spi_imx->wml;
+ ret = dmaengine_slave_config(master->dma_rx, &rx);
+ if (ret) {
+ dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
+ return ret;
+ }
+
+ spi_imx->bytes_per_word = bytes_per_word;
+
+ return 0;
+}
+
static int spi_imx_setupxfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
struct spi_imx_config config;
+ int ret;
config.bpw = t ? t->bits_per_word : spi->bits_per_word;
config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
@@ -812,6 +879,18 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->tx = spi_imx_buf_tx_u32;
}
+ if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
+ spi_imx->usedma = 1;
+ else
+ spi_imx->usedma = 0;
+
+ if (spi_imx->usedma) {
+ ret = spi_imx_dma_configure(spi->master,
+ spi_imx_bytes_per_word(config.bpw));
+ if (ret)
+ return ret;
+ }
+
spi_imx->devtype_data->config(spi_imx, &config);
return 0;
@@ -830,15 +909,11 @@ static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
dma_release_channel(master->dma_tx);
master->dma_tx = NULL;
}
-
- spi_imx->dma_is_inited = 0;
}
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
- struct spi_master *master,
- const struct resource *res)
+ struct spi_master *master)
{
- struct dma_slave_config slave_config = {};
int ret;
/* use pio mode for i.mx6dl chip TKT238285 */
@@ -856,16 +931,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
goto err;
}
- slave_config.direction = DMA_MEM_TO_DEV;
- slave_config.dst_addr = res->start + MXC_CSPITXDATA;
- slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- slave_config.dst_maxburst = spi_imx->wml;
- ret = dmaengine_slave_config(master->dma_tx, &slave_config);
- if (ret) {
- dev_err(dev, "error in TX dma configuration.\n");
- goto err;
- }
-
/* Prepare for RX : */
master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
if (IS_ERR(master->dma_rx)) {
@@ -875,15 +940,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
goto err;
}
- slave_config.direction = DMA_DEV_TO_MEM;
- slave_config.src_addr = res->start + MXC_CSPIRXDATA;
- slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- slave_config.src_maxburst = spi_imx->wml;
- ret = dmaengine_slave_config(master->dma_rx, &slave_config);
- if (ret) {
- dev_err(dev, "error in RX dma configuration.\n");
- goto err;
- }
+ spi_imx_dma_configure(master, 1);
init_completion(&spi_imx->dma_rx_completion);
init_completion(&spi_imx->dma_tx_completion);
@@ -891,7 +948,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
master->max_dma_len = MAX_SDMA_BD_BYTES;
spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
SPI_MASTER_MUST_TX;
- spi_imx->dma_is_inited = 1;
return 0;
err:
@@ -913,95 +969,81 @@ static void spi_imx_dma_tx_callback(void *cookie)
complete(&spi_imx->dma_tx_completion);
}
+static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
+{
+ unsigned long timeout = 0;
+
+	/* Time for the actual data transfer plus the HW-related CS change delay */
+ timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
+
+	/* Add an extra second for scheduler-related activities */
+ timeout += 1;
+
+ /* Double calculated timeout */
+ return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
+}
+
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
struct spi_transfer *transfer)
{
- struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
- int ret;
+ struct dma_async_tx_descriptor *desc_tx, *desc_rx;
+ unsigned long transfer_timeout;
unsigned long timeout;
struct spi_master *master = spi_imx->bitbang.master;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
- if (tx) {
- desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
- tx->sgl, tx->nents, DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_tx)
- goto tx_nodma;
-
- desc_tx->callback = spi_imx_dma_tx_callback;
- desc_tx->callback_param = (void *)spi_imx;
- dmaengine_submit(desc_tx);
- }
+ /*
+ * The TX DMA setup starts the transfer, so make sure RX is configured
+ * before TX.
+ */
+ desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ return -EINVAL;
- if (rx) {
- desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
- rx->sgl, rx->nents, DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc_rx)
- goto rx_nodma;
+ desc_rx->callback = spi_imx_dma_rx_callback;
+ desc_rx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_rx);
+ reinit_completion(&spi_imx->dma_rx_completion);
+ dma_async_issue_pending(master->dma_rx);
- desc_rx->callback = spi_imx_dma_rx_callback;
- desc_rx->callback_param = (void *)spi_imx;
- dmaengine_submit(desc_rx);
+ desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+		dmaengine_terminate_all(master->dma_rx);
+ return -EINVAL;
}
- reinit_completion(&spi_imx->dma_rx_completion);
+ desc_tx->callback = spi_imx_dma_tx_callback;
+ desc_tx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_tx);
reinit_completion(&spi_imx->dma_tx_completion);
-
- /* Trigger the cspi module. */
- spi_imx->dma_finished = 0;
-
- /*
- * Set these order to avoid potential RX overflow. The overflow may
- * happen if we enable SPI HW before starting RX DMA due to rescheduling
- * for another task and/or interrupt.
- * So RX DMA enabled first to make sure data would be read out from FIFO
- * ASAP. TX DMA enabled next to start filling TX FIFO with new data.
- * And finaly SPI HW enabled to start actual data transfer.
- */
- dma_async_issue_pending(master->dma_rx);
dma_async_issue_pending(master->dma_tx);
- spi_imx->devtype_data->trigger(spi_imx);
+
+ transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
/* Wait SDMA to finish the data transfer.*/
timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
- IMX_DMA_TIMEOUT);
+ transfer_timeout);
if (!timeout) {
- pr_warn("%s %s: I/O Error in DMA TX\n",
- dev_driver_string(&master->dev),
- dev_name(&master->dev));
+ dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
dmaengine_terminate_all(master->dma_tx);
dmaengine_terminate_all(master->dma_rx);
- } else {
- timeout = wait_for_completion_timeout(
- &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
- if (!timeout) {
- pr_warn("%s %s: I/O Error in DMA RX\n",
- dev_driver_string(&master->dev),
- dev_name(&master->dev));
- spi_imx->devtype_data->reset(spi_imx);
- dmaengine_terminate_all(master->dma_rx);
- }
+ return -ETIMEDOUT;
}
- spi_imx->dma_finished = 1;
- spi_imx->devtype_data->trigger(spi_imx);
-
- if (!timeout)
- ret = -ETIMEDOUT;
- else
- ret = transfer->len;
-
- return ret;
+ timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
+ transfer_timeout);
+ if (!timeout) {
+ dev_err(&master->dev, "I/O Error in DMA RX\n");
+ spi_imx->devtype_data->reset(spi_imx);
+ dmaengine_terminate_all(master->dma_rx);
+ return -ETIMEDOUT;
+ }
-rx_nodma:
- dmaengine_terminate_all(master->dma_tx);
-tx_nodma:
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&master->dev),
- dev_name(&master->dev));
- return -EAGAIN;
+ return transfer->len;
}
static int spi_imx_pio_transfer(struct spi_device *spi,
@@ -1028,19 +1070,12 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
static int spi_imx_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
- int ret;
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
- if (spi_imx->bitbang.master->can_dma &&
- spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
- spi_imx->usedma = true;
- ret = spi_imx_dma_transfer(spi_imx, transfer);
- if (ret != -EAGAIN)
- return ret;
- }
- spi_imx->usedma = false;
-
- return spi_imx_pio_transfer(spi, transfer);
+ if (spi_imx->usedma)
+ return spi_imx_dma_transfer(spi_imx, transfer);
+ else
+ return spi_imx_pio_transfer(spi, transfer);
}
static int spi_imx_setup(struct spi_device *spi)
@@ -1130,6 +1165,7 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = master;
+ spi_imx->dev = &pdev->dev;
spi_imx->devtype_data = of_id ? of_id->data :
(struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
@@ -1170,6 +1206,7 @@ static int spi_imx_probe(struct platform_device *pdev)
ret = PTR_ERR(spi_imx->base);
goto out_master_put;
}
+ spi_imx->base_phys = res->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -1210,7 +1247,7 @@ static int spi_imx_probe(struct platform_device *pdev)
* other chips.
*/
if (is_imx51_ecspi(spi_imx)) {
- ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master, res);
+ ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
if (ret == -EPROBE_DEFER)
goto out_clk_put;
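spi_imx_calculate_timeout() replaces the fixed 3-second DMA timeout with one scaled to the transfer: roughly 12 bit-times per byte (8 data bits plus 4 for CS-change overhead) divided by the bus clock, one second of scheduling slack, then doubled. A standalone sketch of the same arithmetic, in seconds rather than jiffies:

    #include <stdio.h>

    /* Mirrors spi_imx_calculate_timeout(), in seconds instead of jiffies. */
    static unsigned long example_timeout_secs(unsigned int bus_clk_hz, int size)
    {
            unsigned long timeout;

            timeout = (8 + 4) * (unsigned long)size / bus_clk_hz; /* transfer time */
            timeout += 1;                                         /* scheduler slack */
            return 2 * timeout;                                   /* safety margin */
    }

    int main(void)
    {
            /* 64 KiB at 1 MHz: 12 * 65536 / 1000000 is 0 with integer
             * division, so the fixed 2-second floor dominates. */
            printf("%lu s\n", example_timeout_secs(1000000, 65536)); /* 2 */
            return 0;
    }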
diff --git a/drivers/spi/spi-lp8841-rtc.c b/drivers/spi/spi-lp8841-rtc.c
new file mode 100644
index 000000000000..faa577d282c0
--- /dev/null
+++ b/drivers/spi/spi-lp8841-rtc.c
@@ -0,0 +1,256 @@
+/*
+ * SPI master driver for ICP DAS LP-8841 RTC
+ *
+ * Copyright (C) 2016 Sergei Ianovich
+ *
+ * based on
+ *
+ * Dallas DS1302 RTC Support
+ * Copyright (C) 2002 David McCullough
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi_lp8841_rtc"
+
+#define SPI_LP8841_RTC_CE 0x01
+#define SPI_LP8841_RTC_CLK 0x02
+#define SPI_LP8841_RTC_nWE 0x04
+#define SPI_LP8841_RTC_MOSI 0x08
+#define SPI_LP8841_RTC_MISO 0x01
+
+/*
+ * REVISIT If there is support for SPI_3WIRE and SPI_LSB_FIRST in SPI
+ * GPIO driver, this SPI driver can be replaced by a simple GPIO driver
+ * providing 3 GPIO pins.
+ */
+
+struct spi_lp8841_rtc {
+	void __iomem		*iomem;
+ unsigned long state;
+};
+
+static inline void
+setsck(struct spi_lp8841_rtc *data, int is_on)
+{
+ if (is_on)
+ data->state |= SPI_LP8841_RTC_CLK;
+ else
+ data->state &= ~SPI_LP8841_RTC_CLK;
+ writeb(data->state, data->iomem);
+}
+
+static inline void
+setmosi(struct spi_lp8841_rtc *data, int is_on)
+{
+ if (is_on)
+ data->state |= SPI_LP8841_RTC_MOSI;
+ else
+ data->state &= ~SPI_LP8841_RTC_MOSI;
+ writeb(data->state, data->iomem);
+}
+
+static inline int
+getmiso(struct spi_lp8841_rtc *data)
+{
+ return ioread8(data->iomem) & SPI_LP8841_RTC_MISO;
+}
+
+static inline u32
+bitbang_txrx_be_cpha0_lsb(struct spi_lp8841_rtc *data,
+ unsigned usecs, unsigned cpol, unsigned flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+ u32 shift = 32 - bits;
+ /* clock starts at inactive polarity */
+ for (; likely(bits); bits--) {
+
+ /* setup LSB (to slave) on leading edge */
+ if ((flags & SPI_MASTER_NO_TX) == 0)
+ setmosi(data, (word & 1));
+
+ usleep_range(usecs, usecs + 1); /* T(setup) */
+
+ /* sample LSB (from slave) on trailing edge */
+ word >>= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= (getmiso(data) << 31);
+
+ setsck(data, !cpol);
+ usleep_range(usecs, usecs + 1);
+
+ setsck(data, cpol);
+ }
+
+ word >>= shift;
+ return word;
+}
+
+static int
+spi_lp8841_rtc_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct spi_lp8841_rtc *data = spi_master_get_devdata(master);
+ unsigned count = t->len;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+ u8 word = 0;
+ int ret = 0;
+
+ if (tx) {
+ data->state &= ~SPI_LP8841_RTC_nWE;
+ writeb(data->state, data->iomem);
+ while (likely(count > 0)) {
+ word = *tx++;
+ bitbang_txrx_be_cpha0_lsb(data, 1, 0,
+ SPI_MASTER_NO_RX, word, 8);
+ count--;
+ }
+ } else if (rx) {
+ data->state |= SPI_LP8841_RTC_nWE;
+ writeb(data->state, data->iomem);
+ while (likely(count > 0)) {
+ word = bitbang_txrx_be_cpha0_lsb(data, 1, 0,
+ SPI_MASTER_NO_TX, word, 8);
+ *rx++ = word;
+ count--;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ spi_finalize_current_transfer(master);
+
+ return ret;
+}
+
+static void
+spi_lp8841_rtc_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_lp8841_rtc *data = spi_master_get_devdata(spi->master);
+
+ data->state = 0;
+ writeb(data->state, data->iomem);
+ if (enable) {
+ usleep_range(4, 5);
+ data->state |= SPI_LP8841_RTC_CE;
+ writeb(data->state, data->iomem);
+ usleep_range(4, 5);
+ }
+}
+
+static int
+spi_lp8841_rtc_setup(struct spi_device *spi)
+{
+ if ((spi->mode & SPI_CS_HIGH) == 0) {
+ dev_err(&spi->dev, "unsupported active low chip select\n");
+ return -EINVAL;
+ }
+
+ if ((spi->mode & SPI_LSB_FIRST) == 0) {
+ dev_err(&spi->dev, "unsupported MSB first mode\n");
+ return -EINVAL;
+ }
+
+ if ((spi->mode & SPI_3WIRE) == 0) {
+ dev_err(&spi->dev, "unsupported wiring. 3 wires required\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id spi_lp8841_rtc_dt_ids[] = {
+ { .compatible = "icpdas,lp8841-spi-rtc" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, spi_lp8841_rtc_dt_ids);
+#endif
+
+static int
+spi_lp8841_rtc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct spi_master *master;
+ struct spi_lp8841_rtc *data;
+	struct resource		*iomem;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*data));
+ if (!master)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->mode_bits = SPI_CS_HIGH | SPI_3WIRE | SPI_LSB_FIRST;
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = 1;
+ master->setup = spi_lp8841_rtc_setup;
+ master->set_cs = spi_lp8841_rtc_set_cs;
+ master->transfer_one = spi_lp8841_rtc_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+#ifdef CONFIG_OF
+ master->dev.of_node = pdev->dev.of_node;
+#endif
+
+ data = spi_master_get_devdata(master);
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->iomem = devm_ioremap_resource(&pdev->dev, iomem);
+ ret = PTR_ERR_OR_ZERO(data->iomem);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get IO address\n");
+ goto err_put_master;
+ }
+
+ /* register with the SPI framework */
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register spi master\n");
+ goto err_put_master;
+ }
+
+ return ret;
+
+err_put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+
+static struct platform_driver spi_lp8841_rtc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(spi_lp8841_rtc_dt_ids),
+ },
+ .probe = spi_lp8841_rtc_probe,
+};
+module_platform_driver(spi_lp8841_rtc_driver);
+
+MODULE_DESCRIPTION("SPI master driver for ICP DAS LP-8841 RTC");
+MODULE_AUTHOR("Sergei Ianovich");
+MODULE_LICENSE("GPL");
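The transfer loop above is mode-0, LSB-first bit-banging: the next bit is set up on MOSI while the clock is idle, MISO is sampled into bit 31 and the word shifted down, so after `bits` iterations the received value sits right-aligned. A standalone sketch of that shift discipline with the pin I/O stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    /* Stubbed pin I/O so the shift logic is runnable in userspace. */
    static void setmosi(int v) { (void)v; }
    static void setsck(int v)  { (void)v; }
    static int  getmiso(void)  { return 1; } /* pretend the slave drives 1s */

    static uint32_t txrx_lsb_first(uint32_t word, uint8_t bits)
    {
            uint32_t shift = 32 - bits;

            for (; bits; bits--) {
                    setmosi(word & 1);                 /* LSB out, leading edge */
                    word >>= 1;
                    word |= (uint32_t)getmiso() << 31; /* sample into the top bit */
                    setsck(1);
                    setsck(0);
            }
            return word >> shift;                      /* right-align the result */
    }

    int main(void)
    {
            printf("0x%02x\n", txrx_lsb_first(0xa5, 8)); /* 0xff: all-ones slave */
            return 0;
    }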
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0caa3c8bef46..43a02e377b3b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -423,16 +423,12 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
if (mcspi_dma->dma_tx) {
struct dma_async_tx_descriptor *tx;
- struct scatterlist sg;
dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
- sg_init_table(&sg, 1);
- sg_dma_address(&sg) = xfer->tx_dma;
- sg_dma_len(&sg) = xfer->len;
-
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) {
tx->callback = omap2_mcspi_tx_callback;
tx->callback_param = spi;
@@ -478,20 +474,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
if (mcspi_dma->dma_rx) {
struct dma_async_tx_descriptor *tx;
- struct scatterlist sg;
dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
dma_count -= es;
- sg_init_table(&sg, 1);
- sg_dma_address(&sg) = xfer->rx_dma;
- sg_dma_len(&sg) = dma_count;
-
- tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
- DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
- DMA_CTRL_ACK);
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (tx) {
tx->callback = omap2_mcspi_rx_callback;
tx->callback_param = spi;
@@ -505,8 +496,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
omap2_mcspi_set_dma_req(spi, 1, 1);
wait_for_completion(&mcspi_dma->dma_rx_completion);
- dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
- DMA_FROM_DEVICE);
if (mcspi->fifo_depth > 0)
return count;
@@ -619,8 +608,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
if (tx != NULL) {
wait_for_completion(&mcspi_dma->dma_tx_completion);
- dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
- DMA_TO_DEVICE);
if (mcspi->fifo_depth > 0) {
irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1087,6 +1074,16 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
gpio_free(spi->cs_gpio);
}
+static bool omap2_mcspi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ if (xfer->len < DMA_MIN_BYTES)
+ return false;
+
+ return true;
+}
+
static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
struct spi_device *spi, struct spi_transfer *t)
{
@@ -1268,32 +1265,6 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
return -EINVAL;
}
- if (len < DMA_MIN_BYTES)
- goto skip_dma_map;
-
- if (mcspi_dma->dma_tx && tx_buf != NULL) {
- t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
- len, DMA_TO_DEVICE);
- if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
- dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
- 'T', len);
- return -EINVAL;
- }
- }
- if (mcspi_dma->dma_rx && rx_buf != NULL) {
- t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
- dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
- 'R', len);
- if (tx_buf != NULL)
- dma_unmap_single(mcspi->dev, t->tx_dma,
- len, DMA_TO_DEVICE);
- return -EINVAL;
- }
- }
-
-skip_dma_map:
return omap2_mcspi_work_one(mcspi, spi, t);
}
@@ -1377,6 +1348,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
+ master->can_dma = omap2_mcspi_can_dma;
master->dev.of_node = node;
master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
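The mcspi conversion above removes the driver's private dma_map_single() calls: once a can_dma() callback is installed, the SPI core maps the buffers itself and hands transfer_one() ready-made scatter/gather tables in xfer->tx_sg/rx_sg. A sketch of that contract, assuming a driver-chosen DMA_MIN_BYTES threshold as in the patch:

	#include <linux/dmaengine.h>
	#include <linux/spi/spi.h>

	#define DMA_MIN_BYTES	160	/* assumed threshold, mirroring the driver */

	static bool foo_can_dma(struct spi_master *master, struct spi_device *spi,
				struct spi_transfer *xfer)
	{
		/* true => the core DMA-maps xfer before calling transfer_one() */
		return xfer->len >= DMA_MIN_BYTES;
	}

	static void foo_start_tx(struct dma_chan *chan, struct spi_transfer *xfer)
	{
		struct dma_async_tx_descriptor *tx;

		/* the core already filled xfer->tx_sg; no dma_map_single() needed */
		tx = dmaengine_prep_slave_sg(chan, xfer->tx_sg.sgl,
					     xfer->tx_sg.nents, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			dmaengine_submit(tx);
			dma_async_issue_pending(chan);
		}
	}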
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 5e5fd77e2711..f7f7ba17b40e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -346,13 +346,6 @@ struct vendor_data {
* @clk: outgoing clock "SPICLK" for the SPI bus
* @master: SPI framework hookup
* @master_info: controller-specific data from machine setup
- * @kworker: thread struct for message pump
- * @kworker_task: pointer to task for message pump kworker thread
- * @pump_messages: work struct for scheduling work to the message pump
- * @queue_lock: spinlock to syncronise access to message queue
- * @queue: message queue
- * @busy: message pump is busy
- * @running: message pump is running
* @pump_transfers: Tasklet used in Interrupt Transfer mode
* @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index bd8b369a343c..365fc22c3572 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -254,8 +254,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
if (status & SSSR_ROR) {
dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
- dmaengine_terminate_all(drv_data->rx_chan);
- dmaengine_terminate_all(drv_data->tx_chan);
+ dmaengine_terminate_async(drv_data->rx_chan);
+ dmaengine_terminate_async(drv_data->tx_chan);
pxa2xx_spi_dma_transfer_complete(drv_data, true);
return IRQ_HANDLED;
@@ -331,13 +331,13 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
if (drv_data->rx_chan) {
- dmaengine_terminate_all(drv_data->rx_chan);
+ dmaengine_terminate_sync(drv_data->rx_chan);
dma_release_channel(drv_data->rx_chan);
sg_free_table(&drv_data->rx_sgt);
drv_data->rx_chan = NULL;
}
if (drv_data->tx_chan) {
- dmaengine_terminate_all(drv_data->tx_chan);
+ dmaengine_terminate_sync(drv_data->tx_chan);
dma_release_channel(drv_data->tx_chan);
sg_free_table(&drv_data->tx_sgt);
drv_data->tx_chan = NULL;
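The two replacements for dmaengine_terminate_all() above are deliberate: dmaengine_terminate_async() is safe in atomic context but returns before in-flight callbacks finish, whereas dmaengine_terminate_sync() sleeps until the channel is fully quiesced, which is what a release path wants before dma_release_channel(). A sketch under those assumptions:

	#include <linux/dmaengine.h>
	#include <linux/interrupt.h>

	static irqreturn_t foo_error_irq(int irq, void *data)
	{
		struct dma_chan *chan = data;

		/* atomic context: request termination without waiting */
		dmaengine_terminate_async(chan);
		return IRQ_HANDLED;
	}

	static void foo_release_channel(struct dma_chan *chan)
	{
		/* process context: wait for callbacks, then free the channel */
		dmaengine_terminate_sync(chan);
		dma_release_channel(chan);
	}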
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index d19d7f28aecb..520ed1dd5780 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -19,6 +19,7 @@ enum {
PORT_BSW1,
PORT_BSW2,
PORT_QUARK_X1000,
+ PORT_LPT,
};
struct pxa_spi_info {
@@ -42,6 +43,9 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
+static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
+
static bool lpss_dma_filter(struct dma_chan *chan, void *param)
{
struct dw_dma_slave *dws = param;
@@ -98,6 +102,14 @@ static struct pxa_spi_info spi_info_configs[] = {
.num_chipselect = 1,
.max_clk_rate = 50000000,
},
+ [PORT_LPT] = {
+ .type = LPSS_LPT_SSP,
+ .port_id = 0,
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ .tx_param = &lpt_tx_param,
+ .rx_param = &lpt_rx_param,
+ },
};
static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
@@ -202,6 +214,7 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
{ PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
{ PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
{ PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
+ { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
{ },
};
MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index ab9914ad8365..85e59a406a4c 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -65,8 +65,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
#define LPSS_CS_CONTROL_SW_MODE BIT(0)
#define LPSS_CS_CONTROL_CS_HIGH BIT(1)
-#define LPSS_CS_CONTROL_CS_SEL_SHIFT 8
-#define LPSS_CS_CONTROL_CS_SEL_MASK (3 << LPSS_CS_CONTROL_CS_SEL_SHIFT)
#define LPSS_CAPS_CS_EN_SHIFT 9
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
@@ -82,6 +80,10 @@ struct lpss_config {
u32 rx_threshold;
u32 tx_threshold_lo;
u32 tx_threshold_hi;
+ /* Chip select control */
+ unsigned cs_sel_shift;
+ unsigned cs_sel_mask;
+ unsigned cs_num;
};
/* Keep these sorted with enum pxa_ssp_type */
@@ -106,6 +108,19 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
},
+ { /* LPSS_BSW_SSP */
+ .offset = 0x400,
+ .reg_general = 0x08,
+ .reg_ssp = 0x0c,
+ .reg_cs_ctrl = 0x18,
+ .reg_capabilities = -1,
+ .rx_threshold = 64,
+ .tx_threshold_lo = 160,
+ .tx_threshold_hi = 224,
+ .cs_sel_shift = 2,
+ .cs_sel_mask = 1 << 2,
+ .cs_num = 2,
+ },
{ /* LPSS_SPT_SSP */
.offset = 0x200,
.reg_general = -1,
@@ -125,6 +140,8 @@ static const struct lpss_config lpss_platforms[] = {
.rx_threshold = 1,
.tx_threshold_lo = 16,
.tx_threshold_hi = 48,
+ .cs_sel_shift = 8,
+ .cs_sel_mask = 3 << 8,
},
};
@@ -139,6 +156,7 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
switch (drv_data->ssp_type) {
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
+ case LPSS_BSW_SSP:
case LPSS_SPT_SSP:
case LPSS_BXT_SSP:
return true;
@@ -288,37 +306,50 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
}
}
+static void lpss_ssp_select_cs(struct driver_data *drv_data,
+ const struct lpss_config *config)
+{
+ u32 value, cs;
+
+ if (!config->cs_sel_mask)
+ return;
+
+ value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
+
+ cs = drv_data->cur_msg->spi->chip_select;
+ cs <<= config->cs_sel_shift;
+ if (cs != (value & config->cs_sel_mask)) {
+ /*
+ * When switching another chip select output active the
+ * output must be selected first and wait 2 ssp_clk cycles
+ * before changing state to active. Otherwise a short
+ * glitch will occur on the previous chip select since
+ * output select is latched but state control is not.
+ */
+ value &= ~config->cs_sel_mask;
+ value |= cs;
+ __lpss_ssp_write_priv(drv_data,
+ config->reg_cs_ctrl, value);
+ ndelay(1000000000 /
+ (drv_data->master->max_speed_hz / 2));
+ }
+}
+
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
const struct lpss_config *config;
- u32 value, cs;
+ u32 value;
config = lpss_get_config(drv_data);
+ if (enable)
+ lpss_ssp_select_cs(drv_data, config);
+
value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
- if (enable) {
- cs = drv_data->cur_msg->spi->chip_select;
- cs <<= LPSS_CS_CONTROL_CS_SEL_SHIFT;
- if (cs != (value & LPSS_CS_CONTROL_CS_SEL_MASK)) {
- /*
- * When switching another chip select output active
- * the output must be selected first and wait 2 ssp_clk
- * cycles before changing state to active. Otherwise
- * a short glitch will occur on the previous chip
- * select since output select is latched but state
- * control is not.
- */
- value &= ~LPSS_CS_CONTROL_CS_SEL_MASK;
- value |= cs;
- __lpss_ssp_write_priv(drv_data,
- config->reg_cs_ctrl, value);
- ndelay(1000000000 /
- (drv_data->master->max_speed_hz / 2));
- }
+ if (enable)
value &= ~LPSS_CS_CONTROL_CS_HIGH;
- } else {
+ else
value |= LPSS_CS_CONTROL_CS_HIGH;
- }
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}
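lpss_ssp_select_cs() only rewrites the select field when it actually changes, then waits two SSP clock cycles before the CS state flips, per the comment above; the delay expression works out to 2e9 / max_speed_hz nanoseconds. A condensed sketch with hypothetical register plumbing:

	#include <linux/delay.h>
	#include <linux/io.h>

	static void foo_select_cs(void __iomem *cs_ctrl, u32 sel_mask,
				  unsigned int sel_shift, unsigned int cs,
				  unsigned int max_speed_hz)
	{
		u32 value = readl(cs_ctrl);

		if ((cs << sel_shift) == (value & sel_mask))
			return;		/* already routed to this output */

		value &= ~sel_mask;
		value |= cs << sel_shift;
		writel(value, cs_ctrl);

		/* two ssp_clk cycles: 1e9 / (f / 2) ns == 2e9 / f ns */
		ndelay(1000000000 / (max_speed_hz / 2));
	}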
@@ -496,6 +527,7 @@ static void giveback(struct driver_data *drv_data)
{
struct spi_transfer* last_transfer;
struct spi_message *msg;
+ unsigned long timeout;
msg = drv_data->cur_msg;
drv_data->cur_msg = NULL;
@@ -508,6 +540,12 @@ static void giveback(struct driver_data *drv_data)
if (last_transfer->delay_usecs)
udelay(last_transfer->delay_usecs);
+ /* Wait until SSP becomes idle before deasserting the CS */
+ timeout = jiffies + msecs_to_jiffies(10);
+ while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
+ !time_after(jiffies, timeout))
+ cpu_relax();
+
/* Drop chip select UNLESS cs_change is true or we are returning
* a message with an error, or next message is for another chip
*/
@@ -572,7 +610,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
static void int_transfer_complete(struct driver_data *drv_data)
{
- /* Stop SSP */
+ /* Clear and disable interrupts */
write_SSSR_CS(drv_data, drv_data->clear_sr);
reset_sccr1(drv_data);
if (!pxa25x_ssp_comp(drv_data))
@@ -957,8 +995,6 @@ static void pump_transfers(unsigned long data)
drv_data->tx_end = drv_data->tx + transfer->len;
drv_data->rx = transfer->rx_buf;
drv_data->rx_end = drv_data->rx + transfer->len;
- drv_data->rx_dma = transfer->rx_dma;
- drv_data->tx_dma = transfer->tx_dma;
drv_data->len = transfer->len;
drv_data->write = drv_data->tx ? chip->write : null_writer;
drv_data->read = drv_data->rx ? chip->read : null_reader;
@@ -1001,19 +1037,6 @@ static void pump_transfers(unsigned long data)
"pump_transfers: DMA burst size reduced to match bits_per_word\n");
}
- /* NOTE: PXA25x_SSP _could_ use external clocking ... */
- cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
- if (!pxa25x_ssp_comp(drv_data))
- dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
- drv_data->master->max_speed_hz
- / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
- chip->enable_dma ? "DMA" : "PIO");
- else
- dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
- drv_data->master->max_speed_hz / 2
- / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
- chip->enable_dma ? "DMA" : "PIO");
-
message->state = RUNNING_STATE;
drv_data->dma_mapped = 0;
@@ -1040,6 +1063,19 @@ static void pump_transfers(unsigned long data)
write_SSSR_CS(drv_data, drv_data->clear_sr);
}
+ /* NOTE: PXA25x_SSP _could_ use external clocking ... */
+ cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+ if (!pxa25x_ssp_comp(drv_data))
+ dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+ drv_data->master->max_speed_hz
+ / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
+ drv_data->dma_mapped ? "DMA" : "PIO");
+ else
+ dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+ drv_data->master->max_speed_hz / 2
+ / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
+ drv_data->dma_mapped ? "DMA" : "PIO");
+
if (is_lpss_ssp(drv_data)) {
if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
!= chip->lpss_rx_threshold)
@@ -1166,6 +1202,7 @@ static int setup(struct spi_device *spi)
break;
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
+ case LPSS_BSW_SSP:
case LPSS_SPT_SSP:
case LPSS_BXT_SSP:
config = lpss_get_config(drv_data);
@@ -1313,7 +1350,7 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ "INT3430", LPSS_LPT_SSP },
{ "INT3431", LPSS_LPT_SSP },
{ "80860F0E", LPSS_BYT_SSP },
- { "8086228E", LPSS_BYT_SSP },
+ { "8086228E", LPSS_BSW_SSP },
{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
@@ -1347,10 +1384,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
- /* BXT */
+ /* BXT A-Step */
{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
+ /* BXT B-Step */
+ { PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
+ { PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
@@ -1438,6 +1479,29 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
}
#endif
+static int pxa2xx_spi_fw_translate_cs(struct spi_master *master, unsigned cs)
+{
+ struct driver_data *drv_data = spi_master_get_devdata(master);
+
+ if (has_acpi_companion(&drv_data->pdev->dev)) {
+ switch (drv_data->ssp_type) {
+ /*
+ * For Atoms the ACPI DeviceSelection used by the Windows
+ * driver starts from 1 instead of 0 so translate it here
+ * to match what Linux expects.
+ */
+ case LPSS_BYT_SSP:
+ case LPSS_BSW_SSP:
+ return cs - 1;
+
+ default:
+ break;
+ }
+ }
+
+ return cs;
+}
+
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1490,6 +1554,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
master->setup = setup;
master->transfer_one_message = pxa2xx_spi_transfer_one_message;
master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+ master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
master->auto_runtime_pm = true;
drv_data->ssp_type = ssp->type;
@@ -1576,6 +1641,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
tmp &= LPSS_CAPS_CS_EN_MASK;
tmp >>= LPSS_CAPS_CS_EN_SHIFT;
platform_info->num_chipselect = ffz(tmp);
+ } else if (config->cs_num) {
+ platform_info->num_chipselect = config->cs_num;
}
}
master->num_chipselect = platform_info->num_chipselect;
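fw_translate_cs gives the host driver a hook to remap firmware chip-select numbering onto the 0-based scheme the Linux core expects; on BayTrail/Braswell the ACPI DeviceSelection happens to count from 1. A sketch of such a hook for a hypothetical controller whose firmware numbers chip selects from 1:

	#include <linux/errno.h>
	#include <linux/spi/spi.h>

	static int foo_fw_translate_cs(struct spi_master *master, unsigned cs)
	{
		/* firmware counts 1..n, Linux wants 0..n-1 */
		if (cs < 1)
			return -EINVAL;
		return cs - 1;
	}

	/* wired up in probe(), next to the other master callbacks:
	 *	master->fw_translate_cs = foo_fw_translate_cs;
	 */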
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 58efa98313aa..a1ef88948144 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -69,8 +69,6 @@ struct driver_data {
void *rx;
void *rx_end;
int dma_mapped;
- dma_addr_t rx_dma;
- dma_addr_t tx_dma;
size_t rx_map_len;
size_t tx_map_len;
u8 n_bytes;
@@ -147,20 +145,9 @@ static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
extern int pxa2xx_spi_flush(struct driver_data *drv_data);
extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
-/*
- * Select the right DMA implementation.
- */
-#if defined(CONFIG_SPI_PXA2XX_DMA)
-#define SPI_PXA2XX_USE_DMA 1
#define MAX_DMA_LEN SZ_64K
#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
-#else
-#undef SPI_PXA2XX_USE_DMA
-#define MAX_DMA_LEN 0
-#define DEFAULT_DMA_CR1 0
-#endif
-#ifdef SPI_PXA2XX_USE_DMA
extern bool pxa2xx_spi_dma_is_possible(size_t len);
extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
@@ -173,29 +160,5 @@ extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
u8 bits_per_word,
u32 *burst_code,
u32 *threshold);
-#else
-static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
-static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
-{
- return 0;
-}
-#define pxa2xx_spi_dma_transfer NULL
-static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
- u32 dma_burst) {}
-static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
-static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
-{
- return 0;
-}
-static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
-static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
- struct spi_device *spi,
- u8 bits_per_word,
- u32 *burst_code,
- u32 *threshold)
-{
- return -ENODEV;
-}
-#endif
#endif /* SPI_PXA2XX_H */
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 7cb1b2d710c1..6c6c0013ec7a 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -13,20 +13,14 @@
*
*/
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include <linux/scatterlist.h>
-#include <linux/of.h>
#include <linux/pm_runtime.h>
-#include <linux/io.h>
-#include <linux/dmaengine.h>
+#include <linux/scatterlist.h>
#define DRIVER_NAME "rockchip-spi"
@@ -179,7 +173,7 @@ struct rockchip_spi {
u8 tmode;
u8 bpw;
u8 n_bytes;
- u8 rsd_nsecs;
+ u32 rsd_nsecs;
unsigned len;
u32 speed;
@@ -192,13 +186,12 @@ struct rockchip_spi {
/* protect state */
spinlock_t lock;
- struct completion xfer_completion;
-
u32 use_dma;
struct sg_table tx_sg;
struct sg_table rx_sg;
struct rockchip_spi_dma_data dma_rx;
struct rockchip_spi_dma_data dma_tx;
+ struct dma_slave_caps dma_caps;
};
static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
@@ -265,7 +258,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
u32 ser;
- struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
+ struct spi_master *master = spi->master;
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(rs->dev);
ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
@@ -290,6 +286,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
ser &= ~(1 << spi->chip_select);
writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
+
+ pm_runtime_put_sync(rs->dev);
}
static int rockchip_spi_prepare_message(struct spi_master *master,
@@ -319,12 +317,12 @@ static void rockchip_spi_handle_err(struct spi_master *master,
*/
if (rs->use_dma) {
if (rs->state & RXBUSY) {
- dmaengine_terminate_all(rs->dma_rx.ch);
+ dmaengine_terminate_async(rs->dma_rx.ch);
flush_fifo(rs);
}
if (rs->state & TXBUSY)
- dmaengine_terminate_all(rs->dma_tx.ch);
+ dmaengine_terminate_async(rs->dma_tx.ch);
}
spin_unlock_irqrestore(&rs->lock, flags);
@@ -433,7 +431,7 @@ static void rockchip_spi_dma_txcb(void *data)
spin_unlock_irqrestore(&rs->lock, flags);
}
-static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
+static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
{
unsigned long flags;
struct dma_slave_config rxconf, txconf;
@@ -449,13 +447,18 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
rxconf.direction = rs->dma_rx.direction;
rxconf.src_addr = rs->dma_rx.addr;
rxconf.src_addr_width = rs->n_bytes;
- rxconf.src_maxburst = rs->n_bytes;
+ if (rs->dma_caps.max_burst > 4)
+ rxconf.src_maxburst = 4;
+ else
+ rxconf.src_maxburst = 1;
dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
rs->dma_rx.ch,
rs->rx_sg.sgl, rs->rx_sg.nents,
rs->dma_rx.direction, DMA_PREP_INTERRUPT);
+ if (!rxdesc)
+ return -EINVAL;
rxdesc->callback = rockchip_spi_dma_rxcb;
rxdesc->callback_param = rs;
@@ -466,13 +469,21 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
txconf.direction = rs->dma_tx.direction;
txconf.dst_addr = rs->dma_tx.addr;
txconf.dst_addr_width = rs->n_bytes;
- txconf.dst_maxburst = rs->n_bytes;
+ if (rs->dma_caps.max_burst > 4)
+ txconf.dst_maxburst = 4;
+ else
+ txconf.dst_maxburst = 1;
dmaengine_slave_config(rs->dma_tx.ch, &txconf);
txdesc = dmaengine_prep_slave_sg(
rs->dma_tx.ch,
rs->tx_sg.sgl, rs->tx_sg.nents,
rs->dma_tx.direction, DMA_PREP_INTERRUPT);
+ if (!txdesc) {
+ if (rxdesc)
+ dmaengine_terminate_sync(rs->dma_rx.ch);
+ return -EINVAL;
+ }
txdesc->callback = rockchip_spi_dma_txcb;
txdesc->callback_param = rs;
@@ -494,6 +505,8 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
dmaengine_submit(txdesc);
dma_async_issue_pending(rs->dma_tx.ch);
}
+
+ return 0;
}
static void rockchip_spi_config(struct rockchip_spi *rs)
@@ -503,7 +516,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
int rsd = 0;
u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
- | (CR0_SSD_ONE << CR0_SSD_OFFSET);
+ | (CR0_SSD_ONE << CR0_SSD_OFFSET)
+ | (CR0_EM_BIG << CR0_EM_OFFSET);
cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
@@ -520,7 +534,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
if (WARN_ON(rs->speed > MAX_SCLK_OUT))
rs->speed = MAX_SCLK_OUT;
- /* the minimum divsor is 2 */
+ /* the minimum divisor is 2 */
if (rs->max_freq < 2 * rs->speed) {
clk_set_rate(rs->spiclk, 2 * rs->speed);
rs->max_freq = clk_get_rate(rs->spiclk);
@@ -606,12 +620,12 @@ static int rockchip_spi_transfer_one(
if (rs->use_dma) {
if (rs->tmode == CR0_XFM_RO) {
/* rx: dma must be prepared first */
- rockchip_spi_prepare_dma(rs);
+ ret = rockchip_spi_prepare_dma(rs);
spi_enable_chip(rs, 1);
} else {
/* tx or tr: spi must be enabled first */
spi_enable_chip(rs, 1);
- rockchip_spi_prepare_dma(rs);
+ ret = rockchip_spi_prepare_dma(rs);
}
} else {
spi_enable_chip(rs, 1);
@@ -716,20 +730,31 @@ static int rockchip_spi_probe(struct platform_device *pdev)
master->transfer_one = rockchip_spi_transfer_one;
master->handle_err = rockchip_spi_handle_err;
- rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
- if (!rs->dma_tx.ch)
+ rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
+ if (IS_ERR(rs->dma_tx.ch)) {
+ /* Check tx to see if we need defer probing driver */
+ if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_get_fifo_len;
+ }
dev_warn(rs->dev, "Failed to request TX DMA channel\n");
+ rs->dma_tx.ch = NULL;
+ }
- rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
- if (!rs->dma_rx.ch) {
- if (rs->dma_tx.ch) {
+ rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
+ if (IS_ERR(rs->dma_rx.ch)) {
+ if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
dma_release_channel(rs->dma_tx.ch);
rs->dma_tx.ch = NULL;
+ ret = -EPROBE_DEFER;
+ goto err_get_fifo_len;
}
dev_warn(rs->dev, "Failed to request RX DMA channel\n");
+ rs->dma_rx.ch = NULL;
}
if (rs->dma_tx.ch && rs->dma_rx.ch) {
+ dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
rs->dma_tx.direction = DMA_MEM_TO_DEV;
@@ -871,6 +896,7 @@ static const struct of_device_id rockchip_spi_dt_match[] = {
{ .compatible = "rockchip,rk3066-spi", },
{ .compatible = "rockchip,rk3188-spi", },
{ .compatible = "rockchip,rk3288-spi", },
+ { .compatible = "rockchip,rk3399-spi", },
{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
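The switch from dma_request_slave_channel() to dma_request_chan() matters because the former returns NULL for every failure, swallowing -EPROBE_DEFER. With the ERR_PTR-based API the probe can defer when the DMA provider is not ready yet and otherwise fall back to PIO with a warning, exactly as the rockchip hunks do. A sketch of that pattern:

	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/err.h>

	/* Returns a channel, NULL for PIO fallback, or sets *ret on deferral. */
	static struct dma_chan *foo_request_dma(struct device *dev,
						const char *name, int *ret)
	{
		struct dma_chan *chan = dma_request_chan(dev, name);

		*ret = 0;
		if (IS_ERR(chan)) {
			if (PTR_ERR(chan) == -EPROBE_DEFER) {
				*ret = -EPROBE_DEFER;	/* provider not ready yet */
				return NULL;
			}
			dev_warn(dev, "no %s DMA channel, using PIO\n", name);
			chan = NULL;		/* optional resource */
		}
		return chan;
	}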
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 64318fcfacf2..eac3c960b2de 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -31,6 +31,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -44,8 +46,9 @@ struct ti_qspi {
struct spi_master *master;
void __iomem *base;
- void __iomem *ctrl_base;
void __iomem *mmap_base;
+ struct regmap *ctrl_base;
+ unsigned int ctrl_reg;
struct clk *fclk;
struct device *dev;
@@ -55,7 +58,7 @@ struct ti_qspi {
u32 cmd;
u32 dc;
- bool ctrl_mod;
+ bool mmap_enabled;
};
#define QSPI_PID (0x0)
@@ -65,11 +68,8 @@ struct ti_qspi {
#define QSPI_SPI_CMD_REG (0x48)
#define QSPI_SPI_STATUS_REG (0x4c)
#define QSPI_SPI_DATA_REG (0x50)
-#define QSPI_SPI_SETUP0_REG (0x54)
+#define QSPI_SPI_SETUP_REG(n) ((0x54 + 4 * (n)))
#define QSPI_SPI_SWITCH_REG (0x64)
-#define QSPI_SPI_SETUP1_REG (0x58)
-#define QSPI_SPI_SETUP2_REG (0x5c)
-#define QSPI_SPI_SETUP3_REG (0x60)
#define QSPI_SPI_DATA_REG_1 (0x68)
#define QSPI_SPI_DATA_REG_2 (0x6c)
#define QSPI_SPI_DATA_REG_3 (0x70)
@@ -109,6 +109,17 @@ struct ti_qspi {
#define QSPI_AUTOSUSPEND_TIMEOUT 2000
+#define MEM_CS_EN(n) ((n + 1) << 8)
+#define MEM_CS_MASK (7 << 8)
+
+#define MM_SWITCH 0x1
+
+#define QSPI_SETUP_RD_NORMAL (0x0 << 12)
+#define QSPI_SETUP_RD_DUAL (0x1 << 12)
+#define QSPI_SETUP_RD_QUAD (0x3 << 12)
+#define QSPI_SETUP_ADDR_SHIFT 8
+#define QSPI_SETUP_DUMMY_SHIFT 10
+
static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
unsigned long reg)
{
@@ -366,6 +377,72 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
return 0;
}
+static void ti_qspi_enable_memory_map(struct spi_device *spi)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+
+ ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
+ if (qspi->ctrl_base) {
+ regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
+ MEM_CS_EN(spi->chip_select),
+ MEM_CS_MASK);
+ }
+ qspi->mmap_enabled = true;
+}
+
+static void ti_qspi_disable_memory_map(struct spi_device *spi)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+
+ ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
+ if (qspi->ctrl_base)
+ regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
+ 0, MEM_CS_MASK);
+ qspi->mmap_enabled = false;
+}
+
+static void ti_qspi_setup_mmap_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ u32 memval = msg->read_opcode;
+
+ switch (msg->data_nbits) {
+ case SPI_NBITS_QUAD:
+ memval |= QSPI_SETUP_RD_QUAD;
+ break;
+ case SPI_NBITS_DUAL:
+ memval |= QSPI_SETUP_RD_DUAL;
+ break;
+ default:
+ memval |= QSPI_SETUP_RD_NORMAL;
+ break;
+ }
+ memval |= ((msg->addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
+ msg->dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
+ ti_qspi_write(qspi, memval,
+ QSPI_SPI_SETUP_REG(spi->chip_select));
+}
+
+static int ti_qspi_spi_flash_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ int ret = 0;
+
+ mutex_lock(&qspi->list_lock);
+
+ if (!qspi->mmap_enabled)
+ ti_qspi_enable_memory_map(spi);
+ ti_qspi_setup_mmap_read(spi, msg);
+ memcpy_fromio(msg->buf, qspi->mmap_base + msg->from, msg->len);
+ msg->retlen = msg->len;
+
+ mutex_unlock(&qspi->list_lock);
+
+ return ret;
+}
+
static int ti_qspi_start_transfer_one(struct spi_master *master,
struct spi_message *m)
{
@@ -398,6 +475,9 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
mutex_lock(&qspi->list_lock);
+ if (qspi->mmap_enabled)
+ ti_qspi_disable_memory_map(spi);
+
list_for_each_entry(t, &m->transfers, transfer_list) {
qspi->cmd |= QSPI_WLEN(t->bits_per_word);
@@ -441,7 +521,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
{
struct ti_qspi *qspi;
struct spi_master *master;
- struct resource *r, *res_ctrl, *res_mmap;
+ struct resource *r, *res_mmap;
struct device_node *np = pdev->dev.of_node;
u32 max_freq;
int ret = 0, num_cs, irq;
@@ -487,16 +567,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
}
}
- res_ctrl = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "qspi_ctrlmod");
- if (res_ctrl == NULL) {
- res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res_ctrl == NULL) {
- dev_dbg(&pdev->dev,
- "control module resources not required\n");
- }
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
@@ -511,20 +581,31 @@ static int ti_qspi_probe(struct platform_device *pdev)
goto free_master;
}
- if (res_ctrl) {
- qspi->ctrl_mod = true;
- qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
- if (IS_ERR(qspi->ctrl_base)) {
- ret = PTR_ERR(qspi->ctrl_base);
- goto free_master;
- }
- }
-
if (res_mmap) {
- qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
+ qspi->mmap_base = devm_ioremap_resource(&pdev->dev,
+ res_mmap);
+ master->spi_flash_read = ti_qspi_spi_flash_read;
if (IS_ERR(qspi->mmap_base)) {
- ret = PTR_ERR(qspi->mmap_base);
- goto free_master;
+ dev_err(&pdev->dev,
+ "mmap resource not usable, falling back to PIO mode\n");
+ master->spi_flash_read = NULL;
+ }
+ }
+ qspi->mmap_enabled = false;
+
+ if (of_property_read_bool(np, "syscon-chipselects")) {
+ qspi->ctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "syscon-chipselects");
+ if (IS_ERR(qspi->ctrl_base)) {
+ ret = PTR_ERR(qspi->ctrl_base);
+ goto free_master;
+ }
+ ret = of_property_read_u32_index(np,
+ "syscon-chipselects",
+ 1, &qspi->ctrl_reg);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "couldn't get ctrl_mod reg index\n");
+ goto free_master;
}
}
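Replacing the second ioremap with a syscon regmap lets the control-module register be shared safely with its other users, with the chip-select enable bits updated through regmap_update_bits(). The device-tree property carries a phandle plus a register offset. A sketch of the lookup (the offset value in the comment is illustrative only):

	#include <linux/err.h>
	#include <linux/mfd/syscon.h>
	#include <linux/of.h>
	#include <linux/regmap.h>

	/*
	 * DT usage, offset value made up for illustration:
	 *	syscon-chipselects = <&scm_conf 0x558>;
	 */
	static int foo_map_ctrl_module(struct device_node *np,
				       struct regmap **map, unsigned int *reg)
	{
		*map = syscon_regmap_lookup_by_phandle(np, "syscon-chipselects");
		if (IS_ERR(*map))
			return PTR_ERR(*map);

		/* second cell of the property is the register offset */
		return of_property_read_u32_index(np, "syscon-chipselects", 1, reg);
	}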
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 47eff8012a77..0239b45eed92 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -144,6 +144,8 @@ SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
+SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
+
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
@@ -181,6 +183,7 @@ static struct attribute *spi_device_statistics_attrs[] = {
&dev_attr_spi_device_transfer_bytes_histo14.attr,
&dev_attr_spi_device_transfer_bytes_histo15.attr,
&dev_attr_spi_device_transfer_bytes_histo16.attr,
+ &dev_attr_spi_device_transfers_split_maxsize.attr,
NULL,
};
@@ -223,6 +226,7 @@ static struct attribute *spi_master_statistics_attrs[] = {
&dev_attr_spi_master_transfer_bytes_histo14.attr,
&dev_attr_spi_master_transfer_bytes_histo15.attr,
&dev_attr_spi_master_transfer_bytes_histo16.attr,
+ &dev_attr_spi_master_transfers_split_maxsize.attr,
NULL,
};
@@ -702,6 +706,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
enum dma_data_direction dir)
{
const bool vmalloced_buf = is_vmalloc_addr(buf);
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
int desc_len;
int sgs;
struct page *vm_page;
@@ -710,10 +715,10 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
int i, ret;
if (vmalloced_buf) {
- desc_len = PAGE_SIZE;
+ desc_len = min_t(int, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
} else {
- desc_len = master->max_dma_len;
+ desc_len = min_t(int, max_seg_size, master->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
}
@@ -739,7 +744,6 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
sg_set_buf(&sgt->sgl[i], sg_buf, min);
}
-
buf += min;
len -= min;
}
@@ -1024,6 +1028,8 @@ out:
if (msg->status && master->handle_err)
master->handle_err(master, msg);
+ spi_res_release(master, msg);
+
spi_finalize_current_message(master);
return ret;
@@ -1047,6 +1053,7 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
* __spi_pump_messages - function which processes spi message queue
* @master: master to process queue for
* @in_kthread: true if we are in the context of the message pump thread
+ * @bus_locked: true if the bus mutex is held when calling this function
*
* This function checks if there is any spi message in the queue that
* needs processing and if so call out to the driver to initialize hardware
@@ -1056,7 +1063,8 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
* inside spi_sync(); the queue extraction handling at the top of the
* function should deal with this safely.
*/
-static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
+ bool bus_locked)
{
unsigned long flags;
bool was_busy = false;
@@ -1152,6 +1160,9 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
}
}
+ if (!bus_locked)
+ mutex_lock(&master->bus_lock_mutex);
+
trace_spi_message_start(master->cur_msg);
if (master->prepare_message) {
@@ -1161,7 +1172,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
"failed to prepare message: %d\n", ret);
master->cur_msg->status = ret;
spi_finalize_current_message(master);
- return;
+ goto out;
}
master->cur_msg_prepared = true;
}
@@ -1170,15 +1181,23 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
if (ret) {
master->cur_msg->status = ret;
spi_finalize_current_message(master);
- return;
+ goto out;
}
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
"failed to transfer one message from queue\n");
- return;
+ goto out;
}
+
+out:
+ if (!bus_locked)
+ mutex_unlock(&master->bus_lock_mutex);
+
+ /* Prod the scheduler in case transfer_one() was busy waiting */
+ if (!ret)
+ cond_resched();
}
/**
@@ -1190,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work)
struct spi_master *master =
container_of(work, struct spi_master, pump_messages);
- __spi_pump_messages(master, true);
+ __spi_pump_messages(master, true, master->bus_lock_flag);
}
static int spi_init_queue(struct spi_master *master)
@@ -1581,13 +1600,30 @@ static void of_register_spi_devices(struct spi_master *master) { }
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
struct spi_device *spi = data;
+ struct spi_master *master = spi->master;
if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
struct acpi_resource_spi_serialbus *sb;
sb = &ares->data.spi_serial_bus;
if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
- spi->chip_select = sb->device_selection;
+ /*
+ * ACPI DeviceSelection numbering is handled by the
+ * host controller driver in Windows and can vary
+ * from driver to driver. In Linux we always expect
+ * 0 .. max - 1 so we need to ask the driver to
+ * translate between the two schemes.
+ */
+ if (master->fw_translate_cs) {
+ int cs = master->fw_translate_cs(master,
+ sb->device_selection);
+ if (cs < 0)
+ return cs;
+ spi->chip_select = cs;
+ } else {
+ spi->chip_select = sb->device_selection;
+ }
+
spi->max_speed_hz = sb->connection_speed;
if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
@@ -2013,6 +2049,336 @@ struct spi_master *spi_busnum_to_master(u16 bus_num)
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for SPI resource management */
+
+/**
+ * spi_res_alloc - allocate a spi resource that is life-cycle managed
+ * during the processing of a spi_message while using
+ * spi_transfer_one
+ * @spi: the spi device for which we allocate memory
+ * @release: the release code to execute for this resource
+ * @size: size to alloc and return
+ * @gfp: GFP allocation flags
+ *
+ * Return: the pointer to the allocated data
+ *
+ * This may get enhanced in the future to allocate from a memory pool
+ * of the @spi_device or @spi_master to avoid repeated allocations.
+ */
+void *spi_res_alloc(struct spi_device *spi,
+ spi_res_release_t release,
+ size_t size, gfp_t gfp)
+{
+ struct spi_res *sres;
+
+ sres = kzalloc(sizeof(*sres) + size, gfp);
+ if (!sres)
+ return NULL;
+
+ INIT_LIST_HEAD(&sres->entry);
+ sres->release = release;
+
+ return sres->data;
+}
+EXPORT_SYMBOL_GPL(spi_res_alloc);
+
+/**
+ * spi_res_free - free an spi resource
+ * @res: pointer to the custom data of a resource
+ *
+ */
+void spi_res_free(void *res)
+{
+ struct spi_res *sres = container_of(res, struct spi_res, data);
+
+ if (!res)
+ return;
+
+ WARN_ON(!list_empty(&sres->entry));
+ kfree(sres);
+}
+EXPORT_SYMBOL_GPL(spi_res_free);
+
+/**
+ * spi_res_add - add a spi_res to the spi_message
+ * @message: the spi message
+ * @res: the spi_resource
+ */
+void spi_res_add(struct spi_message *message, void *res)
+{
+ struct spi_res *sres = container_of(res, struct spi_res, data);
+
+ WARN_ON(!list_empty(&sres->entry));
+ list_add_tail(&sres->entry, &message->resources);
+}
+EXPORT_SYMBOL_GPL(spi_res_add);
+
+/**
+ * spi_res_release - release all spi resources for this message
+ * @master: the @spi_master
+ * @message: the @spi_message
+ */
+void spi_res_release(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct spi_res *res;
+
+ while (!list_empty(&message->resources)) {
+ res = list_last_entry(&message->resources,
+ struct spi_res, entry);
+
+ if (res->release)
+ res->release(master, message, res->data);
+
+ list_del(&res->entry);
+
+ kfree(res);
+ }
+}
+EXPORT_SYMBOL_GPL(spi_res_release);
+
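A user of this resource API allocates a blob, fills it, and registers it against a message; spi_res_release() then runs the release callbacks in reverse order once the message completes. A sketch with a hypothetical per-message scratch value:

	static void foo_scratch_release(struct spi_master *master,
					struct spi_message *msg, void *res)
	{
		/* undo whatever state the resource represented */
	}

	static int foo_attach_scratch(struct spi_message *msg)
	{
		u32 *scratch;

		scratch = spi_res_alloc(msg->spi, foo_scratch_release,
					sizeof(*scratch), GFP_KERNEL);
		if (!scratch)
			return -ENOMEM;

		*scratch = 0x12345678;		/* arbitrary per-message state */
		spi_res_add(msg, scratch);	/* freed when the message finishes */
		return 0;
	}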
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for spi_message alterations */
+
+static void __spi_replace_transfers_release(struct spi_master *master,
+ struct spi_message *msg,
+ void *res)
+{
+ struct spi_replaced_transfers *rxfer = res;
+ size_t i;
+
+ /* call extra callback if requested */
+ if (rxfer->release)
+ rxfer->release(master, msg, res);
+
+ /* insert replaced transfers back into the message */
+ list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
+
+ /* remove the formerly inserted entries */
+ for (i = 0; i < rxfer->inserted; i++)
+ list_del(&rxfer->inserted_transfers[i].transfer_list);
+}
+
+/**
+ * spi_replace_transfers - replace transfers with several transfers
+ * and register change with spi_message.resources
+ * @msg: the spi_message we work upon
+ * @xfer_first: the first spi_transfer we want to replace
+ * @remove: number of transfers to remove
+ * @insert: the number of transfers we want to insert instead
+ * @release: extra release code necessary in some circumstances
+ * @extradatasize: extra data to allocate (with alignment guarantees
+ * of struct @spi_transfer)
+ * @gfp: gfp flags
+ *
+ * Returns: pointer to @spi_replaced_transfers,
+ * PTR_ERR(...) in case of errors.
+ */
+struct spi_replaced_transfers *spi_replace_transfers(
+ struct spi_message *msg,
+ struct spi_transfer *xfer_first,
+ size_t remove,
+ size_t insert,
+ spi_replaced_release_t release,
+ size_t extradatasize,
+ gfp_t gfp)
+{
+ struct spi_replaced_transfers *rxfer;
+ struct spi_transfer *xfer;
+ size_t i;
+
+ /* allocate the structure using spi_res */
+ rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
+ insert * sizeof(struct spi_transfer)
+ + sizeof(struct spi_replaced_transfers)
+ + extradatasize,
+ gfp);
+ if (!rxfer)
+ return ERR_PTR(-ENOMEM);
+
+ /* the release code to invoke before running the generic release */
+ rxfer->release = release;
+
+ /* assign extradata */
+ if (extradatasize)
+ rxfer->extradata =
+ &rxfer->inserted_transfers[insert];
+
+ /* init the replaced_transfers list */
+ INIT_LIST_HEAD(&rxfer->replaced_transfers);
+
+ /* assign the list_entry after which we should reinsert
+ * the @replaced_transfers - it may be spi_message.messages!
+ */
+ rxfer->replaced_after = xfer_first->transfer_list.prev;
+
+ /* remove the requested number of transfers */
+ for (i = 0; i < remove; i++) {
+ /* if the entry after replaced_after is msg->transfers
+ * then we have been requested to remove more transfers
+ * than are in the list
+ */
+ if (rxfer->replaced_after->next == &msg->transfers) {
+ dev_err(&msg->spi->dev,
+ "requested to remove more spi_transfers than are available\n");
+ /* insert replaced transfers back into the message */
+ list_splice(&rxfer->replaced_transfers,
+ rxfer->replaced_after);
+
+ /* free the spi_replace_transfer structure */
+ spi_res_free(rxfer);
+
+ /* and return with an error */
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* remove the entry after replaced_after from list of
+ * transfers and add it to list of replaced_transfers
+ */
+ list_move_tail(rxfer->replaced_after->next,
+ &rxfer->replaced_transfers);
+ }
+
+ /* create copies of the given xfer with identical settings
+ * based on the first transfer to get removed
+ */
+ for (i = 0; i < insert; i++) {
+ /* we need to run in reverse order */
+ xfer = &rxfer->inserted_transfers[insert - 1 - i];
+
+ /* copy all spi_transfer data */
+ memcpy(xfer, xfer_first, sizeof(*xfer));
+
+ /* add to list */
+ list_add(&xfer->transfer_list, rxfer->replaced_after);
+
+ /* clear cs_change and delay_usecs for all but the last */
+ if (i) {
+ xfer->cs_change = false;
+ xfer->delay_usecs = 0;
+ }
+ }
+
+ /* set up inserted */
+ rxfer->inserted = insert;
+
+ /* and register it with spi_res/spi_message */
+ spi_res_add(msg, rxfer);
+
+ return rxfer;
+}
+EXPORT_SYMBOL_GPL(spi_replace_transfers);
+
+static int __spi_split_transfer_maxsize(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer **xferp,
+ size_t maxsize,
+ gfp_t gfp)
+{
+ struct spi_transfer *xfer = *xferp, *xfers;
+ struct spi_replaced_transfers *srt;
+ size_t offset;
+ size_t count, i;
+
+ /* warn once about this fact that we are splitting a transfer */
+ dev_warn_once(&msg->spi->dev,
+ "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
+ xfer->len, maxsize);
+
+ /* calculate how many we have to replace */
+ count = DIV_ROUND_UP(xfer->len, maxsize);
+
+ /* create replacement */
+ srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
+ if (IS_ERR(srt))
+ return PTR_ERR(srt);
+ xfers = srt->inserted_transfers;
+
+ /* now handle each of those newly inserted spi_transfers
+ * note that the replacements spi_transfers all are preset
+ * to the same values as *xferp, so tx_buf, rx_buf and len
+ * are all identical (as well as most others)
+ * so we just have to fix up len and the pointers.
+ *
+ * this also includes support for the deprecated
+ * spi_message.is_dma_mapped interface
+ */
+
+ /* the first transfer just needs the length modified, so we
+ * run it outside the loop
+ */
+ xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
+
+ /* all the others need rx_buf/tx_buf also set */
+ for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
+ /* update rx_buf, tx_buf and dma */
+ if (xfers[i].rx_buf)
+ xfers[i].rx_buf += offset;
+ if (xfers[i].rx_dma)
+ xfers[i].rx_dma += offset;
+ if (xfers[i].tx_buf)
+ xfers[i].tx_buf += offset;
+ if (xfers[i].tx_dma)
+ xfers[i].tx_dma += offset;
+
+ /* update length */
+ xfers[i].len = min(maxsize, xfers[i].len - offset);
+ }
+
+ /* we set up xferp to the last entry we have inserted,
+ * so that we skip those already split transfers
+ */
+ *xferp = &xfers[count - 1];
+
+ /* increment statistics counters */
+ SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+ transfers_split_maxsize);
+ SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
+ transfers_split_maxsize);
+
+ return 0;
+}
+
+/**
+ * spi_split_transfers_maxsize - split spi transfers into multiple transfers
+ * when an individual transfer exceeds a
+ * certain size
+ * @master: the @spi_master for this transfer
+ * @msg: the @spi_message to transform
+ * @maxsize: the maximum size of a transfer; any longer transfer is split
+ * @gfp: GFP allocation flags
+ *
+ * Return: status of transformation
+ */
+int spi_split_transfers_maxsize(struct spi_master *master,
+ struct spi_message *msg,
+ size_t maxsize,
+ gfp_t gfp)
+{
+ struct spi_transfer *xfer;
+ int ret;
+
+ /* iterate over the transfer_list,
+ * but note that xfer is advanced to the last transfer inserted
+ * to avoid checking sizes again unnecessarily (also xfer may
+ * potentially belong to a different list by the time the
+ * replacement has happened)
+ */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->len > maxsize) {
+ ret = __spi_split_transfer_maxsize(
+ master, msg, &xfer, maxsize, gfp);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
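A controller driver with a hard transfer-size ceiling can call this from its prepare_message() hook; the replaced transfers are restored automatically through the spi_res machinery above when the message completes. A minimal sketch, assuming a hypothetical 64 KiB DMA limit:

	#include <linux/sizes.h>

	static int foo_prepare_message(struct spi_master *master,
				       struct spi_message *msg)
	{
		/* split anything larger than the controller's 64 KiB DMA limit */
		return spi_split_transfers_maxsize(master, msg, SZ_64K, GFP_KERNEL);
	}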
/*-------------------------------------------------------------------------*/
@@ -2351,6 +2717,46 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
EXPORT_SYMBOL_GPL(spi_async_locked);
+int spi_flash_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg)
+{
+ struct spi_master *master = spi->master;
+ int ret;
+
+ if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
+ msg->addr_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+ return -EINVAL;
+ if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
+ msg->addr_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_TX_QUAD))
+ return -EINVAL;
+ if (msg->data_nbits == SPI_NBITS_DUAL &&
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+ return -EINVAL;
+ if (msg->data_nbits == SPI_NBITS_QUAD &&
+ !(spi->mode & SPI_RX_QUAD))
+ return -EINVAL;
+
+ if (master->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(master->dev.parent);
+ if (ret < 0) {
+ dev_err(&master->dev, "Failed to power device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ mutex_lock(&master->bus_lock_mutex);
+ ret = master->spi_flash_read(spi, msg);
+ mutex_unlock(&master->bus_lock_mutex);
+ if (master->auto_runtime_pm)
+ pm_runtime_put(master->dev.parent);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_flash_read);
+
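A flash driver calls this with a filled spi_flash_read_message and lets the controller use its accelerated path (the ti-qspi memory-mapped read above, for instance). A sketch of a caller, loosely after what an m25p80-style driver would do; the field values are illustrative only:

	static ssize_t foo_flash_read(struct spi_device *spi, loff_t from,
				      size_t len, void *buf)
	{
		struct spi_flash_read_message msg = {
			.buf		= buf,
			.from		= from,
			.len		= len,
			.read_opcode	= 0x6b,		/* quad-output fast read */
			.addr_width	= 3,
			.dummy_bytes	= 1,
			.opcode_nbits	= SPI_NBITS_SINGLE,
			.addr_nbits	= SPI_NBITS_SINGLE,
			.data_nbits	= SPI_NBITS_QUAD,
		};
		int ret = spi_flash_read(spi, &msg);

		return ret ? ret : msg.retlen;
	}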
/*-------------------------------------------------------------------------*/
/* Utility methods for SPI master protocol drivers, layered on
@@ -2414,7 +2820,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
spi_sync_immediate);
SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
spi_sync_immediate);
- __spi_pump_messages(master, false);
+ __spi_pump_messages(master, false, bus_locked);
}
wait_for_completion(&done);
@@ -2447,7 +2853,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
- return __spi_sync(spi, message, 0);
+ return __spi_sync(spi, message, spi->master->bus_lock_flag);
}
EXPORT_SYMBOL_GPL(spi_sync);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index be822f7a9ce6..aca282d45421 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -10,6 +10,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -47,9 +48,9 @@
#define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1)
#define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF)
-#define SPMI_MAPPING_TABLE_LEN 255
#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
-#define PPID_TO_CHAN_TABLE_SZ BIT(12) /* PPID is 12bit chan is 1byte*/
+#define PMIC_ARB_MAX_PPID BIT(12) /* PPID is 12bit */
+#define PMIC_ARB_CHAN_VALID BIT(15)
/* Ownership Table */
#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
@@ -85,9 +86,7 @@ enum pmic_arb_cmd_op_code {
};
/* Maximum number of support PMIC peripherals */
-#define PMIC_ARB_MAX_PERIPHS 256
-#define PMIC_ARB_MAX_CHNL 128
-#define PMIC_ARB_PERIPH_ID_VALID (1 << 15)
+#define PMIC_ARB_MAX_PERIPHS 512
#define PMIC_ARB_TIMEOUT_US 100
#define PMIC_ARB_MAX_TRANS_BYTES (8)
@@ -125,18 +124,22 @@ struct spmi_pmic_arb_dev {
void __iomem *wr_base;
void __iomem *intr;
void __iomem *cnfg;
+ void __iomem *core;
+ resource_size_t core_size;
raw_spinlock_t lock;
u8 channel;
int irq;
u8 ee;
- u8 min_apid;
- u8 max_apid;
- u32 mapping_table[SPMI_MAPPING_TABLE_LEN];
+ u16 min_apid;
+ u16 max_apid;
+ u32 *mapping_table;
+ DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
struct irq_domain *domain;
struct spmi_controller *spmic;
- u16 apid_to_ppid[256];
+ u16 *apid_to_ppid;
const struct pmic_arb_ver_ops *ver_ops;
- u8 *ppid_to_chan;
+ u16 *ppid_to_chan;
+ u16 last_channel;
};
/**
@@ -158,7 +161,8 @@ struct spmi_pmic_arb_dev {
*/
struct pmic_arb_ver_ops {
/* spmi commands (read_cmd, write_cmd, cmd) functionality */
- u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
+ int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr,
+ u32 *offset);
u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
/* Interrupts controller functionality (offset of PIC registers) */
@@ -212,7 +216,14 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
u32 status = 0;
u32 timeout = PMIC_ARB_TIMEOUT_US;
- u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;
+ u32 offset;
+ int rc;
+
+ rc = dev->ver_ops->offset(dev, sid, addr, &offset);
+ if (rc)
+ return rc;
+
+ offset += PMIC_ARB_STATUS;
while (timeout--) {
status = readl_relaxed(base + offset);
@@ -257,7 +268,11 @@ pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
unsigned long flags;
u32 cmd;
int rc;
- u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);
+ u32 offset;
+
+ rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset);
+ if (rc)
+ return rc;
cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
@@ -297,7 +312,11 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u8 bc = len - 1;
u32 cmd;
int rc;
- u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+ u32 offset;
+
+ rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+ if (rc)
+ return rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
dev_err(&ctrl->dev,
@@ -344,7 +363,11 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
u8 bc = len - 1;
u32 cmd;
int rc;
- u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+ u32 offset;
+
+ rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+ if (rc)
+ return rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
dev_err(&ctrl->dev,
@@ -614,6 +637,10 @@ static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
u32 data;
for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+ if (!test_and_set_bit(index, pa->mapping_table_valid))
+ mapping_table[index] = readl_relaxed(pa->cnfg +
+ SPMI_MAPPING_TABLE_REG(index));
+
data = mapping_table[index];
if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
@@ -701,18 +728,61 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
}
/* v1 offset per ee */
-static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
{
- return 0x800 + 0x80 * pa->channel;
+ *offset = 0x800 + 0x80 * pa->channel;
+ return 0;
}
+static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
+{
+ u32 regval, offset;
+ u16 chan;
+ u16 id;
+
+ /*
+ * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
+ * ppid_to_chan is an in-memory invert of that table.
+ */
+ for (chan = pa->last_channel; ; chan++) {
+ offset = PMIC_ARB_REG_CHNL(chan);
+ if (offset >= pa->core_size)
+ break;
+
+ regval = readl_relaxed(pa->core + offset);
+ if (!regval)
+ continue;
+
+ id = (regval >> 8) & PMIC_ARB_PPID_MASK;
+ pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID;
+ if (id == ppid) {
+ chan |= PMIC_ARB_CHAN_VALID;
+ break;
+ }
+ }
+ pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID;
+
+ return chan;
+}
+
/* v2 offset per ppid (chan) and per ee */
-static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
{
u16 ppid = (sid << 8) | (addr >> 8);
- u8 chan = pa->ppid_to_chan[ppid];
+ u16 chan;
- return 0x1000 * pa->ee + 0x8000 * chan;
+ chan = pa->ppid_to_chan[ppid];
+ if (!(chan & PMIC_ARB_CHAN_VALID))
+ chan = pmic_arb_find_chan(pa, ppid);
+ if (!(chan & PMIC_ARB_CHAN_VALID))
+ return -ENODEV;
+ chan &= ~PMIC_ARB_CHAN_VALID;
+
+ *offset = 0x1000 * pa->ee + 0x8000 * chan;
+ return 0;
}
static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
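The v2 path now resolves channels lazily: the 12-bit PPID packs the slave id with the peripheral's address page, and bit 15 of a cached entry distinguishes "looked up" from the all-zero initial state, so channel 0 stays representable. A sketch of the encoding; foo_scan_hw_for() stands in for the hardware table walk and is hypothetical:

	#include <linux/bitops.h>
	#include <linux/types.h>

	#define FOO_CHAN_VALID	BIT(15)	/* mirrors PMIC_ARB_CHAN_VALID */

	static u16 foo_scan_hw_for(u16 ppid);	/* hypothetical HW table scan */

	static u16 foo_to_ppid(u8 sid, u16 addr)
	{
		return (sid << 8) | (addr >> 8);	/* 12-bit PPID */
	}

	static u16 foo_cached_chan(u16 *table, u16 ppid)
	{
		/* a zero entry means "never scanned"; the flag keeps chan 0 valid */
		if (!(table[ppid] & FOO_CHAN_VALID))
			table[ppid] = foo_scan_hw_for(ppid) | FOO_CHAN_VALID;
		return table[ppid] & ~FOO_CHAN_VALID;
	}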
@@ -797,7 +867,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *core;
u32 channel, ee, hw_ver;
- int err, i;
+ int err;
bool is_v1;
ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
@@ -808,6 +878,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
pa->spmic = ctrl;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+ pa->core_size = resource_size(res);
core = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(core)) {
err = PTR_ERR(core);
@@ -825,10 +896,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
pa->wr_base = core;
pa->rd_base = core;
} else {
- u8 chan;
- u16 ppid;
- u32 regval;
-
+ pa->core = core;
pa->ver_ops = &pmic_arb_v2;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -847,24 +915,14 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
goto err_put_ctrl;
}
- pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
- PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
+ pa->ppid_to_chan = devm_kcalloc(&ctrl->dev,
+ PMIC_ARB_MAX_PPID,
+ sizeof(*pa->ppid_to_chan),
+ GFP_KERNEL);
if (!pa->ppid_to_chan) {
err = -ENOMEM;
goto err_put_ctrl;
}
- /*
- * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
- * ppid_to_chan is an in-memory invert of that table.
- */
- for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
- regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
- if (!regval)
- continue;
-
- ppid = (regval >> 8) & 0xFFF;
- pa->ppid_to_chan[ppid] = chan;
- }
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
@@ -915,9 +973,20 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
pa->ee = ee;
- for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
- pa->mapping_table[i] = readl_relaxed(
- pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
+ pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
+ sizeof(*pa->apid_to_ppid),
+ GFP_KERNEL);
+ if (!pa->apid_to_ppid) {
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
+
+ pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
+ sizeof(*pa->mapping_table), GFP_KERNEL);
+ if (!pa->mapping_table) {
+ err = -ENOMEM;
+ goto err_put_ctrl;
+ }
/* Initialize max_apid/min_apid to the opposite bounds, during
* the irq domain translation, we are sure to update these */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5d3b86a33857..5bac28a3944e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -32,8 +32,6 @@ source "drivers/staging/comedi/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
-source "drivers/staging/panel/Kconfig"
-
source "drivers/staging/rtl8192u/Kconfig"
source "drivers/staging/rtl8192e/Kconfig"
@@ -64,8 +62,6 @@ source "drivers/staging/emxx_udc/Kconfig"
source "drivers/staging/speakup/Kconfig"
-source "drivers/staging/ste_rmi4/Kconfig"
-
source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
@@ -76,8 +72,6 @@ source "drivers/staging/android/Kconfig"
source "drivers/staging/board/Kconfig"
-source "drivers/staging/gdm72xx/Kconfig"
-
source "drivers/staging/gdm724x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
@@ -92,8 +86,6 @@ source "drivers/staging/lustre/Kconfig"
source "drivers/staging/dgnc/Kconfig"
-source "drivers/staging/dgap/Kconfig"
-
source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/skein/Kconfig"
@@ -110,4 +102,6 @@ source "drivers/staging/wilc1000/Kconfig"
source "drivers/staging/most/Kconfig"
+source "drivers/staging/i4l/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 30918edef5e3..a954242b0f2c 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,14 +1,10 @@
# Makefile for staging directory
-# fix for build system bug...
-obj-$(CONFIG_STAGING) += staging.o
-
obj-y += media/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
-obj-$(CONFIG_PANEL) += panel/
obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
@@ -26,18 +22,15 @@ obj-$(CONFIG_FB_SM750) += sm750fb/
obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_SPEAKUP) += speakup/
-obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_STAGING_RDMA) += rdma/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_STAGING_BOARD) += board/
-obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
-obj-$(CONFIG_LUSTRE_FS) += lustre/
+obj-$(CONFIG_LNET) += lustre/
obj-$(CONFIG_DGNC) += dgnc/
-obj-$(CONFIG_DGAP) += dgap/
obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_CRYPTO_SKEIN) += skein/
@@ -47,3 +40,4 @@ obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
+obj-$(CONFIG_ISDN_I4L) += i4l/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 42b15126aa06..bd90d2002afb 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -57,15 +57,6 @@ config SW_SYNC
synchronization. Useful when there is no hardware primitive backing
the synchronization.
-config SW_SYNC_USER
- bool "Userspace API for SW_SYNC"
- default n
- depends on SW_SYNC
- ---help---
- Provides a user space API to the sw sync object.
- *WARNING* improper use of this can result in deadlocking kernel
- drivers from userspace.
-
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 5bb1283d19cd..ca9a53c03f0f 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -106,21 +106,34 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_on_lru(range) \
((range)->purged == ASHMEM_NOT_PURGED)
-#define page_range_subsumes_range(range, start, end) \
- (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
+static inline int page_range_subsumes_range(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ return (((range)->pgstart >= (start)) && ((range)->pgend <= (end)));
+}
-#define page_range_subsumed_by_range(range, start, end) \
- (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
+static inline int page_range_subsumed_by_range(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ return (((range)->pgstart <= (start)) && ((range)->pgend >= (end)));
+}
-#define page_in_range(range, page) \
- (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
+static inline int page_in_range(struct ashmem_range *range, size_t page)
+{
+ return (((range)->pgstart <= (page)) && ((range)->pgend >= (page)));
+}
-#define page_range_in_range(range, start, end) \
- (page_in_range(range, start) || page_in_range(range, end) || \
- page_range_subsumes_range(range, start, end))
+static inline int page_range_in_range(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ return (page_in_range(range, start) || page_in_range(range, end) ||
+ page_range_subsumes_range(range, start, end));
+}
-#define range_before_page(range, page) \
- ((range)->pgend < (page))
+static inline int range_before_page(struct ashmem_range *range, size_t page)
+{
+ return ((range)->pgend < (page));
+}
#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
@@ -372,8 +385,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
}
/* requested protection bits must match our allowed protection mask */
- if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
- calc_vm_prot_bits(PROT_MASK))) {
+ if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
+ calc_vm_prot_bits(PROT_MASK, 0))) {
ret = -EPERM;
goto out;
}
@@ -441,7 +454,9 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (!(sc->gfp_mask & __GFP_FS))
return SHRINK_STOP;
- mutex_lock(&ashmem_mutex);
+ if (!mutex_trylock(&ashmem_mutex))
+ return -1;
+
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
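
The hunk above switches ashmem_shrink_scan() from mutex_lock() to
mutex_trylock(): the shrinker can be entered from reclaim while
ashmem_mutex is already held, so it must back off rather than block.
A minimal userspace sketch of the pattern, with a pthread mutex
standing in for the kernel mutex (names are illustrative):

/* trylock_shrinker.c - build with: cc trylock_shrinker.c -pthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ashmem_mutex = PTHREAD_MUTEX_INITIALIZER;

static long shrink_scan(void)
{
	/* Contended: tell the caller to skip us instead of deadlocking. */
	if (pthread_mutex_trylock(&ashmem_mutex))
		return -1;

	/* ... scan and purge LRU ranges here ... */

	pthread_mutex_unlock(&ashmem_mutex);
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&ashmem_mutex);	/* simulate re-entry */
	printf("scan under lock: %ld\n", shrink_scan());	/* -1 */
	pthread_mutex_unlock(&ashmem_mutex);

	printf("scan uncontended: %ld\n", shrink_scan());	/* 0 */
	return 0;
}
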
@@ -661,8 +676,8 @@ restart:
if (page_range_subsumed_by_range(range, pgstart, pgend))
return 0;
if (page_range_in_range(range, pgstart, pgend)) {
- pgstart = min_t(size_t, range->pgstart, pgstart);
- pgend = max_t(size_t, range->pgend, pgend);
+ pgstart = min(range->pgstart, pgstart);
+ pgend = max(range->pgend, pgend);
purged |= range->purged;
range_del(range);
goto restart;
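
The earlier ashmem hunks also convert the page-range macros to static
inline functions, so the compiler type-checks the arguments instead of
expanding them blindly. A small userspace sketch of the same
conversion (struct range is a stand-in for ashmem_range):

#include <stddef.h>
#include <stdio.h>

struct range { size_t pgstart, pgend; };

/* Macro form: no type checking on r or page. */
#define PAGE_IN_RANGE(r, page) \
	(((r)->pgstart <= (page)) && ((r)->pgend >= (page)))

/* Inline form: arguments are checked against the prototype. */
static inline int page_in_range(const struct range *r, size_t page)
{
	return r->pgstart <= page && r->pgend >= page;
}

int main(void)
{
	struct range r = { .pgstart = 2, .pgend = 8 };

	printf("macro:  %d\n", PAGE_IN_RANGE(&r, 5));	/* 1 */
	printf("inline: %d\n", page_in_range(&r, 9));	/* 0 */
	return 0;
}
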
diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
index e3c07b2ba00e..fe9f0fd210cd 100644
--- a/drivers/staging/android/ion/hisilicon/hi6220_ion.c
+++ b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
@@ -214,10 +214,7 @@ static struct platform_driver hi6220_ion_driver = {
static int __init hi6220_ion_init(void)
{
- int ret;
-
- ret = platform_driver_register(&hi6220_ion_driver);
- return ret;
+ return platform_driver_register(&hi6220_ion_driver);
}
subsys_initcall(hi6220_ion_init);
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 0754a37c9674..85365672c931 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -251,8 +251,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
* memory coming from the heaps is ready for dma, ie if it has a
* cached mapping that mapping has been invalidated
*/
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
sg_dma_address(sg) = sg_phys(sg);
+ sg_dma_len(sg) = sg->length;
+ }
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
mutex_unlock(&dev->buffer_lock);
@@ -385,13 +387,22 @@ static void ion_handle_get(struct ion_handle *handle)
kref_get(&handle->ref);
}
-static int ion_handle_put(struct ion_handle *handle)
+static int ion_handle_put_nolock(struct ion_handle *handle)
+{
+ int ret;
+
+ ret = kref_put(&handle->ref, ion_handle_destroy);
+
+ return ret;
+}
+
+int ion_handle_put(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
int ret;
mutex_lock(&client->lock);
- ret = kref_put(&handle->ref, ion_handle_destroy);
+ ret = ion_handle_put_nolock(handle);
mutex_unlock(&client->lock);
return ret;
@@ -415,20 +426,30 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
return ERR_PTR(-EINVAL);
}
-static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
int id)
{
struct ion_handle *handle;
- mutex_lock(&client->lock);
handle = idr_find(&client->idr, id);
if (handle)
ion_handle_get(handle);
- mutex_unlock(&client->lock);
return handle ? handle : ERR_PTR(-EINVAL);
}
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
+{
+ struct ion_handle *handle;
+
+ mutex_lock(&client->lock);
+ handle = ion_handle_get_by_id_nolock(client, id);
+ mutex_unlock(&client->lock);
+
+ return handle;
+}
+
static bool ion_handle_validate(struct ion_client *client,
struct ion_handle *handle)
{
@@ -530,22 +551,28 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
}
EXPORT_SYMBOL(ion_alloc);
-void ion_free(struct ion_client *client, struct ion_handle *handle)
+static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
bool valid_handle;
BUG_ON(client != handle->client);
- mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to free.\n", __func__);
- mutex_unlock(&client->lock);
return;
}
+ ion_handle_put_nolock(handle);
+}
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ BUG_ON(client != handle->client);
+
+ mutex_lock(&client->lock);
+ ion_free_nolock(client, handle);
mutex_unlock(&client->lock);
- ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
@@ -675,6 +702,34 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_unmap_kernel);
+static struct mutex debugfs_mutex;
+static struct rb_root *ion_root_client;
+static int is_client_alive(struct ion_client *client)
+{
+ struct rb_node *node;
+ struct ion_client *tmp;
+ struct ion_device *dev;
+
+ node = ion_root_client->rb_node;
+ dev = container_of(ion_root_client, struct ion_device, clients);
+
+ down_read(&dev->lock);
+ while (node) {
+ tmp = rb_entry(node, struct ion_client, node);
+ if (client < tmp) {
+ node = node->rb_left;
+ } else if (client > tmp) {
+ node = node->rb_right;
+ } else {
+ up_read(&dev->lock);
+ return 1;
+ }
+ }
+
+ up_read(&dev->lock);
+ return 0;
+}
+
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
struct ion_client *client = s->private;
@@ -683,6 +738,14 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
+ mutex_lock(&debugfs_mutex);
+ if (!is_client_alive(client)) {
+ seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
+ client);
+ mutex_unlock(&debugfs_mutex);
+ return 0;
+ }
+
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
@@ -694,6 +757,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
sizes[id] += handle->buffer->size;
}
mutex_unlock(&client->lock);
+ mutex_unlock(&debugfs_mutex);
seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
@@ -830,6 +894,7 @@ void ion_client_destroy(struct ion_client *client)
struct rb_node *n;
pr_debug("%s: %d\n", __func__, __LINE__);
+ mutex_lock(&debugfs_mutex);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
@@ -848,6 +913,7 @@ void ion_client_destroy(struct ion_client *client)
kfree(client->display_name);
kfree(client->name);
kfree(client);
+ mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);
@@ -1075,14 +1141,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
return PTR_ERR_OR_ZERO(vaddr);
}
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
+static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
ion_buffer_kmap_put(buffer);
mutex_unlock(&buffer->lock);
+
+ return 0;
}
static struct dma_buf_ops dma_buf_ops = {
@@ -1149,22 +1217,18 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+ struct dma_buf *dmabuf)
{
- struct dma_buf *dmabuf;
struct ion_buffer *buffer;
struct ion_handle *handle;
int ret;
- dmabuf = dma_buf_get(fd);
- if (IS_ERR(dmabuf))
- return ERR_CAST(dmabuf);
/* if this memory came from ion */
if (dmabuf->ops != &dma_buf_ops) {
pr_err("%s: can not import dmabuf from another exporter\n",
__func__);
- dma_buf_put(dmabuf);
return ERR_PTR(-EINVAL);
}
buffer = dmabuf->priv;
@@ -1192,11 +1256,25 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
}
end:
- dma_buf_put(dmabuf);
return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
+struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
+{
+ struct dma_buf *dmabuf;
+ struct ion_handle *handle;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return ERR_CAST(dmabuf);
+
+ handle = ion_import_dma_buf(client, dmabuf);
+ dma_buf_put(dmabuf);
+ return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf_fd);
+
static int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
@@ -1279,11 +1357,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_handle *handle;
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
+ mutex_lock(&client->lock);
+ handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
return PTR_ERR(handle);
- ion_free(client, handle);
- ion_handle_put(handle);
+ }
+ ion_free_nolock(client, handle);
+ ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
break;
}
case ION_IOC_SHARE:
@@ -1304,7 +1386,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_handle *handle;
- handle = ion_import_dma_buf(client, data.fd.fd);
+ handle = ion_import_dma_buf_fd(client, data.fd.fd);
if (IS_ERR(handle))
ret = PTR_ERR(handle);
else
@@ -1401,6 +1483,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
seq_puts(s, "----------------------------------------------------\n");
+ mutex_lock(&debugfs_mutex);
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
@@ -1419,6 +1502,8 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
client->pid, size);
}
}
+ mutex_unlock(&debugfs_mutex);
+
seq_puts(s, "----------------------------------------------------\n");
seq_puts(s, "orphaned allocations (info is from last known client):\n");
mutex_lock(&dev->buffer_lock);
@@ -1470,7 +1555,7 @@ static int debug_shrink_set(void *data, u64 val)
struct shrink_control sc;
int objs;
- sc.gfp_mask = -1;
+ sc.gfp_mask = GFP_HIGHUSER;
sc.nr_to_scan = val;
if (!val) {
@@ -1488,7 +1573,7 @@ static int debug_shrink_get(void *data, u64 *val)
struct shrink_control sc;
int objs;
- sc.gfp_mask = -1;
+ sc.gfp_mask = GFP_HIGHUSER;
sc.nr_to_scan = 0;
objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
@@ -1603,6 +1688,8 @@ debugfs_done:
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
+ ion_root_client = &idev->clients;
+ mutex_init(&debugfs_mutex);
return idev;
}
EXPORT_SYMBOL(ion_device_create);
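
Several ion.c changes above share one pattern: a locked operation is
split into a public wrapper that takes client->lock and a _nolock
worker that assumes the caller holds it, so compound paths such as
ION_IOC_FREE can do lookup, free, and put in a single critical
section. A hedged userspace sketch of that split (a pthread mutex and
a bare counter stand in for the kernel objects):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount = 2;

static int handle_put_nolock(void)
{
	/* Caller must hold client_lock. */
	return --refcount == 0;
}

static int handle_put(void)
{
	int ret;

	pthread_mutex_lock(&client_lock);
	ret = handle_put_nolock();
	pthread_mutex_unlock(&client_lock);
	return ret;
}

int main(void)
{
	handle_put();	/* single operation: the wrapper locks */

	/* Compound operation: one critical section, like ION_IOC_FREE. */
	pthread_mutex_lock(&client_lock);
	handle_put_nolock();
	pthread_mutex_unlock(&client_lock);

	printf("refcount=%d\n", refcount);	/* 0 */
	return 0;
}
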
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index b860c5f579f5..a1331fc169a1 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -192,14 +192,26 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
/**
- * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
+ * ion_import_dma_buf() - get ion_handle from dma-buf
+ * @client: the client
+ * @dmabuf: the dma-buf
+ *
+ * Get the ion_buffer associated with the dma-buf and return the ion_handle.
+ * If no ion_handle exists for this buffer, return a newly created ion_handle.
+ * If a dma-buf from another exporter is passed, return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+ struct dma_buf *dmabuf);
+
+/**
+ * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle
* @client: the client
* @fd: the dma-buf fd
*
- * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf,
- * import that fd and return a handle representing it. If a dma-buf from
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf_fd,
+ * import that fd and return a handle representing it. If a dma-buf from
* another exporter is passed in this function will return ERR_PTR(-EINVAL)
*/
-struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
#endif /* _LINUX_ION_H */
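
The ion_import_dma_buf() split documented above is a common API shape:
a core function that operates on the object itself, plus a thin _fd
wrapper that resolves the descriptor, calls the core, and drops the
temporary reference it took. An illustrative sketch with stand-in
types (buf_get()/buf_put() are hypothetical, not the dma-buf API):

#include <stdio.h>
#include <stdlib.h>

struct buf { int refs; };

static struct buf *buf_get(int fd)	/* stand-in for dma_buf_get() */
{
	struct buf *b = malloc(sizeof(*b));

	if (b)
		b->refs = 1;
	(void)fd;
	return b;
}

static void buf_put(struct buf *b)	/* stand-in for dma_buf_put() */
{
	if (--b->refs == 0)
		free(b);
}

static int import(struct buf *b)	/* core: caller owns a reference */
{
	return b->refs > 0 ? 0 : -1;
}

static int import_fd(int fd)		/* wrapper: fd -> object -> core */
{
	struct buf *b = buf_get(fd);
	int ret;

	if (!b)
		return -1;
	ret = import(b);
	buf_put(b);	/* drop the reference the wrapper took */
	return ret;
}

int main(void)
{
	printf("import_fd: %d\n", import_fd(3));	/* 0 */
	return 0;
}
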
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 9156d8238c97..1fb0d81556da 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -81,7 +81,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
if (align > PAGE_SIZE)
return -EINVAL;
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
@@ -117,7 +117,7 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
if (ion_buffer_cached(buffer))
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
ion_carveout_free(heap, paddr, buffer->size);
sg_free_table(table);
@@ -163,11 +163,11 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
if (ret)
return ERR_PTR(ret);
- carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+ carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
if (!carveout_heap)
return ERR_PTR(-ENOMEM);
- carveout_heap->pool = gen_pool_create(12, -1);
+ carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
if (!carveout_heap->pool) {
kfree(carveout_heap);
return ERR_PTR(-ENOMEM);
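
Replacing the literal 12 with PAGE_SHIFT in gen_pool_create() matters
because 12 is the page shift only on 4 KiB-page systems. A quick
userspace check of the page-size/shift relationship:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int shift = 0;

	while ((1L << shift) < page)
		shift++;

	/* Prints 4096/12 on x86-64, but 65536/16 on 64 KiB-page arm64,
	 * which is why the hard-coded 12 was wrong in general. */
	printf("page=%ld shift=%d\n", page, shift);
	return 0;
}
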
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index fd7e23e0c06e..1fe80165a462 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -149,8 +149,8 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
{
- struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
- GFP_KERNEL);
+ struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+
if (!pool)
return NULL;
pool->high_count = 0;
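
The kmalloc(sizeof(*pool), ...) form adopted above is the kernel's
preferred idiom: the size expression follows the pointee type
automatically if the declaration ever changes. A trivial userspace
equivalent:

#include <stdio.h>
#include <stdlib.h>

struct pool { int high_count, low_count; };

int main(void)
{
	struct pool *pool = malloc(sizeof(*pool));	/* not sizeof(struct pool) */

	if (!pool)
		return 1;
	printf("allocated %zu bytes\n", sizeof(*pool));
	free(pool);
	return 0;
}
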
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index d4c3e5512dd5..b69dfc706440 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -27,7 +27,7 @@
#include "ion_priv.h"
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
- __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
+ __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index da34bc12cd7c..83a3af06d01c 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -285,8 +285,8 @@ static int __init ion_test_init(void)
{
ion_test_pdev = platform_device_register_simple("ion-test",
-1, NULL, 0);
- if (!ion_test_pdev)
- return -ENODEV;
+ if (IS_ERR(ion_test_pdev))
+ return PTR_ERR(ion_test_pdev);
return platform_driver_probe(&ion_test_platform_driver, ion_test_probe);
}
diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c
index 4d3c516cc15e..49e55e5acead 100644
--- a/drivers/staging/android/ion/tegra/tegra_ion.c
+++ b/drivers/staging/android/ion/tegra/tegra_ion.c
@@ -33,12 +33,11 @@ static int tegra_ion_probe(struct platform_device *pdev)
num_heaps = pdata->nr;
- heaps = devm_kzalloc(&pdev->dev,
- sizeof(struct ion_heap *) * pdata->nr,
- GFP_KERNEL);
+ heaps = devm_kcalloc(&pdev->dev, pdata->nr,
+ sizeof(struct ion_heap *), GFP_KERNEL);
idev = ion_device_create(NULL);
- if (IS_ERR_OR_NULL(idev))
+ if (IS_ERR(idev))
return PTR_ERR(idev);
/* create the heaps as specified in the board file */
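
devm_kcalloc() is preferred above because calloc-style allocators
check the count * size multiplication for overflow, which an
open-coded size computation silently wraps. A userspace demonstration
with malloc/calloc standing in for the devm_* helpers:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = (size_t)-1 / 4 + 2;	/* n * 8 overflows size_t */

	void *a = malloc(n * 8);	/* size wraps: may "succeed" tiny */
	void *b = calloc(n, 8);		/* overflow detected: NULL */

	printf("malloc=%p calloc=%p\n", a, b);
	free(a);
	free(b);
	return 0;
}
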
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 8b5a4a82d8b8..2509e5df7244 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -50,6 +50,7 @@ static short lowmem_adj[6] = {
6,
12,
};
+
static int lowmem_adj_size = 4;
static int lowmem_minfree[6] = {
3 * 512, /* 6MB */
@@ -57,6 +58,7 @@ static int lowmem_minfree[6] = {
4 * 1024, /* 16MB */
16 * 1024, /* 64MB */
};
+
static int lowmem_minfree_size = 4;
static unsigned long lowmem_deathpending_timeout;
@@ -84,6 +86,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
int tasksize;
int i;
short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
+ int minfree = 0;
int selected_tasksize = 0;
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
@@ -97,8 +100,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (lowmem_minfree_size < array_size)
array_size = lowmem_minfree_size;
for (i = 0; i < array_size; i++) {
- if (other_free < lowmem_minfree[i] &&
- other_file < lowmem_minfree[i]) {
+ minfree = lowmem_minfree[i];
+ if (other_free < minfree && other_file < minfree) {
min_score_adj = lowmem_adj[i];
break;
}
@@ -153,8 +156,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
selected = p;
selected_tasksize = tasksize;
selected_oom_score_adj = oom_score_adj;
- lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
- p->pid, p->comm, oom_score_adj, tasksize);
+ lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
+ p->comm, p->pid, oom_score_adj, tasksize);
}
if (selected) {
task_lock(selected);
@@ -167,9 +170,18 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (selected->mm)
mark_oom_victim(selected);
task_unlock(selected);
- lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
- selected->pid, selected->comm,
- selected_oom_score_adj, selected_tasksize);
+ lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
+ " to free %ldkB on behalf of '%s' (%d) because\n"
+ " cache %ldkB is below limit %ldkB for oom_score_adj %hd\n"
+ " Free memory is %ldkB above reserved\n",
+ selected->comm, selected->pid,
+ selected_oom_score_adj,
+ selected_tasksize * (long)(PAGE_SIZE / 1024),
+ current->comm, current->pid,
+ other_file * (long)(PAGE_SIZE / 1024),
+ minfree * (long)(PAGE_SIZE / 1024),
+ min_score_adj,
+ other_free * (long)(PAGE_SIZE / 1024));
lowmem_deathpending_timeout = jiffies + HZ;
rem += selected_tasksize;
}
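
The lowmemorykiller change caches the matching lowmem_minfree[]
threshold so the expanded kill message can report it. A userspace
sketch of the threshold walk lowmem_scan() performs, with sample
tables mirroring the module defaults:

#include <stdio.h>

static short lowmem_adj[4] = { 0, 1, 6, 12 };
static int lowmem_minfree[4] = { 3 * 512, 2 * 1024, 4 * 1024, 16 * 1024 };

int main(void)
{
	int other_free = 1800, other_file = 1500;	/* sample page counts */
	short min_score_adj = 1001;			/* OOM_SCORE_ADJ_MAX + 1 */
	int minfree = 0;
	int i;

	/* First slot that both counters fall under picks the adj score. */
	for (i = 0; i < 4; i++) {
		minfree = lowmem_minfree[i];
		if (other_free < minfree && other_file < minfree) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	printf("minfree=%d min_score_adj=%hd\n", minfree, min_score_adj);
	return 0;	/* prints minfree=2048 min_score_adj=1 */
}
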
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index c4ff1679ebbc..af39ff58fa33 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -25,15 +25,7 @@
#include "sw_sync.h"
-static int sw_sync_cmp(u32 a, u32 b)
-{
- if (a == b)
- return 0;
-
- return ((s32)a - (s32)b) < 0 ? -1 : 1;
-}
-
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
{
struct sw_sync_pt *pt;
@@ -42,47 +34,17 @@ struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
pt->value = value;
- return (struct sync_pt *)pt;
+ return (struct fence *)pt;
}
EXPORT_SYMBOL(sw_sync_pt_create);
-static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
- struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt_parent(sync_pt);
-
- return (struct sync_pt *)sw_sync_pt_create(obj, pt->value);
-}
-
-static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
+static int sw_sync_fence_has_signaled(struct fence *fence)
{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+ struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
struct sw_sync_timeline *obj =
- (struct sw_sync_timeline *)sync_pt_parent(sync_pt);
-
- return sw_sync_cmp(obj->value, pt->value) >= 0;
-}
-
-static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
-{
- struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
- struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
-
- return sw_sync_cmp(pt_a->value, pt_b->value);
-}
-
-static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
- void *data, int size)
-{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
- if (size < sizeof(pt->value))
- return -ENOMEM;
+ (struct sw_sync_timeline *)fence_parent(fence);
- memcpy(data, &pt->value, sizeof(pt->value));
-
- return sizeof(pt->value);
+ return (pt->value > obj->value) ? 0 : 1;
}
static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
@@ -93,22 +55,18 @@ static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
snprintf(str, size, "%d", timeline->value);
}
-static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
- char *str, int size)
+static void sw_sync_fence_value_str(struct fence *fence, char *str, int size)
{
- struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+ struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
snprintf(str, size, "%d", pt->value);
}
static struct sync_timeline_ops sw_sync_timeline_ops = {
.driver_name = "sw_sync",
- .dup = sw_sync_pt_dup,
- .has_signaled = sw_sync_pt_has_signaled,
- .compare = sw_sync_pt_compare,
- .fill_driver_data = sw_sync_fill_driver_data,
+ .has_signaled = sw_sync_fence_has_signaled,
.timeline_value_str = sw_sync_timeline_value_str,
- .pt_value_str = sw_sync_pt_value_str,
+ .fence_value_str = sw_sync_fence_value_str,
};
struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
@@ -129,132 +87,3 @@ void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
sync_timeline_signal(&obj->obj);
}
EXPORT_SYMBOL(sw_sync_timeline_inc);
-
-#ifdef CONFIG_SW_SYNC_USER
-/* *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync create a new sync obj */
-static int sw_sync_open(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj;
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, current);
-
- obj = sw_sync_timeline_create(task_comm);
- if (!obj)
- return -ENOMEM;
-
- file->private_data = obj;
-
- return 0;
-}
-
-static int sw_sync_release(struct inode *inode, struct file *file)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- sync_timeline_destroy(&obj->obj);
- return 0;
-}
-
-static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_pt *pt;
- struct sync_fence *fence;
- struct sw_sync_create_fence_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err;
- }
-
- pt = sw_sync_pt_create(obj, data.value);
- if (!pt) {
- err = -ENOMEM;
- goto err;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence = sync_fence_create(data.name, pt);
- if (!fence) {
- sync_pt_free(pt);
- err = -ENOMEM;
- goto err;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- sync_fence_put(fence);
- err = -EFAULT;
- goto err;
- }
-
- sync_fence_install(fence, fd);
-
- return 0;
-
-err:
- put_unused_fd(fd);
- return err;
-}
-
-static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
-{
- u32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- sw_sync_timeline_inc(obj, value);
-
- return 0;
-}
-
-static long sw_sync_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sw_sync_timeline *obj = file->private_data;
-
- switch (cmd) {
- case SW_SYNC_IOC_CREATE_FENCE:
- return sw_sync_ioctl_create_fence(obj, arg);
-
- case SW_SYNC_IOC_INC:
- return sw_sync_ioctl_inc(obj, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sw_sync_fops = {
- .owner = THIS_MODULE,
- .open = sw_sync_open,
- .release = sw_sync_release,
- .unlocked_ioctl = sw_sync_ioctl,
- .compat_ioctl = sw_sync_ioctl,
-};
-
-static struct miscdevice sw_sync_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sw_sync",
- .fops = &sw_sync_fops,
-};
-
-static int __init sw_sync_device_init(void)
-{
- return misc_register(&sw_sync_dev);
-}
-device_initcall(sw_sync_device_init);
-
-#endif /* CONFIG_SW_SYNC_USER */
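
Note that the removed sw_sync_cmp() compared timeline values through a
signed difference, which stays meaningful if the u32 counter ever
wraps, while the new sw_sync_fence_has_signaled() compares directly. A
sketch contrasting the two (seq_cmp is a compact variant of the
removed helper, not the exact code):

#include <stdint.h>
#include <stdio.h>

static int seq_cmp(uint32_t a, uint32_t b)
{
	if (a == b)
		return 0;
	return (int32_t)(a - b) < 0 ? -1 : 1;
}

int main(void)
{
	uint32_t timeline = 5;
	uint32_t pt = UINT32_MAX;	/* point issued just before wrap */

	/* Direct compare: pt looks far in the future, so not signaled. */
	printf("direct:  %d\n", pt > timeline ? 0 : 1);		/* 0 */
	/* Signed-difference compare: pt is seen as already passed. */
	printf("seq_cmp: %d\n", seq_cmp(timeline, pt) >= 0);	/* 1 */
	return 0;
}
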
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index c87ae9ebf267..e18667bfb0ca 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -29,7 +29,7 @@ struct sw_sync_timeline {
};
struct sw_sync_pt {
- struct sync_pt pt;
+ struct fence pt;
u32 value;
};
@@ -38,7 +38,7 @@ struct sw_sync_pt {
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
-struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
#else
static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
{
@@ -49,8 +49,8 @@ static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
{
}
-static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
- u32 value)
+static inline struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
{
return NULL;
}
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index ed43796b5b58..3a8f21031440 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -32,7 +32,7 @@
#include "trace/sync.h"
static const struct fence_ops android_fence_ops;
-static const struct file_operations sync_fence_fops;
+static const struct file_operations sync_file_fops;
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name)
@@ -68,9 +68,6 @@ static void sync_timeline_free(struct kref *kref)
sync_timeline_debug_remove(obj);
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
kfree(obj);
}
@@ -93,10 +90,6 @@ void sync_timeline_destroy(struct sync_timeline *obj)
*/
smp_wmb();
- /*
- * signal any children that their parent is going away.
- */
- sync_timeline_signal(obj);
sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
@@ -104,126 +97,115 @@ EXPORT_SYMBOL(sync_timeline_destroy);
void sync_timeline_signal(struct sync_timeline *obj)
{
unsigned long flags;
- LIST_HEAD(signaled_pts);
- struct sync_pt *pt, *next;
+ struct fence *fence, *next;
trace_sync_timeline(obj);
spin_lock_irqsave(&obj->child_list_lock, flags);
- list_for_each_entry_safe(pt, next, &obj->active_list_head,
+ list_for_each_entry_safe(fence, next, &obj->active_list_head,
active_list) {
- if (fence_is_signaled_locked(&pt->base))
- list_del_init(&pt->active_list);
+ if (fence_is_signaled_locked(fence))
+ list_del_init(&fence->active_list);
}
spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
EXPORT_SYMBOL(sync_timeline_signal);
-struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
+struct fence *sync_pt_create(struct sync_timeline *obj, int size)
{
unsigned long flags;
- struct sync_pt *pt;
+ struct fence *fence;
- if (size < sizeof(struct sync_pt))
+ if (size < sizeof(*fence))
return NULL;
- pt = kzalloc(size, GFP_KERNEL);
- if (!pt)
+ fence = kzalloc(size, GFP_KERNEL);
+ if (!fence)
return NULL;
spin_lock_irqsave(&obj->child_list_lock, flags);
sync_timeline_get(obj);
- fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
+ fence_init(fence, &android_fence_ops, &obj->child_list_lock,
obj->context, ++obj->value);
- list_add_tail(&pt->child_list, &obj->child_list_head);
- INIT_LIST_HEAD(&pt->active_list);
+ list_add_tail(&fence->child_list, &obj->child_list_head);
+ INIT_LIST_HEAD(&fence->active_list);
spin_unlock_irqrestore(&obj->child_list_lock, flags);
- return pt;
+ return fence;
}
EXPORT_SYMBOL(sync_pt_create);
-void sync_pt_free(struct sync_pt *pt)
-{
- fence_put(&pt->base);
-}
-EXPORT_SYMBOL(sync_pt_free);
-
-static struct sync_fence *sync_fence_alloc(int size, const char *name)
+static struct sync_file *sync_file_alloc(int size, const char *name)
{
- struct sync_fence *fence;
+ struct sync_file *sync_file;
- fence = kzalloc(size, GFP_KERNEL);
- if (!fence)
+ sync_file = kzalloc(size, GFP_KERNEL);
+ if (!sync_file)
return NULL;
- fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
- fence, 0);
- if (IS_ERR(fence->file))
+ sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
+ sync_file, 0);
+ if (IS_ERR(sync_file->file))
goto err;
- kref_init(&fence->kref);
- strlcpy(fence->name, name, sizeof(fence->name));
+ kref_init(&sync_file->kref);
+ strlcpy(sync_file->name, name, sizeof(sync_file->name));
- init_waitqueue_head(&fence->wq);
+ init_waitqueue_head(&sync_file->wq);
- return fence;
+ return sync_file;
err:
- kfree(fence);
+ kfree(sync_file);
return NULL;
}
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
- struct sync_fence_cb *check;
- struct sync_fence *fence;
+ struct sync_file_cb *check;
+ struct sync_file *sync_file;
- check = container_of(cb, struct sync_fence_cb, cb);
- fence = check->fence;
+ check = container_of(cb, struct sync_file_cb, cb);
+ sync_file = check->sync_file;
- if (atomic_dec_and_test(&fence->status))
- wake_up_all(&fence->wq);
+ if (atomic_dec_and_test(&sync_file->status))
+ wake_up_all(&sync_file->wq);
}
-/* TODO: implement a create which takes more that one sync_pt */
-struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt)
+/* TODO: implement a create which takes more than one fence */
+struct sync_file *sync_file_create(const char *name, struct fence *fence)
{
- struct sync_fence *fence;
+ struct sync_file *sync_file;
- fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
- if (!fence)
+ sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]),
+ name);
+ if (!sync_file)
return NULL;
- fence->num_fences = 1;
- atomic_set(&fence->status, 1);
+ sync_file->num_fences = 1;
+ atomic_set(&sync_file->status, 1);
- fence->cbs[0].sync_pt = pt;
- fence->cbs[0].fence = fence;
- if (fence_add_callback(pt, &fence->cbs[0].cb, fence_check_cb_func))
- atomic_dec(&fence->status);
+ sync_file->cbs[0].fence = fence;
+ sync_file->cbs[0].sync_file = sync_file;
+ if (fence_add_callback(fence, &sync_file->cbs[0].cb,
+ fence_check_cb_func))
+ atomic_dec(&sync_file->status);
- sync_fence_debug_add(fence);
+ sync_file_debug_add(sync_file);
- return fence;
+ return sync_file;
}
-EXPORT_SYMBOL(sync_fence_create_dma);
+EXPORT_SYMBOL(sync_file_create);
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
-{
- return sync_fence_create_dma(name, &pt->base);
-}
-EXPORT_SYMBOL(sync_fence_create);
-
-struct sync_fence *sync_fence_fdget(int fd)
+struct sync_file *sync_file_fdget(int fd)
{
struct file *file = fget(fd);
if (!file)
return NULL;
- if (file->f_op != &sync_fence_fops)
+ if (file->f_op != &sync_file_fops)
goto err;
return file->private_data;
@@ -232,70 +214,71 @@ err:
fput(file);
return NULL;
}
-EXPORT_SYMBOL(sync_fence_fdget);
+EXPORT_SYMBOL(sync_file_fdget);
-void sync_fence_put(struct sync_fence *fence)
+void sync_file_put(struct sync_file *sync_file)
{
- fput(fence->file);
+ fput(sync_file->file);
}
-EXPORT_SYMBOL(sync_fence_put);
+EXPORT_SYMBOL(sync_file_put);
-void sync_fence_install(struct sync_fence *fence, int fd)
+void sync_file_install(struct sync_file *sync_file, int fd)
{
- fd_install(fd, fence->file);
+ fd_install(fd, sync_file->file);
}
-EXPORT_SYMBOL(sync_fence_install);
+EXPORT_SYMBOL(sync_file_install);
-static void sync_fence_add_pt(struct sync_fence *fence,
- int *i, struct fence *pt)
+static void sync_file_add_pt(struct sync_file *sync_file, int *i,
+ struct fence *fence)
{
- fence->cbs[*i].sync_pt = pt;
- fence->cbs[*i].fence = fence;
+ sync_file->cbs[*i].fence = fence;
+ sync_file->cbs[*i].sync_file = sync_file;
- if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
- fence_get(pt);
+ if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
+ fence_check_cb_func)) {
+ fence_get(fence);
(*i)++;
}
}
-struct sync_fence *sync_fence_merge(const char *name,
- struct sync_fence *a, struct sync_fence *b)
+struct sync_file *sync_file_merge(const char *name,
+ struct sync_file *a, struct sync_file *b)
{
int num_fences = a->num_fences + b->num_fences;
- struct sync_fence *fence;
+ struct sync_file *sync_file;
int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);
+ unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
- fence = sync_fence_alloc(size, name);
- if (!fence)
+ sync_file = sync_file_alloc(size, name);
+ if (!sync_file)
return NULL;
- atomic_set(&fence->status, num_fences);
+ atomic_set(&sync_file->status, num_fences);
/*
- * Assume sync_fence a and b are both ordered and have no
+ * Assume sync_file a and b are both ordered and have no
* duplicates with the same context.
*
- * If a sync_fence can only be created with sync_fence_merge
- * and sync_fence_create, this is a reasonable assumption.
+ * If a sync_file can only be created with sync_file_merge
+ * and sync_file_create, this is a reasonable assumption.
*/
for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].sync_pt;
- struct fence *pt_b = b->cbs[i_b].sync_pt;
+ struct fence *pt_a = a->cbs[i_a].fence;
+ struct fence *pt_b = b->cbs[i_b].fence;
if (pt_a->context < pt_b->context) {
- sync_fence_add_pt(fence, &i, pt_a);
+ sync_file_add_pt(sync_file, &i, pt_a);
i_a++;
} else if (pt_a->context > pt_b->context) {
- sync_fence_add_pt(fence, &i, pt_b);
+ sync_file_add_pt(sync_file, &i, pt_b);
i_b++;
} else {
if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_fence_add_pt(fence, &i, pt_a);
+ sync_file_add_pt(sync_file, &i, pt_a);
else
- sync_fence_add_pt(fence, &i, pt_b);
+ sync_file_add_pt(sync_file, &i, pt_b);
i_a++;
i_b++;
@@ -303,156 +286,55 @@ struct sync_fence *sync_fence_merge(const char *name,
}
for (; i_a < a->num_fences; i_a++)
- sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);
+ sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
for (; i_b < b->num_fences; i_b++)
- sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);
+ sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
if (num_fences > i)
- atomic_sub(num_fences - i, &fence->status);
- fence->num_fences = i;
-
- sync_fence_debug_add(fence);
- return fence;
-}
-EXPORT_SYMBOL(sync_fence_merge);
-
-int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
- int wake_flags, void *key)
-{
- struct sync_fence_waiter *wait;
-
- wait = container_of(curr, struct sync_fence_waiter, work);
- list_del_init(&wait->work.task_list);
-
- wait->callback(wait->work.private, wait);
- return 1;
-}
-
-int sync_fence_wait_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- int err = atomic_read(&fence->status);
- unsigned long flags;
-
- if (err < 0)
- return err;
-
- if (!err)
- return 1;
-
- init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
- waiter->work.private = fence;
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- err = atomic_read(&fence->status);
- if (err > 0)
- __add_wait_queue_tail(&fence->wq, &waiter->work);
- spin_unlock_irqrestore(&fence->wq.lock, flags);
-
- if (err < 0)
- return err;
-
- return !err;
-}
-EXPORT_SYMBOL(sync_fence_wait_async);
-
-int sync_fence_cancel_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- if (!list_empty(&waiter->work.task_list))
- list_del_init(&waiter->work.task_list);
- else
- ret = -ENOENT;
- spin_unlock_irqrestore(&fence->wq.lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(sync_fence_cancel_async);
-
-int sync_fence_wait(struct sync_fence *fence, long timeout)
-{
- long ret;
- int i;
+ atomic_sub(num_fences - i, &sync_file->status);
+ sync_file->num_fences = i;
- if (timeout < 0)
- timeout = MAX_SCHEDULE_TIMEOUT;
- else
- timeout = msecs_to_jiffies(timeout);
-
- trace_sync_wait(fence, 1);
- for (i = 0; i < fence->num_fences; ++i)
- trace_sync_pt(fence->cbs[i].sync_pt);
- ret = wait_event_interruptible_timeout(fence->wq,
- atomic_read(&fence->status) <= 0,
- timeout);
- trace_sync_wait(fence, 0);
-
- if (ret < 0) {
- return ret;
- } else if (ret == 0) {
- if (timeout) {
- pr_info("fence timeout on [%p] after %dms\n", fence,
- jiffies_to_msecs(timeout));
- sync_dump();
- }
- return -ETIME;
- }
-
- ret = atomic_read(&fence->status);
- if (ret) {
- pr_info("fence error %ld on [%p]\n", ret, fence);
- sync_dump();
- }
- return ret;
+ sync_file_debug_add(sync_file);
+ return sync_file;
}
-EXPORT_SYMBOL(sync_fence_wait);
+EXPORT_SYMBOL(sync_file_merge);
static const char *android_fence_get_driver_name(struct fence *fence)
{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
+ struct sync_timeline *parent = fence_parent(fence);
return parent->ops->driver_name;
}
static const char *android_fence_get_timeline_name(struct fence *fence)
{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
+ struct sync_timeline *parent = fence_parent(fence);
return parent->name;
}
static void android_fence_release(struct fence *fence)
{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
+ struct sync_timeline *parent = fence_parent(fence);
unsigned long flags;
spin_lock_irqsave(fence->lock, flags);
- list_del(&pt->child_list);
- if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
- list_del(&pt->active_list);
+ list_del(&fence->child_list);
+ if (WARN_ON_ONCE(!list_empty(&fence->active_list)))
+ list_del(&fence->active_list);
spin_unlock_irqrestore(fence->lock, flags);
- if (parent->ops->free_pt)
- parent->ops->free_pt(pt);
-
sync_timeline_put(parent);
- fence_free(&pt->base);
+ fence_free(fence);
}
static bool android_fence_signaled(struct fence *fence)
{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
+ struct sync_timeline *parent = fence_parent(fence);
int ret;
- ret = parent->ops->has_signaled(pt);
+ ret = parent->ops->has_signaled(fence);
if (ret < 0)
fence->status = ret;
return ret;
@@ -460,46 +342,32 @@ static bool android_fence_signaled(struct fence *fence)
static bool android_fence_enable_signaling(struct fence *fence)
{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
+ struct sync_timeline *parent = fence_parent(fence);
if (android_fence_signaled(fence))
return false;
- list_add_tail(&pt->active_list, &parent->active_list_head);
+ list_add_tail(&fence->active_list, &parent->active_list_head);
return true;
}
-static int android_fence_fill_driver_data(struct fence *fence,
- void *data, int size)
-{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
-
- if (!parent->ops->fill_driver_data)
- return 0;
- return parent->ops->fill_driver_data(pt, data, size);
-}
-
static void android_fence_value_str(struct fence *fence,
char *str, int size)
{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
+ struct sync_timeline *parent = fence_parent(fence);
- if (!parent->ops->pt_value_str) {
+ if (!parent->ops->fence_value_str) {
if (size)
*str = 0;
return;
}
- parent->ops->pt_value_str(pt, str, size);
+ parent->ops->fence_value_str(fence, str, size);
}
static void android_fence_timeline_value_str(struct fence *fence,
char *str, int size)
{
- struct sync_pt *pt = container_of(fence, struct sync_pt, base);
- struct sync_timeline *parent = sync_pt_parent(pt);
+ struct sync_timeline *parent = fence_parent(fence);
if (!parent->ops->timeline_value_str) {
if (size)
@@ -516,65 +384,57 @@ static const struct fence_ops android_fence_ops = {
.signaled = android_fence_signaled,
.wait = fence_default_wait,
.release = android_fence_release,
- .fill_driver_data = android_fence_fill_driver_data,
.fence_value_str = android_fence_value_str,
.timeline_value_str = android_fence_timeline_value_str,
};
-static void sync_fence_free(struct kref *kref)
+static void sync_file_free(struct kref *kref)
{
- struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+ struct sync_file *sync_file = container_of(kref, struct sync_file,
+ kref);
int i;
- for (i = 0; i < fence->num_fences; ++i) {
- fence_remove_callback(fence->cbs[i].sync_pt, &fence->cbs[i].cb);
- fence_put(fence->cbs[i].sync_pt);
+ for (i = 0; i < sync_file->num_fences; ++i) {
+ fence_remove_callback(sync_file->cbs[i].fence,
+ &sync_file->cbs[i].cb);
+ fence_put(sync_file->cbs[i].fence);
}
- kfree(fence);
+ kfree(sync_file);
}
-static int sync_fence_release(struct inode *inode, struct file *file)
+static int sync_file_release(struct inode *inode, struct file *file)
{
- struct sync_fence *fence = file->private_data;
+ struct sync_file *sync_file = file->private_data;
- sync_fence_debug_remove(fence);
+ sync_file_debug_remove(sync_file);
- kref_put(&fence->kref, sync_fence_free);
+ kref_put(&sync_file->kref, sync_file_free);
return 0;
}
-static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+static unsigned int sync_file_poll(struct file *file, poll_table *wait)
{
- struct sync_fence *fence = file->private_data;
+ struct sync_file *sync_file = file->private_data;
int status;
- poll_wait(file, &fence->wq, wait);
+ poll_wait(file, &sync_file->wq, wait);
- status = atomic_read(&fence->status);
+ status = atomic_read(&sync_file->status);
if (!status)
return POLLIN;
- else if (status < 0)
+ if (status < 0)
return POLLERR;
return 0;
}
-static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
-{
- __s32 value;
-
- if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
- return -EFAULT;
-
- return sync_fence_wait(fence, value);
-}
-
-static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
+static long sync_file_ioctl_merge(struct sync_file *sync_file,
+ unsigned long arg)
{
int fd = get_unused_fd_flags(O_CLOEXEC);
int err;
- struct sync_fence *fence2, *fence3;
+ struct sync_file *fence2, *fence3;
struct sync_merge_data data;
if (fd < 0)
@@ -585,14 +445,14 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
goto err_put_fd;
}
- fence2 = sync_fence_fdget(data.fd2);
+ fence2 = sync_file_fdget(data.fd2);
if (!fence2) {
err = -ENOENT;
goto err_put_fd;
}
data.name[sizeof(data.name) - 1] = '\0';
- fence3 = sync_fence_merge(data.name, fence, fence2);
+ fence3 = sync_file_merge(data.name, sync_file, fence2);
if (!fence3) {
err = -ENOMEM;
goto err_put_fence2;
@@ -604,40 +464,28 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
goto err_put_fence3;
}
- sync_fence_install(fence3, fd);
- sync_fence_put(fence2);
+ sync_file_install(fence3, fd);
+ sync_file_put(fence2);
return 0;
err_put_fence3:
- sync_fence_put(fence3);
+ sync_file_put(fence3);
err_put_fence2:
- sync_fence_put(fence2);
+ sync_file_put(fence2);
err_put_fd:
put_unused_fd(fd);
return err;
}
-static int sync_fill_pt_info(struct fence *fence, void *data, int size)
+static int sync_fill_fence_info(struct fence *fence, void *data, int size)
{
- struct sync_pt_info *info = data;
- int ret;
+ struct sync_fence_info *info = data;
- if (size < sizeof(struct sync_pt_info))
+ if (size < sizeof(*info))
return -ENOMEM;
- info->len = sizeof(struct sync_pt_info);
-
- if (fence->ops->fill_driver_data) {
- ret = fence->ops->fill_driver_data(fence, info->driver_data,
- size - sizeof(*info));
- if (ret < 0)
- return ret;
-
- info->len += ret;
- }
-
strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
sizeof(info->obj_name));
strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
@@ -648,13 +496,13 @@ static int sync_fill_pt_info(struct fence *fence, void *data, int size)
info->status = 0;
info->timestamp_ns = ktime_to_ns(fence->timestamp);
- return info->len;
+ return sizeof(*info);
}
-static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
+static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
unsigned long arg)
{
- struct sync_fence_info_data *data;
+ struct sync_file_info *info;
__u32 size;
__u32 len = 0;
int ret, i;
@@ -662,27 +510,27 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
return -EFAULT;
- if (size < sizeof(struct sync_fence_info_data))
+ if (size < sizeof(struct sync_file_info))
return -EINVAL;
if (size > 4096)
size = 4096;
- data = kzalloc(size, GFP_KERNEL);
- if (!data)
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info)
return -ENOMEM;
- strlcpy(data->name, fence->name, sizeof(data->name));
- data->status = atomic_read(&fence->status);
- if (data->status >= 0)
- data->status = !data->status;
+ strlcpy(info->name, sync_file->name, sizeof(info->name));
+ info->status = atomic_read(&sync_file->status);
+ if (info->status >= 0)
+ info->status = !info->status;
- len = sizeof(struct sync_fence_info_data);
+ len = sizeof(struct sync_file_info);
- for (i = 0; i < fence->num_fences; ++i) {
- struct fence *pt = fence->cbs[i].sync_pt;
+ for (i = 0; i < sync_file->num_fences; ++i) {
+ struct fence *fence = sync_file->cbs[i].fence;
- ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
+ ret = sync_fill_fence_info(fence, (u8 *)info + len, size - len);
if (ret < 0)
goto out;
@@ -690,43 +538,40 @@ static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
len += ret;
}
- data->len = len;
+ info->len = len;
- if (copy_to_user((void __user *)arg, data, len))
+ if (copy_to_user((void __user *)arg, info, len))
ret = -EFAULT;
else
ret = 0;
out:
- kfree(data);
+ kfree(info);
return ret;
}
-static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+static long sync_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct sync_fence *fence = file->private_data;
+ struct sync_file *sync_file = file->private_data;
switch (cmd) {
- case SYNC_IOC_WAIT:
- return sync_fence_ioctl_wait(fence, arg);
-
case SYNC_IOC_MERGE:
- return sync_fence_ioctl_merge(fence, arg);
+ return sync_file_ioctl_merge(sync_file, arg);
case SYNC_IOC_FENCE_INFO:
- return sync_fence_ioctl_fence_info(fence, arg);
+ return sync_file_ioctl_fence_info(sync_file, arg);
default:
return -ENOTTY;
}
}
-static const struct file_operations sync_fence_fops = {
- .release = sync_fence_release,
- .poll = sync_fence_poll,
- .unlocked_ioctl = sync_fence_ioctl,
- .compat_ioctl = sync_fence_ioctl,
+static const struct file_operations sync_file_fops = {
+ .release = sync_file_release,
+ .poll = sync_file_poll,
+ .unlocked_ioctl = sync_file_ioctl,
+ .compat_ioctl = sync_file_ioctl,
};
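
The sync_file code above tracks completion with a countdown: status
starts at num_fences, each fence callback decrements it, and zero
means the whole sync_file has signaled. A C11 sketch of the pattern,
with stdatomic standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

struct sync_file_sketch {
	atomic_int status;
};

static void fence_signaled(struct sync_file_sketch *sf)
{
	/* Mirrors fence_check_cb_func(): the last decrement wakes waiters. */
	if (atomic_fetch_sub(&sf->status, 1) == 1)
		puts("sync_file signaled: wake_up_all()");
}

int main(void)
{
	struct sync_file_sketch sf;
	int num_fences = 3;

	atomic_init(&sf.status, num_fences);
	for (int i = 0; i < num_fences; i++)
		fence_signaled(&sf);
	printf("status=%d\n", atomic_load(&sf.status));	/* 0 */
	return 0;
}
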
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index afa0752275a7..d2a173433a7d 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -18,63 +18,35 @@
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-#include <linux/wait.h>
#include <linux/fence.h>
#include "uapi/sync.h"
struct sync_timeline;
-struct sync_pt;
-struct sync_fence;
+struct sync_file;
/**
* struct sync_timeline_ops - sync object implementation ops
* @driver_name: name of the implementation
- * @dup: duplicate a sync_pt
* @has_signaled: returns:
* 1 if pt has signaled
* 0 if pt has not signaled
* <0 on error
- * @compare: returns:
- * 1 if b will signal before a
- * 0 if a and b will signal at the same time
- * -1 if a will signal before b
- * @free_pt: called before sync_pt is freed
- * @release_obj: called before sync_timeline is freed
- * @fill_driver_data: write implementation specific driver data to data.
- * should return an error if there is not enough room
- * as specified by size. This information is returned
- * to userspace by SYNC_IOC_FENCE_INFO.
* @timeline_value_str: fill str with the value of the sync_timeline's counter
- * @pt_value_str: fill str with the value of the sync_pt
+ * @fence_value_str: fill str with the value of the fence
*/
struct sync_timeline_ops {
const char *driver_name;
/* required */
- struct sync_pt * (*dup)(struct sync_pt *pt);
-
- /* required */
- int (*has_signaled)(struct sync_pt *pt);
-
- /* required */
- int (*compare)(struct sync_pt *a, struct sync_pt *b);
-
- /* optional */
- void (*free_pt)(struct sync_pt *sync_pt);
-
- /* optional */
- void (*release_obj)(struct sync_timeline *sync_timeline);
-
- /* optional */
- int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
+ int (*has_signaled)(struct fence *fence);
/* optional */
void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
int size);
/* optional */
- void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
+ void (*fence_value_str)(struct fence *fence, char *str, int size);
};
/**
@@ -85,7 +57,7 @@ struct sync_timeline_ops {
* @destroyed: set when sync_timeline is destroyed
* @child_list_head: list of children sync_pts for this sync_timeline
* @child_list_lock: lock protecting @child_list_head, destroyed, and
- * sync_pt.status
+ * fence.status
* @active_list_head: list of active (unsignaled/errored) sync_pts
* @sync_timeline_list: membership in global sync_timeline_list
*/
@@ -108,86 +80,44 @@ struct sync_timeline {
#endif
};
-/**
- * struct sync_pt - sync point
- * @fence: base fence class
- * @child_list: membership in sync_timeline.child_list_head
- * @active_list: membership in sync_timeline.active_list_head
- * @signaled_list: membership in temporary signaled_list on stack
- * @fence: sync_fence to which the sync_pt belongs
- * @pt_list: membership in sync_fence.pt_list_head
- * @status: 1: signaled, 0:active, <0: error
- * @timestamp: time which sync_pt status transitioned from active to
- * signaled or error.
- */
-struct sync_pt {
- struct fence base;
-
- struct list_head child_list;
- struct list_head active_list;
-};
-
-static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+static inline struct sync_timeline *fence_parent(struct fence *fence)
{
- return container_of(pt->base.lock, struct sync_timeline,
+ return container_of(fence->lock, struct sync_timeline,
child_list_lock);
}
-struct sync_fence_cb {
+struct sync_file_cb {
struct fence_cb cb;
- struct fence *sync_pt;
- struct sync_fence *fence;
+ struct fence *fence;
+ struct sync_file *sync_file;
};
/**
- * struct sync_fence - sync fence
+ * struct sync_file - sync file to export to the userspace
* @file: file representing this fence
* @kref: reference count on fence.
- * @name: name of sync_fence. Useful for debugging
- * @pt_list_head: list of sync_pts in the fence. immutable once fence
- * is created
- * @status: 0: signaled, >0:active, <0: error
- *
+ * @name: name of sync_file. Useful for debugging
+ * @sync_file_list: membership in global file list
+ * @num_fences: number of fences in the sync_file
* @wq: wait queue for fence signaling
- * @sync_fence_list: membership in global fence list
+ * @status: 0: signaled, >0:active, <0: error
+ * @cbs: sync_pts callback information
*/
-struct sync_fence {
+struct sync_file {
struct file *file;
struct kref kref;
char name[32];
#ifdef CONFIG_DEBUG_FS
- struct list_head sync_fence_list;
+ struct list_head sync_file_list;
#endif
int num_fences;
wait_queue_head_t wq;
atomic_t status;
- struct sync_fence_cb cbs[];
-};
-
-struct sync_fence_waiter;
-typedef void (*sync_callback_t)(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
- * @waiter_list: membership in sync_fence.waiter_list_head
- * @callback: function pointer to call when fence signals
- * @callback_data: pointer to pass to @callback
- */
-struct sync_fence_waiter {
- wait_queue_t work;
- sync_callback_t callback;
+ struct sync_file_cb cbs[];
};
-static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
- sync_callback_t callback)
-{
- INIT_LIST_HEAD(&waiter->work.task_list);
- waiter->callback = callback;
-}
-
/*
* API for sync_timeline implementers
*/
@@ -200,7 +130,8 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
*
* Creates a new sync_timeline which will use the implementation specified by
* @ops. @size bytes will be allocated allowing for implementation specific
- * data to be kept after the generic sync_timeline struct.
+ * data to be kept after the generic sync_timeline struct. Returns the
+ * sync_timeline object or NULL in case of error.
*/
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name);
@@ -211,7 +142,7 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
*
* A sync implementation should call this when the @obj is going away
* (i.e. module unload.) @obj won't actually be freed until all its children
- * sync_pts are freed.
+ * fences are freed.
*/
void sync_timeline_destroy(struct sync_timeline *obj);
@@ -219,148 +150,92 @@ void sync_timeline_destroy(struct sync_timeline *obj);
* sync_timeline_signal() - signal a status change on a sync_timeline
* @obj: sync_timeline to signal
*
- * A sync implementation should call this any time one of it's sync_pts
+ * A sync implementation should call this any time one of its fences
* has signaled or has an error condition.
*/
void sync_timeline_signal(struct sync_timeline *obj);
/**
* sync_pt_create() - creates a sync pt
- * @parent: sync_pt's parent sync_timeline
+ * @parent: fence's parent sync_timeline
* @size: size to allocate for this pt
*
- * Creates a new sync_pt as a child of @parent. @size bytes will be
+ * Creates a new fence as a child of @parent. @size bytes will be
* allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct.
- */
-struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
-
-/**
- * sync_pt_free() - frees a sync pt
- * @pt: sync_pt to free
- *
- * This should only be called on sync_pts which have been created but
- * not added to a fence.
+ * the generic sync_timeline struct. Returns the fence object or
+ * NULL in case of error.
*/
-void sync_pt_free(struct sync_pt *pt);
+struct fence *sync_pt_create(struct sync_timeline *parent, int size);
/**
* sync_fence_create() - creates a sync fence
* @name: name of fence to create
- * @pt: sync_pt to add to the fence
- *
- * Creates a fence containg @pt. Once this is called, the fence takes
- * ownership of @pt.
- */
-struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
-
-/**
- * sync_fence_create_dma() - creates a sync fence from dma-fence
- * @name: name of fence to create
- * @pt: dma-fence to add to the fence
+ * @fence: fence to add to the sync_file
*
- * Creates a fence containg @pt. Once this is called, the fence takes
- * ownership of @pt.
+ * Creates a sync_file containing @fence. Once this is called, the sync_file
+ * takes ownership of @fence.
*/
-struct sync_fence *sync_fence_create_dma(const char *name, struct fence *pt);
+struct sync_file *sync_file_create(const char *name, struct fence *fence);
/*
- * API for sync_fence consumers
+ * API for sync_file consumers
*/
/**
- * sync_fence_merge() - merge two fences
+ * sync_file_merge() - merge two sync_files
* @name: name of new fence
- * @a: fence a
- * @b: fence b
+ * @a: sync_file a
+ * @b: sync_file b
*
- * Creates a new fence which contains copies of all the sync_pts in both
- * @a and @b. @a and @b remain valid, independent fences.
+ * Creates a new sync_file which contains copies of all the fences in both
+ * @a and @b. @a and @b remain valid, independent sync_file. Returns the
+ * new merged sync_file or NULL in case of error.
*/
-struct sync_fence *sync_fence_merge(const char *name,
- struct sync_fence *a, struct sync_fence *b);
+struct sync_file *sync_file_merge(const char *name,
+ struct sync_file *a, struct sync_file *b);
/**
- * sync_fence_fdget() - get a fence from an fd
+ * sync_file_fdget() - get a sync_file from an fd
* @fd: fd referencing a fence
*
- * Ensures @fd references a valid fence, increments the refcount of the backing
- * file, and returns the fence.
+ * Ensures @fd references a valid sync_file, increments the refcount of the
+ * backing file. Returns the sync_file or NULL in case of error.
*/
-struct sync_fence *sync_fence_fdget(int fd);
+struct sync_file *sync_file_fdget(int fd);
/**
- * sync_fence_put() - puts a reference of a sync fence
- * @fence: fence to put
+ * sync_file_put() - puts a reference of a sync_file
+ * @sync_file: sync_file to put
*
- * Puts a reference on @fence. If this is the last reference, the fence and
- * all it's sync_pts will be freed
+ * Puts a reference on @sync_file. If this is the last reference, the
+ * sync_file and all its fences will be freed.
*/
-void sync_fence_put(struct sync_fence *fence);
+void sync_file_put(struct sync_file *sync_file);
/**
- * sync_fence_install() - installs a fence into a file descriptor
- * @fence: fence to install
+ * sync_file_install() - installs a sync_file into a file descriptor
+ * @sync_file: sync_file to install
* @fd: file descriptor in which to install the fence
*
- * Installs @fence into @fd. @fd's should be acquired through
+ * Installs @sync_file into @fd. @fd should be acquired through
* get_unused_fd_flags(O_CLOEXEC).
*/
-void sync_fence_install(struct sync_fence *fence, int fd);
-
-/**
- * sync_fence_wait_async() - registers and async wait on the fence
- * @fence: fence to wait on
- * @waiter: waiter callback struck
- *
- * Returns 1 if @fence has already signaled.
- *
- * Registers a callback to be called when @fence signals or has an error.
- * @waiter should be initialized with sync_fence_waiter_init().
- */
-int sync_fence_wait_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_cancel_async() - cancels an async wait
- * @fence: fence to wait on
- * @waiter: waiter callback struck
- *
- * returns 0 if waiter was removed from fence's async waiter list.
- * returns -ENOENT if waiter was not found on fence's async waiter list.
- *
- * Cancels a previously registered async wait. Will fail gracefully if
- * @waiter was never registered or if @fence has already signaled @waiter.
- */
-int sync_fence_cancel_async(struct sync_fence *fence,
- struct sync_fence_waiter *waiter);
-
-/**
- * sync_fence_wait() - wait on fence
- * @fence: fence to wait on
- * @tiemout: timeout in ms
- *
- * Wait for @fence to be signaled or have an error. Waits indefinitely
- * if @timeout < 0
- */
-int sync_fence_wait(struct sync_fence *fence, long timeout);
+void sync_file_install(struct sync_file *sync_file, int fd);
#ifdef CONFIG_DEBUG_FS
void sync_timeline_debug_add(struct sync_timeline *obj);
void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_fence_debug_add(struct sync_fence *fence);
-void sync_fence_debug_remove(struct sync_fence *fence);
+void sync_file_debug_add(struct sync_file *fence);
+void sync_file_debug_remove(struct sync_file *fence);
void sync_dump(void);
#else
# define sync_timeline_debug_add(obj)
# define sync_timeline_debug_remove(obj)
-# define sync_fence_debug_add(fence)
-# define sync_fence_debug_remove(fence)
+# define sync_file_debug_add(fence)
+# define sync_file_debug_remove(fence)
# define sync_dump()
#endif
-int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
- int wake_flags, void *key);
#endif /* _LINUX_SYNC_H */
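Taken together, the renamed creation path is sync_pt_create() on a timeline, sync_file_create() to wrap the fence, and sync_file_install() to expose it as a file descriptor. A condensed sketch of the sequence, mirroring sw_sync_ioctl_create_fence() later in this patch (error handling omitted; obj is assumed to be an existing sync_timeline):

    struct fence *fence;
    struct sync_file *sync_file;
    int fd = get_unused_fd_flags(O_CLOEXEC);

    fence = sync_pt_create(obj, sizeof(*fence));    /* size covers impl-specific data */
    sync_file = sync_file_create("example", fence); /* sync_file takes ownership */
    sync_file_install(sync_file, fd);               /* hand the fd to userspace */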
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index f45d13cdd42b..5a7ec58fbc09 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -26,14 +26,16 @@
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/time64.h>
-#include "sync.h"
+#include "sw_sync.h"
#ifdef CONFIG_DEBUG_FS
+static struct dentry *dbgfs;
+
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_fence_list_head);
-static DEFINE_SPINLOCK(sync_fence_list_lock);
+static LIST_HEAD(sync_file_list_head);
+static DEFINE_SPINLOCK(sync_file_list_lock);
void sync_timeline_debug_add(struct sync_timeline *obj)
{
@@ -53,22 +55,22 @@ void sync_timeline_debug_remove(struct sync_timeline *obj)
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}
-void sync_fence_debug_add(struct sync_fence *fence)
+void sync_file_debug_add(struct sync_file *sync_file)
{
unsigned long flags;
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+ spin_lock_irqsave(&sync_file_list_lock, flags);
+ list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
+ spin_unlock_irqrestore(&sync_file_list_lock, flags);
}
-void sync_fence_debug_remove(struct sync_fence *fence)
+void sync_file_debug_remove(struct sync_file *sync_file)
{
unsigned long flags;
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_del(&fence->sync_fence_list);
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+ spin_lock_irqsave(&sync_file_list_lock, flags);
+ list_del(&sync_file->sync_file_list);
+ spin_unlock_irqrestore(&sync_file_list_lock, flags);
}
static const char *sync_status_str(int status)
@@ -82,39 +84,40 @@ static const char *sync_status_str(int status)
return "error";
}
-static void sync_print_pt(struct seq_file *s, struct fence *pt, bool fence)
+static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
{
int status = 1;
+ struct sync_timeline *parent = fence_parent(fence);
- if (fence_is_signaled_locked(pt))
- status = pt->status;
+ if (fence_is_signaled_locked(fence))
+ status = fence->status;
- seq_printf(s, " %s%spt %s",
- fence && pt->ops->get_timeline_name ?
- pt->ops->get_timeline_name(pt) : "",
- fence ? "_" : "",
+ seq_printf(s, " %s%sfence %s",
+ show ? parent->name : "",
+ show ? "_" : "",
sync_status_str(status));
if (status <= 0) {
struct timespec64 ts64 =
- ktime_to_timespec64(pt->timestamp);
+ ktime_to_timespec64(fence->timestamp);
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if ((!fence || pt->ops->timeline_value_str) &&
- pt->ops->fence_value_str) {
+ if ((!fence || fence->ops->timeline_value_str) &&
+ fence->ops->fence_value_str) {
char value[64];
bool success;
- pt->ops->fence_value_str(pt, value, sizeof(value));
+ fence->ops->fence_value_str(fence, value, sizeof(value));
success = strlen(value);
if (success)
seq_printf(s, ": %s", value);
if (success && fence) {
- pt->ops->timeline_value_str(pt, value, sizeof(value));
+ fence->ops->timeline_value_str(fence, value,
+ sizeof(value));
if (strlen(value))
seq_printf(s, " / %s", value);
@@ -142,38 +145,23 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
spin_lock_irqsave(&obj->child_list_lock, flags);
list_for_each(pos, &obj->child_list_head) {
- struct sync_pt *pt =
- container_of(pos, struct sync_pt, child_list);
- sync_print_pt(s, &pt->base, false);
+ struct fence *fence =
+ container_of(pos, struct fence, child_list);
+ sync_print_fence(s, fence, false);
}
spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
-static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+static void sync_print_sync_file(struct seq_file *s,
+ struct sync_file *sync_file)
{
- wait_queue_t *pos;
- unsigned long flags;
int i;
- seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
- sync_status_str(atomic_read(&fence->status)));
-
- for (i = 0; i < fence->num_fences; ++i) {
- sync_print_pt(s, fence->cbs[i].sync_pt, true);
- }
-
- spin_lock_irqsave(&fence->wq.lock, flags);
- list_for_each_entry(pos, &fence->wq.task_list, task_list) {
- struct sync_fence_waiter *waiter;
-
- if (pos->func != &sync_fence_wake_up_wq)
- continue;
+ seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name,
+ sync_status_str(atomic_read(&sync_file->status)));
- waiter = container_of(pos, struct sync_fence_waiter, work);
-
- seq_printf(s, "waiter %pF\n", waiter->callback);
- }
- spin_unlock_irqrestore(&fence->wq.lock, flags);
+ for (i = 0; i < sync_file->num_fences; ++i)
+ sync_print_fence(s, sync_file->cbs[i].fence, true);
}
static int sync_debugfs_show(struct seq_file *s, void *unused)
@@ -196,33 +184,152 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
seq_puts(s, "fences:\n--------------\n");
- spin_lock_irqsave(&sync_fence_list_lock, flags);
- list_for_each(pos, &sync_fence_list_head) {
- struct sync_fence *fence =
- container_of(pos, struct sync_fence, sync_fence_list);
+ spin_lock_irqsave(&sync_file_list_lock, flags);
+ list_for_each(pos, &sync_file_list_head) {
+ struct sync_file *sync_file =
+ container_of(pos, struct sync_file, sync_file_list);
- sync_print_fence(s, fence);
+ sync_print_sync_file(s, sync_file);
seq_puts(s, "\n");
}
- spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+ spin_unlock_irqrestore(&sync_file_list_lock, flags);
return 0;
}
-static int sync_debugfs_open(struct inode *inode, struct file *file)
+static int sync_info_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sync_debugfs_show, inode->i_private);
}
-static const struct file_operations sync_debugfs_fops = {
- .open = sync_debugfs_open,
+static const struct file_operations sync_info_debugfs_fops = {
+ .open = sync_info_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
+/*
+ * *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct sw_sync_timeline *obj;
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, current);
+
+ obj = sw_sync_timeline_create(task_comm);
+ if (!obj)
+ return -ENOMEM;
+
+ file->private_data = obj;
+
+ return 0;
+}
+
+static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct sw_sync_timeline *obj = file->private_data;
+
+ sync_timeline_destroy(&obj->obj);
+ return 0;
+}
+
+static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
+ unsigned long arg)
+{
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ int err;
+ struct fence *fence;
+ struct sync_file *sync_file;
+ struct sw_sync_create_fence_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ fence = sw_sync_pt_create(obj, data.value);
+ if (!fence) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ data.name[sizeof(data.name) - 1] = '\0';
+ sync_file = sync_file_create(data.name, fence);
+ if (!sync_file) {
+ fence_put(fence);
+ err = -ENOMEM;
+ goto err;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ sync_file_put(sync_file);
+ err = -EFAULT;
+ goto err;
+ }
+
+ sync_file_install(sync_file, fd);
+
+ return 0;
+
+err:
+ put_unused_fd(fd);
+ return err;
+}
+
+static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
+{
+ u32 value;
+
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+ return -EFAULT;
+
+ sw_sync_timeline_inc(obj, value);
+
+ return 0;
+}
+
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sw_sync_timeline *obj = file->private_data;
+
+ switch (cmd) {
+ case SW_SYNC_IOC_CREATE_FENCE:
+ return sw_sync_ioctl_create_fence(obj, arg);
+
+ case SW_SYNC_IOC_INC:
+ return sw_sync_ioctl_inc(obj, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations sw_sync_debugfs_fops = {
+ .open = sw_sync_debugfs_open,
+ .release = sw_sync_debugfs_release,
+ .unlocked_ioctl = sw_sync_ioctl,
+ .compat_ioctl = sw_sync_ioctl,
+};
+
static __init int sync_debugfs_init(void)
{
- debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+ dbgfs = debugfs_create_dir("sync", NULL);
+
+ debugfs_create_file("info", 0444, dbgfs, NULL, &sync_info_debugfs_fops);
+ debugfs_create_file("sw_sync", 0644, dbgfs, NULL,
+ &sw_sync_debugfs_fops);
+
return 0;
}
late_initcall(sync_debugfs_init);
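With this change the sw_sync user interface lives under debugfs: opening the node creates a timeline, and the two ioctls above drive it. A minimal userspace sketch (the SW_SYNC_IOC_* codes and struct layout are assumed to match the staging uapi sw_sync.h header):

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    /* SW_SYNC_IOC_CREATE_FENCE, SW_SYNC_IOC_INC and the struct below
     * come from drivers/staging/android/uapi/sw_sync.h */

    int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR); /* creates a timeline */

    struct sw_sync_create_fence_data data = { .value = 1, .name = "test" };
    ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);  /* data.fence now holds the fence fd */

    uint32_t step = 1;
    ioctl(tl, SW_SYNC_IOC_INC, &step);           /* advance timeline, signaling the fence */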
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index bcd9924d4631..914fd1005467 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -92,9 +92,8 @@ static int timed_gpio_probe(struct platform_device *pdev)
if (!pdata)
return -EBUSY;
- gpio_data = devm_kzalloc(&pdev->dev,
- sizeof(*gpio_data) * pdata->num_gpios,
- GFP_KERNEL);
+ gpio_data = devm_kcalloc(&pdev->dev, pdata->num_gpios,
+ sizeof(*gpio_data), GFP_KERNEL);
if (!gpio_data)
return -ENOMEM;
diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h
index 77edb977a7bf..a0f80f41677e 100644
--- a/drivers/staging/android/trace/sync.h
+++ b/drivers/staging/android/trace/sync.h
@@ -32,50 +32,6 @@ TRACE_EVENT(sync_timeline,
TP_printk("name=%s value=%s", __get_str(name), __entry->value)
);
-TRACE_EVENT(sync_wait,
- TP_PROTO(struct sync_fence *fence, int begin),
-
- TP_ARGS(fence, begin),
-
- TP_STRUCT__entry(
- __string(name, fence->name)
- __field(s32, status)
- __field(u32, begin)
- ),
-
- TP_fast_assign(
- __assign_str(name, fence->name);
- __entry->status = atomic_read(&fence->status);
- __entry->begin = begin;
- ),
-
- TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end",
- __get_str(name), __entry->status)
-);
-
-TRACE_EVENT(sync_pt,
- TP_PROTO(struct fence *pt),
-
- TP_ARGS(pt),
-
- TP_STRUCT__entry(
- __string(timeline, pt->ops->get_timeline_name(pt))
- __array(char, value, 32)
- ),
-
- TP_fast_assign(
- __assign_str(timeline, pt->ops->get_timeline_name(pt));
- if (pt->ops->fence_value_str) {
- pt->ops->fence_value_str(pt, __entry->value,
- sizeof(__entry->value));
- } else {
- __entry->value[0] = '\0';
- }
- ),
-
- TP_printk("name=%s value=%s", __get_str(timeline), __entry->value)
-);
-
#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
/* This part must be outside protection */
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
index ba4743c71d6b..13df42d200b7 100644
--- a/drivers/staging/android/uapi/ashmem.h
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -13,6 +13,7 @@
#define _UAPI_LINUX_ASHMEM_H
#include <linux/ioctl.h>
+#include <linux/types.h>
#define ASHMEM_NAME_LEN 256
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
index e964c751f6b8..a0cf357e598d 100644
--- a/drivers/staging/android/uapi/sync.h
+++ b/drivers/staging/android/uapi/sync.h
@@ -27,51 +27,39 @@ struct sync_merge_data {
};
/**
- * struct sync_pt_info - detailed sync_pt information
- * @len: length of sync_pt_info including any driver_data
+ * struct sync_fence_info - detailed fence information
* @obj_name: name of parent sync_timeline
* @driver_name: name of driver implementing the parent
- * @status: status of the sync_pt 0:active 1:signaled <0:error
+ * @status: status of the fence 0:active 1:signaled <0:error
* @timestamp_ns: timestamp of status change in nanoseconds
- * @driver_data: any driver dependent data
*/
-struct sync_pt_info {
- __u32 len;
+struct sync_fence_info {
char obj_name[32];
char driver_name[32];
__s32 status;
__u64 timestamp_ns;
-
- __u8 driver_data[0];
};
/**
- * struct sync_fence_info_data - data returned from fence info ioctl
+ * struct sync_file_info - data returned from fence info ioctl
* @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_fence_data returned to userspace
- * including pt_info.
+ * ioctl returns length of sync_file_info returned to
+ * userspace including pt_info.
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
- * @pt_info: a sync_pt_info struct for every sync_pt in the fence
+ * @sync_fence_info: array of sync_fence_info for every fence in the sync_file
*/
-struct sync_fence_info_data {
+struct sync_file_info {
__u32 len;
char name[32];
__s32 status;
- __u8 pt_info[0];
+ __u8 sync_fence_info[0];
};
#define SYNC_IOC_MAGIC '>'
/**
- * DOC: SYNC_IOC_WAIT - wait for a fence to signal
- *
- * pass timeout in milliseconds. Waits indefinitely timeout < 0.
- */
-#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
-
-/**
* DOC: SYNC_IOC_MERGE - merge two fences
*
* Takes a struct sync_merge_data. Creates a new fence containing copies of
@@ -83,15 +71,14 @@ struct sync_fence_info_data {
/**
* DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
*
- * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Takes a struct sync_file_info with extra space allocated for pt_info.
* Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_fence_info_data including
+ * updated to reflect the total size of the sync_file_info including
* pt_info.
*
* pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
* To iterate over the sync_pt_infos, use the sync_pt_info.len field.
*/
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
- struct sync_fence_info_data)
+#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2, struct sync_file_info)
#endif /* _UAPI_LINUX_SYNC_H */
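A practical consequence of dropping @len and @driver_data from the per-fence record is that sync_fence_info entries are now fixed-size, so userspace can iterate them by simple indexing instead of chasing per-record lengths. A sketch (buffer size illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    struct sync_file_info *info = calloc(1, 4096);

    info->len = 4096;
    if (ioctl(fd, SYNC_IOC_FENCE_INFO, info) == 0) {
            struct sync_fence_info *f = (struct sync_fence_info *)info->sync_fence_info;
            size_t n = (info->len - sizeof(*info)) / sizeof(*f); /* fixed-size records */

            for (size_t i = 0; i < n; i++)
                    printf("%s: status %d\n", f[i].obj_name, f[i].status);
    }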
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index 912c96b0536d..bb63ece4d766 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -27,7 +27,6 @@
#include "board.h"
-
static struct fb_videomode lcdc0_mode = {
.name = "AMPIER/AM-800480",
.xres = 800,
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
index 965afc79aadd..45807d8287d1 100644
--- a/drivers/staging/board/board.c
+++ b/drivers/staging/board/board.c
@@ -155,7 +155,6 @@ static int board_staging_add_dev_domain(struct platform_device *pdev,
if (IS_ERR(pd)) {
pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
return PTR_ERR(pd);
-
}
pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index b8e2f611fd47..7b8be5293883 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -32,8 +32,8 @@
#define WZRD_CLK_CFG_REG(n) (0x200 + 4 * (n))
-#define WZRD_CLkOUT0_FRAC_EN BIT(18)
-#define WZRD_CLkFBOUT_FRAC_EN BIT(26)
+#define WZRD_CLKOUT0_FRAC_EN BIT(18)
+#define WZRD_CLKFBOUT_FRAC_EN BIT(26)
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
@@ -71,6 +71,7 @@ struct clk_wzrd {
int speed_grade;
bool suspended;
};
+
#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
/* maximum frequencies for input/output clocks per speed grade */
@@ -195,9 +196,9 @@ static int clk_wzrd_probe(struct platform_device *pdev)
/* we don't support fractional div/mul yet */
reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_CLkFBOUT_FRAC_EN;
+ WZRD_CLKFBOUT_FRAC_EN;
reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
- WZRD_CLkOUT0_FRAC_EN;
+ WZRD_CLKOUT0_FRAC_EN;
if (reg)
dev_warn(&pdev->dev, "fractional div/mul not supported\n");
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
index b68fbdb5eebf..f733c017f181 100644
--- a/drivers/staging/comedi/TODO
+++ b/drivers/staging/comedi/TODO
@@ -3,6 +3,7 @@ TODO:
- Lindent
- remove all wrappers
- audit userspace interface
+ - Fix coverity 1195261
- cleanup the individual comedi drivers as well
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 83bd309d011b..ad5297f6d418 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -1,6 +1,6 @@
/*
- * include/comedi.h (installed as /usr/include/comedi.h)
- * header file for comedi
+ * comedi.h
+ * header file for COMEDI user API
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
@@ -72,12 +72,12 @@
#define CR_AREF(a) (((a) >> 24) & 0x03)
#define CR_FLAGS_MASK 0xfc000000
-#define CR_ALT_FILTER (1 << 26)
+#define CR_ALT_FILTER 0x04000000
#define CR_DITHER CR_ALT_FILTER
#define CR_DEGLITCH CR_ALT_FILTER
-#define CR_ALT_SOURCE (1 << 27)
-#define CR_EDGE (1 << 30)
-#define CR_INVERT (1 << 31)
+#define CR_ALT_SOURCE 0x08000000
+#define CR_EDGE 0x40000000
+#define CR_INVERT 0x80000000
#define AREF_GROUND 0x00 /* analog ref = analog ground */
#define AREF_COMMON 0x01 /* analog ref = analog common */
@@ -120,13 +120,6 @@
#define INSN_WAIT (5 | INSN_MASK_WRITE | INSN_MASK_SPECIAL)
#define INSN_INTTRIG (6 | INSN_MASK_WRITE | INSN_MASK_SPECIAL)
-/* trigger flags */
-/* These flags are used in comedi_trig structures */
-
-#define TRIG_DITHER 0x0002 /* enable dithering */
-#define TRIG_DEGLITCH 0x0004 /* enable deglitching */
-#define TRIG_CONFIG 0x0010 /* perform configuration, not triggering */
-
/* command flags */
/* These flags are used in comedi_cmd structures */
@@ -190,11 +183,8 @@
#define SDF_MAXDATA 0x0010 /* maxdata depends on channel */
#define SDF_FLAGS 0x0020 /* flags depend on channel */
#define SDF_RANGETYPE 0x0040 /* range type depends on channel */
-#define SDF_MODE0 0x0080 /* can do mode 0 */
-#define SDF_MODE1 0x0100 /* can do mode 1 */
-#define SDF_MODE2 0x0200 /* can do mode 2 */
-#define SDF_MODE3 0x0400 /* can do mode 3 */
-#define SDF_MODE4 0x0800 /* can do mode 4 */
+#define SDF_PWM_COUNTER 0x0080 /* PWM can automatically switch off */
+#define SDF_PWM_HBRIDGE 0x0100 /* PWM is signed (H-bridge) */
#define SDF_CMD 0x1000 /* can do commands (deprecated) */
#define SDF_SOFT_CALIBRATED 0x2000 /* subdevice uses software calibration */
#define SDF_CMD_WRITE 0x4000 /* can do output commands */
@@ -217,30 +207,94 @@
#define SDF_RUNNING 0x08000000 /* subdevice is acquiring data */
#define SDF_LSAMPL 0x10000000 /* subdevice uses 32-bit samples */
#define SDF_PACKED 0x20000000 /* subdevice can do packed DIO */
-/* re recycle these flags for PWM */
-#define SDF_PWM_COUNTER SDF_MODE0 /* PWM can automatically switch off */
-#define SDF_PWM_HBRIDGE SDF_MODE1 /* PWM is signed (H-bridge) */
/* subdevice types */
+/**
+ * enum comedi_subdevice_type - COMEDI subdevice types
+ * @COMEDI_SUBD_UNUSED: Unused subdevice.
+ * @COMEDI_SUBD_AI: Analog input.
+ * @COMEDI_SUBD_AO: Analog output.
+ * @COMEDI_SUBD_DI: Digital input.
+ * @COMEDI_SUBD_DO: Digital output.
+ * @COMEDI_SUBD_DIO: Digital input/output.
+ * @COMEDI_SUBD_COUNTER: Counter.
+ * @COMEDI_SUBD_TIMER: Timer.
+ * @COMEDI_SUBD_MEMORY: Memory, EEPROM, DPRAM.
+ * @COMEDI_SUBD_CALIB: Calibration DACs.
+ * @COMEDI_SUBD_PROC: Processor, DSP.
+ * @COMEDI_SUBD_SERIAL: Serial I/O.
+ * @COMEDI_SUBD_PWM: Pulse-Width Modulation output.
+ */
enum comedi_subdevice_type {
- COMEDI_SUBD_UNUSED, /* unused by driver */
- COMEDI_SUBD_AI, /* analog input */
- COMEDI_SUBD_AO, /* analog output */
- COMEDI_SUBD_DI, /* digital input */
- COMEDI_SUBD_DO, /* digital output */
- COMEDI_SUBD_DIO, /* digital input/output */
- COMEDI_SUBD_COUNTER, /* counter */
- COMEDI_SUBD_TIMER, /* timer */
- COMEDI_SUBD_MEMORY, /* memory, EEPROM, DPRAM */
- COMEDI_SUBD_CALIB, /* calibration DACs */
- COMEDI_SUBD_PROC, /* processor, DSP */
- COMEDI_SUBD_SERIAL, /* serial IO */
- COMEDI_SUBD_PWM /* PWM */
+ COMEDI_SUBD_UNUSED,
+ COMEDI_SUBD_AI,
+ COMEDI_SUBD_AO,
+ COMEDI_SUBD_DI,
+ COMEDI_SUBD_DO,
+ COMEDI_SUBD_DIO,
+ COMEDI_SUBD_COUNTER,
+ COMEDI_SUBD_TIMER,
+ COMEDI_SUBD_MEMORY,
+ COMEDI_SUBD_CALIB,
+ COMEDI_SUBD_PROC,
+ COMEDI_SUBD_SERIAL,
+ COMEDI_SUBD_PWM
};
/* configuration instructions */
+/**
+ * enum configuration_ids - COMEDI configuration instruction codes
+ * @INSN_CONFIG_DIO_INPUT: Configure digital I/O as input.
+ * @INSN_CONFIG_DIO_OUTPUT: Configure digital I/O as output.
+ * @INSN_CONFIG_DIO_OPENDRAIN: Configure digital I/O as open-drain (or open
+ * collector) output.
+ * @INSN_CONFIG_ANALOG_TRIG: Configure analog trigger.
+ * @INSN_CONFIG_ALT_SOURCE: Configure alternate input source.
+ * @INSN_CONFIG_DIGITAL_TRIG: Configure digital trigger.
+ * @INSN_CONFIG_BLOCK_SIZE: Configure block size for DMA transfers.
+ * @INSN_CONFIG_TIMER_1: Configure divisor for external clock.
+ * @INSN_CONFIG_FILTER: Configure a filter.
+ * @INSN_CONFIG_CHANGE_NOTIFY: Configure change notification for digital
+ * inputs. (New drivers should use
+ * %INSN_CONFIG_DIGITAL_TRIG instead.)
+ * @INSN_CONFIG_SERIAL_CLOCK: Configure clock for serial I/O.
+ * @INSN_CONFIG_BIDIRECTIONAL_DATA: Send and receive byte over serial I/O.
+ * @INSN_CONFIG_DIO_QUERY: Query direction of digital I/O channel.
+ * @INSN_CONFIG_PWM_OUTPUT: Configure pulse-width modulator output.
+ * @INSN_CONFIG_GET_PWM_OUTPUT: Get pulse-width modulator output configuration.
+ * @INSN_CONFIG_ARM: Arm a subdevice or channel.
+ * @INSN_CONFIG_DISARM: Disarm a subdevice or channel.
+ * @INSN_CONFIG_GET_COUNTER_STATUS: Get counter status.
+ * @INSN_CONFIG_RESET: Reset a subdevice or channel.
+ * @INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR: Configure counter/timer as
+ * single pulse generator.
+ * @INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR: Configure counter/timer as
+ * pulse train generator.
+ * @INSN_CONFIG_GPCT_QUADRATURE_ENCODER: Configure counter as a quadrature
+ * encoder.
+ * @INSN_CONFIG_SET_GATE_SRC: Set counter/timer gate source.
+ * @INSN_CONFIG_GET_GATE_SRC: Get counter/timer gate source.
+ * @INSN_CONFIG_SET_CLOCK_SRC: Set counter/timer master clock source.
+ * @INSN_CONFIG_GET_CLOCK_SRC: Get counter/timer master clock source.
+ * @INSN_CONFIG_SET_OTHER_SRC: Set counter/timer "other" source.
+ * @INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE: Get size (in bytes) of subdevice's
+ * on-board FIFOs used during streaming
+ * input/output.
+ * @INSN_CONFIG_SET_COUNTER_MODE: Set counter/timer mode.
+ * @INSN_CONFIG_8254_SET_MODE: (Deprecated) Same as
+ * %INSN_CONFIG_SET_COUNTER_MODE.
+ * @INSN_CONFIG_8254_READ_STATUS: Read status of 8254 counter channel.
+ * @INSN_CONFIG_SET_ROUTING: Set routing for a channel.
+ * @INSN_CONFIG_GET_ROUTING: Get routing for a channel.
+ * @INSN_CONFIG_PWM_SET_PERIOD: Set PWM period in nanoseconds.
+ * @INSN_CONFIG_PWM_GET_PERIOD: Get PWM period in nanoseconds.
+ * @INSN_CONFIG_GET_PWM_STATUS: Get PWM status.
+ * @INSN_CONFIG_PWM_SET_H_BRIDGE: Set PWM H bridge duty cycle and polarity for
+ * a relay simultaneously.
+ * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity.
+ */
enum configuration_ids {
INSN_CONFIG_DIO_INPUT = 0,
INSN_CONFIG_DIO_OUTPUT = 1,
@@ -265,72 +319,76 @@ enum configuration_ids {
INSN_CONFIG_DISARM = 32,
INSN_CONFIG_GET_COUNTER_STATUS = 33,
INSN_CONFIG_RESET = 34,
- /* Use CTR as single pulsegenerator */
INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR = 1001,
- /* Use CTR as pulsetraingenerator */
INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR = 1002,
- /* Use the counter as encoder */
INSN_CONFIG_GPCT_QUADRATURE_ENCODER = 1003,
- INSN_CONFIG_SET_GATE_SRC = 2001, /* Set gate source */
- INSN_CONFIG_GET_GATE_SRC = 2002, /* Get gate source */
- /* Set master clock source */
+ INSN_CONFIG_SET_GATE_SRC = 2001,
+ INSN_CONFIG_GET_GATE_SRC = 2002,
INSN_CONFIG_SET_CLOCK_SRC = 2003,
- INSN_CONFIG_GET_CLOCK_SRC = 2004, /* Get master clock source */
- INSN_CONFIG_SET_OTHER_SRC = 2005, /* Set other source */
- /* INSN_CONFIG_GET_OTHER_SRC = 2006,*//* Get other source */
- /* Get size in bytes of subdevice's on-board fifos used during
- * streaming input/output
- */
+ INSN_CONFIG_GET_CLOCK_SRC = 2004,
+ INSN_CONFIG_SET_OTHER_SRC = 2005,
INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE = 2006,
INSN_CONFIG_SET_COUNTER_MODE = 4097,
- /* INSN_CONFIG_8254_SET_MODE is deprecated */
INSN_CONFIG_8254_SET_MODE = INSN_CONFIG_SET_COUNTER_MODE,
INSN_CONFIG_8254_READ_STATUS = 4098,
INSN_CONFIG_SET_ROUTING = 4099,
INSN_CONFIG_GET_ROUTING = 4109,
- /* PWM */
- INSN_CONFIG_PWM_SET_PERIOD = 5000, /* sets frequency */
- INSN_CONFIG_PWM_GET_PERIOD = 5001, /* gets frequency */
- INSN_CONFIG_GET_PWM_STATUS = 5002, /* is it running? */
- /* sets H bridge: duty cycle and sign bit for a relay at the
- * same time
- */
+ INSN_CONFIG_PWM_SET_PERIOD = 5000,
+ INSN_CONFIG_PWM_GET_PERIOD = 5001,
+ INSN_CONFIG_GET_PWM_STATUS = 5002,
INSN_CONFIG_PWM_SET_H_BRIDGE = 5003,
- /* gets H bridge data: duty cycle and the sign bit */
INSN_CONFIG_PWM_GET_H_BRIDGE = 5004
};
-/*
- * Settings for INSN_CONFIG_DIGITAL_TRIG:
- * data[0] = INSN_CONFIG_DIGITAL_TRIG
- * data[1] = trigger ID
- * data[2] = configuration operation
- * data[3] = configuration parameter 1
- * data[4] = configuration parameter 2
- * data[5] = configuration parameter 3
+/**
+ * enum comedi_digital_trig_op - operations for configuring a digital trigger
+ * @COMEDI_DIGITAL_TRIG_DISABLE: Return digital trigger to its default,
+ * inactive, unconfigured state.
+ * @COMEDI_DIGITAL_TRIG_ENABLE_EDGES: Set rising and/or falling edge inputs
+ * that each can fire the trigger.
+ * @COMEDI_DIGITAL_TRIG_ENABLE_LEVELS: Set a combination of high and/or low
+ * level inputs that can fire the trigger.
+ *
+ * These are used with the %INSN_CONFIG_DIGITAL_TRIG configuration instruction.
+ * The data for the configuration instruction is as follows...
*
- * operation parameter 1 parameter 2 parameter 3
- * --------------------------------- ----------- ----------- -----------
- * COMEDI_DIGITAL_TRIG_DISABLE
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES left-shift rising-edges falling-edges
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS left-shift high-levels low-levels
+ * data[%0] = %INSN_CONFIG_DIGITAL_TRIG
*
- * COMEDI_DIGITAL_TRIG_DISABLE returns the trigger to its default, inactive,
- * unconfigured state.
+ * data[%1] = trigger ID
*
- * COMEDI_DIGITAL_TRIG_ENABLE_EDGES sets the rising and/or falling edge inputs
- * that each can fire the trigger.
+ * data[%2] = configuration operation
*
- * COMEDI_DIGITAL_TRIG_ENABLE_LEVELS sets a combination of high and/or low
- * level inputs that can fire the trigger.
+ * data[%3] = configuration parameter 1
*
- * "left-shift" is useful if the trigger has more than 32 inputs to specify the
- * first input for this configuration.
+ * data[%4] = configuration parameter 2
*
- * Some sequences of INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly)
+ * data[%5] = configuration parameter 3
+ *
+ * The trigger ID (data[%1]) is used to differentiate multiple digital triggers
+ * belonging to the same subdevice. The configuration operation (data[%2]) is
+ * one of the &enum comedi_digital_trig_op values. The configuration
+ * parameters (data[%3], data[%4], and data[%5]) depend on the operation; they
+ * are not used with %COMEDI_DIGITAL_TRIG_DISABLE.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES and %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS,
+ * configuration parameter 1 (data[%3]) contains a "left-shift" value that
+ * specifies the input corresponding to bit 0 of configuration parameters 2
+ * and 3. This is useful if the trigger has more than 32 inputs.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_EDGES, configuration parameter 2 (data[%4])
+ * specifies which of up to 32 inputs have rising-edge sensitivity, and
+ * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs
+ * have falling-edge sensitivity that can fire the trigger.
+ *
+ * For %COMEDI_DIGITAL_TRIG_ENABLE_LEVELS, configuration parameter 2 (data[%4])
+ * specifies which of up to 32 inputs must be at a high level, and
+ * configuration parameter 3 (data[%5]) specifies which of up to 32 inputs
+ * must be at a low level for the trigger to fire.
+ *
+ * Some sequences of %INSN_CONFIG_DIGITAL_TRIG instructions may have a (partly)
* accumulative effect, depending on the low-level driver. This is useful
- * when setting up a trigger that has more than 32 inputs or has a combination
- * of edge and level triggered inputs.
+ * when setting up a trigger that has more than 32 inputs, or has a combination
+ * of edge- and level-triggered inputs.
*/
enum comedi_digital_trig_op {
COMEDI_DIGITAL_TRIG_DISABLE = 0,
@@ -338,18 +396,49 @@ enum comedi_digital_trig_op {
COMEDI_DIGITAL_TRIG_ENABLE_LEVELS = 2
};
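As a worked example of the data layout documented above, a hypothetical instruction enabling rising-edge detection on inputs 0 and 3 of trigger 0 could be built like this (the subdevice index is illustrative):

    unsigned int data[6] = {
            INSN_CONFIG_DIGITAL_TRIG,
            0,                                 /* trigger ID */
            COMEDI_DIGITAL_TRIG_ENABLE_EDGES,
            0,                                 /* left-shift: bit 0 = input 0 */
            (1u << 0) | (1u << 3),             /* rising-edge sensitive inputs */
            0,                                 /* no falling-edge inputs */
    };
    struct comedi_insn insn = {
            .insn = INSN_CONFIG,
            .n = 6,
            .data = data,
            .subdev = 2,                       /* illustrative DIO subdevice */
    };
    /* submitted via the COMEDI_INSN ioctl */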
+/**
+ * enum comedi_io_direction - COMEDI I/O directions
+ * @COMEDI_INPUT: Input.
+ * @COMEDI_OUTPUT: Output.
+ * @COMEDI_OPENDRAIN: Open-drain (or open-collector) output.
+ *
+ * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
+ * report a direction. They may also be used in other places where a direction
+ * needs to be specified.
+ */
enum comedi_io_direction {
COMEDI_INPUT = 0,
COMEDI_OUTPUT = 1,
COMEDI_OPENDRAIN = 2
};
+/**
+ * enum comedi_support_level - support level for a COMEDI feature
+ * @COMEDI_UNKNOWN_SUPPORT: Unspecified support for feature.
+ * @COMEDI_SUPPORTED: Feature is supported.
+ * @COMEDI_UNSUPPORTED: Feature is unsupported.
+ */
enum comedi_support_level {
COMEDI_UNKNOWN_SUPPORT = 0,
COMEDI_SUPPORTED,
COMEDI_UNSUPPORTED
};
+/**
+ * enum comedi_counter_status_flags - counter status bits
+ * @COMEDI_COUNTER_ARMED: Counter is armed.
+ * @COMEDI_COUNTER_COUNTING: Counter is counting.
+ * @COMEDI_COUNTER_TERMINAL_COUNT: Counter reached terminal count.
+ *
+ * These bitwise values are used by the %INSN_CONFIG_GET_COUNTER_STATUS
+ * configuration instruction to report the status of a counter.
+ */
+enum comedi_counter_status_flags {
+ COMEDI_COUNTER_ARMED = 0x1,
+ COMEDI_COUNTER_COUNTING = 0x2,
+ COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
+};
+
/* ioctls */
#define CIO 'd'
@@ -357,7 +446,7 @@ enum comedi_support_level {
#define COMEDI_DEVINFO _IOR(CIO, 1, struct comedi_devinfo)
#define COMEDI_SUBDINFO _IOR(CIO, 2, struct comedi_subdinfo)
#define COMEDI_CHANINFO _IOR(CIO, 3, struct comedi_chaninfo)
-#define COMEDI_TRIG _IOWR(CIO, 4, comedi_trig)
+/* _IOWR(CIO, 4, ...) is reserved */
#define COMEDI_LOCK _IO(CIO, 5)
#define COMEDI_UNLOCK _IO(CIO, 6)
#define COMEDI_CANCEL _IO(CIO, 7)
@@ -374,21 +463,19 @@ enum comedi_support_level {
/* structures */
-struct comedi_trig {
- unsigned int subdev; /* subdevice */
- unsigned int mode; /* mode */
- unsigned int flags;
- unsigned int n_chan; /* number of channels */
- unsigned int *chanlist; /* channel/range list */
- short *data; /* data list, size depends on subd flags */
- unsigned int n; /* number of scans */
- unsigned int trigsrc;
- unsigned int trigvar;
- unsigned int trigvar1;
- unsigned int data_len;
- unsigned int unused[3];
-};
-
+/**
+ * struct comedi_insn - COMEDI instruction
+ * @insn: COMEDI instruction type (%INSN_xxx).
+ * @n: Length of @data[].
+ * @data: Pointer to data array operated on by the instruction.
+ * @subdev: Subdevice index.
+ * @chanspec: A packed "chanspec" value consisting of channel number,
+ * analog range index, analog reference type, and flags.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_INSN ioctl, and indirectly with the
+ * %COMEDI_INSNLIST ioctl.
+ */
struct comedi_insn {
unsigned int insn;
unsigned int n;
@@ -398,11 +485,95 @@ struct comedi_insn {
unsigned int unused[3];
};
+/**
+ * struct comedi_insnlist - list of COMEDI instructions
+ * @n_insns: Number of COMEDI instructions.
+ * @insns:	Pointer to array of COMEDI instructions.
+ *
+ * This is used with the %COMEDI_INSNLIST ioctl.
+ */
struct comedi_insnlist {
unsigned int n_insns;
struct comedi_insn __user *insns;
};
+/**
+ * struct comedi_cmd - COMEDI asynchronous acquisition command details
+ * @subdev: Subdevice index.
+ * @flags: Command flags (%CMDF_xxx).
+ * @start_src: "Start acquisition" trigger source (%TRIG_xxx).
+ * @start_arg: "Start acquisition" trigger argument.
+ * @scan_begin_src: "Scan begin" trigger source.
+ * @scan_begin_arg: "Scan begin" trigger argument.
+ * @convert_src: "Convert" trigger source.
+ * @convert_arg: "Convert" trigger argument.
+ * @scan_end_src: "Scan end" trigger source.
+ * @scan_end_arg: "Scan end" trigger argument.
+ * @stop_src: "Stop acquisition" trigger source.
+ * @stop_arg: "Stop acquisition" trigger argument.
+ * @chanlist: Pointer to array of "chanspec" values, containing a
+ * sequence of channel numbers packed with analog range
+ * index, etc.
+ * @chanlist_len: Number of channels in sequence.
+ * @data: Pointer to miscellaneous set-up data (not used).
+ * @data_len: Length of miscellaneous set-up data.
+ *
+ * This is used with the %COMEDI_CMD or %COMEDI_CMDTEST ioctl to set up
+ * or validate an asynchronous acquisition command. The ioctl may modify
+ * the &struct comedi_cmd and copy it back to the caller.
+ *
+ * Optional command @flags values that can be ORed together...
+ *
+ * %CMDF_BOGUS - makes %COMEDI_CMD ioctl return error %EAGAIN instead of
+ * starting the command.
+ *
+ * %CMDF_PRIORITY - requests "hard real-time" processing (which is not
+ * supported in this version of COMEDI).
+ *
+ * %CMDF_WAKE_EOS - requests that the command make data available for reading
+ * after every "scan" period.
+ *
+ * %CMDF_WRITE - marks the command as being in the "write" (to device)
+ * direction. This does not need to be specified by the caller unless the
+ * subdevice supports commands in either direction.
+ *
+ * %CMDF_RAWDATA - prevents the command from "munging" the data between the
+ * COMEDI sample format and the raw hardware sample format.
+ *
+ * %CMDF_ROUND_NEAREST - requests timing periods to be rounded to nearest
+ * supported values.
+ *
+ * %CMDF_ROUND_DOWN - requests timing periods to be rounded down to supported
+ * values (frequencies rounded up).
+ *
+ * %CMDF_ROUND_UP - requests timing periods to be rounded up to supported
+ * values (frequencies rounded down).
+ *
+ * Trigger source values for @start_src, @scan_begin_src, @convert_src,
+ * @scan_end_src, and @stop_src...
+ *
+ * %TRIG_ANY - "all ones" value used to test which trigger sources are
+ * supported.
+ *
+ * %TRIG_INVALID - "all zeroes" value used to indicate that all requested
+ * trigger sources are invalid.
+ *
+ * %TRIG_NONE - never trigger (often used as a @stop_src value).
+ *
+ * %TRIG_NOW - trigger after '_arg' nanoseconds.
+ *
+ * %TRIG_FOLLOW - trigger follows another event.
+ *
+ * %TRIG_TIMER - trigger every '_arg' nanoseconds.
+ *
+ * %TRIG_COUNT - trigger when count '_arg' is reached.
+ *
+ * %TRIG_EXT - trigger on external signal specified by '_arg'.
+ *
+ * %TRIG_INT - trigger on internal, software trigger specified by '_arg'.
+ *
+ * %TRIG_OTHER - trigger on other, driver-defined signal specified by '_arg'.
+ */
struct comedi_cmd {
unsigned int subdev;
unsigned int flags;
@@ -422,13 +593,31 @@ struct comedi_cmd {
unsigned int stop_src;
unsigned int stop_arg;
- unsigned int *chanlist; /* channel/range list */
+ unsigned int *chanlist;
unsigned int chanlist_len;
- short __user *data; /* data list, size depends on subd flags */
+ short __user *data;
unsigned int data_len;
};
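To make the field roles concrete, here is a sketch of a two-channel streaming analog-input command using these trigger sources (subdevice, rates, and ranges are illustrative):

    unsigned int chanlist[2] = {
            CR_PACK(0, 0, AREF_GROUND),        /* channel 0, range 0 */
            CR_PACK(1, 0, AREF_GROUND),        /* channel 1, range 0 */
    };
    struct comedi_cmd cmd = {
            .subdev         = 0,
            .start_src      = TRIG_NOW,   .start_arg      = 0,
            .scan_begin_src = TRIG_TIMER, .scan_begin_arg = 1000000, /* 1 kHz scans */
            .convert_src    = TRIG_TIMER, .convert_arg    = 10000,   /* 100 kHz samples */
            .scan_end_src   = TRIG_COUNT, .scan_end_arg   = 2,       /* channels per scan */
            .stop_src       = TRIG_NONE,  .stop_arg       = 0,
            .chanlist       = chanlist,
            .chanlist_len   = 2,
    };
    /* validated with COMEDI_CMDTEST, then started with COMEDI_CMD */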
+/**
+ * struct comedi_chaninfo - used to retrieve per-channel information
+ * @subdev: Subdevice index.
+ * @maxdata_list: Optional pointer to per-channel maximum data values.
+ * @flaglist: Optional pointer to per-channel flags.
+ * @rangelist: Optional pointer to per-channel range types.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_CHANINFO ioctl to get per-channel information
+ * for the subdevice. Use of this requires knowledge of the number of channels
+ * and subdevice flags obtained using the %COMEDI_SUBDINFO ioctl.
+ *
+ * The @maxdata_list member must be %NULL unless the %SDF_MAXDATA subdevice
+ * flag is set. The @flaglist member must be %NULL unless the %SDF_FLAGS
+ * subdevice flag is set. The @rangelist member must be %NULL unless the
+ * %SDF_RANGETYPE subdevice flag is set. Otherwise, the arrays they point to
+ * must be at least as long as the number of channels.
+ */
struct comedi_chaninfo {
unsigned int subdev;
unsigned int __user *maxdata_list;
@@ -437,17 +626,149 @@ struct comedi_chaninfo {
unsigned int unused[4];
};
+/**
+ * struct comedi_rangeinfo - used to retrieve the range table for a channel
+ * @range_type: Encodes subdevice index (bits 27:24), channel index
+ * (bits 23:16) and range table length (bits 15:0).
+ * @range_ptr:	Pointer to array of &struct comedi_krange to be filled
+ * in with the range table for the channel or subdevice.
+ *
+ * This is used with the %COMEDI_RANGEINFO ioctl to retrieve the range table
+ * for a specific channel (if the subdevice has the %SDF_RANGETYPE flag set to
+ * indicate that the range table depends on the channel), or for the subdevice
+ * as a whole (if the %SDF_RANGETYPE flag is clear, indicating the range table
+ * is shared by all channels).
+ *
+ * The @range_type value is an input to the ioctl and comes from a previous
+ * use of the %COMEDI_SUBDINFO ioctl (if the %SDF_RANGETYPE flag is clear),
+ * or the %COMEDI_CHANINFO ioctl (if the %SDF_RANGETYPE flag is set).
+ */
struct comedi_rangeinfo {
unsigned int range_type;
void __user *range_ptr;
};
+/**
+ * struct comedi_krange - describes a range in a range table
+ * @min: Minimum value in millionths (1e-6) of a unit.
+ * @max: Maximum value in millionths (1e-6) of a unit.
+ * @flags: Indicates the units (in bits 7:0) OR'ed with optional flags.
+ *
+ * A range table is associated with a single channel, or with all channels in a
+ * subdevice, and consists of a list of one or more ranges. A &struct comedi_krange
+ * describes the physical range of units for one of those ranges. Sample
+ * values in COMEDI are unsigned from %0 up to some 'maxdata' value. The
+ * mapping from sample values to physical units is assumed to be nominally
+ * linear (for the purpose of describing the range), with sample value %0
+ * mapping to @min, and the 'maxdata' sample value mapping to @max.
+ *
+ * The currently defined units are %UNIT_volt (%0), %UNIT_mA (%1), and
+ * %UNIT_none (%2). The @min and @max values are the physical range multiplied
+ * by 1e6, so a @max value of %1000000 (with %UNIT_volt) represents a maximal
+ * value of 1 volt.
+ *
+ * The only defined flag value is %RF_EXTERNAL (%0x100), indicating that
+ * the range needs to be multiplied by an external reference.
+ */
struct comedi_krange {
- int min; /* fixed point, multiply by 1e-6 */
- int max; /* fixed point, multiply by 1e-6 */
+ int min;
+ int max;
unsigned int flags;
};
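Under the nominally linear mapping described above, converting a raw sample to physical units is a one-liner; a sketch (maxdata comes from %COMEDI_SUBDINFO or %COMEDI_CHANINFO):

    double krange_to_phys(unsigned int sample, const struct comedi_krange *kr,
                          unsigned int maxdata)
    {
            double min = kr->min * 1e-6;       /* stored in millionths of a unit */
            double max = kr->max * 1e-6;

            return min + (max - min) * ((double)sample / maxdata);
    }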
+/**
+ * struct comedi_subdinfo - used to retrieve information about a subdevice
+ * @type: Type of subdevice from &enum comedi_subdevice_type.
+ * @n_chan: Number of channels the subdevice supports.
+ * @subd_flags: A mixture of static and dynamic flags describing
+ * aspects of the subdevice and its current state.
+ * @timer_type: Timer type. Always set to %5 ("nanosecond timer").
+ * @len_chanlist: Maximum length of a channel list if the subdevice
+ * supports asynchronous acquisition commands.
+ * @maxdata: Maximum sample value for all channels if the
+ * %SDF_MAXDATA subdevice flag is clear.
+ * @flags: Channel flags for all channels if the %SDF_FLAGS
+ * subdevice flag is clear.
+ * @range_type: The range type for all channels if the %SDF_RANGETYPE
+ * subdevice flag is clear. Encodes the subdevice index
+ * (bits 27:24), a dummy channel index %0 (bits 23:16),
+ * and the range table length (bits 15:0).
+ * @settling_time_0: Not used.
+ * @insn_bits_support: Set to %COMEDI_SUPPORTED if the subdevice supports the
+ * %INSN_BITS instruction, or to %COMEDI_UNSUPPORTED if it
+ * does not.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_SUBDINFO ioctl which copies an array of
+ * &struct comedi_subdinfo back to user space, with one element per subdevice.
+ * Use of this requires knowledge of the number of subdevices obtained from
+ * the %COMEDI_DEVINFO ioctl.
+ *
+ * These are the @subd_flags values that may be ORed together...
+ *
+ * %SDF_BUSY - the subdevice is busy processing an asynchronous command or a
+ * synchronous instruction.
+ *
+ * %SDF_BUSY_OWNER - the subdevice is busy processing an asynchronous
+ * acquisition command started on the current file object (the file object
+ * issuing the %COMEDI_SUBDINFO ioctl).
+ *
+ * %SDF_LOCKED - the subdevice is locked by a %COMEDI_LOCK ioctl.
+ *
+ * %SDF_LOCK_OWNER - the subdevice is locked by a %COMEDI_LOCK ioctl from the
+ * current file object.
+ *
+ * %SDF_MAXDATA - maximum sample values are channel-specific.
+ *
+ * %SDF_FLAGS - channel flags are channel-specific.
+ *
+ * %SDF_RANGETYPE - range types are channel-specific.
+ *
+ * %SDF_PWM_COUNTER - PWM can switch off automatically.
+ *
+ * %SDF_PWM_HBRIDGE - PWM is signed (H-bridge).
+ *
+ * %SDF_CMD - the subdevice supports asynchronous commands.
+ *
+ * %SDF_SOFT_CALIBRATED - the subdevice uses software calibration.
+ *
+ * %SDF_CMD_WRITE - the subdevice supports asynchronous commands in the output
+ * ("write") direction.
+ *
+ * %SDF_CMD_READ - the subdevice supports asynchronous commands in the input
+ * ("read") direction.
+ *
+ * %SDF_READABLE - the subdevice is readable (e.g. analog input).
+ *
+ * %SDF_WRITABLE (aliased as %SDF_WRITEABLE) - the subdevice is writable (e.g.
+ * analog output).
+ *
+ * %SDF_INTERNAL - the subdevice has no externally visible lines.
+ *
+ * %SDF_GROUND - the subdevice can use ground as an analog reference.
+ *
+ * %SDF_COMMON - the subdevice can use a common analog reference.
+ *
+ * %SDF_DIFF - the subdevice can use differential inputs (or outputs).
+ *
+ * %SDF_OTHER - the subdevice can use some other analog reference.
+ *
+ * %SDF_DITHER - the subdevice can do dithering.
+ *
+ * %SDF_DEGLITCH - the subdevice can do deglitching.
+ *
+ * %SDF_MMAP - this is never set.
+ *
+ * %SDF_RUNNING - an asynchronous command is still running.
+ *
+ * %SDF_LSAMPL - the subdevice uses "long" (32-bit) samples (for asynchronous
+ * command data).
+ *
+ * %SDF_PACKED - the subdevice packs several DIO samples into a single sample
+ * (for asynchronous command data).
+ *
+ * No "channel flags" (@flags) values are currently defined.
+ */
struct comedi_subdinfo {
unsigned int type;
unsigned int n_chan;
@@ -455,14 +776,26 @@ struct comedi_subdinfo {
unsigned int timer_type;
unsigned int len_chanlist;
unsigned int maxdata;
- unsigned int flags; /* channel flags */
- unsigned int range_type; /* lookup in kernel */
+ unsigned int flags;
+ unsigned int range_type;
unsigned int settling_time_0;
- /* see support_level enum for values */
unsigned insn_bits_support;
unsigned int unused[8];
};
+/**
+ * struct comedi_devinfo - used to retrieve information about a COMEDI device
+ * @version_code: COMEDI version code.
+ * @n_subdevs: Number of subdevices the device has.
+ * @driver_name: Null-terminated COMEDI driver name.
+ * @board_name: Null-terminated COMEDI board name.
+ * @read_subdevice: Index of the current "read" subdevice (%-1 if none).
+ * @write_subdevice: Index of the current "write" subdevice (%-1 if none).
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_DEVINFO ioctl to get basic information about
+ * the device.
+ */
struct comedi_devinfo {
unsigned int version_code;
unsigned int n_subdevs;
@@ -473,11 +806,45 @@ struct comedi_devinfo {
int unused[30];
};
+/**
+ * struct comedi_devconfig - used to configure a legacy COMEDI device
+ * @board_name: Null-terminated string specifying the type of board
+ * to configure.
+ * @options: An array of integer configuration options.
+ *
+ * This is used with the %COMEDI_DEVCONFIG ioctl to configure a "legacy" COMEDI
+ * device, such as an ISA card. Not all COMEDI drivers support this. Those
+ * that do either expect the specified board name to match one of a list of
+ * names registered with the COMEDI core, or expect the specified board name
+ * to match the COMEDI driver name itself. The configuration options are
+ * handled in a driver-specific manner.
+ */
struct comedi_devconfig {
char board_name[COMEDI_NAMELEN];
int options[COMEDI_NDEVCONFOPTS];
};
+/**
+ * struct comedi_bufconfig - used to set or get buffer size for a subdevice
+ * @subdevice: Subdevice index.
+ * @flags: Not used.
+ * @maximum_size: Maximum allowed buffer size.
+ * @size: Buffer size.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_BUFCONFIG ioctl to get or configure the
+ * maximum buffer size and current buffer size for a COMEDI subdevice that
+ * supports asynchronous commands. If the subdevice does not support
+ * asynchronous commands, @maximum_size and @size are ignored and set to 0.
+ *
+ * On ioctl input, non-zero values of @maximum_size and @size specify a
+ * new maximum size and new current size (in bytes), respectively. These
+ * will be rounded up to a multiple of %PAGE_SIZE. Specifying a new maximum
+ * size requires admin capabilities.
+ *
+ * On ioctl output, @maximum_size and @size are set to the current maximum
+ * buffer size and current buffer size, respectively.
+ */
struct comedi_bufconfig {
unsigned int subdevice;
unsigned int flags;
@@ -488,6 +855,23 @@ struct comedi_bufconfig {
unsigned int unused[4];
};
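For example, a caller could grow the current buffer of subdevice 0 to 256 KiB like so (values illustrative; the kernel rounds up to a %PAGE_SIZE multiple and echoes the resulting sizes back):

    struct comedi_bufconfig bc = {
            .subdevice = 0,
            .size      = 256 * 1024,   /* new current size; 0 leaves it unchanged */
    };

    if (ioctl(fd, COMEDI_BUFCONFIG, &bc) == 0)
            printf("size %u, maximum %u\n", bc.size, bc.maximum_size);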
+/**
+ * struct comedi_bufinfo - used to manipulate buffer position for a subdevice
+ * @subdevice: Subdevice index.
+ * @bytes_read: Specify amount to advance read position for an
+ * asynchronous command in the input ("read") direction.
+ * @buf_write_ptr: Current write position (index) within the buffer.
+ * @buf_read_ptr: Current read position (index) within the buffer.
+ * @buf_write_count: Total amount written, modulo 2^32.
+ * @buf_read_count: Total amount read, modulo 2^32.
+ * @bytes_written: Specify amount to advance write position for an
+ * asynchronous command in the output ("write") direction.
+ * @unused: Reserved for future use.
+ *
+ * This is used with the %COMEDI_BUFINFO ioctl to optionally advance the
+ * current read or write position in an asynchronous acquisition data buffer,
+ * and to get the current read and write positions in the buffer.
+ */
struct comedi_bufinfo {
unsigned int subdevice;
unsigned int bytes_read;
@@ -510,13 +894,13 @@ struct comedi_bufinfo {
#define RANGE_LENGTH(b) ((b) & 0xffff)
#define RF_UNIT(flags) ((flags) & 0xff)
-#define RF_EXTERNAL (1 << 8)
+#define RF_EXTERNAL 0x100
#define UNIT_volt 0
#define UNIT_mA 1
#define UNIT_none 2
-#define COMEDI_MIN_SPEED ((unsigned int)0xffffffff)
+#define COMEDI_MIN_SPEED 0xffffffffu
/**********************************************************/
/* everything after this line is ALPHA */
@@ -849,13 +1233,6 @@ enum ni_660x_pfi_routing {
#define NI_EXT_PFI(x) (NI_USUAL_PFI_SELECT(x) - 1)
#define NI_EXT_RTSI(x) (NI_USUAL_RTSI_SELECT(x) - 1)
-/* status bits for INSN_CONFIG_GET_COUNTER_STATUS */
-enum comedi_counter_status_flags {
- COMEDI_COUNTER_ARMED = 0x1,
- COMEDI_COUNTER_COUNTING = 0x2,
- COMEDI_COUNTER_TERMINAL_COUNT = 0x4,
-};
-
/*
* Clock sources for CDIO subdevice on NI m-series boards. Used as the
* scan_begin_arg for a comedi_command. These sources may also be bitwise-or'd
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index d57fadef47fc..7c7b477b0f28 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -686,13 +686,6 @@ static bool __comedi_is_subdevice_running(struct comedi_subdevice *s)
return comedi_is_runflags_running(runflags);
}
-static bool comedi_is_subdevice_idle(struct comedi_subdevice *s)
-{
- unsigned runflags = comedi_get_subdevice_runflags(s);
-
- return !(runflags & COMEDI_SRF_BUSY_MASK);
-}
-
bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
{
unsigned runflags = __comedi_get_subdevice_runflags(s);
@@ -1111,6 +1104,9 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
struct comedi_bufinfo bi;
struct comedi_subdevice *s;
struct comedi_async *async;
+ unsigned int runflags;
+ int retval = 0;
+ bool become_nonbusy = false;
if (copy_from_user(&bi, arg, sizeof(bi)))
return -EFAULT;
@@ -1122,48 +1118,56 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
async = s->async;
- if (!async) {
- dev_dbg(dev->class_dev,
- "subdevice does not have async capability\n");
- bi.buf_write_ptr = 0;
- bi.buf_read_ptr = 0;
- bi.buf_write_count = 0;
- bi.buf_read_count = 0;
- bi.bytes_read = 0;
- bi.bytes_written = 0;
- goto copyback;
- }
- if (!s->busy) {
- bi.bytes_read = 0;
- bi.bytes_written = 0;
- goto copyback_position;
- }
- if (s->busy != file)
- return -EACCES;
-
- if (bi.bytes_read && !(async->cmd.flags & CMDF_WRITE)) {
- bi.bytes_read = comedi_buf_read_alloc(s, bi.bytes_read);
- comedi_buf_read_free(s, bi.bytes_read);
+ if (!async || s->busy != file)
+ return -EINVAL;
- if (comedi_is_subdevice_idle(s) &&
- comedi_buf_read_n_available(s) == 0) {
- do_become_nonbusy(dev, s);
+ runflags = comedi_get_subdevice_runflags(s);
+ if (!(async->cmd.flags & CMDF_WRITE)) {
+ /* command was set up in "read" direction */
+ if (bi.bytes_read) {
+ comedi_buf_read_alloc(s, bi.bytes_read);
+ bi.bytes_read = comedi_buf_read_free(s, bi.bytes_read);
}
+ /*
+ * If nothing left to read, and command has stopped, and
+ * {"read" position not updated or command stopped normally},
+ * then become non-busy.
+ */
+ if (comedi_buf_read_n_available(s) == 0 &&
+ !comedi_is_runflags_running(runflags) &&
+ (bi.bytes_read == 0 ||
+ !comedi_is_runflags_in_error(runflags))) {
+ become_nonbusy = true;
+ if (comedi_is_runflags_in_error(runflags))
+ retval = -EPIPE;
+ }
+ bi.bytes_written = 0;
+ } else {
+ /* command was set up in "write" direction */
+ if (!comedi_is_runflags_running(runflags)) {
+ bi.bytes_written = 0;
+ become_nonbusy = true;
+ if (comedi_is_runflags_in_error(runflags))
+ retval = -EPIPE;
+ } else if (bi.bytes_written) {
+ comedi_buf_write_alloc(s, bi.bytes_written);
+ bi.bytes_written =
+ comedi_buf_write_free(s, bi.bytes_written);
+ }
+ bi.bytes_read = 0;
}
- if (bi.bytes_written && (async->cmd.flags & CMDF_WRITE)) {
- bi.bytes_written =
- comedi_buf_write_alloc(s, bi.bytes_written);
- comedi_buf_write_free(s, bi.bytes_written);
- }
-
-copyback_position:
bi.buf_write_count = async->buf_write_count;
bi.buf_write_ptr = async->buf_write_ptr;
bi.buf_read_count = async->buf_read_count;
bi.buf_read_ptr = async->buf_read_ptr;
-copyback:
+ if (become_nonbusy)
+ do_become_nonbusy(dev, s);
+
+ if (retval)
+ return retval;
+
if (copy_to_user(arg, &bi, sizeof(bi)))
return -EFAULT;
@@ -2220,7 +2224,7 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
retval = -EFAULT;
goto done;
}
- if (size & (~PAGE_MASK)) {
+ if (offset_in_page(size)) {
retval = -EFAULT;
goto done;
}
diff --git a/drivers/staging/comedi/comedi_pcmcia.h b/drivers/staging/comedi/comedi_pcmcia.h
index 5d3db2b9b4a1..5a572c200a8b 100644
--- a/drivers/staging/comedi/comedi_pcmcia.h
+++ b/drivers/staging/comedi/comedi_pcmcia.h
@@ -39,7 +39,8 @@ void comedi_pcmcia_driver_unregister(struct comedi_driver *,
struct pcmcia_driver *);
/**
- * module_comedi_pcmcia_driver() - Helper macro for registering a comedi PCMCIA driver
+ * module_comedi_pcmcia_driver() - Helper macro for registering a comedi
+ * PCMCIA driver
* @__comedi_driver: comedi_driver struct
* @__pcmcia_driver: pcmcia_driver struct
*
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index 995096c78844..b6af3eba91fd 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -496,7 +496,7 @@ static int apci3xxx_ai_ns_to_timer(struct comedi_device *dev,
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- timer = (*ns + base / 2) / base;
+ timer = DIV_ROUND_CLOSEST(*ns, base);
break;
case CMDF_ROUND_DOWN:
timer = *ns / base;
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 4b39f6960c0a..907c39cc89d7 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -637,12 +637,12 @@ static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
switch (flags & CMDF_ROUND_MASK) {
default:
case CMDF_ROUND_NEAREST:
- div += (rem + (timebase / 2)) / timebase;
+ div += DIV_ROUND_CLOSEST(rem, timebase);
break;
case CMDF_ROUND_DOWN:
break;
case CMDF_ROUND_UP:
- div += (rem + timebase - 1) / timebase;
+ div += DIV_ROUND_UP(rem, timebase);
break;
}
return div > UINT_MAX ? UINT_MAX : (unsigned int)div;
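The helpers substituted in these hunks come from <linux/kernel.h> and expand to the open-coded arithmetic they replace; a sketch of their effect for unsigned operands:

    /* DIV_ROUND_UP(n, d)      == ((n) + (d) - 1) / (d)             */
    /* DIV_ROUND_CLOSEST(n, d) == ((n) + (d) / 2) / (d) for n >= 0  */
    /* e.g. with rem = 2600 and timebase = 1000:                    */
    /*   plain division    -> 2                                     */
    /*   DIV_ROUND_CLOSEST -> 3                                     */
    /*   DIV_ROUND_UP      -> 3                                     */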
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index d33b8fe872a7..c773b8ca6599 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1376,7 +1376,7 @@ static int set_ai_fifo_segment_length(struct comedi_device *dev,
num_entries = fifo->max_segment_length;
/* 1 == 256 entries, 2 == 512 entries, etc */
- num_increments = (num_entries + increment_size / 2) / increment_size;
+ num_increments = DIV_ROUND_CLOSEST(num_entries, increment_size);
bits = (~(num_increments - 1)) & fifo->fifo_size_reg_mask;
devpriv->fifo_size_bits &= ~fifo->fifo_size_reg_mask;
@@ -1480,35 +1480,39 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
/* allocate pci dma buffers */
for (i = 0; i < ai_dma_ring_count(board); i++) {
devpriv->ai_buffer[i] =
- pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE,
- &devpriv->ai_buffer_bus_addr[i]);
+ dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
+ &devpriv->ai_buffer_bus_addr[i],
+ GFP_KERNEL);
if (!devpriv->ai_buffer[i])
return -ENOMEM;
}
for (i = 0; i < AO_DMA_RING_COUNT; i++) {
if (ao_cmd_is_supported(board)) {
devpriv->ao_buffer[i] =
- pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE,
- &devpriv->
- ao_buffer_bus_addr[i]);
+ dma_alloc_coherent(&pcidev->dev,
+ DMA_BUFFER_SIZE,
+ &devpriv->
+ ao_buffer_bus_addr[i],
+ GFP_KERNEL);
if (!devpriv->ao_buffer[i])
return -ENOMEM;
}
}
/* allocate dma descriptors */
devpriv->ai_dma_desc =
- pci_alloc_consistent(pcidev, sizeof(struct plx_dma_desc) *
- ai_dma_ring_count(board),
- &devpriv->ai_dma_desc_bus_addr);
+ dma_alloc_coherent(&pcidev->dev, sizeof(struct plx_dma_desc) *
+ ai_dma_ring_count(board),
+ &devpriv->ai_dma_desc_bus_addr, GFP_KERNEL);
if (!devpriv->ai_dma_desc)
return -ENOMEM;
if (ao_cmd_is_supported(board)) {
devpriv->ao_dma_desc =
- pci_alloc_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- AO_DMA_RING_COUNT,
- &devpriv->ao_dma_desc_bus_addr);
+ dma_alloc_coherent(&pcidev->dev,
+ sizeof(struct plx_dma_desc) *
+ AO_DMA_RING_COUNT,
+ &devpriv->ao_dma_desc_bus_addr,
+ GFP_KERNEL);
if (!devpriv->ao_dma_desc)
return -ENOMEM;
}
@@ -1564,31 +1568,31 @@ static void cb_pcidas64_free_dma(struct comedi_device *dev)
/* free pci dma buffers */
for (i = 0; i < ai_dma_ring_count(board); i++) {
if (devpriv->ai_buffer[i])
- pci_free_consistent(pcidev,
- DMA_BUFFER_SIZE,
- devpriv->ai_buffer[i],
- devpriv->ai_buffer_bus_addr[i]);
+ dma_free_coherent(&pcidev->dev,
+ DMA_BUFFER_SIZE,
+ devpriv->ai_buffer[i],
+ devpriv->ai_buffer_bus_addr[i]);
}
for (i = 0; i < AO_DMA_RING_COUNT; i++) {
if (devpriv->ao_buffer[i])
- pci_free_consistent(pcidev,
- DMA_BUFFER_SIZE,
- devpriv->ao_buffer[i],
- devpriv->ao_buffer_bus_addr[i]);
+ dma_free_coherent(&pcidev->dev,
+ DMA_BUFFER_SIZE,
+ devpriv->ao_buffer[i],
+ devpriv->ao_buffer_bus_addr[i]);
}
/* free dma descriptors */
if (devpriv->ai_dma_desc)
- pci_free_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- ai_dma_ring_count(board),
- devpriv->ai_dma_desc,
- devpriv->ai_dma_desc_bus_addr);
+ dma_free_coherent(&pcidev->dev,
+ sizeof(struct plx_dma_desc) *
+ ai_dma_ring_count(board),
+ devpriv->ai_dma_desc,
+ devpriv->ai_dma_desc_bus_addr);
if (devpriv->ao_dma_desc)
- pci_free_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- AO_DMA_RING_COUNT,
- devpriv->ao_dma_desc,
- devpriv->ao_dma_desc_bus_addr);
+ dma_free_coherent(&pcidev->dev,
+ sizeof(struct plx_dma_desc) *
+ AO_DMA_RING_COUNT,
+ devpriv->ao_dma_desc,
+ devpriv->ao_dma_desc_bus_addr);
}
static inline void warn_external_queue(struct comedi_device *dev)
@@ -2004,7 +2008,7 @@ static unsigned int get_divisor(unsigned int ns, unsigned int flags)
break;
case CMDF_ROUND_NEAREST:
default:
- divisor = (ns + TIMER_BASE / 2) / TIMER_BASE;
+ divisor = DIV_ROUND_CLOSEST(ns, TIMER_BASE);
break;
}
return divisor;
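
The DMA-API conversions in this file (and in gsc_hpdi.c below) are mechanical: the legacy pci_alloc_consistent()/pci_free_consistent() wrappers forward to the generic DMA API, historically with GFP_ATOMIC, so callers running in sleepable context can now pass GFP_KERNEL explicitly. A sketch of the mapping:

	/* was: buf = pci_alloc_consistent(pcidev, size, &handle); */
	buf = dma_alloc_coherent(&pcidev->dev, size, &handle, GFP_KERNEL);

	/* was: pci_free_consistent(pcidev, size, buf, handle); */
	dma_free_coherent(&pcidev->dev, size, buf, handle);
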
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 19210d89f2b2..84ef45457c60 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -1,77 +1,78 @@
/*
- comedi/drivers/cb_pcimdda.c
- Computer Boards PCIM-DDA06-16 Comedi driver
- Author: Calin Culianu <calin@ajvar.org>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * comedi/drivers/cb_pcimdda.c
+ * Computer Boards PCIM-DDA06-16 Comedi driver
+ * Author: Calin Culianu <calin@ajvar.org>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-Driver: cb_pcimdda
-Description: Measurement Computing PCIM-DDA06-16
-Devices: [Measurement Computing] PCIM-DDA06-16 (cb_pcimdda)
-Author: Calin Culianu <calin@ajvar.org>
-Updated: Mon, 14 Apr 2008 15:15:51 +0100
-Status: works
-
-All features of the PCIM-DDA06-16 board are supported. This board
-has 6 16-bit AO channels, and the usual 8255 DIO setup. (24 channels,
-configurable in banks of 8 and 4, etc.). This board does not support commands.
-
-The board has a peculiar way of specifying AO gain/range settings -- You have
-1 jumper bank on the card, which either makes all 6 AO channels either
-5 Volt unipolar, 5V bipolar, 10 Volt unipolar or 10V bipolar.
-
-Since there is absolutely _no_ way to tell in software how this jumper is set
-(well, at least according to the rather thin spec. from Measurement Computing
- that comes with the board), the driver assumes the jumper is at its factory
-default setting of +/-5V.
-
-Also of note is the fact that this board features another jumper, whose
-state is also completely invisible to software. It toggles two possible AO
-output modes on the board:
-
- - Update Mode: Writing to an AO channel instantaneously updates the actual
- signal output by the DAC on the board (this is the factory default).
- - Simultaneous XFER Mode: Writing to an AO channel has no effect until
- you read from any one of the AO channels. This is useful for loading
- all 6 AO values, and then reading from any one of the AO channels on the
- device to instantly update all 6 AO values in unison. Useful for some
- control apps, I would assume? If your jumper is in this setting, then you
- need to issue your comedi_data_write()s to load all the values you want,
- then issue one comedi_data_read() on any channel on the AO subdevice
- to initiate the simultaneous XFER.
-
-Configuration Options: not applicable, uses PCI auto config
-*/
+ * Driver: cb_pcimdda
+ * Description: Measurement Computing PCIM-DDA06-16
+ * Devices: [Measurement Computing] PCIM-DDA06-16 (cb_pcimdda)
+ * Author: Calin Culianu <calin@ajvar.org>
+ * Updated: Mon, 14 Apr 2008 15:15:51 +0100
+ * Status: works
+ *
+ * All features of the PCIM-DDA06-16 board are supported.
+ * This board has 6 16-bit AO channels, and the usual 8255 DIO setup.
+ * (24 channels, configurable in banks of 8 and 4, etc.).
+ * This board does not support commands.
+ *
+ * The board has a peculiar way of specifying AO gain/range settings -- you
+ * have one jumper bank on the card, which makes all 6 AO channels either
+ * 5V unipolar, 5V bipolar, 10V unipolar or 10V bipolar.
+ *
+ * Since there is absolutely _no_ way to tell in software how this jumper is set
+ * (well, at least according to the rather thin spec. from Measurement Computing
+ * that comes with the board), the driver assumes the jumper is at its factory
+ * default setting of +/-5V.
+ *
+ * Also of note is the fact that this board features another jumper, whose
+ * state is also completely invisible to software. It toggles two possible AO
+ * output modes on the board:
+ *
+ * - Update Mode: Writing to an AO channel instantaneously updates the actual
+ * signal output by the DAC on the board (this is the factory default).
+ * - Simultaneous XFER Mode: Writing to an AO channel has no effect until
+ * you read from any one of the AO channels. This is useful for loading
+ * all 6 AO values, and then reading from any one of the AO channels on the
+ * device to instantly update all 6 AO values in unison. Useful for some
+ * control apps, I would assume? If your jumper is in this setting, then you
+ * need to issue your comedi_data_write()s to load all the values you want,
+ * then issue one comedi_data_read() on any channel on the AO subdevice
+ * to initiate the simultaneous XFER.
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
/*
- This is a driver for the Computer Boards PCIM-DDA06-16 Analog Output
- card. This board has a unique register layout and as such probably
- deserves its own driver file.
-
- It is theoretically possible to integrate this board into the cb_pcidda
- file, but since that isn't my code, I didn't want to significantly
- modify that file to support this board (I thought it impolite to do so).
-
- At any rate, if you feel ambitious, please feel free to take
- the code out of this file and combine it with a more unified driver
- file.
-
- I would like to thank Timothy Curry <Timothy.Curry@rdec.redstone.army.mil>
- for lending me a board so that I could write this driver.
-
- -Calin Culianu <calin@ajvar.org>
+ * This is a driver for the Computer Boards PCIM-DDA06-16 Analog Output
+ * card. This board has a unique register layout and as such probably
+ * deserves its own driver file.
+ *
+ * It is theoretically possible to integrate this board into the cb_pcidda
+ * file, but since that isn't my code, I didn't want to significantly
+ * modify that file to support this board (I thought it impolite to do so).
+ *
+ * At any rate, if you feel ambitious, please feel free to take
+ * the code out of this file and combine it with a more unified driver
+ * file.
+ *
+ * I would like to thank Timothy Curry <Timothy.Curry@rdec.redstone.army.mil>
+ * for lending me a board so that I could write this driver.
+ *
+ * -Calin Culianu <calin@ajvar.org>
*/
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/comedi_isadma.c b/drivers/staging/comedi/drivers/comedi_isadma.c
index 6ba71d114a95..68ef9b1750be 100644
--- a/drivers/staging/comedi/drivers/comedi_isadma.c
+++ b/drivers/staging/comedi/drivers/comedi_isadma.c
@@ -132,8 +132,7 @@ unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
result = result1;
if (result >= desc->size || result == 0)
return 0;
- else
- return desc->size - result;
+ return desc->size - result;
}
EXPORT_SYMBOL_GPL(comedi_isadma_poll);
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 4956a49a6140..5f848396c2f7 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -1,29 +1,30 @@
/*
- comedi/drivers/contec_pci_dio.c
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ * comedi/drivers/contec_pci_dio.c
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: contec_pci_dio
-Description: Contec PIO1616L digital I/O board
-Devices: [Contec] PIO1616L (contec_pci_dio)
-Author: Stefano Rivoir <s.rivoir@gts.it>
-Updated: Wed, 27 Jun 2007 13:00:06 +0100
-Status: works
-
-Configuration Options: not applicable, uses comedi PCI auto config
-*/
+ * Driver: contec_pci_dio
+ * Description: Contec PIO1616L digital I/O board
+ * Devices: [Contec] PIO1616L (contec_pci_dio)
+ * Author: Stefano Rivoir <s.rivoir@gts.it>
+ * Updated: Wed, 27 Jun 2007 13:00:06 +0100
+ * Status: works
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ */
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 80e38dedd359..6c7b4d27c27c 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -68,17 +68,17 @@ Configuration options:
/* Command modifiers (only used with read/write), EXTTRIG can be
used with some other commands.
*/
-#define DT_MOD_DMA (1<<4)
-#define DT_MOD_CONT (1<<5)
-#define DT_MOD_EXTCLK (1<<6)
-#define DT_MOD_EXTTRIG (1<<7)
+#define DT_MOD_DMA BIT(4)
+#define DT_MOD_CONT BIT(5)
+#define DT_MOD_EXTCLK BIT(6)
+#define DT_MOD_EXTTRIG BIT(7)
/* Bits in status register */
-#define DT_S_DATA_OUT_READY (1<<0)
-#define DT_S_DATA_IN_FULL (1<<1)
-#define DT_S_READY (1<<2)
-#define DT_S_COMMAND (1<<3)
-#define DT_S_COMPOSITE_ERROR (1<<7)
+#define DT_S_DATA_OUT_READY BIT(0)
+#define DT_S_DATA_IN_FULL BIT(1)
+#define DT_S_READY BIT(2)
+#define DT_S_COMMAND BIT(3)
+#define DT_S_COMPOSITE_ERROR BIT(7)
/* registers */
#define DT2801_DATA 0
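
BIT() from linux/bitops.h makes the register layout explicit; a sketch of the equivalence, using one of the masks defined above:

	#include <linux/bitops.h>

	/* BIT(n) == (1UL << (n)), so DT_S_READY == BIT(2) == 0x04 */
	static bool demo_ready(unsigned int status)
	{
		return status & DT_S_READY;
	}
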
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 5a536a00066f..40bf00984fa5 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -371,13 +371,13 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- divider = (*ns + base / 2) / base;
+ divider = DIV_ROUND_CLOSEST(*ns, base);
break;
case CMDF_ROUND_DOWN:
divider = (*ns) / base;
break;
case CMDF_ROUND_UP:
- divider = (*ns + base - 1) / base;
+ divider = DIV_ROUND_UP(*ns, base);
break;
}
if (divider < 256) {
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index ab7a332fbcc4..19e0b7be8495 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -361,7 +361,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- divider = (*nanosec + base / 2) / base;
+ divider = DIV_ROUND_CLOSEST(*nanosec, base);
break;
case CMDF_ROUND_DOWN:
divider = (*nanosec) / base;
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 46ca5d938d5b..63b5cbc44bda 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -499,18 +499,18 @@ static void gsc_hpdi_free_dma(struct comedi_device *dev)
/* free pci dma buffers */
for (i = 0; i < NUM_DMA_BUFFERS; i++) {
if (devpriv->dio_buffer[i])
- pci_free_consistent(pcidev,
- DMA_BUFFER_SIZE,
- devpriv->dio_buffer[i],
- devpriv->dio_buffer_phys_addr[i]);
+ dma_free_coherent(&pcidev->dev,
+ DMA_BUFFER_SIZE,
+ devpriv->dio_buffer[i],
+ devpriv->dio_buffer_phys_addr[i]);
}
/* free dma descriptors */
if (devpriv->dma_desc)
- pci_free_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- NUM_DMA_DESCRIPTORS,
- devpriv->dma_desc,
- devpriv->dma_desc_phys_addr);
+ dma_free_coherent(&pcidev->dev,
+ sizeof(struct plx_dma_desc) *
+ NUM_DMA_DESCRIPTORS,
+ devpriv->dma_desc,
+ devpriv->dma_desc_phys_addr);
}
static int gsc_hpdi_init(struct comedi_device *dev)
@@ -630,14 +630,16 @@ static int gsc_hpdi_auto_attach(struct comedi_device *dev,
/* allocate pci dma buffers */
for (i = 0; i < NUM_DMA_BUFFERS; i++) {
devpriv->dio_buffer[i] =
- pci_alloc_consistent(pcidev, DMA_BUFFER_SIZE,
- &devpriv->dio_buffer_phys_addr[i]);
+ dma_alloc_coherent(&pcidev->dev, DMA_BUFFER_SIZE,
+ &devpriv->dio_buffer_phys_addr[i],
+ GFP_KERNEL);
}
/* allocate dma descriptors */
- devpriv->dma_desc = pci_alloc_consistent(pcidev,
- sizeof(struct plx_dma_desc) *
- NUM_DMA_DESCRIPTORS,
- &devpriv->dma_desc_phys_addr);
+ devpriv->dma_desc = dma_alloc_coherent(&pcidev->dev,
+ sizeof(struct plx_dma_desc) *
+ NUM_DMA_DESCRIPTORS,
+ &devpriv->dma_desc_phys_addr,
+ GFP_KERNEL);
if (devpriv->dma_desc_phys_addr & 0xf) {
dev_warn(dev->class_dev,
" dma descriptors not quad-word aligned (bug)\n");
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index fa7ae2c04556..8f24702c3380 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -297,7 +297,6 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
{
struct comedi_async *async = s->async;
unsigned int n_links;
- int i;
if (ring->descriptors) {
dma_free_coherent(ring->hw_dev,
@@ -326,17 +325,58 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
}
ring->n_links = n_links;
- for (i = 0; i < n_links; i++) {
+ return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mite_buf_change);
+
+/*
+ * initializes the ring buffer descriptors to provide correct DMA transfer links
+ * to the exact amount of memory required. When the ring buffer is allocated in
+ * mite_buf_change, the default is to initialize the ring to refer to the entire
+ * DMA data buffer. A command may call this function later to re-initialize and
+ * shorten the amount of memory that will be transferred.
+ */
+int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
+ struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ struct comedi_async *async = s->async;
+ unsigned int n_full_links = nbytes >> PAGE_SHIFT;
+ unsigned int remainder = nbytes % PAGE_SIZE;
+ int i;
+
+ dev_dbg(s->device->class_dev,
+ "mite: init ring buffer to %u bytes\n", nbytes);
+
+ if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
+ dev_err(s->device->class_dev,
+ "mite: ring buffer too small for requested init\n");
+ return -ENOMEM;
+ }
+
+ /* We set the descriptors for all full links. */
+ for (i = 0; i < n_full_links; ++i) {
ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
ring->descriptors[i].addr =
cpu_to_le32(async->buf_map->page_list[i].dma_addr);
ring->descriptors[i].next =
- cpu_to_le32(ring->descriptors_dma_addr + (i +
- 1) *
- sizeof(struct mite_dma_descriptor));
+ cpu_to_le32(ring->descriptors_dma_addr +
+ (i + 1) * sizeof(struct mite_dma_descriptor));
}
- ring->descriptors[n_links - 1].next =
- cpu_to_le32(ring->descriptors_dma_addr);
+
+ /* the last link is either a remainder or was a full link. */
+ if (remainder > 0) {
+ /* set the lesser count for the remainder link */
+ ring->descriptors[i].count = cpu_to_le32(remainder);
+ ring->descriptors[i].addr =
+ cpu_to_le32(async->buf_map->page_list[i].dma_addr);
+ /* increment i so that assignment below refs last link */
+ ++i;
+ }
+
+ /* Assign the last link->next to point back to the head of the list. */
+ ring->descriptors[i - 1].next = cpu_to_le32(ring->descriptors_dma_addr);
+
/*
* barrier is meant to insure that all the writes to the dma descriptors
* have completed before the dma controller is commanded to read them
@@ -344,7 +384,7 @@ int mite_buf_change(struct mite_dma_descriptor_ring *ring,
smp_wmb();
return 0;
}
-EXPORT_SYMBOL_GPL(mite_buf_change);
+EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
void mite_prep_dma(struct mite_channel *mite_chan,
unsigned int num_device_bits, unsigned int num_memory_bits)
@@ -552,6 +592,7 @@ int mite_sync_output_dma(struct mite_channel *mite_chan,
unsigned int old_alloc_count = async->buf_read_alloc_count;
u32 nbytes_ub, nbytes_lb;
int count;
+ bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
/* read alloc as much as we can */
comedi_buf_read_alloc(s, async->prealloc_bufsz);
@@ -561,11 +602,24 @@ int mite_sync_output_dma(struct mite_channel *mite_chan,
nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
nbytes_ub = stop_count;
- if ((int)(nbytes_ub - old_alloc_count) > 0) {
+
+ if ((!finite_regen || stop_count > old_alloc_count) &&
+ ((int)(nbytes_ub - old_alloc_count) > 0)) {
dev_warn(s->device->class_dev, "mite: DMA underrun\n");
async->events |= COMEDI_CB_OVERFLOW;
return -1;
}
+
+ if (finite_regen) {
+ /*
+ * This is a special case where we continuously output a finite
+ * buffer. In this case, we do not free any of the memory,
+ * hence we expect that old_alloc_count will reach a maximum of
+ * stop_count bytes.
+ */
+ return 0;
+ }
+
count = nbytes_lb - async->buf_read_count;
if (count <= 0)
return 0;
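
mite_init_ring_descriptors() is exported so a command can later shrink an already-allocated ring to an exact byte count; ni_cmd_set_mite_transfer() further down in this patch is its first user. A minimal call sketch, assuming the ring was previously sized by mite_buf_change():

	/* ring covers prealloc_bufsz; restrict DMA to nbytes of it */
	ret = mite_init_ring_descriptors(ring, s, nbytes);
	if (ret)
		return ret;	/* -ENOMEM: ring has too few links */
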
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index c32d4e4ddccc..87534b07ec81 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -110,6 +110,9 @@ void mite_prep_dma(struct mite_channel *mite_chan,
unsigned int num_device_bits, unsigned int num_memory_bits);
int mite_buf_change(struct mite_dma_descriptor_ring *ring,
struct comedi_subdevice *s);
+int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
+ struct comedi_subdevice *s,
+ unsigned int nbytes);
enum mite_registers {
/*
diff --git a/drivers/staging/comedi/drivers/ni_mio_c_common.c b/drivers/staging/comedi/drivers/ni_mio_c_common.c
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/staging/comedi/drivers/ni_mio_c_common.c
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5e8130a7d670..dcaf7e89f299 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -246,24 +246,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
{
if (dev->mmio)
writel(data, dev->mmio + reg);
-
- outl(data, dev->iobase + reg);
+ else
+ outl(data, dev->iobase + reg);
}
static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
{
if (dev->mmio)
writew(data, dev->mmio + reg);
-
- outw(data, dev->iobase + reg);
+ else
+ outw(data, dev->iobase + reg);
}
static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
{
if (dev->mmio)
writeb(data, dev->mmio + reg);
-
- outb(data, dev->iobase + reg);
+ else
+ outb(data, dev->iobase + reg);
}
static uint32_t ni_readl(struct comedi_device *dev, int reg)
@@ -1166,8 +1166,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
comedi_buf_write_samples(s, &data, 1);
}
} else {
- if (n > sizeof(devpriv->ai_fifo_buffer) /
- sizeof(devpriv->ai_fifo_buffer[0])) {
+ if (n > ARRAY_SIZE(devpriv->ai_fifo_buffer)) {
dev_err(dev->class_dev,
"bug! ai_fifo_buffer too small\n");
async->events |= COMEDI_CB_ERROR;
@@ -1242,9 +1241,7 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
NISTC_AI_STATUS1_FIFO_E;
while (fifo_empty == 0) {
for (i = 0;
- i <
- sizeof(devpriv->ai_fifo_buffer) /
- sizeof(devpriv->ai_fifo_buffer[0]); i++) {
+ i < ARRAY_SIZE(devpriv->ai_fifo_buffer); i++) {
fifo_empty = ni_stc_readw(dev,
NISTC_AI_STATUS1_REG) &
NISTC_AI_STATUS1_FIFO_E;
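
ARRAY_SIZE() (include/linux/kernel.h) replaces the open-coded sizeof quotient and additionally fails to compile if its argument is not an array; a sketch:

	/* ARRAY_SIZE(a) == sizeof(a) / sizeof((a)[0]) */
	unsigned short fifo[16];
	unsigned int n = ARRAY_SIZE(fifo);	/* == 16 */
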
@@ -1500,7 +1497,8 @@ static void handle_b_interrupt(struct comedi_device *dev,
s->async->events |= COMEDI_CB_OVERFLOW;
}
- if (b_status & NISTC_AO_STATUS1_BC_TC)
+ if (s->async->cmd.stop_src != TRIG_NONE &&
+ b_status & NISTC_AO_STATUS1_BC_TC)
s->async->events |= COMEDI_CB_EOA;
#ifndef PCIDMA
@@ -2054,13 +2052,13 @@ static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns;
+ divider = DIV_ROUND_CLOSEST(nanosec, devpriv->clock_ns);
break;
case CMDF_ROUND_DOWN:
divider = (nanosec) / devpriv->clock_ns;
break;
case CMDF_ROUND_UP:
- divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns;
+ divider = DIV_ROUND_UP(nanosec, devpriv->clock_ns);
break;
}
return divider - 1;
@@ -2073,6 +2071,37 @@ static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer)
return devpriv->clock_ns * (timer + 1);
}
+static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
+ struct comedi_subdevice *sdev,
+ const struct comedi_cmd *cmd,
+ unsigned int max_count)
+{
+#ifdef PCIDMA
+ unsigned int nbytes = max_count;
+
+ if (cmd->stop_arg > 0 && cmd->stop_arg < max_count)
+ nbytes = cmd->stop_arg;
+ nbytes *= comedi_bytes_per_scan(sdev);
+
+ if (nbytes > sdev->async->prealloc_bufsz) {
+ if (cmd->stop_arg > 0)
+ dev_err(sdev->device->class_dev,
+ "ni_cmd_set_mite_transfer: tried exact data transfer limits greater than buffer size\n");
+
+ /*
+ * we can only transfer up to the size of the buffer. In this
+ * case, the user is expected to continue to write into the
+ * comedi buffer (already implemented as a ring buffer).
+ */
+ nbytes = sdev->async->prealloc_bufsz;
+ }
+
+ mite_init_ring_descriptors(ring, sdev, nbytes);
+#else
+ dev_err(sdev->device->class_dev,
+ "ni_cmd_set_mite_transfer: exact data transfer limits not implemented yet without DMA\n");
+#endif
+}
+
static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev,
unsigned num_channels)
{
@@ -2428,7 +2457,8 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
ni_stc_writew(dev, mode2, NISTC_AI_MODE2_REG);
break;
case TRIG_EXT:
- mode1 |= NISTC_AI_MODE1_CONVERT_SRC(1 + cmd->convert_arg);
+ mode1 |= NISTC_AI_MODE1_CONVERT_SRC(1 +
+ CR_CHAN(cmd->convert_arg));
if ((cmd->convert_arg & CR_INVERT) == 0)
mode1 |= NISTC_AI_MODE1_CONVERT_POLARITY;
ni_stc_writew(dev, mode1, NISTC_AI_MODE1_REG);
@@ -2902,8 +2932,6 @@ static int ni_ao_inttrig(struct comedi_device *dev,
ni_stc_writew(dev, NISTC_AO_CMD1_UI_ARM |
NISTC_AO_CMD1_UC_ARM |
NISTC_AO_CMD1_BC_ARM |
- NISTC_AO_CMD1_DAC1_UPDATE_MODE |
- NISTC_AO_CMD1_DAC0_UPDATE_MODE |
devpriv->ao_cmd1,
NISTC_AO_CMD1_REG);
@@ -2913,42 +2941,68 @@ static int ni_ao_inttrig(struct comedi_device *dev,
return 0;
}
-static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+/*
+ * begin ni_ao_cmd.
+ * Organized similar to NI-STC and MHDDK examples.
+ * ni_ao_cmd is broken out into configuration sub-routines for clarity.
+ */
+
+static void ni_ao_cmd_personalize(struct comedi_device *dev,
+ const struct comedi_cmd *cmd)
{
const struct ni_board_struct *board = dev->board_ptr;
- struct ni_private *devpriv = dev->private;
- const struct comedi_cmd *cmd = &s->async->cmd;
- int bits;
- int i;
- unsigned trigvar;
- unsigned val;
-
- if (dev->irq == 0) {
- dev_err(dev->class_dev, "cannot run command without an irq\n");
- return -EIO;
- }
+ unsigned bits;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_DISARM, NISTC_AO_CMD1_REG);
+ bits =
+ /* fast CPU interface--only eseries */
+ /* ((slow CPU interface) ? 0 : AO_Fast_CPU) | */
+ NISTC_AO_PERSONAL_BC_SRC_SEL |
+ 0 /* (use_original_pulse ? 0 : NISTC_AO_PERSONAL_UPDATE_TIMEBASE) */ |
+ /*
+ * FIXME: start setting following bit when appropriate. Need to
+ * determine whether board is E4 or E1.
+ * FROM MHDDK:
+ * if board is E4 or E1
+ * Set bit "NISTC_AO_PERSONAL_UPDATE_PW" to 0
+ * else
+ * set it to 1
+ */
+ NISTC_AO_PERSONAL_UPDATE_PW |
+ /* FIXME: when should we set following bit to zero? */
+ NISTC_AO_PERSONAL_TMRDACWR_PW |
+ (board->ao_fifo_depth ?
+ NISTC_AO_PERSONAL_FIFO_ENA : NISTC_AO_PERSONAL_DMA_PIO_CTRL)
+ ;
+#if 0
+ /*
+ * FIXME:
+ * add something like ".has_individual_dacs = 0" to ni_board_struct
+ * since, as F Hess pointed out, not all in m series have singles. not
+ * sure if e-series all have duals...
+ */
- if (devpriv->is_6xxx) {
- ni_ao_win_outw(dev, NI611X_AO_MISC_CLEAR_WG,
- NI611X_AO_MISC_REG);
+ /*
+ * F Hess: windows driver does not set NISTC_AO_PERSONAL_NUM_DAC bit for
+ * 6281, verified with bus analyzer.
+ */
+ if (devpriv->is_m_series)
+ bits |= NISTC_AO_PERSONAL_NUM_DAC;
+#endif
+ ni_stc_writew(dev, bits, NISTC_AO_PERSONAL_REG);
- bits = 0;
- for (i = 0; i < cmd->chanlist_len; i++) {
- int chan;
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
- chan = CR_CHAN(cmd->chanlist[i]);
- bits |= 1 << chan;
- ni_ao_win_outw(dev, chan, NI611X_AO_WAVEFORM_GEN_REG);
- }
- ni_ao_win_outw(dev, bits, NI611X_AO_TIMED_REG);
- }
+static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
+ const struct comedi_cmd *cmd)
+{
+ struct ni_private *devpriv = dev->private;
- ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1);
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+ /* sync */
if (cmd->stop_src == TRIG_NONE) {
devpriv->ao_mode1 |= NISTC_AO_MODE1_CONTINUOUS;
devpriv->ao_mode1 &= ~NISTC_AO_MODE1_TRIGGER_ONCE;
@@ -2958,177 +3012,351 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
- val = devpriv->ao_trigger_select;
- switch (cmd->start_src) {
- case TRIG_INT:
- case TRIG_NOW:
- val &= ~(NISTC_AO_TRIG_START1_POLARITY |
- NISTC_AO_TRIG_START1_SEL_MASK);
- val |= NISTC_AO_TRIG_START1_EDGE |
- NISTC_AO_TRIG_START1_SYNC;
- break;
- case TRIG_EXT:
- val = NISTC_AO_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) + 1);
- if (cmd->start_arg & CR_INVERT) {
- /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
- val |= NISTC_AO_TRIG_START1_POLARITY;
- }
- if (cmd->start_arg & CR_EDGE) {
- /* 0=edge detection disabled, 1=enabled */
- val |= NISTC_AO_TRIG_START1_EDGE;
+ {
+ unsigned int trigsel = devpriv->ao_trigger_select;
+
+ switch (cmd->start_src) {
+ case TRIG_INT:
+ case TRIG_NOW:
+ trigsel &= ~(NISTC_AO_TRIG_START1_POLARITY |
+ NISTC_AO_TRIG_START1_SEL_MASK);
+ trigsel |= NISTC_AO_TRIG_START1_EDGE |
+ NISTC_AO_TRIG_START1_SYNC;
+ break;
+ case TRIG_EXT:
+ trigsel = NISTC_AO_TRIG_START1_SEL(
+ CR_CHAN(cmd->start_arg) + 1);
+ if (cmd->start_arg & CR_INVERT)
+ /*
+ * 0=active high, 1=active low.
+ * see daq-stc 3-24 (p186)
+ */
+ trigsel |= NISTC_AO_TRIG_START1_POLARITY;
+ if (cmd->start_arg & CR_EDGE)
+ /* 0=edge detection disabled, 1=enabled */
+ trigsel |= NISTC_AO_TRIG_START1_EDGE;
+ break;
+ default:
+ BUG();
+ break;
}
+
+ devpriv->ao_trigger_select = trigsel;
ni_stc_writew(dev, devpriv->ao_trigger_select,
NISTC_AO_TRIG_SEL_REG);
- break;
- default:
- BUG();
- break;
}
- devpriv->ao_trigger_select = val;
- ni_stc_writew(dev, devpriv->ao_trigger_select, NISTC_AO_TRIG_SEL_REG);
+ /* AO_Delayed_START1 = 0, we do not support delayed start...yet */
+ /* sync */
+ /* select DA_START1 as PFI6/AO_START1 when configured as an output */
devpriv->ao_mode3 &= ~NISTC_AO_MODE3_TRIG_LEN;
ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_counters(struct comedi_device *dev,
+ const struct comedi_cmd *cmd)
+{
+ struct ni_private *devpriv = dev->private;
+ /* Not supporting 'waveform staging' or 'local buffer with pauses' */
+
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+ /*
+ * This relies on ao_mode1/(Trigger_Once | Continuous) being set in
+ * set_trigger above. It is unclear whether we really need to re-write
+ * this register with these values. The mhddk examples for e-series
+ * show writing this in both places, but the examples for m-series show
+ * a single write in the set_counters function (here).
+ */
ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
+
+ /* sync (upload number of buffer iterations -1) */
+ /* indicate that we want to use BC_Load_A_Register as the source */
devpriv->ao_mode2 &= ~NISTC_AO_MODE2_BC_INIT_LOAD_SRC;
ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
- if (cmd->stop_src == TRIG_NONE)
- ni_stc_writel(dev, 0xffffff, NISTC_AO_BC_LOADA_REG);
- else
- ni_stc_writel(dev, 0, NISTC_AO_BC_LOADA_REG);
+
+ /*
+ * if the BC_TC interrupt is still issued in spite of UC, BC, UI
+ * ignoring BC_TC, then we will need to find a way to ignore that
+ * interrupt in continuous mode.
+ */
+ ni_stc_writel(dev, 0, NISTC_AO_BC_LOADA_REG); /* iter once */
+
+ /* sync (issue command to load number of buffer iterations -1) */
ni_stc_writew(dev, NISTC_AO_CMD1_BC_LOAD, NISTC_AO_CMD1_REG);
+
+ /* sync (upload number of updates in buffer) */
+ /* indicate that we want to use UC_Load_A_Register as the source */
devpriv->ao_mode2 &= ~NISTC_AO_MODE2_UC_INIT_LOAD_SRC;
ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
- switch (cmd->stop_src) {
- case TRIG_COUNT:
+
+ /*
+ * if a user specifies '0', this automatically assumes the entire 24bit
+ * address space is available for the (multiple iterations of single
+ * buffer) MISB. Otherwise, stop_arg specifies the MISB length that
+ * will be used, regardless of whether we are in continuous mode or not.
+ * In continuous mode, the output will just iterate indefinitely over
+ * the MISB.
+ */
+ {
+ unsigned int stop_arg = cmd->stop_arg > 0 ?
+ (cmd->stop_arg & 0xffffff) : 0xffffff;
+
if (devpriv->is_m_series) {
- /* this is how the NI example code does it for m-series boards, verified correct with 6259 */
- ni_stc_writel(dev, cmd->stop_arg - 1,
- NISTC_AO_UC_LOADA_REG);
+ /*
+ * this is how the NI example code does it for m-series
+ * boards, verified correct with 6259
+ */
+ ni_stc_writel(dev, stop_arg - 1, NISTC_AO_UC_LOADA_REG);
+
+ /* sync (issue cmd to load number of updates in MISB) */
ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD,
NISTC_AO_CMD1_REG);
} else {
- ni_stc_writel(dev, cmd->stop_arg,
- NISTC_AO_UC_LOADA_REG);
+ ni_stc_writel(dev, stop_arg, NISTC_AO_UC_LOADA_REG);
+
+ /* sync (issue cmd to load number of updates in MISB) */
ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD,
NISTC_AO_CMD1_REG);
- ni_stc_writel(dev, cmd->stop_arg - 1,
- NISTC_AO_UC_LOADA_REG);
+
+ /*
+ * sync (upload number of updates-1 in MISB)
+ * --eseries only?
+ */
+ ni_stc_writel(dev, stop_arg - 1, NISTC_AO_UC_LOADA_REG);
}
- break;
- case TRIG_NONE:
- ni_stc_writel(dev, 0xffffff, NISTC_AO_UC_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG);
- ni_stc_writel(dev, 0xffffff, NISTC_AO_UC_LOADA_REG);
- break;
- default:
- ni_stc_writel(dev, 0, NISTC_AO_UC_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UC_LOAD, NISTC_AO_CMD1_REG);
- ni_stc_writel(dev, cmd->stop_arg, NISTC_AO_UC_LOADA_REG);
}
- devpriv->ao_mode1 &= ~(NISTC_AO_MODE1_UPDATE_SRC_MASK |
- NISTC_AO_MODE1_UI_SRC_MASK |
- NISTC_AO_MODE1_UPDATE_SRC_POLARITY |
- NISTC_AO_MODE1_UI_SRC_POLARITY);
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_update(struct comedi_device *dev,
+ const struct comedi_cmd *cmd)
+{
+ struct ni_private *devpriv = dev->private;
+
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+
+ /*
+ * zero out these bit fields to be set below. Does an ao-reset do this
+ * automatically?
+ */
+ devpriv->ao_mode1 &= ~(
+ NISTC_AO_MODE1_UI_SRC_MASK |
+ NISTC_AO_MODE1_UI_SRC_POLARITY |
+ NISTC_AO_MODE1_UPDATE_SRC_MASK |
+ NISTC_AO_MODE1_UPDATE_SRC_POLARITY
+ );
+
switch (cmd->scan_begin_src) {
case TRIG_TIMER:
- devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA;
- trigvar =
- ni_ns_to_timer(dev, cmd->scan_begin_arg,
- CMDF_ROUND_NEAREST);
- ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
- ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
+ devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA;
+
+ /*
+ * NOTE: there are several other ways of configuring internal
+ * updates, but we'll only support one for now: using
+ * AO_IN_TIMEBASE, w/o waveform staging, w/o a delay between
+ * START1 and first update, and also w/o local buffer mode w/
+ * pauses.
+ */
+
+ /*
+ * This is already done above:
+ * devpriv->ao_mode1 &= ~(
+ * // set UPDATE_Source to UI_TC:
+ * NISTC_AO_MODE1_UPDATE_SRC_MASK |
+ * // set UPDATE_Source_Polarity to rising (required?)
+ * NISTC_AO_MODE1_UPDATE_SRC_POLARITY |
+ * // set UI_Source to AO_IN_TIMEBASE1:
+ * NISTC_AO_MODE1_UI_SRC_MASK |
+ * // set UI_Source_Polarity to rising (required?)
+ * NISTC_AO_MODE1_UI_SRC_POLARITY
+ * );
+ */
+
+ /*
+ * TODO: use ao_ui_clock_source to allow all possible signals
+ * to be routed to UI_Source_Select. See tSTC.h for
+ * eseries/ni67xx and tMSeries.h for mseries.
+ */
+
+ {
+ unsigned trigvar = ni_ns_to_timer(dev,
+ cmd->scan_begin_arg,
+ CMDF_ROUND_NEAREST);
+
+ /*
+ * Wait N TB3 ticks after the start trigger before
+ * clocking(N must be >=2).
+ */
+ /* following line: 2-1 per STC */
+ ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
+ ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD,
+ NISTC_AO_CMD1_REG);
+ /* following line: N-1 per STC */
+ ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+ }
break;
case TRIG_EXT:
- devpriv->ao_mode1 |=
- NISTC_AO_MODE1_UPDATE_SRC(cmd->scan_begin_arg);
+ /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
+ devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
+ devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC(
+ CR_CHAN(cmd->scan_begin_arg));
if (cmd->scan_begin_arg & CR_INVERT)
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY;
- devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
break;
default:
BUG();
break;
}
+
ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
devpriv->ao_mode2 &= ~(NISTC_AO_MODE2_UI_RELOAD_MODE(3) |
NISTC_AO_MODE2_UI_INIT_LOAD_SRC);
ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
+ /* Configure DAQ-STC for Timed update mode */
+ devpriv->ao_cmd1 |= NISTC_AO_CMD1_DAC1_UPDATE_MODE |
+ NISTC_AO_CMD1_DAC0_UPDATE_MODE;
+ /* We are not using UPDATE2-->don't have to set DACx_Source_Select */
+ ni_stc_writew(dev, devpriv->ao_cmd1, NISTC_AO_CMD1_REG);
+
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_channels(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct ni_private *devpriv = dev->private;
+ const struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned bits = 0;
+
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+
+ if (devpriv->is_6xxx) {
+ unsigned int i;
+
+ bits = 0;
+ for (i = 0; i < cmd->chanlist_len; ++i) {
+ int chan = CR_CHAN(cmd->chanlist[i]);
+
+ bits |= 1 << chan;
+ ni_ao_win_outw(dev, chan, NI611X_AO_WAVEFORM_GEN_REG);
+ }
+ ni_ao_win_outw(dev, bits, NI611X_AO_TIMED_REG);
+ }
+
+ ni_ao_config_chanlist(dev, s, cmd->chanlist, cmd->chanlist_len, 1);
+
if (cmd->scan_end_arg > 1) {
devpriv->ao_mode1 |= NISTC_AO_MODE1_MULTI_CHAN;
- ni_stc_writew(dev,
- NISTC_AO_OUT_CTRL_CHANS(cmd->scan_end_arg - 1) |
- NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ,
- NISTC_AO_OUT_CTRL_REG);
- } else {
- unsigned bits;
+ bits = NISTC_AO_OUT_CTRL_CHANS(cmd->scan_end_arg - 1)
+ | NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ;
+ } else {
devpriv->ao_mode1 &= ~NISTC_AO_MODE1_MULTI_CHAN;
bits = NISTC_AO_OUT_CTRL_UPDATE_SEL_HIGHZ;
- if (devpriv->is_m_series || devpriv->is_6xxx) {
+ if (devpriv->is_m_series || devpriv->is_6xxx)
bits |= NISTC_AO_OUT_CTRL_CHANS(0);
- } else {
- bits |=
- NISTC_AO_OUT_CTRL_CHANS(CR_CHAN(cmd->chanlist[0]));
- }
- ni_stc_writew(dev, bits, NISTC_AO_OUT_CTRL_REG);
+ else
+ bits |= NISTC_AO_OUT_CTRL_CHANS(
+ CR_CHAN(cmd->chanlist[0]));
}
+
ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
+ ni_stc_writew(dev, bits, NISTC_AO_OUT_CTRL_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_DAC1_UPDATE_MODE |
- NISTC_AO_CMD1_DAC0_UPDATE_MODE,
- NISTC_AO_CMD1_REG);
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_stop_conditions(struct comedi_device *dev,
+ const struct comedi_cmd *cmd)
+{
+ struct ni_private *devpriv = dev->private;
+
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
devpriv->ao_mode3 |= NISTC_AO_MODE3_STOP_ON_OVERRUN_ERR;
ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
+ /*
+ * Since we are not supporting waveform staging, we ignore these errors:
+ * NISTC_AO_MODE3_STOP_ON_BC_TC_ERR,
+ * NISTC_AO_MODE3_STOP_ON_BC_TC_TRIG_ERR
+ */
+
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+}
+
+static void ni_ao_cmd_set_fifo_mode(struct comedi_device *dev)
+{
+ struct ni_private *devpriv = dev->private;
+
+ ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+
devpriv->ao_mode2 &= ~NISTC_AO_MODE2_FIFO_MODE_MASK;
#ifdef PCIDMA
devpriv->ao_mode2 |= NISTC_AO_MODE2_FIFO_MODE_HF_F;
#else
devpriv->ao_mode2 |= NISTC_AO_MODE2_FIFO_MODE_HF;
#endif
+ /* NOTE: this is where use_onboard_memory=True would be implemented */
devpriv->ao_mode2 &= ~NISTC_AO_MODE2_FIFO_REXMIT_ENA;
ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
- bits = NISTC_AO_PERSONAL_BC_SRC_SEL |
- NISTC_AO_PERSONAL_UPDATE_PW |
- NISTC_AO_PERSONAL_TMRDACWR_PW;
- if (board->ao_fifo_depth)
- bits |= NISTC_AO_PERSONAL_FIFO_ENA;
- else
- bits |= NISTC_AO_PERSONAL_DMA_PIO_CTRL;
-#if 0
- /*
- * F Hess: windows driver does not set NISTC_AO_PERSONAL_NUM_DAC bit
- * for 6281, verified with bus analyzer.
- */
- if (devpriv->is_m_series)
- bits |= NISTC_AO_PERSONAL_NUM_DAC;
-#endif
- ni_stc_writew(dev, bits, NISTC_AO_PERSONAL_REG);
- /* enable sending of ao dma requests */
+ /* enable sending of ao fifo requests (dma request) */
ni_stc_writew(dev, NISTC_AO_START_AOFREQ_ENA, NISTC_AO_START_SEL_REG);
ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
- if (cmd->stop_src == TRIG_COUNT) {
- ni_stc_writew(dev, NISTC_INTB_ACK_AO_BC_TC,
- NISTC_INTB_ACK_REG);
+ /* we are not supporting boards with virtual fifos */
+}
+
+static void ni_ao_cmd_set_interrupts(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ if (s->async->cmd.stop_src == TRIG_COUNT)
ni_set_bits(dev, NISTC_INTB_ENA_REG,
NISTC_INTB_ENA_AO_BC_TC, 1);
- }
s->async->inttrig = ni_ao_inttrig;
+}
+
+static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ struct ni_private *devpriv = dev->private;
+ const struct comedi_cmd *cmd = &s->async->cmd;
+
+ if (dev->irq == 0) {
+ dev_err(dev->class_dev, "cannot run command without an irq\n");
+ return -EIO;
+ }
+
+ /* ni_ao_reset should have already been done */
+ ni_ao_cmd_personalize(dev, cmd);
+ /* clearing fifo and preload happens elsewhere */
+ ni_ao_cmd_set_trigger(dev, cmd);
+ ni_ao_cmd_set_counters(dev, cmd);
+ ni_ao_cmd_set_update(dev, cmd);
+ ni_ao_cmd_set_channels(dev, s);
+ ni_ao_cmd_set_stop_conditions(dev, cmd);
+ ni_ao_cmd_set_fifo_mode(dev);
+ ni_cmd_set_mite_transfer(devpriv->ao_mite_ring, s, cmd, 0x00ffffff);
+ ni_ao_cmd_set_interrupts(dev, s);
+
+ /*
+ * arm(ing) and start(ing) happen in ni_ao_inttrig, which _must_ be
+ * called for ao commands since 1) TRIG_NOW is not supported and 2) DMA
+ * must be set up and initially written to before arm/start happen.
+ */
return 0;
}
+/* end ni_ao_cmd */
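
Each of the helpers above brackets its register programming the same way, following the DAQ-STC configuration protocol; the shared skeleton, as a sketch:

	ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
	/* ... program one functional group of AO registers ... */
	ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
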
+
static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
@@ -3187,11 +3415,7 @@ static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
-
- if (cmd->stop_src == TRIG_COUNT)
- err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
- else /* TRIG_NONE */
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+ err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 0x00ffffff);
if (err)
return 3;
@@ -3214,48 +3438,70 @@ static int ni_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ /* See 3.6.1.2 "Resetting" of the DAQ-STC Technical Reference Manual */
+
+ /*
+ * In the following, the "--sync" comments are meant to denote
+ * asynchronous boundaries for setting the registers, as described in
+ * the DAQ-STC, and mostly in the order given there.
+ */
+
struct ni_private *devpriv = dev->private;
ni_release_ao_mite_channel(dev);
+ /* --sync (reset AO) */
+ if (devpriv->is_m_series)
+ /* following example in mhddk for m-series */
+ ni_stc_writew(dev, NISTC_RESET_AO, NISTC_RESET_REG);
+
+ /*--sync (start config) */
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
+
+ /*--sync (Disarm) */
ni_stc_writew(dev, NISTC_AO_CMD1_DISARM, NISTC_AO_CMD1_REG);
- ni_set_bits(dev, NISTC_INTB_ENA_REG, ~0, 0);
- ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL, NISTC_AO_PERSONAL_REG);
- ni_stc_writew(dev, NISTC_INTB_ACK_AO_ALL, NISTC_INTB_ACK_REG);
- ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL |
- NISTC_AO_PERSONAL_UPDATE_PW |
- NISTC_AO_PERSONAL_TMRDACWR_PW,
- NISTC_AO_PERSONAL_REG);
- ni_stc_writew(dev, 0, NISTC_AO_OUT_CTRL_REG);
- ni_stc_writew(dev, 0, NISTC_AO_START_SEL_REG);
- devpriv->ao_cmd1 = 0;
- ni_stc_writew(dev, devpriv->ao_cmd1, NISTC_AO_CMD1_REG);
- devpriv->ao_cmd2 = 0;
- ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
+
+ /*
+ * --sync
+ * (clear bunch of registers--mseries mhddk examples do not include
+ * this)
+ */
+ devpriv->ao_cmd1 = 0;
+ devpriv->ao_cmd2 = 0;
devpriv->ao_mode1 = 0;
- ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
devpriv->ao_mode2 = 0;
- ni_stc_writew(dev, devpriv->ao_mode2, NISTC_AO_MODE2_REG);
if (devpriv->is_m_series)
devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE;
else
devpriv->ao_mode3 = 0;
- ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
devpriv->ao_trigger_select = 0;
- ni_stc_writew(dev, devpriv->ao_trigger_select,
- NISTC_AO_TRIG_SEL_REG);
- if (devpriv->is_6xxx) {
- unsigned immediate_bits = 0;
- unsigned i;
- for (i = 0; i < s->n_chan; ++i)
- immediate_bits |= 1 << i;
- ni_ao_win_outw(dev, immediate_bits, NI671X_AO_IMMEDIATE_REG);
+ ni_stc_writew(dev, 0, NISTC_AO_PERSONAL_REG);
+ ni_stc_writew(dev, 0, NISTC_AO_CMD1_REG);
+ ni_stc_writew(dev, 0, NISTC_AO_CMD2_REG);
+ ni_stc_writew(dev, 0, NISTC_AO_MODE1_REG);
+ ni_stc_writew(dev, 0, NISTC_AO_MODE2_REG);
+ ni_stc_writew(dev, 0, NISTC_AO_OUT_CTRL_REG);
+ ni_stc_writew(dev, devpriv->ao_mode3, NISTC_AO_MODE3_REG);
+ ni_stc_writew(dev, 0, NISTC_AO_START_SEL_REG);
+ ni_stc_writew(dev, 0, NISTC_AO_TRIG_SEL_REG);
+
+ /*--sync (disable interrupts) */
+ ni_set_bits(dev, NISTC_INTB_ENA_REG, ~0, 0);
+
+ /*--sync (ack) */
+ ni_stc_writew(dev, NISTC_AO_PERSONAL_BC_SRC_SEL, NISTC_AO_PERSONAL_REG);
+ ni_stc_writew(dev, NISTC_INTB_ACK_AO_ALL, NISTC_INTB_ACK_REG);
+
+ /*--not in DAQ-STC. which doc? */
+ if (devpriv->is_6xxx) {
+ ni_ao_win_outw(dev, (1u << s->n_chan) - 1u,
+ NI671X_AO_IMMEDIATE_REG);
ni_ao_win_outw(dev, NI611X_AO_MISC_CLEAR_WG,
NI611X_AO_MISC_REG);
}
ni_stc_writew(dev, NISTC_RESET_AO_CFG_END, NISTC_RESET_REG);
+ /*--end */
return 0;
}
@@ -3381,7 +3627,9 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
- err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+ err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+ s->async->prealloc_bufsz /
+ comedi_bytes_per_scan(s));
if (err)
return 3;
@@ -3458,6 +3706,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct ni_private *devpriv = dev->private;
const struct comedi_cmd *cmd = &s->async->cmd;
unsigned cdo_mode_bits;
int retval;
@@ -3482,6 +3731,10 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (retval < 0)
return retval;
+ ni_cmd_set_mite_transfer(devpriv->cdo_mite_ring, s, cmd,
+ s->async->prealloc_bufsz /
+ comedi_bytes_per_scan(s));
+
s->async->inttrig = ni_cdo_inttrig;
return 0;
@@ -3980,34 +4233,30 @@ static int ni_m_series_pwm_config(struct comedi_device *dev,
case INSN_CONFIG_PWM_OUTPUT:
switch (data[1]) {
case CMDF_ROUND_NEAREST:
- up_count =
- (data[2] +
- devpriv->clock_ns / 2) / devpriv->clock_ns;
+ up_count = DIV_ROUND_CLOSEST(data[2],
+ devpriv->clock_ns);
break;
case CMDF_ROUND_DOWN:
up_count = data[2] / devpriv->clock_ns;
break;
case CMDF_ROUND_UP:
up_count =
- (data[2] + devpriv->clock_ns -
- 1) / devpriv->clock_ns;
+ DIV_ROUND_UP(data[2], devpriv->clock_ns);
break;
default:
return -EINVAL;
}
switch (data[3]) {
case CMDF_ROUND_NEAREST:
- down_count =
- (data[4] +
- devpriv->clock_ns / 2) / devpriv->clock_ns;
+ down_count = DIV_ROUND_CLOSEST(data[4],
+ devpriv->clock_ns);
break;
case CMDF_ROUND_DOWN:
down_count = data[4] / devpriv->clock_ns;
break;
case CMDF_ROUND_UP:
down_count =
- (data[4] + devpriv->clock_ns -
- 1) / devpriv->clock_ns;
+ DIV_ROUND_UP(data[4], devpriv->clock_ns);
break;
default:
return -EINVAL;
@@ -4044,34 +4293,30 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
case INSN_CONFIG_PWM_OUTPUT:
switch (data[1]) {
case CMDF_ROUND_NEAREST:
- up_count =
- (data[2] +
- devpriv->clock_ns / 2) / devpriv->clock_ns;
+ up_count = DIV_ROUND_CLOSEST(data[2],
+ devpriv->clock_ns);
break;
case CMDF_ROUND_DOWN:
up_count = data[2] / devpriv->clock_ns;
break;
case CMDF_ROUND_UP:
up_count =
- (data[2] + devpriv->clock_ns -
- 1) / devpriv->clock_ns;
+ DIV_ROUND_UP(data[2], devpriv->clock_ns);
break;
default:
return -EINVAL;
}
switch (data[3]) {
case CMDF_ROUND_NEAREST:
- down_count =
- (data[4] +
- devpriv->clock_ns / 2) / devpriv->clock_ns;
+ down_count = DIV_ROUND_CLOSEST(data[4],
+ devpriv->clock_ns);
break;
case CMDF_ROUND_DOWN:
down_count = data[4] / devpriv->clock_ns;
break;
case CMDF_ROUND_UP:
down_count =
- (data[4] + devpriv->clock_ns -
- 1) / devpriv->clock_ns;
+ DIV_ROUND_UP(data[4], devpriv->clock_ns);
break;
default:
return -EINVAL;
@@ -4665,9 +4910,9 @@ static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
*freq_divider = best_div;
*freq_multiplier = best_mult;
- *actual_period_ns =
- (best_period_picosec * fudge_factor_80_to_20Mhz +
- (pico_per_nano / 2)) / pico_per_nano;
+ *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec *
+ fudge_factor_80_to_20Mhz,
+ pico_per_nano);
return 0;
}
@@ -5024,7 +5269,6 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
unsigned long flags;
#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
- struct mite_struct *mite = devpriv->mite;
#endif
if (!dev->attached)
@@ -5036,8 +5280,7 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
a_status = ni_stc_readw(dev, NISTC_AI_STATUS1_REG);
b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG);
#ifdef PCIDMA
- if (mite) {
- struct ni_private *devpriv = dev->private;
+ if (devpriv->mite) {
unsigned long flags_too;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
@@ -5053,7 +5296,7 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
if (ao_mite_status & CHSR_LINKC)
writel(CHOR_CLRLC,
- mite->mite_io_addr +
+ devpriv->mite->mite_io_addr +
MITE_CHOR(devpriv->
ao_mite_chan->channel));
}
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index ac79099bc23e..7112c3fec8bb 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -525,13 +525,13 @@ static int ni_pcidio_ns_to_timer(int *nanosec, unsigned int flags)
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- divider = (*nanosec + base / 2) / base;
+ divider = DIV_ROUND_CLOSEST(*nanosec, base);
break;
case CMDF_ROUND_DOWN:
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
- divider = (*nanosec + base - 1) / base;
+ divider = DIV_ROUND_UP(*nanosec, base);
break;
}
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 30a5a75d1fe7..231e37d6b7c6 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -26,7 +26,8 @@ Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E, PCI-6071E, PCI-6023E,
PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E, PCI-6035E, PCI-6052E,
PCI-6110, PCI-6111, PCI-6220, PCI-6221, PCI-6224, PXI-6224,
- PCI-6225, PXI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251, PXIe-6251,
+ PCI-6225, PXI-6225, PCI-6229, PCI-6250,
+ PCI-6251, PXI-6251, PCIe-6251, PXIe-6251,
PCI-6254, PCI-6259, PCIe-6259,
PCI-6280, PCI-6281, PXI-6281, PCI-6284, PCI-6289,
PCI-6711, PXI-6711, PCI-6713, PXI-6713,
@@ -193,6 +194,7 @@ enum ni_pcimio_boardid {
BOARD_PCI6229,
BOARD_PCI6250,
BOARD_PCI6251,
+ BOARD_PXI6251,
BOARD_PCIE6251,
BOARD_PXIE6251,
BOARD_PCI6254,
@@ -811,6 +813,21 @@ static const struct ni_board_struct ni_boards[] = {
.ao_speed = 350,
.caldac = { caldac_none },
},
+ [BOARD_PXI6251] = {
+ .name = "pxi-6251",
+ .n_adchan = 16,
+ .ai_maxdata = 0xffff,
+ .ai_fifo_depth = 4095,
+ .gainlkup = ai_gain_628x,
+ .ai_speed = 800,
+ .n_aochan = 2,
+ .ao_maxdata = 0xffff,
+ .ao_fifo_depth = 8191,
+ .ao_range_table = &range_ni_M_625x_ao,
+ .reg_type = ni_reg_625x,
+ .ao_speed = 350,
+ .caldac = { caldac_none },
+ },
[BOARD_PCIE6251] = {
.name = "pcie-6251",
.n_adchan = 16,
@@ -1290,6 +1307,7 @@ static const struct pci_device_id ni_pcimio_pci_table[] = {
{ PCI_VDEVICE(NI, 0x71bc), BOARD_PCI6221_37PIN },
{ PCI_VDEVICE(NI, 0x717d), BOARD_PCIE6251 },
{ PCI_VDEVICE(NI, 0x72e8), BOARD_PXIE6251 },
+ { PCI_VDEVICE(NI, 0x70ad), BOARD_PXI6251 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ni_pcimio_pci_table);
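
The new id-table line relies on PCI_VDEVICE() zero-filling class/class_mask so that the trailing value lands in driver_data; a sketch of its expansion, assuming the usual include/linux/pci.h definition:

	{ .vendor = PCI_VENDOR_ID_NI, .device = 0x70ad,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	  .class = 0, .class_mask = 0, .driver_data = BOARD_PXI6251 },
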
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 437f723bb34d..823e47910004 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
unsigned long flags;
int ret = 0;
- if (trig_num != cmd->start_src)
+ if (trig_num != cmd->start_arg)
return -EINVAL;
spin_lock_irqsave(&counter->lock, flags);
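
This one-character fix matters because the comedi core passes the user-written trigger number to the inttrig handler, and cmdtest validates start_arg, not start_src (which is the TRIG_INT flag itself). A sketch of the convention after this change, using a hypothetical handler name:

	static int demo_inttrig(struct comedi_device *dev,
				struct comedi_subdevice *s,
				unsigned int trig_num)
	{
		struct comedi_cmd *cmd = &s->async->cmd;

		if (trig_num != cmd->start_arg)	/* not cmd->start_src */
			return -EINVAL;
		/* ... arm and start the acquisition ... */
		return 0;
	}
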
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 68ac02b68cb2..9b6c56773247 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -892,9 +892,8 @@ static int rtd_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->xfer_count = cmd->chanlist_len;
} else { /* make a multiple of scan length */
devpriv->xfer_count =
- (devpriv->xfer_count +
- cmd->chanlist_len - 1)
- / cmd->chanlist_len;
+ DIV_ROUND_UP(devpriv->xfer_count,
+ cmd->chanlist_len);
devpriv->xfer_count *= cmd->chanlist_len;
}
devpriv->flags |= SEND_EOS;
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 35f0f676eb28..c5e08635e01e 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -1167,12 +1167,6 @@ static void s626_set_clk_mult(struct comedi_device *dev,
s626_set_mode(dev, chan, mode, false);
}
-static uint16_t s626_get_clk_mult(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_CLKMULT(s626_get_mode(dev, chan));
-}
-
/*
* Return/set the clock polarity.
*/
@@ -1188,12 +1182,6 @@ static void s626_set_clk_pol(struct comedi_device *dev,
s626_set_mode(dev, chan, mode, false);
}
-static uint16_t s626_get_clk_pol(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_CLKPOL(s626_get_mode(dev, chan));
-}
-
/*
* Return/set the encoder mode.
*/
@@ -1209,27 +1197,6 @@ static void s626_set_enc_mode(struct comedi_device *dev,
s626_set_mode(dev, chan, mode, false);
}
-static uint16_t s626_get_enc_mode(struct comedi_device *dev,
- unsigned int chan)
-{
- return S626_GET_STD_ENCMODE(s626_get_mode(dev, chan));
-}
-
-/*
- * Return/set the index polarity.
- */
-static void s626_set_index_pol(struct comedi_device *dev,
- unsigned int chan, uint16_t value)
-{
- uint16_t mode;
-
- mode = s626_get_mode(dev, chan);
- mode &= ~S626_STDMSK_INDXPOL;
- mode |= S626_SET_STD_INDXPOL(value != 0);
-
- s626_set_mode(dev, chan, mode, false);
-}
-
static uint16_t s626_get_index_pol(struct comedi_device *dev,
unsigned int chan)
{
diff --git a/drivers/staging/dgap/Kconfig b/drivers/staging/dgap/Kconfig
deleted file mode 100644
index 3bbe9e122365..000000000000
--- a/drivers/staging/dgap/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config DGAP
- tristate "Digi EPCA PCI products"
- default n
- depends on TTY && HAS_IOMEM
- ---help---
- Driver for the Digi International EPCA PCI based product line
diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
deleted file mode 100644
index 0063d044ca71..000000000000
--- a/drivers/staging/dgap/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_DGAP) += dgap.o
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
deleted file mode 100644
index bad355100825..000000000000
--- a/drivers/staging/dgap/dgap.c
+++ /dev/null
@@ -1,7079 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- */
-
-/*
- * In the original out-of-kernel Digi dgap driver, firmware
- * loading was done via userland-to-driver handshaking.
- *
- * For cards that support a concentrator (port expander),
- * I believe the concentrator itself told the card which
- * concentrator was actually attached, and then that info
- * was used to tell userland which concentrator firmware
- * image was to be downloaded. I think even the required
- * BIOS or FEP images could change with the connection
- * of a particular concentrator.
- *
- * Since I have no access to any of these cards or
- * concentrators, I cannot put the correct concentrator
- * firmware file names into the firmware_info structure
- * as is now done for the BIOS and FEP images.
- *
- * I think, but am not certain, that the cards supporting
- * concentrators will function without them. So support
- * of these cards has been left in this driver.
- *
- * In order to fully support those cards, they would
- * either have to be acquired for dissection or maybe
- * Digi International could provide some assistance.
- */
-#undef DIGI_CONCENTRATORS_SUPPORTED
-
-#define pr_fmt(fmt) "dgap: " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h> /* For udelay */
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/sched.h>
-
-#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
-#include <linux/ctype.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/io.h> /* For read[bwl]/write[bwl] */
-
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/kdev_t.h>
-#include <linux/firmware.h>
-
-#include "dgap.h"
-
-/*
- * File operations permitted on Control/Management major.
- */
-static const struct file_operations dgap_board_fops = {
- .owner = THIS_MODULE,
-};
-
-static uint dgap_numboards;
-static struct board_t *dgap_board[MAXBOARDS];
-static ulong dgap_poll_counter;
-static int dgap_driver_state = DRIVER_INITIALIZED;
-static int dgap_poll_tick = 20; /* Poll interval - 20 ms */
-
-static struct class *dgap_class;
-
-static uint dgap_count = 500;
-
-/*
- * Poller stuff
- */
-static DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */
-static ulong dgap_poll_time; /* Time of next poll */
-static uint dgap_poll_stop; /* Used to tell poller to stop */
-static struct timer_list dgap_poll_timer;
-
-/*
- SUPPORTED PRODUCTS
-
- Card Model Number of Ports Interface
- ----------------------------------------------------------------
- Acceleport Xem 4 - 64 (EIA232 & EIA422)
- Acceleport Xr 4 & 8 (EIA232)
- Acceleport Xr 920 4 & 8 (EIA232)
- Acceleport C/X 8 - 128 (EIA232)
- Acceleport EPC/X 8 - 224 (EIA232)
- Acceleport Xr/422 4 & 8 (EIA422)
- Acceleport 2r/920 2 (EIA232)
- Acceleport 4r/920 4 (EIA232)
- Acceleport 8r/920 8 (EIA232)
-
- IBM 8-Port Asynchronous PCI Adapter (EIA232)
- IBM 128-Port Asynchronous PCI Adapter (EIA232 & EIA422)
-*/
-
-static struct pci_device_id dgap_pci_tbl[] = {
- { DIGI_VID, PCI_DEV_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { DIGI_VID, PCI_DEV_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { DIGI_VID, PCI_DEV_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { DIGI_VID, PCI_DEV_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { DIGI_VID, PCI_DEV_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
- { DIGI_VID, PCI_DEV_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
- { DIGI_VID, PCI_DEV_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
- { DIGI_VID, PCI_DEV_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
- { DIGI_VID, PCI_DEV_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
- { DIGI_VID, PCI_DEV_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
- { DIGI_VID, PCI_DEV_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
- { DIGI_VID, PCI_DEV_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
- { DIGI_VID, PCI_DEV_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
- { DIGI_VID, PCI_DEV_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
- { DIGI_VID, PCI_DEV_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
- {0,} /* 0 terminated list. */
-};
-MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
-
-/*
- * A generic list of board configurations: config type, product name,
- * maximum port count, and DPA type.
- */
-struct board_id {
- uint config_type;
- u8 *name;
- uint maxports;
- uint dpatype;
-};
-
-static struct board_id dgap_ids[] = {
- {PPCM, PCI_DEV_XEM_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS)},
- {PCX, PCI_DEV_CX_NAME, 128, (T_CX | T_PCIBUS) },
- {PCX, PCI_DEV_CX_IBM_NAME, 128, (T_CX | T_PCIBUS) },
- {PEPC, PCI_DEV_EPCJ_NAME, 224, (T_EPC | T_PCIBUS) },
- {APORT2_920P, PCI_DEV_920_2_NAME, 2, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {APORT4_920P, PCI_DEV_920_4_NAME, 4, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {APORT8_920P, PCI_DEV_920_8_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {PAPORT8, PCI_DEV_XR_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {PAPORT8, PCI_DEV_XRJ_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {PAPORT8, PCI_DEV_XR_422_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {PAPORT8, PCI_DEV_XR_IBM_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {PAPORT8, PCI_DEV_XR_SAIP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {PAPORT8, PCI_DEV_XR_BULL_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {APORT8_920P, PCI_DEV_920_8_HP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS)},
- {PPCM, PCI_DEV_XEM_HP_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS)},
- {0,} /* 0 terminated list. */
-};
-
-struct firmware_info {
- u8 *conf_name; /* dgap.conf */
- u8 *bios_name; /* BIOS filename */
- u8 *fep_name; /* FEP filename */
- u8 *con_name; /* Concentrator filename FIXME*/
- int num; /* sequence number */
-};
-
-/*
- * Firmware - BIOS, FEP, and CONC filenames
- */
-static struct firmware_info fw_info[] = {
- { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", NULL, 0 },
- { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 1 },
- { "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 2 },
- { "dgap/dgap.conf", "dgap/pcibios.bin", "dgap/pcifep.bin", NULL, 3 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 4 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 5 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 6 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 7 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 8 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 9 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 10 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 11 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 12 },
- { "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 13 },
- { "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", NULL, 14 },
- {NULL,}
-};
-
-/*
- * Default transparent print information.
- */
-static struct digi_t dgap_digi_init = {
- .digi_flags = DIGI_COOK, /* Flags */
- .digi_maxcps = 100, /* Max CPS */
- .digi_maxchar = 50, /* Max chars in print queue */
- .digi_bufsize = 100, /* Printer buffer size */
- .digi_onlen = 4, /* size of printer on string */
- .digi_offlen = 4, /* size of printer off string */
- .digi_onstr = "\033[5i", /* ANSI printer on string ] */
- .digi_offstr = "\033[4i", /* ANSI printer off string ] */
- .digi_term = "ansi" /* default terminal type */
-};
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially.
- *
- * This defines a raw port at 9600 baud, 8 data bits, no parity,
- * 1 stop bit.
- */
-
-static struct ktermios dgap_default_termios = {
- .c_iflag = (DEFAULT_IFLAGS), /* iflags */
- .c_oflag = (DEFAULT_OFLAGS), /* oflags */
- .c_cflag = (DEFAULT_CFLAGS), /* cflags */
- .c_lflag = (DEFAULT_LFLAGS), /* lflags */
- .c_cc = INIT_C_CC,
- .c_line = 0,
-};
-
-/*
- * Our needed internal static variables from dgap_parse.c
- */
-static struct cnode dgap_head;
-#define MAXCWORD 200
-static char dgap_cword[MAXCWORD];
-
-struct toklist {
- int token;
- char *string;
-};
-
-static struct toklist dgap_brdtype[] = {
- { PCX, "Digi_AccelePort_C/X_PCI" },
- { PEPC, "Digi_AccelePort_EPC/X_PCI" },
- { PPCM, "Digi_AccelePort_Xem_PCI" },
- { APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
- { APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
- { APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
- { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
- { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
- { 0, NULL }
-};
-
-static struct toklist dgap_tlist[] = {
- { BEGIN, "config_begin" },
- { END, "config_end" },
- { BOARD, "board" },
- { PCIINFO, "pciinfo" },
- { LINE, "line" },
- { CONC, "conc" },
- { CONC, "concentrator" },
- { CX, "cx" },
- { CX, "ccon" },
- { EPC, "epccon" },
- { EPC, "epc" },
- { MOD, "module" },
- { ID, "id" },
- { STARTO, "start" },
- { SPEED, "speed" },
- { CABLE, "cable" },
- { CONNECT, "connect" },
- { METHOD, "method" },
- { STATUS, "status" },
- { CUSTOM, "Custom" },
- { BASIC, "Basic" },
- { MEM, "mem" },
- { MEM, "memory" },
- { PORTS, "ports" },
- { MODEM, "modem" },
- { NPORTS, "nports" },
- { TTYN, "ttyname" },
- { CU, "cuname" },
- { PRINT, "prname" },
- { CMAJOR, "major" },
- { ALTPIN, "altpin" },
- { USEINTR, "useintr" },
- { TTSIZ, "ttysize" },
- { CHSIZ, "chsize" },
- { BSSIZ, "boardsize" },
- { UNTSIZ, "schedsize" },
- { F2SIZ, "f2200size" },
- { VPSIZ, "vpixsize" },
- { 0, NULL }
-};
-
-/*
- * Get a word from the input stream. Words are separated by
- * whitespace and are NUL-terminated in place.
- */
-static char *dgap_getword(char **in)
-{
- char *ret_ptr = *in;
-
- char *ptr = strpbrk(*in, " \t\n");
-
- /* If no word found, return null */
- if (!ptr)
- return NULL;
-
- /* Mark new location for our buffer */
- *ptr = '\0';
- *in = ptr + 1;
-
- /* Eat any extra spaces/tabs/newlines that might be present */
- while (*in && **in && ((**in == ' ') ||
- (**in == '\t') ||
- (**in == '\n'))) {
- **in = '\0';
- *in = *in + 1;
- }
-
- return ret_ptr;
-}
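A userspace model of this tokenizer, under the same logic: each call NUL-terminates the next whitespace-delimited word in place, advances the caller's cursor past any run of separators, and returns NULL once no separator remains (the sample input is invented):

#include <stdio.h>
#include <string.h>

static char *getword(char **in)
{
	char *ret = *in;
	char *p = strpbrk(*in, " \t\n");

	if (!p)
		return NULL;		/* no separator left in the buffer */

	*p = '\0';			/* terminate the word in place */
	*in = p + 1;
	while (**in == ' ' || **in == '\t' || **in == '\n')
		(*in)++;		/* eat any extra separators */
	return ret;
}

int main(void)
{
	char buf[] = "board  Digi_AccelePort_Xem_PCI\nports 64\n";
	char *cur = buf, *w;

	while ((w = getword(&cur)))
		printf("word: %s\n", w);
	return 0;
}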
-
-
-/*
- * Get a token from the input file; return 0 if end of file is reached
- */
-static int dgap_gettok(char **in)
-{
- char *w;
- struct toklist *t;
-
- if (strstr(dgap_cword, "board")) {
- w = dgap_getword(in);
- if (!w)
- return 0;
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_brdtype; t->token != 0; t++) {
- if (!strcmp(w, t->string))
- return t->token;
- }
- } else {
- while ((w = dgap_getword(in))) {
- snprintf(dgap_cword, MAXCWORD, "%s", w);
- for (t = dgap_tlist; t->token != 0; t++) {
- if (!strcmp(w, t->string))
- return t->token;
- }
- }
- }
-
- return 0;
-}
-
-/*
- * dgap_checknode: see if all the necessary info has been supplied for a node
- * before creating the next node.
- */
-static int dgap_checknode(struct cnode *p)
-{
- switch (p->type) {
- case LNODE:
- if (p->u.line.v_speed == 0) {
- pr_err("line speed not specified");
- return 1;
- }
- return 0;
-
- case CNODE:
- if (p->u.conc.v_speed == 0) {
- pr_err("concentrator line speed not specified");
- return 1;
- }
- if (p->u.conc.v_nport == 0) {
- pr_err("number of ports on concentrator not specified");
- return 1;
- }
- if (p->u.conc.v_id == 0) {
- pr_err("concentrator id letter not specified");
- return 1;
- }
- return 0;
-
- case MNODE:
- if (p->u.module.v_nport == 0) {
- pr_err("number of ports on EBI module not specified");
- return 1;
- }
- if (p->u.module.v_id == 0) {
- pr_err("EBI module id letter not specified");
- return 1;
- }
- return 0;
- }
- return 0;
-}
-
-/*
- * Given a board pointer, returns whether we should use interrupts or not.
- */
-static uint dgap_config_get_useintr(struct board_t *bd)
-{
- struct cnode *p;
-
- if (!bd)
- return 0;
-
- for (p = bd->bd_config; p; p = p->next) {
- if (p->type == INTRNODE) {
- /*
- * check for pcxr types.
- */
- return p->u.useintr;
- }
- }
-
- /* If not found, then don't turn on interrupts. */
- return 0;
-}
-
-/*
- * Given a board pointer, returns whether we turn on altpin or not.
- */
-static uint dgap_config_get_altpin(struct board_t *bd)
-{
- struct cnode *p;
-
- if (!bd)
- return 0;
-
- for (p = bd->bd_config; p; p = p->next) {
- if (p->type == ANODE) {
- /*
- * check for pcxr types.
- */
- return p->u.altpin;
- }
- }
-
- /* If not found, then don't turn on interrupts. */
- return 0;
-}
-
-/*
- * Given a specific type of board, if found, detaches its link from the
- * config list and returns the first occurrence in the list.
- */
-static struct cnode *dgap_find_config(int type, int bus, int slot)
-{
- struct cnode *p, *prev, *prev2, *found;
-
- p = &dgap_head;
-
- while (p->next) {
- prev = p;
- p = p->next;
-
- if (p->type != BNODE)
- continue;
-
- if (p->u.board.type != type)
- continue;
-
- if (p->u.board.v_pcibus &&
- p->u.board.pcibus != bus)
- continue;
-
- if (p->u.board.v_pcislot &&
- p->u.board.pcislot != slot)
- continue;
-
- found = p;
- /*
- * Keep walking through the list until we
- * find the next board.
- */
- while (p->next) {
- prev2 = p;
- p = p->next;
-
- if (p->type != BNODE)
- continue;
-
- /*
- * Mark the end of our 1 board
- * chain of configs.
- */
- prev2->next = NULL;
-
- /*
- * Link the "next" board to the
- * previous board, effectively
- * "unlinking" our board from
- * the main config.
- */
- prev->next = p;
-
- return found;
- }
- /*
- * It must be the last board in the list.
- */
- prev->next = NULL;
- return found;
- }
- return NULL;
-}
-
-/*
- * Given a board pointer, walks the config link, counting up
- * all ports the user specified should be on the board.
- * (This does NOT mean they are all actually present right now, though.)
- */
-static uint dgap_config_get_num_prts(struct board_t *bd)
-{
- int count = 0;
- struct cnode *p;
-
- if (!bd)
- return 0;
-
- for (p = bd->bd_config; p; p = p->next) {
- switch (p->type) {
- case BNODE:
- /*
- * check for pcxr types.
- */
- if (p->u.board.type > EPCFE)
- count += p->u.board.nport;
- break;
- case CNODE:
- count += p->u.conc.nport;
- break;
- case MNODE:
- count += p->u.module.nport;
- break;
- }
- }
- return count;
-}
-
-static char *dgap_create_config_string(struct board_t *bd, char *string)
-{
- char *ptr = string;
- struct cnode *p;
- struct cnode *q;
- int speed;
-
- if (!bd) {
- *ptr = 0xff;
- return string;
- }
-
- for (p = bd->bd_config; p; p = p->next) {
- switch (p->type) {
- case LNODE:
- *ptr = '\0';
- ptr++;
- *ptr = p->u.line.speed;
- ptr++;
- break;
- case CNODE:
- /*
- * Because the EPC/con concentrators can have EM modules
- * hanging off of them, we have to walk ahead in the
- * list and keep adding the number of ports on each EM
- * to the config. UGH!
- */
- speed = p->u.conc.speed;
- q = p->next;
- if (q && (q->type == MNODE)) {
- *ptr = (p->u.conc.nport + 0x80);
- ptr++;
- p = q;
- while (q->next && (q->next->type) == MNODE) {
- *ptr = (q->u.module.nport + 0x80);
- ptr++;
- p = q;
- q = q->next;
- }
- *ptr = q->u.module.nport;
- ptr++;
- } else {
- *ptr = p->u.conc.nport;
- ptr++;
- }
-
- *ptr = speed;
- ptr++;
- break;
- }
- }
-
- *ptr = 0xff;
- return string;
-}
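A worked example of the byte stream this function builds, with invented port counts and speeds: for a sync line at speed 14 whose concentrator runs at speed 10 with 16 ports and carries two EM modules of 16 and 8 ports, the loop emits

    0x00 0x0e    line marker and line speed (14)
    0x90         concentrator: 16 ports, 0x80 set because EM modules follow
    0x90         first EM module: 16 ports, 0x80 set (more modules follow)
    0x08         last EM module: 8 ports, no continuation bit
    0x0a         concentrator speed (10)
    0xff         terminator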
-
-/*
- * Parse a configuration file read into memory as a string.
- */
-static int dgap_parsefile(char **in)
-{
- struct cnode *p, *brd, *line, *conc;
- int rc;
- char *s;
- int linecnt = 0;
-
- p = &dgap_head;
- brd = line = conc = NULL;
-
- /* perhaps we are adding to an existing list? */
- while (p->next)
- p = p->next;
-
- /* file must start with a BEGIN */
- while ((rc = dgap_gettok(in)) != BEGIN) {
- if (rc == 0) {
- pr_err("unexpected EOF");
- return -1;
- }
- }
-
- for (; ;) {
- int board_type = 0;
- int conc_type = 0;
- int module_type = 0;
-
- rc = dgap_gettok(in);
- if (rc == 0) {
- pr_err("unexpected EOF");
- return -1;
- }
-
- switch (rc) {
- case BEGIN: /* should only be 1 begin */
- pr_err("unexpected config_begin\n");
- return -1;
-
- case END:
- return 0;
-
- case BOARD: /* board info */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
-
- p->type = BNODE;
- p->u.board.status = kstrdup("No", GFP_KERNEL);
- line = conc = NULL;
- brd = p;
- linecnt = -1;
-
- board_type = dgap_gettok(in);
- if (board_type == 0) {
- pr_err("board !!type not specified");
- return -1;
- }
-
- p->u.board.type = board_type;
-
- break;
-
- case MEM: /* memory address */
- if (p->type != BNODE) {
- pr_err("memory address only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- kfree(p->u.board.addrstr);
- p->u.board.addrstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.addr)) {
- pr_err("bad number for memory address");
- return -1;
- }
- p->u.board.v_addr = 1;
- break;
-
- case PCIINFO: /* pci information */
- if (p->type != BNODE) {
- pr_err("memory address only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- kfree(p->u.board.pcibusstr);
- p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.pcibus)) {
- pr_err("bad number for pci bus");
- return -1;
- }
- p->u.board.v_pcibus = 1;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- kfree(p->u.board.pcislotstr);
- p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL);
- if (kstrtoul(s, 0, &p->u.board.pcislot)) {
- pr_err("bad number for pci slot");
- return -1;
- }
- p->u.board.v_pcislot = 1;
- break;
-
- case METHOD:
- if (p->type != BNODE) {
- pr_err("install method only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- kfree(p->u.board.method);
- p->u.board.method = kstrdup(s, GFP_KERNEL);
- p->u.board.v_method = 1;
- break;
-
- case STATUS:
- if (p->type != BNODE) {
- pr_err("config status only valid for boards");
- return -1;
- }
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- kfree(p->u.board.status);
- p->u.board.status = kstrdup(s, GFP_KERNEL);
- break;
-
- case NPORTS: /* number of ports */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.board.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.board.v_nport = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.conc.v_nport = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.module.nport)) {
- pr_err("bad number for number of ports");
- return -1;
- }
- p->u.module.v_nport = 1;
- } else {
- pr_err("nports only valid for concentrators or modules");
- return -1;
- }
- break;
-
- case ID: /* letter ID used in tty name */
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- kfree(p->u.board.status);
- p->u.board.status = kstrdup(s, GFP_KERNEL);
-
- if (p->type == CNODE) {
- kfree(p->u.conc.id);
- p->u.conc.id = kstrdup(s, GFP_KERNEL);
- p->u.conc.v_id = 1;
- } else if (p->type == MNODE) {
- kfree(p->u.module.id);
- p->u.module.id = kstrdup(s, GFP_KERNEL);
- p->u.module.v_id = 1;
- } else {
- pr_err("id only valid for concentrators or modules");
- return -1;
- }
- break;
-
- case STARTO: /* start offset of ID */
- if (p->type == BNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.board.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.board.v_start = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.conc.v_start = 1;
- } else if (p->type == MNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.module.start)) {
- pr_err("bad number for start of tty count");
- return -1;
- }
- p->u.module.v_start = 1;
- } else {
- pr_err("start only valid for concentrators or modules");
- return -1;
- }
- break;
-
- case TTYN: /* tty name prefix */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = TNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.ttyname = kstrdup(s, GFP_KERNEL);
- if (!p->u.ttyname)
- return -1;
-
- break;
-
- case CU: /* cu name prefix */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = CUNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.cuname = kstrdup(s, GFP_KERNEL);
- if (!p->u.cuname)
- return -1;
-
- break;
-
- case LINE: /* line information */
- if (dgap_checknode(p))
- return -1;
- if (!brd) {
- pr_err("must specify board before line info");
- return -1;
- }
- switch (brd->u.board.type) {
- case PPCM:
- pr_err("line not valid for PC/em");
- return -1;
- }
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = LNODE;
- conc = NULL;
- line = p;
- linecnt++;
- break;
-
- case CONC: /* concentrator information */
- if (dgap_checknode(p))
- return -1;
- if (!line) {
- pr_err("must specify line info before concentrator");
- return -1;
- }
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = CNODE;
- conc = p;
-
- if (linecnt)
- brd->u.board.conc2++;
- else
- brd->u.board.conc1++;
-
- conc_type = dgap_gettok(in);
- if (conc_type == 0 ||
- (conc_type != CX && conc_type != EPC)) {
- pr_err("failed to set a type of concentratros");
- return -1;
- }
-
- p->u.conc.type = conc_type;
-
- break;
-
- case MOD: /* EBI module */
- if (dgap_checknode(p))
- return -1;
- if (!brd) {
- pr_err("must specify board info before EBI modules");
- return -1;
- }
- switch (brd->u.board.type) {
- case PPCM:
- linecnt = 0;
- break;
- default:
- if (!conc) {
- pr_err("must specify concentrator info before EBI module");
- return -1;
- }
- }
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = MNODE;
-
- if (linecnt)
- brd->u.board.module2++;
- else
- brd->u.board.module1++;
-
- module_type = dgap_gettok(in);
- if (module_type == 0 ||
- (module_type != PORTS && module_type != MODEM)) {
- pr_err("failed to set a type of module");
- return -1;
- }
-
- p->u.module.type = module_type;
-
- break;
-
- case CABLE:
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- kfree(p->u.line.cable);
- p->u.line.cable = kstrdup(s, GFP_KERNEL);
- p->u.line.v_cable = 1;
- }
- break;
-
- case SPEED: /* sync line speed indication */
- if (p->type == LNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.line.speed)) {
- pr_err("bad number for line speed");
- return -1;
- }
- p->u.line.v_speed = 1;
- } else if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.conc.speed)) {
- pr_err("bad number for line speed");
- return -1;
- }
- p->u.conc.v_speed = 1;
- } else {
- pr_err("speed valid only for lines or concentrators.");
- return -1;
- }
- break;
-
- case CONNECT:
- if (p->type == CNODE) {
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- kfree(p->u.conc.connect);
- p->u.conc.connect = kstrdup(s, GFP_KERNEL);
- p->u.conc.v_connect = 1;
- }
- break;
- case PRINT: /* transparent print name prefix */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = PNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpeced end of file");
- return -1;
- }
- p->u.printname = kstrdup(s, GFP_KERNEL);
- if (!p->u.printname)
- return -1;
-
- break;
-
- case CMAJOR: /* major number */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = JNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.majornumber)) {
- pr_err("bad number for major number");
- return -1;
- }
- break;
-
- case ALTPIN: /* altpin setting */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = ANODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.altpin)) {
- pr_err("bad number for altpin");
- return -1;
- }
- break;
-
- case USEINTR: /* enable interrupt setting */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = INTRNODE;
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.useintr)) {
- pr_err("bad number for useintr");
- return -1;
- }
- break;
-
- case TTSIZ: /* size of tty structure */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = TSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.ttysize)) {
- pr_err("bad number for ttysize");
- return -1;
- }
- break;
-
- case CHSIZ: /* channel structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = CSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.chsize)) {
- pr_err("bad number for chsize");
- return -1;
- }
- break;
-
- case BSSIZ: /* board structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = BSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.bssize)) {
- pr_err("bad number for bssize");
- return -1;
- }
- break;
-
- case UNTSIZ: /* sched structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = USNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.unsize)) {
- pr_err("bad number for schedsize");
- return -1;
- }
- break;
-
- case F2SIZ: /* f2200 structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = FSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.f2size)) {
- pr_err("bad number for f2200size");
- return -1;
- }
- break;
-
- case VPSIZ: /* vpix structure size */
- if (dgap_checknode(p))
- return -1;
-
- p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
- if (!p->next)
- return -ENOMEM;
-
- p = p->next;
- p->type = VSNODE;
-
- s = dgap_getword(in);
- if (!s) {
- pr_err("unexpected end of file");
- return -1;
- }
- if (kstrtol(s, 0, &p->u.vpixsize)) {
- pr_err("bad number for vpixsize");
- return -1;
- }
- break;
- }
- }
-}
-
-static void dgap_cleanup_nodes(void)
-{
- struct cnode *p;
-
- p = &dgap_head;
-
- while (p) {
- struct cnode *tmp = p->next;
-
- if (p->type == NULLNODE) {
- p = tmp;
- continue;
- }
-
- switch (p->type) {
- case BNODE:
- kfree(p->u.board.addrstr);
- kfree(p->u.board.pcibusstr);
- kfree(p->u.board.pcislotstr);
- kfree(p->u.board.method);
- break;
- case CNODE:
- kfree(p->u.conc.id);
- kfree(p->u.conc.connect);
- break;
- case MNODE:
- kfree(p->u.module.id);
- break;
- case TNODE:
- kfree(p->u.ttyname);
- break;
- case CUNODE:
- kfree(p->u.cuname);
- break;
- case LNODE:
- kfree(p->u.line.cable);
- break;
- case PNODE:
- kfree(p->u.printname);
- break;
- }
-
- kfree(p->u.board.status);
- kfree(p);
- p = tmp;
- }
-}
-
-/*
- * Retrieves the current custom baud rate from FEP memory
- * and returns it to the user.
- * Returns 0 on error.
- */
-static uint dgap_get_custom_baud(struct channel_t *ch)
-{
- u8 __iomem *vaddr;
- ulong offset;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
- return 0;
-
- if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
- return 0;
-
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return 0;
-
- /*
-	 * Read from FEP memory what the FEP
-	 * believes the custom baud rate is.
- */
- offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28)
- + LINE_SPEED;
-
- return readw(vaddr + offset);
-}
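The offset is computed real-mode style: the 16-bit segment word read at ECS_SEG, shifted left 4 bits, gives the linear base of the per-port records, to which a 0x28-byte record stride and the LINE_SPEED field offset are added. A sketch with invented values:

#include <stdio.h>

int main(void)
{
	unsigned int seg = 0x1f00;          /* hypothetical ECS_SEG value */
	unsigned int portnum = 3;
	unsigned int line_speed_off = 0x10; /* hypothetical LINE_SPEED */
	unsigned int offset = (seg << 4) + portnum * 0x28 + line_speed_off;

	printf("0x%x\n", offset); /* 0x1f000 + 0x78 + 0x10 = 0x1f088 */
	return 0;
}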
-
-/*
- * Remap PCI memory.
- */
-static int dgap_remap(struct board_t *brd)
-{
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- if (!request_mem_region(brd->membase, 0x200000, "dgap"))
- return -ENOMEM;
-
- if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000, "dgap"))
- goto err_req_mem;
-
- brd->re_map_membase = ioremap(brd->membase, 0x200000);
- if (!brd->re_map_membase)
- goto err_remap_mem;
-
- brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
- if (!brd->re_map_port)
- goto err_remap_port;
-
- return 0;
-
-err_remap_port:
- iounmap(brd->re_map_membase);
-err_remap_mem:
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
-err_req_mem:
- release_mem_region(brd->membase, 0x200000);
-
- return -ENOMEM;
-}
-
-static void dgap_unmap(struct board_t *brd)
-{
- iounmap(brd->re_map_port);
- iounmap(brd->re_map_membase);
- release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
- release_mem_region(brd->membase, 0x200000);
-}
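dgap_remap() above follows the usual goto-unwind pattern: each error label releases everything acquired before the failure point, in reverse order, and dgap_unmap() mirrors the same order at teardown. A minimal standalone sketch of the shape, with malloc/free standing in for the region and ioremap calls:

#include <stdlib.h>

static int setup(void)
{
	void *a, *b;

	a = malloc(16);		/* stands in for request_mem_region() */
	if (!a)
		return -1;
	b = malloc(16);		/* stands in for ioremap() */
	if (!b)
		goto err_a;	/* undo only what was acquired so far */
	/* ... further steps would goto err_b on failure ... */
	free(b);		/* teardown mirrors the unmap routine */
	free(a);
	return 0;

err_a:
	free(a);
	return -1;
}

int main(void) { return setup(); }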
-
-/*
- * dgap_parity_scan()
- *
- * Convert the FEP5 way of reporting parity errors and breaks into
- * the Linux line discipline way.
- */
-static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
- unsigned char *fbuf, int *len)
-{
- int l = *len;
- int count = 0;
- unsigned char *in, *cout, *fout;
- unsigned char c;
-
- in = cbuf;
- cout = cbuf;
- fout = fbuf;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- while (l--) {
- c = *in++;
- switch (ch->pscan_state) {
- default:
- /* reset to sanity and fall through */
- ch->pscan_state = 0;
-
- case 0:
- /* No FF seen yet */
- if (c == (unsigned char)'\377')
- /* delete this character from stream */
- ch->pscan_state = 1;
- else {
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- }
- break;
-
- case 1:
- /* first FF seen */
- if (c == (unsigned char)'\377') {
- /* doubled ff, transform to single ff */
- *cout++ = c;
- *fout++ = TTY_NORMAL;
- count += 1;
- ch->pscan_state = 0;
- } else {
-				/* save value for examination in the next state */
- ch->pscan_savechar = c;
- ch->pscan_state = 2;
- }
- break;
-
- case 2:
- /* third character of ff sequence */
-
- *cout++ = c;
-
- if (ch->pscan_savechar == 0x0) {
- if (c == 0x0) {
- ch->ch_err_break++;
- *fout++ = TTY_BREAK;
- } else {
- ch->ch_err_parity++;
- *fout++ = TTY_PARITY;
- }
- }
-
- count += 1;
- ch->pscan_state = 0;
- }
- }
- *len = count;
-}
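A worked trace of the FEP5 escape protocol this implements: 0xFF opens an exception sequence, 0xFF 0xFF encodes a literal 0xFF data byte, and 0xFF 0x00 c marks byte c as a break (c == 0) or a parity error (c != 0). For the input

    41 FF FF 42 FF 00 00 43 FF 00 44

the scan emits

    0x41 TTY_NORMAL
    0xff TTY_NORMAL   (doubled FF collapsed to a single data byte)
    0x42 TTY_NORMAL
    0x00 TTY_BREAK    (FF 00 00)
    0x43 TTY_NORMAL
    0x44 TTY_PARITY   (FF 00 44)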
-
-/*=======================================================================
- *
- * dgap_input - Process received data.
- *
- * ch - Pointer to channel structure.
- *
- *=======================================================================*/
-
-static void dgap_input(struct channel_t *ch)
-{
- struct board_t *bd;
- struct bs_t __iomem *bs;
- struct tty_struct *tp;
- struct tty_ldisc *ld;
- uint rmask;
- uint head;
- uint tail;
- int data_len;
- ulong lock_flags;
- ulong lock_flags2;
- int flip_len;
- int len;
- int n;
- u8 *buf;
- u8 tmpchar;
- int s;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- tp = ch->ch_tun.un_tty;
-
- bs = ch->ch_bs;
- if (!bs)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /*
- * Figure the number of characters in the buffer.
- * Exit immediately if none.
- */
-
- rmask = ch->ch_rsize - 1;
-
- head = readw(&bs->rx_head);
- head &= rmask;
- tail = readw(&bs->rx_tail);
- tail &= rmask;
-
- data_len = (head - tail) & rmask;
-
- if (data_len == 0) {
- writeb(1, &bs->idata);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- /*
- * If the device is not open, or CREAD is off, flush
- * input data and return immediately.
- */
- if ((bd->state != BOARD_READY) || !tp ||
- (tp->magic != TTY_MAGIC) ||
- !(ch->ch_tun.un_flags & UN_ISOPEN) ||
- !(tp->termios.c_cflag & CREAD) ||
- (ch->ch_tun.un_flags & UN_CLOSING)) {
- writew(head, &bs->rx_tail);
- writeb(1, &bs->idata);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- /*
- * If we are throttled, simply don't read any data.
- */
- if (ch->ch_flags & CH_RXBLOCK) {
- writeb(1, &bs->idata);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- /*
-	 * Count and clear receiver overruns (oruns).
- */
- tmpchar = readb(&bs->orun);
- if (tmpchar) {
- ch->ch_err_overrun++;
- writeb(0, &bs->orun);
- }
-
- /* Decide how much data we can send into the tty layer */
- flip_len = TTY_FLIPBUF_SIZE;
-
- /* Chop down the length, if needed */
- len = min(data_len, flip_len);
- len = min(len, (N_TTY_BUF_SIZE - 1));
-
- ld = tty_ldisc_ref(tp);
-
-#ifdef TTY_DONT_FLIP
- /*
- * If the DONT_FLIP flag is on, don't flush our buffer, and act
- * like the ld doesn't have any space to put the data right now.
- */
- if (test_bit(TTY_DONT_FLIP, &tp->flags))
- len = 0;
-#endif
-
- /*
- * If we were unable to get a reference to the ld,
- * don't flush our buffer, and act like the ld doesn't
- * have any space to put the data right now.
- */
- if (!ld) {
- len = 0;
- } else {
- /*
- * If ld doesn't have a pointer to a receive_buf function,
- * flush the data, then act like the ld doesn't have any
- * space to put the data right now.
- */
- if (!ld->ops->receive_buf) {
- writew(head, &bs->rx_tail);
- len = 0;
- }
- }
-
- if (len <= 0) {
- writeb(1, &bs->idata);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (ld)
- tty_ldisc_deref(ld);
- return;
- }
-
- buf = ch->ch_bd->flipbuf;
- n = len;
-
- /*
-	 * n now contains the maximum amount of data we can copy,
- * bounded either by our buffer size or the amount
- * of data the card actually has pending...
- */
- while (n) {
- s = ((head >= tail) ? head : ch->ch_rsize) - tail;
- s = min(s, n);
-
- if (s <= 0)
- break;
-
- memcpy_fromio(buf, ch->ch_raddr + tail, s);
-
- tail += s;
- buf += s;
-
- n -= s;
- /* Flip queue if needed */
- tail &= rmask;
- }
-
- writew(tail, &bs->rx_tail);
- writeb(1, &bs->idata);
- ch->ch_rxcount += len;
-
- /*
- * If we are completely raw, we don't need to go through a lot
- * of the tty layers that exist.
- * In this case, we take the shortest and fastest route we
- * can to relay the data to the user.
- *
- * On the other hand, if we are not raw, we need to go through
- * the tty layer, which has its API more well defined.
- */
- if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
- dgap_parity_scan(ch, ch->ch_bd->flipbuf,
- ch->ch_bd->flipflagbuf, &len);
-
- len = tty_buffer_request_room(tp->port, len);
- tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
- ch->ch_bd->flipflagbuf, len);
- } else {
- len = tty_buffer_request_room(tp->port, len);
- tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
-	/* Tell the tty layer it's okay to "eat" the data now */
- tty_flip_buffer_push(tp->port);
-
- if (ld)
- tty_ldisc_deref(ld);
-}
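The head/tail arithmetic above relies on the receive buffer size being a power of two: with rmask = size - 1, (head - tail) & rmask yields the pending byte count even after head wraps past tail. A standalone sketch:

#include <stdio.h>

/* Occupancy of a power-of-two ring buffer. */
static unsigned int ring_data_len(unsigned int head, unsigned int tail,
				  unsigned int size /* power of two */)
{
	/* Unsigned subtraction makes the wrap case fall out naturally. */
	return (head - tail) & (size - 1);
}

int main(void)
{
	/* 1024-byte ring: head wrapped to 8, tail still at 1016 */
	printf("%u\n", ring_data_len(8, 1016, 1024)); /* prints 16 */
	return 0;
}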
-
-static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
- struct un_t *un, u32 mask,
- unsigned long *irq_flags1,
- unsigned long *irq_flags2)
-{
- if (!(un->un_flags & mask))
- return;
-
- un->un_flags &= ~mask;
-
- if (!(un->un_flags & UN_ISOPEN))
- return;
-
- if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- un->un_tty->ldisc->ops->write_wakeup) {
- spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
-
- (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
-
- spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
- spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
- }
- wake_up_interruptible(&un->un_tty->write_wait);
- wake_up_interruptible(&un->un_flags_wait);
-}
-
-/************************************************************************
- * Determines when CARRIER changes state and takes appropriate
- * action.
- ************************************************************************/
-static void dgap_carrier(struct channel_t *ch)
-{
- struct board_t *bd;
-
- int virt_carrier = 0;
- int phys_carrier = 0;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- /* Make sure altpin is always set correctly */
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- } else {
- ch->ch_dsr = DM_DSR;
- ch->ch_cd = DM_CD;
- }
-
- if (ch->ch_mistat & D_CD(ch))
- phys_carrier = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
- virt_carrier = 1;
-
- if (ch->ch_c_cflag & CLOCAL)
- virt_carrier = 1;
-
- /*
- * Test for a VIRTUAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL carrier transition to HIGH.
- */
- if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
- /*
- * When carrier rises, wake any threads waiting
- * for carrier in the open routine.
- */
-
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
- }
-
- /*
- * Test for a PHYSICAL transition to low, so long as we aren't
- * currently ignoring physical transitions (which is what "virtual
- * carrier" indicates).
- *
- * The transition of the virtual carrier to low really doesn't
- * matter... it really only means "ignore carrier state", not
- * "make pretend that carrier is there".
- */
- if ((virt_carrier == 0) &&
- ((ch->ch_flags & CH_CD) != 0) &&
- (phys_carrier == 0)) {
- /*
- * When carrier drops:
- *
- * Drop carrier on all open units.
- *
- * Flush queues, waking up any task waiting in the
- * line discipline.
- *
- * Send a hangup to the control terminal.
- *
- * Enable all select calls.
- */
- if (waitqueue_active(&(ch->ch_flags_wait)))
- wake_up_interruptible(&ch->ch_flags_wait);
-
- if (ch->ch_tun.un_open_count > 0)
- tty_hangup(ch->ch_tun.un_tty);
-
- if (ch->ch_pun.un_open_count > 0)
- tty_hangup(ch->ch_pun.un_tty);
- }
-
- /*
- * Make sure that our cached values reflect the current reality.
- */
- if (virt_carrier == 1)
- ch->ch_flags |= CH_FCAR;
- else
- ch->ch_flags &= ~CH_FCAR;
-
- if (phys_carrier == 1)
- ch->ch_flags |= CH_CD;
- else
- ch->ch_flags &= ~CH_CD;
-}
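The carrier decisions above reduce to a small table (cached CD is the CH_CD flag left by the previous pass; virtual carrier means CLOCAL or DIGI_FORCEDCD is set):

    virtual   physical   cached CD   action
    rises     any        any         wake threads blocked in open; set CH_FCAR
    any       rises      0           wake threads blocked in open; set CH_CD
    0         0          1           carrier drop: wake waiters, hang up open tty units
    1         0          1           ignored: virtual carrier masks the physical drop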
-
-/*=======================================================================
- *
- * dgap_event - FEP to host event processing routine.
- *
- * bd - Board of current event.
- *
- *=======================================================================*/
-static int dgap_event(struct board_t *bd)
-{
- struct channel_t *ch;
- ulong lock_flags;
- ulong lock_flags2;
- struct bs_t __iomem *bs;
- u8 __iomem *event;
- u8 __iomem *vaddr;
- struct ev_t __iomem *eaddr;
- uint head;
- uint tail;
- int port;
- int reason;
- int modem;
-
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- if (!vaddr) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
- eaddr = (struct ev_t __iomem *)(vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&eaddr->ev_head);
- tail = readw(&eaddr->ev_tail);
-
- /*
- * Forget it if pointers out of range.
- */
-
- if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
- (head | tail) & 03) {
- /* Let go of board lock */
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /*
- * Loop to process all the events in the buffer.
- */
- while (tail != head) {
- /*
- * Get interrupt information.
- */
-
- event = bd->re_map_membase + tail + EVSTART;
-
- port = ioread8(event);
- reason = ioread8(event + 1);
- modem = ioread8(event + 2);
- ioread8(event + 3);
-
- /*
- * Make sure the interrupt is valid.
- */
- if (port >= bd->nasync)
- goto next;
-
- if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
- goto next;
-
- ch = bd->channels[port];
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- goto next;
-
- /*
- * If we have made it here, the event was valid.
- * Lock down the channel.
- */
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- bs = ch->ch_bs;
-
- if (!bs) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- goto next;
- }
-
- /*
- * Process received data.
- */
- if (reason & IFDATA) {
- /*
- * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
- * input could send some data to ld, which in turn
- * could do a callback to one of our other functions.
- */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- dgap_input(ch);
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (ch->ch_flags & CH_RACTIVE)
- ch->ch_flags |= CH_RENABLE;
- else
- writeb(1, &bs->idata);
-
- if (ch->ch_flags & CH_RWAIT) {
- ch->ch_flags &= ~CH_RWAIT;
-
- wake_up_interruptible
- (&ch->ch_tun.un_flags_wait);
- }
- }
-
- /*
- * Process Modem change signals.
- */
- if (reason & IFMODEM) {
- ch->ch_mistat = modem;
- dgap_carrier(ch);
- }
-
- /*
- * Process break.
- */
- if (reason & IFBREAK) {
- if (ch->ch_tun.un_tty) {
- /* A break has been indicated */
- ch->ch_err_break++;
- tty_buffer_request_room
- (ch->ch_tun.un_tty->port, 1);
- tty_insert_flip_char(ch->ch_tun.un_tty->port,
- 0, TTY_BREAK);
- tty_flip_buffer_push(ch->ch_tun.un_tty->port);
- }
- }
-
- /*
- * Process Transmit low.
- */
- if (reason & IFTLW) {
- dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
- &lock_flags, &lock_flags2);
- dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
- &lock_flags, &lock_flags2);
- if (ch->ch_flags & CH_WLOW) {
- ch->ch_flags &= ~CH_WLOW;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- /*
- * Process Transmit empty.
- */
- if (reason & IFTEM) {
- dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
- &lock_flags, &lock_flags2);
- dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
- &lock_flags, &lock_flags2);
- if (ch->ch_flags & CH_WEMPTY) {
- ch->ch_flags &= ~CH_WEMPTY;
- wake_up_interruptible(&ch->ch_flags_wait);
- }
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
-
-next:
- tail = (tail + 4) & (EVMAX - EVSTART - 4);
- }
-
- writew(tail, &eaddr->ev_tail);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * Our board poller function.
- */
-static void dgap_poll_tasklet(unsigned long data)
-{
- struct board_t *bd = (struct board_t *)data;
- ulong lock_flags;
- char __iomem *vaddr;
- u16 head, tail;
-
- if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
- return;
-
- if (bd->inhibit_poller)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
-
- vaddr = bd->re_map_membase;
-
- /*
- * If board is ready, parse deeper to see if there is anything to do.
- */
- if (bd->state == BOARD_READY) {
- struct ev_t __iomem *eaddr;
-
- if (!bd->re_map_membase) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
- if (!bd->re_map_port) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- if (!bd->nasync)
- goto out;
-
- eaddr = (struct ev_t __iomem *)(vaddr + EVBUF);
-
- /* Get our head and tail */
- head = readw(&eaddr->ev_head);
- tail = readw(&eaddr->ev_tail);
-
- /*
-		 * If there is an event pending, go service it.
- */
- if (head != tail) {
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- dgap_event(bd);
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- }
-
-out:
- /*
- * If board is doing interrupts, ACK the interrupt.
- */
- if (bd->intr_running)
- readb(bd->re_map_port + 2);
-
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return;
- }
-
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * dgap_found_board()
- *
- * A board has been found, init it.
- */
-static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
- int boardnum)
-{
- struct board_t *brd;
- unsigned int pci_irq;
- int i;
- int ret;
-
- /* get the board structure and prep it */
- brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
- if (!brd)
- return ERR_PTR(-ENOMEM);
-
- /* store the info for the board we've found */
- brd->magic = DGAP_BOARD_MAGIC;
- brd->boardnum = boardnum;
- brd->vendor = dgap_pci_tbl[id].vendor;
- brd->device = dgap_pci_tbl[id].device;
- brd->pdev = pdev;
- brd->pci_bus = pdev->bus->number;
- brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgap_ids[id].name;
- brd->maxports = dgap_ids[id].maxports;
- brd->type = dgap_ids[id].config_type;
- brd->dpatype = dgap_ids[id].dpatype;
- brd->dpastatus = BD_NOFEP;
- init_waitqueue_head(&brd->state_wait);
-
- spin_lock_init(&brd->bd_lock);
-
- brd->inhibit_poller = FALSE;
- brd->wait_for_bios = 0;
- brd->wait_for_fep = 0;
-
- for (i = 0; i < MAXPORTS; i++)
- brd->channels[i] = NULL;
-
- /* store which card & revision we have */
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
-
- pci_irq = pdev->irq;
- brd->irq = pci_irq;
-
- /* get the PCI Base Address Registers */
-
- /* Xr Jupiter and EPC use BAR 2 */
- if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
- brd->membase = pci_resource_start(pdev, 2);
- brd->membase_end = pci_resource_end(pdev, 2);
- }
- /* Everyone else uses BAR 0 */
- else {
- brd->membase = pci_resource_start(pdev, 0);
- brd->membase_end = pci_resource_end(pdev, 0);
- }
-
- if (!brd->membase) {
- ret = -ENODEV;
- goto free_brd;
- }
-
- if (brd->membase & 1)
- brd->membase &= ~3;
- else
- brd->membase &= ~15;
-
- /*
- * On the PCI boards, there is no IO space allocated
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space
- */
- brd->port = brd->membase + PCI_IO_OFFSET;
- brd->port_end = brd->port + PCI_IO_SIZE_DGAP;
-
- /*
- * Special initialization for non-PLX boards
- */
- if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
- unsigned short cmd;
-
- pci_write_config_byte(pdev, 0x40, 0);
- pci_write_config_byte(pdev, 0x46, 0);
-
- /* Limit burst length to 2 doubleword transactions */
- pci_write_config_byte(pdev, 0x42, 1);
-
- /*
- * Enable IO and mem if not already done.
- * This was needed for support on Itanium.
- */
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- }
-
- /* init our poll helper tasklet */
- tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
- (unsigned long)brd);
-
- ret = dgap_remap(brd);
- if (ret)
- goto free_brd;
-
- pr_info("dgap: board %d: %s (rev %d), irq %ld\n",
- boardnum, brd->name, brd->rev, brd->irq);
-
- return brd;
-
-free_brd:
- kfree(brd);
-
- return ERR_PTR(ret);
-}
-
-/*
- * dgap_intr()
- *
- * Driver interrupt handler.
- */
-static irqreturn_t dgap_intr(int irq, void *voidbrd)
-{
- struct board_t *brd = voidbrd;
-
- if (!brd)
- return IRQ_NONE;
-
- /*
-	 * Check to make sure it's for us.
- */
- if (brd->magic != DGAP_BOARD_MAGIC)
- return IRQ_NONE;
-
- brd->intr_count++;
-
- /*
- * Schedule tasklet to run at a better time.
- */
- tasklet_schedule(&brd->helper_tasklet);
- return IRQ_HANDLED;
-}
-
-/*****************************************************************************
-*
-* Function:
-*
-* dgap_poll_handler
-*
-* Author:
-*
-* Scott H Kilau
-*
-* Parameters:
-*
-* dummy -- ignored
-*
-* Return Values:
-*
-* none
-*
-* Description:
-*
-* As each timer expires, it determines (a) whether the "transmit"
-* waiter needs to be woken up, and (b) whether the poller needs to
-* be rescheduled.
-*
-******************************************************************************/
-
-static void dgap_poll_handler(ulong dummy)
-{
- unsigned int i;
- struct board_t *brd;
- unsigned long lock_flags;
- ulong new_time;
-
- dgap_poll_counter++;
-
- /*
- * Do not start the board state machine until
- * driver tells us its up and running, and has
- * everything it needs.
- */
- if (dgap_driver_state != DRIVER_READY)
- goto schedule_poller;
-
- /*
- * If we have just 1 board, or the system is not SMP,
- * then use the typical old style poller.
- * Otherwise, use our new tasklet based poller, which should
- * speed things up for multiple boards.
- */
- if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) {
- for (i = 0; i < dgap_numboards; i++) {
- brd = dgap_board[i];
-
- if (brd->state == BOARD_FAILED)
- continue;
- if (!brd->intr_running)
- /* Call the real board poller directly */
- dgap_poll_tasklet((unsigned long)brd);
- }
- } else {
- /*
-		 * Go through each board, kicking off a
- * tasklet for each if needed
- */
- for (i = 0; i < dgap_numboards; i++) {
- brd = dgap_board[i];
-
- /*
- * Attempt to grab the board lock.
- *
- * If we can't get it, no big deal, the next poll
- * will get it. Basically, I just really don't want
- * to spin in here, because I want to kick off my
- * tasklets as fast as I can, and then get out the
- * poller.
- */
- if (!spin_trylock(&brd->bd_lock))
- continue;
-
- /*
- * If board is in a failed state, don't bother
- * scheduling a tasklet
- */
- if (brd->state == BOARD_FAILED) {
- spin_unlock(&brd->bd_lock);
- continue;
- }
-
- /* Schedule a poll helper task */
- if (!brd->intr_running)
- tasklet_schedule(&brd->helper_tasklet);
-
- /*
- * Can't do DGAP_UNLOCK here, as we don't have
- * lock_flags because we did a trylock above.
- */
- spin_unlock(&brd->bd_lock);
- }
- }
-
-schedule_poller:
-
- /*
- * Schedule ourself back at the nominal wakeup interval.
- */
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
-
- new_time = dgap_poll_time - jiffies;
-
- if ((ulong)new_time >= 2 * dgap_poll_tick) {
- dgap_poll_time =
- jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- }
-
- dgap_poll_timer.function = dgap_poll_handler;
- dgap_poll_timer.data = 0;
- dgap_poll_timer.expires = dgap_poll_time;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
- if (!dgap_poll_stop)
- add_timer(&dgap_poll_timer);
-}
-
-/*=======================================================================
- *
- * dgap_cmdb - Sends a 2 byte command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * byte1 - Integer containing first byte to be sent.
- * byte2 - Integer containing second byte to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
- u8 byte2, uint ncmds)
-{
- char __iomem *vaddr;
- struct __iomem cm_t *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
-
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF);
- head = readw(&cm_addr->cm_head);
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (vaddr + head + CMDSTART + 0));
- writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writeb(byte1, (vaddr + head + CMDSTART + 2));
- writeb(byte2, (vaddr + head + CMDSTART + 3));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &cm_addr->cm_head);
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
- head = readw(&cm_addr->cm_head);
- tail = readw(&cm_addr->cm_tail);
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
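The tail of dgap_cmdb() throttles the host against the FEP: after queueing a command, it polls the ring's head/tail until no more than roughly ncmds commands remain outstanding (the driver compares against ncmds * sizeof(struct cm_t)), and declares the board dead after dgap_count (500) polls. A userspace model of that wait loop, with an invented ring size and a simplified 4-bytes-per-command budget:

#include <stdio.h>

#define RING_BYTES 0x200 /* invented stand-in for CMDMAX - CMDSTART */

static int wait_for_fep(volatile unsigned short *head,
			volatile unsigned short *tail,
			unsigned int ncmds, unsigned int budget)
{
	while (budget--) {
		unsigned int n = (unsigned int)(*head - *tail) &
				 (RING_BYTES - 4);

		if (n <= ncmds * 4)	/* 4 bytes per queued command */
			return 0;	/* FEP has drained enough */
		/* the kernel udelay(10)s here; a live FEP advances *tail */
	}
	return -1;			/* declare the FEP dead */
}

int main(void)
{
	unsigned short head = 16, tail = 0;

	printf("%d\n", wait_for_fep(&head, &tail, 4, 500)); /* 0 */
	printf("%d\n", wait_for_fep(&head, &tail, 2, 500)); /* -1 */
	return 0;
}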
-
-/*=======================================================================
- *
- * dgap_cmdw - Sends a 1 word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds)
-{
- char __iomem *vaddr;
- struct __iomem cm_t *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF);
- head = readw(&cm_addr->cm_head);
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
- writeb(cmd, (vaddr + head + CMDSTART + 0));
- writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writew((u16)word, (vaddr + head + CMDSTART + 2));
-
- head = (head + 4) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &cm_addr->cm_head);
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count ;;) {
- head = readw(&cm_addr->cm_head);
- tail = readw(&cm_addr->cm_tail);
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
-
-/*=======================================================================
- *
- * dgap_cmdw_ext - Sends an extended word command to the FEP.
- *
- * ch - Pointer to channel structure.
- * cmd - Command to be sent.
- * word - Integer containing word to be sent.
- * ncmds - Wait until ncmds or fewer cmds are left
- * in the cmd buffer before returning.
- *
- *=======================================================================*/
-static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
-{
- char __iomem *vaddr;
- struct cm_t __iomem *cm_addr;
- uint count;
- uint n;
- u16 head;
- u16 tail;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check if board is still alive.
- */
- if (ch->ch_bd->state == BOARD_FAILED)
- return;
-
- /*
- * Make sure the pointers are in range before
- * writing to the FEP memory.
- */
- vaddr = ch->ch_bd->re_map_membase;
- if (!vaddr)
- return;
-
- cm_addr = (struct cm_t __iomem *)(vaddr + CMDBUF);
- head = readw(&cm_addr->cm_head);
-
- /*
- * Forget it if pointers out of range.
- */
- if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
-
- /*
- * Put the data in the circular command buffer.
- */
-
- /* Write an FF to tell the FEP that we want an extended command */
- writeb((u8)0xff, (vaddr + head + CMDSTART + 0));
-
- writeb((u8)ch->ch_portnum, (vaddr + head + CMDSTART + 1));
- writew((u16)cmd, (vaddr + head + CMDSTART + 2));
-
- /*
- * If the second part of the command won't fit,
- * put it at the beginning of the circular buffer.
- */
- if ((head + 4) >= (CMDMAX - CMDSTART) || (head & 03))
- writew((u16)word, (vaddr + CMDSTART));
- else
- writew((u16)word, (vaddr + head + CMDSTART + 4));
-
- head = (head + 8) & (CMDMAX - CMDSTART - 4);
-
- writew(head, &cm_addr->cm_head);
-
- /*
- * Wait if necessary before updating the head
- * pointer to limit the number of outstanding
- * commands to the FEP. If the time spent waiting
- * is outlandish, declare the FEP dead.
- */
- for (count = dgap_count;;) {
- head = readw(&cm_addr->cm_head);
- tail = readw(&cm_addr->cm_tail);
-
- n = (head - tail) & (CMDMAX - CMDSTART - 4);
-
- if (n <= ncmds * sizeof(struct cm_t))
- break;
-
- if (--count == 0) {
- ch->ch_bd->state = BOARD_FAILED;
- return;
- }
- udelay(10);
- }
-}
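-
-/*
- * Hypothetical view of the 8 bytes an extended command occupies (field
- * names are illustrative only; byte order is whatever writeb()/writew()
- * produce on the host, and the data word may land back at CMDSTART when
- * the first slot ends the buffer, as handled above):
- */
-struct example_ext_cmd {
- u8 escape; /* always 0xff, marks an extended command */
- u8 port; /* ch_portnum of the channel */
- u16 cmd; /* extended command code */
- u16 word; /* 16-bit argument */
- u16 pad; /* head advances by a full 8 bytes */
-};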
-
-/*=======================================================================
- *
- * dgap_wmove - Write data to FEP buffer.
- *
- * ch - Pointer to channel structure.
- * buf - Pointer to characters to be moved.
- * cnt - Number of characters to move.
- *
- *=======================================================================*/
-static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
-{
- int n;
- char __iomem *taddr;
- struct bs_t __iomem *bs;
- u16 head;
-
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- /*
- * Check parameters.
- */
- bs = ch->ch_bs;
- head = readw(&bs->tx_head);
-
- /*
- * If pointers are out of range, just return.
- */
- if ((cnt > ch->ch_tsize) ||
- (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
- return;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- n = ch->ch_tstart + ch->ch_tsize - head;
-
- if (cnt >= n) {
- cnt -= n;
- taddr = ch->ch_taddr + head;
- memcpy_toio(taddr, buf, n);
- head = ch->ch_tstart;
- buf += n;
- }
-
- /*
- * Move rest of data.
- */
- taddr = ch->ch_taddr + head;
- n = cnt;
- memcpy_toio(taddr, buf, n);
- head += cnt;
-
- writew(head, &bs->tx_head);
-}
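-
-/*
- * Minimal sketch of the wrap handling used by dgap_wmove() above
- * (hypothetical helper, same two-copy scheme): anything that would run
- * past the end of the transmit window is split at the wrap point.
- */
-static void example_circ_write(char __iomem *base, u16 start, u16 size,
- u16 *head, const char *buf, uint cnt)
-{
- uint n = start + size - *head; /* room before the wrap point */
-
- if (cnt >= n) {
- /* first copy: up to the wrap point, then rewind head */
- memcpy_toio(base + *head, buf, n);
- buf += n;
- cnt -= n;
- *head = start;
- }
- /* second copy: whatever is left fits linearly */
- memcpy_toio(base + *head, buf, cnt);
- *head += cnt;
-}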
-
-/*
- * Calls the firmware to reset this channel.
- */
-static void dgap_firmware_reset_port(struct channel_t *ch)
-{
- dgap_cmdb(ch, CHRESET, 0, 0, 0);
-
- /*
- * Now that the channel is reset, we need to make sure
- * all the current settings get reapplied to the port
- * in the firmware.
- *
- * So we will set the driver's cache of firmware
- * settings all to 0, and then call param.
- */
- ch->ch_fepiflag = 0;
- ch->ch_fepcflag = 0;
- ch->ch_fepoflag = 0;
- ch->ch_fepstartc = 0;
- ch->ch_fepstopc = 0;
- ch->ch_fepastartc = 0;
- ch->ch_fepastopc = 0;
- ch->ch_mostat = 0;
- ch->ch_hflow = 0;
-}
-
-/*=======================================================================
- *
- * dgap_param - Set Digi parameters.
- *
- * ch - Pointer to channel structure.
- * bd - Pointer to board structure.
- * un_type - Unit type (DGAP_SERIAL or DGAP_PRINT).
- *
- *=======================================================================*/
-static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type)
-{
- u16 head;
- u16 cflag;
- u16 iflag;
- u8 mval;
- u8 hflow;
-
- /*
- * If baud rate is zero, flush queues, and set mval to drop DTR.
- */
- if ((ch->ch_c_cflag & (CBAUD)) == 0) {
- /* flush rx */
- head = readw(&ch->ch_bs->rx_head);
- writew(head, &ch->ch_bs->rx_tail);
-
- /* flush tx */
- head = readw(&ch->ch_bs->tx_head);
- writew(head, &ch->ch_bs->tx_tail);
-
- ch->ch_flags |= (CH_BAUD0);
-
- /* Drop RTS and DTR */
- ch->ch_mval &= ~(D_RTS(ch) | D_DTR(ch));
- mval = D_DTR(ch) | D_RTS(ch);
- ch->ch_baud_info = 0;
-
- } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
- /*
- * Tell the fep to do the command
- */
-
- dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
-
- /*
- * Now go get from fep mem, what the fep
- * believes the custom baud rate is.
- */
- ch->ch_custom_speed = dgap_get_custom_baud(ch);
- ch->ch_baud_info = ch->ch_custom_speed;
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch) | D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
-
- } else {
- /*
- * Set baud rate, character size, and parity.
- */
-
- int iindex = 0;
- int jindex = 0;
- int baud = 0;
-
- static const ulong bauds[4][16] = {
- { /* slowbaud */
- 0, 50, 75, 110,
- 134, 150, 200, 300,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* slowbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 },
- { /* fastbaud */
- 0, 57600, 76800, 115200,
- 14400, 57600, 230400, 76800,
- 115200, 230400, 28800, 460800,
- 921600, 9600, 19200, 38400 },
- { /* fastbaud & CBAUDEX */
- 0, 57600, 115200, 230400,
- 460800, 150, 200, 921600,
- 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400 }
- };
-
- /*
- * Only use the TXPrint baud rate if the
- * terminal unit is NOT open
- */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
- un_type == DGAP_PRINT)
- baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
- else
- baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
-
- if (ch->ch_c_cflag & CBAUDEX)
- iindex = 1;
-
- if (ch->ch_digi.digi_flags & DIGI_FAST)
- iindex += 2;
-
- jindex = baud;
-
- if ((iindex >= 0) && (iindex < 4) &&
- (jindex >= 0) && (jindex < 16))
- baud = bauds[iindex][jindex];
- else
- baud = 0;
-
- if (baud == 0)
- baud = 9600;
-
- ch->ch_baud_info = baud;
-
- /*
- * CBAUD has bit position 0x1000 set these days to
- * indicate Linux baud rate remap.
- * We use a different bit assignment for high speed.
- * Clear this bit out while grabbing the parts of
- * "cflag" we want.
- */
- cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
- CSTOPB | CSIZE);
-
- /*
- * HUPCL bit is used by FEP to indicate fast baud
- * table is to be used.
- */
- if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
- (ch->ch_c_cflag & CBAUDEX))
- cflag |= HUPCL;
-
- if ((ch->ch_c_cflag & CBAUDEX) &&
- !(ch->ch_digi.digi_flags & DIGI_FAST)) {
- /*
- * The below code is trying to guarantee that only
- * baud rates 115200, 230400, 460800, 921600 are
- * remapped. We use exclusive or because the various
- * baud rates share common bit positions and therefore
- * can't be tested for easily.
- */
- tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
- int baudpart = 0;
-
- /*
- * Map high speed requests to index
- * into FEP's baud table
- */
- switch (tcflag) {
- case B57600:
- baudpart = 1;
- break;
-#ifdef B76800
- case B76800:
- baudpart = 2;
- break;
-#endif
- case B115200:
- baudpart = 3;
- break;
- case B230400:
- baudpart = 9;
- break;
- case B460800:
- baudpart = 11;
- break;
-#ifdef B921600
- case B921600:
- baudpart = 12;
- break;
-#endif
- default:
- baudpart = 0;
- }
-
- if (baudpart)
- cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
- }
-
- cflag &= 0xffff;
-
- if (cflag != ch->ch_fepcflag) {
- ch->ch_fepcflag = (u16)(cflag & 0xffff);
-
- /*
- * Okay to have channel and board
- * locks held calling this
- */
- dgap_cmdw(ch, SCFLAG, (u16)cflag, 0);
- }
-
- /* Handle transition from B0 */
- if (ch->ch_flags & CH_BAUD0) {
- ch->ch_flags &= ~(CH_BAUD0);
- ch->ch_mval |= (D_RTS(ch) | D_DTR(ch));
- }
- mval = D_DTR(ch) | D_RTS(ch);
- }
-
- /*
- * Get input flags.
- */
- iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
- INPCK | ISTRIP | IXON | IXANY | IXOFF);
-
- if ((ch->ch_startc == _POSIX_VDISABLE) ||
- (ch->ch_stopc == _POSIX_VDISABLE)) {
- iflag &= ~(IXON | IXOFF);
- ch->ch_c_iflag &= ~(IXON | IXOFF);
- }
-
- /*
- * Only the IBM Xr card can switch between
- * 232 and 422 modes on the fly
- */
- if (bd->device == PCI_DEV_XR_IBM_DID) {
- if (ch->ch_digi.digi_flags & DIGI_422)
- dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
- else
- dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
- }
-
- if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
- iflag |= IALTPIN;
-
- if (iflag != ch->ch_fepiflag) {
- ch->ch_fepiflag = iflag;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdw(ch, SIFLAG, (u16)ch->ch_fepiflag, 0);
- }
-
- /*
- * Select hardware handshaking.
- */
- hflow = 0;
-
- if (ch->ch_c_cflag & CRTSCTS)
- hflow |= (D_RTS(ch) | D_CTS(ch));
- if (ch->ch_digi.digi_flags & RTSPACE)
- hflow |= D_RTS(ch);
- if (ch->ch_digi.digi_flags & DTRPACE)
- hflow |= D_DTR(ch);
- if (ch->ch_digi.digi_flags & CTSPACE)
- hflow |= D_CTS(ch);
- if (ch->ch_digi.digi_flags & DSRPACE)
- hflow |= D_DSR(ch);
- if (ch->ch_digi.digi_flags & DCDPACE)
- hflow |= D_CD(ch);
-
- if (hflow != ch->ch_hflow) {
- ch->ch_hflow = hflow;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SHFLOW, (u8)hflow, 0xff, 0);
- }
-
- /*
- * Set RTS and/or DTR Toggle if needed,
- * but only if product is FEP5+ based.
- */
- if (bd->bd_flags & BD_FEP5PLUS) {
- u16 hflow2 = 0;
-
- if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
- hflow2 |= (D_RTS(ch));
- if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
- hflow2 |= (D_DTR(ch));
-
- dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
- }
-
- /*
- * Set modem control lines.
- */
-
- mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
-
- if (ch->ch_mostat ^ mval) {
- ch->ch_mostat = mval;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SMODEM, (u8)mval, D_RTS(ch) | D_DTR(ch), 0);
- }
-
- /*
- * Read modem signals, and then call carrier function.
- */
- ch->ch_mistat = readb(&ch->ch_bs->m_stat);
- dgap_carrier(ch);
-
- /*
- * Set the start and stop characters.
- */
- if (ch->ch_startc != ch->ch_fepstartc ||
- ch->ch_stopc != ch->ch_fepstopc) {
- ch->ch_fepstartc = ch->ch_startc;
- ch->ch_fepstopc = ch->ch_stopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
- }
-
- /*
- * Set the Auxiliary start and stop characters.
- */
- if (ch->ch_astartc != ch->ch_fepastartc ||
- ch->ch_astopc != ch->ch_fepastopc) {
- ch->ch_fepastartc = ch->ch_astartc;
- ch->ch_fepastopc = ch->ch_astopc;
-
- /* Okay to have channel and board locks held calling this */
- dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
- }
-
- return 0;
-}
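-
-/*
- * Illustrative sketch of the table lookup dgap_param() performs above
- * (hypothetical helper; 9600 is the driver's own fallback). For example,
- * B9600 is octal 0015 == 13, so with CBAUDEX and DIGI_FAST both clear it
- * selects bauds[0][13] == 9600.
- */
-static ulong example_lookup_baud(const ulong table[4][16], tcflag_t cflag,
- int digi_fast)
-{
- int i = (cflag & CBAUDEX ? 1 : 0) + (digi_fast ? 2 : 0);
- int j = cflag & 0xff; /* low byte of the baud bits */
-
- if (j >= 16 || table[i][j] == 0)
- return 9600; /* out of range or unmapped: fall back */
- return table[i][j];
-}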
-
-/*
- * dgap_block_til_ready()
- *
- * Wait for DCD, if needed.
- */
-static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
- struct channel_t *ch)
-{
- int retval = 0;
- struct un_t *un;
- ulong lock_flags;
- uint old_flags;
- int sleep_on_un_flags;
-
- if (!tty || tty->magic != TTY_MAGIC || !file || !ch ||
- ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- ch->ch_wopen++;
-
- /* Loop forever */
- while (1) {
- sleep_on_un_flags = 0;
-
- /*
- * If board has failed somehow during our sleep,
- * bail with error.
- */
- if (ch->ch_bd->state == BOARD_FAILED) {
- retval = -EIO;
- break;
- }
-
- /* If tty was hung up, break out of loop and set error. */
- if (tty_hung_up_p(file)) {
- retval = -EAGAIN;
- break;
- }
-
- /*
- * If either unit is in the middle of the fragile part of close,
- * we just cannot touch the channel safely.
- * Go back to sleep, knowing that when the channel can be
- * touched safely, the close routine will signal the
- * ch_flags_wait waitqueue to wake us back up.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
- UN_CLOSING)) {
- /*
- * Our conditions to leave cleanly and happily:
- * 1) NONBLOCKING on the tty is set.
- * 2) CLOCAL is set.
- * 3) DCD (fake or real) is active.
- */
-
- if (file->f_flags & O_NONBLOCK)
- break;
-
- if (tty->flags & (1 << TTY_IO_ERROR))
- break;
-
- if (ch->ch_flags & CH_CD)
- break;
-
- if (ch->ch_flags & CH_FCAR)
- break;
- } else {
- sleep_on_un_flags = 1;
- }
-
- /*
- * If there is a signal pending, the user probably
- * interrupted (ctrl-c) us.
- * Leave loop with error set.
- */
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
-
- /*
- * Store the flags before we let go of channel lock
- */
- if (sleep_on_un_flags)
- old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
- else
- old_flags = ch->ch_flags;
-
- /*
- * Let go of channel lock before calling schedule.
- * Our poller will get any FEP events and wake us up when DCD
- * eventually goes active.
- */
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- /*
- * Wait for something in the flags to change
- * from the current value.
- */
- if (sleep_on_un_flags) {
- retval = wait_event_interruptible(un->un_flags_wait,
- (old_flags != (ch->ch_tun.un_flags |
- ch->ch_pun.un_flags)));
- } else {
- retval = wait_event_interruptible(ch->ch_flags_wait,
- (old_flags != ch->ch_flags));
- }
-
- /*
- * We got woken up for some reason.
- * Before looping around, grab our channel lock.
- */
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- }
-
- ch->ch_wopen--;
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return retval;
-}
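-
-/*
- * Minimal sketch of the sleep pattern dgap_block_til_ready() uses
- * (hypothetical helper): snapshot the flags word under the lock, drop
- * the lock, then sleep until the snapshot goes stale. The waitqueue
- * condition is rechecked after every wakeup, so no change is lost
- * between the unlock and the sleep.
- */
-static int example_wait_for_change(wait_queue_head_t *wq, uint *flags,
- spinlock_t *lock)
-{
- uint old;
- ulong irqflags;
-
- spin_lock_irqsave(lock, irqflags);
- old = *flags;
- spin_unlock_irqrestore(lock, irqflags);
-
- return wait_event_interruptible(*wq, *flags != old);
-}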
-
-/*
- * dgap_tty_flush_buffer()
- *
- * Flush Tx buffer (make in == out)
- */
-static void dgap_tty_flush_buffer(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
- u16 head;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_flags &= ~CH_STOP;
- head = readw(&ch->ch_bs->tx_head);
- dgap_cmdw(ch, FLUSHTX, (u16)head, 0);
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW | UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
- tty_wakeup(tty);
-}
-
-/*
- * dgap_tty_hangup()
- *
- * Hangup the port. Like a close, but don't wait for output to drain.
- */
-static void dgap_tty_hangup(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- /* flush the transmit queues */
- dgap_tty_flush_buffer(tty);
-}
-
-/*
- * dgap_tty_chars_in_buffer()
- *
- * Return number of characters that have not been transmitted yet.
- *
- * This routine is used by the line discipline to determine if there
- * is data waiting to be transmitted/drained/flushed or not.
- */
-static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- u8 tbusy;
- uint chars;
- u16 thead, ttail, tmask, chead, ctail;
- ulong lock_flags = 0;
- ulong lock_flags2 = 0;
-
- if (!tty)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
-
- bs = ch->ch_bs;
- if (!bs)
- return 0;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- tmask = (ch->ch_tsize - 1);
-
- /* Get Transmit queue pointers */
- thead = readw(&bs->tx_head) & tmask;
- ttail = readw(&bs->tx_tail) & tmask;
-
- /* Get tbusy flag */
- tbusy = readb(&bs->tbusy);
-
- /* Get Command queue pointers */
- chead = readw(&ch->ch_cm->cm_head);
- ctail = readw(&ch->ch_cm->cm_tail);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- /*
- * The only way we know for sure if there is no pending
- * data left to be transferred, is if:
- * 1) Transmit head and tail are equal (empty).
- * 2) Command queue head and tail are equal (empty).
- * 3) The "TBUSY" flag is 0. (Transmitter not busy).
- */
-
- if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
- chars = 0;
- } else {
- if (thead >= ttail)
- chars = thead - ttail;
- else
- chars = thead - ttail + ch->ch_tsize;
- /*
- * Fudge factor here.
- * If chars is zero, we know that the command queue had
- * something in it or tbusy was set. Because we cannot
- * be sure if there is still some data to be transmitted,
- * let's lie and tell the ld we have 1 byte left.
- */
- if (chars == 0) {
- /*
- * If TBUSY is still set, and our tx buffers are empty,
- * force the firmware to send me another wakeup after
- * TBUSY has been cleared.
- */
- if (tbusy != 0) {
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- un->un_flags |= UN_EMPTY;
- writeb(1, &bs->iempty);
- spin_unlock_irqrestore(&ch->ch_lock,
- lock_flags);
- }
- chars = 1;
- }
- }
-
- return chars;
-}
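-
-/*
- * Illustrative sketch of the pending-byte computation above: the
- * unwrapped distance from tail to head, with the buffer size added back
- * when head has already wrapped.
- */
-static inline uint example_tx_pending(u16 thead, u16 ttail, u16 tsize)
-{
- return (thead >= ttail) ? thead - ttail : thead - ttail + tsize;
-}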
-
-static int dgap_wait_for_drain(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- int ret = 0;
- uint count = 1;
- ulong lock_flags = 0;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- bs = ch->ch_bs;
- if (!bs)
- return -EIO;
-
- /* Loop until data is drained */
- while (count != 0) {
- count = dgap_tty_chars_in_buffer(tty);
-
- if (count == 0)
- break;
-
- /* Set flag waiting for drain */
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- un->un_flags |= UN_EMPTY;
- writeb(1, &bs->iempty);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- /* Go to sleep till we get woken up */
- ret = wait_event_interruptible(un->un_flags_wait,
- ((un->un_flags & UN_EMPTY) == 0));
- /* If ret is non-zero, user ctrl-c'ed us */
- if (ret)
- break;
- }
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- un->un_flags &= ~(UN_EMPTY);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return ret;
-}
-
-/*
- * dgap_maxcps_room
- *
- * Reduces bytes_available to the max number of characters
- * that can be sent currently given the maxcps value, and
- * returns the new bytes_available. This only affects printer
- * output.
- */
-static int dgap_maxcps_room(struct channel_t *ch, struct un_t *un,
- int bytes_available)
-{
- /*
- * If it's not the Transparent print device, return
- * the full data amount.
- */
- if (un->un_type != DGAP_PRINT)
- return bytes_available;
-
- if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0) {
- int cps_limit = 0;
- unsigned long current_time = jiffies;
- unsigned long buffer_time = current_time +
- (HZ * ch->ch_digi.digi_bufsize) /
- ch->ch_digi.digi_maxcps;
-
- if (ch->ch_cpstime < current_time) {
- /* buffer is empty */
- ch->ch_cpstime = current_time; /* reset ch_cpstime */
- cps_limit = ch->ch_digi.digi_bufsize;
- } else if (ch->ch_cpstime < buffer_time) {
- /* still room in the buffer */
- cps_limit = ((buffer_time - ch->ch_cpstime) *
- ch->ch_digi.digi_maxcps) / HZ;
- } else {
- /* no room in the buffer */
- cps_limit = 0;
- }
-
- bytes_available = min(cps_limit, bytes_available);
- }
-
- return bytes_available;
-}
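-
-/*
- * Minimal sketch of the cps computation above (same arithmetic, plain
- * helper with hypothetical names). For example, with maxcps = 100,
- * bufsize = 25 and HZ = 100, a ch_cpstime 10 jiffies ahead of "now"
- * leaves (25 - 10) * 100 / 100 = 15 characters of room, so a 64-byte
- * write is trimmed to 15.
- */
-static int example_cps_room(ulong now, ulong cpstime, int maxcps,
- int bufsize, int wanted)
-{
- ulong buffer_time = now + (HZ * bufsize) / maxcps;
- int limit;
-
- if (cpstime < now)
- limit = bufsize; /* virtual buffer already drained */
- else if (cpstime < buffer_time)
- limit = ((buffer_time - cpstime) * maxcps) / HZ;
- else
- limit = 0; /* virtual buffer full */
-
- return min(limit, wanted);
-}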
-
-static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
-{
- struct channel_t *ch;
- struct bs_t __iomem *bs;
-
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
- bs = ch->ch_bs;
- if (!bs)
- return;
-
- if ((event & UN_LOW) != 0) {
- if ((un->un_flags & UN_LOW) == 0) {
- un->un_flags |= UN_LOW;
- writeb(1, &bs->ilow);
- }
- }
- if ((event & UN_EMPTY) != 0) {
- if ((un->un_flags & UN_EMPTY) == 0) {
- un->un_flags |= UN_EMPTY;
- writeb(1, &bs->iempty);
- }
- }
-}
-
-/*
- * dgap_tty_write_room()
- *
- * Return space available in Tx buffer
- */
-static int dgap_tty_write_room(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- u16 head, tail, tmask;
- int ret;
- ulong lock_flags = 0;
-
- if (!tty)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- bs = ch->ch_bs;
- if (!bs)
- return 0;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- tmask = ch->ch_tsize - 1;
- head = readw(&bs->tx_head) & tmask;
- tail = readw(&bs->tx_tail) & tmask;
-
- ret = tail - head - 1;
- if (ret < 0)
- ret += ch->ch_tsize;
-
- /* Limit printer to maxcps */
- ret = dgap_maxcps_room(ch, un, ret);
-
- /*
- * If we are printer device, leave space for
- * possibly both the on and off strings.
- */
- if (un->un_type == DGAP_PRINT) {
- if (!(ch->ch_flags & CH_PRON))
- ret -= ch->ch_digi.digi_onlen;
- ret -= ch->ch_digi.digi_offlen;
- } else {
- if (ch->ch_flags & CH_PRON)
- ret -= ch->ch_digi.digi_offlen;
- }
-
- if (ret < 0)
- ret = 0;
-
- /*
- * Schedule FEP to wake us up if needed.
- *
- * TODO: This might be overkill...
- * Do we really need to schedule callbacks from the FEP
- * in every case? Can we get smarter based on ret?
- */
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return ret;
-}
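-
-/*
- * Illustrative sketch of the free-space formula above: one slot is
- * sacrificed so a completely full window is distinguishable from an
- * empty one, and the wrap is fixed up when the subtraction goes
- * negative.
- */
-static inline int example_tx_room(u16 head, u16 tail, u16 size)
-{
- int room = (int)tail - (int)head - 1;
-
- if (room < 0)
- room += size;
- return room;
-}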
-
-/*
- * dgap_tty_write()
- *
- * Take data from the user or kernel and send it out to the FEP.
- * In here exists all the Transparent Print magic as well.
- */
-static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
-{
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- char __iomem *vaddr;
- u16 head, tail, tmask, remain;
- int bufcount, n;
- ulong lock_flags;
-
- if (!tty)
- return 0;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
-
- bs = ch->ch_bs;
- if (!bs)
- return 0;
-
- if (!count)
- return 0;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- /* Get our space available for the channel from the board */
- tmask = ch->ch_tsize - 1;
- head = readw(&(bs->tx_head)) & tmask;
- tail = readw(&(bs->tx_tail)) & tmask;
-
- bufcount = tail - head - 1;
- if (bufcount < 0)
- bufcount += ch->ch_tsize;
-
- /*
- * Limit printer output to maxcps overall, with bursts allowed
- * up to bufsize characters.
- */
- bufcount = dgap_maxcps_room(ch, un, bufcount);
-
- /*
- * Take minimum of what the user wants to send, and the
- * space available in the FEP buffer.
- */
- count = min(count, bufcount);
-
- /*
- * Bail if no space left.
- */
- if (count <= 0) {
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
- return 0;
- }
-
- /*
- * Output the printer ON string, if we are in terminal mode, but
- * need to be in printer mode.
- */
- if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_onstr,
- (int)ch->ch_digi.digi_onlen);
- head = readw(&bs->tx_head) & tmask;
- ch->ch_flags |= CH_PRON;
- }
-
- /*
- * On the other hand, output the printer OFF string, if we are
- * currently in printer mode, but need to output to the terminal.
- */
- if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int)ch->ch_digi.digi_offlen);
- head = readw(&bs->tx_head) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
-
- n = count;
-
- /*
- * If the write wraps over the top of the circular buffer,
- * move the portion up to the wrap point, and reset the
- * pointers to the bottom.
- */
- remain = ch->ch_tstart + ch->ch_tsize - head;
-
- if (n >= remain) {
- n -= remain;
- vaddr = ch->ch_taddr + head;
-
- memcpy_toio(vaddr, buf, remain);
-
- head = ch->ch_tstart;
- buf += remain;
- }
-
- if (n > 0) {
- /*
- * Move rest of data.
- */
- vaddr = ch->ch_taddr + head;
- remain = n;
-
- memcpy_toio(vaddr, buf, remain);
- head += remain;
- }
-
- if (count) {
- ch->ch_txcount += count;
- head &= tmask;
- writew(head, &bs->tx_head);
- }
-
- dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
-
- /*
- * If this is the print device, and the
- * printer is still on, we need to turn it
- * off before going idle. If the buffer is
- * non-empty, wait until it goes empty.
- * Otherwise turn it off right now.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- tail = readw(&bs->tx_tail) & tmask;
-
- if (tail != head) {
- un->un_flags |= UN_EMPTY;
- writeb(1, &bs->iempty);
- } else {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int)ch->ch_digi.digi_offlen);
- head = readw(&bs->tx_head) & tmask;
- ch->ch_flags &= ~CH_PRON;
- }
- }
-
- /* Update printer buffer empty time. */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0) &&
- (ch->ch_digi.digi_bufsize > 0)) {
- ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
- }
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return count;
-}
-
-/*
- * dgap_tty_put_char()
- *
- * Put a character into ch->ch_buf
- *
- * - used by the line discipline for OPOST processing
- */
-static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
-{
- /*
- * Simply call tty_write.
- */
- dgap_tty_write(tty, &c, 1);
- return 1;
-}
-
-/*
- * Return modem signals to ld.
- */
-static int dgap_tty_tiocmget(struct tty_struct *tty)
-{
- struct channel_t *ch;
- struct un_t *un;
- int result;
- u8 mstat;
- ulong lock_flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- mstat = readb(&ch->ch_bs->m_stat);
- /* Append any outbound signals that might be pending... */
- mstat |= ch->ch_mostat;
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- result = 0;
-
- if (mstat & D_DTR(ch))
- result |= TIOCM_DTR;
- if (mstat & D_RTS(ch))
- result |= TIOCM_RTS;
- if (mstat & D_CTS(ch))
- result |= TIOCM_CTS;
- if (mstat & D_DSR(ch))
- result |= TIOCM_DSR;
- if (mstat & D_RI(ch))
- result |= TIOCM_RI;
- if (mstat & D_CD(ch))
- result |= TIOCM_CD;
-
- return result;
-}
-
-/*
- * dgap_tty_tiocmset()
- *
- * Set modem signals, called by ld.
- */
-static int dgap_tty_tiocmset(struct tty_struct *tty,
- unsigned int set, unsigned int clear)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (set & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval |= D_RTS(ch);
- }
-
- if (set & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval |= D_DTR(ch);
- }
-
- if (clear & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (clear & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * dgap_tty_send_break()
- *
- * Send a Break, called by ld.
- */
-static int dgap_tty_send_break(struct tty_struct *tty, int msec)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EIO;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EIO;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EIO;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -EIO;
-
- switch (msec) {
- case -1:
- msec = 0xFFFF;
- break;
- case 0:
- msec = 1;
- break;
- default:
- msec /= 10;
- break;
- }
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-#if 0
- dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
-#endif
- dgap_cmdw(ch, SBREAK, (u16)msec, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
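-
-/*
- * Worked example of the msec mapping above (hypothetical helper; the
- * /10 suggests the FEP counts break time in 10 ms ticks): 250 ms
- * becomes 25 ticks, -1 requests the maximum of 0xFFFF ticks, and 0 is
- * rounded up to a single tick.
- */
-static inline u16 example_break_ticks(int msec)
-{
- if (msec == -1)
- return 0xFFFF; /* longest break the FEP supports */
- if (msec == 0)
- return 1; /* round up to one tick */
- return msec / 10; /* 10 ms per tick */
-}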
-
-/*
- * dgap_tty_wait_until_sent()
- *
- * wait until data has been transmitted, called by ld.
- */
-static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- dgap_wait_for_drain(tty);
-}
-
-/*
- * dgap_tty_send_xchar()
- *
- * send a high priority character, called by ld.
- */
-static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /*
- * This is technically what we should do.
- * However, the NIST tests specifically want
- * to see each XON or XOFF character that it
- * sends, so let's just send each character
- * by hand...
- */
-#if 0
- if (c == STOP_CHAR(tty))
- dgap_cmdw(ch, RPAUSE, 0, 0);
- else if (c == START_CHAR(tty))
- dgap_cmdw(ch, RRESUME, 0, 0);
- else
- dgap_wmove(ch, &c, 1);
-#else
- dgap_wmove(ch, &c, 1);
-#endif
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * Return modem signals to ld.
- */
-static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
-{
- int result;
- u8 mstat;
- ulong lock_flags;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- mstat = readb(&ch->ch_bs->m_stat);
- /* Append any outbound signals that might be pending... */
- mstat |= ch->ch_mostat;
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- result = 0;
-
- if (mstat & D_DTR(ch))
- result |= TIOCM_DTR;
- if (mstat & D_RTS(ch))
- result |= TIOCM_RTS;
- if (mstat & D_CTS(ch))
- result |= TIOCM_CTS;
- if (mstat & D_DSR(ch))
- result |= TIOCM_DSR;
- if (mstat & D_RI(ch))
- result |= TIOCM_RI;
- if (mstat & D_CD(ch))
- result |= TIOCM_CD;
-
- return put_user(result, value);
-}
-
-/*
- * dgap_set_modem_info()
- *
- * Set modem signals, called by ld.
- */
-static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, unsigned int command,
- unsigned int __user *value)
-{
- int ret;
- unsigned int arg;
- ulong lock_flags;
- ulong lock_flags2;
-
- ret = get_user(arg, value);
- if (ret)
- return ret;
-
- switch (command) {
- case TIOCMBIS:
- if (arg & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval |= D_RTS(ch);
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval |= D_DTR(ch);
- }
-
- break;
-
- case TIOCMBIC:
- if (arg & TIOCM_RTS) {
- ch->ch_mforce |= D_RTS(ch);
- ch->ch_mval &= ~(D_RTS(ch));
- }
-
- if (arg & TIOCM_DTR) {
- ch->ch_mforce |= D_DTR(ch);
- ch->ch_mval &= ~(D_DTR(ch));
- }
-
- break;
-
- case TIOCMSET:
- ch->ch_mforce = D_DTR(ch) | D_RTS(ch);
-
- if (arg & TIOCM_RTS)
- ch->ch_mval |= D_RTS(ch);
- else
- ch->ch_mval &= ~(D_RTS(ch));
-
- if (arg & TIOCM_DTR)
- ch->ch_mval |= (D_DTR(ch));
- else
- ch->ch_mval &= ~(D_DTR(ch));
-
- break;
-
- default:
- return -EINVAL;
- }
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * dgap_tty_digigeta()
- *
- * Ioctl to get the information for ditty.
- */
-static int dgap_tty_digigeta(struct channel_t *ch,
- struct digi_t __user *retinfo)
-{
- struct digi_t tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return -EFAULT;
-
- memset(&tmp, 0, sizeof(tmp));
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * dgap_tty_digiseta()
- *
- * Ioctl to set the information for ditty.
- */
-static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, struct digi_t __user *new_info)
-{
- struct digi_t new_digi;
- ulong lock_flags = 0;
- unsigned long lock_flags2;
-
- if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t)))
- return -EFAULT;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
-
- if (ch->ch_digi.digi_maxcps < 1)
- ch->ch_digi.digi_maxcps = 1;
-
- if (ch->ch_digi.digi_maxcps > 10000)
- ch->ch_digi.digi_maxcps = 10000;
-
- if (ch->ch_digi.digi_bufsize < 10)
- ch->ch_digi.digi_bufsize = 10;
-
- if (ch->ch_digi.digi_maxchar < 1)
- ch->ch_digi.digi_maxchar = 1;
-
- if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
- ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
-
- if (ch->ch_digi.digi_onlen > DIGI_PLEN)
- ch->ch_digi.digi_onlen = DIGI_PLEN;
-
- if (ch->ch_digi.digi_offlen > DIGI_PLEN)
- ch->ch_digi.digi_offlen = DIGI_PLEN;
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * dgap_tty_digigetedelay()
- *
- * Ioctl to get the current edelay setting.
- */
-static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
-{
- struct channel_t *ch;
- struct un_t *un;
- int tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return -EFAULT;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -EFAULT;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -EFAULT;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -EFAULT;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- tmp = readw(&ch->ch_bs->edelay);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * dgap_tty_digisetedelay()
- *
- * Ioctl to set the EDELAY setting
- *
- */
-static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, int __user *new_info)
-{
- int new_digi;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (copy_from_user(&new_digi, new_info, sizeof(int)))
- return -EFAULT;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- writew((u16)new_digi, &ch->ch_bs->edelay);
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-}
-
-/*
- * dgap_tty_digigetcustombaud()
- *
- * Ioctl to get the current custom baud rate setting.
- */
-static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un,
- int __user *retinfo)
-{
- int tmp;
- ulong lock_flags;
-
- if (!retinfo)
- return -EFAULT;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- tmp = dgap_get_custom_baud(ch);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-/*
- * dgap_tty_digisetcustombaud()
- *
- * Ioctl to set the custom baud rate setting
- */
-static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd,
- struct un_t *un, int __user *new_info)
-{
- uint new_rate;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (copy_from_user(&new_rate, new_info, sizeof(unsigned int)))
- return -EFAULT;
-
- if (bd->bd_flags & BD_FEP5PLUS) {
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_custom_speed = new_rate;
-
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- }
-
- return 0;
-}
-
-/*
- * dgap_tty_set_termios()
- */
-static void dgap_tty_set_termios(struct tty_struct *tty,
- struct ktermios *old_termios)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- unsigned long lock_flags;
- unsigned long lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- dgap_carrier(ch);
- dgap_param(ch, bd, un->un_type);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static void dgap_tty_throttle(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_flags |= (CH_RXBLOCK);
-#if 1
- dgap_cmdw(ch, RPAUSE, 0, 0);
-#endif
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static void dgap_tty_unthrottle(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- ch->ch_flags &= ~(CH_RXBLOCK);
-
-#if 1
- dgap_cmdw(ch, RRESUME, 0, 0);
-#endif
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static struct board_t *find_board_by_major(unsigned int major)
-{
- unsigned int i;
-
- for (i = 0; i < MAXBOARDS; i++) {
- struct board_t *brd = dgap_board[i];
-
- if (!brd)
- return NULL;
- if (major == brd->serial_driver->major ||
- major == brd->print_driver->major)
- return brd;
- }
-
- return NULL;
-}
-
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_open()
- *
- */
-static int dgap_tty_open(struct tty_struct *tty, struct file *file)
-{
- struct board_t *brd;
- struct channel_t *ch;
- struct un_t *un;
- struct bs_t __iomem *bs;
- uint major;
- uint minor;
- int rc;
- ulong lock_flags;
- ulong lock_flags2;
- u16 head;
-
- major = MAJOR(tty_devnum(tty));
- minor = MINOR(tty_devnum(tty));
-
- brd = find_board_by_major(major);
- if (!brd)
- return -EIO;
-
- /*
- * If the board is not yet in the READY state, sleep
- * until it gets there or the open is cancelled.
- */
- rc = wait_event_interruptible(brd->state_wait,
- (brd->state & BOARD_READY));
-
- if (rc)
- return rc;
-
- spin_lock_irqsave(&brd->bd_lock, lock_flags);
-
- /* The wait above should guarantee this cannot happen */
- if (brd->state != BOARD_READY) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /* If opened device is greater than our number of ports, bail. */
- if (minor > brd->nasync) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- ch = brd->channels[minor];
- if (!ch) {
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /* Grab channel lock */
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /* Figure out our type */
- if (major == brd->serial_driver->major) {
- un = &brd->channels[minor]->ch_tun;
- un->un_type = DGAP_SERIAL;
- } else if (major == brd->print_driver->major) {
- un = &brd->channels[minor]->ch_pun;
- un->un_type = DGAP_PRINT;
- } else {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /* Store our unit into driver_data, so we always have it available. */
- tty->driver_data = un;
-
- /*
- * Error if channel info pointer is NULL.
- */
- bs = ch->ch_bs;
- if (!bs) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
- return -EIO;
- }
-
- /*
- * Initialize the tty.
- */
- if (!(un->un_flags & UN_ISOPEN)) {
- /* Store important variables. */
- un->un_tty = tty;
-
- /* Maybe do something here to the TTY struct as well? */
- }
-
- /*
- * Initialize if neither terminal nor printer is open.
- */
- if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
- ch->ch_mforce = 0;
- ch->ch_mval = 0;
-
- /*
- * Flush input queue.
- */
- head = readw(&bs->rx_head);
- writew(head, &bs->rx_tail);
-
- ch->ch_flags = 0;
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
-
- ch->ch_c_cflag = tty->termios.c_cflag;
- ch->ch_c_iflag = tty->termios.c_iflag;
- ch->ch_c_oflag = tty->termios.c_oflag;
- ch->ch_c_lflag = tty->termios.c_lflag;
- ch->ch_startc = tty->termios.c_cc[VSTART];
- ch->ch_stopc = tty->termios.c_cc[VSTOP];
-
- /* TODO: flush our TTY struct here? */
- }
-
- dgap_carrier(ch);
- /*
- * Run param in case we changed anything
- */
- dgap_param(ch, brd, un->un_type);
-
- /*
- * follow protocol for opening port
- */
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
-
- rc = dgap_block_til_ready(tty, file, ch);
-
- if (!un->un_tty)
- return -ENODEV;
-
- /* No going back now, increment our unit and channel counters */
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- ch->ch_open_count++;
- un->un_open_count++;
- un->un_flags |= (UN_ISOPEN);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- return rc;
-}
-
-/*
- * dgap_tty_close()
- *
- */
-static void dgap_tty_close(struct tty_struct *tty, struct file *file)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- /*
- * Determine if this is the last close or not - and if we agree about
- * which type of close it is with the Line Discipline
- */
- if ((tty->count == 1) && (un->un_open_count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. un_open_count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- un->un_open_count = 1;
- }
-
- if (--un->un_open_count < 0)
- un->un_open_count = 0;
-
- ch->ch_open_count--;
-
- if (ch->ch_open_count && un->un_open_count) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
- return;
- }
-
- /* OK, it's the last close on the unit */
-
- un->un_flags |= UN_CLOSING;
-
- tty->closing = 1;
-
- /*
- * Only officially close channel if count is 0 and
- * DIGI_PRINTER bit is not set.
- */
- if ((ch->ch_open_count == 0) &&
- !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
- ch->ch_flags &= ~(CH_RXBLOCK);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-
- /* wait for output to drain */
- /* This will also return if we take an interrupt */
-
- dgap_wait_for_drain(tty);
-
- dgap_tty_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
-
- tty->closing = 0;
-
- /*
- * If we have HUPCL set, lower DTR and RTS
- */
- if (ch->ch_c_cflag & HUPCL) {
- ch->ch_mostat &= ~(D_RTS(ch) | D_DTR(ch));
- dgap_cmdb(ch, SMODEM, 0, D_DTR(ch) | D_RTS(ch), 0);
-
- /*
- * Go to sleep to ensure RTS/DTR
- * have been dropped for modems to see it.
- */
- spin_unlock_irqrestore(&ch->ch_lock,
- lock_flags);
-
- /* .25 second delay for dropping RTS/DTR */
- schedule_timeout_interruptible(msecs_to_jiffies(250));
-
- spin_lock_irqsave(&ch->ch_lock, lock_flags);
- }
-
- ch->pscan_state = 0;
- ch->pscan_savechar = 0;
- ch->ch_baud_info = 0;
- }
-
- /*
- * turn off print device when closing print device.
- */
- if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
- dgap_wmove(ch, ch->ch_digi.digi_offstr,
- (int)ch->ch_digi.digi_offlen);
- ch->ch_flags &= ~CH_PRON;
- }
-
- un->un_tty = NULL;
- un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
- tty->driver_data = NULL;
-
- wake_up_interruptible(&ch->ch_flags_wait);
- wake_up_interruptible(&un->un_flags_wait);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
-}
-
-static void dgap_tty_start(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, RESUMETX, 0, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-static void dgap_tty_stop(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, PAUSETX, 0, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*
- * dgap_tty_flush_chars()
- *
- * Flush the cooked buffer
- *
- * Note to self, and any other poor souls who venture here:
- *
- * flush in this case DOES NOT mean dispose of the data.
- * instead, it means "stop buffering and send it if you
- * haven't already." Just guess how I figured that out... SRW 2-Jun-98
- *
- * It is also always called in interrupt context - JAR 8-Sept-99
- */
-static void dgap_tty_flush_chars(struct tty_struct *tty)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- ulong lock_flags;
- ulong lock_flags2;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /* TODO: Do something here */
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-}
-
-/*****************************************************************************
- *
- * The IOCTL function and all of its helpers
- *
- *****************************************************************************/
-
-/*
- * dgap_tty_ioctl()
- *
- * The usual assortment of ioctl's
- */
-static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int rc;
- u16 head;
- ulong lock_flags = 0;
- ulong lock_flags2 = 0;
- void __user *uarg = (void __user *)arg;
-
- if (!tty || tty->magic != TTY_MAGIC)
- return -ENODEV;
-
- un = tty->driver_data;
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return -ENODEV;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return -ENODEV;
-
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return -ENODEV;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- if (un->un_open_count <= 0) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EIO;
- }
-
- switch (cmd) {
- /* Here are all the standard ioctl's that we MUST implement */
- case TCSBRK:
- /*
- * TCSBRK is SVID version: non-zero arg --> no break;
- * this behaviour is exploited by tcdrain().
- *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (rc)
- return rc;
-
- rc = dgap_wait_for_drain(tty);
-
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- /* within case TCSBRK, cmd is always TCSBRK, so only arg matters */
- if (!arg)
- dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-
- case TCSBRKP:
- /*
- * Support for POSIX tcsendbreak().
- *
- * According to POSIX.1 spec (7.2.2.1.2) breaks should be
- * between 0.25 and 0.5 seconds so we'll ask for something
- * in the middle: 0.375 seconds.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (rc)
- return rc;
-
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-
- case TIOCSBRK:
- /*
- * FEP5 doesn't support turning on a break unconditionally.
- * The FEP5 device will stop sending a break automatically
- * after the specified time value that was sent when turning on
- * the break.
- */
- rc = tty_check_change(tty);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- if (rc)
- return rc;
-
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
-
- dgap_cmdw(ch, SBREAK, (u16)SBREAK_TIME, 0);
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-
- case TIOCCBRK:
- /*
- * FEP5 doesn't support turning off a break unconditionally.
- * The FEP5 device will stop sending a break automatically
- * after the specified time value that was sent when turning on
- * the break.
- */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return 0;
-
- case TIOCGSOFTCAR:
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return put_user(C_CLOCAL(tty) ? 1 : 0,
- (unsigned long __user *)arg);
-
- case TIOCSSOFTCAR:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- rc = get_user(arg, (unsigned long __user *)arg);
- if (rc)
- return rc;
-
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
- (arg ? CLOCAL : 0));
- dgap_param(ch, bd, un->un_type);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return 0;
-
- case TIOCMGET:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_get_modem_info(ch, uarg);
-
- case TIOCMBIS:
- case TIOCMBIC:
- case TIOCMSET:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_set_modem_info(ch, bd, un, cmd, uarg);
-
- /*
- * Here are any additional ioctl's that we want to implement
- */
-
- case TCFLSH:
- /*
- * The linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- rc = tty_check_change(tty);
- if (rc) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return rc;
- }
-
- if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
- if (un->un_type != DGAP_PRINT) {
- head = readw(&ch->ch_bs->rx_head);
- writew(head, &ch->ch_bs->rx_tail);
- writeb(0, &ch->ch_bs->orun);
- }
- }
-
- if ((arg != TCOFLUSH) && (arg != TCIOFLUSH)) {
- /* pretend we didn't recognize this IOCTL */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return -ENOIOCTLCMD;
- }
-
- ch->ch_flags &= ~CH_STOP;
- head = readw(&ch->ch_bs->tx_head);
- dgap_cmdw(ch, FLUSHTX, (u16)head, 0);
- dgap_cmdw(ch, RESUMETX, 0, 0);
- if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW | UN_EMPTY);
- wake_up_interruptible(&ch->ch_tun.un_flags_wait);
- }
- if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY);
- wake_up_interruptible(&ch->ch_pun.un_flags_wait);
- }
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
-
- /* Can't hold any locks when calling tty_wakeup! */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- tty_wakeup(tty);
-
- /* pretend we didn't recognize this IOCTL */
- return -ENOIOCTLCMD;
-
- case TCSETSF:
- case TCSETSW:
- /*
- * The linux tty driver doesn't have a flush
- * input routine for the driver, assuming all backed
- * up data is in the line disc. buffers. However,
- * we all know that's not the case. Here, we
- * act on the ioctl, but then lie and say we didn't
- * so the line discipline will process the flush
- * also.
- */
- if (cmd == TCSETSF) {
- /* flush rx */
- ch->ch_flags &= ~CH_STOP;
- head = readw(&ch->ch_bs->rx_head);
- writew(head, &ch->ch_bs->rx_tail);
- }
-
- /* now wait for all the output to drain */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
-
- /* pretend we didn't recognize this */
- return -ENOIOCTLCMD;
-
- case TCSETAW:
-
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
-
- /* pretend we didn't recognize this */
- return -ENOIOCTLCMD;
-
- case TCXONC:
- /*
- * The Linux Line Discipline (LD) would do this for us if we
- * let it, but we have the special firmware options to do this
- * the "right way" regardless of hardware or software flow
-		 * control so we'll do it ourselves instead of letting the LD
- * do it.
- */
- rc = tty_check_change(tty);
- if (rc) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return rc;
- }
-
- switch (arg) {
- case TCOON:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- dgap_tty_start(tty);
- return 0;
- case TCOOFF:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- dgap_tty_stop(tty);
- return 0;
- case TCION:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- /* Make the ld do it */
- return -ENOIOCTLCMD;
- case TCIOFF:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- /* Make the ld do it */
- return -ENOIOCTLCMD;
- default:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return -EINVAL;
- }
-
- case DIGI_GETA:
- /* get information for ditty */
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digigeta(ch, uarg);
-
- case DIGI_SETAW:
- case DIGI_SETAF:
-
- /* set information for ditty */
-		if (cmd == DIGI_SETAW) {
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- rc = dgap_wait_for_drain(tty);
- if (rc)
- return -EINTR;
- spin_lock_irqsave(&bd->bd_lock, lock_flags);
- spin_lock_irqsave(&ch->ch_lock, lock_flags2);
- } else
- tty_ldisc_flush(tty);
- /* fall thru */
-
- case DIGI_SETA:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digiseta(ch, bd, un, uarg);
-
- case DIGI_GEDELAY:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digigetedelay(tty, uarg);
-
- case DIGI_SEDELAY:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digisetedelay(ch, bd, un, uarg);
-
- case DIGI_GETCUSTOMBAUD:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digigetcustombaud(ch, un, uarg);
-
- case DIGI_SETCUSTOMBAUD:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return dgap_tty_digisetcustombaud(ch, bd, un, uarg);
-
- case DIGI_RESET_PORT:
- dgap_firmware_reset_port(ch);
- dgap_param(ch, bd, un->un_type);
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
- return 0;
-
- default:
- spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
- spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-
- return -ENOIOCTLCMD;
- }
-}
-
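-/*
- * Editor's note (not part of the original driver): a minimal userspace
- * sketch of the modem-control ioctls serviced above (TIOCMGET/TIOCMSET).
- * The device path "/dev/tty_dgap_0_00" is hypothetical; substitute a real
- * port node created by this driver.
- */
-#if 0	/* illustrative only */
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <termios.h>
-#include <unistd.h>
-
-int main(void)
-{
-	int fd = open("/dev/tty_dgap_0_00", O_RDWR | O_NOCTTY);
-	int bits;
-
-	if (fd < 0 || ioctl(fd, TIOCMGET, &bits) < 0)
-		return 1;
-
-	bits |= TIOCM_DTR | TIOCM_RTS;	/* raise DTR and RTS */
-	if (ioctl(fd, TIOCMSET, &bits) < 0)
-		return 1;
-
-	close(fd);
-	return 0;
-}
-#endif
-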
-static const struct tty_operations dgap_tty_ops = {
- .open = dgap_tty_open,
- .close = dgap_tty_close,
- .write = dgap_tty_write,
- .write_room = dgap_tty_write_room,
- .flush_buffer = dgap_tty_flush_buffer,
- .chars_in_buffer = dgap_tty_chars_in_buffer,
- .flush_chars = dgap_tty_flush_chars,
- .ioctl = dgap_tty_ioctl,
- .set_termios = dgap_tty_set_termios,
- .stop = dgap_tty_stop,
- .start = dgap_tty_start,
- .throttle = dgap_tty_throttle,
- .unthrottle = dgap_tty_unthrottle,
- .hangup = dgap_tty_hangup,
- .put_char = dgap_tty_put_char,
- .tiocmget = dgap_tty_tiocmget,
- .tiocmset = dgap_tty_tiocmset,
- .break_ctl = dgap_tty_send_break,
- .wait_until_sent = dgap_tty_wait_until_sent,
- .send_xchar = dgap_tty_send_xchar
-};
-
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgap_tty_register()
- *
- * Init the tty subsystem for this board.
- */
-static int dgap_tty_register(struct board_t *brd)
-{
- int rc;
-
- brd->serial_driver = tty_alloc_driver(MAXPORTS,
- TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
- if (IS_ERR(brd->serial_driver))
- return PTR_ERR(brd->serial_driver);
-
- snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_",
- brd->boardnum);
- brd->serial_driver->name = brd->serial_name;
- brd->serial_driver->name_base = 0;
- brd->serial_driver->major = 0;
- brd->serial_driver->minor_start = 0;
- brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->serial_driver->init_termios = dgap_default_termios;
- brd->serial_driver->driver_name = DRVSTR;
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->serial_driver, &dgap_tty_ops);
-
- /*
- * If we're doing transparent print, we have to do all of the above
- * again, separately so we don't get the LD confused about what major
- * we are when we get into the dgap_tty_open() routine.
- */
- brd->print_driver = tty_alloc_driver(MAXPORTS,
- TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
- if (IS_ERR(brd->print_driver)) {
- rc = PTR_ERR(brd->print_driver);
- goto free_serial_drv;
- }
-
- snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_",
- brd->boardnum);
- brd->print_driver->name = brd->print_name;
- brd->print_driver->name_base = 0;
- brd->print_driver->major = 0;
- brd->print_driver->minor_start = 0;
- brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
- brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
- brd->print_driver->init_termios = dgap_default_termios;
- brd->print_driver->driver_name = DRVSTR;
-
- /*
- * Entry points for driver. Called by the kernel from
- * tty_io.c and n_tty.c.
- */
- tty_set_operations(brd->print_driver, &dgap_tty_ops);
-
- /* Register tty devices */
- rc = tty_register_driver(brd->serial_driver);
- if (rc < 0)
- goto free_print_drv;
-
- /* Register Transparent Print devices */
- rc = tty_register_driver(brd->print_driver);
- if (rc < 0)
- goto unregister_serial_drv;
-
- return 0;
-
-unregister_serial_drv:
- tty_unregister_driver(brd->serial_driver);
-free_print_drv:
- put_tty_driver(brd->print_driver);
-free_serial_drv:
- put_tty_driver(brd->serial_driver);
-
- return rc;
-}
-
-static void dgap_tty_unregister(struct board_t *brd)
-{
- tty_unregister_driver(brd->print_driver);
- tty_unregister_driver(brd->serial_driver);
- put_tty_driver(brd->print_driver);
- put_tty_driver(brd->serial_driver);
-}
-
-static int dgap_alloc_flipbuf(struct board_t *brd)
-{
- /*
- * allocate flip buffer for board.
- */
- brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipbuf)
- return -ENOMEM;
-
- brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
- if (!brd->flipflagbuf) {
- kfree(brd->flipbuf);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void dgap_free_flipbuf(struct board_t *brd)
-{
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
-}
-
-static struct board_t *dgap_verify_board(struct device *p)
-{
- struct board_t *bd;
-
- if (!p)
- return NULL;
-
- bd = dev_get_drvdata(p);
- if (!bd || bd->magic != DGAP_BOARD_MAGIC || bd->state != BOARD_READY)
- return NULL;
-
- return bd;
-}
-
-static ssize_t dgap_ports_state_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s\n", bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_open_count ? "Open" : "Closed");
- }
- return count;
-}
-static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
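-
-/*
- * Editor's note (not part of the original driver): the _show handlers in
- * this block all share one accumulation idiom. A standalone sketch, with
- * one caveat: snprintf() returns the would-be length, so on a very full
- * page PAGE_SIZE - count could go negative; the guard below avoids that.
- */
-#if 0	/* illustrative only */
-#include <stdio.h>
-
-#define PAGE_SIZE 4096
-
-static int demo_ports_show(char *buf, int nports, const int *open_count)
-{
-	int count = 0;
-	int i;
-
-	for (i = 0; i < nports && count < PAGE_SIZE; i++)
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %s\n",
-				  i, open_count[i] ? "Open" : "Closed");
-	return count < PAGE_SIZE ? count : PAGE_SIZE;
-}
-#endif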
-
-static ssize_t dgap_ports_baud_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++) {
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %d\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_baud_info);
- }
- return count;
-}
-static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
-
-static ssize_t dgap_ports_msignals_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++) {
- if (bd->channels[i]->ch_open_count)
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d %s %s %s %s %s %s\n",
- bd->channels[i]->ch_portnum,
- (bd->channels[i]->ch_mostat &
- UART_MCR_RTS) ? "RTS" : "",
- (bd->channels[i]->ch_mistat &
- UART_MSR_CTS) ? "CTS" : "",
- (bd->channels[i]->ch_mostat &
- UART_MCR_DTR) ? "DTR" : "",
- (bd->channels[i]->ch_mistat &
- UART_MSR_DSR) ? "DSR" : "",
- (bd->channels[i]->ch_mistat &
- UART_MSR_DCD) ? "DCD" : "",
- (bd->channels[i]->ch_mistat &
- UART_MSR_RI) ? "RI" : "");
- else
- count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", bd->channels[i]->ch_portnum);
- }
- return count;
-}
-static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
-
-static ssize_t dgap_ports_iflag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_iflag);
- return count;
-}
-static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
-
-static ssize_t dgap_ports_cflag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_cflag);
- return count;
-}
-static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
-
-static ssize_t dgap_ports_oflag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_oflag);
- return count;
-}
-static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
-
-static ssize_t dgap_ports_lflag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_c_lflag);
- return count;
-}
-static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
-
-static ssize_t dgap_ports_digi_flag_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_digi.digi_flags);
- return count;
-}
-static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
-
-static ssize_t dgap_ports_rxcount_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_rxcount);
- return count;
-}
-static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
-
-static ssize_t dgap_ports_txcount_show(struct device *p,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- int count = 0;
- unsigned int i;
-
- bd = dgap_verify_board(p);
- if (!bd)
- return 0;
-
- for (i = 0; i < bd->nasync; i++)
- count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
- bd->channels[i]->ch_portnum,
- bd->channels[i]->ch_txcount);
- return count;
-}
-static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
-
-static ssize_t dgap_tty_state_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ?
- "Open" : "Closed");
-}
-static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
-
-static ssize_t dgap_tty_baud_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
-}
-static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
-
-static ssize_t dgap_tty_msignals_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- if (ch->ch_open_count) {
- return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
- (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
- }
- return 0;
-}
-static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
-
-static ssize_t dgap_tty_iflag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
-
-static ssize_t dgap_tty_cflag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
-
-static ssize_t dgap_tty_oflag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
-
-static ssize_t dgap_tty_lflag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
-
-static ssize_t dgap_tty_digi_flag_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
-
-static ssize_t dgap_tty_rxcount_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
-
-static ssize_t dgap_tty_txcount_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
-
-static ssize_t dgap_tty_name_show(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct board_t *bd;
- struct channel_t *ch;
- struct un_t *un;
- int cn;
- int bn;
- struct cnode *cptr;
- int found = FALSE;
- int ncount = 0;
- int starto = 0;
- int i;
-
- if (!d)
- return 0;
- un = dev_get_drvdata(d);
- if (!un || un->magic != DGAP_UNIT_MAGIC)
- return 0;
- ch = un->un_ch;
- if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
- return 0;
- bd = ch->ch_bd;
- if (!bd || bd->magic != DGAP_BOARD_MAGIC)
- return 0;
- if (bd->state != BOARD_READY)
- return 0;
-
- bn = bd->boardnum;
- cn = ch->ch_portnum;
-
- for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
- if ((cptr->type == BNODE) &&
- ((cptr->u.board.type == APORT2_920P) ||
- (cptr->u.board.type == APORT4_920P) ||
- (cptr->u.board.type == APORT8_920P) ||
- (cptr->u.board.type == PAPORT4) ||
- (cptr->u.board.type == PAPORT8))) {
- found = TRUE;
- if (cptr->u.board.v_start)
- starto = cptr->u.board.start;
- else
- starto = 1;
- }
-
- if (cptr->type == TNODE && found == TRUE) {
- char *ptr1;
-
-			/* Skip a leading "tty" prefix, if present. */
-			ptr1 = cptr->u.ttyname;
-			if (strncmp(ptr1, "tty", 3) == 0)
-				ptr1 += 3;
-
- for (i = 0; i < dgap_config_get_num_prts(bd); i++) {
- if (cn != i)
- continue;
-
- return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
- (un->un_type == DGAP_PRINT) ?
- "pr" : "tty",
- ptr1, i + starto);
- }
- }
-
- if (cptr->type == CNODE) {
- for (i = 0; i < cptr->u.conc.nport; i++) {
- if (cn != (i + ncount))
- continue;
-
- return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
- (un->un_type == DGAP_PRINT) ?
- "pr" : "tty",
- cptr->u.conc.id,
- i + (cptr->u.conc.v_start ?
- cptr->u.conc.start : 1));
- }
-
- ncount += cptr->u.conc.nport;
- }
-
- if (cptr->type == MNODE) {
- for (i = 0; i < cptr->u.module.nport; i++) {
- if (cn != (i + ncount))
- continue;
-
- return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
- (un->un_type == DGAP_PRINT) ?
- "pr" : "tty",
- cptr->u.module.id,
- i + (cptr->u.module.v_start ?
- cptr->u.module.start : 1));
- }
-
- ncount += cptr->u.module.nport;
- }
- }
-
- return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
- (un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
-}
-static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
-
-static struct attribute *dgap_sysfs_tty_entries[] = {
- &dev_attr_state.attr,
- &dev_attr_baud.attr,
- &dev_attr_msignals.attr,
- &dev_attr_iflag.attr,
- &dev_attr_cflag.attr,
- &dev_attr_oflag.attr,
- &dev_attr_lflag.attr,
- &dev_attr_digi_flag.attr,
- &dev_attr_rxcount.attr,
- &dev_attr_txcount.attr,
- &dev_attr_custom_name.attr,
- NULL
-};
-
-
-/*
- * This function creates the sysfs files that export each signal's status;
- * each value is placed in a separate file.
- */
-static void dgap_create_ports_sysfiles(struct board_t *bd)
-{
- dev_set_drvdata(&bd->pdev->dev, bd);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_state);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_baud);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_msignals);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_iflag);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_cflag);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_oflag);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_lflag);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
- device_create_file(&bd->pdev->dev, &dev_attr_ports_txcount);
-}
-
-/* Removes all the sysfs files created for that board. */
-static void dgap_remove_ports_sysfiles(struct board_t *bd)
-{
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_state);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_baud);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_msignals);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_iflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_cflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_oflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_lflag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
- device_remove_file(&bd->pdev->dev, &dev_attr_ports_txcount);
-}
-
-/*
- * Copies the BIOS code from the user to the board,
- * and starts the BIOS running.
- */
-static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len)
-{
- u8 __iomem *addr;
- uint offset;
- unsigned int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- /*
- * clear POST area
- */
- for (i = 0; i < 16; i++)
- writeb(0, addr + POSTAREA + i);
-
- /*
- * Download bios
- */
- offset = 0x1000;
- memcpy_toio(addr + offset, ubios, len);
-
- writel(0x0bf00401, addr);
- writel(0, (addr + 4));
-
- /* Clear the reset, and change states. */
- writeb(FEPCLR, brd->re_map_port);
-}
-
-/*
- * Checks to see if the BIOS completed running on the card.
- */
-static int dgap_test_bios(struct board_t *brd)
-{
- u8 __iomem *addr;
- u16 word;
- u16 err1;
- u16 err2;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return -EINVAL;
-
- addr = brd->re_map_membase;
- word = readw(addr + POSTAREA);
-
- /*
- * It can take 5-6 seconds for a board to
- * pass the bios self test and post results.
- * Give it 10 seconds.
- */
- brd->wait_for_bios = 0;
- while (brd->wait_for_bios < 1000) {
- /* Check to see if BIOS thinks board is good. (GD). */
- if (word == *(u16 *)"GD")
- return 0;
- msleep_interruptible(10);
- brd->wait_for_bios++;
- word = readw(addr + POSTAREA);
- }
-
-	/* Gave up on the board after waiting too long */
- err1 = readw(addr + SEQUENCE);
- err2 = readw(addr + ERROR);
- dev_warn(&brd->pdev->dev, "%s failed diagnostics. Error #(%x,%x).\n",
- brd->name, err1, err2);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOBIOS;
-
- return -EIO;
-}
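-
-/*
- * Editor's note (not part of the original driver): the "GD" test above
- * compares two adjacent POST bytes through a u16, which quietly assumes a
- * little-endian host. A byte-order-independent form of the same check:
- */
-#if 0	/* illustrative only */
-static int dgap_post_ok(const unsigned char *post)
-{
-	/* The BIOS writes "GD" into the POST area when the board is good. */
-	return post[0] == 'G' && post[1] == 'D';
-}
-#endif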
-
-/*
- * Copies the FEP code from the user to the board,
- * and starts the FEP running.
- */
-static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len)
-{
- u8 __iomem *addr;
- uint offset;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- addr = brd->re_map_membase;
-
- /*
- * Download FEP
- */
- offset = 0x1000;
- memcpy_toio(addr + offset, ufep, len);
-
- /*
- * If board is a concentrator product, we need to give
- * it its config string describing how the concentrators look.
- */
- if ((brd->type == PCX) || (brd->type == PEPC)) {
- u8 string[100];
- u8 __iomem *config;
- u8 *xconfig;
- unsigned int i = 0;
-
- xconfig = dgap_create_config_string(brd, string);
-
- /* Write string to board memory */
- config = addr + CONFIG;
- for (; i < CONFIGSIZE; i++, config++, xconfig++) {
- writeb(*xconfig, config);
- if ((*xconfig & 0xff) == 0xff)
- break;
- }
- }
-
- writel(0xbfc01004, (addr + 0xc34));
- writel(0x3, (addr + 0xc30));
-}
-
-/*
- * Waits for the FEP to report that it's ready for us to use.
- */
-static int dgap_test_fep(struct board_t *brd)
-{
- u8 __iomem *addr;
- u16 word;
- u16 err1;
- u16 err2;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return -EINVAL;
-
- addr = brd->re_map_membase;
- word = readw(addr + FEPSTAT);
-
- /*
- * It can take 2-3 seconds for the FEP to
- * be up and running. Give it 5 secs.
- */
- brd->wait_for_fep = 0;
- while (brd->wait_for_fep < 500) {
- /* Check to see if FEP is up and running now. */
- if (word == *(u16 *)"OS") {
- /*
- * Check to see if the board can support FEP5+ commands.
- */
- word = readw(addr + FEP5_PLUS);
- if (word == *(u16 *)"5A")
- brd->bd_flags |= BD_FEP5PLUS;
-
- return 0;
- }
- msleep_interruptible(10);
- brd->wait_for_fep++;
- word = readw(addr + FEPSTAT);
- }
-
-	/* Gave up on the board after waiting too long */
- err1 = readw(addr + SEQUENCE);
- err2 = readw(addr + ERROR);
- dev_warn(&brd->pdev->dev,
- "FEPOS for %s not functioning. Error #(%x,%x).\n",
- brd->name, err1, err2);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
-
- return -EIO;
-}
-
-/*
- * Physically forces the FEP5 card to reset itself.
- */
-static void dgap_do_reset_board(struct board_t *brd)
-{
- u8 check;
- u32 check1;
- u32 check2;
- unsigned int i;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
- !brd->re_map_membase || !brd->re_map_port)
- return;
-
- /* FEPRST does not vary among supported boards */
- writeb(FEPRST, brd->re_map_port);
-
- for (i = 0; i <= 1000; i++) {
- check = readb(brd->re_map_port) & 0xe;
- if (check == FEPRST)
- break;
- udelay(10);
- }
- if (i > 1000) {
- dev_warn(&brd->pdev->dev,
-			 "Board not resetting... Failing board.\n");
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-
- /*
- * Make sure there really is memory out there.
- */
- writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
- writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
- check1 = readl(brd->re_map_membase + LOWMEM);
- check2 = readl(brd->re_map_membase + HIGHMEM);
-
- if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
- dev_warn(&brd->pdev->dev,
- "No memory at %p for board.\n",
- brd->re_map_membase);
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return;
- }
-}
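-
-/*
- * Editor's note (not part of the original driver): generic shape of the
- * read-back probe used above. Two distinct, bit-complementary patterns at
- * two offsets catch stuck data bits and aliased address decoding that a
- * single write/read would miss.
- */
-#if 0	/* illustrative only */
-#include <stdbool.h>
-#include <stdint.h>
-
-static bool probe_window(volatile uint32_t *lo, volatile uint32_t *hi)
-{
-	*lo = 0xa55a3cc3;
-	*hi = 0x5aa5c33c;
-	return *lo == 0xa55a3cc3 && *hi == 0x5aa5c33c;
-}
-#endif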
-
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-/*
- * Sends a concentrator image into the FEP5 board.
- */
-static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len)
-{
- char __iomem *vaddr;
- u16 offset;
- struct downld_t *to_dp;
-
- if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
- return;
-
- vaddr = brd->re_map_membase;
-
- offset = readw((u16 *)(vaddr + DOWNREQ));
- to_dp = (struct downld_t *)(vaddr + (int)offset);
- memcpy_toio(to_dp, uaddr, len);
-
- /* Tell card we have data for it */
- writew(0, vaddr + (DOWNREQ));
-
- brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
-}
-#endif
-
-#define EXPANSION_ROM_SIZE (64 * 1024)
-#define FEP5_ROM_MAGIC (0xFEFFFFFF)
-
-static void dgap_get_vpd(struct board_t *brd)
-{
- u32 magic;
- u32 base_offset;
- u16 rom_offset;
- u16 vpd_offset;
- u16 image_length;
- u16 i;
- u8 byte1;
- u8 byte2;
-
- /*
- * Poke the magic number at the PCI Rom Address location.
- * If VPD is supported, the value read from that address
- * will be non-zero.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- /* VPD not supported, bail */
- if (!magic)
- return;
-
- /*
- * To get to the OTPROM memory, we have to send the boards base
- * address or'ed with 1 into the PCI Rom Address location.
- */
- magic = brd->membase | 0x01;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
- pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
-
- byte1 = readb(brd->re_map_membase);
- byte2 = readb(brd->re_map_membase + 1);
-
- /*
- * If the board correctly swapped to the OTPROM memory,
- * the first 2 bytes (header) should be 0x55, 0xAA
- */
- if (byte1 == 0x55 && byte2 == 0xAA) {
- base_offset = 0;
-
- /*
- * We have to run through all the OTPROM memory looking
- * for the VPD offset.
- */
- while (base_offset <= EXPANSION_ROM_SIZE) {
- /*
- * Lots of magic numbers here.
- *
- * The VPD offset is located inside the ROM Data
- * Structure.
- *
- * We also have to remember the length of each
- * ROM Data Structure, so we can "hop" to the next
- * entry if the VPD isn't in the current
- * ROM Data Structure.
- */
- rom_offset = readw(brd->re_map_membase +
- base_offset + 0x18);
- image_length = readw(brd->re_map_membase +
- rom_offset + 0x10) * 512;
- vpd_offset = readw(brd->re_map_membase +
- rom_offset + 0x08);
-
- /* Found the VPD entry */
- if (vpd_offset)
- break;
-
- /* We didn't find a VPD entry, go to next ROM entry. */
- base_offset += image_length;
-
- byte1 = readb(brd->re_map_membase + base_offset);
- byte2 = readb(brd->re_map_membase + base_offset + 1);
-
- /*
- * If the new ROM offset doesn't have 0x55, 0xAA
- * as its header, we have run out of ROM.
- */
- if (byte1 != 0x55 || byte2 != 0xAA)
- break;
- }
-
- /*
- * If we have a VPD offset, then mark the board
- * as having a valid VPD, and copy VPDSIZE (512) bytes of
- * that VPD to the buffer we have in our board structure.
- */
- if (vpd_offset) {
- brd->bd_flags |= BD_HAS_VPD;
- for (i = 0; i < VPDSIZE; i++) {
- brd->vpd[i] = readb(brd->re_map_membase +
- vpd_offset + i);
- }
- }
- }
-
- /*
- * We MUST poke the magic number at the PCI Rom Address location again.
- * This makes the card report the regular board memory back to us,
- * rather than the OTPROM memory.
- */
- magic = FEP5_ROM_MAGIC;
- pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
-}
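-
-/*
- * Editor's note (not part of the original driver): the magic offsets above
- * come from the PCI expansion-ROM format: each image starts with the
- * signature 0x55 0xAA, carries a 16-bit pointer to its PCI data structure
- * at offset 0x18, and that structure holds the image length in 512-byte
- * units at offset 0x10. A sketch of one hop to the next image, reading the
- * pointer relative to the image start as the spec defines it:
- */
-#if 0	/* illustrative only */
-#include <stdint.h>
-
-static uint32_t next_rom_image(const uint8_t *rom, uint32_t off)
-{
-	uint16_t pcir = rom[off + 0x18] | (rom[off + 0x19] << 8);
-	uint16_t len = rom[off + pcir + 0x10] | (rom[off + pcir + 0x11] << 8);
-
-	return off + (uint32_t)len * 512;
-}
-#endif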
-
-
-static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
-
-
-static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards);
-}
-static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
-
-
-static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
-
-
-static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
-}
-static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
-
-static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
-}
-
-static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
- const char *buf, size_t count)
-{
- if (sscanf(buf, "%d\n", &dgap_poll_tick) != 1)
- return -EINVAL;
- return count;
-}
-static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
- dgap_driver_pollrate_store);
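-
-/*
- * Editor's note (not part of the original driver): once registered by
- * dgap_create_driver_sysfiles() below, these driver attributes live under
- * /sys/bus/pci/drivers/dgap/, so the poll rate can be read and tuned at
- * runtime, e.g. (output assumes the driver's default tick):
- *
- *   # cat /sys/bus/pci/drivers/dgap/pollrate
- *   20ms
- *   # echo 25 > /sys/bus/pci/drivers/dgap/pollrate
- */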
-
-
-static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- int rc = 0;
- struct device_driver *driverfs = &dgap_driver->driver;
-
- rc |= driver_create_file(driverfs, &driver_attr_version);
- rc |= driver_create_file(driverfs, &driver_attr_boards);
- rc |= driver_create_file(driverfs, &driver_attr_maxboards);
- rc |= driver_create_file(driverfs, &driver_attr_pollrate);
- rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
-
- return rc;
-}
-
-static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
-{
- struct device_driver *driverfs = &dgap_driver->driver;
-
- driver_remove_file(driverfs, &driver_attr_version);
- driver_remove_file(driverfs, &driver_attr_boards);
- driver_remove_file(driverfs, &driver_attr_maxboards);
- driver_remove_file(driverfs, &driver_attr_pollrate);
- driver_remove_file(driverfs, &driver_attr_pollcounter);
-}
-
-static struct attribute_group dgap_tty_attribute_group = {
- .name = NULL,
- .attrs = dgap_sysfs_tty_entries,
-};
-
-static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
-{
- int ret;
-
- ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
- if (ret)
- return;
-
- dev_set_drvdata(c, un);
-}
-
-static void dgap_remove_tty_sysfs(struct device *c)
-{
- sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
-}
-
-/*
- * Create pr and tty device entries
- */
-static int dgap_tty_register_ports(struct board_t *brd)
-{
- struct channel_t *ch;
- int i;
- int ret;
-
- brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports),
- GFP_KERNEL);
- if (!brd->serial_ports)
- return -ENOMEM;
-
- brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports),
- GFP_KERNEL);
- if (!brd->printer_ports) {
- ret = -ENOMEM;
- goto free_serial_ports;
- }
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_init(&brd->serial_ports[i]);
- tty_port_init(&brd->printer_ports[i]);
- }
-
-	for (i = 0; i < brd->nasync; i++) {
-		struct device *classp;
-
-		ch = brd->channels[i];
-
- classp = tty_port_register_device(&brd->serial_ports[i],
- brd->serial_driver,
- i, NULL);
-
- if (IS_ERR(classp)) {
- ret = PTR_ERR(classp);
- goto unregister_ttys;
- }
-
- dgap_create_tty_sysfs(&ch->ch_tun, classp);
- ch->ch_tun.un_sysfs = classp;
-
- classp = tty_port_register_device(&brd->printer_ports[i],
- brd->print_driver,
- i, NULL);
-
- if (IS_ERR(classp)) {
- ret = PTR_ERR(classp);
- goto unregister_ttys;
- }
-
- dgap_create_tty_sysfs(&ch->ch_pun, classp);
- ch->ch_pun.un_sysfs = classp;
- }
- dgap_create_ports_sysfiles(brd);
-
- return 0;
-
-unregister_ttys:
- while (i >= 0) {
- ch = brd->channels[i];
- if (ch->ch_tun.un_sysfs) {
- dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs);
- tty_unregister_device(brd->serial_driver, i);
- }
-
- if (ch->ch_pun.un_sysfs) {
- dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs);
- tty_unregister_device(brd->print_driver, i);
- }
- i--;
- }
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->serial_ports[i]);
- tty_port_destroy(&brd->printer_ports[i]);
- }
-
- kfree(brd->printer_ports);
- brd->printer_ports = NULL;
-
-free_serial_ports:
- kfree(brd->serial_ports);
- brd->serial_ports = NULL;
-
- return ret;
-}
-
-/*
- * dgap_cleanup_tty()
- *
- * Uninitialize the TTY portion of this driver. Free all memory and
- * resources.
- */
-static void dgap_cleanup_tty(struct board_t *brd)
-{
- struct device *dev;
- unsigned int i;
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->serial_ports[i]);
- dev = brd->channels[i]->ch_tun.un_sysfs;
- dgap_remove_tty_sysfs(dev);
- tty_unregister_device(brd->serial_driver, i);
- }
- tty_unregister_driver(brd->serial_driver);
- put_tty_driver(brd->serial_driver);
- kfree(brd->serial_ports);
-
- for (i = 0; i < brd->nasync; i++) {
- tty_port_destroy(&brd->printer_ports[i]);
- dev = brd->channels[i]->ch_pun.un_sysfs;
- dgap_remove_tty_sysfs(dev);
- tty_unregister_device(brd->print_driver, i);
- }
- tty_unregister_driver(brd->print_driver);
- put_tty_driver(brd->print_driver);
- kfree(brd->printer_ports);
-}
-
-static int dgap_request_irq(struct board_t *brd)
-{
- int rc;
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return -ENODEV;
-
- /*
- * Set up our interrupt handler if we are set to do interrupts.
- */
- if (dgap_config_get_useintr(brd) && brd->irq) {
- rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
-
- if (!rc)
- brd->intr_used = 1;
- }
- return 0;
-}
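-
-/*
- * Editor's note (not part of the original driver): IRQF_SHARED above means
- * the handler can be called for another device's interrupt on the same
- * line; the real handler, dgap_intr (elsewhere in dgap.c), must therefore
- * return IRQ_NONE when this board is not the source. Minimal shape of such
- * a handler, with the source check left as a placeholder:
- */
-#if 0	/* illustrative only */
-#include <linux/interrupt.h>
-
-static irqreturn_t demo_shared_intr(int irq, void *dev_id)
-{
-	struct board_t *brd = dev_id;	/* the dev_id passed to request_irq */
-
-	if (!brd /* || interrupt not raised by this board */)
-		return IRQ_NONE;	/* let other sharers claim it */
-
-	return IRQ_HANDLED;
-}
-#endif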
-
-static void dgap_free_irq(struct board_t *brd)
-{
- if (brd->intr_used && brd->irq)
- free_irq(brd->irq, brd);
-}
-
-static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
- struct board_t *brd)
-{
- const struct firmware *fw;
- char *tmp_ptr;
- int ret;
- char *dgap_config_buf;
-
- dgap_get_vpd(brd);
- dgap_do_reset_board(brd);
-
- if (fw_info[card_type].conf_name) {
- ret = request_firmware(&fw, fw_info[card_type].conf_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "config file %s not found\n",
- fw_info[card_type].conf_name);
- return ret;
- }
-
- dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL);
- if (!dgap_config_buf) {
- release_firmware(fw);
- return -ENOMEM;
- }
-
- memcpy(dgap_config_buf, fw->data, fw->size);
- release_firmware(fw);
-
- /*
- * preserve dgap_config_buf
- * as dgap_parsefile would
- * otherwise alter it.
- */
- tmp_ptr = dgap_config_buf;
-
- if (dgap_parsefile(&tmp_ptr) != 0) {
- kfree(dgap_config_buf);
- return -EINVAL;
- }
- kfree(dgap_config_buf);
- }
-
- /*
- * Match this board to a config the user created for us.
- */
- brd->bd_config =
- dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
-
- /*
- * Because the 4 port Xr products share the same PCI ID
- * as the 8 port Xr products, if we receive a NULL config
- * back, and this is a PAPORT8 board, retry with a
- * PAPORT4 attempt as well.
- */
- if (brd->type == PAPORT8 && !brd->bd_config)
- brd->bd_config =
- dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
-
- if (!brd->bd_config) {
- dev_err(&pdev->dev, "No valid configuration found\n");
- return -EINVAL;
- }
-
- if (fw_info[card_type].bios_name) {
- ret = request_firmware(&fw, fw_info[card_type].bios_name,
- &pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "bios file %s not found\n",
- fw_info[card_type].bios_name);
- return ret;
- }
- dgap_do_bios_load(brd, fw->data, fw->size);
- release_firmware(fw);
-
- /* Wait for BIOS to test board... */
- ret = dgap_test_bios(brd);
- if (ret)
- return ret;
- }
-
- if (fw_info[card_type].fep_name) {
- ret = request_firmware(&fw, fw_info[card_type].fep_name,
- &pdev->dev);
- if (ret) {
-			dev_err(&pdev->dev, "fep file %s not found\n",
- fw_info[card_type].fep_name);
- return ret;
- }
- dgap_do_fep_load(brd, fw->data, fw->size);
- release_firmware(fw);
-
- /* Wait for FEP to load on board... */
- ret = dgap_test_fep(brd);
- if (ret)
- return ret;
- }
-
-#ifdef DIGI_CONCENTRATORS_SUPPORTED
-	/*
-	 * If this is a CX or EPCX, we need to see if the firmware
-	 * is requesting a concentrator image from us.
-	 */
-	if ((brd->type == PCX) || (brd->type == PEPC)) {
-		u8 __iomem *vaddr = brd->re_map_membase;
-		/* Nonzero if FEP is requesting concentrator image. */
-		u16 check = readw(vaddr + DOWNREQ);
-
-		if (fw_info[card_type].con_name && check) {
-			ret = request_firmware(&fw, fw_info[card_type].con_name,
-					       &pdev->dev);
-			if (ret) {
-				dev_err(&pdev->dev, "conc file %s not found\n",
-					fw_info[card_type].con_name);
-				return ret;
-			}
-			/*
-			 * dgap_do_conc_load() copies the image to the offset
-			 * the FEP requested through DOWNREQ.
-			 */
-			dgap_do_conc_load(brd, (u8 *)fw->data, fw->size);
-			release_firmware(fw);
-		}
-	}
-#endif
-
- return 0;
-}
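-
-/*
- * Editor's note (not part of the original driver): each request_firmware()
- * call above resolves its file name against the kernel's firmware search
- * path (typically /lib/firmware). The concrete names come from fw_info[],
- * defined earlier in dgap.c; a hypothetical installed layout would be
- * /lib/firmware/dgap/<config>, /lib/firmware/dgap/<bios>, and so on.
- */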
-
-/*
- * dgap_tty_init()
- *
- * Init the tty subsystem. Called once per board after the board has been
- * downloaded and initialized.
- */
-static int dgap_tty_init(struct board_t *brd)
-{
- int i;
- int tlw;
- uint true_count;
- u8 __iomem *vaddr;
- u8 modem;
- struct channel_t *ch;
- struct bs_t __iomem *bs;
- struct cm_t __iomem *cm;
- int ret;
-
- /*
- * Initialize board structure elements.
- */
-
- vaddr = brd->re_map_membase;
- true_count = readw((vaddr + NCHAN));
-
- brd->nasync = dgap_config_get_num_prts(brd);
-
- if (!brd->nasync)
- brd->nasync = brd->maxports;
-
- if (brd->nasync > brd->maxports)
- brd->nasync = brd->maxports;
-
- if (true_count != brd->nasync) {
- dev_warn(&brd->pdev->dev,
- "%s configured for %d ports, has %d ports.\n",
- brd->name, brd->nasync, true_count);
-
- if ((brd->type == PPCM) &&
- (true_count == 64 || true_count == 0)) {
- dev_warn(&brd->pdev->dev,
- "Please make SURE the EBI cable running from the card\n");
- dev_warn(&brd->pdev->dev,
- "to each EM module is plugged into EBI IN!\n");
- }
-
- brd->nasync = true_count;
-
- /* If no ports, don't bother going any further */
- if (!brd->nasync) {
- brd->state = BOARD_FAILED;
- brd->dpastatus = BD_NOFEP;
- return -EIO;
- }
- }
-
- /*
- * Allocate channel memory that might not have been allocated
- * when the driver was first loaded.
- */
- for (i = 0; i < brd->nasync; i++) {
- brd->channels[i] =
- kzalloc(sizeof(struct channel_t), GFP_KERNEL);
- if (!brd->channels[i]) {
- ret = -ENOMEM;
- goto free_chan;
- }
- }
-
-	vaddr = brd->re_map_membase;
-
- bs = (struct bs_t __iomem *)((ulong)vaddr + CHANBUF);
- cm = (struct cm_t __iomem *)((ulong)vaddr + CMDBUF);
-
- brd->bd_bs = bs;
-
- /* Set up channel variables */
-	for (i = 0; i < brd->nasync; i++, bs++) {
-		ch = brd->channels[i];
-
- spin_lock_init(&ch->ch_lock);
-
- /* Store all our magic numbers */
- ch->magic = DGAP_CHANNEL_MAGIC;
- ch->ch_tun.magic = DGAP_UNIT_MAGIC;
- ch->ch_tun.un_type = DGAP_SERIAL;
- ch->ch_tun.un_ch = ch;
- ch->ch_tun.un_dev = i;
-
- ch->ch_pun.magic = DGAP_UNIT_MAGIC;
- ch->ch_pun.un_type = DGAP_PRINT;
- ch->ch_pun.un_ch = ch;
- ch->ch_pun.un_dev = i;
-
- ch->ch_vaddr = vaddr;
- ch->ch_bs = bs;
- ch->ch_cm = cm;
- ch->ch_bd = brd;
- ch->ch_portnum = i;
- ch->ch_digi = dgap_digi_init;
-
- /*
- * Set up digi dsr and dcd bits based on altpin flag.
- */
- if (dgap_config_get_altpin(brd)) {
- ch->ch_dsr = DM_CD;
- ch->ch_cd = DM_DSR;
- ch->ch_digi.digi_flags |= DIGI_ALTPIN;
- } else {
- ch->ch_cd = DM_CD;
- ch->ch_dsr = DM_DSR;
- }
-
- ch->ch_taddr = vaddr + (ioread16(&ch->ch_bs->tx_seg) << 4);
- ch->ch_raddr = vaddr + (ioread16(&ch->ch_bs->rx_seg) << 4);
- ch->ch_tx_win = 0;
- ch->ch_rx_win = 0;
- ch->ch_tsize = readw(&ch->ch_bs->tx_max) + 1;
- ch->ch_rsize = readw(&ch->ch_bs->rx_max) + 1;
- ch->ch_tstart = 0;
- ch->ch_rstart = 0;
-
- /*
- * Set queue water marks, interrupt mask,
- * and general tty parameters.
- */
- tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
- ch->ch_tsize / 2;
- ch->ch_tlw = tlw;
-
- dgap_cmdw(ch, STLOW, tlw, 0);
-
- dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
-
- dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
-
- ch->ch_mistat = readb(&ch->ch_bs->m_stat);
-
- init_waitqueue_head(&ch->ch_flags_wait);
- init_waitqueue_head(&ch->ch_tun.un_flags_wait);
- init_waitqueue_head(&ch->ch_pun.un_flags_wait);
-
- /* Turn on all modem interrupts for now */
- modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
- writeb(modem, &ch->ch_bs->m_int);
-
- /*
- * Set edelay to 0 if interrupts are turned on,
- * otherwise set edelay to the usual 100.
- */
- if (brd->intr_used)
- writew(0, &ch->ch_bs->edelay);
- else
- writew(100, &ch->ch_bs->edelay);
-
- writeb(1, &ch->ch_bs->idata);
- }
-
- return 0;
-
-free_chan:
- while (--i >= 0) {
- kfree(brd->channels[i]);
- brd->channels[i] = NULL;
- }
- return ret;
-}
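-
-/*
- * Editor's note (not part of the original driver): worked example of the
- * watermark arithmetic above, for hypothetical 4 KiB transmit and 2 KiB
- * receive buffers:
- */
-#if 0	/* illustrative only */
-#include <stdio.h>
-
-int main(void)
-{
-	int tsize = 4096, rsize = 2048;		/* hypothetical sizes */
-	int tlw = tsize >= 2000 ? tsize * 5 / 8 : tsize / 2;
-
-	/* prints: STLOW=2560 SRLOW=1024 SRHIGH=1792 */
-	printf("STLOW=%d SRLOW=%d SRHIGH=%d\n",
-	       tlw, rsize / 2, 7 * rsize / 8);
-	return 0;
-}
-#endif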
-
-/*
- * dgap_tty_free()
- *
- * Free the channels allocated in dgap_tty_init().
- */
-static void dgap_tty_free(struct board_t *brd)
-{
- int i;
-
- for (i = 0; i < brd->nasync; i++)
- kfree(brd->channels[i]);
-}
-
-static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int rc;
- struct board_t *brd;
-
- if (dgap_numboards >= MAXBOARDS)
- return -EPERM;
-
- rc = pci_enable_device(pdev);
- if (rc)
- return -EIO;
-
- brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards);
- if (IS_ERR(brd))
- return PTR_ERR(brd);
-
- rc = dgap_firmware_load(pdev, ent->driver_data, brd);
- if (rc)
- goto cleanup_brd;
-
- rc = dgap_alloc_flipbuf(brd);
- if (rc)
- goto cleanup_brd;
-
- rc = dgap_tty_register(brd);
- if (rc)
- goto free_flipbuf;
-
- rc = dgap_request_irq(brd);
- if (rc)
- goto unregister_tty;
-
- /*
- * Do tty device initialization.
- */
- rc = dgap_tty_init(brd);
- if (rc < 0)
- goto free_irq;
-
- rc = dgap_tty_register_ports(brd);
- if (rc)
- goto tty_free;
-
- brd->state = BOARD_READY;
- brd->dpastatus = BD_RUNNING;
-
- dgap_board[dgap_numboards++] = brd;
-
- return 0;
-
-tty_free:
- dgap_tty_free(brd);
-free_irq:
- dgap_free_irq(brd);
-unregister_tty:
- dgap_tty_unregister(brd);
-free_flipbuf:
- dgap_free_flipbuf(brd);
-cleanup_brd:
- dgap_cleanup_nodes();
- dgap_unmap(brd);
- kfree(brd);
-
- return rc;
-}
-
-/*
- * dgap_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgap_cleanup_board(struct board_t *brd)
-{
- unsigned int i;
-
- if (!brd || brd->magic != DGAP_BOARD_MAGIC)
- return;
-
- dgap_free_irq(brd);
-
- tasklet_kill(&brd->helper_tasklet);
-
- dgap_unmap(brd);
-
- /* Free all allocated channels structs */
- for (i = 0; i < MAXPORTS ; i++)
- kfree(brd->channels[i]);
-
- kfree(brd->flipbuf);
- kfree(brd->flipflagbuf);
-
- dgap_board[brd->boardnum] = NULL;
-
- kfree(brd);
-}
-
-static void dgap_stop(bool removesys, struct pci_driver *drv)
-{
- unsigned long lock_flags;
-
- spin_lock_irqsave(&dgap_poll_lock, lock_flags);
- dgap_poll_stop = 1;
- spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
- del_timer_sync(&dgap_poll_timer);
- if (removesys)
- dgap_remove_driver_sysfiles(drv);
-
- device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
- class_destroy(dgap_class);
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
-}
-
-static void dgap_remove_one(struct pci_dev *dev)
-{
- unsigned int i;
- struct pci_driver *drv = to_pci_driver(dev->dev.driver);
-
- dgap_stop(true, drv);
- for (i = 0; i < dgap_numboards; ++i) {
- dgap_remove_ports_sysfiles(dgap_board[i]);
- dgap_cleanup_tty(dgap_board[i]);
- dgap_cleanup_board(dgap_board[i]);
- }
-
- dgap_cleanup_nodes();
-}
-
-static struct pci_driver dgap_driver = {
- .name = "dgap",
- .probe = dgap_init_one,
- .id_table = dgap_pci_tbl,
- .remove = dgap_remove_one,
-};
-
-/*
- * Start of driver.
- */
-static int dgap_start(void)
-{
- int rc;
- unsigned long flags;
- struct device *device;
-
- dgap_numboards = 0;
-
- pr_info("For the tools package please visit http://www.digi.com\n");
-
- /*
- * Register our base character device into the kernel.
- */
-
- /*
- * Register management/dpa devices
- */
- rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops);
- if (rc < 0)
- return rc;
-
- dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
- if (IS_ERR(dgap_class)) {
- rc = PTR_ERR(dgap_class);
- goto failed_class;
- }
-
- device = device_create(dgap_class, NULL,
- MKDEV(DIGI_DGAP_MAJOR, 0),
- NULL, "dgap_mgmt");
- if (IS_ERR(device)) {
- rc = PTR_ERR(device);
- goto failed_device;
- }
-
- /* Start the poller */
- spin_lock_irqsave(&dgap_poll_lock, flags);
-	setup_timer(&dgap_poll_timer, dgap_poll_handler, 0);
- dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
- dgap_poll_timer.expires = dgap_poll_time;
- spin_unlock_irqrestore(&dgap_poll_lock, flags);
-
- add_timer(&dgap_poll_timer);
-
- return rc;
-
-failed_device:
- class_destroy(dgap_class);
-failed_class:
- unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
- return rc;
-}
-
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-/*
- * dgap_init_module()
- *
- * Module load. This is where it all starts.
- */
-static int dgap_init_module(void)
-{
- int rc;
-
- pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);
-
- rc = dgap_start();
- if (rc)
- return rc;
-
- rc = pci_register_driver(&dgap_driver);
- if (rc) {
- dgap_stop(false, NULL);
- return rc;
- }
-
- rc = dgap_create_driver_sysfiles(&dgap_driver);
- if (rc)
- goto err_unregister;
-
- dgap_driver_state = DRIVER_READY;
-
- return 0;
-
-err_unregister:
- pci_unregister_driver(&dgap_driver);
- return rc;
-}
-
-/*
- * dgap_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
-static void dgap_cleanup_module(void)
-{
- if (dgap_numboards)
- pci_unregister_driver(&dgap_driver);
-}
-
-module_init(dgap_init_module);
-module_exit(dgap_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Digi International, http://www.digi.com");
-MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
-MODULE_SUPPORTED_DEVICE("dgap");
diff --git a/drivers/staging/dgap/dgap.h b/drivers/staging/dgap/dgap.h
deleted file mode 100644
index c84dbf2a0684..000000000000
--- a/drivers/staging/dgap/dgap.h
+++ /dev/null
@@ -1,1229 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- * Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
- *
- *************************************************************************
- *
- * Driver includes
- *
- *************************************************************************/
-
-#ifndef __DGAP_DRIVER_H
-#define __DGAP_DRIVER_H
-
-#include <linux/types.h>		/* To pick up the various Linux types */
-#include <linux/tty.h> /* To pick up the various tty structs/defines */
-#include <linux/interrupt.h> /* For irqreturn_t type */
-
-#ifndef TRUE
-# define TRUE 1
-#endif
-
-#ifndef FALSE
-# define FALSE 0
-#endif
-
-#if !defined(TTY_FLIPBUF_SIZE)
-# define TTY_FLIPBUF_SIZE 512
-#endif
-
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
-
-/*
- * Driver identification
- */
-#define DG_NAME "dgap-1.3-16"
-#define DG_PART "40002347_C"
-#define DRVSTR "dgap"
-
-/*
- * defines from dgap_pci.h
- */
-#define PCIMAX 32 /* maximum number of PCI boards */
-
-#define DIGI_VID 0x114F
-
-#define PCI_DEV_EPC_DID 0x0002
-#define PCI_DEV_XEM_DID 0x0004
-#define PCI_DEV_XR_DID 0x0005
-#define PCI_DEV_CX_DID 0x0006
-#define PCI_DEV_XRJ_DID 0x0009 /* PLX-based Xr adapter */
-#define PCI_DEV_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */
-#define PCI_DEV_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */
-#define PCI_DEV_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */
-#define PCI_DEV_XR_422_DID 0x0012 /* Xr-422 */
-#define PCI_DEV_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */
-#define PCI_DEV_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */
-#define PCI_DEV_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */
-#define PCI_DEV_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */
-#define PCI_DEV_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */
-#define PCI_DEV_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */
-#define PCI_DEV_XEM_HP_DID 0x0059 /* HP Xem PCI */
-
-#define PCI_DEV_XEM_NAME "AccelePort XEM"
-#define PCI_DEV_CX_NAME "AccelePort CX"
-#define PCI_DEV_XR_NAME "AccelePort Xr"
-#define PCI_DEV_XRJ_NAME "AccelePort Xr (PLX)"
-#define PCI_DEV_XR_SAIP_NAME "AccelePort Xr (SAIP)"
-#define PCI_DEV_920_2_NAME "AccelePort Xr920 2 port"
-#define PCI_DEV_920_4_NAME "AccelePort Xr920 4 port"
-#define PCI_DEV_920_8_NAME "AccelePort Xr920 8 port"
-#define PCI_DEV_XR_422_NAME "AccelePort Xr 422"
-#define PCI_DEV_EPCJ_NAME "AccelePort EPC (PLX)"
-#define PCI_DEV_XR_BULL_NAME "AccelePort Xr (BULL)"
-#define PCI_DEV_XR_IBM_NAME "AccelePort Xr (IBM)"
-#define PCI_DEV_CX_IBM_NAME "AccelePort CX (IBM)"
-#define PCI_DEV_920_8_HP_NAME "AccelePort Xr920 8 port (HP)"
-#define PCI_DEV_XEM_HP_NAME "AccelePort XEM (HP)"
-
-/*
- * On the PCI boards, there is no IO space allocated
- * The I/O registers will be in the first 3 bytes of the
- * upper 2MB of the 4MB memory space. The board memory
- * will be mapped into the low 2MB of the 4MB memory space
- */
-
-/* Potential location of PCI BIOS from E0000 to FFFFF */
-#define PCI_BIOS_SIZE 0x00020000
-
-/* Size of Memory and I/O for PCI (4MB) */
-#define PCI_RAM_SIZE 0x00400000
-
-/* Size of Memory (2MB) */
-#define PCI_MEM_SIZE 0x00200000
-
-/* Max PCI Window Size (2MB) */
-#define PCI_WIN_SIZE 0x00200000
-
-#define PCI_WIN_SHIFT 21 /* 21 bits max */
-
-/* Offset of I/0 in Memory (2MB) */
-#define PCI_IO_OFFSET 0x00200000
-
-/* Size of IO (2MB) */
-#define PCI_IO_SIZE_DGAP 0x00200000
-
-/* Number of boards we support at once. */
-#define MAXBOARDS 32
-#define MAXPORTS 224
-#define MAXTTYNAMELEN 200
-
-/* Our 3 magic numbers for our board, channel and unit structs */
-#define DGAP_BOARD_MAGIC 0x5c6df104
-#define DGAP_CHANNEL_MAGIC 0x6c6df104
-#define DGAP_UNIT_MAGIC 0x7c6df104
-
-/* Serial port types */
-#define DGAP_SERIAL 0
-#define DGAP_PRINT 1
-
-#define SERIAL_TYPE_NORMAL 1
-
-/* 4 extra for alignment play space */
-#define WRITEBUFLEN ((4096) + 4)
-#define MYFLIPLEN N_TTY_BUF_SIZE
-
-#define SBREAK_TIME 0x25
-#define U2BSIZE 0x400
-
-#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
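-
-/*
- * Editor's note (not part of the original driver): worked example, assuming
- * HZ = 250: dgap_jiffies_from_ms(20) = (20 * 250) / 1000 = 5 jiffies.
- * The integer division also means anything under one jiffy (4 ms at
- * HZ = 250) truncates to 0.
- */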
-
-/*
- * Our major for the mgmt devices.
- *
- * We can use 22, because Digi was allocated 22 and 23 for the epca driver,
- * and 22 became free once the "cu" devices were removed in 2.6.
- * Also, this *IS* the epca driver, just PCI only now.
- */
-#ifndef DIGI_DGAP_MAJOR
-# define DIGI_DGAP_MAJOR 22
-#endif
-
-/*
- * The parameters we use to define the periods of the moving averages.
- */
-#define MA_PERIOD (HZ / 10)
-#define SMA_DUR (1 * HZ)
-#define EMA_DUR (1 * HZ)
-#define SMA_NPERIODS (SMA_DUR / MA_PERIOD)
-#define EMA_NPERIODS (EMA_DUR / MA_PERIOD)
-
-/*
- * Define a local default termios struct. All ports will be created
- * with this termios initially. This is the same structure that is defined
- * as the default in tty_io.c with the same settings overridden as in serial.c
- *
- * In short, this should match the internal serial ports' defaults.
- */
-#define DEFAULT_IFLAGS (ICRNL | IXON)
-#define DEFAULT_OFLAGS (OPOST | ONLCR)
-#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
-#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
- ECHOCTL | ECHOKE | IEXTEN)
-
-#ifndef _POSIX_VDISABLE
-#define _POSIX_VDISABLE ('\0')
-#endif
-
-#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
-#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
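-
-/*
- * Editor's note (not part of the original driver): the mask works because
- * SNIFF_MAX is a power of two, so "(pos + n) & SNIFF_MASK" wraps without a
- * divide, e.g. (65530 + 10) & 0xffff = 65540 & 0xffff = 4.
- */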
-
-#define VPDSIZE (512)
-
-/************************************************************************
- * FEP memory offsets
- ************************************************************************/
-#define START 0x0004L /* Execution start address */
-
-#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */
-#define CMDSTART 0x0400L /* Start of command buffer */
-#define CMDMAX 0x0800L /* End of command buffer */
-
-#define EVBUF 0x0d18L /* Event (ev_t) structure */
-#define EVSTART 0x0800L /* Start of event buffer */
-#define EVMAX 0x0c00L /* End of event buffer */
-#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' are here */
-#define ECS_SEG 0x0E44 /* Segment of the extended */
- /* channel structure */
-#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line */
- /* speed if the fep has extended */
- /* capabilities */
-
-/* BIOS MAGIC SPOTS */
-#define ERROR 0x0C14L /* BIOS error code */
-#define SEQUENCE 0x0C12L /* BIOS sequence indicator */
-#define POSTAREA 0x0C00L /* POST complete message area */
-
-/* FEP MAGIC SPOTS */
-#define FEPSTAT POSTAREA /* OS here when FEP comes up */
-#define NCHAN 0x0C02L /* number of ports FEP sees */
-#define PANIC 0x0C10L /* PANIC area for FEP */
-#define KMEMEM 0x0C30L /* Memory for KME use */
-#define CONFIG 0x0CD0L /* Concentrator configuration info */
-#define CONFIGSIZE 0x0030 /* configuration info size */
-#define DOWNREQ 0x0D00 /* Download request buffer pointer */
-
-#define CHANBUF 0x1000L /* Async channel (bs_t) structs */
-#define FEPOSSIZE 0x1FFF /* 8K FEPOS */
-
-#define XEMPORTS 0xC02 /*
- * Offset in board memory where FEP5 stores
- * how many ports it has detected.
- * NOTE: FEP5 reports 64 ports when the user
- * has the cable in EBI OUT instead of EBI IN.
- */
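
These offsets index the low 2MB board-memory window described above; the host reaches them through the remapped BAR rather than I/O ports. A minimal sketch of reading one of the magic spots (assumption: modeled on the mappings in this header, not lifted from the driver):

/* Sketch (assumption): read the port count the FEP reports at NCHAN. */
u16 nchan = readw(bd->re_map_membase + NCHAN);
if (nchan == 0 || nchan > MAXPORTS)
	dev_warn(&bd->pdev->dev, "implausible port count %u\n", nchan);
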
-
-#define FEPCLR 0x00
-#define FEPMEM 0x02
-#define FEPRST 0x04
-#define FEPINT 0x08
-#define FEPMASK 0x0e
-#define FEPWIN 0x80
-
-#define LOWMEM 0x0100
-#define HIGHMEM 0x7f00
-
-#define FEPTIMEOUT 200000
-
-#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */
-#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */
-#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */
-#define FEPPOLL 0x0c26 /* Fep event poll interval */
-
-#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */
-
-/************************************************************************
- * FEP supported functions
- ************************************************************************/
-#define SRLOW 0xe0 /* Set receive low water */
-#define SRHIGH 0xe1 /* Set receive high water */
-#define FLUSHTX 0xe2 /* Flush transmit buffer */
-#define PAUSETX 0xe3 /* Pause data transmission */
-#define RESUMETX 0xe4 /* Resume data transmission */
-#define SMINT 0xe5 /* Set Modem Interrupt */
-#define SAFLOWC 0xe6 /* Set Aux. flow control chars */
-#define SBREAK 0xe8 /* Send break */
-#define SMODEM 0xe9 /* Set 8530 modem control lines */
-#define SIFLAG 0xea /* Set UNIX iflags */
-#define SFLOWC 0xeb /* Set flow control characters */
-#define STLOW 0xec /* Set transmit low water mark */
-#define RPAUSE 0xee /* Pause receive */
-#define RRESUME 0xef /* Resume receive */
-#define CHRESET 0xf0 /* Reset Channel */
-#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
-#define SOFLAG 0xf3 /* Set UNIX oflags */
-#define SHFLOW 0xf4 /* Set hardware handshake */
-#define SCFLAG 0xf5 /* Set UNIX cflags */
-#define SVNEXT 0xf6 /* Set VNEXT character */
-#define SPINTFC 0xfc /* Reserved */
-#define SCOMMODE 0xfd /* Set RS232/422 mode */
-
-/************************************************************************
- * Modes for SCOMMODE
- ************************************************************************/
-#define MODE_232 0x00
-#define MODE_422 0x01
-
-/************************************************************************
- * Event flags.
- ************************************************************************/
-#define IFBREAK 0x01 /* Break received */
-#define IFTLW 0x02 /* Transmit low water */
-#define IFTEM 0x04 /* Transmitter empty */
-#define IFDATA 0x08 /* Receive data present */
-#define IFMODEM 0x20 /* Modem status change */
-
-/************************************************************************
- * Modem flags
- ************************************************************************/
-# define DM_RTS 0x02 /* Request to send */
-# define DM_CD 0x80 /* Carrier detect */
-# define DM_DSR 0x20 /* Data set ready */
-# define DM_CTS 0x10 /* Clear to send */
-# define DM_RI 0x40 /* Ring indicator */
-# define DM_DTR 0x01 /* Data terminal ready */
-
-/*
- * defines from dgap_conf.h
- */
-#define NULLNODE 0 /* header node, not used */
-#define BNODE 1 /* Board node */
-#define LNODE 2 /* Line node */
-#define CNODE 3 /* Concentrator node */
-#define MNODE 4 /* EBI Module node */
-#define TNODE 5 /* tty name prefix node */
-#define CUNODE 6 /* cu name prefix (non-SCO) */
-#define PNODE 7 /* trans. print prefix node */
-#define JNODE 8 /* maJor number node */
-#define ANODE 9 /* altpin */
-#define TSNODE 10 /* tty structure size */
-#define CSNODE 11 /* channel structure size */
-#define BSNODE 12 /* board structure size */
-#define USNODE 13 /* unit schedule structure size */
-#define FSNODE 14 /* f2200 structure size */
-#define VSNODE 15 /* size of VPIX structures */
-#define INTRNODE 16 /* enable interrupt */
-
-/* Enumeration of tokens */
-#define BEGIN 1
-#define END 2
-#define BOARD 10
-
-#define EPCFS 11 /* start of EPC family definitions */
-#define ICX 11
-#define MCX 13
-#define PCX 14
-#define IEPC 15
-#define EEPC 16
-#define MEPC 17
-#define IPCM 18
-#define EPCM 19
-#define MPCM 20
-#define PEPC 21
-#define PPCM 22
-#ifdef CP
-#define ICP 23
-#define ECP 24
-#define MCP 25
-#endif
-#define EPCFE 25 /* end of EPC family definitions */
-#define PC2E 26
-#define PC4E 27
-#define PC4E8K 28
-#define PC8E 29
-#define PC8E8K 30
-#define PC16E 31
-#define MC2E8K 34
-#define MC4E8K 35
-#define MC8E8K 36
-
-#define AVANFS 42 /* start of Avanstar family definitions */
-#define A8P 42
-#define A16P 43
-#define AVANFE 43 /* end of Avanstar family definitions */
-
-#define DA2000FS 44 /* start of AccelePort 2000 family definitions */
-#define DA22 44 /* AccelePort 2002 */
-#define DA24 45 /* AccelePort 2004 */
-#define DA28 46 /* AccelePort 2008 */
-#define DA216 47 /* AccelePort 2016 */
-#define DAR4 48 /* AccelePort RAS 4 port */
-#define DAR8 49 /* AccelePort RAS 8 port */
-#define DDR24 50 /* DataFire RAS 24 port */
-#define DDR30 51 /* DataFire RAS 30 port */
-#define DDR48 52 /* DataFire RAS 48 port */
-#define DDR60 53 /* DataFire RAS 60 port */
-#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */
-
-#define PCXRFS 106 /* start of PCXR family definitions */
-#define APORT4 106
-#define APORT8 107
-#define PAPORT4 108
-#define PAPORT8 109
-#define APORT4_920I 110
-#define APORT8_920I 111
-#define APORT4_920P 112
-#define APORT8_920P 113
-#define APORT2_920P 114
-#define PCXRFE 117 /* end of PCXR family definitions */
-
-#define LINE 82
-#ifdef T1
-#define T1M 83
-#define E1M 84
-#endif
-#define CONC 64
-#define CX 65
-#define EPC 66
-#define MOD 67
-#define PORTS 68
-#define METHOD 69
-#define CUSTOM 70
-#define BASIC 71
-#define STATUS 72
-#define MODEM 73
-/* The following tokens can appear in multiple places */
-#define SPEED 74
-#define NPORTS 75
-#define ID 76
-#define CABLE 77
-#define CONNECT 78
-#define MEM 80
-#define DPSZ 81
-
-#define TTYN 90
-#define CU 91
-#define PRINT 92
-#define XPRINT 93
-#define CMAJOR 94
-#define ALTPIN 95
-#define STARTO 96
-#define USEINTR 97
-#define PCIINFO 98
-
-#define TTSIZ 100
-#define CHSIZ 101
-#define BSSIZ 102
-#define UNTSIZ 103
-#define F2SIZ 104
-#define VPSIZ 105
-
-#define TOTAL_BOARD 2
-#define CURRENT_BRD 4
-#define BOARD_TYPE 6
-#define IO_ADDRESS 8
-#define MEM_ADDRESS 10
-
-#define FIELDS_PER_PAGE 18
-
-#define TB_FIELD 1
-#define CB_FIELD 3
-#define BT_FIELD 5
-#define IO_FIELD 7
-#define ID_FIELD 8
-#define ME_FIELD 9
-#define TTY_FIELD 11
-#define CU_FIELD 13
-#define PR_FIELD 15
-#define MPR_FIELD 17
-
-#define MAX_FIELD 512
-
-#define INIT 0
-#define NITEMS 128
-#define MAX_ITEM 512
-
-#define DSCRINST 1
-#define DSCRNUM 3
-#define ALTPINQ 5
-#define SSAVE 7
-
-#define DSCR "32"
-#define ONETONINE "123456789"
-#define ALL "1234567890"
-
-/*
- * All the possible states the driver can be while being loaded.
- */
-enum {
- DRIVER_INITIALIZED = 0,
- DRIVER_READY
-};
-
-/*
- * All the possible states the board can be while booting up.
- */
-enum {
- BOARD_FAILED = 0,
- BOARD_READY
-};
-
-/*
- * All the possible states that a requested concentrator image can be in.
- */
-enum {
- NO_PENDING_CONCENTRATOR_REQUESTS = 0,
- NEED_CONCENTRATOR,
- REQUESTED_CONCENTRATOR
-};
-
-/*
- * Modem line constants are defined as macros because DSR and
- * DCD are swappable using the ditty altpin option.
- */
-#define D_CD(ch) ch->ch_cd /* Carrier detect */
-#define D_DSR(ch) ch->ch_dsr /* Data set ready */
-#define D_RTS(ch) DM_RTS /* Request to send */
-#define D_CTS(ch) DM_CTS /* Clear to send */
-#define D_RI(ch) DM_RI /* Ring indicator */
-#define D_DTR(ch) DM_DTR /* Data terminal ready */
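
D_CD() and D_DSR() read cached per-channel values rather than raw DM_* bits because the ditty altpin option rewires which physical RJ-45 pin carries each signal. A sketch of how the cached values might be latched from a raw modem-status byte (hypothetical helper, assuming the DIGI_ALTPIN flag defined later in this header):

/* Hypothetical sketch: latch modem status, honoring DIGI_ALTPIN. */
static void latch_modem_status(struct channel_t *ch, unsigned char msig)
{
	if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
		/* altpin wiring: DSR and DCD arrive on each other's pins */
		ch->ch_cd  = !!(msig & DM_DSR);
		ch->ch_dsr = !!(msig & DM_CD);
	} else {
		ch->ch_cd  = !!(msig & DM_CD);
		ch->ch_dsr = !!(msig & DM_DSR);
	}
}
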
-
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
-
-/*
- * A structure to hold a statistics counter. We also
- * compute moving averages for this counter.
- */
-struct macounter {
- u32 cnt; /* Total count */
- ulong accum; /* Accumulator per period */
- ulong sma; /* Simple moving average */
- ulong ema; /* Exponential moving average */
-};
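
Each MA_PERIOD tick, the accumulated count is folded into the averages and the accumulator reset. A sketch of that per-period update, assuming the standard recurrences over SMA_NPERIODS and EMA_NPERIODS (integer arithmetic, so small samples truncate):

/* Sketch (assumption): fold one period's events into the averages. */
static void macounter_period(struct macounter *ma)
{
	ulong sample = ma->accum;

	ma->cnt += sample;
	/* decaying-window approximation of the simple moving average */
	ma->sma = (ma->sma * (SMA_NPERIODS - 1) + sample) / SMA_NPERIODS;
	/* classic exponential average: ema += (sample - ema) / N */
	ma->ema += (long)(sample - ma->ema) / EMA_NPERIODS;
	ma->accum = 0;		/* start the next period */
}
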
-
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
-#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */
-#define BD_HAS_VPD 0x0002 /* Board has VPD info available */
-
-/*
- * Per-board information
- */
-struct board_t {
- int magic; /* Board Magic number. */
- int boardnum; /* Board number: 0-3 */
-
- int type; /* Type of board */
- char *name; /* Product Name */
- struct pci_dev *pdev; /* Pointer to the pci_dev struct */
- u16 vendor; /* PCI vendor ID */
- u16 device; /* PCI device ID */
- u16 subvendor; /* PCI subsystem vendor ID */
- u16 subdevice; /* PCI subsystem device ID */
- u8 rev; /* PCI revision ID */
- uint pci_bus; /* PCI bus value */
- uint pci_slot; /* PCI slot value */
- u16 maxports; /* MAX ports this board can handle */
- u8 vpd[VPDSIZE]; /* VPD of board, if found */
- u32 bd_flags; /* Board flags */
-
- spinlock_t bd_lock; /* Used to protect board */
-
- u32 state; /* State of card. */
- wait_queue_head_t state_wait; /* Place to sleep on for state change */
-
- struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
-
- u32 wait_for_bios;
- u32 wait_for_fep;
-
- struct cnode *bd_config; /* Config of board */
-
- u16 nasync; /* Number of ports on card */
-
- ulong irq; /* Interrupt request number */
- ulong intr_count; /* Count of interrupts */
- u32 intr_used; /* Non-zero if using interrupts */
- u32 intr_running; /* Non-zero if FEP knows it's doing */
- /* interrupts */
-
- ulong port; /* Start of base io port of the card */
- ulong port_end; /* End of base io port of the card */
- ulong membase; /* Start of base memory of the card */
- ulong membase_end; /* End of base memory of the card */
-
- u8 __iomem *re_map_port; /* Remapped io port of the card */
- u8 __iomem *re_map_membase;/* Remapped memory of the card */
-
- u8 inhibit_poller; /* Tells the poller to leave us alone */
-
- struct channel_t *channels[MAXPORTS]; /* array of pointers to our */
- /* channels. */
-
- struct tty_driver *serial_driver;
- struct tty_port *serial_ports;
- char serial_name[200];
- struct tty_driver *print_driver;
- struct tty_port *printer_ports;
- char print_name[200];
-
- struct bs_t __iomem *bd_bs; /* Base structure pointer */
-
- char *flipbuf; /* Our flip buffer, alloced if */
- /* board is found */
- char *flipflagbuf; /* Our flip flag buffer, alloced */
- /* if board is found */
-
- u16 dpatype; /* The board "type", as defined */
- /* by DPA */
- u16 dpastatus; /* The board "status", as defined */
- /* by DPA */
-
- u32 conc_dl_status; /* Status of any pending conc */
- /* download */
-};
-
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN 0x0001 /* Device is open */
-#define UN_CLOSING 0x0002 /* Line is being closed */
-#define UN_IMM 0x0004 /* Service immediately */
-#define UN_BUSY 0x0008 /* Work pending on this channel */
-#define UN_BREAKI 0x0010 /* Input break received */
-#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
-#define UN_TIME 0x0040 /* Waiting on time */
-#define UN_EMPTY 0x0080 /* Waiting output queue empty */
-#define UN_LOW 0x0100 /* Waiting output low water mark*/
-#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
-#define UN_WOPEN 0x0400 /* Device waiting for open */
-#define UN_WIOCTL 0x0800 /* Device waiting for ioctl */
-#define UN_HANGUP 0x8000 /* Carrier lost */
-
-struct device;
-
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
-struct un_t {
- int magic; /* Unit Magic Number. */
- struct channel_t *un_ch;
- u32 un_time;
- u32 un_type;
- int un_open_count; /* Counter of opens to port */
- struct tty_struct *un_tty;/* Pointer to unit tty structure */
- u32 un_flags; /* Unit flags */
- wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
- u32 un_dev; /* Minor device number */
- tcflag_t un_oflag; /* oflags being done on board */
- tcflag_t un_lflag; /* lflags being done on board */
- struct device *un_sysfs;
-};
-
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON 0x0001 /* Printer on string */
-#define CH_OUT 0x0002 /* Dial-out device open */
-#define CH_STOP 0x0004 /* Output is stopped */
-#define CH_STOPI 0x0008 /* Input is stopped */
-#define CH_CD 0x0010 /* Carrier is present */
-#define CH_FCAR 0x0020 /* Carrier forced on */
-
-#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */
-#define CH_WLOW 0x0100 /* Term waiting low event */
-#define CH_WEMPTY 0x0200 /* Term waiting empty event */
-#define CH_RENABLE 0x0400 /* Buffer just emptied */
-#define CH_RACTIVE 0x0800 /* Process active in xxread() */
-#define CH_RWAIT 0x1000 /* Process waiting in xxread() */
-#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */
-#define CH_HANGUP 0x8000 /* Hangup received */
-
-/*
- * Definitions for ch_sniff_flags
- */
-#define SNIFF_OPEN 0x1
-#define SNIFF_WAIT_DATA 0x2
-#define SNIFF_WAIT_SPACE 0x4
-
-/************************************************************************
- *** Definitions for Digi ditty(1) command.
- ************************************************************************/
-
-/************************************************************************
- * This module provides application access to special Digi
- * serial line enhancements which are not standard UNIX(tm) features.
- ************************************************************************/
-
-#if !defined(TIOCMODG)
-
-#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */
-#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */
-
-#ifndef TIOCM_LE
-#define TIOCM_LE 0x01 /* line enable */
-#define TIOCM_DTR 0x02 /* data terminal ready */
-#define TIOCM_RTS 0x04 /* request to send */
-#define TIOCM_ST 0x08 /* secondary transmit */
-#define TIOCM_SR 0x10 /* secondary receive */
-#define TIOCM_CTS 0x20 /* clear to send */
-#define TIOCM_CAR 0x40 /* carrier detect */
-#define TIOCM_RNG 0x80 /* ring indicator */
-#define TIOCM_DSR 0x100 /* data set ready */
-#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
-#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
-#endif
-
-#endif
-
-#if !defined(TIOCMSET)
-#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
-#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */
-#endif
-
-#if !defined(TIOCMBIC)
-#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */
-#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl state */
-#endif
-
-#if !defined(TIOCSDTR)
-#define TIOCSDTR (('e'<<8) | 0) /* set DTR */
-#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */
-#endif
-
-/************************************************************************
- * Ioctl command arguments for DIGI parameters.
- ************************************************************************/
-#define DIGI_GETA (('e'<<8) | 94) /* Read params */
-
-#define DIGI_SETA (('e'<<8) | 95) /* Set params */
-#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
-#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
-
-#define DIGI_KME (('e'<<8) | 98) /* Read/Write Host */
- /* Adapter Memory */
-
-#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */
- /* control characters */
-#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */
- /* control characters */
-#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */
- /* flow control chars */
-#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. startc/stopc */
- /* flow control chars */
-
-#define DIGI_GEDELAY (('d'<<8) | 246) /* Get edelay */
-#define DIGI_SEDELAY (('d'<<8) | 247) /* Set edelay */
-
-struct digiflow_t {
- unsigned char startc; /* flow cntl start char */
- unsigned char stopc; /* flow cntl stop char */
-};
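
Unlike the _IOW()/_IOR() entries for custom baud above, these legacy numbers are hand-packed as (magic << 8) | ordinal and carry no size or direction encoding, so both sides must agree on the argument layout by convention. A user-space sketch (assumption: fd is an open dgap tty node and this header's definitions are visible):

#include <stdio.h>
#include <sys/ioctl.h>

static int get_flow_chars(int fd)
{
	struct digiflow_t df;

	/* kernel fills in the start/stop flow-control characters */
	if (ioctl(fd, DIGI_GETFLOW, &df) < 0)
		return -1;
	printf("startc=0x%02x stopc=0x%02x\n", df.startc, df.stopc);
	return 0;
}
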
-
-#ifdef FLOW_2200
-#define F2200_GETA (('e'<<8) | 104) /* Get 2x36 flow cntl flags */
-#define F2200_SETAW (('e'<<8) | 105) /* Set 2x36 flow cntl flags */
-#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
-#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
-#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
-#define F2200_XON 0xf8
-#define P2200_XON 0xf9
-#define F2200_XOFF 0xfa
-#define P2200_XOFF 0xfb
-
-#define FXOFF_MASK 0x03 /* 2200 flow status mask */
-#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
-#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
-#endif
-
-/************************************************************************
- * Values for digi_flags
- ************************************************************************/
-#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
-#define DIGI_FAST 0x0002 /* Fast baud rates */
-#define RTSPACE 0x0004 /* RTS input flow control */
-#define CTSPACE 0x0008 /* CTS output flow control */
-#define DSRPACE 0x0010 /* DSR output flow control */
-#define DCDPACE 0x0020 /* DCD output flow control */
-#define DTRPACE 0x0040 /* DTR input flow control */
-#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
-#define DIGI_FORCEDCD 0x0100 /* Force carrier */
-#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
-#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
-#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
-#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
-#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
-#define DIGI_422 0x4000 /* for 422/232 selectable panel */
-#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
-
-/************************************************************************
- * These options are not supported on the comxi.
- ************************************************************************/
-#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
-
-#define DIGI_PLEN 28 /* String length */
-#define DIGI_TSIZ 10 /* Terminal string len */
-
-/************************************************************************
- * Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
-struct digi_t {
- unsigned short digi_flags; /* Flags (see above) */
- unsigned short digi_maxcps; /* Max printer CPS */
- unsigned short digi_maxchar; /* Max chars in print queue */
- unsigned short digi_bufsize; /* Buffer size */
- unsigned char digi_onlen; /* Length of ON string */
- unsigned char digi_offlen; /* Length of OFF string */
- char digi_onstr[DIGI_PLEN]; /* Printer on string */
- char digi_offstr[DIGI_PLEN]; /* Printer off string */
- char digi_term[DIGI_TSIZ]; /* terminal string */
-};
-
-/************************************************************************
- * KME definitions and structures.
- ************************************************************************/
-#define RW_IDLE 0 /* Operation complete */
-#define RW_READ 1 /* Read Concentrator Memory */
-#define RW_WRITE 2 /* Write Concentrator Memory */
-
-struct rw_t {
- unsigned char rw_req; /* Request type */
- unsigned char rw_board; /* Host Adapter board number */
- unsigned char rw_conc; /* Concentrator number */
- unsigned char rw_reserved; /* Reserved for expansion */
- unsigned long rw_addr; /* Address in concentrator */
- unsigned short rw_size; /* Read/write request length */
- unsigned char rw_data[128]; /* Data to read/write */
-};
-
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
-struct digi_dinfo {
- unsigned long dinfo_nboards; /* # boards configured */
- char dinfo_reserved[12]; /* for future expansion */
- char dinfo_version[16]; /* driver version */
-};
-
-#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */
-
-/************************************************************************
- * Structure used with ioctl commands for per-board information
- *
- * physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
-struct digi_info {
- unsigned long info_bdnum; /* Board number (0 based) */
- unsigned long info_ioport; /* io port address */
- unsigned long info_physaddr; /* memory address */
- unsigned long info_physsize; /* Size of host mem window */
- unsigned long info_memsize; /* Amount of dual-port mem */
- /* on board */
- unsigned short info_bdtype; /* Board type */
- unsigned short info_nports; /* number of ports */
- char info_bdstate; /* board state */
- char info_reserved[7]; /* for future expansion */
-};
-
-#define DIGI_GETBD (('d'<<8) | 249) /* get board info */
-
-struct digi_stat {
- unsigned int info_chan; /* Channel number (0 based) */
- unsigned int info_brd; /* Board number (0 based) */
- unsigned long info_cflag; /* cflag for channel */
- unsigned long info_iflag; /* iflag for channel */
- unsigned long info_oflag; /* oflag for channel */
- unsigned long info_mstat; /* mstat for channel */
- unsigned long info_tx_data; /* tx_data for channel */
- unsigned long info_rx_data; /* rx_data for channel */
- unsigned long info_hflow; /* hflow for channel */
- unsigned long info_reserved[8]; /* for future expansion */
-};
-
-#define DIGI_GETSTAT (('d'<<8) | 244) /* get board info */
-/************************************************************************
- *
- * Structure used with ioctl commands for per-channel information
- *
- ************************************************************************/
-struct digi_ch {
- unsigned long info_bdnum; /* Board number (0 based) */
- unsigned long info_channel; /* Channel index number */
- unsigned long info_ch_cflag; /* Channel cflag */
- unsigned long info_ch_iflag; /* Channel iflag */
- unsigned long info_ch_oflag; /* Channel oflag */
- unsigned long info_chsize; /* Channel structure size */
- unsigned long info_sleep_stat; /* sleep status */
- dev_t info_dev; /* device number */
- unsigned char info_initstate; /* Channel init state */
- unsigned char info_running; /* Channel running state */
- long reserved[8]; /* reserved for future use */
-};
-
-/*
-* This structure is used with the DIGI_FEPCMD ioctl to
-* tell the driver which port the command is for.
-*/
-struct digi_cmd {
- int cmd;
- int word;
- int ncmds;
- int chan; /* channel index (zero based) */
- int bdid; /* board index (zero based) */
-};
-
-/*
-* info_sleep_stat defines
-*/
-#define INFO_RUNWAIT 0x0001
-#define INFO_WOPEN 0x0002
-#define INFO_TTIOW 0x0004
-#define INFO_CH_RWAIT 0x0008
-#define INFO_CH_WEMPTY 0x0010
-#define INFO_CH_WLOW 0x0020
-#define INFO_XXBUF_BUSY 0x0040
-
-#define DIGI_GETCH (('d'<<8) | 245) /* get board info */
-
-/* Board type definitions */
-
-#define SUBTYPE 0007
-#define T_PCXI 0000
-#define T_PCXM 0001
-#define T_PCXE 0002
-#define T_PCXR 0003
-#define T_SP 0004
-#define T_SP_PLUS 0005
-# define T_HERC 0000
-# define T_HOU 0001
-# define T_LON 0002
-# define T_CHA 0003
-#define FAMILY 0070
-#define T_COMXI 0000
-#define T_PCXX 0010
-#define T_CX 0020
-#define T_EPC 0030
-#define T_PCLITE 0040
-#define T_SPXX 0050
-#define T_AVXX 0060
-#define T_DXB 0070
-#define T_A2K_4_8 0070
-#define BUSTYPE 0700
-#define T_ISABUS 0000
-#define T_MCBUS 0100
-#define T_EISABUS 0200
-#define T_PCIBUS 0400
-
-/* Board State Definitions */
-
-#define BD_RUNNING 0x0
-#define BD_REASON 0x7f
-#define BD_NOTFOUND 0x1
-#define BD_NOIOPORT 0x2
-#define BD_NOMEM 0x3
-#define BD_NOBIOS 0x4
-#define BD_NOFEP 0x5
-#define BD_FAILED 0x6
-#define BD_ALLOCATED 0x7
-#define BD_TRIBOOT 0x8
-#define BD_BADKME 0x80
-
-#define DIGI_LOOPBACK (('d'<<8) | 252) /* Enable/disable UART */
- /* internal loopback */
-#define DIGI_SPOLL (('d'<<8) | 254) /* change poller rate */
-
-#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
-#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
-#define DIGI_RESET_PORT (('e'<<8) | 93) /* Reset port */
-
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
-struct channel_t {
- int magic; /* Channel Magic Number */
- struct bs_t __iomem *ch_bs; /* Base structure pointer */
- struct cm_t __iomem *ch_cm; /* Command queue pointer */
- struct board_t *ch_bd; /* Board structure pointer */
- u8 __iomem *ch_vaddr; /* FEP memory origin */
- u8 __iomem *ch_taddr; /* Write buffer origin */
- u8 __iomem *ch_raddr; /* Read buffer origin */
- struct digi_t ch_digi; /* Transparent Print structure */
- struct un_t ch_tun; /* Terminal unit info */
- struct un_t ch_pun; /* Printer unit info */
-
- spinlock_t ch_lock; /* provide for serialization */
- wait_queue_head_t ch_flags_wait;
-
- u32 pscan_state;
- u8 pscan_savechar;
-
- u32 ch_portnum; /* Port number, 0 offset. */
- u32 ch_open_count; /* open count */
- u32 ch_flags; /* Channel flags */
-
- u32 ch_cpstime; /* Time for CPS calculations */
-
- tcflag_t ch_c_iflag; /* channel iflags */
- tcflag_t ch_c_cflag; /* channel cflags */
- tcflag_t ch_c_oflag; /* channel oflags */
- tcflag_t ch_c_lflag; /* channel lflags */
-
- u16 ch_fepiflag; /* FEP tty iflags */
- u16 ch_fepcflag; /* FEP tty cflags */
- u16 ch_fepoflag; /* FEP tty oflags */
- u16 ch_wopen; /* Waiting for open process cnt */
- u16 ch_tstart; /* Transmit buffer start */
- u16 ch_tsize; /* Transmit buffer size */
- u16 ch_rstart; /* Receive buffer start */
- u16 ch_rsize; /* Receive buffer size */
- u16 ch_rdelay; /* Receive delay time */
-
- u16 ch_tlw; /* Our currently set low water mark */
-
- u16 ch_cook; /* Output character mask */
-
- u8 ch_card; /* Card channel is on */
- u8 ch_stopc; /* Stop character */
- u8 ch_startc; /* Start character */
-
- u8 ch_mostat; /* FEP output modem status */
- u8 ch_mistat; /* FEP input modem status */
- u8 ch_mforce; /* Modem values to be forced */
- u8 ch_mval; /* Force values */
- u8 ch_fepstopc; /* FEP stop character */
- u8 ch_fepstartc; /* FEP start character */
-
- u8 ch_astopc; /* Auxiliary Stop character */
- u8 ch_astartc; /* Auxiliary Start character */
- u8 ch_fepastopc; /* Auxiliary FEP stop char */
- u8 ch_fepastartc; /* Auxiliary FEP start char */
-
- u8 ch_hflow; /* FEP hardware handshake */
- u8 ch_dsr; /* stores real dsr value */
- u8 ch_cd; /* stores real cd value */
- u8 ch_tx_win; /* channel tx buffer window */
- u8 ch_rx_win; /* channel rx buffer window */
- uint ch_custom_speed; /* Custom baud, if set */
- uint ch_baud_info; /* Current baud info for /proc output */
- ulong ch_rxcount; /* total of data received so far */
- ulong ch_txcount; /* total of data transmitted so far */
- ulong ch_err_parity; /* Count of parity errors on channel */
- ulong ch_err_frame; /* Count of framing errors on channel */
- ulong ch_err_break; /* Count of breaks on channel */
- ulong ch_err_overrun; /* Count of overruns on channel */
-};
-
-/************************************************************************
- * Command structure definition.
- ************************************************************************/
-struct cm_t {
- unsigned short cm_head; /* Command buffer head offset */
- unsigned short cm_tail; /* Command buffer tail offset */
- unsigned short cm_start; /* start offset of buffer */
- unsigned short cm_max; /* last offset of buffer */
-};
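
cm_head and cm_tail are byte offsets into board memory, so the command buffer between CMDSTART and CMDMAX behaves as a ring: the host copies a command in at cm_head, advances it, and wraps at cm_max. A hedged sketch of that enqueue step (hypothetical helper, modeled on the layout above rather than taken from the driver):

static int fep_post_cmd(struct board_t *bd, struct cm_t __iomem *cm, u32 cmd)
{
	u16 head = readw(&cm->cm_head);
	u16 next = head + sizeof(cmd);

	if (next >= readw(&cm->cm_max))
		next = readw(&cm->cm_start);		/* wrap */
	if (next == readw(&cm->cm_tail))
		return -EBUSY;				/* ring full */

	writel(cmd, bd->re_map_membase + head);		/* command body */
	writew(next, &cm->cm_head);			/* publish to FEP */
	return 0;
}
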
-
-/************************************************************************
- * Event structure definition.
- ************************************************************************/
-struct ev_t {
- unsigned short ev_head; /* Command buffer head offset */
- unsigned short ev_tail; /* Command buffer tail offset */
- unsigned short ev_start; /* start offset of buffer */
- unsigned short ev_max; /* last offset of buffer */
-};
-
-/************************************************************************
- * Download buffer structure.
- ************************************************************************/
-struct downld_t {
- u8 dl_type; /* Header */
- u8 dl_seq; /* Download sequence */
- ushort dl_srev; /* Software revision number */
- ushort dl_lrev; /* Low revision number */
- ushort dl_hrev; /* High revision number */
- ushort dl_seg; /* Start segment address */
- ushort dl_size; /* Number of bytes to download */
- u8 dl_data[1024]; /* Download data */
-};
-
-/************************************************************************
- * Per channel buffer structure
- ************************************************************************
- * Base Structure Entries Usage Meanings to Host *
- * *
- * W = read write R = read only *
- * C = changed by commands only *
- * U = unknown (may be changed w/o notice) *
- ************************************************************************/
-struct bs_t {
- unsigned short tp_jmp; /* Transmit poll jump */
- unsigned short tc_jmp; /* Cooked procedure jump */
- unsigned short ri_jmp; /* Not currently used */
- unsigned short rp_jmp; /* Receive poll jump */
-
- unsigned short tx_seg; /* W Tx segment */
- unsigned short tx_head; /* W Tx buffer head offset */
- unsigned short tx_tail; /* R Tx buffer tail offset */
- unsigned short tx_max; /* W Tx buffer size - 1 */
-
- unsigned short rx_seg; /* W Rx segment */
- unsigned short rx_head; /* W Rx buffer head offset */
- unsigned short rx_tail; /* R Rx buffer tail offset */
- unsigned short rx_max; /* W Rx buffer size - 1 */
-
- unsigned short tx_lw; /* W Tx buffer low water mark */
- unsigned short rx_lw; /* W Rx buffer low water mark */
- unsigned short rx_hw; /* W Rx buffer high water mark*/
- unsigned short incr; /* W Increment to next channel*/
-
- unsigned short fepdev; /* U SCC device base address */
- unsigned short edelay; /* W Exception delay */
- unsigned short blen; /* W Break length */
- unsigned short btime; /* U Break complete time */
-
- unsigned short iflag; /* C UNIX input flags */
- unsigned short oflag; /* C UNIX output flags */
- unsigned short cflag; /* C UNIX control flags */
- unsigned short wfill[13]; /* U Reserved for expansion */
-
- unsigned char num; /* U Channel number */
- unsigned char ract; /* U Receiver active counter */
- unsigned char bstat; /* U Break status bits */
- unsigned char tbusy; /* W Transmit busy */
- unsigned char iempty; /* W Transmit empty event */
- /* enable */
- unsigned char ilow; /* W Transmit low-water event */
- /* enable */
- unsigned char idata; /* W Receive data interrupt */
- /* enable */
- unsigned char eflag; /* U Host event flags */
-
- unsigned char tflag; /* U Transmit flags */
- unsigned char rflag; /* U Receive flags */
- unsigned char xmask; /* U Transmit ready flags */
- unsigned char xval; /* U Transmit ready value */
- unsigned char m_stat; /* RC Modem status bits */
- unsigned char m_change; /* U Modem bits which changed */
- unsigned char m_int; /* W Modem interrupt enable */
- /* bits */
- unsigned char m_last; /* U Last modem status */
-
- unsigned char mtran; /* C Unreported modem trans */
- unsigned char orun; /* C Buffer overrun occurred */
- unsigned char astartc; /* W Auxiliary Xon char */
- unsigned char astopc; /* W Auxiliary Xoff char */
- unsigned char startc; /* W Xon character */
- unsigned char stopc; /* W Xoff character */
- unsigned char vnextc; /* W Vnext character */
- unsigned char hflow; /* C Software flow control */
-
- unsigned char fillc; /* U Delay Fill character */
- unsigned char ochar; /* U Saved output character */
- unsigned char omask; /* U Output character mask */
-
- unsigned char bfill[13]; /* U Reserved for expansion */
-
- unsigned char scc[16]; /* U SCC registers */
-};
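
Given the ownership legend (the host writes tx_head, the FEP owns tx_tail), free transmit space follows the usual ring arithmetic; tx_max is the buffer size minus one, so it doubles as the wrap mask for power-of-two buffers. A sketch under that assumption:

static u16 bs_tx_space(struct bs_t __iomem *bs)
{
	u16 head = readw(&bs->tx_head);
	u16 tail = readw(&bs->tx_tail);

	/* tx_max = size - 1, usable as a mask for power-of-two rings */
	return (tail - head - 1) & readw(&bs->tx_max);
}
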
-
-struct cnode {
- struct cnode *next;
- int type;
- int numbrd;
-
- union {
- struct {
- char type; /* Board Type */
- long addr; /* Memory Address */
- char *addrstr; /* Memory Address in string */
- long pcibus; /* PCI BUS */
- char *pcibusstr; /* PCI BUS in string */
- long pcislot; /* PCI SLOT */
- char *pcislotstr; /* PCI SLOT in string */
- long nport; /* Number of Ports */
- char *id; /* tty id */
- long start; /* start of tty counting */
- char *method; /* Install method */
- char v_addr;
- char v_pcibus;
- char v_pcislot;
- char v_nport;
- char v_id;
- char v_start;
- char v_method;
- char line1;
- char line2;
- char conc1; /* total concs in line1 */
- char conc2; /* total concs in line2 */
- char module1; /* total modules for line1 */
- char module2; /* total modules for line2 */
- char *status; /* config status */
- char *dimstatus; /* Y/N */
- int status_index; /* field pointer */
- } board;
-
- struct {
- char *cable;
- char v_cable;
- long speed;
- char v_speed;
- } line;
-
- struct {
- char type;
- char *connect;
- long speed;
- long nport;
- char *id;
- char *idstr;
- long start;
- char v_connect;
- char v_speed;
- char v_nport;
- char v_id;
- char v_start;
- } conc;
-
- struct {
- char type;
- long nport;
- char *id;
- char *idstr;
- long start;
- char v_nport;
- char v_id;
- char v_start;
- } module;
-
- char *ttyname;
- char *cuname;
- char *printname;
- long majornumber;
- long altpin;
- long ttysize;
- long chsize;
- long bssize;
- long unsize;
- long f2size;
- long vpixsize;
- long useintr;
- } u;
-};
-#endif
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 72f0aaa6911f..0ff3139e52b6 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -823,7 +823,7 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
tail = ch->ch_r_tail;
/* Store how much space we have left in the queue */
- qleft = (tail - head - 1);
+ qleft = tail - head - 1;
if (qleft < 0)
qleft += RQUEUEMASK + 1;
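
Dropping the parentheses is purely cosmetic; the computation is the standard free-space formula for a circular queue, folding a negative intermediate back by adding the queue size. With a power-of-two queue (RQUEUEMASK + 1 here) the same result can be had branch-free:

/* Equivalent branch-free form, shown for reference only. */
qleft = (tail - head - 1) & RQUEUEMASK;
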
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index fc6d2989e28f..4eb410e09609 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -125,12 +125,7 @@ static struct pci_driver dgnc_driver = {
*
************************************************************************/
-/*
- * dgnc_cleanup_module()
- *
- * Module unload. This is where it all ends.
- */
-static void dgnc_cleanup_module(void)
+static void cleanup(bool sysfiles)
{
int i;
unsigned long flags;
@@ -142,7 +137,8 @@ static void dgnc_cleanup_module(void)
/* Turn off poller right away. */
del_timer_sync(&dgnc_poll_timer);
- dgnc_remove_driver_sysfiles(&dgnc_driver);
+ if (sysfiles)
+ dgnc_remove_driver_sysfiles(&dgnc_driver);
device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
class_destroy(dgnc_class);
@@ -155,9 +151,17 @@ static void dgnc_cleanup_module(void)
}
dgnc_tty_post_uninit();
+}
- if (dgnc_NumBoards)
- pci_unregister_driver(&dgnc_driver);
+/*
+ * dgnc_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+static void dgnc_cleanup_module(void)
+{
+ cleanup(true);
+ pci_unregister_driver(&dgnc_driver);
}
/*
@@ -181,23 +185,14 @@ static int __init dgnc_init_module(void)
* Find and configure all the cards
*/
rc = pci_register_driver(&dgnc_driver);
-
- /*
- * If something went wrong in the scan, bail out of driver.
- */
- if (rc < 0) {
- /* Only unregister if it was actually registered. */
- if (dgnc_NumBoards)
- pci_unregister_driver(&dgnc_driver);
- else
- pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
-
- dgnc_cleanup_module();
- } else {
- dgnc_create_driver_sysfiles(&dgnc_driver);
+ if (rc) {
+ pr_warn("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
+ cleanup(false);
+ return rc;
}
+ dgnc_create_driver_sysfiles(&dgnc_driver);
- return rc;
+ return 0;
}
module_init(dgnc_init_module);
@@ -283,13 +278,13 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* wake up and enable device */
rc = pci_enable_device(pdev);
- if (rc < 0) {
- rc = -EIO;
- } else {
- rc = dgnc_found_board(pdev, ent->driver_data);
- if (rc == 0)
- dgnc_NumBoards++;
- }
+ if (rc)
+ return -EIO;
+
+ rc = dgnc_found_board(pdev, ent->driver_data);
+ if (rc == 0)
+ dgnc_NumBoards++;
+
return rc;
}
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index ce7cd9b96542..e4be81b66041 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -88,7 +88,6 @@
#define _POSIX_VDISABLE '\0'
#endif
-
/*
* All the possible states the driver can be while being loaded.
*/
@@ -106,7 +105,6 @@ enum {
BOARD_READY
};
-
/*************************************************************************
*
* Structures and closely related defines.
@@ -145,7 +143,6 @@ struct board_ops {
************************************************************************/
#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
-
/*
* Per-board information
*/
@@ -241,7 +238,6 @@ struct dgnc_board {
};
-
/************************************************************************
* Unit flag definitions for un_flags.
************************************************************************/
@@ -277,7 +273,6 @@ struct un_t {
struct device *un_sysfs;
};
-
/************************************************************************
* Device flag definitions for ch_flags.
************************************************************************/
@@ -300,7 +295,6 @@ struct un_t {
#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
-
/* Our Read/Error/Write queue sizes */
#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
#define EQUEUEMASK 0x1FFF /* 8 K - 1 */
@@ -309,7 +303,6 @@ struct un_t {
#define EQUEUESIZE RQUEUESIZE
#define WQUEUESIZE (WQUEUEMASK + 1)
-
/************************************************************************
* Channel information structure.
************************************************************************/
@@ -397,7 +390,6 @@ struct channel_t {
ulong ch_intr_tx; /* Count of interrupts */
ulong ch_intr_rx; /* Count of interrupts */
-
/* /proc/<board>/<channel> entries */
struct proc_dir_entry *proc_entry_pointer;
struct dgnc_proc_entry *dgnc_channel_table;
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index 518fbd5e2d0e..ba29a8d913f2 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -192,7 +192,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
spin_lock_irqsave(&ch->ch_lock, flags);
- mstat = (ch->ch_mostat | ch->ch_mistat);
+ mstat = ch->ch_mostat | ch->ch_mistat;
if (mstat & UART_MCR_DTR) {
ni.mstat |= TIOCM_DTR;
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 39c76e78e56a..31ac437cb4a4 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -1306,10 +1306,10 @@ static int neo_drain(struct tty_struct *tty, uint seconds)
/*
* Go to sleep waiting for the tty layer to wake me back up when
* the empty flag goes away.
- *
- * NOTE: TODO: Do something with time passed in.
*/
- rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+ rc = wait_event_interruptible_timeout(un->un_flags_wait,
+ ((un->un_flags & UN_EMPTY) == 0),
+ msecs_to_jiffies(seconds * 1000));
/* If ret is non-zero, user ctrl-c'ed us */
return rc;
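
The switch to wait_event_interruptible_timeout() finally uses the seconds argument the old code ignored: it returns 0 on timeout, -ERESTARTSYS if a signal arrived, and the remaining jiffies otherwise. A caller-side sketch under those semantics (assumption, not part of the patch):

long rc = wait_event_interruptible_timeout(un->un_flags_wait,
					   !(un->un_flags & UN_EMPTY),
					   msecs_to_jiffies(seconds * 1000));
if (rc == 0)
	pr_debug("drain timed out after %u seconds\n", seconds);
else if (rc < 0)
	return rc;		/* interrupted by a signal */
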
@@ -1735,7 +1735,7 @@ static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int ad
/* enable chip select */
writeb(NEO_EECS, base + NEO_EEREG);
/* READ */
- enable = (address | 0x180);
+ enable = address | 0x180;
for (bits = 9; bits--; ) {
databit = (enable & (1 << bits)) ? NEO_EEDI : 0;
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
index c528df5a0e5a..abddd48353d0 100644
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -65,7 +65,6 @@ struct neo_uart_struct {
#define NEO_EEDO 0x80 /* Data Out is an Input Pin */
#define NEO_EEREG 0x8E /* offset to EEPROM control reg */
-
#define NEO_VPD_IMAGESIZE 0x40 /* size of image to read from EEPROM in words */
#define NEO_VPD_IMAGEBYTES (NEO_VPD_IMAGESIZE * 2)
diff --git a/drivers/staging/dgnc/dgnc_pci.h b/drivers/staging/dgnc/dgnc_pci.h
index 617d40d1ec19..4e170c47f4a3 100644
--- a/drivers/staging/dgnc/dgnc_pci.h
+++ b/drivers/staging/dgnc/dgnc_pci.h
@@ -59,7 +59,6 @@
#define PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME "Neo 8 PCI Express RJ45"
#define PCI_DEVICE_NEO_EXPRESS_4_IBM_PCI_NAME "Neo 4 PCI Express IBM"
-
/* Size of Memory and I/O for PCI (4 K) */
#define PCI_RAM_SIZE 0x1000
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index b79eab084c02..bcd2bdfb9c8f 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -443,15 +443,13 @@ void dgnc_tty_uninit(struct dgnc_board *brd)
brd->PrintDriver.termios = NULL;
}
-/*=======================================================================
- *
+/*
* dgnc_wmove - Write data to transmit queue.
*
* ch - Pointer to channel structure.
* buf - Pointer to characters to be moved.
* n - Number of characters to move.
- *
- *=======================================================================*/
+ */
static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
{
int remain;
@@ -489,13 +487,11 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
ch->ch_w_head = head;
}
-/*=======================================================================
- *
+/*
* dgnc_input - Process received data.
*
* ch - Pointer to channel structure.
- *
- *=======================================================================*/
+ */
void dgnc_input(struct channel_t *ch)
{
struct dgnc_board *bd;
@@ -541,7 +537,7 @@ void dgnc_input(struct channel_t *ch)
*/
if (!tp || (tp->magic != TTY_MAGIC) ||
!(ch->ch_tun.un_flags & UN_ISOPEN) ||
- !(tp->termios.c_cflag & CREAD) ||
+ !C_CREAD(tp) ||
(ch->ch_tun.un_flags & UN_CLOSING)) {
ch->ch_r_head = tail;
@@ -796,7 +792,7 @@ static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
* And of course, rates above the dividend won't fly.
*/
if (newrate && newrate < ((ch->ch_bd->bd_dividend / 0xFFFF) + 1))
- newrate = ((ch->ch_bd->bd_dividend / 0xFFFF) + 1);
+ newrate = (ch->ch_bd->bd_dividend / 0xFFFF) + 1;
if (newrate && newrate > ch->ch_bd->bd_dividend)
newrate = ch->ch_bd->bd_dividend;
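
The two clamps keep the 16-bit divisor representable: with rate = bd_dividend / divisor, anything below bd_dividend / 0xFFFF + 1 would need a divisor wider than 16 bits, and anything above bd_dividend would need a divisor below 1. A worked example (assumption: bd_dividend of 921600, i.e. a 14.7456 MHz clock with 16x oversampling):

uint dividend = 921600;			/* assumed bd_dividend */
uint newrate  = 50;			/* requested custom rate */
uint min_rate = dividend / 0xFFFF + 1;	/* = 15 for this clock */

if (newrate < min_rate)
	newrate = min_rate;	/* else divisor overflows 16 bits */
if (newrate > dividend)
	newrate = dividend;	/* else divisor would be < 1 */
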
@@ -933,14 +929,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
}
if (ch->ch_tun.un_flags & UN_ISOPEN) {
- if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- ch->ch_tun.un_tty->ldisc->ops->write_wakeup) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- ch->ch_tun.un_tty->ldisc->ops->write_wakeup(ch->ch_tun.un_tty);
- spin_lock_irqsave(&ch->ch_lock, flags);
- }
-
- wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
+ tty_wakeup(ch->ch_tun.un_tty);
/*
* If unit is set to wait until empty, check to make sure
@@ -975,14 +964,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
}
if (ch->ch_pun.un_flags & UN_ISOPEN) {
- if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
- ch->ch_pun.un_tty->ldisc->ops->write_wakeup) {
- spin_unlock_irqrestore(&ch->ch_lock, flags);
- ch->ch_pun.un_tty->ldisc->ops->write_wakeup(ch->ch_pun.un_tty);
- spin_lock_irqsave(&ch->ch_lock, flags);
- }
-
- wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
+ tty_wakeup(ch->ch_pun.un_tty);
/*
* If unit is set to wait until empty, check to make sure
@@ -1800,8 +1782,8 @@ static int dgnc_tty_write(struct tty_struct *tty,
}
/* Update printer buffer empty time. */
- if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0)
- && (ch->ch_digi.digi_bufsize > 0)) {
+ if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0) &&
+ (ch->ch_digi.digi_bufsize > 0)) {
ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
}
@@ -1848,7 +1830,7 @@ static int dgnc_tty_tiocmget(struct tty_struct *tty)
spin_lock_irqsave(&ch->ch_lock, flags);
- mstat = (ch->ch_mostat | ch->ch_mistat);
+ mstat = ch->ch_mostat | ch->ch_mistat;
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2048,7 +2030,7 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
spin_lock_irqsave(&ch->ch_lock, flags);
- mstat = (ch->ch_mostat | ch->ch_mistat);
+ mstat = ch->ch_mostat | ch->ch_mistat;
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2520,12 +2502,12 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty)
/* Flush UARTs transmit FIFO */
ch->ch_bd->bd_ops->flush_uart_write(ch);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW | UN_EMPTY);
wake_up_interruptible(&ch->ch_tun.un_flags_wait);
}
- if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
- ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY);
wake_up_interruptible(&ch->ch_pun.un_flags_wait);
}
@@ -2719,13 +2701,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
ch->ch_tun.un_flags &=
- ~(UN_LOW|UN_EMPTY);
+ ~(UN_LOW | UN_EMPTY);
wake_up_interruptible(&ch->ch_tun.un_flags_wait);
}
if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
ch->ch_pun.un_flags &=
- ~(UN_LOW|UN_EMPTY);
+ ~(UN_LOW | UN_EMPTY);
wake_up_interruptible(&ch->ch_pun.un_flags_wait);
}
}
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index cf9dcae7cc3f..523a2d34f747 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -31,21 +31,21 @@
#endif
#if !defined(TIOCMSET)
-#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
-#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */
+#define TIOCMSET (('d' << 8) | 252) /* set modem ctrl state */
+#define TIOCMGET (('d' << 8) | 253) /* set modem ctrl state */
#endif
#if !defined(TIOCMBIC)
-#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */
-#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl state */
+#define TIOCMBIC (('d' << 8) | 254) /* set modem ctrl state */
+#define TIOCMBIS (('d' << 8) | 255) /* set modem ctrl state */
#endif
-#define DIGI_GETA (('e'<<8) | 94) /* Read params */
-#define DIGI_SETA (('e'<<8) | 95) /* Set params */
-#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
-#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
-#define DIGI_GET_NI_INFO (('d'<<8) | 250) /* Non-intelligent state info */
-#define DIGI_LOOPBACK (('d'<<8) | 252) /*
+#define DIGI_GETA (('e' << 8) | 94) /* Read params */
+#define DIGI_SETA (('e' << 8) | 95) /* Set params */
+#define DIGI_SETAW (('e' << 8) | 96) /* Drain & set params */
+#define DIGI_SETAF (('e' << 8) | 97) /* Drain, flush & set params */
+#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
+#define DIGI_LOOPBACK (('d' << 8) | 252) /*
* Enable/disable UART
* internal loopback
*/
@@ -85,7 +85,7 @@ struct digi_dinfo {
char dinfo_version[16]; /* driver version */
};
-#define DIGI_GETDD (('d'<<8) | 248) /* get driver info */
+#define DIGI_GETDD (('d' << 8) | 248) /* get driver info */
/************************************************************************
* Structure used with ioctl commands for per-board information
@@ -105,7 +105,7 @@ struct digi_info {
char info_reserved[7]; /* for future expansion */
};
-#define DIGI_GETBD (('d'<<8) | 249) /* get board info */
+#define DIGI_GETBD (('d' << 8) | 249) /* get board info */
struct digi_getbuffer /* Struct for holding buffer use counts */
{
@@ -133,10 +133,10 @@ struct digi_getcounter {
#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
-#define DIGI_REALPORT_GETBUFFERS (('e'<<8) | 108)
-#define DIGI_REALPORT_SENDIMMEDIATE (('e'<<8) | 109)
-#define DIGI_REALPORT_GETCOUNTERS (('e'<<8) | 110)
-#define DIGI_REALPORT_GETEVENTS (('e'<<8) | 111)
+#define DIGI_REALPORT_GETBUFFERS (('e' << 8) | 108)
+#define DIGI_REALPORT_SENDIMMEDIATE (('e' << 8) | 109)
+#define DIGI_REALPORT_GETCOUNTERS (('e' << 8) | 110)
+#define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111)
#define EV_OPU 0x0001 /* !<Output paused by client */
#define EV_OPS 0x0002 /* !<Output paused by regular sw flowctrl */
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index beb9411658ba..e8cacaecf9ad 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
@@ -160,7 +159,7 @@ static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
recipient = (u8)(p_ctrl->bRequestType & USB_RECIP_MASK);
selector = p_ctrl->wValue;
if ((recipient == USB_RECIP_DEVICE) &&
- (selector == USB_DEVICE_TEST_MODE)) {
+ (selector == USB_DEVICE_TEST_MODE)) {
test_mode = (u32)(p_ctrl->wIndex >> 8);
_nbu2ss_set_test_mode(udc, test_mode);
}
@@ -271,21 +270,21 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
data = EPn_EN | EPn_BCLR | EPn_DIR0;
_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- data = (EPn_ONAK | EPn_OSTL_EN | EPn_OSTL);
+ data = EPn_ONAK | EPn_OSTL_EN | EPn_OSTL;
_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- data = (EPn_OUT_EN | EPn_OUT_END_EN);
+ data = EPn_OUT_EN | EPn_OUT_END_EN;
_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
} else {
/*---------------------------------------------------------*/
/* IN */
- data = (EPn_EN | EPn_BCLR | EPn_AUTO);
+ data = EPn_EN | EPn_BCLR | EPn_AUTO;
_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- data = (EPn_ISTL);
+ data = EPn_ISTL;
_nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- data = (EPn_IN_EN | EPn_IN_END_EN);
+ data = EPn_IN_EN | EPn_IN_END_EN;
_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
}
@@ -460,7 +459,7 @@ static void _nbu2ss_ep_in_end(
if (length)
_nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
- data = (((((u32)length) << 5) & EPn_DW) | EPn_DEND);
+ data = ((((u32)length) << 5) & EPn_DW) | EPn_DEND;
_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
@@ -526,10 +525,10 @@ static void _nbu2ss_dma_unmap_single(
if (req->unaligned) {
if (direct == USB_DIR_OUT)
memcpy(req->req.buf, ep->virt_buf,
- req->req.actual & 0xfffffffc);
+ req->req.actual & 0xfffffffc);
} else
dma_unmap_single(udc->gadget.dev.parent,
- req->req.dma, req->req.length,
+ req->req.dma, req->req.length,
(direct == USB_DIR_IN)
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
@@ -538,7 +537,7 @@ static void _nbu2ss_dma_unmap_single(
} else {
if (!req->unaligned)
dma_sync_single_for_cpu(udc->gadget.dev.parent,
- req->req.dma, req->req.length,
+ req->req.dma, req->req.length,
(direct == USB_DIR_IN)
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
@@ -844,7 +843,7 @@ static int _nbu2ss_out_dma(
/* Number of transfer packets */
mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPn_MPKT;
- dmacnt = (length / mpkt);
+ dmacnt = length / mpkt;
lmpkt = (length % mpkt) & ~(u32)0x03;
if (dmacnt > DMA_MAX_COUNT) {
@@ -1490,7 +1489,7 @@ static inline int _nbu2ss_req_feature(struct nbu2ss_udc *udc, bool bset)
int result = -EOPNOTSUPP;
if ((udc->ctrl.wLength != 0x0000) ||
- (direction != USB_DIR_OUT)) {
+ (direction != USB_DIR_OUT)) {
return -EINVAL;
}
@@ -1648,7 +1647,7 @@ static int std_req_set_address(struct nbu2ss_udc *udc)
u32 wValue = udc->ctrl.wValue;
if ((udc->ctrl.bRequestType != 0x00) ||
- (udc->ctrl.wIndex != 0x0000) ||
+ (udc->ctrl.wIndex != 0x0000) ||
(udc->ctrl.wLength != 0x0000)) {
return -EINVAL;
}
@@ -1670,7 +1669,7 @@ static int std_req_set_configuration(struct nbu2ss_udc *udc)
u32 ConfigValue = (u32)(udc->ctrl.wValue & 0x00ff);
if ((udc->ctrl.wIndex != 0x0000) ||
- (udc->ctrl.wLength != 0x0000) ||
+ (udc->ctrl.wLength != 0x0000) ||
(udc->ctrl.bRequestType != 0x00)) {
return -EINVAL;
}
@@ -1949,7 +1948,7 @@ static void _nbu2ss_ep_done(
#ifdef USE_DMA
if ((ep->direct == USB_DIR_OUT) && (ep->epnum > 0) &&
- (req->req.dma != 0))
+ (req->req.dma != 0))
_nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT);
#endif
@@ -2277,7 +2276,7 @@ static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
_nbu2ss_writel(&udc->p_regs->AHBSCTR, WAIT_MODE);
_nbu2ss_writel(&udc->p_regs->AHBMCTR,
- HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
+ HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
while (!(_nbu2ss_readl(&udc->p_regs->EPCTR) & PLL_LOCK)) {
waitcnt++;
@@ -2626,7 +2625,7 @@ static struct usb_request *nbu2ss_ep_alloc_request(
req = kzalloc(sizeof(*req), gfp_flags);
if (!req)
- return 0;
+ return NULL;
#ifdef USE_DMA
req->req.dma = DMA_ADDR_INVALID;
@@ -2701,7 +2700,7 @@ static int nbu2ss_ep_queue(
if (unlikely(!udc->driver)) {
dev_err(udc->dev, "%s, bogus device state %p\n", __func__,
- udc->driver);
+ udc->driver);
return -ESHUTDOWN;
}
@@ -2721,12 +2720,12 @@ static int nbu2ss_ep_queue(
if (ep->epnum > 0) {
if (ep->direct == USB_DIR_IN)
memcpy(ep->virt_buf, req->req.buf,
- req->req.length);
+ req->req.length);
}
}
if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT) &&
- (req->req.dma != 0))
+ (req->req.dma != 0))
_nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT);
#endif
@@ -2741,12 +2740,12 @@ static int nbu2ss_ep_queue(
result = _nbu2ss_start_transfer(udc, ep, req, FALSE);
if (result < 0) {
dev_err(udc->dev, " *** %s, result = %d\n", __func__,
- result);
+ result);
list_del(&req->queue);
} else if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT)) {
#ifdef USE_DMA
if (req->req.length < 4 &&
- req->req.length == req->req.actual)
+ req->req.length == req->req.actual)
#else
if (req->req.length == req->req.actual)
#endif
@@ -3026,7 +3025,7 @@ static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget)
/*-------------------------------------------------------------------------*/
static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget,
- int is_selfpowered)
+ int is_selfpowered)
{
struct nbu2ss_udc *udc;
unsigned long flags;
@@ -3180,7 +3179,8 @@ static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
ep->ep.ops = &nbu2ss_ep_ops;
usb_ep_set_maxpacket_limit(&ep->ep,
- i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE);
+ i == 0 ? EP0_PACKETSIZE
+ : EP_PACKETSIZE);
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
INIT_LIST_HEAD(&ep->queue);
@@ -3273,10 +3273,7 @@ static int nbu2ss_drv_probe(struct platform_device *pdev)
/* VBUS Interrupt */
irq_set_irq_type(INT_VBUS, IRQ_TYPE_EDGE_BOTH);
status = request_irq(INT_VBUS,
- _nbu2ss_vbus_irq,
- IRQF_SHARED,
- driver_name,
- udc);
+ _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc);
if (status != 0) {
dev_err(udc->dev, "request_irq(INT_VBUS) failed\n");
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index 883ff5b8fdab..6f5e82464d78 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -117,12 +117,24 @@ config FB_TFT_SSD1289
help
Framebuffer support for SSD1289
+config FB_TFT_SSD1305
+ tristate "FB driver for the SSD1305 OLED Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1305
+
config FB_TFT_SSD1306
tristate "FB driver for the SSD1306 OLED Controller"
depends on FB_TFT
help
Framebuffer support for SSD1306
+config FB_TFT_SSD1325
+ tristate "FB driver for the SSD1325 OLED Controller"
+ depends on FB_TFT
+ help
+ Framebuffer support for SSD1325
+
config FB_TFT_SSD1331
tristate "FB driver for the SSD1331 LCD Controller"
depends on FB_TFT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 4f9071d96d01..2725ea9a4afc 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -21,7 +21,9 @@ obj-$(CONFIG_FB_TFT_RA8875) += fb_ra8875.o
obj-$(CONFIG_FB_TFT_S6D02A1) += fb_s6d02a1.o
obj-$(CONFIG_FB_TFT_S6D1121) += fb_s6d1121.o
obj-$(CONFIG_FB_TFT_SSD1289) += fb_ssd1289.o
+obj-$(CONFIG_FB_TFT_SSD1305) += fb_ssd1305.o
obj-$(CONFIG_FB_TFT_SSD1306) += fb_ssd1306.o
+obj-$(CONFIG_FB_TFT_SSD1325) += fb_ssd1325.o
obj-$(CONFIG_FB_TFT_SSD1331) += fb_ssd1331.o
obj-$(CONFIG_FB_TFT_SSD1351) += fb_ssd1351.o
obj-$(CONFIG_FB_TFT_ST7735R) += fb_st7735r.o
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 2a50cf957101..ba9fc444b848 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -272,8 +272,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
int ret = 0;
/* buffer to convert RGB565 -> grayscale16 -> Dithered image 1bpp */
- signed short *convert_buf = kmalloc(par->info->var.xres *
- par->info->var.yres * sizeof(signed short), GFP_NOIO);
+ signed short *convert_buf = kmalloc_array(par->info->var.xres *
+ par->info->var.yres, sizeof(signed short), GFP_NOIO);
if (!convert_buf)
return -ENOMEM;
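The kmalloc_array() conversion above is not just stylistic: kmalloc_array(n, size, flags) returns NULL when the n * size product would overflow, whereas the open-coded kmalloc(n * size, flags) silently wraps and hands back an undersized buffer. A minimal sketch, with xres/yres as stand-ins for the fb_info fields:

	/* unsafe: the multiply can wrap before kmalloc() ever sees it */
	buf = kmalloc(xres * yres * sizeof(*buf), GFP_NOIO);

	/* checked: NULL is returned if count * size would overflow */
	buf = kmalloc_array(xres * yres, sizeof(*buf), GFP_NOIO);
	if (!buf)
		return -ENOMEM;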
diff --git a/drivers/staging/fbtft/fb_hx8340bn.c b/drivers/staging/fbtft/fb_hx8340bn.c
index e1ed177f9184..9970ed74bb38 100644
--- a/drivers/staging/fbtft/fb_hx8340bn.c
+++ b/drivers/staging/fbtft/fb_hx8340bn.c
@@ -25,6 +25,7 @@
#include <linux/vmalloc.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -45,56 +46,70 @@ static int init_display(struct fbtft_par *par)
/* BTL221722-276L startup sequence, from datasheet */
- /* SETEXTCOM: Set extended command set (C1h)
- This command is used to set extended command set access enable.
- Enable: After command (C1h), must write: ffh,83h,40h */
+ /*
+ * SETEXTCOM: Set extended command set (C1h)
+ * This command is used to set extended command set access enable.
+ * Enable: After command (C1h), must write: ffh,83h,40h
+ */
write_reg(par, 0xC1, 0xFF, 0x83, 0x40);
- /* Sleep out
- This command turns off sleep mode.
- In this mode the DC/DC converter is enabled, Internal oscillator
- is started, and panel scanning is started. */
+ /*
+ * Sleep out
+ * This command turns off sleep mode.
+ * In this mode the DC/DC converter is enabled, Internal oscillator
+ * is started, and panel scanning is started.
+ */
write_reg(par, 0x11);
mdelay(150);
/* Undoc'd register? */
write_reg(par, 0xCA, 0x70, 0x00, 0xD9);
- /* SETOSC: Set Internal Oscillator (B0h)
- This command is used to set internal oscillator related settings */
- /* OSC_EN: Enable internal oscillator */
- /* Internal oscillator frequency: 125% x 2.52MHz */
+ /*
+ * SETOSC: Set Internal Oscillator (B0h)
+ * This command is used to set internal oscillator related settings
+ * OSC_EN: Enable internal oscillator
+ * Internal oscillator frequency: 125% x 2.52MHz
+ */
write_reg(par, 0xB0, 0x01, 0x11);
/* Drive ability setting */
write_reg(par, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06);
mdelay(20);
- /* SETPWCTR5: Set Power Control 5(B5h)
- This command is used to set VCOM Low and VCOM High Voltage */
- /* VCOMH 0110101 : 3.925 */
- /* VCOML 0100000 : -1.700 */
- /* 45h=69 VCOMH: "VMH" + 5d VCOML: "VMH" + 5d */
+ /*
+ * SETPWCTR5: Set Power Control 5(B5h)
+ * This command is used to set VCOM Low and VCOM High Voltage
+ * VCOMH 0110101 : 3.925
+ * VCOML 0100000 : -1.700
+ * 45h=69 VCOMH: "VMH" + 5d VCOML: "VMH" + 5d
+ */
write_reg(par, 0xB5, 0x35, 0x20, 0x45);
- /* SETPWCTR4: Set Power Control 4(B4h)
- VRH[4:0]: Specify the VREG1 voltage adjusting.
- VREG1 voltage is for gamma voltage setting.
- BT[2:0]: Switch the output factor of step-up circuit 2
- for VGH and VGL voltage generation. */
+ /*
+ * SETPWCTR4: Set Power Control 4(B4h)
+ * VRH[4:0]: Specify the VREG1 voltage adjusting.
+ * VREG1 voltage is for gamma voltage setting.
+ * BT[2:0]: Switch the output factor of step-up circuit 2
+ * for VGH and VGL voltage generation.
+ */
write_reg(par, 0xB4, 0x33, 0x25, 0x4C);
mdelay(10);
- /* Interface Pixel Format (3Ah)
- This command is used to define the format of RGB picture data,
- which is to be transfer via the system and RGB interface. */
- /* RGB interface: 16 Bit/Pixel */
- write_reg(par, 0x3A, 0x05);
-
- /* Display on (29h)
- This command is used to recover from DISPLAY OFF mode.
- Output from the Frame Memory is enabled. */
- write_reg(par, 0x29);
+ /*
+ * Interface Pixel Format (3Ah)
+ * This command is used to define the format of RGB picture data,
+	 * which is to be transferred via the system and RGB interface.
+ * RGB interface: 16 Bit/Pixel
+ */
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+
+ /*
+ * Display on (29h)
+ * This command is used to recover from DISPLAY OFF mode.
+ * Output from the Frame Memory is enabled.
+ */
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
mdelay(10);
return 0;
@@ -102,9 +117,9 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- write_reg(par, FBTFT_CASET, 0x00, xs, 0x00, xe);
- write_reg(par, FBTFT_RASET, 0x00, ys, 0x00, ye);
- write_reg(par, FBTFT_RAMWR);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, 0x00, xs, 0x00, xe);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, 0x00, ys, 0x00, ye);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
static int set_var(struct fbtft_par *par)
@@ -116,16 +131,19 @@ static int set_var(struct fbtft_par *par)
#define MV BIT(5)
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, par->bgr << 3);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, par->bgr << 3);
break;
case 270:
- write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MV | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MY | (par->bgr << 3));
break;
case 90:
- write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MY | MV | (par->bgr << 3));
break;
}
@@ -133,12 +151,12 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma Curve selection, GC (only GC0 can be customized):
- 0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0
- Gamma string format:
- OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1
- ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX GC
-*/
+ * Gamma Curve selection, GC (only GC0 can be customized):
+ * 0 = 2.2, 1 = 1.8, 2 = 2.5, 3 = 1.0
+ * Gamma string format:
+ * OP0 OP1 CP0 CP1 CP2 CP3 CP4 MP0 MP1 MP2 MP3 MP4 MP5 CGM0 CGM1
+ * ON0 ON1 CN0 CN1 CN2 CN3 CN4 MN0 MN1 MN2 MN3 MN4 MN5 XXXX GC
+ */
#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
@@ -154,36 +172,38 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
for (j = 0; j < par->gamma.num_values; j++)
CURVE(i, j) &= mask[i * par->gamma.num_values + j];
- write_reg(par, 0x26, 1 << CURVE(1, 14)); /* Gamma Set (26h) */
+ /* Gamma Set (26h) */
+ write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 1 << CURVE(1, 14));
if (CURVE(1, 14))
return 0; /* only GC0 can be customized */
write_reg(par, 0xC2,
- (CURVE(0, 8) << 4) | CURVE(0, 7),
- (CURVE(0, 10) << 4) | CURVE(0, 9),
- (CURVE(0, 12) << 4) | CURVE(0, 11),
- CURVE(0, 2),
- (CURVE(0, 4) << 4) | CURVE(0, 3),
- CURVE(0, 5),
- CURVE(0, 6),
- (CURVE(0, 1) << 4) | CURVE(0, 0),
- (CURVE(0, 14) << 2) | CURVE(0, 13));
+ (CURVE(0, 8) << 4) | CURVE(0, 7),
+ (CURVE(0, 10) << 4) | CURVE(0, 9),
+ (CURVE(0, 12) << 4) | CURVE(0, 11),
+ CURVE(0, 2),
+ (CURVE(0, 4) << 4) | CURVE(0, 3),
+ CURVE(0, 5),
+ CURVE(0, 6),
+ (CURVE(0, 1) << 4) | CURVE(0, 0),
+ (CURVE(0, 14) << 2) | CURVE(0, 13));
write_reg(par, 0xC3,
- (CURVE(1, 8) << 4) | CURVE(1, 7),
- (CURVE(1, 10) << 4) | CURVE(1, 9),
- (CURVE(1, 12) << 4) | CURVE(1, 11),
- CURVE(1, 2),
- (CURVE(1, 4) << 4) | CURVE(1, 3),
- CURVE(1, 5),
- CURVE(1, 6),
- (CURVE(1, 1) << 4) | CURVE(1, 0));
+ (CURVE(1, 8) << 4) | CURVE(1, 7),
+ (CURVE(1, 10) << 4) | CURVE(1, 9),
+ (CURVE(1, 12) << 4) | CURVE(1, 11),
+ CURVE(1, 2),
+ (CURVE(1, 4) << 4) | CURVE(1, 3),
+ CURVE(1, 5),
+ CURVE(1, 6),
+ (CURVE(1, 1) << 4) | CURVE(1, 0));
mdelay(10);
return 0;
}
+
#undef CURVE
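The CURVE() macro used by these set_gamma() implementations flattens a (curve number, entry index) pair into the one-dimensional curves[] array, num_values entries per curve. With the illustrative num_values = 15 this driver uses:

	/* CURVE(num, idx) == curves[num * num_values + idx]       */
	/*   CURVE(0, 7)  -> curves[7]   (positive curve, entry 7) */
	/*   CURVE(1, 14) -> curves[29]  (negative curve, GC bits) */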
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_hx8347d.c b/drivers/staging/fbtft/fb_hx8347d.c
index 6ff76e531a37..450a61e3f99c 100644
--- a/drivers/staging/fbtft/fb_hx8347d.c
+++ b/drivers/staging/fbtft/fb_hx8347d.c
@@ -97,10 +97,10 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
}
/*
- Gamma string format:
- VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
- VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM
-*/
+ * Gamma string format:
+ * VRP0 VRP1 VRP2 VRP3 VRP4 VRP5 PRP0 PRP1 PKP0 PKP1 PKP2 PKP3 PKP4 CGM
+ * VRN0 VRN1 VRN2 VRN3 VRN4 VRN5 PRN0 PRN1 PKN0 PKN1 PKN2 PKN3 PKN4 CGM
+ */
#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
@@ -140,6 +140,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_hx8353d.c b/drivers/staging/fbtft/fb_hx8353d.c
index 8552411695fa..72e4ff8c5553 100644
--- a/drivers/staging/fbtft/fb_hx8353d.c
+++ b/drivers/staging/fbtft/fb_hx8353d.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -27,7 +28,6 @@
static int init_display(struct fbtft_par *par)
{
-
par->fbtftops.reset(par);
mdelay(150);
@@ -47,18 +47,18 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0x3A, 0x05);
/* MEM ACCESS */
- write_reg(par, 0x36, 0xC0);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0xC0);
/* SLPOUT - Sleep out & booster on */
- write_reg(par, 0x11);
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
mdelay(150);
/* DISPON - Display On */
- write_reg(par, 0x29);
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
/* RGBSET */
- write_reg(par, 0x2D,
- 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
+ write_reg(par, MIPI_DCS_WRITE_LUT,
+ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30,
32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
@@ -87,41 +87,45 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
#define mv BIT(5)
static int set_var(struct fbtft_par *par)
{
- /* madctl - memory data access control
- rgb/bgr:
- 1. mode selection pin srgb
- rgb h/w pin for color filter setting: 0=rgb, 1=bgr
- 2. madctl rgb bit
- rgb-bgr order color filter panel: 0=rgb, 1=bgr */
+ /*
+ * madctl - memory data access control
+ * rgb/bgr:
+ * 1. mode selection pin srgb
+ * rgb h/w pin for color filter setting: 0=rgb, 1=bgr
+ * 2. madctl rgb bit
+ * rgb-bgr order color filter panel: 0=rgb, 1=bgr
+ */
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, mx | my | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ mx | my | (par->bgr << 3));
break;
case 270:
- write_reg(par, 0x36, my | mv | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ my | mv | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, par->bgr << 3);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ par->bgr << 3);
break;
case 90:
- write_reg(par, 0x36, mx | mv | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ mx | mv | (par->bgr << 3));
break;
}
return 0;
}
-/*
- gamma string format:
-*/
+/* gamma string format: */
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
write_reg(par, 0xE0,
- curves[0], curves[1], curves[2], curves[3],
- curves[4], curves[5], curves[6], curves[7],
- curves[8], curves[9], curves[10], curves[11],
- curves[12], curves[13], curves[14], curves[15],
- curves[16], curves[17], curves[18]);
+ curves[0], curves[1], curves[2], curves[3],
+ curves[4], curves[5], curves[6], curves[7],
+ curves[8], curves[9], curves[10], curves[11],
+ curves[12], curves[13], curves[14], curves[15],
+ curves[16], curves[17], curves[18]);
return 0;
}
diff --git a/drivers/staging/fbtft/fb_hx8357d.c b/drivers/staging/fbtft/fb_hx8357d.c
index a381dbcf5535..32e6efe1d0a7 100644
--- a/drivers/staging/fbtft/fb_hx8357d.c
+++ b/drivers/staging/fbtft/fb_hx8357d.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
#include "fb_hx8357d.h"
@@ -35,7 +36,7 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
/* Reset things like Gamma */
- write_reg(par, HX8357B_SWRESET);
+ write_reg(par, MIPI_DCS_SOFT_RESET);
usleep_range(5000, 7000);
/* setextc */
@@ -55,83 +56,83 @@ static int init_display(struct fbtft_par *par)
write_reg(par, HX8357_SETPANEL, 0x05);
write_reg(par, HX8357_SETPWR1,
- 0x00, /* Not deep standby */
- 0x15, /* BT */
- 0x1C, /* VSPR */
- 0x1C, /* VSNR */
- 0x83, /* AP */
- 0xAA); /* FS */
+ 0x00, /* Not deep standby */
+ 0x15, /* BT */
+ 0x1C, /* VSPR */
+ 0x1C, /* VSNR */
+ 0x83, /* AP */
+ 0xAA); /* FS */
write_reg(par, HX8357D_SETSTBA,
- 0x50, /* OPON normal */
- 0x50, /* OPON idle */
- 0x01, /* STBA */
- 0x3C, /* STBA */
- 0x1E, /* STBA */
- 0x08); /* GEN */
+ 0x50, /* OPON normal */
+ 0x50, /* OPON idle */
+ 0x01, /* STBA */
+ 0x3C, /* STBA */
+ 0x1E, /* STBA */
+ 0x08); /* GEN */
write_reg(par, HX8357D_SETCYC,
- 0x02, /* NW 0x02 */
- 0x40, /* RTN */
- 0x00, /* DIV */
- 0x2A, /* DUM */
- 0x2A, /* DUM */
- 0x0D, /* GDON */
- 0x78); /* GDOFF */
+ 0x02, /* NW 0x02 */
+ 0x40, /* RTN */
+ 0x00, /* DIV */
+ 0x2A, /* DUM */
+ 0x2A, /* DUM */
+ 0x0D, /* GDON */
+ 0x78); /* GDOFF */
write_reg(par, HX8357D_SETGAMMA,
- 0x02,
- 0x0A,
- 0x11,
- 0x1d,
- 0x23,
- 0x35,
- 0x41,
- 0x4b,
- 0x4b,
- 0x42,
- 0x3A,
- 0x27,
- 0x1B,
- 0x08,
- 0x09,
- 0x03,
- 0x02,
- 0x0A,
- 0x11,
- 0x1d,
- 0x23,
- 0x35,
- 0x41,
- 0x4b,
- 0x4b,
- 0x42,
- 0x3A,
- 0x27,
- 0x1B,
- 0x08,
- 0x09,
- 0x03,
- 0x00,
- 0x01);
+ 0x02,
+ 0x0A,
+ 0x11,
+ 0x1d,
+ 0x23,
+ 0x35,
+ 0x41,
+ 0x4b,
+ 0x4b,
+ 0x42,
+ 0x3A,
+ 0x27,
+ 0x1B,
+ 0x08,
+ 0x09,
+ 0x03,
+ 0x02,
+ 0x0A,
+ 0x11,
+ 0x1d,
+ 0x23,
+ 0x35,
+ 0x41,
+ 0x4b,
+ 0x4b,
+ 0x42,
+ 0x3A,
+ 0x27,
+ 0x1B,
+ 0x08,
+ 0x09,
+ 0x03,
+ 0x00,
+ 0x01);
/* 16 bit */
- write_reg(par, HX8357_COLMOD, 0x55);
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
- write_reg(par, HX8357_MADCTL, 0xC0);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0xC0);
/* TE off */
- write_reg(par, HX8357_TEON, 0x00);
+ write_reg(par, MIPI_DCS_SET_TEAR_ON, 0x00);
/* tear line */
- write_reg(par, HX8357_TEARLINE, 0x00, 0x02);
+ write_reg(par, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
/* Exit Sleep */
- write_reg(par, HX8357_SLPOUT);
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(150);
/* display on */
- write_reg(par, HX8357_DISPON);
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
usleep_range(5000, 7000);
return 0;
@@ -139,18 +140,15 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column addr set */
- write_reg(par, HX8357_CASET,
- xs >> 8, xs & 0xff, /* XSTART */
- xe >> 8, xe & 0xff); /* XEND */
-
- /* Row addr set */
- write_reg(par, HX8357_PASET,
- ys >> 8, ys & 0xff, /* YSTART */
- ye >> 8, ye & 0xff); /* YEND */
-
- /* write to RAM */
- write_reg(par, HX8357_RAMWR);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xff, /* XSTART */
+ xe >> 8, xe & 0xff); /* XEND */
+
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xff, /* YSTART */
+ ye >> 8, ye & 0xff); /* YEND */
+
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define HX8357D_MADCTL_MY 0x80
@@ -182,7 +180,7 @@ static int set_var(struct fbtft_par *par)
val |= (par->bgr ? HX8357D_MADCTL_RGB : HX8357D_MADCTL_BGR);
/* Memory Access Control */
- write_reg(par, HX8357_MADCTL, val);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, val);
return 0;
}
diff --git a/drivers/staging/fbtft/fb_hx8357d.h b/drivers/staging/fbtft/fb_hx8357d.h
index de05e8cdf04c..e281921d4a97 100644
--- a/drivers/staging/fbtft/fb_hx8357d.h
+++ b/drivers/staging/fbtft/fb_hx8357d.h
@@ -1,17 +1,17 @@
-/***************************************************
- This is our library for the Adafruit ILI9341 Breakout and Shield
- ----> http://www.adafruit.com/products/1651
-
- Check out the links above for our tutorials and wiring diagrams
- These displays use SPI to communicate, 4 or 5 pins are required to
- interface (RST is optional)
- Adafruit invests time and resources providing this open source code,
- please support Adafruit and open-source hardware by purchasing
- products from Adafruit!
-
- Written by Limor Fried/Ladyada for Adafruit Industries.
- MIT license, all text above must be included in any redistribution
- ****************************************************/
+/*
+ * This is our library for the Adafruit ILI9341 Breakout and Shield
+ * ----> http://www.adafruit.com/products/1651
+ *
+ * Check out the links above for our tutorials and wiring diagrams
+ * These displays use SPI to communicate, 4 or 5 pins are required to
+ * interface (RST is optional)
+ * Adafruit invests time and resources providing this open source code,
+ * please support Adafruit and open-source hardware by purchasing
+ * products from Adafruit!
+ *
+ * Written by Limor Fried/Ladyada for Adafruit Industries.
+ * MIT license, all text above must be included in any redistribution
+ */
#ifndef __HX8357_H__
#define __HX8357_H__
@@ -22,38 +22,6 @@
#define HX8357_TFTWIDTH 320
#define HX8357_TFTHEIGHT 480
-#define HX8357B_NOP 0x00
-#define HX8357B_SWRESET 0x01
-#define HX8357B_RDDID 0x04
-#define HX8357B_RDDST 0x09
-
-#define HX8357B_RDPOWMODE 0x0A
-#define HX8357B_RDMADCTL 0x0B
-#define HX8357B_RDCOLMOD 0x0C
-#define HX8357B_RDDIM 0x0D
-#define HX8357B_RDDSDR 0x0F
-
-#define HX8357_SLPIN 0x10
-#define HX8357_SLPOUT 0x11
-#define HX8357B_PTLON 0x12
-#define HX8357B_NORON 0x13
-
-#define HX8357_INVOFF 0x20
-#define HX8357_INVON 0x21
-#define HX8357_DISPOFF 0x28
-#define HX8357_DISPON 0x29
-
-#define HX8357_CASET 0x2A
-#define HX8357_PASET 0x2B
-#define HX8357_RAMWR 0x2C
-#define HX8357_RAMRD 0x2E
-
-#define HX8357B_PTLAR 0x30
-#define HX8357_TEON 0x35
-#define HX8357_TEARLINE 0x44
-#define HX8357_MADCTL 0x36
-#define HX8357_COLMOD 0x3A
-
#define HX8357_SETOSC 0xB0
#define HX8357_SETPWR1 0xB1
#define HX8357B_SETDISPLAY 0xB2
diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
index f31b3f4b9275..6b8f8b17e9a3 100644
--- a/drivers/staging/fbtft/fb_ili9163.c
+++ b/drivers/staging/fbtft/fb_ili9163.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -38,37 +39,11 @@
#endif
/* ILI9163C commands */
-#define CMD_NOP 0x00 /* Non operation*/
-#define CMD_SWRESET 0x01 /* Soft Reset */
-#define CMD_SLPIN 0x10 /* Sleep ON */
-#define CMD_SLPOUT 0x11 /* Sleep OFF */
-#define CMD_PTLON 0x12 /* Partial Mode ON */
-#define CMD_NORML 0x13 /* Normal Display ON */
-#define CMD_DINVOF 0x20 /* Display Inversion OFF */
-#define CMD_DINVON 0x21 /* Display Inversion ON */
-#define CMD_GAMMASET 0x26 /* Gamma Set (0x01[1],0x02[2],0x04[3],0x08[4]) */
-#define CMD_DISPOFF 0x28 /* Display OFF */
-#define CMD_DISPON 0x29 /* Display ON */
-#define CMD_IDLEON 0x39 /* Idle Mode ON */
-#define CMD_IDLEOF 0x38 /* Idle Mode OFF */
-#define CMD_CLMADRS 0x2A /* Column Address Set */
-#define CMD_PGEADRS 0x2B /* Page Address Set */
-
-#define CMD_RAMWR 0x2C /* Memory Write */
-#define CMD_RAMRD 0x2E /* Memory Read */
-#define CMD_CLRSPACE 0x2D /* Color Space : 4K/65K/262K */
-#define CMD_PARTAREA 0x30 /* Partial Area */
-#define CMD_VSCLLDEF 0x33 /* Vertical Scroll Definition */
-#define CMD_TEFXLON 0x34 /* Tearing Effect Line ON */
-#define CMD_TEFXLOF 0x35 /* Tearing Effect Line OFF */
-#define CMD_MADCTL 0x36 /* Memory Access Control */
-
-#define CMD_PIXFMT 0x3A /* Interface Pixel Format */
-#define CMD_FRMCTR1 0xB1 /* Frame Rate Control
- (In normal mode/Full colors) */
+#define CMD_FRMCTR1 0xB1 /* Frame Rate Control */
+ /* (In normal mode/Full colors) */
#define CMD_FRMCTR2 0xB2 /* Frame Rate Control (In Idle mode/8-colors) */
-#define CMD_FRMCTR3 0xB3 /* Frame Rate Control
- (In Partial mode/full colors) */
+#define CMD_FRMCTR3 0xB3 /* Frame Rate Control */
+ /* (In Partial mode/full colors) */
#define CMD_DINVCTR 0xB4 /* Display Inversion Control */
#define CMD_RGBBLK 0xB5 /* RGB Interface Blanking Porch setting */
#define CMD_DFUNCTR 0xB6 /* Display Function set 5 */
@@ -88,17 +63,18 @@
#define CMD_GAMRSEL 0xF2 /* GAM_R_SEL */
/*
-This display:
-http://www.ebay.com/itm/Replace-Nokia-5110-LCD-1-44-Red-Serial-128X128-SPI-Color-TFT-LCD-Display-Module-/271422122271
-This particular display has a design error! The controller has 3 pins to
-configure to constrain the memory and resolution to a fixed dimension (in
-that case 128x128) but they leaved those pins configured for 128x160 so
-there was several pixel memory addressing problems.
-I solved by setup several parameters that dinamically fix the resolution as
-needit so below the parameters for this display. If you have a strain or a
-correct display (can happen with chinese) you can copy those parameters and
-create setup for different displays.
-*/
+ * This display:
+ * http://www.ebay.com/itm/Replace-Nokia-5110-LCD-1-44-Red-Serial-128X128-SPI-
+ * Color-TFT-LCD-Display-Module-/271422122271
+ * This particular display has a design error! The controller has 3 pins to
+ * configure, constraining the memory and resolution to a fixed dimension
+ * (in this case 128x128), but those pins were left configured for 128x160,
+ * so there were several pixel memory addressing problems.
+ * This is solved by setting up several parameters that dynamically fix the
+ * resolution as needed; the parameters for this display are below. If you
+ * have a variant or a correct display (which can happen with Chinese
+ * modules) you can copy those parameters and create setups for others.
+ */
#ifdef RED
#define __OFFSET 32 /*see note 2 - this is the red version */
@@ -113,16 +89,17 @@ static int init_display(struct fbtft_par *par)
if (par->gpio.cs != -1)
gpio_set_value(par->gpio.cs, 0); /* Activate chip */
- write_reg(par, CMD_SWRESET); /* software reset */
+ write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
mdelay(500);
- write_reg(par, CMD_SLPOUT); /* exit sleep */
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); /* exit sleep */
mdelay(5);
- write_reg(par, CMD_PIXFMT, 0x05); /* Set Color Format 16bit */
- write_reg(par, CMD_GAMMASET, 0x02); /* default gamma curve 3 */
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ /* default gamma curve 3 */
+ write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x02);
#ifdef GAMMA_ADJ
write_reg(par, CMD_GAMRSEL, 0x01); /* Enable Gamma adj */
#endif
- write_reg(par, CMD_NORML);
+ write_reg(par, MIPI_DCS_ENTER_NORMAL_MODE);
write_reg(par, CMD_DFUNCTR, 0xff, 0x06);
/* Frame Rate Control (In normal mode/Full colors) */
write_reg(par, CMD_FRMCTR1, 0x08, 0x02);
@@ -135,66 +112,67 @@ static int init_display(struct fbtft_par *par)
write_reg(par, CMD_VCOMCTR1, 0x50, 0x63);
write_reg(par, CMD_VCOMOFFS, 0);
- write_reg(par, CMD_CLMADRS, 0, 0, 0, WIDTH); /* Set Column Address */
- write_reg(par, CMD_PGEADRS, 0, 0, 0, HEIGHT); /* Set Page Address */
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0, 0, WIDTH);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0, 0, HEIGHT);
- write_reg(par, CMD_DISPON); /* display ON */
- write_reg(par, CMD_RAMWR); /* Memory Write */
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON); /* display ON */
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START); /* Memory Write */
return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys,
- int xe, int ye)
+ int xe, int ye)
{
switch (par->info->var.rotate) {
case 0:
- write_reg(par, CMD_CLMADRS, xs >> 8, xs & 0xff, xe >> 8,
- xe & 0xff);
- write_reg(par, CMD_PGEADRS,
- (ys + __OFFSET) >> 8, (ys + __OFFSET) & 0xff,
- (ye + __OFFSET) >> 8, (ye + __OFFSET) & 0xff);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ (ys + __OFFSET) >> 8, (ys + __OFFSET) & 0xff,
+ (ye + __OFFSET) >> 8, (ye + __OFFSET) & 0xff);
break;
case 90:
- write_reg(par, CMD_CLMADRS,
- (xs + __OFFSET) >> 8, (xs + __OFFSET) & 0xff,
- (xe + __OFFSET) >> 8, (xe + __OFFSET) & 0xff);
- write_reg(par, CMD_PGEADRS, ys >> 8, ys & 0xff, ye >> 8,
- ye & 0xff);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (xs + __OFFSET) >> 8, (xs + __OFFSET) & 0xff,
+ (xe + __OFFSET) >> 8, (xe + __OFFSET) & 0xff);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
break;
case 180:
case 270:
- write_reg(par, CMD_CLMADRS, xs >> 8, xs & 0xff, xe >> 8,
- xe & 0xff);
- write_reg(par, CMD_PGEADRS, ys >> 8, ys & 0xff, ye >> 8,
- ye & 0xff);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
break;
default:
- par->info->var.rotate = 0; /* Fix incorrect setting */
+ /* Fix incorrect setting */
+ par->info->var.rotate = 0;
}
- write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
/*
-7) MY: 1(bottom to top), 0(top to bottom) Row Address Order
-6) MX: 1(R to L), 0(L to R) Column Address Order
-5) MV: 1(Exchanged), 0(normal) Row/Column exchange
-4) ML: 1(bottom to top), 0(top to bottom) Vertical Refresh Order
-3) RGB: 1(BGR), 0(RGB) Color Space
-2) MH: 1(R to L), 0(L to R) Horizontal Refresh Order
-1)
-0)
-
- MY, MX, MV, ML,RGB, MH, D1, D0
- 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //normal
- 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //Y-Mirror
- 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Mirror
- 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Y-Mirror
- 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange
- 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange, Y-Mirror
- 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 //XY exchange
- 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0
-*/
+ * 7) MY: 1(bottom to top), 0(top to bottom) Row Address Order
+ * 6) MX: 1(R to L), 0(L to R) Column Address Order
+ * 5) MV: 1(Exchanged), 0(normal) Row/Column exchange
+ * 4) ML: 1(bottom to top), 0(top to bottom) Vertical Refresh Order
+ * 3) RGB: 1(BGR), 0(RGB) Color Space
+ * 2) MH: 1(R to L), 0(L to R) Horizontal Refresh Order
+ * 1)
+ * 0)
+ *
+ * MY, MX, MV, ML,RGB, MH, D1, D0
+ * 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //normal
+ * 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 //Y-Mirror
+ * 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Mirror
+ * 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 //X-Y-Mirror
+ * 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange
+ * 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 //X-Y Exchange, Y-Mirror
+ * 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 //XY exchange
+ * 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0
+ */
static int set_var(struct fbtft_par *par)
{
u8 mactrl_data = 0; /* Avoid compiler warning */
@@ -217,8 +195,8 @@ static int set_var(struct fbtft_par *par)
	/* Colorspace */
if (par->bgr)
mactrl_data |= (1 << 2);
- write_reg(par, CMD_MADCTL, mactrl_data);
- write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, mactrl_data);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
return 0;
}
@@ -237,27 +215,28 @@ static int gamma_adj(struct fbtft_par *par, unsigned long *curves)
CURVE(i, j) &= mask[i * par->gamma.num_values + j];
write_reg(par, CMD_PGAMMAC,
- CURVE(0, 0),
- CURVE(0, 1),
- CURVE(0, 2),
- CURVE(0, 3),
- CURVE(0, 4),
- CURVE(0, 5),
- CURVE(0, 6),
- (CURVE(0, 7) << 4) | CURVE(0, 8),
- CURVE(0, 9),
- CURVE(0, 10),
- CURVE(0, 11),
- CURVE(0, 12),
- CURVE(0, 13),
- CURVE(0, 14),
- CURVE(0, 15)
- );
+ CURVE(0, 0),
+ CURVE(0, 1),
+ CURVE(0, 2),
+ CURVE(0, 3),
+ CURVE(0, 4),
+ CURVE(0, 5),
+ CURVE(0, 6),
+ (CURVE(0, 7) << 4) | CURVE(0, 8),
+ CURVE(0, 9),
+ CURVE(0, 10),
+ CURVE(0, 11),
+ CURVE(0, 12),
+ CURVE(0, 13),
+ CURVE(0, 14),
+ CURVE(0, 15));
- write_reg(par, CMD_RAMWR); /* Write Data to GRAM mode */
+ /* Write Data to GRAM mode */
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
return 0;
}
+
#undef CURVE
#endif
diff --git a/drivers/staging/fbtft/fb_ili9320.c b/drivers/staging/fbtft/fb_ili9320.c
index 3ed50febe36f..6ff222d6d6d6 100644
--- a/drivers/staging/fbtft/fb_ili9320.c
+++ b/drivers/staging/fbtft/fb_ili9320.c
@@ -47,10 +47,10 @@ static int init_display(struct fbtft_par *par)
devcode = read_devicecode(par);
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n",
- devcode);
+ devcode);
if ((devcode != 0x0000) && (devcode != 0x9320))
dev_warn(par->info->device,
- "Unrecognized Device code: 0x%04X (expected 0x9320)\n",
+ "Unrecognized Device code: 0x%04X (expected 0x9320)\n",
devcode);
/* Initialization sequence from ILI9320 Application Notes */
@@ -216,10 +216,10 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
- VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
-*/
+ * Gamma string format:
+ * VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
+ * VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
+ */
#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
@@ -248,6 +248,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index 3b3a06d8a125..fdf98d37550e 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -56,42 +56,42 @@ module_param(vcm, uint, 0);
MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
/*
-Verify that this configuration is within the Voltage limits
-
-Display module configuration: Vcc = IOVcc = Vci = 3.3V
-
- Voltages
-----------
-Vci = 3.3
-Vci1 = Vci * 0.80 = 2.64
-DDVDH = Vci1 * 2 = 5.28
-VCL = -Vci1 = -2.64
-VREG1OUT = Vci * 1.85 = 4.88
-VCOMH = VREG1OUT * 0.735 = 3.59
-VCOM amplitude = VREG1OUT * 0.98 = 4.79
-VGH = Vci * 4 = 13.2
-VGL = -Vci * 4 = -13.2
-
- Limits
---------
-Power supplies
-1.65 < IOVcc < 3.30 => 1.65 < 3.3 < 3.30
-2.40 < Vcc < 3.30 => 2.40 < 3.3 < 3.30
-2.50 < Vci < 3.30 => 2.50 < 3.3 < 3.30
-
-Source/VCOM power supply voltage
- 4.50 < DDVDH < 6.0 => 4.50 < 5.28 < 6.0
--3.0 < VCL < -2.0 => -3.0 < -2.64 < -2.0
-VCI - VCL < 6.0 => 5.94 < 6.0
-
-Gate driver output voltage
- 10 < VGH < 20 => 10 < 13.2 < 20
--15 < VGL < -5 => -15 < -13.2 < -5
-VGH - VGL < 32 => 26.4 < 32
-
-VCOM driver output voltage
-VCOMH - VCOML < 6.0 => 4.79 < 6.0
-*/
+ * Verify that this configuration is within the Voltage limits
+ *
+ * Display module configuration: Vcc = IOVcc = Vci = 3.3V
+ *
+ * Voltages
+ * ----------
+ * Vci = 3.3
+ * Vci1 = Vci * 0.80 = 2.64
+ * DDVDH = Vci1 * 2 = 5.28
+ * VCL = -Vci1 = -2.64
+ * VREG1OUT = Vci * 1.85 = 4.88
+ * VCOMH = VREG1OUT * 0.735 = 3.59
+ * VCOM amplitude = VREG1OUT * 0.98 = 4.79
+ * VGH = Vci * 4 = 13.2
+ * VGL = -Vci * 4 = -13.2
+ *
+ * Limits
+ * --------
+ * Power supplies
+ * 1.65 < IOVcc < 3.30 => 1.65 < 3.3 < 3.30
+ * 2.40 < Vcc < 3.30 => 2.40 < 3.3 < 3.30
+ * 2.50 < Vci < 3.30 => 2.50 < 3.3 < 3.30
+ *
+ * Source/VCOM power supply voltage
+ * 4.50 < DDVDH < 6.0 => 4.50 < 5.28 < 6.0
+ * -3.0 < VCL < -2.0 => -3.0 < -2.64 < -2.0
+ * VCI - VCL < 6.0 => 5.94 < 6.0
+ *
+ * Gate driver output voltage
+ * 10 < VGH < 20 => 10 < 13.2 < 20
+ * -15 < VGL < -5 => -15 < -13.2 < -5
+ * VGH - VGL < 32 => 26.4 < 32
+ *
+ * VCOM driver output voltage
+ * VCOMH - VCOML < 6.0 => 4.79 < 6.0
+ */
static int init_display(struct fbtft_par *par)
{
@@ -213,10 +213,10 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
- VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
-*/
+ * Gamma string format:
+ * VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
+ * VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
+ */
#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
@@ -245,6 +245,7 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ili9340.c b/drivers/staging/fbtft/fb_ili9340.c
index e0e253989271..0711121c303c 100644
--- a/drivers/staging/fbtft/fb_ili9340.c
+++ b/drivers/staging/fbtft/fb_ili9340.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -53,7 +54,7 @@ static int init_display(struct fbtft_par *par)
/* COLMOD: Pixel Format Set */
/* 16 bits/pixel */
- write_reg(par, 0x3A, 0x55);
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
/* Frame Rate Control */
/* Division ratio = fosc, Frame Rate = 79Hz */
@@ -65,40 +66,37 @@ static int init_display(struct fbtft_par *par)
/* Gamma Function Disable */
write_reg(par, 0xF2, 0x00);
- /* Gamma curve selected */
- write_reg(par, 0x26, 0x01);
+ /* Gamma curve selection */
+ write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
/* Positive Gamma Correction */
write_reg(par, 0xE0,
- 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1,
- 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00);
+ 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E, 0xF1,
+ 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00);
/* Negative Gamma Correction */
write_reg(par, 0xE1,
- 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1,
- 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F);
+ 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31, 0xC1,
+ 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F);
- /* Sleep OUT */
- write_reg(par, 0x11);
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
mdelay(120);
- /* Display ON */
- write_reg(par, 0x29);
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define ILI9340_MADCTL_MV 0x20
@@ -123,7 +121,7 @@ static int set_var(struct fbtft_par *par)
break;
}
/* Memory Access Control */
- write_reg(par, 0x36, val | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, val | (par->bgr << 3));
return 0;
}
diff --git a/drivers/staging/fbtft/fb_ili9341.c b/drivers/staging/fbtft/fb_ili9341.c
index dcee0aff5875..ff35c8624ca3 100644
--- a/drivers/staging/fbtft/fb_ili9341.c
+++ b/drivers/staging/fbtft/fb_ili9341.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -39,9 +40,9 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
/* startup sequence for MI0283QT-9A */
- write_reg(par, 0x01); /* software reset */
+ write_reg(par, MIPI_DCS_SOFT_RESET);
mdelay(5);
- write_reg(par, 0x28); /* display off */
+ write_reg(par, MIPI_DCS_SET_DISPLAY_OFF);
/* --------------------------------------------------------- */
write_reg(par, 0xCF, 0x00, 0x83, 0x30);
write_reg(par, 0xED, 0x64, 0x03, 0x12, 0x81);
@@ -56,18 +57,18 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xC5, 0x35, 0x3E);
write_reg(par, 0xC7, 0xBE);
/* ------------memory access control------------------------ */
- write_reg(par, 0x3A, 0x55); /* 16bit pixel */
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); /* 16bit pixel */
/* ------------frame rate----------------------------------- */
write_reg(par, 0xB1, 0x00, 0x1B);
/* ------------Gamma---------------------------------------- */
/* write_reg(par, 0xF2, 0x08); */ /* Gamma Function Disable */
- write_reg(par, 0x26, 0x01);
+ write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
/* ------------display-------------------------------------- */
write_reg(par, 0xB7, 0x07); /* entry mode set */
write_reg(par, 0xB6, 0x0A, 0x82, 0x27, 0x00);
- write_reg(par, 0x11); /* sleep out */
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
mdelay(100);
- write_reg(par, 0x29); /* display on */
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
mdelay(20);
return 0;
@@ -75,40 +76,39 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address set */
- write_reg(par, 0x2A,
- (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
- /* Row address set */
- write_reg(par, 0x2B,
- (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
-#define MEM_Y (7) /* MY row address order */
-#define MEM_X (6) /* MX column address order */
-#define MEM_V (5) /* MV row / column exchange */
-#define MEM_L (4) /* ML vertical refresh order */
-#define MEM_H (2) /* MH horizontal refresh order */
+#define MEM_Y BIT(7) /* MY row address order */
+#define MEM_X BIT(6) /* MX column address order */
+#define MEM_V BIT(5) /* MV row / column exchange */
+#define MEM_L BIT(4) /* ML vertical refresh order */
+#define MEM_H BIT(2) /* MH horizontal refresh order */
#define MEM_BGR (3) /* RGB-BGR Order */
static int set_var(struct fbtft_par *par)
{
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, (1 << MEM_X) | (par->bgr << MEM_BGR));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MEM_X | (par->bgr << MEM_BGR));
break;
case 270:
- write_reg(par, 0x36,
- (1 << MEM_V) | (1 << MEM_L) | (par->bgr << MEM_BGR));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MEM_V | MEM_L | (par->bgr << MEM_BGR));
break;
case 180:
- write_reg(par, 0x36, (1 << MEM_Y) | (par->bgr << MEM_BGR));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MEM_Y | (par->bgr << MEM_BGR));
break;
case 90:
- write_reg(par, 0x36, (1 << MEM_Y) | (1 << MEM_X) |
- (1 << MEM_V) | (par->bgr << MEM_BGR));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MEM_Y | MEM_X | MEM_V | (par->bgr << MEM_BGR));
break;
}
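The MEM_* constants above become ready-made masks built with the kernel's BIT() macro, so the call sites OR them directly instead of shifting bit positions; MEM_BGR stays a plain bit number because par->bgr is shifted by it. The equivalence, in short:

	/* BIT(n) expands to (1UL << (n)), so:                       */
	/*   old: #define MEM_Y (7)     used as (1 << MEM_Y) == 0x80 */
	/*   new: #define MEM_Y BIT(7)  used as MEM_Y        == 0x80 */
	u8 madctl = MEM_Y | MEM_X | MEM_V;	/* 0x80 | 0x40 | 0x20 = 0xE0 */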
@@ -116,10 +116,10 @@ static int set_var(struct fbtft_par *par)
}
/*
- Gamma string format:
- Positive: Par1 Par2 [...] Par15
- Negative: Par1 Par2 [...] Par15
-*/
+ * Gamma string format:
+ * Positive: Par1 Par2 [...] Par15
+ * Negative: Par1 Par2 [...] Par15
+ */
#define CURVE(num, idx) curves[num * par->gamma.num_values + idx]
static int set_gamma(struct fbtft_par *par, unsigned long *curves)
{
@@ -127,14 +127,15 @@ static int set_gamma(struct fbtft_par *par, unsigned long *curves)
for (i = 0; i < par->gamma.num_curves; i++)
write_reg(par, 0xE0 + i,
- CURVE(i, 0), CURVE(i, 1), CURVE(i, 2),
- CURVE(i, 3), CURVE(i, 4), CURVE(i, 5),
- CURVE(i, 6), CURVE(i, 7), CURVE(i, 8),
- CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
- CURVE(i, 12), CURVE(i, 13), CURVE(i, 14));
+ CURVE(i, 0), CURVE(i, 1), CURVE(i, 2),
+ CURVE(i, 3), CURVE(i, 4), CURVE(i, 5),
+ CURVE(i, 6), CURVE(i, 7), CURVE(i, 8),
+ CURVE(i, 9), CURVE(i, 10), CURVE(i, 11),
+ CURVE(i, 12), CURVE(i, 13), CURVE(i, 14));
return 0;
}
+
#undef CURVE
static struct fbtft_display display = {
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 63684864f309..242adb3859bd 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -27,9 +28,8 @@
#define HEIGHT 480
static int default_init_sequence[] = {
-
/* SLP_OUT - Sleep out */
- -1, 0x11,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 50,
/* Power setting */
-1, 0xD0, 0x07, 0x42, 0x18,
@@ -42,44 +42,47 @@ static int default_init_sequence[] = {
/* Frame rate & inv. */
-1, 0xC5, 0x03,
/* Pixel format */
- -1, 0x3A, 0x55,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
/* Gamma */
-1, 0xC8, 0x00, 0x32, 0x36, 0x45, 0x06, 0x16,
0x37, 0x75, 0x77, 0x54, 0x0C, 0x00,
/* DISP_ON */
- -1, 0x29,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
-3
};
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* column address */
- write_reg(par, 0x2a, xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xff, xe >> 8, xe & 0xff);
- /* Row address */
- write_reg(par, 0x2b, ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xff, ye >> 8, ye & 0xff);
- /* memory write */
- write_reg(par, 0x2c);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define HFLIP 0x01
#define VFLIP 0x02
-#define ROWxCOL 0x20
+#define ROW_X_COL 0x20
static int set_var(struct fbtft_par *par)
{
switch (par->info->var.rotate) {
case 270:
- write_reg(par, 0x36, ROWxCOL | HFLIP | VFLIP | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ ROW_X_COL | HFLIP | VFLIP | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, VFLIP | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ VFLIP | (par->bgr << 3));
break;
case 90:
- write_reg(par, 0x36, ROWxCOL | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ ROW_X_COL | (par->bgr << 3));
break;
default:
- write_reg(par, 0x36, HFLIP | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ HFLIP | (par->bgr << 3));
break;
}
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index d9dfff68159b..fa38d8885f0b 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -28,11 +29,10 @@
static int default_init_sequence[] = {
/* Interface Mode Control */
-1, 0xb0, 0x0,
- /* Sleep OUT */
- -1, 0x11,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 250,
/* Interface Pixel Format */
- -1, 0x3A, 0x55,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
/* Power Control 3 */
-1, 0xC2, 0x44,
/* VCOM Control 1 */
@@ -46,40 +46,41 @@ static int default_init_sequence[] = {
/* Digital Gamma Control 1 */
-1, 0xE2, 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00,
- /* Sleep OUT */
- -1, 0x11,
- /* Display ON */
- -1, 0x29,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
/* end marker */
-3
};
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
static int set_var(struct fbtft_par *par)
{
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, 0x80 | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ 0x80 | (par->bgr << 3));
break;
case 90:
- write_reg(par, 0x36, 0x20 | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ 0x20 | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, 0x40 | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ 0x40 | (par->bgr << 3));
break;
case 270:
- write_reg(par, 0x36, 0xE0 | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ 0xE0 | (par->bgr << 3));
break;
default:
break;
diff --git a/drivers/staging/fbtft/fb_ra8875.c b/drivers/staging/fbtft/fb_ra8875.c
index b167c5061631..308a244972aa 100644
--- a/drivers/staging/fbtft/fb_ra8875.c
+++ b/drivers/staging/fbtft/fb_ra8875.c
@@ -257,7 +257,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- u16 *txbuf16 = (u16 *)par->txbuf.buf;
+ u16 *txbuf16 = par->txbuf.buf;
size_t remain;
size_t to_copy;
size_t tx_array_size;
@@ -271,13 +271,13 @@ static int write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
remain = len / 2;
vmem16 = (u16 *)(par->info->screen_buffer + offset);
tx_array_size = par->txbuf.len / 2;
- txbuf16 = (u16 *)(par->txbuf.buf + 1);
+ txbuf16 = par->txbuf.buf + 1;
tx_array_size -= 2;
*(u8 *)(par->txbuf.buf) = 0x00;
startbyte_size = 1;
while (remain) {
- to_copy = remain > tx_array_size ? tx_array_size : remain;
+ to_copy = min(tx_array_size, remain);
dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
to_copy, remain - to_copy);
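The open-coded ternary above becomes the kernel's min() macro, which also type-checks its arguments at build time; that is safe here because tx_array_size and remain are both size_t:

	/* before: to_copy = remain > tx_array_size ? tx_array_size : remain; */
	/* after:  to_copy = min(tx_array_size, remain);                      */
	/* min() complains at compile time if the argument types differ.      */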
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index da85057eb3e0..3113355062fc 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -50,7 +51,7 @@ static int default_init_sequence[] = {
-1, 0xf3, 0x00, 0x00,
- -1, 0x11,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 50,
-1, 0xf3, 0x00, 0x01,
@@ -79,18 +80,18 @@ static int default_init_sequence[] = {
/* initializing sequence */
- -1, 0x36, 0x08,
+ -1, MIPI_DCS_SET_ADDRESS_MODE, 0x08,
- -1, 0x35, 0x00,
+ -1, MIPI_DCS_SET_TEAR_ON, 0x00,
- -1, 0x3a, 0x05,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x05,
- /* gamma setting sequence */
- -1, 0x26, 0x01, /* preset gamma curves, possible values 0x01, 0x02, 0x04, 0x08 */
+ /* gamma setting - possible values 0x01, 0x02, 0x04, 0x08 */
+ -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
-2, 150,
- -1, 0x29,
- -1, 0x2c,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
+ -1, MIPI_DCS_WRITE_MEMORY_START,
/* end marker */
-3
@@ -98,14 +99,13 @@ static int default_init_sequence[] = {
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define MY BIT(7)
@@ -113,7 +113,7 @@ static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
#define MV BIT(5)
static int set_var(struct fbtft_par *par)
{
- /* MADCTL - Memory data access control
+	/* Memory data access control (0x36)
RGB/BGR:
1. Mode selection pin SRGB
RGB H/W pin for color filter setting: 0=RGB, 1=BGR
@@ -121,16 +121,20 @@ static int set_var(struct fbtft_par *par)
RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MY | (par->bgr << 3));
break;
case 270:
- write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MY | MV | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, par->bgr << 3);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ par->bgr << 3);
break;
case 90:
- write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MV | (par->bgr << 3));
break;
}
diff --git a/drivers/staging/fbtft/fb_ssd1305.c b/drivers/staging/fbtft/fb_ssd1305.c
new file mode 100644
index 000000000000..4b38c3fadd60
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1305.c
@@ -0,0 +1,216 @@
+/*
+ * FB driver for the SSD1305 OLED Controller
+ *
+ * based on SSD1306 driver by Noralf Tronnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ssd1305"
+
+#define WIDTH 128
+#define HEIGHT 64
+
+/*
+ * write_reg() caveat:
+ *
+ * This doesn't work because D/C has to be LOW for both values:
+ * write_reg(par, val1, val2);
+ *
+ * Do it like this:
+ * write_reg(par, val1);
+ * write_reg(par, val2);
+ */
+
+/* Init sequence taken from the Adafruit SSD1306 Arduino library */
+static int init_display(struct fbtft_par *par)
+{
+ par->fbtftops.reset(par);
+
+ if (par->gamma.curves[0] == 0) {
+ mutex_lock(&par->gamma.lock);
+ if (par->info->var.yres == 64)
+ par->gamma.curves[0] = 0xCF;
+ else
+ par->gamma.curves[0] = 0x8F;
+ mutex_unlock(&par->gamma.lock);
+ }
+
+ /* Set Display OFF */
+ write_reg(par, 0xAE);
+
+ /* Set Display Clock Divide Ratio/ Oscillator Frequency */
+ write_reg(par, 0xD5);
+ write_reg(par, 0x80);
+
+ /* Set Multiplex Ratio */
+ write_reg(par, 0xA8);
+ if (par->info->var.yres == 64)
+ write_reg(par, 0x3F);
+ else
+ write_reg(par, 0x1F);
+
+ /* Set Display Offset */
+ write_reg(par, 0xD3);
+ write_reg(par, 0x0);
+
+ /* Set Display Start Line */
+ write_reg(par, 0x40 | 0x0);
+
+ /* Charge Pump Setting */
+ write_reg(par, 0x8D);
+ /* A[2] = 1b, Enable charge pump during display on */
+ write_reg(par, 0x14);
+
+ /* Set Memory Addressing Mode */
+ write_reg(par, 0x20);
+ /* Vertical addressing mode */
+ write_reg(par, 0x01);
+
+ /*
+ * Set Segment Re-map
+ * column address 127 is mapped to SEG0
+ */
+ write_reg(par, 0xA0 | ((par->info->var.rotate == 180) ? 0x0 : 0x1));
+
+ /*
+ * Set COM Output Scan Direction
+ * remapped mode. Scan from COM[N-1] to COM0
+ */
+ write_reg(par, ((par->info->var.rotate == 180) ? 0xC8 : 0xC0));
+
+ /* Set COM Pins Hardware Configuration */
+ write_reg(par, 0xDA);
+ if (par->info->var.yres == 64) {
+ /* A[4]=1b, Alternative COM pin configuration */
+ write_reg(par, 0x12);
+ } else {
+ /* A[4]=0b, Sequential COM pin configuration */
+ write_reg(par, 0x02);
+ }
+
+ /* Set Pre-charge Period */
+ write_reg(par, 0xD9);
+ write_reg(par, 0xF1);
+
+ /*
+ * Entire Display ON
+ * Resume to RAM content display. Output follows RAM content
+ */
+ write_reg(par, 0xA4);
+
+ /*
+ * Set Normal Display
+ * 0 in RAM: OFF in display panel
+ * 1 in RAM: ON in display panel
+ */
+ write_reg(par, 0xA6);
+
+ /* Set Display ON */
+ write_reg(par, 0xAF);
+
+ return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ /* Set Lower Column Start Address for Page Addressing Mode */
+ write_reg(par, 0x00 | ((par->info->var.rotate == 180) ? 0x0 : 0x4));
+ /* Set Higher Column Start Address for Page Addressing Mode */
+ write_reg(par, 0x10 | 0x0);
+ /* Set Display Start Line */
+ write_reg(par, 0x40 | 0x0);
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ if (on)
+ write_reg(par, 0xAE);
+ else
+ write_reg(par, 0xAF);
+ return 0;
+}
+
+/* Gamma is used to control Contrast */
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ curves[0] &= 0xFF;
+ /* Set Contrast Control for BANK0 */
+ write_reg(par, 0x81);
+ write_reg(par, curves[0]);
+
+ return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
+ u8 *buf = par->txbuf.buf;
+ int x, y, i;
+ int ret;
+
+ for (x = 0; x < par->info->var.xres; x++) {
+ for (y = 0; y < par->info->var.yres / 8; y++) {
+ *buf = 0x00;
+ for (i = 0; i < 8; i++)
+ *buf |= (vmem16[(y * 8 + i) *
+ par->info->var.xres + x] ?
+ 1 : 0) << i;
+ buf++;
+ }
+ }
+
+ /* Write data */
+ gpio_set_value(par->gpio.dc, 1);
+ ret = par->fbtftops.write(par, par->txbuf.buf,
+ par->info->var.xres * par->info->var.yres /
+ 8);
+ if (ret < 0)
+ dev_err(par->info->device, "write failed and returned: %d\n",
+ ret);
+ return ret;
+}
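The write_vmem() above repacks the 16bpp shadow buffer into the SSD1305's native layout: each transmitted byte is an 8-pixel vertical strip of one column, bit i holding row offset i, and the vertical addressing mode set in init_display() makes consecutive bytes walk down the panel. A standalone sketch of the per-byte packing, with xres as a stand-in:

	/* pack the 8 pixels of column x, page y into one 1bpp byte */
	static u8 ssd1305_pack(const u16 *vmem16, int xres, int x, int y)
	{
		u8 out = 0;
		int i;

		for (i = 0; i < 8; i++)
			if (vmem16[(y * 8 + i) * xres + x])	/* non-black? */
				out |= 1 << i;		/* bit i = row y * 8 + i */
		return out;
	}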
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = WIDTH * HEIGHT / 8,
+ .gamma_num = 1,
+ .gamma_len = 1,
+ .gamma = "00",
+ .fbtftops = {
+ .write_vmem = write_vmem,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .blank = blank,
+ .set_gamma = set_gamma,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1305", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1305");
+MODULE_ALIAS("platform:ssd1305");
+
+MODULE_DESCRIPTION("SSD1305 OLED Driver");
+MODULE_AUTHOR("Alexey Mednyy");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fb_ssd1325.c b/drivers/staging/fbtft/fb_ssd1325.c
new file mode 100644
index 000000000000..15078bf2aa4b
--- /dev/null
+++ b/drivers/staging/fbtft/fb_ssd1325.c
@@ -0,0 +1,205 @@
+/*
+ * FB driver for the SSD1325 OLED Controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME "fb_ssd1325"
+
+#define WIDTH 128
+#define HEIGHT 64
+#define GAMMA_NUM 1
+#define GAMMA_LEN 15
+#define DEFAULT_GAMMA "7 1 1 1 1 2 2 3 3 4 4 5 5 6 6"
+
+/*
+ * write_reg() caveat:
+ *
+ * This doesn't work because D/C has to be LOW for both values:
+ * write_reg(par, val1, val2);
+ *
+ * Do it like this:
+ * write_reg(par, val1);
+ * write_reg(par, val2);
+ */
+
+/* Init sequence for the SSD1325 controller */
+static int init_display(struct fbtft_par *par)
+{
+ par->fbtftops.reset(par);
+
+ gpio_set_value(par->gpio.cs, 0);
+
+ write_reg(par, 0xb3);
+ write_reg(par, 0xf0);
+ write_reg(par, 0xae);
+ write_reg(par, 0xa1);
+ write_reg(par, 0x00);
+ write_reg(par, 0xa8);
+ write_reg(par, 0x3f);
+ write_reg(par, 0xa0);
+ write_reg(par, 0x45);
+ write_reg(par, 0xa2);
+ write_reg(par, 0x40);
+ write_reg(par, 0x75);
+ write_reg(par, 0x00);
+ write_reg(par, 0x3f);
+ write_reg(par, 0x15);
+ write_reg(par, 0x00);
+ write_reg(par, 0x7f);
+ write_reg(par, 0xa4);
+ write_reg(par, 0xaf);
+
+ return 0;
+}
+
+static uint8_t rgb565_to_g16(u16 pixel)
+{
+ u16 b = pixel & 0x1f;
+ u16 g = (pixel & (0x3f << 5)) >> 5;
+ u16 r = (pixel & (0x1f << (5 + 6))) >> (5 + 6);
+
+ pixel = (299 * r + 587 * g + 114 * b) / 195;
+ if (pixel > 255)
+ pixel = 255;
+	return pixel / 16;
+}
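rgb565_to_g16() reduces an RGB565 pixel to one of 16 gray levels using the classic 299/587/114 luma weights; the divisor 195 scales the maximum weighted sum to roughly 255 before the final divide by 16. Worked examples, computed from the function as written:

	/* white 0xFFFF: r=31 g=63 b=31                               */
	/*   (299*31 + 587*63 + 114*31) / 195 = 49784/195 = 255 -> 15 */
	/* mid gray: r=15 g=31 b=15 -> 24392/195 = 125 -> 125/16 = 7  */
	/* black 0x0000 -> 0 -> gray level 0                          */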
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+ fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+ "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n", __func__, xs, ys, xe,
+ ye);
+
+ write_reg(par, 0x75);
+ write_reg(par, 0x00);
+ write_reg(par, 0x3f);
+ write_reg(par, 0x15);
+ write_reg(par, 0x00);
+ write_reg(par, 0x7f);
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+ fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+ __func__, on ? "true" : "false");
+
+ if (on)
+ write_reg(par, 0xAE);
+ else
+ write_reg(par, 0xAF);
+ return 0;
+}
+
+/*
+ * Grayscale Lookup Table
+ * GS1 - GS15
+ * The "Gamma curve" contains the relative values between the entries
+ * in the Lookup table.
+ *
+ * 0 = Setting of GS1 < Setting of GS2 < Setting of GS3.....<
+ * Setting of GS14 < Setting of GS15
+ */
+static int set_gamma(struct fbtft_par *par, unsigned long *curves)
+{
+ int i;
+
+ fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+ for (i = 0; i < GAMMA_LEN; i++) {
+ if (i > 0 && curves[i] < 1) {
+ dev_err(par->info->device,
+ "Illegal value in Grayscale Lookup Table at index %d.\n"
+ "Must be greater than 0\n", i);
+ return -EINVAL;
+ }
+ if (curves[i] > 7) {
+ dev_err(par->info->device,
+ "Illegal value(s) in Grayscale Lookup Table.\n"
+ "At index=%d, the accumulated value has exceeded 7\n",
+ i);
+ return -EINVAL;
+ }
+ }
+ write_reg(par, 0xB8);
+ for (i = 0; i < 8; i++)
+ write_reg(par, (curves[i] & 0xFF));
+ return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+ u16 *vmem16 = (u16 *)par->info->screen_buffer;
+ u8 *buf = par->txbuf.buf;
+ u8 n1;
+ u8 n2;
+ int y, x;
+ int ret;
+
+	for (x = 0; x < par->info->var.xres; x += 2) {
+		for (y = 0; y < par->info->var.yres; y++) {
+			n1 = rgb565_to_g16(vmem16[y * par->info->var.xres + x]);
+			n2 = rgb565_to_g16(vmem16[y * par->info->var.xres +
+						  x + 1]);
+			*buf = (n1 << 4) | n2;
+			buf++;
+		}
+	}
+
+ gpio_set_value(par->gpio.dc, 1);
+
+ /* Write data */
+ ret = par->fbtftops.write(par, par->txbuf.buf,
+ par->info->var.xres * par->info->var.yres / 2);
+ if (ret < 0)
+ dev_err(par->info->device,
+ "%s: write failed and returned: %d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct fbtft_display display = {
+ .regwidth = 8,
+ .width = WIDTH,
+ .height = HEIGHT,
+ .txbuflen = WIDTH * HEIGHT / 2,
+ .gamma_num = GAMMA_NUM,
+ .gamma_len = GAMMA_LEN,
+ .gamma = DEFAULT_GAMMA,
+ .fbtftops = {
+ .write_vmem = write_vmem,
+ .init_display = init_display,
+ .set_addr_win = set_addr_win,
+ .blank = blank,
+ .set_gamma = set_gamma,
+ },
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1325", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:ssd1325");
+MODULE_ALIAS("platform:ssd1325");
+
+MODULE_DESCRIPTION("SSD1325 OLED Driver");
+MODULE_AUTHOR("Alexey Mednyy");
+MODULE_LICENSE("GPL");
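
For reference, write_vmem() above packs two 4-bit gray pixels into each byte sent to the controller, left pixel in the high nibble. A minimal stand-alone sketch of that packing step (illustrative only, not part of the patch):

	#include <stdint.h>

	/* Pack two 4-bit gray levels (0-15) into one byte; the left-hand
	 * pixel occupies the high nibble, as in fb_ssd1325's write_vmem().
	 */
	static uint8_t pack_g4_pair(uint8_t left, uint8_t right)
	{
		return (uint8_t)((left << 4) | (right & 0x0f));
	}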
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index a92b0d071097..c5e51fe1aad5 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -25,12 +26,10 @@
"0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
static int default_init_sequence[] = {
- /* SWRESET - Software reset */
- -1, 0x01,
+ -1, MIPI_DCS_SOFT_RESET,
-2, 150, /* delay */
- /* SLPOUT - Sleep out & booster on */
- -1, 0x11,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
-2, 500, /* delay */
/* FRMCTR1 - frame rate control: normal mode
@@ -71,18 +70,14 @@ static int default_init_sequence[] = {
/* VMCTR1 - Power Control */
-1, 0xC5, 0x0E,
- /* INVOFF - Display inversion off */
- -1, 0x20,
+ -1, MIPI_DCS_EXIT_INVERT_MODE,
- /* COLMOD - Interface pixel format */
- -1, 0x3A, 0x05,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT,
- /* DISPON - Display On */
- -1, 0x29,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
-2, 100, /* delay */
- /* NORON - Partial off (Normal) */
- -1, 0x13,
+ -1, MIPI_DCS_ENTER_NORMAL_MODE,
-2, 10, /* delay */
/* end marker */
@@ -91,14 +86,13 @@ static int default_init_sequence[] = {
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define MY BIT(7)
@@ -114,16 +108,20 @@ static int set_var(struct fbtft_par *par)
RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR */
switch (par->info->var.rotate) {
case 0:
- write_reg(par, 0x36, MX | MY | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MY | (par->bgr << 3));
break;
case 270:
- write_reg(par, 0x36, MY | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MY | MV | (par->bgr << 3));
break;
case 180:
- write_reg(par, 0x36, par->bgr << 3);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ par->bgr << 3);
break;
case 90:
- write_reg(par, 0x36, MX | MV | (par->bgr << 3));
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
+ MX | MV | (par->bgr << 3));
break;
}
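
The opcode conversions in this series replace bare command bytes with the named constants from <video/mipi_display.h>. The substitutions visible in these hunks correspond to the following mapping, summarized here for reference:

	/*
	 * Raw opcode -> MIPI DCS constant, as substituted in this series:
	 *
	 *   0x01  MIPI_DCS_SOFT_RESET
	 *   0x11  MIPI_DCS_EXIT_SLEEP_MODE
	 *   0x13  MIPI_DCS_ENTER_NORMAL_MODE
	 *   0x20  MIPI_DCS_EXIT_INVERT_MODE
	 *   0x26  MIPI_DCS_SET_GAMMA_CURVE
	 *   0x28  MIPI_DCS_SET_DISPLAY_OFF
	 *   0x29  MIPI_DCS_SET_DISPLAY_ON
	 *   0x2A  MIPI_DCS_SET_COLUMN_ADDRESS
	 *   0x2B  MIPI_DCS_SET_PAGE_ADDRESS
	 *   0x2C  MIPI_DCS_WRITE_MEMORY_START
	 *   0x36  MIPI_DCS_SET_ADDRESS_MODE
	 *   0x3A  MIPI_DCS_SET_PIXEL_FORMAT
	 */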
diff --git a/drivers/staging/fbtft/fb_tinylcd.c b/drivers/staging/fbtft/fb_tinylcd.c
index caf263db436a..097e71cfef62 100644
--- a/drivers/staging/fbtft/fb_tinylcd.c
+++ b/drivers/staging/fbtft/fb_tinylcd.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
@@ -38,7 +39,7 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xB4, 0x02);
write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
write_reg(par, 0xB7, 0x07);
- write_reg(par, 0x36, 0x58);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x58);
write_reg(par, 0xF0, 0x36, 0xA5, 0xD3);
write_reg(par, 0xE5, 0x80);
write_reg(par, 0xE5, 0x01);
@@ -47,24 +48,23 @@ static int init_display(struct fbtft_par *par)
write_reg(par, 0xF0, 0x36, 0xA5, 0x53);
write_reg(par, 0xE0, 0x00, 0x35, 0x33, 0x00, 0x00, 0x00,
0x00, 0x35, 0x33, 0x00, 0x00, 0x00);
- write_reg(par, 0x3A, 0x55);
- write_reg(par, 0x11);
+ write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
udelay(250);
- write_reg(par, 0x29);
+ write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
- /* Column address */
- write_reg(par, 0x2A, xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);
- /* Row address */
- write_reg(par, 0x2B, ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
static int set_var(struct fbtft_par *par)
@@ -72,19 +72,19 @@ static int set_var(struct fbtft_par *par)
switch (par->info->var.rotate) {
case 270:
write_reg(par, 0xB6, 0x00, 0x02, 0x3B);
- write_reg(par, 0x36, 0x28);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x28);
break;
case 180:
write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
- write_reg(par, 0x36, 0x58);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x58);
break;
case 90:
write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
- write_reg(par, 0x36, 0x38);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x38);
break;
default:
write_reg(par, 0xB6, 0x00, 0x22, 0x3B);
- write_reg(par, 0x36, 0x08);
+ write_reg(par, MIPI_DCS_SET_ADDRESS_MODE, 0x08);
break;
}
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 4e828142058e..e87401aacfb3 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -222,8 +222,8 @@ static int set_var(struct fbtft_par *par)
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
u8 *vmem8 = (u8 *)(par->info->screen_buffer);
- u8 *buf8 = (u8 *)(par->txbuf.buf);
- u16 *buf16 = (u16 *)(par->txbuf.buf);
+ u8 *buf8 = par->txbuf.buf;
+ u16 *buf16 = par->txbuf.buf;
int line_length = par->info->fix.line_length;
int y_start = (offset / line_length);
int y_end = (offset + len - 1) / line_length;
diff --git a/drivers/staging/fbtft/fb_uc1701.c b/drivers/staging/fbtft/fb_uc1701.c
index 212908e39277..b78045fe5393 100644
--- a/drivers/staging/fbtft/fb_uc1701.c
+++ b/drivers/staging/fbtft/fb_uc1701.c
@@ -78,11 +78,11 @@ static int init_display(struct fbtft_par *par)
mdelay(10);
/* set startpoint */
- /* LCD_START_LINE | (pos & 0x3F) */
write_reg(par, LCD_START_LINE);
/* select orientation BOTTOMVIEW */
write_reg(par, LCD_BOTTOMVIEW | 1);
+
/* output mode select (turns display upside-down) */
write_reg(par, LCD_SCAN_DIR | 0x00);
@@ -96,20 +96,14 @@ static int init_display(struct fbtft_par *par)
write_reg(par, LCD_BIAS | 0);
/* power control mode: all features on */
- /* LCD_POWER_CONTROL | (val&0x07) */
write_reg(par, LCD_POWER_CONTROL | 0x07);
/* set voltage regulator R/R */
- /* LCD_VOLTAGE | (val&0x07) */
write_reg(par, LCD_VOLTAGE | 0x07);
/* volume mode set */
- /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
write_reg(par, LCD_VOLUME_MODE);
- /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
write_reg(par, 0x09);
- /* ???? */
- /* LCD_VOLUME_MODE,val&0x3f,LCD_NO_OP */
write_reg(par, LCD_NO_OP);
/* advanced program control */
@@ -125,17 +119,8 @@ static int init_display(struct fbtft_par *par)
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
/* goto address */
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, LCD_PAGE_ADDRESS);
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, 0x00);
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, LCD_COL_ADDRESS);
}
@@ -156,17 +141,9 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
1 : 0) << i;
buf++;
}
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
+
write_reg(par, LCD_PAGE_ADDRESS | (u8)y);
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, 0x00);
- /* LCD_PAGE_ADDRESS | ((page) & 0x1F),
- (((col)+SHIFT_ADDR_NORMAL) & 0x0F),
- LCD_COL_ADDRESS | ((((col)+SHIFT_ADDR_NORMAL)>>4) & 0x0F) */
write_reg(par, LCD_COL_ADDRESS);
gpio_set_value(par->gpio.dc, 1);
ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
index 58449ad84f46..83505bce628a 100644
--- a/drivers/staging/fbtft/fbtft-bus.c
+++ b/drivers/staging/fbtft/fbtft-bus.c
@@ -125,7 +125,7 @@ EXPORT_SYMBOL(fbtft_write_reg8_bus9);
int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
{
u16 *vmem16;
- u16 *txbuf16 = (u16 *)par->txbuf.buf;
+ u16 *txbuf16 = par->txbuf.buf;
size_t remain;
size_t to_copy;
size_t tx_array_size;
@@ -150,14 +150,14 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
tx_array_size = par->txbuf.len / 2;
if (par->startbyte) {
- txbuf16 = (u16 *)(par->txbuf.buf + 1);
+ txbuf16 = par->txbuf.buf + 1;
tx_array_size -= 2;
*(u8 *)(par->txbuf.buf) = par->startbyte | 0x2;
startbyte_size = 1;
}
while (remain) {
- to_copy = remain > tx_array_size ? tx_array_size : remain;
+ to_copy = min(tx_array_size, remain);
dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
to_copy, remain - to_copy);
@@ -201,7 +201,7 @@ int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len)
tx_array_size = par->txbuf.len / 2;
while (remain) {
- to_copy = remain > tx_array_size ? tx_array_size : remain;
+ to_copy = min(tx_array_size, remain);
dev_dbg(par->info->device, " to_copy=%zu, remain=%zu\n",
to_copy, remain - to_copy);
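
The min() used here is the type-checked kernel macro from <linux/kernel.h> (both operands are size_t, so it compiles cleanly), replacing the open-coded ternary. The transmit loop around it reduces to the following skeleton, a sketch with the pixel copy and byte-swap step elided:

	while (remain) {
		size_t to_copy = min(tx_array_size, remain);

		/* fill par->txbuf.buf with to_copy pixels, then send them */
		vmem16 += to_copy;
		remain -= to_copy;
	}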
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index b1e45161eefc..0c1a77cafe14 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -35,6 +35,7 @@
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <video/mipi_display.h>
#include "fbtft.h"
#include "internal.h"
@@ -129,7 +130,8 @@ static int fbtft_request_gpios(struct fbtft_par *par)
while (gpio->name[0]) {
flags = FBTFT_GPIO_NO_MATCH;
/* if driver provides match function, try it first,
- if no match use our own */
+ * if no match use our own
+ */
if (par->fbtftops.request_gpios_match)
flags = par->fbtftops.request_gpios_match(par, gpio);
if (flags == FBTFT_GPIO_NO_MATCH)
@@ -319,16 +321,13 @@ EXPORT_SYMBOL(fbtft_unregister_backlight);
static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
int ye)
{
- /* Column address set */
- write_reg(par, 0x2A,
- (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
+ write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
+ (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);
- /* Row address set */
- write_reg(par, 0x2B,
- (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
+ write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
+ (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);
- /* Memory write */
- write_reg(par, 0x2C);
+ write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
static void fbtft_reset(struct fbtft_par *par)
@@ -520,8 +519,7 @@ static ssize_t fbtft_fb_write(struct fb_info *info, const char __user *buf,
"%s: count=%zd, ppos=%llu\n", __func__, count, *ppos);
res = fb_sys_write(info, buf, count, ppos);
- /* TODO: only mark changed area
- update all for now */
+ /* TODO: only mark changed area update all for now */
par->fbtftops.mkdirty(info, -1, 0);
return res;
@@ -738,8 +736,11 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
goto alloc_fail;
if (display->gamma_num && display->gamma_len) {
- gamma_curves = devm_kzalloc(dev, display->gamma_num * display->gamma_len * sizeof(gamma_curves[0]),
- GFP_KERNEL);
+ gamma_curves = devm_kcalloc(dev,
+ display->gamma_num *
+ display->gamma_len,
+ sizeof(gamma_curves[0]),
+ GFP_KERNEL);
if (!gamma_curves)
goto alloc_fail;
}
@@ -987,10 +988,6 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
reg_fail:
if (par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight(par);
- if (spi)
- spi_set_drvdata(spi, NULL);
- if (par->pdev)
- platform_set_drvdata(par->pdev, NULL);
return ret;
}
@@ -1008,12 +1005,7 @@ EXPORT_SYMBOL(fbtft_register_framebuffer);
int fbtft_unregister_framebuffer(struct fb_info *fb_info)
{
struct fbtft_par *par = fb_info->par;
- struct spi_device *spi = par->spi;
- if (spi)
- spi_set_drvdata(spi, NULL);
- if (par->pdev)
- platform_set_drvdata(par->pdev, NULL);
if (par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight(par);
fbtft_sysfs_exit(par);
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 3ccdec94fee7..d3bc3943a983 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -20,14 +20,6 @@
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
-#define FBTFT_NOP 0x00
-#define FBTFT_SWRESET 0x01
-#define FBTFT_RDDID 0x04
-#define FBTFT_RDDST 0x09
-#define FBTFT_CASET 0x2A
-#define FBTFT_RASET 0x2B
-#define FBTFT_RAMWR 0x2C
-
#define FBTFT_ONBOARD_BACKLIGHT 2
#define FBTFT_GPIO_NO_MATCH 0xFFFF
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 071f79bd19f3..241d7c6bebde 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -212,38 +212,63 @@ static int hy28b_init_sequence[] = {
"0F 00 1 7 4 0 0 0 6 7"
static int pitft_init_sequence[] = {
- -1, 0x01, -2, 5, -1, 0x28, -1, 0xEF,
- 0x03, 0x80, 0x02, -1, 0xCF, 0x00, 0xC1, 0x30,
+ -1, MIPI_DCS_SOFT_RESET,
+ -2, 5,
+ -1, MIPI_DCS_SET_DISPLAY_OFF,
+ -1, 0xEF, 0x03, 0x80, 0x02,
+ -1, 0xCF, 0x00, 0xC1, 0x30,
-1, 0xED, 0x64, 0x03, 0x12, 0x81,
-1, 0xE8, 0x85, 0x00, 0x78,
-1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
- -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00,
- -1, 0xC0, 0x23, -1, 0xC1, 0x10, -1, 0xC5,
- 0x3e, 0x28, -1, 0xC7, 0x86, -1, 0x3A, 0x55,
- -1, 0xB1, 0x00, 0x18, -1, 0xB6, 0x08, 0x82,
- 0x27, -1, 0xF2, 0x00, -1, 0x26, 0x01,
- -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08,
- 0x4E, 0xF1, 0x37, 0x07, 0x10, 0x03,
- 0x0E, 0x09, 0x00, -1, 0xE1, 0x00, 0x0E, 0x14,
- 0x03, 0x11, 0x07, 0x31, 0xC1, 0x48,
- 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F, -1,
- 0x11, -2, 100, -1, 0x29, -2, 20, -3 };
+ -1, 0xF7, 0x20,
+ -1, 0xEA, 0x00, 0x00,
+ -1, 0xC0, 0x23,
+ -1, 0xC1, 0x10,
+ -1, 0xC5, 0x3E, 0x28,
+ -1, 0xC7, 0x86,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
+ -1, 0xB1, 0x00, 0x18,
+ -1, 0xB6, 0x08, 0x82, 0x27,
+ -1, 0xF2, 0x00,
+ -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
+ -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
+ 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
+ -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
+ 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
+ -2, 100,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
+ -2, 20,
+ -3
+};
static int waveshare32b_init_sequence[] = {
-1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-1, 0xCF, 0x00, 0xC1, 0x30,
- -1, 0xE8, 0x85, 0x00, 0x78, -1, 0xEA, 0x00,
- 0x00, -1, 0xED, 0x64, 0x03, 0x12, 0x81,
- -1, 0xF7, 0x20, -1, 0xC0, 0x23, -1, 0xC1,
- 0x10, -1, 0xC5, 0x3e, 0x28, -1, 0xC7, 0x86,
- -1, 0x36, 0x28, -1, 0x3A, 0x55, -1, 0xB1, 0x00,
- 0x18, -1, 0xB6, 0x08, 0x82, 0x27,
- -1, 0xF2, 0x00, -1, 0x26, 0x01,
+ -1, 0xE8, 0x85, 0x00, 0x78,
+ -1, 0xEA, 0x00, 0x00,
+ -1, 0xED, 0x64, 0x03, 0x12, 0x81,
+ -1, 0xF7, 0x20,
+ -1, 0xC0, 0x23,
+ -1, 0xC1, 0x10,
+ -1, 0xC5, 0x3E, 0x28,
+ -1, 0xC7, 0x86,
+ -1, MIPI_DCS_SET_ADDRESS_MODE, 0x28,
+ -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
+ -1, 0xB1, 0x00, 0x18,
+ -1, 0xB6, 0x08, 0x82, 0x27,
+ -1, 0xF2, 0x00,
+ -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
-1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
- 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
+ 0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
-1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
- 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
- -1, 0x11, -2, 120, -1, 0x29, -1, 0x2c, -3 };
+ 0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
+ -1, MIPI_DCS_EXIT_SLEEP_MODE,
+ -2, 120,
+ -1, MIPI_DCS_SET_DISPLAY_ON,
+ -1, MIPI_DCS_WRITE_MEMORY_START,
+ -3
+};
/* Supported displays in alphabetical order */
static struct fbtft_device_display displays[] = {
@@ -1287,7 +1312,7 @@ Device 'xxx' does not have a release() function, it is broken and must be fixed
static int spi_device_found(struct device *dev, void *data)
{
- struct spi_device *spi = container_of(dev, struct spi_device, dev);
+ struct spi_device *spi = to_spi_device(dev);
dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias,
dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word,
@@ -1305,7 +1330,7 @@ static void pr_spi_devices(void)
static int p_device_found(struct device *dev, void *data)
{
struct platform_device
- *pdev = container_of(dev, struct platform_device, dev);
+ *pdev = to_platform_device(dev);
if (strstr(pdev->name, "fb"))
dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id,
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
index 0d779d9ccbd8..1f959339c671 100644
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
@@ -7,8 +7,9 @@
#
config FSL_MC_BUS
- tristate "Freescale Management Complex (MC) bus driver"
+ bool "Freescale Management Complex (MC) bus driver"
depends on OF && ARM64
+ select GENERIC_MSI_IRQ_DOMAIN
help
Driver to enable the bus infrastructure for the Freescale
QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile
index 25433a998478..e7315170b7a3 100644
--- a/drivers/staging/fsl-mc/bus/Makefile
+++ b/drivers/staging/fsl-mc/bus/Makefile
@@ -13,5 +13,7 @@ mc-bus-driver-objs := mc-bus.o \
dpmng.o \
dprc-driver.o \
mc-allocator.o \
+ mc-msi.o \
+ irq-gic-v3-its-fsl-mc-msi.o \
dpmcp.o \
dpbp.o
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 2c4cd70b4cbb..31488a7b9e86 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -13,6 +13,8 @@
#include "../include/mc-sys.h"
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
#include "dprc-cmd.h"
struct dprc_child_objs {
@@ -127,7 +129,7 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
{
int error;
u32 plugged_flag_at_mc =
- (obj_desc->state & DPRC_OBJ_STATE_PLUGGED);
+ obj_desc->state & DPRC_OBJ_STATE_PLUGGED;
if (plugged_flag_at_mc !=
(mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) {
@@ -241,6 +243,7 @@ static void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
* dprc_scan_objects - Discover objects in a DPRC
*
* @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object
+ * @total_irq_count: total number of IRQs needed by objects in the DPRC.
*
* Detects objects added and removed from a DPRC and synchronizes the
* state of the Linux bus driver, MC by adding and removing
@@ -254,11 +257,13 @@ static void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev)
* populated before they can get allocation requests from probe callbacks
* of the device drivers for the non-allocatable devices.
*/
-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev)
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ unsigned int *total_irq_count)
{
int num_child_objects;
int dprc_get_obj_failures;
int error;
+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count;
struct dprc_obj_desc *child_obj_desc_array = NULL;
error = dprc_get_obj_count(mc_bus_dev->mc_io,
@@ -307,6 +312,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev)
continue;
}
+ irq_count += obj_desc->irq_count;
dev_dbg(&mc_bus_dev->dev,
"Discovered object: type %s, id %d\n",
obj_desc->type, obj_desc->id);
@@ -319,6 +325,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev)
}
}
+ *total_irq_count = irq_count;
dprc_remove_devices(mc_bus_dev, child_obj_desc_array,
num_child_objects);
@@ -344,6 +351,7 @@ EXPORT_SYMBOL_GPL(dprc_scan_objects);
int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
{
int error;
+ unsigned int irq_count;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
dprc_init_all_resource_pools(mc_bus_dev);
@@ -352,11 +360,25 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev)
* Discover objects in the DPRC:
*/
mutex_lock(&mc_bus->scan_mutex);
- error = dprc_scan_objects(mc_bus_dev);
+ error = dprc_scan_objects(mc_bus_dev, &irq_count);
mutex_unlock(&mc_bus->scan_mutex);
if (error < 0)
goto error;
+ if (dev_get_msi_domain(&mc_bus_dev->dev) && !mc_bus->irq_resources) {
+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+ dev_warn(&mc_bus_dev->dev,
+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ }
+
+		error = fsl_mc_populate_irq_pool(mc_bus,
+					FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ if (error < 0)
+ goto error;
+ }
+
return 0;
error:
dprc_cleanup_all_resource_pools(mc_bus_dev);
@@ -365,6 +387,230 @@ error:
EXPORT_SYMBOL_GPL(dprc_scan_container);
/**
+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
+ *
+ * @irq_num: IRQ number of the interrupt being handled
+ * @arg: Pointer to device structure
+ */
+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
+{
+ int error;
+ u32 status;
+ struct device *dev = arg;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+ struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+
+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
+ irq_num, smp_processor_id());
+
+ if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC)))
+ return IRQ_HANDLED;
+
+ mutex_lock(&mc_bus->scan_mutex);
+ if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
+ goto out;
+
+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ &status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_get_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
+ status);
+ if (error < 0) {
+ dev_err(dev,
+ "dprc_clear_irq_status() failed: %d\n", error);
+ goto out;
+ }
+
+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED |
+ DPRC_IRQ_EVENT_OBJ_REMOVED |
+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_DESTROYED |
+ DPRC_IRQ_EVENT_OBJ_CREATED)) {
+ unsigned int irq_count;
+
+ error = dprc_scan_objects(mc_dev, &irq_count);
+ if (error < 0) {
+ /*
+			 * Ignore -ENXIO: it indicates that the object scan
+			 * was aborted because an object was removed from
+			 * the DPRC in the MC while we were scanning it.
+ */
+ if (error != -ENXIO) {
+ dev_err(dev, "dprc_scan_objects() failed: %d\n",
+ error);
+ }
+
+ goto out;
+ }
+
+ if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
+ dev_warn(dev,
+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
+ irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
+ }
+ }
+
+out:
+ mutex_unlock(&mc_bus->scan_mutex);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Disable and clear interrupt for a given DPRC object
+ */
+static int disable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_io *mc_io = mc_dev->mc_io;
+
+ WARN_ON(mc_dev->obj_desc.irq_count != 1);
+
+ /*
+ * Disable generation of interrupt, while we configure it:
+ */
+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, 0, 0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Disable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, 0, 0x0);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+ return error;
+ }
+
+ /*
+ * Clear any leftover interrupts:
+ */
+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, 0, ~0x0U);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Disabling DPRC IRQ failed: dprc_clear_irq_status() failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
+{
+ int error;
+ struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
+ WARN_ON(mc_dev->obj_desc.irq_count != 1);
+
+ /*
+ * NOTE: devm_request_threaded_irq() invokes the device-specific
+ * function that programs the MSI physically in the device
+ */
+ error = devm_request_threaded_irq(&mc_dev->dev,
+ irq->msi_desc->irq,
+ dprc_irq0_handler,
+ dprc_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ "FSL MC DPRC irq0",
+ &mc_dev->dev);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "devm_request_threaded_irq() failed: %d\n",
+ error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int enable_dprc_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ /*
+ * Enable all interrupt causes for the interrupt:
+ */
+ error = dprc_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, 0,
+ ~0x0u);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_mask() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ /*
+ * Enable generation of the interrupt:
+ */
+ error = dprc_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, 0, 1);
+ if (error < 0) {
+ dev_err(&mc_dev->dev,
+ "Enabling DPRC IRQ failed: dprc_set_irq_enable() failed: %d\n",
+ error);
+
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Setup interrupt for a given DPRC device
+ */
+static int dprc_setup_irq(struct fsl_mc_device *mc_dev)
+{
+ int error;
+
+ error = fsl_mc_allocate_irqs(mc_dev);
+ if (error < 0)
+ return error;
+
+ error = disable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = register_dprc_irq_handler(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ error = enable_dprc_irq(mc_dev);
+ if (error < 0)
+ goto error_free_irqs;
+
+ return 0;
+
+error_free_irqs:
+ fsl_mc_free_irqs(mc_dev);
+ return error;
+}
+
+/**
* dprc_probe - callback invoked when a DPRC is being bound to this driver
*
* @mc_dev: Pointer to fsl-mc device representing a DPRC
@@ -378,15 +624,24 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
{
int error;
size_t region_size;
+ struct device *parent_dev = mc_dev->dev.parent;
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
+ bool mc_io_created = false;
+ bool msi_domain_set = false;
if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
return -EINVAL;
+ if (WARN_ON(dev_get_msi_domain(&mc_dev->dev)))
+ return -EINVAL;
+
if (!mc_dev->mc_io) {
/*
* This is a child DPRC:
*/
+ if (WARN_ON(parent_dev->bus != &fsl_mc_bus_type))
+ return -EINVAL;
+
if (WARN_ON(mc_dev->obj_desc.region_count == 0))
return -EINVAL;
@@ -396,16 +651,45 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
error = fsl_create_mc_io(&mc_dev->dev,
mc_dev->regions[0].start,
region_size,
- NULL, 0, &mc_dev->mc_io);
+ NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &mc_dev->mc_io);
if (error < 0)
return error;
+
+ mc_io_created = true;
+
+ /*
+ * Inherit parent MSI domain:
+ */
+ dev_set_msi_domain(&mc_dev->dev,
+ dev_get_msi_domain(parent_dev));
+ msi_domain_set = true;
+ } else {
+ /*
+ * This is a root DPRC
+ */
+ struct irq_domain *mc_msi_domain;
+
+ if (WARN_ON(parent_dev->bus == &fsl_mc_bus_type))
+ return -EINVAL;
+
+ error = fsl_mc_find_msi_domain(parent_dev,
+ &mc_msi_domain);
+ if (error < 0) {
+ dev_warn(&mc_dev->dev,
+ "WARNING: MC bus without interrupt support\n");
+ } else {
+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+ msi_domain_set = true;
+ }
}
error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
&mc_dev->mc_handle);
if (error < 0) {
dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error);
- goto error_cleanup_mc_io;
+ goto error_cleanup_msi_domain;
}
mutex_init(&mc_bus->scan_mutex);
@@ -417,17 +701,40 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
if (error < 0)
goto error_cleanup_open;
+ /*
+ * Configure interrupt for the DPRC object associated with this MC bus:
+ */
+ error = dprc_setup_irq(mc_dev);
+ if (error < 0)
+ goto error_cleanup_open;
+
dev_info(&mc_dev->dev, "DPRC device bound to driver");
return 0;
error_cleanup_open:
(void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
-error_cleanup_mc_io:
- fsl_destroy_mc_io(mc_dev->mc_io);
+error_cleanup_msi_domain:
+ if (msi_domain_set)
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+
+ if (mc_io_created) {
+ fsl_destroy_mc_io(mc_dev->mc_io);
+ mc_dev->mc_io = NULL;
+ }
+
return error;
}
+/*
+ * Tear down interrupt for a given DPRC object
+ */
+static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
+{
+ (void)disable_dprc_irq(mc_dev);
+ fsl_mc_free_irqs(mc_dev);
+}
+
/**
* dprc_remove - callback invoked when a DPRC is being unbound from this driver
*
@@ -441,18 +748,30 @@ error_cleanup_mc_io:
static int dprc_remove(struct fsl_mc_device *mc_dev)
{
int error;
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
return -EINVAL;
if (WARN_ON(!mc_dev->mc_io))
return -EINVAL;
+ if (WARN_ON(!mc_bus->irq_resources))
+ return -EINVAL;
+
+ if (dev_get_msi_domain(&mc_dev->dev))
+ dprc_teardown_irq(mc_dev);
+
device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
dprc_cleanup_all_resource_pools(mc_dev);
error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
if (error < 0)
dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
+ if (dev_get_msi_domain(&mc_dev->dev)) {
+ fsl_mc_cleanup_irq_pool(mc_bus);
+ dev_set_msi_domain(&mc_dev->dev, NULL);
+ }
+
dev_info(&mc_dev->dev, "DPRC device unbound from driver");
return 0;
}
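
The interrupt handling added above is the standard threaded-IRQ split: the hard handler does no MC I/O and merely wakes the thread, and everything that can sleep (the scan mutex, MC commands, bus rescans) runs in the thread. Reduced to a skeleton (handler names here are illustrative):

	static irqreturn_t hard_handler(int irq, void *arg)
	{
		return IRQ_WAKE_THREAD;	/* defer all work to the thread */
	}

	static irqreturn_t thread_handler(int irq, void *arg)
	{
		/* may sleep: take scan_mutex, send MC commands, rescan */
		return IRQ_HANDLED;
	}

	/* IRQF_ONESHOT keeps the IRQ masked until the thread returns */
	error = devm_request_threaded_irq(dev, irq, hard_handler,
					  thread_handler,
					  IRQF_NO_SUSPEND | IRQF_ONESHOT,
					  "FSL MC DPRC irq0", dev);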
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
new file mode 100644
index 000000000000..720e2b018d00
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -0,0 +1,125 @@
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "../include/mc-private.h"
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include "../include/mc-sys.h"
+#include "dprc-cmd.h"
+
+static struct irq_chip its_msi_irq_chip = {
+ .name = "fsl-mc-bus-msi",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = msi_domain_set_affinity
+};
+
+static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
+ struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct fsl_mc_device *mc_bus_dev;
+ struct msi_domain_info *msi_info;
+
+ if (WARN_ON(dev->bus != &fsl_mc_bus_type))
+ return -EINVAL;
+
+ mc_bus_dev = to_fsl_mc_device(dev);
+ if (WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC)))
+ return -EINVAL;
+
+ /*
+ * Set the device Id to be passed to the GIC-ITS:
+ *
+ * NOTE: This device id corresponds to the IOMMU stream ID
+ * associated with the DPRC object (ICID).
+ */
+ info->scratchpad[0].ul = mc_bus_dev->icid;
+ msi_info = msi_get_domain_info(msi_domain->parent);
+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+}
+
+static struct msi_domain_ops its_fsl_mc_msi_ops = {
+ .msi_prepare = its_fsl_mc_msi_prepare,
+};
+
+static struct msi_domain_info its_fsl_mc_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .ops = &its_fsl_mc_msi_ops,
+ .chip = &its_msi_irq_chip,
+};
+
+static const struct of_device_id its_device_id[] = {
+ { .compatible = "arm,gic-v3-its", },
+ {},
+};
+
+int __init its_fsl_mc_msi_init(void)
+{
+ struct device_node *np;
+ struct irq_domain *parent;
+ struct irq_domain *mc_msi_domain;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ pr_err("%s: unable to locate ITS domain\n",
+ np->full_name);
+ continue;
+ }
+
+		mc_msi_domain =
+			fsl_mc_msi_create_irq_domain(of_node_to_fwnode(np),
+					&its_fsl_mc_msi_domain_info, parent);
+ if (!mc_msi_domain) {
+ pr_err("%s: unable to create fsl-mc domain\n",
+ np->full_name);
+ continue;
+ }
+
+		WARN_ON(mc_msi_domain->host_data !=
+			&its_fsl_mc_msi_domain_info);
+
+ pr_info("fsl-mc MSI: %s domain created\n", np->full_name);
+ }
+
+ return 0;
+}
+
+void its_fsl_mc_msi_cleanup(void)
+{
+ struct device_node *np;
+
+ for (np = of_find_matching_node(NULL, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+		struct irq_domain *mc_msi_domain =
+			irq_find_matching_host(np, DOMAIN_BUS_FSL_MC_MSI);
+
+ if (!of_property_read_bool(np, "msi-controller"))
+ continue;
+
+ if (mc_msi_domain &&
+ mc_msi_domain->host_data == &its_fsl_mc_msi_domain_info)
+ irq_domain_remove(mc_msi_domain);
+ }
+}
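
The key line in its_fsl_mc_msi_prepare() above is the scratchpad assignment: info->scratchpad[0].ul carries the DPRC's ICID down to the GIC-ITS parent domain, which uses it as the ITS device ID (the same ID the IOMMU sees as the stream ID for that container) before the rest of the preparation is delegated to the parent domain's msi_prepare().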
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
index 88d1857521a5..86f8543c2b9a 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
@@ -15,6 +15,7 @@
#include "../include/dpcon-cmd.h"
#include "dpmcp-cmd.h"
#include "dpmcp.h"
+#include <linux/msi.h>
/**
* fsl_mc_resource_pool_add_device - add allocatable device to a resource
@@ -160,6 +161,7 @@ static const char *const fsl_mc_pool_type_strings[] = {
[FSL_MC_POOL_DPMCP] = "dpmcp",
[FSL_MC_POOL_DPBP] = "dpbp",
[FSL_MC_POOL_DPCON] = "dpcon",
+ [FSL_MC_POOL_IRQ] = "irq",
};
static int __must_check object_type_to_pool_type(const char *object_type,
@@ -465,6 +467,203 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
}
EXPORT_SYMBOL_GPL(fsl_mc_object_free);
+/*
+ * Initialize the interrupt pool associated with a MC bus.
+ * It allocates a block of IRQs from the GIC-ITS
+ */
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ unsigned int irq_count)
+{
+ unsigned int i;
+ struct msi_desc *msi_desc;
+ struct fsl_mc_device_irq *irq_resources;
+ struct fsl_mc_device_irq *mc_dev_irq;
+ int error;
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ if (WARN_ON(irq_count == 0 ||
+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS))
+ return -EINVAL;
+
+ error = fsl_mc_msi_domain_alloc_irqs(&mc_bus_dev->dev, irq_count);
+ if (error < 0)
+ return error;
+
+	irq_resources = devm_kcalloc(&mc_bus_dev->dev, irq_count,
+				     sizeof(*irq_resources), GFP_KERNEL);
+ if (!irq_resources) {
+ error = -ENOMEM;
+ goto cleanup_msi_irqs;
+ }
+
+ for (i = 0; i < irq_count; i++) {
+ mc_dev_irq = &irq_resources[i];
+
+ /*
+ * NOTE: This mc_dev_irq's MSI addr/value pair will be set
+ * by the fsl_mc_msi_write_msg() callback
+ */
+ mc_dev_irq->resource.type = res_pool->type;
+ mc_dev_irq->resource.data = mc_dev_irq;
+ mc_dev_irq->resource.parent_pool = res_pool;
+ INIT_LIST_HEAD(&mc_dev_irq->resource.node);
+ list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
+ }
+
+ for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
+ mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
+ mc_dev_irq->msi_desc = msi_desc;
+ mc_dev_irq->resource.id = msi_desc->irq;
+ }
+
+ res_pool->max_count = irq_count;
+ res_pool->free_count = irq_count;
+ mc_bus->irq_resources = irq_resources;
+ return 0;
+
+cleanup_msi_irqs:
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
+
+/**
+ * Tear down the interrupt pool associated with an MC bus.
+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
+ */
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
+{
+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
+ struct fsl_mc_resource_pool *res_pool =
+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+
+ if (WARN_ON(!mc_bus->irq_resources))
+ return;
+
+ if (WARN_ON(res_pool->max_count == 0))
+ return;
+
+ if (WARN_ON(res_pool->free_count != res_pool->max_count))
+ return;
+
+ INIT_LIST_HEAD(&res_pool->free_list);
+ res_pool->max_count = 0;
+ res_pool->free_count = 0;
+ mc_bus->irq_resources = NULL;
+ fsl_mc_msi_domain_free_irqs(&mc_bus_dev->dev);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
+
+/**
+ * Allocates the IRQs required by a given MC object device. If the
+ * device is not a DPRC, the IRQs come from the interrupt pool of the
+ * MC bus that contains it; for a DPRC device, they come from the
+ * interrupt pool of the MC bus that the DPRC itself represents.
+ */
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ int res_allocated_count = 0;
+ int error = -EINVAL;
+ struct fsl_mc_device_irq **irqs = NULL;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_resource_pool *res_pool;
+
+ if (WARN_ON(mc_dev->irqs))
+ return -EINVAL;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+ if (WARN_ON(irq_count == 0))
+ return -EINVAL;
+
+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (WARN_ON(!mc_bus->irq_resources))
+ return -EINVAL;
+
+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ];
+ if (res_pool->free_count < irq_count) {
+ dev_err(&mc_dev->dev,
+ "Not able to allocate %u irqs for device\n", irq_count);
+ return -ENOSPC;
+ }
+
+	irqs = devm_kcalloc(&mc_dev->dev, irq_count, sizeof(irqs[0]),
+			    GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_count; i++) {
+ struct fsl_mc_resource *resource;
+
+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ,
+ &resource);
+ if (error < 0)
+ goto error_resource_alloc;
+
+ irqs[i] = to_fsl_mc_irq(resource);
+ res_allocated_count++;
+
+ WARN_ON(irqs[i]->mc_dev);
+ irqs[i]->mc_dev = mc_dev;
+ irqs[i]->dev_irq_index = i;
+ }
+
+ mc_dev->irqs = irqs;
+ return 0;
+
+error_resource_alloc:
+ for (i = 0; i < res_allocated_count; i++) {
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
+
+/*
+ * It frees the IRQs that were allocated for an MC object device by
+ * returning them to the corresponding interrupt pool.
+ */
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
+{
+ int i;
+ int irq_count;
+ struct fsl_mc_bus *mc_bus;
+ struct fsl_mc_device_irq **irqs = mc_dev->irqs;
+
+ if (WARN_ON(!irqs))
+ return;
+
+ irq_count = mc_dev->obj_desc.irq_count;
+
+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
+ mc_bus = to_fsl_mc_bus(mc_dev);
+ else
+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent));
+
+ if (WARN_ON(!mc_bus->irq_resources))
+ return;
+
+ for (i = 0; i < irq_count; i++) {
+ WARN_ON(!irqs[i]->mc_dev);
+ irqs[i]->mc_dev = NULL;
+ fsl_mc_resource_free(&irqs[i]->resource);
+ }
+
+ mc_dev->irqs = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs);
+
/**
* fsl_mc_allocator_probe - callback invoked when an allocatable device is
* being added to the system
@@ -557,7 +756,7 @@ int __init fsl_mc_allocator_driver_init(void)
return fsl_mc_driver_register(&fsl_mc_allocator_driver);
}
-void __exit fsl_mc_allocator_driver_exit(void)
+void fsl_mc_allocator_driver_exit(void)
{
fsl_mc_driver_unregister(&fsl_mc_allocator_driver);
}
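
Taken together, a consumer of this pool follows the same pattern as dprc_setup_irq() in the DPRC driver: allocate from the pool, hook up handlers via the Linux IRQ number stored in each entry's MSI descriptor, and return the IRQs on failure or teardown. A sketch (handler names are placeholders):

	error = fsl_mc_allocate_irqs(mc_dev);
	if (error < 0)
		return error;

	/* each entry carries its Linux IRQ number in the MSI descriptor */
	error = devm_request_threaded_irq(&mc_dev->dev,
					  mc_dev->irqs[0]->msi_desc->irq,
					  hard_handler, thread_handler,
					  IRQF_NO_SUSPEND | IRQF_ONESHOT,
					  dev_name(&mc_dev->dev),
					  &mc_dev->dev);
	if (error < 0)
		fsl_mc_free_irqs(mc_dev);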
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
index 84db55b4dda5..b59455661f4d 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/mc-bus.c
@@ -16,6 +16,8 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/limits.h>
+#include <linux/bitops.h>
+#include <linux/msi.h>
#include "../include/dpmng.h"
#include "../include/mc-sys.h"
#include "dprc-cmd.h"
@@ -246,8 +248,7 @@ static bool fsl_mc_is_root_dprc(struct device *dev)
fsl_mc_get_root_dprc(dev, &root_dprc_dev);
if (!root_dprc_dev)
return false;
- else
- return dev == root_dprc_dev;
+ return dev == root_dprc_dev;
}
static int get_dprc_icid(struct fsl_mc_io *mc_io,
@@ -259,14 +260,15 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
error = dprc_open(mc_io, 0, container_id, &dprc_handle);
if (error < 0) {
- pr_err("dprc_open() failed: %d\n", error);
+ dev_err(mc_io->dev, "dprc_open() failed: %d\n", error);
return error;
}
memset(&attr, 0, sizeof(attr));
error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr);
if (error < 0) {
- pr_err("dprc_get_attributes() failed: %d\n", error);
+ dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
+ error);
goto common_cleanup;
}
@@ -472,6 +474,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
mc_dev->icid = parent_mc_dev->icid;
mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+ dev_set_msi_domain(&mc_dev->dev,
+ dev_get_msi_domain(&parent_mc_dev->dev));
}
/*
@@ -702,7 +706,8 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
mc_portal_phys_addr = res.start;
mc_portal_size = resource_size(&res);
error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
- mc_portal_size, NULL, 0, &mc_io);
+ mc_portal_size, NULL,
+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
if (error < 0)
return error;
@@ -790,7 +795,6 @@ MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
static struct platform_driver fsl_mc_bus_driver = {
.driver = {
.name = "fsl_mc_bus",
- .owner = THIS_MODULE,
.pm = NULL,
.of_match_table = fsl_mc_bus_match_table,
},
@@ -832,8 +836,15 @@ static int __init fsl_mc_bus_driver_init(void)
if (error < 0)
goto error_cleanup_dprc_driver;
+ error = its_fsl_mc_msi_init();
+ if (error < 0)
+ goto error_cleanup_mc_allocator;
+
return 0;
+error_cleanup_mc_allocator:
+ fsl_mc_allocator_driver_exit();
+
error_cleanup_dprc_driver:
dprc_driver_exit();
@@ -855,6 +866,7 @@ static void __exit fsl_mc_bus_driver_exit(void)
if (WARN_ON(!mc_dev_cache))
return;
+ its_fsl_mc_msi_cleanup();
fsl_mc_allocator_driver_exit();
dprc_driver_exit();
platform_driver_unregister(&fsl_mc_bus_driver);
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
new file mode 100644
index 000000000000..3a8258ff4426
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -0,0 +1,276 @@
+/*
+ * Freescale Management Complex (MC) bus driver MSI support
+ *
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Author: German Rivera <German.Rivera@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "../include/mc-private.h"
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include "../include/mc-sys.h"
+#include "dprc-cmd.h"
+
+static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index;
+}
+
+static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ if (WARN_ON(!ops))
+ return;
+
+ /*
+ * set_desc should not be set by the caller
+ */
+ if (WARN_ON(ops->set_desc))
+ return;
+
+ ops->set_desc = fsl_mc_msi_set_desc;
+}
+
+static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_device_irq *mc_dev_irq)
+{
+ int error;
+ struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
+ struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
+ struct dprc_irq_cfg irq_cfg;
+
+ /*
+ * msi_desc->msg.address is 0x0 when this function is invoked in
+ * the free_irq() code path. In this case, for the MC, we don't
+ * really need to "unprogram" the MSI, so we just return.
+ */
+ if (msi_desc->msg.address_lo == 0x0 && msi_desc->msg.address_hi == 0x0)
+ return;
+
+ if (WARN_ON(!owner_mc_dev))
+ return;
+
+ irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
+ msi_desc->msg.address_lo;
+ irq_cfg.val = msi_desc->msg.data;
+ irq_cfg.user_irq_id = msi_desc->irq;
+
+ if (owner_mc_dev == mc_bus_dev) {
+ /*
+ * IRQ is for the mc_bus_dev's DPRC itself
+ */
+ error = dprc_set_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+ "dprc_set_irq() failed: %d\n", error);
+ }
+ } else {
+ /*
+		 * IRQ is for a child device of mc_bus_dev
+ */
+ error = dprc_set_obj_irq(mc_bus_dev->mc_io,
+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI,
+ mc_bus_dev->mc_handle,
+ owner_mc_dev->obj_desc.type,
+ owner_mc_dev->obj_desc.id,
+ mc_dev_irq->dev_irq_index,
+ &irq_cfg);
+ if (error < 0) {
+ dev_err(&owner_mc_dev->dev,
+				"dprc_set_obj_irq() failed: %d\n", error);
+ }
+ }
+}
+
+/*
+ * NOTE: This function is invoked with interrupts disabled
+ */
+static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
+ struct msi_msg *msg)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ struct fsl_mc_device_irq *mc_dev_irq =
+ &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+
+ WARN_ON(mc_dev_irq->msi_desc != msi_desc);
+ msi_desc->msg = *msg;
+
+ /*
+ * Program the MSI (paddr, value) pair in the device:
+ */
+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+}
+
+static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+	if (WARN_ON(!chip))
+ return;
+
+ /*
+ * irq_write_msi_msg should not be set by the caller
+ */
+ if (WARN_ON(chip->irq_write_msi_msg))
+ return;
+
+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+}
+
+/**
+ * fsl_mc_msi_create_irq_domain - Create a fsl-mc MSI interrupt domain
+ * @fwnode: Optional firmware node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a fsl-mc MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ fsl_mc_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ fsl_mc_msi_update_chip_ops(info);
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ domain->bus_token = DOMAIN_BUS_FSL_MC_MSI;
+
+ return domain;
+}
+
+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+ struct irq_domain **mc_msi_domain)
+{
+ struct irq_domain *msi_domain;
+ struct device_node *mc_of_node = mc_platform_dev->of_node;
+
+ msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
+ DOMAIN_BUS_FSL_MC_MSI);
+ if (!msi_domain) {
+ pr_err("Unable to find fsl-mc MSI domain for %s\n",
+ mc_of_node->full_name);
+
+ return -ENOENT;
+ }
+
+ *mc_msi_domain = msi_domain;
+ return 0;
+}
+
+static void fsl_mc_msi_free_descs(struct device *dev)
+{
+ struct msi_desc *desc, *tmp;
+
+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+ list_del(&desc->list);
+ free_msi_entry(desc);
+ }
+}
+
+static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
+{
+ unsigned int i;
+ int error;
+ struct msi_desc *msi_desc;
+
+ for (i = 0; i < irq_count; i++) {
+ msi_desc = alloc_msi_entry(dev);
+ if (!msi_desc) {
+ dev_err(dev, "Failed to allocate msi entry\n");
+ error = -ENOMEM;
+ goto cleanup_msi_descs;
+ }
+
+ msi_desc->fsl_mc.msi_index = i;
+ msi_desc->nvec_used = 1;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ }
+
+ return 0;
+
+cleanup_msi_descs:
+ fsl_mc_msi_free_descs(dev);
+ return error;
+}
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count)
+{
+ struct irq_domain *msi_domain;
+ int error;
+
+ if (WARN_ON(!list_empty(dev_to_msi_list(dev))))
+ return -EINVAL;
+
+ error = fsl_mc_msi_alloc_descs(dev, irq_count);
+ if (error < 0)
+ return error;
+
+ msi_domain = dev_get_msi_domain(dev);
+ if (WARN_ON(!msi_domain)) {
+ error = -EINVAL;
+ goto cleanup_msi_descs;
+ }
+
+ /*
+ * NOTE: Calling this function will trigger the invocation of the
+ * its_fsl_mc_msi_prepare() callback
+ */
+ error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
+
+ if (error) {
+ dev_err(dev, "Failed to allocate IRQs\n");
+ goto cleanup_msi_descs;
+ }
+
+ return 0;
+
+cleanup_msi_descs:
+ fsl_mc_msi_free_descs(dev);
+ return error;
+}
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev)
+{
+ struct irq_domain *msi_domain;
+
+ msi_domain = dev_get_msi_domain(dev);
+ if (WARN_ON(!msi_domain))
+ return;
+
+ msi_domain_free_irqs(msi_domain, dev);
+
+ if (WARN_ON(list_empty(dev_to_msi_list(dev))))
+ return;
+
+ fsl_mc_msi_free_descs(dev);
+}
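
End to end, the pieces in this file compose as follows: allocation creates the per-IRQ descriptors and defers to the MSI core, which calls back into the prepare and write_msg hooks installed here. A condensed outline:

	/*
	 * fsl_mc_msi_domain_alloc_irqs(dev, n)
	 *   -> fsl_mc_msi_alloc_descs()       allocate n msi_desc entries
	 *   -> msi_domain_alloc_irqs()        MSI core allocation, which
	 *        -> its_fsl_mc_msi_prepare()  sets the ITS device id (ICID)
	 *        -> fsl_mc_msi_write_msg()    captures (addr, data) and
	 *             programs it into the MC via dprc_set_irq() or
	 *             dprc_set_obj_irq()
	 */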
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 6e1489246066..810a611c1cb0 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -328,7 +328,8 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
if (time_after_eq(jiffies, jiffies_until_timeout)) {
- pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
(unsigned int)
MC_CMD_HDR_READ_TOKEN(cmd->header),
@@ -369,7 +370,8 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
if (timeout_usecs == 0) {
- pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+ dev_dbg(mc_io->dev,
+ "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
mc_io->portal_phys_addr,
(unsigned int)
MC_CMD_HDR_READ_TOKEN(cmd->header),
@@ -424,7 +426,8 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
goto common_exit;
if (status != MC_CMD_STATUS_OK) {
- pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
+ dev_dbg(mc_io->dev,
+ "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
mc_io->portal_phys_addr,
(unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header),
(unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header),
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index c3152f677ff1..94c492706315 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -176,7 +176,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
* @user_irq_id: A user defined number associated with this IRQ
*/
struct dprc_irq_cfg {
- u64 paddr;
+ phys_addr_t paddr;
u32 val;
int user_irq_id;
};
diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h
index c706f778626e..ee5f1d2bf604 100644
--- a/drivers/staging/fsl-mc/include/mc-private.h
+++ b/drivers/staging/fsl-mc/include/mc-private.h
@@ -26,6 +26,19 @@
strcmp(_obj_type, "dpmcp") == 0 || \
strcmp(_obj_type, "dpcon") == 0)
+struct device_node;
+struct irq_domain;
+struct msi_domain_info;
+
+/**
+ * Maximum number of total IRQs that can be pre-allocated for an MC bus'
+ * IRQ pool
+ */
+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256
+
/**
* struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
* @root_mc_bus_dev: MC object device representing the root DPRC
@@ -79,11 +92,13 @@ struct fsl_mc_resource_pool {
* @resource_pools: array of resource pools (one pool per resource type)
* for this MC bus. These resources represent allocatable entities
* from the physical DPRC.
+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool
* @scan_mutex: Serializes bus scanning
*/
struct fsl_mc_bus {
struct fsl_mc_device mc_dev;
struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
+ struct fsl_mc_device_irq *irq_resources;
struct mutex scan_mutex; /* serializes bus scanning */
};
@@ -99,7 +114,8 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev);
int dprc_scan_container(struct fsl_mc_device *mc_bus_dev);
-int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev);
+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
+ unsigned int *total_irq_count);
int __init dprc_driver_init(void);
@@ -107,7 +123,7 @@ void dprc_driver_exit(void);
int __init fsl_mc_allocator_driver_init(void);
-void __exit fsl_mc_allocator_driver_exit(void);
+void fsl_mc_allocator_driver_exit(void);
int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
enum fsl_mc_pool_type pool_type,
@@ -116,4 +132,25 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
void fsl_mc_resource_free(struct fsl_mc_resource *resource);
+struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+
+int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
+ struct irq_domain **mc_msi_domain);
+
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
+ unsigned int irq_count);
+
+void fsl_mc_msi_domain_free_irqs(struct device *dev);
+
+int __init its_fsl_mc_msi_init(void);
+
+void its_fsl_mc_msi_cleanup(void);
+
+int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
+ unsigned int irq_count);
+
+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus);
+
#endif /* _FSL_MC_PRIVATE_H_ */
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index a933291e400a..ac7c1ce68c03 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -14,12 +14,14 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/list.h>
+#include <linux/interrupt.h>
#include "../include/dprc.h"
#define FSL_MC_VENDOR_FREESCALE 0x1957
struct fsl_mc_device;
struct fsl_mc_io;
+struct fsl_mc_bus;
/**
* struct fsl_mc_driver - MC object device driver object
@@ -75,6 +77,7 @@ enum fsl_mc_pool_type {
FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */
FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */
FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */
+ FSL_MC_POOL_IRQ,
/*
* NOTE: New resource pool types must be added before this entry
@@ -104,6 +107,23 @@ struct fsl_mc_resource {
};
/**
+ * struct fsl_mc_device_irq - MC object device message-based interrupt
+ * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
+ * @mc_dev: MC object device that owns this interrupt
+ * @dev_irq_index: device-relative IRQ index
+ * @resource: MC generic resource associated with the interrupt
+ */
+struct fsl_mc_device_irq {
+ struct msi_desc *msi_desc;
+ struct fsl_mc_device *mc_dev;
+ u8 dev_irq_index;
+ struct fsl_mc_resource resource;
+};
+
+#define to_fsl_mc_irq(_mc_resource) \
+ container_of(_mc_resource, struct fsl_mc_device_irq, resource)
+
+/**
* Bit masks for a MC object device (struct fsl_mc_device) flags
*/
#define FSL_MC_IS_DPRC 0x0001
@@ -124,6 +144,7 @@ struct fsl_mc_resource {
* NULL if none.
* @obj_desc: MC description of the DPAA device
* @regions: pointer to array of MMIO region entries
+ * @irqs: pointer to array of pointers to interrupts allocated to this device
* @resource: generic resource associated with this MC object device, if any.
*
* Generic device object for MC object devices that are "attached" to a
@@ -155,6 +176,7 @@ struct fsl_mc_device {
struct fsl_mc_io *mc_io;
struct dprc_obj_desc obj_desc;
struct resource *regions;
+ struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
};
@@ -198,6 +220,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
void fsl_mc_object_free(struct fsl_mc_device *mc_adev);
+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
+
+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+
extern struct bus_type fsl_mc_bus_type;
#endif /* _FSL_MC_H_ */
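
On the driver-facing side, the mc.h additions pair fsl_mc_allocate_irqs()/fsl_mc_free_irqs() with the new mc_dev->irqs array. A hedged sketch of the intended probe/remove usage (demo_* names are illustrative; the msi_desc->irq lookup is an assumption inferred from the struct layout above, not confirmed by this patch):

static int demo_probe(struct fsl_mc_device *mc_dev)
{
	int error;

	/* Draw this device's IRQs from the parent bus's IRQ pool. */
	error = fsl_mc_allocate_irqs(mc_dev);
	if (error)
		return error;

	/* mc_dev->irqs[i]->msi_desc->irq should then be the Linux IRQ
	 * number to hand to request_irq() for device IRQ index i
	 * (assumption, based on struct fsl_mc_device_irq above). */
	return 0;
}

static void demo_remove(struct fsl_mc_device *mc_dev)
{
	/* Return the IRQs to the pool on teardown. */
	fsl_mc_free_irqs(mc_dev);
}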
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
index 7a3347c3d02b..4cd3ed3ee141 100644
--- a/drivers/staging/fwserial/dma_fifo.c
+++ b/drivers/staging/fwserial/dma_fifo.c
@@ -106,7 +106,7 @@ void dma_fifo_free(struct dma_fifo *fifo)
{
struct dma_pending *pending, *next;
- if (fifo->data == NULL)
+ if (!fifo->data)
return;
list_for_each_entry_safe(pending, next, &fifo->pending, link)
@@ -123,7 +123,7 @@ void dma_fifo_reset(struct dma_fifo *fifo)
{
struct dma_pending *pending, *next;
- if (fifo->data == NULL)
+ if (!fifo->data)
return;
list_for_each_entry_safe(pending, next, &fifo->pending, link)
@@ -149,7 +149,7 @@ int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
{
int ofs, l;
- if (fifo->data == NULL)
+ if (!fifo->data)
return -ENOENT;
if (fifo->corrupt)
return -ENXIO;
@@ -192,7 +192,7 @@ int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
{
unsigned len, n, ofs, l, limit;
- if (fifo->data == NULL)
+ if (!fifo->data)
return -ENOENT;
if (fifo->corrupt)
return -ENXIO;
@@ -252,7 +252,7 @@ int dma_fifo_out_complete(struct dma_fifo *fifo, struct dma_pending *complete)
{
struct dma_pending *pending, *next, *tmp;
- if (fifo->data == NULL)
+ if (!fifo->data)
return -ENOENT;
if (fifo->corrupt)
return -ENXIO;
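
The dma_fifo.c hunks are a pure checkpatch-style cleanup: pointer validity is tested as a boolean rather than compared against NULL, with no behavioral change. A self-contained sketch of the guard pattern (demo_fifo is a stand-in type, not the driver's struct dma_fifo):

#include <linux/errno.h>
#include <linux/types.h>

struct demo_fifo {
	void *data;
	bool corrupt;
};

static int demo_fifo_check(const struct demo_fifo *fifo)
{
	if (!fifo->data)	/* was: fifo->data == NULL */
		return -ENOENT;
	if (fifo->corrupt)
		return -ENXIO;
	return 0;
}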
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index b676c486cb18..9b23b5c95f5e 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -828,7 +828,7 @@ static void fwtty_write_xchar(struct fwtty_port *port, char ch)
rcu_read_unlock();
}
-struct fwtty_port *fwtty_port_get(unsigned index)
+static struct fwtty_port *fwtty_port_get(unsigned index)
{
struct fwtty_port *port;
@@ -842,7 +842,6 @@ struct fwtty_port *fwtty_port_get(unsigned index)
mutex_unlock(&port_table_lock);
return port;
}
-EXPORT_SYMBOL(fwtty_port_get);
static int fwtty_ports_add(struct fw_serial *serial)
{
@@ -1465,9 +1464,9 @@ static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
seq_printf(m, " %s:", dev_name(&peer->unit->device));
seq_printf(m, " node:%04x gen:%d", peer->node_id, generation);
seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed,
- peer->max_payload, (unsigned long long) peer->guid);
- seq_printf(m, " mgmt:%012llx", (unsigned long long) peer->mgmt_addr);
- seq_printf(m, " addr:%012llx", (unsigned long long) peer->status_addr);
+ peer->max_payload, (unsigned long long)peer->guid);
+ seq_printf(m, " mgmt:%012llx", (unsigned long long)peer->mgmt_addr);
+ seq_printf(m, " addr:%012llx", (unsigned long long)peer->status_addr);
seq_putc(m, '\n');
}
@@ -1514,7 +1513,7 @@ static int fwtty_debugfs_peers_show(struct seq_file *m, void *v)
rcu_read_lock();
seq_printf(m, "card: %s guid: %016llx\n",
dev_name(serial->card->device),
- (unsigned long long) serial->card->guid);
+ (unsigned long long)serial->card->guid);
list_for_each_entry_rcu(peer, &serial->peer_list, list)
fwtty_debugfs_show_peer(m, peer);
rcu_read_unlock();
@@ -1986,7 +1985,7 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
* been probed for any unit devices...
*/
fwtty_err(card, "unknown card (guid %016llx)\n",
- (unsigned long long) card->guid);
+ (unsigned long long)card->guid);
return NULL;
}
@@ -2016,7 +2015,7 @@ static void __dump_peer_list(struct fw_card *card)
smp_rmb();
fwtty_dbg(card, "peer(%d:%x) guid: %016llx\n",
- g, peer->node_id, (unsigned long long) peer->guid);
+ g, peer->node_id, (unsigned long long)peer->guid);
}
}
#else
@@ -2313,7 +2312,7 @@ static int fwserial_create(struct fw_unit *unit)
list_add_rcu(&serial->list, &fwserial_list);
fwtty_notice(&unit, "TTY over FireWire on device %s (guid %016llx)\n",
- dev_name(card->device), (unsigned long long) card->guid);
+ dev_name(card->device), (unsigned long long)card->guid);
err = fwserial_add_peer(serial, unit);
if (!err)
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index e13fe33a6897..6fa936501b3f 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -341,7 +341,6 @@ static const char loop_dev_name[] = "fwloop";
extern struct tty_driver *fwtty_driver;
-struct fwtty_port *fwtty_port_get(unsigned index);
/*
* Returns the max send async payload size in bytes based on the unit device
* link speed. Self-limiting asynchronous bandwidth (via reducing the payload)
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 17d148f6e02c..bb552193e4ba 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -382,7 +382,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
/* Check DHCPv4 */
if (ip->protocol == IPPROTO_UDP) {
struct udphdr *udp =
- (network_data + sizeof(struct iphdr));
+ network_data + sizeof(struct iphdr);
if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68)
nic_type |= NIC_TYPE_F_DHCP;
}
@@ -393,12 +393,12 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ {
struct icmp6hdr *icmp6 =
- (network_data + sizeof(struct ipv6hdr));
+ network_data + sizeof(struct ipv6hdr);
if (icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
nic_type |= NIC_TYPE_ICMPV6;
} else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ {
struct udphdr *udp =
- (network_data + sizeof(struct ipv6hdr));
+ network_data + sizeof(struct ipv6hdr);
if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547)
nic_type |= NIC_TYPE_F_DHCP;
}
@@ -855,7 +855,7 @@ static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest,
/* Create random nic src and copy the first
* 3 bytes to be the same as dev_addr
*/
- random_ether_addr(nic_src);
+ eth_random_addr(nic_src);
memcpy(nic_src, dev_addr, 3);
/* Copy the nic_dest from dev_addr*/
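
The gdm_lte.c hunk above swaps random_ether_addr() for its current name, eth_random_addr(), while keeping the surrounding logic: generate a random locally administered MAC, then overwrite the first three bytes so the fabricated address shares the device's OUI. A short sketch of that helper pattern (the demo_* name is illustrative):

#include <linux/etherdevice.h>
#include <linux/string.h>

static void demo_make_nic_src(u8 *nic_src, const u8 *dev_addr)
{
	eth_random_addr(nic_src);	/* random locally-administered unicast */
	memcpy(nic_src, dev_addr, 3);	/* reuse the device's OUI bytes */
}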
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 445f83615575..6bedd668324c 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -26,8 +26,6 @@
#include "gdm_mux.h"
-static struct workqueue_struct *mux_rx_wq;
-
static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};
#define USB_DEVICE_CDC_DATA(vid, pid) \
@@ -275,7 +273,7 @@ static void gdm_mux_rcv_complete(struct urb *urb)
r->len = r->urb->actual_length;
spin_lock_irqsave(&rx->to_host_lock, flags);
list_add_tail(&r->to_host_list, &rx->to_host_list);
- queue_work(mux_rx_wq, &mux_dev->work_rx.work);
+ schedule_work(&mux_dev->work_rx.work);
spin_unlock_irqrestore(&rx->to_host_lock, flags);
}
}
@@ -435,7 +433,7 @@ static int gdm_mux_send_control(void *priv_dev, int request, int value,
if (ret < 0)
pr_err("usb_control_msg error: %d\n", ret);
- return ret < 0 ? ret : 0;
+ return min(ret, 0);
}
static void release_usb(struct mux_dev *mux_dev)
@@ -602,6 +600,8 @@ static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
mux_dev = tty_dev->priv_dev;
rx = &mux_dev->rx;
+ cancel_work_sync(&mux_dev->work_rx.work);
+
if (mux_dev->usb_state != PM_NORMAL) {
dev_err(intf->usb_dev, "usb suspend - invalid state\n");
return -1;
@@ -656,13 +656,6 @@ static struct usb_driver gdm_mux_driver = {
static int __init gdm_usb_mux_init(void)
{
-
- mux_rx_wq = create_workqueue("mux_rx_wq");
- if (!mux_rx_wq) {
- pr_err("work queue create fail\n");
- return -1;
- }
-
register_lte_tty_driver();
return usb_register(&gdm_mux_driver);
@@ -672,11 +665,6 @@ static void __exit gdm_usb_mux_exit(void)
{
unregister_lte_tty_driver();
- if (mux_rx_wq) {
- flush_workqueue(mux_rx_wq);
- destroy_workqueue(mux_rx_wq);
- }
-
usb_deregister(&gdm_mux_driver);
}
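
This gdm_mux.c conversion (repeated in gdm_usb.c below) drops the driver-private workqueue in favor of the shared system workqueue: queue_work(mux_rx_wq, ...) becomes schedule_work(), the create_workqueue()/destroy_workqueue() boilerplate disappears, and since the driver no longer flushes a queue of its own, the suspend path gains cancel_work_sync() to guarantee no handler runs across suspend. A minimal sketch of the resulting pattern, with demo_* placeholder names:

#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct work_rx;
};

static void demo_do_rx(struct work_struct *work)
{
	/* drain the receive list; body omitted in this sketch */
}

static void demo_init(struct demo_dev *dev)
{
	INIT_WORK(&dev->work_rx, demo_do_rx);
}

static void demo_rx_ready(struct demo_dev *dev)
{
	schedule_work(&dev->work_rx);	/* replaces queue_work(mux_rx_wq, ...) */
}

static int demo_suspend(struct demo_dev *dev)
{
	cancel_work_sync(&dev->work_rx);	/* nothing runs across suspend */
	return 0;
}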
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 92ea1a16afff..9db9b903f1db 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -55,9 +55,6 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
-static struct workqueue_struct *usb_tx_wq;
-static struct workqueue_struct *usb_rx_wq;
-
static void do_tx(struct work_struct *work);
static void do_rx(struct work_struct *work);
@@ -476,7 +473,7 @@ static void gdm_usb_rcv_complete(struct urb *urb)
if (!urb->status && r->callback) {
spin_lock_irqsave(&rx->to_host_lock, flags);
list_add_tail(&r->to_host_list, &rx->to_host_list);
- queue_work(usb_rx_wq, &udev->work_rx.work);
+ schedule_work(&udev->work_rx.work);
spin_unlock_irqrestore(&rx->to_host_lock, flags);
} else {
if (urb->status && udev->usb_state == PM_NORMAL)
@@ -568,7 +565,7 @@ static void gdm_usb_send_complete(struct urb *urb)
spin_lock_irqsave(&tx->lock, flags);
udev->send_complete = 1;
- queue_work(usb_tx_wq, &udev->work_tx.work);
+ schedule_work(&udev->work_tx.work);
spin_unlock_irqrestore(&tx->lock, flags);
}
@@ -759,7 +756,7 @@ static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
spin_lock_irqsave(&tx->lock, flags);
list_add_tail(&t_sdu->list, &tx->sdu_list);
- queue_work(usb_tx_wq, &udev->work_tx.work);
+ schedule_work(&udev->work_tx.work);
spin_unlock_irqrestore(&tx->lock, flags);
if (no_spc)
@@ -796,7 +793,7 @@ static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
spin_lock_irqsave(&tx->lock, flags);
list_add_tail(&t->list, &tx->hci_list);
- queue_work(usb_tx_wq, &udev->work_tx.work);
+ schedule_work(&udev->work_tx.work);
spin_unlock_irqrestore(&tx->lock, flags);
return 0;
@@ -944,6 +941,9 @@ static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
}
spin_unlock_irqrestore(&rx->submit_lock, flags);
+ cancel_work_sync(&udev->work_tx.work);
+ cancel_work_sync(&udev->work_rx.work);
+
return 0;
}
@@ -981,7 +981,7 @@ static int gdm_usb_resume(struct usb_interface *intf)
tx = &udev->tx;
spin_lock_irqsave(&tx->lock, flags);
- queue_work(usb_tx_wq, &udev->work_tx.work);
+ schedule_work(&udev->work_tx.work);
spin_unlock_irqrestore(&tx->lock, flags);
return 0;
@@ -1005,14 +1005,6 @@ static int __init gdm_usb_lte_init(void)
return -1;
}
- usb_tx_wq = create_workqueue("usb_tx_wq");
- if (!usb_tx_wq)
- return -1;
-
- usb_rx_wq = create_workqueue("usb_rx_wq");
- if (!usb_rx_wq)
- return -1;
-
return usb_register(&gdm_usb_lte_driver);
}
@@ -1021,16 +1013,6 @@ static void __exit gdm_usb_lte_exit(void)
gdm_lte_event_exit();
usb_deregister(&gdm_usb_lte_driver);
-
- if (usb_tx_wq) {
- flush_workqueue(usb_tx_wq);
- destroy_workqueue(usb_tx_wq);
- }
-
- if (usb_rx_wq) {
- flush_workqueue(usb_rx_wq);
- destroy_workqueue(usb_rx_wq);
- }
}
module_init(gdm_usb_lte_init);
diff --git a/drivers/staging/gdm72xx/Kconfig b/drivers/staging/gdm72xx/Kconfig
deleted file mode 100644
index bf11a7fbfc51..000000000000
--- a/drivers/staging/gdm72xx/Kconfig
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# GCT GDM72xx WiMAX driver configuration
-#
-
-menuconfig WIMAX_GDM72XX
- tristate "GCT GDM72xx WiMAX support"
- depends on NET && (USB || MMC)
- help
- Support a WiMAX module based on the GCT GDM72xx WiMAX chip.
-
-if WIMAX_GDM72XX
-
-config WIMAX_GDM72XX_QOS
- bool "Enable QoS support"
- default n
- help
- Enable Quality of Service support based on the data protocol of
- transmitting packets.
-
-config WIMAX_GDM72XX_K_MODE
- bool "Enable K mode"
- default n
- help
- Enable support for proprietary functions for KT (Korea Telecom).
-
-config WIMAX_GDM72XX_WIMAX2
- bool "Enable WiMAX2 support"
- default n
- help
- Enable support for transmitting multiple packets (packet
- aggregation) from the WiMAX module to the host processor.
-
-choice
- prompt "Select interface"
-
-config WIMAX_GDM72XX_USB
- bool "USB interface"
- depends on (USB = y || USB = WIMAX_GDM72XX)
- help
- Select this option if the WiMAX module interfaces with the host
- processor via USB.
-
-config WIMAX_GDM72XX_SDIO
- bool "SDIO interface"
- depends on (MMC = y || MMC = WIMAX_GDM72XX)
- help
- Select this option if the WiMAX module interfaces with the host
- processor via SDIO.
-
-endchoice
-
-if WIMAX_GDM72XX_USB
-
-config WIMAX_GDM72XX_USB_PM
- bool "Enable power management support"
- depends on PM
- help
- Enable USB power management in order to reduce power consumption
- while the interface is not in use.
-
-endif # WIMAX_GDM72XX_USB
-
-endif # WIMAX_GDM72XX
diff --git a/drivers/staging/gdm72xx/Makefile b/drivers/staging/gdm72xx/Makefile
deleted file mode 100644
index 35da7b90b19b..000000000000
--- a/drivers/staging/gdm72xx/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-obj-$(CONFIG_WIMAX_GDM72XX) := gdmwm.o
-
-gdmwm-y += gdm_wimax.o netlink_k.o
-gdmwm-$(CONFIG_WIMAX_GDM72XX_QOS) += gdm_qos.o
-gdmwm-$(CONFIG_WIMAX_GDM72XX_SDIO) += gdm_sdio.o sdio_boot.o
-gdmwm-$(CONFIG_WIMAX_GDM72XX_USB) += gdm_usb.o usb_boot.o
diff --git a/drivers/staging/gdm72xx/TODO b/drivers/staging/gdm72xx/TODO
deleted file mode 100644
index 62d0cd6225c8..000000000000
--- a/drivers/staging/gdm72xx/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-TODO:
-- Clean up coding style to meet kernel standard.
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
deleted file mode 100644
index cad347a05d18..000000000000
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/etherdevice.h>
-#include <asm/byteorder.h>
-
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/if_ether.h>
-
-#include "gdm_wimax.h"
-#include "hci.h"
-#include "gdm_qos.h"
-
-#define MAX_FREE_LIST_CNT 32
-static struct {
- struct list_head head;
- int cnt;
- spinlock_t lock;
-} qos_free_list;
-
-static void init_qos_entry_list(void)
-{
- qos_free_list.cnt = 0;
- INIT_LIST_HEAD(&qos_free_list.head);
- spin_lock_init(&qos_free_list.lock);
-}
-
-static void *alloc_qos_entry(void)
-{
- struct qos_entry_s *entry;
- unsigned long flags;
-
- spin_lock_irqsave(&qos_free_list.lock, flags);
- if (qos_free_list.cnt) {
- entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
- list);
- list_del(&entry->list);
- qos_free_list.cnt--;
- spin_unlock_irqrestore(&qos_free_list.lock, flags);
- return entry;
- }
- spin_unlock_irqrestore(&qos_free_list.lock, flags);
-
- return kmalloc(sizeof(*entry), GFP_ATOMIC);
-}
-
-static void free_qos_entry(void *entry)
-{
- struct qos_entry_s *qentry = entry;
- unsigned long flags;
-
- spin_lock_irqsave(&qos_free_list.lock, flags);
- if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
- list_add(&qentry->list, &qos_free_list.head);
- qos_free_list.cnt++;
- spin_unlock_irqrestore(&qos_free_list.lock, flags);
- return;
- }
- spin_unlock_irqrestore(&qos_free_list.lock, flags);
-
- kfree(entry);
-}
-
-static void free_qos_entry_list(struct list_head *free_list)
-{
- struct qos_entry_s *entry, *n;
- int total_free = 0;
-
- list_for_each_entry_safe(entry, n, free_list, list) {
- list_del(&entry->list);
- kfree(entry);
- total_free++;
- }
-
- pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
-}
-
-void gdm_qos_init(void *nic_ptr)
-{
- struct nic *nic = nic_ptr;
- struct qos_cb_s *qcb = &nic->qos;
- int i;
-
- for (i = 0; i < QOS_MAX; i++) {
- INIT_LIST_HEAD(&qcb->qos_list[i]);
- qcb->csr[i].qos_buf_count = 0;
- qcb->csr[i].enabled = false;
- }
-
- qcb->qos_list_cnt = 0;
- qcb->qos_null_idx = QOS_MAX - 1;
- qcb->qos_limit_size = 255;
-
- spin_lock_init(&qcb->qos_lock);
-
- init_qos_entry_list();
-}
-
-void gdm_qos_release_list(void *nic_ptr)
-{
- struct nic *nic = nic_ptr;
- struct qos_cb_s *qcb = &nic->qos;
- unsigned long flags;
- struct qos_entry_s *entry, *n;
- struct list_head free_list;
- int i;
-
- INIT_LIST_HEAD(&free_list);
-
- spin_lock_irqsave(&qcb->qos_lock, flags);
-
- for (i = 0; i < QOS_MAX; i++) {
- qcb->csr[i].qos_buf_count = 0;
- qcb->csr[i].enabled = false;
- }
-
- qcb->qos_list_cnt = 0;
- qcb->qos_null_idx = QOS_MAX - 1;
-
- for (i = 0; i < QOS_MAX; i++) {
- list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
- list_move_tail(&entry->list, &free_list);
- }
- }
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- free_qos_entry_list(&free_list);
-}
-
-static int chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
-{
- int i;
-
- if (csr->classifier_rule_en & IPTYPEOFSERVICE) {
- if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
- ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
- return 1;
- }
-
- if (csr->classifier_rule_en & PROTOCOL) {
- if (stream[9] != csr->protocol)
- return 1;
- }
-
- if (csr->classifier_rule_en & IPMASKEDSRCADDRESS) {
- for (i = 0; i < 4; i++) {
- if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
- (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
- return 1;
- }
- }
-
- if (csr->classifier_rule_en & IPMASKEDDSTADDRESS) {
- for (i = 0; i < 4; i++) {
- if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
- (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
- return 1;
- }
- }
-
- if (csr->classifier_rule_en & PROTOCOLSRCPORTRANGE) {
- i = ((port[0] << 8) & 0xff00) + port[1];
- if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
- return 1;
- }
-
- if (csr->classifier_rule_en & PROTOCOLDSTPORTRANGE) {
- i = ((port[2] << 8) & 0xff00) + port[3];
- if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
- return 1;
- }
-
- return 0;
-}
-
-static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
-{
- int ip_ver, i;
- struct qos_cb_s *qcb = &nic->qos;
-
- if (!iph || !tcpudph)
- return -1;
-
- ip_ver = (iph[0] >> 4) & 0xf;
-
- if (ip_ver != 4)
- return -1;
-
- for (i = 0; i < QOS_MAX; i++) {
- if (!qcb->csr[i].enabled)
- continue;
- if (!qcb->csr[i].classifier_rule_en)
- continue;
- if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
- return i;
- }
-
- return -1;
-}
-
-static void extract_qos_list(struct nic *nic, struct list_head *head)
-{
- struct qos_cb_s *qcb = &nic->qos;
- struct qos_entry_s *entry;
- int i;
-
- INIT_LIST_HEAD(head);
-
- for (i = 0; i < QOS_MAX; i++) {
- if (!qcb->csr[i].enabled)
- continue;
- if (qcb->csr[i].qos_buf_count >= qcb->qos_limit_size)
- continue;
- if (list_empty(&qcb->qos_list[i]))
- continue;
-
- entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s,
- list);
-
- list_move_tail(&entry->list, head);
- qcb->csr[i].qos_buf_count++;
-
- if (!list_empty(&qcb->qos_list[i]))
- netdev_warn(nic->netdev, "Index(%d) is piled!!\n", i);
- }
-}
-
-static void send_qos_list(struct nic *nic, struct list_head *head)
-{
- struct qos_entry_s *entry, *n;
-
- list_for_each_entry_safe(entry, n, head, list) {
- list_del(&entry->list);
- gdm_wimax_send_tx(entry->skb, entry->dev);
- free_qos_entry(entry);
- }
-}
-
-int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- int index;
- struct qos_cb_s *qcb = &nic->qos;
- unsigned long flags;
- struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
- struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
- struct tcphdr *tcph;
- struct qos_entry_s *entry = NULL;
- struct list_head send_list;
- int ret = 0;
-
- tcph = (struct tcphdr *)iph + iph->ihl*4;
-
- if (ethh->h_proto == cpu_to_be16(ETH_P_IP)) {
- if (qcb->qos_list_cnt && !qos_free_list.cnt) {
- entry = alloc_qos_entry();
- entry->skb = skb;
- entry->dev = dev;
- netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
- qcb->qos_list_cnt);
- }
-
- spin_lock_irqsave(&qcb->qos_lock, flags);
- if (qcb->qos_list_cnt) {
- index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
- if (index == -1)
- index = qcb->qos_null_idx;
-
- if (!entry) {
- entry = alloc_qos_entry();
- entry->skb = skb;
- entry->dev = dev;
- }
-
- list_add_tail(&entry->list, &qcb->qos_list[index]);
- extract_qos_list(nic, &send_list);
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- send_qos_list(nic, &send_list);
- goto out;
- }
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- if (entry)
- free_qos_entry(entry);
- }
-
- ret = gdm_wimax_send_tx(skb, dev);
-out:
- return ret;
-}
-
-static int get_csr(struct qos_cb_s *qcb, u32 sfid, int mode)
-{
- int i;
-
- for (i = 0; i < qcb->qos_list_cnt; i++) {
- if (qcb->csr[i].sfid == sfid)
- return i;
- }
-
- if (mode) {
- for (i = 0; i < QOS_MAX; i++) {
- if (!qcb->csr[i].enabled) {
- qcb->csr[i].enabled = true;
- qcb->qos_list_cnt++;
- return i;
- }
- }
- }
- return -1;
-}
-
-#define QOS_CHANGE_DEL 0xFC
-#define QOS_ADD 0xFD
-#define QOS_REPORT 0xFE
-
-void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
-{
- struct nic *nic = nic_ptr;
- int i, index, pos;
- u32 sfid;
- u8 sub_cmd_evt;
- struct qos_cb_s *qcb = &nic->qos;
- struct qos_entry_s *entry, *n;
- struct list_head send_list;
- struct list_head free_list;
- unsigned long flags;
-
- sub_cmd_evt = (u8)buf[4];
-
- if (sub_cmd_evt == QOS_REPORT) {
- spin_lock_irqsave(&qcb->qos_lock, flags);
- for (i = 0; i < qcb->qos_list_cnt; i++) {
- sfid = ((buf[(i*5) + 6] << 24) & 0xff000000);
- sfid += ((buf[(i*5) + 7] << 16) & 0xff0000);
- sfid += ((buf[(i*5) + 8] << 8) & 0xff00);
- sfid += (buf[(i*5) + 9]);
- index = get_csr(qcb, sfid, 0);
- if (index == -1) {
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- netdev_err(nic->netdev, "QoS ERROR: No SF\n");
- return;
- }
- qcb->csr[index].qos_buf_count = buf[(i*5) + 10];
- }
-
- extract_qos_list(nic, &send_list);
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- send_qos_list(nic, &send_list);
- return;
- }
-
- /* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANGE_DEL */
- pos = 6;
- sfid = ((buf[pos++] << 24) & 0xff000000);
- sfid += ((buf[pos++] << 16) & 0xff0000);
- sfid += ((buf[pos++] << 8) & 0xff00);
- sfid += (buf[pos++]);
-
- index = get_csr(qcb, sfid, 1);
- if (index == -1) {
- netdev_err(nic->netdev,
- "QoS ERROR: csr Update Error / Wrong index (%d)\n",
- index);
- return;
- }
-
- if (sub_cmd_evt == QOS_ADD) {
- netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
- sfid, index);
-
- spin_lock_irqsave(&qcb->qos_lock, flags);
- qcb->csr[index].sfid = sfid;
- qcb->csr[index].classifier_rule_en = ((buf[pos++] << 8) & 0xff00);
- qcb->csr[index].classifier_rule_en += buf[pos++];
- if (qcb->csr[index].classifier_rule_en == 0)
- qcb->qos_null_idx = index;
- qcb->csr[index].ip2s_mask = buf[pos++];
- qcb->csr[index].ip2s_lo = buf[pos++];
- qcb->csr[index].ip2s_hi = buf[pos++];
- qcb->csr[index].protocol = buf[pos++];
- qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
- qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
- qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
- qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
- qcb->csr[index].ipsrc_addr[0] = buf[pos++];
- qcb->csr[index].ipsrc_addr[1] = buf[pos++];
- qcb->csr[index].ipsrc_addr[2] = buf[pos++];
- qcb->csr[index].ipsrc_addr[3] = buf[pos++];
- qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
- qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
- qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
- qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
- qcb->csr[index].ipdst_addr[0] = buf[pos++];
- qcb->csr[index].ipdst_addr[1] = buf[pos++];
- qcb->csr[index].ipdst_addr[2] = buf[pos++];
- qcb->csr[index].ipdst_addr[3] = buf[pos++];
- qcb->csr[index].srcport_lo = ((buf[pos++] << 8) & 0xff00);
- qcb->csr[index].srcport_lo += buf[pos++];
- qcb->csr[index].srcport_hi = ((buf[pos++] << 8) & 0xff00);
- qcb->csr[index].srcport_hi += buf[pos++];
- qcb->csr[index].dstport_lo = ((buf[pos++] << 8) & 0xff00);
- qcb->csr[index].dstport_lo += buf[pos++];
- qcb->csr[index].dstport_hi = ((buf[pos++] << 8) & 0xff00);
- qcb->csr[index].dstport_hi += buf[pos++];
-
- qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- } else if (sub_cmd_evt == QOS_CHANGE_DEL) {
- netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
- sfid, index);
-
- INIT_LIST_HEAD(&free_list);
-
- spin_lock_irqsave(&qcb->qos_lock, flags);
- qcb->csr[index].enabled = false;
- qcb->qos_list_cnt--;
- qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
-
- list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
- list) {
- list_move_tail(&entry->list, &free_list);
- }
- spin_unlock_irqrestore(&qcb->qos_lock, flags);
- free_qos_entry_list(&free_list);
- }
-}
diff --git a/drivers/staging/gdm72xx/gdm_qos.h b/drivers/staging/gdm72xx/gdm_qos.h
deleted file mode 100644
index bbc8aab338b5..000000000000
--- a/drivers/staging/gdm72xx/gdm_qos.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_GDM_QOS_H__
-#define __GDM72XX_GDM_QOS_H__
-
-#include <linux/types.h>
-#include <linux/usb.h>
-#include <linux/list.h>
-
-#define QOS_MAX 16
-#define IPTYPEOFSERVICE 0x8000
-#define PROTOCOL 0x4000
-#define IPMASKEDSRCADDRESS 0x2000
-#define IPMASKEDDSTADDRESS 0x1000
-#define PROTOCOLSRCPORTRANGE 0x800
-#define PROTOCOLDSTPORTRANGE 0x400
-#define DSTMACADDR 0x200
-#define SRCMACADDR 0x100
-#define ETHERTYPE 0x80
-#define IEEE802_1DUSERPRIORITY 0x40
-#define IEEE802_1QVLANID 0x10
-
-struct gdm_wimax_csr_s {
- bool enabled;
- u32 sfid;
- u8 qos_buf_count;
- u16 classifier_rule_en;
- u8 ip2s_lo;
- u8 ip2s_hi;
- u8 ip2s_mask;
- u8 protocol;
- u8 ipsrc_addr[16];
- u8 ipsrc_addrmask[16];
- u8 ipdst_addr[16];
- u8 ipdst_addrmask[16];
- u16 srcport_lo;
- u16 srcport_hi;
- u16 dstport_lo;
- u16 dstport_hi;
-};
-
-struct qos_entry_s {
- struct list_head list;
- struct sk_buff *skb;
- struct net_device *dev;
-
-};
-
-struct qos_cb_s {
- struct list_head qos_list[QOS_MAX];
- int qos_list_cnt;
- int qos_null_idx;
- struct gdm_wimax_csr_s csr[QOS_MAX];
- spinlock_t qos_lock;
- int qos_limit_size;
-};
-
-void gdm_qos_init(void *nic_ptr);
-void gdm_qos_release_list(void *nic_ptr);
-int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev);
-void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size);
-
-#endif /* __GDM72XX_GDM_QOS_H__ */
diff --git a/drivers/staging/gdm72xx/gdm_sdio.c b/drivers/staging/gdm72xx/gdm_sdio.c
deleted file mode 100644
index 1f5a087723ba..000000000000
--- a/drivers/staging/gdm72xx/gdm_sdio.c
+++ /dev/null
@@ -1,700 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-
-#include "gdm_sdio.h"
-#include "gdm_wimax.h"
-#include "sdio_boot.h"
-#include "hci.h"
-
-#define TYPE_A_HEADER_SIZE 4
-#define TYPE_A_LOOKAHEAD_SIZE 16
-
-#define MAX_NR_RX_BUF 4
-
-#define SDU_TX_BUF_SIZE 2048
-#define TX_BUF_SIZE 2048
-#define TX_CHUNK_SIZE (2048 - TYPE_A_HEADER_SIZE)
-#define RX_BUF_SIZE (25*1024)
-
-#define TX_HZ 2000
-#define TX_INTERVAL (NSEC_PER_SEC/TX_HZ)
-
-static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx)
-{
- struct sdio_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);
-
- if (!t)
- return NULL;
-
- t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC);
- if (!t->buf) {
- kfree(t);
- return NULL;
- }
-
- t->tx_cxt = tx;
-
- return t;
-}
-
-static void free_tx_struct(struct sdio_tx *t)
-{
- if (t) {
- kfree(t->buf);
- kfree(t);
- }
-}
-
-static struct sdio_rx *alloc_rx_struct(struct rx_cxt *rx)
-{
- struct sdio_rx *r = kzalloc(sizeof(*r), GFP_ATOMIC);
-
- if (r)
- r->rx_cxt = rx;
-
- return r;
-}
-
-static void free_rx_struct(struct sdio_rx *r)
-{
- kfree(r);
-}
-
-/* Before this function is called, spin lock should be locked. */
-static struct sdio_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc)
-{
- struct sdio_tx *t;
-
- if (list_empty(&tx->free_list))
- return NULL;
-
- t = list_entry(tx->free_list.prev, struct sdio_tx, list);
- list_del(&t->list);
-
- *no_spc = list_empty(&tx->free_list) ? 1 : 0;
-
- return t;
-}
-
-/* Before this function is called, spin lock should be locked. */
-static void put_tx_struct(struct tx_cxt *tx, struct sdio_tx *t)
-{
- list_add_tail(&t->list, &tx->free_list);
-}
-
-/* Before this function is called, spin lock should be locked. */
-static struct sdio_rx *get_rx_struct(struct rx_cxt *rx)
-{
- struct sdio_rx *r;
-
- if (list_empty(&rx->free_list))
- return NULL;
-
- r = list_entry(rx->free_list.prev, struct sdio_rx, list);
- list_del(&r->list);
-
- return r;
-}
-
-/* Before this function is called, spin lock should be locked. */
-static void put_rx_struct(struct rx_cxt *rx, struct sdio_rx *r)
-{
- list_add_tail(&r->list, &rx->free_list);
-}
-
-static void release_sdio(struct sdiowm_dev *sdev)
-{
- struct tx_cxt *tx = &sdev->tx;
- struct rx_cxt *rx = &sdev->rx;
- struct sdio_tx *t, *t_next;
- struct sdio_rx *r, *r_next;
-
- kfree(tx->sdu_buf);
-
- list_for_each_entry_safe(t, t_next, &tx->free_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- kfree(rx->rx_buf);
-
- list_for_each_entry_safe(r, r_next, &rx->free_list, list) {
- list_del(&r->list);
- free_rx_struct(r);
- }
-
- list_for_each_entry_safe(r, r_next, &rx->req_list, list) {
- list_del(&r->list);
- free_rx_struct(r);
- }
-}
-
-static int init_sdio(struct sdiowm_dev *sdev)
-{
- int ret = 0, i;
- struct tx_cxt *tx = &sdev->tx;
- struct rx_cxt *rx = &sdev->rx;
- struct sdio_tx *t;
- struct sdio_rx *r;
-
- INIT_LIST_HEAD(&tx->free_list);
- INIT_LIST_HEAD(&tx->sdu_list);
- INIT_LIST_HEAD(&tx->hci_list);
-
- spin_lock_init(&tx->lock);
-
- tx->sdu_buf = kmalloc(SDU_TX_BUF_SIZE, GFP_KERNEL);
- if (!tx->sdu_buf)
- goto fail;
-
- for (i = 0; i < MAX_NR_SDU_BUF; i++) {
- t = alloc_tx_struct(tx);
- if (!t) {
- ret = -ENOMEM;
- goto fail;
- }
- list_add(&t->list, &tx->free_list);
- }
-
- INIT_LIST_HEAD(&rx->free_list);
- INIT_LIST_HEAD(&rx->req_list);
-
- spin_lock_init(&rx->lock);
-
- for (i = 0; i < MAX_NR_RX_BUF; i++) {
- r = alloc_rx_struct(rx);
- if (!r) {
- ret = -ENOMEM;
- goto fail;
- }
- list_add(&r->list, &rx->free_list);
- }
-
- rx->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
- if (!rx->rx_buf)
- goto fail;
-
- return 0;
-
-fail:
- release_sdio(sdev);
- return ret;
-}
-
-static void send_sdio_pkt(struct sdio_func *func, u8 *data, int len)
-{
- int n, blocks, ret, remain;
-
- sdio_claim_host(func);
-
- blocks = len / func->cur_blksize;
- n = blocks * func->cur_blksize;
- if (blocks) {
- ret = sdio_memcpy_toio(func, 0, data, n);
- if (ret < 0) {
- if (ret != -ENOMEDIUM)
- dev_err(&func->dev,
- "gdmwms: error: ret = %d\n", ret);
- goto end_io;
- }
- }
-
- remain = len - n;
- remain = (remain + 3) & ~3;
-
- if (remain) {
- ret = sdio_memcpy_toio(func, 0, data + n, remain);
- if (ret < 0) {
- if (ret != -ENOMEDIUM)
- dev_err(&func->dev,
- "gdmwms: error: ret = %d\n", ret);
- goto end_io;
- }
- }
-
-end_io:
- sdio_release_host(func);
-}
-
-static void send_sdu(struct sdio_func *func, struct tx_cxt *tx)
-{
- struct list_head *l, *next;
- struct hci_s *hci;
- struct sdio_tx *t;
- int pos, len, i, estlen, aggr_num = 0, aggr_len;
- u8 *buf;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- pos = TYPE_A_HEADER_SIZE + HCI_HEADER_SIZE;
- list_for_each_entry(t, &tx->sdu_list, list) {
- estlen = ((t->len + 3) & ~3) + 4;
- if ((pos + estlen) > SDU_TX_BUF_SIZE)
- break;
-
- aggr_num++;
- memcpy(tx->sdu_buf + pos, t->buf, t->len);
- memset(tx->sdu_buf + pos + t->len, 0, estlen - t->len);
- pos += estlen;
- }
- aggr_len = pos;
-
- hci = (struct hci_s *)(tx->sdu_buf + TYPE_A_HEADER_SIZE);
- hci->cmd_evt = cpu_to_be16(WIMAX_TX_SDU_AGGR);
- hci->length = cpu_to_be16(aggr_len - TYPE_A_HEADER_SIZE -
- HCI_HEADER_SIZE);
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
- dev_dbg(&func->dev, "sdio_send: %*ph\n", aggr_len - TYPE_A_HEADER_SIZE,
- tx->sdu_buf + TYPE_A_HEADER_SIZE);
-
- for (pos = TYPE_A_HEADER_SIZE; pos < aggr_len; pos += TX_CHUNK_SIZE) {
- len = aggr_len - pos;
- len = len > TX_CHUNK_SIZE ? TX_CHUNK_SIZE : len;
- buf = tx->sdu_buf + pos - TYPE_A_HEADER_SIZE;
-
- buf[0] = len & 0xff;
- buf[1] = (len >> 8) & 0xff;
- buf[2] = (len >> 16) & 0xff;
- buf[3] = (pos + len) >= aggr_len ? 0 : 1;
- send_sdio_pkt(func, buf, len + TYPE_A_HEADER_SIZE);
- }
-
- spin_lock_irqsave(&tx->lock, flags);
-
- for (l = tx->sdu_list.next, i = 0; i < aggr_num; i++, l = next) {
- next = l->next;
- t = list_entry(l, struct sdio_tx, list);
- if (t->callback)
- t->callback(t->cb_data);
-
- list_del(l);
- put_tx_struct(t->tx_cxt, t);
- }
-
- tx->sdu_stamp = ktime_get();
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
-static void send_hci(struct sdio_func *func, struct tx_cxt *tx,
- struct sdio_tx *t)
-{
- unsigned long flags;
-
- dev_dbg(&func->dev, "sdio_send: %*ph\n", t->len - TYPE_A_HEADER_SIZE,
- t->buf + TYPE_A_HEADER_SIZE);
-
- send_sdio_pkt(func, t->buf, t->len);
-
- spin_lock_irqsave(&tx->lock, flags);
- if (t->callback)
- t->callback(t->cb_data);
- free_tx_struct(t);
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
-static void do_tx(struct work_struct *work)
-{
- struct sdiowm_dev *sdev = container_of(work, struct sdiowm_dev, ws);
- struct sdio_func *func = sdev->func;
- struct tx_cxt *tx = &sdev->tx;
- struct sdio_tx *t = NULL;
- ktime_t now, before;
- int is_sdu = 0;
- long diff;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (!tx->can_send) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
-
- if (!list_empty(&tx->hci_list)) {
- t = list_entry(tx->hci_list.next, struct sdio_tx, list);
- list_del(&t->list);
- is_sdu = 0;
- } else if (!tx->stop_sdu_tx && !list_empty(&tx->sdu_list)) {
- now = ktime_get();
- before = tx->sdu_stamp;
-
- diff = ktime_to_ns(ktime_sub(now, before));
- if (diff >= 0 && diff < TX_INTERVAL) {
- schedule_work(&sdev->ws);
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
- is_sdu = 1;
- }
-
- if (!is_sdu && !t) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return;
- }
-
- tx->can_send = 0;
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (is_sdu)
- send_sdu(func, tx);
- else
- send_hci(func, tx, t);
-}
-
-static int gdm_sdio_send(void *priv_dev, void *data, int len,
- void (*cb)(void *data), void *cb_data)
-{
- struct sdiowm_dev *sdev = priv_dev;
- struct tx_cxt *tx = &sdev->tx;
- struct sdio_tx *t;
- u8 *pkt = data;
- int no_spc = 0;
- u16 cmd_evt;
- unsigned long flags;
-
- if (len > TX_BUF_SIZE - TYPE_A_HEADER_SIZE)
- return -EINVAL;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- cmd_evt = (pkt[0] << 8) | pkt[1];
- if (cmd_evt == WIMAX_TX_SDU) {
- t = get_tx_struct(tx, &no_spc);
- if (!t) {
- /* This case must not happen. */
- spin_unlock_irqrestore(&tx->lock, flags);
- return -ENOSPC;
- }
- list_add_tail(&t->list, &tx->sdu_list);
-
- memcpy(t->buf, data, len);
-
- t->len = len;
- t->callback = cb;
- t->cb_data = cb_data;
- } else {
- t = alloc_tx_struct(tx);
- if (!t) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return -ENOMEM;
- }
- list_add_tail(&t->list, &tx->hci_list);
-
- t->buf[0] = len & 0xff;
- t->buf[1] = (len >> 8) & 0xff;
- t->buf[2] = (len >> 16) & 0xff;
- t->buf[3] = 2;
- memcpy(t->buf + TYPE_A_HEADER_SIZE, data, len);
-
- t->len = len + TYPE_A_HEADER_SIZE;
- t->callback = cb;
- t->cb_data = cb_data;
- }
-
- if (tx->can_send)
- schedule_work(&sdev->ws);
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (no_spc)
- return -ENOSPC;
-
- return 0;
-}
-
-/* Handle the HCI, WIMAX_SDU_TX_FLOW. */
-static int control_sdu_tx_flow(struct sdiowm_dev *sdev, u8 *hci_data, int len)
-{
- struct tx_cxt *tx = &sdev->tx;
- u16 cmd_evt;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- cmd_evt = (hci_data[0] << 8) | (hci_data[1]);
- if (cmd_evt != WIMAX_SDU_TX_FLOW)
- goto out;
-
- if (hci_data[4] == 0) {
- dev_dbg(&sdev->func->dev, "WIMAX ==> STOP SDU TX\n");
- tx->stop_sdu_tx = 1;
- } else if (hci_data[4] == 1) {
- dev_dbg(&sdev->func->dev, "WIMAX ==> START SDU TX\n");
- tx->stop_sdu_tx = 0;
- if (tx->can_send)
- schedule_work(&sdev->ws);
- /* If free buffer for sdu tx doesn't exist, then tx queue
- * should not be woken. For this reason, don't pass the command,
- * START_SDU_TX.
- */
- if (list_empty(&tx->free_list))
- len = 0;
- }
-
-out:
- spin_unlock_irqrestore(&tx->lock, flags);
- return len;
-}
-
-static void gdm_sdio_irq(struct sdio_func *func)
-{
- struct phy_dev *phy_dev = sdio_get_drvdata(func);
- struct sdiowm_dev *sdev = phy_dev->priv_dev;
- struct tx_cxt *tx = &sdev->tx;
- struct rx_cxt *rx = &sdev->rx;
- struct sdio_rx *r;
- unsigned long flags;
- u8 val, hdr[TYPE_A_LOOKAHEAD_SIZE], *buf;
- u32 len, blocks, n;
- int ret, remain;
-
- /* Check interrupt */
- val = sdio_readb(func, 0x13, &ret);
- if (val & 0x01)
- sdio_writeb(func, 0x01, 0x13, &ret); /* clear interrupt */
- else
- return;
-
- ret = sdio_memcpy_fromio(func, hdr, 0x0, TYPE_A_LOOKAHEAD_SIZE);
- if (ret) {
- dev_err(&func->dev,
- "Cannot read from function %d\n", func->num);
- goto done;
- }
-
- len = (hdr[2] << 16) | (hdr[1] << 8) | hdr[0];
- if (len > (RX_BUF_SIZE - TYPE_A_HEADER_SIZE)) {
- dev_err(&func->dev, "Too big Type-A size: %d\n", len);
- goto done;
- }
-
- if (hdr[3] == 1) { /* Ack */
- u32 *ack_seq = (u32 *)&hdr[4];
-
- spin_lock_irqsave(&tx->lock, flags);
- tx->can_send = 1;
-
- if (!list_empty(&tx->sdu_list) || !list_empty(&tx->hci_list))
- schedule_work(&sdev->ws);
- spin_unlock_irqrestore(&tx->lock, flags);
- dev_dbg(&func->dev, "Ack... %0x\n", ntohl(*ack_seq));
- goto done;
- }
-
- memcpy(rx->rx_buf, hdr + TYPE_A_HEADER_SIZE,
- TYPE_A_LOOKAHEAD_SIZE - TYPE_A_HEADER_SIZE);
-
- buf = rx->rx_buf + TYPE_A_LOOKAHEAD_SIZE - TYPE_A_HEADER_SIZE;
- remain = len - TYPE_A_LOOKAHEAD_SIZE + TYPE_A_HEADER_SIZE;
- if (remain <= 0)
- goto end_io;
-
- blocks = remain / func->cur_blksize;
-
- if (blocks) {
- n = blocks * func->cur_blksize;
- ret = sdio_memcpy_fromio(func, buf, 0x0, n);
- if (ret) {
- dev_err(&func->dev,
- "Cannot read from function %d\n", func->num);
- goto done;
- }
- buf += n;
- remain -= n;
- }
-
- if (remain) {
- ret = sdio_memcpy_fromio(func, buf, 0x0, remain);
- if (ret) {
- dev_err(&func->dev,
- "Cannot read from function %d\n", func->num);
- goto done;
- }
- }
-
-end_io:
- dev_dbg(&func->dev, "sdio_receive: %*ph\n", len, rx->rx_buf);
-
- len = control_sdu_tx_flow(sdev, rx->rx_buf, len);
-
- spin_lock_irqsave(&rx->lock, flags);
-
- if (!list_empty(&rx->req_list)) {
- r = list_entry(rx->req_list.next, struct sdio_rx, list);
- spin_unlock_irqrestore(&rx->lock, flags);
- if (r->callback)
- r->callback(r->cb_data, rx->rx_buf, len);
- spin_lock_irqsave(&rx->lock, flags);
- list_del(&r->list);
- put_rx_struct(rx, r);
- }
-
- spin_unlock_irqrestore(&rx->lock, flags);
-
-done:
- sdio_writeb(func, 0x00, 0x10, &ret); /* PCRRT */
- if (!phy_dev->netdev)
- register_wimax_device(phy_dev, &func->dev);
-}
-
-static int gdm_sdio_receive(void *priv_dev,
- void (*cb)(void *cb_data, void *data, int len),
- void *cb_data)
-{
- struct sdiowm_dev *sdev = priv_dev;
- struct rx_cxt *rx = &sdev->rx;
- struct sdio_rx *r;
- unsigned long flags;
-
- spin_lock_irqsave(&rx->lock, flags);
- r = get_rx_struct(rx);
- if (!r) {
- spin_unlock_irqrestore(&rx->lock, flags);
- return -ENOMEM;
- }
-
- r->callback = cb;
- r->cb_data = cb_data;
-
- list_add_tail(&r->list, &rx->req_list);
- spin_unlock_irqrestore(&rx->lock, flags);
-
- return 0;
-}
-
-static int sdio_wimax_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int ret;
- struct phy_dev *phy_dev = NULL;
- struct sdiowm_dev *sdev = NULL;
-
- dev_info(&func->dev, "Found GDM SDIO VID = 0x%04x PID = 0x%04x...\n",
- func->vendor, func->device);
- dev_info(&func->dev, "GCT WiMax driver version %s\n", DRIVER_VERSION);
-
- sdio_claim_host(func);
- sdio_enable_func(func);
- sdio_claim_irq(func, gdm_sdio_irq);
-
- ret = sdio_boot(func);
- if (ret)
- return ret;
-
- phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
- if (!phy_dev) {
- ret = -ENOMEM;
- goto out;
- }
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!sdev) {
- ret = -ENOMEM;
- goto out;
- }
-
- phy_dev->priv_dev = (void *)sdev;
- phy_dev->send_func = gdm_sdio_send;
- phy_dev->rcv_func = gdm_sdio_receive;
-
- ret = init_sdio(sdev);
- if (ret < 0)
- goto out;
-
- sdev->func = func;
-
- sdio_writeb(func, 1, 0x14, &ret); /* Enable interrupt */
- sdio_release_host(func);
-
- INIT_WORK(&sdev->ws, do_tx);
-
- sdio_set_drvdata(func, phy_dev);
-out:
- if (ret) {
- kfree(phy_dev);
- kfree(sdev);
- }
-
- return ret;
-}
-
-static void sdio_wimax_remove(struct sdio_func *func)
-{
- struct phy_dev *phy_dev = sdio_get_drvdata(func);
- struct sdiowm_dev *sdev = phy_dev->priv_dev;
-
- cancel_work_sync(&sdev->ws);
- if (phy_dev->netdev)
- unregister_wimax_device(phy_dev);
- sdio_claim_host(func);
- sdio_release_irq(func);
- sdio_disable_func(func);
- sdio_release_host(func);
- release_sdio(sdev);
-
- kfree(sdev);
- kfree(phy_dev);
-}
-
-static const struct sdio_device_id sdio_wimax_ids[] = {
- { SDIO_DEVICE(0x0296, 0x5347) },
- {0}
-};
-
-MODULE_DEVICE_TABLE(sdio, sdio_wimax_ids);
-
-static struct sdio_driver sdio_wimax_driver = {
- .probe = sdio_wimax_probe,
- .remove = sdio_wimax_remove,
- .name = "sdio_wimax",
- .id_table = sdio_wimax_ids,
-};
-
-static int __init sdio_gdm_wimax_init(void)
-{
- return sdio_register_driver(&sdio_wimax_driver);
-}
-
-static void __exit sdio_gdm_wimax_exit(void)
-{
- sdio_unregister_driver(&sdio_wimax_driver);
-}
-
-module_init(sdio_gdm_wimax_init);
-module_exit(sdio_gdm_wimax_exit);
-
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_DESCRIPTION("GCT WiMax SDIO Device Driver");
-MODULE_AUTHOR("Ethan Park");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm72xx/gdm_sdio.h b/drivers/staging/gdm72xx/gdm_sdio.h
deleted file mode 100644
index aa7dad22a219..000000000000
--- a/drivers/staging/gdm72xx/gdm_sdio.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_GDM_SDIO_H__
-#define __GDM72XX_GDM_SDIO_H__
-
-#include <linux/types.h>
-#include <linux/ktime.h>
-
-#define MAX_NR_SDU_BUF 64
-
-struct sdio_tx {
- struct list_head list;
- struct tx_cxt *tx_cxt;
- u8 *buf;
- int len;
- void (*callback)(void *cb_data);
- void *cb_data;
-};
-
-struct tx_cxt {
- struct list_head free_list;
- struct list_head sdu_list;
- struct list_head hci_list;
- ktime_t sdu_stamp;
- u8 *sdu_buf;
- spinlock_t lock;
- int can_send;
- int stop_sdu_tx;
-};
-
-struct sdio_rx {
- struct list_head list;
- struct rx_cxt *rx_cxt;
- void (*callback)(void *cb_data, void *data, int len);
- void *cb_data;
-};
-
-struct rx_cxt {
- struct list_head free_list;
- struct list_head req_list;
- u8 *rx_buf;
- spinlock_t lock;
-};
-
-struct sdiowm_dev {
- struct sdio_func *func;
- struct tx_cxt tx;
- struct rx_cxt rx;
- struct work_struct ws;
-};
-
-#endif /* __GDM72XX_GDM_SDIO_H__ */
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
deleted file mode 100644
index 16e497d9d0cf..000000000000
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/usb.h>
-#include <asm/byteorder.h>
-#include <linux/kthread.h>
-
-#include "gdm_usb.h"
-#include "gdm_wimax.h"
-#include "usb_boot.h"
-#include "hci.h"
-
-#include "usb_ids.h"
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-#define TX_BUF_SIZE 2048
-
-#if defined(CONFIG_WIMAX_GDM72XX_WIMAX2)
-#define RX_BUF_SIZE (128*1024) /* For packet aggregation */
-#else
-#define RX_BUF_SIZE 2048
-#endif
-
-#define GDM7205_PADDING 256
-
-#define DOWNLOAD_CONF_VALUE 0x21
-
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
-
-static DECLARE_WAIT_QUEUE_HEAD(k_wait);
-static LIST_HEAD(k_list);
-static DEFINE_SPINLOCK(k_lock);
-static int k_mode_stop;
-
-#define K_WAIT_TIME (2 * HZ / 100)
-
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
-
-static struct usb_tx *alloc_tx_struct(struct tx_cxt *tx)
-{
- struct usb_tx *t = kzalloc(sizeof(*t), GFP_ATOMIC);
-
- if (!t)
- return NULL;
-
- t->urb = usb_alloc_urb(0, GFP_ATOMIC);
- t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC);
- if (!t->urb || !t->buf) {
- usb_free_urb(t->urb);
- kfree(t->buf);
- kfree(t);
- return NULL;
- }
-
- t->tx_cxt = tx;
-
- return t;
-}
-
-static void free_tx_struct(struct usb_tx *t)
-{
- if (t) {
- usb_free_urb(t->urb);
- kfree(t->buf);
- kfree(t);
- }
-}
-
-static struct usb_rx *alloc_rx_struct(struct rx_cxt *rx)
-{
- struct usb_rx *r = kzalloc(sizeof(*r), GFP_ATOMIC);
-
- if (!r)
- return NULL;
-
- r->urb = usb_alloc_urb(0, GFP_ATOMIC);
- r->buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC);
- if (!r->urb || !r->buf) {
- usb_free_urb(r->urb);
- kfree(r->buf);
- kfree(r);
- return NULL;
- }
-
- r->rx_cxt = rx;
- return r;
-}
-
-static void free_rx_struct(struct usb_rx *r)
-{
- if (r) {
- usb_free_urb(r->urb);
- kfree(r->buf);
- kfree(r);
- }
-}
-
-/* Before this function is called, spin lock should be locked. */
-static struct usb_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc)
-{
- struct usb_tx *t;
-
- if (list_empty(&tx->free_list)) {
- *no_spc = 1;
- return NULL;
- }
-
- t = list_entry(tx->free_list.next, struct usb_tx, list);
- list_del(&t->list);
-
- *no_spc = list_empty(&tx->free_list) ? 1 : 0;
-
- return t;
-}
-
-/* Before this function is called, spin lock should be locked. */
-static void put_tx_struct(struct tx_cxt *tx, struct usb_tx *t)
-{
- list_add_tail(&t->list, &tx->free_list);
-}
-
-/* Before this function is called, spin lock should be locked. */
-static struct usb_rx *get_rx_struct(struct rx_cxt *rx)
-{
- struct usb_rx *r;
-
- if (list_empty(&rx->free_list)) {
- r = alloc_rx_struct(rx);
- if (!r)
- return NULL;
-
- list_add(&r->list, &rx->free_list);
- }
-
- r = list_entry(rx->free_list.next, struct usb_rx, list);
- list_move_tail(&r->list, &rx->used_list);
-
- return r;
-}
-
-/* Before this function is called, spin lock should be locked. */
-static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
-{
- list_move(&r->list, &rx->free_list);
-}
-
-static void release_usb(struct usbwm_dev *udev)
-{
- struct tx_cxt *tx = &udev->tx;
- struct rx_cxt *rx = &udev->rx;
- struct usb_tx *t, *t_next;
- struct usb_rx *r, *r_next;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- list_for_each_entry_safe(t, t_next, &tx->free_list, list) {
- list_del(&t->list);
- free_tx_struct(t);
- }
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
- spin_lock_irqsave(&rx->lock, flags);
-
- list_for_each_entry_safe(r, r_next, &rx->free_list, list) {
- list_del(&r->list);
- free_rx_struct(r);
- }
-
- list_for_each_entry_safe(r, r_next, &rx->used_list, list) {
- list_del(&r->list);
- free_rx_struct(r);
- }
-
- spin_unlock_irqrestore(&rx->lock, flags);
-}
-
-static int init_usb(struct usbwm_dev *udev)
-{
- int ret = 0, i;
- struct tx_cxt *tx = &udev->tx;
- struct rx_cxt *rx = &udev->rx;
- struct usb_tx *t;
- struct usb_rx *r;
- unsigned long flags;
-
- INIT_LIST_HEAD(&tx->free_list);
- INIT_LIST_HEAD(&tx->sdu_list);
- INIT_LIST_HEAD(&tx->hci_list);
-#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
- INIT_LIST_HEAD(&tx->pending_list);
-#endif
-
- INIT_LIST_HEAD(&rx->free_list);
- INIT_LIST_HEAD(&rx->used_list);
-
- spin_lock_init(&tx->lock);
- spin_lock_init(&rx->lock);
-
- spin_lock_irqsave(&tx->lock, flags);
- for (i = 0; i < MAX_NR_SDU_BUF; i++) {
- t = alloc_tx_struct(tx);
- if (!t) {
- spin_unlock_irqrestore(&tx->lock, flags);
- ret = -ENOMEM;
- goto fail;
- }
- list_add(&t->list, &tx->free_list);
- }
- spin_unlock_irqrestore(&tx->lock, flags);
-
- r = alloc_rx_struct(rx);
- if (!r) {
- ret = -ENOMEM;
- goto fail;
- }
-
- spin_lock_irqsave(&rx->lock, flags);
- list_add(&r->list, &rx->free_list);
- spin_unlock_irqrestore(&rx->lock, flags);
- return ret;
-
-fail:
- release_usb(udev);
- return ret;
-}
-
-static void __gdm_usb_send_complete(struct urb *urb)
-{
- struct usb_tx *t = urb->context;
- struct tx_cxt *tx = t->tx_cxt;
- u8 *pkt = t->buf;
- u16 cmd_evt;
-
- /* Completion by usb_unlink_urb */
- if (urb->status == -ECONNRESET)
- return;
-
- if (t->callback)
- t->callback(t->cb_data);
-
- /* Delete from sdu list or hci list. */
- list_del(&t->list);
-
- cmd_evt = (pkt[0] << 8) | pkt[1];
- if (cmd_evt == WIMAX_TX_SDU)
- put_tx_struct(tx, t);
- else
- free_tx_struct(t);
-}
-
-static void gdm_usb_send_complete(struct urb *urb)
-{
- struct usb_tx *t = urb->context;
- struct tx_cxt *tx = t->tx_cxt;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
- __gdm_usb_send_complete(urb);
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
-static int gdm_usb_send(void *priv_dev, void *data, int len,
- void (*cb)(void *data), void *cb_data)
-{
- struct usbwm_dev *udev = priv_dev;
- struct usb_device *usbdev = udev->usbdev;
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx *t;
- int padding = udev->padding;
- int no_spc = 0, ret;
- u8 *pkt = data;
- u16 cmd_evt;
- unsigned long flags;
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- unsigned long flags2;
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
-
- if (!udev->usbdev) {
- dev_err(&usbdev->dev, "%s: No such device\n", __func__);
- return -ENODEV;
- }
-
- if (len > TX_BUF_SIZE - padding - 1)
- return -EINVAL;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- cmd_evt = (pkt[0] << 8) | pkt[1];
- if (cmd_evt == WIMAX_TX_SDU) {
- t = get_tx_struct(tx, &no_spc);
- if (!t) {
- /* This case must not happen. */
- spin_unlock_irqrestore(&tx->lock, flags);
- return -ENOSPC;
- }
- list_add_tail(&t->list, &tx->sdu_list);
- } else {
- t = alloc_tx_struct(tx);
- if (!t) {
- spin_unlock_irqrestore(&tx->lock, flags);
- return -ENOMEM;
- }
- list_add_tail(&t->list, &tx->hci_list);
- }
-
- memcpy(t->buf + padding, data, len);
- t->callback = cb;
- t->cb_data = cb_data;
-
- /* In some cases, the WiMAX USB module stalls when the data size is
- * a multiple of 512, so increment the length by one in that case.
- */
- if ((len % 512) == 0)
- len++;
-
- usb_fill_bulk_urb(t->urb, usbdev, usb_sndbulkpipe(usbdev, 1), t->buf,
- len + padding, gdm_usb_send_complete, t);
-
- dev_dbg(&usbdev->dev, "usb_send: %*ph\n", len + padding, t->buf);
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- if (usbdev->state & USB_STATE_SUSPENDED) {
- list_add_tail(&t->p_list, &tx->pending_list);
- schedule_work(&udev->pm_ws);
- goto out;
- }
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- if (udev->bw_switch) {
- list_add_tail(&t->p_list, &tx->pending_list);
- goto out;
- } else if (cmd_evt == WIMAX_SCAN) {
- struct rx_cxt *rx;
- struct usb_rx *r;
-
- rx = &udev->rx;
-
- spin_lock_irqsave(&rx->lock, flags2);
- list_for_each_entry(r, &rx->used_list, list)
- usb_unlink_urb(r->urb);
- spin_unlock_irqrestore(&rx->lock, flags2);
-
- udev->bw_switch = 1;
-
- spin_lock_irqsave(&k_lock, flags2);
- list_add_tail(&udev->list, &k_list);
- spin_unlock_irqrestore(&k_lock, flags2);
-
- wake_up(&k_wait);
- }
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
-
- ret = usb_submit_urb(t->urb, GFP_ATOMIC);
- if (ret)
- goto send_fail;
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- usb_mark_last_busy(usbdev);
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
-#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
-out:
-#endif
- spin_unlock_irqrestore(&tx->lock, flags);
-
- if (no_spc)
- return -ENOSPC;
-
- return 0;
-
-send_fail:
- t->callback = NULL;
- __gdm_usb_send_complete(t->urb);
- spin_unlock_irqrestore(&tx->lock, flags);
- return ret;
-}
-
-static void gdm_usb_rcv_complete(struct urb *urb)
-{
- struct usb_rx *r = urb->context;
- struct rx_cxt *rx = r->rx_cxt;
- struct usbwm_dev *udev = container_of(r->rx_cxt, struct usbwm_dev, rx);
- struct tx_cxt *tx = &udev->tx;
- struct usb_tx *t;
- u16 cmd_evt;
- unsigned long flags, flags2;
- struct usb_device *dev = urb->dev;
-
- /* Completion by usb_unlink_urb */
- if (urb->status == -ECONNRESET)
- return;
-
- spin_lock_irqsave(&tx->lock, flags);
-
- if (!urb->status) {
- cmd_evt = (r->buf[0] << 8) | (r->buf[1]);
-
- dev_dbg(&dev->dev, "usb_receive: %*ph\n", urb->actual_length,
- r->buf);
-
- if (cmd_evt == WIMAX_SDU_TX_FLOW) {
- if (r->buf[4] == 0) {
- dev_dbg(&dev->dev, "WIMAX ==> STOP SDU TX\n");
- list_for_each_entry(t, &tx->sdu_list, list)
- usb_unlink_urb(t->urb);
- } else if (r->buf[4] == 1) {
- dev_dbg(&dev->dev, "WIMAX ==> START SDU TX\n");
- list_for_each_entry(t, &tx->sdu_list, list) {
- usb_submit_urb(t->urb, GFP_ATOMIC);
- }
- /* If free buffer for sdu tx doesn't
- * exist, then tx queue should not be
- * woken. For this reason, don't pass
- * the command, START_SDU_TX.
- */
- if (list_empty(&tx->free_list))
- urb->actual_length = 0;
- }
- }
- }
-
- if (!urb->status && r->callback)
- r->callback(r->cb_data, r->buf, urb->actual_length);
-
- spin_lock_irqsave(&rx->lock, flags2);
- put_rx_struct(rx, r);
- spin_unlock_irqrestore(&rx->lock, flags2);
-
- spin_unlock_irqrestore(&tx->lock, flags);
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- usb_mark_last_busy(dev);
-#endif
-}
-
-static int gdm_usb_receive(void *priv_dev,
- void (*cb)(void *cb_data, void *data, int len),
- void *cb_data)
-{
- struct usbwm_dev *udev = priv_dev;
- struct usb_device *usbdev = udev->usbdev;
- struct rx_cxt *rx = &udev->rx;
- struct usb_rx *r;
- unsigned long flags;
-
- if (!udev->usbdev) {
- dev_err(&usbdev->dev, "%s: No such device\n", __func__);
- return -ENODEV;
- }
-
- spin_lock_irqsave(&rx->lock, flags);
- r = get_rx_struct(rx);
- spin_unlock_irqrestore(&rx->lock, flags);
-
- if (!r)
- return -ENOMEM;
-
- r->callback = cb;
- r->cb_data = cb_data;
-
- usb_fill_bulk_urb(r->urb, usbdev, usb_rcvbulkpipe(usbdev, 0x82), r->buf,
- RX_BUF_SIZE, gdm_usb_rcv_complete, r);
-
- return usb_submit_urb(r->urb, GFP_ATOMIC);
-}
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
-static void do_pm_control(struct work_struct *work)
-{
- struct usbwm_dev *udev = container_of(work, struct usbwm_dev, pm_ws);
- struct tx_cxt *tx = &udev->tx;
- int ret;
- unsigned long flags;
-
- ret = usb_autopm_get_interface(udev->intf);
- if (!ret)
- usb_autopm_put_interface(udev->intf);
-
- spin_lock_irqsave(&tx->lock, flags);
- if (udev->usbdev->state != USB_STATE_SUSPENDED &&
- (!list_empty(&tx->hci_list) || !list_empty(&tx->sdu_list))) {
- struct usb_tx *t, *temp;
-
- list_for_each_entry_safe(t, temp, &tx->pending_list, p_list) {
- list_del(&t->p_list);
- ret = usb_submit_urb(t->urb, GFP_ATOMIC);
-
- if (ret) {
- t->callback = NULL;
- __gdm_usb_send_complete(t->urb);
- }
- }
- }
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
-static int gdm_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- int ret = 0;
- u8 bConfigurationValue;
- struct phy_dev *phy_dev = NULL;
- struct usbwm_dev *udev = NULL;
- u16 idVendor, idProduct, bcdDevice;
-
- struct usb_device *usbdev = interface_to_usbdev(intf);
-
- usb_get_dev(usbdev);
- bConfigurationValue = usbdev->actconfig->desc.bConfigurationValue;
-
- /* USB descriptor fields are little-endian */
- idVendor = le16_to_cpu(usbdev->descriptor.idVendor);
- idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
- bcdDevice = le16_to_cpu(usbdev->descriptor.bcdDevice);
-
- dev_info(&intf->dev, "Found GDM USB VID = 0x%04x PID = 0x%04x...\n",
- idVendor, idProduct);
- dev_info(&intf->dev, "GCT WiMax driver version %s\n", DRIVER_VERSION);
-
- if (idProduct == EMERGENCY_PID) {
- ret = usb_emergency(usbdev);
- goto out;
- }
-
- /* Support for EEPROM bootloader */
- if (bConfigurationValue == DOWNLOAD_CONF_VALUE ||
- idProduct & B_DOWNLOAD) {
- ret = usb_boot(usbdev, bcdDevice);
- goto out;
- }
-
- phy_dev = kzalloc(sizeof(*phy_dev), GFP_KERNEL);
- if (!phy_dev) {
- ret = -ENOMEM;
- goto out;
- }
- udev = kzalloc(sizeof(*udev), GFP_KERNEL);
- if (!udev) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (idProduct == 0x7205 || idProduct == 0x7206)
- udev->padding = GDM7205_PADDING;
- else
- udev->padding = 0;
-
- phy_dev->priv_dev = (void *)udev;
- phy_dev->send_func = gdm_usb_send;
- phy_dev->rcv_func = gdm_usb_receive;
-
- ret = init_usb(udev);
- if (ret < 0)
- goto out;
-
- udev->usbdev = usbdev;
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- udev->intf = intf;
-
- intf->needs_remote_wakeup = 1;
- device_init_wakeup(&intf->dev, 1);
-
- pm_runtime_set_autosuspend_delay(&usbdev->dev, 10 * 1000); /* msec */
-
- INIT_WORK(&udev->pm_ws, do_pm_control);
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
- ret = register_wimax_device(phy_dev, &intf->dev);
- if (ret)
- release_usb(udev);
-
-out:
- if (ret) {
- kfree(phy_dev);
- kfree(udev);
- usb_put_dev(usbdev);
- } else {
- usb_set_intfdata(intf, phy_dev);
- }
- return ret;
-}
-
-static void gdm_usb_disconnect(struct usb_interface *intf)
-{
- u8 bConfigurationValue;
- struct phy_dev *phy_dev;
- struct usbwm_dev *udev;
- u16 idProduct;
- struct usb_device *usbdev = interface_to_usbdev(intf);
-
- bConfigurationValue = usbdev->actconfig->desc.bConfigurationValue;
- phy_dev = usb_get_intfdata(intf);
-
- /* USB descriptor fields are little-endian */
- idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
-
- if (idProduct != EMERGENCY_PID &&
- bConfigurationValue != DOWNLOAD_CONF_VALUE &&
- (idProduct & B_DOWNLOAD) == 0) {
- udev = phy_dev->priv_dev;
- udev->usbdev = NULL;
-
- unregister_wimax_device(phy_dev);
- release_usb(udev);
- kfree(udev);
- kfree(phy_dev);
- }
-
- usb_put_dev(usbdev);
-}
-
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
-static int gdm_suspend(struct usb_interface *intf, pm_message_t pm_msg)
-{
- struct phy_dev *phy_dev;
- struct usbwm_dev *udev;
- struct rx_cxt *rx;
- struct usb_rx *r;
- unsigned long flags;
-
- phy_dev = usb_get_intfdata(intf);
- if (!phy_dev)
- return 0;
-
- udev = phy_dev->priv_dev;
- rx = &udev->rx;
-
- spin_lock_irqsave(&rx->lock, flags);
-
- list_for_each_entry(r, &rx->used_list, list)
- usb_unlink_urb(r->urb);
-
- spin_unlock_irqrestore(&rx->lock, flags);
-
- return 0;
-}
-
-static int gdm_resume(struct usb_interface *intf)
-{
- struct phy_dev *phy_dev;
- struct usbwm_dev *udev;
- struct rx_cxt *rx;
- struct usb_rx *r;
- unsigned long flags;
-
- phy_dev = usb_get_intfdata(intf);
- if (!phy_dev)
- return 0;
-
- udev = phy_dev->priv_dev;
- rx = &udev->rx;
-
- spin_lock_irqsave(&rx->lock, flags);
-
- list_for_each_entry(r, &rx->used_list, list)
- usb_submit_urb(r->urb, GFP_ATOMIC);
-
- spin_unlock_irqrestore(&rx->lock, flags);
-
- return 0;
-}
-
-#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
-
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
-static int k_mode_thread(void *arg)
-{
- struct usbwm_dev *udev;
- struct tx_cxt *tx;
- struct rx_cxt *rx;
- struct usb_tx *t, *temp;
- struct usb_rx *r;
- unsigned long flags, flags2, expire;
- int ret;
-
- while (!k_mode_stop) {
- spin_lock_irqsave(&k_lock, flags2);
- while (!list_empty(&k_list)) {
- udev = list_entry(k_list.next, struct usbwm_dev, list);
- tx = &udev->tx;
- rx = &udev->rx;
-
- list_del(&udev->list);
- spin_unlock_irqrestore(&k_lock, flags2);
-
- expire = jiffies + K_WAIT_TIME;
- while (time_before(jiffies, expire))
- schedule_timeout(K_WAIT_TIME);
-
- spin_lock_irqsave(&rx->lock, flags);
-
- list_for_each_entry(r, &rx->used_list, list)
- usb_submit_urb(r->urb, GFP_ATOMIC);
-
- spin_unlock_irqrestore(&rx->lock, flags);
-
- spin_lock_irqsave(&tx->lock, flags);
-
- list_for_each_entry_safe(t, temp, &tx->pending_list,
- p_list) {
- list_del(&t->p_list);
- ret = usb_submit_urb(t->urb, GFP_ATOMIC);
-
- if (ret) {
- t->callback = NULL;
- __gdm_usb_send_complete(t->urb);
- }
- }
-
- udev->bw_switch = 0;
- spin_unlock_irqrestore(&tx->lock, flags);
-
- spin_lock_irqsave(&k_lock, flags2);
- }
- wait_event_interruptible_lock_irq(k_wait,
- !list_empty(&k_list) ||
- k_mode_stop, k_lock);
- spin_unlock_irqrestore(&k_lock, flags2);
- }
- return 0;
-}
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
-
-static struct usb_driver gdm_usb_driver = {
- .name = "gdm_wimax",
- .probe = gdm_usb_probe,
- .disconnect = gdm_usb_disconnect,
- .id_table = id_table,
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- .supports_autosuspend = 1,
- .suspend = gdm_suspend,
- .resume = gdm_resume,
- .reset_resume = gdm_resume,
-#endif
-};
-
-static int __init usb_gdm_wimax_init(void)
-{
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- kthread_run(k_mode_thread, NULL, "k_mode_wimax");
-#endif /* CONFIG_WIMAX_GDM72XX_K_MODE */
- return usb_register(&gdm_usb_driver);
-}
-
-static void __exit usb_gdm_wimax_exit(void)
-{
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- k_mode_stop = 1;
- wake_up(&k_wait);
-#endif
- usb_deregister(&gdm_usb_driver);
-}
-
-module_init(usb_gdm_wimax_init);
-module_exit(usb_gdm_wimax_exit);
-
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_DESCRIPTION("GCT WiMax Device Driver");
-MODULE_AUTHOR("Ethan Park");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm72xx/gdm_usb.h b/drivers/staging/gdm72xx/gdm_usb.h
deleted file mode 100644
index 8e58a25e7143..000000000000
--- a/drivers/staging/gdm72xx/gdm_usb.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_GDM_USB_H__
-#define __GDM72XX_GDM_USB_H__
-
-#include <linux/types.h>
-#include <linux/usb.h>
-#include <linux/list.h>
-
-#define B_DIFF_DL_DRV (1 << 4)
-#define B_DOWNLOAD (1 << 5)
-#define MAX_NR_SDU_BUF 64
-
-struct usb_tx {
- struct list_head list;
-#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
- struct list_head p_list;
-#endif
- struct tx_cxt *tx_cxt;
- struct urb *urb;
- u8 *buf;
- void (*callback)(void *cb_data);
- void *cb_data;
-};
-
-struct tx_cxt {
- struct list_head free_list;
- struct list_head sdu_list;
- struct list_head hci_list;
-#if defined(CONFIG_WIMAX_GDM72XX_USB_PM) || defined(CONFIG_WIMAX_GDM72XX_K_MODE)
- struct list_head pending_list;
-#endif
- spinlock_t lock;
-};
-
-struct usb_rx {
- struct list_head list;
- struct rx_cxt *rx_cxt;
- struct urb *urb;
- u8 *buf;
- void (*callback)(void *cb_data, void *data, int len);
- void *cb_data;
-};
-
-struct rx_cxt {
- struct list_head free_list;
- struct list_head used_list;
- spinlock_t lock;
-};
-
-struct usbwm_dev {
- struct usb_device *usbdev;
-#ifdef CONFIG_WIMAX_GDM72XX_USB_PM
- struct work_struct pm_ws;
-
- struct usb_interface *intf;
-#endif
-#ifdef CONFIG_WIMAX_GDM72XX_K_MODE
- int bw_switch;
- struct list_head list;
-#endif
- struct tx_cxt tx;
- struct rx_cxt rx;
- int padding;
-};
-
-#endif /* __GDM72XX_GDM_USB_H__ */
diff --git a/drivers/staging/gdm72xx/gdm_wimax.c b/drivers/staging/gdm72xx/gdm_wimax.c
deleted file mode 100644
index ba03f9386567..000000000000
--- a/drivers/staging/gdm72xx/gdm_wimax.c
+++ /dev/null
@@ -1,815 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/etherdevice.h>
-#include <asm/byteorder.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/udp.h>
-#include <linux/in.h>
-
-#include "gdm_wimax.h"
-#include "hci.h"
-#include "wm_ioctl.h"
-#include "netlink_k.h"
-
-#define gdm_wimax_send(n, d, l) \
- (n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, NULL, NULL)
-#define gdm_wimax_send_with_cb(n, d, l, c, b) \
- (n->phy_dev->send_func)(n->phy_dev->priv_dev, d, l, c, b)
-#define gdm_wimax_rcv_with_cb(n, c, b) \
- (n->phy_dev->rcv_func)(n->phy_dev->priv_dev, c, b)
-
-#define EVT_MAX_SIZE 2048
-
-struct evt_entry {
- struct list_head list;
- struct net_device *dev;
- char evt_data[EVT_MAX_SIZE];
- int size;
-};
-
-static struct {
- int ref_cnt;
- struct sock *sock;
- struct list_head evtq;
- spinlock_t evt_lock;
- struct list_head freeq;
- struct work_struct ws;
-} wm_event;
-
-static u8 gdm_wimax_macaddr[6] = {0x00, 0x0a, 0x3b, 0xf0, 0x01, 0x30};
-
-static inline int gdm_wimax_header(struct sk_buff **pskb)
-{
- u16 buf[HCI_HEADER_SIZE / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)buf;
- struct sk_buff *skb = *pskb;
-
- if (unlikely(skb_headroom(skb) < HCI_HEADER_SIZE)) {
- struct sk_buff *skb2;
-
- skb2 = skb_realloc_headroom(skb, HCI_HEADER_SIZE);
- if (!skb2)
- return -ENOMEM;
- if (skb->sk)
- skb_set_owner_w(skb2, skb->sk);
- kfree_skb(skb);
- skb = skb2;
- }
-
- skb_push(skb, HCI_HEADER_SIZE);
- hci->cmd_evt = cpu_to_be16(WIMAX_TX_SDU);
- hci->length = cpu_to_be16(skb->len - HCI_HEADER_SIZE);
- memcpy(skb->data, buf, HCI_HEADER_SIZE);
-
- *pskb = skb;
- return 0;
-}
-
-static inline struct evt_entry *alloc_event_entry(void)
-{
- return kmalloc(sizeof(struct evt_entry), GFP_ATOMIC);
-}
-
-static struct evt_entry *get_event_entry(void)
-{
- struct evt_entry *e;
-
- if (list_empty(&wm_event.freeq)) {
- e = alloc_event_entry();
- } else {
- e = list_entry(wm_event.freeq.next, struct evt_entry, list);
- list_del(&e->list);
- }
-
- return e;
-}
-
-static void put_event_entry(struct evt_entry *e)
-{
- BUG_ON(!e);
-
- list_add_tail(&e->list, &wm_event.freeq);
-}
-
-static void gdm_wimax_event_rcv(struct net_device *dev, u16 type, void *msg,
- int len)
-{
- struct nic *nic = netdev_priv(dev);
-
- u8 *buf = msg;
- u16 hci_cmd = (buf[0]<<8) | buf[1];
- u16 hci_len = (buf[2]<<8) | buf[3];
-
- netdev_dbg(dev, "H=>D: 0x%04x(%d)\n", hci_cmd, hci_len);
-
- gdm_wimax_send(nic, msg, len);
-}
-
-static void __gdm_wimax_event_send(struct work_struct *work)
-{
- int idx;
- unsigned long flags;
- struct evt_entry *e;
- struct evt_entry *tmp;
-
- spin_lock_irqsave(&wm_event.evt_lock, flags);
-
- list_for_each_entry_safe(e, tmp, &wm_event.evtq, list) {
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
-
- if (sscanf(e->dev->name, "wm%d", &idx) == 1)
- netlink_send(wm_event.sock, idx, 0, e->evt_data,
- e->size);
-
- spin_lock_irqsave(&wm_event.evt_lock, flags);
- list_del(&e->list);
- put_event_entry(e);
- }
-
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
-}
-
-static int gdm_wimax_event_init(void)
-{
- if (!wm_event.ref_cnt) {
- wm_event.sock = netlink_init(NETLINK_WIMAX,
- gdm_wimax_event_rcv);
- if (wm_event.sock) {
- INIT_LIST_HEAD(&wm_event.evtq);
- INIT_LIST_HEAD(&wm_event.freeq);
- INIT_WORK(&wm_event.ws, __gdm_wimax_event_send);
- spin_lock_init(&wm_event.evt_lock);
- }
- }
-
- if (wm_event.sock) {
- wm_event.ref_cnt++;
- return 0;
- }
-
- pr_err("Creating WiMax Event netlink is failed\n");
- return -1;
-}
-
-static void gdm_wimax_event_exit(void)
-{
- if (wm_event.sock && --wm_event.ref_cnt == 0) {
- struct evt_entry *e, *temp;
- unsigned long flags;
-
- spin_lock_irqsave(&wm_event.evt_lock, flags);
-
- list_for_each_entry_safe(e, temp, &wm_event.evtq, list) {
- list_del(&e->list);
- kfree(e);
- }
- list_for_each_entry_safe(e, temp, &wm_event.freeq, list) {
- list_del(&e->list);
- kfree(e);
- }
-
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
- netlink_exit(wm_event.sock);
- wm_event.sock = NULL;
- }
-}
-
-static int gdm_wimax_event_send(struct net_device *dev, char *buf, int size)
-{
- struct evt_entry *e;
- unsigned long flags;
-
- u16 hci_cmd = ((u8)buf[0]<<8) | (u8)buf[1];
- u16 hci_len = ((u8)buf[2]<<8) | (u8)buf[3];
-
- netdev_dbg(dev, "D=>H: 0x%04x(%d)\n", hci_cmd, hci_len);
-
- spin_lock_irqsave(&wm_event.evt_lock, flags);
-
- e = get_event_entry();
- if (!e) {
- netdev_err(dev, "%s: No memory for event\n", __func__);
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
- return -ENOMEM;
- }
-
- e->dev = dev;
- e->size = size;
- memcpy(e->evt_data, buf, size);
-
- list_add_tail(&e->list, &wm_event.evtq);
- spin_unlock_irqrestore(&wm_event.evt_lock, flags);
-
- schedule_work(&wm_event.ws);
-
- return 0;
-}
-
-static void tx_complete(void *arg)
-{
- struct nic *nic = arg;
-
- if (netif_queue_stopped(nic->netdev))
- netif_wake_queue(nic->netdev);
-}
-
-int gdm_wimax_send_tx(struct sk_buff *skb, struct net_device *dev)
-{
- int ret = 0;
- struct nic *nic = netdev_priv(dev);
-
- ret = gdm_wimax_send_with_cb(nic, skb->data, skb->len, tx_complete,
- nic);
- if (ret == -ENOSPC) {
- netif_stop_queue(dev);
- ret = 0;
- }
-
- if (ret) {
- skb_pull(skb, HCI_HEADER_SIZE);
- return ret;
- }
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len - HCI_HEADER_SIZE;
- kfree_skb(skb);
- return ret;
-}
-
-static int gdm_wimax_tx(struct sk_buff *skb, struct net_device *dev)
-{
- int ret = 0;
-
- ret = gdm_wimax_header(&skb);
- if (ret < 0) {
- skb_pull(skb, HCI_HEADER_SIZE);
- return ret;
- }
-
-#if defined(CONFIG_WIMAX_GDM72XX_QOS)
- ret = gdm_qos_send_hci_pkt(skb, dev);
-#else
- ret = gdm_wimax_send_tx(skb, dev);
-#endif
- return ret;
-}
-
-static int gdm_wimax_set_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- return 0;
-}
-
-static void __gdm_wimax_set_mac_addr(struct net_device *dev, char *mac_addr)
-{
- u16 hci_pkt_buf[32 / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)hci_pkt_buf;
- struct nic *nic = netdev_priv(dev);
-
- /* Since dev is registered as an Ethernet device,
- * ether_setup() has set dev->addr_len to ETH_ALEN.
- */
- memcpy(dev->dev_addr, mac_addr, dev->addr_len);
-
- /* Let lower layer know of this change by sending
- * SetInformation(MAC Address)
- */
- hci->cmd_evt = cpu_to_be16(WIMAX_SET_INFO);
- hci->length = cpu_to_be16(8);
- hci->data[0] = 0; /* T */
- hci->data[1] = 6; /* L */
- memcpy(&hci->data[2], mac_addr, dev->addr_len); /* V */
-
- gdm_wimax_send(nic, hci, HCI_HEADER_SIZE + 8);
-}
-
-/* A driver function */
-static int gdm_wimax_set_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
- if (netif_running(dev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- __gdm_wimax_set_mac_addr(dev, addr->sa_data);
-
- return 0;
-}
-
-static void gdm_wimax_ind_if_updown(struct net_device *dev, int if_up)
-{
- u16 buf[32 / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)buf;
- unsigned char up_down;
-
- up_down = if_up ? WIMAX_IF_UP : WIMAX_IF_DOWN;
-
- /* Indicate updating fsm */
- hci->cmd_evt = cpu_to_be16(WIMAX_IF_UPDOWN);
- hci->length = cpu_to_be16(sizeof(up_down));
- hci->data[0] = up_down;
-
- gdm_wimax_event_send(dev, (char *)hci, HCI_HEADER_SIZE+sizeof(up_down));
-}
-
-static int gdm_wimax_open(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- struct fsm_s *fsm = nic->sdk_data[SIOC_DATA_FSM].buf;
-
- netif_start_queue(dev);
-
- if (fsm && fsm->m_status != M_INIT)
- gdm_wimax_ind_if_updown(dev, 1);
- return 0;
-}
-
-static int gdm_wimax_close(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- struct fsm_s *fsm = nic->sdk_data[SIOC_DATA_FSM].buf;
-
- netif_stop_queue(dev);
-
- if (fsm && fsm->m_status != M_INIT)
- gdm_wimax_ind_if_updown(dev, 0);
- return 0;
-}
-
-static void kdelete(void **buf)
-{
- if (buf && *buf) {
- kfree(*buf);
- *buf = NULL;
- }
-}
-
-static int gdm_wimax_ioctl_get_data(struct udata_s *dst, struct data_s *src)
-{
- int size;
-
- size = dst->size < src->size ? dst->size : src->size;
-
- dst->size = size;
- if (src->size) {
- if (!dst->buf)
- return -EINVAL;
- if (copy_to_user(dst->buf, src->buf, size))
- return -EFAULT;
- }
- return 0;
-}
-
-static int gdm_wimax_ioctl_set_data(struct data_s *dst, struct udata_s *src)
-{
- if (!src->size) {
- dst->size = 0;
- return 0;
- }
-
- if (!src->buf)
- return -EINVAL;
-
- if (!(dst->buf && dst->size == src->size)) {
- kdelete(&dst->buf);
- dst->buf = kmalloc(src->size, GFP_KERNEL);
- if (!dst->buf)
- return -ENOMEM;
- }
-
- if (copy_from_user(dst->buf, src->buf, src->size)) {
- kdelete(&dst->buf);
- return -EFAULT;
- }
- dst->size = src->size;
- return 0;
-}
-
-static void gdm_wimax_cleanup_ioctl(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- int i;
-
- for (i = 0; i < SIOC_DATA_MAX; i++)
- kdelete(&nic->sdk_data[i].buf);
-}
-
-static void gdm_wimax_ind_fsm_update(struct net_device *dev, struct fsm_s *fsm)
-{
- u16 buf[32 / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)buf;
-
- /* Indicate updating fsm */
- hci->cmd_evt = cpu_to_be16(WIMAX_FSM_UPDATE);
- hci->length = cpu_to_be16(sizeof(struct fsm_s));
- memcpy(&hci->data[0], fsm, sizeof(struct fsm_s));
-
- gdm_wimax_event_send(dev, (char *)hci,
- HCI_HEADER_SIZE + sizeof(struct fsm_s));
-}
-
-static void gdm_update_fsm(struct net_device *dev, struct fsm_s *new_fsm)
-{
- struct nic *nic = netdev_priv(dev);
- struct fsm_s *cur_fsm =
- nic->sdk_data[SIOC_DATA_FSM].buf;
-
- if (!cur_fsm)
- return;
-
- if (cur_fsm->m_status != new_fsm->m_status ||
- cur_fsm->c_status != new_fsm->c_status) {
- if (new_fsm->m_status == M_CONNECTED) {
- netif_carrier_on(dev);
- } else if (cur_fsm->m_status == M_CONNECTED) {
- netif_carrier_off(dev);
- #if defined(CONFIG_WIMAX_GDM72XX_QOS)
- gdm_qos_release_list(nic);
- #endif
- }
- gdm_wimax_ind_fsm_update(dev, new_fsm);
- }
-}
-
-static int gdm_wimax_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct wm_req_s *req = (struct wm_req_s *)ifr;
- struct nic *nic = netdev_priv(dev);
- int ret;
- struct fsm_s fsm_buf;
-
- if (cmd != SIOCWMIOCTL)
- return -EOPNOTSUPP;
-
- switch (req->cmd) {
- case SIOCG_DATA:
- case SIOCS_DATA:
- if (req->data_id >= SIOC_DATA_MAX) {
- netdev_err(dev, "%s error: data-index(%d) is invalid!!\n",
- __func__, req->data_id);
- return -EOPNOTSUPP;
- }
- if (req->cmd == SIOCG_DATA) {
- ret = gdm_wimax_ioctl_get_data(
- &req->data, &nic->sdk_data[req->data_id]);
- if (ret < 0)
- return ret;
- } else if (req->cmd == SIOCS_DATA) {
- if (req->data_id == SIOC_DATA_FSM) {
- /* NOTE: gdm_update_fsm should be called
- * before gdm_wimax_ioctl_set_data is called.
- */
- if (copy_from_user(&fsm_buf, req->data.buf,
- sizeof(struct fsm_s)))
- return -EFAULT;
-
- gdm_update_fsm(dev, &fsm_buf);
- }
- ret = gdm_wimax_ioctl_set_data(
- &nic->sdk_data[req->data_id], &req->data);
- if (ret < 0)
- return ret;
- }
- break;
- default:
- netdev_err(dev, "%s: %x unknown ioctl\n", __func__, cmd);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static void gdm_wimax_prepare_device(struct net_device *dev)
-{
- struct nic *nic = netdev_priv(dev);
- u16 buf[32 / sizeof(u16)];
- struct hci_s *hci = (struct hci_s *)buf;
- u16 len = 0;
- u32 val = 0;
- __be32 val_be32;
-
- /* GetInformation mac address */
- len = 0;
- hci->cmd_evt = cpu_to_be16(WIMAX_GET_INFO);
- hci->data[len++] = TLV_T(T_MAC_ADDRESS);
- hci->length = cpu_to_be16(len);
- gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len);
-
- val = T_CAPABILITY_WIMAX | T_CAPABILITY_MULTI_CS;
- #if defined(CONFIG_WIMAX_GDM72XX_QOS)
- val |= T_CAPABILITY_QOS;
- #endif
- #if defined(CONFIG_WIMAX_GDM72XX_WIMAX2)
- val |= T_CAPABILITY_AGGREGATION;
- #endif
-
- /* Set capability */
- len = 0;
- hci->cmd_evt = cpu_to_be16(WIMAX_SET_INFO);
- hci->data[len++] = TLV_T(T_CAPABILITY);
- hci->data[len++] = TLV_L(T_CAPABILITY);
- val_be32 = cpu_to_be32(val);
- memcpy(&hci->data[len], &val_be32, TLV_L(T_CAPABILITY));
- len += TLV_L(T_CAPABILITY);
- hci->length = cpu_to_be16(len);
- gdm_wimax_send(nic, hci, HCI_HEADER_SIZE+len);
-
- netdev_info(dev, "GDM WiMax Set CAPABILITY: 0x%08X\n", val);
-}
-
-static int gdm_wimax_hci_get_tlv(u8 *buf, u8 *T, u16 *L, u8 **V)
-{
- int next_pos;
-
- *T = buf[0];
- if (buf[1] == 0x82) {
- *L = ((u16)buf[2] << 8) | buf[3]; /* big-endian length */
- next_pos = 1/*type*/+3/*len*/;
- } else {
- *L = buf[1];
- next_pos = 1/*type*/+1/*len*/;
- }
- *V = &buf[next_pos];
-
- next_pos += *L/*length of val*/;
- return next_pos;
-}
-
-static int gdm_wimax_get_prepared_info(struct net_device *dev, char *buf,
- int len)
-{
- u8 T, *V;
- u16 L;
- u16 cmd_evt, cmd_len;
- int pos = HCI_HEADER_SIZE;
-
- cmd_evt = be16_to_cpup((const __be16 *)&buf[0]);
- cmd_len = be16_to_cpup((const __be16 *)&buf[2]);
-
- if (len < cmd_len + HCI_HEADER_SIZE) {
- netdev_err(dev, "%s: invalid length [%d/%d]\n", __func__,
- cmd_len + HCI_HEADER_SIZE, len);
- return -1;
- }
-
- if (cmd_evt == WIMAX_GET_INFO_RESULT) {
- if (cmd_len < 2) {
- netdev_err(dev, "%s: len is too short [%x/%d]\n",
- __func__, cmd_evt, len);
- return -1;
- }
-
- pos += gdm_wimax_hci_get_tlv(&buf[pos], &T, &L, &V);
- if (TLV_T(T_MAC_ADDRESS) == T) {
- if (dev->addr_len != L) {
- netdev_err(dev,
- "%s Invalid information result T/L [%x/%d]\n",
- __func__, T, L);
- return -1;
- }
- netdev_info(dev, "MAC change [%pM]->[%pM]\n",
- dev->dev_addr, V);
- memcpy(dev->dev_addr, V, dev->addr_len);
- return 1;
- }
- }
-
- gdm_wimax_event_send(dev, buf, len);
- return 0;
-}
-
-static void gdm_wimax_netif_rx(struct net_device *dev, char *buf, int len)
-{
- struct sk_buff *skb;
- int ret;
-
- skb = dev_alloc_skb(len + 2);
- if (!skb)
- return;
- skb_reserve(skb, 2);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
-
- memcpy(skb_put(skb, len), buf, len);
-
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev); /* pulls the Ethernet header */
-
- ret = in_interrupt() ? netif_rx(skb) : netif_rx_ni(skb);
- if (ret == NET_RX_DROP)
- netdev_err(dev, "%s skb dropped\n", __func__);
-}
-
-static void gdm_wimax_transmit_aggr_pkt(struct net_device *dev, char *buf,
- int len)
-{
- #define HCI_PADDING_BYTE 4
- #define HCI_RESERVED_BYTE 4
- struct hci_s *hci;
- int length;
-
- while (len > 0) {
- hci = (struct hci_s *)buf;
-
- if (hci->cmd_evt != cpu_to_be16(WIMAX_RX_SDU)) {
- netdev_err(dev, "Wrong cmd_evt(0x%04X)\n",
- be16_to_cpu(hci->cmd_evt));
- break;
- }
-
- length = be16_to_cpu(hci->length);
- gdm_wimax_netif_rx(dev, hci->data, length);
-
- if (length & 0x3) {
- /* Add padding size */
- length += HCI_PADDING_BYTE - (length & 0x3);
- }
-
- length += HCI_HEADER_SIZE + HCI_RESERVED_BYTE;
- len -= length;
- buf += length;
- }
-}
-
-static void gdm_wimax_transmit_pkt(struct net_device *dev, char *buf, int len)
-{
- #if defined(CONFIG_WIMAX_GDM72XX_QOS)
- struct nic *nic = netdev_priv(dev);
- #endif
- u16 cmd_evt, cmd_len;
-
- /* Ignore zero-length RX packets. */
- if (len == 0)
- return;
-
- cmd_evt = be16_to_cpup((const __be16 *)&buf[0]);
- cmd_len = be16_to_cpup((const __be16 *)&buf[2]);
-
- if (len < cmd_len + HCI_HEADER_SIZE) {
- if (len)
- netdev_err(dev, "%s: invalid length [%d/%d]\n",
- __func__, cmd_len + HCI_HEADER_SIZE, len);
- return;
- }
-
- switch (cmd_evt) {
- case WIMAX_RX_SDU_AGGR:
- gdm_wimax_transmit_aggr_pkt(dev, &buf[HCI_HEADER_SIZE],
- cmd_len);
- break;
- case WIMAX_RX_SDU:
- gdm_wimax_netif_rx(dev, &buf[HCI_HEADER_SIZE], cmd_len);
- break;
- #if defined(CONFIG_WIMAX_GDM72XX_QOS)
- case WIMAX_EVT_MODEM_REPORT:
- gdm_recv_qos_hci_packet(nic, buf, len);
- break;
- #endif
- case WIMAX_SDU_TX_FLOW:
- if (buf[4] == 0) {
- if (!netif_queue_stopped(dev))
- netif_stop_queue(dev);
- } else if (buf[4] == 1) {
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
- }
- break;
- default:
- gdm_wimax_event_send(dev, buf, len);
- break;
- }
-}
-
-static void rx_complete(void *arg, void *data, int len)
-{
- struct nic *nic = arg;
-
- gdm_wimax_transmit_pkt(nic->netdev, data, len);
- gdm_wimax_rcv_with_cb(nic, rx_complete, nic);
-}
-
-static void prepare_rx_complete(void *arg, void *data, int len)
-{
- struct nic *nic = arg;
- int ret;
-
- ret = gdm_wimax_get_prepared_info(nic->netdev, data, len);
- if (ret == 1) {
- gdm_wimax_rcv_with_cb(nic, rx_complete, nic);
- } else {
- if (ret < 0)
- netdev_err(nic->netdev,
- "get_prepared_info failed(%d)\n", ret);
- gdm_wimax_rcv_with_cb(nic, prepare_rx_complete, nic);
- }
-}
-
-static void start_rx_proc(struct nic *nic)
-{
- gdm_wimax_rcv_with_cb(nic, prepare_rx_complete, nic);
-}
-
-static const struct net_device_ops gdm_netdev_ops = {
- .ndo_open = gdm_wimax_open,
- .ndo_stop = gdm_wimax_close,
- .ndo_set_config = gdm_wimax_set_config,
- .ndo_start_xmit = gdm_wimax_tx,
- .ndo_set_mac_address = gdm_wimax_set_mac_addr,
- .ndo_do_ioctl = gdm_wimax_ioctl,
-};
-
-int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev)
-{
- struct nic *nic = NULL;
- struct net_device *dev;
- int ret;
-
- dev = alloc_netdev(sizeof(*nic), "wm%d", NET_NAME_UNKNOWN,
- ether_setup);
-
- if (!dev) {
- pr_err("alloc_etherdev failed\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(dev, pdev);
- dev->mtu = 1400;
- dev->netdev_ops = &gdm_netdev_ops;
- dev->flags &= ~IFF_MULTICAST;
- memcpy(dev->dev_addr, gdm_wimax_macaddr, sizeof(gdm_wimax_macaddr));
-
- nic = netdev_priv(dev);
- nic->netdev = dev;
- nic->phy_dev = phy_dev;
- phy_dev->netdev = dev;
-
- /* event socket init */
- ret = gdm_wimax_event_init();
- if (ret < 0) {
- pr_err("Cannot create event.\n");
- goto cleanup;
- }
-
- ret = register_netdev(dev);
- if (ret)
- goto cleanup;
-
- netif_carrier_off(dev);
-
-#ifdef CONFIG_WIMAX_GDM72XX_QOS
- gdm_qos_init(nic);
-#endif
-
- start_rx_proc(nic);
-
- /* Prepare WiMax device */
- gdm_wimax_prepare_device(dev);
-
- return 0;
-
-cleanup:
- pr_err("register_netdev failed\n");
- free_netdev(dev);
- return ret;
-}
-
-void unregister_wimax_device(struct phy_dev *phy_dev)
-{
- struct nic *nic = netdev_priv(phy_dev->netdev);
- struct fsm_s *fsm = nic->sdk_data[SIOC_DATA_FSM].buf;
-
- if (fsm)
- fsm->m_status = M_INIT;
- unregister_netdev(nic->netdev);
-
- gdm_wimax_event_exit();
-
-#if defined(CONFIG_WIMAX_GDM72XX_QOS)
- gdm_qos_release_list(nic);
-#endif
-
- gdm_wimax_cleanup_ioctl(phy_dev->netdev);
-
- free_netdev(nic->netdev);
-}
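gdm_wimax_hci_get_tlv() above walks the TLV stream of a GetInformation result, where a length byte of 0x82 means the real length follows as a 16-bit big-endian value. A standalone sketch of the same parser (get_tlv() is an illustrative stand-in, not driver code):

    #include <stdint.h>

    /* Parse one TLV and return the bytes consumed. */
    static int get_tlv(const uint8_t *buf, uint8_t *t, uint16_t *l,
                       const uint8_t **v)
    {
            int next_pos;

            *t = buf[0];
            if (buf[1] == 0x82) {           /* escaped 16-bit length */
                    *l = ((uint16_t)buf[2] << 8) | buf[3];
                    next_pos = 1 /* type */ + 3 /* len */;
            } else {                        /* plain 8-bit length */
                    *l = buf[1];
                    next_pos = 1 /* type */ + 1 /* len */;
            }
            *v = &buf[next_pos];

            return next_pos + *l;
    }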
diff --git a/drivers/staging/gdm72xx/gdm_wimax.h b/drivers/staging/gdm72xx/gdm_wimax.h
deleted file mode 100644
index 3330cd798c69..000000000000
--- a/drivers/staging/gdm72xx/gdm_wimax.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_GDM_WIMAX_H__
-#define __GDM72XX_GDM_WIMAX_H__
-
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include "wm_ioctl.h"
-#if defined(CONFIG_WIMAX_GDM72XX_QOS)
-#include "gdm_qos.h"
-#endif
-
-#define DRIVER_VERSION "3.2.3"
-
-struct phy_dev {
- void *priv_dev;
- struct net_device *netdev;
- int (*send_func)(void *priv_dev, void *data, int len,
- void (*cb)(void *cb_data), void *cb_data);
- int (*rcv_func)(void *priv_dev,
- void (*cb)(void *cb_data, void *data, int len),
- void *cb_data);
-};
-
-struct nic {
- struct net_device *netdev;
- struct phy_dev *phy_dev;
- struct data_s sdk_data[SIOC_DATA_MAX];
-#if defined(CONFIG_WIMAX_GDM72XX_QOS)
- struct qos_cb_s qos;
-#endif
-};
-
-int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev);
-int gdm_wimax_send_tx(struct sk_buff *skb, struct net_device *dev);
-void unregister_wimax_device(struct phy_dev *phy_dev);
-
-#endif /* __GDM72XX_GDM_WIMAX_H__ */
diff --git a/drivers/staging/gdm72xx/hci.h b/drivers/staging/gdm72xx/hci.h
deleted file mode 100644
index 10a6bfa6e998..000000000000
--- a/drivers/staging/gdm72xx/hci.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_HCI_H__
-#define __GDM72XX_HCI_H__
-
-#define HCI_HEADER_SIZE 4
-#define HCI_VALUE_OFFS (HCI_HEADER_SIZE)
-#define HCI_MAX_PACKET 2048
-#define HCI_MAX_PARAM (HCI_MAX_PACKET-HCI_HEADER_SIZE)
-#define HCI_MAX_TLV 32
-
-/* CMD-EVT */
-
-/* Category 0 */
-#define WIMAX_RESET 0x0000
-#define WIMAX_SET_INFO 0x0001
-#define WIMAX_GET_INFO 0x0002
-#define WIMAX_GET_INFO_RESULT 0x8003
-#define WIMAX_RADIO_OFF 0x0004
-#define WIMAX_RADIO_ON 0x0006
-#define WIMAX_WIMAX_RESET 0x0007 /* is this still used? */
-
-/* Category 1 */
-#define WIMAX_NET_ENTRY 0x0100
-#define WIMAX_NET_DISCONN 0x0102
-#define WIMAX_ENTER_SLEEP 0x0103
-#define WIMAX_EXIT_SLEEP 0x0104
-#define WIMAX_ENTER_IDLE 0x0105
-#define WIMAX_EXIT_IDLE 0x0106
-#define WIMAX_MODE_CHANGE 0x8108
-#define WIMAX_HANDOVER 0x8109 /* obsolete */
-#define WIMAX_SCAN 0x010d
-#define WIMAX_SCAN_COMPLETE 0x810e
-#define WIMAX_SCAN_RESULT 0x810f
-#define WIMAX_CONNECT 0x0110
-#define WIMAX_CONNECT_START 0x8111
-#define WIMAX_CONNECT_COMPLETE 0x8112
-#define WIMAX_ASSOC_START 0x8113
-#define WIMAX_ASSOC_COMPLETE 0x8114
-#define WIMAX_DISCONN_IND 0x8115
-#define WIMAX_ENTRY_IND 0x8116
-#define WIMAX_HO_START 0x8117
-#define WIMAX_HO_COMPLETE 0x8118
-#define WIMAX_RADIO_STATE_IND 0x8119
-#define WIMAX_IP_RENEW_IND 0x811a
-#define WIMAX_DISCOVER_NSP 0x011d
-#define WIMAX_DISCOVER_NSP_RESULT 0x811e
-#define WIMAX_SDU_TX_FLOW 0x8125
-
-/* Category 2 */
-#define WIMAX_TX_EAP 0x0200
-#define WIMAX_RX_EAP 0x8201
-#define WIMAX_TX_SDU 0x0202
-#define WIMAX_RX_SDU 0x8203
-#define WIMAX_RX_SDU_AGGR 0x8204
-#define WIMAX_TX_SDU_AGGR 0x0205
-
-/* Category 3 */
-#define WIMAX_DM_CMD 0x030a
-#define WIMAX_DM_RSP 0x830b
-
-#define WIMAX_CLI_CMD 0x030c
-#define WIMAX_CLI_RSP 0x830d
-
-#define WIMAX_DL_IMAGE 0x0310
-#define WIMAX_DL_IMAGE_STATUS 0x8311
-#define WIMAX_UL_IMAGE 0x0312
-#define WIMAX_UL_IMAGE_RESULT 0x8313
-#define WIMAX_UL_IMAGE_STATUS 0x0314
-#define WIMAX_EVT_MODEM_REPORT 0x8325
-
-/* Category 0xF */
-#define WIMAX_FSM_UPDATE 0x8F01
-#define WIMAX_IF_UPDOWN 0x8F02
-#define WIMAX_IF_UP 1
-#define WIMAX_IF_DOWN 2
-
-/* WIMAX mode */
-#define W_NULL 0
-#define W_STANDBY 1
-#define W_OOZ 2
-#define W_AWAKE 3
-#define W_IDLE 4
-#define W_SLEEP 5
-#define W_WAIT 6
-
-#define W_NET_ENTRY_RNG 0x80
-#define W_NET_ENTRY_SBC 0x81
-#define W_NET_ENTRY_PKM 0x82
-#define W_NET_ENTRY_REG 0x83
-#define W_NET_ENTRY_DSX 0x84
-
-#define W_NET_ENTRY_RNG_FAIL 0x1100100
-#define W_NET_ENTRY_SBC_FAIL 0x1100200
-#define W_NET_ENTRY_PKM_FAIL 0x1102000
-#define W_NET_ENTRY_REG_FAIL 0x1103000
-#define W_NET_ENTRY_DSX_FAIL 0x1104000
-
-/* Scan Type */
-#define W_SCAN_ALL_CHANNEL 0
-#define W_SCAN_ALL_SUBSCRIPTION 1
-#define W_SCAN_SPECIFIED_SUBSCRIPTION 2
-
-/* TLV
- *
- * [31:31] indicates the type is composite.
- * [30:16] is the length of the type. 0 length means length is variable.
- * [15:0] is the actual type.
- */
-#define TLV_L(x) (((x) >> 16) & 0xff)
-#define TLV_T(x) ((x) & 0xff)
-#define TLV_COMPOSITE(x) ((x) >> 31)
-
-/* GENERAL */
-#define T_MAC_ADDRESS (0x00 | (6 << 16))
-#define T_BSID (0x01 | (6 << 16))
-#define T_MSK (0x02 | (64 << 16))
-#define T_RSSI_THRSHLD (0x03 | (1 << 16))
-#define T_FREQUENCY (0x04 | (4 << 16))
-#define T_CONN_CS_TYPE (0x05 | (1 << 16))
-#define T_HOST_IP_VER (0x06 | (1 << 16))
-#define T_STBY_SCAN_INTERVAL (0x07 | (4 << 16))
-#define T_OOZ_SCAN_INTERVAL (0x08 | (4 << 16))
-#define T_IMEI (0x09 | (8 << 16))
-#define T_PID (0x0a | (12 << 16))
-#define T_CAPABILITY (0x1a | (4 << 16))
-#define T_RELEASE_NUMBER (0x1b | (4 << 16))
-#define T_DRIVER_REVISION (0x1c | (4 << 16))
-#define T_FW_REVISION (0x1d | (4 << 16))
-#define T_MAC_HW_REVISION (0x1e | (4 << 16))
-#define T_PHY_HW_REVISION (0x1f | (4 << 16))
-
-/* HANDOVER */
-#define T_SCAN_INTERVAL (0x20 | (1 << 16))
-#define T_RSC_RETAIN_TIME (0x2f | (2 << 16))
-
-/* SLEEP */
-#define T_TYPE1_ISW (0x40 | (1 << 16))
-#define T_SLP_START_TO (0x4a | (2 << 16))
-
-/* IDLE */
-#define T_IDLE_MODE_TO (0x50 | (2 << 16))
-#define T_IDLE_START_TO (0x54 | (2 << 16))
-
-/* MONITOR */
-#define T_RSSI (0x60 | (1 << 16))
-#define T_CINR (0x61 | (1 << 16))
-#define T_TX_POWER (0x6a | (1 << 16))
-#define T_CUR_FREQ (0x7f | (4 << 16))
-
-
-/* WIMAX */
-#define T_MAX_SUBSCRIPTION (0xa1 | (1 << 16))
-#define T_MAX_SF (0xa2 | (1 << 16))
-#define T_PHY_TYPE (0xa3 | (1 << 16))
-#define T_PKM (0xa4 | (1 << 16))
-#define T_AUTH_POLICY (0xa5 | (1 << 16))
-#define T_CS_TYPE (0xa6 | (2 << 16))
-#define T_VENDOR_NAME (0xa7 | (0 << 16))
-#define T_MOD_NAME (0xa8 | (0 << 16))
-#define T_PACKET_FILTER (0xa9 | (1 << 16))
-#define T_NSP_CHANGE_COUNT (0xaa | (4 << 16))
-#define T_RADIO_STATE (0xab | (1 << 16))
-#define T_URI_CONTACT_TYPE (0xac | (1 << 16))
-#define T_URI_TEXT (0xad | (0 << 16))
-#define T_URI (0xae | (0 << 16))
-#define T_ENABLE_AUTH (0xaf | (1 << 16))
-#define T_TIMEOUT (0xb0 | (2 << 16))
-#define T_RUN_MODE (0xb1 | (1 << 16))
-#define T_OMADMT_VER (0xb2 | (4 << 16))
-/* This is measured in seconds from 00:00:00 GMT January 1, 1970. */
-#define T_RTC_TIME (0xb3 | (4 << 16))
-#define T_CERT_STATUS (0xb4 | (4 << 16))
-#define T_CERT_MASK (0xb5 | (4 << 16))
-#define T_EMSK (0xb6 | (64 << 16))
-
-/* Subscription TLV */
-#define T_SUBSCRIPTION_LIST (0xd1 | (0 << 16) | (1 << 31))
-#define T_H_NSPID (0xd2 | (3 << 16))
-#define T_NSP_NAME (0xd3 | (0 << 16))
-#define T_SUBSCRIPTION_NAME (0xd4 | (0 << 16))
-#define T_SUBSCRIPTION_FLAG (0xd5 | (2 << 16))
-#define T_V_NSPID (0xd6 | (3 << 16))
-#define T_NAP_ID (0xd7 | (3 << 16))
-#define T_PREAMBLES (0xd8 | (15 << 16))
-#define T_BW (0xd9 | (4 << 16))
-#define T_FFTSIZE (0xda | (4 << 16))
-#define T_DUPLEX_MODE (0xdb | (4 << 16))
-
-/* T_CAPABILITY */
-#define T_CAPABILITY_MULTI_CS (1 << 0)
-#define T_CAPABILITY_WIMAX (1 << 1)
-#define T_CAPABILITY_QOS (1 << 2)
-#define T_CAPABILITY_AGGREGATION (1 << 3)
-
-struct hci_s {
- __be16 cmd_evt;
- __be16 length;
- u8 data[0];
-} __packed;
-
-#endif /* __GDM72XX_HCI_H__ */
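The T_* constants above pack a TLV's type into the low 16 bits and its fixed value length into the bits that TLV_L() extracts, so one #define describes both. A small worked example using only the macros defined above:

    #include <stdio.h>

    #define TLV_L(x) (((x) >> 16) & 0xff)
    #define TLV_T(x) ((x) & 0xff)
    #define T_CAPABILITY (0x1a | (4 << 16))

    int main(void)
    {
            /* T_CAPABILITY is type 0x1a with a fixed 4-byte value. */
            printf("type=0x%02x len=%d\n",
                   TLV_T(T_CAPABILITY), (int)TLV_L(T_CAPABILITY));
            return 0;       /* prints: type=0x1a len=4 */
    }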
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
deleted file mode 100644
index f3cdaa6c468c..000000000000
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <net/netlink.h>
-#include <asm/byteorder.h>
-#include <net/sock.h>
-#include "netlink_k.h"
-
-#if !defined(NLMSG_HDRLEN)
-#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
-#endif
-
-#define ND_MAX_GROUP 30
-#define ND_IFINDEX_LEN sizeof(int)
-#define ND_NLMSG_SPACE(len) (nlmsg_total_size(len) + ND_IFINDEX_LEN)
-#define ND_NLMSG_DATA(nlh) \
- ((void *)((char *)nlmsg_data(nlh) + ND_IFINDEX_LEN))
-#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
-#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
-#define ND_NLMSG_IFIDX(nlh) nlmsg_data(nlh)
-#define ND_MAX_MSG_LEN 8096
-
-#if defined(DEFINE_MUTEX)
-static DEFINE_MUTEX(netlink_mutex);
-#else
-static struct semaphore netlink_mutex;
-#define mutex_lock(x) down(x)
-#define mutex_unlock(x) up(x)
-#endif
-
-static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
-
-static void netlink_rcv_cb(struct sk_buff *skb)
-{
- struct nlmsghdr *nlh;
- struct net_device *dev;
- u32 mlen;
- void *msg;
- int ifindex;
-
- if (skb->len >= NLMSG_HDRLEN) {
- nlh = (struct nlmsghdr *)skb->data;
-
- if (skb->len < nlh->nlmsg_len ||
- nlh->nlmsg_len > ND_MAX_MSG_LEN) {
- netdev_err(skb->dev, "Invalid length (%d,%d)\n",
- skb->len, nlh->nlmsg_len);
- return;
- }
-
- memcpy(&ifindex, ND_NLMSG_IFIDX(nlh), ND_IFINDEX_LEN);
- msg = ND_NLMSG_DATA(nlh);
- mlen = ND_NLMSG_R_LEN(nlh);
-
- if (rcv_cb) {
- dev = dev_get_by_index(&init_net, ifindex);
- if (dev) {
- rcv_cb(dev, nlh->nlmsg_type, msg, mlen);
- dev_put(dev);
- } else
- netdev_err(skb->dev,
- "dev_get_by_index(%d) is not found.\n",
- ifindex);
- } else {
- netdev_err(skb->dev, "Unregistered Callback\n");
- }
- }
-}
-
-static void netlink_rcv(struct sk_buff *skb)
-{
- mutex_lock(&netlink_mutex);
- netlink_rcv_cb(skb);
- mutex_unlock(&netlink_mutex);
-}
-
-struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
- void *msg, int len))
-{
- struct sock *sock;
- struct netlink_kernel_cfg cfg = {
- .input = netlink_rcv,
- };
-
-#if !defined(DEFINE_MUTEX)
- init_MUTEX(&netlink_mutex);
-#endif
-
- sock = netlink_kernel_create(&init_net, unit, &cfg);
-
- if (sock)
- rcv_cb = cb;
-
- return sock;
-}
-
-void netlink_exit(struct sock *sock)
-{
- netlink_kernel_release(sock);
-}
-
-int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
-{
- static u32 seq;
- struct sk_buff *skb = NULL;
- struct nlmsghdr *nlh;
- int ret = 0;
-
- if (group > ND_MAX_GROUP) {
- pr_err("Group %d is invalid.\n", group);
- pr_err("Valid group is 0 ~ %d.\n", ND_MAX_GROUP);
- return -EINVAL;
- }
-
- skb = nlmsg_new(len, GFP_ATOMIC);
- if (!skb) {
- pr_err("netlink_broadcast ret=%d\n", ret);
- return -ENOMEM;
- }
-
- seq++;
- nlh = nlmsg_put(skb, 0, seq, type, len, 0);
- if (!nlh) {
- kfree_skb(skb);
- return -EMSGSIZE;
- }
- memcpy(nlmsg_data(nlh), msg, len);
-
- NETLINK_CB(skb).portid = 0;
- NETLINK_CB(skb).dst_group = 0;
-
- ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
-
- if (!ret)
- return len;
- if (ret != -ESRCH) {
- pr_err("netlink_broadcast g=%d, t=%d, l=%d, r=%d\n",
- group, type, len, ret);
- }
- ret = 0;
- return ret;
-}
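For context, the calling pattern gdm_wimax.c uses with this helper is: create one socket with netlink_init(), broadcast device events with netlink_send() using the wm%d interface index as the group, and tear down with netlink_exit(). A kernel-side sketch with hypothetical example_* names:

    #include <linux/netdevice.h>
    #include <net/sock.h>
    #include "netlink_k.h"

    #define NETLINK_WIMAX 31        /* from wm_ioctl.h */

    static struct sock *nl_sock;

    /* Invoked for every netlink message arriving from userspace. */
    static void example_rcv(struct net_device *dev, u16 type, void *msg,
                            int len)
    {
            /* hand msg to the device's transmit path ... */
    }

    static int example_setup(void *evt, int evt_len)
    {
            nl_sock = netlink_init(NETLINK_WIMAX, example_rcv);
            if (!nl_sock)
                    return -ENODEV;

            /* group 0 reaches listeners for interface wm0 */
            return netlink_send(nl_sock, 0, 0, evt, evt_len);
    }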
diff --git a/drivers/staging/gdm72xx/netlink_k.h b/drivers/staging/gdm72xx/netlink_k.h
deleted file mode 100644
index 1fe7198d539e..000000000000
--- a/drivers/staging/gdm72xx/netlink_k.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_NETLINK_K_H__
-#define __GDM72XX_NETLINK_K_H__
-
-#include <linux/netdevice.h>
-#include <net/sock.h>
-
-struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
- void *msg, int len));
-void netlink_exit(struct sock *sock);
-int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
-
-#endif /* __GDM72XX_NETLINK_K_H__ */
diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c
deleted file mode 100644
index ba94b5f13bb2..000000000000
--- a/drivers/staging/gdm72xx/sdio_boot.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-
-#include <linux/firmware.h>
-
-#include "gdm_sdio.h"
-#include "sdio_boot.h"
-
-#define TYPE_A_HEADER_SIZE 4
-#define TYPE_A_LOOKAHEAD_SIZE 16
-#define YMEM0_SIZE 0x8000 /* 32kbytes */
-#define DOWNLOAD_SIZE (YMEM0_SIZE - TYPE_A_HEADER_SIZE)
-
-#define FW_DIR "gdm72xx/"
-#define FW_KRN "gdmskrn.bin"
-#define FW_RFS "gdmsrfs.bin"
-
-static u8 *tx_buf;
-
-static int ack_ready(struct sdio_func *func)
-{
- unsigned long wait = jiffies + HZ;
- u8 val;
- int ret;
-
- while (time_before(jiffies, wait)) {
- val = sdio_readb(func, 0x13, &ret);
- if (val & 0x01)
- return 1;
- schedule();
- }
-
- return 0;
-}
-
-static int download_image(struct sdio_func *func, const char *img_name)
-{
- int ret = 0, len, pno;
- u8 *buf = tx_buf;
- loff_t pos = 0;
- int img_len;
- const struct firmware *firm;
-
- ret = request_firmware(&firm, img_name, &func->dev);
- if (ret < 0) {
- dev_err(&func->dev,
- "requesting firmware %s failed with error %d\n",
- img_name, ret);
- return ret;
- }
-
- buf = kmalloc(DOWNLOAD_SIZE + TYPE_A_HEADER_SIZE, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- img_len = firm->size;
-
- if (img_len <= 0) {
- ret = -EINVAL;
- goto out;
- }
-
- pno = 0;
- while (img_len > 0) {
- if (img_len > DOWNLOAD_SIZE) {
- len = DOWNLOAD_SIZE;
- buf[3] = 0;
- } else {
- len = img_len; /* the last packet */
- buf[3] = 2;
- }
-
- buf[0] = len & 0xff;
- buf[1] = (len >> 8) & 0xff;
- buf[2] = (len >> 16) & 0xff;
-
- memcpy(buf+TYPE_A_HEADER_SIZE, firm->data + pos, len);
- ret = sdio_memcpy_toio(func, 0, buf, len + TYPE_A_HEADER_SIZE);
- if (ret < 0) {
- dev_err(&func->dev,
- "send image error: packet number = %d ret = %d\n",
- pno, ret);
- goto out;
- }
-
- if (buf[3] == 2) /* The last packet */
- break;
- if (!ack_ready(func)) {
- ret = -EIO;
- dev_err(&func->dev, "Ack is not ready.\n");
- goto out;
- }
- ret = sdio_memcpy_fromio(func, buf, 0, TYPE_A_LOOKAHEAD_SIZE);
- if (ret < 0) {
- dev_err(&func->dev,
- "receive ack error: packet number = %d ret = %d\n",
- pno, ret);
- goto out;
- }
- sdio_writeb(func, 0x01, 0x13, &ret);
- sdio_writeb(func, 0x00, 0x10, &ret); /* PCRRT */
-
- img_len -= DOWNLOAD_SIZE;
- pos += DOWNLOAD_SIZE;
- pno++;
- }
-
-out:
- release_firmware(firm);
- kfree(buf);
- return ret;
-}
-
-int sdio_boot(struct sdio_func *func)
-{
- int ret;
- const char *krn_name = FW_DIR FW_KRN;
- const char *rfs_name = FW_DIR FW_RFS;
-
- tx_buf = kmalloc(YMEM0_SIZE, GFP_KERNEL);
- if (!tx_buf)
- return -ENOMEM;
-
- ret = download_image(func, krn_name);
- if (ret)
- goto restore_fs;
- dev_info(&func->dev, "GCT: Kernel download success.\n");
-
- ret = download_image(func, rfs_name);
- if (ret)
- goto restore_fs;
- dev_info(&func->dev, "GCT: Filesystem download success.\n");
-
-restore_fs:
- kfree(tx_buf);
- return ret;
-}
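Each chunk pushed by download_image() above begins with a 4-byte type-A header: bytes 0-2 carry the payload length in little-endian order and byte 3 is a flag, 0 for an intermediate chunk and 2 for the last one. A standalone sketch of that encoding:

    #include <stdint.h>

    /* Fill the header exactly as download_image() fills buf[0..3]. */
    static void type_a_header(uint8_t *buf, uint32_t len, int last)
    {
            buf[0] = len & 0xff;            /* 24-bit length, LE */
            buf[1] = (len >> 8) & 0xff;
            buf[2] = (len >> 16) & 0xff;
            buf[3] = last ? 2 : 0;          /* 2 marks the final chunk */
    }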
diff --git a/drivers/staging/gdm72xx/sdio_boot.h b/drivers/staging/gdm72xx/sdio_boot.h
deleted file mode 100644
index e0800c6fe2fd..000000000000
--- a/drivers/staging/gdm72xx/sdio_boot.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_SDIO_BOOT_H__
-#define __GDM72XX_SDIO_BOOT_H__
-
-struct sdio_func;
-
-int sdio_boot(struct sdio_func *func);
-
-#endif /* __GDM72XX_SDIO_BOOT_H__ */
diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c
deleted file mode 100644
index 39ca34031a6b..000000000000
--- a/drivers/staging/gdm72xx/usb_boot.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/usb.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/firmware.h>
-
-#include <asm/byteorder.h>
-#include "gdm_usb.h"
-#include "usb_boot.h"
-
-#define DN_KERNEL_MAGIC_NUMBER 0x10760001
-#define DN_ROOTFS_MAGIC_NUMBER 0x10760002
-
-#define DOWNLOAD_SIZE 1024
-
-#define MAX_IMG_CNT 16
-#define FW_DIR "gdm72xx/"
-#define FW_UIMG "gdmuimg.bin"
-#define FW_KERN "zImage"
-#define FW_FS "ramdisk.jffs2"
-
-struct dn_header {
- __be32 magic_num;
- __be32 file_size;
-};
-
-struct img_header {
- u32 magic_code;
- u32 count;
- u32 len;
- u32 offset[MAX_IMG_CNT];
- char hostname[32];
- char date[32];
-};
-
-struct fw_info {
- u32 id;
- u32 len;
- u32 kernel_len;
- u32 rootfs_len;
- u32 kernel_offset;
- u32 rootfs_offset;
- u32 fw_ver;
- u32 mac_ver;
- char hostname[32];
- char userid[16];
- char date[32];
- char user_desc[128];
-};
-
-static void array_le32_to_cpu(u32 *arr, int num)
-{
- int i;
-
- for (i = 0; i < num; i++, arr++)
- le32_to_cpus(arr);
-}
-
-static u8 *tx_buf;
-
-static int gdm_wibro_send(struct usb_device *usbdev, void *data, int len)
-{
- int ret;
- int actual;
-
- ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 1), data, len,
- &actual, 1000);
-
- if (ret < 0) {
- dev_err(&usbdev->dev, "Error : usb_bulk_msg ( result = %d )\n",
- ret);
- return ret;
- }
- return 0;
-}
-
-static int gdm_wibro_recv(struct usb_device *usbdev, void *data, int len)
-{
- int ret;
- int actual;
-
- ret = usb_bulk_msg(usbdev, usb_rcvbulkpipe(usbdev, 2), data, len,
- &actual, 5000);
-
- if (ret < 0) {
- dev_err(&usbdev->dev,
- "Error : usb_bulk_msg(recv) ( result = %d )\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int download_image(struct usb_device *usbdev,
- const struct firmware *firm,
- loff_t pos, u32 img_len, u32 magic_num)
-{
- struct dn_header h;
- int ret = 0;
- u32 size;
-
- size = ALIGN(img_len, DOWNLOAD_SIZE);
- h.magic_num = cpu_to_be32(magic_num);
- h.file_size = cpu_to_be32(size);
-
- ret = gdm_wibro_send(usbdev, &h, sizeof(h));
- if (ret < 0)
- return ret;
-
- while (img_len > 0) {
- if (img_len > DOWNLOAD_SIZE)
- size = DOWNLOAD_SIZE;
- else
- size = img_len; /* the last chunk of data */
-
- memcpy(tx_buf, firm->data + pos, size);
- ret = gdm_wibro_send(usbdev, tx_buf, size);
-
- if (ret < 0)
- return ret;
-
- img_len -= size;
- pos += size;
- }
-
- return ret;
-}
-
-int usb_boot(struct usb_device *usbdev, u16 pid)
-{
- int i, ret = 0;
- struct img_header hdr;
- struct fw_info fw_info;
- loff_t pos = 0;
- char *img_name = FW_DIR FW_UIMG;
- const struct firmware *firm;
-
- ret = request_firmware(&firm, img_name, &usbdev->dev);
- if (ret < 0) {
- dev_err(&usbdev->dev,
- "requesting firmware %s failed with error %d\n",
- img_name, ret);
- return ret;
- }
-
- tx_buf = kmalloc(DOWNLOAD_SIZE, GFP_KERNEL);
- if (!tx_buf) {
- release_firmware(firm);
- return -ENOMEM;
- }
-
- if (firm->size < sizeof(hdr)) {
- dev_err(&usbdev->dev, "Cannot read the image info.\n");
- ret = -EIO;
- goto out;
- }
- memcpy(&hdr, firm->data, sizeof(hdr));
-
- array_le32_to_cpu((u32 *)&hdr, 19);
-
- if (hdr.count > MAX_IMG_CNT) {
- dev_err(&usbdev->dev, "Too many images. %d\n", hdr.count);
- ret = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < hdr.count; i++) {
- if (hdr.offset[i] > hdr.len) {
- dev_err(&usbdev->dev,
- "Invalid offset. Entry = %d Offset = 0x%08x Image length = 0x%08x\n",
- i, hdr.offset[i], hdr.len);
- ret = -EINVAL;
- goto out;
- }
-
- pos = hdr.offset[i];
- if (firm->size < sizeof(fw_info) + pos) {
- dev_err(&usbdev->dev, "Cannot read the FW info.\n");
- ret = -EIO;
- goto out;
- }
- memcpy(&fw_info, firm->data + pos, sizeof(fw_info));
-
- array_le32_to_cpu((u32 *)&fw_info, 8);
-
- if ((fw_info.id & 0xffff) != pid)
- continue;
-
- pos = hdr.offset[i] + fw_info.kernel_offset;
- if (firm->size < fw_info.kernel_len + pos) {
- dev_err(&usbdev->dev, "Kernel FW is too small.\n");
- ret = -EIO;
- goto out;
- }
-
- ret = download_image(usbdev, firm, pos, fw_info.kernel_len,
- DN_KERNEL_MAGIC_NUMBER);
- if (ret < 0)
- goto out;
- dev_info(&usbdev->dev, "GCT: Kernel download success.\n");
-
- pos = hdr.offset[i] + fw_info.rootfs_offset;
- if (firm->size < fw_info.rootfs_len + pos) {
- dev_err(&usbdev->dev, "Filesystem FW is too small.\n");
- ret = -EIO;
- goto out;
- }
- ret = download_image(usbdev, firm, pos, fw_info.rootfs_len,
- DN_ROOTFS_MAGIC_NUMBER);
- if (ret < 0)
- goto out;
- dev_info(&usbdev->dev, "GCT: Filesystem download success.\n");
-
- break;
- }
-
- if (i == hdr.count) {
- dev_err(&usbdev->dev, "Firmware for gsk%x is not installed.\n",
- pid);
- ret = -EINVAL;
- }
-out:
- release_firmware(firm);
- kfree(tx_buf);
- return ret;
-}
-
-/*#define GDM7205_PADDING 256 */
-#define DOWNLOAD_CHUNK 2048
-#define KERNEL_TYPE_STRING "linux"
-#define FS_TYPE_STRING "rootfs"
-
-static int em_wait_ack(struct usb_device *usbdev, int send_zlp)
-{
- int ack;
- int ret = -1;
-
- if (send_zlp) {
- /*Send ZLP*/
- ret = gdm_wibro_send(usbdev, NULL, 0);
- if (ret < 0)
- goto out;
- }
-
- /*Wait for ACK*/
- ret = gdm_wibro_recv(usbdev, &ack, sizeof(ack));
- if (ret < 0)
- goto out;
-out:
- return ret;
-}
-
-static int em_download_image(struct usb_device *usbdev, const char *img_name,
- char *type_string)
-{
- char *buf = NULL;
- loff_t pos = 0;
- int ret = 0;
- int len;
- int img_len;
- const struct firmware *firm;
- #if defined(GDM7205_PADDING)
- const int pad_size = GDM7205_PADDING;
- #else
- const int pad_size = 0;
- #endif
-
- ret = request_firmware(&firm, img_name, &usbdev->dev);
- if (ret < 0) {
- dev_err(&usbdev->dev,
- "requesting firmware %s failed with error %d\n",
- img_name, ret);
- return ret;
- }
-
- buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL);
- if (!buf) {
- release_firmware(firm);
- return -ENOMEM;
- }
-
- memset(buf, 0, pad_size); /* don't send uninitialized padding */
- strcpy(buf+pad_size, type_string);
- ret = gdm_wibro_send(usbdev, buf, strlen(type_string)+pad_size);
- if (ret < 0)
- goto out;
-
- img_len = firm->size;
-
- if (img_len <= 0) {
- ret = -EINVAL;
- goto out;
- }
-
- while (img_len > 0) {
- if (img_len > DOWNLOAD_CHUNK)
- len = DOWNLOAD_CHUNK;
- else
- len = img_len; /* the last chunk of data */
-
- memcpy(buf+pad_size, firm->data + pos, len);
- ret = gdm_wibro_send(usbdev, buf, len+pad_size);
-
- if (ret < 0)
- goto out;
-
- img_len -= DOWNLOAD_CHUNK;
- pos += DOWNLOAD_CHUNK;
-
- ret = em_wait_ack(usbdev, ((len+pad_size) % 512 == 0));
- if (ret < 0)
- goto out;
- }
-
- ret = em_wait_ack(usbdev, 1);
- if (ret < 0)
- goto out;
-
-out:
- release_firmware(firm);
- kfree(buf);
-
- return ret;
-}
-
-static int em_fw_reset(struct usb_device *usbdev)
-{
- /*Send ZLP*/
- return gdm_wibro_send(usbdev, NULL, 0);
-}
-
-int usb_emergency(struct usb_device *usbdev)
-{
- int ret;
- const char *kern_name = FW_DIR FW_KERN;
- const char *fs_name = FW_DIR FW_FS;
-
- ret = em_download_image(usbdev, kern_name, KERNEL_TYPE_STRING);
- if (ret < 0)
- return ret;
- dev_err(&usbdev->dev, "GCT Emergency: Kernel download success.\n");
-
- ret = em_download_image(usbdev, fs_name, FS_TYPE_STRING);
- if (ret < 0)
- return ret;
- dev_info(&usbdev->dev, "GCT Emergency: Filesystem download success.\n");
-
- ret = em_fw_reset(usbdev);
-
- return ret;
-}
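In the normal boot path above, each image is announced with a struct dn_header carrying a big-endian magic number and the image size rounded up to DOWNLOAD_SIZE, after which the image body is streamed in DOWNLOAD_SIZE pieces. A condensed userspace sketch of that wire format, with tx() standing in for gdm_wibro_send():

    #include <stdint.h>
    #include <stddef.h>

    #define DOWNLOAD_SIZE 1024
    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    static void put_be32(uint8_t *p, uint32_t v)
    {
            p[0] = v >> 24;
            p[1] = v >> 16;
            p[2] = v >> 8;
            p[3] = v;
    }

    /* Send the 8-byte header (magic, padded size), then the body in
     * DOWNLOAD_SIZE pieces; the final piece may be short, as in
     * download_image(). tx() returns 0 on success. */
    static int send_image(int (*tx)(const void *, size_t),
                          const uint8_t *img, uint32_t len, uint32_t magic)
    {
            uint8_t hdr[8];
            int ret;

            put_be32(hdr, magic);
            put_be32(hdr + 4, ALIGN_UP(len, DOWNLOAD_SIZE));

            ret = tx(hdr, sizeof(hdr));
            while (!ret && len > 0) {
                    uint32_t n = len > DOWNLOAD_SIZE ? DOWNLOAD_SIZE : len;

                    ret = tx(img, n);
                    img += n;
                    len -= n;
            }

            return ret;
    }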
diff --git a/drivers/staging/gdm72xx/usb_boot.h b/drivers/staging/gdm72xx/usb_boot.h
deleted file mode 100644
index 5bf7190377e2..000000000000
--- a/drivers/staging/gdm72xx/usb_boot.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_USB_BOOT_H__
-#define __GDM72XX_USB_BOOT_H__
-
-struct usb_device;
-
-int usb_boot(struct usb_device *usbdev, u16 pid);
-int usb_emergency(struct usb_device *usbdev);
-
-#endif /* __GDM72XX_USB_BOOT_H__ */
diff --git a/drivers/staging/gdm72xx/usb_ids.h b/drivers/staging/gdm72xx/usb_ids.h
deleted file mode 100644
index 7afb9ba5fdba..000000000000
--- a/drivers/staging/gdm72xx/usb_ids.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_USB_IDS_H__
-#define __GDM72XX_USB_IDS_H__
-
-/* Replace the vendor ID with your own if needed. */
-#define GCT_VID 0x1076
-
-/* Replace the product IDs with your own if needed. */
-#define GCT_PID1 0x7e00
-#define GCT_PID2 0x7f00
-
-#define USB_DEVICE_ID_MATCH_DEVICE_INTERFACE \
- (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS)
-
-#define USB_DEVICE_INTF(vend, prod, intf) \
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE_INTERFACE, \
- .idVendor = (vend), .idProduct = (prod), .bInterfaceClass = (intf)
-
-#define EMERGENCY_PID 0x720f
-#define BL_PID_MASK 0xffc0
-
-#define USB_DEVICE_BOOTLOADER(vid, pid) \
- {USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD)}
-
-#define USB_DEVICE_BOOTLOADER_DRV(vid, pid) \
- {USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD|B_DIFF_DL_DRV)}
-
-#define USB_DEVICE_CDC_DATA(vid, pid) \
- {USB_DEVICE_INTF((vid), (pid), USB_CLASS_CDC_DATA)}
-
-static const struct usb_device_id id_table[] = {
- USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID1),
- USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID1),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x1),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x2),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x3),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x4),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x5),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x6),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x7),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x8),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x9),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xa),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xb),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xc),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xd),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xe),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xf),
-
- USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID2),
- USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID2),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x1),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x2),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x3),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x4),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x5),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x6),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x7),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x8),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x9),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xa),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xb),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xc),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xd),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xe),
- USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0xf),
-
- {USB_DEVICE(GCT_VID, EMERGENCY_PID)},
- { }
-};
-
-#endif /* __GDM72XX_USB_IDS_H__ */
diff --git a/drivers/staging/gdm72xx/wm_ioctl.h b/drivers/staging/gdm72xx/wm_ioctl.h
deleted file mode 100644
index 631cb1d23c7e..000000000000
--- a/drivers/staging/gdm72xx/wm_ioctl.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __GDM72XX_WM_IOCTL_H__
-#define __GDM72XX_WM_IOCTL_H__
-
-#if !defined(__KERNEL__)
-#include <net/if.h>
-#endif
-
-#define NETLINK_WIMAX 31
-
-#define SIOCWMIOCTL SIOCDEVPRIVATE
-
-#define SIOCG_DATA 0x8D10
-#define SIOCS_DATA 0x8D11
-
-enum {
- SIOC_DATA_FSM,
- SIOC_DATA_NETLIST,
- SIOC_DATA_CONNNSP,
- SIOC_DATA_CONNCOMP,
- SIOC_DATA_PROFILEID,
-
- SIOC_DATA_END
-};
-
-#define SIOC_DATA_MAX 16
-
-/* FSM */
-enum {
- M_INIT = 0,
- M_OPEN_OFF,
- M_OPEN_ON,
- M_SCAN,
- M_CONNECTING,
- M_CONNECTED,
- M_FSM_END,
-
- C_INIT = 0,
- C_CONNSTART,
- C_ASSOCSTART,
- C_RNG,
- C_SBC,
- C_AUTH,
- C_REG,
- C_DSX,
- C_ASSOCCOMPLETE,
- C_CONNCOMPLETE,
- C_FSM_END,
-
- D_INIT = 0,
- D_READY,
- D_LISTEN,
- D_IPACQUISITION,
-
- END_FSM
-};
-
-struct fsm_s {
- int m_status; /*main status*/
- int c_status; /*connection status*/
- int d_status; /*oma-dm status*/
-};
-
-struct data_s {
- int size;
- void *buf;
-};
-
-struct udata_s {
- int size;
- void __user *buf;
-};
-
-struct wm_req_s {
- union {
- char ifrn_name[IFNAMSIZ];
- } ifr_ifrn;
- unsigned short cmd;
- unsigned short data_id;
- struct udata_s data;
-
-/* NOTE: sizeof(struct wm_req_s) must be less than sizeof(struct ifreq). */
-};
-
-#ifndef ifr_name
-#define ifr_name ifr_ifrn.ifrn_name
-#endif
-
-#endif /* __GDM72XX_WM_IOCTL_H__ */
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index b0927e49d0a8..bd559956f199 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
#include <linux/goldfish.h>
MODULE_AUTHOR("Google, Inc.");
@@ -63,7 +64,7 @@ struct goldfish_audio {
#define AUDIO_READ(data, addr) (readl(data->reg_base + addr))
#define AUDIO_WRITE(data, addr, x) (writel(x, data->reg_base + addr))
#define AUDIO_WRITE64(data, addr, addr2, x) \
- (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base+addr2))
+ (gf_write_dma_addr((x), data->reg_base + addr, data->reg_base + addr2))
/*
* temporary variable used between goldfish_audio_probe() and
@@ -280,12 +281,12 @@ static int goldfish_audio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
+ if (!r) {
dev_err(&pdev->dev, "platform_get_resource failed\n");
return -ENODEV;
}
data->reg_base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
- if (data->reg_base == NULL)
+ if (!data->reg_base)
return -ENOMEM;
data->irq = platform_get_irq(pdev, 0);
@@ -295,7 +296,7 @@ static int goldfish_audio_probe(struct platform_device *pdev)
}
data->buffer_virt = dmam_alloc_coherent(&pdev->dev,
COMBINED_BUFFER_SIZE, &buf_addr, GFP_KERNEL);
- if (data->buffer_virt == NULL) {
+ if (!data->buffer_virt) {
dev_err(&pdev->dev, "allocate buffer failed\n");
return -ENOMEM;
}
@@ -344,11 +345,18 @@ static int goldfish_audio_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id goldfish_audio_of_match[] = {
+ { .compatible = "google,goldfish-audio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_audio_of_match);
+
static struct platform_driver goldfish_audio_driver = {
.probe = goldfish_audio_probe,
.remove = goldfish_audio_remove,
.driver = {
- .name = "goldfish_audio"
+ .name = "goldfish_audio",
+ .of_match_table = goldfish_audio_of_match,
}
};
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
index 623353db5a08..76d60eed1490 100644
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ b/drivers/staging/goldfish/goldfish_nand.c
@@ -27,6 +27,7 @@
#include <linux/mutex.h>
#include <linux/goldfish.h>
#include <asm/div64.h>
+#include <linux/dma-mapping.h>
#include "goldfish_nand_reg.h"
@@ -99,11 +100,11 @@ static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
loff_t ofs = instr->addr;
u32 len = instr->len;
- u32 rem;
+ s32 rem;
if (ofs + len > mtd->size)
goto invalid_arg;
- rem = do_div(ofs, mtd->writesize);
+ ofs = div_s64_rem(ofs, mtd->writesize, &rem);
if (rem)
goto invalid_arg;
ofs *= (mtd->writesize + mtd->oobsize);
@@ -132,7 +133,7 @@ invalid_arg:
static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
struct mtd_oob_ops *ops)
{
- u32 rem;
+ s32 rem;
if (ofs + ops->len > mtd->size)
goto invalid_arg;
@@ -141,7 +142,7 @@ static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
if (ops->ooblen + ops->ooboffs > mtd->oobsize)
goto invalid_arg;
- rem = do_div(ofs, mtd->writesize);
+ ofs = div_s64_rem(ofs, mtd->writesize, &rem);
if (rem)
goto invalid_arg;
ofs *= (mtd->writesize + mtd->oobsize);
@@ -164,7 +165,7 @@ invalid_arg:
static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
struct mtd_oob_ops *ops)
{
- u32 rem;
+ s32 rem;
if (ofs + ops->len > mtd->size)
goto invalid_arg;
@@ -173,7 +174,7 @@ static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
if (ops->ooblen + ops->ooboffs > mtd->oobsize)
goto invalid_arg;
- rem = do_div(ofs, mtd->writesize);
+ ofs = div_s64_rem(ofs, mtd->writesize, &rem);
if (rem)
goto invalid_arg;
ofs *= (mtd->writesize + mtd->oobsize);
@@ -196,12 +197,12 @@ invalid_arg:
static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- u32 rem;
+ s32 rem;
if (from + len > mtd->size)
goto invalid_arg;
- rem = do_div(from, mtd->writesize);
+ from = div_s64_rem(from, mtd->writesize, &rem);
if (rem)
goto invalid_arg;
from *= (mtd->writesize + mtd->oobsize);
@@ -218,12 +219,12 @@ invalid_arg:
static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- u32 rem;
+ s32 rem;
if (to + len > mtd->size)
goto invalid_arg;
- rem = do_div(to, mtd->writesize);
+ to = div_s64_rem(to, mtd->writesize, &rem);
if (rem)
goto invalid_arg;
to *= (mtd->writesize + mtd->oobsize);
@@ -239,12 +240,12 @@ invalid_arg:
static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
- u32 rem;
+ s32 rem;
if (ofs >= mtd->size)
goto invalid_arg;
- rem = do_div(ofs, mtd->erasesize);
+	ofs = div_s64_rem(ofs, mtd->erasesize, &rem);
if (rem)
goto invalid_arg;
ofs *= mtd->erasesize / mtd->writesize;
@@ -260,12 +261,12 @@ invalid_arg:
static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- u32 rem;
+ s32 rem;
if (ofs >= mtd->size)
goto invalid_arg;
- rem = do_div(ofs, mtd->erasesize);
+	ofs = div_s64_rem(ofs, mtd->erasesize, &rem);
if (rem)
goto invalid_arg;
ofs *= mtd->erasesize / mtd->writesize;
@@ -284,17 +285,18 @@ invalid_arg:
static int nand_setup_cmd_params(struct platform_device *pdev,
struct goldfish_nand *nand)
{
- u64 paddr;
+ dma_addr_t dma_handle;
unsigned char __iomem *base = nand->base;
- nand->cmd_params = devm_kzalloc(&pdev->dev,
- sizeof(struct cmd_params), GFP_KERNEL);
- if (!nand->cmd_params)
- return -1;
-
- paddr = __pa(nand->cmd_params);
- writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
- writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW);
+ nand->cmd_params = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct cmd_params),
+ &dma_handle, GFP_KERNEL);
+ if (!nand->cmd_params) {
+ dev_err(&pdev->dev, "allocate buffer failed\n");
+ return -ENOMEM;
+ }
+ writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
+ writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW);
return 0;
}
@@ -319,7 +321,7 @@ static int goldfish_nand_init_device(struct platform_device *pdev,
mtd->oobavail = mtd->oobsize;
mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
(mtd->writesize + mtd->oobsize) * mtd->writesize;
- do_div(mtd->size, mtd->writesize + mtd->oobsize);
+ mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize);
mtd->size *= mtd->writesize;
dev_dbg(&pdev->dev,
"goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index a3a10f9a2a2b..7b7c9786c162 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -34,7 +34,7 @@
#define DEVICE_NAME "device"
#define CLASS_NAME "fpgaboot"
-static uint8_t bits_magic[] = {
+static u8 bits_magic[] = {
0x0, 0x9, 0xf, 0xf0, 0xf, 0xf0,
0xf, 0xf0, 0xf, 0xf0, 0x0, 0x0, 0x1};
@@ -54,7 +54,7 @@ static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
static void readinfo_bitstream(char *bitdata, char *buf, int *offset)
{
char tbuf[64];
- int32_t len;
+ s32 len;
/* read section char */
read_bitstream(bitdata, tbuf, offset, 1);
@@ -281,17 +281,12 @@ static int init_driver(void)
return PTR_ERR_OR_ZERO(firmware_pdev);
}
-static void finish_driver(void)
-{
- platform_device_unregister(firmware_pdev);
-}
-
static int gs_fpgaboot(void)
{
int err;
struct fpgaimage *fimage;
- fimage = kmalloc(sizeof(struct fpgaimage), GFP_KERNEL);
+ fimage = kmalloc(sizeof(*fimage), GFP_KERNEL);
if (!fimage)
return -ENOMEM;
@@ -370,14 +365,14 @@ static int __init gs_fpgaboot_init(void)
return 0;
errout:
- finish_driver();
+ platform_device_unregister(firmware_pdev);
return err;
}
static void __exit gs_fpgaboot_exit(void)
{
- finish_driver();
+ platform_device_unregister(firmware_pdev);
pr_info("FPGA image download module removed\n");
}
diff --git a/drivers/staging/i4l/Documentation/README.act2000 b/drivers/staging/i4l/Documentation/README.act2000
new file mode 100644
index 000000000000..ce7115e7f4ce
--- /dev/null
+++ b/drivers/staging/i4l/Documentation/README.act2000
@@ -0,0 +1,104 @@
+$Id: README.act2000,v 1.3 2000/08/06 09:22:51 armin Exp $
+
+This document describes the ACT2000 driver for the
+IBM Active 2000 ISDN card.
+
+There are three types of this card available: an ISA-, an MCA- and a
+PCMCIA-bus version. Currently, only the ISA-bus version of the card is
+supported; MCA and PCMCIA support will follow soon.
+
+The ISA-bus version uses 8 I/O ports. The base port address has to be set
+manually using the DIP switches.
+
+Setting up the DIP switches for the IBM Active 2000 ISDN card:
+
+ Note: S5 and S6 must always be set to off!
+
+ S1 S2 S3 S4 Base-port
+ on on on on 0x0200 (Factory default)
+ off on on on 0x0240
+ on off on on 0x0280
+ off off on on 0x02c0
+ on on off on 0x0300
+ off on off on 0x0340
+ on off off on 0x0380
+ on on on off 0xcfe0
+ off on on off 0xcfa0
+ on off on off 0xcf60
+ off off on off 0xcf20
+ on on off off 0xcee0
+ off on off off 0xcea0
+ on off off off 0xce60
+ off off off off Card disabled
+
+IRQ is configured by software. Possible values are:
+
+ 3, 5, 7, 10, 11, 12, 15 and none (polled mode)
+
+
+The ACT2000 driver may either be built into the kernel or as a module.
+Initialization depends on how the driver is built:
+
+Driver built into the kernel:
+
+ The ACT2000 driver can be configured using the commandline-feature while
+ loading the kernel with LILO or LOADLIN. It accepts the following syntax:
+
+ act2000=b,p,i[,idstring]
+
+ where
+
+ b = Bus-Type (1=ISA, 2=MCA, 3=PCMCIA)
+ p = portbase (-1 means autoprobe)
+ i = Interrupt (-1 means use next free IRQ, 0 means polled mode)
+
+ The idstring is an arbitrary string used for referencing the card
+ by the actctrl tool later.
+
+ Defaults used when no parameters are given at all:
+
+ 1,-1,-1,""
+
+ which means: Autoprobe for an ISA card, use next free IRQ, let the
+ ISDN linklevel fill the IdString (usually "line0" for the first card).
+
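+ As a concrete example (the values are illustrative, not defaults),
+ an ISA card at port 0x240 using IRQ 5 with the idstring "line0"
+ would be configured with:
+
+   act2000=1,0x240,5,line0
+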
+ If you want to use more than one card, you can use the program
+ "actctrl" from the utility-package to configure additional cards.
+
+ Using the "actctrl" utility, portbase and IRQ can also be changed
+ at runtime. The D-channel protocol is configured with its "dproto"
+ option after the firmware has been loaded into the card's memory.
+
+Driver built as module:
+
+ The module act2000.o can be configured at modprobe (insmod) time by
+ appending its parameters to the modprobe or insmod commandline.
+ The following syntax is accepted:
+
+ act_bus=b act_port=p act_irq=i act_id=idstring
+
+ where b, p, i and idstring have the same meanings as the parameters
+ described for the builtin version above.
+
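+ For example (illustrative values only), the same card as above would
+ be loaded with:
+
+   modprobe act2000 act_bus=1 act_port=0x240 act_irq=5 act_id=line0
+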
+ Using the "actctrl" utility, the same features apply to the
+ modularized version as to the kernel-builtin one (i.e. loading the
+ firmware and configuring the D-channel protocol).
+
+Loading the firmware into the card:
+
+ The firmware is supplied together with the isdn4k-utils package. It
+ can be found in the subdirectory act2000/firmware/
+
+ Assuming you have installed the utility-package correctly, the firmware
+ will be downloaded into the card using the following command:
+
+ actctrl -d idstring load /etc/isdn/bip11.btl
+
+ where idstring is the name of the card, given at insmod time or
+ (for the kernel-builtin driver) on the kernel commandline. If only
+ one ISDN card is used, the "-d idstring" option may be omitted.
+
+ For further documentation (adding more IBM Active 2000 cards), refer to
+ the manpage actctrl.8 which is included in the isdn4k-utils package.
+
diff --git a/drivers/staging/i4l/Documentation/README.icn b/drivers/staging/i4l/Documentation/README.icn
new file mode 100644
index 000000000000..13f833d4e910
--- /dev/null
+++ b/drivers/staging/i4l/Documentation/README.icn
@@ -0,0 +1,148 @@
+$Id: README.icn,v 1.7 2000/08/06 09:22:51 armin Exp $
+
+You can get the ICN-ISDN-card from:
+
+Thinking Objects Software GmbH
+Versbacher Röthe 159
+97078 Würzburg
+Tel: +49 931 2877950
+Fax: +49 931 2877951
+
+email info@think.de
+WWW http://www.think.de
+
+
+The card communicates with the PC by two interfaces:
+ 1. A range of 4 successive port-addresses, whose base address can be
+ configured with the switches.
+ 2. A memory window of 16KB-256KB size, which can be set up in 16k steps
+ over the whole range of 16MB. Isdn4linux only uses a 16k window.
+ The base address of the window can be configured when loading
+ the lowlevel-module (see README). If using more than one card,
+ all cards are mapped to the same window and activated as needed.
+
+Setting up the IO-address dipswitches for the ICN-ISDN-card:
+
+ Two types of cards exist, one with dip-switches and one with
+ hook-switches.
+
+ 1. Setting for the card with hook-switches:
+
+ (0 = switch closed, 1 = switch open)
+
+ S3 S2 S1 Base-address
+ 0 0 0 0x300
+ 0 0 1 0x310
+ 0 1 0 0x320 (Default for isdn4linux)
+ 0 1 1 0x330
+ 1 0 0 0x340
+ 1 0 1 0x350
+ 1 1 0 0x360
+ 1 1 1 NOT ALLOWED!
+
+ 2. Setting for the card with dip-switches:
+
+ (0 = switch closed, 1 = switch open)
+
+ S1 S2 S3 S4 Base-Address
+ 0 0 0 0 0x300
+ 0 0 0 1 0x310
+ 0 0 1 0 0x320 (Default for isdn4linux)
+ 0 0 1 1 0x330
+ 0 1 0 0 0x340
+ 0 1 0 1 0x350
+ 0 1 1 0 0x360
+ 0 1 1 1 NOT ALLOWED!
+ 1 0 0 0 0x308
+ 1 0 0 1 0x318
+ 1 0 1 0 0x328
+ 1 0 1 1 0x338
+ 1 1 0 0 0x348
+ 1 1 0 1 0x358
+ 1 1 1 0 0x368
+ 1 1 1 1 NOT ALLOWED!
+
+The ICN driver may be built into the kernel or as a module. Initialization
+depends on how the driver is built:
+
+Driver built into the kernel:
+
+ The ICN driver can be configured using the commandline-feature while
+ loading the kernel with LILO or LOADLIN. It accepts the following syntax:
+
+ icn=p,m[,idstring1[,idstring2]]
+
+ where
+
+ p = portbase (default: 0x320)
+ m = shared memory (default: 0xd0000)
+
+ When using the ICN double card (4B), you MUST define TWO idstrings.
+ idstring must start with a character! There is no way for the driver
+ to distinguish between a 2B and 4B type card. Therefore, by supplying
+ TWO idstrings, you tell the driver that you have a 4B installed.
+
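+ For example (the idstrings are illustrative), a 4B card at the
+ default port and memory address would be configured with:
+
+   icn=0x320,0xd0000,line0,line1
+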
+ If you want to use more than one card, you can use the program
+ "icnctrl" from the utility-package to configure additional cards.
+ You need to configure shared memory only once, since the icn-driver
+ maps all cards into the same address-space.
+
+ Using the "icnctrl"-utility, portbase and shared memory can also be
+ changed during runtime.
+
+ The D-channel protocol is configured by loading different firmware
+ into the card's memory using the "icnctrl"-utility.
+
+
+Driver built as module:
+
+ The module icn.o can be configured at insmod time by appending its
+ parameters to the insmod commandline. The following syntax is
+ accepted:
+
+ portbase=p membase=m icn_id=idstring [icn_id2=idstring2]
+
+ where p, m, idstring and idstring2 have the same meanings as the
+ parameters described for the kernel version above.
+
+ When using the ICN double card (4B), you MUST define TWO idstrings.
+ idstring must start with a character! There is no way for the driver
+ to distinguish between a 2B and 4B type card. Therefore, by supplying
+ TWO idstrings, you tell the driver that you have a 4B installed.
+
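+ For example (illustrative values), a single 2B card could be loaded
+ with:
+
+   insmod icn portbase=0x320 membase=0xd0000 icn_id=line0
+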
+ Using the "icnctrl"-utility, the same features apply to the modularized
+ version as to the kernel-builtin one.
+
+ The D-channel protocol is configured by loading different firmware
+ into the card's memory using the "icnctrl"-utility.
+
+Loading the firmware into the card:
+
+ The firmware is supplied together with the isdn4k-utils package. It
+ can be found in the subdirectory icnctrl/firmware/
+
+ There are 3 files:
+
+ loadpg.bin - Image of the bootstrap loader.
+ pc_1t_ca.bin - Image of firmware for German 1TR6 protocol.
+ pc_eu_ca.bin - Image of firmware for EDSS1 (Euro-ISDN) protocol.
+
+ Assuming you have installed the utility-package correctly, the firmware
+ will be downloaded into the 2B-card using the following command:
+
+ icnctrl -d Idstring load /etc/isdn/loadpg.bin /etc/isdn/pc_XX_ca.bin
+
+ where XX is either "1t" or "eu", depending on the D-Channel protocol
+ used on your S0-bus and Idstring is the Name of the card, given during
+ insmod-time or (for kernel-builtin driver) on the kernel commandline.
+
+ To load a 4B-card, the same command is used, except a second firmware
+ file is appended to the commandline of icnctrl.
+
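+ As a sketch (assuming EDSS1 on both S0-buses), a 4B load could look
+ like:
+
+   icnctrl -d Idstring load /etc/isdn/loadpg.bin /etc/isdn/pc_eu_ca.bin /etc/isdn/pc_eu_ca.bin
+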
+ -> After downloading firmware, the two LEDs at the back cover of the card
+ (ICN-4B: 4 LEDs) should now be blinking intermittently. If a connection
+ is up, the corresponding LED is lit continuously.
+
+ For further documentation (adding more ICN-cards), refer to the manpage
+ icnctrl.8 which is included in the isdn4k-utils package.
+
diff --git a/drivers/staging/i4l/Documentation/README.pcbit b/drivers/staging/i4l/Documentation/README.pcbit
new file mode 100644
index 000000000000..5125002282e5
--- /dev/null
+++ b/drivers/staging/i4l/Documentation/README.pcbit
@@ -0,0 +1,40 @@
+------------------------------------------------------------------------------
+ README file for the PCBIT-D Device Driver.
+------------------------------------------------------------------------------
+
+The PCBIT is a Euro ISDN adapter manufactured in Portugal by Octal and
+developed in cooperation with Portugal Telecom and Inesc.
+The driver interfaces with the standard kernel isdn facilities
+originally developed by Fritz Elfert in the isdn4linux project.
+
+The common versions of the pcbit board require a firmware that is
+distributed (and copyrighted) by the manufacturer. To load this
+firmware you need "pcbitctl" available on the standard isdn4k-utils
+package or in the pcbit package available in:
+
+ftp://ftp.di.fc.ul.pt/pub/systems/Linux/isdn
+
+Known Limitations:
+
+- The board reset procedure is at the moment incorrect and will only
+allow you to load the firmware after a hard reset.
+
+- Only HDLC in B-channels is supported at the moment. There is no
+current support for X.25 in B or D channels nor LAPD in B
+channels. The main reason is that these two other protocol modes have,
+to my knowledge, very little use. If you want to see them implemented
+*do* send me a mail.
+
+- The driver often triggers errors in the board that I and the
+manufacturer believe to be caused by bugs in the firmware. The current
+version includes several procedures for error recovery that should
+allow normal operation. Plans for the future include cooperation with
+the manufacturer in order to solve this problem.
+
+Information/hints/help can be obtained in the linux isdn
+mailing list (isdn4linux@listserv.isdn4linux.de) or directly from me.
+
+regards,
+ Pedro.
+
+<roque@di.fc.ul.pt>
diff --git a/drivers/staging/i4l/Documentation/README.sc b/drivers/staging/i4l/Documentation/README.sc
new file mode 100644
index 000000000000..1153cd926059
--- /dev/null
+++ b/drivers/staging/i4l/Documentation/README.sc
@@ -0,0 +1,281 @@
+Welcome to Beta Release 2 of the combination ISDN driver for SpellCaster's
+ISA ISDN adapters. Please note that this release includes support for the
+DataCommute/BRI and TeleCommute/BRI adapters only; any other use is
+guaranteed to fail. If you have a DataCommute/PRI installed in the test
+computer, we recommend removing it, as it will be detected but will not
+be usable. For what has changed in Beta Release 2, see section 3.
+
+Speaking of guarantees, THIS IS BETA SOFTWARE and as such contains
+bugs and defects either known or unknown. Use this software at your own
+risk. There is NO SUPPORT for this software. Some help may be available
+through the web site or the mailing list but such support is totally at
+our own option and without warranty. If you choose to assume all and
+total risk by using this driver, we encourage you to join the beta
+mailing list.
+
+To join the Linux beta mailing list, send a message to:
+majordomo@spellcast.com with the words "subscribe linux-beta" as the only
+contents of the message. Do not include a signature. If you choose to
+remove yourself from this list at a later date, send another message to
+the same address with the words "unsubscribe linux-beta" as its only
+contents.
+
+TABLE OF CONTENTS
+-----------------
+ 1. Introduction
+ 1.1 What is ISDN4Linux?
+ 1.2 What is different between this driver and previous drivers?
+ 1.3 How do I set up my system with the correct software to use
+ this driver release?
+
+ 2. Basic Operations
+ 2.1 Unpacking and installing the driver
+ 2.2 Read the man pages!!!
+ 2.3 Installing the driver
+ 2.4 Removing the driver
+ 2.5 What to do if it doesn't load
+ 2.6 How to set up ISDN4Linux with the driver
+
+ 3. Beta Change Summaries and Miscellaneous Notes
+
+1. Introduction
+---------------
+
+The revision 2 Linux driver for SpellCaster ISA ISDN adapters is built
+upon ISDN4Linux available separately or as included in Linux 2.0 and later.
+The driver will support a maximum of 4 adapters in any one system of any
+type including DataCommute/BRI, DataCommute/PRI and TeleCommute/BRI for a
+maximum of 92 channels per host. The driver is supplied as a module in
+source form and needs to be compiled before it can be used. It has been
+tested on Linux 2.0.20.
+
+1.1 What is ISDN4Linux?
+
+ISDN4Linux is a driver and set of tools used to access and use ISDN devices
+on a Linux platform in a common and standard way. It supports HDLC and PPP
+protocols and offers channel bundling and MLPPP support. To use ISDN4Linux
+you need to configure your kernel for ISDN support and get the ISDN4Linux
+tool kit from our web site.
+
+ISDN4Linux creates a channel pool from all of the available ISDN channels
+and therefore can function across adapters. When an ISDN4Linux compliant
+driver (such as ours) is loaded, all of the channels go into a pool and
+are used on a first-come first-served basis. In addition, individual
+channels can be specifically bound to particular interfaces.
+
+1.2 What is different between this driver and previous drivers?
+
+The revision 2 driver, besides adopting the ISDN4Linux architecture, has
+many subtle and not-so-subtle functional differences from previous
+releases. These include:
+ - More efficient shared memory management combined with a simpler
+ configuration. All adapters now use only 16Kbytes of shared RAM
+ versus between 16K and 64K. New methods for using the shared RAM
+ allow us to utilize all of the available RAM on the adapter through
+ only one 16K page.
+ - Better detection of available upper memory. The probing routines
+ have been improved to better detect available shared RAM pages and
+ used pages are now locked.
+ - Decreased loading time and a wider range of I/O ports probed.
+ We have significantly reduced the amount of time it takes to load
+ the driver and at the same time doubled the number of I/O ports
+ probed increasing the likelihood of finding an adapter.
+ - We now support all ISA adapter models with a single driver instead
+ of separate drivers for each model. The revision 2 driver supports
+ the DataCommute/BRI, DataCommute/PRI and TeleCommute/BRI in any
+ combination up to a maximum of four adapters per system.
+ - On board PPP protocol support has been removed in favour of the
+ sync-PPP support used in ISDN4Linux. This means more control of
+ the protocol parameters, faster negotiation time and a more
+ familiar interface.
+
+1.3 How do I set up my system with the correct software to use
+ this driver release?
+
+Before you can compile, install and use the SpellCaster ISA ISDN driver, you
+must ensure that the following software is installed, configured and running:
+
+ - Linux kernel 2.0.20 or later with the required init and ps
+ versions. Please see your distribution vendor for the correct
+ utility packages. The latest kernel is available from
+ ftp://sunsite.unc.edu/pub/Linux/kernel/v2.0/
+
+ - The latest modules package (modules-2.0.0.tar.gz) from
+ ftp://sunsite.unc.edu/pub/Linux/kernel/modules-2.0.0.tar.gz
+
+ - The ISDN4Linux tools available from
+ ftp://ftp.franken.de/pub/isdn4linux/v2.0/isdn4k-utils-2.0.tar.gz
+ This package may fail to compile for you so you can alternatively
+ get a pre-compiled version from
+ ftp://ftp.spellcast.com/pub/drivers/isdn4linux/isdn4k-bin-2.0.tar.gz
+
+
+2. Basic Operations
+-------------------
+
+2.1 Unpacking and installing the driver
+
+ 1. As root, create a directory in a convenient place. We suggest
+ /usr/src/spellcaster.
+
+ 2. Unpack the archive with :
+ tar xzf sc-n.nn.tar.gz -C /usr/src/spellcaster
+
+ 3. Change directory to /usr/src/spellcaster
+
+ 4. Read the README and RELNOTES files.
+
+ 5. Run 'make' and if all goes well, run 'make install'.
+
+2.2 Read the man pages!!!
+
+Make sure you read the scctrl(8) and sc(4) manual pages before continuing
+any further. Type 'man 8 scctrl' and 'man 4 sc'.
+
+2.3 Installing the driver
+
+To install the driver, type '/sbin/insmod sc' as root. sc(4) details the
+options you can specify, but you shouldn't need any unless the defaults
+fail to work.
+
+Make sure the driver loaded and detected all of the adapters by typing
+'dmesg'.
+
+The driver can be configured so that it is loaded upon startup. To do this,
+edit the file "/etc/modules/'uname -f'/'uname -v'" and insert the driver name
+"sc" into this file.
+
+2.4 Removing the driver
+
+To remove the driver, delete any interfaces that may exist (see isdnctrl(8)
+for more on this) and then type '/sbin/rmmod sc'.
+
+2.5 What to do if it doesn't load
+
+If, when you try to install the driver, you get a message mentioning
+'register_isdn' then you do not have the ISDN4Linux system installed. Please
+make sure that ISDN support is configured in the kernel.
+
+If you get a message that says 'initialization of sc failed', then the
+driver failed to detect an adapter or failed to find resources needed such
+as a free IRQ line or shared memory segment. If you are sure there are free
+resources available, use the insmod options detailed in sc(4) to override
+the probing function.
+
+Upon testing, the following problem was noted: the driver would load
+without problems, but the board would not respond beyond that point. A
+check with 'cat /proc/interrupts' showed that the interrupt count for sc
+was 0. If you hit this problem, change the BIOS settings so that the
+interrupts in question are reserved for ISA use only.
+
+
+2.6 How to set up ISDN4Linux with the driver
+
+There are three main configurations which you can use with the driver:
+
+A) Basic HDLC connection
+B) PPP connection
+C) MLPPP connection
+
+It should be mentioned here that you may also use a tty connection if you
+desire. The Documentation directory of the isdn4linux subsystem offers good
+documentation on this feature.
+
+A) 10 steps to the establishment of a basic HDLC connection
+-----------------------------------------------------------
+
+- please open the isdn-hdlc file in the examples directory and follow
+  along (a condensed command sketch is given after the steps below)...
+
+ This file is a script used to configure a BRI ISDN TA to establish a
+ basic HDLC connection between its two channels. Two network
+ interfaces are created and two routes added between the channels.
+
+ i) using the isdnctrl utility, add an interface with "addif" and
+ name it "isdn0"
+ ii) add the outgoing and inbound telephone numbers
+ iii) set the Layer 2 protocol to hdlc
+ iv) set the eaz of the interface to be the phone number of that
+ specific channel
+ v) to turn the callback features off, set the callback to "off" and
+ the callback delay (cbdelay) to 0.
+ vi) the hangup timeout can be set to a specified number of seconds
+ vii) the hangup upon incoming call can be set on or off
+ viii) use the ifconfig command to bring up the network interface with
+ a specific IP address and point to point address
+ ix) add a route to the IP address through the isdn0 interface
+ x) a ping should result in the establishment of the connection
+
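+ Condensed into commands, the steps above might look like the sketch
+ below (the phone number and IP addresses are placeholders, not values
+ from the driver package):
+
+   isdnctrl addif isdn0
+   isdnctrl addphone isdn0 out 5551212
+   isdnctrl addphone isdn0 in 5551212
+   isdnctrl l2_prot isdn0 hdlc
+   isdnctrl eaz isdn0 5551212
+   isdnctrl callback isdn0 off
+   isdnctrl cbdelay isdn0 0
+   isdnctrl huptimeout isdn0 60
+   ifconfig isdn0 192.168.1.1 pointopoint 192.168.1.2 up
+   route add -host 192.168.1.2 isdn0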
+
+B) Establishment of a PPP connection
+------------------------------------
+
+- please open the isdn-ppp file in the examples directory and follow along...
+
+ This file is a script used to configure a BRI ISDN TA to establish a
+ PPP connection between the two channels. The file is almost
+ identical to the HDLC connection example except that the packet
+ encapsulation type has to be set.
+
+ use the same procedure as in the HDLC connection from steps i) to
+ iii) then, after the Layer 2 protocol is set, set the encapsulation
+ "encap" to syncppp. With this done, the rest of the steps, iv) to x)
+ can be followed from above.
+
+ Then, the ipppd (ippp daemon) must be set up (a sketch of the
+ invocation is given after these steps):
+
+ xi) use the ipppd function found in /sbin/ipppd to set the following:
+ xii) take out (minus) VJ compression and bsd compression
+ xiii) set the mru size to 2000
+ xiv) link the two /dev interfaces to the daemon
+
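+ A sketch of the daemon invocation (the option spellings follow common
+ isdn4linux examples; check your ipppd(8) manual page):
+
+   /sbin/ipppd -vj -vjccomp -bsdcomp mru 2000 /dev/ippp0 /dev/ippp1 &
+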
+NOTE: A "*" in the inbound telephone number specifies that a call can be
+accepted on any number.
+
+C) Establishment of a MLPPP connection
+--------------------------------------
+
+- please open the isdn-mppp file in the examples directory and follow along...
+
+ This file is a script used to configure a BRI ISDN TA to accept a
+ Multi Link PPP connection.
+
+ i) using the isdnctrl utility, add an interface with "addif" and
+ name it "ippp0"
+ ii) add the inbound telephone number
+ iii) set the Layer 2 protocol to hdlc and the Layer 3 protocol to
+ trans (transparent)
+ iv) set the packet encapsulation to syncppp
+ v) set the eaz of the interface to be the phone number of that
+ specific channel
+ vi) to turn the callback features off, set the callback to "off" and
+ the callback delay (cbdelay) to 0.
+	vii)	the hangup timeout can be set to a specified number of seconds
+	viii)	the hangup upon incoming call can be set on or off
+	ix)	add a slave interface and name it "ippp32", for example
+	x)	set similar parameters for the ippp32 interface
+	xi)	use the ifconfig command to bring up the ippp0 interface with a
+		specific IP address and point-to-point address
+	xii)	add a route to the IP address through the ippp0 interface
+	xiii)	use the ipppd function found in /sbin/ipppd to set the
+		following (see the sketch after this list):
+	xiv)	take out (minus) bsd compression
+	xv)	set the mru size to 2000
+	xvi)	add (+) the multi-link function "+mp"
+	xvii)	link the two /dev interfaces to the daemon
+
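+ A sketch for the MLPPP case (same caveats as the PPP sketch above):
+
+   /sbin/ipppd -bsdcomp mru 2000 +mp /dev/ippp0 /dev/ippp32 &
+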
+NOTE: To use the MLPPP connection to dial OUT to a MLPPP connection, change
+the inbound telephone numbers to the outgoing telephone numbers of the MLPPP
+host.
+
+
+3. Beta Change Summaries and Miscellaneous Notes
+------------------------------------------------
+When using the "scctrl" utility to upload firmware revisions on the board,
+please note that the byte count displayed at the end of the operation may be
+different from the total number of bytes in the "dcbfwn.nn.sr" file. Please
+disregard the displayed byte count.
+
+It was noted that in Beta Release 1, the module would fail to load and
+result in a segmentation fault when 'insmod'ed. This problem occurred when
+one of the isdn4linux parameters (the data field of isdn_ctrl) was filled
+in. In some cases this field was NULL and was left unchecked, so
+referencing it caused a segmentation fault. The bug has been fixed around
+lines 63-68 of event.c.
+
diff --git a/drivers/staging/i4l/Kconfig b/drivers/staging/i4l/Kconfig
new file mode 100644
index 000000000000..920216e88de7
--- /dev/null
+++ b/drivers/staging/i4l/Kconfig
@@ -0,0 +1,13 @@
+#
+# Old ISDN4Linux config
+#
+menu "Old ISDN4Linux (deprecated)"
+ depends on ISDN_I4L
+
+source "drivers/staging/i4l/icn/Kconfig"
+
+source "drivers/staging/i4l/pcbit/Kconfig"
+
+source "drivers/staging/i4l/act2000/Kconfig"
+
+endmenu
diff --git a/drivers/staging/i4l/Makefile b/drivers/staging/i4l/Makefile
new file mode 100644
index 000000000000..158b87093db5
--- /dev/null
+++ b/drivers/staging/i4l/Makefile
@@ -0,0 +1,5 @@
+# Makefile for the old ISDN I4L subsystem and device drivers.
+
+obj-$(CONFIG_ISDN_DRV_ICN) += icn/
+obj-$(CONFIG_ISDN_DRV_PCBIT) += pcbit/
+obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/
diff --git a/drivers/staging/i4l/TODO b/drivers/staging/i4l/TODO
new file mode 100644
index 000000000000..6fe2c08bec7a
--- /dev/null
+++ b/drivers/staging/i4l/TODO
@@ -0,0 +1,3 @@
+* The icn, pcbit and act2000 drivers are dead, remove them in 2017
+ after another longterm kernel has been released, just in the
+ unlikely case someone still has this hardware.
diff --git a/drivers/isdn/act2000/Kconfig b/drivers/staging/i4l/act2000/Kconfig
index fa2673fc69c2..fa2673fc69c2 100644
--- a/drivers/isdn/act2000/Kconfig
+++ b/drivers/staging/i4l/act2000/Kconfig
diff --git a/drivers/isdn/act2000/Makefile b/drivers/staging/i4l/act2000/Makefile
index 05e582fb5c00..05e582fb5c00 100644
--- a/drivers/isdn/act2000/Makefile
+++ b/drivers/staging/i4l/act2000/Makefile
diff --git a/drivers/isdn/act2000/act2000.h b/drivers/staging/i4l/act2000/act2000.h
index 321d437f579e..321d437f579e 100644
--- a/drivers/isdn/act2000/act2000.h
+++ b/drivers/staging/i4l/act2000/act2000.h
diff --git a/drivers/isdn/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index b5fad29a9ba6..b5fad29a9ba6 100644
--- a/drivers/isdn/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
diff --git a/drivers/isdn/act2000/act2000_isa.h b/drivers/staging/i4l/act2000/act2000_isa.h
index 1a728984ede1..1a728984ede1 100644
--- a/drivers/isdn/act2000/act2000_isa.h
+++ b/drivers/staging/i4l/act2000/act2000_isa.h
diff --git a/drivers/isdn/act2000/capi.c b/drivers/staging/i4l/act2000/capi.c
index 3f66ca20b5e5..3f66ca20b5e5 100644
--- a/drivers/isdn/act2000/capi.c
+++ b/drivers/staging/i4l/act2000/capi.c
diff --git a/drivers/isdn/act2000/capi.h b/drivers/staging/i4l/act2000/capi.h
index 01ccdecd43f7..01ccdecd43f7 100644
--- a/drivers/isdn/act2000/capi.h
+++ b/drivers/staging/i4l/act2000/capi.h
diff --git a/drivers/isdn/act2000/module.c b/drivers/staging/i4l/act2000/module.c
index 68073d0da0e3..68073d0da0e3 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/staging/i4l/act2000/module.c
diff --git a/drivers/isdn/icn/Kconfig b/drivers/staging/i4l/icn/Kconfig
index 4534f21a1852..4534f21a1852 100644
--- a/drivers/isdn/icn/Kconfig
+++ b/drivers/staging/i4l/icn/Kconfig
diff --git a/drivers/isdn/icn/Makefile b/drivers/staging/i4l/icn/Makefile
index d9b476fcf384..d9b476fcf384 100644
--- a/drivers/isdn/icn/Makefile
+++ b/drivers/staging/i4l/icn/Makefile
diff --git a/drivers/isdn/icn/icn.c b/drivers/staging/i4l/icn/icn.c
index 358a574d9e8b..46d957c34be1 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/staging/i4l/icn/icn.c
@@ -718,7 +718,7 @@ icn_sendbuf(int channel, int ack, struct sk_buff *skb, icn_card *card)
return 0;
if (card->sndcount[channel] > ICN_MAX_SQUEUE)
return 0;
-#warning TODO test headroom or use skb->nb to flag ACK
+ /* TODO test headroom or use skb->nb to flag ACK */
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
/* Push ACK flag as one
diff --git a/drivers/isdn/icn/icn.h b/drivers/staging/i4l/icn/icn.h
index f8f2e76d34bf..f8f2e76d34bf 100644
--- a/drivers/isdn/icn/icn.h
+++ b/drivers/staging/i4l/icn/icn.h
diff --git a/drivers/isdn/pcbit/Kconfig b/drivers/staging/i4l/pcbit/Kconfig
index e9b2dd85d410..e9b2dd85d410 100644
--- a/drivers/isdn/pcbit/Kconfig
+++ b/drivers/staging/i4l/pcbit/Kconfig
diff --git a/drivers/isdn/pcbit/Makefile b/drivers/staging/i4l/pcbit/Makefile
index 2d026c3242e8..2d026c3242e8 100644
--- a/drivers/isdn/pcbit/Makefile
+++ b/drivers/staging/i4l/pcbit/Makefile
diff --git a/drivers/isdn/pcbit/callbacks.c b/drivers/staging/i4l/pcbit/callbacks.c
index efb6d6a3639a..efb6d6a3639a 100644
--- a/drivers/isdn/pcbit/callbacks.c
+++ b/drivers/staging/i4l/pcbit/callbacks.c
diff --git a/drivers/isdn/pcbit/callbacks.h b/drivers/staging/i4l/pcbit/callbacks.h
index a036b4a7ffad..a036b4a7ffad 100644
--- a/drivers/isdn/pcbit/callbacks.h
+++ b/drivers/staging/i4l/pcbit/callbacks.h
diff --git a/drivers/isdn/pcbit/capi.c b/drivers/staging/i4l/pcbit/capi.c
index 4e3cbf857d60..4e3cbf857d60 100644
--- a/drivers/isdn/pcbit/capi.c
+++ b/drivers/staging/i4l/pcbit/capi.c
diff --git a/drivers/isdn/pcbit/capi.h b/drivers/staging/i4l/pcbit/capi.h
index 635f63476944..635f63476944 100644
--- a/drivers/isdn/pcbit/capi.h
+++ b/drivers/staging/i4l/pcbit/capi.h
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index 4172e22ae7ed..4172e22ae7ed 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
diff --git a/drivers/isdn/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index b2262ba6f0c9..b2262ba6f0c9 100644
--- a/drivers/isdn/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
diff --git a/drivers/isdn/pcbit/edss1.h b/drivers/staging/i4l/pcbit/edss1.h
index 2f6b3a8edfba..2f6b3a8edfba 100644
--- a/drivers/isdn/pcbit/edss1.h
+++ b/drivers/staging/i4l/pcbit/edss1.h
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/staging/i4l/pcbit/layer2.c
index 46e1240ae074..46e1240ae074 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/staging/i4l/pcbit/layer2.c
diff --git a/drivers/isdn/pcbit/layer2.h b/drivers/staging/i4l/pcbit/layer2.h
index be1327bc162a..be1327bc162a 100644
--- a/drivers/isdn/pcbit/layer2.h
+++ b/drivers/staging/i4l/pcbit/layer2.h
diff --git a/drivers/isdn/pcbit/module.c b/drivers/staging/i4l/pcbit/module.c
index 0a59bd0b8210..0a59bd0b8210 100644
--- a/drivers/isdn/pcbit/module.c
+++ b/drivers/staging/i4l/pcbit/module.c
diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/staging/i4l/pcbit/pcbit.h
index 0a5a99440a80..0a5a99440a80 100644
--- a/drivers/isdn/pcbit/pcbit.h
+++ b/drivers/staging/i4l/pcbit/pcbit.h
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light b/drivers/staging/iio/Documentation/sysfs-bus-iio-light
index 17e5c9c515d4..7c7cd8456060 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio-light
@@ -1,31 +1,3 @@
-
-What: /sys/bus/iio/devices/device[n]/range
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent ADC Full Scale Range used for some ambient
- light sensors in calculating lux.
-
-What: /sys/bus/iio/devices/device[n]/range_available
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent supported vales for ADC Full Scale Range.
-
-What: /sys/bus/iio/devices/device[n]/adc_resolution
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent ADC resolution of the ambient light sensor
- used in calculating the lux.
-
-What: /sys/bus/iio/devices/device[n]/adc_resolution_available
-KernelVersion: 2.6.37
-Contact: linux-iio@vger.kernel.org
-Description:
- Hardware dependent list of possible values supported for the
- adc_resolution of the given sensor.
-
What: /sys/bus/iio/devices/device[n]/in_illuminance0[_input|_raw]
KernelVersion: 2.6.35
Contact: linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 0e044cb0def8..8abc1ab3c0c7 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -12,7 +12,6 @@ source "drivers/staging/iio/frequency/Kconfig"
source "drivers/staging/iio/gyro/Kconfig"
source "drivers/staging/iio/impedance-analyzer/Kconfig"
source "drivers/staging/iio/light/Kconfig"
-source "drivers/staging/iio/magnetometer/Kconfig"
source "drivers/staging/iio/meter/Kconfig"
source "drivers/staging/iio/resolver/Kconfig"
source "drivers/staging/iio/trigger/Kconfig"
diff --git a/drivers/staging/iio/Makefile b/drivers/staging/iio/Makefile
index 3e616b4437f5..0cfd05d5bf49 100644
--- a/drivers/staging/iio/Makefile
+++ b/drivers/staging/iio/Makefile
@@ -10,7 +10,6 @@ obj-y += frequency/
obj-y += gyro/
obj-y += impedance-analyzer/
obj-y += light/
-obj-y += magnetometer/
obj-y += meter/
obj-y += resolver/
obj-y += trigger/
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index c22a0edd1528..93a896883e37 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -58,14 +58,6 @@ different requirements. This one suits mid range
frequencies (100Hz - 4kHz).
2) Lots of testing
-Periodic Timer trigger
-1) Move to a more general hardware periodic timer request
-subsystem. Current approach is abusing purpose of RTC.
-Initial discussions have taken place, but no actual code
-is in place as yet. This topic will be reopened on lkml
-shortly. I don't really envision this patch being merged
-in anything like its current form.
-
GPIO trigger
1) Add control over the type of interrupt etc. This will
necessitate a header that is also visible from arch board
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 3f24c629be6f..6bd3d4d5bc9d 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -67,7 +67,8 @@
#define LIS3L02DQ_REG_CTRL_2_THREE_WIRE_SPI_MODE 0x02
/* Data alignment, default is 12 bit right justified
- * - option for 16 bit left justified */
+ * - option for 16 bit left justified
+ */
#define LIS3L02DQ_REG_CTRL_2_DATA_ALIGNMENT_16_BIT_LEFT_JUSTIFIED 0x01
/* Interrupt related stuff */
@@ -77,7 +78,8 @@
#define LIS3L02DQ_REG_WAKE_UP_CFG_BOOLEAN_AND 0x80
/* Latch interrupt request,
- * if on ack must be given by reading the ack register */
+ * if on ack must be given by reading the ack register
+ */
#define LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC 0x40
/* Z Interrupt on High (above threshold) */
@@ -94,7 +96,8 @@
#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW 0x01
/* Register that gives description of what caused interrupt
- * - latched if set in CFG_ADDRES */
+ * - latched if set in CFG_ADDRES
+ */
#define LIS3L02DQ_REG_WAKE_UP_SRC_ADDR 0x24
/* top bit ignored */
/* Interrupt Active */
@@ -123,7 +126,8 @@
#define LIS3L02DQ_REG_STATUS_X_NEW_DATA 0x01
/* The accelerometer readings - low and high bytes.
- * Form of high byte dependent on justification set in ctrl reg */
+ * Form of high byte dependent on justification set in ctrl reg
+ */
#define LIS3L02DQ_REG_OUT_X_L_ADDR 0x28
#define LIS3L02DQ_REG_OUT_X_H_ADDR 0x29
#define LIS3L02DQ_REG_OUT_Y_L_ADDR 0x2A
@@ -132,7 +136,8 @@
#define LIS3L02DQ_REG_OUT_Z_H_ADDR 0x2D
/* Threshold values for all axes and both above and below thresholds
- * - i.e. there is only one value */
+ * - i.e. there is only one value
+ */
#define LIS3L02DQ_REG_THS_L_ADDR 0x2E
#define LIS3L02DQ_REG_THS_H_ADDR 0x2F
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 7939ae6378d7..7a6fed3f2d3f 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -567,7 +567,7 @@ static int lis3l02dq_read_event_config(struct iio_dev *indio_dev,
{
u8 val;
int ret;
- u8 mask = (1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING)));
+ u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING));
ret = lis3l02dq_spi_read_reg_8(indio_dev,
LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
@@ -622,7 +622,7 @@ static int lis3l02dq_write_event_config(struct iio_dev *indio_dev,
u8 val, control;
u8 currentlyset;
bool changed = false;
- u8 mask = (1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING)));
+ u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING));
mutex_lock(&indio_dev->mlock);
/* read current control */
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 02e930c55570..a8f533af9eca 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -216,8 +216,7 @@ static int sca3000_read_ctrl_reg(struct sca3000_state *st,
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1);
if (ret)
goto error_ret;
- else
- return st->rx[0];
+ return st->rx[0];
error_ret:
return ret;
}
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 1920dc60cf3d..d1cb9b9cf22b 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -99,8 +99,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
if (ret)
goto error_ret;
- else
- num_available = st->rx[0];
+ num_available = st->rx[0];
/*
* num_available is the total number of samples available
* i.e. number of time points * number of channels.
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index b9519be90fda..deff89973d53 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -59,12 +59,12 @@ config AD7816
temperature sensors and ADC.
config AD7192
- tristate "Analog Devices AD7190 AD7192 AD7195 ADC driver"
+ tristate "Analog Devices AD7190 AD7192 AD7193 AD7195 ADC driver"
depends on SPI
select AD_SIGMA_DELTA
help
Say yes here to build support for Analog Devices AD7190,
- AD7192 or AD7195 SPI analog to digital converters (ADC).
+ AD7192, AD7193 or AD7195 SPI analog to digital converters (ADC).
If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
@@ -92,20 +92,6 @@ config LPC32XX_ADC
activate only one via device tree selection. Provides direct access
via sysfs.
-config MXS_LRADC
- tristate "Freescale i.MX23/i.MX28 LRADC"
- depends on (ARCH_MXS || COMPILE_TEST) && HAS_IOMEM
- depends on INPUT
- select STMP_DEVICE
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- help
- Say yes here to build support for i.MX23/i.MX28 LRADC convertor
- built into these chips.
-
- To compile this driver as a module, choose M here: the
- module will be called mxs-lradc.
-
config SPEAR_ADC
tristate "ST SPEAr ADC"
depends on PLAT_SPEAR || COMPILE_TEST
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 0c87ce3530f8..3cdd83ccec8e 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -12,5 +12,4 @@ obj-$(CONFIG_AD7816) += ad7816.o
obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7280) += ad7280a.o
obj-$(CONFIG_LPC32XX_ADC) += lpc32xx_adc.o
-obj-$(CONFIG_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 92211039ffa9..f843f19cf675 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -1,7 +1,7 @@
/*
- * AD7190 AD7192 AD7195 SPI ADC driver
+ * AD7190 AD7192 AD7193 AD7195 SPI ADC driver
*
- * Copyright 2011-2012 Analog Devices Inc.
+ * Copyright 2011-2015 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
@@ -92,26 +92,43 @@
#define AD7192_CONF_CHOP BIT(23) /* CHOP enable */
#define AD7192_CONF_REFSEL BIT(20) /* REFIN1/REFIN2 Reference Select */
-#define AD7192_CONF_CHAN(x) (((1 << (x)) & 0xFF) << 8) /* Channel select */
-#define AD7192_CONF_CHAN_MASK (0xFF << 8) /* Channel select mask */
+#define AD7192_CONF_CHAN(x) ((x) << 8) /* Channel select */
+#define AD7192_CONF_CHAN_MASK (0x7FF << 8) /* Channel select mask */
#define AD7192_CONF_BURN BIT(7) /* Burnout current enable */
#define AD7192_CONF_REFDET BIT(6) /* Reference detect enable */
#define AD7192_CONF_BUF BIT(4) /* Buffered Mode Enable */
#define AD7192_CONF_UNIPOLAR BIT(3) /* Unipolar/Bipolar Enable */
#define AD7192_CONF_GAIN(x) ((x) & 0x7) /* Gain Select */
-#define AD7192_CH_AIN1P_AIN2M 0 /* AIN1(+) - AIN2(-) */
-#define AD7192_CH_AIN3P_AIN4M 1 /* AIN3(+) - AIN4(-) */
-#define AD7192_CH_TEMP 2 /* Temp Sensor */
-#define AD7192_CH_AIN2P_AIN2M 3 /* AIN2(+) - AIN2(-) */
-#define AD7192_CH_AIN1 4 /* AIN1 - AINCOM */
-#define AD7192_CH_AIN2 5 /* AIN2 - AINCOM */
-#define AD7192_CH_AIN3 6 /* AIN3 - AINCOM */
-#define AD7192_CH_AIN4 7 /* AIN4 - AINCOM */
+#define AD7192_CH_AIN1P_AIN2M BIT(0) /* AIN1(+) - AIN2(-) */
+#define AD7192_CH_AIN3P_AIN4M BIT(1) /* AIN3(+) - AIN4(-) */
+#define AD7192_CH_TEMP BIT(2) /* Temp Sensor */
+#define AD7192_CH_AIN2P_AIN2M BIT(3) /* AIN2(+) - AIN2(-) */
+#define AD7192_CH_AIN1 BIT(4) /* AIN1 - AINCOM */
+#define AD7192_CH_AIN2 BIT(5) /* AIN2 - AINCOM */
+#define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */
+#define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */
+
+#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */
+#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */
+#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */
+#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */
+#define AD7193_CH_TEMP		0x100 /* Temp sensor */
+#define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */
+#define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */
+#define AD7193_CH_AIN2 0x402 /* AIN2 - AINCOM */
+#define AD7193_CH_AIN3 0x404 /* AIN3 - AINCOM */
+#define AD7193_CH_AIN4 0x408 /* AIN4 - AINCOM */
+#define AD7193_CH_AIN5 0x410 /* AIN5 - AINCOM */
+#define AD7193_CH_AIN6 0x420 /* AIN6 - AINCOM */
+#define AD7193_CH_AIN7 0x440 /* AIN7 - AINCOM */
+#define AD7193_CH_AIN8		0x480 /* AIN8 - AINCOM */
+#define AD7193_CH_AINCOM 0x600 /* AINCOM - AINCOM */
/* ID Register Bit Designations (AD7192_REG_ID) */
#define ID_AD7190 0x4
#define ID_AD7192 0x0
+#define ID_AD7193 0x2
#define ID_AD7195 0x6
#define AD7192_ID_MASK 0x0F
@@ -236,7 +253,7 @@ static int ad7192_setup(struct ad7192_state *st,
st->mclk = pdata->ext_clk_hz;
else
st->mclk = AD7192_INT_FREQ_MHZ;
- break;
+ break;
default:
ret = -EINVAL;
goto out;
@@ -607,6 +624,24 @@ static const struct iio_chan_spec ad7192_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(8),
};
+static const struct iio_chan_spec ad7193_channels[] = {
+ AD_SD_DIFF_CHANNEL(0, 1, 2, AD7193_CH_AIN1P_AIN2M, 24, 32, 0),
+ AD_SD_DIFF_CHANNEL(1, 3, 4, AD7193_CH_AIN3P_AIN4M, 24, 32, 0),
+ AD_SD_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M, 24, 32, 0),
+ AD_SD_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M, 24, 32, 0),
+ AD_SD_TEMP_CHANNEL(4, AD7193_CH_TEMP, 24, 32, 0),
+ AD_SD_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M, 24, 32, 0),
+ AD_SD_CHANNEL(6, 1, AD7193_CH_AIN1, 24, 32, 0),
+ AD_SD_CHANNEL(7, 2, AD7193_CH_AIN2, 24, 32, 0),
+ AD_SD_CHANNEL(8, 3, AD7193_CH_AIN3, 24, 32, 0),
+ AD_SD_CHANNEL(9, 4, AD7193_CH_AIN4, 24, 32, 0),
+ AD_SD_CHANNEL(10, 5, AD7193_CH_AIN5, 24, 32, 0),
+ AD_SD_CHANNEL(11, 6, AD7193_CH_AIN6, 24, 32, 0),
+ AD_SD_CHANNEL(12, 7, AD7193_CH_AIN7, 24, 32, 0),
+ AD_SD_CHANNEL(13, 8, AD7193_CH_AIN8, 24, 32, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(14),
+};
+
static int ad7192_probe(struct spi_device *spi)
{
const struct ad7192_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -651,8 +686,18 @@ static int ad7192_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = ad7192_channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
+
+ switch (st->devid) {
+ case ID_AD7193:
+ indio_dev->channels = ad7193_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7193_channels);
+ break;
+ default:
+ indio_dev->channels = ad7192_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7192_channels);
+ break;
+ }
+
if (st->devid == ID_AD7195)
indio_dev->info = &ad7195_info;
else
@@ -699,6 +744,7 @@ static int ad7192_remove(struct spi_device *spi)
static const struct spi_device_id ad7192_id[] = {
{"ad7190", ID_AD7190},
{"ad7192", ID_AD7192},
+ {"ad7193", ID_AD7193},
{"ad7195", ID_AD7195},
{}
};
@@ -715,5 +761,5 @@ static struct spi_driver ad7192_driver = {
module_spi_driver(ad7192_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7195 ADC");
+MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7193, AD7195 ADC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index f45ebedb7a05..62e5ecacf634 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -214,8 +214,8 @@ static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
unsigned addr, bool all, unsigned val)
{
- unsigned reg = (devaddr << 27 | addr << 21 |
- (val & 0xFF) << 13 | all << 12);
+ unsigned reg = devaddr << 27 | addr << 21 |
+ (val & 0xFF) << 13 | all << 12;
reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
st->buf[0] = cpu_to_be32(reg);
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index ec89d055cf58..cca946924c58 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -85,8 +85,6 @@ struct ad7606_bus_ops {
int (*read_block)(struct device *, int, void *);
};
-void ad7606_suspend(struct iio_dev *indio_dev);
-void ad7606_resume(struct iio_dev *indio_dev);
struct iio_dev *ad7606_probe(struct device *dev, int irq,
void __iomem *base_address, unsigned id,
const struct ad7606_bus_ops *bops);
@@ -101,4 +99,12 @@ enum ad7606_supported_device_ids {
int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7606_ring_cleanup(struct iio_dev *indio_dev);
+
+#ifdef CONFIG_PM_SLEEP
+extern const struct dev_pm_ops ad7606_pm_ops;
+#define AD7606_PM_OPS (&ad7606_pm_ops)
+#else
+#define AD7606_PM_OPS NULL
+#endif
+
#endif /* IIO_ADC_AD7606_H_ */
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 2c9d8b7de9f5..fe6caeee0843 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -250,7 +250,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
}, \
}
-static const struct iio_chan_spec ad7606_8_channels[] = {
+static const struct iio_chan_spec ad7606_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(8),
AD7606_CHANNEL(0),
AD7606_CHANNEL(1),
AD7606_CHANNEL(2),
@@ -259,25 +260,6 @@ static const struct iio_chan_spec ad7606_8_channels[] = {
AD7606_CHANNEL(5),
AD7606_CHANNEL(6),
AD7606_CHANNEL(7),
- IIO_CHAN_SOFT_TIMESTAMP(8),
-};
-
-static const struct iio_chan_spec ad7606_6_channels[] = {
- AD7606_CHANNEL(0),
- AD7606_CHANNEL(1),
- AD7606_CHANNEL(2),
- AD7606_CHANNEL(3),
- AD7606_CHANNEL(4),
- AD7606_CHANNEL(5),
- IIO_CHAN_SOFT_TIMESTAMP(6),
-};
-
-static const struct iio_chan_spec ad7606_4_channels[] = {
- AD7606_CHANNEL(0),
- AD7606_CHANNEL(1),
- AD7606_CHANNEL(2),
- AD7606_CHANNEL(3),
- IIO_CHAN_SOFT_TIMESTAMP(4),
};
static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
@@ -287,20 +269,20 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
[ID_AD7606_8] = {
.name = "ad7606",
.int_vref_mv = 2500,
- .channels = ad7606_8_channels,
- .num_channels = 8,
+ .channels = ad7606_channels,
+ .num_channels = 9,
},
[ID_AD7606_6] = {
.name = "ad7606-6",
.int_vref_mv = 2500,
- .channels = ad7606_6_channels,
- .num_channels = 6,
+ .channels = ad7606_channels,
+ .num_channels = 7,
},
[ID_AD7606_4] = {
.name = "ad7606-4",
.int_vref_mv = 2500,
- .channels = ad7606_4_channels,
- .num_channels = 4,
+ .channels = ad7606_channels,
+ .num_channels = 5,
},
};
@@ -578,8 +560,11 @@ int ad7606_remove(struct iio_dev *indio_dev, int irq)
}
EXPORT_SYMBOL_GPL(ad7606_remove);
-void ad7606_suspend(struct iio_dev *indio_dev)
+#ifdef CONFIG_PM_SLEEP
+
+static int ad7606_suspend(struct device *dev)
{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
if (gpio_is_valid(st->pdata->gpio_stby)) {
@@ -587,11 +572,13 @@ void ad7606_suspend(struct iio_dev *indio_dev)
gpio_set_value(st->pdata->gpio_range, 1);
gpio_set_value(st->pdata->gpio_stby, 0);
}
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(ad7606_suspend);
-void ad7606_resume(struct iio_dev *indio_dev)
+static int ad7606_resume(struct device *dev)
{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
if (gpio_is_valid(st->pdata->gpio_stby)) {
@@ -602,8 +589,14 @@ void ad7606_resume(struct iio_dev *indio_dev)
gpio_set_value(st->pdata->gpio_stby, 1);
ad7606_reset(st);
}
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(ad7606_resume);
+
+SIMPLE_DEV_PM_OPS(ad7606_pm_ops, ad7606_suspend, ad7606_resume);
+EXPORT_SYMBOL_GPL(ad7606_pm_ops);
+
+#endif
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index adc370ee8632..84d23930fdde 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -90,36 +90,6 @@ static int ad7606_par_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ad7606_par_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- ad7606_suspend(indio_dev);
-
- return 0;
-}
-
-static int ad7606_par_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- ad7606_resume(indio_dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ad7606_pm_ops = {
- .suspend = ad7606_par_suspend,
- .resume = ad7606_par_resume,
-};
-
-#define AD7606_PAR_PM_OPS (&ad7606_pm_ops)
-
-#else
-#define AD7606_PAR_PM_OPS NULL
-#endif /* CONFIG_PM */
-
static const struct platform_device_id ad7606_driver_ids[] = {
{
.name = "ad7606-8",
@@ -142,7 +112,7 @@ static struct platform_driver ad7606_driver = {
.id_table = ad7606_driver_ids,
.driver = {
.name = "ad7606",
- .pm = AD7606_PAR_PM_OPS,
+ .pm = AD7606_PM_OPS,
},
};
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index cbb36317200e..d873a5164595 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -62,36 +62,6 @@ static int ad7606_spi_remove(struct spi_device *spi)
return ad7606_remove(indio_dev, spi->irq);
}
-#ifdef CONFIG_PM
-static int ad7606_spi_suspend(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- ad7606_suspend(indio_dev);
-
- return 0;
-}
-
-static int ad7606_spi_resume(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
-
- ad7606_resume(indio_dev);
-
- return 0;
-}
-
-static const struct dev_pm_ops ad7606_pm_ops = {
- .suspend = ad7606_spi_suspend,
- .resume = ad7606_spi_resume,
-};
-
-#define AD7606_SPI_PM_OPS (&ad7606_pm_ops)
-
-#else
-#define AD7606_SPI_PM_OPS NULL
-#endif
-
static const struct spi_device_id ad7606_id[] = {
{"ad7606-8", ID_AD7606_8},
{"ad7606-6", ID_AD7606_6},
@@ -103,7 +73,7 @@ MODULE_DEVICE_TABLE(spi, ad7606_id);
static struct spi_driver ad7606_driver = {
.driver = {
.name = "ad7606",
- .pm = AD7606_SPI_PM_OPS,
+ .pm = AD7606_PM_OPS,
},
.probe = ad7606_spi_probe,
.remove = ad7606_spi_remove,
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 22260512cf01..ac3735c7f4a9 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -296,14 +296,14 @@ static inline ssize_t ad7816_set_oti(struct device *dev,
dev_err(dev, "Invalid oti channel id %d.\n", chip->channel_id);
return -EINVAL;
} else if (chip->channel_id == 0) {
- if (ret || value < AD7816_BOUND_VALUE_MIN ||
+ if (value < AD7816_BOUND_VALUE_MIN ||
value > AD7816_BOUND_VALUE_MAX)
return -EINVAL;
data = (u8)(value - AD7816_BOUND_VALUE_MIN +
AD7816_BOUND_VALUE_BASE);
} else {
- if (ret || value < AD7816_BOUND_VALUE_BASE || value > 255)
+ if (value < AD7816_BOUND_VALUE_BASE || value > 255)
return -EINVAL;
data = (u8)value;
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
index 712cae0e8608..5dd61f6a57b9 100644
--- a/drivers/staging/iio/adc/spear_adc.c
+++ b/drivers/staging/iio/adc/spear_adc.c
@@ -262,6 +262,7 @@ static int spear_adc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct spear_adc_state *st;
+ struct resource *res;
struct iio_dev *indio_dev = NULL;
int ret = -ENODEV;
int irq;
@@ -280,45 +281,45 @@ static int spear_adc_probe(struct platform_device *pdev)
* (e.g. SPEAr3xx). Let's provide two register base addresses
* to support multi-arch kernels.
*/
- st->adc_base_spear6xx = of_iomap(np, 0);
- if (!st->adc_base_spear6xx) {
- dev_err(dev, "failed mapping memory\n");
- return -ENOMEM;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ st->adc_base_spear6xx = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(st->adc_base_spear6xx))
+ return PTR_ERR(st->adc_base_spear6xx);
+
st->adc_base_spear3xx =
(struct adc_regs_spear3xx __iomem *)st->adc_base_spear6xx;
- st->clk = clk_get(dev, NULL);
+ st->clk = devm_clk_get(dev, NULL);
if (IS_ERR(st->clk)) {
dev_err(dev, "failed getting clock\n");
- goto errout1;
+ return PTR_ERR(st->clk);
}
ret = clk_prepare_enable(st->clk);
if (ret) {
dev_err(dev, "failed enabling clock\n");
- goto errout2;
+ return ret;
}
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(dev, "failed getting interrupt resource\n");
ret = -EINVAL;
- goto errout3;
+ goto errout2;
}
ret = devm_request_irq(dev, irq, spear_adc_isr, 0, SPEAR_ADC_MOD_NAME,
st);
if (ret < 0) {
dev_err(dev, "failed requesting interrupt\n");
- goto errout3;
+ goto errout2;
}
if (of_property_read_u32(np, "sampling-frequency",
&st->sampling_freq)) {
dev_err(dev, "sampling-frequency missing in DT\n");
ret = -EINVAL;
- goto errout3;
+ goto errout2;
}
/*
@@ -348,18 +349,14 @@ static int spear_adc_probe(struct platform_device *pdev)
ret = iio_device_register(indio_dev);
if (ret)
- goto errout3;
+ goto errout2;
dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq);
return 0;
-errout3:
- clk_disable_unprepare(st->clk);
errout2:
- clk_put(st->clk);
-errout1:
- iounmap(st->adc_base_spear6xx);
+ clk_disable_unprepare(st->clk);
return ret;
}
@@ -370,8 +367,6 @@ static int spear_adc_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
clk_disable_unprepare(st->clk);
- clk_put(st->clk);
- iounmap(st->adc_base_spear6xx);
return 0;
}
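
The spear_adc conversion above trades manual resource handling (of_iomap()/clk_get() with matching iounmap()/clk_put() unwinding) for device-managed helpers; afterwards only clk_prepare_enable() still needs an explicit error label. A minimal probe skeleton illustrating the pattern (a sketch with hypothetical names, not the full driver):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;
		struct clk *clk;
		int ret;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);	/* nothing to unwind */

		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* still nothing to unwind */

		ret = clk_prepare_enable(clk);	/* not devres-managed */
		if (ret)
			return ret;

		/* from here on, error paths must clk_disable_unprepare(clk) */
		return 0;
	}
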
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 78fe0b557280..0ccf192b9a03 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -21,7 +21,7 @@
static int adt7316_i2c_read(void *client, u8 reg, u8 *data)
{
struct i2c_client *cl = client;
- int ret = 0;
+ int ret;
ret = i2c_smbus_write_byte(cl, reg);
if (ret < 0) {
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 3adc4516918c..a10e7d8e6002 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -465,9 +465,8 @@ static ssize_t adt7316_show_all_ad_channels(struct device *dev,
return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
"2 - External Temperature or AIN1\n"
"3 - AIN2\n4 - AIN3\n5 - AIN4\n");
- else
- return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
- "2 - External Temperature\n");
+ return sprintf(buf, "0 - VDD\n1 - Internal Temperature\n"
+ "2 - External Temperature\n");
}
static IIO_DEVICE_ATTR(all_ad_channels, S_IRUGO,
@@ -637,7 +636,7 @@ static ssize_t adt7316_show_da_high_resolution(struct device *dev,
if (chip->config3 & ADT7316_DA_HIGH_RESOLUTION) {
if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
return sprintf(buf, "1 (12 bits)\n");
- else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
+ if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
return sprintf(buf, "1 (10 bits)\n");
}
@@ -919,8 +918,7 @@ static ssize_t adt7316_show_all_DAC_update_modes(struct device *dev,
"1 - auto at MSB DAC AB and CD writing\n"
"2 - auto at MSB DAC ABCD writing\n"
"3 - manual\n");
- else
- return sprintf(buf, "manual\n");
+ return sprintf(buf, "manual\n");
}
static IIO_DEVICE_ATTR(all_DAC_update_modes, S_IRUGO,
@@ -1068,9 +1066,8 @@ static ssize_t adt7316_show_DAC_internal_Vref(struct device *dev,
return sprintf(buf, "0x%x\n",
(chip->dac_config & ADT7516_DAC_IN_VREF_MASK) >>
ADT7516_DAC_IN_VREF_OFFSET);
- else
- return sprintf(buf, "%d\n",
- !!(chip->dac_config & ADT7316_DAC_IN_VREF));
+ return sprintf(buf, "%d\n",
+ !!(chip->dac_config & ADT7316_DAC_IN_VREF));
}
static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index e8d0ff2d5c9b..f6b9a10326ea 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -21,8 +21,8 @@
*/
#define AD7150_STATUS 0
-#define AD7150_STATUS_OUT1 (1 << 3)
-#define AD7150_STATUS_OUT2 (1 << 5)
+#define AD7150_STATUS_OUT1 BIT(3)
+#define AD7150_STATUS_OUT2 BIT(5)
#define AD7150_CH1_DATA_HIGH 1
#define AD7150_CH2_DATA_HIGH 3
#define AD7150_CH1_AVG_HIGH 5
@@ -36,7 +36,7 @@
#define AD7150_CH2_TIMEOUT 13
#define AD7150_CH2_SETUP 14
#define AD7150_CFG 15
-#define AD7150_CFG_FIX (1 << 7)
+#define AD7150_CFG_FIX BIT(7)
#define AD7150_PD_TIMER 16
#define AD7150_CH1_CAPDAC 17
#define AD7150_CH2_CAPDAC 18
@@ -160,8 +160,9 @@ static int ad7150_read_event_config(struct iio_dev *indio_dev,
/* lock should be held */
static int ad7150_write_event_params(struct iio_dev *indio_dev,
- unsigned int chan, enum iio_event_type type,
- enum iio_event_direction dir)
+ unsigned int chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
{
int ret;
u16 value;
@@ -209,8 +210,9 @@ static int ad7150_write_event_params(struct iio_dev *indio_dev,
}
static int ad7150_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, enum iio_event_type type,
- enum iio_event_direction dir, int state)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
{
u8 thresh_type, cfg, adaptive;
int ret;
@@ -302,11 +304,11 @@ static int ad7150_read_event_value(struct iio_dev *indio_dev,
}
static int ad7150_write_event_value(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- enum iio_event_info info,
- int val, int val2)
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
{
int ret;
struct ad7150_chip_info *chip = iio_priv(indio_dev);
@@ -365,9 +367,9 @@ static ssize_t ad7150_show_timeout(struct device *dev,
}
static ssize_t ad7150_store_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf,
+ size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7150_chip_info *chip = iio_priv(indio_dev);
@@ -580,7 +582,7 @@ static const struct iio_info ad7150_info = {
*/
static int ad7150_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
int ret;
struct ad7150_chip_info *chip;
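
The (1 << n) to BIT(n) rewrites at the top of this file are behaviour-neutral; BIT() is defined in include/linux/bitops.h as, paraphrasing:

	#define BIT(nr)		(1UL << (nr))

so AD7150_STATUS_OUT1 is still bit 3 of the status register. The macro states the intent more clearly and, being an unsigned long shift, avoids signed-shift surprises at high bit positions.
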
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 2c5d27784ed3..5771d4ee8ef1 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -529,8 +529,8 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
val /= 338646;
- chip->capdac[chan->channel][chan->differential] = (val > 0 ?
- AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0);
+ chip->capdac[chan->channel][chan->differential] = val > 0 ?
+ AD7746_CAPDAC_DACP(val) | AD7746_CAPDAC_DACEN : 0;
ret = i2c_smbus_write_byte_data(chip->client,
AD7746_REG_CAPDACA,
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 10c43dda0f5a..d1218d896725 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -558,7 +558,7 @@ out:
}
static const struct iio_info ad5933_info = {
- .read_raw = &ad5933_read_raw,
+ .read_raw = ad5933_read_raw,
.attrs = &ad5933_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -616,9 +616,9 @@ static int ad5933_ring_postdisable(struct iio_dev *indio_dev)
}
static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
- .preenable = &ad5933_ring_preenable,
- .postenable = &ad5933_ring_postenable,
- .postdisable = &ad5933_ring_postdisable,
+ .preenable = ad5933_ring_preenable,
+ .postenable = ad5933_ring_postenable,
+ .postdisable = ad5933_ring_postdisable,
};
static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
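
Dropping the & in front of the handler names above is also behaviour-neutral: in C, a function designator used in an expression decays to a pointer to that function, so fn and &fn yield the same value. A two-line illustration with hypothetical names:

	static int cb(void) { return 0; }

	int (*p1)(void) = cb;	/* function name decays to a pointer */
	int (*p2)(void) = &cb;	/* identical value; the & is redundant */
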
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index bbf7e35cbc7d..76d9f74e7dcb 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -100,7 +100,6 @@ static const struct isl29018_scale {
};
struct isl29018_chip {
- struct device *dev;
struct regmap *regmap;
struct mutex lock;
int type;
@@ -180,30 +179,31 @@ static int isl29018_read_sensor_input(struct isl29018_chip *chip, int mode)
int status;
unsigned int lsb;
unsigned int msb;
+ struct device *dev = regmap_get_device(chip->regmap);
/* Set mode */
status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1,
- mode << COMMMAND1_OPMODE_SHIFT);
+ mode << COMMMAND1_OPMODE_SHIFT);
if (status) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in setting operating mode err %d\n", status);
return status;
}
msleep(CONVERSION_TIME_MS);
status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_LSB, &lsb);
if (status < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in reading LSB DATA with err %d\n", status);
return status;
}
status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_MSB, &msb);
if (status < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in reading MSB DATA with error %d\n", status);
return status;
}
- dev_vdbg(chip->dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb);
+ dev_vdbg(dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb);
return (msb << 8) | lsb;
}
@@ -241,23 +241,24 @@ static int isl29018_read_ir(struct isl29018_chip *chip, int *ir)
}
static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
- int *near_ir)
+ int *near_ir)
{
int status;
int prox_data = -1;
int ir_data = -1;
+ struct device *dev = regmap_get_device(chip->regmap);
/* Do proximity sensing with required scheme */
status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMANDII,
- COMMANDII_SCHEME_MASK,
- scheme << COMMANDII_SCHEME_SHIFT);
+ COMMANDII_SCHEME_MASK,
+ scheme << COMMANDII_SCHEME_SHIFT);
if (status) {
- dev_err(chip->dev, "Error in setting operating mode\n");
+ dev_err(dev, "Error in setting operating mode\n");
return status;
}
prox_data = isl29018_read_sensor_input(chip,
- COMMMAND1_OPMODE_PROX_ONCE);
+ COMMMAND1_OPMODE_PROX_ONCE);
if (prox_data < 0)
return prox_data;
@@ -280,7 +281,7 @@ static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
}
static ssize_t show_scale_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -297,7 +298,7 @@ static ssize_t show_scale_available(struct device *dev,
}
static ssize_t show_int_time_available(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -314,18 +315,22 @@ static ssize_t show_int_time_available(struct device *dev,
/* proximity scheme */
static ssize_t show_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
- /* return the "proximity scheme" i.e. if the chip does on chip
- infrared suppression (1 means perform on chip suppression) */
+ /*
+ * return the "proximity scheme" i.e. if the chip does on chip
+ * infrared suppression (1 means perform on chip suppression)
+ */
return sprintf(buf, "%d\n", chip->prox_scheme);
}
static ssize_t store_prox_infrared_suppression(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct isl29018_chip *chip = iio_priv(indio_dev);
@@ -338,8 +343,10 @@ static ssize_t store_prox_infrared_suppression(struct device *dev,
return -EINVAL;
}
- /* get the "proximity scheme" i.e. if the chip does on chip
- infrared suppression (1 means perform on chip suppression) */
+ /*
+ * get the "proximity scheme" i.e. if the chip does on chip
+ * infrared suppression (1 means perform on chip suppression)
+ */
mutex_lock(&chip->lock);
chip->prox_scheme = val;
mutex_unlock(&chip->lock);
@@ -413,7 +420,8 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
break;
case IIO_PROXIMITY:
ret = isl29018_read_proximity_ir(chip,
- chip->prox_scheme, val);
+ chip->prox_scheme,
+ val);
break;
default:
break;
@@ -518,10 +526,11 @@ static int isl29035_detect(struct isl29018_chip *chip)
{
int status;
unsigned int id;
+ struct device *dev = regmap_get_device(chip->regmap);
status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
if (status < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error reading ID register with error %d\n",
status);
return status;
@@ -546,6 +555,7 @@ enum {
static int isl29018_chip_init(struct isl29018_chip *chip)
{
int status;
+ struct device *dev = regmap_get_device(chip->regmap);
if (chip->type == isl29035) {
status = isl29035_detect(chip);
@@ -575,7 +585,7 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
*/
status = regmap_write(chip->regmap, ISL29018_REG_TEST, 0x0);
if (status < 0) {
- dev_err(chip->dev, "Failed to clear isl29018 TEST reg.(%d)\n",
+ dev_err(dev, "Failed to clear isl29018 TEST reg.(%d)\n",
status);
return status;
}
@@ -586,7 +596,7 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
*/
status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, 0);
if (status < 0) {
- dev_err(chip->dev, "Failed to clear isl29018 CMD1 reg.(%d)\n",
+ dev_err(dev, "Failed to clear isl29018 CMD1 reg.(%d)\n",
status);
return status;
}
@@ -597,14 +607,14 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
status = isl29018_set_scale(chip, chip->scale.scale,
chip->scale.uscale);
if (status < 0) {
- dev_err(chip->dev, "Init of isl29018 fails\n");
+ dev_err(dev, "Init of isl29018 fails\n");
return status;
}
status = isl29018_set_integration_time(chip,
isl29018_int_utimes[chip->type][chip->int_time]);
if (status < 0) {
- dev_err(chip->dev, "Init of isl29018 fails\n");
+ dev_err(dev, "Init of isl29018 fails\n");
return status;
}
@@ -614,15 +624,15 @@ static int isl29018_chip_init(struct isl29018_chip *chip)
static const struct iio_info isl29018_info = {
.attrs = &isl29018_group,
.driver_module = THIS_MODULE,
- .read_raw = &isl29018_read_raw,
- .write_raw = &isl29018_write_raw,
+ .read_raw = isl29018_read_raw,
+ .write_raw = isl29018_write_raw,
};
static const struct iio_info isl29023_info = {
.attrs = &isl29023_group,
.driver_module = THIS_MODULE,
- .read_raw = &isl29018_read_raw,
- .write_raw = &isl29018_write_raw,
+ .read_raw = isl29018_read_raw,
+ .write_raw = isl29018_write_raw,
};
static bool is_volatile_reg(struct device *dev, unsigned int reg)
@@ -699,13 +709,13 @@ static const char *isl29018_match_acpi_device(struct device *dev, int *data)
if (!id)
return NULL;
- *data = (int) id->driver_data;
+ *data = (int)id->driver_data;
return dev_name(dev);
}
static int isl29018_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct isl29018_chip *chip;
struct iio_dev *indio_dev;
@@ -721,7 +731,6 @@ static int isl29018_probe(struct i2c_client *client,
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- chip->dev = &client->dev;
if (id) {
name = id->name;
@@ -744,7 +753,7 @@ static int isl29018_probe(struct i2c_client *client,
chip_info_tbl[dev_id].regmap_cfg);
if (IS_ERR(chip->regmap)) {
err = PTR_ERR(chip->regmap);
- dev_err(chip->dev, "regmap initialization failed: %d\n", err);
+ dev_err(&client->dev, "regmap initialization fails: %d\n", err);
return err;
}
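
The recurring substitution in this file replaces the cached chip->dev pointer with regmap_get_device(), which returns the struct device the regmap was created against; the private struct therefore no longer carries a duplicate pointer. Both ends of the pattern, sketched (regmap_cfg stands in for the per-chip config the driver actually passes):

	/* probe: the regmap is created against the client's device ... */
	chip->regmap = devm_regmap_init_i2c(client, &regmap_cfg);

	/* ... later: the same device is recovered from the regmap handle */
	struct device *dev = regmap_get_device(chip->regmap);

	dev_err(dev, "example message\n");	/* hypothetical use */
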
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 32ae1127da33..6e2ba458c24d 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -81,7 +81,7 @@ struct isl29028_chip {
};
static int isl29028_set_proxim_sampling(struct isl29028_chip *chip,
- unsigned int sampling)
+ unsigned int sampling)
{
static unsigned int prox_period[] = {800, 400, 200, 100, 75, 50, 12, 0};
int sel;
@@ -103,7 +103,7 @@ static int isl29028_enable_proximity(struct isl29028_chip *chip, bool enable)
if (enable)
val = CONFIGURE_PROX_EN;
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_PROX_EN_MASK, val);
+ CONFIGURE_PROX_EN_MASK, val);
if (ret < 0)
return ret;
@@ -122,24 +122,27 @@ static int isl29028_set_als_scale(struct isl29028_chip *chip, int lux_scale)
}
static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
- enum als_ir_mode mode)
+ enum als_ir_mode mode)
{
int ret = 0;
switch (mode) {
case MODE_ALS:
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_ALS);
+ CONFIGURE_ALS_IR_MODE_MASK,
+ CONFIGURE_ALS_IR_MODE_ALS);
if (ret < 0)
return ret;
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_RANGE_MASK, CONFIGURE_ALS_RANGE_HIGH_LUX);
+ CONFIGURE_ALS_RANGE_MASK,
+ CONFIGURE_ALS_RANGE_HIGH_LUX);
break;
case MODE_IR:
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_IR_MODE_MASK, CONFIGURE_ALS_IR_MODE_IR);
+ CONFIGURE_ALS_IR_MODE_MASK,
+ CONFIGURE_ALS_IR_MODE_IR);
break;
case MODE_NONE:
@@ -152,7 +155,7 @@ static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
/* Enable the ALS/IR */
ret = regmap_update_bits(chip->regmap, ISL29028_REG_CONFIGURE,
- CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN);
+ CONFIGURE_ALS_EN_MASK, CONFIGURE_ALS_EN);
if (ret < 0)
return ret;
@@ -193,7 +196,7 @@ static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox)
ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data);
if (ret < 0) {
dev_err(chip->dev, "Error in reading register %d, error %d\n",
- ISL29028_REG_PROX_DATA, ret);
+ ISL29028_REG_PROX_DATA, ret);
return ret;
}
*prox = data;
@@ -264,7 +267,8 @@ static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
/* Channel IO */
static int isl29028_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val, int val2, long mask)
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct isl29028_chip *chip = iio_priv(indio_dev);
int ret = -EINVAL;
@@ -323,7 +327,8 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
}
static int isl29028_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
{
struct isl29028_chip *chip = iio_priv(indio_dev);
int ret = -EINVAL;
@@ -406,8 +411,8 @@ static const struct iio_chan_spec isl29028_channels[] = {
static const struct iio_info isl29028_info = {
.attrs = &isl29108_group,
.driver_module = THIS_MODULE,
- .read_raw = &isl29028_read_raw,
- .write_raw = &isl29028_write_raw,
+ .read_raw = isl29028_read_raw,
+ .write_raw = isl29028_write_raw,
};
static int isl29028_chip_init(struct isl29028_chip *chip)
@@ -476,7 +481,7 @@ static const struct regmap_config isl29028_regmap_config = {
};
static int isl29028_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct isl29028_chip *chip;
struct iio_dev *indio_dev;
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 3100d960fe2c..05b4ad4e941c 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -240,8 +240,10 @@ static int taos_get_lux(struct iio_dev *indio_dev)
}
}
- /* clear status, really interrupt status (interrupts are off), but
- * we use the bit anyway - don't forget 0x80 - this is a command*/
+ /*
+ * clear status, really interrupt status (interrupts are off), but
+ * we use the bit anyway - don't forget 0x80 - this is a command
+ */
ret = i2c_smbus_write_byte(chip->client,
(TSL258X_CMD_REG | TSL258X_CMD_SPL_FN |
TSL258X_CMD_ALS_INT_CLR));
@@ -265,13 +267,14 @@ static int taos_get_lux(struct iio_dev *indio_dev)
if (!ch0) {
/* have no data, so return LAST VALUE */
- ret = chip->als_cur_info.lux = 0;
+ ret = 0;
+ chip->als_cur_info.lux = 0;
goto out_unlock;
}
/* calculate ratio */
ratio = (ch1 << 15) / ch0;
/* convert to unscaled lux using the pointer to the table */
- for (p = (struct taos_lux *) taos_device_lux;
+ for (p = (struct taos_lux *)taos_device_lux;
p->ratio != 0 && p->ratio < ratio; p++)
;
@@ -290,7 +293,8 @@ static int taos_get_lux(struct iio_dev *indio_dev)
/* note: lux is 31 bit max at this point */
if (ch1lux > ch0lux) {
dev_dbg(&chip->client->dev, "No Data - Return last value\n");
- ret = chip->als_cur_info.lux = 0;
+ ret = 0;
+ chip->als_cur_info.lux = 0;
goto out_unlock;
}
@@ -378,7 +382,7 @@ static int taos_als_calibrate(struct iio_dev *indio_dev)
dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n");
return lux_val;
}
- gain_trim_val = (unsigned int) (((chip->taos_settings.als_cal_target)
+ gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target)
* chip->taos_settings.als_gain_trim) / lux_val);
if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
@@ -387,9 +391,9 @@ static int taos_als_calibrate(struct iio_dev *indio_dev)
gain_trim_val);
return -ENODATA;
}
- chip->taos_settings.als_gain_trim = (int) gain_trim_val;
+ chip->taos_settings.als_gain_trim = (int)gain_trim_val;
- return (int) gain_trim_val;
+ return (int)gain_trim_val;
}
/*
@@ -429,8 +433,10 @@ static int taos_chip_on(struct iio_dev *indio_dev)
chip->als_saturation = als_count * 922; /* 90% of full scale */
chip->als_time_scale = (als_time + 25) / 50;
- /* TSL258x Specific power-on / adc enable sequence
- * Power on the device 1st. */
+ /*
+ * TSL258x Specific power-on / adc enable sequence
+ * Power on the device 1st.
+ */
utmp = TSL258X_CNTL_PWR_ON;
ret = i2c_smbus_write_byte_data(chip->client,
TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
@@ -439,8 +445,10 @@ static int taos_chip_on(struct iio_dev *indio_dev)
return ret;
}
- /* Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers. */
+ /*
+ * Use the following shadow copy for our delay before enabling ADC.
+ * Write all the registers.
+ */
for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) {
ret = i2c_smbus_write_byte_data(chip->client,
TSL258X_CMD_REG + i,
@@ -453,8 +461,10 @@ static int taos_chip_on(struct iio_dev *indio_dev)
}
usleep_range(3000, 3500);
- /* NOW enable the ADC
- * initialize the desired mode of operation */
+ /*
+ * NOW enable the ADC
+ * initialize the desired mode of operation
+ */
utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL;
ret = i2c_smbus_write_byte_data(chip->client,
TSL258X_CMD_REG | TSL258X_CNTRL,
@@ -482,7 +492,7 @@ static int taos_chip_off(struct iio_dev *indio_dev)
/* Sysfs Interface Functions */
static ssize_t taos_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -491,7 +501,8 @@ static ssize_t taos_power_state_show(struct device *dev,
}
static ssize_t taos_power_state_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
int value;
@@ -508,7 +519,7 @@ static ssize_t taos_power_state_store(struct device *dev,
}
static ssize_t taos_gain_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -533,7 +544,8 @@ static ssize_t taos_gain_show(struct device *dev,
}
static ssize_t taos_gain_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -564,13 +576,14 @@ static ssize_t taos_gain_store(struct device *dev,
}
static ssize_t taos_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%s\n", "1 8 16 111");
}
static ssize_t taos_als_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -579,7 +592,8 @@ static ssize_t taos_als_time_show(struct device *dev,
}
static ssize_t taos_als_time_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -600,14 +614,15 @@ static ssize_t taos_als_time_store(struct device *dev,
}
static ssize_t taos_als_time_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%s\n",
"50 100 150 200 250 300 350 400 450 500 550 600 650");
}
static ssize_t taos_als_trim_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -616,7 +631,8 @@ static ssize_t taos_als_trim_show(struct device *dev,
}
static ssize_t taos_als_trim_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -632,7 +648,8 @@ static ssize_t taos_als_trim_store(struct device *dev,
}
static ssize_t taos_als_cal_target_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -641,7 +658,8 @@ static ssize_t taos_als_cal_target_show(struct device *dev,
}
static ssize_t taos_als_cal_target_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
@@ -657,7 +675,7 @@ static ssize_t taos_als_cal_target_store(struct device *dev,
}
static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
int ret;
@@ -669,7 +687,8 @@ static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t taos_do_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
int value;
@@ -684,7 +703,7 @@ static ssize_t taos_do_calibrate(struct device *dev,
}
static ssize_t taos_luxtable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
int i;
int offset = 0;
@@ -695,8 +714,10 @@ static ssize_t taos_luxtable_show(struct device *dev,
taos_device_lux[i].ch0,
taos_device_lux[i].ch1);
if (taos_device_lux[i].ratio == 0) {
- /* We just printed the first "0" entry.
- * Now get rid of the extra "," and break. */
+ /*
+ * We just printed the first "0" entry.
+ * Now get rid of the extra "," and break.
+ */
offset--;
break;
}
@@ -707,11 +728,12 @@ static ssize_t taos_luxtable_show(struct device *dev,
}
static ssize_t taos_luxtable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2583_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(taos_device_lux)*3 + 1];
+ int value[ARRAY_SIZE(taos_device_lux) * 3 + 1];
int n;
get_options(buf, ARRAY_SIZE(value), value);
@@ -809,7 +831,7 @@ static int taos_probe(struct i2c_client *clientp,
struct iio_dev *indio_dev;
if (!i2c_check_functionality(clientp->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n");
return -EOPNOTSUPP;
}
@@ -846,7 +868,7 @@ static int taos_probe(struct i2c_client *clientp,
if (!taos_tsl258x_device(buf)) {
dev_info(&clientp->dev,
- "i2c device found but does not match expected id in taos_probe()\n");
+ "i2c device found but does not match expected id in taos_probe()\n");
return -EINVAL;
}
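
Most of the remaining hunks in this file only reflow comments into the kernel's preferred multi-line style (see Documentation/CodingStyle): the opening /* and the closing */ each sit on their own line, and every body line starts with an aligned asterisk:

	/*
	 * This is the preferred style for multi-line comments in the
	 * kernel source: opener and closer on their own lines, and
	 * each intermediate line begins with an aligned asterisk.
	 */
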
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 5b1c1650a0e4..5f308bae41b9 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -296,7 +296,7 @@ static const u8 device_channel_config[] = {
static int
tsl2x7x_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
{
- int ret = 0;
+ int ret;
/* select register to write */
ret = i2c_smbus_write_byte(client, (TSL2X7X_CMD_REG | reg));
@@ -687,9 +687,9 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
/* Set the gain based on tsl2x7x_settings struct */
chip->tsl2x7x_config[TSL2X7X_GAIN] =
- (chip->tsl2x7x_settings.als_gain |
+ chip->tsl2x7x_settings.als_gain |
(TSL2X7X_mA100 | TSL2X7X_DIODE1)
- | ((chip->tsl2x7x_settings.prox_gain) << 2));
+ | ((chip->tsl2x7x_settings.prox_gain) << 2);
/* set chip struct re scaling and saturation */
chip->als_saturation = als_count * 922; /* 90% of full scale */
@@ -983,7 +983,7 @@ static ssize_t tsl2x7x_als_time_store(struct device *dev,
result.fract /= 3;
chip->tsl2x7x_settings.als_time =
- (TSL2X7X_MAX_TIMER_CNT - (u8)result.fract);
+ TSL2X7X_MAX_TIMER_CNT - (u8)result.fract;
dev_info(&chip->client->dev, "%s: als time = %d",
__func__, chip->tsl2x7x_settings.als_time);
diff --git a/drivers/staging/iio/magnetometer/Kconfig b/drivers/staging/iio/magnetometer/Kconfig
deleted file mode 100644
index dec814a7a073..000000000000
--- a/drivers/staging/iio/magnetometer/Kconfig
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Magnetometer sensors
-#
-menu "Magnetometer sensors"
-
-config SENSORS_HMC5843
- tristate
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
-
-config SENSORS_HMC5843_I2C
- tristate "Honeywell HMC5843/5883/5883L 3-Axis Magnetometer (I2C)"
- depends on I2C
- select SENSORS_HMC5843
- select REGMAP_I2C
- help
- Say Y here to add support for the Honeywell HMC5843, HMC5883 and
- HMC5883L 3-Axis Magnetometer (digital compass).
-
- This driver can also be compiled as a set of modules.
- If so, these modules will be created:
- - hmc5843_core (core functions)
- - hmc5843_i2c (support for HMC5843, HMC5883, HMC5883L and HMC5983)
-
-config SENSORS_HMC5843_SPI
- tristate "Honeywell HMC5983 3-Axis Magnetometer (SPI)"
- depends on SPI_MASTER
- select SENSORS_HMC5843
- select REGMAP_SPI
- help
- Say Y here to add support for the Honeywell HMC5983 3-Axis Magnetometer
- (digital compass).
-
- This driver can also be compiled as a set of modules.
- If so, these modules will be created:
- - hmc5843_core (core functions)
- - hmc5843_spi (support for HMC5983)
-
-
-endmenu
diff --git a/drivers/staging/iio/magnetometer/Makefile b/drivers/staging/iio/magnetometer/Makefile
deleted file mode 100644
index 33761a19a956..000000000000
--- a/drivers/staging/iio/magnetometer/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for industrial I/O Magnetometer sensors
-#
-
-obj-$(CONFIG_SENSORS_HMC5843) += hmc5843_core.o
-obj-$(CONFIG_SENSORS_HMC5843_I2C) += hmc5843_i2c.o
-obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 1e950685e12f..f4188e17d30b 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -347,7 +347,7 @@ static int ade7754_set_irq(struct device *dev, bool enable)
ret = ade7754_spi_read_reg_16(dev, ADE7754_IRQEN, &irqen);
if (ret)
- goto error_ret;
+ return ret;
if (enable)
irqen |= BIT(14); /* Enables an interrupt when a data is
@@ -356,10 +356,7 @@ static int ade7754_set_irq(struct device *dev, bool enable)
irqen &= ~BIT(14);
ret = ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);
- if (ret)
- goto error_ret;
-error_ret:
return ret;
}
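
This hunk, and the matching ones in ade7758_core.c and ade7854.c below, delete a goto whose label sat directly in front of the return it would otherwise fall through to, so the jump never changed behaviour. The before-and-after shape of the function tail, sketched:

	/* before: both paths reach the same return statement */
	ret = ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);
	if (ret)
		goto error_ret;
error_ret:
	return ret;

	/* after: the write's status is returned directly */
	ret = ade7754_spi_write_reg_16(dev, ADE7754_IRQEN, irqen);

	return ret;
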
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 0db23e4d1852..40f5afaa984b 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -423,7 +423,7 @@ int ade7758_set_irq(struct device *dev, bool enable)
ret = ade7758_spi_read_reg_24(dev, ADE7758_MASK, &irqen);
if (ret)
- goto error_ret;
+ return ret;
if (enable)
irqen |= BIT(16); /* Enables an interrupt when a data is
@@ -432,10 +432,7 @@ int ade7758_set_irq(struct device *dev, bool enable)
irqen &= ~BIT(16);
ret = ade7758_spi_write_reg_24(dev, ADE7758_MASK, irqen);
- if (ret)
- goto error_ret;
-error_ret:
return ret;
}
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index 07cfe28b24e2..8106f8cceeab 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -227,11 +227,6 @@ static int ade7854_i2c_probe(struct i2c_client *client,
return ade7854_probe(indio_dev, &client->dev);
}
-static int ade7854_i2c_remove(struct i2c_client *client)
-{
- return ade7854_remove(i2c_get_clientdata(client));
-}
-
static const struct i2c_device_id ade7854_id[] = {
{ "ade7854", 0 },
{ "ade7858", 0 },
@@ -246,7 +241,6 @@ static struct i2c_driver ade7854_i2c_driver = {
.name = "ade7854",
},
.probe = ade7854_i2c_probe,
- .remove = ade7854_i2c_remove,
.id_table = ade7854_id,
};
module_i2c_driver(ade7854_i2c_driver);
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index 2413052c5bfb..63e200ffd1f2 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -296,12 +296,6 @@ static int ade7854_spi_probe(struct spi_device *spi)
return ade7854_probe(indio_dev, &spi->dev);
}
-static int ade7854_spi_remove(struct spi_device *spi)
-{
- ade7854_remove(spi_get_drvdata(spi));
-
- return 0;
-}
static const struct spi_device_id ade7854_id[] = {
{ "ade7854", 0 },
{ "ade7858", 0 },
@@ -316,7 +310,6 @@ static struct spi_driver ade7854_driver = {
.name = "ade7854",
},
.probe = ade7854_spi_probe,
- .remove = ade7854_spi_remove,
.id_table = ade7854_id,
};
module_spi_driver(ade7854_driver);
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index a83883596dbc..9e439af7100d 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -417,7 +417,7 @@ static int ade7854_set_irq(struct device *dev, bool enable)
ret = st->read_reg_32(dev, ADE7854_MASK0, &irqen);
if (ret)
- goto error_ret;
+ return ret;
if (enable)
irqen |= BIT(17); /* 1: interrupt enabled when all periodical
@@ -426,10 +426,7 @@ static int ade7854_set_irq(struct device *dev, bool enable)
irqen &= ~BIT(17);
ret = st->write_reg_32(dev, ADE7854_MASK0, irqen);
- if (ret)
- goto error_ret;
-error_ret:
return ret;
}
@@ -548,31 +545,15 @@ int ade7854_probe(struct iio_dev *indio_dev, struct device *dev)
indio_dev->info = &ade7854_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return ret;
/* Get the device into a sane initial state */
- ret = ade7854_initial_setup(indio_dev);
- if (ret)
- goto error_unreg_dev;
-
- return 0;
-
-error_unreg_dev:
- iio_device_unregister(indio_dev);
- return ret;
+ return ade7854_initial_setup(indio_dev);
}
EXPORT_SYMBOL(ade7854_probe);
-int ade7854_remove(struct iio_dev *indio_dev)
-{
- iio_device_unregister(indio_dev);
-
- return 0;
-}
-EXPORT_SYMBOL(ade7854_remove);
-
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Energy Meter");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 595e711d35a6..82b2d88ca942 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -31,7 +31,7 @@
/* input clock on serial interface */
#define AD2S1200_HZ 8192000
/* clock period in nano second */
-#define AD2S1200_TSCLK (1000000000/AD2S1200_HZ)
+#define AD2S1200_TSCLK (1000000000 / AD2S1200_HZ)
struct ad2s1200_state {
struct mutex lock;
@@ -42,10 +42,10 @@ struct ad2s1200_state {
};
static int ad2s1200_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val,
- int *val2,
- long m)
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
int ret = 0;
s16 vel;
@@ -113,7 +113,7 @@ static int ad2s1200_probe(struct spi_device *spi)
DRV_NAME);
if (ret) {
dev_err(&spi->dev, "request gpio pin %d failed\n",
- pins[pn]);
+ pins[pn]);
return ret;
}
}
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index d97aa2827412..6b992634f009 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -67,7 +67,7 @@
/* default input clock on serial interface */
#define AD2S1210_DEF_CLKIN 8192000
/* clock period in nano second */
-#define AD2S1210_DEF_TCK (1000000000/AD2S1210_DEF_CLKIN)
+#define AD2S1210_DEF_TCK (1000000000 / AD2S1210_DEF_CLKIN)
#define AD2S1210_DEF_EXCIT 10000
enum ad2s1210_mode {
@@ -98,6 +98,7 @@ static const int ad2s1210_mode_vals[4][2] = {
[MOD_VEL] = { 0, 1 },
[MOD_CONFIG] = { 1, 0 },
};
+
static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
struct ad2s1210_state *st)
{
@@ -123,7 +124,7 @@ static int ad2s1210_config_write(struct ad2s1210_state *st, u8 data)
/* read value from one of the registers */
static int ad2s1210_config_read(struct ad2s1210_state *st,
- unsigned char address)
+ unsigned char address)
{
struct spi_transfer xfer = {
.len = 2,
@@ -176,9 +177,9 @@ static const int ad2s1210_res_pins[4][2] = {
static inline void ad2s1210_set_resolution_pin(struct ad2s1210_state *st)
{
gpio_set_value(st->pdata->res[0],
- ad2s1210_res_pins[(st->resolution - 10)/2][0]);
+ ad2s1210_res_pins[(st->resolution - 10) / 2][0]);
gpio_set_value(st->pdata->res[1],
- ad2s1210_res_pins[(st->resolution - 10)/2][1]);
+ ad2s1210_res_pins[(st->resolution - 10) / 2][1]);
}
static inline int ad2s1210_soft_reset(struct ad2s1210_state *st)
@@ -282,8 +283,8 @@ static ssize_t ad2s1210_show_control(struct device *dev,
}
static ssize_t ad2s1210_store_control(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
unsigned char udata;
@@ -318,9 +319,9 @@ static ssize_t ad2s1210_store_control(struct device *dev,
data = ad2s1210_read_resolution_pin(st);
if (data != st->resolution)
dev_warn(dev, "ad2s1210: resolution settings not match\n");
- } else
+ } else {
ad2s1210_set_resolution_pin(st);
-
+ }
ret = len;
st->hysteresis = !!(data & AD2S1210_ENABLE_HYSTERESIS);
@@ -330,7 +331,8 @@ error_ret:
}
static ssize_t ad2s1210_show_resolution(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
@@ -338,8 +340,8 @@ static ssize_t ad2s1210_show_resolution(struct device *dev,
}
static ssize_t ad2s1210_store_resolution(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
unsigned char data;
@@ -379,8 +381,9 @@ static ssize_t ad2s1210_store_resolution(struct device *dev,
data = ad2s1210_read_resolution_pin(st);
if (data != st->resolution)
dev_warn(dev, "ad2s1210: resolution settings not match\n");
- } else
+ } else {
ad2s1210_set_resolution_pin(st);
+ }
ret = len;
error_ret:
mutex_unlock(&st->lock);
@@ -389,7 +392,7 @@ error_ret:
/* read the fault register since last sample */
static ssize_t ad2s1210_show_fault(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
int ret;
@@ -441,7 +444,8 @@ static ssize_t ad2s1210_show_reg(struct device *dev,
}
static ssize_t ad2s1210_store_reg(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
unsigned char data;
@@ -497,7 +501,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ANGL:
- pos = be16_to_cpup((__be16 *) st->rx);
+ pos = be16_to_cpup((__be16 *)st->rx);
if (st->hysteresis)
pos >>= 16 - st->resolution;
*val = pos;
@@ -505,7 +509,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
break;
case IIO_ANGL_VEL:
negative = st->rx[0] & 0x80;
- vel = be16_to_cpup((__be16 *) st->rx);
+ vel = be16_to_cpup((__be16 *)st->rx);
vel >>= 16 - st->resolution;
if (vel & 0x8000) {
negative = (0xffff >> st->resolution) << st->resolution;
@@ -560,7 +564,6 @@ static IIO_DEVICE_ATTR(lot_low_thrd, S_IRUGO | S_IWUSR,
ad2s1210_show_reg, ad2s1210_store_reg,
AD2S1210_REG_LOT_LOW_THRD);
-
static const struct iio_chan_spec ad2s1210_channels[] = {
{
.type = IIO_ANGL,
@@ -672,7 +675,7 @@ static int ad2s1210_probe(struct spi_device *spi)
struct ad2s1210_state *st;
int ret;
- if (spi->dev.platform_data == NULL)
+ if (!spi->dev.platform_data)
return -EINVAL;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
diff --git a/drivers/staging/iio/trigger/Kconfig b/drivers/staging/iio/trigger/Kconfig
index 710a2f3e787e..0b01d24cea51 100644
--- a/drivers/staging/iio/trigger/Kconfig
+++ b/drivers/staging/iio/trigger/Kconfig
@@ -5,16 +5,6 @@ comment "Triggers - standalone"
if IIO_TRIGGER
-config IIO_PERIODIC_RTC_TRIGGER
- tristate "Periodic RTC triggers"
- depends on RTC_CLASS
- help
- Provides support for using periodic capable real time
- clocks as IIO triggers.
-
- To compile this driver as a module, choose M here: the
- module will be called iio-trig-periodic-rtc.
-
config IIO_BFIN_TMR_TRIGGER
tristate "Blackfin TIMER trigger"
depends on BLACKFIN
diff --git a/drivers/staging/iio/trigger/Makefile b/drivers/staging/iio/trigger/Makefile
index 238481b78e72..1300a21363db 100644
--- a/drivers/staging/iio/trigger/Makefile
+++ b/drivers/staging/iio/trigger/Makefile
@@ -2,5 +2,4 @@
# Makefile for triggers not associated with iio-devices
#
-obj-$(CONFIG_IIO_PERIODIC_RTC_TRIGGER) += iio-trig-periodic-rtc.o
obj-$(CONFIG_IIO_BFIN_TMR_TRIGGER) += iio-trig-bfin-timer.o
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
deleted file mode 100644
index 00d139331261..000000000000
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/* The industrial I/O periodic RTC trigger driver
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This is a heavily rewritten version of the periodic timer system in
- * earlier version of industrialio. It supplies the same functionality
- * but via a trigger rather than a specific periodic timer system.
- */
-
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/rtc.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/trigger.h>
-
-static LIST_HEAD(iio_prtc_trigger_list);
-static DEFINE_MUTEX(iio_prtc_trigger_list_lock);
-
-struct iio_prtc_trigger_info {
- struct rtc_device *rtc;
- unsigned int frequency;
- struct rtc_task task;
- bool state;
-};
-
-static int iio_trig_periodic_rtc_set_state(struct iio_trigger *trig, bool state)
-{
- struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig);
- int ret;
-
- if (trig_info->frequency == 0 && state)
- return -EINVAL;
- dev_dbg(&trig_info->rtc->dev, "trigger frequency is %u\n",
- trig_info->frequency);
- ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, state);
- if (!ret)
- trig_info->state = state;
-
- return ret;
-}
-
-static ssize_t iio_trig_periodic_read_freq(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_trigger *trig = to_iio_trigger(dev);
- struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig);
-
- return sprintf(buf, "%u\n", trig_info->frequency);
-}
-
-static ssize_t iio_trig_periodic_write_freq(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_trigger *trig = to_iio_trigger(dev);
- struct iio_prtc_trigger_info *trig_info = iio_trigger_get_drvdata(trig);
- unsigned int val;
- int ret;
-
- ret = kstrtouint(buf, 10, &val);
- if (ret)
- goto error_ret;
-
- if (val > 0) {
- ret = rtc_irq_set_freq(trig_info->rtc, &trig_info->task, val);
- if (ret == 0 && trig_info->state && trig_info->frequency == 0)
- ret = rtc_irq_set_state(trig_info->rtc,
- &trig_info->task, 1);
- } else {
- ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 0);
- }
- if (ret)
- goto error_ret;
-
- trig_info->frequency = val;
-
- return len;
-
-error_ret:
- return ret;
-}
-
-static DEVICE_ATTR(frequency, S_IRUGO | S_IWUSR,
- iio_trig_periodic_read_freq,
- iio_trig_periodic_write_freq);
-
-static struct attribute *iio_trig_prtc_attrs[] = {
- &dev_attr_frequency.attr,
- NULL,
-};
-
-static const struct attribute_group iio_trig_prtc_attr_group = {
- .attrs = iio_trig_prtc_attrs,
-};
-
-static const struct attribute_group *iio_trig_prtc_attr_groups[] = {
- &iio_trig_prtc_attr_group,
- NULL
-};
-
-static void iio_prtc_trigger_poll(void *private_data)
-{
- iio_trigger_poll(private_data);
-}
-
-static const struct iio_trigger_ops iio_prtc_trigger_ops = {
- .owner = THIS_MODULE,
- .set_trigger_state = &iio_trig_periodic_rtc_set_state,
-};
-
-static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
-{
- char **pdata = dev->dev.platform_data;
- struct iio_prtc_trigger_info *trig_info;
- struct iio_trigger *trig, *trig2;
-
- int i, ret;
-
- for (i = 0;; i++) {
- if (!pdata[i])
- break;
- trig = iio_trigger_alloc("periodic%s", pdata[i]);
- if (!trig) {
- ret = -ENOMEM;
- goto error_free_completed_registrations;
- }
- list_add(&trig->alloc_list, &iio_prtc_trigger_list);
-
- trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
- if (!trig_info) {
- ret = -ENOMEM;
- goto error_put_trigger_and_remove_from_list;
- }
- iio_trigger_set_drvdata(trig, trig_info);
- trig->ops = &iio_prtc_trigger_ops;
- /* RTC access */
- trig_info->rtc = rtc_class_open(pdata[i]);
- if (!trig_info->rtc) {
- ret = -EINVAL;
- goto error_free_trig_info;
- }
- trig_info->task.func = iio_prtc_trigger_poll;
- trig_info->task.private_data = trig;
- ret = rtc_irq_register(trig_info->rtc, &trig_info->task);
- if (ret)
- goto error_close_rtc;
- trig->dev.groups = iio_trig_prtc_attr_groups;
- ret = iio_trigger_register(trig);
- if (ret)
- goto error_unregister_rtc_irq;
- }
- return 0;
-error_unregister_rtc_irq:
- rtc_irq_unregister(trig_info->rtc, &trig_info->task);
-error_close_rtc:
- rtc_class_close(trig_info->rtc);
-error_free_trig_info:
- kfree(trig_info);
-error_put_trigger_and_remove_from_list:
- list_del(&trig->alloc_list);
- iio_trigger_put(trig);
-error_free_completed_registrations:
- list_for_each_entry_safe(trig,
- trig2,
- &iio_prtc_trigger_list,
- alloc_list) {
- trig_info = iio_trigger_get_drvdata(trig);
- rtc_irq_unregister(trig_info->rtc, &trig_info->task);
- rtc_class_close(trig_info->rtc);
- kfree(trig_info);
- iio_trigger_unregister(trig);
- }
- return ret;
-}
-
-static int iio_trig_periodic_rtc_remove(struct platform_device *dev)
-{
- struct iio_trigger *trig, *trig2;
- struct iio_prtc_trigger_info *trig_info;
-
- mutex_lock(&iio_prtc_trigger_list_lock);
- list_for_each_entry_safe(trig,
- trig2,
- &iio_prtc_trigger_list,
- alloc_list) {
- trig_info = iio_trigger_get_drvdata(trig);
- rtc_irq_unregister(trig_info->rtc, &trig_info->task);
- rtc_class_close(trig_info->rtc);
- kfree(trig_info);
- iio_trigger_unregister(trig);
- }
- mutex_unlock(&iio_prtc_trigger_list_lock);
- return 0;
-}
-
-static struct platform_driver iio_trig_periodic_rtc_driver = {
- .probe = iio_trig_periodic_rtc_probe,
- .remove = iio_trig_periodic_rtc_remove,
- .driver = {
- .name = "iio_prtc_trigger",
- },
-};
-
-module_platform_driver(iio_trig_periodic_rtc_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("Periodic realtime clock trigger for the iio subsystem");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/lustre/Kconfig b/drivers/staging/lustre/Kconfig
index a224d88bf43d..b7d81096eee9 100644
--- a/drivers/staging/lustre/Kconfig
+++ b/drivers/staging/lustre/Kconfig
@@ -1,3 +1,3 @@
-source "drivers/staging/lustre/lustre/Kconfig"
-
source "drivers/staging/lustre/lnet/Kconfig"
+
+source "drivers/staging/lustre/lustre/Kconfig"
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 0d8a91ee5ffc..40af75c4201a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -42,6 +42,8 @@
#include "curproc.h"
+#define LIBCFS_VERSION "0.7.0"
+
#define LOWEST_BIT_SET(x) ((x) & ~((x) - 1))
/*
@@ -51,8 +53,6 @@
#define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \
((hexnum) >> 8 & 0xf))
-#define LUSTRE_SRV_LNET_PID LUSTRE_LNET_PID
-
#include <linux/list.h>
/* need both kernel and user-land acceptor */
@@ -77,7 +77,7 @@ struct cfs_psdev_ops {
int (*p_close)(unsigned long, void *);
int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void *);
+ int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void __user *);
};
/*
@@ -90,7 +90,6 @@ void cfs_enter_debugger(void);
* Defined by platform
*/
int unshare_fs_struct(void);
-sigset_t cfs_get_blocked_sigs(void);
sigset_t cfs_block_allsigs(void);
sigset_t cfs_block_sigs(unsigned long sigs);
sigset_t cfs_block_sigsinv(unsigned long sigs);
@@ -115,7 +114,6 @@ void cfs_get_random_bytes(void *buf, int size);
#include "libcfs_prim.h"
#include "libcfs_time.h"
#include "libcfs_string.h"
-#include "libcfs_kernelcomm.h"
#include "libcfs_workitem.h"
#include "libcfs_hash.h"
#include "libcfs_fail.h"
@@ -156,5 +154,9 @@ struct lnet_debugfs_symlink_def {
void lustre_insert_debugfs(struct ctl_table *table,
const struct lnet_debugfs_symlink_def *symlinks);
+int lprocfs_call_handler(void *data, int write, loff_t *ppos,
+ void __user *buffer, size_t *lenp,
+ int (*handler)(void *data, int write,
+ loff_t pos, void __user *buffer, int len));
#endif /* _LIBCFS_H */
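The new lprocfs_call_handler() declaration suggests a thin adapter between a ctl_table proc_handler and a simpler per-variable callback. A hedged sketch of how a caller might use it; my_read_write and my_proc_handler are illustrative names, not part of this patch:

/* Callback with the shape declared above; fills or consumes 'buffer'. */
static int my_read_write(void *data, int write, loff_t pos,
			 void __user *buffer, int len)
{
	return 0;
}

static int my_proc_handler(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
				    my_read_write);
}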
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 1530b0458a61..9e62c59714b7 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/*
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index a1787bb43483..98430e7108c1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -106,7 +106,7 @@ struct ptldebug_header {
#define S_LOV 0x00020000
#define S_LQUOTA 0x00040000
#define S_OSD 0x00080000
-/* unused */
+#define S_LFSCK 0x00100000
/* unused */
/* unused */
#define S_LMV 0x00800000 /* b_new_cmd */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 485ab2670918..5ca99bd6f4e9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -41,11 +41,16 @@
#ifndef __LIBCFS_IOCTL_H__
#define __LIBCFS_IOCTL_H__
-#define LIBCFS_IOCTL_VERSION 0x0001000a
+#define LIBCFS_IOCTL_VERSION 0x0001000a
+#define LIBCFS_IOCTL_VERSION2 0x0001000b
-struct libcfs_ioctl_data {
+struct libcfs_ioctl_hdr {
__u32 ioc_len;
__u32 ioc_version;
+};
+
+struct libcfs_ioctl_data {
+ struct libcfs_ioctl_hdr ioc_hdr;
__u64 ioc_nid;
__u64 ioc_u64[1];
@@ -61,20 +66,15 @@ struct libcfs_ioctl_data {
char *ioc_inlbuf2;
__u32 ioc_plen1; /* buffers in userspace */
- char *ioc_pbuf1;
+ void __user *ioc_pbuf1;
__u32 ioc_plen2; /* buffers in userspace */
- char *ioc_pbuf2;
+ void __user *ioc_pbuf2;
char ioc_bulk[0];
};
#define ioc_priority ioc_u32[0]
-struct libcfs_ioctl_hdr {
- __u32 ioc_len;
- __u32 ioc_version;
-};
-
struct libcfs_debug_ioctl_data {
struct libcfs_ioctl_hdr hdr;
unsigned int subs;
@@ -90,7 +90,7 @@ do { \
struct libcfs_ioctl_handler {
struct list_head item;
- int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_data *data);
+ int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
};
#define DECLARE_IOCTL_HANDLER(ident, func) \
@@ -102,7 +102,6 @@ struct libcfs_ioctl_handler {
/* FIXME check conflict with lustre_lib.h */
#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
-/* ioctls for manipulating snapshots 30- */
#define IOC_LIBCFS_TYPE 'e'
#define IOC_LIBCFS_MIN_NR 30
/* libcfs ioctls */
@@ -113,18 +112,16 @@ struct libcfs_ioctl_handler {
/* lnet ioctls */
#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
-#define IOC_LIBCFS_ADD_ROUTE _IOWR('e', 52, long)
-#define IOC_LIBCFS_DEL_ROUTE _IOWR('e', 53, long)
-#define IOC_LIBCFS_GET_ROUTE _IOWR('e', 54, long)
#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long)
#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long)
-#define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long)
+/* #define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long) */
#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long)
#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long)
#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long)
#define IOC_LIBCFS_PING _IOWR('e', 61, long)
-#define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long)
+/* #define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long) */
#define IOC_LIBCFS_LNETST _IOWR('e', 63, long)
+#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, long)
/* lnd ioctls */
#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long)
#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long)
@@ -138,7 +135,25 @@ struct libcfs_ioctl_handler {
#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long)
#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long)
-#define IOC_LIBCFS_MAX_NR 80
+/*
+ * DLC Specific IOCTL numbers.
+ * In order to maintain backward compatibility with any possible external
+ * tools which might be accessing the IOCTL numbers, a new group of IOCTL
+ * numbers has been allocated.
+ */
+#define IOCTL_CONFIG_SIZE struct lnet_ioctl_config_data
+#define IOC_LIBCFS_ADD_ROUTE _IOWR(IOC_LIBCFS_TYPE, 81, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_DEL_ROUTE _IOWR(IOC_LIBCFS_TYPE, 82, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_ROUTE _IOWR(IOC_LIBCFS_TYPE, 83, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_ADD_NET _IOWR(IOC_LIBCFS_TYPE, 84, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_DEL_NET _IOWR(IOC_LIBCFS_TYPE, 85, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_NET _IOWR(IOC_LIBCFS_TYPE, 86, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_CONFIG_RTR _IOWR(IOC_LIBCFS_TYPE, 87, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_ADD_BUF _IOWR(IOC_LIBCFS_TYPE, 88, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_BUF _IOWR(IOC_LIBCFS_TYPE, 89, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_PEER_INFO _IOWR(IOC_LIBCFS_TYPE, 90, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE)
+#define IOC_LIBCFS_MAX_NR 91
static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
{
@@ -149,9 +164,9 @@ static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
return len;
}
-static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
+static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
{
- if (data->ioc_len > (1<<30)) {
+ if (data->ioc_hdr.ioc_len > (1 << 30)) {
CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
return 1;
}
@@ -187,7 +202,7 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
return 1;
}
- if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_len) {
+ if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
CERROR("LIBCFS ioctl: packlen != ioc_len\n");
return 1;
}
@@ -207,7 +222,9 @@ static inline int libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_ioctl_getdata(char *buf, char *end, void *arg);
-int libcfs_ioctl_popdata(void *arg, void *buf, int size);
+int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
+ __u32 *buf_len);
+int libcfs_ioctl_popdata(void __user *arg, void *buf, int size);
+int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
#endif /* __LIBCFS_IOCTL_H__ */
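The header/data split plus the new libcfs_ioctl_getdata_len() declaration point at a two-step copy-in: read the fixed-size libcfs_ioctl_hdr from userspace to learn ioc_len, then copy the full payload. A hedged sketch of that pattern (allocation and dispatch simplified; example_getdata is an illustrative name):

static int example_getdata(void __user *uparg)
{
	struct libcfs_ioctl_hdr *hdr;
	__u32 buf_len;
	int rc;

	rc = libcfs_ioctl_getdata_len(uparg, &buf_len);	/* peeks the header */
	if (rc)
		return rc;

	hdr = kzalloc(buf_len, GFP_KERNEL);
	if (!hdr)
		return -ENOMEM;

	if (copy_from_user(hdr, uparg, buf_len)) {
		rc = -EFAULT;
		goto out;
	}
	/* ... validate hdr->ioc_version, dispatch on the command ... */
	rc = libcfs_ioctl_popdata(uparg, hdr, buf_len);	/* copy results back */
out:
	kfree(hdr);
	return rc;
}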
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index a80d993b882e..13335437c69c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -88,7 +88,7 @@ do { \
} while (0)
#ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
#endif
#define LIBCFS_ALLOC_PRE(size, mask) \
@@ -387,11 +387,6 @@ int cfs_percpt_atomic_summary(atomic_t **refs);
* Support for temporary event tracing with minimal Heisenberg effect.
* -------------------------------------------------------------------- */
-struct libcfs_device_userstate {
- int ldu_memhog_pages;
- struct page *ldu_memhog_root_page;
-};
-
#define MKSTR(ptr) ((ptr)) ? (ptr) : ""
static inline int cfs_size_round4(int val)
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index d8d2e7dc212e..e02cde5aeca1 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -44,8 +44,6 @@
#define __LIBCFS_STRING_H__
/* libcfs_string.c */
-/* string comparison ignoring case */
-int cfs_strncasecmp(const char *s1, const char *s2, size_t n);
/* Convert a text string to a bitmask */
int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
int *oldmask, int minmask, int allmask);
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index aac59008ad1a..d94b2661658a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -118,9 +118,6 @@ do { \
#define CDEBUG_STACK() (0L)
#endif /* __x86_64__ */
-/* initial pid */
-#define LUSTRE_LNET_PID 12345
-
#define __current_nesting_level() (0)
/**
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index 520209f17173..c04979ae0a38 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 0f2fd79e5ec8..837eb22749c3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -57,7 +57,7 @@
#include "../libcfs_cpu.h"
#endif
-#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
#define page_index(p) ((p)->index)
#define memory_pressure_get() (current->flags & PF_MEMALLOC)
@@ -67,7 +67,7 @@
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
#define NUM_CACHEPAGES \
- min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+ min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
#else
#define NUM_CACHEPAGES totalram_pages
#endif
diff --git a/drivers/staging/lustre/include/linux/lnet/api.h b/drivers/staging/lustre/include/linux/lnet/api.h
index 75285fde15e8..cb0d6b481455 100644
--- a/drivers/staging/lustre/include/linux/lnet/api.h
+++ b/drivers/staging/lustre/include/linux/lnet/api.h
@@ -48,7 +48,8 @@
/** \defgroup lnet_init_fini Initialization and cleanup
* The LNet must be properly initialized before any LNet calls can be made.
- * @{ */
+ * @{
+ */
int LNetNIInit(lnet_pid_t requested_pid);
int LNetNIFini(void);
/** @} lnet_init_fini */
@@ -71,7 +72,8 @@ int LNetNIFini(void);
* it's an entry in the portals table of a process.
*
* \see LNetMEAttach
- * @{ */
+ * @{
+ */
int LNetGetId(unsigned int index, lnet_process_id_t *id);
int LNetDist(lnet_nid_t nid, lnet_nid_t *srcnid, __u32 *order);
void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
@@ -89,7 +91,8 @@ void LNetSnprintHandle(char *str, int str_len, lnet_handle_any_t handle);
* incoming requests based on process ID or the match bits provided in the
* request. MEs can be dynamically inserted into a match list by LNetMEAttach()
* and LNetMEInsert(), and removed from its list by LNetMEUnlink().
- * @{ */
+ * @{
+ */
int LNetMEAttach(unsigned int portal,
lnet_process_id_t match_id_in,
__u64 match_bits_in,
@@ -120,7 +123,8 @@ int LNetMEUnlink(lnet_handle_me_t current_in);
* The LNet API provides two operations to create MDs: LNetMDAttach()
* and LNetMDBind(); one operation to unlink and release the resources
* associated with a MD: LNetMDUnlink().
- * @{ */
+ * @{
+ */
int LNetMDAttach(lnet_handle_me_t current_in,
lnet_md_t md_in,
lnet_unlink_t unlink_in,
@@ -154,7 +158,8 @@ int LNetMDUnlink(lnet_handle_md_t md_in);
* event from an EQ, and LNetEQWait() can be used to block a process until
* an EQ has at least one event. LNetEQPoll() can be used to test or wait
* on multiple EQs.
- * @{ */
+ * @{
+ */
int LNetEQAlloc(unsigned int count_in,
lnet_eq_handler_t handler,
lnet_handle_eq_t *handle_out);
@@ -172,7 +177,8 @@ int LNetEQPoll(lnet_handle_eq_t *eventqs_in,
*
* The LNet API provides two data movement operations: LNetPut()
* and LNetGet().
- * @{ */
+ * @{
+ */
int LNetPut(lnet_nid_t self,
lnet_handle_md_t md_in,
lnet_ack_req_t ack_req_in,
@@ -192,11 +198,12 @@ int LNetGet(lnet_nid_t self,
/** \defgroup lnet_misc Miscellaneous operations.
* Miscellaneous operations.
- * @{ */
-
+ * @{
+ */
int LNetSetLazyPortal(int portal);
int LNetClearLazyPortal(int portal);
int LNetCtl(unsigned int cmd, void *arg);
+void LNetDebugPeer(lnet_process_id_t id);
/** @} lnet_misc */
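Taken together, the MD and data-movement groups above describe the usual send path: bind a memory descriptor over a buffer, then PUT it toward a peer. A hedged sketch of that sequence; MY_PORTAL and MY_MATCH_BITS are illustrative constants, and event-queue setup is assumed to have happened elsewhere:

static int example_put(lnet_process_id_t target, void *buf, unsigned int len,
		       lnet_handle_eq_t eqh)
{
	lnet_md_t md = {
		.start	   = buf,
		.length	   = len,
		.threshold = LNET_MD_THRESH_INF,
		.options   = 0,
		.eq_handle = eqh,
	};
	lnet_handle_md_t mdh;
	int rc;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
	if (rc)
		return rc;

	return LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
		       MY_PORTAL, MY_MATCH_BITS, 0, 0);
}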
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
new file mode 100644
index 000000000000..84a19e96ea04
--- /dev/null
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -0,0 +1,122 @@
+/*
+ * LGPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library.
+ *
+ * LGPL HEADER END
+ *
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ */
+/*
+ * Author: Amir Shehata <amir.shehata@intel.com>
+ */
+
+#ifndef LNET_DLC_H
+#define LNET_DLC_H
+
+#include "../libcfs/libcfs_ioctl.h"
+#include "types.h"
+
+#define MAX_NUM_SHOW_ENTRIES 32
+#define LNET_MAX_STR_LEN 128
+#define LNET_MAX_SHOW_NUM_CPT 128
+#define LNET_UNDEFINED_HOPS ((__u32) -1)
+
+struct lnet_ioctl_net_config {
+ char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN];
+ __u32 ni_status;
+ __u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT];
+};
+
+#define LNET_TINY_BUF_IDX 0
+#define LNET_SMALL_BUF_IDX 1
+#define LNET_LARGE_BUF_IDX 2
+
+/* # different router buffer pools */
+#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1)
+
+struct lnet_ioctl_pool_cfg {
+ struct {
+ __u32 pl_npages;
+ __u32 pl_nbuffers;
+ __u32 pl_credits;
+ __u32 pl_mincredits;
+ } pl_pools[LNET_NRBPOOLS];
+ __u32 pl_routing;
+};
+
+struct lnet_ioctl_config_data {
+ struct libcfs_ioctl_hdr cfg_hdr;
+
+ __u32 cfg_net;
+ __u32 cfg_count;
+ __u64 cfg_nid;
+ __u32 cfg_ncpts;
+
+ union {
+ struct {
+ __u32 rtr_hop;
+ __u32 rtr_priority;
+ __u32 rtr_flags;
+ } cfg_route;
+ struct {
+ char net_intf[LNET_MAX_STR_LEN];
+ __s32 net_peer_timeout;
+ __s32 net_peer_tx_credits;
+ __s32 net_peer_rtr_credits;
+ __s32 net_max_tx_credits;
+ __u32 net_cksum_algo;
+ __u32 net_pad;
+ } cfg_net;
+ struct {
+ __u32 buf_enable;
+ __s32 buf_tiny;
+ __s32 buf_small;
+ __s32 buf_large;
+ } cfg_buffers;
+ } cfg_config_u;
+
+ char cfg_bulk[0];
+};
+
+struct lnet_ioctl_peer {
+ struct libcfs_ioctl_hdr pr_hdr;
+ __u32 pr_count;
+ __u32 pr_pad;
+ __u64 pr_nid;
+
+ union {
+ struct {
+ char cr_aliveness[LNET_MAX_STR_LEN];
+ __u32 cr_refcount;
+ __u32 cr_ni_peer_tx_credits;
+ __u32 cr_peer_tx_credits;
+ __u32 cr_peer_rtr_credits;
+ __u32 cr_peer_min_rtr_credits;
+ __u32 cr_peer_tx_qnob;
+ __u32 cr_ncpt;
+ } pr_peer_credits;
+ } pr_lnd_u;
+};
+
+struct lnet_ioctl_lnet_stats {
+ struct libcfs_ioctl_hdr st_hdr;
+ struct lnet_counters st_cntrs;
+};
+
+#endif /* LNET_DLC_H */
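For a sense of how the new structure travels over the ioctl boundary, here is a hedged userspace-side sketch of building an IOC_LIBCFS_ADD_ROUTE request; fd, net, gateway_nid, hops and prio are illustrative, and this snippet is not taken from the patch itself:

struct lnet_ioctl_config_data data;
int rc;

memset(&data, 0, sizeof(data));
data.cfg_hdr.ioc_len = sizeof(data);
data.cfg_hdr.ioc_version = LIBCFS_IOCTL_VERSION2;
data.cfg_net = net;				/* remote network to reach */
data.cfg_nid = gateway_nid;			/* gateway toward that network */
data.cfg_config_u.cfg_route.rtr_hop = hops;
data.cfg_config_u.cfg_route.rtr_priority = prio;

rc = ioctl(fd, IOC_LIBCFS_ADD_ROUTE, &data);	/* fd is /dev/lnet */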
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index b67a6607bb3b..dfc0208dc3a7 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -39,6 +39,7 @@
#include "api.h"
#include "lnet.h"
#include "lib-types.h"
+#include "lib-dlc.h"
extern lnet_t the_lnet; /* THE network */
@@ -64,6 +65,19 @@ extern lnet_t the_lnet; /* THE network */
/** exclusive lock */
#define LNET_LOCK_EX CFS_PERCPT_LOCK_EX
+static inline int lnet_is_route_alive(lnet_route_t *route)
+{
+ /* gateway is down */
+ if (!route->lr_gateway->lp_alive)
+ return 0;
+ /* no NI status, assume it's alive */
+ if ((route->lr_gateway->lp_ping_feats &
+ LNET_PING_FEAT_NI_STATUS) == 0)
+ return 1;
+ /* has NI status, check # down NIs */
+ return route->lr_downis == 0;
+}
+
static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh)
{
return (wh->wh_interface_cookie == LNET_WIRE_HANDLE_COOKIE_NONE &&
@@ -72,25 +86,26 @@ static inline int lnet_is_wire_handle_none(lnet_handle_wire_t *wh)
static inline int lnet_md_exhausted(lnet_libmd_t *md)
{
- return (md->md_threshold == 0 ||
- ((md->md_options & LNET_MD_MAX_SIZE) != 0 &&
+ return (!md->md_threshold ||
+ ((md->md_options & LNET_MD_MAX_SIZE) &&
md->md_offset + md->md_max_size > md->md_length));
}
static inline int lnet_md_unlinkable(lnet_libmd_t *md)
{
- /* Should unlink md when its refcount is 0 and either:
+ /*
+ * Should unlink md when its refcount is 0 and either:
* - md has been flagged for deletion (by auto unlink or
* LNetM[DE]Unlink, in the latter case md may not be exhausted).
* - auto unlink is on and md is exhausted.
*/
- if (md->md_refcount != 0)
+ if (md->md_refcount)
return 0;
- if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) != 0)
+ if (md->md_flags & LNET_MD_FLAG_ZOMBIE)
return 1;
- return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0 &&
+ return ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) &&
lnet_md_exhausted(md));
}
@@ -102,8 +117,10 @@ lnet_cpt_of_cookie(__u64 cookie)
{
unsigned int cpt = (cookie >> LNET_COOKIE_TYPE_BITS) & LNET_CPT_MASK;
- /* LNET_CPT_NUMBER doesn't have to be power2, which means we can
- * get illegal cpt from it's invalid cookie */
+ /*
+ * LNET_CPT_NUMBER doesn't have to be a power of 2, which means we can
+ * get an illegal cpt from an invalid cookie
+ */
return cpt < LNET_CPT_NUMBER ? cpt : cpt % LNET_CPT_NUMBER;
}
@@ -183,18 +200,17 @@ lnet_md_alloc(lnet_md_t *umd)
unsigned int size;
unsigned int niov;
- if ((umd->options & LNET_MD_KIOV) != 0) {
+ if (umd->options & LNET_MD_KIOV) {
niov = umd->length;
size = offsetof(lnet_libmd_t, md_iov.kiov[niov]);
} else {
- niov = ((umd->options & LNET_MD_IOVEC) != 0) ?
- umd->length : 1;
+ niov = umd->options & LNET_MD_IOVEC ? umd->length : 1;
size = offsetof(lnet_libmd_t, md_iov.iov[niov]);
}
LIBCFS_ALLOC(md, size);
- if (md != NULL) {
+ if (md) {
/* Set here in case of early free */
md->md_options = umd->options;
md->md_niov = niov;
@@ -209,7 +225,7 @@ lnet_md_free(lnet_libmd_t *md)
{
unsigned int size;
- if ((md->md_options & LNET_MD_KIOV) != 0)
+ if (md->md_options & LNET_MD_KIOV)
size = offsetof(lnet_libmd_t, md_iov.kiov[md->md_niov]);
else
size = offsetof(lnet_libmd_t, md_iov.iov[md->md_niov]);
@@ -264,7 +280,7 @@ lnet_res_lh_invalidate(lnet_libhandle_t *lh)
static inline void
lnet_eq2handle(lnet_handle_eq_t *handle, lnet_eq_t *eq)
{
- if (eq == NULL) {
+ if (!eq) {
LNetInvalidateHandle(handle);
return;
}
@@ -278,7 +294,7 @@ lnet_handle2eq(lnet_handle_eq_t *handle)
lnet_libhandle_t *lh;
lh = lnet_res_lh_lookup(&the_lnet.ln_eq_container, handle->cookie);
- if (lh == NULL)
+ if (!lh)
return NULL;
return lh_entry(lh, lnet_eq_t, eq_lh);
@@ -300,7 +316,7 @@ lnet_handle2md(lnet_handle_md_t *handle)
cpt = lnet_cpt_of_cookie(handle->cookie);
lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
handle->cookie);
- if (lh == NULL)
+ if (!lh)
return NULL;
return lh_entry(lh, lnet_libmd_t, md_lh);
@@ -319,7 +335,7 @@ lnet_wire_handle2md(lnet_handle_wire_t *wh)
cpt = lnet_cpt_of_cookie(wh->wh_object_cookie);
lh = lnet_res_lh_lookup(the_lnet.ln_md_containers[cpt],
wh->wh_object_cookie);
- if (lh == NULL)
+ if (!lh)
return NULL;
return lh_entry(lh, lnet_libmd_t, md_lh);
@@ -341,7 +357,7 @@ lnet_handle2me(lnet_handle_me_t *handle)
cpt = lnet_cpt_of_cookie(handle->cookie);
lh = lnet_res_lh_lookup(the_lnet.ln_me_containers[cpt],
handle->cookie);
- if (lh == NULL)
+ if (!lh)
return NULL;
return lh_entry(lh, lnet_me_t, me_lh);
@@ -361,14 +377,14 @@ lnet_peer_decref_locked(lnet_peer_t *lp)
{
LASSERT(lp->lp_refcount > 0);
lp->lp_refcount--;
- if (lp->lp_refcount == 0)
+ if (!lp->lp_refcount)
lnet_destroy_peer_locked(lp);
}
static inline int
lnet_isrouter(lnet_peer_t *lp)
{
- return lp->lp_rtr_refcount != 0;
+ return lp->lp_rtr_refcount ? 1 : 0;
}
static inline void
@@ -406,6 +422,8 @@ lnet_ni_decref(lnet_ni_t *ni)
}
void lnet_ni_free(lnet_ni_t *ni);
+lnet_ni_t *
+lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist);
static inline int
lnet_nid2peerhash(lnet_nid_t nid)
@@ -430,24 +448,41 @@ lnet_ni_t *lnet_nid2ni_locked(lnet_nid_t nid, int cpt);
lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
lnet_ni_t *lnet_net2ni(__u32 net);
-int lnet_init(void);
-void lnet_fini(void);
+extern int portal_rotor;
+
+int lnet_lib_init(void);
+void lnet_lib_exit(void);
int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, unsigned long when);
void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
unsigned long when);
-int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid,
+int lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway_nid,
unsigned int priority);
int lnet_check_routes(void);
int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
void lnet_destroy_routes(void);
int lnet_get_route(int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
+int lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid,
+ int *peer_timeout, int *peer_tx_credits,
+ int *peer_rtr_cr, int *max_tx_credits,
+ struct lnet_ioctl_net_config *net_config);
+int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
+
void lnet_router_debugfs_init(void);
void lnet_router_debugfs_fini(void);
int lnet_rtrpools_alloc(int im_a_router);
-void lnet_rtrpools_free(void);
+void lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages);
+int lnet_rtrpools_adjust(int tiny, int small, int large);
+int lnet_rtrpools_enable(void);
+void lnet_rtrpools_disable(void);
+void lnet_rtrpools_free(int keep_pools);
lnet_remotenet_t *lnet_find_net_locked(__u32 net);
+int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
+ __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
+ __s32 credits);
+int lnet_dyn_del_ni(__u32 net);
+int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
int lnet_islocalnid(lnet_nid_t nid);
int lnet_islocalnet(__u32 net);
@@ -466,6 +501,8 @@ void lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
int lnet_send(lnet_nid_t nid, lnet_msg_t *msg, lnet_nid_t rtr_nid);
void lnet_return_tx_credits_locked(lnet_msg_t *msg);
void lnet_return_rx_credits_locked(lnet_msg_t *msg);
+void lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp);
+void lnet_drop_routed_msgs_locked(struct list_head *list, int cpt);
/* portals functions */
/* portals attributes */
@@ -522,13 +559,22 @@ void lnet_portals_destroy(void);
/* message functions */
int lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr,
lnet_nid_t fromnid, void *private, int rdma_req);
+int lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg);
+int lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg);
+
void lnet_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int offset, unsigned int mlen, unsigned int rlen);
+void lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg,
+ int delayed, unsigned int offset,
+ unsigned int mlen, unsigned int rlen);
+
lnet_msg_t *lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *get_msg);
void lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *msg, unsigned int len);
void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int rc);
+void lnet_drop_message(lnet_ni_t *ni, int cpt, void *private,
+ unsigned int nob);
void lnet_drop_delayed_msg_list(struct list_head *head, char *reason);
void lnet_recv_delayed_msg_list(struct list_head *head);
@@ -541,6 +587,24 @@ char *lnet_msgtyp2str(int type);
void lnet_print_hdr(lnet_hdr_t *hdr);
int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold);
+/** \addtogroup lnet_fault_simulation @{ */
+
+int lnet_fault_ctl(int cmd, struct libcfs_ioctl_data *data);
+int lnet_fault_init(void);
+void lnet_fault_fini(void);
+
+bool lnet_drop_rule_match(lnet_hdr_t *hdr);
+
+int lnet_delay_rule_add(struct lnet_fault_attr *attr);
+int lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown);
+int lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
+ struct lnet_fault_stat *stat);
+void lnet_delay_rule_reset(void);
+void lnet_delay_rule_check(void);
+bool lnet_delay_rule_match_locked(lnet_hdr_t *hdr, struct lnet_msg *msg);
+
+/** @} lnet_fault_simulation */
+
void lnet_counters_get(lnet_counters_t *counters);
void lnet_counters_reset(void);
@@ -660,27 +724,30 @@ void lnet_router_checker_stop(void);
void lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net);
void lnet_swap_pinginfo(lnet_ping_info_t *info);
-int lnet_ping_target_init(void);
-void lnet_ping_target_fini(void);
-int lnet_ping(lnet_process_id_t id, int timeout_ms,
- lnet_process_id_t *ids, int n_ids);
-
int lnet_parse_ip2nets(char **networksp, char *ip2nets);
int lnet_parse_routes(char *route_str, int *im_a_router);
int lnet_parse_networks(struct list_head *nilist, char *networks);
+int lnet_net_unique(__u32 net, struct list_head *nilist);
int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt);
lnet_peer_t *lnet_find_peer_locked(struct lnet_peer_table *ptable,
lnet_nid_t nid);
-void lnet_peer_tables_cleanup(void);
+void lnet_peer_tables_cleanup(lnet_ni_t *ni);
void lnet_peer_tables_destroy(void);
int lnet_peer_tables_create(void);
void lnet_debug_peer(lnet_nid_t nid);
+int lnet_get_peer_info(__u32 peer_index, __u64 *nid,
+ char alivness[LNET_MAX_STR_LEN],
+ __u32 *cpt_iter, __u32 *refcount,
+ __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
+ __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credtis,
+ __u32 *peer_tx_qnob);
static inline void
lnet_peer_set_alive(lnet_peer_t *lp)
{
- lp->lp_last_alive = lp->lp_last_query = jiffies;
+ lp->lp_last_query = jiffies;
+ lp->lp_last_alive = jiffies;
if (!lp->lp_alive)
lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
}
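The lnet_is_route_alive() helper added near the top of this header encodes a three-step liveness check: gateway up, then NI status only if the peer advertises it, then the count of down NIs. A hedged sketch of how route selection might use it; rnet and the surrounding locking are illustrative:

lnet_route_t *route;

list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
	if (!lnet_is_route_alive(route))
		continue;	/* gateway dead or all of its NIs down */
	/* consider route->lr_gateway as the next hop */
}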
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 3bb9468e0b9d..29c72f8c2f99 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -38,9 +38,9 @@
#include <linux/kthread.h>
#include <linux/uio.h>
#include <linux/types.h>
-#include <net/sock.h>
#include "types.h"
+#include "lnetctl.h"
/* Max payload size */
#define LNET_MAX_PAYLOAD CONFIG_LNET_MAX_PAYLOAD
@@ -85,10 +85,10 @@ typedef struct lnet_msg {
unsigned int msg_receiving:1; /* being received */
unsigned int msg_txcredit:1; /* taken an NI send credit */
unsigned int msg_peertxcredit:1; /* taken a peer send credit */
- unsigned int msg_rtrcredit:1; /* taken a global
- router credit */
+ unsigned int msg_rtrcredit:1; /* taken a global router credit */
unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */
unsigned int msg_onactivelist:1; /* on the activelist */
+ unsigned int msg_rdma_get:1;
struct lnet_peer *msg_txpeer; /* peer I'm sending to */
struct lnet_peer *msg_rxpeer; /* peer I received from */
@@ -113,7 +113,7 @@ typedef struct lnet_libhandle {
} lnet_libhandle_t;
#define lh_entry(ptr, type, member) \
- ((type *)((char *)(ptr)-(char *)(&((type *)0)->member)))
+ ((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
typedef struct lnet_eq {
struct list_head eq_list;
@@ -190,7 +190,8 @@ typedef struct lnet_lnd {
void (*lnd_shutdown)(struct lnet_ni *ni);
int (*lnd_ctl)(struct lnet_ni *ni, unsigned int cmd, void *arg);
- /* In data movement APIs below, payload buffers are described as a set
+ /*
+ * In data movement APIs below, payload buffers are described as a set
* of 'niov' fragments which are...
* EITHER
* in virtual memory (struct iovec *iov != NULL)
@@ -201,30 +202,36 @@ typedef struct lnet_lnd {
* fragments to start from
*/
- /* Start sending a preformatted message. 'private' is NULL for PUT and
+ /*
+ * Start sending a preformatted message. 'private' is NULL for PUT and
* GET messages; otherwise this is a response to an incoming message
* and 'private' is the 'private' passed to lnet_parse(). Return
* non-zero for immediate failure, otherwise complete later with
- * lnet_finalize() */
+ * lnet_finalize()
+ */
int (*lnd_send)(struct lnet_ni *ni, void *private, lnet_msg_t *msg);
- /* Start receiving 'mlen' bytes of payload data, skipping the following
+ /*
+ * Start receiving 'mlen' bytes of payload data, skipping the following
* 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
* lnet_parse(). Return non-zero for immediate failure, otherwise
* complete later with lnet_finalize(). This also gives back a receive
- * credit if the LND does flow control. */
+ * credit if the LND does flow control.
+ */
int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
int delayed, unsigned int niov,
struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen,
unsigned int rlen);
- /* lnet_parse() has had to delay processing of this message
+ /*
+ * lnet_parse() has had to delay processing of this message
* (e.g. waiting for a forwarding buffer or send credits). Give the
* LND a chance to free urgently needed resources. If called, return 0
* for success and do NOT give back a receive credit; that has to wait
* until lnd_recv() gets called. On failure return < 0 and
- * release resources; lnd_recv() will not be called. */
+ * release resources; lnd_recv() will not be called.
+ */
int (*lnd_eager_recv)(struct lnet_ni *ni, void *private,
lnet_msg_t *msg, void **new_privatep);
@@ -272,11 +279,14 @@ typedef struct lnet_ni {
#define LNET_PROTO_PING_MATCHBITS 0x8000000000000000LL
-/* NB: value of these features equal to LNET_PROTO_PING_VERSION_x
- * of old LNet, so there shouldn't be any compatibility issue */
+/*
+ * NB: value of these features equal to LNET_PROTO_PING_VERSION_x
+ * of old LNet, so there shouldn't be any compatibility issue
+ */
#define LNET_PING_FEAT_INVAL (0) /* no feature */
#define LNET_PING_FEAT_BASE (1 << 0) /* just a ping */
#define LNET_PING_FEAT_NI_STATUS (1 << 1) /* return NI status */
+#define LNET_PING_FEAT_RTE_DISABLED (1 << 2) /* routing disabled */
#define LNET_PING_FEAT_MASK (LNET_PING_FEAT_BASE | \
LNET_PING_FEAT_NI_STATUS)
@@ -343,13 +353,17 @@ typedef struct lnet_peer {
struct lnet_peer_table {
int pt_version; /* /proc validity stamp */
int pt_number; /* # peers extant */
+ /* # zombies to go to deathrow (and not there yet) */
+ int pt_zombies;
struct list_head pt_deathrow; /* zombie peers */
struct list_head *pt_hash; /* NID->peer hash */
};
-/* peer aliveness is enabled only on routers for peers in a network where the
- * lnet_ni_t::ni_peertimeout has been set to a positive value */
-#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing != 0 && \
+/*
+ * peer aliveness is enabled only on routers for peers in a network where the
+ * lnet_ni_t::ni_peertimeout has been set to a positive value
+ */
+#define lnet_peer_aliveness_enabled(lp) (the_lnet.ln_routing && \
(lp)->lp_ni->ni_peertimeout > 0)
typedef struct {
@@ -359,7 +373,7 @@ typedef struct {
__u32 lr_net; /* remote network number */
int lr_seq; /* sequence for round-robin */
unsigned int lr_downis; /* number of down NIs */
- unsigned int lr_hops; /* how far I am */
+ __u32 lr_hops; /* how far I am */
unsigned int lr_priority; /* route priority */
} lnet_route_t;
@@ -384,7 +398,10 @@ typedef struct {
struct list_head rbp_msgs; /* messages blocking
for a buffer */
int rbp_npages; /* # pages in each buffer */
- int rbp_nbuffers; /* # buffers */
+ /* requested number of buffers */
+ int rbp_req_nbuffers;
+ /* # buffers actually allocated */
+ int rbp_nbuffers;
int rbp_credits; /* # free buffers /
blocked messages */
int rbp_mincredits; /* low water mark */
@@ -398,7 +415,12 @@ typedef struct {
#define LNET_PEER_HASHSIZE 503 /* prime! */
-#define LNET_NRBPOOLS 3 /* # different router buffer pools */
+#define LNET_TINY_BUF_IDX 0
+#define LNET_SMALL_BUF_IDX 1
+#define LNET_LARGE_BUF_IDX 2
+
+/* # different router buffer pools */
+#define LNET_NRBPOOLS (LNET_LARGE_BUF_IDX + 1)
enum {
/* Didn't match anything */
@@ -433,12 +455,16 @@ struct lnet_match_info {
#define LNET_MT_HASH_BITS 8
#define LNET_MT_HASH_SIZE (1 << LNET_MT_HASH_BITS)
#define LNET_MT_HASH_MASK (LNET_MT_HASH_SIZE - 1)
-/* we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
- * the last entry is reserved for MEs with ignore-bits */
+/*
+ * we allocate (LNET_MT_HASH_SIZE + 1) entries for lnet_match_table::mt_hash,
+ * the last entry is reserved for MEs with ignore-bits
+ */
#define LNET_MT_HASH_IGNORE LNET_MT_HASH_SIZE
-/* __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
+/*
+ * __u64 has 2^6 bits, so need 2^(LNET_MT_HASH_BITS - LNET_MT_BITS_U64) which
* is 4 __u64s as bit-map, and add an extra __u64 (only use one bit) for the
- * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE] */
+ * ME-list with ignore-bits, which is mtable::mt_hash[LNET_MT_HASH_IGNORE]
+ */
#define LNET_MT_BITS_U64 6 /* 2^6 bits */
#define LNET_MT_EXHAUSTED_BITS (LNET_MT_HASH_BITS - LNET_MT_BITS_U64)
#define LNET_MT_EXHAUSTED_BMAP ((1 << LNET_MT_EXHAUSTED_BITS) + 1)
@@ -448,8 +474,10 @@ struct lnet_match_table {
/* reserved for upcoming patches, CPU partition ID */
unsigned int mt_cpt;
unsigned int mt_portal; /* portal index */
- /* match table is set as "enabled" if there's non-exhausted MD
- * attached on mt_mhash, it's only valid for wildcard portal */
+ /*
+ * match table is set as "enabled" if there's non-exhausted MD
+ * attached on mt_mhash, it's only valid for wildcard portal
+ */
unsigned int mt_enabled;
/* bitmap to flag whether MEs on mt_hash are exhausted or not */
__u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
@@ -546,6 +574,8 @@ typedef struct {
struct lnet_peer_table **ln_peer_tables;
/* failure simulation */
struct list_head ln_test_peers;
+ struct list_head ln_drop_rules;
+ struct list_head ln_delay_rules;
struct list_head ln_nis; /* LND instances */
/* NIs bond on specific CPT(s) */
@@ -553,8 +583,6 @@ typedef struct {
/* dying LND instances */
struct list_head ln_nis_zombie;
lnet_ni_t *ln_loni; /* the loopback NI */
- /* NI to wait for events in */
- lnet_ni_t *ln_eq_waitni;
/* remote networks with routes to them */
struct list_head *ln_remote_nets_hash;
@@ -584,8 +612,7 @@ typedef struct {
struct mutex ln_api_mutex;
struct mutex ln_lnd_mutex;
- int ln_init; /* lnet_init()
- called? */
+ struct mutex ln_delay_mutex;
/* Have I called LNetNIInit myself? */
int ln_niinit_self;
/* LNetNIInit/LNetNIFini counter */
@@ -600,12 +627,24 @@ typedef struct {
/* registered LNDs */
struct list_head ln_lnds;
- /* space for network names */
- char *ln_network_tokens;
- int ln_network_tokens_nob;
/* test protocol compatibility flags */
int ln_testprotocompat;
+ /*
+ * 0 - load the NIs from the mod params
+ * 1 - do not load the NIs from the mod params
+ * Reverse logic to ensure that other calls to LNetNIInit
+ * need no change
+ */
+ bool ln_nis_from_mod_params;
+
+ /*
+ * waitq for router checker. As long as there are no routes in
+ * the list, the router checker will sleep on this queue. when
+ * routes are added the thread will wake up
+ */
+ wait_queue_head_t ln_rc_waitq;
+
} lnet_t;
#endif
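With LNET_NRBPOOLS now derived from the named tiny/small/large indices, a router conceptually picks a pool by payload size. A hedged sketch of that mapping; the thresholds here are illustrative, not the exact ones the router code uses:

static int example_rbp_index(unsigned int nob)
{
	if (!nob)
		return LNET_TINY_BUF_IDX;	/* zero-payload messages */
	if (nob <= PAGE_SIZE)
		return LNET_SMALL_BUF_IDX;	/* fits in a single page */
	return LNET_LARGE_BUF_IDX;		/* multi-page payload */
}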
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetctl.h b/drivers/staging/lustre/include/linux/lnet/lnetctl.h
index bdd69b2af909..39575073b00b 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetctl.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetctl.h
@@ -10,10 +10,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
* header for lnet ioctl
*/
#ifndef _LNETCTL_H_
@@ -21,6 +17,106 @@
#include "types.h"
+/** \addtogroup lnet_fault_simulation
+ * @{
+ */
+
+enum {
+ LNET_CTL_DROP_ADD,
+ LNET_CTL_DROP_DEL,
+ LNET_CTL_DROP_RESET,
+ LNET_CTL_DROP_LIST,
+ LNET_CTL_DELAY_ADD,
+ LNET_CTL_DELAY_DEL,
+ LNET_CTL_DELAY_RESET,
+ LNET_CTL_DELAY_LIST,
+};
+
+#define LNET_ACK_BIT BIT(0)
+#define LNET_PUT_BIT BIT(1)
+#define LNET_GET_BIT BIT(2)
+#define LNET_REPLY_BIT BIT(3)
+
+/** ioctl parameter for LNet fault simulation */
+struct lnet_fault_attr {
+ /**
+ * source NID of drop rule
+ * LNET_NID_ANY is a wildcard for all sources;
+ * 255.255.255.255@net is a wildcard for all addresses from @net
+ */
+ lnet_nid_t fa_src;
+ /** destination NID of drop rule, see \a fa_src for details */
+ lnet_nid_t fa_dst;
+ /**
+ * Portal mask to drop, -1 means all portals, for example:
+ * fa_ptl_mask = (1 << _LDLM_CB_REQUEST_PORTAL ) |
+ * (1 << LDLM_CANCEL_REQUEST_PORTAL)
+ *
+ * If it is non-zero then only PUT and GET will be filtered, otherwise
+ * there is no portal filter, all matched messages will be checked.
+ */
+ __u64 fa_ptl_mask;
+ /**
+ * message types to drop, for example:
+ * fa_msg_mask = LNET_ACK_BIT | LNET_PUT_BIT
+ *
+ * If it is non-zero then only specified message types are filtered,
+ * otherwise all message types will be checked.
+ */
+ __u32 fa_msg_mask;
+ union {
+ /** message drop simulation */
+ struct {
+ /** drop rate of this rule */
+ __u32 da_rate;
+ /**
+ * time interval between message drops; mutually
+ * exclusive with da_rate
+ */
+ __u32 da_interval;
+ } drop;
+ /** message latency simulation */
+ struct {
+ __u32 la_rate;
+ /**
+ * time interval between message delays; mutually
+ * exclusive with la_rate
+ */
+ __u32 la_interval;
+ /** latency to delay */
+ __u32 la_latency;
+ } delay;
+ __u64 space[8];
+ } u;
+};
+
+/** fault simulation stats */
+struct lnet_fault_stat {
+ /** total # matched messages */
+ __u64 fs_count;
+ /** # dropped LNET_MSG_PUT by this rule */
+ __u64 fs_put;
+ /** # dropped LNET_MSG_ACK by this rule */
+ __u64 fs_ack;
+ /** # dropped LNET_MSG_GET by this rule */
+ __u64 fs_get;
+ /** # dropped LNET_MSG_REPLY by this rule */
+ __u64 fs_reply;
+ union {
+ struct {
+ /** total # dropped messages */
+ __u64 ds_dropped;
+ } drop;
+ struct {
+ /** total # delayed messages */
+ __u64 ls_delayed;
+ } delay;
+ __u64 space[8];
+ } u;
+};
+
+/** @} lnet_fault_simulation */
+
#define LNET_DEV_ID 0
#define LNET_DEV_PATH "/dev/lnet"
#define LNET_DEV_MAJOR 10
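As a hedged illustration of the new fault-simulation ABI, a userspace tool might arm a drop rule roughly as below. How the LNET_CTL_DROP_ADD opcode is carried alongside the attribute block is simplified here, and fd, src_nid and dst_nid are illustrative:

struct lnet_fault_attr attr;
int rc;

memset(&attr, 0, sizeof(attr));
attr.fa_src = src_nid;				/* or LNET_NID_ANY */
attr.fa_dst = dst_nid;
attr.fa_msg_mask = LNET_PUT_BIT | LNET_GET_BIT;	/* only PUTs and GETs */
attr.u.drop.da_rate = 10;			/* drop ~1 in 10 matches */

rc = ioctl(fd, IOC_LIBCFS_LNET_FAULT, &attr);	/* opcode LNET_CTL_DROP_ADD */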
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index fd1e0fd3696f..417044552d3f 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -245,20 +245,20 @@ typedef struct {
int lstio_ses_force; /* IN: force create ? */
/** IN: session features */
unsigned lstio_ses_feats;
- lst_sid_t *lstio_ses_idp; /* OUT: session id */
+ lst_sid_t __user *lstio_ses_idp; /* OUT: session id */
int lstio_ses_nmlen; /* IN: name length */
- char *lstio_ses_namep; /* IN: session name */
+ char __user *lstio_ses_namep; /* IN: session name */
} lstio_session_new_args_t;
/* query current session */
typedef struct {
- lst_sid_t *lstio_ses_idp; /* OUT: session id */
- int *lstio_ses_keyp; /* OUT: local key */
+ lst_sid_t __user *lstio_ses_idp; /* OUT: session id */
+ int __user *lstio_ses_keyp; /* OUT: local key */
/** OUT: session features */
- unsigned *lstio_ses_featp;
- lstcon_ndlist_ent_t *lstio_ses_ndinfo; /* OUT: */
+ unsigned __user *lstio_ses_featp;
+ lstcon_ndlist_ent_t __user *lstio_ses_ndinfo; /* OUT: */
int lstio_ses_nmlen; /* IN: name length */
- char *lstio_ses_namep; /* OUT: session name */
+ char __user *lstio_ses_namep; /* OUT: session name */
} lstio_session_info_args_t;
/* delete a session */
@@ -283,26 +283,26 @@ typedef struct {
int lstio_dbg_timeout; /* IN: timeout of
debug */
int lstio_dbg_nmlen; /* IN: len of name */
- char *lstio_dbg_namep; /* IN: name of
+ char __user *lstio_dbg_namep; /* IN: name of
group|batch */
int lstio_dbg_count; /* IN: # of test nodes
to debug */
- lnet_process_id_t *lstio_dbg_idsp; /* IN: id of test
+ lnet_process_id_t __user *lstio_dbg_idsp; /* IN: id of test
nodes */
- struct list_head *lstio_dbg_resultp; /* OUT: list head of
+ struct list_head __user *lstio_dbg_resultp; /* OUT: list head of
result buffer */
} lstio_debug_args_t;
typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_nmlen; /* IN: name length */
+ char __user *lstio_grp_namep; /* IN: group name */
} lstio_group_add_args_t;
typedef struct {
- int lstio_grp_key; /* IN: session key */
- int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
+ int lstio_grp_key; /* IN: session key */
+ int lstio_grp_nmlen; /* IN: name length */
+ char __user *lstio_grp_namep; /* IN: group name */
} lstio_group_del_args_t;
#define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */
@@ -315,22 +315,22 @@ typedef struct {
int lstio_grp_opc; /* IN: OPC */
int lstio_grp_args; /* IN: arguments */
int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
+ char __user *lstio_grp_namep; /* IN: group name */
int lstio_grp_count; /* IN: # of nodes id */
- lnet_process_id_t *lstio_grp_idsp; /* IN: array of nodes */
- struct list_head *lstio_grp_resultp; /* OUT: list head of
+ lnet_process_id_t __user *lstio_grp_idsp; /* IN: array of nodes */
+ struct list_head __user *lstio_grp_resultp; /* OUT: list head of
result buffer */
} lstio_group_update_args_t;
typedef struct {
int lstio_grp_key; /* IN: session key */
int lstio_grp_nmlen; /* IN: name length */
- char *lstio_grp_namep; /* IN: group name */
+ char __user *lstio_grp_namep; /* IN: group name */
int lstio_grp_count; /* IN: # of nodes */
/** OUT: session features */
- unsigned *lstio_grp_featp;
- lnet_process_id_t *lstio_grp_idsp; /* IN: nodes */
- struct list_head *lstio_grp_resultp; /* OUT: list head of
+ unsigned __user *lstio_grp_featp;
+ lnet_process_id_t __user *lstio_grp_idsp; /* IN: nodes */
+ struct list_head __user *lstio_grp_resultp; /* OUT: list head of
result buffer */
} lstio_group_nodes_args_t;
@@ -338,18 +338,18 @@ typedef struct {
int lstio_grp_key; /* IN: session key */
int lstio_grp_idx; /* IN: group idx */
int lstio_grp_nmlen; /* IN: name len */
- char *lstio_grp_namep; /* OUT: name */
+ char __user *lstio_grp_namep; /* OUT: name */
} lstio_group_list_args_t;
typedef struct {
int lstio_grp_key; /* IN: session key */
int lstio_grp_nmlen; /* IN: name len */
- char *lstio_grp_namep; /* IN: name */
- lstcon_ndlist_ent_t *lstio_grp_entp; /* OUT: description of
+ char __user *lstio_grp_namep; /* IN: name */
+ lstcon_ndlist_ent_t __user *lstio_grp_entp; /* OUT: description of
group */
- int *lstio_grp_idxp; /* IN/OUT: node index */
- int *lstio_grp_ndentp; /* IN/OUT: # of nodent */
- lstcon_node_ent_t *lstio_grp_dentsp; /* OUT: nodent array */
+ int __user *lstio_grp_idxp; /* IN/OUT: node index */
+ int __user *lstio_grp_ndentp; /* IN/OUT: # of nodent */
+ lstcon_node_ent_t __user *lstio_grp_dentsp; /* OUT: nodent array */
} lstio_group_info_args_t;
#define LST_DEFAULT_BATCH "batch" /* default batch name */
@@ -357,13 +357,13 @@ typedef struct {
typedef struct {
int lstio_bat_key; /* IN: session key */
int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
+ char __user *lstio_bat_namep; /* IN: batch name */
} lstio_batch_add_args_t;
typedef struct {
int lstio_bat_key; /* IN: session key */
int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
+ char __user *lstio_bat_namep; /* IN: batch name */
} lstio_batch_del_args_t;
typedef struct {
@@ -371,8 +371,8 @@ typedef struct {
int lstio_bat_timeout; /* IN: timeout for
the batch */
int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of
+ char __user *lstio_bat_namep; /* IN: batch name */
+ struct list_head __user *lstio_bat_resultp; /* OUT: list head of
result buffer */
} lstio_batch_run_args_t;
@@ -381,8 +381,8 @@ typedef struct {
int lstio_bat_force; /* IN: abort unfinished
test RPC */
int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of
+ char __user *lstio_bat_namep; /* IN: batch name */
+ struct list_head __user *lstio_bat_resultp; /* OUT: list head of
result buffer */
} lstio_batch_stop_args_t;
@@ -394,8 +394,8 @@ typedef struct {
int lstio_bat_timeout; /* IN: timeout for
waiting */
int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
- struct list_head *lstio_bat_resultp; /* OUT: list head of
+ char __user *lstio_bat_namep; /* IN: batch name */
+ struct list_head __user *lstio_bat_resultp; /* OUT: list head of
result buffer */
} lstio_batch_query_args_t;
@@ -403,21 +403,21 @@ typedef struct {
int lstio_bat_key; /* IN: session key */
int lstio_bat_idx; /* IN: index */
int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: batch name */
+ char __user *lstio_bat_namep; /* IN: batch name */
} lstio_batch_list_args_t;
typedef struct {
int lstio_bat_key; /* IN: session key */
int lstio_bat_nmlen; /* IN: name length */
- char *lstio_bat_namep; /* IN: name */
+ char __user *lstio_bat_namep; /* IN: name */
int lstio_bat_server; /* IN: query server
or not */
int lstio_bat_testidx; /* IN: test index */
- lstcon_test_batch_ent_t *lstio_bat_entp; /* OUT: batch ent */
+ lstcon_test_batch_ent_t __user *lstio_bat_entp; /* OUT: batch ent */
- int *lstio_bat_idxp; /* IN/OUT: index of node */
- int *lstio_bat_ndentp; /* IN/OUT: # of nodent */
- lstcon_node_ent_t *lstio_bat_dentsp; /* array of nodent */
+ int __user *lstio_bat_idxp; /* IN/OUT: index of node */
+ int __user *lstio_bat_ndentp; /* IN/OUT: # of nodent */
+ lstcon_node_ent_t __user *lstio_bat_dentsp; /* array of nodent */
} lstio_batch_info_args_t;
/* add stat in session */
@@ -427,10 +427,10 @@ typedef struct {
stat request */
int lstio_sta_nmlen; /* IN: group name
length */
- char *lstio_sta_namep; /* IN: group name */
+ char __user *lstio_sta_namep; /* IN: group name */
int lstio_sta_count; /* IN: # of pid */
- lnet_process_id_t *lstio_sta_idsp; /* IN: pid */
- struct list_head *lstio_sta_resultp; /* OUT: list head of
+ lnet_process_id_t __user *lstio_sta_idsp; /* IN: pid */
+ struct list_head __user *lstio_sta_resultp; /* OUT: list head of
result buffer */
} lstio_stat_args_t;
@@ -445,7 +445,7 @@ typedef enum {
typedef struct {
int lstio_tes_key; /* IN: session key */
int lstio_tes_bat_nmlen; /* IN: batch name len */
- char *lstio_tes_bat_name; /* IN: batch name */
+ char __user *lstio_tes_bat_name; /* IN: batch name */
int lstio_tes_type; /* IN: test type */
int lstio_tes_oneside; /* IN: one sided test */
int lstio_tes_loop; /* IN: loop count */
@@ -457,20 +457,20 @@ typedef struct {
destination groups */
int lstio_tes_sgrp_nmlen; /* IN: source group
name length */
- char *lstio_tes_sgrp_name; /* IN: group name */
+ char __user *lstio_tes_sgrp_name; /* IN: group name */
int lstio_tes_dgrp_nmlen; /* IN: destination group
name length */
- char *lstio_tes_dgrp_name; /* IN: group name */
+ char __user *lstio_tes_dgrp_name; /* IN: group name */
int lstio_tes_param_len; /* IN: param buffer len */
- void *lstio_tes_param; /* IN: parameter for specified
+ void __user *lstio_tes_param; /* IN: parameter for specified
test:
lstio_bulk_param_t,
lstio_ping_param_t,
... more */
- int *lstio_tes_retp; /* OUT: private returned
+ int __user *lstio_tes_retp; /* OUT: private returned
value */
- struct list_head *lstio_tes_resultp; /* OUT: list head of
+ struct list_head __user *lstio_tes_resultp;/* OUT: list head of
result buffer */
} lstio_test_args_t;
diff --git a/drivers/staging/lustre/include/linux/lnet/nidstr.h b/drivers/staging/lustre/include/linux/lnet/nidstr.h
index 4fc9ddce829d..937fcc9e4a30 100644
--- a/drivers/staging/lustre/include/linux/lnet/nidstr.h
+++ b/drivers/staging/lustre/include/linux/lnet/nidstr.h
@@ -34,8 +34,10 @@
* Lustre Network Driver types.
*/
enum {
- /* Only add to these values (i.e. don't ever change or redefine them):
- * network addresses depend on them... */
+ /*
+ * Only add to these values (i.e. don't ever change or redefine them):
+ * network addresses depend on them...
+ */
QSWLND = 1,
SOCKLND = 2,
GMLND = 3,
@@ -67,6 +69,7 @@ static inline char *libcfs_lnd2str(__u32 lnd)
return libcfs_lnd2str_r(lnd, libcfs_next_nidstring(),
LNET_NIDSTR_SIZE);
}
+
int libcfs_str2lnd(const char *str);
char *libcfs_net2str_r(__u32 net, char *buf, size_t buf_size);
static inline char *libcfs_net2str(__u32 net)
@@ -74,12 +77,14 @@ static inline char *libcfs_net2str(__u32 net)
return libcfs_net2str_r(net, libcfs_next_nidstring(),
LNET_NIDSTR_SIZE);
}
+
char *libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size);
static inline char *libcfs_nid2str(lnet_nid_t nid)
{
return libcfs_nid2str_r(nid, libcfs_next_nidstring(),
LNET_NIDSTR_SIZE);
}
+
__u32 libcfs_str2net(const char *str);
lnet_nid_t libcfs_str2nid(const char *str);
int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
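A hedged round-trip through the string helpers declared above; "192.168.0.1@tcp" is just an example address, and returning LNET_NID_ANY from libcfs_str2nid() on a parse failure is the usual convention:

lnet_nid_t nid = libcfs_str2nid("192.168.0.1@tcp");

if (nid != LNET_NID_ANY)
	CDEBUG(D_NET, "parsed NID %s\n", libcfs_nid2str(nid));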
diff --git a/drivers/staging/lustre/include/linux/lnet/socklnd.h b/drivers/staging/lustre/include/linux/lnet/socklnd.h
index 599c9f6628fb..bc32403f4a08 100644
--- a/drivers/staging/lustre/include/linux/lnet/socklnd.h
+++ b/drivers/staging/lustre/include/linux/lnet/socklnd.h
@@ -85,14 +85,17 @@ socklnd_init_msg(ksock_msg_t *msg, int type)
{
msg->ksm_csum = 0;
msg->ksm_type = type;
- msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
+ msg->ksm_zc_cookies[0] = 0;
+ msg->ksm_zc_cookies[1] = 0;
}
#define KSOCK_MSG_NOOP 0xC0 /* ksm_u empty */
#define KSOCK_MSG_LNET 0xC1 /* lnet msg */
-/* We need to know this number to parse hello msg from ksocklnd in
- * other LND (usocklnd, for example) */
+/*
+ * We need to know this number to parse hello msg from ksocklnd in
+ * other LND (usocklnd, for example)
+ */
#define KSOCK_PROTO_V2 2
#define KSOCK_PROTO_V3 3
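A hedged two-liner showing the helper above in use for a NOOP keepalive message; the surrounding send path is omitted:

ksock_msg_t msg;

socklnd_init_msg(&msg, KSOCK_MSG_NOOP);	/* type set, cookies and csum zeroed */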
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index 11630180c5e7..1c679cb72785 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -36,10 +36,14 @@
#include <linux/types.h>
/** \addtogroup lnet
- * @{ */
+ * @{
+ */
+
+#define LNET_VERSION "0.6.0"
/** \addtogroup lnet_addr
- * @{ */
+ * @{
+ */
/** Portal reserved for LNet's own use.
* \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
@@ -116,10 +120,12 @@ typedef struct {
lnet_pid_t pid;
} WIRE_ATTR lnet_process_id_packed_t;
-/* The wire handle's interface cookie only matches one network interface in
+/*
+ * The wire handle's interface cookie only matches one network interface in
* one epoch (i.e. new cookie when the interface restarts or the node
* reboots). The object cookie only matches one object on that interface
- * during that object's lifetime (i.e. no cookie re-use). */
+ * during that object's lifetime (i.e. no cookie re-use).
+ */
typedef struct {
__u64 wh_interface_cookie;
__u64 wh_object_cookie;
@@ -133,10 +139,12 @@ typedef enum {
LNET_MSG_HELLO,
} lnet_msg_type_t;
-/* The variant fields of the portals message header are aligned on an 8
+/*
+ * The variant fields of the portals message header are aligned on an 8
* byte boundary in the message header. Note that all types used in these
* wire structs MUST be fixed size and the smaller types are placed at the
- * end. */
+ * end.
+ */
typedef struct lnet_ack {
lnet_handle_wire_t dst_wmd;
__u64 match_bits;
@@ -185,7 +193,8 @@ typedef struct {
} msg;
} WIRE_ATTR lnet_hdr_t;
-/* A HELLO message contains a magic number and protocol version
+/*
+ * A HELLO message contains a magic number and protocol version
* code in the header's dest_nid, the peer's NID in the src_nid, and
* LNET_MSG_HELLO in the type field. All other common fields are zero
* (including payload_size; i.e. no payload).
@@ -208,8 +217,10 @@ typedef struct {
#define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */
/* Placeholder for a future "unified" protocol across all LNDs */
-/* Current LNDs that receive a request with this magic will respond with a
- * "stub" reply using their current protocol */
+/*
+ * Current LNDs that receive a request with this magic will respond with a
+ * "stub" reply using their current protocol
+ */
#define LNET_PROTO_MAGIC 0x45726963 /* ! */
#define LNET_PROTO_TCP_VERSION_MAJOR 1
@@ -258,7 +269,7 @@ typedef struct lnet_counters {
#define LNET_MAX_INTERFACES 16
-/*
+/**
* Objects maintained by the LNet are accessed through handles. Handle types
* have names of the form lnet_handle_xx_t, where xx is one of the two letter
* object type codes ('eq' for event queue, 'md' for memory descriptor, and
@@ -318,7 +329,8 @@ typedef struct {
/** @} lnet_addr */
/** \addtogroup lnet_me
- * @{ */
+ * @{
+ */
/**
* Specifies whether the match entry or memory descriptor should be unlinked
@@ -348,7 +360,8 @@ typedef enum {
/** @} lnet_me */
/** \addtogroup lnet_md
- * @{ */
+ * @{
+ */
/**
* Defines the visible parts of a memory descriptor. Values of this type
@@ -450,9 +463,11 @@ typedef struct {
lnet_handle_eq_t eq_handle;
} lnet_md_t;
-/* Max Transfer Unit (minimum supported everywhere).
+/*
+ * Max Transfer Unit (minimum supported everywhere).
* CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
- * these limits are system wide and not interface-local. */
+ * these limits are system wide and not interface-local.
+ */
#define LNET_MTU_BITS 20
#define LNET_MTU (1 << LNET_MTU_BITS)
@@ -499,14 +514,15 @@ typedef struct {
/**
* Starting offset of the fragment within the page. Note that the
* end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+ * kiov_len + kiov_offset <= PAGE_SIZE.
*/
unsigned int kiov_offset;
} lnet_kiov_t;
/** @} lnet_md */
/** \addtogroup lnet_eq
- * @{ */
+ * @{
+ */
/**
* Six types of events can be logged in an event queue.
@@ -640,7 +656,8 @@ typedef void (*lnet_eq_handler_t)(lnet_event_t *event);
/** @} lnet_eq */
/** \addtogroup lnet_data
- * @{ */
+ * @{
+ */
/**
* Specify whether an acknowledgment should be sent by target when the PUT
diff --git a/drivers/staging/lustre/lnet/Kconfig b/drivers/staging/lustre/lnet/Kconfig
index 00850eeb6a8c..2b5930150cda 100644
--- a/drivers/staging/lustre/lnet/Kconfig
+++ b/drivers/staging/lustre/lnet/Kconfig
@@ -1,10 +1,16 @@
config LNET
- tristate "Lustre networking subsystem"
- depends on LUSTRE_FS
+ tristate "Lustre networking subsystem (LNet)"
+ depends on INET && m
+ help
+ The Lustre network layer, also known as LNet, is a networking abstraction
+ API that was initially created to let the Lustre filesystem use very
+ different networks, such as TCP and InfiniBand verbs, in a uniform way.
+ Lustre routers need only the LNet layer. Other projects have also begun
+ looking at LNet as their networking API.
config LNET_MAX_PAYLOAD
- int "Lustre lnet max transfer payload (default 2MB)"
- depends on LUSTRE_FS
+ int "Lustre lnet max transfer payload (default 1MB)"
+ depends on LNET
default "1048576"
help
This option defines the maximum size of payload in bytes that lnet
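
Since the new dependency is INET && m, LNet can only be built as a module. A minimal .config fragment matching the defaults above might look like the following; note that 1048576 bytes is 1 MiB, which also equals LNET_MTU = 1 << 20 defined elsewhere in this patch:

    CONFIG_LNET=m
    CONFIG_LNET_MAX_PAYLOAD=1048576
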
diff --git a/drivers/staging/lustre/lnet/Makefile b/drivers/staging/lustre/lnet/Makefile
index f6f03e304d81..0a380fe88ce8 100644
--- a/drivers/staging/lustre/lnet/Makefile
+++ b/drivers/staging/lustre/lnet/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_LNET) += lnet/ klnds/ selftest/
+obj-$(CONFIG_LNET) += libcfs/ lnet/ klnds/ selftest/
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index cb74ae731b95..0d32e6541a3f 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -42,15 +42,7 @@
#include <asm/page.h>
#include "o2iblnd.h"
-static lnd_t the_o2iblnd = {
- .lnd_type = O2IBLND,
- .lnd_startup = kiblnd_startup,
- .lnd_shutdown = kiblnd_shutdown,
- .lnd_ctl = kiblnd_ctl,
- .lnd_query = kiblnd_query,
- .lnd_send = kiblnd_send,
- .lnd_recv = kiblnd_recv,
-};
+static lnd_t the_o2iblnd;
kib_data_t kiblnd_data;
@@ -63,7 +55,7 @@ static __u32 kiblnd_cksum(void *ptr, int nob)
sum = ((sum << 1) | (sum >> 31)) + *c++;
/* ensure I don't return 0 (== no checksum) */
- return (sum == 0) ? 1 : sum;
+ return !sum ? 1 : sum;
}
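
A self-contained userspace sketch of the rotate-and-add checksum computed by kiblnd_cksum() above (illustrative only; demo_cksum is a hypothetical name). The final twist keeps 0 reserved to mean "no checksum":

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t demo_cksum(const void *ptr, size_t nob)
    {
            const unsigned char *c = ptr;
            uint32_t sum = 0;

            while (nob-- > 0)           /* rotate left by 1, then add */
                    sum = ((sum << 1) | (sum >> 31)) + *c++;

            return sum ? sum : 1;       /* never return 0 */
    }
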
static char *kiblnd_msgtype2str(int type)
@@ -145,7 +137,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
int i;
LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
- msg->ibm_type == IBLND_MSG_PUT_ACK);
+ msg->ibm_type == IBLND_MSG_PUT_ACK);
rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
&msg->ibm_u.get.ibgm_rd :
@@ -189,8 +181,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
{
kib_net_t *net = ni->ni_data;
- /* CAVEAT EMPTOR! all message fields not set here should have been
- * initialised previously. */
+ /*
+ * CAVEAT EMPTOR! all message fields not set here should have been
+ * initialised previously.
+ */
msg->ibm_magic = IBLND_MSG_MAGIC;
msg->ibm_version = version;
/* ibm_type */
@@ -249,11 +243,13 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
return -EPROTO;
}
- /* checksum must be computed with ibm_cksum zero and BEFORE anything
- * gets flipped */
+ /*
+ * checksum must be computed with ibm_cksum zero and BEFORE anything
+ * gets flipped
+ */
msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
msg->ibm_cksum = 0;
- if (msg_cksum != 0 &&
+ if (msg_cksum &&
msg_cksum != kiblnd_cksum(msg, msg_nob)) {
CERROR("Bad checksum\n");
return -EPROTO;
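
The comment's ordering matters: the stored checksum is extracted first (byte-swapped if the peer's endianness differs), the field is zeroed, and only then is the sum recomputed over the still-unswapped message. A hedged sketch reusing demo_cksum from above; demo_msg and its fields are hypothetical, and __builtin_bswap32 is the GCC/Clang byte-swap builtin:

    #include <stdint.h>

    struct demo_msg {
            uint32_t dm_cksum;   /* covers the struct, with itself zeroed */
            uint32_t dm_payload;
    };

    static int demo_verify_cksum(struct demo_msg *msg, int flip)
    {
            uint32_t stored = flip ? __builtin_bswap32(msg->dm_cksum)
                                   : msg->dm_cksum;

            msg->dm_cksum = 0;   /* field must be zero BEFORE summing */
            if (stored && stored != demo_cksum(msg, sizeof(*msg)))
                    return -1;   /* bad checksum */
            return 0;            /* stored == 0 means "no checksum sent" */
    }
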
@@ -326,21 +322,21 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
int cpt = lnet_cpt_of_nid(nid);
unsigned long flags;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(nid != LNET_NID_ANY);
LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
- if (peer == NULL) {
+ if (!peer) {
CERROR("Cannot allocate peer\n");
return -ENOMEM;
}
- memset(peer, 0, sizeof(*peer)); /* zero flags etc */
-
peer->ibp_ni = ni;
peer->ibp_nid = nid;
peer->ibp_error = 0;
peer->ibp_last_alive = 0;
+ peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
+ peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
@@ -350,7 +346,7 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
/* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(net->ibn_shutdown == 0);
+ LASSERT(!net->ibn_shutdown);
/* npeers only grows with the global lock held */
atomic_inc(&net->ibn_npeers);
@@ -365,38 +361,36 @@ void kiblnd_destroy_peer(kib_peer_t *peer)
{
kib_net_t *net = peer->ibp_ni->ni_data;
- LASSERT(net != NULL);
- LASSERT(atomic_read(&peer->ibp_refcount) == 0);
+ LASSERT(net);
+ LASSERT(!atomic_read(&peer->ibp_refcount));
LASSERT(!kiblnd_peer_active(peer));
- LASSERT(peer->ibp_connecting == 0);
- LASSERT(peer->ibp_accepting == 0);
- LASSERT(list_empty(&peer->ibp_conns));
+ LASSERT(kiblnd_peer_idle(peer));
LASSERT(list_empty(&peer->ibp_tx_queue));
LIBCFS_FREE(peer, sizeof(*peer));
- /* NB a peer's connections keep a reference on their peer until
+ /*
+ * NB a peer's connections keep a reference on their peer until
* they are destroyed, so we can be assured that _all_ state to do
* with this peer has been cleaned up when its refcount drops to
- * zero. */
+ * zero.
+ */
atomic_dec(&net->ibn_npeers);
}
kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
{
- /* the caller is responsible for accounting the additional reference
- * that this creates */
+ /*
+ * the caller is responsible for accounting the additional reference
+ * that this creates
+ */
struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct list_head *tmp;
kib_peer_t *peer;
list_for_each(tmp, peer_list) {
-
peer = list_entry(tmp, kib_peer_t, ibp_list);
-
- LASSERT(peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns)); /* active conn */
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_nid != nid)
continue;
@@ -431,13 +425,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
-
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
continue;
@@ -474,8 +464,10 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer)
}
/* NB closing peer's last conn unlinked it. */
}
- /* NB peer now unlinked; might even be freed if the peer table had the
- * last ref on it. */
+ /*
+ * NB peer now unlinked; might even be freed if the peer table had the
+ * last ref on it.
+ */
}
static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
@@ -493,7 +485,8 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (nid != LNET_NID_ANY) {
- lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
} else {
lo = 0;
hi = kiblnd_data.kib_peer_hash_size - 1;
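
The lo/hi split works because kiblnd_nid2peerlist() returns a pointer into the kib_peers[] bucket array, so subtracting the array base recovers the bucket index. A sketch of the same pointer arithmetic (the demo_* names are hypothetical):

    #include <stddef.h>

    #define DEMO_HASH_SIZE 101   /* mirrors IBLND_PEER_HASH_SIZE */

    static int demo_table[DEMO_HASH_SIZE];

    static size_t demo_nid2idx(unsigned long long nid)
    {
            int *bucket = &demo_table[nid % DEMO_HASH_SIZE];

            return bucket - demo_table;   /* difference == bucket index */
    }
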
@@ -502,9 +495,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
continue;
@@ -516,7 +507,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
LASSERT(list_empty(&peer->ibp_conns));
list_splice_init(&peer->ibp_tx_queue,
- &zombies);
+ &zombies);
}
kiblnd_del_peer_locked(peer);
@@ -544,11 +535,8 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
continue;
@@ -558,7 +546,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
continue;
conn = list_entry(ctmp, kib_conn_t,
- ibc_list);
+ ibc_list);
kiblnd_conn_addref(conn);
read_unlock_irqrestore(
&kiblnd_data.kib_global_lock,
@@ -597,12 +585,12 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
int mtu;
/* XXX There is no path record for iWARP, set by netdev->change_mtu? */
- if (cmid->route.path_rec == NULL)
+ if (!cmid->route.path_rec)
return;
mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
LASSERT(mtu >= 0);
- if (mtu != 0)
+ if (mtu)
cmid->route.path_rec->mtu = mtu;
}
@@ -619,13 +607,13 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
return 0;
mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
- if (mask == NULL)
+ if (!mask)
return 0;
/* hash NID to CPU id in this partition... */
off = do_div(nid, cpumask_weight(mask));
for_each_cpu(i, mask) {
- if (off-- == 0)
+ if (!off--)
return i % vectors;
}
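
kiblnd_get_completion_vector() spreads connections across CQ vectors by hashing the NID onto one CPU within the partition's mask, then folding that CPU id onto the available vectors. A userspace sketch of the idea (demo_* names are hypothetical; a plain array of CPU ids stands in for the cpumask, and ncpus must be positive):

    static int demo_pick_vector(unsigned long long nid,
                                const int *cpus, int ncpus, int vectors)
    {
            int off = (int)(nid % ncpus); /* stands in for do_div() */

            return cpus[off] % vectors;   /* chosen CPU folded onto a vector */
    }
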
@@ -634,15 +622,17 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
}
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version)
+ int state, int version)
{
- /* CAVEAT EMPTOR:
+ /*
+ * CAVEAT EMPTOR:
* If the new conn is created successfully it takes over the caller's
* ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
* is destroyed. On failure, the caller's ref on 'peer' remains and
* she must dispose of 'cmid'. (Actually I'd block forever if I tried
* to destroy 'cmid' here since I'm called from the CM which still has
- * its ref on 'cmid'). */
+ * its ref on 'cmid').
+ */
rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_net_t *net = peer->ibp_ni->ni_data;
kib_dev_t *dev;
@@ -656,7 +646,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
int rc;
int i;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(!in_interrupt());
dev = net->ibn_dev;
@@ -668,14 +658,14 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
sizeof(*init_qp_attr));
- if (init_qp_attr == NULL) {
+ if (!init_qp_attr) {
CERROR("Can't allocate qp_attr for %s\n",
libcfs_nid2str(peer->ibp_nid));
goto failed_0;
}
LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
- if (conn == NULL) {
+ if (!conn) {
CERROR("Can't allocate connection for %s\n",
libcfs_nid2str(peer->ibp_nid));
goto failed_1;
@@ -686,6 +676,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
conn->ibc_peer = peer; /* I take the caller's ref */
cmid->context = conn; /* for future CM callbacks */
conn->ibc_cmid = cmid;
+ conn->ibc_max_frags = peer->ibp_max_frags;
+ conn->ibc_queue_depth = peer->ibp_queue_depth;
INIT_LIST_HEAD(&conn->ibc_early_rxs);
INIT_LIST_HEAD(&conn->ibc_tx_noops);
@@ -697,7 +689,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
sizeof(*conn->ibc_connvars));
- if (conn->ibc_connvars == NULL) {
+ if (!conn->ibc_connvars) {
CERROR("Can't allocate in-progress connection state\n");
goto failed_2;
}
@@ -731,42 +723,42 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
write_unlock_irqrestore(glock, flags);
LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
- if (conn->ibc_rxs == NULL) {
+ IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+ if (!conn->ibc_rxs) {
CERROR("Cannot allocate RX buffers\n");
goto failed_2;
}
rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
- IBLND_RX_MSG_PAGES(version));
- if (rc != 0)
+ IBLND_RX_MSG_PAGES(conn));
+ if (rc)
goto failed_2;
kiblnd_map_rx_descs(conn);
- cq_attr.cqe = IBLND_CQ_ENTRIES(version);
+ cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
cq = ib_create_cq(cmid->device,
kiblnd_cq_completion, kiblnd_cq_event, conn,
&cq_attr);
if (IS_ERR(cq)) {
- CERROR("Can't create CQ: %ld, cqe: %d\n",
- PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
+ CERROR("Failed to create CQ with %d CQEs: %ld\n",
+ IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
goto failed_2;
}
conn->ibc_cq = cq;
rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- if (rc != 0) {
- CERROR("Can't request completion notificiation: %d\n", rc);
+ if (rc) {
+ CERROR("Can't request completion notification: %d\n", rc);
goto failed_2;
}
init_qp_attr->event_handler = kiblnd_qp_event;
init_qp_attr->qp_context = conn;
- init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
- init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
+ init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn);
+ init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
init_qp_attr->cap.max_send_sge = 1;
init_qp_attr->cap.max_recv_sge = 1;
init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -777,7 +769,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
conn->ibc_sched = sched;
rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
rc, init_qp_attr->cap.max_send_wr,
init_qp_attr->cap.max_recv_wr);
@@ -787,33 +779,37 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
/* 1 ref for caller and each rxmsg */
- atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
- conn->ibc_nrx = IBLND_RX_MSGS(version);
+ atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
+ conn->ibc_nrx = IBLND_RX_MSGS(conn);
/* post receives */
- for (i = 0; i < IBLND_RX_MSGS(version); i++) {
+ for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
rc = kiblnd_post_rx(&conn->ibc_rxs[i],
IBLND_POSTRX_NO_CREDIT);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't post rxmsg: %d\n", rc);
/* Make posted receives complete */
kiblnd_abort_receives(conn);
- /* correct # of posted buffers
- * NB locking needed now I'm racing with completion */
+ /*
+ * correct # of posted buffers
+ * NB locking needed now I'm racing with completion
+ */
spin_lock_irqsave(&sched->ibs_lock, flags);
- conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
+ conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i;
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- /* cmid will be destroyed by CM(ofed) after cm_callback
+ /*
+ * cmid will be destroyed by CM(ofed) after cm_callback
* returned, so we can't refer it anymore
- * (by kiblnd_connd()->kiblnd_destroy_conn) */
+ * (by kiblnd_connd()->kiblnd_destroy_conn)
+ */
rdma_destroy_qp(conn->ibc_cmid);
conn->ibc_cmid = NULL;
/* Drop my own and unused rxbuffer refcounts */
- while (i++ <= IBLND_RX_MSGS(version))
+ while (i++ <= IBLND_RX_MSGS(conn))
kiblnd_conn_decref(conn);
return NULL;
@@ -822,7 +818,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
/* Init successful! */
LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
- state == IBLND_CONN_PASSIVE_WAIT);
+ state == IBLND_CONN_PASSIVE_WAIT);
conn->ibc_state = state;
/* 1 more conn */
@@ -830,29 +826,29 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
return conn;
failed_2:
- kiblnd_destroy_conn(conn);
+ kiblnd_destroy_conn(conn, true);
failed_1:
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
failed_0:
return NULL;
}
-void kiblnd_destroy_conn(kib_conn_t *conn)
+void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
kib_peer_t *peer = conn->ibc_peer;
int rc;
LASSERT(!in_interrupt());
- LASSERT(atomic_read(&conn->ibc_refcount) == 0);
+ LASSERT(!atomic_read(&conn->ibc_refcount));
LASSERT(list_empty(&conn->ibc_early_rxs));
LASSERT(list_empty(&conn->ibc_tx_noops));
LASSERT(list_empty(&conn->ibc_tx_queue));
LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
LASSERT(list_empty(&conn->ibc_active_txs));
- LASSERT(conn->ibc_noops_posted == 0);
- LASSERT(conn->ibc_nsends_posted == 0);
+ LASSERT(!conn->ibc_noops_posted);
+ LASSERT(!conn->ibc_nsends_posted);
switch (conn->ibc_state) {
default:
@@ -861,7 +857,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn)
case IBLND_CONN_DISCONNECTED:
/* connvars should have been freed already */
- LASSERT(conn->ibc_connvars == NULL);
+ LASSERT(!conn->ibc_connvars);
break;
case IBLND_CONN_INIT:
@@ -869,28 +865,27 @@ void kiblnd_destroy_conn(kib_conn_t *conn)
}
/* conn->ibc_cmid might be destroyed by CM already */
- if (cmid != NULL && cmid->qp != NULL)
+ if (cmid && cmid->qp)
rdma_destroy_qp(cmid);
- if (conn->ibc_cq != NULL) {
+ if (conn->ibc_cq) {
rc = ib_destroy_cq(conn->ibc_cq);
- if (rc != 0)
+ if (rc)
CWARN("Error destroying CQ: %d\n", rc);
}
- if (conn->ibc_rx_pages != NULL)
+ if (conn->ibc_rx_pages)
kiblnd_unmap_rx_descs(conn);
- if (conn->ibc_rxs != NULL) {
+ if (conn->ibc_rxs) {
LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn->ibc_version)
- * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
}
- if (conn->ibc_connvars != NULL)
+ if (conn->ibc_connvars)
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
- if (conn->ibc_hdev != NULL)
+ if (conn->ibc_hdev)
kiblnd_hdev_decref(conn->ibc_hdev);
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
@@ -927,7 +922,7 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
}
int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
- int version, __u64 incarnation)
+ int version, __u64 incarnation)
{
kib_conn_t *conn;
struct list_head *ctmp;
@@ -967,20 +962,18 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (nid != LNET_NID_ANY)
- lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- else {
+ if (nid != LNET_NID_ANY) {
+ lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ } else {
lo = 0;
hi = kiblnd_data.kib_peer_hash_size - 1;
}
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
-
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
continue;
@@ -998,10 +991,10 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
if (nid == LNET_NID_ANY)
return 0;
- return (count == 0) ? -ENOENT : 0;
+ return !count ? -ENOENT : 0;
}
-int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
int rc = -EINVAL;
@@ -1027,14 +1020,14 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
rc = 0;
conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
- if (conn == NULL) {
+ if (!conn) {
rc = -ENOENT;
break;
}
- LASSERT(conn->ibc_cmid != NULL);
+ LASSERT(conn->ibc_cmid);
data->ioc_nid = conn->ibc_peer->ibp_nid;
- if (conn->ibc_cmid->route.path_rec == NULL)
+ if (!conn->ibc_cmid->route.path_rec)
data->ioc_u32[0] = 0; /* iWarp has no path MTU */
else
data->ioc_u32[0] =
@@ -1054,7 +1047,7 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
return rc;
}
-void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
+static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
@@ -1065,21 +1058,19 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
read_lock_irqsave(glock, flags);
peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL) {
- LASSERT(peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns)); /* active conn */
+ if (peer)
last_alive = peer->ibp_last_alive;
- }
read_unlock_irqrestore(glock, flags);
- if (last_alive != 0)
+ if (last_alive)
*when = last_alive;
- /* peer is not persistent in hash, trigger peer creation
- * and connection establishment with a NULL tx */
- if (peer == NULL)
+ /*
+ * peer is not persistent in hash, trigger peer creation
+ * and connection establishment with a NULL tx
+ */
+ if (!peer)
kiblnd_launch_tx(ni, NULL, nid);
CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
@@ -1087,13 +1078,13 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
last_alive ? cfs_duration_sec(now - last_alive) : -1);
}
-void kiblnd_free_pages(kib_pages_t *p)
+static void kiblnd_free_pages(kib_pages_t *p)
{
int npages = p->ibp_npages;
int i;
for (i = 0; i < npages; i++) {
- if (p->ibp_pages[i] != NULL)
+ if (p->ibp_pages[i])
__free_page(p->ibp_pages[i]);
}
@@ -1107,7 +1098,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
offsetof(kib_pages_t, ibp_pages[npages]));
- if (p == NULL) {
+ if (!p) {
CERROR("Can't allocate descriptor for %d pages\n", npages);
return -ENOMEM;
}
@@ -1119,7 +1110,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
p->ibp_pages[i] = alloc_pages_node(
cfs_cpt_spread_node(lnet_cpt_table(), cpt),
GFP_NOFS, 0);
- if (p->ibp_pages[i] == NULL) {
+ if (!p->ibp_pages[i]) {
CERROR("Can't allocate page %d of %d\n", i, npages);
kiblnd_free_pages(p);
return -ENOMEM;
@@ -1135,10 +1126,10 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn)
kib_rx_t *rx;
int i;
- LASSERT(conn->ibc_rxs != NULL);
- LASSERT(conn->ibc_hdev != NULL);
+ LASSERT(conn->ibc_rxs);
+ LASSERT(conn->ibc_hdev);
- for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+ for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
rx = &conn->ibc_rxs[i];
LASSERT(rx->rx_nob >= 0); /* not posted */
@@ -1162,7 +1153,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
int ipg;
int i;
- for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+ for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) {
pg = conn->ibc_rx_pages->ibp_pages[ipg];
rx = &conn->ibc_rxs[i];
@@ -1174,7 +1165,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
IBLND_MSG_SIZE,
DMA_FROM_DEVICE);
LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
- rx->rx_msgaddr));
+ rx->rx_msgaddr));
KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
@@ -1187,7 +1178,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
if (pg_off == PAGE_SIZE) {
pg_off = 0;
ipg++;
- LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
+ LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn));
}
}
}
@@ -1198,9 +1189,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
kib_tx_t *tx;
int i;
- LASSERT(tpo->tpo_pool.po_allocated == 0);
+ LASSERT(!tpo->tpo_pool.po_allocated);
- if (hdev == NULL)
+ if (!hdev)
return;
for (i = 0; i < tpo->tpo_pool.po_size; i++) {
@@ -1224,9 +1215,10 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
while (dev->ibd_failover) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (i++ % 50 == 0)
+ if (!(i++ % 50))
CDEBUG(D_NET, "%s: Wait for failover\n",
dev->ibd_ifname);
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1) / 100);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -1252,7 +1244,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
int ipage;
int i;
- LASSERT(net != NULL);
+ LASSERT(net);
dev = net->ibn_dev;
@@ -1260,7 +1252,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
/* No fancy arithmetic when we do the buffer calculations */
- CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
+ CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE));
tpo->tpo_hdev = kiblnd_current_hdev(dev);
@@ -1275,7 +1267,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
IBLND_MSG_SIZE, DMA_TO_DEVICE);
LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
- tx->tx_msgaddr));
+ tx->tx_msgaddr));
KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
list_add(&tx->tx_list, &pool->po_free_list);
@@ -1291,68 +1283,32 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
}
}
-struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
+struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd,
+ int negotiated_nfrags)
{
- __u64 index;
-
- LASSERT(hdev->ibh_mrs[0] != NULL);
-
- if (hdev->ibh_nmrs == 1)
- return hdev->ibh_mrs[0];
-
- index = addr >> hdev->ibh_mr_shift;
+ __u16 nfrags = (negotiated_nfrags != -1) ?
+ negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand;
- if (index < hdev->ibh_nmrs &&
- index == ((addr + size - 1) >> hdev->ibh_mr_shift))
- return hdev->ibh_mrs[index];
-
- return NULL;
-}
-
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
-{
- struct ib_mr *prev_mr;
- struct ib_mr *mr;
- int i;
-
- LASSERT(hdev->ibh_mrs[0] != NULL);
+ LASSERT(hdev->ibh_mrs);
if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
+ nfrags <= rd->rd_nfrags)
return NULL;
- if (hdev->ibh_nmrs == 1)
- return hdev->ibh_mrs[0];
-
- for (i = 0, mr = prev_mr = NULL;
- i < rd->rd_nfrags; i++) {
- mr = kiblnd_find_dma_mr(hdev,
- rd->rd_frags[i].rf_addr,
- rd->rd_frags[i].rf_nob);
- if (prev_mr == NULL)
- prev_mr = mr;
-
- if (mr == NULL || prev_mr != mr) {
- /* Can't covered by one single MR */
- mr = NULL;
- break;
- }
- }
-
- return mr;
+ return hdev->ibh_mrs;
}
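
With the MR array collapsed to a single global MR, the per-fragment search is gone; the only remaining decision is whether map-on-demand routes the transfer to the FMR path instead. A sketch of the reduced logic (demo_* types are hypothetical stand-ins for the ib_* ones):

    struct demo_mr;                      /* opaque, like struct ib_mr */
    struct demo_hdev { struct demo_mr *hd_global_mr; };

    static struct demo_mr *demo_find_rd_mr(struct demo_hdev *hdev,
                                           int rd_nfrags,
                                           int map_on_demand, int nfrags)
    {
            /* with map-on-demand on, big transfers defer to the FMR path */
            if (map_on_demand > 0 && nfrags <= rd_nfrags)
                    return NULL;

            return hdev->hd_global_mr;   /* the one global MR covers it */
    }
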
static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
{
- LASSERT(pool->fpo_map_count == 0);
+ LASSERT(!pool->fpo_map_count);
- if (pool->fpo_fmr_pool != NULL)
+ if (pool->fpo_fmr_pool)
ib_destroy_fmr_pool(pool->fpo_fmr_pool);
- if (pool->fpo_hdev != NULL)
+ if (pool->fpo_hdev)
kiblnd_hdev_decref(pool->fpo_hdev);
- LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
+ LIBCFS_FREE(pool, sizeof(*pool));
}
static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
@@ -1387,7 +1343,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
kib_dev_t *dev = fps->fps_net->ibn_dev;
kib_fmr_pool_t *fpo;
struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
+ .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
.page_shift = PAGE_SHIFT,
.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE),
@@ -1399,7 +1355,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
- if (fpo == NULL)
+ if (!fpo)
return -ENOMEM;
fpo->fpo_hdev = kiblnd_current_hdev(dev);
@@ -1410,7 +1366,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
CERROR("Failed to create FMR pool: %d\n", rc);
kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
+ LIBCFS_FREE(fpo, sizeof(*fpo));
return rc;
}
@@ -1424,7 +1380,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
struct list_head *zombies)
{
- if (fps->fps_net == NULL) /* intialized? */
+ if (!fps->fps_net) /* initialized? */
return;
spin_lock(&fps->fps_lock);
@@ -1434,7 +1390,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
kib_fmr_pool_t, fpo_list);
fpo->fpo_failed = 1;
list_del(&fpo->fpo_list);
- if (fpo->fpo_map_count == 0)
+ if (!fpo->fpo_map_count)
list_add(&fpo->fpo_list, zombies);
else
list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
@@ -1445,7 +1401,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
{
- if (fps->fps_net != NULL) { /* initialized? */
+ if (fps->fps_net) { /* initialized? */
kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
}
@@ -1458,7 +1414,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
kib_fmr_pool_t *fpo;
int rc;
- memset(fps, 0, sizeof(kib_fmr_poolset_t));
+ memset(fps, 0, sizeof(*fps));
fps->fps_net = net;
fps->fps_cpt = cpt;
@@ -1469,7 +1425,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
INIT_LIST_HEAD(&fps->fps_failed_pool_list);
rc = kiblnd_create_fmr_pool(fps, &fpo);
- if (rc == 0)
+ if (!rc)
list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
return rc;
@@ -1477,7 +1433,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
{
- if (fpo->fpo_map_count != 0) /* still in use */
+ if (fpo->fpo_map_count) /* still in use */
return 0;
if (fpo->fpo_failed)
return 1;
@@ -1494,11 +1450,11 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
int rc;
rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(rc == 0);
+ LASSERT(!rc);
- if (status != 0) {
+ if (status) {
rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT(rc == 0);
+ LASSERT(!rc);
}
fmr->fmr_pool = NULL;
@@ -1563,11 +1519,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
if (fps->fps_increasing) {
spin_unlock(&fps->fps_lock);
- CDEBUG(D_NET,
- "Another thread is allocating new FMR pool, waiting for her to complete\n");
+ CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n");
schedule();
goto again;
-
}
if (time_before(cfs_time_current(), fps->fps_next_retry)) {
@@ -1583,7 +1537,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
rc = kiblnd_create_fmr_pool(fps, &fpo);
spin_lock(&fps->fps_lock);
fps->fps_increasing = 0;
- if (rc == 0) {
+ if (!rc) {
fps->fps_version++;
list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
@@ -1597,7 +1551,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
static void kiblnd_fini_pool(kib_pool_t *pool)
{
LASSERT(list_empty(&pool->po_free_list));
- LASSERT(pool->po_allocated == 0);
+ LASSERT(!pool->po_allocated);
CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}
@@ -1606,7 +1560,7 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
{
CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
- memset(pool, 0, sizeof(kib_pool_t));
+ memset(pool, 0, sizeof(*pool));
INIT_LIST_HEAD(&pool->po_free_list);
pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
pool->po_owner = ps;
@@ -1621,14 +1575,14 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
pool = list_entry(head->next, kib_pool_t, po_list);
list_del(&pool->po_list);
- LASSERT(pool->po_owner != NULL);
+ LASSERT(pool->po_owner);
pool->po_owner->ps_pool_destroy(pool);
}
}
static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
{
- if (ps->ps_net == NULL) /* intialized? */
+ if (!ps->ps_net) /* initialized? */
return;
spin_lock(&ps->ps_lock);
@@ -1637,7 +1591,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
kib_pool_t, po_list);
po->po_failed = 1;
list_del(&po->po_list);
- if (po->po_allocated == 0)
+ if (!po->po_allocated)
list_add(&po->po_list, zombies);
else
list_add(&po->po_list, &ps->ps_failed_pool_list);
@@ -1647,7 +1601,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
static void kiblnd_fini_poolset(kib_poolset_t *ps)
{
- if (ps->ps_net != NULL) { /* initialized? */
+ if (ps->ps_net) { /* initialized? */
kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
kiblnd_destroy_pool_list(&ps->ps_pool_list);
}
@@ -1663,7 +1617,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
kib_pool_t *pool;
int rc;
- memset(ps, 0, sizeof(kib_poolset_t));
+ memset(ps, 0, sizeof(*ps));
ps->ps_cpt = cpt;
ps->ps_net = net;
@@ -1680,7 +1634,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
INIT_LIST_HEAD(&ps->ps_failed_pool_list);
rc = ps->ps_pool_create(ps, size, &pool);
- if (rc == 0)
+ if (!rc)
list_add(&pool->po_list, &ps->ps_pool_list);
else
CERROR("Failed to create the first pool for %s\n", ps->ps_name);
@@ -1690,7 +1644,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
{
- if (pool->po_allocated != 0) /* still in use */
+ if (pool->po_allocated) /* still in use */
return 0;
if (pool->po_failed)
return 1;
@@ -1706,7 +1660,7 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
spin_lock(&ps->ps_lock);
- if (ps->ps_node_fini != NULL)
+ if (ps->ps_node_fini)
ps->ps_node_fini(pool, node);
LASSERT(pool->po_allocated > 0);
@@ -1731,6 +1685,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
{
struct list_head *node;
kib_pool_t *pool;
+ unsigned int interval = 1;
+ unsigned long time_before;
+ unsigned int trips = 0;
int rc;
again:
@@ -1744,7 +1701,7 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
node = pool->po_free_list.next;
list_del(node);
- if (ps->ps_node_init != NULL) {
+ if (ps->ps_node_init) {
/* still hold the lock */
ps->ps_node_init(pool, node);
}
@@ -1756,9 +1713,15 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
if (ps->ps_increasing) {
/* another thread is allocating a new pool */
spin_unlock(&ps->ps_lock);
- CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
- ps->ps_name);
- schedule();
+ trips++;
+ CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting %d HZs for her to complete. trips = %d\n",
+ ps->ps_name, interval, trips);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(interval);
+ if (interval < cfs_time_seconds(1))
+ interval *= 2;
+
goto again;
}
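
The new wait loop replaces a bare schedule() with an exponential backoff capped near one second of ticks, so a pool allocation stuck behind another thread is polled progressively less often. A userspace sketch of the same policy (demo_* names are hypothetical; usleep stands in for schedule_timeout, and the tick length is invented for illustration):

    #include <unistd.h>

    #define DEMO_TICK_US   1000u    /* pretend one tick is 1 ms */
    #define DEMO_MAX_TICKS 512u     /* cap, like cfs_time_seconds(1) */

    static void demo_wait_for_pool(int (*pool_busy)(void))
    {
            unsigned int interval = 1;

            while (pool_busy()) {
                    usleep(interval * DEMO_TICK_US); /* sleep, don't spin */
                    if (interval < DEMO_MAX_TICKS)
                            interval *= 2;           /* double up to cap */
            }
    }
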
@@ -1772,12 +1735,14 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
-
+ time_before = cfs_time_current();
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
+ CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
+ cfs_time_current() - time_before);
spin_lock(&ps->ps_lock);
ps->ps_increasing = 0;
- if (rc == 0) {
+ if (!rc) {
list_add_tail(&pool->po_list, &ps->ps_pool_list);
} else {
ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
@@ -1794,37 +1759,37 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
int i;
- LASSERT(pool->po_allocated == 0);
+ LASSERT(!pool->po_allocated);
- if (tpo->tpo_tx_pages != NULL) {
+ if (tpo->tpo_tx_pages) {
kiblnd_unmap_tx_pool(tpo);
kiblnd_free_pages(tpo->tpo_tx_pages);
}
- if (tpo->tpo_tx_descs == NULL)
+ if (!tpo->tpo_tx_descs)
goto out;
for (i = 0; i < pool->po_size; i++) {
kib_tx_t *tx = &tpo->tpo_tx_descs[i];
list_del(&tx->tx_list);
- if (tx->tx_pages != NULL)
+ if (tx->tx_pages)
LIBCFS_FREE(tx->tx_pages,
LNET_MAX_IOV *
sizeof(*tx->tx_pages));
- if (tx->tx_frags != NULL)
+ if (tx->tx_frags)
LIBCFS_FREE(tx->tx_frags,
IBLND_MAX_RDMA_FRAGS *
sizeof(*tx->tx_frags));
- if (tx->tx_wrq != NULL)
+ if (tx->tx_wrq)
LIBCFS_FREE(tx->tx_wrq,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_wrq));
- if (tx->tx_sge != NULL)
+ if (tx->tx_sge)
LIBCFS_FREE(tx->tx_sge,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_sge));
- if (tx->tx_rd != NULL)
+ if (tx->tx_rd)
LIBCFS_FREE(tx->tx_rd,
offsetof(kib_rdma_desc_t,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
@@ -1834,7 +1799,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
pool->po_size * sizeof(kib_tx_t));
out:
kiblnd_fini_pool(pool);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ LIBCFS_FREE(tpo, sizeof(*tpo));
}
static int kiblnd_tx_pool_size(int ncpts)
@@ -1853,7 +1818,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
kib_tx_pool_t *tpo;
LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
- if (tpo == NULL) {
+ if (!tpo) {
CERROR("Failed to allocate TX pool\n");
return -ENOMEM;
}
@@ -1864,15 +1829,15 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
tpo->tpo_tx_pages = NULL;
npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
- if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
+ if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) {
CERROR("Can't allocate tx pages: %d\n", npg);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ LIBCFS_FREE(tpo, sizeof(*tpo));
return -ENOMEM;
}
LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
size * sizeof(kib_tx_t));
- if (tpo->tpo_tx_descs == NULL) {
+ if (!tpo->tpo_tx_descs) {
CERROR("Can't allocate %d tx descriptors\n", size);
ps->ps_pool_destroy(pool);
return -ENOMEM;
@@ -1884,17 +1849,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
kib_tx_t *tx = &tpo->tpo_tx_descs[i];
tx->tx_pool = tpo;
- if (ps->ps_net->ibn_fmr_ps != NULL) {
+ if (ps->ps_net->ibn_fmr_ps) {
LIBCFS_CPT_ALLOC(tx->tx_pages,
lnet_cpt_table(), ps->ps_cpt,
LNET_MAX_IOV * sizeof(*tx->tx_pages));
- if (tx->tx_pages == NULL)
+ if (!tx->tx_pages)
break;
}
LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
- if (tx->tx_frags == NULL)
+ if (!tx->tx_frags)
break;
sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
@@ -1902,19 +1867,19 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_wrq));
- if (tx->tx_wrq == NULL)
+ if (!tx->tx_wrq)
break;
LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_sge));
- if (tx->tx_sge == NULL)
+ if (!tx->tx_sge)
break;
LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
offsetof(kib_rdma_desc_t,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
- if (tx->tx_rd == NULL)
+ if (!tx->tx_rd)
break;
}
@@ -1945,23 +1910,23 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
kib_tx_poolset_t *tps;
kib_fmr_poolset_t *fps;
- if (net->ibn_tx_ps != NULL) {
+ if (net->ibn_tx_ps) {
tps = net->ibn_tx_ps[i];
kiblnd_fini_poolset(&tps->tps_poolset);
}
- if (net->ibn_fmr_ps != NULL) {
+ if (net->ibn_fmr_ps) {
fps = net->ibn_fmr_ps[i];
kiblnd_fini_fmr_poolset(fps);
}
}
- if (net->ibn_tx_ps != NULL) {
+ if (net->ibn_tx_ps) {
cfs_percpt_free(net->ibn_tx_ps);
net->ibn_tx_ps = NULL;
}
- if (net->ibn_fmr_ps != NULL) {
+ if (net->ibn_fmr_ps) {
cfs_percpt_free(net->ibn_fmr_ps);
net->ibn_fmr_ps = NULL;
}
@@ -1975,8 +1940,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
int i;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_map_on_demand == 0 &&
- net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
+ if (!*kiblnd_tunables.kib_map_on_demand) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
goto create_tx_pool;
}
@@ -1996,7 +1960,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
* TX pool must be created later than FMR, see LU-2268
* for details
*/
- LASSERT(net->ibn_tx_ps == NULL);
+ LASSERT(!net->ibn_tx_ps);
/*
* premapping can fail if ibd_nmr > 1, so we always create
@@ -2005,56 +1969,45 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_fmr_poolset_t));
- if (net->ibn_fmr_ps == NULL) {
+ if (!net->ibn_fmr_ps) {
CERROR("Failed to allocate FMR pool array\n");
rc = -ENOMEM;
goto failed;
}
for (i = 0; i < ncpts; i++) {
- cpt = (cpts == NULL) ? i : cpts[i];
+ cpt = !cpts ? i : cpts[i];
rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
kiblnd_fmr_pool_size(ncpts),
kiblnd_fmr_flush_trigger(ncpts));
- if (rc == -ENOSYS && i == 0) /* no FMR */
- break;
-
- if (rc != 0) { /* a real error */
+ if (rc) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
goto failed;
}
}
- if (i > 0) {
+ if (i > 0)
LASSERT(i == ncpts);
- goto create_tx_pool;
- }
-
- cfs_percpt_free(net->ibn_fmr_ps);
- net->ibn_fmr_ps = NULL;
-
- CWARN("Device does not support FMR\n");
- goto failed;
create_tx_pool:
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_tx_poolset_t));
- if (net->ibn_tx_ps == NULL) {
+ if (!net->ibn_tx_ps) {
CERROR("Failed to allocate tx pool array\n");
rc = -ENOMEM;
goto failed;
}
for (i = 0; i < ncpts; i++) {
- cpt = (cpts == NULL) ? i : cpts[i];
+ cpt = !cpts ? i : cpts[i];
rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
cpt, net, "TX",
kiblnd_tx_pool_size(ncpts),
kiblnd_create_tx_pool,
kiblnd_destroy_tx_pool,
kiblnd_tx_init, NULL);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't initialize TX pool for CPT %d: %d\n",
cpt, rc);
goto failed;
@@ -2064,14 +2017,16 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
return 0;
failed:
kiblnd_net_fini_pools(net);
- LASSERT(rc != 0);
+ LASSERT(rc);
return rc;
}
static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
{
- /* It's safe to assume a HCA can handle a page size
- * matching that of the native system */
+ /*
+ * It's safe to assume a HCA can handle a page size
+ * matching that of the native system
+ */
hdev->ibh_page_shift = PAGE_SHIFT;
hdev->ibh_page_size = 1 << PAGE_SHIFT;
hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
@@ -2082,44 +2037,28 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
return 0;
}
- for (hdev->ibh_mr_shift = 0;
- hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
- if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
- hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
- return 0;
- }
-
CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
return -EINVAL;
}
static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
{
- int i;
-
- if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
+ if (!hdev->ibh_mrs)
return;
- for (i = 0; i < hdev->ibh_nmrs; i++) {
- if (hdev->ibh_mrs[i] == NULL)
- break;
+ ib_dereg_mr(hdev->ibh_mrs);
- ib_dereg_mr(hdev->ibh_mrs[i]);
- }
-
- LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
- hdev->ibh_mrs = NULL;
- hdev->ibh_nmrs = 0;
+ hdev->ibh_mrs = NULL;
}
void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
{
kiblnd_hdev_cleanup_mrs(hdev);
- if (hdev->ibh_pd != NULL)
+ if (hdev->ibh_pd)
ib_dealloc_pd(hdev->ibh_pd);
- if (hdev->ibh_cmid != NULL)
+ if (hdev->ibh_cmid)
rdma_destroy_id(hdev->ibh_cmid);
LIBCFS_FREE(hdev, sizeof(*hdev));
@@ -2132,18 +2071,9 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
rc = kiblnd_hdev_get_attr(hdev);
- if (rc != 0)
+ if (rc)
return rc;
- LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
- if (hdev->ibh_mrs == NULL) {
- CERROR("Failed to allocate MRs table\n");
- return -ENOMEM;
- }
-
- hdev->ibh_mrs[0] = NULL;
- hdev->ibh_nmrs = 1;
-
mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
if (IS_ERR(mr)) {
CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
@@ -2151,7 +2081,7 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
return PTR_ERR(mr);
}
- hdev->ibh_mrs[0] = mr;
+ hdev->ibh_mrs = mr;
return 0;
}
@@ -2170,12 +2100,13 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
struct sockaddr_in dstaddr;
int rc;
- if (dev->ibd_hdev == NULL || /* initializing */
- dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
+ if (!dev->ibd_hdev || /* initializing */
+ !dev->ibd_hdev->ibh_cmid || /* listener is dead */
*kiblnd_tunables.kib_dev_failover > 1) /* debugging */
return 1;
- /* XXX: it's UGLY, but I don't have better way to find
+ /*
+ * XXX: it's UGLY, but I don't have better way to find
* ib-bonding HCA failover because:
*
* a. no reliable CM event for HCA failover...
@@ -2184,7 +2115,8 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
* We have only two choices at this point:
*
* a. rdma_bind_addr(), it will conflict with listener cmid
- * b. rdma_resolve_addr() to zero addr */
+ * b. rdma_resolve_addr() to zero addr
+ */
cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
IB_QPT_RC);
if (IS_ERR(cmid)) {
@@ -2201,7 +2133,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
dstaddr.sin_family = AF_INET;
rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
(struct sockaddr *)&dstaddr, 1);
- if (rc != 0 || cmid->device == NULL) {
+ if (rc || !cmid->device) {
CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
@@ -2230,24 +2162,27 @@ int kiblnd_dev_failover(kib_dev_t *dev)
int i;
LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
- dev->ibd_can_failover ||
- dev->ibd_hdev == NULL);
+ dev->ibd_can_failover || !dev->ibd_hdev);
rc = kiblnd_dev_need_failover(dev);
if (rc <= 0)
goto out;
- if (dev->ibd_hdev != NULL &&
- dev->ibd_hdev->ibh_cmid != NULL) {
- /* XXX it's not good to close old listener at here,
+ if (dev->ibd_hdev &&
+ dev->ibd_hdev->ibh_cmid) {
+ /*
+ * XXX it's not good to close old listener at here,
* because we can fail to create new listener.
* But we have to close it now, otherwise rdma_bind_addr
- * will return EADDRINUSE... How crap! */
+ * will return EADDRINUSE... How crap!
+ */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
cmid = dev->ibd_hdev->ibh_cmid;
- /* make next schedule of kiblnd_dev_need_failover()
- * return 1 for me */
+ /*
+ * make next schedule of kiblnd_dev_need_failover()
+ * return 1 for me
+ */
dev->ibd_hdev->ibh_cmid = NULL;
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
@@ -2269,7 +2204,7 @@ int kiblnd_dev_failover(kib_dev_t *dev)
/* Bind to failover device or port */
rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
- if (rc != 0 || cmid->device == NULL) {
+ if (rc || !cmid->device) {
CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
@@ -2278,7 +2213,7 @@ int kiblnd_dev_failover(kib_dev_t *dev)
}
LIBCFS_ALLOC(hdev, sizeof(*hdev));
- if (hdev == NULL) {
+ if (!hdev) {
CERROR("Failed to allocate kib_hca_dev\n");
rdma_destroy_id(cmid);
rc = -ENOMEM;
@@ -2300,13 +2235,13 @@ int kiblnd_dev_failover(kib_dev_t *dev)
hdev->ibh_pd = pd;
rc = rdma_listen(cmid, 0);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't start new listener: %d\n", rc);
goto out;
}
rc = kiblnd_hdev_setup_mrs(hdev);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup device: %d\n", rc);
goto out;
}
@@ -2334,10 +2269,10 @@ int kiblnd_dev_failover(kib_dev_t *dev)
kiblnd_destroy_pool_list(&zombie_ppo);
if (!list_empty(&zombie_fpo))
kiblnd_destroy_fmr_pool_list(&zombie_fpo);
- if (hdev != NULL)
+ if (hdev)
kiblnd_hdev_decref(hdev);
- if (rc != 0)
+ if (rc)
dev->ibd_failed_failover++;
else
dev->ibd_failed_failover = 0;
@@ -2347,13 +2282,13 @@ int kiblnd_dev_failover(kib_dev_t *dev)
void kiblnd_destroy_dev(kib_dev_t *dev)
{
- LASSERT(dev->ibd_nnets == 0);
+ LASSERT(!dev->ibd_nnets);
LASSERT(list_empty(&dev->ibd_nets));
list_del(&dev->ibd_fail_list);
list_del(&dev->ibd_list);
- if (dev->ibd_hdev != NULL)
+ if (dev->ibd_hdev)
kiblnd_hdev_decref(dev->ibd_hdev);
LIBCFS_FREE(dev, sizeof(*dev));
@@ -2369,7 +2304,7 @@ static kib_dev_t *kiblnd_create_dev(char *ifname)
int rc;
rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't query IPoIB interface %s: %d\n",
ifname, rc);
return NULL;
@@ -2381,11 +2316,11 @@ static kib_dev_t *kiblnd_create_dev(char *ifname)
}
LIBCFS_ALLOC(dev, sizeof(*dev));
- if (dev == NULL)
+ if (!dev)
return NULL;
netdev = dev_get_by_name(&init_net, ifname);
- if (netdev == NULL) {
+ if (!netdev) {
dev->ibd_can_failover = 0;
} else {
dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
@@ -2400,14 +2335,13 @@ static kib_dev_t *kiblnd_create_dev(char *ifname)
/* initialize the device */
rc = kiblnd_dev_failover(dev);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't initialize device: %d\n", rc);
LIBCFS_FREE(dev, sizeof(*dev));
return NULL;
}
- list_add_tail(&dev->ibd_list,
- &kiblnd_data.kib_devs);
+ list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs);
return dev;
}
@@ -2424,18 +2358,22 @@ static void kiblnd_base_shutdown(void)
case IBLND_INIT_ALL:
case IBLND_INIT_DATA:
- LASSERT(kiblnd_data.kib_peers != NULL);
+ LASSERT(kiblnd_data.kib_peers);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
+ LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
+ LASSERT(list_empty(&kiblnd_data.kib_reconn_wait));
/* flag threads to terminate; wake and wait for them to die */
kiblnd_data.kib_shutdown = 1;
- /* NB: we really want to stop scheduler threads net by net
+ /*
+ * NB: we really want to stop scheduler threads net by net
* instead of the whole module, this should be improved
- * with dynamic configuration LNet */
+ * with dynamic configuration LNet
+ */
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
wake_up_all(&sched->ibs_waitq);
@@ -2443,7 +2381,7 @@ static void kiblnd_base_shutdown(void)
wake_up_all(&kiblnd_data.kib_failover_waitq);
i = 2;
- while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+ while (atomic_read(&kiblnd_data.kib_nthreads)) {
i++;
/* power of 2 ? */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
@@ -2459,20 +2397,20 @@ static void kiblnd_base_shutdown(void)
break;
}
- if (kiblnd_data.kib_peers != NULL) {
+ if (kiblnd_data.kib_peers) {
LIBCFS_FREE(kiblnd_data.kib_peers,
sizeof(struct list_head) *
kiblnd_data.kib_peer_hash_size);
}
- if (kiblnd_data.kib_scheds != NULL)
+ if (kiblnd_data.kib_scheds)
cfs_percpt_free(kiblnd_data.kib_scheds);
kiblnd_data.kib_init = IBLND_INIT_NOTHING;
module_put(THIS_MODULE);
}
-void kiblnd_shutdown(lnet_ni_t *ni)
+static void kiblnd_shutdown(lnet_ni_t *ni)
{
kib_net_t *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
@@ -2481,7 +2419,7 @@ void kiblnd_shutdown(lnet_ni_t *ni)
LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
- if (net == NULL)
+ if (!net)
goto out;
write_lock_irqsave(g_lock, flags);
@@ -2498,7 +2436,7 @@ void kiblnd_shutdown(lnet_ni_t *ni)
/* Wait for all peer state to clean up */
i = 2;
- while (atomic_read(&net->ibn_npeers) != 0) {
+ while (atomic_read(&net->ibn_npeers)) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
"%s: waiting for %d peers to disconnect\n",
@@ -2519,10 +2457,9 @@ void kiblnd_shutdown(lnet_ni_t *ni)
/* fall through */
case IBLND_INIT_NOTHING:
- LASSERT(atomic_read(&net->ibn_nconns) == 0);
+ LASSERT(!atomic_read(&net->ibn_nconns));
- if (net->ibn_dev != NULL &&
- net->ibn_dev->ibd_nnets == 0)
+ if (net->ibn_dev && !net->ibn_dev->ibd_nnets)
kiblnd_destroy_dev(net->ibn_dev);
break;
@@ -2558,7 +2495,7 @@ static int kiblnd_base_startup(void)
kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
LIBCFS_ALLOC(kiblnd_data.kib_peers,
sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
- if (kiblnd_data.kib_peers == NULL)
+ if (!kiblnd_data.kib_peers)
goto failed;
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
@@ -2566,12 +2503,15 @@ static int kiblnd_base_startup(void)
spin_lock_init(&kiblnd_data.kib_connd_lock);
INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list);
+ INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait);
+
init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*sched));
- if (kiblnd_data.kib_scheds == NULL)
+ if (!kiblnd_data.kib_scheds)
goto failed;
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
@@ -2585,8 +2525,10 @@ static int kiblnd_base_startup(void)
if (*kiblnd_tunables.kib_nscheds > 0) {
nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
} else {
- /* max to half of CPUs, another half is reserved for
- * upper layer modules */
+ /*
+ * max to half of CPUs, another half is reserved for
+ * upper layer modules
+ */
nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
}
@@ -2601,16 +2543,16 @@ static int kiblnd_base_startup(void)
/*****************************************************/
rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
- if (rc != 0) {
+ if (rc) {
CERROR("Can't spawn o2iblnd connd: %d\n", rc);
goto failed;
}
- if (*kiblnd_tunables.kib_dev_failover != 0)
+ if (*kiblnd_tunables.kib_dev_failover)
rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
"kiblnd_failover");
- if (rc != 0) {
+ if (rc) {
CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
goto failed;
}
@@ -2632,7 +2574,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
int nthrs;
int i;
- if (sched->ibs_nthreads == 0) {
+ if (!sched->ibs_nthreads) {
if (*kiblnd_tunables.kib_nscheds > 0) {
nthrs = sched->ibs_nthreads_max;
} else {
@@ -2655,7 +2597,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
- if (rc == 0)
+ if (!rc)
continue;
CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
@@ -2677,14 +2619,14 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
for (i = 0; i < ncpts; i++) {
struct kib_sched_info *sched;
- cpt = (cpts == NULL) ? i : cpts[i];
+ cpt = !cpts ? i : cpts[i];
sched = kiblnd_data.kib_scheds[cpt];
if (!newdev && sched->ibs_nthreads > 0)
continue;
rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
- if (rc != 0) {
+ if (rc) {
CERROR("Failed to start scheduler threads for %s\n",
dev->ibd_ifname);
return rc;
@@ -2702,30 +2644,30 @@ static kib_dev_t *kiblnd_dev_search(char *ifname)
colon = strchr(ifname, ':');
list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+ if (!strcmp(&dev->ibd_ifname[0], ifname))
return dev;
- if (alias != NULL)
+ if (alias)
continue;
colon2 = strchr(dev->ibd_ifname, ':');
- if (colon != NULL)
+ if (colon)
*colon = 0;
- if (colon2 != NULL)
+ if (colon2)
*colon2 = 0;
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+ if (!strcmp(&dev->ibd_ifname[0], ifname))
alias = dev;
- if (colon != NULL)
+ if (colon)
*colon = ':';
- if (colon2 != NULL)
+ if (colon2)
*colon2 = ':';
}
return alias;
}
-int kiblnd_startup(lnet_ni_t *ni)
+static int kiblnd_startup(lnet_ni_t *ni)
{
char *ifname;
kib_dev_t *ibdev = NULL;
@@ -2739,13 +2681,13 @@ int kiblnd_startup(lnet_ni_t *ni)
if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
rc = kiblnd_base_startup();
- if (rc != 0)
+ if (rc)
return rc;
}
LIBCFS_ALLOC(net, sizeof(*net));
ni->ni_data = net;
- if (net == NULL)
+ if (!net)
goto net_failed;
ktime_get_real_ts64(&tv);
@@ -2757,11 +2699,11 @@ int kiblnd_startup(lnet_ni_t *ni)
ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
- if (ni->ni_interfaces[0] != NULL) {
+ if (ni->ni_interfaces[0]) {
/* Use the IPoIB interface specified in 'networks=' */
CLASSERT(LNET_MAX_INTERFACES > 1);
- if (ni->ni_interfaces[1] != NULL) {
+ if (ni->ni_interfaces[1]) {
CERROR("Multiple interfaces not supported\n");
goto failed;
}
@@ -2778,12 +2720,12 @@ int kiblnd_startup(lnet_ni_t *ni)
ibdev = kiblnd_dev_search(ifname);
- newdev = ibdev == NULL;
+ newdev = !ibdev;
/* hmm...create kib_dev even for alias */
- if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
+ if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname))
ibdev = kiblnd_create_dev(ifname);
- if (ibdev == NULL)
+ if (!ibdev)
goto failed;
net->ibn_dev = ibdev;
@@ -2791,11 +2733,11 @@ int kiblnd_startup(lnet_ni_t *ni)
rc = kiblnd_dev_start_threads(ibdev, newdev,
ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0)
+ if (rc)
goto failed;
rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0) {
+ if (rc) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
}
@@ -2810,7 +2752,7 @@ int kiblnd_startup(lnet_ni_t *ni)
return 0;
failed:
- if (net->ibn_dev == NULL && ibdev != NULL)
+ if (!net->ibn_dev && ibdev)
kiblnd_destroy_dev(ibdev);
net_failed:
@@ -2820,25 +2762,35 @@ net_failed:
return -ENETDOWN;
}
-static void __exit kiblnd_module_fini(void)
+static lnd_t the_o2iblnd = {
+ .lnd_type = O2IBLND,
+ .lnd_startup = kiblnd_startup,
+ .lnd_shutdown = kiblnd_shutdown,
+ .lnd_ctl = kiblnd_ctl,
+ .lnd_query = kiblnd_query,
+ .lnd_send = kiblnd_send,
+ .lnd_recv = kiblnd_recv,
+};
+
+static void __exit ko2iblnd_exit(void)
{
lnet_unregister_lnd(&the_o2iblnd);
}
-static int __init kiblnd_module_init(void)
+static int __init ko2iblnd_init(void)
{
int rc;
CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
CLASSERT(offsetof(kib_msg_t,
- ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
+ ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+ <= IBLND_MSG_SIZE);
CLASSERT(offsetof(kib_msg_t,
- ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
+ ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+ <= IBLND_MSG_SIZE);
rc = kiblnd_tunables_init();
- if (rc != 0)
+ if (rc)
return rc;
lnet_register_lnd(&the_o2iblnd);
@@ -2847,8 +2799,9 @@ static int __init kiblnd_module_init(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
+MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver");
+MODULE_VERSION("2.7.0");
MODULE_LICENSE("GPL");
-module_init(kiblnd_module_init);
-module_exit(kiblnd_module_fini);
+module_init(ko2iblnd_init);
+module_exit(ko2iblnd_exit);
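
Moving the lnd_t definition below the handlers lets every handler become static with no forward declarations, while keeping the designated-initializer table at the bottom of the file; module init/exit then shrink to register/unregister calls. The same pattern in miniature (demo_* names are hypothetical):

    struct demo_ops {
            int  (*start)(void);
            void (*stop)(void);
    };

    static int  demo_start(void) { return 0; }
    static void demo_stop(void)  { }

    /* defined last, so the static handlers above are already in scope */
    static struct demo_ops the_demo_ops = {
            .start = demo_start,
            .stop  = demo_stop,
    };
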
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 025faa9f86b3..bfcbdd167da7 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -60,17 +60,17 @@
#include <net/sock.h>
#include <linux/in.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_fmr_pool.h>
+
#define DEBUG_SUBSYSTEM S_LND
#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/lnet.h"
#include "../../../include/linux/lnet/lib-lnet.h"
-#include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
-
#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
/* # scheduler loops before reschedule */
#define IBLND_RESCHED 100
@@ -146,9 +146,9 @@ kiblnd_concurrent_sends_v1(void)
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
-#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
+#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \
*kiblnd_tunables.kib_map_on_demand : \
IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
@@ -162,18 +162,17 @@ kiblnd_concurrent_sends_v1(void)
#define IBLND_FMR_POOL 256
#define IBLND_FMR_POOL_FLUSH 192
-/* TX messages (shared by all connections) */
-#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
-
-/* RX messages (per connection) */
-#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
-#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
+#define IBLND_RX_MSGS(c) \
+ ((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version))
+#define IBLND_RX_MSG_BYTES(c) (IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
+#define IBLND_RX_MSG_PAGES(c) \
+ ((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)
/* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v)
-#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
-#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
+#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
+#define IBLND_SEND_WRS(c) \
+ ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version))
+#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
struct kib_hca_dev;
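/*
 * These macros now size RX buffers, work requests and CQEs from the
 * connection's negotiated parameters (ibc_queue_depth, ibc_max_frags)
 * rather than from the wire version alone. A worked example under
 * assumed values — queue depth 8, an OOB-capable version so
 * IBLND_OOB_MSGS() is 2, and 4 KiB pages:
 */
int rx_msgs  = 8 * 2 + 2;			/* IBLND_RX_MSGS(c)      = 18    */
int rx_bytes = 18 * (4 << 10);			/* IBLND_RX_MSG_BYTES(c) = 73728 */
int rx_pages = (73728 + 4096 - 1) / 4096;	/* IBLND_RX_MSG_PAGES(c) = 18    */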
@@ -209,8 +208,7 @@ typedef struct kib_hca_dev {
__u64 ibh_page_mask; /* page mask of current HCA */
int ibh_mr_shift; /* bits shift of max MR size */
__u64 ibh_mr_size; /* size of MR */
- int ibh_nmrs; /* # of global MRs */
- struct ib_mr **ibh_mrs; /* global MR */
+ struct ib_mr *ibh_mrs; /* global MR */
struct ib_pd *ibh_pd; /* PD */
kib_dev_t *ibh_dev; /* owner */
atomic_t ibh_ref; /* refcount */
@@ -350,6 +348,16 @@ typedef struct {
void *kib_connd; /* the connd task (serialisation assertions) */
struct list_head kib_connd_conns; /* connections to setup/teardown */
struct list_head kib_connd_zombies; /* connections with zero refcount */
+ /* connections to reconnect */
+ struct list_head kib_reconn_list;
+ /* peers waiting for reconnection */
+ struct list_head kib_reconn_wait;
+ /*
+ * Time (in seconds) at which peers are pulled out of \a kib_reconn_wait
+ * for reconnection.
+ */
+ time64_t kib_reconn_sec;
+
wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */
spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
@@ -465,10 +473,10 @@ typedef struct {
#define IBLND_REJECT_FATAL 3 /* Anything else */
#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
-#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match */
- /* mine */
-#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't */
- /* match mine */
+/* peer's rdma frags don't match mine */
+#define IBLND_REJECT_RDMA_FRAGS 6
+/* peer's msg queue size doesn't match mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE 7
/***********************************************************************/
@@ -527,6 +535,8 @@ typedef struct kib_conn {
struct list_head ibc_list; /* stash on peer's conn list */
struct list_head ibc_sched_list; /* schedule for attention */
__u16 ibc_version; /* version of connection */
+ /* reconnect later */
+ __u16 ibc_reconnect:1;
__u64 ibc_incarnation; /* which instance of the peer */
atomic_t ibc_refcount; /* # users */
int ibc_state; /* what's happening */
@@ -536,6 +546,10 @@ typedef struct kib_conn {
int ibc_outstanding_credits; /* # credits to return */
int ibc_reserved_credits; /* # ACK/DONE msg credits */
int ibc_comms_error; /* set on comms error */
+ /* connection's queue depth */
+ __u16 ibc_queue_depth;
+ /* connection's max frags */
+ __u16 ibc_max_frags;
unsigned int ibc_nrx:16; /* receive buffers owned */
unsigned int ibc_scheduled:1; /* scheduled for attention */
unsigned int ibc_ready:1; /* CQ callback fired */
@@ -572,18 +586,29 @@ typedef struct kib_peer {
struct list_head ibp_list; /* stash on global peer list */
lnet_nid_t ibp_nid; /* who's on the other end(s) */
lnet_ni_t *ibp_ni; /* LNet interface */
- atomic_t ibp_refcount; /* # users */
struct list_head ibp_conns; /* all active connections */
struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- __u16 ibp_version; /* version of peer */
__u64 ibp_incarnation; /* incarnation of peer */
- int ibp_connecting; /* current active connection attempts
- */
- int ibp_accepting; /* current passive connection attempts
- */
- int ibp_error; /* errno on closing this peer */
- unsigned long ibp_last_alive; /* when (in jiffies) I was last alive
- */
+ /* when (in jiffies) I was last alive */
+ unsigned long ibp_last_alive;
+ /* # users */
+ atomic_t ibp_refcount;
+ /* version of peer */
+ __u16 ibp_version;
+ /* current passive connection attempts */
+ unsigned short ibp_accepting;
+ /* current active connection attempts */
+ unsigned short ibp_connecting;
+ /* reconnect this peer later */
+ unsigned short ibp_reconnecting:1;
+ /* # consecutive reconnection attempts to this peer */
+ unsigned int ibp_reconnected;
+ /* errno on closing this peer */
+ int ibp_error;
+ /* max map_on_demand */
+ __u16 ibp_max_frags;
+ /* max_peer_credits */
+ __u16 ibp_queue_depth;
} kib_peer_t;
extern kib_data_t kiblnd_data;
@@ -611,7 +636,7 @@ kiblnd_dev_can_failover(kib_dev_t *dev)
if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
return 0;
- if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
+ if (!*kiblnd_tunables.kib_dev_failover) /* disabled */
return 0;
if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
@@ -661,6 +686,20 @@ do { \
kiblnd_destroy_peer(peer); \
} while (0)
+static inline bool
+kiblnd_peer_connecting(kib_peer_t *peer)
+{
+ return peer->ibp_connecting ||
+ peer->ibp_reconnecting ||
+ peer->ibp_accepting;
+}
+
+static inline bool
+kiblnd_peer_idle(kib_peer_t *peer)
+{
+ return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
+}
+
static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
@@ -691,7 +730,8 @@ kiblnd_send_keepalive(kib_conn_t *conn)
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
cfs_time_after(jiffies, conn->ibc_last_send +
- *kiblnd_tunables.kib_keepalive*HZ);
+ msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
+ MSEC_PER_SEC));
}
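/*
 * The keepalive test (like the tx deadline set in o2iblnd_cb.c below)
 * now converts seconds to jiffies with msecs_to_jiffies() rather than
 * multiplying by HZ inline. The two forms agree for whole seconds; the
 * helper makes the unit conversion explicit and correct for any HZ.
 * Sketch with an assumed 30 s keepalive:
 */
unsigned long old_form = conn->ibc_last_send + 30 * HZ;
unsigned long new_form = conn->ibc_last_send +
			 msecs_to_jiffies(30 * MSEC_PER_SEC);
/* old_form == new_form: both mean "30 s after the last send" */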
static inline int
@@ -710,16 +750,16 @@ kiblnd_need_noop(kib_conn_t *conn)
/* No tx to piggyback NOOP onto or no credit to send a tx */
return (list_empty(&conn->ibc_tx_queue) ||
- conn->ibc_credits == 0);
+ !conn->ibc_credits);
}
if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
!list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
- conn->ibc_credits == 0) /* no credit */
+ !conn->ibc_credits) /* no credit */
return 0;
if (conn->ibc_credits == 1 && /* last credit reserved for */
- conn->ibc_outstanding_credits == 0) /* giving back credits */
+ !conn->ibc_outstanding_credits) /* giving back credits */
return 0;
/* No tx to piggyback NOOP onto or no credit to send a tx */
@@ -755,18 +795,19 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */
/* lowest bits of the work request id to stash the work item type. */
-#define IBLND_WID_TX 0
-#define IBLND_WID_RDMA 1
-#define IBLND_WID_RX 2
-#define IBLND_WID_MASK 3UL
+#define IBLND_WID_INVAL 0
+#define IBLND_WID_TX 1
+#define IBLND_WID_RX 2
+#define IBLND_WID_RDMA 3
+#define IBLND_WID_MASK 3UL
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
unsigned long lptr = (unsigned long)ptr;
- LASSERT((lptr & IBLND_WID_MASK) == 0);
- LASSERT((type & ~IBLND_WID_MASK) == 0);
+ LASSERT(!(lptr & IBLND_WID_MASK));
+ LASSERT(!(type & ~IBLND_WID_MASK));
return (__u64)(lptr | type);
}
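/*
 * With IBLND_WID_INVAL taking value 0, a zero-filled work request id no
 * longer decodes as a valid IBLND_WID_TX. A completion handler inverts
 * the encoding roughly as below (helper names are invented for
 * illustration; only kiblnd_ptr2wreqid() and the IBLND_WID_* constants
 * come from this header):
 */
static inline int kiblnd_wreqid2type_sketch(__u64 wreqid)
{
	return wreqid & IBLND_WID_MASK;	/* INVAL, TX, RX or RDMA */
}

static inline void *kiblnd_wreqid2ptr_sketch(__u64 wreqid)
{
	return (void *)(unsigned long)(wreqid & ~IBLND_WID_MASK);
}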
@@ -907,9 +948,8 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd);
-struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
- __u64 addr, __u64 size);
+ kib_rdma_desc_t *rd,
+ int negotiated_nfrags);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
@@ -919,11 +959,6 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
-int kiblnd_startup(lnet_ni_t *ni);
-void kiblnd_shutdown(lnet_ni_t *ni);
-int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
-
int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);
@@ -933,7 +968,6 @@ int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread(void *arg);
int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
-void kiblnd_free_pages(kib_pages_t *p);
int kiblnd_cm_callback(struct rdma_cm_id *cmid,
struct rdma_cm_event *event);
@@ -942,39 +976,30 @@ int kiblnd_translate_mtu(int value);
int kiblnd_dev_failover(kib_dev_t *dev);
int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer(kib_peer_t *peer);
+bool kiblnd_reconnect_peer(kib_peer_t *peer);
void kiblnd_destroy_dev(kib_dev_t *dev);
void kiblnd_unlink_peer_locked(kib_peer_t *peer);
-void kiblnd_peer_alive(kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
-void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
- int version, __u64 incarnation);
+ int version, __u64 incarnation);
int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
-void kiblnd_connreq_done(kib_conn_t *conn, int status);
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version);
-void kiblnd_destroy_conn(kib_conn_t *conn);
+ int state, int version);
+void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn);
void kiblnd_close_conn(kib_conn_t *conn, int error);
void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
-int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-
void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
-void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
- int status);
-void kiblnd_check_sends (kib_conn_t *conn);
+ int status);
void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp);
+ int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int kiblnd_post_rx(kib_rx_t *rx, int credit);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index c7b9ccb13f1c..2323e8d3a318 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -40,6 +40,15 @@
#include "o2iblnd.h"
+static void kiblnd_peer_alive(kib_peer_t *peer);
+static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
+static void kiblnd_check_sends(kib_conn_t *conn);
+static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx,
+ int type, int body_nob);
+static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
+static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
+static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
static void
@@ -50,12 +59,12 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
int rc;
int i;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(!in_interrupt());
LASSERT(!tx->tx_queued); /* mustn't be queued for sending */
- LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */
+ LASSERT(!tx->tx_sending); /* mustn't be awaiting sent callback */
LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */
- LASSERT(tx->tx_pool != NULL);
+ LASSERT(tx->tx_pool);
kiblnd_unmap_tx(ni, tx);
@@ -64,7 +73,7 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
rc = tx->tx_status;
- if (tx->tx_conn != NULL) {
+ if (tx->tx_conn) {
LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni);
kiblnd_conn_decref(tx->tx_conn);
@@ -78,7 +87,7 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
/* delay finalize until my descs have been freed */
for (i = 0; i < 2; i++) {
- if (lntmsg[i] == NULL)
+ if (!lntmsg[i])
continue;
lnet_finalize(ni, lntmsg[i], rc);
@@ -111,19 +120,19 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
node = kiblnd_pool_alloc_node(&tps->tps_poolset);
- if (node == NULL)
+ if (!node)
return NULL;
- tx = container_of(node, kib_tx_t, tx_list);
+ tx = list_entry(node, kib_tx_t, tx_list);
- LASSERT(tx->tx_nwrq == 0);
+ LASSERT(!tx->tx_nwrq);
LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_sending == 0);
+ LASSERT(!tx->tx_sending);
LASSERT(!tx->tx_waiting);
- LASSERT(tx->tx_status == 0);
- LASSERT(tx->tx_conn == NULL);
- LASSERT(tx->tx_lntmsg[0] == NULL);
- LASSERT(tx->tx_lntmsg[1] == NULL);
- LASSERT(tx->tx_nfrags == 0);
+ LASSERT(!tx->tx_status);
+ LASSERT(!tx->tx_conn);
+ LASSERT(!tx->tx_lntmsg[0]);
+ LASSERT(!tx->tx_lntmsg[1]);
+ LASSERT(!tx->tx_nfrags);
return tx;
}
@@ -149,17 +158,15 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
kib_conn_t *conn = rx->rx_conn;
kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr;
+ struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
int rc;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(!in_interrupt());
LASSERT(credit == IBLND_POSTRX_NO_CREDIT ||
credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
-
- mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
- LASSERT(mr != NULL);
+ LASSERT(mr);
rx->rx_sge.lkey = mr->lkey;
rx->rx_sge.addr = rx->rx_msgaddr;
@@ -185,7 +192,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
*/
kiblnd_conn_addref(conn);
rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
- if (unlikely(rc != 0)) {
+ if (unlikely(rc)) {
CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
rx->rx_nob = 0;
@@ -194,7 +201,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
goto out;
- if (unlikely(rc != 0)) {
+ if (unlikely(rc)) {
kiblnd_close_conn(conn, rc);
kiblnd_drop_rx(rx); /* No more posts for this rx */
goto out;
@@ -225,7 +232,7 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
+ LASSERT(tx->tx_sending || tx->tx_waiting);
if (tx->tx_cookie != cookie)
continue;
@@ -251,7 +258,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
spin_lock(&conn->ibc_lock);
tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
- if (tx == NULL) {
+ if (!tx) {
spin_unlock(&conn->ibc_lock);
CWARN("Unmatched completion type %x cookie %#llx from %s\n",
@@ -260,7 +267,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
return;
}
- if (tx->tx_status == 0) { /* success so far */
+ if (!tx->tx_status) { /* success so far */
if (status < 0) /* failed? */
tx->tx_status = status;
else if (txtype == IBLND_MSG_GET_REQ)
@@ -269,7 +276,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
tx->tx_waiting = 0;
- idle = !tx->tx_queued && (tx->tx_sending == 0);
+ idle = !tx->tx_queued && !tx->tx_sending;
if (idle)
list_del(&tx->tx_list);
@@ -285,7 +292,7 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't get tx for completion %x for %s\n",
type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
return;
@@ -316,19 +323,18 @@ kiblnd_handle_rx(kib_rx_t *rx)
msg->ibm_type, credits,
libcfs_nid2str(conn->ibc_peer->ibp_nid));
- if (credits != 0) {
+ if (credits) {
/* Have I received credits that will let me send? */
spin_lock(&conn->ibc_lock);
if (conn->ibc_credits + credits >
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
+ conn->ibc_queue_depth) {
rc2 = conn->ibc_credits;
spin_unlock(&conn->ibc_lock);
CERROR("Bad credits from %s: %d + %d > %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc2, credits,
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+ rc2, credits, conn->ibc_queue_depth);
kiblnd_close_conn(conn, -EPROTO);
kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
@@ -360,7 +366,7 @@ kiblnd_handle_rx(kib_rx_t *rx)
break;
}
- if (credits != 0) /* credit already posted */
+ if (credits) /* credit already posted */
post_credit = IBLND_POSTRX_NO_CREDIT;
else /* a keepalive NOOP */
post_credit = IBLND_POSTRX_PEER_CREDIT;
@@ -396,12 +402,12 @@ kiblnd_handle_rx(kib_rx_t *rx)
spin_lock(&conn->ibc_lock);
tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
- msg->ibm_u.putack.ibpam_src_cookie);
- if (tx != NULL)
+ msg->ibm_u.putack.ibpam_src_cookie);
+ if (tx)
list_del(&tx->tx_list);
spin_unlock(&conn->ibc_lock);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Unmatched PUT_ACK from %s\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
rc = -EPROTO;
@@ -409,10 +415,11 @@ kiblnd_handle_rx(kib_rx_t *rx)
}
LASSERT(tx->tx_waiting);
- /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
+ /*
+ * CAVEAT EMPTOR: I could be racing with tx_complete, but...
* (a) I can overwrite tx_msg since my peer has received it!
- * (b) tx_waiting set tells tx_complete() it's not done. */
-
+ * (b) tx_waiting set tells tx_complete() it's not done.
+ */
tx->tx_nwrq = 0; /* overwrite PUT_REQ */
rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
@@ -469,7 +476,7 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
int rc;
int err = -EIO;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(rx->rx_nob < 0); /* was posted */
rx->rx_nob = 0; /* isn't now */
@@ -486,9 +493,9 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
rx->rx_nob = nob;
rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
goto failed;
}
@@ -497,7 +504,7 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
msg->ibm_srcstamp != conn->ibc_incarnation ||
msg->ibm_dststamp != net->ibn_incarnation) {
CERROR("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
err = -ESTALE;
goto failed;
}
@@ -537,7 +544,7 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page((void *)vaddr);
- LASSERT(page != NULL);
+ LASSERT(page);
return page;
}
#ifdef CONFIG_HIGHMEM
@@ -549,7 +556,7 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
}
#endif
page = virt_to_page(vaddr);
- LASSERT(page != NULL);
+ LASSERT(page);
return page;
}
@@ -565,8 +572,8 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
int rc;
int i;
- LASSERT(tx->tx_pool != NULL);
- LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+ LASSERT(tx->tx_pool);
+ LASSERT(tx->tx_pool->tpo_pool.po_owner);
hdev = tx->tx_pool->tpo_hdev;
@@ -582,13 +589,15 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
fps = net->ibn_fmr_ps[cpt];
rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't map %d pages: %d\n", npages, rc);
return rc;
}
- /* If rd is not tx_rd, it's going to get sent to a peer, who will need
- * the rkey */
+ /*
+ * If rd is not tx_rd, it's going to get sent to a peer, who will need
+ * the rkey
+ */
rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
tx->fmr.fmr_pfmr->fmr->lkey;
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
@@ -602,14 +611,14 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
kib_net_t *net = ni->ni_data;
- LASSERT(net != NULL);
+ LASSERT(net);
if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
tx->fmr.fmr_pfmr = NULL;
}
- if (tx->tx_nfrags != 0) {
+ if (tx->tx_nfrags) {
kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
tx->tx_nfrags = 0;
@@ -625,8 +634,10 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
__u32 nob;
int i;
- /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
- * RDMA sink */
+ /*
+ * If rd is not tx_rd, it's going to get sent to a peer and I'm the
+ * RDMA sink
+ */
tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
tx->tx_nfrags = nfrags;
@@ -641,15 +652,15 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
nob += rd->rd_frags[i].rf_nob;
}
- /* looking for pre-mapping MR */
- mr = kiblnd_find_rd_dma_mr(hdev, rd);
- if (mr != NULL) {
+ mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ?
+ tx->tx_conn->ibc_max_frags : -1);
+ if (mr) {
/* found pre-mapping MR */
rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
return 0;
}
- if (net->ibn_fmr_ps != NULL)
+ if (net->ibn_fmr_ps)
return kiblnd_fmr_map_tx(net, tx, rd, nob);
return -EINVAL;
@@ -668,7 +679,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
LASSERT(nob > 0);
LASSERT(niov > 0);
- LASSERT(net != NULL);
+ LASSERT(net);
while (offset >= iov->iov_len) {
offset -= iov->iov_len;
@@ -684,7 +695,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
vaddr = ((unsigned long)iov->iov_base) + offset;
page_offset = vaddr & (PAGE_SIZE - 1);
page = kiblnd_kvaddr_to_page(vaddr);
- if (page == NULL) {
+ if (!page) {
CERROR("Can't find page\n");
return -EFAULT;
}
@@ -710,7 +721,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
static int
kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- int nkiov, lnet_kiov_t *kiov, int offset, int nob)
+ int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
kib_net_t *net = ni->ni_data;
struct scatterlist *sg;
@@ -720,7 +731,7 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
LASSERT(nob > 0);
LASSERT(nkiov > 0);
- LASSERT(net != NULL);
+ LASSERT(net);
while (offset >= kiov->kiov_len) {
offset -= kiov->kiov_len;
@@ -750,26 +761,24 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
static int
kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
- __releases(conn->ibc_lock)
- __acquires(conn->ibc_lock)
+ __must_hold(&conn->ibc_lock)
{
kib_msg_t *msg = tx->tx_msg;
kib_peer_t *peer = conn->ibc_peer;
int ver = conn->ibc_version;
int rc;
int done;
- struct ib_send_wr *bad_wrq;
LASSERT(tx->tx_queued);
/* We rely on this for QP sizing */
LASSERT(tx->tx_nwrq > 0);
- LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
+ LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);
- LASSERT(credit == 0 || credit == 1);
+ LASSERT(!credit || credit == 1);
LASSERT(conn->ibc_outstanding_credits >= 0);
- LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
+ LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
LASSERT(conn->ibc_credits >= 0);
- LASSERT(conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));
+ LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
/* tx completions outstanding... */
@@ -778,13 +787,13 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
return -EAGAIN;
}
- if (credit != 0 && conn->ibc_credits == 0) { /* no credits */
+ if (credit && !conn->ibc_credits) { /* no credits */
CDEBUG(D_NET, "%s: no credits\n",
libcfs_nid2str(peer->ibp_nid));
return -EAGAIN;
}
- if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
+ if (credit && !IBLND_OOB_CAPABLE(ver) &&
conn->ibc_credits == 1 && /* last credit reserved */
msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */
CDEBUG(D_NET, "%s: not using last credit\n",
@@ -800,9 +809,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
(!kiblnd_need_noop(conn) || /* redundant NOOP */
(IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
- /* OK to drop when posted enough NOOPs, since
+ /*
+ * OK to drop when posted enough NOOPs, since
* kiblnd_check_sends will queue NOOP again when
- * posted NOOPs complete */
+ * posted NOOPs complete
+ */
spin_unlock(&conn->ibc_lock);
kiblnd_tx_done(peer->ibp_ni, tx);
spin_lock(&conn->ibc_lock);
@@ -821,12 +832,14 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
if (msg->ibm_type == IBLND_MSG_NOOP)
conn->ibc_noops_posted++;
- /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
+ /*
+ * CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
* PUT. If so, it was first queued here as a PUT_REQ, sent and
* stashed on ibc_active_txs, matched by an incoming PUT_ACK,
* and then re-queued here. It's (just) possible that
* tx_sending is non-zero if we've not done the tx_complete()
- * from the first send; hence the ++ rather than = below. */
+ * from the first send; hence the ++ rather than = below.
+ */
tx->tx_sending++;
list_add(&tx->tx_list, &conn->ibc_active_txs);
@@ -838,16 +851,25 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq);
+ struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+
+ LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+ "bad wr_id %llx, opc %d, flags %d, peer: %s\n",
+ wrq->wr_id, wrq->opcode, wrq->send_flags,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ wrq = NULL;
+ rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq);
}
conn->ibc_last_send = jiffies;
- if (rc == 0)
+ if (!rc)
return 0;
- /* NB credits are transferred in the actual
- * message, which can only be the last work item */
+ /*
+ * NB credits are transferred in the actual
+ * message, which can only be the last work item
+ */
conn->ibc_credits += credit;
conn->ibc_outstanding_credits += msg->ibm_credits;
conn->ibc_nsends_posted--;
@@ -858,7 +880,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
tx->tx_waiting = 0;
tx->tx_sending--;
- done = (tx->tx_sending == 0);
+ done = !tx->tx_sending;
if (done)
list_del(&tx->tx_list);
@@ -881,7 +903,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
return -EIO;
}
-void
+static void
kiblnd_check_sends(kib_conn_t *conn)
{
int ver = conn->ibc_version;
@@ -899,13 +921,13 @@ kiblnd_check_sends(kib_conn_t *conn)
LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
LASSERT(!IBLND_OOB_CAPABLE(ver) ||
- conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
+ conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
LASSERT(conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
!list_empty(&conn->ibc_tx_queue_rsrvd)) {
tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
+ kib_tx_t, tx_list);
list_del(&tx->tx_list);
list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
@@ -915,23 +937,21 @@ kiblnd_check_sends(kib_conn_t *conn)
spin_unlock(&conn->ibc_lock);
tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx != NULL)
+ if (tx)
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
spin_lock(&conn->ibc_lock);
- if (tx != NULL)
+ if (tx)
kiblnd_queue_tx_locked(tx, conn);
}
- kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */
-
for (;;) {
int credit;
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
+ kib_tx_t, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT(!IBLND_OOB_CAPABLE(ver));
credit = 1;
@@ -940,17 +960,16 @@ kiblnd_check_sends(kib_conn_t *conn)
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
tx = list_entry(conn->ibc_tx_queue.next,
- kib_tx_t, tx_list);
- } else
+ kib_tx_t, tx_list);
+ } else {
break;
+ }
- if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
+ if (kiblnd_post_tx_locked(conn, tx, credit))
break;
}
spin_unlock(&conn->ibc_lock);
-
- kiblnd_conn_decref(conn); /* ...until here */
}
static void
@@ -976,9 +995,10 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
spin_lock(&conn->ibc_lock);
- /* I could be racing with rdma completion. Whoever makes 'tx' idle
- * gets to free it, which also drops its ref on 'conn'. */
-
+ /*
+ * I could be racing with rdma completion. Whoever makes 'tx' idle
+ * gets to free it, which also drops its ref on 'conn'.
+ */
tx->tx_sending--;
conn->ibc_nsends_posted--;
if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
@@ -989,7 +1009,7 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
tx->tx_status = -EIO;
}
- idle = (tx->tx_sending == 0) && /* This is the final callback */
+ idle = !tx->tx_sending && /* This is the final callback */
!tx->tx_waiting && /* Not waiting for peer */
!tx->tx_queued; /* Not re-queued (PUT_DONE) */
if (idle)
@@ -1007,24 +1027,22 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
kiblnd_conn_decref(conn); /* ...until here */
}
-void
+static void
kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
- struct ib_mr *mr;
+ struct ib_mr *mr = hdev->ibh_mrs;
LASSERT(tx->tx_nwrq >= 0);
LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
LASSERT(nob <= IBLND_MSG_SIZE);
+ LASSERT(mr);
kiblnd_init_msg(tx->tx_msg, type, body_nob);
- mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
- LASSERT(mr != NULL);
-
sge->lkey = mr->lkey;
sge->addr = tx->tx_msgaddr;
sge->length = nob;
@@ -1041,25 +1059,23 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
tx->tx_nwrq++;
}
-int
+static int
kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
kib_msg_t *ibmsg = tx->tx_msg;
kib_rdma_desc_t *srcrd = tx->tx_rd;
struct ib_sge *sge = &tx->tx_sge[0];
struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
int rc = resid;
- int srcidx;
- int dstidx;
+ int srcidx = 0;
+ int dstidx = 0;
int wrknob;
LASSERT(!in_interrupt());
- LASSERT(tx->tx_nwrq == 0);
+ LASSERT(!tx->tx_nwrq);
LASSERT(type == IBLND_MSG_GET_DONE ||
- type == IBLND_MSG_PUT_DONE);
-
- srcidx = dstidx = 0;
+ type == IBLND_MSG_PUT_DONE);
while (resid > 0) {
if (srcidx >= srcrd->rd_nfrags) {
@@ -1074,10 +1090,10 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
break;
}
- if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
- CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
+ if (tx->tx_nwrq >= conn->ibc_max_frags) {
+ CERROR("RDMA has too many fragments for peer %s (%d), src idx/frags: %d/%d dst idx/frags: %d/%d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
- IBLND_RDMA_FRAGS(conn->ibc_version),
+ conn->ibc_max_frags,
srcidx, srcrd->rd_nfrags,
dstidx, dstrd->rd_nfrags);
rc = -EMSGSIZE;
@@ -1127,7 +1143,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
return rc;
}
-void
+static void
kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
{
struct list_head *q;
@@ -1137,9 +1153,11 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
tx->tx_queued = 1;
- tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
+ tx->tx_deadline = jiffies +
+ msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
+ MSEC_PER_SEC);
- if (tx->tx_conn == NULL) {
+ if (!tx->tx_conn) {
kiblnd_conn_addref(conn);
tx->tx_conn = conn;
LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
@@ -1180,7 +1198,7 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
list_add_tail(&tx->tx_list, q);
}
-void
+static void
kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)
{
spin_lock(&conn->ibc_lock);
@@ -1200,19 +1218,19 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
/* allow the port to be reused */
rc = rdma_set_reuseaddr(cmid, 1);
- if (rc != 0) {
+ if (rc) {
CERROR("Unable to set reuse on cmid: %d\n", rc);
return rc;
}
/* look for a free privileged port */
- for (port = PROT_SOCK-1; port > 0; port--) {
+ for (port = PROT_SOCK - 1; port > 0; port--) {
srcaddr->sin_port = htons(port);
rc = rdma_resolve_addr(cmid,
(struct sockaddr *)srcaddr,
(struct sockaddr *)dstaddr,
timeout_ms);
- if (rc == 0) {
+ if (!rc) {
CDEBUG(D_NET, "bound to port %hu\n", port);
return 0;
} else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
@@ -1237,8 +1255,9 @@ kiblnd_connect_peer(kib_peer_t *peer)
struct sockaddr_in dstaddr;
int rc;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(peer->ibp_connecting > 0);
+ LASSERT(!peer->ibp_reconnecting);
cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
IB_QPT_RC);
@@ -1271,14 +1290,14 @@ kiblnd_connect_peer(kib_peer_t *peer)
(struct sockaddr *)&dstaddr,
*kiblnd_tunables.kib_timeout * 1000);
}
- if (rc != 0) {
+ if (rc) {
/* Can't initiate address resolution: */
CERROR("Can't resolve addr for %s: %d\n",
libcfs_nid2str(peer->ibp_nid), rc);
goto failed2;
}
- LASSERT(cmid->device != NULL);
+ LASSERT(cmid->device);
CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
&dev->ibd_ifip, cmid->device->name);
@@ -1286,12 +1305,64 @@ kiblnd_connect_peer(kib_peer_t *peer)
return;
failed2:
+ kiblnd_peer_connect_failed(peer, 1, rc);
kiblnd_peer_decref(peer); /* cmid's ref */
rdma_destroy_id(cmid);
+ return;
failed:
kiblnd_peer_connect_failed(peer, 1, rc);
}
+bool
+kiblnd_reconnect_peer(kib_peer_t *peer)
+{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ char *reason = NULL;
+ struct list_head txs;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&txs);
+
+ write_lock_irqsave(glock, flags);
+ if (!peer->ibp_reconnecting) {
+ if (peer->ibp_accepting)
+ reason = "accepting";
+ else if (peer->ibp_connecting)
+ reason = "connecting";
+ else if (!list_empty(&peer->ibp_conns))
+ reason = "connected";
+ else /* connected then closed */
+ reason = "closed";
+
+ goto no_reconnect;
+ }
+
+ LASSERT(!peer->ibp_accepting && !peer->ibp_connecting &&
+ list_empty(&peer->ibp_conns));
+ peer->ibp_reconnecting = 0;
+
+ if (!kiblnd_peer_active(peer)) {
+ list_splice_init(&peer->ibp_tx_queue, &txs);
+ reason = "unlinked";
+ goto no_reconnect;
+ }
+
+ peer->ibp_connecting++;
+ peer->ibp_reconnected++;
+ write_unlock_irqrestore(glock, flags);
+
+ kiblnd_connect_peer(peer);
+ return true;
+
+no_reconnect:
+ write_unlock_irqrestore(glock, flags);
+
+ CWARN("Abort reconnection of %s: %s\n",
+ libcfs_nid2str(peer->ibp_nid), reason);
+ kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED);
+ return false;
+}
+
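+/*
+ * kiblnd_reconnect_peer() returns true once it has taken over the
+ * connection attempt and false when reconnection is aborted (the CWARN
+ * above). A hypothetical connd-side caller — the linkage field is an
+ * assumed choice for illustration; only kiblnd_reconnect_peer(),
+ * kiblnd_peer_decref() and kib_reconn_list appear in this patch:
+ *
+ *	while (!list_empty(&kiblnd_data.kib_reconn_list)) {
+ *		kib_peer_t *peer;
+ *
+ *		peer = list_entry(kiblnd_data.kib_reconn_list.next,
+ *				  kib_peer_t, ibp_list);  // field assumed
+ *		list_del_init(&peer->ibp_list);
+ *
+ *		kiblnd_reconnect_peer(peer);  // connects, or aborts w/ CWARN
+ *		kiblnd_peer_decref(peer);     // drop the list's reference
+ *	}
+ */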
void
kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
{
@@ -1302,25 +1373,28 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
unsigned long flags;
int rc;
- /* If I get here, I've committed to send, so I complete the tx with
- * failure on any problems */
-
- LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
- LASSERT(tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */
+ /*
+ * If I get here, I've committed to send, so I complete the tx with
+ * failure on any problems
+ */
+ LASSERT(!tx || !tx->tx_conn); /* only set when assigned a conn */
+ LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */
- /* First time, just use a read lock since I expect to find my peer
- * connected */
+ /*
+ * First time, just use a read lock since I expect to find my peer
+ * connected
+ */
read_lock_irqsave(g_lock, flags);
peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL && !list_empty(&peer->ibp_conns)) {
+ if (peer && !list_empty(&peer->ibp_conns)) {
/* Found a peer with an established connection */
conn = kiblnd_get_conn_locked(peer);
kiblnd_conn_addref(conn); /* 1 ref for me... */
read_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
+ if (tx)
kiblnd_queue_tx(tx, conn);
kiblnd_conn_decref(conn); /* ...to here */
return;
@@ -1331,14 +1405,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
write_lock(g_lock);
peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL) {
+ if (peer) {
if (list_empty(&peer->ibp_conns)) {
/* found a peer, but it's still connecting... */
- LASSERT(peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0);
- if (tx != NULL)
+ LASSERT(kiblnd_peer_connecting(peer));
+ if (tx)
list_add_tail(&tx->tx_list,
- &peer->ibp_tx_queue);
+ &peer->ibp_tx_queue);
write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer);
@@ -1346,7 +1419,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
write_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
+ if (tx)
kiblnd_queue_tx(tx, conn);
kiblnd_conn_decref(conn); /* ...to here */
}
@@ -1357,9 +1430,9 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
/* Allocate a peer ready to add to the peer table and retry */
rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
- if (tx != NULL) {
+ if (tx) {
tx->tx_status = -EHOSTUNREACH;
tx->tx_waiting = 0;
kiblnd_tx_done(ni, tx);
@@ -1370,14 +1443,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid);
- if (peer2 != NULL) {
+ if (peer2) {
if (list_empty(&peer2->ibp_conns)) {
/* found a peer, but it's still connecting... */
- LASSERT(peer2->ibp_connecting != 0 ||
- peer2->ibp_accepting != 0);
- if (tx != NULL)
+ LASSERT(kiblnd_peer_connecting(peer2));
+ if (tx)
list_add_tail(&tx->tx_list,
- &peer2->ibp_tx_queue);
+ &peer2->ibp_tx_queue);
write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer2);
@@ -1385,7 +1457,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
write_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
+ if (tx)
kiblnd_queue_tx(tx, conn);
kiblnd_conn_decref(conn); /* ...to here */
}
@@ -1395,13 +1467,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
}
/* Brand new peer */
- LASSERT(peer->ibp_connecting == 0);
+ LASSERT(!peer->ibp_connecting);
peer->ibp_connecting = 1;
/* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+ LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown);
- if (tx != NULL)
+ if (tx)
list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
kiblnd_peer_addref(peer);
@@ -1437,13 +1509,13 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT(payload_nob == 0 || payload_niov > 0);
+ LASSERT(!payload_nob || payload_niov > 0);
LASSERT(payload_niov <= LNET_MAX_IOV);
/* Thread context */
LASSERT(!in_interrupt());
/* payload is either all vaddrs or all pages */
- LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT(!(payload_kiov && payload_iov));
switch (type) {
default:
@@ -1451,7 +1523,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
return -EIO;
case LNET_MSG_ACK:
- LASSERT(payload_nob == 0);
+ LASSERT(!payload_nob);
break;
case LNET_MSG_GET:
@@ -1464,7 +1536,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
break; /* send IMMEDIATE */
tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate txd for GET to %s\n",
libcfs_nid2str(target.nid));
return -ENOMEM;
@@ -1472,7 +1544,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
ibmsg = tx->tx_msg;
rd = &ibmsg->ibm_u.get.ibgm_rd;
- if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
+ if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV))
rc = kiblnd_setup_rd_iov(ni, tx, rd,
lntmsg->msg_md->md_niov,
lntmsg->msg_md->md_iov.iov,
@@ -1482,7 +1554,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
lntmsg->msg_md->md_niov,
lntmsg->msg_md->md_iov.kiov,
0, lntmsg->msg_md->md_length);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup GET sink for %s: %d\n",
libcfs_nid2str(target.nid), rc);
kiblnd_tx_done(ni, tx);
@@ -1496,7 +1568,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
- if (tx->tx_lntmsg[1] == NULL) {
+ if (!tx->tx_lntmsg[1]) {
CERROR("Can't create reply for GET -> %s\n",
libcfs_nid2str(target.nid));
kiblnd_tx_done(ni, tx);
@@ -1516,14 +1588,14 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
break; /* send IMMEDIATE */
tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate %s txd for %s\n",
type == LNET_MSG_PUT ? "PUT" : "REPLY",
libcfs_nid2str(target.nid));
return -ENOMEM;
}
- if (payload_kiov == NULL)
+ if (!payload_kiov)
rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
payload_niov, payload_iov,
payload_offset, payload_nob);
@@ -1531,7 +1603,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
payload_niov, payload_kiov,
payload_offset, payload_nob);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup PUT src for %s: %d\n",
libcfs_nid2str(target.nid), rc);
kiblnd_tx_done(ni, tx);
@@ -1555,16 +1627,16 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
<= IBLND_MSG_SIZE);
tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't send %d to %s: tx descs exhausted\n",
- type, libcfs_nid2str(target.nid));
+ type, libcfs_nid2str(target.nid));
return -ENOMEM;
}
ibmsg = tx->tx_msg;
ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
- if (payload_kiov != NULL)
+ if (payload_kiov)
lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
payload_niov, payload_kiov,
@@ -1596,22 +1668,22 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
int rc;
tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't get tx for REPLY to %s\n",
libcfs_nid2str(target.nid));
goto failed_0;
}
- if (nob == 0)
+ if (!nob)
rc = 0;
- else if (kiov == NULL)
+ else if (!kiov)
rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
niov, iov, offset, nob);
else
rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
niov, kiov, offset, nob);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup GET src for %s: %d\n",
libcfs_nid2str(target.nid), rc);
goto failed_1;
@@ -1627,12 +1699,11 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
goto failed_1;
}
- if (nob == 0) {
+ if (!nob) {
/* No RDMA: local completion may happen now! */
lnet_finalize(ni, lntmsg, 0);
} else {
- /* RDMA: lnet_finalize(lntmsg) when it
- * completes */
+ /* RDMA: lnet_finalize(lntmsg) when it completes */
tx->tx_lntmsg[0] = lntmsg;
}
@@ -1647,8 +1718,8 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
int
kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kib_rx_t *rx = private;
kib_msg_t *rxmsg = rx->rx_msg;
@@ -1661,7 +1732,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
LASSERT(mlen <= rlen);
LASSERT(!in_interrupt());
/* Either all pages or all vaddrs */
- LASSERT(!(kiov != NULL && iov != NULL));
+ LASSERT(!(kiov && iov));
switch (rxmsg->ibm_type) {
default:
@@ -1671,13 +1742,13 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
if (nob > rx->rx_nob) {
CERROR("Immediate message from %s too big: %d(%d)\n",
- libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
- nob, rx->rx_nob);
+ libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
+ nob, rx->rx_nob);
rc = -EPROTO;
break;
}
- if (kiov != NULL)
+ if (kiov)
lnet_copy_flat2kiov(niov, kiov, offset,
IBLND_MSG_SIZE, rxmsg,
offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
@@ -1694,7 +1765,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
kib_msg_t *txmsg;
kib_rdma_desc_t *rd;
- if (mlen == 0) {
+ if (!mlen) {
lnet_finalize(ni, lntmsg, 0);
kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
rxmsg->ibm_u.putreq.ibprm_cookie);
@@ -1702,7 +1773,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
}
tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate tx for %s\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
/* Not replying will break the connection */
@@ -1712,13 +1783,13 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
txmsg = tx->tx_msg;
rd = &txmsg->ibm_u.putack.ibpam_rd;
- if (kiov == NULL)
+ if (!kiov)
rc = kiblnd_setup_rd_iov(ni, tx, rd,
niov, iov, offset, mlen);
else
rc = kiblnd_setup_rd_kiov(ni, tx, rd,
niov, kiov, offset, mlen);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup PUT sink for %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
kiblnd_tx_done(ni, tx);
@@ -1744,7 +1815,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
}
case IBLND_MSG_GET_REQ:
- if (lntmsg != NULL) {
+ if (lntmsg) {
/* Optimized GET; RDMA lntmsg's payload */
kiblnd_reply(ni, rx, lntmsg);
} else {
@@ -1778,7 +1849,7 @@ kiblnd_thread_fini(void)
atomic_dec(&kiblnd_data.kib_nthreads);
}
-void
+static void
kiblnd_peer_alive(kib_peer_t *peer)
{
/* This is racy, but everyone's only writing cfs_time_current() */
@@ -1795,10 +1866,7 @@ kiblnd_peer_notify(kib_peer_t *peer)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (list_empty(&peer->ibp_conns) &&
- peer->ibp_accepting == 0 &&
- peer->ibp_connecting == 0 &&
- peer->ibp_error != 0) {
+ if (kiblnd_peer_idle(peer) && peer->ibp_error) {
error = peer->ibp_error;
peer->ibp_error = 0;
@@ -1807,7 +1875,7 @@ kiblnd_peer_notify(kib_peer_t *peer)
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (error != 0)
+ if (error)
lnet_notify(peer->ibp_ni,
peer->ibp_nid, 0, last_alive);
}
@@ -1815,25 +1883,27 @@ kiblnd_peer_notify(kib_peer_t *peer)
void
kiblnd_close_conn_locked(kib_conn_t *conn, int error)
{
- /* This just does the immediate housekeeping. 'error' is zero for a
+ /*
+ * This just does the immediate housekeeping. 'error' is zero for a
* normal shutdown which can happen only after the connection has been
* established. If the connection is established, schedule the
- * connection to be finished off by the connd. Otherwise the connd is
+ * connection to be finished off by the connd. Otherwise the connd is
* already dealing with it (either to set it up or tear it down).
- * Caller holds kib_global_lock exclusively in irq context */
+ * Caller holds kib_global_lock exclusively in irq context
+ */
kib_peer_t *peer = conn->ibc_peer;
kib_dev_t *dev;
unsigned long flags;
- LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- if (error != 0 && conn->ibc_comms_error == 0)
+ if (error && !conn->ibc_comms_error)
conn->ibc_comms_error = error;
if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
return; /* already being handled */
- if (error == 0 &&
+ if (!error &&
list_empty(&conn->ibc_tx_noops) &&
list_empty(&conn->ibc_tx_queue) &&
list_empty(&conn->ibc_tx_queue_rsrvd) &&
@@ -1843,12 +1913,12 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
libcfs_nid2str(peer->ibp_nid));
} else {
CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
- libcfs_nid2str(peer->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+ libcfs_nid2str(peer->ibp_nid), error,
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+ list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
@@ -1865,7 +1935,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
- if (error != 0 &&
+ if (error &&
kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
@@ -1929,8 +1999,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
if (txs == &conn->ibc_active_txs) {
LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_waiting ||
- tx->tx_sending != 0);
+ LASSERT(tx->tx_waiting || tx->tx_sending);
} else {
LASSERT(tx->tx_queued);
}
@@ -1938,7 +2007,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
tx->tx_status = -ECONNABORTED;
tx->tx_waiting = 0;
- if (tx->tx_sending == 0) {
+ if (!tx->tx_sending) {
tx->tx_queued = 0;
list_del(&tx->tx_list);
list_add(&tx->tx_list, &zombies);
@@ -1958,14 +2027,17 @@ kiblnd_finalise_conn(kib_conn_t *conn)
kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
- /* abort_receives moves QP state to IB_QPS_ERR. This is only required
+ /*
+ * abort_receives moves QP state to IB_QPS_ERR. This is only required
* for connections that didn't get as far as being connected, because
- * rdma_disconnect() does this for free. */
+ * rdma_disconnect() does this for free.
+ */
kiblnd_abort_receives(conn);
- /* Complete all tx descs not waiting for sends to complete.
- * NB we should be safe from RDMA now that the QP has changed state */
-
+ /*
+ * Complete all tx descs not waiting for sends to complete.
+ * NB we should be safe from RDMA now that the QP has changed state
+ */
kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
@@ -1975,13 +2047,13 @@ kiblnd_finalise_conn(kib_conn_t *conn)
kiblnd_handle_early_rxs(conn);
}
-void
+static void
kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
{
LIST_HEAD(zombies);
unsigned long flags;
- LASSERT(error != 0);
+ LASSERT(error);
LASSERT(!in_interrupt());
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -1994,14 +2066,14 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
peer->ibp_accepting--;
}
- if (peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0) {
+ if (kiblnd_peer_connecting(peer)) {
/* another connection attempt under way... */
write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
+ flags);
return;
}
+ peer->ibp_reconnected = 0;
if (list_empty(&peer->ibp_conns)) {
/* Take peer's blocked transmits to complete with error */
list_add(&zombies, &peer->ibp_tx_queue);
@@ -2029,7 +2101,7 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
}
-void
+static void
kiblnd_connreq_done(kib_conn_t *conn, int status)
{
kib_peer_t *peer = conn->ibc_peer;
@@ -2047,14 +2119,14 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
LASSERT(!in_interrupt());
LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
- peer->ibp_connecting > 0) ||
+ peer->ibp_connecting > 0) ||
(conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
- peer->ibp_accepting > 0));
+ peer->ibp_accepting > 0));
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
conn->ibc_connvars = NULL;
- if (status != 0) {
+ if (status) {
/* failed to establish connection */
kiblnd_peer_connect_failed(peer, active, status);
kiblnd_finalise_conn(conn);
@@ -2068,16 +2140,19 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
kiblnd_peer_alive(peer);
- /* Add conn to peer's list and nuke any dangling conns from a different
- * peer instance... */
+ /*
+ * Add conn to peer's list and nuke any dangling conns from a different
+ * peer instance...
+ */
kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
list_add(&conn->ibc_list, &peer->ibp_conns);
+ peer->ibp_reconnected = 0;
if (active)
peer->ibp_connecting--;
else
peer->ibp_accepting--;
- if (peer->ibp_version == 0) {
+ if (!peer->ibp_version) {
peer->ibp_version = conn->ibc_version;
peer->ibp_incarnation = conn->ibc_incarnation;
}
@@ -2095,7 +2170,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
list_del_init(&peer->ibp_tx_queue);
if (!kiblnd_peer_active(peer) || /* peer has been deleted */
- conn->ibc_comms_error != 0) { /* error has happened already */
+ conn->ibc_comms_error) { /* error has happened already */
lnet_ni_t *ni = peer->ibp_ni;
/* start to shut down connection */
@@ -2107,6 +2182,16 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
return;
}
+ /*
+ * refcount taken by cmid is not reliable after I released the glock
+ * because this connection is visible to other threads now, another
+ * thread can find and close this connection right after I released
+ * the glock, if kiblnd_cm_callback for RDMA_CM_EVENT_DISCONNECTED is
+ * called, it can release the connection refcount taken by cmid.
+ * It means the connection could be destroyed before I finish my
+ * operations on it.
+ */
+ kiblnd_conn_addref(conn);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* Schedule blocked txs */
@@ -2122,6 +2207,8 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
/* schedule blocked rxs */
kiblnd_handle_early_rxs(conn);
+
+ kiblnd_conn_decref(conn);
}
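/*
 * The addref/decref pair added above is the usual pin-across-unlock
 * pattern: once kib_global_lock is dropped the connection is visible to
 * other threads and the cmid's reference can be released behind our
 * back, so the function pins the conn for the rest of its work. In
 * general form (a generic sketch, not a new o2iblnd API):
 */
kiblnd_conn_addref(conn);	/* pin: conn can't vanish under us */
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* ... conn may be found and closed by other threads here ... */
kiblnd_conn_decref(conn);	/* unpin: may drop the final reference */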
static void
@@ -2131,7 +2218,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
rc = rdma_reject(cmid, rej, sizeof(*rej));
- if (rc != 0)
+ if (rc)
CWARN("Error %d sending reject\n", rc);
}
@@ -2159,14 +2246,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
/* cmid inherits 'context' from the corresponding listener id */
ibdev = (kib_dev_t *)cmid->context;
- LASSERT(ibdev != NULL);
+ LASSERT(ibdev);
memset(&rej, 0, sizeof(rej));
rej.ibr_magic = IBLND_MSG_MAGIC;
rej.ibr_why = IBLND_REJECT_FATAL;
rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
- peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
+ peer_addr = (struct sockaddr_in *)&cmid->route.addr.dst_addr;
if (*kiblnd_tunables.kib_require_priv_port &&
ntohs(peer_addr->sin_port) >= PROT_SOCK) {
__u32 ip = ntohl(peer_addr->sin_addr.s_addr);
@@ -2181,12 +2268,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- /* Future protocol version compatibility support! If the
+ /*
+ * Future protocol version compatibility support! If the
* o2iblnd-specific protocol changes, or when LNET unifies
* protocols over all LNDs, the initial connection will
* negotiate a protocol version. I trap this here to avoid
* console errors; the reject tells the peer which protocol I
- * speak. */
+ * speak.
+ */
if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
goto failed;
@@ -2200,7 +2289,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
rc = kiblnd_unpack_msg(reqmsg, priv_nob);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't parse connection request: %d\n", rc);
goto failed;
}
@@ -2208,17 +2297,17 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
nid = reqmsg->ibm_srcnid;
ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
- if (ni != NULL) {
+ if (ni) {
net = (kib_net_t *)ni->ni_data;
rej.ibr_incarnation = net->ibn_incarnation;
}
- if (ni == NULL || /* no matching net */
+ if (!ni || /* no matching net */
ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
+ CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
libcfs_nid2str(nid),
- ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
+ !ni ? "NA" : libcfs_nid2str(ni->ni_nid),
ibdev->ibd_ifname, ibdev->ibd_nnets,
&ibdev->ibd_ifip,
libcfs_nid2str(reqmsg->ibm_dstnid));
@@ -2227,7 +2316,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
/* check time stamp as soon as possible */
- if (reqmsg->ibm_dststamp != 0 &&
+ if (reqmsg->ibm_dststamp &&
reqmsg->ibm_dststamp != net->ibn_incarnation) {
CWARN("Stale connection request\n");
rej.ibr_why = IBLND_REJECT_CONN_STALE;
@@ -2243,10 +2332,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
+ if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
IBLND_MSG_QUEUE_SIZE(version)) {
- CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
- libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
+ CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
+ libcfs_nid2str(nid),
+ reqmsg->ibm_u.connparams.ibcp_queue_depth,
IBLND_MSG_QUEUE_SIZE(version));
if (version == IBLND_MSG_VERSION)
@@ -2255,18 +2345,28 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
+ if (reqmsg->ibm_u.connparams.ibcp_max_frags >
IBLND_RDMA_FRAGS(version)) {
- CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
- libcfs_nid2str(nid), version,
- reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
+ CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
+ libcfs_nid2str(nid), version,
+ reqmsg->ibm_u.connparams.ibcp_max_frags,
+ IBLND_RDMA_FRAGS(version));
- if (version == IBLND_MSG_VERSION)
+ if (version >= IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
+ } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
+ IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) {
+ CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
+ libcfs_nid2str(nid), version,
+ reqmsg->ibm_u.connparams.ibcp_max_frags,
+ IBLND_RDMA_FRAGS(version));
+
+ if (version >= IBLND_MSG_VERSION)
+ rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+ goto failed;
}
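The two checks above were deliberately relaxed from exact equality to upper bounds: a peer may now advertise a smaller queue depth or fragment count, and the acceptor adopts the peer's value instead of rejecting it. A hedged sketch of that negotiation rule, with illustrative field names rather than the real kib_* structures:

/*
 * Illustrative negotiation: accept anything not above our own limit
 * and adopt the peer's smaller value; reject only genuine excess.
 * Mirrors the '>' tests above, but these are not the o2iblnd structs.
 */
struct conn_params {
        int queue_depth;
        int max_frags;
};

static int negotiate(const struct conn_params *ours,
                     const struct conn_params *theirs,
                     struct conn_params *conn)
{
        if (theirs->queue_depth > ours->queue_depth ||
            theirs->max_frags > ours->max_frags)
                return -1;      /* caller sends the reject message */

        *conn = *theirs;        /* adopt the peer's (smaller) limits */
        return 0;
}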
if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
@@ -2279,17 +2379,21 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
/* assume 'nid' is a new peer; create */
rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
goto failed;
}
+ /* We have validated the peer's parameters so use those */
+ peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
+ peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
+
write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid);
- if (peer2 != NULL) {
- if (peer2->ibp_version == 0) {
+ if (peer2) {
+ if (!peer2->ibp_version) {
peer2->ibp_version = version;
peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
}
@@ -2298,10 +2402,16 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
peer2->ibp_version != version) {
kiblnd_close_peer_conns_locked(peer2, -ESTALE);
+
+ if (kiblnd_peer_active(peer2)) {
+ peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+ peer2->ibp_version = version;
+ }
write_unlock_irqrestore(g_lock, flags);
- CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
- libcfs_nid2str(nid), peer2->ibp_version, version);
+ CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
+ libcfs_nid2str(nid), peer2->ibp_version, version,
+ peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
kiblnd_peer_decref(peer);
rej.ibr_why = IBLND_REJECT_CONN_STALE;
@@ -2309,7 +2419,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
/* tie-break connection race in favour of the higher NID */
- if (peer2->ibp_connecting != 0 &&
+ if (peer2->ibp_connecting &&
nid < ni->ni_nid) {
write_unlock_irqrestore(g_lock, flags);
@@ -2320,24 +2430,37 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
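When two peers connect to each other simultaneously, both must deterministically agree on which attempt to drop; the code above resolves the race in favour of the attempt initiated by the higher NID. A self-contained sketch of the rule, assuming NIDs compare as plain 64-bit integers:

#include <stdbool.h>
#include <stdint.h>

/*
 * Both peers apply the same deterministic rule, so exactly one of the
 * two racing attempts survives: the passive (incoming) request loses
 * whenever the remote NID is the lower of the pair.
 */
static bool reject_passive_attempt(uint64_t remote_nid, uint64_t my_nid)
{
        return remote_nid < my_nid;
}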
+ /*
+ * a passive connection is allowed even when this peer is waiting
+ * for reconnection.
+ */
+ peer2->ibp_reconnecting = 0;
peer2->ibp_accepting++;
kiblnd_peer_addref(peer2);
+ /*
+ * We may be racing with kiblnd_launch_tx (active connect) to
+ * create the peer, so copy the validated parameters now that we
+ * know the peer's limits.
+ */
+ peer2->ibp_max_frags = peer->ibp_max_frags;
+ peer2->ibp_queue_depth = peer->ibp_queue_depth;
+
write_unlock_irqrestore(g_lock, flags);
kiblnd_peer_decref(peer);
peer = peer2;
} else {
/* Brand new peer */
- LASSERT(peer->ibp_accepting == 0);
- LASSERT(peer->ibp_version == 0 &&
- peer->ibp_incarnation == 0);
+ LASSERT(!peer->ibp_accepting);
+ LASSERT(!peer->ibp_version &&
+ !peer->ibp_incarnation);
peer->ibp_accepting = 1;
peer->ibp_version = version;
peer->ibp_incarnation = reqmsg->ibm_srcstamp;
/* I have a ref on ni that prevents it being shutdown */
- LASSERT(net->ibn_shutdown == 0);
+ LASSERT(!net->ibn_shutdown);
kiblnd_peer_addref(peer);
list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
@@ -2345,31 +2468,33 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
write_unlock_irqrestore(g_lock, flags);
}
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
- if (conn == NULL) {
+ conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT,
+ version);
+ if (!conn) {
kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
kiblnd_peer_decref(peer);
rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
goto failed;
}
- /* conn now "owns" cmid, so I return success from here on to ensure the
- * CM callback doesn't destroy cmid. */
-
+ /*
+ * conn now "owns" cmid, so I return success from here on to ensure the
+ * CM callback doesn't destroy cmid.
+ */
conn->ibc_incarnation = reqmsg->ibm_srcstamp;
- conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version);
- conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
- LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
- <= IBLND_RX_MSGS(version));
+ conn->ibc_credits = conn->ibc_queue_depth;
+ conn->ibc_reserved_credits = conn->ibc_queue_depth;
+ LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+ IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
ackmsg = &conn->ibc_connvars->cv_msg;
memset(ackmsg, 0, sizeof(*ackmsg));
kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
sizeof(ackmsg->ibm_u.connparams));
- ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
+ ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
+ ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
- ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
@@ -2385,7 +2510,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
rc = rdma_accept(cmid, &cp);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
rej.ibr_version = version;
rej.ibr_why = IBLND_REJECT_FATAL;
@@ -2399,7 +2524,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
return 0;
failed:
- if (ni != NULL)
+ if (ni)
lnet_ni_decref(ni);
rej.ibr_version = version;
@@ -2411,45 +2536,82 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
static void
-kiblnd_reconnect(kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(kib_conn_t *conn, int version,
+ __u64 incarnation, int why, kib_connparams_t *cp)
{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_peer_t *peer = conn->ibc_peer;
char *reason;
- int retry = 0;
+ int msg_size = IBLND_MSG_SIZE;
+ int frag_num = -1;
+ int queue_dep = -1;
+ bool reconnect;
unsigned long flags;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */
+ LASSERT(!peer->ibp_reconnecting);
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ if (cp) {
+ msg_size = cp->ibcp_max_msg_size;
+ frag_num = cp->ibcp_max_frags;
+ queue_dep = cp->ibcp_queue_depth;
+ }
- /* retry connection if it's still needed and no other connection
+ write_lock_irqsave(glock, flags);
+ /*
+ * retry connection if it's still needed and no other connection
* attempts (active or passive) are in progress
* NB: reconnect is still needed even when ibp_tx_queue is
* empty if ibp_version != version because reconnect may be
- * initiated by kiblnd_query() */
- if ((!list_empty(&peer->ibp_tx_queue) ||
- peer->ibp_version != version) &&
- peer->ibp_connecting == 1 &&
- peer->ibp_accepting == 0) {
- retry = 1;
- peer->ibp_connecting++;
-
- peer->ibp_version = version;
- peer->ibp_incarnation = incarnation;
+ * initiated by kiblnd_query()
+ */
+ reconnect = (!list_empty(&peer->ibp_tx_queue) ||
+ peer->ibp_version != version) &&
+ peer->ibp_connecting == 1 &&
+ !peer->ibp_accepting;
+ if (!reconnect) {
+ reason = "no need";
+ goto out;
}
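The old retry flag is collapsed here into a single reconnect predicate evaluated under the global lock. A small sketch of the same decision; the struct below is illustrative, not the real kib_peer_t:

#include <stdbool.h>

struct peer_state {
        bool tx_queued;         /* !list_empty(&ibp_tx_queue) */
        int version;            /* currently negotiated version */
        int connecting;         /* active attempts in flight */
        int accepting;          /* passive attempts in flight */
};

/*
 * Retry only if ours was the sole active attempt, nothing passive is
 * racing with it, and either traffic is queued or the peer is moving
 * to a new protocol version (kiblnd_query() may drive the latter).
 */
static bool should_reconnect(const struct peer_state *p, int new_version)
{
        return (p->tx_queued || p->version != new_version) &&
               p->connecting == 1 && !p->accepting;
}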
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (!retry)
- return;
-
switch (why) {
default:
reason = "Unknown";
break;
+ case IBLND_REJECT_RDMA_FRAGS:
+ if (!cp) {
+ reason = "can't negotiate max frags";
+ goto out;
+ }
+ if (!*kiblnd_tunables.kib_map_on_demand) {
+ reason = "map_on_demand must be enabled";
+ goto out;
+ }
+ if (conn->ibc_max_frags <= frag_num) {
+ reason = "unsupported max frags";
+ goto out;
+ }
+
+ peer->ibp_max_frags = frag_num;
+ reason = "rdma fragments";
+ break;
+
+ case IBLND_REJECT_MSG_QUEUE_SIZE:
+ if (!cp) {
+ reason = "can't negotiate queue depth";
+ goto out;
+ }
+ if (conn->ibc_queue_depth <= queue_dep) {
+ reason = "unsupported queue depth";
+ goto out;
+ }
+
+ peer->ibp_queue_depth = queue_dep;
+ reason = "queue depth";
+ break;
+
case IBLND_REJECT_CONN_STALE:
reason = "stale";
break;
@@ -2463,14 +2625,24 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
break;
}
- CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- reason, IBLND_MSG_VERSION, version,
- cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
- cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version),
- cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
+ conn->ibc_reconnect = 1;
+ peer->ibp_reconnecting = 1;
+ peer->ibp_version = version;
+ if (incarnation)
+ peer->ibp_incarnation = incarnation;
+out:
+ write_unlock_irqrestore(glock, flags);
- kiblnd_connect_peer(peer);
+ CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ reconnect ? "reconnect" : "don't reconnect",
+ reason, IBLND_MSG_VERSION, version, msg_size,
+ conn->ibc_queue_depth, queue_dep,
+ conn->ibc_max_frags, frag_num);
+ /*
+ * if conn::ibc_reconnect is set, connd will reconnect to the peer
+ * while destroying the zombie
+ */
}
static void
@@ -2483,8 +2655,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
switch (reason) {
case IB_CM_REJ_STALE_CONN:
- kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_CONN_STALE, NULL);
+ kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
+ IBLND_REJECT_CONN_STALE, NULL);
break;
case IB_CM_REJ_INVALID_SERVICE_ID:
@@ -2521,9 +2693,11 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
if (priv_nob >= sizeof(kib_rej_t) &&
rej->ibr_version > IBLND_MSG_VERSION_1) {
- /* priv_nob is always 148 in current version
+ /*
+ * priv_nob is always 148 in current version
* of OFED, so we still need to check version.
- * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
+ * (define of IB_CM_REJ_PRIVATE_DATA_SIZE)
+ */
cp = &rej->ibr_cp;
if (flip) {
@@ -2564,24 +2738,11 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
case IBLND_REJECT_CONN_RACE:
case IBLND_REJECT_CONN_STALE:
case IBLND_REJECT_CONN_UNCOMPAT:
- kiblnd_reconnect(conn, rej->ibr_version,
- incarnation, rej->ibr_why, cp);
- break;
-
case IBLND_REJECT_MSG_QUEUE_SIZE:
- CERROR("%s rejected: incompatible message queue depth %d, %d\n",
- libcfs_nid2str(peer->ibp_nid),
- cp != NULL ? cp->ibcp_queue_depth :
- IBLND_MSG_QUEUE_SIZE(rej->ibr_version),
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
- break;
-
case IBLND_REJECT_RDMA_FRAGS:
- CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
- libcfs_nid2str(peer->ibp_nid),
- cp != NULL ? cp->ibcp_max_frags :
- IBLND_RDMA_FRAGS(rej->ibr_version),
- IBLND_RDMA_FRAGS(conn->ibc_version));
+ kiblnd_check_reconnect(conn, rej->ibr_version,
+ incarnation,
+ rej->ibr_why, cp);
break;
case IBLND_REJECT_NO_RESOURCES:
@@ -2623,9 +2784,9 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
int rc = kiblnd_unpack_msg(msg, priv_nob);
unsigned long flags;
- LASSERT(net != NULL);
+ LASSERT(net);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't unpack connack from %s: %d\n",
libcfs_nid2str(peer->ibp_nid), rc);
goto failed;
@@ -2645,22 +2806,22 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
goto failed;
}
- if (msg->ibm_u.connparams.ibcp_queue_depth !=
- IBLND_MSG_QUEUE_SIZE(ver)) {
- CERROR("%s has incompatible queue depth %d(%d wanted)\n",
+ if (msg->ibm_u.connparams.ibcp_queue_depth >
+ conn->ibc_queue_depth) {
+ CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
libcfs_nid2str(peer->ibp_nid),
msg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(ver));
+ conn->ibc_queue_depth);
rc = -EPROTO;
goto failed;
}
- if (msg->ibm_u.connparams.ibcp_max_frags !=
- IBLND_RDMA_FRAGS(ver)) {
- CERROR("%s has incompatible max_frags %d (%d wanted)\n",
+ if (msg->ibm_u.connparams.ibcp_max_frags >
+ conn->ibc_max_frags) {
+ CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
libcfs_nid2str(peer->ibp_nid),
msg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(ver));
+ conn->ibc_max_frags);
rc = -EPROTO;
goto failed;
}
@@ -2682,7 +2843,7 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
rc = -ESTALE;
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (rc != 0) {
+ if (rc) {
CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
libcfs_nid2str(peer->ibp_nid), rc,
msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
@@ -2690,21 +2851,24 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
}
conn->ibc_incarnation = msg->ibm_srcstamp;
- conn->ibc_credits =
- conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
- LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
- <= IBLND_RX_MSGS(ver));
+ conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags;
+ LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+ IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
kiblnd_connreq_done(conn, 0);
return;
failed:
- /* NB My QP has already established itself, so I handle anything going
+ /*
+ * NB My QP has already established itself, so I handle anything going
* wrong here by setting ibc_comms_error.
* kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
- * immediately tears it down. */
-
- LASSERT(rc != 0);
+ * immediately tears it down.
+ */
+ LASSERT(rc);
conn->ibc_comms_error = rc;
kiblnd_connreq_done(conn, 0);
}
@@ -2724,28 +2888,30 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
incarnation = peer->ibp_incarnation;
- version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
- peer->ibp_version;
+ version = !peer->ibp_version ? IBLND_MSG_VERSION :
+ peer->ibp_version;
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
- if (conn == NULL) {
+ conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
+ version);
+ if (!conn) {
kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
kiblnd_peer_decref(peer); /* lose cmid's ref */
return -ENOMEM;
}
- /* conn "owns" cmid now, so I return success from here on to ensure the
+ /*
+ * conn "owns" cmid now, so I return success from here on to ensure the
* CM callback doesn't destroy cmid. conn also takes over cmid's ref
- * on peer */
-
+ * on peer
+ */
msg = &conn->ibc_connvars->cv_msg;
memset(msg, 0, sizeof(*msg));
kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
- msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
+ msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
+ msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
kiblnd_pack_msg(peer->ibp_ni, msg, version,
@@ -2764,7 +2930,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
LASSERT(conn->ibc_cmid == cmid);
rc = rdma_connect(cmid, &cp);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't connect to %s: %d\n",
libcfs_nid2str(peer->ibp_nid), rc);
kiblnd_connreq_done(conn, rc);
@@ -2798,10 +2964,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ADDR_ERROR:
peer = (kib_peer_t *)cmid->context;
CNETERR("%s: ADDR ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer);
- return -EHOSTUNREACH; /* rc != 0 destroys cmid */
+ return -EHOSTUNREACH; /* rc destroys cmid */
case RDMA_CM_EVENT_ADDR_RESOLVED:
peer = (kib_peer_t *)cmid->context;
@@ -2809,14 +2975,14 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
CDEBUG(D_NET, "%s Addr resolved: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
- if (event->status != 0) {
+ if (event->status) {
CNETERR("Can't resolve address for %s: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
rc = event->status;
} else {
rc = rdma_resolve_route(
cmid, *kiblnd_tunables.kib_timeout * 1000);
- if (rc == 0)
+ if (!rc)
return 0;
/* Can't initiate route resolution */
CERROR("Can't resolve route for %s: %d\n",
@@ -2824,7 +2990,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
}
kiblnd_peer_connect_failed(peer, 1, rc);
kiblnd_peer_decref(peer);
- return rc; /* rc != 0 destroys cmid */
+ return rc; /* rc destroys cmid */
case RDMA_CM_EVENT_ROUTE_ERROR:
peer = (kib_peer_t *)cmid->context;
@@ -2832,28 +2998,28 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer);
- return -EHOSTUNREACH; /* rc != 0 destroys cmid */
+ return -EHOSTUNREACH; /* rc destroys cmid */
case RDMA_CM_EVENT_ROUTE_RESOLVED:
peer = (kib_peer_t *)cmid->context;
CDEBUG(D_NET, "%s Route resolved: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
- if (event->status == 0)
+ if (!event->status)
return kiblnd_active_connect(cmid);
CNETERR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, event->status);
kiblnd_peer_decref(peer);
- return event->status; /* rc != 0 destroys cmid */
+ return event->status; /* rc destroys cmid */
case RDMA_CM_EVENT_UNREACHABLE:
conn = (kib_conn_t *)cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: UNREACHABLE %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
kiblnd_connreq_done(conn, -ENETDOWN);
kiblnd_conn_decref(conn);
return 0;
@@ -2876,8 +3042,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
case IBLND_CONN_PASSIVE_WAIT:
CERROR("%s: REJECTED %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- event->status);
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status);
kiblnd_connreq_done(conn, -ECONNRESET);
break;
@@ -2933,8 +3099,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
LCONSOLE_ERROR_MSG(0x131,
"Received notification of device removal\n"
"Please shutdown LNET to allow this to proceed\n");
- /* Can't remove network from underneath LNET for now, so I have
- * to ignore this */
+ /*
+ * Can't remove network from underneath LNET for now, so I have
+ * to ignore this
+ */
return 0;
case RDMA_CM_EVENT_ADDR_CHANGE:
@@ -2956,7 +3124,7 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
LASSERT(tx->tx_queued);
} else {
LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+ LASSERT(tx->tx_waiting || tx->tx_sending);
}
if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
@@ -2989,13 +3157,16 @@ kiblnd_check_conns(int idx)
struct list_head *ptmp;
kib_peer_t *peer;
kib_conn_t *conn;
+ kib_conn_t *temp;
kib_conn_t *tmp;
struct list_head *ctmp;
unsigned long flags;
- /* NB. We expect to have a look at all the peers and not find any
+ /*
+ * NB. We expect to have a look at all the peers and not find any
* RDMAs to time out, so we just use a shared lock while we
- * take a look... */
+ * take a look...
+ */
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
list_for_each(ptmp, peers) {
@@ -3028,8 +3199,7 @@ kiblnd_check_conns(int idx)
conn->ibc_reserved_credits);
list_add(&conn->ibc_connd_list, &closes);
} else {
- list_add(&conn->ibc_connd_list,
- &checksends);
+ list_add(&conn->ibc_connd_list, &checksends);
}
/* +ref for 'closes' or 'checksends' */
kiblnd_conn_addref(conn);
@@ -3040,21 +3210,23 @@ kiblnd_check_conns(int idx)
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- /* Handle timeout by closing the whole
+ /*
+ * Handle timeout by closing the whole
* connection. We can only be sure RDMA activity
- * has ceased once the QP has been modified. */
+ * has ceased once the QP has been modified.
+ */
list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) {
list_del(&conn->ibc_connd_list);
kiblnd_close_conn(conn, -ETIMEDOUT);
kiblnd_conn_decref(conn);
}
- /* In case we have enough credits to return via a
+ /*
+ * In case we have enough credits to return via a
* NOOP, but there were no non-blocking tx descs
- * free to do it last time... */
- while (!list_empty(&checksends)) {
- conn = list_entry(checksends.next,
- kib_conn_t, ibc_connd_list);
+ * free to do it last time...
+ */
+ list_for_each_entry_safe(conn, temp, &checksends, ibc_connd_list) {
list_del(&conn->ibc_connd_list);
kiblnd_check_sends(conn);
kiblnd_conn_decref(conn);
@@ -3074,9 +3246,21 @@ kiblnd_disconnect_conn(kib_conn_t *conn)
kiblnd_peer_notify(conn->ibc_peer);
}
+/*
+ * High-water mark for racing reconnections to the same peer; further
+ * attempts are delayed once more than KIB_RECONN_HIGH_RACE have been made.
+ */
+#define KIB_RECONN_HIGH_RACE 10
+/*
+ * Allow connd to take a break and handle other things after this many
+ * consecutive reconnection attempts.
+ */
+#define KIB_RECONN_BREAK 100
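A rough sketch of how the two thresholds above interact in connd's loop; the counters are illustrative, and the values mirror the defines:

/* values mirror the defines above */
#define RECONN_HIGH_RACE        10
#define RECONN_BREAK            100

static int process_reconnects(int races_with_peer)
{
        int done = 0;

        while (done < RECONN_BREAK) {
                if (races_with_peer >= RECONN_HIGH_RACE)
                        break;  /* park this peer on the wait list */
                /* ... issue one reconnect attempt here ... */
                races_with_peer++;
                done++;
        }
        return done;            /* connd turns to other work after a break */
}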
+
int
kiblnd_connd(void *arg)
{
+ spinlock_t *lock = &kiblnd_data.kib_connd_lock;
wait_queue_t wait;
unsigned long flags;
kib_conn_t *conn;
@@ -3091,39 +3275,79 @@ kiblnd_connd(void *arg)
init_waitqueue_entry(&wait, current);
kiblnd_data.kib_connd = current;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
while (!kiblnd_data.kib_shutdown) {
+ int reconn = 0;
dropped_lock = 0;
if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ kib_peer_t *peer = NULL;
+
conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
+ kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
+ if (conn->ibc_reconnect) {
+ peer = conn->ibc_peer;
+ kiblnd_peer_addref(peer);
+ }
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
- kiblnd_destroy_conn(conn);
+ kiblnd_destroy_conn(conn, !peer);
+
+ spin_lock_irqsave(lock, flags);
+ if (!peer)
+ continue;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ conn->ibc_peer = peer;
+ if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_list);
+ else
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_wait);
}
if (!list_empty(&kiblnd_data.kib_connd_conns)) {
conn = list_entry(kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
+ kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
kiblnd_disconnect_conn(conn);
kiblnd_conn_decref(conn);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
+ }
+
+ while (reconn < KIB_RECONN_BREAK) {
+ if (kiblnd_data.kib_reconn_sec !=
+ ktime_get_real_seconds()) {
+ kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
+ list_splice_init(&kiblnd_data.kib_reconn_wait,
+ &kiblnd_data.kib_reconn_list);
+ }
+
+ if (list_empty(&kiblnd_data.kib_reconn_list))
+ break;
+
+ conn = list_entry(kiblnd_data.kib_reconn_list.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
+
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
+
+ reconn += kiblnd_reconnect_peer(conn->ibc_peer);
+ kiblnd_peer_decref(conn->ibc_peer);
+ LIBCFS_FREE(conn, sizeof(*conn));
+
+ spin_lock_irqsave(lock, flags);
}
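The kib_reconn_sec test above releases the wait list back into the active list at most once per wall-clock second, so peers past the high-race mark retry no faster than that. A user-space sketch of the rate gate, assuming POSIX time(2) and with the list operation left as a comment:

#include <time.h>

/*
 * Move deferred peers back to the active list no more than once per
 * second; '*last_sec' persists across calls, as kib_reconn_sec does.
 */
static void maybe_release_waiters(time_t *last_sec)
{
        time_t now = time(NULL);

        if (*last_sec != now) {
                *last_sec = now;
                /* list_splice_init(&wait_list, &active_list); */
        }
}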
/* careful with the jiffy wrap... */
@@ -3133,21 +3357,22 @@ kiblnd_connd(void *arg)
const int p = 1;
int chunk = kiblnd_data.kib_peer_hash_size;
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
- /* Time to check for RDMA timeouts on a few more
+ /*
+ * Time to check for RDMA timeouts on a few more
* peers: I do checks every 'p' seconds on a
* proportion of the peer table and I need to check
* every connection 'n' times within a timeout
* interval, to ensure I detect a timeout on any
* connection within (n+1)/n times the timeout
- * interval. */
-
+ * interval.
+ */
if (*kiblnd_tunables.kib_timeout > n * p)
chunk = (chunk * n * p) /
*kiblnd_tunables.kib_timeout;
- if (chunk == 0)
+ if (!chunk)
chunk = 1;
for (i = 0; i < chunk; i++) {
@@ -3156,8 +3381,8 @@ kiblnd_connd(void *arg)
kiblnd_data.kib_peer_hash_size;
}
- deadline += p * HZ;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+ spin_lock_irqsave(lock, flags);
}
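The switch to msecs_to_jiffies() keeps the deadline arithmetic HZ-independent, and the "careful with the jiffy wrap" comment above refers to comparing such counters safely. A minimal sketch of the wrap-safe comparison trick, the same idea as the kernel's time_after(); the 32-bit types are illustrative:

#include <stdbool.h>
#include <stdint.h>

/*
 * Wrap-safe deadline test on a free-running 32-bit counter: the
 * signed difference stays correct across the wrap as long as the two
 * values are within half the counter range of each other.
 */
static bool deadline_passed(uint32_t now, uint32_t deadline)
{
        return (int32_t)(now - deadline) >= 0;
}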
if (dropped_lock)
@@ -3166,15 +3391,15 @@ kiblnd_connd(void *arg)
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
schedule_timeout(timeout);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
}
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
kiblnd_thread_fini();
return 0;
@@ -3206,12 +3431,14 @@ kiblnd_complete(struct ib_wc *wc)
LBUG();
case IBLND_WID_RDMA:
- /* We only get RDMA completion notification if it fails. All
+ /*
+ * We only get RDMA completion notification if it fails. All
* subsequent work items, including the final SEND will fail
* too. However we can't print out any more info about the
* failing RDMA because 'tx' might be back on the idle list or
* even reused already if we didn't manage to post all our work
- * items */
+ * items
+ */
CNETERR("RDMA (tx: %p) failed: %d\n",
kiblnd_wreqid2ptr(wc->wr_id), wc->status);
return;
@@ -3230,11 +3457,13 @@ kiblnd_complete(struct ib_wc *wc)
void
kiblnd_cq_completion(struct ib_cq *cq, void *arg)
{
- /* NB I'm not allowed to schedule this conn once its refcount has
+ /*
+ * NB I'm not allowed to schedule this conn once its refcount has
* reached 0. Since fundamentally I'm racing with scheduler threads
* consuming my CQ I could be called after all completions have
- * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
- * and this CQ is about to be destroyed so I NOOP. */
+ * occurred. But in this case, !ibc_nrx && !ibc_nsends_posted
+ * and this CQ is about to be destroyed so I NOOP.
+ */
kib_conn_t *conn = arg;
struct kib_sched_info *sched = conn->ibc_sched;
unsigned long flags;
@@ -3288,7 +3517,7 @@ kiblnd_scheduler(void *arg)
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
- if (rc != 0) {
+ if (rc) {
CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
sched->ibs_cpt);
}
@@ -3308,8 +3537,8 @@ kiblnd_scheduler(void *arg)
did_something = 0;
if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next,
- kib_conn_t, ibc_sched_list);
+ conn = list_entry(sched->ibs_conns.next, kib_conn_t,
+ ibc_sched_list);
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
list_del(&conn->ibc_sched_list);
@@ -3317,8 +3546,10 @@ kiblnd_scheduler(void *arg)
spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ wc.wr_id = IBLND_WID_INVAL;
+
rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
- if (rc == 0) {
+ if (!rc) {
rc = ib_req_notify_cq(conn->ibc_cq,
IB_CQ_NEXT_COMP);
if (rc < 0) {
@@ -3327,13 +3558,22 @@ kiblnd_scheduler(void *arg)
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
spin_lock_irqsave(&sched->ibs_lock,
- flags);
+ flags);
continue;
}
rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
}
+ if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
+ LCONSOLE_ERROR("ib_poll_cq (rc: %d) returned invalid wr_id, opcode %d, status: %d, vendor_err: %d, conn: %s status: %d\nplease upgrade firmware and OFED or contact vendor.\n",
+ rc, wc.opcode, wc.status,
+ wc.vendor_err,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ conn->ibc_state);
+ rc = -EINVAL;
+ }
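Poisoning wc.wr_id before ib_poll_cq() lets the scheduler detect firmware that signals a completion without filling in the work-request ID. A hedged user-space sketch of the defensive pattern; the sentinel and callback are stand-ins, not the RDMA API:

#include <stdint.h>

#define WID_INVAL UINT64_MAX            /* never used as a real wr_id */

struct completion {
        uint64_t wr_id;
        int status;
};

/* poll_fn mimics ib_poll_cq(): >0 means one completion was written */
static int checked_poll(int (*poll_fn)(struct completion *),
                        struct completion *wc)
{
        int rc;

        wc->wr_id = WID_INVAL;          /* poison before polling */
        rc = poll_fn(wc);
        if (rc > 0 && wc->wr_id == WID_INVAL)
                return -1;              /* completion without a wr_id: broken HW */
        return rc;
}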
+
if (rc < 0) {
CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
@@ -3346,21 +3586,23 @@ kiblnd_scheduler(void *arg)
spin_lock_irqsave(&sched->ibs_lock, flags);
- if (rc != 0 || conn->ibc_ready) {
- /* There may be another completion waiting; get
+ if (rc || conn->ibc_ready) {
+ /*
+ * There may be another completion waiting; get
* another scheduler to check while I handle
- * this one... */
+ * this one...
+ */
/* +1 ref for sched_conns */
kiblnd_conn_addref(conn);
list_add_tail(&conn->ibc_sched_list,
- &sched->ibs_conns);
+ &sched->ibs_conns);
if (waitqueue_active(&sched->ibs_waitq))
wake_up(&sched->ibs_waitq);
} else {
conn->ibc_scheduled = 0;
}
- if (rc != 0) {
+ if (rc) {
spin_unlock_irqrestore(&sched->ibs_lock, flags);
kiblnd_complete(&wc);
@@ -3400,7 +3642,7 @@ kiblnd_failover_thread(void *arg)
unsigned long flags;
int rc;
- LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
+ LASSERT(*kiblnd_tunables.kib_dev_failover);
cfs_block_allsigs();
@@ -3459,13 +3701,15 @@ kiblnd_failover_thread(void *arg)
remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_lock_irqsave(glock, flags);
- if (!long_sleep || rc != 0)
+ if (!long_sleep || rc)
continue;
- /* have a long sleep, routine check all active devices,
+ /*
+ * have a long sleep, routine check all active devices,
* we need checking like this because if there is not active
* connection on the dev and no SEND from local, we may listen
- * on wrong HCA for ever while there is a bonding failover */
+ * on wrong HCA for ever while there is a bonding failover
+ */
list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index 1d4e7efb53d4..b4607dad3712 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -52,8 +52,10 @@ static int timeout = 50;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "timeout (seconds)");
-/* Number of threads in each scheduler pool which is percpt,
- * we will estimate reasonable value based on CPUs if it's set to zero. */
+/*
+ * Number of threads in each scheduler pool, which is per-CPT;
+ * we will estimate a reasonable value based on CPUs if it's set to zero.
+ */
static int nscheds;
module_param(nscheds, int, 0444);
MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
@@ -200,7 +202,7 @@ kiblnd_tunables_init(void)
if (*kiblnd_tunables.kib_map_on_demand == 1)
*kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
- if (*kiblnd_tunables.kib_concurrent_sends == 0) {
+ if (!*kiblnd_tunables.kib_concurrent_sends) {
if (*kiblnd_tunables.kib_map_on_demand > 0 &&
*kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 05aa90ea597a..cca7b2f7f1a7 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -70,7 +70,7 @@ ksocknal_create_route(__u32 ipaddr, int port)
ksock_route_t *route;
LIBCFS_ALLOC(route, sizeof(*route));
- if (route == NULL)
+ if (!route)
return NULL;
atomic_set(&route->ksnr_refcount, 1);
@@ -91,9 +91,9 @@ ksocknal_create_route(__u32 ipaddr, int port)
void
ksocknal_destroy_route(ksock_route_t *route)
{
- LASSERT(atomic_read(&route->ksnr_refcount) == 0);
+ LASSERT(!atomic_read(&route->ksnr_refcount));
- if (route->ksnr_peer != NULL)
+ if (route->ksnr_peer)
ksocknal_peer_decref(route->ksnr_peer);
LIBCFS_FREE(route, sizeof(*route));
@@ -102,6 +102,7 @@ ksocknal_destroy_route(ksock_route_t *route)
static int
ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
+ int cpt = lnet_cpt_of_nid(id.nid);
ksock_net_t *net = ni->ni_data;
ksock_peer_t *peer;
@@ -109,8 +110,8 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
LASSERT(id.pid != LNET_PID_ANY);
LASSERT(!in_interrupt());
- LIBCFS_ALLOC(peer, sizeof(*peer));
- if (peer == NULL)
+ LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+ if (!peer)
return -ENOMEM;
peer->ksnp_ni = ni;
@@ -152,10 +153,10 @@ ksocknal_destroy_peer(ksock_peer_t *peer)
ksock_net_t *net = peer->ksnp_ni->ni_data;
CDEBUG(D_NET, "peer %s %p deleted\n",
- libcfs_id2str(peer->ksnp_id), peer);
+ libcfs_id2str(peer->ksnp_id), peer);
- LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
- LASSERT(peer->ksnp_accepting == 0);
+ LASSERT(!atomic_read(&peer->ksnp_refcount));
+ LASSERT(!peer->ksnp_accepting);
LASSERT(list_empty(&peer->ksnp_conns));
LASSERT(list_empty(&peer->ksnp_routes));
LASSERT(list_empty(&peer->ksnp_tx_queue));
@@ -163,10 +164,12 @@ ksocknal_destroy_peer(ksock_peer_t *peer)
LIBCFS_FREE(peer, sizeof(*peer));
- /* NB a peer's connections and routes keep a reference on their peer
+ /*
+ * NB a peer's connections and routes keep a reference on their peer
* until they are destroyed, so we can be assured that _all_ state to
* do with this peer has been cleaned up when its refcount drops to
- * zero. */
+ * zero.
+ */
spin_lock_bh(&net->ksnn_lock);
net->ksnn_npeers--;
spin_unlock_bh(&net->ksnn_lock);
@@ -180,7 +183,6 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
ksock_peer_t *peer;
list_for_each(tmp, peer_list) {
-
peer = list_entry(tmp, ksock_peer_t, ksnp_list);
LASSERT(!peer->ksnp_closing);
@@ -207,7 +209,7 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
read_lock(&ksocknal_data.ksnd_global_lock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) /* +1 ref for caller? */
+ if (peer) /* +1 ref for caller? */
ksocknal_peer_addref(peer);
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -226,9 +228,11 @@ ksocknal_unlink_peer_locked(ksock_peer_t *peer)
ip = peer->ksnp_passive_ips[i];
iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
- /* All IPs in peer->ksnp_passive_ips[] come from the
- * interface list, therefore the call must succeed. */
- LASSERT(iface != NULL);
+ /*
+ * All IPs in peer->ksnp_passive_ips[] come from the
+ * interface list, therefore the call must succeed.
+ */
+ LASSERT(iface);
CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
peer, iface, iface->ksni_nroutes);
@@ -246,8 +250,8 @@ ksocknal_unlink_peer_locked(ksock_peer_t *peer)
static int
ksocknal_get_peer_info(lnet_ni_t *ni, int index,
- lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
- int *port, int *conn_count, int *share_count)
+ lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
+ int *port, int *conn_count, int *share_count)
{
ksock_peer_t *peer;
struct list_head *ptmp;
@@ -260,14 +264,13 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
- if (peer->ksnp_n_passive_ips == 0 &&
+ if (!peer->ksnp_n_passive_ips &&
list_empty(&peer->ksnp_routes)) {
if (index-- > 0)
continue;
@@ -301,7 +304,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
continue;
route = list_entry(rtmp, ksock_route_t,
- ksnr_list);
+ ksnr_list);
*id = peer->ksnp_id;
*myip = route->ksnr_myipaddr;
@@ -330,7 +333,7 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
ksocknal_route_addref(route);
if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
- if (route->ksnr_myipaddr == 0) {
+ if (!route->ksnr_myipaddr) {
/* route wasn't bound locally yet (the initial route) */
CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
libcfs_id2str(peer->ksnp_id),
@@ -345,21 +348,23 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
route->ksnr_myipaddr);
- if (iface != NULL)
+ if (iface)
iface->ksni_nroutes--;
}
route->ksnr_myipaddr = conn->ksnc_myipaddr;
iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
route->ksnr_myipaddr);
- if (iface != NULL)
+ if (iface)
iface->ksni_nroutes++;
}
- route->ksnr_connected |= (1<<type);
+ route->ksnr_connected |= (1 << type);
route->ksnr_conn_count++;
- /* Successful connection => further attempts can
- * proceed immediately */
+ /*
+ * Successful connection => further attempts can
+ * proceed immediately
+ */
route->ksnr_retry_interval = 0;
}
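A route remembers which connection types it has established as one bit per SOCKLND_CONN_* type, and a success clears the retry backoff. A tiny sketch of that bookkeeping with illustrative names:

enum conn_type { CONN_CONTROL, CONN_BULK_IN, CONN_BULK_OUT };

struct route_state {
        unsigned int connected;         /* one bit per established type */
        unsigned int retry_interval;    /* backoff for the next attempt */
};

static void note_connected(struct route_state *r, enum conn_type type)
{
        r->connected |= 1U << type;
        r->retry_interval = 0;          /* success: retry immediately next time */
}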
@@ -371,10 +376,10 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
ksock_route_t *route2;
LASSERT(!peer->ksnp_closing);
- LASSERT(route->ksnr_peer == NULL);
+ LASSERT(!route->ksnr_peer);
LASSERT(!route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting);
- LASSERT(route->ksnr_connected == 0);
+ LASSERT(!route->ksnr_connected);
/* LASSERT(unique) */
list_for_each(tmp, &peer->ksnp_routes) {
@@ -382,8 +387,8 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR("Duplicate route %s %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr);
+ libcfs_id2str(peer->ksnp_id),
+ &route->ksnr_ipaddr);
LBUG();
}
}
@@ -425,10 +430,10 @@ ksocknal_del_route_locked(ksock_route_t *route)
ksocknal_close_conn_locked(conn, 0);
}
- if (route->ksnr_myipaddr != 0) {
+ if (route->ksnr_myipaddr) {
iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
route->ksnr_myipaddr);
- if (iface != NULL)
+ if (iface)
iface->ksni_nroutes--;
}
@@ -438,8 +443,10 @@ ksocknal_del_route_locked(ksock_route_t *route)
if (list_empty(&peer->ksnp_routes) &&
list_empty(&peer->ksnp_conns)) {
- /* I've just removed the last route to a peer with no active
- * connections */
+ /*
+ * I've just removed the last route to a peer with no active
+ * connections
+ */
ksocknal_unlink_peer_locked(peer);
}
}
@@ -460,11 +467,11 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
/* Have a brand new peer ready... */
rc = ksocknal_create_peer(&peer, ni, id);
- if (rc != 0)
+ if (rc)
return rc;
route = ksocknal_create_route(ipaddr, port);
- if (route == NULL) {
+ if (!route) {
ksocknal_peer_decref(peer);
return -ENOMEM;
}
@@ -472,16 +479,16 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
- LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
peer2 = ksocknal_find_peer_locked(ni, id);
- if (peer2 != NULL) {
+ if (peer2) {
ksocknal_peer_decref(peer);
peer = peer2;
} else {
/* peer table takes my ref on peer */
list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(id.nid));
+ ksocknal_nid2peerlist(id.nid));
}
route2 = NULL;
@@ -493,7 +500,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
route2 = NULL;
}
- if (route2 == NULL) {
+ if (!route2) {
ksocknal_add_route_locked(peer, route);
route->ksnr_share_count++;
} else {
@@ -524,7 +531,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
route = list_entry(tmp, ksock_route_t, ksnr_list);
/* no match */
- if (!(ip == 0 || route->ksnr_ipaddr == ip))
+ if (!(!ip || route->ksnr_ipaddr == ip))
continue;
route->ksnr_share_count = 0;
@@ -538,15 +545,16 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
nshared += route->ksnr_share_count;
}
- if (nshared == 0) {
- /* remove everything else if there are no explicit entries
- * left */
-
+ if (!nshared) {
+ /*
+ * remove everything else if there are no explicit entries
+ * left
+ */
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
/* we should only be removing auto-entries */
- LASSERT(route->ksnr_share_count == 0);
+ LASSERT(!route->ksnr_share_count);
ksocknal_del_route_locked(route);
}
@@ -575,16 +583,16 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (id.nid != LNET_NID_ANY)
- lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- else {
+ if (id.nid != LNET_NID_ANY) {
+ lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ } else {
lo = 0;
hi = ksocknal_data.ksnd_peer_hash_size - 1;
}
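When a specific NID is given, only the one hash bucket it maps to needs scanning; LNET_NID_ANY widens the range to the whole table. A sketch of the bucket-range selection, with a stand-in hash in place of ksocknal_nid2peerlist():

#include <stddef.h>
#include <stdint.h>

#define HASH_SIZE       256
#define NID_ANY         UINT64_MAX

static size_t nid_hash(uint64_t nid)
{
        return (size_t)(nid % HASH_SIZE);       /* stand-in hash */
}

static void bucket_range(uint64_t nid, size_t *lo, size_t *hi)
{
        if (nid != NID_ANY) {
                *lo = nid_hash(nid);    /* scan just this peer's bucket */
                *hi = *lo;
        } else {
                *lo = 0;                /* scan the whole table */
                *hi = HASH_SIZE - 1;
        }
}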
for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
+ list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni != ni)
@@ -604,7 +612,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
LASSERT(list_empty(&peer->ksnp_routes));
list_splice_init(&peer->ksnp_tx_queue,
- &zombies);
+ &zombies);
}
ksocknal_peer_decref(peer); /* ...till here */
@@ -645,7 +653,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
continue;
conn = list_entry(ctmp, ksock_conn_t,
- ksnc_list);
+ ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
return conn;
@@ -692,8 +700,10 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
nip = net->ksnn_ninterfaces;
LASSERT(nip <= LNET_MAX_INTERFACES);
- /* Only offer interfaces for additional connections if I have
- * more than one. */
+ /*
+ * Only offer interfaces for additional connections if I have
+ * more than one.
+ */
if (nip < 2) {
read_unlock(&ksocknal_data.ksnd_global_lock);
return 0;
@@ -701,7 +711,7 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
for (i = 0; i < nip; i++) {
ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
- LASSERT(ipaddrs[i] != 0);
+ LASSERT(ipaddrs[i]);
}
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -719,11 +729,11 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
int i;
for (i = 0; i < nips; i++) {
- if (ips[i] == 0)
+ if (!ips[i])
continue;
this_xor = ips[i] ^ iface->ksni_ipaddr;
- this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
+ this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0;
if (!(best < 0 ||
best_netmatch < this_netmatch ||
@@ -757,38 +767,45 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
int best_netmatch;
int best_npeers;
- /* CAVEAT EMPTOR: We do all our interface matching with an
+ /*
+ * CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
* expecting to be dealing with small numbers of interfaces, so the
- * O(n**3)-ness shouldn't matter */
-
- /* Also note that I'm not going to return more than n_peerips
- * interfaces, even if I have more myself */
-
+ * O(n**3)-ness shouldn't matter
+ */
+ /*
+ * Also note that I'm not going to return more than n_peerips
+ * interfaces, even if I have more myself
+ */
write_lock_bh(global_lock);
LASSERT(n_peerips <= LNET_MAX_INTERFACES);
LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
- /* Only match interfaces for additional connections
- * if I have > 1 interface */
+ /*
+ * Only match interfaces for additional connections
+ * if I have > 1 interface
+ */
n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
min(n_peerips, net->ksnn_ninterfaces);
for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
/* ^ yes really... */
- /* If we have any new interfaces, first tick off all the
+ /*
+ * If we have any new interfaces, first tick off all the
* peer IPs that match old interfaces, then choose new
* interfaces to match the remaining peer IPS.
* We don't forget interfaces we've stopped using; we might
- * start using them again... */
-
+ * start using them again...
+ */
if (i < peer->ksnp_n_passive_ips) {
/* Old interface. */
ip = peer->ksnp_passive_ips[i];
best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
+ /* peer passive ips are kept up to date */
+ LASSERT(best_iface);
} else {
/* choose a new interface */
LASSERT(i == peer->ksnp_n_passive_ips);
@@ -810,9 +827,9 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
k = ksocknal_match_peerip(iface, peerips, n_peerips);
xor = ip ^ peerips[k];
- this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
+ this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
- if (!(best_iface == NULL ||
+ if (!(!best_iface ||
best_netmatch < this_netmatch ||
(best_netmatch == this_netmatch &&
best_npeers > iface->ksni_npeers)))
@@ -823,10 +840,12 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
best_npeers = iface->ksni_npeers;
}
+ LASSERT(best_iface);
+
best_iface->ksni_npeers++;
ip = best_iface->ksni_ipaddr;
peer->ksnp_passive_ips[i] = ip;
- peer->ksnp_n_passive_ips = i+1;
+ peer->ksnp_n_passive_ips = i + 1;
}
/* mark the best matching peer IP used */
@@ -860,16 +879,19 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
int i;
int j;
- /* CAVEAT EMPTOR: We do all our interface matching with an
+ /*
+ * CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
* expecting to be dealing with small numbers of interfaces, so the
- * O(n**3)-ness here shouldn't matter */
-
+ * O(n**3)-ness here shouldn't matter
+ */
write_lock_bh(global_lock);
if (net->ksnn_ninterfaces < 2) {
- /* Only create additional connections
- * if I have > 1 interface */
+ /*
+ * Only create additional connections
+ * if I have > 1 interface
+ */
write_unlock_bh(global_lock);
return;
}
@@ -877,13 +899,13 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);
for (i = 0; i < npeer_ipaddrs; i++) {
- if (newroute != NULL) {
+ if (newroute) {
newroute->ksnr_ipaddr = peer_ipaddrs[i];
} else {
write_unlock_bh(global_lock);
newroute = ksocknal_create_route(peer_ipaddrs[i], port);
- if (newroute == NULL)
+ if (!newroute)
return;
write_lock_bh(global_lock);
@@ -904,7 +926,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
route = NULL;
}
- if (route != NULL)
+ if (route)
continue;
best_iface = NULL;
@@ -920,21 +942,21 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
/* Using this interface already? */
list_for_each(rtmp, &peer->ksnp_routes) {
route = list_entry(rtmp, ksock_route_t,
- ksnr_list);
+ ksnr_list);
if (route->ksnr_myipaddr == iface->ksni_ipaddr)
break;
route = NULL;
}
- if (route != NULL)
+ if (route)
continue;
- this_netmatch = (((iface->ksni_ipaddr ^
+ this_netmatch = (!((iface->ksni_ipaddr ^
newroute->ksnr_ipaddr) &
- iface->ksni_netmask) == 0) ? 1 : 0;
+ iface->ksni_netmask)) ? 1 : 0;
- if (!(best_iface == NULL ||
+ if (!(!best_iface ||
best_netmatch < this_netmatch ||
(best_netmatch == this_netmatch &&
best_nroutes > iface->ksni_nroutes)))
@@ -945,7 +967,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
best_nroutes = iface->ksni_nroutes;
}
- if (best_iface == NULL)
+ if (!best_iface)
continue;
newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
@@ -956,7 +978,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
}
write_unlock_bh(global_lock);
- if (newroute != NULL)
+ if (newroute)
ksocknal_route_decref(newroute);
}
@@ -969,10 +991,10 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
int peer_port;
rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT(rc == 0); /* we succeeded before */
+ LASSERT(!rc); /* we succeeded before */
LIBCFS_ALLOC(cr, sizeof(*cr));
- if (cr == NULL) {
+ if (!cr) {
LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
&peer_ip);
return -ENOMEM;
@@ -997,7 +1019,6 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
ksock_route_t *route;
list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
-
if (route->ksnr_ipaddr == ipaddr)
return route->ksnr_connecting;
}
@@ -1006,7 +1027,7 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
int
ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
- struct socket *sock, int type)
+ struct socket *sock, int type)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
LIST_HEAD(zombies);
@@ -1026,12 +1047,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
int active;
char *warn = NULL;
- active = (route != NULL);
+ active = !!route;
LASSERT(active == (type != SOCKLND_CONN_NONE));
LIBCFS_ALLOC(conn, sizeof(*conn));
- if (conn == NULL) {
+ if (!conn) {
rc = -ENOMEM;
goto failed_0;
}
@@ -1039,8 +1060,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
conn->ksnc_peer = NULL;
conn->ksnc_route = NULL;
conn->ksnc_sock = sock;
- /* 2 ref, 1 for conn, another extra ref prevents socket
- * being closed before establishment of connection */
+ /*
+ * 2 ref, 1 for conn, another extra ref prevents socket
+ * being closed before establishment of connection
+ */
atomic_set(&conn->ksnc_sock_refcount, 2);
conn->ksnc_type = type;
ksocknal_lib_save_callback(sock, conn);
@@ -1057,21 +1080,22 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
- if (hello == NULL) {
+ if (!hello) {
rc = -ENOMEM;
goto failed_1;
}
/* stash conn's local and remote addrs */
rc = ksocknal_lib_get_conn_addrs(conn);
- if (rc != 0)
+ if (rc)
goto failed_1;
- /* Find out/confirm peer's NID and connection type and get the
+ /*
+ * Find out/confirm peer's NID and connection type and get the
* vector of interfaces she's willing to let me connect to.
* Passive connections use the listener timeout since the peer sends
- * eagerly */
-
+ * eagerly
+ */
if (active) {
peer = route->ksnr_peer;
LASSERT(ni == peer->ksnp_ni);
@@ -1084,7 +1108,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
conn->ksnc_proto = peer->ksnp_proto;
write_unlock_bh(global_lock);
- if (conn->ksnc_proto == NULL) {
+ if (!conn->ksnc_proto) {
conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 2)
@@ -1095,7 +1119,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
}
rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
- if (rc != 0)
+ if (rc)
goto failed_1;
} else {
peerid.nid = LNET_NID_ANY;
@@ -1109,8 +1133,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
if (rc < 0)
goto failed_1;
- LASSERT(rc == 0 || active);
- LASSERT(conn->ksnc_proto != NULL);
+ LASSERT(!rc || active);
+ LASSERT(conn->ksnc_proto);
LASSERT(peerid.nid != LNET_NID_ANY);
cpt = lnet_cpt_of_nid(peerid.nid);
@@ -1120,20 +1144,22 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_lock_bh(global_lock);
} else {
rc = ksocknal_create_peer(&peer, ni, peerid);
- if (rc != 0)
+ if (rc)
goto failed_1;
write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
- LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
peer2 = ksocknal_find_peer_locked(ni, peerid);
- if (peer2 == NULL) {
- /* NB this puts an "empty" peer in the peer
- * table (which takes my ref) */
+ if (!peer2) {
+ /*
+ * NB this puts an "empty" peer in the peer
+ * table (which takes my ref)
+ */
list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(peerid.nid));
+ ksocknal_nid2peerlist(peerid.nid));
} else {
ksocknal_peer_decref(peer);
peer = peer2;
@@ -1143,8 +1169,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
ksocknal_peer_addref(peer);
peer->ksnp_accepting++;
- /* Am I already connecting to this guy? Resolve in
- * favour of higher NID... */
+ /*
+ * Am I already connecting to this guy? Resolve in
+ * favour of higher NID...
+ */
if (peerid.nid < ni->ni_nid &&
ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
rc = EALREADY;
@@ -1161,8 +1189,9 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
goto failed_2;
}
- if (peer->ksnp_proto == NULL) {
- /* Never connected before.
+ if (!peer->ksnp_proto) {
+ /*
+ * Never connected before.
* NB recv_hello may have returned EPROTO to signal my peer
* wants a different protocol than the one I asked for.
*/
@@ -1198,8 +1227,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
goto failed_2;
}
- /* Refuse to duplicate an existing connection, unless this is a
- * loopback connection */
+ /*
+ * Refuse to duplicate an existing connection, unless this is a
+ * loopback connection
+ */
if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
list_for_each(tmp, &peer->ksnp_conns) {
conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
@@ -1209,9 +1240,11 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
conn2->ksnc_type != conn->ksnc_type)
continue;
- /* Reply on a passive connection attempt so the peer
- * realises we're connected. */
- LASSERT(rc == 0);
+ /*
+ * Reply on a passive connection attempt so the peer
+ * realises we're connected.
+ */
+ LASSERT(!rc);
if (!active)
rc = EALREADY;
@@ -1220,9 +1253,11 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
}
}
- /* If the connection created by this route didn't bind to the IP
+ /*
+ * If the connection created by this route didn't bind to the IP
* address the route connected to, the connection/route matching
- * code below probably isn't going to work. */
+ * code below probably isn't going to work.
+ */
if (active &&
route->ksnr_ipaddr != conn->ksnc_ipaddr) {
CERROR("Route %s %pI4h connected to %pI4h\n",
@@ -1231,10 +1266,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
&conn->ksnc_ipaddr);
}
- /* Search for a route corresponding to the new connection and
+ /*
+ * Search for a route corresponding to the new connection and
* create an association. This allows incoming connections created
* by routes in my peer to match my own route entries so I don't
- * continually create duplicate routes. */
+ * continually create duplicate routes.
+ */
list_for_each(tmp, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
@@ -1278,14 +1315,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_unlock_bh(global_lock);
- /* We've now got a new connection. Any errors from here on are just
+ /*
+ * We've now got a new connection. Any errors from here on are just
* like "normal" comms errors and we close the connection normally.
* NB (a) we still have to send the reply HELLO for passive
* connections,
* (b) normal I/O on the conn is blocked until I setup and call the
* socket callbacks.
*/
-
CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
&conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
@@ -1305,12 +1342,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
- /* setup the socket AFTER I've received hello (it disables
+ /*
+ * setup the socket AFTER I've received hello (it disables
* SO_LINGER). I might call back to the acceptor who may want
* to send a protocol version response and then close the
* socket; this ensures the socket only tears down after the
- * response has been sent. */
- if (rc == 0)
+ * response has been sent.
+ */
+ if (!rc)
rc = ksocknal_lib_setup_sock(sock);
write_lock_bh(global_lock);
@@ -1323,14 +1362,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_unlock_bh(global_lock);
- if (rc != 0) {
+ if (rc) {
write_lock_bh(global_lock);
if (!conn->ksnc_closing) {
/* could be closed by another thread */
ksocknal_close_conn_locked(conn, rc);
}
write_unlock_bh(global_lock);
- } else if (ksocknal_connsock_addref(conn) == 0) {
+ } else if (!ksocknal_connsock_addref(conn)) {
/* Allow I/O to proceed. */
ksocknal_read_callback(conn);
ksocknal_write_callback(conn);
@@ -1352,19 +1391,21 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_unlock_bh(global_lock);
- if (warn != NULL) {
+ if (warn) {
if (rc < 0)
CERROR("Not creating conn %s type %d: %s\n",
libcfs_id2str(peerid), conn->ksnc_type, warn);
else
CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
+ libcfs_id2str(peerid), conn->ksnc_type, warn);
}
if (!active) {
if (rc > 0) {
- /* Request retry by replying with CONN_NONE
- * ksnc_proto has been set already */
+ /*
+ * Request retry by replying with CONN_NONE
+ * ksnc_proto has been set already
+ */
conn->ksnc_type = SOCKLND_CONN_NONE;
hello->kshm_nips = 0;
ksocknal_send_hello(ni, conn, peerid.nid, hello);
@@ -1379,7 +1420,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
ksocknal_peer_decref(peer);
failed_1:
- if (hello != NULL)
+ if (hello)
LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
@@ -1393,15 +1434,17 @@ failed_0:
void
ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
{
- /* This just does the immmediate housekeeping, and queues the
+ /*
+ * This just does the immediate housekeeping, and queues the
* connection for the reaper to terminate.
- * Caller holds ksnd_global_lock exclusively in irq context */
+ * Caller holds ksnd_global_lock exclusively in irq context
+ */
ksock_peer_t *peer = conn->ksnc_peer;
ksock_route_t *route;
ksock_conn_t *conn2;
struct list_head *tmp;
- LASSERT(peer->ksnp_error == 0);
+ LASSERT(!peer->ksnp_error);
LASSERT(!conn->ksnc_closing);
conn->ksnc_closing = 1;
@@ -1409,10 +1452,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
list_del(&conn->ksnc_list);
route = conn->ksnc_route;
- if (route != NULL) {
+ if (route) {
/* dissociate conn from route... */
LASSERT(!route->ksnr_deleted);
- LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+ LASSERT(route->ksnr_connected & (1 << conn->ksnc_type));
conn2 = NULL;
list_for_each(tmp, &peer->ksnp_conns) {
@@ -1424,7 +1467,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
conn2 = NULL;
}
- if (conn2 == NULL)
+ if (!conn2)
route->ksnr_connected &= ~(1 << conn->ksnc_type);
conn->ksnc_route = NULL;
@@ -1445,15 +1488,17 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
- /* throw them to the last connection...,
- * these TXs will be send to /dev/null by scheduler */
+ /*
+ * throw them to the last connection...,
+ * these TXs will be sent to /dev/null by the scheduler
+ */
list_for_each_entry(tx, &peer->ksnp_tx_queue,
- tx_list)
+ tx_list)
ksocknal_tx_prep(conn, tx);
spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
list_splice_init(&peer->ksnp_tx_queue,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
@@ -1461,8 +1506,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
peer->ksnp_error = error; /* stash last conn close reason */
if (list_empty(&peer->ksnp_routes)) {
- /* I've just closed last conn belonging to a
- * peer with no routes to it */
+ /*
+ * I've just closed the last conn belonging to a
+ * peer with no routes to it
+ */
ksocknal_unlink_peer_locked(peer);
}
}
@@ -1470,7 +1517,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
list_add_tail(&conn->ksnc_list,
- &ksocknal_data.ksnd_deathrow_conns);
+ &ksocknal_data.ksnd_deathrow_conns);
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -1482,16 +1529,17 @@ ksocknal_peer_failed(ksock_peer_t *peer)
int notify = 0;
unsigned long last_alive = 0;
- /* There has been a connection failure or comms error; but I'll only
+ /*
+ * There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer is dead if it's to another kernel and
- * there are no connections or connection attempts in existence. */
-
+ * there are no connections or connection attempts in existence.
+ */
read_lock(&ksocknal_data.ksnd_global_lock);
- if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
+ if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) &&
list_empty(&peer->ksnp_conns) &&
- peer->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer) == NULL) {
+ !peer->ksnp_accepting &&
+ !ksocknal_find_connecting_route_locked(peer)) {
notify = 1;
last_alive = peer->ksnp_last_alive;
}
@@ -1500,7 +1548,7 @@ ksocknal_peer_failed(ksock_peer_t *peer)
if (notify)
lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
- last_alive);
+ last_alive);
}
void
@@ -1508,12 +1556,15 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
{
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
+ ksock_tx_t *temp;
ksock_tx_t *tmp;
LIST_HEAD(zlist);
- /* NB safe to finalize TXs because closing of socket will
- * abort all buffered data */
- LASSERT(conn->ksnc_sock == NULL);
+ /*
+ * NB safe to finalize TXs because closing of socket will
+ * abort all buffered data
+ */
+ LASSERT(!conn->ksnc_sock);
spin_lock(&peer->ksnp_lock);
@@ -1521,7 +1572,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
if (tx->tx_conn != conn)
continue;
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
+ LASSERT(tx->tx_msg.ksm_zc_cookies[0]);
tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_zc_aborted = 1; /* mark it as not-acked */
@@ -1531,9 +1582,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
spin_unlock(&peer->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
-
+ list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
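For reference, list_for_each_entry_safe() is the idiomatic form adopted above for walking a list while unlinking entries: the second cursor caches the next node so the current one can be freed mid-walk. A self-contained sketch (struct item and drain_list() are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
};

static void drain_list(struct list_head *head)
{
	struct item *it, *n;

	list_for_each_entry_safe(it, n, head, link) {
		list_del(&it->link);	/* safe: 'n' already points past it */
		kfree(it);
	}
}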
@@ -1542,10 +1591,12 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
void
ksocknal_terminate_conn(ksock_conn_t *conn)
{
- /* This gets called by the reaper (guaranteed thread context) to
+ /*
+ * This gets called by the reaper (guaranteed thread context) to
* disengage the socket from its callbacks and close it.
* ksnc_refcount will eventually hit zero, and then the reaper will
- * destroy it. */
+ * destroy it.
+ */
ksock_peer_t *peer = conn->ksnc_peer;
ksock_sched_t *sched = conn->ksnc_scheduler;
int failed = 0;
@@ -1561,7 +1612,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
if (!conn->ksnc_tx_scheduled &&
!list_empty(&conn->ksnc_tx_queue)) {
list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
@@ -1576,11 +1627,13 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
- /* OK, so this conn may not be completely disengaged from its
- * scheduler yet, but it _has_ committed to terminate... */
+ /*
+ * OK, so this conn may not be completely disengaged from its
+ * scheduler yet, but it _has_ committed to terminate...
+ */
conn->ksnc_scheduler->kss_nconns--;
- if (peer->ksnp_error != 0) {
+ if (peer->ksnp_error) {
/* peer's last conn closed in error */
LASSERT(list_empty(&peer->ksnp_conns));
failed = 1;
@@ -1592,11 +1645,13 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
if (failed)
ksocknal_peer_failed(peer);
- /* The socket is closed on the final put; either here, or in
+ /*
+ * The socket is closed on the final put; either here, or in
* ksocknal_{send,recv}msg(). Since we set up the linger2 option
* when the connection was established, this will close the socket
* immediately, aborting anything buffered in it. Any hung
- * zero-copy transmits will therefore complete in finite time. */
+ * zero-copy transmits will therefore complete in finite time.
+ */
ksocknal_connsock_decref(conn);
}
@@ -1605,7 +1660,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn)
{
/* Queue the conn for the reaper to destroy */
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
+ LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
@@ -1622,10 +1677,10 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
/* Final coup-de-grace of the reaper */
CDEBUG(D_NET, "connection %p\n", conn);
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
- LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
- LASSERT(conn->ksnc_sock == NULL);
- LASSERT(conn->ksnc_route == NULL);
+ LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
+ LASSERT(!atomic_read(&conn->ksnc_sock_refcount));
+ LASSERT(!conn->ksnc_sock);
+ LASSERT(!conn->ksnc_route);
LASSERT(!conn->ksnc_tx_scheduled);
LASSERT(!conn->ksnc_rx_scheduled);
LASSERT(list_empty(&conn->ksnc_tx_queue));
@@ -1642,7 +1697,7 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
cfs_duration_sec(cfs_time_sub(cfs_time_current(),
last_rcv)));
lnet_finalize(conn->ksnc_peer->ksnp_ni,
- conn->ksnc_cookie, -EIO);
+ conn->ksnc_cookie, -EIO);
break;
case SOCKNAL_RX_LNET_HEADER:
if (conn->ksnc_rx_started)
@@ -1685,8 +1740,7 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
- if (ipaddr == 0 ||
- conn->ksnc_ipaddr == ipaddr) {
+ if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
count++;
ksocknal_close_conn_locked(conn, why);
}
@@ -1724,17 +1778,17 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (id.nid != LNET_NID_ANY)
- lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- else {
+ if (id.nid != LNET_NID_ANY) {
+ lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ } else {
lo = 0;
hi = ksocknal_data.ksnd_peer_hash_size - 1;
}
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
-
+ &ksocknal_data.ksnd_peers[i]) {
peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
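The lo/hi computation above collapses the scan range to a single hash bucket when a specific NID is given, or to the whole table for LNET_NID_ANY; schematically (hash_nid(), HASH_SIZE and scan_bucket() are illustrative stand-ins):

static void for_each_candidate_bucket(lnet_nid_t nid)
{
	int lo, hi, i;

	if (nid != LNET_NID_ANY) {
		lo = hash_nid(nid);	/* exactly one bucket to visit */
		hi = lo;
	} else {
		lo = 0;			/* wildcard: full table scan */
		hi = HASH_SIZE - 1;
	}

	for (i = lo; i <= hi; i++)
		scan_bucket(i);
}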
@@ -1748,10 +1802,10 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
/* wildcards always succeed */
- if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
+ if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr)
return 0;
- if (count == 0)
+ if (!count)
return -ENOENT;
else
return 0;
@@ -1760,15 +1814,17 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
void
ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
{
- /* The router is telling me she's been notified of a change in
- * gateway state.... */
+ /*
+ * The router is telling me she's been notified of a change in
+ * gateway state....
+ */
lnet_process_id_t id = {0};
id.nid = gw_nid;
id.pid = LNET_PID_ANY;
CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
- alive ? "up" : "down");
+ alive ? "up" : "down");
if (!alive) {
/* If the gateway crashed, close all open connections... */
@@ -1776,8 +1832,10 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
return;
}
- /* ...otherwise do nothing. We can only establish new connections
- * if we have autroutes, and these connect on demand. */
+ /*
+ * ...otherwise do nothing. We can only establish new connections
+ * if we have autoroutes, and these connect on demand.
+ */
}
void
@@ -1788,12 +1846,15 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
unsigned long now = cfs_time_current();
ksock_peer_t *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
- lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
+ lnet_process_id_t id = {
+ .nid = nid,
+ .pid = LNET_PID_LUSTRE,
+ };
read_lock(glock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) {
+ if (peer) {
struct list_head *tmp;
ksock_conn_t *conn;
int bufnob;
@@ -1812,13 +1873,13 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
}
last_alive = peer->ksnp_last_alive;
- if (ksocknal_find_connectable_route_locked(peer) == NULL)
+ if (!ksocknal_find_connectable_route_locked(peer))
connect = 0;
}
read_unlock(glock);
- if (last_alive != 0)
+ if (last_alive)
*when = last_alive;
CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
@@ -1834,7 +1895,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
write_lock_bh(glock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL)
+ if (peer)
ksocknal_launch_all_connections_locked(peer);
write_unlock_bh(glock);
@@ -1857,7 +1918,7 @@ ksocknal_push_peer(ksock_peer_t *peer)
list_for_each(tmp, &peer->ksnp_conns) {
if (i++ == index) {
conn = list_entry(tmp, ksock_conn_t,
- ksnc_list);
+ ksnc_list);
ksocknal_conn_addref(conn);
break;
}
@@ -1865,7 +1926,7 @@ ksocknal_push_peer(ksock_peer_t *peer)
read_unlock(&ksocknal_data.ksnd_global_lock);
- if (conn == NULL)
+ if (!conn)
break;
ksocknal_lib_push_conn(conn);
@@ -1885,7 +1946,8 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
start = &ksocknal_data.ksnd_peers[0];
end = &ksocknal_data.ksnd_peers[hsize - 1];
} else {
- start = end = ksocknal_nid2peerlist(id.nid);
+ start = ksocknal_nid2peerlist(id.nid);
+ end = ksocknal_nid2peerlist(id.nid);
}
for (tmp = start; tmp <= end; tmp++) {
@@ -1910,7 +1972,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- if (i == 0) /* no match */
+ if (!i) /* no match */
break;
rc = 0;
@@ -1934,14 +1996,13 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
struct list_head *rtmp;
ksock_route_t *route;
- if (ipaddress == 0 ||
- netmask == 0)
+ if (!ipaddress || !netmask)
return -EINVAL;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
iface = ksocknal_ip2iface(ni, ipaddress);
- if (iface != NULL) {
+ if (iface) {
/* silently ignore dups */
rc = 0;
} else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
@@ -1957,16 +2018,15 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
peer = list_entry(ptmp, ksock_peer_t,
- ksnp_list);
+ ksnp_list);
for (j = 0; j < peer->ksnp_n_passive_ips; j++)
if (peer->ksnp_passive_ips[j] == ipaddress)
iface->ksni_npeers++;
list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp,
- ksock_route_t,
- ksnr_list);
+ route = list_entry(rtmp, ksock_route_t,
+ ksnr_list);
if (route->ksnr_myipaddr == ipaddress)
iface->ksni_nroutes++;
@@ -1995,8 +2055,8 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
for (i = 0; i < peer->ksnp_n_passive_ips; i++)
if (peer->ksnp_passive_ips[i] == ipaddr) {
- for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
- peer->ksnp_passive_ips[j-1] =
+ for (j = i + 1; j < peer->ksnp_n_passive_ips; j++)
+ peer->ksnp_passive_ips[j - 1] =
peer->ksnp_passive_ips[j];
peer->ksnp_n_passive_ips--;
break;
@@ -2008,7 +2068,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
if (route->ksnr_myipaddr != ipaddr)
continue;
- if (route->ksnr_share_count != 0) {
+ if (route->ksnr_share_count) {
/* Manually created; keep, but unbind */
route->ksnr_myipaddr = 0;
} else {
@@ -2041,23 +2101,21 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
for (i = 0; i < net->ksnn_ninterfaces; i++) {
this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
- if (!(ipaddress == 0 ||
- ipaddress == this_ip))
+ if (ipaddress && ipaddress != this_ip)
continue;
rc = 0;
- for (j = i+1; j < net->ksnn_ninterfaces; j++)
- net->ksnn_interfaces[j-1] =
+ for (j = i + 1; j < net->ksnn_ninterfaces; j++)
+ net->ksnn_interfaces[j - 1] =
net->ksnn_interfaces[j];
net->ksnn_ninterfaces--;
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
list_for_each_safe(tmp, nxt,
- &ksocknal_data.ksnd_peers[j]) {
- peer = list_entry(tmp, ksock_peer_t,
- ksnp_list);
+ &ksocknal_data.ksnd_peers[j]) {
+ peer = list_entry(tmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
@@ -2121,7 +2179,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
rc = ksocknal_get_peer_info(ni, data->ioc_count,
&id, &myip, &ip, &port,
&conn_count, &share_count);
- if (rc != 0)
+ if (rc)
return rc;
data->ioc_nid = id.nid;
@@ -2136,7 +2194,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
case IOC_LIBCFS_ADD_PEER:
id.nid = data->ioc_nid;
- id.pid = LUSTRE_SRV_LNET_PID;
+ id.pid = LNET_PID_LUSTRE;
return ksocknal_add_peer(ni, id,
data->ioc_u32[0], /* IP */
data->ioc_u32[1]); /* port */
@@ -2153,7 +2211,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
int nagle;
ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
- if (conn == NULL)
+ if (!conn)
return -ENOENT;
ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
@@ -2202,14 +2260,14 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
static void
ksocknal_free_buffers(void)
{
- LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+ LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs));
- if (ksocknal_data.ksnd_sched_info != NULL) {
+ if (ksocknal_data.ksnd_sched_info) {
struct ksock_sched_info *info;
int i;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds != NULL) {
+ if (info->ksi_scheds) {
LIBCFS_FREE(info->ksi_scheds,
info->ksi_nthreads_max *
sizeof(info->ksi_scheds[0]));
@@ -2219,21 +2277,21 @@ ksocknal_free_buffers(void)
}
LIBCFS_FREE(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
+ sizeof(struct list_head) *
+ ksocknal_data.ksnd_peer_hash_size);
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
struct list_head zlist;
ksock_tx_t *tx;
+ ksock_tx_t *temp;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_list);
+ list_for_each_entry_safe(tx, temp, &zlist, tx_list) {
list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
@@ -2250,7 +2308,7 @@ ksocknal_base_shutdown(void)
int i;
int j;
- LASSERT(ksocknal_data.ksnd_nnets == 0);
+ LASSERT(!ksocknal_data.ksnd_nnets);
switch (ksocknal_data.ksnd_init) {
default:
@@ -2258,7 +2316,7 @@ ksocknal_base_shutdown(void)
case SOCKNAL_INIT_ALL:
case SOCKNAL_INIT_DATA:
- LASSERT(ksocknal_data.ksnd_peers != NULL);
+ LASSERT(ksocknal_data.ksnd_peers);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
@@ -2268,14 +2326,13 @@ ksocknal_base_shutdown(void)
LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
- if (ksocknal_data.ksnd_sched_info != NULL) {
+ if (ksocknal_data.ksnd_sched_info) {
cfs_percpt_for_each(info, i,
ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds == NULL)
+ if (!info->ksi_scheds)
continue;
for (j = 0; j < info->ksi_nthreads_max; j++) {
-
sched = &info->ksi_scheds[j];
LASSERT(list_empty(
&sched->kss_tx_conns));
@@ -2283,7 +2340,7 @@ ksocknal_base_shutdown(void)
&sched->kss_rx_conns));
LASSERT(list_empty(
&sched->kss_zombie_noop_txs));
- LASSERT(sched->kss_nconns == 0);
+ LASSERT(!sched->kss_nconns);
}
}
}
@@ -2293,10 +2350,10 @@ ksocknal_base_shutdown(void)
wake_up_all(&ksocknal_data.ksnd_connd_waitq);
wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
- if (ksocknal_data.ksnd_sched_info != NULL) {
+ if (ksocknal_data.ksnd_sched_info) {
cfs_percpt_for_each(info, i,
ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds == NULL)
+ if (!info->ksi_scheds)
continue;
for (j = 0; j < info->ksi_nthreads_max; j++) {
@@ -2308,7 +2365,7 @@ ksocknal_base_shutdown(void)
i = 4;
read_lock(&ksocknal_data.ksnd_global_lock);
- while (ksocknal_data.ksnd_nthreads != 0) {
+ while (ksocknal_data.ksnd_nthreads) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d threads to terminate\n",
@@ -2332,7 +2389,6 @@ ksocknal_base_shutdown(void)
static __u64
ksocknal_new_incarnation(void)
{
-
/* The incarnation number is the time this module loaded and it
* identifies this particular instance of the socknal.
*/
@@ -2347,15 +2403,15 @@ ksocknal_base_startup(void)
int i;
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
- LASSERT(ksocknal_data.ksnd_nnets == 0);
+ LASSERT(!ksocknal_data.ksnd_nnets);
memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
- if (ksocknal_data.ksnd_peers == NULL)
+ sizeof(struct list_head) *
+ ksocknal_data.ksnd_peer_hash_size);
+ if (!ksocknal_data.ksnd_peers)
return -ENOMEM;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
@@ -2386,7 +2442,7 @@ ksocknal_base_startup(void)
ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*info));
- if (ksocknal_data.ksnd_sched_info == NULL)
+ if (!ksocknal_data.ksnd_sched_info)
goto failed;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
@@ -2397,8 +2453,10 @@ ksocknal_base_startup(void)
if (*ksocknal_tunables.ksnd_nscheds > 0) {
nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
} else {
- /* max to half of CPUs, assume another half should be
- * reserved for upper layer modules */
+ /*
+ * max to half of CPUs, assume another half should be
+ * reserved for upper layer modules
+ */
nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
}
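In isolation, the sizing rule above reads: an explicit ksnd_nscheds tunable caps the thread count directly; otherwise use at most half the partition's CPUs (leaving headroom for upper layers), but never fewer than SOCKNAL_NSCHEDS. A sketch, with ncpus standing in for the CPT weight:

static int sched_nthreads(int ncpus, int tunable)
{
	if (tunable > 0)
		return min(ncpus, tunable);	/* explicit override wins */

	/* default: half the CPUs, floored at SOCKNAL_NSCHEDS, capped at ncpus */
	return min(max(SOCKNAL_NSCHEDS, ncpus >> 1), ncpus);
}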
@@ -2407,7 +2465,7 @@ ksocknal_base_startup(void)
LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
info->ksi_nthreads_max * sizeof(*sched));
- if (info->ksi_scheds == NULL)
+ if (!info->ksi_scheds)
goto failed;
for (; nthrs > 0; nthrs--) {
@@ -2425,8 +2483,10 @@ ksocknal_base_startup(void)
ksocknal_data.ksnd_connd_starting = 0;
ksocknal_data.ksnd_connd_failed_stamp = 0;
ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
- /* must have at least 2 connds to remain responsive to accepts while
- * connecting */
+ /*
+ * must have at least 2 connds to remain responsive to accepts while
+ * connecting
+ */
if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
*ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
@@ -2446,7 +2506,7 @@ ksocknal_base_startup(void)
snprintf(name, sizeof(name), "socknal_cd%02d", i);
rc = ksocknal_thread_start(ksocknal_connd,
(void *)((ulong_ptr_t)i), name);
- if (rc != 0) {
+ if (rc) {
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting--;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
@@ -2456,7 +2516,7 @@ ksocknal_base_startup(void)
}
rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
- if (rc != 0) {
+ if (rc) {
CERROR("Can't spawn socknal reaper: %d\n", rc);
goto failed;
}
@@ -2491,7 +2551,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
}
- if (peer != NULL) {
+ if (peer) {
ksock_route_t *route;
ksock_conn_t *conn;
@@ -2515,9 +2575,9 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
list_for_each(tmp, &peer->ksnp_conns) {
conn = list_entry(tmp, ksock_conn_t, ksnc_list);
CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
- atomic_read(&conn->ksnc_conn_refcount),
- atomic_read(&conn->ksnc_sock_refcount),
- conn->ksnc_type, conn->ksnc_closing);
+ atomic_read(&conn->ksnc_conn_refcount),
+ atomic_read(&conn->ksnc_sock_refcount),
+ conn->ksnc_type, conn->ksnc_closing);
}
}
@@ -2548,7 +2608,7 @@ ksocknal_shutdown(lnet_ni_t *ni)
/* Wait for all peer state to clean up */
i = 2;
spin_lock_bh(&net->ksnn_lock);
- while (net->ksnn_npeers != 0) {
+ while (net->ksnn_npeers) {
spin_unlock_bh(&net->ksnn_lock);
i++;
@@ -2565,15 +2625,15 @@ ksocknal_shutdown(lnet_ni_t *ni)
spin_unlock_bh(&net->ksnn_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
- LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
- LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
+ LASSERT(!net->ksnn_interfaces[i].ksni_npeers);
+ LASSERT(!net->ksnn_interfaces[i].ksni_nroutes);
}
list_del(&net->ksnn_list);
LIBCFS_FREE(net, sizeof(*net));
ksocknal_data.ksnd_nnets--;
- if (ksocknal_data.ksnd_nnets == 0)
+ if (!ksocknal_data.ksnd_nnets)
ksocknal_base_shutdown();
}
@@ -2601,7 +2661,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
continue;
rc = lnet_ipif_query(names[i], &up, &ip, &mask);
- if (rc != 0) {
+ if (rc) {
CWARN("Can't get interface %s info: %d\n",
names[i], rc);
continue;
@@ -2628,7 +2688,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
lnet_ipif_free_enumeration(names, n);
- if (j == 0)
+ if (!j)
CERROR("Can't find any usable interfaces\n");
return j;
@@ -2647,21 +2707,20 @@ ksocknal_search_new_ipif(ksock_net_t *net)
ksock_net_t *tmp;
int j;
- if (colon != NULL) /* ignore alias device */
+ if (colon) /* ignore alias device */
*colon = 0;
- list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
- ksnn_list) {
+ list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
char *ifnam2 =
&tmp->ksnn_interfaces[j].ksni_name[0];
char *colon2 = strchr(ifnam2, ':');
- if (colon2 != NULL)
+ if (colon2)
*colon2 = 0;
- found = strcmp(ifnam, ifnam2) == 0;
- if (colon2 != NULL)
+ found = !strcmp(ifnam, ifnam2);
+ if (colon2)
*colon2 = ':';
}
if (found)
@@ -2669,7 +2728,7 @@ ksocknal_search_new_ipif(ksock_net_t *net)
}
new_ipif += !found;
- if (colon != NULL)
+ if (colon)
*colon = ':';
}
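The colon handling above compares "eth0:1" and "eth0" as the same base device by temporarily NUL-terminating at the colon and restoring it afterwards; as a standalone sketch in plain C:

#include <string.h>

static int same_base_ifname(char *a, char *b)
{
	char *ca = strchr(a, ':');	/* ignore alias suffixes */
	char *cb = strchr(b, ':');
	int same;

	if (ca)
		*ca = 0;
	if (cb)
		*cb = 0;
	same = !strcmp(a, b);
	if (ca)
		*ca = ':';		/* restore the original strings */
	if (cb)
		*cb = ':';

	return same;
}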
@@ -2683,7 +2742,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
int rc = 0;
int i;
- if (info->ksi_nthreads == 0) {
+ if (!info->ksi_nthreads) {
if (*ksocknal_tunables.ksnd_nscheds > 0) {
nthrs = info->ksi_nthreads_max;
} else {
@@ -2711,7 +2770,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
rc = ksocknal_thread_start(ksocknal_scheduler,
(void *)id, name);
- if (rc == 0)
+ if (!rc)
continue;
CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
@@ -2734,7 +2793,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
for (i = 0; i < ncpts; i++) {
struct ksock_sched_info *info;
- int cpt = (cpts == NULL) ? i : cpts[i];
+ int cpt = !cpts ? i : cpts[i];
LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
info = ksocknal_data.ksnd_sched_info[cpt];
@@ -2743,7 +2802,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
continue;
rc = ksocknal_start_schedulers(info);
- if (rc != 0)
+ if (rc)
return rc;
}
return 0;
@@ -2760,12 +2819,12 @@ ksocknal_startup(lnet_ni_t *ni)
if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
rc = ksocknal_base_startup();
- if (rc != 0)
+ if (rc)
return rc;
}
LIBCFS_ALLOC(net, sizeof(*net));
- if (net == NULL)
+ if (!net)
goto fail_0;
spin_lock_init(&net->ksnn_lock);
@@ -2776,7 +2835,7 @@ ksocknal_startup(lnet_ni_t *ni)
ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits;
ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
- if (ni->ni_interfaces[0] == NULL) {
+ if (!ni->ni_interfaces[0]) {
rc = ksocknal_enumerate_interfaces(net);
if (rc <= 0)
goto fail_1;
@@ -2786,14 +2845,14 @@ ksocknal_startup(lnet_ni_t *ni)
for (i = 0; i < LNET_MAX_INTERFACES; i++) {
int up;
- if (ni->ni_interfaces[i] == NULL)
+ if (!ni->ni_interfaces[i])
break;
rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
- &net->ksnn_interfaces[i].ksni_ipaddr,
- &net->ksnn_interfaces[i].ksni_netmask);
+ &net->ksnn_interfaces[i].ksni_ipaddr,
+ &net->ksnn_interfaces[i].ksni_netmask);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't get interface %s info: %d\n",
ni->ni_interfaces[i], rc);
goto fail_1;
@@ -2814,7 +2873,7 @@ ksocknal_startup(lnet_ni_t *ni)
/* call it before add it to ksocknal_data.ksnd_nets */
rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0)
+ if (rc)
goto fail_1;
ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
@@ -2828,20 +2887,18 @@ ksocknal_startup(lnet_ni_t *ni)
fail_1:
LIBCFS_FREE(net, sizeof(*net));
fail_0:
- if (ksocknal_data.ksnd_nnets == 0)
+ if (!ksocknal_data.ksnd_nnets)
ksocknal_base_shutdown();
return -ENETDOWN;
}
-static void __exit
-ksocknal_module_fini(void)
+static void __exit ksocklnd_exit(void)
{
lnet_unregister_lnd(&the_ksocklnd);
}
-static int __init
-ksocknal_module_init(void)
+static int __init ksocklnd_init(void)
{
int rc;
@@ -2861,7 +2918,7 @@ ksocknal_module_init(void)
the_ksocklnd.lnd_accept = ksocknal_accept;
rc = ksocknal_tunables_init();
- if (rc != 0)
+ if (rc)
return rc;
lnet_register_lnd(&the_ksocklnd);
@@ -2870,9 +2927,9 @@ ksocknal_module_init(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
+MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
+MODULE_VERSION("2.7.0");
MODULE_LICENSE("GPL");
-MODULE_VERSION("3.0.0");
-module_init(ksocknal_module_init);
-module_exit(ksocknal_module_fini);
+module_init(ksocklnd_init);
+module_exit(ksocklnd_exit);
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index f4fa72550657..a60d72f9432f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _SOCKLND_SOCKLND_H_
@@ -69,8 +65,10 @@
#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
-/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
- * no risk if we're not running on a CONFIG_HIGHMEM platform. */
+/*
+ * risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
+ * no risk if we're not running on a CONFIG_HIGHMEM platform.
+ */
#ifdef CONFIG_HIGHMEM
# define SOCKNAL_RISK_KMAP_DEADLOCK 0
#else
@@ -237,15 +235,16 @@ typedef struct {
#define SOCKNAL_INIT_DATA 1
#define SOCKNAL_INIT_ALL 2
-/* A packet just assembled for transmission is represented by 1 or more
+/*
+ * A packet just assembled for transmission is represented by 1 or more
* struct iovec fragments (the first frag contains the portals header),
* followed by 0 or more lnet_kiov_t fragments.
*
* On the receive side, initially 1 struct iovec fragment is posted for
* receive (the header). Once the header has been received, the payload is
* received into either struct iovec or lnet_kiov_t fragments, depending on
- * what the header matched or whether the message needs forwarding. */
-
+ * what the header matched or whether the message needs forwarding.
+ */
struct ksock_conn; /* forward ref */
struct ksock_peer; /* forward ref */
struct ksock_route; /* forward ref */
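Concretely, the invariant implied by the layout above is that a message's fragments account for exactly its byte count; a sketch of the check that ksocknal_queue_tx_locked() asserts (illustrative helper over the real kvec/lnet_kiov_t fields):

static unsigned int nob_in_frags(struct kvec *iov, int niov,
				 lnet_kiov_t *kiov, int nkiov)
{
	unsigned int nob = 0;

	while (niov-- > 0)		/* mapped frags, header first */
		nob += (iov++)->iov_len;
	while (nkiov-- > 0)		/* page frags carrying payload */
		nob += (kiov++)->kiov_len;

	return nob;			/* must equal tx->tx_nob */
}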
@@ -284,12 +283,14 @@ typedef struct /* transmit packet */
} tx_frags;
} ksock_tx_t;
-#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
+#define KSOCK_NOOP_TX_SIZE (offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
/* network zero copy callback descriptor embedded in ksock_tx_t */
-/* space for the rx frag descriptors; we either read a single contiguous
- * header, or up to LNET_MAX_IOV frags of payload of either type. */
+/*
+ * space for the rx frag descriptors; we either read a single contiguous
+ * header, or up to LNET_MAX_IOV frags of payload of either type.
+ */
typedef union {
struct kvec iov[LNET_MAX_IOV];
lnet_kiov_t kiov[LNET_MAX_IOV];
@@ -463,11 +464,13 @@ typedef struct ksock_proto {
/* handle ZC ACK */
int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);
- /* msg type matches the connection type:
+ /*
+ * msg type matches the connection type:
* return value:
* return MATCH_NO : no
* return MATCH_YES : matching type
- * return MATCH_MAY : can be backup */
+ * return MATCH_MAY : can be backup
+ */
int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
} ksock_proto_t;
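A minimal pro_match_tx() honouring the contract above might look like this; the real protocol handlers also weigh payload size and the nonblk flag, so this is only a sketch (conn-type and match constants as named elsewhere in the LND):

static int example_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
{
	switch (conn->ksnc_type) {
	case SOCKLND_CONN_ANY:
		return SOCKNAL_MATCH_YES;	/* untyped conn takes anything */
	case SOCKLND_CONN_CONTROL:
		/* control traffic (e.g. ZC ACKs with no lnet msg) fits best */
		return !tx->tx_lnetmsg ? SOCKNAL_MATCH_YES : SOCKNAL_MATCH_MAY;
	default:
		return SOCKNAL_MATCH_MAY;	/* usable as a backup */
	}
}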
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 477b385f15e0..976fd78926e0 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -19,9 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "socklnd.h"
@@ -47,10 +44,10 @@ ksocknal_alloc_tx(int type, int size)
spin_unlock(&ksocknal_data.ksnd_tx_lock);
}
- if (tx == NULL)
+ if (!tx)
LIBCFS_ALLOC(tx, size);
- if (tx == NULL)
+ if (!tx)
return NULL;
atomic_set(&tx->tx_refcount, 1);
@@ -70,7 +67,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
ksock_tx_t *tx;
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate noop tx desc\n");
return NULL;
}
@@ -90,11 +87,11 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
}
void
-ksocknal_free_tx (ksock_tx_t *tx)
+ksocknal_free_tx(ksock_tx_t *tx)
{
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
- if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
+ if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
/* it's a noop tx */
spin_lock(&ksocknal_data.ksnd_tx_lock);
@@ -107,7 +104,7 @@ ksocknal_free_tx (ksock_tx_t *tx)
}
static int
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
struct kvec *iov = tx->tx_iov;
int nob;
@@ -122,7 +119,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
return rc;
nob = rc;
- LASSERT (nob <= tx->tx_resid);
+ LASSERT(nob <= tx->tx_resid);
tx->tx_resid -= nob;
/* "consume" iov */
@@ -138,19 +135,19 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
nob -= iov->iov_len;
tx->tx_iov = ++iov;
tx->tx_niov--;
- } while (nob != 0);
+ } while (nob);
return rc;
}
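Both the iov and kiov send paths share the "consume" idiom above: after a partial transfer of nob bytes, whole fragments are retired and the first unfinished one is advanced in place. A standalone sketch over a kvec array (hypothetical helper mirroring the loop in ksocknal_send_iov()):

static void consume_iov(struct kvec **piov, int *pniov, int nob)
{
	struct kvec *iov = *piov;

	while (nob > 0) {
		LASSERT(*pniov > 0);

		if (nob < (int)iov->iov_len) {
			/* partial fragment: shrink it in place */
			iov->iov_base = (char *)iov->iov_base + nob;
			iov->iov_len -= nob;
			nob = 0;
		} else {
			/* fragment fully sent: retire it */
			nob -= iov->iov_len;
			iov++;
			(*pniov)--;
		}
	}
	*piov = iov;
}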
static int
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
{
lnet_kiov_t *kiov = tx->tx_kiov;
int nob;
int rc;
- LASSERT(tx->tx_niov == 0);
+ LASSERT(!tx->tx_niov);
LASSERT(tx->tx_nkiov > 0);
/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
@@ -160,7 +157,7 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
return rc;
nob = rc;
- LASSERT (nob <= tx->tx_resid);
+ LASSERT(nob <= tx->tx_resid);
tx->tx_resid -= nob;
/* "consume" kiov */
@@ -176,27 +173,27 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
nob -= (int)kiov->kiov_len;
tx->tx_kiov = ++kiov;
tx->tx_nkiov--;
- } while (nob != 0);
+ } while (nob);
return rc;
}
static int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
int rc;
int bufnob;
- if (ksocknal_data.ksnd_stall_tx != 0) {
+ if (ksocknal_data.ksnd_stall_tx) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
}
- LASSERT(tx->tx_resid != 0);
+ LASSERT(tx->tx_resid);
rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT (conn->ksnc_closing);
+ if (rc) {
+ LASSERT(conn->ksnc_closing);
return -ESHUTDOWN;
}
@@ -205,10 +202,10 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
/* testing... */
ksocknal_data.ksnd_enomem_tx--;
rc = -EAGAIN;
- } else if (tx->tx_niov != 0) {
- rc = ksocknal_send_iov (conn, tx);
+ } else if (tx->tx_niov) {
+ rc = ksocknal_send_iov(conn, tx);
} else {
- rc = ksocknal_send_kiov (conn, tx);
+ rc = ksocknal_send_kiov(conn, tx);
}
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
@@ -216,8 +213,10 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
conn->ksnc_tx_bufnob += rc; /* account it */
if (bufnob < conn->ksnc_tx_bufnob) {
- /* allocated send buffer bytes < computed; infer
- * something got ACKed */
+ /*
+ * allocated send buffer bytes < computed; infer
+ * something got ACKed
+ */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
@@ -227,7 +226,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
if (rc <= 0) { /* Didn't write anything? */
- if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
+ if (!rc) /* some stacks return 0 instead of -EAGAIN */
rc = -EAGAIN;
/* Check if EAGAIN is due to memory pressure */
@@ -238,17 +237,17 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
}
/* socket's wmem_queued now includes 'rc' bytes */
- atomic_sub (rc, &conn->ksnc_tx_nob);
+ atomic_sub(rc, &conn->ksnc_tx_nob);
rc = 0;
- } while (tx->tx_resid != 0);
+ } while (tx->tx_resid);
ksocknal_connsock_decref(conn);
return rc;
}
static int
-ksocknal_recv_iov (ksock_conn_t *conn)
+ksocknal_recv_iov(ksock_conn_t *conn)
{
struct kvec *iov = conn->ksnc_rx_iov;
int nob;
@@ -256,8 +255,10 @@ ksocknal_recv_iov (ksock_conn_t *conn)
LASSERT(conn->ksnc_rx_niov > 0);
- /* Never touch conn->ksnc_rx_iov or change connection
- * status inside ksocknal_lib_recv_iov */
+ /*
+ * Never touch conn->ksnc_rx_iov or change connection
+ * status inside ksocknal_lib_recv_iov
+ */
rc = ksocknal_lib_recv_iov(conn);
if (rc <= 0)
@@ -287,13 +288,13 @@ ksocknal_recv_iov (ksock_conn_t *conn)
nob -= iov->iov_len;
conn->ksnc_rx_iov = ++iov;
conn->ksnc_rx_niov--;
- } while (nob != 0);
+ } while (nob);
return rc;
}
static int
-ksocknal_recv_kiov (ksock_conn_t *conn)
+ksocknal_recv_kiov(ksock_conn_t *conn)
{
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
int nob;
@@ -301,8 +302,10 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
LASSERT(conn->ksnc_rx_nkiov > 0);
- /* Never touch conn->ksnc_rx_kiov or change connection
- * status inside ksocknal_lib_recv_iov */
+ /*
+ * Never touch conn->ksnc_rx_kiov or change connection
+ * status inside ksocknal_lib_recv_kiov
+ */
rc = ksocknal_lib_recv_kiov(conn);
if (rc <= 0)
@@ -332,41 +335,43 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
nob -= kiov->kiov_len;
conn->ksnc_rx_kiov = ++kiov;
conn->ksnc_rx_nkiov--;
- } while (nob != 0);
+ } while (nob);
return 1;
}
static int
-ksocknal_receive (ksock_conn_t *conn)
+ksocknal_receive(ksock_conn_t *conn)
{
- /* Return 1 on success, 0 on EOF, < 0 on error.
+ /*
+ * Return 1 on success, 0 on EOF, < 0 on error.
* Caller checks ksnc_rx_nob_wanted to determine
- * progress/completion. */
+ * progress/completion.
+ */
int rc;
- if (ksocknal_data.ksnd_stall_rx != 0) {
+ if (ksocknal_data.ksnd_stall_rx) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
}
rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT (conn->ksnc_closing);
+ if (rc) {
+ LASSERT(conn->ksnc_closing);
return -ESHUTDOWN;
}
for (;;) {
- if (conn->ksnc_rx_niov != 0)
- rc = ksocknal_recv_iov (conn);
+ if (conn->ksnc_rx_niov)
+ rc = ksocknal_recv_iov(conn);
else
- rc = ksocknal_recv_kiov (conn);
+ rc = ksocknal_recv_kiov(conn);
if (rc <= 0) {
/* error/EOF or partial receive */
if (rc == -EAGAIN) {
rc = 1;
- } else if (rc == 0 && conn->ksnc_rx_started) {
+ } else if (!rc && conn->ksnc_rx_started) {
/* EOF in the middle of a message */
rc = -EPROTO;
}
@@ -375,7 +380,7 @@ ksocknal_receive (ksock_conn_t *conn)
/* Completed a fragment */
- if (conn->ksnc_rx_nob_wanted == 0) {
+ if (!conn->ksnc_rx_nob_wanted) {
rc = 1;
break;
}
@@ -386,36 +391,36 @@ ksocknal_receive (ksock_conn_t *conn)
}
void
-ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
{
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
- int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
+ int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
- LASSERT(ni != NULL || tx->tx_conn != NULL);
+ LASSERT(ni || tx->tx_conn);
- if (tx->tx_conn != NULL)
+ if (tx->tx_conn)
ksocknal_conn_decref(tx->tx_conn);
- if (ni == NULL && tx->tx_conn != NULL)
+ if (!ni && tx->tx_conn)
ni = tx->tx_conn->ksnc_peer->ksnp_ni;
- ksocknal_free_tx (tx);
- if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
- lnet_finalize (ni, lnetmsg, rc);
+ ksocknal_free_tx(tx);
+ if (lnetmsg) /* KSOCK_MSG_NOOP go without lnetmsg */
+ lnet_finalize(ni, lnetmsg, rc);
}
void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
ksock_tx_t *tx;
- while (!list_empty (txlist)) {
+ while (!list_empty(txlist)) {
tx = list_entry(txlist->next, ksock_tx_t, tx_list);
- if (error && tx->tx_lnetmsg != NULL) {
+ if (error && tx->tx_lnetmsg) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
- le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
- le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
+ le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
+ le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
} else if (error) {
@@ -435,12 +440,14 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
ksock_conn_t *conn = tx->tx_conn;
ksock_peer_t *peer = conn->ksnc_peer;
- /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
+ /*
+ * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
* to ksnp_zc_req_list if some fragment of this message should be sent
* zero-copy. Our peer will send an ACK containing this cookie when
* she has received this message to tell us we can signal completion.
* tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
- * ksnp_zc_req_list. */
+ * ksnp_zc_req_list.
+ */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
@@ -450,9 +457,10 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
!conn->ksnc_zc_capable)
return;
- /* assign cookie and queue tx to pending list, it will be released when
- * a matching ack is received. See ksocknal_handle_zcack() */
-
+ /*
+ * assign cookie and queue tx to pending list, it will be released when
+ * a matching ack is received. See ksocknal_handle_zcack()
+ */
ksocknal_tx_addref(tx);
spin_lock(&peer->ksnp_lock);
@@ -461,11 +469,11 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
tx->tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
+ LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
- if (peer->ksnp_zc_next_cookie == 0)
+ if (!peer->ksnp_zc_next_cookie)
peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
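The cookie discipline described in the comment above reduces to a small allocator: cookies stay non-zero while a tx is queued, and the counter skips both zero and the reserved keepalive value when it wraps. A sketch (caller holds ksnp_lock, as in the surrounding code):

static __u64 next_zc_cookie(ksock_peer_t *peer)
{
	__u64 cookie = peer->ksnp_zc_next_cookie++;

	if (!peer->ksnp_zc_next_cookie)		/* wrapped past zero */
		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	return cookie;
}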
@@ -485,7 +493,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
spin_lock(&peer->ksnp_lock);
- if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+ if (!tx->tx_msg.ksm_zc_cookies[0]) {
/* Not waiting for an ACK */
spin_unlock(&peer->ksnp_lock);
return;
@@ -500,20 +508,20 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
}
static int
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
int rc;
if (tx->tx_zc_capable && !tx->tx_zc_checked)
ksocknal_check_zc_req(tx);
- rc = ksocknal_transmit (conn, tx);
+ rc = ksocknal_transmit(conn, tx);
CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
- if (tx->tx_resid == 0) {
+ if (!tx->tx_resid) {
/* Sent everything OK */
- LASSERT (rc == 0);
+ LASSERT(!rc);
return 0;
}
@@ -532,13 +540,13 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
/* enomem list takes over scheduler's ref... */
- LASSERT (conn->ksnc_tx_scheduled);
+ LASSERT(conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
- &ksocknal_data.ksnd_enomem_conns);
+ &ksocknal_data.ksnd_enomem_conns);
if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
SOCKNAL_ENOMEM_RETRY),
ksocknal_data.ksnd_reaper_waketime))
- wake_up (&ksocknal_data.ksnd_reaper_waitq);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
return rc;
@@ -569,21 +577,19 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
ksocknal_uncheck_zc_req(tx);
/* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings (conn,
- (conn->ksnc_closing) ? 0 : rc);
+ ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 0 : rc);
return rc;
}
static void
-ksocknal_launch_connection_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked(ksock_route_t *route)
{
-
/* called holding write lock on ksnd_global_lock */
LASSERT(!route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting);
- LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
+ LASSERT(ksocknal_route_mask() & ~route->ksnr_connected);
route->ksnr_scheduled = 1; /* scheduling conn for connd */
ksocknal_route_addref(route); /* extra ref for connd */
@@ -591,14 +597,14 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
list_add_tail(&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
+ &ksocknal_data.ksnd_connd_routes);
wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
{
ksock_route_t *route;
@@ -606,7 +612,7 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
for (;;) {
/* launch any/all connections that need it */
route = ksocknal_find_connectable_route_locked(peer);
- if (route == NULL)
+ if (!route)
return;
ksocknal_launch_connection_locked(route);
@@ -623,15 +629,15 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
int tnob = 0;
int fnob = 0;
- list_for_each (tmp, &peer->ksnp_conns) {
+ list_for_each(tmp, &peer->ksnp_conns) {
ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
int nob = atomic_read(&c->ksnc_tx_nob) +
c->ksnc_sock->sk->sk_wmem_queued;
int rc;
LASSERT(!c->ksnc_closing);
- LASSERT(c->ksnc_proto != NULL &&
- c->ksnc_proto->pro_match_tx != NULL);
+ LASSERT(c->ksnc_proto &&
+ c->ksnc_proto->pro_match_tx);
rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
@@ -642,7 +648,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
continue;
case SOCKNAL_MATCH_YES: /* typed connection */
- if (typed == NULL || tnob > nob ||
+ if (!typed || tnob > nob ||
(tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
typed = c;
@@ -651,7 +657,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
break;
case SOCKNAL_MATCH_MAY: /* fallback connection */
- if (fallback == NULL || fnob > nob ||
+ if (!fallback || fnob > nob ||
(fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
fallback = c;
@@ -662,9 +668,9 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
}
/* prefer the typed selection */
- conn = (typed != NULL) ? typed : fallback;
+ conn = (typed) ? typed : fallback;
- if (conn != NULL)
+ if (conn)
conn->ksnc_tx_last_post = cfs_time_current();
return conn;
@@ -675,48 +681,51 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
{
conn->ksnc_proto->pro_pack(tx);
- atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+ atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
ksocknal_conn_addref(conn); /* +1 ref for tx */
tx->tx_conn = conn;
}
void
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
{
ksock_sched_t *sched = conn->ksnc_scheduler;
ksock_msg_t *msg = &tx->tx_msg;
ksock_tx_t *ztx = NULL;
int bufnob = 0;
- /* called holding global lock (read or irq-write) and caller may
+ /*
+ * called holding global lock (read or irq-write) and caller may
* not have dropped this lock between finding conn and calling me,
* so we don't need the {get,put}connsock dance to deref
- * ksnc_sock... */
+ * ksnc_sock...
+ */
LASSERT(!conn->ksnc_closing);
CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr, conn->ksnc_port);
ksocknal_tx_prep(conn, tx);
- /* Ensure the frags we've been given EXACTLY match the number of
+ /*
+ * Ensure the frags we've been given EXACTLY match the number of
* bytes we want to send. Many TCP/IP stacks disregard any total
* size parameters passed to them and just look at the frags.
*
* We always expect at least 1 mapped fragment containing the
- * complete ksocknal message header. */
- LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+ * complete ksocknal message header.
+ */
+ LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
(unsigned int)tx->tx_nob);
LASSERT(tx->tx_niov >= 1);
LASSERT(tx->tx_resid == tx->tx_nob);
- CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
- tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
- KSOCK_MSG_NOOP,
- tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
+ CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
+ tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type :
+ KSOCK_MSG_NOOP,
+ tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
/*
* FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
@@ -725,7 +734,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
spin_lock_bh(&sched->kss_lock);
- if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+ if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
@@ -736,26 +745,30 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
}
if (msg->ksm_type == KSOCK_MSG_NOOP) {
- /* The packet is noop ZC ACK, try to piggyback the ack_cookie
- * on a normal packet so I don't need to send it */
- LASSERT(msg->ksm_zc_cookies[1] != 0);
- LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+ /*
+ * The packet is noop ZC ACK, try to piggyback the ack_cookie
+ * on a normal packet so I don't need to send it
+ */
+ LASSERT(msg->ksm_zc_cookies[1]);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
} else {
- /* It's a normal packet - can it piggback a noop zc-ack that
- * has been queued already? */
- LASSERT(msg->ksm_zc_cookies[1] == 0);
- LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);
+ /*
+ * It's a normal packet - can it piggyback a noop zc-ack
+ * has been queued already?
+ */
+ LASSERT(!msg->ksm_zc_cookies[1]);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_msg);
ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
/* ztx will be released later */
}
- if (ztx != NULL) {
- atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+ if (ztx) {
+ atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
@@ -763,24 +776,23 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
!conn->ksnc_tx_scheduled) { /* not scheduled to send */
/* +1 ref for scheduler */
ksocknal_conn_addref(conn);
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
- wake_up (&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
}
spin_unlock_bh(&sched->kss_lock);
}
ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
{
unsigned long now = cfs_time_current();
struct list_head *tmp;
ksock_route_t *route;
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
@@ -788,10 +800,10 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
continue;
/* all route types connected ? */
- if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
+ if (!(ksocknal_route_mask() & ~route->ksnr_connected))
continue;
- if (!(route->ksnr_retry_interval == 0 || /* first attempt */
+ if (!(!route->ksnr_retry_interval || /* first attempt */
cfs_time_aftereq(now, route->ksnr_timeout))) {
CDEBUG(D_NET,
"Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
@@ -809,13 +821,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
}
ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
{
struct list_head *tmp;
ksock_route_t *route;
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
@@ -827,7 +839,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
}
int
-ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
{
ksock_peer_t *peer;
ksock_conn_t *conn;
@@ -835,21 +847,23 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int retry;
int rc;
- LASSERT(tx->tx_conn == NULL);
+ LASSERT(!tx->tx_conn);
g_lock = &ksocknal_data.ksnd_global_lock;
for (retry = 0;; retry = 1) {
read_lock(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) {
- if (ksocknal_find_connectable_route_locked(peer) == NULL) {
+ if (peer) {
+ if (!ksocknal_find_connectable_route_locked(peer)) {
conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
- if (conn != NULL) {
- /* I've got no routes that need to be
+ if (conn) {
+ /*
+ * I've got no routes that need to be
* connecting and I do have an actual
- * connection... */
- ksocknal_queue_tx_locked (tx, conn);
+ * connection...
+ */
+ ksocknal_queue_tx_locked(tx, conn);
read_unlock(g_lock);
return 0;
}
@@ -862,12 +876,12 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
write_lock_bh(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL)
+ if (peer)
break;
write_unlock_bh(g_lock);
- if ((id.pid & LNET_PID_USERFLAG) != 0) {
+ if (id.pid & LNET_PID_USERFLAG) {
CERROR("Refusing to create a connection to userspace process %s\n",
libcfs_id2str(id));
return -EHOSTUNREACH;
@@ -881,7 +895,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
rc = ksocknal_add_peer(ni, id,
LNET_NIDADDR(id.nid),
lnet_acceptor_port());
- if (rc != 0) {
+ if (rc) {
CERROR("Can't add peer %s: %d\n",
libcfs_id2str(id), rc);
return rc;
@@ -891,21 +905,21 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
ksocknal_launch_all_connections_locked(peer);
conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
- if (conn != NULL) {
+ if (conn) {
/* Connection exists; queue message on it */
- ksocknal_queue_tx_locked (tx, conn);
+ ksocknal_queue_tx_locked(tx, conn);
write_unlock_bh(g_lock);
return 0;
}
if (peer->ksnp_accepting > 0 ||
- ksocknal_find_connecting_route_locked (peer) != NULL) {
+ ksocknal_find_connecting_route_locked(peer)) {
/* the message is going to be pinned to the peer */
tx->tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
/* Queue the message until a connection is established */
- list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+ list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
write_unlock_bh(g_lock);
return 0;
}
@@ -932,19 +946,20 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
int desc_size;
int rc;
- /* NB 'private' is different depending on what we're sending.
- * Just ignore it... */
-
+ /*
+ * NB 'private' is different depending on what we're sending.
+ * Just ignore it...
+ */
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT(payload_nob == 0 || payload_niov > 0);
+ LASSERT(!payload_nob || payload_niov > 0);
LASSERT(payload_niov <= LNET_MAX_IOV);
/* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!in_interrupt ());
+ LASSERT(!(payload_kiov && payload_iov));
+ LASSERT(!in_interrupt());
- if (payload_iov != NULL)
+ if (payload_iov)
desc_size = offsetof(ksock_tx_t,
tx_frags.virt.iov[1 + payload_niov]);
else
@@ -954,7 +969,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
if (lntmsg->msg_vmflush)
mpflag = cfs_memory_pressure_get_and_set();
tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate tx desc type %d size %d\n",
type, desc_size);
if (lntmsg->msg_vmflush)
@@ -965,7 +980,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
tx->tx_conn = NULL; /* set when assigned a conn */
tx->tx_lnetmsg = lntmsg;
- if (payload_iov != NULL) {
+ if (payload_iov) {
tx->tx_kiov = NULL;
tx->tx_nkiov = 0;
tx->tx_iov = tx->tx_frags.virt.iov;
@@ -992,7 +1007,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
if (!mpflag)
cfs_memory_pressure_restore(mpflag);
- if (rc == 0)
+ if (!rc)
return 0;
ksocknal_free_tx(tx);
@@ -1014,7 +1029,7 @@ ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
}
void
-ksocknal_thread_fini (void)
+ksocknal_thread_fini(void)
{
write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads--;
@@ -1022,7 +1037,7 @@ ksocknal_thread_fini (void)
}
int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
@@ -1030,14 +1045,14 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
unsigned int niov;
int skipped;
- LASSERT(conn->ksnc_proto != NULL);
+ LASSERT(conn->ksnc_proto);
- if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
+ if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) {
/* Remind the socket to ack eagerly... */
ksocknal_lib_eager_ack(conn);
}
- if (nob_to_skip == 0) { /* right at next packet boundary now */
+ if (!nob_to_skip) { /* right at next packet boundary now */
conn->ksnc_rx_started = 0;
mb(); /* racing with timeout thread */
@@ -1061,11 +1076,11 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+ conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
break;
default:
- LBUG ();
+ LBUG();
}
conn->ksnc_rx_niov = 1;
@@ -1075,9 +1090,10 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
return 1;
}
- /* Set up to skip as much as possible now. If there's more left
- * (ran out of iov entries) we'll get called again */
-
+ /*
+ * Set up to skip as much as possible now. If there's more left
+ * (ran out of iov entries) we'll get called again
+ */
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
conn->ksnc_rx_nob_left = nob_to_skip;
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
@@ -1093,8 +1109,8 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
skipped += nob;
nob_to_skip -= nob;
- } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
- niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+ } while (nob_to_skip && /* mustn't overflow conn's rx iov */
+ niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
conn->ksnc_rx_niov = niov;
conn->ksnc_rx_kiov = NULL;
@@ -1104,13 +1120,13 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
}
static int
-ksocknal_process_receive (ksock_conn_t *conn)
+ksocknal_process_receive(ksock_conn_t *conn)
{
lnet_hdr_t *lhdr;
lnet_process_id_t *id;
int rc;
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
@@ -1119,13 +1135,13 @@ ksocknal_process_receive (ksock_conn_t *conn)
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
again:
- if (conn->ksnc_rx_nob_wanted != 0) {
+ if (conn->ksnc_rx_nob_wanted) {
rc = ksocknal_receive(conn);
if (rc <= 0) {
- LASSERT (rc != -EAGAIN);
+ LASSERT(rc != -EAGAIN);
- if (rc == 0)
+ if (!rc)
CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
conn,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
@@ -1139,12 +1155,12 @@ ksocknal_process_receive (ksock_conn_t *conn)
conn->ksnc_port);
/* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings (conn,
- (conn->ksnc_closing) ? 0 : rc);
- return (rc == 0 ? -ESHUTDOWN : rc);
+ ksocknal_close_conn_and_siblings(conn,
+ (conn->ksnc_closing) ? 0 : rc);
+ return (!rc ? -ESHUTDOWN : rc);
}
- if (conn->ksnc_rx_nob_wanted != 0) {
+ if (conn->ksnc_rx_nob_wanted) {
/* short read */
return -EAGAIN;
}
@@ -1169,7 +1185,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
}
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
- conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ conn->ksnc_msg.ksm_csum && /* has checksum */
conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
/* NOOP Checksum error */
CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
@@ -1180,10 +1196,10 @@ ksocknal_process_receive (ksock_conn_t *conn)
return -EIO;
}
- if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
+ if (conn->ksnc_msg.ksm_zc_cookies[1]) {
__u64 cookie = 0;
- LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
+ LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
cookie = conn->ksnc_msg.ksm_zc_cookies[0];
@@ -1191,7 +1207,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
conn->ksnc_msg.ksm_zc_cookies[1]);
- if (rc != 0) {
+ if (rc) {
CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
@@ -1202,7 +1218,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
}
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
- ksocknal_new_packet (conn, 0);
+ ksocknal_new_packet(conn, 0);
return 0; /* NOOP is done and just return */
}
@@ -1224,7 +1240,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
/* unpack message header */
conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
- if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
+ if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) {
/* Userspace peer */
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
id = &conn->ksnc_peer->ksnp_id;
@@ -1243,14 +1259,14 @@ ksocknal_process_receive (ksock_conn_t *conn)
if (rc < 0) {
/* I just received garbage: give up on this conn */
ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings (conn, rc);
+ ksocknal_close_conn_and_siblings(conn, rc);
ksocknal_conn_decref(conn);
return -EPROTO;
}
/* I'm racing with ksocknal_recv() */
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
+ LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
return 0;
@@ -1262,8 +1278,8 @@ ksocknal_process_receive (ksock_conn_t *conn)
/* payload all received */
rc = 0;
- if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
- conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ if (!conn->ksnc_rx_nob_left && /* not truncating */
+ conn->ksnc_msg.ksm_csum && /* has checksum */
conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
@@ -1271,7 +1287,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
rc = -EIO;
}
- if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
+ if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) {
LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
@@ -1285,16 +1301,16 @@ ksocknal_process_receive (ksock_conn_t *conn)
lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
- if (rc != 0) {
+ if (rc) {
ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings (conn, rc);
+ ksocknal_close_conn_and_siblings(conn, rc);
return -EPROTO;
}
/* Fall through */
case SOCKNAL_RX_SLOP:
/* starting new packet? */
- if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
+ if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
return 0; /* come back later */
goto again; /* try to finish reading slop now */
@@ -1308,9 +1324,9 @@ ksocknal_process_receive (ksock_conn_t *conn)
}
int
-ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
{
ksock_conn_t *conn = private;
ksock_sched_t *sched = conn->ksnc_scheduler;
@@ -1322,7 +1338,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_nob_left = rlen;
- if (mlen == 0 || iov != NULL) {
+ if (!mlen || iov) {
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
@@ -1349,8 +1365,8 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_PARSE_WAIT:
list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
- wake_up (&sched->kss_waitq);
- LASSERT (conn->ksnc_rx_ready);
+ wake_up(&sched->kss_waitq);
+ LASSERT(conn->ksnc_rx_ready);
break;
case SOCKNAL_RX_PARSE:
@@ -1396,7 +1412,7 @@ int ksocknal_scheduler(void *arg)
cfs_block_allsigs();
rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't set CPT affinity to %d: %d\n",
info->ksi_cpt, rc);
}
@@ -1408,18 +1424,20 @@ int ksocknal_scheduler(void *arg)
/* Ensure I progress everything semi-fairly */
- if (!list_empty (&sched->kss_rx_conns)) {
+ if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
+ ksock_conn_t, ksnc_rx_list);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
LASSERT(conn->ksnc_rx_ready);
- /* clear rx_ready in case receive isn't complete.
+ /*
+ * clear rx_ready in case receive isn't complete.
* Do it BEFORE we call process_recv, since
* data_ready can set it any time after we release
- * kss_lock. */
+ * kss_lock.
+ */
conn->ksnc_rx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
@@ -1431,18 +1449,20 @@ int ksocknal_scheduler(void *arg)
LASSERT(conn->ksnc_rx_scheduled);
/* Did process_receive get everything it wanted? */
- if (rc == 0)
+ if (!rc)
conn->ksnc_rx_ready = 1;
if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
- /* Conn blocked waiting for ksocknal_recv()
+ /*
+ * Conn blocked waiting for ksocknal_recv()
* I change its state (under lock) to signal
- * it can be rescheduled */
+ * it can be rescheduled
+ */
conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
} else if (conn->ksnc_rx_ready) {
/* reschedule for rx */
- list_add_tail (&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
+ list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
} else {
conn->ksnc_rx_scheduled = 0;
/* drop my ref */
@@ -1452,25 +1472,24 @@ int ksocknal_scheduler(void *arg)
did_something = 1;
}
- if (!list_empty (&sched->kss_tx_conns)) {
+ if (!list_empty(&sched->kss_tx_conns)) {
LIST_HEAD(zlist);
if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist,
- &sched->kss_zombie_noop_txs);
+ list_add(&zlist, &sched->kss_zombie_noop_txs);
list_del_init(&sched->kss_zombie_noop_txs);
}
conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
+ ksock_conn_t, ksnc_tx_list);
+ list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
- ksock_tx_t, tx_list);
+ ksock_tx_t, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
@@ -1478,16 +1497,20 @@ int ksocknal_scheduler(void *arg)
/* dequeue now so empty list => more to send */
list_del(&tx->tx_list);
- /* Clear tx_ready in case send isn't complete. Do
+ /*
+ * Clear tx_ready in case send isn't complete. Do
* it BEFORE we call process_transmit, since
* write_space can set it any time after we release
- * kss_lock. */
+ * kss_lock.
+ */
conn->ksnc_tx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
if (!list_empty(&zlist)) {
- /* free zombie noop txs, it's fast because
- * noop txs are just put in freelist */
+ /*
+ * free zombie noop txs, it's fast because
+ * noop txs are just put in freelist
+ */
ksocknal_txlist_done(NULL, &zlist, 0);
}
@@ -1496,8 +1519,7 @@ int ksocknal_scheduler(void *arg)
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
spin_lock_bh(&sched->kss_lock);
- list_add(&tx->tx_list,
- &conn->ksnc_tx_queue);
+ list_add(&tx->tx_list, &conn->ksnc_tx_queue);
} else {
/* Complete send; tx -ref */
ksocknal_tx_decref(tx);
@@ -1508,13 +1530,15 @@ int ksocknal_scheduler(void *arg)
}
if (rc == -ENOMEM) {
- /* Do nothing; after a short timeout, this
- * conn will be reposted on kss_tx_conns. */
+ /*
+ * Do nothing; after a short timeout, this
+ * conn will be reposted on kss_tx_conns.
+ */
} else if (conn->ksnc_tx_ready &&
!list_empty(&conn->ksnc_tx_queue)) {
/* reschedule for tx */
list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ &sched->kss_tx_conns);
} else {
conn->ksnc_tx_scheduled = 0;
/* drop my ref */
@@ -1533,7 +1557,7 @@ int ksocknal_scheduler(void *arg)
rc = wait_event_interruptible_exclusive(
sched->kss_waitq,
!ksocknal_sched_cansleep(sched));
- LASSERT (rc == 0);
+ LASSERT(!rc);
} else {
cond_resched();
}
@@ -1551,7 +1575,7 @@ int ksocknal_scheduler(void *arg)
* Add connection to kss_rx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_read_callback (ksock_conn_t *conn)
+void ksocknal_read_callback(ksock_conn_t *conn)
{
ksock_sched_t *sched;
@@ -1562,13 +1586,12 @@ void ksocknal_read_callback (ksock_conn_t *conn)
conn->ksnc_rx_ready = 1;
if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
+ list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
conn->ksnc_rx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
- wake_up (&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
}
spin_unlock_bh(&sched->kss_lock);
}
@@ -1577,7 +1600,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
{
ksock_sched_t *sched;
@@ -1589,20 +1612,19 @@ void ksocknal_write_callback (ksock_conn_t *conn)
if (!conn->ksnc_tx_scheduled && /* not being progressed */
!list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
- wake_up (&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
}
spin_unlock_bh(&sched->kss_lock);
}
static ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
{
__u32 version = 0;
@@ -1611,7 +1633,7 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
version = __swab32(hello->kshm_version);
- if (version != 0) {
+ if (version) {
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 1)
return NULL;
@@ -1632,11 +1654,11 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
- CLASSERT(sizeof (lnet_magicversion_t) ==
- offsetof (ksock_hello_msg_t, kshm_src_nid));
+ CLASSERT(sizeof(lnet_magicversion_t) ==
+ offsetof(ksock_hello_msg_t, kshm_src_nid));
- if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
- hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
+ if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
+ hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
return &ksocknal_protocol_v1x;
}
@@ -1644,8 +1666,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
}
int
-ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
- lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
ksock_net_t *net = (ksock_net_t *)ni->ni_data;
@@ -1653,7 +1675,7 @@ ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
/* rely on caller to hold a ref on socket so it wouldn't disappear */
- LASSERT(conn->ksnc_proto != NULL);
+ LASSERT(conn->ksnc_proto);
hello->kshm_src_nid = ni->ni_nid;
hello->kshm_dst_nid = peer_nid;
@@ -1682,9 +1704,9 @@ ksocknal_invert_type(int type)
}
int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
- ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
- __u64 *incarnation)
+ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+ __u64 *incarnation)
{
/* Return < 0 fatal error
* 0 success
@@ -1692,7 +1714,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
* EPROTO protocol version mismatch
*/
struct socket *sock = conn->ksnc_sock;
- int active = (conn->ksnc_proto != NULL);
+ int active = !!conn->ksnc_proto;
int timeout;
int proto_match;
int rc;
@@ -1705,20 +1727,20 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
timeout = active ? *ksocknal_tunables.ksnd_timeout :
lnet_acceptor_timeout();
- rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
- if (rc != 0) {
+ rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
+ if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT (rc < 0);
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0);
return rc;
}
if (hello->kshm_magic != LNET_PROTO_MAGIC &&
hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
- hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+ hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
/* Unexpected magic! */
CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
- __cpu_to_le32 (hello->kshm_magic),
+ __cpu_to_le32(hello->kshm_magic),
LNET_PROTO_TCP_MAGIC,
&conn->ksnc_ipaddr);
return -EPROTO;
@@ -1726,15 +1748,15 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
rc = lnet_sock_read(sock, &hello->kshm_version,
sizeof(hello->kshm_version), timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0);
return rc;
}
proto = ksocknal_parse_proto_version(hello);
- if (proto == NULL) {
+ if (!proto) {
if (!active) {
/* unknown protocol from peer, tell peer my protocol */
conn->ksnc_proto = &ksocknal_protocol_v3x;
@@ -1760,7 +1782,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
/* receive the rest of hello message anyway */
rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading or checking hello from from %pI4h\n",
rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0);
@@ -1792,8 +1814,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
if (conn->ksnc_type == SOCKLND_CONN_NONE) {
CERROR("Unexpected type %d from %s ip %pI4h\n",
- hello->kshm_ctype, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr);
+ hello->kshm_ctype, libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1816,9 +1838,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
- conn->ksnc_type, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
- hello->kshm_ctype);
+ conn->ksnc_type, libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr, hello->kshm_ctype);
return -EPROTO;
}
@@ -1826,7 +1847,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
}
static int
-ksocknal_connect (ksock_route_t *route)
+ksocknal_connect(ksock_route_t *route)
{
LIST_HEAD(zombies);
ksock_peer_t *peer = route->ksnr_peer;
@@ -1850,10 +1871,12 @@ ksocknal_connect (ksock_route_t *route)
for (;;) {
wanted = ksocknal_route_mask() & ~route->ksnr_connected;
- /* stop connecting if peer/route got closed under me, or
- * route got connected while queued */
+ /*
+ * stop connecting if peer/route got closed under me, or
+ * route got connected while queued
+ */
if (peer->ksnp_closing || route->ksnr_deleted ||
- wanted == 0) {
+ !wanted) {
retry_later = 0;
break;
}
@@ -1869,14 +1892,14 @@ ksocknal_connect (ksock_route_t *route)
if (retry_later) /* needs reschedule */
break;
- if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
+ if (wanted & (1 << SOCKLND_CONN_ANY)) {
type = SOCKLND_CONN_ANY;
- } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
+ } else if (wanted & (1 << SOCKLND_CONN_CONTROL)) {
type = SOCKLND_CONN_CONTROL;
- } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
+ } else if (wanted & (1 << SOCKLND_CONN_BULK_IN)) {
type = SOCKLND_CONN_BULK_IN;
} else {
- LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+ LASSERT(wanted & (1 << SOCKLND_CONN_BULK_OUT));
type = SOCKLND_CONN_BULK_OUT;
}
@@ -1893,7 +1916,7 @@ ksocknal_connect (ksock_route_t *route)
rc = lnet_connect(&sock, peer->ksnp_id.nid,
route->ksnr_myipaddr,
route->ksnr_ipaddr, route->ksnr_port);
- if (rc != 0)
+ if (rc)
goto failed;
rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
@@ -1904,9 +1927,11 @@ ksocknal_connect (ksock_route_t *route)
goto failed;
}
- /* A +ve RC means I have to retry because I lost the connection
- * race or I have to renegotiate protocol version */
- retry_later = (rc != 0);
+ /*
+ * A +ve RC means I have to retry because I lost the connection
+ * race or I have to renegotiate protocol version
+ */
+	retry_later = !!rc;
if (retry_later)
CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
libcfs_nid2str(peer->ksnp_id.nid));
@@ -1918,17 +1943,20 @@ ksocknal_connect (ksock_route_t *route)
route->ksnr_connecting = 0;
if (retry_later) {
- /* re-queue for attention; this frees me up to handle
- * the peer's incoming connection request */
-
+ /*
+ * re-queue for attention; this frees me up to handle
+ * the peer's incoming connection request
+ */
if (rc == EALREADY ||
- (rc == 0 && peer->ksnp_accepting > 0)) {
- /* We want to introduce a delay before next
+ (!rc && peer->ksnp_accepting > 0)) {
+ /*
+ * We want to introduce a delay before next
* attempt to connect if we lost conn race,
* but the race is resolved quickly usually,
- * so min_reconnectms should be good heuristic */
+ * so min_reconnectms should be a good heuristic
+ */
route->ksnr_retry_interval =
- cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
route->ksnr_timeout = cfs_time_add(cfs_time_current(),
route->ksnr_retry_interval);
}
@@ -1949,30 +1977,34 @@ ksocknal_connect (ksock_route_t *route)
route->ksnr_retry_interval *= 2;
route->ksnr_retry_interval =
max(route->ksnr_retry_interval,
- cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
route->ksnr_retry_interval =
min(route->ksnr_retry_interval,
- cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+ cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
- LASSERT (route->ksnr_retry_interval != 0);
+ LASSERT(route->ksnr_retry_interval);
route->ksnr_timeout = cfs_time_add(cfs_time_current(),
route->ksnr_retry_interval);
if (!list_empty(&peer->ksnp_tx_queue) &&
- peer->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer) == NULL) {
+ !peer->ksnp_accepting &&
+ !ksocknal_find_connecting_route_locked(peer)) {
ksock_conn_t *conn;
- /* ksnp_tx_queue is queued on a conn on successful
- * connection for V1.x and V2.x */
- if (!list_empty (&peer->ksnp_conns)) {
+ /*
+ * ksnp_tx_queue is queued on a conn on successful
+ * connection for V1.x and V2.x
+ */
+ if (!list_empty(&peer->ksnp_conns)) {
conn = list_entry(peer->ksnp_conns.next,
- ksock_conn_t, ksnc_list);
- LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+ ksock_conn_t, ksnc_list);
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
}
- /* take all the blocked packets while I've got the lock and
- * complete below... */
+ /*
+ * take all the blocked packets while I've got the lock and
+ * complete below...
+ */
list_splice_init(&peer->ksnp_tx_queue, &zombies);
}
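
The backoff arithmetic a few lines above is worth calling out: on every failed connect the retry interval doubles and is then clamped between the min and max reconnect tunables. A minimal standalone sketch of that schedule (names and bounds are illustrative, not the driver's):

#include <stdio.h>

static long clamp_interval(long interval, long lo, long hi)
{
	/* keep the doubled interval inside [lo, hi] */
	if (interval < lo)
		return lo;
	if (interval > hi)
		return hi;
	return interval;
}

int main(void)
{
	/* stand-ins for min/max_reconnectms / 1000 */
	long lo = 1, hi = 60, interval = 0;
	int attempt;

	for (attempt = 1; attempt <= 8; attempt++) {
		interval = clamp_interval(interval * 2, lo, hi);
		printf("attempt %d: retry in %ld s\n", attempt, interval);
	}
	return 0;
}

This prints 1, 2, 4, 8, 16, 32, 60, 60: exponential growth capped at the maximum, matching the hunk above.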
@@ -2011,8 +2043,10 @@ ksocknal_connd_check_start(time64_t sec, long *timeout)
if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
- /* can't create more connd, or still have enough
- * threads to handle more connecting */
+ /*
+ * can't create more connds, or we still have enough
+ * threads to handle more connecting
+ */
return 0;
}
@@ -2041,7 +2075,7 @@ ksocknal_connd_check_start(time64_t sec, long *timeout)
rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- if (rc == 0)
+ if (!rc)
return 1;
/* we tried ... */
@@ -2093,8 +2127,10 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout)
ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
-/* Go through connd_routes queue looking for a route that we can process
- * right now, @timeout_p can be updated if we need to come back later */
+/*
+ * Go through connd_routes queue looking for a route that we can process
+ * right now; @timeout_p can be updated if we need to come back later
+ */
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
@@ -2104,10 +2140,9 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
now = cfs_time_current();
/* connd_routes can contain both pending and ordinary routes */
- list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
-
- if (route->ksnr_retry_interval == 0 ||
+ list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
+ if (!route->ksnr_retry_interval ||
cfs_time_aftereq(now, route->ksnr_timeout))
return route;
@@ -2120,7 +2155,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
}
int
-ksocknal_connd (void *arg)
+ksocknal_connd(void *arg)
{
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
ksock_connreq_t *cr;
@@ -2172,15 +2207,17 @@ ksocknal_connd (void *arg)
spin_lock_bh(connd_lock);
}
- /* Only handle an outgoing connection request if there
+ /*
+ * Only handle an outgoing connection request if there
* is a thread left to handle incoming connections and
- * create new connd */
+ * create new connd
+ */
if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
ksocknal_data.ksnd_connd_running) {
route = ksocknal_connd_get_route_locked(&timeout);
}
- if (route != NULL) {
- list_del (&route->ksnr_connd_list);
+ if (route) {
+ list_del(&route->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
spin_unlock_bh(connd_lock);
dropped_lock = 1;
@@ -2231,24 +2268,26 @@ ksocknal_connd (void *arg)
}
static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn(ksock_peer_t *peer)
{
/* We're called with a shared lock on ksnd_global_lock */
ksock_conn_t *conn;
struct list_head *ctmp;
- list_for_each (ctmp, &peer->ksnp_conns) {
+ list_for_each(ctmp, &peer->ksnp_conns) {
int error;
- conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT(!conn->ksnc_closing);
- /* SOCK_ERROR will reset error code of socket in
- * some platform (like Darwin8.x) */
+ /*
+ * SOCK_ERROR will reset the error code of the socket on
+ * some platforms (like Darwin8.x)
+ */
error = conn->ksnc_sock->sk->sk_err;
- if (error != 0) {
+ if (error) {
ksocknal_conn_addref(conn);
switch (error) {
@@ -2292,11 +2331,13 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
}
if ((!list_empty(&conn->ksnc_tx_queue) ||
- conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
+ conn->ksnc_sock->sk->sk_wmem_queued) &&
cfs_time_aftereq(cfs_time_current(),
conn->ksnc_tx_deadline)) {
- /* Timed out messages queued for sending or
- * buffered in the socket's send buffer */
+ /*
+ * Timed out messages queued for sending or
+ * buffered in the socket's send buffer
+ */
ksocknal_conn_addref(conn);
CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
libcfs_id2str(peer->ksnp_id),
@@ -2313,20 +2354,18 @@ static inline void
ksocknal_flush_stale_txs(ksock_peer_t *peer)
{
ksock_tx_t *tx;
+ ksock_tx_t *tmp;
LIST_HEAD(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- while (!list_empty (&peer->ksnp_tx_queue)) {
- tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
-
+ list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
if (!cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline))
break;
- list_del (&tx->tx_list);
- list_add_tail (&tx->tx_list, &stale_txs);
+ list_del(&tx->tx_list);
+ list_add_tail(&tx->tx_list, &stale_txs);
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2336,6 +2375,7 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
static int
ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+ __must_hold(&ksocknal_data.ksnd_global_lock)
{
ksock_sched_t *sched;
ksock_conn_t *conn;
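
The __must_hold() annotation added above documents, in a form sparse can check, that the function must be entered and left with ksnd_global_lock held. A minimal sketch of the same pattern on a hypothetical lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

/* sparse warns if a caller reaches this without demo_lock held */
static int demo_bump_locked(void)
	__must_hold(&demo_lock)
{
	return ++demo_counter;
}

static int demo_bump(void)
{
	int v;

	spin_lock(&demo_lock);
	v = demo_bump_locked();
	spin_unlock(&demo_lock);
	return v;
}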
@@ -2356,12 +2396,14 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
return 0;
- /* retry 10 secs later, so we wouldn't put pressure
- * on this peer if we failed to send keepalive this time */
+ /*
+ * retry 10 secs later, so we don't put pressure
+ * on this peer if we failed to send a keepalive this time
+ */
peer->ksnp_send_keepalive = cfs_time_shift(10);
conn = ksocknal_find_conn_locked(peer, NULL, 1);
- if (conn != NULL) {
+ if (conn) {
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
@@ -2378,12 +2420,12 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
/* cookie = 1 is reserved for keepalive PING */
tx = ksocknal_alloc_tx_noop(1, 1);
- if (tx == NULL) {
+ if (!tx) {
read_lock(&ksocknal_data.ksnd_global_lock);
return -ENOMEM;
}
- if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+ if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) {
read_lock(&ksocknal_data.ksnd_global_lock);
return 1;
}
@@ -2395,7 +2437,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
}
static void
-ksocknal_check_peer_timeouts (int idx)
+ksocknal_check_peer_timeouts(int idx)
{
struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
ksock_peer_t *peer;
@@ -2403,9 +2445,11 @@ ksocknal_check_peer_timeouts (int idx)
ksock_tx_t *tx;
again:
- /* NB. We expect to have a look at all the peers and not find any
+ /*
+ * NB. We expect to have a look at all the peers and not find any
* connections to time out, so we just use a shared lock while we
- * take a look... */
+ * take a look...
+ */
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer, peers, ksnp_list) {
@@ -2413,35 +2457,37 @@ ksocknal_check_peer_timeouts (int idx)
int resid = 0;
int n = 0;
- if (ksocknal_send_keepalive_locked(peer) != 0) {
+ if (ksocknal_send_keepalive_locked(peer)) {
read_unlock(&ksocknal_data.ksnd_global_lock);
goto again;
}
- conn = ksocknal_find_timed_out_conn (peer);
+ conn = ksocknal_find_timed_out_conn(peer);
- if (conn != NULL) {
+ if (conn) {
read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+ ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
- /* NB we won't find this one again, but we can't
+ /*
+ * NB we won't find this one again, but we can't
* just proceed with the next peer, since we dropped
- * ksnd_global_lock and it might be dead already! */
+ * ksnd_global_lock and it might be dead already!
+ */
ksocknal_conn_decref(conn);
goto again;
}
- /* we can't process stale txs right here because we're
- * holding only shared lock */
- if (!list_empty (&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx =
- list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ /*
+ * we can't process stale txs right here because we're
+ * holding only shared lock
+ */
+ if (!list_empty(&peer->ksnp_tx_queue)) {
+ ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next,
+ ksock_tx_t, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline)) {
-
ksocknal_peer_addref(peer);
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2466,13 +2512,13 @@ ksocknal_check_peer_timeouts (int idx)
n++;
}
- if (n == 0) {
+ if (!n) {
spin_unlock(&peer->ksnp_lock);
continue;
}
tx = list_entry(peer->ksnp_zc_req_list.next,
- ksock_tx_t, tx_zc_list);
+ ksock_tx_t, tx_zc_list);
deadline = tx->tx_deadline;
resid = tx->tx_resid;
conn = tx->tx_conn;
@@ -2486,7 +2532,7 @@ ksocknal_check_peer_timeouts (int idx)
cfs_duration_sec(cfs_time_current() - deadline),
resid, conn->ksnc_sock->sk->sk_wmem_queued);
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+ ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
ksocknal_conn_decref(conn);
goto again;
}
@@ -2495,7 +2541,7 @@ ksocknal_check_peer_timeouts (int idx)
}
int
-ksocknal_reaper (void *arg)
+ksocknal_reaper(void *arg)
{
wait_queue_t wait;
ksock_conn_t *conn;
@@ -2515,12 +2561,10 @@ ksocknal_reaper (void *arg)
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
-
- if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry (ksocknal_data. \
- ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
+ if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
+ conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
+ ksock_conn_t, ksnc_list);
+ list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2531,10 +2575,10 @@ ksocknal_reaper (void *arg)
continue;
}
- if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
- next, ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
+ if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
+ conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+ ksock_conn_t, ksnc_list);
+ list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2544,9 +2588,9 @@ ksocknal_reaper (void *arg)
continue;
}
- if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+ if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
list_add(&enomem_conns,
- &ksocknal_data.ksnd_enomem_conns);
+ &ksocknal_data.ksnd_enomem_conns);
list_del_init(&ksocknal_data.ksnd_enomem_conns);
}
@@ -2554,10 +2598,10 @@ ksocknal_reaper (void *arg)
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
- while (!list_empty (&enomem_conns)) {
- conn = list_entry (enomem_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
+ while (!list_empty(&enomem_conns)) {
+ conn = list_entry(enomem_conns.next, ksock_conn_t,
+ ksnc_tx_list);
+ list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
@@ -2566,7 +2610,7 @@ ksocknal_reaper (void *arg)
LASSERT(conn->ksnc_tx_scheduled);
conn->ksnc_tx_ready = 1;
list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ &sched->kss_tx_conns);
wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
@@ -2580,21 +2624,22 @@ ksocknal_reaper (void *arg)
const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size;
- /* Time to check for timeouts on a few more peers: I do
+ /*
+ * Time to check for timeouts on a few more peers: I do
* checks every 'p' seconds on a proportion of the peer
* table and I need to check every connection 'n' times
* within a timeout interval, to ensure I detect a
* timeout on any connection within (n+1)/n times the
- * timeout interval. */
-
+ * timeout interval.
+ */
if (*ksocknal_tunables.ksnd_timeout > n * p)
chunk = (chunk * n * p) /
*ksocknal_tunables.ksnd_timeout;
- if (chunk == 0)
+ if (!chunk)
chunk = 1;
for (i = 0; i < chunk; i++) {
- ksocknal_check_peer_timeouts (peer_index);
+ ksocknal_check_peer_timeouts(peer_index);
peer_index = (peer_index + 1) %
ksocknal_data.ksnd_peer_hash_size;
}
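
The comment in this hunk encodes a simple proportionality: scanning hash_size * n * p / timeout buckets per p-second pass visits every bucket n times per timeout interval. A worked sketch with assumed values (the real table size and tunables differ):

#include <stdio.h>

int main(void)
{
	int hash_size = 503;	/* assumed peer-table size */
	int n = 3, p = 1;	/* checks per timeout; seconds per pass */
	int timeout = 50;	/* assumed ksnd_timeout, in seconds */
	int chunk = hash_size;

	if (timeout > n * p)
		chunk = (chunk * n * p) / timeout;
	if (!chunk)
		chunk = 1;

	/* 503 * 3 * 1 / 50 = 30 buckets scanned per second */
	printf("scan %d of %d buckets per pass\n", chunk, hash_size);
	return 0;
}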
@@ -2602,25 +2647,27 @@ ksocknal_reaper (void *arg)
deadline = cfs_time_add(deadline, cfs_time_seconds(p));
}
- if (nenomem_conns != 0) {
- /* Reduce my timeout if I rescheduled ENOMEM conns.
+ if (nenomem_conns) {
+ /*
+ * Reduce my timeout if I rescheduled ENOMEM conns.
* This also prevents me getting woken immediately
- * if any go back on my enomem list. */
+ * if any go back on my enomem list.
+ */
timeout = SOCKNAL_ENOMEM_RETRY;
}
ksocknal_data.ksnd_reaper_waketime =
cfs_time_add(cfs_time_current(), timeout);
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
- list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
- list_empty (&ksocknal_data.ksnd_zombie_conns))
+ list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+ list_empty(&ksocknal_data.ksnd_zombie_conns))
schedule_timeout(timeout);
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index cf8e43bd3c03..d4ce06d0aeeb 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -45,13 +45,13 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
LASSERT(!conn->ksnc_closing);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d getting sock peer IP\n", rc);
return rc;
}
rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d getting sock local IP\n", rc);
return rc;
}
@@ -67,9 +67,11 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn)
if (conn->ksnc_proto == &ksocknal_protocol_v1x)
return 0;
- /* ZC if the socket supports scatter/gather and doesn't need software
- * checksums */
- return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0);
+ /*
+ * ZC if the socket supports scatter/gather and doesn't need software
+ * checksums
+ */
+ return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK));
}
int
@@ -82,12 +84,13 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
tx->tx_nob == tx->tx_resid && /* frist sending */
- tx->tx_msg.ksm_csum == 0) /* not checksummed */
+ !tx->tx_msg.ksm_csum) /* not checksummed */
ksocknal_lib_csum_tx(tx);
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
-
+ /*
+ * NB we can't trust socket ops to either consume our iovs
+ * or leave them alone.
+ */
{
#if SOCKNAL_SINGLE_FRAG_TX
struct kvec scratch;
@@ -123,11 +126,13 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
int nob;
/* Not NOOP message */
- LASSERT(tx->tx_lnetmsg != NULL);
+ LASSERT(tx->tx_lnetmsg);
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
- if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
+ /*
+ * NB we can't trust socket ops to either consume our iovs
+ * or leave them alone.
+ */
+ if (tx->tx_msg.ksm_zc_cookies[0]) {
/* Zero copy is enabled */
struct sock *sk = sock->sk;
struct page *page = kiov->kiov_page;
@@ -136,13 +141,13 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
int msgflg = MSG_DONTWAIT;
CDEBUG(D_NET, "page %p + offset %x for %d\n",
- page, offset, kiov->kiov_len);
+ page, offset, kiov->kiov_len);
if (!list_empty(&conn->ksnc_tx_queue) ||
fragsize < tx->tx_resid)
msgflg |= MSG_MORE;
- if (sk->sk_prot->sendpage != NULL) {
+ if (sk->sk_prot->sendpage) {
rc = sk->sk_prot->sendpage(sk, page,
offset, fragsize, msgflg);
} else {
@@ -187,13 +192,14 @@ ksocknal_lib_eager_ack(ksock_conn_t *conn)
int opt = 1;
struct socket *sock = conn->ksnc_sock;
- /* Remind the socket to ACK eagerly. If I don't, the socket might
+ /*
+ * Remind the socket to ACK eagerly. If I don't, the socket might
* think I'm about to send something it could piggy-back the ACK
* on, introducing delay in completing zero-copy sends in my
- * peer. */
-
- kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
- (char *)&opt, sizeof(opt));
+ * peer.
+ */
+ kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt,
+ sizeof(opt));
}
int
@@ -218,8 +224,10 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
int sum;
__u32 saved_csum;
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
+ /*
+ * NB we can't trust socket ops to either consume our iovs
+ * or leave them alone.
+ */
LASSERT(niov > 0);
for (nob = i = 0; i < niov; i++) {
@@ -228,8 +236,8 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
}
LASSERT(nob <= conn->ksnc_rx_nob_wanted);
- rc = kernel_recvmsg(conn->ksnc_sock, &msg,
- scratchiov, niov, nob, MSG_DONTWAIT);
+ rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
+ MSG_DONTWAIT);
saved_csum = 0;
if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -237,7 +245,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
conn->ksnc_msg.ksm_csum = 0;
}
- if (saved_csum != 0) {
+ if (saved_csum) {
/* accumulate checksum */
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov);
@@ -258,7 +266,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
- if (addr == NULL)
+ if (!addr)
return;
vunmap(addr);
@@ -272,7 +280,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
int nob;
int i;
- if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
+ if (!*ksocknal_tunables.ksnd_zc_recv || !pages)
return NULL;
LASSERT(niov <= LNET_MAX_IOV);
@@ -282,8 +290,8 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
return NULL;
for (nob = i = 0; i < niov; i++) {
- if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+ if ((kiov[i].kiov_offset && i > 0) ||
+ (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
@@ -291,7 +299,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
}
addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
- if (addr == NULL)
+ if (!addr)
return NULL;
iov->iov_base = addr + kiov[0].kiov_offset;
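
The offset/length test above is the gate for zero-copy receive: a fragment array can only be vmap()'d into one contiguous buffer if every fragment but the first starts at offset 0 and every fragment but the last ends exactly on a page boundary. A sketch of the predicate with illustrative types:

#include <linux/mm.h>

struct demo_kiov {
	struct page *kiov_page;
	unsigned int kiov_offset;
	unsigned int kiov_len;
};

static bool demo_kiov_mappable(const struct demo_kiov *kiov, int niov)
{
	int i;

	for (i = 0; i < niov; i++) {
		/* a later fragment not starting at 0 leaves a hole */
		if (kiov[i].kiov_offset && i > 0)
			return false;
		/* an earlier fragment ending short of a page does too */
		if (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE &&
		    i < niov - 1)
			return false;
	}
	return true;
}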
@@ -329,10 +337,12 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
int fragnob;
int n;
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
+ /*
+ * NB we can't trust socket ops to either consume our iovs
+ * or leave them alone.
+ */
addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
- if (addr != NULL) {
+ if (addr) {
nob = scratchiov[0].iov_len;
n = 1;
@@ -347,17 +357,19 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
LASSERT(nob <= conn->ksnc_rx_nob_wanted);
- rc = kernel_recvmsg(conn->ksnc_sock, &msg,
- (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
+ rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov,
+ n, nob, MSG_DONTWAIT);
- if (conn->ksnc_msg.ksm_csum != 0) {
+ if (conn->ksnc_msg.ksm_csum) {
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov);
- /* Dang! have to kmap again because I have nowhere to
+ /*
+ * Dang! have to kmap again because I have nowhere to
* stash the mapped address. But by doing it while the
* page is still mapped, the kernel just bumps the map
- * count and returns me the address it stashed. */
+ * count and returns me the address it stashed.
+ */
base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
fragnob = kiov[i].kiov_len;
if (fragnob > sum)
@@ -370,7 +382,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
}
}
- if (addr != NULL) {
+ if (addr) {
ksocknal_lib_kiov_vunmap(addr);
} else {
for (i = 0; i < niov; i++)
@@ -388,7 +400,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
void *base;
LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
- LASSERT(tx->tx_conn != NULL);
+ LASSERT(tx->tx_conn);
LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
tx->tx_msg.ksm_csum = 0;
@@ -396,7 +408,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
tx->tx_iov[0].iov_len);
- if (tx->tx_kiov != NULL) {
+ if (tx->tx_kiov) {
for (i = 0; i < tx->tx_nkiov; i++) {
base = kmap(tx->tx_kiov[i].kiov_page) +
tx->tx_kiov[i].kiov_offset;
@@ -427,22 +439,22 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *
int rc;
rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
+ if (rc) {
LASSERT(conn->ksnc_closing);
*txmem = *rxmem = *nagle = 0;
return -ESHUTDOWN;
}
rc = lnet_sock_getbuf(sock, txmem, rxmem);
- if (rc == 0) {
+ if (!rc) {
len = sizeof(*nagle);
rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char *)nagle, &len);
+ (char *)nagle, &len);
}
ksocknal_connsock_decref(conn);
- if (rc == 0)
+ if (!rc)
*nagle = !*nagle;
else
*txmem = *rxmem = *nagle = 0;
@@ -463,23 +475,24 @@ ksocknal_lib_setup_sock(struct socket *sock)
sock->sk->sk_allocation = GFP_NOFS;
- /* Ensure this socket aborts active sends immediately when we close
- * it. */
-
+ /*
+ * Ensure this socket aborts active sends immediately when we close
+ * it.
+ */
linger.l_onoff = 0;
linger.l_linger = 0;
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
- (char *)&linger, sizeof(linger));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger,
+ sizeof(linger));
+ if (rc) {
CERROR("Can't set SO_LINGER: %d\n", rc);
return rc;
}
option = -1;
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2,
- (char *)&option, sizeof(option));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option,
+ sizeof(option));
+ if (rc) {
CERROR("Can't set SO_LINGER2: %d\n", rc);
return rc;
}
@@ -488,8 +501,8 @@ ksocknal_lib_setup_sock(struct socket *sock)
option = 1;
rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char *)&option, sizeof(option));
- if (rc != 0) {
+ (char *)&option, sizeof(option));
+ if (rc) {
CERROR("Can't disable nagle: %d\n", rc);
return rc;
}
@@ -497,10 +510,10 @@ ksocknal_lib_setup_sock(struct socket *sock)
rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
*ksocknal_tunables.ksnd_rx_buffer_size);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't set buffer tx %d, rx %d buffers: %d\n",
- *ksocknal_tunables.ksnd_tx_buffer_size,
- *ksocknal_tunables.ksnd_rx_buffer_size, rc);
+ *ksocknal_tunables.ksnd_tx_buffer_size,
+ *ksocknal_tunables.ksnd_rx_buffer_size, rc);
return rc;
}
@@ -514,9 +527,9 @@ ksocknal_lib_setup_sock(struct socket *sock)
do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
option = (do_keepalive ? 1 : 0);
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
- (char *)&option, sizeof(option));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option,
+ sizeof(option));
+ if (rc) {
CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
return rc;
}
@@ -524,23 +537,23 @@ ksocknal_lib_setup_sock(struct socket *sock)
if (!do_keepalive)
return 0;
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
- (char *)&keep_idle, sizeof(keep_idle));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle,
+ sizeof(keep_idle));
+ if (rc) {
CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
return rc;
}
rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
- (char *)&keep_intvl, sizeof(keep_intvl));
- if (rc != 0) {
+ (char *)&keep_intvl, sizeof(keep_intvl));
+ if (rc) {
CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
return rc;
}
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
- (char *)&keep_count, sizeof(keep_count));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count,
+ sizeof(keep_count));
+ if (rc) {
CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
return rc;
}
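
For reference, the four knobs this function sets via kernel_setsockopt() have exact userspace counterparts: SO_KEEPALIVE arms probing, TCP_KEEPIDLE delays the first probe, TCP_KEEPINTVL spaces the probes and TCP_KEEPCNT bounds how many may fail. A userspace sketch with illustrative values:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int demo_setup_keepalive(int fd)
{
	int on = 1, idle = 30, intvl = 5, cnt = 3;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)))
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
		return -1;
	/* peer declared dead after idle + cnt * intvl = 45 s of silence */
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}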
@@ -558,7 +571,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
int rc;
rc = ksocknal_connsock_addref(conn);
- if (rc != 0) /* being shut down */
+ if (rc) /* being shut down */
return;
sk = conn->ksnc_sock->sk;
@@ -570,8 +583,8 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
release_sock(sk);
rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
- (char *)&val, sizeof(val));
- LASSERT(rc == 0);
+ (char *)&val, sizeof(val));
+ LASSERT(!rc);
lock_sock(sk);
tp->nonagle = nonagle;
@@ -593,11 +606,12 @@ ksocknal_data_ready(struct sock *sk)
read_lock(&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
- if (conn == NULL) { /* raced with ksocknal_terminate_conn */
+ if (!conn) { /* raced with ksocknal_terminate_conn */
LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
sk->sk_data_ready(sk);
- } else
+ } else {
ksocknal_read_callback(conn);
+ }
read_unlock(&ksocknal_data.ksnd_global_lock);
}
@@ -619,14 +633,14 @@ ksocknal_write_space(struct sock *sk)
CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
sk, wspace, min_wpace, conn,
- (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
+ !conn ? "" : (conn->ksnc_tx_ready ?
" ready" : " blocked"),
- (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
+ !conn ? "" : (conn->ksnc_tx_scheduled ?
" scheduled" : " idle"),
- (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ?
+ !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ?
" empty" : " queued"));
- if (conn == NULL) { /* raced with ksocknal_terminate_conn */
+ if (!conn) { /* raced with ksocknal_terminate_conn */
LASSERT(sk->sk_write_space != &ksocknal_write_space);
sk->sk_write_space(sk);
@@ -637,10 +651,11 @@ ksocknal_write_space(struct sock *sk)
if (wspace >= min_wpace) { /* got enough space */
ksocknal_write_callback(conn);
- /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
+ /*
+ * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
* ENOMEM check in ksocknal_transmit is race-free (think about
- * it). */
-
+ * it).
+ */
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
@@ -666,15 +681,19 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
void
ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
{
- /* Remove conn's network callbacks.
+ /*
+ * Remove conn's network callbacks.
* NB I _have_ to restore the callback, rather than storing a noop,
- * since the socket could survive past this module being unloaded!! */
+ * since the socket could survive past this module being unloaded!!
+ */
sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
sock->sk->sk_write_space = conn->ksnc_saved_write_space;
- /* A callback could be in progress already; they hold a read lock
+ /*
+ * A callback could be in progress already; they hold a read lock
* on ksnd_global_lock (to serialise with me) and NOOP if
- * sk_user_data is NULL. */
+ * sk_user_data is NULL.
+ */
sock->sk->sk_user_data = NULL;
return;
@@ -691,14 +710,16 @@ ksocknal_lib_memory_pressure(ksock_conn_t *conn)
if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
!conn->ksnc_tx_ready) {
- /* SOCK_NOSPACE is set when the socket fills
+ /*
+ * SOCK_NOSPACE is set when the socket fills
* and cleared in the write_space callback
* (which also sets ksnc_tx_ready). If
* SOCK_NOSPACE and ksnc_tx_ready are BOTH
* zero, I didn't fill the socket and
* write_space won't reschedule me, so I
* return -ENOMEM to get my caller to retry
- * after a timeout */
+ * after a timeout
+ */
rc = -ENOMEM;
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index fdb2b23e2ef0..6329cbe66573 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -14,9 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "socklnd.h"
@@ -41,8 +38,10 @@ static int peer_timeout = 180;
module_param(peer_timeout, int, 0444);
MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
-/* Number of daemons in each thread pool which is percpt,
- * we will estimate reasonable value based on CPUs if it's not set. */
+/*
+ * Number of daemons in each thread pool, which is percpt;
+ * we will estimate a reasonable value based on CPUs if it's not set.
+ */
static unsigned int nscheds;
module_param(nscheds, int, 0444);
MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
@@ -72,7 +71,7 @@ static int typed_conns = 1;
module_param(typed_conns, int, 0444);
MODULE_PARM_DESC(typed_conns, "use different sockets for bulk");
-static int min_bulk = 1<<10;
+static int min_bulk = 1 << 10;
module_param(min_bulk, int, 0644);
MODULE_PARM_DESC(min_bulk, "smallest 'large' message");
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 986bce4c9f3b..32cc31e4cc29 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -19,9 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "socklnd.h"
@@ -56,15 +53,14 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
LASSERT(!list_empty(&conn->ksnc_tx_queue));
- LASSERT(tx != NULL);
+ LASSERT(tx);
/* Next TX that can carry ZC-ACK or LNet message */
if (tx->tx_list.next == &conn->ksnc_tx_queue) {
/* no more packets queued */
conn->ksnc_tx_carrier = NULL;
} else {
- conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
- ksock_tx_t, tx_list);
+ conn->ksnc_tx_carrier = list_next_entry(tx, tx_list);
LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
}
}
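
list_next_entry(tx, tx_list) above is shorthand for list_entry(tx->tx_list.next, ksock_tx_t, tx_list); it infers the element type from the cursor, so the type name can't drift out of sync with the variable. A small sketch on a hypothetical list:

#include <linux/list.h>

struct demo_node {
	struct list_head link;
	int val;
};

/* returns the successor of 'n', or NULL when 'n' is the tail */
static struct demo_node *demo_successor(struct list_head *head,
					struct demo_node *n)
{
	if (n->link.next == head)
		return NULL;
	return list_next_entry(n, link);
}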
@@ -75,8 +71,8 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
{
ksock_tx_t *tx = conn->ksnc_tx_carrier;
- LASSERT(tx_ack == NULL ||
- tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+ LASSERT(!tx_ack ||
+ tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
/*
* Enqueue or piggyback tx_ack / cookie
@@ -85,10 +81,10 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
* . There is a tx that can piggyback the cookie of tx_ack (or cookie):
* piggyback the cookie and return the tx.
*/
- if (tx == NULL) {
- if (tx_ack != NULL) {
+ if (!tx) {
+ if (tx_ack) {
list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_ack;
}
return 0;
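
ksocknal_queue_tx_zcack_v2() chooses between piggybacking a ZC-ACK cookie on an already-queued carrier message and queueing a standalone NOOP. A simplified model of that decision (hypothetical types, not the driver's API):

#include <linux/list.h>

struct demo_msg {
	struct list_head list;
	unsigned long long zc_cookie;	/* 0 means the slot is free */
};

/* returns 1 if the cookie was piggybacked, 0 if 'ack' was queued */
static int demo_queue_zcack(struct list_head *txq, struct demo_msg *carrier,
			    struct demo_msg *ack)
{
	if (carrier && !carrier->zc_cookie) {
		/* piggyback: the queued message carries the cookie */
		carrier->zc_cookie = ack->zc_cookie;
		return 1;
	}
	list_add_tail(&ack->list, txq);	/* standalone NOOP ACK */
	return 0;
}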
@@ -96,16 +92,16 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
/* tx is noop zc-ack, can't piggyback zc-ack cookie */
- if (tx_ack != NULL)
+ if (tx_ack)
list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
return 0;
}
LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
- LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);
+ LASSERT(!tx->tx_msg.ksm_zc_cookies[1]);
- if (tx_ack != NULL)
+ if (tx_ack)
cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
/* piggyback the zc-ack cookie */
@@ -128,7 +124,7 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
* . If there is NOOP on the connection, piggyback the cookie
* and replace the NOOP tx, and return the NOOP tx.
*/
- if (tx == NULL) { /* nothing on queue */
+ if (!tx) { /* nothing on queue */
list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_msg;
return NULL;
@@ -162,22 +158,22 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
/* non-blocking ZC-ACK (to router) */
- LASSERT(tx_ack == NULL ||
- tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+ LASSERT(!tx_ack ||
+ tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
tx = conn->ksnc_tx_carrier;
- if (tx == NULL) {
- if (tx_ack != NULL) {
+ if (!tx) {
+ if (tx_ack) {
list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_ack;
}
return 0;
}
- /* conn->ksnc_tx_carrier != NULL */
+ /* conn->ksnc_tx_carrier */
- if (tx_ack != NULL)
+ if (tx_ack)
cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
@@ -185,7 +181,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
/* replace the keepalive PING with a real ACK */
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
+ LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
tx->tx_msg.ksm_zc_cookies[1] = cookie;
return 1;
}
@@ -197,7 +193,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
return 1; /* XXX return error in the future */
}
- if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+ if (!tx->tx_msg.ksm_zc_cookies[0]) {
/* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
@@ -233,7 +229,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
tmp = tx->tx_msg.ksm_zc_cookies[0];
}
- if (tmp != 0) {
+ if (tmp) {
/* range of cookies */
tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
@@ -261,7 +257,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
}
/* failed to piggyback ZC-ACK */
- if (tx_ack != NULL) {
+ if (tx_ack) {
list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
/* the next tx can piggyback at least 1 ACK */
ksocknal_next_tx_carrier(conn);
@@ -280,7 +276,7 @@ ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
return SOCKNAL_MATCH_YES;
#endif
- if (tx == NULL || tx->tx_lnetmsg == NULL) {
+ if (!tx || !tx->tx_lnetmsg) {
/* noop packet */
nob = offsetof(ksock_msg_t, ksm_u);
} else {
@@ -319,7 +315,7 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
{
int nob;
- if (tx == NULL || tx->tx_lnetmsg == NULL)
+ if (!tx || !tx->tx_lnetmsg)
nob = offsetof(ksock_msg_t, ksm_u);
else
nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t);
@@ -334,7 +330,7 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
case SOCKLND_CONN_ACK:
if (nonblk)
return SOCKNAL_MATCH_YES;
- else if (tx == NULL || tx->tx_lnetmsg == NULL)
+ else if (!tx || !tx->tx_lnetmsg)
return SOCKNAL_MATCH_MAY;
else
return SOCKNAL_MATCH_NO;
@@ -369,10 +365,10 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
read_lock(&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
- if (conn != NULL) {
+ if (conn) {
ksock_sched_t *sched = conn->ksnc_scheduler;
- LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
spin_lock_bh(&sched->kss_lock);
@@ -390,11 +386,11 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
/* ACK connection is not ready, or can't piggyback the ACK */
tx = ksocknal_alloc_tx_noop(cookie, !!remote);
- if (tx == NULL)
+ if (!tx)
return -ENOMEM;
rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
- if (rc == 0)
+ if (!rc)
return 0;
ksocknal_free_tx(tx);
@@ -407,11 +403,12 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
{
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
+ ksock_tx_t *temp;
ksock_tx_t *tmp;
LIST_HEAD(zlist);
int count;
- if (cookie1 == 0)
+ if (!cookie1)
cookie1 = cookie2;
count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
@@ -424,8 +421,8 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
spin_lock(&peer->ksnp_lock);
- list_for_each_entry_safe(tx, tmp,
- &peer->ksnp_zc_req_list, tx_zc_list) {
+ list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list,
+ tx_zc_list) {
__u64 c = tx->tx_msg.ksm_zc_cookies[0];
if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
@@ -433,20 +430,19 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
list_del(&tx->tx_zc_list);
list_add(&tx->tx_zc_list, &zlist);
- if (--count == 0)
+ if (!--count)
break;
}
}
spin_unlock(&peer->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
- return count == 0 ? 0 : -EPROTO;
+ return !count ? 0 : -EPROTO;
}
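
The drain loop above moves from a while/list_entry() pattern to list_for_each_entry_safe(), whose extra cursor (@temp) caches the next entry so the current one can be unlinked and released inside the body. A minimal sketch of the idiom, with struct item as a stand-in type:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct list_head link;
	};

	/* Safe drain: @n already holds the next entry, so deleting and
	 * freeing @pos inside the loop cannot break the iteration.
	 */
	static void drain(struct list_head *zlist)
	{
		struct item *pos, *n;

		list_for_each_entry_safe(pos, n, zlist, link) {
			list_del(&pos->link);
			kfree(pos);	/* stand-in for ksocknal_tx_decref() */
		}
	}
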
static int
@@ -461,58 +457,59 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid));
LIBCFS_ALLOC(hdr, sizeof(*hdr));
- if (hdr == NULL) {
+ if (!hdr) {
CERROR("Can't allocate lnet_hdr_t\n");
return -ENOMEM;
}
hmv = (lnet_magicversion_t *)&hdr->dest_nid;
- /* Re-organize V2.x message header to V1.x (lnet_hdr_t)
- * header and send out */
- hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
- hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR);
- hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR);
+ /*
+ * Re-organize V2.x message header to V1.x (lnet_hdr_t)
+ * header and send out
+ */
+ hmv->magic = cpu_to_le32(LNET_PROTO_TCP_MAGIC);
+ hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
+ hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);
- if (the_lnet.ln_testprotocompat != 0) {
+ if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
LNET_LOCK();
- if ((the_lnet.ln_testprotocompat & 1) != 0) {
+ if (the_lnet.ln_testprotocompat & 1) {
hmv->version_major++; /* just different! */
the_lnet.ln_testprotocompat &= ~1;
}
- if ((the_lnet.ln_testprotocompat & 2) != 0) {
+ if (the_lnet.ln_testprotocompat & 2) {
hmv->magic = LNET_PROTO_MAGIC;
the_lnet.ln_testprotocompat &= ~2;
}
LNET_UNLOCK();
}
- hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
- hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
- hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
- hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32));
- hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype);
- hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation);
+ hdr->src_nid = cpu_to_le64(hello->kshm_src_nid);
+ hdr->src_pid = cpu_to_le32(hello->kshm_src_pid);
+ hdr->type = cpu_to_le32(LNET_MSG_HELLO);
+ hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32));
+ hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype);
+ hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);
rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
- if (rc != 0) {
+ if (rc) {
CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
rc, &conn->ksnc_ipaddr, conn->ksnc_port);
goto out;
}
- if (hello->kshm_nips == 0)
+ if (!hello->kshm_nips)
goto out;
- for (i = 0; i < (int) hello->kshm_nips; i++) {
- hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
- }
+ for (i = 0; i < (int) hello->kshm_nips; i++)
+ hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
rc = lnet_sock_write(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
- if (rc != 0) {
+ if (rc) {
CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
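
All of the cpu_to_le*() calls tidied above serve one purpose: HELLO v1 fields travel little-endian on the wire regardless of host byte order. A small reminder of the round trip (the WARN_ON is only for this sketch):

	#include <linux/types.h>
	#include <linux/bug.h>
	#include <asm/byteorder.h>

	static void hello_endian_demo(u32 type)
	{
		__le32 wire = cpu_to_le32(type); /* host -> wire; no-op on LE hosts */
		u32 back = le32_to_cpu(wire);    /* wire -> host; swaps on BE hosts */

		WARN_ON(back != type);           /* the round trip is lossless */
	}
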
@@ -532,10 +529,10 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
hello->kshm_magic = LNET_PROTO_MAGIC;
hello->kshm_version = conn->ksnc_proto->pro_version;
- if (the_lnet.ln_testprotocompat != 0) {
+ if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
LNET_LOCK();
- if ((the_lnet.ln_testprotocompat & 1) != 0) {
+ if (the_lnet.ln_testprotocompat & 1) {
hello->kshm_version++; /* just different! */
the_lnet.ln_testprotocompat &= ~1;
}
@@ -544,19 +541,19 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
lnet_acceptor_timeout());
- if (rc != 0) {
+ if (rc) {
CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
rc, &conn->ksnc_ipaddr, conn->ksnc_port);
return rc;
}
- if (hello->kshm_nips == 0)
+ if (!hello->kshm_nips)
return 0;
rc = lnet_sock_write(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
- if (rc != 0) {
+ if (rc) {
CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
@@ -575,7 +572,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
int i;
LIBCFS_ALLOC(hdr, sizeof(*hdr));
- if (hdr == NULL) {
+ if (!hdr) {
CERROR("Can't allocate lnet_hdr_t\n");
return -ENOMEM;
}
@@ -583,15 +580,15 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
rc = lnet_sock_read(sock, &hdr->src_nid,
sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid),
timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
goto out;
}
/* ...and check we got what we expected */
- if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
+ if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) {
CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n",
le32_to_cpu(hdr->type),
&conn->ksnc_ipaddr);
@@ -613,14 +610,14 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
goto out;
}
- if (hello->kshm_nips == 0)
+ if (!hello->kshm_nips)
goto out;
rc = lnet_sock_read(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32), timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading IPs from ip %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
goto out;
}
@@ -628,7 +625,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
for (i = 0; i < (int) hello->kshm_nips; i++) {
hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
- if (hello->kshm_ips[i] == 0) {
+ if (!hello->kshm_ips[i]) {
CERROR("Zero IP[%d] from ip %pI4h\n",
i, &conn->ksnc_ipaddr);
rc = -EPROTO;
@@ -657,9 +654,9 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
offsetof(ksock_hello_msg_t, kshm_ips) -
offsetof(ksock_hello_msg_t, kshm_src_nid),
timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
return rc;
}
@@ -681,14 +678,14 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
return -EPROTO;
}
- if (hello->kshm_nips == 0)
+ if (!hello->kshm_nips)
return 0;
rc = lnet_sock_read(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32), timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading IPs from ip %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
return rc;
}
@@ -697,7 +694,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
if (conn->ksnc_flip)
__swab32s(&hello->kshm_ips[i]);
- if (hello->kshm_ips[i] == 0) {
+ if (!hello->kshm_ips[i]) {
CERROR("Zero IP[%d] from ip %pI4h\n",
i, &conn->ksnc_ipaddr);
return -EPROTO;
@@ -712,12 +709,13 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
{
/* V1.x has no KSOCK_MSG_NOOP */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT(tx->tx_lnetmsg != NULL);
+ LASSERT(tx->tx_lnetmsg);
tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t);
- tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+ tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+ tx->tx_resid = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
}
static void
@@ -725,17 +723,19 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx)
{
tx->tx_iov[0].iov_base = &tx->tx_msg;
- if (tx->tx_lnetmsg != NULL) {
+ if (tx->tx_lnetmsg) {
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
- tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+ tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+ tx->tx_resid = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
} else {
LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
- tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
}
/* Don't checksum before start sending, because packet can be piggybacked with ACK */
}
@@ -745,7 +745,8 @@ ksocknal_unpack_msg_v1(ksock_msg_t *msg)
{
msg->ksm_csum = 0;
msg->ksm_type = KSOCK_MSG_LNET;
- msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
+ msg->ksm_zc_cookies[0] = 0;
+ msg->ksm_zc_cookies[1] = 0;
}
static void
diff --git a/drivers/staging/lustre/lustre/libcfs/Makefile b/drivers/staging/lustre/lnet/libcfs/Makefile
index 03d3f3d7b1f8..8c8945545375 100644
--- a/drivers/staging/lustre/lustre/libcfs/Makefile
+++ b/drivers/staging/lustre/lnet/libcfs/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_LUSTRE_FS) += libcfs.o
+obj-$(CONFIG_LNET) += libcfs.o
libcfs-linux-objs := linux-tracefile.o linux-debug.o
libcfs-linux-objs += linux-prim.o linux-cpu.o
@@ -11,8 +11,7 @@ libcfs-linux-objs += linux-mem.o
libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs))
libcfs-all-objs := debug.o fail.o module.o tracefile.o \
- libcfs_string.o hash.o kernel_user_comm.o \
- prng.o workitem.o libcfs_cpu.o \
- libcfs_mem.o libcfs_lock.o
+ libcfs_string.o hash.o prng.o workitem.o \
+ libcfs_cpu.o libcfs_mem.o libcfs_lock.o
libcfs-objs := $(libcfs-linux-objs) $(libcfs-all-objs)
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 0b38dad13546..c3d628bac5b8 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -47,15 +47,15 @@
static char debug_file_name[1024];
unsigned int libcfs_subsystem_debug = ~0;
+EXPORT_SYMBOL(libcfs_subsystem_debug);
module_param(libcfs_subsystem_debug, int, 0644);
MODULE_PARM_DESC(libcfs_subsystem_debug, "Lustre kernel debug subsystem mask");
-EXPORT_SYMBOL(libcfs_subsystem_debug);
unsigned int libcfs_debug = (D_CANTMASK |
D_NETERROR | D_HA | D_CONFIG | D_IOCTL);
+EXPORT_SYMBOL(libcfs_debug);
module_param(libcfs_debug, int, 0644);
MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
-EXPORT_SYMBOL(libcfs_debug);
static int libcfs_param_debug_mb_set(const char *val,
const struct kernel_param *kp)
@@ -82,7 +82,8 @@ static int libcfs_param_debug_mb_set(const char *val,
	/* While the debug_mb setting looks like an unsigned int, it in fact
	 * needs quite a bit of extra processing, so we define a special
- * debugmb parameter type with corresponding methods to handle this case */
+ * debugmb parameter type with corresponding methods to handle this case
+ */
static struct kernel_param_ops param_ops_debugmb = {
.set = libcfs_param_debug_mb_set,
.get = param_get_uint,
@@ -227,8 +228,7 @@ MODULE_PARM_DESC(libcfs_debug_file_path,
int libcfs_panic_in_progress;
-/* libcfs_debug_token2mask() expects the returned
- * string in lower-case */
+/* libcfs_debug_token2mask() expects the returned string in lower-case */
static const char *
libcfs_debug_subsys2str(int subsys)
{
@@ -271,6 +271,8 @@ libcfs_debug_subsys2str(int subsys)
return "lquota";
case S_OSD:
return "osd";
+ case S_LFSCK:
+ return "lfsck";
case S_LMV:
return "lmv";
case S_SEC:
@@ -288,8 +290,7 @@ libcfs_debug_subsys2str(int subsys)
}
}
-/* libcfs_debug_token2mask() expects the returned
- * string in lower-case */
+/* libcfs_debug_token2mask() expects the returned string in lower-case */
static const char *
libcfs_debug_dbg2str(int debug)
{
@@ -376,7 +377,7 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
continue;
token = fn(i);
- if (token == NULL) /* unused bit */
+ if (!token) /* unused bit */
continue;
if (len > 0) { /* separator? */
@@ -416,7 +417,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
/* Allow a number for backwards compatibility */
for (n = strlen(str); n > 0; n--)
- if (!isspace(str[n-1]))
+ if (!isspace(str[n - 1]))
break;
matched = n;
t = sscanf(str, "%i%n", &m, &matched);
@@ -446,8 +447,7 @@ void libcfs_debug_dumplog_internal(void *arg)
snprintf(debug_file_name, sizeof(debug_file_name) - 1,
"%s.%lld.%ld", libcfs_debug_file_path_arr,
(s64)ktime_get_real_seconds(), (long_ptr_t)arg);
- pr_alert("LustreError: dumping log to %s\n",
- debug_file_name);
+ pr_alert("LustreError: dumping log to %s\n", debug_file_name);
cfs_tracefile_dump_all_pages(debug_file_name);
libcfs_run_debug_log_upcall(debug_file_name);
}
@@ -469,7 +469,8 @@ void libcfs_debug_dumplog(void)
/* we're being careful to ensure that the kernel thread is
* able to set our state to running as it exits before we
- * get to schedule() */
+ * get to schedule()
+ */
init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&debug_ctlwq, &wait);
@@ -503,19 +504,20 @@ int libcfs_debug_init(unsigned long bufsize)
libcfs_console_min_delay = CDEBUG_DEFAULT_MIN_DELAY;
}
- if (libcfs_debug_file_path != NULL) {
+ if (libcfs_debug_file_path) {
strlcpy(libcfs_debug_file_path_arr,
libcfs_debug_file_path,
sizeof(libcfs_debug_file_path_arr));
}
/* If libcfs_debug_mb is set to an invalid value or uninitialized
- * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES */
+ * then just make the total buffers smp_num_cpus * TCD_MAX_PAGES
+ */
if (max > cfs_trace_max_debug_mb() || max < num_possible_cpus()) {
max = TCD_MAX_PAGES;
} else {
max = max / num_possible_cpus();
- max <<= (20 - PAGE_CACHE_SHIFT);
+ max <<= (20 - PAGE_SHIFT);
}
rc = cfs_tracefile_init(max);
@@ -540,8 +542,7 @@ int libcfs_debug_clear_buffer(void)
return 0;
}
-/* Debug markers, although printed by S_LNET
- * should not be be marked as such. */
+/* Debug markers, although printed by S_LNET, should not be marked as such. */
#undef DEBUG_SUBSYSTEM
#define DEBUG_SUBSYSTEM S_UNDEFINED
int libcfs_debug_mark_buffer(const char *text)
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index 27831432d69a..dadaf7685cbd 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -97,7 +97,8 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
/* Lost race to set CFS_FAILED_BIT. */
if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
/* If CFS_FAIL_ONCE is valid, only one process can fail,
- * otherwise multi-process can fail at the same time. */
+ * otherwise multi-process can fail at the same time.
+ */
if (cfs_fail_loc & CFS_FAIL_ONCE)
return 0;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index 4d50510434be..f60feb3a3dc7 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -355,7 +355,7 @@ cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
dh = container_of(cfs_hash_dh_hhead(hs, bd),
struct cfs_hash_dhead, dh_head);
- if (dh->dh_tail != NULL) /* not empty */
+ if (dh->dh_tail) /* not empty */
hlist_add_behind(hnode, dh->dh_tail);
else /* empty list */
hlist_add_head(hnode, &dh->dh_head);
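
The dh_tail bookkeeping these hunks touch exists because an hlist only tracks its head; caching the tail makes append O(1) via hlist_add_behind(). A condensed sketch of the pattern:

	#include <linux/list.h>

	struct dhead {
		struct hlist_head head;
		struct hlist_node *tail;	/* cached last node, NULL if empty */
	};

	static void dhead_append(struct dhead *dh, struct hlist_node *n)
	{
		if (dh->tail)			/* not empty: O(1) tail insert */
			hlist_add_behind(n, dh->tail);
		else				/* empty list */
			hlist_add_head(n, &dh->head);
		dh->tail = n;
	}
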
@@ -371,7 +371,7 @@ cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
dh = container_of(cfs_hash_dh_hhead(hs, bd),
struct cfs_hash_dhead, dh_head);
- if (hnd->next == NULL) { /* it's the tail */
+ if (!hnd->next) { /* it's the tail */
dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
container_of(hnd->pprev, struct hlist_node, next);
}
@@ -412,7 +412,7 @@ cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
dh = container_of(cfs_hash_dd_hhead(hs, bd),
struct cfs_hash_dhead_dep, dd_head);
- if (dh->dd_tail != NULL) /* not empty */
+ if (dh->dd_tail) /* not empty */
hlist_add_behind(hnode, dh->dd_tail);
else /* empty list */
hlist_add_head(hnode, &dh->dd_head);
@@ -428,7 +428,7 @@ cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
dh = container_of(cfs_hash_dd_hhead(hs, bd),
struct cfs_hash_dhead_dep, dd_head);
- if (hnd->next == NULL) { /* it's the tail */
+ if (!hnd->next) { /* it's the tail */
dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
container_of(hnd->pprev, struct hlist_node, next);
}
@@ -492,7 +492,7 @@ void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
/* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (likely(hs->hs_rehash_buckets == NULL)) {
+ if (likely(!hs->hs_rehash_buckets)) {
cfs_hash_bd_from_key(hs, hs->hs_buckets,
hs->hs_cur_bits, key, bd);
} else {
@@ -579,7 +579,8 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
return;
/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
- * in cfs_hash_bd_del/add_locked */
+ * in cfs_hash_bd_del/add_locked
+ */
hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
cfs_hash_bd_dep_record(hs, bd_new, rc);
@@ -635,13 +636,14 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
/* with this function, we can avoid a lot of useless refcount ops,
- * which are expensive atomic operations most time. */
+	 * which are expensive atomic operations most of the time.
+ */
match = intent_add ? NULL : hnode;
hlist_for_each(ehnode, hhead) {
if (!cfs_hash_keycmp(hs, key, ehnode))
continue;
- if (match != NULL && match != ehnode) /* can't match */
+ if (match && match != ehnode) /* can't match */
continue;
/* match and ... */
@@ -659,7 +661,7 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
if (!intent_add)
return NULL;
- LASSERT(hnode != NULL);
+ LASSERT(hnode);
cfs_hash_bd_add_locked(hs, bd, hnode);
return hnode;
}
@@ -698,8 +700,7 @@ cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
if (prev == bds[i].bd_bucket)
continue;
- LASSERT(prev == NULL ||
- prev->hsb_index < bds[i].bd_bucket->hsb_index);
+ LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index);
cfs_hash_bd_lock(hs, &bds[i], excl);
prev = bds[i].bd_bucket;
}
@@ -730,7 +731,7 @@ cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
cfs_hash_for_each_bd(bds, n, i) {
ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
CFS_HS_LOOKUP_IT_FIND);
- if (ehnode != NULL)
+ if (ehnode)
return ehnode;
}
return NULL;
@@ -745,13 +746,13 @@ cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
int intent;
unsigned i;
- LASSERT(hnode != NULL);
+ LASSERT(hnode);
intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
cfs_hash_for_each_bd(bds, n, i) {
ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
NULL, intent);
- if (ehnode != NULL)
+ if (ehnode)
return ehnode;
}
@@ -778,7 +779,7 @@ cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
cfs_hash_for_each_bd(bds, n, i) {
ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
CFS_HS_LOOKUP_IT_FINDDEL);
- if (ehnode != NULL)
+ if (ehnode)
return ehnode;
}
return NULL;
@@ -789,26 +790,20 @@ cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
int rc;
- if (bd2->bd_bucket == NULL)
+ if (!bd2->bd_bucket)
return;
- if (bd1->bd_bucket == NULL) {
+ if (!bd1->bd_bucket) {
*bd1 = *bd2;
bd2->bd_bucket = NULL;
return;
}
rc = cfs_hash_bd_compare(bd1, bd2);
- if (rc == 0) {
+ if (!rc)
bd2->bd_bucket = NULL;
-
- } else if (rc > 0) { /* swab bd1 and bd2 */
- struct cfs_hash_bd tmp;
-
- tmp = *bd2;
- *bd2 = *bd1;
- *bd1 = tmp;
- }
+ else if (rc > 0)
+ swap(*bd1, *bd2); /* swap bd1 and bd2 */
}
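
swap(), from <linux/kernel.h>, replaces the removed temporary-variable dance with a single macro that exchanges two lvalues of the same type:

	#include <linux/kernel.h>

	/* Keep *a <= *b, exchanging through swap()'s hidden temporary. */
	static void order_pair(int *a, int *b)
	{
		if (*a > *b)
			swap(*a, *b);
	}
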
void
@@ -818,7 +813,7 @@ cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
/* NB: caller should hold hs_lock.rw if REHASH is set */
cfs_hash_bd_from_key(hs, hs->hs_buckets,
hs->hs_cur_bits, key, &bds[0]);
- if (likely(hs->hs_rehash_buckets == NULL)) {
+ if (likely(!hs->hs_rehash_buckets)) {
/* no rehash or not rehashing */
bds[1].bd_bucket = NULL;
return;
@@ -873,7 +868,7 @@ cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
int i;
for (i = prev_size; i < size; i++) {
- if (buckets[i] != NULL)
+ if (buckets[i])
LIBCFS_FREE(buckets[i], bkt_size);
}
@@ -892,16 +887,16 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
struct cfs_hash_bucket **new_bkts;
int i;
- LASSERT(old_size == 0 || old_bkts != NULL);
+ LASSERT(old_size == 0 || old_bkts);
- if (old_bkts != NULL && old_size == new_size)
+ if (old_bkts && old_size == new_size)
return old_bkts;
LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
- if (new_bkts == NULL)
+ if (!new_bkts)
return NULL;
- if (old_bkts != NULL) {
+ if (old_bkts) {
memcpy(new_bkts, old_bkts,
min(old_size, new_size) * sizeof(*old_bkts));
}
@@ -911,7 +906,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
struct cfs_hash_bd bd;
LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
- if (new_bkts[i] == NULL) {
+ if (!new_bkts[i]) {
cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
old_size, new_size);
return NULL;
@@ -1011,14 +1006,13 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
CLASSERT(CFS_HASH_THETA_BITS < 15);
- LASSERT(name != NULL);
- LASSERT(ops != NULL);
+ LASSERT(name);
LASSERT(ops->hs_key);
LASSERT(ops->hs_hash);
LASSERT(ops->hs_object);
LASSERT(ops->hs_keycmp);
- LASSERT(ops->hs_get != NULL);
- LASSERT(ops->hs_put_locked != NULL);
+ LASSERT(ops->hs_get);
+ LASSERT(ops->hs_put_locked);
if ((flags & CFS_HASH_REHASH) != 0)
flags |= CFS_HASH_COUNTER; /* must have counter */
@@ -1029,13 +1023,12 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
(flags & CFS_HASH_NO_LOCK) == 0));
- LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
- ops->hs_keycpy != NULL));
+ LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));
len = (flags & CFS_HASH_BIGNAME) == 0 ?
CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
- if (hs == NULL)
+ if (!hs)
return NULL;
strlcpy(hs->hs_name, name, len);
@@ -1063,7 +1056,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
CFS_HASH_NBKT(hs));
- if (hs->hs_buckets != NULL)
+ if (hs->hs_buckets)
return hs;
LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
@@ -1082,7 +1075,7 @@ cfs_hash_destroy(struct cfs_hash *hs)
struct cfs_hash_bd bd;
int i;
- LASSERT(hs != NULL);
+ LASSERT(hs);
LASSERT(!cfs_hash_is_exiting(hs) &&
!cfs_hash_is_iterating(hs));
@@ -1096,13 +1089,12 @@ cfs_hash_destroy(struct cfs_hash *hs)
cfs_hash_depth_wi_cancel(hs);
/* rehash should be done/canceled */
- LASSERT(hs->hs_buckets != NULL &&
- hs->hs_rehash_buckets == NULL);
+ LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets);
cfs_hash_for_each_bucket(hs, &bd, i) {
struct hlist_head *hhead;
- LASSERT(bd.bd_bucket != NULL);
+ LASSERT(bd.bd_bucket);
/* no need to take this lock, just for consistent code */
cfs_hash_bd_lock(hs, &bd, 1);
@@ -1113,7 +1105,8 @@ cfs_hash_destroy(struct cfs_hash *hs)
hs->hs_name, bd.bd_bucket->hsb_index,
bd.bd_offset, bd.bd_bucket->hsb_count);
	/* can't assert key validity, because we
- * can interrupt rehash */
+ * can interrupt rehash
+ */
cfs_hash_bd_del_locked(hs, &bd, hnode);
cfs_hash_exit(hs, hnode);
}
@@ -1164,7 +1157,8 @@ cfs_hash_rehash_bits(struct cfs_hash *hs)
return -EAGAIN;
/* XXX: need to handle case with max_theta != 2.0
- * and the case with min_theta != 0.5 */
+ * and the case with min_theta != 0.5
+ */
if ((hs->hs_cur_bits < hs->hs_max_bits) &&
(__cfs_hash_theta(hs) > hs->hs_max_theta))
return hs->hs_cur_bits + 1;
@@ -1293,8 +1287,8 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
/* NB: do nothing if @hnode is not in hash table */
- if (hnode == NULL || !hlist_unhashed(hnode)) {
- if (bds[1].bd_bucket == NULL && hnode != NULL) {
+ if (!hnode || !hlist_unhashed(hnode)) {
+ if (!bds[1].bd_bucket && hnode) {
cfs_hash_bd_del_locked(hs, &bds[0], hnode);
} else {
hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
@@ -1302,7 +1296,7 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
}
}
- if (hnode != NULL) {
+ if (hnode) {
obj = cfs_hash_object(hs, hnode);
bits = cfs_hash_rehash_bits(hs);
}
@@ -1348,7 +1342,7 @@ cfs_hash_lookup(struct cfs_hash *hs, const void *key)
cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
- if (hnode != NULL)
+ if (hnode)
obj = cfs_hash_object(hs, hnode);
cfs_hash_dual_bd_unlock(hs, bds, 0);
@@ -1378,7 +1372,8 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
/* NB: iteration is mostly called by service thread,
* we tend to cancel pending rehash-request, instead of
* blocking service thread, we will relaunch rehash request
- * after iteration */
+ * after iteration
+ */
if (cfs_hash_is_rehashing(hs))
cfs_hash_rehash_cancel_locked(hs);
cfs_hash_unlock(hs, 1);
@@ -1436,7 +1431,7 @@ cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
struct hlist_head *hhead;
cfs_hash_bd_lock(hs, &bd, excl);
- if (func == NULL) { /* only glimpse size */
+ if (!func) { /* only glimpse size */
count += bd.bd_bucket->hsb_count;
cfs_hash_bd_unlock(hs, &bd, excl);
continue;
@@ -1574,7 +1569,7 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
stop_on_change = cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs) ||
- hs->hs_ops->hs_put_locked == NULL;
+ !hs->hs_ops->hs_put_locked;
cfs_hash_lock(hs, 0);
LASSERT(!cfs_hash_is_rehashing(hs));
@@ -1585,7 +1580,7 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
version = cfs_hash_bd_version_get(&bd);
cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
- for (hnode = hhead->first; hnode != NULL;) {
+ for (hnode = hhead->first; hnode;) {
cfs_hash_bucket_validate(hs, &bd, hnode);
cfs_hash_get(hs, hnode);
cfs_hash_bd_unlock(hs, &bd, 0);
@@ -1634,9 +1629,8 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
!cfs_hash_with_no_itemref(hs))
return -EOPNOTSUPP;
- if (hs->hs_ops->hs_get == NULL ||
- (hs->hs_ops->hs_put == NULL &&
- hs->hs_ops->hs_put_locked == NULL))
+ if (!hs->hs_ops->hs_get ||
+ (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
@@ -1667,9 +1661,8 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
if (cfs_hash_with_no_lock(hs))
return -EOPNOTSUPP;
- if (hs->hs_ops->hs_get == NULL ||
- (hs->hs_ops->hs_put == NULL &&
- hs->hs_ops->hs_put_locked == NULL))
+ if (!hs->hs_ops->hs_get ||
+ (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
@@ -1708,7 +1701,6 @@ out:
cfs_hash_unlock(hs, 0);
cfs_hash_for_each_exit(hs);
}
-
EXPORT_SYMBOL(cfs_hash_hlist_for_each);
/*
@@ -1837,7 +1829,7 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
cfs_hash_bd_for_each_hlist(hs, old, hhead) {
hlist_for_each_safe(hnode, pos, hhead) {
key = cfs_hash_key(hs, hnode);
- LASSERT(key != NULL);
+ LASSERT(key);
/* Validate hnode is in the correct bucket. */
cfs_hash_bucket_validate(hs, old, hnode);
/*
@@ -1867,7 +1859,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
int rc = 0;
int i;
- LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
+ LASSERT(hs && cfs_hash_with_rehash(hs));
cfs_hash_lock(hs, 0);
LASSERT(cfs_hash_is_rehashing(hs));
@@ -1884,7 +1876,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
old_size, new_size);
cfs_hash_lock(hs, 1);
- if (bkts == NULL) {
+ if (!bkts) {
rc = -ENOMEM;
goto out;
}
@@ -1903,7 +1895,7 @@ cfs_hash_rehash_worker(cfs_workitem_t *wi)
goto out;
}
- LASSERT(hs->hs_rehash_buckets == NULL);
+ LASSERT(!hs->hs_rehash_buckets);
hs->hs_rehash_buckets = bkts;
rc = 0;
@@ -1946,7 +1938,7 @@ out:
bsize = cfs_hash_bkt_size(hs);
cfs_hash_unlock(hs, 1);
/* can't refer to @hs anymore because it could be destroyed */
- if (bkts != NULL)
+ if (bkts)
cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
if (rc != 0)
CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
@@ -1987,14 +1979,15 @@ void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
cfs_hash_bd_order(&bds[0], &bds[1]);
cfs_hash_multi_bd_lock(hs, bds, 3, 1);
- if (likely(old_bds[1].bd_bucket == NULL)) {
+ if (likely(!old_bds[1].bd_bucket)) {
cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
} else {
cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
cfs_hash_bd_add_locked(hs, &new_bd, hnode);
}
/* overwrite key inside locks, otherwise may screw up with
- * other operations, i.e: rehash */
+ * other operations, i.e: rehash
+ */
cfs_hash_keycpy(hs, hnode, new_key);
cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
@@ -2013,7 +2006,7 @@ static struct cfs_hash_bucket **
cfs_hash_full_bkts(struct cfs_hash *hs)
{
/* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (hs->hs_rehash_buckets == NULL)
+ if (!hs->hs_rehash_buckets)
return hs->hs_buckets;
LASSERT(hs->hs_rehash_bits != 0);
@@ -2025,7 +2018,7 @@ static unsigned int
cfs_hash_full_nbkt(struct cfs_hash *hs)
{
/* NB: caller should hold hs->hs_rwlock if REHASH is set */
- if (hs->hs_rehash_buckets == NULL)
+ if (!hs->hs_rehash_buckets)
return CFS_HASH_NBKT(hs);
LASSERT(hs->hs_rehash_bits != 0);
@@ -2046,15 +2039,15 @@ void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
theta = __cfs_hash_theta(hs);
seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
- CFS_HASH_BIGNAME_LEN, hs->hs_name,
- 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
- 1 << hs->hs_max_bits,
- __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
- __cfs_hash_theta_int(hs->hs_min_theta),
- __cfs_hash_theta_frac(hs->hs_min_theta),
- __cfs_hash_theta_int(hs->hs_max_theta),
- __cfs_hash_theta_frac(hs->hs_max_theta),
- hs->hs_flags, hs->hs_rehash_count);
+ CFS_HASH_BIGNAME_LEN, hs->hs_name,
+ 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
+ 1 << hs->hs_max_bits,
+ __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
+ __cfs_hash_theta_int(hs->hs_min_theta),
+ __cfs_hash_theta_frac(hs->hs_min_theta),
+ __cfs_hash_theta_int(hs->hs_max_theta),
+ __cfs_hash_theta_frac(hs->hs_max_theta),
+ hs->hs_flags, hs->hs_rehash_count);
/*
* The distribution is a summary of the chained hash depth in
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
index 933525c73da1..33352af6c27f 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/*
@@ -56,8 +51,9 @@ cfs_cpt_table_alloc(unsigned int ncpt)
}
LIBCFS_ALLOC(cptab, sizeof(*cptab));
- if (cptab != NULL) {
+ if (cptab) {
cptab->ctb_version = CFS_CPU_VERSION_MAGIC;
+ node_set(0, cptab->ctb_nodemask);
cptab->ctb_nparts = ncpt;
}
@@ -111,6 +107,13 @@ cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
}
EXPORT_SYMBOL(cfs_cpt_online);
+nodemask_t *
+cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt)
+{
+ return &cptab->ctb_nodemask;
+}
+EXPORT_SYMBOL(cfs_cpt_nodemask);
+
int
cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
{
@@ -207,7 +210,7 @@ EXPORT_SYMBOL(cfs_cpt_bind);
void
cfs_cpu_fini(void)
{
- if (cfs_cpt_table != NULL) {
+ if (cfs_cpt_table) {
cfs_cpt_table_free(cfs_cpt_table);
cfs_cpt_table = NULL;
}
@@ -218,7 +221,7 @@ cfs_cpu_init(void)
{
cfs_cpt_table = cfs_cpt_table_alloc(1);
- return cfs_cpt_table != NULL ? 0 : -1;
+ return cfs_cpt_table ? 0 : -1;
}
#endif /* HAVE_LIBCFS_CPT */
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 15782d9e6aa9..2de9eeae0232 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
@@ -38,7 +33,7 @@
void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
- LASSERT(pcl->pcl_locks != NULL);
+ LASSERT(pcl->pcl_locks);
LASSERT(!pcl->pcl_locked);
cfs_percpt_free(pcl->pcl_locks);
@@ -90,6 +85,7 @@ EXPORT_SYMBOL(cfs_percpt_lock_alloc);
*/
void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
+ __acquires(pcl->pcl_locks)
{
int ncpt = cfs_cpt_number(pcl->pcl_cptab);
int i;
@@ -114,7 +110,8 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
if (i == 0) {
LASSERT(!pcl->pcl_locked);
/* nobody should take private lock after this
- * so I wouldn't starve for too long time */
+	 * so I wouldn't starve for too long
+ */
pcl->pcl_locked = 1;
}
}
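
__acquires()/__releases(), added to the lock/unlock pair above, are annotations for the sparse static checker: they declare that the function changes lock context, so code paths that lock without unlocking (or vice versa) get flagged at analysis time. The general pattern:

	#include <linux/spinlock.h>

	static void my_lock(spinlock_t *lock)
		__acquires(lock)
	{
		spin_lock(lock);	/* sparse: context entered here */
	}

	static void my_unlock(spinlock_t *lock)
		__releases(lock)
	{
		spin_unlock(lock);	/* sparse: context left here */
	}
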
@@ -124,6 +121,7 @@ EXPORT_SYMBOL(cfs_percpt_lock);
/** unlock a CPU partition */
void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
+ __releases(pcl->pcl_locks)
{
int ncpt = cfs_cpt_number(pcl->pcl_cptab);
int i;
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index 27cf86106363..c5a6951516ed 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/*
@@ -54,7 +49,7 @@ cfs_percpt_free(void *vars)
arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
for (i = 0; i < arr->va_count; i++) {
- if (arr->va_ptrs[i] != NULL)
+ if (arr->va_ptrs[i])
LIBCFS_FREE(arr->va_ptrs[i], arr->va_size);
}
@@ -87,9 +82,10 @@ cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
if (!arr)
return NULL;
- arr->va_size = size = L1_CACHE_ALIGN(size);
- arr->va_count = count;
- arr->va_cptab = cptab;
+ size = L1_CACHE_ALIGN(size);
+ arr->va_size = size;
+ arr->va_count = count;
+ arr->va_cptab = cptab;
for (i = 0; i < count; i++) {
LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size);
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 205a3ed435a8..50ac1536db4b 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -54,7 +54,8 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
* and optionally an operator ('+' or '-'). If an operator
* appears first in <str>, '*oldmask' is used as the starting point
* (relative), otherwise minmask is used (absolute). An operator
- * applies to all following tokens up to the next operator. */
+ * applies to all following tokens up to the next operator.
+ */
while (*str != '\0') {
while (isspace(*str))
str++;
@@ -81,8 +82,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
found = 0;
for (i = 0; i < 32; i++) {
debugstr = bit2str(i);
- if (debugstr != NULL &&
- strlen(debugstr) == len &&
+ if (debugstr && strlen(debugstr) == len &&
strncasecmp(str, debugstr, len) == 0) {
if (op == '-')
newmask &= ~(1 << i);
@@ -175,7 +175,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
{
char *end;
- if (next->ls_str == NULL)
+ if (!next->ls_str)
return 0;
/* skip leading white spaces */
@@ -196,7 +196,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
res->ls_str = next->ls_str;
end = memchr(next->ls_str, delim, next->ls_len);
- if (end == NULL) {
+ if (!end) {
		/* there is no delimiter in the string */
end = next->ls_str + next->ls_len;
next->ls_str = NULL;
@@ -229,17 +229,37 @@ int
cfs_str2num_check(char *str, int nob, unsigned *num,
unsigned min, unsigned max)
{
- char *endp;
+ bool all_numbers = true;
+ char *endp, cache;
+ int rc;
str = cfs_trimwhite(str);
- *num = simple_strtoul(str, &endp, 0);
- if (endp == str)
- return 0;
- for (; endp < str + nob; endp++) {
- if (!isspace(*endp))
- return 0;
+ /**
+	 * kstrtouint can only handle strings composed
+ * of only numbers. We need to scan the string
+ * passed in for the first non-digit character
+ * and end the string at that location. If we
+ * don't find any non-digit character we still
+ * need to place a '\0' at position nob since
+ * we are not interested in the rest of the
+ * string which is longer than nob in size.
+ * After we are done the character at the
+ * position we placed '\0' must be restored.
+ */
+ for (endp = str; endp < str + nob; endp++) {
+ if (!isdigit(*endp)) {
+ all_numbers = false;
+ break;
+ }
}
+ cache = *endp;
+ *endp = '\0';
+
+ rc = kstrtouint(str, 10, num);
+ *endp = cache;
+ if (rc || !all_numbers)
+ return 0;
return (*num >= min && *num <= max);
}
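
The rewrite above works around kstrtouint()'s requirement that its input be a NUL-terminated, digits-only string: terminate the span at the first non-digit, parse, then put the original byte back. A standalone sketch of that idiom (it assumes, as the code above does, that the byte just past the span is addressable):

	#include <linux/ctype.h>
	#include <linux/kernel.h>

	static int parse_uint_span(char *str, int nob, unsigned int *num)
	{
		char *end = str, saved;
		int rc;

		while (end < str + nob && isdigit(*end))
			end++;

		saved = *end;		/* may be any byte, not necessarily '\0' */
		*end = '\0';
		rc = kstrtouint(str, 10, num);
		*end = saved;		/* always restore the caller's buffer */

		return rc;
	}
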
@@ -266,7 +286,7 @@ cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
struct cfs_lstr tok;
LIBCFS_ALLOC(re, sizeof(*re));
- if (re == NULL)
+ if (!re)
return -ENOMEM;
if (src->ls_len == 1 && src->ls_str[0] == '*') {
@@ -337,18 +357,19 @@ cfs_range_expr_print(char *buffer, int count, struct cfs_range_expr *expr,
char s[] = "[";
char e[] = "]";
- if (bracketed)
- s[0] = e[0] = '\0';
+ if (bracketed) {
+ s[0] = '\0';
+ e[0] = '\0';
+ }
if (expr->re_lo == expr->re_hi)
i = scnprintf(buffer, count, "%u", expr->re_lo);
else if (expr->re_stride == 1)
i = scnprintf(buffer, count, "%s%u-%u%s",
- s, expr->re_lo, expr->re_hi, e);
+ s, expr->re_lo, expr->re_hi, e);
else
i = scnprintf(buffer, count, "%s%u-%u/%u%s",
- s, expr->re_lo, expr->re_hi,
- expr->re_stride, e);
+ s, expr->re_lo, expr->re_hi, expr->re_stride, e);
return i;
}
@@ -442,7 +463,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
}
LIBCFS_ALLOC(val, sizeof(val[0]) * count);
- if (val == NULL)
+ if (!val)
return -ENOMEM;
count = 0;
@@ -470,7 +491,7 @@ cfs_expr_list_free(struct cfs_expr_list *expr_list)
struct cfs_range_expr *expr;
expr = list_entry(expr_list->el_exprs.next,
- struct cfs_range_expr, re_link);
+ struct cfs_range_expr, re_link);
list_del(&expr->re_link);
LIBCFS_FREE(expr, sizeof(*expr));
}
@@ -495,7 +516,7 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
int rc;
LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
- if (expr_list == NULL)
+ if (!expr_list)
return -ENOMEM;
src.ls_str = str;
@@ -509,7 +530,7 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
src.ls_len -= 2;
rc = -EINVAL;
- while (src.ls_str != NULL) {
+ while (src.ls_str) {
struct cfs_lstr tok;
if (!cfs_gettok(&src, ',', &tok)) {
@@ -521,15 +542,12 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
if (rc != 0)
break;
- list_add_tail(&expr->re_link,
- &expr_list->el_exprs);
+ list_add_tail(&expr->re_link, &expr_list->el_exprs);
}
} else {
rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
- if (rc == 0) {
- list_add_tail(&expr->re_link,
- &expr_list->el_exprs);
- }
+ if (rc == 0)
+ list_add_tail(&expr->re_link, &expr_list->el_exprs);
}
if (rc != 0)
@@ -555,8 +573,7 @@ cfs_expr_list_free_list(struct list_head *list)
struct cfs_expr_list *el;
while (!list_empty(list)) {
- el = list_entry(list->next,
- struct cfs_expr_list, el_link);
+ el = list_entry(list->next, struct cfs_expr_list, el_link);
list_del(&el->el_link);
cfs_expr_list_free(el);
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index e52afe35e7ea..389fb9eeea75 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/*
@@ -84,32 +79,32 @@ cfs_cpt_table_free(struct cfs_cpt_table *cptab)
{
int i;
- if (cptab->ctb_cpu2cpt != NULL) {
+ if (cptab->ctb_cpu2cpt) {
LIBCFS_FREE(cptab->ctb_cpu2cpt,
num_possible_cpus() *
sizeof(cptab->ctb_cpu2cpt[0]));
}
- for (i = 0; cptab->ctb_parts != NULL && i < cptab->ctb_nparts; i++) {
+ for (i = 0; cptab->ctb_parts && i < cptab->ctb_nparts; i++) {
struct cfs_cpu_partition *part = &cptab->ctb_parts[i];
- if (part->cpt_nodemask != NULL) {
+ if (part->cpt_nodemask) {
LIBCFS_FREE(part->cpt_nodemask,
sizeof(*part->cpt_nodemask));
}
- if (part->cpt_cpumask != NULL)
+ if (part->cpt_cpumask)
LIBCFS_FREE(part->cpt_cpumask, cpumask_size());
}
- if (cptab->ctb_parts != NULL) {
+ if (cptab->ctb_parts) {
LIBCFS_FREE(cptab->ctb_parts,
cptab->ctb_nparts * sizeof(cptab->ctb_parts[0]));
}
- if (cptab->ctb_nodemask != NULL)
+ if (cptab->ctb_nodemask)
LIBCFS_FREE(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
- if (cptab->ctb_cpumask != NULL)
+ if (cptab->ctb_cpumask)
LIBCFS_FREE(cptab->ctb_cpumask, cpumask_size());
LIBCFS_FREE(cptab, sizeof(*cptab));
@@ -123,7 +118,7 @@ cfs_cpt_table_alloc(unsigned int ncpt)
int i;
LIBCFS_ALLOC(cptab, sizeof(*cptab));
- if (cptab == NULL)
+ if (!cptab)
return NULL;
cptab->ctb_nparts = ncpt;
@@ -131,19 +126,19 @@ cfs_cpt_table_alloc(unsigned int ncpt)
LIBCFS_ALLOC(cptab->ctb_cpumask, cpumask_size());
LIBCFS_ALLOC(cptab->ctb_nodemask, sizeof(*cptab->ctb_nodemask));
- if (cptab->ctb_cpumask == NULL || cptab->ctb_nodemask == NULL)
+ if (!cptab->ctb_cpumask || !cptab->ctb_nodemask)
goto failed;
LIBCFS_ALLOC(cptab->ctb_cpu2cpt,
num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
- if (cptab->ctb_cpu2cpt == NULL)
+ if (!cptab->ctb_cpu2cpt)
goto failed;
memset(cptab->ctb_cpu2cpt, -1,
num_possible_cpus() * sizeof(cptab->ctb_cpu2cpt[0]));
LIBCFS_ALLOC(cptab->ctb_parts, ncpt * sizeof(cptab->ctb_parts[0]));
- if (cptab->ctb_parts == NULL)
+ if (!cptab->ctb_parts)
goto failed;
for (i = 0; i < ncpt; i++) {
@@ -151,7 +146,7 @@ cfs_cpt_table_alloc(unsigned int ncpt)
LIBCFS_ALLOC(part->cpt_cpumask, cpumask_size());
LIBCFS_ALLOC(part->cpt_nodemask, sizeof(*part->cpt_nodemask));
- if (part->cpt_cpumask == NULL || part->cpt_nodemask == NULL)
+ if (!part->cpt_cpumask || !part->cpt_nodemask)
goto failed;
}
@@ -359,8 +354,6 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
if (i >= nr_cpu_ids)
node_clear(node, *cptab->ctb_nodemask);
-
- return;
}
EXPORT_SYMBOL(cfs_cpt_unset_cpu);
@@ -530,7 +523,8 @@ cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
return cpt;
/* don't return negative value for safety of upper layer,
- * instead we shadow the unknown cpu to a valid partition ID */
+ * instead we shadow the unknown cpu to a valid partition ID
+ */
cpt = cpu % cptab->ctb_nparts;
}
@@ -618,7 +612,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
/* allocate scratch buffer */
LIBCFS_ALLOC(socket, cpumask_size());
LIBCFS_ALLOC(core, cpumask_size());
- if (socket == NULL || core == NULL) {
+ if (!socket || !core) {
rc = -ENOMEM;
goto out;
}
@@ -659,9 +653,9 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
}
out:
- if (socket != NULL)
+ if (socket)
LIBCFS_FREE(socket, cpumask_size());
- if (core != NULL)
+ if (core)
LIBCFS_FREE(core, cpumask_size());
return rc;
}
@@ -682,7 +676,8 @@ cfs_cpt_num_estimate(void)
/* generate reasonable number of CPU partitions based on total number
* of CPUs, Preferred N should be power2 and match this condition:
- * 2 * (N - 1)^2 < NCPUS <= 2 * N^2 */
+ * 2 * (N - 1)^2 < NCPUS <= 2 * N^2
+ */
for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1)
;
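
The loop implements the comment's bound by doubling ncpt until NCPUS <= 2 * ncpt^2, so it lands on the smallest power of two satisfying the stated inequality. Ignoring the function's small-system special cases, a quick userspace check of what it yields:

	#include <stdio.h>

	int main(void)
	{
		for (int ncpu = 8; ncpu <= 128; ncpu <<= 1) {
			int ncpt;

			for (ncpt = 2; ncpu > 2 * ncpt * ncpt; ncpt <<= 1)
				;
			printf("%3d CPUs -> %d partitions\n", ncpu, ncpt);
			/* prints: 8->2, 16->4, 32->4, 64->8, 128->8 */
		}
		return 0;
	}
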
@@ -700,7 +695,8 @@ cfs_cpt_num_estimate(void)
out:
#if (BITS_PER_LONG == 32)
/* config many CPU partitions on 32-bit system could consume
- * too much memory */
+ * too much memory
+ */
ncpt = min(2U, ncpt);
#endif
while (ncpu % ncpt != 0)
@@ -735,7 +731,7 @@ cfs_cpt_table_create(int ncpt)
}
cptab = cfs_cpt_table_alloc(ncpt);
- if (cptab == NULL) {
+ if (!cptab) {
CERROR("Failed to allocate CPU map(%d)\n", ncpt);
goto failed;
}
@@ -747,7 +743,7 @@ cfs_cpt_table_create(int ncpt)
}
LIBCFS_ALLOC(mask, cpumask_size());
- if (mask == NULL) {
+ if (!mask) {
CERROR("Failed to allocate scratch cpumask\n");
goto failed;
}
@@ -793,10 +789,10 @@ cfs_cpt_table_create(int ncpt)
CERROR("Failed to setup CPU-partition-table with %d CPU-partitions, online HW nodes: %d, HW cpus: %d.\n",
ncpt, num_online_nodes(), num_online_cpus());
- if (mask != NULL)
+ if (mask)
LIBCFS_FREE(mask, cpumask_size());
- if (cptab != NULL)
+ if (cptab)
cfs_cpt_table_free(cptab);
return NULL;
@@ -814,7 +810,7 @@ cfs_cpt_table_create_pattern(char *pattern)
for (ncpt = 0;; ncpt++) { /* quick scan bracket */
str = strchr(str, '[');
- if (str == NULL)
+ if (!str)
break;
str++;
}
@@ -836,7 +832,7 @@ cfs_cpt_table_create_pattern(char *pattern)
high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
cptab = cfs_cpt_table_alloc(ncpt);
- if (cptab == NULL) {
+ if (!cptab) {
CERROR("Failed to allocate cpu partition table\n");
return NULL;
}
@@ -850,11 +846,12 @@ cfs_cpt_table_create_pattern(char *pattern)
int i;
int n;
- if (bracket == NULL) {
+ if (!bracket) {
if (*str != 0) {
CERROR("Invalid pattern %s\n", str);
goto failed;
- } else if (c != ncpt) {
+ }
+ if (c != ncpt) {
CERROR("expect %d partitions but found %d\n",
ncpt, c);
goto failed;
@@ -885,7 +882,7 @@ cfs_cpt_table_create_pattern(char *pattern)
}
bracket = strchr(str, ']');
- if (bracket == NULL) {
+ if (!bracket) {
CERROR("missing right bracket for cpt %d, %s\n",
cpt, str);
goto failed;
@@ -943,6 +940,7 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
spin_lock(&cpt_data.cpt_lock);
cpt_data.cpt_version++;
spin_unlock(&cpt_data.cpt_lock);
+ /* Fall through */
default:
if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) {
CDEBUG(D_INFO, "CPU changed [cpu %u action %lx]\n",
@@ -975,25 +973,25 @@ static struct notifier_block cfs_cpu_notifier = {
void
cfs_cpu_fini(void)
{
- if (cfs_cpt_table != NULL)
+ if (cfs_cpt_table)
cfs_cpt_table_free(cfs_cpt_table);
#ifdef CONFIG_HOTPLUG_CPU
unregister_hotcpu_notifier(&cfs_cpu_notifier);
#endif
- if (cpt_data.cpt_cpumask != NULL)
+ if (cpt_data.cpt_cpumask)
LIBCFS_FREE(cpt_data.cpt_cpumask, cpumask_size());
}
int
cfs_cpu_init(void)
{
- LASSERT(cfs_cpt_table == NULL);
+ LASSERT(!cfs_cpt_table);
memset(&cpt_data, 0, sizeof(cpt_data));
LIBCFS_ALLOC(cpt_data.cpt_cpumask, cpumask_size());
- if (cpt_data.cpt_cpumask == NULL) {
+ if (!cpt_data.cpt_cpumask) {
CERROR("Failed to allocate scratch buffer\n");
return -1;
}
@@ -1007,7 +1005,7 @@ cfs_cpu_init(void)
if (*cpu_pattern != 0) {
cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
- if (cfs_cpt_table == NULL) {
+ if (!cfs_cpt_table) {
CERROR("Failed to create cptab from pattern %s\n",
cpu_pattern);
goto failed;
@@ -1015,7 +1013,7 @@ cfs_cpu_init(void)
} else {
cfs_cpt_table = cfs_cpt_table_create(cpu_npartitions);
- if (cfs_cpt_table == NULL) {
+ if (!cfs_cpt_table) {
CERROR("Failed to create ptable with npartitions %d\n",
cpu_npartitions);
goto failed;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
index db0572733712..db0572733712 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 079d50ebfa3a..8c9377ed850c 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -27,7 +27,7 @@
* Copyright (c) 2012, Intel Corporation.
*/
-#include <linux/crypto.h>
+#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include "../../../include/linux/libcfs/libcfs.h"
#include "linux-crypto.h"
@@ -38,30 +38,37 @@ static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
static int cfs_crypto_hash_alloc(unsigned char alg_id,
const struct cfs_crypto_hash_type **type,
- struct hash_desc *desc, unsigned char *key,
+ struct ahash_request **req,
+ unsigned char *key,
unsigned int key_len)
{
+ struct crypto_ahash *tfm;
int err = 0;
*type = cfs_crypto_hash_type(alg_id);
- if (*type == NULL) {
+ if (!*type) {
CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
alg_id, CFS_HASH_ALG_MAX);
return -EINVAL;
}
- desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);
+ tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
- if (desc->tfm == NULL)
- return -EINVAL;
-
- if (IS_ERR(desc->tfm)) {
+ if (IS_ERR(tfm)) {
CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
(*type)->cht_name);
- return PTR_ERR(desc->tfm);
+ return PTR_ERR(tfm);
}
- desc->flags = 0;
+ *req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!*req) {
+ CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
+ (*type)->cht_name);
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+ }
+
+ ahash_request_set_callback(*req, 0, NULL, NULL);
	/** Shash has different logic for initialization than digest
* shash: crypto_hash_setkey, crypto_hash_init
@@ -69,24 +76,28 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
* Skip this function for digest, because we use shash logic at
* cfs_crypto_hash_alloc.
*/
- if (key != NULL)
- err = crypto_hash_setkey(desc->tfm, key, key_len);
+ if (key)
+ err = crypto_ahash_setkey(tfm, key, key_len);
else if ((*type)->cht_key != 0)
- err = crypto_hash_setkey(desc->tfm,
+ err = crypto_ahash_setkey(tfm,
(unsigned char *)&((*type)->cht_key),
(*type)->cht_size);
if (err != 0) {
- crypto_free_hash(desc->tfm);
+ crypto_free_ahash(tfm);
return err;
}
CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
- (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
- (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
+ crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
cfs_crypto_hash_speeds[alg_id]);
- return crypto_hash_init(desc);
+ err = crypto_ahash_init(*req);
+ if (err) {
+ ahash_request_free(*req);
+ crypto_free_ahash(tfm);
+ }
+ return err;
}
int cfs_crypto_hash_digest(unsigned char alg_id,
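
The hunk above ports cfs_crypto_hash_alloc() from the long-deprecated crypto_hash/hash_desc interface to ahash; passing CRYPTO_ALG_ASYNC as the mask restricts allocation to synchronous implementations, so no completion callback is needed. For orientation, the canonical one-shot digest with this API looks roughly like the following ("md5" is only an example algorithm name):

	#include <crypto/hash.h>
	#include <linux/scatterlist.h>

	static int ahash_oneshot(const void *buf, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg, buf, len);
		ahash_request_set_callback(req, 0, NULL, NULL);	/* synchronous use */
		ahash_request_set_crypt(req, &sg, out, len);
		err = crypto_ahash_digest(req);

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return err;
	}
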
@@ -95,27 +106,29 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
unsigned char *hash, unsigned int *hash_len)
{
struct scatterlist sl;
- struct hash_desc hdesc;
+ struct ahash_request *req;
int err;
const struct cfs_crypto_hash_type *type;
- if (buf == NULL || buf_len == 0 || hash_len == NULL)
+ if (!buf || buf_len == 0 || !hash_len)
return -EINVAL;
- err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len);
+ err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
if (err != 0)
return err;
- if (hash == NULL || *hash_len < type->cht_size) {
+ if (!hash || *hash_len < type->cht_size) {
*hash_len = type->cht_size;
- crypto_free_hash(hdesc.tfm);
+ crypto_free_ahash(crypto_ahash_reqtfm(req));
+ ahash_request_free(req);
return -ENOSPC;
}
sg_init_one(&sl, buf, buf_len);
- hdesc.flags = 0;
- err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
- crypto_free_hash(hdesc.tfm);
+ ahash_request_set_crypt(req, &sl, hash, sl.length);
+ err = crypto_ahash_digest(req);
+ crypto_free_ahash(crypto_ahash_reqtfm(req));
+ ahash_request_free(req);
return err;
}
@@ -125,22 +138,15 @@ struct cfs_crypto_hash_desc *
cfs_crypto_hash_init(unsigned char alg_id,
unsigned char *key, unsigned int key_len)
{
-
- struct hash_desc *hdesc;
+ struct ahash_request *req;
int err;
const struct cfs_crypto_hash_type *type;
- hdesc = kmalloc(sizeof(*hdesc), 0);
- if (hdesc == NULL)
- return ERR_PTR(-ENOMEM);
-
- err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
+ err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
- if (err) {
- kfree(hdesc);
+ if (err)
return ERR_PTR(err);
- }
- return (struct cfs_crypto_hash_desc *)hdesc;
+ return (struct cfs_crypto_hash_desc *)req;
}
EXPORT_SYMBOL(cfs_crypto_hash_init);
@@ -148,23 +154,27 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
struct page *page, unsigned int offset,
unsigned int len)
{
+ struct ahash_request *req = (void *)hdesc;
struct scatterlist sl;
sg_init_table(&sl, 1);
sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
- return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+ ahash_request_set_crypt(req, &sl, NULL, sl.length);
+ return crypto_ahash_update(req);
}
EXPORT_SYMBOL(cfs_crypto_hash_update_page);
int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
const void *buf, unsigned int buf_len)
{
+ struct ahash_request *req = (void *)hdesc;
struct scatterlist sl;
sg_init_one(&sl, buf, buf_len);
- return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+ ahash_request_set_crypt(req, &sl, NULL, sl.length);
+ return crypto_ahash_update(req);
}
EXPORT_SYMBOL(cfs_crypto_hash_update);
@@ -173,25 +183,27 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
unsigned char *hash, unsigned int *hash_len)
{
int err;
- int size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);
+ struct ahash_request *req = (void *)hdesc;
+ int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- if (hash_len == NULL) {
- crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
- kfree(hdesc);
+ if (!hash_len) {
+ crypto_free_ahash(crypto_ahash_reqtfm(req));
+ ahash_request_free(req);
return 0;
}
- if (hash == NULL || *hash_len < size) {
+ if (!hash || *hash_len < size) {
*hash_len = size;
return -ENOSPC;
}
- err = crypto_hash_final((struct hash_desc *) hdesc, hash);
+ ahash_request_set_crypt(req, NULL, hash, 0);
+ err = crypto_ahash_final(req);
if (err < 0) {
/* Maybe the caller can fix the error */
return err;
}
- crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
- kfree(hdesc);
+ crypto_free_ahash(crypto_ahash_reqtfm(req));
+ ahash_request_free(req);
return err;
}
EXPORT_SYMBOL(cfs_crypto_hash_final);
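
Taken together, the shim preserves its old streaming contract: cfs_crypto_hash_init() returns an opaque descriptor, updates accumulate data, and the final call both emits the digest and releases the request and transform. A usage sketch, assuming the Adler-32 algorithm id from libcfs_crypto.h and eliding error handling:

/* Sketch: streaming use of the shim; error handling elided. */
static void example_stream_hash(const void *a, unsigned int alen,
				const void *b, unsigned int blen,
				unsigned char *out, unsigned int *outlen)
{
	struct cfs_crypto_hash_desc *desc;

	desc = cfs_crypto_hash_init(CFS_HASH_ALG_ADLER32, NULL, 0);
	if (IS_ERR(desc))
		return;

	cfs_crypto_hash_update(desc, a, alen);
	cfs_crypto_hash_update(desc, b, blen);
	/* emits the digest and frees the request and transform */
	cfs_crypto_hash_final(desc, out, outlen);
}
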
@@ -212,7 +224,6 @@ static void cfs_crypto_performance_test(unsigned char alg_id,
hash, &hash_len);
if (err)
break;
-
}
end = jiffies;
@@ -235,8 +246,7 @@ int cfs_crypto_hash_speed(unsigned char hash_alg)
{
if (hash_alg < CFS_HASH_ALG_MAX)
return cfs_crypto_hash_speeds[hash_alg];
- else
- return -1;
+ return -1;
}
EXPORT_SYMBOL(cfs_crypto_hash_speed);
@@ -249,14 +259,13 @@ static int cfs_crypto_test_hashes(void)
unsigned char *data;
unsigned int j;
/* Data block size for testing hash. Maximum
- * kmalloc size for 2.6.18 kernel is 128K */
+ * kmalloc size for 2.6.18 kernel is 128K
+ */
unsigned int data_len = 1 * 128 * 1024;
data = kmalloc(data_len, GFP_KERNEL);
- if (data == NULL) {
- CERROR("Failed to allocate mem\n");
+ if (!data)
return -ENOMEM;
- }
for (j = 0; j < data_len; j++)
data[j] = j & 0xff;
@@ -285,6 +294,4 @@ void cfs_crypto_unregister(void)
{
if (adler32 == 0)
cfs_crypto_adler32_unregister();
-
- return;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
index 18e8cd4d8758..18e8cd4d8758 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.h
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
index 68515d9130c1..13d31e8a931d 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
@@ -65,6 +65,7 @@ void cfs_cap_raise(cfs_cap_t cap)
commit_creds(cred);
}
}
+EXPORT_SYMBOL(cfs_cap_raise);
void cfs_cap_lower(cfs_cap_t cap)
{
@@ -76,11 +77,13 @@ void cfs_cap_lower(cfs_cap_t cap)
commit_creds(cred);
}
}
+EXPORT_SYMBOL(cfs_cap_lower);
int cfs_cap_raised(cfs_cap_t cap)
{
return cap_raised(current_cap(), cap);
}
+EXPORT_SYMBOL(cfs_cap_raised);
static void cfs_kernel_cap_pack(kernel_cap_t kcap, cfs_cap_t *cap)
{
@@ -95,10 +98,6 @@ cfs_cap_t cfs_curproc_cap_pack(void)
cfs_kernel_cap_pack(current_cap(), &cap);
return cap;
}
-
-EXPORT_SYMBOL(cfs_cap_raise);
-EXPORT_SYMBOL(cfs_cap_lower);
-EXPORT_SYMBOL(cfs_cap_raised);
EXPORT_SYMBOL(cfs_curproc_cap_pack);
/*
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 59c7bf3cbc1f..638e4b33d3a9 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -80,14 +80,14 @@ void libcfs_run_debug_log_upcall(char *file)
argv[0] = lnet_debug_log_upcall;
- LASSERTF(file != NULL, "called on a null filename\n");
+ LASSERTF(file, "called on a null filename\n");
argv[1] = file; /* only need to pass the path of the file */
argv[2] = NULL;
rc = call_usermodehelper(argv[0], argv, envp, 1);
if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET debug log upcall %s %s; check /proc/sys/lnet/debug_log_upcall\n",
+ CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n",
rc, argv[0], argv[1]);
} else {
CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n",
@@ -106,14 +106,14 @@ void libcfs_run_upcall(char **argv)
argv[0] = lnet_upcall;
argc = 1;
- while (argv[argc] != NULL)
+ while (argv[argc])
argc++;
LASSERT(argc >= 2);
rc = call_usermodehelper(argv[0], argv, envp, 1);
if (rc < 0 && rc != -ENOENT) {
- CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /proc/sys/lnet/upcall\n",
+ CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n",
rc, argv[0], argv[1],
argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
@@ -142,8 +142,9 @@ void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
argv[4] = buf;
argv[5] = NULL;
- libcfs_run_upcall (argv);
+ libcfs_run_upcall(argv);
}
+EXPORT_SYMBOL(libcfs_run_lbug_upcall);
/* coverity[+kill] */
void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
@@ -166,9 +167,10 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
while (1)
schedule();
}
+EXPORT_SYMBOL(lbug_with_loc);
static int panic_notifier(struct notifier_block *self, unsigned long unused1,
- void *unused2)
+ void *unused2)
{
if (libcfs_panic_in_progress)
return 0;
@@ -187,13 +189,12 @@ static struct notifier_block libcfs_panic_notifier = {
void libcfs_register_panic_notifier(void)
{
- atomic_notifier_chain_register(&panic_notifier_list, &libcfs_panic_notifier);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &libcfs_panic_notifier);
}
void libcfs_unregister_panic_notifier(void)
{
- atomic_notifier_chain_unregister(&panic_notifier_list, &libcfs_panic_notifier);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &libcfs_panic_notifier);
}
-
-EXPORT_SYMBOL(libcfs_run_lbug_upcall);
-EXPORT_SYMBOL(lbug_with_loc);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
index 025e2f0028ab..86f32ffc5d04 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
@@ -50,7 +50,7 @@ void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
ret = kzalloc_node(size, flags | __GFP_NOWARN,
cfs_cpt_spread_node(cptab, cpt));
if (!ret) {
- WARN_ON(!(flags & (__GFP_FS|__GFP_HIGH)));
+ WARN_ON(!(flags & (__GFP_FS | __GFP_HIGH)));
ret = vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index 70a99cf019de..ebc60ac9bb7a 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -40,41 +40,10 @@
#define LNET_MINOR 240
-int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
+int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
{
- struct libcfs_ioctl_hdr *hdr;
- struct libcfs_ioctl_data *data;
- int orig_len;
-
- hdr = (struct libcfs_ioctl_hdr *)buf;
- data = (struct libcfs_ioctl_data *)buf;
-
- if (copy_from_user(buf, arg, sizeof(*hdr)))
- return -EFAULT;
-
- if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
- CERROR("PORTALS: version mismatch kernel vs application\n");
- return -EINVAL;
- }
-
- if (hdr->ioc_len >= end - buf) {
- CERROR("PORTALS: user buffer exceeds kernel buffer\n");
- return -EINVAL;
- }
-
- if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) {
- CERROR("PORTALS: user buffer too small for ioctl\n");
- return -EINVAL;
- }
-
- orig_len = hdr->ioc_len;
- if (copy_from_user(buf, arg, hdr->ioc_len))
- return -EFAULT;
- if (orig_len != data->ioc_len)
- return -EINVAL;
-
if (libcfs_ioctl_is_invalid(data)) {
- CERROR("PORTALS: ioctl not correctly formatted\n");
+ CERROR("LNET: ioctl not correctly formatted\n");
return -EINVAL;
}
@@ -88,9 +57,29 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
return 0;
}
-int libcfs_ioctl_popdata(void *arg, void *data, int size)
+int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
+ __u32 *len)
+{
+ struct libcfs_ioctl_hdr hdr;
+
+ if (copy_from_user(&hdr, arg, sizeof(hdr)))
+ return -EFAULT;
+
+ if (hdr.ioc_version != LIBCFS_IOCTL_VERSION &&
+ hdr.ioc_version != LIBCFS_IOCTL_VERSION2) {
+ CERROR("LNET: version mismatch expected %#x, got %#x\n",
+ LIBCFS_IOCTL_VERSION, hdr.ioc_version);
+ return -EINVAL;
+ }
+
+ *len = hdr.ioc_len;
+
+ return 0;
+}
+
+int libcfs_ioctl_popdata(void __user *arg, void *data, int size)
{
- if (copy_to_user((char *)arg, data, size))
+ if (copy_to_user(arg, data, size))
return -EFAULT;
return 0;
}
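
The split into libcfs_ioctl_getdata_len() lets the caller learn the payload size before copying anything beyond the header. A minimal sketch of the header-first pattern, assuming a fixed-size header that carries the total length (struct name and bound are illustrative):

/* Sketch: read a fixed-size header first, validate the advertised
 * length, then copy the full payload.
 */
struct example_hdr {
	__u32 ioc_len;
	__u32 ioc_version;
};

static int example_getdata(const void __user *arg, void **bufp)
{
	struct example_hdr hdr;
	void *buf;

	if (copy_from_user(&hdr, arg, sizeof(hdr)))
		return -EFAULT;

	/* bound the allocation before trusting a user-supplied length */
	if (hdr.ioc_len < sizeof(hdr) || hdr.ioc_len > 4096)
		return -EINVAL;

	buf = kmalloc(hdr.ioc_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, arg, hdr.ioc_len)) {
		kfree(buf);
		return -EFAULT;
	}
	*bufp = buf;
	return 0;
}
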
@@ -98,14 +87,12 @@ int libcfs_ioctl_popdata(void *arg, void *data, int size)
static int
libcfs_psdev_open(struct inode *inode, struct file *file)
{
- struct libcfs_device_userstate **pdu = NULL;
int rc = 0;
if (!inode)
return -EINVAL;
- pdu = (struct libcfs_device_userstate **)&file->private_data;
- if (libcfs_psdev_ops.p_open != NULL)
- rc = libcfs_psdev_ops.p_open(0, (void *)pdu);
+ if (libcfs_psdev_ops.p_open)
+ rc = libcfs_psdev_ops.p_open(0, NULL);
else
return -EPERM;
return rc;
@@ -115,14 +102,12 @@ libcfs_psdev_open(struct inode *inode, struct file *file)
static int
libcfs_psdev_release(struct inode *inode, struct file *file)
{
- struct libcfs_device_userstate *pdu;
int rc = 0;
if (!inode)
return -EINVAL;
- pdu = file->private_data;
- if (libcfs_psdev_ops.p_close != NULL)
- rc = libcfs_psdev_ops.p_close(0, (void *)pdu);
+ if (libcfs_psdev_ops.p_close)
+ rc = libcfs_psdev_ops.p_close(0, NULL);
else
rc = -EPERM;
return rc;
@@ -138,8 +123,8 @@ static long libcfs_ioctl(struct file *file,
return -EACCES;
if (_IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
- _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR ||
- _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) {
+ _IOC_NR(cmd) < IOC_LIBCFS_MIN_NR ||
+ _IOC_NR(cmd) > IOC_LIBCFS_MAX_NR) {
CDEBUG(D_IOCTL, "invalid ioctl ( type %d, nr %d, size %d )\n",
_IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
return -EINVAL;
@@ -152,16 +137,10 @@ static long libcfs_ioctl(struct file *file,
return -EPERM;
panic("debugctl-invoked panic");
return 0;
- case IOC_LIBCFS_MEMHOG:
- if (!capable(CFS_CAP_SYS_ADMIN))
- return -EPERM;
- /* go thought */
}
- pfile.off = 0;
- pfile.private_data = file->private_data;
- if (libcfs_psdev_ops.p_ioctl != NULL)
- rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void *)arg);
+ if (libcfs_psdev_ops.p_ioctl)
+ rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg);
else
rc = -EPERM;
return rc;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 89084460231a..89084460231a 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 64a136cd503d..91c2ae8f9d67 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -63,9 +63,8 @@ int cfs_tracefile_init_arch(void)
cfs_trace_data[i] =
kmalloc(sizeof(union cfs_trace_data_union) *
num_possible_cpus(), GFP_KERNEL);
- if (cfs_trace_data[i] == NULL)
+ if (!cfs_trace_data[i])
goto out;
-
}
/* arch related info initialized */
@@ -82,7 +81,7 @@ int cfs_tracefile_init_arch(void)
kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
GFP_KERNEL);
- if (cfs_trace_console_buffers[i][j] == NULL)
+ if (!cfs_trace_console_buffers[i][j])
goto out;
}
@@ -105,7 +104,7 @@ void cfs_tracefile_fini_arch(void)
cfs_trace_console_buffers[i][j] = NULL;
}
- for (i = 0; cfs_trace_data[i] != NULL; i++) {
+ for (i = 0; cfs_trace_data[i]; i++) {
kfree(cfs_trace_data[i]);
cfs_trace_data[i] = NULL;
}
@@ -131,14 +130,13 @@ void cfs_tracefile_write_unlock(void)
up_write(&cfs_tracefile_sem);
}
-cfs_trace_buf_type_t cfs_trace_buf_idx_get(void)
+enum cfs_trace_buf_type cfs_trace_buf_idx_get(void)
{
if (in_irq())
return CFS_TCD_TYPE_IRQ;
- else if (in_softirq())
+ if (in_softirq())
return CFS_TCD_TYPE_SOFTIRQ;
- else
- return CFS_TCD_TYPE_PROC;
+ return CFS_TCD_TYPE_PROC;
}
/*
@@ -176,16 +174,6 @@ void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
spin_unlock(&tcd->tcd_lock);
}
-int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
- struct cfs_trace_page *tage)
-{
- /*
- * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
- * from here: this will lead to infinite recursion.
- */
- return tcd->tcd_cpu == tage->cpu;
-}
-
void
cfs_set_ptldebug_header(struct ptldebug_header *header,
struct libcfs_debug_msg_data *msgdata,
@@ -200,14 +188,14 @@ cfs_set_ptldebug_header(struct ptldebug_header *header,
header->ph_cpu_id = smp_processor_id();
header->ph_type = cfs_trace_buf_idx_get();
/* y2038 safe since all user space treats this as unsigned, but
- * will overflow in 2106 */
+ * will overflow in 2106
+ */
header->ph_sec = (u32)ts.tv_sec;
header->ph_usec = ts.tv_nsec / NSEC_PER_USEC;
header->ph_stack = stack;
header->ph_pid = current->pid;
header->ph_line_num = msgdata->msg_line;
header->ph_extern_pid = 0;
- return;
}
static char *
@@ -261,12 +249,11 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
fn, len, buf);
}
- return;
}
int cfs_trace_max_debug_mb(void)
{
int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
- return max(512, (total_mb * 80)/100);
+ return max(512, (total_mb * 80) / 100);
}
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index 329d78ce272d..cdc640bfdba8 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -54,156 +54,30 @@
# define DEBUG_SUBSYSTEM S_LNET
+#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \
+ sizeof(struct lnet_ioctl_config_data))
+
#include "../../include/linux/libcfs/libcfs.h"
#include <asm/div64.h>
#include "../../include/linux/libcfs/libcfs_crypto.h"
#include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lib-dlc.h"
#include "../../include/linux/lnet/lnet.h"
#include "tracefile.h"
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Portals v3.1");
-MODULE_LICENSE("GPL");
-
static struct dentry *lnet_debugfs_root;
-static void kportal_memhog_free(struct libcfs_device_userstate *ldu)
-{
- struct page **level0p = &ldu->ldu_memhog_root_page;
- struct page **level1p;
- struct page **level2p;
- int count1;
- int count2;
-
- if (*level0p != NULL) {
-
- level1p = (struct page **)page_address(*level0p);
- count1 = 0;
-
- while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
- *level1p != NULL) {
-
- level2p = (struct page **)page_address(*level1p);
- count2 = 0;
-
- while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
- *level2p != NULL) {
-
- __free_page(*level2p);
- ldu->ldu_memhog_pages--;
- level2p++;
- count2++;
- }
-
- __free_page(*level1p);
- ldu->ldu_memhog_pages--;
- level1p++;
- count1++;
- }
-
- __free_page(*level0p);
- ldu->ldu_memhog_pages--;
-
- *level0p = NULL;
- }
-
- LASSERT(ldu->ldu_memhog_pages == 0);
-}
-
-static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
- gfp_t flags)
-{
- struct page **level0p;
- struct page **level1p;
- struct page **level2p;
- int count1;
- int count2;
-
- LASSERT(ldu->ldu_memhog_pages == 0);
- LASSERT(ldu->ldu_memhog_root_page == NULL);
-
- if (npages < 0)
- return -EINVAL;
-
- if (npages == 0)
- return 0;
-
- level0p = &ldu->ldu_memhog_root_page;
- *level0p = alloc_page(flags);
- if (*level0p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
-
- level1p = (struct page **)page_address(*level0p);
- count1 = 0;
- memset(level1p, 0, PAGE_CACHE_SIZE);
-
- while (ldu->ldu_memhog_pages < npages &&
- count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
-
- if (cfs_signal_pending())
- return -EINTR;
-
- *level1p = alloc_page(flags);
- if (*level1p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
-
- level2p = (struct page **)page_address(*level1p);
- count2 = 0;
- memset(level2p, 0, PAGE_CACHE_SIZE);
-
- while (ldu->ldu_memhog_pages < npages &&
- count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
-
- if (cfs_signal_pending())
- return -EINTR;
-
- *level2p = alloc_page(flags);
- if (*level2p == NULL)
- return -ENOMEM;
- ldu->ldu_memhog_pages++;
-
- level2p++;
- count2++;
- }
-
- level1p++;
- count1++;
- }
-
- return 0;
-}
-
/* called when opening /dev/device */
static int libcfs_psdev_open(unsigned long flags, void *args)
{
- struct libcfs_device_userstate *ldu;
-
try_module_get(THIS_MODULE);
-
- LIBCFS_ALLOC(ldu, sizeof(*ldu));
- if (ldu != NULL) {
- ldu->ldu_memhog_pages = 0;
- ldu->ldu_memhog_root_page = NULL;
- }
- *(struct libcfs_device_userstate **)args = ldu;
-
return 0;
}
/* called when closing /dev/device */
static int libcfs_psdev_release(unsigned long flags, void *args)
{
- struct libcfs_device_userstate *ldu;
-
- ldu = (struct libcfs_device_userstate *)args;
- if (ldu != NULL) {
- kportal_memhog_free(ldu);
- LIBCFS_FREE(ldu, sizeof(*ldu));
- }
-
module_put(THIS_MODULE);
return 0;
}
@@ -241,11 +115,25 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
}
EXPORT_SYMBOL(libcfs_deregister_ioctl);
-static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
- void *arg, struct libcfs_ioctl_data *data)
+static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
+ void __user *arg, struct libcfs_ioctl_hdr *hdr)
{
+ struct libcfs_ioctl_data *data = NULL;
int err = -EINVAL;
+ /*
+ * libcfs_ioctl_data_adjust() fixes up the legacy
+ * libcfs_ioctl_data structure so the rest of the code can
+ * use it. Newer data structures do not need this call.
+ */
+ if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) {
+ data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
+ err = libcfs_ioctl_data_adjust(data);
+ if (err)
+ return err;
+ }
+
switch (cmd) {
case IOC_LIBCFS_CLEAR_DEBUG:
libcfs_debug_clear_buffer();
@@ -255,24 +143,11 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
* Handled in arch/cfs_module.c
*/
case IOC_LIBCFS_MARK_DEBUG:
- if (data->ioc_inlbuf1 == NULL ||
+ if (!data->ioc_inlbuf1 ||
data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
return -EINVAL;
libcfs_debug_mark_buffer(data->ioc_inlbuf1);
return 0;
- case IOC_LIBCFS_MEMHOG:
- if (pfile->private_data == NULL) {
- err = -EINVAL;
- } else {
- kportal_memhog_free(pfile->private_data);
- /* XXX The ioc_flags is not GFP flags now, need to be fixed */
- err = kportal_memhog_alloc(pfile->private_data,
- data->ioc_count,
- data->ioc_flags);
- if (err != 0)
- kportal_memhog_free(pfile->private_data);
- }
- break;
default: {
struct libcfs_ioctl_handler *hand;
@@ -280,11 +155,11 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
err = -EINVAL;
down_read(&ioctl_list_sem);
list_for_each_entry(hand, &ioctl_list, item) {
- err = hand->handle_ioctl(cmd, data);
+ err = hand->handle_ioctl(cmd, hdr);
if (err != -EINVAL) {
if (err == 0)
err = libcfs_ioctl_popdata(arg,
- data, sizeof(*data));
+ hdr, hdr->ioc_len);
break;
}
}
@@ -296,28 +171,41 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd,
return err;
}
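
Because the legacy layout embeds the header as its first member, container_of() can recover the enclosing structure from the header pointer alone. A small sketch under that assumption (type names illustrative):

/* Sketch: the header is the first member, so container_of()
 * recovers the enclosing legacy structure.
 */
struct example_ioctl_data {
	struct libcfs_ioctl_hdr ioc_hdr;	/* must be first */
	__u32 ioc_flags;
};

static struct example_ioctl_data *
example_from_hdr(struct libcfs_ioctl_hdr *hdr)
{
	return container_of(hdr, struct example_ioctl_data, ioc_hdr);
}
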
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
+static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd,
+ void __user *arg)
{
- char *buf;
- struct libcfs_ioctl_data *data;
+ struct libcfs_ioctl_hdr *hdr;
int err = 0;
+ __u32 buf_len;
- LIBCFS_ALLOC_GFP(buf, 1024, GFP_KERNEL);
- if (buf == NULL)
+ err = libcfs_ioctl_getdata_len(arg, &buf_len);
+ if (err)
+ return err;
+
+ /*
+ * Bound the size of the allocation here to guard
+ * against DoS attacks.
+ */
+ if (buf_len > LNET_MAX_IOCTL_BUF_LEN) {
+ CERROR("LNET: user buffer exceeds kernel buffer\n");
+ return -EINVAL;
+ }
+
+ LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL);
+ if (!hdr)
return -ENOMEM;
/* 'cmd' and permissions get checked in our arch-specific caller */
- if (libcfs_ioctl_getdata(buf, buf + 800, arg)) {
- CERROR("PORTALS ioctl: data error\n");
- err = -EINVAL;
+ if (copy_from_user(hdr, arg, buf_len)) {
+ CERROR("LNET ioctl: data error\n");
+ err = -EFAULT;
goto out;
}
- data = (struct libcfs_ioctl_data *)buf;
- err = libcfs_ioctl_int(pfile, cmd, arg, data);
+ err = libcfs_ioctl_handle(pfile, cmd, arg, hdr);
out:
- LIBCFS_FREE(buf, 1024);
+ LIBCFS_FREE(hdr, buf_len);
return err;
}
@@ -329,10 +217,10 @@ struct cfs_psdev_ops libcfs_psdev_ops = {
libcfs_ioctl
};
-static int proc_call_handler(void *data, int write, loff_t *ppos,
- void __user *buffer, size_t *lenp,
- int (*handler)(void *data, int write,
- loff_t pos, void __user *buffer, int len))
+int lprocfs_call_handler(void *data, int write, loff_t *ppos,
+ void __user *buffer, size_t *lenp,
+ int (*handler)(void *data, int write, loff_t pos,
+ void __user *buffer, int len))
{
int rc = handler(data, write, *ppos, buffer, *lenp);
@@ -347,6 +235,7 @@ static int proc_call_handler(void *data, int write, loff_t *ppos,
}
return 0;
}
+EXPORT_SYMBOL(lprocfs_call_handler);
static int __proc_dobitmasks(void *data, int write,
loff_t pos, void __user *buffer, int nob)
@@ -392,8 +281,8 @@ static int __proc_dobitmasks(void *data, int write,
static int proc_dobitmasks(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_dobitmasks);
+ return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
+ __proc_dobitmasks);
}
static int __proc_dump_kernel(void *data, int write,
@@ -408,8 +297,8 @@ static int __proc_dump_kernel(void *data, int write,
static int proc_dump_kernel(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_dump_kernel);
+ return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
+ __proc_dump_kernel);
}
static int __proc_daemon_file(void *data, int write,
@@ -431,8 +320,8 @@ static int __proc_daemon_file(void *data, int write,
static int proc_daemon_file(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_daemon_file);
+ return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
+ __proc_daemon_file);
}
static int libcfs_force_lbug(struct ctl_table *table, int write,
@@ -467,11 +356,11 @@ static int __proc_cpt_table(void *data, int write,
if (write)
return -EPERM;
- LASSERT(cfs_cpt_table != NULL);
+ LASSERT(cfs_cpt_table);
while (1) {
LIBCFS_ALLOC(buf, len);
- if (buf == NULL)
+ if (!buf)
return -ENOMEM;
rc = cfs_cpt_table_print(cfs_cpt_table, buf, len);
@@ -493,23 +382,19 @@ static int __proc_cpt_table(void *data, int write,
rc = cfs_trace_copyout_string(buffer, nob, buf + pos, NULL);
out:
- if (buf != NULL)
+ if (buf)
LIBCFS_FREE(buf, len);
return rc;
}
static int proc_cpt_table(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_cpt_table);
+ return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
+ __proc_cpt_table);
}
static struct ctl_table lnet_table[] = {
- /*
- * NB No .strategy entries have been provided since sysctl(8) prefers
- * to go via /proc for portability.
- */
{
.procname = "debug",
.data = &libcfs_debug,
@@ -640,47 +525,68 @@ static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf,
return error;
}
-static const struct file_operations lnet_debugfs_file_operations = {
+static const struct file_operations lnet_debugfs_file_operations_rw = {
.open = simple_open,
.read = lnet_debugfs_read,
.write = lnet_debugfs_write,
.llseek = default_llseek,
};
+static const struct file_operations lnet_debugfs_file_operations_ro = {
+ .open = simple_open,
+ .read = lnet_debugfs_read,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations lnet_debugfs_file_operations_wo = {
+ .open = simple_open,
+ .write = lnet_debugfs_write,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations *lnet_debugfs_fops_select(umode_t mode)
+{
+ if (!(mode & S_IWUGO))
+ return &lnet_debugfs_file_operations_ro;
+
+ if (!(mode & S_IRUGO))
+ return &lnet_debugfs_file_operations_wo;
+
+ return &lnet_debugfs_file_operations_rw;
+}
+
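
The new helper keys the file operations off the ctl_table mode, so a read-only entry never exposes a write handler and vice versa. A sketch of the intended mapping and its use (caller name illustrative):

/* Illustrative mapping performed by lnet_debugfs_fops_select():
 *   0444 (read-only)  -> lnet_debugfs_file_operations_ro
 *   0200 (write-only) -> lnet_debugfs_file_operations_wo
 *   0644 (read-write) -> lnet_debugfs_file_operations_rw
 */
static void example_insert(struct dentry *root, struct ctl_table *t)
{
	for (; t->procname; t++)
		debugfs_create_file(t->procname, t->mode, root, t,
				    lnet_debugfs_fops_select(t->mode));
}
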
void lustre_insert_debugfs(struct ctl_table *table,
const struct lnet_debugfs_symlink_def *symlinks)
{
- struct dentry *entry;
-
- if (lnet_debugfs_root == NULL)
+ if (!lnet_debugfs_root)
lnet_debugfs_root = debugfs_create_dir("lnet", NULL);
/* Even if we cannot create, just ignore it altogether */
if (IS_ERR_OR_NULL(lnet_debugfs_root))
return;
+ /* We don't save the dentry returned by the next two calls,
+ * because we tear down with debugfs_remove_recursive() rather
+ * than per-file debugfs_remove()
+ */
for (; table->procname; table++)
- entry = debugfs_create_file(table->procname, table->mode,
- lnet_debugfs_root, table,
- &lnet_debugfs_file_operations);
+ debugfs_create_file(table->procname, table->mode,
+ lnet_debugfs_root, table,
+ lnet_debugfs_fops_select(table->mode));
for (; symlinks && symlinks->name; symlinks++)
- entry = debugfs_create_symlink(symlinks->name,
- lnet_debugfs_root,
- symlinks->target);
-
+ debugfs_create_symlink(symlinks->name, lnet_debugfs_root,
+ symlinks->target);
}
EXPORT_SYMBOL_GPL(lustre_insert_debugfs);
static void lustre_remove_debugfs(void)
{
- if (lnet_debugfs_root != NULL)
- debugfs_remove_recursive(lnet_debugfs_root);
+ debugfs_remove_recursive(lnet_debugfs_root);
lnet_debugfs_root = NULL;
}
-static int init_libcfs_module(void)
+static int libcfs_init(void)
{
int rc;
@@ -736,7 +642,7 @@ cleanup_cpu:
return rc;
}
-static void exit_libcfs_module(void)
+static void libcfs_exit(void)
{
int rc;
@@ -759,7 +665,10 @@ static void exit_libcfs_module(void)
pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc);
}
-MODULE_VERSION("1.0.0");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre helper library");
+MODULE_VERSION(LIBCFS_VERSION);
+MODULE_LICENSE("GPL");
-module_init(init_libcfs_module);
-module_exit(exit_libcfs_module);
+module_init(libcfs_init);
+module_exit(libcfs_exit);
diff --git a/drivers/staging/lustre/lustre/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index 4147664ff57a..c75ae9a68e76 100644
--- a/drivers/staging/lustre/lustre/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -42,11 +42,11 @@
#include "../../include/linux/libcfs/libcfs.h"
/*
-From: George Marsaglia <geo@stat.fsu.edu>
-Newsgroups: sci.math
-Subject: Re: A RANDOM NUMBER GENERATOR FOR C
-Date: Tue, 30 Sep 1997 05:29:35 -0700
-
+ * From: George Marsaglia <geo@stat.fsu.edu>
+ * Newsgroups: sci.math
+ * Subject: Re: A RANDOM NUMBER GENERATOR FOR C
+ * Date: Tue, 30 Sep 1997 05:29:35 -0700
+ *
* You may replace the two constants 36969 and 18000 by any
* pair of distinct constants from this list:
* 18000 18030 18273 18513 18879 19074 19098 19164 19215 19584
@@ -58,7 +58,8 @@ Date: Tue, 30 Sep 1997 05:29:35 -0700
* 27960 28320 28380 28689 28710 28794 28854 28959 28980 29013
* 29379 29889 30135 30345 30459 30714 30903 30963 31059 31083
* (or any other 16-bit constants k for which both k*2^16-1
- * and k*2^15-1 are prime) */
+ * and k*2^15-1 are prime)
+ */
#define RANDOM_CONST_A 18030
#define RANDOM_CONST_B 29013
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 65c4f1ab0de8..244eb89eef68 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -56,6 +56,51 @@ static int thread_running;
static atomic_t cfs_tage_allocated = ATOMIC_INIT(0);
+struct page_collection {
+ struct list_head pc_pages;
+ /*
+ * if this flag is set, collect_pages() will spill both
+ * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
+ * only ->tcd_pages are spilled.
+ */
+ int pc_want_daemon_pages;
+};
+
+struct tracefiled_ctl {
+ struct completion tctl_start;
+ struct completion tctl_stop;
+ wait_queue_head_t tctl_waitq;
+ pid_t tctl_pid;
+ atomic_t tctl_shutdown;
+};
+
+/*
+ * small data-structure for each page owned by tracefiled.
+ */
+struct cfs_trace_page {
+ /*
+ * page itself
+ */
+ struct page *page;
+ /*
+ * linkage into one of the lists in trace_data_union or
+ * page_collection
+ */
+ struct list_head linkage;
+ /*
+ * number of bytes used within this page
+ */
+ unsigned int used;
+ /*
+ * cpu that owns this page
+ */
+ unsigned short cpu;
+ /*
+ * type(context) of this page
+ */
+ unsigned short type;
+};
+
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
struct cfs_trace_cpu_data *tcd);
@@ -80,11 +125,11 @@ static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
*/
gfp |= __GFP_NOWARN;
page = alloc_page(gfp);
- if (page == NULL)
+ if (!page)
return NULL;
tage = kmalloc(sizeof(*tage), gfp);
- if (tage == NULL) {
+ if (!tage) {
__free_page(page);
return NULL;
}
@@ -96,9 +141,6 @@ static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
static void cfs_tage_free(struct cfs_trace_page *tage)
{
- __LASSERT(tage != NULL);
- __LASSERT(tage->page != NULL);
-
__free_page(tage->page);
kfree(tage);
atomic_dec(&cfs_tage_allocated);
@@ -107,9 +149,6 @@ static void cfs_tage_free(struct cfs_trace_page *tage)
static void cfs_tage_to_tail(struct cfs_trace_page *tage,
struct list_head *queue)
{
- __LASSERT(tage != NULL);
- __LASSERT(queue != NULL);
-
list_move_tail(&tage->linkage, queue);
}
@@ -127,7 +166,7 @@ int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
struct cfs_trace_page *tage;
tage = cfs_tage_alloc(gfp);
- if (tage == NULL)
+ if (!tage)
break;
list_add_tail(&tage->linkage, stock);
}
@@ -143,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
if (tcd->tcd_cur_pages > 0) {
__LASSERT(!list_empty(&tcd->tcd_pages));
tage = cfs_tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= PAGE_CACHE_SIZE)
+ if (tage->used + len <= PAGE_SIZE)
return tage;
}
@@ -154,7 +193,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
list_del_init(&tage->linkage);
} else {
tage = cfs_tage_alloc(GFP_ATOMIC);
- if (unlikely(tage == NULL)) {
+ if (unlikely(!tage)) {
if ((!memory_pressure_get() ||
in_interrupt()) && printk_ratelimit())
printk(KERN_WARNING
@@ -221,13 +260,13 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
* from here: this will lead to infinite recursion.
*/
- if (len > PAGE_CACHE_SIZE) {
+ if (len > PAGE_SIZE) {
pr_err("cowardly refusing to write %lu bytes in a page\n", len);
return NULL;
}
tage = cfs_trace_get_tage_try(tcd, len);
- if (tage != NULL)
+ if (tage)
return tage;
if (thread_running)
cfs_tcd_shrink(tcd);
@@ -278,10 +317,11 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
* pins us to a particular CPU. This avoids an smp_processor_id()
- * warning on Linux when debugging is enabled. */
+ * warning on Linux when debugging is enabled.
+ */
cfs_set_ptldebug_header(&header, msgdata, CDEBUG_STACK());
- if (tcd == NULL) /* arch may not log in IRQ context */
+ if (!tcd) /* arch may not log in IRQ context */
goto console;
if (tcd->tcd_cur_pages == 0)
@@ -301,15 +341,15 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
if (libcfs_debug_binary)
known_size += sizeof(header);
- /*/
+ /*
* '2' used because vsnprintf returns the real size required for output
 * _without_ the terminating NUL; a second pass is needed if the
 * first buffer was too small for this format.
*/
for (i = 0; i < 2; i++) {
tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
- if (tage == NULL) {
- if (needed + known_size > PAGE_CACHE_SIZE)
+ if (!tage) {
+ if (needed + known_size > PAGE_SIZE)
mask |= D_ERROR;
cfs_trace_put_tcd(tcd);
@@ -320,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
string_buf = (char *)page_address(tage->page) +
tage->used + known_size;
- max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
+ max_nob = PAGE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
printk(KERN_EMERG "negative max_nob: %d\n",
max_nob);
@@ -352,7 +392,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
break;
}
- if (*(string_buf+needed-1) != '\n')
+ if (*(string_buf + needed - 1) != '\n')
printk(KERN_INFO "format at %s:%d:%s doesn't end in newline\n",
file, msgdata->msg_line, msgdata->msg_fn);
@@ -384,30 +424,30 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
__LASSERT(debug_buf == string_buf);
tage->used += needed;
- __LASSERT (tage->used <= PAGE_CACHE_SIZE);
+ __LASSERT(tage->used <= PAGE_SIZE);
console:
if ((mask & libcfs_printk) == 0) {
/* no console output requested */
- if (tcd != NULL)
+ if (tcd)
cfs_trace_put_tcd(tcd);
return 1;
}
- if (cdls != NULL) {
+ if (cdls) {
if (libcfs_console_ratelimit &&
cdls->cdls_next != 0 && /* not first time ever */
!cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
/* skipping a console message */
cdls->cdls_count++;
- if (tcd != NULL)
+ if (tcd)
cfs_trace_put_tcd(tcd);
return 1;
}
- if (cfs_time_after(cfs_time_current(), cdls->cdls_next +
- libcfs_console_max_delay
- + cfs_time_seconds(10))) {
+ if (cfs_time_after(cfs_time_current(),
+ cdls->cdls_next + libcfs_console_max_delay +
+ cfs_time_seconds(10))) {
/* last timeout was a long time ago */
cdls->cdls_delay /= libcfs_console_backoff * 4;
} else {
@@ -423,7 +463,7 @@ console:
cdls->cdls_next = (cfs_time_current() + cdls->cdls_delay) | 1;
}
- if (tcd != NULL) {
+ if (tcd) {
cfs_print_to_console(&header, mask, string_buf, needed, file,
msgdata->msg_fn);
cfs_trace_put_tcd(tcd);
@@ -431,18 +471,18 @@ console:
string_buf = cfs_trace_get_console_buffer();
needed = 0;
- if (format1 != NULL) {
+ if (format1) {
va_copy(ap, args);
needed = vsnprintf(string_buf,
CFS_TRACE_CONSOLE_BUFFER_SIZE,
format1, ap);
va_end(ap);
}
- if (format2 != NULL) {
+ if (format2) {
remain = CFS_TRACE_CONSOLE_BUFFER_SIZE - needed;
if (remain > 0) {
va_start(ap, format2);
- needed += vsnprintf(string_buf+needed, remain,
+ needed += vsnprintf(string_buf + needed, remain,
format2, ap);
va_end(ap);
}
@@ -453,7 +493,7 @@ console:
put_cpu();
}
- if (cdls != NULL && cdls->cdls_count != 0) {
+ if (cdls && cdls->cdls_count != 0) {
string_buf = cfs_trace_get_console_buffer();
needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
@@ -497,7 +537,8 @@ panic_collect_pages(struct page_collection *pc)
{
/* Do the collect_pages job on a single CPU: assumes that all other
* CPUs have been stopped during a panic. If this isn't true for some
- * arch, this will have to be implemented separately in each arch. */
+ * arch, this will have to be implemented separately in each arch.
+ */
int i;
int j;
struct cfs_trace_cpu_data *tcd;
@@ -509,8 +550,7 @@ panic_collect_pages(struct page_collection *pc)
tcd->tcd_cur_pages = 0;
if (pc->pc_want_daemon_pages) {
- list_splice_init(&tcd->tcd_daemon_pages,
- &pc->pc_pages);
+ list_splice_init(&tcd->tcd_daemon_pages, &pc->pc_pages);
tcd->tcd_cur_daemon_pages = 0;
}
}
@@ -527,7 +567,7 @@ static void collect_pages_on_all_cpus(struct page_collection *pc)
tcd->tcd_cur_pages = 0;
if (pc->pc_want_daemon_pages) {
list_splice_init(&tcd->tcd_daemon_pages,
- &pc->pc_pages);
+ &pc->pc_pages);
tcd->tcd_cur_daemon_pages = 0;
}
}
@@ -558,7 +598,6 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc)
list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
linkage) {
-
__LASSERT_TAGE_INVARIANT(tage);
if (tage->cpu != cpu || tage->type != i)
@@ -580,7 +619,8 @@ static void put_pages_back(struct page_collection *pc)
/* Add pages to a per-cpu debug daemon ringbuffer. This buffer makes sure that
* we have a good amount of data at all times for dumping during an LBUG, even
* if we have been steadily writing (and otherwise discarding) pages via the
- * debug daemon. */
+ * debug daemon.
+ */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
struct cfs_trace_cpu_data *tcd)
{
@@ -588,7 +628,6 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
struct cfs_trace_page *tmp;
list_for_each_entry_safe(tage, tmp, &pc->pc_pages, linkage) {
-
__LASSERT_TAGE_INVARIANT(tage);
if (tage->cpu != tcd->tcd_cpu || tage->type != tcd->tcd_type)
@@ -674,12 +713,13 @@ int cfs_tracefile_dump_all_pages(char *filename)
cfs_tracefile_write_lock();
- filp = filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE, 0600);
+ filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
+ 0600);
if (IS_ERR(filp)) {
rc = PTR_ERR(filp);
filp = NULL;
pr_err("LustreError: can't open %s for dump: rc %d\n",
- filename, rc);
+ filename, rc);
goto out;
}
@@ -691,10 +731,10 @@ int cfs_tracefile_dump_all_pages(char *filename)
}
/* ok, for now, just write the pages. in the future we'll be building
- * iobufs with the pages and calling generic_direct_IO */
+ * iobufs with the pages and calling generic_direct_IO
+ */
MMSPACE_OPEN;
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-
__LASSERT_TAGE_INVARIANT(tage);
buf = kmap(tage->page);
@@ -732,7 +772,6 @@ void cfs_trace_flush_pages(void)
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
-
__LASSERT_TAGE_INVARIANT(tage);
list_del(&tage->linkage);
@@ -771,9 +810,10 @@ EXPORT_SYMBOL(cfs_trace_copyin_string);
int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
const char *knl_buffer, char *append)
{
- /* NB if 'append' != NULL, it's a single character to append to the
- * copied out string - usually "\n", for /proc entries and "" (i.e. a
- * terminating zero byte) for sysctl entries */
+ /*
+ * NB if 'append' != NULL, it's a single character to append to the
+ * copied out string - usually "\n" or "" (i.e. a terminating zero byte)
+ */
int nob = strlen(knl_buffer);
if (nob > usr_buffer_nob)
@@ -782,7 +822,7 @@ int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
if (copy_to_user(usr_buffer, knl_buffer, nob))
return -EFAULT;
- if (append != NULL && nob < usr_buffer_nob) {
+ if (append && nob < usr_buffer_nob) {
if (copy_to_user(usr_buffer + nob, append, 1))
return -EFAULT;
@@ -795,11 +835,11 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
- if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
+ if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
return -EINVAL;
*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
- if (*str == NULL)
+ if (!*str)
return -ENOMEM;
return 0;
@@ -842,12 +882,15 @@ int cfs_trace_daemon_command(char *str)
memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
} else if (strncmp(str, "size=", 5) == 0) {
- cfs_tracefile_size = simple_strtoul(str + 5, NULL, 0);
- if (cfs_tracefile_size < 10 || cfs_tracefile_size > 20480)
- cfs_tracefile_size = CFS_TRACEFILE_SIZE;
- else
- cfs_tracefile_size <<= 20;
-
+ unsigned long tmp;
+
+ rc = kstrtoul(str + 5, 10, &tmp);
+ if (!rc) {
+ if (tmp < 10 || tmp > 20480)
+ cfs_tracefile_size = CFS_TRACEFILE_SIZE;
+ else
+ cfs_tracefile_size = tmp << 20;
+ }
} else if (strlen(str) >= sizeof(cfs_tracefile)) {
rc = -ENAMETOOLONG;
} else if (str[0] != '/') {
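
Unlike simple_strtoul(), kstrtoul() rejects trailing garbage and reports overflow, which is why the hunk above only applies the new size after a clean parse. A small sketch of that pattern (helper name illustrative):

/* Sketch: parse strictly, then convert MB to bytes only on success. */
static int example_parse_size_mb(const char *s, unsigned long *bytes)
{
	unsigned long mb;
	int rc;

	rc = kstrtoul(s, 10, &mb);	/* -EINVAL/-ERANGE on bad input */
	if (rc)
		return rc;

	*bytes = mb << 20;
	return 0;
}
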
@@ -877,7 +920,7 @@ int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
return rc;
rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
- usr_str, usr_str_nob);
+ usr_str, usr_str_nob);
if (rc == 0)
rc = cfs_trace_daemon_command(str);
@@ -908,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
}
mb /= num_possible_cpus();
- pages = mb << (20 - PAGE_CACHE_SHIFT);
+ pages = mb << (20 - PAGE_SHIFT);
cfs_tracefile_write_lock();
@@ -934,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
cfs_tracefile_read_unlock();
- return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
+ return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
static int tracefiled(void *arg)
@@ -977,7 +1020,7 @@ static int tracefiled(void *arg)
}
}
cfs_tracefile_read_unlock();
- if (filp == NULL) {
+ if (!filp) {
put_pages_on_daemon_list(&pc);
__LASSERT(list_empty(&pc.pc_pages));
goto end_loop;
@@ -985,8 +1028,7 @@ static int tracefiled(void *arg)
MMSPACE_OPEN;
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
- linkage) {
+ list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
static loff_t f_pos;
__LASSERT_TAGE_INVARIANT(tage);
@@ -1017,8 +1059,7 @@ static int tracefiled(void *arg)
int i;
printk(KERN_ALERT "Lustre: trace pages aren't empty\n");
- pr_err("total cpus(%d): ",
- num_possible_cpus());
+ pr_err("total cpus(%d): ", num_possible_cpus());
for (i = 0; i < num_possible_cpus(); i++)
if (cpu_online(i))
pr_cont("%d(on) ", i);
@@ -1028,9 +1069,9 @@ static int tracefiled(void *arg)
i = 0;
list_for_each_entry_safe(tage, tmp, &pc.pc_pages,
- linkage)
+ linkage)
pr_err("page %d belongs to cpu %d\n",
- ++i, tage->cpu);
+ ++i, tage->cpu);
pr_err("There are %d pages unwritten\n", i);
}
__LASSERT(list_empty(&pc.pc_pages));
@@ -1056,6 +1097,7 @@ end_loop:
int cfs_trace_start_thread(void)
{
struct tracefiled_ctl *tctl = &trace_tctl;
+ struct task_struct *task;
int rc = 0;
mutex_lock(&cfs_trace_thread_mutex);
@@ -1067,8 +1109,9 @@ int cfs_trace_start_thread(void)
init_waitqueue_head(&tctl->tctl_waitq);
atomic_set(&tctl->tctl_shutdown, 0);
- if (IS_ERR(kthread_run(tracefiled, tctl, "ktracefiled"))) {
- rc = -ECHILD;
+ task = kthread_run(tracefiled, tctl, "ktracefiled");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
goto out;
}
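
kthread_run() returns an ERR_PTR() on failure, so propagating PTR_ERR() preserves the real errno (e.g. -ENOMEM) instead of the old blanket -ECHILD. A minimal sketch:

#include <linux/kthread.h>

/* Sketch: preserve the precise error from kthread_run(). */
static int example_start_thread(int (*fn)(void *), void *data)
{
	struct task_struct *task;

	task = kthread_run(fn, data, "example");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* e.g. -ENOMEM, not -ECHILD */
	return 0;
}
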
@@ -1135,7 +1178,7 @@ static void trace_cleanup_on_all_cpus(void)
tcd->tcd_shutting_down = 1;
list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
- linkage) {
+ linkage) {
__LASSERT_TAGE_INVARIANT(tage);
list_del(&tage->linkage);
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index 7bf1471a54fb..ac84e7f4c859 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -39,12 +39,12 @@
#include "../../include/linux/libcfs/libcfs.h"
-typedef enum {
+enum cfs_trace_buf_type {
CFS_TCD_TYPE_PROC = 0,
CFS_TCD_TYPE_SOFTIRQ,
CFS_TCD_TYPE_IRQ,
CFS_TCD_TYPE_MAX
-} cfs_trace_buf_type_t;
+};
/* trace file lock routines */
@@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
extern int libcfs_panic_in_progress;
int cfs_trace_max_debug_mb(void);
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
@@ -96,13 +96,15 @@ int cfs_trace_max_debug_mb(void);
/*
* Private declare for tracefile
*/
-#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
-/* Size of a buffer for sprinting console messages if we can't get a page
- * from system */
+/*
+ * Size of a buffer for printing console messages if we can't get a page
+ * from system
+ */
#define CFS_TRACE_CONSOLE_BUFFER_SIZE 1024
union cfs_trace_data_union {
@@ -185,66 +187,15 @@ union cfs_trace_data_union {
extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[NR_CPUS];
#define cfs_tcd_for_each(tcd, i, j) \
- for (i = 0; cfs_trace_data[i] != NULL; i++) \
- for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
- j < num_possible_cpus(); \
- j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
+ for (i = 0; cfs_trace_data[i]; i++) \
+ for (j = 0, ((tcd) = &(*cfs_trace_data[i])[j].tcd); \
+ j < num_possible_cpus(); \
+ j++, (tcd) = &(*cfs_trace_data[i])[j].tcd)
#define cfs_tcd_for_each_type_lock(tcd, i, cpu) \
- for (i = 0; cfs_trace_data[i] && \
- (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
- cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
-
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct page_collection {
- struct list_head pc_pages;
- /*
- * if this flag is set, collect_pages() will spill both
- * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
- * only ->tcd_pages are spilled.
- */
- int pc_want_daemon_pages;
-};
-
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct tracefiled_ctl {
- struct completion tctl_start;
- struct completion tctl_stop;
- wait_queue_head_t tctl_waitq;
- pid_t tctl_pid;
- atomic_t tctl_shutdown;
-};
-
-/*
- * small data-structure for each page owned by tracefiled.
- */
-/* XXX nikita: this declaration is internal to tracefile.c and should probably
- * be moved there */
-struct cfs_trace_page {
- /*
- * page itself
- */
- struct page *page;
- /*
- * linkage into one of the lists in trace_data_union or
- * page_collection
- */
- struct list_head linkage;
- /*
- * number of bytes used within this page
- */
- unsigned int used;
- /*
- * cpu that owns this page
- */
- unsigned short cpu;
- /*
- * type(context) of this page
- */
- unsigned short type;
-};
+ for (i = 0; cfs_trace_data[i] && \
+ (tcd = &(*cfs_trace_data[i])[cpu].tcd) && \
+ cfs_trace_lock_tcd(tcd, 1); cfs_trace_unlock_tcd(tcd, 1), i++)
void cfs_set_ptldebug_header(struct ptldebug_header *header,
struct libcfs_debug_msg_data *m,
@@ -257,7 +208,7 @@ int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
+enum cfs_trace_buf_type cfs_trace_buf_idx_get(void);
static inline char *
cfs_trace_get_console_buffer(void)
@@ -279,8 +230,7 @@ cfs_trace_get_tcd(void)
return tcd;
}
-static inline void
-cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
+static inline void cfs_trace_put_tcd(struct cfs_trace_cpu_data *tcd)
{
cfs_trace_unlock_tcd(tcd, 0);
@@ -290,9 +240,6 @@ cfs_trace_put_tcd (struct cfs_trace_cpu_data *tcd)
int cfs_trace_refill_stock(struct cfs_trace_cpu_data *tcd, gfp_t gfp,
struct list_head *stock);
-int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
- struct cfs_trace_page *tage);
-
void cfs_trace_assertion_failed(const char *str,
struct libcfs_debug_msg_data *m);
@@ -308,9 +255,9 @@ do { \
#define __LASSERT_TAGE_INVARIANT(tage) \
do { \
- __LASSERT(tage != NULL); \
- __LASSERT(tage->page != NULL); \
- __LASSERT(tage->used <= PAGE_CACHE_SIZE); \
+ __LASSERT(tage); \
+ __LASSERT(tage->page); \
+ __LASSERT(tage->used <= PAGE_SIZE); \
__LASSERT(page_count(tage->page) > 0); \
} while (0)
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index 60bb88a00b41..c72fe00dce8d 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -46,18 +46,21 @@
#define CFS_WS_NAME_LEN 16
struct cfs_wi_sched {
- struct list_head ws_list; /* chain on global list */
+ /* chain on global list */
+ struct list_head ws_list;
/** serialised workitems */
spinlock_t ws_lock;
/** where schedulers sleep */
wait_queue_head_t ws_waitq;
/** concurrent workitems */
struct list_head ws_runq;
- /** rescheduled running-workitems, a workitem can be rescheduled
+ /**
+ * rescheduled running-workitems, a workitem can be rescheduled
* while running in wi_action(), but we don't want to execute it again
* unless it returns from wi_action(), so we put it on ws_rerunq
* while rescheduling, and move it to runq after it returns
- * from wi_action() */
+ * from wi_action()
+ */
struct list_head ws_rerunq;
/** CPT-table for this scheduler */
struct cfs_cpt_table *ws_cptab;
@@ -128,8 +131,6 @@ cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
wi->wi_scheduled = 1; /* LBUG future schedule attempts */
spin_unlock(&sched->ws_lock);
-
- return;
}
EXPORT_SYMBOL(cfs_wi_exit);
@@ -163,7 +164,7 @@ cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
wi->wi_scheduled = 0;
}
- LASSERT (list_empty(&wi->wi_list));
+ LASSERT(list_empty(&wi->wi_list));
spin_unlock(&sched->ws_lock);
return rc;
@@ -186,7 +187,7 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
spin_lock(&sched->ws_lock);
if (!wi->wi_scheduled) {
- LASSERT (list_empty(&wi->wi_list));
+ LASSERT(list_empty(&wi->wi_list));
wi->wi_scheduled = 1;
sched->ws_nscheduled++;
@@ -198,21 +199,19 @@ cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
}
}
- LASSERT (!list_empty(&wi->wi_list));
+ LASSERT(!list_empty(&wi->wi_list));
spin_unlock(&sched->ws_lock);
- return;
}
EXPORT_SYMBOL(cfs_wi_schedule);
-static int
-cfs_wi_scheduler (void *arg)
+static int cfs_wi_scheduler(void *arg)
{
struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
cfs_block_allsigs();
/* CPT affinity scheduler? */
- if (sched->ws_cptab != NULL)
+ if (sched->ws_cptab)
if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
CWARN("Failed to bind %s on CPT %d\n",
sched->ws_name, sched->ws_cpt);
@@ -234,8 +233,8 @@ cfs_wi_scheduler (void *arg)
while (!list_empty(&sched->ws_runq) &&
nloops < CFS_WI_RESCHED) {
- wi = list_entry(sched->ws_runq.next,
- cfs_workitem_t, wi_list);
+ wi = list_entry(sched->ws_runq.next, cfs_workitem_t,
+ wi_list);
LASSERT(wi->wi_scheduled && !wi->wi_running);
list_del_init(&wi->wi_list);
@@ -261,14 +260,16 @@ cfs_wi_scheduler (void *arg)
LASSERT(wi->wi_scheduled);
/* wi is rescheduled, should be on rerunq now, we
- * move it to runq so it can run action now */
+ * move it to runq so it can run action now
+ */
list_move_tail(&wi->wi_list, &sched->ws_runq);
}
if (!list_empty(&sched->ws_runq)) {
spin_unlock(&sched->ws_lock);
/* don't sleep because some workitems still
- * expect me to come back soon */
+ * expect me to come back soon
+ */
cond_resched();
spin_lock(&sched->ws_lock);
continue;
@@ -343,14 +344,18 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
- LASSERT(cptab == NULL || cpt == CFS_CPT_ANY ||
+ LASSERT(!cptab || cpt == CFS_CPT_ANY ||
(cpt >= 0 && cpt < cfs_cpt_number(cptab)));
LIBCFS_ALLOC(sched, sizeof(*sched));
- if (sched == NULL)
+ if (!sched)
return -ENOMEM;
- strlcpy(sched->ws_name, name, CFS_WS_NAME_LEN);
+ if (strlen(name) > sizeof(sched->ws_name) - 1) {
+ LIBCFS_FREE(sched, sizeof(*sched));
+ return -E2BIG;
+ }
+ strncpy(sched->ws_name, name, sizeof(sched->ws_name));
sched->ws_cptab = cptab;
sched->ws_cpt = cpt;
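
Replacing strlcpy() with an explicit length check turns silent truncation of the scheduler name into a hard -E2BIG error. A sketch of the reject-don't-truncate idiom (helper name illustrative, destination size assumed non-zero):

/* Sketch: fail hard instead of truncating a too-long name. */
static int example_set_name(char *dst, size_t dst_size, const char *src)
{
	if (strlen(src) > dst_size - 1)
		return -E2BIG;

	strncpy(dst, src, dst_size);	/* fits, so NUL-terminated */
	return 0;
}
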
@@ -376,7 +381,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
sched->ws_starting++;
spin_unlock(&cfs_wi_data.wi_glock);
- if (sched->ws_cptab != NULL && sched->ws_cpt >= 0) {
+ if (sched->ws_cptab && sched->ws_cpt >= 0) {
snprintf(name, sizeof(name), "%s_%02d_%02u",
sched->ws_name, sched->ws_cpt,
sched->ws_nthreads);
@@ -431,6 +436,7 @@ void
cfs_wi_shutdown(void)
{
struct cfs_wi_sched *sched;
+ struct cfs_wi_sched *temp;
spin_lock(&cfs_wi_data.wi_glock);
cfs_wi_data.wi_stopping = 1;
@@ -453,9 +459,7 @@ cfs_wi_shutdown(void)
}
spin_unlock(&cfs_wi_data.wi_glock);
}
- while (!list_empty(&cfs_wi_data.wi_scheds)) {
- sched = list_entry(cfs_wi_data.wi_scheds.next,
- struct cfs_wi_sched, ws_list);
+ list_for_each_entry_safe(sched, temp, &cfs_wi_data.wi_scheds, ws_list) {
list_del(&sched->ws_list);
LIBCFS_FREE(sched, sizeof(*sched));
}
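
The shutdown loop now uses the _safe iterator, which caches the next node so the current entry can be unlinked and freed inside the loop body. A condensed sketch using the same types as the hunk above:

/* Sketch: _safe iteration allows freeing the current entry. */
static void example_free_all(struct list_head *head)
{
	struct cfs_wi_sched *sched;
	struct cfs_wi_sched *temp;

	list_for_each_entry_safe(sched, temp, head, ws_list) {
		list_del(&sched->ws_list);
		LIBCFS_FREE(sched, sizeof(*sched));
	}
}
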
diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile
index e276fe2bf8f3..4c81fa19429a 100644
--- a/drivers/staging/lustre/lnet/lnet/Makefile
+++ b/drivers/staging/lustre/lnet/lnet/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_LNET) += lnet.o
-lnet-y := api-ni.o config.o nidstrings.o \
+lnet-y := api-ni.o config.o nidstrings.o net_fault.o \
lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o \
lib-socket.o lib-move.o module.o lo.o \
router.o router_proc.o acceptor.o peer.o
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index fed57d90028d..1452bb3ad9eb 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -36,6 +36,7 @@
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/completion.h>
+#include <net/sock.h>
#include "../../include/linux/lnet/lib-lnet.h"
static int accept_port = 988;
@@ -46,7 +47,9 @@ static struct {
int pta_shutdown;
struct socket *pta_sock;
struct completion pta_signal;
-} lnet_acceptor_state;
+} lnet_acceptor_state = {
+ .pta_shutdown = 1
+};
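
The designated initializer makes the acceptor state read as already shut down until lnet_acceptor() actually starts, so early status queries cannot observe a half-started service. A minimal sketch of the idiom (field names illustrative):

/* Sketch: start life "shut down" via a designated initializer. */
static struct {
	int		shutdown;
	struct socket	*sock;
} example_state = {
	.shutdown = 1,	/* not running until the thread says so */
};
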
int
lnet_acceptor_port(void)
@@ -78,9 +81,11 @@ static char *accept_type;
static int
lnet_acceptor_get_tunables(void)
{
- /* Userland acceptor uses 'accept_type' instead of 'accept', due to
+ /*
+ * Userland acceptor uses 'accept_type' instead of 'accept', due to
* conflict with 'accept(2)', but kernel acceptor still uses 'accept'
- * for compatibility. Hence the trick. */
+ * for compatibility. Hence the trick.
+ */
accept_type = accept;
return 0;
}
@@ -140,7 +145,7 @@ EXPORT_SYMBOL(lnet_connect_console_error);
int
lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
- __u32 local_ip, __u32 peer_ip, int peer_port)
+ __u32 local_ip, __u32 peer_ip, int peer_port)
{
lnet_acceptor_connreq_t cr;
struct socket *sock;
@@ -157,7 +162,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip,
peer_port);
- if (rc != 0) {
+ if (rc) {
if (fatal)
goto failed;
continue;
@@ -169,14 +174,14 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
cr.acr_nid = peer_nid;
- if (the_lnet.ln_testprotocompat != 0) {
+ if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
lnet_net_lock(LNET_LOCK_EX);
- if ((the_lnet.ln_testprotocompat & 4) != 0) {
+ if (the_lnet.ln_testprotocompat & 4) {
cr.acr_version++;
the_lnet.ln_testprotocompat &= ~4;
}
- if ((the_lnet.ln_testprotocompat & 8) != 0) {
+ if (the_lnet.ln_testprotocompat & 8) {
cr.acr_magic = LNET_PROTO_MAGIC;
the_lnet.ln_testprotocompat &= ~8;
}
@@ -184,7 +189,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
}
rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
- if (rc != 0)
+ if (rc)
goto failed_sock;
*sockp = sock;
@@ -202,8 +207,6 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid,
}
EXPORT_SYMBOL(lnet_connect);
-/* Below is the code common for both kernel and MT user-space */
-
static int
lnet_accept(struct socket *sock, __u32 magic)
{
@@ -218,23 +221,23 @@ lnet_accept(struct socket *sock, __u32 magic)
LASSERT(sizeof(cr) <= 16); /* not too big for the stack */
rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT(rc == 0); /* we succeeded before */
+ LASSERT(!rc); /* we succeeded before */
if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
-
if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
- /* future version compatibility!
+ /*
+ * future version compatibility!
* When LNET unifies protocols over all LNDs, the first
- * thing sent will be a version query. I send back
- * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */
-
+ * thing sent will be a version query. I send back
+ * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old"
+ */
memset(&cr, 0, sizeof(cr));
cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
rc = lnet_sock_write(sock, &cr, sizeof(cr),
accept_timeout);
- if (rc != 0)
+ if (rc)
CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
&peer_ip, rc);
return -EPROTO;
@@ -254,9 +257,9 @@ lnet_accept(struct socket *sock, __u32 magic)
rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version),
accept_timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading connection request version from %pI4h\n",
- rc, &peer_ip);
+ rc, &peer_ip);
return -EIO;
}
@@ -264,10 +267,12 @@ lnet_accept(struct socket *sock, __u32 magic)
__swab32s(&cr.acr_version);
if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) {
- /* future version compatibility!
+ /*
+ * future version compatibility!
* An acceptor-specific protocol rev will first send a version
* query. I send back my current version to tell her I'm
- * "old". */
+ * "old".
+ */
int peer_version = cr.acr_version;
memset(&cr, 0, sizeof(cr));
@@ -275,7 +280,7 @@ lnet_accept(struct socket *sock, __u32 magic)
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
- if (rc != 0)
+ if (rc)
CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
peer_version, &peer_ip, rc);
return -EPROTO;
@@ -285,9 +290,9 @@ lnet_accept(struct socket *sock, __u32 magic)
sizeof(cr) -
offsetof(lnet_acceptor_connreq_t, acr_nid),
accept_timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading connection request from %pI4h\n",
- rc, &peer_ip);
+ rc, &peer_ip);
return -EIO;
}
@@ -295,20 +300,20 @@ lnet_accept(struct socket *sock, __u32 magic)
__swab64s(&cr.acr_nid);
ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid));
- if (ni == NULL || /* no matching net */
+ if (!ni || /* no matching net */
ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
- if (ni != NULL)
+ if (ni)
lnet_ni_decref(ni);
LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
&peer_ip, libcfs_nid2str(cr.acr_nid));
return -EPERM;
}
- if (ni->ni_lnd->lnd_accept == NULL) {
+ if (!ni->ni_lnd->lnd_accept) {
/* This catches a request for the loopback LND */
lnet_ni_decref(ni);
LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI does not accept IP connections\n",
- &peer_ip, libcfs_nid2str(cr.acr_nid));
+ &peer_ip, libcfs_nid2str(cr.acr_nid));
return -EPERM;
}
@@ -331,13 +336,13 @@ lnet_acceptor(void *arg)
int peer_port;
int secure = (int)((long_ptr_t)arg);
- LASSERT(lnet_acceptor_state.pta_sock == NULL);
+ LASSERT(!lnet_acceptor_state.pta_sock);
cfs_block_allsigs();
rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port,
accept_backlog);
- if (rc != 0) {
+ if (rc) {
if (rc == -EADDRINUSE)
LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n",
accept_port);
@@ -354,13 +359,12 @@ lnet_acceptor(void *arg)
lnet_acceptor_state.pta_shutdown = rc;
complete(&lnet_acceptor_state.pta_signal);
- if (rc != 0)
+ if (rc)
return rc;
while (!lnet_acceptor_state.pta_shutdown) {
-
rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock);
- if (rc != 0) {
+ if (rc) {
if (rc != -EAGAIN) {
CWARN("Accept error %d: pausing...\n", rc);
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -376,7 +380,7 @@ lnet_acceptor(void *arg)
}
rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't determine new connection's address\n");
goto failed;
}
@@ -389,14 +393,14 @@ lnet_acceptor(void *arg)
rc = lnet_sock_read(newsock, &magic, sizeof(magic),
accept_timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading connection request from %pI4h\n",
- rc, &peer_ip);
+ rc, &peer_ip);
goto failed;
}
rc = lnet_accept(newsock, magic);
- if (rc != 0)
+ if (rc)
goto failed;
continue;
@@ -436,14 +440,19 @@ accept2secure(const char *acc, long *sec)
int
lnet_acceptor_start(void)
{
+ struct task_struct *task;
int rc;
long rc2;
long secure;
- LASSERT(lnet_acceptor_state.pta_sock == NULL);
+ /* if the acceptor is already running, return immediately */
+ if (!lnet_acceptor_state.pta_shutdown)
+ return 0;
+
+ LASSERT(!lnet_acceptor_state.pta_sock);
rc = lnet_acceptor_get_tunables();
- if (rc != 0)
+ if (rc)
return rc;
init_completion(&lnet_acceptor_state.pta_signal);
@@ -451,13 +460,13 @@ lnet_acceptor_start(void)
if (rc <= 0)
return rc;
- if (lnet_count_acceptor_nis() == 0) /* not required */
+ if (!lnet_count_acceptor_nis()) /* not required */
return 0;
- rc2 = PTR_ERR(kthread_run(lnet_acceptor,
- (void *)(ulong_ptr_t)secure,
- "acceptor_%03ld", secure));
- if (IS_ERR_VALUE(rc2)) {
+ task = kthread_run(lnet_acceptor, (void *)(ulong_ptr_t)secure,
+ "acceptor_%03ld", secure);
+ if (IS_ERR(task)) {
+ rc2 = PTR_ERR(task);
CERROR("Can't start acceptor thread: %ld\n", rc2);
return -ESRCH;
@@ -468,11 +477,11 @@ lnet_acceptor_start(void)
if (!lnet_acceptor_state.pta_shutdown) {
/* started OK */
- LASSERT(lnet_acceptor_state.pta_sock != NULL);
+ LASSERT(lnet_acceptor_state.pta_sock);
return 0;
}
- LASSERT(lnet_acceptor_state.pta_sock == NULL);
+ LASSERT(!lnet_acceptor_state.pta_sock);
return -ENETDOWN;
}
@@ -480,11 +489,17 @@ lnet_acceptor_start(void)
void
lnet_acceptor_stop(void)
{
- if (lnet_acceptor_state.pta_sock == NULL) /* not running */
+ struct sock *sk;
+
+ if (lnet_acceptor_state.pta_shutdown) /* not running */
return;
lnet_acceptor_state.pta_shutdown = 1;
- wake_up_all(sk_sleep(lnet_acceptor_state.pta_sock->sk));
+
+ sk = lnet_acceptor_state.pta_sock->sk;
+
+ /* wake up any sleepers using a safe method */
+ sk->sk_state_change(sk);
/* block until acceptor signals exit */
wait_for_completion(&lnet_acceptor_state.pta_signal);
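
The stop path no longer pokes the socket's wait queue with wake_up_all(sk_sleep(...)); it raises pta_shutdown first and then invokes the socket's own sk_state_change() callback, the hook the network core itself uses to rouse sleepers, after which the acceptor thread observes the flag, exits, and completes pta_signal. A condensed user-space analogue of that flag/wake/wait handshake, with a pthread condition standing in for both the socket wakeup and the completion (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int shutdown_flag;
static int done;

static void *acceptor(void *arg)
{
    pthread_mutex_lock(&lock);
    while (!shutdown_flag)              /* "blocked in accept()" */
        pthread_cond_wait(&cond, &lock);
    done = 1;                           /* complete(&pta_signal) */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, acceptor, NULL);
    pthread_mutex_lock(&lock);
    shutdown_flag = 1;                  /* pta_shutdown = 1 */
    pthread_cond_broadcast(&cond);      /* sk->sk_state_change(sk) */
    while (!done)                       /* wait_for_completion() */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    printf("acceptor stopped\n");
    return 0;
}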
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 362282fa00bf..8764755544c9 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -39,6 +39,7 @@
#include <linux/ktime.h>
#include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lib-dlc.h"
#define D_LNI D_CONSOLE
@@ -61,6 +62,9 @@ static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
module_param(rnet_htable_size, int, 0444);
MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
+static int lnet_ping(lnet_process_id_t id, int timeout_ms,
+ lnet_process_id_t __user *ids, int n_ids);
+
static char *
lnet_get_routes(void)
{
@@ -73,17 +77,17 @@ lnet_get_networks(void)
char *nets;
int rc;
- if (*networks != 0 && *ip2nets != 0) {
+ if (*networks && *ip2nets) {
LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n");
return NULL;
}
- if (*ip2nets != 0) {
+ if (*ip2nets) {
rc = lnet_parse_ip2nets(&nets, ip2nets);
- return (rc == 0) ? nets : NULL;
+ return !rc ? nets : NULL;
}
- if (*networks != 0)
+ if (*networks)
return networks;
return "tcp";
@@ -94,6 +98,7 @@ lnet_init_locks(void)
{
spin_lock_init(&the_lnet.ln_eq_wait_lock);
init_waitqueue_head(&the_lnet.ln_eq_waitq);
+ init_waitqueue_head(&the_lnet.ln_rc_waitq);
mutex_init(&the_lnet.ln_lnd_mutex);
mutex_init(&the_lnet.ln_api_mutex);
}
@@ -104,10 +109,10 @@ lnet_create_remote_nets_table(void)
int i;
struct list_head *hash;
- LASSERT(the_lnet.ln_remote_nets_hash == NULL);
+ LASSERT(!the_lnet.ln_remote_nets_hash);
LASSERT(the_lnet.ln_remote_nets_hbits > 0);
LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
- if (hash == NULL) {
+ if (!hash) {
CERROR("Failed to create remote nets hash table\n");
return -ENOMEM;
}
@@ -123,7 +128,7 @@ lnet_destroy_remote_nets_table(void)
{
int i;
- if (the_lnet.ln_remote_nets_hash == NULL)
+ if (!the_lnet.ln_remote_nets_hash)
return;
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
@@ -138,12 +143,12 @@ lnet_destroy_remote_nets_table(void)
static void
lnet_destroy_locks(void)
{
- if (the_lnet.ln_res_lock != NULL) {
+ if (the_lnet.ln_res_lock) {
cfs_percpt_lock_free(the_lnet.ln_res_lock);
the_lnet.ln_res_lock = NULL;
}
- if (the_lnet.ln_net_lock != NULL) {
+ if (the_lnet.ln_net_lock) {
cfs_percpt_lock_free(the_lnet.ln_net_lock);
the_lnet.ln_net_lock = NULL;
}
@@ -155,11 +160,11 @@ lnet_create_locks(void)
lnet_init_locks();
the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
- if (the_lnet.ln_res_lock == NULL)
+ if (!the_lnet.ln_res_lock)
goto failed;
the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
- if (the_lnet.ln_net_lock == NULL)
+ if (!the_lnet.ln_net_lock)
goto failed;
return 0;
@@ -171,10 +176,12 @@ lnet_create_locks(void)
static void lnet_assert_wire_constants(void)
{
- /* Wire protocol assertions generated by 'wirecheck'
+ /*
+ * Wire protocol assertions generated by 'wirecheck'
* running on Linux robert.bartonsoftware.com 2.6.8-1.521
* #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
- * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
+ * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7)
+ */
/* Constants... */
CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
@@ -284,9 +291,8 @@ lnet_register_lnd(lnd_t *lnd)
{
mutex_lock(&the_lnet.ln_lnd_mutex);
- LASSERT(the_lnet.ln_init);
LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
- LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
+ LASSERT(!lnet_find_lnd_by_type(lnd->lnd_type));
list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
lnd->lnd_refcount = 0;
@@ -302,9 +308,8 @@ lnet_unregister_lnd(lnd_t *lnd)
{
mutex_lock(&the_lnet.ln_lnd_mutex);
- LASSERT(the_lnet.ln_init);
LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
- LASSERT(lnd->lnd_refcount == 0);
+ LASSERT(!lnd->lnd_refcount);
list_del(&lnd->lnd_list);
CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
@@ -335,7 +340,6 @@ lnet_counters_get(lnet_counters_t *counters)
counters->recv_length += ctr->recv_length;
counters->route_length += ctr->route_length;
counters->drop_length += ctr->drop_length;
-
}
lnet_net_unlock(LNET_LOCK_EX);
}
@@ -375,7 +379,7 @@ lnet_res_container_cleanup(struct lnet_res_container *rec)
{
int count = 0;
- if (rec->rec_type == 0) /* not set yet, it's uninitialized */
+ if (!rec->rec_type) /* not set yet, it's uninitialized */
return;
while (!list_empty(&rec->rec_active)) {
@@ -395,14 +399,16 @@ lnet_res_container_cleanup(struct lnet_res_container *rec)
}
if (count > 0) {
- /* Found alive MD/ME/EQ, user really should unlink/free
+ /*
+ * Found alive MD/ME/EQ, user really should unlink/free
* all of them before finalizing LNet, but if someone didn't,
- * we have to recycle garbage for him */
+ * we have to recycle garbage for him
+ */
CERROR("%d active elements on exit of %s container\n",
count, lnet_res_type2str(rec->rec_type));
}
- if (rec->rec_lh_hash != NULL) {
+ if (rec->rec_lh_hash) {
LIBCFS_FREE(rec->rec_lh_hash,
LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
rec->rec_lh_hash = NULL;
@@ -417,7 +423,7 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
int rc = 0;
int i;
- LASSERT(rec->rec_type == 0);
+ LASSERT(!rec->rec_type);
rec->rec_type = type;
INIT_LIST_HEAD(&rec->rec_active);
@@ -426,7 +432,7 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
/* Arbitrary choice of hash table size */
LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
- if (rec->rec_lh_hash == NULL) {
+ if (!rec->rec_lh_hash) {
rc = -ENOMEM;
goto out;
}
@@ -464,7 +470,7 @@ lnet_res_containers_create(int type)
int i;
recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
- if (recs == NULL) {
+ if (!recs) {
CERROR("Failed to allocate %s resource containers\n",
lnet_res_type2str(type));
return NULL;
@@ -472,7 +478,7 @@ lnet_res_containers_create(int type)
cfs_percpt_for_each(rec, i, recs) {
rc = lnet_res_container_setup(rec, i, type);
- if (rc != 0) {
+ if (rc) {
lnet_res_containers_destroy(recs);
return NULL;
}
@@ -518,7 +524,7 @@ lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
}
-int lnet_unprepare(void);
+static int lnet_unprepare(void);
static int
lnet_prepare(lnet_pid_t requested_pid)
@@ -527,11 +533,16 @@ lnet_prepare(lnet_pid_t requested_pid)
struct lnet_res_container **recs;
int rc = 0;
- LASSERT(the_lnet.ln_refcount == 0);
+ if (requested_pid == LNET_PID_ANY) {
+ /* Don't instantiate LNET just for me */
+ return -ENETDOWN;
+ }
+
+ LASSERT(!the_lnet.ln_refcount);
the_lnet.ln_routing = 0;
- LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
+ LASSERT(!(requested_pid & LNET_PID_USERFLAG));
the_lnet.ln_pid = requested_pid;
INIT_LIST_HEAD(&the_lnet.ln_test_peers);
@@ -539,9 +550,11 @@ lnet_prepare(lnet_pid_t requested_pid)
INIT_LIST_HEAD(&the_lnet.ln_nis_cpt);
INIT_LIST_HEAD(&the_lnet.ln_nis_zombie);
INIT_LIST_HEAD(&the_lnet.ln_routers);
+ INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
+ INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
rc = lnet_create_remote_nets_table();
- if (rc != 0)
+ if (rc)
goto failed;
/*
* NB the interface cookie in wire handles guards against delayed
@@ -551,27 +564,27 @@ lnet_prepare(lnet_pid_t requested_pid)
the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(lnet_counters_t));
- if (the_lnet.ln_counters == NULL) {
+ if (!the_lnet.ln_counters) {
CERROR("Failed to allocate counters for LNet\n");
rc = -ENOMEM;
goto failed;
}
rc = lnet_peer_tables_create();
- if (rc != 0)
+ if (rc)
goto failed;
rc = lnet_msg_containers_create();
- if (rc != 0)
+ if (rc)
goto failed;
rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
LNET_COOKIE_TYPE_EQ);
- if (rc != 0)
+ if (rc)
goto failed;
recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
- if (recs == NULL) {
+ if (!recs) {
rc = -ENOMEM;
goto failed;
}
@@ -579,7 +592,7 @@ lnet_prepare(lnet_pid_t requested_pid)
the_lnet.ln_me_containers = recs;
recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
- if (recs == NULL) {
+ if (!recs) {
rc = -ENOMEM;
goto failed;
}
@@ -587,7 +600,7 @@ lnet_prepare(lnet_pid_t requested_pid)
the_lnet.ln_md_containers = recs;
rc = lnet_portals_create();
- if (rc != 0) {
+ if (rc) {
CERROR("Failed to create portals for LNet: %d\n", rc);
goto failed;
}
@@ -599,17 +612,18 @@ lnet_prepare(lnet_pid_t requested_pid)
return rc;
}
-int
+static int
lnet_unprepare(void)
{
- /* NB no LNET_LOCK since this is the last reference. All LND instances
+ /*
+ * NB no LNET_LOCK since this is the last reference. All LND instances
* have shut down already, so it is safe to unlink and free all
* descriptors, even those that appear committed to a network op (eg MD
- * with non-zero pending count) */
-
+ * with non-zero pending count)
+ */
lnet_fail_nid(LNET_NID_ANY, 0);
- LASSERT(the_lnet.ln_refcount == 0);
+ LASSERT(!the_lnet.ln_refcount);
LASSERT(list_empty(&the_lnet.ln_test_peers));
LASSERT(list_empty(&the_lnet.ln_nis));
LASSERT(list_empty(&the_lnet.ln_nis_cpt));
@@ -617,12 +631,12 @@ lnet_unprepare(void)
lnet_portals_destroy();
- if (the_lnet.ln_md_containers != NULL) {
+ if (the_lnet.ln_md_containers) {
lnet_res_containers_destroy(the_lnet.ln_md_containers);
the_lnet.ln_md_containers = NULL;
}
- if (the_lnet.ln_me_containers != NULL) {
+ if (the_lnet.ln_me_containers) {
lnet_res_containers_destroy(the_lnet.ln_me_containers);
the_lnet.ln_me_containers = NULL;
}
@@ -631,9 +645,9 @@ lnet_unprepare(void)
lnet_msg_containers_destroy();
lnet_peer_tables_destroy();
- lnet_rtrpools_free();
+ lnet_rtrpools_free(0);
- if (the_lnet.ln_counters != NULL) {
+ if (the_lnet.ln_counters) {
cfs_percpt_free(the_lnet.ln_counters);
the_lnet.ln_counters = NULL;
}
@@ -709,7 +723,7 @@ lnet_cpt_of_nid_locked(lnet_nid_t nid)
if (LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid))
continue;
- LASSERT(ni->ni_cpts != NULL);
+ LASSERT(ni->ni_cpts);
return ni->ni_cpts[lnet_nid_cpt_hash
(nid, ni->ni_ncpts)];
}
@@ -747,12 +761,12 @@ lnet_islocalnet(__u32 net)
cpt = lnet_net_lock_current();
ni = lnet_net2ni_locked(net, cpt);
- if (ni != NULL)
+ if (ni)
lnet_ni_decref_locked(ni, cpt);
lnet_net_unlock(cpt);
- return ni != NULL;
+ return !!ni;
}
lnet_ni_t *
@@ -783,11 +797,11 @@ lnet_islocalnid(lnet_nid_t nid)
cpt = lnet_net_lock_current();
ni = lnet_nid2ni_locked(nid, cpt);
- if (ni != NULL)
+ if (ni)
lnet_ni_decref_locked(ni, cpt);
lnet_net_unlock(cpt);
- return ni != NULL;
+ return !!ni;
}
int
@@ -803,7 +817,7 @@ lnet_count_acceptor_nis(void)
list_for_each(tmp, &the_lnet.ln_nis) {
ni = list_entry(tmp, lnet_ni_t, ni_list);
- if (ni->ni_lnd->lnd_accept != NULL)
+ if (ni->ni_lnd->lnd_accept)
count++;
}
@@ -812,90 +826,280 @@ lnet_count_acceptor_nis(void)
return count;
}
-static int
-lnet_ni_tq_credits(lnet_ni_t *ni)
+static lnet_ping_info_t *
+lnet_ping_info_create(int num_ni)
{
- int credits;
+ lnet_ping_info_t *ping_info;
+ unsigned int infosz;
- LASSERT(ni->ni_ncpts >= 1);
+ infosz = offsetof(lnet_ping_info_t, pi_ni[num_ni]);
+ LIBCFS_ALLOC(ping_info, infosz);
+ if (!ping_info) {
+ CERROR("Can't allocate ping info[%d]\n", num_ni);
+ return NULL;
+ }
- if (ni->ni_ncpts == 1)
- return ni->ni_maxtxcredits;
+ ping_info->pi_nnis = num_ni;
+ ping_info->pi_pid = the_lnet.ln_pid;
+ ping_info->pi_magic = LNET_PROTO_PING_MAGIC;
+ ping_info->pi_features = LNET_PING_FEAT_NI_STATUS;
- credits = ni->ni_maxtxcredits / ni->ni_ncpts;
- credits = max(credits, 8 * ni->ni_peertxcredits);
- credits = min(credits, ni->ni_maxtxcredits);
+ return ping_info;
+}
- return credits;
+static inline int
+lnet_get_ni_count(void)
+{
+ struct lnet_ni *ni;
+ int count = 0;
+
+ lnet_net_lock(0);
+
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list)
+ count++;
+
+ lnet_net_unlock(0);
+
+ return count;
+}
+
+static inline void
+lnet_ping_info_free(lnet_ping_info_t *pinfo)
+{
+ LIBCFS_FREE(pinfo,
+ offsetof(lnet_ping_info_t,
+ pi_ni[pinfo->pi_nnis]));
}
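
lnet_ping_info_create() and lnet_ping_info_free() both size the buffer with offsetof(lnet_ping_info_t, pi_ni[nnis]) rather than sizeof(), because pi_ni[] is a trailing variable-length array and only the entries actually used are allocated. The idiom in self-contained C (fields invented for illustration):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct ping_info {
    unsigned int nnis;
    struct { unsigned long nid; int status; } ni[];  /* flexible array */
};

int main(void)
{
    unsigned int n = 4;
    /* header plus exactly n trailing entries, nothing more */
    struct ping_info *pi = malloc(offsetof(struct ping_info, ni[n]));

    if (!pi)
        return 1;
    pi->nnis = n;
    printf("allocated %zu bytes for %u NIs\n",
           offsetof(struct ping_info, ni[n]), pi->nnis);
    free(pi);
    return 0;
}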
static void
-lnet_shutdown_lndnis(void)
+lnet_ping_info_destroy(void)
{
- int i;
- int islo;
- lnet_ni_t *ni;
+ struct lnet_ni *ni;
- /* NB called holding the global mutex */
+ lnet_net_lock(LNET_LOCK_EX);
- /* All quiet on the API front */
- LASSERT(!the_lnet.ln_shutdown);
- LASSERT(the_lnet.ln_refcount == 0);
- LASSERT(list_empty(&the_lnet.ln_nis_zombie));
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+ lnet_ni_lock(ni);
+ ni->ni_status = NULL;
+ lnet_ni_unlock(ni);
+ }
- lnet_net_lock(LNET_LOCK_EX);
- the_lnet.ln_shutdown = 1; /* flag shutdown */
+ lnet_ping_info_free(the_lnet.ln_ping_info);
+ the_lnet.ln_ping_info = NULL;
- /* Unlink NIs from the global table */
- while (!list_empty(&the_lnet.ln_nis)) {
- ni = list_entry(the_lnet.ln_nis.next,
- lnet_ni_t, ni_list);
- /* move it to zombie list and nobody can find it anymore */
- list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
- lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
-
- if (!list_empty(&ni->ni_cptlist)) {
- list_del_init(&ni->ni_cptlist);
- lnet_ni_decref_locked(ni, 0);
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+
+static void
+lnet_ping_event_handler(lnet_event_t *event)
+{
+ lnet_ping_info_t *pinfo = event->md.user_ptr;
+
+ if (event->unlinked)
+ pinfo->pi_features = LNET_PING_FEAT_INVAL;
+}
+
+static int
+lnet_ping_info_setup(lnet_ping_info_t **ppinfo, lnet_handle_md_t *md_handle,
+ int ni_count, bool set_eq)
+{
+ lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
+ lnet_handle_me_t me_handle;
+ lnet_md_t md = { NULL };
+ int rc, rc2;
+
+ if (set_eq) {
+ rc = LNetEQAlloc(0, lnet_ping_event_handler,
+ &the_lnet.ln_ping_target_eq);
+ if (rc) {
+ CERROR("Can't allocate ping EQ: %d\n", rc);
+ return rc;
}
}
- /* Drop the cached eqwait NI. */
- if (the_lnet.ln_eq_waitni != NULL) {
- lnet_ni_decref_locked(the_lnet.ln_eq_waitni, 0);
- the_lnet.ln_eq_waitni = NULL;
+ *ppinfo = lnet_ping_info_create(ni_count);
+ if (!*ppinfo) {
+ rc = -ENOMEM;
+ goto failed_0;
}
- /* Drop the cached loopback NI. */
- if (the_lnet.ln_loni != NULL) {
- lnet_ni_decref_locked(the_lnet.ln_loni, 0);
- the_lnet.ln_loni = NULL;
+ rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
+ LNET_PROTO_PING_MATCHBITS, 0,
+ LNET_UNLINK, LNET_INS_AFTER,
+ &me_handle);
+ if (rc) {
+ CERROR("Can't create ping ME: %d\n", rc);
+ goto failed_1;
}
- lnet_net_unlock(LNET_LOCK_EX);
+ /* initialize md content */
+ md.start = *ppinfo;
+ md.length = offsetof(lnet_ping_info_t,
+ pi_ni[(*ppinfo)->pi_nnis]);
+ md.threshold = LNET_MD_THRESH_INF;
+ md.max_size = 0;
+ md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
+ LNET_MD_MANAGE_REMOTE;
+ md.eq_handle = the_lnet.ln_ping_target_eq;
+ md.user_ptr = *ppinfo;
- /* Clear lazy portals and drop delayed messages which hold refs
- * on their lnet_msg_t::msg_rxpeer */
- for (i = 0; i < the_lnet.ln_nportals; i++)
- LNetClearLazyPortal(i);
+ rc = LNetMDAttach(me_handle, md, LNET_RETAIN, md_handle);
+ if (rc) {
+ CERROR("Can't attach ping MD: %d\n", rc);
+ goto failed_2;
+ }
+
+ return 0;
+
+failed_2:
+ rc2 = LNetMEUnlink(me_handle);
+ LASSERT(!rc2);
+failed_1:
+ lnet_ping_info_free(*ppinfo);
+ *ppinfo = NULL;
+failed_0:
+ if (set_eq)
+ LNetEQFree(the_lnet.ln_ping_target_eq);
+ return rc;
+}
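
lnet_ping_info_setup() acquires its resources in a fixed order (EQ, ping info, ME, MD) and the numbered failed_* labels release them in exact reverse order, so a failure at any step jumps to the label matching what is already held. A generic self-contained sketch of the unwind idiom (malloc standing in for the LNet allocations):

#include <stdio.h>
#include <stdlib.h>

static char *eq, *info, *md;    /* stand-ins for the three resources */

static int setup(void)
{
    int rc = -1;

    eq = malloc(16);            /* step 0: allocate the EQ */
    if (!eq)
        goto failed_0;
    info = malloc(16);          /* step 1: the ping info */
    if (!info)
        goto failed_1;
    md = malloc(16);            /* step 2: the ME/MD pair */
    if (!md)
        goto failed_2;
    return 0;                   /* success: all three stay live */

failed_2:
    free(info);                 /* undo step 1 */
    info = NULL;
failed_1:
    free(eq);                   /* undo step 0 */
    eq = NULL;
failed_0:
    return rc;
}

int main(void)
{
    int rc = setup();

    printf("setup rc=%d\n", rc);
    if (!rc) {                  /* normal teardown mirrors the labels */
        free(md);
        free(info);
        free(eq);
    }
    return 0;
}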
+
+static void
+lnet_ping_md_unlink(lnet_ping_info_t *pinfo, lnet_handle_md_t *md_handle)
+{
+ sigset_t blocked = cfs_block_allsigs();
+
+ LNetMDUnlink(*md_handle);
+ LNetInvalidateHandle(md_handle);
+
+ /*
+ * NB md could be busy; LNetMDUnlink() above only starts the
+ * unlink, so wait for the event handler to flag completion
+ */
+ while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
+ CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
+
+ cfs_restore_sigs(blocked);
+}
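
Teardown no longer polls an event queue the way the old lnet_ping_target_fini() did with LNetEQPoll(); instead lnet_ping_event_handler() stamps pi_features with LNET_PING_FEAT_INVAL when the unlink event finally arrives, and lnet_ping_md_unlink() just sleeps until it sees that sentinel. A compressed analogue of the sentinel wait (pthread and C11 atomics as stand-ins, names hypothetical):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define FEAT_INVAL 0xdeadbeefU

static atomic_uint features = 1;        /* "ping target valid" */

static void *unlink_event(void *arg)
{
    /* the event handler: fires once the MD is finally unlinked */
    atomic_store(&features, FEAT_INVAL);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, unlink_event, NULL);
    /* lnet_ping_md_unlink(): wait until the sentinel appears */
    while (atomic_load(&features) != FEAT_INVAL)
        sched_yield();                  /* the real code sleeps 1s per pass */
    pthread_join(t, NULL);
    printf("MD unlinked\n");
    return 0;
}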
- /* Clear the peer table and wait for all peers to go (they hold refs on
- * their NIs) */
- lnet_peer_tables_cleanup();
+static void
+lnet_ping_info_install_locked(lnet_ping_info_t *ping_info)
+{
+ lnet_ni_status_t *ns;
+ lnet_ni_t *ni;
+ int i = 0;
+
+ list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+ LASSERT(i < ping_info->pi_nnis);
+ ns = &ping_info->pi_ni[i];
+
+ ns->ns_nid = ni->ni_nid;
+
+ lnet_ni_lock(ni);
+ ns->ns_status = (ni->ni_status) ?
+ ni->ni_status->ns_status : LNET_NI_STATUS_UP;
+ ni->ni_status = ns;
+ lnet_ni_unlock(ni);
+
+ i++;
+ }
+}
+
+static void
+lnet_ping_target_update(lnet_ping_info_t *pinfo, lnet_handle_md_t md_handle)
+{
+ lnet_ping_info_t *old_pinfo = NULL;
+ lnet_handle_md_t old_md;
+
+ /* switch the NIs over to the newly created ping info */
lnet_net_lock(LNET_LOCK_EX);
- /* Now wait for the NI's I just nuked to show up on ln_zombie_nis
- * and shut them down in guaranteed thread context */
+
+ if (!the_lnet.ln_routing)
+ pinfo->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+ lnet_ping_info_install_locked(pinfo);
+
+ if (the_lnet.ln_ping_info) {
+ old_pinfo = the_lnet.ln_ping_info;
+ old_md = the_lnet.ln_ping_target_md;
+ }
+ the_lnet.ln_ping_target_md = md_handle;
+ the_lnet.ln_ping_info = pinfo;
+
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ if (old_pinfo) {
+ /* unlink the old ping info */
+ lnet_ping_md_unlink(old_pinfo, &old_md);
+ lnet_ping_info_free(old_pinfo);
+ }
+}
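
lnet_ping_target_update() follows a publish-then-reclaim shape: the new info is installed and the old pointer stashed while LNET_LOCK_EX is held, but the slow unlink and free of the old buffer happen only after the lock is dropped. A sketch with a pthread mutex standing in for the net lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t net_lock = PTHREAD_MUTEX_INITIALIZER;
static int *current_info;               /* the published ping info */

static void target_update(int *new_info)
{
    int *old_info;

    pthread_mutex_lock(&net_lock);
    old_info = current_info;            /* stash the old one   */
    current_info = new_info;            /* publish the new one */
    pthread_mutex_unlock(&net_lock);

    free(old_info);                     /* slow reclaim, lock dropped */
}

int main(void)
{
    int *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

    target_update(a);
    target_update(b);                   /* frees a outside the lock */
    free(current_info);
    printf("done\n");
    return 0;
}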
+
+static void
+lnet_ping_target_fini(void)
+{
+ int rc;
+
+ lnet_ping_md_unlink(the_lnet.ln_ping_info,
+ &the_lnet.ln_ping_target_md);
+
+ rc = LNetEQFree(the_lnet.ln_ping_target_eq);
+ LASSERT(!rc);
+
+ lnet_ping_info_destroy();
+}
+
+static int
+lnet_ni_tq_credits(lnet_ni_t *ni)
+{
+ int credits;
+
+ LASSERT(ni->ni_ncpts >= 1);
+
+ if (ni->ni_ncpts == 1)
+ return ni->ni_maxtxcredits;
+
+ credits = ni->ni_maxtxcredits / ni->ni_ncpts;
+ credits = max(credits, 8 * ni->ni_peertxcredits);
+ credits = min(credits, ni->ni_maxtxcredits);
+
+ return credits;
+}
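
With the credit pool now split per CPT, a worked example helps (numbers hypothetical, not from the patch): ni_maxtxcredits = 256, ni_ncpts = 8 and ni_peertxcredits = 16 give an even share of 256/8 = 32, the floor of 8 * peertxcredits lifts that to max(32, 128) = 128, and the final clamp leaves min(128, 256) = 128 per queue. The queues may deliberately overcommit the NI total so each CPT can keep several peers busy. A self-contained mirror of the computation:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

/* scalar mirror of lnet_ni_tq_credits() */
static int tq_credits(int maxtx, int ncpts, int peertx)
{
    int credits;

    if (ncpts == 1)
        return maxtx;
    credits = maxtx / ncpts;            /* even share          */
    credits = max(credits, 8 * peertx); /* per-peer floor      */
    credits = min(credits, maxtx);      /* never above the cap */
    return credits;
}

int main(void)
{
    /* hypothetical tunables: 256 total, 8 CPTs, 16 per-peer credits */
    printf("per-queue credits: %d\n", tq_credits(256, 8, 16)); /* 128 */
    return 0;
}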
+
+static void
+lnet_ni_unlink_locked(lnet_ni_t *ni)
+{
+ if (!list_empty(&ni->ni_cptlist)) {
+ list_del_init(&ni->ni_cptlist);
+ lnet_ni_decref_locked(ni, 0);
+ }
+
+ /* move it to the zombie list so nobody can find it anymore */
+ LASSERT(!list_empty(&ni->ni_list));
+ list_move(&ni->ni_list, &the_lnet.ln_nis_zombie);
+ lnet_ni_decref_locked(ni, 0); /* drop ln_nis' ref */
+}
+
+static void
+lnet_clear_zombies_nis_locked(void)
+{
+ int i;
+ int islo;
+ lnet_ni_t *ni;
+ lnet_ni_t *temp;
+
+ /*
+ * Now wait for the NIs I just nuked to show up on ln_nis_zombie
+ * and shut them down in guaranteed thread context
+ */
i = 2;
- while (!list_empty(&the_lnet.ln_nis_zombie)) {
+ list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis_zombie, ni_list) {
int *ref;
int j;
- ni = list_entry(the_lnet.ln_nis_zombie.next,
- lnet_ni_t, ni_list);
list_del_init(&ni->ni_list);
cfs_percpt_for_each(ref, j, ni->ni_refs) {
- if (*ref == 0)
+ if (!*ref)
continue;
/* still busy, add it back to zombie list */
list_add(&ni->ni_list, &the_lnet.ln_nis_zombie);
@@ -921,11 +1125,12 @@ lnet_shutdown_lndnis(void)
islo = ni->ni_lnd->lnd_type == LOLND;
LASSERT(!in_interrupt());
- (ni->ni_lnd->lnd_shutdown)(ni);
-
- /* can't deref lnd anymore now; it might have unregistered
- * itself... */
+ ni->ni_lnd->lnd_shutdown(ni);
+ /*
+ * can't deref lnd anymore now; it might have unregistered
+ * itself...
+ */
if (!islo)
CDEBUG(D_LNI, "Removed LNI %s\n",
libcfs_nid2str(ni->ni_nid));
@@ -935,176 +1140,263 @@ lnet_shutdown_lndnis(void)
lnet_net_lock(LNET_LOCK_EX);
}
+}
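
The zombie reaping now uses list_for_each_entry_safe(): each NI is detached, its per-CPT refcounts are scanned, and a still-referenced NI goes back on the zombie list for a later pass; only fully quiescent NIs reach lnd_shutdown(). A toy single-threaded version of that reap-when-refs-drain loop (structures invented for illustration):

#include <stdio.h>

struct ni {
    int refs;       /* summed stand-in for the per-CPT refs */
    int zombie;     /* still on the zombie list?             */
};

static int reap_pass(struct ni *nis, int n)
{
    int left = 0;

    for (int i = 0; i < n; i++) {
        if (!nis[i].zombie)
            continue;
        if (nis[i].refs) {      /* still busy: leave for next pass */
            left++;
            continue;
        }
        nis[i].zombie = 0;      /* quiescent: shut it down */
        printf("shut down NI %d\n", i);
    }
    return left;
}

int main(void)
{
    struct ni nis[] = { { 0, 1 }, { 2, 1 }, { 0, 1 } };

    while (reap_pass(nis, 3)) {
        /* the real loop re-takes the net lock and sleeps here */
        for (int i = 0; i < 3; i++)
            if (nis[i].refs)
                nis[i].refs--;  /* references drain over time */
    }
    return 0;
}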
+
+static void
+lnet_shutdown_lndnis(void)
+{
+ lnet_ni_t *ni;
+ lnet_ni_t *temp;
+ int i;
+
+ /* NB called holding the global mutex */
+
+ /* All quiet on the API front */
+ LASSERT(!the_lnet.ln_shutdown);
+ LASSERT(!the_lnet.ln_refcount);
+ LASSERT(list_empty(&the_lnet.ln_nis_zombie));
+
+ lnet_net_lock(LNET_LOCK_EX);
+ the_lnet.ln_shutdown = 1; /* flag shutdown */
+
+ /* Unlink NIs from the global table */
+ list_for_each_entry_safe(ni, temp, &the_lnet.ln_nis, ni_list) {
+ lnet_ni_unlink_locked(ni);
+ }
+ /* Drop the cached loopback NI. */
+ if (the_lnet.ln_loni) {
+ lnet_ni_decref_locked(the_lnet.ln_loni, 0);
+ the_lnet.ln_loni = NULL;
+ }
+
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /*
+ * Clear lazy portals and drop delayed messages which hold refs
+ * on their lnet_msg_t::msg_rxpeer
+ */
+ for (i = 0; i < the_lnet.ln_nportals; i++)
+ LNetClearLazyPortal(i);
+
+ /*
+ * Clear the peer table and wait for all peers to go (they hold refs on
+ * their NIs)
+ */
+ lnet_peer_tables_cleanup(NULL);
+
+ lnet_net_lock(LNET_LOCK_EX);
+
+ lnet_clear_zombies_nis_locked();
the_lnet.ln_shutdown = 0;
lnet_net_unlock(LNET_LOCK_EX);
+}
- if (the_lnet.ln_network_tokens != NULL) {
- LIBCFS_FREE(the_lnet.ln_network_tokens,
- the_lnet.ln_network_tokens_nob);
- the_lnet.ln_network_tokens = NULL;
- }
+/* shut down the NI and release its refcount */
+static void
+lnet_shutdown_lndni(struct lnet_ni *ni)
+{
+ int i;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ lnet_ni_unlink_locked(ni);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ /* clear messages for this NI on the lazy portal */
+ for (i = 0; i < the_lnet.ln_nportals; i++)
+ lnet_clear_lazy_portal(ni, i, "Shutting down NI");
+
+ /* Do peer table cleanup for this ni */
+ lnet_peer_tables_cleanup(ni);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ lnet_clear_zombies_nis_locked();
+ lnet_net_unlock(LNET_LOCK_EX);
}
static int
-lnet_startup_lndnis(void)
+lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
+ __s32 peer_cr, __s32 peer_buf_cr, __s32 credits)
{
+ int rc = -EINVAL;
+ int lnd_type;
lnd_t *lnd;
- struct lnet_ni *ni;
struct lnet_tx_queue *tq;
- struct list_head nilist;
int i;
- int rc = 0;
- __u32 lnd_type;
- int nicount = 0;
- char *nets = lnet_get_networks();
- INIT_LIST_HEAD(&nilist);
+ lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
- if (nets == NULL)
- goto failed;
+ LASSERT(libcfs_isknown_lnd(lnd_type));
- rc = lnet_parse_networks(&nilist, nets);
- if (rc != 0)
- goto failed;
+ if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
+ lnd_type == IIBLND || lnd_type == VIBLND) {
+ CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
+ goto failed0;
+ }
- while (!list_empty(&nilist)) {
- ni = list_entry(nilist.next, lnet_ni_t, ni_list);
- lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
+ /* Make sure this new NI is unique. */
+ lnet_net_lock(LNET_LOCK_EX);
+ rc = lnet_net_unique(LNET_NIDNET(ni->ni_nid), &the_lnet.ln_nis);
+ lnet_net_unlock(LNET_LOCK_EX);
+ if (!rc) {
+ if (lnd_type == LOLND) {
+ lnet_ni_free(ni);
+ return 0;
+ }
- LASSERT(libcfs_isknown_lnd(lnd_type));
+ CERROR("Net %s is not unique\n",
+ libcfs_net2str(LNET_NIDNET(ni->ni_nid)));
+ rc = -EEXIST;
+ goto failed0;
+ }
- if (lnd_type == CIBLND ||
- lnd_type == OPENIBLND ||
- lnd_type == IIBLND ||
- lnd_type == VIBLND) {
- CERROR("LND %s obsoleted\n",
- libcfs_lnd2str(lnd_type));
- goto failed;
- }
+ mutex_lock(&the_lnet.ln_lnd_mutex);
+ lnd = lnet_find_lnd_by_type(lnd_type);
+ if (!lnd) {
+ mutex_unlock(&the_lnet.ln_lnd_mutex);
+ rc = request_module("%s", libcfs_lnd2modname(lnd_type));
mutex_lock(&the_lnet.ln_lnd_mutex);
- lnd = lnet_find_lnd_by_type(lnd_type);
- if (lnd == NULL) {
+ lnd = lnet_find_lnd_by_type(lnd_type);
+ if (!lnd) {
mutex_unlock(&the_lnet.ln_lnd_mutex);
- rc = request_module("%s",
- libcfs_lnd2modname(lnd_type));
- mutex_lock(&the_lnet.ln_lnd_mutex);
-
- lnd = lnet_find_lnd_by_type(lnd_type);
- if (lnd == NULL) {
- mutex_unlock(&the_lnet.ln_lnd_mutex);
- CERROR("Can't load LND %s, module %s, rc=%d\n",
- libcfs_lnd2str(lnd_type),
- libcfs_lnd2modname(lnd_type), rc);
- goto failed;
- }
+ CERROR("Can't load LND %s, module %s, rc=%d\n",
+ libcfs_lnd2str(lnd_type),
+ libcfs_lnd2modname(lnd_type), rc);
+ rc = -EINVAL;
+ goto failed0;
}
+ }
- lnet_net_lock(LNET_LOCK_EX);
- lnd->lnd_refcount++;
- lnet_net_unlock(LNET_LOCK_EX);
-
- ni->ni_lnd = lnd;
-
- rc = (lnd->lnd_startup)(ni);
-
- mutex_unlock(&the_lnet.ln_lnd_mutex);
+ lnet_net_lock(LNET_LOCK_EX);
+ lnd->lnd_refcount++;
+ lnet_net_unlock(LNET_LOCK_EX);
- if (rc != 0) {
- LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
- rc, libcfs_lnd2str(lnd->lnd_type));
- lnet_net_lock(LNET_LOCK_EX);
- lnd->lnd_refcount--;
- lnet_net_unlock(LNET_LOCK_EX);
- goto failed;
- }
+ ni->ni_lnd = lnd;
- LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query != NULL);
+ rc = lnd->lnd_startup(ni);
- list_del(&ni->ni_list);
+ mutex_unlock(&the_lnet.ln_lnd_mutex);
+ if (rc) {
+ LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
+ rc, libcfs_lnd2str(lnd->lnd_type));
lnet_net_lock(LNET_LOCK_EX);
- /* refcount for ln_nis */
- lnet_ni_addref_locked(ni, 0);
- list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
- if (ni->ni_cpts != NULL) {
- list_add_tail(&ni->ni_cptlist,
- &the_lnet.ln_nis_cpt);
- lnet_ni_addref_locked(ni, 0);
- }
-
+ lnd->lnd_refcount--;
lnet_net_unlock(LNET_LOCK_EX);
+ goto failed0;
+ }
- if (lnd->lnd_type == LOLND) {
- lnet_ni_addref(ni);
- LASSERT(the_lnet.ln_loni == NULL);
- the_lnet.ln_loni = ni;
- continue;
- }
+ /*
+ * If given some LND tunable parameters, parse those now to
+ * override the values in the NI structure.
+ */
+ if (peer_buf_cr >= 0)
+ ni->ni_peerrtrcredits = peer_buf_cr;
+ if (peer_timeout >= 0)
+ ni->ni_peertimeout = peer_timeout;
+ /*
+ * TODO
+ * Note: For now, don't allow the user to change
+ * peertxcredits as this number is used in the
+ * IB LND to control queue depth.
+ * if (peer_cr != -1)
+ * ni->ni_peertxcredits = peer_cr;
+ */
+ if (credits >= 0)
+ ni->ni_maxtxcredits = credits;
- if (ni->ni_peertxcredits == 0 ||
- ni->ni_maxtxcredits == 0) {
- LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
- libcfs_lnd2str(lnd->lnd_type),
- ni->ni_peertxcredits == 0 ?
- "" : "per-peer ");
- goto failed;
- }
+ LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
- cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
- tq->tq_credits_min =
- tq->tq_credits_max =
- tq->tq_credits = lnet_ni_tq_credits(ni);
- }
+ lnet_net_lock(LNET_LOCK_EX);
+ /* refcount for ln_nis */
+ lnet_ni_addref_locked(ni, 0);
+ list_add_tail(&ni->ni_list, &the_lnet.ln_nis);
+ if (ni->ni_cpts) {
+ lnet_ni_addref_locked(ni, 0);
+ list_add_tail(&ni->ni_cptlist, &the_lnet.ln_nis_cpt);
+ }
+
+ lnet_net_unlock(LNET_LOCK_EX);
- CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
- libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
- lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
- ni->ni_peerrtrcredits, ni->ni_peertimeout);
+ if (lnd->lnd_type == LOLND) {
+ lnet_ni_addref(ni);
+ LASSERT(!the_lnet.ln_loni);
+ the_lnet.ln_loni = ni;
+ return 0;
+ }
- nicount++;
+ if (!ni->ni_peertxcredits || !ni->ni_maxtxcredits) {
+ LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
+ libcfs_lnd2str(lnd->lnd_type),
+ !ni->ni_peertxcredits ?
+ "" : "per-peer ");
+ /*
+ * shut down the NI, since if we get here it must have already
+ * been started
+ */
+ lnet_shutdown_lndni(ni);
+ return -EINVAL;
}
- if (the_lnet.ln_eq_waitni != NULL && nicount > 1) {
- lnd_type = the_lnet.ln_eq_waitni->ni_lnd->lnd_type;
- LCONSOLE_ERROR_MSG(0x109, "LND %s can only run single-network\n",
- libcfs_lnd2str(lnd_type));
- goto failed;
+ cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
+ tq->tq_credits_min =
+ tq->tq_credits_max =
+ tq->tq_credits = lnet_ni_tq_credits(ni);
}
+ CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
+ libcfs_nid2str(ni->ni_nid), ni->ni_peertxcredits,
+ lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
+ ni->ni_peerrtrcredits, ni->ni_peertimeout);
+
return 0;
+failed0:
+ lnet_ni_free(ni);
+ return rc;
+}
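
lnet_startup_lndni() drops ln_lnd_mutex around request_module() because a freshly loaded LND module registers itself through lnet_register_lnd(), which takes the same mutex; holding it across the load would deadlock. The lookup is then retried under the relocked mutex, and only a second miss is fatal. A compressed sketch of that unlock-load-relock-retry shape (pthread stand-ins, fake registry; the LND name is illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lnd_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *registered;          /* name of the one loaded "LND" */

static void load_module(const char *name)
{
    /* the loaded module registers itself, taking the same mutex */
    pthread_mutex_lock(&lnd_mutex);
    registered = name;
    pthread_mutex_unlock(&lnd_mutex);
}

static int find_lnd(const char *name)
{
    return registered && !strcmp(registered, name);
}

static int startup(const char *name)
{
    pthread_mutex_lock(&lnd_mutex);
    if (!find_lnd(name)) {
        pthread_mutex_unlock(&lnd_mutex);   /* avoid deadlock      */
        load_module(name);                  /* request_module()    */
        pthread_mutex_lock(&lnd_mutex);
        if (!find_lnd(name)) {              /* second miss is fatal */
            pthread_mutex_unlock(&lnd_mutex);
            return -1;
        }
    }
    /* ... lnd->lnd_startup(ni) ... */
    pthread_mutex_unlock(&lnd_mutex);
    printf("started %s\n", name);
    return 0;
}

int main(void)
{
    return startup("socklnd") ? 1 : 0;
}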
- failed:
- lnet_shutdown_lndnis();
+static int
+lnet_startup_lndnis(struct list_head *nilist)
+{
+ struct lnet_ni *ni;
+ int rc;
+ int ni_count = 0;
- while (!list_empty(&nilist)) {
- ni = list_entry(nilist.next, lnet_ni_t, ni_list);
+ while (!list_empty(nilist)) {
+ ni = list_entry(nilist->next, lnet_ni_t, ni_list);
list_del(&ni->ni_list);
- lnet_ni_free(ni);
+ rc = lnet_startup_lndni(ni, -1, -1, -1, -1);
+
+ if (rc < 0)
+ goto failed;
+
+ ni_count++;
}
- return -ENETDOWN;
+ return ni_count;
+failed:
+ lnet_shutdown_lndnis();
+
+ return rc;
}
/**
* Initialize LNet library.
*
- * Only userspace program needs to call this function - it's automatically
- * called in the kernel at module loading time. Caller has to call lnet_fini()
- * after a call to lnet_init(), if and only if the latter returned 0. It must
- * be called exactly once.
+ * Automatically called at module loading time. Caller has to call
+ * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
+ * latter returned 0. It must be called exactly once.
*
- * \return 0 on success, and -ve on failures.
+ * \retval 0 on success
+ * \retval -ve on failures.
*/
-int
-lnet_init(void)
+int lnet_lib_init(void)
{
int rc;
lnet_assert_wire_constants();
- LASSERT(!the_lnet.ln_init);
memset(&the_lnet, 0, sizeof(the_lnet));
@@ -1117,28 +1409,29 @@ lnet_init(void)
/* we are under risk of consuming all lh_cookie */
CERROR("Can't have %d CPTs for LNet (max allowed is %d), please change setting of CPT-table and retry\n",
the_lnet.ln_cpt_number, LNET_CPT_MAX);
- return -1;
+ return -E2BIG;
}
while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
the_lnet.ln_cpt_bits++;
rc = lnet_create_locks();
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create LNet global locks: %d\n", rc);
- return -1;
+ return rc;
}
the_lnet.ln_refcount = 0;
- the_lnet.ln_init = 1;
LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
INIT_LIST_HEAD(&the_lnet.ln_lnds);
INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
- /* The hash table size is the number of bits it takes to express the set
+ /*
+ * The hash table size is the number of bits it takes to express the set
* ln_num_routes, minus 1 (better to under estimate than over so we
- * don't waste memory). */
+ * don't waste memory).
+ */
if (rnet_htable_size <= 0)
rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
@@ -1146,9 +1439,11 @@ lnet_init(void)
the_lnet.ln_remote_nets_hbits = max_t(int, 1,
order_base_2(rnet_htable_size) - 1);
- /* All LNDs apart from the LOLND are in separate modules. They
+ /*
+ * All LNDs apart from the LOLND are in separate modules. They
* register themselves when their module loads, and unregister
- * themselves when their module is unloaded. */
+ * themselves when their module is unloaded.
+ */
lnet_register_lnd(&the_lolnd);
return 0;
}
@@ -1156,30 +1451,22 @@ lnet_init(void)
/**
* Finalize LNet library.
*
- * Only userspace program needs to call this function. It can be called
- * at most once.
- *
- * \pre lnet_init() called with success.
+ * \pre lnet_lib_init() called with success.
* \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
*/
-void
-lnet_fini(void)
+void lnet_lib_exit(void)
{
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount == 0);
+ LASSERT(!the_lnet.ln_refcount);
while (!list_empty(&the_lnet.ln_lnds))
lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
- lnd_t, lnd_list));
+ lnd_t, lnd_list));
lnet_destroy_locks();
-
- the_lnet.ln_init = 0;
}
/**
* Set LNet PID and start LNet interfaces, routing, and forwarding.
*
- * Userspace program should call this after a successful call to lnet_init().
* Users must call this function at least once before any other functions.
* For each successful call there must be a corresponding call to
* LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
@@ -1197,77 +1484,114 @@ LNetNIInit(lnet_pid_t requested_pid)
{
int im_a_router = 0;
int rc;
+ int ni_count;
+ lnet_ping_info_t *pinfo;
+ lnet_handle_md_t md_handle;
+ struct list_head net_head;
+
+ INIT_LIST_HEAD(&net_head);
mutex_lock(&the_lnet.ln_api_mutex);
- LASSERT(the_lnet.ln_init);
CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
if (the_lnet.ln_refcount > 0) {
rc = the_lnet.ln_refcount++;
- goto out;
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
- if (requested_pid == LNET_PID_ANY) {
- /* Don't instantiate LNET just for me */
- rc = -ENETDOWN;
- goto failed0;
+ rc = lnet_prepare(requested_pid);
+ if (rc) {
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
}
- rc = lnet_prepare(requested_pid);
- if (rc != 0)
- goto failed0;
+ /* Add in the loopback network */
+ if (!lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, &net_head)) {
+ rc = -ENOMEM;
+ goto err_empty_list;
+ }
- rc = lnet_startup_lndnis();
- if (rc != 0)
- goto failed1;
+ /*
+ * If LNet is being initialized via DLC it is possible
+ * that the user requests not to load module parameters (ones which
+ * are supported by DLC) on initialization. Therefore, make sure not
+ * to load networks, routes and forwarding from module parameters
+ * in this case. On cleanup after a failure, only clean up
+ * routes if they have actually been loaded
+ */
+ if (!the_lnet.ln_nis_from_mod_params) {
+ rc = lnet_parse_networks(&net_head, lnet_get_networks());
+ if (rc < 0)
+ goto err_empty_list;
+ }
+
+ ni_count = lnet_startup_lndnis(&net_head);
+ if (ni_count < 0) {
+ rc = ni_count;
+ goto err_empty_list;
+ }
- rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
- if (rc != 0)
- goto failed2;
+ if (!the_lnet.ln_nis_from_mod_params) {
+ rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
+ if (rc)
+ goto err_shutdown_lndnis;
- rc = lnet_check_routes();
- if (rc != 0)
- goto failed2;
+ rc = lnet_check_routes();
+ if (rc)
+ goto err_destroy_routes;
- rc = lnet_rtrpools_alloc(im_a_router);
- if (rc != 0)
- goto failed2;
+ rc = lnet_rtrpools_alloc(im_a_router);
+ if (rc)
+ goto err_destroy_routes;
+ }
rc = lnet_acceptor_start();
- if (rc != 0)
- goto failed2;
+ if (rc)
+ goto err_destroy_routes;
the_lnet.ln_refcount = 1;
/* Now I may use my own API functions... */
- /* NB router checker needs the_lnet.ln_ping_info in
- * lnet_router_checker -> lnet_update_ni_status_locked */
- rc = lnet_ping_target_init();
- if (rc != 0)
- goto failed3;
+ rc = lnet_ping_info_setup(&pinfo, &md_handle, ni_count, true);
+ if (rc)
+ goto err_acceptor_stop;
+
+ lnet_ping_target_update(pinfo, md_handle);
rc = lnet_router_checker_start();
- if (rc != 0)
- goto failed4;
+ if (rc)
+ goto err_stop_ping;
+ lnet_fault_init();
lnet_router_debugfs_init();
- goto out;
- failed4:
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+
+err_stop_ping:
lnet_ping_target_fini();
- failed3:
+err_acceptor_stop:
the_lnet.ln_refcount = 0;
lnet_acceptor_stop();
- failed2:
- lnet_destroy_routes();
+err_destroy_routes:
+ if (!the_lnet.ln_nis_from_mod_params)
+ lnet_destroy_routes();
+err_shutdown_lndnis:
lnet_shutdown_lndnis();
- failed1:
+err_empty_list:
lnet_unprepare();
- failed0:
LASSERT(rc < 0);
- out:
mutex_unlock(&the_lnet.ln_api_mutex);
+ while (!list_empty(&net_head)) {
+ struct lnet_ni *ni;
+
+ ni = list_entry(net_head.next, struct lnet_ni, ni_list);
+ list_del_init(&ni->ni_list);
+ lnet_ni_free(ni);
+ }
return rc;
}
EXPORT_SYMBOL(LNetNIInit);
@@ -1286,7 +1610,6 @@ LNetNIFini(void)
{
mutex_lock(&the_lnet.ln_api_mutex);
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (the_lnet.ln_refcount != 1) {
@@ -1294,6 +1617,7 @@ LNetNIFini(void)
} else {
LASSERT(!the_lnet.ln_niinit_self);
+ lnet_fault_fini();
lnet_router_debugfs_fini();
lnet_router_checker_stop();
lnet_ping_target_fini();
@@ -1313,30 +1637,233 @@ LNetNIFini(void)
EXPORT_SYMBOL(LNetNIFini);
/**
- * This is an ugly hack to export IOC_LIBCFS_DEBUG_PEER and
- * IOC_LIBCFS_PORTALS_COMPATIBILITY commands to users, by tweaking the LNet
- * internal ioctl handler.
+ * Grabs the ni data from the ni structure and fills the out
+ * parameters
*
- * IOC_LIBCFS_PORTALS_COMPATIBILITY is now deprecated, don't use it.
- *
- * \param cmd IOC_LIBCFS_DEBUG_PEER to print debugging data about a peer.
- * The data will be printed to system console. Don't use it excessively.
- * \param arg A pointer to lnet_process_id_t, process ID of the peer.
+ * \param[in] ni network interface structure
+ * \param[out] cpt_count the number of cpts the ni is on
+ * \param[out] nid Network Interface ID
+ * \param[out] peer_timeout NI peer timeout
+ * \param[out] peer_tx_credits NI peer transmit credits
+ * \param[out] peer_rtr_credits NI peer router credits
+ * \param[out] max_tx_credits NI max transmit credit
+ * \param[out] net_config Network configuration
+ */
+static void
+lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
+ int *peer_timeout, int *peer_tx_credits,
+ int *peer_rtr_credits, int *max_tx_credits,
+ struct lnet_ioctl_net_config *net_config)
+{
+ int i;
+
+ if (!ni)
+ return;
+
+ if (!net_config)
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
+ ARRAY_SIZE(net_config->ni_interfaces));
+
+ for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
+ if (!ni->ni_interfaces[i])
+ break;
+
+ strncpy(net_config->ni_interfaces[i],
+ ni->ni_interfaces[i],
+ sizeof(net_config->ni_interfaces[i]));
+ }
+
+ *nid = ni->ni_nid;
+ *peer_timeout = ni->ni_peertimeout;
+ *peer_tx_credits = ni->ni_peertxcredits;
+ *peer_rtr_credits = ni->ni_peerrtrcredits;
+ *max_tx_credits = ni->ni_maxtxcredits;
+
+ net_config->ni_status = ni->ni_status->ns_status;
+
+ if (ni->ni_cpts) {
+ int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
+
+ for (i = 0; i < num_cpts; i++)
+ net_config->ni_cpts[i] = ni->ni_cpts[i];
+
+ *cpt_count = num_cpts;
+ }
+}
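
lnet_fill_ni_info() copies ni_interfaces[] into the ioctl reply with strncpy() and pins the assumption that both arrays have the same geometry with BUILD_BUG_ON(), so a future size change breaks the build instead of the copy. The same compile-time guard in portable C11, using static_assert (struct shapes invented here):

#include <assert.h>     /* static_assert (C11) */
#include <stdio.h>
#include <string.h>

#define MAX_IF 4

struct ni_state { char ifs[MAX_IF][16]; };
struct net_config { char ni_interfaces[MAX_IF][16]; };

/* equivalent of the BUILD_BUG_ON(): sizes checked at compile time */
static_assert(sizeof(((struct ni_state *)0)->ifs) ==
              sizeof(((struct net_config *)0)->ni_interfaces),
              "interface tables must match");

int main(void)
{
    struct ni_state ni = { .ifs = { "eth0", "ib0" } };
    struct net_config cfg;

    for (int i = 0; i < MAX_IF; i++)
        strncpy(cfg.ni_interfaces[i], ni.ifs[i],
                sizeof(cfg.ni_interfaces[i]));
    printf("%s %s\n", cfg.ni_interfaces[0], cfg.ni_interfaces[1]);
    return 0;
}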
+
+int
+lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
+ int *peer_tx_credits, int *peer_rtr_credits,
+ int *max_tx_credits,
+ struct lnet_ioctl_net_config *net_config)
+{
+ struct lnet_ni *ni;
+ struct list_head *tmp;
+ int cpt, i = 0;
+ int rc = -ENOENT;
+
+ cpt = lnet_net_lock_current();
+
+ list_for_each(tmp, &the_lnet.ln_nis) {
+ if (i++ != idx)
+ continue;
+
+ ni = list_entry(tmp, lnet_ni_t, ni_list);
+ lnet_ni_lock(ni);
+ lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout,
+ peer_tx_credits, peer_rtr_credits,
+ max_tx_credits, net_config);
+ lnet_ni_unlock(ni);
+ rc = 0;
+ break;
+ }
+
+ lnet_net_unlock(cpt);
+ return rc;
+}
+
+int
+lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
+ __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
+ __s32 credits)
+{
+ lnet_ping_info_t *pinfo;
+ lnet_handle_md_t md_handle;
+ struct lnet_ni *ni;
+ struct list_head net_head;
+ lnet_remotenet_t *rnet;
+ int rc;
+
+ INIT_LIST_HEAD(&net_head);
+
+ /* Create a ni structure for the network string */
+ rc = lnet_parse_networks(&net_head, nets);
+ if (rc <= 0)
+ return !rc ? -EINVAL : rc;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+
+ if (rc > 1) {
+ rc = -EINVAL; /* only add one interface per call */
+ goto failed0;
+ }
+
+ ni = list_entry(net_head.next, struct lnet_ni, ni_list);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ rnet = lnet_find_net_locked(LNET_NIDNET(ni->ni_nid));
+ lnet_net_unlock(LNET_LOCK_EX);
+ /*
+ * make sure the added net doesn't invalidate the current
+ * routing configuration LNet is keeping
+ */
+ if (rnet) {
+ CERROR("Adding net %s will invalidate routing configuration\n",
+ nets);
+ rc = -EUSERS;
+ goto failed0;
+ }
+
+ rc = lnet_ping_info_setup(&pinfo, &md_handle, 1 + lnet_get_ni_count(),
+ false);
+ if (rc)
+ goto failed0;
+
+ list_del_init(&ni->ni_list);
+
+ rc = lnet_startup_lndni(ni, peer_timeout, peer_cr,
+ peer_buf_cr, credits);
+ if (rc)
+ goto failed1;
+
+ if (ni->ni_lnd->lnd_accept) {
+ rc = lnet_acceptor_start();
+ if (rc < 0) {
+ /* shut down the NI that we just started */
+ CERROR("Failed to start up acceptor thread\n");
+ lnet_shutdown_lndni(ni);
+ goto failed1;
+ }
+ }
+
+ lnet_ping_target_update(pinfo, md_handle);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return 0;
+
+failed1:
+ lnet_ping_md_unlink(pinfo, &md_handle);
+ lnet_ping_info_free(pinfo);
+failed0:
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ while (!list_empty(&net_head)) {
+ ni = list_entry(net_head.next, struct lnet_ni, ni_list);
+ list_del_init(&ni->ni_list);
+ lnet_ni_free(ni);
+ }
+ return rc;
+}
+
+int
+lnet_dyn_del_ni(__u32 net)
+{
+ lnet_ni_t *ni;
+ lnet_ping_info_t *pinfo;
+ lnet_handle_md_t md_handle;
+ int rc;
+
+ /* don't allow userspace to shut down the LOLND */
+ if (LNET_NETTYP(net) == LOLND)
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ /* create and link a new ping info, before removing the old one */
+ rc = lnet_ping_info_setup(&pinfo, &md_handle,
+ lnet_get_ni_count() - 1, false);
+ if (rc)
+ goto out;
+
+ ni = lnet_net2ni(net);
+ if (!ni) {
+ rc = -EINVAL;
+ goto failed;
+ }
+
+ /* decrement the reference counter taken by lnet_net2ni() */
+ lnet_ni_decref_locked(ni, 0);
+
+ lnet_shutdown_lndni(ni);
+
+ if (!lnet_count_acceptor_nis())
+ lnet_acceptor_stop();
+
+ lnet_ping_target_update(pinfo, md_handle);
+ goto out;
+failed:
+ lnet_ping_md_unlink(pinfo, &md_handle);
+ lnet_ping_info_free(pinfo);
+out:
+ mutex_unlock(&the_lnet.ln_api_mutex);
+
+ return rc;
+}
+
+/**
+ * LNet ioctl handler.
*
- * \return Always return 0 when called by users directly (i.e., not via ioctl).
*/
int
LNetCtl(unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
+ struct lnet_ioctl_config_data *config;
lnet_process_id_t id = {0};
lnet_ni_t *ni;
int rc;
unsigned long secs_passed;
- LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
-
switch (cmd) {
case IOC_LIBCFS_GET_NI:
rc = LNetGetId(data->ioc_count, &id);
@@ -1347,26 +1874,149 @@ LNetCtl(unsigned int cmd, void *arg)
return lnet_fail_nid(data->ioc_nid, data->ioc_count);
case IOC_LIBCFS_ADD_ROUTE:
- rc = lnet_add_route(data->ioc_net, data->ioc_count,
- data->ioc_nid, data->ioc_priority);
- return (rc != 0) ? rc : lnet_check_routes();
+ config = arg;
+
+ if (config->cfg_hdr.ioc_len < sizeof(*config))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_add_route(config->cfg_net,
+ config->cfg_config_u.cfg_route.rtr_hop,
+ config->cfg_nid,
+ config->cfg_config_u.cfg_route.rtr_priority);
+ if (!rc) {
+ rc = lnet_check_routes();
+ if (rc)
+ lnet_del_route(config->cfg_net,
+ config->cfg_nid);
+ }
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
case IOC_LIBCFS_DEL_ROUTE:
- return lnet_del_route(data->ioc_net, data->ioc_nid);
+ config = arg;
+
+ if (config->cfg_hdr.ioc_len < sizeof(*config))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_del_route(config->cfg_net, config->cfg_nid);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
case IOC_LIBCFS_GET_ROUTE:
- return lnet_get_route(data->ioc_count,
- &data->ioc_net, &data->ioc_count,
- &data->ioc_nid, &data->ioc_flags,
- &data->ioc_priority);
+ config = arg;
+
+ if (config->cfg_hdr.ioc_len < sizeof(*config))
+ return -EINVAL;
+
+ return lnet_get_route(config->cfg_count,
+ &config->cfg_net,
+ &config->cfg_config_u.cfg_route.rtr_hop,
+ &config->cfg_nid,
+ &config->cfg_config_u.cfg_route.rtr_flags,
+ &config->cfg_config_u.cfg_route.rtr_priority);
+
+ case IOC_LIBCFS_GET_NET: {
+ struct lnet_ioctl_net_config *net_config;
+ size_t total = sizeof(*config) + sizeof(*net_config);
+
+ config = arg;
+
+ if (config->cfg_hdr.ioc_len < total)
+ return -EINVAL;
+
+ net_config = (struct lnet_ioctl_net_config *)
+ config->cfg_bulk;
+ if (!net_config)
+ return -EINVAL;
+
+ return lnet_get_net_config(config->cfg_count,
+ &config->cfg_ncpts,
+ &config->cfg_nid,
+ &config->cfg_config_u.cfg_net.net_peer_timeout,
+ &config->cfg_config_u.cfg_net.net_peer_tx_credits,
+ &config->cfg_config_u.cfg_net.net_peer_rtr_credits,
+ &config->cfg_config_u.cfg_net.net_max_tx_credits,
+ net_config);
+ }
+
+ case IOC_LIBCFS_GET_LNET_STATS: {
+ struct lnet_ioctl_lnet_stats *lnet_stats = arg;
+
+ if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
+ return -EINVAL;
+
+ lnet_counters_get(&lnet_stats->st_cntrs);
+ return 0;
+ }
+
+ case IOC_LIBCFS_CONFIG_RTR:
+ config = arg;
+
+ if (config->cfg_hdr.ioc_len < sizeof(*config))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ if (config->cfg_config_u.cfg_buffers.buf_enable) {
+ rc = lnet_rtrpools_enable();
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+ }
+ lnet_rtrpools_disable();
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return 0;
+
+ case IOC_LIBCFS_ADD_BUF:
+ config = arg;
+
+ if (config->cfg_hdr.ioc_len < sizeof(*config))
+ return -EINVAL;
+
+ mutex_lock(&the_lnet.ln_api_mutex);
+ rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.buf_tiny,
+ config->cfg_config_u.cfg_buffers.buf_small,
+ config->cfg_config_u.cfg_buffers.buf_large);
+ mutex_unlock(&the_lnet.ln_api_mutex);
+ return rc;
+
+ case IOC_LIBCFS_GET_BUF: {
+ struct lnet_ioctl_pool_cfg *pool_cfg;
+ size_t total = sizeof(*config) + sizeof(*pool_cfg);
+
+ config = arg;
+
+ if (config->cfg_hdr.ioc_len < total)
+ return -EINVAL;
+
+ pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
+ return lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
+ }
+
+ case IOC_LIBCFS_GET_PEER_INFO: {
+ struct lnet_ioctl_peer *peer_info = arg;
+
+ if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
+ return -EINVAL;
+
+ return lnet_get_peer_info(peer_info->pr_count,
+ &peer_info->pr_nid,
+ peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
+ &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
+ &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
+ &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
+ &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
+ &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
+ &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_rtr_credits,
+ &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
+ }
+
case IOC_LIBCFS_NOTIFY_ROUTER:
secs_passed = (ktime_get_real_seconds() - data->ioc_u64[0]);
- return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
- jiffies - secs_passed * HZ);
+ secs_passed *= msecs_to_jiffies(MSEC_PER_SEC);
- case IOC_LIBCFS_PORTALS_COMPATIBILITY:
- /* This can be removed once lustre stops calling it */
- return 0;
+ return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
+ jiffies - secs_passed);
case IOC_LIBCFS_LNET_DIST:
rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
@@ -1382,46 +2032,26 @@ LNetCtl(unsigned int cmd, void *arg)
lnet_net_unlock(LNET_LOCK_EX);
return 0;
+ case IOC_LIBCFS_LNET_FAULT:
+ return lnet_fault_ctl(data->ioc_flags, data);
+
case IOC_LIBCFS_PING:
id.nid = data->ioc_nid;
id.pid = data->ioc_u32[0];
rc = lnet_ping(id, data->ioc_u32[1], /* timeout */
- (lnet_process_id_t *)data->ioc_pbuf1,
- data->ioc_plen1/sizeof(lnet_process_id_t));
+ data->ioc_pbuf1,
+ data->ioc_plen1 / sizeof(lnet_process_id_t));
if (rc < 0)
return rc;
data->ioc_count = rc;
return 0;
- case IOC_LIBCFS_DEBUG_PEER: {
- /* CAVEAT EMPTOR: this one designed for calling directly; not
- * via an ioctl */
- id = *((lnet_process_id_t *) arg);
-
- lnet_debug_peer(id.nid);
-
- ni = lnet_net2ni(LNET_NIDNET(id.nid));
- if (ni == NULL) {
- CDEBUG(D_WARNING, "No NI for %s\n", libcfs_id2str(id));
- } else {
- if (ni->ni_lnd->lnd_ctl == NULL) {
- CDEBUG(D_WARNING, "No ctl for %s\n",
- libcfs_id2str(id));
- } else {
- (void)ni->ni_lnd->lnd_ctl(ni, cmd, arg);
- }
-
- lnet_ni_decref(ni);
- }
- return 0;
- }
-
default:
ni = lnet_net2ni(data->ioc_net);
- if (ni == NULL)
+ if (!ni)
return -EINVAL;
- if (ni->ni_lnd->lnd_ctl == NULL)
+ if (!ni->ni_lnd->lnd_ctl)
rc = -EINVAL;
else
rc = ni->ni_lnd->lnd_ctl(ni, cmd, arg);
@@ -1433,6 +2063,12 @@ LNetCtl(unsigned int cmd, void *arg)
}
EXPORT_SYMBOL(LNetCtl);
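
Each new LNetCtl case starts by comparing cfg_hdr.ioc_len with the size the kernel expects, so a short or stale userspace structure is bounced with -EINVAL before any field beyond the header is read. A minimal sketch of that length-gated decode (hypothetical struct layout):

#include <stdio.h>

struct ioc_hdr { unsigned int ioc_len; };
struct config { struct ioc_hdr cfg_hdr; int cfg_net; long cfg_nid; };

static int handle_ioctl(void *arg)
{
    struct config *cfg = arg;

    /* reject short buffers before touching fields past the header */
    if (cfg->cfg_hdr.ioc_len < sizeof(*cfg))
        return -22;     /* -EINVAL */
    printf("net %d nid %ld\n", cfg->cfg_net, cfg->cfg_nid);
    return 0;
}

int main(void)
{
    struct config cfg = { { sizeof(cfg) }, 7, 42 };

    printf("rc=%d\n", handle_ioctl(&cfg));
    cfg.cfg_hdr.ioc_len = 4;    /* pretend an old userspace */
    printf("rc=%d\n", handle_ioctl(&cfg));
    return 0;
}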
+void LNetDebugPeer(lnet_process_id_t id)
+{
+ lnet_debug_peer(id.nid);
+}
+EXPORT_SYMBOL(LNetDebugPeer);
+
/**
* Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
* all interfaces share the same PID, as requested by LNetNIInit().
@@ -1452,16 +2088,12 @@ LNetGetId(unsigned int index, lnet_process_id_t *id)
int cpt;
int rc = -ENOENT;
- LASSERT(the_lnet.ln_init);
-
- /* LNetNI initilization failed? */
- if (the_lnet.ln_refcount == 0)
- return rc;
+ LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
list_for_each(tmp, &the_lnet.ln_nis) {
- if (index-- != 0)
+ if (index--)
continue;
ni = list_entry(tmp, lnet_ni_t, ni_list);
@@ -1488,192 +2120,8 @@ LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
}
EXPORT_SYMBOL(LNetSnprintHandle);
-static int
-lnet_create_ping_info(void)
-{
- int i;
- int n;
- int rc;
- unsigned int infosz;
- lnet_ni_t *ni;
- lnet_process_id_t id;
- lnet_ping_info_t *pinfo;
-
- for (n = 0; ; n++) {
- rc = LNetGetId(n, &id);
- if (rc == -ENOENT)
- break;
-
- LASSERT(rc == 0);
- }
-
- infosz = offsetof(lnet_ping_info_t, pi_ni[n]);
- LIBCFS_ALLOC(pinfo, infosz);
- if (pinfo == NULL) {
- CERROR("Can't allocate ping info[%d]\n", n);
- return -ENOMEM;
- }
-
- pinfo->pi_nnis = n;
- pinfo->pi_pid = the_lnet.ln_pid;
- pinfo->pi_magic = LNET_PROTO_PING_MAGIC;
- pinfo->pi_features = LNET_PING_FEAT_NI_STATUS;
-
- for (i = 0; i < n; i++) {
- lnet_ni_status_t *ns = &pinfo->pi_ni[i];
-
- rc = LNetGetId(i, &id);
- LASSERT(rc == 0);
-
- ns->ns_nid = id.nid;
- ns->ns_status = LNET_NI_STATUS_UP;
-
- lnet_net_lock(0);
-
- ni = lnet_nid2ni_locked(id.nid, 0);
- LASSERT(ni != NULL);
-
- lnet_ni_lock(ni);
- LASSERT(ni->ni_status == NULL);
- ni->ni_status = ns;
- lnet_ni_unlock(ni);
-
- lnet_ni_decref_locked(ni, 0);
- lnet_net_unlock(0);
- }
-
- the_lnet.ln_ping_info = pinfo;
- return 0;
-}
-
-static void
-lnet_destroy_ping_info(void)
-{
- struct lnet_ni *ni;
-
- lnet_net_lock(0);
-
- list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
- lnet_ni_lock(ni);
- ni->ni_status = NULL;
- lnet_ni_unlock(ni);
- }
-
- lnet_net_unlock(0);
-
- LIBCFS_FREE(the_lnet.ln_ping_info,
- offsetof(lnet_ping_info_t,
- pi_ni[the_lnet.ln_ping_info->pi_nnis]));
- the_lnet.ln_ping_info = NULL;
-}
-
-int
-lnet_ping_target_init(void)
-{
- lnet_md_t md = { NULL };
- lnet_handle_me_t meh;
- lnet_process_id_t id;
- int rc;
- int rc2;
- int infosz;
-
- rc = lnet_create_ping_info();
- if (rc != 0)
- return rc;
-
- /* We can have a tiny EQ since we only need to see the unlink event on
- * teardown, which by definition is the last one! */
- rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &the_lnet.ln_ping_target_eq);
- if (rc != 0) {
- CERROR("Can't allocate ping EQ: %d\n", rc);
- goto failed_0;
- }
-
- memset(&id, 0, sizeof(lnet_process_id_t));
- id.nid = LNET_NID_ANY;
- id.pid = LNET_PID_ANY;
-
- rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
- LNET_PROTO_PING_MATCHBITS, 0,
- LNET_UNLINK, LNET_INS_AFTER,
- &meh);
- if (rc != 0) {
- CERROR("Can't create ping ME: %d\n", rc);
- goto failed_1;
- }
-
- /* initialize md content */
- infosz = offsetof(lnet_ping_info_t,
- pi_ni[the_lnet.ln_ping_info->pi_nnis]);
- md.start = the_lnet.ln_ping_info;
- md.length = infosz;
- md.threshold = LNET_MD_THRESH_INF;
- md.max_size = 0;
- md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
- LNET_MD_MANAGE_REMOTE;
- md.user_ptr = NULL;
- md.eq_handle = the_lnet.ln_ping_target_eq;
-
- rc = LNetMDAttach(meh, md,
- LNET_RETAIN,
- &the_lnet.ln_ping_target_md);
- if (rc != 0) {
- CERROR("Can't attach ping MD: %d\n", rc);
- goto failed_2;
- }
-
- return 0;
-
- failed_2:
- rc2 = LNetMEUnlink(meh);
- LASSERT(rc2 == 0);
- failed_1:
- rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(rc2 == 0);
- failed_0:
- lnet_destroy_ping_info();
- return rc;
-}
-
-void
-lnet_ping_target_fini(void)
-{
- lnet_event_t event;
- int rc;
- int which;
- int timeout_ms = 1000;
- sigset_t blocked = cfs_block_allsigs();
-
- LNetMDUnlink(the_lnet.ln_ping_target_md);
- /* NB md could be busy; this just starts the unlink */
-
- for (;;) {
- rc = LNetEQPoll(&the_lnet.ln_ping_target_eq, 1,
- timeout_ms, &event, &which);
-
- /* I expect overflow... */
- LASSERT(rc >= 0 || rc == -EOVERFLOW);
-
- if (rc == 0) {
- /* timed out: provide a diagnostic */
- CWARN("Still waiting for ping MD to unlink\n");
- timeout_ms *= 2;
- continue;
- }
-
- /* Got a valid event */
- if (event.unlinked)
- break;
- }
-
- rc = LNetEQFree(the_lnet.ln_ping_target_eq);
- LASSERT(rc == 0);
- lnet_destroy_ping_info();
- cfs_restore_sigs(blocked);
-}
-
-int
-lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids)
+static int lnet_ping(lnet_process_id_t id, int timeout_ms,
+ lnet_process_id_t __user *ids, int n_ids)
{
lnet_handle_eq_t eqh;
lnet_handle_md_t mdh;
@@ -1683,7 +2131,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
int unlinked = 0;
int replied = 0;
const int a_long_time = 60000; /* mS */
- int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+ int infosz;
lnet_ping_info_t *info;
lnet_process_id_t tmpid;
int i;
@@ -1692,6 +2140,8 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
int rc2;
sigset_t blocked;
+ infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]);
+
if (n_ids <= 0 ||
id.nid == LNET_NID_ANY ||
timeout_ms > 500000 || /* arbitrary limit! */
@@ -1699,15 +2149,15 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
return -EINVAL;
if (id.pid == LNET_PID_ANY)
- id.pid = LUSTRE_SRV_LNET_PID;
+ id.pid = LNET_PID_LUSTRE;
LIBCFS_ALLOC(info, infosz);
- if (info == NULL)
+ if (!info)
return -ENOMEM;
/* NB 2 events max (including any unlink event) */
rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't allocate EQ: %d\n", rc);
goto out_0;
}
@@ -1722,7 +2172,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
md.eq_handle = eqh;
rc = LNetMDBind(md, LNET_UNLINK, &mdh);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't bind MD: %d\n", rc);
goto out_1;
}
@@ -1731,11 +2181,11 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
LNET_RESERVED_PORTAL,
LNET_PROTO_PING_MATCHBITS, 0);
- if (rc != 0) {
+ if (rc) {
/* Don't CERROR; this could be deliberate! */
rc2 = LNetMDUnlink(mdh);
- LASSERT(rc2 == 0);
+ LASSERT(!rc2);
/* NB must wait for the UNLINK event below... */
unlinked = 1;
@@ -1759,11 +2209,11 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
LASSERT(rc2 != -EOVERFLOW); /* can't miss anything */
- if (rc2 <= 0 || event.status != 0) {
+ if (rc2 <= 0 || event.status) {
/* timeout or error */
- if (!replied && rc == 0)
+ if (!replied && !rc)
rc = (rc2 < 0) ? rc2 :
- (rc2 == 0) ? -ETIMEDOUT :
+ !rc2 ? -ETIMEDOUT :
event.status;
if (!unlinked) {
@@ -1772,7 +2222,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
/* No assertion (racing with network) */
unlinked = 1;
timeout_ms = a_long_time;
- } else if (rc2 == 0) {
+ } else if (!rc2) {
/* timed out waiting for unlink */
CWARN("ping %s: late network completion\n",
libcfs_id2str(id));
@@ -1812,7 +2262,7 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
goto out_1;
}
- if ((info->pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
+ if (!(info->pi_features & LNET_PING_FEAT_NI_STATUS)) {
CERROR("%s: ping w/o NI status: 0x%x\n",
libcfs_id2str(id), info->pi_features);
goto out_1;
@@ -1846,9 +2296,9 @@ lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_id
out_1:
rc2 = LNetEQFree(eqh);
- if (rc2 != 0)
+ if (rc2)
CERROR("rc2 %d\n", rc2);
- LASSERT(rc2 == 0);
+ LASSERT(!rc2);
out_0:
LIBCFS_FREE(info, infosz);
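A note on the buffer sizing used throughout the ping code above: lnet_ping_info_t ends in a flexible array of per-NI status slots, so both the ping target and lnet_ping() compute their allocation size as offsetof(..., pi_ni[n]) rather than with sizeof arithmetic. A minimal stand-alone sketch of the idiom, with a hypothetical ping_info type and plain calloc() standing in for LIBCFS_ALLOC():

	#include <stddef.h>
	#include <stdlib.h>

	/* Hypothetical mirror of the lnet_ping_info_t layout: a fixed
	 * header followed by a flexible array of per-NI status slots. */
	struct ping_info {
		unsigned int pi_nnis;	/* number of entries in pi_ni[] */
		struct {
			unsigned long ns_nid;
			int ns_status;
		} pi_ni[];
	};

	static struct ping_info *ping_info_alloc(unsigned int nnis)
	{
		/* offsetof(..., pi_ni[nnis]) covers the header plus nnis
		 * slots in one expression, as infosz does above. */
		size_t nob = offsetof(struct ping_info, pi_ni[nnis]);
		struct ping_info *info = calloc(1, nob);

		if (info)
			info->pi_nnis = nnis;
		return info;
	}

The same offsetof() expression is reused at free time, so allocation and release always agree on the buffer's length.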
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 284a3c271bc6..449069c9e649 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -37,15 +37,15 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
-struct lnet_text_buf_t { /* tmp struct for parsing routes */
+struct lnet_text_buf { /* tmp struct for parsing routes */
struct list_head ltb_list; /* stash on lists */
int ltb_size; /* allocated size */
char ltb_text[0]; /* text buffer */
};
static int lnet_tbnob; /* track text buf allocation */
-#define LNET_MAX_TEXTBUF_NOB (64<<10) /* bound allocation */
-#define LNET_SINGLE_TEXTBUF_NOB (4<<10)
+#define LNET_MAX_TEXTBUF_NOB (64 << 10) /* bound allocation */
+#define LNET_SINGLE_TEXTBUF_NOB (4 << 10)
static void
lnet_syntax(char *name, char *str, int offset, int width)
@@ -54,9 +54,9 @@ lnet_syntax(char *name, char *str, int offset, int width)
static char dashes[LNET_SINGLE_TEXTBUF_NOB];
memset(dots, '.', sizeof(dots));
- dots[sizeof(dots)-1] = 0;
+ dots[sizeof(dots) - 1] = 0;
memset(dashes, '-', sizeof(dashes));
- dashes[sizeof(dashes)-1] = 0;
+ dashes[sizeof(dashes) - 1] = 0;
LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str);
LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n",
@@ -77,7 +77,7 @@ lnet_issep(char c)
}
}
-static int
+int
lnet_net_unique(__u32 net, struct list_head *nilist)
{
struct list_head *tmp;
@@ -96,19 +96,25 @@ lnet_net_unique(__u32 net, struct list_head *nilist)
void
lnet_ni_free(struct lnet_ni *ni)
{
- if (ni->ni_refs != NULL)
+ int i;
+
+ if (ni->ni_refs)
cfs_percpt_free(ni->ni_refs);
- if (ni->ni_tx_queues != NULL)
+ if (ni->ni_tx_queues)
cfs_percpt_free(ni->ni_tx_queues);
- if (ni->ni_cpts != NULL)
+ if (ni->ni_cpts)
cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
+ for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) {
+ LIBCFS_FREE(ni->ni_interfaces[i],
+ strlen(ni->ni_interfaces[i]) + 1);
+ }
LIBCFS_FREE(ni, sizeof(*ni));
}
-static lnet_ni_t *
+lnet_ni_t *
lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
{
struct lnet_tx_queue *tq;
@@ -123,7 +129,7 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
}
LIBCFS_ALLOC(ni, sizeof(*ni));
- if (ni == NULL) {
+ if (!ni) {
CERROR("Out of memory creating network %s\n",
libcfs_net2str(net));
return NULL;
@@ -133,18 +139,18 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
INIT_LIST_HEAD(&ni->ni_cptlist);
ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*ni->ni_refs[0]));
- if (ni->ni_refs == NULL)
+ if (!ni->ni_refs)
goto failed;
ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*ni->ni_tx_queues[0]));
- if (ni->ni_tx_queues == NULL)
+ if (!ni->ni_tx_queues)
goto failed;
cfs_percpt_for_each(tq, i, ni->ni_tx_queues)
INIT_LIST_HEAD(&tq->tq_delayed);
- if (el == NULL) {
+ if (!el) {
ni->ni_cpts = NULL;
ni->ni_ncpts = LNET_CPT_NUMBER;
} else {
@@ -178,13 +184,19 @@ int
lnet_parse_networks(struct list_head *nilist, char *networks)
{
struct cfs_expr_list *el = NULL;
- int tokensize = strlen(networks) + 1;
+ int tokensize;
char *tokens;
char *str;
char *tmp;
struct lnet_ni *ni;
__u32 net;
int nnets = 0;
+ struct list_head *temp_node;
+
+ if (!networks) {
+ CERROR("networks string is undefined\n");
+ return -EINVAL;
+ }
if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
/* _WAY_ conservative */
@@ -193,23 +205,19 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
return -EINVAL;
}
+ tokensize = strlen(networks) + 1;
+
LIBCFS_ALLOC(tokens, tokensize);
- if (tokens == NULL) {
+ if (!tokens) {
CERROR("Can't allocate net tokens\n");
return -ENOMEM;
}
- the_lnet.ln_network_tokens = tokens;
- the_lnet.ln_network_tokens_nob = tokensize;
memcpy(tokens, networks, tokensize);
- str = tmp = tokens;
-
- /* Add in the loopback network */
- ni = lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, nilist);
- if (ni == NULL)
- goto failed;
+ tmp = tokens;
+ str = tokens;
- while (str != NULL && *str != 0) {
+ while (str && *str) {
char *comma = strchr(str, ',');
char *bracket = strchr(str, '(');
char *square = strchr(str, '[');
@@ -217,26 +225,29 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
int niface;
int rc;
- /* NB we don't check interface conflicts here; it's the LNDs
- * responsibility (if it cares at all) */
-
- if (square != NULL && (comma == NULL || square < comma)) {
- /* i.e: o2ib0(ib0)[1,2], number between square
- * brackets are CPTs this NI needs to be bond */
- if (bracket != NULL && bracket > square) {
+ /*
+ * NB we don't check interface conflicts here; it's the LND's
+ * responsibility (if it cares at all)
+ */
+ if (square && (!comma || square < comma)) {
+ /*
+ * e.g. o2ib0(ib0)[1,2]: the numbers between square
+ * brackets are the CPTs this NI needs to be bound to
+ */
+ if (bracket && bracket > square) {
tmp = square;
goto failed_syntax;
}
tmp = strchr(square, ']');
- if (tmp == NULL) {
+ if (!tmp) {
tmp = square;
goto failed_syntax;
}
rc = cfs_expr_list_parse(square, tmp - square + 1,
0, LNET_CPT_NUMBER - 1, &el);
- if (rc != 0) {
+ if (rc) {
tmp = square;
goto failed_syntax;
}
@@ -245,12 +256,10 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
*square++ = ' ';
}
- if (bracket == NULL ||
- (comma != NULL && comma < bracket)) {
-
+ if (!bracket || (comma && comma < bracket)) {
/* no interface list specified */
- if (comma != NULL)
+ if (comma)
*comma++ = 0;
net = libcfs_str2net(cfs_trimwhite(str));
@@ -262,10 +271,10 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
}
if (LNET_NETTYP(net) != LOLND && /* LO is implicit */
- lnet_ni_alloc(net, el, nilist) == NULL)
+ !lnet_ni_alloc(net, el, nilist))
goto failed;
- if (el != NULL) {
+ if (el) {
cfs_expr_list_free(el);
el = NULL;
}
@@ -281,12 +290,11 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
goto failed_syntax;
}
- nnets++;
ni = lnet_ni_alloc(net, el, nilist);
- if (ni == NULL)
+ if (!ni)
goto failed;
- if (el != NULL) {
+ if (el) {
cfs_expr_list_free(el);
el = NULL;
}
@@ -295,7 +303,7 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
iface = bracket + 1;
bracket = strchr(iface, ')');
- if (bracket == NULL) {
+ if (!bracket) {
tmp = iface;
goto failed_syntax;
}
@@ -303,11 +311,11 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
*bracket = 0;
do {
comma = strchr(iface, ',');
- if (comma != NULL)
+ if (comma)
*comma++ = 0;
iface = cfs_trimwhite(iface);
- if (*iface == 0) {
+ if (!*iface) {
tmp = iface;
goto failed_syntax;
}
@@ -319,16 +327,32 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
goto failed;
}
- ni->ni_interfaces[niface++] = iface;
+ /*
+ * Allocate a separate piece of memory and copy
+ * the string into it, so we don't have a
+ * dependency on the tokens string. This way we
+ * can free the tokens at the end of the function.
+ * The newly allocated ni_interfaces[] can be
+ * freed when freeing the NI.
+ */
+ LIBCFS_ALLOC(ni->ni_interfaces[niface],
+ strlen(iface) + 1);
+ if (!ni->ni_interfaces[niface]) {
+ CERROR("Can't allocate net interface name\n");
+ goto failed;
+ }
+ strncpy(ni->ni_interfaces[niface], iface,
+ strlen(iface));
+ niface++;
iface = comma;
- } while (iface != NULL);
+ } while (iface);
str = bracket + 1;
comma = strchr(bracket + 1, ',');
- if (comma != NULL) {
+ if (comma) {
*comma = 0;
str = cfs_trimwhite(str);
- if (*str != 0) {
+ if (*str) {
tmp = str;
goto failed_syntax;
}
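A subtlety in the interface-name copy in the hunk above: strncpy() is passed exactly strlen(iface) bytes, so it never writes a terminator itself; the string stays NUL-terminated only because LIBCFS_ALLOC() hands back zero-filled memory of strlen(iface) + 1 bytes. A stand-alone sketch of the same idiom, with a hypothetical dup_iface_name() and calloc() in place of LIBCFS_ALLOC():

	#include <stdlib.h>
	#include <string.h>

	/* Copy src into a zero-filled buffer of strlen(src) + 1 bytes;
	 * strncpy() of strlen(src) bytes leaves the final zero byte
	 * untouched, so the result is always terminated. */
	static char *dup_iface_name(const char *src)
	{
		size_t len = strlen(src);
		char *dst = calloc(1, len + 1);

		if (dst)
			strncpy(dst, src, len);
		return dst;
	}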
@@ -337,14 +361,17 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
}
str = cfs_trimwhite(str);
- if (*str != 0) {
+ if (*str) {
tmp = str;
goto failed_syntax;
}
}
- LASSERT(!list_empty(nilist));
- return 0;
+ list_for_each(temp_node, nilist)
+ nnets++;
+
+ LIBCFS_FREE(tokens, tokensize);
+ return nnets;
failed_syntax:
lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
@@ -356,23 +383,22 @@ lnet_parse_networks(struct list_head *nilist, char *networks)
lnet_ni_free(ni);
}
- if (el != NULL)
+ if (el)
cfs_expr_list_free(el);
LIBCFS_FREE(tokens, tokensize);
- the_lnet.ln_network_tokens = NULL;
return -EINVAL;
}
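The hunks above also change the contract of lnet_parse_networks(): the token buffer is now freed before returning instead of being parked in the_lnet, and the success return value is the number of networks parsed (counted off nilist) rather than 0. A hypothetical caller under the new contract:

	/* Hypothetical caller of the revised lnet_parse_networks():
	 * a positive return is the NI count, negative is an errno, and
	 * the token buffer has already been freed either way. */
	static int setup_networks(struct list_head *nilist, char *networks)
	{
		int nnets = lnet_parse_networks(nilist, networks);

		if (nnets < 0)
			return nnets;	/* -EINVAL or -ENOMEM */
		/* nnets NIs now sit on nilist; no token cleanup needed. */
		return 0;
	}

Note too that the parser no longer allocates the implicit loopback NI itself; LO remains implicit, per the LNET_NETTYP check above.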
-static struct lnet_text_buf_t *
+static struct lnet_text_buf *
lnet_new_text_buf(int str_len)
{
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
int nob;
/* NB allocate space for the terminating 0 */
- nob = offsetof(struct lnet_text_buf_t, ltb_text[str_len + 1]);
+ nob = offsetof(struct lnet_text_buf, ltb_text[str_len + 1]);
if (nob > LNET_SINGLE_TEXTBUF_NOB) {
/* _way_ conservative for "route net gateway..." */
CERROR("text buffer too big\n");
@@ -385,7 +411,7 @@ lnet_new_text_buf(int str_len)
}
LIBCFS_ALLOC(ltb, nob);
- if (ltb == NULL)
+ if (!ltb)
return NULL;
ltb->ltb_size = nob;
@@ -395,7 +421,7 @@ lnet_new_text_buf(int str_len)
}
static void
-lnet_free_text_buf(struct lnet_text_buf_t *ltb)
+lnet_free_text_buf(struct lnet_text_buf *ltb)
{
lnet_tbnob -= ltb->ltb_size;
LIBCFS_FREE(ltb, ltb->ltb_size);
@@ -404,10 +430,10 @@ lnet_free_text_buf(struct lnet_text_buf_t *ltb)
static void
lnet_free_text_bufs(struct list_head *tbs)
{
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
+ ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
list_del(&ltb->ltb_list);
lnet_free_text_buf(ltb);
@@ -421,7 +447,7 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
char *sep;
int nob;
int i;
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
INIT_LIST_HEAD(&pending);
@@ -432,16 +458,16 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
str++;
/* scan for separator or comment */
- for (sep = str; *sep != 0; sep++)
+ for (sep = str; *sep; sep++)
if (lnet_issep(*sep) || *sep == '#')
break;
nob = (int)(sep - str);
if (nob > 0) {
ltb = lnet_new_text_buf(nob);
- if (ltb == NULL) {
+ if (!ltb) {
lnet_free_text_bufs(&pending);
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < nob; i++)
@@ -459,10 +485,10 @@ lnet_str2tbs_sep(struct list_head *tbs, char *str)
/* scan for separator */
do {
sep++;
- } while (*sep != 0 && !lnet_issep(*sep));
+ } while (*sep && !lnet_issep(*sep));
}
- if (*sep == 0)
+ if (!*sep)
break;
str = sep + 1;
@@ -479,18 +505,18 @@ lnet_expand1tb(struct list_head *list,
{
int len1 = (int)(sep1 - str);
int len2 = strlen(sep2 + 1);
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
LASSERT(*sep1 == '[');
LASSERT(*sep2 == ']');
ltb = lnet_new_text_buf(len1 + itemlen + len2);
- if (ltb == NULL)
+ if (!ltb)
return -ENOMEM;
memcpy(ltb->ltb_text, str, len1);
memcpy(&ltb->ltb_text[len1], item, itemlen);
- memcpy(&ltb->ltb_text[len1+itemlen], sep2 + 1, len2);
+ memcpy(&ltb->ltb_text[len1 + itemlen], sep2 + 1, len2);
ltb->ltb_text[len1 + itemlen + len2] = 0;
list_add_tail(&ltb->ltb_list, list);
@@ -516,15 +542,14 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
INIT_LIST_HEAD(&pending);
sep = strchr(str, '[');
- if (sep == NULL) /* nothing to expand */
+ if (!sep) /* nothing to expand */
return 0;
sep2 = strchr(sep, ']');
- if (sep2 == NULL)
+ if (!sep2)
goto failed;
for (parsed = sep; parsed < sep2; parsed = enditem) {
-
enditem = ++parsed;
while (enditem < sep2 && *enditem != ',')
enditem++;
@@ -534,17 +559,13 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi,
&stride, &scanned) < 3) {
-
if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) {
-
/* simple string enumeration */
- if (lnet_expand1tb(
- &pending, str, sep, sep2,
- parsed,
- (int)(enditem - parsed)) != 0) {
+ if (lnet_expand1tb(&pending, str, sep, sep2,
+ parsed,
+ (int)(enditem - parsed))) {
goto failed;
}
-
continue;
}
@@ -557,18 +578,17 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
goto failed;
if (hi < 0 || lo < 0 || stride < 0 || hi < lo ||
- (hi - lo) % stride != 0)
+ (hi - lo) % stride)
goto failed;
for (i = lo; i <= hi; i += stride) {
-
snprintf(num, sizeof(num), "%d", i);
nob = strlen(num);
if (nob + 1 == sizeof(num))
goto failed;
if (lnet_expand1tb(&pending, str, sep, sep2,
- num, nob) != 0)
+ num, nob))
goto failed;
}
}
@@ -578,7 +598,7 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str)
failed:
lnet_free_text_bufs(&pending);
- return -1;
+ return -EINVAL;
}
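For reference, the bracket grammar handled above: inside "[...]" a comma list enumerates items verbatim, while "lo-hi" and "lo-hi/stride" enumerate numeric ranges, each expansion producing one new text buffer; the failure paths now return real errnos (-ENOMEM, -EINVAL) instead of -1. A tiny stand-alone illustration of the range arm (hypothetical expand_range(), printf() in place of lnet_expand1tb()):

	#include <stdio.h>

	/* "tcp[0-4/2]" expands to tcp0, tcp2, tcp4; a plain "lo-hi"
	 * behaves as stride 1. The code above builds one text buffer
	 * per value; here the expansions are just printed. */
	static void expand_range(const char *prefix, int lo, int hi,
				 int stride)
	{
		int i;

		for (i = lo; i <= hi; i += stride)
			printf("%s%d\n", prefix, i);
	}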
static int
@@ -602,17 +622,19 @@ lnet_parse_priority(char *str, unsigned int *priority, char **token)
int len;
sep = strchr(str, LNET_PRIORITY_SEPARATOR);
- if (sep == NULL) {
+ if (!sep) {
*priority = 0;
return 0;
}
len = strlen(sep + 1);
- if ((sscanf((sep+1), "%u%n", priority, &nob) < 1) || (len != nob)) {
- /* Update the caller's token pointer so it treats the found
- priority as the token to report in the error message. */
+ if ((sscanf((sep + 1), "%u%n", priority, &nob) < 1) || (len != nob)) {
+ /*
+ * Update the caller's token pointer so it treats the found
+ * priority as the token to report in the error message.
+ */
*token += sep - str + 1;
- return -1;
+ return -EINVAL;
}
CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob);
@@ -636,13 +658,13 @@ lnet_parse_route(char *str, int *im_a_router)
struct list_head *tmp2;
__u32 net;
lnet_nid_t nid;
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
int rc;
char *sep;
char *token = str;
int ntokens = 0;
int myrc = -1;
- unsigned int hops;
+ __u32 hops;
int got_hops = 0;
unsigned int priority = 0;
@@ -658,7 +680,7 @@ lnet_parse_route(char *str, int *im_a_router)
/* scan for token start */
while (isspace(*sep))
sep++;
- if (*sep == 0) {
+ if (!*sep) {
if (ntokens < (got_hops ? 3 : 2))
goto token_error;
break;
@@ -668,9 +690,9 @@ lnet_parse_route(char *str, int *im_a_router)
token = sep++;
/* scan for token end */
- while (*sep != 0 && !isspace(*sep))
+ while (*sep && !isspace(*sep))
sep++;
- if (*sep != 0)
+ if (*sep)
*sep++ = 0;
if (ntokens == 1) {
@@ -684,7 +706,7 @@ lnet_parse_route(char *str, int *im_a_router)
}
ltb = lnet_new_text_buf(strlen(token));
- if (ltb == NULL)
+ if (!ltb)
goto out;
strcpy(ltb->ltb_text, token);
@@ -692,8 +714,7 @@ lnet_parse_route(char *str, int *im_a_router)
list_add_tail(tmp1, tmp2);
while (tmp1 != tmp2) {
- ltb = list_entry(tmp1, struct lnet_text_buf_t,
- ltb_list);
+ ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list);
rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
if (rc < 0)
@@ -726,20 +747,23 @@ lnet_parse_route(char *str, int *im_a_router)
}
}
+ /*
+ * If no hops were specified, flag the value as unset, since
+ * hops is an optional parameter.
+ */
if (!got_hops)
- hops = 1;
+ hops = LNET_UNDEFINED_HOPS;
LASSERT(!list_empty(&nets));
LASSERT(!list_empty(&gateways));
list_for_each(tmp1, &nets) {
- ltb = list_entry(tmp1, struct lnet_text_buf_t, ltb_list);
+ ltb = list_entry(tmp1, struct lnet_text_buf, ltb_list);
net = libcfs_str2net(ltb->ltb_text);
LASSERT(net != LNET_NIDNET(LNET_NID_ANY));
list_for_each(tmp2, &gateways) {
- ltb = list_entry(tmp2, struct lnet_text_buf_t,
- ltb_list);
+ ltb = list_entry(tmp2, struct lnet_text_buf, ltb_list);
nid = libcfs_str2nid(ltb->ltb_text);
LASSERT(nid != LNET_NID_ANY);
@@ -749,7 +773,7 @@ lnet_parse_route(char *str, int *im_a_router)
}
rc = lnet_add_route(net, hops, nid, priority);
- if (rc != 0) {
+ if (rc && rc != -EEXIST && rc != -EHOSTUNREACH) {
CERROR("Can't create route to %s via %s\n",
libcfs_net2str(net),
libcfs_nid2str(nid));
@@ -772,10 +796,10 @@ lnet_parse_route(char *str, int *im_a_router)
static int
lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
{
- struct lnet_text_buf_t *ltb;
+ struct lnet_text_buf *ltb;
while (!list_empty(tbs)) {
- ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list);
+ ltb = list_entry(tbs->next, struct lnet_text_buf, ltb_list);
if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
lnet_free_text_bufs(tbs);
@@ -806,7 +830,7 @@ lnet_parse_routes(char *routes, int *im_a_router)
rc = lnet_parse_route_tbs(&tbs, im_a_router);
}
- LASSERT(lnet_tbnob == 0);
+ LASSERT(!lnet_tbnob);
return rc;
}
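One semantic change in lnet_parse_route() above is easy to miss: the optional hop count used to default to 1, and now defaults to LNET_UNDEFINED_HOPS so downstream code can tell "not specified" apart from "one hop". A hedged sketch of how a consumer might normalize it; the sentinel's value is assumed here, not quoted from the source:

	/* Hypothetical normalization of an unset hop count; the real
	 * sentinel is LNET_UNDEFINED_HOPS, assumed to be all-ones. */
	#define SKETCH_UNDEFINED_HOPS ((__u32)-1)

	static unsigned int effective_hops(__u32 hops)
	{
		return hops == SKETCH_UNDEFINED_HOPS ? 1 : hops;
	}

Relatedly, lnet_add_route() returns of -EEXIST and -EHOSTUNREACH are now tolerated rather than treated as fatal parse errors.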
@@ -818,7 +842,7 @@ lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
int i;
rc = cfs_ip_addr_parse(token, len, &list);
- if (rc != 0)
+ if (rc)
return rc;
for (rc = i = 0; !rc && i < nip; i++)
@@ -851,18 +875,18 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
/* scan for token start */
while (isspace(*sep))
sep++;
- if (*sep == 0)
+ if (!*sep)
break;
token = sep++;
/* scan for token end */
- while (*sep != 0 && !isspace(*sep))
+ while (*sep && !isspace(*sep))
sep++;
- if (*sep != 0)
+ if (*sep)
*sep++ = 0;
- if (ntokens++ == 0) {
+ if (!ntokens++) {
net = token;
continue;
}
@@ -876,7 +900,8 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
return rc;
}
- matched |= (rc != 0);
+ if (rc)
+ matched |= 1;
}
if (!matched)
@@ -892,12 +917,12 @@ lnet_netspec2net(char *netspec)
char *bracket = strchr(netspec, '(');
__u32 net;
- if (bracket != NULL)
+ if (bracket)
*bracket = 0;
net = libcfs_str2net(netspec);
- if (bracket != NULL)
+ if (bracket)
*bracket = '(';
return net;
@@ -909,8 +934,8 @@ lnet_splitnets(char *source, struct list_head *nets)
int offset = 0;
int offset2;
int len;
- struct lnet_text_buf_t *tb;
- struct lnet_text_buf_t *tb2;
+ struct lnet_text_buf *tb;
+ struct lnet_text_buf *tb2;
struct list_head *t;
char *sep;
char *bracket;
@@ -919,15 +944,13 @@ lnet_splitnets(char *source, struct list_head *nets)
LASSERT(!list_empty(nets));
LASSERT(nets->next == nets->prev); /* single entry */
- tb = list_entry(nets->next, struct lnet_text_buf_t, ltb_list);
+ tb = list_entry(nets->next, struct lnet_text_buf, ltb_list);
for (;;) {
sep = strchr(tb->ltb_text, ',');
bracket = strchr(tb->ltb_text, '(');
- if (sep != NULL &&
- bracket != NULL &&
- bracket < sep) {
+ if (sep && bracket && bracket < sep) {
/* netspec lists interfaces... */
offset2 = offset + (int)(bracket - tb->ltb_text);
@@ -935,16 +958,16 @@ lnet_splitnets(char *source, struct list_head *nets)
bracket = strchr(bracket + 1, ')');
- if (bracket == NULL ||
- !(bracket[1] == ',' || bracket[1] == 0)) {
+ if (!bracket ||
+ !(bracket[1] == ',' || !bracket[1])) {
lnet_syntax("ip2nets", source, offset2, len);
return -EINVAL;
}
- sep = (bracket[1] == 0) ? NULL : bracket + 1;
+ sep = !bracket[1] ? NULL : bracket + 1;
}
- if (sep != NULL)
+ if (sep)
*sep++ = 0;
net = lnet_netspec2net(tb->ltb_text);
@@ -955,7 +978,7 @@ lnet_splitnets(char *source, struct list_head *nets)
}
list_for_each(t, nets) {
- tb2 = list_entry(t, struct lnet_text_buf_t, ltb_list);
+ tb2 = list_entry(t, struct lnet_text_buf, ltb_list);
if (tb2 == tb)
continue;
@@ -968,13 +991,13 @@ lnet_splitnets(char *source, struct list_head *nets)
}
}
- if (sep == NULL)
+ if (!sep)
return 0;
offset += (int)(sep - tb->ltb_text);
len = strlen(sep);
tb2 = lnet_new_text_buf(len);
- if (tb2 == NULL)
+ if (!tb2)
return -ENOMEM;
strncpy(tb2->ltb_text, sep, len);
@@ -996,8 +1019,9 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
struct list_head current_nets;
struct list_head *t;
struct list_head *t2;
- struct lnet_text_buf_t *tb;
- struct lnet_text_buf_t *tb2;
+ struct lnet_text_buf *tb;
+ struct lnet_text_buf *temp;
+ struct lnet_text_buf *tb2;
__u32 net1;
__u32 net2;
int len;
@@ -1008,7 +1032,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
INIT_LIST_HEAD(&raw_entries);
if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
CERROR("Error parsing ip2nets\n");
- LASSERT(lnet_tbnob == 0);
+ LASSERT(!lnet_tbnob);
return -EINVAL;
}
@@ -1019,12 +1043,9 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
len = 0;
rc = 0;
- while (!list_empty(&raw_entries)) {
- tb = list_entry(raw_entries.next, struct lnet_text_buf_t,
- ltb_list);
-
+ list_for_each_entry_safe(tb, temp, &raw_entries, ltb_list) {
strncpy(source, tb->ltb_text, sizeof(source));
- source[sizeof(source)-1] = '\0';
+ source[sizeof(source) - 1] = '\0';
/* replace ltb_text with the network(s) add on match */
rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip);
@@ -1033,7 +1054,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
list_del(&tb->ltb_list);
- if (rc == 0) { /* no match */
+ if (!rc) { /* no match */
lnet_free_text_buf(tb);
continue;
}
@@ -1047,13 +1068,13 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
dup = 0;
list_for_each(t, &current_nets) {
- tb = list_entry(t, struct lnet_text_buf_t, ltb_list);
+ tb = list_entry(t, struct lnet_text_buf, ltb_list);
net1 = lnet_netspec2net(tb->ltb_text);
LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY));
list_for_each(t2, &matched_nets) {
- tb2 = list_entry(t2, struct lnet_text_buf_t,
- ltb_list);
+ tb2 = list_entry(t2, struct lnet_text_buf,
+ ltb_list);
net2 = lnet_netspec2net(tb2->ltb_text);
LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY));
@@ -1073,13 +1094,13 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
}
list_for_each_safe(t, t2, &current_nets) {
- tb = list_entry(t, struct lnet_text_buf_t, ltb_list);
+ tb = list_entry(t, struct lnet_text_buf, ltb_list);
list_del(&tb->ltb_list);
list_add_tail(&tb->ltb_list, &matched_nets);
len += snprintf(networks + len, sizeof(networks) - len,
- "%s%s", (len == 0) ? "" : ",",
+ "%s%s", !len ? "" : ",",
tb->ltb_text);
if (len >= sizeof(networks)) {
@@ -1096,7 +1117,7 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
lnet_free_text_bufs(&raw_entries);
lnet_free_text_bufs(&matched_nets);
lnet_free_text_bufs(&current_nets);
- LASSERT(lnet_tbnob == 0);
+ LASSERT(!lnet_tbnob);
if (rc < 0)
return rc;
@@ -1122,7 +1143,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
return nif;
LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
- if (ipaddrs == NULL) {
+ if (!ipaddrs) {
CERROR("Can't allocate ipaddrs[%d]\n", nif);
lnet_ipif_free_enumeration(ifnames, nif);
return -ENOMEM;
@@ -1133,7 +1154,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
continue;
rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask);
- if (rc != 0) {
+ if (rc) {
CWARN("Can't query interface %s: %d\n",
ifnames[i], rc);
continue;
@@ -1155,7 +1176,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp)
} else {
if (nip > 0) {
LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2));
- if (ipaddrs2 == NULL) {
+ if (!ipaddrs2) {
CERROR("Can't allocate ipaddrs[%d]\n", nip);
nip = -ENOMEM;
} else {
@@ -1184,7 +1205,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets)
return nip;
}
- if (nip == 0) {
+ if (!nip) {
LCONSOLE_ERROR_MSG(0x118,
"No local IP interfaces for ip2nets to match\n");
return -ENOENT;
@@ -1198,7 +1219,7 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets)
return rc;
}
- if (rc == 0) {
+ if (!rc) {
LCONSOLE_ERROR_MSG(0x11a,
"ip2nets does not match any local IP interfaces\n");
return -ENOENT;
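To summarize the outcomes of lnet_parse_ip2nets() above: having no local IP interfaces and matching no ip2nets rule both resolve to -ENOENT, while enumeration or parse failures propagate unchanged. A condensed, hypothetical outline:

	/* Hypothetical condensation of lnet_parse_ip2nets(): nip is the
	 * local address count, rc the number of matching rules. */
	static int resolve_ip2nets(char **networksp, char *ip2nets,
				   __u32 *ipaddrs, int nip)
	{
		int rc;

		if (!nip)
			return -ENOENT;	/* no local IP interfaces */

		rc = lnet_match_networks(networksp, ip2nets, ipaddrs, nip);
		if (rc < 0)
			return rc;	/* parse or allocation failure */

		return rc ? 0 : -ENOENT;	/* zero matches */
	}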
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 64f94a690081..adbcadbab1be 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -72,33 +72,38 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
{
lnet_eq_t *eq;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
- /* We need count to be a power of 2 so that when eq_{enq,deq}_seq
+ /*
+ * We need count to be a power of 2 so that when eq_{enq,deq}_seq
* overflow, they don't skip entries, so the queue has the same
- * apparent capacity at all times */
+ * apparent capacity at all times
+ */
+ if (count)
+ count = roundup_pow_of_two(count);
- count = roundup_pow_of_two(count);
-
- if (callback != LNET_EQ_HANDLER_NONE && count != 0)
+ if (callback != LNET_EQ_HANDLER_NONE && count)
CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? Please contact with developer to confirm\n", count);
- /* count can be 0 if only need callback, we can eliminate
- * overhead of enqueue event */
- if (count == 0 && callback == LNET_EQ_HANDLER_NONE)
+ /*
+ * count can be 0 if we only need the callback; then we can
+ * eliminate the overhead of enqueuing events
+ */
+ if (!count && callback == LNET_EQ_HANDLER_NONE)
return -EINVAL;
eq = lnet_eq_alloc();
- if (eq == NULL)
+ if (!eq)
return -ENOMEM;
- if (count != 0) {
+ if (count) {
LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
- if (eq->eq_events == NULL)
+ if (!eq->eq_events)
goto failed;
- /* NB allocator has set all event sequence numbers to 0,
- * so all them should be earlier than eq_deq_seq */
+ /*
+ * NB allocator has set all event sequence numbers to 0,
+ * so all of them should be earlier than eq_deq_seq
+ */
}
eq->eq_deq_seq = 1;
@@ -108,13 +113,15 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*eq->eq_refs[0]));
- if (eq->eq_refs == NULL)
+ if (!eq->eq_refs)
goto failed;
/* MUST hold both exclusive lnet_res_lock */
lnet_res_lock(LNET_LOCK_EX);
- /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
- * both EQ lookup and poll event with only lnet_eq_wait_lock */
+ /*
+ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
+ * both EQ lookup and poll event with only lnet_eq_wait_lock
+ */
lnet_eq_wait_lock();
lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
@@ -127,10 +134,10 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
return 0;
failed:
- if (eq->eq_events != NULL)
+ if (eq->eq_events)
LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));
- if (eq->eq_refs != NULL)
+ if (eq->eq_refs)
cfs_percpt_free(eq->eq_refs);
lnet_eq_free(eq);
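The roundup_pow_of_two() guard above preserves an invariant the dequeue path relies on: with a power-of-two ring, the free-running eq_enq_seq/eq_deq_seq counters can index events by masking, and counter wraparound never skips slots or shrinks the apparent capacity. A minimal sketch of that indexing with a hypothetical ev_ring type:

	/* With size a power of two, seq & (size - 1) visits every slot
	 * and keeps doing so across unsigned overflow of the counter. */
	struct ev_ring {
		unsigned int size;	/* power of two, as enforced above */
		unsigned long enq_seq;	/* free-running producer sequence */
	};

	static unsigned int ev_ring_slot(struct ev_ring *r)
	{
		return r->enq_seq++ & (r->size - 1);
	}

The new "if (count)" guard simply skips the rounding for pure-callback EQs, where count == 0 is legal and no ring is allocated.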
@@ -159,23 +166,24 @@ LNetEQFree(lnet_handle_eq_t eqh)
int size = 0;
int i;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
lnet_res_lock(LNET_LOCK_EX);
- /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
- * both EQ lookup and poll event with only lnet_eq_wait_lock */
+ /*
+ * NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
+ * both EQ lookup and poll event with only lnet_eq_wait_lock
+ */
lnet_eq_wait_lock();
eq = lnet_handle2eq(&eqh);
- if (eq == NULL) {
+ if (!eq) {
rc = -ENOENT;
goto out;
}
cfs_percpt_for_each(ref, i, eq->eq_refs) {
LASSERT(*ref >= 0);
- if (*ref == 0)
+ if (!*ref)
continue;
CDEBUG(D_NET, "Event equeue (%d: %d) busy on destroy.\n",
@@ -196,9 +204,9 @@ LNetEQFree(lnet_handle_eq_t eqh)
lnet_eq_wait_unlock();
lnet_res_unlock(LNET_LOCK_EX);
- if (events != NULL)
+ if (events)
LIBCFS_FREE(events, size * sizeof(lnet_event_t));
- if (refs != NULL)
+ if (refs)
cfs_percpt_free(refs);
return rc;
@@ -211,7 +219,7 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
/* MUST called with resource lock hold but w/o lnet_eq_wait_lock */
int index;
- if (eq->eq_size == 0) {
+ if (!eq->eq_size) {
LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
eq->eq_callback(ev);
return;
@@ -255,8 +263,10 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
if (eq->eq_deq_seq == new_event->sequence) {
rc = 1;
} else {
- /* don't complain with CERROR: some EQs are sized small
- * anyway; if it's important, the caller should complain */
+ /*
+ * don't complain with CERROR: some EQs are sized small
+ * anyway; if it's important, the caller should complain
+ */
CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
eq->eq_deq_seq, new_event->sequence);
rc = -EOVERFLOW;
@@ -309,8 +319,8 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
wait_queue_t wl;
unsigned long now;
- if (tms == 0)
- return -1; /* don't want to wait and no new event */
+ if (!tms)
+ return -ENXIO; /* don't want to wait and no new event */
init_waitqueue_entry(&wl, current);
set_current_state(TASK_INTERRUPTIBLE);
@@ -320,7 +330,6 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
if (tms < 0) {
schedule();
-
} else {
now = jiffies;
schedule_timeout(msecs_to_jiffies(tms));
@@ -329,7 +338,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
tms = 0;
}
- wait = tms != 0; /* might need to call here again */
+ wait = tms; /* might need to call here again */
*timeout_ms = tms;
lnet_eq_wait_lock();
@@ -372,7 +381,6 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
int rc;
int i;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (neq < 1)
@@ -384,20 +392,20 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
for (i = 0; i < neq; i++) {
lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);
- if (eq == NULL) {
+ if (!eq) {
lnet_eq_wait_unlock();
return -ENOENT;
}
rc = lnet_eq_dequeue_event(eq, event);
- if (rc != 0) {
+ if (rc) {
lnet_eq_wait_unlock();
*which = i;
return rc;
}
}
- if (wait == 0)
+ if (!wait)
break;
/*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c
index 758f5bedef7e..75d31217bf92 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-md.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-md.c
@@ -46,16 +46,18 @@
void
lnet_md_unlink(lnet_libmd_t *md)
{
- if ((md->md_flags & LNET_MD_FLAG_ZOMBIE) == 0) {
+ if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) {
/* first unlink attempt... */
lnet_me_t *me = md->md_me;
md->md_flags |= LNET_MD_FLAG_ZOMBIE;
- /* Disassociate from ME (if any),
+ /*
+ * Disassociate from ME (if any),
* and unlink it if it was created
- * with LNET_UNLINK */
- if (me != NULL) {
+ * with LNET_UNLINK
+ */
+ if (me) {
/* detach MD from portal */
lnet_ptl_detach_md(me, md);
if (me->me_unlink == LNET_UNLINK)
@@ -66,14 +68,14 @@ lnet_md_unlink(lnet_libmd_t *md)
lnet_res_lh_invalidate(&md->md_lh);
}
- if (md->md_refcount != 0) {
+ if (md->md_refcount) {
CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
return;
}
CDEBUG(D_NET, "Unlinking md %p\n", md);
- if (md->md_eq != NULL) {
+ if (md->md_eq) {
int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);
LASSERT(*md->md_eq->eq_refs[cpt] > 0);
@@ -103,12 +105,12 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
lmd->md_refcount = 0;
lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
- if ((umd->options & LNET_MD_IOVEC) != 0) {
-
- if ((umd->options & LNET_MD_KIOV) != 0) /* Can't specify both */
+ if (umd->options & LNET_MD_IOVEC) {
+ if (umd->options & LNET_MD_KIOV) /* Can't specify both */
return -EINVAL;
- lmd->md_niov = niov = umd->length;
+ niov = umd->length;
+ lmd->md_niov = umd->length;
memcpy(lmd->md_iov.iov, umd->start,
niov * sizeof(lmd->md_iov.iov[0]));
@@ -123,20 +125,21 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
lmd->md_length = total_length;
- if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* use max size */
+ if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */
(umd->max_size < 0 ||
umd->max_size > total_length)) /* illegal max_size */
return -EINVAL;
- } else if ((umd->options & LNET_MD_KIOV) != 0) {
- lmd->md_niov = niov = umd->length;
+ } else if (umd->options & LNET_MD_KIOV) {
+ niov = umd->length;
+ lmd->md_niov = umd->length;
memcpy(lmd->md_iov.kiov, umd->start,
niov * sizeof(lmd->md_iov.kiov[0]));
for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
+ lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
return -EINVAL; /* invalid length */
total_length += lmd->md_iov.kiov[i].kiov_len;
@@ -144,17 +147,18 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
lmd->md_length = total_length;
- if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+ if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
(umd->max_size < 0 ||
umd->max_size > total_length)) /* illegal max_size */
return -EINVAL;
} else { /* contiguous */
lmd->md_length = umd->length;
- lmd->md_niov = niov = 1;
+ niov = 1;
+ lmd->md_niov = 1;
lmd->md_iov.iov[0].iov_base = umd->start;
lmd->md_iov.iov[0].iov_len = umd->length;
- if ((umd->options & LNET_MD_MAX_SIZE) != 0 && /* max size used */
+ if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
(umd->max_size < 0 ||
umd->max_size > (int)umd->length)) /* illegal max_size */
return -EINVAL;
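lnet_md_build() above accepts exactly one of three payload layouts: an iovec list (LNET_MD_IOVEC), a page list (LNET_MD_KIOV), or a single contiguous buffer, and rejects IOVEC and KIOV together. A compact, hypothetical restatement of just that selection (the real function also copies the fragments and validates max_size against the total length):

	enum md_layout { MD_IOVEC, MD_KIOV, MD_CONTIG, MD_BAD };

	/* Hypothetical condensation of the layout choice above. */
	static enum md_layout md_layout_of(unsigned int options)
	{
		if (options & LNET_MD_IOVEC)
			return (options & LNET_MD_KIOV) ? MD_BAD : MD_IOVEC;
		if (options & LNET_MD_KIOV)
			return MD_KIOV;
		return MD_CONTIG;
	}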
@@ -169,22 +173,26 @@ lnet_md_link(lnet_libmd_t *md, lnet_handle_eq_t eq_handle, int cpt)
{
struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
- /* NB we are passed an allocated, but inactive md.
+ /*
+ * NB we are passed an allocated, but inactive md.
* if we return success, caller may lnet_md_unlink() it.
* otherwise caller may only lnet_md_free() it.
*/
- /* This implementation doesn't know how to create START events or
+ /*
+ * This implementation doesn't know how to create START events or
* disable END events. Best to LASSERT our caller is compliant so
- * we find out quickly... */
- /* TODO - reevaluate what should be here in light of
+ * we find out quickly...
+ */
+ /*
+ * TODO - reevaluate what should be here in light of
* the removal of the start and end events
* maybe there we shouldn't even allow LNET_EQ_NONE!)
- * LASSERT (eq == NULL);
+ * LASSERT(!eq);
*/
if (!LNetHandleIsInvalid(eq_handle)) {
md->md_eq = lnet_handle2eq(&eq_handle);
- if (md->md_eq == NULL)
+ if (!md->md_eq)
return -ENOENT;
(*md->md_eq->eq_refs[cpt])++;
@@ -208,8 +216,8 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
* and that's all.
*/
umd->start = lmd->md_start;
- umd->length = ((lmd->md_options &
- (LNET_MD_IOVEC | LNET_MD_KIOV)) == 0) ?
+ umd->length = !(lmd->md_options &
+ (LNET_MD_IOVEC | LNET_MD_KIOV)) ?
lmd->md_length : lmd->md_niov;
umd->threshold = lmd->md_threshold;
umd->max_size = lmd->md_max_size;
@@ -221,13 +229,13 @@ lnet_md_deconstruct(lnet_libmd_t *lmd, lnet_md_t *umd)
static int
lnet_md_validate(lnet_md_t *umd)
{
- if (umd->start == NULL && umd->length != 0) {
+ if (!umd->start && umd->length) {
CERROR("MD start pointer can not be NULL with length %u\n",
umd->length);
return -EINVAL;
}
- if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) != 0 &&
+ if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) &&
umd->length > LNET_MAX_IOV) {
CERROR("Invalid option: too many fragments %u, %d max\n",
umd->length, LNET_MAX_IOV);
@@ -273,41 +281,42 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd,
int cpt;
int rc;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
- if (lnet_md_validate(&umd) != 0)
+ if (lnet_md_validate(&umd))
return -EINVAL;
- if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) == 0) {
+ if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
CERROR("Invalid option: no MD_OP set\n");
return -EINVAL;
}
md = lnet_md_alloc(&umd);
- if (md == NULL)
+ if (!md)
return -ENOMEM;
rc = lnet_md_build(md, &umd, unlink);
cpt = lnet_cpt_of_cookie(meh.cookie);
lnet_res_lock(cpt);
- if (rc != 0)
+ if (rc)
goto failed;
me = lnet_handle2me(&meh);
- if (me == NULL)
+ if (!me)
rc = -ENOENT;
- else if (me->me_md != NULL)
+ else if (me->me_md)
rc = -EBUSY;
else
rc = lnet_md_link(md, umd.eq_handle, cpt);
- if (rc != 0)
+ if (rc)
goto failed;
- /* attach this MD to portal of ME and check if it matches any
- * blocked msgs on this portal */
+ /*
+ * attach this MD to portal of ME and check if it matches any
+ * blocked msgs on this portal
+ */
lnet_ptl_attach_md(me, md, &matches, &drops);
lnet_md2handle(handle, md);
@@ -350,29 +359,28 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle)
int cpt;
int rc;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
- if (lnet_md_validate(&umd) != 0)
+ if (lnet_md_validate(&umd))
return -EINVAL;
- if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) != 0) {
+ if (umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT)) {
CERROR("Invalid option: GET|PUT illegal on active MDs\n");
return -EINVAL;
}
md = lnet_md_alloc(&umd);
- if (md == NULL)
+ if (!md)
return -ENOMEM;
rc = lnet_md_build(md, &umd, unlink);
cpt = lnet_res_lock_current();
- if (rc != 0)
+ if (rc)
goto failed;
rc = lnet_md_link(md, umd.eq_handle, cpt);
- if (rc != 0)
+ if (rc)
goto failed;
lnet_md2handle(handle, md);
@@ -425,23 +433,24 @@ LNetMDUnlink(lnet_handle_md_t mdh)
lnet_libmd_t *md;
int cpt;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_cpt_of_cookie(mdh.cookie);
lnet_res_lock(cpt);
md = lnet_handle2md(&mdh);
- if (md == NULL) {
+ if (!md) {
lnet_res_unlock(cpt);
return -ENOENT;
}
md->md_flags |= LNET_MD_FLAG_ABORTED;
- /* If the MD is busy, lnet_md_unlink just marks it for deletion, and
+ /*
+ * If the MD is busy, lnet_md_unlink just marks it for deletion, and
* when the LND is done, the completion event flags that the MD was
- * unlinked. Otherwise, we enqueue an event now... */
- if (md->md_eq != NULL && md->md_refcount == 0) {
+ * unlinked. Otherwise, we enqueue an event now...
+ */
+ if (md->md_eq && !md->md_refcount) {
lnet_build_unlink_event(md, &ev);
lnet_eq_enqueue_event(md->md_eq, &ev);
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index 42fc99ef9f80..e671aed373df 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -83,7 +83,6 @@ LNetMEAttach(unsigned int portal,
struct lnet_me *me;
struct list_head *head;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if ((int)portal >= the_lnet.ln_nportals)
@@ -91,11 +90,11 @@ LNetMEAttach(unsigned int portal,
mtable = lnet_mt_of_attach(portal, match_id,
match_bits, ignore_bits, pos);
- if (mtable == NULL) /* can't match portal type */
+ if (!mtable) /* can't match portal type */
return -EPERM;
me = lnet_me_alloc();
- if (me == NULL)
+ if (!me)
return -ENOMEM;
lnet_res_lock(mtable->mt_cpt);
@@ -109,7 +108,7 @@ LNetMEAttach(unsigned int portal,
lnet_res_lh_initialize(the_lnet.ln_me_containers[mtable->mt_cpt],
&me->me_lh);
- if (ignore_bits != 0)
+ if (ignore_bits)
head = &mtable->mt_mhash[LNET_MT_HASH_IGNORE];
else
head = lnet_mt_match_head(mtable, match_id, match_bits);
@@ -156,14 +155,13 @@ LNetMEInsert(lnet_handle_me_t current_meh,
struct lnet_portal *ptl;
int cpt;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (pos == LNET_INS_LOCAL)
return -EPERM;
new_me = lnet_me_alloc();
- if (new_me == NULL)
+ if (!new_me)
return -ENOMEM;
cpt = lnet_cpt_of_cookie(current_meh.cookie);
@@ -171,7 +169,7 @@ LNetMEInsert(lnet_handle_me_t current_meh,
lnet_res_lock(cpt);
current_me = lnet_handle2me(&current_meh);
- if (current_me == NULL) {
+ if (!current_me) {
lnet_me_free(new_me);
lnet_res_unlock(cpt);
@@ -233,22 +231,21 @@ LNetMEUnlink(lnet_handle_me_t meh)
lnet_event_t ev;
int cpt;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_cpt_of_cookie(meh.cookie);
lnet_res_lock(cpt);
me = lnet_handle2me(&meh);
- if (me == NULL) {
+ if (!me) {
lnet_res_unlock(cpt);
return -ENOENT;
}
md = me->me_md;
- if (md != NULL) {
+ if (md) {
md->md_flags |= LNET_MD_FLAG_ABORTED;
- if (md->md_eq != NULL && md->md_refcount == 0) {
+ if (md->md_eq && !md->md_refcount) {
lnet_build_unlink_event(md, &ev);
lnet_eq_enqueue_event(md->md_eq, &ev);
}
@@ -267,7 +264,7 @@ lnet_me_unlink(lnet_me_t *me)
{
list_del(&me->me_list);
- if (me->me_md != NULL) {
+ if (me->me_md) {
lnet_libmd_t *md = me->me_md;
/* detach MD from portal of this ME */
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index fb8f7be043ec..f19aa9320e34 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -50,17 +50,16 @@ int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
lnet_test_peer_t *tp;
+ lnet_test_peer_t *temp;
struct list_head *el;
struct list_head *next;
struct list_head cull;
- LASSERT(the_lnet.ln_init);
-
/* NB: use lnet_net_lock(0) to serialize operations on test peers */
- if (threshold != 0) {
+ if (threshold) {
/* Adding a new entry */
LIBCFS_ALLOC(tp, sizeof(*tp));
- if (tp == NULL)
+ if (!tp)
return -ENOMEM;
tp->tp_nid = nid;
@@ -80,7 +79,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
tp = list_entry(el, lnet_test_peer_t, tp_list);
- if (tp->tp_threshold == 0 || /* needs culling anyway */
+ if (!tp->tp_threshold || /* needs culling anyway */
nid == LNET_NID_ANY || /* removing all entries */
tp->tp_nid == nid) { /* matched this one */
list_del(&tp->tp_list);
@@ -90,9 +89,7 @@ lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
lnet_net_unlock(0);
- while (!list_empty(&cull)) {
- tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
-
+ list_for_each_entry_safe(tp, temp, &cull, tp_list) {
list_del(&tp->tp_list);
LIBCFS_FREE(tp, sizeof(*tp));
}
@@ -103,6 +100,7 @@ static int
fail_peer(lnet_nid_t nid, int outgoing)
{
lnet_test_peer_t *tp;
+ lnet_test_peer_t *temp;
struct list_head *el;
struct list_head *next;
struct list_head cull;
@@ -116,12 +114,14 @@ fail_peer(lnet_nid_t nid, int outgoing)
list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
tp = list_entry(el, lnet_test_peer_t, tp_list);
- if (tp->tp_threshold == 0) {
+ if (!tp->tp_threshold) {
/* zombie entry */
if (outgoing) {
- /* only cull zombies on outgoing tests,
+ /*
+ * only cull zombies on outgoing tests,
* since we may be at interrupt priority on
- * incoming messages. */
+ * incoming messages.
+ */
list_del(&tp->tp_list);
list_add(&tp->tp_list, &cull);
}
@@ -135,7 +135,7 @@ fail_peer(lnet_nid_t nid, int outgoing)
if (tp->tp_threshold != LNET_MD_THRESH_INF) {
tp->tp_threshold--;
if (outgoing &&
- tp->tp_threshold == 0) {
+ !tp->tp_threshold) {
/* see above */
list_del(&tp->tp_list);
list_add(&tp->tp_list, &cull);
@@ -147,8 +147,7 @@ fail_peer(lnet_nid_t nid, int outgoing)
lnet_net_unlock(0);
- while (!list_empty(&cull)) {
- tp = list_entry(cull.next, lnet_test_peer_t, tp_list);
+ list_for_each_entry_safe(tp, temp, &cull, tp_list) {
list_del(&tp->tp_list);
LIBCFS_FREE(tp, sizeof(*tp));
@@ -162,6 +161,7 @@ lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
unsigned int nob = 0;
+ LASSERT(!niov || iov);
while (niov-- > 0)
nob += (iov++)->iov_len;
@@ -171,13 +171,13 @@ EXPORT_SYMBOL(lnet_iov_nob);
void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
- unsigned int nsiov, struct kvec *siov, unsigned int soffset,
- unsigned int nob)
+ unsigned int nsiov, struct kvec *siov, unsigned int soffset,
+ unsigned int nob)
{
/* NB diov, siov are READ-ONLY */
unsigned int this_nob;
- if (nob == 0)
+ if (!nob)
return;
/* skip complete frags before 'doffset' */
@@ -206,7 +206,7 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
this_nob = min(this_nob, nob);
memcpy((char *)diov->iov_base + doffset,
- (char *)siov->iov_base + soffset, this_nob);
+ (char *)siov->iov_base + soffset, this_nob);
nob -= this_nob;
if (diov->iov_len > doffset + this_nob) {
@@ -230,16 +230,18 @@ EXPORT_SYMBOL(lnet_copy_iov2iov);
int
lnet_extract_iov(int dst_niov, struct kvec *dst,
- int src_niov, struct kvec *src,
- unsigned int offset, unsigned int len)
+ int src_niov, struct kvec *src,
+ unsigned int offset, unsigned int len)
{
- /* Initialise 'dst' to the subset of 'src' starting at 'offset',
+ /*
+ * Initialise 'dst' to the subset of 'src' starting at 'offset',
* for exactly 'len' bytes, and return the number of entries.
- * NB not destructive to 'src' */
+ * NB not destructive to 'src'
+ */
unsigned int frag_len;
unsigned int niov;
- if (len == 0) /* no data => */
+ if (!len) /* no data => */
return 0; /* no frags */
LASSERT(src_niov > 0);
@@ -280,6 +282,7 @@ lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
unsigned int nob = 0;
+ LASSERT(!niov || kiov);
while (niov-- > 0)
nob += (kiov++)->kiov_len;
@@ -297,7 +300,7 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
char *daddr = NULL;
char *saddr = NULL;
- if (nob == 0)
+ if (!nob)
return;
LASSERT(!in_interrupt());
@@ -325,17 +328,18 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
siov->kiov_len - soffset);
this_nob = min(this_nob, nob);
- if (daddr == NULL)
+ if (!daddr)
daddr = ((char *)kmap(diov->kiov_page)) +
diov->kiov_offset + doffset;
- if (saddr == NULL)
+ if (!saddr)
saddr = ((char *)kmap(siov->kiov_page)) +
siov->kiov_offset + soffset;
- /* Vanishing risk of kmap deadlock when mapping 2 pages.
+ /*
+ * Vanishing risk of kmap deadlock when mapping 2 pages.
* However in practice at least one of the kiovs will be mapped
- * kernel pages and the map/unmap will be NOOPs */
-
+ * kernel pages and the map/unmap will be NOOPs
+ */
memcpy(daddr, saddr, this_nob);
nob -= this_nob;
@@ -362,9 +366,9 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
}
} while (nob > 0);
- if (daddr != NULL)
+ if (daddr)
kunmap(diov->kiov_page);
- if (saddr != NULL)
+ if (saddr)
kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
@@ -378,7 +382,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
unsigned int this_nob;
char *addr = NULL;
- if (nob == 0)
+ if (!nob)
return;
LASSERT(!in_interrupt());
@@ -406,7 +410,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
(__kernel_size_t) kiov->kiov_len - kiovoffset);
this_nob = min(this_nob, nob);
- if (addr == NULL)
+ if (!addr)
addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
@@ -434,7 +438,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
} while (nob > 0);
- if (addr != NULL)
+ if (addr)
kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);
@@ -449,7 +453,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
unsigned int this_nob;
char *addr = NULL;
- if (nob == 0)
+ if (!nob)
return;
LASSERT(!in_interrupt());
@@ -477,7 +481,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
iov->iov_len - iovoffset);
this_nob = min(this_nob, nob);
- if (addr == NULL)
+ if (!addr)
addr = ((char *)kmap(kiov->kiov_page)) +
kiov->kiov_offset + kiovoffset;
@@ -504,23 +508,25 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
}
} while (nob > 0);
- if (addr != NULL)
+ if (addr)
kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);
int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
- unsigned int offset, unsigned int len)
+ int src_niov, lnet_kiov_t *src,
+ unsigned int offset, unsigned int len)
{
- /* Initialise 'dst' to the subset of 'src' starting at 'offset',
+ /*
+ * Initialise 'dst' to the subset of 'src' starting at 'offset',
* for exactly 'len' bytes, and return the number of entries.
- * NB not destructive to 'src' */
+ * NB not destructive to 'src'
+ */
unsigned int frag_len;
unsigned int niov;
- if (len == 0) /* no data => */
+ if (!len) /* no data => */
return 0; /* no frags */
LASSERT(src_niov > 0);
@@ -543,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
if (len <= frag_len) {
dst->kiov_len = len;
LASSERT(dst->kiov_offset + dst->kiov_len
- <= PAGE_CACHE_SIZE);
+ <= PAGE_SIZE);
return niov;
}
dst->kiov_len = frag_len;
- LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+ LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
len -= frag_len;
dst++;
@@ -560,7 +566,7 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
}
EXPORT_SYMBOL(lnet_extract_kiov);
-static void
+void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
@@ -570,9 +576,9 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
int rc;
LASSERT(!in_interrupt());
- LASSERT(mlen == 0 || msg != NULL);
+ LASSERT(!mlen || msg);
- if (msg != NULL) {
+ if (msg) {
LASSERT(msg->msg_receiving);
LASSERT(!msg->msg_sending);
LASSERT(rlen == msg->msg_len);
@@ -582,18 +588,18 @@ lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
msg->msg_receiving = 0;
- if (mlen != 0) {
+ if (mlen) {
niov = msg->msg_niov;
iov = msg->msg_iov;
kiov = msg->msg_kiov;
LASSERT(niov > 0);
- LASSERT((iov == NULL) != (kiov == NULL));
+ LASSERT(!iov != !kiov);
}
}
- rc = (ni->ni_lnd->lnd_recv)(ni, private, msg, delayed,
- niov, iov, kiov, offset, mlen, rlen);
+ rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed,
+ niov, iov, kiov, offset, mlen, rlen);
if (rc < 0)
lnet_finalize(ni, msg, rc);
}
@@ -605,13 +611,13 @@ lnet_setpayloadbuffer(lnet_msg_t *msg)
LASSERT(msg->msg_len > 0);
LASSERT(!msg->msg_routing);
- LASSERT(md != NULL);
- LASSERT(msg->msg_niov == 0);
- LASSERT(msg->msg_iov == NULL);
- LASSERT(msg->msg_kiov == NULL);
+ LASSERT(md);
+ LASSERT(!msg->msg_niov);
+ LASSERT(!msg->msg_iov);
+ LASSERT(!msg->msg_kiov);
msg->msg_niov = md->md_niov;
- if ((md->md_options & LNET_MD_KIOV) != 0)
+ if (md->md_options & LNET_MD_KIOV)
msg->msg_kiov = md->md_iov.kiov;
else
msg->msg_iov = md->md_iov.iov;
@@ -626,7 +632,7 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
msg->msg_len = len;
msg->msg_offset = offset;
- if (len != 0)
+ if (len)
lnet_setpayloadbuffer(msg);
memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
@@ -646,9 +652,9 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
LASSERT(!in_interrupt());
LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
- (msg->msg_txcredit && msg->msg_peertxcredit));
+ (msg->msg_txcredit && msg->msg_peertxcredit));
- rc = (ni->ni_lnd->lnd_send)(ni, priv, msg);
+ rc = ni->ni_lnd->lnd_send(ni, priv, msg);
if (rc < 0)
lnet_finalize(ni, msg, rc);
}
@@ -661,12 +667,12 @@ lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
LASSERT(!msg->msg_sending);
LASSERT(msg->msg_receiving);
LASSERT(!msg->msg_rx_ready_delay);
- LASSERT(ni->ni_lnd->lnd_eager_recv != NULL);
+ LASSERT(ni->ni_lnd->lnd_eager_recv);
msg->msg_rx_ready_delay = 1;
- rc = (ni->ni_lnd->lnd_eager_recv)(ni, msg->msg_private, msg,
- &msg->msg_private);
- if (rc != 0) {
+ rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg,
+ &msg->msg_private);
+ if (rc) {
CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
libcfs_nid2str(msg->msg_rxpeer->lp_nid),
libcfs_id2str(msg->msg_target), rc);
@@ -683,15 +689,15 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
unsigned long last_alive = 0;
LASSERT(lnet_peer_aliveness_enabled(lp));
- LASSERT(ni->ni_lnd->lnd_query != NULL);
+ LASSERT(ni->ni_lnd->lnd_query);
lnet_net_unlock(lp->lp_cpt);
- (ni->ni_lnd->lnd_query)(ni, lp->lp_nid, &last_alive);
+ ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
lnet_net_lock(lp->lp_cpt);
lp->lp_last_query = cfs_time_current();
- if (last_alive != 0) /* NI has updated timestamp */
+ if (last_alive) /* NI has updated timestamp */
lp->lp_last_alive = last_alive;
}
@@ -720,14 +726,16 @@ lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
* case, and moreover lp_last_alive at peer creation is assumed.
*/
if (alive && !lp->lp_alive &&
- !(lnet_isrouter(lp) && lp->lp_alive_count == 0))
+ !(lnet_isrouter(lp) && !lp->lp_alive_count))
lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
return alive;
}
-/* NB: returns 1 when alive, 0 when dead, negative when error;
- * may drop the lnet_net_lock */
+/*
+ * NB: returns 1 when alive, 0 when dead, negative when error;
+ * may drop the lnet_net_lock
+ */
static int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
@@ -739,9 +747,11 @@ lnet_peer_alive_locked(lnet_peer_t *lp)
if (lnet_peer_is_alive(lp, now))
return 1;
- /* Peer appears dead, but we should avoid frequent NI queries (at
- * most once per lnet_queryinterval seconds). */
- if (lp->lp_last_query != 0) {
+ /*
+ * Peer appears dead, but we should avoid frequent NI queries (at
+ * most once per lnet_queryinterval seconds).
+ */
+ if (lp->lp_last_query) {
static const int lnet_queryinterval = 1;
unsigned long next_query =
@@ -775,10 +785,10 @@ lnet_peer_alive_locked(lnet_peer_t *lp)
* lnet_send() is going to lnet_net_unlock immediately after this, so
* it sets do_send FALSE and I don't do the unlock/send/lock bit.
*
- * \retval 0 If \a msg sent or OK to send.
- * \retval EAGAIN If \a msg blocked for credit.
- * \retval EHOSTUNREACH If the next hop of the message appears dead.
- * \retval ECANCELED If the MD of the message has been unlinked.
+ * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
+ * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
+ * \retval -EHOSTUNREACH If the next hop of the message appears dead.
+ * \retval -ECANCELED If the MD of the message has been unlinked.
*/
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
@@ -794,8 +804,8 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
LASSERT(msg->msg_tx_committed);
/* NB 'lp' is always the next hop */
- if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
- lnet_peer_alive_locked(lp) == 0) {
+ if (!(msg->msg_target.pid & LNET_PID_USERFLAG) &&
+ !lnet_peer_alive_locked(lp)) {
the_lnet.ln_counters[cpt]->drop_count++;
the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
lnet_net_unlock(cpt);
@@ -806,11 +816,11 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
lnet_finalize(ni, msg, -EHOSTUNREACH);
lnet_net_lock(cpt);
- return EHOSTUNREACH;
+ return -EHOSTUNREACH;
}
- if (msg->msg_md != NULL &&
- (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED) != 0) {
+ if (msg->msg_md &&
+ (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) {
lnet_net_unlock(cpt);
CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
@@ -819,12 +829,12 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
lnet_finalize(ni, msg, -ECANCELED);
lnet_net_lock(cpt);
- return ECANCELED;
+ return -ECANCELED;
}
if (!msg->msg_peertxcredit) {
LASSERT((lp->lp_txcredits < 0) ==
- !list_empty(&lp->lp_txq));
+ !list_empty(&lp->lp_txq));
msg->msg_peertxcredit = 1;
lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
@@ -836,7 +846,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
if (lp->lp_txcredits < 0) {
msg->msg_tx_delayed = 1;
list_add_tail(&msg->msg_list, &lp->lp_txq);
- return EAGAIN;
+ return LNET_CREDIT_WAIT;
}
}
@@ -853,7 +863,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
if (tq->tq_credits < 0) {
msg->msg_tx_delayed = 1;
list_add_tail(&msg->msg_list, &tq->tq_delayed);
- return EAGAIN;
+ return LNET_CREDIT_WAIT;
}
}
@@ -862,7 +872,7 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send)
lnet_ni_send(ni, msg);
lnet_net_lock(cpt);
}
- return 0;
+ return LNET_CREDIT_OK;
}
static lnet_rtrbufpool_t *
@@ -877,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
rbp = &the_lnet.ln_rtrpools[cpt][0];
LASSERT(msg->msg_len <= LNET_MTU);
- while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
+ while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
rbp++;
LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
}
@@ -888,16 +898,19 @@ lnet_msg2bufpool(lnet_msg_t *msg)
static int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
- /* lnet_parse is going to lnet_net_unlock immediately after this, so it
- * sets do_recv FALSE and I don't do the unlock/send/lock bit. I
- * return EAGAIN if msg blocked and 0 if received or OK to receive */
+ /*
+ * lnet_parse is going to lnet_net_unlock immediately after this, so it
+ * sets do_recv FALSE and I don't do the unlock/send/lock bit.
+ * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
+ * received or OK to receive
+ */
lnet_peer_t *lp = msg->msg_rxpeer;
lnet_rtrbufpool_t *rbp;
lnet_rtrbuf_t *rb;
- LASSERT(msg->msg_iov == NULL);
- LASSERT(msg->msg_kiov == NULL);
- LASSERT(msg->msg_niov == 0);
+ LASSERT(!msg->msg_iov);
+ LASSERT(!msg->msg_kiov);
+ LASSERT(!msg->msg_niov);
LASSERT(msg->msg_routing);
LASSERT(msg->msg_receiving);
LASSERT(!msg->msg_sending);
@@ -907,7 +920,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
if (!msg->msg_peerrtrcredit) {
LASSERT((lp->lp_rtrcredits < 0) ==
- !list_empty(&lp->lp_rtrq));
+ !list_empty(&lp->lp_rtrq));
msg->msg_peerrtrcredit = 1;
lp->lp_rtrcredits--;
@@ -919,16 +932,13 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
list_add_tail(&msg->msg_list, &lp->lp_rtrq);
- return EAGAIN;
+ return LNET_CREDIT_WAIT;
}
}
rbp = lnet_msg2bufpool(msg);
if (!msg->msg_rtrcredit) {
- LASSERT((rbp->rbp_credits < 0) ==
- !list_empty(&rbp->rbp_msgs));
-
msg->msg_rtrcredit = 1;
rbp->rbp_credits--;
if (rbp->rbp_credits < rbp->rbp_mincredits)
@@ -939,7 +949,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
- return EAGAIN;
+ return LNET_CREDIT_WAIT;
}
}
@@ -958,7 +968,7 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
0, msg->msg_len, msg->msg_len);
lnet_net_lock(cpt);
}
- return 0;
+ return LNET_CREDIT_OK;
}
void
@@ -980,7 +990,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
tq->tq_credits++;
if (tq->tq_credits <= 0) {
msg2 = list_entry(tq->tq_delayed.next,
- lnet_msg_t, msg_list);
+ lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer->lp_ni == ni);
@@ -1003,7 +1013,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
txpeer->lp_txcredits++;
if (txpeer->lp_txcredits <= 0) {
msg2 = list_entry(txpeer->lp_txq.next,
- lnet_msg_t, msg_list);
+ lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
LASSERT(msg2->msg_txpeer == txpeer);
@@ -1013,13 +1023,50 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
}
}
- if (txpeer != NULL) {
+ if (txpeer) {
msg->msg_txpeer = NULL;
lnet_peer_decref_locked(txpeer);
}
}
void
+lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
+{
+ lnet_msg_t *msg;
+
+ if (list_empty(&rbp->rbp_msgs))
+ return;
+ msg = list_entry(rbp->rbp_msgs.next,
+ lnet_msg_t, msg_list);
+ list_del(&msg->msg_list);
+
+ (void)lnet_post_routed_recv_locked(msg, 1);
+}
+
+void
+lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
+{
+ struct list_head drop;
+ lnet_msg_t *msg;
+ lnet_msg_t *tmp;
+
+ INIT_LIST_HEAD(&drop);
+
+ list_splice_init(list, &drop);
+
+ lnet_net_unlock(cpt);
+
+ list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
+ lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL,
+ 0, 0, 0, msg->msg_hdr.payload_length);
+ list_del_init(&msg->msg_list);
+ lnet_finalize(NULL, msg, -ECANCELED);
+ }
+
+ lnet_net_lock(cpt);
+}
+
+void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
lnet_peer_t *rxpeer = msg->msg_rxpeer;
@@ -1030,34 +1077,51 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg)
lnet_rtrbuf_t *rb;
lnet_rtrbufpool_t *rbp;
- /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays
+ /*
+ * NB If a msg ever blocks for a buffer in rbp_msgs, it stays
* there until it gets one allocated, or aborts the wait
- * itself */
- LASSERT(msg->msg_kiov != NULL);
+ * itself
+ */
+ LASSERT(msg->msg_kiov);
rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
rbp = rb->rb_pool;
- LASSERT(rbp == lnet_msg2bufpool(msg));
msg->msg_kiov = NULL;
msg->msg_rtrcredit = 0;
- LASSERT((rbp->rbp_credits < 0) ==
- !list_empty(&rbp->rbp_msgs));
+ LASSERT(rbp == lnet_msg2bufpool(msg));
+
LASSERT((rbp->rbp_credits > 0) ==
!list_empty(&rbp->rbp_bufs));
- list_add(&rb->rb_list, &rbp->rbp_bufs);
- rbp->rbp_credits++;
- if (rbp->rbp_credits <= 0) {
- msg2 = list_entry(rbp->rbp_msgs.next,
- lnet_msg_t, msg_list);
- list_del(&msg2->msg_list);
+ /*
+ * If routing is now turned off, we just drop this buffer and
+ * don't bother trying to return credits.
+ */
+ if (!the_lnet.ln_routing) {
+ lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
+ goto routing_off;
+ }
- (void) lnet_post_routed_recv_locked(msg2, 1);
+ /*
+ * It is possible that a user has lowered the desired number of
+ * buffers in this pool. Make sure we never put back
+ * more buffers than the stated number.
+ */
+ if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
+ /* Discard this buffer so we don't have too many. */
+ lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
+ rbp->rbp_nbuffers--;
+ } else {
+ list_add(&rb->rb_list, &rbp->rbp_bufs);
+ rbp->rbp_credits++;
+ if (rbp->rbp_credits <= 0)
+ lnet_schedule_blocked_locked(rbp);
}
}
+routing_off:
if (msg->msg_peerrtrcredit) {
/* give back peer router credits */
msg->msg_peerrtrcredit = 0;
@@ -1066,15 +1130,22 @@ lnet_return_rx_credits_locked(lnet_msg_t *msg)
!list_empty(&rxpeer->lp_rtrq));
rxpeer->lp_rtrcredits++;
- if (rxpeer->lp_rtrcredits <= 0) {
+ /*
+ * drop all messages which are queued to be routed on that
+ * peer.
+ */
+ if (!the_lnet.ln_routing) {
+ lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
+ msg->msg_rx_cpt);
+ } else if (rxpeer->lp_rtrcredits <= 0) {
msg2 = list_entry(rxpeer->lp_rtrq.next,
- lnet_msg_t, msg_list);
+ lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
(void) lnet_post_routed_recv_locked(msg2, 1);
}
}
- if (rxpeer != NULL) {
+ if (rxpeer) {
msg->msg_rxpeer = NULL;
lnet_peer_decref_locked(rxpeer);
}
@@ -1085,94 +1156,99 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
lnet_peer_t *p1 = r1->lr_gateway;
lnet_peer_t *p2 = r2->lr_gateway;
+ int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
+ int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
if (r1->lr_priority < r2->lr_priority)
return 1;
if (r1->lr_priority > r2->lr_priority)
- return -1;
+ return -ERANGE;
- if (r1->lr_hops < r2->lr_hops)
+ if (r1_hops < r2_hops)
return 1;
- if (r1->lr_hops > r2->lr_hops)
- return -1;
+ if (r1_hops > r2_hops)
+ return -ERANGE;
if (p1->lp_txqnob < p2->lp_txqnob)
return 1;
if (p1->lp_txqnob > p2->lp_txqnob)
- return -1;
+ return -ERANGE;
if (p1->lp_txcredits > p2->lp_txcredits)
return 1;
if (p1->lp_txcredits < p2->lp_txcredits)
- return -1;
+ return -ERANGE;
if (r1->lr_seq - r2->lr_seq <= 0)
return 1;
- return -1;
+ return -ERANGE;
}
static lnet_peer_t *
lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
{
lnet_remotenet_t *rnet;
- lnet_route_t *rtr;
- lnet_route_t *rtr_best;
- lnet_route_t *rtr_last;
+ lnet_route_t *route;
+ lnet_route_t *best_route;
+ lnet_route_t *last_route;
struct lnet_peer *lp_best;
struct lnet_peer *lp;
int rc;
- /* If @rtr_nid is not LNET_NID_ANY, return the gateway with
- * rtr_nid nid, otherwise find the best gateway I can use */
-
+ /*
+ * If @rtr_nid is not LNET_NID_ANY, return the gateway with
+ * rtr_nid nid, otherwise find the best gateway I can use
+ */
rnet = lnet_find_net_locked(LNET_NIDNET(target));
- if (rnet == NULL)
+ if (!rnet)
return NULL;
lp_best = NULL;
- rtr_best = rtr_last = NULL;
- list_for_each_entry(rtr, &rnet->lrn_routes, lr_list) {
- lp = rtr->lr_gateway;
+ best_route = NULL;
+ last_route = NULL;
+ list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
+ lp = route->lr_gateway;
- if (!lp->lp_alive || /* gateway is down */
- ((lp->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0 &&
- rtr->lr_downis != 0)) /* NI to target is down */
+ if (!lnet_is_route_alive(route))
continue;
- if (ni != NULL && lp->lp_ni != ni)
+ if (ni && lp->lp_ni != ni)
continue;
if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
return lp;
- if (lp_best == NULL) {
- rtr_best = rtr_last = rtr;
+ if (!lp_best) {
+ best_route = route;
+ last_route = route;
lp_best = lp;
continue;
}
/* no protection on below fields, but it's harmless */
- if (rtr_last->lr_seq - rtr->lr_seq < 0)
- rtr_last = rtr;
+ if (last_route->lr_seq - route->lr_seq < 0)
+ last_route = route;
- rc = lnet_compare_routes(rtr, rtr_best);
+ rc = lnet_compare_routes(route, best_route);
if (rc < 0)
continue;
- rtr_best = rtr;
+ best_route = route;
lp_best = lp;
}
- /* set sequence number on the best router to the latest sequence + 1
+ /*
+ * set sequence number on the best router to the latest sequence + 1
* so we can round-robin all routers; it's racy and inaccurate but
- * harmless and functional */
- if (rtr_best != NULL)
- rtr_best->lr_seq = rtr_last->lr_seq + 1;
+ * harmless and functional
+ */
+ if (best_route)
+ best_route->lr_seq = last_route->lr_seq + 1;
return lp_best;
}
@@ -1187,11 +1263,13 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
int cpt2;
int rc;
- /* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
+ /*
+ * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
* but we might want to use pre-determined router for ACK/REPLY
- * in the future */
- /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
- LASSERT(msg->msg_txpeer == NULL);
+ * in the future
+ */
+ /* NB: a non-NULL ni means the interface is pre-determined (ACK/REPLY) */
+ LASSERT(!msg->msg_txpeer);
LASSERT(!msg->msg_sending);
LASSERT(!msg->msg_target_is_router);
LASSERT(!msg->msg_receiving);
@@ -1212,7 +1290,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
src_ni = NULL;
} else {
src_ni = lnet_nid2ni_locked(src_nid, cpt);
- if (src_ni == NULL) {
+ if (!src_ni) {
lnet_net_unlock(cpt);
LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
libcfs_nid2str(dst_nid),
@@ -1225,8 +1303,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
/* Is this for someone on a local network? */
local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);
- if (local_ni != NULL) {
- if (src_ni == NULL) {
+ if (local_ni) {
+ if (!src_ni) {
src_ni = local_ni;
src_nid = src_ni->ni_nid;
} else if (src_ni == local_ni) {
@@ -1261,7 +1339,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
/* lp has ref on src_ni; lose mine */
lnet_ni_decref_locked(src_ni, cpt);
- if (rc != 0) {
+ if (rc) {
lnet_net_unlock(cpt);
LCONSOLE_WARN("Error %d finding peer %s\n", rc,
libcfs_nid2str(dst_nid));
@@ -1272,8 +1350,8 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
} else {
/* sending to a remote network */
lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
- if (lp == NULL) {
- if (src_ni != NULL)
+ if (!lp) {
+ if (src_ni)
lnet_ni_decref_locked(src_ni, cpt);
lnet_net_unlock(cpt);
@@ -1283,14 +1361,16 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
return -EHOSTUNREACH;
}
- /* rtr_nid is LNET_NID_ANY or NID of pre-determined router,
+ /*
+ * rtr_nid is LNET_NID_ANY or NID of pre-determined router,
* it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
* pre-determined router, this can happen if router table
- * was changed when we release the lock */
+ * was changed when we release the lock
+ */
if (rtr_nid != lp->lp_nid) {
cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
if (cpt2 != cpt) {
- if (src_ni != NULL)
+ if (src_ni)
lnet_ni_decref_locked(src_ni, cpt);
lnet_net_unlock(cpt);
@@ -1304,7 +1384,7 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
lnet_msgtyp2str(msg->msg_type), msg->msg_len);
- if (src_ni == NULL) {
+ if (!src_ni) {
src_ni = lp->lp_ni;
src_nid = src_ni->ni_nid;
} else {
@@ -1324,30 +1404,30 @@ lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
msg->msg_target_is_router = 1;
msg->msg_target.nid = lp->lp_nid;
- msg->msg_target.pid = LUSTRE_SRV_LNET_PID;
+ msg->msg_target.pid = LNET_PID_LUSTRE;
}
/* 'lp' is our best choice of peer */
LASSERT(!msg->msg_peertxcredit);
LASSERT(!msg->msg_txcredit);
- LASSERT(msg->msg_txpeer == NULL);
+ LASSERT(!msg->msg_txpeer);
msg->msg_txpeer = lp; /* msg takes my ref on lp */
rc = lnet_post_send_locked(msg, 0);
lnet_net_unlock(cpt);
- if (rc == EHOSTUNREACH || rc == ECANCELED)
- return -rc;
+ if (rc < 0)
+ return rc;
- if (rc == 0)
+ if (rc == LNET_CREDIT_OK)
lnet_ni_send(src_ni, msg);
- return 0; /* rc == 0 or EAGAIN */
+ return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
}
-static void
+void
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
{
lnet_net_lock(cpt);
@@ -1363,15 +1443,17 @@ lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
lnet_hdr_t *hdr = &msg->msg_hdr;
- if (msg->msg_wanted != 0)
+ if (msg->msg_wanted)
lnet_setpayloadbuffer(msg);
lnet_build_msg_event(msg, LNET_EVENT_PUT);
- /* Must I ACK? If so I'll grab the ack_wmd out of the header and put
- * it back into the ACK during lnet_finalize() */
- msg->msg_ack = (!lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
- (msg->msg_md->md_options & LNET_MD_ACK_DISABLE) == 0);
+ /*
+ * Must I ACK? If so I'll grab the ack_wmd out of the header and put
+ * it back into the ACK during lnet_finalize()
+ */
+ msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
+ !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE);
lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
msg->msg_offset, msg->msg_wanted, hdr->payload_length);
@@ -1382,6 +1464,7 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
lnet_hdr_t *hdr = &msg->msg_hdr;
struct lnet_match_info info;
+ bool ready_delay;
int rc;
/* Convert put fields to host byte order */
@@ -1397,7 +1480,8 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
info.mi_roffset = hdr->msg.put.offset;
info.mi_mbits = hdr->msg.put.match_bits;
- msg->msg_rx_ready_delay = ni->ni_lnd->lnd_eager_recv == NULL;
+ msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv;
+ ready_delay = msg->msg_rx_ready_delay;
again:
rc = lnet_ptl_match_md(&info, msg);
@@ -1410,12 +1494,18 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
return 0;
case LNET_MATCHMD_NONE:
- if (msg->msg_rx_delayed) /* attached on delayed list */
+ /*
+ * no eager_recv, or it has already been called; the message
+ * should have been attached to the delayed list
+ */
+ if (ready_delay)
return 0;
rc = lnet_ni_eager_recv(ni, msg);
- if (rc == 0)
+ if (!rc) {
+ ready_delay = true;
goto again;
+ }
/* fall through */
case LNET_MATCHMD_DROP:
@@ -1423,7 +1513,7 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
libcfs_id2str(info.mi_id), info.mi_portal,
info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);
- return ENOENT; /* +ve: OK but no match */
+ return -ENOENT; /* -ve: OK but no match */
}
}
@@ -1454,7 +1544,7 @@ lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
libcfs_id2str(info.mi_id), info.mi_portal,
info.mi_mbits, info.mi_roffset, info.mi_rlength);
- return ENOENT; /* +ve: OK but no match */
+ return -ENOENT; /* -ve: OK but no match */
}
LASSERT(rc == LNET_MATCHMD_OK);
@@ -1510,33 +1600,33 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
/* NB handles only looked up by creator (no flips) */
md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
- if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+ if (!md || !md->md_threshold || md->md_me) {
CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- (md == NULL) ? "invalid" : "inactive",
+ !md ? "invalid" : "inactive",
hdr->msg.reply.dst_wmd.wh_interface_cookie,
hdr->msg.reply.dst_wmd.wh_object_cookie);
- if (md != NULL && md->md_me != NULL)
+ if (md && md->md_me)
CERROR("REPLY MD also attached to portal %d\n",
md->md_me->me_portal);
lnet_res_unlock(cpt);
- return ENOENT; /* +ve: OK but no match */
+ return -ENOENT; /* -ve: OK but no match */
}
- LASSERT(md->md_offset == 0);
+ LASSERT(!md->md_offset);
rlength = hdr->payload_length;
mlength = min_t(uint, rlength, md->md_length);
if (mlength < rlength &&
- (md->md_options & LNET_MD_TRUNCATE) == 0) {
+ !(md->md_options & LNET_MD_TRUNCATE)) {
CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
mlength);
lnet_res_unlock(cpt);
- return ENOENT; /* +ve: OK but no match */
+ return -ENOENT; /* -ve: OK but no match */
}
CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
@@ -1545,7 +1635,7 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
lnet_msg_attach_md(msg, md, 0, mlength);
- if (mlength != 0)
+ if (mlength)
lnet_setpayloadbuffer(msg);
lnet_res_unlock(cpt);
@@ -1576,20 +1666,20 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
/* NB handles only looked up by creator (no flips) */
md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
- if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+ if (!md || !md->md_threshold || md->md_me) {
/* Don't moan; this is expected */
CDEBUG(D_NET,
"%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
- (md == NULL) ? "invalid" : "inactive",
+ !md ? "invalid" : "inactive",
hdr->msg.ack.dst_wmd.wh_interface_cookie,
hdr->msg.ack.dst_wmd.wh_object_cookie);
- if (md != NULL && md->md_me != NULL)
+ if (md && md->md_me)
CERROR("Source MD also attached to portal %d\n",
md->md_me->me_portal);
lnet_res_unlock(cpt);
- return ENOENT; /* +ve! */
+ return -ENOENT; /* -ve! */
}
CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
@@ -1606,14 +1696,22 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
return 0;
}
-static int
+/**
+ * \retval LNET_CREDIT_OK If \a msg is forwarded
+ * \retval LNET_CREDIT_WAIT If \a msg is blocked because no buffer is available
+ * \retval -ve error code
+ */
+int
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
{
int rc = 0;
+ if (!the_lnet.ln_routing)
+ return -ECANCELED;
+
if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
lnet_msg2bufpool(msg)->rbp_credits <= 0) {
- if (ni->ni_lnd->lnd_eager_recv == NULL) {
+ if (!ni->ni_lnd->lnd_eager_recv) {
msg->msg_rx_ready_delay = 1;
} else {
lnet_net_unlock(msg->msg_rx_cpt);
@@ -1622,11 +1720,38 @@ lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
}
}
- if (rc == 0)
+ if (!rc)
rc = lnet_post_routed_recv_locked(msg, 0);
return rc;
}
+int
+lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg)
+{
+ int rc;
+
+ switch (msg->msg_type) {
+ case LNET_MSG_ACK:
+ rc = lnet_parse_ack(ni, msg);
+ break;
+ case LNET_MSG_PUT:
+ rc = lnet_parse_put(ni, msg);
+ break;
+ case LNET_MSG_GET:
+ rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
+ break;
+ case LNET_MSG_REPLY:
+ rc = lnet_parse_reply(ni, msg);
+ break;
+ default:
+ LASSERT(0);
+ return -EPROTO;
+ }
+
+ LASSERT(!rc || rc == -ENOENT);
+ return rc;
+}
+
char *
lnet_msgtyp2str(int type)
{
@@ -1702,7 +1827,6 @@ lnet_print_hdr(lnet_hdr_t *hdr)
hdr->msg.reply.dst_wmd.wh_object_cookie,
hdr->payload_length);
}
-
}
int
@@ -1765,20 +1889,20 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
if (the_lnet.ln_routing &&
ni->ni_last_alive != ktime_get_real_seconds()) {
- lnet_ni_lock(ni);
-
/* NB: so far here is the only place to set NI status to "up" */
+ lnet_ni_lock(ni);
ni->ni_last_alive = ktime_get_real_seconds();
- if (ni->ni_status != NULL &&
+ if (ni->ni_status &&
ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
ni->ni_status->ns_status = LNET_NI_STATUS_UP;
lnet_ni_unlock(ni);
}
- /* Regard a bad destination NID as a protocol error. Senders should
+ /*
+ * Regard a bad destination NID as a protocol error. Senders should
* know what they're doing; if they don't they're misconfigured, buggy
- * or malicious so we chop them off at the knees :) */
-
+ * or malicious so we chop them off at the knees :)
+ */
if (!for_me) {
if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
/* should have gone direct */
@@ -1790,8 +1914,10 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
}
if (lnet_islocalnid(dest_nid)) {
- /* dest is another local NI; sender should have used
- * this node's NID on its own network */
+ /*
+ * dest is another local NI; sender should have used
+ * this node's NID on its own network
+ */
CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
libcfs_nid2str(from_nid),
libcfs_nid2str(src_nid),
@@ -1816,9 +1942,10 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
}
}
- /* Message looks OK; we're not going to return an error, so we MUST
- * call back lnd_recv() come what may... */
-
+ /*
+ * Message looks OK; we're not going to return an error, so we MUST
+ * call back lnd_recv() come what may...
+ */
if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
fail_peer(src_nid, 0)) { /* shall we now? */
CERROR("%s, src %s: Dropping %s to simulate failure\n",
@@ -1827,8 +1954,16 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
goto drop;
}
+ if (!list_empty(&the_lnet.ln_drop_rules) &&
+ lnet_drop_rule_match(hdr)) {
+ CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
+ libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
+ libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
+ goto drop;
+ }
+
msg = lnet_msg_alloc();
- if (msg == NULL) {
+ if (!msg) {
CERROR("%s, src %s: Dropping %s (out of memory)\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
lnet_msgtyp2str(type));
@@ -1838,11 +1973,12 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
/* msg zeroed in lnet_msg_alloc;
* i.e. flags all clear, pointers NULL etc
*/
-
msg->msg_type = type;
msg->msg_private = private;
msg->msg_receiving = 1;
- msg->msg_len = msg->msg_wanted = payload_length;
+ msg->msg_rdma_get = rdma_req;
+ msg->msg_wanted = payload_length;
+ msg->msg_len = payload_length;
msg->msg_offset = 0;
msg->msg_hdr = *hdr;
/* for building message event */
@@ -1864,7 +2000,7 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
lnet_net_lock(cpt);
rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
- if (rc != 0) {
+ if (rc) {
lnet_net_unlock(cpt);
CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
@@ -1888,13 +2024,21 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
lnet_msg_commit(msg, cpt);
+ /* message delay simulation */
+ if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
+ lnet_delay_rule_match_locked(hdr, msg))) {
+ lnet_net_unlock(cpt);
+ return 0;
+ }
+
if (!for_me) {
rc = lnet_parse_forward_locked(ni, msg);
lnet_net_unlock(cpt);
if (rc < 0)
goto free_drop;
- if (rc == 0) {
+
+ if (rc == LNET_CREDIT_OK) {
lnet_ni_recv(ni, msg->msg_private, msg, 0,
0, payload_length, payload_length);
}
@@ -1903,32 +2047,13 @@ lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
lnet_net_unlock(cpt);
- switch (type) {
- case LNET_MSG_ACK:
- rc = lnet_parse_ack(ni, msg);
- break;
- case LNET_MSG_PUT:
- rc = lnet_parse_put(ni, msg);
- break;
- case LNET_MSG_GET:
- rc = lnet_parse_get(ni, msg, rdma_req);
- break;
- case LNET_MSG_REPLY:
- rc = lnet_parse_reply(ni, msg);
- break;
- default:
- LASSERT(0);
- rc = -EPROTO;
- goto free_drop; /* prevent an unused label if !kernel */
- }
-
- if (rc == 0)
- return 0;
-
- LASSERT(rc == ENOENT);
+ rc = lnet_parse_local(ni, msg);
+ if (rc)
+ goto free_drop;
+ return 0;
free_drop:
- LASSERT(msg->msg_md == NULL);
+ LASSERT(!msg->msg_md);
lnet_finalize(ni, msg, rc);
drop:
@@ -1950,9 +2075,9 @@ lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
id.nid = msg->msg_hdr.src_nid;
id.pid = msg->msg_hdr.src_pid;
- LASSERT(msg->msg_md == NULL);
+ LASSERT(!msg->msg_md);
LASSERT(msg->msg_rx_delayed);
- LASSERT(msg->msg_rxpeer != NULL);
+ LASSERT(msg->msg_rxpeer);
LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
@@ -1962,10 +2087,11 @@ lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
msg->msg_hdr.msg.put.offset,
msg->msg_hdr.payload_length, reason);
- /* NB I can't drop msg's ref on msg_rxpeer until after I've
+ /*
+ * NB I can't drop msg's ref on msg_rxpeer until after I've
* called lnet_drop_message(), so I just hang onto msg as well
- * until that's done */
-
+ * until that's done
+ */
lnet_drop_message(msg->msg_rxpeer->lp_ni,
msg->msg_rxpeer->lp_cpt,
msg->msg_private, msg->msg_len);
@@ -1988,15 +2114,16 @@ lnet_recv_delayed_msg_list(struct list_head *head)
msg = list_entry(head->next, lnet_msg_t, msg_list);
list_del(&msg->msg_list);
- /* md won't disappear under me, since each msg
- * holds a ref on it */
-
+ /*
+ * md won't disappear under me, since each msg
+ * holds a ref on it
+ */
id.nid = msg->msg_hdr.src_nid;
id.pid = msg->msg_hdr.src_pid;
LASSERT(msg->msg_rx_delayed);
- LASSERT(msg->msg_md != NULL);
- LASSERT(msg->msg_rxpeer != NULL);
+ LASSERT(msg->msg_md);
+ LASSERT(msg->msg_rxpeer);
LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);
CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
@@ -2064,7 +2191,6 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
int cpt;
int rc;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
@@ -2075,7 +2201,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
}
msg = lnet_msg_alloc();
- if (msg == NULL) {
+ if (!msg) {
CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
libcfs_id2str(target));
return -ENOMEM;
@@ -2086,11 +2212,11 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
lnet_res_lock(cpt);
md = lnet_handle2md(&mdh);
- if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+ if (!md || !md->md_threshold || md->md_me) {
CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
match_bits, portal, libcfs_id2str(target),
- md == NULL ? -1 : md->md_threshold);
- if (md != NULL && md->md_me != NULL)
+ !md ? -1 : md->md_threshold);
+ if (md && md->md_me)
CERROR("Source MD also attached to portal %d\n",
md->md_me->me_portal);
lnet_res_unlock(cpt);
@@ -2128,9 +2254,9 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
lnet_build_msg_event(msg, LNET_EVENT_SEND);
rc = lnet_send(self, msg, LNET_NID_ANY);
- if (rc != 0) {
+ if (rc) {
CNETERR("Error sending PUT to %s: %d\n",
- libcfs_id2str(target), rc);
+ libcfs_id2str(target), rc);
lnet_finalize(NULL, msg, rc);
}
@@ -2142,13 +2268,14 @@ EXPORT_SYMBOL(LNetPut);
lnet_msg_t *
lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
{
- /* The LND can DMA direct to the GET md (i.e. no REPLY msg). This
+ /*
+ * The LND can DMA direct to the GET md (i.e. no REPLY msg). This
* returns a msg for the LND to pass to lnet_finalize() when the sink
* data has been received.
*
* CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
- * lnet_finalize() is called on it, so the LND must call this first */
-
+ * lnet_finalize() is called on it, so the LND must call this first
+ */
struct lnet_msg *msg = lnet_msg_alloc();
struct lnet_libmd *getmd = getmsg->msg_md;
lnet_process_id_t peer_id = getmsg->msg_target;
@@ -2157,26 +2284,26 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
LASSERT(!getmsg->msg_target_is_router);
LASSERT(!getmsg->msg_routing);
+ if (!msg) {
+ CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
+ goto drop;
+ }
+
cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
lnet_res_lock(cpt);
LASSERT(getmd->md_refcount > 0);
- if (msg == NULL) {
- CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
- goto drop;
- }
-
- if (getmd->md_threshold == 0) {
+ if (!getmd->md_threshold) {
CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
- libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
- getmd);
+ libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
+ getmd);
lnet_res_unlock(cpt);
goto drop;
}
- LASSERT(getmd->md_offset == 0);
+ LASSERT(!getmd->md_offset);
CDEBUG(D_NET, "%s: Reply from %s md %p\n",
libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);
@@ -2209,7 +2336,7 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
lnet_net_unlock(cpt);
- if (msg != NULL)
+ if (msg)
lnet_msg_free(msg);
return NULL;
@@ -2219,14 +2346,18 @@ EXPORT_SYMBOL(lnet_create_reply_msg);
void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
- /* Set the REPLY length, now the RDMA that elides the REPLY message has
- * completed and I know it. */
- LASSERT(reply != NULL);
+ /*
+ * Set the REPLY length, now the RDMA that elides the REPLY message has
+ * completed and I know it.
+ */
+ LASSERT(reply);
LASSERT(reply->msg_type == LNET_MSG_GET);
LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);
- /* NB I trusted my peer to RDMA. If she tells me she's written beyond
- * the end of my buffer, I might as well be dead. */
+ /*
+ * NB I trusted my peer to RDMA. If she tells me she's written beyond
+ * the end of my buffer, I might as well be dead.
+ */
LASSERT(len <= reply->msg_ev.mlength);
reply->msg_ev.mlength = len;
@@ -2264,7 +2395,6 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
int cpt;
int rc;
- LASSERT(the_lnet.ln_init);
LASSERT(the_lnet.ln_refcount > 0);
if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
@@ -2275,7 +2405,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
}
msg = lnet_msg_alloc();
- if (msg == NULL) {
+ if (!msg) {
CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
libcfs_id2str(target));
return -ENOMEM;
@@ -2285,11 +2415,11 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
lnet_res_lock(cpt);
md = lnet_handle2md(&mdh);
- if (md == NULL || md->md_threshold == 0 || md->md_me != NULL) {
+ if (!md || !md->md_threshold || md->md_me) {
CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
match_bits, portal, libcfs_id2str(target),
- md == NULL ? -1 : md->md_threshold);
- if (md != NULL && md->md_me != NULL)
+ !md ? -1 : md->md_threshold);
+ if (md && md->md_me)
CERROR("REPLY MD also attached to portal %d\n",
md->md_me->me_portal);
@@ -2323,7 +2453,7 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
rc = lnet_send(self, msg, LNET_NID_ANY);
if (rc < 0) {
CNETERR("Error sending GET to %s: %d\n",
- libcfs_id2str(target), rc);
+ libcfs_id2str(target), rc);
lnet_finalize(NULL, msg, rc);
}
@@ -2358,12 +2488,12 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
__u32 order = 2;
struct list_head *rn_list;
- /* if !local_nid_dist_zero, I don't return a distance of 0 ever
+ /*
+ * if !local_nid_dist_zero, I don't return a distance of 0 ever
* (when lustre sees a distance of 0, it substitutes 0@lo), so I
* keep order 0 free for 0@lo and order 1 free for a local NID
- * match */
-
- LASSERT(the_lnet.ln_init);
+ * match
+ */
LASSERT(the_lnet.ln_refcount > 0);
cpt = lnet_net_lock_current();
@@ -2372,9 +2502,9 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
ni = list_entry(e, lnet_ni_t, ni_list);
if (ni->ni_nid == dstnid) {
- if (srcnidp != NULL)
+ if (srcnidp)
*srcnidp = dstnid;
- if (orderp != NULL) {
+ if (orderp) {
if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
*orderp = 0;
else
@@ -2386,9 +2516,9 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
}
if (LNET_NIDNET(ni->ni_nid) == dstnet) {
- if (srcnidp != NULL)
+ if (srcnidp)
*srcnidp = ni->ni_nid;
- if (orderp != NULL)
+ if (orderp)
*orderp = order;
lnet_net_unlock(cpt);
return 1;
@@ -2404,21 +2534,28 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
if (rnet->lrn_net == dstnet) {
lnet_route_t *route;
lnet_route_t *shortest = NULL;
+ __u32 shortest_hops = LNET_UNDEFINED_HOPS;
+ __u32 route_hops;
LASSERT(!list_empty(&rnet->lrn_routes));
list_for_each_entry(route, &rnet->lrn_routes,
- lr_list) {
- if (shortest == NULL ||
- route->lr_hops < shortest->lr_hops)
+ lr_list) {
+ route_hops = route->lr_hops;
+ if (route_hops == LNET_UNDEFINED_HOPS)
+ route_hops = 1;
+ if (!shortest ||
+ route_hops < shortest_hops) {
shortest = route;
+ shortest_hops = route_hops;
+ }
}
- LASSERT(shortest != NULL);
- hops = shortest->lr_hops;
- if (srcnidp != NULL)
+ LASSERT(shortest);
+ hops = shortest_hops;
+ if (srcnidp)
*srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
- if (orderp != NULL)
+ if (orderp)
*orderp = order;
lnet_net_unlock(cpt);
return hops + 1;
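The hunks above switch lnet_post_send_locked() and lnet_parse_forward_locked() from returning positive errno values to the LNET_CREDIT_* convention: LNET_CREDIT_OK when the message was sent (or may be sent), LNET_CREDIT_WAIT when it was queued waiting for a credit, and a negative errno on hard failure. A minimal sketch of a caller under this convention, mirroring the tail of lnet_send() in this patch; it assumes LNET_CREDIT_OK == 0 and LNET_CREDIT_WAIT > 0 as in lib-types.h, and the helper name below is hypothetical:

static int example_post_and_send(lnet_ni_t *ni, lnet_msg_t *msg, int cpt)
{
	int rc;

	lnet_net_lock(cpt);
	rc = lnet_post_send_locked(msg, 0);	/* do_send == 0: don't send under lock */
	lnet_net_unlock(cpt);

	if (rc < 0)			/* -EHOSTUNREACH or -ECANCELED */
		return rc;		/* msg was already finalized */

	if (rc == LNET_CREDIT_OK)	/* credits available: send now */
		lnet_ni_send(ni, msg);

	/* LNET_CREDIT_WAIT: msg sits on a credit queue; nothing to do here */
	return 0;
}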
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 43977e8dffbb..f879d7f28708 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -74,7 +74,6 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type)
ev->initiator.nid = LNET_NID_ANY;
ev->initiator.pid = the_lnet.ln_pid;
ev->sender = LNET_NID_ANY;
-
} else {
/* event for passive message */
ev->target.pid = hdr->dest_pid;
@@ -173,7 +172,7 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
lnet_event_t *ev = &msg->msg_ev;
LASSERT(msg->msg_tx_committed);
- if (status != 0)
+ if (status)
goto out;
counters = the_lnet.ln_counters[msg->msg_tx_cpt];
@@ -181,7 +180,7 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
default: /* routed message */
LASSERT(msg->msg_routing);
LASSERT(msg->msg_rx_committed);
- LASSERT(ev->type == 0);
+ LASSERT(!ev->type);
counters->route_length += msg->msg_len;
counters->route_count++;
@@ -203,8 +202,10 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status)
case LNET_EVENT_GET:
LASSERT(msg->msg_rx_committed);
- /* overwritten while sending reply, we should never be
- * here for optimized GET */
+ /*
+ * overwritten while sending reply, we should never be
+ * here for optimized GET
+ */
LASSERT(msg->msg_type == LNET_MSG_REPLY);
msg->msg_type = LNET_MSG_GET; /* fix type */
break;
@@ -225,13 +226,13 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
LASSERT(msg->msg_rx_committed);
- if (status != 0)
+ if (status)
goto out;
counters = the_lnet.ln_counters[msg->msg_rx_cpt];
switch (ev->type) {
default:
- LASSERT(ev->type == 0);
+ LASSERT(!ev->type);
LASSERT(msg->msg_routing);
goto out;
@@ -240,10 +241,12 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
break;
case LNET_EVENT_GET:
- /* type is "REPLY" if it's an optimized GET on passive side,
+ /*
+ * type is "REPLY" if it's an optimized GET on passive side,
* because optimized GET will never be committed for sending,
* so message type wouldn't be changed back to "GET" by
- * lnet_msg_decommit_tx(), see details in lnet_parse_get() */
+ * lnet_msg_decommit_tx(), see details in lnet_parse_get()
+ */
LASSERT(msg->msg_type == LNET_MSG_REPLY ||
msg->msg_type == LNET_MSG_GET);
counters->send_length += msg->msg_wanted;
@@ -254,8 +257,10 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status)
break;
case LNET_EVENT_REPLY:
- /* type is "GET" if it's an optimized GET on active side,
- * see details in lnet_create_reply_msg() */
+ /*
+ * type is "GET" if it's an optimized GET on active side,
+ * see details in lnet_create_reply_msg()
+ */
LASSERT(msg->msg_type == LNET_MSG_GET ||
msg->msg_type == LNET_MSG_REPLY);
break;
@@ -309,10 +314,12 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
unsigned int offset, unsigned int mlen)
{
/* NB: @offset and @len are only useful for receiving */
- /* Here, we attach the MD on lnet_msg and mark it busy and
+ /*
+ * Here, we attach the MD on lnet_msg and mark it busy and
* decrementing its threshold. Come what may, the lnet_msg "owns"
* the MD until a call to lnet_msg_detach_md or lnet_finalize()
- * signals completion. */
+ * signals completion.
+ */
LASSERT(!msg->msg_routing);
msg->msg_md = md;
@@ -343,7 +350,7 @@ lnet_msg_detach_md(lnet_msg_t *msg, int status)
LASSERT(md->md_refcount >= 0);
unlink = lnet_md_unlinkable(md);
- if (md->md_eq != NULL) {
+ if (md->md_eq) {
msg->msg_ev.status = status;
msg->msg_ev.unlinked = unlink;
lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
@@ -364,7 +371,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
LASSERT(msg->msg_onactivelist);
- if (status == 0 && msg->msg_ack) {
+ if (!status && msg->msg_ack) {
/* Only send an ACK if the PUT completed successfully */
lnet_msg_decommit(msg, cpt, 0);
@@ -383,8 +390,10 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);
- /* NB: we probably want to use NID of msg::msg_from as 3rd
- * parameter (router NID) if it's routed message */
+ /*
+ * NB: we probably want to use NID of msg::msg_from as 3rd
+ * parameter (router NID) if it's routed message
+ */
rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);
lnet_net_lock(cpt);
@@ -401,7 +410,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
*/
return rc;
- } else if (status == 0 && /* OK so far */
+ } else if (!status && /* OK so far */
(msg->msg_routing && !msg->msg_sending)) {
/* not forwarded */
LASSERT(!msg->msg_receiving); /* called back recv already */
@@ -442,7 +451,7 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
LASSERT(!in_interrupt());
- if (msg == NULL)
+ if (!msg)
return;
#if 0
CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
@@ -458,12 +467,12 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
msg->msg_rtrcredit ? "F" : "",
msg->msg_peerrtrcredit ? "f" : "",
msg->msg_onactivelist ? "!" : "",
- msg->msg_txpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
- msg->msg_rxpeer == NULL ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
+ !msg->msg_txpeer ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
+ !msg->msg_rxpeer ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
#endif
msg->msg_ev.status = status;
- if (msg->msg_md != NULL) {
+ if (msg->msg_md) {
cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
lnet_res_lock(cpt);
@@ -491,15 +500,16 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
container = the_lnet.ln_msg_containers[cpt];
list_add_tail(&msg->msg_list, &container->msc_finalizing);
- /* Recursion breaker. Don't complete the message here if I am (or
- * enough other threads are) already completing messages */
-
+ /*
+ * Recursion breaker. Don't complete the message here if I am (or
+ * enough other threads are) already completing messages
+ */
my_slot = -1;
for (i = 0; i < container->msc_nfinalizers; i++) {
if (container->msc_finalizers[i] == current)
break;
- if (my_slot < 0 && container->msc_finalizers[i] == NULL)
+ if (my_slot < 0 && !container->msc_finalizers[i])
my_slot = i;
}
@@ -512,21 +522,29 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
while (!list_empty(&container->msc_finalizing)) {
msg = list_entry(container->msc_finalizing.next,
- lnet_msg_t, msg_list);
+ lnet_msg_t, msg_list);
list_del(&msg->msg_list);
- /* NB drops and regains the lnet lock if it actually does
- * anything, so my finalizing friends can chomp along too */
+ /*
+ * NB drops and regains the lnet lock if it actually does
+ * anything, so my finalizing friends can chomp along too
+ */
rc = lnet_complete_msg_locked(msg, cpt);
- if (rc != 0)
+ if (rc)
break;
}
+ if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
+ lnet_net_unlock(cpt);
+ lnet_delay_rule_check();
+ lnet_net_lock(cpt);
+ }
+
container->msc_finalizers[my_slot] = NULL;
lnet_net_unlock(cpt);
- if (rc != 0)
+ if (rc)
goto again;
}
EXPORT_SYMBOL(lnet_finalize);
@@ -536,12 +554,12 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
int count = 0;
- if (container->msc_init == 0)
+ if (!container->msc_init)
return;
while (!list_empty(&container->msc_active)) {
lnet_msg_t *msg = list_entry(container->msc_active.next,
- lnet_msg_t, msg_activelist);
+ lnet_msg_t, msg_activelist);
LASSERT(msg->msg_onactivelist);
msg->msg_onactivelist = 0;
@@ -553,41 +571,23 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container)
if (count > 0)
CERROR("%d active msg on exit\n", count);
- if (container->msc_finalizers != NULL) {
+ if (container->msc_finalizers) {
LIBCFS_FREE(container->msc_finalizers,
container->msc_nfinalizers *
sizeof(*container->msc_finalizers));
container->msc_finalizers = NULL;
}
-#ifdef LNET_USE_LIB_FREELIST
- lnet_freelist_fini(&container->msc_freelist);
-#endif
container->msc_init = 0;
}
int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
- int rc;
-
container->msc_init = 1;
INIT_LIST_HEAD(&container->msc_active);
INIT_LIST_HEAD(&container->msc_finalizing);
-#ifdef LNET_USE_LIB_FREELIST
- memset(&container->msc_freelist, 0, sizeof(lnet_freelist_t));
-
- rc = lnet_freelist_init(&container->msc_freelist,
- LNET_FL_MAX_MSGS, sizeof(lnet_msg_t));
- if (rc != 0) {
- CERROR("Failed to init freelist for message container\n");
- lnet_msg_container_cleanup(container);
- return rc;
- }
-#else
- rc = 0;
-#endif
/* number of CPUs */
container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
@@ -595,13 +595,13 @@ lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
container->msc_nfinalizers *
sizeof(*container->msc_finalizers));
- if (container->msc_finalizers == NULL) {
+ if (!container->msc_finalizers) {
CERROR("Failed to allocate message finalizers\n");
lnet_msg_container_cleanup(container);
return -ENOMEM;
}
- return rc;
+ return 0;
}
void
@@ -610,7 +610,7 @@ lnet_msg_containers_destroy(void)
struct lnet_msg_container *container;
int i;
- if (the_lnet.ln_msg_containers == NULL)
+ if (!the_lnet.ln_msg_containers)
return;
cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
@@ -630,14 +630,14 @@ lnet_msg_containers_create(void)
the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*container));
- if (the_lnet.ln_msg_containers == NULL) {
+ if (!the_lnet.ln_msg_containers) {
CERROR("Failed to allocate cpu-partition data for network\n");
return -ENOMEM;
}
cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
rc = lnet_msg_container_setup(container, i);
- if (rc != 0) {
+ if (rc) {
lnet_msg_containers_destroy();
return rc;
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
index bd7b071b2873..3947e8b711c0 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/*
@@ -50,7 +45,7 @@ lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id,
struct lnet_portal *ptl = the_lnet.ln_portals[index];
int unique;
- unique = ignore_bits == 0 &&
+ unique = !ignore_bits &&
match_id.nid != LNET_NID_ANY &&
match_id.pid != LNET_PID_ANY;
@@ -139,8 +134,10 @@ static int
lnet_try_match_md(lnet_libmd_t *md,
struct lnet_match_info *info, struct lnet_msg *msg)
{
- /* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
- * lnet_match_blocked_msg() relies on this to avoid races */
+ /*
+ * ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock;
+ * lnet_match_blocked_msg() relies on this to avoid races
+ */
unsigned int offset;
unsigned int mlength;
lnet_me_t *me = md->md_me;
@@ -150,7 +147,7 @@ lnet_try_match_md(lnet_libmd_t *md,
return LNET_MATCHMD_NONE | LNET_MATCHMD_EXHAUSTED;
/* mismatched MD op */
- if ((md->md_options & info->mi_opc) == 0)
+ if (!(md->md_options & info->mi_opc))
return LNET_MATCHMD_NONE;
/* mismatched ME nid/pid? */
@@ -163,17 +160,17 @@ lnet_try_match_md(lnet_libmd_t *md,
return LNET_MATCHMD_NONE;
/* mismatched ME matchbits? */
- if (((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits) != 0)
+ if ((me->me_match_bits ^ info->mi_mbits) & ~me->me_ignore_bits)
return LNET_MATCHMD_NONE;
/* Hurrah! This _is_ a match; check it out... */
- if ((md->md_options & LNET_MD_MANAGE_REMOTE) == 0)
+ if (!(md->md_options & LNET_MD_MANAGE_REMOTE))
offset = md->md_offset;
else
offset = info->mi_roffset;
- if ((md->md_options & LNET_MD_MAX_SIZE) != 0) {
+ if (md->md_options & LNET_MD_MAX_SIZE) {
mlength = md->md_max_size;
LASSERT(md->md_offset + mlength <= md->md_length);
} else {
@@ -182,7 +179,7 @@ lnet_try_match_md(lnet_libmd_t *md,
if (info->mi_rlength <= mlength) { /* fits in allowed space */
mlength = info->mi_rlength;
- } else if ((md->md_options & LNET_MD_TRUNCATE) == 0) {
+ } else if (!(md->md_options & LNET_MD_TRUNCATE)) {
/* this packet _really_ is too big */
CERROR("Matching packet from %s, match %llu length %d too big: %d left, %d allowed\n",
libcfs_id2str(info->mi_id), info->mi_mbits,
@@ -203,10 +200,12 @@ lnet_try_match_md(lnet_libmd_t *md,
if (!lnet_md_exhausted(md))
return LNET_MATCHMD_OK;
- /* Auto-unlink NOW, so the ME gets unlinked if required.
+ /*
+ * Auto-unlink NOW, so the ME gets unlinked if required.
* We bumped md->md_refcount above so the MD just gets flagged
- * for unlink when it is finalized. */
- if ((md->md_flags & LNET_MD_FLAG_AUTO_UNLINK) != 0)
+ * for unlink when it is finalized.
+ */
+ if (md->md_flags & LNET_MD_FLAG_AUTO_UNLINK)
lnet_md_unlink(md);
return LNET_MATCHMD_OK | LNET_MATCHMD_EXHAUSTED;
@@ -239,7 +238,7 @@ lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
ptl = the_lnet.ln_portals[index];
mtable = lnet_match2mt(ptl, id, mbits);
- if (mtable != NULL) /* unique portal or only one match-table */
+ if (mtable) /* unique portal or only one match-table */
return mtable;
/* it's a wildcard portal */
@@ -248,8 +247,10 @@ lnet_mt_of_attach(unsigned int index, lnet_process_id_t id,
return NULL;
case LNET_INS_BEFORE:
case LNET_INS_AFTER:
- /* posted by no affinity thread, always hash to specific
- * match-table to avoid buffer stealing which is heavy */
+ /*
+ * posted by no affinity thread, always hash to specific
+ * match-table to avoid buffer stealing which is heavy
+ */
return ptl->ptl_mtables[ptl->ptl_index % LNET_CPT_NUMBER];
case LNET_INS_LOCAL:
/* posted by cpu-affinity thread */
@@ -274,7 +275,7 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
LASSERT(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl));
mtable = lnet_match2mt(ptl, info->mi_id, info->mi_mbits);
- if (mtable != NULL)
+ if (mtable)
return mtable;
/* it's a wildcard portal */
@@ -298,10 +299,12 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg)
/* is there any active entry for this portal? */
nmaps = ptl->ptl_mt_nmaps;
/* map to an active mtable to avoid heavy "stealing" */
- if (nmaps != 0) {
- /* NB: there is possibility that ptl_mt_maps is being
+ if (nmaps) {
+ /*
+ * NB: there is possibility that ptl_mt_maps is being
* changed because we are not under protection of
- * lnet_ptl_lock, but it shouldn't hurt anything */
+ * lnet_ptl_lock, but it shouldn't hurt anything
+ */
cpt = ptl->ptl_mt_maps[rotor % nmaps];
}
}
@@ -331,7 +334,7 @@ lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos)
bmap = &mtable->mt_exhausted[pos >> LNET_MT_BITS_U64];
pos &= (1 << LNET_MT_BITS_U64) - 1;
- return ((*bmap) & (1ULL << pos)) != 0;
+ return (*bmap & (1ULL << pos));
}
static void
@@ -357,16 +360,15 @@ lnet_mt_match_head(struct lnet_match_table *mtable,
lnet_process_id_t id, __u64 mbits)
{
struct lnet_portal *ptl = the_lnet.ln_portals[mtable->mt_portal];
+ unsigned long hash = mbits;
- if (lnet_ptl_is_wildcard(ptl)) {
- return &mtable->mt_mhash[mbits & LNET_MT_HASH_MASK];
- } else {
- unsigned long hash = mbits + id.nid + id.pid;
+ if (!lnet_ptl_is_wildcard(ptl)) {
+ hash += id.nid + id.pid;
LASSERT(lnet_ptl_is_unique(ptl));
hash = hash_long(hash, LNET_MT_HASH_BITS);
- return &mtable->mt_mhash[hash];
}
+ return &mtable->mt_mhash[hash & LNET_MT_HASH_MASK];
}
int
@@ -391,18 +393,20 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
list_for_each_entry_safe(me, tmp, head, me_list) {
/* ME attached but MD not attached yet */
- if (me->me_md == NULL)
+ if (!me->me_md)
continue;
LASSERT(me == me->me_md->md_me);
rc = lnet_try_match_md(me->me_md, info, msg);
- if ((rc & LNET_MATCHMD_EXHAUSTED) == 0)
+ if (!(rc & LNET_MATCHMD_EXHAUSTED))
exhausted = 0; /* mlist is not empty */
- if ((rc & LNET_MATCHMD_FINISH) != 0) {
- /* don't return EXHAUSTED bit because we don't know
- * whether the mlist is empty or not */
+ if (rc & LNET_MATCHMD_FINISH) {
+ /*
+ * don't return EXHAUSTED bit because we don't know
+ * whether the mlist is empty or not
+ */
return rc & ~LNET_MATCHMD_EXHAUSTED;
}
}
@@ -413,7 +417,7 @@ lnet_mt_match_md(struct lnet_match_table *mtable,
exhausted = 0;
}
- if (exhausted == 0 && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
+ if (!exhausted && head == &mtable->mt_mhash[LNET_MT_HASH_IGNORE]) {
head = lnet_mt_match_head(mtable, info->mi_id, info->mi_mbits);
goto again; /* re-check MEs w/o ignore-bits */
}
@@ -430,8 +434,10 @@ lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
{
int rc;
- /* message arrived before any buffer posting on this portal,
- * simply delay or drop this message */
+ /*
+ * message arrived before any buffer posting on this portal,
+ * simply delay or drop this message
+ */
if (likely(lnet_ptl_is_wildcard(ptl) || lnet_ptl_is_unique(ptl)))
return 0;
@@ -446,7 +452,7 @@ lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg)
if (msg->msg_rx_ready_delay) {
msg->msg_rx_delayed = 1;
list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_delayed);
+ &ptl->ptl_msg_delayed);
}
rc = LNET_MATCHMD_NONE;
} else {
@@ -465,9 +471,13 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
int rc = 0;
int i;
- /* steal buffer from other CPTs, and delay it if nothing to steal,
- * this function is more expensive than a regular match, but we
- * don't expect it can happen a lot */
+ /*
+ * Steal a buffer from other CPTs, and delay the msg if there is
+ * nothing to steal. This function is more expensive than a
+ * regular match, but we don't expect it to happen often. The
+ * return code contains one of LNET_MATCHMD_OK, LNET_MATCHMD_DROP,
+ * or LNET_MATCHMD_NONE.
+ */
LASSERT(lnet_ptl_is_wildcard(ptl));
for (i = 0; i < LNET_CPT_NUMBER; i++) {
@@ -476,56 +486,77 @@ lnet_ptl_match_delay(struct lnet_portal *ptl,
cpt = (first + i) % LNET_CPT_NUMBER;
mtable = ptl->ptl_mtables[cpt];
- if (i != 0 && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
+ if (i && i != LNET_CPT_NUMBER - 1 && !mtable->mt_enabled)
continue;
lnet_res_lock(cpt);
lnet_ptl_lock(ptl);
- if (i == 0) { /* the first try, attach on stealing list */
+ if (!i) {
+ /* The first try, add to stealing list. */
list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_stealing);
+ &ptl->ptl_msg_stealing);
}
- if (!list_empty(&msg->msg_list)) { /* on stealing list */
+ if (!list_empty(&msg->msg_list)) {
+ /* On stealing list. */
rc = lnet_mt_match_md(mtable, info, msg);
- if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 &&
+ if ((rc & LNET_MATCHMD_EXHAUSTED) &&
mtable->mt_enabled)
lnet_ptl_disable_mt(ptl, cpt);
- if ((rc & LNET_MATCHMD_FINISH) != 0)
+ if (rc & LNET_MATCHMD_FINISH) {
+ /* Match found, remove from stealing list. */
+ list_del_init(&msg->msg_list);
+ } else if (i == LNET_CPT_NUMBER - 1 || /* (1) */
+ !ptl->ptl_mt_nmaps || /* (2) */
+ (ptl->ptl_mt_nmaps == 1 && /* (3) */
+ ptl->ptl_mt_maps[0] == cpt)) {
+ /*
+ * No match found, and this is either
+ * (1) the last cpt to check, or
+ * (2) there is no active cpt, or
+ * (3) this is the only active cpt.
+ * There is nothing to steal: delay or
+ * drop the message.
+ */
list_del_init(&msg->msg_list);
- } else {
- /* could be matched by lnet_ptl_attach_md()
- * which is called by another thread */
- rc = msg->msg_md == NULL ?
- LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
- }
-
- if (!list_empty(&msg->msg_list) && /* not matched yet */
- (i == LNET_CPT_NUMBER - 1 || /* the last CPT */
- ptl->ptl_mt_nmaps == 0 || /* no active CPT */
- (ptl->ptl_mt_nmaps == 1 && /* the only active CPT */
- ptl->ptl_mt_maps[0] == cpt))) {
- /* nothing to steal, delay or drop */
- list_del_init(&msg->msg_list);
-
- if (lnet_ptl_is_lazy(ptl)) {
- msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list,
- &ptl->ptl_msg_delayed);
- rc = LNET_MATCHMD_NONE;
+ if (lnet_ptl_is_lazy(ptl)) {
+ msg->msg_rx_delayed = 1;
+ list_add_tail(&msg->msg_list,
+ &ptl->ptl_msg_delayed);
+ rc = LNET_MATCHMD_NONE;
+ } else {
+ rc = LNET_MATCHMD_DROP;
+ }
} else {
- rc = LNET_MATCHMD_DROP;
+ /* Do another iteration. */
+ rc = 0;
}
+ } else {
+ /*
+ * No longer on stealing list: another thread
+ * matched the message in lnet_ptl_attach_md().
+ * We are now expected to handle the message.
+ */
+ rc = !msg->msg_md ?
+ LNET_MATCHMD_DROP : LNET_MATCHMD_OK;
}
lnet_ptl_unlock(ptl);
lnet_res_unlock(cpt);
- if ((rc & LNET_MATCHMD_FINISH) != 0 || msg->msg_rx_delayed)
+ /*
+ * Note that test (1) above ensures that we always
+ * exit the loop through this break statement.
+ *
+ * LNET_MATCHMD_NONE means msg was added to the
+ * delayed queue, and we may no longer reference it
+ * after lnet_ptl_unlock() and lnet_res_unlock().
+ */
+ if (rc & (LNET_MATCHMD_FINISH | LNET_MATCHMD_NONE))
break;
}
@@ -551,7 +582,7 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
ptl = the_lnet.ln_portals[info->mi_portal];
rc = lnet_ptl_match_early(ptl, msg);
- if (rc != 0) /* matched or delayed early message */
+ if (rc) /* matched or delayed early message */
return rc;
mtable = lnet_mt_of_match(info, msg);
@@ -563,13 +594,13 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
}
rc = lnet_mt_match_md(mtable, info, msg);
- if ((rc & LNET_MATCHMD_EXHAUSTED) != 0 && mtable->mt_enabled) {
+ if ((rc & LNET_MATCHMD_EXHAUSTED) && mtable->mt_enabled) {
lnet_ptl_lock(ptl);
lnet_ptl_disable_mt(ptl, mtable->mt_cpt);
lnet_ptl_unlock(ptl);
}
- if ((rc & LNET_MATCHMD_FINISH) != 0) /* matched or dropping */
+ if (rc & LNET_MATCHMD_FINISH) /* matched or dropping */
goto out1;
if (!msg->msg_rx_ready_delay)
@@ -587,13 +618,14 @@ lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg)
lnet_ptl_unlock(ptl);
lnet_res_unlock(mtable->mt_cpt);
-
+ rc = LNET_MATCHMD_NONE;
} else {
lnet_res_unlock(mtable->mt_cpt);
rc = lnet_ptl_match_delay(ptl, info, msg);
}
- if (msg->msg_rx_delayed) {
+ /* LNET_MATCHMD_NONE means msg was added to the delay queue */
+ if (rc & LNET_MATCHMD_NONE) {
CDEBUG(D_NET,
"Delaying %s from %s ptl %d MB %#llx off %d len %d\n",
info->mi_opc == LNET_MD_OP_PUT ? "PUT" : "GET",
@@ -630,7 +662,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
int exhausted = 0;
int cpt;
- LASSERT(md->md_refcount == 0); /* a brand new MD */
+ LASSERT(!md->md_refcount); /* a brand new MD */
me->me_md = md;
md->md_me = me;
@@ -664,15 +696,15 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
rc = lnet_try_match_md(md, &info, msg);
- exhausted = (rc & LNET_MATCHMD_EXHAUSTED) != 0;
- if ((rc & LNET_MATCHMD_NONE) != 0) {
+ exhausted = (rc & LNET_MATCHMD_EXHAUSTED);
+ if (rc & LNET_MATCHMD_NONE) {
if (exhausted)
break;
continue;
}
/* Hurrah! This _is_ a match */
- LASSERT((rc & LNET_MATCHMD_FINISH) != 0);
+ LASSERT(rc & LNET_MATCHMD_FINISH);
list_del_init(&msg->msg_list);
if (head == &ptl->ptl_msg_stealing) {
@@ -682,7 +714,7 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md,
continue;
}
- if ((rc & LNET_MATCHMD_OK) != 0) {
+ if (rc & LNET_MATCHMD_OK) {
list_add_tail(&msg->msg_list, matches);
CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
@@ -717,7 +749,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
struct lnet_match_table *mtable;
int i;
- if (ptl->ptl_mtables == NULL) /* uninitialized portal */
+ if (!ptl->ptl_mtables) /* uninitialized portal */
return;
LASSERT(list_empty(&ptl->ptl_msg_delayed));
@@ -727,7 +759,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
lnet_me_t *me;
int j;
- if (mtable->mt_mhash == NULL) /* uninitialized match-table */
+ if (!mtable->mt_mhash) /* uninitialized match-table */
continue;
mhash = mtable->mt_mhash;
@@ -735,7 +767,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl)
for (j = 0; j < LNET_MT_HASH_SIZE + 1; j++) {
while (!list_empty(&mhash[j])) {
me = list_entry(mhash[j].next,
- lnet_me_t, me_list);
+ lnet_me_t, me_list);
CERROR("Active ME %p on exit\n", me);
list_del(&me->me_list);
lnet_me_free(me);
@@ -759,7 +791,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(struct lnet_match_table));
- if (ptl->ptl_mtables == NULL) {
+ if (!ptl->ptl_mtables) {
CERROR("Failed to create match table for portal %d\n", index);
return -ENOMEM;
}
@@ -772,7 +804,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
/* the extra entry is for MEs with ignore bits */
LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i,
sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1));
- if (mhash == NULL) {
+ if (!mhash) {
CERROR("Failed to create match hash for portal %d\n",
index);
goto failed;
@@ -800,7 +832,7 @@ lnet_portals_destroy(void)
{
int i;
- if (the_lnet.ln_portals == NULL)
+ if (!the_lnet.ln_portals)
return;
for (i = 0; i < the_lnet.ln_nportals; i++)
@@ -820,7 +852,7 @@ lnet_portals_create(void)
the_lnet.ln_nportals = MAX_PORTALS;
the_lnet.ln_portals = cfs_array_alloc(the_lnet.ln_nportals, size);
- if (the_lnet.ln_portals == NULL) {
+ if (!the_lnet.ln_portals) {
CERROR("Failed to allocate portals table\n");
return -ENOMEM;
}
@@ -886,17 +918,8 @@ LNetSetLazyPortal(int portal)
}
EXPORT_SYMBOL(LNetSetLazyPortal);
-/**
- * Turn off the lazy portal attribute. Delayed requests on the portal,
- * if any, will be all dropped when this function returns.
- *
- * \param portal Index of the portal to disable the lazy attribute on.
- *
- * \retval 0 On success.
- * \retval -EINVAL If \a portal is not a valid index.
- */
int
-LNetClearLazyPortal(int portal)
+lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason)
{
struct lnet_portal *ptl;
LIST_HEAD(zombies);
@@ -915,21 +938,48 @@ LNetClearLazyPortal(int portal)
return 0;
}
- if (the_lnet.ln_shutdown)
- CWARN("Active lazy portal %d on exit\n", portal);
- else
- CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
+ if (ni) {
+ struct lnet_msg *msg, *tmp;
+
+ /* grab all messages which are on the NI passed in */
+ list_for_each_entry_safe(msg, tmp, &ptl->ptl_msg_delayed,
+ msg_list) {
+ if (msg->msg_rxpeer->lp_ni == ni)
+ list_move(&msg->msg_list, &zombies);
+ }
+ } else {
+ if (the_lnet.ln_shutdown)
+ CWARN("Active lazy portal %d on exit\n", portal);
+ else
+ CDEBUG(D_NET, "clearing portal %d lazy\n", portal);
- /* grab all the blocked messages atomically */
- list_splice_init(&ptl->ptl_msg_delayed, &zombies);
+ /* grab all the blocked messages atomically */
+ list_splice_init(&ptl->ptl_msg_delayed, &zombies);
- lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
+ lnet_ptl_unsetopt(ptl, LNET_PTL_LAZY);
+ }
lnet_ptl_unlock(ptl);
lnet_res_unlock(LNET_LOCK_EX);
- lnet_drop_delayed_msg_list(&zombies, "Clearing lazy portal attr");
+ lnet_drop_delayed_msg_list(&zombies, reason);
return 0;
}
+
+/**
+ * Turn off the lazy portal attribute. Delayed requests on the portal,
+ * if any, will be all dropped when this function returns.
+ *
+ * \param portal Index of the portal to disable the lazy attribute on.
+ *
+ * \retval 0 On success.
+ * \retval -EINVAL If \a portal is not a valid index.
+ */
+int
+LNetClearLazyPortal(int portal)
+{
+ return lnet_clear_lazy_portal(NULL, portal,
+ "Clearing lazy portal attr");
+}
EXPORT_SYMBOL(LNetClearLazyPortal);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 589ecc84d1b8..891fd59401d7 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -64,7 +64,7 @@ lnet_sock_ioctl(int cmd, unsigned long arg)
int rc;
rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create socket: %d\n", rc);
return rc;
}
@@ -99,14 +99,17 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
CLASSERT(sizeof(ifr.ifr_name) >= IFNAMSIZ);
- strcpy(ifr.ifr_name, name);
+ if (strlen(name) > sizeof(ifr.ifr_name) - 1)
+ return -E2BIG;
+ strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
+
rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't get flags for interface %s\n", name);
return rc;
}
- if ((ifr.ifr_flags & IFF_UP) == 0) {
+ if (!(ifr.ifr_flags & IFF_UP)) {
CDEBUG(D_NET, "Interface %s down\n", name);
*up = 0;
*ip = *mask = 0;
@@ -114,10 +117,13 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
}
*up = 1;
- strcpy(ifr.ifr_name, name);
+ if (strlen(name) > sizeof(ifr.ifr_name) - 1)
+ return -E2BIG;
+ strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
+
ifr.ifr_addr.sa_family = AF_INET;
rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't get IP address for interface %s\n", name);
return rc;
}
@@ -125,10 +131,13 @@ lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr;
*ip = ntohl(val);
- strcpy(ifr.ifr_name, name);
+ if (strlen(name) > sizeof(ifr.ifr_name) - 1)
+ return -E2BIG;
+ strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
+
ifr.ifr_addr.sa_family = AF_INET;
rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't get netmask for interface %s\n", name);
return rc;
}
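
The same bounds-checked copy now appears three times in this function. A hypothetical helper (the name is ours, not part of this patch) that collapses the pattern:

    #include <linux/errno.h>
    #include <linux/if.h>
    #include <linux/string.h>

    /* refuse names that strncpy() would otherwise silently truncate */
    static int lnet_ifr_set_name(struct ifreq *ifr, const char *name)
    {
            if (strlen(name) > sizeof(ifr->ifr_name) - 1)
                    return -E2BIG;
            strncpy(ifr->ifr_name, name, sizeof(ifr->ifr_name));
            return 0;
    }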
@@ -157,15 +166,15 @@ lnet_ipif_enumerate(char ***namesp)
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
- if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+ if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
toobig = 1;
- nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
+ nalloc = PAGE_SIZE / sizeof(*ifr);
CWARN("Too many interfaces: only enumerating first %d\n",
nalloc);
}
LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
- if (ifr == NULL) {
+ if (!ifr) {
CERROR("ENOMEM enumerating up to %d interfaces\n",
nalloc);
rc = -ENOMEM;
@@ -181,9 +190,9 @@ lnet_ipif_enumerate(char ***namesp)
goto out1;
}
- LASSERT(rc == 0);
+ LASSERT(!rc);
- nfound = ifc.ifc_len/sizeof(*ifr);
+ nfound = ifc.ifc_len / sizeof(*ifr);
LASSERT(nfound <= nalloc);
if (nfound < nalloc || toobig)
@@ -193,11 +202,11 @@ lnet_ipif_enumerate(char ***namesp)
nalloc *= 2;
}
- if (nfound == 0)
+ if (!nfound)
goto out1;
LIBCFS_ALLOC(names, nfound * sizeof(*names));
- if (names == NULL) {
+ if (!names) {
rc = -ENOMEM;
goto out1;
}
@@ -213,7 +222,7 @@ lnet_ipif_enumerate(char ***namesp)
}
LIBCFS_ALLOC(names[i], IFNAMSIZ);
- if (names[i] == NULL) {
+ if (!names[i]) {
rc = -ENOMEM;
goto out2;
}
@@ -242,7 +251,7 @@ lnet_ipif_free_enumeration(char **names, int n)
LASSERT(n > 0);
- for (i = 0; i < n && names[i] != NULL; i++)
+ for (i = 0; i < n && names[i]; i++)
LIBCFS_FREE(names[i], IFNAMSIZ);
LIBCFS_FREE(names, n * sizeof(*names));
@@ -253,32 +262,30 @@ int
lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
{
int rc;
- long ticks = timeout * HZ;
+ long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
unsigned long then;
struct timeval tv;
LASSERT(nob > 0);
- /* Caller may pass a zero timeout if she thinks the socket buffer is
- * empty enough to take the whole message immediately */
-
+ /*
+ * Caller may pass a zero timeout if she thinks the socket buffer is
+ * empty enough to take the whole message immediately
+ */
for (;;) {
struct kvec iov = {
.iov_base = buffer,
.iov_len = nob
};
struct msghdr msg = {
- .msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0
+ .msg_flags = !timeout ? MSG_DONTWAIT : 0
};
- if (timeout != 0) {
+ if (timeout) {
/* Set send timeout to remaining time */
- tv = (struct timeval) {
- .tv_sec = ticks / HZ,
- .tv_usec = ((ticks % HZ) * 1000000) / HZ
- };
+ jiffies_to_timeval(jiffies_left, &tv);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,
(char *)&tv, sizeof(tv));
- if (rc != 0) {
+ if (rc) {
CERROR("Can't set socket send timeout %ld.%06d: %d\n",
(long)tv.tv_sec, (int)tv.tv_usec, rc);
return rc;
@@ -287,7 +294,7 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
then = jiffies;
rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
- ticks -= jiffies - then;
+ jiffies_left -= jiffies - then;
if (rc == nob)
return 0;
@@ -295,12 +302,12 @@ lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
if (rc < 0)
return rc;
- if (rc == 0) {
+ if (!rc) {
CERROR("Unexpected zero rc\n");
return -ECONNABORTED;
}
- if (ticks <= 0)
+ if (jiffies_left <= 0)
return -EAGAIN;
buffer = ((char *)buffer) + rc;
@@ -314,12 +321,12 @@ int
lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
{
int rc;
- long ticks = timeout * HZ;
+ long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
unsigned long then;
struct timeval tv;
LASSERT(nob > 0);
- LASSERT(ticks > 0);
+ LASSERT(jiffies_left > 0);
for (;;) {
struct kvec iov = {
@@ -331,13 +338,10 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
};
/* Set receive timeout to remaining time */
- tv = (struct timeval) {
- .tv_sec = ticks / HZ,
- .tv_usec = ((ticks % HZ) * 1000000) / HZ
- };
+ jiffies_to_timeval(jiffies_left, &tv);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
(char *)&tv, sizeof(tv));
- if (rc != 0) {
+ if (rc) {
CERROR("Can't set socket recv timeout %ld.%06d: %d\n",
(long)tv.tv_sec, (int)tv.tv_usec, rc);
return rc;
@@ -345,21 +349,21 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
then = jiffies;
rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
- ticks -= jiffies - then;
+ jiffies_left -= jiffies - then;
if (rc < 0)
return rc;
- if (rc == 0)
+ if (!rc)
return -ECONNRESET;
buffer = ((char *)buffer) + rc;
nob -= rc;
- if (nob == 0)
+ if (!nob)
return 0;
- if (ticks <= 0)
+ if (jiffies_left <= 0)
return -ETIMEDOUT;
}
}
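
Both socket helpers keep a running time budget: each loop iteration measures how long the send or recv actually took and charges it against the remaining jiffies, so the whole call honours the caller's timeout no matter how many partial transfers occur. A userspace analogue of that accounting, in milliseconds instead of jiffies:

    #include <stdio.h>
    #include <time.h>

    static long now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
    }

    int main(void)
    {
            long left_ms = 5000;    /* total budget, like timeout in jiffies */

            while (left_ms > 0) {
                    long then = now_ms();

                    /* ... one bounded send/recv attempt would go here ... */
                    left_ms -= now_ms() - then;     /* charge elapsed time */
                    break;  /* demo: a single iteration */
            }
            printf("budget left: %ld ms\n", left_ms);
            return 0;
    }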
@@ -379,7 +383,7 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
*sockp = sock;
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create socket: %d\n", rc);
return rc;
}
@@ -387,16 +391,16 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
option = 1;
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(char *)&option, sizeof(option));
- if (rc != 0) {
+ if (rc) {
CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc);
goto failed;
}
- if (local_ip != 0 || local_port != 0) {
+ if (local_ip || local_port) {
memset(&locaddr, 0, sizeof(locaddr));
locaddr.sin_family = AF_INET;
locaddr.sin_port = htons(local_port);
- locaddr.sin_addr.s_addr = (local_ip == 0) ?
+ locaddr.sin_addr.s_addr = !local_ip ?
INADDR_ANY : htonl(local_ip);
rc = kernel_bind(sock, (struct sockaddr *)&locaddr,
@@ -406,7 +410,7 @@ lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip,
*fatal = 0;
goto failed;
}
- if (rc != 0) {
+ if (rc) {
CERROR("Error trying to bind to port %d: %d\n",
local_port, rc);
goto failed;
@@ -425,22 +429,22 @@ lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize)
int option;
int rc;
- if (txbufsize != 0) {
+ if (txbufsize) {
option = txbufsize;
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
(char *)&option, sizeof(option));
- if (rc != 0) {
+ if (rc) {
CERROR("Can't set send buffer %d: %d\n",
option, rc);
return rc;
}
}
- if (rxbufsize != 0) {
+ if (rxbufsize) {
option = rxbufsize;
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
- (char *)&option, sizeof(option));
- if (rc != 0) {
+ (char *)&option, sizeof(option));
+ if (rc) {
CERROR("Can't set receive buffer %d: %d\n",
option, rc);
return rc;
@@ -461,16 +465,16 @@ lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port)
rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len);
else
rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d getting sock %s IP/port\n",
rc, remote ? "peer" : "local");
return rc;
}
- if (ip != NULL)
+ if (ip)
*ip = ntohl(sin.sin_addr.s_addr);
- if (port != NULL)
+ if (port)
*port = ntohs(sin.sin_port);
return 0;
@@ -480,10 +484,10 @@ EXPORT_SYMBOL(lnet_sock_getaddr);
int
lnet_sock_getbuf(struct socket *sock, int *txbufsize, int *rxbufsize)
{
- if (txbufsize != NULL)
+ if (txbufsize)
*txbufsize = sock->sk->sk_sndbuf;
- if (rxbufsize != NULL)
+ if (rxbufsize)
*rxbufsize = sock->sk->sk_rcvbuf;
return 0;
@@ -498,7 +502,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
int rc;
rc = lnet_sock_create(sockp, &fatal, local_ip, local_port);
- if (rc != 0) {
+ if (rc) {
if (!fatal)
CERROR("Can't create socket: port %d already in use\n",
local_port);
@@ -506,14 +510,13 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
}
rc = kernel_listen(*sockp, backlog);
- if (rc == 0)
+ if (!rc)
return 0;
CERROR("Can't set listen backlog %d: %d\n", backlog, rc);
sock_release(*sockp);
return rc;
}
-EXPORT_SYMBOL(lnet_sock_listen);
int
lnet_sock_accept(struct socket **newsockp, struct socket *sock)
@@ -522,10 +525,10 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
struct socket *newsock;
int rc;
- init_waitqueue_entry(&wait, current);
-
- /* XXX this should add a ref to sock->ops->owner, if
- * TCP could be a module */
+ /*
+ * XXX this should add a ref to sock->ops->owner, if
+ * TCP could be a module
+ */
rc = sock_create_lite(PF_PACKET, sock->type, IPPROTO_TCP, &newsock);
if (rc) {
CERROR("Can't allocate socket\n");
@@ -537,15 +540,15 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
if (rc == -EAGAIN) {
/* Nothing ready, so wait for activity */
- set_current_state(TASK_INTERRUPTIBLE);
+ init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sock->sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
schedule();
remove_wait_queue(sk_sleep(sock->sk), &wait);
- set_current_state(TASK_RUNNING);
rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
}
- if (rc != 0)
+ if (rc)
goto failed;
*newsockp = newsock;
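
The reorder above is the classic lost-wakeup fix: the task must be on the wait queue and marked TASK_INTERRUPTIBLE before it commits to sleeping, so a wake_up() arriving between the failed accept and schedule() simply puts the task back to TASK_RUNNING and schedule() returns at once. A kernel-style sketch of the safe ordering (illustrative, for a 4.x-era kernel tree):

    #include <linux/sched.h>
    #include <linux/wait.h>

    static void wait_for_activity(wait_queue_head_t *wq)
    {
            wait_queue_t wait;

            init_waitqueue_entry(&wait, current);
            add_wait_queue(wq, &wait);              /* 1: visible to wakers */
            set_current_state(TASK_INTERRUPTIBLE);  /* 2: mark as sleeping */
            schedule();                             /* 3: sleep, or return
                                                     *    at once if woken */
            remove_wait_queue(wq, &wait);
            /* schedule() returns with the task back in TASK_RUNNING */
    }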
@@ -555,7 +558,6 @@ failed:
sock_release(newsock);
return rc;
}
-EXPORT_SYMBOL(lnet_sock_accept);
int
lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
@@ -565,7 +567,7 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
int rc;
rc = lnet_sock_create(sockp, fatal, local_ip, local_port);
- if (rc != 0)
+ if (rc)
return rc;
memset(&srvaddr, 0, sizeof(srvaddr));
@@ -575,13 +577,15 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr,
sizeof(srvaddr), 0);
- if (rc == 0)
+ if (!rc)
return 0;
- /* EADDRNOTAVAIL probably means we're already connected to the same
+ /*
+ * EADDRNOTAVAIL probably means we're already connected to the same
* peer/port on the same local port on a differently typed
* connection. Let our caller retry with a different local
- * port... */
+ * port...
+ */
*fatal = !(rc == -EADDRNOTAVAIL);
CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET,
@@ -591,4 +595,3 @@ lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip,
sock_release(*sockp);
return rc;
}
-EXPORT_SYMBOL(lnet_sock_connect);
diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c
index 2a137f46800f..468eda611bf8 100644
--- a/drivers/staging/lustre/lnet/lnet/lo.c
+++ b/drivers/staging/lustre/lnet/lnet/lo.c
@@ -46,15 +46,15 @@ lolnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
static int
lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
- int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+ int delayed, unsigned int niov,
+ struct kvec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
{
lnet_msg_t *sendmsg = private;
- if (lntmsg != NULL) { /* not discarding */
- if (sendmsg->msg_iov != NULL) {
- if (iov != NULL)
+ if (lntmsg) { /* not discarding */
+ if (sendmsg->msg_iov) {
+ if (iov)
lnet_copy_iov2iov(niov, iov, offset,
sendmsg->msg_niov,
sendmsg->msg_iov,
@@ -65,7 +65,7 @@ lolnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
sendmsg->msg_iov,
sendmsg->msg_offset, mlen);
} else {
- if (iov != NULL)
+ if (iov)
lnet_copy_kiov2iov(niov, iov, offset,
sendmsg->msg_niov,
sendmsg->msg_kiov,
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index c93c00752a4c..93037c1168ca 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -36,6 +36,7 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lib-dlc.h"
static int config_on_load;
module_param(config_on_load, int, 0444);
@@ -52,13 +53,21 @@ lnet_configure(void *arg)
mutex_lock(&lnet_config_mutex);
if (!the_lnet.ln_niinit_self) {
- rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
+ rc = try_module_get(THIS_MODULE);
+
+ if (rc != 1)
+ goto out;
+
+ rc = LNetNIInit(LNET_PID_LUSTRE);
if (rc >= 0) {
the_lnet.ln_niinit_self = 1;
rc = 0;
+ } else {
+ module_put(THIS_MODULE);
}
}
+out:
mutex_unlock(&lnet_config_mutex);
return rc;
}
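
The new try_module_get()/module_put() pairing pins the module for as long as the self-initialised network is up: the reference taken here is dropped either on the failure branch or later in lnet_unconfigure(). A kernel-style sketch of the pattern, with a hypothetical setup helper:

    #include <linux/errno.h>
    #include <linux/module.h>

    static int do_bring_up(void);                /* assumed setup step */

    static int example_bring_up(void)
    {
            if (!try_module_get(THIS_MODULE))    /* pin: no unload now */
                    return -ENODEV;

            if (do_bring_up() < 0) {
                    module_put(THIS_MODULE);     /* failure: drop the pin */
                    return -EIO;
            }
            /* success: the matching module_put() lives in the teardown path */
            return 0;
    }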
@@ -73,6 +82,7 @@ lnet_unconfigure(void)
if (the_lnet.ln_niinit_self) {
the_lnet.ln_niinit_self = 0;
LNetNIFini();
+ module_put(THIS_MODULE);
}
mutex_lock(&the_lnet.ln_api_mutex);
@@ -80,28 +90,93 @@ lnet_unconfigure(void)
mutex_unlock(&the_lnet.ln_api_mutex);
mutex_unlock(&lnet_config_mutex);
- return (refcount == 0) ? 0 : -EBUSY;
+ return !refcount ? 0 : -EBUSY;
}
static int
-lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
+lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr)
+{
+ struct lnet_ioctl_config_data *conf =
+ (struct lnet_ioctl_config_data *)hdr;
+ int rc;
+
+ if (conf->cfg_hdr.ioc_len < sizeof(*conf))
+ return -EINVAL;
+
+ mutex_lock(&lnet_config_mutex);
+ if (!the_lnet.ln_niinit_self) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ rc = lnet_dyn_add_ni(LNET_PID_LUSTRE,
+ conf->cfg_config_u.cfg_net.net_intf,
+ conf->cfg_config_u.cfg_net.net_peer_timeout,
+ conf->cfg_config_u.cfg_net.net_peer_tx_credits,
+ conf->cfg_config_u.cfg_net.net_peer_rtr_credits,
+ conf->cfg_config_u.cfg_net.net_max_tx_credits);
+out_unlock:
+ mutex_unlock(&lnet_config_mutex);
+
+ return rc;
+}
+
+static int
+lnet_dyn_unconfigure(struct libcfs_ioctl_hdr *hdr)
+{
+ struct lnet_ioctl_config_data *conf =
+ (struct lnet_ioctl_config_data *)hdr;
+ int rc;
+
+ if (conf->cfg_hdr.ioc_len < sizeof(*conf))
+ return -EINVAL;
+
+ mutex_lock(&lnet_config_mutex);
+ if (!the_lnet.ln_niinit_self) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ rc = lnet_dyn_del_ni(conf->cfg_net);
+out_unlock:
+ mutex_unlock(&lnet_config_mutex);
+
+ return rc;
+}
+
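
Both dynamic-config handlers validate the embedded length before trusting the cast, because the ioctl payload is sized by the caller. A userspace model of that check, with hypothetical structure names:

    #include <stdio.h>

    struct hdr { unsigned int ioc_len; };
    struct config { struct hdr cfg_hdr; int net; int timeout; };

    /* reject payloads shorter than the structure we are about to read */
    static int validate(const struct hdr *h)
    {
            return h->ioc_len < sizeof(struct config) ? -1 : 0;
    }

    int main(void)
    {
            struct config full = { { sizeof(full) }, 1, 30 };
            struct hdr short_hdr = { sizeof(short_hdr) };

            printf("full: %d, short: %d\n",
                   validate(&full.cfg_hdr), validate(&short_hdr));
            return 0;
    }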
+static int
+lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
{
int rc;
switch (cmd) {
- case IOC_LIBCFS_CONFIGURE:
+ case IOC_LIBCFS_CONFIGURE: {
+ struct libcfs_ioctl_data *data =
+ (struct libcfs_ioctl_data *)hdr;
+
+ if (data->ioc_hdr.ioc_len < sizeof(*data))
+ return -EINVAL;
+
+ the_lnet.ln_nis_from_mod_params = data->ioc_flags;
return lnet_configure(NULL);
+ }
case IOC_LIBCFS_UNCONFIGURE:
return lnet_unconfigure();
+ case IOC_LIBCFS_ADD_NET:
+ return lnet_dyn_configure(hdr);
+
+ case IOC_LIBCFS_DEL_NET:
+ return lnet_dyn_unconfigure(hdr);
+
default:
- /* Passing LNET_PID_ANY only gives me a ref if the net is up
+ /*
+ * Passing LNET_PID_ANY only gives me a ref if the net is up
* already; I'll need it to ensure the net can't go down while
- * I'm called into it */
+ * I'm called into it
+ */
rc = LNetNIInit(LNET_PID_ANY);
if (rc >= 0) {
- rc = LNetCtl(cmd, data);
+ rc = LNetCtl(cmd, hdr);
LNetNIFini();
}
return rc;
@@ -110,46 +185,46 @@ lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl);
-static int __init
-init_lnet(void)
+static int __init lnet_init(void)
{
int rc;
mutex_init(&lnet_config_mutex);
- rc = lnet_init();
- if (rc != 0) {
- CERROR("lnet_init: error %d\n", rc);
+ rc = lnet_lib_init();
+ if (rc) {
+ CERROR("lnet_lib_init: error %d\n", rc);
return rc;
}
rc = libcfs_register_ioctl(&lnet_ioctl_handler);
- LASSERT(rc == 0);
+ LASSERT(!rc);
if (config_on_load) {
- /* Have to schedule a separate thread to avoid deadlocking
- * in modload */
+ /*
+ * Have to schedule a separate thread to avoid deadlocking
+ * in modload
+ */
(void) kthread_run(lnet_configure, NULL, "lnet_initd");
}
return 0;
}
-static void __exit
-fini_lnet(void)
+static void __exit lnet_exit(void)
{
int rc;
rc = libcfs_deregister_ioctl(&lnet_ioctl_handler);
- LASSERT(rc == 0);
+ LASSERT(!rc);
- lnet_fini();
+ lnet_lib_exit();
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("LNet v3.1");
+MODULE_DESCRIPTION("Lustre Networking layer");
+MODULE_VERSION(LNET_VERSION);
MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.0");
-module_init(init_lnet);
-module_exit(fini_lnet);
+module_init(lnet_init);
+module_exit(lnet_exit);
diff --git a/drivers/staging/lustre/lnet/lnet/net_fault.c b/drivers/staging/lustre/lnet/lnet/net_fault.c
new file mode 100644
index 000000000000..7d76f28d3a7a
--- /dev/null
+++ b/drivers/staging/lustre/lnet/lnet/net_fault.c
@@ -0,0 +1,1025 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Seagate, Inc.
+ *
+ * lnet/lnet/net_fault.c
+ *
+ * Lustre network fault simulation
+ *
+ * Author: liang.zhen@intel.com
+ */
+
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lnetctl.h"
+
+#define LNET_MSG_MASK (LNET_PUT_BIT | LNET_ACK_BIT | \
+ LNET_GET_BIT | LNET_REPLY_BIT)
+
+struct lnet_drop_rule {
+ /** link chain on the_lnet.ln_drop_rules */
+ struct list_head dr_link;
+ /** attributes of this rule */
+ struct lnet_fault_attr dr_attr;
+ /** lock to protect \a dr_drop_at and \a dr_stat */
+ spinlock_t dr_lock;
+ /**
+ * the message sequence to drop, which means message is dropped when
+ * dr_stat.drs_count == dr_drop_at
+ */
+ unsigned long dr_drop_at;
+ /**
+ * seconds to drop the next message, it's exclusive with dr_drop_at
+ */
+ unsigned long dr_drop_time;
+ /** baseline to calculate dr_drop_time */
+ unsigned long dr_time_base;
+ /** statistic of dropped messages */
+ struct lnet_fault_stat dr_stat;
+};
+
+static bool
+lnet_fault_nid_match(lnet_nid_t nid, lnet_nid_t msg_nid)
+{
+ if (nid == msg_nid || nid == LNET_NID_ANY)
+ return true;
+
+ if (LNET_NIDNET(nid) != LNET_NIDNET(msg_nid))
+ return false;
+
+ /* 255.255.255.255@net is wildcard for all addresses in a network */
+ return LNET_NIDADDR(nid) == LNET_NIDADDR(LNET_NID_ANY);
+}
+
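
lnet_fault_nid_match() treats the address half of LNET_NID_ANY (all ones) as a per-network wildcard. A standalone model of the rule, assuming the usual NID layout of network number in the high 32 bits and address in the low 32:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NIDADDR(nid) ((uint32_t)((nid) & 0xffffffff))
    #define NIDNET(nid)  ((uint32_t)((nid) >> 32))
    #define NID_ANY      ((uint64_t)-1)

    static bool nid_match(uint64_t rule, uint64_t msg)
    {
            if (rule == msg || rule == NID_ANY)
                    return true;
            if (NIDNET(rule) != NIDNET(msg))
                    return false;
            /* 255.255.255.255@net matches every address on that net */
            return NIDADDR(rule) == NIDADDR(NID_ANY);
    }

    int main(void)
    {
            uint64_t net2_any = ((uint64_t)2 << 32) | 0xffffffff;

            printf("%d %d\n",
                   nid_match(net2_any, ((uint64_t)2 << 32) | 42),  /* 1 */
                   nid_match(net2_any, ((uint64_t)3 << 32) | 42)); /* 0 */
            return 0;
    }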
+static bool
+lnet_fault_attr_match(struct lnet_fault_attr *attr, lnet_nid_t src,
+ lnet_nid_t dst, unsigned int type, unsigned int portal)
+{
+ if (!lnet_fault_nid_match(attr->fa_src, src) ||
+ !lnet_fault_nid_match(attr->fa_dst, dst))
+ return false;
+
+ if (!(attr->fa_msg_mask & (1 << type)))
+ return false;
+
+ /**
+ * NB: ACK and REPLY have no portal, but they should have been
+ * rejected by message mask
+ */
+ if (attr->fa_ptl_mask && /* has portal filter */
+ !(attr->fa_ptl_mask & (1ULL << portal)))
+ return false;
+
+ return true;
+}
+
+static int
+lnet_fault_attr_validate(struct lnet_fault_attr *attr)
+{
+ if (!attr->fa_msg_mask)
+ attr->fa_msg_mask = LNET_MSG_MASK; /* all message types */
+
+ if (!attr->fa_ptl_mask) /* no portal filter */
+ return 0;
+
+ /* NB: only PUT and GET can be filtered if portal filter has been set */
+ attr->fa_msg_mask &= LNET_GET_BIT | LNET_PUT_BIT;
+ if (!attr->fa_msg_mask) {
+ CDEBUG(D_NET, "can't find valid message type bits %x\n",
+ attr->fa_msg_mask);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+lnet_fault_stat_inc(struct lnet_fault_stat *stat, unsigned int type)
+{
+ /* NB: fs_counter is NOT updated by this function */
+ switch (type) {
+ case LNET_MSG_PUT:
+ stat->fs_put++;
+ return;
+ case LNET_MSG_ACK:
+ stat->fs_ack++;
+ return;
+ case LNET_MSG_GET:
+ stat->fs_get++;
+ return;
+ case LNET_MSG_REPLY:
+ stat->fs_reply++;
+ return;
+ }
+}
+
+/**
+ * LNet message drop simulation
+ */
+
+/**
+ * Add a new drop rule to LNet
+ * There is no check for duplicate drop rules; every rule will be checked
+ * against each incoming message.
+ */
+static int
+lnet_drop_rule_add(struct lnet_fault_attr *attr)
+{
+ struct lnet_drop_rule *rule;
+
+ if (attr->u.drop.da_rate & attr->u.drop.da_interval) {
+ CDEBUG(D_NET, "please provide either drop rate or drop interval, but not both at the same time %d/%d\n",
+ attr->u.drop.da_rate, attr->u.drop.da_interval);
+ return -EINVAL;
+ }
+
+ if (lnet_fault_attr_validate(attr))
+ return -EINVAL;
+
+ CFS_ALLOC_PTR(rule);
+ if (!rule)
+ return -ENOMEM;
+
+ spin_lock_init(&rule->dr_lock);
+
+ rule->dr_attr = *attr;
+ if (attr->u.drop.da_interval) {
+ rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
+ rule->dr_drop_time = cfs_time_shift(cfs_rand() %
+ attr->u.drop.da_interval);
+ } else {
+ rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+ }
+
+ lnet_net_lock(LNET_LOCK_EX);
+ list_add(&rule->dr_link, &the_lnet.ln_drop_rules);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ CDEBUG(D_NET, "Added drop rule: src %s, dst %s, rate %d, interval %d\n",
+ libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
+ attr->u.drop.da_rate, attr->u.drop.da_interval);
+ return 0;
+}
+
+/**
+ * Remove matched drop rules from LNet; all rules matching \a src and
+ * \a dst will be removed.
+ * If \a src is zero, all rules with \a dst as destination are removed.
+ * If \a dst is zero, all rules with \a src as source are removed.
+ * If both are zero, all rules are removed.
+ */
+static int
+lnet_drop_rule_del(lnet_nid_t src, lnet_nid_t dst)
+{
+ struct lnet_drop_rule *rule;
+ struct lnet_drop_rule *tmp;
+ struct list_head zombies;
+ int n = 0;
+
+ INIT_LIST_HEAD(&zombies);
+
+ lnet_net_lock(LNET_LOCK_EX);
+ list_for_each_entry_safe(rule, tmp, &the_lnet.ln_drop_rules, dr_link) {
+ if (rule->dr_attr.fa_src != src && src)
+ continue;
+
+ if (rule->dr_attr.fa_dst != dst && dst)
+ continue;
+
+ list_move(&rule->dr_link, &zombies);
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ list_for_each_entry_safe(rule, tmp, &zombies, dr_link) {
+ CDEBUG(D_NET, "Remove drop rule: src %s->dst: %s (1/%d, %d)\n",
+ libcfs_nid2str(rule->dr_attr.fa_src),
+ libcfs_nid2str(rule->dr_attr.fa_dst),
+ rule->dr_attr.u.drop.da_rate,
+ rule->dr_attr.u.drop.da_interval);
+
+ list_del(&rule->dr_link);
+ CFS_FREE_PTR(rule);
+ n++;
+ }
+
+ return n;
+}
+
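
Note that the removal path never frees under the net lock: matching rules are unlinked onto a private zombie list while locked, and all logging and freeing happen after the lock is dropped. A kernel-style sketch of the idiom, with an illustrative rule type:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct my_rule {                        /* illustrative only */
            struct list_head link;
    };

    static int rule_remove_matching(struct list_head *rules, spinlock_t *lock,
                                    bool (*match)(struct my_rule *))
    {
            struct my_rule *r, *tmp;
            LIST_HEAD(zombies);
            int n = 0;

            spin_lock(lock);                /* unlink matches while locked */
            list_for_each_entry_safe(r, tmp, rules, link)
                    if (match(r))
                            list_move(&r->link, &zombies);
            spin_unlock(lock);

            /* log and free with the lock dropped */
            list_for_each_entry_safe(r, tmp, &zombies, link) {
                    list_del(&r->link);
                    kfree(r);
                    n++;
            }
            return n;
    }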
+/**
+ * List the drop rule at position \a pos
+ */
+static int
+lnet_drop_rule_list(int pos, struct lnet_fault_attr *attr,
+ struct lnet_fault_stat *stat)
+{
+ struct lnet_drop_rule *rule;
+ int cpt;
+ int i = 0;
+ int rc = -ENOENT;
+
+ cpt = lnet_net_lock_current();
+ list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
+ if (i++ < pos)
+ continue;
+
+ spin_lock(&rule->dr_lock);
+ *attr = rule->dr_attr;
+ *stat = rule->dr_stat;
+ spin_unlock(&rule->dr_lock);
+ rc = 0;
+ break;
+ }
+
+ lnet_net_unlock(cpt);
+ return rc;
+}
+
+/**
+ * reset counters for all drop rules
+ */
+static void
+lnet_drop_rule_reset(void)
+{
+ struct lnet_drop_rule *rule;
+ int cpt;
+
+ cpt = lnet_net_lock_current();
+
+ list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
+ struct lnet_fault_attr *attr = &rule->dr_attr;
+
+ spin_lock(&rule->dr_lock);
+
+ memset(&rule->dr_stat, 0, sizeof(rule->dr_stat));
+ if (attr->u.drop.da_rate) {
+ rule->dr_drop_at = cfs_rand() % attr->u.drop.da_rate;
+ } else {
+ rule->dr_drop_time = cfs_time_shift(cfs_rand() %
+ attr->u.drop.da_interval);
+ rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
+ }
+ spin_unlock(&rule->dr_lock);
+ }
+
+ lnet_net_unlock(cpt);
+}
+
+/**
+ * check source/destination NID, portal, message type and drop rate,
+ * then decide whether this message should be dropped
+ */
+static bool
+drop_rule_match(struct lnet_drop_rule *rule, lnet_nid_t src,
+ lnet_nid_t dst, unsigned int type, unsigned int portal)
+{
+ struct lnet_fault_attr *attr = &rule->dr_attr;
+ bool drop;
+
+ if (!lnet_fault_attr_match(attr, src, dst, type, portal))
+ return false;
+
+ /* match this rule, check drop rate now */
+ spin_lock(&rule->dr_lock);
+ if (rule->dr_drop_time) { /* time based drop */
+ unsigned long now = cfs_time_current();
+
+ rule->dr_stat.fs_count++;
+ drop = cfs_time_aftereq(now, rule->dr_drop_time);
+ if (drop) {
+ if (cfs_time_after(now, rule->dr_time_base))
+ rule->dr_time_base = now;
+
+ rule->dr_drop_time = rule->dr_time_base +
+ cfs_time_seconds(cfs_rand() %
+ attr->u.drop.da_interval);
+ rule->dr_time_base += cfs_time_seconds(attr->u.drop.da_interval);
+
+ CDEBUG(D_NET, "Drop Rule %s->%s: next drop : %lu\n",
+ libcfs_nid2str(attr->fa_src),
+ libcfs_nid2str(attr->fa_dst),
+ rule->dr_drop_time);
+ }
+
+ } else { /* rate based drop */
+ drop = rule->dr_stat.fs_count++ == rule->dr_drop_at;
+
+ if (!do_div(rule->dr_stat.fs_count, attr->u.drop.da_rate)) {
+ rule->dr_drop_at = rule->dr_stat.fs_count +
+ cfs_rand() % attr->u.drop.da_rate;
+ CDEBUG(D_NET, "Drop Rule %s->%s: next drop: %lu\n",
+ libcfs_nid2str(attr->fa_src),
+ libcfs_nid2str(attr->fa_dst), rule->dr_drop_at);
+ }
+ }
+
+ if (drop) { /* drop this message, update counters */
+ lnet_fault_stat_inc(&rule->dr_stat, type);
+ rule->dr_stat.u.drop.ds_dropped++;
+ }
+
+ spin_unlock(&rule->dr_lock);
+ return drop;
+}
+
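
The rate-based arm drops exactly one message, at a random position, within every window of da_rate messages: the counter advances per message, and at each window boundary a fresh random target is picked inside the next window. A userspace model of the intended semantics (using a plain modulo for the window test):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned long count = 0, drop_at, rate = 10, i;

            srand(1);
            drop_at = rand() % rate;        /* target in the first window */
            for (i = 0; i < 30; i++) {
                    int drop = (count++ == drop_at);

                    if (count % rate == 0)  /* window done: pick the next */
                            drop_at = count + rand() % rate;
                    if (drop)
                            printf("drop message %lu\n", i);
            }
            return 0;
    }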
+/**
+ * Check if a message from \a src to \a dst can match any existing drop rule
+ */
+bool
+lnet_drop_rule_match(lnet_hdr_t *hdr)
+{
+ struct lnet_drop_rule *rule;
+ lnet_nid_t src = le64_to_cpu(hdr->src_nid);
+ lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
+ unsigned int typ = le32_to_cpu(hdr->type);
+ unsigned int ptl = -1;
+ bool drop = false;
+ int cpt;
+
+ /**
+ * NB: if Portal is specified, then only PUT and GET will be
+ * filtered by drop rule
+ */
+ if (typ == LNET_MSG_PUT)
+ ptl = le32_to_cpu(hdr->msg.put.ptl_index);
+ else if (typ == LNET_MSG_GET)
+ ptl = le32_to_cpu(hdr->msg.get.ptl_index);
+
+ cpt = lnet_net_lock_current();
+ list_for_each_entry(rule, &the_lnet.ln_drop_rules, dr_link) {
+ drop = drop_rule_match(rule, src, dst, typ, ptl);
+ if (drop)
+ break;
+ }
+
+ lnet_net_unlock(cpt);
+ return drop;
+}
+
+/**
+ * LNet Delay Simulation
+ */
+/** timestamp (second) to send delayed message */
+#define msg_delay_send msg_ev.hdr_data
+
+struct lnet_delay_rule {
+ /** link chain on the_lnet.ln_delay_rules */
+ struct list_head dl_link;
+ /** link chain on delay_dd.dd_sched_rules */
+ struct list_head dl_sched_link;
+ /** attributes of this rule */
+ struct lnet_fault_attr dl_attr;
+ /** lock to protect \a below members */
+ spinlock_t dl_lock;
+ /** refcount of delay rule */
+ atomic_t dl_refcount;
+ /**
+ * the message sequence to delay, which means message is delayed when
+ * dl_stat.fs_count == dl_delay_at
+ */
+ unsigned long dl_delay_at;
+ /**
+ * seconds to delay the next message, it's exclusive with dl_delay_at
+ */
+ unsigned long dl_delay_time;
+ /** baseline to calculate dl_delay_time */
+ unsigned long dl_time_base;
+ /** jiffies to send the next delayed message */
+ unsigned long dl_msg_send;
+ /** delayed message list */
+ struct list_head dl_msg_list;
+ /** statistic of delayed messages */
+ struct lnet_fault_stat dl_stat;
+ /** timer to wakeup delay_daemon */
+ struct timer_list dl_timer;
+};
+
+struct delay_daemon_data {
+ /** serialise rule add/remove */
+ struct mutex dd_mutex;
+ /** protect rules on \a dd_sched_rules */
+ spinlock_t dd_lock;
+ /** scheduled delay rules (by timer) */
+ struct list_head dd_sched_rules;
+ /** daemon thread sleeps here */
+ wait_queue_head_t dd_waitq;
+ /** controller (lctl command) waits here */
+ wait_queue_head_t dd_ctl_waitq;
+ /** daemon is running */
+ unsigned int dd_running;
+ /** daemon stopped */
+ unsigned int dd_stopped;
+};
+
+static struct delay_daemon_data delay_dd;
+
+static unsigned long
+round_timeout(unsigned long timeout)
+{
+ return cfs_time_seconds((unsigned int)
+ cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
+}
+
+static void
+delay_rule_decref(struct lnet_delay_rule *rule)
+{
+ if (atomic_dec_and_test(&rule->dl_refcount)) {
+ LASSERT(list_empty(&rule->dl_sched_link));
+ LASSERT(list_empty(&rule->dl_msg_list));
+ LASSERT(list_empty(&rule->dl_link));
+
+ CFS_FREE_PTR(rule);
+ }
+}
+
+/**
+ * check source/destination NID, portal, message type and delay rate,
+ * then decide whether this message should be delayed
+ */
+static bool
+delay_rule_match(struct lnet_delay_rule *rule, lnet_nid_t src,
+ lnet_nid_t dst, unsigned int type, unsigned int portal,
+ struct lnet_msg *msg)
+{
+ struct lnet_fault_attr *attr = &rule->dl_attr;
+ bool delay;
+
+ if (!lnet_fault_attr_match(attr, src, dst, type, portal))
+ return false;
+
+ /* match this rule, check delay rate now */
+ spin_lock(&rule->dl_lock);
+ if (rule->dl_delay_time) { /* time based delay */
+ unsigned long now = cfs_time_current();
+
+ rule->dl_stat.fs_count++;
+ delay = cfs_time_aftereq(now, rule->dl_delay_time);
+ if (delay) {
+ if (cfs_time_after(now, rule->dl_time_base))
+ rule->dl_time_base = now;
+
+ rule->dl_delay_time = rule->dl_time_base +
+ cfs_time_seconds(cfs_rand() %
+ attr->u.delay.la_interval);
+ rule->dl_time_base += cfs_time_seconds(attr->u.delay.la_interval);
+
+ CDEBUG(D_NET, "Delay Rule %s->%s: next delay : %lu\n",
+ libcfs_nid2str(attr->fa_src),
+ libcfs_nid2str(attr->fa_dst),
+ rule->dl_delay_time);
+ }
+
+ } else { /* rate based delay */
+ delay = rule->dl_stat.fs_count++ == rule->dl_delay_at;
+ /* generate the next random rate sequence */
+ if (!do_div(rule->dl_stat.fs_count, attr->u.delay.la_rate)) {
+ rule->dl_delay_at = rule->dl_stat.fs_count +
+ cfs_rand() % attr->u.delay.la_rate;
+ CDEBUG(D_NET, "Delay Rule %s->%s: next delay: %lu\n",
+ libcfs_nid2str(attr->fa_src),
+ libcfs_nid2str(attr->fa_dst), rule->dl_delay_at);
+ }
+ }
+
+ if (!delay) {
+ spin_unlock(&rule->dl_lock);
+ return false;
+ }
+
+ /* delay this message, update counters */
+ lnet_fault_stat_inc(&rule->dl_stat, type);
+ rule->dl_stat.u.delay.ls_delayed++;
+
+ list_add_tail(&msg->msg_list, &rule->dl_msg_list);
+ msg->msg_delay_send = round_timeout(
+ cfs_time_shift(attr->u.delay.la_latency));
+ if (rule->dl_msg_send == -1) {
+ rule->dl_msg_send = msg->msg_delay_send;
+ mod_timer(&rule->dl_timer, rule->dl_msg_send);
+ }
+
+ spin_unlock(&rule->dl_lock);
+ return true;
+}
+
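
The timer is armed only when a message lands on an otherwise idle rule: dl_msg_send doubles as a "timer idle" sentinel (-1) and otherwise holds the expiry of the earliest queued message. A kernel-style sketch of the enqueue side, with illustrative types:

    #include <linux/list.h>
    #include <linux/timer.h>

    struct my_rule {                        /* illustrative only */
            struct list_head msg_list;
            unsigned long next_send;        /* -1UL when the timer is idle */
            struct timer_list timer;
    };

    struct my_msg {
            struct list_head link;
            unsigned long send_at;
    };

    static void queue_delayed(struct my_rule *rule, struct my_msg *msg,
                              unsigned long deadline)
    {
            msg->send_at = deadline;
            list_add_tail(&msg->link, &rule->msg_list);
            if (rule->next_send == -1UL) {  /* first message: arm the timer */
                    rule->next_send = deadline;
                    mod_timer(&rule->timer, deadline);
            }
    }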
+/**
+ * check if \a msg matches any delay rule; reception of this message
+ * will be delayed if there is a match
+ */
+bool
+lnet_delay_rule_match_locked(lnet_hdr_t *hdr, struct lnet_msg *msg)
+{
+ struct lnet_delay_rule *rule;
+ lnet_nid_t src = le64_to_cpu(hdr->src_nid);
+ lnet_nid_t dst = le64_to_cpu(hdr->dest_nid);
+ unsigned int typ = le32_to_cpu(hdr->type);
+ unsigned int ptl = -1;
+
+ /* NB: called with hold of lnet_net_lock */
+
+ /**
+ * NB: if Portal is specified, then only PUT and GET will be
+ * filtered by delay rule
+ */
+ if (typ == LNET_MSG_PUT)
+ ptl = le32_to_cpu(hdr->msg.put.ptl_index);
+ else if (typ == LNET_MSG_GET)
+ ptl = le32_to_cpu(hdr->msg.get.ptl_index);
+
+ list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
+ if (delay_rule_match(rule, src, dst, typ, ptl, msg))
+ return true;
+ }
+
+ return false;
+}
+
+/** check out delayed messages for send */
+static void
+delayed_msg_check(struct lnet_delay_rule *rule, bool all,
+ struct list_head *msg_list)
+{
+ struct lnet_msg *msg;
+ struct lnet_msg *tmp;
+ unsigned long now = cfs_time_current();
+
+ if (!all && rule->dl_msg_send > now)
+ return;
+
+ spin_lock(&rule->dl_lock);
+ list_for_each_entry_safe(msg, tmp, &rule->dl_msg_list, msg_list) {
+ if (!all && msg->msg_delay_send > now)
+ break;
+
+ msg->msg_delay_send = 0;
+ list_move_tail(&msg->msg_list, msg_list);
+ }
+
+ if (list_empty(&rule->dl_msg_list)) {
+ del_timer(&rule->dl_timer);
+ rule->dl_msg_send = -1;
+
+ } else if (!list_empty(msg_list)) {
+ /*
+ * dequeued some timed-out messages; update the timer for the
+ * next delayed message on this rule
+ */
+ msg = list_entry(rule->dl_msg_list.next,
+ struct lnet_msg, msg_list);
+ rule->dl_msg_send = msg->msg_delay_send;
+ mod_timer(&rule->dl_timer, rule->dl_msg_send);
+ }
+ spin_unlock(&rule->dl_lock);
+}
+
+static void
+delayed_msg_process(struct list_head *msg_list, bool drop)
+{
+ struct lnet_msg *msg;
+
+ while (!list_empty(msg_list)) {
+ struct lnet_ni *ni;
+ int cpt;
+ int rc;
+
+ msg = list_entry(msg_list->next, struct lnet_msg, msg_list);
+ LASSERT(msg->msg_rxpeer);
+
+ ni = msg->msg_rxpeer->lp_ni;
+ cpt = msg->msg_rx_cpt;
+
+ list_del_init(&msg->msg_list);
+ if (drop) {
+ rc = -ECANCELED;
+
+ } else if (!msg->msg_routing) {
+ rc = lnet_parse_local(ni, msg);
+ if (!rc)
+ continue;
+
+ } else {
+ lnet_net_lock(cpt);
+ rc = lnet_parse_forward_locked(ni, msg);
+ lnet_net_unlock(cpt);
+
+ switch (rc) {
+ case LNET_CREDIT_OK:
+ lnet_ni_recv(ni, msg->msg_private, msg, 0,
+ 0, msg->msg_len, msg->msg_len);
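+ /* fall through */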
+ case LNET_CREDIT_WAIT:
+ continue;
+ default: /* failures */
+ break;
+ }
+ }
+
+ lnet_drop_message(ni, cpt, msg->msg_private, msg->msg_len);
+ lnet_finalize(ni, msg, rc);
+ }
+}
+
+/**
+ * Process delayed messages for scheduled rules
+ * This function can be called either by the delay rule daemon or by lnet_finalize()
+ */
+void
+lnet_delay_rule_check(void)
+{
+ struct lnet_delay_rule *rule;
+ struct list_head msgs;
+
+ INIT_LIST_HEAD(&msgs);
+ while (1) {
+ if (list_empty(&delay_dd.dd_sched_rules))
+ break;
+
+ spin_lock_bh(&delay_dd.dd_lock);
+ if (list_empty(&delay_dd.dd_sched_rules)) {
+ spin_unlock_bh(&delay_dd.dd_lock);
+ break;
+ }
+
+ rule = list_entry(delay_dd.dd_sched_rules.next,
+ struct lnet_delay_rule, dl_sched_link);
+ list_del_init(&rule->dl_sched_link);
+ spin_unlock_bh(&delay_dd.dd_lock);
+
+ delayed_msg_check(rule, false, &msgs);
+ delay_rule_decref(rule); /* -1 for delay_dd.dd_sched_rules */
+ }
+
+ if (!list_empty(&msgs))
+ delayed_msg_process(&msgs, false);
+}
+
+/** daemon thread to handle delayed messages */
+static int
+lnet_delay_rule_daemon(void *arg)
+{
+ delay_dd.dd_running = 1;
+ wake_up(&delay_dd.dd_ctl_waitq);
+
+ while (delay_dd.dd_running) {
+ wait_event_interruptible(delay_dd.dd_waitq,
+ !delay_dd.dd_running ||
+ !list_empty(&delay_dd.dd_sched_rules));
+ lnet_delay_rule_check();
+ }
+
+ /* in case more rules have been enqueued after my last check */
+ lnet_delay_rule_check();
+ delay_dd.dd_stopped = 1;
+ wake_up(&delay_dd.dd_ctl_waitq);
+
+ return 0;
+}
+
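
The start/stop handshake relies on two flags and a control waitqueue: the creator waits for dd_running, the stopper clears it and waits for dd_stopped, and the thread makes one final sweep for rules scheduled after its last check. The skeleton, reduced to illustrative globals and an assumed work handler:

    #include <linux/kthread.h>
    #include <linux/wait.h>

    static int running, stopped, work_pending;
    static wait_queue_head_t waitq, ctl_waitq;

    static void process_work(void)          /* assumed work handler */
    {
    }

    static int example_daemon(void *arg)
    {
            running = 1;
            wake_up(&ctl_waitq);            /* creator waits for startup */

            while (running) {
                    wait_event_interruptible(waitq,
                                             !running || work_pending);
                    process_work();
            }
            process_work();                 /* drain late arrivals */
            stopped = 1;
            wake_up(&ctl_waitq);            /* unblock the stopper */
            return 0;
    }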
+static void
+delay_timer_cb(unsigned long arg)
+{
+ struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+
+ spin_lock_bh(&delay_dd.dd_lock);
+ if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
+ atomic_inc(&rule->dl_refcount);
+ list_add_tail(&rule->dl_sched_link, &delay_dd.dd_sched_rules);
+ wake_up(&delay_dd.dd_waitq);
+ }
+ spin_unlock_bh(&delay_dd.dd_lock);
+}
+
+/**
+ * Add a new delay rule to LNet
+ * There is no check for duplicate delay rules; every rule will be checked
+ * against each incoming message.
+ */
+int
+lnet_delay_rule_add(struct lnet_fault_attr *attr)
+{
+ struct lnet_delay_rule *rule;
+ int rc = 0;
+
+ if (attr->u.delay.la_rate & attr->u.delay.la_interval) {
+ CDEBUG(D_NET, "please provide either delay rate or delay interval, but not both at the same time %d/%d\n",
+ attr->u.delay.la_rate, attr->u.delay.la_interval);
+ return -EINVAL;
+ }
+
+ if (!attr->u.delay.la_latency) {
+ CDEBUG(D_NET, "delay latency cannot be zero\n");
+ return -EINVAL;
+ }
+
+ if (lnet_fault_attr_validate(attr))
+ return -EINVAL;
+
+ CFS_ALLOC_PTR(rule);
+ if (!rule)
+ return -ENOMEM;
+
+ mutex_lock(&delay_dd.dd_mutex);
+ if (!delay_dd.dd_running) {
+ struct task_struct *task;
+
+ /**
+ * NB: although LND threads will process delayed messages
+ * in lnet_finalize, there is no guarantee that they will
+ * be woken up if no other message needs to be handled.
+ * There is only one daemon thread; performance is not a
+ * concern of this simulation module.
+ */
+ task = kthread_run(lnet_delay_rule_daemon, NULL, "lnet_dd");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ goto failed;
+ }
+ wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
+ }
+
+ init_timer(&rule->dl_timer);
+ rule->dl_timer.function = delay_timer_cb;
+ rule->dl_timer.data = (unsigned long)rule;
+
+ spin_lock_init(&rule->dl_lock);
+ INIT_LIST_HEAD(&rule->dl_msg_list);
+ INIT_LIST_HEAD(&rule->dl_sched_link);
+
+ rule->dl_attr = *attr;
+ if (attr->u.delay.la_interval) {
+ rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
+ rule->dl_delay_time = cfs_time_shift(cfs_rand() %
+ attr->u.delay.la_interval);
+ } else {
+ rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+ }
+
+ rule->dl_msg_send = -1;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ atomic_set(&rule->dl_refcount, 1);
+ list_add(&rule->dl_link, &the_lnet.ln_delay_rules);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ CDEBUG(D_NET, "Added delay rule: src %s, dst %s, rate %d\n",
+ libcfs_nid2str(attr->fa_src), libcfs_nid2str(attr->fa_dst),
+ attr->u.delay.la_rate);
+
+ mutex_unlock(&delay_dd.dd_mutex);
+ return 0;
+failed:
+ mutex_unlock(&delay_dd.dd_mutex);
+ CFS_FREE_PTR(rule);
+ return rc;
+}
+
+/**
+ * Remove matched delay rules from LNet. If \a shutdown is true or both
+ * \a src and \a dst are zero, all rules are removed; otherwise only
+ * matching rules are removed.
+ * If \a src is zero, all rules with \a dst as destination are removed.
+ * If \a dst is zero, all rules with \a src as source are removed.
+ *
+ * When a delay rule is removed, all delayed messages of this rule will be
+ * processed immediately.
+ */
+int
+lnet_delay_rule_del(lnet_nid_t src, lnet_nid_t dst, bool shutdown)
+{
+ struct lnet_delay_rule *rule;
+ struct lnet_delay_rule *tmp;
+ struct list_head rule_list;
+ struct list_head msg_list;
+ int n = 0;
+ bool cleanup;
+
+ INIT_LIST_HEAD(&rule_list);
+ INIT_LIST_HEAD(&msg_list);
+
+ if (shutdown) {
+ src = 0;
+ dst = 0;
+ }
+
+ mutex_lock(&delay_dd.dd_mutex);
+ lnet_net_lock(LNET_LOCK_EX);
+
+ list_for_each_entry_safe(rule, tmp, &the_lnet.ln_delay_rules, dl_link) {
+ if (rule->dl_attr.fa_src != src && src)
+ continue;
+
+ if (rule->dl_attr.fa_dst != dst && dst)
+ continue;
+
+ CDEBUG(D_NET, "Remove delay rule: src %s->dst: %s (1/%d, %d)\n",
+ libcfs_nid2str(rule->dl_attr.fa_src),
+ libcfs_nid2str(rule->dl_attr.fa_dst),
+ rule->dl_attr.u.delay.la_rate,
+ rule->dl_attr.u.delay.la_interval);
+ /* refcount is taken over by rule_list */
+ list_move(&rule->dl_link, &rule_list);
+ }
+
+ /* check if we need to shutdown delay_daemon */
+ cleanup = list_empty(&the_lnet.ln_delay_rules) &&
+ !list_empty(&rule_list);
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ list_for_each_entry_safe(rule, tmp, &rule_list, dl_link) {
+ list_del_init(&rule->dl_link);
+
+ del_timer_sync(&rule->dl_timer);
+ delayed_msg_check(rule, true, &msg_list);
+ delay_rule_decref(rule); /* -1 for the_lnet.ln_delay_rules */
+ n++;
+ }
+
+ if (cleanup) { /* no more delay rule, shutdown delay_daemon */
+ LASSERT(delay_dd.dd_running);
+ delay_dd.dd_running = 0;
+ wake_up(&delay_dd.dd_waitq);
+
+ while (!delay_dd.dd_stopped)
+ wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_stopped);
+ }
+ mutex_unlock(&delay_dd.dd_mutex);
+
+ if (!list_empty(&msg_list))
+ delayed_msg_process(&msg_list, shutdown);
+
+ return n;
+}
+
+/**
+ * List the delay rule at position \a pos
+ */
+int
+lnet_delay_rule_list(int pos, struct lnet_fault_attr *attr,
+ struct lnet_fault_stat *stat)
+{
+ struct lnet_delay_rule *rule;
+ int cpt;
+ int i = 0;
+ int rc = -ENOENT;
+
+ cpt = lnet_net_lock_current();
+ list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
+ if (i++ < pos)
+ continue;
+
+ spin_lock(&rule->dl_lock);
+ *attr = rule->dl_attr;
+ *stat = rule->dl_stat;
+ spin_unlock(&rule->dl_lock);
+ rc = 0;
+ break;
+ }
+
+ lnet_net_unlock(cpt);
+ return rc;
+}
+
+/**
+ * reset counters for all Delay Rules
+ */
+void
+lnet_delay_rule_reset(void)
+{
+ struct lnet_delay_rule *rule;
+ int cpt;
+
+ cpt = lnet_net_lock_current();
+
+ list_for_each_entry(rule, &the_lnet.ln_delay_rules, dl_link) {
+ struct lnet_fault_attr *attr = &rule->dl_attr;
+
+ spin_lock(&rule->dl_lock);
+
+ memset(&rule->dl_stat, 0, sizeof(rule->dl_stat));
+ if (attr->u.delay.la_rate) {
+ rule->dl_delay_at = cfs_rand() % attr->u.delay.la_rate;
+ } else {
+ rule->dl_delay_time = cfs_time_shift(cfs_rand() %
+ attr->u.delay.la_interval);
+ rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
+ }
+ spin_unlock(&rule->dl_lock);
+ }
+
+ lnet_net_unlock(cpt);
+}
+
+int
+lnet_fault_ctl(int opc, struct libcfs_ioctl_data *data)
+{
+ struct lnet_fault_attr *attr;
+ struct lnet_fault_stat *stat;
+
+ attr = (struct lnet_fault_attr *)data->ioc_inlbuf1;
+
+ switch (opc) {
+ default:
+ return -EINVAL;
+
+ case LNET_CTL_DROP_ADD:
+ if (!attr)
+ return -EINVAL;
+
+ return lnet_drop_rule_add(attr);
+
+ case LNET_CTL_DROP_DEL:
+ if (!attr)
+ return -EINVAL;
+
+ data->ioc_count = lnet_drop_rule_del(attr->fa_src,
+ attr->fa_dst);
+ return 0;
+
+ case LNET_CTL_DROP_RESET:
+ lnet_drop_rule_reset();
+ return 0;
+
+ case LNET_CTL_DROP_LIST:
+ stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
+ if (!attr || !stat)
+ return -EINVAL;
+
+ return lnet_drop_rule_list(data->ioc_count, attr, stat);
+
+ case LNET_CTL_DELAY_ADD:
+ if (!attr)
+ return -EINVAL;
+
+ return lnet_delay_rule_add(attr);
+
+ case LNET_CTL_DELAY_DEL:
+ if (!attr)
+ return -EINVAL;
+
+ data->ioc_count = lnet_delay_rule_del(attr->fa_src,
+ attr->fa_dst, false);
+ return 0;
+
+ case LNET_CTL_DELAY_RESET:
+ lnet_delay_rule_reset();
+ return 0;
+
+ case LNET_CTL_DELAY_LIST:
+ stat = (struct lnet_fault_stat *)data->ioc_inlbuf2;
+ if (!attr || !stat)
+ return -EINVAL;
+
+ return lnet_delay_rule_list(data->ioc_count, attr, stat);
+ }
+}
+
+int
+lnet_fault_init(void)
+{
+ CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT);
+ CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK);
+ CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET);
+ CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY);
+
+ mutex_init(&delay_dd.dd_mutex);
+ spin_lock_init(&delay_dd.dd_lock);
+ init_waitqueue_head(&delay_dd.dd_waitq);
+ init_waitqueue_head(&delay_dd.dd_ctl_waitq);
+ INIT_LIST_HEAD(&delay_dd.dd_sched_rules);
+
+ return 0;
+}
+
+void
+lnet_fault_fini(void)
+{
+ lnet_drop_rule_del(0, 0);
+ lnet_delay_rule_del(0, 0, true);
+
+ LASSERT(list_empty(&the_lnet.ln_drop_rules));
+ LASSERT(list_empty(&the_lnet.ln_delay_rules));
+ LASSERT(list_empty(&delay_dd.dd_sched_rules));
+}
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index 80f585afa259..ebf468fbc64f 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -170,7 +170,7 @@ parse_addrange(const struct cfs_lstr *src, struct nidrange *nidrange)
}
LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
- if (addrrange == NULL)
+ if (!addrrange)
return -ENOMEM;
list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);
@@ -203,16 +203,18 @@ add_nidrange(const struct cfs_lstr *src,
return NULL;
nf = libcfs_namenum2netstrfns(src->ls_str);
- if (nf == NULL)
+ if (!nf)
return NULL;
endlen = src->ls_len - strlen(nf->nf_name);
- if (endlen == 0)
+ if (!endlen)
/* network name only, e.g. "elan" or "tcp" */
netnum = 0;
else {
- /* e.g. "elan25" or "tcp23", refuse to parse if
+ /*
+ * e.g. "elan25" or "tcp23", refuse to parse if
* network name is not appended with decimal or
- * hexadecimal number */
+ * hexadecimal number
+ */
if (!cfs_str2num_check(src->ls_str + strlen(nf->nf_name),
endlen, &netnum, 0, MAX_NUMERIC_VALUE))
return NULL;
@@ -227,7 +229,7 @@ add_nidrange(const struct cfs_lstr *src,
}
LIBCFS_ALLOC(nr, sizeof(struct nidrange));
- if (nr == NULL)
+ if (!nr)
return NULL;
list_add_tail(&nr->nr_link, nidlist);
INIT_LIST_HEAD(&nr->nr_addrranges);
@@ -253,22 +255,21 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
struct nidrange *nr;
tmp = *src;
- if (cfs_gettok(src, '@', &addrrange) == 0)
+ if (!cfs_gettok(src, '@', &addrrange))
goto failed;
- if (cfs_gettok(src, '@', &net) == 0 || src->ls_str != NULL)
+ if (!cfs_gettok(src, '@', &net) || src->ls_str)
goto failed;
nr = add_nidrange(&net, nidlist);
- if (nr == NULL)
+ if (!nr)
goto failed;
- if (parse_addrange(&addrrange, nr) != 0)
+ if (parse_addrange(&addrrange, nr))
goto failed;
return 1;
failed:
- CWARN("can't parse nidrange: \"%.*s\"\n", tmp.ls_len, tmp.ls_str);
return 0;
}
@@ -342,12 +343,12 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
INIT_LIST_HEAD(nidlist);
while (src.ls_str) {
rc = cfs_gettok(&src, ' ', &res);
- if (rc == 0) {
+ if (!rc) {
cfs_free_nidlist(nidlist);
return 0;
}
rc = parse_nidrange(&res, nidlist);
- if (rc == 0) {
+ if (!rc) {
cfs_free_nidlist(nidlist);
return 0;
}
@@ -378,7 +379,7 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
return 1;
list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
- &ar->ar_numaddr_ranges))
+ &ar->ar_numaddr_ranges))
return 1;
}
return 0;
@@ -395,7 +396,7 @@ cfs_print_network(char *buffer, int count, struct nidrange *nr)
{
struct netstrfns *nf = nr->nr_netstrfns;
- if (nr->nr_netnum == 0)
+ if (!nr->nr_netnum)
return scnprintf(buffer, count, "@%s", nf->nf_name);
else
return scnprintf(buffer, count, "@%s%u",
@@ -417,7 +418,7 @@ cfs_print_addrranges(char *buffer, int count, struct list_head *addrranges,
struct netstrfns *nf = nr->nr_netstrfns;
list_for_each_entry(ar, addrranges, ar_link) {
- if (i != 0)
+ if (i)
i += scnprintf(buffer + i, count - i, " ");
i += nf->nf_print_addrlist(buffer + i, count - i,
&ar->ar_numaddr_ranges);
@@ -442,10 +443,10 @@ int cfs_print_nidlist(char *buffer, int count, struct list_head *nidlist)
return 0;
list_for_each_entry(nr, nidlist, nr_link) {
- if (i != 0)
+ if (i)
i += scnprintf(buffer + i, count - i, " ");
- if (nr->nr_all != 0) {
+ if (nr->nr_all) {
LASSERT(list_empty(&nr->nr_addrranges));
i += scnprintf(buffer + i, count - i, "*");
i += cfs_print_network(buffer + i, count - i, nr);
@@ -487,13 +488,13 @@ static void cfs_ip_ar_min_max(struct addrrange *ar, __u32 *min_nid,
tmp_ip_addr = ((min_ip[0] << 24) | (min_ip[1] << 16) |
(min_ip[2] << 8) | min_ip[3]);
- if (min_nid != NULL)
+ if (min_nid)
*min_nid = tmp_ip_addr;
tmp_ip_addr = ((max_ip[0] << 24) | (max_ip[1] << 16) |
(max_ip[2] << 8) | max_ip[3]);
- if (max_nid != NULL)
+ if (max_nid)
*max_nid = tmp_ip_addr;
}
@@ -515,16 +516,16 @@ static void cfs_num_ar_min_max(struct addrrange *ar, __u32 *min_nid,
list_for_each_entry(el, &ar->ar_numaddr_ranges, el_link) {
list_for_each_entry(re, &el->el_exprs, re_link) {
- if (re->re_lo < min_addr || min_addr == 0)
+ if (re->re_lo < min_addr || !min_addr)
min_addr = re->re_lo;
if (re->re_hi > max_addr)
max_addr = re->re_hi;
}
}
- if (min_nid != NULL)
+ if (min_nid)
*min_nid = min_addr;
- if (max_nid != NULL)
+ if (max_nid)
*max_nid = max_addr;
}
@@ -546,17 +547,17 @@ bool cfs_nidrange_is_contiguous(struct list_head *nidlist)
list_for_each_entry(nr, nidlist, nr_link) {
nf = nr->nr_netstrfns;
- if (lndname == NULL)
+ if (!lndname)
lndname = nf->nf_name;
if (netnum == -1)
netnum = nr->nr_netnum;
- if (strcmp(lndname, nf->nf_name) != 0 ||
+ if (strcmp(lndname, nf->nf_name) ||
netnum != nr->nr_netnum)
return false;
}
- if (nf == NULL)
+ if (!nf)
return false;
if (!nf->nf_is_contiguous(nidlist))
@@ -590,7 +591,7 @@ static bool cfs_num_is_contiguous(struct list_head *nidlist)
list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
cfs_num_ar_min_max(ar, &current_start_nid,
&current_end_nid);
- if (last_end_nid != 0 &&
+ if (last_end_nid &&
(current_start_nid - last_end_nid != 1))
return false;
last_end_nid = current_end_nid;
@@ -600,7 +601,7 @@ static bool cfs_num_is_contiguous(struct list_head *nidlist)
re_link) {
if (re->re_stride > 1)
return false;
- else if (last_hi != 0 &&
+ else if (last_hi &&
re->re_hi - last_hi != 1)
return false;
last_hi = re->re_hi;
@@ -640,7 +641,7 @@ static bool cfs_ip_is_contiguous(struct list_head *nidlist)
last_diff = 0;
cfs_ip_ar_min_max(ar, &current_start_nid,
&current_end_nid);
- if (last_end_nid != 0 &&
+ if (last_end_nid &&
(current_start_nid - last_end_nid != 1))
return false;
last_end_nid = current_end_nid;
@@ -724,7 +725,7 @@ static void cfs_num_min_max(struct list_head *nidlist, __u32 *min_nid,
list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
cfs_num_ar_min_max(ar, &tmp_min_addr,
&tmp_max_addr);
- if (tmp_min_addr < min_addr || min_addr == 0)
+ if (tmp_min_addr < min_addr || !min_addr)
min_addr = tmp_min_addr;
if (tmp_max_addr > max_addr)
max_addr = tmp_min_addr;
@@ -756,16 +757,16 @@ static void cfs_ip_min_max(struct list_head *nidlist, __u32 *min_nid,
list_for_each_entry(ar, &nr->nr_addrranges, ar_link) {
cfs_ip_ar_min_max(ar, &tmp_min_ip_addr,
&tmp_max_ip_addr);
- if (tmp_min_ip_addr < min_ip_addr || min_ip_addr == 0)
+ if (tmp_min_ip_addr < min_ip_addr || !min_ip_addr)
min_ip_addr = tmp_min_ip_addr;
if (tmp_max_ip_addr > max_ip_addr)
max_ip_addr = tmp_max_ip_addr;
}
}
- if (min_nid != NULL)
+ if (min_nid)
*min_nid = min_ip_addr;
- if (max_nid != NULL)
+ if (max_nid)
*max_nid = max_ip_addr;
}
@@ -784,12 +785,14 @@ libcfs_ip_addr2str(__u32 addr, char *str, size_t size)
(addr >> 8) & 0xff, addr & 0xff);
}
-/* CAVEAT EMPTOR XscanfX
+/*
+ * CAVEAT EMPTOR XscanfX
* I use "%n" at the end of a sscanf format to detect trailing junk. However
 * sscanf may return immediately if it sees the terminating '\0' in a string, so
 * I initialise the %n variable to the expected length. If sscanf sets it,
 * fine; if it doesn't, then the scan ended at the end of the string, which is
- * fine too :) */
+ * fine too :)
+ */
static int
libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
{
@@ -802,9 +805,9 @@ libcfs_ip_str2addr(const char *str, int nob, __u32 *addr)
/* numeric IP? */
if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
n == nob &&
- (a & ~0xff) == 0 && (b & ~0xff) == 0 &&
- (c & ~0xff) == 0 && (d & ~0xff) == 0) {
- *addr = ((a<<24)|(b<<16)|(c<<8)|d);
+ !(a & ~0xff) && !(b & ~0xff) &&
+ !(c & ~0xff) && !(d & ~0xff)) {
+ *addr = ((a << 24) | (b << 16) | (c << 8) | d);
return 1;
}
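
The comment's %n trick is easy to demonstrate: n is preset to the full string length, so a clean parse leaves n == nob whether or not sscanf stores into it, while trailing junk leaves n short. A standalone check:

    #include <stdio.h>
    #include <string.h>

    static int parse_ip(const char *str)
    {
            unsigned int a, b, c, d;
            int nob = strlen(str), n = nob;

            if (sscanf(str, "%u.%u.%u.%u%n", &a, &b, &c, &d, &n) >= 4 &&
                n == nob && !(a & ~0xff) && !(b & ~0xff) &&
                !(c & ~0xff) && !(d & ~0xff))
                    return 1;
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", parse_ip("10.0.0.1"),       /* 1 */
                   parse_ip("10.0.0.1junk"));             /* 0 */
            return 0;
    }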
@@ -824,7 +827,7 @@ cfs_ip_addr_parse(char *str, int len, struct list_head *list)
src.ls_len = len;
i = 0;
- while (src.ls_str != NULL) {
+ while (src.ls_str) {
struct cfs_lstr res;
if (!cfs_gettok(&src, '.', &res)) {
@@ -833,7 +836,7 @@ cfs_ip_addr_parse(char *str, int len, struct list_head *list)
}
rc = cfs_expr_list_parse(res.ls_str, res.ls_len, 0, 255, &el);
- if (rc != 0)
+ if (rc)
goto out;
list_add_tail(&el->el_link, list);
@@ -858,7 +861,7 @@ libcfs_ip_addr_range_print(char *buffer, int count, struct list_head *list)
list_for_each_entry(el, list, el_link) {
LASSERT(j++ < 4);
- if (i != 0)
+ if (i)
i += scnprintf(buffer + i, count - i, ".");
i += cfs_expr_list_print(buffer + i, count - i, el);
}
@@ -928,7 +931,7 @@ libcfs_num_parse(char *str, int len, struct list_head *list)
int rc;
rc = cfs_expr_list_parse(str, len, 0, MAX_NUMERIC_VALUE, &el);
- if (rc == 0)
+ if (!rc)
list_add_tail(&el->el_link, list);
return rc;
@@ -1060,7 +1063,7 @@ libcfs_name2netstrfns(const char *name)
int
libcfs_isknown_lnd(__u32 lnd)
{
- return libcfs_lnd2netstrfns(lnd) != NULL;
+ return !!libcfs_lnd2netstrfns(lnd);
}
EXPORT_SYMBOL(libcfs_isknown_lnd);
@@ -1069,7 +1072,7 @@ libcfs_lnd2modname(__u32 lnd)
{
struct netstrfns *nf = libcfs_lnd2netstrfns(lnd);
- return (nf == NULL) ? NULL : nf->nf_modname;
+ return nf ? nf->nf_modname : NULL;
}
EXPORT_SYMBOL(libcfs_lnd2modname);
@@ -1078,10 +1081,10 @@ libcfs_str2lnd(const char *str)
{
struct netstrfns *nf = libcfs_name2netstrfns(str);
- if (nf != NULL)
+ if (nf)
return nf->nf_type;
- return -1;
+ return -ENXIO;
}
EXPORT_SYMBOL(libcfs_str2lnd);
@@ -1091,7 +1094,7 @@ libcfs_lnd2str_r(__u32 lnd, char *buf, size_t buf_size)
struct netstrfns *nf;
nf = libcfs_lnd2netstrfns(lnd);
- if (nf == NULL)
+ if (!nf)
snprintf(buf, buf_size, "?%u?", lnd);
else
snprintf(buf, buf_size, "%s", nf->nf_name);
@@ -1108,9 +1111,9 @@ libcfs_net2str_r(__u32 net, char *buf, size_t buf_size)
struct netstrfns *nf;
nf = libcfs_lnd2netstrfns(lnd);
- if (nf == NULL)
+ if (!nf)
snprintf(buf, buf_size, "<%u:%u>", lnd, nnum);
- else if (nnum == 0)
+ else if (!nnum)
snprintf(buf, buf_size, "%s", nf->nf_name);
else
snprintf(buf, buf_size, "%s%u", nf->nf_name, nnum);
@@ -1135,14 +1138,14 @@ libcfs_nid2str_r(lnet_nid_t nid, char *buf, size_t buf_size)
}
nf = libcfs_lnd2netstrfns(lnd);
- if (nf == NULL)
+ if (!nf) {
snprintf(buf, buf_size, "%x@<%u:%u>", addr, lnd, nnum);
- else {
+ } else {
size_t addr_len;
nf->nf_addr2str(addr, buf, buf_size);
addr_len = strlen(buf);
- if (nnum == 0)
+ if (!nnum)
snprintf(buf + addr_len, buf_size - addr_len, "@%s",
nf->nf_name);
else
@@ -1195,7 +1198,7 @@ libcfs_str2net(const char *str)
{
__u32 net;
- if (libcfs_str2net_internal(str, &net) != NULL)
+ if (libcfs_str2net_internal(str, &net))
return net;
return LNET_NIDNET(LNET_NID_ANY);
@@ -1210,15 +1213,15 @@ libcfs_str2nid(const char *str)
__u32 net;
__u32 addr;
- if (sep != NULL) {
+ if (sep) {
nf = libcfs_str2net_internal(sep + 1, &net);
- if (nf == NULL)
+ if (!nf)
return LNET_NID_ANY;
} else {
sep = str + strlen(str);
net = LNET_MKNET(SOCKLND, 0);
nf = libcfs_lnd2netstrfns(SOCKLND);
- LASSERT(nf != NULL);
+ LASSERT(nf);
}
if (!nf->nf_str2addr(str, (int)(sep - str), &addr))
@@ -1240,8 +1243,8 @@ libcfs_id2str(lnet_process_id_t id)
}
snprintf(str, LNET_NIDSTR_SIZE, "%s%u-%s",
- ((id.pid & LNET_PID_USERFLAG) != 0) ? "U" : "",
- (id.pid & ~LNET_PID_USERFLAG), libcfs_nid2str(id.nid));
+ id.pid & LNET_PID_USERFLAG ? "U" : "",
+ id.pid & ~LNET_PID_USERFLAG, libcfs_nid2str(id.nid));
return str;
}
EXPORT_SYMBOL(libcfs_id2str);
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c
index 1fceed3c8fc0..b026feebc03a 100644
--- a/drivers/staging/lustre/lnet/lnet/peer.c
+++ b/drivers/staging/lustre/lnet/lnet/peer.c
@@ -39,6 +39,7 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
+#include "../../include/linux/lnet/lib-dlc.h"
int
lnet_peer_tables_create(void)
@@ -50,7 +51,7 @@ lnet_peer_tables_create(void)
the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*ptable));
- if (the_lnet.ln_peer_tables == NULL) {
+ if (!the_lnet.ln_peer_tables) {
CERROR("Failed to allocate cpu-partition peer tables\n");
return -ENOMEM;
}
@@ -60,7 +61,7 @@ lnet_peer_tables_create(void)
LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
LNET_PEER_HASH_SIZE * sizeof(*hash));
- if (hash == NULL) {
+ if (!hash) {
CERROR("Failed to create peer hash table\n");
lnet_peer_tables_destroy();
return -ENOMEM;
@@ -82,12 +83,12 @@ lnet_peer_tables_destroy(void)
int i;
int j;
- if (the_lnet.ln_peer_tables == NULL)
+ if (!the_lnet.ln_peer_tables)
return;
cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
hash = ptable->pt_hash;
- if (hash == NULL) /* not initialized */
+ if (!hash) /* not initialized */
break;
LASSERT(list_empty(&ptable->pt_deathrow));
@@ -103,62 +104,116 @@ lnet_peer_tables_destroy(void)
the_lnet.ln_peer_tables = NULL;
}
+static void
+lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable)
+{
+ int i;
+ lnet_peer_t *lp;
+ lnet_peer_t *tmp;
+
+ for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
+ list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+ lp_hashlist) {
+ if (ni && ni != lp->lp_ni)
+ continue;
+ list_del_init(&lp->lp_hashlist);
+ /* Lose hash table's ref */
+ ptable->pt_zombies++;
+ lnet_peer_decref_locked(lp);
+ }
+ }
+}
+
+static void
+lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
+ int cpt_locked)
+{
+ int i;
+
+ for (i = 3; ptable->pt_zombies; i++) {
+ lnet_net_unlock(cpt_locked);
+
+ if (is_power_of_2(i)) {
+ CDEBUG(D_WARNING,
+ "Waiting for %d zombies on peer table\n",
+ ptable->pt_zombies);
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1) >> 1);
+ lnet_net_lock(cpt_locked);
+ }
+}
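
lnet_peer_table_deathrow_wait_locked() above rate-limits its warning by logging only on power-of-two iterations, giving exponential backoff on the console while still polling every half second. A small runnable illustration of the same predicate (is_power_of_2() is open-coded here since this is userspace):

#include <stdbool.h>
#include <stdio.h>

/* Open-coded equivalent of the kernel's is_power_of_2(). */
static bool is_pow2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	/* Starting at 3, warnings fire on iterations 4, 8, 16, ... */
	for (unsigned long i = 3; i <= 64; i++)
		if (is_pow2(i))
			printf("Waiting for zombies (iteration %lu)\n", i);
	return 0;
}
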
+
+static void
+lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
+ int cpt_locked)
+{
+ lnet_peer_t *lp;
+ lnet_peer_t *tmp;
+ lnet_nid_t lp_nid;
+ int i;
+
+ for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
+ list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
+ lp_hashlist) {
+ if (ni != lp->lp_ni)
+ continue;
+
+ if (!lp->lp_rtr_refcount)
+ continue;
+
+ lp_nid = lp->lp_nid;
+
+ lnet_net_unlock(cpt_locked);
+ lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid);
+ lnet_net_lock(cpt_locked);
+ }
+ }
+}
+
void
-lnet_peer_tables_cleanup(void)
+lnet_peer_tables_cleanup(lnet_ni_t *ni)
{
struct lnet_peer_table *ptable;
+ struct list_head deathrow;
+ lnet_peer_t *lp;
+ lnet_peer_t *temp;
int i;
- int j;
- LASSERT(the_lnet.ln_shutdown); /* i.e. no new peers */
+ INIT_LIST_HEAD(&deathrow);
+ LASSERT(the_lnet.ln_shutdown || ni);
+ /*
+ * If just deleting the peers for a NI, get rid of any routes these
+ * peers are gateways for.
+ */
cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
lnet_net_lock(i);
-
- for (j = 0; j < LNET_PEER_HASH_SIZE; j++) {
- struct list_head *peers = &ptable->pt_hash[j];
-
- while (!list_empty(peers)) {
- lnet_peer_t *lp = list_entry(peers->next,
- lnet_peer_t,
- lp_hashlist);
- list_del_init(&lp->lp_hashlist);
- /* lose hash table's ref */
- lnet_peer_decref_locked(lp);
- }
- }
-
+ lnet_peer_table_del_rtrs_locked(ni, ptable, i);
lnet_net_unlock(i);
}
+ /*
+ * Start the process of moving the applicable peers to
+ * deathrow.
+ */
cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
- LIST_HEAD(deathrow);
- lnet_peer_t *lp;
-
lnet_net_lock(i);
+ lnet_peer_table_cleanup_locked(ni, ptable);
+ lnet_net_unlock(i);
+ }
- for (j = 3; ptable->pt_number != 0; j++) {
- lnet_net_unlock(i);
-
- if ((j & (j - 1)) == 0) {
- CDEBUG(D_WARNING,
- "Waiting for %d peers on peer table\n",
- ptable->pt_number);
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 2);
- lnet_net_lock(i);
- }
+ /* Cleanup all entries on deathrow. */
+ cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
+ lnet_net_lock(i);
+ lnet_peer_table_deathrow_wait_locked(ptable, i);
list_splice_init(&ptable->pt_deathrow, &deathrow);
-
lnet_net_unlock(i);
+ }
- while (!list_empty(&deathrow)) {
- lp = list_entry(deathrow.next,
- lnet_peer_t, lp_hashlist);
- list_del(&lp->lp_hashlist);
- LIBCFS_FREE(lp, sizeof(*lp));
- }
+ list_for_each_entry_safe(lp, temp, &deathrow, lp_hashlist) {
+ list_del(&lp->lp_hashlist);
+ LIBCFS_FREE(lp, sizeof(*lp));
}
}
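
The reworked cleanup path above shows a common pattern: splice entries off the shared per-CPT list while holding the lock, then free them only after dropping it, so the free path never runs under the lock. A minimal userspace sketch of the splice-then-free shape using a toy singly-linked list (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table;	/* shared list protected by table_lock */

static void cleanup_table(void)
{
	struct node *deathrow, *n;

	/* Detach the whole list while holding the lock... */
	pthread_mutex_lock(&table_lock);
	deathrow = table;
	table = NULL;
	pthread_mutex_unlock(&table_lock);

	/* ...then free the entries without the lock held. */
	while ((n = deathrow)) {
		deathrow = n->next;
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->val = i;
		n->next = table;
		table = n;
	}
	cleanup_table();
	printf("table drained\n");
	return 0;
}
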
@@ -167,11 +222,11 @@ lnet_destroy_peer_locked(lnet_peer_t *lp)
{
struct lnet_peer_table *ptable;
- LASSERT(lp->lp_refcount == 0);
- LASSERT(lp->lp_rtr_refcount == 0);
+ LASSERT(!lp->lp_refcount);
+ LASSERT(!lp->lp_rtr_refcount);
LASSERT(list_empty(&lp->lp_txq));
LASSERT(list_empty(&lp->lp_hashlist));
- LASSERT(lp->lp_txqnob == 0);
+ LASSERT(!lp->lp_txqnob);
ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
LASSERT(ptable->pt_number > 0);
@@ -181,6 +236,8 @@ lnet_destroy_peer_locked(lnet_peer_t *lp)
lp->lp_ni = NULL;
list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
+ LASSERT(ptable->pt_zombies > 0);
+ ptable->pt_zombies--;
}
lnet_peer_t *
@@ -220,14 +277,14 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
ptable = the_lnet.ln_peer_tables[cpt2];
lp = lnet_find_peer_locked(ptable, nid);
- if (lp != NULL) {
+ if (lp) {
*lpp = lp;
return 0;
}
if (!list_empty(&ptable->pt_deathrow)) {
lp = list_entry(ptable->pt_deathrow.next,
- lnet_peer_t, lp_hashlist);
+ lnet_peer_t, lp_hashlist);
list_del(&lp->lp_hashlist);
}
@@ -238,12 +295,12 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
ptable->pt_number++;
lnet_net_unlock(cpt);
- if (lp != NULL)
+ if (lp)
memset(lp, 0, sizeof(*lp));
else
LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
- if (lp == NULL) {
+ if (!lp) {
rc = -ENOMEM;
lnet_net_lock(cpt);
goto out;
@@ -276,30 +333,30 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt)
}
lp2 = lnet_find_peer_locked(ptable, nid);
- if (lp2 != NULL) {
+ if (lp2) {
*lpp = lp2;
goto out;
}
lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2);
- if (lp->lp_ni == NULL) {
+ if (!lp->lp_ni) {
rc = -EHOSTUNREACH;
goto out;
}
- lp->lp_txcredits =
- lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
- lp->lp_rtrcredits =
+ lp->lp_txcredits = lp->lp_ni->ni_peertxcredits;
+ lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
+ lp->lp_rtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
list_add_tail(&lp->lp_hashlist,
- &ptable->pt_hash[lnet_nid2peerhash(nid)]);
+ &ptable->pt_hash[lnet_nid2peerhash(nid)]);
ptable->pt_version++;
*lpp = lp;
return 0;
out:
- if (lp != NULL)
+ if (lp)
list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
ptable->pt_number--;
return rc;
@@ -317,7 +374,7 @@ lnet_debug_peer(lnet_nid_t nid)
lnet_net_lock(cpt);
rc = lnet_nid2peer_locked(&lp, nid, cpt);
- if (rc != 0) {
+ if (rc) {
lnet_net_unlock(cpt);
CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
return;
@@ -336,3 +393,65 @@ lnet_debug_peer(lnet_nid_t nid)
lnet_net_unlock(cpt);
}
+
+int
+lnet_get_peer_info(__u32 peer_index, __u64 *nid,
+ char aliveness[LNET_MAX_STR_LEN],
+ __u32 *cpt_iter, __u32 *refcount,
+ __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
+ __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
+ __u32 *peer_tx_qnob)
+{
+ struct lnet_peer_table *peer_table;
+ lnet_peer_t *lp;
+ bool found = false;
+ int lncpt, j;
+
+ /* get the number of CPTs */
+ lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
+
+ /*
+ * If the cpt number to be examined is >= the number of cpts in
+ * the system then indicate that there are no more cpts to examine.
+ */
+ if (*cpt_iter >= lncpt)
+ return -ENOENT;
+
+ /* get the current table */
+ peer_table = the_lnet.ln_peer_tables[*cpt_iter];
+ /* if the ptable is NULL then there are no more cpts to examine */
+ if (!peer_table)
+ return -ENOENT;
+
+ lnet_net_lock(*cpt_iter);
+
+ for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
+ struct list_head *peers = &peer_table->pt_hash[j];
+
+ list_for_each_entry(lp, peers, lp_hashlist) {
+ if (peer_index-- > 0)
+ continue;
+
+ snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
+ if (lnet_isrouter(lp) ||
+ lnet_peer_aliveness_enabled(lp))
+ snprintf(aliveness, LNET_MAX_STR_LEN,
+ lp->lp_alive ? "up" : "down");
+
+ *nid = lp->lp_nid;
+ *refcount = lp->lp_refcount;
+ *ni_peer_tx_credits = lp->lp_ni->ni_peertxcredits;
+ *peer_tx_credits = lp->lp_txcredits;
+ *peer_rtr_credits = lp->lp_rtrcredits;
+ *peer_min_rtr_credits = lp->lp_mintxcredits;
+ *peer_tx_qnob = lp->lp_txqnob;
+
+ found = true;
+ }
+ }
+ lnet_net_unlock(*cpt_iter);
+
+ *cpt_iter = lncpt;
+
+ return found ? 0 : -ENOENT;
+}
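
lnet_get_peer_info() above treats peer_index as a cursor that is decremented while walking the hash buckets; the entry reached when it hits zero is the one reported. The same walk in self-contained userspace form (the toy table is hypothetical):

#include <stdio.h>

#define NBUCKETS 3

/* Toy hash table: each bucket is an array terminated by -1. */
static const int buckets[NBUCKETS][4] = {
	{ 10, 11, -1 },
	{ 20, -1 },
	{ 30, 31, 32, -1 },
};

/* Return the idx'th entry across all buckets, or -1 when out of
 * range; same cursor as "if (peer_index-- > 0) continue;" above. */
static int nth_entry(int idx)
{
	for (int b = 0; b < NBUCKETS; b++)
		for (int i = 0; buckets[b][i] >= 0; i++)
			if (idx-- == 0)
				return buckets[b][i];
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", nth_entry(0), nth_entry(3), nth_entry(6));
	return 0;
}
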
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index f5faa414d250..b01dc424c514 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#define DEBUG_SUBSYSTEM S_LNET
@@ -28,8 +24,11 @@
#define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
#define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
#define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4)
+#define LNET_NRB_SMALL_PAGES 1
#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
+#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
+ PAGE_SHIFT)
static char *forwarding = "";
module_param(forwarding, charp, 0444);
@@ -61,8 +60,10 @@ lnet_peer_buffer_credits(lnet_ni_t *ni)
if (peer_buffer_credits > 0)
return peer_buffer_credits;
- /* As an approximation, allow this peer the same number of router
- * buffers as it is allowed outstanding sends */
+ /*
+ * As an approximation, allow this peer the same number of router
+ * buffers as it is allowed outstanding sends
+ */
return ni->ni_peertxcredits;
}
@@ -107,7 +108,7 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive,
lp->lp_timestamp = when; /* update timestamp */
lp->lp_ping_deadline = 0; /* disable ping timeout */
- if (lp->lp_alive_count != 0 && /* got old news */
+ if (lp->lp_alive_count && /* got old news */
(!lp->lp_alive) == (!alive)) { /* new date for old news */
CDEBUG(D_NET, "Old news\n");
return;
@@ -131,11 +132,12 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
int alive;
int notifylnd;
- /* Notify only in 1 thread at any time to ensure ordered notification.
+ /*
+ * Notify only in 1 thread at any time to ensure ordered notification.
* NB individual events can be missed; the only guarantee is that you
- * always get the most recent news */
-
- if (lp->lp_notifying || ni == NULL)
+ * always get the most recent news
+ */
+ if (lp->lp_notifying || !ni)
return;
lp->lp_notifying = 1;
@@ -147,13 +149,14 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
lp->lp_notifylnd = 0;
lp->lp_notify = 0;
- if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
+ if (notifylnd && ni->ni_lnd->lnd_notify) {
lnet_net_unlock(lp->lp_cpt);
- /* A new notification could happen now; I'll handle it
- * when control returns to me */
-
- (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
+ /*
+ * A new notification could happen now; I'll handle it
+ * when control returns to me
+ */
+ ni->ni_lnd->lnd_notify(ni, lp->lp_nid, alive);
lnet_net_lock(lp->lp_cpt);
}
@@ -176,7 +179,7 @@ lnet_rtr_addref_locked(lnet_peer_t *lp)
/* a simple insertion sort */
list_for_each_prev(pos, &the_lnet.ln_routers) {
lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
- lp_rtr_list);
+ lp_rtr_list);
if (rtr->lp_nid < lp->lp_nid)
break;
@@ -197,12 +200,12 @@ lnet_rtr_decref_locked(lnet_peer_t *lp)
/* lnet_net_lock must be exclusively locked */
lp->lp_rtr_refcount--;
- if (lp->lp_rtr_refcount == 0) {
+ if (!lp->lp_rtr_refcount) {
LASSERT(list_empty(&lp->lp_routes));
- if (lp->lp_rcd != NULL) {
+ if (lp->lp_rcd) {
list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_rcd_deathrow);
+ &the_lnet.ln_rcd_deathrow);
lp->lp_rcd = NULL;
}
@@ -245,8 +248,10 @@ static void lnet_shuffle_seed(void)
cfs_get_random_bytes(seed, sizeof(seed));
- /* Nodes with small feet have little entropy
- * the NID for this node gives the most entropy in the low bits */
+ /*
+ * Nodes with small feet have little entropy
+ * the NID for this node gives the most entropy in the low bits
+ */
list_for_each(tmp, &the_lnet.ln_nis) {
ni = list_entry(tmp, lnet_ni_t, ni_list);
lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
@@ -277,7 +282,7 @@ lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
/* len+1 positions to add a new entry, also prevents division by 0 */
offset = cfs_rand() % (len + 1);
list_for_each(e, &rnet->lrn_routes) {
- if (offset == 0)
+ if (!offset)
break;
offset--;
}
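
lnet_add_route_to_rnet() above load-balances by inserting the new route at a random offset; using len + 1 slots means the new entry can land anywhere, including either end, and also prevents a modulo by zero on an empty list, as the comment notes. A runnable userspace sketch of the same insertion (list and names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct rnode {
	struct rnode *next;
	int val;
};

/* Insert v at a uniformly random slot in a list of length len;
 * there are len + 1 slots, which also avoids a modulo by zero. */
static void insert_random(struct rnode **head, int len, int v)
{
	struct rnode *n = malloc(sizeof(*n));
	struct rnode **pos = head;
	int offset = rand() % (len + 1);

	if (!n)
		return;
	while (offset--)
		pos = &(*pos)->next;
	n->val = v;
	n->next = *pos;
	*pos = n;
}

int main(void)
{
	struct rnode *head = NULL;
	int len = 0;

	srand((unsigned int)time(NULL));
	for (int v = 1; v <= 5; v++)
		insert_random(&head, len++, v);
	for (struct rnode *n = head; n; n = n->next)
		printf("%d ", n->val);
	printf("\n");
	return 0;
}
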
@@ -289,7 +294,7 @@ lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
}
int
-lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
+lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
unsigned int priority)
{
struct list_head *e;
@@ -300,7 +305,7 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
int add_route;
int rc;
- CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
+ CDEBUG(D_NET, "Add route: net %s hops %d priority %u gw %s\n",
libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
if (gateway == LNET_NID_ANY ||
@@ -308,21 +313,21 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
net == LNET_NIDNET(LNET_NID_ANY) ||
LNET_NETTYP(net) == LOLND ||
LNET_NIDNET(gateway) == net ||
- hops < 1 || hops > 255)
+ (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
return -EINVAL;
if (lnet_islocalnet(net)) /* it's a local network */
- return 0; /* ignore the route entry */
+ return -EEXIST;
/* Assume net, route, all new */
LIBCFS_ALLOC(route, sizeof(*route));
LIBCFS_ALLOC(rnet, sizeof(*rnet));
- if (route == NULL || rnet == NULL) {
+ if (!route || !rnet) {
CERROR("Out of memory creating route %s %d %s\n",
libcfs_net2str(net), hops, libcfs_nid2str(gateway));
- if (route != NULL)
+ if (route)
LIBCFS_FREE(route, sizeof(*route));
- if (rnet != NULL)
+ if (rnet)
LIBCFS_FREE(rnet, sizeof(*rnet));
return -ENOMEM;
}
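
The new hops check above accepts either the sentinel LNET_UNDEFINED_HOPS or a value in [1, 255]; anything else is -EINVAL. A tiny sketch of the sentinel-or-range validation (the sentinel value used here is an assumption for illustration, not the real LNet constant):

#include <stdio.h>

/* Hypothetical sentinel meaning "hop count not specified". */
#define UNDEFINED_HOPS	((unsigned int)-1)

/* Accept the sentinel or a value in [1, 255]; reject the rest. */
static int validate_hops(unsigned int hops)
{
	if (hops != UNDEFINED_HOPS && (hops < 1 || hops > 255))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", validate_hops(1),
	       validate_hops(UNDEFINED_HOPS),
	       validate_hops(0));	/* prints: 0 0 -1 */
	return 0;
}
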
@@ -336,25 +341,24 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
lnet_net_lock(LNET_LOCK_EX);
rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
- if (rc != 0) {
+ if (rc) {
lnet_net_unlock(LNET_LOCK_EX);
LIBCFS_FREE(route, sizeof(*route));
LIBCFS_FREE(rnet, sizeof(*rnet));
if (rc == -EHOSTUNREACH) /* gateway is not on a local net */
- return 0; /* ignore the route entry */
+ return rc; /* ignore the route entry */
CERROR("Error %d creating route %s %d %s\n", rc,
libcfs_net2str(net), hops,
libcfs_nid2str(gateway));
-
return rc;
}
LASSERT(!the_lnet.ln_shutdown);
rnet2 = lnet_find_net_locked(net);
- if (rnet2 == NULL) {
+ if (!rnet2) {
/* new network */
list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
rnet2 = rnet;
@@ -382,8 +386,8 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
lnet_net_unlock(LNET_LOCK_EX);
/* XXX Assume alive */
- if (ni->ni_lnd->lnd_notify != NULL)
- (ni->ni_lnd->lnd_notify)(ni, gateway, 1);
+ if (ni->ni_lnd->lnd_notify)
+ ni->ni_lnd->lnd_notify(ni, gateway, 1);
lnet_net_lock(LNET_LOCK_EX);
}
@@ -391,14 +395,20 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
/* -1 for notify or !add_route */
lnet_peer_decref_locked(route->lr_gateway);
lnet_net_unlock(LNET_LOCK_EX);
+ rc = 0;
- if (!add_route)
+ if (!add_route) {
+ rc = -EEXIST;
LIBCFS_FREE(route, sizeof(*route));
+ }
if (rnet != rnet2)
LIBCFS_FREE(rnet, sizeof(*rnet));
- return 0;
+ /* indicate to startup the router checker if configured */
+ wake_up(&the_lnet.ln_rc_waitq);
+
+ return rc;
}
int
@@ -426,10 +436,9 @@ lnet_check_routes(void)
lnet_nid_t nid2;
int net;
- route = list_entry(e2, lnet_route_t,
- lr_list);
+ route = list_entry(e2, lnet_route_t, lr_list);
- if (route2 == NULL) {
+ if (!route2) {
route2 = route;
continue;
}
@@ -472,9 +481,10 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid)
CDEBUG(D_NET, "Del route: net %s : gw %s\n",
libcfs_net2str(net), libcfs_nid2str(gw_nid));
- /* NB Caller may specify either all routes via the given gateway
- * or a specific route entry actual NIDs) */
-
+ /*
+ * NB Caller may specify either all routes via the given gateway
+ * or a specific route entry (actual NIDs).
+ */
lnet_net_lock(LNET_LOCK_EX);
if (net == LNET_NIDNET(LNET_NID_ANY))
rn_list = &the_lnet.ln_remote_nets_hash[0];
@@ -486,7 +496,7 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid)
rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
- net == rnet->lrn_net))
+ net == rnet->lrn_net))
continue;
list_for_each(e2, &rnet->lrn_routes) {
@@ -513,7 +523,7 @@ lnet_del_route(__u32 net, lnet_nid_t gw_nid)
LIBCFS_FREE(route, sizeof(*route));
- if (rnet != NULL)
+ if (rnet)
LIBCFS_FREE(rnet, sizeof(*rnet));
rc = 0;
@@ -538,6 +548,38 @@ lnet_destroy_routes(void)
lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}
+int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg)
+{
+ int i, rc = -ENOENT, j;
+
+ if (!the_lnet.ln_rtrpools)
+ return rc;
+
+ for (i = 0; i < LNET_NRBPOOLS; i++) {
+ lnet_rtrbufpool_t *rbp;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) {
+ if (i++ != idx)
+ continue;
+
+ pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages;
+ pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers;
+ pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits;
+ pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits;
+ rc = 0;
+ break;
+ }
+ lnet_net_unlock(LNET_LOCK_EX);
+ }
+
+ lnet_net_lock(LNET_LOCK_EX);
+ pool_cfg->pl_routing = the_lnet.ln_routing;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ return rc;
+}
+
int
lnet_get_route(int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
@@ -558,15 +600,14 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops,
rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
list_for_each(e2, &rnet->lrn_routes) {
- route = list_entry(e2, lnet_route_t,
- lr_list);
+ route = list_entry(e2, lnet_route_t, lr_list);
- if (idx-- == 0) {
+ if (!idx--) {
*net = rnet->lrn_net;
*hops = route->lr_hops;
*priority = route->lr_priority;
*gateway = route->lr_gateway->lp_nid;
- *alive = route->lr_gateway->lp_alive;
+ *alive = lnet_is_route_alive(route);
lnet_net_unlock(cpt);
return 0;
}
@@ -604,7 +645,7 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
{
lnet_ping_info_t *info = rcd->rcd_pinginfo;
struct lnet_peer *gw = rcd->rcd_gateway;
- lnet_route_t *rtr;
+ lnet_route_t *rte;
if (!gw->lp_alive)
return;
@@ -621,21 +662,25 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
}
gw->lp_ping_feats = info->pi_features;
- if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
+ if (!(gw->lp_ping_feats & LNET_PING_FEAT_MASK)) {
CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
return; /* nothing I can understand */
}
- if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
+ if (!(gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS))
return; /* can't carry NI status info */
- list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
- int ptl_status = LNET_NI_STATUS_INVALID;
+ list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
int down = 0;
int up = 0;
int i;
+ if (gw->lp_ping_feats & LNET_PING_FEAT_RTE_DISABLED) {
+ rte->lr_downis = 1;
+ continue;
+ }
+
for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
lnet_ni_status_t *stat = &info->pi_ni[i];
lnet_nid_t nid = stat->ns_nid;
@@ -651,22 +696,15 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
continue;
if (stat->ns_status == LNET_NI_STATUS_DOWN) {
- if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND)
- down++;
- else if (ptl_status != LNET_NI_STATUS_UP)
- ptl_status = LNET_NI_STATUS_DOWN;
+ down++;
continue;
}
if (stat->ns_status == LNET_NI_STATUS_UP) {
- if (LNET_NIDNET(nid) == rtr->lr_net) {
+ if (LNET_NIDNET(nid) == rte->lr_net) {
up = 1;
break;
}
- /* ptl NIs are considered down only when
- * they're all down */
- if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND)
- ptl_status = LNET_NI_STATUS_UP;
continue;
}
@@ -677,10 +715,17 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd)
}
if (up) { /* ignore downed NIs if NI for dest network is up */
- rtr->lr_downis = 0;
+ rte->lr_downis = 0;
continue;
}
- rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN);
+ /*
+ * If @down is zero and this route is single-hop, it means
+ * we can't find an NI for the target network.
+ */
+ if (!down && rte->lr_hops == 1)
+ down = 1;
+
+ rte->lr_downis = down;
}
}
@@ -690,7 +735,7 @@ lnet_router_checker_event(lnet_event_t *event)
lnet_rc_data_t *rcd = event->md.user_ptr;
struct lnet_peer *lp;
- LASSERT(rcd != NULL);
+ LASSERT(rcd);
if (event->unlinked) {
LNetInvalidateHandle(&rcd->rcd_mdh);
@@ -701,11 +746,13 @@ lnet_router_checker_event(lnet_event_t *event)
event->type == LNET_EVENT_REPLY);
lp = rcd->rcd_gateway;
- LASSERT(lp != NULL);
+ LASSERT(lp);
- /* NB: it's called with holding lnet_res_lock, we have a few
- * places need to hold both locks at the same time, please take
- * care of lock ordering */
+ /*
+ * NB: this is called while holding lnet_res_lock; a few places
+ * need to hold both locks at the same time, so please take care
+ * of lock ordering.
+ */
lnet_net_lock(lp->lp_cpt);
if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
/* ignore if no longer a router or rcd is replaced */
@@ -714,23 +761,26 @@ lnet_router_checker_event(lnet_event_t *event)
if (event->type == LNET_EVENT_SEND) {
lp->lp_ping_notsent = 0;
- if (event->status == 0)
+ if (!event->status)
goto out;
}
/* LNET_EVENT_REPLY */
- /* A successful REPLY means the router is up. If _any_ comms
+ /*
+ * A successful REPLY means the router is up. If _any_ comms
* to the router fail I assume it's down (this will happen if
* we ping alive routers to try to detect router death before
- * apps get burned). */
+ * apps get burned).
+ */
+ lnet_notify_locked(lp, 1, !event->status, cfs_time_current());
- lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
- /* The router checker will wake up very shortly and do the
+ /*
+ * The router checker will wake up very shortly and do the
* actual notification.
* XXX If 'lp' stops being a router before then, it will still
- * have the notification pending!!! */
-
- if (avoid_asym_router_failure && event->status == 0)
+ * have the notification pending!!!
+ */
+ if (avoid_asym_router_failure && !event->status)
lnet_parse_rc_info(rcd);
out:
@@ -753,7 +803,7 @@ lnet_wait_known_routerstate(void)
list_for_each(entry, &the_lnet.ln_routers) {
rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
- if (rtr->lp_alive_count == 0) {
+ if (!rtr->lp_alive_count) {
all_known = 0;
break;
}
@@ -774,7 +824,7 @@ lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
{
lnet_route_t *rte;
- if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
+ if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS)) {
list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
if (rte->lr_net == net) {
rte->lr_downis = 0;
@@ -811,13 +861,15 @@ lnet_update_ni_status_locked(void)
continue;
}
- LASSERT(ni->ni_status != NULL);
+ LASSERT(ni->ni_status);
if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
libcfs_nid2str(ni->ni_nid), timeout);
- /* NB: so far, this is the only place to set
- * NI status to "down" */
+ /*
+ * NB: so far, this is the only place to set
+ * NI status to "down"
+ */
ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
}
lnet_ni_unlock(ni);
@@ -831,7 +883,7 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd)
/* detached from network */
LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
- if (rcd->rcd_gateway != NULL) {
+ if (rcd->rcd_gateway) {
int cpt = rcd->rcd_gateway->lp_cpt;
lnet_net_lock(cpt);
@@ -839,7 +891,7 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd)
lnet_net_unlock(cpt);
}
- if (rcd->rcd_pinginfo != NULL)
+ if (rcd->rcd_pinginfo)
LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
LIBCFS_FREE(rcd, sizeof(*rcd));
@@ -856,14 +908,14 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
lnet_net_unlock(gateway->lp_cpt);
LIBCFS_ALLOC(rcd, sizeof(*rcd));
- if (rcd == NULL)
+ if (!rcd)
goto out;
LNetInvalidateHandle(&rcd->rcd_mdh);
INIT_LIST_HEAD(&rcd->rcd_list);
LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
- if (pi == NULL)
+ if (!pi)
goto out;
for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
@@ -885,11 +937,11 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
CERROR("Can't bind MD: %d\n", rc);
goto out;
}
- LASSERT(rc == 0);
+ LASSERT(!rc);
lnet_net_lock(gateway->lp_cpt);
/* router table changed or someone has created rcd for this gateway */
- if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
+ if (!lnet_isrouter(gateway) || gateway->lp_rcd) {
lnet_net_unlock(gateway->lp_cpt);
goto out;
}
@@ -902,10 +954,10 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
return rcd;
out:
- if (rcd != NULL) {
+ if (rcd) {
if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
rc = LNetMDUnlink(rcd->rcd_mdh);
- LASSERT(rc == 0);
+ LASSERT(!rc);
}
lnet_destroy_rc_data(rcd);
}
@@ -936,7 +988,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
lnet_peer_addref_locked(rtr);
- if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
+ if (rtr->lp_ping_deadline && /* ping timed out? */
cfs_time_after(now, rtr->lp_ping_deadline))
lnet_notify_locked(rtr, 1, 0, now);
@@ -950,10 +1002,10 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
return;
}
- rcd = rtr->lp_rcd != NULL ?
+ rcd = rtr->lp_rcd ?
rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
- if (rcd == NULL)
+ if (!rcd)
return;
secs = lnet_router_check_interval(rtr);
@@ -964,7 +1016,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
rtr->lp_ping_deadline, rtr->lp_ping_notsent,
rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
- if (secs != 0 && !rtr->lp_ping_notsent &&
+ if (secs && !rtr->lp_ping_notsent &&
cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
cfs_time_seconds(secs)))) {
int rc;
@@ -972,7 +1024,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
lnet_handle_md_t mdh;
id.nid = rtr->lp_nid;
- id.pid = LUSTRE_SRV_LNET_PID;
+ id.pid = LNET_PID_LUSTRE;
CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
rtr->lp_ping_notsent = 1;
@@ -980,7 +1032,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
mdh = rcd->rcd_mdh;
- if (rtr->lp_ping_deadline == 0) {
+ if (!rtr->lp_ping_deadline) {
rtr->lp_ping_deadline =
cfs_time_shift(router_ping_timeout);
}
@@ -991,7 +1043,7 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
LNET_PROTO_PING_MATCHBITS, 0);
lnet_net_lock(rtr->lp_cpt);
- if (rc != 0)
+ if (rc)
rtr->lp_ping_notsent = 0; /* no event pending */
}
@@ -1001,8 +1053,9 @@ lnet_ping_router_locked(lnet_peer_t *rtr)
int
lnet_router_checker_start(void)
{
+ struct task_struct *task;
int rc;
- int eqsz;
+ int eqsz = 0;
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
@@ -1012,39 +1065,33 @@ lnet_router_checker_start(void)
return -EINVAL;
}
- if (!the_lnet.ln_routing &&
- live_router_check_interval <= 0 &&
- dead_router_check_interval <= 0)
- return 0;
-
sema_init(&the_lnet.ln_rc_signal, 0);
- /* EQ size doesn't matter; the callback is guaranteed to get every
- * event */
- eqsz = 0;
- rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
- &the_lnet.ln_rc_eqh);
- if (rc != 0) {
+
+ rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
+ if (rc) {
CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
return -ENOMEM;
}
the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
- rc = PTR_ERR(kthread_run(lnet_router_checker,
- NULL, "router_checker"));
- if (IS_ERR_VALUE(rc)) {
+ task = kthread_run(lnet_router_checker, NULL, "router_checker");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
CERROR("Can't start router checker thread: %d\n", rc);
/* block until event callback signals exit */
down(&the_lnet.ln_rc_signal);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
- LASSERT(rc == 0);
+ LASSERT(!rc);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
return -ENOMEM;
}
if (check_routers_before_use) {
- /* Note that a helpful side-effect of pinging all known routers
+ /*
+ * Note that a helpful side-effect of pinging all known routers
* at startup is that it makes them drop stale connections they
- * may have to a previous instance of me. */
+ * may have to a previous instance of me.
+ */
lnet_wait_known_routerstate();
}
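
The old code in this hunk passed the result of kthread_run() straight through PTR_ERR() and tested IS_ERR_VALUE() on the converted value, which misreads valid pointers; the fix keeps the task_struct pointer and checks IS_ERR() first. A userspace rendition of the ERR_PTR encoding that makes the required ordering obvious (these macros are re-derived here, not the real <linux/err.h> definitions):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *start_thread(int fail)
{
	static int dummy;

	return fail ? ERR_PTR(-11) : (void *)&dummy;	/* -11 == -EAGAIN */
}

int main(void)
{
	void *task = start_thread(1);

	/* Check IS_ERR() first; only then is PTR_ERR() meaningful. */
	if (IS_ERR(task))
		printf("can't start thread: %ld\n", PTR_ERR(task));
	return 0;
}
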
@@ -1061,13 +1108,15 @@ lnet_router_checker_stop(void)
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
+ /* wakeup the RC thread if it's sleeping */
+ wake_up(&the_lnet.ln_rc_waitq);
/* block until event callback signals exit */
down(&the_lnet.ln_rc_signal);
LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
- LASSERT(rc == 0);
+ LASSERT(!rc);
}
static void
@@ -1091,13 +1140,13 @@ lnet_prune_rc_data(int wait_unlink)
if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
/* router checker is stopping, prune all */
list_for_each_entry(lp, &the_lnet.ln_routers,
- lp_rtr_list) {
- if (lp->lp_rcd == NULL)
+ lp_rtr_list) {
+ if (!lp->lp_rcd)
continue;
LASSERT(list_empty(&lp->lp_rcd->rcd_list));
list_add(&lp->lp_rcd->rcd_list,
- &the_lnet.ln_rcd_deathrow);
+ &the_lnet.ln_rcd_deathrow);
lp->lp_rcd = NULL;
}
}
@@ -1119,7 +1168,7 @@ lnet_prune_rc_data(int wait_unlink)
/* release all zombie RCDs */
while (!list_empty(&the_lnet.ln_rcd_zombie)) {
list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
- rcd_list) {
+ rcd_list) {
if (LNetHandleIsInvalid(rcd->rcd_mdh))
list_move(&rcd->rcd_list, &head);
}
@@ -1131,7 +1180,7 @@ lnet_prune_rc_data(int wait_unlink)
while (!list_empty(&head)) {
rcd = list_entry(head.next,
- lnet_rc_data_t, rcd_list);
+ lnet_rc_data_t, rcd_list);
list_del_init(&rcd->rcd_list);
lnet_destroy_rc_data(rcd);
}
@@ -1151,6 +1200,33 @@ lnet_prune_rc_data(int wait_unlink)
lnet_net_unlock(LNET_LOCK_EX);
}
+/*
+ * This function is called to check if the RC should block indefinitely.
+ * It's called from lnet_router_checker() as well as being passed to
+ * wait_event_interruptible() to avoid the lost wake_up problem.
+ *
+ * When it's called from wait_event_interruptible() it must also
+ * return true when the RC state is not running, so the thread does
+ * not sleep and deadlock while the system is shutting down.
+ */
+static inline bool
+lnet_router_checker_active(void)
+{
+ if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING)
+ return true;
+
+ /*
+ * Router Checker thread needs to run when routing is enabled in
+ * order to call lnet_update_ni_status_locked()
+ */
+ if (the_lnet.ln_routing)
+ return true;
+
+ return !list_empty(&the_lnet.ln_routers) &&
+ (live_router_check_interval > 0 ||
+ dead_router_check_interval > 0);
+}
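
The comment above describes the classic lost wake-up hazard: the sleeper must re-test its predicate under the same synchronisation the waker uses to set it. The pthread equivalent of the wait_event_interruptible() discipline, as a runnable sketch:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool should_run;

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* Re-test the predicate around the wait, exactly as
	 * wait_event_interruptible() re-tests its condition. */
	while (!should_run)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("woken with work to do\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	pthread_mutex_lock(&lock);
	should_run = true;	/* set the predicate before signalling */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
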
+
static int
lnet_router_checker(void *arg)
{
@@ -1159,8 +1235,6 @@ lnet_router_checker(void *arg)
cfs_block_allsigs();
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
-
while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
__u64 version;
int cpt;
@@ -1199,15 +1273,25 @@ rescan:
lnet_prune_rc_data(0); /* don't wait for UNLINK */
- /* Call schedule_timeout() here always adds 1 to load average
+ /*
+ * Call schedule_timeout() here always adds 1 to load average
* because kernel counts # active tasks as nr_running
- * + nr_uninterruptible. */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ * + nr_uninterruptible.
+ */
+ /*
+ * If there are any routes then wake up every second; if
+ * there are no routes then sleep indefinitely until woken
+ * up by a user adding a route.
+ */
+ if (!lnet_router_checker_active())
+ wait_event_interruptible(the_lnet.ln_rc_waitq,
+ lnet_router_checker_active());
+ else
+ wait_event_interruptible_timeout(the_lnet.ln_rc_waitq,
+ false,
+ cfs_time_seconds(1));
}
- LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
-
lnet_prune_rc_data(1); /* wait for UNLINK */
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
@@ -1216,7 +1300,7 @@ rescan:
return 0;
}
-static void
+void
lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
{
int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
@@ -1237,7 +1321,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
int i;
LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
- if (rb == NULL)
+ if (!rb)
return NULL;
rb->rb_pool = rbp;
@@ -1246,7 +1330,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
page = alloc_pages_node(
cfs_cpt_spread_node(lnet_cpt_table(), cpt),
GFP_KERNEL | __GFP_ZERO, 0);
- if (page == NULL) {
+ if (!page) {
while (--i >= 0)
__free_page(rb->rb_kiov[i].kiov_page);
@@ -1254,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
return NULL;
}
- rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
+ rb->rb_kiov[i].kiov_len = PAGE_SIZE;
rb->rb_kiov[i].kiov_offset = 0;
rb->rb_kiov[i].kiov_page = page;
}
@@ -1263,66 +1347,119 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
}
static void
-lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
+lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt)
{
int npages = rbp->rbp_npages;
- int nbuffers = 0;
+ struct list_head tmp;
lnet_rtrbuf_t *rb;
+ lnet_rtrbuf_t *temp;
- if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
+ if (!rbp->rbp_nbuffers) /* not initialized or already freed */
return;
- LASSERT(list_empty(&rbp->rbp_msgs));
- LASSERT(rbp->rbp_credits == rbp->rbp_nbuffers);
+ INIT_LIST_HEAD(&tmp);
- while (!list_empty(&rbp->rbp_bufs)) {
- LASSERT(rbp->rbp_credits > 0);
+ lnet_net_lock(cpt);
+ lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt);
+ list_splice_init(&rbp->rbp_bufs, &tmp);
+ rbp->rbp_req_nbuffers = 0;
+ rbp->rbp_nbuffers = 0;
+ rbp->rbp_credits = 0;
+ rbp->rbp_mincredits = 0;
+ lnet_net_unlock(cpt);
- rb = list_entry(rbp->rbp_bufs.next,
- lnet_rtrbuf_t, rb_list);
+ /* Free buffers on the free list. */
+ list_for_each_entry_safe(rb, temp, &tmp, rb_list) {
list_del(&rb->rb_list);
lnet_destroy_rtrbuf(rb, npages);
- nbuffers++;
}
-
- LASSERT(rbp->rbp_nbuffers == nbuffers);
- LASSERT(rbp->rbp_credits == nbuffers);
-
- rbp->rbp_nbuffers = rbp->rbp_credits = 0;
}
static int
-lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
+lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
{
+ struct list_head rb_list;
lnet_rtrbuf_t *rb;
- int i;
+ int num_rb;
+ int num_buffers = 0;
+ int old_req_nbufs;
+ int npages = rbp->rbp_npages;
- if (rbp->rbp_nbuffers != 0) {
- LASSERT(rbp->rbp_nbuffers == nbufs);
+ lnet_net_lock(cpt);
+ /*
+ * If we are called for fewer buffers than already in the pool, we
+ * just lower the req_nbuffers number and excess buffers will be
+ * thrown away as they are returned to the free list. Credits
+ * then get adjusted as well.
+ * If we already have enough buffers allocated to serve the
+ * increase requested, then we can treat that the same way as we
+ * do the decrease.
+ */
+ num_rb = nbufs - rbp->rbp_nbuffers;
+ if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
+ rbp->rbp_req_nbuffers = nbufs;
+ lnet_net_unlock(cpt);
return 0;
}
+ /*
+ * store the older value of rbp_req_nbuffers and then set it to
+ * the new request to prevent lnet_return_rx_credits_locked() from
+ * freeing buffers that we need to keep around
+ */
+ old_req_nbufs = rbp->rbp_req_nbuffers;
+ rbp->rbp_req_nbuffers = nbufs;
+ lnet_net_unlock(cpt);
- for (i = 0; i < nbufs; i++) {
+ INIT_LIST_HEAD(&rb_list);
+
+ /*
+ * allocate the buffers on a local list first. If all buffers are
+ * allocated successfully then join this list to the rbp buffer
+ * list. If not then free all allocated buffers.
+ */
+ while (num_rb-- > 0) {
rb = lnet_new_rtrbuf(rbp, cpt);
+ if (!rb) {
+ CERROR("Failed to allocate %d route bufs of %d pages\n",
+ nbufs, npages);
- if (rb == NULL) {
- CERROR("Failed to allocate %d router bufs of %d pages\n",
- nbufs, rbp->rbp_npages);
- return -ENOMEM;
- }
+ lnet_net_lock(cpt);
+ rbp->rbp_req_nbuffers = old_req_nbufs;
+ lnet_net_unlock(cpt);
- rbp->rbp_nbuffers++;
- rbp->rbp_credits++;
- rbp->rbp_mincredits++;
- list_add(&rb->rb_list, &rbp->rbp_bufs);
+ goto failed;
+ }
- /* No allocation "under fire" */
- /* Otherwise we'd need code to schedule blocked msgs etc */
- LASSERT(!the_lnet.ln_routing);
+ list_add(&rb->rb_list, &rb_list);
+ num_buffers++;
}
- LASSERT(rbp->rbp_credits == nbufs);
+ lnet_net_lock(cpt);
+
+ list_splice_tail(&rb_list, &rbp->rbp_bufs);
+ rbp->rbp_nbuffers += num_buffers;
+ rbp->rbp_credits += num_buffers;
+ rbp->rbp_mincredits = rbp->rbp_credits;
+ /*
+ * We need to schedule blocked msg using the newly
+ * added buffers.
+ */
+ while (!list_empty(&rbp->rbp_bufs) &&
+ !list_empty(&rbp->rbp_msgs))
+ lnet_schedule_blocked_locked(rbp);
+
+ lnet_net_unlock(cpt);
+
return 0;
+
+failed:
+ while (!list_empty(&rb_list)) {
+ rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list);
+ list_del(&rb->rb_list);
+ lnet_destroy_rtrbuf(rb, npages);
+ }
+
+ return -ENOMEM;
}
static void
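
lnet_rtrpool_adjust_bufs() above builds the new buffers on a private list and only splices them into the pool once every allocation has succeeded, so a mid-way failure leaves the pool untouched. A minimal all-or-nothing userspace sketch of that shape (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
};

/* All-or-nothing: build num bufs on a local list; on any failure
 * free what was built and report -1, leaving *pool untouched. */
static int grow_pool(struct buf **pool, int num)
{
	struct buf *local = NULL, *b;

	while (num-- > 0) {
		b = malloc(sizeof(*b));
		if (!b)
			goto failed;
		b->next = local;
		local = b;
	}
	/* Success: splice the local list onto the pool. */
	while ((b = local)) {
		local = b->next;
		b->next = *pool;
		*pool = b;
	}
	return 0;

failed:
	while ((b = local)) {
		local = b->next;
		free(b);
	}
	return -1;
}

int main(void)
{
	struct buf *pool = NULL;

	printf("%d\n", grow_pool(&pool, 4));
	return 0;
}
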
@@ -1337,26 +1474,28 @@ lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
}
void
-lnet_rtrpools_free(void)
+lnet_rtrpools_free(int keep_pools)
{
lnet_rtrbufpool_t *rtrp;
int i;
- if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
+ if (!the_lnet.ln_rtrpools) /* uninitialized or freed */
return;
cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- lnet_rtrpool_free_bufs(&rtrp[0]);
- lnet_rtrpool_free_bufs(&rtrp[1]);
- lnet_rtrpool_free_bufs(&rtrp[2]);
+ lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
+ lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
+ lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
}
- cfs_percpt_free(the_lnet.ln_rtrpools);
- the_lnet.ln_rtrpools = NULL;
+ if (!keep_pools) {
+ cfs_percpt_free(the_lnet.ln_rtrpools);
+ the_lnet.ln_rtrpools = NULL;
+ }
}
static int
-lnet_nrb_tiny_calculate(int npages)
+lnet_nrb_tiny_calculate(void)
{
int nrbs = LNET_NRB_TINY;
@@ -1364,7 +1503,7 @@ lnet_nrb_tiny_calculate(int npages)
LCONSOLE_ERROR_MSG(0x10c,
"tiny_router_buffers=%d invalid when routing enabled\n",
tiny_router_buffers);
- return -1;
+ return -EINVAL;
}
if (tiny_router_buffers > 0)
@@ -1375,7 +1514,7 @@ lnet_nrb_tiny_calculate(int npages)
}
static int
-lnet_nrb_small_calculate(int npages)
+lnet_nrb_small_calculate(void)
{
int nrbs = LNET_NRB_SMALL;
@@ -1383,7 +1522,7 @@ lnet_nrb_small_calculate(int npages)
LCONSOLE_ERROR_MSG(0x10c,
"small_router_buffers=%d invalid when routing enabled\n",
small_router_buffers);
- return -1;
+ return -EINVAL;
}
if (small_router_buffers > 0)
@@ -1394,7 +1533,7 @@ lnet_nrb_small_calculate(int npages)
}
static int
-lnet_nrb_large_calculate(int npages)
+lnet_nrb_large_calculate(void)
{
int nrbs = LNET_NRB_LARGE;
@@ -1402,7 +1541,7 @@ lnet_nrb_large_calculate(int npages)
LCONSOLE_ERROR_MSG(0x10c,
"large_router_buffers=%d invalid when routing enabled\n",
large_router_buffers);
- return -1;
+ return -EINVAL;
}
if (large_router_buffers > 0)
@@ -1416,16 +1555,12 @@ int
lnet_rtrpools_alloc(int im_a_router)
{
lnet_rtrbufpool_t *rtrp;
- int large_pages;
- int small_pages = 1;
int nrb_tiny;
int nrb_small;
int nrb_large;
int rc;
int i;
- large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
if (!strcmp(forwarding, "")) {
/* not set either way */
if (!im_a_router)
@@ -1440,41 +1575,46 @@ lnet_rtrpools_alloc(int im_a_router)
return -EINVAL;
}
- nrb_tiny = lnet_nrb_tiny_calculate(0);
+ nrb_tiny = lnet_nrb_tiny_calculate();
if (nrb_tiny < 0)
return -EINVAL;
- nrb_small = lnet_nrb_small_calculate(small_pages);
+ nrb_small = lnet_nrb_small_calculate();
if (nrb_small < 0)
return -EINVAL;
- nrb_large = lnet_nrb_large_calculate(large_pages);
+ nrb_large = lnet_nrb_large_calculate();
if (nrb_large < 0)
return -EINVAL;
the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
LNET_NRBPOOLS *
sizeof(lnet_rtrbufpool_t));
- if (the_lnet.ln_rtrpools == NULL) {
+ if (!the_lnet.ln_rtrpools) {
LCONSOLE_ERROR_MSG(0x10c,
"Failed to initialize router buffe pool\n");
return -ENOMEM;
}
cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
- lnet_rtrpool_init(&rtrp[0], 0);
- rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
- if (rc != 0)
+ lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
+ rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
+ nrb_tiny, i);
+ if (rc)
goto failed;
- lnet_rtrpool_init(&rtrp[1], small_pages);
- rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
- if (rc != 0)
+ lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
+ LNET_NRB_SMALL_PAGES);
+ rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
+ nrb_small, i);
+ if (rc)
goto failed;
- lnet_rtrpool_init(&rtrp[2], large_pages);
- rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
- if (rc != 0)
+ lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
+ LNET_NRB_LARGE_PAGES);
+ rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
+ nrb_large, i);
+ if (rc)
goto failed;
}
@@ -1485,10 +1625,118 @@ lnet_rtrpools_alloc(int im_a_router)
return 0;
failed:
- lnet_rtrpools_free();
+ lnet_rtrpools_free(0);
return rc;
}
+static int
+lnet_rtrpools_adjust_helper(int tiny, int small, int large)
+{
+ int nrb = 0;
+ int rc = 0;
+ int i;
+ lnet_rtrbufpool_t *rtrp;
+
+ /*
+ * If the provided values for each buffer pool are different than the
+ * configured values, we need to take action.
+ */
+ if (tiny >= 0) {
+ tiny_router_buffers = tiny;
+ nrb = lnet_nrb_tiny_calculate();
+ cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+ rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
+ nrb, i);
+ if (rc)
+ return rc;
+ }
+ }
+ if (small >= 0) {
+ small_router_buffers = small;
+ nrb = lnet_nrb_small_calculate();
+ cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+ rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
+ nrb, i);
+ if (rc)
+ return rc;
+ }
+ }
+ if (large >= 0) {
+ large_router_buffers = large;
+ nrb = lnet_nrb_large_calculate();
+ cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+ rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
+ nrb, i);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int
+lnet_rtrpools_adjust(int tiny, int small, int large)
+{
+ /*
+ * This function doesn't revert the changes if adding new
+ * buffers fails; it's up to the user-space caller to revert
+ * them.
+ */
+ if (!the_lnet.ln_routing)
+ return 0;
+
+ return lnet_rtrpools_adjust_helper(tiny, small, large);
+}
+
+int
+lnet_rtrpools_enable(void)
+{
+ int rc;
+
+ if (the_lnet.ln_routing)
+ return 0;
+
+ if (!the_lnet.ln_rtrpools)
+ /*
+ * If routing is turned off, and we have never
+ * initialized the pools before, just call the
+ * standard buffer pool allocation routine as
+ * if we are just configuring this for the first
+ * time.
+ */
+ return lnet_rtrpools_alloc(1);
+
+ rc = lnet_rtrpools_adjust_helper(0, 0, 0);
+ if (rc)
+ return rc;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ the_lnet.ln_routing = 1;
+
+ the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ return 0;
+}
+
+void
+lnet_rtrpools_disable(void)
+{
+ if (!the_lnet.ln_routing)
+ return;
+
+ lnet_net_lock(LNET_LOCK_EX);
+ the_lnet.ln_routing = 0;
+ the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+
+ tiny_router_buffers = 0;
+ small_router_buffers = 0;
+ large_router_buffers = 0;
+ lnet_net_unlock(LNET_LOCK_EX);
+ lnet_rtrpools_free(1);
+}
+
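
lnet_rtrpools_enable() and lnet_rtrpools_disable() above advertise routing state to peers by clearing or setting LNET_PING_FEAT_RTE_DISABLED in the ping features word under the exclusive lock. The bit manipulation itself, standalone (the bit position here is an assumption for illustration, not the real LNet value):

#include <stdio.h>

/* Hypothetical feature bit; the real LNET_PING_FEAT_RTE_DISABLED
 * value lives in the LNet headers. */
#define FEAT_RTE_DISABLED	(1u << 3)

int main(void)
{
	unsigned int pi_features = 0;

	pi_features |= FEAT_RTE_DISABLED;	/* routing disabled */
	printf("features: 0x%x\n", pi_features);
	pi_features &= ~FEAT_RTE_DISABLED;	/* routing enabled again */
	printf("features: 0x%x\n", pi_features);
	return 0;
}
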
int
lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
{
@@ -1499,28 +1747,28 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
LASSERT(!in_interrupt());
CDEBUG(D_NET, "%s notifying %s: %s\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
- libcfs_nid2str(nid),
- alive ? "up" : "down");
+ !ni ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ libcfs_nid2str(nid),
+ alive ? "up" : "down");
- if (ni != NULL &&
+ if (ni &&
LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
CWARN("Ignoring notification of %s %s by %s (different net)\n",
- libcfs_nid2str(nid), alive ? "birth" : "death",
- libcfs_nid2str(ni->ni_nid));
+ libcfs_nid2str(nid), alive ? "birth" : "death",
+ libcfs_nid2str(ni->ni_nid));
return -EINVAL;
}
/* can't do predictions... */
if (cfs_time_after(when, now)) {
CWARN("Ignoring prediction from %s of %s %s %ld seconds in the future\n",
- (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+ !ni ? "userspace" : libcfs_nid2str(ni->ni_nid),
libcfs_nid2str(nid), alive ? "up" : "down",
cfs_duration_sec(cfs_time_sub(when, now)));
return -EINVAL;
}
- if (ni != NULL && !alive && /* LND telling me she's down */
+ if (ni && !alive && /* LND telling me she's down */
!auto_down) { /* auto-down disabled */
CDEBUG(D_NET, "Auto-down disabled\n");
return 0;
@@ -1534,23 +1782,26 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
}
lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
- if (lp == NULL) {
+ if (!lp) {
/* nid not found */
lnet_net_unlock(cpt);
CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
return 0;
}
- /* We can't fully trust LND on reporting exact peer last_alive
+ /*
+ * We can't fully trust LND on reporting exact peer last_alive
* if he notifies us about dead peer. For example ksocklnd can
* call us with when == _time_when_the_node_was_booted_ if
- * no connections were successfully established */
- if (ni != NULL && !alive && when < lp->lp_last_alive)
+ * no connections were successfully established
+ */
+ if (ni && !alive && when < lp->lp_last_alive)
when = lp->lp_last_alive;
- lnet_notify_locked(lp, ni == NULL, alive, when);
+ lnet_notify_locked(lp, !ni, alive, when);
- lnet_ni_notify_locked(ni, lp);
+ if (ni)
+ lnet_ni_notify_locked(ni, lp);
lnet_peer_decref_locked(lp);
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 396c7c4e5c83..65f65a3fc901 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -15,18 +15,16 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/libcfs/libcfs.h"
#include "../../include/linux/lnet/lib-lnet.h"
-/* This is really lnet_proc.c. You might need to update sanity test 215
- * if any file format is changed. */
+/*
+ * This is really lnet_proc.c. You might need to update sanity test 215
+ * if any file format is changed.
+ */
#define LNET_LOFFT_BITS (sizeof(loff_t) * 8)
/*
@@ -75,25 +73,6 @@
#define LNET_PROC_VERSION(v) ((unsigned int)((v) & LNET_PROC_VER_MASK))
-static int proc_call_handler(void *data, int write, loff_t *ppos,
- void __user *buffer, size_t *lenp,
- int (*handler)(void *data, int write,
- loff_t pos, void __user *buffer, int len))
-{
- int rc = handler(data, write, *ppos, buffer, *lenp);
-
- if (rc < 0)
- return rc;
-
- if (write) {
- *ppos += *lenp;
- } else {
- *lenp = rc;
- *ppos += rc;
- }
- return 0;
-}
-
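
The proc_call_handler() removed here (in favour of the shared lprocfs_call_handler()) wraps a pos/len handler so that reads report how many bytes were produced and both paths advance the file position. A self-contained userspace sketch of the same wrapper contract (handler and names hypothetical):

#include <stdio.h>
#include <string.h>

/* A handler returns bytes produced on read, or < 0 on error. */
typedef int (*handler_t)(int write, long pos, char *buf, int len);

/* Same contract as the removed proc_call_handler(): advance the
 * file position, and on a read report how much was produced. */
static int call_handler(int write, long *ppos, char *buf, int *lenp,
			handler_t h)
{
	int rc = h(write, *ppos, buf, *lenp);

	if (rc < 0)
		return rc;
	if (write) {
		*ppos += *lenp;
	} else {
		*lenp = rc;
		*ppos += rc;
	}
	return 0;
}

static int hello(int write, long pos, char *buf, int len)
{
	static const char msg[] = "hello\n";
	int n = (int)sizeof(msg) - 1 - (int)pos;

	(void)write;
	if (n <= 0)
		return 0;
	if (n > len)
		n = len;
	memcpy(buf, msg + pos, n);
	return n;
}

int main(void)
{
	char buf[8];
	long pos = 0;
	int len = sizeof(buf);

	call_handler(0, &pos, buf, &len, hello);
	printf("%.*s(pos=%ld)\n", len, buf, pos);	/* hello (pos=6) */
	return 0;
}
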
static int __proc_lnet_stats(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
@@ -111,11 +90,11 @@ static int __proc_lnet_stats(void *data, int write,
/* read */
LIBCFS_ALLOC(ctrs, sizeof(*ctrs));
- if (ctrs == NULL)
+ if (!ctrs)
return -ENOMEM;
LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL) {
+ if (!tmpstr) {
LIBCFS_FREE(ctrs, sizeof(*ctrs));
return -ENOMEM;
}
@@ -145,8 +124,8 @@ static int __proc_lnet_stats(void *data, int write,
static int proc_lnet_stats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_stats);
+ return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
+ __proc_lnet_stats);
}
static int proc_lnet_routes(struct ctl_table *table, int write,
@@ -167,16 +146,16 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
LASSERT(!write);
- if (*lenp == 0)
+ if (!*lenp)
return 0;
LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
+ if (!tmpstr)
return -ENOMEM;
s = tmpstr; /* points to current position in tmpstr[] */
- if (*ppos == 0) {
+ if (!*ppos) {
s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n",
the_lnet.ln_routing ? "enabled" : "disabled");
LASSERT(tmpstr + tmpsiz - s > 0);
@@ -206,23 +185,22 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
return -ESTALE;
}
- for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && route == NULL;
- i++) {
+ for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && !route; i++) {
rn_list = &the_lnet.ln_remote_nets_hash[i];
n = rn_list->next;
- while (n != rn_list && route == NULL) {
+ while (n != rn_list && !route) {
rnet = list_entry(n, lnet_remotenet_t,
- lrn_list);
+ lrn_list);
r = rnet->lrn_routes.next;
while (r != &rnet->lrn_routes) {
lnet_route_t *re =
list_entry(r, lnet_route_t,
- lr_list);
- if (skip == 0) {
+ lr_list);
+ if (!skip) {
route = re;
break;
}
@@ -235,12 +213,12 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
}
}
- if (route != NULL) {
+ if (route) {
__u32 net = rnet->lrn_net;
- unsigned int hops = route->lr_hops;
+ __u32 hops = route->lr_hops;
unsigned int priority = route->lr_priority;
lnet_nid_t nid = route->lr_gateway->lp_nid;
- int alive = route->lr_gateway->lp_alive;
+ int alive = lnet_is_route_alive(route);
s += snprintf(s, tmpstr + tmpsiz - s,
"%-8s %4u %8u %7s %s\n",
@@ -259,9 +237,9 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len)) {
rc = -EFAULT;
- else {
+ } else {
off += 1;
*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
}
@@ -269,7 +247,7 @@ static int proc_lnet_routes(struct ctl_table *table, int write,
LIBCFS_FREE(tmpstr, tmpsiz);
- if (rc == 0)
+ if (!rc)
*lenp = len;
return rc;
@@ -291,16 +269,16 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
LASSERT(!write);
- if (*lenp == 0)
+ if (!*lenp)
return 0;
LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
+ if (!tmpstr)
return -ENOMEM;
s = tmpstr; /* points to current position in tmpstr[] */
- if (*ppos == 0) {
+ if (!*ppos) {
s += snprintf(s, tmpstr + tmpsiz - s,
"%-4s %7s %9s %6s %12s %9s %8s %7s %s\n",
"ref", "rtr_ref", "alive_cnt", "state",
@@ -330,9 +308,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
while (r != &the_lnet.ln_routers) {
lnet_peer_t *lp = list_entry(r, lnet_peer_t,
- lp_rtr_list);
+ lp_rtr_list);
- if (skip == 0) {
+ if (!skip) {
peer = lp;
break;
}
@@ -341,7 +319,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
r = r->next;
}
- if (peer != NULL) {
+ if (peer) {
lnet_nid_t nid = peer->lp_nid;
unsigned long now = cfs_time_current();
unsigned long deadline = peer->lp_ping_deadline;
@@ -356,19 +334,21 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
lnet_route_t *rtr;
if ((peer->lp_ping_feats &
- LNET_PING_FEAT_NI_STATUS) != 0) {
+ LNET_PING_FEAT_NI_STATUS)) {
list_for_each_entry(rtr, &peer->lp_routes,
- lr_gwlist) {
- /* downis on any route should be the
- * number of downis on the gateway */
- if (rtr->lr_downis != 0) {
+ lr_gwlist) {
+ /*
+ * downis on any route should be the
+ * number of downis on the gateway
+ */
+ if (rtr->lr_downis) {
down_ni = rtr->lr_downis;
break;
}
}
}
- if (deadline == 0)
+ if (!deadline)
s += snprintf(s, tmpstr + tmpsiz - s,
"%-4d %7d %9d %6s %12d %9d %8s %7d %s\n",
nrefs, nrtrrefs, alive_cnt,
@@ -394,9 +374,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
- if (copy_to_user(buffer, tmpstr, len))
+ if (copy_to_user(buffer, tmpstr, len)) {
rc = -EFAULT;
- else {
+ } else {
off += 1;
*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
}
@@ -404,7 +384,7 @@ static int proc_lnet_routers(struct ctl_table *table, int write,
LIBCFS_FREE(tmpstr, tmpsiz);
- if (rc == 0)
+ if (!rc)
*lenp = len;
return rc;
@@ -427,7 +407,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
LASSERT(!write);
- if (*lenp == 0)
+ if (!*lenp)
return 0;
if (cpt >= LNET_CPT_NUMBER) {
@@ -436,12 +416,12 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
}
LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
+ if (!tmpstr)
return -ENOMEM;
s = tmpstr; /* points to current position in tmpstr[] */
- if (*ppos == 0) {
+ if (!*ppos) {
s += snprintf(s, tmpstr + tmpsiz - s,
"%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n",
"nid", "refs", "state", "last", "max",
@@ -470,18 +450,20 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
}
while (hash < LNET_PEER_HASH_SIZE) {
- if (p == NULL)
+ if (!p)
p = ptable->pt_hash[hash].next;
while (p != &ptable->pt_hash[hash]) {
lnet_peer_t *lp = list_entry(p, lnet_peer_t,
- lp_hashlist);
- if (skip == 0) {
+ lp_hashlist);
+ if (!skip) {
peer = lp;
- /* minor optimization: start from idx+1
+ /*
+ * minor optimization: start from idx+1
* on next iteration if we've just
- * drained lp_hashlist */
+ * drained lp_hashlist
+ */
if (lp->lp_hashlist.next ==
&ptable->pt_hash[hash]) {
hoff = 1;
@@ -497,7 +479,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
p = lp->lp_hashlist.next;
}
- if (peer != NULL)
+ if (peer)
break;
p = NULL;
@@ -505,7 +487,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
hash++;
}
- if (peer != NULL) {
+ if (peer) {
lnet_nid_t nid = peer->lp_nid;
int nrefs = peer->lp_refcount;
int lastalive = -1;
@@ -553,7 +535,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
cpt++;
hash = 0;
hoff = 1;
- if (peer == NULL && cpt < LNET_CPT_NUMBER)
+ if (!peer && cpt < LNET_CPT_NUMBER)
goto again;
}
}
@@ -571,7 +553,7 @@ static int proc_lnet_peers(struct ctl_table *table, int write,
LIBCFS_FREE(tmpstr, tmpsiz);
- if (rc == 0)
+ if (!rc)
*lenp = len;
return rc;
@@ -593,7 +575,7 @@ static int __proc_lnet_buffers(void *data, int write,
/* (4 %d) * 4 * LNET_CPT_NUMBER */
tmpsiz = 64 * (LNET_NRBPOOLS + 1) * LNET_CPT_NUMBER;
LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
+ if (!tmpstr)
return -ENOMEM;
s = tmpstr; /* points to current position in tmpstr[] */
@@ -603,7 +585,7 @@ static int __proc_lnet_buffers(void *data, int write,
"pages", "count", "credits", "min");
LASSERT(tmpstr + tmpsiz - s > 0);
- if (the_lnet.ln_rtrpools == NULL)
+ if (!the_lnet.ln_rtrpools)
goto out; /* I'm not a router */
for (idx = 0; idx < LNET_NRBPOOLS; idx++) {
@@ -638,8 +620,8 @@ static int __proc_lnet_buffers(void *data, int write,
static int proc_lnet_buffers(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_buffers);
+ return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
+ __proc_lnet_buffers);
}
static int proc_lnet_nis(struct ctl_table *table, int write,
@@ -653,16 +635,16 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
LASSERT(!write);
- if (*lenp == 0)
+ if (!*lenp)
return 0;
LIBCFS_ALLOC(tmpstr, tmpsiz);
- if (tmpstr == NULL)
+ if (!tmpstr)
return -ENOMEM;
s = tmpstr; /* points to current position in tmpstr[] */
- if (*ppos == 0) {
+ if (!*ppos) {
s += snprintf(s, tmpstr + tmpsiz - s,
"%-24s %6s %5s %4s %4s %4s %5s %5s %5s\n",
"nid", "status", "alive", "refs", "peer",
@@ -680,7 +662,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
while (n != &the_lnet.ln_nis) {
lnet_ni_t *a_ni = list_entry(n, lnet_ni_t, ni_list);
- if (skip == 0) {
+ if (!skip) {
ni = a_ni;
break;
}
@@ -689,7 +671,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
n = n->next;
}
- if (ni != NULL) {
+ if (ni) {
struct lnet_tx_queue *tq;
char *stat;
time64_t now = ktime_get_real_seconds();
@@ -705,15 +687,17 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
last_alive = 0;
lnet_ni_lock(ni);
- LASSERT(ni->ni_status != NULL);
+ LASSERT(ni->ni_status);
stat = (ni->ni_status->ns_status ==
LNET_NI_STATUS_UP) ? "up" : "down";
lnet_ni_unlock(ni);
- /* we actually output credits information for
- * TX queue of each partition */
+ /*
+ * we actually output credits information for
+ * TX queue of each partition
+ */
cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
- for (j = 0; ni->ni_cpts != NULL &&
+ for (j = 0; ni->ni_cpts &&
j < ni->ni_ncpts; j++) {
if (i == ni->ni_cpts[j])
break;
@@ -722,18 +706,19 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
if (j == ni->ni_ncpts)
continue;
- if (i != 0)
+ if (i)
lnet_net_lock(i);
s += snprintf(s, tmpstr + tmpsiz - s,
- "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
- libcfs_nid2str(ni->ni_nid), stat,
- last_alive, *ni->ni_refs[i],
- ni->ni_peertxcredits,
- ni->ni_peerrtrcredits,
- tq->tq_credits_max,
- tq->tq_credits, tq->tq_credits_min);
- if (i != 0)
+ "%-24s %6s %5d %4d %4d %4d %5d %5d %5d\n",
+ libcfs_nid2str(ni->ni_nid), stat,
+ last_alive, *ni->ni_refs[i],
+ ni->ni_peertxcredits,
+ ni->ni_peerrtrcredits,
+ tq->tq_credits_max,
+ tq->tq_credits,
+ tq->tq_credits_min);
+ if (i)
lnet_net_unlock(i);
}
LASSERT(tmpstr + tmpsiz - s > 0);
@@ -755,7 +740,7 @@ static int proc_lnet_nis(struct ctl_table *table, int write,
LIBCFS_FREE(tmpstr, tmpsiz);
- if (rc == 0)
+ if (!rc)
*lenp = len;
return rc;
@@ -795,8 +780,6 @@ static struct lnet_portal_rotors portal_rotors[] = {
},
};
-extern int portal_rotor;
-
static int __proc_lnet_portal_rotor(void *data, int write,
loff_t pos, void __user *buffer, int nob)
{
@@ -807,7 +790,7 @@ static int __proc_lnet_portal_rotor(void *data, int write,
int i;
LIBCFS_ALLOC(buf, buf_len);
- if (buf == NULL)
+ if (!buf)
return -ENOMEM;
if (!write) {
@@ -831,7 +814,7 @@ static int __proc_lnet_portal_rotor(void *data, int write,
rc = 0;
} else {
rc = cfs_trace_copyout_string(buffer, nob,
- buf + pos, "\n");
+ buf + pos, "\n");
}
goto out;
}
@@ -844,9 +827,9 @@ static int __proc_lnet_portal_rotor(void *data, int write,
rc = -EINVAL;
lnet_res_lock(0);
- for (i = 0; portal_rotors[i].pr_name != NULL; i++) {
- if (strncasecmp(portal_rotors[i].pr_name, tmp,
- strlen(portal_rotors[i].pr_name)) == 0) {
+ for (i = 0; portal_rotors[i].pr_name; i++) {
+ if (!strncasecmp(portal_rotors[i].pr_name, tmp,
+ strlen(portal_rotors[i].pr_name))) {
portal_rotor = portal_rotors[i].pr_value;
rc = 0;
break;
@@ -862,8 +845,8 @@ static int proc_lnet_portal_rotor(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- return proc_call_handler(table->data, write, ppos, buffer, lenp,
- __proc_lnet_portal_rotor);
+ return lprocfs_call_handler(table->data, write, ppos, buffer, lenp,
+ __proc_lnet_portal_rotor);
}
static struct ctl_table lnet_table[] = {
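The router/peer/NI handlers above all share one read-side shape: format the whole report into a temporary buffer, then hand it back only if the caller's buffer can hold it. Below is a minimal userspace sketch of that shape, assuming hypothetical names (fill_report, report_read); memcpy() stands in for copy_to_user(), which is not available outside the kernel.

/*
 * Userspace sketch of the pattern shared by the proc handlers above:
 * format into a scratch buffer, copy out only if the result fits.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int fill_report(char *buf, size_t size)
{
	/* stands in for the snprintf() chains in proc_lnet_routers() */
	return snprintf(buf, size, "%-24s %4s\n", "nid", "refs");
}

static int report_read(char *out, size_t *lenp)
{
	size_t tmpsiz = 256;
	char *tmpstr = malloc(tmpsiz);
	int len, rc = 0;

	if (!tmpstr)			/* checkpatch style: !ptr, not ptr == NULL */
		return -ENOMEM;

	len = fill_report(tmpstr, tmpsiz);
	if (len > (int)*lenp) {		/* caller's buffer is too small */
		rc = -EINVAL;
	} else if (len > 0) {		/* wrote something */
		memcpy(out, tmpstr, len);	/* copy_to_user() in the kernel */
		*lenp = len;
	}
	free(tmpstr);
	return rc;
}

int main(void)
{
	char buf[64];
	size_t len = sizeof(buf);

	if (!report_read(buf, &len))
		fwrite(buf, 1, len, stdout);
	return 0;
}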
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index 1f04cc1fc31c..dcb6e506f592 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -51,14 +51,14 @@ MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by d
static void
brw_client_fini(sfw_test_instance_t *tsi)
{
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
+ srpc_bulk_t *bulk;
+ sfw_test_unit_t *tsu;
LASSERT(tsi->tsi_is_client);
list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
bulk = tsu->tsu_private;
- if (bulk == NULL)
+ if (!bulk)
continue;
srpc_free_bulk(bulk);
@@ -69,38 +69,42 @@ brw_client_fini(sfw_test_instance_t *tsi)
static int
brw_client_init(sfw_test_instance_t *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- int flags;
- int npg;
- int len;
- int opc;
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
-
- LASSERT(sn != NULL);
+ sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ int flags;
+ int npg;
+ int len;
+ int opc;
+ srpc_bulk_t *bulk;
+ sfw_test_unit_t *tsu;
+
+ LASSERT(sn);
LASSERT(tsi->tsi_is_client);
- if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+ if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
+ test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
- opc = breq->blk_opc;
+ opc = breq->blk_opc;
flags = breq->blk_flags;
- npg = breq->blk_npg;
- /* NB: this is not going to work for variable page size,
- * but we have to keep it for compatibility */
- len = npg * PAGE_CACHE_SIZE;
+ npg = breq->blk_npg;
+ /*
+ * NB: this is not going to work for variable page size,
+ * but we have to keep it for compatibility
+ */
+ len = npg * PAGE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
- /* I should never get this step if it's unknown feature
- * because make_session will reject unknown feature */
- LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+ /*
+ * I should never get this step if it's unknown feature
+ * because make_session will reject unknown feature
+ */
+ LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
- opc = breq->blk_opc;
+ opc = breq->blk_opc;
flags = breq->blk_flags;
- len = breq->blk_len;
- npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ len = breq->blk_len;
+ npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
if (npg > LNET_MAX_IOV || npg <= 0)
@@ -116,7 +120,7 @@ brw_client_init(sfw_test_instance_t *tsi)
list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
npg, len, opc == LST_BRW_READ);
- if (bulk == NULL) {
+ if (!bulk) {
brw_client_fini(tsi);
return -ENOMEM;
}
@@ -127,9 +131,9 @@ brw_client_init(sfw_test_instance_t *tsi)
return 0;
}
-#define BRW_POISON 0xbeefbeefbeefbeefULL
-#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
-#define BRW_MSIZE sizeof(__u64)
+#define BRW_POISON 0xbeefbeefbeefbeefULL
+#define BRW_MAGIC 0xeeb0eeb1eeb2eeb3ULL
+#define BRW_MSIZE sizeof(__u64)
static int
brw_inject_one_error(void)
@@ -141,7 +145,7 @@ brw_inject_one_error(void)
ktime_get_ts64(&ts);
- if (((ts.tv_nsec / NSEC_PER_USEC) & 1) == 0)
+ if (!((ts.tv_nsec / NSEC_PER_USEC) & 1))
return 0;
return brw_inject_errors--;
@@ -151,9 +155,9 @@ static void
brw_fill_page(struct page *pg, int pattern, __u64 magic)
{
char *addr = page_address(pg);
- int i;
+ int i;
- LASSERT(addr != NULL);
+ LASSERT(addr);
if (pattern == LST_BRW_CHECK_NONE)
return;
@@ -163,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
if (pattern == LST_BRW_CHECK_SIMPLE) {
memcpy(addr, &magic, BRW_MSIZE);
- addr += PAGE_CACHE_SIZE - BRW_MSIZE;
+ addr += PAGE_SIZE - BRW_MSIZE;
memcpy(addr, &magic, BRW_MSIZE);
return;
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
+ for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
return;
}
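For reference, the two check patterns brw_fill_page()/brw_check_page() implement: LST_BRW_CHECK_SIMPLE stamps the 8-byte magic at the head and tail of each page, while LST_BRW_CHECK_FULL tiles it across the whole page. A standalone userspace sketch, assuming a fixed 4 KiB page buffer in place of page_address(pg):

/* Userspace sketch of the SIMPLE and FULL fill patterns above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PG	4096u
#define MSIZE	sizeof(uint64_t)

static void fill_simple(char *page, uint64_t magic)
{
	memcpy(page, &magic, MSIZE);			/* head of page */
	memcpy(page + PG - MSIZE, &magic, MSIZE);	/* tail of page */
}

static void fill_full(char *page, uint64_t magic)
{
	unsigned int i;

	for (i = 0; i < PG / MSIZE; i++)		/* whole page */
		memcpy(page + i * MSIZE, &magic, MSIZE);
}

int main(void)
{
	static char page[PG];

	fill_simple(page, 0xeeb0eeb1eeb2eeb3ULL);
	fill_full(page, 0xeeb0eeb1eeb2eeb3ULL);
	printf("first byte %#x\n", (unsigned int)(page[0] & 0xff));
	return 0;
}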
@@ -180,22 +184,22 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
static int
brw_check_page(struct page *pg, int pattern, __u64 magic)
{
- char *addr = page_address(pg);
- __u64 data = 0; /* make compiler happy */
- int i;
+ char *addr = page_address(pg);
+ __u64 data = 0; /* make compiler happy */
+ int i;
- LASSERT(addr != NULL);
+ LASSERT(addr);
if (pattern == LST_BRW_CHECK_NONE)
return 0;
if (pattern == LST_BRW_CHECK_SIMPLE) {
- data = *((__u64 *) addr);
+ data = *((__u64 *)addr);
if (data != magic)
goto bad_data;
- addr += PAGE_CACHE_SIZE - BRW_MSIZE;
- data = *((__u64 *) addr);
+ addr += PAGE_SIZE - BRW_MSIZE;
+ data = *((__u64 *)addr);
if (data != magic)
goto bad_data;
@@ -203,8 +207,8 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
}
if (pattern == LST_BRW_CHECK_FULL) {
- for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
- data = *(((__u64 *) addr) + i);
+ for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
+ data = *(((__u64 *)addr) + i);
if (data != magic)
goto bad_data;
}
@@ -216,7 +220,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
bad_data:
CERROR("Bad data in page %p: %#llx, %#llx expected\n",
- pg, data, magic);
+ pg, data, magic);
return 1;
}
@@ -240,9 +244,9 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
for (i = 0; i < bk->bk_niov; i++) {
pg = bk->bk_iovs[i].kiov_page;
- if (brw_check_page(pg, pattern, magic) != 0) {
+ if (brw_check_page(pg, pattern, magic)) {
CERROR("Bulk page %p (%d/%d) is corrupted!\n",
- pg, i, bk->bk_niov);
+ pg, i, bk->bk_niov);
return 1;
}
}
@@ -252,7 +256,7 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
static int
brw_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
+ lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
{
srpc_bulk_t *bulk = tsu->tsu_private;
sfw_test_instance_t *tsi = tsu->tsu_instance;
@@ -265,32 +269,34 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
int opc;
int rc;
- LASSERT(sn != NULL);
- LASSERT(bulk != NULL);
+ LASSERT(sn);
+ LASSERT(bulk);
- if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
+ if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
- opc = breq->blk_opc;
+ opc = breq->blk_opc;
flags = breq->blk_flags;
- npg = breq->blk_npg;
- len = npg * PAGE_CACHE_SIZE;
+ npg = breq->blk_npg;
+ len = npg * PAGE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
- /* I should never get this step if it's unknown feature
- * because make_session will reject unknown feature */
- LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+ /*
+ * I should never get this step if it's unknown feature
+ * because make_session will reject unknown feature
+ */
+ LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
- opc = breq->blk_opc;
+ opc = breq->blk_opc;
flags = breq->blk_flags;
- len = breq->blk_len;
- npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ len = breq->blk_len;
+ npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
- if (rc != 0)
+ if (rc)
return rc;
memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg]));
@@ -301,8 +307,8 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
req = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
req->brw_flags = flags;
- req->brw_rw = opc;
- req->brw_len = len;
+ req->brw_rw = opc;
+ req->brw_len = len;
*rpcpp = rpc;
return 0;
@@ -318,14 +324,14 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
- LASSERT(sn != NULL);
+ LASSERT(sn);
- if (rpc->crpc_status != 0) {
+ if (rpc->crpc_status) {
CERROR("BRW RPC to %s failed with %d\n",
- libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
+ libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_brw_errors);
- goto out;
+ return;
}
if (msg->msg_magic != SRPC_MSG_MAGIC) {
@@ -334,27 +340,24 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
}
CDEBUG(reply->brw_status ? D_WARNING : D_NET,
- "BRW RPC to %s finished with brw_status: %d\n",
- libcfs_id2str(rpc->crpc_dest), reply->brw_status);
+ "BRW RPC to %s finished with brw_status: %d\n",
+ libcfs_id2str(rpc->crpc_dest), reply->brw_status);
- if (reply->brw_status != 0) {
+ if (reply->brw_status) {
atomic_inc(&sn->sn_brw_errors);
rpc->crpc_status = -(int)reply->brw_status;
- goto out;
+ return;
}
if (reqst->brw_rw == LST_BRW_WRITE)
- goto out;
+ return;
- if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic) != 0) {
+ if (brw_check_bulk(&rpc->crpc_bulk, reqst->brw_flags, magic)) {
CERROR("Bulk data from %s is corrupted!\n",
- libcfs_id2str(rpc->crpc_dest));
+ libcfs_id2str(rpc->crpc_dest));
atomic_inc(&sn->sn_brw_errors);
rpc->crpc_status = -EBADMSG;
}
-
-out:
- return;
}
static void
@@ -362,17 +365,17 @@ brw_server_rpc_done(struct srpc_server_rpc *rpc)
{
srpc_bulk_t *blk = rpc->srpc_bulk;
- if (blk == NULL)
+ if (!blk)
return;
- if (rpc->srpc_status != 0)
+ if (rpc->srpc_status)
CERROR("Bulk transfer %s %s has failed: %d\n",
- blk->bk_sink ? "from" : "to",
- libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
+ blk->bk_sink ? "from" : "to",
+ libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
else
CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n",
- blk->bk_niov, blk->bk_sink ? "from" : "to",
- libcfs_id2str(rpc->srpc_peer));
+ blk->bk_niov, blk->bk_sink ? "from" : "to",
+ libcfs_id2str(rpc->srpc_peer));
sfw_free_pages(rpc);
}
@@ -385,16 +388,16 @@ brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
srpc_brw_reqst_t *reqst;
srpc_msg_t *reqstmsg;
- LASSERT(rpc->srpc_bulk != NULL);
- LASSERT(rpc->srpc_reqstbuf != NULL);
+ LASSERT(rpc->srpc_bulk);
+ LASSERT(rpc->srpc_reqstbuf);
reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
reqst = &reqstmsg->msg_body.brw_reqst;
- if (status != 0) {
+ if (status) {
CERROR("BRW bulk %s failed for RPC from %s: %d\n",
- reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
- libcfs_id2str(rpc->srpc_peer), status);
+ reqst->brw_rw == LST_BRW_READ ? "READ" : "WRITE",
+ libcfs_id2str(rpc->srpc_peer), status);
return -EIO;
}
@@ -404,9 +407,9 @@ brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
if (reqstmsg->msg_magic != SRPC_MSG_MAGIC)
__swab64s(&magic);
- if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic) != 0) {
+ if (brw_check_bulk(rpc->srpc_bulk, reqst->brw_flags, magic)) {
CERROR("Bulk data from %s is corrupted!\n",
- libcfs_id2str(rpc->srpc_peer));
+ libcfs_id2str(rpc->srpc_peer));
reply->brw_status = EBADMSG;
}
@@ -448,27 +451,27 @@ brw_server_handle(struct srpc_server_rpc *rpc)
return 0;
}
- if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) {
replymsg->msg_ses_feats = LST_FEATS_MASK;
reply->brw_status = EPROTO;
return 0;
}
- if ((reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+ if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
/* compat with old version */
- if ((reqst->brw_len & ~CFS_PAGE_MASK) != 0) {
+ if (reqst->brw_len & ~CFS_PAGE_MASK) {
reply->brw_status = EINVAL;
return 0;
}
- npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
+ npg = reqst->brw_len >> PAGE_SHIFT;
} else {
- npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
- if (reqst->brw_len == 0 || npg > LNET_MAX_IOV) {
+ if (!reqst->brw_len || npg > LNET_MAX_IOV) {
reply->brw_status = EINVAL;
return 0;
}
@@ -476,7 +479,7 @@ brw_server_handle(struct srpc_server_rpc *rpc)
rc = sfw_alloc_pages(rpc, rpc->srpc_scd->scd_cpt, npg,
reqst->brw_len,
reqst->brw_rw == LST_BRW_WRITE);
- if (rc != 0)
+ if (rc)
return rc;
if (reqst->brw_rw == LST_BRW_READ)
@@ -490,8 +493,8 @@ brw_server_handle(struct srpc_server_rpc *rpc)
sfw_test_client_ops_t brw_test_client;
void brw_init_test_client(void)
{
- brw_test_client.tso_init = brw_client_init;
- brw_test_client.tso_fini = brw_client_fini;
+ brw_test_client.tso_init = brw_client_init;
+ brw_test_client.tso_fini = brw_client_fini;
brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
brw_test_client.tso_done_rpc = brw_client_done_rpc;
};
@@ -499,10 +502,9 @@ void brw_init_test_client(void)
srpc_service_t brw_test_service;
void brw_init_test_service(void)
{
-
- brw_test_service.sv_id = SRPC_SERVICE_BRW;
- brw_test_service.sv_name = "brw_test";
- brw_test_service.sv_handler = brw_server_handle;
+ brw_test_service.sv_id = SRPC_SERVICE_BRW;
+ brw_test_service.sv_name = "brw_test";
+ brw_test_service.sv_handler = brw_server_handle;
brw_test_service.sv_bulk_ready = brw_bulk_ready;
- brw_test_service.sv_wi_total = brw_srv_workitems;
+ brw_test_service.sv_wi_total = brw_srv_workitems;
}
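The PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT to PAGE_SIZE/PAGE_SHIFT conversions throughout this file are mechanical: the PAGE_CACHE_* aliases were defined equal to the PAGE_* constants and were being removed tree-wide around this time, so the page-count arithmetic is unchanged. A small userspace demonstration of the round-up math the patch preserves, with sysconf() standing in for the kernel constant:

/* Round a byte length up to whole pages, as the npg computations do. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	long len = 10000;				/* sample bulk length */
	long npg = (len + page_size - 1) / page_size;	/* round up */

	printf("len=%ld page=%ld -> npg=%ld\n", len, page_size, npg);
	return 0;
}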
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index a534665403e5..79ee6c0bf7c1 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -51,20 +51,19 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
char *name;
int rc;
- if (args->lstio_ses_idp == NULL || /* address for output sid */
- args->lstio_ses_key == 0 || /* no key is specified */
- args->lstio_ses_namep == NULL || /* session name */
+ if (!args->lstio_ses_idp || /* address for output sid */
+ !args->lstio_ses_key || /* no key is specified */
+ !args->lstio_ses_namep || /* session name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_ses_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_ses_namep,
- args->lstio_ses_nmlen)) {
+ if (copy_from_user(name, args->lstio_ses_namep,
+ args->lstio_ses_nmlen)) {
LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
return -EFAULT;
}
@@ -96,12 +95,12 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
{
/* no checking of key */
- if (args->lstio_ses_idp == NULL || /* address for output sid */
- args->lstio_ses_keyp == NULL || /* address for output key */
- args->lstio_ses_featp == NULL || /* address for output features */
- args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */
- args->lstio_ses_namep == NULL || /* address for output name */
- args->lstio_ses_nmlen <= 0 ||
+ if (!args->lstio_ses_idp || /* address for output sid */
+ !args->lstio_ses_keyp || /* address for output key */
+ !args->lstio_ses_featp || /* address for output features */
+ !args->lstio_ses_ndinfo || /* address for output ndinfo */
+ !args->lstio_ses_namep || /* address for output name */
+ args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -116,28 +115,28 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
static int
lst_debug_ioctl(lstio_debug_args_t *args)
{
- char *name = NULL;
- int client = 1;
- int rc;
+ char *name = NULL;
+ int client = 1;
+ int rc;
if (args->lstio_dbg_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_dbg_resultp == NULL)
+ if (!args->lstio_dbg_resultp)
return -EINVAL;
- if (args->lstio_dbg_namep != NULL && /* name of batch/group */
+ if (args->lstio_dbg_namep && /* name of batch/group */
(args->lstio_dbg_nmlen <= 0 ||
args->lstio_dbg_nmlen > LST_NAME_SIZE))
return -EINVAL;
- if (args->lstio_dbg_namep != NULL) {
+ if (args->lstio_dbg_namep) {
LIBCFS_ALLOC(name, args->lstio_dbg_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
if (copy_from_user(name, args->lstio_dbg_namep,
- args->lstio_dbg_nmlen)) {
+ args->lstio_dbg_nmlen)) {
LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
return -EFAULT;
@@ -157,7 +156,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
case LST_OPC_BATCHSRV:
client = 0;
case LST_OPC_BATCHCLI:
- if (name == NULL)
+ if (!name)
goto out;
rc = lstcon_batch_debug(args->lstio_dbg_timeout,
@@ -165,7 +164,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
break;
case LST_OPC_GROUP:
- if (name == NULL)
+ if (!name)
goto out;
rc = lstcon_group_debug(args->lstio_dbg_timeout,
@@ -174,7 +173,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
case LST_OPC_NODES:
if (args->lstio_dbg_count <= 0 ||
- args->lstio_dbg_idsp == NULL)
+ !args->lstio_dbg_idsp)
goto out;
rc = lstcon_nodes_debug(args->lstio_dbg_timeout,
@@ -188,7 +187,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
}
out:
- if (name != NULL)
+ if (name)
LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
return rc;
@@ -203,18 +202,17 @@ lst_group_add_ioctl(lstio_group_add_args_t *args)
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_grp_namep == NULL ||
+ if (!args->lstio_grp_namep ||
args->lstio_grp_nmlen <= 0 ||
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen);
return -EFAULT;
}
@@ -231,24 +229,23 @@ lst_group_add_ioctl(lstio_group_add_args_t *args)
static int
lst_group_del_ioctl(lstio_group_del_args_t *args)
{
- int rc;
- char *name;
+ int rc;
+ char *name;
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_grp_namep == NULL ||
+ if (!args->lstio_grp_namep ||
args->lstio_grp_nmlen <= 0 ||
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
@@ -265,24 +262,23 @@ lst_group_del_ioctl(lstio_group_del_args_t *args)
static int
lst_group_update_ioctl(lstio_group_update_args_t *args)
{
- int rc;
- char *name;
+ int rc;
+ char *name;
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_grp_resultp == NULL ||
- args->lstio_grp_namep == NULL ||
+ if (!args->lstio_grp_resultp ||
+ !args->lstio_grp_namep ||
args->lstio_grp_nmlen <= 0 ||
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_grp_namep,
+ if (copy_from_user(name, args->lstio_grp_namep,
args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
@@ -300,8 +296,8 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
break;
case LST_GROUP_RMND:
- if (args->lstio_grp_count <= 0 ||
- args->lstio_grp_idsp == NULL) {
+ if (args->lstio_grp_count <= 0 ||
+ !args->lstio_grp_idsp) {
rc = -EINVAL;
break;
}
@@ -330,21 +326,21 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_grp_idsp == NULL || /* array of ids */
+ if (!args->lstio_grp_idsp || /* array of ids */
args->lstio_grp_count <= 0 ||
- args->lstio_grp_resultp == NULL ||
- args->lstio_grp_featp == NULL ||
- args->lstio_grp_namep == NULL ||
+ !args->lstio_grp_resultp ||
+ !args->lstio_grp_featp ||
+ !args->lstio_grp_namep ||
args->lstio_grp_nmlen <= 0 ||
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
if (copy_from_user(name, args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
@@ -357,7 +353,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
args->lstio_grp_resultp);
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
- if (rc == 0 &&
+ if (!rc &&
copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
return -EINVAL;
}
@@ -371,15 +367,15 @@ lst_group_list_ioctl(lstio_group_list_args_t *args)
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_grp_idx < 0 ||
- args->lstio_grp_namep == NULL ||
+ if (args->lstio_grp_idx < 0 ||
+ !args->lstio_grp_namep ||
args->lstio_grp_nmlen <= 0 ||
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
return lstcon_group_list(args->lstio_grp_idx,
- args->lstio_grp_nmlen,
- args->lstio_grp_namep);
+ args->lstio_grp_nmlen,
+ args->lstio_grp_namep);
}
static int
@@ -393,24 +389,24 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_grp_namep == NULL ||
+ if (!args->lstio_grp_namep ||
args->lstio_grp_nmlen <= 0 ||
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (args->lstio_grp_entp == NULL && /* output: group entry */
- args->lstio_grp_dentsp == NULL) /* output: node entry */
+ if (!args->lstio_grp_entp && /* output: group entry */
+ !args->lstio_grp_dentsp) /* output: node entry */
return -EINVAL;
- if (args->lstio_grp_dentsp != NULL) { /* have node entry */
- if (args->lstio_grp_idxp == NULL || /* node index */
- args->lstio_grp_ndentp == NULL) /* # of node entry */
+ if (args->lstio_grp_dentsp) { /* have node entry */
+ if (!args->lstio_grp_idxp || /* node index */
+ !args->lstio_grp_ndentp) /* # of node entry */
return -EINVAL;
if (copy_from_user(&ndent, args->lstio_grp_ndentp,
- sizeof(ndent)) ||
+ sizeof(ndent)) ||
copy_from_user(&index, args->lstio_grp_idxp,
- sizeof(index)))
+ sizeof(index)))
return -EFAULT;
if (ndent <= 0 || index < 0)
@@ -418,12 +414,11 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
}
LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_grp_namep,
- args->lstio_grp_nmlen)) {
+ if (copy_from_user(name, args->lstio_grp_namep,
+ args->lstio_grp_nmlen)) {
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
return -EFAULT;
}
@@ -435,10 +430,10 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
- if (rc != 0)
+ if (rc)
return rc;
- if (args->lstio_grp_dentsp != NULL &&
+ if (args->lstio_grp_dentsp &&
(copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
return -EFAULT;
@@ -455,18 +450,17 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args)
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_bat_namep == NULL ||
+ if (!args->lstio_bat_namep ||
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
@@ -489,18 +483,17 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args)
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_bat_namep == NULL ||
+ if (!args->lstio_bat_namep ||
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
@@ -524,19 +517,18 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_bat_resultp == NULL ||
- args->lstio_bat_namep == NULL ||
+ if (!args->lstio_bat_resultp ||
+ !args->lstio_bat_namep ||
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
@@ -554,14 +546,14 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
static int
lst_batch_query_ioctl(lstio_batch_query_args_t *args)
{
- char *name;
- int rc;
+ char *name;
+ int rc;
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_bat_resultp == NULL ||
- args->lstio_bat_namep == NULL ||
+ if (!args->lstio_bat_resultp ||
+ !args->lstio_bat_namep ||
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -570,12 +562,11 @@ lst_batch_query_ioctl(lstio_batch_query_args_t *args)
return -EINVAL;
LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep,
- args->lstio_bat_nmlen)) {
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
@@ -599,8 +590,8 @@ lst_batch_list_ioctl(lstio_batch_list_args_t *args)
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_bat_idx < 0 ||
- args->lstio_bat_namep == NULL ||
+ if (args->lstio_bat_idx < 0 ||
+ !args->lstio_bat_namep ||
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -621,24 +612,24 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_bat_namep == NULL || /* batch name */
+ if (!args->lstio_bat_namep || /* batch name */
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (args->lstio_bat_entp == NULL && /* output: batch entry */
- args->lstio_bat_dentsp == NULL) /* output: node entry */
+ if (!args->lstio_bat_entp && /* output: batch entry */
+ !args->lstio_bat_dentsp) /* output: node entry */
return -EINVAL;
- if (args->lstio_bat_dentsp != NULL) { /* have node entry */
- if (args->lstio_bat_idxp == NULL || /* node index */
- args->lstio_bat_ndentp == NULL) /* # of node entry */
+ if (args->lstio_bat_dentsp) { /* have node entry */
+ if (!args->lstio_bat_idxp || /* node index */
+ !args->lstio_bat_ndentp) /* # of node entry */
return -EINVAL;
if (copy_from_user(&index, args->lstio_bat_idxp,
- sizeof(index)) ||
+ sizeof(index)) ||
copy_from_user(&ndent, args->lstio_bat_ndentp,
- sizeof(ndent)))
+ sizeof(ndent)))
return -EFAULT;
if (ndent <= 0 || index < 0)
@@ -646,28 +637,27 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
}
LIBCFS_ALLOC(name, args->lstio_bat_nmlen + 1);
- if (name == NULL)
+ if (!name)
return -ENOMEM;
- if (copy_from_user(name,
- args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+ if (copy_from_user(name, args->lstio_bat_namep,
+ args->lstio_bat_nmlen)) {
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
return -EFAULT;
}
name[args->lstio_bat_nmlen] = 0;
- rc = lstcon_batch_info(name,
- args->lstio_bat_entp, args->lstio_bat_server,
- args->lstio_bat_testidx, &index, &ndent,
- args->lstio_bat_dentsp);
+ rc = lstcon_batch_info(name, args->lstio_bat_entp,
+ args->lstio_bat_server, args->lstio_bat_testidx,
+ &index, &ndent, args->lstio_bat_dentsp);
LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
- if (rc != 0)
+ if (rc)
return rc;
- if (args->lstio_bat_dentsp != NULL &&
+ if (args->lstio_bat_dentsp &&
(copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
rc = -EFAULT;
@@ -679,98 +669,104 @@ static int
lst_stat_query_ioctl(lstio_stat_args_t *args)
{
int rc;
- char *name;
+ char *name = NULL;
/* TODO: not finished */
if (args->lstio_sta_key != console_session.ses_key)
return -EACCES;
- if (args->lstio_sta_resultp == NULL ||
- (args->lstio_sta_namep == NULL &&
- args->lstio_sta_idsp == NULL) ||
- args->lstio_sta_nmlen <= 0 ||
- args->lstio_sta_nmlen > LST_NAME_SIZE)
+ if (!args->lstio_sta_resultp)
return -EINVAL;
- if (args->lstio_sta_idsp != NULL &&
- args->lstio_sta_count <= 0)
- return -EINVAL;
-
- LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1);
- if (name == NULL)
- return -ENOMEM;
-
- if (copy_from_user(name, args->lstio_sta_namep,
- args->lstio_sta_nmlen)) {
- LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
- return -EFAULT;
- }
+ if (args->lstio_sta_idsp) {
+ if (args->lstio_sta_count <= 0)
+ return -EINVAL;
- if (args->lstio_sta_idsp == NULL) {
- rc = lstcon_group_stat(name, args->lstio_sta_timeout,
- args->lstio_sta_resultp);
- } else {
rc = lstcon_nodes_stat(args->lstio_sta_count,
args->lstio_sta_idsp,
args->lstio_sta_timeout,
args->lstio_sta_resultp);
- }
+ } else if (args->lstio_sta_namep) {
+ if (args->lstio_sta_nmlen <= 0 ||
+ args->lstio_sta_nmlen > LST_NAME_SIZE)
+ return -EINVAL;
+
+ LIBCFS_ALLOC(name, args->lstio_sta_nmlen + 1);
+ if (!name)
+ return -ENOMEM;
- LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
+ rc = copy_from_user(name, args->lstio_sta_namep,
+ args->lstio_sta_nmlen);
+ if (!rc)
+ rc = lstcon_group_stat(name, args->lstio_sta_timeout,
+ args->lstio_sta_resultp);
+ else
+ rc = -EFAULT;
+ } else {
+ rc = -EINVAL;
+ }
+ if (name)
+ LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
return rc;
}
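The rework above untangles lst_stat_query_ioctl(): the old code validated the name length and copied the group name unconditionally, even on the per-node path where only lstio_sta_idsp matters, so an id list with no name could never succeed. The new flow branches on whichever input is present and only allocates on the group path. A userspace sketch of that control flow, with hypothetical names (struct query, do_query); strdup() stands in for LIBCFS_ALLOC plus copy_from_user:

/* Pick exactly one input source, allocate only on the path that needs
 * it, free unconditionally at the end (free(NULL) is a no-op). */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct query { const int *ids; int count; const char *name; };

static int do_query(const struct query *q)
{
	char *name = NULL;
	int rc;

	if (q->ids) {			/* per-node statistics */
		if (q->count <= 0)
			return -EINVAL;
		rc = 0;			/* lstcon_nodes_stat() would run here */
	} else if (q->name) {		/* per-group statistics */
		name = strdup(q->name);
		if (!name)
			return -ENOMEM;
		rc = 0;			/* lstcon_group_stat() would run here */
	} else {
		rc = -EINVAL;		/* neither source supplied */
	}

	free(name);
	return rc;
}

int main(void)
{
	struct query q = { .ids = NULL, .count = 0, .name = "grp" };

	return do_query(&q) ? 1 : 0;
}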
static int lst_test_add_ioctl(lstio_test_args_t *args)
{
- char *batch_name;
- char *src_name = NULL;
- char *dst_name = NULL;
- void *param = NULL;
- int ret = 0;
- int rc = -ENOMEM;
-
- if (args->lstio_tes_resultp == NULL ||
- args->lstio_tes_retp == NULL ||
- args->lstio_tes_bat_name == NULL || /* no specified batch */
+ char *batch_name;
+ char *src_name = NULL;
+ char *dst_name = NULL;
+ void *param = NULL;
+ int ret = 0;
+ int rc = -ENOMEM;
+
+ if (!args->lstio_tes_resultp ||
+ !args->lstio_tes_retp ||
+ !args->lstio_tes_bat_name || /* no specified batch */
args->lstio_tes_bat_nmlen <= 0 ||
args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
- args->lstio_tes_sgrp_name == NULL || /* no source group */
+ !args->lstio_tes_sgrp_name || /* no source group */
args->lstio_tes_sgrp_nmlen <= 0 ||
args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
- args->lstio_tes_dgrp_name == NULL || /* no target group */
+ !args->lstio_tes_dgrp_name || /* no target group */
args->lstio_tes_dgrp_nmlen <= 0 ||
args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (args->lstio_tes_loop == 0 || /* negative is infinite */
+ if (!args->lstio_tes_loop || /* negative is infinite */
args->lstio_tes_concur <= 0 ||
args->lstio_tes_dist <= 0 ||
args->lstio_tes_span <= 0)
return -EINVAL;
/* have parameter, check if parameter length is valid */
- if (args->lstio_tes_param != NULL &&
+ if (args->lstio_tes_param &&
(args->lstio_tes_param_len <= 0 ||
- args->lstio_tes_param_len > PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
+ args->lstio_tes_param_len >
+ PAGE_SIZE - sizeof(lstcon_test_t)))
return -EINVAL;
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
- if (batch_name == NULL)
+ if (!batch_name)
return rc;
LIBCFS_ALLOC(src_name, args->lstio_tes_sgrp_nmlen + 1);
- if (src_name == NULL)
+ if (!src_name)
goto out;
LIBCFS_ALLOC(dst_name, args->lstio_tes_dgrp_nmlen + 1);
- if (dst_name == NULL)
+ if (!dst_name)
goto out;
- if (args->lstio_tes_param != NULL) {
+ if (args->lstio_tes_param) {
LIBCFS_ALLOC(param, args->lstio_tes_param_len);
- if (param == NULL)
+ if (!param)
goto out;
+ if (copy_from_user(param, args->lstio_tes_param,
+ args->lstio_tes_param_len)) {
+ rc = -EFAULT;
+ goto out;
+ }
}
rc = -EFAULT;
@@ -779,54 +775,55 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
copy_from_user(src_name, args->lstio_tes_sgrp_name,
args->lstio_tes_sgrp_nmlen) ||
copy_from_user(dst_name, args->lstio_tes_dgrp_name,
- args->lstio_tes_dgrp_nmlen) ||
- copy_from_user(param, args->lstio_tes_param,
- args->lstio_tes_param_len))
+ args->lstio_tes_dgrp_nmlen))
goto out;
- rc = lstcon_test_add(batch_name,
- args->lstio_tes_type,
- args->lstio_tes_loop,
- args->lstio_tes_concur,
- args->lstio_tes_dist, args->lstio_tes_span,
- src_name, dst_name, param,
- args->lstio_tes_param_len,
- &ret, args->lstio_tes_resultp);
+ rc = lstcon_test_add(batch_name, args->lstio_tes_type,
+ args->lstio_tes_loop, args->lstio_tes_concur,
+ args->lstio_tes_dist, args->lstio_tes_span,
+ src_name, dst_name, param,
+ args->lstio_tes_param_len,
+ &ret, args->lstio_tes_resultp);
- if (ret != 0)
+ if (ret)
rc = (copy_to_user(args->lstio_tes_retp, &ret,
- sizeof(ret))) ? -EFAULT : 0;
+ sizeof(ret))) ? -EFAULT : 0;
out:
- if (batch_name != NULL)
+ if (batch_name)
LIBCFS_FREE(batch_name, args->lstio_tes_bat_nmlen + 1);
- if (src_name != NULL)
+ if (src_name)
LIBCFS_FREE(src_name, args->lstio_tes_sgrp_nmlen + 1);
- if (dst_name != NULL)
+ if (dst_name)
LIBCFS_FREE(dst_name, args->lstio_tes_dgrp_nmlen + 1);
- if (param != NULL)
+ if (param)
LIBCFS_FREE(param, args->lstio_tes_param_len);
return rc;
}
int
-lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
+lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
{
- char *buf;
- int opc = data->ioc_u32[0];
- int rc;
+ char *buf;
+ struct libcfs_ioctl_data *data;
+ int opc;
+ int rc;
if (cmd != IOC_LIBCFS_LNETST)
return -EINVAL;
- if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+ data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
+
+ opc = data->ioc_u32[0];
+
+ if (data->ioc_plen1 > PAGE_SIZE)
return -EINVAL;
LIBCFS_ALLOC(buf, data->ioc_plen1);
- if (buf == NULL)
+ if (!buf)
return -ENOMEM;
/* copy in parameter */
@@ -916,7 +913,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
}
if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
- sizeof(lstcon_trans_stat_t)))
+ sizeof(lstcon_trans_stat_t)))
rc = -EFAULT;
out:
mutex_unlock(&console_session.ses_mutex);
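The retyped entry point above takes the generic struct libcfs_ioctl_hdr and recovers the enclosing libcfs_ioctl_data with container_of(), so one handler signature can serve several ioctl payload layouts. A self-contained userspace sketch of that recovery step; note the kernel's container_of() also type-checks the member, which this minimal offsetof() version does not:

/* Recover the enclosing structure from a pointer to an embedded one. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ioctl_hdr  { unsigned int version; };
struct ioctl_data { struct ioctl_hdr hdr; unsigned int opc; };

int main(void)
{
	struct ioctl_data data = { .hdr = { 1 }, .opc = 42 };
	struct ioctl_hdr *hdr = &data.hdr;	/* what the caller now passes */
	struct ioctl_data *back = container_of(hdr, struct ioctl_data, hdr);

	printf("opc=%u\n", back->opc);		/* prints 42 */
	return 0;
}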
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 1066c70434b1..35a227d0c657 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -54,14 +54,16 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
{
lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
- LASSERT(crpc != NULL && rpc == crpc->crp_rpc);
+ LASSERT(crpc && rpc == crpc->crp_rpc);
LASSERT(crpc->crp_posted && !crpc->crp_finished);
spin_lock(&rpc->crpc_lock);
- if (crpc->crp_trans == NULL) {
- /* Orphan RPC is not in any transaction,
- * I'm just a poor body and nobody loves me */
+ if (!crpc->crp_trans) {
+ /*
+ * Orphan RPC is not in any transaction,
+ * I'm just a poor body and nobody loves me
+ */
spin_unlock(&rpc->crpc_lock);
/* release it */
@@ -72,11 +74,11 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
/* not an orphan RPC */
crpc->crp_finished = 1;
- if (crpc->crp_stamp == 0) {
+ if (!crpc->crp_stamp) {
/* not aborted */
- LASSERT(crpc->crp_status == 0);
+ LASSERT(!crpc->crp_status);
- crpc->crp_stamp = cfs_time_current();
+ crpc->crp_stamp = cfs_time_current();
crpc->crp_status = rpc->crpc_status;
}
@@ -94,16 +96,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
feats, bulk_npg, bulk_len,
lstcon_rpc_done, (void *)crpc);
- if (crpc->crp_rpc == NULL)
+ if (!crpc->crp_rpc)
return -ENOMEM;
- crpc->crp_trans = NULL;
- crpc->crp_node = nd;
- crpc->crp_posted = 0;
+ crpc->crp_trans = NULL;
+ crpc->crp_node = nd;
+ crpc->crp_posted = 0;
crpc->crp_finished = 0;
crpc->crp_unpacked = 0;
- crpc->crp_status = 0;
- crpc->crp_stamp = 0;
+ crpc->crp_status = 0;
+ crpc->crp_stamp = 0;
crpc->crp_embedded = embedded;
INIT_LIST_HEAD(&crpc->crp_link);
@@ -121,22 +123,21 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
spin_lock(&console_session.ses_rpc_lock);
- if (!list_empty(&console_session.ses_rpc_freelist)) {
- crpc = list_entry(console_session.ses_rpc_freelist.next,
- lstcon_rpc_t, crp_link);
+ crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist,
+ lstcon_rpc_t, crp_link);
+ if (crpc)
list_del_init(&crpc->crp_link);
- }
spin_unlock(&console_session.ses_rpc_lock);
- if (crpc == NULL) {
+ if (!crpc) {
LIBCFS_ALLOC(crpc, sizeof(*crpc));
- if (crpc == NULL)
+ if (!crpc)
return -ENOMEM;
}
rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
- if (rc == 0) {
+ if (!rc) {
*crpcpp = crpc;
return 0;
}
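The hunk above swaps an explicit list_empty()/list_entry() pair for list_first_entry_or_null(), which yields the first entry or NULL in a single step. A userspace sketch of the same take-from-freelist-or-allocate shape, using a minimal singly linked list in place of the kernel's doubly linked struct list_head:

/* Pop the head of a free list if one exists; otherwise allocate. */
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static struct node *first_or_null(struct node **head)
{
	struct node *n = *head;		/* NULL when the free list is empty */

	if (n)
		*head = n->next;	/* list_del_init() in the kernel */
	return n;
}

int main(void)
{
	struct node a = { NULL, 7 };
	struct node *freelist = &a;
	struct node *crpc = first_or_null(&freelist);

	if (!crpc)			/* fall back to a fresh allocation */
		crpc = calloc(1, sizeof(*crpc));
	if (crpc)
		printf("id=%d\n", crpc->id);
	return 0;
}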
@@ -155,7 +156,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
LASSERT(list_empty(&crpc->crp_link));
for (i = 0; i < bulk->bk_niov; i++) {
- if (bulk->bk_iovs[i].kiov_page == NULL)
+ if (!bulk->bk_iovs[i].kiov_page)
continue;
__free_page(bulk->bk_iovs[i].kiov_page);
@@ -172,7 +173,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
spin_lock(&console_session.ses_rpc_lock);
list_add(&crpc->crp_link,
- &console_session.ses_rpc_freelist);
+ &console_session.ses_rpc_freelist);
spin_unlock(&console_session.ses_rpc_lock);
}
@@ -186,7 +187,7 @@ lstcon_rpc_post(lstcon_rpc_t *crpc)
{
lstcon_rpc_trans_t *trans = crpc->crp_trans;
- LASSERT(trans != NULL);
+ LASSERT(trans);
atomic_inc(&trans->tas_remaining);
crpc->crp_posted = 1;
@@ -234,15 +235,17 @@ lstcon_rpc_trans_name(int transop)
}
int
-lstcon_rpc_trans_prep(struct list_head *translist,
- int transop, lstcon_rpc_trans_t **transpp)
+lstcon_rpc_trans_prep(struct list_head *translist, int transop,
+ lstcon_rpc_trans_t **transpp)
{
lstcon_rpc_trans_t *trans;
- if (translist != NULL) {
+ if (translist) {
list_for_each_entry(trans, translist, tas_link) {
- /* Can't enqueue two private transaction on
- * the same object */
+ /*
+ * Can't enqueue two private transaction on
+ * the same object
+ */
if ((trans->tas_opc & transop) == LST_TRANS_PRIVATE)
return -EPERM;
}
@@ -250,12 +253,12 @@ lstcon_rpc_trans_prep(struct list_head *translist,
/* create a trans group */
LIBCFS_ALLOC(trans, sizeof(*trans));
- if (trans == NULL)
+ if (!trans)
return -ENOMEM;
trans->tas_opc = transop;
- if (translist == NULL)
+ if (!translist)
INIT_LIST_HEAD(&trans->tas_olink);
else
list_add_tail(&trans->tas_olink, translist);
@@ -285,8 +288,8 @@ void
lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
{
srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_node_t *nd;
+ lstcon_rpc_t *crpc;
+ lstcon_node_t *nd;
list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
rpc = crpc->crp_rpc;
@@ -294,8 +297,8 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
spin_lock(&rpc->crpc_lock);
if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp != 0) { /* rpc done or aborted already */
- if (crpc->crp_stamp == 0) {
+ crpc->crp_stamp) { /* rpc done or aborted already */
+ if (!crpc->crp_stamp) {
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = -EINTR;
}
@@ -303,14 +306,14 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
continue;
}
- crpc->crp_stamp = cfs_time_current();
+ crpc->crp_stamp = cfs_time_current();
crpc->crp_status = error;
spin_unlock(&rpc->crpc_lock);
sfw_abort_rpc(rpc);
- if (error != ETIMEDOUT)
+ if (error != -ETIMEDOUT)
continue;
nd = crpc->crp_node;
@@ -329,7 +332,7 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
!list_empty(&trans->tas_olink)) /* Not an end session RPC */
return 1;
- return (atomic_read(&trans->tas_remaining) == 0) ? 1 : 0;
+ return !atomic_read(&trans->tas_remaining) ? 1 : 0;
}
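Besides restyling, the hunk above fixes a sign bug: errors reach lstcon_rpc_trans_abort() as negative errno values, so the old comparison against the positive ETIMEDOUT could never match and the timed-out-node bookkeeping after it was always skipped. A tiny demonstration of the convention:

/* Kernel-style code passes errors as negative errnos; testing against
 * the positive constant silently never fires. */
#include <errno.h>
#include <stdio.h>

static void abort_rpc(int error)
{
	if (error == ETIMEDOUT)		/* old test: never true for -errno */
		puts("old check fired");
	if (error == -ETIMEDOUT)	/* fixed test */
		puts("new check fired");
}

int main(void)
{
	abort_rpc(-ETIMEDOUT);		/* only "new check fired" prints */
	return 0;
}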
int
@@ -366,7 +369,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
if (console_session.ses_shutdown)
rc = -ESHUTDOWN;
- if (rc != 0 || atomic_read(&trans->tas_remaining) != 0) {
+ if (rc || atomic_read(&trans->tas_remaining)) {
/* treat short timeout as canceled */
if (rc == -ETIMEDOUT && timeout < LST_TRANS_MIN_TIMEOUT * 2)
rc = -EINTR;
@@ -385,14 +388,14 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
static int
lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
{
- lstcon_node_t *nd = crpc->crp_node;
+ lstcon_node_t *nd = crpc->crp_node;
srpc_client_rpc_t *rpc = crpc->crp_rpc;
srpc_generic_reply_t *rep;
- LASSERT(nd != NULL && rpc != NULL);
- LASSERT(crpc->crp_stamp != 0);
+ LASSERT(nd && rpc);
+ LASSERT(crpc->crp_stamp);
- if (crpc->crp_status != 0) {
+ if (crpc->crp_status) {
*msgpp = NULL;
return crpc->crp_status;
}
@@ -422,23 +425,23 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
void
lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
{
- lstcon_rpc_t *crpc;
+ lstcon_rpc_t *crpc;
srpc_msg_t *rep;
int error;
- LASSERT(stat != NULL);
+ LASSERT(stat);
memset(stat, 0, sizeof(*stat));
list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
lstcon_rpc_stat_total(stat, 1);
- LASSERT(crpc->crp_stamp != 0);
+ LASSERT(crpc->crp_stamp);
error = lstcon_rpc_get_reply(crpc, &rep);
- if (error != 0) {
+ if (error) {
lstcon_rpc_stat_failure(stat, 1);
- if (stat->trs_rpc_errno == 0)
+ if (!stat->trs_rpc_errno)
stat->trs_rpc_errno = -error;
continue;
@@ -449,7 +452,7 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
lstcon_rpc_stat_reply(trans, rep, crpc->crp_node, stat);
}
- if (trans->tas_opc == LST_TRANS_SESNEW && stat->trs_fwk_errno == 0) {
+ if (trans->tas_opc == LST_TRANS_SESNEW && !stat->trs_fwk_errno) {
stat->trs_fwk_errno =
lstcon_session_feats_check(trans->tas_features);
}
@@ -460,17 +463,15 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
lstcon_rpc_stat_failure(stat, 0),
lstcon_rpc_stat_total(stat, 0),
stat->trs_rpc_errno, stat->trs_fwk_errno);
-
- return;
}
int
lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
- struct list_head *head_up,
+ struct list_head __user *head_up,
lstcon_rpc_readent_func_t readent)
{
struct list_head tmp;
- struct list_head *next;
+ struct list_head __user *next;
lstcon_rpc_ent_t *ent;
srpc_generic_reply_t *rep;
lstcon_rpc_t *crpc;
@@ -480,13 +481,13 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
struct timeval tv;
int error;
- LASSERT(head_up != NULL);
+ LASSERT(head_up);
next = head_up;
list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
if (copy_from_user(&tmp, next,
- sizeof(struct list_head)))
+ sizeof(struct list_head)))
return -EFAULT;
if (tmp.next == head_up)
@@ -496,7 +497,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
ent = list_entry(next, lstcon_rpc_ent_t, rpe_link);
- LASSERT(crpc->crp_stamp != 0);
+ LASSERT(crpc->crp_stamp);
error = lstcon_rpc_get_reply(crpc, &msg);
@@ -506,33 +507,32 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
(unsigned long)console_session.ses_id.ses_stamp);
jiffies_to_timeval(dur, &tv);
- if (copy_to_user(&ent->rpe_peer,
- &nd->nd_id, sizeof(lnet_process_id_t)) ||
+ if (copy_to_user(&ent->rpe_peer, &nd->nd_id,
+ sizeof(lnet_process_id_t)) ||
copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
- copy_to_user(&ent->rpe_state,
- &nd->nd_state, sizeof(nd->nd_state)) ||
+ copy_to_user(&ent->rpe_state, &nd->nd_state,
+ sizeof(nd->nd_state)) ||
copy_to_user(&ent->rpe_rpc_errno, &error,
- sizeof(error)))
+ sizeof(error)))
return -EFAULT;
- if (error != 0)
+ if (error)
continue;
/* RPC is done */
rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
- if (copy_to_user(&ent->rpe_sid,
- &rep->sid, sizeof(lst_sid_t)) ||
- copy_to_user(&ent->rpe_fwk_errno,
- &rep->status, sizeof(rep->status)))
+ if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) ||
+ copy_to_user(&ent->rpe_fwk_errno, &rep->status,
+ sizeof(rep->status)))
return -EFAULT;
- if (readent == NULL)
+ if (!readent)
continue;
error = readent(trans->tas_opc, msg, ent);
- if (error != 0)
+ if (error)
return error;
}
@@ -547,8 +547,7 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
lstcon_rpc_t *tmp;
int count = 0;
- list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list,
- crp_link) {
+ list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
@@ -563,14 +562,15 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
continue;
}
- /* rpcs can be still not callbacked (even LNetMDUnlink is called)
+ /*
+ * rpcs can be still not callbacked (even LNetMDUnlink is called)
* because huge timeout for inaccessible network, don't make
* user wait for them, just abandon them, they will be recycled
- * in callback */
+ * in callback
+ */
+ LASSERT(crpc->crp_status);
- LASSERT(crpc->crp_status != 0);
-
- crpc->crp_node = NULL;
+ crpc->crp_node = NULL;
crpc->crp_trans = NULL;
list_del_init(&crpc->crp_link);
count++;
@@ -580,7 +580,7 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
atomic_dec(&trans->tas_remaining);
}
- LASSERT(atomic_read(&trans->tas_remaining) == 0);
+ LASSERT(!atomic_read(&trans->tas_remaining));
list_del(&trans->tas_link);
if (!list_empty(&trans->tas_olink))
@@ -590,8 +590,6 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
lstcon_rpc_trans_name(trans->tas_opc), count);
LIBCFS_FREE(trans, sizeof(*trans));
-
- return;
}
int
@@ -606,12 +604,12 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
case LST_TRANS_SESNEW:
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_MAKE_SESSION,
feats, 0, 0, crpc);
- if (rc != 0)
+ if (rc)
return rc;
msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;
- msrq->mksn_sid = console_session.ses_id;
- msrq->mksn_force = console_session.ses_force;
+ msrq->mksn_sid = console_session.ses_id;
+ msrq->mksn_force = console_session.ses_force;
strlcpy(msrq->mksn_name, console_session.ses_name,
sizeof(msrq->mksn_name));
break;
@@ -619,7 +617,7 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
case LST_TRANS_SESEND:
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_REMOVE_SESSION,
feats, 0, 0, crpc);
- if (rc != 0)
+ if (rc)
return rc;
rsrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.rmsn_reqst;
@@ -640,12 +638,12 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
- if (rc != 0)
+ if (rc)
return rc;
drq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
- drq->dbg_sid = console_session.ses_id;
+ drq->dbg_sid = console_session.ses_id;
drq->dbg_flags = 0;
return rc;
@@ -655,28 +653,28 @@ int
lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc)
{
- lstcon_batch_t *batch;
+ lstcon_batch_t *batch;
srpc_batch_reqst_t *brq;
- int rc;
+ int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
- if (rc != 0)
+ if (rc)
return rc;
brq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.bat_reqst;
- brq->bar_sid = console_session.ses_id;
- brq->bar_bid = tsb->tsb_id;
+ brq->bar_sid = console_session.ses_id;
+ brq->bar_bid = tsb->tsb_id;
brq->bar_testidx = tsb->tsb_index;
- brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN :
- (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP :
- SRPC_BATCH_OPC_QUERY);
+ brq->bar_opc = transop == LST_TRANS_TSBRUN ? SRPC_BATCH_OPC_RUN :
+ (transop == LST_TRANS_TSBSTOP ? SRPC_BATCH_OPC_STOP :
+ SRPC_BATCH_OPC_QUERY);
if (transop != LST_TRANS_TSBRUN &&
transop != LST_TRANS_TSBSTOP)
return 0;
- LASSERT(tsb->tsb_index == 0);
+ LASSERT(!tsb->tsb_index);
batch = (lstcon_batch_t *)tsb;
brq->bar_arg = batch->bat_arg;
@@ -688,15 +686,15 @@ int
lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
{
srpc_stat_reqst_t *srq;
- int rc;
+ int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
- if (rc != 0)
+ if (rc)
return rc;
srq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.stat_reqst;
- srq->str_sid = console_session.ses_id;
+ srq->str_sid = console_session.ses_id;
srq->str_type = 0; /* XXX remove it */
return 0;
@@ -736,7 +734,7 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
return -EINVAL;
start = ((idx / dist) * span) % grp->grp_nnode;
- end = ((idx / dist) * span + span - 1) % grp->grp_nnode;
+ end = ((idx / dist) * span + span - 1) % grp->grp_nnode;
list_for_each_entry(ndl, &grp->grp_ndl_list, ndl_link) {
nd = ndl->ndl_node;
@@ -776,7 +774,7 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
{
test_ping_req_t *prq = &req->tsr_u.ping;
- prq->png_size = param->png_size;
+ prq->png_size = param->png_size;
prq->png_flags = param->png_flags;
/* TODO dest */
return 0;
@@ -787,9 +785,9 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
{
test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
- brq->blk_opc = param->blk_opc;
- brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
- PAGE_CACHE_SIZE;
+ brq->blk_opc = param->blk_opc;
+ brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
+ PAGE_SIZE;
brq->blk_flags = param->blk_flags;
return 0;
@@ -800,9 +798,9 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
{
test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;
- brq->blk_opc = param->blk_opc;
- brq->blk_flags = param->blk_flags;
- brq->blk_len = param->blk_size;
+ brq->blk_opc = param->blk_opc;
+ brq->blk_flags = param->blk_flags;
+ brq->blk_len = param->blk_size;
brq->blk_offset = 0; /* reserved */
return 0;
@@ -812,27 +810,27 @@ int
lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
lstcon_test_t *test, lstcon_rpc_t **crpc)
{
- lstcon_group_t *sgrp = test->tes_src_grp;
- lstcon_group_t *dgrp = test->tes_dst_grp;
+ lstcon_group_t *sgrp = test->tes_src_grp;
+ lstcon_group_t *dgrp = test->tes_dst_grp;
srpc_test_reqst_t *trq;
- srpc_bulk_t *bulk;
- int i;
- int npg = 0;
- int nob = 0;
- int rc = 0;
+ srpc_bulk_t *bulk;
+ int i;
+ int npg = 0;
+ int nob = 0;
+ int rc = 0;
if (transop == LST_TRANS_TSBCLIADD) {
npg = sfw_id_pages(test->tes_span);
- nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
- npg * PAGE_CACHE_SIZE :
+ nob = !(feats & LST_FEAT_BULK_LEN) ?
+ npg * PAGE_SIZE :
sizeof(lnet_process_id_packed_t) * test->tes_span;
}
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_TEST, feats, npg, nob, crpc);
- if (rc != 0)
+ if (rc)
return rc;
- trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
+ trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst;
if (transop == LST_TRANS_TSBSRVADD) {
int ndist = (sgrp->grp_nnode + test->tes_dist - 1) /
@@ -842,27 +840,27 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
int nmax = (ndist + nspan - 1) / nspan;
trq->tsr_ndest = 0;
- trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
+ trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
} else {
bulk = &(*crpc)->crp_rpc->crpc_bulk;
for (i = 0; i < npg; i++) {
- int len;
+ int len;
LASSERT(nob > 0);
- len = (feats & LST_FEAT_BULK_LEN) == 0 ?
- PAGE_CACHE_SIZE :
- min_t(int, nob, PAGE_CACHE_SIZE);
+ len = !(feats & LST_FEAT_BULK_LEN) ?
+ PAGE_SIZE :
+ min_t(int, nob, PAGE_SIZE);
nob -= len;
bulk->bk_iovs[i].kiov_offset = 0;
- bulk->bk_iovs[i].kiov_len = len;
- bulk->bk_iovs[i].kiov_page =
+ bulk->bk_iovs[i].kiov_len = len;
+ bulk->bk_iovs[i].kiov_page =
alloc_page(GFP_KERNEL);
- if (bulk->bk_iovs[i].kiov_page == NULL) {
+ if (!bulk->bk_iovs[i].kiov_page) {
lstcon_rpc_put(*crpc);
return -ENOMEM;
}
@@ -877,19 +875,19 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
test->tes_dist,
test->tes_span,
npg, &bulk->bk_iovs[0]);
- if (rc != 0) {
+ if (rc) {
lstcon_rpc_put(*crpc);
return rc;
}
trq->tsr_ndest = test->tes_span;
- trq->tsr_loop = test->tes_loop;
+ trq->tsr_loop = test->tes_loop;
}
- trq->tsr_sid = console_session.ses_id;
- trq->tsr_bid = test->tes_hdr.tsb_id;
- trq->tsr_concur = test->tes_concur;
- trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0;
+ trq->tsr_sid = console_session.ses_id;
+ trq->tsr_bid = test->tes_hdr.tsb_id;
+ trq->tsr_concur = test->tes_concur;
+ trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0;
trq->tsr_stop_onerr = !!test->tes_stop_onerr;
switch (test->tes_type) {
@@ -901,7 +899,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
case LST_TEST_BULK:
trq->tsr_service = SRPC_SERVICE_BRW;
- if ((feats & LST_FEAT_BULK_LEN) == 0) {
+ if (!(feats & LST_FEAT_BULK_LEN)) {
rc = lstcon_bulkrpc_v0_prep((lst_test_bulk_param_t *)
&test->tes_param[0], trq);
} else {
@@ -923,10 +921,10 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
lstcon_node_t *nd, srpc_msg_t *reply)
{
srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
- int status = mksn_rep->mksn_status;
+ int status = mksn_rep->mksn_status;
- if (status == 0 &&
- (reply->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ if (!status &&
+ (reply->msg_ses_feats & ~LST_FEATS_MASK)) {
mksn_rep->mksn_status = EPROTO;
status = EPROTO;
}
@@ -937,22 +935,27 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
reply->msg_ses_feats);
}
- if (status != 0)
+ if (status)
return status;
if (!trans->tas_feats_updated) {
- trans->tas_feats_updated = 1;
- trans->tas_features = reply->msg_ses_feats;
+ spin_lock(&console_session.ses_rpc_lock);
+ if (!trans->tas_feats_updated) { /* recheck with lock */
+ trans->tas_feats_updated = 1;
+ trans->tas_features = reply->msg_ses_feats;
+ }
+ spin_unlock(&console_session.ses_rpc_lock);
}
if (reply->msg_ses_feats != trans->tas_features) {
CNETERR("Framework features %x from %s is different with features on this transaction: %x\n",
- reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
- trans->tas_features);
- status = mksn_rep->mksn_status = EPROTO;
+ reply->msg_ses_feats, libcfs_nid2str(nd->nd_id.nid),
+ trans->tas_features);
+ mksn_rep->mksn_status = EPROTO;
+ status = EPROTO;
}
- if (status == 0) {
+ if (!status) {
/* session timeout on remote node */
nd->nd_timeout = mksn_rep->mksn_timeout;
}
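The new hunk above turns the feature recording into a check/lock/recheck: the unlocked test keeps the common case cheap, and the retest under ses_rpc_lock ensures that when several replies race in, only the first one writes tas_features. A userspace pthread sketch of the pattern (build with -lpthread):

/* Check/lock/recheck: only the first racing caller records the value. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int updated;
static unsigned int features;

static void record_features(unsigned int feats)
{
	if (!updated) {			/* cheap unlocked test */
		pthread_mutex_lock(&lock);
		if (!updated) {		/* recheck with the lock held */
			updated = 1;
			features = feats;
		}
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	record_features(0x1);
	record_features(0x2);		/* loses the race: ignored */
	printf("features=%#x\n", features);
	return 0;
}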
@@ -964,17 +967,17 @@ void
lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
lstcon_node_t *nd, lstcon_trans_stat_t *stat)
{
- srpc_rmsn_reply_t *rmsn_rep;
+ srpc_rmsn_reply_t *rmsn_rep;
srpc_debug_reply_t *dbg_rep;
srpc_batch_reply_t *bat_rep;
- srpc_test_reply_t *test_rep;
- srpc_stat_reply_t *stat_rep;
- int rc = 0;
+ srpc_test_reply_t *test_rep;
+ srpc_stat_reply_t *stat_rep;
+ int rc = 0;
switch (trans->tas_opc) {
case LST_TRANS_SESNEW:
rc = lstcon_sesnew_stat_reply(trans, nd, msg);
- if (rc == 0) {
+ if (!rc) {
lstcon_sesop_stat_success(stat, 1);
return;
}
@@ -985,7 +988,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
case LST_TRANS_SESEND:
rmsn_rep = &msg->msg_body.rmsn_reply;
/* ESRCH is not an error for end session */
- if (rmsn_rep->rmsn_status == 0 ||
+ if (!rmsn_rep->rmsn_status ||
rmsn_rep->rmsn_status == ESRCH) {
lstcon_sesop_stat_success(stat, 1);
return;
@@ -1014,7 +1017,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
case LST_TRANS_TSBSTOP:
bat_rep = &msg->msg_body.bat_reply;
- if (bat_rep->bar_status == 0) {
+ if (!bat_rep->bar_status) {
lstcon_tsbop_stat_success(stat, 1);
return;
}
@@ -1033,12 +1036,12 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
case LST_TRANS_TSBSRVQRY:
bat_rep = &msg->msg_body.bat_reply;
- if (bat_rep->bar_active != 0)
+ if (bat_rep->bar_active)
lstcon_tsbqry_stat_run(stat, 1);
else
lstcon_tsbqry_stat_idle(stat, 1);
- if (bat_rep->bar_status == 0)
+ if (!bat_rep->bar_status)
return;
lstcon_tsbqry_stat_failure(stat, 1);
@@ -1049,7 +1052,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
case LST_TRANS_TSBSRVADD:
test_rep = &msg->msg_body.tes_reply;
- if (test_rep->tsr_status == 0) {
+ if (!test_rep->tsr_status) {
lstcon_tsbop_stat_success(stat, 1);
return;
}
@@ -1061,7 +1064,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
case LST_TRANS_STATQRY:
stat_rep = &msg->msg_body.stat_reply;
- if (stat_rep->str_status == 0) {
+ if (!stat_rep->str_status) {
lstcon_statqry_stat_success(stat, 1);
return;
}
@@ -1074,10 +1077,8 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
LBUG();
}
- if (stat->trs_fwk_errno == 0)
+ if (!stat->trs_fwk_errno)
stat->trs_fwk_errno = rc;
-
- return;
}
int
@@ -1096,22 +1097,22 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
/* Creating session RPC for list of nodes */
rc = lstcon_rpc_trans_prep(translist, transop, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction %d: %d\n", transop, rc);
return rc;
}
feats = trans->tas_features;
list_for_each_entry(ndl, ndlist, ndl_link) {
- rc = condition == NULL ? 1 :
+ rc = !condition ? 1 :
condition(transop, ndl->ndl_node, arg);
- if (rc == 0)
+ if (!rc)
continue;
if (rc < 0) {
CDEBUG(D_NET, "Condition error while creating RPC for transaction %d: %d\n",
- transop, rc);
+ transop, rc);
break;
}
@@ -1146,7 +1147,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
break;
}
- if (rc != 0) {
+ if (rc) {
CERROR("Failed to create RPC for transaction %s: %d\n",
lstcon_rpc_trans_name(transop), rc);
break;
@@ -1155,7 +1156,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
lstcon_rpc_trans_addreq(trans, rpc);
}
- if (rc == 0) {
+ if (!rc) {
*transpp = trans;
return 0;
}
@@ -1168,7 +1169,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
static void
lstcon_rpc_pinger(void *arg)
{
- stt_timer_t *ptimer = (stt_timer_t *)arg;
+ struct stt_timer *ptimer = (struct stt_timer *)arg;
lstcon_rpc_trans_t *trans;
lstcon_rpc_t *crpc;
srpc_msg_t *rep;
@@ -1196,7 +1197,7 @@ lstcon_rpc_pinger(void *arg)
trans = console_session.ses_ping;
- LASSERT(trans != NULL);
+ LASSERT(trans);
list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link) {
nd = ndl->ndl_node;
@@ -1208,7 +1209,7 @@ lstcon_rpc_pinger(void *arg)
rc = lstcon_sesrpc_prep(nd, LST_TRANS_SESEND,
trans->tas_features, &crpc);
- if (rc != 0) {
+ if (rc) {
CERROR("Out of memory\n");
break;
}
@@ -1221,7 +1222,7 @@ lstcon_rpc_pinger(void *arg)
crpc = &nd->nd_ping;
- if (crpc->crp_rpc != NULL) {
+ if (crpc->crp_rpc) {
LASSERT(crpc->crp_trans == trans);
LASSERT(!list_empty(&crpc->crp_link));
@@ -1247,20 +1248,20 @@ lstcon_rpc_pinger(void *arg)
if (nd->nd_state != LST_NODE_ACTIVE)
continue;
- intv = (jiffies - nd->nd_stamp) / HZ;
+ intv = (jiffies - nd->nd_stamp) / msecs_to_jiffies(MSEC_PER_SEC);
if (intv < nd->nd_timeout / 2)
continue;
rc = lstcon_rpc_init(nd, SRPC_SERVICE_DEBUG,
trans->tas_features, 0, 0, 1, crpc);
- if (rc != 0) {
+ if (rc) {
CERROR("Out of memory\n");
break;
}
drq = &crpc->crp_rpc->crpc_reqstmsg.msg_body.dbg_reqst;
- drq->dbg_sid = console_session.ses_id;
+ drq->dbg_sid = console_session.ses_id;
drq->dbg_flags = 0;
lstcon_rpc_trans_addreq(trans, crpc);
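
The interval computation above replaces a bare HZ divisor with msecs_to_jiffies(MSEC_PER_SEC), which evaluates to the same value but states the intent: convert a jiffies delta into whole seconds before comparing against half the node's session timeout. A standalone sketch of that test, assuming a 100 Hz tick (the real HZ is configuration-dependent):

#include <stdio.h>

#define HZ 100	/* assumed tick rate; msecs_to_jiffies(MSEC_PER_SEC) == HZ */

static int node_due_for_ping(unsigned long now, unsigned long stamp,
			     int timeout_sec)
{
	unsigned long intv = (now - stamp) / HZ;	/* seconds idle */

	/* mirror the kernel logic: skip until half the timeout passes */
	return intv >= (unsigned long)(timeout_sec / 2);
}

int main(void)
{
	/* 30 s session timeout, node silent for 16 s => ping it */
	printf("%d\n", node_due_for_ping(1600, 0, 30));
	return 0;
}
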
@@ -1285,15 +1286,15 @@ lstcon_rpc_pinger(void *arg)
int
lstcon_rpc_pinger_start(void)
{
- stt_timer_t *ptimer;
+ struct stt_timer *ptimer;
int rc;
LASSERT(list_empty(&console_session.ses_rpc_freelist));
- LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT(!atomic_read(&console_session.ses_rpc_counter));
rc = lstcon_rpc_trans_prep(NULL, LST_TRANS_SESPING,
&console_session.ses_ping);
- if (rc != 0) {
+ if (rc) {
CERROR("Failed to create console pinger\n");
return rc;
}
@@ -1327,6 +1328,7 @@ lstcon_rpc_cleanup_wait(void)
{
lstcon_rpc_trans_t *trans;
lstcon_rpc_t *crpc;
+ lstcon_rpc_t *temp;
struct list_head *pacer;
struct list_head zlist;
@@ -1337,7 +1339,7 @@ lstcon_rpc_cleanup_wait(void)
while (!list_empty(&console_session.ses_trans_list)) {
list_for_each(pacer, &console_session.ses_trans_list) {
trans = list_entry(pacer, lstcon_rpc_trans_t,
- tas_link);
+ tas_link);
CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
lstcon_rpc_trans_name(trans->tas_opc));
@@ -1356,7 +1358,7 @@ lstcon_rpc_cleanup_wait(void)
spin_lock(&console_session.ses_rpc_lock);
- lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
+ lst_wait_until(!atomic_read(&console_session.ses_rpc_counter),
console_session.ses_rpc_lock,
"Network is not accessible or target is down, waiting for %d console RPCs to being recycled\n",
atomic_read(&console_session.ses_rpc_counter));
@@ -1366,9 +1368,7 @@ lstcon_rpc_cleanup_wait(void)
spin_unlock(&console_session.ses_rpc_lock);
- while (!list_empty(&zlist)) {
- crpc = list_entry(zlist.next, lstcon_rpc_t, crp_link);
-
+ list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
list_del(&crpc->crp_link);
LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
}
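
The rewrite above moves the zombie-list drain onto list_for_each_entry_safe(), which stashes the next entry before the loop body runs so each RPC can be unlinked and freed safely. A minimal sketch of why the successor must be saved first, using a plain singly linked list rather than the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct crpc {
	struct crpc *next;
	int id;
};

static void drain(struct crpc *head)
{
	struct crpc *cur, *tmp;

	for (cur = head; cur; cur = tmp) {
		tmp = cur->next;	/* grab the successor first */
		printf("freeing rpc %d\n", cur->id);
		free(cur);		/* cur->next is now invalid */
	}
}

int main(void)
{
	struct crpc *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct crpc *c = malloc(sizeof(*c));

		c->id = i;
		c->next = head;
		head = c;
	}
	drain(head);
	return 0;
}
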
@@ -1394,5 +1394,5 @@ void
lstcon_rpc_module_fini(void)
{
LASSERT(list_empty(&console_session.ses_rpc_freelist));
- LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0);
+ LASSERT(!atomic_read(&console_session.ses_rpc_counter));
}
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 95c832ff7375..3e7839dad5bb 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -51,12 +51,12 @@
#include "selftest.h"
/* Console rpc and rpc transaction */
-#define LST_TRANS_TIMEOUT 30
-#define LST_TRANS_MIN_TIMEOUT 3
+#define LST_TRANS_TIMEOUT 30
+#define LST_TRANS_MIN_TIMEOUT 3
#define LST_VALIDATE_TIMEOUT(t) min(max(t, LST_TRANS_MIN_TIMEOUT), LST_TRANS_TIMEOUT)
-#define LST_PING_INTERVAL 8
+#define LST_PING_INTERVAL 8
struct lstcon_rpc_trans;
struct lstcon_tsb_hdr;
@@ -64,49 +64,50 @@ struct lstcon_test;
struct lstcon_node;
typedef struct lstcon_rpc {
- struct list_head crp_link; /* chain on rpc transaction */
- srpc_client_rpc_t *crp_rpc; /* client rpc */
- struct lstcon_node *crp_node; /* destination node */
- struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
-
- unsigned int crp_posted:1; /* rpc is posted */
- unsigned int crp_finished:1; /* rpc is finished */
- unsigned int crp_unpacked:1; /* reply is unpacked */
+ struct list_head crp_link; /* chain on rpc transaction */
+ srpc_client_rpc_t *crp_rpc; /* client rpc */
+ struct lstcon_node *crp_node; /* destination node */
+ struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
+
+ unsigned int crp_posted:1; /* rpc is posted */
+ unsigned int crp_finished:1; /* rpc is finished */
+ unsigned int crp_unpacked:1; /* reply is unpacked */
/** RPC is embedded in other structure and can't free it */
- unsigned int crp_embedded:1;
- int crp_status; /* console rpc errors */
- unsigned long crp_stamp; /* replied time stamp */
+ unsigned int crp_embedded:1;
+ int crp_status; /* console rpc errors */
+ unsigned long crp_stamp; /* replied time stamp */
} lstcon_rpc_t;
typedef struct lstcon_rpc_trans {
- struct list_head tas_olink; /* link chain on owner list */
- struct list_head tas_link; /* link chain on global list */
- int tas_opc; /* operation code of transaction */
- unsigned tas_feats_updated; /* features mask is uptodate */
- unsigned tas_features; /* test features mask */
- wait_queue_head_t tas_waitq; /* wait queue head */
- atomic_t tas_remaining; /* # of un-scheduled rpcs */
+ struct list_head tas_olink; /* link chain on owner list */
+ struct list_head tas_link; /* link chain on global list */
+ int tas_opc; /* operation code of transaction */
+ unsigned tas_feats_updated; /* features mask is uptodate */
+ unsigned tas_features; /* test features mask */
+ wait_queue_head_t tas_waitq; /* wait queue head */
+ atomic_t tas_remaining; /* # of un-scheduled rpcs */
struct list_head tas_rpcs_list; /* queued requests */
} lstcon_rpc_trans_t;
-#define LST_TRANS_PRIVATE 0x1000
+#define LST_TRANS_PRIVATE 0x1000
#define LST_TRANS_SESNEW (LST_TRANS_PRIVATE | 0x01)
#define LST_TRANS_SESEND (LST_TRANS_PRIVATE | 0x02)
#define LST_TRANS_SESQRY 0x03
-#define LST_TRANS_SESPING 0x04
+#define LST_TRANS_SESPING 0x04
-#define LST_TRANS_TSBCLIADD (LST_TRANS_PRIVATE | 0x11)
-#define LST_TRANS_TSBSRVADD (LST_TRANS_PRIVATE | 0x12)
+#define LST_TRANS_TSBCLIADD (LST_TRANS_PRIVATE | 0x11)
+#define LST_TRANS_TSBSRVADD (LST_TRANS_PRIVATE | 0x12)
#define LST_TRANS_TSBRUN (LST_TRANS_PRIVATE | 0x13)
-#define LST_TRANS_TSBSTOP (LST_TRANS_PRIVATE | 0x14)
-#define LST_TRANS_TSBCLIQRY 0x15
-#define LST_TRANS_TSBSRVQRY 0x16
+#define LST_TRANS_TSBSTOP (LST_TRANS_PRIVATE | 0x14)
+#define LST_TRANS_TSBCLIQRY 0x15
+#define LST_TRANS_TSBSRVQRY 0x16
-#define LST_TRANS_STATQRY 0x21
+#define LST_TRANS_STATQRY 0x21
typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *, lstcon_rpc_ent_t *);
+typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *,
+ lstcon_rpc_ent_t __user *);
int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
unsigned version, lstcon_rpc_t **crpc);
@@ -128,7 +129,7 @@ int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
lstcon_trans_stat_t *stat);
int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
- struct list_head *head_up,
+ struct list_head __user *head_up,
lstcon_rpc_readent_func_t readent);
void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
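
The __user annotations added throughout this header mark pointers that live in the caller's address space; sparse then flags any direct dereference, forcing all access through copy_to_user()/copy_from_user(). A runnable sketch of that discipline, with the qualifier stubbed to a no-op and memcpy standing in for the real copy_to_user:

#include <stdio.h>
#include <string.h>

#define __user		/* sparse address-space marker (stubbed here) */

/* stand-in: the kernel version may fault and returns bytes NOT copied */
static unsigned long copy_to_user(void __user *dst, const void *src,
				  unsigned long n)
{
	memcpy(dst, src, n);
	return 0;	/* 0 == everything copied */
}

int main(void)
{
	int kernel_val = 42;
	int __user user_buf;	/* pretend this lives in userspace */

	if (copy_to_user(&user_buf, &kernel_val, sizeof(kernel_val)))
		return 1;	/* would be -EFAULT in kernel code */
	printf("%d\n", user_buf);
	return 0;
}
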
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 5619fc430e8d..1a923ea3a755 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -49,16 +49,16 @@
do { \
if ((nd)->nd_state == LST_NODE_ACTIVE) \
(p)->nle_nactive++; \
- else if ((nd)->nd_state == LST_NODE_BUSY) \
+ else if ((nd)->nd_state == LST_NODE_BUSY) \
(p)->nle_nbusy++; \
- else if ((nd)->nd_state == LST_NODE_DOWN) \
+ else if ((nd)->nd_state == LST_NODE_DOWN) \
(p)->nle_ndown++; \
else \
(p)->nle_nunknown++; \
(p)->nle_nnode++; \
} while (0)
-lstcon_session_t console_session;
+struct lstcon_session console_session;
static void
lstcon_node_get(lstcon_node_t *nd)
@@ -71,12 +71,13 @@ lstcon_node_get(lstcon_node_t *nd)
static int
lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
{
- lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *ndl;
unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
LASSERT(id.nid != LNET_NID_ANY);
- list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx], ndl_hlink) {
+ list_for_each_entry(ndl, &console_session.ses_ndl_hash[idx],
+ ndl_hlink) {
if (ndl->ndl_node->nd_id.nid != id.nid ||
ndl->ndl_node->nd_id.pid != id.pid)
continue;
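
lstcon_node_find() above hashes the node's NID into one of LST_GLOBAL_HASHSIZE buckets and scans that chain for an exact id match before falling back to allocation. A minimal find-or-create sketch of the same shape, using a simple singly linked chain instead of list_head:

#include <stdio.h>
#include <stdlib.h>

#define HASHSIZE 7	/* illustrative; the console uses 503 buckets */

struct node {
	unsigned long nid;
	struct node *next;
};

static struct node *buckets[HASHSIZE];

static struct node *node_find(unsigned long nid, int create)
{
	unsigned idx = nid % HASHSIZE;	/* NID picks the bucket */
	struct node *n;

	for (n = buckets[idx]; n; n = n->next)
		if (n->nid == nid)
			return n;	/* hit: existing descriptor */
	if (!create)
		return NULL;
	n = calloc(1, sizeof(*n));	/* miss: allocate and chain */
	n->nid = nid;
	n->next = buckets[idx];
	buckets[idx] = n;
	return n;
}

int main(void)
{
	node_find(42, 1);
	printf("%s\n", node_find(42, 0) ? "found" : "missing");
	return 0;
}
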
@@ -90,23 +91,25 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
return -ENOENT;
LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
- if (*ndpp == NULL)
+ if (!*ndpp)
return -ENOMEM;
ndl = (lstcon_ndlink_t *)(*ndpp + 1);
ndl->ndl_node = *ndpp;
- ndl->ndl_node->nd_ref = 1;
- ndl->ndl_node->nd_id = id;
+ ndl->ndl_node->nd_ref = 1;
+ ndl->ndl_node->nd_id = id;
ndl->ndl_node->nd_stamp = cfs_time_current();
ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
ndl->ndl_node->nd_timeout = 0;
memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));
- /* queued in global hash & list, no refcount is taken by
+ /*
+ * queued in global hash & list, no refcount is taken by
* global hash & list, if caller release his refcount,
- * node will be released */
+ * node will be released
+ */
list_add_tail(&ndl->ndl_hlink, &console_session.ses_ndl_hash[idx]);
list_add_tail(&ndl->ndl_link, &console_session.ses_ndl_list);
@@ -157,16 +160,16 @@ lstcon_ndlink_find(struct list_head *hash,
return 0;
}
- if (create == 0)
+ if (!create)
return -ENOENT;
/* find or create in session hash */
rc = lstcon_node_find(id, &nd, (create == 1) ? 1 : 0);
- if (rc != 0)
+ if (rc)
return rc;
LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
- if (ndl == NULL) {
+ if (!ndl) {
lstcon_node_put(nd);
return -ENOMEM;
}
@@ -177,7 +180,7 @@ lstcon_ndlink_find(struct list_head *hash,
INIT_LIST_HEAD(&ndl->ndl_link);
list_add_tail(&ndl->ndl_hlink, &hash[idx]);
- return 0;
+ return 0;
}
static void
@@ -200,12 +203,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
grp_ndl_hash[LST_NODE_HASHSIZE]));
- if (grp == NULL)
+ if (!grp)
return -ENOMEM;
grp->grp_ref = 1;
- if (name != NULL)
- strcpy(grp->grp_name, name);
+ if (name) {
+ if (strlen(name) > sizeof(grp->grp_name) - 1) {
+ LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ grp_ndl_hash[LST_NODE_HASHSIZE]));
+ return -E2BIG;
+ }
+ strncpy(grp->grp_name, name, sizeof(grp->grp_name));
+ }
INIT_LIST_HEAD(&grp->grp_link);
INIT_LIST_HEAD(&grp->grp_ndl_list);
@@ -234,7 +243,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep)
lstcon_ndlink_t *tmp;
list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
- if ((ndl->ndl_node->nd_state & keep) == 0)
+ if (!(ndl->ndl_node->nd_state & keep))
lstcon_group_ndlink_release(grp, ndl);
}
}
@@ -252,9 +261,8 @@ lstcon_group_decref(lstcon_group_t *grp)
lstcon_group_drain(grp, 0);
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+ for (i = 0; i < LST_NODE_HASHSIZE; i++)
LASSERT(list_empty(&grp->grp_ndl_hash[i]));
- }
LIBCFS_FREE(grp, offsetof(lstcon_group_t,
grp_ndl_hash[LST_NODE_HASHSIZE]));
@@ -266,7 +274,7 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp)
lstcon_group_t *grp;
list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
- if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0)
+ if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
continue;
lstcon_group_addref(grp); /* +1 ref for caller */
@@ -284,7 +292,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
int rc;
rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create);
- if (rc != 0)
+ if (rc)
return rc;
if (!list_empty(&(*ndlpp)->ndl_link))
@@ -309,7 +317,7 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
lstcon_group_t *new, lstcon_ndlink_t *ndl)
{
unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
- LST_NODE_HASHSIZE;
+ LST_NODE_HASHSIZE;
list_del(&ndl->ndl_hlink);
list_del(&ndl->ndl_link);
@@ -327,7 +335,7 @@ lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
while (!list_empty(&old->grp_ndl_list)) {
ndl = list_entry(old->grp_ndl_list.next,
- lstcon_ndlink_t, ndl_link);
+ lstcon_ndlink_t, ndl_link);
lstcon_group_ndlink_move(old, new, ndl);
}
}
@@ -347,7 +355,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
if (nd->nd_state != LST_NODE_ACTIVE)
return 0;
- if (grp != NULL && nd->nd_ref > 1)
+ if (grp && nd->nd_ref > 1)
return 0;
break;
@@ -363,7 +371,7 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
static int
lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
- lstcon_rpc_ent_t *ent_up)
+ lstcon_rpc_ent_t __user *ent_up)
{
srpc_debug_reply_t *rep;
@@ -376,9 +384,9 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
rep = &msg->msg_body.dbg_reply;
if (copy_to_user(&ent_up->rpe_priv[0],
- &rep->dbg_timeout, sizeof(int)) ||
+ &rep->dbg_timeout, sizeof(int)) ||
copy_to_user(&ent_up->rpe_payload[0],
- &rep->dbg_name, LST_NAME_SIZE))
+ &rep->dbg_name, LST_NAME_SIZE))
return -EFAULT;
return 0;
@@ -392,18 +400,18 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
static int
lstcon_group_nodes_add(lstcon_group_t *grp,
- int count, lnet_process_id_t *ids_up,
- unsigned *featp, struct list_head *result_up)
+ int count, lnet_process_id_t __user *ids_up,
+ unsigned *featp, struct list_head __user *result_up)
{
lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *ndl;
lstcon_group_t *tmp;
lnet_process_id_t id;
int i;
int rc;
rc = lstcon_group_alloc(NULL, &tmp);
- if (rc != 0) {
+ if (rc) {
CERROR("Out of memory\n");
return -ENOMEM;
}
@@ -416,18 +424,18 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
/* skip if it's in this group already */
rc = lstcon_group_ndlink_find(grp, id, &ndl, 0);
- if (rc == 0)
+ if (!rc)
continue;
/* add to tmp group */
rc = lstcon_group_ndlink_find(tmp, id, &ndl, 1);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create ndlink, out of memory\n");
break;
}
}
- if (rc != 0) {
+ if (rc) {
lstcon_group_decref(tmp);
return rc;
}
@@ -435,7 +443,7 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
&tmp->grp_trans_list, LST_TRANS_SESNEW,
tmp, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
lstcon_group_decref(tmp);
return rc;
@@ -459,8 +467,8 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
static int
lstcon_group_nodes_remove(lstcon_group_t *grp,
- int count, lnet_process_id_t *ids_up,
- struct list_head *result_up)
+ int count, lnet_process_id_t __user *ids_up,
+ struct list_head __user *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_ndlink_t *ndl;
@@ -472,7 +480,7 @@ lstcon_group_nodes_remove(lstcon_group_t *grp,
/* End session and remove node from the group */
rc = lstcon_group_alloc(NULL, &tmp);
- if (rc != 0) {
+ if (rc) {
CERROR("Out of memory\n");
return -ENOMEM;
}
@@ -484,14 +492,14 @@ lstcon_group_nodes_remove(lstcon_group_t *grp,
}
/* move node to tmp group */
- if (lstcon_group_ndlink_find(grp, id, &ndl, 0) == 0)
+ if (!lstcon_group_ndlink_find(grp, id, &ndl, 0))
lstcon_group_ndlink_move(grp, tmp, ndl);
}
rc = lstcon_rpc_trans_ndlist(&tmp->grp_ndl_list,
&tmp->grp_trans_list, LST_TRANS_SESEND,
tmp, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
goto error;
}
@@ -518,15 +526,15 @@ lstcon_group_add(char *name)
lstcon_group_t *grp;
int rc;
- rc = (lstcon_group_find(name, &grp) == 0) ? -EEXIST : 0;
- if (rc != 0) {
+ rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
+ if (rc) {
/* find a group with same name */
lstcon_group_decref(grp);
return rc;
}
rc = lstcon_group_alloc(name, &grp);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't allocate descriptor for group %s\n", name);
return -ENOMEM;
}
@@ -537,17 +545,17 @@ lstcon_group_add(char *name)
}
int
-lstcon_nodes_add(char *name, int count, lnet_process_id_t *ids_up,
- unsigned *featp, struct list_head *result_up)
+lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
+ unsigned *featp, struct list_head __user *result_up)
{
lstcon_group_t *grp;
int rc;
LASSERT(count > 0);
- LASSERT(ids_up != NULL);
+ LASSERT(ids_up);
rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find group %s\n", name);
return rc;
}
@@ -575,7 +583,7 @@ lstcon_group_del(char *name)
int rc;
rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find group: %s\n", name);
return rc;
}
@@ -590,7 +598,7 @@ lstcon_group_del(char *name)
rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
&grp->grp_trans_list, LST_TRANS_SESEND,
grp, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
lstcon_group_decref(grp);
return rc;
@@ -601,8 +609,10 @@ lstcon_group_del(char *name)
lstcon_rpc_trans_destroy(trans);
lstcon_group_decref(grp);
- /* -ref for session, it's destroyed,
- * status can't be rolled back, destroy group anyway */
+ /*
+ * -ref for session, it's destroyed,
+ * status can't be rolled back, destroy group anyway
+ */
lstcon_group_decref(grp);
return rc;
@@ -615,7 +625,7 @@ lstcon_group_clean(char *name, int args)
int rc;
rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find group %s\n", name);
return rc;
}
@@ -641,14 +651,14 @@ lstcon_group_clean(char *name, int args)
}
int
-lstcon_nodes_remove(char *name, int count,
- lnet_process_id_t *ids_up, struct list_head *result_up)
+lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
+ struct list_head __user *result_up)
{
lstcon_group_t *grp = NULL;
int rc;
rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find group: %s\n", name);
return rc;
}
@@ -671,14 +681,14 @@ lstcon_nodes_remove(char *name, int count,
}
int
-lstcon_group_refresh(char *name, struct list_head *result_up)
+lstcon_group_refresh(char *name, struct list_head __user *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_group_t *grp;
int rc;
rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find group: %s\n", name);
return rc;
}
@@ -694,7 +704,7 @@ lstcon_group_refresh(char *name, struct list_head *result_up)
rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
&grp->grp_trans_list, LST_TRANS_SESNEW,
grp, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
/* local error, return */
CDEBUG(D_NET, "Can't create transaction: %d\n", rc);
lstcon_group_decref(grp);
@@ -713,15 +723,15 @@ lstcon_group_refresh(char *name, struct list_head *result_up)
}
int
-lstcon_group_list(int index, int len, char *name_up)
+lstcon_group_list(int index, int len, char __user *name_up)
{
lstcon_group_t *grp;
LASSERT(index >= 0);
- LASSERT(name_up != NULL);
+ LASSERT(name_up);
list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
- if (index-- == 0) {
+ if (!index--) {
return copy_to_user(name_up, grp->grp_name, len) ?
-EFAULT : 0;
}
@@ -732,15 +742,15 @@ lstcon_group_list(int index, int len, char *name_up)
static int
lstcon_nodes_getent(struct list_head *head, int *index_p,
- int *count_p, lstcon_node_ent_t *dents_up)
+ int *count_p, lstcon_node_ent_t __user *dents_up)
{
lstcon_ndlink_t *ndl;
lstcon_node_t *nd;
int count = 0;
int index = 0;
- LASSERT(index_p != NULL && count_p != NULL);
- LASSERT(dents_up != NULL);
+ LASSERT(index_p && count_p);
+ LASSERT(dents_up);
LASSERT(*index_p >= 0);
LASSERT(*count_p > 0);
@@ -753,9 +763,9 @@ lstcon_nodes_getent(struct list_head *head, int *index_p,
nd = ndl->ndl_node;
if (copy_to_user(&dents_up[count].nde_id,
- &nd->nd_id, sizeof(nd->nd_id)) ||
+ &nd->nd_id, sizeof(nd->nd_id)) ||
copy_to_user(&dents_up[count].nde_state,
- &nd->nd_state, sizeof(nd->nd_state)))
+ &nd->nd_state, sizeof(nd->nd_state)))
return -EFAULT;
count++;
@@ -771,8 +781,9 @@ lstcon_nodes_getent(struct list_head *head, int *index_p,
}
int
-lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
- int *index_p, int *count_p, lstcon_node_ent_t *dents_up)
+lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
+ int *index_p, int *count_p,
+ lstcon_node_ent_t __user *dents_up)
{
lstcon_ndlist_ent_t *gentp;
lstcon_group_t *grp;
@@ -780,7 +791,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
int rc;
rc = lstcon_group_find(name, &grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find group %s\n", name);
return rc;
}
@@ -796,7 +807,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
/* non-verbose query */
LIBCFS_ALLOC(gentp, sizeof(lstcon_ndlist_ent_t));
- if (gentp == NULL) {
+ if (!gentp) {
CERROR("Can't allocate ndlist_ent\n");
lstcon_group_decref(grp);
@@ -807,7 +818,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p,
LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp);
rc = copy_to_user(gents_p, gentp,
- sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
+ sizeof(lstcon_ndlist_ent_t)) ? -EFAULT : 0;
LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t));
@@ -822,7 +833,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
lstcon_batch_t *bat;
list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
- if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) {
+ if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
*batpp = bat;
return 0;
}
@@ -838,21 +849,21 @@ lstcon_batch_add(char *name)
int i;
int rc;
- rc = (lstcon_batch_find(name, &bat) == 0) ? -EEXIST : 0;
- if (rc != 0) {
+ rc = !lstcon_batch_find(name, &bat) ? -EEXIST : 0;
+ if (rc) {
CDEBUG(D_NET, "Batch %s already exists\n", name);
return rc;
}
LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t));
- if (bat == NULL) {
+ if (!bat) {
CERROR("Can't allocate descriptor for batch %s\n", name);
return -ENOMEM;
}
LIBCFS_ALLOC(bat->bat_cli_hash,
sizeof(struct list_head) * LST_NODE_HASHSIZE);
- if (bat->bat_cli_hash == NULL) {
+ if (!bat->bat_cli_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
@@ -861,7 +872,7 @@ lstcon_batch_add(char *name)
LIBCFS_ALLOC(bat->bat_srv_hash,
sizeof(struct list_head) * LST_NODE_HASHSIZE);
- if (bat->bat_srv_hash == NULL) {
+ if (!bat->bat_srv_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
@@ -869,7 +880,13 @@ lstcon_batch_add(char *name)
return -ENOMEM;
}
- strcpy(bat->bat_name, name);
+ if (strlen(name) > sizeof(bat->bat_name) - 1) {
+ LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
+ LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
+ LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ return -E2BIG;
+ }
+ strncpy(bat->bat_name, name, sizeof(bat->bat_name));
bat->bat_hdr.tsb_index = 0;
bat->bat_hdr.tsb_id.bat_id = ++console_session.ses_id_cookie;
@@ -892,17 +909,17 @@ lstcon_batch_add(char *name)
}
int
-lstcon_batch_list(int index, int len, char *name_up)
+lstcon_batch_list(int index, int len, char __user *name_up)
{
lstcon_batch_t *bat;
- LASSERT(name_up != NULL);
+ LASSERT(name_up);
LASSERT(index >= 0);
list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
- if (index-- == 0) {
+ if (!index--) {
return copy_to_user(name_up, bat->bat_name, len) ?
- -EFAULT : 0;
+ -EFAULT : 0;
}
}
@@ -910,20 +927,20 @@ lstcon_batch_list(int index, int len, char *name_up)
}
int
-lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
- int testidx, int *index_p, int *ndent_p,
- lstcon_node_ent_t *dents_up)
+lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
+ int server, int testidx, int *index_p, int *ndent_p,
+ lstcon_node_ent_t __user *dents_up)
{
lstcon_test_batch_ent_t *entp;
struct list_head *clilst;
struct list_head *srvlst;
lstcon_test_t *test = NULL;
lstcon_batch_t *bat;
- lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *ndl;
int rc;
rc = lstcon_batch_find(name, &bat);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find batch %s\n", name);
return -ENOENT;
}
@@ -941,12 +958,12 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
}
}
- clilst = (test == NULL) ? &bat->bat_cli_list :
- &test->tes_src_grp->grp_ndl_list;
- srvlst = (test == NULL) ? &bat->bat_srv_list :
- &test->tes_dst_grp->grp_ndl_list;
+ clilst = !test ? &bat->bat_cli_list :
+ &test->tes_src_grp->grp_ndl_list;
+ srvlst = !test ? &bat->bat_srv_list :
+ &test->tes_dst_grp->grp_ndl_list;
- if (dents_up != NULL) {
+ if (dents_up) {
rc = lstcon_nodes_getent((server ? srvlst : clilst),
index_p, ndent_p, dents_up);
return rc;
@@ -954,17 +971,16 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
/* non-verbose query */
LIBCFS_ALLOC(entp, sizeof(lstcon_test_batch_ent_t));
- if (entp == NULL)
+ if (!entp)
return -ENOMEM;
- if (test == NULL) {
+ if (!test) {
entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
entp->u.tbe_batch.bae_state = bat->bat_state;
} else {
-
- entp->u.tbe_test.tse_type = test->tes_type;
- entp->u.tbe_test.tse_loop = test->tes_loop;
+ entp->u.tbe_test.tse_type = test->tes_type;
+ entp->u.tbe_test.tse_loop = test->tes_loop;
entp->u.tbe_test.tse_concur = test->tes_concur;
}
@@ -975,7 +991,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server,
LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle);
rc = copy_to_user(ent_up, entp,
- sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
+ sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0;
LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t));
@@ -1006,7 +1022,7 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
static int
lstcon_batch_op(lstcon_batch_t *bat, int transop,
- struct list_head *result_up)
+ struct list_head __user *result_up)
{
lstcon_rpc_trans_t *trans;
int rc;
@@ -1014,7 +1030,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
&bat->bat_trans_list, transop,
bat, lstcon_batrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
return rc;
}
@@ -1029,12 +1045,12 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
}
int
-lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
+lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
{
lstcon_batch_t *bat;
int rc;
- if (lstcon_batch_find(name, &bat) != 0) {
+ if (lstcon_batch_find(name, &bat)) {
CDEBUG(D_NET, "Can't find batch %s\n", name);
return -ENOENT;
}
@@ -1044,19 +1060,19 @@ lstcon_batch_run(char *name, int timeout, struct list_head *result_up)
rc = lstcon_batch_op(bat, LST_TRANS_TSBRUN, result_up);
/* mark batch as running if it's started in any node */
- if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0) != 0)
+ if (lstcon_tsbop_stat_success(lstcon_trans_stat(), 0))
bat->bat_state = LST_BATCH_RUNNING;
return rc;
}
int
-lstcon_batch_stop(char *name, int force, struct list_head *result_up)
+lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
{
lstcon_batch_t *bat;
int rc;
- if (lstcon_batch_find(name, &bat) != 0) {
+ if (lstcon_batch_find(name, &bat)) {
CDEBUG(D_NET, "Can't find batch %s\n", name);
return -ENOENT;
}
@@ -1066,7 +1082,7 @@ lstcon_batch_stop(char *name, int force, struct list_head *result_up)
rc = lstcon_batch_op(bat, LST_TRANS_TSBSTOP, result_up);
/* mark batch as stopped if all RPCs finished */
- if (lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0) == 0)
+ if (!lstcon_tsbop_stat_failure(lstcon_trans_stat(), 0))
bat->bat_state = LST_BATCH_IDLE;
return rc;
@@ -1083,7 +1099,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_test_list)) {
test = list_entry(bat->bat_test_list.next,
- lstcon_test_t, tes_link);
+ lstcon_test_t, tes_link);
LASSERT(list_empty(&test->tes_trans_list));
list_del(&test->tes_link);
@@ -1099,7 +1115,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_cli_list)) {
ndl = list_entry(bat->bat_cli_list.next,
- lstcon_ndlink_t, ndl_link);
+ lstcon_ndlink_t, ndl_link);
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -1107,7 +1123,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_srv_list)) {
ndl = list_entry(bat->bat_srv_list.next,
- lstcon_ndlink_t, ndl_link);
+ lstcon_ndlink_t, ndl_link);
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -1135,10 +1151,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
struct list_head *head;
test = (lstcon_test_t *)arg;
- LASSERT(test != NULL);
+ LASSERT(test);
batch = test->tes_batch;
- LASSERT(batch != NULL);
+ LASSERT(batch);
if (test->tes_oneside &&
transop == LST_TRANS_TSBSRVADD)
@@ -1160,7 +1176,7 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
LASSERT(nd->nd_id.nid != LNET_NID_ANY);
- if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1) != 0)
+ if (lstcon_ndlink_find(hash, nd->nd_id, &ndl, 1))
return -ENOMEM;
if (list_empty(&ndl->ndl_link))
@@ -1170,31 +1186,31 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up)
+lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up)
{
lstcon_rpc_trans_t *trans;
lstcon_group_t *grp;
int transop;
int rc;
- LASSERT(test->tes_src_grp != NULL);
- LASSERT(test->tes_dst_grp != NULL);
+ LASSERT(test->tes_src_grp);
+ LASSERT(test->tes_dst_grp);
transop = LST_TRANS_TSBSRVADD;
- grp = test->tes_dst_grp;
+ grp = test->tes_dst_grp;
again:
rc = lstcon_rpc_trans_ndlist(&grp->grp_ndl_list,
&test->tes_trans_list, transop,
test, lstcon_testrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
return rc;
}
lstcon_rpc_trans_postwait(trans, LST_TRANS_TIMEOUT);
- if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
- lstcon_trans_stat()->trs_fwk_errno != 0) {
+ if (lstcon_trans_stat()->trs_rpc_errno ||
+ lstcon_trans_stat()->trs_fwk_errno) {
lstcon_rpc_trans_interpreter(trans, result_up, NULL);
lstcon_rpc_trans_destroy(trans);
@@ -1226,7 +1242,7 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
int rc;
rc = lstcon_batch_find(name, batch);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find batch %s\n", name);
return rc;
}
@@ -1243,10 +1259,10 @@ static int
lstcon_verify_group(const char *name, lstcon_group_t **grp)
{
int rc;
- lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *ndl;
rc = lstcon_group_find(name, grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "can't find group %s\n", name);
return rc;
}
@@ -1266,13 +1282,13 @@ lstcon_test_add(char *batch_name, int type, int loop,
int concur, int dist, int span,
char *src_name, char *dst_name,
void *param, int paramlen, int *retp,
- struct list_head *result_up)
+ struct list_head __user *result_up)
{
- lstcon_test_t *test = NULL;
- int rc;
- lstcon_group_t *src_grp = NULL;
- lstcon_group_t *dst_grp = NULL;
- lstcon_batch_t *batch = NULL;
+ lstcon_test_t *test = NULL;
+ int rc;
+ lstcon_group_t *src_grp = NULL;
+ lstcon_group_t *dst_grp = NULL;
+ lstcon_batch_t *batch = NULL;
/*
* verify that a batch of the given name exists, and the groups
@@ -1280,15 +1296,15 @@ lstcon_test_add(char *batch_name, int type, int loop,
* active node
*/
rc = lstcon_verify_batch(batch_name, &batch);
- if (rc != 0)
+ if (rc)
goto out;
rc = lstcon_verify_group(src_name, &src_grp);
- if (rc != 0)
+ if (rc)
goto out;
rc = lstcon_verify_group(dst_name, &dst_grp);
- if (rc != 0)
+ if (rc)
goto out;
if (dst_grp->grp_userland)
@@ -1302,32 +1318,32 @@ lstcon_test_add(char *batch_name, int type, int loop,
goto out;
}
- test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
- test->tes_batch = batch;
- test->tes_type = type;
- test->tes_oneside = 0; /* TODO */
- test->tes_loop = loop;
- test->tes_concur = concur;
- test->tes_stop_onerr = 1; /* TODO */
- test->tes_span = span;
- test->tes_dist = dist;
- test->tes_cliidx = 0; /* just used for creating RPC */
- test->tes_src_grp = src_grp;
- test->tes_dst_grp = dst_grp;
+ test->tes_hdr.tsb_id = batch->bat_hdr.tsb_id;
+ test->tes_batch = batch;
+ test->tes_type = type;
+ test->tes_oneside = 0; /* TODO */
+ test->tes_loop = loop;
+ test->tes_concur = concur;
+ test->tes_stop_onerr = 1; /* TODO */
+ test->tes_span = span;
+ test->tes_dist = dist;
+ test->tes_cliidx = 0; /* just used for creating RPC */
+ test->tes_src_grp = src_grp;
+ test->tes_dst_grp = dst_grp;
INIT_LIST_HEAD(&test->tes_trans_list);
- if (param != NULL) {
+ if (param) {
test->tes_paramlen = paramlen;
memcpy(&test->tes_param[0], param, paramlen);
}
rc = lstcon_test_nodes_add(test, result_up);
- if (rc != 0)
+ if (rc)
goto out;
- if (lstcon_trans_stat()->trs_rpc_errno != 0 ||
- lstcon_trans_stat()->trs_fwk_errno != 0)
+ if (lstcon_trans_stat()->trs_rpc_errno ||
+ lstcon_trans_stat()->trs_fwk_errno)
CDEBUG(D_NET, "Failed to add test %d to batch %s\n", type,
batch_name);
@@ -1340,13 +1356,13 @@ lstcon_test_add(char *batch_name, int type, int loop,
/* hold groups so nobody can change them */
return rc;
out:
- if (test != NULL)
+ if (test)
LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen]));
- if (dst_grp != NULL)
+ if (dst_grp)
lstcon_group_decref(dst_grp);
- if (src_grp != NULL)
+ if (src_grp)
lstcon_group_decref(src_grp);
return rc;
@@ -1369,16 +1385,16 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
static int
lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
- lstcon_rpc_ent_t *ent_up)
+ lstcon_rpc_ent_t __user *ent_up)
{
srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
LASSERT(transop == LST_TRANS_TSBCLIQRY ||
- transop == LST_TRANS_TSBSRVQRY);
+ transop == LST_TRANS_TSBSRVQRY);
/* positive errno, framework error code */
- if (copy_to_user(&ent_up->rpe_priv[0],
- &rep->bar_active, sizeof(rep->bar_active)))
+ if (copy_to_user(&ent_up->rpe_priv[0], &rep->bar_active,
+ sizeof(rep->bar_active)))
return -EFAULT;
return 0;
@@ -1386,7 +1402,7 @@ lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
int
lstcon_test_batch_query(char *name, int testidx, int client,
- int timeout, struct list_head *result_up)
+ int timeout, struct list_head __user *result_up)
{
lstcon_rpc_trans_t *trans;
struct list_head *translist;
@@ -1398,43 +1414,43 @@ lstcon_test_batch_query(char *name, int testidx, int client,
int rc;
rc = lstcon_batch_find(name, &batch);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find batch: %s\n", name);
return rc;
}
- if (testidx == 0) {
+ if (!testidx) {
translist = &batch->bat_trans_list;
- ndlist = &batch->bat_cli_list;
- hdr = &batch->bat_hdr;
+ ndlist = &batch->bat_cli_list;
+ hdr = &batch->bat_hdr;
} else {
/* query specified test only */
rc = lstcon_test_find(batch, testidx, &test);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find test: %d\n", testidx);
return rc;
}
translist = &test->tes_trans_list;
- ndlist = &test->tes_src_grp->grp_ndl_list;
- hdr = &test->tes_hdr;
+ ndlist = &test->tes_src_grp->grp_ndl_list;
+ hdr = &test->tes_hdr;
}
transop = client ? LST_TRANS_TSBCLIQRY : LST_TRANS_TSBSRVQRY;
rc = lstcon_rpc_trans_ndlist(ndlist, translist, transop, hdr,
lstcon_batrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
return rc;
}
lstcon_rpc_trans_postwait(trans, timeout);
- if (testidx == 0 && /* query a batch, not a test */
- lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) == 0 &&
- lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0) == 0) {
+ if (!testidx && /* query a batch, not a test */
+ !lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
+ !lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
/* all RPCs finished, and no active test */
batch->bat_state = LST_BATCH_IDLE;
}
@@ -1448,19 +1464,19 @@ lstcon_test_batch_query(char *name, int testidx, int client,
static int
lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
- lstcon_rpc_ent_t *ent_up)
+ lstcon_rpc_ent_t __user *ent_up)
{
srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
- sfw_counters_t *sfwk_stat;
- srpc_counters_t *srpc_stat;
- lnet_counters_t *lnet_stat;
+ sfw_counters_t __user *sfwk_stat;
+ srpc_counters_t __user *srpc_stat;
+ lnet_counters_t __user *lnet_stat;
- if (rep->str_status != 0)
+ if (rep->str_status)
return 0;
- sfwk_stat = (sfw_counters_t *)&ent_up->rpe_payload[0];
- srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat));
- lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat));
+ sfwk_stat = (sfw_counters_t __user *)&ent_up->rpe_payload[0];
+ srpc_stat = (srpc_counters_t __user *)(sfwk_stat + 1);
+ lnet_stat = (lnet_counters_t __user *)(srpc_stat + 1);
if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) ||
copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) ||
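
The pointer-arithmetic change above drops the redundant char * casts: with typed pointers, sfwk_stat + 1 already advances by sizeof(*sfwk_stat), so the three counter blocks land back to back in the reply payload. A standalone sketch of that layout, with illustrative struct names rather than the lustre counter types:

#include <stdio.h>
#include <stdlib.h>

struct fw_counters	{ unsigned long running, done; };
struct rpc_counters	{ unsigned long sent, received; };
struct lnet_counters	{ unsigned long msgs, errors; };

int main(void)
{
	size_t nob = sizeof(struct fw_counters) +
		     sizeof(struct rpc_counters) +
		     sizeof(struct lnet_counters);
	void *payload = calloc(1, nob);
	struct fw_counters *fw = payload;
	/* "fw + 1" steps past one whole fw_counters block; no char *
	 * cast is needed because the pointer is already typed */
	struct rpc_counters *rpc = (struct rpc_counters *)(fw + 1);
	struct lnet_counters *ln = (struct lnet_counters *)(rpc + 1);

	rpc->sent = 7;
	ln->msgs = 1;
	printf("rpc at offset %zu, lnet at offset %zu\n",
	       (size_t)((char *)rpc - (char *)payload),
	       (size_t)((char *)ln - (char *)payload));
	free(payload);
	return 0;
}
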
@@ -1472,7 +1488,7 @@ lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
static int
lstcon_ndlist_stat(struct list_head *ndlist,
- int timeout, struct list_head *result_up)
+ int timeout, struct list_head __user *result_up)
{
struct list_head head;
lstcon_rpc_trans_t *trans;
@@ -1482,7 +1498,7 @@ lstcon_ndlist_stat(struct list_head *ndlist,
rc = lstcon_rpc_trans_ndlist(ndlist, &head,
LST_TRANS_STATQRY, NULL, NULL, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
return rc;
}
@@ -1497,13 +1513,14 @@ lstcon_ndlist_stat(struct list_head *ndlist,
}
int
-lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
+lstcon_group_stat(char *grp_name, int timeout,
+ struct list_head __user *result_up)
{
lstcon_group_t *grp;
int rc;
rc = lstcon_group_find(grp_name, &grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Can't find group %s\n", grp_name);
return rc;
}
@@ -1516,17 +1533,17 @@ lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up)
}
int
-lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, struct list_head *result_up)
+lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
+ int timeout, struct list_head __user *result_up)
{
- lstcon_ndlink_t *ndl;
+ lstcon_ndlink_t *ndl;
lstcon_group_t *tmp;
lnet_process_id_t id;
int i;
int rc;
rc = lstcon_group_alloc(NULL, &tmp);
- if (rc != 0) {
+ if (rc) {
CERROR("Out of memory\n");
return -ENOMEM;
}
@@ -1539,7 +1556,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
/* add to tmp group */
rc = lstcon_group_ndlink_find(tmp, id, &ndl, 2);
- if (rc != 0) {
+ if (rc) {
CDEBUG((rc == -ENOMEM) ? D_ERROR : D_NET,
"Failed to find or create %s: %d\n",
libcfs_id2str(id), rc);
@@ -1547,7 +1564,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
}
}
- if (rc != 0) {
+ if (rc) {
lstcon_group_decref(tmp);
return rc;
}
@@ -1562,14 +1579,14 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
static int
lstcon_debug_ndlist(struct list_head *ndlist,
struct list_head *translist,
- int timeout, struct list_head *result_up)
+ int timeout, struct list_head __user *result_up)
{
lstcon_rpc_trans_t *trans;
- int rc;
+ int rc;
rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
NULL, lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
return rc;
}
@@ -1584,7 +1601,7 @@ lstcon_debug_ndlist(struct list_head *ndlist,
}
int
-lstcon_session_debug(int timeout, struct list_head *result_up)
+lstcon_session_debug(int timeout, struct list_head __user *result_up)
{
return lstcon_debug_ndlist(&console_session.ses_ndl_list,
NULL, timeout, result_up);
@@ -1592,13 +1609,13 @@ lstcon_session_debug(int timeout, struct list_head *result_up)
int
lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head *result_up)
+ int client, struct list_head __user *result_up)
{
lstcon_batch_t *bat;
int rc;
rc = lstcon_batch_find(name, &bat);
- if (rc != 0)
+ if (rc)
return -ENOENT;
rc = lstcon_debug_ndlist(client ? &bat->bat_cli_list :
@@ -1610,13 +1627,13 @@ lstcon_batch_debug(int timeout, char *name,
int
lstcon_group_debug(int timeout, char *name,
- struct list_head *result_up)
+ struct list_head __user *result_up)
{
lstcon_group_t *grp;
int rc;
rc = lstcon_group_find(name, &grp);
- if (rc != 0)
+ if (rc)
return -ENOENT;
rc = lstcon_debug_ndlist(&grp->grp_ndl_list, NULL,
@@ -1628,8 +1645,8 @@ lstcon_group_debug(int timeout, char *name,
int
lstcon_nodes_debug(int timeout,
- int count, lnet_process_id_t *ids_up,
- struct list_head *result_up)
+ int count, lnet_process_id_t __user *ids_up,
+ struct list_head __user *result_up)
{
lnet_process_id_t id;
lstcon_ndlink_t *ndl;
@@ -1638,7 +1655,7 @@ lstcon_nodes_debug(int timeout,
int rc;
rc = lstcon_group_alloc(NULL, &grp);
- if (rc != 0) {
+ if (rc) {
CDEBUG(D_NET, "Out of memory\n");
return rc;
}
@@ -1651,13 +1668,13 @@ lstcon_nodes_debug(int timeout,
/* node is added to tmp group */
rc = lstcon_group_ndlink_find(grp, id, &ndl, 1);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create node link\n");
break;
}
}
- if (rc != 0) {
+ if (rc) {
lstcon_group_decref(grp);
return rc;
}
@@ -1673,8 +1690,8 @@ lstcon_nodes_debug(int timeout,
int
lstcon_session_match(lst_sid_t sid)
{
- return (console_session.ses_id.ses_nid == sid.ses_nid &&
- console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0;
+ return (console_session.ses_id.ses_nid == sid.ses_nid &&
+ console_session.ses_id.ses_stamp == sid.ses_stamp) ? 1 : 0;
}
static void
@@ -1685,15 +1702,13 @@ lstcon_new_session_id(lst_sid_t *sid)
LASSERT(console_session.ses_state == LST_SESSION_NONE);
LNetGetId(1, &id);
- sid->ses_nid = id.nid;
+ sid->ses_nid = id.nid;
sid->ses_stamp = cfs_time_current();
}
-extern srpc_service_t lstcon_acceptor_service;
-
int
lstcon_session_new(char *name, int key, unsigned feats,
- int timeout, int force, lst_sid_t *sid_up)
+ int timeout, int force, lst_sid_t __user *sid_up)
{
int rc = 0;
int i;
@@ -1709,11 +1724,11 @@ lstcon_session_new(char *name, int key, unsigned feats,
rc = lstcon_session_end();
/* lstcon_session_end() only return local error */
- if (rc != 0)
+ if (rc)
return rc;
}
- if ((feats & ~LST_FEATS_MASK) != 0) {
+ if (feats & ~LST_FEATS_MASK) {
CNETERR("Unknown session features %x\n",
(feats & ~LST_FEATS_MASK));
return -EINVAL;
@@ -1731,15 +1746,18 @@ lstcon_session_new(char *name, int key, unsigned feats,
console_session.ses_feats_updated = 0;
console_session.ses_timeout = (timeout <= 0) ?
LST_CONSOLE_TIMEOUT : timeout;
- strlcpy(console_session.ses_name, name,
+
+ if (strlen(name) > sizeof(console_session.ses_name) - 1)
+ return -E2BIG;
+ strncpy(console_session.ses_name, name,
sizeof(console_session.ses_name));
rc = lstcon_batch_add(LST_DEFAULT_BATCH);
- if (rc != 0)
+ if (rc)
return rc;
rc = lstcon_rpc_pinger_start();
- if (rc != 0) {
+ if (rc) {
lstcon_batch_t *bat = NULL;
lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
@@ -1748,8 +1766,8 @@ lstcon_session_new(char *name, int key, unsigned feats,
return rc;
}
- if (copy_to_user(sid_up, &console_session.ses_id,
- sizeof(lst_sid_t)) == 0)
+ if (!copy_to_user(sid_up, &console_session.ses_id,
+ sizeof(lst_sid_t)))
return rc;
lstcon_session_end();
@@ -1758,8 +1776,10 @@ lstcon_session_new(char *name, int key, unsigned feats,
}
int
-lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp,
- lstcon_ndlist_ent_t *ndinfo_up, char *name_up, int len)
+lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
+ unsigned __user *featp,
+ lstcon_ndlist_ent_t __user *ndinfo_up,
+ char __user *name_up, int len)
{
lstcon_ndlist_ent_t *entp;
lstcon_ndlink_t *ndl;
@@ -1769,18 +1789,18 @@ lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp,
return -ESRCH;
LIBCFS_ALLOC(entp, sizeof(*entp));
- if (entp == NULL)
+ if (!entp)
return -ENOMEM;
list_for_each_entry(ndl, &console_session.ses_ndl_list, ndl_link)
LST_NODE_STATE_COUNTER(ndl->ndl_node, entp);
if (copy_to_user(sid_up, &console_session.ses_id,
- sizeof(lst_sid_t)) ||
+ sizeof(lst_sid_t)) ||
copy_to_user(key_up, &console_session.ses_key,
- sizeof(*key_up)) ||
+ sizeof(*key_up)) ||
copy_to_user(featp, &console_session.ses_features,
- sizeof(*featp)) ||
+ sizeof(*featp)) ||
copy_to_user(ndinfo_up, entp, sizeof(*entp)) ||
copy_to_user(name_up, console_session.ses_name, len))
rc = -EFAULT;
@@ -1803,7 +1823,7 @@ lstcon_session_end(void)
rc = lstcon_rpc_trans_ndlist(&console_session.ses_ndl_list,
NULL, LST_TRANS_SESEND, NULL,
lstcon_sesrpc_condition, &trans);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create transaction: %d\n", rc);
return rc;
}
@@ -1820,16 +1840,16 @@ lstcon_session_end(void)
/* waiting for orphan rpcs to die */
lstcon_rpc_cleanup_wait();
- console_session.ses_id = LST_INVALID_SID;
+ console_session.ses_id = LST_INVALID_SID;
console_session.ses_state = LST_SESSION_NONE;
- console_session.ses_key = 0;
+ console_session.ses_key = 0;
console_session.ses_force = 0;
console_session.ses_feats_updated = 0;
/* destroy all batches */
while (!list_empty(&console_session.ses_bat_list)) {
bat = list_entry(console_session.ses_bat_list.next,
- lstcon_batch_t, bat_link);
+ lstcon_batch_t, bat_link);
lstcon_batch_destroy(bat);
}
@@ -1837,7 +1857,7 @@ lstcon_session_end(void)
/* destroy all groups */
while (!list_empty(&console_session.ses_grp_list)) {
grp = list_entry(console_session.ses_grp_list.next,
- lstcon_group_t, grp_link);
+ lstcon_group_t, grp_link);
LASSERT(grp->grp_ref == 1);
lstcon_group_decref(grp);
@@ -1847,7 +1867,7 @@ lstcon_session_end(void)
LASSERT(list_empty(&console_session.ses_ndl_list));
console_session.ses_shutdown = 0;
- console_session.ses_expired = 0;
+ console_session.ses_expired = 0;
return rc;
}
@@ -1857,7 +1877,7 @@ lstcon_session_feats_check(unsigned feats)
{
int rc = 0;
- if ((feats & ~LST_FEATS_MASK) != 0) {
+ if (feats & ~LST_FEATS_MASK) {
CERROR("Can't support these features: %x\n",
(feats & ~LST_FEATS_MASK));
return -EPROTO;
@@ -1875,7 +1895,7 @@ lstcon_session_feats_check(unsigned feats)
spin_unlock(&console_session.ses_rpc_lock);
- if (rc != 0) {
+ if (rc) {
CERROR("remote features %x do not match with session features %x of console\n",
feats, console_session.ses_features);
}
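
lstcon_session_feats_check() accepts a peer only if every feature bit it advertises falls inside LST_FEATS_MASK; feats & ~LST_FEATS_MASK isolates the unsupported bits. A tiny sketch of the negotiation test, with an assumed mask value:

#include <stdio.h>

#define FEATS_MASK 0x3	/* assumed: the bits this side understands */

static int feats_ok(unsigned feats)
{
	/* any bit outside the mask marks the peer incompatible */
	return !(feats & ~FEATS_MASK);
}

int main(void)
{
	printf("%d %d\n", feats_ok(0x1), feats_ok(0x8));	/* 1 0 */
	return 0;
}
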
@@ -1886,13 +1906,13 @@ lstcon_session_feats_check(unsigned feats)
static int
lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
{
- srpc_msg_t *rep = &rpc->srpc_replymsg;
- srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg;
+ srpc_msg_t *rep = &rpc->srpc_replymsg;
+ srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg;
srpc_join_reqst_t *jreq = &req->msg_body.join_reqst;
srpc_join_reply_t *jrep = &rep->msg_body.join_reply;
- lstcon_group_t *grp = NULL;
+ lstcon_group_t *grp = NULL;
lstcon_ndlink_t *ndl;
- int rc = 0;
+ int rc = 0;
sfw_unpack_message(req);
@@ -1905,26 +1925,26 @@ lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
goto out;
}
- if (lstcon_session_feats_check(req->msg_ses_feats) != 0) {
+ if (lstcon_session_feats_check(req->msg_ses_feats)) {
jrep->join_status = EPROTO;
goto out;
}
if (jreq->join_sid.ses_nid != LNET_NID_ANY &&
- !lstcon_session_match(jreq->join_sid)) {
+ !lstcon_session_match(jreq->join_sid)) {
jrep->join_status = EBUSY;
goto out;
}
- if (lstcon_group_find(jreq->join_group, &grp) != 0) {
+ if (lstcon_group_find(jreq->join_group, &grp)) {
rc = lstcon_group_alloc(jreq->join_group, &grp);
- if (rc != 0) {
+ if (rc) {
CERROR("Out of memory\n");
goto out;
}
list_add_tail(&grp->grp_link,
- &console_session.ses_grp_list);
+ &console_session.ses_grp_list);
lstcon_group_addref(grp);
}
@@ -1935,31 +1955,31 @@ lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
}
rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 0);
- if (rc == 0) {
+ if (!rc) {
jrep->join_status = EEXIST;
goto out;
}
rc = lstcon_group_ndlink_find(grp, rpc->srpc_peer, &ndl, 1);
- if (rc != 0) {
+ if (rc) {
CERROR("Out of memory\n");
goto out;
}
- ndl->ndl_node->nd_state = LST_NODE_ACTIVE;
+ ndl->ndl_node->nd_state = LST_NODE_ACTIVE;
ndl->ndl_node->nd_timeout = console_session.ses_timeout;
- if (grp->grp_userland == 0)
+ if (!grp->grp_userland)
grp->grp_userland = 1;
strlcpy(jrep->join_session, console_session.ses_name,
sizeof(jrep->join_session));
jrep->join_timeout = console_session.ses_timeout;
- jrep->join_status = 0;
+ jrep->join_status = 0;
out:
rep->msg_ses_feats = console_session.ses_features;
- if (grp != NULL)
+ if (grp)
lstcon_group_decref(grp);
mutex_unlock(&console_session.ses_mutex);
@@ -1967,17 +1987,17 @@ out:
return rc;
}
-srpc_service_t lstcon_acceptor_service;
+static srpc_service_t lstcon_acceptor_service;
static void lstcon_init_acceptor_service(void)
{
/* initialize selftest console acceptor service table */
- lstcon_acceptor_service.sv_name = "join session";
- lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle;
- lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN;
+ lstcon_acceptor_service.sv_name = "join session";
+ lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle;
+ lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN;
lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX;
}
-extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
+extern int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry);
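
Making lstcon_acceptor_service static above gives the service table internal linkage: the extern declaration elsewhere is gone, and other files can reach the service only through its registration. A small sketch of the file-local pattern, with illustrative types and values:

#include <stdio.h>

struct service { const char *name; int id; };

static struct service acceptor;	/* internal linkage: file-local */

static void init_acceptor(void)
{
	acceptor.name = "join session";
	acceptor.id = 12;	/* stand-in for SRPC_SERVICE_JOIN */
}

int main(void)
{
	init_acceptor();
	printf("%s (%d)\n", acceptor.name, acceptor.id);
	return 0;
}
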
@@ -1988,16 +2008,16 @@ lstcon_console_init(void)
int i;
int rc;
- memset(&console_session, 0, sizeof(lstcon_session_t));
+ memset(&console_session, 0, sizeof(struct lstcon_session));
- console_session.ses_id = LST_INVALID_SID;
- console_session.ses_state = LST_SESSION_NONE;
- console_session.ses_timeout = 0;
- console_session.ses_force = 0;
- console_session.ses_expired = 0;
+ console_session.ses_id = LST_INVALID_SID;
+ console_session.ses_state = LST_SESSION_NONE;
+ console_session.ses_timeout = 0;
+ console_session.ses_force = 0;
+ console_session.ses_expired = 0;
console_session.ses_feats_updated = 0;
- console_session.ses_features = LST_FEATS_MASK;
- console_session.ses_laststamp = ktime_get_real_seconds();
+ console_session.ses_features = LST_FEATS_MASK;
+ console_session.ses_laststamp = ktime_get_real_seconds();
mutex_init(&console_session.ses_mutex);
@@ -2008,7 +2028,7 @@ lstcon_console_init(void)
LIBCFS_ALLOC(console_session.ses_ndl_hash,
sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
- if (console_session.ses_ndl_hash == NULL)
+ if (!console_session.ses_ndl_hash)
return -ENOMEM;
for (i = 0; i < LST_GLOBAL_HASHSIZE; i++)
@@ -2019,7 +2039,7 @@ lstcon_console_init(void)
rc = srpc_add_service(&lstcon_acceptor_service);
LASSERT(rc != -EBUSY);
- if (rc != 0) {
+ if (rc) {
LIBCFS_FREE(console_session.ses_ndl_hash,
sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
return rc;
@@ -2027,14 +2047,14 @@ lstcon_console_init(void)
rc = srpc_service_add_buffers(&lstcon_acceptor_service,
lstcon_acceptor_service.sv_wi_total);
- if (rc != 0) {
+ if (rc) {
rc = -ENOMEM;
goto out;
}
rc = libcfs_register_ioctl(&lstcon_ioctl_handler);
- if (rc == 0) {
+ if (!rc) {
lstcon_rpc_module_init();
return 0;
}
@@ -2075,9 +2095,8 @@ lstcon_console_fini(void)
LASSERT(list_empty(&console_session.ses_bat_list));
LASSERT(list_empty(&console_session.ses_trans_list));
- for (i = 0; i < LST_NODE_HASHSIZE; i++) {
+ for (i = 0; i < LST_NODE_HASHSIZE; i++)
LASSERT(list_empty(&console_session.ses_ndl_hash[i]));
- }
LIBCFS_FREE(console_session.ses_ndl_hash,
sizeof(struct list_head) * LST_GLOBAL_HASHSIZE);
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 3f3286c0c7bf..554f582441f1 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -52,79 +52,79 @@
typedef struct lstcon_node {
lnet_process_id_t nd_id; /* id of the node */
- int nd_ref; /* reference count */
- int nd_state; /* state of the node */
- int nd_timeout; /* session timeout */
- unsigned long nd_stamp; /* timestamp of last replied RPC */
+ int nd_ref; /* reference count */
+ int nd_state; /* state of the node */
+ int nd_timeout; /* session timeout */
+ unsigned long nd_stamp; /* timestamp of last replied RPC */
struct lstcon_rpc nd_ping; /* ping rpc */
} lstcon_node_t; /* node descriptor */
typedef struct {
struct list_head ndl_link; /* chain on list */
struct list_head ndl_hlink; /* chain on hash */
- lstcon_node_t *ndl_node; /* pointer to node */
+ lstcon_node_t *ndl_node; /* pointer to node */
} lstcon_ndlink_t; /* node link descriptor */
typedef struct {
- struct list_head grp_link; /* chain on global group list
+ struct list_head grp_link; /* chain on global group list
*/
- int grp_ref; /* reference count */
- int grp_userland; /* has userland nodes */
- int grp_nnode; /* # of nodes */
- char grp_name[LST_NAME_SIZE]; /* group name */
-
- struct list_head grp_trans_list; /* transaction list */
- struct list_head grp_ndl_list; /* nodes list */
- struct list_head grp_ndl_hash[0]; /* hash table for nodes */
+ int grp_ref; /* reference count */
+ int grp_userland; /* has userland nodes */
+ int grp_nnode; /* # of nodes */
+ char grp_name[LST_NAME_SIZE]; /* group name */
+
+ struct list_head grp_trans_list; /* transaction list */
+ struct list_head grp_ndl_list; /* nodes list */
+ struct list_head grp_ndl_hash[0]; /* hash table for nodes */
} lstcon_group_t; /* (alias of nodes) group descriptor */
-#define LST_BATCH_IDLE 0xB0 /* idle batch */
+#define LST_BATCH_IDLE 0xB0 /* idle batch */
#define LST_BATCH_RUNNING 0xB1 /* running batch */
typedef struct lstcon_tsb_hdr {
- lst_bid_t tsb_id; /* batch ID */
- int tsb_index; /* test index */
+ lst_bid_t tsb_id; /* batch ID */
+ int tsb_index; /* test index */
} lstcon_tsb_hdr_t;
typedef struct {
- lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
- struct list_head bat_link; /* chain on session's batches list */
- int bat_ntest; /* # of test */
- int bat_state; /* state of the batch */
- int bat_arg; /* parameter for run|stop, timeout
+ lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
+ struct list_head bat_link; /* chain on session's batches list */
+ int bat_ntest; /* # of test */
+ int bat_state; /* state of the batch */
+ int bat_arg; /* parameter for run|stop, timeout
* for run, force for stop */
- char bat_name[LST_NAME_SIZE];/* name of batch */
+ char bat_name[LST_NAME_SIZE];/* name of batch */
struct list_head bat_test_list; /* list head of tests (lstcon_test_t)
*/
struct list_head bat_trans_list; /* list head of transaction */
- struct list_head bat_cli_list; /* list head of client nodes
+ struct list_head bat_cli_list; /* list head of client nodes
* (lstcon_node_t) */
struct list_head *bat_cli_hash; /* hash table of client nodes */
- struct list_head bat_srv_list; /* list head of server nodes */
+ struct list_head bat_srv_list; /* list head of server nodes */
struct list_head *bat_srv_hash; /* hash table of server nodes */
} lstcon_batch_t; /* (tests ) batch descriptor */
typedef struct lstcon_test {
- lstcon_tsb_hdr_t tes_hdr; /* test batch header */
- struct list_head tes_link; /* chain on batch's tests list */
- lstcon_batch_t *tes_batch; /* pointer to batch */
-
- int tes_type; /* type of the test, i.e: bulk, ping */
- int tes_stop_onerr; /* stop on error */
- int tes_oneside; /* one-sided test */
- int tes_concur; /* concurrency */
- int tes_loop; /* loop count */
- int tes_dist; /* nodes distribution of target group */
- int tes_span; /* nodes span of target group */
- int tes_cliidx; /* client index, used for RPC creating */
+ lstcon_tsb_hdr_t tes_hdr; /* test batch header */
+ struct list_head tes_link; /* chain on batch's tests list */
+ lstcon_batch_t *tes_batch; /* pointer to batch */
+
+ int tes_type; /* type of the test, i.e: bulk, ping */
+ int tes_stop_onerr; /* stop on error */
+ int tes_oneside; /* one-sided test */
+ int tes_concur; /* concurrency */
+ int tes_loop; /* loop count */
+ int tes_dist; /* nodes distribution of target group */
+ int tes_span; /* nodes span of target group */
+ int tes_cliidx; /* client index, used for RPC creating */
struct list_head tes_trans_list; /* transaction list */
- lstcon_group_t *tes_src_grp; /* group run the test */
- lstcon_group_t *tes_dst_grp; /* target group */
+ lstcon_group_t *tes_src_grp; /* group run the test */
+ lstcon_group_t *tes_dst_grp; /* target group */
- int tes_paramlen; /* test parameter length */
- char tes_param[0]; /* test parameter */
+ int tes_paramlen; /* test parameter length */
+ char tes_param[0]; /* test parameter */
} lstcon_test_t; /* a single test descriptor */
#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
@@ -135,25 +135,25 @@ typedef struct lstcon_test {
#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */
-typedef struct {
- struct mutex ses_mutex; /* only 1 thread in session */
- lst_sid_t ses_id; /* global session id */
- int ses_key; /* local session key */
- int ses_state; /* state of session */
- int ses_timeout; /* timeout in seconds */
- time64_t ses_laststamp; /* last operation stamp (seconds)
+struct lstcon_session {
+ struct mutex ses_mutex; /* only 1 thread in session */
+ lst_sid_t ses_id; /* global session id */
+ int ses_key; /* local session key */
+ int ses_state; /* state of session */
+ int ses_timeout; /* timeout in seconds */
+ time64_t ses_laststamp; /* last operation stamp (seconds)
*/
- unsigned ses_features; /* tests features of the session
+ unsigned ses_features; /* tests features of the session
*/
- unsigned ses_feats_updated:1; /* features are synced with
+ unsigned ses_feats_updated:1; /* features are synced with
* remote test nodes */
- unsigned ses_force:1; /* force creating */
- unsigned ses_shutdown:1; /* session is shutting down */
- unsigned ses_expired:1; /* console is timedout */
- __u64 ses_id_cookie; /* batch id cookie */
- char ses_name[LST_NAME_SIZE];/* session name */
- lstcon_rpc_trans_t *ses_ping; /* session pinger */
- stt_timer_t ses_ping_timer; /* timer for pinger */
+ unsigned ses_force:1; /* force creating */
+ unsigned ses_shutdown:1; /* session is shutting down */
+ unsigned ses_expired:1; /* console is timedout */
+ __u64 ses_id_cookie; /* batch id cookie */
+ char ses_name[LST_NAME_SIZE];/* session name */
+ lstcon_rpc_trans_t *ses_ping; /* session pinger */
+ struct stt_timer ses_ping_timer; /* timer for pinger */
lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
struct list_head ses_trans_list; /* global list of transaction */
@@ -162,12 +162,12 @@ typedef struct {
struct list_head ses_ndl_list; /* global list of nodes */
struct list_head *ses_ndl_hash; /* hash table of nodes */
- spinlock_t ses_rpc_lock; /* serialize */
- atomic_t ses_rpc_counter; /* # of initialized RPCs */
+ spinlock_t ses_rpc_lock; /* serialize */
+ atomic_t ses_rpc_counter; /* # of initialized RPCs */
struct list_head ses_rpc_freelist; /* idle console rpc */
-} lstcon_session_t; /* session descriptor */
+}; /* session descriptor */
-extern lstcon_session_t console_session;
+extern struct lstcon_session console_session;
static inline lstcon_trans_stat_t *
lstcon_trans_stat(void)
@@ -176,7 +176,7 @@ lstcon_trans_stat(void)
}
static inline struct list_head *
-lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
+lstcon_id2hash(lnet_process_id_t id, struct list_head *hash)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
@@ -184,51 +184,54 @@ lstcon_id2hash (lnet_process_id_t id, struct list_head *hash)
}
int lstcon_console_init(void);
-int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
int lstcon_console_fini(void);
int lstcon_session_match(lst_sid_t sid);
int lstcon_session_new(char *name, int key, unsigned version,
- int timeout, int flags, lst_sid_t *sid_up);
-int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
- lstcon_ndlist_ent_t *entp, char *name_up, int len);
+ int timeout, int flags, lst_sid_t __user *sid_up);
+int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key,
+ unsigned __user *verp, lstcon_ndlist_ent_t __user *entp,
+ char __user *name_up, int len);
int lstcon_session_end(void);
-int lstcon_session_debug(int timeout, struct list_head *result_up);
+int lstcon_session_debug(int timeout, struct list_head __user *result_up);
int lstcon_session_feats_check(unsigned feats);
int lstcon_batch_debug(int timeout, char *name,
- int client, struct list_head *result_up);
+ int client, struct list_head __user *result_up);
int lstcon_group_debug(int timeout, char *name,
- struct list_head *result_up);
-int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
+ struct list_head __user *result_up);
+int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t __user *nds_up,
+ struct list_head __user *result_up);
int lstcon_group_add(char *name);
int lstcon_group_del(char *name);
int lstcon_group_clean(char *name, int args);
-int lstcon_group_refresh(char *name, struct list_head *result_up);
-int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
- unsigned *featp, struct list_head *result_up);
-int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
- struct list_head *result_up);
-int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
- int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
-int lstcon_group_list(int idx, int len, char *name_up);
+int lstcon_group_refresh(char *name, struct list_head __user *result_up);
+int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up,
+ unsigned *featp, struct list_head __user *result_up);
+int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up,
+ struct list_head __user *result_up);
+int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up,
+ int *index_p, int *ndent_p,
+ lstcon_node_ent_t __user *ndents_up);
+int lstcon_group_list(int idx, int len, char __user *name_up);
int lstcon_batch_add(char *name);
-int lstcon_batch_run(char *name, int timeout, struct list_head *result_up);
-int lstcon_batch_stop(char *name, int force, struct list_head *result_up);
+int lstcon_batch_run(char *name, int timeout,
+ struct list_head __user *result_up);
+int lstcon_batch_stop(char *name, int force,
+ struct list_head __user *result_up);
int lstcon_test_batch_query(char *name, int testidx,
int client, int timeout,
- struct list_head *result_up);
+ struct list_head __user *result_up);
int lstcon_batch_del(char *name);
-int lstcon_batch_list(int idx, int namelen, char *name_up);
-int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
+int lstcon_batch_list(int idx, int namelen, char __user *name_up);
+int lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
int server, int testidx, int *index_p,
- int *ndent_p, lstcon_node_ent_t *dents_up);
+ int *ndent_p, lstcon_node_ent_t __user *dents_up);
int lstcon_group_stat(char *grp_name, int timeout,
- struct list_head *result_up);
-int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
- int timeout, struct list_head *result_up);
+ struct list_head __user *result_up);
+int lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
+ int timeout, struct list_head __user *result_up);
int lstcon_test_add(char *batch_name, int type, int loop,
int concur, int dist, int span,
char *src_name, char *dst_name,
void *param, int paramlen, int *retp,
- struct list_head *result_up);
+ struct list_head __user *result_up);
#endif
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 1a2da7430190..e2c532399366 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -53,64 +53,64 @@ static int rpc_timeout = 64;
module_param(rpc_timeout, int, 0644);
MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)");
-#define sfw_unpack_id(id) \
-do { \
- __swab64s(&(id).nid); \
- __swab32s(&(id).pid); \
+#define sfw_unpack_id(id) \
+do { \
+ __swab64s(&(id).nid); \
+ __swab32s(&(id).pid); \
} while (0)
-#define sfw_unpack_sid(sid) \
-do { \
- __swab64s(&(sid).ses_nid); \
- __swab64s(&(sid).ses_stamp); \
+#define sfw_unpack_sid(sid) \
+do { \
+ __swab64s(&(sid).ses_nid); \
+ __swab64s(&(sid).ses_stamp); \
} while (0)
-#define sfw_unpack_fw_counters(fc) \
-do { \
- __swab32s(&(fc).running_ms); \
+#define sfw_unpack_fw_counters(fc) \
+do { \
+ __swab32s(&(fc).running_ms); \
__swab32s(&(fc).active_batches); \
__swab32s(&(fc).zombie_sessions); \
- __swab32s(&(fc).brw_errors); \
- __swab32s(&(fc).ping_errors); \
+ __swab32s(&(fc).brw_errors); \
+ __swab32s(&(fc).ping_errors); \
} while (0)
-#define sfw_unpack_rpc_counters(rc) \
-do { \
+#define sfw_unpack_rpc_counters(rc) \
+do { \
__swab32s(&(rc).errors); \
- __swab32s(&(rc).rpcs_sent); \
- __swab32s(&(rc).rpcs_rcvd); \
- __swab32s(&(rc).rpcs_dropped); \
- __swab32s(&(rc).rpcs_expired); \
- __swab64s(&(rc).bulk_get); \
- __swab64s(&(rc).bulk_put); \
+ __swab32s(&(rc).rpcs_sent); \
+ __swab32s(&(rc).rpcs_rcvd); \
+ __swab32s(&(rc).rpcs_dropped); \
+ __swab32s(&(rc).rpcs_expired); \
+ __swab64s(&(rc).bulk_get); \
+ __swab64s(&(rc).bulk_put); \
} while (0)
-#define sfw_unpack_lnet_counters(lc) \
-do { \
+#define sfw_unpack_lnet_counters(lc) \
+do { \
__swab32s(&(lc).errors); \
- __swab32s(&(lc).msgs_max); \
- __swab32s(&(lc).msgs_alloc); \
- __swab32s(&(lc).send_count); \
- __swab32s(&(lc).recv_count); \
- __swab32s(&(lc).drop_count); \
- __swab32s(&(lc).route_count); \
- __swab64s(&(lc).send_length); \
- __swab64s(&(lc).recv_length); \
- __swab64s(&(lc).drop_length); \
- __swab64s(&(lc).route_length); \
+ __swab32s(&(lc).msgs_max); \
+ __swab32s(&(lc).msgs_alloc); \
+ __swab32s(&(lc).send_count); \
+ __swab32s(&(lc).recv_count); \
+ __swab32s(&(lc).drop_count); \
+ __swab32s(&(lc).route_count); \
+ __swab64s(&(lc).send_length); \
+ __swab64s(&(lc).recv_length); \
+ __swab64s(&(lc).drop_length); \
+ __swab64s(&(lc).route_length); \
} while (0)
-#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0)
-#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
+#define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive))
+#define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive))
static struct smoketest_framework {
struct list_head fw_zombie_rpcs; /* RPCs to be recycled */
struct list_head fw_zombie_sessions; /* stopping sessions */
- struct list_head fw_tests; /* registered test cases */
- atomic_t fw_nzombies; /* # zombie sessions */
- spinlock_t fw_lock; /* serialise */
- sfw_session_t *fw_session; /* _the_ session */
- int fw_shuttingdown; /* shutdown in progress */
+ struct list_head fw_tests; /* registered test cases */
+ atomic_t fw_nzombies; /* # zombie sessions */
+ spinlock_t fw_lock; /* serialise */
+ sfw_session_t *fw_session; /* _the_ session */
+ int fw_shuttingdown; /* shutdown in progress */
struct srpc_server_rpc *fw_active_srpc;/* running RPC */
} sfw_data;
@@ -139,17 +139,17 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
{
sfw_test_case_t *tsc;
- if (sfw_find_test_case(service->sv_id) != NULL) {
+ if (sfw_find_test_case(service->sv_id)) {
CERROR("Failed to register test %s (%d)\n",
- service->sv_name, service->sv_id);
+ service->sv_name, service->sv_id);
return -EEXIST;
}
LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t));
- if (tsc == NULL)
+ if (!tsc)
return -ENOMEM;
- tsc->tsc_cli_ops = cliops;
+ tsc->tsc_cli_ops = cliops;
tsc->tsc_srv_service = service;
list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
@@ -160,11 +160,11 @@ static void
sfw_add_session_timer(void)
{
sfw_session_t *sn = sfw_data.fw_session;
- stt_timer_t *timer = &sn->sn_timer;
+ struct stt_timer *timer = &sn->sn_timer;
LASSERT(!sfw_data.fw_shuttingdown);
- if (sn == NULL || sn->sn_timeout == 0)
+ if (!sn || !sn->sn_timeout)
return;
LASSERT(!sn->sn_timer_active);
@@ -172,7 +172,6 @@ sfw_add_session_timer(void)
sn->sn_timer_active = 1;
timer->stt_expires = ktime_get_real_seconds() + sn->sn_timeout;
stt_add_timer(timer);
- return;
}
static int
@@ -180,10 +179,10 @@ sfw_del_session_timer(void)
{
sfw_session_t *sn = sfw_data.fw_session;
- if (sn == NULL || !sn->sn_timer_active)
+ if (!sn || !sn->sn_timer_active)
return 0;
- LASSERT(sn->sn_timeout != 0);
+ LASSERT(sn->sn_timeout);
if (stt_del_timer(&sn->sn_timer)) { /* timer defused */
sn->sn_timer_active = 0;
@@ -195,14 +194,14 @@ sfw_del_session_timer(void)
static void
sfw_deactivate_session(void)
- __must_hold(&sfw_data.fw_lock)
+__must_hold(&sfw_data.fw_lock)
{
sfw_session_t *sn = sfw_data.fw_session;
int nactive = 0;
sfw_batch_t *tsb;
sfw_test_case_t *tsc;
- if (sn == NULL)
+ if (!sn)
return;
LASSERT(!sn->sn_timer_active);
@@ -226,7 +225,7 @@ sfw_deactivate_session(void)
}
}
- if (nactive != 0)
+ if (nactive)
return; /* wait for active batches to stop */
list_del_init(&sn->sn_list);
@@ -248,8 +247,8 @@ sfw_session_expired(void *data)
LASSERT(sn == sfw_data.fw_session);
CWARN("Session expired! sid: %s-%llu, name: %s\n",
- libcfs_nid2str(sn->sn_id.ses_nid),
- sn->sn_id.ses_stamp, &sn->sn_name[0]);
+ libcfs_nid2str(sn->sn_id.ses_nid),
+ sn->sn_id.ses_stamp, &sn->sn_name[0]);
sn->sn_timer_active = 0;
sfw_deactivate_session();
@@ -261,7 +260,7 @@ static inline void
sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
unsigned features, const char *name)
{
- stt_timer_t *timer = &sn->sn_timer;
+ struct stt_timer *timer = &sn->sn_timer;
memset(sn, 0, sizeof(sfw_session_t));
INIT_LIST_HEAD(&sn->sn_list);
@@ -272,10 +271,10 @@ sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
sn->sn_timer_active = 0;
- sn->sn_id = sid;
- sn->sn_features = features;
- sn->sn_timeout = session_timeout;
- sn->sn_started = cfs_time_current();
+ sn->sn_id = sid;
+ sn->sn_features = features;
+ sn->sn_timeout = session_timeout;
+ sn->sn_started = cfs_time_current();
timer->stt_data = sn;
timer->stt_func = sfw_session_expired;
@@ -289,29 +288,26 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
int status = rpc->srpc_status;
- CDEBUG(D_NET,
- "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer),
- swi_state2str(rpc->srpc_wi.swi_state),
- status);
+ CDEBUG(D_NET, "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+ swi_state2str(rpc->srpc_wi.swi_state),
+ status);
- if (rpc->srpc_bulk != NULL)
+ if (rpc->srpc_bulk)
sfw_free_pages(rpc);
- return;
}
static void
sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
{
- LASSERT(rpc->crpc_bulk.bk_niov == 0);
+ LASSERT(!rpc->crpc_bulk.bk_niov);
LASSERT(list_empty(&rpc->crpc_list));
- LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT(!atomic_read(&rpc->crpc_refcount));
- CDEBUG(D_NET,
- "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(rpc->crpc_wi.swi_state),
- rpc->crpc_aborted, rpc->crpc_status);
+ CDEBUG(D_NET, "Outgoing framework RPC done: service %d, peer %s, status %s:%d:%d\n",
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ swi_state2str(rpc->crpc_wi.swi_state),
+ rpc->crpc_aborted, rpc->crpc_status);
spin_lock(&sfw_data.fw_lock);
@@ -328,7 +324,7 @@ sfw_find_batch(lst_bid_t bid)
sfw_session_t *sn = sfw_data.fw_session;
sfw_batch_t *bat;
- LASSERT(sn != NULL);
+ LASSERT(sn);
list_for_each_entry(bat, &sn->sn_batches, bat_list) {
if (bat->bat_id.bat_id == bid.bat_id)
@@ -344,19 +340,19 @@ sfw_bid2batch(lst_bid_t bid)
sfw_session_t *sn = sfw_data.fw_session;
sfw_batch_t *bat;
- LASSERT(sn != NULL);
+ LASSERT(sn);
bat = sfw_find_batch(bid);
- if (bat != NULL)
+ if (bat)
return bat;
LIBCFS_ALLOC(bat, sizeof(sfw_batch_t));
- if (bat == NULL)
+ if (!bat)
return NULL;
- bat->bat_error = 0;
- bat->bat_session = sn;
- bat->bat_id = bid;
+ bat->bat_error = 0;
+ bat->bat_session = sn;
+ bat->bat_id = bid;
atomic_set(&bat->bat_nactive, 0);
INIT_LIST_HEAD(&bat->bat_tests);
@@ -371,14 +367,14 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
sfw_counters_t *cnt = &reply->str_fw;
sfw_batch_t *bat;
- reply->str_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+ reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
if (request->str_sid.ses_nid == LNET_NID_ANY) {
reply->str_status = EINVAL;
return 0;
}
- if (sn == NULL || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
+ if (!sn || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
reply->str_status = ESRCH;
return 0;
}
@@ -386,11 +382,13 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
lnet_counters_get(&reply->str_lnet);
srpc_get_counters(&reply->str_rpc);
- /* send over the msecs since the session was started
- - with 32 bits to send, this is ~49 days */
- cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started);
- cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
- cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
+ /*
+ * send over the msecs since the session was started
+ * with 32 bits to send, this is ~49 days
+ */
+ cnt->running_ms = jiffies_to_msecs(jiffies - sn->sn_started);
+ cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
+ cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
cnt->active_batches = 0;
@@ -408,18 +406,18 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
srpc_msg_t *msg = container_of(request, srpc_msg_t,
- msg_body.mksn_reqst);
+ msg_body.mksn_reqst);
int cplen = 0;
if (request->mksn_sid.ses_nid == LNET_NID_ANY) {
- reply->mksn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+ reply->mksn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
reply->mksn_status = EINVAL;
return 0;
}
- if (sn != NULL) {
- reply->mksn_status = 0;
- reply->mksn_sid = sn->sn_id;
+ if (sn) {
+ reply->mksn_status = 0;
+ reply->mksn_sid = sn->sn_id;
reply->mksn_timeout = sn->sn_timeout;
if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
@@ -437,21 +435,23 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
}
}
- /* reject the request if it requires unknown features
+ /*
+ * reject the request if it requires unknown features
* NB: old version will always accept all features because it's not
* aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also
* harmless because it will return zero feature to console, and it's
* console's responsibility to make sure all nodes in a session have
- * same feature mask. */
- if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ * same feature mask.
+ */
+ if (msg->msg_ses_feats & ~LST_FEATS_MASK) {
reply->mksn_status = EPROTO;
return 0;
}
/* brand new or create by force */
LIBCFS_ALLOC(sn, sizeof(sfw_session_t));
- if (sn == NULL) {
- CERROR("Dropping RPC (mksn) under memory pressure.\n");
+ if (!sn) {
+ CERROR("dropping RPC mksn under memory pressure\n");
return -ENOMEM;
}
@@ -461,13 +461,13 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
spin_lock(&sfw_data.fw_lock);
sfw_deactivate_session();
- LASSERT(sfw_data.fw_session == NULL);
+ LASSERT(!sfw_data.fw_session);
sfw_data.fw_session = sn;
spin_unlock(&sfw_data.fw_lock);
- reply->mksn_status = 0;
- reply->mksn_sid = sn->sn_id;
+ reply->mksn_status = 0;
+ reply->mksn_sid = sn->sn_id;
reply->mksn_timeout = sn->sn_timeout;
return 0;
}
@@ -477,15 +477,15 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
- reply->rmsn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+ reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
if (request->rmsn_sid.ses_nid == LNET_NID_ANY) {
reply->rmsn_status = EINVAL;
return 0;
}
- if (sn == NULL || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
- reply->rmsn_status = (sn == NULL) ? ESRCH : EBUSY;
+ if (!sn || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
+ reply->rmsn_status = !sn ? ESRCH : EBUSY;
return 0;
}
@@ -499,8 +499,8 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
spin_unlock(&sfw_data.fw_lock);
reply->rmsn_status = 0;
- reply->rmsn_sid = LST_INVALID_SID;
- LASSERT(sfw_data.fw_session == NULL);
+ reply->rmsn_sid = LST_INVALID_SID;
+ LASSERT(!sfw_data.fw_session);
return 0;
}
@@ -509,14 +509,14 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
{
sfw_session_t *sn = sfw_data.fw_session;
- if (sn == NULL) {
+ if (!sn) {
reply->dbg_status = ESRCH;
- reply->dbg_sid = LST_INVALID_SID;
+ reply->dbg_sid = LST_INVALID_SID;
return 0;
}
- reply->dbg_status = 0;
- reply->dbg_sid = sn->sn_id;
+ reply->dbg_status = 0;
+ reply->dbg_sid = sn->sn_id;
reply->dbg_timeout = sn->sn_timeout;
if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name))
>= sizeof(reply->dbg_name))
@@ -539,10 +539,16 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
static inline int
sfw_test_buffers(sfw_test_instance_t *tsi)
{
- struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
- struct srpc_service *svc = tsc->tsc_srv_service;
+ struct sfw_test_case *tsc;
+ struct srpc_service *svc;
int nbuf;
+ LASSERT(tsi);
+ tsc = sfw_find_test_case(tsi->tsi_service);
+ LASSERT(tsc);
+ svc = tsc->tsc_srv_service;
+ LASSERT(svc);
+
nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts;
return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA);
}
@@ -555,10 +561,10 @@ sfw_load_test(struct sfw_test_instance *tsi)
int nbuf;
int rc;
- LASSERT(tsi != NULL);
+ LASSERT(tsi);
tsc = sfw_find_test_case(tsi->tsi_service);
nbuf = sfw_test_buffers(tsi);
- LASSERT(tsc != NULL);
+ LASSERT(tsc);
svc = tsc->tsc_srv_service;
if (tsi->tsi_is_client) {
@@ -567,39 +573,44 @@ sfw_load_test(struct sfw_test_instance *tsi)
}
rc = srpc_service_add_buffers(svc, nbuf);
- if (rc != 0) {
+ if (rc) {
CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
svc->sv_name, nbuf, rc);
- /* NB: this error handler is not strictly correct, because
+ /*
+ * NB: this error handler is not strictly correct, because
* it may release more buffers than already allocated,
* but it doesn't matter because request portal should
- * be lazy portal and will grow buffers if necessary. */
+ * be lazy portal and will grow buffers if necessary.
+ */
srpc_service_remove_buffers(svc, nbuf);
return -ENOMEM;
}
CDEBUG(D_NET, "Reserved %d buffers for test %s\n",
nbuf * (srpc_serv_is_framework(svc) ?
- 1 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name);
+ 1 : cfs_cpt_number(cfs_cpt_table)), svc->sv_name);
return 0;
}
static void
sfw_unload_test(struct sfw_test_instance *tsi)
{
- struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service);
+ struct sfw_test_case *tsc;
- LASSERT(tsc != NULL);
+ LASSERT(tsi);
+ tsc = sfw_find_test_case(tsi->tsi_service);
+ LASSERT(tsc);
if (tsi->tsi_is_client)
return;
- /* shrink buffers, because request portal is lazy portal
+ /*
+ * shrink buffers, because request portal is lazy portal
* which can grow buffers at runtime so we may leave
- * some buffers behind, but never mind... */
+ * some buffers behind, but never mind...
+ */
srpc_service_remove_buffers(tsc->tsc_srv_service,
sfw_test_buffers(tsi));
- return;
}
static void
@@ -619,14 +630,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
while (!list_empty(&tsi->tsi_units)) {
tsu = list_entry(tsi->tsi_units.next,
- sfw_test_unit_t, tsu_list);
+ sfw_test_unit_t, tsu_list);
list_del(&tsu->tsu_list);
LIBCFS_FREE(tsu, sizeof(*tsu));
}
while (!list_empty(&tsi->tsi_free_rpcs)) {
rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ srpc_client_rpc_t, crpc_list);
list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
}
@@ -634,7 +645,6 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
clean:
sfw_unload_test(tsi);
LIBCFS_FREE(tsi, sizeof(*tsi));
- return;
}
static void
@@ -647,13 +657,12 @@ sfw_destroy_batch(sfw_batch_t *tsb)
while (!list_empty(&tsb->bat_tests)) {
tsi = list_entry(tsb->bat_tests.next,
- sfw_test_instance_t, tsi_list);
+ sfw_test_instance_t, tsi_list);
list_del_init(&tsi->tsi_list);
sfw_destroy_test_instance(tsi);
}
LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
- return;
}
void
@@ -666,14 +675,13 @@ sfw_destroy_session(sfw_session_t *sn)
while (!list_empty(&sn->sn_batches)) {
batch = list_entry(sn->sn_batches.next,
- sfw_batch_t, bat_list);
+ sfw_batch_t, bat_list);
list_del_init(&batch->bat_list);
sfw_destroy_batch(batch);
}
LIBCFS_FREE(sn, sizeof(*sn));
atomic_dec(&sfw_data.fw_nzombies);
- return;
}
static void
@@ -690,7 +698,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
if (req->tsr_service == SRPC_SERVICE_BRW) {
- if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
+ if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
__swab32s(&bulk->blk_opc);
@@ -718,7 +726,6 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
}
LBUG();
- return;
}
static int
@@ -734,9 +741,9 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
int rc;
LIBCFS_ALLOC(tsi, sizeof(*tsi));
- if (tsi == NULL) {
+ if (!tsi) {
CERROR("Can't allocate test instance for batch: %llu\n",
- tsb->bat_id.bat_id);
+ tsb->bat_id.bat_id);
return -ENOMEM;
}
@@ -746,16 +753,16 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
- tsi->tsi_stopping = 0;
- tsi->tsi_batch = tsb;
- tsi->tsi_loop = req->tsr_loop;
- tsi->tsi_concur = req->tsr_concur;
- tsi->tsi_service = req->tsr_service;
- tsi->tsi_is_client = !!(req->tsr_is_client);
+ tsi->tsi_stopping = 0;
+ tsi->tsi_batch = tsb;
+ tsi->tsi_loop = req->tsr_loop;
+ tsi->tsi_concur = req->tsr_concur;
+ tsi->tsi_service = req->tsr_service;
+ tsi->tsi_is_client = !!(req->tsr_is_client);
tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr);
rc = sfw_load_test(tsi);
- if (rc != 0) {
+ if (rc) {
LIBCFS_FREE(tsi, sizeof(*tsi));
return rc;
}
@@ -768,7 +775,7 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
return 0;
}
- LASSERT(bk != NULL);
+ LASSERT(bk);
LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
LASSERT((unsigned int)bk->bk_len >=
sizeof(lnet_process_id_packed_t) * ndest);
@@ -782,36 +789,36 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
int j;
dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
- LASSERT(dests != NULL); /* my pages are within KVM always */
+ LASSERT(dests); /* my pages are within KVM always */
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
sfw_unpack_id(id);
for (j = 0; j < tsi->tsi_concur; j++) {
LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t));
- if (tsu == NULL) {
+ if (!tsu) {
rc = -ENOMEM;
CERROR("Can't allocate tsu for %d\n",
- tsi->tsi_service);
+ tsi->tsi_service);
goto error;
}
tsu->tsu_dest.nid = id.nid;
tsu->tsu_dest.pid = id.pid;
tsu->tsu_instance = tsi;
- tsu->tsu_private = NULL;
+ tsu->tsu_private = NULL;
list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
}
}
rc = tsi->tsi_ops->tso_init(tsi);
- if (rc == 0) {
+ if (!rc) {
list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
return 0;
}
error:
- LASSERT(rc != 0);
+ LASSERT(rc);
sfw_destroy_test_instance(tsi);
return rc;
}
@@ -856,7 +863,6 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
spin_unlock(&sfw_data.fw_lock);
sfw_destroy_session(sn);
- return;
}
static void
@@ -876,9 +882,8 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
list_del_init(&rpc->crpc_list);
/* batch is stopping or loop is done or get error */
- if (tsi->tsi_stopping ||
- tsu->tsu_loop == 0 ||
- (rpc->crpc_status != 0 && tsi->tsi_stoptsu_onerr))
+ if (tsi->tsi_stopping || !tsu->tsu_loop ||
+ (rpc->crpc_status && tsi->tsi_stoptsu_onerr))
done = 1;
/* dec ref for poster */
@@ -892,7 +897,6 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
}
sfw_test_unit_done(tsu);
- return;
}
int
@@ -906,18 +910,17 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
spin_lock(&tsi->tsi_lock);
LASSERT(sfw_test_active(tsi));
-
- if (!list_empty(&tsi->tsi_free_rpcs)) {
/* pick request from buffer */
- rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs,
+ srpc_client_rpc_t, crpc_list);
+ if (rpc) {
LASSERT(nblk == rpc->crpc_bulk.bk_niov);
list_del_init(&rpc->crpc_list);
}
spin_unlock(&tsi->tsi_lock);
- if (rpc == NULL) {
+ if (!rpc) {
rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
blklen, sfw_test_rpc_done,
sfw_test_rpc_fini, tsu);
@@ -927,7 +930,7 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
sfw_test_rpc_fini, tsu);
}
- if (rpc == NULL) {
+ if (!rpc) {
CERROR("Can't create rpc for test %d\n", tsi->tsi_service);
return -ENOMEM;
}
@@ -947,12 +950,12 @@ sfw_run_test(swi_workitem_t *wi)
LASSERT(wi == &tsu->tsu_worker);
- if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc) != 0) {
- LASSERT(rpc == NULL);
+ if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc)) {
+ LASSERT(!rpc);
goto test_done;
}
- LASSERT(rpc != NULL);
+ LASSERT(rpc);
spin_lock(&tsi->tsi_lock);
@@ -968,9 +971,8 @@ sfw_run_test(swi_workitem_t *wi)
list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
spin_unlock(&tsi->tsi_lock);
- rpc->crpc_timeout = rpc_timeout;
-
spin_lock(&rpc->crpc_lock);
+ rpc->crpc_timeout = rpc_timeout;
srpc_post_rpc(rpc);
spin_unlock(&rpc->crpc_lock);
return 0;
@@ -1015,8 +1017,7 @@ sfw_run_batch(sfw_batch_t *tsb)
tsu->tsu_loop = tsi->tsi_loop;
wi = &tsu->tsu_worker;
swi_init_workitem(wi, tsu, sfw_run_test,
- lst_sched_test[\
- lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
+ lst_sched_test[lnet_cpt_of_nid(tsu->tsu_dest.nid)]);
swi_schedule_workitem(wi);
}
}
@@ -1074,7 +1075,7 @@ sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
if (testidx < 0)
return -EINVAL;
- if (testidx == 0) {
+ if (!testidx) {
reply->bar_active = atomic_read(&tsb->bat_nactive);
return 0;
}
@@ -1101,11 +1102,11 @@ int
sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
int sink)
{
- LASSERT(rpc->srpc_bulk == NULL);
+ LASSERT(!rpc->srpc_bulk);
LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
- if (rpc->srpc_bulk == NULL)
+ if (!rpc->srpc_bulk)
return -ENOMEM;
return 0;
@@ -1121,13 +1122,13 @@ sfw_add_test(struct srpc_server_rpc *rpc)
sfw_batch_t *bat;
request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
- reply->tsr_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+ reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
- if (request->tsr_loop == 0 ||
- request->tsr_concur == 0 ||
+ if (!request->tsr_loop ||
+ !request->tsr_concur ||
request->tsr_sid.ses_nid == LNET_NID_ANY ||
request->tsr_ndest > SFW_MAX_NDESTS ||
- (request->tsr_is_client && request->tsr_ndest == 0) ||
+ (request->tsr_is_client && !request->tsr_ndest) ||
request->tsr_concur > SFW_MAX_CONCUR ||
request->tsr_service > SRPC_SERVICE_MAX_ID ||
request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) {
@@ -1135,17 +1136,17 @@ sfw_add_test(struct srpc_server_rpc *rpc)
return 0;
}
- if (sn == NULL || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
- sfw_find_test_case(request->tsr_service) == NULL) {
+ if (!sn || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
+ !sfw_find_test_case(request->tsr_service)) {
reply->tsr_status = ENOENT;
return 0;
}
bat = sfw_bid2batch(request->tsr_bid);
- if (bat == NULL) {
- CERROR("Dropping RPC (%s) from %s under memory pressure.\n",
- rpc->srpc_scd->scd_svc->sv_name,
- libcfs_id2str(rpc->srpc_peer));
+ if (!bat) {
+ CERROR("dropping RPC %s from %s under memory pressure\n",
+ rpc->srpc_scd->scd_svc->sv_name,
+ libcfs_id2str(rpc->srpc_peer));
return -ENOMEM;
}
@@ -1154,15 +1155,15 @@ sfw_add_test(struct srpc_server_rpc *rpc)
return 0;
}
- if (request->tsr_is_client && rpc->srpc_bulk == NULL) {
+ if (request->tsr_is_client && !rpc->srpc_bulk) {
/* rpc will be resumed later in sfw_bulk_ready */
int npg = sfw_id_pages(request->tsr_ndest);
int len;
- if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) {
- len = npg * PAGE_CACHE_SIZE;
+ if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
+ len = npg * PAGE_SIZE;
- } else {
+ } else {
len = sizeof(lnet_process_id_packed_t) *
request->tsr_ndest;
}
@@ -1171,11 +1172,11 @@ sfw_add_test(struct srpc_server_rpc *rpc)
}
rc = sfw_add_test_instance(bat, rpc);
- CDEBUG(rc == 0 ? D_NET : D_WARNING,
- "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
- rc == 0 ? "Added" : "Failed to add", request->tsr_service,
- request->tsr_is_client ? "client" : "server",
- request->tsr_loop, request->tsr_concur, request->tsr_ndest);
+ CDEBUG(!rc ? D_NET : D_WARNING,
+ "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
+ !rc ? "Added" : "Failed to add", request->tsr_service,
+ request->tsr_is_client ? "client" : "server",
+ request->tsr_loop, request->tsr_concur, request->tsr_ndest);
reply->tsr_status = (rc < 0) ? -rc : rc;
return 0;
@@ -1188,15 +1189,15 @@ sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
int rc = 0;
sfw_batch_t *bat;
- reply->bar_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id;
+ reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
- if (sn == NULL || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
+ if (!sn || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
reply->bar_status = ESRCH;
return 0;
}
bat = sfw_find_batch(request->bar_bid);
- if (bat == NULL) {
+ if (!bat) {
reply->bar_status = ENOENT;
return 0;
}
@@ -1231,7 +1232,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
unsigned features = LST_FEATS_MASK;
int rc = 0;
- LASSERT(sfw_data.fw_active_srpc == NULL);
+ LASSERT(!sfw_data.fw_active_srpc);
LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
spin_lock(&sfw_data.fw_lock);
@@ -1242,7 +1243,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
}
/* Remove timer to avoid racing with it or expiring active session */
- if (sfw_del_session_timer() != 0) {
+ if (sfw_del_session_timer()) {
CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
sv->sv_name, libcfs_id2str(rpc->srpc_peer));
spin_unlock(&sfw_data.fw_lock);
@@ -1262,19 +1263,21 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
sv->sv_id != SRPC_SERVICE_DEBUG) {
sfw_session_t *sn = sfw_data.fw_session;
- if (sn != NULL &&
+ if (sn &&
sn->sn_features != request->msg_ses_feats) {
CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
request->msg_ses_feats, sn->sn_features);
reply->msg_body.reply.status = EPROTO;
- reply->msg_body.reply.sid = sn->sn_id;
+ reply->msg_body.reply.sid = sn->sn_id;
goto out;
}
- } else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
- /* NB: at this point, old version will ignore features and
+ } else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
+ /**
+ * NB: at this point, old version will ignore features and
* create new session anyway, so console should be able
- * to handle this */
+ * to handle this
+ */
reply->msg_body.reply.status = EPROTO;
goto out;
}
@@ -1312,7 +1315,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
break;
}
- if (sfw_data.fw_session != NULL)
+ if (sfw_data.fw_session)
features = sfw_data.fw_session->sn_features;
out:
reply->msg_ses_feats = features;
@@ -1333,14 +1336,14 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
int rc;
- LASSERT(rpc->srpc_bulk != NULL);
+ LASSERT(rpc->srpc_bulk);
LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
- LASSERT(sfw_data.fw_active_srpc == NULL);
+ LASSERT(!sfw_data.fw_active_srpc);
LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
spin_lock(&sfw_data.fw_lock);
- if (status != 0) {
+ if (status) {
CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
spin_unlock(&sfw_data.fw_lock);
@@ -1352,8 +1355,8 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
return -ESHUTDOWN;
}
- if (sfw_del_session_timer() != 0) {
- CERROR("Dropping RPC (%s) from %s: racing with expiry timer",
+ if (sfw_del_session_timer()) {
+ CERROR("dropping RPC %s from %s: racing with expiry timer\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer));
spin_unlock(&sfw_data.fw_lock);
return -EAGAIN;
@@ -1386,9 +1389,9 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
LASSERT(!sfw_data.fw_shuttingdown);
LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
+ if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ srpc_client_rpc_t, crpc_list);
list_del(&rpc->crpc_list);
srpc_init_client_rpc(rpc, peer, service, 0, 0,
@@ -1397,15 +1400,15 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
spin_unlock(&sfw_data.fw_lock);
- if (rpc == NULL) {
+ if (!rpc) {
rpc = srpc_create_client_rpc(peer, service,
nbulkiov, bulklen, done,
- nbulkiov != 0 ? NULL :
+ nbulkiov ? NULL :
sfw_client_rpc_fini,
priv);
}
- if (rpc != NULL) /* "session" is concept in framework */
+ if (rpc) /* "session" is concept in framework */
rpc->crpc_reqstmsg.msg_ses_feats = features;
return rpc;
@@ -1552,7 +1555,6 @@ sfw_unpack_message(srpc_msg_t *msg)
}
LBUG();
- return;
}
void
@@ -1564,7 +1566,6 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc)
spin_lock(&rpc->crpc_lock);
srpc_abort_rpc(rpc, -EINTR);
spin_unlock(&rpc->crpc_lock);
- return;
}
void
@@ -1581,7 +1582,6 @@ sfw_post_rpc(srpc_client_rpc_t *rpc)
srpc_post_rpc(rpc);
spin_unlock(&rpc->crpc_lock);
- return;
}
static srpc_service_t sfw_services[] = {
@@ -1622,16 +1622,6 @@ static srpc_service_t sfw_services[] = {
}
};
-extern sfw_test_client_ops_t ping_test_client;
-extern srpc_service_t ping_test_service;
-extern void ping_init_test_client(void);
-extern void ping_init_test_service(void);
-
-extern sfw_test_client_ops_t brw_test_client;
-extern srpc_service_t brw_test_service;
-extern void brw_init_test_client(void);
-extern void brw_init_test_service(void);
-
int
sfw_startup(void)
{
@@ -1643,25 +1633,25 @@ sfw_startup(void)
if (session_timeout < 0) {
CERROR("Session timeout must be non-negative: %d\n",
- session_timeout);
+ session_timeout);
return -EINVAL;
}
if (rpc_timeout < 0) {
CERROR("RPC timeout must be non-negative: %d\n",
- rpc_timeout);
+ rpc_timeout);
return -EINVAL;
}
- if (session_timeout == 0)
+ if (!session_timeout)
CWARN("Zero session_timeout specified - test sessions never expire.\n");
- if (rpc_timeout == 0)
+ if (!rpc_timeout)
CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
memset(&sfw_data, 0, sizeof(struct smoketest_framework));
- sfw_data.fw_session = NULL;
+ sfw_data.fw_session = NULL;
sfw_data.fw_active_srpc = NULL;
spin_lock_init(&sfw_data.fw_lock);
atomic_set(&sfw_data.fw_nzombies, 0);
@@ -1672,12 +1662,12 @@ sfw_startup(void)
brw_init_test_client();
brw_init_test_service();
rc = sfw_register_test(&brw_test_service, &brw_test_client);
- LASSERT(rc == 0);
+ LASSERT(!rc);
ping_init_test_client();
ping_init_test_service();
rc = sfw_register_test(&ping_test_service, &ping_test_client);
- LASSERT(rc == 0);
+ LASSERT(!rc);
error = 0;
list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
@@ -1685,29 +1675,29 @@ sfw_startup(void)
rc = srpc_add_service(sv);
LASSERT(rc != -EBUSY);
- if (rc != 0) {
+ if (rc) {
CWARN("Failed to add %s service: %d\n",
- sv->sv_name, rc);
+ sv->sv_name, rc);
error = rc;
}
}
for (i = 0; ; i++) {
sv = &sfw_services[i];
- if (sv->sv_name == NULL)
+ if (!sv->sv_name)
break;
sv->sv_bulk_ready = NULL;
- sv->sv_handler = sfw_handle_server_rpc;
- sv->sv_wi_total = SFW_FRWK_WI_MAX;
+ sv->sv_handler = sfw_handle_server_rpc;
+ sv->sv_wi_total = SFW_FRWK_WI_MAX;
if (sv->sv_id == SRPC_SERVICE_TEST)
sv->sv_bulk_ready = sfw_bulk_ready;
rc = srpc_add_service(sv);
LASSERT(rc != -EBUSY);
- if (rc != 0) {
+ if (rc) {
CWARN("Failed to add %s service: %d\n",
- sv->sv_name, rc);
+ sv->sv_name, rc);
error = rc;
}
@@ -1716,14 +1706,14 @@ sfw_startup(void)
continue;
rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
- if (rc != 0) {
+ if (rc) {
CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
sv->sv_name, sv->sv_wi_total, rc);
error = -ENOMEM;
}
}
- if (error != 0)
+ if (error)
sfw_shutdown();
return error;
}
@@ -1738,15 +1728,15 @@ sfw_shutdown(void)
spin_lock(&sfw_data.fw_lock);
sfw_data.fw_shuttingdown = 1;
- lst_wait_until(sfw_data.fw_active_srpc == NULL, sfw_data.fw_lock,
+ lst_wait_until(!sfw_data.fw_active_srpc, sfw_data.fw_lock,
"waiting for active RPC to finish.\n");
- if (sfw_del_session_timer() != 0)
- lst_wait_until(sfw_data.fw_session == NULL, sfw_data.fw_lock,
+ if (sfw_del_session_timer())
+ lst_wait_until(!sfw_data.fw_session, sfw_data.fw_lock,
"waiting for session timer to explode.\n");
sfw_deactivate_session();
- lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
+ lst_wait_until(!atomic_read(&sfw_data.fw_nzombies),
sfw_data.fw_lock,
"waiting for %d zombie sessions to die.\n",
atomic_read(&sfw_data.fw_nzombies));
@@ -1755,7 +1745,7 @@ sfw_shutdown(void)
for (i = 0; ; i++) {
sv = &sfw_services[i];
- if (sv->sv_name == NULL)
+ if (!sv->sv_name)
break;
srpc_shutdown_service(sv);
@@ -1772,7 +1762,7 @@ sfw_shutdown(void)
srpc_client_rpc_t *rpc;
rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ srpc_client_rpc_t, crpc_list);
list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
@@ -1780,7 +1770,7 @@ sfw_shutdown(void)
for (i = 0; ; i++) {
sv = &sfw_services[i];
- if (sv->sv_name == NULL)
+ if (!sv->sv_name)
break;
srpc_wait_service_shutdown(sv);
@@ -1788,13 +1778,11 @@ sfw_shutdown(void)
while (!list_empty(&sfw_data.fw_tests)) {
tsc = list_entry(sfw_data.fw_tests.next,
- sfw_test_case_t, tsc_list);
+ sfw_test_case_t, tsc_list);
srpc_wait_service_shutdown(tsc->tsc_srv_service);
list_del(&tsc->tsc_list);
LIBCFS_FREE(tsc, sizeof(*tsc));
}
-
- return;
}
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index 46cbdf0456cc..cc046b1d4d0a 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -37,9 +37,10 @@
#define DEBUG_SUBSYSTEM S_LNET
#include "selftest.h"
+#include "console.h"
enum {
- LST_INIT_NONE = 0,
+ LST_INIT_NONE = 0,
LST_INIT_WI_SERIAL,
LST_INIT_WI_TEST,
LST_INIT_RPC,
@@ -47,16 +48,13 @@ enum {
LST_INIT_CONSOLE
};
-extern int lstcon_console_init(void);
-extern int lstcon_console_fini(void);
-
static int lst_init_step = LST_INIT_NONE;
struct cfs_wi_sched *lst_sched_serial;
struct cfs_wi_sched **lst_sched_test;
static void
-lnet_selftest_fini(void)
+lnet_selftest_exit(void)
{
int i;
@@ -70,7 +68,7 @@ lnet_selftest_fini(void)
case LST_INIT_WI_TEST:
for (i = 0;
i < cfs_cpt_number(lnet_cpt_table()); i++) {
- if (lst_sched_test[i] == NULL)
+ if (!lst_sched_test[i])
continue;
cfs_wi_sched_destroy(lst_sched_test[i]);
}
@@ -98,7 +96,7 @@ lnet_selftest_init(void)
rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY,
1, &lst_sched_serial);
- if (rc != 0) {
+ if (rc) {
CERROR("Failed to create serial WI scheduler for LST\n");
return rc;
}
@@ -106,7 +104,7 @@ lnet_selftest_init(void)
nscheds = cfs_cpt_number(lnet_cpt_table());
LIBCFS_ALLOC(lst_sched_test, sizeof(lst_sched_test[0]) * nscheds);
- if (lst_sched_test == NULL)
+ if (!lst_sched_test)
goto error;
lst_init_step = LST_INIT_WI_TEST;
@@ -117,42 +115,42 @@ lnet_selftest_init(void)
nthrs = max(nthrs - 1, 1);
rc = cfs_wi_sched_create("lst_t", lnet_cpt_table(), i,
nthrs, &lst_sched_test[i]);
- if (rc != 0) {
- CERROR("Failed to create CPT affinity WI scheduler %d for LST\n",
- i);
+ if (rc) {
+ CERROR("Failed to create CPT affinity WI scheduler %d for LST\n", i);
goto error;
}
}
rc = srpc_startup();
- if (rc != 0) {
+ if (rc) {
CERROR("LST can't startup rpc\n");
goto error;
}
lst_init_step = LST_INIT_RPC;
rc = sfw_startup();
- if (rc != 0) {
+ if (rc) {
CERROR("LST can't startup framework\n");
goto error;
}
lst_init_step = LST_INIT_FW;
rc = lstcon_console_init();
- if (rc != 0) {
+ if (rc) {
CERROR("LST can't startup console\n");
goto error;
}
lst_init_step = LST_INIT_CONSOLE;
return 0;
error:
- lnet_selftest_fini();
+ lnet_selftest_exit();
return rc;
}
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("LNet Selftest");
+MODULE_VERSION("2.7.0");
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.9.0");
module_init(lnet_selftest_init);
-module_exit(lnet_selftest_fini);
+module_exit(lnet_selftest_exit);
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index d42653654fa8..81a45045e186 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -42,18 +42,18 @@
#include "selftest.h"
-#define LST_PING_TEST_MAGIC 0xbabeface
+#define LST_PING_TEST_MAGIC 0xbabeface
static int ping_srv_workitems = SFW_TEST_WI_MAX;
module_param(ping_srv_workitems, int, 0644);
MODULE_PARM_DESC(ping_srv_workitems, "# PING server workitems");
-typedef struct {
+struct lst_ping_data {
spinlock_t pnd_lock; /* serialize */
int pnd_counter; /* sequence counter */
-} lst_ping_data_t;
+};
-static lst_ping_data_t lst_ping_data;
+static struct lst_ping_data lst_ping_data;
static int
ping_client_init(sfw_test_instance_t *tsi)
@@ -61,7 +61,7 @@ ping_client_init(sfw_test_instance_t *tsi)
sfw_session_t *sn = tsi->tsi_batch->bat_session;
LASSERT(tsi->tsi_is_client);
- LASSERT(sn != NULL && (sn->sn_features & ~LST_FEATS_MASK) == 0);
+ LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
spin_lock_init(&lst_ping_data.pnd_lock);
lst_ping_data.pnd_counter = 0;
@@ -75,7 +75,7 @@ ping_client_fini(sfw_test_instance_t *tsi)
sfw_session_t *sn = tsi->tsi_batch->bat_session;
int errors;
- LASSERT(sn != NULL);
+ LASSERT(sn);
LASSERT(tsi->tsi_is_client);
errors = atomic_read(&sn->sn_ping_errors);
@@ -95,11 +95,11 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu,
struct timespec64 ts;
int rc;
- LASSERT(sn != NULL);
- LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0);
+ LASSERT(sn);
+ LASSERT(!(sn->sn_features & ~LST_FEATS_MASK));
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, 0, 0, rpc);
- if (rc != 0)
+ if (rc)
return rc;
req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst;
@@ -111,7 +111,7 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu,
spin_unlock(&lst_ping_data.pnd_lock);
ktime_get_real_ts64(&ts);
- req->pnr_time_sec = ts.tv_sec;
+ req->pnr_time_sec = ts.tv_sec;
req->pnr_time_usec = ts.tv_nsec / NSEC_PER_USEC;
return rc;
@@ -126,14 +126,14 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
struct timespec64 ts;
- LASSERT(sn != NULL);
+ LASSERT(sn);
- if (rpc->crpc_status != 0) {
+ if (rpc->crpc_status) {
if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_ping_errors);
CERROR("Unable to ping %s (%d): %d\n",
- libcfs_id2str(rpc->crpc_dest),
- reqst->pnr_seq, rpc->crpc_status);
+ libcfs_id2str(rpc->crpc_dest),
+ reqst->pnr_seq, rpc->crpc_status);
return;
}
@@ -147,8 +147,8 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
rpc->crpc_status = -EBADMSG;
atomic_inc(&sn->sn_ping_errors);
CERROR("Bad magic %u from %s, %u expected.\n",
- reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
- LST_PING_TEST_MAGIC);
+ reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
+ LST_PING_TEST_MAGIC);
return;
}
@@ -156,8 +156,8 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
rpc->crpc_status = -EBADMSG;
atomic_inc(&sn->sn_ping_errors);
CERROR("Bad seq %u from %s, %u expected.\n",
- reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
- reqst->pnr_seq);
+ reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
+ reqst->pnr_seq);
return;
}
@@ -165,13 +165,12 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
(unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
(ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
- return;
}
static int
ping_server_handle(struct srpc_server_rpc *rpc)
{
- struct srpc_service *sv = rpc->srpc_scd->scd_svc;
+ struct srpc_service *sv = rpc->srpc_scd->scd_svc;
srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
srpc_msg_t *replymsg = &rpc->srpc_replymsg;
srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst;
@@ -191,14 +190,14 @@ ping_server_handle(struct srpc_server_rpc *rpc)
if (req->pnr_magic != LST_PING_TEST_MAGIC) {
CERROR("Unexpected magic %08x from %s\n",
- req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
+ req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
return -EINVAL;
}
- rep->pnr_seq = req->pnr_seq;
+ rep->pnr_seq = req->pnr_seq;
rep->pnr_magic = LST_PING_TEST_MAGIC;
- if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
+ if (reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) {
replymsg->msg_ses_feats = LST_FEATS_MASK;
rep->pnr_status = EPROTO;
return 0;
@@ -214,8 +213,8 @@ ping_server_handle(struct srpc_server_rpc *rpc)
sfw_test_client_ops_t ping_test_client;
void ping_init_test_client(void)
{
- ping_test_client.tso_init = ping_client_init;
- ping_test_client.tso_fini = ping_client_fini;
+ ping_test_client.tso_init = ping_client_init;
+ ping_test_client.tso_fini = ping_client_fini;
ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
ping_test_client.tso_done_rpc = ping_client_done_rpc;
}
@@ -223,8 +222,8 @@ void ping_init_test_client(void)
srpc_service_t ping_test_service;
void ping_init_test_service(void)
{
- ping_test_service.sv_id = SRPC_SERVICE_PING;
- ping_test_service.sv_name = "ping_test";
- ping_test_service.sv_handler = ping_server_handle;
+ ping_test_service.sv_id = SRPC_SERVICE_PING;
+ ping_test_service.sv_name = "ping_test";
+ ping_test_service.sv_handler = ping_server_handle;
ping_test_service.sv_wi_total = ping_srv_workitems;
}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 2acf6ec717be..7d7748d96332 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -90,14 +90,14 @@ void srpc_set_counters(const srpc_counters_t *cnt)
static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
- nob = min_t(int, nob, PAGE_CACHE_SIZE);
+ nob = min_t(int, nob, PAGE_SIZE);
LASSERT(nob > 0);
LASSERT(i >= 0 && i < bk->bk_niov);
bk->bk_iovs[i].kiov_offset = 0;
- bk->bk_iovs[i].kiov_page = pg;
- bk->bk_iovs[i].kiov_len = nob;
+ bk->bk_iovs[i].kiov_page = pg;
+ bk->bk_iovs[i].kiov_len = nob;
return nob;
}
@@ -107,18 +107,17 @@ srpc_free_bulk(srpc_bulk_t *bk)
int i;
struct page *pg;
- LASSERT(bk != NULL);
+ LASSERT(bk);
for (i = 0; i < bk->bk_niov; i++) {
pg = bk->bk_iovs[i].kiov_page;
- if (pg == NULL)
+ if (!pg)
break;
__free_page(pg);
}
LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
- return;
}
srpc_bulk_t *
@@ -131,15 +130,15 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
- if (bk == NULL) {
+ if (!bk) {
CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
return NULL;
}
memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
- bk->bk_sink = sink;
- bk->bk_len = bulk_len;
- bk->bk_niov = bulk_npg;
+ bk->bk_sink = sink;
+ bk->bk_len = bulk_len;
+ bk->bk_niov = bulk_npg;
for (i = 0; i < bulk_npg; i++) {
struct page *pg;
@@ -147,7 +146,7 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
GFP_KERNEL, 0);
- if (pg == NULL) {
+ if (!pg) {
CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
srpc_free_bulk(bk);
return NULL;
@@ -183,10 +182,10 @@ srpc_init_server_rpc(struct srpc_server_rpc *rpc,
rpc->srpc_ev.ev_fired = 1; /* no event expected now */
- rpc->srpc_scd = scd;
+ rpc->srpc_scd = scd;
rpc->srpc_reqstbuf = buffer;
- rpc->srpc_peer = buffer->buf_peer;
- rpc->srpc_self = buffer->buf_self;
+ rpc->srpc_peer = buffer->buf_peer;
+ rpc->srpc_self = buffer->buf_self;
LNetInvalidateHandle(&rpc->srpc_replymdh);
}
@@ -199,7 +198,7 @@ srpc_service_fini(struct srpc_service *svc)
struct list_head *q;
int i;
- if (svc->sv_cpt_data == NULL)
+ if (!svc->sv_cpt_data)
return;
cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
@@ -212,9 +211,8 @@ srpc_service_fini(struct srpc_service *svc)
break;
while (!list_empty(q)) {
- buf = list_entry(q->next,
- struct srpc_buffer,
- buf_list);
+ buf = list_entry(q->next, struct srpc_buffer,
+ buf_list);
list_del(&buf->buf_list);
LIBCFS_FREE(buf, sizeof(*buf));
}
@@ -224,8 +222,8 @@ srpc_service_fini(struct srpc_service *svc)
while (!list_empty(&scd->scd_rpc_free)) {
rpc = list_entry(scd->scd_rpc_free.next,
- struct srpc_server_rpc,
- srpc_list);
+ struct srpc_server_rpc,
+ srpc_list);
list_del(&rpc->srpc_list);
LIBCFS_FREE(rpc, sizeof(*rpc));
}
@@ -259,7 +257,7 @@ srpc_service_init(struct srpc_service *svc)
svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(struct srpc_service_cd));
- if (svc->sv_cpt_data == NULL)
+ if (!svc->sv_cpt_data)
return -ENOMEM;
svc->sv_ncpts = srpc_serv_is_framework(svc) ?
@@ -278,23 +276,27 @@ srpc_service_init(struct srpc_service *svc)
scd->scd_ev.ev_data = scd;
scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
- /* NB: don't use lst_sched_serial for adding buffer,
- * see details in srpc_service_add_buffers() */
+ /*
+ * NB: don't use lst_sched_serial for adding buffer,
+ * see details in srpc_service_add_buffers()
+ */
swi_init_workitem(&scd->scd_buf_wi, scd,
srpc_add_buffer, lst_sched_test[i]);
- if (i != 0 && srpc_serv_is_framework(svc)) {
- /* NB: framework service only needs srpc_service_cd for
+ if (i && srpc_serv_is_framework(svc)) {
+ /*
+ * NB: framework service only needs srpc_service_cd for
* one partition, but we allocate for all to make
* it easier to implement, it will waste a little
- * memory but nobody should care about this */
+ * memory but nobody should care about this
+ */
continue;
}
for (j = 0; j < nrpcs; j++) {
LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
i, sizeof(*rpc));
- if (rpc == NULL) {
+ if (!rpc) {
srpc_service_fini(svc);
return -ENOMEM;
}
@@ -312,14 +314,14 @@ srpc_add_service(struct srpc_service *sv)
LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);
- if (srpc_service_init(sv) != 0)
+ if (srpc_service_init(sv))
return -ENOMEM;
spin_lock(&srpc_data.rpc_glock);
LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
- if (srpc_data.rpc_services[id] != NULL) {
+ if (srpc_data.rpc_services[id]) {
spin_unlock(&srpc_data.rpc_glock);
goto failed;
}
@@ -363,32 +365,31 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
- if (rc != 0) {
+ if (rc) {
CERROR("LNetMEAttach failed: %d\n", rc);
LASSERT(rc == -ENOMEM);
return -ENOMEM;
}
md.threshold = 1;
- md.user_ptr = ev;
- md.start = buf;
- md.length = len;
- md.options = options;
+ md.user_ptr = ev;
+ md.start = buf;
+ md.length = len;
+ md.options = options;
md.eq_handle = srpc_data.rpc_lnet_eq;
rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
- if (rc != 0) {
+ if (rc) {
CERROR("LNetMDAttach failed: %d\n", rc);
LASSERT(rc == -ENOMEM);
rc = LNetMEUnlink(meh);
- LASSERT(rc == 0);
+ LASSERT(!rc);
return -ENOMEM;
}
- CDEBUG(D_NET,
- "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
- libcfs_id2str(peer), portal, matchbits);
+ CDEBUG(D_NET, "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
+ libcfs_id2str(peer), portal, matchbits);
return 0;
}
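
Taken together, this hunk leaves srpc_post_passive_rdma()'s two-step shape intact: attach a match entry for the expected matchbits, then hang a single-use MD off it, unlinking the ME again if the MD attach fails so nothing is left dangling. The pattern in isolation (a condensed sketch of the function above, not new code):

	lnet_handle_me_t meh;
	lnet_md_t md;
	int rc;

	rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
			  local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
	if (rc)				/* only -ENOMEM is expected */
		return -ENOMEM;

	md.threshold = 1;		/* one event, then auto-unlink */
	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.options = options;
	md.eq_handle = srpc_data.rpc_lnet_eq;

	rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
	if (rc) {
		LNetMEUnlink(meh);	/* undo step one */
		return -ENOMEM;
	}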
@@ -400,46 +401,48 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
int rc;
lnet_md_t md;
- md.user_ptr = ev;
- md.start = buf;
- md.length = len;
+ md.user_ptr = ev;
+ md.start = buf;
+ md.length = len;
md.eq_handle = srpc_data.rpc_lnet_eq;
- md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
- md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
+ md.threshold = options & LNET_MD_OP_GET ? 2 : 1;
+ md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
rc = LNetMDBind(md, LNET_UNLINK, mdh);
- if (rc != 0) {
+ if (rc) {
CERROR("LNetMDBind failed: %d\n", rc);
LASSERT(rc == -ENOMEM);
return -ENOMEM;
}
- /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
+ /*
+ * this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
* they're only meaningful for MDs attached to an ME (i.e. passive
- * buffers... */
- if ((options & LNET_MD_OP_PUT) != 0) {
+ * buffers...
+ */
+ if (options & LNET_MD_OP_PUT) {
rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
portal, matchbits, 0, 0);
} else {
- LASSERT((options & LNET_MD_OP_GET) != 0);
+ LASSERT(options & LNET_MD_OP_GET);
rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
}
- if (rc != 0) {
+ if (rc) {
CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
- ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
- libcfs_id2str(peer), portal, matchbits, rc);
+ options & LNET_MD_OP_PUT ? "Put" : "Get",
+ libcfs_id2str(peer), portal, matchbits, rc);
- /* The forthcoming unlink event will complete this operation
+ /*
+ * The forthcoming unlink event will complete this operation
* with failure, so fall through and return success here.
*/
rc = LNetMDUnlink(*mdh);
- LASSERT(rc == 0);
+ LASSERT(!rc);
} else {
- CDEBUG(D_NET,
- "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
- libcfs_id2str(peer), portal, matchbits);
+ CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
+ libcfs_id2str(peer), portal, matchbits);
}
return 0;
}
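
One behavioural detail in the hunk above deserves a note: md.threshold is 2 for a GET because LNet delivers both a SEND and a REPLY event on the bound MD, while a PUT posted with LNET_NOACK_REQ delivers only a SEND. The OP_PUT/OP_GET bits are likewise only borrowed to pick the verb and are stripped before binding, since they are only meaningful on passive (ME-attached) MDs. Distilled (same fields as the function above; illustration only):

	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.eq_handle = srpc_data.rpc_lnet_eq;
	/* GET: SEND + REPLY both land on this MD; PUT: just SEND */
	md.threshold = options & LNET_MD_OP_GET ? 2 : 1;
	/* strip the verb bits: they don't belong in active MD options */
	md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);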
@@ -448,7 +451,7 @@ static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
lnet_handle_md_t *mdh, srpc_event_t *ev)
{
- lnet_process_id_t any = {0};
+ lnet_process_id_t any = { 0 };
any.nid = LNET_NID_ANY;
any.pid = LNET_PID_ANY;
@@ -460,10 +463,10 @@ srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
- __must_hold(&scd->scd_lock)
+__must_hold(&scd->scd_lock)
{
struct srpc_service *sv = scd->scd_svc;
- struct srpc_msg *msg = &buf->buf_msg;
+ struct srpc_msg *msg = &buf->buf_msg;
int rc;
LNetInvalidateHandle(&buf->buf_mdh);
@@ -476,19 +479,22 @@ srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
msg, sizeof(*msg), &buf->buf_mdh,
&scd->scd_ev);
- /* At this point, a RPC (new or delayed) may have arrived in
+ /*
+ * At this point, a RPC (new or delayed) may have arrived in
* msg and its event handler has been called. So we must add
- * buf to scd_buf_posted _before_ dropping scd_lock */
-
+ * buf to scd_buf_posted _before_ dropping scd_lock
+ */
spin_lock(&scd->scd_lock);
- if (rc == 0) {
+ if (!rc) {
if (!sv->sv_shuttingdown)
return 0;
spin_unlock(&scd->scd_lock);
- /* srpc_shutdown_service might have tried to unlink me
- * when my buf_mdh was still invalid */
+ /*
+ * srpc_shutdown_service might have tried to unlink me
+ * when my buf_mdh was still invalid
+ */
LNetMDUnlink(buf->buf_mdh);
spin_lock(&scd->scd_lock);
return 0;
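
The rewrapped comments above document a real ordering constraint rather than style: the buffer must already be on scd_buf_posted before scd_lock is dropped, because the incoming request event can fire the moment the MD is attached; and if the service went down while the lock was dropped, buf_mdh only became valid after srpc_shutdown_service() tried to unlink it, so the unlink is redone here. Condensed control flow (sketch only; post_request_buffer() is a hypothetical stand-in for the LNet post above):

	spin_lock(&scd->scd_lock);
	list_add(&buf->buf_list, &scd->scd_buf_posted);
	scd->scd_buf_nposted++;
	spin_unlock(&scd->scd_lock);

	/* the receive event may fire at any point from here on */
	rc = post_request_buffer(sv, msg, &buf->buf_mdh, &scd->scd_ev);

	spin_lock(&scd->scd_lock);
	if (!rc && sv->sv_shuttingdown) {
		spin_unlock(&scd->scd_lock);
		LNetMDUnlink(buf->buf_mdh);	/* redo the missed unlink */
		spin_lock(&scd->scd_lock);
	}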
@@ -514,9 +520,11 @@ srpc_add_buffer(struct swi_workitem *wi)
struct srpc_buffer *buf;
int rc = 0;
- /* it's called by workitem scheduler threads, these threads
+ /*
+ * it's called by workitem scheduler threads, these threads
* should have been set CPT affinity, so buffers will be posted
- * on CPT local list of Portal */
+ * on CPT local list of Portal
+ */
spin_lock(&scd->scd_lock);
while (scd->scd_buf_adjust > 0 &&
@@ -527,7 +535,7 @@ srpc_add_buffer(struct swi_workitem *wi)
spin_unlock(&scd->scd_lock);
LIBCFS_ALLOC(buf, sizeof(*buf));
- if (buf == NULL) {
+ if (!buf) {
CERROR("Failed to add new buf to service: %s\n",
scd->scd_svc->sv_name);
spin_lock(&scd->scd_lock);
@@ -546,7 +554,7 @@ srpc_add_buffer(struct swi_workitem *wi)
}
rc = srpc_service_post_buffer(scd, buf);
- if (rc != 0)
+ if (rc)
break; /* buf has been freed inside */
LASSERT(scd->scd_buf_posting > 0);
@@ -555,7 +563,7 @@ srpc_add_buffer(struct swi_workitem *wi)
scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
}
- if (rc != 0) {
+ if (rc) {
scd->scd_buf_err_stamp = ktime_get_real_seconds();
scd->scd_buf_err = rc;
@@ -607,12 +615,12 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
* block all WIs pending on lst_sched_serial for a moment
* which is not good but not fatal.
*/
- lst_wait_until(scd->scd_buf_err != 0 ||
- (scd->scd_buf_adjust == 0 &&
- scd->scd_buf_posting == 0),
+ lst_wait_until(scd->scd_buf_err ||
+ (!scd->scd_buf_adjust &&
+ !scd->scd_buf_posting),
scd->scd_lock, "waiting for adding buffer\n");
- if (scd->scd_buf_err != 0 && rc == 0)
+ if (scd->scd_buf_err && !rc)
rc = scd->scd_buf_err;
spin_unlock(&scd->scd_lock);
@@ -658,7 +666,7 @@ srpc_finish_service(struct srpc_service *sv)
}
if (scd->scd_buf_nposted > 0) {
- CDEBUG(D_NET, "waiting for %d posted buffers to unlink",
+ CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
scd->scd_buf_nposted);
spin_unlock(&scd->scd_lock);
return 0;
@@ -670,7 +678,7 @@ srpc_finish_service(struct srpc_service *sv)
}
rpc = list_entry(scd->scd_rpc_active.next,
- struct srpc_server_rpc, srpc_list);
+ struct srpc_server_rpc, srpc_list);
CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
swi_state2str(rpc->srpc_wi.swi_state),
@@ -690,10 +698,10 @@ srpc_finish_service(struct srpc_service *sv)
/* called with sv->sv_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
- __must_hold(&scd->scd_lock)
+__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
- if (srpc_service_post_buffer(scd, buf) != 0) {
+ if (srpc_service_post_buffer(scd, buf)) {
CWARN("Failed to post %s buffer\n",
scd->scd_svc->sv_name);
}
@@ -706,7 +714,7 @@ srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
if (scd->scd_buf_adjust < 0) {
scd->scd_buf_adjust++;
if (scd->scd_buf_adjust < 0 &&
- scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
+ !scd->scd_buf_total && !scd->scd_buf_posting) {
CDEBUG(D_INFO,
"Try to recycle %d buffers but nothing left\n",
scd->scd_buf_adjust);
@@ -732,9 +740,11 @@ srpc_abort_service(struct srpc_service *sv)
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
spin_lock(&scd->scd_lock);
- /* schedule in-flight RPCs to notice the abort, NB:
+ /*
+ * schedule in-flight RPCs to notice the abort, NB:
* racing with incoming RPCs; complete fix should make test
- * RPCs carry session ID in its headers */
+ * RPCs carry session ID in its headers
+ */
list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
rpc->srpc_aborted = 1;
swi_schedule_workitem(&rpc->srpc_wi);
@@ -772,8 +782,10 @@ srpc_shutdown_service(srpc_service_t *sv)
spin_unlock(&scd->scd_lock);
- /* OK to traverse scd_buf_posted without lock, since no one
- * touches scd_buf_posted now */
+ /*
+ * OK to traverse scd_buf_posted without lock, since no one
+ * touches scd_buf_posted now
+ */
list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
LNetMDUnlink(buf->buf_mdh);
}
@@ -786,15 +798,15 @@ srpc_send_request(srpc_client_rpc_t *rpc)
int rc;
ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REQUEST_SENT;
+ ev->ev_data = rpc;
+ ev->ev_type = SRPC_REQUEST_SENT;
rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
rpc->crpc_service, &rpc->crpc_reqstmsg,
sizeof(srpc_msg_t), LNET_MD_OP_PUT,
rpc->crpc_dest, LNET_NID_ANY,
&rpc->crpc_reqstmdh, ev);
- if (rc != 0) {
+ if (rc) {
LASSERT(rc == -ENOMEM);
ev->ev_fired = 1; /* no more event expected */
}
@@ -809,8 +821,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
int rc;
ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REPLY_RCVD;
+ ev->ev_data = rpc;
+ ev->ev_type = SRPC_REPLY_RCVD;
*id = srpc_next_id();
@@ -818,7 +830,7 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
&rpc->crpc_replymsg, sizeof(srpc_msg_t),
LNET_MD_OP_PUT, rpc->crpc_dest,
&rpc->crpc_replymdh, ev);
- if (rc != 0) {
+ if (rc) {
LASSERT(rc == -ENOMEM);
ev->ev_fired = 1; /* no more event expected */
}
@@ -830,28 +842,28 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc)
{
srpc_bulk_t *bk = &rpc->crpc_bulk;
srpc_event_t *ev = &rpc->crpc_bulkev;
- __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
+ __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
int rc;
int opt;
LASSERT(bk->bk_niov <= LNET_MAX_IOV);
- if (bk->bk_niov == 0)
+ if (!bk->bk_niov)
return 0; /* nothing to do */
opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
opt |= LNET_MD_KIOV;
ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_BULK_REQ_RCVD;
+ ev->ev_data = rpc;
+ ev->ev_type = SRPC_BULK_REQ_RCVD;
*id = srpc_next_id();
rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
&bk->bk_iovs[0], bk->bk_niov, opt,
rpc->crpc_dest, &bk->bk_mdh, ev);
- if (rc != 0) {
+ if (rc) {
LASSERT(rc == -ENOMEM);
ev->ev_fired = 1; /* no more event expected */
}
@@ -867,20 +879,20 @@ srpc_do_bulk(struct srpc_server_rpc *rpc)
int rc;
int opt;
- LASSERT(bk != NULL);
+ LASSERT(bk);
opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
opt |= LNET_MD_KIOV;
ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;
+ ev->ev_data = rpc;
+ ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;
rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
&bk->bk_iovs[0], bk->bk_niov, opt,
rpc->srpc_peer, rpc->srpc_self,
&bk->bk_mdh, ev);
- if (rc != 0)
+ if (rc)
ev->ev_fired = 1; /* no more event expected */
return rc;
}
@@ -890,33 +902,35 @@ static void
srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
struct srpc_service_cd *scd = rpc->srpc_scd;
- struct srpc_service *sv = scd->scd_svc;
+ struct srpc_service *sv = scd->scd_svc;
srpc_buffer_t *buffer;
- LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
+ LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
rpc->srpc_status = status;
- CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
- "Server RPC %p done: service %s, peer %s, status %s:%d\n",
- rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
- swi_state2str(rpc->srpc_wi.swi_state), status);
+ CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
+ "Server RPC %p done: service %s, peer %s, status %s:%d\n",
+ rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
+ swi_state2str(rpc->srpc_wi.swi_state), status);
- if (status != 0) {
+ if (status) {
spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters.rpcs_dropped++;
spin_unlock(&srpc_data.rpc_glock);
}
- if (rpc->srpc_done != NULL)
+ if (rpc->srpc_done)
(*rpc->srpc_done) (rpc);
- LASSERT(rpc->srpc_bulk == NULL);
+ LASSERT(!rpc->srpc_bulk);
spin_lock(&scd->scd_lock);
- if (rpc->srpc_reqstbuf != NULL) {
- /* NB might drop sv_lock in srpc_service_recycle_buffer, but
- * sv won't go away for scd_rpc_active must not be empty */
+ if (rpc->srpc_reqstbuf) {
+ /*
+ * NB might drop sv_lock in srpc_service_recycle_buffer, but
+ * sv won't go away for scd_rpc_active must not be empty
+ */
srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
rpc->srpc_reqstbuf = NULL;
}
@@ -934,7 +948,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
buffer = list_entry(scd->scd_buf_blocked.next,
- srpc_buffer_t, buf_list);
+ srpc_buffer_t, buf_list);
list_del(&buffer->buf_list);
srpc_init_server_rpc(rpc, scd, buffer);
@@ -945,7 +959,6 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
}
spin_unlock(&scd->scd_lock);
- return;
}
/* handles an incoming RPC */
@@ -965,7 +978,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
if (sv->sv_shuttingdown || rpc->srpc_aborted) {
spin_unlock(&scd->scd_lock);
- if (rpc->srpc_bulk != NULL)
+ if (rpc->srpc_bulk)
LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
LNetMDUnlink(rpc->srpc_replymdh);
@@ -988,7 +1001,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
msg = &rpc->srpc_reqstbuf->buf_msg;
reply = &rpc->srpc_replymsg.msg_body.reply;
- if (msg->msg_magic == 0) {
+ if (!msg->msg_magic) {
/* moaned already in srpc_lnet_ev_handler */
srpc_server_rpc_done(rpc, EBADMSG);
return 1;
@@ -1004,8 +1017,8 @@ srpc_handle_rpc(swi_workitem_t *wi)
} else {
reply->status = 0;
rc = (*sv->sv_handler)(rpc);
- LASSERT(reply->status == 0 || !rpc->srpc_bulk);
- if (rc != 0) {
+ LASSERT(!reply->status || !rpc->srpc_bulk);
+ if (rc) {
srpc_server_rpc_done(rpc, rc);
return 1;
}
@@ -1013,9 +1026,9 @@ srpc_handle_rpc(swi_workitem_t *wi)
wi->swi_state = SWI_STATE_BULK_STARTED;
- if (rpc->srpc_bulk != NULL) {
+ if (rpc->srpc_bulk) {
rc = srpc_do_bulk(rpc);
- if (rc == 0)
+ if (!rc)
return 0; /* wait for bulk */
LASSERT(ev->ev_fired);
@@ -1023,15 +1036,15 @@ srpc_handle_rpc(swi_workitem_t *wi)
}
}
case SWI_STATE_BULK_STARTED:
- LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired);
+ LASSERT(!rpc->srpc_bulk || ev->ev_fired);
- if (rpc->srpc_bulk != NULL) {
+ if (rpc->srpc_bulk) {
rc = ev->ev_status;
- if (sv->sv_bulk_ready != NULL)
+ if (sv->sv_bulk_ready)
rc = (*sv->sv_bulk_ready) (rpc, rc);
- if (rc != 0) {
+ if (rc) {
srpc_server_rpc_done(rpc, rc);
return 1;
}
@@ -1039,7 +1052,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
rc = srpc_send_reply(rpc);
- if (rc == 0)
+ if (!rc)
return 0; /* wait for reply */
srpc_server_rpc_done(rpc, rc);
return 1;
@@ -1067,8 +1080,8 @@ srpc_client_rpc_expired(void *data)
srpc_client_rpc_t *rpc = data;
CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- rpc->crpc_timeout);
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ rpc->crpc_timeout);
spin_lock(&rpc->crpc_lock);
@@ -1082,32 +1095,32 @@ srpc_client_rpc_expired(void *data)
spin_unlock(&srpc_data.rpc_glock);
}
-inline void
+static void
srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
{
- stt_timer_t *timer = &rpc->crpc_timer;
+ struct stt_timer *timer = &rpc->crpc_timer;
- if (rpc->crpc_timeout == 0)
+ if (!rpc->crpc_timeout)
return;
INIT_LIST_HEAD(&timer->stt_list);
- timer->stt_data = rpc;
- timer->stt_func = srpc_client_rpc_expired;
+ timer->stt_data = rpc;
+ timer->stt_func = srpc_client_rpc_expired;
timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
stt_add_timer(timer);
- return;
}
/*
* Called with rpc->crpc_lock held.
*
* Upon exit the RPC expiry timer is not queued and the handler is not
- * running on any CPU. */
+ * running on any CPU.
+ */
static void
srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
{
/* timer not planted or already exploded */
- if (rpc->crpc_timeout == 0)
+ if (!rpc->crpc_timeout)
return;
/* timer successfully defused */
@@ -1115,7 +1128,7 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
return;
/* timer detonated, wait for it to explode */
- while (rpc->crpc_timeout != 0) {
+ while (rpc->crpc_timeout) {
spin_unlock(&rpc->crpc_lock);
schedule();
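
The loop kept above (now with a tidied comparison) is the classic "wait out a racing handler" idiom. stt_del_timer() returning zero means the callback already fired or is firing; srpc_client_rpc_expired() clears crpc_timeout under crpc_lock as it runs, so the deleter drops the lock, yields, and re-checks until that store is seen. Both halves side by side (condensed sketch; assumes, as in this file, that the handler zeroes crpc_timeout):

	/* deleter, entered with rpc->crpc_lock held */
	if (stt_del_timer(&rpc->crpc_timer))
		return;				/* timer defused in time */

	while (rpc->crpc_timeout) {		/* handler still running */
		spin_unlock(&rpc->crpc_lock);
		schedule();			/* let the handler progress */
		spin_lock(&rpc->crpc_lock);
	}

	/* handler (srpc_client_rpc_expired), simplified */
	spin_lock(&rpc->crpc_lock);
	rpc->crpc_timeout = 0;			/* releases the waiter above */
	srpc_abort_rpc(rpc, -ETIMEDOUT);
	spin_unlock(&rpc->crpc_lock);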
@@ -1129,20 +1142,20 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
{
swi_workitem_t *wi = &rpc->crpc_wi;
- LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
+ LASSERT(status || wi->swi_state == SWI_STATE_DONE);
spin_lock(&rpc->crpc_lock);
rpc->crpc_closed = 1;
- if (rpc->crpc_status == 0)
+ if (!rpc->crpc_status)
rpc->crpc_status = status;
srpc_del_client_rpc_timer(rpc);
- CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
- "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
+ CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
+ "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
/*
* No one can schedule me now since:
@@ -1158,7 +1171,6 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
spin_unlock(&rpc->crpc_lock);
(*rpc->crpc_done)(rpc);
- return;
}
/* sends an outgoing RPC */
@@ -1170,11 +1182,11 @@ srpc_send_rpc(swi_workitem_t *wi)
srpc_msg_t *reply;
int do_bulk;
- LASSERT(wi != NULL);
+ LASSERT(wi);
rpc = wi->swi_workitem.wi_data;
- LASSERT(rpc != NULL);
+ LASSERT(rpc);
LASSERT(wi == &rpc->crpc_wi);
reply = &rpc->crpc_replymsg;
@@ -1196,13 +1208,13 @@ srpc_send_rpc(swi_workitem_t *wi)
LASSERT(!srpc_event_pending(rpc));
rc = srpc_prepare_reply(rpc);
- if (rc != 0) {
+ if (rc) {
srpc_client_rpc_done(rpc, rc);
return 1;
}
rc = srpc_prepare_bulk(rpc);
- if (rc != 0)
+ if (rc)
break;
wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
@@ -1210,14 +1222,16 @@ srpc_send_rpc(swi_workitem_t *wi)
break;
case SWI_STATE_REQUEST_SUBMITTED:
- /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
+ /*
+ * CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
* order; however, they're processed in a strict order:
- * rqt, rpy, and bulk. */
+ * rqt, rpy, and bulk.
+ */
if (!rpc->crpc_reqstev.ev_fired)
break;
rc = rpc->crpc_reqstev.ev_status;
- if (rc != 0)
+ if (rc)
break;
wi->swi_state = SWI_STATE_REQUEST_SENT;
@@ -1229,7 +1243,7 @@ srpc_send_rpc(swi_workitem_t *wi)
break;
rc = rpc->crpc_replyev.ev_status;
- if (rc != 0)
+ if (rc)
break;
srpc_unpack_msg_hdr(reply);
@@ -1244,7 +1258,7 @@ srpc_send_rpc(swi_workitem_t *wi)
break;
}
- if (do_bulk && reply->msg_body.reply.status != 0) {
+ if (do_bulk && reply->msg_body.reply.status) {
CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
reply->msg_body.reply.status,
libcfs_id2str(rpc->crpc_dest));
@@ -1259,12 +1273,14 @@ srpc_send_rpc(swi_workitem_t *wi)
rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;
- /* Bulk buffer was unlinked due to remote error. Clear error
+ /*
+ * Bulk buffer was unlinked due to remote error. Clear error
* since reply buffer still contains valid data.
* NB rpc->crpc_done shouldn't look into bulk data in case of
- * remote error. */
+ * remote error.
+ */
if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
- rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
+ !rpc->crpc_status && reply->msg_body.reply.status)
rc = 0;
wi->swi_state = SWI_STATE_DONE;
@@ -1272,7 +1288,7 @@ srpc_send_rpc(swi_workitem_t *wi)
return 1;
}
- if (rc != 0) {
+ if (rc) {
spin_lock(&rpc->crpc_lock);
srpc_abort_rpc(rpc, rc);
spin_unlock(&rpc->crpc_lock);
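
Most edits in srpc_send_rpc() are the same boolean cleanup, but read together they trace the client-side state machine: NEWBORN posts the reply and bulk landing pads and sends the request; the later states each wait on one event, processed in strict rqt, rpy, bulk order even though the events may arrive in any order; any error falls out to srpc_abort_rpc(). A heavily condensed skeleton (illustration only, not the committed function; per-stage error checks omitted):

	switch (wi->swi_state) {
	case SWI_STATE_NEWBORN:
		/* post reply + bulk buffers, then the request PUT */
		wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
		break;
	case SWI_STATE_REQUEST_SUBMITTED:
		if (!rpc->crpc_reqstev.ev_fired)
			break;			/* request send pending */
		wi->swi_state = SWI_STATE_REQUEST_SENT;
	case SWI_STATE_REQUEST_SENT:
		if (!rpc->crpc_replyev.ev_fired)
			break;			/* reply not landed yet */
		wi->swi_state = SWI_STATE_REPLY_RECEIVED;
	case SWI_STATE_REPLY_RECEIVED:
		if (do_bulk && !rpc->crpc_bulkev.ev_fired)
			break;			/* bulk still moving */
		wi->swi_state = SWI_STATE_DONE;
		srpc_client_rpc_done(rpc, rc);
		return 1;
	}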
@@ -1294,15 +1310,15 @@ abort:
srpc_client_rpc_t *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
- int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+ int nbulkiov, int bulklen,
+ void (*rpc_done)(srpc_client_rpc_t *),
+ void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
srpc_client_rpc_t *rpc;
LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
crpc_bulk.bk_iovs[nbulkiov]));
- if (rpc == NULL)
+ if (!rpc)
return NULL;
srpc_init_client_rpc(rpc, peer, service, nbulkiov,
@@ -1314,21 +1330,19 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
void
srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
{
- LASSERT(why != 0);
+ LASSERT(why);
if (rpc->crpc_aborted || /* already aborted */
- rpc->crpc_closed) /* callback imminent */
+ rpc->crpc_closed) /* callback imminent */
return;
- CDEBUG(D_NET,
- "Aborting RPC: service %d, peer %s, state %s, why %d\n",
- rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
- swi_state2str(rpc->crpc_wi.swi_state), why);
+ CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
+ rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
+ swi_state2str(rpc->crpc_wi.swi_state), why);
rpc->crpc_aborted = 1;
- rpc->crpc_status = why;
+ rpc->crpc_status = why;
swi_schedule_workitem(&rpc->crpc_wi);
- return;
}
/* called with rpc->crpc_lock held */
@@ -1339,12 +1353,11 @@ srpc_post_rpc(srpc_client_rpc_t *rpc)
LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
- libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
- rpc->crpc_timeout);
+ libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
+ rpc->crpc_timeout);
srpc_add_client_rpc_timer(rpc);
swi_schedule_workitem(&rpc->crpc_wi);
- return;
}
int
@@ -1358,15 +1371,17 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
__u64 rpyid;
int rc;
- LASSERT(buffer != NULL);
+ LASSERT(buffer);
rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
spin_lock(&scd->scd_lock);
if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
- /* Repost buffer before replying since test client
- * might send me another RPC once it gets the reply */
- if (srpc_service_post_buffer(scd, buffer) != 0)
+ /*
+ * Repost buffer before replying since test client
+ * might send me another RPC once it gets the reply
+ */
+ if (srpc_service_post_buffer(scd, buffer))
CWARN("Failed to repost %s buffer\n", sv->sv_name);
rpc->srpc_reqstbuf = NULL;
}
@@ -1374,18 +1389,18 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
spin_unlock(&scd->scd_lock);
ev->ev_fired = 0;
- ev->ev_data = rpc;
- ev->ev_type = SRPC_REPLY_SENT;
+ ev->ev_data = rpc;
+ ev->ev_type = SRPC_REPLY_SENT;
- msg->msg_magic = SRPC_MSG_MAGIC;
+ msg->msg_magic = SRPC_MSG_MAGIC;
msg->msg_version = SRPC_MSG_VERSION;
- msg->msg_type = srpc_service2reply(sv->sv_id);
+ msg->msg_type = srpc_service2reply(sv->sv_id);
rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
sizeof(*msg), LNET_MD_OP_PUT,
rpc->srpc_peer, rpc->srpc_self,
&rpc->srpc_replymdh, ev);
- if (rc != 0)
+ if (rc)
ev->ev_fired = 1; /* no more event expected */
return rc;
}
@@ -1405,10 +1420,17 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
LASSERT(!in_interrupt());
- if (ev->status != 0) {
+ if (ev->status) {
+ __u32 errors;
+
spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.errors++;
+ if (ev->status != -ECANCELED) /* cancellation is not error */
+ srpc_data.rpc_counters.errors++;
+ errors = srpc_data.rpc_counters.errors;
spin_unlock(&srpc_data.rpc_glock);
+
+ CNETERR("LNet event status %d type %d, RPC errors %u\n",
+ ev->status, ev->type, errors);
}
rpcev->ev_lnet = ev->type;
@@ -1419,7 +1441,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
LBUG();
case SRPC_REQUEST_SENT:
- if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
+ if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
spin_lock(&srpc_data.rpc_glock);
srpc_data.rpc_counters.rpcs_sent++;
spin_unlock(&srpc_data.rpc_glock);
@@ -1441,8 +1463,8 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
spin_lock(&crpc->crpc_lock);
- LASSERT(rpcev->ev_fired == 0);
- rpcev->ev_fired = 1;
+ LASSERT(!rpcev->ev_fired);
+ rpcev->ev_fired = 1;
rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
-EINTR : ev->status;
swi_schedule_workitem(&crpc->crpc_wi);
@@ -1460,9 +1482,9 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
LASSERT(ev->unlinked);
LASSERT(ev->type == LNET_EVENT_PUT ||
- ev->type == LNET_EVENT_UNLINK);
+ ev->type == LNET_EVENT_UNLINK);
LASSERT(ev->type != LNET_EVENT_UNLINK ||
- sv->sv_shuttingdown);
+ sv->sv_shuttingdown);
buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
buffer->buf_peer = ev->initiator;
@@ -1472,21 +1494,23 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
scd->scd_buf_nposted--;
if (sv->sv_shuttingdown) {
- /* Leave buffer on scd->scd_buf_nposted since
- * srpc_finish_service needs to traverse it. */
+ /*
+ * Leave buffer on scd->scd_buf_nposted since
+ * srpc_finish_service needs to traverse it.
+ */
spin_unlock(&scd->scd_lock);
break;
}
- if (scd->scd_buf_err_stamp != 0 &&
+ if (scd->scd_buf_err_stamp &&
scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
/* re-enable adding buffer */
scd->scd_buf_err_stamp = 0;
scd->scd_buf_err = 0;
}
- if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
- scd->scd_buf_adjust == 0 &&
+ if (!scd->scd_buf_err && /* adding buffer is enabled */
+ !scd->scd_buf_adjust &&
scd->scd_buf_nposted < scd->scd_buf_low) {
scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
SFW_TEST_WI_MIN);
@@ -1497,7 +1521,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
msg = &buffer->buf_msg;
type = srpc_service2request(sv->sv_id);
- if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
+ if (ev->status || ev->mlength != sizeof(*msg) ||
(msg->msg_type != type &&
msg->msg_type != __swab32(type)) ||
(msg->msg_magic != SRPC_MSG_MAGIC &&
@@ -1507,25 +1531,27 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
ev->status, ev->mlength,
msg->msg_type, msg->msg_magic);
- /* NB can't call srpc_service_recycle_buffer here since
+ /*
+ * NB can't call srpc_service_recycle_buffer here since
* it may call LNetM[DE]Attach. The invalid magic tells
- * srpc_handle_rpc to drop this RPC */
+ * srpc_handle_rpc to drop this RPC
+ */
msg->msg_magic = 0;
}
if (!list_empty(&scd->scd_rpc_free)) {
srpc = list_entry(scd->scd_rpc_free.next,
- struct srpc_server_rpc,
- srpc_list);
+ struct srpc_server_rpc,
+ srpc_list);
list_del(&srpc->srpc_list);
srpc_init_server_rpc(srpc, scd, buffer);
list_add_tail(&srpc->srpc_list,
- &scd->scd_rpc_active);
+ &scd->scd_rpc_active);
swi_schedule_workitem(&srpc->srpc_wi);
} else {
list_add_tail(&buffer->buf_list,
- &scd->scd_buf_blocked);
+ &scd->scd_buf_blocked);
}
spin_unlock(&scd->scd_lock);
@@ -1537,14 +1563,14 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
case SRPC_BULK_GET_RPLD:
LASSERT(ev->type == LNET_EVENT_SEND ||
- ev->type == LNET_EVENT_REPLY ||
- ev->type == LNET_EVENT_UNLINK);
+ ev->type == LNET_EVENT_REPLY ||
+ ev->type == LNET_EVENT_UNLINK);
if (!ev->unlinked)
break; /* wait for final event */
case SRPC_BULK_PUT_SENT:
- if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
+ if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
spin_lock(&srpc_data.rpc_glock);
if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
@@ -1556,13 +1582,13 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
}
case SRPC_REPLY_SENT:
srpc = rpcev->ev_data;
- scd = srpc->srpc_scd;
+ scd = srpc->srpc_scd;
LASSERT(rpcev == &srpc->srpc_ev);
spin_lock(&scd->scd_lock);
- rpcev->ev_fired = 1;
+ rpcev->ev_fired = 1;
rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
-EINTR : ev->status;
swi_schedule_workitem(&srpc->srpc_wi);
@@ -1587,7 +1613,7 @@ srpc_startup(void)
srpc_data.rpc_state = SRPC_STATE_NONE;
- rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
+ rc = LNetNIInit(LNET_PID_LUSTRE);
if (rc < 0) {
CERROR("LNetNIInit() has failed: %d\n", rc);
return rc;
@@ -1597,22 +1623,22 @@ srpc_startup(void)
LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
- if (rc != 0) {
+ if (rc) {
CERROR("LNetEQAlloc() has failed: %d\n", rc);
goto bail;
}
rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
- LASSERT(rc == 0);
+ LASSERT(!rc);
rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
- LASSERT(rc == 0);
+ LASSERT(!rc);
srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
rc = stt_startup();
bail:
- if (rc != 0)
+ if (rc)
srpc_shutdown();
else
srpc_data.rpc_state = SRPC_STATE_RUNNING;
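
Note the pairing here: srpc_startup() advances srpc_data.rpc_state as each layer comes up, and srpc_shutdown() (next hunk) unwinds with a fall-through switch from whatever state was reached, so a failed startup tears down exactly what it initialized. The idiom, condensed from the code below:

	switch (state) {
	default:
		LBUG();
	case SRPC_STATE_RUNNING:
		/* ... unregister services, stop timers ... */
	case SRPC_STATE_EQ_INIT:
		LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
		LNetEQFree(srpc_data.rpc_lnet_eq);
	case SRPC_STATE_NI_INIT:
		LNetNIFini();
	}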
@@ -1639,9 +1665,8 @@ srpc_shutdown(void)
for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
srpc_service_t *sv = srpc_data.rpc_services[i];
- LASSERTF(sv == NULL,
- "service not empty: id %d, name %s\n",
- i, sv->sv_name);
+ LASSERTF(!sv, "service not empty: id %d, name %s\n",
+ i, sv->sv_name);
}
spin_unlock(&srpc_data.rpc_glock);
@@ -1651,13 +1676,11 @@ srpc_shutdown(void)
case SRPC_STATE_EQ_INIT:
rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
- LASSERT(rc == 0);
+ LASSERT(!rc);
rc = LNetEQFree(srpc_data.rpc_lnet_eq);
- LASSERT(rc == 0); /* the EQ should have no user by now */
+ LASSERT(!rc); /* the EQ should have no user by now */
case SRPC_STATE_NI_INIT:
LNetNIFini();
}
-
- return;
}
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 6b4a32a90857..a79c315f2ceb 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -45,24 +45,24 @@
* XXX: *REPLY == *REQST + 1
*/
typedef enum {
- SRPC_MSG_MKSN_REQST = 0,
- SRPC_MSG_MKSN_REPLY = 1,
- SRPC_MSG_RMSN_REQST = 2,
- SRPC_MSG_RMSN_REPLY = 3,
- SRPC_MSG_BATCH_REQST = 4,
- SRPC_MSG_BATCH_REPLY = 5,
- SRPC_MSG_STAT_REQST = 6,
- SRPC_MSG_STAT_REPLY = 7,
- SRPC_MSG_TEST_REQST = 8,
- SRPC_MSG_TEST_REPLY = 9,
- SRPC_MSG_DEBUG_REQST = 10,
- SRPC_MSG_DEBUG_REPLY = 11,
- SRPC_MSG_BRW_REQST = 12,
- SRPC_MSG_BRW_REPLY = 13,
- SRPC_MSG_PING_REQST = 14,
- SRPC_MSG_PING_REPLY = 15,
- SRPC_MSG_JOIN_REQST = 16,
- SRPC_MSG_JOIN_REPLY = 17,
+ SRPC_MSG_MKSN_REQST = 0,
+ SRPC_MSG_MKSN_REPLY = 1,
+ SRPC_MSG_RMSN_REQST = 2,
+ SRPC_MSG_RMSN_REPLY = 3,
+ SRPC_MSG_BATCH_REQST = 4,
+ SRPC_MSG_BATCH_REPLY = 5,
+ SRPC_MSG_STAT_REQST = 6,
+ SRPC_MSG_STAT_REPLY = 7,
+ SRPC_MSG_TEST_REQST = 8,
+ SRPC_MSG_TEST_REPLY = 9,
+ SRPC_MSG_DEBUG_REQST = 10,
+ SRPC_MSG_DEBUG_REPLY = 11,
+ SRPC_MSG_BRW_REQST = 12,
+ SRPC_MSG_BRW_REPLY = 13,
+ SRPC_MSG_PING_REQST = 14,
+ SRPC_MSG_PING_REPLY = 15,
+ SRPC_MSG_JOIN_REQST = 16,
+ SRPC_MSG_JOIN_REPLY = 17,
} srpc_msg_type_t;
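
The re-aligned enum preserves the invariant flagged by the XXX note above it: every reply opcode is its request opcode plus one, which is what lets srpc_service2reply() in selftest.h (further down) stay a one-liner. A hypothetical compile-time check, not part of the patch (BUILD_BUG_ON needs statement context, so it would live inside some init function):

	/* would trip if someone reorders the enum */
	BUILD_BUG_ON(SRPC_MSG_PING_REPLY != SRPC_MSG_PING_REQST + 1);
	BUILD_BUG_ON(SRPC_MSG_JOIN_REPLY != SRPC_MSG_JOIN_REQST + 1);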
/* CAVEAT EMPTOR:
@@ -78,127 +78,127 @@ typedef struct {
} WIRE_ATTR srpc_generic_reqst_t;
typedef struct {
- __u32 status;
- lst_sid_t sid;
+ __u32 status;
+ lst_sid_t sid;
} WIRE_ATTR srpc_generic_reply_t;
/* FRAMEWORK RPCs */
typedef struct {
- __u64 mksn_rpyid; /* reply buffer matchbits */
- lst_sid_t mksn_sid; /* session id */
- __u32 mksn_force; /* use brute force */
+ __u64 mksn_rpyid; /* reply buffer matchbits */
+ lst_sid_t mksn_sid; /* session id */
+ __u32 mksn_force; /* use brute force */
char mksn_name[LST_NAME_SIZE];
} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */
typedef struct {
- __u32 mksn_status; /* session status */
- lst_sid_t mksn_sid; /* session id */
- __u32 mksn_timeout; /* session timeout */
- char mksn_name[LST_NAME_SIZE];
+ __u32 mksn_status; /* session status */
+ lst_sid_t mksn_sid; /* session id */
+ __u32 mksn_timeout; /* session timeout */
+ char mksn_name[LST_NAME_SIZE];
} WIRE_ATTR srpc_mksn_reply_t; /* make session reply */
typedef struct {
- __u64 rmsn_rpyid; /* reply buffer matchbits */
- lst_sid_t rmsn_sid; /* session id */
+ __u64 rmsn_rpyid; /* reply buffer matchbits */
+ lst_sid_t rmsn_sid; /* session id */
} WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */
typedef struct {
- __u32 rmsn_status;
- lst_sid_t rmsn_sid; /* session id */
+ __u32 rmsn_status;
+ lst_sid_t rmsn_sid; /* session id */
} WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */
typedef struct {
- __u64 join_rpyid; /* reply buffer matchbits */
- lst_sid_t join_sid; /* session id to join */
- char join_group[LST_NAME_SIZE]; /* group name */
+ __u64 join_rpyid; /* reply buffer matchbits */
+ lst_sid_t join_sid; /* session id to join */
+ char join_group[LST_NAME_SIZE]; /* group name */
} WIRE_ATTR srpc_join_reqst_t;
typedef struct {
- __u32 join_status; /* returned status */
- lst_sid_t join_sid; /* session id */
- __u32 join_timeout; /* # seconds' inactivity to
+ __u32 join_status; /* returned status */
+ lst_sid_t join_sid; /* session id */
+ __u32 join_timeout; /* # seconds' inactivity to
* expire */
- char join_session[LST_NAME_SIZE]; /* session name */
+ char join_session[LST_NAME_SIZE]; /* session name */
} WIRE_ATTR srpc_join_reply_t;
typedef struct {
- __u64 dbg_rpyid; /* reply buffer matchbits */
- lst_sid_t dbg_sid; /* session id */
- __u32 dbg_flags; /* bitmap of debug */
+ __u64 dbg_rpyid; /* reply buffer matchbits */
+ lst_sid_t dbg_sid; /* session id */
+ __u32 dbg_flags; /* bitmap of debug */
} WIRE_ATTR srpc_debug_reqst_t;
typedef struct {
- __u32 dbg_status; /* returned code */
- lst_sid_t dbg_sid; /* session id */
- __u32 dbg_timeout; /* session timeout */
- __u32 dbg_nbatch; /* # of batches in the node */
- char dbg_name[LST_NAME_SIZE]; /* session name */
+ __u32 dbg_status; /* returned code */
+ lst_sid_t dbg_sid; /* session id */
+ __u32 dbg_timeout; /* session timeout */
+ __u32 dbg_nbatch; /* # of batches in the node */
+ char dbg_name[LST_NAME_SIZE]; /* session name */
} WIRE_ATTR srpc_debug_reply_t;
-#define SRPC_BATCH_OPC_RUN 1
-#define SRPC_BATCH_OPC_STOP 2
-#define SRPC_BATCH_OPC_QUERY 3
+#define SRPC_BATCH_OPC_RUN 1
+#define SRPC_BATCH_OPC_STOP 2
+#define SRPC_BATCH_OPC_QUERY 3
typedef struct {
- __u64 bar_rpyid; /* reply buffer matchbits */
- lst_sid_t bar_sid; /* session id */
- lst_bid_t bar_bid; /* batch id */
- __u32 bar_opc; /* create/start/stop batch */
- __u32 bar_testidx; /* index of test */
- __u32 bar_arg; /* parameters */
+ __u64 bar_rpyid; /* reply buffer matchbits */
+ lst_sid_t bar_sid; /* session id */
+ lst_bid_t bar_bid; /* batch id */
+ __u32 bar_opc; /* create/start/stop batch */
+ __u32 bar_testidx; /* index of test */
+ __u32 bar_arg; /* parameters */
} WIRE_ATTR srpc_batch_reqst_t;
typedef struct {
- __u32 bar_status; /* status of request */
- lst_sid_t bar_sid; /* session id */
- __u32 bar_active; /* # of active tests in batch/test */
- __u32 bar_time; /* remained time */
+ __u32 bar_status; /* status of request */
+ lst_sid_t bar_sid; /* session id */
+ __u32 bar_active; /* # of active tests in batch/test */
+ __u32 bar_time; /* remained time */
} WIRE_ATTR srpc_batch_reply_t;
typedef struct {
- __u64 str_rpyid; /* reply buffer matchbits */
- lst_sid_t str_sid; /* session id */
- __u32 str_type; /* type of stat */
+ __u64 str_rpyid; /* reply buffer matchbits */
+ lst_sid_t str_sid; /* session id */
+ __u32 str_type; /* type of stat */
} WIRE_ATTR srpc_stat_reqst_t;
typedef struct {
- __u32 str_status;
- lst_sid_t str_sid;
- sfw_counters_t str_fw;
+ __u32 str_status;
+ lst_sid_t str_sid;
+ sfw_counters_t str_fw;
srpc_counters_t str_rpc;
lnet_counters_t str_lnet;
} WIRE_ATTR srpc_stat_reply_t;
typedef struct {
- __u32 blk_opc; /* bulk operation code */
- __u32 blk_npg; /* # of pages */
- __u32 blk_flags; /* reserved flags */
+ __u32 blk_opc; /* bulk operation code */
+ __u32 blk_npg; /* # of pages */
+ __u32 blk_flags; /* reserved flags */
} WIRE_ATTR test_bulk_req_t;
typedef struct {
- __u16 blk_opc; /* bulk operation code */
- __u16 blk_flags; /* data check flags */
- __u32 blk_len; /* data length */
- __u32 blk_offset; /* reserved: offset */
+ __u16 blk_opc; /* bulk operation code */
+ __u16 blk_flags; /* data check flags */
+ __u32 blk_len; /* data length */
+ __u32 blk_offset; /* reserved: offset */
} WIRE_ATTR test_bulk_req_v1_t;
typedef struct {
- __u32 png_size; /* size of ping message */
- __u32 png_flags; /* reserved flags */
+ __u32 png_size; /* size of ping message */
+ __u32 png_flags; /* reserved flags */
} WIRE_ATTR test_ping_req_t;
typedef struct {
- __u64 tsr_rpyid; /* reply buffer matchbits */
- __u64 tsr_bulkid; /* bulk buffer matchbits */
+ __u64 tsr_rpyid; /* reply buffer matchbits */
+ __u64 tsr_bulkid; /* bulk buffer matchbits */
lst_sid_t tsr_sid; /* session id */
lst_bid_t tsr_bid; /* batch id */
- __u32 tsr_service; /* test type: bulk|ping|... */
- __u32 tsr_loop; /* test client loop count or
+ __u32 tsr_service; /* test type: bulk|ping|... */
+ __u32 tsr_loop; /* test client loop count or
* # server buffers needed */
- __u32 tsr_concur; /* concurrency of test */
- __u8 tsr_is_client; /* is test client or not */
+ __u32 tsr_concur; /* concurrency of test */
+ __u8 tsr_is_client; /* is test client or not */
__u8 tsr_stop_onerr; /* stop on error */
- __u32 tsr_ndest; /* # of dest nodes */
+ __u32 tsr_ndest; /* # of dest nodes */
union {
test_ping_req_t ping;
@@ -208,7 +208,7 @@ typedef struct {
} WIRE_ATTR srpc_test_reqst_t;
typedef struct {
- __u32 tsr_status; /* returned code */
+ __u32 tsr_status; /* returned code */
lst_sid_t tsr_sid;
} WIRE_ATTR srpc_test_reply_t;
@@ -228,19 +228,19 @@ typedef struct {
} WIRE_ATTR srpc_ping_reply_t;
typedef struct {
- __u64 brw_rpyid; /* reply buffer matchbits */
- __u64 brw_bulkid; /* bulk buffer matchbits */
- __u32 brw_rw; /* read or write */
- __u32 brw_len; /* bulk data len */
- __u32 brw_flags; /* bulk data patterns */
+ __u64 brw_rpyid; /* reply buffer matchbits */
+ __u64 brw_bulkid; /* bulk buffer matchbits */
+ __u32 brw_rw; /* read or write */
+ __u32 brw_len; /* bulk data len */
+ __u32 brw_flags; /* bulk data patterns */
} WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */
typedef struct {
__u32 brw_status;
} WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */
-#define SRPC_MSG_MAGIC 0xeeb0f00d
-#define SRPC_MSG_VERSION 1
+#define SRPC_MSG_MAGIC 0xeeb0f00d
+#define SRPC_MSG_VERSION 1
typedef struct srpc_msg {
__u32 msg_magic; /* magic number */
@@ -281,8 +281,10 @@ srpc_unpack_msg_hdr(srpc_msg_t *msg)
if (msg->msg_magic == SRPC_MSG_MAGIC)
return; /* no flipping needed */
- /* We do not swap the magic number here as it is needed to
- determine whether the body needs to be swapped. */
+ /*
+ * We do not swap the magic number here as it is needed to
+ * determine whether the body needs to be swapped.
+ */
/* __swab32s(&msg->msg_magic); */
__swab32s(&msg->msg_type);
__swab32s(&msg->msg_version);
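
The rewrapped comment marks the load-bearing trick of the wire format: msg_magic is deliberately not swabbed, because it is the endianness discriminator itself. After srpc_unpack_msg_hdr() fixes the header, a body handler checks the magic to decide whether the body fields need swabbing too; roughly (hypothetical example helper, using the generic reply body):

	static void example_unpack_reply(srpc_msg_t *msg)
	{
		if (msg->msg_magic == __swab32(SRPC_MSG_MAGIC)) {
			/* peer is opposite-endian: fix the body */
			__swab32s(&msg->msg_body.reply.status);
		}
		/* msg_magic == SRPC_MSG_MAGIC: native order, no-op */
	}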
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index 870498339538..e689ca1846e1 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -56,14 +56,14 @@
#define MADE_WITHOUT_COMPROMISE
#endif
-#define SWI_STATE_NEWBORN 0
-#define SWI_STATE_REPLY_SUBMITTED 1
-#define SWI_STATE_REPLY_SENT 2
-#define SWI_STATE_REQUEST_SUBMITTED 3
-#define SWI_STATE_REQUEST_SENT 4
-#define SWI_STATE_REPLY_RECEIVED 5
-#define SWI_STATE_BULK_STARTED 6
-#define SWI_STATE_DONE 10
+#define SWI_STATE_NEWBORN 0
+#define SWI_STATE_REPLY_SUBMITTED 1
+#define SWI_STATE_REPLY_SENT 2
+#define SWI_STATE_REQUEST_SUBMITTED 3
+#define SWI_STATE_REQUEST_SENT 4
+#define SWI_STATE_REPLY_RECEIVED 5
+#define SWI_STATE_BULK_STARTED 6
+#define SWI_STATE_DONE 10
/* forward refs */
struct srpc_service;
@@ -74,31 +74,31 @@ struct sfw_test_instance;
/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
* services, e.g. create/modify session.
*/
-#define SRPC_SERVICE_DEBUG 0
-#define SRPC_SERVICE_MAKE_SESSION 1
-#define SRPC_SERVICE_REMOVE_SESSION 2
-#define SRPC_SERVICE_BATCH 3
-#define SRPC_SERVICE_TEST 4
-#define SRPC_SERVICE_QUERY_STAT 5
-#define SRPC_SERVICE_JOIN 6
-#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10
+#define SRPC_SERVICE_DEBUG 0
+#define SRPC_SERVICE_MAKE_SESSION 1
+#define SRPC_SERVICE_REMOVE_SESSION 2
+#define SRPC_SERVICE_BATCH 3
+#define SRPC_SERVICE_TEST 4
+#define SRPC_SERVICE_QUERY_STAT 5
+#define SRPC_SERVICE_JOIN 6
+#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10
/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
-#define SRPC_SERVICE_BRW 11
-#define SRPC_SERVICE_PING 12
-#define SRPC_SERVICE_MAX_ID 12
+#define SRPC_SERVICE_BRW 11
+#define SRPC_SERVICE_PING 12
+#define SRPC_SERVICE_MAX_ID 12
-#define SRPC_REQUEST_PORTAL 50
+#define SRPC_REQUEST_PORTAL 50
/* a lazy portal for framework RPC requests */
-#define SRPC_FRAMEWORK_REQUEST_PORTAL 51
+#define SRPC_FRAMEWORK_REQUEST_PORTAL 51
/* all reply/bulk RDMAs go to this portal */
-#define SRPC_RDMA_PORTAL 52
+#define SRPC_RDMA_PORTAL 52
static inline srpc_msg_type_t
-srpc_service2request (int service)
+srpc_service2request(int service)
{
switch (service) {
default:
- LBUG ();
+ LBUG();
case SRPC_SERVICE_DEBUG:
return SRPC_MSG_DEBUG_REQST;
@@ -129,7 +129,7 @@ srpc_service2request (int service)
}
static inline srpc_msg_type_t
-srpc_service2reply (int service)
+srpc_service2reply(int service)
{
return srpc_service2request(service) + 1;
}
@@ -149,25 +149,25 @@ typedef enum {
typedef struct {
srpc_event_type_t ev_type; /* what's up */
lnet_event_kind_t ev_lnet; /* LNet event type */
- int ev_fired; /* LNet event fired? */
- int ev_status; /* LNet event status */
- void *ev_data; /* owning server/client RPC */
+ int ev_fired; /* LNet event fired? */
+ int ev_status; /* LNet event status */
+ void *ev_data; /* owning server/client RPC */
} srpc_event_t;
typedef struct {
- int bk_len; /* len of bulk data */
+ int bk_len; /* len of bulk data */
lnet_handle_md_t bk_mdh;
- int bk_sink; /* sink/source */
- int bk_niov; /* # iov in bk_iovs */
- lnet_kiov_t bk_iovs[0];
+ int bk_sink; /* sink/source */
+ int bk_niov; /* # iov in bk_iovs */
+ lnet_kiov_t bk_iovs[0];
} srpc_bulk_t; /* bulk descriptor */
/* message buffer descriptor */
typedef struct srpc_buffer {
struct list_head buf_list; /* chain on srpc_service::*_msgq */
- srpc_msg_t buf_msg;
+ srpc_msg_t buf_msg;
lnet_handle_md_t buf_mdh;
- lnet_nid_t buf_self;
+ lnet_nid_t buf_self;
lnet_process_id_t buf_peer;
} srpc_buffer_t;
@@ -176,9 +176,9 @@ typedef int (*swi_action_t) (struct swi_workitem *);
typedef struct swi_workitem {
struct cfs_wi_sched *swi_sched;
- cfs_workitem_t swi_workitem;
- swi_action_t swi_action;
- int swi_state;
+ cfs_workitem_t swi_workitem;
+ swi_action_t swi_action;
+ int swi_state;
} swi_workitem_t;
/* server-side state of a RPC */
@@ -186,78 +186,78 @@ struct srpc_server_rpc {
/* chain on srpc_service::*_rpcq */
struct list_head srpc_list;
struct srpc_service_cd *srpc_scd;
- swi_workitem_t srpc_wi;
- srpc_event_t srpc_ev; /* bulk/reply event */
- lnet_nid_t srpc_self;
+ swi_workitem_t srpc_wi;
+ srpc_event_t srpc_ev; /* bulk/reply event */
+ lnet_nid_t srpc_self;
lnet_process_id_t srpc_peer;
- srpc_msg_t srpc_replymsg;
+ srpc_msg_t srpc_replymsg;
lnet_handle_md_t srpc_replymdh;
- srpc_buffer_t *srpc_reqstbuf;
- srpc_bulk_t *srpc_bulk;
+ srpc_buffer_t *srpc_reqstbuf;
+ srpc_bulk_t *srpc_bulk;
- unsigned int srpc_aborted; /* being given up */
- int srpc_status;
- void (*srpc_done)(struct srpc_server_rpc *);
+ unsigned int srpc_aborted; /* being given up */
+ int srpc_status;
+ void (*srpc_done)(struct srpc_server_rpc *);
};
/* client-side state of a RPC */
typedef struct srpc_client_rpc {
- struct list_head crpc_list; /* chain on user's lists */
- spinlock_t crpc_lock; /* serialize */
- int crpc_service;
- atomic_t crpc_refcount;
- int crpc_timeout; /* # seconds to wait for reply */
- stt_timer_t crpc_timer;
- swi_workitem_t crpc_wi;
+ struct list_head crpc_list; /* chain on user's lists */
+ spinlock_t crpc_lock; /* serialize */
+ int crpc_service;
+ atomic_t crpc_refcount;
+ int crpc_timeout; /* # seconds to wait for reply */
+ struct stt_timer crpc_timer;
+ swi_workitem_t crpc_wi;
lnet_process_id_t crpc_dest;
- void (*crpc_done)(struct srpc_client_rpc *);
- void (*crpc_fini)(struct srpc_client_rpc *);
- int crpc_status; /* completion status */
- void *crpc_priv; /* caller data */
+ void (*crpc_done)(struct srpc_client_rpc *);
+ void (*crpc_fini)(struct srpc_client_rpc *);
+ int crpc_status; /* completion status */
+ void *crpc_priv; /* caller data */
/* state flags */
- unsigned int crpc_aborted:1; /* being given up */
- unsigned int crpc_closed:1; /* completed */
+ unsigned int crpc_aborted:1; /* being given up */
+ unsigned int crpc_closed:1; /* completed */
/* RPC events */
- srpc_event_t crpc_bulkev; /* bulk event */
- srpc_event_t crpc_reqstev; /* request event */
- srpc_event_t crpc_replyev; /* reply event */
+ srpc_event_t crpc_bulkev; /* bulk event */
+ srpc_event_t crpc_reqstev; /* request event */
+ srpc_event_t crpc_replyev; /* reply event */
/* bulk, request(reqst), and reply exchanged on wire */
- srpc_msg_t crpc_reqstmsg;
- srpc_msg_t crpc_replymsg;
+ srpc_msg_t crpc_reqstmsg;
+ srpc_msg_t crpc_replymsg;
lnet_handle_md_t crpc_reqstmdh;
lnet_handle_md_t crpc_replymdh;
- srpc_bulk_t crpc_bulk;
+ srpc_bulk_t crpc_bulk;
} srpc_client_rpc_t;
-#define srpc_client_rpc_size(rpc) \
+#define srpc_client_rpc_size(rpc) \
offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
-#define srpc_client_rpc_addref(rpc) \
-do { \
- CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
- (rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- atomic_inc(&(rpc)->crpc_refcount); \
+#define srpc_client_rpc_addref(rpc) \
+do { \
+ CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \
+ (rpc), libcfs_id2str((rpc)->crpc_dest), \
+ atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
+ atomic_inc(&(rpc)->crpc_refcount); \
} while (0)
-#define srpc_client_rpc_decref(rpc) \
-do { \
- CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
- (rpc), libcfs_id2str((rpc)->crpc_dest), \
- atomic_read(&(rpc)->crpc_refcount)); \
- LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
- if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
- srpc_destroy_client_rpc(rpc); \
+#define srpc_client_rpc_decref(rpc) \
+do { \
+ CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \
+ (rpc), libcfs_id2str((rpc)->crpc_dest), \
+ atomic_read(&(rpc)->crpc_refcount)); \
+ LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \
+ if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \
+ srpc_destroy_client_rpc(rpc); \
} while (0)
-#define srpc_event_pending(rpc) ((rpc)->crpc_bulkev.ev_fired == 0 || \
- (rpc)->crpc_reqstev.ev_fired == 0 || \
- (rpc)->crpc_replyev.ev_fired == 0)
+#define srpc_event_pending(rpc) (!(rpc)->crpc_bulkev.ev_fired || \
+ !(rpc)->crpc_reqstev.ev_fired || \
+ !(rpc)->crpc_replyev.ev_fired)
/* CPU partition data of srpc service */
struct srpc_service_cd {
@@ -268,9 +268,9 @@ struct srpc_service_cd {
/** event buffer */
srpc_event_t scd_ev;
/** free RPC descriptors */
- struct list_head scd_rpc_free;
+ struct list_head scd_rpc_free;
/** in-flight RPCs */
- struct list_head scd_rpc_active;
+ struct list_head scd_rpc_active;
/** workitem for posting buffer */
swi_workitem_t scd_buf_wi;
/** CPT id */
@@ -278,7 +278,7 @@ struct srpc_service_cd {
/** error code for scd_buf_wi */
int scd_buf_err;
/** timestamp for scd_buf_err */
- time64_t scd_buf_err_stamp;
+ time64_t scd_buf_err_stamp;
/** total # request buffers */
int scd_buf_total;
/** # posted request buffers */
@@ -290,16 +290,16 @@ struct srpc_service_cd {
/** increase/decrease some buffers */
int scd_buf_adjust;
/** posted message buffers */
- struct list_head scd_buf_posted;
+ struct list_head scd_buf_posted;
/** blocked for RPC descriptor */
- struct list_head scd_buf_blocked;
+ struct list_head scd_buf_blocked;
};
/* number of server workitems (mini-thread) for testing service */
#define SFW_TEST_WI_MIN 256
#define SFW_TEST_WI_MAX 2048
/* extra buffers for tolerating buggy peers, or unbalanced number
- * of peers between partitions */
+ * of peers between partitions */
#define SFW_TEST_WI_EXTRA 64
/* number of server workitems (mini-thread) for framework service */
@@ -324,29 +324,29 @@ typedef struct srpc_service {
typedef struct {
struct list_head sn_list; /* chain on fw_zombie_sessions */
- lst_sid_t sn_id; /* unique identifier */
- unsigned int sn_timeout; /* # seconds' inactivity to expire */
- int sn_timer_active;
- unsigned int sn_features;
- stt_timer_t sn_timer;
+ lst_sid_t sn_id; /* unique identifier */
+ unsigned int sn_timeout; /* # seconds' inactivity to expire */
+ int sn_timer_active;
+ unsigned int sn_features;
+ struct stt_timer sn_timer;
struct list_head sn_batches; /* list of batches */
- char sn_name[LST_NAME_SIZE];
- atomic_t sn_refcount;
- atomic_t sn_brw_errors;
- atomic_t sn_ping_errors;
- unsigned long sn_started;
+ char sn_name[LST_NAME_SIZE];
+ atomic_t sn_refcount;
+ atomic_t sn_brw_errors;
+ atomic_t sn_ping_errors;
+ unsigned long sn_started;
} sfw_session_t;
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
(sid0).ses_stamp == (sid1).ses_stamp)
typedef struct {
- struct list_head bat_list; /* chain on sn_batches */
- lst_bid_t bat_id; /* batch id */
- int bat_error; /* error code of batch */
- sfw_session_t *bat_session; /* batch's session */
- atomic_t bat_nactive; /* # of active tests */
- struct list_head bat_tests; /* test instances */
+ struct list_head bat_list; /* chain on sn_batches */
+ lst_bid_t bat_id; /* batch id */
+ int bat_error; /* error code of batch */
+ sfw_session_t *bat_session; /* batch's session */
+ atomic_t bat_nactive; /* # of active tests */
+ struct list_head bat_tests; /* test instances */
} sfw_batch_t;
typedef struct {
@@ -356,32 +356,32 @@ typedef struct {
* client */
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
- srpc_client_rpc_t **rpc); /* prep a tests rpc */
+ srpc_client_rpc_t **rpc); /* prep a tests rpc */
void (*tso_done_rpc)(struct sfw_test_unit *tsu,
- srpc_client_rpc_t *rpc); /* done a test rpc */
+ srpc_client_rpc_t *rpc); /* done a test rpc */
} sfw_test_client_ops_t;
typedef struct sfw_test_instance {
- struct list_head tsi_list; /* chain on batch */
- int tsi_service; /* test type */
- sfw_batch_t *tsi_batch; /* batch */
- sfw_test_client_ops_t *tsi_ops; /* test client operation
+ struct list_head tsi_list; /* chain on batch */
+ int tsi_service; /* test type */
+ sfw_batch_t *tsi_batch; /* batch */
+ sfw_test_client_ops_t *tsi_ops; /* test client operation
*/
/* public parameter for all test units */
- unsigned int tsi_is_client:1; /* is test client */
- unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
- int tsi_concur; /* concurrency */
- int tsi_loop; /* loop count */
+ unsigned int tsi_is_client:1; /* is test client */
+ unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
+ int tsi_concur; /* concurrency */
+ int tsi_loop; /* loop count */
/* status of test instance */
- spinlock_t tsi_lock; /* serialize */
- unsigned int tsi_stopping:1; /* test is stopping */
- atomic_t tsi_nactive; /* # of active test
+ spinlock_t tsi_lock; /* serialize */
+ unsigned int tsi_stopping:1; /* test is stopping */
+ atomic_t tsi_nactive; /* # of active test
* unit */
- struct list_head tsi_units; /* test units */
- struct list_head tsi_free_rpcs; /* free rpcs */
- struct list_head tsi_active_rpcs; /* active rpcs */
+ struct list_head tsi_units; /* test units */
+ struct list_head tsi_free_rpcs; /* free rpcs */
+ struct list_head tsi_active_rpcs; /* active rpcs */
union {
test_ping_req_t ping; /* ping parameter */
@@ -390,32 +390,32 @@ typedef struct sfw_test_instance {
} tsi_u;
} sfw_test_instance_t;
-/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
- * the end of pages are not used */
-#define SFW_MAX_CONCUR LST_MAX_CONCUR
-#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
-#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
+/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used */
+#define SFW_MAX_CONCUR LST_MAX_CONCUR
+#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
+#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
typedef struct sfw_test_unit {
- struct list_head tsu_list; /* chain on lst_test_instance */
- lnet_process_id_t tsu_dest; /* id of dest node */
- int tsu_loop; /* loop count of the test */
+ struct list_head tsu_list; /* chain on lst_test_instance */
+ lnet_process_id_t tsu_dest; /* id of dest node */
+ int tsu_loop; /* loop count of the test */
sfw_test_instance_t *tsu_instance; /* pointer to test instance */
- void *tsu_private; /* private data */
- swi_workitem_t tsu_worker; /* workitem of the test unit */
+ void *tsu_private; /* private data */
+ swi_workitem_t tsu_worker; /* workitem of the test unit */
} sfw_test_unit_t;
typedef struct sfw_test_case {
- struct list_head tsc_list; /* chain on fw_tests */
- srpc_service_t *tsc_srv_service; /* test service */
- sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
+ struct list_head tsc_list; /* chain on fw_tests */
+ srpc_service_t *tsc_srv_service; /* test service */
+ sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
} sfw_test_case_t;
srpc_client_rpc_t *
sfw_create_rpc(lnet_process_id_t peer, int service,
unsigned features, int nbulkiov, int bulklen,
- void (*done) (srpc_client_rpc_t *), void *priv);
+ void (*done)(srpc_client_rpc_t *), void *priv);
int sfw_create_test_rpc(sfw_test_unit_t *tsu,
lnet_process_id_t peer, unsigned features,
int nblk, int blklen, srpc_client_rpc_t **rpc);
@@ -427,7 +427,7 @@ void sfw_free_pages(struct srpc_server_rpc *rpc);
void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
int sink);
-int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
srpc_client_rpc_t *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
@@ -472,9 +472,9 @@ static inline void
swi_init_workitem(swi_workitem_t *swi, void *data,
swi_action_t action, struct cfs_wi_sched *sched)
{
- swi->swi_sched = sched;
+ swi->swi_sched = sched;
swi->swi_action = action;
- swi->swi_state = SWI_STATE_NEWBORN;
+ swi->swi_state = SWI_STATE_NEWBORN;
cfs_wi_init(&swi->swi_workitem, data, swi_wi_action);
}
@@ -502,26 +502,23 @@ void sfw_shutdown(void);
void srpc_shutdown(void);
static inline void
-srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
+srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
{
- LASSERT(rpc != NULL);
+ LASSERT(rpc);
LASSERT(!srpc_event_pending(rpc));
- LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
+ LASSERT(!atomic_read(&rpc->crpc_refcount));
- if (rpc->crpc_fini == NULL) {
+ if (!rpc->crpc_fini)
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
- } else {
- (*rpc->crpc_fini) (rpc);
- }
-
- return;
+ else
+ (*rpc->crpc_fini)(rpc);
}
static inline void
-srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
- int service, int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+ int service, int nbulkiov, int bulklen,
+ void (*rpc_done)(srpc_client_rpc_t *),
+ void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
LASSERT(nbulkiov <= LNET_MAX_IOV);
@@ -534,30 +531,29 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
spin_lock_init(&rpc->crpc_lock);
atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
- rpc->crpc_dest = peer;
- rpc->crpc_priv = priv;
- rpc->crpc_service = service;
- rpc->crpc_bulk.bk_len = bulklen;
+ rpc->crpc_dest = peer;
+ rpc->crpc_priv = priv;
+ rpc->crpc_service = service;
+ rpc->crpc_bulk.bk_len = bulklen;
rpc->crpc_bulk.bk_niov = nbulkiov;
- rpc->crpc_done = rpc_done;
- rpc->crpc_fini = rpc_fini;
+ rpc->crpc_done = rpc_done;
+ rpc->crpc_fini = rpc_fini;
LNetInvalidateHandle(&rpc->crpc_reqstmdh);
LNetInvalidateHandle(&rpc->crpc_replymdh);
LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh);
/* no event is expected at this point */
- rpc->crpc_bulkev.ev_fired =
- rpc->crpc_reqstev.ev_fired =
+ rpc->crpc_bulkev.ev_fired = 1;
+ rpc->crpc_reqstev.ev_fired = 1;
rpc->crpc_replyev.ev_fired = 1;
- rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC;
+ rpc->crpc_reqstmsg.msg_magic = SRPC_MSG_MAGIC;
rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
- rpc->crpc_reqstmsg.msg_type = srpc_service2request(service);
- return;
+ rpc->crpc_reqstmsg.msg_type = srpc_service2request(service);
}
static inline const char *
-swi_state2str (int state)
+swi_state2str(int state)
{
#define STATE2STR(x) case x: return #x
switch (state) {
@@ -602,11 +598,11 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
LASSERT(sv->sv_shuttingdown);
- while (srpc_finish_service(sv) == 0) {
+ while (!srpc_finish_service(sv)) {
i++;
- CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
- "Waiting for %s service to shutdown...\n",
- sv->sv_name);
+ CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
+ "Waiting for %s service to shutdown...\n",
+ sv->sv_name);
selftest_wait_events();
}
}
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index b98c08a10606..8be52526ae5a 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -57,17 +57,17 @@
(STTIMER_NSLOTS - 1))])
static struct st_timer_data {
- spinlock_t stt_lock;
- unsigned long stt_prev_slot; /* start time of the slot processed
+ spinlock_t stt_lock;
+ unsigned long stt_prev_slot; /* start time of the slot processed
* previously */
struct list_head stt_hash[STTIMER_NSLOTS];
- int stt_shuttingdown;
+ int stt_shuttingdown;
wait_queue_head_t stt_waitq;
- int stt_nthreads;
+ int stt_nthreads;
} stt_data;
void
-stt_add_timer(stt_timer_t *timer)
+stt_add_timer(struct stt_timer *timer)
{
struct list_head *pos;
@@ -75,13 +75,14 @@ stt_add_timer(stt_timer_t *timer)
LASSERT(stt_data.stt_nthreads > 0);
LASSERT(!stt_data.stt_shuttingdown);
- LASSERT(timer->stt_func != NULL);
+ LASSERT(timer->stt_func);
LASSERT(list_empty(&timer->stt_list));
LASSERT(timer->stt_expires > ktime_get_real_seconds());
/* a simple insertion sort */
list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) {
- stt_timer_t *old = list_entry(pos, stt_timer_t, stt_list);
+ struct stt_timer *old = list_entry(pos, struct stt_timer,
+ stt_list);
if (timer->stt_expires >= old->stt_expires)
break;
@@ -101,7 +102,7 @@ stt_add_timer(stt_timer_t *timer)
* another CPU.
*/
int
-stt_del_timer(stt_timer_t *timer)
+stt_del_timer(struct stt_timer *timer)
{
int ret = 0;
@@ -124,10 +125,10 @@ static int
stt_expire_list(struct list_head *slot, time64_t now)
{
int expired = 0;
- stt_timer_t *timer;
+ struct stt_timer *timer;
while (!list_empty(slot)) {
- timer = list_entry(slot->next, stt_timer_t, stt_list);
+ timer = list_entry(slot->next, struct stt_timer, stt_list);
if (timer->stt_expires > now)
break;
@@ -218,7 +219,7 @@ stt_startup(void)
stt_data.stt_nthreads = 0;
init_waitqueue_head(&stt_data.stt_waitq);
rc = stt_start_timer_thread();
- if (rc != 0)
+ if (rc)
CERROR("Can't spawn timer thread: %d\n", rc);
return rc;
@@ -237,7 +238,7 @@ stt_shutdown(void)
stt_data.stt_shuttingdown = 1;
wake_up(&stt_data.stt_waitq);
- lst_wait_until(stt_data.stt_nthreads == 0, stt_data.stt_lock,
+ lst_wait_until(!stt_data.stt_nthreads, stt_data.stt_lock,
"waiting for %d threads to terminate\n",
stt_data.stt_nthreads);
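
[Annotation: stt_add_timer() keeps each hash slot sorted by expiry via a backwards insertion scan (list_for_each_prev), so stt_expire_list() can stop at the first timer that is not yet due. A standalone sketch of the same policy over a plain singly linked list — all names hypothetical, not the kernel list API:]

        #include <stdio.h>

        struct timer { long expires; struct timer *next; };

        /* Insert so the list stays ascending by expiry; a head-first scan
         * achieves what the backwards list_for_each_prev() walk does. */
        static void timer_add(struct timer **head, struct timer *t)
        {
                struct timer **pos = head;

                while (*pos && (*pos)->expires <= t->expires)
                        pos = &(*pos)->next;
                t->next = *pos;
                *pos = t;
        }

        /* Expire everything due at or before @now; stop at the first live timer. */
        static int timer_expire(struct timer **head, long now)
        {
                int expired = 0;

                while (*head && (*head)->expires <= now) {
                        *head = (*head)->next;
                        expired++;
                }
                return expired;
        }

        int main(void)
        {
                struct timer a = { 5 }, b = { 2 }, c = { 9 };
                struct timer *head = NULL;

                timer_add(&head, &a);
                timer_add(&head, &b);
                timer_add(&head, &c);
                printf("expired %d\n", timer_expire(&head, 6)); /* prints 2 */
                return 0;
        }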
diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h
index 03e2ee294c1c..f1fbebd8a67c 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.h
+++ b/drivers/staging/lustre/lnet/selftest/timer.h
@@ -38,15 +38,15 @@
#ifndef __SELFTEST_TIMER_H__
#define __SELFTEST_TIMER_H__
-typedef struct {
+struct stt_timer {
struct list_head stt_list;
- time64_t stt_expires;
- void (*stt_func) (void *);
- void *stt_data;
-} stt_timer_t;
+ time64_t stt_expires;
+ void (*stt_func)(void *);
+ void *stt_data;
+};
-void stt_add_timer(stt_timer_t *timer);
-int stt_del_timer(stt_timer_t *timer);
+void stt_add_timer(struct stt_timer *timer);
+int stt_del_timer(struct stt_timer *timer);
int stt_startup(void);
void stt_shutdown(void);
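
[Annotation: the timer.h hunk applies the kernel coding-style rule that plain structs should not hide behind typedefs; `stt_timer_t` becomes `struct stt_timer` everywhere. The shape of the change, abridged:]

        /* Discouraged: the typedef hides that callers deal with a struct. */
        typedef struct {
                long expires;
        } stt_timer_t;

        /* Preferred kernel style: the struct tag is the type. */
        struct stt_timer {
                long expires;
        };

        void stt_add_timer(struct stt_timer *timer);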
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index 62c7bba75274..8ac7cd4d6fdb 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -1,7 +1,7 @@
config LUSTRE_FS
tristate "Lustre file system client support"
- depends on INET && m && !MIPS && !XTENSA && !SUPERH
- select LNET
+ depends on m && !MIPS && !XTENSA && !SUPERH
+ depends on LNET
select CRYPTO
select CRYPTO_CRC32
select CRYPTO_CRC32_PCLMUL if X86
diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile
index 35d8b0b2dff4..331e4fcdd5a2 100644
--- a/drivers/staging/lustre/lustre/Makefile
+++ b/drivers/staging/lustre/lustre/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_LUSTRE_FS) += libcfs/ obdclass/ ptlrpc/ fld/ osc/ mgc/ \
+obj-$(CONFIG_LUSTRE_FS) += obdclass/ ptlrpc/ fld/ osc/ mgc/ \
fid/ lov/ mdc/ lmv/ llite/ obdecho/
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index ff8f38dc10ce..39269c3c56a6 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -68,7 +68,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
LUSTRE_MDS_VERSION, SEQ_QUERY);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
/* Init operation code */
@@ -95,7 +95,8 @@ static int seq_client_rpc(struct lu_client_seq *seq,
* precreating objects on this OST), and it will send the
* request to MDT0 here, so we can not keep resending the
* request here, otherwise if MDT0 is failed(umounted),
- * it can not release the export of MDT0 */
+ * it can not release the export of MDT0
+ */
if (seq->lcs_type == LUSTRE_SEQ_DATA)
req->rq_no_delay = req->rq_no_resend = 1;
debug_mask = D_CONSOLE;
@@ -152,7 +153,8 @@ static int seq_client_alloc_meta(const struct lu_env *env,
/* If meta server return -EINPROGRESS or EAGAIN,
* it means meta server might not be ready to
* allocate super sequence from sequence controller
- * (MDT0)yet */
+ * (MDT0)yet
+ */
rc = seq_client_rpc(seq, &seq->lcs_space,
SEQ_ALLOC_META, "meta");
} while (rc == -EINPROGRESS || rc == -EAGAIN);
@@ -226,8 +228,8 @@ int seq_client_alloc_fid(const struct lu_env *env,
wait_queue_t link;
int rc;
- LASSERT(seq != NULL);
- LASSERT(fid != NULL);
+ LASSERT(seq);
+ LASSERT(fid);
init_waitqueue_entry(&link, current);
mutex_lock(&seq->lcs_mutex);
@@ -292,7 +294,7 @@ void seq_client_flush(struct lu_client_seq *seq)
{
wait_queue_t link;
- LASSERT(seq != NULL);
+ LASSERT(seq);
init_waitqueue_entry(&link, current);
mutex_lock(&seq->lcs_mutex);
@@ -375,8 +377,8 @@ static int seq_client_init(struct lu_client_seq *seq,
{
int rc;
- LASSERT(seq != NULL);
- LASSERT(prefix != NULL);
+ LASSERT(seq);
+ LASSERT(prefix);
seq->lcs_type = type;
@@ -438,7 +440,7 @@ int client_fid_fini(struct obd_device *obd)
{
struct client_obd *cli = &obd->u.cli;
- if (cli->cl_seq != NULL) {
+ if (cli->cl_seq) {
seq_client_fini(cli->cl_seq);
kfree(cli->cl_seq);
cli->cl_seq = NULL;
@@ -448,7 +450,7 @@ int client_fid_fini(struct obd_device *obd)
}
EXPORT_SYMBOL(client_fid_fini);
-static int __init fid_mod_init(void)
+static int __init fid_init(void)
{
seq_debugfs_dir = ldebugfs_register(LUSTRE_SEQ_NAME,
debugfs_lustre_root,
@@ -456,16 +458,16 @@ static int __init fid_mod_init(void)
return PTR_ERR_OR_ZERO(seq_debugfs_dir);
}
-static void __exit fid_mod_exit(void)
+static void __exit fid_exit(void)
{
if (!IS_ERR_OR_NULL(seq_debugfs_dir))
ldebugfs_remove(&seq_debugfs_dir);
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre FID Module");
+MODULE_DESCRIPTION("Lustre File IDentifier");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-MODULE_VERSION("0.1.0");
-module_init(fid_mod_init);
-module_exit(fid_mod_exit);
+module_init(fid_init);
+module_exit(fid_exit);
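
[Annotation: the rename from fid_mod_init/fid_mod_exit to fid_init/fid_exit and the switch from a hard-coded "0.1.0" to MODULE_VERSION(LUSTRE_VERSION_STRING) both follow the standard module skeleton. A hedged sketch of that skeleton — illustrative only, not the actual fid module:]

        #include <linux/module.h>

        static int __init example_init(void)
        {
                /* register debugfs entries etc.; return 0 or -errno */
                return 0;
        }

        static void __exit example_exit(void)
        {
                /* tear down whatever example_init() set up */
        }

        module_init(example_init);
        module_exit(example_exit);

        MODULE_AUTHOR("...");
        MODULE_DESCRIPTION("Minimal module skeleton");
        MODULE_VERSION("1.0");  /* better derived from a shared version string */
        MODULE_LICENSE("GPL");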
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 39f2aa32e984..1f0e78686278 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -66,7 +66,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count,
int rc;
char kernbuf[MAX_FID_RANGE_STRLEN];
- LASSERT(range != NULL);
+ LASSERT(range);
if (count >= sizeof(kernbuf))
return -EINVAL;
@@ -85,6 +85,8 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count,
rc = sscanf(kernbuf, "[%llx - %llx]\n",
(unsigned long long *)&tmp.lsr_start,
(unsigned long long *)&tmp.lsr_end);
+ if (rc != 2)
+ return -EINVAL;
if (!range_is_sane(&tmp) || range_is_zero(&tmp) ||
tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
return -EINVAL;
@@ -102,7 +104,6 @@ ldebugfs_fid_space_seq_write(struct file *file,
int rc;
seq = ((struct seq_file *)file->private_data)->private;
- LASSERT(seq != NULL);
mutex_lock(&seq->lcs_mutex);
rc = ldebugfs_fid_write_common(buffer, count, &seq->lcs_space);
@@ -122,8 +123,6 @@ ldebugfs_fid_space_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
- LASSERT(seq != NULL);
-
mutex_lock(&seq->lcs_mutex);
seq_printf(m, "[%#llx - %#llx]:%x:%s\n", PRANGE(&seq->lcs_space));
mutex_unlock(&seq->lcs_mutex);
@@ -141,7 +140,6 @@ ldebugfs_fid_width_seq_write(struct file *file,
int rc, val;
seq = ((struct seq_file *)file->private_data)->private;
- LASSERT(seq != NULL);
rc = lprocfs_write_helper(buffer, count, &val);
if (rc)
@@ -170,8 +168,6 @@ ldebugfs_fid_width_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
- LASSERT(seq != NULL);
-
mutex_lock(&seq->lcs_mutex);
seq_printf(m, "%llu\n", seq->lcs_width);
mutex_unlock(&seq->lcs_mutex);
@@ -184,8 +180,6 @@ ldebugfs_fid_fid_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
- LASSERT(seq != NULL);
-
mutex_lock(&seq->lcs_mutex);
seq_printf(m, DFID "\n", PFID(&seq->lcs_fid));
mutex_unlock(&seq->lcs_mutex);
@@ -199,9 +193,7 @@ ldebugfs_fid_server_seq_show(struct seq_file *m, void *unused)
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
struct client_obd *cli;
- LASSERT(seq != NULL);
-
- if (seq->lcs_exp != NULL) {
+ if (seq->lcs_exp) {
cli = &seq->lcs_exp->exp_obd->u.cli;
seq_printf(m, "%s\n", cli->cl_target_uuid.uuid);
}
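
[Annotation: the new `if (rc != 2) return -EINVAL;` in ldebugfs_fid_write_common() closes a hole where a malformed write left tmp partially initialized — sscanf() reports how many conversions succeeded, and that count must be checked before the parsed values are trusted. Runnable illustration:]

        #include <stdio.h>

        int main(void)
        {
                unsigned long long start, end;
                const char *good = "[0x4 - 0x8]";
                const char *bad  = "[0x4 - zzz]";

                /* Two conversions expected; anything else is a parse failure. */
                if (sscanf(good, "[%llx - %llx]", &start, &end) != 2)
                        return 1;
                printf("parsed %#llx..%#llx\n", start, end);

                if (sscanf(bad, "[%llx - %llx]", &start, &end) != 2)
                        printf("rejected malformed input\n");
                return 0;
        }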
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index d9459e58e2ce..062f388cf38a 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -65,7 +65,7 @@ struct fld_cache *fld_cache_init(const char *name,
{
struct fld_cache *cache;
- LASSERT(name != NULL);
+ LASSERT(name);
LASSERT(cache_threshold < cache_size);
cache = kzalloc(sizeof(*cache), GFP_NOFS);
@@ -100,7 +100,7 @@ void fld_cache_fini(struct fld_cache *cache)
{
__u64 pct;
- LASSERT(cache != NULL);
+ LASSERT(cache);
fld_cache_flush(cache);
if (cache->fci_stat.fst_count > 0) {
@@ -183,7 +183,8 @@ restart_fixup:
}
/* we could have overlap over next
- * range too. better restart. */
+ * range too. better restart.
+ */
goto restart_fixup;
}
@@ -218,8 +219,6 @@ static int fld_cache_shrink(struct fld_cache *cache)
struct list_head *curr;
int num = 0;
- LASSERT(cache != NULL);
-
if (cache->fci_cache_count < cache->fci_cache_size)
return 0;
@@ -234,7 +233,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
}
CDEBUG(D_INFO, "%s: FLD cache - Shrunk by %d entries\n",
- cache->fci_name, num);
+ cache->fci_name, num);
return 0;
}
@@ -295,8 +294,8 @@ static void fld_cache_punch_hole(struct fld_cache *cache,
* handle range overlap in fld cache.
*/
static void fld_cache_overlap_handle(struct fld_cache *cache,
- struct fld_cache_entry *f_curr,
- struct fld_cache_entry *f_new)
+ struct fld_cache_entry *f_curr,
+ struct fld_cache_entry *f_new)
{
const struct lu_seq_range *range = &f_new->fce_range;
const u64 new_start = range->lsr_start;
@@ -304,7 +303,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
const u32 mdt = range->lsr_index;
/* this is overlap case, these case are checking overlapping with
- * prev range only. fixup will handle overlapping with next range. */
+ * prev range only. fixup will handle overlapping with next range.
+ */
if (f_curr->fce_range.lsr_index == mdt) {
f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
@@ -319,7 +319,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
} else if (new_start <= f_curr->fce_range.lsr_start &&
f_curr->fce_range.lsr_end <= new_end) {
/* case 1: new range completely overshadowed existing range.
- * e.g. whole range migrated. update fld cache entry */
+ * e.g. whole range migrated. update fld cache entry
+ */
f_curr->fce_range = *range;
kfree(f_new);
@@ -401,8 +402,8 @@ static int fld_cache_insert_nolock(struct fld_cache *cache,
list_for_each_entry_safe(f_curr, n, head, fce_list) {
/* add list if next is end of list */
if (new_end < f_curr->fce_range.lsr_start ||
- (new_end == f_curr->fce_range.lsr_start &&
- new_flags != f_curr->fce_range.lsr_flags))
+ (new_end == f_curr->fce_range.lsr_start &&
+ new_flags != f_curr->fce_range.lsr_flags))
break;
prev = &f_curr->fce_list;
@@ -414,7 +415,7 @@ static int fld_cache_insert_nolock(struct fld_cache *cache,
}
}
- if (prev == NULL)
+ if (!prev)
prev = head;
CDEBUG(D_INFO, "insert range "DRANGE"\n", PRANGE(&f_new->fce_range));
@@ -459,8 +460,8 @@ struct fld_cache_entry
head = &cache->fci_entries_head;
list_for_each_entry(flde, head, fce_list) {
if (range->lsr_start == flde->fce_range.lsr_start ||
- (range->lsr_end == flde->fce_range.lsr_end &&
- range->lsr_flags == flde->fce_range.lsr_flags)) {
+ (range->lsr_end == flde->fce_range.lsr_end &&
+ range->lsr_flags == flde->fce_range.lsr_flags)) {
got = flde;
break;
}
@@ -499,7 +500,7 @@ int fld_cache_lookup(struct fld_cache *cache,
cache->fci_stat.fst_count++;
list_for_each_entry(flde, head, fce_list) {
if (flde->fce_range.lsr_start > seq) {
- if (prev != NULL)
+ if (prev)
*range = prev->fce_range;
break;
}
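
[Annotation: fld_cache_lookup() relies on the invariant (documented in fld_internal.h below) that entries are sorted on lsr_start — it walks forward and the last entry whose start is <= seq is the candidate range. A standalone sketch of that lookup over a plain sorted array, names hypothetical:]

        #include <stdio.h>

        struct range { unsigned long long start, end; };

        /* Entries sorted ascending by start; return the one covering @seq,
         * or NULL when @seq falls into a hole. */
        static const struct range *range_lookup(const struct range *tab, int n,
                                                unsigned long long seq)
        {
                const struct range *prev = NULL;
                int i;

                for (i = 0; i < n; i++) {
                        if (tab[i].start > seq)
                                break;
                        prev = &tab[i];
                }
                if (prev && seq < prev->end)
                        return prev;
                return NULL;
        }

        int main(void)
        {
                const struct range tab[] = { { 0, 16 }, { 32, 64 }, { 64, 128 } };

                printf("%d\n", range_lookup(tab, 3, 40) != NULL); /* 1: in [32,64) */
                printf("%d\n", range_lookup(tab, 3, 20) != NULL); /* 0: in a hole  */
                return 0;
        }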
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 12eb1647b4bf..e8a3caf20c9b 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -58,22 +58,16 @@ struct fld_stats {
__u64 fst_inflight;
};
-typedef int (*fld_hash_func_t) (struct lu_client_fld *, __u64);
-
-typedef struct lu_fld_target *
-(*fld_scan_func_t) (struct lu_client_fld *, __u64);
-
struct lu_fld_hash {
const char *fh_name;
- fld_hash_func_t fh_hash_func;
- fld_scan_func_t fh_scan_func;
+ int (*fh_hash_func)(struct lu_client_fld *, __u64);
+ struct lu_fld_target *(*fh_scan_func)(struct lu_client_fld *, __u64);
};
struct fld_cache_entry {
struct list_head fce_lru;
struct list_head fce_list;
- /**
- * fld cache entries are sorted on range->lsr_start field. */
+ /** fld cache entries are sorted on range->lsr_start field. */
struct lu_seq_range fce_range;
};
@@ -84,32 +78,25 @@ struct fld_cache {
*/
rwlock_t fci_lock;
- /**
- * Cache shrink threshold */
+ /** Cache shrink threshold */
int fci_threshold;
- /**
- * Preferred number of cached entries */
+ /** Preferred number of cached entries */
int fci_cache_size;
- /**
- * Current number of cached entries. Protected by \a fci_lock */
+ /** Current number of cached entries. Protected by \a fci_lock */
int fci_cache_count;
- /**
- * LRU list fld entries. */
+ /** LRU list fld entries. */
struct list_head fci_lru;
- /**
- * sorted fld entries. */
+ /** sorted fld entries. */
struct list_head fci_entries_head;
- /**
- * Cache statistics. */
+ /** Cache statistics. */
struct fld_stats fci_stat;
- /**
- * Cache name used for debug and messages. */
+ /** Cache name used for debug and messages. */
char fci_name[LUSTRE_MDT_MAXNAMELEN];
unsigned int fci_no_shrink:1;
};
@@ -169,7 +156,7 @@ struct fld_cache_entry
static inline const char *
fld_target_name(struct lu_fld_target *tar)
{
- if (tar->ft_srv != NULL)
+ if (tar->ft_srv)
return tar->ft_srv->lsf_name;
return (const char *)tar->ft_exp->exp_obd->obd_name;
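
[Annotation: the fld_internal.h hunk drops the fld_hash_func_t/fld_scan_func_t typedefs and spells the function-pointer types out at their single point of use inside struct lu_fld_hash. The shape of the change, abridged, with __u64 written as unsigned long long and the struct renamed to keep this self-contained:]

        struct lu_client_fld;
        struct lu_fld_target;

        /* Before: typedefs obscured what the members really were.
         * typedef int (*fld_hash_func_t)(struct lu_client_fld *, __u64);
         */

        /* After: the pointer types are visible where they are used. */
        struct lu_fld_hash_example {
                const char *fh_name;
                int (*fh_hash_func)(struct lu_client_fld *, unsigned long long);
                struct lu_fld_target *(*fh_scan_func)(struct lu_client_fld *,
                                                      unsigned long long);
        };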
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index d92c01b74865..a3d122d85c8d 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -58,7 +58,8 @@
#include "fld_internal.h"
/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
- * It should be common thing. The same about mdc RPC lock */
+ * It should be common thing. The same about mdc RPC lock
+ */
static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
@@ -124,7 +125,8 @@ fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
* it should go to index 0 directly, instead of calculating
* hash again, and also if other MDTs is not being connected,
* the fld lookup requests(for seq on MDT0) should not be
- * blocked because of other MDTs */
+ * blocked because of other MDTs
+ */
if (fid_seq_is_norm(seq))
hash = fld_rrb_hash(fld, seq);
else
@@ -139,18 +141,19 @@ again:
if (hash != 0) {
/* It is possible the remote target(MDT) are not connected to
* with client yet, so we will refer this to MDT0, which should
- * be connected during mount */
+ * be connected during mount
+ */
hash = 0;
goto again;
}
CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n",
- fld->lcf_name, hash, seq, fld->lcf_count);
+ fld->lcf_name, hash, seq, fld->lcf_count);
list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
- const char *srv_name = target->ft_srv != NULL ?
+ const char *srv_name = target->ft_srv ?
target->ft_srv->lsf_name : "<null>";
- const char *exp_name = target->ft_exp != NULL ?
+ const char *exp_name = target->ft_exp ?
(char *)target->ft_exp->exp_obd->obd_uuid.uuid :
"<null>";
@@ -183,13 +186,13 @@ fld_client_get_target(struct lu_client_fld *fld, u64 seq)
{
struct lu_fld_target *target;
- LASSERT(fld->lcf_hash != NULL);
+ LASSERT(fld->lcf_hash);
spin_lock(&fld->lcf_lock);
target = fld->lcf_hash->fh_scan_func(fld, seq);
spin_unlock(&fld->lcf_lock);
- if (target != NULL) {
+ if (target) {
CDEBUG(D_INFO, "%s: Found target (idx %llu) by seq %#llx\n",
fld->lcf_name, target->ft_idx, seq);
}
@@ -207,18 +210,18 @@ int fld_client_add_target(struct lu_client_fld *fld,
const char *name;
struct lu_fld_target *target, *tmp;
- LASSERT(tar != NULL);
+ LASSERT(tar);
name = fld_target_name(tar);
- LASSERT(name != NULL);
- LASSERT(tar->ft_srv != NULL || tar->ft_exp != NULL);
+ LASSERT(name);
+ LASSERT(tar->ft_srv || tar->ft_exp);
if (fld->lcf_flags != LUSTRE_FLD_INIT) {
CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n",
- fld->lcf_name, name, tar->ft_idx);
+ fld->lcf_name, name, tar->ft_idx);
return 0;
}
CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n",
- fld->lcf_name, name, tar->ft_idx);
+ fld->lcf_name, name, tar->ft_idx);
target = kzalloc(sizeof(*target), GFP_NOFS);
if (!target)
@@ -236,13 +239,12 @@ int fld_client_add_target(struct lu_client_fld *fld,
}
target->ft_exp = tar->ft_exp;
- if (target->ft_exp != NULL)
+ if (target->ft_exp)
class_export_get(target->ft_exp);
target->ft_srv = tar->ft_srv;
target->ft_idx = tar->ft_idx;
- list_add_tail(&target->ft_chain,
- &fld->lcf_targets);
+ list_add_tail(&target->ft_chain, &fld->lcf_targets);
fld->lcf_count++;
spin_unlock(&fld->lcf_lock);
@@ -257,14 +259,13 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
struct lu_fld_target *target, *tmp;
spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == idx) {
fld->lcf_count--;
list_del(&target->ft_chain);
spin_unlock(&fld->lcf_lock);
- if (target->ft_exp != NULL)
+ if (target->ft_exp)
class_export_put(target->ft_exp);
kfree(target);
@@ -326,8 +327,6 @@ int fld_client_init(struct lu_client_fld *fld,
int cache_size, cache_threshold;
int rc;
- LASSERT(fld != NULL);
-
snprintf(fld->lcf_name, sizeof(fld->lcf_name),
"cli-%s", prefix);
@@ -375,17 +374,16 @@ void fld_client_fini(struct lu_client_fld *fld)
struct lu_fld_target *target, *tmp;
spin_lock(&fld->lcf_lock);
- list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
fld->lcf_count--;
list_del(&target->ft_chain);
- if (target->ft_exp != NULL)
+ if (target->ft_exp)
class_export_put(target->ft_exp);
kfree(target);
}
spin_unlock(&fld->lcf_lock);
- if (fld->lcf_cache != NULL) {
+ if (fld->lcf_cache) {
if (!IS_ERR(fld->lcf_cache))
fld_cache_fini(fld->lcf_cache);
fld->lcf_cache = NULL;
@@ -402,12 +400,12 @@ int fld_client_rpc(struct obd_export *exp,
int rc;
struct obd_import *imp;
- LASSERT(exp != NULL);
+ LASSERT(exp);
imp = class_exp2cliimp(exp);
req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
FLD_QUERY);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
@@ -436,7 +434,7 @@ int fld_client_rpc(struct obd_export *exp,
goto out_req;
prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
- if (prange == NULL) {
+ if (!prange) {
rc = -EFAULT;
goto out_req;
}
@@ -463,10 +461,10 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
/* Can not find it in the cache */
target = fld_client_get_target(fld, seq);
- LASSERT(target != NULL);
+ LASSERT(target);
CDEBUG(D_INFO, "%s: Lookup fld entry (seq: %#llx) on target %s (idx %llu)\n",
- fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
+ fld->lcf_name, seq, fld_target_name(target), target->ft_idx);
res.lsr_start = seq;
fld_range_set_type(&res, flags);
@@ -487,7 +485,7 @@ void fld_client_flush(struct lu_client_fld *fld)
}
EXPORT_SYMBOL(fld_client_flush);
-static int __init fld_mod_init(void)
+static int __init fld_init(void)
{
fld_debugfs_dir = ldebugfs_register(LUSTRE_FLD_NAME,
debugfs_lustre_root,
@@ -495,15 +493,16 @@ static int __init fld_mod_init(void)
return PTR_ERR_OR_ZERO(fld_debugfs_dir);
}
-static void __exit fld_mod_exit(void)
+static void __exit fld_exit(void)
{
if (!IS_ERR_OR_NULL(fld_debugfs_dir))
ldebugfs_remove(&fld_debugfs_dir);
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre FLD");
+MODULE_DESCRIPTION("Lustre FID Location Database");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-module_init(fld_mod_init)
-module_exit(fld_mod_exit)
+module_init(fld_init)
+module_exit(fld_exit)
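
[Annotation: the comments reflowed in fld_rrb_scan() describe its fallback: hash the sequence to a target round-robin, and if that target is not connected yet, retry with hash 0 so the lookup lands on MDT0, which is guaranteed to be connected at mount. A runnable sketch of that selection policy, names and table hypothetical:]

        #include <stdio.h>

        #define NR_TARGETS 4

        /* 1 if the target is connected; index 0 (MDT0) is always up at mount. */
        static const int connected[NR_TARGETS] = { 1, 0, 1, 0 };

        static int pick_target(unsigned long long seq)
        {
                int hash = (int)(seq % NR_TARGETS);

        again:
                if (connected[hash])
                        return hash;
                if (hash != 0) {
                        /* Remote target not connected yet: fall back to MDT0. */
                        hash = 0;
                        goto again;
                }
                return -1;
        }

        int main(void)
        {
                printf("seq 2 -> target %d\n", pick_target(2)); /* 2, connected */
                printf("seq 3 -> target %d\n", pick_target(3)); /* 0, fell back */
                return 0;
        }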
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index 41ceaa8198a7..ca898befeba6 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -60,11 +60,8 @@ fld_debugfs_targets_seq_show(struct seq_file *m, void *unused)
struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
struct lu_fld_target *target;
- LASSERT(fld != NULL);
-
spin_lock(&fld->lcf_lock);
- list_for_each_entry(target,
- &fld->lcf_targets, ft_chain)
+ list_for_each_entry(target, &fld->lcf_targets, ft_chain)
seq_printf(m, "%s\n", fld_target_name(target));
spin_unlock(&fld->lcf_lock);
@@ -76,8 +73,6 @@ fld_debugfs_hash_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
- LASSERT(fld != NULL);
-
spin_lock(&fld->lcf_lock);
seq_printf(m, "%s\n", fld->lcf_hash->fh_name);
spin_unlock(&fld->lcf_lock);
@@ -102,9 +97,8 @@ fld_debugfs_hash_seq_write(struct file *file,
return -EFAULT;
fld = ((struct seq_file *)file->private_data)->private;
- LASSERT(fld != NULL);
- for (i = 0; fld_hash[i].fh_name != NULL; i++) {
+ for (i = 0; fld_hash[i].fh_name; i++) {
if (count != strlen(fld_hash[i].fh_name))
continue;
@@ -114,7 +108,7 @@ fld_debugfs_hash_seq_write(struct file *file,
}
}
- if (hash != NULL) {
+ if (hash) {
spin_lock(&fld->lcf_lock);
fld->lcf_hash = hash;
spin_unlock(&fld->lcf_lock);
@@ -132,8 +126,6 @@ fld_debugfs_cache_flush_write(struct file *file, const char __user *buffer,
{
struct lu_client_fld *fld = file->private_data;
- LASSERT(fld != NULL);
-
fld_cache_flush(fld->lcf_cache);
CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name);
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index bd7acc2a1219..fb971ded5a1b 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -157,7 +157,8 @@ struct cl_device {
};
/** \addtogroup cl_object cl_object
- * @{ */
+ * @{
+ */
/**
* "Data attributes" of cl_object. Data attributes can be updated
* independently for a sub-object, and top-object's attributes are calculated
@@ -288,13 +289,14 @@ struct cl_object_conf {
enum {
/** configure layout, set up a new stripe, must be called while
- * holding layout lock. */
+ * holding layout lock.
+ */
OBJECT_CONF_SET = 0,
/** invalidate the current stripe configuration due to losing
- * layout lock. */
+ * layout lock.
+ */
OBJECT_CONF_INVALIDATE = 1,
- /** wait for old layout to go away so that new layout can be
- * set up. */
+ /** wait for old layout to go away so that new layout can be set up. */
OBJECT_CONF_WAIT = 2
};
@@ -320,7 +322,7 @@ struct cl_object_operations {
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, struct page *vmpage);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
@@ -393,7 +395,8 @@ struct cl_object_operations {
*/
struct cl_object_header {
/** Standard lu_object_header. cl_object::co_lu::lo_header points
- * here. */
+ * here.
+ */
struct lu_object_header coh_lu;
/** \name locks
* \todo XXX move locks below to the separate cache-lines, they are
@@ -464,7 +467,8 @@ struct cl_object_header {
#define CL_PAGE_EOF ((pgoff_t)~0ull)
/** \addtogroup cl_page cl_page
- * @{ */
+ * @{
+ */
/** \struct cl_page
* Layered client page.
@@ -687,12 +691,14 @@ enum cl_page_state {
enum cl_page_type {
/** Host page, the page is from the host inode which the cl_page
- * belongs to. */
+ * belongs to.
+ */
CPT_CACHEABLE = 1,
/** Transient page, the transient cl_page is used to bind a cl_page
* to vmpage which is not belonging to the same object of cl_page.
- * it is used in DirectIO, lockless IO and liblustre. */
+ * it is used in DirectIO and lockless IO.
+ */
CPT_TRANSIENT,
};
@@ -728,7 +734,8 @@ struct cl_page {
/** Parent page, NULL for top-level page. Immutable after creation. */
struct cl_page *cp_parent;
/** Lower-layer page. NULL for bottommost page. Immutable after
- * creation. */
+ * creation.
+ */
struct cl_page *cp_child;
/**
* Page state. This field is const to avoid accidental update, it is
@@ -842,7 +849,7 @@ struct cl_page_operations {
* \return the underlying VM page. Optional.
*/
struct page *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
+ const struct cl_page_slice *slice);
/**
* Called when \a io acquires this page into the exclusive
* ownership. When this method returns, it is guaranteed that the is
@@ -1126,7 +1133,8 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
- * @{ */
+ * @{
+ */
/** \struct cl_lock
*
* Extent locking on the client.
@@ -1641,7 +1649,8 @@ struct cl_lock {
struct cl_lock_slice {
struct cl_lock *cls_lock;
/** Object slice corresponding to this lock slice. Immutable after
- * creation. */
+ * creation.
+ */
struct cl_object *cls_obj;
const struct cl_lock_operations *cls_ops;
/** Linkage into cl_lock::cll_layers. Immutable after creation. */
@@ -1885,7 +1894,8 @@ struct cl_2queue {
/** @} cl_page_list */
/** \addtogroup cl_io cl_io
- * @{ */
+ * @{
+ */
/** \struct cl_io
* I/O
*
@@ -2041,8 +2051,8 @@ struct cl_io_operations {
*
* \see cl_io_operations::cio_iter_fini()
*/
- int (*cio_iter_init) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ int (*cio_iter_init)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/**
* Finalize io iteration.
*
@@ -2052,8 +2062,8 @@ struct cl_io_operations {
*
* \see cl_io_operations::cio_iter_init()
*/
- void (*cio_iter_fini) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_iter_fini)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/**
* Collect locks for the current iteration of io.
*
@@ -2063,8 +2073,8 @@ struct cl_io_operations {
* cl_io_lock_add(). Once all locks are collected, they are
* sorted and enqueued in the proper order.
*/
- int (*cio_lock) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ int (*cio_lock)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/**
* Finalize unlocking.
*
@@ -2089,8 +2099,8 @@ struct cl_io_operations {
* Called top-to-bottom at the end of io loop. Here layer
* might wait for an unfinished asynchronous io.
*/
- void (*cio_end) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_end)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
/**
* Called bottom-to-top to notify layers that read/write IO
* iteration finished, with \a nob bytes transferred.
@@ -2101,8 +2111,8 @@ struct cl_io_operations {
/**
* Called once per io, bottom-to-top to release io resources.
*/
- void (*cio_fini) (const struct lu_env *env,
- const struct cl_io_slice *slice);
+ void (*cio_fini)(const struct lu_env *env,
+ const struct cl_io_slice *slice);
} op[CIT_OP_NR];
struct {
/**
@@ -2222,7 +2232,7 @@ struct cl_io_lock_link {
struct cl_lock *cill_lock;
/** optional destructor */
void (*cill_fini)(const struct lu_env *env,
- struct cl_io_lock_link *link);
+ struct cl_io_lock_link *link);
};
/**
@@ -2272,7 +2282,7 @@ enum cl_io_lock_dmd {
CILR_MANDATORY = 0,
/** Layers are free to decide between local and global locking. */
CILR_MAYBE,
- /** Never lock: there is no cache (e.g., liblustre). */
+ /** Never lock: there is no cache (e.g., lockless IO). */
CILR_NEVER
};
@@ -2284,7 +2294,8 @@ enum cl_fsync_mode {
/** discard all of dirty pages in a specific file range */
CL_FSYNC_DISCARD = 2,
/** start writeback and make sure they have reached storage before
- * return. OST_SYNC RPC must be issued and finished */
+ * return. OST_SYNC RPC must be issued and finished
+ */
CL_FSYNC_ALL = 3
};
@@ -2403,7 +2414,8 @@ struct cl_io {
/** @} cl_io */
/** \addtogroup cl_req cl_req
- * @{ */
+ * @{
+ */
/** \struct cl_req
* Transfer.
*
@@ -2582,7 +2594,8 @@ enum cache_stats_item {
/** how many entities are in the cache right now */
CS_total,
/** how many entities in the cache are actively used (and cannot be
- * evicted) right now */
+ * evicted) right now
+ */
CS_busy,
/** how many entities were created at all */
CS_create,
@@ -2600,7 +2613,7 @@ struct cache_stats {
};
/** These are not exported so far */
-void cache_stats_init (struct cache_stats *cs, const char *name);
+void cache_stats_init(struct cache_stats *cs, const char *name);
/**
* Client-side site. This represents particular client stack. "Global"
@@ -2613,7 +2626,7 @@ struct cl_site {
* Statistical counters. Atomics do not scale, something better like
* per-cpu counters is needed.
*
- * These are exported as /proc/fs/lustre/llite/.../site
+ * These are exported as /sys/kernel/debug/lustre/llite/.../site
*
* When interpreting keep in mind that both sub-locks (and sub-pages)
* and top-locks (and top-pages) are accounted here.
@@ -2624,8 +2637,8 @@ struct cl_site {
atomic_t cs_locks_state[CLS_NR];
};
-int cl_site_init (struct cl_site *s, struct cl_device *top);
-void cl_site_fini (struct cl_site *s);
+int cl_site_init(struct cl_site *s, struct cl_device *top);
+void cl_site_fini(struct cl_site *s);
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl);
/**
@@ -2653,7 +2666,7 @@ static inline int lu_device_is_cl(const struct lu_device *d)
static inline struct cl_device *lu2cl_dev(const struct lu_device *d)
{
- LASSERT(d == NULL || IS_ERR(d) || lu_device_is_cl(d));
+ LASSERT(!d || IS_ERR(d) || lu_device_is_cl(d));
return container_of0(d, struct cl_device, cd_lu_dev);
}
@@ -2664,7 +2677,7 @@ static inline struct lu_device *cl2lu_dev(struct cl_device *d)
static inline struct cl_object *lu2cl(const struct lu_object *o)
{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
+ LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->lo_dev));
return container_of0(o, struct cl_object, co_lu);
}
@@ -2681,7 +2694,7 @@ static inline struct cl_object *cl_object_next(const struct cl_object *obj)
static inline struct cl_device *cl_object_device(const struct cl_object *o)
{
- LASSERT(o == NULL || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
+ LASSERT(!o || IS_ERR(o) || lu_device_is_cl(o->co_lu.lo_dev));
return container_of0(o->co_lu.lo_dev, struct cl_device, cd_lu_dev);
}
@@ -2725,27 +2738,28 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
/** @} helpers */
/** \defgroup cl_object cl_object
- * @{ */
-struct cl_object *cl_object_top (struct cl_object *o);
+ * @{
+ */
+struct cl_object *cl_object_top(struct cl_object *o);
struct cl_object *cl_object_find(const struct lu_env *env, struct cl_device *cd,
const struct lu_fid *fid,
const struct cl_object_conf *c);
int cl_object_header_init(struct cl_object_header *h);
-void cl_object_put (const struct lu_env *env, struct cl_object *o);
-void cl_object_get (struct cl_object *o);
-void cl_object_attr_lock (struct cl_object *o);
+void cl_object_put(const struct lu_env *env, struct cl_object *o);
+void cl_object_get(struct cl_object *o);
+void cl_object_attr_lock(struct cl_object *o);
void cl_object_attr_unlock(struct cl_object *o);
-int cl_object_attr_get (const struct lu_env *env, struct cl_object *obj,
- struct cl_attr *attr);
-int cl_object_attr_set (const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
-int cl_object_glimpse (const struct lu_env *env, struct cl_object *obj,
- struct ost_lvb *lvb);
-int cl_conf_set (const struct lu_env *env, struct cl_object *obj,
- const struct cl_object_conf *conf);
-void cl_object_prune (const struct lu_env *env, struct cl_object *obj);
-void cl_object_kill (const struct lu_env *env, struct cl_object *obj);
+int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
+ struct cl_attr *attr);
+int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_attr *attr, unsigned valid);
+int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
+ struct ost_lvb *lvb);
+int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
+ const struct cl_object_conf *conf);
+void cl_object_prune(const struct lu_env *env, struct cl_object *obj);
+void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
/**
* Returns true, iff \a o0 and \a o1 are slices of the same object.
@@ -2770,7 +2784,8 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
/** @} cl_object */
/** \defgroup cl_page cl_page
- * @{ */
+ * @{
+ */
enum {
CLP_GANG_OKAY = 0,
CLP_GANG_RESCHED,
@@ -2781,34 +2796,26 @@ enum {
/* callback of cl_page_gang_lookup() */
typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
struct cl_page *, void *);
-int cl_page_gang_lookup (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_io *io,
- pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup (struct cl_object_header *hdr,
- pgoff_t index);
-struct cl_page *cl_page_find (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type);
-struct cl_page *cl_page_find_sub (const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
+int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, pgoff_t start, pgoff_t end,
+ cl_page_gang_cb_t cb, void *cbdata);
+struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index);
+struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type);
+struct cl_page *cl_page_find_sub(const struct lu_env *env,
+ struct cl_object *obj,
+ pgoff_t idx, struct page *vmpage,
struct cl_page *parent);
-void cl_page_get (struct cl_page *page);
-void cl_page_put (const struct lu_env *env,
- struct cl_page *page);
-void cl_page_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_page *pg);
-void cl_page_header_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_page *pg);
-struct page *cl_page_vmpage (const struct lu_env *env,
- struct cl_page *page);
-struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj);
-struct cl_page *cl_page_top (struct cl_page *page);
+void cl_page_get(struct cl_page *page);
+void cl_page_put(const struct lu_env *env, struct cl_page *page);
+void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
+ const struct cl_page *pg);
+void cl_page_header_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_page *pg);
+struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page);
+struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
+struct cl_page *cl_page_top(struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype);
@@ -2820,17 +2827,17 @@ const struct cl_page_slice *cl_page_at(const struct cl_page *page,
*/
/** @{ */
-int cl_page_own (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_own_try (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_assume (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-void cl_page_unassume (const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg);
-void cl_page_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_page *page);
-int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
+int cl_page_own(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+int cl_page_own_try(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+void cl_page_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+void cl_page_unassume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *pg);
+void cl_page_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_page *page);
+int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io);
/** @} ownership */
@@ -2841,19 +2848,19 @@ int cl_page_is_owned (const struct cl_page *pg, const struct cl_io *io);
* tracking transfer state.
*/
/** @{ */
-int cl_page_prep (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_completion (const struct lu_env *env,
- struct cl_page *pg, enum cl_req_type crt, int ioret);
-int cl_page_make_ready (const struct lu_env *env, struct cl_page *pg,
- enum cl_req_type crt);
-int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt);
-void cl_page_clip (const struct lu_env *env, struct cl_page *pg,
- int from, int to);
-int cl_page_cancel (const struct lu_env *env, struct cl_page *page);
-int cl_page_flush (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
+int cl_page_prep(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt);
+void cl_page_completion(const struct lu_env *env,
+ struct cl_page *pg, enum cl_req_type crt, int ioret);
+int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
+ enum cl_req_type crt);
+int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, enum cl_req_type crt);
+void cl_page_clip(const struct lu_env *env, struct cl_page *pg,
+ int from, int to);
+int cl_page_cancel(const struct lu_env *env, struct cl_page *page);
+int cl_page_flush(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
/** @} transfer */
@@ -2862,24 +2869,22 @@ int cl_page_flush (const struct lu_env *env, struct cl_io *io,
* Functions to discard, delete and export a cl_page.
*/
/** @{ */
-void cl_page_discard (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-void cl_page_delete (const struct lu_env *env, struct cl_page *pg);
-int cl_page_unmap (const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
-int cl_page_is_vmlocked (const struct lu_env *env,
- const struct cl_page *pg);
-void cl_page_export (const struct lu_env *env,
- struct cl_page *pg, int uptodate);
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
-loff_t cl_offset (const struct cl_object *obj, pgoff_t idx);
-pgoff_t cl_index (const struct cl_object *obj, loff_t offset);
-int cl_page_size (const struct cl_object *obj);
-int cl_pages_prune (const struct lu_env *env, struct cl_object *obj);
-
-void cl_lock_print (const struct lu_env *env, void *cookie,
- lu_printer_t printer, const struct cl_lock *lock);
+void cl_page_discard(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
+void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
+int cl_page_unmap(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg);
+int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
+void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
+int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page);
+loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
+pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
+int cl_page_size(const struct cl_object *obj);
+int cl_pages_prune(const struct lu_env *env, struct cl_object *obj);
+
+void cl_lock_print(const struct lu_env *env, void *cookie,
+ lu_printer_t printer, const struct cl_lock *lock);
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
lu_printer_t printer,
const struct cl_lock_descr *descr);
@@ -2888,7 +2893,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
/** @} cl_page */
/** \defgroup cl_lock cl_lock
- * @{ */
+ * @{
+ */
struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
const struct cl_lock_descr *need,
@@ -2917,19 +2923,19 @@ static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct lu_device_type *dtype);
-void cl_lock_get (struct cl_lock *lock);
-void cl_lock_get_trust (struct cl_lock *lock);
-void cl_lock_put (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_hold_add (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
+void cl_lock_get(struct cl_lock *lock);
+void cl_lock_get_trust(struct cl_lock *lock);
+void cl_lock_put(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source);
-void cl_lock_unhold (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_release (const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_user_add (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_user_del (const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
+ const char *scope, const void *source);
+void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock);
int cl_lock_is_intransit(struct cl_lock *lock);
@@ -2966,52 +2972,53 @@ int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
*
* cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
*
- * @{ */
+ * @{
+ */
-int cl_wait (const struct lu_env *env, struct cl_lock *lock);
-void cl_unuse (const struct lu_env *env, struct cl_lock *lock);
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags);
-int cl_unuse_try (const struct lu_env *env, struct cl_lock *lock);
-int cl_wait_try (const struct lu_env *env, struct cl_lock *lock);
-int cl_use_try (const struct lu_env *env, struct cl_lock *lock, int atomic);
+int cl_wait(const struct lu_env *env, struct cl_lock *lock);
+void cl_unuse(const struct lu_env *env, struct cl_lock *lock);
+int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_io *io, __u32 flags);
+int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock);
+int cl_wait_try(const struct lu_env *env, struct cl_lock *lock);
+int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic);
/** @} statemachine */
-void cl_lock_signal (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_state_wait (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_state_set (const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state);
-int cl_queue_match (const struct list_head *queue,
- const struct cl_lock_descr *need);
-
-void cl_lock_mutex_get (const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_is_mutexed (struct cl_lock *lock);
-int cl_lock_nr_mutexed (const struct lu_env *env);
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_ext_match (const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_mode_match (enum cl_lock_mode has, enum cl_lock_mode need);
-int cl_lock_modify (const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc);
-
-void cl_lock_closure_init (const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait);
-void cl_lock_closure_fini (struct cl_lock_closure *closure);
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-void cl_lock_disclosure (const struct lu_env *env,
- struct cl_lock_closure *closure);
-int cl_lock_enclosure (const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
+void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
+ enum cl_lock_state state);
+int cl_queue_match(const struct list_head *queue,
+ const struct cl_lock_descr *need);
+
+void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_is_mutexed(struct cl_lock *lock);
+int cl_lock_nr_mutexed(const struct lu_env *env);
+int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_ext_match(const struct cl_lock_descr *has,
+ const struct cl_lock_descr *need);
+int cl_lock_descr_match(const struct cl_lock_descr *has,
+ const struct cl_lock_descr *need);
+int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need);
+int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_lock_descr *desc);
+
+void cl_lock_closure_init(const struct lu_env *env,
+ struct cl_lock_closure *closure,
+ struct cl_lock *origin, int wait);
+void cl_lock_closure_fini(struct cl_lock_closure *closure);
+int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_lock_closure *closure);
+void cl_lock_disclosure(const struct lu_env *env,
+ struct cl_lock_closure *closure);
+int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
+ struct cl_lock_closure *closure);
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_error (const struct lu_env *env, struct cl_lock *lock, int error);
+void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error);
void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
@@ -3019,39 +3026,40 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
/** @} cl_lock */
/** \defgroup cl_io cl_io
- * @{ */
-
-int cl_io_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_sub_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, struct cl_object *obj);
-int cl_io_rw_init (const struct lu_env *env, struct cl_io *io,
- enum cl_io_type iot, loff_t pos, size_t count);
-int cl_io_loop (const struct lu_env *env, struct cl_io *io);
-
-void cl_io_fini (const struct lu_env *env, struct cl_io *io);
-int cl_io_iter_init (const struct lu_env *env, struct cl_io *io);
-void cl_io_iter_fini (const struct lu_env *env, struct cl_io *io);
-int cl_io_lock (const struct lu_env *env, struct cl_io *io);
-void cl_io_unlock (const struct lu_env *env, struct cl_io *io);
-int cl_io_start (const struct lu_env *env, struct cl_io *io);
-void cl_io_end (const struct lu_env *env, struct cl_io *io);
-int cl_io_lock_add (const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link);
-int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
- struct cl_lock_descr *descr);
-int cl_io_read_page (const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_submit_rw (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue);
-int cl_io_submit_sync (const struct lu_env *env, struct cl_io *io,
- enum cl_req_type iot, struct cl_2queue *queue,
- long timeout);
-int cl_io_is_going (const struct lu_env *env);
+ * @{
+ */
+
+int cl_io_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj);
+int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, struct cl_object *obj);
+int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
+ enum cl_io_type iot, loff_t pos, size_t count);
+int cl_io_loop(const struct lu_env *env, struct cl_io *io);
+
+void cl_io_fini(const struct lu_env *env, struct cl_io *io);
+int cl_io_iter_init(const struct lu_env *env, struct cl_io *io);
+void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io);
+int cl_io_lock(const struct lu_env *env, struct cl_io *io);
+void cl_io_unlock(const struct lu_env *env, struct cl_io *io);
+int cl_io_start(const struct lu_env *env, struct cl_io *io);
+void cl_io_end(const struct lu_env *env, struct cl_io *io);
+int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_io_lock_link *link);
+int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock_descr *descr);
+int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page);
+int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, unsigned from, unsigned to);
+int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, unsigned from, unsigned to);
+int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type iot, struct cl_2queue *queue);
+int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
+ enum cl_req_type iot, struct cl_2queue *queue,
+ long timeout);
+int cl_io_is_going(const struct lu_env *env);
/**
* True, iff \a io is an O_APPEND write(2).
@@ -3094,7 +3102,8 @@ do { \
/** @} cl_io */
/** \defgroup cl_page_list cl_page_list
- * @{ */
+ * @{
+ */
/**
* Last page in the page list.
@@ -3117,40 +3126,41 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
#define cl_page_list_for_each_safe(page, temp, list) \
list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
-void cl_page_list_init (struct cl_page_list *plist);
-void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page);
-void cl_page_list_splice (struct cl_page_list *list,
- struct cl_page_list *head);
-void cl_page_list_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
-
-void cl_2queue_init (struct cl_2queue *queue);
-void cl_2queue_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_discard (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
+void cl_page_list_init(struct cl_page_list *plist);
+void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page);
+void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
+void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head);
+void cl_page_list_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+
+void cl_2queue_init(struct cl_2queue *queue);
+void cl_2queue_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue);
+void cl_2queue_discard(const struct lu_env *env,
+ struct cl_io *io, struct cl_2queue *queue);
+void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue);
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
/** @} cl_page_list */
/** \defgroup cl_req cl_req
- * @{ */
+ * @{
+ */
struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
enum cl_req_type crt, int nr_objects);
-void cl_req_page_add (const struct lu_env *env, struct cl_req *req,
- struct cl_page *page);
-void cl_req_page_done (const struct lu_env *env, struct cl_page *page);
-int cl_req_prep (const struct lu_env *env, struct cl_req *req);
-void cl_req_attr_set (const struct lu_env *env, struct cl_req *req,
- struct cl_req_attr *attr, u64 flags);
+void cl_req_page_add(const struct lu_env *env, struct cl_req *req,
+ struct cl_page *page);
+void cl_req_page_done(const struct lu_env *env, struct cl_page *page);
+int cl_req_prep(const struct lu_env *env, struct cl_req *req);
+void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
+ struct cl_req_attr *attr, u64 flags);
void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
/** \defgroup cl_sync_io cl_sync_io
- * @{ */
+ * @{
+ */
/**
* Anchor for synchronous transfer. This is allocated on a stack by thread
@@ -3214,22 +3224,23 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
* - cl_env_reexit(cl_env_reenter had to be called priorly)
*
* \see lu_env, lu_context, lu_context_key
- * @{ */
+ * @{
+ */
struct cl_env_nest {
int cen_refcheck;
void *cen_cookie;
};
-struct lu_env *cl_env_get (int *refcheck);
-struct lu_env *cl_env_alloc (int *refcheck, __u32 tags);
-struct lu_env *cl_env_nested_get (struct cl_env_nest *nest);
-void cl_env_put (struct lu_env *env, int *refcheck);
-void cl_env_nested_put (struct cl_env_nest *nest, struct lu_env *env);
-void *cl_env_reenter (void);
-void cl_env_reexit (void *cookie);
-void cl_env_implant (struct lu_env *env, int *refcheck);
-void cl_env_unplant (struct lu_env *env, int *refcheck);
+struct lu_env *cl_env_get(int *refcheck);
+struct lu_env *cl_env_alloc(int *refcheck, __u32 tags);
+struct lu_env *cl_env_nested_get(struct cl_env_nest *nest);
+void cl_env_put(struct lu_env *env, int *refcheck);
+void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env);
+void *cl_env_reenter(void);
+void cl_env_reexit(void *cookie);
+void cl_env_implant(struct lu_env *env, int *refcheck);
+void cl_env_unplant(struct lu_env *env, int *refcheck);
/** @} cl_env */
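
[Annotation: two checkpatch rules drive nearly every hunk in cl_object.h — a multi-line comment must close with */ on its own line, and there must be no space between a function name and its parameter list (the old column-aligned prototypes go away). Before/after, abridged:]

        /** \addtogroup cl_foo cl_foo
         * @{ */                    /* old: terminator shares the line */

        /** \addtogroup cl_foo cl_foo
         * @{
         */                         /* new: terminator on its own line */

        void cl_foo_get   (struct cl_foo *foo); /* old: padded to a column */
        void cl_foo_get(struct cl_foo *foo);    /* new: no space before '(' */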
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
index 36e7a6767e71..5d839a9f789f 100644
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ b/drivers/staging/lustre/lustre/include/lclient.h
@@ -127,7 +127,7 @@ static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
struct ccc_thread_info *info;
info = lu_context_key_get(&env->le_ctx, &ccc_key);
- LASSERT(info != NULL);
+ LASSERT(info);
return info;
}
@@ -156,7 +156,7 @@ static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
struct ccc_session *ses;
ses = lu_context_key_get(env->le_ses, &ccc_session_key);
- LASSERT(ses != NULL);
+ LASSERT(ses);
return ses;
}
@@ -383,7 +383,8 @@ void cl_put_grouplock(struct ccc_grouplock *cg);
*
* NB: If you find you have to use these interfaces for your new code, please
* think about it again. These interfaces may be removed in the future for
- * better layering. */
+ * better layering.
+ */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 33e0b99e1fb4..c6c7f54637fb 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
return;
if (PagePrivate(page))
- page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
cancel_dirty_page(page);
ClearPageMappedToDisk(page);
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
index 468bc28be895..3907bf4ce07c 100644
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ b/drivers/staging/lustre/lustre/include/linux/obd.h
@@ -57,23 +57,23 @@ struct ll_iattr {
#define CLIENT_OBD_LIST_LOCK_DEBUG 1
-typedef struct {
+struct client_obd_lock {
spinlock_t lock;
unsigned long time;
struct task_struct *task;
const char *func;
int line;
-} client_obd_lock_t;
+};
-static inline void __client_obd_list_lock(client_obd_lock_t *lock,
+static inline void __client_obd_list_lock(struct client_obd_lock *lock,
const char *func, int line)
{
unsigned long cur = jiffies;
while (1) {
if (spin_trylock(&lock->lock)) {
- LASSERT(lock->task == NULL);
+ LASSERT(!lock->task);
lock->task = current;
lock->func = func;
lock->line = line;
@@ -85,7 +85,7 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
time_before(lock->time + 5 * HZ, jiffies)) {
struct task_struct *task = lock->task;
- if (task == NULL)
+ if (!task)
continue;
LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
@@ -106,20 +106,20 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
#define client_obd_list_lock(lock) \
__client_obd_list_lock(lock, __func__, __LINE__)
-static inline void client_obd_list_unlock(client_obd_lock_t *lock)
+static inline void client_obd_list_unlock(struct client_obd_lock *lock)
{
- LASSERT(lock->task != NULL);
+ LASSERT(lock->task);
lock->task = NULL;
lock->time = jiffies;
spin_unlock(&lock->lock);
}
-static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
+static inline void client_obd_list_lock_init(struct client_obd_lock *lock)
{
spin_lock_init(&lock->lock);
}
-static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
+static inline void client_obd_list_lock_done(struct client_obd_lock *lock)
{}
#endif /* __LINUX_OBD_H */
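The client_obd_lock_t to struct client_obd_lock conversion above is mechanical, but the wrapper it renames is worth spelling out: __client_obd_list_lock() spins on spin_trylock() and, once the lock has been held for more than five seconds, logs which task took it and from which function and line before spinning on. A hedged usage sketch (the caller is illustrative; the lock/unlock helpers are the ones defined above):

static void example_critical_section(struct client_obd_lock *lk)
{
	/* records current, __func__ and __LINE__ inside *lk */
	client_obd_list_lock(lk);

	/* ... touch the state this lock protects ... */

	/* clears lk->task and stamps lk->time for the watchdog */
	client_obd_list_unlock(lk);
}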
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 0ac8e0edcc48..4146c9c3999f 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -54,7 +54,7 @@ struct lprocfs_vars {
struct file_operations *fops;
void *data;
/**
- * /proc file mode.
+ * sysfs file mode.
*/
umode_t proc_mode;
};
@@ -175,7 +175,8 @@ struct lprocfs_percpu {
enum lprocfs_stats_flags {
LPROCFS_STATS_FLAG_NONE = 0x0000, /* per cpu counter */
LPROCFS_STATS_FLAG_NOPERCPU = 0x0001, /* stats have no percpu
- * area and need locking */
+ * area and need locking
+ */
LPROCFS_STATS_FLAG_IRQ_SAFE = 0x0002, /* alloc need irq safe */
};
@@ -196,7 +197,8 @@ struct lprocfs_stats {
unsigned short ls_biggest_alloc_num;
enum lprocfs_stats_flags ls_flags;
/* Lock used when there are no percpu stats areas; For percpu stats,
- * it is used to protect ls_biggest_alloc_num change */
+ * it is used to protect ls_biggest_alloc_num change
+ */
spinlock_t ls_lock;
/* has ls_num of counter headers */
@@ -274,20 +276,7 @@ static inline int opcode_offset(__u32 opc)
OPC_RANGE(OST));
} else if (opc < FLD_LAST_OPC) {
/* FLD opcode */
- return (opc - FLD_FIRST_OPC +
- OPC_RANGE(SEC) +
- OPC_RANGE(SEQ) +
- OPC_RANGE(QUOTA) +
- OPC_RANGE(LLOG) +
- OPC_RANGE(OBD) +
- OPC_RANGE(MGS) +
- OPC_RANGE(LDLM) +
- OPC_RANGE(MDS) +
- OPC_RANGE(OST));
- } else if (opc < UPDATE_LAST_OPC) {
- /* update opcode */
- return (opc - UPDATE_FIRST_OPC +
- OPC_RANGE(FLD) +
+ return (opc - FLD_FIRST_OPC +
OPC_RANGE(SEC) +
OPC_RANGE(SEQ) +
OPC_RANGE(QUOTA) +
@@ -312,8 +301,7 @@ static inline int opcode_offset(__u32 opc)
OPC_RANGE(SEC) + \
OPC_RANGE(SEQ) + \
OPC_RANGE(SEC) + \
- OPC_RANGE(FLD) + \
- OPC_RANGE(UPDATE))
+ OPC_RANGE(FLD))
#define EXTRA_MAX_OPCODES ((PTLRPC_LAST_CNTR - PTLRPC_FIRST_CNTR) + \
OPC_RANGE(EXTRA))
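The opcode_offset() hunk above removes the UPDATE range, and the edit is easier to check with the underlying scheme in mind: each service owns a disjoint opcode interval, and an opcode is mapped to a dense counter index by adding the widths (OPC_RANGE) of every interval that precedes it. A standalone sketch with made-up intervals (the real FIRST/LAST constants are defined elsewhere in this header):

#include <stdio.h>

/* hypothetical opcode intervals, standing in for OST/MDS/... */
#define A_FIRST	0
#define A_LAST	20	/* width 20 */
#define B_FIRST	100
#define B_LAST	150	/* width 50 */
#define RANGE(r)	(r##_LAST - r##_FIRST)

static int offset(int opc)
{
	if (opc >= A_FIRST && opc < A_LAST)
		return opc - A_FIRST;			/* 0..19 */
	if (opc >= B_FIRST && opc < B_LAST)
		return opc - B_FIRST + RANGE(A);	/* 20..69 */
	return -1;
}

int main(void)
{
	printf("%d %d\n", offset(5), offset(100));	/* prints: 5 20 */
	return 0;
}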
@@ -407,7 +395,7 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
} else {
unsigned int cpuid = get_cpu();
- if (unlikely(stats->ls_percpu[cpuid] == NULL)) {
+ if (unlikely(!stats->ls_percpu[cpuid])) {
rc = lprocfs_stats_alloc_one(stats, cpuid);
if (rc < 0) {
put_cpu();
@@ -438,12 +426,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
case LPROCFS_GET_SMP_ID:
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
- spin_unlock_irqrestore(&stats->ls_lock,
- *flags);
- } else {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_unlock_irqrestore(&stats->ls_lock, *flags);
+ else
spin_unlock(&stats->ls_lock);
- }
} else {
put_cpu();
}
@@ -451,12 +437,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
case LPROCFS_GET_NUM_CPU:
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
- if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
- spin_unlock_irqrestore(&stats->ls_lock,
- *flags);
- } else {
+ if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
+ spin_unlock_irqrestore(&stats->ls_lock, *flags);
+ else
spin_unlock(&stats->ls_lock);
- }
}
return;
}
@@ -521,11 +505,11 @@ static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
unsigned long flags = 0;
__u64 ret = 0;
- LASSERT(stats != NULL);
+ LASSERT(stats);
num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
for (i = 0; i < num_cpu; i++) {
- if (stats->ls_percpu[i] == NULL)
+ if (!stats->ls_percpu[i])
continue;
ret += lprocfs_read_helper(
lprocfs_stats_counter_get(stats, i, idx),
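The lock, unlock and collector hunks above all serve one layout: with LPROCFS_STATS_FLAG_NOPERCPU a single ls_lock (irq-safe when LPROCFS_STATS_FLAG_IRQ_SAFE is set) guards one shared counter area, while in the percpu case each CPU's area is allocated lazily on first use and a reader simply skips CPUs that never allocated one. A user-space analogue of the read side, with an array standing in for the percpu areas (purely illustrative):

#include <stdio.h>

#define NCPU 4

static long cpu0 = 3, cpu2 = 7;
/* NULL slots model CPUs that never touched this stat */
static long *ls_percpu[NCPU] = { &cpu0, NULL, &cpu2, NULL };

int main(void)
{
	long sum = 0;
	int i;

	for (i = 0; i < NCPU; i++) {
		if (!ls_percpu[i])
			continue;	/* never allocated: counts as 0 */
		sum += *ls_percpu[i];
	}
	printf("total = %ld\n", sum);	/* total = 10 */
	return 0;
}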
@@ -608,7 +592,7 @@ int lprocfs_write_helper(const char __user *buffer, unsigned long count,
int *val);
int lprocfs_write_u64_helper(const char __user *buffer,
unsigned long count, __u64 *val);
-int lprocfs_write_frac_u64_helper(const char *buffer,
+int lprocfs_write_frac_u64_helper(const char __user *buffer,
unsigned long count,
__u64 *val, int mult);
char *lprocfs_find_named_value(const char *buffer, const char *name,
@@ -625,9 +609,10 @@ int lprocfs_single_release(struct inode *, struct file *);
int lprocfs_seq_release(struct inode *, struct file *);
/* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
- proc entries; otherwise, you will define name##_seq_write function also for
- a read-write proc entry, and then call LPROC_SEQ_SEQ instead. Finally,
- call ldebugfs_obd_seq_create(obd, filename, 0444, &name#_fops, data); */
+ * proc entries; otherwise, you will define name##_seq_write function also for
+ * a read-write proc entry, and then call LPROC_SEQ_FOPS instead. Finally,
+ * call ldebugfs_obd_seq_create(obd, filename, 0444, &name##_fops, data);
+ */
#define __LPROC_SEQ_FOPS(name, custom_seq_write) \
static int name##_single_open(struct inode *inode, struct file *file) \
{ \
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 1d79341a495d..242bb1ef6245 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -164,11 +164,12 @@ struct lu_device_operations {
/**
* For lu_object_conf flags
*/
-typedef enum {
+enum loc_flags {
/* This is a new object to be allocated, or the file
- * corresponding to the object does not exists. */
+ * corresponding to the object does not exist.
+ */
LOC_F_NEW = 0x00000001,
-} loc_flags_t;
+};
/**
* Object configuration, describing particulars of object being created. On
@@ -179,7 +180,7 @@ struct lu_object_conf {
/**
* Some hints for obj find and alloc.
*/
- loc_flags_t loc_flags;
+ enum loc_flags loc_flags;
};
/**
@@ -392,7 +393,7 @@ struct lu_device_type_operations {
static inline int lu_device_is_md(const struct lu_device *d)
{
- return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD);
+ return ergo(d, d->ld_type->ldt_tags & LU_DEVICE_MD);
}
/**
@@ -488,7 +489,7 @@ enum lu_object_header_flags {
/**
* Mark this object has already been taken out of cache.
*/
- LU_OBJECT_UNHASHED = 1
+ LU_OBJECT_UNHASHED = 1,
};
enum lu_object_header_attr {
@@ -756,7 +757,7 @@ static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
/**
* return device operations vector for this object
*/
-static const inline struct lu_device_operations *
+static inline const struct lu_device_operations *
lu_object_ops(const struct lu_object *o)
{
return o->lo_dev->ld_ops;
@@ -895,7 +896,8 @@ enum lu_xattr_flags {
/** @} helpers */
/** \name lu_context
- * @{ */
+ * @{
+ */
/** For lu_context health-checks */
enum lu_context_state {
@@ -1116,10 +1118,10 @@ struct lu_context_key {
{ \
type *value; \
\
- CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
+ CLASSERT(PAGE_SIZE >= sizeof (*value)); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
- if (value == NULL) \
+ if (!value) \
value = ERR_PTR(-ENOMEM); \
\
return value; \
@@ -1174,7 +1176,7 @@ void lu_context_key_revive (struct lu_context_key *key);
do { \
LU_CONTEXT_KEY_INIT(key); \
key = va_arg(args, struct lu_context_key *); \
- } while (key != NULL); \
+ } while (key); \
va_end(args); \
}
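The key-init macro above always expands to the same allocator shape: kzalloc the per-context value and fold allocation failure into the returned pointer via ERR_PTR(-ENOMEM), so callers test a single value with IS_ERR(). A sketch of one expansion, with illustrative names:

static void *example_key_init(const struct lu_context *ctx,
			      struct lu_context_key *key)
{
	struct example_thread_info *value;

	/* the CLASSERT above guarantees the value fits in one page */
	value = kzalloc(sizeof(*value), GFP_NOFS);
	if (!value)
		value = ERR_PTR(-ENOMEM);

	return value;
}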
diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h
index 97cd157dd35a..f7dfd83951ee 100644
--- a/drivers/staging/lustre/lustre/include/lu_ref.h
+++ b/drivers/staging/lustre/lustre/include/lu_ref.h
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef __LUSTRE_LU_REF_H
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
index 09088f40ba88..07d45de69dd9 100644
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -47,9 +47,11 @@
struct ll_fiemap_extent {
__u64 fe_logical; /* logical offset in bytes for the start of
- * the extent from the beginning of the file */
+ * the extent from the beginning of the file
+ */
__u64 fe_physical; /* physical offset in bytes for the start
- * of the extent from the beginning of the disk */
+ * of the extent from the beginning of the disk
+ */
__u64 fe_length; /* length in bytes for this extent */
__u64 fe_reserved64[2];
__u32 fe_flags; /* FIEMAP_EXTENT_* flags for this extent */
@@ -59,9 +61,11 @@ struct ll_fiemap_extent {
struct ll_user_fiemap {
__u64 fm_start; /* logical offset (inclusive) at
- * which to start mapping (in) */
+ * which to start mapping (in)
+ */
__u64 fm_length; /* logical length of mapping which
- * userspace wants (in) */
+ * userspace wants (in)
+ */
__u32 fm_flags; /* FIEMAP_FLAG_* flags for request (in/out) */
__u32 fm_mapped_extents;/* number of extents that were mapped (out) */
__u32 fm_extent_count; /* size of fm_extents array (in) */
@@ -71,28 +75,38 @@ struct ll_user_fiemap {
#define FIEMAP_MAX_OFFSET (~0ULL)
-#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */
-#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */
-
-#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
-#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
-#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
- * Sets EXTENT_UNKNOWN. */
-#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read
- * while fs is unmounted */
-#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs.
- * Sets EXTENT_NO_DIRECT. */
+#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before
+ * map
+ */
+#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute
+ * tree
+ */
+#define FIEMAP_EXTENT_LAST 0x00000001 /* Last extent in file. */
+#define FIEMAP_EXTENT_UNKNOWN 0x00000002 /* Data location unknown. */
+#define FIEMAP_EXTENT_DELALLOC 0x00000004 /* Location still pending.
+ * Sets EXTENT_UNKNOWN.
+ */
+#define FIEMAP_EXTENT_ENCODED 0x00000008 /* Data can not be read
+ * while fs is unmounted
+ */
+#define FIEMAP_EXTENT_DATA_ENCRYPTED 0x00000080 /* Data is encrypted by fs.
+ * Sets EXTENT_NO_DIRECT.
+ */
#define FIEMAP_EXTENT_NOT_ALIGNED 0x00000100 /* Extent offsets may not be
- * block aligned. */
+ * block aligned.
+ */
#define FIEMAP_EXTENT_DATA_INLINE 0x00000200 /* Data mixed with metadata.
* Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block.
- * Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but
- * no data (i.e. zero). */
-#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
+#define FIEMAP_EXTENT_DATA_TAIL 0x00000400 /* Multiple files in block.
+ * Sets EXTENT_NOT_ALIGNED.
+ */
+#define FIEMAP_EXTENT_UNWRITTEN 0x00000800 /* Space allocated, but
+ * no data (i.e. zero).
+ */
+#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
* support extents. Result
- * merged for efficiency. */
+ * merged for efficiency.
+ */
static inline size_t fiemap_count_to_size(size_t extent_count)
{
@@ -114,7 +128,8 @@ static inline unsigned fiemap_size_to_count(size_t array_size)
/* Lustre specific flags - use a high bit, don't conflict with upstream flag */
#define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */
-#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely.
- * Sets NO_DIRECT flag */
+#define FIEMAP_EXTENT_NET 0x80000000 /* Data stored remotely.
+ * Sets NO_DIRECT flag
+ */
#endif /* _LUSTRE_FIEMAP_H */
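fiemap_count_to_size() and fiemap_size_to_count(), whose bodies are truncated in the hunk above, convert between an extent count and the byte size of a ll_user_fiemap buffer: a fixed header followed by fm_extent_count ll_fiemap_extent records. A standalone sketch of that arithmetic, assuming the plain header-plus-array formula (trailing struct fields are abbreviated):

#include <stdio.h>
#include <stddef.h>

typedef unsigned int __u32;
typedef unsigned long long __u64;

struct ll_fiemap_extent {
	__u64 fe_logical, fe_physical, fe_length, fe_reserved64[2];
	__u32 fe_flags, fe_reserved[3];	/* layout abbreviated */
};

struct ll_user_fiemap {
	__u64 fm_start, fm_length;
	__u32 fm_flags, fm_mapped_extents, fm_extent_count, fm_reserved;
	struct ll_fiemap_extent fm_extents[];
};

static size_t count_to_size(size_t n)
{
	return sizeof(struct ll_user_fiemap) +
	       n * sizeof(struct ll_fiemap_extent);
}

static size_t size_to_count(size_t bytes)
{
	return (bytes - sizeof(struct ll_user_fiemap)) /
	       sizeof(struct ll_fiemap_extent);
}

int main(void)
{
	size_t sz = count_to_size(8);

	printf("8 extents -> %zu bytes -> %zu extents\n",
	       sz, size_to_count(sz));
	return 0;
}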
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h b/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
deleted file mode 100644
index 93a3d7db3010..000000000000
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_build_version.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define BUILD_VERSION "v2_3_64_0-g6e62c21-CHANGED-3.9.0"
-#define LUSTRE_RELEASE 3.9.0_g6e62c21
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index b064b5821e3f..5aae1d06a5fa 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -113,25 +113,25 @@
#define CONNMGR_REQUEST_PORTAL 1
#define CONNMGR_REPLY_PORTAL 2
-//#define OSC_REQUEST_PORTAL 3
+/*#define OSC_REQUEST_PORTAL 3 */
#define OSC_REPLY_PORTAL 4
-//#define OSC_BULK_PORTAL 5
+/*#define OSC_BULK_PORTAL 5 */
#define OST_IO_PORTAL 6
#define OST_CREATE_PORTAL 7
#define OST_BULK_PORTAL 8
-//#define MDC_REQUEST_PORTAL 9
+/*#define MDC_REQUEST_PORTAL 9 */
#define MDC_REPLY_PORTAL 10
-//#define MDC_BULK_PORTAL 11
+/*#define MDC_BULK_PORTAL 11 */
#define MDS_REQUEST_PORTAL 12
-//#define MDS_REPLY_PORTAL 13
+/*#define MDS_REPLY_PORTAL 13 */
#define MDS_BULK_PORTAL 14
#define LDLM_CB_REQUEST_PORTAL 15
#define LDLM_CB_REPLY_PORTAL 16
#define LDLM_CANCEL_REQUEST_PORTAL 17
#define LDLM_CANCEL_REPLY_PORTAL 18
-//#define PTLBD_REQUEST_PORTAL 19
-//#define PTLBD_REPLY_PORTAL 20
-//#define PTLBD_BULK_PORTAL 21
+/*#define PTLBD_REQUEST_PORTAL 19 */
+/*#define PTLBD_REPLY_PORTAL 20 */
+/*#define PTLBD_BULK_PORTAL 21 */
#define MDS_SETATTR_PORTAL 22
#define MDS_READPAGE_PORTAL 23
#define OUT_PORTAL 24
@@ -146,7 +146,9 @@
#define SEQ_CONTROLLER_PORTAL 32
#define MGS_BULK_PORTAL 33
-/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */
+/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
+ * n8851@cray.com
+ */
/* packet types */
#define PTL_RPC_MSG_REQUEST 4711
@@ -295,7 +297,8 @@ static inline int range_compare_loc(const struct lu_seq_range *r1,
fld_range_is_mdt(range) ? "mdt" : "ost"
/** \defgroup lu_fid lu_fid
- * @{ */
+ * @{
+ */
/**
* Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
@@ -307,7 +310,8 @@ enum lma_compat {
LMAC_SOM = 0x00000002,
LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
- * under /O/<seq>/d<x>. */
+ * under /O/<seq>/d<x>.
+ */
};
/**
@@ -319,7 +323,8 @@ enum lma_incompat {
LMAI_RELEASED = 0x00000001, /* file is released */
LMAI_AGENT = 0x00000002, /* agent inode */
LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
- is on the remote MDT */
+ * is on the remote MDT
+ */
};
#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
@@ -395,12 +400,14 @@ enum fid_seq {
FID_SEQ_LOCAL_FILE = 0x200000001ULL,
FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
/* sequence is used for local named objects FIDs generated
- * by local_object_storage library */
+ * by local_object_storage library
+ */
FID_SEQ_LOCAL_NAME = 0x200000003ULL,
/* Because current FLD will only cache the fid sequence, instead
* of oid on the client side, if the FID needs to be exposed to
* clients sides, it needs to make sure all of fids under one
- * sequence will be located in one MDT. */
+ * sequence will be located in one MDT.
+ */
FID_SEQ_SPECIAL = 0x200000004ULL,
FID_SEQ_QUOTA = 0x200000005ULL,
FID_SEQ_QUOTA_GLB = 0x200000006ULL,
@@ -601,7 +608,8 @@ static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
oi->oi_fid.f_seq = seq;
/* Note: if f_oid + f_ver is zero, we need init it
* to be 1, otherwise, ostid_seq will treat this
- * as old ostid (oi_seq == 0) */
+ * as old ostid (oi_seq == 0)
+ */
if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
}
@@ -630,15 +638,13 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
if (fid_seq_is_mdt0(ostid_seq(oi))) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Bad %llu to set "DOSTID"\n",
- oid, POSTID(oi));
+ CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
return;
}
oi->oi.oi_id = oid;
} else {
if (oid > OBIF_MAX_OID) {
- CERROR("Bad %llu to set "DOSTID"\n",
- oid, POSTID(oi));
+ CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
return;
}
oi->oi_fid.f_oid = oid;
@@ -689,11 +695,12 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
* that we map into the IDIF namespace. It allows up to 2^48
* objects per OST, as this is the object namespace that has
* been in production for years. This can handle create rates
- * of 1M objects/s/OST for 9 years, or combinations thereof. */
+ * of 1M objects/s/OST for 9 years, or combinations thereof.
+ */
if (ostid_id(ostid) >= IDIF_MAX_OID) {
- CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
- POSTID(ostid), ost_idx);
- return -EBADF;
+ CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
+ POSTID(ostid), ost_idx);
+ return -EBADF;
}
fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
/* truncate to 32 bits by assignment */
@@ -704,10 +711,11 @@ static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
/* This is either an IDIF object, which identifies objects across
* all OSTs, or a regular FID. The IDIF namespace maps legacy
* OST objects into the FID namespace. In both cases, we just
- * pass the FID through, no conversion needed. */
+ * pass the FID through, no conversion needed.
+ */
if (ostid->oi_fid.f_ver != 0) {
- CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
- POSTID(ostid), ost_idx);
+ CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
+ POSTID(ostid), ost_idx);
return -EBADF;
}
*fid = ostid->oi_fid;
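For the MDT0 branch above, the shape of the mapping is: a legacy object id is at most 48 bits, the low 32 bits land in f_oid via the truncating assignment, and fid_idif_seq() folds the remaining high bits plus the OST index into f_seq, which is how a single FID namespace can address every legacy object on every OST. A rough standalone illustration of that split (fid_idif_seq()'s exact bit layout is not shown in this hunk, so the packing here is only indicative):

#include <stdio.h>

typedef unsigned long long __u64;
typedef unsigned int __u32;

int main(void)
{
	__u64 oid = 0x123456789abcULL;	/* legacy object id, < 2^48 */
	__u32 ost_idx = 7;
	__u32 f_oid = (__u32)oid;	/* truncated to the low 32 bits */
	__u64 high = oid >> 32;		/* 16 high bits move to the seq */

	printf("ost %u: seq gets %#llx, f_oid gets %#x\n",
	       ost_idx, high, f_oid);
	return 0;
}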
@@ -807,7 +815,7 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
static inline int fid_is_sane(const struct lu_fid *fid)
{
- return fid != NULL &&
+ return fid &&
((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
fid_is_igif(fid) || fid_is_idif(fid) ||
fid_seq_is_rsvd(fid_seq(fid)));
@@ -868,7 +876,8 @@ static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
/** @} lu_fid */
/** \defgroup lu_dir lu_dir
- * @{ */
+ * @{
+ */
/**
* Enumeration of possible directory entry attributes.
@@ -880,24 +889,8 @@ enum lu_dirent_attrs {
LUDA_FID = 0x0001,
LUDA_TYPE = 0x0002,
LUDA_64BITHASH = 0x0004,
-
- /* The following attrs are used for MDT internal only,
- * not visible to client */
-
- /* Verify the dirent consistency */
- LUDA_VERIFY = 0x8000,
- /* Only check but not repair the dirent inconsistency */
- LUDA_VERIFY_DRYRUN = 0x4000,
- /* The dirent has been repaired, or to be repaired (dryrun). */
- LUDA_REPAIR = 0x2000,
- /* The system is upgraded, has beed or to be repaired (dryrun). */
- LUDA_UPGRADE = 0x1000,
- /* Ignore this record, go to next directly. */
- LUDA_IGNORE = 0x0800,
};
-#define LU_DIRENT_ATTRS_MASK 0xf800
-
/**
* Layout of readdir pages, as transmitted on wire.
*/
@@ -1029,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
* MDS_READPAGE page size
*
* This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than PAGE_CACHE_SIZE because the client needs to
+ * It's different than PAGE_SIZE because the client needs to
* access the struct lu_dirpage header packed at the beginning of
* the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_SIZE differ.
*/
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
-#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
+#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
/** @} lu_dir */
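The LU_PAGE_* block above pins the wire directory page at 4KiB regardless of the kernel page size, so LU_PAGE_COUNT is just how many 4KiB lu_pages fit in one kernel page. A quick check of the arithmetic for the two common cases:

#include <stdio.h>

#define LU_PAGE_SHIFT 12	/* wire format: always 4KiB */

int main(void)
{
	int shifts[] = { 12, 16 };	/* 4KiB and 64KiB kernel pages */
	int i;

	for (i = 0; i < 2; i++)
		printf("PAGE_SHIFT=%d -> LU_PAGE_COUNT=%d\n",
		       shifts[i], 1 << (shifts[i] - LU_PAGE_SHIFT));
	return 0;	/* prints 1 and 16 */
}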
@@ -1128,7 +1121,8 @@ struct ptlrpc_body_v2 {
__u32 pb_conn_cnt;
__u32 pb_timeout; /* for req, the deadline, for rep, the service est */
__u32 pb_service_time; /* for rep, actual service time, also used for
- net_latency of req */
+ * net_latency of req
+ */
__u32 pb_limit;
__u64 pb_slv;
/* VBR: pre-versions */
@@ -1174,7 +1168,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
/* #define MSG_AT_SUPPORT 0x0008
* This was used in early prototypes of adaptive timeouts, and while there
* shouldn't be any users of that code there also isn't a need for using this
- * bits. Defer usage until at least 1.10 to avoid potential conflict. */
+ * bits. Defer usage until at least 1.10 to avoid potential conflict.
+ */
#define MSG_DELAY_REPLAY 0x0010
#define MSG_VERSION_REPLAY 0x0020
#define MSG_REQ_REPLAY_DONE 0x0040
@@ -1187,7 +1182,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define MSG_CONNECT_RECOVERING 0x00000001
#define MSG_CONNECT_RECONNECT 0x00000002
#define MSG_CONNECT_REPLAYABLE 0x00000004
-//#define MSG_CONNECT_PEER 0x8
+/*#define MSG_CONNECT_PEER 0x8 */
#define MSG_CONNECT_LIBCLIENT 0x00000010
#define MSG_CONNECT_INITIAL 0x00000020
#define MSG_CONNECT_ASYNC 0x00000040
@@ -1195,60 +1190,65 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */
/* Connect flags */
-#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
-#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
-#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
-#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
-#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
-#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
-#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
-#define OBD_CONNECT_ACL 0x80ULL /*access control lists */
-#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
+#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
+#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
+#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
+#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
+#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
+#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
+#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
+#define OBD_CONNECT_ACL 0x80ULL /*access control lists */
+#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/
-#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
-#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
-#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
+#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
+#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
+#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated.
*We do not support JOIN FILE
*anymore, reserve this flags
*just for preventing such bit
- *to be reused.*/
-#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
-#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
-#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */
+ *to be reused.
+ */
+#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
+#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
+#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */
#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */
-#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
-#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
-#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
-#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
-#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
-#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
-#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
+#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
+#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
+#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
+#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
+#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
+#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
+#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */
-#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
+#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL 0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */
-#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
-#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
-#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
-#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
+#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
+#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
+#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
+#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits
- * directory hash */
+ * directory hash
+ */
#define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
- * RPC error properly */
+ * RPC error properly
+ */
#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
- * finer space reservation */
+ * finer space reservation
+ */
#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
- * policy and 2.x server */
+ * policy and 2.x server
+ */
#define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
@@ -1264,61 +1264,19 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
* submit a small patch against EVERY branch that ONLY adds the new flag,
* updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
* flag to check_obd_connect_data(), and updates wiretests accordingly, so it
- * can be approved and landed easily to reserve the flag for future use. */
+ * can be approved and landed easily to reserve the flag for future use.
+ */
/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
* connection. It is a temporary bug fix for Imperative Recovery interop
* between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
- * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */
+ * 2.2 clients/servers is no longer needed. LU-1252/LU-1644.
+ */
#define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS
#define OCD_HAS_FLAG(ocd, flg) \
(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
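OCD_HAS_FLAG() above pastes the short flag name onto the OBD_CONNECT_ prefix and double-negates so callers always get 0 or 1 out of the 64-bit mask test. Typical usage looks like the following (the helper called in the body is hypothetical):

	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK))	/* tests OBD_CONNECT_GRANT_SHRINK */
		enable_grant_shrink(exp);	/* hypothetical helper */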
-#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE
-
-#define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
- OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
- OBD_CONNECT_IBITS | \
- OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
- OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
- OBD_CONNECT_RMT_CLIENT | \
- OBD_CONNECT_RMT_CLIENT_FORCE | \
- OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
- OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
- OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
- OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
- OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
- OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
- OBD_CONNECT_EINPROGRESS | \
- OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
- OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
- OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
- OBD_CONNECT_FLOCK_DEAD | \
- OBD_CONNECT_DISP_STRIPE)
-
-#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
- OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
- OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
- OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
- OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
- LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
- OBD_CONNECT_RMT_CLIENT | \
- OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
- OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
- OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
- OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
- OBD_CONNECT_MAX_EASIZE | \
- OBD_CONNECT_EINPROGRESS | \
- OBD_CONNECT_JOBSTATS | \
- OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
- OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
- OBD_CONNECT_PINGLESS)
-#define ECHO_CONNECT_SUPPORTED (0)
-#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
- OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
- OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)
-
/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
OBD_CONNECT_FULL20)
@@ -1334,7 +1292,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
/* This structure is used for both request and reply.
*
* If we eventually have separate connect data for different types, which we
- * almost certainly will, then perhaps we stick a union in here. */
+ * almost certainly will, then perhaps we stick a union in here.
+ */
struct obd_connect_data_v1 {
__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
__u32 ocd_version; /* lustre release version number */
@@ -1364,7 +1323,7 @@ struct obd_connect_data {
__u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
__u8 ocd_inodespace; /* log2 of the per-inode space consumption */
__u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
- __u32 ocd_unused; /* also fix lustre_swab_connect */
+ __u32 ocd_unused; /* also fix lustre_swab_connect */
__u64 ocd_transno; /* first transno from client to be replayed */
__u32 ocd_group; /* MDS group on OST */
__u32 ocd_cksum_types; /* supported checksum algorithms */
@@ -1374,7 +1333,8 @@ struct obd_connect_data {
/* Fields after ocd_maxbytes are only accessible by the receiver
* if the corresponding flag in ocd_connect_flags is set. Accessing
* any field after ocd_maxbytes on the receiver without a valid flag
- * may result in out-of-bound memory access and kernel oops. */
+ * may result in out-of-bound memory access and kernel oops.
+ */
__u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */
__u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */
__u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
@@ -1398,7 +1358,8 @@ struct obd_connect_data {
* with senior engineers before starting to use a new field. Then, submit
* a small patch against EVERY branch that ONLY adds the new field along with
* the matching OBD_CONNECT flag, so that can be approved and landed easily to
- * reserve the flag for future use. */
+ * reserve the flag for future use.
+ */
void lustre_swab_connect(struct obd_connect_data *ocd);
@@ -1408,18 +1369,18 @@ void lustre_swab_connect(struct obd_connect_data *ocd);
* Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
* algorithm and also the OBD_FL_CKSUM* flags.
*/
-typedef enum {
+enum cksum_type {
OBD_CKSUM_CRC32 = 0x00000001,
OBD_CKSUM_ADLER = 0x00000002,
OBD_CKSUM_CRC32C = 0x00000004,
-} cksum_type_t;
+};
/*
* OST requests: OBDO & OBD request records
*/
/* opcodes */
-typedef enum {
+enum ost_cmd {
OST_REPLY = 0, /* reply ? */
OST_GETATTR = 1,
OST_SETATTR = 2,
@@ -1440,14 +1401,14 @@ typedef enum {
OST_QUOTACTL = 19,
OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
OST_LAST_OPC
-} ost_cmd_t;
+};
#define OST_FIRST_OPC OST_REPLY
enum obdo_flags {
OBD_FL_INLINEDATA = 0x00000001,
OBD_FL_OBDMDEXISTS = 0x00000002,
OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
- OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
+ OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
@@ -1461,14 +1422,16 @@ enum obdo_flags {
OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
- OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
+ OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
* XXX: obsoleted - reserved for old
- * clients prior than 2.2 */
+ * clients prior to 2.2
+ */
OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
/* Note that while these checksum values are currently separate bits,
- * in 2.x we can actually allow all values from 1-31 if we wanted. */
+ * in 2.x we can actually allow all values from 1-31 if we wanted.
+ */
OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
OBD_FL_CKSUM_CRC32C,
@@ -1657,7 +1620,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
}
}
-#define OBD_MD_FLID (0x00000001ULL) /* object ID */
+#define OBD_MD_FLID (0x00000001ULL) /* object ID */
#define OBD_MD_FLATIME (0x00000002ULL) /* access time */
#define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
#define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
@@ -1683,22 +1646,23 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
- /* ->mds if epoch opens or closes */
+ /* ->mds if epoch opens or closes
+ */
#define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
#define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
#define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
-#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
+#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
-#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
+#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
-#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
+#define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
#define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
#define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
@@ -1707,7 +1671,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
* under lock; for xattr
* requests means the
- * client holds the lock */
+ * client holds the lock
+ */
#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
@@ -1727,7 +1692,8 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
/* don't forget obdo_fid which is way down at the bottom so it can
- * come after the definition of llog_cookie */
+ * come after the definition of llog_cookie
+ */
enum hss_valid {
HSS_SETMASK = 0x01,
@@ -1749,19 +1715,20 @@ void lustre_swab_obd_statfs(struct obd_statfs *os);
/* ost_body.data values for OST_BRW */
-#define OBD_BRW_READ 0x01
-#define OBD_BRW_WRITE 0x02
-#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
-#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
+#define OBD_BRW_READ 0x01
+#define OBD_BRW_WRITE 0x02
+#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
+#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
* transfer and is not accounted in
- * the grant. */
-#define OBD_BRW_CHECK 0x10
+ * the grant.
+ */
+#define OBD_BRW_CHECK 0x10
#define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
-#define OBD_BRW_GRANTED 0x40 /* the ost manages this */
-#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
-#define OBD_BRW_NOQUOTA 0x100
-#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
-#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
+#define OBD_BRW_GRANTED 0x40 /* the ost manages this */
+#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
+#define OBD_BRW_NOQUOTA 0x100
+#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
+#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
@@ -1775,7 +1742,8 @@ struct obd_ioobj {
struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
__u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
* now (PTLRPC_BULK_OPS_COUNT - 1) in
- * high 16 bits in 2.4 and later */
+ * high 16 bits in 2.4 and later
+ */
__u32 ioo_bufcnt; /* number of niobufs for this object */
};
@@ -1799,7 +1767,8 @@ void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
/* lock value block communicated between the filter and llite */
/* OST_LVB_ERR_INIT is needed because the return code in rc is
- * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
+ * negative, i.e. because ((MASK + rc) & MASK) != MASK.
+ */
#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
#define OST_LVB_IS_ERR(blocks) \
@@ -1836,23 +1805,12 @@ void lustre_swab_ost_lvb(struct ost_lvb *lvb);
* lquota data structures
*/
-#ifndef QUOTABLOCK_BITS
-#define QUOTABLOCK_BITS 10
-#endif
-
-#ifndef QUOTABLOCK_SIZE
-#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
-#endif
-
-#ifndef toqb
-#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
-#endif
-
 /* The lquota_id structure is a union of all the possible identifier types that
* can be used with quota, this includes:
* - 64-bit user ID
* - 64-bit group ID
- * - a FID which can be used for per-directory quota in the future */
+ * - a FID which can be used for per-directory quota in the future
+ */
union lquota_id {
struct lu_fid qid_fid; /* FID for per-directory quota */
__u64 qid_uid; /* user identifier */
@@ -1889,89 +1847,6 @@ do { \
Q_COPY(out, in, qc_dqblk); \
} while (0)
-/* Body of quota request used for quota acquire/release RPCs between quota
- * master (aka QMT) and slaves (ak QSD). */
-struct quota_body {
- struct lu_fid qb_fid; /* FID of global index packing the pool ID
- * and type (data or metadata) as well as
- * the quota type (user or group). */
- union lquota_id qb_id; /* uid or gid or directory FID */
- __u32 qb_flags; /* see below */
- __u32 qb_padding;
- __u64 qb_count; /* acquire/release count (kbytes/inodes) */
- __u64 qb_usage; /* current slave usage (kbytes/inodes) */
- __u64 qb_slv_ver; /* slave index file version */
- struct lustre_handle qb_lockh; /* per-ID lock handle */
- struct lustre_handle qb_glb_lockh; /* global lock handle */
- __u64 qb_padding1[4];
-};
-
-/* When the quota_body is used in the reply of quota global intent
- * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. */
-#define qb_slv_fid qb_fid
-/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
- * quota reply */
-#define qb_qunit qb_usage
-
-#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
-#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */
-#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
-#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
-
-void lustre_swab_quota_body(struct quota_body *b);
-
-/* Quota types currently supported */
-enum {
- LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */
- LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */
- LQUOTA_TYPE_MAX
-};
-
-/* There are 2 different resource types on which a quota limit can be enforced:
- * - inodes on the MDTs
- * - blocks on the OSTs */
-enum {
- LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
- LQUOTA_RES_DT = 0x02,
- LQUOTA_LAST_RES,
- LQUOTA_FIRST_RES = LQUOTA_RES_MD
-};
-
-#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
-
-/*
- * Space accounting support
- * Format of an accounting record, providing disk usage information for a given
- * user or group
- */
-struct lquota_acct_rec { /* 16 bytes */
- __u64 bspace; /* current space in use */
- __u64 ispace; /* current # inodes in use */
-};
-
-/*
- * Global quota index support
- * Format of a global record, providing global quota settings for a given quota
- * identifier
- */
-struct lquota_glb_rec { /* 32 bytes */
- __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
- __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
- __u64 qbr_time; /* grace time, in seconds */
- __u64 qbr_granted; /* how much is granted to slaves, in #inodes or
- * kbytes */
-};
-
-/*
- * Slave index support
- * Format of a slave record, recording how much space is granted to a given
- * slave
- */
-struct lquota_slv_rec { /* 8 bytes */
- __u64 qsr_granted; /* space granted to the slave for the key=ID,
- * in #inodes or kbytes */
-};
-
/* Data structures associated with the quota locks */
/* Glimpse descriptor used for the index & per-ID quota locks */
@@ -1985,9 +1860,6 @@ struct ldlm_gl_lquota_desc {
__u64 gl_pad2;
};
-#define gl_qunit gl_hardlimit /* current qunit value used when
- * glimpsing per-ID quota locks */
-
/* quota glimpse flags */
#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
@@ -2002,15 +1874,12 @@ struct lquota_lvb {
void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
-/* LVB used with global quota lock */
-#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
-
/* op codes */
-typedef enum {
+enum quota_cmd {
QUOTA_DQACQ = 601,
QUOTA_DQREL = 602,
QUOTA_LAST_OPC
-} quota_cmd_t;
+};
#define QUOTA_FIRST_OPC QUOTA_DQACQ
/*
@@ -2018,7 +1887,7 @@ typedef enum {
*/
/* opcodes */
-typedef enum {
+enum mds_cmd {
MDS_GETATTR = 33,
MDS_GETATTR_NAME = 34,
MDS_CLOSE = 35,
@@ -2049,23 +1918,15 @@ typedef enum {
MDS_HSM_CT_UNREGISTER = 60,
MDS_SWAP_LAYOUTS = 61,
MDS_LAST_OPC
-} mds_cmd_t;
+};
#define MDS_FIRST_OPC MDS_GETATTR
-/* opcodes for object update */
-typedef enum {
- UPDATE_OBJ = 1000,
- UPDATE_LAST_OPC
-} update_cmd_t;
-
-#define UPDATE_FIRST_OPC UPDATE_OBJ
-
/*
* Do not exceed 63
*/
-typedef enum {
+enum mdt_reint_cmd {
REINT_SETATTR = 1,
REINT_CREATE = 2,
REINT_LINK = 3,
@@ -2074,9 +1935,9 @@ typedef enum {
REINT_OPEN = 6,
REINT_SETXATTR = 7,
REINT_RMENTRY = 8,
-// REINT_WRITE = 9,
+/* REINT_WRITE = 9, */
REINT_MAX
-} mds_reint_t, mdt_reint_t;
+};
void lustre_swab_generic_32s(__u32 *val);
@@ -2097,7 +1958,8 @@ void lustre_swab_generic_32s(__u32 *val);
/* INODE LOCK PARTS */
#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
* was used to protect permission (mode,
- * owner, group etc) before 2.4. */
+ * owner, group etc) before 2.4.
+ */
#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
@@ -2110,7 +1972,8 @@ void lustre_swab_generic_32s(__u32 *val);
* For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together.
* For Remote directory, the master MDT, where the remote directory is, will
* grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
- * will grant LOOKUP_LOCK. */
+ * will grant LOOKUP_LOCK.
+ */
#define MDS_INODELOCK_PERM 0x000010
#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
@@ -2120,7 +1983,8 @@ void lustre_swab_generic_32s(__u32 *val);
/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
* but was moved into name[1] along with the OID to avoid consuming the
- * name[2,3] fields that need to be used for the quota id (also a FID). */
+ * name[2,3] fields that need to be used for the quota id (also a FID).
+ */
enum {
LUSTRE_RES_ID_SEQ_OFF = 0,
LUSTRE_RES_ID_VER_OID_OFF = 1,
@@ -2156,7 +2020,8 @@ enum md_op_flags {
#define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
/* these should be identical to their EXT4_*_FL counterparts, they are
- * redefined here only to avoid dragging in fs/ext4/ext4.h */
+ * redefined here only to avoid dragging in fs/ext4/ext4.h
+ */
#define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
#define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
#define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
@@ -2168,15 +2033,14 @@ enum md_op_flags {
* protocol equivalents of LDISKFS_*_FL values stored on disk, while
* the S_* flags are kernel-internal values that change between kernel
* versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
- * See b=16526 for a full history. */
+ * See b=16526 for a full history.
+ */
static inline int ll_ext_to_inode_flags(int flags)
{
return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
-#if defined(S_DIRSYNC)
((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
-#endif
((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
}
@@ -2185,9 +2049,7 @@ static inline int ll_inode_to_ext_flags(int iflags)
return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
-#if defined(S_DIRSYNC)
((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
-#endif
((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
}
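The two converters above translate between the on-wire LUSTRE_*_FL bits, which stay stable because they mirror the ext4 on-disk values, and the kernel-internal S_* inode bits, which may change between kernel versions; the #ifdef around S_DIRSYNC can go because modern kernels define it unconditionally. A standalone round-trip sketch using the wire values from this header (the S_* values below are stand-ins, since the real ones are kernel-internal):

#include <stdio.h>

/* wire values from this header */
#define LUSTRE_SYNC_FL		0x00000008
#define LUSTRE_IMMUTABLE_FL	0x00000010
#define LUSTRE_APPEND_FL	0x00000020

/* stand-ins for the kernel-internal S_* bits */
#define S_SYNC		(1 << 0)
#define S_APPEND	(1 << 1)
#define S_IMMUTABLE	(1 << 2)

static int ext_to_inode(int flags)
{
	return ((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
	       ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
	       ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0);
}

static int inode_to_ext(int iflags)
{
	return ((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
	       ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
	       ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0);
}

int main(void)
{
	int wire = LUSTRE_SYNC_FL | LUSTRE_APPEND_FL;

	/* per-bit translation makes the round trip lossless */
	printf("round trip ok: %d\n",
	       inode_to_ext(ext_to_inode(wire)) == wire);
	return 0;
}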
@@ -2207,9 +2069,10 @@ struct mdt_body {
__s64 ctime;
__u64 blocks; /* XID, in the case of MDS_READPAGE */
__u64 ioepoch;
- __u64 t_state; /* transient file state defined in
- * enum md_transient_state
- * was "ino" until 2.4.0 */
+ __u64 t_state; /* transient file state defined in
+ * enum md_transient_state
+ * was "ino" until 2.4.0
+ */
__u32 fsuid;
__u32 fsgid;
__u32 capability;
@@ -2219,7 +2082,7 @@ struct mdt_body {
__u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
__u32 rdev;
__u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
- __u32 unused2; /* was "generation" until 2.4.0 */
+ __u32 unused2; /* was "generation" until 2.4.0 */
__u32 suppgid;
__u32 eadatasize;
__u32 aclsize;
@@ -2256,7 +2119,8 @@ enum {
};
 /* inode access permission for remote user; the inode info is omitted
- * for client knows them. */
+ * since the client already knows it.
+ */
struct mdt_remote_perm {
__u32 rp_uid;
__u32 rp_gid;
@@ -2306,13 +2170,13 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
* since the client and MDS may run different kernels (see bug 13828)
* Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
*/
-#define MDS_ATTR_MODE 0x1ULL /* = 1 */
-#define MDS_ATTR_UID 0x2ULL /* = 2 */
-#define MDS_ATTR_GID 0x4ULL /* = 4 */
-#define MDS_ATTR_SIZE 0x8ULL /* = 8 */
-#define MDS_ATTR_ATIME 0x10ULL /* = 16 */
-#define MDS_ATTR_MTIME 0x20ULL /* = 32 */
-#define MDS_ATTR_CTIME 0x40ULL /* = 64 */
+#define MDS_ATTR_MODE 0x1ULL /* = 1 */
+#define MDS_ATTR_UID 0x2ULL /* = 2 */
+#define MDS_ATTR_GID 0x4ULL /* = 4 */
+#define MDS_ATTR_SIZE 0x8ULL /* = 8 */
+#define MDS_ATTR_ATIME 0x10ULL /* = 16 */
+#define MDS_ATTR_MTIME 0x20ULL /* = 32 */
+#define MDS_ATTR_CTIME 0x40ULL /* = 64 */
#define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
#define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
#define MDS_ATTR_FORCE 0x200ULL /* = 512, Not a change, but a change it */
@@ -2320,14 +2184,11 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
#define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
#define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
#define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
-#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, ie O_TRUNC */
+#define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path,
+ * ie O_TRUNC
+ */
#define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
-#ifndef FMODE_READ
-#define FMODE_READ 00000001
-#define FMODE_WRITE 00000002
-#endif
-
#define MDS_FMODE_CLOSED 00000000
#define MDS_FMODE_EXEC 00000004
/* IO Epoch is opened on a closed file. */
@@ -2354,9 +2215,10 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
* We do not support JOIN FILE
* anymore, reserve this flags
* just for preventing such bit
- * to be reused. */
+ * to be reused.
+ */
-#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
+#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
@@ -2409,7 +2271,8 @@ struct mdt_rec_create {
__u32 cr_bias;
/* use of helpers set/get_mrc_cr_flags() is needed to access
* 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to
- * extend cr_flags size without breaking 1.8 compat */
+ * extend cr_flags size without breaking 1.8 compat
+ */
__u32 cr_flags_l; /* for use with open, low 32 bits */
__u32 cr_flags_h; /* for use with open, high 32 bits */
__u32 cr_umask; /* umask for create */
@@ -2630,7 +2493,8 @@ enum seq_op {
#define LOV_MAX_UUID_BUFFER_SIZE 8192
/* The size of the buffer the lov/mdc reserves for the
* array of UUIDs returned by the MDS. With the current
- * protocol, this will limit the max number of OSTs per LOV */
+ * protocol, this will limit the max number of OSTs per LOV
+ */
#define LOV_DESC_MAGIC 0xB0CCDE5C
#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
@@ -2639,13 +2503,13 @@ enum seq_op {
/* LOV settings descriptor (should only contain static info) */
struct lov_desc {
__u32 ld_tgt_count; /* how many OBD's */
- __u32 ld_active_tgt_count; /* how many active */
- __u32 ld_default_stripe_count; /* how many objects are used */
- __u32 ld_pattern; /* default PATTERN_RAID0 */
- __u64 ld_default_stripe_size; /* in bytes */
- __u64 ld_default_stripe_offset; /* in bytes */
+ __u32 ld_active_tgt_count; /* how many active */
+ __u32 ld_default_stripe_count; /* how many objects are used */
+ __u32 ld_pattern; /* default PATTERN_RAID0 */
+ __u64 ld_default_stripe_size; /* in bytes */
+ __u64 ld_default_stripe_offset; /* in bytes */
__u32 ld_padding_0; /* unused */
- __u32 ld_qos_maxage; /* in second */
+ __u32 ld_qos_maxage; /* in second */
__u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
__u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
struct obd_uuid ld_uuid;
@@ -2659,7 +2523,7 @@ void lustre_swab_lov_desc(struct lov_desc *ld);
* LDLM requests:
*/
/* opcodes -- MUST be distinct from OST/MDS opcodes */
-typedef enum {
+enum ldlm_cmd {
LDLM_ENQUEUE = 101,
LDLM_CONVERT = 102,
LDLM_CANCEL = 103,
@@ -2668,7 +2532,7 @@ typedef enum {
LDLM_GL_CALLBACK = 106,
LDLM_SET_INFO = 107,
LDLM_LAST_OPC
-} ldlm_cmd_t;
+};
#define LDLM_FIRST_OPC LDLM_ENQUEUE
#define RES_NAME_SIZE 4
@@ -2687,7 +2551,7 @@ static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
}
/* lock types */
-typedef enum {
+enum ldlm_mode {
LCK_MINMODE = 0,
LCK_EX = 1,
LCK_PW = 2,
@@ -2698,17 +2562,17 @@ typedef enum {
LCK_GROUP = 64,
LCK_COS = 128,
LCK_MAXMODE
-} ldlm_mode_t;
+};
#define LCK_MODE_NUM 8
-typedef enum {
+enum ldlm_type {
LDLM_PLAIN = 10,
LDLM_EXTENT = 11,
LDLM_FLOCK = 12,
LDLM_IBITS = 13,
LDLM_MAX_TYPE
-} ldlm_type_t;
+};
#define LDLM_MIN_TYPE LDLM_PLAIN
@@ -2747,7 +2611,8 @@ struct ldlm_flock_wire {
* the first fields of the ldlm_flock structure because there is only
* one ldlm_swab routine to process the ldlm_policy_data_t union. if
* this ever changes we will need to swab the union differently based
- * on the resource type. */
+ * on the resource type.
+ */
typedef union {
struct ldlm_extent l_extent;
@@ -2768,15 +2633,15 @@ struct ldlm_intent {
void lustre_swab_ldlm_intent(struct ldlm_intent *i);
struct ldlm_resource_desc {
- ldlm_type_t lr_type;
+ enum ldlm_type lr_type;
__u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
struct ldlm_res_id lr_name;
};
struct ldlm_lock_desc {
struct ldlm_resource_desc l_resource;
- ldlm_mode_t l_req_mode;
- ldlm_mode_t l_granted_mode;
+ enum ldlm_mode l_req_mode;
+ enum ldlm_mode l_granted_mode;
ldlm_wire_policy_data_t l_policy_data;
};
@@ -2793,7 +2658,8 @@ struct ldlm_request {
void lustre_swab_ldlm_request(struct ldlm_request *rq);
/* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
- * Otherwise, 2 are available. */
+ * Otherwise, 2 are available.
+ */
#define ldlm_request_bufsize(count, type) \
({ \
int _avail = LDLM_LOCKREQ_HANDLES; \
@@ -2820,7 +2686,7 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r);
/*
* Opcodes for mountconf (mgs and mgc)
*/
-typedef enum {
+enum mgs_cmd {
MGS_CONNECT = 250,
MGS_DISCONNECT,
MGS_EXCEPTION, /* node died, etc. */
@@ -2829,7 +2695,7 @@ typedef enum {
MGS_SET_INFO,
MGS_CONFIG_READ,
MGS_LAST_OPC
-} mgs_cmd_t;
+};
#define MGS_FIRST_OPC MGS_CONNECT
#define MGS_PARAM_MAXLEN 1024
@@ -2918,13 +2784,13 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
* Opcodes for multiple servers.
*/
-typedef enum {
+enum obd_cmd {
OBD_PING = 400,
OBD_LOG_CANCEL,
OBD_QC_CALLBACK,
OBD_IDX_READ,
OBD_LAST_OPC
-} obd_cmd_t;
+};
#define OBD_FIRST_OPC OBD_PING
/* catalog of log objects */
@@ -2933,7 +2799,7 @@ typedef enum {
struct llog_logid {
struct ost_id lgl_oi;
__u32 lgl_ogen;
-} __attribute__((packed));
+} __packed;
/** Records written to the CATALOGS list */
#define CATLIST "CATALOGS"
@@ -2942,7 +2808,7 @@ struct llog_catid {
__u32 lci_padding1;
__u32 lci_padding2;
__u32 lci_padding3;
-} __attribute__((packed));
+} __packed;
/* Log data record types - there is no specific reason that these need to
* be related to the RPC opcodes, but no reason not to (may be handy later?)
@@ -2950,7 +2816,7 @@ struct llog_catid {
#define LLOG_OP_MAGIC 0x10600000
#define LLOG_OP_MASK 0xfff00000
-typedef enum {
+enum llog_op_type {
LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
/* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
@@ -2970,7 +2836,7 @@ typedef enum {
HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
-} llog_op_type;
+};
#define LLOG_REC_HDR_NEEDS_SWABBING(r) \
(((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
@@ -3006,7 +2872,7 @@ struct llog_logid_rec {
__u64 lid_padding2;
__u64 lid_padding3;
struct llog_rec_tail lid_tail;
-} __attribute__((packed));
+} __packed;
struct llog_unlink_rec {
struct llog_rec_hdr lur_hdr;
@@ -3014,7 +2880,7 @@ struct llog_unlink_rec {
__u32 lur_oseq;
__u32 lur_count;
struct llog_rec_tail lur_tail;
-} __attribute__((packed));
+} __packed;
struct llog_unlink64_rec {
struct llog_rec_hdr lur_hdr;
@@ -3024,7 +2890,7 @@ struct llog_unlink64_rec {
__u64 lur_padding2;
__u64 lur_padding3;
struct llog_rec_tail lur_tail;
-} __attribute__((packed));
+} __packed;
struct llog_setattr64_rec {
struct llog_rec_hdr lsr_hdr;
@@ -3035,7 +2901,7 @@ struct llog_setattr64_rec {
__u32 lsr_gid_h;
__u64 lsr_padding;
struct llog_rec_tail lsr_tail;
-} __attribute__((packed));
+} __packed;
struct llog_size_change_rec {
struct llog_rec_hdr lsc_hdr;
@@ -3045,16 +2911,7 @@ struct llog_size_change_rec {
__u64 lsc_padding2;
__u64 lsc_padding3;
struct llog_rec_tail lsc_tail;
-} __attribute__((packed));
-
-#define CHANGELOG_MAGIC 0xca103000
-
-/** \a changelog_rec_type's that can't be masked */
-#define CHANGELOG_MINMASK (1 << CL_MARK)
-/** bits covering all \a changelog_rec_type's */
-#define CHANGELOG_ALLMASK 0XFFFFFFFF
-/** default \a changelog_rec_type mask */
-#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE)
+} __packed;
/* changelog llog name, needed by client replicators */
#define CHANGELOG_CATALOG "changelog_catalog"
@@ -3062,22 +2919,20 @@ struct llog_size_change_rec {
struct changelog_setinfo {
__u64 cs_recno;
__u32 cs_id;
-} __attribute__((packed));
+} __packed;
/** changelog record */
struct llog_changelog_rec {
struct llog_rec_hdr cr_hdr;
struct changelog_rec cr;
struct llog_rec_tail cr_tail; /**< for_sizeof_only */
-} __attribute__((packed));
+} __packed;
struct llog_changelog_ext_rec {
struct llog_rec_hdr cr_hdr;
struct changelog_ext_rec cr;
struct llog_rec_tail cr_tail; /**< for_sizeof_only */
-} __attribute__((packed));
-
-#define CHANGELOG_USER_PREFIX "cl"
+} __packed;
struct llog_changelog_user_rec {
struct llog_rec_hdr cur_hdr;
@@ -3085,7 +2940,7 @@ struct llog_changelog_user_rec {
__u32 cur_padding;
__u64 cur_endrec;
struct llog_rec_tail cur_tail;
-} __attribute__((packed));
+} __packed;
enum agent_req_status {
ARS_WAITING,
@@ -3123,21 +2978,22 @@ struct llog_agent_req_rec {
struct llog_rec_hdr arr_hdr; /**< record header */
__u32 arr_status; /**< status of the request */
/* must match enum
- * agent_req_status */
+ * agent_req_status
+ */
__u32 arr_archive_id; /**< backend archive number */
__u64 arr_flags; /**< req flags */
- __u64 arr_compound_id; /**< compound cookie */
+ __u64 arr_compound_id; /**< compound cookie */
__u64 arr_req_create; /**< req. creation time */
__u64 arr_req_change; /**< req. status change time */
struct hsm_action_item arr_hai; /**< req. to the agent */
- struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */
-} __attribute__((packed));
+ struct llog_rec_tail arr_tail; /**< record tail for_sizeof_only */
+} __packed;
/* Old llog gen for compatibility */
struct llog_gen {
__u64 mnt_cnt;
__u64 conn_cnt;
-} __attribute__((packed));
+} __packed;
struct llog_gen_rec {
struct llog_rec_hdr lgr_hdr;
@@ -3175,19 +3031,21 @@ struct llog_log_hdr {
__u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
__u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
struct llog_rec_tail llh_tail;
-} __attribute__((packed));
+} __packed;
#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
llh->llh_bitmap_offset - \
sizeof(llh->llh_tail)) * 8)
-/** log cookies are used to reference a specific log file and a record therein */
+/** log cookies are used to reference a specific log file and a record
+ * therein
+ */
struct llog_cookie {
struct llog_logid lgc_lgl;
__u32 lgc_subsys;
__u32 lgc_index;
__u32 lgc_padding;
-} __attribute__((packed));
+} __packed;
/** llog protocol */
enum llogd_rpc_ops {
@@ -3196,7 +3054,7 @@ enum llogd_rpc_ops {
LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
LLOG_ORIGIN_HANDLE_CLOSE = 505,
- LLOG_ORIGIN_CONNECT = 506,
+ LLOG_ORIGIN_CONNECT = 506,
LLOG_CATINFO = 507, /* deprecated */
LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
@@ -3212,13 +3070,13 @@ struct llogd_body {
__u32 lgd_saved_index;
__u32 lgd_len;
__u64 lgd_cur_offset;
-} __attribute__((packed));
+} __packed;
struct llogd_conn_body {
struct llog_gen lgdc_gen;
struct llog_logid lgdc_logid;
__u32 lgdc_ctxt_idx;
-} __attribute__((packed));
+} __packed;
/* Note: 64-bit types are 64-bit aligned in structure */
struct obdo {
@@ -3245,17 +3103,18 @@ struct obdo {
__u64 o_ioepoch; /* epoch in ost writes */
__u32 o_stripe_idx; /* holds stripe idx */
__u32 o_parent_ver;
- struct lustre_handle o_handle; /* brw: lock handle to prolong
- * locks */
- struct llog_cookie o_lcookie; /* destroy: unlink cookie from
- * MDS */
+ struct lustre_handle o_handle; /* brw: lock handle to prolong locks
+ */
+ struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS
+ */
__u32 o_uid_h;
__u32 o_gid_h;
__u64 o_data_version; /* getattr: sum of iversion for
* each stripe.
* brw: grant space consumed on
- * the client for the write */
+ * the client for the write
+ */
__u64 o_padding_4;
__u64 o_padding_5;
__u64 o_padding_6;
@@ -3273,13 +3132,14 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
{
*wobdo = *lobdo;
wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
- if (ocd == NULL)
+ if (!ocd)
return;
if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
/* Currently OBD_FL_OSTID will only be used when 2.4 echo
- * client communicate with pre-2.4 server */
+ * client communicate with pre-2.4 server
+ */
wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
}
@@ -3292,7 +3152,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
__u32 local_flags = 0;
if (lobdo->o_valid & OBD_MD_FLFLAGS)
- local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
+ local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
*lobdo = *wobdo;
if (local_flags != 0) {
@@ -3300,7 +3160,7 @@ static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
lobdo->o_flags |= local_flags;
}
- if (ocd == NULL)
+ if (!ocd)
return;
if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
@@ -3349,100 +3209,14 @@ void dump_ioo(struct obd_ioobj *nb);
void dump_ost_body(struct ost_body *ob);
void dump_rcs(__u32 *rc);
-#define IDX_INFO_MAGIC 0x3D37CC37
-
-/* Index file transfer through the network. The server serializes the index into
- * a byte stream which is sent to the client via a bulk transfer */
-struct idx_info {
- __u32 ii_magic;
-
- /* reply: see idx_info_flags below */
- __u32 ii_flags;
-
- /* request & reply: number of lu_idxpage (to be) transferred */
- __u16 ii_count;
- __u16 ii_pad0;
-
- /* request: requested attributes passed down to the iterator API */
- __u32 ii_attrs;
-
- /* request & reply: index file identifier (FID) */
- struct lu_fid ii_fid;
-
- /* reply: version of the index file before starting to walk the index.
- * Please note that the version can be modified at any time during the
- * transfer */
- __u64 ii_version;
-
- /* request: hash to start with:
- * reply: hash of the first entry of the first lu_idxpage and hash
- * of the entry to read next if any */
- __u64 ii_hash_start;
- __u64 ii_hash_end;
-
- /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
- * set */
- __u16 ii_keysize;
-
- /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
- * is set */
- __u16 ii_recsize;
-
- __u32 ii_pad1;
- __u64 ii_pad2;
- __u64 ii_pad3;
-};
-
-void lustre_swab_idx_info(struct idx_info *ii);
-
-#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
-
-/* List of flags used in idx_info::ii_flags */
-enum idx_info_flags {
- II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
- II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
- II_FL_VARREC = 1 << 2, /* records can be of variable size */
- II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
-};
-
-#define LIP_MAGIC 0x8A6D6B6C
-
-/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
-struct lu_idxpage {
- /* 16-byte header */
- __u32 lip_magic;
- __u16 lip_flags;
- __u16 lip_nr; /* number of entries in the container */
- __u64 lip_pad0; /* additional padding for future use */
-
- /* key/record pairs are stored in the remaining 4080 bytes.
- * depending upon the flags in idx_info::ii_flags, each key/record
- * pair might be preceded by:
- * - a hash value
- * - the key size (II_FL_VARKEY is set)
- * - the record size (II_FL_VARREC is set)
- *
- * For the time being, we only support fixed-size key & record. */
- char lip_entries[0];
-};
-
-#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
-
-/* Gather all possible type associated with a 4KB container */
-union lu_page {
- struct lu_dirpage lp_dir; /* for MDS_READPAGE */
- struct lu_idxpage lp_idx; /* for OBD_IDX_READ */
- char lp_array[LU_PAGE_SIZE];
-};
-
/* security opcodes */
-typedef enum {
+enum sec_cmd {
SEC_CTX_INIT = 801,
SEC_CTX_INIT_CONT = 802,
SEC_CTX_FINI = 803,
SEC_LAST_OPC,
SEC_FIRST_OPC = SEC_CTX_INIT
-} sec_cmd_t;
+};
/*
* capa related definitions
@@ -3451,7 +3225,8 @@ typedef enum {
#define CAPA_HMAC_KEY_MAX_LEN 56
/* NB take care when changing the sequence of elements in this struct,
- * because the offset info is used in find_capa() */
+ * because the offset info is used in find_capa()
+ */
struct lustre_capa {
struct lu_fid lc_fid; /** fid */
__u64 lc_opc; /** operations allowed */
@@ -3463,7 +3238,7 @@ struct lustre_capa {
/* FIXME: y2038 time_t overflow: */
__u32 lc_expiry; /** expiry time (sec) */
__u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
-} __attribute__((packed));
+} __packed;
void lustre_swab_lustre_capa(struct lustre_capa *c);
@@ -3497,7 +3272,7 @@ struct lustre_capa_key {
__u32 lk_keyid; /**< key# */
__u32 lk_padding;
__u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
-} __attribute__((packed));
+} __packed;
/** The link ea holds 1 \a link_ea_entry for each hardlink */
#define LINK_EA_MAGIC 0x11EAF1DFUL
@@ -3518,7 +3293,7 @@ struct link_ea_entry {
unsigned char lee_reclen[2];
unsigned char lee_parent_fid[sizeof(struct lu_fid)];
char lee_name[0];
-} __attribute__((packed));
+} __packed;
/** fid2path request/reply structure */
struct getinfo_fid2path {
@@ -3527,7 +3302,7 @@ struct getinfo_fid2path {
__u32 gf_linkno;
__u32 gf_pathlen;
char gf_path[0];
-} __attribute__((packed));
+} __packed;
void lustre_swab_fid2path (struct getinfo_fid2path *gf);
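Since gf_path[0] is a zero-length tail array, callers allocate the header and the path buffer in one piece; a minimal sketch (helper name is illustrative):

static struct getinfo_fid2path *fid2path_alloc(__u32 pathlen)
{
        struct getinfo_fid2path *gf;

        gf = kzalloc(sizeof(*gf) + pathlen, GFP_KERNEL);
        if (gf)
                gf->gf_pathlen = pathlen;
        return gf;
}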
@@ -3558,7 +3333,7 @@ void lustre_swab_layout_intent(struct layout_intent *li);
*/
struct hsm_progress_kernel {
/* Field taken from struct hsm_progress */
- lustre_fid hpk_fid;
+ struct lu_fid hpk_fid;
__u64 hpk_cookie;
struct hsm_extent hpk_extent;
__u16 hpk_flags;
@@ -3567,7 +3342,7 @@ struct hsm_progress_kernel {
/* Additional fields */
__u64 hpk_data_version;
__u64 hpk_padding2;
-} __attribute__((packed));
+} __packed;
void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
void lustre_swab_hsm_current_action(struct hsm_current_action *action);
@@ -3576,92 +3351,6 @@ void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
void lustre_swab_hsm_request(struct hsm_request *hr);
-/**
- * These are object update opcode under UPDATE_OBJ, which is currently
- * being used by cross-ref operations between MDT.
- *
- * During the cross-ref operation, the Master MDT, which the client send the
- * request to, will disassembly the operation into object updates, then OSP
- * will send these updates to the remote MDT to be executed.
- *
- * Update request format
- * magic: UPDATE_BUFFER_MAGIC_V1
- * Count: How many updates in the req.
- * bufs[0] : following are packets of object.
- * update[0]:
- * type: object_update_op, the op code of update
- * fid: The object fid of the update.
- * lens/bufs: other parameters of the update.
- * update[1]:
- * type: object_update_op, the op code of update
- * fid: The object fid of the update.
- * lens/bufs: other parameters of the update.
- * ..........
- * update[7]: type: object_update_op, the op code of update
- * fid: The object fid of the update.
- * lens/bufs: other parameters of the update.
- * Current 8 maxim updates per object update request.
- *
- *******************************************************************
- * update reply format:
- *
- * ur_version: UPDATE_REPLY_V1
- * ur_count: The count of the reply, which is usually equal
- * to the number of updates in the request.
- * ur_lens: The reply lengths of each object update.
- *
- * replies: 1st update reply [4bytes_ret: other body]
- * 2nd update reply [4bytes_ret: other body]
- * .....
- * nth update reply [4bytes_ret: other body]
- *
- * For each reply of the update, the format would be
- * result(4 bytes):Other stuff
- */
-
-#define UPDATE_MAX_OPS 10
-#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001
-#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1
-#define UPDATE_BUF_COUNT 8
-enum object_update_op {
- OBJ_CREATE = 1,
- OBJ_DESTROY = 2,
- OBJ_REF_ADD = 3,
- OBJ_REF_DEL = 4,
- OBJ_ATTR_SET = 5,
- OBJ_ATTR_GET = 6,
- OBJ_XATTR_SET = 7,
- OBJ_XATTR_GET = 8,
- OBJ_INDEX_LOOKUP = 9,
- OBJ_INDEX_INSERT = 10,
- OBJ_INDEX_DELETE = 11,
- OBJ_LAST
-};
-
-struct update {
- __u32 u_type;
- __u32 u_batchid;
- struct lu_fid u_fid;
- __u32 u_lens[UPDATE_BUF_COUNT];
- __u32 u_bufs[0];
-};
-
-struct update_buf {
- __u32 ub_magic;
- __u32 ub_count;
- __u32 ub_bufs[0];
-};
-
-#define UPDATE_REPLY_V1 0x00BD0001
-struct update_reply {
- __u32 ur_version;
- __u32 ur_count;
- __u32 ur_lens[0];
-};
-
-void lustre_swab_update_buf(struct update_buf *ub);
-void lustre_swab_update_reply_buf(struct update_reply *ur);
-
/** layout swap request structure
* fid1 and fid2 are in mdt_body
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 2b4dd656d5f5..276906e646f5 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -85,9 +85,8 @@ struct obd_statfs {
__u32 os_namelen;
__u64 os_maxbytes;
__u32 os_state; /**< obd_statfs_state OS_STATE_* flag */
- __u32 os_fprecreated; /* objs available now to the caller */
- /* used in QoS code to find preferred
- * OSTs */
+ __u32 os_fprecreated; /* objs available now to the caller */
+ /* used in QoS code to find preferred OSTs */
__u32 os_spare2;
__u32 os_spare3;
__u32 os_spare4;
@@ -135,8 +134,9 @@ struct filter_fid_old {
/* Userspace should treat lu_fid as opaque, and only use the following methods
* to print or parse them. Other functions (e.g. compare, swab) could be moved
- * here from lustre_idl.h if needed. */
-typedef struct lu_fid lustre_fid;
+ * here from lustre_idl.h if needed.
+ */
+struct lu_fid;
/**
* Following struct for object attributes, that will be kept inode's EA.
@@ -266,7 +266,8 @@ struct ost_id {
/* Define O_LOV_DELAY_CREATE to be a mask that is not useful for regular
* files, but are unlikely to be used in practice and are not harmful if
* used incorrectly. O_NOCTTY and FASYNC are only meaningful for character
- * devices and are safe for use on new files (See LU-812, LU-4209). */
+ * devices and are safe for use on new files (See LU-812, LU-4209).
+ */
#define O_LOV_DELAY_CREATE (O_NOCTTY | FASYNC)
#define LL_FILE_IGNORE_LOCK 0x00000001
@@ -302,7 +303,8 @@ struct ost_id {
* The limit of 12 pages is somewhat arbitrary, but is a reasonably large
* allocation that is sufficient for the current generation of systems.
*
- * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1) */
+ * (max buffer size - lov+rpc header) / sizeof(struct lov_ost_data_v1)
+ */
#define LOV_MAX_STRIPE_COUNT 2000 /* ((12 * 4096 - 256) / 24) */
#define LOV_ALL_STRIPES 0xffff /* only valid for directories */
#define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
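For reference, the bound in the comment works out to (12 * 4096 - 256) / 24 = 48896 / 24 ≈ 2037 stripes, so LOV_MAX_STRIPE_COUNT is simply a round figure safely below the 12-page limit.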
@@ -323,9 +325,11 @@ struct lov_user_md_v1 { /* LOV EA user data (host-endian) */
__u16 lmm_stripe_count; /* num stripes in use for this object */
union {
__u16 lmm_stripe_offset; /* starting stripe offset in
- * lmm_objects, use when writing */
+ * lmm_objects, use when writing
+ */
__u16 lmm_layout_gen; /* layout generation number
- * used when reading */
+ * used when reading
+ */
};
struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
} __attribute__((packed, __may_alias__));
@@ -338,9 +342,11 @@ struct lov_user_md_v3 { /* LOV EA user data (host-endian) */
__u16 lmm_stripe_count; /* num stripes in use for this object */
union {
__u16 lmm_stripe_offset; /* starting stripe offset in
- * lmm_objects, use when writing */
+ * lmm_objects, use when writing
+ */
__u16 lmm_layout_gen; /* layout generation number
- * used when reading */
+ * used when reading
+ */
};
char lmm_pool_name[LOV_MAXPOOLNAME]; /* pool name */
struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
@@ -442,9 +448,13 @@ static inline void obd_str2uuid(struct obd_uuid *uuid, const char *tmp)
/* For printf's only, make sure uuid is terminated */
static inline char *obd_uuid2str(const struct obd_uuid *uuid)
{
+ if (!uuid)
+ return NULL;
+
if (uuid->uuid[sizeof(*uuid) - 1] != '\0') {
/* Obviously not safe, but for printfs, no real harm done...
- we're always null-terminated, even in a race. */
+ * we're always null-terminated, even in a race.
+ */
static char temp[sizeof(*uuid)];
memcpy(temp, uuid->uuid, sizeof(*uuid) - 1);
@@ -455,8 +465,9 @@ static inline char *obd_uuid2str(const struct obd_uuid *uuid)
}
/* Extract fsname from uuid (or target name) of a target
- e.g. (myfs-OST0007_UUID -> myfs)
- see also deuuidify. */
+ * e.g. (myfs-OST0007_UUID -> myfs)
+ * see also deuuidify.
+ */
static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
{
char *p;
@@ -465,11 +476,12 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
buf[buflen - 1] = '\0';
p = strrchr(buf, '-');
if (p)
- *p = '\0';
+ *p = '\0';
}
/* printf display format
- e.g. printf("file FID is "DFID"\n", PFID(fid)); */
+ * e.g. printf("file FID is "DFID"\n", PFID(fid));
+ */
#define FID_NOBRACE_LEN 40
#define FID_LEN (FID_NOBRACE_LEN + 2)
#define DFID_NOBRACE "%#llx:0x%x:0x%x"
@@ -480,7 +492,8 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
(fid)->f_ver
/* scanf input parse format -- strip '[' first.
- e.g. sscanf(fidstr, SFID, RFID(&fid)); */
+ * e.g. sscanf(fidstr, SFID, RFID(&fid));
+ */
#define SFID "0x%llx:0x%x:0x%x"
#define RFID(fid) \
&((fid)->f_seq), \
@@ -542,22 +555,6 @@ enum {
RMT_RGETFACL = 4
};
-#ifdef NEED_QUOTA_DEFS
-#ifndef QIF_BLIMITS
-#define QIF_BLIMITS 1
-#define QIF_SPACE 2
-#define QIF_ILIMITS 4
-#define QIF_INODES 8
-#define QIF_BTIME 16
-#define QIF_ITIME 32
-#define QIF_LIMITS (QIF_BLIMITS | QIF_ILIMITS)
-#define QIF_USAGE (QIF_SPACE | QIF_INODES)
-#define QIF_TIMES (QIF_BTIME | QIF_ITIME)
-#define QIF_ALL (QIF_LIMITS | QIF_USAGE | QIF_TIMES)
-#endif
-
-#endif /* !__KERNEL__ */
-
/* lustre volatile file support
* file name header: .^L^S^T^R:volatile"
*/
@@ -566,9 +563,9 @@ enum {
/* hdr + MDT index */
#define LUSTRE_VOLATILE_IDX LUSTRE_VOLATILE_HDR":%.4X:"
-typedef enum lustre_quota_version {
+enum lustre_quota_version {
LUSTRE_QUOTA_V2 = 1
-} lustre_quota_version_t;
+};
/* XXX: same as if_dqinfo struct in kernel */
struct obd_dqinfo {
@@ -698,7 +695,8 @@ static inline const char *changelog_type2str(int type)
#define CLF_HSM_LAST 15
/* Remove bits higher than _h, then extract the value
- * between _h and _l by shifting lower weigth to bit 0. */
+ * between _h and _l by shifting lower weight to bit 0.
+ */
#define CLF_GET_BITS(_b, _h, _l) (((_b << (CLF_HSM_LAST - _h)) & 0xFFFF) \
>> (CLF_HSM_LAST - _h + _l))
@@ -761,10 +759,10 @@ struct changelog_rec {
__u64 cr_prev; /**< last index for this target fid */
__u64 cr_time;
union {
- lustre_fid cr_tfid; /**< target fid */
+ struct lu_fid cr_tfid; /**< target fid */
__u32 cr_markerflags; /**< CL_MARK flags */
};
- lustre_fid cr_pfid; /**< parent fid */
+ struct lu_fid cr_pfid; /**< parent fid */
char cr_name[0]; /**< last element */
} __packed;
@@ -775,18 +773,19 @@ struct changelog_rec {
struct changelog_ext_rec {
__u16 cr_namelen;
__u16 cr_flags; /**< (flags & CLF_FLAGMASK) |
- CLF_EXT_VERSION */
+ * CLF_EXT_VERSION
+ */
__u32 cr_type; /**< \a changelog_rec_type */
__u64 cr_index; /**< changelog record number */
__u64 cr_prev; /**< last index for this target fid */
__u64 cr_time;
union {
- lustre_fid cr_tfid; /**< target fid */
+ struct lu_fid cr_tfid; /**< target fid */
__u32 cr_markerflags; /**< CL_MARK flags */
};
- lustre_fid cr_pfid; /**< target parent fid */
- lustre_fid cr_sfid; /**< source fid, or zero */
- lustre_fid cr_spfid; /**< source parent fid, or zero */
+ struct lu_fid cr_pfid; /**< target parent fid */
+ struct lu_fid cr_sfid; /**< source fid, or zero */
+ struct lu_fid cr_spfid; /**< source parent fid, or zero */
char cr_name[0]; /**< last element */
} __packed;
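cr_name is a flexible tail of cr_namelen bytes and is not guaranteed to be NUL-terminated, so consumers print it with an explicit length; an illustrative accessor:

static void changelog_print_name(const struct changelog_ext_rec *rec)
{
        pr_info("%.*s\n", (int)rec->cr_namelen, rec->cr_name);
}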
@@ -835,7 +834,8 @@ struct ioc_data_version {
};
#define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling
- version. Dirty caches are left unchanged. */
+ * version. Dirty caches are left unchanged.
+ */
#ifndef offsetof
# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
@@ -976,8 +976,8 @@ struct hsm_request {
};
struct hsm_user_item {
- lustre_fid hui_fid;
- struct hsm_extent hui_extent;
+ struct lu_fid hui_fid;
+ struct hsm_extent hui_extent;
} __packed;
struct hsm_user_request {
@@ -1046,8 +1046,8 @@ static inline char *hsm_copytool_action2name(enum hsm_copytool_action a)
struct hsm_action_item {
__u32 hai_len; /* valid size of this struct */
__u32 hai_action; /* hsm_copytool_action, but use known size */
- lustre_fid hai_fid; /* Lustre FID to operated on */
- lustre_fid hai_dfid; /* fid used for data access */
+ struct lu_fid hai_fid; /* Lustre FID to operate on */
+ struct lu_fid hai_dfid; /* fid used for data access */
struct hsm_extent hai_extent; /* byte range to operate on */
__u64 hai_cookie; /* action cookie from coordinator */
__u64 hai_gid; /* grouplock id */
@@ -1095,7 +1095,8 @@ struct hsm_action_list {
__u32 padding1;
char hal_fsname[0]; /* null-terminated */
/* struct hsm_action_item[hal_count] follows, aligned on 8-byte
- boundaries. See hai_zero */
+ * boundaries. See hai_zero
+ */
} __packed;
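Each hsm_action_item that follows hal_fsname is padded to an 8-byte boundary, so walking the list is pointer arithmetic over hai_len; a sketch under that assumption (upstream provides hai_first/hai_next helpers for this, per the hai_zero hint):

static struct hsm_action_item *hai_next_sketch(struct hsm_action_item *hai)
{
        return (struct hsm_action_item *)((char *)hai + ALIGN(hai->hai_len, 8));
}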
#ifndef HAVE_CFS_SIZE_ROUND
@@ -1157,7 +1158,7 @@ struct hsm_user_import {
#define HP_FLAG_RETRY 0x02
struct hsm_progress {
- lustre_fid hp_fid;
+ struct lu_fid hp_fid;
__u64 hp_cookie;
struct hsm_extent hp_extent;
__u16 hp_flags;
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index eb6b292b7b25..bb16ae980b98 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -50,12 +50,13 @@
#define LUSTRE_CFG_MAX_BUFCOUNT 8
#define LCFG_HDR_SIZE(count) \
- cfs_size_round(offsetof (struct lustre_cfg, lcfg_buflens[(count)]))
+ cfs_size_round(offsetof(struct lustre_cfg, lcfg_buflens[(count)]))
/** If the LCFG_REQUIRED bit is set in a configuration command,
* then the client is required to understand this parameter
* in order to mount the filesystem. If it does not understand
- * a REQUIRED command the client mount will fail. */
+ * a REQUIRED command the client mount will fail.
+ */
#define LCFG_REQUIRED 0x0001000
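LCFG_HDR_SIZE above leans on a common idiom: lcfg_buflens[] is the variable-length tail of struct lustre_cfg, so offsetof(..., lcfg_buflens[count]) is exactly the size of a header carrying count length slots. A hedged equivalent, using cfs_size_round as in the macro:

static size_t lcfg_hdr_bytes(__u32 count)
{
        /* same value as LCFG_HDR_SIZE(count) */
        return cfs_size_round(offsetof(struct lustre_cfg,
                                       lcfg_buflens[count]));
}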
enum lcfg_command_type {
@@ -87,9 +88,11 @@ enum lcfg_command_type {
LCFG_POOL_DEL = 0x00ce023, /**< destroy an ost pool name */
LCFG_SET_LDLM_TIMEOUT = 0x00ce030, /**< set ldlm_timeout */
LCFG_PRE_CLEANUP = 0x00cf031, /**< call type-specific pre
- * cleanup cleanup */
+ * cleanup
+ */
LCFG_SET_PARAM = 0x00ce032, /**< use set_param syntax to set
- *a proc parameters */
+ * a proc parameter
+ */
};
struct lustre_cfg_bufs {
@@ -128,7 +131,7 @@ static inline void lustre_cfg_bufs_set(struct lustre_cfg_bufs *bufs,
{
if (index >= LUSTRE_CFG_MAX_BUFCOUNT)
return;
- if (bufs == NULL)
+ if (!bufs)
return;
if (bufs->lcfg_bufcount <= index)
@@ -158,7 +161,6 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
int offset;
int bufcount;
- LASSERT (lcfg != NULL);
LASSERT (index >= 0);
bufcount = lcfg->lcfg_bufcount;
@@ -191,7 +193,7 @@ static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
return NULL;
s = lustre_cfg_buf(lcfg, index);
- if (s == NULL)
+ if (!s)
return NULL;
/*
@@ -252,10 +254,6 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd,
static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
{
- int len;
-
- len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens);
-
kfree(lcfg);
return;
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 7c6933ffc9c1..95fd36063f55 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -65,7 +65,8 @@
/****************** mount command *********************/
/* The lmd is only used internally by Lustre; mount simply passes
- everything as string options */
+ * everything as string options
+ */
#define LMD_MAGIC 0xbdacbd03
#define LMD_PARAMS_MAXLEN 4096
@@ -79,23 +80,26 @@ struct lustre_mount_data {
int lmd_recovery_time_soft;
int lmd_recovery_time_hard;
char *lmd_dev; /* device name */
- char *lmd_profile; /* client only */
+ char *lmd_profile; /* client only */
char *lmd_mgssec; /* sptlrpc flavor to mgs */
- char *lmd_opts; /* lustre mount options (as opposed to
- _device_ mount options) */
+ char *lmd_opts; /* lustre mount options (as opposed to
+ * _device_ mount options)
+ */
char *lmd_params; /* lustre params */
- __u32 *lmd_exclude; /* array of OSTs to ignore */
- char *lmd_mgs; /* MGS nid */
- char *lmd_osd_type; /* OSD type */
+ __u32 *lmd_exclude; /* array of OSTs to ignore */
+ char *lmd_mgs; /* MGS nid */
+ char *lmd_osd_type; /* OSD type */
};
#define LMD_FLG_SERVER 0x0001 /* Mounting a server */
#define LMD_FLG_CLIENT 0x0002 /* Mounting a client */
#define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */
#define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers,
- no other services */
-#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing
- existing MGS services */
+ * no other services
+ */
+#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers,
+ * reusing existing MGS services
+ */
#define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */
#define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */
#define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */
@@ -116,231 +120,6 @@ struct lustre_mount_data {
#define LR_EXPIRE_INTERVALS 16 /**< number of intervals to track transno */
#define ENOENT_VERSION 1 /** 'virtual' version of non-existent object */
-#define LR_SERVER_SIZE 512
-#define LR_CLIENT_START 8192
-#define LR_CLIENT_SIZE 128
-#if LR_CLIENT_START < LR_SERVER_SIZE
-#error "Can't have LR_CLIENT_START < LR_SERVER_SIZE"
-#endif
-
-/*
- * This limit is arbitrary (131072 clients on x86), but it is convenient to use
- * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation.
- * If we need more than 131072 clients (order-2 allocation on x86) then this
- * should become an array of single-page pointers that are allocated on demand.
- */
-#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8)
-#define LR_MAX_CLIENTS (128 * 1024UL)
-#else
-#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8)
-#endif
-
-/** COMPAT_146: this is an OST (temporary) */
-#define OBD_COMPAT_OST 0x00000002
-/** COMPAT_146: this is an MDT (temporary) */
-#define OBD_COMPAT_MDT 0x00000004
-/** 2.0 server, interop flag to show server version is changed */
-#define OBD_COMPAT_20 0x00000008
-
-/** MDS handles LOV_OBJID file */
-#define OBD_ROCOMPAT_LOVOBJID 0x00000001
-
-/** OST handles group subdirs */
-#define OBD_INCOMPAT_GROUPS 0x00000001
-/** this is an OST */
-#define OBD_INCOMPAT_OST 0x00000002
-/** this is an MDT */
-#define OBD_INCOMPAT_MDT 0x00000004
-/** common last_rvcd format */
-#define OBD_INCOMPAT_COMMON_LR 0x00000008
-/** FID is enabled */
-#define OBD_INCOMPAT_FID 0x00000010
-/** Size-on-MDS is enabled */
-#define OBD_INCOMPAT_SOM 0x00000020
-/** filesystem using iam format to store directory entries */
-#define OBD_INCOMPAT_IAM_DIR 0x00000040
-/** LMA attribute contains per-inode incompatible flags */
-#define OBD_INCOMPAT_LMA 0x00000080
-/** lmm_stripe_count has been shrunk from __u32 to __u16 and the remaining 16
- * bits are now used to store a generation. Once we start changing the layout
- * and bumping the generation, old versions expecting a 32-bit lmm_stripe_count
- * will be confused by interpreting stripe_count | gen << 16 as the actual
- * stripe count */
-#define OBD_INCOMPAT_LMM_VER 0x00000100
-/** multiple OI files for MDT */
-#define OBD_INCOMPAT_MULTI_OI 0x00000200
-
-/* Data stored per server at the head of the last_rcvd file. In le32 order.
- This should be common to filter_internal.h, lustre_mds.h */
-struct lr_server_data {
- __u8 lsd_uuid[40]; /* server UUID */
- __u64 lsd_last_transno; /* last completed transaction ID */
- __u64 lsd_compat14; /* reserved - compat with old last_rcvd */
- __u64 lsd_mount_count; /* incarnation number */
- __u32 lsd_feature_compat; /* compatible feature flags */
- __u32 lsd_feature_rocompat;/* read-only compatible feature flags */
- __u32 lsd_feature_incompat;/* incompatible feature flags */
- __u32 lsd_server_size; /* size of server data area */
- __u32 lsd_client_start; /* start of per-client data area */
- __u16 lsd_client_size; /* size of per-client data area */
- __u16 lsd_subdir_count; /* number of subdirectories for objects */
- __u64 lsd_catalog_oid; /* recovery catalog object id */
- __u32 lsd_catalog_ogen; /* recovery catalog inode generation */
- __u8 lsd_peeruuid[40]; /* UUID of MDS associated with this OST */
- __u32 lsd_osd_index; /* index number of OST in LOV */
- __u32 lsd_padding1; /* was lsd_mdt_index, unused in 2.4.0 */
- __u32 lsd_start_epoch; /* VBR: start epoch from last boot */
- /** transaction values since lsd_trans_table_time */
- __u64 lsd_trans_table[LR_EXPIRE_INTERVALS];
- /** start point of transno table below */
- __u32 lsd_trans_table_time; /* time of first slot in table above */
- __u32 lsd_expire_intervals; /* LR_EXPIRE_INTERVALS */
- __u8 lsd_padding[LR_SERVER_SIZE - 288];
-};
-
-/* Data stored per client in the last_rcvd file. In le32 order. */
-struct lsd_client_data {
- __u8 lcd_uuid[40]; /* client UUID */
- __u64 lcd_last_transno; /* last completed transaction ID */
- __u64 lcd_last_xid; /* xid for the last transaction */
- __u32 lcd_last_result; /* result from last RPC */
- __u32 lcd_last_data; /* per-op data (disposition for open &c.) */
- /* for MDS_CLOSE requests */
- __u64 lcd_last_close_transno; /* last completed transaction ID */
- __u64 lcd_last_close_xid; /* xid for the last transaction */
- __u32 lcd_last_close_result; /* result from last RPC */
- __u32 lcd_last_close_data; /* per-op data */
- /* VBR: last versions */
- __u64 lcd_pre_versions[4];
- __u32 lcd_last_epoch;
- /** orphans handling for delayed export rely on that */
- __u32 lcd_first_epoch;
- __u8 lcd_padding[LR_CLIENT_SIZE - 128];
-};
-
-/* bug20354: the lcd_uuid for export of clients may be wrong */
-static inline void check_lcd(char *obd_name, int index,
- struct lsd_client_data *lcd)
-{
- int length = sizeof(lcd->lcd_uuid);
-
- if (strnlen((char *)lcd->lcd_uuid, length) == length) {
- lcd->lcd_uuid[length - 1] = '\0';
-
- LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n",
- lcd->lcd_uuid, obd_name, index);
- }
-}
-
-/* last_rcvd handling */
-static inline void lsd_le_to_cpu(struct lr_server_data *buf,
- struct lr_server_data *lsd)
-{
- int i;
-
- memcpy(lsd->lsd_uuid, buf->lsd_uuid, sizeof(lsd->lsd_uuid));
- lsd->lsd_last_transno = le64_to_cpu(buf->lsd_last_transno);
- lsd->lsd_compat14 = le64_to_cpu(buf->lsd_compat14);
- lsd->lsd_mount_count = le64_to_cpu(buf->lsd_mount_count);
- lsd->lsd_feature_compat = le32_to_cpu(buf->lsd_feature_compat);
- lsd->lsd_feature_rocompat = le32_to_cpu(buf->lsd_feature_rocompat);
- lsd->lsd_feature_incompat = le32_to_cpu(buf->lsd_feature_incompat);
- lsd->lsd_server_size = le32_to_cpu(buf->lsd_server_size);
- lsd->lsd_client_start = le32_to_cpu(buf->lsd_client_start);
- lsd->lsd_client_size = le16_to_cpu(buf->lsd_client_size);
- lsd->lsd_subdir_count = le16_to_cpu(buf->lsd_subdir_count);
- lsd->lsd_catalog_oid = le64_to_cpu(buf->lsd_catalog_oid);
- lsd->lsd_catalog_ogen = le32_to_cpu(buf->lsd_catalog_ogen);
- memcpy(lsd->lsd_peeruuid, buf->lsd_peeruuid, sizeof(lsd->lsd_peeruuid));
- lsd->lsd_osd_index = le32_to_cpu(buf->lsd_osd_index);
- lsd->lsd_padding1 = le32_to_cpu(buf->lsd_padding1);
- lsd->lsd_start_epoch = le32_to_cpu(buf->lsd_start_epoch);
- for (i = 0; i < LR_EXPIRE_INTERVALS; i++)
- lsd->lsd_trans_table[i] = le64_to_cpu(buf->lsd_trans_table[i]);
- lsd->lsd_trans_table_time = le32_to_cpu(buf->lsd_trans_table_time);
- lsd->lsd_expire_intervals = le32_to_cpu(buf->lsd_expire_intervals);
-}
-
-static inline void lsd_cpu_to_le(struct lr_server_data *lsd,
- struct lr_server_data *buf)
-{
- int i;
-
- memcpy(buf->lsd_uuid, lsd->lsd_uuid, sizeof(buf->lsd_uuid));
- buf->lsd_last_transno = cpu_to_le64(lsd->lsd_last_transno);
- buf->lsd_compat14 = cpu_to_le64(lsd->lsd_compat14);
- buf->lsd_mount_count = cpu_to_le64(lsd->lsd_mount_count);
- buf->lsd_feature_compat = cpu_to_le32(lsd->lsd_feature_compat);
- buf->lsd_feature_rocompat = cpu_to_le32(lsd->lsd_feature_rocompat);
- buf->lsd_feature_incompat = cpu_to_le32(lsd->lsd_feature_incompat);
- buf->lsd_server_size = cpu_to_le32(lsd->lsd_server_size);
- buf->lsd_client_start = cpu_to_le32(lsd->lsd_client_start);
- buf->lsd_client_size = cpu_to_le16(lsd->lsd_client_size);
- buf->lsd_subdir_count = cpu_to_le16(lsd->lsd_subdir_count);
- buf->lsd_catalog_oid = cpu_to_le64(lsd->lsd_catalog_oid);
- buf->lsd_catalog_ogen = cpu_to_le32(lsd->lsd_catalog_ogen);
- memcpy(buf->lsd_peeruuid, lsd->lsd_peeruuid, sizeof(buf->lsd_peeruuid));
- buf->lsd_osd_index = cpu_to_le32(lsd->lsd_osd_index);
- buf->lsd_padding1 = cpu_to_le32(lsd->lsd_padding1);
- buf->lsd_start_epoch = cpu_to_le32(lsd->lsd_start_epoch);
- for (i = 0; i < LR_EXPIRE_INTERVALS; i++)
- buf->lsd_trans_table[i] = cpu_to_le64(lsd->lsd_trans_table[i]);
- buf->lsd_trans_table_time = cpu_to_le32(lsd->lsd_trans_table_time);
- buf->lsd_expire_intervals = cpu_to_le32(lsd->lsd_expire_intervals);
-}
-
-static inline void lcd_le_to_cpu(struct lsd_client_data *buf,
- struct lsd_client_data *lcd)
-{
- memcpy(lcd->lcd_uuid, buf->lcd_uuid, sizeof (lcd->lcd_uuid));
- lcd->lcd_last_transno = le64_to_cpu(buf->lcd_last_transno);
- lcd->lcd_last_xid = le64_to_cpu(buf->lcd_last_xid);
- lcd->lcd_last_result = le32_to_cpu(buf->lcd_last_result);
- lcd->lcd_last_data = le32_to_cpu(buf->lcd_last_data);
- lcd->lcd_last_close_transno = le64_to_cpu(buf->lcd_last_close_transno);
- lcd->lcd_last_close_xid = le64_to_cpu(buf->lcd_last_close_xid);
- lcd->lcd_last_close_result = le32_to_cpu(buf->lcd_last_close_result);
- lcd->lcd_last_close_data = le32_to_cpu(buf->lcd_last_close_data);
- lcd->lcd_pre_versions[0] = le64_to_cpu(buf->lcd_pre_versions[0]);
- lcd->lcd_pre_versions[1] = le64_to_cpu(buf->lcd_pre_versions[1]);
- lcd->lcd_pre_versions[2] = le64_to_cpu(buf->lcd_pre_versions[2]);
- lcd->lcd_pre_versions[3] = le64_to_cpu(buf->lcd_pre_versions[3]);
- lcd->lcd_last_epoch = le32_to_cpu(buf->lcd_last_epoch);
- lcd->lcd_first_epoch = le32_to_cpu(buf->lcd_first_epoch);
-}
-
-static inline void lcd_cpu_to_le(struct lsd_client_data *lcd,
- struct lsd_client_data *buf)
-{
- memcpy(buf->lcd_uuid, lcd->lcd_uuid, sizeof (lcd->lcd_uuid));
- buf->lcd_last_transno = cpu_to_le64(lcd->lcd_last_transno);
- buf->lcd_last_xid = cpu_to_le64(lcd->lcd_last_xid);
- buf->lcd_last_result = cpu_to_le32(lcd->lcd_last_result);
- buf->lcd_last_data = cpu_to_le32(lcd->lcd_last_data);
- buf->lcd_last_close_transno = cpu_to_le64(lcd->lcd_last_close_transno);
- buf->lcd_last_close_xid = cpu_to_le64(lcd->lcd_last_close_xid);
- buf->lcd_last_close_result = cpu_to_le32(lcd->lcd_last_close_result);
- buf->lcd_last_close_data = cpu_to_le32(lcd->lcd_last_close_data);
- buf->lcd_pre_versions[0] = cpu_to_le64(lcd->lcd_pre_versions[0]);
- buf->lcd_pre_versions[1] = cpu_to_le64(lcd->lcd_pre_versions[1]);
- buf->lcd_pre_versions[2] = cpu_to_le64(lcd->lcd_pre_versions[2]);
- buf->lcd_pre_versions[3] = cpu_to_le64(lcd->lcd_pre_versions[3]);
- buf->lcd_last_epoch = cpu_to_le32(lcd->lcd_last_epoch);
- buf->lcd_first_epoch = cpu_to_le32(lcd->lcd_first_epoch);
-}
-
-static inline __u64 lcd_last_transno(struct lsd_client_data *lcd)
-{
- return (lcd->lcd_last_transno > lcd->lcd_last_close_transno ?
- lcd->lcd_last_transno : lcd->lcd_last_close_transno);
-}
-
-static inline __u64 lcd_last_xid(struct lsd_client_data *lcd)
-{
- return (lcd->lcd_last_xid > lcd->lcd_last_close_xid ?
- lcd->lcd_last_xid : lcd->lcd_last_close_xid);
-}
-
/****************** superblock additional info *********************/
struct ll_sb_info;
@@ -360,7 +139,8 @@ struct lustre_sb_info {
char lsi_osd_type[16];
char lsi_fstype[16];
struct backing_dev_info lsi_bdi; /* each client mountpoint needs
- own backing_dev_info */
+ * own backing_dev_info
+ */
};
#define LSI_UMOUNT_FAILOVER 0x00200000
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 9b319f1df025..8b0364f71129 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -69,7 +69,7 @@ struct obd_device;
/**
* LDLM non-error return states
*/
-typedef enum {
+enum ldlm_error {
ELDLM_OK = 0,
ELDLM_LOCK_CHANGED = 300,
@@ -80,7 +80,7 @@ typedef enum {
ELDLM_NAMESPACE_EXISTS = 400,
ELDLM_BAD_NAMESPACE = 401
-} ldlm_error_t;
+};
/**
* LDLM namespace type.
@@ -145,16 +145,17 @@ typedef enum {
#define LCK_COMPAT_COS (LCK_COS)
/** @} Lock Compatibility Matrix */
-extern ldlm_mode_t lck_compat_array[];
+extern enum ldlm_mode lck_compat_array[];
-static inline void lockmode_verify(ldlm_mode_t mode)
+static inline void lockmode_verify(enum ldlm_mode mode)
{
- LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
+ LASSERT(mode > LCK_MINMODE && mode < LCK_MAXMODE);
}
-static inline int lockmode_compat(ldlm_mode_t exist_mode, ldlm_mode_t new_mode)
+static inline int lockmode_compat(enum ldlm_mode exist_mode,
+ enum ldlm_mode new_mode)
{
- return (lck_compat_array[exist_mode] & new_mode);
+ return (lck_compat_array[exist_mode] & new_mode);
}
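Lock modes are one-hot bit values (LCK_EX = 1, LCK_PW = 2, ...), so compatibility reduces to a single AND against the per-mode mask in lck_compat_array; an illustrative check, assuming the usual matrix in which two read locks coexist:

static int reads_coexist(void)
{
        /* non-zero if lck_compat_array[LCK_PR] has the LCK_PR bit set */
        return lockmode_compat(LCK_PR, LCK_PR);
}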
/*
@@ -249,7 +250,8 @@ struct ldlm_pool {
/** Current biggest client lock volume. Protected by pl_lock. */
__u64 pl_client_lock_volume;
/** Lock volume factor. SLV on client is calculated as following:
- * server_slv * lock_volume_factor. */
+ * server_slv * lock_volume_factor.
+ */
atomic_t pl_lock_volume_factor;
/** Time when last SLV from server was obtained. */
time64_t pl_recalc_time;
@@ -295,10 +297,10 @@ struct ldlm_valblock_ops {
* LDLM pools related, type of lock pool in the namespace.
* Greedy means release cached locks aggressively
*/
-typedef enum {
+enum ldlm_appetite {
LDLM_NAMESPACE_GREEDY = 1 << 0,
LDLM_NAMESPACE_MODEST = 1 << 1
-} ldlm_appetite_t;
+};
struct ldlm_ns_bucket {
/** back pointer to namespace */
@@ -317,7 +319,7 @@ enum {
LDLM_NSS_LAST
};
-typedef enum {
+enum ldlm_ns_type {
/** invalid type */
LDLM_NS_TYPE_UNKNOWN = 0,
/** mdc namespace */
@@ -332,7 +334,7 @@ typedef enum {
LDLM_NS_TYPE_MGC,
/** mgs namespace */
LDLM_NS_TYPE_MGT,
-} ldlm_ns_type_t;
+};
/**
* LDLM Namespace.
@@ -373,7 +375,7 @@ struct ldlm_namespace {
/**
* Namespace connect flags supported by server (may be changed via
- * /proc, LRU resize may be disabled/enabled).
+ * sysfs, LRU resize may be disabled/enabled).
*/
__u64 ns_connect_flags;
@@ -439,7 +441,7 @@ struct ldlm_namespace {
/** LDLM pool structure for this namespace */
struct ldlm_pool ns_pool;
/** Definition of how eagerly unused locks will be released from LRU */
- ldlm_appetite_t ns_appetite;
+ enum ldlm_appetite ns_appetite;
/** Limit of parallel AST RPC count. */
unsigned ns_max_parallel_ast;
@@ -465,7 +467,6 @@ struct ldlm_namespace {
*/
static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
{
- LASSERT(ns != NULL);
return !!(ns->ns_connect_flags & OBD_CONNECT_CANCELSET);
}
@@ -474,14 +475,12 @@ static inline int ns_connect_cancelset(struct ldlm_namespace *ns)
*/
static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
{
- LASSERT(ns != NULL);
return !!(ns->ns_connect_flags & OBD_CONNECT_LRU_RESIZE);
}
static inline void ns_register_cancel(struct ldlm_namespace *ns,
ldlm_cancel_for_recovery arg)
{
- LASSERT(ns != NULL);
ns->ns_cancel_for_recovery = arg;
}
@@ -503,7 +502,8 @@ struct ldlm_glimpse_work {
struct list_head gl_list; /* linkage to other gl work structs */
__u32 gl_flags;/* see LDLM_GL_WORK_* below */
union ldlm_gl_desc *gl_desc; /* glimpse descriptor to be packed in
- * glimpse callback request */
+ * glimpse callback request
+ */
};
/** The ldlm_glimpse_work is allocated on the stack and should not be freed. */
@@ -512,8 +512,9 @@ struct ldlm_glimpse_work {
/** Interval node data for each LDLM_EXTENT lock. */
struct ldlm_interval {
struct interval_node li_node; /* node for tree management */
- struct list_head li_group; /* the locks which have the same
- * policy - group of the policy */
+ struct list_head li_group; /* the locks which have the same
+ * policy - group of the policy
+ */
};
#define to_ldlm_interval(n) container_of(n, struct ldlm_interval, li_node)
@@ -527,7 +528,7 @@ struct ldlm_interval {
struct ldlm_interval_tree {
/** Tree size. */
int lit_size;
- ldlm_mode_t lit_mode; /* lock mode */
+ enum ldlm_mode lit_mode; /* lock mode */
struct interval_node *lit_root; /* actual ldlm_interval */
};
@@ -535,12 +536,13 @@ struct ldlm_interval_tree {
#define LUSTRE_TRACKS_LOCK_EXP_REFS (0)
/** Cancel flags. */
-typedef enum {
+enum ldlm_cancel_flags {
LCF_ASYNC = 0x1, /* Cancel locks asynchronously. */
LCF_LOCAL = 0x2, /* Cancel locks locally, not notifying server */
LCF_BL_AST = 0x4, /* Cancel locks marked as LDLM_FL_BL_AST
- * in the same RPC */
-} ldlm_cancel_flags_t;
+ * in the same RPC
+ */
+};
struct ldlm_flock {
__u64 start;
@@ -559,7 +561,7 @@ typedef union {
struct ldlm_inodebits l_inodebits;
} ldlm_policy_data_t;
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
+void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
const ldlm_wire_policy_data_t *wpolicy,
ldlm_policy_data_t *lpolicy);
@@ -637,11 +639,11 @@ struct ldlm_lock {
* Requested mode.
* Protected by lr_lock.
*/
- ldlm_mode_t l_req_mode;
+ enum ldlm_mode l_req_mode;
/**
* Granted mode, also protected by lr_lock.
*/
- ldlm_mode_t l_granted_mode;
+ enum ldlm_mode l_granted_mode;
/** Lock completion handler pointer. Called when lock is granted. */
ldlm_completion_callback l_completion_ast;
/**
@@ -841,20 +843,19 @@ struct ldlm_resource {
/**
* protected by lr_lock
- * @{ */
+ * @{
+ */
/** List of locks in granted state */
struct list_head lr_granted;
/**
* List of locks that could not be granted due to conflicts and
- * that are waiting for conflicts to go away */
+ * that are waiting for conflicts to go away
+ */
struct list_head lr_waiting;
/** @} */
- /* XXX No longer needed? Remove ASAP */
- ldlm_mode_t lr_most_restr;
-
/** Type of locks this resource can hold. Only one type per resource. */
- ldlm_type_t lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
+ enum ldlm_type lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
/** Resource name */
struct ldlm_res_id lr_name;
@@ -921,7 +922,7 @@ static inline int ldlm_lvbo_init(struct ldlm_resource *res)
{
struct ldlm_namespace *ns = ldlm_res_to_ns(res);
- if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_init != NULL)
+ if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init)
return ns->ns_lvbo->lvbo_init(res);
return 0;
@@ -931,7 +932,7 @@ static inline int ldlm_lvbo_size(struct ldlm_lock *lock)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- if (ns->ns_lvbo != NULL && ns->ns_lvbo->lvbo_size != NULL)
+ if (ns->ns_lvbo && ns->ns_lvbo->lvbo_size)
return ns->ns_lvbo->lvbo_size(lock);
return 0;
@@ -941,10 +942,9 @@ static inline int ldlm_lvbo_fill(struct ldlm_lock *lock, void *buf, int len)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- if (ns->ns_lvbo != NULL) {
- LASSERT(ns->ns_lvbo->lvbo_fill != NULL);
+ if (ns->ns_lvbo)
return ns->ns_lvbo->lvbo_fill(lock, buf, len);
- }
+
return 0;
}
@@ -1015,7 +1015,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
/** Non-rate-limited lock printing function for debugging purposes. */
#define LDLM_DEBUG(lock, fmt, a...) do { \
- if (likely(lock != NULL)) { \
+ if (likely(lock)) { \
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_DLMTRACE, NULL); \
ldlm_lock_debug(&msgdata, D_DLMTRACE, NULL, lock, \
"### " fmt, ##a); \
@@ -1025,7 +1025,7 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
} while (0)
typedef int (*ldlm_processing_policy)(struct ldlm_lock *lock, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list);
/**
@@ -1042,7 +1042,8 @@ typedef int (*ldlm_res_iterator_t)(struct ldlm_resource *, void *);
*
* LDLM provides for a way to iterate through every lock on a resource or
* namespace or every resource in a namespace.
- * @{ */
+ * @{
+ */
int ldlm_resource_iterate(struct ldlm_namespace *, const struct ldlm_res_id *,
ldlm_iterator_t iter, void *data);
/** @} ldlm_iterator */
@@ -1091,7 +1092,7 @@ ldlm_handle2lock_long(const struct lustre_handle *h, __u64 flags)
struct ldlm_lock *lock;
lock = __ldlm_handle2lock(h, flags);
- if (lock != NULL)
+ if (lock)
LDLM_LOCK_REF_DEL(lock);
return lock;
}
@@ -1111,7 +1112,7 @@ static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
return 0;
}
-int ldlm_error2errno(ldlm_error_t error);
+int ldlm_error2errno(enum ldlm_error error);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_dump_export_locks(struct obd_export *exp);
@@ -1168,12 +1169,13 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
void ldlm_lock_allow_match(struct ldlm_lock *lock);
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *, ldlm_type_t type,
- ldlm_policy_data_t *, ldlm_mode_t mode,
- struct lustre_handle *, int unref);
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
- __u64 *bits);
+enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+ const struct ldlm_res_id *,
+ enum ldlm_type type, ldlm_policy_data_t *,
+ enum ldlm_mode mode, struct lustre_handle *,
+ int unref);
+enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+ __u64 *bits);
void ldlm_lock_cancel(struct ldlm_lock *lock);
void ldlm_lock_dump_handle(int level, struct lustre_handle *);
void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
@@ -1181,8 +1183,8 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
/* resource.c */
struct ldlm_namespace *
ldlm_namespace_new(struct obd_device *obd, char *name,
- ldlm_side_t client, ldlm_appetite_t apt,
- ldlm_ns_type_t ns_type);
+ ldlm_side_t client, enum ldlm_appetite apt,
+ enum ldlm_ns_type ns_type);
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
@@ -1193,7 +1195,7 @@ void ldlm_debugfs_cleanup(void);
struct ldlm_resource *ldlm_resource_get(struct ldlm_namespace *ns,
struct ldlm_resource *parent,
const struct ldlm_res_id *,
- ldlm_type_t type, int create);
+ enum ldlm_type type, int create);
int ldlm_resource_putref(struct ldlm_resource *res);
void ldlm_resource_add_lock(struct ldlm_resource *res,
struct list_head *head,
@@ -1219,7 +1221,8 @@ int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
* These AST handlers are typically used for server-side local locks and are
* also used by client-side lock handlers to perform minimum level base
* processing.
- * @{ */
+ * @{
+ */
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data);
int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
/** @} ldlm_local_ast */
@@ -1227,7 +1230,8 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
/** \defgroup ldlm_cli_api API to operate on locks from actual LDLM users.
* These are typically used by client and server (*_local versions)
* to obtain and release locks.
- * @{ */
+ * @{
+ */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
struct ldlm_enqueue_info *einfo,
const struct ldlm_res_id *res_id,
@@ -1244,29 +1248,32 @@ int ldlm_prep_elc_req(struct obd_export *exp,
struct list_head *cancels, int count);
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
+ enum ldlm_type type, __u8 with_policy,
+ enum ldlm_mode mode,
__u64 *flags, void *lvb, __u32 lvb_len,
struct lustre_handle *lockh, int rc);
int ldlm_cli_update_pool(struct ptlrpc_request *req);
int ldlm_cli_cancel(struct lustre_handle *lockh,
- ldlm_cancel_flags_t cancel_flags);
+ enum ldlm_cancel_flags cancel_flags);
int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
- ldlm_cancel_flags_t flags, void *opaque);
+ enum ldlm_cancel_flags flags, void *opaque);
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
+ enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags,
void *opaque);
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 lock_flags,
- ldlm_cancel_flags_t cancel_flags, void *opaque);
+ enum ldlm_mode mode, __u64 lock_flags,
+ enum ldlm_cancel_flags cancel_flags,
+ void *opaque);
int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- ldlm_cancel_flags_t flags);
+ enum ldlm_cancel_flags flags);
int ldlm_cli_cancel_list(struct list_head *head, int count,
- struct ptlrpc_request *req, ldlm_cancel_flags_t flags);
+ struct ptlrpc_request *req,
+ enum ldlm_cancel_flags flags);
/** @} ldlm_cli_api */
/* mds/handler.c */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 0d3ed87d38e1..7f2ba2ffe0eb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -57,7 +57,8 @@
/**
* Server placed lock on granted list, or a recovering client wants the
- * lock added to the granted list, no questions asked. */
+ * lock added to the granted list, no questions asked.
+ */
#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL /* bit 1 */
#define ldlm_is_block_granted(_l) LDLM_TEST_FLAG((_l), 1ULL << 1)
#define ldlm_set_block_granted(_l) LDLM_SET_FLAG((_l), 1ULL << 1)
@@ -65,7 +66,8 @@
/**
* Server placed lock on conv list, or a recovering client wants the lock
- * added to the conv list, no questions asked. */
+ * added to the conv list, no questions asked.
+ */
#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL /* bit 2 */
#define ldlm_is_block_conv(_l) LDLM_TEST_FLAG((_l), 1ULL << 2)
#define ldlm_set_block_conv(_l) LDLM_SET_FLAG((_l), 1ULL << 2)
@@ -73,7 +75,8 @@
/**
* Server placed lock on wait list, or a recovering client wants the lock
- * added to the wait list, no questions asked. */
+ * added to the wait list, no questions asked.
+ */
#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL /* bit 3 */
#define ldlm_is_block_wait(_l) LDLM_TEST_FLAG((_l), 1ULL << 3)
#define ldlm_set_block_wait(_l) LDLM_SET_FLAG((_l), 1ULL << 3)
@@ -87,7 +90,8 @@
/**
* Lock is being replayed. This could probably be implied by the fact that
- * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
+ * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.
+ */
#define LDLM_FL_REPLAY 0x0000000000000100ULL /* bit 8 */
#define ldlm_is_replay(_l) LDLM_TEST_FLAG((_l), 1ULL << 8)
#define ldlm_set_replay(_l) LDLM_SET_FLAG((_l), 1ULL << 8)
@@ -125,7 +129,8 @@
/**
* Server told not to wait if blocked. For AGL, OST will not send glimpse
- * callback. */
+ * callback.
+ */
#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL /* bit 18 */
#define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG((_l), 1ULL << 18)
#define ldlm_set_block_nowait(_l) LDLM_SET_FLAG((_l), 1ULL << 18)
@@ -141,7 +146,8 @@
* Immediately cancel such locks when they block some other locks. Send
* cancel notification to original lock holder, but expect no reply. This
* is for clients (like liblustre) that cannot be expected to reliably
+ * respond to blocking AST.
+ * response to blocking AST.
+ */
#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL /* bit 23 */
#define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG((_l), 1ULL << 23)
#define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG((_l), 1ULL << 23)
@@ -164,7 +170,8 @@
/**
* Used for marking lock as a target for -EINTR while cp_ast sleep emulation
- * + race with upcoming bl_ast. */
+ * + race with upcoming bl_ast.
+ */
#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL /* bit 32 */
#define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG((_l), 1ULL << 32)
#define ldlm_set_fail_loc(_l) LDLM_SET_FLAG((_l), 1ULL << 32)
@@ -172,7 +179,8 @@
/**
* Used while processing the unused list to know that we have already
- * handled this lock and decided to skip it. */
+ * handled this lock and decided to skip it.
+ */
#define LDLM_FL_SKIPPED 0x0000000200000000ULL /* bit 33 */
#define ldlm_is_skipped(_l) LDLM_TEST_FLAG((_l), 1ULL << 33)
#define ldlm_set_skipped(_l) LDLM_SET_FLAG((_l), 1ULL << 33)
@@ -231,7 +239,8 @@
* The proper fix is to do the granting inside of the completion AST,
* which can be replaced with a LVB-aware wrapping function for OSC locks.
* That change is pretty high-risk, though, and would need a lot more
- * testing. */
+ * testing.
+ */
#define LDLM_FL_LVB_READY 0x0000020000000000ULL /* bit 41 */
#define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG((_l), 1ULL << 41)
#define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG((_l), 1ULL << 41)
@@ -243,7 +252,8 @@
* dirty pages. It can remain on the granted list during this whole time.
* Threads racing to update the KMS after performing their writeback need
* to know to exclude each other's locks from the calculation as they walk
- * the granted list. */
+ * the granted list.
+ */
#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL /* bit 42 */
#define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG((_l), 1ULL << 42)
#define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG((_l), 1ULL << 42)
@@ -263,7 +273,8 @@
/**
* optimization hint: LDLM can run blocking callback from current context
- * w/o involving separate thread. in order to decrease cs rate */
+ * w/o involving separate thread, in order to decrease cs rate
+ */
#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL /* bit 45 */
#define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG((_l), 1ULL << 45)
#define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG((_l), 1ULL << 45)
@@ -280,7 +291,8 @@
* LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
* dropped to let ldlm_callback_handler() return EINVAL to the server. It
* is used when ELC RPC is already prepared and is waiting for rpc_lock,
- * too late to send a separate CANCEL RPC. */
+ * too late to send a separate CANCEL RPC.
+ */
#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */
#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46)
#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46)
@@ -295,7 +307,8 @@
/**
* Don't put lock into the LRU list, so that it is not canceled due
* to aging. Used by MGC locks, they are cancelled only at unmount or
- * by callback. */
+ * by callback.
+ */
#define LDLM_FL_NO_LRU 0x0001000000000000ULL /* bit 48 */
#define ldlm_is_no_lru(_l) LDLM_TEST_FLAG((_l), 1ULL << 48)
#define ldlm_set_no_lru(_l) LDLM_SET_FLAG((_l), 1ULL << 48)
@@ -304,7 +317,8 @@
/**
* Set for locks that failed and where the server has been notified.
*
- * Protected by lock and resource locks. */
+ * Protected by lock and resource locks.
+ */
#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL /* bit 49 */
#define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG((_l), 1ULL << 49)
#define ldlm_set_fail_notified(_l) LDLM_SET_FLAG((_l), 1ULL << 49)
@@ -315,7 +329,8 @@
* be destroyed when last reference to them is released. Set by
* ldlm_lock_destroy_internal().
*
- * Protected by lock and resource locks. */
+ * Protected by lock and resource locks.
+ */
#define LDLM_FL_DESTROYED 0x0004000000000000ULL /* bit 50 */
#define ldlm_is_destroyed(_l) LDLM_TEST_FLAG((_l), 1ULL << 50)
#define ldlm_set_destroyed(_l) LDLM_SET_FLAG((_l), 1ULL << 50)
@@ -333,7 +348,8 @@
* NB: compared with check_res_locked(), checking this bit is cheaper.
* Also, spin_is_locked() is deprecated for kernel code; one reason is
* that it works only on SMP, so users need to add extra macros like
- * LASSERT_SPIN_LOCKED for uniprocessor kernels. */
+ * LASSERT_SPIN_LOCKED for uniprocessor kernels.
+ */
#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL /* bit 52 */
#define ldlm_is_res_locked(_l) LDLM_TEST_FLAG((_l), 1ULL << 52)
#define ldlm_set_res_locked(_l) LDLM_SET_FLAG((_l), 1ULL << 52)
@@ -343,7 +359,8 @@
* It's set once we call ldlm_add_waiting_lock_res_locked() to start the
* lock-timeout timer and it will never be reset.
*
- * Protected by lock and resource locks. */
+ * Protected by lock and resource locks.
+ */
#define LDLM_FL_WAITED 0x0020000000000000ULL /* bit 53 */
#define ldlm_is_waited(_l) LDLM_TEST_FLAG((_l), 1ULL << 53)
#define ldlm_set_waited(_l) LDLM_SET_FLAG((_l), 1ULL << 53)
@@ -365,10 +382,10 @@
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
/** set a ldlm_lock flag bit */
-#define LDLM_SET_FLAG(_l, _b) (((_l)->l_flags |= (_b))
+#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
/** clear a ldlm_lock flag bit */
-#define LDLM_CLEAR_FLAG(_l, _b) (((_l)->l_flags &= ~(_b))
+#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
/** Mask of flags inherited from parent lock when doing intents. */
#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
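/*
 * Editor's sketch (not part of the patch): with the parenthesis fix above,
 * LDLM_SET_FLAG() and LDLM_CLEAR_FLAG() compile again as plain
 * read-modify-write expressions on l_flags, so the generated per-flag
 * wrappers keep working unchanged. "lock" is a hypothetical
 * struct ldlm_lock pointer.
 */
static inline void example_mark_no_lru(struct ldlm_lock *lock)
{
	if (!ldlm_is_no_lru(lock))	/* LDLM_TEST_FLAG(lock, 1ULL << 48) */
		ldlm_set_no_lru(lock);	/* LDLM_SET_FLAG(lock, 1ULL << 48) */
}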
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 311e5aa9b0db..3014d27e6dc2 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -50,62 +50,6 @@
#include "lustre/lustre_idl.h"
#include "lustre_dlm.h"
-struct mds_client_data;
-struct mdt_client_data;
-struct mds_idmap_table;
-struct mdt_idmap_table;
-
-/**
- * Target-specific export data
- */
-struct tg_export_data {
- /** Protects led_lcd below */
- struct mutex ted_lcd_lock;
- /** Per-client data for each export */
- struct lsd_client_data *ted_lcd;
- /** Offset of record in last_rcvd file */
- loff_t ted_lr_off;
- /** Client index in last_rcvd file */
- int ted_lr_idx;
-};
-
-/**
- * MDT-specific export data
- */
-struct mdt_export_data {
- struct tg_export_data med_ted;
- /** List of all files opened by client on this MDT */
- struct list_head med_open_head;
- spinlock_t med_open_lock; /* med_open_head, mfd_list */
- /** Bitmask of all ibit locks this MDT understands */
- __u64 med_ibits_known;
- struct mutex med_idmap_mutex;
- struct lustre_idmap_table *med_idmap;
-};
-
-struct ec_export_data { /* echo client */
- struct list_head eced_locks;
-};
-
-/* In-memory access to client data from OST struct */
-/** Filter (oss-side) specific import data */
-struct filter_export_data {
- struct tg_export_data fed_ted;
- spinlock_t fed_lock; /**< protects fed_mod_list */
- long fed_dirty; /* in bytes */
- long fed_grant; /* in bytes */
- struct list_head fed_mod_list; /* files being modified */
- int fed_mod_count;/* items in fed_writing list */
- long fed_pending; /* bytes just being written */
- __u32 fed_group;
- __u8 fed_pagesize; /* log2 of client page size */
-};
-
-struct mgs_export_data {
- struct list_head med_clients; /* mgc fs client via this exp */
- spinlock_t med_lock; /* protect med_clients */
-};
-
enum obd_option {
OBD_OPT_FORCE = 0x0001,
OBD_OPT_FAILOVER = 0x0002,
@@ -179,7 +123,8 @@ struct obd_export {
*/
spinlock_t exp_lock;
/** Compatibility flags for this export are embedded into
- * exp_connect_data */
+ * exp_connect_data
+ */
struct obd_connect_data exp_connect_data;
enum obd_option exp_flags;
unsigned long exp_failed:1,
@@ -200,22 +145,8 @@ struct obd_export {
/** blocking dlm lock list, protected by exp_bl_list_lock */
struct list_head exp_bl_list;
spinlock_t exp_bl_list_lock;
-
- /** Target specific data */
- union {
- struct tg_export_data eu_target_data;
- struct mdt_export_data eu_mdt_data;
- struct filter_export_data eu_filter_data;
- struct ec_export_data eu_ec_data;
- struct mgs_export_data eu_mgs_data;
- } u;
};
-#define exp_target_data u.eu_target_data
-#define exp_mdt_data u.eu_mdt_data
-#define exp_filter_data u.eu_filter_data
-#define exp_ec_data u.eu_ec_data
-
static inline __u64 *exp_connect_flags_ptr(struct obd_export *exp)
{
return &exp->exp_connect_data.ocd_connect_flags;
@@ -228,7 +159,6 @@ static inline __u64 exp_connect_flags(struct obd_export *exp)
static inline int exp_max_brw_size(struct obd_export *exp)
{
- LASSERT(exp != NULL);
if (exp_connect_flags(exp) & OBD_CONNECT_BRW_SIZE)
return exp->exp_connect_data.ocd_brw_size;
@@ -242,19 +172,16 @@ static inline int exp_connect_multibulk(struct obd_export *exp)
static inline int exp_connect_cancelset(struct obd_export *exp)
{
- LASSERT(exp != NULL);
return !!(exp_connect_flags(exp) & OBD_CONNECT_CANCELSET);
}
static inline int exp_connect_lru_resize(struct obd_export *exp)
{
- LASSERT(exp != NULL);
return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE);
}
static inline int exp_connect_rmtclient(struct obd_export *exp)
{
- LASSERT(exp != NULL);
return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT);
}
@@ -268,14 +195,11 @@ static inline int client_is_remote(struct obd_export *exp)
static inline int exp_connect_vbr(struct obd_export *exp)
{
- LASSERT(exp != NULL);
- LASSERT(exp->exp_connection);
return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR);
}
static inline int exp_connect_som(struct obd_export *exp)
{
- LASSERT(exp != NULL);
return !!(exp_connect_flags(exp) & OBD_CONNECT_SOM);
}
@@ -288,7 +212,6 @@ static inline int imp_connect_lru_resize(struct obd_import *imp)
{
struct obd_connect_data *ocd;
- LASSERT(imp != NULL);
ocd = &imp->imp_connect_data;
return !!(ocd->ocd_connect_flags & OBD_CONNECT_LRU_RESIZE);
}
@@ -300,7 +223,6 @@ static inline int exp_connect_layout(struct obd_export *exp)
static inline bool exp_connect_lvb_type(struct obd_export *exp)
{
- LASSERT(exp != NULL);
if (exp_connect_flags(exp) & OBD_CONNECT_LVB_TYPE)
return true;
else
@@ -311,7 +233,6 @@ static inline bool imp_connect_lvb_type(struct obd_import *imp)
{
struct obd_connect_data *ocd;
- LASSERT(imp != NULL);
ocd = &imp->imp_connect_data;
if (ocd->ocd_connect_flags & OBD_CONNECT_LVB_TYPE)
return true;
@@ -331,13 +252,19 @@ static inline bool imp_connect_disp_stripe(struct obd_import *imp)
{
struct obd_connect_data *ocd;
- LASSERT(imp != NULL);
ocd = &imp->imp_connect_data;
return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE;
}
struct obd_export *class_conn2export(struct lustre_handle *conn);
+#define KKUC_CT_DATA_MAGIC 0x092013cea
+struct kkuc_ct_data {
+ __u32 kcd_magic;
+ struct obd_uuid kcd_uuid;
+ __u32 kcd_archive;
+};
+
/** @} export */
#endif /* __EXPORT_H */
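/*
 * Editor's sketch, assuming a typical sender of the new kkuc_ct_data:
 * the magic is filled in so the receiving side can sanity-check the
 * payload. "uuid" and "archive" are hypothetical inputs.
 */
static void example_fill_kcd(struct kkuc_ct_data *kcd,
			     const struct obd_uuid *uuid, __u32 archive)
{
	kcd->kcd_magic = KKUC_CT_DATA_MAGIC;	/* checked by the consumer */
	kcd->kcd_uuid = *uuid;
	kcd->kcd_archive = archive;
}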
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 9b1a9c695113..ab4a92390a43 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -251,7 +251,8 @@ static inline void lu_local_name_obj_fid(struct lu_fid *fid, __u32 oid)
/* For new FS (>= 2.4), the root FID will be changed to
* [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4),
- * the root FID will still be IGIF */
+ * the root FID will still be IGIF
+ */
static inline int fid_is_root(const struct lu_fid *fid)
{
return unlikely((fid_seq(fid) == FID_SEQ_ROOT &&
@@ -294,7 +295,8 @@ static inline int fid_is_namespace_visible(const struct lu_fid *fid)
const __u64 seq = fid_seq(fid);
/* Here, we cannot distinguish whether the normal FID is for OST
- * object or not. It is caller's duty to check more if needed. */
+ * object or not. It is caller's duty to check more if needed.
+ */
return (!fid_is_last_id(fid) &&
(fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) ||
fid_is_root(fid) || fid_is_dot_lustre(fid);
@@ -433,7 +435,7 @@ fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
*/
static inline struct ldlm_res_id *
fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
- struct ldlm_res_id *res)
+ struct ldlm_res_id *res)
{
fid_build_reg_res_name(glb_fid, res);
res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
@@ -516,7 +518,8 @@ static inline int ostid_res_name_eq(struct ost_id *oi,
struct ldlm_res_id *name)
{
/* Note: it is just a trick here to save some effort, probably the
- * correct way would be turn them into the FID and compare */
+ * correct way would be to turn them into FIDs and compare them.
+ */
if (fid_seq_is_mdt0(ostid_seq(oi))) {
return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) &&
name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi);
@@ -589,12 +592,14 @@ static inline __u64 fid_flatten(const struct lu_fid *fid)
static inline __u32 fid_hash(const struct lu_fid *f, int bits)
{
/* all objects with the same id and different versions will belong to the
- * collisions list. */
+ * same collision list.
+ */
return hash_long(fid_flatten(f), bits);
}
/**
- * map fid to 32 bit value for ino on 32bit systems. */
+ * map a fid to a 32-bit value for ino on 32-bit systems.
+ */
static inline __u32 fid_flatten32(const struct lu_fid *fid)
{
__u32 ino;
@@ -611,7 +616,8 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid)
* that inodes generated at about the same time have a reduced chance
* of collisions. This will give a period of 2^12 = 1024 unique clients
* (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
- * (from OID), or up to 128M inodes without collisions for new files. */
+ * (from OID), or up to 128M inodes without collisions for new files.
+ */
ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
(seq >> (64 - (40-8)) & 0xffffff00) +
(fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
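/*
 * Editor's sketch: fid_hash() above reduces a FID to hash_long() of its
 * flattened 64-bit form, so a table sized at 1 << bits buckets can be
 * indexed directly. "table" and "bits" are hypothetical.
 */
static inline struct hlist_head *
example_fid_bucket(struct hlist_head *table, int bits,
		   const struct lu_fid *fid)
{
	return &table[fid_hash(fid, bits)];	/* result is < (1 << bits) */
}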
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 551162624974..4cf2b0e61672 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -71,50 +71,41 @@ struct lu_fld_target {
struct lu_server_fld {
/**
* super sequence controller export, needed to forward fld
- * lookup request. */
+ * lookup request.
+ */
struct obd_export *lsf_control_exp;
- /**
- * Client FLD cache. */
+ /** Client FLD cache. */
struct fld_cache *lsf_cache;
- /**
- * Protect index modifications */
+ /** Protect index modifications */
struct mutex lsf_lock;
- /**
- * Fld service name in form "fld-srv-lustre-MDTXXX" */
+ /** Fld service name in form "fld-srv-lustre-MDTXXX" */
char lsf_name[LUSTRE_MDT_MAXNAMELEN];
};
struct lu_client_fld {
- /**
- * Client side debugfs entry. */
+ /** Client side debugfs entry. */
struct dentry *lcf_debugfs_entry;
- /**
- * List of exports client FLD knows about. */
+ /** List of exports client FLD knows about. */
struct list_head lcf_targets;
- /**
- * Current hash to be used to chose an export. */
+ /** Current hash to be used to choose an export. */
struct lu_fld_hash *lcf_hash;
- /**
- * Exports count. */
+ /** Exports count. */
int lcf_count;
- /**
- * Lock protecting exports list and fld_hash. */
+ /** Lock protecting exports list and fld_hash. */
spinlock_t lcf_lock;
- /**
- * Client FLD cache. */
+ /** Client FLD cache. */
struct fld_cache *lcf_cache;
- /**
- * Client fld debugfs entry name. */
+ /** Client fld debugfs entry name. */
char lcf_name[LUSTRE_MDT_MAXNAMELEN];
int lcf_flags;
diff --git a/drivers/staging/lustre/lustre/include/lustre_handles.h b/drivers/staging/lustre/lustre/include/lustre_handles.h
index f39780ae4c8a..27f169d2ed34 100644
--- a/drivers/staging/lustre/lustre/include/lustre_handles.h
+++ b/drivers/staging/lustre/lustre/include/lustre_handles.h
@@ -65,7 +65,8 @@ struct portals_handle_ops {
*
* Now you're able to assign the results of cookie2handle directly to an
* ldlm_lock. If it's not at the top, you'll want to use container_of()
- * to compute the start of the structure based on the handle field. */
+ * to compute the start of the structure based on the handle field.
+ */
struct portals_handle {
struct list_head h_link;
__u64 h_cookie;
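/*
 * Editor's sketch of the container_of() pattern the comment above
 * describes, for the case where the portals_handle is not the first
 * member. "struct example_obj" is hypothetical.
 */
struct example_obj {
	int			eo_state;
	struct portals_handle	eo_handle;	/* not at offset 0 */
};

static inline struct example_obj *
example_obj_from_handle(struct portals_handle *h)
{
	return container_of(h, struct example_obj, eo_handle);
}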
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 4e4230e94c11..dac2d84d8266 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -292,7 +292,8 @@ struct obd_import {
/* need IR MNE swab */
imp_need_mne_swab:1,
/* import must be reconnected instead of
- * chose new connection */
+ * choosing a new connection
+ */
imp_force_reconnect:1,
/* import has tried to connect with server */
imp_connect_tried:1;
diff --git a/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
new file mode 100644
index 000000000000..970610b6de89
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_kernelcomm.h
@@ -0,0 +1,55 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2013 Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ *
+ * Author: Nathan Rutman <nathan.rutman@sun.com>
+ *
+ * Kernel <-> userspace communication routines.
+ * The definitions below are used in the kernel and userspace.
+ */
+
+#ifndef __LUSTRE_KERNELCOMM_H__
+#define __LUSTRE_KERNELCOMM_H__
+
+/* For declarations shared with userspace */
+#include "uapi_kernelcomm.h"
+
+/* prototype for callback function on kuc groups */
+typedef int (*libcfs_kkuc_cb_t)(void *data, void *cb_arg);
+
+/* Kernel methods */
+int libcfs_kkuc_msg_put(struct file *fp, void *payload);
+int libcfs_kkuc_group_put(unsigned int group, void *payload);
+int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group,
+ void *data, size_t data_len);
+int libcfs_kkuc_group_rem(int uid, unsigned int group);
+int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func,
+ void *cb_arg);
+
+#endif /* __LUSTRE_KERNELCOMM_H__ */
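/*
 * Editor's usage sketch for the kuc API declared above: register a file
 * in a broadcast group, push one payload to the group, then drop the
 * registration. Group number, uid and buffers are hypothetical and
 * error handling is abbreviated.
 */
static int example_kkuc_roundtrip(struct file *fp, int uid,
				  void *data, size_t data_len, void *payload)
{
	const unsigned int group = 1;	/* hypothetical group number */
	int rc;

	rc = libcfs_kkuc_group_add(fp, uid, group, data, data_len);
	if (rc)
		return rc;

	rc = libcfs_kkuc_group_put(group, payload);
	libcfs_kkuc_group_rem(uid, group);
	return rc;
}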
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 428469fec534..f2223d55850a 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -153,9 +153,9 @@ struct obd_ioctl_data {
/* buffers the kernel will treat as user pointers */
__u32 ioc_plen1;
- char *ioc_pbuf1;
+ void __user *ioc_pbuf1;
__u32 ioc_plen2;
- char *ioc_pbuf2;
+ void __user *ioc_pbuf2;
/* inline buffers for various arguments */
__u32 ioc_inllen1;
@@ -252,8 +252,8 @@ static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
#include "obd_support.h"
/* function defined in lustre/obdclass/<platform>/<platform>-module.c */
-int obd_ioctl_getdata(char **buf, int *len, void *arg);
-int obd_ioctl_popdata(void *arg, void *data, int len);
+int obd_ioctl_getdata(char **buf, int *len, void __user *arg);
+int obd_ioctl_popdata(void __user *arg, void *data, int len);
static inline void obd_ioctl_freedata(char *buf, int len)
{
@@ -365,10 +365,10 @@ static inline void obd_ioctl_freedata(char *buf, int len)
/* OBD_IOC_LLOG_CATINFO is deprecated */
#define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE)
-#define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE)
+/* #define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE) */
+/* #define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE) */
+/* #define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE) */
+/* #define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE) */
#define OBD_IOC_GET_OBJ_VERSION _IOR('f', 210, OBD_IOC_DATA_TYPE)
@@ -387,7 +387,8 @@ static inline void obd_ioctl_freedata(char *buf, int len)
*/
/* Until such time as we get_info the per-stripe maximum from the OST,
- * we define this to be 2T - 4k, which is the ext3 maxbytes. */
+ * we define this to be 2T - 4k, which is the ext3 maxbytes.
+ */
#define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
/* Special values for remove LOV EA from disk */
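/*
 * Editor's arithmetic check of the limit above:
 *   2T - 4k = 2^41 - 2^12 = 0x20000000000 - 0x1000 = 0x1fffffff000,
 * which is exactly the LUSTRE_STRIPE_MAXBYTES value.
 */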
@@ -540,7 +541,7 @@ do { \
l_add_wait(&wq, &__wait); \
\
/* Block all signals (just the non-fatal ones if no timeout). */ \
- if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
+ if (info->lwi_on_signal && (__timeout == 0 || __allow_intr)) \
__blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
else \
__blocked = cfs_block_sigsinv(0); \
@@ -562,13 +563,13 @@ do { \
__timeout = cfs_time_sub(__timeout, \
cfs_time_sub(interval, remaining));\
if (__timeout == 0) { \
- if (info->lwi_on_timeout == NULL || \
+ if (!info->lwi_on_timeout || \
info->lwi_on_timeout(info->lwi_cb_data)) { \
ret = -ETIMEDOUT; \
break; \
} \
/* Take signals after the timeout expires. */ \
- if (info->lwi_on_signal != NULL) \
+ if (info->lwi_on_signal) \
(void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
} \
} \
@@ -578,7 +579,7 @@ do { \
if (condition) \
break; \
if (cfs_signal_pending()) { \
- if (info->lwi_on_signal != NULL && \
+ if (info->lwi_on_signal && \
(__timeout == 0 || __allow_intr)) { \
if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
info->lwi_on_signal(info->lwi_cb_data);\
diff --git a/drivers/staging/lustre/lustre/include/lustre_lite.h b/drivers/staging/lustre/lustre/include/lustre_lite.h
index f6d7aae3a0b8..fcc5ebbceed8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lite.h
@@ -53,56 +53,8 @@
#define LL_MAX_BLKSIZE_BITS (22)
#define LL_MAX_BLKSIZE (1UL<<LL_MAX_BLKSIZE_BITS)
-#include "lustre/lustre_user.h"
-
-struct lustre_rw_params {
- int lrp_lock_mode;
- ldlm_policy_data_t lrp_policy;
- u32 lrp_brw_flags;
- int lrp_ast_flags;
-};
-
-/*
- * XXX nikita: this function lives in the header because it is used by both
- * llite kernel module and liblustre library, and there is no (?) better place
- * to put it in.
- */
-static inline void lustre_build_lock_params(int cmd, unsigned long open_flags,
- __u64 connect_flags,
- loff_t pos, ssize_t len,
- struct lustre_rw_params *params)
-{
- params->lrp_lock_mode = (cmd == OBD_BRW_READ) ? LCK_PR : LCK_PW;
- params->lrp_brw_flags = 0;
-
- params->lrp_policy.l_extent.start = pos;
- params->lrp_policy.l_extent.end = pos + len - 1;
- /*
- * for now O_APPEND always takes local locks.
- */
- if (cmd == OBD_BRW_WRITE && (open_flags & O_APPEND)) {
- params->lrp_policy.l_extent.start = 0;
- params->lrp_policy.l_extent.end = OBD_OBJECT_EOF;
- } else if (LIBLUSTRE_CLIENT && (connect_flags & OBD_CONNECT_SRVLOCK)) {
- /*
- * liblustre: OST-side locking for all non-O_APPEND
- * reads/writes.
- */
- params->lrp_lock_mode = LCK_NL;
- params->lrp_brw_flags = OBD_BRW_SRVLOCK;
- } else {
- /*
- * nothing special for the kernel. In the future llite may use
- * OST-side locks for small writes into highly contended
- * files.
- */
- }
- params->lrp_ast_flags = (open_flags & O_NONBLOCK) ?
- LDLM_FL_BLOCK_NOWAIT : 0;
-}
-
/*
- * This is embedded into liblustre and llite super-blocks to keep track of
+ * This is embedded into llite super-blocks to keep track of
* connect flags (capabilities) supported by all imports a given mount is
* connected to.
*/
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index e4fc8b5e1336..49618e186824 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -241,7 +241,8 @@ struct llog_ctxt {
struct obd_llog_group *loc_olg; /* group containing that ctxt */
struct obd_export *loc_exp; /* parent "disk" export (e.g. MDS) */
struct obd_import *loc_imp; /* to use in RPCs; can be backward
- pointing import */
+ * pointing import
+ */
struct llog_operations *loc_logops;
struct llog_handle *loc_handle;
struct mutex loc_mutex; /* protect loc_imp */
@@ -255,7 +256,7 @@ struct llog_ctxt {
static inline int llog_handle2ops(struct llog_handle *loghandle,
struct llog_operations **lop)
{
- if (loghandle == NULL || loghandle->lgh_logops == NULL)
+ if (!loghandle || !loghandle->lgh_logops)
return -EINVAL;
*lop = loghandle->lgh_logops;
@@ -272,7 +273,7 @@ static inline struct llog_ctxt *llog_ctxt_get(struct llog_ctxt *ctxt)
static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
{
- if (ctxt == NULL)
+ if (!ctxt)
return;
LASSERT_ATOMIC_GT_LT(&ctxt->loc_refcount, 0, LI_POISON);
CDEBUG(D_INFO, "PUTting ctxt %p : new refcount %d\n", ctxt,
@@ -294,7 +295,7 @@ static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
spin_lock(&olg->olg_lock);
- if (olg->olg_ctxts[index] != NULL) {
+ if (olg->olg_ctxts[index]) {
spin_unlock(&olg->olg_lock);
return -EEXIST;
}
@@ -311,7 +312,7 @@ static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg,
LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
spin_lock(&olg->olg_lock);
- if (olg->olg_ctxts[index] == NULL)
+ if (!olg->olg_ctxts[index])
ctxt = NULL;
else
ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
@@ -335,7 +336,7 @@ static inline struct llog_ctxt *llog_get_context(struct obd_device *obd,
static inline int llog_group_ctxt_null(struct obd_llog_group *olg, int index)
{
- return (olg->olg_ctxts[index] == NULL);
+ return (!olg->olg_ctxts[index]);
}
static inline int llog_ctxt_null(struct obd_device *obd, int index)
@@ -354,7 +355,7 @@ static inline int llog_next_block(const struct lu_env *env,
rc = llog_handle2ops(loghandle, &lop);
if (rc)
return rc;
- if (lop->lop_next_block == NULL)
+ if (!lop->lop_next_block)
return -EOPNOTSUPP;
rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx,
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index 3da373315856..af77eb359c43 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -81,8 +81,8 @@ static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
struct lookup_intent *it)
{
- if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT))
+ if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT))
return;
/* This would normally block until the existing request finishes.
@@ -90,7 +90,8 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
* done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set
* it will only be cleared when all fake requests are finished.
* Only when all fake requests are finished can normal requests
- * be sent, to ensure they are recoverable again. */
+ * be sent, to ensure they are recoverable again.
+ */
again:
mutex_lock(&lck->rpcl_mutex);
@@ -105,22 +106,23 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
* just turned off but there are still requests in progress.
* Wait until they finish. It doesn't need to be efficient
* in this extremely rare case, just have low overhead in
- * the common case when it isn't true. */
+ * the common case when it isn't true.
+ */
while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
mutex_unlock(&lck->rpcl_mutex);
schedule_timeout(cfs_time_seconds(1) / 4);
goto again;
}
- LASSERT(lck->rpcl_it == NULL);
+ LASSERT(!lck->rpcl_it);
lck->rpcl_it = it;
}
static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
struct lookup_intent *it)
{
- if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
- it->it_op == IT_LAYOUT))
+ if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT))
return;
if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
@@ -153,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
if (cli->cl_max_mds_easize < body->max_mdsize) {
cli->cl_max_mds_easize = body->max_mdsize;
cli->cl_default_mds_easize =
- min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE);
+ min_t(__u32, body->max_mdsize, PAGE_SIZE);
}
if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
cli->cl_max_mds_cookiesize = body->max_cookiesize;
cli->cl_default_mds_cookiesize =
- min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE);
+ min_t(__u32, body->max_cookiesize, PAGE_SIZE);
}
}
}
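/*
 * Editor's sketch of the pairing the comments above describe: a
 * modifying RPC takes the rpc lock before sending and drops it once the
 * reply has been processed, so at most one modifying request per client
 * is in flight and replay stays recoverable. The send step is a
 * hypothetical placeholder.
 */
static void example_modifying_rpc(struct mdc_rpc_lock *lck,
				  struct lookup_intent *it)
{
	mdc_get_rpc_lock(lck, it);	/* no-op for GETATTR/LOOKUP/LAYOUT */
	/* ... send the request and wait for the reply here ... */
	mdc_put_rpc_lock(lck, it);
}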
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index d834ddd8183b..69586a522eb7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -76,7 +76,8 @@
* In order for the client and server to properly negotiate the maximum
* possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
* value. The client is free to limit the actual RPC size for any bulk
- * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
+ * transfer via cl_max_pages_per_rpc to some non-power-of-two value.
+ */
#define PTLRPC_BULK_OPS_BITS 2
#define PTLRPC_BULK_OPS_COUNT (1U << PTLRPC_BULK_OPS_BITS)
/**
@@ -85,7 +86,8 @@
* protocol limitation on the maximum RPC size that can be used by any
* RPC sent to that server in the future. Instead, the server should
* use the negotiated per-client ocd_brw_size to determine the bulk
- * RPC count. */
+ * RPC count.
+ */
#define PTLRPC_BULK_OPS_MASK (~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
/**
@@ -97,21 +99,21 @@
*/
#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
+#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
@@ -419,16 +421,18 @@ struct ptlrpc_reply_state {
/** A spinlock to protect the reply state flags */
spinlock_t rs_lock;
/** Reply state flags */
- unsigned long rs_difficult:1; /* ACK/commit stuff */
+ unsigned long rs_difficult:1; /* ACK/commit stuff */
unsigned long rs_no_ack:1; /* no ACK, even for
- difficult requests */
+ * difficult requests
+ */
unsigned long rs_scheduled:1; /* being handled? */
unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
unsigned long rs_handled:1; /* been handled yet? */
unsigned long rs_on_net:1; /* reply_out_callback pending? */
unsigned long rs_prealloc:1; /* rs from prealloc list */
unsigned long rs_committed:1;/* the transaction was committed
- * and the rs was dispatched */
+ * and the rs was dispatched
+ */
/** Size of the state */
int rs_size;
/** opcode */
@@ -463,7 +467,7 @@ struct ptlrpc_reply_state {
/** Handles of locks awaiting client reply ACK */
struct lustre_handle rs_locks[RS_MAX_LOCKS];
/** Lock modes of locks in \a rs_locks */
- ldlm_mode_t rs_modes[RS_MAX_LOCKS];
+ enum ldlm_mode rs_modes[RS_MAX_LOCKS];
};
struct ptlrpc_thread;
@@ -1181,7 +1185,7 @@ struct nrs_fifo_req {
* purpose of this object is to hold references to the request's resources
* for the lifetime of the request, and to hold properties that policies use
* for determining the request's scheduling priority.
- * */
+ */
struct ptlrpc_nrs_request {
/**
* The request's resource hierarchy.
@@ -1321,15 +1325,17 @@ struct ptlrpc_request {
/* do not resend request on -EINPROGRESS */
rq_no_retry_einprogress:1,
/* allow the req to be sent if the import is in recovery
- * status */
+ * status
+ */
rq_allow_replay:1;
unsigned int rq_nr_resend;
enum rq_phase rq_phase; /* one of RQ_PHASE_* */
enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
- atomic_t rq_refcount;/* client-side refcount for SENT race,
- server-side refcount for multiple replies */
+ atomic_t rq_refcount; /* client-side refcount for SENT race,
+ * server-side refcount for multiple replies
+ */
/** Portal to which this request would be sent */
short rq_request_portal; /* XXX FIXME bug 249 */
@@ -1363,7 +1369,8 @@ struct ptlrpc_request {
/**
* security and encryption data
- * @{ */
+ * @{
+ */
struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
struct list_head rq_ctx_chain; /**< link to waited ctx */
@@ -1477,7 +1484,8 @@ struct ptlrpc_request {
/** when request must finish. volatile
* so that servers' early reply updates to the deadline aren't
- * kept in per-cpu cache */
+ * kept in per-cpu cache
+ */
volatile time64_t rq_deadline;
/** when req reply unlink must finish. */
time64_t rq_reply_deadline;
@@ -1518,7 +1526,7 @@ struct ptlrpc_request {
static inline int ptlrpc_req_interpret(const struct lu_env *env,
struct ptlrpc_request *req, int rc)
{
- if (req->rq_interpret_reply != NULL) {
+ if (req->rq_interpret_reply) {
req->rq_status = req->rq_interpret_reply(env, req,
&req->rq_async_args,
rc);
@@ -1678,7 +1686,8 @@ do { \
/**
* This is the debug print function you need to use to print request structure
* content into the lustre debug log.
- * for most callers (level is a constant) this is resolved at compile time */
+ * For most callers (level is a constant) this is resolved at compile time.
+ */
#define DEBUG_REQ(level, req, fmt, args...) \
do { \
if ((level) & (D_ERROR | D_WARNING)) { \
@@ -1947,7 +1956,7 @@ struct ptlrpc_service_ops {
* or general metadata service for MDS.
*/
struct ptlrpc_service {
- /** serialize /proc operations */
+ /** serialize sysfs operations */
spinlock_t srv_lock;
/** most often accessed fields */
/** chain thru all services */
@@ -2101,7 +2110,8 @@ struct ptlrpc_service_part {
/** NRS head for regular requests */
struct ptlrpc_nrs scp_nrs_reg;
/** NRS head for HP requests; this is only valid for services that can
- * handle HP requests */
+ * handle HP requests
+ */
struct ptlrpc_nrs *scp_nrs_hp;
/** AT stuff */
@@ -2141,8 +2151,8 @@ struct ptlrpc_service_part {
#define ptlrpc_service_for_each_part(part, i, svc) \
for (i = 0; \
i < (svc)->srv_ncpts && \
- (svc)->srv_parts != NULL && \
- ((part) = (svc)->srv_parts[i]) != NULL; i++)
+ (svc)->srv_parts && \
+ ((part) = (svc)->srv_parts[i]); i++)
/**
* Declaration of ptlrpcd control structure
@@ -2259,7 +2269,6 @@ static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
const struct ptlrpc_nrs_pol_desc *desc)
{
- LASSERT(desc->pd_compat_svc_name != NULL);
return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
}
@@ -2303,7 +2312,6 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
struct ptlrpc_bulk_desc *desc;
int rc;
- LASSERT(req != NULL);
desc = req->rq_bulk;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
@@ -2374,14 +2382,14 @@ void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
const struct req_format *format);
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
- struct ptlrpc_request_pool *,
- const struct req_format *format);
+ struct ptlrpc_request_pool *,
+ const struct req_format *);
void ptlrpc_request_free(struct ptlrpc_request *request);
int ptlrpc_request_pack(struct ptlrpc_request *request,
__u32 version, int opcode);
-struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
- const struct req_format *format,
- __u32 version, int opcode);
+struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *,
+ const struct req_format *,
+ __u32, int);
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
__u32 version, int opcode, char **bufs,
struct ptlrpc_cli_ctx *ctx);
@@ -2462,7 +2470,8 @@ struct ptlrpc_service_thr_conf {
/* "soft" limit for total threads number */
unsigned int tc_nthrs_max;
/* user-specified thread count; it will be validated against the
- * other members of this structure. */
+ * other members of this structure.
+ */
unsigned int tc_nthrs_user;
/* set NUMA node affinity for service threads */
unsigned int tc_cpu_affinity;
@@ -2500,14 +2509,12 @@ struct ptlrpc_service_conf {
*/
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
-struct ptlrpc_service *ptlrpc_register_service(
- struct ptlrpc_service_conf *conf,
- struct kset *parent,
- struct dentry *debugfs_entry);
+struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf,
+ struct kset *parent,
+ struct dentry *debugfs_entry);
int ptlrpc_start_threads(struct ptlrpc_service *svc);
int ptlrpc_unregister_service(struct ptlrpc_service *service);
-int liblustre_check_services(void *arg);
int ptlrpc_hr_init(void);
void ptlrpc_hr_fini(void);
@@ -2536,7 +2543,7 @@ int ptlrpc_reconnect_import(struct obd_import *imp);
int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
int index);
void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
- int index);
+ int index);
int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
@@ -2726,7 +2733,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
static inline void
ptlrpc_client_wake_req(struct ptlrpc_request *req)
{
- if (req->rq_set == NULL)
+ if (!req->rq_set)
wake_up(&req->rq_reply_waitq);
else
wake_up(&req->rq_set->set_waitq);
@@ -2750,7 +2757,7 @@ ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
/* Should only be called once per req */
static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
{
- if (req->rq_reply_state == NULL)
+ if (!req->rq_reply_state)
return; /* shouldn't occur */
ptlrpc_rs_decref(req->rq_reply_state);
req->rq_reply_state = NULL;
@@ -2807,7 +2814,6 @@ ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
static inline struct ptlrpc_service *
ptlrpc_req2svc(struct ptlrpc_request *req)
{
- LASSERT(req->rq_rqbd != NULL);
return req->rq_rqbd->rqbd_svcpt->scp_service;
}
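/*
 * Editor's sketch: the NULL-test folding in
 * ptlrpc_service_for_each_part() above leaves the usual iteration
 * pattern unchanged. A hypothetical walk over all CPU partitions of a
 * service still reads:
 */
static void example_walk_service(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *svcpt;
	int i;

	ptlrpc_service_for_each_part(svcpt, i, svc) {
		/* per-partition work, e.g. look at svcpt->scp_nrs_reg */
	}
}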
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index 46a662f89322..b2e67fcf9ef1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -107,8 +107,8 @@ void req_capsule_set_size(struct req_capsule *pill,
const struct req_msg_field *field,
enum req_location loc, int size);
int req_capsule_get_size(const struct req_capsule *pill,
- const struct req_msg_field *field,
- enum req_location loc);
+ const struct req_msg_field *field,
+ enum req_location loc);
int req_capsule_msg_size(struct req_capsule *pill, enum req_location loc);
int req_capsule_fmt_size(__u32 magic, const struct req_format *fmt,
enum req_location loc);
@@ -130,7 +130,6 @@ void req_layout_fini(void);
extern struct req_format RQF_OBD_PING;
extern struct req_format RQF_OBD_SET_INFO;
extern struct req_format RQF_SEC_CTX;
-extern struct req_format RQF_OBD_IDX_READ;
/* MGS req_format */
extern struct req_format RQF_MGS_TARGET_REG;
extern struct req_format RQF_MGS_SET_INFO;
@@ -146,7 +145,6 @@ extern struct req_format RQF_MDS_GETSTATUS;
extern struct req_format RQF_MDS_SYNC;
extern struct req_format RQF_MDS_GETXATTR;
extern struct req_format RQF_MDS_GETATTR;
-extern struct req_format RQF_UPDATE_OBJ;
/*
* This is format of direct (non-intent) MDS_GETATTR_NAME request.
@@ -177,7 +175,6 @@ extern struct req_format RQF_MDS_REINT_SETXATTR;
extern struct req_format RQF_MDS_QUOTACHECK;
extern struct req_format RQF_MDS_QUOTACTL;
extern struct req_format RQF_QC_CALLBACK;
-extern struct req_format RQF_QUOTA_DQACQ;
extern struct req_format RQF_MDS_SWAP_LAYOUTS;
/* MDS hsm formats */
extern struct req_format RQF_MDS_HSM_STATE_GET;
@@ -220,7 +217,6 @@ extern struct req_format RQF_LDLM_INTENT_OPEN;
extern struct req_format RQF_LDLM_INTENT_CREATE;
extern struct req_format RQF_LDLM_INTENT_UNLINK;
extern struct req_format RQF_LDLM_INTENT_GETXATTR;
-extern struct req_format RQF_LDLM_INTENT_QUOTA;
extern struct req_format RQF_LDLM_CANCEL;
extern struct req_format RQF_LDLM_CALLBACK;
extern struct req_format RQF_LDLM_CP_CALLBACK;
@@ -252,7 +248,6 @@ extern struct req_msg_field RMF_SETINFO_KEY;
extern struct req_msg_field RMF_GETINFO_VAL;
extern struct req_msg_field RMF_GETINFO_VALLEN;
extern struct req_msg_field RMF_GETINFO_KEY;
-extern struct req_msg_field RMF_IDX_INFO;
extern struct req_msg_field RMF_CLOSE_DATA;
/*
@@ -277,7 +272,6 @@ extern struct req_msg_field RMF_CAPA1;
extern struct req_msg_field RMF_CAPA2;
extern struct req_msg_field RMF_OBD_QUOTACHECK;
extern struct req_msg_field RMF_OBD_QUOTACTL;
-extern struct req_msg_field RMF_QUOTA_BODY;
extern struct req_msg_field RMF_STRING;
extern struct req_msg_field RMF_SWAP_LAYOUTS;
extern struct req_msg_field RMF_MDS_HSM_PROGRESS;
@@ -322,9 +316,6 @@ extern struct req_msg_field RMF_MGS_CONFIG_RES;
/* generic uint32 */
extern struct req_msg_field RMF_U32;
-/* OBJ update format */
-extern struct req_msg_field RMF_UPDATE;
-extern struct req_msg_field RMF_UPDATE_REPLY;
/** @} req_layout */
#endif /* _LUSTRE_REQ_LAYOUT_H__ */
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index dd1033be6bfa..01b4e6726a68 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -351,26 +351,23 @@ struct ptlrpc_ctx_ops {
/**
* To determine whether it's suitable to use the \a ctx for \a vcred.
*/
- int (*match) (struct ptlrpc_cli_ctx *ctx,
- struct vfs_cred *vcred);
+ int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred);
/**
* To bring the \a ctx uptodate.
*/
- int (*refresh) (struct ptlrpc_cli_ctx *ctx);
+ int (*refresh)(struct ptlrpc_cli_ctx *ctx);
/**
* Validate the \a ctx.
*/
- int (*validate) (struct ptlrpc_cli_ctx *ctx);
+ int (*validate)(struct ptlrpc_cli_ctx *ctx);
/**
* Force the \a ctx to die.
*/
- void (*force_die) (struct ptlrpc_cli_ctx *ctx,
- int grace);
- int (*display) (struct ptlrpc_cli_ctx *ctx,
- char *buf, int bufsize);
+ void (*force_die)(struct ptlrpc_cli_ctx *ctx, int grace);
+ int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
/**
* Sign the request message using \a ctx.
@@ -382,8 +379,7 @@ struct ptlrpc_ctx_ops {
*
* \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
*/
- int (*sign) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req);
+ int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
/**
* Verify the reply message using \a ctx.
@@ -395,8 +391,7 @@ struct ptlrpc_ctx_ops {
*
* \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
*/
- int (*verify) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req);
+ int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
/**
* Encrypt the request message using \a ctx.
@@ -408,8 +403,7 @@ struct ptlrpc_ctx_ops {
*
* \see gss_cli_ctx_seal().
*/
- int (*seal) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req);
+ int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
/**
* Decrypt the reply message using \a ctx.
@@ -421,8 +415,7 @@ struct ptlrpc_ctx_ops {
*
* \see gss_cli_ctx_unseal().
*/
- int (*unseal) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req);
+ int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
/**
* Wrap bulk request data. This is called before wrapping RPC
@@ -444,9 +437,9 @@ struct ptlrpc_ctx_ops {
*
* \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
*/
- int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
+ int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
/**
* Unwrap bulk reply data. This is called after wrapping RPC
@@ -461,9 +454,9 @@ struct ptlrpc_ctx_ops {
*
* \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
*/
- int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
- struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
+ int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx,
+ struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
};
#define PTLRPC_CTX_NEW_BIT (0) /* newly created */
@@ -515,9 +508,9 @@ struct ptlrpc_sec_cops {
*
* \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
*/
- struct ptlrpc_sec * (*create_sec) (struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx,
- struct sptlrpc_flavor *flavor);
+ struct ptlrpc_sec *(*create_sec)(struct obd_import *imp,
+ struct ptlrpc_svc_ctx *ctx,
+ struct sptlrpc_flavor *flavor);
/**
* Destructor of ptlrpc_sec. When called, refcount has been dropped
@@ -525,7 +518,7 @@ struct ptlrpc_sec_cops {
*
* \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
*/
- void (*destroy_sec) (struct ptlrpc_sec *sec);
+ void (*destroy_sec)(struct ptlrpc_sec *sec);
/**
* Notify that this ptlrpc_sec is going to die. Optionally, policy
@@ -534,7 +527,7 @@ struct ptlrpc_sec_cops {
*
* \see plain_kill_sec(), gss_sec_kill().
*/
- void (*kill_sec) (struct ptlrpc_sec *sec);
+ void (*kill_sec)(struct ptlrpc_sec *sec);
/**
* Given \a vcred, lookup and/or create its context. The policy module
@@ -544,10 +537,9 @@ struct ptlrpc_sec_cops {
*
* \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
*/
- struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec,
- struct vfs_cred *vcred,
- int create,
- int remove_dead);
+ struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec,
+ struct vfs_cred *vcred,
+ int create, int remove_dead);
/**
* Called then the reference of \a ctx dropped to 0. The policy module
@@ -559,9 +551,8 @@ struct ptlrpc_sec_cops {
*
* \see plain_release_ctx(), gss_sec_release_ctx_kr().
*/
- void (*release_ctx) (struct ptlrpc_sec *sec,
- struct ptlrpc_cli_ctx *ctx,
- int sync);
+ void (*release_ctx)(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx,
+ int sync);
/**
* Flush the context cache.
@@ -573,11 +564,8 @@ struct ptlrpc_sec_cops {
*
* \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
*/
- int (*flush_ctx_cache)
- (struct ptlrpc_sec *sec,
- uid_t uid,
- int grace,
- int force);
+ int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid,
+ int grace, int force);
/**
* Called periodically by garbage collector to remove dead contexts
@@ -585,7 +573,7 @@ struct ptlrpc_sec_cops {
*
* \see gss_sec_gc_ctx_kr().
*/
- void (*gc_ctx) (struct ptlrpc_sec *sec);
+ void (*gc_ctx)(struct ptlrpc_sec *sec);
/**
* Given an context \a ctx, install a corresponding reverse service
@@ -593,9 +581,8 @@ struct ptlrpc_sec_cops {
* XXX currently it's only used by GSS module, maybe we should remove
* this from general API.
*/
- int (*install_rctx)(struct obd_import *imp,
- struct ptlrpc_sec *sec,
- struct ptlrpc_cli_ctx *ctx);
+ int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec,
+ struct ptlrpc_cli_ctx *ctx);
/**
* To allocate request buffer for \a req.
@@ -608,9 +595,8 @@ struct ptlrpc_sec_cops {
*
* \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
*/
- int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int lustre_msg_size);
+ int (*alloc_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
+ int lustre_msg_size);
/**
* To free request buffer for \a req.
@@ -619,8 +605,7 @@ struct ptlrpc_sec_cops {
*
* \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
*/
- void (*free_reqbuf) (struct ptlrpc_sec *sec,
- struct ptlrpc_request *req);
+ void (*free_reqbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
/**
* To allocate reply buffer for \a req.
@@ -632,9 +617,8 @@ struct ptlrpc_sec_cops {
*
* \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
*/
- int (*alloc_repbuf)(struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int lustre_msg_size);
+ int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
+ int lustre_msg_size);
/**
* To free reply buffer for \a req.
@@ -645,8 +629,7 @@ struct ptlrpc_sec_cops {
*
* \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
*/
- void (*free_repbuf) (struct ptlrpc_sec *sec,
- struct ptlrpc_request *req);
+ void (*free_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
/**
* To expand the request buffer of \a req, thus the \a segment in
@@ -658,15 +641,13 @@ struct ptlrpc_sec_cops {
* \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
* gss_enlarge_reqbuf().
*/
- int (*enlarge_reqbuf)
- (struct ptlrpc_sec *sec,
- struct ptlrpc_request *req,
- int segment, int newsize);
+ int (*enlarge_reqbuf)(struct ptlrpc_sec *sec,
+ struct ptlrpc_request *req,
+ int segment, int newsize);
/*
* misc
*/
- int (*display) (struct ptlrpc_sec *sec,
- struct seq_file *seq);
+ int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq);
};
/**
@@ -690,7 +671,7 @@ struct ptlrpc_sec_sops {
*
* \see null_accept(), plain_accept(), gss_svc_accept_kr().
*/
- int (*accept) (struct ptlrpc_request *req);
+ int (*accept)(struct ptlrpc_request *req);
/**
* Perform security transformation upon reply message.
@@ -702,15 +683,14 @@ struct ptlrpc_sec_sops {
*
* \see null_authorize(), plain_authorize(), gss_svc_authorize().
*/
- int (*authorize) (struct ptlrpc_request *req);
+ int (*authorize)(struct ptlrpc_request *req);
/**
* Invalidate server context \a ctx.
*
* \see gss_svc_invalidate_ctx().
*/
- void (*invalidate_ctx)
- (struct ptlrpc_svc_ctx *ctx);
+ void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx);
/**
* Allocate a ptlrpc_reply_state.
@@ -724,28 +704,26 @@ struct ptlrpc_sec_sops {
*
* \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
*/
- int (*alloc_rs) (struct ptlrpc_request *req,
- int msgsize);
+ int (*alloc_rs)(struct ptlrpc_request *req, int msgsize);
/**
* Free a ptlrpc_reply_state.
*/
- void (*free_rs) (struct ptlrpc_reply_state *rs);
+ void (*free_rs)(struct ptlrpc_reply_state *rs);
/**
* Release the server context \a ctx.
*
* \see gss_svc_free_ctx().
*/
- void (*free_ctx) (struct ptlrpc_svc_ctx *ctx);
+ void (*free_ctx)(struct ptlrpc_svc_ctx *ctx);
/**
* Install a reverse context based on the server context \a ctx.
*
* \see gss_svc_install_rctx_kr().
*/
- int (*install_rctx)(struct obd_import *imp,
- struct ptlrpc_svc_ctx *ctx);
+ int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx);
/**
* Prepare buffer for incoming bulk write.
@@ -755,24 +733,24 @@ struct ptlrpc_sec_sops {
*
* \see gss_svc_prep_bulk().
*/
- int (*prep_bulk) (struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
+ int (*prep_bulk)(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
/**
* Unwrap the bulk write data.
*
* \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
*/
- int (*unwrap_bulk) (struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
+ int (*unwrap_bulk)(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
/**
* Wrap the bulk read data.
*
* \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
*/
- int (*wrap_bulk) (struct ptlrpc_request *req,
- struct ptlrpc_bulk_desc *desc);
+ int (*wrap_bulk)(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
};
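/*
 * Editor's sketch: with the prototypes above compacted, a policy wires
 * its handlers into the ops table with ordinary designated
 * initializers. "example_accept"/"example_authorize" are hypothetical
 * handlers and the remaining callbacks are elided.
 */
static int example_accept(struct ptlrpc_request *req)
{
	return 0;	/* accept everything; illustration only */
}

static int example_authorize(struct ptlrpc_request *req)
{
	return 0;
}

static struct ptlrpc_sec_sops example_sec_sops = {
	.accept		= example_accept,
	.authorize	= example_authorize,
};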
struct ptlrpc_sec_policy {
diff --git a/drivers/staging/lustre/lustre/include/lustre_ver.h b/drivers/staging/lustre/lustre/include/lustre_ver.h
index caa4da12f37a..64559a16f4de 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ver.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ver.h
@@ -1,26 +1,20 @@
#ifndef _LUSTRE_VER_H_
#define _LUSTRE_VER_H_
-/* This file automatically generated from lustre/include/lustre_ver.h.in,
- * based on parameters in lustre/autoconf/lustre-version.ac.
- * Changes made directly to this file will be lost. */
#define LUSTRE_MAJOR 2
-#define LUSTRE_MINOR 3
-#define LUSTRE_PATCH 64
+#define LUSTRE_MINOR 4
+#define LUSTRE_PATCH 60
#define LUSTRE_FIX 0
-#define LUSTRE_VERSION_STRING "2.3.64"
+#define LUSTRE_VERSION_STRING "2.4.60"
#define LUSTRE_VERSION_CODE OBD_OCD_VERSION(LUSTRE_MAJOR, \
LUSTRE_MINOR, LUSTRE_PATCH, \
LUSTRE_FIX)
-/* liblustre clients are only allowed to connect if their LUSTRE_FIX mismatches
- * by this amount (set in lustre/autoconf/lustre-version.ac). */
-#define LUSTRE_VERSION_ALLOWED_OFFSET OBD_OCD_VERSION(0, 0, 1, 32)
-
-/* If lustre version of client and servers it connects to differs by more
+/*
+ * If the lustre version of the client and the servers it connects to differs by more
* than this amount, the client will issue a warning.
- * (set in lustre/autoconf/lustre-version.ac) */
+ */
#define LUSTRE_VERSION_OFFSET_WARN OBD_OCD_VERSION(0, 4, 0, 0)
#endif
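/*
 * Editor's note, assuming the usual OBD_OCD_VERSION packing of one byte
 * per component (major << 24 | minor << 16 | patch << 8 | fix): the
 * bump above makes
 *   LUSTRE_VERSION_CODE = (2 << 24) | (4 << 16) | (60 << 8) | 0
 *                       = 0x02043c00,
 * and LUSTRE_VERSION_OFFSET_WARN = (0 << 24) | (4 << 16) | (0 << 8) | 0,
 * i.e. a warning once client and server differ by more than four minor
 * releases.
 */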
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index bcbe61301713..4264d97650ec 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -90,7 +90,8 @@ struct lov_stripe_md {
pid_t lsm_lock_owner; /* debugging */
/* maximum possible file size, might change as OSTs status changes,
- * e.g. disconnected, deactivated */
+ * e.g. disconnected, deactivated
+ */
__u64 lsm_maxbytes;
struct {
/* Public members. */
@@ -123,7 +124,7 @@ static inline bool lsm_is_released(struct lov_stripe_md *lsm)
static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
{
- if (lsm == NULL)
+ if (!lsm)
return false;
if (lsm_is_released(lsm))
return false;
@@ -159,7 +160,8 @@ struct obd_info {
/* An update callback which is called to update some data on upper
* level. E.g. it is used to update lsm->lsm_oinfo at every received
* request in osc level for enqueue requests. It is also possible to
- * update some caller data from LOV layer if needed. */
+ * update some caller data from LOV layer if needed.
+ */
obd_enqueue_update_f oi_cb_up;
};
@@ -216,7 +218,6 @@ struct timeout_item {
};
#define OSC_MAX_RIF_DEFAULT 8
-#define MDS_OSC_MAX_RIF_DEFAULT 50
#define OSC_MAX_RIF_MAX 256
#define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4)
#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
@@ -241,7 +242,8 @@ struct client_obd {
struct obd_import *cl_import; /* ptlrpc connection state */
int cl_conn_count;
/* max_mds_easize is purely a performance thing so we don't have to
- * call obd_size_diskmd() all the time. */
+ * call obd_size_diskmd() all the time.
+ */
int cl_default_mds_easize;
int cl_max_mds_easize;
int cl_default_mds_cookiesize;
@@ -261,7 +263,8 @@ struct client_obd {
/* since we allocate grant by blocks, we don't know how much grant will
* be used to add a page into cache. As a solution, we reserve maximum
* grant before trying to dirty a page and unreserve the rest.
- * See osc_{reserve|unreserve}_grant for details. */
+ * See osc_{reserve|unreserve}_grant for details.
+ */
long cl_reserved_grant;
struct list_head cl_cache_waiters; /* waiting for cache/grant */
unsigned long cl_next_shrink_grant; /* jiffies */
@@ -269,14 +272,16 @@ struct client_obd {
int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
- * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */
+ * the extent size. A chunk is max(PAGE_SIZE, OST block size)
+ */
int cl_chunkbits;
int cl_chunk;
int cl_extent_tax; /* extent overhead, by bytes */
/* keep track of objects that have lois that contain pages which
* have been queued for async brw. this lock also protects the
- * lists of osc_client_pages that hang off of the loi */
+ * lists of osc_client_pages that hang off of the loi
+ */
/*
* ->cl_loi_list_lock protects consistency of
* ->cl_loi_{ready,read,write}_list. ->ap_make_ready() and
@@ -295,14 +300,14 @@ struct client_obd {
* NB by Jinshan: though field names are still _loi_, but actually
* osc_object{}s are in the list.
*/
- client_obd_lock_t cl_loi_list_lock;
+ struct client_obd_lock cl_loi_list_lock;
struct list_head cl_loi_ready_list;
struct list_head cl_loi_hp_ready_list;
struct list_head cl_loi_write_list;
struct list_head cl_loi_read_list;
int cl_r_in_flight;
int cl_w_in_flight;
- /* just a sum of the loi/lop pending numbers to be exported by /proc */
+ /* just a sum of the loi/lop pending numbers to be exported by sysfs */
atomic_t cl_pending_w_pages;
atomic_t cl_pending_r_pages;
__u32 cl_max_pages_per_rpc;
@@ -322,7 +327,7 @@ struct client_obd {
atomic_t cl_lru_shrinkers;
atomic_t cl_lru_in_list;
struct list_head cl_lru_list; /* lru page list */
- client_obd_lock_t cl_lru_list_lock; /* page list protector */
+ struct client_obd_lock cl_lru_list_lock; /* page list protector */
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
atomic_t cl_destroy_in_flight;
@@ -340,7 +345,7 @@ struct client_obd {
/* supported checksum types that are worked out at connect time */
__u32 cl_supp_cksum_types;
/* checksum algorithm to be used */
- cksum_type_t cl_cksum_type;
+ enum cksum_type cl_cksum_type;
/* also protected by the poorly named _loi_list_lock lock above */
struct osc_async_rc cl_ar;
@@ -375,14 +380,12 @@ struct echo_client_obd {
spinlock_t ec_lock;
struct list_head ec_objects;
struct list_head ec_locks;
- int ec_nstripes;
__u64 ec_unique;
};
/* Generic subset of OSTs */
struct ost_pool {
- __u32 *op_array; /* array of index of
- lov_obd->lov_tgts */
+ __u32 *op_array; /* array of index of lov_obd->lov_tgts */
unsigned int op_count; /* number of OSTs in the array */
unsigned int op_size; /* allocated size of lp_array */
struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
@@ -415,14 +418,16 @@ struct lov_qos {
struct lov_qos_rr lq_rr; /* round robin qos data */
unsigned long lq_dirty:1, /* recalc qos data */
lq_same_space:1,/* the OSTs all have approx.
- the same space avail */
+ * the same space avail
+ */
lq_reset:1, /* zero current penalties */
lq_statfs_in_progress:1; /* statfs op in
progress */
/* qos statfs data */
struct lov_statfs_data *lq_statfs_data;
- wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
- * requests completion */
+ wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
+ * requests completion
+ */
};
struct lov_tgt_desc {
@@ -450,16 +455,16 @@ struct pool_desc {
struct lov_qos_rr pool_rr; /* round robin qos */
struct hlist_node pool_hash; /* access by poolname */
struct list_head pool_list; /* serial access */
- struct dentry *pool_debugfs_entry; /* file in /proc */
+ struct dentry *pool_debugfs_entry; /* file in debugfs */
struct obd_device *pool_lobd; /* obd of the lov/lod to which
- * this pool belongs */
+ * this pool belongs
+ */
};
struct lov_obd {
struct lov_desc desc;
struct lov_tgt_desc **lov_tgts; /* sparse array */
- struct ost_pool lov_packed; /* all OSTs in a packed
- array */
+ struct ost_pool lov_packed; /* all OSTs in a packed array */
struct mutex lov_lock;
struct obd_connect_data lov_ocd;
atomic_t lov_refcount;
@@ -596,34 +601,6 @@ struct obd_trans_info {
struct obd_uuid *oti_ost_uuid;
};
-static inline void oti_init(struct obd_trans_info *oti,
- struct ptlrpc_request *req)
-{
- if (oti == NULL)
- return;
- memset(oti, 0, sizeof(*oti));
-
- if (req == NULL)
- return;
-
- oti->oti_xid = req->rq_xid;
- /** VBR: take versions from request */
- if (req->rq_reqmsg != NULL &&
- lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
- __u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg);
-
- oti->oti_pre_version = pre_version ? pre_version[0] : 0;
- oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg);
- }
-
- /** called from mds_create_objects */
- if (req->rq_repmsg != NULL)
- oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
- oti->oti_thread = req->rq_svc_thread;
- if (req->rq_reqmsg != NULL)
- oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
-}
-
static inline void oti_alloc_cookies(struct obd_trans_info *oti,
int num_cookies)
{
@@ -681,7 +658,7 @@ enum obd_notify_event {
/*
* Data structure used to pass obd_notify()-event to non-obd listeners (llite
- * and liblustre being main examples).
+ * being the main example).
*/
struct obd_notify_upcall {
int (*onu_upcall)(struct obd_device *host, struct obd_device *watched,
@@ -728,21 +705,23 @@ struct obd_device {
unsigned long obd_attached:1, /* finished attach */
obd_set_up:1, /* finished setup */
obd_version_recov:1, /* obd uses version checking */
- obd_replayable:1, /* recovery is enabled; inform clients */
- obd_no_transno:1, /* no committed-transno notification */
+ obd_replayable:1,/* recovery is enabled; inform clients */
+ obd_no_transno:1, /* no committed-transno notification */
obd_no_recov:1, /* fail instead of retry messages */
obd_stopping:1, /* started cleanup */
obd_starting:1, /* started setup */
obd_force:1, /* cleanup with > 0 obd refcount */
- obd_fail:1, /* cleanup with failover */
- obd_async_recov:1, /* allow asynchronous orphan cleanup */
+ obd_fail:1, /* cleanup with failover */
+ obd_async_recov:1, /* allow asynchronous orphan cleanup */
obd_no_conn:1, /* deny new connections */
obd_inactive:1, /* device active/inactive
- * (for /proc/status only!!) */
+ * (for sysfs status only!!)
+ */
obd_no_ir:1, /* no imperative recovery. */
obd_process_conf:1; /* device is processing mgs config */
/* use a separate field as it is set in interrupt so as not to mess with
- * protection of other bits using _bh lock */
+ * protection of other bits using _bh lock
+ */
unsigned long obd_recovery_expired:1;
/* uuid-export hash body */
struct cfs_hash *obd_uuid_hash;
@@ -935,7 +914,8 @@ struct md_op_data {
__u32 op_npages;
/* used to transfer info between the stacks of MD client
- * see enum op_cli_flags */
+ * see enum op_cli_flags
+ */
__u32 op_cli_flags;
/* File object data version for HSM release, on client */
@@ -957,7 +937,7 @@ struct md_enqueue_info {
struct lustre_handle mi_lockh;
struct inode *mi_dir;
int (*mi_cb)(struct ptlrpc_request *req,
- struct md_enqueue_info *minfo, int rc);
+ struct md_enqueue_info *minfo, int rc);
__u64 mi_cbdata;
unsigned int mi_generation;
};
@@ -965,7 +945,7 @@ struct md_enqueue_info {
struct obd_ops {
struct module *owner;
int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg);
+ void *karg, void __user *uarg);
int (*get_info)(const struct lu_env *env, struct obd_export *,
__u32 keylen, void *key, __u32 *vallen, void *val,
struct lov_stripe_md *lsm);
@@ -987,7 +967,8 @@ struct obd_ops {
/* connect to the target device with given connection
* data. @ocd->ocd_connect_flags is modified to reflect flags actually
* granted by the target, which are guaranteed to be a subset of flags
- * asked for. If @ocd == NULL, use default parameters. */
+ * asked for. If @ocd == NULL, use default parameters.
+ */
int (*connect)(const struct lu_env *env,
struct obd_export **exp, struct obd_device *src,
struct obd_uuid *cluuid, struct obd_connect_data *ocd,
@@ -1083,7 +1064,8 @@ struct obd_ops {
/*
* NOTE: If adding ops, add another LPROCFS_OBD_OP_INIT() line
* to lprocfs_alloc_obd_stats() in obdclass/lprocfs_status.c.
- * Also, add a wrapper function in include/linux/obd_class.h. */
+ * Also, add a wrapper function in include/linux/obd_class.h.
+ */
};
enum {
@@ -1189,14 +1171,14 @@ struct md_ops {
struct obd_client_handle *);
int (*set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
- ldlm_mode_t (*lock_match)(struct obd_export *, __u64,
- const struct lu_fid *, ldlm_type_t,
- ldlm_policy_data_t *, ldlm_mode_t,
- struct lustre_handle *);
+ enum ldlm_mode (*lock_match)(struct obd_export *, __u64,
+ const struct lu_fid *, enum ldlm_type,
+ ldlm_policy_data_t *, enum ldlm_mode,
+ struct lustre_handle *);
int (*cancel_unused)(struct obd_export *, const struct lu_fid *,
- ldlm_policy_data_t *, ldlm_mode_t,
- ldlm_cancel_flags_t flags, void *opaque);
+ ldlm_policy_data_t *, enum ldlm_mode,
+ enum ldlm_cancel_flags flags, void *opaque);
int (*get_remote_perm)(struct obd_export *, const struct lu_fid *,
__u32, struct ptlrpc_request **);
@@ -1224,9 +1206,9 @@ struct lsm_operations {
void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *,
u64 *);
int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
- __u16 *stripe_count);
+ __u16 *stripe_count);
int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmm);
+ struct lov_mds_md *lmm);
};
extern const struct lsm_operations lsm_v1_ops;
@@ -1253,7 +1235,7 @@ static inline struct md_open_data *obd_mod_alloc(void)
struct md_open_data *mod;
mod = kzalloc(sizeof(*mod), GFP_NOFS);
- if (mod == NULL)
+ if (!mod)
return NULL;
atomic_set(&mod->mod_refcount, 1);
return mod;
@@ -1300,7 +1282,7 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx)
return false;
/* caller does not care of idx */
- if (idx == NULL)
+ if (!idx)
return true;
/* volatile file, the MDT can be set from name */
@@ -1327,7 +1309,8 @@ static inline bool filename_is_volatile(const char *name, int namelen, int *idx)
return true;
bad_format:
/* bad format of mdt idx, we cannot return an error
- * to caller so we use hash algo */
+ * to the caller so we use the hash algo
+ */
CERROR("Bad volatile file name format: %s\n",
name + LUSTRE_VOLATILE_HDR_LEN);
return false;
@@ -1335,8 +1318,7 @@ bad_format:
static inline int cli_brw_size(struct obd_device *obd)
{
- LASSERT(obd != NULL);
- return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
}
#endif /* __OBD_H */
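
As a worked example of the shift in cli_brw_size(): with the common 4 KiB page size (PAGE_SHIFT == 12) and cl_max_pages_per_rpc set to 256, the bulk RPC size comes out to 1 MiB. The 256 here is illustrative, not a value quoted from this patch:

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_pages_per_rpc = 256;   /* illustrative */

        /* pages-per-RPC shifted by PAGE_SHIFT (12 for 4 KiB pages) */
        printf("%u bytes\n", max_pages_per_rpc << 12); /* 1048576 = 1 MiB */
        return 0;
    }
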
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index 01db60405393..637fa22110a4 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -37,7 +37,7 @@
#include "../../include/linux/libcfs/libcfs.h"
#include "lustre/lustre_idl.h"
-static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type)
+static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type)
{
switch (cksum_type) {
case OBD_CKSUM_CRC32:
@@ -63,8 +63,9 @@ static inline unsigned char cksum_obd2cfs(cksum_type_t cksum_type)
* In case of unsupported types/flags we fall back to ADLER
* because that is supported by all clients since 1.8
*
- * In case multiple algorithms are supported the best one is used. */
-static inline u32 cksum_type_pack(cksum_type_t cksum_type)
+ * In case multiple algorithms are supported the best one is used.
+ */
+static inline u32 cksum_type_pack(enum cksum_type cksum_type)
{
unsigned int performance = 0, tmp;
u32 flag = OBD_FL_CKSUM_ADLER;
@@ -98,7 +99,7 @@ static inline u32 cksum_type_pack(cksum_type_t cksum_type)
return flag;
}
-static inline cksum_type_t cksum_type_unpack(u32 o_flags)
+static inline enum cksum_type cksum_type_unpack(u32 o_flags)
{
switch (o_flags & OBD_FL_CKSUM_ALL) {
case OBD_FL_CKSUM_CRC32C:
@@ -116,9 +117,9 @@ static inline cksum_type_t cksum_type_unpack(u32 o_flags)
* 1.8 supported ADLER, so it is the baseline and does not depend on hw
* Client uses all available local algos
*/
-static inline cksum_type_t cksum_types_supported_client(void)
+static inline enum cksum_type cksum_types_supported_client(void)
{
- cksum_type_t ret = OBD_CKSUM_ADLER;
+ enum cksum_type ret = OBD_CKSUM_ADLER;
CDEBUG(D_INFO, "Crypto hash speed: crc %d, crc32c %d, adler %d\n",
cfs_crypto_hash_speed(cksum_obd2cfs(OBD_CKSUM_CRC32)),
@@ -139,14 +140,16 @@ static inline cksum_type_t cksum_types_supported_client(void)
* Currently, calling cksum_type_pack() with a mask will return the fastest
* checksum type due to its benchmarking at libcfs module load.
* Caution is advised, however, since what is fastest on a single client may
- * not be the fastest or most efficient algorithm on the server. */
-static inline cksum_type_t cksum_type_select(cksum_type_t cksum_types)
+ * not be the fastest or most efficient algorithm on the server.
+ */
+static inline enum cksum_type cksum_type_select(enum cksum_type cksum_types)
{
return cksum_type_unpack(cksum_type_pack(cksum_types));
}
/* Checksum algorithm names. Must be defined in the same order as the
- * OBD_CKSUM_* flags. */
+ * OBD_CKSUM_* flags.
+ */
#define DECLARE_CKSUM_NAME char *cksum_name[] = {"crc32", "adler", "crc32c"}
#endif /* __OBD_CKSUM_H__ */
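
The pack/unpack pair above forms a round trip: cksum_type_pack() reduces a mask of supported types to a single wire flag, cksum_type_unpack() maps the flag back to one enum value, and cksum_type_select() composes the two. A minimal user-space sketch of that shape, with illustrative bit values standing in for the real OBD_CKSUM_*/OBD_FL_CKSUM_* constants (the in-kernel version also weighs measured hash speed):

    #include <stdio.h>

    /* illustrative stand-ins; not the real OBD_CKSUM_* values */
    enum cksum_type { CK_CRC32 = 1, CK_ADLER = 2, CK_CRC32C = 4 };

    static enum cksum_type pick(unsigned int mask)
    {
        /* "the best one is used"; fall back to ADLER as above */
        if (mask & CK_CRC32C)
            return CK_CRC32C;
        if (mask & CK_CRC32)
            return CK_CRC32;
        return CK_ADLER;
    }

    int main(void)
    {
        unsigned int supported = CK_CRC32 | CK_CRC32C;

        printf("selected: %d\n", pick(supported)); /* 4 == CK_CRC32C */
        return 0;
    }
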
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 97d80397503c..706869f8c98f 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -45,18 +45,22 @@
#include "lprocfs_status.h"
#define OBD_STATFS_NODELAY 0x0001 /* requests should be sent without delay
- * and resends for avoid deadlocks */
+ * and resends to avoid deadlocks
+ */
#define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update
- * obd_osfs_age */
+ * obd_osfs_age
+ */
#define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd
* instead of a specific set. This
* means that we cannot rely on the set
* interpret routine to be called.
* lov_statfs_fini() must thus be called
- * by the request interpret routine */
+ * by the request interpret routine
+ */
#define OBD_STATFS_FOR_MDT0 0x0008 /* The statfs is only for retrieving
- * information from MDT0. */
-#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */
+ * information from MDT0.
+ */
+#define OBD_FL_PUNCH 0x00000001 /* To indicate it is punch operation */
/* OBD Device Declarations */
extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
@@ -83,10 +87,10 @@ int class_name2dev(const char *name);
struct obd_device *class_name2obd(const char *name);
int class_uuid2dev(struct obd_uuid *uuid);
struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
- const char *typ_name,
- struct obd_uuid *grp_uuid);
+ const char *typ_name,
+ struct obd_uuid *grp_uuid);
struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid,
- int *next);
+ int *next);
struct obd_device *class_num2obd(int num);
int class_notify_sptlrpc_conf(const char *fsname, int namelen);
@@ -160,8 +164,9 @@ struct config_llog_data {
struct mutex cld_lock;
int cld_type;
unsigned int cld_stopping:1, /* we were told to stop
- * watching */
- cld_lostlock:1; /* lock not requeued */
+ * watching
+ */
+ cld_lostlock:1; /* lock not requeued */
char cld_logname[0];
};
@@ -193,7 +198,7 @@ extern void (*class_export_dump_hook)(struct obd_export *);
struct obd_export *class_export_get(struct obd_export *exp);
void class_export_put(struct obd_export *exp);
struct obd_export *class_new_export(struct obd_device *obddev,
- struct obd_uuid *cluuid);
+ struct obd_uuid *cluuid);
void class_unlink_export(struct obd_export *exp);
struct obd_import *class_import_get(struct obd_import *);
@@ -203,7 +208,7 @@ void class_destroy_import(struct obd_import *exp);
void class_put_type(struct obd_type *type);
int class_connect(struct lustre_handle *conn, struct obd_device *obd,
- struct obd_uuid *cluuid);
+ struct obd_uuid *cluuid);
int class_disconnect(struct obd_export *exp);
void class_fail_export(struct obd_export *exp);
int class_manual_cleanup(struct obd_device *obd);
@@ -275,7 +280,8 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid);
#define CTXTP(ctxt, op) (ctxt)->loc_logops->lop_##op
/* Ensure obd_setup: used for cleanup which must be called
- while obd is stopping */
+ * while obd is stopping
+ */
static inline int obd_check_dev(struct obd_device *obd)
{
if (!obd) {
@@ -306,7 +312,7 @@ static inline int obd_check_dev_active(struct obd_device *obd)
/ sizeof(((struct obd_ops *)(0))->iocontrol))
#define OBD_COUNTER_INCREMENT(obdx, op) \
- if ((obdx)->obd_stats != NULL) { \
+ if ((obdx)->obd_stats) { \
unsigned int coffset; \
coffset = (unsigned int)((obdx)->obd_cntr_base) + \
OBD_COUNTER_OFFSET(op); \
@@ -315,7 +321,7 @@ static inline int obd_check_dev_active(struct obd_device *obd)
}
#define EXP_COUNTER_INCREMENT(export, op) \
- if ((export)->exp_obd->obd_stats != NULL) { \
+ if ((export)->exp_obd->obd_stats) { \
unsigned int coffset; \
coffset = (unsigned int)((export)->exp_obd->obd_cntr_base) + \
OBD_COUNTER_OFFSET(op); \
@@ -329,7 +335,7 @@ static inline int obd_check_dev_active(struct obd_device *obd)
/ sizeof(((struct md_ops *)(0))->getstatus))
#define MD_COUNTER_INCREMENT(obdx, op) \
- if ((obd)->md_stats != NULL) { \
+ if ((obd)->md_stats) { \
unsigned int coffset; \
coffset = (unsigned int)((obdx)->md_cntr_base) + \
MD_COUNTER_OFFSET(op); \
@@ -338,24 +344,24 @@ static inline int obd_check_dev_active(struct obd_device *obd)
}
#define EXP_MD_COUNTER_INCREMENT(export, op) \
- if ((export)->exp_obd->obd_stats != NULL) { \
+ if ((export)->exp_obd->obd_stats) { \
unsigned int coffset; \
coffset = (unsigned int)((export)->exp_obd->md_cntr_base) + \
MD_COUNTER_OFFSET(op); \
LASSERT(coffset < (export)->exp_obd->md_stats->ls_num); \
lprocfs_counter_incr((export)->exp_obd->md_stats, coffset); \
- if ((export)->exp_md_stats != NULL) \
+ if ((export)->exp_md_stats) \
lprocfs_counter_incr( \
(export)->exp_md_stats, coffset); \
}
#define EXP_CHECK_MD_OP(exp, op) \
do { \
- if ((exp) == NULL) { \
+ if (!(exp)) { \
CERROR("obd_" #op ": NULL export\n"); \
return -ENODEV; \
} \
- if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \
+ if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \
CERROR("obd_" #op ": cleaned up obd\n"); \
return -EOPNOTSUPP; \
} \
@@ -379,11 +385,11 @@ do { \
#define EXP_CHECK_DT_OP(exp, op) \
do { \
- if ((exp) == NULL) { \
+ if (!(exp)) { \
CERROR("obd_" #op ": NULL export\n"); \
return -ENODEV; \
} \
- if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \
+ if (!(exp)->exp_obd || !OBT((exp)->exp_obd)) { \
CERROR("obd_" #op ": cleaned up obd\n"); \
return -EOPNOTSUPP; \
} \
@@ -467,7 +473,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
DECLARE_LU_VARS(ldt, d);
ldt = obd->obd_type->typ_lu;
- if (ldt != NULL) {
+ if (ldt) {
struct lu_context session_ctx;
struct lu_env env;
@@ -509,7 +515,7 @@ static inline int obd_precleanup(struct obd_device *obd,
return rc;
ldt = obd->obd_type->typ_lu;
d = obd->obd_lu_dev;
- if (ldt != NULL && d != NULL) {
+ if (ldt && d) {
if (cleanup_stage == OBD_CLEANUP_EXPORTS) {
struct lu_env env;
@@ -538,7 +544,7 @@ static inline int obd_cleanup(struct obd_device *obd)
ldt = obd->obd_type->typ_lu;
d = obd->obd_lu_dev;
- if (ldt != NULL && d != NULL) {
+ if (ldt && d) {
struct lu_env env;
rc = lu_env_init(&env, ldt->ldt_ctx_tags);
@@ -558,7 +564,8 @@ static inline int obd_cleanup(struct obd_device *obd)
static inline void obd_cleanup_client_import(struct obd_device *obd)
{
/* If we set up but never connected, the
- client import will not have been cleaned. */
+ * client import will not have been cleaned.
+ */
down_write(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import) {
struct obd_import *imp;
@@ -586,7 +593,7 @@ obd_process_config(struct obd_device *obd, int datalen, void *data)
obd->obd_process_conf = 1;
ldt = obd->obd_type->typ_lu;
d = obd->obd_lu_dev;
- if (ldt != NULL && d != NULL) {
+ if (ldt && d) {
struct lu_env env;
rc = lu_env_init(&env, ldt->ldt_ctx_tags);
@@ -674,7 +681,7 @@ static inline int obd_alloc_memmd(struct obd_export *exp,
struct lov_stripe_md **mem_tgt)
{
LASSERT(mem_tgt);
- LASSERT(*mem_tgt == NULL);
+ LASSERT(!*mem_tgt);
return obd_unpackmd(exp, mem_tgt, NULL, 0);
}
@@ -767,7 +774,7 @@ static inline int obd_setattr_rqset(struct obd_export *exp,
EXP_COUNTER_INCREMENT(exp, setattr_async);
set = ptlrpc_prep_set();
- if (set == NULL)
+ if (!set)
return -ENOMEM;
rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
@@ -778,7 +785,8 @@ static inline int obd_setattr_rqset(struct obd_export *exp,
}
/* This adds all the requests into @set if @set != NULL, otherwise
- all requests are sent asynchronously without waiting for response. */
+ * all requests are sent asynchronously without waiting for response.
+ */
static inline int obd_setattr_async(struct obd_export *exp,
struct obd_info *oinfo,
struct obd_trans_info *oti,
@@ -848,7 +856,8 @@ static inline int obd_connect(const struct lu_env *env,
{
int rc;
__u64 ocf = data ? data->ocd_connect_flags : 0; /* for post-condition
- * check */
+ * check
+ */
rc = obd_check_dev_active(obd);
if (rc)
@@ -858,7 +867,7 @@ static inline int obd_connect(const struct lu_env *env,
rc = OBP(obd, connect)(env, exp, obd, cluuid, data, localdata);
/* check that only subset is granted */
- LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) ==
+ LASSERT(ergo(data, (data->ocd_connect_flags & ocf) ==
data->ocd_connect_flags));
return rc;
}
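
The LASSERT(ergo(...)) above is a subset check on connect flags: every bit the server reports granted must also have been requested. The same test in bitmask terms, as a stand-alone illustration with made-up flag values:

    #include <assert.h>

    int main(void)
    {
        unsigned long long requested = 0x5;  /* client's ocd_connect_flags */
        unsigned long long granted   = 0x1;  /* server reply: a subset */

        /* mirrors the postcondition: (granted & requested) == granted */
        assert((granted & requested) == granted);
        return 0;
    }
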
@@ -871,8 +880,7 @@ static inline int obd_reconnect(const struct lu_env *env,
void *localdata)
{
int rc;
- __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition
- * check */
+ __u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition check */
rc = obd_check_dev_active(obd);
if (rc)
@@ -882,8 +890,7 @@ static inline int obd_reconnect(const struct lu_env *env,
rc = OBP(obd, reconnect)(env, exp, obd, cluuid, d, localdata);
/* check that only subset is granted */
- LASSERT(ergo(d != NULL,
- (d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
+ LASSERT(ergo(d, (d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
return rc;
}
@@ -998,7 +1005,7 @@ static inline int obd_init_export(struct obd_export *exp)
{
int rc = 0;
- if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
+ if ((exp)->exp_obd && OBT((exp)->exp_obd) &&
OBP((exp)->exp_obd, init_export))
rc = OBP(exp->exp_obd, init_export)(exp);
return rc;
@@ -1006,7 +1013,7 @@ static inline int obd_init_export(struct obd_export *exp)
static inline int obd_destroy_export(struct obd_export *exp)
{
- if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
+ if ((exp)->exp_obd && OBT((exp)->exp_obd) &&
OBP((exp)->exp_obd, destroy_export))
OBP(exp->exp_obd, destroy_export)(exp);
return 0;
@@ -1014,7 +1021,8 @@ static inline int obd_destroy_export(struct obd_export *exp)
/* @max_age is the oldest time in jiffies that we accept using cached data.
* If the cache is older than @max_age we will get a new value from the
- * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
+ * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness.
+ */
static inline int obd_statfs_async(struct obd_export *exp,
struct obd_info *oinfo,
__u64 max_age,
@@ -1023,7 +1031,7 @@ static inline int obd_statfs_async(struct obd_export *exp,
int rc = 0;
struct obd_device *obd;
- if (exp == NULL || exp->exp_obd == NULL)
+ if (!exp || !exp->exp_obd)
return -EINVAL;
obd = exp->exp_obd;
@@ -1059,7 +1067,7 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
int rc = 0;
set = ptlrpc_prep_set();
- if (set == NULL)
+ if (!set)
return -ENOMEM;
oinfo.oi_osfs = osfs;
@@ -1073,7 +1081,8 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
/* @max_age is the oldest time in jiffies that we accept using cached data.
* If the cache is older than @max_age we will get a new value from the
- * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness. */
+ * target. Use a value of "cfs_time_current() + HZ" to guarantee freshness.
+ */
static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
struct obd_statfs *osfs, __u64 max_age,
__u32 flags)
@@ -1081,7 +1090,7 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
int rc = 0;
struct obd_device *obd = exp->exp_obd;
- if (obd == NULL)
+ if (!obd)
return -EINVAL;
OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
@@ -1155,7 +1164,7 @@ static inline int obd_adjust_kms(struct obd_export *exp,
}
static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp,
- int len, void *karg, void *uarg)
+ int len, void *karg, void __user *uarg)
{
int rc;
@@ -1205,9 +1214,10 @@ static inline int obd_notify(struct obd_device *obd,
return rc;
/* the check for async_recov is a complete hack - I'm hereby
- overloading the meaning to also mean "this was called from
- mds_postsetup". I know that my mds is able to handle notifies
- by this point, and it needs to get them to execute mds_postrecov. */
+ * overloading the meaning to also mean "this was called from
+ * mds_postsetup". I know that my mds is able to handle notifies
+ * by this point, and it needs to get them to execute mds_postrecov.
+ */
if (!obd->obd_set_up && !obd->obd_async_recov) {
CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name);
return -EINVAL;
@@ -1241,7 +1251,7 @@ static inline int obd_notify_observer(struct obd_device *observer,
* Also, call non-obd listener, if any
*/
onu = &observer->obd_upcall;
- if (onu->onu_upcall != NULL)
+ if (onu->onu_upcall)
rc2 = onu->onu_upcall(observer, observed, ev,
onu->onu_owner, NULL);
else
@@ -1287,7 +1297,7 @@ static inline int obd_health_check(const struct lu_env *env,
int rc;
/* don't use EXP_CHECK_DT_OP, because NULL method is normal here */
- if (obd == NULL || !OBT(obd)) {
+ if (!obd || !OBT(obd)) {
CERROR("cleaned up obd\n");
return -EOPNOTSUPP;
}
@@ -1318,57 +1328,6 @@ static inline int obd_register_observer(struct obd_device *obd,
return 0;
}
-#if 0
-static inline int obd_register_page_removal_cb(struct obd_export *exp,
- obd_page_removal_cb_t cb,
- obd_pin_extent_cb pin_cb)
-{
- int rc;
-
- OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0);
- OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb);
-
- rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb);
- return rc;
-}
-
-static inline int obd_unregister_page_removal_cb(struct obd_export *exp,
- obd_page_removal_cb_t cb)
-{
- int rc;
-
- OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0);
- OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb);
-
- rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb);
- return rc;
-}
-
-static inline int obd_register_lock_cancel_cb(struct obd_export *exp,
- obd_lock_cancel_cb cb)
-{
- int rc;
-
- OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0);
- OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb);
-
- rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb);
- return rc;
-}
-
-static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp,
- obd_lock_cancel_cb cb)
-{
- int rc;
-
- OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0);
- OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb);
-
- rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb);
- return rc;
-}
-#endif
-
/* metadata helpers */
static inline int md_getstatus(struct obd_export *exp, struct lu_fid *fid)
{
@@ -1392,7 +1351,7 @@ static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data,
}
static inline int md_null_inode(struct obd_export *exp,
- const struct lu_fid *fid)
+ const struct lu_fid *fid)
{
int rc;
@@ -1657,8 +1616,8 @@ static inline int md_set_lock_data(struct obd_export *exp,
static inline int md_cancel_unused(struct obd_export *exp,
const struct lu_fid *fid,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
+ enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags,
void *opaque)
{
int rc;
@@ -1671,12 +1630,12 @@ static inline int md_cancel_unused(struct obd_export *exp,
return rc;
}
-static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid,
- ldlm_type_t type,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- struct lustre_handle *lockh)
+static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid,
+ enum ldlm_type type,
+ ldlm_policy_data_t *policy,
+ enum ldlm_mode mode,
+ struct lustre_handle *lockh)
{
EXP_CHECK_MD_OP(exp, lock_match);
EXP_MD_COUNTER_INCREMENT(exp, lock_match);
@@ -1759,7 +1718,8 @@ struct lwp_register_item {
/* I'm as embarrassed about this as you are.
*
* <shaver> // XXX do not look into _superhack with remaining eye
- * <shaver> // XXX if this were any uglier, I'd get my own show on MTV */
+ * <shaver> // XXX if this were any uglier, I'd get my own show on MTV
+ */
extern int (*ptlrpc_put_connection_superhack)(struct ptlrpc_connection *c);
/* obd_mount.c */
@@ -1774,7 +1734,7 @@ void class_uuid_unparse(class_uuid_t in, struct obd_uuid *out);
/* lustre_peer.c */
int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index);
int class_add_uuid(const char *uuid, __u64 nid);
-int class_del_uuid (const char *uuid);
+int class_del_uuid(const char *uuid);
int class_check_uuid(struct obd_uuid *uuid, __u64 nid);
void class_init_uuidlist(void);
void class_exit_uuidlist(void);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index d031437c0528..f8ee3a3254ba 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -47,7 +47,8 @@ extern unsigned int obd_debug_peer_on_timeout;
extern unsigned int obd_dump_on_timeout;
extern unsigned int obd_dump_on_eviction;
/* obd_timeout should only be used for recovery, not for
- networking / disk / timings affected by load (use Adaptive Timeouts) */
+ * networking / disk / timings affected by load (use Adaptive Timeouts)
+ */
extern unsigned int obd_timeout; /* seconds */
extern unsigned int obd_timeout_set;
extern unsigned int at_min;
@@ -104,18 +105,21 @@ extern char obd_jobid_var[];
* failover targets the client only pings one server at a time, and pings
* can be lost on a loaded network. Since eviction has serious consequences,
* and there's no urgent need to evict a client just because it's idle, we
- * should be very conservative here. */
+ * should be very conservative here.
+ */
#define PING_EVICT_TIMEOUT (PING_INTERVAL * 6)
#define DISK_TIMEOUT 50 /* Beyond this we warn about disk speed */
#define CONNECTION_SWITCH_MIN 5U /* Connection switching rate limiter */
- /* Max connect interval for nonresponsive servers; ~50s to avoid building up
- connect requests in the LND queues, but within obd_timeout so we don't
- miss the recovery window */
+/* Max connect interval for nonresponsive servers; ~50s to avoid building up
+ * connect requests in the LND queues, but within obd_timeout so we don't
+ * miss the recovery window
+ */
#define CONNECTION_SWITCH_MAX min(50U, max(CONNECTION_SWITCH_MIN, obd_timeout))
#define CONNECTION_SWITCH_INC 5 /* Connection timeout backoff */
/* In general this should be low to have quick detection of a system
- running on a backup server. (If it's too low, import_select_connection
- will increase the timeout anyhow.) */
+ * running on a backup server. (If it's too low, import_select_connection
+ * will increase the timeout anyhow.)
+ */
#define INITIAL_CONNECT_TIMEOUT max(CONNECTION_SWITCH_MIN, obd_timeout/20)
/* The max delay between connects is SWITCH_MAX + SWITCH_INC + INITIAL */
#define RECONNECT_DELAY_MAX (CONNECTION_SWITCH_MAX + CONNECTION_SWITCH_INC + \
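
Plugging in numbers makes the interplay of these macros concrete. Assuming obd_timeout = 100 s (a common default, not stated in this hunk): INITIAL_CONNECT_TIMEOUT = max(5, 100/20) = 5, CONNECTION_SWITCH_MAX = min(50, max(5, 100)) = 50, so RECONNECT_DELAY_MAX = 50 + 5 + 5 = 60 s. The same arithmetic as a sketch:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        unsigned int obd_timeout = 100;            /* assumed, seconds */
        unsigned int sw_min = 5, sw_inc = 5;
        unsigned int initial = MAX(sw_min, obd_timeout / 20);     /* 5 */
        unsigned int sw_max = MIN(50U, MAX(sw_min, obd_timeout)); /* 50 */

        printf("%u s\n", sw_max + sw_inc + initial);              /* 60 */
        return 0;
    }
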
@@ -496,7 +500,7 @@ extern char obd_jobid_var[];
#ifdef POISON_BULK
#define POISON_PAGE(page, val) do { \
- memset(kmap(page), val, PAGE_CACHE_SIZE); \
+ memset(kmap(page), val, PAGE_SIZE); \
kunmap(page); \
} while (0)
#else
@@ -507,7 +511,6 @@ extern char obd_jobid_var[];
do { \
struct portals_handle *__h = (handle); \
\
- LASSERT(handle != NULL); \
__h->h_cookie = (unsigned long)(ptr); \
__h->h_size = (size); \
call_rcu(&__h->h_rcu, class_handle_free_cb); \
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h
index 41f3d810aea4..5e998362e44b 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/lustre/include/uapi_kernelcomm.h
@@ -15,37 +15,29 @@
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* Author: Nathan Rutman <nathan.rutman@sun.com>
*
- * libcfs/include/libcfs/libcfs_kernelcomm.h
- *
* Kernel <-> userspace communication routines.
* The definitions below are used in the kernel and userspace.
- *
*/
-#ifndef __LIBCFS_KERNELCOMM_H__
-#define __LIBCFS_KERNELCOMM_H__
+#ifndef __UAPI_KERNELCOMM_H__
+#define __UAPI_KERNELCOMM_H__
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
+#include <linux/types.h>
/* KUC message header.
* All current and future KUC messages should use this header.
@@ -53,66 +45,50 @@
*/
struct kuc_hdr {
__u16 kuc_magic;
- __u8 kuc_transport; /* Each new Lustre feature should use a different
- transport */
+ /* Each new Lustre feature should use a different transport */
+ __u8 kuc_transport;
__u8 kuc_flags;
- __u16 kuc_msgtype; /* Message type or opcode, transport-specific */
- __u16 kuc_msglen; /* Including header */
+ /* Message type or opcode, transport-specific */
+ __u16 kuc_msgtype;
+ /* Including header */
+ __u16 kuc_msglen;
} __aligned(sizeof(__u64));
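
For orientation, the header above packs into exactly 8 bytes (2+1+1+2+2) and the __aligned(sizeof(__u64)) keeps sizeof at 8. A user-space mirror as a sketch, with field widths taken from the struct above and the constant values from the defines nearby:

    #include <stdio.h>
    #include <stdint.h>

    /* user-space mirror of struct kuc_hdr */
    struct kuc_hdr {
        uint16_t kuc_magic;
        uint8_t  kuc_transport;
        uint8_t  kuc_flags;
        uint16_t kuc_msgtype;
        uint16_t kuc_msglen;            /* includes this header */
    } __attribute__((aligned(sizeof(uint64_t))));

    int main(void)
    {
        struct kuc_hdr h = {
            .kuc_magic     = 0x191C,    /* KUC_MAGIC */
            .kuc_transport = 1,         /* KUC_TRANSPORT_GENERIC */
            .kuc_msgtype   = 1,         /* KUC_MSG_SHUTDOWN */
            .kuc_msglen    = sizeof(h), /* header-only message */
        };

        printf("%zu bytes\n", sizeof(h));   /* 8 */
        return 0;
    }
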
-#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr) + CR_MAXSIZE)
-#define KUC_MAGIC 0x191C /*Lustre9etLinC */
-#define KUC_FL_BLOCK 0x01 /* Wait for send */
+#define KUC_MAGIC 0x191C /* Lustre9etLinC */
/* kuc_msgtype values are defined in each transport */
enum kuc_transport_type {
- KUC_TRANSPORT_GENERIC = 1,
- KUC_TRANSPORT_HSM = 2,
- KUC_TRANSPORT_CHANGELOG = 3,
+ KUC_TRANSPORT_GENERIC = 1,
+ KUC_TRANSPORT_HSM = 2,
+ KUC_TRANSPORT_CHANGELOG = 3,
};
enum kuc_generic_message_type {
- KUC_MSG_SHUTDOWN = 1,
+ KUC_MSG_SHUTDOWN = 1,
};
-/* prototype for callback function on kuc groups */
-typedef int (*libcfs_kkuc_cb_t)(__u32 data, void *cb_arg);
-
/* KUC Broadcast Groups. This determines which userspace process hears which
* messages. Multiple transports may be used within a group, or multiple
* groups may use the same transport. Broadcast
* groups need not be used if e.g. a UID is specified instead;
* use group 0 to signify unicast.
*/
-#define KUC_GRP_HSM 0x02
-#define KUC_GRP_MAX KUC_GRP_HSM
-
-/* Kernel methods */
-int libcfs_kkuc_msg_put(struct file *fp, void *payload);
-int libcfs_kkuc_group_put(int group, void *payload);
-int libcfs_kkuc_group_add(struct file *fp, int uid, unsigned int group,
- __u32 data);
-int libcfs_kkuc_group_rem(int uid, int group);
-int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
- void *cb_arg);
+#define KUC_GRP_HSM 0x02
+#define KUC_GRP_MAX KUC_GRP_HSM
#define LK_FLG_STOP 0x01
+#define LK_NOFD -1U
/* kernelcomm control structure, passed from userspace to kernel */
-typedef struct lustre_kernelcomm {
+struct lustre_kernelcomm {
__u32 lk_wfd;
__u32 lk_rfd;
__u32 lk_uid;
__u32 lk_group;
__u32 lk_data;
__u32 lk_flags;
-} __packed lustre_kernelcomm;
-
-/* Userspace methods */
-int libcfs_ukuc_start(lustre_kernelcomm *l, int groups);
-int libcfs_ukuc_stop(lustre_kernelcomm *l);
-int libcfs_ukuc_msg_get(lustre_kernelcomm *l, char *buf, int maxsize,
- int transport);
+} __packed;
-#endif /* __LIBCFS_KERNELCOMM_H__ */
+#endif /* __UAPI_KERNELCOMM_H__ */
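
How a caller might fill the control structure above, e.g. for a unicast (group 0) registration. The mirror struct and the fd handling here are assumptions for illustration; the real ioctl plumbing is omitted:

    #include <stdint.h>

    #define LK_FLG_STOP 0x01
    #define LK_NOFD     (~0U)           /* -1U, per the define above */

    struct lustre_kernelcomm {          /* user-space mirror; __packed */
        uint32_t lk_wfd, lk_rfd, lk_uid, lk_group, lk_data, lk_flags;
    } __attribute__((packed));

    int main(void)
    {
        struct lustre_kernelcomm lk = {
            .lk_wfd   = LK_NOFD,        /* no write-side fd yet */
            .lk_rfd   = LK_NOFD,
            .lk_uid   = 0,
            .lk_group = 0,              /* group 0 signifies unicast */
            .lk_flags = LK_FLG_STOP,    /* ask the kernel side to stop */
        };

        return (int)(lk.lk_flags != LK_FLG_STOP);   /* 0 on success */
    }
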
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/lclient/glimpse.c
index 8533a1e539f4..c4e8a0878ac8 100644
--- a/drivers/staging/lustre/lustre/lclient/glimpse.c
+++ b/drivers/staging/lustre/lustre/lclient/glimpse.c
@@ -109,7 +109,8 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
* if there were no conflicting locks. If there
* were conflicting locks, enqueuing or waiting
* fails with -ENAVAIL, but valid inode
- * attributes are returned anyway. */
+ * attributes are returned anyway.
+ */
*descr = whole_file;
descr->cld_obj = clob;
descr->cld_mode = CLM_PHANTOM;
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index 34dde7dede74..96141d17d07f 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -116,8 +116,8 @@ void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
{
struct ccc_thread_info *info;
- info = kmem_cache_alloc(ccc_thread_kmem, GFP_NOFS | __GFP_ZERO);
- if (info == NULL)
+ info = kmem_cache_zalloc(ccc_thread_kmem, GFP_NOFS);
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
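
The kmem_cache_zalloc() conversions in this and the following hunks are behavior-preserving: kmem_cache_zalloc(cache, flags) is kmem_cache_alloc(cache, flags | __GFP_ZERO). A rough user-space analogy of that wrapper shape:

    #include <stdlib.h>
    #include <string.h>

    /* zeroing wrapper over a plain allocator, like __GFP_ZERO for slab */
    static void *zalloc(size_t size)
    {
        void *p = malloc(size);

        if (p)
            memset(p, 0, size);
        return p;
    }

    int main(void)
    {
        free(zalloc(64));
        return 0;
    }
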
@@ -135,8 +135,8 @@ void *ccc_session_key_init(const struct lu_context *ctx,
{
struct ccc_session *session;
- session = kmem_cache_alloc(ccc_session_kmem, GFP_NOFS | __GFP_ZERO);
- if (session == NULL)
+ session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS);
+ if (!session)
session = ERR_PTR(-ENOMEM);
return session;
}
@@ -173,7 +173,7 @@ int ccc_device_init(const struct lu_env *env, struct lu_device *d,
vdv = lu2ccc_dev(d);
vdv->cdv_next = lu2cl_dev(next);
- LASSERT(d->ld_site != NULL && next->ld_type != NULL);
+ LASSERT(d->ld_site && next->ld_type);
next->ld_site = d->ld_site;
rc = next->ld_type->ldt_ops->ldto_device_init(
env, next, next->ld_type->ldt_name, NULL);
@@ -211,12 +211,12 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env,
vdv->cdv_cl.cd_ops = clops;
site = kzalloc(sizeof(*site), GFP_NOFS);
- if (site != NULL) {
+ if (site) {
rc = cl_site_init(site, &vdv->cdv_cl);
if (rc == 0)
rc = lu_site_init_finish(&site->cs_lu);
else {
- LASSERT(lud->ld_site == NULL);
+ LASSERT(!lud->ld_site);
CERROR("Cannot init lu_site, rc %d.\n", rc);
kfree(site);
}
@@ -236,7 +236,7 @@ struct lu_device *ccc_device_free(const struct lu_env *env,
struct cl_site *site = lu2cl_site(d->ld_site);
struct lu_device *next = cl2lu_dev(vdv->cdv_next);
- if (d->ld_site != NULL) {
+ if (d->ld_site) {
cl_site_fini(site);
kfree(site);
}
@@ -251,8 +251,8 @@ int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
struct ccc_req *vrq;
int result;
- vrq = kmem_cache_alloc(ccc_req_kmem, GFP_NOFS | __GFP_ZERO);
- if (vrq != NULL) {
+ vrq = kmem_cache_zalloc(ccc_req_kmem, GFP_NOFS);
+ if (vrq) {
cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
result = 0;
} else
@@ -304,7 +304,7 @@ out_kmem:
void ccc_global_fini(struct lu_device_type *device_type)
{
- if (ccc_inode_fini_env != NULL) {
+ if (ccc_inode_fini_env) {
cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
ccc_inode_fini_env = NULL;
}
@@ -327,8 +327,8 @@ struct lu_object *ccc_object_alloc(const struct lu_env *env,
struct ccc_object *vob;
struct lu_object *obj;
- vob = kmem_cache_alloc(ccc_object_kmem, GFP_NOFS | __GFP_ZERO);
- if (vob != NULL) {
+ vob = kmem_cache_zalloc(ccc_object_kmem, GFP_NOFS);
+ if (vob) {
struct cl_object_header *hdr;
obj = ccc2lu(vob);
@@ -365,7 +365,7 @@ int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
under = &dev->cdv_next->cd_lu_dev;
below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below != NULL) {
+ if (below) {
const struct cl_object_conf *cconf;
cconf = lu2cl_conf(conf);
@@ -396,8 +396,8 @@ int ccc_lock_init(const struct lu_env *env,
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- clk = kmem_cache_alloc(ccc_lock_kmem, GFP_NOFS | __GFP_ZERO);
- if (clk != NULL) {
+ clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS);
+ if (clk) {
cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
result = 0;
} else
@@ -613,7 +613,8 @@ void ccc_lock_state(const struct lu_env *env,
* stale i_size when doing appending writes and effectively
* cancel the result of the truncate. Getting the
* ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order. */
+ * -> ll_inode_size_lock() acquiring order.
+ */
if (lock->cll_descr.cld_start == 0 &&
lock->cll_descr.cld_end == CL_PAGE_EOF)
cl_merge_lvb(env, inode);
@@ -660,7 +661,7 @@ void ccc_io_update_iov(const struct lu_env *env,
{
size_t size = io->u.ci_rw.crw_count;
- if (!cl_is_normalio(env, io) || cio->cui_iter == NULL)
+ if (!cl_is_normalio(env, io) || !cio->cui_iter)
return;
iov_iter_truncate(cio->cui_iter, size);
@@ -749,16 +750,17 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
*/
ccc_object_size_unlock(obj);
result = cl_glimpse_lock(env, io, inode, obj, 0);
- if (result == 0 && exceed != NULL) {
+ if (result == 0 && exceed) {
/* If the objective page index exceeds the end-of-file
* page index, return directly. Do not expect the
* kernel to check such a case correctly;
* linux-2.6.18-128.1.1 misses doing that.
- * --bug 17336 */
+ * --bug 17336
+ */
loff_t size = cl_isize_read(inode);
- loff_t cur_index = start >> PAGE_CACHE_SHIFT;
+ loff_t cur_index = start >> PAGE_SHIFT;
loff_t size_index = (size - 1) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
if ((size == 0 && cur_index != 0) ||
size_index < cur_index)
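
Worked numbers for the end-of-file check above, assuming 4 KiB pages (PAGE_SHIFT == 12): a file of size 4096 has size_index = (4096 - 1) >> 12 = 0, so a read starting at offset 8192 (cur_index = 2) is past EOF and returns directly; an empty file with any nonzero cur_index hits the first half of the test:

    #include <stdio.h>

    int main(void)
    {
        long long size = 4096, start = 8192;        /* bytes */
        long long cur_index  = start >> 12;         /* 2 */
        long long size_index = (size - 1) >> 12;    /* 0 */

        /* same shape as the conditional above */
        if ((size == 0 && cur_index != 0) || size_index < cur_index)
            printf("past EOF, return directly\n");
        return 0;
    }
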
@@ -884,7 +886,8 @@ again:
if (attr->ia_valid & ATTR_FILE)
/* populate the file descriptor for ftruncate to honor
- * group lock - see LU-787 */
+ * group lock - see LU-787
+ */
cio->cui_fd = cl_iattr2fd(inode, attr);
result = cl_io_loop(env, io);
@@ -896,7 +899,8 @@ again:
goto again;
/* HSM import case: file is released, cannot be restored
* no need to fail except if restore registration failed
- * with -ENODATA */
+ * with -ENODATA
+ */
if (result == -ENODATA && io->ci_restore_needed &&
io->ci_result != -ENODATA)
result = 0;
@@ -986,17 +990,6 @@ struct inode *ccc_object_inode(const struct cl_object *obj)
}
/**
- * Returns a pointer to cl_page associated with \a vmpage, without acquiring
- * additional reference to the resulting page. This is an unsafe version of
- * cl_vmpage_page() that can only be used under vmpage lock.
- */
-struct cl_page *ccc_vmpage_page_transient(struct page *vmpage)
-{
- KLASSERT(PageLocked(vmpage));
- return (struct cl_page *)vmpage->private;
-}
-
-/**
* Initialize or update CLIO structures for regular files when new
* meta-data arrives from the server.
*
@@ -1033,11 +1026,12 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
fid = &lli->lli_fid;
LASSERT(fid_is_sane(fid));
- if (lli->lli_clob == NULL) {
+ if (!lli->lli_clob) {
/* clob is slave of inode, empty lli_clob means for new inode,
* there is no clob in cache with the given fid, so it is
* unnecessary to perform lookup-alloc-lookup-insert, just
- * alloc and insert directly. */
+ * alloc and insert directly.
+ */
LASSERT(inode->i_state & I_NEW);
conf.coc_lu.loc_flags = LOC_F_NEW;
clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
@@ -1109,7 +1103,7 @@ void cl_inode_fini(struct inode *inode)
int refcheck;
int emergency;
- if (clob != NULL) {
+ if (clob) {
void *cookie;
cookie = cl_env_reenter();
@@ -1117,7 +1111,7 @@ void cl_inode_fini(struct inode *inode)
emergency = IS_ERR(env);
if (emergency) {
mutex_lock(&ccc_inode_fini_guard);
- LASSERT(ccc_inode_fini_env != NULL);
+ LASSERT(ccc_inode_fini_env);
cl_env_implant(ccc_inode_fini_env, &refcheck);
env = ccc_inode_fini_env;
}
@@ -1162,7 +1156,8 @@ __u16 ll_dirent_type_get(struct lu_dirent *ent)
}
/**
- * build inode number from passed @fid */
+ * build inode number from passed @fid
+ */
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
if (BITS_PER_LONG == 32 || api32)
@@ -1173,7 +1168,8 @@ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
/**
* build inode generation from passed @fid. If our FID overflows the 32-bit
- * inode number then return a non-zero generation to distinguish them. */
+ * inode number then return a non-zero generation to distinguish them.
+ */
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
__u32 gen;
@@ -1194,7 +1190,8 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid)
* have to wait for the refcount to become zero to destroy the older layout.
*
* Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT. */
+ * inside layout lock - MDS_INODELOCK_LAYOUT.
+ */
struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
{
return lov_lsm_get(cl_i2info(inode)->lli_clob);
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
index 8389a0edad36..d80bcedd78d1 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
@@ -48,7 +48,8 @@
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
* us to make MDS RPCs with large enough reply buffers to hold the
* maximum-sized (= maximum striped) EA and cookie without having to
- * calculate this (via a call into the LOV + OSCs) each time we make an RPC. */
+ * calculate this (via a call into the LOV + OSCs) each time we make an RPC.
+ */
int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
{
struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 };
@@ -74,7 +75,8 @@ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
cookiesize = stripes * sizeof(struct llog_cookie);
/* default cookiesize is 0 because from 2.4 the server doesn't send
- * llog cookies to client. */
+ * llog cookies to client.
+ */
CDEBUG(D_HA,
"updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n",
def_easize, easize, cookiesize);
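
For scale, with an illustrative 160-way stripe count and a llog_cookie of, say, 32 bytes (both numbers assumed, not taken from this patch), the cookie buffer works out to 5 KiB:

    #include <stdio.h>

    int main(void)
    {
        size_t stripes = 160;       /* assumed stripe count */
        size_t cookie  = 32;        /* assumed sizeof(struct llog_cookie) */

        printf("cookiesize: %zu\n", stripes * cookie);  /* 5120 bytes */
        return 0;
    }
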
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index a2ea8e5b93d8..323060626fdf 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -49,13 +49,11 @@ enum {
static inline int node_is_left_child(struct interval_node *node)
{
- LASSERT(node->in_parent != NULL);
return node == node->in_parent->in_left;
}
static inline int node_is_right_child(struct interval_node *node)
{
- LASSERT(node->in_parent != NULL);
return node == node->in_parent->in_right;
}
@@ -135,7 +133,8 @@ static void __rotate_change_maxhigh(struct interval_node *node,
/* The left rotation "pivots" around the link from node to node->right, and
* - node will be linked to node->right's left child, and
- * - node->right's left child will be linked to node's right child. */
+ * - node->right's left child will be linked to node's right child.
+ */
static void __rotate_left(struct interval_node *node,
struct interval_node **root)
{
@@ -164,7 +163,8 @@ static void __rotate_left(struct interval_node *node,
/* The right rotation "pivots" around the link from node to node->left, and
* - node will be linked to node->left's right child, and
- * - node->left's right child will be linked to node's left child. */
+ * - node->left's right child will be linked to node's left child.
+ */
static void __rotate_right(struct interval_node *node,
struct interval_node **root)
{
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index 9c70f31ea56e..a803e200f206 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -62,7 +62,8 @@
* is the "highest lock". This function returns the new KMS value.
* Caller must hold lr_lock already.
*
- * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
+ * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes!
+ */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
struct ldlm_resource *res = lock->l_resource;
@@ -72,7 +73,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
/* don't let another thread in ldlm_extent_shift_kms race in
* just after we finish and take our lock into account in its
- * calculation of the kms */
+ * calculation of the kms
+ */
lock->l_flags |= LDLM_FL_KMS_IGNORE;
list_for_each(tmp, &res->lr_granted) {
@@ -85,7 +87,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
return old_kms;
/* This extent _has_ to be smaller than old_kms (checked above)
- * so kms can only ever be smaller or the same as old_kms. */
+ * so kms can only ever be smaller or the same as old_kms.
+ */
if (lck->l_policy_data.l_extent.end + 1 > kms)
kms = lck->l_policy_data.l_extent.end + 1;
}
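
A worked example of the rule in the comment above: suppose the lock being cancelled covered [0, 1048575] and the surviving granted extents end at 4095 and 65535. Each lock [x, y] protects a KMS of y + 1, so the shifted KMS is 65536. A sketch of the max-scan (lock values are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
        /* ends of granted extents that survive the cancel */
        unsigned long long ends[] = { 4095, 65535 };
        unsigned long long kms = 0;
        int i;

        for (i = 0; i < 2; i++)
            if (ends[i] + 1 > kms)      /* lock [x,y] protects y + 1 */
                kms = ends[i] + 1;
        printf("new kms: %llu\n", kms); /* 65536 */
        return 0;
    }
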
@@ -112,8 +115,8 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
struct ldlm_interval *node;
LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
- node = kmem_cache_alloc(ldlm_interval_slab, GFP_NOFS | __GFP_ZERO);
- if (node == NULL)
+ node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS);
+ if (!node)
return NULL;
INIT_LIST_HEAD(&node->li_group);
@@ -134,7 +137,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
struct ldlm_interval *n = l->l_tree_node;
- if (n == NULL)
+ if (!n)
return NULL;
LASSERT(!list_empty(&n->li_group));
@@ -144,7 +147,7 @@ struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
return list_empty(&n->li_group) ? n : NULL;
}
-static inline int lock_mode_to_index(ldlm_mode_t mode)
+static inline int lock_mode_to_index(enum ldlm_mode mode)
{
int index;
@@ -168,7 +171,7 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
LASSERT(lock->l_granted_mode == lock->l_req_mode);
node = lock->l_tree_node;
- LASSERT(node != NULL);
+ LASSERT(node);
LASSERT(!interval_is_intree(&node->li_node));
idx = lock_mode_to_index(lock->l_granted_mode);
@@ -185,14 +188,14 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
struct ldlm_interval *tmp;
tmp = ldlm_interval_detach(lock);
- LASSERT(tmp != NULL);
ldlm_interval_free(tmp);
ldlm_interval_attach(to_ldlm_interval(found), lock);
}
res->lr_itree[idx].lit_size++;
/* even though we use an interval tree to manage the extent lock, we also
- * add the locks into grant list, for debug purpose, .. */
+ * add the locks into the grant list, for debugging purposes.
+ */
ldlm_resource_add_lock(res, &res->lr_granted, lock);
}
@@ -211,7 +214,7 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
LASSERT(lock->l_granted_mode == 1 << idx);
tree = &res->lr_itree[idx];
- LASSERT(tree->lit_root != NULL); /* assure the tree is not null */
+ LASSERT(tree->lit_root); /* assure the tree is not null */
tree->lit_size--;
node = ldlm_interval_detach(lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 4310154e1728..b88b78606aee 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -92,7 +92,7 @@ ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
}
static inline void
-ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
+ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
{
LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
mode, flags);
@@ -107,7 +107,8 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
/* when reaching here, it is under lock_res_and_lock(). Thus,
- need call the nolock version of ldlm_lock_decref_internal*/
+ * we need to call the nolock version of ldlm_lock_decref_internal
+ */
ldlm_lock_decref_internal_nolock(lock, mode);
}
@@ -133,7 +134,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
* would be collected and ASTs sent.
*/
static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
- int first_enq, ldlm_error_t *err,
+ int first_enq, enum ldlm_error *err,
struct list_head *work_list)
{
struct ldlm_resource *res = req->l_resource;
@@ -143,7 +144,7 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
struct ldlm_lock *lock = NULL;
struct ldlm_lock *new = req;
struct ldlm_lock *new2 = NULL;
- ldlm_mode_t mode = req->l_req_mode;
+ enum ldlm_mode mode = req->l_req_mode;
int added = (mode == LCK_NL);
int overlaps = 0;
int splitted = 0;
@@ -159,13 +160,15 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
*err = ELDLM_OK;
/* No blocking ASTs are sent to the clients for
- * Posix file & record locks */
+ * Posix file & record locks
+ */
req->l_blocking_ast = NULL;
reprocess:
if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
/* This loop determines where this process's locks start
- * in the resource lr_granted list. */
+ * in the resource lr_granted list.
+ */
list_for_each(tmp, &res->lr_granted) {
lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
@@ -180,7 +183,8 @@ reprocess:
lockmode_verify(mode);
/* This loop determines if there are existing locks
- * that conflict with the new lock request. */
+ * that conflict with the new lock request.
+ */
list_for_each(tmp, &res->lr_granted) {
lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
@@ -238,8 +242,8 @@ reprocess:
}
/* Scan the locks owned by this process that overlap this request.
- * We may have to merge or split existing locks. */
-
+ * We may have to merge or split existing locks.
+ */
if (!ownlocks)
ownlocks = &res->lr_granted;
@@ -253,7 +257,8 @@ reprocess:
/* If the modes are the same then we need to process
* locks that overlap OR adjoin the new lock. The extra
* logic condition is necessary to deal with arithmetic
- * overflow and underflow. */
+ * overflow and underflow.
+ */
if ((new->l_policy_data.l_flock.start >
(lock->l_policy_data.l_flock.end + 1))
&& (lock->l_policy_data.l_flock.end !=
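
The truncated condition above asks whether the new lock starts strictly beyond lock->end + 1 (i.e. neither overlaps nor adjoins), with the second clause guarding the +1 against wrapping when the existing lock extends to the maximum offset. A hedged stand-alone sketch of that test:

    #include <stdio.h>

    /* 1 when [new_start, ...] leaves a gap after [..., lock_end] */
    static int strictly_beyond(unsigned long long new_start,
                               unsigned long long lock_end)
    {
        return new_start > lock_end + 1 && lock_end != ~0ULL;
    }

    int main(void)
    {
        printf("%d\n", strictly_beyond(101, 99));   /* 1: gap, no merge */
        printf("%d\n", strictly_beyond(100, 99));   /* 0: adjoins, merge */
        printf("%d\n", strictly_beyond(5, ~0ULL));  /* 0: overflow guard */
        return 0;
    }
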
@@ -327,11 +332,13 @@ reprocess:
* with the request but this would complicate the reply
* processing since updates to req get reflected in the
* reply. The client side replays the lock request so
- * it must see the original lock data in the reply. */
+ * it must see the original lock data in the reply.
+ */
/* XXX - if ldlm_lock_new() can sleep we should
* release the lr_lock, allocate the new lock,
- * and restart processing this lock. */
+ * and restart processing this lock.
+ */
if (!new2) {
unlock_res_and_lock(req);
new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
@@ -361,7 +368,7 @@ reprocess:
lock->l_policy_data.l_flock.start =
new->l_policy_data.l_flock.end + 1;
new2->l_conn_export = lock->l_conn_export;
- if (lock->l_export != NULL) {
+ if (lock->l_export) {
new2->l_export = class_export_lock_get(lock->l_export,
new2);
if (new2->l_export->exp_lock_hash &&
@@ -381,7 +388,7 @@ reprocess:
}
/* if new2 is created but never used, destroy it*/
- if (splitted == 0 && new2 != NULL)
+ if (splitted == 0 && new2)
ldlm_lock_destroy_nolock(new2);
/* At this point we're granting the lock request. */
@@ -396,7 +403,8 @@ reprocess:
if (*flags != LDLM_FL_WAIT_NOREPROC) {
/* The only possible case for a client-side call into the flock
* policy function is ldlm_flock_completion_ast, which
- * carries LDLM_FL_WAIT_NOREPROC flag. */
+ * carries the LDLM_FL_WAIT_NOREPROC flag.
+ */
CERROR("Illegal parameter for client-side-only module.\n");
LBUG();
}
@@ -404,7 +412,8 @@ reprocess:
/* In case we're reprocessing the requested lock we can't destroy
* it until after calling ldlm_add_ast_work_item() above so that laawi()
* can bump the reference count on \a req. Otherwise \a req
- * could be freed before the completion AST can be sent. */
+ * could be freed before the completion AST can be sent.
+ */
if (added)
ldlm_flock_destroy(req, mode, *flags);
@@ -449,7 +458,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
struct obd_import *imp = NULL;
struct ldlm_flock_wait_data fwd;
struct l_wait_info lwi;
- ldlm_error_t err;
+ enum ldlm_error err;
int rc = 0;
CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
@@ -458,12 +467,12 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
/* Import invalidation. We need to actually release the lock
* references being held, so that it can go away. No point in
* holding the lock even if app still believes it has it, since
- * server already dropped it anyway. Only for granted locks too. */
+ * server already dropped it anyway. This applies only to granted locks.
+ */
if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
(LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
if (lock->l_req_mode == lock->l_granted_mode &&
- lock->l_granted_mode != LCK_NL &&
- data == NULL)
+ lock->l_granted_mode != LCK_NL && !data)
ldlm_lock_decref_internal(lock, lock->l_req_mode);
/* Need to wake up the waiter if we were evicted */
@@ -475,7 +484,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
LDLM_FL_BLOCK_CONV))) {
- if (data == NULL)
+ if (!data)
/* mds granted the lock in the reply */
goto granted;
/* CP AST RPC: lock get granted, wake it up */
@@ -488,10 +497,10 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
obd = class_exp2obd(lock->l_conn_export);
/* if this is a local lock, there is no import */
- if (obd != NULL)
+ if (obd)
imp = obd->u.cli.cl_import;
- if (imp != NULL) {
+ if (imp) {
spin_lock(&imp->imp_lock);
fwd.fwd_generation = imp->imp_generation;
spin_unlock(&imp->imp_lock);
@@ -540,7 +549,8 @@ granted:
} else if (flags & LDLM_FL_TEST_LOCK) {
/* fcntl(F_GETLK) request */
/* The old mode was saved in getlk->fl_type so that if the mode
- * in the lock changes we can decref the appropriate refcount.*/
+ * in the lock changes we can decref the appropriate refcount.
+ */
ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
switch (lock->l_granted_mode) {
case LCK_PR:
@@ -559,7 +569,8 @@ granted:
__u64 noreproc = LDLM_FL_WAIT_NOREPROC;
/* We need to reprocess the lock to do merges or splits
- * with existing locks owned by this process. */
+ * with existing locks owned by this process.
+ */
ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
}
unlock_res_and_lock(lock);
@@ -576,7 +587,8 @@ void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
/* Compat code, old clients had no idea about owner field and
* relied solely on pid for ownership. Introduced in LU-104, 2.1,
- * April 2011 */
+ * April 2011
+ */
lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 849cc98df7dd..e21373e7306f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -96,14 +96,15 @@ enum {
LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs) */
+ * sending nor waiting for any rpcs)
+ */
};
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
- ldlm_cancel_flags_t sync, int flags);
+ enum ldlm_cancel_flags sync, int flags);
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
struct list_head *cancels, int count, int max,
- ldlm_cancel_flags_t cancel_flags, int flags);
+ enum ldlm_cancel_flags cancel_flags, int flags);
extern int ldlm_enqueue_min;
/* ldlm_resource.c */
@@ -133,11 +134,11 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
enum req_location loc, void *data, int size);
struct ldlm_lock *
ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
- ldlm_type_t type, ldlm_mode_t,
+ enum ldlm_type type, enum ldlm_mode mode,
const struct ldlm_callback_suite *cbs,
void *data, __u32 lvb_len, enum lvb_type lvb_type);
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
- void *cookie, __u64 *flags);
+enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
+ void *cookie, __u64 *flags);
void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
@@ -154,7 +155,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld,
struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags);
+ enum ldlm_cancel_flags cancel_flags);
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 3c8d4413d976..7dd7df59aa1f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -219,7 +219,8 @@ EXPORT_SYMBOL(client_import_find_conn);
void client_destroy_import(struct obd_import *imp)
{
/* Drop security policy instance after all RPCs have finished/aborted
- * to let all busy contexts be released. */
+ * to let all busy contexts be released.
+ */
class_import_get(imp);
class_destroy_import(imp);
sptlrpc_import_sec_put(imp);
@@ -227,29 +228,6 @@ void client_destroy_import(struct obd_import *imp)
}
EXPORT_SYMBOL(client_destroy_import);
-/**
- * Check whether or not the OSC is on MDT.
- * In the config log,
- * osc on MDT
- * setup 0:{fsname}-OSTxxxx-osc[-MDTxxxx] 1:lustre-OST0000_UUID 2:NID
- * osc on client
- * setup 0:{fsname}-OSTxxxx-osc 1:lustre-OST0000_UUID 2:NID
- *
- **/
-static int osc_on_mdt(char *obdname)
-{
- char *ptr;
-
- ptr = strrchr(obdname, '-');
- if (ptr == NULL)
- return 0;
-
- if (strncmp(ptr + 1, "MDT", 3) == 0)
- return 1;
-
- return 0;
-}
-
/* Configure an RPC client OBD device.
*
* lcfg parameters:
@@ -264,11 +242,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
struct obd_uuid server_uuid;
int rq_portal, rp_portal, connect_op;
char *name = obddev->obd_type->typ_name;
- ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
+ enum ldlm_ns_type ns_type = LDLM_NS_TYPE_UNKNOWN;
int rc;
/* In a more perfect world, we would hang a ptlrpc_client off of
- * obd_type and just use the values from there. */
+ * obd_type and just use the values from there.
+ */
if (!strcmp(name, LUSTRE_OSC_NAME)) {
rq_portal = OST_REQUEST_PORTAL;
rp_portal = OSC_REPLY_PORTAL;
@@ -284,22 +263,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_sp_me = LUSTRE_SP_CLI;
cli->cl_sp_to = LUSTRE_SP_MDT;
ns_type = LDLM_NS_TYPE_MDC;
- } else if (!strcmp(name, LUSTRE_OSP_NAME)) {
- if (strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL) {
- /* OSP_on_MDT for other MDTs */
- connect_op = MDS_CONNECT;
- cli->cl_sp_to = LUSTRE_SP_MDT;
- ns_type = LDLM_NS_TYPE_MDC;
- rq_portal = OUT_PORTAL;
- } else {
- /* OSP on MDT for OST */
- connect_op = OST_CONNECT;
- cli->cl_sp_to = LUSTRE_SP_OST;
- ns_type = LDLM_NS_TYPE_OSC;
- rq_portal = OST_REQUEST_PORTAL;
- }
- rp_portal = OSC_REPLY_PORTAL;
- cli->cl_sp_me = LUSTRE_SP_CLI;
} else if (!strcmp(name, LUSTRE_MGC_NAME)) {
rq_portal = MGS_REQUEST_PORTAL;
rp_portal = MGC_REPLY_PORTAL;
@@ -344,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_avail_grant = 0;
/* FIXME: Should limit this for the sum of all cl_dirty_max. */
cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
- cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
+ if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
+ cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
INIT_LIST_HEAD(&cli->cl_cache_waiters);
INIT_LIST_HEAD(&cli->cl_loi_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
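
The clamp above caps cl_dirty_max at 1/8 of RAM; shifting left by (PAGE_SHIFT - 3) converts pages to bytes (<< PAGE_SHIFT) and divides by 8 (>> 3) in one step. A standalone sketch with illustrative values, assuming 4 KiB pages:

/* Sketch of the cl_dirty_max clamp: never let the dirty limit exceed
 * one eighth of total RAM. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long totalram_pages = 262144;		   /* 1 GiB of RAM */
	unsigned long dirty_max = 32UL * 1024 * 1024 * 8; /* 256 MiB */

	if (dirty_max >> PAGE_SHIFT > totalram_pages / 8)
		dirty_max = totalram_pages << (PAGE_SHIFT - 3);

	printf("dirty_max = %lu MiB\n", dirty_max >> 20); /* prints 128 */
	return 0;
}
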
@@ -387,23 +350,21 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
/* This value may be reduced at connect time in
* ptlrpc_connect_interpret(). We initialize it to only
* 1MB until we know what the performance looks like.
- * In the future this should likely be increased. LU-1431 */
+ * In the future this should likely be increased. LU-1431
+ */
cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
- LNET_MTU >> PAGE_CACHE_SHIFT);
+ LNET_MTU >> PAGE_SHIFT);
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
cli->cl_max_rpcs_in_flight = 3;
- } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
- if (osc_on_mdt(obddev->obd_name))
- cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT;
- else
- cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
+ cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
}
rc = ldlm_get_ref();
if (rc) {
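
The ladder above scales RPC concurrency with available memory; totalram_pages >> (20 - PAGE_SHIFT) is the machine's RAM in megabytes. A userspace sketch of the same selection (the RIF default here is an assumed stand-in value):

/* Sketch of the max-RPCs-in-flight ladder: small machines get a small
 * window, everything else gets the OSC default. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define OSC_MAX_RIF_DEFAULT 8	/* assumed value for illustration */

static int max_rpcs_in_flight(unsigned long totalram_pages)
{
	unsigned long mb = totalram_pages >> (20 - PAGE_SHIFT);

	if (mb <= 128)
		return 2;
	if (mb <= 256)
		return 3;
	if (mb <= 512)
		return 4;
	return OSC_MAX_RIF_DEFAULT;
}

int main(void)
{
	printf("256 MB -> %d RPCs\n", max_rpcs_in_flight(65536));  /* 3 */
	printf("2 GB   -> %d RPCs\n", max_rpcs_in_flight(524288)); /* 8 */
	return 0;
}
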
@@ -415,7 +376,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
&obddev->obd_ldlm_client);
imp = class_new_import(obddev);
- if (imp == NULL) {
+ if (!imp) {
rc = -ENOENT;
goto err_ldlm;
}
@@ -451,7 +412,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
LDLM_NAMESPACE_CLIENT,
LDLM_NAMESPACE_GREEDY,
ns_type);
- if (obddev->obd_namespace == NULL) {
+ if (!obddev->obd_namespace) {
CERROR("Unable to create client namespace - %s\n",
obddev->obd_name);
rc = -ENOMEM;
@@ -477,7 +438,7 @@ int client_obd_cleanup(struct obd_device *obddev)
ldlm_namespace_free_post(obddev->obd_namespace);
obddev->obd_namespace = NULL;
- LASSERT(obddev->u.cli.cl_import == NULL);
+ LASSERT(!obddev->u.cli.cl_import);
ldlm_put_ref();
return 0;
@@ -528,7 +489,7 @@ int client_connect_import(const struct lu_env *env,
LASSERT(imp->imp_state == LUSTRE_IMP_DISCON);
goto out_ldlm;
}
- LASSERT(*exp != NULL && (*exp)->exp_connection);
+ LASSERT(*exp && (*exp)->exp_connection);
if (data) {
LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
@@ -587,17 +548,19 @@ int client_disconnect_export(struct obd_export *exp)
/* Mark import deactivated now, so we don't try to reconnect if any
* of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't
- * fully deactivate the import, or that would drop all requests. */
+ * fully deactivate the import, or that would drop all requests.
+ */
spin_lock(&imp->imp_lock);
imp->imp_deactive = 1;
spin_unlock(&imp->imp_lock);
/* Some non-replayable imports (MDS's OSCs) are pinged, so just
* delete it regardless. (It's safe to delete an import that was
- * never added.) */
+ * never added.)
+ */
(void)ptlrpc_pinger_del_import(imp);
- if (obd->obd_namespace != NULL) {
+ if (obd->obd_namespace) {
/* obd_force == local only */
ldlm_cli_cancel_unused(obd->obd_namespace, NULL,
obd->obd_force ? LCF_LOCAL : 0, NULL);
@@ -606,7 +569,8 @@ int client_disconnect_export(struct obd_export *exp)
}
/* There's no need to hold sem while disconnecting an import,
- * and it may actually cause deadlock in GSS. */
+ * and it may actually cause deadlock in GSS.
+ */
up_write(&cli->cl_sem);
rc = ptlrpc_disconnect_import(imp, 0);
down_write(&cli->cl_sem);
@@ -615,7 +579,8 @@ int client_disconnect_export(struct obd_export *exp)
out_disconnect:
/* Use server style - class_disconnect should be always called for
- * o_disconnect. */
+ * o_disconnect.
+ */
err = class_disconnect(exp);
if (!rc && err)
rc = err;
@@ -634,7 +599,8 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
struct obd_device *obd;
/* Check that we still have all structures alive as this may
- * be some late RPC at shutdown time. */
+ * be some late RPC at shutdown time.
+ */
if (unlikely(!req->rq_export || !req->rq_export->exp_obd ||
!exp_connect_lru_resize(req->rq_export))) {
lustre_msg_set_slv(req->rq_repmsg, 0);
@@ -684,14 +650,14 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
svcpt = req->rq_rqbd->rqbd_svcpt;
rs = req->rq_reply_state;
- if (rs == NULL || !rs->rs_difficult) {
+ if (!rs || !rs->rs_difficult) {
/* no notifiers */
target_send_reply_msg(req, rc, fail_id);
return;
}
/* must be an export if locks saved */
- LASSERT(req->rq_export != NULL);
+ LASSERT(req->rq_export);
/* req/reply consistent */
LASSERT(rs->rs_svcpt == svcpt);
@@ -700,7 +666,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
LASSERT(!rs->rs_scheduled_ever);
LASSERT(!rs->rs_handled);
LASSERT(!rs->rs_on_net);
- LASSERT(rs->rs_export == NULL);
+ LASSERT(!rs->rs_export);
LASSERT(list_empty(&rs->rs_obd_list));
LASSERT(list_empty(&rs->rs_exp_list));
@@ -739,7 +705,8 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
* reply ref until ptlrpc_handle_rs() is done
* with the reply state (if the send was successful, there
* would have been +1 ref for the net, which
- * reply_out_callback leaves alone) */
+ * reply_out_callback leaves alone)
+ */
rs->rs_on_net = 0;
ptlrpc_rs_addref(rs);
}
@@ -760,7 +727,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
}
EXPORT_SYMBOL(target_send_reply);
-ldlm_mode_t lck_compat_array[] = {
+enum ldlm_mode lck_compat_array[] = {
[LCK_EX] = LCK_COMPAT_EX,
[LCK_PW] = LCK_COMPAT_PW,
[LCK_PR] = LCK_COMPAT_PR,
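
lck_compat_array is indexed by a granted mode and yields a bitmask of request modes that can coexist with it, so a compatibility test reduces to a single AND. A sketch with made-up mode bits and masks, not the real Lustre LCK_COMPAT_* definitions:

/* Sketch of a mode-compatibility table: modes are single bits and each
 * entry is the mask of modes compatible with the index mode. */
#include <stdio.h>

enum { LCK_EX = 1, LCK_PW = 2, LCK_PR = 4, LCK_CR = 8 };

static const int lck_compat[] = {
	[LCK_EX] = 0,				/* EX tolerates nothing */
	[LCK_PW] = LCK_CR,
	[LCK_PR] = LCK_PR | LCK_CR,
	[LCK_CR] = LCK_PW | LCK_PR | LCK_CR,
};

static int lockmode_compat(int existing, int requested)
{
	return (lck_compat[existing] & requested) != 0;
}

int main(void)
{
	printf("PR vs PR: %d\n", lockmode_compat(LCK_PR, LCK_PR)); /* 1 */
	printf("PR vs PW: %d\n", lockmode_compat(LCK_PR, LCK_PW)); /* 0 */
	return 0;
}
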
@@ -775,7 +742,7 @@ ldlm_mode_t lck_compat_array[] = {
* Rather arbitrary mapping from LDLM error codes to errno values. This should
* not escape to the user level.
*/
-int ldlm_error2errno(ldlm_error_t error)
+int ldlm_error2errno(enum ldlm_error error)
{
int result;
@@ -803,7 +770,7 @@ int ldlm_error2errno(ldlm_error_t error)
break;
default:
if (((int)error) < 0) /* cast to signed type */
- result = error; /* as ldlm_error_t can be unsigned */
+ result = error; /* as enum ldlm_error can be unsigned */
else {
CERROR("Invalid DLM result code: %d\n", error);
result = -EPROTO;
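
A standalone sketch of the fallback behavior above: already-negative values pass through unchanged (the enum may be unsigned, hence the signed cast), and any other unknown code becomes -EPROTO. The specific ELDLM_* cases are elided:

/* Sketch of ldlm_error2errno() fallback handling. */
#include <stdio.h>
#include <errno.h>

static int ldlm_error2errno(int error)
{
	switch (error) {
	case 0:
		return 0;
	/* ... specific ELDLM_* mappings elided ... */
	default:
		if (error < 0)	/* already a negative errno: pass through */
			return error;
		fprintf(stderr, "Invalid DLM result code: %d\n", error);
		return -EPROTO;
	}
}

int main(void)
{
	printf("%d %d %d\n", ldlm_error2errno(0), ldlm_error2errno(-5),
	       ldlm_error2errno(12345));	/* 0 -5 -71 */
	return 0;
}
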
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index cf9ec0cfe247..ecd65a7a3dc9 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -91,7 +91,7 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
/**
* Converts lock policy from local format to on the wire lock_desc format
*/
-static void ldlm_convert_policy_to_wire(ldlm_type_t type,
+static void ldlm_convert_policy_to_wire(enum ldlm_type type,
const ldlm_policy_data_t *lpolicy,
ldlm_wire_policy_data_t *wpolicy)
{
@@ -105,7 +105,7 @@ static void ldlm_convert_policy_to_wire(ldlm_type_t type,
/**
* Converts lock policy from on the wire lock_desc format to local format
*/
-void ldlm_convert_policy_to_local(struct obd_export *exp, ldlm_type_t type,
+void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
const ldlm_wire_policy_data_t *wpolicy,
ldlm_policy_data_t *lpolicy)
{
@@ -326,9 +326,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even if the lock isn't
- * in exp_lock_hash. */
+ * in exp_lock_hash.
+ */
/* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
+ * ldlm_export_lock_keycmp()
+ */
/* coverity[overrun-buffer-val] */
cfs_hash_del(lock->l_export->exp_lock_hash,
&lock->l_remote_handle, &lock->l_exp_hash);
@@ -337,16 +339,6 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
ldlm_lock_remove_from_lru(lock);
class_handle_unhash(&lock->l_handle);
-#if 0
- /* Wake anyone waiting for this lock */
- /* FIXME: I should probably add yet another flag, instead of using
- * l_export to only call this on clients */
- if (lock->l_export)
- class_export_put(lock->l_export);
- lock->l_export = NULL;
- if (lock->l_export && lock->l_completion_ast)
- lock->l_completion_ast(lock, 0);
-#endif
return 1;
}
@@ -412,11 +404,10 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
struct ldlm_lock *lock;
- if (resource == NULL)
- LBUG();
+ LASSERT(resource);
- lock = kmem_cache_alloc(ldlm_lock_slab, GFP_NOFS | __GFP_ZERO);
- if (lock == NULL)
+ lock = kmem_cache_zalloc(ldlm_lock_slab, GFP_NOFS);
+ if (!lock)
return NULL;
spin_lock_init(&lock->l_lock);
@@ -485,7 +476,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
unlock_res_and_lock(lock);
newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
- if (newres == NULL)
+ if (!newres)
return -ENOMEM;
lu_ref_add(&newres->lr_reference, "lock", lock);
@@ -547,11 +538,12 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
LASSERT(handle);
lock = class_handle2object(handle->cookie);
- if (lock == NULL)
+ if (!lock)
return NULL;
/* It's unlikely but possible that someone marked the lock as
- * destroyed after we did handle2object on it */
+ * destroyed after we did handle2object on it
+ */
if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
lu_ref_add(&lock->l_reference, "handle", current);
return lock;
@@ -559,7 +551,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
lock_res_and_lock(lock);
- LASSERT(lock->l_resource != NULL);
+ LASSERT(lock->l_resource);
lu_ref_add_atomic(&lock->l_reference, "handle", current);
if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
@@ -611,13 +603,14 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
lock->l_flags |= LDLM_FL_AST_SENT;
/* If the enqueuing client said so, tell the AST recipient to
- * discard dirty data, rather than writing back. */
+ * discard dirty data, rather than writing back.
+ */
if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
lock->l_flags |= LDLM_FL_DISCARD_DATA;
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
- LASSERT(lock->l_blocking_lock == NULL);
+ LASSERT(!lock->l_blocking_lock);
lock->l_blocking_lock = LDLM_LOCK_GET(new);
}
}
@@ -664,7 +657,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
struct ldlm_lock *lock;
lock = ldlm_handle2lock(lockh);
- LASSERT(lock != NULL);
+ LASSERT(lock);
ldlm_lock_addref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
}
@@ -708,7 +701,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
result = -EAGAIN;
lock = ldlm_handle2lock(lockh);
- if (lock != NULL) {
+ if (lock) {
lock_res_and_lock(lock);
if (lock->l_readers != 0 || lock->l_writers != 0 ||
!(lock->l_flags & LDLM_FL_CBPENDING)) {
@@ -780,7 +773,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
if (lock->l_flags & LDLM_FL_LOCAL &&
!lock->l_readers && !lock->l_writers) {
/* If this is a local lock on a server namespace and this was
- * the last reference, cancel the lock. */
+ * the last reference, cancel the lock.
+ */
CDEBUG(D_INFO, "forcing cancel of local lock\n");
lock->l_flags |= LDLM_FL_CBPENDING;
}
@@ -788,7 +782,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
if (!lock->l_readers && !lock->l_writers &&
(lock->l_flags & LDLM_FL_CBPENDING)) {
/* If we received a blocked AST and this was the last reference,
- * run the callback. */
+ * run the callback.
+ */
LDLM_DEBUG(lock, "final decref done on cbpending lock");
@@ -809,7 +804,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
LDLM_DEBUG(lock, "add lock into lru list");
/* If this is a client-side namespace and this was the last
- * reference, put it on the LRU. */
+ * reference, put it on the LRU.
+ */
ldlm_lock_add_to_lru(lock);
unlock_res_and_lock(lock);
@@ -818,7 +814,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
* are not supported by the server, otherwise, it is done on
- * enqueue. */
+ * enqueue.
+ */
if (!exp_connect_cancelset(lock->l_conn_export) &&
!ns_connect_lru_resize(ns))
ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
@@ -835,7 +832,7 @@ void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
- LASSERTF(lock != NULL, "Non-existing lock: %#llx\n", lockh->cookie);
+ LASSERTF(lock, "Non-existing lock: %#llx\n", lockh->cookie);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
}
@@ -852,7 +849,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
- LASSERT(lock != NULL);
+ LASSERT(lock);
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
lock_res_and_lock(lock);
@@ -893,8 +890,7 @@ static void search_granted_lock(struct list_head *queue,
list_for_each(tmp, queue) {
lock = list_entry(tmp, struct ldlm_lock, l_res_link);
- mode_end = list_entry(lock->l_sl_mode.prev,
- struct ldlm_lock, l_sl_mode);
+ mode_end = list_prev_entry(lock, l_sl_mode);
if (lock->l_req_mode != req->l_req_mode) {
/* jump to last lock of mode group */
@@ -914,14 +910,13 @@ static void search_granted_lock(struct list_head *queue,
if (lock->l_resource->lr_type == LDLM_IBITS) {
for (;;) {
policy_end =
- list_entry(lock->l_sl_policy.prev,
- struct ldlm_lock,
- l_sl_policy);
+ list_prev_entry(lock, l_sl_policy);
if (lock->l_policy_data.l_inodebits.bits ==
req->l_policy_data.l_inodebits.bits) {
/* insert point is last lock of
- * the policy group */
+ * the policy group
+ */
prev->res_link =
&policy_end->l_res_link;
prev->mode_link =
@@ -942,7 +937,8 @@ static void search_granted_lock(struct list_head *queue,
} /* loop over policy groups within the mode group */
/* insert point is last lock of the mode group,
- * new policy group is started */
+ * new policy group is started
+ */
prev->res_link = &mode_end->l_res_link;
prev->mode_link = &mode_end->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
@@ -954,7 +950,8 @@ static void search_granted_lock(struct list_head *queue,
}
/* insert point is last lock on the queue,
- * new mode group and new policy group are started */
+ * new mode group and new policy group are started
+ */
prev->res_link = queue->prev;
prev->mode_link = &req->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
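
The list_prev_entry() conversions above rely on the group lists being circular: from the first lock of a mode group, prev on l_sl_mode is the group's *last* lock, which is exactly the mode_end that search_granted_lock() wants. A minimal userspace model with Linux-style intrusive lists (GNU C, for __typeof__):

/* Circular-group-list model behind list_prev_entry(). */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_prev_entry(pos, member) \
	container_of((pos)->member.prev, __typeof__(*(pos)), member)

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct lock { int id; struct list_head l_sl_mode; };

int main(void)
{
	struct lock a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	/* link three same-mode locks into one circular group */
	list_init(&a.l_sl_mode);
	list_add_tail(&b.l_sl_mode, &a.l_sl_mode);
	list_add_tail(&c.l_sl_mode, &a.l_sl_mode);

	/* from the group head, prev is the last member: mode_end */
	printf("mode_end = lock %d\n", list_prev_entry(&a, l_sl_mode)->id);
	return 0;
}
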
@@ -1034,10 +1031,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
else
ldlm_resource_add_lock(res, &res->lr_granted, lock);
- if (lock->l_granted_mode < res->lr_most_restr)
- res->lr_most_restr = lock->l_granted_mode;
-
- if (work_list && lock->l_completion_ast != NULL)
+ if (work_list && lock->l_completion_ast)
ldlm_add_ast_work_item(lock, NULL, work_list);
ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
@@ -1050,7 +1044,7 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
* comment above ldlm_lock_match
*/
static struct ldlm_lock *search_queue(struct list_head *queue,
- ldlm_mode_t *mode,
+ enum ldlm_mode *mode,
ldlm_policy_data_t *policy,
struct ldlm_lock *old_lock,
__u64 flags, int unref)
@@ -1059,7 +1053,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
struct list_head *tmp;
list_for_each(tmp, queue) {
- ldlm_mode_t match;
+ enum ldlm_mode match;
lock = list_entry(tmp, struct ldlm_lock, l_res_link);
@@ -1067,7 +1061,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
break;
/* Check if this lock can be matched.
- * Used by LU-2919(exclusive open) for open lease lock */
+ * Used by LU-2919 (exclusive open) for open lease lock
+ */
if (ldlm_is_excl(lock))
continue;
@@ -1076,7 +1071,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
* if it passes in CBPENDING and the lock still has users.
* this is generally only going to be used by children
* whose parents already hold a lock so forward progress
- * can still happen. */
+ * can still happen.
+ */
if (lock->l_flags & LDLM_FL_CBPENDING &&
!(flags & LDLM_FL_CBPENDING))
continue;
@@ -1100,7 +1096,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
continue;
/* We match if we have an existing lock with the same or a wider set
- of bits. */
+ * of bits.
+ */
if (lock->l_resource->lr_type == LDLM_IBITS &&
((lock->l_policy_data.l_inodebits.bits &
policy->l_inodebits.bits) !=
@@ -1192,16 +1189,18 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
* keep caller code unchanged), the context failure will be discovered by
* caller sometime later.
*/
-ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
- const struct ldlm_res_id *res_id, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh, int unref)
+enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
+ const struct ldlm_res_id *res_id,
+ enum ldlm_type type,
+ ldlm_policy_data_t *policy,
+ enum ldlm_mode mode,
+ struct lustre_handle *lockh, int unref)
{
struct ldlm_resource *res;
struct ldlm_lock *lock, *old_lock = NULL;
int rc = 0;
- if (ns == NULL) {
+ if (!ns) {
old_lock = ldlm_handle2lock(lockh);
LASSERT(old_lock);
@@ -1212,8 +1211,8 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
}
res = ldlm_resource_get(ns, NULL, res_id, type, 0);
- if (res == NULL) {
- LASSERT(old_lock == NULL);
+ if (!res) {
+ LASSERT(!old_lock);
return 0;
}
@@ -1222,7 +1221,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
flags, unref);
- if (lock != NULL) {
+ if (lock) {
rc = 1;
goto out;
}
@@ -1232,7 +1231,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
}
lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
flags, unref);
- if (lock != NULL) {
+ if (lock) {
rc = 1;
goto out;
}
@@ -1317,14 +1316,14 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
}
EXPORT_SYMBOL(ldlm_lock_match);
-ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
- __u64 *bits)
+enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+ __u64 *bits)
{
struct ldlm_lock *lock;
- ldlm_mode_t mode = 0;
+ enum ldlm_mode mode = 0;
lock = ldlm_handle2lock(lockh);
- if (lock != NULL) {
+ if (lock) {
lock_res_and_lock(lock);
if (lock->l_flags & LDLM_FL_GONE_MASK)
goto out;
@@ -1340,7 +1339,7 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
}
out:
- if (lock != NULL) {
+ if (lock) {
unlock_res_and_lock(lock);
LDLM_LOCK_PUT(lock);
}
@@ -1354,7 +1353,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
{
void *lvb;
- LASSERT(data != NULL);
+ LASSERT(data);
LASSERT(size >= 0);
switch (lock->l_lvb_type) {
@@ -1368,7 +1367,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lvb = req_capsule_server_swab_get(pill,
&RMF_DLM_LVB,
lustre_swab_ost_lvb);
- if (unlikely(lvb == NULL)) {
+ if (unlikely(!lvb)) {
LDLM_ERROR(lock, "no LVB");
return -EPROTO;
}
@@ -1385,7 +1384,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lvb = req_capsule_server_sized_swab_get(pill,
&RMF_DLM_LVB, size,
lustre_swab_ost_lvb_v1);
- if (unlikely(lvb == NULL)) {
+ if (unlikely(!lvb)) {
LDLM_ERROR(lock, "no LVB");
return -EPROTO;
}
@@ -1410,7 +1409,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lvb = req_capsule_server_swab_get(pill,
&RMF_DLM_LVB,
lustre_swab_lquota_lvb);
- if (unlikely(lvb == NULL)) {
+ if (unlikely(!lvb)) {
LDLM_ERROR(lock, "no LVB");
return -EPROTO;
}
@@ -1431,7 +1430,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lvb = req_capsule_client_get(pill, &RMF_DLM_LVB);
else
lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
- if (unlikely(lvb == NULL)) {
+ if (unlikely(!lvb)) {
LDLM_ERROR(lock, "no LVB");
return -EPROTO;
}
@@ -1453,8 +1452,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
*/
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- ldlm_type_t type,
- ldlm_mode_t mode,
+ enum ldlm_type type,
+ enum ldlm_mode mode,
const struct ldlm_callback_suite *cbs,
void *data, __u32 lvb_len,
enum lvb_type lvb_type)
@@ -1463,12 +1462,12 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
struct ldlm_resource *res;
res = ldlm_resource_get(ns, NULL, res_id, type, 1);
- if (res == NULL)
+ if (!res)
return NULL;
lock = ldlm_lock_new(res);
- if (lock == NULL)
+ if (!lock)
return NULL;
lock->l_req_mode = mode;
@@ -1483,7 +1482,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
lock->l_tree_node = NULL;
/* if this is the extent lock, allocate the interval tree node */
if (type == LDLM_EXTENT) {
- if (ldlm_interval_alloc(lock) == NULL)
+ if (!ldlm_interval_alloc(lock))
goto out;
}
@@ -1514,9 +1513,9 @@ out:
* Does not block. As a result of enqueue the lock would be put
* into granted or waiting list.
*/
-ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
- struct ldlm_lock **lockp,
- void *cookie, __u64 *flags)
+enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
+ struct ldlm_lock **lockp,
+ void *cookie, __u64 *flags)
{
struct ldlm_lock *lock = *lockp;
struct ldlm_resource *res = lock->l_resource;
@@ -1527,7 +1526,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
if (lock->l_req_mode == lock->l_granted_mode) {
/* The server returned a blocked lock, but it was granted
* before we got a chance to actually enqueue it. We don't
- * need to do anything else. */
+ * need to do anything else.
+ */
*flags &= ~(LDLM_FL_BLOCK_GRANTED |
LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
goto out;
@@ -1540,7 +1540,8 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
LBUG();
/* Some flags from the enqueue want to make it into the AST, via the
- * lock's l_flags. */
+ * lock's l_flags.
+ */
lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
/*
@@ -1621,19 +1622,21 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
* This can't happen with the blocking_ast, however, because we
* will never call the local blocking_ast until we drop our
* reader/writer reference, which we won't do until we get the
- * reply and finish enqueueing. */
+ * reply and finish enqueueing.
+ */
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
list_del_init(&lock->l_cp_ast);
LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
/* save l_completion_ast since it can be changed by
- * mds_intent_policy(), see bug 14225 */
+ * mds_intent_policy(), see bug 14225
+ */
completion_callback = lock->l_completion_ast;
lock->l_flags &= ~LDLM_FL_CP_REQD;
unlock_res_and_lock(lock);
- if (completion_callback != NULL)
+ if (completion_callback)
rc = completion_callback(lock, 0, (void *)arg);
LDLM_LOCK_RELEASE(lock);
@@ -1749,10 +1752,11 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
/* We create a ptlrpc request set with flow control extension.
* This request set will use the work_ast_lock function to produce new
* requests and will send a new request each time one completes in order
- * to keep the number of requests in flight to ns_max_parallel_ast */
+ * to keep the number of requests in flight to ns_max_parallel_ast
+ */
arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
work_ast_lock, arg);
- if (arg->set == NULL) {
+ if (!arg->set) {
rc = -ENOMEM;
goto out;
}
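
ptlrpc_prep_fcset() as described keeps a fixed number of requests in flight, asking the producer callback for more work only as completions free slots. A sequential simulation of that window (names are ours, not ptlrpc's):

/* Flow-control window: in_flight never exceeds MAX_IN_FLIGHT, and each
 * completion immediately pulls in the next piece of work. */
#include <stdio.h>

#define MAX_IN_FLIGHT 3
#define TOTAL_ITEMS 8

static int next_item;

/* plays the role of the work_ast_lock() producer callback */
static int produce(void)
{
	return next_item < TOTAL_ITEMS ? ++next_item : 0;
}

int main(void)
{
	int in_flight = 0, done = 0;

	/* fill the initial window up to the parallel-AST limit */
	while (in_flight < MAX_IN_FLIGHT && produce())
		in_flight++;

	/* drain: each completion refills its slot while work remains */
	while (in_flight) {
		done++;
		in_flight--;
		if (produce())
			in_flight++;
		printf("done=%d in_flight=%d\n", done, in_flight);
	}
	return 0;
}
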
@@ -1815,7 +1819,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
ns = ldlm_res_to_ns(res);
/* Please do not, no matter how tempting, remove this LBUG without
- * talking to me first. -phik */
+ * talking to me first. -phik
+ */
if (lock->l_readers || lock->l_writers) {
LDLM_ERROR(lock, "lock still has references");
LBUG();
@@ -1831,7 +1836,8 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
ldlm_pool_del(&ns->ns_pool, lock);
/* Make sure we will not be called again for the same lock, which is
- * possible if we do not zero out lock->l_granted_mode */
+ * possible if we do not zero out lock->l_granted_mode
+ */
lock->l_granted_mode = LCK_MINMODE;
unlock_res_and_lock(lock);
}
@@ -1846,7 +1852,7 @@ int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
int rc = -EINVAL;
if (lock) {
- if (lock->l_ast_data == NULL)
+ if (!lock->l_ast_data)
lock->l_ast_data = data;
if (lock->l_ast_data == data)
rc = 0;
@@ -1874,7 +1880,7 @@ void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
return;
lock = ldlm_handle2lock(lockh);
- if (lock == NULL)
+ if (!lock)
return;
LDLM_DEBUG_LIMIT(level, lock, "###");
@@ -1900,13 +1906,13 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
if (exp && exp->exp_connection) {
nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
- } else if (exp && exp->exp_obd != NULL) {
+ } else if (exp && exp->exp_obd) {
struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
}
- if (resource == NULL) {
+ if (!resource) {
libcfs_debug_vmsg2(msgdata, fmt, args,
" ns: \?\? lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: \?\? rrc=\?\? type: \?\?\? flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lu lvb_type: %d\n",
lock,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 79aeb2bf6c8e..ebe9042adb25 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -107,7 +107,7 @@ struct ldlm_bl_work_item {
struct list_head blwi_head;
int blwi_count;
struct completion blwi_comp;
- ldlm_cancel_flags_t blwi_flags;
+ enum ldlm_cancel_flags blwi_flags;
int blwi_mem_pressure;
};
@@ -136,7 +136,7 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
CDEBUG(D_DLMTRACE,
"Lock %p already unused, calling callback (%p)\n", lock,
lock->l_blocking_ast);
- if (lock->l_blocking_ast != NULL)
+ if (lock->l_blocking_ast)
lock->l_blocking_ast(lock, ld, lock->l_ast_data,
LDLM_CB_BLOCKING);
} else {
@@ -185,7 +185,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
} else if (lvb_len > 0) {
if (lock->l_lvb_len > 0) {
/* for extent lock, lvb contains ost_lvb{}. */
- LASSERT(lock->l_lvb_data != NULL);
+ LASSERT(lock->l_lvb_data);
if (unlikely(lock->l_lvb_len < lvb_len)) {
LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
@@ -194,7 +194,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
goto out;
}
} else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
- * variable length */
+ * variable length
+ */
void *lvb_data;
lvb_data = kzalloc(lvb_len, GFP_NOFS);
@@ -205,7 +206,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
}
lock_res_and_lock(lock);
- LASSERT(lock->l_lvb_data == NULL);
+ LASSERT(!lock->l_lvb_data);
lock->l_lvb_type = LVB_T_LAYOUT;
lock->l_lvb_data = lvb_data;
lock->l_lvb_len = lvb_len;
@@ -224,7 +225,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
}
/* If we receive the completion AST before the actual enqueue returned,
- * then we might need to switch lock modes, resources, or extents. */
+ * then we might need to switch lock modes, resources, or extents.
+ */
if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
LDLM_DEBUG(lock, "completion AST, new lock mode");
@@ -256,7 +258,8 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
/* BL_AST locks are not needed in LRU.
- * Let ldlm_cancel_lru() be fast. */
+ * Let ldlm_cancel_lru() be fast.
+ */
ldlm_lock_remove_from_lru(lock);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
LDLM_DEBUG(lock, "completion AST includes blocking AST");
@@ -276,8 +279,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
- /* Let Enqueue to call osc_lock_upcall() and initialize
- * l_ast_data */
+ /* Let enqueue call osc_lock_upcall() and initialize l_ast_data */
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
@@ -312,10 +314,10 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
LDLM_DEBUG(lock, "client glimpse AST callback handler");
- if (lock->l_glimpse_ast != NULL)
+ if (lock->l_glimpse_ast)
rc = lock->l_glimpse_ast(lock, req);
- if (req->rq_repmsg != NULL) {
+ if (req->rq_repmsg) {
ptlrpc_reply(req);
} else {
req->rq_status = rc;
@@ -353,7 +355,7 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
}
static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
@@ -371,7 +373,8 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
wake_up(&blp->blp_waitq);
/* cannot check blwi->blwi_flags as blwi could already be freed in
- LCF_ASYNC mode */
+ * LCF_ASYNC mode
+ */
if (!(cancel_flags & LCF_ASYNC))
wait_for_completion(&blwi->blwi_comp);
@@ -383,7 +386,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi,
struct ldlm_lock_desc *ld,
struct list_head *cancels, int count,
struct ldlm_lock *lock,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
init_completion(&blwi->blwi_comp);
INIT_LIST_HEAD(&blwi->blwi_head);
@@ -393,7 +396,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi,
blwi->blwi_ns = ns;
blwi->blwi_flags = cancel_flags;
- if (ld != NULL)
+ if (ld)
blwi->blwi_ld = *ld;
if (count) {
list_add(&blwi->blwi_head, cancels);
@@ -417,7 +420,7 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld,
struct ldlm_lock *lock,
struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
if (cancels && count == 0)
return 0;
@@ -451,7 +454,7 @@ int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
struct list_head *cancels, int count,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
}
@@ -470,14 +473,14 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req)
req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);
key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
- if (key == NULL) {
+ if (!key) {
DEBUG_REQ(D_IOCTL, req, "no set_info key");
return -EFAULT;
}
keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
RCL_CLIENT);
val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
- if (val == NULL) {
+ if (!val) {
DEBUG_REQ(D_IOCTL, req, "no set_info val");
return -EFAULT;
}
@@ -519,7 +522,7 @@ static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
- if (oqctl == NULL) {
+ if (!oqctl) {
CERROR("Can't unpack obd_quotactl\n");
return -EPROTO;
}
@@ -541,7 +544,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
/* Requests arrive in sender's byte order. The ptlrpc service
* handler has already checked and, if necessary, byte-swapped the
* incoming request message body, but I am responsible for the
- * message buffers. */
+ * message buffers.
+ */
/* do nothing for sec context finalize */
if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
@@ -549,15 +553,14 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
req_capsule_init(&req->rq_pill, req, RCL_SERVER);
- if (req->rq_export == NULL) {
+ if (!req->rq_export) {
rc = ldlm_callback_reply(req, -ENOTCONN);
ldlm_callback_errmsg(req, "Operate on unconnected server",
rc, NULL);
return 0;
}
- LASSERT(req->rq_export != NULL);
- LASSERT(req->rq_export->exp_obd != NULL);
+ LASSERT(req->rq_export->exp_obd);
switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case LDLM_BL_CALLBACK:
@@ -591,12 +594,12 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
}
ns = req->rq_export->exp_obd->obd_namespace;
- LASSERT(ns != NULL);
+ LASSERT(ns);
req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);
dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- if (dlm_req == NULL) {
+ if (!dlm_req) {
rc = ldlm_callback_reply(req, -EPROTO);
ldlm_callback_errmsg(req, "Operate without parameter", rc,
NULL);
@@ -604,7 +607,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
}
/* Force a known safe race, send a cancel to the server for a lock
- * which the server has already started a blocking callback on. */
+ * which the server has already started a blocking callback on.
+ */
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
@@ -634,7 +638,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
/* If somebody cancels the lock and the cache is already dropped,
* or the lock failed before the cp_ast was received on the client,
* we can tell the server we have no lock. Otherwise, we
- * should send cancel after dropping the cache. */
+ * should send cancel after dropping the cache.
+ */
if (((lock->l_flags & LDLM_FL_CANCELING) &&
(lock->l_flags & LDLM_FL_BL_DONE)) ||
(lock->l_flags & LDLM_FL_FAILED)) {
@@ -648,7 +653,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
return 0;
}
/* BL_AST locks are not needed in LRU.
- * Let ldlm_cancel_lru() be fast. */
+ * Let ldlm_cancel_lru() be fast.
+ */
ldlm_lock_remove_from_lru(lock);
lock->l_flags |= LDLM_FL_BL_AST;
}
@@ -661,7 +667,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
* But we'd also like to be able to indicate in the reply that we're
* cancelling right now, because it's unused, or have an intent result
* in the reply, so we might have to push the responsibility for sending
- * the reply down into the AST handlers, alas. */
+ * the reply down into the AST handlers, alas.
+ */
switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case LDLM_BL_CALLBACK:
@@ -781,17 +788,17 @@ static int ldlm_bl_thread_main(void *arg)
blwi = ldlm_bl_get_work(blp);
- if (blwi == NULL) {
+ if (!blwi) {
atomic_dec(&blp->blp_busy_threads);
l_wait_event_exclusive(blp->blp_waitq,
- (blwi = ldlm_bl_get_work(blp)) != NULL,
+ (blwi = ldlm_bl_get_work(blp)),
&lwi);
busy = atomic_inc_return(&blp->blp_busy_threads);
} else {
busy = atomic_read(&blp->blp_busy_threads);
}
- if (blwi->blwi_ns == NULL)
+ if (!blwi->blwi_ns)
/* added by ldlm_cleanup() */
break;
@@ -810,7 +817,8 @@ static int ldlm_bl_thread_main(void *arg)
/* The special case when we cancel locks in LRU
* asynchronously, we pass the list of locks here.
* Thus locks are marked LDLM_FL_CANCELING, but NOT
- * canceled locally yet. */
+ * canceled locally yet.
+ */
count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
blwi->blwi_count,
LCF_BL_AST);
@@ -915,7 +923,7 @@ static int ldlm_setup(void)
int rc = 0;
int i;
- if (ldlm_state != NULL)
+ if (ldlm_state)
return -EALREADY;
ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
@@ -1040,7 +1048,7 @@ static int ldlm_cleanup(void)
ldlm_pools_fini();
- if (ldlm_state->ldlm_bl_pool != NULL) {
+ if (ldlm_state->ldlm_bl_pool) {
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
while (atomic_read(&blp->blp_num_threads) > 0) {
@@ -1059,7 +1067,7 @@ static int ldlm_cleanup(void)
kfree(blp);
}
- if (ldlm_state->ldlm_cb_service != NULL)
+ if (ldlm_state->ldlm_cb_service)
ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);
if (ldlm_ns_kset)
@@ -1085,13 +1093,13 @@ int ldlm_init(void)
ldlm_resource_slab = kmem_cache_create("ldlm_resources",
sizeof(struct ldlm_resource), 0,
SLAB_HWCACHE_ALIGN, NULL);
- if (ldlm_resource_slab == NULL)
+ if (!ldlm_resource_slab)
return -ENOMEM;
ldlm_lock_slab = kmem_cache_create("ldlm_locks",
sizeof(struct ldlm_lock), 0,
SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
- if (ldlm_lock_slab == NULL) {
+ if (!ldlm_lock_slab) {
kmem_cache_destroy(ldlm_resource_slab);
return -ENOMEM;
}
@@ -1099,7 +1107,7 @@ int ldlm_init(void)
ldlm_interval_slab = kmem_cache_create("interval_node",
sizeof(struct ldlm_interval),
0, SLAB_HWCACHE_ALIGN, NULL);
- if (ldlm_interval_slab == NULL) {
+ if (!ldlm_interval_slab) {
kmem_cache_destroy(ldlm_resource_slab);
kmem_cache_destroy(ldlm_lock_slab);
return -ENOMEM;
@@ -1117,7 +1125,8 @@ void ldlm_exit(void)
kmem_cache_destroy(ldlm_resource_slab);
/* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call
* synchronize_rcu() to wait a grace period elapsed, so that
- * ldlm_lock_free() get a chance to be called. */
+ * ldlm_lock_free() get a chance to be called.
+ */
synchronize_rcu();
kmem_cache_destroy(ldlm_lock_slab);
kmem_cache_destroy(ldlm_interval_slab);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 3d7c137d223a..b913ba9cf97c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -107,7 +107,7 @@
/*
* 50 ldlm locks for 1MB of RAM.
*/
-#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
+#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
/*
* Maximal possible grant step plan in %.
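
Worked example of the LDLM_POOL_HOST_L macro above: pages >> (20 - PAGE_SHIFT) converts a page count to megabytes, and the host budget is 50 locks per MB of RAM. A standalone sketch assuming 4 KiB pages and an illustrative page count:

/* LDLM_POOL_HOST_L arithmetic: 50 locks per MB of RAM. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	unsigned long num_cachepages = 1048576;	/* 4 GiB of 4 KiB pages */
	unsigned long mb = num_cachepages >> (20 - PAGE_SHIFT);
	unsigned long pool_host_l = mb * 50;

	printf("%lu MB -> %lu locks\n", mb, pool_host_l); /* 4096 -> 204800 */
	return 0;
}
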
@@ -246,7 +246,6 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
*/
obd = container_of(pl, struct ldlm_namespace,
ns_pool)->ns_obd;
- LASSERT(obd != NULL);
read_lock(&obd->obd_pool_lock);
pl->pl_server_lock_volume = obd->obd_pool_slv;
atomic_set(&pl->pl_limit, obd->obd_pool_limit);
@@ -381,7 +380,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
spin_unlock(&pl->pl_lock);
recalc:
- if (pl->pl_ops->po_recalc != NULL) {
+ if (pl->pl_ops->po_recalc) {
count = pl->pl_ops->po_recalc(pl);
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
count);
@@ -409,7 +408,7 @@ static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
int cancel = 0;
- if (pl->pl_ops->po_shrink != NULL) {
+ if (pl->pl_ops->po_shrink) {
cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
if (nr > 0) {
lprocfs_counter_add(pl->pl_stats,
@@ -643,11 +642,11 @@ static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
{
- if (pl->pl_stats != NULL) {
+ if (pl->pl_stats) {
lprocfs_free_stats(&pl->pl_stats);
pl->pl_stats = NULL;
}
- if (pl->pl_debugfs_entry != NULL) {
+ if (pl->pl_debugfs_entry) {
ldebugfs_remove(&pl->pl_debugfs_entry);
pl->pl_debugfs_entry = NULL;
}
@@ -834,7 +833,7 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
continue;
}
- if (ns_old == NULL)
+ if (!ns_old)
ns_old = ns;
ldlm_namespace_get(ns);
@@ -957,7 +956,7 @@ static int ldlm_pools_recalc(ldlm_side_t client)
continue;
}
- if (ns_old == NULL)
+ if (!ns_old)
ns_old = ns;
spin_lock(&ns->ns_lock);
@@ -1040,7 +1039,7 @@ static int ldlm_pools_thread_start(void)
struct l_wait_info lwi = { 0 };
struct task_struct *task;
- if (ldlm_pools_thread != NULL)
+ if (ldlm_pools_thread)
return -EALREADY;
ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
@@ -1065,7 +1064,7 @@ static int ldlm_pools_thread_start(void)
static void ldlm_pools_thread_stop(void)
{
- if (ldlm_pools_thread == NULL)
+ if (!ldlm_pools_thread)
return;
thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index b9eb37762434..74e193e52cd6 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -94,7 +94,7 @@ static int ldlm_expired_completion_wait(void *data)
struct obd_import *imp;
struct obd_device *obd;
- if (lock->l_conn_export == NULL) {
+ if (!lock->l_conn_export) {
static unsigned long next_dump, last_dump;
LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
@@ -128,7 +128,8 @@ static int ldlm_expired_completion_wait(void *data)
}
/* We use the same basis for both server side and client side functions
- from a single node. */
+ * from a single node.
+ */
static int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
int timeout = at_get(ldlm_lock_to_ns_at(lock));
@@ -136,8 +137,9 @@ static int ldlm_get_enq_timeout(struct ldlm_lock *lock)
if (AT_OFF)
return obd_timeout / 2;
/* Since these are non-updating timeouts, we should be conservative.
- It would be nice to have some kind of "early reply" mechanism for
- lock callbacks too... */
+ * It would be nice to have some kind of "early reply" mechanism for
+ * lock callbacks too...
+ */
timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
return max(timeout, ldlm_enqueue_min);
}
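
The computation above adds 50% slack to the adaptive timeout estimate, caps the result at at_max, and never lets it drop below the enqueue minimum. A sketch with illustrative constants:

/* Enqueue-timeout shaping: estimate * 1.5, capped and floored. */
#include <stdio.h>

#define AT_MAX 600
#define LDLM_ENQUEUE_MIN 5

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

static int enq_timeout(int at_estimate)
{
	int timeout = min_i(AT_MAX, at_estimate + (at_estimate >> 1));

	return max_i(timeout, LDLM_ENQUEUE_MIN);
}

int main(void)
{
	printf("%d %d %d\n", enq_timeout(2), enq_timeout(40),
	       enq_timeout(500));	/* 5 60 600 */
	return 0;
}
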
@@ -239,12 +241,13 @@ noreproc:
obd = class_exp2obd(lock->l_conn_export);
/* if this is a local lock, then there is no import */
- if (obd != NULL)
+ if (obd)
imp = obd->u.cli.cl_import;
/* Wait a long time for enqueue - server may have to callback a
- lock from another client. Server will evict the other client if it
- doesn't respond reasonably, and then give us the lock. */
+ * lock from another client. Server will evict the other client if it
+ * doesn't respond reasonably, and then give us the lock.
+ */
timeout = ldlm_get_enq_timeout(lock) * 2;
lwd.lwd_lock = lock;
@@ -258,7 +261,7 @@ noreproc:
interrupted_completion_wait, &lwd);
}
- if (imp != NULL) {
+ if (imp) {
spin_lock(&imp->imp_lock);
lwd.lwd_conn_cnt = imp->imp_conn_cnt;
spin_unlock(&imp->imp_lock);
@@ -296,7 +299,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
!(lock->l_flags & LDLM_FL_FAILED)) {
/* Make sure that this lock will not be found by raced
* bl_ast and -EINVAL reply is sent to server anyways.
- * bug 17645 */
+ * bug 17645
+ */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
need_cancel = 1;
@@ -312,11 +316,13 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
ldlm_lock_decref_internal(lock, mode);
/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
- * from llite/file.c/ll_file_flock(). */
+ * from llite/file.c/ll_file_flock().
+ */
/* This code accounts for the fact that we do not have a blocking handler
 * on a client for flock locks. As such, this is the place where we must
 * completely kill failed locks (both interrupted ones and those that
- * were waiting to be granted when server evicted us. */
+ * were waiting to be granted when the server evicted us).
+ */
if (lock->l_resource->lr_type == LDLM_FLOCK) {
lock_res_and_lock(lock);
ldlm_resource_unlink_lock(lock);
@@ -331,7 +337,8 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
* Called after receiving reply from server.
*/
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
- ldlm_type_t type, __u8 with_policy, ldlm_mode_t mode,
+ enum ldlm_type type, __u8 with_policy,
+ enum ldlm_mode mode,
__u64 *flags, void *lvb, __u32 lvb_len,
struct lustre_handle *lockh, int rc)
{
@@ -363,13 +370,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
/* Before we return, swab the reply */
reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if (reply == NULL) {
+ if (!reply) {
rc = -EPROTO;
goto cleanup;
}
if (lvb_len != 0) {
- LASSERT(lvb != NULL);
+ LASSERT(lvb);
size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
RCL_SERVER);
@@ -401,7 +408,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
/* Key change rehash lock in per-export hash with new key */
if (exp->exp_lock_hash) {
/* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
+ * ldlm_export_lock_keycmp()
+ */
/* coverity[overrun-buffer-val] */
cfs_hash_rehash_key(exp->exp_lock_hash,
&lock->l_remote_handle,
@@ -415,7 +423,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
LDLM_INHERIT_FLAGS);
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
- * to wait with no timeout as well */
+ * to wait with no timeout as well
+ */
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
LDLM_FL_NO_TIMEOUT);
unlock_res_and_lock(lock);
@@ -425,7 +434,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
/* If enqueue returned a blocked lock but the completion handler has
* already run, then it fixed up the resource and we don't need to do it
- * again. */
+ * again.
+ */
if ((*flags) & LDLM_FL_LOCK_CHANGED) {
int newmode = reply->lock_desc.l_req_mode;
@@ -445,7 +455,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = ldlm_lock_change_resource(ns, lock,
&reply->lock_desc.l_resource.lr_name);
- if (rc || lock->l_resource == NULL) {
+ if (rc || !lock->l_resource) {
rc = -ENOMEM;
goto cleanup;
}
@@ -467,7 +477,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
if ((*flags) & LDLM_FL_AST_SENT ||
/* Cancel extent locks as soon as possible on a liblustre client,
* because it cannot handle asynchronous ASTs robustly (see
- * bug 7311). */
+ * bug 7311).
+ */
(LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
lock_res_and_lock(lock);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
@@ -476,12 +487,14 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
}
/* If the lock has already been granted by a completion AST, don't
- * clobber the LVB with an older one. */
+ * clobber the LVB with an older one.
+ */
if (lvb_len != 0) {
/* We must lock or a racing completion might update lvb without
* letting us know and we'll clobber the correct value.
- * Cannot unlock after the check either, a that still leaves
- * a tiny window for completion to get in */
+ * Cannot unlock after the check either, as that still leaves
+ * a tiny window for completion to get in
+ */
lock_res_and_lock(lock);
if (lock->l_req_mode != lock->l_granted_mode)
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
@@ -495,7 +508,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
if (!is_replay) {
rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
- if (lock->l_completion_ast != NULL) {
+ if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock, *flags, NULL);
if (!rc)
@@ -505,9 +518,10 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
}
}
- if (lvb_len && lvb != NULL) {
+ if (lvb_len && lvb) {
/* Copy the LVB here, and not earlier, because the completion
- * AST (if any) can override what we got in the reply */
+ * AST (if any) can override what we got in the reply
+ */
memcpy(lvb, lock->l_lvb_data, lvb_len);
}
@@ -532,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
{
int avail;
- avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
+ avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
if (likely(avail >= 0))
avail /= (int)sizeof(struct lustre_handle);
else
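
ldlm_req_handles_avail() divides whatever request space remains, after the fixed request size, into lustre_handle slots; the min() keeps a cancel RPC within a page minus some slack. A userspace sketch with assumed sizes (LDLM_MAXREQSIZE here is a stand-in value):

/* Handle-slot arithmetic for batched cancel RPCs. */
#include <stdio.h>

#define PAGE_SIZE 4096
#define LDLM_MAXREQSIZE 2048	/* assumed for illustration */

struct lustre_handle { unsigned long long cookie; };

static int req_handles_avail(int req_size)
{
	int buf = LDLM_MAXREQSIZE < PAGE_SIZE - 512 ?
		  LDLM_MAXREQSIZE : PAGE_SIZE - 512;
	int avail = buf - req_size;

	return avail >= 0 ? avail / (int)sizeof(struct lustre_handle) : 0;
}

int main(void)
{
	printf("%d handles\n", req_handles_avail(512)); /* 1536/8 = 192 */
	return 0;
}
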
@@ -579,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
LIST_HEAD(head);
int rc;
- if (cancels == NULL)
+ if (!cancels)
cancels = &head;
if (ns_connect_cancelset(ns)) {
/* Estimate the amount of available space in the request. */
@@ -593,7 +607,8 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
/* Cancel LRU locks here _only_ if the server supports
* EARLY_CANCEL. Otherwise we have to send extra CANCEL
- * RPC, which will make us slower. */
+ * RPC, which will make us slower.
+ */
if (avail > count)
count += ldlm_cancel_lru_local(ns, cancels, to_free,
avail - count, 0, flags);
@@ -618,7 +633,8 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
/* Skip first lock handler in ldlm_request_pack(),
* this method will increment @lock_count according
* to the lock handle amount actually written to
- * the buffer. */
+ * the buffer.
+ */
dlm->lock_count = canceloff;
}
/* Pack into the request @pack lock handles. */
@@ -665,15 +681,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
int rc, err;
struct ptlrpc_request *req;
- LASSERT(exp != NULL);
-
ns = exp->exp_obd->obd_namespace;
/* If we're replaying this lock, just check some invariants.
- * If we're creating a new lock, get everything all setup nice. */
+ * If we're creating a new lock, get everything set up nicely.
+ */
if (is_replay) {
lock = ldlm_handle2lock_long(lockh, 0);
- LASSERT(lock != NULL);
+ LASSERT(lock);
LDLM_DEBUG(lock, "client-side enqueue START");
LASSERT(exp == lock->l_conn_export);
} else {
@@ -685,16 +700,21 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
einfo->ei_mode, &cbs, einfo->ei_cbdata,
lvb_len, lvb_type);
- if (lock == NULL)
+ if (!lock)
return -ENOMEM;
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
- if (policy != NULL)
- lock->l_policy_data = *policy;
+ if (policy)
+ lock->l_policy_data = *policy;
+
+ if (einfo->ei_type == LDLM_EXTENT) {
+ /* extent lock without policy is a bug */
+ if (!policy)
+ LBUG();
- if (einfo->ei_type == LDLM_EXTENT)
lock->l_req_extent = policy->l_extent;
+ }
LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n",
*flags);
}
@@ -706,12 +726,12 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
/* lock not sent to server yet */
- if (reqp == NULL || *reqp == NULL) {
+ if (!reqp || !*reqp) {
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_LDLM_ENQUEUE,
LUSTRE_DLM_VERSION,
LDLM_ENQUEUE);
- if (req == NULL) {
+ if (!req) {
failed_lock_cleanup(ns, lock, einfo->ei_mode);
LDLM_LOCK_RELEASE(lock);
return -ENOMEM;
@@ -754,7 +774,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
policy->l_extent.end == OBD_OBJECT_EOF));
if (async) {
- LASSERT(reqp != NULL);
+ LASSERT(reqp);
return 0;
}
@@ -767,13 +787,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lockh, rc);
/* If ldlm_cli_enqueue_fini did not find the lock, we need to free
- * one reference that we took */
+ * one reference that we took
+ */
if (err == -ENOLCK)
LDLM_LOCK_RELEASE(lock);
else
rc = err;
- if (!req_passed_in && req != NULL) {
+ if (!req_passed_in && req) {
ptlrpc_req_finished(req);
if (reqp)
*reqp = NULL;
@@ -832,7 +853,7 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
int max, packed = 0;
dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
- LASSERT(dlm != NULL);
+ LASSERT(dlm);
/* Check the room in the request buffer. */
max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
@@ -843,7 +864,8 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
/* XXX: it would be better to pack lock handles grouped by resource,
* so that the server cancel would call filter_lvbo_update() less
- * frequently. */
+ * frequently.
+ */
list_for_each_entry(lock, head, l_bl_ast) {
if (!count--)
break;
@@ -858,17 +880,18 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
/**
* Prepare and send a batched cancel RPC. It will include \a count lock
- * handles of locks given in \a cancels list. */
+ * handles of locks given in \a cancels list.
+ */
static int ldlm_cli_cancel_req(struct obd_export *exp,
struct list_head *cancels,
- int count, ldlm_cancel_flags_t flags)
+ int count, enum ldlm_cancel_flags flags)
{
struct ptlrpc_request *req = NULL;
struct obd_import *imp;
int free, sent = 0;
int rc = 0;
- LASSERT(exp != NULL);
+ LASSERT(exp);
LASSERT(count > 0);
CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
@@ -883,14 +906,14 @@ static int ldlm_cli_cancel_req(struct obd_export *exp,
while (1) {
imp = class_exp2cliimp(exp);
- if (imp == NULL || imp->imp_invalid) {
+ if (!imp || imp->imp_invalid) {
CDEBUG(D_DLMTRACE,
"skipping cancel on invalid import %p\n", imp);
return count;
}
req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out;
}
@@ -946,7 +969,6 @@ out:
static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
{
- LASSERT(imp != NULL);
return &imp->imp_obd->obd_namespace->ns_pool;
}
@@ -971,7 +993,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
* is the case when server does not support LRU resize feature.
* This is also possible in some recovery cases when server-side
* reqs have no reference to the OBD export and thus access to
- * server-side namespace is not possible. */
+ * server-side namespace is not possible.
+ */
if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
lustre_msg_get_limit(req->rq_repmsg) == 0) {
DEBUG_REQ(D_HA, req,
@@ -989,7 +1012,8 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
* to the pool thread. We do not access obd_namespace and pool
* directly here as there is no reliable way to make sure that
* they are still alive at cleanup time. Evil races are possible
- * which may cause Oops at that time. */
+ * which may cause Oops at that time.
+ */
write_lock(&obd->obd_pool_lock);
obd->obd_pool_slv = new_slv;
obd->obd_pool_limit = new_limit;
@@ -1005,7 +1029,7 @@ EXPORT_SYMBOL(ldlm_cli_update_pool);
* Lock must not have any readers or writers by this time.
*/
int ldlm_cli_cancel(struct lustre_handle *lockh,
- ldlm_cancel_flags_t cancel_flags)
+ enum ldlm_cancel_flags cancel_flags)
{
struct obd_export *exp;
int avail, flags, count = 1;
@@ -1016,8 +1040,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh,
/* concurrent cancels on the same handle can happen */
lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
- if (lock == NULL) {
- LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
+ if (!lock) {
+ LDLM_DEBUG_NOLOCK("lock is already being destroyed");
return 0;
}
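The trailing \n goes away because the LDLM debug macros terminate each message themselves, so the embedded newline only produced blank log lines. An illustrative macro shape, not the real definition:

#include <linux/printk.h>

#define MY_DEBUG_NOLOCK(fmt, ...) \
	pr_debug("### " fmt "\n", ##__VA_ARGS__)	/* newline added here */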
@@ -1028,7 +1052,8 @@ int ldlm_cli_cancel(struct lustre_handle *lockh,
}
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* RPC which goes to canceld portal, so we can cancel other LRU locks
- * here and send them all as one LDLM_CANCEL RPC. */
+ * here and send them all as one LDLM_CANCEL RPC.
+ */
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, &cancels);
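A lock's l_bl_ast linkage can sit on only one blocking-AST list at a time, so every caller asserts emptiness before queueing the lock for the batched cancel RPC; ldlm_prepare_lru_list() below repeats the same pair. A sketch using the generic list API:

#include <linux/bug.h>
#include <linux/list.h>

static void queue_for_cancel(struct list_head *cancels,
			     struct list_head *l_bl_ast)
{
	WARN_ON(!list_empty(l_bl_ast));	/* must not be queued elsewhere */
	list_add(l_bl_ast, cancels);
}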
@@ -1055,7 +1080,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel);
* Return the number of cancelled locks.
*/
int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
- ldlm_cancel_flags_t flags)
+ enum ldlm_cancel_flags flags)
{
LIST_HEAD(head);
struct ldlm_lock *lock, *next;
@@ -1076,7 +1101,8 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
/* Until we have compound requests and can send LDLM_CANCEL
* requests batched with generic RPCs, we need to send cancels
* with the LDLM_FL_BL_AST flag in a separate RPC from
- * the one being generated now. */
+ * the one being generated now.
+ */
if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
LDLM_DEBUG(lock, "Cancel lock separately");
list_del_init(&lock->l_bl_ast);
@@ -1116,7 +1142,8 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
lock_res_and_lock(lock);
/* don't check added & count since we want to process all locks
- * from unused list */
+ * from unused list
+ */
switch (lock->l_resource->lr_type) {
case LDLM_EXTENT:
case LDLM_IBITS:
@@ -1152,7 +1179,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
unsigned long la;
/* Stop LRU processing when we reach past @count or have checked all
- * locks in LRU. */
+ * locks in LRU.
+ */
if (count && added >= count)
return LDLM_POLICY_KEEP_LOCK;
@@ -1166,7 +1194,8 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
ldlm_pool_set_clv(pl, lv);
/* Stop when SLV is not yet come from server or lv is smaller than
- * it is. */
+ * it is.
+ */
return (slv == 0 || lv < slv) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
@@ -1186,7 +1215,8 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
int count)
{
/* Stop LRU processing when we reach past @count or have checked all
- * locks in LRU. */
+ * locks in LRU.
+ */
return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
@@ -1227,7 +1257,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
int count)
{
/* Stop LRU processing when we reach past count or have checked all
- * locks in LRU. */
+ * locks in LRU.
+ */
return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
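All of the ldlm_cancel_*_policy() helpers above share one contract: given how many locks this pass has already added and the caller's requested count, return either keep-the-lock (stop scanning) or cancel-the-lock (keep going). A stripped-down sketch of that contract:

enum policy_res { POLICY_CANCEL_LOCK, POLICY_KEEP_LOCK };

/* e.g. the "passed" policy: cancel until @count locks are gathered */
static enum policy_res passed_policy(int added, int count)
{
	return added >= count ? POLICY_KEEP_LOCK : POLICY_CANCEL_LOCK;
}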
@@ -1307,7 +1338,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
count += unused - ns->ns_max_unused;
pf = ldlm_cancel_lru_policy(ns, flags);
- LASSERT(pf != NULL);
+ LASSERT(pf);
while (!list_empty(&ns->ns_unused_list)) {
ldlm_policy_res_t result;
@@ -1331,7 +1362,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
continue;
/* Somebody is already doing CANCEL. No need for this
- * lock in LRU, do not traverse it again. */
+ * lock in LRU, do not traverse it again.
+ */
if (!(lock->l_flags & LDLM_FL_CANCELING))
break;
@@ -1380,7 +1412,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
- * by itself, or the lock is no longer unused. */
+ * by itself, or the lock is no longer unused.
+ */
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference,
__func__, current);
@@ -1394,7 +1427,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* better send cancel notification to server, so that it
* frees appropriate state. This might lead to a race
* where while we are doing cancel here, server is also
- * silently cancelling this lock. */
+ * silently cancelling this lock.
+ */
lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
/* Setting the CBPENDING flag is a little misleading,
@@ -1402,7 +1436,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* CBPENDING is set, the lock can accumulate no more
* readers/writers. Since readers and writers are
* already zero here, ldlm_lock_decref() won't see
- * this flag and call l_blocking_ast */
+ * this flag and call l_blocking_ast
+ */
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
/* We can't re-add to l_lru as it confuses the
@@ -1410,7 +1445,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* arrives after we drop lr_lock below. We use l_bl_ast
* and can't use l_pending_chain as it is used both on
* server and client nevertheless bug 5666 says it is
- * used only on server */
+ * used only on server
+ */
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, cancels);
unlock_res_and_lock(lock);
@@ -1425,7 +1461,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
struct list_head *cancels, int count, int max,
- ldlm_cancel_flags_t cancel_flags, int flags)
+ enum ldlm_cancel_flags cancel_flags, int flags)
{
int added;
@@ -1444,14 +1480,15 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
* callback will be performed in this function.
*/
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
- ldlm_cancel_flags_t cancel_flags,
+ enum ldlm_cancel_flags cancel_flags,
int flags)
{
LIST_HEAD(cancels);
int count, rc;
/* Just prepare the list of locks, do not actually cancel them yet.
- * Locks are cancelled later in a separate thread. */
+ * Locks are cancelled later in a separate thread.
+ */
count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
if (rc == 0)
@@ -1468,15 +1505,16 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
int ldlm_cancel_resource_local(struct ldlm_resource *res,
struct list_head *cancels,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode, __u64 lock_flags,
- ldlm_cancel_flags_t cancel_flags, void *opaque)
+ enum ldlm_mode mode, __u64 lock_flags,
+ enum ldlm_cancel_flags cancel_flags,
+ void *opaque)
{
struct ldlm_lock *lock;
int count = 0;
lock_res(res);
list_for_each_entry(lock, &res->lr_granted, l_res_link) {
- if (opaque != NULL && lock->l_ast_data != opaque) {
+ if (opaque && lock->l_ast_data != opaque) {
LDLM_ERROR(lock, "data %p doesn't match opaque %p",
lock->l_ast_data, opaque);
continue;
@@ -1486,7 +1524,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
continue;
/* If somebody is already doing CANCEL, or blocking AST came,
- * skip this lock. */
+ * skip this lock.
+ */
if (lock->l_flags & LDLM_FL_BL_AST ||
lock->l_flags & LDLM_FL_CANCELING)
continue;
@@ -1495,7 +1534,8 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
continue;
/* If policy is given and this is IBITS lock, add to list only
- * those locks that match by policy. */
+ * those locks that match by policy.
+ */
if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
!(lock->l_policy_data.l_inodebits.bits &
policy->l_inodebits.bits))
@@ -1527,7 +1567,8 @@ EXPORT_SYMBOL(ldlm_cancel_resource_local);
* Destroy \a cancels at the end.
*/
int ldlm_cli_cancel_list(struct list_head *cancels, int count,
- struct ptlrpc_request *req, ldlm_cancel_flags_t flags)
+ struct ptlrpc_request *req,
+ enum ldlm_cancel_flags flags)
{
struct ldlm_lock *lock;
int res = 0;
@@ -1539,7 +1580,8 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
* Usually it is enough to have just 1 RPC, but it is possible that
* there are too many locks to be cancelled in LRU or on a resource.
* It would also speed up the case when the server does not support
- * the feature. */
+ * the feature.
+ */
while (count > 0) {
LASSERT(!list_empty(cancels));
lock = list_entry(cancels->next, struct ldlm_lock,
@@ -1577,12 +1619,13 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list);
* Cancel all locks on a resource that have 0 readers/writers.
*
* If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
- * to notify the server. */
+ * to notify the server.
+ */
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
+ enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags,
void *opaque)
{
struct ldlm_resource *res;
@@ -1591,7 +1634,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
int rc;
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL) {
+ if (!res) {
/* This is not a problem. */
CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
return 0;
@@ -1638,17 +1681,17 @@ static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
* to notify the server. */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
- ldlm_cancel_flags_t flags, void *opaque)
+ enum ldlm_cancel_flags flags, void *opaque)
{
struct ldlm_cli_cancel_arg arg = {
.lc_flags = flags,
.lc_opaque = opaque,
};
- if (ns == NULL)
+ if (!ns)
return ELDLM_OK;
- if (res_id != NULL) {
+ if (res_id) {
return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
LCK_MINMODE, flags,
opaque);
@@ -1743,13 +1786,13 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns,
struct ldlm_resource *res;
int rc;
- if (ns == NULL) {
+ if (!ns) {
CERROR("must pass in namespace\n");
LBUG();
}
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL)
+ if (!res)
return 0;
LDLM_RESOURCE_ADDREF(res);
@@ -1796,7 +1839,7 @@ static int replay_lock_interpret(const struct lu_env *env,
goto out;
reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if (reply == NULL) {
+ if (!reply) {
rc = -EPROTO;
goto out;
}
@@ -1815,7 +1858,8 @@ static int replay_lock_interpret(const struct lu_env *env,
exp = req->rq_export;
if (exp && exp->exp_lock_hash) {
/* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
+ * ldlm_export_lock_keycmp()
+ */
/* coverity[overrun-buffer-val] */
cfs_hash_rehash_key(exp->exp_lock_hash,
&lock->l_remote_handle,
@@ -1850,7 +1894,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
/* If this is reply-less callback lock, we cannot replay it, since
* server might have long dropped it, but notification of that event was
- * lost by network. (and server granted conflicting lock already) */
+ * lost by network. (and server granted conflicting lock already)
+ */
if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
@@ -1882,7 +1927,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
/* We're part of recovery, so don't wait for it. */
@@ -1901,7 +1946,8 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
/* notify the server we've replayed all requests.
* also, we mark the request to be put on a dedicated
 * queue to be processed after all request replays.
- * bug 6063 */
+ * bug 6063
+ */
lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
LDLM_DEBUG(lock, "replaying lock:");
@@ -1936,7 +1982,8 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
/* We don't need to care whether or not LRU resize is enabled
* because the LDLM_CANCEL_NO_WAIT policy doesn't use the
- * count parameter */
+ * count parameter
+ */
canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 0ae610015b7c..9dede87ad0a3 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -56,7 +56,8 @@ LIST_HEAD(ldlm_srv_namespace_list);
struct mutex ldlm_cli_namespace_lock;
/* Client Namespaces that have active resources in them.
* Once all resources go away, ldlm_poold moves such namespaces to the
- * inactive list */
+ * inactive list
+ */
LIST_HEAD(ldlm_cli_active_namespace_list);
/* Client namespaces that don't have any locks in them */
static LIST_HEAD(ldlm_cli_inactive_namespace_list);
@@ -66,7 +67,8 @@ static struct dentry *ldlm_ns_debugfs_dir;
struct dentry *ldlm_svc_debugfs_dir;
/* during debug dump certain amount of granted locks for one resource to avoid
- * DDOS. */
+ * DDoS.
+ */
static unsigned int ldlm_dump_granted_max = 256;
static ssize_t
@@ -275,7 +277,8 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
/* Make sure that LRU resize was originally supported before
- * turning it on here. */
+ * turning it on here.
+ */
if (lru_resize &&
(ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
CDEBUG(D_DLMTRACE,
@@ -380,7 +383,7 @@ static void ldlm_namespace_debugfs_unregister(struct ldlm_namespace *ns)
else
ldebugfs_remove(&ns->ns_debugfs_entry);
- if (ns->ns_stats != NULL)
+ if (ns->ns_stats)
lprocfs_free_stats(&ns->ns_stats);
}
@@ -400,7 +403,7 @@ static int ldlm_namespace_sysfs_register(struct ldlm_namespace *ns)
"%s", ldlm_ns_name(ns));
ns->ns_stats = lprocfs_alloc_stats(LDLM_NSS_LAST, 0);
- if (ns->ns_stats == NULL) {
+ if (!ns->ns_stats) {
kobject_put(&ns->ns_kobj);
return -ENOMEM;
}
@@ -420,7 +423,7 @@ static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
} else {
ns_entry = debugfs_create_dir(ldlm_ns_name(ns),
ldlm_ns_debugfs_dir);
- if (ns_entry == NULL)
+ if (!ns_entry)
return -ENOMEM;
ns->ns_debugfs_entry = ns_entry;
}
@@ -554,7 +557,7 @@ static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
};
struct ldlm_ns_hash_def {
- ldlm_ns_type_t nsd_type;
+ enum ldlm_ns_type nsd_type;
/** hash bucket bits */
unsigned nsd_bkt_bits;
/** hash bits */
@@ -621,8 +624,8 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns,
*/
struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
ldlm_side_t client,
- ldlm_appetite_t apt,
- ldlm_ns_type_t ns_type)
+ enum ldlm_appetite apt,
+ enum ldlm_ns_type ns_type)
{
struct ldlm_namespace *ns = NULL;
struct ldlm_ns_bucket *nsb;
@@ -631,7 +634,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
int idx;
int rc;
- LASSERT(obd != NULL);
+ LASSERT(obd);
rc = ldlm_get_ref();
if (rc) {
@@ -664,7 +667,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
CFS_HASH_BIGNAME |
CFS_HASH_SPIN_BKTLOCK |
CFS_HASH_NO_ITEMREF);
- if (ns->ns_rs_hash == NULL)
+ if (!ns->ns_rs_hash)
goto out_ns;
cfs_hash_for_each_bucket(ns->ns_rs_hash, &bd, idx) {
@@ -749,7 +752,8 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
struct lustre_handle lockh;
/* First, we look for non-cleaned-yet lock
- * all cleaned locks are marked by CLEANED flag. */
+ * all cleaned locks are marked by CLEANED flag.
+ */
lock_res(res);
list_for_each(tmp, q) {
lock = list_entry(tmp, struct ldlm_lock,
@@ -763,13 +767,14 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
break;
}
- if (lock == NULL) {
+ if (!lock) {
unlock_res(res);
break;
}
/* Set CBPENDING so nothing in the cancellation path
- * can match this lock. */
+ * can match this lock.
+ */
lock->l_flags |= LDLM_FL_CBPENDING;
lock->l_flags |= LDLM_FL_FAILED;
lock->l_flags |= flags;
@@ -782,7 +787,8 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
/* This is a little bit gross, but much better than the
* alternative: pretend that we got a blocking AST from
* the server, so that when the lock is decref'd, it
- * will go away ... */
+ * will go away ...
+ */
unlock_res(res);
LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
if (lock->l_completion_ast)
@@ -837,7 +843,7 @@ static int ldlm_resource_complain(struct cfs_hash *hs, struct cfs_hash_bd *bd,
*/
int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
{
- if (ns == NULL) {
+ if (!ns) {
CDEBUG(D_INFO, "NULL ns, skipping cleanup\n");
return ELDLM_OK;
}
@@ -873,7 +879,8 @@ force_wait:
atomic_read(&ns->ns_bref) == 0, &lwi);
/* Forced cleanups should be able to reclaim all references,
- * so it's safe to wait forever... we can't leak locks... */
+ * so it's safe to wait forever... we can't leak locks...
+ */
if (force && rc == -ETIMEDOUT) {
LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
ldlm_ns_name(ns),
@@ -943,7 +950,8 @@ static void ldlm_namespace_unregister(struct ldlm_namespace *ns,
LASSERT(!list_empty(&ns->ns_list_chain));
/* Some asserts and possibly other parts of the code are still
* using list_empty(&ns->ns_list_chain). This is why it is
- * important to use list_del_init() here. */
+ * important to use list_del_init() here.
+ */
list_del_init(&ns->ns_list_chain);
ldlm_namespace_nr_dec(client);
mutex_unlock(ldlm_namespace_lock(client));
@@ -963,7 +971,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns)
ldlm_namespace_unregister(ns, ns->ns_client);
/* Fini pool _before_ parent proc dir is removed. This is important as
* ldlm_pool_fini() removes own proc dir which is child to @dir.
- * Removing it after @dir may cause oops. */
+ * Removing it after @dir may cause oops.
+ */
ldlm_pool_fini(&ns->ns_pool);
ldlm_namespace_debugfs_unregister(ns);
@@ -971,7 +980,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns)
cfs_hash_putref(ns->ns_rs_hash);
/* Namespace \a ns should be not on list at this time, otherwise
* this will cause issues related to using freed \a ns in poold
- * thread. */
+ * thread.
+ */
LASSERT(list_empty(&ns->ns_list_chain));
kfree(ns);
ldlm_put_ref();
@@ -1031,8 +1041,8 @@ static struct ldlm_resource *ldlm_resource_new(void)
struct ldlm_resource *res;
int idx;
- res = kmem_cache_alloc(ldlm_resource_slab, GFP_NOFS | __GFP_ZERO);
- if (res == NULL)
+ res = kmem_cache_zalloc(ldlm_resource_slab, GFP_NOFS);
+ if (!res)
return NULL;
INIT_LIST_HEAD(&res->lr_granted);
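kmem_cache_zalloc(cachep, flags) is shorthand for kmem_cache_alloc(cachep, flags | __GFP_ZERO), so this hunk, like the matching one in ll_file_data_get() further down, is behavior-preserving and merely states the zeroing intent. Sketch:

#include <linux/slab.h>

static void *alloc_zeroed(struct kmem_cache *cachep)
{
	return kmem_cache_zalloc(cachep, GFP_NOFS);	/* zeroed object */
}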
@@ -1050,7 +1060,8 @@ static struct ldlm_resource *ldlm_resource_new(void)
lu_ref_init(&res->lr_reference);
/* The creator of the resource must unlock the mutex after LVB
- * initialization. */
+ * initialization.
+ */
mutex_init(&res->lr_lvb_mutex);
mutex_lock(&res->lr_lvb_mutex);
@@ -1065,7 +1076,8 @@ static struct ldlm_resource *ldlm_resource_new(void)
*/
struct ldlm_resource *
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
- const struct ldlm_res_id *name, ldlm_type_t type, int create)
+ const struct ldlm_res_id *name, enum ldlm_type type,
+ int create)
{
struct hlist_node *hnode;
struct ldlm_resource *res;
@@ -1073,14 +1085,13 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
__u64 version;
int ns_refcount = 0;
- LASSERT(ns != NULL);
- LASSERT(parent == NULL);
- LASSERT(ns->ns_rs_hash != NULL);
+ LASSERT(!parent);
+ LASSERT(ns->ns_rs_hash);
LASSERT(name->name[0] != 0);
cfs_hash_bd_get_and_lock(ns->ns_rs_hash, (void *)name, &bd, 0);
hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
- if (hnode != NULL) {
+ if (hnode) {
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* Synchronize with regard to resource creation. */
@@ -1111,13 +1122,12 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
res->lr_name = *name;
res->lr_type = type;
- res->lr_most_restr = LCK_NL;
cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
- if (hnode != NULL) {
+ if (hnode) {
/* Someone won the race and already added the resource. */
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
/* Clean lu_ref for failed resource. */
@@ -1167,7 +1177,8 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
/* Let's see if we happened to be the very first resource in this
* namespace. If so, and this is a client namespace, we need to move
* the namespace into the active namespaces list to be patrolled by
- * the ldlm_poold. */
+ * the ldlm_poold.
+ */
if (ns_refcount == 1) {
mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 3d6745e63fe3..dd1c827013b9 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -60,9 +60,9 @@ static void ll_release(struct dentry *de)
{
struct ll_dentry_data *lld;
- LASSERT(de != NULL);
+ LASSERT(de);
lld = ll_d2d(de);
- if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
+ if (!lld) /* NFS copies the de->d_op methods (bug 4655) */
return;
if (lld->lld_it) {
@@ -80,7 +80,8 @@ static void ll_release(struct dentry *de)
* This avoids a race where ll_lookup_it() instantiates a dentry, but we get
* an AST before calling d_revalidate_it(). The dentry still exists (marked
* INVALID) so d_lookup() matches it, but we have no lock on it (so
- * lock_match() fails) and we spin around real_lookup(). */
+ * lock_match() fails) and we spin around real_lookup().
+ */
static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str,
const struct qstr *name)
@@ -117,7 +118,8 @@ static inline int return_if_equal(struct ldlm_lock *lock, void *data)
/* find any ldlm lock of the inode in mdc and lov
* return 0 not find
* 1 find one
- * < 0 error */
+ * < 0 error
+ */
static int find_cbdata(struct inode *inode)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -131,7 +133,7 @@ static int find_cbdata(struct inode *inode)
return rc;
lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL)
+ if (!lsm)
return rc;
rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
@@ -163,10 +165,12 @@ static int ll_ddelete(const struct dentry *de)
/* Disable this piece of code temporarily because this is called
* inside dcache_lock so it's not appropriate to do lots of work
* here. ATTENTION: Before this piece of code enabling, LU-2487 must be
- * resolved. */
+ * resolved.
+ */
#if 0
/* if not ldlm lock for this inode, set i_nlink to 0 so that
- * this inode can be recycled later b=20433 */
+ * this inode can be recycled later b=20433
+ */
if (d_really_is_positive(de) && !find_cbdata(d_inode(de)))
clear_nlink(d_inode(de));
#endif
@@ -178,19 +182,16 @@ static int ll_ddelete(const struct dentry *de)
int ll_d_init(struct dentry *de)
{
- LASSERT(de != NULL);
-
CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
- de, de, de->d_parent, d_inode(de),
- d_count(de));
+ de, de, de->d_parent, d_inode(de), d_count(de));
- if (de->d_fsdata == NULL) {
+ if (!de->d_fsdata) {
struct ll_dentry_data *lld;
lld = kzalloc(sizeof(*lld), GFP_NOFS);
if (likely(lld)) {
spin_lock(&de->d_lock);
- if (likely(de->d_fsdata == NULL)) {
+ if (likely(!de->d_fsdata)) {
de->d_fsdata = lld;
__d_lustre_invalidate(de);
} else {
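The ll_d_init() hunk above is the classic optimistic-allocation pattern: allocate lld without the lock, take d_lock, re-check d_fsdata, and let the loser of the race free its copy. A self-contained sketch under illustrative names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct priv { int flags; };		/* stand-in for ll_dentry_data */

static int lazy_attach(void **slot, spinlock_t *lock)
{
	struct priv *p = kzalloc(sizeof(*p), GFP_NOFS);

	if (unlikely(!p))
		return -ENOMEM;
	spin_lock(lock);
	if (likely(!*slot)) {
		*slot = p;		/* we won the race */
		p = NULL;
	}
	spin_unlock(lock);
	kfree(p);			/* loser frees its copy; kfree(NULL) is a no-op */
	return 0;
}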
@@ -218,7 +219,8 @@ void ll_intent_drop_lock(struct lookup_intent *it)
ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode);
/* bug 494: intent_release may be called multiple times, from
- * this thread and we don't want to double-decref this lock */
+ * this thread and we don't want to double-decref this lock
+ */
it->d.lustre.it_lock_mode = 0;
if (it->d.lustre.it_remote_lock_mode != 0) {
handle.cookie = it->d.lustre.it_remote_lock_handle;
@@ -251,8 +253,6 @@ void ll_invalidate_aliases(struct inode *inode)
{
struct dentry *dentry;
- LASSERT(inode != NULL);
-
CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
inode->i_ino, inode->i_generation, inode);
@@ -286,9 +286,7 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
{
- LASSERT(it != NULL);
-
- if (it->d.lustre.it_lock_mode && inode != NULL) {
+ if (it->d.lustre.it_lock_mode && inode) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
@@ -300,7 +298,8 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
if (it->it_op == IT_LOOKUP || it->it_op == IT_GETATTR) {
/* on 2.6 there are situation when several lookups and
* revalidations may be requested during single operation.
- * therefore, we don't release intent here -bzzz */
+ * therefore, we don't release intent here -bzzz
+ */
ll_intent_drop_lock(it);
}
}
@@ -328,7 +327,7 @@ static int ll_revalidate_dentry(struct dentry *dentry,
if (lookup_flags & LOOKUP_RCU)
return -ECHILD;
- do_statahead_enter(dir, &dentry, d_inode(dentry) == NULL);
+ do_statahead_enter(dir, &dentry, !d_inode(dentry));
ll_statahead_mark(dir, dentry);
return 1;
}
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 8982f7d1b374..e4c82883e580 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -55,6 +55,7 @@
#include "../include/lustre_lite.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_fid.h"
+#include "../include/lustre_kernelcomm.h"
#include "llite_internal.h"
/*
@@ -133,9 +134,8 @@
* a header lu_dirpage which describes the start/end hash, and whether this
* page is empty (contains no dir entry) or hash collide with next page.
* After client receives reply, several pages will be integrated into dir page
- * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
- * lu_dirpage for this integrated page will be adjusted. See
- * lmv_adjust_dirpages().
+ * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage
+ * for this integrated page will be adjusted. See lmv_adjust_dirpages().
*
*/
@@ -152,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
struct page **page_pool;
struct page *page;
struct lu_dirpage *dp;
- int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
+ int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
int nrdpgs = 0; /* number of pages read actually */
int npages;
int i;
@@ -189,13 +189,11 @@ static int ll_dir_filler(void *_hash, struct page *page0)
} else if (rc == 0) {
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
/* Checked by mdc_readpage() */
- LASSERT(body != NULL);
-
if (body->valid & OBD_MD_FLSIZE)
cl_isize_write(inode, body->size);
- nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
- >> PAGE_CACHE_SHIFT;
+ nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
+ >> PAGE_SHIFT;
SetPageUptodate(page0);
}
unlock_page(page0);
@@ -210,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
page = page_pool[i];
if (rc < 0 || i >= nrdpgs) {
- page_cache_release(page);
+ put_page(page);
continue;
}
@@ -231,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
offset, ret);
}
- page_cache_release(page);
+ put_page(page);
}
if (page_pool != &page0)
@@ -244,11 +242,11 @@ void ll_release_page(struct page *page, int remove)
kunmap(page);
if (remove) {
lock_page(page);
- if (likely(page->mapping != NULL))
+ if (likely(page->mapping))
truncate_complete_page(page->mapping, page);
unlock_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
/*
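page_cache_release() and the PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT constants were long-standing aliases of put_page() and PAGE_SIZE/PAGE_SHIFT; the hunks in this file switch to the plain names ahead of the aliases' removal. Sketch of the release helper's shape after conversion:

#include <linux/highmem.h>
#include <linux/mm.h>

static void release_dir_page(struct page *page)
{
	kunmap(page);
	put_page(page);		/* was: page_cache_release(page) */
}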
@@ -274,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
if (found > 0 && !radix_tree_exceptional_entry(page)) {
struct lu_dirpage *dp;
- page_cache_get(page);
+ get_page(page);
spin_unlock_irq(&mapping->tree_lock);
/*
* In contrast to find_lock_page() we are sure that directory
@@ -314,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
page = NULL;
}
} else {
- page_cache_release(page);
+ put_page(page);
page = ERR_PTR(-EIO);
}
@@ -333,7 +331,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
struct lustre_handle lockh;
struct lu_dirpage *dp;
struct page *page;
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
int rc;
__u64 start = 0;
__u64 end = 0;
@@ -356,7 +354,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
struct md_op_data *op_data;
op_data = ll_prep_md_op_data(NULL, dir, dir, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
+ LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return (void *)op_data;
@@ -369,8 +367,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
if (request)
ptlrpc_req_finished(request);
if (rc < 0) {
- CERROR("lock enqueue: "DFID" at %llu: rc %d\n",
- PFID(ll_inode2fid(dir)), hash, rc);
+ CERROR("lock enqueue: " DFID " at %llu: rc %d\n",
+ PFID(ll_inode2fid(dir)), hash, rc);
return ERR_PTR(rc);
}
@@ -380,7 +378,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
&it.d.lustre.it_lock_handle, dir, NULL);
} else {
/* for cross-ref object, l_ast_data of the lock may not be set,
- * we reset it here */
+ * we reset it here
+ */
md_set_lock_data(ll_i2sbi(dir)->ll_md_exp, &lockh.cookie,
dir, NULL);
}
@@ -392,7 +391,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
CERROR("dir page locate: "DFID" at %llu: rc %ld\n",
PFID(ll_inode2fid(dir)), lhash, PTR_ERR(page));
goto out_unlock;
- } else if (page != NULL) {
+ } else if (page) {
/*
* XXX nikita: not entirely correct handling of a corner case:
* suppose hash chain of entries with hash value HASH crosses
@@ -498,7 +497,7 @@ int ll_dir_read(struct inode *inode, struct dir_context *ctx)
__u64 next;
dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent != NULL && !done;
+ for (ent = lu_dirent_start(dp); ent && !done;
ent = lu_dirent_next(ent)) {
__u16 type;
int namelen;
@@ -688,7 +687,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
struct obd_device *mgc = lsi->lsi_mgc;
int lum_size;
- if (lump != NULL) {
+ if (lump) {
/*
* This is coming from userspace, so should be in
* local endian. But the MDS would like it in little
@@ -724,7 +723,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
+ if (lump && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
op_data->op_cli_flags |= CLI_SET_MEA;
/* swabbing is done in lov_setstripe() on server side */
@@ -738,8 +737,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
}
/* In the following we use the fact that LOV_USER_MAGIC_V1 and
- LOV_USER_MAGIC_V3 have the same initial fields so we do not
- need to make the distinction between the 2 versions */
+ * LOV_USER_MAGIC_V3 have the same initial fields so we do not
+ * need to make the distinction between the 2 versions
+ */
if (set_default && mgc->u.cli.cl_mgc_mgsexp) {
char *param = NULL;
char *buf;
@@ -811,7 +811,6 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
lmmsize = body->eadatasize;
@@ -823,7 +822,6 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
lmm = req_capsule_server_sized_get(&req->rq_pill,
&RMF_MDT_MD, lmmsize);
- LASSERT(lmm != NULL);
/*
* This is coming from the MDS, so is probably in
@@ -879,7 +877,7 @@ int ll_get_mdt_idx(struct inode *inode)
/**
* Generic handler to do any pre-copy work.
*
- * It send a first hsm_progress (with extent length == 0) to coordinator as a
+ * It sends a first hsm_progress (with extent length == 0) to coordinator as a
* first information for it that real work has started.
*
 * Moreover, for an ARCHIVE request, it will sample the file data version and
@@ -931,8 +929,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
goto progress;
}
- /* Store it the hsm_copy for later copytool use.
- * Always modified even if no lsm. */
+ /* Store in the hsm_copy for later copytool use.
+ * Always modified even if no lsm.
+ */
copy->hc_data_version = data_version;
}
@@ -1008,12 +1007,14 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
goto progress;
}
- /* Store it the hsm_copy for later copytool use.
- * Always modified even if no lsm. */
+ /* Store in the hsm_copy for later copytool use.
+ * Always modified even if no lsm.
+ */
hpk.hpk_data_version = data_version;
/* File could have been stripped during archiving, so we need
- * to check anyway. */
+ * to check anyway.
+ */
if ((copy->hc_hai.hai_action == HSMA_ARCHIVE) &&
(copy->hc_data_version != data_version)) {
CDEBUG(D_HSM, "File data version mismatched. File content was changed during archiving. "
@@ -1025,7 +1026,8 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
* the cdt will loop on retried archive requests.
* The policy engine will ask for a new archive later
* when the file will not be modified for some tunable
- * time */
+ * time
+ */
/* we do not notify caller */
hpk.hpk_flags &= ~HP_FLAG_RETRY;
/* hpk_errval must be >= 0 */
@@ -1153,7 +1155,8 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
return rc;
}
/* If QIF_SPACE is not set, client should collect the
- * space usage from OSSs by itself */
+ * space usage from OSSs by itself
+ */
if (cmd == Q_GETQUOTA &&
!(oqctl->qc_dqblk.dqb_valid & QIF_SPACE) &&
!oqctl->qc_dqblk.dqb_curspace) {
@@ -1204,7 +1207,8 @@ out:
/* This function tries to get a single name component,
* to send to the server. No actual path traversal involved,
- * so we limit to NAME_MAX */
+ * so we limit to NAME_MAX
+ */
static char *ll_getname(const char __user *filename)
{
int ret = 0, len;
@@ -1252,7 +1256,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ll_iocontrol(inode, file, cmd, arg);
case FSFILT_IOC_GETVERSION_OLD:
case FSFILT_IOC_GETVERSION:
- return put_user(inode->i_generation, (int *)arg);
+ return put_user(inode->i_generation, (int __user *)arg);
/* We need to special case any other ioctls we want to handle,
* to send them to the MDS/OST as appropriate and to properly
* network encode the arg field.
@@ -1266,7 +1270,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (mdtidx < 0)
return mdtidx;
- if (put_user((int)mdtidx, (int *)arg))
+ if (put_user((int)mdtidx, (int __user *)arg))
return -EFAULT;
return 0;
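The long run of __user fix-ups in ll_dir_ioctl() is for sparse: an ioctl arg that names user memory must be cast to a __user pointer before copy_to_user()/copy_from_user()/put_user(), so address-space mismatches become build-time warnings instead of latent bugs. A minimal sketch:

#include <linux/errno.h>
#include <linux/uaccess.h>

static long ioctl_report_index(unsigned long arg, int mdtidx)
{
	if (put_user(mdtidx, (int __user *)arg))	/* annotated cast */
		return -EFAULT;
	return 0;
}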
@@ -1278,7 +1282,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
char *filename;
struct md_op_data *op_data;
- rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
+ rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
if (rc)
return rc;
data = (void *)buf;
@@ -1320,12 +1324,12 @@ out_free:
int len;
int rc;
- rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
+ rc = obd_ioctl_getdata(&buf, &len, (void __user *)arg);
if (rc)
return rc;
data = (void *)buf;
- if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
+ if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2 ||
data->ioc_inllen1 == 0 || data->ioc_inllen2 == 0) {
rc = -EINVAL;
goto lmv_out_free;
@@ -1363,8 +1367,8 @@ lmv_out_free:
case LL_IOC_LOV_SETSTRIPE: {
struct lov_user_md_v3 lumv3;
struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
- struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
- struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
+ struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
+ struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
int set_default = 0;
@@ -1389,7 +1393,7 @@ lmv_out_free:
return rc;
}
case LL_IOC_LMV_GETSTRIPE: {
- struct lmv_user_md *lump = (struct lmv_user_md *)arg;
+ struct lmv_user_md __user *lump = (void __user *)arg;
struct lmv_user_md lum;
struct lmv_user_md *tmp;
int lum_size;
@@ -1422,7 +1426,7 @@ lmv_out_free:
tmp->lum_objects[0].lum_mds = mdtindex;
memcpy(&tmp->lum_objects[0].lum_fid, ll_inode2fid(inode),
sizeof(struct lu_fid));
- if (copy_to_user((void *)arg, tmp, lum_size)) {
+ if (copy_to_user((void __user *)arg, tmp, lum_size)) {
rc = -EFAULT;
goto free_lmv;
}
@@ -1433,13 +1437,13 @@ free_lmv:
case LL_IOC_LOV_SWAP_LAYOUTS:
return -EPERM;
case LL_IOC_OBD_STATFS:
- return ll_obd_statfs(inode, (void *)arg);
+ return ll_obd_statfs(inode, (void __user *)arg);
case LL_IOC_LOV_GETSTRIPE:
case LL_IOC_MDC_GETINFO:
case IOC_MDC_GETFILEINFO:
case IOC_MDC_GETFILESTRIPE: {
struct ptlrpc_request *request = NULL;
- struct lov_user_md *lump;
+ struct lov_user_md __user *lump;
struct lov_mds_md *lmm = NULL;
struct mdt_body *body;
char *filename = NULL;
@@ -1447,7 +1451,7 @@ free_lmv:
if (cmd == IOC_MDC_GETFILEINFO ||
cmd == IOC_MDC_GETFILESTRIPE) {
- filename = ll_getname((const char *)arg);
+ filename = ll_getname((const char __user *)arg);
if (IS_ERR(filename))
return PTR_ERR(filename);
@@ -1460,7 +1464,7 @@ free_lmv:
if (request) {
body = req_capsule_server_get(&request->rq_pill,
&RMF_MDT_BODY);
- LASSERT(body != NULL);
+ LASSERT(body);
} else {
goto out_req;
}
@@ -1476,11 +1480,11 @@ free_lmv:
if (cmd == IOC_MDC_GETFILESTRIPE ||
cmd == LL_IOC_LOV_GETSTRIPE) {
- lump = (struct lov_user_md *)arg;
+ lump = (struct lov_user_md __user *)arg;
} else {
- struct lov_user_mds_data *lmdp;
+ struct lov_user_mds_data __user *lmdp;
- lmdp = (struct lov_user_mds_data *)arg;
+ lmdp = (struct lov_user_mds_data __user *)arg;
lump = &lmdp->lmd_lmm;
}
if (copy_to_user(lump, lmm, lmmsize)) {
@@ -1492,7 +1496,7 @@ free_lmv:
}
skip_lmm:
if (cmd == IOC_MDC_GETFILEINFO || cmd == LL_IOC_MDC_GETINFO) {
- struct lov_user_mds_data *lmdp;
+ struct lov_user_mds_data __user *lmdp;
lstat_t st = { 0 };
st.st_dev = inode->i_sb->s_dev;
@@ -1502,14 +1506,14 @@ skip_lmm:
st.st_gid = body->gid;
st.st_rdev = body->rdev;
st.st_size = body->size;
- st.st_blksize = PAGE_CACHE_SIZE;
+ st.st_blksize = PAGE_SIZE;
st.st_blocks = body->blocks;
st.st_atime = body->atime;
st.st_mtime = body->mtime;
st.st_ctime = body->ctime;
st.st_ino = inode->i_ino;
- lmdp = (struct lov_user_mds_data *)arg;
+ lmdp = (struct lov_user_mds_data __user *)arg;
if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
rc = -EFAULT;
goto out_req;
@@ -1523,14 +1527,14 @@ out_req:
return rc;
}
case IOC_LOV_GETINFO: {
- struct lov_user_mds_data *lumd;
+ struct lov_user_mds_data __user *lumd;
struct lov_stripe_md *lsm;
- struct lov_user_md *lum;
+ struct lov_user_md __user *lum;
struct lov_mds_md *lmm;
int lmmsize;
lstat_t st;
- lumd = (struct lov_user_mds_data *)arg;
+ lumd = (struct lov_user_mds_data __user *)arg;
lum = &lumd->lmd_lmm;
rc = ll_get_max_mdsize(sbi, &lmmsize);
@@ -1538,7 +1542,7 @@ out_req:
return rc;
lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
- if (lmm == NULL)
+ if (!lmm)
return -ENOMEM;
if (copy_from_user(lmm, lum, lmmsize)) {
rc = -EFAULT;
@@ -1636,8 +1640,8 @@ free_lmm:
NULL);
if (rc) {
CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void *)arg, check,
- sizeof(*check)))
+ if (copy_to_user((void __user *)arg, check,
+ sizeof(*check)))
CDEBUG(D_QUOTA, "copy_to_user failed\n");
goto out_poll;
}
@@ -1646,8 +1650,8 @@ free_lmm:
NULL);
if (rc) {
CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
- if (copy_to_user((void *)arg, check,
- sizeof(*check)))
+ if (copy_to_user((void __user *)arg, check,
+ sizeof(*check)))
CDEBUG(D_QUOTA, "copy_to_user failed\n");
goto out_poll;
}
@@ -1662,14 +1666,15 @@ out_poll:
if (!qctl)
return -ENOMEM;
- if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) {
+ if (copy_from_user(qctl, (void __user *)arg, sizeof(*qctl))) {
rc = -EFAULT;
goto out_quotactl;
}
rc = quotactl_ioctl(sbi, qctl);
- if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl)))
+ if (rc == 0 && copy_to_user((void __user *)arg, qctl,
+ sizeof(*qctl)))
rc = -EFAULT;
out_quotactl:
@@ -1686,7 +1691,6 @@ out_quotactl:
if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- LASSERT(fd != NULL);
rc = rct_add(&sbi->ll_rct, current_pid(), arg);
if (!rc)
fd->fd_flags |= LL_FILE_RMTACL;
@@ -1699,7 +1703,7 @@ out_quotactl:
int count, vallen;
struct obd_export *exp;
- if (copy_from_user(&count, (int *)arg, sizeof(int)))
+ if (copy_from_user(&count, (int __user *)arg, sizeof(int)))
return -EFAULT;
/* get ost count when count is zero, get mdt count otherwise */
@@ -1712,34 +1716,35 @@ out_quotactl:
return rc;
}
- if (copy_to_user((int *)arg, &count, sizeof(int)))
+ if (copy_to_user((int __user *)arg, &count, sizeof(int)))
return -EFAULT;
return 0;
}
case LL_IOC_PATH2FID:
- if (copy_to_user((void *)arg, ll_inode2fid(inode),
- sizeof(struct lu_fid)))
+ if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
+ sizeof(struct lu_fid)))
return -EFAULT;
return 0;
case LL_IOC_GET_CONNECT_FLAGS: {
- return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void *)arg);
+ return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL,
+ (void __user *)arg);
}
case OBD_IOC_CHANGELOG_SEND:
case OBD_IOC_CHANGELOG_CLEAR:
if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
- rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
+ rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
sizeof(struct ioc_changelog));
return rc;
case OBD_IOC_FID2PATH:
- return ll_fid2path(inode, (void *)arg);
+ return ll_fid2path(inode, (void __user *)arg);
case LL_IOC_HSM_REQUEST: {
struct hsm_user_request *hur;
ssize_t totalsize;
- hur = memdup_user((void *)arg, sizeof(*hur));
+ hur = memdup_user((void __user *)arg, sizeof(*hur));
if (IS_ERR(hur))
return PTR_ERR(hur);
@@ -1754,11 +1759,11 @@ out_quotactl:
return -E2BIG;
hur = libcfs_kvzalloc(totalsize, GFP_NOFS);
- if (hur == NULL)
+ if (!hur)
return -ENOMEM;
/* Copy the whole struct */
- if (copy_from_user(hur, (void *)arg, totalsize)) {
+ if (copy_from_user(hur, (void __user *)arg, totalsize)) {
kvfree(hur);
return -EFAULT;
}
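The HSM request path copies a fixed-size header with memdup_user(), derives the total length from the header's fields, then copies the whole variable-length structure in a second pass. A condensed sketch of that two-step copy (sizing logic elided, names illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static void *fetch_request(const void __user *uarg,
			   size_t hdr_size, size_t total_size)
{
	void *hdr = memdup_user(uarg, hdr_size);
	void *req;

	if (IS_ERR(hdr))
		return hdr;
	/* the real code computes total_size from fields in hdr here */
	kfree(hdr);

	req = kzalloc(total_size, GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(req, uarg, total_size)) {
		kfree(req);
		return ERR_PTR(-EFAULT);
	}
	return req;
}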
@@ -1794,7 +1799,7 @@ out_quotactl:
struct hsm_progress_kernel hpk;
struct hsm_progress hp;
- if (copy_from_user(&hp, (void *)arg, sizeof(hp)))
+ if (copy_from_user(&hp, (void __user *)arg, sizeof(hp)))
return -EFAULT;
hpk.hpk_fid = hp.hp_fid;
@@ -1805,13 +1810,14 @@ out_quotactl:
hpk.hpk_data_version = 0;
/* File may not exist in Lustre; all progress
- * reported to Lustre root */
+ * reported to Lustre root
+ */
rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
NULL);
return rc;
}
case LL_IOC_HSM_CT_START:
- rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
+ rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
sizeof(struct lustre_kernelcomm));
return rc;
@@ -1819,12 +1825,12 @@ out_quotactl:
struct hsm_copy *copy;
int rc;
- copy = memdup_user((char *)arg, sizeof(*copy));
+ copy = memdup_user((char __user *)arg, sizeof(*copy));
if (IS_ERR(copy))
return PTR_ERR(copy);
rc = ll_ioc_copy_start(inode->i_sb, copy);
- if (copy_to_user((char *)arg, copy, sizeof(*copy)))
+ if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
rc = -EFAULT;
kfree(copy);
@@ -1834,19 +1840,20 @@ out_quotactl:
struct hsm_copy *copy;
int rc;
- copy = memdup_user((char *)arg, sizeof(*copy));
+ copy = memdup_user((char __user *)arg, sizeof(*copy));
if (IS_ERR(copy))
return PTR_ERR(copy);
rc = ll_ioc_copy_end(inode->i_sb, copy);
- if (copy_to_user((char *)arg, copy, sizeof(*copy)))
+ if (copy_to_user((char __user *)arg, copy, sizeof(*copy)))
rc = -EFAULT;
kfree(copy);
return rc;
}
default:
- return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg);
+ return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL,
+ (void __user *)arg);
}
}
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 39e2ffd5f97f..cf619af3caf5 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -64,8 +64,8 @@ static struct ll_file_data *ll_file_data_get(void)
{
struct ll_file_data *fd;
- fd = kmem_cache_alloc(ll_file_data_slab, GFP_NOFS | __GFP_ZERO);
- if (fd == NULL)
+ fd = kmem_cache_zalloc(ll_file_data_slab, GFP_NOFS);
+ if (!fd)
return NULL;
fd->fd_write_failed = false;
return fd;
@@ -73,7 +73,7 @@ static struct ll_file_data *ll_file_data_get(void)
static void ll_file_data_put(struct ll_file_data *fd)
{
- if (fd != NULL)
+ if (fd)
kmem_cache_free(ll_file_data_slab, fd);
}
@@ -134,7 +134,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
int epoch_close = 1;
int rc;
- if (obd == NULL) {
+ if (!obd) {
/*
* XXX: in case of LMV, is this correct to access
* ->exp_handle?
@@ -153,7 +153,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
}
ll_prepare_close(inode, op_data, och);
- if (data_version != NULL) {
+ if (data_version) {
/* Pass in data_version implies release. */
op_data->op_bias |= MDS_HSM_RELEASE;
op_data->op_data_version = *data_version;
@@ -166,7 +166,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
/* This close must have the epoch closed. */
LASSERT(epoch_close);
/* MDS has instructed us to obtain Size-on-MDS attribute from
- * OSTs and send setattr to back to MDS. */
+	 * OSTs and send setattr back to MDS.
+ */
rc = ll_som_update(inode, op_data);
if (rc) {
CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n",
@@ -179,7 +180,8 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
}
/* DATA_MODIFIED flag was successfully sent on close, cancel data
- * modification flag. */
+ * modification flag.
+ */
if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
struct ll_inode_info *lli = ll_i2info(inode);
@@ -242,7 +244,8 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
mutex_lock(&lli->lli_och_mutex);
if (*och_usecount > 0) {
/* There are still users of this handle, so skip
- * freeing it. */
+ * freeing it.
+ */
mutex_unlock(&lli->lli_och_mutex);
return 0;
}
@@ -251,9 +254,10 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
*och_p = NULL;
mutex_unlock(&lli->lli_och_mutex);
- if (och != NULL) {
+ if (och) {
/* There might be a race and this handle may already
- be closed. */
+ * be closed.
+ */
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
inode, och, NULL);
}
@@ -276,26 +280,29 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
- if (fd->fd_lease_och != NULL) {
+ if (fd->fd_lease_och) {
bool lease_broken;
/* Usually the lease is not released when the
- * application crashed, we need to release here. */
+	 * application crashed, so we need to release it here.
+ */
rc = ll_lease_close(fd->fd_lease_och, inode, &lease_broken);
- CDEBUG(rc ? D_ERROR : D_INODE, "Clean up lease "DFID" %d/%d\n",
- PFID(&lli->lli_fid), rc, lease_broken);
+ CDEBUG(rc ? D_ERROR : D_INODE,
+ "Clean up lease " DFID " %d/%d\n",
+ PFID(&lli->lli_fid), rc, lease_broken);
fd->fd_lease_och = NULL;
}
- if (fd->fd_och != NULL) {
+ if (fd->fd_och) {
rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL);
fd->fd_och = NULL;
goto out;
}
/* Let's see if we have good enough OPEN lock on the file and if
- we can skip talking to MDS */
+ * we can skip talking to MDS
+ */
mutex_lock(&lli->lli_och_mutex);
if (fd->fd_omode & FMODE_WRITE) {
@@ -343,7 +350,6 @@ int ll_file_release(struct inode *inode, struct file *file)
if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- LASSERT(fd != NULL);
if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
fd->fd_flags &= ~LL_FILE_RMTACL;
rct_del(&sbi->ll_rct, current_pid());
@@ -355,11 +361,12 @@ int ll_file_release(struct inode *inode, struct file *file)
if (!is_root_inode(inode))
ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
fd = LUSTRE_FPRIVATE(file);
- LASSERT(fd != NULL);
+ LASSERT(fd);
- /* The last ref on @file, maybe not the owner pid of statahead.
+	/* The last ref on @file, may not be the owner pid of statahead.
* Different processes can open the same dir, "ll_opendir_key" means:
- * it is me that should stop the statahead thread. */
+ * it is me that should stop the statahead thread.
+ */
if (S_ISDIR(inode->i_mode) && lli->lli_opendir_key == fd &&
lli->lli_opendir_pid != 0)
ll_stop_statahead(inode, lli->lli_opendir_key);
@@ -396,16 +403,16 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
__u32 opc = LUSTRE_OPC_ANY;
int rc;
- /* Usually we come here only for NFSD, and we want open lock.
- But we can also get here with pre 2.6.15 patchless kernels, and in
- that case that lock is also ok */
+ /* Usually we come here only for NFSD, and we want open lock. */
/* We can also get here if there was cached open handle in revalidate_it
* but it disappeared while we were getting from there to ll_file_open.
* But this means this file was closed and immediately opened which
- * makes a good candidate for using OPEN lock */
+ * makes a good candidate for using OPEN lock
+ */
/* If lmmsize & lmm are not 0, we are just setting stripe info
- * parameters. No need for the open lock */
- if (lmm == NULL && lmmsize == 0) {
+ * parameters. No need for the open lock
+ */
+ if (!lmm && lmmsize == 0) {
itp->it_flags |= MDS_OPEN_LOCK;
if (itp->it_flags & FMODE_WRITE)
opc = LUSTRE_OPC_CREATE;
@@ -426,7 +433,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
* with messages with -ESTALE errors.
*/
if (!it_disposition(itp, DISP_OPEN_OPEN) ||
- it_open_error(DISP_OPEN_OPEN, itp))
+ it_open_error(DISP_OPEN_OPEN, itp))
goto out;
ll_release_openhandle(inode, itp);
goto out;
@@ -492,7 +499,7 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
LASSERT(!LUSTRE_FPRIVATE(file));
- LASSERT(fd != NULL);
+ LASSERT(fd);
if (och) {
struct ptlrpc_request *req = it->d.lustre.it_data;
@@ -543,7 +550,7 @@ int ll_file_open(struct inode *inode, struct file *file)
file->private_data = NULL; /* prevent ll_local_open assertion */
fd = ll_file_data_get();
- if (fd == NULL) {
+ if (!fd) {
rc = -ENOMEM;
goto out_openerr;
}
@@ -551,7 +558,7 @@ int ll_file_open(struct inode *inode, struct file *file)
fd->fd_file = file;
if (S_ISDIR(inode->i_mode)) {
spin_lock(&lli->lli_sa_lock);
- if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
+ if (!lli->lli_opendir_key && !lli->lli_sai &&
lli->lli_opendir_pid == 0) {
lli->lli_opendir_key = fd;
lli->lli_opendir_pid = current_pid();
@@ -568,7 +575,8 @@ int ll_file_open(struct inode *inode, struct file *file)
if (!it || !it->d.lustre.it_disposition) {
/* Convert f_flags into access mode. We cannot use file->f_mode,
* because everything but O_ACCMODE mask was stripped from
- * there */
+ * there
+ */
if ((oit.it_flags + 1) & O_ACCMODE)
oit.it_flags++;
if (file->f_flags & O_TRUNC)
@@ -577,17 +585,20 @@ int ll_file_open(struct inode *inode, struct file *file)
/* kernel only call f_op->open in dentry_open. filp_open calls
* dentry_open after call to open_namei that checks permissions.
* Only nfsd_open call dentry_open directly without checking
- * permissions and because of that this code below is safe. */
+ * permissions and because of that this code below is safe.
+ */
if (oit.it_flags & (FMODE_WRITE | FMODE_READ))
oit.it_flags |= MDS_OPEN_OWNEROVERRIDE;
/* We do not want O_EXCL here, presumably we opened the file
- * already? XXX - NFS implications? */
+ * already? XXX - NFS implications?
+ */
oit.it_flags &= ~O_EXCL;
/* bug20584, if "it_flags" contains O_CREAT, the file will be
* created if necessary, then "IT_CREAT" should be set to keep
- * consistent with it */
+ * consistent with it
+ */
if (oit.it_flags & O_CREAT)
oit.it_op |= IT_CREAT;
@@ -611,7 +622,8 @@ restart:
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
/* Well, there's extra open request that we do not need,
- let's close it somehow. This will decref request. */
+ * let's close it somehow. This will decref request.
+ */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
mutex_unlock(&lli->lli_och_mutex);
@@ -632,10 +644,11 @@ restart:
LASSERT(*och_usecount == 0);
if (!it->d.lustre.it_disposition) {
/* We cannot just request lock handle now, new ELC code
- means that one of other OPEN locks for this file
- could be cancelled, and since blocking ast handler
- would attempt to grab och_mutex as well, that would
- result in a deadlock */
+ * means that one of other OPEN locks for this file
+ * could be cancelled, and since blocking ast handler
+ * would attempt to grab och_mutex as well, that would
+ * result in a deadlock
+ */
mutex_unlock(&lli->lli_och_mutex);
it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file->f_path.dentry, NULL, 0, it);
@@ -655,9 +668,11 @@ restart:
/* md_intent_lock() didn't get a request ref if there was an
* open error, so don't do cleanup on the request here
- * (bug 3430) */
+ * (bug 3430)
+ */
/* XXX (green): Should not we bail out on any error here, not
- * just open error? */
+ * just open error?
+ */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc)
goto out_och_free;
@@ -672,8 +687,9 @@ restart:
fd = NULL;
/* Must do this outside lli_och_mutex lock to prevent deadlock where
- different kind of OPEN lock for this same inode gets cancelled
- by ldlm_cancel_lru */
+ * different kind of OPEN lock for this same inode gets cancelled
+ * by ldlm_cancel_lru
+ */
if (!S_ISREG(inode->i_mode))
goto out_och_free;
@@ -712,7 +728,8 @@ out_openerr:
}
static int ll_md_blocking_lease_ast(struct ldlm_lock *lock,
- struct ldlm_lock_desc *desc, void *data, int flag)
+ struct ldlm_lock_desc *desc,
+ void *data, int flag)
{
int rc;
struct lustre_handle lockh;
@@ -752,7 +769,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
if (fmode != FMODE_WRITE && fmode != FMODE_READ)
return ERR_PTR(-EINVAL);
- if (file != NULL) {
+ if (file) {
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct obd_client_handle **och_p;
@@ -764,18 +781,18 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
/* Get the openhandle of the file */
rc = -EBUSY;
mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
+ if (fd->fd_lease_och) {
mutex_unlock(&lli->lli_och_mutex);
return ERR_PTR(rc);
}
- if (fd->fd_och == NULL) {
+ if (!fd->fd_och) {
if (file->f_mode & FMODE_WRITE) {
- LASSERT(lli->lli_mds_write_och != NULL);
+ LASSERT(lli->lli_mds_write_och);
och_p = &lli->lli_mds_write_och;
och_usecount = &lli->lli_open_fd_write_count;
} else {
- LASSERT(lli->lli_mds_read_och != NULL);
+ LASSERT(lli->lli_mds_read_och);
och_p = &lli->lli_mds_read_och;
och_usecount = &lli->lli_open_fd_read_count;
}
@@ -790,7 +807,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
if (rc < 0) /* more than 1 opener */
return ERR_PTR(rc);
- LASSERT(fd->fd_och != NULL);
+ LASSERT(fd->fd_och);
old_handle = fd->fd_och->och_fh;
}
@@ -799,7 +816,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
return ERR_PTR(-ENOMEM);
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
+ LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data)) {
rc = PTR_ERR(op_data);
goto out;
@@ -811,13 +828,14 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
it.it_flags = fmode | open_flags;
it.it_flags |= MDS_OPEN_LOCK | MDS_OPEN_BY_FID | MDS_OPEN_LEASE;
rc = md_intent_lock(sbi->ll_md_exp, op_data, NULL, 0, &it, 0, &req,
- ll_md_blocking_lease_ast,
+ ll_md_blocking_lease_ast,
/* LDLM_FL_NO_LRU: To not put the lease lock into LRU list, otherwise
* it can be cancelled which may mislead applications that the lease is
* broken;
* LDLM_FL_EXCL: Set this flag so that it won't be matched by normal
* open in ll_md_blocking_ast(). Otherwise as ll_md_blocking_lease_ast
- * doesn't deal with openhandle, so normal openhandle will be leaked. */
+ * doesn't deal with openhandle, so normal openhandle will be leaked.
+ */
LDLM_FL_NO_LRU | LDLM_FL_EXCL);
ll_finish_md_op_data(op_data);
ptlrpc_req_finished(req);
@@ -847,8 +865,8 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) {
/* open lock must return for lease */
CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
- PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode,
- it.d.lustre.it_lock_bits);
+ PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode,
+ it.d.lustre.it_lock_bits);
rc = -EPROTO;
goto out_close;
}
@@ -864,7 +882,7 @@ out_close:
/* cancel open lock */
if (it.d.lustre.it_lock_mode != 0) {
ldlm_lock_decref_and_cancel(&och->och_lease_handle,
- it.d.lustre.it_lock_mode);
+ it.d.lustre.it_lock_mode);
it.d.lustre.it_lock_mode = 0;
}
out_release_it:
@@ -886,19 +904,19 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
int rc;
lock = ldlm_handle2lock(&och->och_lease_handle);
- if (lock != NULL) {
+ if (lock) {
lock_res_and_lock(lock);
cancelled = ldlm_is_cancel(lock);
unlock_res_and_lock(lock);
ldlm_lock_put(lock);
}
- CDEBUG(D_INODE, "lease for "DFID" broken? %d\n",
- PFID(&ll_i2info(inode)->lli_fid), cancelled);
+ CDEBUG(D_INODE, "lease for " DFID " broken? %d\n",
+ PFID(&ll_i2info(inode)->lli_fid), cancelled);
if (!cancelled)
ldlm_cli_cancel(&och->och_lease_handle, 0);
- if (lease_broken != NULL)
+ if (lease_broken)
*lease_broken = cancelled;
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
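The string changes around DFID are about splicing, not content: DFID is a printf format fragment and PFID expands a struct lu_fid into the matching arguments, so the format string is assembled by literal concatenation with explicit spaces. A compilable sketch with illustrative stand-ins (the real macros and field names live in the Lustre headers):

	#include <stdio.h>

	struct lu_fid { unsigned long long f_seq; unsigned int f_oid, f_ver; };
	#define DFID "[0x%llx:0x%x:0x%x]"	/* format fragment */
	#define PFID(fid) (fid)->f_seq, (fid)->f_oid, (fid)->f_ver	/* its arguments */

	int main(void)
	{
		struct lu_fid fid = { 0x200000401ULL, 0x1, 0x0 };

		/* concatenation, hence "lease for " DFID " broken? %d\n" above */
		printf("lease for " DFID " broken? %d\n", PFID(&fid), 0);
		return 0;
	}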
@@ -914,7 +932,7 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
struct obd_info oinfo = { };
int rc;
- LASSERT(lsm != NULL);
+ LASSERT(lsm);
oinfo.oi_md = lsm;
oinfo.oi_oa = obdo;
@@ -933,8 +951,8 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
}
set = ptlrpc_prep_set();
- if (set == NULL) {
- CERROR("can't allocate ptlrpc set\n");
+ if (!set) {
+ CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
rc = -ENOMEM;
} else {
rc = obd_getattr_async(exp, &oinfo, set);
@@ -986,7 +1004,8 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
ll_inode_size_lock(inode);
/* merge timestamps the most recently obtained from mds with
- timestamps obtained from osts */
+ * timestamps obtained from osts
+ */
LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime;
LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime;
LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime;
@@ -1009,8 +1028,8 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
if (lvb.lvb_mtime < attr->cat_mtime)
lvb.lvb_mtime = attr->cat_mtime;
- CDEBUG(D_VFSTRACE, DFID" updating i_size %llu\n",
- PFID(&lli->lli_fid), attr->cat_size);
+ CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
+ PFID(&lli->lli_fid), attr->cat_size);
cl_isize_write_nolock(inode, attr->cat_size);
inode->i_blocks = attr->cat_blocks;
@@ -1155,12 +1174,13 @@ restart:
out:
cl_io_fini(env, io);
/* If anything has been read/written (result != 0), we just return
- * short read/write instead of restart io. */
+ * short read/write instead of restarting the io.
+ */
if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zd\n",
iot == CIT_READ ? "read" : "write",
file, *ppos, count);
- LASSERTF(io->ci_nob == 0, "%zd", io->ci_nob);
+ LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob);
goto restart;
}
@@ -1221,7 +1241,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
args->u.normal.via_iocb = iocb;
result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
- &iocb->ki_pos, iov_iter_count(from));
+ &iocb->ki_pos, iov_iter_count(from));
cl_env_put(env, &refcheck);
return result;
}
@@ -1260,8 +1280,8 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx)
int rc = 0;
struct lov_stripe_md *lsm = NULL, *lsm2;
- oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
- if (oa == NULL)
+ oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
+ if (!oa)
return -ENOMEM;
lsm = ccc_inode_lsm_get(inode);
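The allocation change above is a straight equivalence: kmem_cache_zalloc(c, f) is kmem_cache_alloc(c, f | __GFP_ZERO). A minimal sketch with a hypothetical cache:

	#include <linux/slab.h>

	struct foo { int a; };
	static struct kmem_cache *foo_cachep;	/* hypothetical cache */

	static struct foo *foo_alloc(void)
	{
		/* same as kmem_cache_alloc(foo_cachep, GFP_NOFS | __GFP_ZERO):
		 * returns a zeroed object, or NULL on failure */
		return kmem_cache_zalloc(foo_cachep, GFP_NOFS);
	}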
@@ -1274,7 +1294,7 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi, u32 ost_idx)
(lsm->lsm_stripe_count));
lsm2 = libcfs_kvzalloc(lsm_size, GFP_NOFS);
- if (lsm2 == NULL) {
+ if (!lsm2) {
rc = -ENOMEM;
goto out;
}
@@ -1307,7 +1327,7 @@ static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
- if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
+ if (copy_from_user(&ucreat, (struct ll_recreate_obj __user *)arg,
sizeof(ucreat)))
return -EFAULT;
@@ -1325,7 +1345,7 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
if (!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
- if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid)))
+ if (copy_from_user(&fid, (struct lu_fid __user *)arg, sizeof(fid)))
return -EFAULT;
fid_to_ostid(&fid, &oi);
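The __user casts here and throughout the ioctl paths carry no runtime cost; they mark a pointer as living in the user address space so sparse (make C=1) can flag direct dereferences or mixed-address-space assignments. A self-contained sketch of the pattern, with a hypothetical payload struct:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct my_args { __u32 flags; };	/* hypothetical ioctl payload */

	static long my_ioctl(unsigned long arg)
	{
		struct my_args a;
		void __user *uarg = (void __user *)arg;	/* annotate once */

		if (copy_from_user(&a, uarg, sizeof(a)))
			return -EFAULT;	/* user memory is only touched via helpers */
		a.flags |= 0x1;
		if (copy_to_user(uarg, &a, sizeof(a)))
			return -EFAULT;
		return 0;
	}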
@@ -1341,7 +1361,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
int rc = 0;
lsm = ccc_inode_lsm_get(inode);
- if (lsm != NULL) {
+ if (lsm) {
ccc_inode_lsm_put(inode, lsm);
CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
inode->i_ino);
@@ -1401,18 +1421,16 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL); /* checked by mdc_getattr_name */
lmmsize = body->eadatasize;
if (!(body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) ||
- lmmsize == 0) {
+ lmmsize == 0) {
rc = -ENODATA;
goto out;
}
lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD, lmmsize);
- LASSERT(lmm != NULL);
if ((lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V1)) &&
(lmm->lmm_magic != cpu_to_le32(LOV_MAGIC_V3))) {
@@ -1433,7 +1451,8 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
stripe_count = 0;
/* if the function is called for a directory - we should
- * avoid swab not existent lsm objects */
+ * avoid swabbing nonexistent lsm objects
+ */
if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
if (S_ISREG(body->mode))
@@ -1457,7 +1476,7 @@ out:
}
static int ll_lov_setea(struct inode *inode, struct file *file,
- unsigned long arg)
+ unsigned long arg)
{
int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
struct lov_user_md *lump;
@@ -1469,16 +1488,16 @@ static int ll_lov_setea(struct inode *inode, struct file *file,
return -EPERM;
lump = libcfs_kvzalloc(lum_size, GFP_NOFS);
- if (lump == NULL)
+ if (!lump)
return -ENOMEM;
- if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
+ if (copy_from_user(lump, (struct lov_user_md __user *)arg, lum_size)) {
kvfree(lump);
return -EFAULT;
}
rc = ll_lov_setstripe_ea_info(inode, file->f_path.dentry, flags, lump,
- lum_size);
+ lum_size);
cl_lov_delay_create_clear(&file->f_flags);
kvfree(lump);
@@ -1488,12 +1507,12 @@ static int ll_lov_setea(struct inode *inode, struct file *file,
static int ll_lov_setstripe(struct inode *inode, struct file *file,
unsigned long arg)
{
- struct lov_user_md_v3 lumv3;
- struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
- struct lov_user_md_v1 *lumv1p = (struct lov_user_md_v1 *)arg;
- struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
- int lum_size, rc;
- int flags = FMODE_WRITE;
+ struct lov_user_md_v3 lumv3;
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
+ struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
+ struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
+ int lum_size, rc;
+ int flags = FMODE_WRITE;
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
@@ -1518,7 +1537,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
ll_layout_refresh(inode, &gen);
lsm = ccc_inode_lsm_get(inode);
rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode),
- 0, lsm, (void *)arg);
+ 0, lsm, (void __user *)arg);
ccc_inode_lsm_put(inode, lsm);
}
return rc;
@@ -1530,9 +1549,9 @@ static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
int rc = -ENODATA;
lsm = ccc_inode_lsm_get(inode);
- if (lsm != NULL)
+ if (lsm)
rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0,
- lsm, (void *)arg);
+ lsm, (void __user *)arg);
ccc_inode_lsm_put(inode, lsm);
return rc;
}
@@ -1560,7 +1579,7 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
spin_unlock(&lli->lli_lock);
return -EINVAL;
}
- LASSERT(fd->fd_grouplock.cg_lock == NULL);
+ LASSERT(!fd->fd_grouplock.cg_lock);
spin_unlock(&lli->lli_lock);
rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
@@ -1597,11 +1616,11 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
CWARN("no group lock held\n");
return -EINVAL;
}
- LASSERT(fd->fd_grouplock.cg_lock != NULL);
+ LASSERT(fd->fd_grouplock.cg_lock);
if (fd->fd_grouplock.cg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
- arg, fd->fd_grouplock.cg_gid);
+ arg, fd->fd_grouplock.cg_gid);
spin_unlock(&lli->lli_lock);
return -EINVAL;
}
@@ -1688,7 +1707,7 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
}
lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL)
+ if (!lsm)
return -ENOENT;
/* If the stripe_count > 1 and the application does not understand
@@ -1782,9 +1801,10 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
int rc = 0;
/* Get the extent count so we can calculate the size of
- * required fiemap buffer */
+ * required fiemap buffer
+ */
if (get_user(extent_count,
- &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
+ &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
return -EFAULT;
if (extent_count >=
@@ -1794,7 +1814,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
sizeof(struct ll_fiemap_extent));
fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS);
- if (fiemap_s == NULL)
+ if (!fiemap_s)
return -ENOMEM;
/* get the fiemap value */
@@ -1806,11 +1826,12 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
/* If fm_extent_count is non-zero, read the first extent since
* it is used to calculate end_offset and device from previous
- * fiemap call. */
+ * fiemap call.
+ */
if (extent_count) {
if (copy_from_user(&fiemap_s->fm_extents[0],
- (char __user *)arg + sizeof(*fiemap_s),
- sizeof(struct ll_fiemap_extent))) {
+ (char __user *)arg + sizeof(*fiemap_s),
+ sizeof(struct ll_fiemap_extent))) {
rc = -EFAULT;
goto error;
}
@@ -1826,7 +1847,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
ret_bytes += (fiemap_s->fm_mapped_extents *
sizeof(struct ll_fiemap_extent));
- if (copy_to_user((void *)arg, fiemap_s, ret_bytes))
+ if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes))
rc = -EFAULT;
error:
@@ -1917,13 +1938,14 @@ int ll_hsm_release(struct inode *inode)
/* Release the file.
* NB: lease lock handle is released in mdc_hsm_release_pack() because
- * we still need it to pack l_remote_handle to MDT. */
+ * we still need it to pack l_remote_handle to MDT.
+ */
rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
&data_version);
och = NULL;
out:
- if (och != NULL && !IS_ERR(och)) /* close the file */
+ if (och && !IS_ERR(och)) /* close the file */
ll_lease_close(och, inode, NULL);
return rc;
@@ -2007,7 +2029,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
}
/* to be able to restore mtime and atime after swap
- * we need to first save them */
+ * we need to first save them
+ */
if (lsl->sl_flags &
(SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) {
llss->ia1.ia_mtime = llss->inode1->i_mtime;
@@ -2019,7 +2042,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
}
/* ultimate check, before swapping the layouts we check if
- * dataversion has changed (if requested) */
+ * dataversion has changed (if requested)
+ */
if (llss->check_dv1) {
rc = ll_data_version(llss->inode1, &dv, 0);
if (rc)
@@ -2042,9 +2066,11 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
/* struct md_op_data is used to send the swap args to the mdt
* only flags is missing, so we use struct mdc_swap_layouts
- * through the md_op_data->op_data */
+ * through the md_op_data->op_data
+ */
/* flags from user space have to be converted before they are sent to
- * server, no flag is sent today, they are only used on the client */
+ * server, no flag is sent today, they are only used on the client
+ */
msl.msl_flags = 0;
rc = -ENOMEM;
op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
@@ -2113,7 +2139,8 @@ static int ll_hsm_state_set(struct inode *inode, struct hsm_state_set *hss)
return -EINVAL;
/* Non-root users are forbidden to set or clear flags which are
- * NOT defined in HSM_USER_MASK. */
+ * NOT defined in HSM_USER_MASK.
+ */
if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK) &&
!capable(CFS_CAP_SYS_ADMIN))
return -EPERM;
@@ -2211,14 +2238,14 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case LL_IOC_GETFLAGS:
/* Get the current value of the file flags */
- return put_user(fd->fd_flags, (int *)arg);
+ return put_user(fd->fd_flags, (int __user *)arg);
case LL_IOC_SETFLAGS:
case LL_IOC_CLRFLAGS:
/* Set or clear specific file flags */
/* XXX This probably needs checks to ensure the flags are
* not abused, and to handle any flag side effects.
*/
- if (get_user(flags, (int *) arg))
+ if (get_user(flags, (int __user *)arg))
return -EFAULT;
if (cmd == LL_IOC_SETFLAGS) {
@@ -2242,15 +2269,15 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct file *file2;
struct lustre_swap_layouts lsl;
- if (copy_from_user(&lsl, (char *)arg,
- sizeof(struct lustre_swap_layouts)))
+ if (copy_from_user(&lsl, (char __user *)arg,
+ sizeof(struct lustre_swap_layouts)))
return -EFAULT;
if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */
return -EPERM;
file2 = fget(lsl.sl_fd);
- if (file2 == NULL)
+ if (!file2)
return -EBADF;
rc = -EPERM;
@@ -2272,13 +2299,13 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ll_iocontrol(inode, file, cmd, arg);
case FSFILT_IOC_GETVERSION_OLD:
case FSFILT_IOC_GETVERSION:
- return put_user(inode->i_generation, (int *)arg);
+ return put_user(inode->i_generation, (int __user *)arg);
case LL_IOC_GROUP_LOCK:
return ll_get_grouplock(inode, file, arg);
case LL_IOC_GROUP_UNLOCK:
return ll_put_grouplock(inode, file, arg);
case IOC_OBD_STATFS:
- return ll_obd_statfs(inode, (void *)arg);
+ return ll_obd_statfs(inode, (void __user *)arg);
/* We need to special case any other ioctls we want to handle,
* to send them to the MDS/OST as appropriate and to properly
@@ -2289,25 +2316,26 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case LL_IOC_FLUSHCTX:
return ll_flush_ctx(inode);
case LL_IOC_PATH2FID: {
- if (copy_to_user((void *)arg, ll_inode2fid(inode),
+ if (copy_to_user((void __user *)arg, ll_inode2fid(inode),
sizeof(struct lu_fid)))
return -EFAULT;
return 0;
}
case OBD_IOC_FID2PATH:
- return ll_fid2path(inode, (void *)arg);
+ return ll_fid2path(inode, (void __user *)arg);
case LL_IOC_DATA_VERSION: {
struct ioc_data_version idv;
int rc;
- if (copy_from_user(&idv, (char *)arg, sizeof(idv)))
+ if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
return -EFAULT;
rc = ll_data_version(inode, &idv.idv_version,
- !(idv.idv_flags & LL_DV_NOFLUSH));
+ !(idv.idv_flags & LL_DV_NOFLUSH));
- if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv)))
+ if (rc == 0 && copy_to_user((char __user *)arg, &idv,
+ sizeof(idv)))
return -EFAULT;
return rc;
@@ -2320,7 +2348,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (mdtidx < 0)
return mdtidx;
- if (put_user((int)mdtidx, (int *)arg))
+ if (put_user(mdtidx, (int __user *)arg))
return -EFAULT;
return 0;
@@ -2347,7 +2375,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
op_data, NULL);
- if (copy_to_user((void *)arg, hus, sizeof(*hus)))
+ if (copy_to_user((void __user *)arg, hus, sizeof(*hus)))
rc = -EFAULT;
ll_finish_md_op_data(op_data);
@@ -2358,7 +2386,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct hsm_state_set *hss;
int rc;
- hss = memdup_user((char *)arg, sizeof(*hss));
+ hss = memdup_user((char __user *)arg, sizeof(*hss));
if (IS_ERR(hss))
return PTR_ERR(hss);
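memdup_user() used above bundles the allocate-and-copy_from_user pair and reports failure as an ERR_PTR (-EFAULT or -ENOMEM); the caller owns the kernel copy. A sketch with a hypothetical request struct:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct my_req { __u64 mask; };	/* hypothetical payload */

	static long handle_req(unsigned long arg)
	{
		struct my_req *req;

		req = memdup_user((void __user *)arg, sizeof(*req));
		if (IS_ERR(req))
			return PTR_ERR(req);
		/* ... act on req->mask ... */
		kfree(req);	/* the copy belongs to us */
		return 0;
	}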
@@ -2386,7 +2414,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
op_data, NULL);
- if (copy_to_user((char *)arg, hca, sizeof(*hca)))
+ if (copy_to_user((char __user *)arg, hca, sizeof(*hca)))
rc = -EFAULT;
ll_finish_md_op_data(op_data);
@@ -2412,13 +2440,13 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
case F_UNLCK:
mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
+ if (fd->fd_lease_och) {
och = fd->fd_lease_och;
fd->fd_lease_och = NULL;
}
mutex_unlock(&lli->lli_och_mutex);
- if (och != NULL) {
+ if (och) {
mode = och->och_flags &
(FMODE_READ|FMODE_WRITE);
rc = ll_lease_close(och, inode, &lease_broken);
@@ -2443,12 +2471,12 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = 0;
mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och == NULL) {
+ if (!fd->fd_lease_och) {
fd->fd_lease_och = och;
och = NULL;
}
mutex_unlock(&lli->lli_och_mutex);
- if (och != NULL) {
+ if (och) {
/* impossible now that only excl is supported for now */
ll_lease_close(och, inode, &lease_broken);
rc = -EBUSY;
@@ -2461,11 +2489,11 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = 0;
mutex_lock(&lli->lli_och_mutex);
- if (fd->fd_lease_och != NULL) {
+ if (fd->fd_lease_och) {
struct obd_client_handle *och = fd->fd_lease_och;
lock = ldlm_handle2lock(&och->och_lease_handle);
- if (lock != NULL) {
+ if (lock) {
lock_res_and_lock(lock);
if (!ldlm_is_cancel(lock))
rc = och->och_flags &
@@ -2480,7 +2508,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case LL_IOC_HSM_IMPORT: {
struct hsm_user_import *hui;
- hui = memdup_user((void *)arg, sizeof(*hui));
+ hui = memdup_user((void __user *)arg, sizeof(*hui));
if (IS_ERR(hui))
return PTR_ERR(hui);
@@ -2497,7 +2525,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return err;
return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
- (void *)arg);
+ (void __user *)arg);
}
}
}
@@ -2536,15 +2564,17 @@ static int ll_flush(struct file *file, fl_owner_t id)
LASSERT(!S_ISDIR(inode->i_mode));
/* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping. */
+ * failed for pages in this mapping.
+ */
rc = lli->lli_async_rc;
lli->lli_async_rc = 0;
err = lov_read_and_clear_async_rc(lli->lli_clob);
if (rc == 0)
rc = err;
- /* The application has been told write failure already.
- * Do not report failure again. */
+ /* The application has been told about write failure already.
+ * Do not report failure again.
+ */
if (fd->fd_write_failed)
return 0;
return rc ? -EIO : 0;
@@ -2612,7 +2642,8 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
inode_lock(inode);
/* catch async errors that were recorded back when async writeback
- * failed for pages in this mapping. */
+ * failed for pages in this mapping.
+ */
if (!S_ISDIR(inode->i_mode)) {
err = lli->lli_async_rc;
lli->lli_async_rc = 0;
@@ -2683,7 +2714,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
* I guess between lockd processes) and then compares pid.
* As such we assign pid to the owner field to make it all work,
* conflict with normal locks is unlikely since pid space and
- * pointer space for current->files are not intersecting */
+ * pointer space for current->files are not intersecting
+ */
if (file_lock->fl_lmops && file_lock->fl_lmops->lm_compare_owner)
flock.l_flock.owner = (unsigned long)file_lock->fl_pid;
@@ -2699,7 +2731,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
* order to process an unlock request we need all of the same
* information that is given with a normal read or write record
* lock request. To avoid creating another ldlm unlock (cancel)
- * message we'll treat a LCK_NL flock request as an unlock. */
+ * message we'll treat a LCK_NL flock request as an unlock.
+ */
einfo.ei_mode = LCK_NL;
break;
case F_WRLCK:
@@ -2707,7 +2740,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
break;
default:
CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n",
- file_lock->fl_type);
+ file_lock->fl_type);
return -ENOTSUPP;
}
@@ -2730,7 +2763,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
#endif
flags = LDLM_FL_TEST_LOCK;
/* Save the old mode so that if the mode in the lock changes we
- * can decrement the appropriate reader or writer refcount. */
+ * can decrement the appropriate reader or writer refcount.
+ */
file_lock->fl_type = einfo.ei_mode;
break;
default:
@@ -2757,7 +2791,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
if (rc2 && file_lock->fl_type != F_UNLCK) {
einfo.ei_mode = LCK_NL;
md_enqueue(sbi->ll_md_exp, &einfo, NULL,
- op_data, &lockh, &flock, 0, NULL /* req */, flags);
+ op_data, &lockh, &flock, 0, NULL /* req */, flags);
rc = rc2;
}
@@ -2782,11 +2816,12 @@ ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
* \param l_req_mode [IN] searched lock mode
* \retval boolean, true iff all bits are found
*/
-int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
+int ll_have_md_lock(struct inode *inode, __u64 *bits,
+ enum ldlm_mode l_req_mode)
{
struct lustre_handle lockh;
ldlm_policy_data_t policy;
- ldlm_mode_t mode = (l_req_mode == LCK_MINMODE) ?
+ enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
(LCK_CR|LCK_CW|LCK_PR|LCK_PW) : l_req_mode;
struct lu_fid *fid;
__u64 flags;
@@ -2822,13 +2857,13 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
return *bits == 0;
}
-ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- ldlm_mode_t mode)
+enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
+ struct lustre_handle *lockh, __u64 flags,
+ enum ldlm_mode mode)
{
ldlm_policy_data_t policy = { .l_inodebits = {bits} };
struct lu_fid *fid;
- ldlm_mode_t rc;
+ enum ldlm_mode rc;
fid = &ll_i2info(inode)->lli_fid;
CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
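The ldlm_mode_t to enum ldlm_mode conversions in this series drop a typedef in favor of the named enum, per kernel style: the typedef hides nothing but the word "enum", while the explicit type documents the API and lets the compiler check conversions and switch coverage. Sketch with illustrative enumerator values:

	enum ldlm_mode {	/* values here are illustrative only */
		LCK_MINMODE	= 0,
		LCK_EX		= 1,
		LCK_PW		= 2,
		LCK_PR		= 4,
	};

	static enum ldlm_mode pick_mode(int want_write)
	{
		/* a named enum return type states the contract; a typedef'd
		 * integer would silently accept any int */
		return want_write ? LCK_PW : LCK_PR;
	}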
@@ -2866,8 +2901,6 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
struct obd_export *exp;
int rc = 0;
- LASSERT(inode != NULL);
-
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n",
inode->i_ino, inode->i_generation, inode, dentry);
@@ -2875,7 +2908,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
/* XXX: Enable OBD_CONNECT_ATTRFID to reduce unnecessary getattr RPC.
* But under CMD case, it caused some lock issues, should be fixed
- * with new CMD ibits lock. See bug 12718 */
+ * with new CMD ibits lock. See bug 12718
+ */
if (exp_connect_flags(exp) & OBD_CONNECT_ATTRFID) {
struct lookup_intent oit = { .it_op = IT_GETATTR };
struct md_op_data *op_data;
@@ -2893,7 +2927,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
oit.it_create_mode |= M_CHECK_STALE;
rc = md_intent_lock(exp, op_data, NULL, 0,
/* we are not interested in name
- based lookup */
+ * based lookup
+ */
&oit, 0, &req,
ll_md_blocking_ast, 0);
ll_finish_md_op_data(op_data);
@@ -2910,9 +2945,10 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
}
/* Unlinked? Unhash dentry, so it is not picked up later by
- do_lookup() -> ll_revalidate_it(). We cannot use d_drop
- here to preserve get_cwd functionality on 2.6.
- Bug 10503 */
+ * do_lookup() -> ll_revalidate_it(). We cannot use d_drop
+ * here to preserve get_cwd functionality on 2.6.
+ * Bug 10503
+ */
if (!d_inode(dentry)->i_nlink)
d_lustre_invalidate(dentry, 0);
@@ -3026,26 +3062,33 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
sizeof(struct ll_fiemap_extent));
fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS);
- if (fiemap == NULL)
+ if (!fiemap)
return -ENOMEM;
fiemap->fm_flags = fieinfo->fi_flags;
fiemap->fm_extent_count = fieinfo->fi_extents_max;
fiemap->fm_start = start;
fiemap->fm_length = len;
- if (extent_count > 0)
- memcpy(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
- sizeof(struct ll_fiemap_extent));
+ if (extent_count > 0 &&
+ copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
+ sizeof(struct ll_fiemap_extent)) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
rc = ll_do_fiemap(inode, fiemap, num_bytes);
fieinfo->fi_flags = fiemap->fm_flags;
fieinfo->fi_extents_mapped = fiemap->fm_mapped_extents;
- if (extent_count > 0)
- memcpy(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
- fiemap->fm_mapped_extents *
- sizeof(struct ll_fiemap_extent));
+ if (extent_count > 0 &&
+ copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
+ fiemap->fm_mapped_extents *
+ sizeof(struct ll_fiemap_extent)) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+out:
kvfree(fiemap);
return rc;
}
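The memcpy-to-copy_{from,to}_user change in this hunk is a correctness fix, not style: fi_extents_start points into user space, so memcpy() through it can oops or silently corrupt, and the copy can legitimately fail and must be checked. Reduced sketch of the rule:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	/* any transfer through a __user pointer goes via the checked helpers */
	static int fetch_from_user(void *dst, const void __user *src, size_t len)
	{
		if (copy_from_user(dst, src, len))
			return -EFAULT;	/* memcpy() could not report this */
		return 0;
	}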
@@ -3067,13 +3110,12 @@ int ll_inode_permission(struct inode *inode, int mask)
{
int rc = 0;
-#ifdef MAY_NOT_BLOCK
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
-#endif
/* as the root inode is NOT validated in lookup operation,
- * need to do it before permission check. */
+ * we need to do it before the permission check.
+ */
if (is_root_inode(inode)) {
rc = __ll_inode_revalidate(inode->i_sb->s_root,
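MAY_NOT_BLOCK has been unconditionally defined for a long time, so the #ifdef above was dead; the check itself implements the RCU-walk contract: a ->permission() handler that may sleep bails out with -ECHILD when called in RCU-walk mode, and the VFS retries the lookup in ref-walk mode. Sketch of the convention:

	#include <linux/fs.h>

	static int my_permission(struct inode *inode, int mask)
	{
		if (mask & MAY_NOT_BLOCK)
			return -ECHILD;	/* cannot sleep here; ask VFS to retry */

		/* ... revalidation that may issue RPCs, then the usual check ... */
		return generic_permission(inode, mask);
	}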
@@ -3173,8 +3215,7 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
unsigned int size;
struct llioc_data *in_data = NULL;
- if (cb == NULL || cmd == NULL ||
- count > LLIOC_MAX_CMD || count < 0)
+ if (!cb || !cmd || count > LLIOC_MAX_CMD || count < 0)
return NULL;
size = sizeof(*in_data) + count * sizeof(unsigned int);
@@ -3200,7 +3241,7 @@ void ll_iocontrol_unregister(void *magic)
{
struct llioc_data *tmp;
- if (magic == NULL)
+ if (!magic)
return;
down_write(&llioc.ioc_sem);
@@ -3254,7 +3295,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
struct lu_env *env;
int result;
- if (lli->lli_clob == NULL)
+ if (!lli->lli_clob)
return 0;
env = cl_env_nested_get(&nest);
@@ -3267,13 +3308,14 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
if (conf->coc_opc == OBJECT_CONF_SET) {
struct ldlm_lock *lock = conf->coc_lock;
- LASSERT(lock != NULL);
+ LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
if (result == 0) {
/* it can only be allowed to match after layout is
* applied to inode otherwise false layout would be
* seen. Applying layout should happen before dropping
- * the intent lock. */
+ * the intent lock.
+ */
ldlm_lock_allow_match(lock);
}
}
@@ -3296,14 +3338,15 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
lock->l_lvb_data, lock->l_lvb_len);
- if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY))
+ if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY))
return 0;
/* if layout lock was granted right away, the layout is returned
* within DLM_LVB of dlm reply; otherwise if the lock was ever
* blocked and then granted via completion ast, we have to fetch
* layout here. Please note that we can't use the LVB buffer in
- * completion AST because it doesn't have a large enough buffer */
+ * completion AST because it doesn't have a large enough buffer
+ */
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc == 0)
rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
@@ -3313,7 +3356,7 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
return rc;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EPROTO;
goto out;
}
@@ -3325,20 +3368,20 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
}
lmm = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, lmmsize);
- if (lmm == NULL) {
+ if (!lmm) {
rc = -EFAULT;
goto out;
}
lvbdata = libcfs_kvzalloc(lmmsize, GFP_NOFS);
- if (lvbdata == NULL) {
+ if (!lvbdata) {
rc = -ENOMEM;
goto out;
}
memcpy(lvbdata, lmm, lmmsize);
lock_res_and_lock(lock);
- if (lock->l_lvb_data != NULL)
+ if (lock->l_lvb_data)
kvfree(lock->l_lvb_data);
lock->l_lvb_data = lvbdata;
@@ -3354,8 +3397,8 @@ out:
* Apply the layout to the inode. Layout lock is held and will be released
* in this function.
*/
-static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
- struct inode *inode, __u32 *gen, bool reconf)
+static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
+ struct inode *inode, __u32 *gen, bool reconf)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -3369,10 +3412,10 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
LASSERT(lustre_handle_is_used(lockh));
lock = ldlm_handle2lock(lockh);
- LASSERT(lock != NULL);
+ LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
- LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n",
+ LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d",
inode, PFID(&lli->lli_fid), reconf);
/* in case this is a caching lock and reinstate with new inode */
@@ -3382,12 +3425,14 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
unlock_res_and_lock(lock);
/* checking lvb_ready is racy but this is okay. The worst case is
- * that multi processes may configure the file on the same time. */
+ * that multiple processes may configure the file at the same time.
+ */
if (lvb_ready || !reconf) {
rc = -ENODATA;
if (lvb_ready) {
/* layout_gen must be valid if layout lock is not
- * cancelled and stripe has already set */
+ * cancelled and stripe has already set
+ */
*gen = ll_layout_version_get(lli);
rc = 0;
}
@@ -3401,26 +3446,28 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
/* for layout lock, lmm is returned in lock's lvb.
* lvb_data is immutable if the lock is held so it's safe to access it
* without res lock. See the description in ldlm_lock_decref_internal()
- * for the condition to free lvb_data of layout lock */
- if (lock->l_lvb_data != NULL) {
+ * for the condition to free lvb_data of layout lock
+ */
+ if (lock->l_lvb_data) {
rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
lock->l_lvb_data, lock->l_lvb_len);
if (rc >= 0) {
*gen = LL_LAYOUT_GEN_EMPTY;
- if (md.lsm != NULL)
+ if (md.lsm)
*gen = md.lsm->lsm_layout_gen;
rc = 0;
} else {
- CERROR("%s: file "DFID" unpackmd error: %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(&lli->lli_fid), rc);
+ CERROR("%s: file " DFID " unpackmd error: %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&lli->lli_fid), rc);
}
}
if (rc < 0)
goto out;
/* set layout to file. Unlikely this will fail as old layout was
- * surely eliminated */
+ * surely eliminated
+ */
memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_SET;
conf.coc_inode = inode;
@@ -3428,7 +3475,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
conf.u.coc_md = &md;
rc = ll_layout_conf(inode, &conf);
- if (md.lsm != NULL)
+ if (md.lsm)
obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
/* refresh layout failed, need to wait */
@@ -3440,9 +3487,9 @@ out:
/* wait for IO to complete if it's still being used. */
if (wait_layout) {
- CDEBUG(D_INODE, "%s: %p/"DFID" wait for layout reconf.\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- inode, PFID(&lli->lli_fid));
+ CDEBUG(D_INODE, "%s: %p/" DFID " wait for layout reconf.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ inode, PFID(&lli->lli_fid));
memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_WAIT;
@@ -3451,8 +3498,8 @@ out:
if (rc == 0)
rc = -EAGAIN;
- CDEBUG(D_INODE, "file: "DFID" waiting layout return: %d.\n",
- PFID(&lli->lli_fid), rc);
+ CDEBUG(D_INODE, "file: " DFID " waiting layout return: %d.\n",
+ PFID(&lli->lli_fid), rc);
}
return rc;
}
@@ -3477,7 +3524,7 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
struct md_op_data *op_data;
struct lookup_intent it;
struct lustre_handle lockh;
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
struct ldlm_enqueue_info einfo = {
.ei_type = LDLM_IBITS,
.ei_mode = LCK_CR,
@@ -3499,7 +3546,8 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
again:
/* mostly layout lock is caching on the local side, so try to match
- * it before grabbing layout lock mutex. */
+ * it before grabbing layout lock mutex.
+ */
mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
LCK_CR | LCK_CW | LCK_PR | LCK_PW);
if (mode != 0) { /* hit cached lock */
@@ -3512,7 +3560,7 @@ again:
}
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
- 0, 0, LUSTRE_OPC_ANY, NULL);
+ 0, 0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data)) {
mutex_unlock(&lli->lli_layout_mutex);
return PTR_ERR(op_data);
@@ -3523,14 +3571,13 @@ again:
it.it_op = IT_LAYOUT;
lockh.cookie = 0ULL;
- LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/"DFID".\n",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
+ LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/" DFID "",
+ ll_get_fsname(inode->i_sb, NULL, 0), inode,
PFID(&lli->lli_fid));
rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
NULL, 0, NULL, 0);
- if (it.d.lustre.it_data != NULL)
- ptlrpc_req_finished(it.d.lustre.it_data);
+ ptlrpc_req_finished(it.d.lustre.it_data);
it.d.lustre.it_data = NULL;
ll_finish_md_op_data(op_data);
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 3f348a3aad43..a55ac4dccd90 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -52,9 +52,8 @@ void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page != NULL && list_empty(&page->cpg_pending_linkage))
- list_add(&page->cpg_pending_linkage,
- &club->cob_pending_list);
+ if (page && list_empty(&page->cpg_pending_linkage))
+ list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
spin_unlock(&lli->lli_lock);
}
@@ -65,7 +64,7 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
int rc = 0;
spin_lock(&lli->lli_lock);
- if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
+ if (page && !list_empty(&page->cpg_pending_linkage)) {
list_del_init(&page->cpg_pending_linkage);
rc = 1;
}
@@ -76,7 +75,8 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
/** Queues DONE_WRITING if
* - done writing is allowed;
- * - inode has no no dirty pages; */
+ * - inode has no dirty pages;
+ */
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
@@ -106,7 +106,8 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
* close() happen, epoch is closed as the inode is marked as
* LLIF_EPOCH_PENDING. When pages are written inode should not
* be inserted into the queue again, clear this flag to avoid
- * it. */
+ * it.
+ */
lli->lli_flags &= ~LLIF_DONE_WRITING;
wake_up(&lcq->lcq_waitq);
@@ -144,10 +145,11 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
spin_lock(&lli->lli_lock);
if (!(list_empty(&club->cob_pending_list))) {
if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
- LASSERT(*och != NULL);
- LASSERT(lli->lli_pending_och == NULL);
+ LASSERT(*och);
+ LASSERT(!lli->lli_pending_och);
/* Inode is dirty and there is no pending write done
- * request yet, DONE_WRITE is to be sent later. */
+ * request yet, DONE_WRITE is to be sent later.
+ */
lli->lli_flags |= LLIF_EPOCH_PENDING;
lli->lli_pending_och = *och;
spin_unlock(&lli->lli_lock);
@@ -159,7 +161,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
if (flags & LLIF_DONE_WRITING) {
/* Some pages are still dirty, it is early to send
* DONE_WRITE. Wait until all pages will be flushed
- * and try DONE_WRITE again later. */
+ * and try DONE_WRITE again later.
+ */
LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
lli->lli_flags |= LLIF_DONE_WRITING;
spin_unlock(&lli->lli_lock);
@@ -187,7 +190,8 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
}
/* There is a pending DONE_WRITE -- close epoch with no
- * attribute change. */
+ * attribute change.
+ */
if (lli->lli_flags & LLIF_EPOCH_PENDING) {
spin_unlock(&lli->lli_lock);
goto out;
@@ -215,13 +219,13 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
struct obdo *oa;
int rc;
- LASSERT(op_data != NULL);
+ LASSERT(op_data);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
inode->i_ino, inode->i_generation,
lli->lli_flags);
- oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
+ oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
if (!oa) {
CERROR("can't allocate memory for Size-on-MDS update.\n");
return -ENOMEM;
@@ -266,7 +270,7 @@ static void ll_prepare_done_writing(struct inode *inode,
{
ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
/* If there is no @och, we do not do D_W yet. */
- if (*och == NULL)
+ if (!*och)
return;
ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
@@ -289,13 +293,14 @@ static void ll_done_writing(struct inode *inode)
ll_prepare_done_writing(inode, op_data, &och);
/* If there is no @och, we do not do D_W yet. */
- if (och == NULL)
+ if (!och)
goto out;
rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
if (rc == -EAGAIN)
/* MDS has instructed us to obtain Size-on-MDS attribute from
- * OSTs and send setattr to back to MDS. */
+ * OSTs and send setattr back to MDS.
+ */
rc = ll_som_update(inode, op_data);
else if (rc)
CERROR("inode %lu mdc done_writing failed: rc = %d\n",
@@ -316,7 +321,7 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
if (!list_empty(&lcq->lcq_head)) {
lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
- lli_close_list);
+ lli_close_list);
list_del_init(&lli->lli_close_list);
} else if (atomic_read(&lcq->lcq_stop))
lli = ERR_PTR(-EALREADY);
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 845e992ca5fc..e3c0f1dd4d31 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -93,9 +93,10 @@ struct ll_remote_perm {
gid_t lrp_gid;
uid_t lrp_fsuid;
gid_t lrp_fsgid;
- int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
- is access permission with
- lrp_fsuid/lrp_fsgid. */
+ int lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
+ * is access permission with
+ * lrp_fsuid/lrp_fsgid.
+ */
};
enum lli_flags {
@@ -106,7 +107,8 @@ enum lli_flags {
/* DONE WRITING is allowed. */
LLIF_DONE_WRITING = (1 << 2),
/* Sizeon-on-MDS attributes are changed. An attribute update needs to
- * be sent to MDS. */
+ * be sent to MDS.
+ */
LLIF_SOM_DIRTY = (1 << 3),
/* File data is modified. */
LLIF_DATA_MODIFIED = (1 << 4),
@@ -130,22 +132,23 @@ struct ll_inode_info {
/* identifying fields for both metadata and data stacks. */
struct lu_fid lli_fid;
/* Parent fid for accessing default stripe data on parent directory
- * for allocating OST objects after a mknod() and later open-by-FID. */
+ * for allocating OST objects after a mknod() and later open-by-FID.
+ */
struct lu_fid lli_pfid;
- struct list_head lli_close_list;
- /* open count currently used by capability only, indicate whether
- * capability needs renewal */
- atomic_t lli_open_count;
+ struct list_head lli_close_list;
+
unsigned long lli_rmtperm_time;
/* handle is to be sent to MDS later on done_writing and setattr.
* Open handle data are needed for the recovery to reconstruct
- * the inode state on the MDS. XXX: recovery is not ready yet. */
+ * the inode state on the MDS. XXX: recovery is not ready yet.
+ */
struct obd_client_handle *lli_pending_och;
/* We need all three because every inode may be opened in different
- * modes */
+ * modes
+ */
struct obd_client_handle *lli_mds_read_och;
struct obd_client_handle *lli_mds_write_och;
struct obd_client_handle *lli_mds_exec_och;
@@ -162,7 +165,8 @@ struct ll_inode_info {
spinlock_t lli_agl_lock;
/* Try to make the d::member and f::member are aligned. Before using
- * these members, make clear whether it is directory or not. */
+ * these members, make clear whether it is directory or not.
+ */
union {
/* for directory */
struct {
@@ -173,13 +177,15 @@ struct ll_inode_info {
/* since parent-child threads can share the same @file
* struct, "opendir_key" is the token when dir close for
* case of parent exit before child -- it is me who should
- * cleanup the dir readahead. */
+ * cleanup the dir readahead.
+ */
void *d_opendir_key;
struct ll_statahead_info *d_sai;
/* protect statahead stuff. */
spinlock_t d_sa_lock;
- /* "opendir_pid" is the token when lookup/revalid
- * -- I am the owner of dir statahead. */
+ /* "opendir_pid" is the token when lookup/revalidate
+ * -- I am the owner of dir statahead.
+ */
pid_t d_opendir_pid;
} d;
@@ -281,11 +287,8 @@ static inline void ll_layout_version_set(struct ll_inode_info *lli, __u32 gen)
int ll_xattr_cache_destroy(struct inode *inode);
-int ll_xattr_cache_get(struct inode *inode,
- const char *name,
- char *buffer,
- size_t size,
- __u64 valid);
+int ll_xattr_cache_get(struct inode *inode, const char *name,
+ char *buffer, size_t size, __u64 valid);
/*
* Locking to guarantee consistency of non-atomic updates to long long i_size,
@@ -305,11 +308,12 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
}
/* default to about 40meg of readahead on a given system. That much tied
- * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */
-#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
+ * up in 512k readahead requests serviced at 40ms each is about 1GB/s.
+ */
+#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
/* default to read-ahead full files smaller than 2MB on the second read */
-#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
+#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
enum ra_stat {
RA_STAT_HIT = 0,
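PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT were removed upstream as exact aliases of PAGE_SIZE/PAGE_SHIFT, so these substitutions change nothing numerically. Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):

	#include <linux/mm.h>	/* PAGE_SHIFT */

	/* (20 - PAGE_SHIFT) converts megabytes to pages:
	 *   40UL << (20 - 12) == 40 * 256 == 10240 pages
	 *   10240 pages * 4 KiB == 40 MiB, the "about 40meg" above */
	#define MB_TO_PAGES(mb)	((unsigned long)(mb) << (20 - PAGE_SHIFT))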
@@ -344,11 +348,13 @@ struct ra_io_arg {
unsigned long ria_end; /* end offset of read-ahead*/
/* If stride read pattern is detected, ria_stoff means where
* stride read is started. Note: for normal read-ahead, the
- * value here is meaningless, and also it will not be accessed*/
+ * value here is meaningless, and also it will not be accessed
+ */
pgoff_t ria_stoff;
/* ria_length and ria_pages are the length and pages length in the
* stride I/O mode. And they will also be used to check whether
- * it is stride I/O read-ahead in the read-ahead pages*/
+ * it is stride I/O read-ahead in the read-ahead pages
+ */
unsigned long ria_length;
unsigned long ria_pages;
};
@@ -455,7 +461,8 @@ struct eacl_table {
struct ll_sb_info {
/* this protects pglist and ra_info. It isn't safe to
- * grab from interrupt contexts */
+ * grab from interrupt contexts
+ */
spinlock_t ll_lock;
spinlock_t ll_pp_extent_lock; /* pp_extent entry*/
spinlock_t ll_process_lock; /* ll_rw_process_info */
@@ -468,10 +475,8 @@ struct ll_sb_info {
int ll_flags;
unsigned int ll_umounting:1,
ll_xattr_cache_enabled:1;
- struct list_head ll_conn_chain; /* per-conn chain of SBs */
struct lustre_client_ocd ll_lco;
- struct list_head ll_orphan_dentry_list; /*please don't ask -p*/
struct ll_close_queue *ll_lcq;
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
@@ -502,13 +507,16 @@ struct ll_sb_info {
/* metadata stat-ahead */
unsigned int ll_sa_max; /* max statahead RPCs */
atomic_t ll_sa_total; /* statahead thread started
- * count */
+ * count
+ */
atomic_t ll_sa_wrong; /* statahead thread stopped for
- * low hit ratio */
+ * low hit ratio
+ */
atomic_t ll_agl_total; /* AGL thread started count */
- dev_t ll_sdev_orig; /* save s_dev before assign for
- * clustered nfs */
+ dev_t ll_sdev_orig; /* save s_dev before assign for
+ * clustered nfs
+ */
struct rmtacl_ctl_table ll_rct;
struct eacl_table ll_et;
__kernel_fsid_t ll_fsid;
@@ -619,13 +627,15 @@ struct ll_file_data {
__u32 fd_flags;
fmode_t fd_omode;
/* openhandle if lease exists for this file.
- * Borrow lli->lli_och_mutex to protect assignment */
+ * Borrow lli->lli_och_mutex to protect assignment
+ */
struct obd_client_handle *fd_lease_och;
struct obd_client_handle *fd_och;
struct file *fd_file;
/* Indicate whether need to report failure when close.
* true: failure is known, not report again.
- * false: unknown failure, should report. */
+ * false: unknown failure, should report.
+ */
bool fd_write_failed;
};
@@ -647,7 +657,7 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
#if BITS_PER_LONG == 32
return 1;
#elif defined(CONFIG_COMPAT)
- return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API));
+ return unlikely(in_compat_syscall() || (sbi->ll_flags & LL_SBI_32BIT_API));
#else
return unlikely(sbi->ll_flags & LL_SBI_32BIT_API);
#endif
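in_compat_syscall() (new in 4.6) replaces is_compat_task() because it keys off the syscall actually in progress rather than how the task was compiled; on x86, for example, the task-based check misclassifies x32 syscalls. Sketch of the intended use:

	#include <linux/compat.h>
	#include <linux/types.h>

	/* decide the ioctl ABI from the current syscall, not the task */
	static bool need_32bit_api(bool sb_forces_32bit)
	{
		return in_compat_syscall() || sb_forces_32bit;
	}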
@@ -705,10 +715,10 @@ extern struct file_operations ll_file_operations_flock;
extern struct file_operations ll_file_operations_noflock;
extern const struct inode_operations ll_file_inode_operations;
int ll_have_md_lock(struct inode *inode, __u64 *bits,
- ldlm_mode_t l_req_mode);
-ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
- struct lustre_handle *lockh, __u64 flags,
- ldlm_mode_t mode);
+ enum ldlm_mode l_req_mode);
+enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
+ struct lustre_handle *lockh, __u64 flags,
+ enum ldlm_mode mode);
int ll_file_open(struct inode *inode, struct file *file);
int ll_file_release(struct inode *inode, struct file *file);
int ll_glimpse_ioctl(struct ll_sb_info *sbi,
@@ -782,7 +792,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry);
void ll_dirty_page_discard_warn(struct page *page, int ioret);
int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
struct super_block *, struct lookup_intent *);
-int ll_obd_statfs(struct inode *inode, void *arg);
+int ll_obd_statfs(struct inode *inode, void __user *arg);
int ll_get_max_mdsize(struct ll_sb_info *sbi, int *max_mdsize);
int ll_get_default_mdsize(struct ll_sb_info *sbi, int *default_mdsize);
int ll_process_config(struct lustre_cfg *lcfg);
@@ -796,7 +806,7 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req);
/* llite/llite_nfs.c */
-extern struct export_operations lustre_export_operations;
+extern const struct export_operations lustre_export_operations;
__u32 get_uuid2int(const char *name, int len);
void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
struct inode *search_inode_for_lustre(struct super_block *sb,
@@ -913,7 +923,7 @@ static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
struct vvp_thread_info *info;
info = lu_context_key_get(&env->le_ctx, &vvp_key);
- LASSERT(info != NULL);
+ LASSERT(info);
return info;
}
@@ -937,7 +947,7 @@ static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
struct vvp_session *ses;
ses = lu_context_key_get(env->le_ses, &vvp_session_key);
- LASSERT(ses != NULL);
+ LASSERT(ses);
return ses;
}
@@ -957,21 +967,21 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret);
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
-void policy_from_vma(ldlm_policy_data_t *policy,
- struct vm_area_struct *vma, unsigned long addr, size_t count);
+void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma,
+ unsigned long addr, size_t count);
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
size_t count);
static inline void ll_invalidate_page(struct page *vmpage)
{
struct address_space *mapping = vmpage->mapping;
- loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
+ loff_t offset = vmpage->index << PAGE_SHIFT;
LASSERT(PageLocked(vmpage));
- if (mapping == NULL)
+ if (!mapping)
return;
- ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
+ ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
truncate_complete_page(mapping, vmpage);
}
@@ -993,7 +1003,7 @@ static inline struct client_obd *sbi2mdc(struct ll_sb_info *sbi)
{
struct obd_device *obd = sbi->ll_md_exp->exp_obd;
- if (obd == NULL)
+ if (!obd)
LBUG();
return &obd->u.cli;
}
@@ -1018,7 +1028,7 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode)
{
struct lu_fid *fid;
- LASSERT(inode != NULL);
+ LASSERT(inode);
fid = &ll_i2info(inode)->lli_fid;
return fid;
@@ -1107,39 +1117,44 @@ static inline u64 rce_ops2valid(int ops)
struct ll_statahead_info {
struct inode *sai_inode;
atomic_t sai_refcount; /* when access this struct, hold
- * refcount */
+ * refcount
+ */
unsigned int sai_generation; /* generation for statahead */
unsigned int sai_max; /* max ahead of lookup */
__u64 sai_sent; /* stat requests sent count */
__u64 sai_replied; /* stat requests which received
- * reply */
+ * reply
+ */
__u64 sai_index; /* index of statahead entry */
__u64 sai_index_wait; /* index of entry which the
- * caller is waiting for */
+ * caller is waiting for
+ */
__u64 sai_hit; /* hit count */
__u64 sai_miss; /* miss count:
- * for "ls -al" case, it includes
- * hidden dentry miss;
- * for "ls -l" case, it does not
- * include hidden dentry miss.
- * "sai_miss_hidden" is used for
- * the later case.
- */
+ * for "ls -al" case, it includes
+ * hidden dentry miss;
+ * for "ls -l" case, it does not
+ * include hidden dentry miss.
+ * "sai_miss_hidden" is used for
+ * the latter case.
+ */
unsigned int sai_consecutive_miss; /* consecutive miss */
unsigned int sai_miss_hidden;/* "ls -al", but first dentry
- * is not a hidden one */
+ * is not a hidden one
+ */
unsigned int sai_skip_hidden;/* skipped hidden dentry count */
unsigned int sai_ls_all:1, /* "ls -al", do stat-ahead for
- * hidden entries */
+ * hidden entries
+ */
sai_agl_valid:1;/* AGL is valid for the dir */
- wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
+ wait_queue_head_t sai_waitq; /* stat-ahead wait queue */
struct ptlrpc_thread sai_thread; /* stat-ahead thread */
struct ptlrpc_thread sai_agl_thread; /* AGL thread */
- struct list_head sai_entries; /* entry list */
- struct list_head sai_entries_received; /* entries returned */
- struct list_head sai_entries_stated; /* entries stated */
- struct list_head sai_entries_agl; /* AGL entries to be sent */
- struct list_head sai_cache[LL_SA_CACHE_SIZE];
+ struct list_head sai_entries; /* entry list */
+ struct list_head sai_entries_received; /* entries returned */
+ struct list_head sai_entries_stated; /* entries stated */
+ struct list_head sai_entries_agl; /* AGL entries to be sent */
+ struct list_head sai_cache[LL_SA_CACHE_SIZE];
spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
atomic_t sai_cache_count; /* entry count in cache */
};
@@ -1171,8 +1186,8 @@ ll_statahead_mark(struct inode *dir, struct dentry *dentry)
if (lli->lli_opendir_pid != current_pid())
return;
- LASSERT(ldd != NULL);
- if (sai != NULL)
+ LASSERT(ldd);
+ if (sai)
ldd->lld_sa_generation = sai->sai_generation;
}
@@ -1191,7 +1206,7 @@ d_need_statahead(struct inode *dir, struct dentry *dentryp)
return -EAGAIN;
/* statahead has been stopped */
- if (lli->lli_opendir_key == NULL)
+ if (!lli->lli_opendir_key)
return -EAGAIN;
ldd = ll_d2d(dentryp);
@@ -1313,13 +1328,15 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
/** direct write pages */
struct ll_dio_pages {
/** page array to be written. we don't support
- * partial pages except the last one. */
+ * partial pages except the last one.
+ */
struct page **ldp_pages;
/* offset of each page */
loff_t *ldp_offsets;
/** if ldp_offsets is NULL, it means a sequential
* pages to be written, then this is the file offset
- * of the * first page. */
+ * of the first page.
+ */
loff_t ldp_start_offset;
/** how many bytes are to be written. */
size_t ldp_size;
@@ -1345,7 +1362,6 @@ static inline int ll_file_nolock(const struct file *file)
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct inode *inode = file_inode(file);
- LASSERT(fd != NULL);
return ((fd->fd_flags & LL_FILE_IGNORE_LOCK) ||
(ll_i2sbi(inode)->ll_flags & LL_SBI_NOLCK));
}
@@ -1362,7 +1378,8 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
* remote MDT, where the object is, will grant
* UPDATE|PERM lock. The inode will be attached to both
* LOOKUP and PERM locks, so revoking either lock will
- * case the dcache being cleared */
+ * cause the dcache to be cleared
+ */
if (it->d.lustre.it_remote_lock_mode) {
handle.cookie = it->d.lustre.it_remote_lock_handle;
CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n",
@@ -1383,7 +1400,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
it->d.lustre.it_lock_set = 1;
}
- if (bits != NULL)
+ if (bits)
*bits = it->d.lustre.it_lock_bits;
}
@@ -1401,14 +1418,14 @@ static inline int d_lustre_invalid(const struct dentry *dentry)
{
struct ll_dentry_data *lld = ll_d2d(dentry);
- return (lld == NULL) || lld->lld_invalid;
+ return !lld || lld->lld_invalid;
}
static inline void __d_lustre_invalidate(struct dentry *dentry)
{
struct ll_dentry_data *lld = ll_d2d(dentry);
- if (lld != NULL)
+ if (lld)
lld->lld_invalid = 1;
}
@@ -1442,7 +1459,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
static inline void d_lustre_revalidate(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
- LASSERT(ll_d2d(dentry) != NULL);
+ LASSERT(ll_d2d(dentry));
ll_d2d(dentry)->lld_invalid = 0;
spin_unlock(&dentry->d_lock);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index b2fc5b3786ee..b57a992688a8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
si_meminfo(&si);
pages = si.totalram - si.totalhigh;
- if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
+ if (pages >> (20 - PAGE_SHIFT) < 512)
lru_page_max = pages / 2;
else
lru_page_max = (pages / 4) * 3;
@@ -102,8 +102,6 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
sbi->ll_ra_info.ra_max_read_ahead_whole_pages =
SBI_DEFAULT_READAHEAD_WHOLE_MAX;
- INIT_LIST_HEAD(&sbi->ll_conn_chain);
- INIT_LIST_HEAD(&sbi->ll_orphan_dentry_list);
ll_generate_random_uuid(uuid);
class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
@@ -171,7 +169,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
return -ENOMEM;
}
- if (llite_root != NULL) {
+ if (llite_root) {
err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
if (err < 0)
CERROR("could not register mount in <debugfs>/lustre/llite\n");
@@ -204,7 +202,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (OBD_FAIL_CHECK(OBD_FAIL_MDC_LIGHTWEIGHT))
/* flag mdc connection as lightweight, only used for test
- * purpose, use with care */
+ * purpose, use with care
+ */
data->ocd_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
data->ocd_ibits_known = MDS_INODELOCK_FULL;
@@ -252,10 +251,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* For mount, we only need fs info from MDT0, and also in DNE, it
* can make sure the client can be mounted as long as MDT0 is
- * available */
+ * available
+ */
err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
- OBD_STATFS_FOR_MDT0);
+ cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ OBD_STATFS_FOR_MDT0);
if (err)
goto out_md_fid;
@@ -265,18 +265,19 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
* we can access the MDC export directly and exp_connect_flags will
* be non-zero, but if accessing an upgraded 2.1 server it will
* have the correct flags filled in.
- * XXX: fill in the LMV exp_connect_flags from MDC(s). */
+ * XXX: fill in the LMV exp_connect_flags from MDC(s).
+ */
valid = exp_connect_flags(sbi->ll_md_exp) & CLIENT_CONNECT_MDT_REQD;
if (exp_connect_flags(sbi->ll_md_exp) != 0 &&
valid != CLIENT_CONNECT_MDT_REQD) {
char *buf;
- buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto out_md_fid;
}
- obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
+ obd_connect_flags2str(buf, PAGE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, ",");
LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
@@ -308,15 +309,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
}
if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
-#ifdef MS_POSIXACL
sb->s_flags |= MS_POSIXACL;
-#endif
sbi->ll_flags |= LL_SBI_ACL;
} else {
LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
-#ifdef MS_POSIXACL
sb->s_flags &= ~MS_POSIXACL;
-#endif
sbi->ll_flags &= ~LL_SBI_ACL;
}
@@ -338,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
sbi->ll_md_brw_size = data->ocd_brw_size;
else
- sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
+ sbi->ll_md_brw_size = PAGE_SIZE;
if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
LCONSOLE_INFO("Layout lock feature supported.\n");
@@ -382,7 +379,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* OBD_CONNECT_CKSUM should always be set, even if checksums are
* disabled by default, because it can still be enabled on the
* fly via /sys. As a consequence, we still need to come to an
- * agreement on the supported algorithms at connect time */
+ * agreement on the supported algorithms at connect time
+ */
data->ocd_connect_flags |= OBD_CONNECT_CKSUM;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CKSUM_ADLER_ONLY))
@@ -453,7 +451,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
#endif
/* make root inode
- * XXX: move this to after cbd setup? */
+ * XXX: move this to after cbd setup?
+ */
valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS;
if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
valid |= OBD_MD_FLRMTPERM;
@@ -493,7 +492,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
md_free_lustre_md(sbi->ll_md_exp, &lmd);
ptlrpc_req_finished(request);
- if (root == NULL || IS_ERR(root)) {
+ if (!root) {
if (lmd.lsm)
obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
#ifdef CONFIG_FS_POSIX_ACL
@@ -502,8 +501,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
lmd.posix_acl = NULL;
}
#endif
- err = IS_ERR(root) ? PTR_ERR(root) : -EBADF;
- root = NULL;
+ err = -EBADF;
CERROR("lustre_lite: bad iget4 for root\n");
goto out_root;
}
@@ -532,9 +530,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
&sbi->ll_cache, NULL);
sb->s_root = d_make_root(root);
- if (sb->s_root == NULL) {
+ if (!sb->s_root) {
CERROR("%s: can't make root dentry\n",
- ll_get_fsname(sb, NULL, 0));
+ ll_get_fsname(sb, NULL, 0));
err = -ENOMEM;
goto out_lock_cn_cb;
}
@@ -543,11 +541,13 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* We set sb->s_dev equal on all lustre clients in order to support
* NFS export clustering. NFSD requires that the FSID be the same
- * on all clients. */
+ * on all clients.
+ */
/* s_dev is also used in lt_compare() to compare two fs, but that is
- * only a node-local comparison. */
+ * only a node-local comparison.
+ */
uuid = obd_get_uuid(sbi->ll_md_exp);
- if (uuid != NULL) {
+ if (uuid) {
sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
}
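
get_uuid2int() reduces the server UUID string to a stable 32-bit value so that every client derives the same s_dev, which is what NFS export clustering requires. The sketch below illustrates the idea with a generic FNV-1a string hash; the real Lustre function may use a different mix, and determinism across clients is the only property that matters here:

/* Illustrative only: a stable string -> 32-bit reduction of the kind
 * get_uuid2int() performs. Equal UUID strings must always yield the
 * same value, on every client.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t uuid_to_dev(const char *s, size_t len)
{
	uint32_t h = 2166136261u;		/* FNV offset basis */
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= (unsigned char)s[i];
		h *= 16777619u;			/* FNV prime */
	}
	return h;
}
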
@@ -597,7 +597,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
size = sizeof(int);
rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
- KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
+ KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
if (rc)
CERROR("Get default mdsize error rc %d\n", rc);
@@ -619,13 +619,12 @@ static void client_common_put_super(struct super_block *sb)
cl_sb_fini(sb);
- list_del(&sbi->ll_conn_chain);
-
obd_fid_fini(sbi->ll_dt_exp->exp_obd);
obd_disconnect(sbi->ll_dt_exp);
sbi->ll_dt_exp = NULL;
/* wait till all OSCs are gone, since cl_cache is accessing sbi.
- * see LU-2543. */
+ * see LU-2543.
+ */
obd_zombie_barrier();
ldebugfs_unregister_mountpoint(sbi);
@@ -646,7 +645,8 @@ void ll_kill_super(struct super_block *sb)
sbi = ll_s2sbi(sb);
/* we need to restore s_dev, changed for clustered NFS, before
* put_super, because new kernels cache s_dev and changing sb->s_dev
- * in put_super not affected real removing devices */
+ * in put_super does not affect real device removal
+ */
if (sbi) {
sb->s_dev = sbi->ll_sdev_orig;
sbi->ll_umounting = 1;
@@ -777,7 +777,7 @@ static int ll_options(char *options, int *flags)
next:
/* Find next opt */
s2 = strchr(s1, ',');
- if (s2 == NULL)
+ if (!s2)
break;
s1 = s2 + 1;
}
@@ -797,7 +797,6 @@ void ll_lli_init(struct ll_inode_info *lli)
/* Do not set lli_fid, it has been initialized already. */
fid_zero(&lli->lli_pfid);
INIT_LIST_HEAD(&lli->lli_close_list);
- atomic_set(&lli->lli_open_count, 0);
lli->lli_rmtperm_time = 0;
lli->lli_pending_och = NULL;
lli->lli_mds_read_och = NULL;
@@ -890,8 +889,9 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
sb->s_d_op = &ll_d_ops;
/* Generate a string unique to this super, in case some joker tries
- to mount the same fs at two mount points.
- Use the address of the super itself.*/
+ * to mount the same fs at two mount points.
+ * Use the address of the super itself.
+ */
cfg->cfg_instance = sb;
cfg->cfg_uuid = lsi->lsi_llsbi->ll_sb_uuid;
cfg->cfg_callback = class_config_llog_handler;
@@ -904,7 +904,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
- if (lprof == NULL) {
+ if (!lprof) {
LCONSOLE_ERROR_MSG(0x156, "The client profile '%s' could not be read from the MGS. Does that filesystem exist?\n",
profilenm);
err = -EINVAL;
@@ -964,7 +964,8 @@ void ll_put_super(struct super_block *sb)
}
/* We need to set force before the lov_disconnect in
- lustre_common_put_super, since l_d cleans up osc's as well. */
+ * lustre_common_put_super, since l_d cleans up osc's as well.
+ */
if (force) {
next = 0;
while ((obd = class_devices_in_group(&sbi->ll_sb_uuid,
@@ -1036,8 +1037,8 @@ void ll_clear_inode(struct inode *inode)
if (S_ISDIR(inode->i_mode)) {
/* these should have been cleared in ll_file_release */
- LASSERT(lli->lli_opendir_key == NULL);
- LASSERT(lli->lli_sai == NULL);
+ LASSERT(!lli->lli_opendir_key);
+ LASSERT(!lli->lli_sai);
LASSERT(lli->lli_opendir_pid == 0);
}
@@ -1065,7 +1066,7 @@ void ll_clear_inode(struct inode *inode)
ll_xattr_cache_destroy(inode);
if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
- LASSERT(lli->lli_posix_acl == NULL);
+ LASSERT(!lli->lli_posix_acl);
if (lli->lli_remote_perms) {
free_rmtperm_hash(lli->lli_remote_perms);
lli->lli_remote_perms = NULL;
@@ -1074,7 +1075,7 @@ void ll_clear_inode(struct inode *inode)
#ifdef CONFIG_FS_POSIX_ACL
else if (lli->lli_posix_acl) {
LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
- LASSERT(lli->lli_remote_perms == NULL);
+ LASSERT(!lli->lli_remote_perms);
posix_acl_release(lli->lli_posix_acl);
lli->lli_posix_acl = NULL;
}
@@ -1095,7 +1096,7 @@ void ll_clear_inode(struct inode *inode)
#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
- struct md_open_data **mod)
+ struct md_open_data **mod)
{
struct lustre_md md;
struct inode *inode = d_inode(dentry);
@@ -1115,7 +1116,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
if (rc == -ENOENT) {
clear_nlink(inode);
/* Unlinked special device node? Or just a race?
- * Pretend we done everything. */
+ * Pretend we did everything.
+ */
if (!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) {
ia_valid = op_data->op_attr.ia_valid;
@@ -1138,7 +1140,8 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
ia_valid = op_data->op_attr.ia_valid;
/* inode size will be in cl_setattr_ost, can't do it now since dirty
- * cache is not cleared yet. */
+ * cache is not cleared yet.
+ */
op_data->op_attr.ia_valid &= ~(TIMES_SET_FLAGS | ATTR_SIZE);
rc = simple_setattr(dentry, &op_data->op_attr);
op_data->op_attr.ia_valid = ia_valid;
@@ -1161,7 +1164,6 @@ static int ll_setattr_done_writing(struct inode *inode,
struct ll_inode_info *lli = ll_i2info(inode);
int rc = 0;
- LASSERT(op_data != NULL);
if (!S_ISREG(inode->i_mode))
return 0;
@@ -1175,7 +1177,8 @@ static int ll_setattr_done_writing(struct inode *inode,
rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
if (rc == -EAGAIN)
/* MDS has instructed us to obtain Size-on-MDS attribute
- * from OSTs and send setattr to back to MDS. */
+ * from OSTs and send setattr back to the MDS.
+ */
rc = ll_som_update(inode, op_data);
else if (rc)
CERROR("inode %lu mdc truncate failed: rc = %d\n",
@@ -1208,11 +1211,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
int rc = 0, rc1 = 0;
CDEBUG(D_VFSTRACE,
- "%s: setattr inode %p/fid:"DFID
- " from %llu to %llu, valid %x, hsm_import %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
- attr->ia_valid, hsm_import);
+ "%s: setattr inode %p/fid:" DFID
+ " from %llu to %llu, valid %x, hsm_import %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), inode,
+ PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
+ attr->ia_valid, hsm_import);
if (attr->ia_valid & ATTR_SIZE) {
/* Check new size against VFS/VM file size limit and rlimit */
@@ -1222,7 +1225,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
/* The maximum Lustre file size is variable, based on the
* OST maximum object size and number of stripes. This
- * needs another check in addition to the VFS check above. */
+ * needs another check in addition to the VFS check above.
+ */
if (attr->ia_size > ll_file_maxbytes(inode)) {
CDEBUG(D_INODE, "file "DFID" too large %llu > %llu\n",
PFID(&lli->lli_fid), attr->ia_size,
@@ -1270,7 +1274,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
}
/* We always do an MDS RPC, even if we're only changing the size;
- * only the MDS knows whether truncate() should fail with -ETXTBUSY */
+ * only the MDS knows whether truncate() should fail with -ETXTBUSY
+ */
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
if (!op_data)
@@ -1304,7 +1309,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
/* if not in HSM import mode, clear size attr for released file
* we clear the attribute send to MDT in op_data, not the original
* received from caller in attr which is used later to
- * decide return code */
+ * decide return code
+ */
if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
op_data->op_attr.ia_valid &= ~ATTR_SIZE;
@@ -1322,7 +1328,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
}
/* RPC to MDT is sent, cancel data modification flag */
- if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
+ if (op_data->op_bias & MDS_DATA_MODIFIED) {
spin_lock(&lli->lli_lock);
lli->lli_flags &= ~LLIF_DATA_MODIFIED;
spin_unlock(&lli->lli_lock);
@@ -1342,7 +1348,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
* extent lock (new_size:EOF for truncate). It may seem
* excessive to send mtime/atime updates to OSTs when not
* setting times to past, but it is necessary due to possible
- * time de-synchronization between MDT inode and OST objects */
+ * time de-synchronization between MDT inode and OST objects
+ */
if (attr->ia_valid & ATTR_SIZE)
down_write(&lli->lli_trunc_sem);
rc = cl_setattr_ost(inode, attr);
@@ -1470,7 +1477,8 @@ int ll_statfs(struct dentry *de, struct kstatfs *sfs)
/* We need to downshift for all 32-bit kernels, because we can't
* tell if the kernel is being called via sys_statfs64() or not.
* Stop before overflowing f_bsize - in which case it is better
- * to just risk EOVERFLOW if caller is using old sys_statfs(). */
+ * to just risk EOVERFLOW if caller is using old sys_statfs().
+ */
if (sizeof(long) < 8) {
while (osfs.os_blocks > ~0UL && sfs->f_bsize < 0x40000000) {
sfs->f_bsize <<= 1;
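
The loop continued by this hunk trades granularity for range: while the 64-bit block count cannot fit in an unsigned long, it doubles f_bsize and halves the counts, stopping before f_bsize itself could overflow at 1 GiB. A condensed sketch of that normalization (field names abbreviated, not the real struct kstatfs):

/* Sketch: make 64-bit block counts safe for 32-bit statfs() callers
 * by halving the counts while doubling the block size.
 */
struct sfs {
	unsigned long f_bsize;
	unsigned long long f_blocks, f_bfree, f_bavail;
};

static void downshift_for_32bit(struct sfs *s)
{
	if (sizeof(long) >= 8)
		return;			/* 64-bit longs: nothing to do */
	while (s->f_blocks > ~0UL && s->f_bsize < 0x40000000) {
		s->f_bsize <<= 1;	/* coarser units... */
		s->f_blocks >>= 1;	/* ...smaller counts */
		s->f_bfree >>= 1;
		s->f_bavail >>= 1;
	}
}
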
@@ -1514,7 +1522,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
struct ll_sb_info *sbi = ll_i2sbi(inode);
LASSERT((lsm != NULL) == ((body->valid & OBD_MD_FLEASIZE) != 0));
- if (lsm != NULL) {
+ if (lsm) {
if (!lli->lli_has_smd &&
!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
cl_file_inode_init(inode, md);
@@ -1599,12 +1607,13 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (exp_connect_som(ll_i2mdexp(inode)) &&
S_ISREG(inode->i_mode)) {
struct lustre_handle lockh;
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
/* As it is possible a blocking ast has been processed
* by this time, we need to check there is an UPDATE
* lock on the client and set LLIF_MDS_SIZE_LOCK holding
- * it. */
+ * it.
+ */
mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
&lockh, LDLM_FL_CBPENDING,
LCK_CR | LCK_CW |
@@ -1617,7 +1626,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
inode->i_ino, lli->lli_flags);
} else {
/* Use old size assignment to avoid
- * deadlock bz14138 & bz14326 */
+ * deadlock bz14138 & bz14326
+ */
i_size_write(inode, body->size);
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
@@ -1627,7 +1637,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
}
} else {
/* Use old size assignment to avoid
- * deadlock bz14138 & bz14326 */
+ * deadlock bz14138 & bz14326
+ */
i_size_write(inode, body->size);
CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
@@ -1657,7 +1668,8 @@ void ll_read_inode2(struct inode *inode, void *opaque)
/* Core attributes from the MDS first. This is a new inode, and
* the VFS doesn't zero times in the core inode so we have to do
* it ourselves. They will be overwritten by either MDS or OST
- * attributes - we just need to make sure they aren't newer. */
+ * attributes - we just need to make sure they aren't newer.
+ */
LTIME_S(inode->i_mtime) = 0;
LTIME_S(inode->i_atime) = 0;
LTIME_S(inode->i_ctime) = 0;
@@ -1689,9 +1701,10 @@ void ll_delete_inode(struct inode *inode)
{
struct cl_inode_info *lli = cl_i2info(inode);
- if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
+ if (S_ISREG(inode->i_mode) && lli->lli_clob)
/* discard all dirty pages before truncating them, required by
- * osc_extent implementation at LU-1030. */
+ * osc_extent implementation at LU-1030.
+ */
cl_sync_file_range(inode, 0, OBD_OBJECT_EOF,
CL_FSYNC_DISCARD, 1);
@@ -1744,14 +1757,14 @@ int ll_iocontrol(struct inode *inode, struct file *file,
ptlrpc_req_finished(req);
- return put_user(flags, (int *)arg);
+ return put_user(flags, (int __user *)arg);
}
case FSFILT_IOC_SETFLAGS: {
struct lov_stripe_md *lsm;
struct obd_info oinfo = { };
struct md_op_data *op_data;
- if (get_user(flags, (int *)arg))
+ if (get_user(flags, (int __user *)arg))
return -EFAULT;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
@@ -1776,8 +1789,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
return 0;
}
- oinfo.oi_oa = kmem_cache_alloc(obdo_cachep,
- GFP_NOFS | __GFP_ZERO);
+ oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
if (!oinfo.oi_oa) {
ccc_inode_lsm_put(inode, lsm);
return -ENOMEM;
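
kmem_cache_zalloc() is shorthand for the open-coded allocation it replaces in this and several later hunks; in mainline slab.h it is a one-line wrapper (exact shape may vary slightly by kernel version):

/* Mainline definition: zalloc is alloc plus __GFP_ZERO. */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}
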
@@ -1809,7 +1821,7 @@ int ll_flush_ctx(struct inode *inode)
struct ll_sb_info *sbi = ll_i2sbi(inode);
CDEBUG(D_SEC, "flush context for user %d\n",
- from_kuid(&init_user_ns, current_uid()));
+ from_kuid(&init_user_ns, current_uid()));
obd_set_info_async(NULL, sbi->ll_md_exp,
sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
@@ -1831,7 +1843,7 @@ void ll_umount_begin(struct super_block *sb)
sb->s_count, atomic_read(&sb->s_active));
obd = class_exp2obd(sbi->ll_md_exp);
- if (obd == NULL) {
+ if (!obd) {
CERROR("Invalid MDC connection handle %#llx\n",
sbi->ll_md_exp->exp_handle.h_cookie);
return;
@@ -1839,7 +1851,7 @@ void ll_umount_begin(struct super_block *sb)
obd->obd_force = 1;
obd = class_exp2obd(sbi->ll_dt_exp);
- if (obd == NULL) {
+ if (!obd) {
CERROR("Invalid LOV connection handle %#llx\n",
sbi->ll_dt_exp->exp_handle.h_cookie);
return;
@@ -1920,13 +1932,8 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (!op_data) {
- CWARN("%s: cannot allocate op_data to release open handle for "
- DFID "\n",
- ll_get_fsname(sb, NULL, 0), PFID(&body->fid1));
-
+ if (!op_data)
return;
- }
op_data->op_fid1 = body->fid1;
op_data->op_ioepoch = body->ioepoch;
@@ -1941,7 +1948,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
struct super_block *sb, struct lookup_intent *it)
{
struct ll_sb_info *sbi = NULL;
- struct lustre_md md;
+ struct lustre_md md = { NULL };
int rc;
LASSERT(*inode || sb);
@@ -1954,7 +1961,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
if (*inode) {
ll_update_inode(*inode, &md);
} else {
- LASSERT(sb != NULL);
+ LASSERT(sb);
/*
* At this point server returns to client's same fid as client
@@ -1965,15 +1972,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
*inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
sbi->ll_flags & LL_SBI_32BIT_API),
&md);
- if (*inode == NULL || IS_ERR(*inode)) {
+ if (!*inode) {
#ifdef CONFIG_FS_POSIX_ACL
if (md.posix_acl) {
posix_acl_release(md.posix_acl);
md.posix_acl = NULL;
}
#endif
- rc = IS_ERR(*inode) ? PTR_ERR(*inode) : -ENOMEM;
- *inode = NULL;
+ rc = -ENOMEM;
CERROR("new_inode -fatal: rc %d\n", rc);
goto out;
}
@@ -1986,14 +1992,15 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
* 1. proc1: mdt returns a lsm but not granting layout
* 2. layout was changed by another client
* 3. proc2: refresh layout and layout lock granted
- * 4. proc1: to apply a stale layout */
- if (it != NULL && it->d.lustre.it_lock_mode != 0) {
+ * 4. proc1: to apply a stale layout
+ */
+ if (it && it->d.lustre.it_lock_mode != 0) {
struct lustre_handle lockh;
struct ldlm_lock *lock;
lockh.cookie = it->d.lustre.it_lock_handle;
lock = ldlm_handle2lock(&lockh);
- LASSERT(lock != NULL);
+ LASSERT(lock);
if (ldlm_has_layout(lock)) {
struct cl_object_conf conf;
@@ -2008,7 +2015,7 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
}
out:
- if (md.lsm != NULL)
+ if (md.lsm)
obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
md_free_lustre_md(sbi->ll_md_exp, &md);
@@ -2019,14 +2026,13 @@ cleanup:
return rc;
}
-int ll_obd_statfs(struct inode *inode, void *arg)
+int ll_obd_statfs(struct inode *inode, void __user *arg)
{
struct ll_sb_info *sbi = NULL;
struct obd_export *exp;
char *buf = NULL;
struct obd_ioctl_data *data = NULL;
__u32 type;
- __u32 flags;
int len = 0, rc;
if (!inode) {
@@ -2069,8 +2075,7 @@ int ll_obd_statfs(struct inode *inode, void *arg)
goto out_statfs;
}
- flags = (type & LL_STATFS_NODELAY) ? OBD_STATFS_NODELAY : 0;
- rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, &flags);
+ rc = obd_iocontrol(IOC_OBD_STATFS, exp, len, buf, NULL);
if (rc)
goto out_statfs;
out_statfs:
@@ -2101,7 +2106,8 @@ int ll_process_config(struct lustre_cfg *lcfg)
LASSERT(s2lsi((struct super_block *)sb)->lsi_lmd->lmd_magic == LMD_MAGIC);
/* Note we have not called client_common_fill_super yet, so
- proc fns must be able to handle that! */
+ * proc fns must be able to handle that!
+ */
rc = class_process_proc_param(PARAM_LLITE, lvars.obd_vars,
lcfg, sb);
if (rc > 0)
@@ -2111,19 +2117,17 @@ int ll_process_config(struct lustre_cfg *lcfg)
/* this function prepares the md_op_data hint for passing it down to the MD stack. */
struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
- struct inode *i1, struct inode *i2,
- const char *name, int namelen,
- int mode, __u32 opc, void *data)
+ struct inode *i1, struct inode *i2,
+ const char *name, int namelen,
+ int mode, __u32 opc, void *data)
{
- LASSERT(i1 != NULL);
-
if (namelen > ll_i2sbi(i1)->ll_namelen)
return ERR_PTR(-ENAMETOOLONG);
- if (op_data == NULL)
+ if (!op_data)
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
- if (op_data == NULL)
+ if (!op_data)
return ERR_PTR(-ENOMEM);
ll_i2gids(op_data->op_suppgids, i1, i2);
@@ -2143,8 +2147,8 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_cap = cfs_curproc_cap_pack();
op_data->op_bias = 0;
op_data->op_cli_flags = 0;
- if ((opc == LUSTRE_OPC_CREATE) && (name != NULL) &&
- filename_is_volatile(name, namelen, NULL))
+ if ((opc == LUSTRE_OPC_CREATE) && name &&
+ filename_is_volatile(name, namelen, NULL))
op_data->op_bias |= MDS_CREATE_VOLATILE;
op_data->op_opc = opc;
op_data->op_mds = 0;
@@ -2152,7 +2156,8 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
/* If the file is being opened after mknod() (normally due to NFS)
* try to use the default stripe data from parent directory for
- * allocating OST objects. Try to pass the parent FID to MDS. */
+ * allocating OST objects. Try to pass the parent FID to MDS.
+ */
if (opc == LUSTRE_OPC_CREATE && i1 == i2 && S_ISREG(i2->i_mode) &&
!ll_i2info(i2)->lli_has_smd) {
struct ll_inode_info *lli = ll_i2info(i2);
@@ -2179,7 +2184,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry)
{
struct ll_sb_info *sbi;
- LASSERT((seq != NULL) && (dentry != NULL));
+ LASSERT(seq && dentry);
sbi = ll_s2sbi(dentry->d_sb);
if (sbi->ll_flags & LL_SBI_NOLCK)
@@ -2221,8 +2226,8 @@ int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
if (!obd)
return -ENOENT;
- if (copy_to_user((void *)arg, obd->obd_name,
- strlen(obd->obd_name) + 1))
+ if (copy_to_user((void __user *)arg, obd->obd_name,
+ strlen(obd->obd_name) + 1))
return -EFAULT;
return 0;
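
The (void __user *) casts added throughout this series mark pointers that live in the user address space. __user compiles away in a normal build; it only matters to sparse, which then rejects direct dereferences. A kernel-side sketch of the pattern, with the annotation defined as in the compiler.h of this era:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* Sketch: data crosses the user/kernel boundary only via
 * copy_to_user(), which returns the number of bytes NOT copied.
 * (EFAULT comes from the kernel's errno definitions.)
 */
static long name_to_user(unsigned long arg, const char *name, size_t len)
{
	void __user *uptr = (void __user *)arg;

	return copy_to_user(uptr, name, len) ? -EFAULT : 0;
}
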
@@ -2240,10 +2245,11 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
char *ptr;
int len;
- if (buf == NULL) {
+ if (!buf) {
/* this means the caller wants to use static buffer
* and it doesn't care about race. Usually this is
- * in error reporting path */
+ * in error reporting path
+ */
buf = fsname_static;
buflen = sizeof(fsname_static);
}
@@ -2269,9 +2275,9 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
/* this can be called inside spin lock so use GFP_ATOMIC. */
buf = (char *)__get_free_page(GFP_ATOMIC);
- if (buf != NULL) {
+ if (buf) {
dentry = d_find_alias(page->mapping->host);
- if (dentry != NULL)
+ if (dentry)
path = dentry_path_raw(dentry, buf, PAGE_SIZE);
}
@@ -2282,9 +2288,9 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
PFID(&obj->cob_header.coh_lu.loh_fid),
(path && !IS_ERR(path)) ? path : "", ioret);
- if (dentry != NULL)
+ if (dentry)
dput(dentry);
- if (buf != NULL)
+ if (buf)
free_page((unsigned long)buf);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index bbae95c9feed..5b484e62ffd0 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -54,11 +54,11 @@
static const struct vm_operations_struct ll_file_vm_ops;
void policy_from_vma(ldlm_policy_data_t *policy,
- struct vm_area_struct *vma, unsigned long addr,
- size_t count)
+ struct vm_area_struct *vma, unsigned long addr,
+ size_t count)
{
policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
- (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+ (vma->vm_pgoff << PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~CFS_PAGE_MASK;
}
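
policy_from_vma() turns a faulting virtual range into file-relative byte offsets: the page-aligned offset inside the VMA plus the file position where the mapping begins (vm_pgoff is counted in pages). A userspace-style sketch of the same arithmetic, assuming CFS_PAGE_MASK is the usual round-down mask ~(PAGE_SIZE - 1) and 4 KiB pages; names are illustrative:

/* Example: addr - vm_start = 0x1234 rounds down to 0x1000; with
 * vm_pgoff = 2 the extent starts at file offset 0x3000, and end is
 * padded to the last byte of its page.
 */
#include <stdint.h>
#include <stddef.h>

#define XPAGE_SHIFT	12
#define XPAGE_SIZE	(1UL << XPAGE_SHIFT)
#define XPAGE_MASK	(~(XPAGE_SIZE - 1))

struct extent { uint64_t start, end; };	/* inclusive byte range */

static struct extent extent_from_vma(unsigned long vm_start,
				     unsigned long vm_pgoff,
				     unsigned long addr, size_t count)
{
	struct extent e;

	e.start = ((addr - vm_start) & XPAGE_MASK) +
		  ((uint64_t)vm_pgoff << XPAGE_SHIFT);
	e.end = (e.start + count - 1) | ~XPAGE_MASK;
	return e;
}
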
@@ -72,7 +72,7 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
LASSERT(!down_write_trylock(&mm->mmap_sem));
for (vma = find_vma(mm, addr);
- vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
+ vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
vma->vm_flags & VM_SHARED) {
ret = vma;
@@ -119,13 +119,13 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
*/
env = cl_env_nested_get(nest);
if (IS_ERR(env))
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-EINVAL);
*env_ret = env;
io = ccc_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
- LASSERT(io->ci_obj != NULL);
+ LASSERT(io->ci_obj);
fio = &io->u.ci_fault;
fio->ft_index = index;
@@ -136,7 +136,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
* the kernel will not read other pages not covered by ldlm in
* filemap_nopage. we do our readahead in ll_readpage.
*/
- if (ra_flags != NULL)
+ if (ra_flags)
*ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
vma->vm_flags &= ~VM_SEQ_READ;
vma->vm_flags |= VM_RAND_READ;
@@ -151,8 +151,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
LASSERT(cio->cui_cl.cis_io == io);
- /* mmap lock must be MANDATORY it has to cache
- * pages. */
+ /* mmap lock must be MANDATORY because it has to cache pages. */
io->ci_lockreq = CILR_MANDATORY;
cio->cui_fd = fd;
} else {
@@ -178,8 +177,6 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
struct inode *inode;
struct ll_inode_info *lli;
- LASSERT(vmpage != NULL);
-
io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
if (IS_ERR(io)) {
result = PTR_ERR(io);
@@ -201,7 +198,8 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
/* we grab lli_trunc_sem to exclude truncate case.
* Otherwise, we could add dirty pages into osc cache
- * while truncate is on-going. */
+ * while truncate is on-going.
+ */
inode = ccc_object_inode(io->ci_obj);
lli = ll_i2info(inode);
down_read(&lli->lli_trunc_sem);
@@ -217,12 +215,13 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
struct ll_inode_info *lli = ll_i2info(inode);
lock_page(vmpage);
- if (vmpage->mapping == NULL) {
+ if (!vmpage->mapping) {
unlock_page(vmpage);
/* page was truncated and lock was cancelled, return
* ENODATA so that VM_FAULT_NOPAGE will be returned
- * to handle_mm_fault(). */
+ * to handle_mm_fault().
+ */
if (result == 0)
result = -ENODATA;
} else if (!PageDirty(vmpage)) {
@@ -315,13 +314,14 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
result = cl_io_loop(env, io);
/* ft_flags are only valid if we reached
- * the call to filemap_fault */
+ * the call to filemap_fault
+ */
if (vio->u.fault.fault.ft_flags_valid)
fault_ret = vio->u.fault.fault.ft_flags;
vmpage = vio->u.fault.ft_vmpage;
- if (result != 0 && vmpage != NULL) {
- page_cache_release(vmpage);
+ if (result != 0 && vmpage) {
+ put_page(vmpage);
vmf->page = NULL;
}
}
@@ -344,9 +344,10 @@ static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
int result;
sigset_t set;
- /* Only SIGKILL and SIGTERM is allowed for fault/nopage/mkwrite
+ /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
* so that it can be killed by admin but not cause segfault by
- * other signals. */
+ * other signals.
+ */
set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
restart:
@@ -357,9 +358,9 @@ restart:
/* check if this page has been truncated */
lock_page(vmpage);
- if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+ if (unlikely(!vmpage->mapping)) { /* unlucky */
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
vmf->page = NULL;
if (!printed && ++count > 16) {
@@ -447,7 +448,8 @@ static void ll_vm_close(struct vm_area_struct *vma)
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
- * nopage's reference passing to the pte */
+ * nopage's reference passing to the pte
+ */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
int rc = -ENOENT;
@@ -455,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
LASSERTF(last > first, "last %llu first %llu\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
- unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
+ unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
last - first + 1, 0);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 18aab25f9cd9..193aab879709 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -105,7 +105,8 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
return ERR_PTR(rc);
/* Because inode is NULL, ll_prep_md_op_data can not
- * be used here. So we allocate op_data ourselves */
+ * be used here. So we allocate op_data ourselves
+ */
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
if (!op_data)
return ERR_PTR(-ENOMEM);
@@ -141,10 +142,11 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
struct inode *inode;
struct dentry *result;
- CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid));
if (!fid_is_sane(fid))
return ERR_PTR(-ESTALE);
+ CDEBUG(D_INFO, "Get dentry for fid: " DFID "\n", PFID(fid));
+
inode = search_inode_for_lustre(sb, fid);
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -160,7 +162,7 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
* We have to find the parent to tell MDS how to init lov objects.
*/
if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd &&
- parent != NULL) {
+ parent && !fid_is_zero(parent)) {
struct ll_inode_info *lli = ll_i2info(inode);
spin_lock(&lli->lli_lock);
@@ -174,8 +176,6 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
return result;
}
-#define LUSTRE_NFS_FID 0x97
-
/**
* \a connectable - whether nfsd will connect itself or this should be
* done at lustre
@@ -188,20 +188,25 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
struct inode *parent)
{
+ int fileid_len = sizeof(struct lustre_nfs_fid) / 4;
struct lustre_nfs_fid *nfs_fid = (void *)fh;
- CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n",
- inode->i_ino, PFID(ll_inode2fid(inode)), *plen,
- (int)sizeof(struct lustre_nfs_fid));
+ CDEBUG(D_INFO, "encoding for (%lu," DFID ") maxlen=%d minlen=%d\n",
+ inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len);
- if (*plen < sizeof(struct lustre_nfs_fid) / 4)
- return 255;
+ if (*plen < fileid_len) {
+ *plen = fileid_len;
+ return FILEID_INVALID;
+ }
nfs_fid->lnf_child = *ll_inode2fid(inode);
- nfs_fid->lnf_parent = *ll_inode2fid(parent);
- *plen = sizeof(struct lustre_nfs_fid) / 4;
+ if (parent)
+ nfs_fid->lnf_parent = *ll_inode2fid(parent);
+ else
+ fid_zero(&nfs_fid->lnf_parent);
+ *plen = fileid_len;
- return LUSTRE_NFS_FID;
+ return FILEID_LUSTRE;
}
static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
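
The rewritten ll_encode_fh() above adopts the generic encode_fh convention: handle lengths are counted in 32-bit words, and an undersized buffer gets FILEID_INVALID back with *plen set to the required length so the caller can retry, replacing the private LUSTRE_NFS_FID (0x97) magic. A sketch of that contract from the caller's side (names illustrative):

/* Sketch: probing a filesystem's encode_fh. FILEID_INVALID (255)
 * means "buffer too small"; *plen then holds the needed word count.
 */
enum { XFILEID_INVALID = 0xff };

static int probe_fh(int (*encode)(unsigned int *fh, int *plen),
		    unsigned int *fh, int maxwords, int *used)
{
	int len = maxwords;
	int type = encode(fh, &len);

	*used = len;		/* on failure: words required to retry */
	return type == XFILEID_INVALID ? -1 : type;
}
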
@@ -209,7 +214,8 @@ static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
unsigned type)
{
/* It is a hack to access lde_fid for comparison with lgd_fid.
- * So the input 'name' must be part of the 'lu_dirent'. */
+ * So the input 'name' must be part of the 'lu_dirent'.
+ */
struct lu_dirent *lde = container_of0(name, struct lu_dirent, lde_name);
struct ll_getname_data *lgd =
container_of(ctx, struct ll_getname_data, ctx);
@@ -259,7 +265,7 @@ static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid,
{
struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
- if (fh_type != LUSTRE_NFS_FID)
+ if (fh_type != FILEID_LUSTRE)
return ERR_PTR(-EPROTO);
return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent);
@@ -270,7 +276,7 @@ static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
{
struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
- if (fh_type != LUSTRE_NFS_FID)
+ if (fh_type != FILEID_LUSTRE)
return ERR_PTR(-EPROTO);
return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL);
@@ -292,8 +298,8 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
sbi = ll_s2sbi(dir->i_sb);
- CDEBUG(D_INFO, "getting parent for (%lu,"DFID")\n",
- dir->i_ino, PFID(ll_inode2fid(dir)));
+ CDEBUG(D_INFO, "getting parent for (%lu," DFID ")\n",
+ dir->i_ino, PFID(ll_inode2fid(dir)));
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc != 0)
@@ -314,8 +320,8 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
LASSERT(body->valid & OBD_MD_FLID);
- CDEBUG(D_INFO, "parent for "DFID" is "DFID"\n",
- PFID(ll_inode2fid(dir)), PFID(&body->fid1));
+ CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
+ PFID(ll_inode2fid(dir)), PFID(&body->fid1));
result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
@@ -323,10 +329,10 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
return result;
}
-struct export_operations lustre_export_operations = {
- .get_parent = ll_get_parent,
- .encode_fh = ll_encode_fh,
- .get_name = ll_get_name,
+const struct export_operations lustre_export_operations = {
+ .get_parent = ll_get_parent,
+ .encode_fh = ll_encode_fh,
+ .get_name = ll_get_name,
.fh_to_dentry = ll_fh_to_dentry,
.fh_to_parent = ll_fh_to_parent,
};
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
index b27c3f2fcd02..8509b07cb5c7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
+++ b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
@@ -98,7 +98,7 @@ static void rce_free(struct rmtacl_ctl_entry *rce)
}
static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
- pid_t key)
+ pid_t key)
{
struct rmtacl_ctl_entry *rce;
struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
@@ -125,12 +125,12 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
struct rmtacl_ctl_entry *rce, *e;
rce = rce_alloc(key, ops);
- if (rce == NULL)
+ if (!rce)
return -ENOMEM;
spin_lock(&rct->rct_lock);
e = __rct_search(rct, key);
- if (unlikely(e != NULL)) {
+ if (unlikely(e)) {
CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n",
(int)key, ops);
rce_free(e);
@@ -172,7 +172,7 @@ void rct_fini(struct rmtacl_ctl_table *rct)
for (i = 0; i < RCE_HASHES; i++)
while (!list_empty(&rct->rct_entries[i])) {
rce = list_entry(rct->rct_entries[i].next,
- struct rmtacl_ctl_entry, rce_list);
+ struct rmtacl_ctl_entry, rce_list);
rce_free(rce);
}
spin_unlock(&rct->rct_lock);
@@ -208,12 +208,12 @@ void ee_free(struct eacl_entry *ee)
}
static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
- struct lu_fid *fid, int type)
+ struct lu_fid *fid, int type)
{
struct eacl_entry *ee;
struct list_head *head = &et->et_entries[ee_hashfunc(key)];
- LASSERT(fid != NULL);
+ LASSERT(fid);
list_for_each_entry(ee, head, ee_list)
if (ee->ee_key == key) {
if (lu_fid_eq(&ee->ee_fid, fid) &&
@@ -256,12 +256,12 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
struct eacl_entry *ee, *e;
ee = ee_alloc(key, fid, type, header);
- if (ee == NULL)
+ if (!ee)
return -ENOMEM;
spin_lock(&et->et_lock);
e = __et_search_del(et, key, fid, type);
- if (unlikely(e != NULL)) {
+ if (unlikely(e)) {
CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n",
(int)key, PFID(fid), type);
ee_free(e);
@@ -290,7 +290,7 @@ void et_fini(struct eacl_table *et)
for (i = 0; i < EE_HASHES; i++)
while (!list_empty(&et->et_entries[i])) {
ee = list_entry(et->et_entries[i].next,
- struct eacl_entry, ee_list);
+ struct eacl_entry, ee_list);
ee_free(ee);
}
spin_unlock(&et->et_lock);
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 871924b3f2e7..f169c0db63b4 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -211,15 +211,14 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
return io->ci_result;
io->ci_lockreq = CILR_NEVER;
- LASSERT(head != NULL);
rw = head->bi_rw;
- for (bio = head; bio != NULL; bio = bio->bi_next) {
+ for (bio = head; bio; bio = bio->bi_next) {
LASSERT(rw == bio->bi_rw);
offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
bio_for_each_segment(bvec, bio, iter) {
BUG_ON(bvec.bv_offset != 0);
- BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
+ BUG_ON(bvec.bv_len != PAGE_SIZE);
pages[page_count] = bvec.bv_page;
offsets[page_count] = offset;
@@ -233,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
page_count);
- pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
+ pvec->ldp_size = page_count << PAGE_SHIFT;
pvec->ldp_nr = page_count;
/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@@ -297,7 +296,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
spin_lock_irq(&lo->lo_lock);
first = lo->lo_bio;
- if (unlikely(first == NULL)) {
+ if (unlikely(!first)) {
spin_unlock_irq(&lo->lo_lock);
return 0;
}
@@ -308,7 +307,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
rw = first->bi_rw;
bio = &lo->lo_bio;
while (*bio && (*bio)->bi_rw == rw) {
- CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
+ CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u\n",
(unsigned long long)(*bio)->bi_iter.bi_sector,
(*bio)->bi_iter.bi_size,
page_count, (*bio)->bi_vcnt);
@@ -458,7 +457,7 @@ static int loop_thread(void *data)
total_count, times, total_count / times);
}
- LASSERT(bio != NULL);
+ LASSERT(bio);
LASSERT(count <= atomic_read(&lo->lo_pending));
loop_handle_bio(lo, bio);
atomic_sub(count, &lo->lo_pending);
@@ -508,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
- lo->lo_blocksize = PAGE_CACHE_SIZE;
+ lo->lo_blocksize = PAGE_SIZE;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@@ -526,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
lo->lo_queue->queuedata = lo;
/* queue parameters */
- CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
+ CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
blk_queue_logical_block_size(lo->lo_queue,
- (unsigned short)PAGE_CACHE_SIZE);
+ (unsigned short)PAGE_SIZE);
blk_queue_max_hw_sectors(lo->lo_queue,
- LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
+ LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
set_capacity(disks[lo->lo_number], size);
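
The queue limits above are all page-denominated: one memory page is advertised as the logical block size, and the hardware-sector cap converts LLOOP_MAX_SEGMENTS pages into 512-byte block-layer sectors by shifting by (PAGE_SHIFT - 9). A worked sketch of the conversion, assuming 4 KiB pages and an illustrative segment count (not the real LLOOP_MAX_SEGMENTS value):

/* Page <-> 512-byte-sector arithmetic used when sizing the queue.
 * With 4 KiB pages, one page is 8 sectors.
 */
enum {
	XSECTOR_SHIFT	 = 9,
	XPG_SHIFT	 = 12,
	XMAX_SEGMENTS	 = 16,
	sectors_per_page = 1 << (XPG_SHIFT - XSECTOR_SHIFT),	/* 8 */
	max_hw_sectors	 = XMAX_SEGMENTS << (XPG_SHIFT - XSECTOR_SHIFT),
};
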
@@ -560,7 +559,7 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
if (lo->lo_refcnt > count) /* we needed one fd for the ioctl */
return -EBUSY;
- if (filp == NULL)
+ if (!filp)
return -EINVAL;
spin_lock_irq(&lo->lo_lock);
@@ -625,18 +624,18 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
case LL_IOC_LLOOP_INFO: {
struct lu_fid fid;
- if (lo->lo_backing_file == NULL) {
+ if (!lo->lo_backing_file) {
err = -ENOENT;
break;
}
- if (inode == NULL)
+ if (!inode)
inode = file_inode(lo->lo_backing_file);
if (lo->lo_state == LLOOP_BOUND)
fid = ll_i2info(inode)->lli_fid;
else
fid_zero(&fid);
- if (copy_to_user((struct lu_fid *)arg, &fid, sizeof(fid)))
+ if (copy_to_user((void __user *)arg, &fid, sizeof(fid)))
err = -EFAULT;
break;
}
@@ -676,7 +675,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
if (magic != ll_iocontrol_magic)
return LLIOC_CONT;
- if (disks == NULL) {
+ if (!disks) {
err = -ENODEV;
goto out1;
}
@@ -708,7 +707,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
dev = MKDEV(lloop_major, lo->lo_number);
/* quit if the used pointer is writable */
- if (put_user((long)old_encode_dev(dev), (long *)arg)) {
+ if (put_user((long)old_encode_dev(dev), (long __user *)arg)) {
err = -EFAULT;
goto out;
}
@@ -793,7 +792,7 @@ static int __init lloop_init(void)
lloop_major, max_loop);
ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
- if (ll_iocontrol_magic == NULL)
+ if (!ll_iocontrol_magic)
goto out_mem1;
loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL);
@@ -872,11 +871,12 @@ static void lloop_exit(void)
kfree(loop_dev);
}
-module_init(lloop_init);
-module_exit(lloop_exit);
-
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "maximum number of lloop devices");
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre virtual block device");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
+
+module_init(lloop_init);
+module_exit(lloop_exit);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index f134ad9d23f0..27ab1261400e 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -43,7 +43,7 @@
#include "llite_internal.h"
#include "vvp_internal.h"
-/* /proc/lustre/llite mount point registration */
+/* debugfs llite mount point registration */
static struct file_operations ll_rw_extents_stats_fops;
static struct file_operations ll_rw_extents_stats_pp_fops;
static struct file_operations ll_rw_offset_stats_fops;
@@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_pages;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
if (rc)
return rc;
- pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+ pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number > totalram_pages / 2) {
CERROR("can't set file readahead more than %lu MB\n",
- totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+ totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
}
@@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@@ -345,10 +345,11 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
return rc;
/* Cap this at the current max readahead window size, the readahead
- * algorithm does this anyway so it's pointless to set it larger. */
+ * algorithm does this anyway so it's pointless to set it larger.
+ */
if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
- sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
+ sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
return -ERANGE;
}
@@ -365,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
- int shift = 20 - PAGE_CACHE_SHIFT;
+ int shift = 20 - PAGE_SHIFT;
int max_cached_mb;
int unused_mb;
@@ -404,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
return -EFAULT;
kernbuf[count] = 0;
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
kernbuf;
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -414,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
- totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+ totalram_pages >> (20 - PAGE_SHIFT));
return -ERANGE;
}
@@ -453,7 +454,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
if (diff <= 0)
break;
- if (sbi->ll_dt_exp == NULL) { /* being initialized */
+ if (!sbi->ll_dt_exp) { /* being initialized */
rc = -ENODEV;
break;
}
@@ -461,9 +462,9 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
/* difficult - have to ask OSCs to drop LRU slots. */
tmp = diff << 1;
rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
- sizeof(KEY_CACHE_LRU_SHRINK),
- KEY_CACHE_LRU_SHRINK,
- sizeof(tmp), &tmp, NULL);
+ sizeof(KEY_CACHE_LRU_SHRINK),
+ KEY_CACHE_LRU_SHRINK,
+ sizeof(tmp), &tmp, NULL);
if (rc < 0)
break;
}
@@ -966,9 +967,9 @@ int ldebugfs_register_mountpoint(struct dentry *parent,
name[MAX_STRING_SIZE] = '\0';
- LASSERT(sbi != NULL);
- LASSERT(mdc != NULL);
- LASSERT(osc != NULL);
+ LASSERT(sbi);
+ LASSERT(mdc);
+ LASSERT(osc);
/* Get fsname */
len = strlen(lsi->lsi_lmd->lmd_profile);
@@ -999,7 +1000,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent,
CWARN("Error adding the extent_stats file\n");
rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
- "extents_stats_per_process",
+ "extents_stats_per_process",
0644, &ll_rw_extents_stats_pp_fops, sbi);
if (rc)
CWARN("Error adding the extents_stats_per_process file\n");
@@ -1012,7 +1013,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent,
/* File operations stats */
sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
LPROCFS_STATS_FLAG_NONE);
- if (sbi->ll_stats == NULL) {
+ if (!sbi->ll_stats) {
err = -ENOMEM;
goto out;
}
@@ -1033,13 +1034,13 @@ int ldebugfs_register_mountpoint(struct dentry *parent,
llite_opcode_table[id].opname, ptr);
}
err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
- sbi->ll_stats);
+ sbi->ll_stats);
if (err)
goto out;
sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
LPROCFS_STATS_FLAG_NONE);
- if (sbi->ll_ra_stats == NULL) {
+ if (!sbi->ll_ra_stats) {
err = -ENOMEM;
goto out;
}
@@ -1049,7 +1050,7 @@ int ldebugfs_register_mountpoint(struct dentry *parent,
ra_stat_string[id], "pages");
err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
- sbi->ll_ra_stats);
+ sbi->ll_ra_stats);
if (err)
goto out;
@@ -1103,7 +1104,7 @@ void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi)
#define pct(a, b) (b ? a * 100 / b : 0)
static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
- struct seq_file *seq, int which)
+ struct seq_file *seq, int which)
{
unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
unsigned long start, end, r, w;
@@ -1503,5 +1504,5 @@ LPROC_SEQ_FOPS(ll_rw_offset_stats);
void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
{
- lvars->obd_vars = lprocfs_llite_obd_vars;
+ lvars->obd_vars = lprocfs_llite_obd_vars;
}
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index da5f443a0768..f8f98e4e8258 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -118,16 +118,16 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
ll_read_inode2(inode, md);
if (S_ISREG(inode->i_mode) &&
- ll_i2info(inode)->lli_clob == NULL) {
+ !ll_i2info(inode)->lli_clob) {
CDEBUG(D_INODE,
- "%s: apply lsm %p to inode "DFID".\n",
- ll_get_fsname(sb, NULL, 0), md->lsm,
- PFID(ll_inode2fid(inode)));
+ "%s: apply lsm %p to inode " DFID ".\n",
+ ll_get_fsname(sb, NULL, 0), md->lsm,
+ PFID(ll_inode2fid(inode)));
rc = cl_file_inode_init(inode, md);
}
if (rc != 0) {
iget_failed(inode);
- inode = ERR_PTR(rc);
+ inode = NULL;
} else
unlock_new_inode(inode);
} else if (!(inode->i_state & (I_FREEING | I_CLEAR)))
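
ll_iget() now reports failure with a plain NULL instead of ERR_PTR(rc), which is why callers elsewhere in this series switched from IS_ERR() checks to !ptr and supply their own errno (-EBADF, -ENOMEM). For reference, the two kernel conventions, in the shape of include/linux/err.h (MAX_ERRNO is 4095):

/* ERR_PTR encodes a negative errno in the pointer itself; NULL
 * carries no error code, so the caller must choose one.
 */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}
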
@@ -180,10 +180,11 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
__u64 bits = lock->l_policy_data.l_inodebits.bits;
/* Inode is set to lock->l_resource->lr_lvb_inode
- * for mdc - bug 24555 */
- LASSERT(lock->l_ast_data == NULL);
+ * for mdc - bug 24555
+ */
+ LASSERT(!lock->l_ast_data);
- if (inode == NULL)
+ if (!inode)
break;
/* Invalidate all dentries associated with this inode */
@@ -202,7 +203,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
}
/* For OPEN locks we differentiate between lock modes
- * LCK_CR, LCK_CW, LCK_PR - bug 22891 */
+ * LCK_CR, LCK_CW, LCK_PR - bug 22891
+ */
if (bits & MDS_INODELOCK_OPEN)
ll_have_md_lock(inode, &bits, lock->l_req_mode);
@@ -260,7 +262,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
}
if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
- inode->i_sb->s_root != NULL &&
+ inode->i_sb->s_root &&
!is_root_inode(inode))
ll_invalidate_aliases(inode);
@@ -285,15 +287,11 @@ __u32 ll_i2suppgid(struct inode *i)
/* Pack the required supplementary groups into the supplied groups array.
* If we don't need to use the groups from the target inode(s) then we
* instead pack one or more groups from the user's supplementary group
- * array in case it might be useful. Not needed if doing an MDS-side upcall. */
+ * array in case it might be useful. Not needed if doing an MDS-side upcall.
+ */
void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
{
-#if 0
- int i;
-#endif
-
- LASSERT(i1 != NULL);
- LASSERT(suppgids != NULL);
+ LASSERT(i1);
suppgids[0] = ll_i2suppgid(i1);
@@ -301,22 +299,6 @@ void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
suppgids[1] = ll_i2suppgid(i2);
else
suppgids[1] = -1;
-
-#if 0
- for (i = 0; i < current_ngroups; i++) {
- if (suppgids[0] == -1) {
- if (current_groups[i] != suppgids[1])
- suppgids[0] = current_groups[i];
- continue;
- }
- if (suppgids[1] == -1) {
- if (current_groups[i] != suppgids[0])
- suppgids[1] = current_groups[i];
- continue;
- }
- break;
- }
-#endif
}
/*
@@ -409,7 +391,8 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
int rc = 0;
/* NB 1 request reference will be taken away by ll_intent_lock()
- * when I return */
+ * when I return
+ */
CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
it->d.lustre.it_disposition);
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
@@ -420,13 +403,14 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
/* We used to query real size from OSTs here, but actually
- this is not needed. For stat() calls size would be updated
- from subsequent do_revalidate()->ll_inode_revalidate_it() in
- 2.4 and
- vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
- Everybody else who needs correct file size would call
- ll_glimpse_size or some equivalent themselves anyway.
- Also see bug 7198. */
+ * this is not needed. For stat() calls size would be updated
+ * from subsequent do_revalidate()->ll_inode_revalidate_it() in
+ * 2.4 and
+ * vfs_getattr_it->ll_getattr()->ll_inode_revalidate_it() in 2.6
+ * Everybody else who needs correct file size would call
+ * ll_glimpse_size or some equivalent themselves anyway.
+ * Also see bug 7198.
+ */
}
/* Only hash *de if it is unhashed (new dentry).
@@ -443,9 +427,10 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
*de = alias;
} else if (!it_disposition(it, DISP_LOOKUP_NEG) &&
!it_disposition(it, DISP_OPEN_CREATE)) {
- /* With DISP_OPEN_CREATE dentry will
- instantiated in ll_create_it. */
- LASSERT(d_inode(*de) == NULL);
+ /* With DISP_OPEN_CREATE dentry will be
+ * instantiated in ll_create_it.
+ */
+ LASSERT(!d_inode(*de));
d_instantiate(*de, inode);
}
@@ -498,7 +483,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
if (d_mountpoint(dentry))
CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
- if (it == NULL || it->it_op == IT_GETXATTR)
+ if (!it || it->it_op == IT_GETXATTR)
it = &lookup_it;
if (it->it_op == IT_GETATTR) {
@@ -557,7 +542,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
out:
if (req)
ptlrpc_req_finished(req);
- if (it->it_op == IT_GETATTR && (retval == NULL || retval == dentry))
+ if (it->it_op == IT_GETATTR && (!retval || retval == dentry))
ll_statahead_mark(parent, dentry);
return retval;
}
@@ -582,7 +567,7 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
itp = &it;
de = ll_lookup_it(parent, dentry, itp, 0);
- if (itp != NULL)
+ if (itp)
ll_intent_release(itp);
return de;
@@ -622,7 +607,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
de = ll_lookup_it(dir, dentry, it, lookup_flags);
if (IS_ERR(de))
rc = PTR_ERR(de);
- else if (de != NULL)
+ else if (de)
dentry = de;
if (!rc) {
@@ -631,7 +616,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
rc = ll_create_it(dir, dentry, mode, it);
if (rc) {
/* We dget in ll_splice_alias. */
- if (de != NULL)
+ if (de)
dput(de);
goto out_release;
}
@@ -655,7 +640,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
/* We dget in ll_splice_alias. finish_open takes
* care of dget for fd open.
*/
- if (de != NULL)
+ if (de)
dput(de);
}
} else {
@@ -693,7 +678,8 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
/* We asked for a lock on the directory, but were granted a
* lock on the inode. Since we finally have an inode pointer,
- * stuff it in the lock. */
+ * stuff it in the lock.
+ */
CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n",
inode, inode->i_ino, inode->i_generation);
ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
@@ -767,7 +753,7 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry,
int tgt_len = 0;
int err;
- if (unlikely(tgt != NULL))
+ if (unlikely(tgt))
tgt_len = strlen(tgt) + 1;
op_data = ll_prep_md_op_data(NULL, dir, NULL,
@@ -888,10 +874,11 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
/* The MDS sent back the EA because we unlinked the last reference
* to this file. Use this EA to unlink the objects on the OST.
* It's opaque so we don't swab here; we leave it to obd_unpackmd() to
- * check it is complete and sensible. */
+ * check it is complete and sensible.
+ */
eadata = req_capsule_server_sized_get(&request->rq_pill, &RMF_MDT_MD,
body->eadatasize);
- LASSERT(eadata != NULL);
+ LASSERT(eadata);
rc = obd_unpackmd(ll_i2dtexp(dir), &lsm, eadata, body->eadatasize);
if (rc < 0) {
@@ -900,8 +887,8 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
}
LASSERT(rc >= sizeof(*lsm));
- oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
- if (oa == NULL) {
+ oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
+ if (!oa) {
rc = -ENOMEM;
goto out_free_memmd;
}
@@ -917,7 +904,7 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
&RMF_LOGCOOKIES,
sizeof(struct llog_cookie) *
lsm->lsm_stripe_count);
- if (oti.oti_logcookies == NULL) {
+ if (!oti.oti_logcookies) {
oa->o_valid &= ~OBD_MD_FLCOOKIE;
body->valid &= ~OBD_MD_FLCOOKIE;
}
@@ -938,7 +925,8 @@ out:
/* ll_unlink() doesn't update the inode with the new link count.
* Instead, ll_ddelete() and ll_d_iput() will update it based upon if there
* is any lock existing. They will recycle dentries and inodes based upon locks
- * too. b=20433 */
+ * too. b=20433
+ */
static int ll_unlink(struct inode *dir, struct dentry *dentry)
{
struct ptlrpc_request *request = NULL;
@@ -1028,7 +1016,7 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry,
dir, 3000, oldname);
err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO,
- 0, LUSTRE_OPC_SYMLINK);
+ 0, LUSTRE_OPC_SYMLINK);
if (!err)
ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
index fe4a72268e3a..e9d25317cd28 100644
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ b/drivers/staging/lustre/lustre/llite/remote_perm.c
@@ -61,7 +61,7 @@ static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
{
struct ll_remote_perm *lrp;
- lrp = kmem_cache_alloc(ll_remote_perm_cachep, GFP_KERNEL | __GFP_ZERO);
+ lrp = kmem_cache_zalloc(ll_remote_perm_cachep, GFP_KERNEL);
if (lrp)
INIT_HLIST_NODE(&lrp->lrp_list);
return lrp;
@@ -82,7 +82,7 @@ static struct hlist_head *alloc_rmtperm_hash(void)
struct hlist_head *hash;
int i;
- hash = kmem_cache_alloc(ll_rmtperm_hash_cachep, GFP_NOFS | __GFP_ZERO);
+ hash = kmem_cache_zalloc(ll_rmtperm_hash_cachep, GFP_NOFS);
if (!hash)
return NULL;
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index f355474967d6..edab6c5b7e50 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -70,9 +70,9 @@ static void ll_cl_fini(struct ll_cl_context *lcc)
struct cl_page *page = lcc->lcc_page;
LASSERT(lcc->lcc_cookie == current);
- LASSERT(env != NULL);
+ LASSERT(env);
- if (page != NULL) {
+ if (page) {
lu_ref_del(&page->cp_reference, "cl_io", io);
cl_page_put(env, page);
}
@@ -97,7 +97,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
int result = 0;
clob = ll_i2info(vmpage->mapping->host)->lli_clob;
- LASSERT(clob != NULL);
+ LASSERT(clob);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
@@ -111,7 +111,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
cio = ccc_env_io(env);
io = cio->cui_cl.cis_io;
- if (io == NULL && create) {
+ if (!io && create) {
struct inode *inode = vmpage->mapping->host;
loff_t pos;
@@ -120,7 +120,8 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
/* this is too bad. Someone is trying to write the
* page w/o holding inode mutex. This means we can
- * add dirty pages into cache during truncate */
+ * add dirty pages into cache during truncate
+ */
CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
current->comm);
dump_stack();
@@ -145,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
*/
io->ci_lockreq = CILR_NEVER;
- pos = vmpage->index << PAGE_CACHE_SHIFT;
+ pos = vmpage->index << PAGE_SHIFT;
/* Create a temp IO to serve write. */
- result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
+ result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
if (result == 0) {
cio->cui_fd = LUSTRE_FPRIVATE(file);
cio->cui_iter = NULL;
@@ -163,12 +164,11 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
}
lcc->lcc_io = io;
- if (io == NULL)
+ if (!io)
result = -EIO;
if (result == 0) {
struct cl_page *page;
- LASSERT(io != NULL);
LASSERT(io->ci_state == CIS_IO_GOING);
LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
page = cl_page_find(env, clob, vmpage->index, vmpage,
@@ -240,7 +240,8 @@ int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
ll_cl_fini(lcc);
}
/* returning 0 in prepare assumes commit must be called
- * afterwards */
+ * afterwards
+ */
} else {
result = PTR_ERR(lcc);
}
@@ -296,8 +297,8 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
* to get an ra budget that is larger than the remaining readahead pages
* and reach here at exactly the same time. They will compute \a ret to
* consume the remaining pages, but will fail at atomic_add_return() and
- * get a zero ra window, although there is still ra space remaining. - Jay */
-
+ * get a zero ra window, although there is still ra space remaining. - Jay
+ */
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
struct ra_io_arg *ria,
unsigned long pages)
@@ -307,7 +308,8 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
/* If read-ahead pages left are less than 1M, do not do read-ahead,
* otherwise it will form small read RPCs (< 1M), which hurt server
- * performance a lot. */
+ * performance a lot.
+ */
ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) {
ret = 0;
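The budget logic described in the two comments above reduces to roughly
the following sketch (hypothetical helper; the real ll_ra_count_get()
additionally applies the 1M floor and the strided-read trimming shown in
this hunk):

	/* Reserve up to "want" pages against a shared cap. On the race
	 * described above, atomic_add_return() overshoots "max", the
	 * reservation is rolled back, and the caller gets a zero window
	 * even though budget remains.
	 */
	static unsigned long ra_budget_get(atomic_t *cur, long max, long want)
	{
		long got = min(max - (long)atomic_read(cur), want);

		if (got <= 0)
			return 0;
		if (atomic_add_return(got, cur) > max) {
			atomic_sub(got, cur);
			got = 0;
		}
		return got;
	}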
@@ -324,7 +326,8 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
* branch is more expensive than subtracting zero from the result.
*
* Strided read is left unaligned to avoid small fragments beyond
- * the RPC boundary from needing an extra read RPC. */
+ * the RPC boundary from needing an extra read RPC.
+ */
if (ria->ria_pages == 0) {
long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
@@ -364,7 +367,7 @@ void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
#define RAS_CDEBUG(ras) \
CDEBUG(D_READA, \
"lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu" \
- "csr %lu sf %lu sp %lu sl %lu \n", \
+ "csr %lu sf %lu sp %lu sl %lu\n", \
ras->ras_last_readpage, ras->ras_consecutive_requests, \
ras->ras_consecutive_pages, ras->ras_window_start, \
ras->ras_window_len, ras->ras_next_readahead, \
@@ -378,9 +381,9 @@ static int index_in_window(unsigned long index, unsigned long point,
unsigned long start = point - before, end = point + after;
if (start > point)
- start = 0;
+ start = 0;
if (end < point)
- end = ~0;
+ end = ~0;
return start <= index && index <= end;
}
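The two clamps above are wraparound guards, not dead stores; a worked
example on an unsigned type:

	/* index_in_window(3, 5, 8, 8): start = 5 - 8 wraps to a huge
	 * unsigned value, so "start > point" fires and the window is
	 * clamped to [0, 13]; the call then returns 1 for index 3.
	 * Symmetrically, "end < point" catches point + after overflowing,
	 * clamping end to ~0.
	 */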
@@ -473,7 +476,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
const char *msg = NULL;
vmpage = grab_cache_page_nowait(mapping, index);
- if (vmpage != NULL) {
+ if (vmpage) {
/* Check if vmpage was truncated or reclaimed */
if (vmpage->mapping == mapping) {
page = cl_page_find(env, clob, vmpage->index,
@@ -495,12 +498,12 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
}
if (rc != 1)
unlock_page(vmpage);
- page_cache_release(vmpage);
+ put_page(vmpage);
} else {
which = RA_STAT_FAILED_GRAB_PAGE;
msg = "g_c_p_n failed";
}
- if (msg != NULL) {
+ if (msg) {
ll_ra_stats_inc(mapping, which);
CDEBUG(D_READA, "%s\n", msg);
}
@@ -515,14 +518,16 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
* know what the actual RPC size is. If this needs to change, it makes more
* sense to tune the i_blkbits value for the file based on the OSTs it is
- * striped over, rather than having a constant value for all files here. */
+ * striped over, rather than having a constant value for all files here.
+ */
-/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
+/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
* Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
* by default, this should be adjusted corresponding with max_read_ahead_mb
* and max_read_ahead_per_file_mb otherwise the readahead budget can be used
- * up quickly which will affect read performance significantly. See LU-2816 */
-#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
+ * up quickly which will affect read performance significantly. See LU-2816
+ */
+#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
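For context on the PAGE_CACHE_* and page_cache_release() substitutions
made throughout this series: before their removal these were, as far as
can be checked, plain aliases in <linux/pagemap.h>, so the rename does
not change the generated code:

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_release(page)	put_page(page)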
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
@@ -570,7 +575,7 @@ stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
if (end_left > st_pgs)
end_left = st_pgs;
- CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu \n",
+ CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n",
start, end, start_left, end_left);
if (start == end)
@@ -600,7 +605,8 @@ static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
/* If ria_length == ria_pages, it means non-stride I/O mode,
* idx should always be inside the read-ahead window in this case.
* For stride I/O mode, just check whether the idx is inside
- * the ria_pages. */
+ * the ria_pages.
+ */
return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
(idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
ria->ria_length < ria->ria_pages);
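A worked example of the stride test above, assuming ria_stoff = 0,
ria_length = 16 and ria_pages = 4:

	/* the window covers indices 0-3, 16-19, 32-35, ...
	 *   idx = 18: (18 - 0) % 16 = 2, 2 < 4  -> inside, read ahead
	 *   idx = 20: (20 - 0) % 16 = 4, 4 >= 4 -> in the stride gap, skip
	 */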
@@ -616,12 +622,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
int rc, count = 0, stride_ria;
unsigned long page_idx;
- LASSERT(ria != NULL);
+ LASSERT(ria);
RIA_DEBUG(ria);
stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
- for (page_idx = ria->ria_start; page_idx <= ria->ria_end &&
- *reserved_pages > 0; page_idx++) {
+ for (page_idx = ria->ria_start;
+ page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) {
if (ras_inside_ra_window(page_idx, ria)) {
/* If the page is inside the read-ahead window */
rc = ll_read_ahead_page(env, io, queue,
@@ -634,11 +640,13 @@ static int ll_read_ahead_pages(const struct lu_env *env,
} else if (stride_ria) {
/* If it is not in the read-ahead window, and it is
* read-ahead mode, then check whether it should skip
- * the stride gap */
+ * the stride gap
+ */
pgoff_t offset;
/* FIXME: This assertion only is valid when it is for
* forward read-ahead, it will be fixed when backward
- * read-ahead is implemented */
+ * read-ahead is implemented
+ */
LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
page_idx,
ria->ria_start, ria->ria_end, ria->ria_stoff,
@@ -647,7 +655,7 @@ static int ll_read_ahead_pages(const struct lu_env *env,
offset = offset % (ria->ria_length);
if (offset > ria->ria_pages) {
page_idx += ria->ria_length - offset;
- CDEBUG(D_READA, "i %lu skip %lu \n", page_idx,
+ CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
ria->ria_length - offset);
continue;
}
@@ -699,7 +707,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
bead = NULL;
/* Enlarge the RA window to encompass the full read */
- if (bead != NULL && ras->ras_window_start + ras->ras_window_len <
+ if (bead && ras->ras_window_start + ras->ras_window_len <
bead->lrr_start + bead->lrr_count) {
ras->ras_window_len = bead->lrr_start + bead->lrr_count -
ras->ras_window_start;
@@ -721,7 +729,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
*/
/* Note: we only trim the RPC, instead of extending the RPC
* to the boundary, so to avoid reading too much pages during
- * random reading. */
+ * random reading.
+ */
rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1));
if (rpc_boundary > 0)
rpc_boundary--;
@@ -730,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
end = rpc_boundary;
/* Truncate RA window to end of file */
- end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
+ end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
ras->ras_next_readahead = max(end, end + 1);
RAS_CDEBUG(ras);
@@ -764,19 +773,19 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
ret = ll_read_ahead_pages(env, io, queue,
ria, &reserved, mapping, &ra_end);
- LASSERTF(reserved >= 0, "reserved %lu\n", reserved);
if (reserved != 0)
ll_ra_count_put(ll_i2sbi(inode), reserved);
- if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
+ if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
ll_ra_stats_inc(mapping, RA_STAT_EOF);
/* if we didn't get to the end of the region we reserved from
* the ras we need to go back and update the ras so that the
* next read-ahead tries from where we left off. we only do so
* if the region we failed to issue read-ahead on is still ahead
- * of the app and behind the next index to start read-ahead from */
- CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
+ * of the app and behind the next index to start read-ahead from
+ */
+ CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
ra_end, end, ria->ria_end);
if (ra_end != end + 1) {
@@ -860,7 +869,7 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras,
unsigned long stride_gap = index - ras->ras_last_readpage - 1;
if (!stride_io_mode(ras) && (stride_gap != 0 ||
- ras->ras_consecutive_stride_requests == 0)) {
+ ras->ras_consecutive_stride_requests == 0)) {
ras->ras_stride_pages = ras->ras_consecutive_pages;
ras->ras_stride_length = stride_gap+ras->ras_consecutive_pages;
}
@@ -881,7 +890,8 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras,
}
/* Stride Read-ahead window will be increased inc_len according to
- * stride I/O pattern */
+ * stride I/O pattern
+ */
static void ras_stride_increase_window(struct ll_readahead_state *ras,
struct ll_ra_info *ra,
unsigned long inc_len)
@@ -952,7 +962,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
* or reads to some other part of the file. Secondly if we get a
* read-ahead miss that we think we've previously issued. This can
* be a symptom of there being so many read-ahead pages that the VM is
- * reclaiming it before we get to it. */
+ * reclaiming it before we get to it.
+ */
if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
zero = 1;
ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
@@ -969,12 +980,13 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
* file up to ra_max_pages_per_file. This is simply a best effort
* and only occurs once per open file. Normal RA behavior is reverted
* to for subsequent IO. The mmap case does not increment
- * ras_requests and thus can never trigger this behavior. */
+ * ras_requests and thus can never trigger this behavior.
+ */
if (ras->ras_requests == 2 && !ras->ras_request_index) {
__u64 kms_pages;
- kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
+ kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
+ PAGE_SHIFT;
CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@@ -1015,14 +1027,16 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
stride_io_mode(ras)) {
/* If stride-RA hit cache miss, the stride detector
* will not be reset to avoid the overhead of
- *redetecting read-ahead mode */
+ *redetecting read-ahead mode
+ */
if (index != ras->ras_last_readpage + 1)
ras->ras_consecutive_pages = 0;
ras_reset(inode, ras, index);
RAS_CDEBUG(ras);
} else {
/* Reset both stride window and normal RA
- * window */
+ * window
+ */
ras_reset(inode, ras, index);
ras->ras_consecutive_pages++;
ras_stride_reset(ras);
@@ -1031,7 +1045,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
} else if (stride_io_mode(ras)) {
/* If this is contiguous read but in stride I/O mode
* currently, check whether stride step still is valid,
- * if invalid, it will reset the stride ra window*/
+ * if invalid, it will reset the stride ra window
+ */
if (!index_in_stride_window(ras, index)) {
/* Shrink stride read-ahead window to be zero */
ras_stride_reset(ras);
@@ -1047,7 +1062,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
if (stride_io_mode(ras))
/* Since stride readahead is sensitive to the offset
* of read-ahead, so we use original offset here,
- * instead of ras_window_start, which is RPC aligned */
+ * instead of ras_window_start, which is RPC aligned
+ */
ras->ras_next_readahead = max(index, ras->ras_next_readahead);
else
ras->ras_next_readahead = max(ras->ras_window_start,
@@ -1055,7 +1071,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
RAS_CDEBUG(ras);
/* Trigger RA in the mmap case where ras_consecutive_requests
- * is not incremented and thus can't be used to trigger RA */
+ * is not incremented and thus can't be used to trigger RA
+ */
if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
ras->ras_window_len = RAS_INCREASE_STEP(inode);
goto out_unlock;
@@ -1101,7 +1118,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
- LASSERT(ll_i2dtexp(inode) != NULL);
+ LASSERT(ll_i2dtexp(inode));
env = cl_env_nested_get(&nest);
if (IS_ERR(env)) {
@@ -1110,7 +1127,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
}
clob = ll_i2info(inode)->lli_clob;
- LASSERT(clob != NULL);
+ LASSERT(clob);
io = ccc_env_thread_io(env);
io->ci_obj = clob;
@@ -1153,14 +1170,16 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
/* Flush page failed because the extent is being written out.
* Wait for the write of extent to be finished to avoid
* breaking kernel which assumes ->writepage should mark
- * PageWriteback or clean the page. */
+ * PageWriteback or clean the page.
+ */
result = cl_sync_file_range(inode, offset,
- offset + PAGE_CACHE_SIZE - 1,
+ offset + PAGE_SIZE - 1,
CL_FSYNC_LOCAL, 1);
if (result > 0) {
/* actually we may have written more than one page.
* decreasing this page because the caller will count
- * it. */
+ * it.
+ */
wbc->nr_to_write -= result - 1;
result = 0;
}
@@ -1192,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
int ignore_layout = 0;
if (wbc->range_cyclic) {
- start = mapping->writeback_index << PAGE_CACHE_SHIFT;
+ start = mapping->writeback_index << PAGE_SHIFT;
end = OBD_OBJECT_EOF;
} else {
start = wbc->range_start;
@@ -1210,7 +1229,8 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (sbi->ll_umounting)
/* if the mountpoint is being umounted, all pages have to be
* evicted to avoid hitting LBUG when truncate_inode_pages()
- * is called later on. */
+ * is called later on.
+ */
ignore_layout = 1;
result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
if (result > 0) {
@@ -1221,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
end = i_size_read(inode);
- mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
+ mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
}
return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 711fda93a58d..69aa15e8e3ef 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -87,14 +87,14 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
* below because they are run with page locked and all our io is
* happening with locked page too
*/
- if (offset == 0 && length == PAGE_CACHE_SIZE) {
+ if (offset == 0 && length == PAGE_SIZE) {
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
inode = vmpage->mapping->host;
obj = ll_i2info(inode)->lli_clob;
- if (obj != NULL) {
+ if (obj) {
page = cl_vmpage_page(vmpage, obj);
- if (page != NULL) {
+ if (page) {
lu_ref_add(&page->cp_reference,
"delete", vmpage);
cl_page_delete(env, page);
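The offset/length test above matters because ->invalidatepage() may be
invoked for only part of a page; only the full-page case means the data
is really gone. A minimal sketch of the distinction (hypothetical
handler name):

	static void demo_invalidatepage(struct page *page,
					unsigned int offset,
					unsigned int length)
	{
		if (offset == 0 && length == PAGE_SIZE) {
			/* whole page dropped: private state can be torn
			 * down, as the lustre handler does via
			 * cl_page_delete()
			 */
		} else {
			/* partial invalidate (e.g. hole punch): the page
			 * survives, so keep the attached state
			 */
		}
	}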
@@ -109,12 +109,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
}
}
-#ifdef HAVE_RELEASEPAGE_WITH_INT
-#define RELEASEPAGE_ARG_TYPE int
-#else
-#define RELEASEPAGE_ARG_TYPE gfp_t
-#endif
-static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
+static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
{
struct cl_env_nest nest;
struct lu_env *env;
@@ -128,11 +123,11 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
return 0;
mapping = vmpage->mapping;
- if (mapping == NULL)
+ if (!mapping)
return 1;
obj = ll_i2info(mapping->host)->lli_clob;
- if (obj == NULL)
+ if (!obj)
return 1;
/* 1 for page allocator, 1 for cl_page and 1 for page cache */
@@ -145,12 +140,13 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
/* If we can't allocate an env we won't call cl_page_put()
* later on which further means it's impossible to drop
* page refcount by cl_page, so ask kernel to not free
- * this page. */
+ * this page.
+ */
return 0;
page = cl_vmpage_page(vmpage, obj);
- result = page == NULL;
- if (page != NULL) {
+ result = !page;
+ if (page) {
if (!cl_page_in_use(page)) {
result = 1;
cl_page_delete(env, page);
@@ -197,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
return -EFBIG;
}
- *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- *max_pages -= user_addr >> PAGE_CACHE_SHIFT;
+ *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ *max_pages -= user_addr >> PAGE_SHIFT;
*pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
if (*pages) {
@@ -212,7 +208,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
}
/* ll_free_user_pages - tear down page struct array
- * @pages: array of page struct pointers underlying target buffer */
+ * @pages: array of page struct pointers underlying target buffer
+ */
static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
{
int i;
@@ -220,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
for (i = 0; i < npages; i++) {
if (do_dirty)
set_page_dirty_lock(pages[i]);
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}
kvfree(pages);
}
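ll_free_user_pages() is the release half of the usual pin/dirty/put
pattern for user buffers. A self-contained sketch of both halves
(hypothetical helper; the get_user_pages_fast() signature used is the
one from this kernel generation):

	#include <linux/mm.h>
	#include <linux/slab.h>

	static void pin_then_release(unsigned long uaddr, int npages,
				     bool write_to_pages)
	{
		struct page **pages = kcalloc(npages, sizeof(*pages),
					      GFP_KERNEL);
		int i, got;

		if (!pages)
			return;
		got = get_user_pages_fast(uaddr, npages, write_to_pages,
					  pages);
		for (i = 0; i < got; i++) {
			/* pages we wrote into must be marked dirty before
			 * the pin is dropped
			 */
			if (write_to_pages)
				set_page_dirty_lock(pages[i]);
			put_page(pages[i]);
		}
		kfree(pages);
	}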
@@ -246,7 +243,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
cl_2queue_init(queue);
for (i = 0; i < page_count; i++) {
if (pv->ldp_offsets)
- file_offset = pv->ldp_offsets[i];
+ file_offset = pv->ldp_offsets[i];
LASSERT(!(file_offset & (page_size - 1)));
clp = cl_page_find(env, obj, cl_index(obj, file_offset),
@@ -266,7 +263,8 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
do_io = true;
/* check the page type: if the page is a host page, then do
- * write directly */
+ * write directly
+ */
if (clp->cp_type == CPT_CACHEABLE) {
struct page *vmpage = cl_page_vmpage(env, clp);
struct page *src_page;
@@ -284,14 +282,16 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
kunmap_atomic(src);
/* make sure page will be added to the transfer by
- * cl_io_submit()->...->vvp_page_prep_write(). */
+ * cl_io_submit()->...->vvp_page_prep_write().
+ */
if (rw == WRITE)
set_page_dirty(vmpage);
if (rw == READ) {
/* do not issue the page for read, since it
* may reread an ra page which does not have the uptodate
- * bit set. */
+ * bit set.
+ */
cl_page_disown(env, io, clp);
do_io = false;
}
@@ -339,29 +339,25 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
size_t size, loff_t file_offset,
struct page **pages, int page_count)
{
- struct ll_dio_pages pvec = { .ldp_pages = pages,
- .ldp_nr = page_count,
- .ldp_size = size,
- .ldp_offsets = NULL,
- .ldp_start_offset = file_offset
- };
-
- return ll_direct_rw_pages(env, io, rw, inode, &pvec);
+ struct ll_dio_pages pvec = {
+ .ldp_pages = pages,
+ .ldp_nr = page_count,
+ .ldp_size = size,
+ .ldp_offsets = NULL,
+ .ldp_start_offset = file_offset
+ };
+
+ return ll_direct_rw_pages(env, io, rw, inode, &pvec);
}
-#ifdef KMALLOC_MAX_SIZE
-#define MAX_MALLOC KMALLOC_MAX_SIZE
-#else
-#define MAX_MALLOC (128 * 1024)
-#endif
-
/* This is the maximum size of a single O_DIRECT request, based on the
* kmalloc limit. We need to fit all of the brw_page structs, each one
* representing PAGE_SIZE worth of user data, into a single buffer, and
* then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is
- * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */
-#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \
- ~(DT_MAX_BRW_SIZE - 1))
+ * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
+ */
+#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
+ PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
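Spelling out the arithmetic behind MAX_DIO_SIZE (assuming roughly
24 bytes per struct brw_page and 4kB pages, which reproduces the
figures quoted in the comment):

	/* 128kB kmalloc:  131072 / 24 =   5461 brw_pages
	 *                   5461 * 4096 ~=  22MB of user data
	 * 4MB kmalloc:   4194304 / 24 = 174762 brw_pages
	 *                 174762 * 4096 ~= 682MB of user data
	 * either result is then masked down to a DT_MAX_BRW_SIZE multiple.
	 */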
static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
loff_t file_offset)
{
@@ -386,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
CDEBUG(D_VFSTRACE,
"VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
- file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
- MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
+ file_offset, file_offset, count >> PAGE_SHIFT,
+ MAX_DIO_SIZE >> PAGE_SHIFT);
/* Check that all user buffers are aligned as well */
if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
@@ -396,7 +392,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env));
io = ccc_env_io(env)->cui_cl.cis_io;
- LASSERT(io != NULL);
+ LASSERT(io);
/* 0. Need locking between buffered and direct access. and race with
* size changing by concurrent truncates and writes.
@@ -433,10 +429,11 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
* for the request, shrink it to a smaller
* PAGE_SIZE multiple and try again.
* We should always be able to kmalloc for a
- * page worth of page pointers = 4MB on i386. */
+ * page worth of page pointers = 4MB on i386.
+ */
if (result == -ENOMEM &&
- size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
- PAGE_CACHE_SIZE) {
+ size > (PAGE_SIZE / sizeof(*pages)) *
+ PAGE_SIZE) {
size = ((((size / 2) - 1) |
~CFS_PAGE_MASK) + 1) &
CFS_PAGE_MASK;
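The shrink expression is "halve, then round up to a page multiple"; a
worked example with 4kB pages, taking CFS_PAGE_MASK to be
~(PAGE_SIZE - 1):

	/* size = 41060:
	 *   size / 2           = 20530
	 *   (20530 - 1) | 4095 = 24575
	 *   24575 + 1          = 24576   (exactly 6 pages)
	 *   24576 & ~4095      = 24576
	 * so each -ENOMEM retry roughly halves the request while keeping
	 * it PAGE_SIZE-aligned.
	 */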
@@ -461,7 +458,7 @@ out:
struct lov_stripe_md *lsm;
lsm = ccc_inode_lsm_get(inode);
- LASSERT(lsm != NULL);
+ LASSERT(lsm);
lov_stripe_lock(lsm);
obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
lov_stripe_unlock(lsm);
@@ -474,13 +471,13 @@ out:
}
static int ll_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
{
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int rc;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
@@ -491,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
rc = ll_prepare_write(file, page, from, from + len);
if (rc) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
return rc;
}
@@ -500,20 +497,20 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned from = pos & (PAGE_SIZE - 1);
int rc;
rc = ll_commit_write(file, page, from, from + copied);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return rc ?: copied;
}
#ifdef CONFIG_MIGRATION
static int ll_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode
)
{
/* Always fail page migration until we have a proper implementation */
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 88ffd8e3abdb..99ffd1589df8 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -49,13 +49,13 @@
#define SA_OMITTED_ENTRY_MAX 8ULL
-typedef enum {
+enum se_stat {
/** negative values are for error cases */
SA_ENTRY_INIT = 0, /** init entry */
SA_ENTRY_SUCC = 1, /** stat succeed */
SA_ENTRY_INVA = 2, /** invalid entry */
SA_ENTRY_DEST = 3, /** entry to be destroyed */
-} se_stat_t;
+};
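This is the usual kernel-style de-typedef conversion (checkpatch warns
on new typedefs for plain enums); generically:

	/* before:
	 *	typedef enum { STATE_A, STATE_B } my_state_t;
	 *	my_state_t se_stat;
	 * after: the enum tag is spelled out at each use instead
	 */
	enum my_state { STATE_A, STATE_B };
	enum my_state se_stat;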
struct ll_sa_entry {
/* link into sai->sai_entries */
@@ -71,7 +71,7 @@ struct ll_sa_entry {
/* low layer ldlm lock handle */
__u64 se_handle;
/* entry status */
- se_stat_t se_stat;
+ enum se_stat se_stat;
/* entry size, contains name */
int se_size;
/* pointer to async getattr enqueue info */
@@ -130,7 +130,7 @@ ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
static inline int agl_should_run(struct ll_statahead_info *sai,
struct inode *inode)
{
- return (inode != NULL && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
+ return (inode && S_ISREG(inode->i_mode) && sai->sai_agl_valid);
}
static inline int sa_sent_full(struct ll_statahead_info *sai)
@@ -284,7 +284,7 @@ ll_sa_entry_get_byindex(struct ll_statahead_info *sai, __u64 index)
}
static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry)
+ struct ll_sa_entry *entry)
{
struct md_enqueue_info *minfo = entry->se_minfo;
struct ptlrpc_request *req = entry->se_req;
@@ -303,7 +303,7 @@ static void ll_sa_entry_cleanup(struct ll_statahead_info *sai,
}
static void ll_sa_entry_put(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry)
+ struct ll_sa_entry *entry)
{
if (atomic_dec_and_test(&entry->se_refcount)) {
CDEBUG(D_READA, "free sa entry %.*s(%p) index %llu\n",
@@ -366,7 +366,7 @@ ll_sa_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
*/
static void
do_sa_entry_to_stated(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry, se_stat_t stat)
+ struct ll_sa_entry *entry, enum se_stat stat)
{
struct ll_sa_entry *se;
struct list_head *pos = &sai->sai_entries_stated;
@@ -392,7 +392,7 @@ do_sa_entry_to_stated(struct ll_statahead_info *sai,
*/
static int
ll_sa_entry_to_stated(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry, se_stat_t stat)
+ struct ll_sa_entry *entry, enum se_stat stat)
{
struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
int ret = 1;
@@ -494,12 +494,13 @@ static void ll_sai_put(struct ll_statahead_info *sai)
if (unlikely(atomic_read(&sai->sai_refcount) > 0)) {
/* It is a race case; the interpret callback just holds
- * a reference count */
+ * a reference count
+ */
spin_unlock(&lli->lli_sa_lock);
return;
}
- LASSERT(lli->lli_opendir_key == NULL);
+ LASSERT(!lli->lli_opendir_key);
LASSERT(thread_is_stopped(&sai->sai_thread));
LASSERT(thread_is_stopped(&sai->sai_agl_thread));
@@ -513,8 +514,8 @@ static void ll_sai_put(struct ll_statahead_info *sai)
PFID(&lli->lli_fid),
sai->sai_sent, sai->sai_replied);
- list_for_each_entry_safe(entry, next,
- &sai->sai_entries, se_link)
+ list_for_each_entry_safe(entry, next, &sai->sai_entries,
+ se_link)
do_sa_entry_fini(sai, entry);
LASSERT(list_empty(&sai->sai_entries));
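The assertions above rely on the reference rule spelled out later in
this file: take a local reference before touching a published sai, and
let the final put do the teardown. The core of that pattern, reduced to
a sketch with hypothetical names:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct obj {
		atomic_t	refcount;
		spinlock_t	*guard;		/* lock that publishes it */
	};

	static void obj_put(struct obj *o)
	{
		/* returns with the lock held only on the final put */
		if (atomic_dec_and_lock(&o->refcount, o->guard)) {
			spinlock_t *guard = o->guard;

			/* unpublish under the lock, then free */
			spin_unlock(guard);
			kfree(o);
		}
	}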
@@ -618,20 +619,21 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
it = &minfo->mi_it;
req = entry->se_req;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EFAULT;
goto out;
}
child = entry->se_inode;
- if (child == NULL) {
+ if (!child) {
/*
* lookup.
*/
LASSERT(fid_is_zero(&minfo->mi_data.op_fid2));
/* XXX: No fid in reply, this is probably cross-ref case.
- * SA can't handle it yet. */
+ * SA can't handle it yet.
+ */
if (body->valid & OBD_MD_MDS) {
rc = -EAGAIN;
goto out;
@@ -672,7 +674,8 @@ out:
/* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
* reference count by calling "ll_intent_drop_lock()" whether or not the
* above operations failed. Do not worry about calling
- * "ll_intent_drop_lock()" more than once. */
+ * "ll_intent_drop_lock()" more than once.
+ */
rc = ll_sa_entry_to_stated(sai, entry,
rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
if (rc == 0 && entry->se_index == sai->sai_index_wait)
@@ -698,14 +701,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
/* release ibits lock ASAP to avoid deadlock when statahead
* thread enqueues lock on parent in readdir and another
* process enqueues lock on child with parent lock held, eg.
- * unlink. */
+ * unlink.
+ */
handle = it->d.lustre.it_lock_handle;
ll_intent_drop_lock(it);
}
spin_lock(&lli->lli_sa_lock);
/* stale entry */
- if (unlikely(lli->lli_sai == NULL ||
+ if (unlikely(!lli->lli_sai ||
lli->lli_sai->sai_generation != minfo->mi_generation)) {
spin_unlock(&lli->lli_sa_lock);
rc = -ESTALE;
@@ -720,7 +724,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
}
entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
- if (entry == NULL) {
+ if (!entry) {
sai->sai_replied++;
spin_unlock(&lli->lli_sa_lock);
rc = -EIDRM;
@@ -736,11 +740,12 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
/* Release the async ibits lock ASAP to avoid deadlock
* when statahead thread tries to enqueue lock on parent
* for readpage and other tries to enqueue lock on child
- * with parent's lock held, for example: unlink. */
+ * with parent's lock held, for example: unlink.
+ */
entry->se_handle = handle;
wakeup = list_empty(&sai->sai_entries_received);
list_add_tail(&entry->se_list,
- &sai->sai_entries_received);
+ &sai->sai_entries_received);
}
sai->sai_replied++;
spin_unlock(&lli->lli_sa_lock);
@@ -756,7 +761,7 @@ out:
iput(dir);
kfree(minfo);
}
- if (sai != NULL)
+ if (sai)
ll_sai_put(sai);
return rc;
}
@@ -853,7 +858,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
struct ldlm_enqueue_info *einfo;
int rc;
- if (unlikely(inode == NULL))
+ if (unlikely(!inode))
return 1;
if (d_mountpoint(dentry))
@@ -908,10 +913,9 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name,
rc = do_sa_revalidate(dir, entry, dentry);
if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
ll_agl_add(sai, d_inode(dentry), entry->se_index);
- }
- if (dentry != NULL)
dput(dentry);
+ }
if (rc) {
rc1 = ll_sa_entry_to_stated(sai, entry,
@@ -948,7 +952,8 @@ static int ll_agl_thread(void *arg)
if (thread_is_init(thread))
/* If someone else has changed the thread state
* (e.g. already changed to SVC_STOPPING), we can't just
- * blindly overwrite that setting. */
+ * blindly overwrite that setting.
+ */
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&plli->lli_agl_lock);
wake_up(&thread->t_ctl_waitq);
@@ -964,7 +969,8 @@ static int ll_agl_thread(void *arg)
spin_lock(&plli->lli_agl_lock);
/* The statahead thread may help to process AGL entries,
- * so check whether list empty again. */
+ * so check whether list empty again.
+ */
if (!list_empty(&sai->sai_entries_agl)) {
clli = list_entry(sai->sai_entries_agl.next,
struct ll_inode_info, lli_agl_list);
@@ -1007,8 +1013,8 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
sai, parent);
plli = ll_i2info(d_inode(parent));
- task = kthread_run(ll_agl_thread, parent,
- "ll_agl_%u", plli->lli_opendir_pid);
+ task = kthread_run(ll_agl_thread, parent, "ll_agl_%u",
+ plli->lli_opendir_pid);
if (IS_ERR(task)) {
CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
thread_set_flags(thread, SVC_STOPPED);
@@ -1049,7 +1055,8 @@ static int ll_statahead_thread(void *arg)
if (thread_is_init(thread))
/* If someone else has changed the thread state
* (e.g. already changed to SVC_STOPPING), we can't just
- * blindly overwrite that setting. */
+ * blindly overwrite that setting.
+ */
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&plli->lli_sa_lock);
wake_up(&thread->t_ctl_waitq);
@@ -1070,7 +1077,7 @@ static int ll_statahead_thread(void *arg)
}
dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent != NULL;
+ for (ent = lu_dirent_start(dp); ent;
ent = lu_dirent_next(ent)) {
__u64 hash;
int namelen;
@@ -1137,7 +1144,8 @@ interpret_it:
/* If no window for metadata statahead, but there are
* some AGL entries to be triggered, then try to help
- * to process the AGL entries. */
+ * to process the AGL entries.
+ */
if (sa_sent_full(sai)) {
spin_lock(&plli->lli_agl_lock);
while (!list_empty(&sai->sai_entries_agl)) {
@@ -1274,7 +1282,7 @@ void ll_stop_statahead(struct inode *dir, void *key)
{
struct ll_inode_info *lli = ll_i2info(dir);
- if (unlikely(key == NULL))
+ if (unlikely(!key))
return;
spin_lock(&lli->lli_sa_lock);
@@ -1357,7 +1365,7 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry)
}
dp = page_address(page);
- for (ent = lu_dirent_start(dp); ent != NULL;
+ for (ent = lu_dirent_start(dp); ent;
ent = lu_dirent_next(ent)) {
__u64 hash;
int namelen;
@@ -1365,7 +1373,8 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry)
hash = le64_to_cpu(ent->lde_hash);
/* The ll_get_dir_page() can return any page containing
- * the given hash which may not be the start hash. */
+ * the given hash which may not be the start hash.
+ */
if (unlikely(hash < pos))
continue;
@@ -1448,7 +1457,7 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
int hit;
- if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
+ if (entry && entry->se_stat == SA_ENTRY_SUCC)
hit = 1;
else
hit = 0;
@@ -1498,6 +1507,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
struct ll_sa_entry *entry;
struct ptlrpc_thread *thread;
struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
int rc = 0;
struct ll_inode_info *plli;
@@ -1540,7 +1550,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
}
entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
- if (entry == NULL || only_unplug) {
+ if (!entry || only_unplug) {
ll_sai_unplug(sai, entry);
return entry ? 1 : -EAGAIN;
}
@@ -1559,8 +1569,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
}
}
- if (entry->se_stat == SA_ENTRY_SUCC &&
- entry->se_inode != NULL) {
+ if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
struct inode *inode = entry->se_inode;
struct lookup_intent it = { .it_op = IT_GETATTR,
.d.lustre.it_lock_handle =
@@ -1570,11 +1579,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
ll_inode2fid(inode), &bits);
if (rc == 1) {
- if (d_inode(*dentryp) == NULL) {
+ if (!d_inode(*dentryp)) {
struct dentry *alias;
alias = ll_splice_alias(inode,
- *dentryp);
+ *dentryp);
if (IS_ERR(alias)) {
ll_sai_unplug(sai, entry);
return PTR_ERR(alias);
@@ -1583,7 +1592,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
} else if (d_inode(*dentryp) != inode) {
/* revalidate, but inode is recreated */
CDEBUG(D_READA,
- "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
+ "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
*dentryp,
d_inode(*dentryp)->i_ino,
d_inode(*dentryp)->i_generation,
@@ -1616,14 +1625,14 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
}
sai = ll_sai_alloc();
- if (sai == NULL) {
+ if (!sai) {
rc = -ENOMEM;
goto out;
}
sai->sai_ls_all = (rc == LS_FIRST_DOT_DE);
sai->sai_inode = igrab(dir);
- if (unlikely(sai->sai_inode == NULL)) {
+ if (unlikely(!sai->sai_inode)) {
CWARN("Do not start stat ahead on dying inode "DFID"\n",
PFID(&lli->lli_fid));
rc = -ESTALE;
@@ -1651,25 +1660,28 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
* but as soon as we expose the sai by attaching it to the lli that
* default reference can be dropped by another thread calling
* ll_stop_statahead. We need to take a local reference to protect
- * the sai buffer while we intend to access it. */
+ * the sai buffer while we intend to access it.
+ */
ll_sai_get(sai);
lli->lli_sai = sai;
plli = ll_i2info(d_inode(parent));
- rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
- "ll_sa_%u", plli->lli_opendir_pid));
+ task = kthread_run(ll_statahead_thread, parent, "ll_sa_%u",
+ plli->lli_opendir_pid);
thread = &sai->sai_thread;
- if (IS_ERR_VALUE(rc)) {
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
CERROR("can't start ll_sa thread, rc: %d\n", rc);
dput(parent);
lli->lli_opendir_key = NULL;
thread_set_flags(thread, SVC_STOPPED);
thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
/* Drop both our own local reference and the default
- * reference from allocation time. */
+ * reference from allocation time.
+ */
ll_sai_put(sai);
ll_sai_put(sai);
- LASSERT(lli->lli_sai == NULL);
+ LASSERT(!lli->lli_sai);
return -EAGAIN;
}
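The hunk above fixes a real bug as well as style: the old code stored
PTR_ERR() of the returned pointer in an int and tested it with
IS_ERR_VALUE(), which is unreliable once the value has been truncated
to int, and it also discarded the task pointer. The corrected idiom, as
a sketch:

	#include <linux/err.h>
	#include <linux/kthread.h>

	static int start_sa_thread(int (*fn)(void *), void *data,
				   unsigned int pid)
	{
		struct task_struct *task;

		/* keep the pointer; classify it before extracting errno */
		task = kthread_run(fn, data, "ll_sa_%u", pid);
		if (IS_ERR(task))
			return PTR_ERR(task);
		return 0;
	}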
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 86c371ef71ea..61856d37afc5 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -53,8 +53,8 @@ static struct inode *ll_alloc_inode(struct super_block *sb)
struct ll_inode_info *lli;
ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1);
- lli = kmem_cache_alloc(ll_inode_cachep, GFP_NOFS | __GFP_ZERO);
- if (lli == NULL)
+ lli = kmem_cache_zalloc(ll_inode_cachep, GFP_NOFS);
+ if (!lli)
return NULL;
inode_init_once(&lli->lli_vfs_inode);
@@ -89,7 +89,7 @@ MODULE_ALIAS_FS("lustre");
void lustre_register_client_process_config(int (*cpc)(struct lustre_cfg *lcfg));
-static int __init init_lustre_lite(void)
+static int __init lustre_init(void)
{
lnet_process_id_t lnet_id;
struct timespec64 ts;
@@ -99,7 +99,8 @@ static int __init init_lustre_lite(void)
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
- * symbols from modules.*/
+ * symbols from modules.
+ */
CDEBUG(D_INFO, "Lustre client module (%p).\n",
&lustre_super_operations);
@@ -108,26 +109,26 @@ static int __init init_lustre_lite(void)
sizeof(struct ll_inode_info),
0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
NULL);
- if (ll_inode_cachep == NULL)
+ if (!ll_inode_cachep)
goto out_cache;
ll_file_data_slab = kmem_cache_create("ll_file_data",
- sizeof(struct ll_file_data), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (ll_file_data_slab == NULL)
+ sizeof(struct ll_file_data), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ll_file_data_slab)
goto out_cache;
ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
sizeof(struct ll_remote_perm),
0, 0, NULL);
- if (ll_remote_perm_cachep == NULL)
+ if (!ll_remote_perm_cachep)
goto out_cache;
ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
REMOTE_PERM_HASHSIZE *
sizeof(struct list_head),
0, 0, NULL);
- if (ll_rmtperm_hash_cachep == NULL)
+ if (!ll_rmtperm_hash_cachep)
goto out_cache;
llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
@@ -146,7 +147,8 @@ static int __init init_lustre_lite(void)
cfs_get_random_bytes(seed, sizeof(seed));
/* Nodes with small feet have little entropy. The NID for this
- * node gives the most entropy in the low bits */
+ * node gives the most entropy in the low bits
+ */
for (i = 0;; i++) {
if (LNetGetId(i, &lnet_id) == -ENOENT)
break;
@@ -186,7 +188,7 @@ out_cache:
return rc;
}
-static void __exit exit_lustre_lite(void)
+static void __exit lustre_exit(void)
{
lustre_register_client_fill_super(NULL);
lustre_register_kill_super_cb(NULL);
@@ -207,8 +209,9 @@ static void __exit exit_lustre_lite(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Lite Client File System");
+MODULE_DESCRIPTION("Lustre Client File System");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-module_init(init_lustre_lite);
-module_exit(exit_lustre_lite);
+module_init(lustre_init);
+module_exit(lustre_exit);
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 2610348f6c72..46d03ea48352 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -59,7 +59,8 @@ static int ll_readlink_internal(struct inode *inode,
*symname = lli->lli_symlink_name;
/* If the total CDEBUG() size is larger than a page, it
* will print a warning to the console, avoid this by
- * printing just the last part of the symlink. */
+ * printing just the last part of the symlink.
+ */
CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n",
print_limit < symlen ? "..." : "", print_limit,
(*symname) + symlen - print_limit, symlen);
@@ -81,7 +82,6 @@ static int ll_readlink_internal(struct inode *inode,
}
body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
if ((body->valid & OBD_MD_LINKNAME) == 0) {
CERROR("OBD_MD_LINKNAME not set on reply\n");
rc = -EPROTO;
@@ -91,13 +91,13 @@ static int ll_readlink_internal(struct inode *inode,
LASSERT(symlen != 0);
if (body->eadatasize != symlen) {
CERROR("inode %lu: symlink length %d not expected %d\n",
- inode->i_ino, body->eadatasize - 1, symlen - 1);
+ inode->i_ino, body->eadatasize - 1, symlen - 1);
rc = -EPROTO;
goto failed;
}
*symname = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_MD);
- if (*symname == NULL ||
+ if (!*symname ||
strnlen(*symname, symlen) != symlen - 1) {
/* not full/NULL terminated */
CERROR("inode %lu: symlink not NULL terminated string of length %d\n",
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index fdca4ec0555d..282b70b776da 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -79,8 +79,8 @@ static void *vvp_key_init(const struct lu_context *ctx,
{
struct vvp_thread_info *info;
- info = kmem_cache_alloc(vvp_thread_kmem, GFP_NOFS | __GFP_ZERO);
- if (info == NULL)
+ info = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
@@ -98,8 +98,8 @@ static void *vvp_session_key_init(const struct lu_context *ctx,
{
struct vvp_session *session;
- session = kmem_cache_alloc(vvp_session_kmem, GFP_NOFS | __GFP_ZERO);
- if (session == NULL)
+ session = kmem_cache_zalloc(vvp_session_kmem, GFP_NOFS);
+ if (!session)
session = ERR_PTR(-ENOMEM);
return session;
}
@@ -228,7 +228,7 @@ int cl_sb_fini(struct super_block *sb)
if (!IS_ERR(env)) {
cld = sbi->ll_cl;
- if (cld != NULL) {
+ if (cld) {
cl_stack_fini(env, cld);
sbi->ll_cl = NULL;
sbi->ll_site = NULL;
@@ -325,11 +325,11 @@ static struct cl_object *vvp_pgcache_obj(const struct lu_env *env,
cfs_hash_hlist_for_each(dev->ld_site->ls_obj_hash, id->vpi_bucket,
vvp_pgcache_obj_get, id);
- if (id->vpi_obj != NULL) {
+ if (id->vpi_obj) {
struct lu_object *lu_obj;
lu_obj = lu_object_locate(id->vpi_obj, dev->ld_type);
- if (lu_obj != NULL) {
+ if (lu_obj) {
lu_object_ref_add(lu_obj, "dump", current);
return lu2cl(lu_obj);
}
@@ -355,7 +355,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
if (id.vpi_bucket >= CFS_HASH_NHLIST(site->ls_obj_hash))
return ~0ULL;
clob = vvp_pgcache_obj(env, dev, &id);
- if (clob != NULL) {
+ if (clob) {
struct cl_object_header *hdr;
int nr;
struct cl_page *pg;
@@ -443,7 +443,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
vvp_pgcache_id_unpack(pos, &id);
sbi = f->private;
clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
- if (clob != NULL) {
+ if (clob) {
hdr = cl_object_header(clob);
spin_lock(&hdr->coh_page_guard);
@@ -452,7 +452,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
seq_printf(f, "%8x@"DFID": ",
id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
- if (page != NULL) {
+ if (page) {
vvp_pgcache_page_show(env, f, page);
cl_page_put(env, page);
} else
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 2e39533a45f8..bb393378c9bb 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -44,14 +44,13 @@
#include "../include/cl_object.h"
#include "llite_internal.h"
-int vvp_io_init (const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int vvp_lock_init (const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
+int vvp_io_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io);
+int vvp_lock_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
-int vvp_page_init (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 0920ac6b3003..85a835976174 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -68,7 +68,7 @@ int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
* have to acquire group lock.
*/
static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
- struct inode *inode)
+ struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ccc_io *cio = ccc_env_io(env);
@@ -78,7 +78,8 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
case CIT_READ:
case CIT_WRITE:
/* don't need lock here to check lli_layout_gen as we have held
- * extent lock and GROUP lock has to be held to swap layout */
+ * extent lock and GROUP lock has to be held to swap layout
+ */
if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
io->ci_need_restart = 1;
/* this will return application a short read/write */
@@ -134,7 +135,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
*/
rc = ll_layout_restore(ccc_object_inode(obj));
/* if restore registration failed, no restart,
- * we will return -ENODATA */
+ * we will return -ENODATA
+ */
/* The layout will change after restore, so we need to
* block on layout lock hold by the MDT
* as MDT will not send new layout in lvb (see LU-3124)
@@ -164,8 +166,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
DFID" layout changed from %d to %d.\n",
PFID(lu_object_fid(&obj->co_lu)),
cio->cui_layout_gen, gen);
- /* today successful restore is the only possible
- * case */
+ /* today successful restore is the only possible case */
/* restore was done, clear restoring state */
ll_i2info(ccc_object_inode(obj))->lli_flags &=
~LLIF_FILE_RESTORING;
@@ -181,7 +182,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
- if (page != NULL) {
+ if (page) {
lu_ref_del(&page->cp_reference, "fault", io);
cl_page_put(env, page);
io->u.ci_fault.ft_page = NULL;
@@ -220,11 +221,11 @@ static int vvp_mmap_locks(const struct lu_env *env,
if (!cl_is_normalio(env, io))
return 0;
- if (vio->cui_iter == NULL) /* nfs or loop back device write */
+ if (!vio->cui_iter) /* nfs or loop back device write */
return 0;
/* No MM (e.g. NFS)? No vmas too. */
- if (mm == NULL)
+ if (!mm)
return 0;
iov_for_each(iov, i, *(vio->cui_iter)) {
@@ -456,7 +457,8 @@ static void vvp_io_setattr_end(const struct lu_env *env,
if (cl_io_is_trunc(io))
/* Truncate in memory pages - they must be clean pages
- * because osc has already notified to destroy osc_extents. */
+ * because osc has already notified to destroy osc_extents.
+ */
vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
inode_unlock(inode);
@@ -499,8 +501,8 @@ static int vvp_io_read_start(const struct lu_env *env,
goto out;
LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
- "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
- inode->i_ino, cnt, pos, i_size_read(inode));
+ "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
+ inode->i_ino, cnt, pos, i_size_read(inode));
/* turn off the kernel's read-ahead */
cio->cui_fd->fd_file->f_ra.ra_pages = 0;
@@ -510,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env,
vio->cui_ra_window_set = 1;
bead->lrr_start = cl_index(obj, pos);
/*
- * XXX: explicit PAGE_CACHE_SIZE
+ * XXX: explicit PAGE_SIZE
*/
- bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
+ bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
ll_ra_read_in(file, bead);
}
@@ -525,11 +527,12 @@ static int vvp_io_read_start(const struct lu_env *env,
break;
case IO_SPLICE:
result = generic_file_splice_read(file, &pos,
- vio->u.splice.cui_pipe, cnt,
- vio->u.splice.cui_flags);
+ vio->u.splice.cui_pipe, cnt,
+ vio->u.splice.cui_flags);
/* LU-1109: do splice read stripe by stripe, otherwise it
* may make nfsd stuck if this read occupies all internal pipe
- * buffers. */
+ * buffers.
+ */
io->ci_continue = 0;
break;
default:
@@ -587,7 +590,7 @@ static int vvp_io_write_start(const struct lu_env *env,
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (cio->cui_iter == NULL) /* from a temp io in ll_cl_init(). */
+ if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */
result = 0;
else
result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);
@@ -673,7 +676,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
/* must return locked page */
if (fio->ft_mkwrite) {
- LASSERT(cfio->ft_vmpage != NULL);
+ LASSERT(cfio->ft_vmpage);
lock_page(cfio->ft_vmpage);
} else {
result = vvp_io_kernel_fault(cfio);
@@ -689,13 +692,15 @@ static int vvp_io_fault_start(const struct lu_env *env,
size = i_size_read(inode);
/* Though we have already held a cl_lock upon this page,
- * it still can be truncated locally. */
+ * it still can be truncated locally.
+ */
if (unlikely((vmpage->mapping != inode->i_mapping) ||
(page_offset(vmpage) > size))) {
CDEBUG(D_PAGE, "llite: fault and truncate race happened!\n");
/* return +1 to stop cl_io_loop() and ll_fault() will catch
- * and retry. */
+ * and retry.
+ */
result = 1;
goto out;
}
@@ -736,7 +741,8 @@ static int vvp_io_fault_start(const struct lu_env *env,
}
/* if page is going to be written, we should add this page into cache
- * earlier. */
+ * earlier.
+ */
if (fio->ft_mkwrite) {
wait_on_page_writeback(vmpage);
if (set_page_dirty(vmpage)) {
@@ -750,7 +756,8 @@ static int vvp_io_fault_start(const struct lu_env *env,
/* Do not set Dirty bit here so that in case IO is
* started before the page is really made dirty, we
- * still have chance to detect it. */
+ * still have chance to detect it.
+ */
result = cl_page_cache_add(env, io, page, CRT_WRITE);
LASSERT(cl_page_is_owned(page, io));
@@ -792,7 +799,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
out:
/* return unlocked vmpage to avoid deadlocking */
- if (vmpage != NULL)
+ if (vmpage)
unlock_page(vmpage);
cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
return result;
@@ -803,7 +810,8 @@ static int vvp_io_fsync_start(const struct lu_env *env,
{
/* we should mark TOWRITE bit to each dirty page in radix tree to
* verify pages have been written, but this is difficult because of
- * race. */
+ * race.
+ */
return 0;
}
@@ -951,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
* We're completely overwriting an existing page, so _don't_
* set it up to date until commit_write
*/
- if (from == 0 && to == PAGE_CACHE_SIZE) {
+ if (from == 0 && to == PAGE_SIZE) {
CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
POISON_PAGE(page, 0x11);
} else
@@ -1003,7 +1011,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
*
* (3) IO is batched up to the RPC size and is async until the
* client max cache is hit
- * (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
+ * (/sys/fs/lustre/osc/OSC.../max_dirty_mb)
*
*/
if (!PageDirty(vmpage)) {
@@ -1014,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
set_page_dirty(vmpage);
vvp_write_pending(cl2ccc(obj), cp);
} else if (result == -EDQUOT) {
- pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+ pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
bool need_clip = true;
/*
@@ -1032,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
* being.
*/
if (last_index > pg->cp_index) {
- to = PAGE_CACHE_SIZE;
+ to = PAGE_SIZE;
need_clip = false;
} else if (last_index == pg->cp_index) {
int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
@@ -1153,7 +1161,8 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
count = io->u.ci_rw.crw_count;
/* "If nbyte is 0, read() will return 0 and have no other
- * results." -- Single Unix Spec */
+ * results." -- Single Unix Spec
+ */
if (count == 0)
result = 1;
else
@@ -1173,25 +1182,28 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
/* ignore layout change for generic CIT_MISC but not for glimpse.
* io context for glimpse must set ci_verify_layout to true,
- * see cl_glimpse_size0() for details. */
+ * see cl_glimpse_size0() for details.
+ */
if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
io->ci_ignore_layout = 1;
/* Enqueue layout lock and get layout version. We need to do this
* even for operations requiring to open file, such as read and write,
- * because it might not grant layout lock in IT_OPEN. */
+ * because it might not grant layout lock in IT_OPEN.
+ */
if (result == 0 && !io->ci_ignore_layout) {
result = ll_layout_refresh(inode, &cio->cui_layout_gen);
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout
* fetch will return -ENOENT, we'd ignore this error
- * and continue with dirty flush. LU-3230. */
+ * and continue with dirty flush. LU-3230.
+ */
result = 0;
if (result < 0)
CERROR("%s: refresh file layout " DFID " error %d.\n",
- ll_get_fsname(inode->i_sb, NULL, 0),
- PFID(lu_object_fid(&obj->co_lu)), result);
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(lu_object_fid(&obj->co_lu)), result);
}
return result;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index c82714ea898e..03c887d8ed83 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -137,7 +137,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
* page may be stale due to layout change, and the process
* will never be notified.
* This operation is expensive but mmap processes have to pay
- * a price themselves. */
+ * a price themselves.
+ */
unmap_mapping_range(conf->coc_inode->i_mapping,
0, OBD_OBJECT_EOF, 0);
@@ -147,7 +148,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
if (conf->coc_opc != OBJECT_CONF_SET)
return 0;
- if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL) {
+ if (conf->u.coc_md && conf->u.coc_md->lsm) {
CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
PFID(&lli->lli_fid), lli->lli_layout_gen,
conf->u.coc_md->lsm->lsm_layout_gen);
@@ -186,9 +187,8 @@ struct ccc_object *cl_inode2ccc(struct inode *inode)
struct cl_object *obj = lli->lli_clob;
struct lu_object *lu;
- LASSERT(obj != NULL);
lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
- LASSERT(lu != NULL);
+ LASSERT(lu);
return lu2ccc(lu);
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index a133475a7c74..33ca3eb34965 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -56,8 +56,8 @@ static void vvp_page_fini_common(struct ccc_page *cp)
{
struct page *vmpage = cp->cpg_page;
- LASSERT(vmpage != NULL);
- page_cache_release(vmpage);
+ LASSERT(vmpage);
+ put_page(vmpage);
}
static void vvp_page_fini(const struct lu_env *env,
@@ -81,7 +81,7 @@ static int vvp_page_own(const struct lu_env *env,
struct ccc_page *vpg = cl2ccc_page(slice);
struct page *vmpage = vpg->cpg_page;
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage);
if (nonblock) {
if (!trylock_page(vmpage))
return -EAGAIN;
@@ -105,7 +105,7 @@ static void vvp_page_assume(const struct lu_env *env,
{
struct page *vmpage = cl2vm_page(slice);
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
wait_on_page_writeback(vmpage);
}
@@ -116,7 +116,7 @@ static void vvp_page_unassume(const struct lu_env *env,
{
struct page *vmpage = cl2vm_page(slice);
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
}
@@ -125,7 +125,7 @@ static void vvp_page_disown(const struct lu_env *env,
{
struct page *vmpage = cl2vm_page(slice);
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
unlock_page(cl2vm_page(slice));
@@ -139,7 +139,7 @@ static void vvp_page_discard(const struct lu_env *env,
struct address_space *mapping;
struct ccc_page *cpg = cl2ccc_page(slice);
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
mapping = vmpage->mapping;
@@ -161,15 +161,15 @@ static int vvp_page_unmap(const struct lu_env *env,
struct page *vmpage = cl2vm_page(slice);
__u64 offset;
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
- offset = vmpage->index << PAGE_CACHE_SHIFT;
+ offset = vmpage->index << PAGE_SHIFT;
/*
* XXX is it safe to call this with the page lock held?
*/
- ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
+ ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
return 0;
}
@@ -199,7 +199,7 @@ static void vvp_page_export(const struct lu_env *env,
{
struct page *vmpage = cl2vm_page(slice);
- LASSERT(vmpage != NULL);
+ LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
if (uptodate)
SetPageUptodate(vmpage);
@@ -232,7 +232,8 @@ static int vvp_page_prep_write(const struct lu_env *env,
LASSERT(!PageDirty(vmpage));
/* ll_writepage path is not a sync write, so need to set page writeback
- * flag */
+ * flag
+ */
if (!pg->cp_sync_io)
set_page_writeback(vmpage);
@@ -262,7 +263,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
set_bit(AS_EIO, &inode->i_mapping->flags);
if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
- obj->cob_discard_page_warned == 0) {
+ obj->cob_discard_page_warned == 0) {
obj->cob_discard_page_warned = 1;
ll_dirty_page_discard_warn(vmpage, ioret);
}
@@ -290,7 +291,7 @@ static void vvp_page_completion_read(const struct lu_env *env,
} else
cp->cpg_defer_uptodate = 0;
- if (page->cp_sync_io == NULL)
+ if (!page->cp_sync_io)
unlock_page(vmpage);
}
@@ -317,7 +318,7 @@ static void vvp_page_completion_write(const struct lu_env *env,
cp->cpg_write_queued = 0;
vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
- if (pg->cp_sync_io != NULL) {
+ if (pg->cp_sync_io) {
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
} else {
@@ -356,15 +357,14 @@ static int vvp_page_make_ready(const struct lu_env *env,
lock_page(vmpage);
if (clear_page_dirty_for_io(vmpage)) {
LASSERT(pg->cp_state == CPS_CACHED);
- /* This actually clears the dirty bit in the radix
- * tree. */
+ /* This actually clears the dirty bit in the radix tree. */
set_page_writeback(vmpage);
- vvp_write_pending(cl2ccc(slice->cpl_obj),
- cl2ccc_page(slice));
+ vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
} else if (pg->cp_state == CPS_PAGEOUT) {
/* is it possible for osc_flush_async_page() to already
- * make it ready? */
+ * make it ready?
+ */
result = -EALREADY;
} else {
CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
@@ -385,7 +385,7 @@ static int vvp_page_print(const struct lu_env *env,
(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
vp->cpg_write_queued, vmpage);
- if (vmpage != NULL) {
+ if (vmpage) {
(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
(long)vmpage->flags, page_count(vmpage),
page_mapcount(vmpage), vmpage->private,
@@ -530,27 +530,26 @@ static const struct cl_page_operations vvp_transient_page_ops = {
};
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct ccc_page *cpg = cl_object_page_slice(obj, page);
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
cpg->cpg_page = vmpage;
- page_cache_get(vmpage);
+ get_page(vmpage);
INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &vvp_page_ops);
+ cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops);
} else {
struct ccc_object *clobj = cl2ccc(obj);
LASSERT(!inode_trylock(clobj->cob_inode));
cl_page_slice_add(page, &cpg->cpg_cl, obj,
- &vvp_transient_page_ops);
+ &vvp_transient_page_ops);
clobj->cob_transient_pages++;
}
return 0;
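
Besides the comment-style and NULL-test cleanups, vvp_page.c picks up the removal of the PAGE_CACHE_* aliases: page_cache_get()/page_cache_release() become get_page()/put_page(), and PAGE_CACHE_SHIFT/SIZE become PAGE_SHIFT/SIZE (the aliases had long been defined as identical). A minimal sketch of the pin/offset/unpin pattern, with a hypothetical slice type standing in for ccc_page:

        #include <linux/mm.h>

        struct my_slice {                       /* hypothetical container */
                struct page *vmpage;
        };

        static void slice_attach(struct my_slice *sl, struct page *vmpage)
        {
                get_page(vmpage);               /* was page_cache_get() */
                sl->vmpage = vmpage;
        }

        static void slice_detach(struct my_slice *sl)
        {
                loff_t off = (loff_t)sl->vmpage->index << PAGE_SHIFT;
                                                /* was PAGE_CACHE_SHIFT */
                pr_debug("releasing page at offset %lld\n", off);
                put_page(sl->vmpage);           /* was page_cache_release() */
                sl->vmpage = NULL;
        }
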
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 8eb43f192d1f..b68dcc921ca2 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -135,7 +135,7 @@ int ll_setxattr_common(struct inode *inode, const char *name,
/* b15587: ignore security.capability xattr for now */
if ((xattr_type == XATTR_SECURITY_T &&
- strcmp(name, "security.capability") == 0))
+ strcmp(name, "security.capability") == 0))
return 0;
/* LU-549: Disable security.selinux when selinux is disabled */
@@ -148,7 +148,7 @@ int ll_setxattr_common(struct inode *inode, const char *name,
(xattr_type == XATTR_ACL_ACCESS_T ||
xattr_type == XATTR_ACL_DEFAULT_T)) {
rce = rct_search(&sbi->ll_rct, current_pid());
- if (rce == NULL ||
+ if (!rce ||
(rce->rce_ops != RMT_LSETFACL &&
rce->rce_ops != RMT_RSETFACL))
return -EOPNOTSUPP;
@@ -158,7 +158,6 @@ int ll_setxattr_common(struct inode *inode, const char *name,
ee = et_search_del(&sbi->ll_et, current_pid(),
ll_inode2fid(inode), xattr_type);
- LASSERT(ee != NULL);
if (valid & OBD_MD_FLXATTR) {
acl = lustre_acl_xattr_merge2ext(
(posix_acl_xattr_header *)value,
@@ -192,12 +191,11 @@ int ll_setxattr_common(struct inode *inode, const char *name,
valid, name, pv, size, 0, flags,
ll_i2suppgid(inode), &req);
#ifdef CONFIG_FS_POSIX_ACL
- if (new_value != NULL)
- /*
- * Release the posix ACL space.
- */
- kfree(new_value);
- if (acl != NULL)
+ /*
+ * Release the posix ACL space.
+ */
+ kfree(new_value);
+ if (acl)
lustre_ext_acl_xattr_free(acl);
#endif
if (rc) {
@@ -239,11 +237,12 @@ int ll_setxattr(struct dentry *dentry, const char *name,
/* Attributes that are saved via getxattr will always have
* the stripe_offset as 0. Instead, the MDS should be
- * allowed to pick the starting OST index. b=17846 */
- if (lump != NULL && lump->lmm_stripe_offset == 0)
+ * allowed to pick the starting OST index. b=17846
+ */
+ if (lump && lump->lmm_stripe_offset == 0)
lump->lmm_stripe_offset = -1;
- if (lump != NULL && S_ISREG(inode->i_mode)) {
+ if (lump && S_ISREG(inode->i_mode)) {
int flags = FMODE_WRITE;
int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
sizeof(*lump) : sizeof(struct lov_user_md_v3);
@@ -312,7 +311,7 @@ int ll_getxattr_common(struct inode *inode, const char *name,
/* b15587: ignore security.capability xattr for now */
if ((xattr_type == XATTR_SECURITY_T &&
- strcmp(name, "security.capability") == 0))
+ strcmp(name, "security.capability") == 0))
return -ENODATA;
/* LU-549: Disable security.selinux when selinux is disabled */
@@ -325,7 +324,7 @@ int ll_getxattr_common(struct inode *inode, const char *name,
(xattr_type == XATTR_ACL_ACCESS_T ||
xattr_type == XATTR_ACL_DEFAULT_T)) {
rce = rct_search(&sbi->ll_rct, current_pid());
- if (rce == NULL ||
+ if (!rce ||
(rce->rce_ops != RMT_LSETFACL &&
rce->rce_ops != RMT_LGETFACL &&
rce->rce_ops != RMT_RSETFACL &&
@@ -366,7 +365,7 @@ do_getxattr:
goto out_xattr;
/* Add "system.posix_acl_access" to the list */
- if (lli->lli_posix_acl != NULL && valid & OBD_MD_FLXATTRLS) {
+ if (lli->lli_posix_acl && valid & OBD_MD_FLXATTRLS) {
if (size == 0) {
rc += sizeof(XATTR_NAME_ACL_ACCESS);
} else if (size - rc >= sizeof(XATTR_NAME_ACL_ACCESS)) {
@@ -398,7 +397,7 @@ getxattr_nocache:
if (size < body->eadatasize) {
CERROR("server bug: replied size %u > %u\n",
- body->eadatasize, (int)size);
+ body->eadatasize, (int)size);
rc = -ERANGE;
goto out;
}
@@ -410,7 +409,7 @@ getxattr_nocache:
/* do not need swab xattr data */
xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->eadatasize);
+ body->eadatasize);
if (!xdata) {
rc = -EFAULT;
goto out;
@@ -482,13 +481,14 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name,
if (size == 0 && S_ISDIR(inode->i_mode)) {
 /* XXX directory EA is fixed for now, optimize to save
- * RPC transfer */
+ * RPC transfer
+ */
rc = sizeof(struct lov_user_md);
goto out;
}
lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL) {
+ if (!lsm) {
if (S_ISDIR(inode->i_mode)) {
rc = ll_dir_getstripe(inode, &lmm,
&lmmsize, &request);
@@ -497,7 +497,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name,
}
} else {
/* LSM is present already after lookup/getattr call.
- * we need to grab layout lock once it is implemented */
+ * we need to grab layout lock once it is implemented
+ */
rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
lmmsize = rc;
}
@@ -510,7 +511,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name,
/* used to call ll_get_max_mdsize() forward to get
* the maximum buffer size, while some apps (such as
* rsync 3.0.x) care much about the exact xattr value
- * size */
+ * size
+ */
rc = lmmsize;
goto out;
}
@@ -526,7 +528,8 @@ ssize_t ll_getxattr(struct dentry *dentry, const char *name,
memcpy(lump, lmm, lmmsize);
/* do not return layout gen for getxattr otherwise it would
* confuse tar --xattr by recognizing layout gen as stripe
- * offset when the file is restored. See LU-2809. */
+ * offset when the file is restored. See LU-2809.
+ */
lump->lmm_layout_gen = 0;
rc = lmmsize;
@@ -560,7 +563,7 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
if (rc < 0)
goto out;
- if (buffer != NULL) {
+ if (buffer) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
char *xattr_name = buffer;
int xlen, rem = rc;
@@ -598,12 +601,12 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
const size_t name_len = sizeof("lov") - 1;
const size_t total_len = prefix_len + name_len + 1;
- if (((rc + total_len) > size) && (buffer != NULL)) {
+ if (((rc + total_len) > size) && buffer) {
ptlrpc_req_finished(request);
return -ERANGE;
}
- if (buffer != NULL) {
+ if (buffer) {
buffer += rc;
memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
memcpy(buffer + prefix_len, "lov", name_len);
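
The ll_setxattr_common() hunk above relies on kfree(NULL) being a guaranteed no-op, which is why the `if (new_value != NULL)` guard (and the comment wedged inside it) can collapse into a bare call:

        #include <linux/slab.h>

        static void release_acl_copy(char *new_value)
        {
                /* kfree(NULL) is defined as a no-op, so no guard is needed */
                kfree(new_value);
        }
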
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index d1402762a0b2..3480ce2bb3cc 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -23,7 +23,8 @@
*/
struct ll_xattr_entry {
struct list_head xe_list; /* protected with
- * lli_xattrs_list_rwsem */
+ * lli_xattrs_list_rwsem
+ */
char *xe_name; /* xattr name, \0-terminated */
char *xe_value; /* xattr value */
unsigned xe_namelen; /* strlen(xe_name) + 1 */
@@ -59,9 +60,6 @@ void ll_xattr_fini(void)
*/
static void ll_xattr_cache_init(struct ll_inode_info *lli)
{
-
- LASSERT(lli != NULL);
-
INIT_LIST_HEAD(&lli->lli_xattrs);
lli->lli_flags |= LLIF_XATTR_CACHE;
}
@@ -83,8 +81,7 @@ static int ll_xattr_cache_find(struct list_head *cache,
list_for_each_entry(entry, cache, xe_list) {
/* xattr_name == NULL means look for any entry */
- if (xattr_name == NULL ||
- strcmp(xattr_name, entry->xe_name) == 0) {
+ if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) {
*xattr = entry;
CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
entry->xe_name, entry->xe_vallen,
@@ -117,8 +114,8 @@ static int ll_xattr_cache_add(struct list_head *cache,
return -EPROTO;
}
- xattr = kmem_cache_alloc(xattr_kmem, GFP_NOFS | __GFP_ZERO);
- if (xattr == NULL) {
+ xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS);
+ if (!xattr) {
CDEBUG(D_CACHE, "failed to allocate xattr\n");
return -ENOMEM;
}
@@ -136,8 +133,8 @@ static int ll_xattr_cache_add(struct list_head *cache,
xattr->xe_vallen = xattr_val_len;
list_add(&xattr->xe_list, cache);
- CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name,
- xattr_val_len, xattr_val);
+ CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len,
+ xattr_val);
return 0;
err_value:
@@ -194,7 +191,7 @@ static int ll_xattr_cache_list(struct list_head *cache,
list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
- xld_buffer, xld_tail, xattr->xe_name);
+ xld_buffer, xld_tail, xattr->xe_name);
if (xld_buffer) {
xld_size -= xattr->xe_namelen;
@@ -270,7 +267,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
struct lookup_intent *oit,
struct ptlrpc_request **req)
{
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
struct lustre_handle lockh = { 0 };
struct md_op_data *op_data;
struct ll_inode_info *lli = ll_i2info(inode);
@@ -284,7 +281,8 @@ static int ll_xattr_find_get_lock(struct inode *inode,
mutex_lock(&lli->lli_xattrs_enq_lock);
/* inode may have been shrunk and recreated, so data is gone, match lock
- * only when data exists. */
+ * only when data exists.
+ */
if (ll_xattr_cache_valid(lli)) {
/* Try matching first. */
mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
@@ -359,7 +357,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
}
/* Matched but no cache? Cancelled on error by a parallel refill. */
- if (unlikely(req == NULL)) {
+ if (unlikely(!req)) {
CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
rc = -EIO;
goto out_maybe_drop;
@@ -376,19 +374,19 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
+ if (!body) {
CERROR("no MDT BODY in the refill xattr reply\n");
rc = -EPROTO;
goto out_destroy;
}
/* do not need swab xattr data */
xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
- body->eadatasize);
+ body->eadatasize);
xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
- body->aclsize);
+ body->aclsize);
xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
body->max_mdsize * sizeof(__u32));
- if (xdata == NULL || xval == NULL || xsizes == NULL) {
+ if (!xdata || !xval || !xsizes) {
CERROR("wrong setxattr reply\n");
rc = -EPROTO;
goto out_destroy;
@@ -404,7 +402,7 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
for (i = 0; i < body->max_mdsize; i++) {
CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
/* Perform consistency checks: attr names and vals in pill */
- if (memchr(xdata, 0, xtail - xdata) == NULL) {
+ if (!memchr(xdata, 0, xtail - xdata)) {
CERROR("xattr protocol violation (names are broken)\n");
rc = -EPROTO;
} else if (xval + *xsizes > xvtail) {
@@ -471,11 +469,8 @@ out_destroy:
* \retval -ERANGE the buffer is not large enough
* \retval -ENODATA no such attr or the list is empty
*/
-int ll_xattr_cache_get(struct inode *inode,
- const char *name,
- char *buffer,
- size_t size,
- __u64 valid)
+int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer,
+ size_t size, __u64 valid)
{
struct lookup_intent oit = { .it_op = IT_GETXATTR };
struct ll_inode_info *lli = ll_i2info(inode);
@@ -504,7 +499,7 @@ int ll_xattr_cache_get(struct inode *inode,
if (size != 0) {
if (size >= xattr->xe_vallen)
memcpy(buffer, xattr->xe_value,
- xattr->xe_vallen);
+ xattr->xe_vallen);
else
rc = -ERANGE;
}
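
The allocation change in ll_xattr_cache_add() is a pure equivalence: kmem_cache_alloc(c, gfp | __GFP_ZERO) and kmem_cache_zalloc(c, gfp) return the same zeroed object, the latter being the idiomatic spelling. A sketch, assuming xattr_kmem is the cache declared earlier in this file:

        #include <linux/slab.h>

        static struct ll_xattr_entry *xattr_entry_alloc(void)
        {
                /* same as kmem_cache_alloc(xattr_kmem, GFP_NOFS | __GFP_ZERO) */
                return kmem_cache_zalloc(xattr_kmem, GFP_NOFS);
        }
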
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index ee235926f52b..378691b2a062 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -58,7 +58,8 @@ int lmv_fld_lookup(struct lmv_obd *lmv,
int rc;
/* FIXME: Currently ZFS still use local seq for ROOT unfortunately, and
- * this fid_is_local check should be removed once LU-2240 is fixed */
+ * this fid_is_local check should be removed once LU-2240 is fixed
+ */
LASSERTF((fid_seq_in_fldb(fid_seq(fid)) ||
fid_seq_is_local_file(fid_seq(fid))) &&
fid_is_sane(fid), DFID" is insane!\n", PFID(fid));
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 66de27f1d289..e0958eaed054 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -69,7 +69,7 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
int rc = 0;
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
+ if (!body)
return -EPROTO;
LASSERT((body->valid & OBD_MD_MDS));
@@ -107,14 +107,16 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
op_data->op_fid1 = body->fid1;
/* Sent the parent FID to the remote MDT */
- if (parent_fid != NULL) {
+ if (parent_fid) {
/* The parent fid is only for remote open to
* check whether the open is from OBF,
- * see mdt_cross_open */
+ * see mdt_cross_open
+ */
LASSERT(it->it_op & IT_OPEN);
op_data->op_fid2 = *parent_fid;
/* Add object FID to op_fid3, in case it needs to check stale
- * (M_CHECK_STALE), see mdc_finish_intent_lock */
+ * (M_CHECK_STALE), see mdc_finish_intent_lock
+ */
op_data->op_fid3 = body->fid1;
}
@@ -173,7 +175,8 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
return PTR_ERR(tgt);
/* If it is ready to open the file by FID, do not need
- * allocate FID at all, otherwise it will confuse MDT */
+ * to allocate a FID at all; otherwise it will confuse the MDT
+ */
if ((it->it_op & IT_CREAT) &&
!(it->it_flags & MDS_OPEN_BY_FID)) {
/*
@@ -204,7 +207,7 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
return rc;
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
+ if (!body)
return -EPROTO;
/*
* Not cross-ref case, just get out of here.
@@ -268,9 +271,9 @@ static int lmv_intent_lookup(struct obd_export *exp,
op_data->op_bias &= ~MDS_CROSS_REF;
rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it,
- flags, reqp, cb_blocking, extra_lock_flags);
+ flags, reqp, cb_blocking, extra_lock_flags);
- if (rc < 0 || *reqp == NULL)
+ if (rc < 0 || !*reqp)
return rc;
/*
@@ -278,7 +281,7 @@ static int lmv_intent_lookup(struct obd_export *exp,
* remote inode. Let's check this.
*/
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
+ if (!body)
return -EPROTO;
/* Not cross-ref case, just get out of here. */
if (likely(!(body->valid & OBD_MD_MDS)))
@@ -299,7 +302,6 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
struct obd_device *obd = exp->exp_obd;
int rc;
- LASSERT(it != NULL);
LASSERT(fid_is_sane(&op_data->op_fid1));
CDEBUG(D_INODE, "INTENT LOCK '%s' for '%*s' on "DFID"\n",
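
A recurring change in lmv_intent.c (and again in lmv_obd.c below) is downgrading reply-parsing LASSERTs: req_capsule_server_get() returning NULL means a malformed or short server reply, which on a production client should fail the request with -EPROTO rather than trip an assertion. The pattern, as used in the hunks above:

        body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
        if (!body)
                return -EPROTO;         /* malformed reply, don't assert */
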
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index eb8e673cbc3f..8a0087190e23 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -66,7 +66,7 @@ static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req)
struct mdt_body *body;
struct lmv_stripe_md *mea;
- LASSERT(req != NULL);
+ LASSERT(req);
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
@@ -75,13 +75,11 @@ static inline struct lmv_stripe_md *lmv_get_mea(struct ptlrpc_request *req)
mea = req_capsule_server_sized_get(&req->rq_pill, &RMF_MDT_MD,
body->eadatasize);
- LASSERT(mea != NULL);
-
if (mea->mea_count == 0)
return NULL;
if (mea->mea_magic != MEA_MAGIC_LAST_CHAR &&
- mea->mea_magic != MEA_MAGIC_ALL_CHARS &&
- mea->mea_magic != MEA_MAGIC_HASH_SEGMENT)
+ mea->mea_magic != MEA_MAGIC_ALL_CHARS &&
+ mea->mea_magic != MEA_MAGIC_HASH_SEGMENT)
return NULL;
return mea;
@@ -101,7 +99,7 @@ lmv_get_target(struct lmv_obd *lmv, u32 mds)
int i;
for (i = 0; i < count; i++) {
- if (lmv->tgts[i] == NULL)
+ if (!lmv->tgts[i])
continue;
if (lmv->tgts[i]->ltd_idx == mds)
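
lmv_get_target() shows why nearly every loop in this driver guards lmv->tgts[i]: the target array is sparse, so an entry can be NULL (never configured) or present but not yet connected (ltd_exp is NULL). A sketch of the canonical iteration, assuming only the fields visible in these hunks:

        for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
                struct lmv_tgt_desc *tgt = lmv->tgts[i];

                if (!tgt || !tgt->ltd_exp)      /* hole, or not connected yet */
                        continue;

                /* ... operate on tgt->ltd_exp ... */
        }
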
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index bbafe0a710d8..9abb7c2b9231 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -53,6 +53,7 @@
#include "../include/lprocfs_status.h"
#include "../include/lustre_lite.h"
#include "../include/lustre_fid.h"
+#include "../include/lustre_kernelcomm.h"
#include "lmv_internal.h"
static void lmv_activate_target(struct lmv_obd *lmv,
@@ -87,7 +88,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
spin_lock(&lmv->lmv_lock);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL)
+ if (!tgt || !tgt->ltd_exp)
continue;
CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i,
@@ -103,7 +104,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
}
obd = class_exp2obd(tgt->ltd_exp);
- if (obd == NULL) {
+ if (!obd) {
rc = -ENOTCONN;
goto out_lmv_lock;
}
@@ -237,7 +238,7 @@ static int lmv_connect(const struct lu_env *env,
* and MDC stuff will be called directly, for instance while reading
* ../mdc/../kbytesfree procfs file, etc.
*/
- if (data->ocd_connect_flags & OBD_CONNECT_REAL)
+ if (data && data->ocd_connect_flags & OBD_CONNECT_REAL)
rc = lmv_check_connect(obd);
if (rc && lmv->lmv_tgts_kobj)
@@ -261,7 +262,7 @@ static void lmv_set_timeouts(struct obd_device *obd)
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0)
+ if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
continue;
obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
@@ -301,8 +302,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
return 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL ||
- lmv->tgts[i]->ltd_exp == NULL ||
+ if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
lmv->tgts[i]->ltd_active == 0) {
CWARN("%s: NULL export for %d\n", obd->obd_name, i);
continue;
@@ -311,7 +311,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize,
cookiesize, def_cookiesize);
if (rc) {
- CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d.\n",
+ CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
obd->obd_name, i, rc);
break;
}
@@ -339,9 +339,8 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
}
CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
- mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- tgt->ltd_uuid.uuid, obd->obd_uuid.uuid,
- cluuid->uuid);
+ mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
+ tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, cluuid->uuid);
if (!mdc_obd->obd_set_up) {
CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
@@ -397,8 +396,8 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
lmv->max_cookiesize, lmv->max_def_cookiesize);
CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
- mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- atomic_read(&obd->obd_refcount));
+ mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
+ atomic_read(&obd->obd_refcount));
if (lmv->lmv_tgts_kobj)
/* Even if we failed to create the link, that's fine */
@@ -409,7 +408,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
static void lmv_del_target(struct lmv_obd *lmv, int index)
{
- if (lmv->tgts[index] == NULL)
+ if (!lmv->tgts[index])
return;
kfree(lmv->tgts[index]);
@@ -418,7 +417,7 @@ static void lmv_del_target(struct lmv_obd *lmv, int index)
}
static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
- __u32 index, int gen)
+ __u32 index, int gen)
{
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
@@ -441,7 +440,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
}
}
- if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) {
+ if ((index < lmv->tgts_size) && lmv->tgts[index]) {
tgt = lmv->tgts[index];
CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
obd->obd_name,
@@ -459,7 +458,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
while (newsize < index + 1)
newsize <<= 1;
newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
- if (newtgts == NULL) {
+ if (!newtgts) {
lmv_init_unlock(lmv);
return -ENOMEM;
}
@@ -538,11 +537,9 @@ int lmv_check_connect(struct obd_device *obd)
CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
lmv->cluuid.uuid, obd->obd_name);
- LASSERT(lmv->tgts != NULL);
-
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
tgt = lmv->tgts[i];
- if (tgt == NULL)
+ if (!tgt)
continue;
rc = lmv_connect_mdc(obd, tgt);
if (rc)
@@ -562,7 +559,7 @@ int lmv_check_connect(struct obd_device *obd)
int rc2;
tgt = lmv->tgts[i];
- if (tgt == NULL)
+ if (!tgt)
continue;
tgt->ltd_active = 0;
if (tgt->ltd_exp) {
@@ -585,9 +582,6 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
struct obd_device *mdc_obd;
int rc;
- LASSERT(tgt != NULL);
- LASSERT(obd != NULL);
-
mdc_obd = class_exp2obd(tgt->ltd_exp);
if (mdc_obd) {
@@ -640,7 +634,7 @@ static int lmv_disconnect(struct obd_export *exp)
goto out_local;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
continue;
lmv_disconnect_mdc(obd, lmv->tgts[i]);
@@ -662,7 +656,8 @@ out_local:
return rc;
}
-static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg)
+static int lmv_fid2path(struct obd_export *exp, int len, void *karg,
+ void __user *uarg)
{
struct obd_device *obddev = class_exp2obd(exp);
struct lmv_obd *lmv = &obddev->u.lmv;
@@ -683,8 +678,9 @@ repeat_fid2path:
goto out_fid2path;
/* If remote_gf != NULL, it means just building the
- * path on the remote MDT, copy this path segment to gf */
- if (remote_gf != NULL) {
+ * path on the remote MDT, copy this path segment to gf
+ */
+ if (remote_gf) {
struct getinfo_fid2path *ori_gf;
char *ptr;
@@ -714,7 +710,7 @@ repeat_fid2path:
goto out_fid2path;
/* sigh, has to go to another MDT to do path building further */
- if (remote_gf == NULL) {
+ if (!remote_gf) {
remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
remote_gf = kzalloc(remote_gf_size, GFP_NOFS);
if (!remote_gf) {
@@ -779,7 +775,7 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv,
nr_out = 0;
for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
curr_tgt = lmv_find_target(lmv,
- &hur_in->hur_user_item[i].hui_fid);
+ &hur_in->hur_user_item[i].hui_fid);
if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
hur_out->hur_user_item[nr_out] =
hur_in->hur_user_item[i];
@@ -792,14 +788,17 @@ static void lmv_hsm_req_build(struct lmv_obd *lmv,
}
static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
- struct lustre_kernelcomm *lk, void *uarg)
+ struct lustre_kernelcomm *lk,
+ void __user *uarg)
{
- int i, rc = 0;
+ int rc = 0;
+ __u32 i;
/* unregister request (call from llapi_hsm_copytool_fini) */
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
/* best effort: try to clean as much as possible
- * (continue on error) */
+ * (continue on error)
+ */
obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
}
@@ -808,23 +807,25 @@ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
* and will unregister automatically.
*/
rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
+
return rc;
}
static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
- struct lustre_kernelcomm *lk, void *uarg)
+ struct lustre_kernelcomm *lk, void __user *uarg)
{
- struct file *filp;
- int i, j, err;
- int rc = 0;
- bool any_set = false;
+ struct file *filp;
+ __u32 i, j;
+ int err, rc = 0;
+ bool any_set = false;
+ struct kkuc_ct_data kcd = { 0 };
/* All or nothing: try to register to all MDS.
* In case of failure, unregister from previous MDS,
- * except if it because of inactive target. */
+ * except if it is because of an inactive target.
+ */
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
- len, lk, uarg);
+ err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
if (err) {
if (lmv->tgts[i]->ltd_active) {
/* permanent error */
@@ -836,13 +837,13 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
/* unregister from previous MDS */
for (j = 0; j < i; j++)
obd_iocontrol(cmd,
- lmv->tgts[j]->ltd_exp,
- len, lk, uarg);
+ lmv->tgts[j]->ltd_exp,
+ len, lk, uarg);
return rc;
}
/* else: transient error.
- * kuc will register to the missing MDT
- * when it is back */
+ * kuc will register to the missing MDT when it is back
+ */
} else {
any_set = true;
}
@@ -854,17 +855,25 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
/* at least one registration done, with no failure */
filp = fget(lk->lk_wfd);
- if (filp == NULL) {
+ if (!filp)
return -EBADF;
+
+ kcd.kcd_magic = KKUC_CT_DATA_MAGIC;
+ kcd.kcd_uuid = lmv->cluuid;
+ kcd.kcd_archive = lk->lk_data;
+
+ rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group,
+ &kcd, sizeof(kcd));
+ if (rc) {
+ if (filp)
+ fput(filp);
}
- rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data);
- if (rc != 0 && filp != NULL)
- fput(filp);
+
return rc;
}
static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
- int len, void *karg, void *uarg)
+ int len, void *karg, void __user *uarg)
{
struct obd_device *obddev = class_exp2obd(exp);
struct lmv_obd *lmv = &obddev->u.lmv;
@@ -887,8 +896,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (index >= count)
return -ENODEV;
- if (lmv->tgts[index] == NULL ||
- lmv->tgts[index]->ltd_active == 0)
+ if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0)
return -ENODATA;
mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp);
@@ -897,8 +905,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
/* copy UUID */
if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
- min((int) data->ioc_plen2,
- (int) sizeof(struct obd_uuid))))
+ min((int)data->ioc_plen2,
+ (int)sizeof(struct obd_uuid))))
return -EFAULT;
rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf,
@@ -907,8 +915,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (rc)
return rc;
if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min((int) data->ioc_plen1,
- (int) sizeof(stat_buf))))
+ min((int)data->ioc_plen1,
+ (int)sizeof(stat_buf))))
return -EFAULT;
break;
}
@@ -922,18 +930,18 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
return -EINVAL;
tgt = lmv->tgts[qctl->qc_idx];
- if (tgt == NULL || tgt->ltd_exp == NULL)
+ if (!tgt || !tgt->ltd_exp)
return -EINVAL;
} else if (qctl->qc_valid == QC_UUID) {
for (i = 0; i < count; i++) {
tgt = lmv->tgts[i];
- if (tgt == NULL)
+ if (!tgt)
continue;
if (!obd_uuid_equals(&tgt->ltd_uuid,
&qctl->obd_uuid))
continue;
- if (tgt->ltd_exp == NULL)
+ if (!tgt->ltd_exp)
return -EINVAL;
break;
@@ -967,8 +975,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (icc->icc_mdtindex >= count)
return -ENODEV;
- if (lmv->tgts[icc->icc_mdtindex] == NULL ||
- lmv->tgts[icc->icc_mdtindex]->ltd_exp == NULL ||
+ if (!lmv->tgts[icc->icc_mdtindex] ||
+ !lmv->tgts[icc->icc_mdtindex]->ltd_exp ||
lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
return -ENODEV;
rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp,
@@ -976,7 +984,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
break;
}
case LL_IOC_GET_CONNECT_FLAGS: {
- if (lmv->tgts[0] == NULL)
+ if (!lmv->tgts[0])
return -ENODATA;
rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg);
break;
@@ -993,10 +1001,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
- return PTR_ERR(tgt);
+ return PTR_ERR(tgt);
- if (tgt->ltd_exp == NULL)
- return -EINVAL;
+ if (!tgt->ltd_exp)
+ return -EINVAL;
rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
break;
@@ -1021,7 +1029,8 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
/* if the request is about a single fid
* or if there is a single MDS, no need to split
- * the request. */
+ * the request.
+ */
if (reqcount == 1 || count == 1) {
tgt = lmv_find_target(lmv,
&hur->hur_user_item[0].hui_fid);
@@ -1044,7 +1053,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
hur_user_item[nr])
+ hur->hur_request.hr_data_len;
req = libcfs_kvzalloc(reqlen, GFP_NOFS);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
@@ -1070,7 +1079,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (IS_ERR(tgt2))
return PTR_ERR(tgt2);
- if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL))
+ if (!tgt1->ltd_exp || !tgt2->ltd_exp)
return -EINVAL;
/* only files on same MDT can have their layouts swapped */
@@ -1094,11 +1103,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
struct obd_device *mdc_obd;
int err;
- if (lmv->tgts[i] == NULL ||
- lmv->tgts[i]->ltd_exp == NULL)
+ if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
continue;
/* ll_umount_begin() sets force flag but for lmv, not
- * mdc. Let's pass it through */
+ * mdc. Let's pass it through
+ */
mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp);
mdc_obd->obd_force = obddev->obd_force;
err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len,
@@ -1122,51 +1131,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
return rc;
}
-#if 0
-static int lmv_all_chars_policy(int count, const char *name,
- int len)
-{
- unsigned int c = 0;
-
- while (len > 0)
- c += name[--len];
- c = c % count;
- return c;
-}
-
-static int lmv_nid_policy(struct lmv_obd *lmv)
-{
- struct obd_import *imp;
- __u32 id;
-
- /*
- * XXX: To get nid we assume that underlying obd device is mdc.
- */
- imp = class_exp2cliimp(lmv->tgts[0].ltd_exp);
- id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32);
- return id % lmv->desc.ld_tgt_count;
-}
-
-static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
- enum placement_policy placement)
-{
- switch (placement) {
- case PLACEMENT_CHAR_POLICY:
- return lmv_all_chars_policy(lmv->desc.ld_tgt_count,
- op_data->op_name,
- op_data->op_namelen);
- case PLACEMENT_NID_POLICY:
- return lmv_nid_policy(lmv);
-
- default:
- break;
- }
-
- CERROR("Unsupported placement policy %x\n", placement);
- return -EINVAL;
-}
-#endif
-
/**
* This is _inode_ placement policy function (not name).
*/
@@ -1175,7 +1139,7 @@ static int lmv_placement_policy(struct obd_device *obd,
{
struct lmv_obd *lmv = &obd->u.lmv;
- LASSERT(mds != NULL);
+ LASSERT(mds);
if (lmv->desc.ld_tgt_count == 1) {
*mds = 0;
@@ -1205,7 +1169,8 @@ static int lmv_placement_policy(struct obd_device *obd,
}
/* Allocate new fid on target according to operation type and parent
- * home mds. */
+ * home mds.
+ */
*mds = op_data->op_mds;
return 0;
}
@@ -1225,7 +1190,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
*/
mutex_lock(&tgt->ltd_fid_mutex);
- if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL) {
+ if (tgt->ltd_active == 0 || !tgt->ltd_exp) {
rc = -ENODEV;
goto out;
}
@@ -1252,8 +1217,8 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
u32 mds = 0;
int rc;
- LASSERT(op_data != NULL);
- LASSERT(fid != NULL);
+ LASSERT(op_data);
+ LASSERT(fid);
rc = lmv_placement_policy(obd, op_data, &mds);
if (rc) {
@@ -1291,7 +1256,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
}
lmv->tgts = kcalloc(32, sizeof(*lmv->tgts), GFP_NOFS);
- if (lmv->tgts == NULL)
+ if (!lmv->tgts)
return -ENOMEM;
lmv->tgts_size = 32;
@@ -1332,11 +1297,11 @@ static int lmv_cleanup(struct obd_device *obd)
struct lmv_obd *lmv = &obd->u.lmv;
fld_client_fini(&lmv->lmv_fld);
- if (lmv->tgts != NULL) {
+ if (lmv->tgts) {
int i;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL)
+ if (!lmv->tgts[i])
continue;
lmv_del_target(lmv, i);
}
@@ -1357,7 +1322,8 @@ static int lmv_process_config(struct obd_device *obd, u32 len, void *buf)
switch (lcfg->lcfg_command) {
case LCFG_ADD_MDC:
/* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID
- * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */
+ * 2:0 3:1 4:lustre-MDT0000-mdc_UUID
+ */
if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) {
rc = -EINVAL;
goto out;
@@ -1402,7 +1368,7 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
return -ENOMEM;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
continue;
rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp,
@@ -1421,7 +1387,8 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
* i.e. mount does not need the merged osfs
* from all of MDT.
* And also clients can be mounted as long as
- * MDT0 is in service*/
+ * MDT0 is in service
+ */
if (flags & OBD_STATFS_FOR_MDT0)
goto out_free_temp;
} else {
@@ -1547,7 +1514,7 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
* space of MDT storing inode.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
continue;
md_null_inode(lmv->tgts[i]->ltd_exp, fid);
}
@@ -1575,7 +1542,7 @@ static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
* space of MDT storing inode.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
+ if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
continue;
rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data);
if (rc)
@@ -1655,7 +1622,7 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
cap_effective, rdev, request);
if (rc == 0) {
- if (*request == NULL)
+ if (!*request)
return rc;
CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
}
@@ -1701,7 +1668,6 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
int pmode;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
if (!(body->valid & OBD_MD_MDS))
return 0;
@@ -1808,7 +1774,6 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
body = req_capsule_server_get(&(*request)->rq_pill,
&RMF_MDT_BODY);
- LASSERT(body != NULL);
if (body->valid & OBD_MD_MDS) {
struct lu_fid rid = body->fid1;
@@ -1842,7 +1807,8 @@ lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
NULL)
static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
- int op_tgt, ldlm_mode_t mode, int bits, int flag)
+ int op_tgt, enum ldlm_mode mode, int bits,
+ int flag)
{
struct lu_fid *fid = md_op_data_fid(op_data, flag);
struct obd_device *obd = exp->exp_obd;
@@ -2051,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
* |s|e|f|p|ent| 0 | ... | 0 |
* '----------------- -----'
*
- * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
+ * However, on hosts where the native VM page size (PAGE_SIZE) is
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
@@ -2082,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
* - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
* to the first entry of the next lu_dirpage.
*/
-#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
+#if PAGE_SIZE > LU_PAGE_SIZE
static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
{
int i;
@@ -2097,7 +2063,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
while (--nlupgs > 0) {
ent = lu_dirent_start(dp);
- for (end_dirent = ent; ent != NULL;
+ for (end_dirent = ent; ent;
end_dirent = ent, ent = lu_dirent_next(ent))
;
@@ -2117,7 +2083,8 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
break;
/* Enlarge the end entry lde_reclen from 0 to
- * first entry of next lu_dirpage. */
+ * first entry of next lu_dirpage.
+ */
LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0);
end_dirent->lde_reclen =
cpu_to_le16((char *)(dp->ldp_entries) -
@@ -2134,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
-#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
+#endif /* PAGE_SIZE > LU_PAGE_SIZE */
static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct page **pages, struct ptlrpc_request **request)
@@ -2143,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
__u64 offset = op_data->op_offset;
int rc;
- int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
+ int ncfspgs; /* pages read in PAGE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
@@ -2162,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
if (rc != 0)
return rc;
- ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
- >> PAGE_CACHE_SHIFT;
+ ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);
@@ -2227,7 +2194,7 @@ retry:
return rc;
body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
+ if (!body)
return -EPROTO;
/* Not cross-ref case, just get out of here. */
@@ -2255,7 +2222,8 @@ retry:
* 4. Then A will resend unlink RPC to MDT0. (retry 2nd times).
*
* In theory, it might try unlimited time here, but it should
- * be very rare case. */
+ * be a very rare case.
+ */
op_data->op_fid2 = body->fid1;
ptlrpc_req_finished(*request);
*request = NULL;
@@ -2270,7 +2238,8 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
switch (stage) {
case OBD_CLEANUP_EARLY:
/* XXX: here should be calling obd_precleanup() down to
- * stack. */
+ * stack.
+ */
break;
case OBD_CLEANUP_EXPORTS:
fld_client_debugfs_fini(&lmv->lmv_fld);
@@ -2291,7 +2260,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
int rc = 0;
obd = class_exp2obd(exp);
- if (obd == NULL) {
+ if (!obd) {
CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
exp->exp_handle.h_cookie);
return -EINVAL;
@@ -2312,7 +2281,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
/*
* All tgts should be connected when this gets called.
*/
- if (tgt == NULL || tgt->ltd_exp == NULL)
+ if (!tgt || !tgt->ltd_exp)
continue;
if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
@@ -2355,7 +2324,7 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
int rc = 0;
obd = class_exp2obd(exp);
- if (obd == NULL) {
+ if (!obd) {
CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
exp->exp_handle.h_cookie);
return -EINVAL;
@@ -2368,7 +2337,7 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL)
+ if (!tgt || !tgt->ltd_exp)
continue;
err = obd_set_info_async(env, tgt->ltd_exp,
@@ -2403,9 +2372,9 @@ static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
return 0;
}
- if (*lmmp == NULL) {
+ if (!*lmmp) {
*lmmp = libcfs_kvzalloc(mea_size, GFP_NOFS);
- if (*lmmp == NULL)
+ if (!*lmmp)
return -ENOMEM;
}
@@ -2443,10 +2412,10 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
__u32 magic;
mea_size = lmv_get_easize(lmv);
- if (lsmp == NULL)
+ if (!lsmp)
return mea_size;
- if (*lsmp != NULL && lmm == NULL) {
+ if (*lsmp && !lmm) {
kvfree(*tmea);
*lsmp = NULL;
return 0;
@@ -2455,7 +2424,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
LASSERT(mea_size == lmm_size);
*tmea = libcfs_kvzalloc(mea_size, GFP_NOFS);
- if (*tmea == NULL)
+ if (!*tmea)
return -ENOMEM;
if (!lmm)
@@ -2485,8 +2454,8 @@ static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
}
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- ldlm_cancel_flags_t flags, void *opaque)
+ ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags, void *opaque)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
@@ -2494,10 +2463,10 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
int err;
int i;
- LASSERT(fid != NULL);
+ LASSERT(fid);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL ||
+ if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
lmv->tgts[i]->ltd_active == 0)
continue;
@@ -2519,14 +2488,16 @@ static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
return rc;
}
-static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh)
+static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid,
+ enum ldlm_type type,
+ ldlm_policy_data_t *policy,
+ enum ldlm_mode mode,
+ struct lustre_handle *lockh)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- ldlm_mode_t rc;
+ enum ldlm_mode rc;
int i;
CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
@@ -2538,8 +2509,7 @@ static ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
* one fid was created in.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (lmv->tgts[i] == NULL ||
- lmv->tgts[i]->ltd_exp == NULL ||
+ if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
lmv->tgts[i]->ltd_active == 0)
continue;
@@ -2695,7 +2665,7 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL || tgt->ltd_active == 0)
+ if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
continue;
if (!tgt->ltd_active) {
CDEBUG(D_HA, "mdt %d is inactive.\n", i);
@@ -2730,7 +2700,7 @@ static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
int err;
tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) {
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
CERROR("lmv idx %d inactive\n", i);
return -EIO;
}
@@ -2813,7 +2783,8 @@ static void lmv_exit(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver");
+MODULE_DESCRIPTION("Lustre Logical Metadata Volume");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
module_init(lmv_init);
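
The signature changes to lmv_fid2path(), lmv_hsm_ct_register()/lmv_hsm_ct_unregister() and lmv_iocontrol() add the __user sparse annotation, documenting that uarg is a userspace pointer which may only be dereferenced through copy_to_user()/copy_from_user(). A minimal sketch of the convention, with a hypothetical handler (length validation assumed done by the caller):

        #include <linux/uaccess.h>

        /* Hypothetical ioctl-style handler: karg is kernel memory,
         * uarg is a userspace pointer and must go through copy_to_user().
         */
        static int my_iocontrol(unsigned int cmd, int len, void *karg,
                                void __user *uarg)
        {
                if (copy_to_user(uarg, karg, len))
                        return -EFAULT;
                return 0;
        }
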
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index 40cf4d9f0486..b39e364a29ab 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -138,7 +138,7 @@ static int lmv_desc_uuid_seq_show(struct seq_file *m, void *v)
struct obd_device *dev = (struct obd_device *)m->private;
struct lmv_obd *lmv;
- LASSERT(dev != NULL);
+ LASSERT(dev);
lmv = &dev->u.lmv;
seq_printf(m, "%s\n", lmv->desc.ld_uuid.uuid);
return 0;
@@ -171,7 +171,7 @@ static int lmv_tgt_seq_show(struct seq_file *p, void *v)
{
struct lmv_tgt_desc *tgt = v;
- if (tgt == NULL)
+ if (!tgt)
return 0;
seq_printf(p, "%d: %s %sACTIVE\n",
tgt->ltd_idx, tgt->ltd_uuid.uuid,
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 66a2492c1cc3..7dd3162b51e9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -579,51 +579,49 @@ extern struct kmem_cache *lovsub_req_kmem;
extern struct kmem_cache *lov_lock_link_kmem;
-int lov_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-
-int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link,
- struct lovsub_lock *sub);
+int lov_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf);
+int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf);
+int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
+int lov_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
+
+int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
+int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
+int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+void lov_lock_unlink(const struct lu_env *env, struct lov_lock_link *link,
+ struct lovsub_lock *sub);
struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
int stripe);
-void lov_sub_put(struct lov_io_sub *sub);
-int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
- struct lovsub_lock *sublock,
- const struct cl_lock_descr *d, int idx);
-
-int lov_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-
-int lov_page_init_empty(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
-int lov_page_init_raid0(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+void lov_sub_put(struct lov_io_sub *sub);
+int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
+ struct lovsub_lock *sublock,
+ const struct cl_lock_descr *d, int idx);
+
+int lov_page_init(const struct lu_env *env, struct cl_object *ob,
+ struct cl_page *page, struct page *vmpage);
+int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
+ struct cl_page *page, struct page *vmpage);
+
+int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
+int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, struct page *vmpage);
struct lu_object *lov_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev);
+ const struct lu_object_header *hdr,
+ struct lu_device *dev);
struct lu_object *lovsub_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
@@ -631,9 +629,8 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
struct lov_lock *lck,
struct lovsub_lock *sub);
-struct lov_io_sub *lov_page_subio(const struct lu_env *env,
- struct lov_io *lio,
- const struct cl_page_slice *slice);
+struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
+ const struct cl_page_slice *slice);
#define lov_foreach_target(lov, var) \
for (var = 0; var < lov_targets_nr(lov); ++var)
@@ -651,7 +648,7 @@ static inline struct lov_session *lov_env_session(const struct lu_env *env)
struct lov_session *ses;
ses = lu_context_key_get(env->le_ses, &lov_session_key);
- LASSERT(ses != NULL);
+ LASSERT(ses);
return ses;
}
@@ -759,7 +756,7 @@ static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
const struct cl_lock_slice *slice;
slice = cl_lock_at(lock, &lovsub_device_type);
- LASSERT(slice != NULL);
+ LASSERT(slice);
return cl2lovsub_lock(slice);
}
@@ -798,7 +795,7 @@ static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
}
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
- const struct cl_io_slice *ios)
+ const struct cl_io_slice *ios)
{
struct lov_io *lio;
@@ -817,7 +814,7 @@ static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
struct lov_thread_info *info;
info = lu_context_key_get(&env->le_ctx, &lov_key);
- LASSERT(info != NULL);
+ LASSERT(info);
return info;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 3733fdc88c8c..532ef87dfb44 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -142,8 +142,8 @@ static void *lov_key_init(const struct lu_context *ctx,
{
struct lov_thread_info *info;
- info = kmem_cache_alloc(lov_thread_kmem, GFP_NOFS | __GFP_ZERO);
- if (info != NULL)
+ info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS);
+ if (info)
INIT_LIST_HEAD(&info->lti_closure.clc_list);
else
info = ERR_PTR(-ENOMEM);
@@ -170,8 +170,8 @@ static void *lov_session_key_init(const struct lu_context *ctx,
{
struct lov_session *info;
- info = kmem_cache_alloc(lov_session_kmem, GFP_NOFS | __GFP_ZERO);
- if (info == NULL)
+ info = kmem_cache_zalloc(lov_session_kmem, GFP_NOFS);
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
@@ -199,15 +199,15 @@ static struct lu_device *lov_device_fini(const struct lu_env *env,
int i;
struct lov_device *ld = lu2lov_dev(d);
- LASSERT(ld->ld_lov != NULL);
- if (ld->ld_target == NULL)
+ LASSERT(ld->ld_lov);
+ if (!ld->ld_target)
return NULL;
lov_foreach_target(ld, i) {
struct lovsub_device *lsd;
lsd = ld->ld_target[i];
- if (lsd != NULL) {
+ if (lsd) {
cl_stack_fini(env, lovsub2cl_dev(lsd));
ld->ld_target[i] = NULL;
}
@@ -222,8 +222,8 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d,
int i;
int rc = 0;
- LASSERT(d->ld_site != NULL);
- if (ld->ld_target == NULL)
+ LASSERT(d->ld_site);
+ if (!ld->ld_target)
return rc;
lov_foreach_target(ld, i) {
@@ -232,7 +232,7 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d,
struct lov_tgt_desc *desc;
desc = ld->ld_lov->lov_tgts[i];
- if (desc == NULL)
+ if (!desc)
continue;
cl = cl_type_setup(env, d->ld_site, &lovsub_device_type,
@@ -261,8 +261,8 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
struct lov_req *lr;
int result;
- lr = kmem_cache_alloc(lov_req_kmem, GFP_NOFS | __GFP_ZERO);
- if (lr != NULL) {
+ lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS);
+ if (lr) {
cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
result = 0;
} else
@@ -282,9 +282,9 @@ static void lov_emerg_free(struct lov_device_emerg **emrg, int nr)
struct lov_device_emerg *em;
em = emrg[i];
- if (em != NULL) {
+ if (em) {
LASSERT(em->emrg_page_list.pl_nr == 0);
- if (em->emrg_env != NULL)
+ if (em->emrg_env)
cl_env_put(em->emrg_env, &em->emrg_refcheck);
kfree(em);
}
@@ -300,7 +300,7 @@ static struct lu_device *lov_device_free(const struct lu_env *env,
cl_device_fini(lu2cl_dev(d));
kfree(ld->ld_target);
- if (ld->ld_emrg != NULL)
+ if (ld->ld_emrg)
lov_emerg_free(ld->ld_emrg, nr);
kfree(ld);
return NULL;
@@ -311,7 +311,7 @@ static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev,
{
struct lov_device *ld = lu2lov_dev(dev);
- if (ld->ld_target[index] != NULL) {
+ if (ld->ld_target[index]) {
cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index]));
ld->ld_target[index] = NULL;
}
@@ -324,17 +324,17 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr)
int result;
emerg = kcalloc(nr, sizeof(emerg[0]), GFP_NOFS);
- if (emerg == NULL)
+ if (!emerg)
return ERR_PTR(-ENOMEM);
for (result = i = 0; i < nr && result == 0; i++) {
struct lov_device_emerg *em;
em = kzalloc(sizeof(*em), GFP_NOFS);
- if (em != NULL) {
+ if (em) {
emerg[i] = em;
cl_page_list_init(&em->emrg_page_list);
em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
- LCT_REMEMBER|LCT_NOREF);
+ LCT_REMEMBER | LCT_NOREF);
if (!IS_ERR(em->emrg_env))
em->emrg_env->le_ctx.lc_cookie = 0x2;
else {
@@ -370,7 +370,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
return PTR_ERR(emerg);
newd = kcalloc(tgt_size, sz, GFP_NOFS);
- if (newd != NULL) {
+ if (newd) {
mutex_lock(&dev->ld_mutex);
if (sub_size > 0) {
memcpy(newd, dev->ld_target, sub_size * sz);
@@ -379,7 +379,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
dev->ld_target = newd;
dev->ld_target_nr = tgt_size;
- if (dev->ld_emrg != NULL)
+ if (dev->ld_emrg)
lov_emerg_free(dev->ld_emrg, sub_size);
dev->ld_emrg = emerg;
mutex_unlock(&dev->ld_mutex);
@@ -404,8 +404,6 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
obd_getref(obd);
tgt = obd->u.lov.lov_tgts[index];
- LASSERT(tgt != NULL);
- LASSERT(tgt->ltd_obd != NULL);
if (!tgt->ltd_obd->obd_set_up) {
CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid));
@@ -414,7 +412,7 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
rc = lov_expand_targets(env, ld);
if (rc == 0 && ld->ld_flags & LOV_DEV_INITIALIZED) {
- LASSERT(dev->ld_site != NULL);
+ LASSERT(dev->ld_site);
cl = cl_type_setup(env, dev->ld_site, &lovsub_device_type,
tgt->ltd_obd->obd_lu_dev);
@@ -492,7 +490,7 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
/* setup the LOV OBD */
obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd != NULL);
+ LASSERT(obd);
rc = lov_setup(obd, cfg);
if (rc) {
lov_device_free(env, d);
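The allocation rewrites above are behavior-preserving: kmem_cache_zalloc() is
defined as kmem_cache_alloc() with __GFP_ZERO folded into the gfp mask. A
minimal sketch of the equivalence (struct foo and foo_kmem are placeholder
names, not identifiers from this patch):

        struct foo *a, *b;

        /* Before: zeroing requested explicitly via the gfp mask. */
        a = kmem_cache_alloc(foo_kmem, GFP_NOFS | __GFP_ZERO);

        /* After: same allocation and zeroing through the canonical helper. */
        b = kmem_cache_zalloc(foo_kmem, GFP_NOFS);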
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index b3c9c85aab9d..b6529401c713 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -100,8 +100,8 @@ struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
return NULL;
for (i = 0; i < stripe_count; i++) {
- loi = kmem_cache_alloc(lov_oinfo_slab, GFP_NOFS | __GFP_ZERO);
- if (loi == NULL)
+ loi = kmem_cache_zalloc(lov_oinfo_slab, GFP_NOFS);
+ if (!loi)
goto err;
lsm->lsm_oinfo[i] = loi;
}
@@ -141,7 +141,7 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
static void
lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
- u64 *lov_off, u64 *swidth)
+ u64 *lov_off, u64 *swidth)
{
if (swidth)
*swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
@@ -162,12 +162,13 @@ static int lsm_destroy_plain(struct lov_stripe_md *lsm, struct obdo *oa,
}
/* Find minimum stripe maxbytes value. For inactive or
- * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */
+ * reconnecting targets use LUSTRE_STRIPE_MAXBYTES.
+ */
static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
{
struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
- if (imp == NULL || !tgt->ltd_active) {
+ if (!imp || !tgt->ltd_active) {
*stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
return;
}
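Most of the comment churn in this series applies the kernel's preferred
multi-line comment layout, where the closing */ gets its own line rather than
trailing the last text line. Using the hunk above as the model:

        /* Find minimum stripe maxbytes value. For inactive or
         * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */

becomes

        /* Find minimum stripe maxbytes value. For inactive or
         * reconnecting targets use LUSTRE_STRIPE_MAXBYTES.
         */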
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 2d00bad58e35..590f9326af37 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -43,7 +43,8 @@
/* lov_do_div64(a, b) returns a % b, and a = a / b.
* The 32-bit code is LOV-specific due to knowing about stripe limits in
* order to reduce the divisor to a 32-bit number. If the divisor is
- * already a 32-bit value the compiler handles this directly. */
+ * already a 32-bit value the compiler handles this directly.
+ */
#if BITS_PER_LONG == 64
# define lov_do_div64(n, base) ({ \
uint64_t __base = (base); \
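The comment above is the key to reading the callers that follow:
lov_do_div64() mirrors the kernel's do_div() contract, yielding quotient and
remainder in a single step. A hedged usage sketch, with invented values:

        u64 off = 10 * 1024 * 1024 + 7;         /* example file offset */
        u64 swidth = 1024 * 1024;               /* example stripe width */
        u64 rem;

        rem = lov_do_div64(off, swidth);
        /* Now rem == 7 (the old off % swidth) and off == 10 (off / swidth). */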
@@ -92,7 +93,8 @@ struct lov_request_set {
atomic_t set_refcount;
struct obd_export *set_exp;
/* XXX: There is @set_exp already, however obd_statfs gets obd_device
- only. */
+ * only.
+ */
struct obd_device *set_obd;
int set_count;
atomic_t set_completes;
@@ -114,7 +116,6 @@ void lov_finish_set(struct lov_request_set *set);
static inline void lov_get_reqset(struct lov_request_set *set)
{
- LASSERT(set != NULL);
LASSERT(atomic_read(&set->set_refcount) > 0);
atomic_inc(&set->set_refcount);
}
@@ -137,12 +138,10 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
struct ost_lvb *lvb, __u64 *kms_place);
/* lov_offset.c */
-u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size,
- int stripeno);
+u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno);
int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
int stripeno, u64 *obdoff);
-u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size,
- int stripeno);
+u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size, int stripeno);
int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
u64 start, u64 end,
u64 *obd_start, u64 *obd_end);
@@ -197,7 +196,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm,
int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
struct lov_mds_md *lmm, int lmm_bytes);
int lov_getstripe(struct obd_export *exp,
- struct lov_stripe_md *lsm, struct lov_user_md *lump);
+ struct lov_stripe_md *lsm, struct lov_user_md __user *lump);
int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
int pattern, int magic);
int lov_free_memmd(struct lov_stripe_md **lsmp);
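The lov_getstripe() prototype change is not cosmetic: tagging lump with
__user lets sparse verify that the pointer only crosses the user/kernel
boundary through the copy helpers. An illustrative sketch (the function below
is invented for this note, not part of the patch):

        /* Hypothetical helper: return a reply structure to user space. */
        static int demo_reply_to_user(struct lov_user_md __user *lump,
                                      const struct lov_user_md *reply,
                                      size_t len)
        {
                /* Dereferencing lump directly would earn a sparse warning
                 * (and be a real bug); copy_to_user() is the only valid
                 * access.
                 */
                if (copy_to_user(lump, reply, len))
                        return -EFAULT;
                return 0;
        }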
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 93fe69eb2560..4296aacd84fc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -60,7 +60,7 @@ static inline void lov_sub_exit(struct lov_io_sub *sub)
static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
struct lov_io_sub *sub)
{
- if (sub->sub_io != NULL) {
+ if (sub->sub_io) {
if (sub->sub_io_initialized) {
lov_sub_enter(sub);
cl_io_fini(sub->sub_env, sub->sub_io);
@@ -74,7 +74,7 @@ static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
kfree(sub->sub_io);
sub->sub_io = NULL;
}
- if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
+ if (!IS_ERR_OR_NULL(sub->sub_env)) {
if (!sub->sub_borrowed)
cl_env_put(sub->sub_env, &sub->sub_refcheck);
sub->sub_env = NULL;
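The sub_env cleanup above folds the open-coded NULL-and-error pair into the
stock helper from <linux/err.h>; IS_ERR_OR_NULL(p) is exactly
(!p || IS_ERR(p)), so negating it reproduces the original test:

        if (sub->sub_env != NULL && !IS_ERR(sub->sub_env)) {
                /* ... release sub_env ... */
        }

        /* is equivalent to */

        if (!IS_ERR_OR_NULL(sub->sub_env)) {
                /* ... release sub_env ... */
        }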
@@ -143,11 +143,11 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
int stripe = sub->sub_stripe;
int result;
- LASSERT(sub->sub_io == NULL);
- LASSERT(sub->sub_env == NULL);
+ LASSERT(!sub->sub_io);
+ LASSERT(!sub->sub_env);
LASSERT(sub->sub_stripe < lio->lis_stripe_count);
- if (unlikely(lov_r0(lov)->lo_sub[stripe] == NULL))
+ if (unlikely(!lov_r0(lov)->lo_sub[stripe]))
return -EIO;
result = 0;
@@ -252,7 +252,6 @@ static int lov_page_stripe(const struct cl_page *page)
subobj = lu2lovsub(
lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
&lovsub_device_type));
- LASSERT(subobj != NULL);
return subobj->lso_index;
}
@@ -263,9 +262,9 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
struct cl_page *page = slice->cpl_page;
int stripe;
- LASSERT(lio->lis_cl.cis_io != NULL);
+ LASSERT(lio->lis_cl.cis_io);
LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object);
- LASSERT(lsm != NULL);
+ LASSERT(lsm);
LASSERT(lio->lis_nr_subios > 0);
stripe = lov_page_stripe(page);
@@ -278,7 +277,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
int result;
- LASSERT(lio->lis_object != NULL);
+ LASSERT(lio->lis_object);
/*
* Need to be optimized, we can't afford to allocate a piece of memory
@@ -288,7 +287,7 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
libcfs_kvzalloc(lsm->lsm_stripe_count *
sizeof(lio->lis_subs[0]),
GFP_NOFS);
- if (lio->lis_subs != NULL) {
+ if (lio->lis_subs) {
lio->lis_nr_subios = lio->lis_stripe_count;
lio->lis_single_subio_index = -1;
lio->lis_active_subios = 0;
@@ -304,7 +303,6 @@ static void lov_io_slice_init(struct lov_io *lio,
io->ci_result = 0;
lio->lis_object = obj;
- LASSERT(obj->lo_lsm != NULL);
lio->lis_stripe_count = obj->lo_lsm->lsm_stripe_count;
switch (io->ci_type) {
@@ -358,7 +356,7 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
struct lov_object *lov = cl2lov(ios->cis_obj);
int i;
- if (lio->lis_subs != NULL) {
+ if (lio->lis_subs) {
for (i = 0; i < lio->lis_nr_subios; i++)
lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
kvfree(lio->lis_subs);
@@ -395,7 +393,7 @@ static int lov_io_iter_init(const struct lu_env *env,
endpos, &start, &end))
continue;
- if (unlikely(lov_r0(lio->lis_object)->lo_sub[stripe] == NULL)) {
+ if (unlikely(!lov_r0(lio->lis_object)->lo_sub[stripe])) {
if (ios->cis_io->ci_type == CIT_READ ||
ios->cis_io->ci_type == CIT_WRITE ||
ios->cis_io->ci_type == CIT_FAULT)
@@ -601,13 +599,13 @@ static int lov_io_submit(const struct lu_env *env,
return rc;
}
- LASSERT(lio->lis_subs != NULL);
+ LASSERT(lio->lis_subs);
if (alloc) {
stripes_qin =
libcfs_kvzalloc(sizeof(*stripes_qin) *
lio->lis_nr_subios,
GFP_NOFS);
- if (stripes_qin == NULL)
+ if (!stripes_qin)
return -ENOMEM;
for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
@@ -949,13 +947,13 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
}
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io)
+ struct cl_io *io)
{
struct lov_object *lov = cl2lov(obj);
struct lov_io *lio = lov_env_io(env);
int result;
- LASSERT(lov->lo_lsm != NULL);
+ LASSERT(lov->lo_lsm);
lio->lis_object = lov;
switch (io->ci_type) {
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index d866791d7b22..ae854bc25dbe 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -115,7 +115,7 @@ static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
/*
* check that sub-lock doesn't have lock link to this top-lock.
*/
- LASSERT(lov_lock_link_find(env, lck, lsl) == NULL);
+ LASSERT(!lov_lock_link_find(env, lck, lsl));
LASSERT(idx < lck->lls_nr);
lck->lls_sub[idx].sub_lock = lsl;
@@ -144,8 +144,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
LASSERT(idx < lck->lls_nr);
- link = kmem_cache_alloc(lov_lock_link_kmem, GFP_NOFS | __GFP_ZERO);
- if (link != NULL) {
+ link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS);
+ if (link) {
struct lov_sublock_env *subenv;
struct lov_lock_sub *lls;
struct cl_lock_descr *descr;
@@ -160,7 +160,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
* to remember the subio. This is because lock is able
* to be cached, but this is not true for IO. This
* further means a sublock might be referenced in
- * different io context. -jay */
+ * different io context. -jay
+ */
sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
descr, "lov-parent", parent);
@@ -220,7 +221,7 @@ static int lov_sublock_lock(const struct lu_env *env,
LASSERT(!(lls->sub_flags & LSF_HELD));
link = lov_lock_link_find(env, lck, sublock);
- LASSERT(link != NULL);
+ LASSERT(link);
lov_lock_unlink(env, link, sublock);
lov_sublock_unlock(env, sublock, closure, NULL);
lck->lls_cancel_race = 1;
@@ -263,7 +264,7 @@ static int lov_subresult(int result, int rc)
int rc_rank;
LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
- "result = %d", result);
+ "result = %d\n", result);
LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
"rc = %d\n", rc);
CLASSERT(CLO_WAIT < CLO_REPEAT);
@@ -309,14 +310,14 @@ static int lov_lock_sub_init(const struct lu_env *env,
* XXX for wide striping smarter algorithm is desirable,
* breaking out of the loop, early.
*/
- if (likely(r0->lo_sub[i] != NULL) &&
+ if (likely(r0->lo_sub[i]) &&
lov_stripe_intersects(loo->lo_lsm, i,
file_start, file_end, &start, &end))
nr++;
}
LASSERT(nr > 0);
lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS);
- if (lck->lls_sub == NULL)
+ if (!lck->lls_sub)
return -ENOMEM;
lck->lls_nr = nr;
@@ -328,14 +329,14 @@ static int lov_lock_sub_init(const struct lu_env *env,
* top-lock.
*/
for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
- if (likely(r0->lo_sub[i] != NULL) &&
+ if (likely(r0->lo_sub[i]) &&
lov_stripe_intersects(loo->lo_lsm, i,
file_start, file_end, &start, &end)) {
struct cl_lock_descr *descr;
descr = &lck->lls_sub[nr].sub_descr;
- LASSERT(descr->cld_obj == NULL);
+ LASSERT(!descr->cld_obj);
descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
descr->cld_start = cl_index(descr->cld_obj, start);
descr->cld_end = cl_index(descr->cld_obj, end);
@@ -369,7 +370,6 @@ static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
struct cl_lock *sublock;
int dying;
- LASSERT(lck->lls_sub[i].sub_lock != NULL);
sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(sublock));
@@ -413,7 +413,6 @@ static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
struct cl_lock *sublock;
- LASSERT(lck->lls_sub[i].sub_lock != NULL);
sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(sublock));
LASSERT(sublock->cll_state != CLS_FREEING);
@@ -435,13 +434,13 @@ static void lov_lock_fini(const struct lu_env *env,
lck = cl2lov_lock(slice);
LASSERT(lck->lls_nr_filled == 0);
- if (lck->lls_sub != NULL) {
+ if (lck->lls_sub) {
for (i = 0; i < lck->lls_nr; ++i)
/*
* No sub-locks exists at this point, as sub-lock has
* a reference on its parent.
*/
- LASSERT(lck->lls_sub[i].sub_lock == NULL);
+ LASSERT(!lck->lls_sub[i].sub_lock);
kvfree(lck->lls_sub);
}
kmem_cache_free(lov_lock_kmem, lck);
@@ -479,7 +478,8 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
result = cl_enqueue_try(env, sublock, io, enqflags);
if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
/* if it is enqueued, try to `wait' on it---maybe it's already
- * granted */
+ * granted
+ */
result = cl_wait_try(env, sublock);
if (result == CLO_REENQUEUED)
result = CLO_WAIT;
@@ -515,12 +515,13 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
if (!IS_ERR(sublock)) {
cl_lock_get_trust(sublock);
if (parent->cll_state == CLS_QUEUING &&
- lck->lls_sub[idx].sub_lock == NULL) {
+ !lck->lls_sub[idx].sub_lock) {
lov_sublock_adopt(env, lck, sublock, idx, link);
} else {
kmem_cache_free(lov_lock_link_kmem, link);
/* other thread allocated sub-lock, or enqueue is no
- * longer going on */
+ * longer going on
+ */
cl_lock_mutex_put(env, parent);
cl_lock_unhold(env, sublock, "lov-parent", parent);
cl_lock_mutex_get(env, parent);
@@ -574,10 +575,11 @@ static int lov_lock_enqueue(const struct lu_env *env,
* Sub-lock might have been canceled, while top-lock was
* cached.
*/
- if (sub == NULL) {
+ if (!sub) {
result = lov_sublock_fill(env, lock, io, lck, i);
/* lov_sublock_fill() released @lock mutex,
- * restart. */
+ * restart.
+ */
break;
}
sublock = sub->lss_cl.cls_lock;
@@ -605,7 +607,8 @@ static int lov_lock_enqueue(const struct lu_env *env,
/* take recursive mutex of sublock */
cl_lock_mutex_get(env, sublock);
/* need to release all locks in closure
- * otherwise it may deadlock. LU-2683.*/
+ * otherwise it may deadlock. LU-2683.
+ */
lov_sublock_unlock(env, sub, closure,
subenv);
/* sublock and parent are held. */
@@ -620,7 +623,7 @@ static int lov_lock_enqueue(const struct lu_env *env,
break;
}
} else {
- LASSERT(sublock->cll_conflict == NULL);
+ LASSERT(!sublock->cll_conflict);
lov_sublock_unlock(env, sub, closure, subenv);
}
}
@@ -649,11 +652,12 @@ static int lov_lock_unuse(const struct lu_env *env,
/* top-lock state cannot change concurrently, because single
* thread (one that released the last hold) carries unlocking
- * to the completion. */
+ * to the completion.
+ */
LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
lls = &lck->lls_sub[i];
sub = lls->sub_lock;
- if (sub == NULL)
+ if (!sub)
continue;
sublock = sub->lss_cl.cls_lock;
@@ -679,7 +683,7 @@ static int lov_lock_unuse(const struct lu_env *env,
}
static void lov_lock_cancel(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+ const struct cl_lock_slice *slice)
{
struct lov_lock *lck = cl2lov_lock(slice);
struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
@@ -695,10 +699,11 @@ static void lov_lock_cancel(const struct lu_env *env,
/* top-lock state cannot change concurrently, because single
* thread (one that released the last hold) carries unlocking
- * to the completion. */
+ * to the completion.
+ */
lls = &lck->lls_sub[i];
sub = lls->sub_lock;
- if (sub == NULL)
+ if (!sub)
continue;
sublock = sub->lss_cl.cls_lock;
@@ -757,7 +762,6 @@ again:
lls = &lck->lls_sub[i];
sub = lls->sub_lock;
- LASSERT(sub != NULL);
sublock = sub->lss_cl.cls_lock;
rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
if (rc == 0) {
@@ -776,8 +780,9 @@ again:
if (result != 0)
break;
}
- /* Each sublock only can be reenqueued once, so will not loop for
- * ever. */
+ /* Each sublock only can be reenqueued once, so will not loop
+ * forever.
+ */
if (result == 0 && reenqueued != 0)
goto again;
cl_lock_closure_fini(closure);
@@ -805,7 +810,7 @@ static int lov_lock_use(const struct lu_env *env,
lls = &lck->lls_sub[i];
sub = lls->sub_lock;
- if (sub == NULL) {
+ if (!sub) {
/*
* Sub-lock might have been canceled, while top-lock was
* cached.
@@ -826,7 +831,8 @@ static int lov_lock_use(const struct lu_env *env,
i, 1, rc);
} else if (sublock->cll_state == CLS_NEW) {
/* Sub-lock might have been canceled, while
- * top-lock was cached. */
+ * top-lock was cached.
+ */
result = -ESTALE;
lov_sublock_release(env, lck, i, 1, result);
}
@@ -852,45 +858,6 @@ static int lov_lock_use(const struct lu_env *env,
return result;
}
-#if 0
-static int lock_lock_multi_match()
-{
- struct cl_lock *lock = slice->cls_lock;
- struct cl_lock_descr *subneed = &lov_env_info(env)->lti_ldescr;
- struct lov_object *loo = cl2lov(lov->lls_cl.cls_obj);
- struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct lov_lock_sub *sub;
- struct cl_object *subobj;
- u64 fstart;
- u64 fend;
- u64 start;
- u64 end;
- int i;
-
- fstart = cl_offset(need->cld_obj, need->cld_start);
- fend = cl_offset(need->cld_obj, need->cld_end + 1) - 1;
- subneed->cld_mode = need->cld_mode;
- cl_lock_mutex_get(env, lock);
- for (i = 0; i < lov->lls_nr; ++i) {
- sub = &lov->lls_sub[i];
- if (sub->sub_lock == NULL)
- continue;
- subobj = sub->sub_descr.cld_obj;
- if (!lov_stripe_intersects(loo->lo_lsm, sub->sub_stripe,
- fstart, fend, &start, &end))
- continue;
- subneed->cld_start = cl_index(subobj, start);
- subneed->cld_end = cl_index(subobj, end);
- subneed->cld_obj = subobj;
- if (!cl_lock_ext_match(&sub->sub_got, subneed)) {
- result = 0;
- break;
- }
- }
- cl_lock_mutex_put(env, lock);
-}
-#endif
-
/**
* Check if the extent region \a descr is covered by \a child against the
* specific \a stripe.
@@ -922,10 +889,10 @@ static int lov_lock_stripe_is_matching(const struct lu_env *env,
idx = lov_stripe_number(lsm, start);
if (idx == stripe ||
- unlikely(lov_r0(lov)->lo_sub[idx] == NULL)) {
+ unlikely(!lov_r0(lov)->lo_sub[idx])) {
idx = lov_stripe_number(lsm, end);
if (idx == stripe ||
- unlikely(lov_r0(lov)->lo_sub[idx] == NULL))
+ unlikely(!lov_r0(lov)->lo_sub[idx]))
result = 1;
}
}
@@ -970,7 +937,8 @@ static int lov_lock_fits_into(const struct lu_env *env,
LASSERT(lov->lls_nr > 0);
/* for top lock, it's necessary to match enq flags otherwise it will
- * run into problem if a sublock is missing and reenqueue. */
+ * run into problem if a sublock is missing and reenqueue.
+ */
if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
return 0;
@@ -1074,7 +1042,7 @@ static void lov_lock_delete(const struct lu_env *env,
struct lov_lock_sub *lls = &lck->lls_sub[i];
struct lovsub_lock *lsl = lls->sub_lock;
- if (lsl == NULL) /* already removed */
+ if (!lsl) /* already removed */
continue;
rc = lov_sublock_lock(env, lck, lls, closure, NULL);
@@ -1090,9 +1058,9 @@ static void lov_lock_delete(const struct lu_env *env,
lov_sublock_release(env, lck, i, 1, 0);
link = lov_lock_link_find(env, lck, lsl);
- LASSERT(link != NULL);
+ LASSERT(link);
lov_lock_unlink(env, link, lsl);
- LASSERT(lck->lls_sub[i].sub_lock == NULL);
+ LASSERT(!lck->lls_sub[i].sub_lock);
lov_sublock_unlock(env, lsl, closure, NULL);
}
@@ -1112,7 +1080,7 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
sub = &lck->lls_sub[i];
(*p)(env, cookie, " %d %x: ", i, sub->sub_flags);
- if (sub->sub_lock != NULL)
+ if (sub->sub_lock)
cl_lock_print(env, cookie, p,
sub->sub_lock->lss_cl.cls_lock);
else
@@ -1139,8 +1107,8 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct lov_lock *lck;
int result;
- lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO);
- if (lck != NULL) {
+ lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
+ if (lck) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
result = lov_lock_sub_init(env, lck, io);
} else
@@ -1157,7 +1125,8 @@ static void lov_empty_lock_fini(const struct lu_env *env,
}
static int lov_empty_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
+ lu_printer_t p,
+ const struct cl_lock_slice *slice)
{
(*p)(env, cookie, "empty\n");
return 0;
@@ -1170,13 +1139,13 @@ static const struct cl_lock_operations lov_empty_lock_ops = {
};
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+ struct cl_lock *lock, const struct cl_io *io)
{
struct lov_lock *lck;
int result = -ENOMEM;
- lck = kmem_cache_alloc(lov_lock_kmem, GFP_NOFS | __GFP_ZERO);
- if (lck != NULL) {
+ lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
+ if (lck) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
lck->lls_orig = lock->cll_descr;
result = 0;
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 97115bec7cca..029cd4d62796 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -129,7 +129,8 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
"stripe %d KMS %sing %llu->%llu\n",
stripe, kms > loi->loi_kms ? "increase":"shrink",
loi->loi_kms, kms);
- loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);
+ loi->loi_lvb.lvb_size = kms;
+ loi_kms_set(loi, loi->loi_lvb.lvb_size);
}
return 0;
}
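The lov_adjust_kms() hunk unpacks an assignment that was buried in an
argument list. Runtime behavior is unchanged; the two-statement form keeps
the side effect visible at a glance and is what checkpatch expects:

        /* Before: the argument expression both assigns and passes kms. */
        loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);

        /* After: one statement per side effect. */
        loi->loi_lvb.lvb_size = kms;
        loi_kms_set(loi, loi->loi_lvb.lvb_size);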
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 6c2bdfe9cdcf..5daa7faf4dda 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -61,7 +61,8 @@
#include "lov_internal.h"
/* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion.
- Any function that expects lov_tgts to remain stationary must take a ref. */
+ * Any function that expects lov_tgts to remain stationary must take a ref.
+ */
static void lov_getref(struct obd_device *obd)
{
struct lov_obd *lov = &obd->u.lov;
@@ -96,7 +97,8 @@ static void lov_putref(struct obd_device *obd)
list_add(&tgt->ltd_kill, &kill);
/* XXX - right now there is a dependency on ld_tgt_count
* being the maximum tgt index for computing the
- * mds_max_easize. So we can't shrink it. */
+ * mds_max_easize. So we can't shrink it.
+ */
lov_ost_pool_remove(&lov->lov_packed, i);
lov->lov_tgts[i] = NULL;
lov->lov_death_row--;
@@ -158,7 +160,8 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
if (activate) {
tgt_obd->obd_no_recov = 0;
/* FIXME this is probably supposed to be
- ptlrpc_set_import_active. Horrible naming. */
+ * ptlrpc_set_import_active. Horrible naming.
+ */
ptlrpc_activate_import(imp);
}
@@ -262,7 +265,7 @@ static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
osc_obd = class_exp2obd(tgt->ltd_exp);
CDEBUG(D_CONFIG, "%s: disconnecting target %s\n",
- obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL");
+ obd->obd_name, osc_obd ? osc_obd->obd_name : "NULL");
if (tgt->ltd_active) {
tgt->ltd_active = 0;
@@ -315,7 +318,8 @@ static int lov_disconnect(struct obd_export *exp)
}
/* Let's hold another reference so lov_del_obd doesn't spin through
- putref every time */
+ * putref every time
+ */
obd_getref(obd);
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
@@ -358,7 +362,7 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
* LU-642, initially inactive OSC could miss the obd_connect,
* we make up for it here.
*/
- if (ev == OBD_NOTIFY_ACTIVATE && tgt->ltd_exp == NULL &&
+ if (ev == OBD_NOTIFY_ACTIVATE && !tgt->ltd_exp &&
obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
struct obd_uuid lov_osc_uuid = {"LOV_OSC_UUID"};
@@ -399,10 +403,9 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
CDEBUG(D_INFO, "OSC %s already %sactive!\n",
uuid->uuid, active ? "" : "in");
goto out;
- } else {
- CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n",
- obd_uuid2str(uuid), active ? "" : "in");
}
+ CDEBUG(D_CONFIG, "Marking OSC %s %sactive\n",
+ obd_uuid2str(uuid), active ? "" : "in");
lov->lov_tgts[index]->ltd_active = active;
if (active) {
@@ -481,7 +484,8 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched,
continue;
/* don't send sync event if target not
- * connected/activated */
+ * connected/activated
+ */
if (is_sync && !lov->lov_tgts[i]->ltd_active)
continue;
@@ -521,12 +525,12 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME,
&obd->obd_uuid);
- if (tgt_obd == NULL)
+ if (!tgt_obd)
return -EINVAL;
mutex_lock(&lov->lov_lock);
- if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
+ if ((index < lov->lov_tgt_size) && lov->lov_tgts[index]) {
tgt = lov->lov_tgts[index];
CERROR("UUID %s already assigned at LOV target index %d\n",
obd_uuid2str(&tgt->ltd_uuid), index);
@@ -543,7 +547,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
while (newsize < index + 1)
newsize <<= 1;
newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
- if (newtgts == NULL) {
+ if (!newtgts) {
mutex_unlock(&lov->lov_lock);
return -ENOMEM;
}
@@ -590,14 +594,15 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
mutex_unlock(&lov->lov_lock);
CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
- index, tgt->ltd_gen, lov->desc.ld_tgt_count);
+ index, tgt->ltd_gen, lov->desc.ld_tgt_count);
rc = obd_notify(obd, tgt_obd, OBD_NOTIFY_CREATE, &index);
if (lov->lov_connects == 0) {
/* lov_connect hasn't been called yet. We'll do the
- lov_connect_obd on this target when that fn first runs,
- because we don't know the connect flags yet. */
+ * lov_connect_obd on this target when that fn first runs,
+ * because we don't know the connect flags yet.
+ */
return 0;
}
@@ -613,11 +618,11 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
goto out;
}
- if (lov->lov_cache != NULL) {
+ if (lov->lov_cache) {
rc = obd_set_info_async(NULL, tgt->ltd_exp,
- sizeof(KEY_CACHE_SET), KEY_CACHE_SET,
- sizeof(struct cl_client_cache), lov->lov_cache,
- NULL);
+ sizeof(KEY_CACHE_SET), KEY_CACHE_SET,
+ sizeof(struct cl_client_cache),
+ lov->lov_cache, NULL);
if (rc < 0)
goto out;
}
@@ -702,8 +707,9 @@ static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
kfree(tgt);
/* Manual cleanup - no cleanup logs to clean up the osc's. We must
- do it ourselves. And we can't do it from lov_cleanup,
- because we just lost our only reference to it. */
+ * do it ourselves. And we can't do it from lov_cleanup,
+ * because we just lost our only reference to it.
+ */
if (osc_obd)
class_manual_cleanup(osc_obd);
}
@@ -773,9 +779,9 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
if (desc->ld_magic != LOV_DESC_MAGIC) {
if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) {
- CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n",
- obd->obd_name, desc);
- lustre_swab_lov_desc(desc);
+ CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n",
+ obd->obd_name, desc);
+ lustre_swab_lov_desc(desc);
} else {
CERROR("%s: Bad lov desc magic: %#x\n",
obd->obd_name, desc->ld_magic);
@@ -859,7 +865,8 @@ static int lov_cleanup(struct obd_device *obd)
/* free pool structs */
CDEBUG(D_INFO, "delete pool %p\n", pool);
/* In the function below, .hs_keycmp resolves to
- * pool_hashkey_keycmp() */
+ * pool_hashkey_keycmp()
+ */
/* coverity[overrun-buffer-val] */
lov_pool_del(obd, pool->pool_name);
}
@@ -879,8 +886,9 @@ static int lov_cleanup(struct obd_device *obd)
if (lov->lov_tgts[i]->ltd_active ||
atomic_read(&lov->lov_refcount))
/* We should never get here - these
- should have been removed in the
- disconnect. */
+ * should have been removed in the
+ * disconnect.
+ */
CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n",
i, lov->lov_death_row,
atomic_read(&lov->lov_refcount));
@@ -981,7 +989,7 @@ static int lov_recreate(struct obd_export *exp, struct obdo *src_oa,
ost_idx = src_oa->o_nlink;
lsm = *ea;
- if (lsm == NULL) {
+ if (!lsm) {
rc = -EINVAL;
goto out;
}
@@ -1025,8 +1033,8 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp,
struct lov_obd *lov;
int rc = 0;
- LASSERT(ea != NULL);
- if (exp == NULL)
+ LASSERT(ea);
+ if (!exp)
return -EINVAL;
if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
@@ -1043,7 +1051,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp,
/* Recreate a specific object id at the given OST index */
if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
(src_oa->o_flags & OBD_FL_RECREATE_OBJS)) {
- rc = lov_recreate(exp, src_oa, ea, oti);
+ rc = lov_recreate(exp, src_oa, ea, oti);
}
obd_putref(exp->exp_obd);
@@ -1052,7 +1060,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp,
#define ASSERT_LSM_MAGIC(lsmp) \
do { \
- LASSERT((lsmp) != NULL); \
+ LASSERT((lsmp)); \
LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 || \
(lsmp)->lsm_magic == LOV_MAGIC_V3), \
"%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic); \
@@ -1065,7 +1073,6 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
struct lov_request_set *set;
struct obd_info oinfo;
struct lov_request *req;
- struct list_head *pos;
struct lov_obd *lov;
int rc = 0, err = 0;
@@ -1085,9 +1092,7 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
if (rc)
goto out;
- list_for_each(pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
-
+ list_for_each_entry(req, &set->set_list, rq_link) {
if (oa->o_valid & OBD_MD_FLCOOKIE)
oti->oti_logcookies = set->set_cookies + req->rq_stripe;
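This and the later hunks in lov_obd.c retire the list_for_each() plus
list_entry() pair in favor of list_for_each_entry(), which performs the
container_of() conversion itself. Sketched with the types used here:

        struct lov_request *req;
        struct list_head *pos;

        /* Before: walk raw list heads, convert each one by hand. */
        list_for_each(pos, &set->set_list) {
                req = list_entry(pos, struct lov_request, rq_link);
                /* ... use req ... */
        }

        /* After: the macro iterates rq_link and yields req directly. */
        list_for_each_entry(req, &set->set_list, rq_link) {
                /* ... use req ... */
        }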
@@ -1105,10 +1110,9 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
}
}
- if (rc == 0) {
- LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
+ if (rc == 0)
rc = lsm_op_find(lsm->lsm_magic)->lsm_destroy(lsm, oa, md_exp);
- }
+
err = lov_fini_destroy_set(set);
out:
obd_putref(exp->exp_obd);
@@ -1129,11 +1133,10 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
}
static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
- struct ptlrpc_request_set *rqset)
+ struct ptlrpc_request_set *rqset)
{
struct lov_request_set *lovset;
struct lov_obd *lov;
- struct list_head *pos;
struct lov_request *req;
int rc = 0, err;
@@ -1153,9 +1156,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
oinfo->oi_md->lsm_stripe_size);
- list_for_each(pos, &lovset->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
-
+ list_for_each_entry(req, &lovset->set_list, rq_link) {
CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
@@ -1174,7 +1175,7 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
if (!list_empty(&rqset->set_requests)) {
LASSERT(rc == 0);
- LASSERT(rqset->set_interpret == NULL);
+ LASSERT(!rqset->set_interpret);
rqset->set_interpret = lov_getattr_interpret;
rqset->set_arg = (void *)lovset;
return rc;
@@ -1199,14 +1200,14 @@ static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
}
/* If @oti is given, the request goes from MDS and responses from OSTs are not
- needed. Otherwise, a client is waiting for responses. */
+ * needed. Otherwise, a client is waiting for responses.
+ */
static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
struct obd_trans_info *oti,
struct ptlrpc_request_set *rqset)
{
struct lov_request_set *set;
struct lov_request *req;
- struct list_head *pos;
struct lov_obd *lov;
int rc = 0;
@@ -1230,9 +1231,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
oinfo->oi_md->lsm_stripe_count,
oinfo->oi_md->lsm_stripe_size);
- list_for_each(pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
-
+ list_for_each_entry(req, &set->set_list, rq_link) {
if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
oti->oti_logcookies = set->set_cookies + req->rq_stripe;
@@ -1262,7 +1261,7 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
return rc ? rc : err;
}
- LASSERT(rqset->set_interpret == NULL);
+ LASSERT(!rqset->set_interpret);
rqset->set_interpret = lov_setattr_interpret;
rqset->set_arg = (void *)set;
@@ -1272,7 +1271,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
/* find any ldlm lock of the inode in lov
* return 0 not find
* 1 find one
- * < 0 error */
+ * < 0 error
+ */
static int lov_find_cbdata(struct obd_export *exp,
struct lov_stripe_md *lsm, ldlm_iterator_t it,
void *data)
@@ -1326,20 +1326,17 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
struct obd_device *obd = class_exp2obd(exp);
struct lov_request_set *set;
struct lov_request *req;
- struct list_head *pos;
struct lov_obd *lov;
int rc = 0;
- LASSERT(oinfo != NULL);
- LASSERT(oinfo->oi_osfs != NULL);
+ LASSERT(oinfo->oi_osfs);
lov = &obd->u.lov;
rc = lov_prep_statfs_set(obd, oinfo, &set);
if (rc)
return rc;
- list_for_each(pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
+ list_for_each_entry(req, &set->set_list, rq_link) {
rc = obd_statfs_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
&req->rq_oi, max_age, rqset);
if (rc)
@@ -1355,7 +1352,7 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
return rc ? rc : err;
}
- LASSERT(rqset->set_interpret == NULL);
+ LASSERT(!rqset->set_interpret);
rqset->set_interpret = lov_statfs_interpret;
rqset->set_arg = (void *)set;
return 0;
@@ -1369,9 +1366,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
int rc = 0;
/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
- * statfs requests */
+ * statfs requests
+ */
set = ptlrpc_prep_set();
- if (set == NULL)
+ if (!set)
return -ENOMEM;
oinfo.oi_osfs = osfs;
@@ -1385,7 +1383,7 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
}
static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
+ void *karg, void __user *uarg)
{
struct obd_device *obddev = class_exp2obd(exp);
struct lov_obd *lov = &obddev->u.lov;
@@ -1416,11 +1414,13 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
/* copy UUID */
if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
- min((int) data->ioc_plen2,
- (int) sizeof(struct obd_uuid))))
+ min((int)data->ioc_plen2,
+ (int)sizeof(struct obd_uuid))))
return -EFAULT;
- flags = uarg ? *(__u32 *)uarg : 0;
+ memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32));
+ flags = flags & LL_STATFS_NODELAY ? OBD_STATFS_NODELAY : 0;
+
/* got statfs data */
rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
@@ -1428,8 +1428,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
if (rc)
return rc;
if (copy_to_user(data->ioc_pbuf1, &stat_buf,
- min((int) data->ioc_plen1,
- (int) sizeof(stat_buf))))
+ min((int)data->ioc_plen1,
+ (int)sizeof(stat_buf))))
return -EFAULT;
break;
}
@@ -1501,7 +1501,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
&qctl->obd_uuid))
continue;
- if (tgt->ltd_exp == NULL)
+ if (!tgt->ltd_exp)
return -EINVAL;
break;
@@ -1543,14 +1543,15 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
continue;
/* ll_umount_begin() sets force flag but for lov, not
- * osc. Let's pass it through */
+ * osc. Let's pass it through
+ */
osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp);
osc_obd->obd_force = obddev->obd_force;
err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
len, karg, uarg);
- if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
+ if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK)
+ if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK)
return err;
- } else if (err) {
+ if (err) {
if (lov->lov_tgts[i]->ltd_active) {
CDEBUG(err == -ENOTTY ?
D_IOCTL : D_WARNING,
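The quotacheck rework just above is the standard "no else after return"
cleanup: once a branch ends in return, chaining the next test with else only
adds nesting. In generic form:

        if (cond_a) {
                return err;
        } else if (cond_b) {
                handle();
        }

        /* becomes, with identical control flow: */

        if (cond_a)
                return err;
        if (cond_b)
                handle();

The same rule motivates the earlier lov_set_osc_active() hunk, where the
else branch following goto out was flattened.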
@@ -1620,7 +1621,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
return -EINVAL;
/* If we have finished mapping on previous device, shift logical
- * offset to start of next device */
+ * offset to start of next device
+ */
if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
&lun_start, &lun_end)) != 0 &&
local_end < lun_end) {
@@ -1628,7 +1630,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
*start_stripe = stripe_no;
} else {
/* This is a special value to indicate that caller should
- * calculate offset in next stripe. */
+ * calculate offset in next stripe.
+ */
fm_end_offset = 0;
*start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
}
@@ -1739,7 +1742,7 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
- if (fm_local == NULL) {
+ if (!fm_local) {
rc = -ENOMEM;
goto out;
}
@@ -1759,7 +1762,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
fm_end = fm_key->oa.o_size;
last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
- actual_start_stripe, &stripe_count);
+ actual_start_stripe,
+ &stripe_count);
fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start,
fm_end, &start_stripe);
@@ -1796,7 +1800,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
/* If this is a continuation FIEMAP call and we are on
* starting stripe then lun_start needs to be set to
- * fm_end_offset */
+ * fm_end_offset
+ */
if (fm_end_offset != 0 && cur_stripe == start_stripe)
lun_start = fm_end_offset;
@@ -1818,7 +1823,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
len_mapped_single_call = 0;
/* If the output buffer is very large and the objects have many
- * extents we may need to loop on a single OST repeatedly */
+ * extents we may need to loop on a single OST repeatedly
+ */
ost_eof = 0;
ost_done = 0;
do {
@@ -1874,7 +1880,8 @@ inactive_tgt:
if (ext_count == 0) {
ost_done = 1;
/* If last stripe has hole at the end,
- * then we need to return */
+ * then we need to return
+ */
if (cur_stripe_wrap == last_stripe) {
fiemap->fm_mapped_extents = 0;
goto finish;
@@ -1896,7 +1903,8 @@ inactive_tgt:
ost_done = 1;
/* Clear the EXTENT_LAST flag which can be present on
- * last extent */
+ * last extent
+ */
if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST)
lcl_fm_ext[ext_count - 1].fe_flags &=
~FIEMAP_EXTENT_LAST;
@@ -1925,7 +1933,8 @@ inactive_tgt:
finish:
/* Indicate that we are returning device offsets unless file just has
- * single stripe */
+ * single stripe
+ */
if (lsm->lsm_stripe_count > 1)
fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
@@ -1933,7 +1942,8 @@ finish:
goto skip_last_device_calc;
/* Check if we have reached the last stripe and whether mapping for that
- * stripe is done. */
+ * stripe is done.
+ */
if (cur_stripe_wrap == last_stripe) {
if (ost_done || ost_eof)
fiemap->fm_extents[current_extent - 1].fe_flags |=
@@ -1978,10 +1988,12 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
/* XXX This is another one of those bits that will need to
* change if we ever actually support nested LOVs. It uses
- * the lock's export to find out which stripe it is. */
+ * the lock's export to find out which stripe it is.
+ */
/* XXX - it's assumed all the locks for deleted OSTs have
* been cancelled. Also, the export for deleted OSTs will
- * be NULL and won't match the lock's export. */
+ * be NULL and won't match the lock's export.
+ */
for (i = 0; i < lsm->lsm_stripe_count; i++) {
loi = lsm->lsm_oinfo[i];
if (lov_oinfo_is_dummy(loi))
@@ -2070,7 +2082,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
unsigned next_id = 0, mds_con = 0;
incr = check_uuid = do_inactive = no_set = 0;
- if (set == NULL) {
+ if (!set) {
no_set = 1;
set = ptlrpc_prep_set();
if (!set)
@@ -2093,7 +2105,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
} else if (KEY_IS(KEY_MDS_CONN)) {
mds_con = 1;
} else if (KEY_IS(KEY_CACHE_SET)) {
- LASSERT(lov->lov_cache == NULL);
+ LASSERT(!lov->lov_cache);
lov->lov_cache = val;
do_inactive = 1;
}
@@ -2119,12 +2131,12 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
/* Only want a specific OSC */
if (mgi->uuid && !obd_uuid_equals(mgi->uuid,
- &tgt->ltd_uuid))
+ &tgt->ltd_uuid))
continue;
err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, sizeof(int),
- &mgi->group, set);
+ keylen, key, sizeof(int),
+ &mgi->group, set);
} else if (next_id) {
err = obd_set_info_async(env, tgt->ltd_exp,
keylen, key, vallen,
@@ -2136,7 +2148,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
continue;
err = obd_set_info_async(env, tgt->ltd_exp,
- keylen, key, vallen, val, set);
+ keylen, key, vallen, val, set);
}
if (!rc)
@@ -2187,7 +2199,7 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
oqctl->qc_cmd != Q_INITQUOTA &&
oqctl->qc_cmd != LUSTRE_Q_SETQUOTA &&
oqctl->qc_cmd != Q_FINVALIDATE) {
- CERROR("bad quota opc %x for lov obd", oqctl->qc_cmd);
+ CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd);
return -EFAULT;
}
@@ -2317,7 +2329,8 @@ static int __init lov_init(void)
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
- * symbols from modules.*/
+ * symbols from modules.
+ */
CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches);
rc = lu_kmem_init(lov_caches);
@@ -2325,9 +2338,9 @@ static int __init lov_init(void)
return rc;
lov_oinfo_slab = kmem_cache_create("lov_oinfo",
- sizeof(struct lov_oinfo),
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (lov_oinfo_slab == NULL) {
+ sizeof(struct lov_oinfo),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!lov_oinfo_slab) {
lu_kmem_fini(lov_caches);
return -ENOMEM;
}
@@ -2353,7 +2366,7 @@ static void /*__exit*/ lov_exit(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver");
+MODULE_DESCRIPTION("Lustre Logical Object Volume");
MODULE_LICENSE("GPL");
MODULE_VERSION(LUSTRE_VERSION_STRING);
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 3b79ebc8eccf..1f8ed95a6d89 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -59,7 +59,7 @@ struct lov_layout_operations {
const struct cl_object_conf *conf,
union lov_layout_state *state);
int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state);
+ union lov_layout_state *state);
void (*llo_fini)(const struct lu_env *env, struct lov_object *lov,
union lov_layout_state *state);
void (*llo_install)(const struct lu_env *env, struct lov_object *lov,
@@ -67,7 +67,7 @@ struct lov_layout_operations {
int (*llo_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, struct page *vmpage);
int (*llo_lock_init)(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
@@ -135,7 +135,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
* Do not leave the object in cache to avoid accessing
* freed memory. This is because osc_object is referring to
* lov_oinfo of lsm_stripe_data which will be freed due to
- * this failure. */
+ * this failure.
+ */
cl_object_kill(env, stripe);
cl_object_put(env, stripe);
return -EIO;
@@ -154,7 +155,7 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
/* reuse ->coh_attr_guard to protect coh_parent change */
spin_lock(&subhdr->coh_attr_guard);
parent = subhdr->coh_parent;
- if (parent == NULL) {
+ if (!parent) {
subhdr->coh_parent = hdr;
spin_unlock(&subhdr->coh_attr_guard);
subhdr->coh_nesting = hdr->coh_nesting + 1;
@@ -170,11 +171,12 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
spin_unlock(&subhdr->coh_attr_guard);
old_obj = lu_object_locate(&parent->coh_lu, &lov_device_type);
- LASSERT(old_obj != NULL);
+ LASSERT(old_obj);
old_lov = cl2lov(lu2cl(old_obj));
if (old_lov->lo_layout_invalid) {
/* the object's layout has already changed but isn't
- * refreshed */
+ * refreshed
+ */
lu_object_unhash(env, &stripe->co_lu);
result = -EAGAIN;
} else {
@@ -212,14 +214,14 @@ static int lov_init_raid0(const struct lu_env *env,
LOV_MAGIC_V1, LOV_MAGIC_V3, lsm->lsm_magic);
}
- LASSERT(lov->lo_lsm == NULL);
+ LASSERT(!lov->lo_lsm);
lov->lo_lsm = lsm_addref(lsm);
r0->lo_nr = lsm->lsm_stripe_count;
LASSERT(r0->lo_nr <= lov_targets_nr(dev));
r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
GFP_NOFS);
- if (r0->lo_sub != NULL) {
+ if (r0->lo_sub) {
result = 0;
subconf->coc_inode = conf->coc_inode;
spin_lock_init(&r0->lo_sub_lock);
@@ -241,9 +243,10 @@ static int lov_init_raid0(const struct lu_env *env,
subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
subconf->u.coc_oinfo = oinfo;
- LASSERTF(subdev != NULL, "not init ost %d\n", ost_idx);
+ LASSERTF(subdev, "not init ost %d\n", ost_idx);
/* In the function below, .hs_keycmp resolves to
- * lu_obj_hop_keycmp() */
+ * lu_obj_hop_keycmp()
+ */
/* coverity[overrun-buffer-val] */
stripe = lov_sub_find(env, subdev, ofid, subconf);
if (!IS_ERR(stripe)) {
@@ -263,15 +266,15 @@ out:
}
static int lov_init_released(const struct lu_env *env,
- struct lov_device *dev, struct lov_object *lov,
- const struct cl_object_conf *conf,
- union lov_layout_state *state)
+ struct lov_device *dev, struct lov_object *lov,
+ const struct cl_object_conf *conf,
+ union lov_layout_state *state)
{
struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
- LASSERT(lsm != NULL);
+ LASSERT(lsm);
LASSERT(lsm_is_released(lsm));
- LASSERT(lov->lo_lsm == NULL);
+ LASSERT(!lov->lo_lsm);
lov->lo_lsm = lsm_addref(lsm);
return 0;
@@ -310,7 +313,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
cl_object_put(env, sub);
/* ... wait until it is actually destroyed---sub-object clears its
- * ->lo_sub[] slot in lovsub_object_fini() */
+ * ->lo_sub[] slot in lovsub_object_fini()
+ */
if (r0->lo_sub[idx] == los) {
waiter = &lov_env_info(env)->lti_waiter;
init_waitqueue_entry(waiter, current);
@@ -318,7 +322,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
set_current_state(TASK_UNINTERRUPTIBLE);
while (1) {
/* this wait-queue is signaled at the end of
- * lu_object_free(). */
+ * lu_object_free().
+ */
set_current_state(TASK_UNINTERRUPTIBLE);
spin_lock(&r0->lo_sub_lock);
if (r0->lo_sub[idx] == los) {
@@ -332,7 +337,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
}
remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
}
- LASSERT(r0->lo_sub[idx] == NULL);
+ LASSERT(!r0->lo_sub[idx]);
}
static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
@@ -345,11 +350,11 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
dump_lsm(D_INODE, lsm);
lov_layout_wait(env, lov);
- if (r0->lo_sub != NULL) {
+ if (r0->lo_sub) {
for (i = 0; i < r0->lo_nr; ++i) {
struct lovsub_object *los = r0->lo_sub[i];
- if (los != NULL) {
+ if (los) {
cl_locks_prune(env, &los->lso_cl, 1);
/*
* If top-level object is to be evicted from
@@ -374,7 +379,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
{
struct lov_layout_raid0 *r0 = &state->raid0;
- if (r0->lo_sub != NULL) {
+ if (r0->lo_sub) {
kvfree(r0->lo_sub);
r0->lo_sub = NULL;
}
@@ -384,7 +389,7 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
}
static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
- union lov_layout_state *state)
+ union lov_layout_state *state)
{
dump_lsm(D_INODE, lov->lo_lsm);
lov_free_memmd(&lov->lo_lsm);
@@ -406,13 +411,13 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie,
int i;
(*p)(env, cookie, "stripes: %d, %s, lsm{%p 0x%08X %d %u %u}:\n",
- r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
- lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
- lsm->lsm_stripe_count, lsm->lsm_layout_gen);
+ r0->lo_nr, lov->lo_layout_invalid ? "invalid" : "valid", lsm,
+ lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
+ lsm->lsm_stripe_count, lsm->lsm_layout_gen);
for (i = 0; i < r0->lo_nr; ++i) {
struct lu_object *sub;
- if (r0->lo_sub[i] != NULL) {
+ if (r0->lo_sub[i]) {
sub = lovsub2lu(r0->lo_sub[i]);
lu_object_print(env, cookie, p, sub);
} else {
@@ -423,16 +428,16 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie,
}
static int lov_print_released(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
+ lu_printer_t p, const struct lu_object *o)
{
struct lov_object *lov = lu2lov(o);
struct lov_stripe_md *lsm = lov->lo_lsm;
(*p)(env, cookie,
- "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
- lov->lo_layout_invalid ? "invalid" : "valid", lsm,
- lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
- lsm->lsm_stripe_count, lsm->lsm_layout_gen);
+ "released: %s, lsm{%p 0x%08X %d %u %u}:\n",
+ lov->lo_layout_invalid ? "invalid" : "valid", lsm,
+ lsm->lsm_magic, atomic_read(&lsm->lsm_refc),
+ lsm->lsm_stripe_count, lsm->lsm_layout_gen);
return 0;
}
@@ -465,7 +470,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
* context, and this function is called in ccc_lock_state(), it will
* hit this assertion.
* Anyway, it's still okay to call attr_get w/o type guard as layout
- * can't go if locks exist. */
+ * can't go if locks exist.
+ */
/* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */
if (!r0->lo_attr_valid) {
@@ -475,7 +481,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
memset(lvb, 0, sizeof(*lvb));
/* XXX: timestamps can be negative by sanity:test_39m,
- * how can it be? */
+ * how can it be?
+ */
lvb->lvb_atime = LLONG_MIN;
lvb->lvb_ctime = LLONG_MIN;
lvb->lvb_mtime = LLONG_MIN;
@@ -569,7 +576,7 @@ static const struct lov_layout_operations lov_dispatch[] = {
*/
static enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
{
- if (lsm == NULL)
+ if (!lsm)
return LLT_EMPTY;
if (lsm_is_released(lsm))
return LLT_RELEASED;
@@ -624,7 +631,7 @@ static void lov_conf_lock(struct lov_object *lov)
{
LASSERT(lov->lo_owner != current);
down_write(&lov->lo_type_guard);
- LASSERT(lov->lo_owner == NULL);
+ LASSERT(!lov->lo_owner);
lov->lo_owner = current;
}
@@ -639,9 +646,9 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
struct l_wait_info lwi = { 0 };
while (atomic_read(&lov->lo_active_ios) > 0) {
- CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
- PFID(lu_object_fid(lov2lu(lov))),
- atomic_read(&lov->lo_active_ios));
+ CDEBUG(D_INODE, "file:" DFID " wait for active IO, now: %d.\n",
+ PFID(lu_object_fid(lov2lu(lov))),
+ atomic_read(&lov->lo_active_ios));
l_wait_event(lov->lo_waitq,
atomic_read(&lov->lo_active_ios) == 0, &lwi);
@@ -666,7 +673,7 @@ static int lov_layout_change(const struct lu_env *unused,
LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
- if (conf->u.coc_md != NULL)
+ if (conf->u.coc_md)
llt = lov_type(conf->u.coc_md->lsm);
LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
@@ -689,7 +696,7 @@ static int lov_layout_change(const struct lu_env *unused,
old_ops->llo_fini(env, lov, &lov->u);
LASSERT(atomic_read(&lov->lo_active_ios) == 0);
- LASSERT(hdr->coh_tree.rnode == NULL);
+ LASSERT(!hdr->coh_tree.rnode);
LASSERT(hdr->coh_pages == 0);
lov->lo_type = LLT_EMPTY;
@@ -767,10 +774,10 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
LASSERT(conf->coc_opc == OBJECT_CONF_SET);
- if (conf->u.coc_md != NULL)
+ if (conf->u.coc_md)
lsm = conf->u.coc_md->lsm;
- if ((lsm == NULL && lov->lo_lsm == NULL) ||
- ((lsm != NULL && lov->lo_lsm != NULL) &&
+ if ((!lsm && !lov->lo_lsm) ||
+ ((lsm && lov->lo_lsm) &&
(lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
(lov->lo_lsm->lsm_pattern == lsm->lsm_pattern))) {
/* same version of layout */
@@ -818,7 +825,7 @@ static int lov_object_print(const struct lu_env *env, void *cookie,
}
int lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
llo_page_init, env, obj, page, vmpage);
@@ -845,7 +852,8 @@ static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr)
{
/* do not take lock, as this function is called under a
- * spin-lock. Layout is protected from changing by ongoing IO. */
+ * spin-lock. Layout is protected from changing by ongoing IO.
+ */
return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
}
@@ -891,8 +899,8 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
struct lov_object *lov;
struct lu_object *obj;
- lov = kmem_cache_alloc(lov_object_kmem, GFP_NOFS | __GFP_ZERO);
- if (lov != NULL) {
+ lov = kmem_cache_zalloc(lov_object_kmem, GFP_NOFS);
+ if (lov) {
obj = lov2lu(lov);
lu_object_init(obj, NULL, dev);
lov->lo_cl.co_ops = &lov_ops;
@@ -913,11 +921,11 @@ static struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
struct lov_stripe_md *lsm = NULL;
lov_conf_freeze(lov);
- if (lov->lo_lsm != NULL) {
+ if (lov->lo_lsm) {
lsm = lsm_addref(lov->lo_lsm);
CDEBUG(D_INODE, "lsm %p addref %d/%d by %p.\n",
- lsm, atomic_read(&lsm->lsm_refc),
- lov->lo_layout_invalid, current);
+ lsm, atomic_read(&lsm->lsm_refc),
+ lov->lo_layout_invalid, current);
}
lov_conf_thaw(lov);
return lsm;
@@ -928,12 +936,12 @@ struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
struct lu_object *luobj;
struct lov_stripe_md *lsm = NULL;
- if (clobj == NULL)
+ if (!clobj)
return NULL;
luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
&lov_device_type);
- if (luobj != NULL)
+ if (luobj)
lsm = lov_lsm_addref(lu2lov(luobj));
return lsm;
}
@@ -941,7 +949,7 @@ EXPORT_SYMBOL(lov_lsm_get);
void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
{
- if (lsm != NULL)
+ if (lsm)
lov_free_memmd(&lsm);
}
EXPORT_SYMBOL(lov_lsm_put);
@@ -953,7 +961,7 @@ int lov_read_and_clear_async_rc(struct cl_object *clob)
luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
&lov_device_type);
- if (luobj != NULL) {
+ if (luobj) {
struct lov_object *lov = lu2lov(luobj);
lov_conf_freeze(lov);
@@ -963,7 +971,6 @@ int lov_read_and_clear_async_rc(struct cl_object *clob)
int i;
lsm = lov->lo_lsm;
- LASSERT(lsm != NULL);
for (i = 0; i < lsm->lsm_stripe_count; i++) {
struct lov_oinfo *loi = lsm->lsm_oinfo[i];
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index aa520aa76e09..ae83eb0f6f36 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -43,8 +43,7 @@
#include "lov_internal.h"
/* compute object size given "stripeno" and the ost size */
-u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size,
- int stripeno)
+u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno)
{
unsigned long ssize = lsm->lsm_stripe_size;
unsigned long stripe_size;
@@ -55,7 +54,6 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size,
if (ost_size == 0)
return 0;
- LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth);
/* lov_do_div64(a, b) returns a % b, and a = a / b */
@@ -115,7 +113,8 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size,
* this function returns < 0 when the offset was "before" the stripe and
* was moved forward to the start of the stripe in question; 0 when it
* falls in the stripe and no shifting was done; > 0 when the offset
- * was outside the stripe and was pulled back to its final byte. */
+ * was outside the stripe and was pulled back to its final byte.
+ */
int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
int stripeno, u64 *obdoff)
{
@@ -129,8 +128,6 @@ int lov_stripe_offset(struct lov_stripe_md *lsm, u64 lov_off,
return 0;
}
- LASSERT(lsm_op_find(magic) != NULL);
-
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &lov_off,
&swidth);
@@ -183,7 +180,6 @@ u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size,
if (file_size == OBD_OBJECT_EOF)
return OBD_OBJECT_EOF;
- LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, &file_size,
&swidth);
@@ -213,7 +209,8 @@ u64 lov_size_to_stripe(struct lov_stripe_md *lsm, u64 file_size,
/* given an extent in an lov and a stripe, calculate the extent of the stripe
* that is contained within the lov extent. this returns true if the given
- * stripe does intersect with the lov extent. */
+ * stripe does intersect with the lov extent.
+ */
int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
u64 start, u64 end, u64 *obd_start, u64 *obd_end)
{
@@ -227,7 +224,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
/* this stripe doesn't intersect the file extent when neither
* start nor the end intersected the stripe and obd_start and
- * obd_end got rounded up to the save value. */
+ * obd_end got rounded up to the same value.
+ */
if (start_side != 0 && end_side != 0 && *obd_start == *obd_end)
return 0;
@@ -238,7 +236,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
* in the wrong direction and touch it up.
* interestingly, this can't underflow since end must be > start
* if we passed through the previous check.
- * (should we assert for that somewhere?) */
+ * (should we assert for that somewhere?)
+ */
if (end_side != 0)
(*obd_end)--;
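/*
 * Worked example (illustrative): two 1 MB stripes, so the stripe
 * width is 2 MB. For the file extent [1.2 MB, 1.8 MB] and stripe 0,
 * the start rounds forward and the end rounds back to the same
 * stripe-local boundary (1 MB), both sides are non-zero, and the
 * check above reports no intersection. For stripe 1 the extent maps
 * cleanly to stripe-local [0.2 MB, 0.8 MB] with both sides zero, so
 * no end adjustment is needed.
 */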
@@ -252,7 +251,6 @@ int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off)
u64 stripe_off, swidth;
int magic = lsm->lsm_magic;
- LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_offset(lsm, NULL, &lov_off, &swidth);
stripe_off = lov_do_div64(lov_off, swidth);
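/*
 * Worked example (illustrative, assuming the default RAID0 layout):
 * with a 1 MB stripe size and 4 stripes, swidth is 4 MB. For
 * lov_off = 6.5 MB, lov_do_div64() yields
 * stripe_off = 6.5 MB % 4 MB = 2.5 MB, and 2.5 MB / 1 MB then
 * selects stripe number 2.
 */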
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 6b2d1007192b..3925633a99ec 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -134,17 +134,18 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
if ((lmm_magic != LOV_MAGIC_V1) &&
(lmm_magic != LOV_MAGIC_V3)) {
CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
- lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+ lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
return -EINVAL;
}
if (lsm) {
/* If we are just sizing the EA, limit the stripe count
- * to the actual number of OSTs in this filesystem. */
+ * to the actual number of OSTs in this filesystem.
+ */
if (!lmmp) {
stripe_count = lov_get_stripecnt(lov, lmm_magic,
- lsm->lsm_stripe_count);
+ lsm->lsm_stripe_count);
lsm->lsm_stripe_count = stripe_count;
} else if (!lsm_is_released(lsm)) {
stripe_count = lsm->lsm_stripe_count;
@@ -155,7 +156,8 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
/* No need to allocate more than maximum supported stripes.
* Anyway, this is pretty inaccurate since ld_tgt_count now
* represents max index and we should rely on the actual number
- * of OSTs instead */
+ * of OSTs instead
+ */
stripe_count = lov_mds_md_max_stripe_count(
lov->lov_ocd.ocd_max_easize, lmm_magic);
@@ -183,7 +185,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
return -ENOMEM;
}
- CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n",
+ CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
lmm_magic, lmm_size);
lmmv1 = *lmmp;
@@ -241,7 +243,8 @@ __u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
stripe_count = 1;
/* stripe count is based on whether ldiskfs can handle
- * larger EA sizes */
+ * larger EA sizes
+ */
if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
lov->lov_ocd.ocd_max_easize)
max_stripes = lov_mds_md_max_stripe_count(
@@ -257,14 +260,15 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
{
int rc;
- if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) {
+ if (!lsm_op_find(le32_to_cpu(*(__u32 *)lmm))) {
CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
CERROR("%*phN\n", lmm_bytes, lmm);
return -EINVAL;
}
rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
- lmm_bytes, stripe_count);
+ lmm_bytes,
+ stripe_count);
return rc;
}
@@ -306,10 +310,9 @@ int lov_free_memmd(struct lov_stripe_md **lsmp)
*lsmp = NULL;
LASSERT(atomic_read(&lsm->lsm_refc) > 0);
refc = atomic_dec_return(&lsm->lsm_refc);
- if (refc == 0) {
- LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
+ if (refc == 0)
lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
- }
+
return refc;
}
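/*
 * Illustrative sketch (hypothetical names): the refcount pattern used
 * by lov_free_memmd() above -- the caller that drops the count to
 * zero is the one that frees the object, everyone else just reports
 * the remaining count.
 */
static int sketch_put(atomic_t *refc, void (*free_fn)(void *), void *obj)
{
	int c = atomic_dec_return(refc);

	if (c == 0)
		free_fn(obj);	/* we held the final reference */
	return c;
}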
@@ -359,7 +362,6 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
if (!lmm)
return lsm_size;
- LASSERT(lsm_op_find(magic) != NULL);
rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
if (rc) {
lov_free_memmd(lsmp);
@@ -376,7 +378,7 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
* lmm_magic must be LOV_USER_MAGIC.
*/
int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
- struct lov_user_md *lump)
+ struct lov_user_md __user *lump)
{
/*
* XXX huge struct allocated on stack.
@@ -399,13 +401,15 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
set_fs(KERNEL_DS);
/* we only need the header part from user space to get lmm_magic and
- * lmm_stripe_count, (the header part is common to v1 and v3) */
+ * lmm_stripe_count (the header part is common to v1 and v3)
+ */
lum_size = sizeof(struct lov_user_md_v1);
if (copy_from_user(&lum, lump, lum_size)) {
rc = -EFAULT;
goto out_set;
- } else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
- (lum.lmm_magic != LOV_USER_MAGIC_V3)) {
+ }
+ if ((lum.lmm_magic != LOV_USER_MAGIC) &&
+ (lum.lmm_magic != LOV_USER_MAGIC_V3)) {
rc = -EINVAL;
goto out_set;
}
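/*
 * Illustrative sketch (hypothetical names) of the pattern used in
 * lov_getstripe() above: copy only the v1-sized header from user
 * space, validate the magic, and only then decide how much more of
 * the user buffer to trust and copy.
 */
struct sketch_hdr { __u32 magic; };

static int sketch_read_header(void __user *up, struct sketch_hdr *h)
{
	if (copy_from_user(h, up, sizeof(*h)))
		return -EFAULT;		/* user buffer unreadable */
	if (h->magic != LOV_USER_MAGIC && h->magic != LOV_USER_MAGIC_V3)
		return -EINVAL;		/* unknown layout version */
	return 0;			/* caller may copy the full body */
}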
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 037ae91b74e7..fdcaf8047ad8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -57,7 +57,7 @@ static int lov_page_invariant(const struct cl_page_slice *slice)
const struct cl_page *page = slice->cpl_page;
const struct cl_page *sub = lov_sub_page(slice);
- return ergo(sub != NULL,
+ return ergo(sub,
page->cp_child == sub &&
sub->cp_parent == page &&
page->cp_state == sub->cp_state);
@@ -70,7 +70,7 @@ static void lov_page_fini(const struct lu_env *env,
LINVRNT(lov_page_invariant(slice));
- if (sub != NULL) {
+ if (sub) {
LASSERT(sub->cp_state == CPS_FREEING);
lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
sub->cp_parent = NULL;
@@ -151,7 +151,7 @@ static const struct cl_page_operations lov_page_ops = {
static void lov_empty_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- LASSERT(slice->cpl_page->cp_child == NULL);
+ LASSERT(!slice->cpl_page->cp_child);
}
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
@@ -172,8 +172,7 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
offset = cl_offset(obj, page->cp_index);
stripe = lov_stripe_number(loo->lo_lsm, offset);
LASSERT(stripe < r0->lo_nr);
- rc = lov_stripe_offset(loo->lo_lsm, offset, stripe,
- &suboff);
+ rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
LASSERT(rc == 0);
lpg->lps_invalid = 1;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index b43ce6cd64c2..9ae1d6f42d6e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -64,7 +64,7 @@ void lov_pool_putref(struct pool_desc *pool)
if (atomic_dec_and_test(&pool->pool_refcount)) {
LASSERT(hlist_unhashed(&pool->pool_hash));
LASSERT(list_empty(&pool->pool_list));
- LASSERT(pool->pool_debugfs_entry == NULL);
+ LASSERT(!pool->pool_debugfs_entry);
lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
lov_ost_pool_free(&(pool->pool_obds));
kfree(pool);
@@ -152,9 +152,8 @@ struct cfs_hash_ops pool_hash_operations = {
};
-/* ifdef needed for liblustre support */
/*
- * pool /proc seq_file methods
+ * pool debugfs seq_file methods
*/
/*
* iterator is used to go through the target pool entries
@@ -174,7 +173,7 @@ static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
struct pool_iterator *iter = (struct pool_iterator *)s->private;
int prev_idx;
- LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic);
+ LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic);
/* test if end of file */
if (*pos >= pool_tgt_count(iter->pool))
@@ -204,7 +203,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos)
if ((pool_tgt_count(pool) == 0) ||
(*pos >= pool_tgt_count(pool))) {
/* iter is not created, so stop() has no way to
- * find pool to dec ref */
+ * find pool to dec ref
+ */
lov_pool_putref(pool);
return NULL;
}
@@ -217,7 +217,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos)
iter->idx = 0;
/* we use seq_file private field to memorized iterator so
- * we can free it at stop() */
+ * we can free it at stop()
+ */
/* /!\ do not forget to restore it to pool before freeing it */
s->private = iter;
if (*pos > 0) {
@@ -226,8 +227,8 @@ static void *pool_proc_start(struct seq_file *s, loff_t *pos)
i = 0;
do {
- ptr = pool_proc_next(s, &iter, &i);
- } while ((i < *pos) && (ptr != NULL));
+ ptr = pool_proc_next(s, &iter, &i);
+ } while ((i < *pos) && ptr);
return ptr;
}
return iter;
@@ -239,15 +240,16 @@ static void pool_proc_stop(struct seq_file *s, void *v)
/* in some cases stop() method is called 2 times, without
* calling start() method (see seq_read() from fs/seq_file.c)
- * we have to free only if s->private is an iterator */
+ * we have to free only if s->private is an iterator
+ */
if ((iter) && (iter->magic == POOL_IT_MAGIC)) {
/* we restore s->private so next call to pool_proc_start()
- * will work */
+ * will work
+ */
s->private = iter->pool;
lov_pool_putref(iter->pool);
kfree(iter);
}
- return;
}
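/*
 * Editor's note (illustrative wiring; the driver's actual
 * registration may differ): the trio above follows the standard
 * seq_file contract -- start() may allocate iterator state and stash
 * it in s->private, next() advances it, and stop() must tolerate
 * being called without a successful start(), hence the magic check.
 */
static const struct seq_operations sketch_pool_sops = {
	.start = pool_proc_start,	/* allocate iter, stash in private */
	.next  = pool_proc_next,	/* step to the next target index */
	.stop  = pool_proc_stop,	/* restore private, drop pool ref */
	.show  = pool_proc_show,	/* print one target per call */
};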
static int pool_proc_show(struct seq_file *s, void *v)
@@ -255,8 +257,8 @@ static int pool_proc_show(struct seq_file *s, void *v)
struct pool_iterator *iter = (struct pool_iterator *)v;
struct lov_tgt_desc *tgt;
- LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X", iter->magic);
- LASSERT(iter->pool != NULL);
+ LASSERTF(iter->magic == POOL_IT_MAGIC, "%08X\n", iter->magic);
+ LASSERT(iter->pool);
LASSERT(iter->idx <= pool_tgt_count(iter->pool));
down_read(&pool_tgt_rw_sem(iter->pool));
@@ -305,7 +307,7 @@ int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
init_rwsem(&op->op_rw_sem);
op->op_size = count;
op->op_array = kcalloc(op->op_size, sizeof(op->op_array[0]), GFP_NOFS);
- if (op->op_array == NULL) {
+ if (!op->op_array) {
op->op_size = 0;
return -ENOMEM;
}
@@ -325,7 +327,7 @@ int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count)
new_size = max(min_count, 2 * op->op_size);
new = kcalloc(new_size, sizeof(op->op_array[0]), GFP_NOFS);
- if (new == NULL)
+ if (!new)
return -ENOMEM;
/* copy old array to new one */
@@ -429,8 +431,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
INIT_HLIST_NODE(&new_pool->pool_hash);
- /* we need this assert seq_file is not implemented for liblustre */
- /* get ref for /proc file */
+ /* get ref for debugfs file */
lov_pool_getref(new_pool);
new_pool->pool_debugfs_entry = ldebugfs_add_simple(
lov->lov_pool_debugfs_entry,
@@ -443,7 +444,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
lov_pool_putref(new_pool);
}
CDEBUG(D_INFO, "pool %p - proc %p\n",
- new_pool, new_pool->pool_debugfs_entry);
+ new_pool, new_pool->pool_debugfs_entry);
spin_lock(&obd->obd_dev_lock);
list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
@@ -487,7 +488,7 @@ int lov_pool_del(struct obd_device *obd, char *poolname)
/* lookup and kill hash reference */
pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
- if (pool == NULL)
+ if (!pool)
return -ENOENT;
if (!IS_ERR_OR_NULL(pool->pool_debugfs_entry)) {
@@ -518,7 +519,7 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
lov = &(obd->u.lov);
pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (pool == NULL)
+ if (!pool)
return -ENOENT;
obd_str2uuid(&ost_uuid, ostname);
@@ -564,7 +565,7 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
lov = &(obd->u.lov);
pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (pool == NULL)
+ if (!pool)
return -ENOENT;
obd_str2uuid(&ost_uuid, ostname);
@@ -632,12 +633,12 @@ struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname)
pool = NULL;
if (poolname[0] != '\0') {
pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (pool == NULL)
+ if (!pool)
CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
poolname);
- if ((pool != NULL) && (pool_tgt_count(pool) == 0)) {
+ if (pool && (pool_tgt_count(pool) == 0)) {
CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n",
- poolname);
+ poolname);
/* pool is ignored, so we remove ref on it */
lov_pool_putref(pool);
pool = NULL;
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 42deda71f577..7178a02d6267 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -156,7 +156,7 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
tgt = lov->lov_tgts[ost_idx];
- if (unlikely(tgt == NULL)) {
+ if (unlikely(!tgt)) {
rc = 0;
goto out;
}
@@ -178,7 +178,7 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
cfs_time_seconds(1), NULL, NULL);
rc = l_wait_event(waitq, lov_check_set(lov, ost_idx), &lwi);
- if (tgt != NULL && tgt->ltd_active)
+ if (tgt->ltd_active)
return 1;
return 0;
@@ -190,28 +190,23 @@ out:
static int common_attr_done(struct lov_request_set *set)
{
- struct list_head *pos;
struct lov_request *req;
struct obdo *tmp_oa;
int rc = 0, attrset = 0;
- LASSERT(set->set_oi != NULL);
-
- if (set->set_oi->oi_oa == NULL)
+ if (!set->set_oi->oi_oa)
return 0;
if (!atomic_read(&set->set_success))
return -EIO;
- tmp_oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
- if (tmp_oa == NULL) {
+ tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
+ if (!tmp_oa) {
rc = -ENOMEM;
goto out;
}
- list_for_each(pos, &set->set_list) {
- req = list_entry(pos, struct lov_request, rq_link);
-
+ list_for_each_entry(req, &set->set_list, rq_link) {
if (!req->rq_complete || req->rq_rc)
continue;
if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */
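/*
 * Editor's note (illustrative): list_for_each_entry(req, head, member)
 * folds the list_for_each() + list_entry() pair into one macro -- it
 * walks the list and hands back the containing structure directly,
 * which is why the temporary struct list_head *pos cursor above could
 * be dropped.
 */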
@@ -227,7 +222,8 @@ static int common_attr_done(struct lov_request_set *set)
if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
(set->set_oi->oi_md->lsm_stripe_count != attrset)) {
/* When we take attributes of some epoch, we require all the
- * ost to be active. */
+ * OSTs to be active.
+ */
CERROR("Not all the stripes had valid attrs\n");
rc = -EIO;
goto out;
@@ -246,7 +242,7 @@ int lov_fini_getattr_set(struct lov_request_set *set)
{
int rc = 0;
- if (set == NULL)
+ if (!set)
return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes))
@@ -258,7 +254,8 @@ int lov_fini_getattr_set(struct lov_request_set *set)
}
/* The callback for osc_getattr_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
static int cb_getattr_update(void *cookie, int rc)
{
struct obd_info *oinfo = cookie;
@@ -310,9 +307,8 @@ int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
req->rq_stripe = i;
req->rq_idx = loi->loi_ost_idx;
- req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
- GFP_NOFS | __GFP_ZERO);
- if (req->rq_oi.oi_oa == NULL) {
+ req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
+ if (!req->rq_oi.oi_oa) {
kfree(req);
rc = -ENOMEM;
goto out_set;
@@ -337,7 +333,7 @@ out_set:
int lov_fini_destroy_set(struct lov_request_set *set)
{
- if (set == NULL)
+ if (!set)
return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes)) {
@@ -368,7 +364,7 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
set->set_oi->oi_md = lsm;
set->set_oi->oi_oa = src_oa;
set->set_oti = oti;
- if (oti != NULL && src_oa->o_valid & OBD_MD_FLCOOKIE)
+ if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE)
set->set_cookies = oti->oti_logcookies;
for (i = 0; i < lsm->lsm_stripe_count; i++) {
@@ -393,9 +389,8 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
req->rq_stripe = i;
req->rq_idx = loi->loi_ost_idx;
- req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
- GFP_NOFS | __GFP_ZERO);
- if (req->rq_oi.oi_oa == NULL) {
+ req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
+ if (!req->rq_oi.oi_oa) {
kfree(req);
rc = -ENOMEM;
goto out_set;
@@ -419,7 +414,7 @@ int lov_fini_setattr_set(struct lov_request_set *set)
{
int rc = 0;
- if (set == NULL)
+ if (!set)
return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes)) {
@@ -460,7 +455,8 @@ int lov_update_setattr_set(struct lov_request_set *set,
}
/* The callback for osc_setattr_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
static int cb_setattr_update(void *cookie, int rc)
{
struct obd_info *oinfo = cookie;
@@ -486,7 +482,7 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
set->set_exp = exp;
set->set_oti = oti;
set->set_oi = oinfo;
- if (oti != NULL && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
+ if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
set->set_cookies = oti->oti_logcookies;
for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
@@ -509,9 +505,8 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
req->rq_stripe = i;
req->rq_idx = loi->loi_ost_idx;
- req->rq_oi.oi_oa = kmem_cache_alloc(obdo_cachep,
- GFP_NOFS | __GFP_ZERO);
- if (req->rq_oi.oi_oa == NULL) {
+ req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
+ if (!req->rq_oi.oi_oa) {
kfree(req);
rc = -ENOMEM;
goto out_set;
@@ -581,7 +576,7 @@ int lov_fini_statfs_set(struct lov_request_set *set)
{
int rc = 0;
- if (set == NULL)
+ if (!set)
return 0;
if (atomic_read(&set->set_completes)) {
@@ -648,7 +643,8 @@ static void lov_update_statfs(struct obd_statfs *osfs,
}
/* The callback for osc_statfs_async that finalizes a request info when a
- * response is received. */
+ * response is received.
+ */
static int cb_statfs_update(void *cookie, int rc)
{
struct obd_info *oinfo = cookie;
@@ -668,7 +664,8 @@ static int cb_statfs_update(void *cookie, int rc)
lov_sfs = oinfo->oi_osfs;
success = atomic_read(&set->set_success);
/* XXX: the same is done in lov_update_common_set, however
- lovset->set_exp is not initialized. */
+ * lovset->set_exp is not initialized.
+ */
lov_update_set(set, lovreq, rc);
if (rc)
goto out;
@@ -718,7 +715,7 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
struct lov_request *req;
- if (lov->lov_tgts[i] == NULL ||
+ if (!lov->lov_tgts[i] ||
(!lov_check_and_wait_active(lov, i) &&
(oinfo->oi_flags & OBD_STATFS_NODELAY))) {
CDEBUG(D_HA, "lov idx %d inactive\n", i);
@@ -726,7 +723,8 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
}
/* skip targets that have been explicitly disabled by the
- * administrator */
+ * administrator
+ */
if (!lov->lov_tgts[i]->ltd_exp) {
CDEBUG(D_HA, "lov idx %d administratively disabled\n", i);
continue;
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index f1795c3e2db5..c335c020f4f4 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -101,7 +101,6 @@ static int lovsub_device_init(const struct lu_env *env, struct lu_device *d,
next->ld_site = d->ld_site;
ldt = next->ld_type;
- LASSERT(ldt != NULL);
rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL);
if (rc) {
next->ld_site = NULL;
@@ -148,8 +147,8 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
struct lovsub_req *lsr;
int result;
- lsr = kmem_cache_alloc(lovsub_req_kmem, GFP_NOFS | __GFP_ZERO);
- if (lsr != NULL) {
+ lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS);
+ if (lsr) {
cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
result = 0;
} else
@@ -175,7 +174,7 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
struct lovsub_device *lsd;
lsd = kzalloc(sizeof(*lsd), GFP_NOFS);
- if (lsd != NULL) {
+ if (lsd) {
int result;
result = cl_device_init(&lsd->acid_cl, t);
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index 1a3e30a14895..3bb0c9068a90 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -148,7 +148,8 @@ static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
{
pgoff_t size; /* stripe size in pages */
pgoff_t skip; /* how many pages in every stripe are occupied by
- * "other" stripes */
+ * "other" stripes
+ */
pgoff_t start;
pgoff_t end;
@@ -284,7 +285,8 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
switch (parent->cll_state) {
case CLS_ENQUEUED:
/* See LU-1355 for the case that a glimpse lock is
- * interrupted by signal */
+ * interrupted by signal
+ */
LASSERT(parent->cll_flags & CLF_CANCELLED);
break;
case CLS_QUEUING:
@@ -402,7 +404,7 @@ static void lovsub_lock_delete(const struct lu_env *env,
restart = 0;
list_for_each_entry_safe(scan, temp,
- &sub->lss_parents, lll_list) {
+ &sub->lss_parents, lll_list) {
lov = scan->lll_super;
subdata = &lov->lls_sub[scan->lll_idx];
lovsub_parent_lock(env, lov);
@@ -429,7 +431,7 @@ static int lovsub_lock_print(const struct lu_env *env, void *cookie,
list_for_each_entry(scan, &sub->lss_parents, lll_list) {
lov = scan->lll_super;
(*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
- if (lov != NULL)
+ if (lov)
cl_lock_descr_print(env, cookie, p,
&lov->lls_cl.cls_lock->cll_descr);
(*p)(env, cookie, "] ");
@@ -453,8 +455,8 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
struct lovsub_lock *lsk;
int result;
- lsk = kmem_cache_alloc(lovsub_lock_kmem, GFP_NOFS | __GFP_ZERO);
- if (lsk != NULL) {
+ lsk = kmem_cache_zalloc(lovsub_lock_kmem, GFP_NOFS);
+ if (lsk) {
INIT_LIST_HEAD(&lsk->lss_parents);
cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
result = 0;
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 5ba5ee1b8681..6c5430d938d0 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -63,7 +63,7 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
under = &dev->acid_next->cd_lu_dev;
below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below != NULL) {
+ if (below) {
lu_object_add(obj, below);
cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
result = 0;
@@ -143,8 +143,8 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
struct lovsub_object *los;
struct lu_object *obj;
- los = kmem_cache_alloc(lovsub_object_kmem, GFP_NOFS | __GFP_ZERO);
- if (los != NULL) {
+ los = kmem_cache_zalloc(lovsub_object_kmem, GFP_NOFS);
+ if (los) {
struct cl_object_header *hdr;
obj = lovsub2lu(los);
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index 3f00ce9677b7..2d945532b78e 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -60,7 +60,7 @@ static const struct cl_page_operations lovsub_page_ops = {
};
int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *unused)
+ struct cl_page *page, struct page *unused)
{
struct lovsub_page *lsb = cl_object_page_slice(obj, page);
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 337241d84980..0dcb6b6a7782 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -46,22 +46,22 @@ static int lov_stripesize_seq_show(struct seq_file *m, void *v)
struct obd_device *dev = (struct obd_device *)m->private;
struct lov_desc *desc;
- LASSERT(dev != NULL);
+ LASSERT(dev);
desc = &dev->u.lov.desc;
seq_printf(m, "%llu\n", desc->ld_default_stripe_size);
return 0;
}
static ssize_t lov_stripesize_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct lov_desc *desc;
__u64 val;
int rc;
- LASSERT(dev != NULL);
+ LASSERT(dev);
desc = &dev->u.lov.desc;
rc = lprocfs_write_u64_helper(buffer, count, &val);
if (rc)
@@ -79,22 +79,22 @@ static int lov_stripeoffset_seq_show(struct seq_file *m, void *v)
struct obd_device *dev = (struct obd_device *)m->private;
struct lov_desc *desc;
- LASSERT(dev != NULL);
+ LASSERT(dev);
desc = &dev->u.lov.desc;
seq_printf(m, "%llu\n", desc->ld_default_stripe_offset);
return 0;
}
static ssize_t lov_stripeoffset_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct lov_desc *desc;
__u64 val;
int rc;
- LASSERT(dev != NULL);
+ LASSERT(dev);
desc = &dev->u.lov.desc;
rc = lprocfs_write_u64_helper(buffer, count, &val);
if (rc)
@@ -111,21 +111,21 @@ static int lov_stripetype_seq_show(struct seq_file *m, void *v)
struct obd_device *dev = (struct obd_device *)m->private;
struct lov_desc *desc;
- LASSERT(dev != NULL);
+ LASSERT(dev);
desc = &dev->u.lov.desc;
seq_printf(m, "%u\n", desc->ld_pattern);
return 0;
}
static ssize_t lov_stripetype_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct lov_desc *desc;
int val, rc;
- LASSERT(dev != NULL);
+ LASSERT(dev);
desc = &dev->u.lov.desc;
rc = lprocfs_write_helper(buffer, count, &val);
if (rc)
@@ -143,21 +143,21 @@ static int lov_stripecount_seq_show(struct seq_file *m, void *v)
struct obd_device *dev = (struct obd_device *)m->private;
struct lov_desc *desc;
- LASSERT(dev != NULL);
+ LASSERT(dev);
desc = &dev->u.lov.desc;
seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1);
return 0;
}
static ssize_t lov_stripecount_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct lov_desc *desc;
int val, rc;
- LASSERT(dev != NULL);
+ LASSERT(dev);
desc = &dev->u.lov.desc;
rc = lprocfs_write_helper(buffer, count, &val);
if (rc)
@@ -199,7 +199,7 @@ static int lov_desc_uuid_seq_show(struct seq_file *m, void *v)
struct obd_device *dev = (struct obd_device *)m->private;
struct lov_obd *lov;
- LASSERT(dev != NULL);
+ LASSERT(dev);
lov = &dev->u.lov;
seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid);
return 0;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index 3d2997a161b6..c5519aeb0d8a 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -53,7 +53,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size,
void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
struct md_op_data *data, int ea_size);
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
- void *ea, int ealen, void *ea2, int ea2len);
+ void *ea, int ealen, void *ea2, int ea2len);
void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
const void *data, int datalen, __u32 mode, __u32 uid,
__u32 gid, cfs_cap_t capability, __u64 rdev);
@@ -90,7 +90,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct ptlrpc_request **req, __u64 extra_lock_flags);
int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
- struct list_head *cancels, ldlm_mode_t mode,
+ struct list_head *cancels, enum ldlm_mode mode,
__u64 bits);
/* mdc/mdc_request.c */
int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
@@ -119,8 +119,8 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request);
int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- ldlm_cancel_flags_t flags, void *opaque);
+ ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags, void *opaque);
int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
struct lu_fid *fid, __u64 *bits);
@@ -129,10 +129,10 @@ int mdc_intent_getattr_async(struct obd_export *exp,
struct md_enqueue_info *minfo,
struct ldlm_enqueue_info *einfo);
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh);
+enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, enum ldlm_type type,
+ ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ struct lustre_handle *lockh);
static inline int mdc_prep_elc_req(struct obd_export *exp,
struct ptlrpc_request *req, int opc,
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index 7218532ffea3..b3bfdcb73670 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -41,8 +41,6 @@
static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
{
- LASSERT(b != NULL);
-
b->suppgid = suppgid;
b->uid = from_kuid(&init_user_ns, current_uid());
b->gid = from_kgid(&init_user_ns, current_gid());
@@ -83,7 +81,6 @@ void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
{
struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
&RMF_MDT_BODY);
- LASSERT(b != NULL);
b->valid = valid;
b->eadatasize = ea_size;
b->flags = flags;
@@ -323,7 +320,7 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
return;
lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
- if (ea == NULL) { /* Remove LOV EA */
+ if (!ea) { /* Remove LOV EA */
lum->lmm_magic = LOV_USER_MAGIC_V1;
lum->lmm_stripe_size = 0;
lum->lmm_stripe_count = 0;
@@ -346,7 +343,6 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- LASSERT(rec != NULL);
rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ?
REINT_RMENTRY : REINT_UNLINK;
@@ -362,7 +358,7 @@ void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
rec->ul_bias = op_data->op_bias;
tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
- LASSERT(tmp != NULL);
+ LASSERT(tmp);
LOGL0(op_data->op_name, op_data->op_namelen, tmp);
}
@@ -373,7 +369,6 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- LASSERT(rec != NULL);
rec->lk_opcode = REINT_LINK;
rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid; */
@@ -456,10 +451,9 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req,
struct ldlm_lock *lock;
data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
- LASSERT(data != NULL);
lock = ldlm_handle2lock(&op_data->op_lease_handle);
- if (lock != NULL) {
+ if (lock) {
data->cd_handle = lock->l_remote_handle;
ldlm_lock_put(lock);
}
@@ -495,7 +489,8 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
/* We record requests in flight in cli->cl_r_in_flight here.
* There is only one write rpc possible in mdc anyway. If this is to change
- * in the future - the code may need to be revisited. */
+ * in the future - the code may need to be revisited.
+ */
int mdc_enter_request(struct client_obd *cli)
{
int rc = 0;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index ef9a1e124ea4..958a164f620d 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -129,7 +129,7 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
lock = ldlm_handle2lock((struct lustre_handle *)lockh);
- LASSERT(lock != NULL);
+ LASSERT(lock);
lock_res_and_lock(lock);
if (lock->l_resource->lr_lvb_inode &&
lock->l_resource->lr_lvb_inode != data) {
@@ -151,13 +151,13 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
return 0;
}
-ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
- const struct lu_fid *fid, ldlm_type_t type,
- ldlm_policy_data_t *policy, ldlm_mode_t mode,
- struct lustre_handle *lockh)
+enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
+ const struct lu_fid *fid, enum ldlm_type type,
+ ldlm_policy_data_t *policy, enum ldlm_mode mode,
+ struct lustre_handle *lockh)
{
struct ldlm_res_id res_id;
- ldlm_mode_t rc;
+ enum ldlm_mode rc;
fid_build_reg_res_name(fid, &res_id);
/* LU-4405: Clear bits not supported by server */
@@ -170,8 +170,8 @@ ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
int mdc_cancel_unused(struct obd_export *exp,
const struct lu_fid *fid,
ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
+ enum ldlm_mode mode,
+ enum ldlm_cancel_flags flags,
void *opaque)
{
struct ldlm_res_id res_id;
@@ -191,12 +191,12 @@ int mdc_null_inode(struct obd_export *exp,
struct ldlm_resource *res;
struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace;
- LASSERTF(ns != NULL, "no namespace passed\n");
+ LASSERTF(ns, "no namespace passed\n");
fid_build_reg_res_name(fid, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (res == NULL)
+ if (!res)
return 0;
lock_res(res);
@@ -210,7 +210,8 @@ int mdc_null_inode(struct obd_export *exp,
/* find any ldlm lock of the inode in mdc
* return 0 not find
* 1 find one
- * < 0 error */
+ * < 0 error
+ */
int mdc_find_cbdata(struct obd_export *exp,
const struct lu_fid *fid,
ldlm_iterator_t it, void *data)
@@ -252,7 +253,8 @@ static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
* OOM here may cause recovery failure if lmm is needed (only for the
* original open if the MDS crashed just when this client also OOM'd)
* but this is incredibly unlikely, and questionable whether the client
- * could do MDS recovery under OOM anyways... */
+ * could do MDS recovery under OOM anyways...
+ */
static void mdc_realloc_openmsg(struct ptlrpc_request *req,
struct mdt_body *body)
{
@@ -317,7 +319,7 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_OPEN);
- if (req == NULL) {
+ if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return ERR_PTR(-ENOMEM);
}
@@ -364,8 +366,8 @@ mdc_intent_getxattr_pack(struct obd_export *exp,
LIST_HEAD(cancels);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_GETXATTR);
- if (req == NULL)
+ &RQF_LDLM_INTENT_GETXATTR);
+ if (!req)
return ERR_PTR(-ENOMEM);
rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
@@ -384,14 +386,12 @@ mdc_intent_getxattr_pack(struct obd_export *exp,
mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid, maxdata, -1,
0);
- req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
- RCL_SERVER, maxdata);
+ req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, maxdata);
- req_capsule_set_size(&req->rq_pill, &RMF_EAVALS,
- RCL_SERVER, maxdata);
+ req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, RCL_SERVER, maxdata);
req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS,
- RCL_SERVER, maxdata);
+ RCL_SERVER, maxdata);
ptlrpc_request_set_replen(req);
@@ -409,7 +409,7 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_UNLINK);
- if (req == NULL)
+ if (!req)
return ERR_PTR(-ENOMEM);
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -437,8 +437,8 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
}
static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
- struct lookup_intent *it,
- struct md_op_data *op_data)
+ struct lookup_intent *it,
+ struct md_op_data *op_data)
{
struct ptlrpc_request *req;
struct obd_device *obddev = class_exp2obd(exp);
@@ -453,7 +453,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_GETATTR);
- if (req == NULL)
+ if (!req)
return ERR_PTR(-ENOMEM);
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -496,8 +496,8 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp,
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_INTENT_LAYOUT);
- if (req == NULL)
+ &RQF_LDLM_INTENT_LAYOUT);
+ if (!req)
return ERR_PTR(-ENOMEM);
req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0);
@@ -514,7 +514,8 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp,
/* pack the layout intent request */
layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT);
/* LAYOUT_INTENT_ACCESS is generic, specific operation will be
- * set for replication */
+ * set for replication
+ */
layout->li_opc = LAYOUT_INTENT_ACCESS;
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
@@ -530,7 +531,7 @@ mdc_enqueue_pack(struct obd_export *exp, int lvb_len)
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
- if (req == NULL)
+ if (!req)
return ERR_PTR(-ENOMEM);
rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
@@ -561,7 +562,8 @@ static int mdc_finish_enqueue(struct obd_export *exp,
LASSERT(rc >= 0);
/* Similarly, if we're going to replay this request, we don't want to
- * actually get a lock, just perform the intent. */
+ * actually get a lock, just perform the intent.
+ */
if (req->rq_transno || req->rq_replay) {
lockreq = req_capsule_client_get(pill, &RMF_DLM_REQ);
lockreq->lock_flags |= ldlm_flags_to_wire(LDLM_FL_INTENT_ONLY);
@@ -573,10 +575,10 @@ static int mdc_finish_enqueue(struct obd_export *exp,
rc = 0;
} else { /* rc = 0 */
lock = ldlm_handle2lock(lockh);
- LASSERT(lock != NULL);
/* If the server gave us back a different lock mode, we should
- * fix up our variables. */
+ * fix up our variables.
+ */
if (lock->l_req_mode != einfo->ei_mode) {
ldlm_lock_addref(lockh, lock->l_req_mode);
ldlm_lock_decref(lockh, einfo->ei_mode);
@@ -586,7 +588,6 @@ static int mdc_finish_enqueue(struct obd_export *exp,
}
lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
- LASSERT(lockrep != NULL); /* checked by ldlm_cli_enqueue() */
intent->it_disposition = (int)lockrep->lock_policy_res1;
intent->it_status = (int)lockrep->lock_policy_res2;
@@ -595,7 +596,8 @@ static int mdc_finish_enqueue(struct obd_export *exp,
intent->it_data = req;
/* Technically speaking rq_transno must already be zero if
- * it_status is in error, so the check is a bit redundant */
+ * it_status is in error, so the check is a bit redundant
+ */
if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay)
mdc_clear_replay_flag(req, intent->it_status);
@@ -605,7 +607,8 @@ static int mdc_finish_enqueue(struct obd_export *exp,
*
* It's important that we do this first! Otherwise we might exit the
* function without doing so, and try to replay a failed create
- * (bug 3440) */
+ * (bug 3440)
+ */
if (it->it_op & IT_OPEN && req->rq_replay &&
(!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0))
mdc_clear_replay_flag(req, intent->it_status);
@@ -618,7 +621,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
struct mdt_body *body;
body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- if (body == NULL) {
+ if (!body) {
CERROR("Can't swab mdt_body\n");
return -EPROTO;
}
@@ -645,11 +648,12 @@ static int mdc_finish_enqueue(struct obd_export *exp,
*/
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
body->eadatasize);
- if (eadata == NULL)
+ if (!eadata)
return -EPROTO;
/* save lvb data and length in case this is for layout
- * lock */
+ * lock
+ */
lvb_data = eadata;
lvb_len = body->eadatasize;
@@ -690,31 +694,32 @@ static int mdc_finish_enqueue(struct obd_export *exp,
LASSERT(client_is_remote(exp));
perm = req_capsule_server_swab_get(pill, &RMF_ACL,
lustre_swab_mdt_remote_perm);
- if (perm == NULL)
+ if (!perm)
return -EPROTO;
}
} else if (it->it_op & IT_LAYOUT) {
/* maybe the lock was granted right away and layout
- * is packed into RMF_DLM_LVB of req */
+ * is packed into RMF_DLM_LVB of req
+ */
lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER);
if (lvb_len > 0) {
lvb_data = req_capsule_server_sized_get(pill,
&RMF_DLM_LVB, lvb_len);
- if (lvb_data == NULL)
+ if (!lvb_data)
return -EPROTO;
}
}
/* fill in stripe data for layout lock */
lock = ldlm_handle2lock(lockh);
- if (lock != NULL && ldlm_has_layout(lock) && lvb_data != NULL) {
+ if (lock && ldlm_has_layout(lock) && lvb_data) {
void *lmm;
LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n",
- ldlm_it2str(it->it_op), lvb_len);
+ ldlm_it2str(it->it_op), lvb_len);
lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS);
- if (lmm == NULL) {
+ if (!lmm) {
LDLM_LOCK_PUT(lock);
return -ENOMEM;
}
@@ -722,24 +727,25 @@ static int mdc_finish_enqueue(struct obd_export *exp,
/* install lvb_data */
lock_res_and_lock(lock);
- if (lock->l_lvb_data == NULL) {
+ if (!lock->l_lvb_data) {
lock->l_lvb_type = LVB_T_LAYOUT;
lock->l_lvb_data = lmm;
lock->l_lvb_len = lvb_len;
lmm = NULL;
}
unlock_res_and_lock(lock);
- if (lmm != NULL)
+ if (lmm)
kvfree(lmm);
}
- if (lock != NULL)
+ if (lock)
LDLM_LOCK_PUT(lock);
return rc;
}
/* We always reserve enough space in the reply packet for a stripe MD, because
- * we don't know in advance the file type. */
+ * we don't know in advance the file type.
+ */
int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lookup_intent *it, struct md_op_data *op_data,
struct lustre_handle *lockh, void *lmm, int lmmsize,
@@ -782,14 +788,15 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
policy = &getxattr_policy;
}
- LASSERT(reqp == NULL);
+ LASSERT(!reqp);
generation = obddev->u.cli.cl_import->imp_generation;
resend:
flags = saved_flags;
if (!it) {
/* The only way right now is FLOCK, in this case we hide flock
- policy as lmm, but lmmsize is 0 */
+ * policy as lmm, but lmmsize is 0
+ */
LASSERT(lmm && lmmsize == 0);
LASSERTF(einfo->ei_type == LDLM_FLOCK, "lock type %d\n",
einfo->ei_type);
@@ -823,9 +830,10 @@ resend:
if (IS_ERR(req))
return PTR_ERR(req);
- if (req != NULL && it && it->it_op & IT_CREAT)
+ if (req && it && it->it_op & IT_CREAT)
/* ask ptlrpc not to resend on EINPROGRESS since we have our own
- * retry logic */
+ * retry logic
+ */
req->rq_no_retry_einprogress = 1;
if (resends) {
@@ -836,7 +844,8 @@ resend:
/* It is important to obtain rpc_lock first (if applicable), so that
* threads that are serialised with rpc_lock are not polluting our
- * rpcs in flight counter. We do not do flock request limiting, though*/
+ * rpcs in flight counter. We do not do flock request limiting, though
+ */
if (it) {
mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
rc = mdc_enter_request(&obddev->u.cli);
@@ -852,13 +861,14 @@ resend:
0, lvb_type, lockh, 0);
if (!it) {
/* For flock requests we immediately return without further
- delay and let caller deal with the rest, since rest of
- this function metadata processing makes no sense for flock
- requests anyway. But in case of problem during comms with
- Server (ETIMEDOUT) or any signal/kill attempt (EINTR), we
- can not rely on caller and this mainly for F_UNLCKs
- (explicits or automatically generated by Kernel to clean
- current FLocks upon exit) that can't be trashed */
+ * delay and let the caller deal with the rest, since the rest of
+ * this function's metadata processing makes no sense for flock
+ * requests anyway. But if there is a problem talking to the
+ * server (ETIMEDOUT) or any signal/kill attempt (EINTR), we
+ * cannot rely on the caller; this mainly matters for F_UNLCKs
+ * (explicit, or generated automatically by the kernel to clean
+ * up current flocks upon exit) that must not be discarded
+ */
if ((rc == -EINTR) || (rc == -ETIMEDOUT))
goto resend;
return rc;
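/*
 * Editor's note (illustrative, hypothetical helper): the resend
 * policy above retries only transient transport failures and passes
 * every other error straight back to the caller.
 */
static bool sketch_should_resend(int rc)
{
	return rc == -EINTR || rc == -ETIMEDOUT;
}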
@@ -878,13 +888,13 @@ resend:
}
lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- LASSERT(lockrep != NULL);
lockrep->lock_policy_res2 =
ptlrpc_status_ntoh(lockrep->lock_policy_res2);
/* Retry the create infinitely when we get -EINPROGRESS from
- * server. This is required by the new quota design. */
+ * server. This is required by the new quota design.
+ */
if (it->it_op & IT_CREAT &&
(int)lockrep->lock_policy_res2 == -EINPROGRESS) {
mdc_clear_replay_flag(req, rc);
@@ -930,13 +940,13 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
struct ldlm_lock *lock;
int rc;
- LASSERT(request != NULL);
LASSERT(request != LP_POISON);
LASSERT(request->rq_repmsg != LP_POISON);
if (!it_disposition(it, DISP_IT_EXECD)) {
/* The server failed before it even started executing the
- * intent, i.e. because it couldn't unpack the request. */
+ * intent, i.e. because it couldn't unpack the request.
+ */
LASSERT(it->d.lustre.it_status != 0);
return it->d.lustre.it_status;
}
@@ -945,10 +955,11 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
return rc;
mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
- LASSERT(mdt_body != NULL); /* mdc_enqueue checked */
+ LASSERT(mdt_body); /* mdc_enqueue checked */
/* If we were revalidating a fid/name pair, mark the intent in
- * case we fail and get called again from lookup */
+ * case we fail and get called again from lookup
+ */
if (fid_is_sane(&op_data->op_fid2) &&
it->it_create_mode & M_CHECK_STALE &&
it->it_op != IT_GETATTR) {
@@ -957,7 +968,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
/* server can return one of two fids:
* op_fid2 - new allocated fid - if file is created.
* op_fid3 - existent fid - if file only open.
- * op_fid3 is saved in lmv_intent_open */
+ * op_fid3 is saved in lmv_intent_open
+ */
if ((!lu_fid_eq(&op_data->op_fid2, &mdt_body->fid1)) &&
(!lu_fid_eq(&op_data->op_fid3, &mdt_body->fid1))) {
CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID
@@ -1001,7 +1013,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
* one. We have to set the data here instead of in
* mdc_enqueue, because we need to use the child's inode as
* the l_ast_data to match, and that's not available until
- * intent_finish has performed the iget().) */
+ * intent_finish has performed the iget().)
+ */
lock = ldlm_handle2lock(lockh);
if (lock) {
ldlm_policy_data_t policy = lock->l_policy_data;
@@ -1036,11 +1049,12 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
{
/* We could just return 1 immediately, but since we should only
* be called in revalidate_it if we already have a lock, let's
- * verify that. */
+ * verify that.
+ */
struct ldlm_res_id res_id;
struct lustre_handle lockh;
ldlm_policy_data_t policy;
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
if (it->d.lustre.it_lock_handle) {
lockh.cookie = it->d.lustre.it_lock_handle;
@@ -1059,10 +1073,12 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
* Unfortunately, if the bits are split across multiple
* locks, there's no easy way to match all of them here,
* so an extra RPC would be performed to fetch all
- * of those bits at once for now. */
+ * of those bits at once for now.
+ */
/* For new MDTs(> 2.4), UPDATE|PERM should be enough,
* but for old MDTs (< 2.4), permission is covered
- * by LOOKUP lock, so it needs to match all bits here.*/
+ * by LOOKUP lock, so it needs to match all bits here.
+ */
policy.l_inodebits.bits = MDS_INODELOCK_UPDATE |
MDS_INODELOCK_LOOKUP |
MDS_INODELOCK_PERM;
@@ -1076,7 +1092,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
}
mode = mdc_lock_match(exp, LDLM_FL_BLOCK_GRANTED, fid,
- LDLM_IBITS, &policy,
+ LDLM_IBITS, &policy,
LCK_CR | LCK_CW | LCK_PR | LCK_PW,
&lockh);
}
@@ -1147,11 +1163,13 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
(it->it_op & (IT_LOOKUP | IT_GETATTR))) {
/* We could just return 1 immediately, but since we should only
* be called in revalidate_it if we already have a lock, let's
- * verify that. */
+ * verify that.
+ */
it->d.lustre.it_lock_handle = 0;
rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
/* Only return failure if it was not GETATTR by cfid
- (from inode_revalidate) */
+ * (from inode_revalidate)
+ */
if (rc || op_data->op_namelen != 0)
return rc;
}
@@ -1206,7 +1224,6 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
}
lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- LASSERT(lockrep != NULL);
lockrep->lock_policy_res2 =
ptlrpc_status_ntoh(lockrep->lock_policy_res2);
@@ -1235,7 +1252,8 @@ int mdc_intent_getattr_async(struct obd_export *exp,
struct ldlm_res_id res_id;
/*XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed
* for statahead currently. Consider CMD in future, such two bits
- * maybe managed by different MDS, should be adjusted then. */
+ * may be managed by different MDSs and should be adjusted then.
+ */
ldlm_policy_data_t policy = {
.l_inodebits = { MDS_INODELOCK_LOOKUP |
MDS_INODELOCK_UPDATE }
@@ -1244,9 +1262,9 @@ int mdc_intent_getattr_async(struct obd_export *exp,
__u64 flags = LDLM_FL_HAS_INTENT;
CDEBUG(D_DLMTRACE,
- "name: %.*s in inode "DFID", intent: %s flags %#Lo\n",
- op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
- ldlm_it2str(it->it_op), it->it_flags);
+ "name: %.*s in inode " DFID ", intent: %s flags %#Lo\n",
+ op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
+ ldlm_it2str(it->it_op), it->it_flags);
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
req = mdc_intent_getattr_pack(exp, it, op_data);
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index ac7695a10753..4ef3db147f87 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -65,9 +65,10 @@ static int mdc_reint(struct ptlrpc_request *request,
/* Find and cancel locally locks matched by inode @bits & @mode in the resource
* found by @fid. Found locks are added into @cancel list. Returns the amount of
- * locks added to @cancels list. */
+ * locks added to @cancels list.
+ */
int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
- struct list_head *cancels, ldlm_mode_t mode,
+ struct list_head *cancels, enum ldlm_mode mode,
__u64 bits)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
@@ -81,14 +82,15 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
*
* This distinguishes from a case when ELC is not supported originally,
* when we still want to cancel locks in advance and just cancel them
- * locally, without sending any RPC. */
+ * locally, without sending any RPC.
+ */
if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
return 0;
fid_build_reg_res_name(fid, &res_id);
res = ldlm_resource_get(exp->exp_obd->obd_namespace,
NULL, &res_id, 0, 0);
- if (res == NULL)
+ if (!res)
return 0;
LDLM_RESOURCE_ADDREF(res);
/* Initialize ibits lock policy. */
@@ -111,8 +113,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
int count = 0, rc;
__u64 bits;
- LASSERT(op_data != NULL);
-
bits = MDS_INODELOCK_UPDATE;
if (op_data->op_attr.ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID))
bits |= MDS_INODELOCK_LOOKUP;
@@ -123,7 +123,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
&cancels, LCK_EX, bits);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_REINT_SETATTR);
- if (req == NULL) {
+ if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
@@ -151,10 +151,10 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
ptlrpc_request_set_replen(req);
if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
req->rq_import->imp_replayable) {
- LASSERT(*mod == NULL);
+ LASSERT(!*mod);
*mod = obd_mod_alloc();
- if (*mod == NULL) {
+ if (!*mod) {
DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data");
} else {
req->rq_replay = 1;
@@ -181,8 +181,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(epoch != NULL);
- LASSERT(body != NULL);
epoch->handle = body->handle;
epoch->ioepoch = body->ioepoch;
req->rq_replay_cb = mdc_replay_open;
@@ -195,7 +193,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
*request = req;
if (rc && req->rq_commit_cb) {
/* Put an extra reference on \var mod on error case. */
- if (mod != NULL && *mod != NULL)
+ if (mod && *mod)
obd_mod_put(*mod);
req->rq_commit_cb(req);
}
@@ -237,7 +235,7 @@ rebuild:
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_REINT_CREATE_RMT_ACL);
- if (req == NULL) {
+ if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
@@ -262,7 +260,8 @@ rebuild:
ptlrpc_request_set_replen(req);
/* ask ptlrpc not to resend on EINPROGRESS since we have our own retry
- * logic here */
+ * logic here
+ */
req->rq_no_retry_einprogress = 1;
if (resends) {
@@ -280,7 +279,8 @@ rebuild:
goto resend;
} else if (rc == -EINPROGRESS) {
/* Retry create infinitely until succeed or get other
- * error code. */
+ * error code.
+ */
ptlrpc_req_finished(req);
resends++;
@@ -308,7 +308,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request *req = *request;
int count = 0, rc;
- LASSERT(req == NULL);
+ LASSERT(!req);
if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
(fid_is_sane(&op_data->op_fid1)) &&
@@ -324,7 +324,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
MDS_INODELOCK_FULL);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_REINT_UNLINK);
- if (req == NULL) {
+ if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
@@ -373,7 +373,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
MDS_INODELOCK_UPDATE);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
- if (req == NULL) {
+ if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
@@ -422,14 +422,14 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
&cancels, LCK_EX,
MDS_INODELOCK_LOOKUP);
if ((op_data->op_flags & MF_MDC_CANCEL_FID4) &&
- (fid_is_sane(&op_data->op_fid4)))
+ (fid_is_sane(&op_data->op_fid4)))
count += mdc_resource_get_unused(exp, &op_data->op_fid4,
&cancels, LCK_EX,
MDS_INODELOCK_FULL);
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_REINT_RENAME);
- if (req == NULL) {
+ if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 57e0fc1e8549..b91d3ff18b02 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -48,6 +48,7 @@
#include "../include/lprocfs_status.h"
#include "../include/lustre_param.h"
#include "../include/lustre_log.h"
+#include "../include/lustre_kernelcomm.h"
#include "mdc_internal.h"
@@ -62,7 +63,8 @@ static inline int mdc_queue_wait(struct ptlrpc_request *req)
/* mdc_enter_request() ensures that this client has no more
* than cl_max_rpcs_in_flight RPCs simultaneously in flight
- * against an MDT. */
+ * against an MDT.
+ */
rc = mdc_enter_request(cli);
if (rc != 0)
return rc;
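/*
 * Illustrative sketch (hypothetical names): a cap on concurrent RPCs
 * like the one mdc_enter_request() enforces can be built from an
 * atomic counter plus a wait queue -- enter() sleeps until a slot is
 * free, exit() releases the slot and wakes one waiter to retry.
 */
static atomic_t sketch_in_flight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(sketch_wq);

static int sketch_enter(int max)
{
	/* atomic_add_unless() takes a slot only while below the cap */
	return wait_event_interruptible(sketch_wq,
			atomic_add_unless(&sketch_in_flight, 1, max));
}

static void sketch_exit(void)
{
	atomic_dec(&sketch_in_flight);
	wake_up(&sketch_wq);	/* let one waiter retry the add */
}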
@@ -82,7 +84,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid)
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_GETSTATUS,
LUSTRE_MDS_VERSION, MDS_GETSTATUS);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
mdc_pack_body(req, NULL, 0, 0, -1, 0);
@@ -95,7 +97,7 @@ static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid)
goto out;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EPROTO;
goto out;
}
@@ -135,7 +137,7 @@ static int mdc_getattr_common(struct obd_export *exp,
/* sanity check for the reply */
body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- if (body == NULL)
+ if (!body)
return -EPROTO;
CDEBUG(D_NET, "mode: %o\n", body->mode);
@@ -145,7 +147,7 @@ static int mdc_getattr_common(struct obd_export *exp,
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
body->eadatasize);
- if (eadata == NULL)
+ if (!eadata)
return -EPROTO;
}
@@ -155,7 +157,7 @@ static int mdc_getattr_common(struct obd_export *exp,
LASSERT(client_is_remote(exp));
perm = req_capsule_server_swab_get(pill, &RMF_ACL,
lustre_swab_mdt_remote_perm);
- if (perm == NULL)
+ if (!perm)
return -EPROTO;
}
@@ -163,7 +165,7 @@ static int mdc_getattr_common(struct obd_export *exp,
}
static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
+ struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
int rc;
@@ -175,7 +177,7 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
}
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
@@ -205,7 +207,7 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
}
static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
- struct ptlrpc_request **request)
+ struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
int rc;
@@ -213,7 +215,7 @@ static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_GETATTR_NAME);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -260,7 +262,7 @@ static int mdc_is_subdir(struct obd_export *exp,
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
MDS_IS_SUBDIR);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
mdc_is_subdir_pack(req, pfid, cfid, 0);
@@ -289,7 +291,7 @@ static int mdc_xattr_common(struct obd_export *exp,
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
if (xattr_name) {
@@ -424,7 +426,7 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
return -EPROTO;
acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize);
- if (acl == NULL)
+ if (!acl)
return 0;
if (IS_ERR(acl)) {
@@ -460,7 +462,6 @@ static int mdc_get_lustre_md(struct obd_export *exp,
memset(md, 0, sizeof(*md));
md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
- LASSERT(md->body != NULL);
if (md->body->valid & OBD_MD_FLEASIZE) {
int lmmsize;
@@ -592,17 +593,16 @@ void mdc_replay_open(struct ptlrpc_request *req)
struct lustre_handle old;
struct mdt_body *body;
- if (mod == NULL) {
+ if (!mod) {
DEBUG_REQ(D_ERROR, req,
"Can't properly replay without open data.");
return;
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body != NULL);
och = mod->mod_och;
- if (och != NULL) {
+ if (och) {
struct lustre_handle *file_fh;
LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);
@@ -614,7 +614,7 @@ void mdc_replay_open(struct ptlrpc_request *req)
*file_fh = body->handle;
}
close_req = mod->mod_close_req;
- if (close_req != NULL) {
+ if (close_req) {
__u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
struct mdt_ioepoch *epoch;
@@ -623,7 +623,7 @@ void mdc_replay_open(struct ptlrpc_request *req)
&RMF_MDT_EPOCH);
LASSERT(epoch);
- if (och != NULL)
+ if (och)
LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
epoch->handle = body->handle;
@@ -634,7 +634,7 @@ void mdc_commit_open(struct ptlrpc_request *req)
{
struct md_open_data *mod = req->rq_cb_data;
- if (mod == NULL)
+ if (!mod)
return;
/**
@@ -674,15 +674,15 @@ int mdc_set_open_replay_data(struct obd_export *exp,
rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
- LASSERT(rec != NULL);
+ LASSERT(rec);
/* Incoming message in my byte order (it's been swabbed). */
/* Outgoing messages always in my byte order. */
- LASSERT(body != NULL);
+ LASSERT(body);
/* Only if the import is replayable, we set replay_open data */
if (och && imp->imp_replayable) {
mod = obd_mod_alloc();
- if (mod == NULL) {
+ if (!mod) {
DEBUG_REQ(D_ERROR, open_req,
"Can't allocate md_open_data");
return 0;
@@ -748,11 +748,11 @@ static int mdc_clear_open_replay_data(struct obd_export *exp,
* It is possible to not have \var mod in a case of eviction between
* lookup and ll_file_open().
**/
- if (mod == NULL)
+ if (!mod)
return 0;
LASSERT(mod != LP_POISON);
- LASSERT(mod->mod_open_req != NULL);
+ LASSERT(mod->mod_open_req);
mdc_free_open(mod);
mod->mod_och = NULL;
@@ -803,7 +803,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
@@ -814,13 +814,14 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
/* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
* portal whose threads are not taking any DLM locks and are therefore
- * always progressing */
+ * always progressing
+ */
req->rq_request_portal = MDS_READPAGE_PORTAL;
ptlrpc_at_set_req_timeout(req);
/* Ensure that this close's handle is fixed up during replay. */
- if (likely(mod != NULL)) {
- LASSERTF(mod->mod_open_req != NULL &&
+ if (likely(mod)) {
+ LASSERTF(mod->mod_open_req &&
mod->mod_open_req->rq_type != LI_POISON,
"POISONED open %p!\n", mod->mod_open_req);
@@ -828,7 +829,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
/* We no longer want to preserve this open for replay even
- * though the open was committed. b=3632, b=3633 */
+ * though the open was committed. b=3632, b=3633
+ */
spin_lock(&mod->mod_open_req->rq_lock);
mod->mod_open_req->rq_replay = 0;
spin_unlock(&mod->mod_open_req->rq_lock);
@@ -850,7 +852,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
rc = ptlrpc_queue_wait(req);
mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
- if (req->rq_repmsg == NULL) {
+ if (!req->rq_repmsg) {
CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
req->rq_status);
if (rc == 0)
@@ -866,7 +868,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
rc = -rc;
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- if (body == NULL)
+ if (!body)
rc = -EPROTO;
} else if (rc == -ESTALE) {
/**
@@ -876,7 +878,6 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
*/
if (mod) {
DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
- LASSERT(mod->mod_open_req != NULL);
if (mod->mod_open_req->rq_committed)
rc = 0;
}
@@ -886,7 +887,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
if (rc != 0)
mod->mod_close_req = NULL;
/* Since now, mod is accessed through open_req only,
- * thus close req does not keep a reference on mod anymore. */
+ * thus close req does not keep a reference on mod anymore.
+ */
obd_mod_put(mod);
}
*request = req;
@@ -903,7 +905,7 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_DONE_WRITING);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
@@ -912,15 +914,16 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
return rc;
}
- if (mod != NULL) {
- LASSERTF(mod->mod_open_req != NULL &&
+ if (mod) {
+ LASSERTF(mod->mod_open_req &&
mod->mod_open_req->rq_type != LI_POISON,
"POISONED setattr %p!\n", mod->mod_open_req);
mod->mod_close_req = req;
DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
/* We no longer want to preserve this setattr for replay even
- * though the open was committed. b=3632, b=3633 */
+ * though the open was committed. b=3632, b=3633
+ */
spin_lock(&mod->mod_open_req->rq_lock);
mod->mod_open_req->rq_replay = 0;
spin_unlock(&mod->mod_open_req->rq_lock);
@@ -940,7 +943,6 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
* Let's check if mod exists and return no error in that case
*/
if (mod) {
- LASSERT(mod->mod_open_req != NULL);
if (mod->mod_open_req->rq_committed)
rc = 0;
}
@@ -949,11 +951,12 @@ static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
if (mod) {
if (rc != 0)
mod->mod_close_req = NULL;
- LASSERT(mod->mod_open_req != NULL);
+ LASSERT(mod->mod_open_req);
mdc_free_open(mod);
/* Since now, mod is accessed through setattr req only,
- * thus DW req does not keep a reference on mod anymore. */
+ * thus DW req does not keep a reference on mod anymore.
+ */
obd_mod_put(mod);
}
@@ -978,7 +981,7 @@ static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
restart_bulk:
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
@@ -992,17 +995,17 @@ restart_bulk:
desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK,
MDS_BULK_PORTAL);
- if (desc == NULL) {
+ if (!desc) {
ptlrpc_request_free(req);
return -ENOMEM;
}
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < op_data->op_npages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
mdc_readdir_pack(req, op_data->op_offset,
- PAGE_CACHE_SIZE * op_data->op_npages,
+ PAGE_SIZE * op_data->op_npages,
&op_data->op_fid1);
ptlrpc_request_set_replen(req);
@@ -1033,8 +1036,8 @@ restart_bulk:
if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
- req->rq_bulk->bd_nob_transferred,
- PAGE_CACHE_SIZE * op_data->op_npages);
+ req->rq_bulk->bd_nob_transferred,
+ PAGE_SIZE * op_data->op_npages);
ptlrpc_req_finished(req);
return -EPROTO;
}
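
The PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT spellings being dropped here were historical aliases defined equal to PAGE_SIZE and PAGE_SHIFT, so the rename changes no behavior in the readpage path. A sketch of the byte-count arithmetic it relies on, assuming the usual invariant that PAGE_SIZE is 1UL << PAGE_SHIFT:

    #include <linux/mm.h>   /* PAGE_SIZE, PAGE_SHIFT */

    /* Bytes spanned by npages whole pages; the shift and the multiply
     * are interchangeable because PAGE_SIZE == 1UL << PAGE_SHIFT.
     */
    static inline unsigned long pages_to_bytes(unsigned long npages)
    {
            return npages << PAGE_SHIFT;    /* == npages * PAGE_SIZE */
    }
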
@@ -1066,7 +1069,7 @@ static int mdc_statfs(const struct lu_env *env,
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
LUSTRE_MDS_VERSION, MDS_STATFS);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto output;
}
@@ -1088,7 +1091,7 @@ static int mdc_statfs(const struct lu_env *env,
}
msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (msfs == NULL) {
+ if (!msfs) {
rc = -EPROTO;
goto out;
}
@@ -1161,7 +1164,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out;
}
@@ -1170,7 +1173,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
/* Copy hsm_progress struct */
req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
- if (req_hpk == NULL) {
+ if (!req_hpk) {
rc = -EPROTO;
goto out;
}
@@ -1195,7 +1198,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
LUSTRE_MDS_VERSION,
MDS_HSM_CT_REGISTER);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out;
}
@@ -1205,7 +1208,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
/* Copy hsm_progress struct */
archive_mask = req_capsule_client_get(&req->rq_pill,
&RMF_MDS_HSM_ARCHIVE);
- if (archive_mask == NULL) {
+ if (!archive_mask) {
rc = -EPROTO;
goto out;
}
@@ -1230,7 +1233,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_ACTION);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
@@ -1250,7 +1253,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp,
req_hca = req_capsule_server_get(&req->rq_pill,
&RMF_MDS_HSM_CURRENT_ACTION);
- if (req_hca == NULL) {
+ if (!req_hca) {
rc = -EPROTO;
goto out;
}
@@ -1270,7 +1273,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
LUSTRE_MDS_VERSION,
MDS_HSM_CT_UNREGISTER);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out;
}
@@ -1295,7 +1298,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_STATE_GET);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
@@ -1314,7 +1317,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp,
goto out;
req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
- if (req_hus == NULL) {
+ if (!req_hus) {
rc = -EPROTO;
goto out;
}
@@ -1336,7 +1339,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_STATE_SET);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
@@ -1350,7 +1353,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
/* Copy states */
req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
- if (req_hss == NULL) {
+ if (!req_hss) {
rc = -EPROTO;
goto out;
}
@@ -1375,7 +1378,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
int rc;
req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out;
}
@@ -1396,7 +1399,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
/* Copy hsm_request struct */
req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
- if (req_hr == NULL) {
+ if (!req_hr) {
rc = -EPROTO;
goto out;
}
@@ -1404,7 +1407,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
/* Copy hsm_user_item structs */
req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
- if (req_hui == NULL) {
+ if (!req_hui) {
rc = -EPROTO;
goto out;
}
@@ -1413,7 +1416,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
/* Copy opaque field */
req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
- if (req_opaque == NULL) {
+ if (!req_opaque) {
rc = -EPROTO;
goto out;
}
@@ -1512,7 +1515,7 @@ static int mdc_changelog_send_thread(void *csdata)
/* Set up the remote catalog handle */
ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT);
- if (ctxt == NULL) {
+ if (!ctxt) {
rc = -ENOENT;
goto out;
}
@@ -1553,6 +1556,7 @@ static int mdc_ioc_changelog_send(struct obd_device *obd,
struct ioc_changelog *icc)
{
struct changelog_show *cs;
+ struct task_struct *task;
int rc;
/* Freed in mdc_changelog_send_thread */
@@ -1570,15 +1574,20 @@ static int mdc_ioc_changelog_send(struct obd_device *obd,
* New thread because we should return to user app before
* writing into our pipe
*/
- rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs,
- "mdc_clg_send_thread"));
- if (!IS_ERR_VALUE(rc)) {
- CDEBUG(D_CHANGELOG, "start changelog thread\n");
- return 0;
+ task = kthread_run(mdc_changelog_send_thread, cs,
+ "mdc_clg_send_thread");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("%s: can't start changelog thread: rc = %d\n",
+ obd->obd_name, rc);
+ kfree(cs);
+ } else {
+ rc = 0;
+ CDEBUG(D_CHANGELOG, "%s: started changelog thread\n",
+ obd->obd_name);
}
CERROR("Failed to start changelog thread: %d\n", rc);
- kfree(cs);
return rc;
}
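
The old code funneled kthread_run() through PTR_ERR() into an int and tested it with IS_ERR_VALUE(), which can misclassify a valid pointer once truncated to int; keeping the struct task_struct pointer and testing IS_ERR() on it, as the new code does, is the idiomatic pattern. A minimal sketch with an illustrative thread function and name:

    #include <linux/kthread.h>
    #include <linux/err.h>

    static int worker_fn(void *data)    /* illustrative thread body */
    {
            return 0;
    }

    static int start_worker(void *data)
    {
            struct task_struct *task;

            task = kthread_run(worker_fn, data, "example_worker");
            if (IS_ERR(task))
                    return PTR_ERR(task);   /* e.g. -ENOMEM, -EINTR */
            return 0;
    }
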
@@ -1596,7 +1605,7 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
MDS_QUOTACHECK);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
@@ -1605,7 +1614,8 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
ptlrpc_request_set_replen(req);
/* the next poll will find -ENODATA, that means quotacheck is
- * going on */
+ * going on
+ */
cli->cl_qchk_stat = -ENODATA;
rc = ptlrpc_queue_wait(req);
if (rc)
@@ -1640,7 +1650,7 @@ static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
MDS_QUOTACTL);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
@@ -1694,7 +1704,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_SWAP_LAYOUTS);
- if (req == NULL) {
+ if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
@@ -1721,7 +1731,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp,
}
static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
+ void *karg, void __user *uarg)
{
struct obd_device *obd = exp->exp_obd;
struct obd_ioctl_data *data = karg;
@@ -1729,7 +1739,8 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
int rc;
if (!try_module_get(THIS_MODULE)) {
- CERROR("Can't get module. Is it alive?");
+ CERROR("%s: cannot get module '%s'\n", obd->obd_name,
+ module_name(THIS_MODULE));
return -EINVAL;
}
switch (cmd) {
@@ -1805,7 +1816,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
/* copy UUID */
if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
min_t(size_t, data->ioc_plen2,
- sizeof(struct obd_uuid)))) {
+ sizeof(struct obd_uuid)))) {
rc = -EFAULT;
goto out;
}
@@ -1818,7 +1829,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
if (copy_to_user(data->ioc_pbuf1, &stat_buf,
min_t(size_t, data->ioc_plen1,
- sizeof(stat_buf)))) {
+ sizeof(stat_buf)))) {
rc = -EFAULT;
goto out;
}
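
Annotating uarg as void __user * lets sparse flag any direct dereference of the user pointer; data must instead cross the boundary through copy_to_user()/copy_from_user(), as the two hunks above do. A hedged sketch of that shape, with illustrative names rather than the mdc handler itself:

    #include <linux/uaccess.h>      /* copy_to_user() */
    #include <linux/errno.h>

    struct reply {                  /* illustrative payload */
            int status;
    };

    static int example_handler(void __user *uarg)
    {
            struct reply rep = { .status = 0 };

            /* never dereference uarg directly; copy across instead */
            if (copy_to_user(uarg, &rep, sizeof(rep)))
                    return -EFAULT;
            return 0;
    }
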
@@ -1880,7 +1891,7 @@ static int mdc_get_info_rpc(struct obd_export *exp,
int rc = -EINVAL;
req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
@@ -1905,7 +1916,8 @@ static int mdc_get_info_rpc(struct obd_export *exp,
rc = ptlrpc_queue_wait(req);
/* -EREMOTE means the get_info result is partial, and it needs to
- * continue on another MDT, see fid2path part in lmv_iocontrol */
+ * continue on another MDT, see fid2path part in lmv_iocontrol
+ */
if (rc == 0 || rc == -EREMOTE) {
tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
memcpy(val, tmp, vallen);
@@ -2013,21 +2025,27 @@ static int mdc_hsm_copytool_send(int len, void *val)
/**
* callback function passed to kuc for re-registering each HSM copytool
* running on MDC, after MDT shutdown/recovery.
- * @param data archive id served by the copytool
+ * @param data copytool registration data
* @param cb_arg callback argument (obd_import)
*/
-static int mdc_hsm_ct_reregister(__u32 data, void *cb_arg)
+static int mdc_hsm_ct_reregister(void *data, void *cb_arg)
{
+ struct kkuc_ct_data *kcd = data;
struct obd_import *imp = (struct obd_import *)cb_arg;
- __u32 archive = data;
int rc;
- CDEBUG(D_HA, "recover copytool registration to MDT (archive=%#x)\n",
- archive);
- rc = mdc_ioc_hsm_ct_register(imp, archive);
+ if (!kcd || kcd->kcd_magic != KKUC_CT_DATA_MAGIC)
+ return -EPROTO;
+
+ if (!obd_uuid_equals(&kcd->kcd_uuid, &imp->imp_obd->obd_uuid))
+ return 0;
+
+ CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n",
+ imp->imp_obd->obd_name, kcd->kcd_archive);
+ rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_archive);
/* ignore error if the copytool is already registered */
- return ((rc != 0) && (rc != -EEXIST)) ? rc : 0;
+ return (rc == -EEXIST) ? 0 : rc;
}
static int mdc_set_info_async(const struct lu_env *env,
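
The re-registration callback now receives an opaque pointer to a struct kkuc_ct_data instead of a bare archive mask, so it can validate a magic value and skip records registered against a different OBD UUID before re-issuing the register RPC. A reduced sketch of that validate-then-filter pattern, with an abbreviated stand-in struct and an invented magic constant in place of the real kkuc types:

    #include <linux/errno.h>
    #include <linux/string.h>

    #define CT_DATA_MAGIC 0x0c7da7a0        /* illustrative value only */

    struct ct_data {                        /* abbreviated stand-in */
            unsigned int magic;
            char         uuid[40];
            unsigned int archive;
    };

    static int ct_reregister_cb(void *data, const char *my_uuid)
    {
            struct ct_data *kcd = data;

            if (!kcd || kcd->magic != CT_DATA_MAGIC)
                    return -EPROTO;         /* corrupt/foreign record */
            if (strcmp(kcd->uuid, my_uuid))
                    return 0;               /* someone else's registration */
            /* ... re-issue the register RPC for kcd->archive here ... */
            return 0;
    }
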
@@ -2133,7 +2151,7 @@ static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
@@ -2175,7 +2193,7 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
* Flush current sequence to make client obtain new one
* from server in case of disconnect/reconnect.
*/
- if (cli->cl_seq != NULL)
+ if (cli->cl_seq)
seq_client_flush(cli->cl_seq);
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
@@ -2238,7 +2256,8 @@ static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
/* FIXME: if we ever get into a situation where there are too many
* opened files with open locks on a single node, then we really
- * should replay these open locks to reget it */
+ * should replay these open locks to reget it
+ */
if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
return 0;
@@ -2422,7 +2441,7 @@ static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
@@ -2519,6 +2538,7 @@ static void /*__exit*/ mdc_exit(void)
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Metadata Client");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
module_init(mdc_init);
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index ab4800c20a95..3924b095bfb0 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -90,7 +90,8 @@ static int mgc_name2resid(char *name, int len, struct ldlm_res_id *res_id,
int mgc_fsname2resid(char *fsname, struct ldlm_res_id *res_id, int type)
{
/* fsname is at most 8 chars long, may contain "-".
- * e.g. "lustre", "SUN-000" */
+ * e.g. "lustre", "SUN-000"
+ */
return mgc_name2resid(fsname, strlen(fsname), res_id, type);
}
EXPORT_SYMBOL(mgc_fsname2resid);
@@ -102,7 +103,8 @@ static int mgc_logname2resid(char *logname, struct ldlm_res_id *res_id, int type
/* logname consists of "fsname-nodetype".
* e.g. "lustre-MDT0001", "SUN-000-client"
- * there is an exception: llog "params" */
+ * there is an exception: llog "params"
+ */
name_end = strrchr(logname, '-');
if (!name_end)
len = strlen(logname);
@@ -125,7 +127,8 @@ static int config_log_get(struct config_llog_data *cld)
}
/* Drop a reference to a config log. When no longer referenced,
- we can free the config log data */
+ * we can free the config log data
+ */
static void config_log_put(struct config_llog_data *cld)
{
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
@@ -162,7 +165,7 @@ struct config_llog_data *config_log_find(char *logname,
struct config_llog_data *found = NULL;
void *instance;
- LASSERT(logname != NULL);
+ LASSERT(logname);
instance = cfg ? cfg->cfg_instance : NULL;
spin_lock(&config_list_lock);
@@ -242,17 +245,18 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd,
return cld;
}
-static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
- char *fsname,
- struct config_llog_instance *cfg,
- struct super_block *sb)
+static struct config_llog_data *
+config_recover_log_add(struct obd_device *obd, char *fsname,
+ struct config_llog_instance *cfg,
+ struct super_block *sb)
{
struct config_llog_instance lcfg = *cfg;
struct config_llog_data *cld;
char logname[32];
/* we have to use different llog for clients and mdts for cmd
- * where only clients are notified if one of cmd server restarts */
+ * where only clients are notified if one of cmd server restarts
+ */
LASSERT(strlen(fsname) < sizeof(logname) / 2);
strcpy(logname, fsname);
LASSERT(lcfg.cfg_instance);
@@ -262,8 +266,9 @@ static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
return cld;
}
-static struct config_llog_data *config_params_log_add(struct obd_device *obd,
- struct config_llog_instance *cfg, struct super_block *sb)
+static struct config_llog_data *
+config_params_log_add(struct obd_device *obd,
+ struct config_llog_instance *cfg, struct super_block *sb)
{
struct config_llog_instance lcfg = *cfg;
struct config_llog_data *cld;
@@ -300,7 +305,7 @@ static int config_log_add(struct obd_device *obd, char *logname,
* <fsname>-sptlrpc. multiple regular logs may share one sptlrpc log.
*/
ptr = strrchr(logname, '-');
- if (ptr == NULL || ptr - logname > 8) {
+ if (!ptr || ptr - logname > 8) {
CERROR("logname %s is too long\n", logname);
return -EINVAL;
}
@@ -309,7 +314,7 @@ static int config_log_add(struct obd_device *obd, char *logname,
strcpy(seclogname + (ptr - logname), "-sptlrpc");
sptlrpc_cld = config_log_find(seclogname, NULL);
- if (sptlrpc_cld == NULL) {
+ if (!sptlrpc_cld) {
sptlrpc_cld = do_config_log_add(obd, seclogname,
CONFIG_T_SPTLRPC, NULL, NULL);
if (IS_ERR(sptlrpc_cld)) {
@@ -339,7 +344,16 @@ static int config_log_add(struct obd_device *obd, char *logname,
LASSERT(lsi->lsi_lmd);
if (!(lsi->lsi_lmd->lmd_flags & LMD_FLG_NOIR)) {
struct config_llog_data *recover_cld;
- *strrchr(seclogname, '-') = 0;
+
+ ptr = strrchr(seclogname, '-');
+ if (ptr) {
+ *ptr = 0;
+ } else {
+ CERROR("%s: sptlrpc log name not correct, %s: rc = %d\n",
+ obd->obd_name, seclogname, -EINVAL);
+ config_log_put(cld);
+ return -EINVAL;
+ }
recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
if (IS_ERR(recover_cld)) {
rc = PTR_ERR(recover_cld);
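
The fix above replaces an unchecked *strrchr(seclogname, '-') = 0 with an explicit test, since strrchr() returns NULL when the separator is absent and the old code would then have dereferenced it. The defensive shape, in isolation:

    #include <string.h>
    #include <errno.h>

    /* Truncate name at its last '-', failing cleanly when absent. */
    static int strip_suffix(char *name)
    {
            char *sep = strrchr(name, '-');

            if (!sep)
                    return -EINVAL; /* malformed: nothing to strip at */
            *sep = '\0';
            return 0;
    }
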
@@ -376,7 +390,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
int rc = 0;
cld = config_log_find(logname, cfg);
- if (cld == NULL)
+ if (!cld)
return -ENOENT;
mutex_lock(&cld->cld_lock);
@@ -450,16 +464,16 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
ocd = &imp->imp_connect_data;
seq_printf(m, "imperative_recovery: %s\n",
- OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
+ OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
seq_printf(m, "client_state:\n");
spin_lock(&config_list_lock);
list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- if (cld->cld_recover == NULL)
+ if (!cld->cld_recover)
continue;
- seq_printf(m, " - { client: %s, nidtbl_version: %u }\n",
- cld->cld_logname,
- cld->cld_recover->cld_cfg.cfg_last_idx);
+ seq_printf(m, " - { client: %s, nidtbl_version: %u }\n",
+ cld->cld_logname,
+ cld->cld_recover->cld_cfg.cfg_last_idx);
}
spin_unlock(&config_list_lock);
@@ -483,8 +497,9 @@ static void do_requeue(struct config_llog_data *cld)
LASSERT(atomic_read(&cld->cld_refcount) > 0);
/* Do not run mgc_process_log on a disconnected export or an
- export which is being disconnected. Take the client
- semaphore to make the check non-racy. */
+ * export which is being disconnected. Take the client
+ * semaphore to make the check non-racy.
+ */
down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
@@ -529,8 +544,9 @@ static int mgc_requeue_thread(void *data)
}
/* Always wait a few seconds to allow the server who
- caused the lock revocation to finish its setup, plus some
- random so everyone doesn't try to reconnect at once. */
+ * caused the lock revocation to finish its setup, plus some
+ * random delay so everyone doesn't try to reconnect at once.
+ */
to = MGC_TIMEOUT_MIN_SECONDS * HZ;
to += rand * HZ / 100; /* rand is centi-seconds */
lwi = LWI_TIMEOUT(to, NULL, NULL);
@@ -549,8 +565,7 @@ static int mgc_requeue_thread(void *data)
spin_lock(&config_list_lock);
rq_state &= ~RQ_PRECLEANUP;
- list_for_each_entry(cld, &config_llog_list,
- cld_list_chain) {
+ list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
if (!cld->cld_lostlock)
continue;
@@ -559,7 +574,8 @@ static int mgc_requeue_thread(void *data)
LASSERT(atomic_read(&cld->cld_refcount) > 0);
/* Whether we enqueued again or not in mgc_process_log,
- * we're done with the ref from the old enqueue */
+ * we're done with the ref from the old enqueue
+ */
if (cld_prev)
config_log_put(cld_prev);
cld_prev = cld;
@@ -575,7 +591,8 @@ static int mgc_requeue_thread(void *data)
config_log_put(cld_prev);
/* break after scanning the list so that we can drop
- * refcount to losing lock clds */
+ * refcount to losing lock clds
+ */
if (unlikely(stopped)) {
spin_lock(&config_list_lock);
break;
@@ -598,7 +615,8 @@ static int mgc_requeue_thread(void *data)
}
/* Add a cld to the list to requeue. Start the requeue thread if needed.
- We are responsible for dropping the config log reference from here on out. */
+ * We are responsible for dropping the config log reference from here on out.
+ */
static void mgc_requeue_add(struct config_llog_data *cld)
{
CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
@@ -635,7 +653,8 @@ static int mgc_llog_init(const struct lu_env *env, struct obd_device *obd)
int rc;
/* setup only remote ctxt, the local disk context is switched per each
- * filesystem during mgc_fs_setup() */
+ * filesystem during mgc_fs_setup()
+ */
rc = llog_setup(env, obd, &obd->obd_olg, LLOG_CONFIG_REPL_CTXT, obd,
&llog_client_ops);
if (rc)
@@ -697,7 +716,8 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
static int mgc_cleanup(struct obd_device *obd)
{
/* COMPAT_146 - old config logs may have added profiles we don't
- know about */
+ * know about
+ */
if (obd->obd_type->typ_refcnt <= 1)
/* Only for the last mgc */
class_del_profiles();
@@ -711,6 +731,7 @@ static int mgc_cleanup(struct obd_device *obd)
static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
struct lprocfs_static_vars lvars = { NULL };
+ struct task_struct *task;
int rc;
ptlrpcd_addref();
@@ -734,10 +755,10 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
init_waitqueue_head(&rq_waitq);
/* start requeue thread */
- rc = PTR_ERR(kthread_run(mgc_requeue_thread, NULL,
- "ll_cfg_requeue"));
- if (IS_ERR_VALUE(rc)) {
- CERROR("%s: Cannot start requeue thread (%d),no more log updates!\n",
+ task = kthread_run(mgc_requeue_thread, NULL, "ll_cfg_requeue");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("%s: cannot start requeue thread: rc = %d; no more log updates\n",
obd->obd_name, rc);
goto err_cleanup;
}
@@ -793,7 +814,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
break;
}
/* Make sure not to re-enqueue when the mgc is stopping
- (we get called from client_disconnect_export) */
+ * (we get called from client_disconnect_export)
+ */
if (!lock->l_conn_export ||
!lock->l_conn_export->exp_obd->u.cli.cl_conn_count) {
CDEBUG(D_MGC, "log %.8s: disconnecting, won't requeue\n",
@@ -815,7 +837,8 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
/* Not sure where this should go... */
/* This is the timeout value for MGS_CONNECT request plus a ping interval, such
- * that we can have a chance to try the secondary MGS if any. */
+ * that we can have a chance to try the secondary MGS if any.
+ */
#define MGC_ENQUEUE_LIMIT (INITIAL_CONNECT_TIMEOUT + (AT_OFF ? 0 : at_min) \
+ PING_INTERVAL)
#define MGC_TARGET_REG_LIMIT 10
@@ -879,11 +902,12 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
cld->cld_resid.name[0]);
/* We need a callback for every lockholder, so don't try to
- ldlm_lock_match (see rev 1.1.2.11.2.47) */
+ * ldlm_lock_match (see rev 1.1.2.11.2.47)
+ */
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
LDLM_ENQUEUE);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
@@ -894,7 +918,8 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
rc = ldlm_cli_enqueue(exp, &req, &einfo, &cld->cld_resid, NULL, flags,
NULL, 0, LVB_T_NONE, lockh, 0);
/* A failed enqueue should still call the mgc_blocking_ast,
- where it will be requeued if needed ("grant failed"). */
+ * where it will be requeued if needed ("grant failed").
+ */
ptlrpc_req_finished(req);
return rc;
}
@@ -921,7 +946,7 @@ static int mgc_target_register(struct obd_export *exp,
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION,
MGS_TARGET_REG);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO);
@@ -950,8 +975,8 @@ static int mgc_target_register(struct obd_export *exp,
}
static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
- u32 keylen, void *key, u32 vallen,
- void *val, struct ptlrpc_request_set *set)
+ u32 keylen, void *key, u32 vallen,
+ void *val, struct ptlrpc_request_set *set)
{
int rc = -EINVAL;
@@ -1088,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
}
enum {
- CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
+ CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
@@ -1109,22 +1134,22 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
int rc = 0;
int off = 0;
- LASSERT(cfg->cfg_instance != NULL);
+ LASSERT(cfg->cfg_instance);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
- inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!inst)
return -ENOMEM;
- pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
- if (pos >= PAGE_CACHE_SIZE) {
+ pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
+ if (pos >= PAGE_SIZE) {
kfree(inst);
return -E2BIG;
}
++pos;
buf = inst + pos;
- bufsz = PAGE_CACHE_SIZE - pos;
+ bufsz = PAGE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
@@ -1156,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
/* Keep this swab for normal mixed endian handling. LU-1644 */
if (mne_swab)
lustre_swab_mgs_nidtbl_entry(entry);
- if (entry->mne_length > PAGE_CACHE_SIZE) {
+ if (entry->mne_length > PAGE_SIZE) {
CERROR("MNE too large (%u)\n", entry->mne_length);
break;
}
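
mgc_apply_recover_logs() sizes its scratch buffer at one page and leans on snprintf()'s return value to detect truncation: snprintf() returns the length it would have written, so a result at or beyond the buffer size means the output did not fit. The same idiom in isolation (the kernel's snprintf behaves like C99 here):

    #include <stdio.h>
    #include <errno.h>

    /* Format src into buf[bufsz]; report -E2BIG on truncation. */
    static int format_checked(char *buf, size_t bufsz, const void *src)
    {
            int pos = snprintf(buf, bufsz, "%p", src);

            if (pos < 0 || (size_t)pos >= bufsz)
                    return -E2BIG;  /* would not fit (or encode error) */
            return pos;             /* bytes written, excluding the NUL */
    }
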
@@ -1195,7 +1220,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
/* lustre-OST0001-osc-<instance #> */
strcpy(obdname, cld->cld_logname);
cname = strrchr(obdname, '-');
- if (cname == NULL) {
+ if (!cname) {
CERROR("mgc %s: invalid logname %s\n",
mgc->obd_name, obdname);
break;
@@ -1212,7 +1237,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
/* find the obd by obdname */
obd = class_name2obd(obdname);
- if (obd == NULL) {
+ if (!obd) {
CDEBUG(D_INFO, "mgc %s: cannot find obdname %s\n",
mgc->obd_name, obdname);
rc = 0;
@@ -1227,7 +1252,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
uuid = buf + pos;
down_read(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import == NULL) {
+ if (!obd->u.cli.cl_import) {
/* client does not connect to the OST yet */
up_read(&obd->u.cli.cl_sem);
rc = 0;
@@ -1257,7 +1282,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
rc = -ENOMEM;
lcfg = lustre_cfg_new(LCFG_PARAM, &bufs);
- if (lcfg == NULL) {
+ if (IS_ERR(lcfg)) {
CERROR("mgc: cannot allocate memory\n");
break;
}
@@ -1309,14 +1334,14 @@ static int mgc_process_recover_log(struct obd_device *obd,
nrpages = CONFIG_READ_NRPAGES_INIT;
pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL);
- if (pages == NULL) {
+ if (!pages) {
rc = -ENOMEM;
goto out;
}
for (i = 0; i < nrpages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
- if (pages[i] == NULL) {
+ if (!pages[i]) {
rc = -ENOMEM;
goto out;
}
@@ -1327,7 +1352,7 @@ again:
LASSERT(mutex_is_locked(&cld->cld_lock));
req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
&RQF_MGS_CONFIG_READ);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out;
}
@@ -1338,7 +1363,6 @@ again:
/* pack request */
body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY);
- LASSERT(body != NULL);
LASSERT(sizeof(body->mcb_name) > strlen(cld->cld_logname));
if (strlcpy(body->mcb_name, cld->cld_logname, sizeof(body->mcb_name))
>= sizeof(body->mcb_name)) {
@@ -1347,19 +1371,19 @@ again:
}
body->mcb_offset = cfg->cfg_last_idx + 1;
body->mcb_type = cld->cld_type;
- body->mcb_bits = PAGE_CACHE_SHIFT;
+ body->mcb_bits = PAGE_SHIFT;
body->mcb_units = nrpages;
/* allocate bulk transfer descriptor */
desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
MGS_BULK_PORTAL);
- if (desc == NULL) {
+ if (!desc) {
rc = -ENOMEM;
goto out;
}
for (i = 0; i < nrpages; i++)
- ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
+ ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
@@ -1373,7 +1397,8 @@ again:
}
/* always update the index even though it might have errors with
- * handling the recover logs */
+ * handling the recover logs
+ */
cfg->cfg_last_idx = res->mcr_offset;
eof = res->mcr_offset == res->mcr_size;
@@ -1386,7 +1411,7 @@ again:
goto out;
}
- if (ealen > nrpages << PAGE_CACHE_SHIFT) {
+ if (ealen > nrpages << PAGE_SHIFT) {
rc = -EINVAL;
goto out;
}
@@ -1400,7 +1425,8 @@ again:
mne_swab = !!ptlrpc_rep_need_swab(req);
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
/* This import flag means the server did an extra swab of IR MNE
- * records (fixed in LU-1252), reverse it here if needed. LU-1644 */
+ * records (fixed in LU-1252), reverse it here if needed. LU-1644
+ */
if (unlikely(req->rq_import->imp_need_mne_swab))
mne_swab = !mne_swab;
#else
@@ -1413,7 +1439,7 @@ again:
ptr = kmap(pages[i]);
rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
- min_t(int, ealen, PAGE_CACHE_SIZE),
+ min_t(int, ealen, PAGE_SIZE),
mne_swab);
kunmap(pages[i]);
if (rc2 < 0) {
@@ -1422,7 +1448,7 @@ again:
break;
}
- ealen -= PAGE_CACHE_SIZE;
+ ealen -= PAGE_SIZE;
}
out:
@@ -1434,7 +1460,7 @@ out:
if (pages) {
for (i = 0; i < nrpages; i++) {
- if (pages[i] == NULL)
+ if (!pages[i])
break;
__free_page(pages[i]);
}
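
mgc_process_recover_log() builds an array of page pointers with kcalloc() plus alloc_page(), and the cleanup above frees only the slots that were actually populated, stopping at the first NULL (which kcalloc's zeroing guarantees marks the unfilled tail). A condensed sketch of the same pairing, unwinding exactly the slots that were filled:

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Allocate n pages, freeing whatever succeeded on failure. */
    static struct page **alloc_page_array(unsigned int n)
    {
            struct page **pages;
            unsigned int i;

            pages = kcalloc(n, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return NULL;
            for (i = 0; i < n; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i])
                            goto undo;
            }
            return pages;
    undo:
            while (i--)
                    __free_page(pages[i]);
            kfree(pages);
            return NULL;
    }
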
@@ -1489,7 +1515,8 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
/* logname and instance info should be the same, so use our
* copy of the instance for the update. The cfg_last_idx will
- * be updated here. */
+ * be updated here.
+ */
rc = class_config_parse_llog(env, ctxt, cld->cld_logname,
&cld->cld_cfg);
@@ -1529,9 +1556,10 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
LASSERT(cld);
/* I don't want multiple processes running process_log at once --
- sounds like badness. It actually might be fine, as long as
- we're not trying to update from the same log
- simultaneously (in which case we should use a per-log sem.) */
+ * sounds like badness. It actually might be fine, as long as
+ * we're not trying to update from the same log
+ * simultaneously (in which case we should use a per-log sem.)
+ */
mutex_lock(&cld->cld_lock);
if (cld->cld_stopping) {
mutex_unlock(&cld->cld_lock);
@@ -1556,7 +1584,8 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
/* mark cld_lostlock so that it will requeue
- * after MGC becomes available. */
+ * after MGC becomes available.
+ */
cld->cld_lostlock = 1;
/* Get extra reference, it will be put in requeue thread */
config_log_get(cld);
@@ -1635,18 +1664,19 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
if (rc)
break;
cld = config_log_find(logname, cfg);
- if (cld == NULL) {
+ if (!cld) {
rc = -ENOENT;
break;
}
/* COMPAT_146 */
/* FIXME only set this for old logs! Right now this forces
- us to always skip the "inside markers" check */
+ * us to always skip the "inside markers" check
+ */
cld->cld_cfg.cfg_flags |= CFG_F_COMPAT146;
rc = mgc_process_log(obd, cld);
- if (rc == 0 && cld->cld_recover != NULL) {
+ if (rc == 0 && cld->cld_recover) {
if (OCD_HAS_FLAG(&obd->u.cli.cl_import->
imp_connect_data, IMP_RECOV)) {
rc = mgc_process_log(obd, cld->cld_recover);
@@ -1660,7 +1690,7 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
CERROR("Cannot process recover llog %d\n", rc);
}
- if (rc == 0 && cld->cld_params != NULL) {
+ if (rc == 0 && cld->cld_params) {
rc = mgc_process_log(obd, cld->cld_params);
if (rc == -ENOENT) {
CDEBUG(D_MGC,
@@ -1727,6 +1757,7 @@ static void /*__exit*/ mgc_exit(void)
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Management Client");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
module_init(mgc_init);
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index acc685712ce9..c404eb3864ff 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -2,8 +2,8 @@ obj-$(CONFIG_LUSTRE_FS) += obdclass.o
obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
- genops.o uuid.o lprocfs_status.o \
- lustre_handles.o lustre_peer.o \
- statfs_pack.o obdo.o obd_config.o obd_mount.o \
- lu_object.o cl_object.o \
- cl_page.o cl_lock.o cl_io.o lu_ref.o acl.o lprocfs_counters.o
+ genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
+ lustre_handles.o lustre_peer.o statfs_pack.o \
+ obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \
+ cl_object.o cl_page.o cl_lock.o cl_io.o \
+ acl.o kernelcomm.o
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
index 49ba8851c8ac..0e02ae97b7ed 100644
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ b/drivers/staging/lustre/lustre/obdclass/acl.c
@@ -104,7 +104,7 @@ static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
return old_size;
new = kmemdup(*header, new_size, GFP_NOFS);
- if (unlikely(new == NULL))
+ if (unlikely(!new))
return -ENOMEM;
kfree(*header);
@@ -124,7 +124,7 @@ static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
return 0;
new = kmemdup(*header, ext_size, GFP_NOFS);
- if (unlikely(new == NULL))
+ if (unlikely(!new))
return -ENOMEM;
kfree(*header);
@@ -149,7 +149,7 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
new = kzalloc(esize, GFP_NOFS);
- if (unlikely(new == NULL))
+ if (unlikely(!new))
return ERR_PTR(-ENOMEM);
new->a_count = cpu_to_le32(count);
@@ -180,7 +180,7 @@ int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
return -EINVAL;
new = kzalloc(size, GFP_NOFS);
- if (unlikely(new == NULL))
+ if (unlikely(!new))
return -ENOMEM;
new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
@@ -300,7 +300,7 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
new = kzalloc(ext_size, GFP_NOFS);
- if (unlikely(new == NULL))
+ if (unlikely(!new))
return ERR_PTR(-ENOMEM);
for (i = 0, j = 0; i < posix_count; i++) {
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index 63246ba36798..f5128b4f176f 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -44,6 +44,7 @@
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
+#include <linux/sched.h>
#include "../include/cl_object.h"
#include "cl_internal.h"
@@ -93,7 +94,7 @@ static int cl_io_invariant(const struct cl_io *io)
* CIS_IO_GOING.
*/
ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
- (io->ci_state == CIS_LOCKED && up != NULL));
+ (io->ci_state == CIS_LOCKED && up));
}
/**
@@ -111,7 +112,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
slice = container_of(io->ci_layers.prev, struct cl_io_slice,
cis_linkage);
list_del_init(&slice->cis_linkage);
- if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
+ if (slice->cis_iop->op[io->ci_type].cio_fini)
slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
/*
* Invalidate slice to catch use after free. This assumes that
@@ -138,7 +139,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
case CIT_MISC:
/* Check ignore layout change conf */
LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
- !io->ci_need_restart));
+ !io->ci_need_restart));
break;
default:
LBUG();
@@ -164,7 +165,7 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
result = 0;
cl_object_for_each(scan, obj) {
- if (scan->co_ops->coo_io_init != NULL) {
+ if (scan->co_ops->coo_io_init) {
result = scan->co_ops->coo_io_init(env, scan, io);
if (result != 0)
break;
@@ -186,7 +187,7 @@ int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
struct cl_thread_info *info = cl_env_info(env);
LASSERT(obj != cl_object_top(obj));
- if (info->clt_current_io == NULL)
+ if (!info->clt_current_io)
info->clt_current_io = io;
return cl_io_init0(env, io, iot, obj);
}
@@ -208,7 +209,7 @@ int cl_io_init(const struct lu_env *env, struct cl_io *io,
struct cl_thread_info *info = cl_env_info(env);
LASSERT(obj == cl_object_top(obj));
- LASSERT(info->clt_current_io == NULL);
+ LASSERT(!info->clt_current_io);
info->clt_current_io = io;
return cl_io_init0(env, io, iot, obj);
@@ -224,7 +225,7 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
enum cl_io_type iot, loff_t pos, size_t count)
{
LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
- LINVRNT(io->ci_obj != NULL);
+ LINVRNT(io->ci_obj);
LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
"io range: %u [%llu, %llu) %u %u\n",
@@ -290,11 +291,11 @@ static void cl_io_locks_sort(struct cl_io *io)
prev = NULL;
list_for_each_entry_safe(curr, temp,
- &io->ci_lockset.cls_todo,
- cill_linkage) {
- if (prev != NULL) {
+ &io->ci_lockset.cls_todo,
+ cill_linkage) {
+ if (prev) {
switch (cl_lock_descr_sort(&prev->cill_descr,
- &curr->cill_descr)) {
+ &curr->cill_descr)) {
case 0:
/*
* IMPOSSIBLE: Identical locks are
@@ -305,10 +306,11 @@ static void cl_io_locks_sort(struct cl_io *io)
LBUG();
case 1:
list_move_tail(&curr->cill_linkage,
- &prev->cill_linkage);
+ &prev->cill_linkage);
done = 0;
continue; /* don't change prev: it's
- * still "previous" */
+ * still "previous"
+ */
case -1: /* already in order */
break;
}
@@ -327,32 +329,31 @@ static void cl_io_locks_sort(struct cl_io *io)
int cl_queue_match(const struct list_head *queue,
const struct cl_lock_descr *need)
{
- struct cl_io_lock_link *scan;
+ struct cl_io_lock_link *scan;
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_match(&scan->cill_descr, need))
- return 1;
- }
- return 0;
+ list_for_each_entry(scan, queue, cill_linkage) {
+ if (cl_lock_descr_match(&scan->cill_descr, need))
+ return 1;
+ }
+ return 0;
}
EXPORT_SYMBOL(cl_queue_match);
static int cl_queue_merge(const struct list_head *queue,
const struct cl_lock_descr *need)
{
- struct cl_io_lock_link *scan;
-
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_cmp(&scan->cill_descr, need))
- continue;
- cl_lock_descr_merge(&scan->cill_descr, need);
- CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
- scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
- scan->cill_descr.cld_end);
- return 1;
- }
- return 0;
+ struct cl_io_lock_link *scan;
+ list_for_each_entry(scan, queue, cill_linkage) {
+ if (cl_lock_descr_cmp(&scan->cill_descr, need))
+ continue;
+ cl_lock_descr_merge(&scan->cill_descr, need);
+ CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+ scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
+ scan->cill_descr.cld_end);
+ return 1;
+ }
+ return 0;
}
static int cl_lockset_match(const struct cl_lockset *set,
@@ -384,8 +385,7 @@ static int cl_lockset_lock_one(const struct lu_env *env,
if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
result = cl_wait(env, lock);
if (result == 0)
- list_move(&link->cill_linkage,
- &set->cls_done);
+ list_move(&link->cill_linkage, &set->cls_done);
} else
result = 0;
} else
@@ -399,11 +399,11 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
struct cl_lock *lock = link->cill_lock;
list_del_init(&link->cill_linkage);
- if (lock != NULL) {
+ if (lock) {
cl_lock_release(env, lock, "io", io);
link->cill_lock = NULL;
}
- if (link->cill_fini != NULL)
+ if (link->cill_fini)
link->cill_fini(env, link);
}
@@ -419,7 +419,8 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
if (!cl_lockset_match(set, &link->cill_descr)) {
/* XXX some locking to guarantee that locks aren't
- * expanded in between. */
+ * expanded in between.
+ */
result = cl_lockset_lock_one(env, io, set, link);
if (result != 0)
break;
@@ -428,12 +429,11 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
}
if (result == 0) {
list_for_each_entry_safe(link, temp,
- &set->cls_curr, cill_linkage) {
+ &set->cls_curr, cill_linkage) {
lock = link->cill_lock;
result = cl_wait(env, lock);
if (result == 0)
- list_move(&link->cill_linkage,
- &set->cls_done);
+ list_move(&link->cill_linkage, &set->cls_done);
else
break;
}
@@ -458,7 +458,7 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io)
LINVRNT(cl_io_invariant(io));
cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
+ if (!scan->cis_iop->op[io->ci_type].cio_lock)
continue;
result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
if (result != 0)
@@ -503,7 +503,7 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
cl_lock_link_fini(env, io, link);
}
cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
+ if (scan->cis_iop->op[io->ci_type].cio_unlock)
scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
}
io->ci_state = CIS_UNLOCKED;
@@ -529,7 +529,7 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
result = 0;
cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
+ if (!scan->cis_iop->op[io->ci_type].cio_iter_init)
continue;
result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
scan);
@@ -556,7 +556,7 @@ void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
LINVRNT(cl_io_invariant(io));
cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
+ if (scan->cis_iop->op[io->ci_type].cio_iter_fini)
scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
}
io->ci_state = CIS_IT_ENDED;
@@ -581,7 +581,7 @@ static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io,
/* layers have to be notified. */
cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
+ if (scan->cis_iop->op[io->ci_type].cio_advance)
scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
nob);
}
@@ -621,7 +621,7 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
int result;
link = kzalloc(sizeof(*link), GFP_NOFS);
- if (link != NULL) {
+ if (link) {
link->cill_descr = *descr;
link->cill_fini = cl_free_io_lock_link;
result = cl_io_lock_add(env, io, link);
@@ -648,7 +648,7 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io)
io->ci_state = CIS_IO_GOING;
cl_io_for_each(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
+ if (!scan->cis_iop->op[io->ci_type].cio_start)
continue;
result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
if (result != 0)
@@ -673,7 +673,7 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
LINVRNT(cl_io_invariant(io));
cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
+ if (scan->cis_iop->op[io->ci_type].cio_end)
scan->cis_iop->op[io->ci_type].cio_end(env, scan);
/* TODO: error handling. */
}
@@ -687,7 +687,7 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
const struct cl_page_slice *slice;
slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
- LINVRNT(slice != NULL);
+ LINVRNT(slice);
return slice;
}
@@ -759,11 +759,11 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
* "parallel io" (see CLO_REPEAT loops in cl_lock.c).
*/
cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_read_page != NULL) {
+ if (scan->cis_iop->cio_read_page) {
const struct cl_page_slice *slice;
slice = cl_io_slice_page(scan, page);
- LINVRNT(slice != NULL);
+ LINVRNT(slice);
result = scan->cis_iop->cio_read_page(env, scan, slice);
if (result != 0)
break;
@@ -798,7 +798,7 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
LASSERT(cl_page_in_io(page, io));
cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->cio_prepare_write != NULL) {
+ if (scan->cis_iop->cio_prepare_write) {
const struct cl_page_slice *slice;
slice = cl_io_slice_page(scan, page);
@@ -833,11 +833,11 @@ int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
* state. Better (and more general) way of dealing with such situation
* is needed.
*/
- LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
+ LASSERT(cl_page_is_owned(page, io) || page->cp_parent);
LASSERT(cl_page_in_io(page, io));
cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_commit_write != NULL) {
+ if (scan->cis_iop->cio_commit_write) {
const struct cl_page_slice *slice;
slice = cl_io_slice_page(scan, page);
@@ -872,7 +872,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
cl_io_for_each(scan, io) {
- if (scan->cis_iop->req_op[crt].cio_submit == NULL)
+ if (!scan->cis_iop->req_op[crt].cio_submit)
continue;
result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
queue);
@@ -900,7 +900,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
int rc;
cl_page_list_for_each(pg, &queue->c2_qin) {
- LASSERT(pg->cp_sync_io == NULL);
+ LASSERT(!pg->cp_sync_io);
pg->cp_sync_io = anchor;
}
@@ -913,14 +913,14 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
* clean pages), count them as completed to avoid infinite
* wait.
*/
- cl_page_list_for_each(pg, &queue->c2_qin) {
+ cl_page_list_for_each(pg, &queue->c2_qin) {
pg->cp_sync_io = NULL;
cl_sync_io_note(anchor, 1);
- }
+ }
- /* wait for the IO to be finished. */
- rc = cl_sync_io_wait(env, io, &queue->c2_qout,
- anchor, timeout);
+ /* wait for the IO to be finished. */
+ rc = cl_sync_io_wait(env, io, &queue->c2_qout,
+ anchor, timeout);
} else {
LASSERT(list_empty(&queue->c2_qout.pl_pages));
cl_page_list_for_each(pg, &queue->c2_qin)
@@ -1026,7 +1026,7 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
{
struct list_head *linkage = &slice->cis_linkage;
- LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
+ LASSERT((!linkage->prev && !linkage->next) ||
list_empty(linkage));
list_add_tail(linkage, &io->ci_layers);
@@ -1053,8 +1053,9 @@ EXPORT_SYMBOL(cl_page_list_init);
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
/* it would be better to check that page is owned by "current" io, but
- * it is not passed here. */
- LASSERT(page->cp_owner != NULL);
+ * it is not passed here.
+ */
+ LASSERT(page->cp_owner);
LINVRNT(plist->pl_owner == current);
lockdep_off();
@@ -1263,7 +1264,7 @@ EXPORT_SYMBOL(cl_2queue_init_page);
*/
struct cl_io *cl_io_top(struct cl_io *io)
{
- while (io->ci_parent != NULL)
+ while (io->ci_parent)
io = io->ci_parent;
return io;
}
@@ -1296,13 +1297,13 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
LASSERT(list_empty(&req->crq_pages));
LASSERT(req->crq_nrpages == 0);
LINVRNT(list_empty(&req->crq_layers));
- LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
+ LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));
- if (req->crq_o != NULL) {
+ if (req->crq_o) {
for (i = 0; i < req->crq_nrobjs; ++i) {
struct cl_object *obj = req->crq_o[i].ro_obj;
- if (obj != NULL) {
+ if (obj) {
lu_object_ref_del_at(&obj->co_lu,
&req->crq_o[i].ro_obj_ref,
"cl_req", req);
@@ -1326,7 +1327,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
do {
list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
- if (dev->cd_ops->cdo_req_init != NULL) {
+ if (dev->cd_ops->cdo_req_init) {
result = dev->cd_ops->cdo_req_init(env,
dev, req);
if (result != 0)
@@ -1334,7 +1335,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
}
}
page = page->cp_child;
- } while (page != NULL && result == 0);
+ } while (page && result == 0);
return result;
}
@@ -1351,9 +1352,9 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
*/
while (!list_empty(&req->crq_layers)) {
slice = list_entry(req->crq_layers.prev,
- struct cl_req_slice, crs_linkage);
+ struct cl_req_slice, crs_linkage);
list_del_init(&slice->crs_linkage);
- if (slice->crs_ops->cro_completion != NULL)
+ if (slice->crs_ops->cro_completion)
slice->crs_ops->cro_completion(env, slice, rc);
}
cl_req_free(env, req);
@@ -1371,7 +1372,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
LINVRNT(nr_objects > 0);
req = kzalloc(sizeof(*req), GFP_NOFS);
- if (req != NULL) {
+ if (req) {
int result;
req->crq_type = crt;
@@ -1380,7 +1381,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
GFP_NOFS);
- if (req->crq_o != NULL) {
+ if (req->crq_o) {
req->crq_nrobjs = nr_objects;
result = cl_req_init(env, req, page);
} else
@@ -1408,7 +1409,7 @@ void cl_req_page_add(const struct lu_env *env,
page = cl_page_top(page);
LASSERT(list_empty(&page->cp_flight));
- LASSERT(page->cp_req == NULL);
+ LASSERT(!page->cp_req);
CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
req, req->crq_type, req->crq_nrpages);
@@ -1418,7 +1419,7 @@ void cl_req_page_add(const struct lu_env *env,
page->cp_req = req;
obj = cl_object_top(page->cp_obj);
for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
- if (rqo->ro_obj == NULL) {
+ if (!rqo->ro_obj) {
rqo->ro_obj = obj;
cl_object_get(obj);
lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
@@ -1463,11 +1464,11 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req)
* of objects.
*/
for (i = 0; i < req->crq_nrobjs; ++i)
- LASSERT(req->crq_o[i].ro_obj != NULL);
+ LASSERT(req->crq_o[i].ro_obj);
result = 0;
list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
- if (slice->crs_ops->cro_prep != NULL) {
+ if (slice->crs_ops->cro_prep) {
result = slice->crs_ops->cro_prep(env, slice);
if (result != 0)
break;
@@ -1501,9 +1502,8 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
scan = cl_page_at(page,
slice->crs_dev->cd_lu_dev.ld_type);
- LASSERT(scan != NULL);
obj = scan->cpl_obj;
- if (slice->crs_ops->cro_attr_set != NULL)
+ if (slice->crs_ops->cro_attr_set)
slice->crs_ops->cro_attr_set(env, slice, obj,
attr + i, flags);
}
@@ -1511,9 +1511,6 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
}
EXPORT_SYMBOL(cl_req_attr_set);
-/* XXX complete(), init_completion(), and wait_for_completion(), until they are
- * implemented in libcfs. */
-# include <linux/sched.h>
/**
* Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
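
Note: the cl_io.c hunks above are dominated by one checkpatch cleanup: pointer validity is tested as "!ptr" rather than "ptr == NULL" (and "ptr" rather than "ptr != NULL"). A minimal sketch of the pattern, with illustrative names that are not from the patch:

struct demo_slice {
	int (*submit)(void);
};

static int demo_submit(const struct demo_slice *scan)
{
	/* was: if (scan->submit == NULL) */
	if (!scan->submit)	/* preferred kernel style, identical meaning */
		return 0;
	return scan->submit();
}
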
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index 1836dc01499a..aec644eb4db9 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -96,8 +96,8 @@ static int cl_lock_invariant(const struct lu_env *env,
result = atomic_read(&lock->cll_ref) > 0 &&
cl_lock_invariant_trusted(env, lock);
- if (!result && env != NULL)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
+ if (!result && env)
+ CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
return result;
}
@@ -259,7 +259,7 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
struct cl_lock_slice *slice;
slice = list_entry(lock->cll_layers.next,
- struct cl_lock_slice, cls_linkage);
+ struct cl_lock_slice, cls_linkage);
list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
@@ -288,7 +288,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_invariant(env, lock));
obj = lock->cll_descr.cld_obj;
- LINVRNT(obj != NULL);
+ LINVRNT(obj);
CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
atomic_read(&lock->cll_ref), lock, RETIP);
@@ -361,8 +361,8 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
struct cl_lock *lock;
struct lu_object_header *head;
- lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO);
- if (lock != NULL) {
+ lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS);
+ if (lock) {
atomic_set(&lock->cll_ref, 1);
lock->cll_descr = *descr;
lock->cll_state = CLS_NEW;
@@ -382,8 +382,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
CS_LOCK_INC(obj, total);
CS_LOCK_INC(obj, create);
cl_lock_lockdep_init(lock);
- list_for_each_entry(obj, &head->loh_layers,
- co_lu.lo_linkage) {
+ list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
int err;
err = obj->co_ops->coo_lock_init(env, obj, lock, io);
@@ -461,7 +460,7 @@ static int cl_lock_fits_into(const struct lu_env *env,
LINVRNT(cl_lock_invariant_trusted(env, lock));
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_fits_into != NULL &&
+ if (slice->cls_ops->clo_fits_into &&
!slice->cls_ops->clo_fits_into(env, slice, need, io))
return 0;
}
@@ -524,17 +523,17 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
lock = cl_lock_lookup(env, obj, io, need);
spin_unlock(&head->coh_lock_guard);
- if (lock == NULL) {
+ if (!lock) {
lock = cl_lock_alloc(env, obj, io, need);
if (!IS_ERR(lock)) {
struct cl_lock *ghost;
spin_lock(&head->coh_lock_guard);
ghost = cl_lock_lookup(env, obj, io, need);
- if (ghost == NULL) {
+ if (!ghost) {
cl_lock_get_trust(lock);
list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
+ &head->coh_locks);
spin_unlock(&head->coh_lock_guard);
CS_LOCK_INC(obj, busy);
} else {
@@ -572,7 +571,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
spin_lock(&head->coh_lock_guard);
lock = cl_lock_lookup(env, obj, io, need);
spin_unlock(&head->coh_lock_guard);
- if (lock == NULL)
+ if (!lock)
return NULL;
cl_lock_mutex_get(env, lock);
@@ -584,7 +583,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
cl_lock_put(env, lock);
lock = NULL;
}
- } while (lock == NULL);
+ } while (!lock);
cl_lock_hold_add(env, lock, scope, source);
cl_lock_user_add(env, lock);
@@ -774,8 +773,8 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
lock->cll_flags |= CLF_CANCELLED;
list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_cancel != NULL)
+ cls_linkage) {
+ if (slice->cls_ops->clo_cancel)
slice->cls_ops->clo_cancel(env, slice);
}
}
@@ -811,8 +810,8 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
* by cl_lock_lookup().
*/
list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_delete != NULL)
+ cls_linkage) {
+ if (slice->cls_ops->clo_delete)
slice->cls_ops->clo_delete(env, slice);
}
/*
@@ -935,7 +934,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
if (result == 0) {
/* To avoid being interrupted by the 'non-fatal' signals
* (SIGCHLD, for instance), we'd block them temporarily.
- * LU-305 */
+ * LU-305
+ */
blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
init_waitqueue_entry(&waiter, current);
@@ -946,7 +946,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
LASSERT(cl_lock_nr_mutexed(env) == 0);
/* Returning ERESTARTSYS instead of EINTR so syscalls
- * can be restarted if signals are pending here */
+ * can be restarted if signals are pending here
+ */
result = -ERESTARTSYS;
if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
schedule();
@@ -974,7 +975,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
LINVRNT(cl_lock_invariant(env, lock));
list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
- if (slice->cls_ops->clo_state != NULL)
+ if (slice->cls_ops->clo_state)
slice->cls_ops->clo_state(env, slice, state);
wake_up_all(&lock->cll_wq);
}
@@ -1038,8 +1039,8 @@ static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
result = -ENOSYS;
list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_unuse != NULL) {
+ cls_linkage) {
+ if (slice->cls_ops->clo_unuse) {
result = slice->cls_ops->clo_unuse(env, slice);
if (result != 0)
break;
@@ -1072,7 +1073,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
result = -ENOSYS;
state = cl_lock_intransit(env, lock);
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_use != NULL) {
+ if (slice->cls_ops->clo_use) {
result = slice->cls_ops->clo_use(env, slice);
if (result != 0)
break;
@@ -1125,7 +1126,7 @@ static int cl_enqueue_kick(const struct lu_env *env,
result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_enqueue != NULL) {
+ if (slice->cls_ops->clo_enqueue) {
result = slice->cls_ops->clo_enqueue(env,
slice, io, flags);
if (result != 0)
@@ -1170,7 +1171,8 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
/* kick layers. */
result = cl_enqueue_kick(env, lock, io, flags);
/* For AGL case, the cl_lock::cll_state may
- * become CLS_HELD already. */
+ * become CLS_HELD already.
+ */
if (result == 0 && lock->cll_state == CLS_QUEUING)
cl_lock_state_set(env, lock, CLS_ENQUEUED);
break;
@@ -1215,7 +1217,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env,
LASSERT(cl_lock_is_mutexed(lock));
LASSERT(lock->cll_state == CLS_QUEUING);
- LASSERT(lock->cll_conflict != NULL);
+ LASSERT(lock->cll_conflict);
conflict = lock->cll_conflict;
lock->cll_conflict = NULL;
@@ -1258,7 +1260,7 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
do {
result = cl_enqueue_try(env, lock, io, enqflags);
if (result == CLO_WAIT) {
- if (lock->cll_conflict != NULL)
+ if (lock->cll_conflict)
result = cl_lock_enqueue_wait(env, lock, 1);
else
result = cl_lock_state_wait(env, lock);
@@ -1300,7 +1302,8 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
}
/* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
- * underlying resources. */
+ * underlying resources.
+ */
if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
cl_lock_user_del(env, lock);
return 0;
@@ -1416,7 +1419,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_wait != NULL) {
+ if (slice->cls_ops->clo_wait) {
result = slice->cls_ops->clo_wait(env, slice);
if (result != 0)
break;
@@ -1449,7 +1452,7 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_invariant(env, lock));
LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
- "Wrong state %d \n", lock->cll_state);
+ "Wrong state %d\n", lock->cll_state);
LASSERT(lock->cll_holds > 0);
do {
@@ -1487,7 +1490,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
pound = 0;
list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_weigh != NULL) {
+ if (slice->cls_ops->clo_weigh) {
ounce = slice->cls_ops->clo_weigh(env, slice);
pound += ounce;
if (pound < ounce) /* over-weight^Wflow */
@@ -1523,7 +1526,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
LINVRNT(cl_lock_invariant(env, lock));
list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_modify != NULL) {
+ if (slice->cls_ops->clo_modify) {
result = slice->cls_ops->clo_modify(env, slice, desc);
if (result != 0)
return result;
@@ -1584,7 +1587,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
result = cl_lock_enclosure(env, lock, closure);
if (result == 0) {
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_closure != NULL) {
+ if (slice->cls_ops->clo_closure) {
result = slice->cls_ops->clo_closure(env, slice,
closure);
if (result != 0)
@@ -1654,7 +1657,7 @@ void cl_lock_disclosure(const struct lu_env *env,
cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
list_for_each_entry_safe(scan, temp, &closure->clc_list,
- cll_inclosure){
+ cll_inclosure) {
list_del_init(&scan->cll_inclosure);
cl_lock_mutex_put(env, scan);
lu_ref_del(&scan->cll_reference, "closure", closure);
@@ -1777,13 +1780,15 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
lock = NULL;
need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
- * not PHANTOM */
+ * not PHANTOM
+ */
need->cld_start = need->cld_end = index;
need->cld_enq_flags = 0;
spin_lock(&head->coh_lock_guard);
/* It is fine to match any group lock since there could be only one
- * with a uniq gid and it conflicts with all other lock modes too */
+	 * with a unique gid and it conflicts with all other lock modes too
+ */
list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
if (scan != except &&
(scan->cll_descr.cld_mode == CLM_GROUP ||
@@ -1798,7 +1803,8 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
(canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
(pending || !(scan->cll_flags & CLF_CANCELPEND))) {
/* Don't increase cs_hit here since this
- * is just a helper function. */
+ * is just a helper function.
+ */
cl_lock_get_trust(scan);
lock = scan;
break;
@@ -1820,7 +1826,6 @@ static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
slice = cl_page_at(page, dtype);
- LASSERT(slice != NULL);
return slice->cpl_page->cp_index;
}
@@ -1839,12 +1844,13 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
/* refresh non-overlapped index */
tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
- lock, 1, 0);
- if (tmp != NULL) {
+ lock, 1, 0);
+ if (tmp) {
/* Cache the first-non-overlapped index so as to skip
* all pages within [index, clt_fn_index). This
* is safe because if tmp lock is canceled, it will
- * discard these pages. */
+ * discard these pages.
+ */
info->clt_fn_index = tmp->cll_descr.cld_end + 1;
if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
info->clt_fn_index = CL_PAGE_EOF;
@@ -1950,7 +1956,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
* already destroyed (as otherwise they will be left unprotected).
*/
LASSERT(ergo(!cancel,
- head->coh_tree.rnode == NULL && head->coh_pages == 0));
+ !head->coh_tree.rnode && head->coh_pages == 0));
spin_lock(&head->coh_lock_guard);
while (!list_empty(&head->coh_locks)) {
@@ -2166,8 +2172,8 @@ EXPORT_SYMBOL(cl_lock_mode_name);
* Prints human readable representation of a lock description.
*/
void cl_lock_descr_print(const struct lu_env *env, void *cookie,
- lu_printer_t printer,
- const struct cl_lock_descr *descr)
+ lu_printer_t printer,
+ const struct cl_lock_descr *descr)
{
const struct lu_fid *fid;
@@ -2194,7 +2200,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
(*printer)(env, cookie, " %s@%p: ",
slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
slice);
- if (slice->cls_ops->clo_print != NULL)
+ if (slice->cls_ops->clo_print)
slice->cls_ops->clo_print(env, cookie, printer, slice);
(*printer)(env, cookie, "\n");
}
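
Note: cl_lock_alloc() above replaces kmem_cache_alloc(..., GFP_NOFS | __GFP_ZERO) with kmem_cache_zalloc(..., GFP_NOFS). The two are equivalent; the upstream helper is roughly the wrapper below (renamed here so it does not clash with the real definition in <linux/slab.h>):

#include <linux/slab.h>

/* Assumed shape of kmem_cache_zalloc(): or in __GFP_ZERO, then allocate. */
static inline void *demo_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}
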
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 57c8d5412bbd..43e299d4d416 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -152,7 +152,7 @@ struct cl_object *cl_object_top(struct cl_object *o)
struct cl_object_header *hdr = cl_object_header(o);
struct cl_object *top;
- while (hdr->coh_parent != NULL)
+ while (hdr->coh_parent)
hdr = hdr->coh_parent;
top = lu2cl(lu_object_top(&hdr->coh_lu));
@@ -217,7 +217,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_get != NULL) {
+ if (obj->co_ops->coo_attr_get) {
result = obj->co_ops->coo_attr_get(env, obj, attr);
if (result != 0) {
if (result > 0)
@@ -247,9 +247,8 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers,
- co_lu.lo_linkage) {
- if (obj->co_ops->coo_attr_set != NULL) {
+ list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_attr_set) {
result = obj->co_ops->coo_attr_set(env, obj, attr, v);
if (result != 0) {
if (result > 0)
@@ -278,9 +277,8 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
top = obj->co_lu.lo_header;
result = 0;
- list_for_each_entry_reverse(obj, &top->loh_layers,
- co_lu.lo_linkage) {
- if (obj->co_ops->coo_glimpse != NULL) {
+ list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
+ if (obj->co_ops->coo_glimpse) {
result = obj->co_ops->coo_glimpse(env, obj, lvb);
if (result != 0)
break;
@@ -306,7 +304,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
- if (obj->co_ops->coo_conf_set != NULL) {
+ if (obj->co_ops->coo_conf_set) {
result = obj->co_ops->coo_conf_set(env, obj, conf);
if (result != 0)
break;
@@ -328,7 +326,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
struct cl_object_header *hdr;
hdr = cl_object_header(obj);
- LASSERT(hdr->coh_tree.rnode == NULL);
+ LASSERT(!hdr->coh_tree.rnode);
LASSERT(hdr->coh_pages == 0);
set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
@@ -362,7 +360,8 @@ void cache_stats_init(struct cache_stats *cs, const char *name)
atomic_set(&cs->cs_stats[i], 0);
}
-int cache_stats_print(const struct cache_stats *cs, struct seq_file *m, int h)
+static int cache_stats_print(const struct cache_stats *cs,
+ struct seq_file *m, int h)
{
int i;
/*
@@ -456,13 +455,13 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
seq_printf(m, " [");
for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
seq_printf(m, "%s: %u ", pstate[i],
- atomic_read(&site->cs_pages_state[i]));
+ atomic_read(&site->cs_pages_state[i]));
seq_printf(m, "]\n");
cache_stats_print(&site->cs_locks, m, 0);
seq_printf(m, " [");
for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
seq_printf(m, "%s: %u ", lstate[i],
- atomic_read(&site->cs_locks_state[i]));
+ atomic_read(&site->cs_locks_state[i]));
seq_printf(m, "]\n");
cache_stats_print(&cl_env_stats, m, 0);
seq_printf(m, "\n");
@@ -482,7 +481,6 @@ EXPORT_SYMBOL(cl_site_stats_print);
* because Lustre code may call into other fs which has certain assumptions
* about journal_info. Currently following fields in task_struct are identified
* can be used for this purpose:
- * - cl_env: for liblustre.
* - tux_info: only on RedHat kernel.
* - ...
* \note As long as we use task_struct to store cl_env, we assume that once
@@ -540,7 +538,7 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
{
LASSERT(cle->ce_ref == 0);
LASSERT(cle->ce_magic == &cl_env_init0);
- LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
+ LASSERT(!cle->ce_debug && !cle->ce_owner);
cle->ce_ref = 1;
cle->ce_debug = debug;
@@ -575,7 +573,7 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
{
struct cl_env *cle = cl_env_hops_obj(hn);
- LASSERT(cle->ce_owner != NULL);
+ LASSERT(cle->ce_owner);
return (key == cle->ce_owner);
}
@@ -609,7 +607,7 @@ static inline void cl_env_attach(struct cl_env *cle)
if (cle) {
int rc;
- LASSERT(cle->ce_owner == NULL);
+ LASSERT(!cle->ce_owner);
cle->ce_owner = (void *) (long) current->pid;
rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
&cle->ce_node);
@@ -637,7 +635,7 @@ static int cl_env_store_init(void)
CFS_HASH_MAX_THETA,
&cl_env_hops,
CFS_HASH_RW_BKTLOCK);
- return cl_env_hash != NULL ? 0 : -ENOMEM;
+ return cl_env_hash ? 0 : -ENOMEM;
}
static void cl_env_store_fini(void)
@@ -647,7 +645,7 @@ static void cl_env_store_fini(void)
static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
- if (cle == NULL)
+ if (!cle)
cle = cl_env_fetch();
if (cle && cle->ce_owner)
@@ -661,8 +659,8 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
struct lu_env *env;
struct cl_env *cle;
- cle = kmem_cache_alloc(cl_env_kmem, GFP_NOFS | __GFP_ZERO);
- if (cle != NULL) {
+ cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS);
+ if (cle) {
int rc;
INIT_LIST_HEAD(&cle->ce_linkage);
@@ -716,7 +714,7 @@ static struct lu_env *cl_env_peek(int *refcheck)
env = NULL;
cle = cl_env_fetch();
- if (cle != NULL) {
+ if (cle) {
CL_ENV_INC(hit);
env = &cle->ce_lu;
*refcheck = ++cle->ce_ref;
@@ -741,7 +739,7 @@ struct lu_env *cl_env_get(int *refcheck)
struct lu_env *env;
env = cl_env_peek(refcheck);
- if (env == NULL) {
+ if (!env) {
env = cl_env_new(lu_context_tags_default,
lu_session_tags_default,
__builtin_return_address(0));
@@ -768,7 +766,7 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
struct lu_env *env;
- LASSERT(cl_env_peek(refcheck) == NULL);
+ LASSERT(!cl_env_peek(refcheck));
env = cl_env_new(tags, tags, __builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
@@ -783,7 +781,7 @@ EXPORT_SYMBOL(cl_env_alloc);
static void cl_env_exit(struct cl_env *cle)
{
- LASSERT(cle->ce_owner == NULL);
+ LASSERT(!cle->ce_owner);
lu_context_exit(&cle->ce_lu.le_ctx);
lu_context_exit(&cle->ce_ses);
}
@@ -802,7 +800,7 @@ void cl_env_put(struct lu_env *env, int *refcheck)
cle = cl_env_container(env);
LASSERT(cle->ce_ref > 0);
- LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
+ LASSERT(ergo(refcheck, cle->ce_ref == *refcheck));
CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
if (--cle->ce_ref == 0) {
@@ -877,7 +875,7 @@ struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
nest->cen_cookie = NULL;
env = cl_env_peek(&nest->cen_refcheck);
- if (env != NULL) {
+ if (env) {
if (!cl_io_is_going(env))
return env;
cl_env_put(env, &nest->cen_refcheck);
@@ -929,14 +927,12 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
const char *typename;
struct lu_device *d;
- LASSERT(ldt != NULL);
-
typename = ldt->ldt_name;
d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
if (!IS_ERR(d)) {
int rc;
- if (site != NULL)
+ if (site)
d->ld_site = site;
rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
if (rc == 0) {
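
Note: assertions in these files, e.g. LASSERT(ergo(refcheck, cle->ce_ref == *refcheck)), rely on Lustre's logic helpers from libcfs. Their assumed definitions are sketched below; treat the exact spelling as an assumption rather than a quote of the headers:

/* ergo(a, b): logical implication, "a implies b".
 * equi(a, b): logical equivalence, after normalising both sides to 0/1. */
#define ergo(a, b) (!(a) || (b))
#define equi(a, b) (!!(a) == !!(b))

/* e.g. equi(state == CPS_OWNED, page->cp_owner): owned iff an owner is set */
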
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 61f28ebfc058..394580016638 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -69,7 +69,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
*/
static struct cl_page *cl_page_top_trusted(struct cl_page *page)
{
- while (page->cp_parent != NULL)
+ while (page->cp_parent)
page = page->cp_parent;
return page;
}
@@ -110,7 +110,7 @@ cl_page_at_trusted(const struct cl_page *page,
return slice;
}
page = page->cp_child;
- } while (page != NULL);
+ } while (page);
return NULL;
}
@@ -127,7 +127,7 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
assert_spin_locked(&hdr->coh_page_guard);
page = radix_tree_lookup(&hdr->coh_tree, index);
- if (page != NULL)
+ if (page)
cl_page_get_trust(page);
return page;
}
@@ -188,7 +188,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
* Pages for lsm-less file has no underneath sub-page
* for osc, in case of ...
*/
- PASSERT(env, page, slice != NULL);
+ PASSERT(env, page, slice);
page = slice->cpl_page;
/*
@@ -245,9 +245,9 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
struct cl_object *obj = page->cp_obj;
PASSERT(env, page, list_empty(&page->cp_batch));
- PASSERT(env, page, page->cp_owner == NULL);
- PASSERT(env, page, page->cp_req == NULL);
- PASSERT(env, page, page->cp_parent == NULL);
+ PASSERT(env, page, !page->cp_owner);
+ PASSERT(env, page, !page->cp_req);
+ PASSERT(env, page, !page->cp_parent);
PASSERT(env, page, page->cp_state == CPS_FREEING);
might_sleep();
@@ -255,7 +255,7 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
struct cl_page_slice *slice;
slice = list_entry(page->cp_layers.next,
- struct cl_page_slice, cpl_linkage);
+ struct cl_page_slice, cpl_linkage);
list_del_init(page->cp_layers.next);
slice->cpl_ops->cpo_fini(env, slice);
}
@@ -277,14 +277,15 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
}
static struct cl_page *cl_page_alloc(const struct lu_env *env,
- struct cl_object *o, pgoff_t ind, struct page *vmpage,
- enum cl_page_type type)
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type)
{
struct cl_page *page;
struct lu_object_header *head;
page = kzalloc(cl_object_header(o)->coh_page_bufsize, GFP_NOFS);
- if (page != NULL) {
+ if (page) {
int result = 0;
atomic_set(&page->cp_ref, 1);
@@ -303,9 +304,8 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
mutex_init(&page->cp_mutex);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
- list_for_each_entry(o, &head->loh_layers,
- co_lu.lo_linkage) {
- if (o->co_ops->coo_page_init != NULL) {
+ list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
+ if (o->co_ops->coo_page_init) {
result = o->co_ops->coo_page_init(env, o,
page, vmpage);
if (result != 0) {
@@ -369,13 +369,13 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
*/
page = cl_vmpage_page(vmpage, o);
PINVRNT(env, page,
- ergo(page != NULL,
+ ergo(page,
cl_page_vmpage(env, page) == vmpage &&
(void *)radix_tree_lookup(&hdr->coh_tree,
idx) == page));
}
- if (page != NULL)
+ if (page)
return page;
/* allocate and initialize cl_page */
@@ -385,7 +385,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
if (type == CPT_TRANSIENT) {
if (parent) {
- LASSERT(page->cp_parent == NULL);
+ LASSERT(!page->cp_parent);
page->cp_parent = parent;
parent->cp_child = page;
}
@@ -418,7 +418,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
"fail to insert into radix tree: %d\n", err);
} else {
if (parent) {
- LASSERT(page->cp_parent == NULL);
+ LASSERT(!page->cp_parent);
page->cp_parent = parent;
parent->cp_child = page;
}
@@ -426,7 +426,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
}
spin_unlock(&hdr->coh_page_guard);
- if (unlikely(ghost != NULL)) {
+ if (unlikely(ghost)) {
cl_page_delete0(env, ghost, 0);
cl_page_free(env, ghost);
}
@@ -467,14 +467,13 @@ static inline int cl_page_invariant(const struct cl_page *pg)
owner = pg->cp_owner;
return cl_page_in_use(pg) &&
- ergo(parent != NULL, parent->cp_child == pg) &&
- ergo(child != NULL, child->cp_parent == pg) &&
- ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
- ergo(parent != NULL, pg->cp_obj != parent->cp_obj) &&
- ergo(owner != NULL && parent != NULL,
+ ergo(parent, parent->cp_child == pg) &&
+ ergo(child, child->cp_parent == pg) &&
+ ergo(child, pg->cp_obj != child->cp_obj) &&
+ ergo(parent, pg->cp_obj != parent->cp_obj) &&
+ ergo(owner && parent,
parent->cp_owner == pg->cp_owner->ci_parent) &&
- ergo(owner != NULL && child != NULL,
- child->cp_owner->ci_parent == owner) &&
+ ergo(owner && child, child->cp_owner->ci_parent == owner) &&
/*
* Either page is early in initialization (has neither child
* nor parent yet), or it is in the object radix tree.
@@ -482,7 +481,7 @@ static inline int cl_page_invariant(const struct cl_page *pg)
ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
(void *)radix_tree_lookup(&header->coh_tree,
pg->cp_index) == pg ||
- (child == NULL && parent == NULL));
+ (!child && !parent));
}
static void cl_page_state_set0(const struct lu_env *env,
@@ -535,10 +534,10 @@ static void cl_page_state_set0(const struct lu_env *env,
old = page->cp_state;
PASSERT(env, page, allowed_transitions[old][state]);
CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
- for (; page != NULL; page = page->cp_child) {
+ for (; page; page = page->cp_child) {
PASSERT(env, page, page->cp_state == old);
PASSERT(env, page,
- equi(state == CPS_OWNED, page->cp_owner != NULL));
+ equi(state == CPS_OWNED, page->cp_owner));
cl_page_state_set_trust(page, state);
}
@@ -584,7 +583,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
LASSERT(page->cp_state == CPS_FREEING);
LASSERT(atomic_read(&page->cp_ref) == 0);
- PASSERT(env, page, page->cp_owner == NULL);
+ PASSERT(env, page, !page->cp_owner);
PASSERT(env, page, list_empty(&page->cp_batch));
/*
* Page is no longer reachable by other threads. Tear
@@ -609,11 +608,11 @@ struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
page = cl_page_top(page);
do {
list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_ops->cpo_vmpage != NULL)
+ if (slice->cpl_ops->cpo_vmpage)
return slice->cpl_ops->cpo_vmpage(env, slice);
}
page = page->cp_child;
- } while (page != NULL);
+ } while (page);
LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
}
EXPORT_SYMBOL(cl_page_vmpage);
@@ -639,10 +638,10 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
* can be rectified easily.
*/
top = (struct cl_page *)vmpage->private;
- if (top == NULL)
+ if (!top)
return NULL;
- for (page = top; page != NULL; page = page->cp_child) {
+ for (page = top; page; page = page->cp_child) {
if (cl_object_same(page->cp_obj, obj)) {
cl_page_get_trust(page);
break;
@@ -689,7 +688,7 @@ EXPORT_SYMBOL(cl_page_at);
cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + \
__op); \
- if (__method != NULL) { \
+ if (__method) { \
__result = (*__method)(__env, __scan, \
## __VA_ARGS__); \
if (__result != 0) \
@@ -697,7 +696,7 @@ EXPORT_SYMBOL(cl_page_at);
} \
} \
__page = __page->cp_child; \
- } while (__page != NULL && __result == 0); \
+ } while (__page && __result == 0); \
if (__result > 0) \
__result = 0; \
__result; \
@@ -717,12 +716,12 @@ do { \
cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + \
__op); \
- if (__method != NULL) \
+ if (__method) \
(*__method)(__env, __scan, \
## __VA_ARGS__); \
} \
__page = __page->cp_child; \
- } while (__page != NULL); \
+ } while (__page); \
} while (0)
#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
@@ -734,19 +733,19 @@ do { \
void (*__method)_proto; \
\
/* get to the bottom page. */ \
- while (__page->cp_child != NULL) \
+ while (__page->cp_child) \
__page = __page->cp_child; \
do { \
list_for_each_entry_reverse(__scan, &__page->cp_layers, \
cpl_linkage) { \
__method = *(void **)((char *)__scan->cpl_ops + \
__op); \
- if (__method != NULL) \
+ if (__method) \
(*__method)(__env, __scan, \
## __VA_ARGS__); \
} \
__page = __page->cp_parent; \
- } while (__page != NULL); \
+ } while (__page); \
} while (0)
static int cl_page_invoke(const struct lu_env *env,
@@ -772,8 +771,8 @@ static void cl_page_invoid(const struct lu_env *env,
static void cl_page_owner_clear(struct cl_page *page)
{
- for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
- if (page->cp_owner != NULL) {
+ for (page = cl_page_top(page); page; page = page->cp_child) {
+ if (page->cp_owner) {
LASSERT(page->cp_owner->ci_owned_nr > 0);
page->cp_owner->ci_owned_nr--;
page->cp_owner = NULL;
@@ -784,10 +783,8 @@ static void cl_page_owner_clear(struct cl_page *page)
static void cl_page_owner_set(struct cl_page *page)
{
- for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
- LASSERT(page->cp_owner != NULL);
+ for (page = cl_page_top(page); page; page = page->cp_child)
page->cp_owner->ci_owned_nr++;
- }
}
void cl_page_disown0(const struct lu_env *env,
@@ -862,8 +859,8 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
struct cl_io *, int),
io, nonblock);
if (result == 0) {
- PASSERT(env, pg, pg->cp_owner == NULL);
- PASSERT(env, pg, pg->cp_req == NULL);
+ PASSERT(env, pg, !pg->cp_owner);
+ PASSERT(env, pg, !pg->cp_req);
pg->cp_owner = io;
pg->cp_task = current;
cl_page_owner_set(pg);
@@ -921,7 +918,7 @@ void cl_page_assume(const struct lu_env *env,
io = cl_io_top(io);
cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
- PASSERT(env, pg, pg->cp_owner == NULL);
+ PASSERT(env, pg, !pg->cp_owner);
pg->cp_owner = io;
pg->cp_task = current;
cl_page_owner_set(pg);
@@ -1037,7 +1034,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
* skip removing it.
*/
tmp = pg->cp_child;
- for (; tmp != NULL; tmp = tmp->cp_child) {
+ for (; tmp; tmp = tmp->cp_child) {
void *value;
struct cl_object_header *hdr;
@@ -1135,7 +1132,7 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
pg = cl_page_top_trusted((struct cl_page *)pg);
slice = container_of(pg->cp_layers.next,
const struct cl_page_slice, cpl_linkage);
- PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked != NULL);
+ PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
/*
* Call ->cpo_is_vmlocked() directly instead of going through
* CL_PAGE_INVOKE(), because cl_page_is_vmlocked() is used by
@@ -1216,7 +1213,7 @@ void cl_page_completion(const struct lu_env *env,
PASSERT(env, pg, crt < CRT_NR);
/* cl_page::cp_req already cleared by the caller (osc_completion()) */
- PASSERT(env, pg, pg->cp_req == NULL);
+ PASSERT(env, pg, !pg->cp_req);
PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
@@ -1304,7 +1301,7 @@ int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
return -EINVAL;
list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
- if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
+ if (!scan->cpl_ops->io[crt].cpo_cache_add)
continue;
result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
@@ -1450,8 +1447,8 @@ void cl_page_print(const struct lu_env *env, void *cookie,
{
struct cl_page *scan;
- for (scan = cl_page_top((struct cl_page *)pg);
- scan != NULL; scan = scan->cp_child)
+ for (scan = cl_page_top((struct cl_page *)pg); scan;
+ scan = scan->cp_child)
cl_page_header_print(env, cookie, printer, scan);
CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
(const struct lu_env *env,
@@ -1480,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
/*
* XXX for now.
*/
- return (loff_t)idx << PAGE_CACHE_SHIFT;
+ return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);
@@ -1492,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
/*
* XXX for now.
*/
- return offset >> PAGE_CACHE_SHIFT;
+ return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);
int cl_page_size(const struct cl_object *obj)
{
- return 1 << PAGE_CACHE_SHIFT;
+ return 1 << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
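
Note: the cl_offset()/cl_index()/cl_page_size() hunks track the kernel-wide removal of the PAGE_CACHE_* aliases, which were defined equal to PAGE_SIZE, PAGE_SHIFT and PAGE_MASK. In isolation the two conversions look like this (a sketch, not the patched functions themselves):

#include <linux/mm.h>	/* PAGE_SHIFT */

/* Page index containing a byte offset. */
static inline pgoff_t demo_index(loff_t offset)
{
	return offset >> PAGE_SHIFT;
}

/* First byte of a page index; widen before shifting so a large index
 * cannot overflow on 32-bit builds. */
static inline loff_t demo_offset(pgoff_t idx)
{
	return (loff_t)idx << PAGE_SHIFT;
}
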
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 0975e443057c..c2cf015962dd 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -42,7 +42,6 @@
#include "../../include/linux/lnet/lnetctl.h"
#include "../include/lustre_debug.h"
#include "../include/lprocfs_status.h"
-#include "../include/lustre/lustre_build_version.h"
#include <linux/list.h>
#include "../include/cl_object.h"
#include "llog_internal.h"
@@ -52,7 +51,7 @@ EXPORT_SYMBOL(obd_devs);
struct list_head obd_types;
DEFINE_RWLOCK(obd_dev_lock);
-/* The following are visible and mutable through /proc/sys/lustre/. */
+/* The following are visible and mutable through /sys/fs/lustre. */
unsigned int obd_debug_peer_on_timeout;
EXPORT_SYMBOL(obd_debug_peer_on_timeout);
unsigned int obd_dump_on_timeout;
@@ -67,7 +66,7 @@ unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
EXPORT_SYMBOL(obd_timeout);
unsigned int obd_timeout_set;
EXPORT_SYMBOL(obd_timeout_set);
-/* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */
+/* Adaptive timeout defs here instead of ptlrpc module for /sys/fs/ access */
unsigned int at_min;
EXPORT_SYMBOL(at_min);
unsigned int at_max = 600;
@@ -180,7 +179,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
}
CDEBUG(D_IOCTL, "cmd = %x\n", cmd);
- if (obd_ioctl_getdata(&buf, &len, (void *)arg)) {
+ if (obd_ioctl_getdata(&buf, &len, (void __user *)arg)) {
CERROR("OBD ioctl: data error\n");
return -EINVAL;
}
@@ -200,8 +199,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
err = -ENOMEM;
goto out;
}
- err = copy_from_user(lcfg, data->ioc_pbuf1,
- data->ioc_plen1);
+ err = copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1);
if (!err)
err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1);
if (!err)
@@ -218,16 +216,16 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
goto out;
}
- if (strlen(BUILD_VERSION) + 1 > data->ioc_inllen1) {
+ if (strlen(LUSTRE_VERSION_STRING) + 1 > data->ioc_inllen1) {
CERROR("ioctl buffer too small to hold version\n");
err = -EINVAL;
goto out;
}
- memcpy(data->ioc_bulk, BUILD_VERSION,
- strlen(BUILD_VERSION) + 1);
+ memcpy(data->ioc_bulk, LUSTRE_VERSION_STRING,
+ strlen(LUSTRE_VERSION_STRING) + 1);
- err = obd_ioctl_popdata((void *)arg, data, len);
+ err = obd_ioctl_popdata((void __user *)arg, data, len);
if (err)
err = -EFAULT;
goto out;
@@ -246,7 +244,8 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
goto out;
}
- err = obd_ioctl_popdata((void *)arg, data, sizeof(*data));
+ err = obd_ioctl_popdata((void __user *)arg, data,
+ sizeof(*data));
if (err)
err = -EFAULT;
goto out;
@@ -283,7 +282,8 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
CDEBUG(D_IOCTL, "device name %s, dev %d\n", data->ioc_inlbuf1,
dev);
- err = obd_ioctl_popdata((void *)arg, data, sizeof(*data));
+ err = obd_ioctl_popdata((void __user *)arg, data,
+ sizeof(*data));
if (err)
err = -EFAULT;
goto out;
@@ -330,7 +330,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
(int)index, status, obd->obd_type->typ_name,
obd->obd_name, obd->obd_uuid.uuid,
atomic_read(&obd->obd_refcount));
- err = obd_ioctl_popdata((void *)arg, data, len);
+ err = obd_ioctl_popdata((void __user *)arg, data, len);
err = 0;
goto out;
@@ -339,7 +339,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
}
if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
- if (data->ioc_inllen4 <= 0 || data->ioc_inlbuf4 == NULL) {
+ if (data->ioc_inllen4 <= 0 || !data->ioc_inlbuf4) {
err = -EINVAL;
goto out;
}
@@ -356,7 +356,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
goto out;
}
- if (obd == NULL) {
+ if (!obd) {
CERROR("OBD ioctl : No Device %d\n", data->ioc_dev);
err = -EINVAL;
goto out;
@@ -388,7 +388,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
if (err)
goto out;
- err = obd_ioctl_popdata((void *)arg, data, len);
+ err = obd_ioctl_popdata((void __user *)arg, data, len);
if (err)
err = -EFAULT;
goto out;
@@ -461,9 +461,9 @@ static int obd_init_checks(void)
CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
- if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
+ if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
CWARN("mask failed: u64val %llu >= %llu\n", u64val,
- (__u64)PAGE_CACHE_SIZE);
+ (__u64)PAGE_SIZE);
ret = -EINVAL;
}
@@ -473,13 +473,13 @@ static int obd_init_checks(void)
extern int class_procfs_init(void);
extern int class_procfs_clean(void);
-static int __init init_obdclass(void)
+static int __init obdclass_init(void)
{
int i, err;
int lustre_register_fs(void);
- LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n");
+ LCONSOLE_INFO("Lustre: Build Version: " LUSTRE_VERSION_STRING "\n");
spin_lock_init(&obd_types_lock);
obd_zombie_impexp_init();
@@ -507,8 +507,9 @@ static int __init init_obdclass(void)
/* Default the dirty page cache cap to 1/2 of system memory.
* For clients with less memory, a larger fraction is needed
- * for other purposes (mostly for BGL). */
- if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+ * for other purposes (mostly for BGL).
+ */
+ if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
obd_max_dirty_pages = totalram_pages / 4;
else
obd_max_dirty_pages = totalram_pages / 2;
@@ -542,9 +543,7 @@ static int __init init_obdclass(void)
return err;
}
-/* liblustre doesn't call cleanup_obdclass, apparently. we carry on in this
- * ifdef to the end of the file to cover module and versioning goo.*/
-static void cleanup_obdclass(void)
+static void obdclass_exit(void)
{
int i;
@@ -577,9 +576,9 @@ static void cleanup_obdclass(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION);
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Lustre Class Driver");
MODULE_VERSION(LUSTRE_VERSION_STRING);
+MODULE_LICENSE("GPL");
-module_init(init_obdclass);
-module_exit(cleanup_obdclass);
+module_init(obdclass_init);
+module_exit(obdclass_exit);
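
Note: class_handle_ioctl() now passes arg as void __user * into obd_ioctl_getdata()/obd_ioctl_popdata(). The annotation exists for static analysis: sparse flags any direct dereference of a __user pointer, forcing data through the copy helpers. A hypothetical handler showing the discipline:

#include <linux/uaccess.h>

struct demo_args {
	int in;
	int out;
};

static long demo_ioctl(unsigned long arg)
{
	void __user *uarg = (void __user *)arg;
	struct demo_args a;

	if (copy_from_user(&a, uarg, sizeof(a)))
		return -EFAULT;		/* bad userspace pointer */
	a.out = a.in * 2;		/* work on the kernel-side copy only */
	if (copy_to_user(uarg, &a, sizeof(a)))
		return -EFAULT;
	return 0;
}
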
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 228c44c37c4a..cf97b8f06764 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -42,6 +42,7 @@
#define DEBUG_SUBSYSTEM S_CLASS
#include "../include/obd_class.h"
#include "../include/lprocfs_status.h"
+#include "../include/lustre_kernelcomm.h"
spinlock_t obd_types_lock;
@@ -68,18 +69,17 @@ static struct obd_device *obd_device_alloc(void)
{
struct obd_device *obd;
- obd = kmem_cache_alloc(obd_device_cachep, GFP_NOFS | __GFP_ZERO);
- if (obd != NULL)
+ obd = kmem_cache_zalloc(obd_device_cachep, GFP_NOFS);
+ if (obd)
obd->obd_magic = OBD_DEVICE_MAGIC;
return obd;
}
static void obd_device_free(struct obd_device *obd)
{
- LASSERT(obd != NULL);
LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC, "obd %p obd_magic %08x != %08x\n",
obd, obd->obd_magic, OBD_DEVICE_MAGIC);
- if (obd->obd_namespace != NULL) {
+ if (obd->obd_namespace) {
CERROR("obd %p: namespace %p was not properly cleaned up (obd_force=%d)!\n",
obd, obd->obd_namespace, obd->obd_force);
LBUG();
@@ -112,15 +112,6 @@ static struct obd_type *class_get_type(const char *name)
if (!type) {
const char *modname = name;
- if (strcmp(modname, "obdfilter") == 0)
- modname = "ofd";
-
- if (strcmp(modname, LUSTRE_LWP_NAME) == 0)
- modname = LUSTRE_OSP_NAME;
-
- if (!strncmp(modname, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME)))
- modname = LUSTRE_MDT_NAME;
-
if (!request_module("%s", modname)) {
CDEBUG(D_INFO, "Loaded module '%s'\n", modname);
type = class_search_type(name);
@@ -202,7 +193,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
goto failed;
}
- if (ldt != NULL) {
+ if (ldt) {
type->typ_lu = ldt;
rc = lu_device_type_init(ldt);
if (rc != 0)
@@ -364,7 +355,7 @@ void class_release_dev(struct obd_device *obd)
obd, obd->obd_magic, OBD_DEVICE_MAGIC);
LASSERTF(obd == obd_devs[obd->obd_minor], "obd %p != obd_devs[%d] %p\n",
obd, obd->obd_minor, obd_devs[obd->obd_minor]);
- LASSERT(obd_type != NULL);
+ LASSERT(obd_type);
CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n",
obd->obd_name, obd->obd_minor, obd->obd_type->typ_name);
@@ -390,7 +381,8 @@ int class_name2dev(const char *name)
if (obd && strcmp(name, obd->obd_name) == 0) {
/* Make sure we finished attaching before we give
- out any references */
+ * out any references
+ */
LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
if (obd->obd_attached) {
read_unlock(&obd_dev_lock);
@@ -465,11 +457,12 @@ struct obd_device *class_num2obd(int num)
EXPORT_SYMBOL(class_num2obd);
/* Search for a client OBD connected to tgt_uuid. If grp_uuid is
- specified, then only the client with that uuid is returned,
- otherwise any client connected to the tgt is returned. */
+ * specified, then only the client with that uuid is returned,
+ * otherwise any client connected to the tgt is returned.
+ */
struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
- const char *typ_name,
- struct obd_uuid *grp_uuid)
+ const char *typ_name,
+ struct obd_uuid *grp_uuid)
{
int i;
@@ -497,9 +490,10 @@ struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
EXPORT_SYMBOL(class_find_client_obd);
/* Iterate the obd_device list looking for devices that have grp_uuid. Start
- searching at *next, and if a device is found, the next index to look
- at is saved in *next. If next is NULL, then the first matching device
- will always be returned. */
+ * searching at *next, and if a device is found, the next index to look
+ * at is saved in *next. If next is NULL, then the first matching device
+ * will always be returned.
+ */
struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
{
int i;
@@ -588,21 +582,21 @@ int obd_init_caches(void)
{
LASSERT(!obd_device_cachep);
obd_device_cachep = kmem_cache_create("ll_obd_dev_cache",
- sizeof(struct obd_device),
- 0, 0, NULL);
+ sizeof(struct obd_device),
+ 0, 0, NULL);
if (!obd_device_cachep)
goto out;
LASSERT(!obdo_cachep);
obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo),
- 0, 0, NULL);
+ 0, 0, NULL);
if (!obdo_cachep)
goto out;
LASSERT(!import_cachep);
import_cachep = kmem_cache_create("ll_import_cache",
- sizeof(struct obd_import),
- 0, 0, NULL);
+ sizeof(struct obd_import),
+ 0, 0, NULL);
if (!import_cachep)
goto out;
@@ -658,7 +652,7 @@ static void class_export_destroy(struct obd_export *exp)
struct obd_device *obd = exp->exp_obd;
LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
- LASSERT(obd != NULL);
+ LASSERT(obd);
CDEBUG(D_IOCTL, "destroying export %p/%s for %s\n", exp,
exp->exp_client_uuid.uuid, obd->obd_name);
@@ -698,7 +692,6 @@ EXPORT_SYMBOL(class_export_get);
void class_export_put(struct obd_export *exp)
{
- LASSERT(exp != NULL);
LASSERT_ATOMIC_GT_LT(&exp->exp_refcount, 0, LI_POISON);
CDEBUG(D_INFO, "PUTting export %p : new refcount %d\n", exp,
atomic_read(&exp->exp_refcount) - 1);
@@ -718,7 +711,8 @@ EXPORT_SYMBOL(class_export_put);
/* Creates a new export, adds it to the hash table, and returns a
* pointer to it. The refcount is 2: one for the hash reference, and
- * one for the pointer returned by this function. */
+ * one for the pointer returned by this function.
+ */
struct obd_export *class_new_export(struct obd_device *obd,
struct obd_uuid *cluuid)
{
@@ -834,7 +828,7 @@ EXPORT_SYMBOL(class_unlink_export);
static void class_import_destroy(struct obd_import *imp)
{
CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
- imp->imp_obd->obd_name);
+ imp->imp_obd->obd_name);
LASSERT_ATOMIC_ZERO(&imp->imp_refcount);
@@ -844,7 +838,7 @@ static void class_import_destroy(struct obd_import *imp)
struct obd_import_conn *imp_conn;
imp_conn = list_entry(imp->imp_conn_list.next,
- struct obd_import_conn, oic_item);
+ struct obd_import_conn, oic_item);
list_del_init(&imp_conn->oic_item);
ptlrpc_put_connection_superhack(imp_conn->oic_conn);
kfree(imp_conn);
@@ -901,8 +895,9 @@ static void init_imp_at(struct imp_at *at)
at_init(&at->iat_net_latency, 0, 0);
for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
/* max service estimates are tracked on the server side, so
- don't use the AT history here, just use the last reported
- val. (But keep hist for proc histogram, worst_ever) */
+ * don't use the AT history here, just use the last reported
+ * val. (But keep hist for proc histogram, worst_ever)
+ */
at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT,
AT_FLG_NOHIST);
}
@@ -941,7 +936,8 @@ struct obd_import *class_new_import(struct obd_device *obd)
init_imp_at(&imp->imp_at);
/* the default magic is V2, will be used in connect RPC, and
- * then adjusted according to the flags in request/reply. */
+ * then adjusted according to the flags in request/reply.
+ */
imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;
return imp;
@@ -950,7 +946,7 @@ EXPORT_SYMBOL(class_new_import);
void class_destroy_import(struct obd_import *import)
{
- LASSERT(import != NULL);
+ LASSERT(import);
LASSERT(import != LP_POISON);
class_handle_unhash(&import->imp_handle);
@@ -970,8 +966,7 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
LASSERT(lock->l_exp_refs_nr >= 0);
- if (lock->l_exp_refs_target != NULL &&
- lock->l_exp_refs_target != exp) {
+ if (lock->l_exp_refs_target && lock->l_exp_refs_target != exp) {
LCONSOLE_WARN("setting export %p for lock %p which already has export %p\n",
exp, lock, lock->l_exp_refs_target);
}
@@ -1005,17 +1000,18 @@ EXPORT_SYMBOL(__class_export_del_lock_ref);
#endif
/* A connection defines an export context in which preallocation can
- be managed. This releases the export pointer reference, and returns
- the export handle, so the export refcount is 1 when this function
- returns. */
+ * be managed. This releases the export pointer reference, and returns
+ * the export handle, so the export refcount is 1 when this function
+ * returns.
+ */
int class_connect(struct lustre_handle *conn, struct obd_device *obd,
struct obd_uuid *cluuid)
{
struct obd_export *export;
- LASSERT(conn != NULL);
- LASSERT(obd != NULL);
- LASSERT(cluuid != NULL);
+ LASSERT(conn);
+ LASSERT(obd);
+ LASSERT(cluuid);
export = class_new_export(obd, cluuid);
if (IS_ERR(export))
@@ -1035,7 +1031,8 @@ EXPORT_SYMBOL(class_connect);
* and if disconnect really need
* 2 - removing from hash
* 3 - in client_unlink_export
- * The export pointer passed to this function can destroyed */
+ * The export pointer passed to this function can be destroyed
+ */
int class_disconnect(struct obd_export *export)
{
int already_disconnected;
@@ -1052,7 +1049,8 @@ int class_disconnect(struct obd_export *export)
/* class_cleanup(), abort_recovery(), and class_fail_export()
* all end up in here, and if any of them race we shouldn't
- * call extra class_export_puts(). */
+ * call extra class_export_puts().
+ */
if (already_disconnected)
goto no_disconn;
@@ -1092,7 +1090,8 @@ void class_fail_export(struct obd_export *exp)
/* Most callers into obd_disconnect are removing their own reference
* (request, for example) in addition to the one from the hash table.
- * We don't have such a reference here, so make one. */
+ * We don't have such a reference here, so make one.
+ */
class_export_get(exp);
rc = obd_disconnect(exp);
if (rc)
@@ -1126,29 +1125,29 @@ static void obd_zombie_impexp_cull(void)
import = NULL;
if (!list_empty(&obd_zombie_imports)) {
import = list_entry(obd_zombie_imports.next,
- struct obd_import,
- imp_zombie_chain);
+ struct obd_import,
+ imp_zombie_chain);
list_del_init(&import->imp_zombie_chain);
}
export = NULL;
if (!list_empty(&obd_zombie_exports)) {
export = list_entry(obd_zombie_exports.next,
- struct obd_export,
- exp_obd_chain);
+ struct obd_export,
+ exp_obd_chain);
list_del_init(&export->exp_obd_chain);
}
spin_unlock(&obd_zombie_impexp_lock);
- if (import != NULL) {
+ if (import) {
class_import_destroy(import);
spin_lock(&obd_zombie_impexp_lock);
zombies_count--;
spin_unlock(&obd_zombie_impexp_lock);
}
- if (export != NULL) {
+ if (export) {
class_export_destroy(export);
spin_lock(&obd_zombie_impexp_lock);
zombies_count--;
@@ -1156,7 +1155,7 @@ static void obd_zombie_impexp_cull(void)
}
cond_resched();
- } while (import != NULL || export != NULL);
+ } while (import || export);
}
static struct completion obd_zombie_start;
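
Note: obd_zombie_impexp_cull() above keeps a common teardown shape: detach at most one entry per pass while holding the spinlock, destroy it with the lock dropped, and loop until the lists drain. Reduced to a single list with illustrative types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_zombie {
	struct list_head chain;
};

static LIST_HEAD(demo_zombies);
static DEFINE_SPINLOCK(demo_lock);

static void demo_cull(void)
{
	struct demo_zombie *z;

	do {
		z = NULL;
		spin_lock(&demo_lock);
		if (!list_empty(&demo_zombies)) {
			z = list_entry(demo_zombies.next,
				       struct demo_zombie, chain);
			/* unlink while the list is still protected */
			list_del_init(&z->chain);
		}
		spin_unlock(&demo_lock);
		/* destruction (which may sleep) happens unlocked */
		kfree(z);
	} while (z);
}
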
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
index d8230aec9a2b..8405eccdac19 100644
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ b/drivers/staging/lustre/lustre/obdclass/kernelcomm.c
@@ -42,9 +42,8 @@
#define DEBUG_SUBSYSTEM S_CLASS
#define D_KUC D_OTHER
-#include "../../include/linux/libcfs/libcfs.h"
-
-/* This is the kernel side (liblustre as well). */
+#include "../include/obd_support.h"
+#include "../include/lustre_kernelcomm.h"
/**
 * libcfs_kkuc_msg_put - send a message from kernel to userspace
@@ -58,14 +57,14 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload)
ssize_t count = kuch->kuc_msglen;
loff_t offset = 0;
mm_segment_t fs;
- int rc = -ENOSYS;
+ int rc = -ENXIO;
- if (filp == NULL || IS_ERR(filp))
+ if (IS_ERR_OR_NULL(filp))
return -EBADF;
if (kuch->kuc_magic != KUC_MAGIC) {
CERROR("KernelComm: bad magic %x\n", kuch->kuc_magic);
- return -ENOSYS;
+ return rc;
}
fs = get_fs();
@@ -90,18 +89,20 @@ int libcfs_kkuc_msg_put(struct file *filp, void *payload)
}
EXPORT_SYMBOL(libcfs_kkuc_msg_put);
-/* Broadcast groups are global across all mounted filesystems;
+/*
+ * Broadcast groups are global across all mounted filesystems;
* i.e. registering for a group on 1 fs will get messages for that
- * group from any fs */
+ * group from any fs
+ */
/** A single group registration has a uid and a file pointer */
struct kkuc_reg {
- struct list_head kr_chain;
- int kr_uid;
+ struct list_head kr_chain;
+ int kr_uid;
struct file *kr_fp;
- __u32 kr_data;
+ char kr_data[0];
};
-static struct list_head kkuc_groups[KUC_GRP_MAX+1] = {};
+static struct list_head kkuc_groups[KUC_GRP_MAX + 1] = {};
/* Protect message sending against remove and adds */
static DECLARE_RWSEM(kg_sem);
@@ -109,9 +110,10 @@ static DECLARE_RWSEM(kg_sem);
* @param filp pipe to write into
* @param uid identifier for this receiver
* @param group group number
+ * @param data user data
*/
int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group,
- __u32 data)
+ void *data, size_t data_len)
{
struct kkuc_reg *reg;
@@ -121,20 +123,20 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group,
}
/* fput in group_rem */
- if (filp == NULL)
+ if (!filp)
return -EBADF;
/* freed in group_rem */
- reg = kmalloc(sizeof(*reg), 0);
- if (reg == NULL)
+ reg = kmalloc(sizeof(*reg) + data_len, 0);
+ if (!reg)
return -ENOMEM;
reg->kr_fp = filp;
reg->kr_uid = uid;
- reg->kr_data = data;
+ memcpy(reg->kr_data, data, data_len);
down_write(&kg_sem);
- if (kkuc_groups[group].next == NULL)
+ if (!kkuc_groups[group].next)
INIT_LIST_HEAD(&kkuc_groups[group]);
list_add(&reg->kr_chain, &kkuc_groups[group]);
up_write(&kg_sem);
@@ -145,14 +147,14 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, unsigned int group,
}
EXPORT_SYMBOL(libcfs_kkuc_group_add);
-int libcfs_kkuc_group_rem(int uid, int group)
+int libcfs_kkuc_group_rem(int uid, unsigned int group)
{
struct kkuc_reg *reg, *next;
- if (kkuc_groups[group].next == NULL)
+ if (!kkuc_groups[group].next)
return 0;
- if (uid == 0) {
+ if (!uid) {
/* Broadcast a shutdown message */
struct kuc_hdr lh;
@@ -165,11 +167,11 @@ int libcfs_kkuc_group_rem(int uid, int group)
down_write(&kg_sem);
list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
- if ((uid == 0) || (uid == reg->kr_uid)) {
+ if (!uid || (uid == reg->kr_uid)) {
list_del(&reg->kr_chain);
CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n",
reg->kr_uid, reg->kr_fp, group);
- if (reg->kr_fp != NULL)
+ if (reg->kr_fp)
fput(reg->kr_fp);
kfree(reg);
}
@@ -180,28 +182,30 @@ int libcfs_kkuc_group_rem(int uid, int group)
}
EXPORT_SYMBOL(libcfs_kkuc_group_rem);
-int libcfs_kkuc_group_put(int group, void *payload)
+int libcfs_kkuc_group_put(unsigned int group, void *payload)
{
struct kkuc_reg *reg;
- int rc = 0;
+ int rc = 0;
int one_success = 0;
- down_read(&kg_sem);
+ down_write(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp != NULL) {
+ if (reg->kr_fp) {
rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
- if (rc == 0)
+ if (!rc) {
one_success = 1;
- else if (rc == -EPIPE) {
+ } else if (rc == -EPIPE) {
fput(reg->kr_fp);
reg->kr_fp = NULL;
}
}
}
- up_read(&kg_sem);
+ up_write(&kg_sem);
- /* don't return an error if the message has been delivered
- * at least to one agent */
+ /*
+ * don't return an error if the message has been delivered
+ * at least to one agent
+ */
if (one_success)
rc = 0;
@@ -213,9 +217,9 @@ EXPORT_SYMBOL(libcfs_kkuc_group_put);
* Calls a callback function for each link of the given kuc group.
* @param group the group to call the function on.
* @param cb_func the function to be called.
- * @param cb_arg iextra argument to be passed to the callback function.
+ * @param cb_arg extra argument to be passed to the callback function.
*/
-int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
+int libcfs_kkuc_group_foreach(unsigned int group, libcfs_kkuc_cb_t cb_func,
void *cb_arg)
{
struct kkuc_reg *reg;
@@ -227,15 +231,15 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
}
/* no link for this group */
- if (kkuc_groups[group].next == NULL)
+ if (!kkuc_groups[group].next)
return 0;
- down_write(&kg_sem);
+ down_read(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
- if (reg->kr_fp != NULL)
+ if (reg->kr_fp)
rc = cb_func(reg->kr_data, cb_arg);
}
- up_write(&kg_sem);
+ up_read(&kg_sem);
return rc;
}
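
Note: struct kkuc_reg above trades its fixed __u32 kr_data for a char kr_data[0] tail, so each registration carries an arbitrary caller blob in the same kmalloc() as the header. The general trailing-payload pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_reg {
	int	uid;
	char	data[0];	/* payload starts immediately after the header */
};

static struct demo_reg *demo_reg_new(int uid, const void *blob, size_t len)
{
	/* one allocation covers the header plus the trailing payload */
	struct demo_reg *reg = kmalloc(sizeof(*reg) + len, GFP_KERNEL);

	if (!reg)
		return NULL;
	reg->uid = uid;
	memcpy(reg->data, blob, len);
	return reg;
}
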
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index a055cbb4f162..8eddf206f1ed 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -59,7 +59,6 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <asm/ioctls.h>
-#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/seq_file.h>
@@ -71,17 +70,16 @@
#include "../../include/obd_class.h"
#include "../../include/lprocfs_status.h"
#include "../../include/lustre_ver.h"
-#include "../../include/lustre/lustre_build_version.h"
/* buffer MUST be at least the size of obd_ioctl_hdr */
-int obd_ioctl_getdata(char **buf, int *len, void *arg)
+int obd_ioctl_getdata(char **buf, int *len, void __user *arg)
{
struct obd_ioctl_hdr hdr;
struct obd_ioctl_data *data;
int err;
int offset = 0;
- if (copy_from_user(&hdr, (void *)arg, sizeof(hdr)))
+ if (copy_from_user(&hdr, arg, sizeof(hdr)))
return -EFAULT;
if (hdr.ioc_version != OBD_IOCTL_VERSION) {
@@ -104,9 +102,10 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
/* When there are lots of processes calling vmalloc on multi-core
* system, the high lock contention will hurt performance badly,
* obdfilter-survey is an example, which relies on ioctl. So we'd
- * better avoid vmalloc on ioctl path. LU-66 */
+ * better avoid vmalloc on the ioctl path. LU-66
+ */
*buf = libcfs_kvzalloc(hdr.ioc_len, GFP_NOFS);
- if (*buf == NULL) {
+ if (!*buf) {
CERROR("Cannot allocate control buffer of len %d\n",
hdr.ioc_len);
return -EINVAL;
@@ -114,7 +113,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
*len = hdr.ioc_len;
data = (struct obd_ioctl_data *)*buf;
- if (copy_from_user(*buf, (void *)arg, hdr.ioc_len)) {
+ if (copy_from_user(*buf, arg, hdr.ioc_len)) {
err = -EFAULT;
goto free_buf;
}
@@ -144,9 +143,8 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
offset += cfs_size_round(data->ioc_inllen3);
}
- if (data->ioc_inllen4) {
+ if (data->ioc_inllen4)
data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
- }
return 0;
@@ -156,7 +154,7 @@ free_buf:
}
EXPORT_SYMBOL(obd_ioctl_getdata);
-int obd_ioctl_popdata(void *arg, void *data, int len)
+int obd_ioctl_popdata(void __user *arg, void *data, int len)
{
int err;
@@ -240,7 +238,7 @@ static ssize_t health_show(struct kobject *kobj, struct attribute *attr,
struct obd_device *obd;
obd = class_num2obd(i);
- if (obd == NULL || !obd->obd_attached || !obd->obd_set_up)
+ if (!obd || !obd->obd_attached || !obd->obd_set_up)
continue;
LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
@@ -250,9 +248,8 @@ static ssize_t health_show(struct kobject *kobj, struct attribute *attr,
class_incref(obd, __func__, current);
read_unlock(&obd_dev_lock);
- if (obd_health_check(NULL, obd)) {
+ if (obd_health_check(NULL, obd))
healthy = false;
- }
class_decref(obd, __func__, current);
read_lock(&obd_dev_lock);
}
@@ -360,7 +357,7 @@ static int obd_device_list_seq_show(struct seq_file *p, void *v)
struct obd_device *obd = class_num2obd((int)index);
char *status;
- if (obd == NULL)
+ if (!obd)
return 0;
LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
@@ -424,7 +421,7 @@ int class_procfs_init(void)
struct dentry *file;
lustre_kobj = kobject_create_and_add("lustre", fs_kobj);
- if (lustre_kobj == NULL)
+ if (!lustre_kobj)
goto out;
/* Create the files associated with this kobject */
@@ -456,8 +453,7 @@ out:
int class_procfs_clean(void)
{
- if (debugfs_lustre_root != NULL)
- debugfs_remove_recursive(debugfs_lustre_root);
+ debugfs_remove_recursive(debugfs_lustre_root);
debugfs_lustre_root = NULL;
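
The obd_ioctl_getdata() change above is mostly about typing: marking arg as void __user * lets sparse reject direct dereferences, and copy_from_user() remains the only legal access. Here is a standalone sketch of the same two-step header-then-payload copy-in, with __user and copy_from_user() stubbed out for userspace; the bounds check and error values are illustrative, not the kernel's.

/*
 * Mock of the ioctl copy-in pattern: read a fixed-size header first,
 * sanity-check the declared length, then copy the full payload.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define __user			/* kernel: address-space annotation */

struct hdr { unsigned int version; unsigned int len; };

static int copy_from_user(void *dst, const void __user *src, size_t n)
{
	memcpy(dst, src, n);	/* kernel version may fault and return >0 */
	return 0;
}

static int getdata(char **buf, int *len, void __user *arg)
{
	struct hdr hdr;

	if (copy_from_user(&hdr, arg, sizeof(hdr)))
		return -1;
	if (hdr.len < sizeof(hdr) || hdr.len > 1 << 20)
		return -1;		/* reject absurd lengths */
	*buf = calloc(1, hdr.len);
	if (!*buf)
		return -1;
	*len = hdr.len;
	if (copy_from_user(*buf, arg, hdr.len)) {
		free(*buf);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct { struct hdr h; char body[8]; } msg = { { 1, sizeof(msg) }, "payload" };
	char *buf = NULL;
	int len;

	if (!getdata(&buf, &len, &msg))
		printf("copied %d bytes: %s\n", len, buf + sizeof(struct hdr));
	free(buf);
	return 0;
}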
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 9496c09b2b69..b41b65e2f021 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -47,7 +47,6 @@
#include "../../include/lustre/lustre_idl.h"
#include <linux/fs.h>
-#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
{
@@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
dst->i_blkbits = ffs(src->o_blksize) - 1;
- if (dst->i_blkbits < PAGE_CACHE_SHIFT)
- dst->i_blkbits = PAGE_CACHE_SHIFT;
+ if (dst->i_blkbits < PAGE_SHIFT)
+ dst->i_blkbits = PAGE_SHIFT;
/* allocation of space */
if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
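
The linux-obdo.c hunk is a pure rename (PAGE_CACHE_SHIFT and PAGE_SHIFT were equal by this point); the logic it preserves derives block bits from a power-of-two block size and clamps them to the page shift. A self-contained version, assuming 4 KiB pages:

#include <stdio.h>
#include <strings.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

static int blkbits_for(unsigned int blksize)
{
	int bits = ffs(blksize) - 1;	/* log2 for power-of-two sizes */

	return bits < PAGE_SHIFT ? PAGE_SHIFT : bits;
}

int main(void)
{
	printf("512 -> %d bits\n", blkbits_for(512));	/* clamped to 12 */
	printf("64K -> %d bits\n", blkbits_for(65536));	/* 16 */
	return 0;
}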
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index 42fc26f4ae25..e6bf414a4444 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -62,8 +62,8 @@ struct static_lustre_uintvalue_attr {
};
static ssize_t static_uintvalue_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
+ struct attribute *attr,
+ char *buf)
{
struct static_lustre_uintvalue_attr *lattr = (void *)attr;
@@ -71,8 +71,8 @@ static ssize_t static_uintvalue_show(struct kobject *kobj,
}
static ssize_t static_uintvalue_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
+ struct attribute *attr,
+ const char *buffer, size_t count)
{
struct static_lustre_uintvalue_attr *lattr = (void *)attr;
int rc;
@@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
return sprintf(buf, "%ul\n",
- obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
+ obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
}
static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
if (rc)
return rc;
- val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
+ val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
if (val > ((totalram_pages / 10) * 9)) {
/* Somebody wants to assign too much memory to dirty pages */
return -EINVAL;
}
- if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+ if (val < 4 << (20 - PAGE_SHIFT)) {
/* Less than 4 Mb for dirty cache is also bad */
return -EINVAL;
}
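
The sysctl conversions above follow the same rename; the arithmetic is megabytes-to-pages by shifting with (20 - PAGE_SHIFT), since one megabyte is 2^20 bytes. A quick standalone check, again assuming PAGE_SHIFT is 12:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long mb = 64;
	unsigned long pages = mb << (20 - PAGE_SHIFT);	/* 64 MB -> 16384 pages */

	printf("%lu MB = %lu pages\n", mb, pages);
	printf("%lu pages = %lu MB\n", pages, pages >> (20 - PAGE_SHIFT));
	return 0;
}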
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index f956d7ed6785..992573eae1b1 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -76,8 +76,6 @@ static struct llog_handle *llog_alloc_handle(void)
*/
static void llog_free_handle(struct llog_handle *loghandle)
{
- LASSERT(loghandle != NULL);
-
/* failed llog_init_handle */
if (!loghandle->lgh_hdr)
goto out;
@@ -115,7 +113,7 @@ static int llog_read_header(const struct lu_env *env,
if (rc)
return rc;
- if (lop->lop_read_header == NULL)
+ if (!lop->lop_read_header)
return -EOPNOTSUPP;
rc = lop->lop_read_header(env, handle);
@@ -144,7 +142,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
struct llog_log_hdr *llh;
int rc;
- LASSERT(handle->lgh_hdr == NULL);
+ LASSERT(!handle->lgh_hdr);
llh = kzalloc(sizeof(*llh), GFP_NOFS);
if (!llh)
@@ -228,11 +226,11 @@ static int llog_process_thread(void *arg)
return 0;
}
- if (cd != NULL) {
+ if (cd) {
last_called_index = cd->lpcd_first_idx;
index = cd->lpcd_first_idx + 1;
}
- if (cd != NULL && cd->lpcd_last_idx)
+ if (cd && cd->lpcd_last_idx)
last_index = cd->lpcd_last_idx;
else
last_index = LLOG_BITMAP_BYTES * 8 - 1;
@@ -262,7 +260,8 @@ repeat:
/* NB: when rec->lrh_len is accessed it is already swabbed
* since it is used at the "end" of the loop and the rec
- * swabbing is done at the beginning of the loop. */
+ * swabbing is done at the beginning of the loop.
+ */
for (rec = (struct llog_rec_hdr *)buf;
(char *)rec < buf + LLOG_CHUNK_SIZE;
rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
@@ -328,7 +327,7 @@ repeat:
}
out:
- if (cd != NULL)
+ if (cd)
cd->lpcd_last_idx = last_called_index;
kfree(buf);
@@ -366,27 +365,28 @@ int llog_process_or_fork(const struct lu_env *env,
int rc;
lpi = kzalloc(sizeof(*lpi), GFP_NOFS);
- if (!lpi) {
- CERROR("cannot alloc pointer\n");
+ if (!lpi)
return -ENOMEM;
- }
lpi->lpi_loghandle = loghandle;
lpi->lpi_cb = cb;
lpi->lpi_cbdata = data;
lpi->lpi_catdata = catdata;
if (fork) {
+ struct task_struct *task;
+
/* The new thread can't use parent env,
- * init the new one in llog_process_thread_daemonize. */
+ * init the new one in llog_process_thread_daemonize.
+ */
lpi->lpi_env = NULL;
init_completion(&lpi->lpi_completion);
- rc = PTR_ERR(kthread_run(llog_process_thread_daemonize, lpi,
- "llog_process_thread"));
- if (IS_ERR_VALUE(rc)) {
+ task = kthread_run(llog_process_thread_daemonize, lpi,
+ "llog_process_thread");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
CERROR("%s: cannot start thread: rc = %d\n",
loghandle->lgh_ctxt->loc_obd->obd_name, rc);
- kfree(lpi);
- return rc;
+ goto out_lpi;
}
wait_for_completion(&lpi->lpi_completion);
} else {
@@ -394,6 +394,7 @@ int llog_process_or_fork(const struct lu_env *env,
llog_process_thread(lpi);
}
rc = lpi->lpi_rc;
+out_lpi:
kfree(lpi);
return rc;
}
@@ -416,13 +417,13 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
LASSERT(ctxt);
LASSERT(ctxt->loc_logops);
- if (ctxt->loc_logops->lop_open == NULL) {
+ if (!ctxt->loc_logops->lop_open) {
*lgh = NULL;
return -EOPNOTSUPP;
}
*lgh = llog_alloc_handle();
- if (*lgh == NULL)
+ if (!*lgh)
return -ENOMEM;
(*lgh)->lgh_ctxt = ctxt;
(*lgh)->lgh_logops = ctxt->loc_logops;
@@ -449,7 +450,7 @@ int llog_close(const struct lu_env *env, struct llog_handle *loghandle)
rc = llog_handle2ops(loghandle, &lop);
if (rc)
goto out;
- if (lop->lop_close == NULL) {
+ if (!lop->lop_close) {
rc = -EOPNOTSUPP;
goto out;
}
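
The llog_process_or_fork() fix is the most substantive change in this file: the old code funneled kthread_run()'s return through PTR_ERR() into an int and tested IS_ERR_VALUE() on that, which can misclassify a valid task pointer; the replacement keeps the pointer and tests IS_ERR() first. Below is a self-contained rendering of the pointer-encoded-errno convention, with the kernel macros re-implemented for illustration.

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *start_thread(int fail)
{
	static int dummy_task;

	/* errors live in the top MAX_ERRNO addresses; real pointers don't */
	return fail ? ERR_PTR(-12 /* -ENOMEM */) : &dummy_task;
}

int main(void)
{
	void *task = start_thread(1);

	if (IS_ERR(task))	/* test the pointer, then extract the errno */
		printf("cannot start thread: rc = %ld\n", PTR_ERR(task));
	else
		printf("thread started\n");
	return 0;
}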
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index 0f05e9c4a5b2..c27d4ec1df9e 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -69,12 +69,12 @@ static int llog_cat_id2handle(const struct lu_env *env,
struct llog_handle *loghandle;
int rc = 0;
- if (cathandle == NULL)
+ if (!cathandle)
return -EBADF;
down_write(&cathandle->lgh_lock);
list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
+ u.phd.phd_entry) {
struct llog_logid *cgl = &loghandle->lgh_id;
if (ostid_id(&cgl->lgl_oi) == ostid_id(&logid->lgl_oi) &&
@@ -130,7 +130,7 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
int rc;
list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
- u.phd.phd_entry) {
+ u.phd.phd_entry) {
/* unlink open-not-created llogs */
list_del_init(&loghandle->u.phd.phd_entry);
llog_close(env, loghandle);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index 9bc51998c05c..826623f528da 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -88,7 +88,8 @@ int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt)
spin_unlock(&obd->obd_dev_lock);
/* obd->obd_starting is needed for the case of cleanup
- * in error case while obd is starting up. */
+ * in error case while obd is starting up.
+ */
LASSERTF(obd->obd_starting == 1 ||
obd->obd_stopping == 1 || obd->obd_set_up == 0,
"wrong obd state: %d/%d/%d\n", !!obd->obd_starting,
@@ -110,11 +111,8 @@ int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
struct obd_llog_group *olg;
int rc, idx;
- LASSERT(ctxt != NULL);
- LASSERT(ctxt != LP_POISON);
-
olg = ctxt->loc_olg;
- LASSERT(olg != NULL);
+ LASSERT(olg);
LASSERT(olg != LP_POISON);
idx = ctxt->loc_idx;
@@ -151,7 +149,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
if (index < 0 || index >= LLOG_MAX_CTXTS)
return -EINVAL;
- LASSERT(olg != NULL);
+ LASSERT(olg);
ctxt = llog_new_ctxt(obd);
if (!ctxt)
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 3aa7393b20c3..967ba2e1bfcb 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -346,7 +346,6 @@ void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg)
__swab32s(&lcfg->lcfg_buflens[i]);
print_lustre_cfg(lcfg);
- return;
}
EXPORT_SYMBOL(lustre_swab_lustre_cfg);
@@ -387,7 +386,8 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
*
* Overwrite fields from the end first, so they are not
* clobbered, and use memmove() instead of memcpy() because
- * the source and target buffers overlap. bug 16771 */
+ * the source and target buffers overlap. bug 16771
+ */
createtime = cm32->cm_createtime;
canceltime = cm32->cm_canceltime;
memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32);
@@ -406,7 +406,5 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
__swab64s(&marker->cm_createtime);
__swab64s(&marker->cm_canceltime);
}
-
- return;
}
EXPORT_SYMBOL(lustre_swab_cfg_marker);
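
lustre_swab_cfg_marker() above shifts fields within a single buffer, which is why its comment insists on memmove() over memcpy(): the source and destination regions overlap, and memcpy() is undefined in that case. A tiny demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "0123456789";

	/* shift "123456789" left by one; regions overlap, so memmove() */
	memmove(buf, buf + 1, strlen(buf + 1) + 1);
	printf("%s\n", buf);	/* prints 123456789 */
	return 0;
}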
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
index 6acc4a10fde9..13aca5b93c6a 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c
@@ -48,14 +48,15 @@ void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount)
int smp_id;
unsigned long flags = 0;
- if (stats == NULL)
+ if (!stats)
return;
LASSERTF(0 <= idx && idx < stats->ls_num,
"idx %d, ls_num %hu\n", idx, stats->ls_num);
/* With per-client stats, statistics are allocated only for
- * single CPU area, so the smp_id should be 0 always. */
+ * single CPU area, so the smp_id should be 0 always.
+ */
smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
if (smp_id < 0)
return;
@@ -96,14 +97,15 @@ void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount)
int smp_id;
unsigned long flags = 0;
- if (stats == NULL)
+ if (!stats)
return;
LASSERTF(0 <= idx && idx < stats->ls_num,
"idx %d, ls_num %hu\n", idx, stats->ls_num);
/* With per-client stats, statistics are allocated only for
- * single CPU area, so the smp_id should be 0 always. */
+ * single CPU area, so the smp_id should be 0 always.
+ */
smp_id = lprocfs_stats_lock(stats, LPROCFS_GET_SMP_ID, &flags);
if (smp_id < 0)
return;
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 51fe15f5d687..d93f42fee420 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -109,7 +109,7 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
__u64 mask = 1;
int i, ret = 0;
- for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
+ for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
if (flags & mask)
ret += snprintf(page + ret, count - ret, "%s%s",
ret ? sep : "", obd_connect_names[i]);
@@ -149,10 +149,10 @@ int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
}
/*
* Need to think these cases :
- * 1. #echo x.00 > /proc/xxx output result : x
- * 2. #echo x.0x > /proc/xxx output result : x.0x
- * 3. #echo x.x0 > /proc/xxx output result : x.x
- * 4. #echo x.xx > /proc/xxx output result : x.xx
+ * 1. #echo x.00 > /sys/xxx output result : x
+ * 2. #echo x.0x > /sys/xxx output result : x.0x
+ * 3. #echo x.x0 > /sys/xxx output result : x.x
+ * 4. #echo x.xx > /sys/xxx output result : x.xx
* Only reserved 2 bits fraction.
*/
for (i = 0; i < (5 - prtn); i++)
@@ -199,7 +199,7 @@ int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count,
if (pbuf == end)
return -EINVAL;
- if (end != NULL && *end == '.') {
+ if (end && *end == '.') {
int temp_val, pow = 1;
int i;
@@ -247,7 +247,7 @@ struct dentry *ldebugfs_add_simple(struct dentry *root,
struct dentry *entry;
umode_t mode = 0;
- if (root == NULL || name == NULL || fops == NULL)
+ if (!root || !name || !fops)
return ERR_PTR(-EINVAL);
if (fops->read)
@@ -256,12 +256,12 @@ struct dentry *ldebugfs_add_simple(struct dentry *root,
mode |= 0200;
entry = debugfs_create_file(name, mode, root, data, fops);
if (IS_ERR_OR_NULL(entry)) {
- CERROR("LprocFS: No memory to create <debugfs> entry %s", name);
+ CERROR("LprocFS: No memory to create <debugfs> entry %s\n", name);
return entry ?: ERR_PTR(-ENOMEM);
}
return entry;
}
-EXPORT_SYMBOL(ldebugfs_add_simple);
+EXPORT_SYMBOL_GPL(ldebugfs_add_simple);
static struct file_operations lprocfs_generic_fops = { };
@@ -272,7 +272,7 @@ int ldebugfs_add_vars(struct dentry *parent,
if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list))
return -EINVAL;
- while (list->name != NULL) {
+ while (list->name) {
struct dentry *entry;
umode_t mode = 0;
@@ -294,14 +294,14 @@ int ldebugfs_add_vars(struct dentry *parent,
}
return 0;
}
-EXPORT_SYMBOL(ldebugfs_add_vars);
+EXPORT_SYMBOL_GPL(ldebugfs_add_vars);
void ldebugfs_remove(struct dentry **entryp)
{
debugfs_remove_recursive(*entryp);
*entryp = NULL;
}
-EXPORT_SYMBOL(ldebugfs_remove);
+EXPORT_SYMBOL_GPL(ldebugfs_remove);
struct dentry *ldebugfs_register(const char *name,
struct dentry *parent,
@@ -327,7 +327,7 @@ struct dentry *ldebugfs_register(const char *name,
out:
return entry;
}
-EXPORT_SYMBOL(ldebugfs_register);
+EXPORT_SYMBOL_GPL(ldebugfs_register);
/* Generic callbacks */
int lprocfs_rd_uint(struct seq_file *m, void *data)
@@ -491,7 +491,7 @@ int lprocfs_rd_server_uuid(struct seq_file *m, void *data)
char *imp_state_name = NULL;
int rc;
- LASSERT(obd != NULL);
+ LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
return rc;
@@ -514,7 +514,7 @@ int lprocfs_rd_conn_uuid(struct seq_file *m, void *data)
struct ptlrpc_connection *conn;
int rc;
- LASSERT(obd != NULL);
+ LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
@@ -543,7 +543,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
memset(cnt, 0, sizeof(*cnt));
- if (stats == NULL) {
+ if (!stats) {
/* set count to 1 to avoid divide-by-zero errs in callers */
cnt->lc_count = 1;
return;
@@ -554,7 +554,7 @@ void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
for (i = 0; i < num_entry; i++) {
- if (stats->ls_percpu[i] == NULL)
+ if (!stats->ls_percpu[i])
continue;
percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
@@ -577,7 +577,7 @@ EXPORT_SYMBOL(lprocfs_stats_collect);
#define flag2str(flag, first) \
do { \
if (imp->imp_##flag) \
- seq_printf(m, "%s" #flag, first ? "" : ", "); \
+ seq_printf(m, "%s" #flag, first ? "" : ", "); \
} while (0)
static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
{
@@ -604,16 +604,16 @@ static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep
int i;
bool first = true;
- for (i = 0; obd_connect_names[i] != NULL; i++, mask <<= 1) {
+ for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
if (flags & mask) {
seq_printf(m, "%s%s",
- first ? sep : "", obd_connect_names[i]);
+ first ? sep : "", obd_connect_names[i]);
first = false;
}
}
if (flags & ~(mask - 1))
seq_printf(m, "%sunknown flags %#llx",
- first ? sep : "", flags & ~(mask - 1));
+ first ? sep : "", flags & ~(mask - 1));
}
int lprocfs_rd_import(struct seq_file *m, void *data)
@@ -629,7 +629,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
int rw = 0;
int rc;
- LASSERT(obd != NULL);
+ LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
return rc;
@@ -637,26 +637,27 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
imp = obd->u.cli.cl_import;
seq_printf(m,
- "import:\n"
- " name: %s\n"
- " target: %s\n"
- " state: %s\n"
- " instance: %u\n"
- " connect_flags: [",
- obd->obd_name,
- obd2cli_tgt(obd),
- ptlrpc_import_state_name(imp->imp_state),
- imp->imp_connect_data.ocd_instance);
- obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags, ", ");
+ "import:\n"
+ " name: %s\n"
+ " target: %s\n"
+ " state: %s\n"
+ " instance: %u\n"
+ " connect_flags: [ ",
+ obd->obd_name,
+ obd2cli_tgt(obd),
+ ptlrpc_import_state_name(imp->imp_state),
+ imp->imp_connect_data.ocd_instance);
+ obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags,
+ ", ");
seq_printf(m,
- "]\n"
- " import_flags: [");
+ " ]\n"
+ " import_flags: [ ");
obd_import_flags2str(imp, m);
seq_printf(m,
- "]\n"
- " connection:\n"
- " failover_nids: [");
+ " ]\n"
+ " connection:\n"
+ " failover_nids: [ ");
spin_lock(&imp->imp_lock);
j = 0;
list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
@@ -665,24 +666,24 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
seq_printf(m, "%s%s", j ? ", " : "", nidstr);
j++;
}
- if (imp->imp_connection != NULL)
+ if (imp->imp_connection)
libcfs_nid2str_r(imp->imp_connection->c_peer.nid,
nidstr, sizeof(nidstr));
else
strncpy(nidstr, "<none>", sizeof(nidstr));
seq_printf(m,
- "]\n"
- " current_connection: %s\n"
- " connection_attempts: %u\n"
- " generation: %u\n"
- " in-progress_invalidations: %u\n",
- nidstr,
- imp->imp_conn_cnt,
- imp->imp_generation,
- atomic_read(&imp->imp_inval_count));
+ " ]\n"
+ " current_connection: %s\n"
+ " connection_attempts: %u\n"
+ " generation: %u\n"
+ " in-progress_invalidations: %u\n",
+ nidstr,
+ imp->imp_conn_cnt,
+ imp->imp_generation,
+ atomic_read(&imp->imp_inval_count));
spin_unlock(&imp->imp_lock);
- if (obd->obd_svc_stats == NULL)
+ if (!obd->obd_svc_stats)
goto out_climp;
header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR];
@@ -696,15 +697,15 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
} else
ret.lc_sum = 0;
seq_printf(m,
- " rpcs:\n"
- " inflight: %u\n"
- " unregistering: %u\n"
- " timeouts: %u\n"
- " avg_waittime: %llu %s\n",
- atomic_read(&imp->imp_inflight),
- atomic_read(&imp->imp_unregistering),
- atomic_read(&imp->imp_timeouts),
- ret.lc_sum, header->lc_units);
+ " rpcs:\n"
+ " inflight: %u\n"
+ " unregistering: %u\n"
+ " timeouts: %u\n"
+ " avg_waittime: %llu %s\n",
+ atomic_read(&imp->imp_inflight),
+ atomic_read(&imp->imp_unregistering),
+ atomic_read(&imp->imp_timeouts),
+ ret.lc_sum, header->lc_units);
k = 0;
for (j = 0; j < IMP_AT_MAX_PORTALS; j++) {
@@ -714,20 +715,20 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
at_get(&imp->imp_at.iat_service_estimate[j]));
}
seq_printf(m,
- " service_estimates:\n"
- " services: %u sec\n"
- " network: %u sec\n",
- k,
- at_get(&imp->imp_at.iat_net_latency));
+ " service_estimates:\n"
+ " services: %u sec\n"
+ " network: %u sec\n",
+ k,
+ at_get(&imp->imp_at.iat_net_latency));
seq_printf(m,
- " transactions:\n"
- " last_replay: %llu\n"
- " peer_committed: %llu\n"
- " last_checked: %llu\n",
- imp->imp_last_replay_transno,
- imp->imp_peer_committed_transno,
- imp->imp_last_transno_checked);
+ " transactions:\n"
+ " last_replay: %llu\n"
+ " peer_committed: %llu\n"
+ " last_checked: %llu\n",
+ imp->imp_last_replay_transno,
+ imp->imp_peer_committed_transno,
+ imp->imp_last_transno_checked);
/* avg data rates */
for (rw = 0; rw <= 1; rw++) {
@@ -741,10 +742,10 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
do_div(sum, ret.lc_count);
ret.lc_sum = sum;
seq_printf(m,
- " %s_data_averages:\n"
- " bytes_per_rpc: %llu\n",
- rw ? "write" : "read",
- ret.lc_sum);
+ " %s_data_averages:\n"
+ " bytes_per_rpc: %llu\n",
+ rw ? "write" : "read",
+ ret.lc_sum);
}
k = (int)ret.lc_sum;
j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES;
@@ -757,13 +758,13 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
do_div(sum, ret.lc_count);
ret.lc_sum = sum;
seq_printf(m,
- " %s_per_rpc: %llu\n",
- header->lc_units, ret.lc_sum);
+ " %s_per_rpc: %llu\n",
+ header->lc_units, ret.lc_sum);
j = (int)ret.lc_sum;
if (j > 0)
seq_printf(m,
- " MB_per_sec: %u.%.02u\n",
- k / j, (100 * k / j) % 100);
+ " MB_per_sec: %u.%.02u\n",
+ k / j, (100 * k / j) % 100);
}
}
@@ -779,7 +780,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data)
struct obd_import *imp;
int j, k, rc;
- LASSERT(obd != NULL);
+ LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
return rc;
@@ -787,7 +788,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data)
imp = obd->u.cli.cl_import;
seq_printf(m, "current_state: %s\n",
- ptlrpc_import_state_name(imp->imp_state));
+ ptlrpc_import_state_name(imp->imp_state));
seq_printf(m, "state_history:\n");
k = imp->imp_state_hist_idx;
for (j = 0; j < IMP_STATE_HIST_LEN; j++) {
@@ -795,7 +796,7 @@ int lprocfs_rd_state(struct seq_file *m, void *data)
&imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN];
if (ish->ish_state == 0)
continue;
- seq_printf(m, " - [%lld, %s]\n", (s64)ish->ish_time,
+ seq_printf(m, " - [ %lld, %s ]\n", (s64)ish->ish_time,
ptlrpc_import_state_name(ish->ish_state));
}
@@ -825,7 +826,7 @@ int lprocfs_rd_timeouts(struct seq_file *m, void *data)
struct dhms ts;
int i, rc;
- LASSERT(obd != NULL);
+ LASSERT(obd);
rc = lprocfs_climp_check(obd);
if (rc)
return rc;
@@ -942,7 +943,7 @@ int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
return rc;
}
-EXPORT_SYMBOL(lprocfs_obd_setup);
+EXPORT_SYMBOL_GPL(lprocfs_obd_setup);
int lprocfs_obd_cleanup(struct obd_device *obd)
{
@@ -957,7 +958,7 @@ int lprocfs_obd_cleanup(struct obd_device *obd)
return 0;
}
-EXPORT_SYMBOL(lprocfs_obd_cleanup);
+EXPORT_SYMBOL_GPL(lprocfs_obd_cleanup);
int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
{
@@ -967,12 +968,12 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
unsigned long flags = 0;
int i;
- LASSERT(stats->ls_percpu[cpuid] == NULL);
+ LASSERT(!stats->ls_percpu[cpuid]);
LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
percpusize = lprocfs_stats_counter_size(stats);
LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
- if (stats->ls_percpu[cpuid] != NULL) {
+ if (stats->ls_percpu[cpuid]) {
rc = 0;
if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
@@ -1017,7 +1018,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
/* alloc percpu pointers for all possible cpu slots */
LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
- if (stats == NULL)
+ if (!stats)
return NULL;
stats->ls_num = num;
@@ -1027,14 +1028,14 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
/* alloc num of counter headers */
LIBCFS_ALLOC(stats->ls_cnt_header,
stats->ls_num * sizeof(struct lprocfs_counter_header));
- if (stats->ls_cnt_header == NULL)
+ if (!stats->ls_cnt_header)
goto fail;
if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) {
/* contains only one set counters */
percpusize = lprocfs_stats_counter_size(stats);
LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize);
- if (stats->ls_percpu[0] == NULL)
+ if (!stats->ls_percpu[0])
goto fail;
stats->ls_biggest_alloc_num = 1;
} else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) {
@@ -1059,7 +1060,7 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
unsigned int percpusize;
unsigned int i;
- if (stats == NULL || stats->ls_num == 0)
+ if (!stats || stats->ls_num == 0)
return;
*statsh = NULL;
@@ -1070,9 +1071,9 @@ void lprocfs_free_stats(struct lprocfs_stats **statsh)
percpusize = lprocfs_stats_counter_size(stats);
for (i = 0; i < num_entry; i++)
- if (stats->ls_percpu[i] != NULL)
+ if (stats->ls_percpu[i])
LIBCFS_FREE(stats->ls_percpu[i], percpusize);
- if (stats->ls_cnt_header != NULL)
+ if (stats->ls_cnt_header)
LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num *
sizeof(struct lprocfs_counter_header));
LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
@@ -1090,7 +1091,7 @@ void lprocfs_clear_stats(struct lprocfs_stats *stats)
num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
for (i = 0; i < num_entry; i++) {
- if (stats->ls_percpu[i] == NULL)
+ if (!stats->ls_percpu[i])
continue;
for (j = 0; j < stats->ls_num; j++) {
percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
@@ -1196,7 +1197,7 @@ static int lprocfs_stats_seq_open(struct inode *inode, struct file *file)
return 0;
}
-struct file_operations lprocfs_stats_seq_fops = {
+static const struct file_operations lprocfs_stats_seq_fops = {
.owner = THIS_MODULE,
.open = lprocfs_stats_seq_open,
.read = seq_read,
@@ -1206,7 +1207,7 @@ struct file_operations lprocfs_stats_seq_fops = {
};
int ldebugfs_register_stats(struct dentry *parent, const char *name,
- struct lprocfs_stats *stats)
+ struct lprocfs_stats *stats)
{
struct dentry *entry;
@@ -1219,7 +1220,7 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name,
return 0;
}
-EXPORT_SYMBOL(ldebugfs_register_stats);
+EXPORT_SYMBOL_GPL(ldebugfs_register_stats);
void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
unsigned conf, const char *name, const char *units)
@@ -1230,10 +1231,8 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
unsigned int i;
unsigned int num_cpu;
- LASSERT(stats != NULL);
-
header = &stats->ls_cnt_header[index];
- LASSERTF(header != NULL, "Failed to allocate stats header:[%d]%s/%s\n",
+ LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n",
index, name, units);
header->lc_config = conf;
@@ -1242,7 +1241,7 @@ void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
for (i = 0; i < num_cpu; ++i) {
- if (stats->ls_percpu[i] == NULL)
+ if (!stats->ls_percpu[i])
continue;
percpu_cntr = lprocfs_stats_counter_get(stats, i, index);
percpu_cntr->lc_count = 0;
@@ -1270,7 +1269,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
{
__s64 ret = 0;
- if (lc == NULL || header == NULL)
+ if (!lc || !header)
return 0;
switch (field) {
@@ -1319,8 +1318,8 @@ int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count,
}
EXPORT_SYMBOL(lprocfs_write_u64_helper);
-int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
- __u64 *val, int mult)
+int lprocfs_write_frac_u64_helper(const char __user *buffer,
+ unsigned long count, __u64 *val, int mult)
{
char kernbuf[22], *end, *pbuf;
__u64 whole, frac = 0, units;
@@ -1360,17 +1359,19 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count,
}
units = 1;
- switch (tolower(*end)) {
- case 'p':
- units <<= 10;
- case 't':
- units <<= 10;
- case 'g':
- units <<= 10;
- case 'm':
- units <<= 10;
- case 'k':
- units <<= 10;
+ if (end) {
+ switch (tolower(*end)) {
+ case 'p':
+ units <<= 10;
+ case 't':
+ units <<= 10;
+ case 'g':
+ units <<= 10;
+ case 'm':
+ units <<= 10;
+ case 'k':
+ units <<= 10;
+ }
}
/* Specified units override the multiplier */
if (units > 1)
@@ -1412,7 +1413,7 @@ char *lprocfs_find_named_value(const char *buffer, const char *name,
/* there is no strnstr() in rhel5 and ubuntu kernels */
val = lprocfs_strnstr(buffer, name, buflen);
- if (val == NULL)
+ if (!val)
return (char *)buffer;
val += strlen(name); /* skip prefix */
@@ -1429,11 +1430,9 @@ char *lprocfs_find_named_value(const char *buffer, const char *name,
}
EXPORT_SYMBOL(lprocfs_find_named_value);
-int ldebugfs_seq_create(struct dentry *parent,
- const char *name,
- umode_t mode,
- const struct file_operations *seq_fops,
- void *data)
+int ldebugfs_seq_create(struct dentry *parent, const char *name,
+ umode_t mode, const struct file_operations *seq_fops,
+ void *data)
{
struct dentry *entry;
@@ -1446,7 +1445,7 @@ int ldebugfs_seq_create(struct dentry *parent,
return 0;
}
-EXPORT_SYMBOL(ldebugfs_seq_create);
+EXPORT_SYMBOL_GPL(ldebugfs_seq_create);
int ldebugfs_obd_seq_create(struct obd_device *dev,
const char *name,
@@ -1457,7 +1456,7 @@ int ldebugfs_obd_seq_create(struct obd_device *dev,
return ldebugfs_seq_create(dev->obd_debugfs_entry, name,
mode, seq_fops, data);
}
-EXPORT_SYMBOL(ldebugfs_obd_seq_create);
+EXPORT_SYMBOL_GPL(ldebugfs_obd_seq_create);
void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
{
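
The units-suffix hunk in lprocfs_write_frac_u64_helper() above wraps the switch in if (end) but deliberately keeps the case fallthrough: each label adds one <<10 step, so 'k' applies one and 'p' cascades through all five. The same cascade, standalone:

#include <ctype.h>
#include <stdio.h>

static unsigned long long suffix_units(char c)
{
	unsigned long long units = 1;

	switch (tolower(c)) {
	case 'p':
		units <<= 10;	/* fall through */
	case 't':
		units <<= 10;	/* fall through */
	case 'g':
		units <<= 10;	/* fall through */
	case 'm':
		units <<= 10;	/* fall through */
	case 'k':
		units <<= 10;
	}
	return units;
}

int main(void)
{
	printf("k=%llu m=%llu g=%llu\n",
	       suffix_units('k'), suffix_units('m'), suffix_units('G'));
	return 0;
}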
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index ce248f4072c2..978568ada8e9 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -86,13 +86,12 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
*/
fid = lu_object_fid(o);
if (fid_is_zero(fid)) {
- LASSERT(top->loh_hash.next == NULL
- && top->loh_hash.pprev == NULL);
+ LASSERT(!top->loh_hash.next && !top->loh_hash.pprev);
LASSERT(list_empty(&top->loh_lru));
if (!atomic_dec_and_test(&top->loh_ref))
return;
list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
- if (o->lo_ops->loo_object_release != NULL)
+ if (o->lo_ops->loo_object_release)
o->lo_ops->loo_object_release(env, o);
}
lu_object_free(env, orig);
@@ -119,7 +118,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
* layers, and notify them that object is no longer busy.
*/
list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
- if (o->lo_ops->loo_object_release != NULL)
+ if (o->lo_ops->loo_object_release)
o->lo_ops->loo_object_release(env, o);
}
@@ -135,7 +134,7 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
}
/*
- * If object is dying (will not be cached), removed it
+ * If object is dying (will not be cached), then remove it
* from hash table and LRU.
*
* This is done with hash table and LRU lists locked. As the only
@@ -210,7 +209,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
* lu_object_header.
*/
top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
- if (top == NULL)
+ if (!top)
return ERR_PTR(-ENOMEM);
if (IS_ERR(top))
return top;
@@ -245,7 +244,7 @@ next:
} while (!clean);
list_for_each_entry_reverse(scan, layers, lo_linkage) {
- if (scan->lo_ops->loo_object_start != NULL) {
+ if (scan->lo_ops->loo_object_start) {
result = scan->lo_ops->loo_object_start(env, scan);
if (result != 0) {
lu_object_free(env, top);
@@ -276,7 +275,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
* First call ->loo_object_delete() method to release all resources.
*/
list_for_each_entry_reverse(scan, layers, lo_linkage) {
- if (scan->lo_ops->loo_object_delete != NULL)
+ if (scan->lo_ops->loo_object_delete)
scan->lo_ops->loo_object_delete(env, scan);
}
@@ -296,7 +295,6 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
*/
o = container_of0(splice.prev, struct lu_object, lo_linkage);
list_del_init(&o->lo_linkage);
- LASSERT(o->lo_ops->loo_object_free != NULL);
o->lo_ops->loo_object_free(env, o);
}
@@ -451,7 +449,6 @@ int lu_cdebug_printer(const struct lu_env *env,
va_start(args, format);
key = lu_context_key_get(&env->le_ctx, &lu_global_key);
- LASSERT(key != NULL);
used = strlen(key->lck_area);
complete = format[strlen(format) - 1] == '\n';
@@ -462,7 +459,7 @@ int lu_cdebug_printer(const struct lu_env *env,
ARRAY_SIZE(key->lck_area) - used, format, args);
if (complete) {
if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
- libcfs_debug_msg(msgdata, "%s", key->lck_area);
+ libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
key->lck_area[0] = 0;
}
va_end(args);
@@ -508,7 +505,7 @@ void lu_object_print(const struct lu_env *env, void *cookie,
(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
o->lo_dev->ld_type->ldt_name, o);
- if (o->lo_ops->loo_object_print != NULL)
+ if (o->lo_ops->loo_object_print)
(*o->lo_ops->loo_object_print)(env, cookie, printer, o);
(*printer)(env, cookie, "\n");
@@ -535,9 +532,10 @@ static struct lu_object *htable_lookup(struct lu_site *s,
*version = ver;
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
/* cfs_hash_bd_peek_locked is a somehow "internal" function
- * of cfs_hash, it doesn't add refcount on object. */
+ * of cfs_hash, it doesn't add refcount on object.
+ */
hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
- if (hnode == NULL) {
+ if (!hnode) {
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
return ERR_PTR(-ENOENT);
}
@@ -636,7 +634,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
* If dying object is found during index search, add @waiter to the
* site wait-queue and return ERR_PTR(-EAGAIN).
*/
- if (conf != NULL && conf->loc_flags & LOC_F_NEW)
+ if (conf && conf->loc_flags & LOC_F_NEW)
return lu_object_new(env, dev, f, conf);
s = dev->ld_site;
@@ -715,7 +713,7 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env,
top = lu_object_find(env, dev, f, conf);
if (!IS_ERR(top)) {
obj = lu_object_locate(top->lo_header, dev->ld_type);
- if (obj == NULL)
+ if (!obj)
lu_object_put(env, top);
} else
obj = top;
@@ -842,8 +840,8 @@ static int lu_htable_order(void)
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
- if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
- cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
+ if (cache_size > 1 << (30 - PAGE_SHIFT))
+ cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
#endif
/* clear off unreasonable cache setting. */
@@ -855,7 +853,7 @@ static int lu_htable_order(void)
lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
}
cache_size = cache_size / 100 * lu_cache_percent *
- (PAGE_CACHE_SIZE / 1024);
+ (PAGE_SIZE / 1024);
for (bits = 1; (1 << bits) < cache_size; ++bits) {
;
@@ -966,11 +964,11 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
CFS_HASH_NO_ITEMREF |
CFS_HASH_DEPTH |
CFS_HASH_ASSERT_EMPTY);
- if (s->ls_obj_hash != NULL)
+ if (s->ls_obj_hash)
break;
}
- if (s->ls_obj_hash == NULL) {
+ if (!s->ls_obj_hash) {
CERROR("failed to create lu_site hash with bits: %d\n", bits);
return -ENOMEM;
}
@@ -982,7 +980,7 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
}
s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
- if (s->ls_stats == NULL) {
+ if (!s->ls_stats) {
cfs_hash_putref(s->ls_obj_hash);
s->ls_obj_hash = NULL;
return -ENOMEM;
@@ -1031,19 +1029,19 @@ void lu_site_fini(struct lu_site *s)
list_del_init(&s->ls_linkage);
mutex_unlock(&lu_sites_guard);
- if (s->ls_obj_hash != NULL) {
+ if (s->ls_obj_hash) {
cfs_hash_putref(s->ls_obj_hash);
s->ls_obj_hash = NULL;
}
- if (s->ls_top_dev != NULL) {
+ if (s->ls_top_dev) {
s->ls_top_dev->ld_site = NULL;
lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
lu_device_put(s->ls_top_dev);
s->ls_top_dev = NULL;
}
- if (s->ls_stats != NULL)
+ if (s->ls_stats)
lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);
@@ -1088,7 +1086,7 @@ EXPORT_SYMBOL(lu_device_put);
*/
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
- if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
+ if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start)
t->ldt_ops->ldto_start(t);
memset(d, 0, sizeof(*d));
atomic_set(&d->ld_ref, 0);
@@ -1107,7 +1105,7 @@ void lu_device_fini(struct lu_device *d)
struct lu_device_type *t;
t = d->ld_type;
- if (d->ld_obd != NULL) {
+ if (d->ld_obd) {
d->ld_obd->obd_lu_dev = NULL;
d->ld_obd = NULL;
}
@@ -1116,7 +1114,7 @@ void lu_device_fini(struct lu_device *d)
LASSERTF(atomic_read(&d->ld_ref) == 0,
"Refcount is %u\n", atomic_read(&d->ld_ref));
LASSERT(t->ldt_device_nr > 0);
- if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
+ if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop)
t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
@@ -1148,7 +1146,7 @@ void lu_object_fini(struct lu_object *o)
LASSERT(list_empty(&o->lo_linkage));
- if (dev != NULL) {
+ if (dev) {
lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
"lu_object", o);
lu_device_put(dev);
@@ -1239,7 +1237,7 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
struct lu_device *next;
lu_site_purge(env, site, ~0);
- for (scan = top; scan != NULL; scan = next) {
+ for (scan = top; scan; scan = next) {
next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
lu_device_put(scan);
@@ -1248,13 +1246,13 @@ void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
/* purge again. */
lu_site_purge(env, site, ~0);
- for (scan = top; scan != NULL; scan = next) {
+ for (scan = top; scan; scan = next) {
const struct lu_device_type *ldt = scan->ld_type;
struct obd_type *type;
next = ldt->ldt_ops->ldto_device_free(env, scan);
type = ldt->ldt_obd_type;
- if (type != NULL) {
+ if (type) {
type->typ_refcnt--;
class_put_type(type);
}
@@ -1289,14 +1287,14 @@ int lu_context_key_register(struct lu_context_key *key)
int result;
int i;
- LASSERT(key->lct_init != NULL);
- LASSERT(key->lct_fini != NULL);
+ LASSERT(key->lct_init);
+ LASSERT(key->lct_fini);
LASSERT(key->lct_tags != 0);
result = -ENFILE;
spin_lock(&lu_keys_guard);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
- if (lu_keys[i] == NULL) {
+ if (!lu_keys[i]) {
key->lct_index = i;
atomic_set(&key->lct_used, 1);
lu_keys[i] = key;
@@ -1313,12 +1311,10 @@ EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
- if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
+ if (ctx->lc_value && ctx->lc_value[index]) {
struct lu_context_key *key;
key = lu_keys[index];
- LASSERT(key != NULL);
- LASSERT(key->lct_fini != NULL);
LASSERT(atomic_read(&key->lct_used) > 1);
key->lct_fini(ctx, key, ctx->lc_value[index]);
@@ -1376,7 +1372,7 @@ int lu_context_key_register_many(struct lu_context_key *k, ...)
if (result)
break;
key = va_arg(args, struct lu_context_key *);
- } while (key != NULL);
+ } while (key);
va_end(args);
if (result != 0) {
@@ -1404,7 +1400,7 @@ void lu_context_key_degister_many(struct lu_context_key *k, ...)
do {
lu_context_key_degister(k);
k = va_arg(args, struct lu_context_key*);
- } while (k != NULL);
+ } while (k);
va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);
@@ -1420,7 +1416,7 @@ void lu_context_key_revive_many(struct lu_context_key *k, ...)
do {
lu_context_key_revive(k);
k = va_arg(args, struct lu_context_key*);
- } while (k != NULL);
+ } while (k);
va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);
@@ -1436,7 +1432,7 @@ void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
do {
lu_context_key_quiesce(k);
k = va_arg(args, struct lu_context_key*);
- } while (k != NULL);
+ } while (k);
va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
@@ -1477,8 +1473,7 @@ void lu_context_key_quiesce(struct lu_context_key *key)
* XXX memory barrier has to go here.
*/
spin_lock(&lu_keys_guard);
- list_for_each_entry(ctx, &lu_context_remembered,
- lc_remember)
+ list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
key_fini(ctx, key->lct_index);
spin_unlock(&lu_keys_guard);
++key_set_version;
@@ -1497,7 +1492,7 @@ static void keys_fini(struct lu_context *ctx)
{
int i;
- if (ctx->lc_value == NULL)
+ if (!ctx->lc_value)
return;
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
@@ -1511,12 +1506,12 @@ static int keys_fill(struct lu_context *ctx)
{
int i;
- LINVRNT(ctx->lc_value != NULL);
+ LINVRNT(ctx->lc_value);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
struct lu_context_key *key;
key = lu_keys[i];
- if (ctx->lc_value[i] == NULL && key != NULL &&
+ if (!ctx->lc_value[i] && key &&
(key->lct_tags & ctx->lc_tags) &&
/*
* Don't create values for a LCT_QUIESCENT key, as this
@@ -1525,7 +1520,7 @@ static int keys_fill(struct lu_context *ctx)
!(key->lct_tags & LCT_QUIESCENT)) {
void *value;
- LINVRNT(key->lct_init != NULL);
+ LINVRNT(key->lct_init);
LINVRNT(key->lct_index == i);
value = key->lct_init(ctx, key);
@@ -1542,7 +1537,7 @@ static int keys_fill(struct lu_context *ctx)
* value.
*/
ctx->lc_value[i] = value;
- if (key->lct_exit != NULL)
+ if (key->lct_exit)
ctx->lc_tags |= LCT_HAS_EXIT;
}
ctx->lc_version = key_set_version;
@@ -1554,7 +1549,7 @@ static int keys_init(struct lu_context *ctx)
{
ctx->lc_value = kcalloc(ARRAY_SIZE(lu_keys), sizeof(ctx->lc_value[0]),
GFP_NOFS);
- if (likely(ctx->lc_value != NULL))
+ if (likely(ctx->lc_value))
return keys_fill(ctx);
return -ENOMEM;
@@ -1626,14 +1621,13 @@ void lu_context_exit(struct lu_context *ctx)
LINVRNT(ctx->lc_state == LCS_ENTERED);
ctx->lc_state = LCS_LEFT;
- if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
+ if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
- if (ctx->lc_value[i] != NULL) {
+ if (ctx->lc_value[i]) {
struct lu_context_key *key;
key = lu_keys[i];
- LASSERT(key != NULL);
- if (key->lct_exit != NULL)
+ if (key->lct_exit)
key->lct_exit(ctx,
key, ctx->lc_value[i]);
}
@@ -1688,7 +1682,7 @@ int lu_env_refill(struct lu_env *env)
int result;
result = lu_context_refill(&env->le_ctx);
- if (result == 0 && env->le_ses != NULL)
+ if (result == 0 && env->le_ses)
result = lu_context_refill(env->le_ses);
return result;
}
@@ -1922,11 +1916,11 @@ int lu_kmem_init(struct lu_kmem_descr *caches)
int result;
struct lu_kmem_descr *iter = caches;
- for (result = 0; iter->ckd_cache != NULL; ++iter) {
+ for (result = 0; iter->ckd_cache; ++iter) {
*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
iter->ckd_size,
0, 0, NULL);
- if (*iter->ckd_cache == NULL) {
+ if (!*iter->ckd_cache) {
result = -ENOMEM;
/* free all previously allocated caches */
lu_kmem_fini(caches);
@@ -1943,7 +1937,7 @@ EXPORT_SYMBOL(lu_kmem_init);
*/
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
- for (; caches->ckd_cache != NULL; ++caches) {
+ for (; caches->ckd_cache; ++caches) {
kmem_cache_destroy(*caches->ckd_cache);
*caches->ckd_cache = NULL;
}
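
lu_htable_order(), patched above for the PAGE_SHIFT rename, sizes the object hash by doubling until 1 << bits covers the computed entry budget. The sizing loop in isolation, with an illustrative input:

#include <stdio.h>

static int htable_order(unsigned long cache_size)
{
	int bits;

	for (bits = 1; (1UL << bits) < cache_size; ++bits)
		;
	return bits;
}

int main(void)
{
	printf("order for 20000 entries: %d\n", htable_order(20000)); /* 15 */
	return 0;
}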
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index fb9147cc607f..403ceea06186 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -65,7 +65,7 @@ void class_handle_hash(struct portals_handle *h,
{
struct handle_bucket *bucket;
- LASSERT(h != NULL);
+ LASSERT(h);
LASSERT(list_empty(&h->h_link));
/*
@@ -140,10 +140,11 @@ void *class_handle2object(__u64 cookie)
struct portals_handle *h;
void *retval = NULL;
- LASSERT(handle_hash != NULL);
+ LASSERT(handle_hash);
/* Be careful when you want to change this code. See the
- * rcu_read_lock() definition on top this file. - jxiong */
+ * rcu_read_lock() definition at the top of this file. - jxiong
+ */
bucket = handle_hash + (cookie & HANDLE_HASH_MASK);
rcu_read_lock();
@@ -170,7 +171,7 @@ void class_handle_free_cb(struct rcu_head *rcu)
struct portals_handle *h = RCU2HANDLE(rcu);
void *ptr = (void *)(unsigned long)h->h_cookie;
- if (h->h_ops->hop_free != NULL)
+ if (h->h_ops->hop_free)
h->h_ops->hop_free(ptr, h->h_size);
else
kfree(ptr);
@@ -183,11 +184,11 @@ int class_handle_init(void)
struct timespec64 ts;
int seed[2];
- LASSERT(handle_hash == NULL);
+ LASSERT(!handle_hash);
handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE,
GFP_NOFS);
- if (handle_hash == NULL)
+ if (!handle_hash)
return -ENOMEM;
spin_lock_init(&handle_base_lock);
@@ -234,7 +235,7 @@ void class_handle_cleanup(void)
{
int count;
- LASSERT(handle_hash != NULL);
+ LASSERT(handle_hash);
count = cleanup_all_handles();
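
class_handle_free_cb() keeps its dispatch shape through this cleanup: use the type-specific hop_free() when the ops table supplies one, otherwise fall back to kfree(). A userspace rendering of that optional-destructor pattern; the names mirror the kernel ones, but the code is illustrative.

#include <stdio.h>
#include <stdlib.h>

struct ops { void (*hop_free)(void *ptr, int size); };

static void handle_free(const struct ops *ops, void *ptr, int size)
{
	if (ops && ops->hop_free)
		ops->hop_free(ptr, size);	/* type-specific teardown */
	else
		free(ptr);			/* default path */
}

static void noisy_free(void *ptr, int size)
{
	printf("freeing %d bytes\n", size);
	free(ptr);
}

int main(void)
{
	struct ops ops = { .hop_free = noisy_free };

	handle_free(&ops, malloc(32), 32);
	handle_free(NULL, malloc(16), 16);
	return 0;
}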
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index d6184f821cd0..5f812460b3ea 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -93,7 +93,8 @@ int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
EXPORT_SYMBOL(lustre_uuid_to_peer);
/* Add a nid to a niduuid. Multiple nids can be added to a single uuid;
- LNET will choose the best one. */
+ * LNET will choose the best one.
+ */
int class_add_uuid(const char *uuid, __u64 nid)
{
struct uuid_nid_data *data, *entry;
@@ -149,9 +150,10 @@ int class_del_uuid(const char *uuid)
{
LIST_HEAD(deathrow);
struct uuid_nid_data *data;
+ struct uuid_nid_data *temp;
spin_lock(&g_uuid_lock);
- if (uuid != NULL) {
+ if (uuid) {
struct obd_uuid tmp;
obd_str2uuid(&tmp, uuid);
@@ -165,14 +167,12 @@ int class_del_uuid(const char *uuid)
list_splice_init(&g_uuid_list, &deathrow);
spin_unlock(&g_uuid_lock);
- if (uuid != NULL && list_empty(&deathrow)) {
+ if (uuid && list_empty(&deathrow)) {
CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
return -EINVAL;
}
- while (!list_empty(&deathrow)) {
- data = list_entry(deathrow.next, struct uuid_nid_data,
- un_list);
+ list_for_each_entry_safe(data, temp, &deathrow, un_list) {
list_del(&data->un_list);
CDEBUG(D_INFO, "del uuid %s %s/%d\n",
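
The class_del_uuid() rewrite above swaps a while (!list_empty()) pop loop for list_for_each_entry_safe(), whose whole point is to cache the next node before the body frees the current one. The same discipline on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int v; };

int main(void)
{
	struct node *head = NULL, *n, *next;

	for (int i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->v = i;
		n->next = head;
		head = n;
	}

	/* "safe" traversal: grab ->next before freeing the entry */
	for (n = head; n; n = next) {
		next = n->next;
		printf("del %d\n", n->v);
		free(n);
	}
	return 0;
}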
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 49cdc647910c..5395e994deab 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -71,8 +71,9 @@ int class_find_param(char *buf, char *key, char **valp)
EXPORT_SYMBOL(class_find_param);
/* returns 0 if this is the first key in the buffer, else 1.
- valp points to first char after key. */
-static int class_match_param(char *buf, char *key, char **valp)
+ * valp points to first char after key.
+ */
+static int class_match_param(char *buf, const char *key, char **valp)
{
if (!buf)
return 1;
@@ -114,9 +115,10 @@ enum {
};
/* 0 is good nid,
- 1 not found
- < 0 error
- endh is set to next separator */
+ * 1 not found
+ * < 0 error
+ * endh is set to next separator
+ */
static int class_parse_value(char *buf, int opc, void *value, char **endh,
int quiet)
{
@@ -210,7 +212,7 @@ static int class_attach(struct lustre_cfg *lcfg)
name, typename, rc);
goto out;
}
- LASSERTF(obd != NULL, "Cannot get obd device %s of type %s\n",
+ LASSERTF(obd, "Cannot get obd device %s of type %s\n",
name, typename);
LASSERTF(obd->obd_magic == OBD_DEVICE_MAGIC,
"obd %p obd_magic %08X != %08X\n",
@@ -230,7 +232,8 @@ static int class_attach(struct lustre_cfg *lcfg)
mutex_init(&obd->obd_dev_mutex);
spin_lock_init(&obd->obd_osfs_lock);
/* obd->obd_osfs_age must be set to a value in the distant
- * past to guarantee a fresh statfs is fetched on mount. */
+ * past to guarantee a fresh statfs is fetched on mount.
+ */
obd->obd_osfs_age = cfs_time_shift_64(-1000);
/* XXX belongs in setup not attach */
@@ -272,9 +275,9 @@ static int class_attach(struct lustre_cfg *lcfg)
obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
return 0;
out:
- if (obd != NULL) {
+ if (obd)
class_release_dev(obd);
- }
+
return rc;
}
@@ -286,7 +289,7 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
int err = 0;
struct obd_export *exp;
- LASSERT(obd != NULL);
+ LASSERT(obd);
LASSERTF(obd == class_num2obd(obd->obd_minor),
"obd %p != obd_devs[%d] %p\n",
obd, obd->obd_minor, class_num2obd(obd->obd_minor));
@@ -315,7 +318,8 @@ static int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
return -EEXIST;
}
/* just leave this on forever. I can't use obd_set_up here because
- other fns check that status, and we're not actually set up yet. */
+ * other fns check that status, and we're not actually set up yet.
+ */
obd->obd_starting = 1;
obd->obd_uuid_hash = NULL;
spin_unlock(&obd->obd_dev_lock);
@@ -503,7 +507,8 @@ void class_decref(struct obd_device *obd, const char *scope, const void *source)
if ((refs == 1) && obd->obd_stopping) {
/* All exports have been destroyed; there should
- be no more in-progress ops by this point.*/
+ * be no more in-progress ops by this point.
+ */
spin_lock(&obd->obd_self_export->exp_lock);
obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
@@ -723,7 +728,8 @@ static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg)
}
/* We can't call ll_process_config or lquota_process_config directly because
- * it lives in a module that must be loaded after this one. */
+ * they live in modules that must be loaded after this one.
+ */
static int (*client_process_config)(struct lustre_cfg *lcfg);
static int (*quota_process_config)(struct lustre_cfg *lcfg);
@@ -812,7 +818,8 @@ int class_process_config(struct lustre_cfg *lcfg)
lustre_cfg_string(lcfg, 2),
lustre_cfg_string(lcfg, 3));
/* set these mount options somewhere, so ll_fill_super
- * can find them. */
+ * can find them.
+ */
err = class_add_profile(LUSTRE_CFG_BUFLEN(lcfg, 1),
lustre_cfg_string(lcfg, 1),
LUSTRE_CFG_BUFLEN(lcfg, 2),
@@ -988,8 +995,9 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
fakefile.private_data = &fake_seqfile;
fake_seqfile.private = data;
/* e.g. tunefs.lustre --param mdt.group_upcall=foo /r/tmp/lustre-mdt
- or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar
- or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36 */
+ * or lctl conf_param lustre-MDT0000.mdt.group_upcall=bar
+ * or lctl conf_param lustre-OST0000.osc.max_dirty_mb=36
+ */
for (i = 1; i < lcfg->lcfg_bufcount; i++) {
key = lustre_cfg_buf(lcfg, i);
/* Strip off prefix */
@@ -1008,7 +1016,7 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
/* Search proc entries */
while (lvars[j].name) {
var = &lvars[j];
- if (class_match_param(key, (char *)var->name, NULL) == 0
+ if (!class_match_param(key, var->name, NULL)
&& keylen == strlen(var->name)) {
matched++;
rc = -EROFS;
@@ -1027,9 +1035,10 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
}
if (!matched) {
/* If the prefix doesn't match, return error so we
- can pass it down the stack */
+ * can pass it down the stack
+ */
if (strnchr(key, keylen, '.'))
- return -ENOSYS;
+ return -ENOSYS;
CERROR("%s: unknown param %s\n",
(char *)lustre_cfg_string(lcfg, 0), key);
/* rc = -EINVAL; continue parsing other params */
@@ -1040,9 +1049,9 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
rc = 0;
} else {
CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n",
- lustre_cfg_string(lcfg, 0),
- (int)strlen(prefix) - 1, prefix,
- (int)(sval - key - 1), key, sval);
+ lustre_cfg_string(lcfg, 0),
+ (int)strlen(prefix) - 1, prefix,
+ (int)(sval - key - 1), key, sval);
}
}
@@ -1116,7 +1125,8 @@ int class_config_llog_handler(const struct lu_env *env,
}
}
/* A config command without a start marker before it is
- illegal (post 146) */
+ * illegal (post 146)
+ */
if (!(clli->cfg_flags & CFG_F_COMPAT146) &&
!(clli->cfg_flags & CFG_F_MARKER) &&
(lcfg->lcfg_command != LCFG_MARKER)) {
@@ -1182,8 +1192,9 @@ int class_config_llog_handler(const struct lu_env *env,
}
/* we override the llog's uuid for clients, to insure they
- are unique */
- if (clli && clli->cfg_instance != NULL &&
+ * are unique
+ */
+ if (clli && clli->cfg_instance &&
lcfg->lcfg_command == LCFG_ATTACH) {
lustre_cfg_bufs_set_string(&bufs, 2,
clli->cfg_uuid.uuid);
@@ -1211,7 +1222,8 @@ int class_config_llog_handler(const struct lu_env *env,
lcfg_new->lcfg_flags = lcfg->lcfg_flags;
/* XXX Hack to try to remain binary compatible with
- * pre-newconfig logs */
+ * pre-newconfig logs
+ */
if (lcfg->lcfg_nal != 0 && /* pre-newconfig log? */
(lcfg->lcfg_nid >> 32) == 0) {
__u32 addr = (__u32)(lcfg->lcfg_nid & 0xffffffff);
@@ -1270,7 +1282,7 @@ int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
if (cfg) {
cd.lpcd_first_idx = cfg->cfg_last_idx;
callback = cfg->cfg_callback;
- LASSERT(callback != NULL);
+ LASSERT(callback);
} else {
callback = class_config_llog_handler;
}
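
class_match_param() gains a const key in the hunks above; its contract is unchanged: return 0 when buf starts with key and point valp just past it, 1 otherwise. A sketch of that contract (the helper name is reused for clarity, not the kernel symbol):

#include <stdio.h>
#include <string.h>

static int match_param(const char *buf, const char *key, const char **valp)
{
	if (!buf || strncmp(buf, key, strlen(key)) != 0)
		return 1;		/* key is not a prefix of buf */
	if (valp)
		*valp = buf + strlen(key);	/* first char after key */
	return 0;
}

int main(void)
{
	const char *val;

	if (!match_param("osc.max_dirty_mb=36", "osc.max_dirty_mb=", &val))
		printf("value: %s\n", val);	/* prints 36 */
	return 0;
}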
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index b5aa8168dbff..d3e28a389ac1 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -72,7 +72,7 @@ static void (*kill_super_cb)(struct super_block *sb);
* this log, and is added to the mgc's list of logs to follow.
*/
int lustre_process_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg)
+ struct config_llog_instance *cfg)
{
struct lustre_cfg *lcfg;
struct lustre_cfg_bufs *bufs;
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(lustre_process_log);
/* Stop watching this config log for updates */
int lustre_end_log(struct super_block *sb, char *logname,
- struct config_llog_instance *cfg)
+ struct config_llog_instance *cfg)
{
struct lustre_cfg *lcfg;
struct lustre_cfg_bufs bufs;
@@ -283,9 +283,10 @@ int lustre_start_mgc(struct super_block *sb)
recov_bk = 0;
/* Try all connections, but only once (again).
- We don't want to block another target from starting
- (using its local copy of the log), but we do want to connect
- if at all possible. */
+ * We don't want to block another target from starting
+ * (using its local copy of the log), but we do want to connect
+ * if at all possible.
+ */
recov_bk++;
CDEBUG(D_MOUNT, "%s: Set MGC reconnect %d\n", mgcname,
recov_bk);
@@ -339,7 +340,7 @@ int lustre_start_mgc(struct super_block *sb)
/* Add any failover MGS nids */
i = 1;
while (ptr && ((*ptr == ':' ||
- class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) {
+ class_find_param(ptr, PARAM_MGSNODE, &ptr) == 0))) {
/* New failover node */
sprintf(niduuid, "%s_%x", mgcname, i);
j = 0;
@@ -375,7 +376,8 @@ int lustre_start_mgc(struct super_block *sb)
goto out_free;
/* Keep a refcount of servers/clients who started with "mount",
- so we know when we can get rid of the mgc. */
+ * so we know when we can get rid of the mgc.
+ */
atomic_set(&obd->u.cli.cl_mgc_refcount, 1);
/* We connect to the MGS at setup, and don't disconnect until cleanup */
@@ -403,7 +405,8 @@ int lustre_start_mgc(struct super_block *sb)
out:
/* Keep the mgc info in the sb. Note that many lsi's can point
- to the same mgc.*/
+ * to the same mgc.
+ */
lsi->lsi_mgc = obd;
out_free:
mutex_unlock(&mgc_start_lock);
@@ -432,7 +435,8 @@ static int lustre_stop_mgc(struct super_block *sb)
LASSERT(atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
if (!atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
/* This is not fatal, every client that stops
- will call in here. */
+ * will call in here.
+ */
CDEBUG(D_MOUNT, "mgc still has %d references.\n",
atomic_read(&obd->u.cli.cl_mgc_refcount));
rc = -EBUSY;
@@ -440,19 +444,20 @@ static int lustre_stop_mgc(struct super_block *sb)
}
/* The MGC has no recoverable data in any case.
- * force shutdown set in umount_begin */
+ * force shutdown set in umount_begin
+ */
obd->obd_no_recov = 1;
if (obd->u.cli.cl_mgc_mgsexp) {
/* An error is not fatal, if we are unable to send the
- disconnect mgs ping evictor cleans up the export */
+ * disconnect mgs ping evictor cleans up the export
+ */
rc = obd_disconnect(obd->u.cli.cl_mgc_mgsexp);
if (rc)
CDEBUG(D_MOUNT, "disconnect failed %d\n", rc);
}
- /* Save the obdname for cleaning the nid uuids, which are
- obdname_XX */
+ /* Save the obdname for cleaning the nid uuids, which are obdname_XX */
len = strlen(obd->obd_name) + 6;
niduuid = kzalloc(len, GFP_NOFS);
if (niduuid) {
@@ -518,13 +523,12 @@ static int lustre_free_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
- LASSERT(lsi != NULL);
CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
/* someone didn't call server_put_mount. */
LASSERT(atomic_read(&lsi->lsi_mounts) == 0);
- if (lsi->lsi_lmd != NULL) {
+ if (lsi->lsi_lmd) {
kfree(lsi->lsi_lmd->lmd_dev);
kfree(lsi->lsi_lmd->lmd_profile);
kfree(lsi->lsi_lmd->lmd_mgssec);
@@ -538,7 +542,7 @@ static int lustre_free_lsi(struct super_block *sb)
kfree(lsi->lsi_lmd);
}
- LASSERT(lsi->lsi_llsbi == NULL);
+ LASSERT(!lsi->lsi_llsbi);
kfree(lsi);
s2lsi_nocast(sb) = NULL;
@@ -546,13 +550,12 @@ static int lustre_free_lsi(struct super_block *sb)
}
/* The lsi has one reference for every server that is using the disk -
- e.g. MDT, MGS, and potentially MGC */
+ * e.g. MDT, MGS, and potentially MGC
+ */
static int lustre_put_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
- LASSERT(lsi != NULL);
-
CDEBUG(D_MOUNT, "put %p %d\n", sb, atomic_read(&lsi->lsi_mounts));
if (atomic_dec_and_test(&lsi->lsi_mounts)) {
lustre_free_lsi(sb);
@@ -588,21 +591,22 @@ static int server_name2fsname(const char *svname, char *fsname,
if (dash == svname)
return -EINVAL;
- if (fsname != NULL) {
+ if (fsname) {
strncpy(fsname, svname, dash - svname);
fsname[dash - svname] = '\0';
}
- if (endptr != NULL)
+ if (endptr)
*endptr = dash;
return 0;
}
/* Get the index from the obd name.
- rc = server type, or
- rc < 0 on error
- if endptr isn't NULL it is set to end of name */
+ * rc = server type, or
+ * rc < 0 on error
+ * if endptr isn't NULL it is set to end of name
+ */
static int server_name2index(const char *svname, __u32 *idx,
const char **endptr)
{
@@ -627,18 +631,18 @@ static int server_name2index(const char *svname, __u32 *idx,
dash += 3;
if (strncmp(dash, "all", 3) == 0) {
- if (endptr != NULL)
+ if (endptr)
*endptr = dash + 3;
return rc | LDD_F_SV_ALL;
}
index = simple_strtoul(dash, (char **)endptr, 16);
- if (idx != NULL)
+ if (idx)
*idx = index;
/* Account for -mdc after index that is possible when specifying mdt */
- if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
- sizeof(LUSTRE_MDC_NAME)-1) == 0)
+ if (endptr && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
+ sizeof(LUSTRE_MDC_NAME) - 1) == 0)
*endptr += sizeof(LUSTRE_MDC_NAME);
return rc;
@@ -661,7 +665,8 @@ int lustre_common_put_super(struct super_block *sb)
return rc;
}
/* BUSY just means that there's some other obd that
- needs the mgc. Let him clean it up. */
+ * needs the mgc. Let him clean it up.
+ */
CDEBUG(D_MOUNT, "MGC still in use\n");
}
/* Drop a ref to the mounted disk */
@@ -731,8 +736,9 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
int rc = 0, devmax;
/* The shortest an ost name can be is 8 chars: -OST0000.
- We don't actually know the fsname at this time, so in fact
- a user could specify any fsname. */
+ * We don't actually know the fsname at this time, so in fact
+ * a user could specify any fsname.
+ */
devmax = strlen(ptr) / 8 + 1;
/* temp storage until we figure out how many we have */
@@ -756,7 +762,8 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
(uint)(s2-s1), s1, rc);
s1 = s2;
/* now we are pointing at ':' (next exclude)
- or ',' (end of excludes) */
+ * or ',' (end of excludes)
+ */
if (lmd->lmd_exclude_count >= devmax)
break;
}
@@ -788,7 +795,7 @@ static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr)
lmd->lmd_mgssec = NULL;
tail = strchr(ptr, ',');
- if (tail == NULL)
+ if (!tail)
length = strlen(ptr);
else
length = tail - ptr;
@@ -807,14 +814,14 @@ static int lmd_parse_string(char **handle, char *ptr)
char *tail;
int length;
- if ((handle == NULL) || (ptr == NULL))
+ if (!handle || !ptr)
return -EINVAL;
kfree(*handle);
*handle = NULL;
tail = strchr(ptr, ',');
- if (tail == NULL)
+ if (!tail)
length = strlen(ptr);
else
length = tail - ptr;
@@ -847,14 +854,14 @@ static int lmd_parse_mgs(struct lustre_mount_data *lmd, char **ptr)
return -EINVAL;
}
- if (lmd->lmd_mgs != NULL)
+ if (lmd->lmd_mgs)
oldlen = strlen(lmd->lmd_mgs) + 1;
mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS);
if (!mgsnid)
return -ENOMEM;
- if (lmd->lmd_mgs != NULL) {
+ if (lmd->lmd_mgs) {
/* Multiple mgsnid= are taken to mean failover locations */
memcpy(mgsnid, lmd->lmd_mgs, oldlen);
mgsnid[oldlen - 1] = ':';
@@ -909,10 +916,12 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
s1++;
/* Client options are parsed in ll_options: eg. flock,
- user_xattr, acl */
+ * user_xattr, acl
+ */
/* Parse non-ldiskfs options here. Rather than modifying
- ldiskfs, we just zero these out here */
+ * ldiskfs, we just zero these out here
+ */
if (strncmp(s1, "abort_recov", 11) == 0) {
lmd->lmd_flags |= LMD_FLG_ABORT_RECOV;
clear++;
@@ -940,7 +949,8 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
sizeof(PARAM_MGSNODE) - 1) == 0) {
s2 = s1 + sizeof(PARAM_MGSNODE) - 1;
/* Assume the next mount opt is the first
- invalid nid we get to. */
+ * invalid nid we get to.
+ */
rc = lmd_parse_mgs(lmd, &s2);
if (rc)
goto invalid;
@@ -981,7 +991,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
size_t length, params_length;
char *tail = strchr(s1 + 6, ',');
- if (tail == NULL)
+ if (!tail)
length = strlen(s1);
else
length = tail - s1;
@@ -1000,18 +1010,20 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
clear++;
}
/* Linux 2.4 doesn't pass the device, so we stuck it at the
- end of the options. */
+ * end of the options.
+ */
else if (strncmp(s1, "device=", 7) == 0) {
devname = s1 + 7;
/* terminate options right before device. device
- must be the last one. */
+ * must be the last one.
+ */
*s1 = '\0';
break;
}
/* Find next opt */
s2 = strchr(s1, ',');
- if (s2 == NULL) {
+ if (!s2) {
if (clear)
*s1 = '\0';
break;
@@ -1113,9 +1125,9 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent)
if (lmd_is_client(lmd)) {
CDEBUG(D_MOUNT, "Mounting client %s\n", lmd->lmd_profile);
- if (client_fill_super == NULL)
+ if (!client_fill_super)
request_module("lustre");
- if (client_fill_super == NULL) {
+ if (!client_fill_super) {
LCONSOLE_ERROR_MSG(0x165, "Nothing registered for client mount! Is the 'lustre' module loaded?\n");
lustre_put_lsi(sb);
rc = -ENODEV;
@@ -1136,7 +1148,8 @@ static int lustre_fill_super(struct super_block *sb, void *data, int silent)
}
/* If error happens in fill_super() call, @lsi will be killed there.
- * This is why we do not put it here. */
+ * This is why we do not put it here.
+ */
goto out;
out:
if (rc) {
@@ -1151,7 +1164,8 @@ out:
}
/* We can't call ll_fill_super by name because it lives in a module that
- must be loaded after this one. */
+ * must be loaded after this one.
+ */
void lustre_register_client_fill_super(int (*cfs)(struct super_block *sb,
struct vfsmount *mnt))
{
@@ -1166,8 +1180,8 @@ void lustre_register_kill_super_cb(void (*cfs)(struct super_block *sb))
EXPORT_SYMBOL(lustre_register_kill_super_cb);
/***************** FS registration ******************/
-struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
- const char *devname, void *data)
+static struct dentry *lustre_mount(struct file_system_type *fs_type, int flags,
+ const char *devname, void *data)
{
struct lustre_mount_data2 lmd2 = {
.lmd2_data = data,
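
Marking lustre_mount() static in the hunk above is safe because the function is only ever reached through the .mount hook of the filesystem registration, never called by name from another file. A hedged sketch of that wiring; the .kill_sb callback shown is a generic placeholder, not Lustre's actual teardown routine:

	static struct dentry *lustre_mount(struct file_system_type *fs_type,
					   int flags, const char *devname,
					   void *data);

	static struct file_system_type lustre_fs_type = {
		.owner   = THIS_MODULE,
		.name    = "lustre",
		.mount   = lustre_mount,
		.kill_sb = kill_anon_super,	/* placeholder only */
	};

	/* from module init: rc = register_filesystem(&lustre_fs_type); */
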
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 75e1deadddd9..e6436cb4ac62 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -55,7 +55,8 @@ void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent)
EXPORT_SYMBOL(obdo_set_parent_fid);
/* WARNING: the file systems must take care not to tinker with
- attributes they don't manage (such as blocks). */
+ * attributes they don't manage (such as blocks).
+ */
void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid)
{
u32 newvalid = 0;
@@ -122,7 +123,8 @@ void obdo_to_ioobj(struct obdo *oa, struct obd_ioobj *ioobj)
ostid_set_seq_mdt0(&ioobj->ioo_oid);
/* Since 2.4 this does not contain o_mode in the low 16 bits.
- * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs */
+ * Instead, it holds (bd_md_max_brw - 1) for multi-bulk BRW RPCs
+ */
ioobj->ioo_max_brw = 0;
}
EXPORT_SYMBOL(obdo_to_ioobj);
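
The WARNING comment carried through the obdo.c hunk above describes a valid-mask discipline: obdo_from_inode() copies only the attributes named in the caller's mask and records which bits it actually filled. A minimal sketch of that pattern (attrs_from_inode is an illustrative name, and every case except atime is elided):

	static void attrs_from_inode(struct obdo *dst, const struct inode *src,
				     u32 valid)
	{
		u32 newvalid = 0;

		if (valid & OBD_MD_FLATIME) {
			dst->o_atime = src->i_atime.tv_sec;
			newvalid |= OBD_MD_FLATIME;
		}
		/* ...remaining OBD_MD_FL* bits handled the same way... */

		dst->o_valid |= newvalid;
	}
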
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 7b53f7dd1797..1e83669c204d 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -60,7 +60,6 @@ struct echo_device {
struct cl_site ed_site_myself;
struct cl_site *ed_site;
struct lu_device *ed_next;
- int ed_next_islov;
};
struct echo_object {
@@ -147,7 +146,7 @@ static inline struct echo_thread_info *echo_env_info(const struct lu_env *env)
struct echo_thread_info *info;
info = lu_context_key_get(&env->le_ctx, &echo_thread_key);
- LASSERT(info != NULL);
+ LASSERT(info);
return info;
}
@@ -162,9 +161,6 @@ struct echo_object_conf *cl2echo_conf(const struct cl_object_conf *c)
static struct echo_object *cl_echo_object_find(struct echo_device *d,
struct lov_stripe_md **lsm);
static int cl_echo_object_put(struct echo_object *eco);
-static int cl_echo_enqueue(struct echo_object *eco, u64 start,
- u64 end, int mode, __u64 *cookie);
-static int cl_echo_cancel(struct echo_device *d, __u64 cookie);
static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct page **pages, int npages, int async);
@@ -224,7 +220,7 @@ static struct lu_kmem_descr echo_caches[] = {
* @{
*/
static struct page *echo_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
{
return cl2echo_page(slice)->ep_vmpage;
}
@@ -271,7 +267,7 @@ static void echo_page_completion(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- LASSERT(slice->cpl_page->cp_sync_io != NULL);
+ LASSERT(slice->cpl_page->cp_sync_io);
}
static void echo_page_fini(const struct lu_env *env,
@@ -282,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env,
struct page *vmpage = ep->ep_vmpage;
atomic_dec(&eco->eo_npages);
- page_cache_release(vmpage);
+ put_page(vmpage);
}
static int echo_page_prep(const struct lu_env *env,
@@ -371,13 +367,13 @@ static struct cl_lock_operations echo_lock_ops = {
* @{
*/
static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct echo_page *ep = cl_object_page_slice(obj, page);
struct echo_object *eco = cl2echo_obj(obj);
ep->ep_vmpage = vmpage;
- page_cache_get(vmpage);
+ get_page(vmpage);
mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
atomic_inc(&eco->eo_npages);
@@ -396,14 +392,14 @@ static int echo_lock_init(const struct lu_env *env,
{
struct echo_lock *el;
- el = kmem_cache_alloc(echo_lock_kmem, GFP_NOFS | __GFP_ZERO);
- if (el != NULL) {
+ el = kmem_cache_zalloc(echo_lock_kmem, GFP_NOFS);
+ if (el) {
cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops);
el->el_object = cl2echo_obj(obj);
INIT_LIST_HEAD(&el->el_chain);
atomic_set(&el->el_refcount, 0);
}
- return el == NULL ? -ENOMEM : 0;
+ return !el ? -ENOMEM : 0;
}
static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
@@ -443,7 +439,7 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
under = ed->ed_next;
below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
under);
- if (below == NULL)
+ if (!below)
return -ENOMEM;
lu_object_add(obj, below);
}
@@ -474,12 +470,12 @@ static int echo_alloc_memmd(struct echo_device *ed,
int lsm_size;
/* If export is lov/osc then use their obd method */
- if (ed->ed_next != NULL)
+ if (ed->ed_next)
return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
/* OFD has no unpackmd method, do everything here */
lsm_size = lov_stripe_md_size(1);
- LASSERT(*lsmp == NULL);
+ LASSERT(!*lsmp);
*lsmp = kzalloc(lsm_size, GFP_NOFS);
if (!*lsmp)
return -ENOMEM;
@@ -502,12 +498,11 @@ static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp)
int lsm_size;
/* If export is lov/osc then use their obd method */
- if (ed->ed_next != NULL)
+ if (ed->ed_next)
return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
/* OFD has no unpackmd method, do everything here */
lsm_size = lov_stripe_md_size(1);
- LASSERT(*lsmp != NULL);
kfree((*lsmp)->lsm_oinfo[0]);
kfree(*lsmp);
*lsmp = NULL;
@@ -534,7 +529,7 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
}
static int echo_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
+ lu_printer_t p, const struct lu_object *o)
{
struct echo_object *obj = cl2echo_obj(lu2cl(o));
@@ -566,9 +561,9 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
struct lu_object *obj = NULL;
/* we're the top dev. */
- LASSERT(hdr == NULL);
- eco = kmem_cache_alloc(echo_object_kmem, GFP_NOFS | __GFP_ZERO);
- if (eco != NULL) {
+ LASSERT(!hdr);
+ eco = kmem_cache_zalloc(echo_object_kmem, GFP_NOFS);
+ if (eco) {
struct cl_object_header *hdr = &eco->eo_hdr;
obj = &echo_obj2cl(eco)->co_lu;
@@ -582,13 +577,13 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
return obj;
}
-static struct lu_device_operations echo_device_lu_ops = {
+static const struct lu_device_operations echo_device_lu_ops = {
.ldo_object_alloc = echo_object_alloc,
};
/** @} echo_lu_dev_ops */
-static struct cl_device_operations echo_device_cl_ops = {
+static const struct cl_device_operations echo_device_cl_ops = {
};
/** \defgroup echo_init Setup and teardown
@@ -626,18 +621,18 @@ static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
}
static void *echo_thread_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
struct echo_thread_info *info;
- info = kmem_cache_alloc(echo_thread_kmem, GFP_NOFS | __GFP_ZERO);
- if (info == NULL)
+ info = kmem_cache_zalloc(echo_thread_kmem, GFP_NOFS);
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
static void echo_thread_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+ struct lu_context_key *key, void *data)
{
struct echo_thread_info *info = data;
@@ -645,7 +640,7 @@ static void echo_thread_key_fini(const struct lu_context *ctx,
}
static void echo_thread_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+ struct lu_context_key *key, void *data)
{
}
@@ -657,18 +652,18 @@ static struct lu_context_key echo_thread_key = {
};
static void *echo_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+ struct lu_context_key *key)
{
struct echo_session_info *session;
- session = kmem_cache_alloc(echo_session_kmem, GFP_NOFS | __GFP_ZERO);
- if (session == NULL)
+ session = kmem_cache_zalloc(echo_session_kmem, GFP_NOFS);
+ if (!session)
session = ERR_PTR(-ENOMEM);
return session;
}
static void echo_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+ struct lu_context_key *key, void *data)
{
struct echo_session_info *session = data;
@@ -676,7 +671,7 @@ static void echo_session_key_fini(const struct lu_context *ctx,
}
static void echo_session_key_exit(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+ struct lu_context_key *key, void *data)
{
}
@@ -719,13 +714,13 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
cleanup = 2;
obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd != NULL);
- LASSERT(env != NULL);
+ LASSERT(obd);
+ LASSERT(env);
tgt = class_name2obd(lustre_cfg_string(cfg, 1));
- if (tgt == NULL) {
+ if (!tgt) {
CERROR("Can not find tgt device %s\n",
- lustre_cfg_string(cfg, 1));
+ lustre_cfg_string(cfg, 1));
rc = -ENODEV;
goto out;
}
@@ -751,14 +746,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
cleanup = 4;
/* if echo client is to be stacked upon ost device, the next is
- * NULL since ost is not a clio device so far */
- if (next != NULL && !lu_device_is_cl(next))
+ * NULL since ost is not a clio device so far
+ */
+ if (next && !lu_device_is_cl(next))
next = NULL;
tgt_type_name = tgt->obd_type->typ_name;
- if (next != NULL) {
- LASSERT(next != NULL);
- if (next->ld_site != NULL) {
+ if (next) {
+ if (next->ld_site) {
rc = -EBUSY;
goto out;
}
@@ -770,14 +765,6 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
if (rc)
goto out;
- /* Tricky case, I have to determine the obd type since
- * CLIO uses the different parameters to initialize
- * objects for lov & osc. */
- if (strcmp(tgt_type_name, LUSTRE_LOV_NAME) == 0)
- ed->ed_next_islov = 1;
- else
- LASSERT(strcmp(tgt_type_name,
- LUSTRE_OSC_NAME) == 0);
} else {
LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
}
@@ -809,7 +796,7 @@ out:
}
static int echo_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
+ const char *name, struct lu_device *next)
{
LBUG();
return 0;
@@ -963,20 +950,11 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
info = echo_env_info(env);
conf = &info->eti_conf;
if (d->ed_next) {
- if (!d->ed_next_islov) {
- struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
-
- LASSERT(oinfo != NULL);
- oinfo->loi_oi = lsm->lsm_oi;
- conf->eoc_cl.u.coc_oinfo = oinfo;
- } else {
- struct lustre_md *md;
+ struct lov_oinfo *oinfo = lsm->lsm_oinfo[0];
- md = &info->eti_md;
- memset(md, 0, sizeof(*md));
- md->lsm = lsm;
- conf->eoc_cl.u.coc_md = md;
- }
+ LASSERT(oinfo);
+ oinfo->loi_oi = lsm->lsm_oi;
+ conf->eoc_cl.u.coc_oinfo = oinfo;
}
conf->eoc_md = lsmp;
@@ -988,7 +966,8 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
}
/* In the function below, .hs_keycmp resolves to
- * lu_obj_hop_keycmp() */
+ * lu_obj_hop_keycmp()
+ */
/* coverity[overrun-buffer-val] */
obj = cl_object_find(env, echo_dev2cl(d), fid, &conf->eoc_cl);
if (IS_ERR(obj)) {
@@ -1076,36 +1055,6 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
return rc;
}
-static int cl_echo_enqueue(struct echo_object *eco, u64 start, u64 end,
- int mode, __u64 *cookie)
-{
- struct echo_thread_info *info;
- struct lu_env *env;
- struct cl_io *io;
- int refcheck;
- int result;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- info = echo_env_info(env);
- io = &info->eti_io;
-
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, echo_obj2cl(eco));
- if (result < 0)
- goto out;
- LASSERT(result == 0);
-
- result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0);
- cl_io_fini(env, io);
-
-out:
- cl_env_put(env, &refcheck);
- return result;
-}
-
static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
__u64 cookie)
{
@@ -1114,7 +1063,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
struct list_head *el;
int found = 0, still_used = 0;
- LASSERT(ec != NULL);
spin_lock(&ec->ec_lock);
list_for_each(el, &ec->ec_locks) {
ecl = list_entry(el, struct echo_lock, el_chain);
@@ -1137,22 +1085,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
return 0;
}
-static int cl_echo_cancel(struct echo_device *ed, __u64 cookie)
-{
- struct lu_env *env;
- int refcheck;
- int rc;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- rc = cl_echo_cancel0(env, ed, cookie);
-
- cl_env_put(env, &refcheck);
- return rc;
-}
-
static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
enum cl_req_type unused, struct cl_2queue *queue)
{
@@ -1188,7 +1120,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
int i;
LASSERT((offset & ~CFS_PAGE_MASK) == 0);
- LASSERT(ed->ed_next != NULL);
+ LASSERT(ed->ed_next);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
@@ -1206,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
LASSERT(rc == 0);
rc = cl_echo_enqueue0(env, eco, offset,
- offset + npages * PAGE_CACHE_SIZE - 1,
+ offset + npages * PAGE_SIZE - 1,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
CEF_NEVER);
if (rc < 0)
@@ -1234,7 +1166,8 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
cl_page_list_add(&queue->c2_qin, clp);
/* drop the reference count for cl_page_find, so that the page
- * will be freed in cl_2queue_fini. */
+ * will be freed in cl_2queue_fini.
+ */
cl_page_put(env, clp);
cl_page_clip(env, clp, 0, page_size);
@@ -1268,61 +1201,8 @@ out:
static u64 last_object_id;
-static int
-echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
-{
- struct lov_stripe_md *ulsm = _ulsm;
- struct lov_oinfo **p;
- int nob, i;
-
- nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
- if (nob > ulsm_nob)
- return -EINVAL;
-
- if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
- return -EFAULT;
-
- for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
- struct lov_oinfo __user *up;
- if (get_user(up, ulsm->lsm_oinfo + i) ||
- copy_to_user(up, *p, sizeof(struct lov_oinfo)))
- return -EFAULT;
- }
- return 0;
-}
-
-static int
-echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
- struct lov_stripe_md __user *ulsm, int ulsm_nob)
-{
- struct echo_client_obd *ec = ed->ed_ec;
- struct lov_oinfo **p;
- int i;
-
- if (ulsm_nob < sizeof(*lsm))
- return -EINVAL;
-
- if (copy_from_user(lsm, ulsm, sizeof(*lsm)))
- return -EFAULT;
-
- if (lsm->lsm_stripe_count > ec->ec_nstripes ||
- lsm->lsm_magic != LOV_MAGIC ||
- (lsm->lsm_stripe_size & (~CFS_PAGE_MASK)) != 0 ||
- ((__u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count > ~0UL))
- return -EINVAL;
-
- for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
- struct lov_oinfo __user *up;
- if (get_user(up, ulsm->lsm_oinfo + i) ||
- copy_from_user(*p, up, sizeof(struct lov_oinfo)))
- return -EFAULT;
- }
- return 0;
-}
-
static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
- int on_target, struct obdo *oa, void *ulsm,
- int ulsm_nob, struct obd_trans_info *oti)
+ struct obdo *oa, struct obd_trans_info *oti)
{
struct echo_object *eco;
struct echo_client_obd *ec = ed->ed_ec;
@@ -1330,10 +1210,10 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
int rc;
int created = 0;
- if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */
- (on_target || /* set_stripe */
- ec->ec_nstripes != 0)) { /* LOV */
- CERROR("No valid oid\n");
+ if (!(oa->o_valid & OBD_MD_FLID) ||
+ !(oa->o_valid & OBD_MD_FLGROUP) ||
+ !fid_seq_is_echo(ostid_seq(&oa->o_oi))) {
+ CERROR("invalid oid " DOSTID "\n", POSTID(&oa->o_oi));
return -EINVAL;
}
@@ -1343,52 +1223,18 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
goto failed;
}
- if (ulsm != NULL) {
- int i, idx;
-
- rc = echo_copyin_lsm(ed, lsm, ulsm, ulsm_nob);
- if (rc != 0)
- goto failed;
-
- if (lsm->lsm_stripe_count == 0)
- lsm->lsm_stripe_count = ec->ec_nstripes;
-
- if (lsm->lsm_stripe_size == 0)
- lsm->lsm_stripe_size = PAGE_CACHE_SIZE;
-
- idx = cfs_rand();
-
- /* setup stripes: indices + default ids if required */
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
- if (ostid_id(&lsm->lsm_oinfo[i]->loi_oi) == 0)
- lsm->lsm_oinfo[i]->loi_oi = lsm->lsm_oi;
-
- lsm->lsm_oinfo[i]->loi_ost_idx =
- (idx + i) % ec->ec_nstripes;
- }
- }
-
- /* setup object ID here for !on_target and LOV hint */
- if (oa->o_valid & OBD_MD_FLID) {
- LASSERT(oa->o_valid & OBD_MD_FLGROUP);
- lsm->lsm_oi = oa->o_oi;
- }
+ /* setup object ID here */
+ lsm->lsm_oi = oa->o_oi;
if (ostid_id(&lsm->lsm_oi) == 0)
ostid_set_id(&lsm->lsm_oi, ++last_object_id);
- rc = 0;
- if (on_target) {
- /* Only echo objects are allowed to be created */
- LASSERT((oa->o_valid & OBD_MD_FLGROUP) &&
- (ostid_seq(&oa->o_oi) == FID_SEQ_ECHO));
- rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
- if (rc != 0) {
- CERROR("Cannot create objects: rc = %d\n", rc);
- goto failed;
- }
- created = 1;
+ rc = obd_create(env, ec->ec_exp, oa, &lsm, oti);
+ if (rc != 0) {
+ CERROR("Cannot create objects: rc = %d\n", rc);
+ goto failed;
}
+ created = 1;
/* See what object ID we were given */
oa->o_oi = lsm->lsm_oi;
@@ -1447,42 +1293,16 @@ static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
static void echo_put_object(struct echo_object *eco)
{
- if (cl_echo_object_put(eco))
- CERROR("echo client: drop an object failed");
-}
-
-static void
-echo_get_stripe_off_id(struct lov_stripe_md *lsm, u64 *offp, u64 *idp)
-{
- unsigned long stripe_count;
- unsigned long stripe_size;
- unsigned long width;
- unsigned long woffset;
- int stripe_index;
- u64 offset;
-
- if (lsm->lsm_stripe_count <= 1)
- return;
-
- offset = *offp;
- stripe_size = lsm->lsm_stripe_size;
- stripe_count = lsm->lsm_stripe_count;
-
- /* width = # bytes in all stripes */
- width = stripe_size * stripe_count;
-
- /* woffset = offset within a width; offset = whole number of widths */
- woffset = do_div(offset, width);
-
- stripe_index = woffset / stripe_size;
+ int rc;
- *idp = ostid_id(&lsm->lsm_oinfo[stripe_index]->loi_oi);
- *offp = offset * stripe_size + woffset % stripe_size;
+ rc = cl_echo_object_put(eco);
+ if (rc)
+ CERROR("%s: echo client drop an object failed: rc = %d\n",
+ eco->eo_dev->ed_ec->ec_exp->exp_obd->obd_name, rc);
}
static void
-echo_client_page_debug_setup(struct lov_stripe_md *lsm,
- struct page *page, int rw, u64 id,
+echo_client_page_debug_setup(struct page *page, int rw, u64 id,
u64 offset, u64 count)
{
char *addr;
@@ -1491,15 +1311,14 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm,
int delta;
/* no partial pages on the client */
- LASSERT(count == PAGE_CACHE_SIZE);
+ LASSERT(count == PAGE_SIZE);
addr = kmap(page);
- for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
if (rw == OBD_BRW_WRITE) {
stripe_off = offset + delta;
stripe_id = id;
- echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
} else {
stripe_off = 0xdeadbeef00c0ffeeULL;
stripe_id = 0xdeadbeef00c0ffeeULL;
@@ -1511,8 +1330,7 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm,
kunmap(page);
}
-static int echo_client_page_debug_check(struct lov_stripe_md *lsm,
- struct page *page, u64 id,
+static int echo_client_page_debug_check(struct page *page, u64 id,
u64 offset, u64 count)
{
u64 stripe_off;
@@ -1523,14 +1341,13 @@ static int echo_client_page_debug_check(struct lov_stripe_md *lsm,
int rc2;
/* no partial pages on the client */
- LASSERT(count == PAGE_CACHE_SIZE);
+ LASSERT(count == PAGE_SIZE);
addr = kmap(page);
- for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
+ for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
stripe_off = offset + delta;
stripe_id = id;
- echo_get_stripe_off_id(lsm, &stripe_off, &stripe_id);
rc2 = block_debug_check("test_brw",
addr + delta, OBD_ECHO_BLOCK_SIZE,
@@ -1550,7 +1367,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
u64 count, int async,
struct obd_trans_info *oti)
{
- struct lov_stripe_md *lsm = eco->eo_lsm;
u32 npages;
struct brw_page *pga;
struct brw_page *pgp;
@@ -1569,53 +1385,51 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER;
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
- LASSERT(lsm != NULL);
- LASSERT(ostid_id(&lsm->lsm_oi) == ostid_id(&oa->o_oi));
if (count <= 0 ||
(count & (~CFS_PAGE_MASK)) != 0)
return -EINVAL;
/* XXX think again with misaligned I/O */
- npages = count >> PAGE_CACHE_SHIFT;
+ npages = count >> PAGE_SHIFT;
if (rw == OBD_BRW_WRITE)
brw_flags = OBD_BRW_ASYNC;
pga = kcalloc(npages, sizeof(*pga), GFP_NOFS);
- if (pga == NULL)
+ if (!pga)
return -ENOMEM;
pages = kcalloc(npages, sizeof(*pages), GFP_NOFS);
- if (pages == NULL) {
+ if (!pages) {
kfree(pga);
return -ENOMEM;
}
for (i = 0, pgp = pga, off = offset;
i < npages;
- i++, pgp++, off += PAGE_CACHE_SIZE) {
+ i++, pgp++, off += PAGE_SIZE) {
- LASSERT(pgp->pg == NULL); /* for cleanup */
+ LASSERT(!pgp->pg); /* for cleanup */
rc = -ENOMEM;
pgp->pg = alloc_page(gfp_mask);
- if (pgp->pg == NULL)
+ if (!pgp->pg)
goto out;
pages[i] = pgp->pg;
- pgp->count = PAGE_CACHE_SIZE;
+ pgp->count = PAGE_SIZE;
pgp->off = off;
pgp->flag = brw_flags;
if (verify)
- echo_client_page_debug_setup(lsm, pgp->pg, rw,
+ echo_client_page_debug_setup(pgp->pg, rw,
ostid_id(&oa->o_oi), off,
pgp->count);
}
/* brw mode can only be used at client */
- LASSERT(ed->ed_next != NULL);
+ LASSERT(ed->ed_next);
rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async);
out:
@@ -1623,13 +1437,13 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
verify = 0;
for (i = 0, pgp = pga; i < npages; i++, pgp++) {
- if (pgp->pg == NULL)
+ if (!pgp->pg)
continue;
if (verify) {
int vrc;
- vrc = echo_client_page_debug_check(lsm, pgp->pg,
+ vrc = echo_client_page_debug_check(pgp->pg,
ostid_id(&oa->o_oi),
pgp->off, pgp->count);
if (vrc != 0 && rc == 0)
@@ -1649,7 +1463,6 @@ static int echo_client_prep_commit(const struct lu_env *env,
u64 batch, struct obd_trans_info *oti,
int async)
{
- struct lov_stripe_md *lsm = eco->eo_lsm;
struct obd_ioobj ioo;
struct niobuf_local *lnb;
struct niobuf_remote *rnb;
@@ -1657,17 +1470,16 @@ static int echo_client_prep_commit(const struct lu_env *env,
u64 npages, tot_pages;
int i, ret = 0, brw_flags = 0;
- if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
- (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
+ if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
return -EINVAL;
- npages = batch >> PAGE_CACHE_SHIFT;
- tot_pages = count >> PAGE_CACHE_SHIFT;
+ npages = batch >> PAGE_SHIFT;
+ tot_pages = count >> PAGE_SHIFT;
lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
- if (lnb == NULL || rnb == NULL) {
+ if (!lnb || !rnb) {
ret = -ENOMEM;
goto out;
}
@@ -1685,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
if (tot_pages < npages)
npages = tot_pages;
- for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
+ for (i = 0; i < npages; i++, off += PAGE_SIZE) {
rnb[i].offset = off;
- rnb[i].len = PAGE_CACHE_SIZE;
+ rnb[i].len = PAGE_SIZE;
rnb[i].flags = brw_flags;
}
@@ -1705,7 +1517,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
struct page *page = lnb[i].page;
/* read past eof? */
- if (page == NULL && lnb[i].rc == 0)
+ if (!page && lnb[i].rc == 0)
continue;
if (async)
@@ -1717,12 +1529,12 @@ static int echo_client_prep_commit(const struct lu_env *env,
continue;
if (rw == OBD_BRW_WRITE)
- echo_client_page_debug_setup(lsm, page, rw,
+ echo_client_page_debug_setup(page, rw,
ostid_id(&oa->o_oi),
rnb[i].offset,
rnb[i].len);
else
- echo_client_page_debug_check(lsm, page,
+ echo_client_page_debug_check(page,
ostid_id(&oa->o_oi),
rnb[i].offset,
rnb[i].len);
@@ -1774,7 +1586,7 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
if (test_mode == 1)
async = 0;
- if (ed->ed_next == NULL && test_mode != 3) {
+ if (!ed->ed_next && test_mode != 3) {
test_mode = 3;
data->ioc_plen1 = data->ioc_count;
}
@@ -1805,55 +1617,8 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
}
static int
-echo_client_enqueue(struct obd_export *exp, struct obdo *oa,
- int mode, u64 offset, u64 nob)
-{
- struct echo_device *ed = obd2echo_dev(exp->exp_obd);
- struct lustre_handle *ulh = &oa->o_handle;
- struct echo_object *eco;
- u64 end;
- int rc;
-
- if (ed->ed_next == NULL)
- return -EOPNOTSUPP;
-
- if (!(mode == LCK_PR || mode == LCK_PW))
- return -EINVAL;
-
- if ((offset & (~CFS_PAGE_MASK)) != 0 ||
- (nob & (~CFS_PAGE_MASK)) != 0)
- return -EINVAL;
-
- rc = echo_get_object(&eco, ed, oa);
- if (rc != 0)
- return rc;
-
- end = (nob == 0) ? ((u64) -1) : (offset + nob - 1);
- rc = cl_echo_enqueue(eco, offset, end, mode, &ulh->cookie);
- if (rc == 0) {
- oa->o_valid |= OBD_MD_FLHANDLE;
- CDEBUG(D_INFO, "Cookie is %#llx\n", ulh->cookie);
- }
- echo_put_object(eco);
- return rc;
-}
-
-static int
-echo_client_cancel(struct obd_export *exp, struct obdo *oa)
-{
- struct echo_device *ed = obd2echo_dev(exp->exp_obd);
- __u64 cookie = oa->o_handle.cookie;
-
- if ((oa->o_valid & OBD_MD_FLHANDLE) == 0)
- return -EINVAL;
-
- CDEBUG(D_INFO, "Cookie is %#llx\n", cookie);
- return cl_echo_cancel(ed, cookie);
-}
-
-static int
echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
+ void *karg, void __user *uarg)
{
struct obd_device *obd = exp->exp_obd;
struct echo_device *ed = obd2echo_dev(obd);
@@ -1899,8 +1664,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
goto out;
}
- rc = echo_create_object(env, ed, 1, oa, data->ioc_pbuf1,
- data->ioc_plen1, &dummy_oti);
+ rc = echo_create_object(env, ed, oa, &dummy_oti);
goto out;
case OBD_IOC_DESTROY:
@@ -1911,7 +1675,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- rc = obd_destroy(env, ec->ec_exp, oa, eco->eo_lsm,
+ rc = obd_destroy(env, ec->ec_exp, oa, NULL,
&dummy_oti, NULL);
if (rc == 0)
eco->eo_deleted = 1;
@@ -1922,10 +1686,10 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
case OBD_IOC_GETATTR:
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- struct obd_info oinfo = { };
+ struct obd_info oinfo = {
+ .oi_oa = oa,
+ };
- oinfo.oi_md = eco->eo_lsm;
- oinfo.oi_oa = oa;
rc = obd_getattr(env, ec->ec_exp, &oinfo);
echo_put_object(eco);
}
@@ -1939,10 +1703,9 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = echo_get_object(&eco, ed, oa);
if (rc == 0) {
- struct obd_info oinfo = { };
-
- oinfo.oi_oa = oa;
- oinfo.oi_md = eco->eo_lsm;
+ struct obd_info oinfo = {
+ .oi_oa = oa,
+ };
rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
echo_put_object(eco);
@@ -1961,50 +1724,6 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
goto out;
- case ECHO_IOC_GET_STRIPE:
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- rc = echo_copyout_lsm(eco->eo_lsm, data->ioc_pbuf1,
- data->ioc_plen1);
- echo_put_object(eco);
- }
- goto out;
-
- case ECHO_IOC_SET_STRIPE:
- if (!capable(CFS_CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- if (data->ioc_pbuf1 == NULL) { /* unset */
- rc = echo_get_object(&eco, ed, oa);
- if (rc == 0) {
- eco->eo_deleted = 1;
- echo_put_object(eco);
- }
- } else {
- rc = echo_create_object(env, ed, 0, oa,
- data->ioc_pbuf1,
- data->ioc_plen1, &dummy_oti);
- }
- goto out;
-
- case ECHO_IOC_ENQUEUE:
- if (!capable(CFS_CAP_SYS_ADMIN)) {
- rc = -EPERM;
- goto out;
- }
-
- rc = echo_client_enqueue(exp, oa,
- data->ioc_conn1, /* lock mode */
- data->ioc_offset,
- data->ioc_count);/*extent*/
- goto out;
-
- case ECHO_IOC_CANCEL:
- rc = echo_client_cancel(exp, oa);
- goto out;
-
default:
CERROR("echo_ioctl(): unrecognised ioctl %#x\n", cmd);
rc = -ENOTTY;
@@ -2051,14 +1770,10 @@ static int echo_client_setup(const struct lu_env *env,
INIT_LIST_HEAD(&ec->ec_objects);
INIT_LIST_HEAD(&ec->ec_locks);
ec->ec_unique = 0;
- ec->ec_nstripes = 0;
ocd = kzalloc(sizeof(*ocd), GFP_NOFS);
- if (!ocd) {
- CERROR("Can't alloc ocd connecting to %s\n",
- lustre_cfg_string(lcfg, 1));
+ if (!ocd)
return -ENOMEM;
- }
ocd->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_REQPORTAL |
OBD_CONNECT_BRW_SIZE |
@@ -2120,7 +1835,7 @@ static int echo_client_disconnect(struct obd_export *exp)
{
int rc;
- if (exp == NULL) {
+ if (!exp) {
rc = -EINVAL;
goto out;
}
@@ -2163,7 +1878,7 @@ static int __init obdecho_init(void)
{
LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
- LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
+ LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
return echo_client_init();
}
@@ -2175,9 +1890,9 @@ static void /*__exit*/ obdecho_exit(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre Testing Echo OBD driver");
-MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Lustre Echo Client test driver");
MODULE_VERSION(LUSTRE_VERSION_STRING);
+MODULE_LICENSE("GPL");
module_init(obdecho_init);
module_exit(obdecho_exit);
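
Most of the echo_client.c churn above is mechanical and relies on two identities: kmem_cache_zalloc(c, gfp) is equivalent to kmem_cache_alloc(c, gfp | __GFP_ZERO), and the PAGE_CACHE_* and page_cache_get/release() names were thin aliases for the plain page macros before this kernel series removed them. A sketch of the substitutions:

	/* zeroing slab allocation: the two lines below are equivalent */
	el = kmem_cache_alloc(echo_lock_kmem, GFP_NOFS | __GFP_ZERO);
	el = kmem_cache_zalloc(echo_lock_kmem, GFP_NOFS);

	/* page-cache alias removals seen throughout the hunks:
	 *   PAGE_CACHE_SIZE       -> PAGE_SIZE
	 *   PAGE_CACHE_SHIFT      -> PAGE_SHIFT
	 *   page_cache_get(p)     -> get_page(p)
	 *   page_cache_release(p) -> put_page(p)
	 */
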
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_internal.h b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
index 69063fa65d35..f5034a253f6d 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_internal.h
+++ b/drivers/staging/lustre/lustre/obdecho/echo_internal.h
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/*
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 1091536fc90d..a3358c39b2f1 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
if (rc)
return rc;
- pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
+ pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number <= 0 ||
- pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
+ pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
+ cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
osc_wake_cache_waiters(cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = m->private;
struct client_obd *cli = &dev->u.cli;
- int shift = 20 - PAGE_CACHE_SHIFT;
+ int shift = 20 - PAGE_SHIFT;
seq_printf(m,
"used_mb: %d\n"
@@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
return -EFAULT;
kernbuf[count] = 0;
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ mult = 1 << (20 - PAGE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
kernbuf;
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@@ -381,7 +381,7 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v)
DECLARE_CKSUM_NAME;
- if (obd == NULL)
+ if (!obd)
return 0;
for (i = 0; i < ARRAY_SIZE(cksum_name); i++) {
@@ -397,8 +397,8 @@ static int osc_checksum_type_seq_show(struct seq_file *m, void *v)
}
static ssize_t osc_checksum_type_seq_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *off)
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
int i;
@@ -406,7 +406,7 @@ static ssize_t osc_checksum_type_seq_write(struct file *file,
DECLARE_CKSUM_NAME;
char kernbuf[10];
- if (obd == NULL)
+ if (!obd)
return 0;
if (count > sizeof(kernbuf) - 1)
@@ -422,8 +422,8 @@ static ssize_t osc_checksum_type_seq_write(struct file *file,
if (((1 << i) & obd->u.cli.cl_supp_cksum_types) == 0)
continue;
if (!strcmp(kernbuf, cksum_name[i])) {
- obd->u.cli.cl_cksum_type = 1 << i;
- return count;
+ obd->u.cli.cl_cksum_type = 1 << i;
+ return count;
}
}
return -EINVAL;
@@ -480,9 +480,19 @@ static ssize_t contention_seconds_store(struct kobject *kobj,
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kobj);
struct osc_device *od = obd2osc_dev(obd);
+ int rc;
+ int val;
+
+ rc = kstrtoint(buffer, 10, &val);
+ if (rc)
+ return rc;
+
+ if (val < 0)
+ return -EINVAL;
+
+ od->od_contention_time = val;
- return lprocfs_write_helper(buffer, count, &od->od_contention_time) ?:
- count;
+ return count;
}
LUSTRE_RW_ATTR(contention_seconds);
@@ -505,9 +515,16 @@ static ssize_t lockless_truncate_store(struct kobject *kobj,
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kobj);
struct osc_device *od = obd2osc_dev(obd);
+ int rc;
+ unsigned int val;
- return lprocfs_write_helper(buffer, count, &od->od_lockless_truncate) ?:
- count;
+ rc = kstrtouint(buffer, 10, &val);
+ if (rc)
+ return rc;
+
+ od->od_lockless_truncate = val;
+
+ return count;
}
LUSTRE_RW_ATTR(lockless_truncate);
@@ -552,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
/* if the max_pages is specified in bytes, convert to pages */
if (val >= ONE_MB_BRW_SIZE)
- val >>= PAGE_CACHE_SHIFT;
+ val >>= PAGE_SHIFT;
- chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
+ chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
/* max_pages_per_rpc must be chunk aligned */
val = (val + ~chunk_mask) & chunk_mask;
- if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
+ if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
return -ERANGE;
}
client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -635,10 +652,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
read_cum += r;
write_cum += w;
seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- 1 << i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
+ 1 << i, r, pct(r, read_tot),
+ pct(read_cum, read_tot), w,
+ pct(w, write_tot),
+ pct(write_cum, write_tot));
if (read_cum == read_tot && write_cum == write_tot)
break;
}
@@ -659,10 +676,10 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
read_cum += r;
write_cum += w;
seq_printf(seq, "%d:\t\t%10lu %3lu %3lu | %10lu %3lu %3lu\n",
- i, r, pct(r, read_tot),
- pct(read_cum, read_tot), w,
- pct(w, write_tot),
- pct(write_cum, write_tot));
+ i, r, pct(r, read_tot),
+ pct(read_cum, read_tot), w,
+ pct(w, write_tot),
+ pct(write_cum, write_tot));
if (read_cum == read_tot && write_cum == write_tot)
break;
}
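
The lproc_osc.c hunks above replace lprocfs_write_helper() with the kernel's generic kstrtoint()/kstrtouint() parsers plus explicit validation, so malformed input is rejected before the attribute is touched. A minimal sketch of the resulting store-handler shape; example_store and the od_example backing variable are hypothetical names:

	static ssize_t example_store(struct kobject *kobj,
				     struct attribute *attr,
				     const char *buffer, size_t count)
	{
		int val;
		int rc;

		rc = kstrtoint(buffer, 10, &val);
		if (rc)
			return rc;	/* kstrtoint reports -EINVAL/-ERANGE */

		if (val < 0)
			return -EINVAL;	/* same negative check as contention_seconds */

		od_example = val;	/* hypothetical backing field */
		return count;		/* whole buffer consumed */
	}
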
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 2229419b7184..5f25bf83dcfc 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -140,7 +140,7 @@ static const char *oes_strings[] = {
static inline struct osc_extent *rb_extent(struct rb_node *n)
{
- if (n == NULL)
+ if (!n)
return NULL;
return container_of(n, struct osc_extent, oe_node);
@@ -148,7 +148,7 @@ static inline struct osc_extent *rb_extent(struct rb_node *n)
static inline struct osc_extent *next_extent(struct osc_extent *ext)
{
- if (ext == NULL)
+ if (!ext)
return NULL;
LASSERT(ext->oe_intree);
@@ -157,7 +157,7 @@ static inline struct osc_extent *next_extent(struct osc_extent *ext)
static inline struct osc_extent *prev_extent(struct osc_extent *ext)
{
- if (ext == NULL)
+ if (!ext)
return NULL;
LASSERT(ext->oe_intree);
@@ -240,7 +240,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
goto out;
}
- if (ext->oe_osclock == NULL && ext->oe_grants > 0) {
+ if (!ext->oe_osclock && ext->oe_grants > 0) {
rc = 90;
goto out;
}
@@ -262,7 +262,8 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
}
/* Do not verify page list if extent is in RPC. This is because an
- * in-RPC extent is supposed to be exclusively accessible w/o lock. */
+ * in-RPC extent is supposed to be exclusively accessible w/o lock.
+ */
if (ext->oe_state > OES_CACHE) {
rc = 0;
goto out;
@@ -319,7 +320,7 @@ static int osc_extent_is_overlapped(struct osc_object *obj,
if (!extent_debug)
return 0;
- for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) {
+ for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) {
if (tmp == ext)
continue;
if (tmp->oe_end >= ext->oe_start &&
@@ -346,8 +347,8 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
{
struct osc_extent *ext;
- ext = kmem_cache_alloc(osc_extent_kmem, GFP_NOFS | __GFP_ZERO);
- if (ext == NULL)
+ ext = kmem_cache_zalloc(osc_extent_kmem, GFP_NOFS);
+ if (!ext)
return NULL;
RB_CLEAR_NODE(&ext->oe_node);
@@ -415,7 +416,7 @@ static struct osc_extent *osc_extent_search(struct osc_object *obj,
struct osc_extent *tmp, *p = NULL;
LASSERT(osc_object_is_locked(obj));
- while (n != NULL) {
+ while (n) {
tmp = rb_extent(n);
if (index < tmp->oe_start) {
n = n->rb_left;
@@ -439,7 +440,7 @@ static struct osc_extent *osc_extent_lookup(struct osc_object *obj,
struct osc_extent *ext;
ext = osc_extent_search(obj, index);
- if (ext != NULL && ext->oe_start <= index && index <= ext->oe_end)
+ if (ext && ext->oe_start <= index && index <= ext->oe_end)
return osc_extent_get(ext);
return NULL;
}
@@ -454,7 +455,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
LASSERT(ext->oe_intree == 0);
LASSERT(ext->oe_obj == obj);
LASSERT(osc_object_is_locked(obj));
- while (*n != NULL) {
+ while (*n) {
tmp = rb_extent(*n);
parent = *n;
@@ -463,7 +464,7 @@ static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
else if (ext->oe_start > tmp->oe_end)
n = &(*n)->rb_right;
else
- EASSERTF(0, tmp, EXTSTR, EXTPARA(ext));
+ EASSERTF(0, tmp, EXTSTR"\n", EXTPARA(ext));
}
rb_link_node(&ext->oe_node, parent, n);
rb_insert_color(&ext->oe_node, &obj->oo_root);
@@ -533,7 +534,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
LASSERT(cur->oe_state == OES_CACHE);
LASSERT(osc_object_is_locked(obj));
- if (victim == NULL)
+ if (!victim)
return -EINVAL;
if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
@@ -543,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
return -ERANGE;
LASSERT(cur->oe_osclock == victim->oe_osclock);
- ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+ ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@@ -587,7 +588,8 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
if (ext->oe_trunc_pending) {
/* a truncate process is waiting for this extent.
* This may happen due to a race, check
- * osc_cache_truncate_start(). */
+ * osc_cache_truncate_start().
+ */
osc_extent_state_set(ext, OES_TRUNC);
ext->oe_trunc_pending = 0;
} else {
@@ -601,7 +603,7 @@ void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
if (ext->oe_urgent)
list_move_tail(&ext->oe_link,
- &obj->oo_urgent_exts);
+ &obj->oo_urgent_exts);
}
osc_object_unlock(obj);
@@ -639,15 +641,14 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
int rc;
cur = osc_extent_alloc(obj);
- if (cur == NULL)
+ if (!cur)
return ERR_PTR(-ENOMEM);
lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
- LASSERT(lock != NULL);
LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
- LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
- ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
+ ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
chunk_mask = ~((1 << ppc_bits) - 1);
chunksize = 1 << cli->cl_chunkbits;
chunk = index >> ppc_bits;
@@ -673,14 +674,15 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
/* grants has been allocated by caller */
LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
"%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
- LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur));
+ LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n",
+ EXTPARA(cur));
restart:
osc_object_lock(obj);
ext = osc_extent_search(obj, cur->oe_start);
- if (ext == NULL)
+ if (!ext)
ext = first_extent(obj);
- while (ext != NULL) {
+ while (ext) {
loff_t ext_chk_start = ext->oe_start >> ppc_bits;
loff_t ext_chk_end = ext->oe_end >> ppc_bits;
@@ -691,7 +693,7 @@ restart:
/* if covering by different locks, no chance to match */
if (lock != ext->oe_osclock) {
EASSERTF(!overlapped(ext, cur), ext,
- EXTSTR, EXTPARA(cur));
+ EXTSTR"\n", EXTPARA(cur));
ext = next_extent(ext);
continue;
@@ -705,18 +707,21 @@ restart:
/* ok, from now on, ext and cur have these attrs:
* 1. covered by the same lock
- * 2. contiguous at chunk level or overlapping. */
+ * 2. contiguous at chunk level or overlapping.
+ */
if (overlapped(ext, cur)) {
/* cur is the minimum unit, so overlapping means
- * full contain. */
+ * full contain.
+ */
EASSERTF((ext->oe_start <= cur->oe_start &&
ext->oe_end >= cur->oe_end),
- ext, EXTSTR, EXTPARA(cur));
+ ext, EXTSTR"\n", EXTPARA(cur));
if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
/* for simplicity, we wait for this extent to
- * finish before going forward. */
+ * finish before going forward.
+ */
conflict = osc_extent_get(ext);
break;
}
@@ -729,17 +734,20 @@ restart:
if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
/* we can't do anything for a non OES_CACHE extent, or
* if there is someone waiting for this extent to be
- * flushed, try next one. */
+ * flushed, try next one.
+ */
ext = next_extent(ext);
continue;
}
/* check if they belong to the same rpc slot before trying to
* merge. the extents are not overlapped and contiguous at
- * chunk level to get here. */
+ * chunk level to get here.
+ */
if (ext->oe_max_end != max_end) {
/* if they don't belong to the same RPC slot or
- * max_pages_per_rpc has ever changed, do not merge. */
+ * max_pages_per_rpc has ever changed, do not merge.
+ */
ext = next_extent(ext);
continue;
}
@@ -748,7 +756,8 @@ restart:
* level so that we know the whole extent is covered by grant
* (the pages in the extent are NOT required to be contiguous).
* Otherwise, it will be too much difficult to know which
- * chunks have grants allocated. */
+ * chunks have grants allocated.
+ */
/* try to do front merge - extend ext's start */
if (chunk + 1 == ext_chk_start) {
@@ -768,28 +777,29 @@ restart:
*grants -= chunksize;
/* try to merge with the next one because we just fill
- * in a gap */
+ * in a gap
+ */
if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
/* we can save extent tax from next extent */
*grants += cli->cl_extent_tax;
found = osc_extent_hold(ext);
}
- if (found != NULL)
+ if (found)
break;
ext = next_extent(ext);
}
osc_extent_tree_dump(D_CACHE, obj);
- if (found != NULL) {
- LASSERT(conflict == NULL);
+ if (found) {
+ LASSERT(!conflict);
if (!IS_ERR(found)) {
LASSERT(found->oe_osclock == cur->oe_osclock);
OSC_EXTENT_DUMP(D_CACHE, found,
"found caching ext for %lu.\n", index);
}
- } else if (conflict == NULL) {
+ } else if (!conflict) {
/* create a new extent */
EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
cur->oe_grants = chunksize + cli->cl_extent_tax;
@@ -804,11 +814,12 @@ restart:
}
osc_object_unlock(obj);
- if (conflict != NULL) {
- LASSERT(found == NULL);
+ if (conflict) {
+ LASSERT(!found);
/* waiting for IO to finish. Please notice that it's impossible
- * to be an OES_TRUNC extent. */
+ * to be an OES_TRUNC extent.
+ */
rc = osc_extent_wait(env, conflict, OES_INV);
osc_extent_put(env, conflict);
conflict = NULL;
@@ -845,8 +856,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
ext->oe_rc = rc ?: ext->oe_nr_pages;
EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
+ list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
list_del_init(&oap->oap_rpc_item);
list_del_init(&oap->oap_pending_item);
if (last_off <= oap->oap_obj_off) {
@@ -861,11 +871,12 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
if (!sent) {
lost_grant = ext->oe_grants;
- } else if (blocksize < PAGE_CACHE_SIZE &&
- last_count != PAGE_CACHE_SIZE) {
+ } else if (blocksize < PAGE_SIZE &&
+ last_count != PAGE_SIZE) {
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
- * wrong. Should match the code in filter_grant_check. */
+ * wrong. Should match the code in filter_grant_check.
+ */
int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
int count = oap->oap_count + (offset & (blocksize - 1));
int end = (offset + oap->oap_count) & (blocksize - 1);
@@ -873,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
if (end)
count += blocksize - end;
- lost_grant = PAGE_CACHE_SIZE - count;
+ lost_grant = PAGE_SIZE - count;
}
if (ext->oe_grants > 0)
osc_free_grant(cli, nr_pages, lost_grant);
@@ -909,7 +920,8 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
osc_object_lock(obj);
LASSERT(sanity_check_nolock(ext) == 0);
/* `Kick' this extent only if the caller is waiting for it to be
- * written out. */
+ * written out.
+ */
if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp &&
!ext->oe_trunc_pending) {
if (ext->oe_state == OES_ACTIVE) {
@@ -955,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
struct osc_async_page *oap;
struct osc_async_page *tmp;
int pages_in_chunk = 0;
- int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
__u64 trunc_chunk = trunc_index >> ppc_bits;
int grants = 0;
int nr_pages = 0;
@@ -967,7 +979,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
/* Request new lu_env.
* We can't use that env from osc_cache_truncate_start() because
- * it's from lov_io_sub and not fully initialized. */
+ * it's from lov_io_sub and not fully initialized.
+ */
env = cl_env_nested_get(&nest);
io = &osc_env_info(env)->oti_io;
io->ci_obj = cl_object_top(osc2cl(obj));
@@ -976,15 +989,15 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
goto out;
/* discard all pages with index greater then trunc_index */
- list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
- oap_pending_item) {
+ list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
struct cl_page *sub = oap2cl_page(oap);
struct cl_page *page = cl_page_top(sub);
LASSERT(list_empty(&oap->oap_rpc_item));
/* only discard the pages with their index greater than
- * trunc_index, and ... */
+ * trunc_index, and ...
+ */
if (sub->cp_index < trunc_index ||
(sub->cp_index == trunc_index && partial)) {
/* accounting how many pages remaining in the chunk
@@ -1028,11 +1041,13 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
pgoff_t last_index;
/* if there is no pages in this chunk, we can also free grants
- * for the last chunk */
+ * for the last chunk
+ */
if (pages_in_chunk == 0) {
/* if this is the 1st chunk and no pages in this chunk,
* ext->oe_nr_pages must be zero, so we should be in
- * the other if-clause. */
+ * the other if-clause.
+ */
LASSERT(trunc_chunk > 0);
--trunc_chunk;
++chunks;
@@ -1074,13 +1089,13 @@ static int osc_extent_make_ready(const struct lu_env *env,
LASSERT(sanity_check(ext) == 0);
/* in locking state, any process should not touch this extent. */
EASSERT(ext->oe_state == OES_LOCKING, ext);
- EASSERT(ext->oe_owner != NULL, ext);
+ EASSERT(ext->oe_owner, ext);
OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
++page_count;
- if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
+ if (!last || last->oap_obj_off < oap->oap_obj_off)
last = oap;
/* checking ASYNC_READY is race safe */
@@ -1103,21 +1118,23 @@ static int osc_extent_make_ready(const struct lu_env *env,
}
LASSERT(page_count == ext->oe_nr_pages);
- LASSERT(last != NULL);
+ LASSERT(last);
/* the last page is the only one we need to refresh its count by
- * the size of file. */
+ * the size of file.
+ */
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last->oap_count > 0);
- LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
+ LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
}
/* for the rest of pages, we don't need to call osc_refresh_count()
- * because it's known they are not the last page */
+ * because it's known they are not the last page
+ */
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+ oap->oap_count = PAGE_SIZE - oap->oap_page_off;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}
}
@@ -1141,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
struct osc_extent *next;
- int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
pgoff_t chunk = index >> ppc_bits;
pgoff_t end_chunk;
pgoff_t end_index;
@@ -1167,9 +1184,10 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
next = next_extent(ext);
- if (next != NULL && next->oe_start <= end_index) {
+ if (next && next->oe_start <= end_index) {
/* complex mode - overlapped with the next extent,
- * this case will be handled by osc_extent_find() */
+ * this case will be handled by osc_extent_find()
+ */
rc = -EAGAIN;
goto out;
}
@@ -1197,7 +1215,7 @@ static void osc_extent_tree_dump0(int level, struct osc_object *obj,
/* osc_object_lock(obj); */
cnt = 1;
- for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext))
+ for (ext = first_extent(obj); ext; ext = next_extent(ext))
OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);
cnt = 1;
@@ -1262,7 +1280,6 @@ static int osc_refresh_count(const struct lu_env *env,
/* readpage queues with _COUNT_STABLE, shouldn't get here. */
LASSERT(!(cmd & OBD_BRW_READ));
- LASSERT(opg != NULL);
obj = opg->ops_cl.cpl_obj;
cl_object_attr_lock(obj);
@@ -1276,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env,
return 0;
else if (cl_offset(obj, page->cp_index + 1) > kms)
/* catch sub-page write at end of file */
- return kms % PAGE_CACHE_SIZE;
+ return kms % PAGE_SIZE;
else
- return PAGE_CACHE_SIZE;
+ return PAGE_SIZE;
}
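[Editor's note: osc_refresh_count() clamps the IO size of the last page to the file's known minimum size (kms); kms % PAGE_SIZE is the number of valid bytes when kms ends inside the page. A simplified stand-alone restatement of the three cases, with an assumed page-aligned start offset:]

#define MODEL_PAGE_SIZE 4096ULL

/* bytes of a page worth writing, given the page's start offset and
 * the file's known minimum size (simplified model, not driver code) */
static unsigned long long page_io_bytes(unsigned long long page_start,
					unsigned long long kms)
{
	if (page_start >= kms)
		return 0;			/* page lies entirely past kms */
	if (page_start + MODEL_PAGE_SIZE > kms)
		return kms % MODEL_PAGE_SIZE;	/* sub-page write at end of file */
	return MODEL_PAGE_SIZE;			/* fully covered page */
}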
static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@@ -1299,16 +1316,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
* page->cp_req can be NULL if io submission failed before
* cl_req was allocated.
*/
- if (page->cp_req != NULL)
+ if (page->cp_req)
cl_req_page_done(env, page);
- LASSERT(page->cp_req == NULL);
+ LASSERT(!page->cp_req);
crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
spin_lock(&obj->oo_seatbelt);
- LASSERT(opg->ops_submitter != NULL);
+ LASSERT(opg->ops_submitter);
LASSERT(!list_empty(&opg->ops_inflight));
list_del_init(&opg->ops_inflight);
opg->ops_submitter = NULL;
@@ -1359,15 +1376,16 @@ static void osc_consume_write_grant(struct client_obd *cli,
assert_spin_locked(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
- cli->cl_dirty += PAGE_CACHE_SIZE;
+ cli->cl_dirty += PAGE_SIZE;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
- PAGE_CACHE_SIZE, pga, pga->pg);
+ PAGE_SIZE, pga, pga->pg);
osc_update_next_shrink(cli);
}
/* the companion to osc_consume_write_grant, called when a brw has completed.
- * must be called with the loi lock held. */
+ * must be called with the loi lock held.
+ */
static void osc_release_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
@@ -1378,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli,
pga->flag &= ~OBD_BRW_FROM_GRANT;
atomic_dec(&obd_dirty_pages);
- cli->cl_dirty -= PAGE_CACHE_SIZE;
+ cli->cl_dirty -= PAGE_SIZE;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
atomic_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
+ cli->cl_dirty_transit -= PAGE_SIZE;
}
}
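[Editor's note: osc_consume_write_grant() and osc_release_write_grant() must stay exactly symmetric: every page admitted to the dirty cache adds PAGE_SIZE to cl_dirty, and the completion path subtracts the same amount, with the transit counter following the OBD_BRW_NOCACHE flag. A toy model of that invariant, with illustrative types and no locking:]

struct dirty_acct {
	unsigned long dirty;		/* models cli->cl_dirty, in bytes */
	unsigned long dirty_transit;	/* models cli->cl_dirty_transit */
};

static void consume_page(struct dirty_acct *a, int transient)
{
	a->dirty += 4096;		/* one page enters the dirty cache */
	if (transient)
		a->dirty_transit += 4096;
}

static void release_page(struct dirty_acct *a, int transient)
{
	a->dirty -= 4096;		/* completion must mirror consume_page() */
	if (transient)
		a->dirty_transit -= 4096;
}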
@@ -1410,7 +1428,8 @@ static void __osc_unreserve_grant(struct client_obd *cli,
/* it's quite normal for us to get more grant than reserved.
* Thinking about a case that two extents merged by adding a new
* chunk, we can save one extent tax. If extent tax is greater than
- * one chunk, we can save more grant by adding a new chunk */
+ * one chunk, we can save more grant by adding a new chunk
+ */
cli->cl_reserved_grant -= reserved;
if (unused > reserved) {
cli->cl_avail_grant += reserved;
@@ -1437,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
* used, we should return these grants to OST. There're two cases where grants
* can be lost:
* 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
* written. In this case the OST may use fewer chunks to serve this partial
* write. OSTs don't actually know the page size on the client side, so
* clients have to calculate lost grant by the blocksize on the OST.
@@ -1450,11 +1469,12 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
client_obd_list_lock(&cli->cl_loi_list_lock);
atomic_sub(nr_pages, &obd_dirty_pages);
- cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
+ cli->cl_dirty -= nr_pages << PAGE_SHIFT;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
/* borrow some grant from truncate to avoid the case that
- * truncate uses up all avail grant */
+ * truncate uses up all avail grant
+ */
cli->cl_lost_grant -= grant;
cli->cl_avail_grant += grant;
}
@@ -1492,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
if (rc < 0)
return 0;
- if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
+ if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
- cli->cl_dirty_transit += PAGE_CACHE_SIZE;
+ cli->cl_dirty_transit += PAGE_SIZE;
atomic_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
@@ -1539,9 +1559,10 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
client_obd_list_lock(&cli->cl_loi_list_lock);
/* force the caller to try sync io. this can jump the list
- * of queued writes and create a discontiguous rpc stream */
+ * of queued writes and create a discontiguous rpc stream
+ */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
- cli->cl_dirty_max < PAGE_CACHE_SIZE ||
+ cli->cl_dirty_max < PAGE_SIZE ||
cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
rc = -EDQUOT;
goto out;
@@ -1558,7 +1579,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
* Adding a cache waiter will trigger urgent write-out no matter what
* RPC size will be.
* The exit condition is no available grants and no dirty pages cached,
- * that really means there is no space on the OST. */
+ * that really means there is no space on the OST.
+ */
init_waitqueue_head(&ocw.ocw_waitq);
ocw.ocw_oap = oap;
ocw.ocw_grant = bytes;
@@ -1610,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
- if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
+ if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
(atomic_read(&obd_dirty_pages) + 1 >
obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
@@ -1640,7 +1662,8 @@ static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
/* This maintains the lists of pending pages to read/write for a given object
* (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint()
- * to quickly find objects that are ready to send an RPC. */
+ * to quickly find objects that are ready to send an RPC.
+ */
static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
int cmd)
{
@@ -1649,8 +1672,9 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
/* if we have an invalid import we want to drain the queued pages
* by forcing them through rpcs that immediately fail and complete
* the pages. recovery relies on this to empty the queued pages
- * before canceling the locks and evicting down the llite pages */
- if ((cli->cl_import == NULL || cli->cl_import->imp_invalid))
+ * before canceling the locks and evicting down the llite pages
+ */
+ if (!cli->cl_import || cli->cl_import->imp_invalid)
invalid_import = 1;
if (cmd & OBD_BRW_WRITE) {
@@ -1670,7 +1694,8 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
}
/* trigger a write rpc stream as long as there are dirtiers
* waiting for space. as they're waiting, they're not going to
- * create more pages to coalesce with what's waiting.. */
+ * create more pages to coalesce with what's waiting..
+ */
if (!list_empty(&cli->cl_cache_waiters)) {
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
return 1;
@@ -1723,7 +1748,8 @@ static void on_list(struct list_head *item, struct list_head *list, int should_b
}
/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
- * can find pages to build into rpcs quickly */
+ * can find pages to build into rpcs quickly
+ */
static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
if (osc_makes_hprpc(osc)) {
@@ -1761,7 +1787,8 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
* application. As an async write fails we record the error code for later if
* the app does an fsync. As long as errors persist we force future rpcs to be
* sync so that the app can get a sync error and break the cycle of queueing
- * pages for which writeback will fail. */
+ * pages for which writeback will fail.
+ */
static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
int rc)
{
@@ -1780,7 +1807,8 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
}
/* this must be called holding the loi list lock to give coverage to exit_cache,
- * async_flag maintenance, and oap_request */
+ * async_flag maintenance, and oap_request
+ */
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
struct osc_async_page *oap, int sent, int rc)
{
@@ -1788,7 +1816,7 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
struct lov_oinfo *loi = osc->oo_oinfo;
__u64 xid = 0;
- if (oap->oap_request != NULL) {
+ if (oap->oap_request) {
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
oap->oap_request = NULL;
@@ -1877,13 +1905,12 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
+ struct osc_extent *temp;
int page_count = 0;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
LASSERT(osc_object_is_locked(obj));
- while (!list_empty(&obj->oo_hp_exts)) {
- ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
- oe_link);
+ list_for_each_entry_safe(ext, temp, &obj->oo_hp_exts, oe_link) {
LASSERT(ext->oe_state == OES_CACHE);
if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
&max_pages))
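[Editor's note: the conversion above from an open-coded while (!list_empty()) / list_entry() loop to list_for_each_entry_safe() works because the iterator caches the next node before the body runs, so the body may unlink or move the current extent. Kernel-style sketch of the idiom, with illustrative types:]

#include <linux/list.h>

struct node {
	struct list_head link;
};

static void drain_to(struct list_head *src, struct list_head *dst)
{
	struct node *n, *tmp;

	/* tmp always points at the next entry, so moving n off
	 * src mid-walk cannot corrupt the iteration */
	list_for_each_entry_safe(n, tmp, src, link)
		list_move_tail(&n->link, dst);
}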
@@ -1895,7 +1922,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
while (!list_empty(&obj->oo_urgent_exts)) {
ext = list_entry(obj->oo_urgent_exts.next,
- struct osc_extent, oe_link);
+ struct osc_extent, oe_link);
if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
&max_pages))
return page_count;
@@ -1906,7 +1933,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
while ((ext = next_extent(ext)) != NULL) {
if ((ext->oe_state != OES_CACHE) ||
(!list_empty(&ext->oe_link) &&
- ext->oe_owner != NULL))
+ ext->oe_owner))
continue;
if (!try_to_add_extent_for_io(cli, ext, rpclist,
@@ -1918,10 +1945,10 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
return page_count;
ext = first_extent(obj);
- while (ext != NULL) {
+ while (ext) {
if ((ext->oe_state != OES_CACHE) ||
/* this extent may be already in current rpclist */
- (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
+ (!list_empty(&ext->oe_link) && ext->oe_owner)) {
ext = next_extent(ext);
continue;
}
@@ -1938,6 +1965,7 @@ static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
static int
osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc)
+ __must_hold(osc)
{
LIST_HEAD(rpclist);
struct osc_extent *ext;
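[Editor's note: the __must_hold(osc) annotation added above is for sparse. It asserts the function runs with the named lock held on entry and exit, which lets static analysis check the unlock/relock dance this function performs internally. The kernel defines the annotation family roughly as follows, reproduced for reference:]

#ifdef __CHECKER__
# define __must_hold(x)	__attribute__((context(x, 1, 1)))	/* held on entry and exit */
# define __acquires(x)	__attribute__((context(x, 0, 1)))	/* taken inside */
# define __releases(x)	__attribute__((context(x, 1, 0)))	/* dropped inside */
#else
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
#endif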
@@ -1967,7 +1995,8 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
}
/* we're going to grab page lock, so release object lock because
- * lock order is page lock -> object lock. */
+ * lock order is page lock -> object lock.
+ */
osc_object_unlock(osc);
list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
@@ -1979,7 +2008,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
continue;
}
}
- if (first == NULL) {
+ if (!first) {
first = ext;
srvlock = ext->oe_srvlock;
} else {
@@ -2010,6 +2039,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
static int
osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc)
+ __must_hold(osc)
{
struct osc_extent *ext;
struct osc_extent *next;
@@ -2019,8 +2049,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
int rc = 0;
LASSERT(osc_object_is_locked(osc));
- list_for_each_entry_safe(ext, next,
- &osc->oo_reading_exts, oe_link) {
+ list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) {
EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
&max_pages))
@@ -2051,12 +2080,14 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
})
/* This is called by osc_check_rpcs() to find which objects have pages that
- * we could be sending. These lists are maintained by osc_makes_rpc(). */
+ * we could be sending. These lists are maintained by osc_makes_rpc().
+ */
static struct osc_object *osc_next_obj(struct client_obd *cli)
{
/* First return objects that have blocked locks so that they
* will be flushed quickly and other clients can get the lock,
- * then objects which have pages ready to be stuffed into RPCs */
+ * then objects which have pages ready to be stuffed into RPCs
+ */
if (!list_empty(&cli->cl_loi_hp_ready_list))
return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
if (!list_empty(&cli->cl_loi_ready_list))
@@ -2065,14 +2096,16 @@ static struct osc_object *osc_next_obj(struct client_obd *cli)
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
* have filled up the cache and not been fired into rpcs because
- * they don't pass the nr_pending/object threshold */
+ * they don't pass the nr_pending/object threshold
+ */
if (!list_empty(&cli->cl_cache_waiters) &&
!list_empty(&cli->cl_loi_write_list))
return list_to_obj(&cli->cl_loi_write_list, write_item);
/* then return all queued objects when we have an invalid import
- * so that they get flushed */
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
+ * so that they get flushed
+ */
+ if (!cli->cl_import || cli->cl_import->imp_invalid) {
if (!list_empty(&cli->cl_loi_write_list))
return list_to_obj(&cli->cl_loi_write_list, write_item);
if (!list_empty(&cli->cl_loi_read_list))
@@ -2083,6 +2116,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli)
/* called with the loi list lock held */
static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
+ __must_hold(&cli->cl_loi_list_lock)
{
struct osc_object *osc;
int rc = 0;
@@ -2108,7 +2142,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
* would be redundant if we were getting read/write work items
* instead of objects. we don't want send_oap_rpc to drain a
* partial read pending queue when we're given this object to
- * do io on writes while there are cache waiters */
+ * do io on writes while there are cache waiters
+ */
osc_object_lock(osc);
if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
rc = osc_send_write_rpc(env, cli, osc);
@@ -2130,7 +2165,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
* because it might be blocked at grabbing
* the page lock as we mentioned.
*
- * Anyway, continue to drain pages. */
+ * Anyway, continue to drain pages.
+ */
/* break; */
}
}
@@ -2155,12 +2191,13 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
{
int rc = 0;
- if (osc != NULL && osc_list_maint(cli, osc) == 0)
+ if (osc && osc_list_maint(cli, osc) == 0)
return 0;
if (!async) {
/* disable osc_lru_shrink() temporarily to avoid
- * potential stack overrun problem. LU-2859 */
+ * potential stack overrun problem. LU-2859
+ */
atomic_inc(&cli->cl_lru_shrinkers);
client_obd_list_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli);
@@ -2168,7 +2205,7 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
- LASSERT(cli->cl_writeback_work != NULL);
+ LASSERT(cli->cl_writeback_work);
rc = ptlrpcd_queue_work(cli->cl_writeback_work);
}
return rc;
@@ -2233,7 +2270,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
if (oap->oap_magic != OAP_MAGIC)
return -EINVAL;
- if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
+ if (!cli->cl_import || cli->cl_import->imp_invalid)
return -EIO;
if (!list_empty(&oap->oap_pending_item) ||
@@ -2284,12 +2321,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
* 1. if there exists an active extent for this IO, mostly this page
* can be added to the active extent and sometimes we need to
* expand extent to accommodate this page;
- * 2. otherwise, a new extent will be allocated. */
+ * 2. otherwise, a new extent will be allocated.
+ */
ext = oio->oi_active;
- if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) {
+ if (ext && ext->oe_start <= index && ext->oe_max_end >= index) {
/* one chunk plus extent overhead must be enough to write this
- * page */
+ * page
+ */
grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
if (ext->oe_end >= index)
grants = 0;
@@ -2316,7 +2355,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
}
}
rc = 0;
- } else if (ext != NULL) {
+ } else if (ext) {
/* index is located outside of active extent */
need_release = 1;
}
@@ -2326,13 +2365,14 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
ext = NULL;
}
- if (ext == NULL) {
+ if (!ext) {
int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
/* try to find new extent to cover this page */
- LASSERT(oio->oi_active == NULL);
+ LASSERT(!oio->oi_active);
/* we may have allocated grant for this page if we failed
- * to expand the previous active extent. */
+ * to expand the previous active extent.
+ */
LASSERT(ergo(grants > 0, grants >= tmp));
rc = 0;
@@ -2359,8 +2399,8 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
osc_unreserve_grant(cli, grants, tmp);
}
- LASSERT(ergo(rc == 0, ext != NULL));
- if (ext != NULL) {
+ LASSERT(ergo(rc == 0, ext));
+ if (ext) {
EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
ext, "index = %lu.\n", index);
LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
@@ -2397,15 +2437,16 @@ int osc_teardown_async_page(const struct lu_env *env,
ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
- * for details. */
- if (ext != NULL && ext->oe_state != OES_TRUNC) {
+ * for details.
+ */
+ if (ext && ext->oe_state != OES_TRUNC) {
OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
oap2cl_page(oap)->cp_index);
rc = -EBUSY;
}
}
osc_object_unlock(obj);
- if (ext != NULL)
+ if (ext)
osc_extent_put(env, ext);
return rc;
}
@@ -2430,7 +2471,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
osc_object_lock(obj);
ext = osc_extent_lookup(obj, index);
- if (ext == NULL) {
+ if (!ext) {
osc_extent_tree_dump(D_ERROR, obj);
LASSERTF(0, "page index %lu is NOT covered.\n", index);
}
@@ -2448,7 +2489,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
* exists a deadlock problem because other process can wait for
* page writeback bit holding page lock; and meanwhile in
* vvp_page_make_ready(), we need to grab page lock before
- * really sending the RPC. */
+ * really sending the RPC.
+ */
case OES_TRUNC:
/* race with truncate, page will be redirtied */
case OES_ACTIVE:
@@ -2456,7 +2498,8 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
* re-dirty the page. If we continued on here, and we were the
* one making the extent active, we could deadlock waiting for
* the page writeback to clear but it won't because the extent
- * is active and won't be written out. */
+ * is active and won't be written out.
+ */
rc = -EAGAIN;
goto out;
default:
@@ -2527,12 +2570,13 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
if (ext->oe_start <= index && ext->oe_end >= index) {
LASSERT(ext->oe_state == OES_LOCK_DONE);
/* For OES_LOCK_DONE state extent, it has already held
- * a refcount for RPC. */
+ * a refcount for RPC.
+ */
found = osc_extent_get(ext);
break;
}
}
- if (found != NULL) {
+ if (found) {
list_del_init(&found->oe_link);
osc_update_pending(obj, cmd, -found->oe_nr_pages);
osc_object_unlock(obj);
@@ -2543,8 +2587,9 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
} else {
osc_object_unlock(obj);
/* ok, it's been put in an rpc. only one oap gets a request
- * reference */
- if (oap->oap_request != NULL) {
+ * reference
+ */
+ if (oap->oap_request) {
ptlrpc_mark_interrupted(oap->oap_request);
ptlrpcd_wake(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
@@ -2579,7 +2624,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
}
ext = osc_extent_alloc(obj);
- if (ext == NULL) {
+ if (!ext) {
list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
list_del_init(&oap->oap_pending_item);
osc_ap_completion(env, cli, oap, 0, -ENOMEM);
@@ -2621,6 +2666,7 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
+ struct osc_extent *temp;
struct osc_extent *waiting = NULL;
pgoff_t index;
LIST_HEAD(list);
@@ -2634,18 +2680,19 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
again:
osc_object_lock(obj);
ext = osc_extent_search(obj, index);
- if (ext == NULL)
+ if (!ext)
ext = first_extent(obj);
else if (ext->oe_end < index)
ext = next_extent(ext);
- while (ext != NULL) {
+ while (ext) {
EASSERT(ext->oe_state != OES_TRUNC, ext);
if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
/* if ext is in urgent state, it means there must exist
* a page already having been flushed by write_page().
* We have to wait for this extent because we can't
- * truncate that page. */
+ * truncate that page.
+ */
LASSERT(!ext->oe_hp);
OSC_EXTENT_DUMP(D_CACHE, ext,
"waiting for busy extent\n");
@@ -2660,7 +2707,8 @@ again:
/* though we grab the inode mutex for the write path, we
* release it before releasing extent(in osc_io_end()),
* so there is a race window that an extent is still
- * in OES_ACTIVE when truncate starts. */
+ * in OES_ACTIVE when truncate starts.
+ */
LASSERT(!ext->oe_trunc_pending);
ext->oe_trunc_pending = 1;
} else {
@@ -2678,14 +2726,14 @@ again:
osc_list_maint(cli, obj);
- while (!list_empty(&list)) {
+ list_for_each_entry_safe(ext, temp, &list, oe_link) {
int rc;
- ext = list_entry(list.next, struct osc_extent, oe_link);
list_del_init(&ext->oe_link);
/* extent may be in OES_ACTIVE state because inode mutex
- * is released before osc_io_end() in file write case */
+ * is released before osc_io_end() in file write case
+ */
if (ext->oe_state != OES_TRUNC)
osc_extent_wait(env, ext, OES_TRUNC);
@@ -2710,19 +2758,21 @@ again:
/* we need to hold this extent in OES_TRUNC state so
* that no writeback will happen. This is to avoid
- * BUG 17397. */
- LASSERT(oio->oi_trunc == NULL);
+ * BUG 17397.
+ */
+ LASSERT(!oio->oi_trunc);
oio->oi_trunc = osc_extent_get(ext);
OSC_EXTENT_DUMP(D_CACHE, ext,
"trunc at %llu\n", size);
}
osc_extent_put(env, ext);
}
- if (waiting != NULL) {
+ if (waiting) {
int rc;
/* ignore the result of osc_extent_wait the write initiator
- * should take care of it. */
+ * should take care of it.
+ */
rc = osc_extent_wait(env, waiting, OES_INV);
if (rc < 0)
OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
@@ -2743,7 +2793,7 @@ void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
struct osc_extent *ext = oio->oi_trunc;
oio->oi_trunc = NULL;
- if (ext != NULL) {
+ if (ext) {
bool unplug = false;
EASSERT(ext->oe_nr_pages > 0, ext);
@@ -2786,11 +2836,11 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
again:
osc_object_lock(obj);
ext = osc_extent_search(obj, index);
- if (ext == NULL)
+ if (!ext)
ext = first_extent(obj);
else if (ext->oe_end < index)
ext = next_extent(ext);
- while (ext != NULL) {
+ while (ext) {
int rc;
if (ext->oe_start > end)
@@ -2841,11 +2891,11 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
osc_object_lock(obj);
ext = osc_extent_search(obj, start);
- if (ext == NULL)
+ if (!ext)
ext = first_extent(obj);
else if (ext->oe_end < start)
ext = next_extent(ext);
- while (ext != NULL) {
+ while (ext) {
if (ext->oe_start > end)
break;
@@ -2864,18 +2914,18 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
ext->oe_urgent = 1;
list = &obj->oo_urgent_exts;
}
- if (list != NULL)
+ if (list)
list_move_tail(&ext->oe_link, list);
unplug = true;
} else {
/* the only discarder is lock cancelling, so
- * [start, end] must contain this extent */
+ * [start, end] must contain this extent
+ */
EASSERT(ext->oe_start >= start &&
ext->oe_max_end <= end, ext);
osc_extent_state_set(ext, OES_LOCKING);
ext->oe_owner = current;
- list_move_tail(&ext->oe_link,
- &discard_list);
+ list_move_tail(&ext->oe_link, &discard_list);
osc_update_pending(obj, OBD_BRW_WRITE,
-ext->oe_nr_pages);
}
@@ -2884,14 +2934,16 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
/* It's pretty bad to wait for ACTIVE extents, because
* we don't know how long we will wait for it to be
* flushed since it may be blocked at awaiting more
- * grants. We do this for the correctness of fsync. */
+ * grants. We do this for the correctness of fsync.
+ */
LASSERT(hp == 0 && discard == 0);
ext->oe_urgent = 1;
break;
case OES_TRUNC:
/* this extent is being truncated, can't do anything
* for it now. it will be set to urgent after truncate
- * is finished in osc_cache_truncate_end(). */
+ * is finished in osc_cache_truncate_end().
+ */
default:
break;
}
@@ -2910,7 +2962,8 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
EASSERT(ext->oe_state == OES_LOCKING, ext);
/* Discard caching pages. We don't actually write this
- * extent out but we complete it as if we did. */
+ * extent out but we complete it as if we did.
+ */
rc = osc_extent_make_ready(env, ext);
if (unlikely(rc < 0)) {
OSC_EXTENT_DUMP(D_ERROR, ext,
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 415c27e4ab66..d55d04d0428b 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -69,10 +69,12 @@ struct osc_io {
/** true if this io is lockless. */
int oi_lockless;
/** active extents, we know how many bytes is going to be written,
- * so having an active extent will prevent it from being fragmented */
+ * so having an active extent will prevent it from being fragmented
+ */
struct osc_extent *oi_active;
/** partially truncated extent, we need to hold this extent to prevent
- * page writeback from happening. */
+ * page writeback from happening.
+ */
struct osc_extent *oi_trunc;
struct obd_info oi_info;
@@ -154,7 +156,8 @@ struct osc_object {
atomic_t oo_nr_writes;
/** Protect extent tree. Will be used to protect
- * oo_{read|write}_pages soon. */
+ * oo_{read|write}_pages soon.
+ */
spinlock_t oo_lock;
};
@@ -472,7 +475,7 @@ static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
struct osc_thread_info *info;
info = lu_context_key_get(&env->le_ctx, &osc_key);
- LASSERT(info != NULL);
+ LASSERT(info);
return info;
}
@@ -481,7 +484,7 @@ static inline struct osc_session *osc_env_session(const struct lu_env *env)
struct osc_session *ses;
ses = lu_context_key_get(env->le_ses, &osc_session_key);
- LASSERT(ses != NULL);
+ LASSERT(ses);
return ses;
}
@@ -522,7 +525,7 @@ static inline struct cl_object *osc2cl(const struct osc_object *obj)
return (struct cl_object *)&obj->oo_cl;
}
-static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode)
+static inline enum ldlm_mode osc_cl_lock2ldlm(enum cl_lock_mode mode)
{
LASSERT(mode == CLM_READ || mode == CLM_WRITE || mode == CLM_GROUP);
if (mode == CLM_READ)
@@ -533,7 +536,7 @@ static inline ldlm_mode_t osc_cl_lock2ldlm(enum cl_lock_mode mode)
return LCK_GROUP;
}
-static inline enum cl_lock_mode osc_ldlm2cl_lock(ldlm_mode_t mode)
+static inline enum cl_lock_mode osc_ldlm2cl_lock(enum ldlm_mode mode)
{
LASSERT(mode == LCK_PR || mode == LCK_PW || mode == LCK_GROUP);
if (mode == LCK_PR)
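[Editor's note: the two helpers above drop the ldlm_mode_t typedef in favor of the spelled-out enum ldlm_mode, matching kernel style, which reserves typedefs for opaque types. The shape of the change in isolation, with invented enum names and values, not Lustre's actual ones:]

enum example_mode {
	EX_MODE_READ,
	EX_MODE_WRITE,
};

/* before: a typedef hid that this is an enum
 *   typedef enum example_mode example_mode_t;
 *   static int is_write(example_mode_t m);
 * after: the tag is visible at every use site */
static int is_write(enum example_mode m)
{
	return m == EX_MODE_WRITE;
}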
@@ -627,22 +630,26 @@ struct osc_extent {
oe_srvlock:1,
oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
- * is released, it will turn into TRUNC state instead of CACHE. */
+ * is released, it will turn into TRUNC state instead of CACHE.
+ */
oe_trunc_pending:1,
/** this extent should be written asap and someone may wait for the
* write to finish. This bit is usually set along with urgent if
* the extent was CACHE state.
* fsync_wait extent can't be merged because new extent region may
- * exceed fsync range. */
+ * exceed fsync range.
+ */
oe_fsync_wait:1,
/** covering lock is being canceled */
oe_hp:1,
/** this extent should be written back asap. set if one of pages is
- * called by page WB daemon, or sync write or reading requests. */
+ * called by page WB daemon, or sync write or reading requests.
+ */
oe_urgent:1;
/** how many grants allocated for this extent.
* Grant allocated for this extent. There is no grant allocated
- * for reading extents and sync write extents. */
+ * for reading extents and sync write extents.
+ */
unsigned int oe_grants;
/** # of dirty pages in this extent */
unsigned int oe_nr_pages;
@@ -655,21 +662,25 @@ struct osc_extent {
struct osc_page *oe_next_page;
/** start and end index of this extent, include start and end
* themselves. Page offset here is the page index of osc_pages.
- * oe_start is used as keyword for red-black tree. */
+ * oe_start is used as keyword for red-black tree.
+ */
pgoff_t oe_start;
pgoff_t oe_end;
/** maximum ending index of this extent, this is limited by
- * max_pages_per_rpc, lock extent and chunk size. */
+ * max_pages_per_rpc, lock extent and chunk size.
+ */
pgoff_t oe_max_end;
/** waitqueue - for those who want to be notified if this extent's
- * state has changed. */
+ * state has changed.
+ */
wait_queue_head_t oe_waitq;
/** lock covering this extent */
struct cl_lock *oe_osclock;
/** terminator of this extent. Must be true if this extent is in IO. */
struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
- * this value can be known by outside world. */
+ * this value can be known by outside world.
+ */
int oe_rc;
/** max pages per rpc when this extent was created */
unsigned int oe_mppr;
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 7078cc57d8b9..d4fe507f165f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -122,8 +122,8 @@ static void *osc_key_init(const struct lu_context *ctx,
{
struct osc_thread_info *info;
- info = kmem_cache_alloc(osc_thread_kmem, GFP_NOFS | __GFP_ZERO);
- if (info == NULL)
+ info = kmem_cache_zalloc(osc_thread_kmem, GFP_NOFS);
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
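[Editor's note: kmem_cache_zalloc() is the slab API's shorthand for an allocation with __GFP_ZERO folded in, so this conversion and the identical ones below are purely cosmetic. Its definition in <linux/slab.h> is essentially:]

static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}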
@@ -147,8 +147,8 @@ static void *osc_session_init(const struct lu_context *ctx,
{
struct osc_session *info;
- info = kmem_cache_alloc(osc_session_kmem, GFP_NOFS | __GFP_ZERO);
- if (info == NULL)
+ info = kmem_cache_zalloc(osc_session_kmem, GFP_NOFS);
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
@@ -228,7 +228,7 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env,
/* Setup OSC OBD */
obd = class_name2obd(lustre_cfg_string(cfg, 0));
- LASSERT(obd != NULL);
+ LASSERT(obd);
rc = osc_setup(obd, cfg);
if (rc) {
osc_device_free(env, d);
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index a4c61463b1c7..ea695c2099ee 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -47,11 +47,13 @@ struct lu_env;
enum async_flags {
ASYNC_READY = 0x1, /* ap_make_ready will not be called before this
- page is added to an rpc */
+ * page is added to an rpc
+ */
ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */
ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
- to give the caller a chance to update
- or cancel the size of the io */
+ * to give the caller a chance to update
+ * or cancel the size of the io
+ */
ASYNC_HP = 0x10,
};
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index abd0beb483fe..6bd0a45d8b06 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -73,7 +73,7 @@ static struct osc_page *osc_cl_page_osc(struct cl_page *page)
const struct cl_page_slice *slice;
slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice != NULL);
+ LASSERT(slice);
return cl2osc_page(slice);
}
@@ -135,7 +135,7 @@ static int osc_io_submit(const struct lu_env *env,
/* Top level IO. */
io = page->cp_owner;
- LASSERT(io != NULL);
+ LASSERT(io);
opg = osc_cl_page_osc(page);
oap = &opg->ops_oap;
@@ -266,13 +266,14 @@ static int osc_io_prepare_write(const struct lu_env *env,
* This implements OBD_BRW_CHECK logic from old client.
*/
- if (imp == NULL || imp->imp_invalid)
+ if (!imp || imp->imp_invalid)
result = -EIO;
if (result == 0 && oio->oi_lockless)
/* this page contains `invalid' data, but who cares?
* nobody can access the invalid data.
* in osc_io_commit_write(), we're going to write exact
- * [from, to) bytes of this page to OST. -jay */
+ * [from, to) bytes of this page to OST. -jay
+ */
cl_page_export(env, slice->cpl_page, 1);
return result;
@@ -349,14 +350,14 @@ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
__u64 start = *(__u64 *)cbdata;
slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice != NULL);
+ LASSERT(slice);
ops = cl2osc_page(slice);
oap = &ops->ops_oap;
if (oap->oap_cmd & OBD_BRW_WRITE &&
!list_empty(&oap->oap_pending_item))
CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
- start, current->comm);
+ start, current->comm);
{
struct page *vmpage = cl_page_vmpage(env, page);
@@ -500,7 +501,7 @@ static void osc_io_setattr_end(const struct lu_env *env,
__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
osc_trunc_check(env, io, oio, size);
- if (oio->oi_trunc != NULL) {
+ if (oio->oi_trunc) {
osc_cache_truncate_end(env, oio, cl2osc(obj));
oio->oi_trunc = NULL;
}
@@ -596,7 +597,8 @@ static int osc_io_fsync_start(const struct lu_env *env,
* send OST_SYNC RPC. This is bad because it causes extents
* to be written osc by osc. However, we usually start
* writeback before CL_FSYNC_ALL so this won't have any real
- * problem. */
+ * problem.
+ */
rc = osc_cache_wait_range(env, osc, start, end);
if (result == 0)
result = rc;
@@ -754,13 +756,12 @@ static void osc_req_attr_set(const struct lu_env *env,
opg = osc_cl_page_osc(apage);
apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
- if (lock == NULL) {
+ if (!lock) {
struct cl_object_header *head;
struct cl_lock *scan;
head = cl_object_header(apage->cp_obj);
- list_for_each_entry(scan, &head->coh_locks,
- cll_linkage)
+ list_for_each_entry(scan, &head->coh_locks, cll_linkage)
CL_LOCK_DEBUG(D_ERROR, env, scan,
"no cover page!\n");
CL_PAGE_DEBUG(D_ERROR, env, apage,
@@ -770,10 +771,9 @@ static void osc_req_attr_set(const struct lu_env *env,
}
olck = osc_lock_at(lock);
- LASSERT(olck != NULL);
- LASSERT(ergo(opg->ops_srvlock, olck->ols_lock == NULL));
+ LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock));
/* check for lockless io. */
- if (olck->ols_lock != NULL) {
+ if (olck->ols_lock) {
oa->o_handle = olck->ols_lock->l_remote_handle;
oa->o_valid |= OBD_MD_FLHANDLE;
}
@@ -803,8 +803,8 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev,
struct osc_req *or;
int result;
- or = kmem_cache_alloc(osc_req_kmem, GFP_NOFS | __GFP_ZERO);
- if (or != NULL) {
+ or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS);
+ if (or) {
cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
result = 0;
} else
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 71f2810d18b9..013df9787f3e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -79,7 +79,7 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
struct ldlm_lock *lock;
lock = ldlm_handle2lock(handle);
- if (lock != NULL)
+ if (lock)
LDLM_LOCK_PUT(lock);
return lock;
}
@@ -94,42 +94,40 @@ static int osc_lock_invariant(struct osc_lock *ols)
int handle_used = lustre_handle_is_used(&ols->ols_handle);
if (ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && ols->ols_lock == NULL))
+ ols->ols_locklessable && !ols->ols_lock))
return 1;
/*
* If all the following "ergo"s are true, return 1, otherwise 0
*/
- if (!ergo(olock != NULL, handle_used))
+ if (!ergo(olock, handle_used))
return 0;
- if (!ergo(olock != NULL,
- olock->l_handle.h_cookie == ols->ols_handle.cookie))
+ if (!ergo(olock, olock->l_handle.h_cookie == ols->ols_handle.cookie))
return 0;
if (!ergo(handle_used,
- ergo(lock != NULL && olock != NULL, lock == olock) &&
- ergo(lock == NULL, olock == NULL)))
+ ergo(lock && olock, lock == olock) &&
+ ergo(!lock, !olock)))
return 0;
/*
* Check that ->ols_handle and ->ols_lock are consistent, but
* take into account that they are set at the different time.
*/
if (!ergo(ols->ols_state == OLS_CANCELLED,
- olock == NULL && !handle_used))
+ !olock && !handle_used))
return 0;
/*
* DLM lock is destroyed only after we have seen cancellation
* ast.
*/
- if (!ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
- ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+ if (!ergo(olock && ols->ols_state < OLS_CANCELLED,
+ ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
return 0;
if (!ergo(ols->ols_state == OLS_GRANTED,
- olock != NULL &&
- olock->l_req_mode == olock->l_granted_mode &&
- ols->ols_hold))
+ olock && olock->l_req_mode == olock->l_granted_mode &&
+ ols->ols_hold))
return 0;
return 1;
}
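[Editor's note: the invariant rewrites above read naturally once the two libcfs helpers are kept in mind. They encode implication and equivalence over truthiness, which is why the explicit != NULL comparisons can be dropped without changing the checks:]

/* libcfs logic helpers (as defined in the Lustre tree) */
#define ergo(a, b) (!(a) || (b))	/* a implies b */
#define equi(a, b) (!!(a) == !!(b))	/* a iff b */

/* e.g. ergo(olock, handle_used): if a DLM lock is attached,
 * the lustre handle must be in use. */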
@@ -149,14 +147,15 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
spin_lock(&osc_ast_guard);
dlmlock = olck->ols_lock;
- if (dlmlock == NULL) {
+ if (!dlmlock) {
spin_unlock(&osc_ast_guard);
return;
}
olck->ols_lock = NULL;
/* wb(); --- for all who checks (ols->ols_lock != NULL) before
- * call to osc_lock_detach() */
+ * call to osc_lock_detach()
+ */
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
spin_unlock(&osc_ast_guard);
@@ -171,7 +170,8 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
/* Must get the value under the lock to avoid possible races. */
old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
/* Update the kms. Need to loop all granted locks.
- * Not a problem for the client */
+ * Not a problem for the client
+ */
attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
cl_object_attr_set(env, obj, attr, CAT_KMS);
@@ -223,8 +223,7 @@ static int osc_lock_unuse(const struct lu_env *env,
/*
* Move lock into OLS_RELEASED state before calling
* osc_cancel_base() so that possible synchronous cancellation
- * (that always happens e.g., for liblustre) sees that lock is
- * released.
+ * sees that lock is released.
*/
ols->ols_state = OLS_RELEASED;
return osc_lock_unhold(ols);
@@ -247,7 +246,7 @@ static void osc_lock_fini(const struct lu_env *env,
* lock is destroyed immediately after upcall.
*/
osc_lock_unhold(ols);
- LASSERT(ols->ols_lock == NULL);
+ LASSERT(!ols->ols_lock);
LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
@@ -292,7 +291,7 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
lock_res_and_lock(dlm_lock);
spin_lock(&osc_ast_guard);
olck = dlm_lock->l_ast_data;
- if (olck != NULL) {
+ if (olck) {
struct cl_lock *lock = olck->ols_cl.cls_lock;
/*
* If osc_lock holds a reference on ldlm lock, return it even
@@ -359,13 +358,13 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
__u64 size;
dlmlock = olck->ols_lock;
- LASSERT(dlmlock != NULL);
/* re-grab LVB from a dlm lock under DLM spin-locks. */
*lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
size = lvb->lvb_size;
/* Extend KMS up to the end of this lock and no further
- * A lock on [x,y] means a KMS of up to y + 1 bytes! */
+ * A lock on [x,y] means a KMS of up to y + 1 bytes!
+ */
if (size > dlmlock->l_policy_data.l_extent.end)
size = dlmlock->l_policy_data.l_extent.end + 1;
if (size >= oinfo->loi_kms) {
@@ -429,7 +428,8 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
* to take a semaphore on a parent lock. This is safe, because
* spin-locks are needed to protect consistency of
* dlmlock->l_*_mode and LVB, and we have finished processing
- * them. */
+ * them.
+ */
unlock_res_and_lock(dlmlock);
cl_lock_modify(env, lock, descr);
cl_lock_signal(env, lock);
@@ -444,12 +444,12 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
struct ldlm_lock *dlmlock;
dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
- LASSERT(dlmlock != NULL);
+ LASSERT(dlmlock);
lock_res_and_lock(dlmlock);
spin_lock(&osc_ast_guard);
LASSERT(dlmlock->l_ast_data == olck);
- LASSERT(olck->ols_lock == NULL);
+ LASSERT(!olck->ols_lock);
olck->ols_lock = dlmlock;
spin_unlock(&osc_ast_guard);
@@ -470,7 +470,8 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
olck->ols_hold = 1;
/* lock reference taken by ldlm_handle2lock_long() is owned by
- * osc_lock and released in osc_lock_detach() */
+ * osc_lock and released in osc_lock_detach()
+ */
lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
olck->ols_has_ref = 1;
}
@@ -508,10 +509,10 @@ static int osc_lock_upcall(void *cookie, int errcode)
struct ldlm_lock *dlmlock;
dlmlock = ldlm_handle2lock(&olck->ols_handle);
- if (dlmlock != NULL) {
+ if (dlmlock) {
lock_res_and_lock(dlmlock);
spin_lock(&osc_ast_guard);
- LASSERT(olck->ols_lock == NULL);
+ LASSERT(!olck->ols_lock);
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
spin_unlock(&osc_ast_guard);
@@ -548,7 +549,8 @@ static int osc_lock_upcall(void *cookie, int errcode)
/* For the AGL case, the RPC sponsor may exit the cl_lock
* processing without wait() called before related OSC
* lock upcall(). So update the lock status according
- * to the enqueue result inside AGL upcall(). */
+ * to the enqueue result inside AGL upcall().
+ */
if (olck->ols_agl) {
lock->cll_flags |= CLF_FROM_UPCALL;
cl_wait_try(env, lock);
@@ -571,7 +573,8 @@ static int osc_lock_upcall(void *cookie, int errcode)
lu_ref_del(&lock->cll_reference, "upcall", lock);
/* This may be the last reference, so must be called after
- * cl_lock_mutex_put(). */
+ * cl_lock_mutex_put().
+ */
cl_lock_put(env, lock);
cl_env_nested_put(&nest, env);
@@ -634,7 +637,7 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
cancel = 0;
olck = osc_ast_data_get(dlmlock);
- if (olck != NULL) {
+ if (olck) {
lock = olck->ols_cl.cls_lock;
cl_lock_mutex_get(env, lock);
LINVRNT(osc_lock_invariant(olck));
@@ -786,17 +789,17 @@ static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
olck = osc_ast_data_get(dlmlock);
- if (olck != NULL) {
+ if (olck) {
lock = olck->ols_cl.cls_lock;
cl_lock_mutex_get(env, lock);
/*
* ldlm_handle_cp_callback() copied LVB from request
* to lock->l_lvb_data, store it in osc_lock.
*/
- LASSERT(dlmlock->l_lvb_data != NULL);
+ LASSERT(dlmlock->l_lvb_data);
lock_res_and_lock(dlmlock);
olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
- if (olck->ols_lock == NULL) {
+ if (!olck->ols_lock) {
/*
* upcall (osc_lock_upcall()) hasn't yet been
* called. Do nothing now, upcall will bind
@@ -850,14 +853,15 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
* environment.
*/
olck = osc_ast_data_get(dlmlock);
- if (olck != NULL) {
+ if (olck) {
lock = olck->ols_cl.cls_lock;
/* Do not grab the mutex of cl_lock for glimpse.
* See LU-1274 for details.
* BTW, it's okay for cl_lock to be cancelled during
* this period because server can handle this race.
* See ldlm_server_glimpse_ast() for details.
- * cl_lock_mutex_get(env, lock); */
+ * cl_lock_mutex_get(env, lock);
+ */
cap = &req->rq_pill;
req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
@@ -1017,7 +1021,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
LASSERT(cl_lock_is_mutexed(lock));
/* make it enqueue anyway for glimpse lock, because we actually
- * don't need to cancel any conflicting locks. */
+ * don't need to cancel any conflicting locks.
+ */
if (olck->ols_glimpse)
return 0;
@@ -1051,7 +1056,8 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
* imagine that client has PR lock on [0, 1000], and thread T0
* is doing lockless IO in [500, 1500] region. Concurrent
* thread T1 can see lockless data in [500, 1000], which is
- * wrong, because these data are possibly stale. */
+ * wrong, because these data are possibly stale.
+ */
if (!lockless && osc_lock_compatible(olck, scan_ols))
continue;
@@ -1074,7 +1080,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
} else {
CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
lock, conflict);
- LASSERT(lock->cll_conflict == NULL);
+ LASSERT(!lock->cll_conflict);
lu_ref_add(&conflict->cll_reference, "cancel-wait",
lock);
lock->cll_conflict = conflict;
@@ -1111,7 +1117,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
"Impossible state: %d\n", ols->ols_state);
LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
- "lock = %p, ols = %p\n", lock, ols);
+ "lock = %p, ols = %p\n", lock, ols);
result = osc_lock_enqueue_wait(env, ols);
if (result == 0) {
@@ -1123,7 +1129,8 @@ static int osc_lock_enqueue(const struct lu_env *env,
struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
/* lock will be passed as upcall cookie,
- * hold ref to prevent to be released. */
+ * hold ref to prevent to be released.
+ */
cl_lock_hold_add(env, lock, "upcall", lock);
/* a user for lock also */
cl_lock_user_add(env, lock);
@@ -1137,12 +1144,12 @@ static int osc_lock_enqueue(const struct lu_env *env,
ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
osc_lock_build_policy(env, lock, policy);
result = osc_enqueue_base(osc_export(obj), resname,
- &ols->ols_flags, policy,
- &ols->ols_lvb,
- obj->oo_oinfo->loi_kms_valid,
- osc_lock_upcall,
- ols, einfo, &ols->ols_handle,
- PTLRPCD_SET, 1, ols->ols_agl);
+ &ols->ols_flags, policy,
+ &ols->ols_lvb,
+ obj->oo_oinfo->loi_kms_valid,
+ osc_lock_upcall,
+ ols, einfo, &ols->ols_handle,
+ PTLRPCD_SET, 1, ols->ols_agl);
if (result != 0) {
cl_lock_user_del(env, lock);
cl_lock_unhold(env, lock, "upcall", lock);
@@ -1174,7 +1181,8 @@ static int osc_lock_wait(const struct lu_env *env,
} else if (olck->ols_agl) {
if (lock->cll_flags & CLF_FROM_UPCALL)
/* It is from enqueue RPC reply upcall for
- * updating state. Do not re-enqueue. */
+ * updating state. Do not re-enqueue.
+ */
return -ENAVAIL;
olck->ols_state = OLS_NEW;
} else {
@@ -1197,7 +1205,7 @@ static int osc_lock_wait(const struct lu_env *env,
}
LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
- lock->cll_error == 0, olck->ols_lock != NULL));
+ lock->cll_error == 0, olck->ols_lock));
return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
}
@@ -1235,7 +1243,8 @@ static int osc_lock_use(const struct lu_env *env,
LASSERT(lock->cll_state == CLS_INTRANSIT);
LASSERT(lock->cll_users > 0);
/* set a flag for osc_dlm_blocking_ast0() to signal the
- * lock.*/
+ * lock.
+ */
olck->ols_ast_wait = 1;
rc = CLO_WAIT;
}
@@ -1257,11 +1266,12 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
if (descr->cld_mode >= CLM_WRITE) {
result = osc_cache_writeback_range(env, obj,
- descr->cld_start, descr->cld_end,
- 1, discard);
+ descr->cld_start,
+ descr->cld_end,
+ 1, discard);
LDLM_DEBUG(ols->ols_lock,
- "lock %p: %d pages were %s.\n", lock, result,
- discard ? "discarded" : "written");
+ "lock %p: %d pages were %s.\n", lock, result,
+ discard ? "discarded" : "written");
if (result > 0)
result = 0;
}
@@ -1306,7 +1316,7 @@ static void osc_lock_cancel(const struct lu_env *env,
LASSERT(cl_lock_is_mutexed(lock));
LINVRNT(osc_lock_invariant(olck));
- if (dlmlock != NULL) {
+ if (dlmlock) {
int do_cancel;
discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
@@ -1318,7 +1328,8 @@ static void osc_lock_cancel(const struct lu_env *env,
/* Now that we're the only user of dlm read/write reference,
* mostly the ->l_readers + ->l_writers should be zero.
* However, there is a corner case.
- * See bug 18829 for details.*/
+ * See bug 18829 for details.
+ */
do_cancel = (dlmlock->l_readers == 0 &&
dlmlock->l_writers == 0);
dlmlock->l_flags |= LDLM_FL_CBPENDING;
@@ -1382,7 +1393,7 @@ static void osc_lock_state(const struct lu_env *env,
if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
struct osc_io *oio = osc_env_io(env);
- LASSERT(lock->ols_owner == NULL);
+ LASSERT(!lock->ols_owner);
lock->ols_owner = oio;
} else if (state != CLS_HELD)
lock->ols_owner = NULL;
@@ -1517,7 +1528,8 @@ static void osc_lock_lockless_state(const struct lu_env *env,
lock->ols_owner = oio;
/* set the io to be lockless if this lock is for io's
- * host object */
+ * host object
+ */
if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
oio->oi_lockless = 1;
}
@@ -1555,8 +1567,8 @@ int osc_lock_init(const struct lu_env *env,
struct osc_lock *clk;
int result;
- clk = kmem_cache_alloc(osc_lock_kmem, GFP_NOFS | __GFP_ZERO);
- if (clk != NULL) {
+ clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
+ if (clk) {
__u32 enqflags = lock->cll_descr.cld_enq_flags;
osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
@@ -1578,8 +1590,8 @@ int osc_lock_init(const struct lu_env *env,
if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
- LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
- lock, clk, clk->ols_flags);
+ LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
+ lock, clk, clk->ols_flags);
result = 0;
} else
@@ -1599,9 +1611,9 @@ int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
* doesn't matter because in the worst case we don't cancel a lock
* which we actually can, that's no harm.
*/
- if (olock != NULL &&
+ if (olock &&
atomic_add_return(_PAGEREF_MAGIC,
- &olock->ols_pageref) != _PAGEREF_MAGIC) {
+ &olock->ols_pageref) != _PAGEREF_MAGIC) {
atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
rc = 1;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index fdd6219aacf6..9d474fcdd9a7 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -113,7 +113,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
LASSERT(list_empty(&osc->oo_write_item));
LASSERT(list_empty(&osc->oo_read_item));
- LASSERT(osc->oo_root.rb_node == NULL);
+ LASSERT(!osc->oo_root.rb_node);
LASSERT(list_empty(&osc->oo_hp_exts));
LASSERT(list_empty(&osc->oo_urgent_exts));
LASSERT(list_empty(&osc->oo_rpc_exts));
@@ -255,8 +255,8 @@ struct lu_object *osc_object_alloc(const struct lu_env *env,
struct osc_object *osc;
struct lu_object *obj;
- osc = kmem_cache_alloc(osc_object_kmem, GFP_NOFS | __GFP_ZERO);
- if (osc != NULL) {
+ osc = kmem_cache_zalloc(osc_object_kmem, GFP_NOFS);
+ if (osc) {
obj = osc2lu(osc);
lu_object_init(obj, NULL, dev);
osc->oo_cl.co_ops = &osc_ops;
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 2439d804fe75..ce9ddd515f64 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -51,111 +51,12 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
* @{
*/
-/*
- * Comment out osc_page_protected because it may sleep inside the
- * the client_obd_list_lock.
- * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
- * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
- * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
- */
-#if 0
-static int osc_page_is_dlocked(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int pending, int unref)
-{
- struct cl_page *page;
- struct osc_object *obj;
- struct osc_thread_info *info;
- struct ldlm_res_id *resname;
- struct lustre_handle *lockh;
- ldlm_policy_data_t *policy;
- ldlm_mode_t dlmmode;
- __u64 flags;
-
- might_sleep();
-
- info = osc_env_info(env);
- resname = &info->oti_resname;
- policy = &info->oti_policy;
- lockh = &info->oti_handle;
- page = opg->ops_cl.cpl_page;
- obj = cl2osc(opg->ops_cl.cpl_obj);
-
- flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
- if (pending)
- flags |= LDLM_FL_CBPENDING;
-
- dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
- osc_lock_build_res(env, obj, resname);
- osc_index2policy(policy, page->cp_obj, page->cp_index, page->cp_index);
- return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
- dlmmode, &flags, NULL, lockh, unref);
-}
-
-/**
- * Checks an invariant that a page in the cache is covered by a lock, as
- * needed.
- */
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- struct cl_object_header *hdr;
- struct cl_lock *scan;
- struct cl_page *page;
- struct cl_lock_descr *descr;
- int result;
-
- LINVRNT(!opg->ops_temp);
-
- page = opg->ops_cl.cpl_page;
- if (page->cp_owner != NULL &&
- cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
- /*
- * If IO is done without locks (liblustre, or lloop), lock is
- * not required.
- */
- result = 1;
- else
- /* otherwise check for a DLM lock */
- result = osc_page_is_dlocked(env, opg, mode, 1, unref);
- if (result == 0) {
- /* maybe this page is a part of a lockless io? */
- hdr = cl_object_header(opg->ops_cl.cpl_obj);
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_mode = mode;
- descr->cld_start = page->cp_index;
- descr->cld_end = page->cp_index;
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- /*
- * Lock-less sub-lock has to be either in HELD state
- * (when io is actively going on), or in CACHED state,
- * when top-lock is being unlocked:
- * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
- */
- if ((scan->cll_state == CLS_HELD ||
- scan->cll_state == CLS_CACHED) &&
- cl_lock_ext_match(&scan->cll_descr, descr)) {
- struct osc_lock *olck;
-
- olck = osc_lock_at(scan);
- result = osc_lock_is_lockless(olck);
- break;
- }
- }
- spin_unlock(&hdr->coh_lock_guard);
- }
- return result;
-}
-#else
static int osc_page_protected(const struct lu_env *env,
const struct osc_page *opg,
enum cl_lock_mode mode, int unref)
{
return 1;
}
-#endif
/*****************************************************************************
*
@@ -168,7 +69,7 @@ static void osc_page_fini(const struct lu_env *env,
struct osc_page *opg = cl2osc_page(slice);
CDEBUG(D_TRACE, "%p\n", opg);
- LASSERT(opg->ops_lock == NULL);
+ LASSERT(!opg->ops_lock);
}
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
@@ -204,7 +105,8 @@ static void osc_page_transfer_add(const struct lu_env *env,
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
/* ops_lru and ops_inflight share the same field, so take it from LRU
- * first and then use it as inflight. */
+ * first and then use it as inflight.
+ */
osc_lru_del(osc_cli(obj), opg, false);
spin_lock(&obj->oo_seatbelt);
@@ -232,9 +134,10 @@ static int osc_page_cache_add(const struct lu_env *env,
/* for sync write, kernel will wait for this page to be flushed before
* osc_io_end() is called, so release it earlier.
- * for mkwrite(), it's known there is no further pages. */
+ * for mkwrite(), it's known there are no further pages.
+ */
if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
- if (oio->oi_active != NULL) {
+ if (oio->oi_active) {
osc_extent_release(env, oio->oi_active);
oio->oi_active = NULL;
}
@@ -258,7 +161,7 @@ static int osc_page_addref_lock(const struct lu_env *env,
struct osc_lock *olock;
int rc;
- LASSERT(opg->ops_lock == NULL);
+ LASSERT(!opg->ops_lock);
olock = osc_lock_at(lock);
if (atomic_inc_return(&olock->ols_pageref) <= 0) {
@@ -278,7 +181,7 @@ static void osc_page_putref_lock(const struct lu_env *env,
struct cl_lock *lock = opg->ops_lock;
struct osc_lock *olock;
- LASSERT(lock != NULL);
+ LASSERT(lock);
olock = osc_lock_at(lock);
atomic_dec(&olock->ols_pageref);
@@ -296,7 +199,7 @@ static int osc_page_is_under_lock(const struct lu_env *env,
lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
NULL, 1, 0);
- if (lock != NULL) {
+ if (lock) {
if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
result = -EBUSY;
cl_lock_put(env, lock);
@@ -424,7 +327,7 @@ static void osc_page_delete(const struct lu_env *env,
}
spin_lock(&obj->oo_seatbelt);
- if (opg->ops_submitter != NULL) {
+ if (opg->ops_submitter) {
LASSERT(!list_empty(&opg->ops_inflight));
list_del_init(&opg->ops_inflight);
opg->ops_submitter = NULL;
@@ -434,8 +337,8 @@ static void osc_page_delete(const struct lu_env *env,
osc_lru_del(osc_cli(obj), opg, true);
}
-void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
- int from, int to)
+static void osc_page_clip(const struct lu_env *env,
+ const struct cl_page_slice *slice, int from, int to)
{
struct osc_page *opg = cl2osc_page(slice);
struct osc_async_page *oap = &opg->ops_oap;
@@ -458,7 +361,8 @@ static int osc_page_cancel(const struct lu_env *env,
LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
/* Check if the transferring against this page
- * is completed, or not even queued. */
+ * is completed, or not even queued.
+ */
if (opg->ops_transfer_pinned)
/* FIXME: may not be interrupted.. */
rc = osc_cancel_async_page(env, opg);
@@ -499,30 +403,30 @@ static const struct cl_page_operations osc_page_ops = {
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, struct page *vmpage)
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
int result;
opg->ops_from = 0;
- opg->ops_to = PAGE_CACHE_SIZE;
+ opg->ops_to = PAGE_SIZE;
result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
+ cl_offset(obj, page->cp_index));
if (result == 0) {
struct osc_io *oio = osc_env_io(env);
opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj,
- &osc_page_ops);
+ cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
}
/*
* Cannot assert osc_page_protected() here as read-ahead
* creates temporary pages outside of a lock.
*/
/* ops_inflight and ops_lru are the same field, but it doesn't
- * hurt to initialize it twice :-) */
+ * hurt to initialize it twice :-)
+ */
INIT_LIST_HEAD(&opg->ops_inflight);
INIT_LIST_HEAD(&opg->ops_lru);
@@ -557,7 +461,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE)) {
+ capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
oap->oap_cmd |= OBD_BRW_NOQUOTA;
}
@@ -581,16 +485,18 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
- * number of pages to avoid running out of LRU budget, and.. */
-static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
+ * number of pages to avoid running out of LRU budget, and..
+ */
+static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
/* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
+static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
* step to maintain fairness among OSCs.
*
- * Return how many LRU pages should be freed. */
+ * Return how many LRU pages should be freed.
+ */
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
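The two constants are byte sizes expressed in pages: N << (20 - PAGE_SHIFT) is N MiB divided by the page size, so the rename from PAGE_CACHE_SHIFT changes nothing numerically. A standalone check of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed: 4 KiB pages, the common x86-64 value */

int main(void)
{
        int lru_shrink_min = 2 << (20 - PAGE_SHIFT);    /* 2 MiB in pages */
        int lru_shrink_max = 32 << (20 - PAGE_SHIFT);   /* 32 MiB in pages */

        printf("min=%d max=%d\n", lru_shrink_min, lru_shrink_max);
        /* with 4 KiB pages: min=512, max=8192 */
        return 0;
}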
@@ -602,7 +508,8 @@ static int osc_cache_too_much(struct client_obd *cli)
return min(pages, lru_shrink_max);
/* if it's going to run out LRU slots, we should free some, but not
- * too much to maintain fairness among OSCs. */
+ * too much to maintain fairness among OSCs.
+ */
if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
unsigned long tmp;
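osc_cache_too_much() starts freeing once fewer than a sixteenth of the LRU budget remains (cl_lru_left < ccc_lru_max >> 4). The threshold worked through with illustrative numbers:

#include <stdio.h>

int main(void)
{
        long lru_max = 65536;   /* illustrative LRU budget, in pages */
        long lru_left = 3000;   /* free slots remaining */

        /* shrink once fewer than 1/16 of the slots remain */
        if (lru_left < lru_max >> 4)
                printf("shrink: %ld of %ld slots left (< %ld)\n",
                       lru_left, lru_max, lru_max >> 4);
        return 0;
}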
@@ -630,7 +537,8 @@ static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
/* free LRU page only if nobody is using it.
* This check is necessary to avoid freeing the pages
* having already been removed from LRU and pinned
- * for IO. */
+ * for IO.
+ */
if (!cl_page_in_use(page)) {
cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
@@ -655,6 +563,7 @@ int osc_lru_shrink(struct client_obd *cli, int target)
struct cl_object *clobj = NULL;
struct cl_page **pvec;
struct osc_page *opg;
+ struct osc_page *temp;
int maxscan = 0;
int count = 0;
int index = 0;
@@ -674,28 +583,26 @@ int osc_lru_shrink(struct client_obd *cli, int target)
client_obd_list_lock(&cli->cl_lru_list_lock);
atomic_inc(&cli->cl_lru_shrinkers);
maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
- while (!list_empty(&cli->cl_lru_list)) {
+ list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
struct cl_page *page;
if (--maxscan < 0)
break;
- opg = list_entry(cli->cl_lru_list.next, struct osc_page,
- ops_lru);
page = cl_page_top(opg->ops_cl.cpl_page);
if (cl_page_in_use_noref(page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
}
- LASSERT(page->cp_obj != NULL);
+ LASSERT(page->cp_obj);
if (clobj != page->cp_obj) {
struct cl_object *tmp = page->cp_obj;
cl_object_get(tmp);
client_obd_list_unlock(&cli->cl_lru_list_lock);
- if (clobj != NULL) {
+ if (clobj) {
count -= discard_pagevec(env, io, pvec, index);
index = 0;
@@ -720,11 +627,13 @@ int osc_lru_shrink(struct client_obd *cli, int target)
/* move this page to the end of list as it will be discarded
* soon. The page will be finally removed from LRU list in
- * osc_page_delete(). */
+ * osc_page_delete().
+ */
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
/* it's okay to grab a refcount here w/o holding lock because
- * it has to grab cl_lru_list_lock to delete the page. */
+ * it has to grab cl_lru_list_lock to delete the page.
+ */
cl_page_get(page);
pvec[index++] = page;
if (++count >= target)
@@ -740,7 +649,7 @@ int osc_lru_shrink(struct client_obd *cli, int target)
}
client_obd_list_unlock(&cli->cl_lru_list_lock);
- if (clobj != NULL) {
+ if (clobj) {
count -= discard_pagevec(env, io, pvec, index);
cl_io_fini(env, io);
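The rewritten loop replaces open-coded list_entry(cli->cl_lru_list.next, ...) with list_for_each_entry_safe(), which caches the next node so the body may unlink, move, or free the current one. A self-contained sketch of the same traversal shape, with a hand-rolled singly linked list standing in for list_head:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

int main(void)
{
        struct node *head = NULL, *n, *tmp;
        int i;

        for (i = 0; i < 4; i++) {
                n = malloc(sizeof(*n));
                if (!n)
                        return 1;
                n->val = i;
                n->next = head;
                head = n;
        }

        /* "safe" iteration: grab ->next before the body frees n */
        for (n = head, tmp = n ? n->next : NULL; n;
             n = tmp, tmp = n ? n->next : NULL) {
                printf("freeing %d\n", n->val);
                free(n);        /* safe: tmp was saved before the free */
        }
        return 0;
}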
@@ -775,7 +684,8 @@ static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
}
/* delete page from LRUlist. The page can be deleted from LRUlist for two
- * reasons: redirtied or deleted from page cache. */
+ * reasons: redirtied or deleted from page cache.
+ */
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
{
if (opg->ops_in_lru) {
@@ -797,7 +707,8 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
* this osc occupies too many LRU pages and kernel is
* stealing one of them.
* cl_lru_shrinkers is to avoid recursive call in case
- * we're already in the context of osc_lru_shrink(). */
+ * we're already in the context of osc_lru_shrink().
+ */
if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
!memory_pressure_get())
osc_lru_shrink(cli, osc_cache_too_much(cli));
@@ -819,22 +730,23 @@ static int osc_lru_reclaim(struct client_obd *cli)
int max_scans;
int rc;
- LASSERT(cache != NULL);
+ LASSERT(cache);
rc = osc_lru_shrink(cli, lru_shrink_min);
if (rc != 0) {
CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
- cli->cl_import->imp_obd->obd_name, rc, cli);
+ cli->cl_import->imp_obd->obd_name, rc, cli);
return rc;
}
CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ cli->cl_import->imp_obd->obd_name, cli,
+ atomic_read(&cli->cl_lru_in_list),
+ atomic_read(&cli->cl_lru_busy));
/* Reclaim LRU slots from other client_obd as it can't free enough
- * from its own. This should rarely happen. */
+ * from its own. This should rarely happen.
+ */
spin_lock(&cache->ccc_lru_lock);
LASSERT(!list_empty(&cache->ccc_lru));
@@ -844,12 +756,12 @@ static int osc_lru_reclaim(struct client_obd *cli)
max_scans = atomic_read(&cache->ccc_users);
while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
cli = list_entry(cache->ccc_lru.next, struct client_obd,
- cl_lru_osc);
+ cl_lru_osc);
CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
- cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ cli->cl_import->imp_obd->obd_name, cli,
+ atomic_read(&cli->cl_lru_in_list),
+ atomic_read(&cli->cl_lru_busy));
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
if (atomic_read(&cli->cl_lru_in_list) > 0) {
@@ -864,7 +776,7 @@ static int osc_lru_reclaim(struct client_obd *cli)
spin_unlock(&cache->ccc_lru_lock);
CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
- cli->cl_import->imp_obd->obd_name, cli, rc);
+ cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
}
@@ -875,7 +787,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct client_obd *cli = osc_cli(obj);
int rc = 0;
- if (cli->cl_cache == NULL) /* shall not be in LRU */
+ if (!cli->cl_cache) /* shall not be in LRU */
return 0;
LASSERT(atomic_read(cli->cl_lru_left) >= 0);
@@ -892,15 +804,16 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
cond_resched();
/* slowest case, all cached pages are busy, notifying
- * other OSCs that we're lack of LRU slots. */
+ * other OSCs that we are short of LRU slots.
+ */
atomic_inc(&osc_lru_waiters);
gen = atomic_read(&cli->cl_lru_in_list);
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0 ||
- (atomic_read(&cli->cl_lru_in_list) > 0 &&
- gen != atomic_read(&cli->cl_lru_in_list)),
- &lwi);
+ atomic_read(cli->cl_lru_left) > 0 ||
+ (atomic_read(&cli->cl_lru_in_list) > 0 &&
+ gen != atomic_read(&cli->cl_lru_in_list)),
+ &lwi);
atomic_dec(&osc_lru_waiters);
if (rc < 0)
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index e70e7961d763..194d8ede40a2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -13,11 +13,6 @@
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA
- *
* GPL HEADER END
*/
/*
@@ -35,8 +30,8 @@ static inline struct osc_quota_info *osc_oqi_alloc(u32 id)
{
struct osc_quota_info *oqi;
- oqi = kmem_cache_alloc(osc_quota_kmem, GFP_NOFS | __GFP_ZERO);
- if (oqi != NULL)
+ oqi = kmem_cache_zalloc(osc_quota_kmem, GFP_NOFS);
+ if (oqi)
oqi->oqi_id = id;
return oqi;
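kmem_cache_zalloc(cache, gfp) is shorthand for kmem_cache_alloc(cache, gfp | __GFP_ZERO), so the conversion is purely cosmetic. A userspace analogue of the same cleanup, trading malloc plus memset for calloc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct osc_quota_info { unsigned int oqi_id; };

static struct osc_quota_info *oqi_alloc_old(void)
{
        struct osc_quota_info *oqi = malloc(sizeof(*oqi));

        if (oqi)
                memset(oqi, 0, sizeof(*oqi));   /* explicit zeroing */
        return oqi;
}

static struct osc_quota_info *oqi_alloc_new(void)
{
        /* zeroing built into the allocator, as with kmem_cache_zalloc() */
        return calloc(1, sizeof(struct osc_quota_info));
}

int main(void)
{
        struct osc_quota_info *a = oqi_alloc_old();
        struct osc_quota_info *b = oqi_alloc_new();

        printf("%u %u\n", a ? a->oqi_id : 0, b ? b->oqi_id : 0);  /* 0 0 */
        free(a);
        free(b);
        return 0;
}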
@@ -52,10 +47,12 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
if (oqi) {
/* do not try to access oqi here, it could have been
- * freed by osc_quota_setdq() */
+ * freed by osc_quota_setdq()
+ */
/* the slot is busy, the user is about to run out of
- * quota space on this OST */
+ * quota space on this OST
+ */
CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
type == USRQUOTA ? "user" : "group", qid[type]);
return NO_QUOTA;
@@ -89,12 +86,13 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
oqi = cfs_hash_lookup(cli->cl_quota_hash[type], &qid[type]);
if ((flags & FL_QUOTA_FLAG(type)) != 0) {
/* This ID is getting close to its quota limit, let's
- * switch to sync I/O */
- if (oqi != NULL)
+ * switch to sync I/O
+ */
+ if (oqi)
continue;
oqi = osc_oqi_alloc(qid[type]);
- if (oqi == NULL) {
+ if (!oqi) {
rc = -ENOMEM;
break;
}
@@ -113,8 +111,9 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
qid[type], rc);
} else {
/* This ID is now off the hook, let's remove it from
- * the hash table */
- if (oqi == NULL)
+ * the hash table
+ */
+ if (!oqi)
continue;
oqi = cfs_hash_del_key(cli->cl_quota_hash[type],
@@ -147,7 +146,7 @@ oqi_keycmp(const void *key, struct hlist_node *hnode)
struct osc_quota_info *oqi;
u32 uid;
- LASSERT(key != NULL);
+ LASSERT(key);
uid = *((u32 *)key);
oqi = hlist_entry(hnode, struct osc_quota_info, oqi_hash);
@@ -218,7 +217,7 @@ int osc_quota_setup(struct obd_device *obd)
CFS_HASH_MAX_THETA,
&quota_hash_ops,
CFS_HASH_DEFAULT);
- if (cli->cl_quota_hash[type] == NULL)
+ if (!cli->cl_quota_hash[type])
break;
}
@@ -252,7 +251,7 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
OST_QUOTACTL);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
@@ -294,7 +293,7 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
OST_QUOTACHECK);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
@@ -302,8 +301,8 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
ptlrpc_request_set_replen(req);
- /* the next poll will find -ENODATA, that means quotacheck is
- * going on */
+ /* the next poll will find -ENODATA, which means quotacheck is in progress
+ */
cli->cl_qchk_stat = -ENODATA;
rc = ptlrpc_queue_wait(req);
if (rc)
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 7034f0a942c5..30526ebcad04 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -104,7 +104,6 @@ struct osc_enqueue_args {
static void osc_release_ppga(struct brw_page **ppga, u32 count);
static int brw_interpret(const struct lu_env *env,
struct ptlrpc_request *req, void *data, int rc);
-static int osc_cleanup(struct obd_device *obd);
/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
@@ -113,18 +112,18 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
int lmm_size;
lmm_size = sizeof(**lmmp);
- if (lmmp == NULL)
+ if (!lmmp)
return lmm_size;
- if (*lmmp != NULL && lsm == NULL) {
+ if (*lmmp && !lsm) {
kfree(*lmmp);
*lmmp = NULL;
return 0;
- } else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) {
+ } else if (unlikely(lsm && ostid_id(&lsm->lsm_oi) == 0)) {
return -EBADF;
}
- if (*lmmp == NULL) {
+ if (!*lmmp) {
*lmmp = kzalloc(lmm_size, GFP_NOFS);
if (!*lmmp)
return -ENOMEM;
@@ -143,7 +142,7 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
int lsm_size;
struct obd_import *imp = class_exp2cliimp(exp);
- if (lmm != NULL) {
+ if (lmm) {
if (lmm_bytes < sizeof(*lmm)) {
CERROR("%s: lov_mds_md too small: %d, need %d\n",
exp->exp_obd->obd_name, lmm_bytes,
@@ -160,23 +159,23 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
}
lsm_size = lov_stripe_md_size(1);
- if (lsmp == NULL)
+ if (!lsmp)
return lsm_size;
- if (*lsmp != NULL && lmm == NULL) {
+ if (*lsmp && !lmm) {
kfree((*lsmp)->lsm_oinfo[0]);
kfree(*lsmp);
*lsmp = NULL;
return 0;
}
- if (*lsmp == NULL) {
+ if (!*lsmp) {
*lsmp = kzalloc(lsm_size, GFP_NOFS);
- if (unlikely(*lsmp == NULL))
+ if (unlikely(!*lsmp))
return -ENOMEM;
(*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
GFP_NOFS);
- if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) {
+ if (unlikely(!(*lsmp)->lsm_oinfo[0])) {
kfree(*lsmp);
return -ENOMEM;
}
@@ -185,11 +184,11 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
return -EBADF;
}
- if (lmm != NULL)
+ if (lmm)
/* XXX zero *lsmp? */
ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
- if (imp != NULL &&
+ if (imp &&
(imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
(*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
else
@@ -246,7 +245,7 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
@@ -276,7 +275,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
@@ -294,7 +293,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
goto out;
body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EPROTO;
goto out;
}
@@ -321,7 +320,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
@@ -339,7 +338,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
goto out;
body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EPROTO;
goto out;
}
@@ -362,7 +361,7 @@ static int osc_setattr_interpret(const struct lu_env *env,
goto out;
body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EPROTO;
goto out;
}
@@ -384,7 +383,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
@@ -451,7 +450,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa,
}
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out;
}
@@ -482,7 +481,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa,
goto out_req;
body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EPROTO;
goto out_req;
}
@@ -500,7 +499,7 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa,
lsm->lsm_oi = oa->o_oi;
*ea = lsm;
- if (oti != NULL) {
+ if (oti) {
oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
if (oa->o_valid & OBD_MD_FLCOOKIE) {
@@ -530,7 +529,7 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
@@ -573,7 +572,7 @@ static int osc_sync_interpret(const struct lu_env *env,
goto out;
body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
+ if (!body) {
CERROR("can't unpack ost_body\n");
rc = -EPROTO;
goto out;
@@ -595,7 +594,7 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
@@ -629,10 +628,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
/* Find and cancel locally locks matched by @mode in the resource found by
* @objid. Found locks are added into @cancel list. Returns the amount of
- * locks added to @cancels list. */
+ * locks added to @cancels list.
+ */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
struct list_head *cancels,
- ldlm_mode_t mode, __u64 lock_flags)
+ enum ldlm_mode mode, __u64 lock_flags)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
struct ldlm_res_id res_id;
@@ -644,13 +644,14 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
*
* This distinguishes from a case when ELC is not supported originally,
* when we still want to cancel locks in advance and just cancel them
- * locally, without sending any RPC. */
+ * locally, without sending any RPC.
+ */
if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
return 0;
ostid_build_res_name(&oa->o_oi, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (res == NULL)
+ if (!res)
return 0;
LDLM_RESOURCE_ADDREF(res);
@@ -723,7 +724,8 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
* If the client dies, or the OST is down when the object should be destroyed,
* the records are not cancelled, and when the OST reconnects to the MDS next,
* it will retrieve the llog unlink logs and then send the log cancellation
- * cookies to the MDS after committing destroy transactions. */
+ * cookies to the MDS after committing destroy transactions.
+ */
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
struct obdo *oa, struct lov_stripe_md *ea,
struct obd_trans_info *oti, struct obd_export *md_export)
@@ -743,7 +745,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
LDLM_FL_DISCARD_DATA);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
- if (req == NULL) {
+ if (!req) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
return -ENOMEM;
}
@@ -758,7 +760,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
ptlrpc_at_set_req_timeout(req);
- if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
+ if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
oa->o_lcookie = *oti->oti_logcookies;
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
@@ -769,7 +771,8 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
/* If osc_destroy is for destroying the unlink orphan,
* sent from MDT to OST, which should not be blocked here,
* because the process might be triggered by ptlrpcd, and
- * it is not good to block ptlrpcd thread (b=16006)*/
+ * it is not good to block ptlrpcd thread (b=16006)
+ */
if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
req->rq_interpret_reply = osc_destroy_interpret;
if (!osc_can_send_destroy(cli)) {
@@ -810,7 +813,8 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
(long)(obd_max_dirty_pages + 1))) {
/* The atomic_read() allowing the atomic_inc() are
* not covered by a lock thus they may safely race and trip
- * this CERROR() unless we add in a small fudge factor (+1). */
+ * this CERROR() unless we add in a small fudge factor (+1).
+ */
CERROR("dirty %d - %d > system dirty_max %d\n",
atomic_read(&obd_dirty_pages),
atomic_read(&obd_dirty_transit_pages),
@@ -822,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
oa->o_undirty = 0;
} else {
long max_in_flight = (cli->cl_max_pages_per_rpc <<
- PAGE_CACHE_SHIFT)*
+ PAGE_SHIFT)*
(cli->cl_max_rpcs_in_flight + 1);
oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
}
@@ -839,7 +843,7 @@ void osc_update_next_shrink(struct client_obd *cli)
{
cli->cl_next_shrink_grant =
cfs_time_shift(cli->cl_grant_shrink_interval);
- CDEBUG(D_CACHE, "next time %ld to shrink grant \n",
+ CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
cli->cl_next_shrink_grant);
}
@@ -900,15 +904,16 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
/* Shrink the current grant, either from some large amount to enough for a
* full set of in-flight RPCs, or if we have already shrunk to that limit
* then to enough for a single RPC. This avoids keeping more grant than
- * needed, and avoids shrinking the grant piecemeal. */
+ * needed, and avoids shrinking the grant piecemeal.
+ */
static int osc_shrink_grant(struct client_obd *cli)
{
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
- (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
+ (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
client_obd_list_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
client_obd_list_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
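The shrink target keeps just enough grant for a full set of in-flight RPCs plus one spare. With illustrative values (1 MiB RPCs, 8 in flight, 4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

int main(void)
{
        long max_pages_per_rpc = 256;   /* illustrative: 1 MiB RPCs */
        long max_rpcs_in_flight = 8;

        /* enough grant for a full set of in-flight RPCs, plus one */
        long target_bytes = (max_rpcs_in_flight + 1) *
                            (max_pages_per_rpc << PAGE_SHIFT);

        printf("shrink grant to %ld bytes\n", target_bytes);  /* 9437184 */
        return 0;
}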
@@ -922,9 +927,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
client_obd_list_lock(&cli->cl_loi_list_lock);
/* Don't shrink if we are already above or below the desired limit
* We don't want to shrink below a single RPC, as that will negatively
- * impact block allocation and long-term performance. */
- if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
- target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ * impact block allocation and long-term performance.
+ */
+ if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
+ target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
client_obd_list_unlock(&cli->cl_loi_list_lock);
@@ -970,8 +976,9 @@ static int osc_should_shrink_grant(struct client_obd *client)
if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
/* Get the current RPC size directly, instead of going via:
* cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
- * Keep comment here so that it can be found by searching. */
- int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
+ * Keep comment here so that it can be found by searching.
+ */
+ int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
client->cl_avail_grant > brw_size)
@@ -986,8 +993,7 @@ static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
{
struct client_obd *client;
- list_for_each_entry(client, &item->ti_obd_list,
- cl_grant_shrink_list) {
+ list_for_each_entry(client, &item->ti_obd_list, cl_grant_shrink_list) {
if (osc_should_shrink_grant(client))
osc_shrink_grant(client);
}
@@ -1004,10 +1010,10 @@ static int osc_add_shrink_grant(struct client_obd *client)
&client->cl_grant_shrink_list);
if (rc) {
CERROR("add grant client %s error %d\n",
- client->cl_import->imp_obd->obd_name, rc);
+ client->cl_import->imp_obd->obd_name, rc);
return rc;
}
- CDEBUG(D_CACHE, "add grant client %s \n",
+ CDEBUG(D_CACHE, "add grant client %s\n",
client->cl_import->imp_obd->obd_name);
osc_update_next_shrink(client);
return 0;
@@ -1040,12 +1046,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
ocd->ocd_grant, cli->cl_dirty);
/* workaround for servers which do not have the patch from
- * LU-2679 */
+ * LU-2679
+ */
cli->cl_avail_grant = ocd->ocd_grant;
}
/* determine the appropriate chunk size used by osc_extent. */
- cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
+ cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
client_obd_list_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@@ -1060,7 +1067,8 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
/* We assume that the reason this OSC got a short read is because it read
* beyond the end of a stripe file; i.e. lustre is reading a sparse file
* via the LOV, and it _knows_ it's reading inside the file, it's just that
- * this stripe never got written at or beyond this stripe offset yet. */
+ * this stripe never got written at or beyond this stripe offset yet.
+ */
static void handle_short_read(int nob_read, u32 page_count,
struct brw_page **pga)
{
@@ -1106,7 +1114,7 @@ static int check_write_rcs(struct ptlrpc_request *req,
remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
sizeof(*remote_rcs) *
niocount);
- if (remote_rcs == NULL) {
+ if (!remote_rcs) {
CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
return -EPROTO;
}
@@ -1118,7 +1126,7 @@ static int check_write_rcs(struct ptlrpc_request *req,
if (remote_rcs[i] != 0) {
CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
- i, remote_rcs[i], req);
+ i, remote_rcs[i], req);
return -EPROTO;
}
}
@@ -1139,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);
/* warn if we try to combine flags that we don't know to be
- * safe to combine */
+ * safe to combine
+ */
if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
CWARN("Saw flags 0x%x and 0x%x in the same brw, please report this at http://bugs.whamcloud.com/\n",
p1->flag, p2->flag);
@@ -1152,7 +1161,7 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
static u32 osc_checksum_bulk(int nob, u32 pg_count,
struct brw_page **pga, int opc,
- cksum_type_t cksum_type)
+ enum cksum_type cksum_type)
{
__u32 cksum;
int i = 0;
@@ -1174,7 +1183,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
int count = pga[i]->count > nob ? nob : pga[i]->count;
/* corrupt the data before we compute the checksum, to
- * simulate an OST->client data error */
+ * simulate an OST->client data error
+ */
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
unsigned char *ptr = kmap(pga[i]->pg);
@@ -1184,7 +1194,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
kunmap(pga[i]->pg);
}
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
- pga[i]->off & ~CFS_PAGE_MASK,
+ pga[i]->off & ~CFS_PAGE_MASK,
count);
CDEBUG(D_PAGE,
"page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
@@ -1205,7 +1215,8 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
cfs_crypto_hash_final(hdesc, NULL, NULL);
/* For sending we only compute the wrong checksum instead
- * of corrupting the data so it is still correct on a redo */
+ * of corrupting the data so it is still correct on a redo
+ */
if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
cksum++;
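Both fault-injection branches exploit the same property: flipping even one byte changes the checksum, so the peer detects the mismatch and the BRW is redone. A toy demonstration with a stand-in hash (the real code uses the cfs_crypto routines):

#include <stdio.h>

/* Toy stand-in for the bulk checksum; any hash works for the point
 * being made here.
 */
static unsigned int toy_cksum(const unsigned char *p, size_t n)
{
        unsigned int c = 0;

        while (n--)
                c = c * 31 + *p++;
        return c;
}

int main(void)
{
        unsigned char page[64] = "lustre bulk payload";
        unsigned int good = toy_cksum(page, sizeof(page));

        page[0] ^= 0xa5;        /* simulate one corrupted byte */
        printf("match=%d\n", good == toy_cksum(page, sizeof(page)));  /* 0 */
        return 0;
}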
@@ -1244,7 +1255,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
opc = OST_READ;
req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
}
- if (req == NULL)
+ if (!req)
return -ENOMEM;
for (niocount = i = 1; i < page_count; i++) {
@@ -1266,7 +1277,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
ptlrpc_at_set_req_timeout(req);
/* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
- * retry logic */
+ * retry logic
+ */
req->rq_no_retry_einprogress = 1;
desc = ptlrpc_prep_bulk_imp(req, page_count,
@@ -1274,7 +1286,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
OST_BULK_PORTAL);
- if (desc == NULL) {
+ if (!desc) {
rc = -ENOMEM;
goto out;
}
@@ -1283,7 +1295,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
body = req_capsule_client_get(pill, &RMF_OST_BODY);
ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
- LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
+ LASSERT(body && ioobj && niobuf);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
@@ -1293,7 +1305,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
* that might be sent for this request. The actual number is decided
* when the RPC is finally sent in ptlrpc_register_bulk(). It sends
* "max - 1" for old client compatibility sending "0", and also so the
- * the actual maximum is a power-of-two number, not one less. LU-1431 */
+ * actual maximum is a power-of-two number, not one less. LU-1431
+ */
ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
LASSERT(page_count > 0);
pg_prev = pga[0];
@@ -1304,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
LASSERTF(page_count == 1 ||
- (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
+ (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
ergo(i > 0 && i < page_count - 1,
- poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
+ poff == 0 && pg->count == PAGE_SIZE) &&
ergo(i == page_count - 1, poff == 0)),
"i: %d/%d pg: %p off: %llu, count: %u\n",
i, page_count, pg, pg->off, pg->count);
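The LASSERTF encodes the niobuf contiguity rule through ergo(a, b), read as "a implies b": only the first page may start mid-page, only the last may end mid-page, and every page between must be exactly one full page. A standalone check with made-up offsets, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096
#define ergo(a, b) (!(a) || (b))  /* "a implies b", as in the kernel macro */

struct brw_page { unsigned off; unsigned count; };

static int no_gaps(const struct brw_page *pg, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                unsigned poff = pg[i].off & (PAGE_SIZE - 1);

                if (!(n == 1 ||
                      (ergo(i == 0, poff + pg[i].count == PAGE_SIZE) &&
                       ergo(i > 0 && i < n - 1,
                            poff == 0 && pg[i].count == PAGE_SIZE) &&
                       ergo(i == n - 1, poff == 0))))
                        return 0;
        }
        return 1;
}

int main(void)
{
        /* first page ends on a boundary, middle is a full page,
         * last starts on a boundary: no gap in the array
         */
        struct brw_page pga[3] = {
                { .off = 1024, .count = 3072 },
                { .off = 4096, .count = 4096 },
                { .off = 8192, .count = 512  },
        };

        printf("contiguous=%d\n", no_gaps(pga, 3));     /* prints 1 */
        return 0;
}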
@@ -1355,8 +1368,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
if (cli->cl_checksum &&
!sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
/* store cl_cksum_type in a local variable since
- * it can be changed via lprocfs */
- cksum_type_t cksum_type = cli->cl_cksum_type;
+ * it can be changed via lprocfs
+ */
+ enum cksum_type cksum_type = cli->cl_cksum_type;
if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
oa->o_flags &= OBD_FL_LOCAL_MASK;
@@ -1375,7 +1389,8 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
oa->o_flags |= cksum_type_pack(cksum_type);
} else {
/* clear out the checksum flag, in case this is a
- * resend but cl_checksum is no longer set. b=11238 */
+ * resend but cl_checksum is no longer set. b=11238
+ */
oa->o_valid &= ~OBD_MD_FLCKSUM;
}
oa->o_cksum = body->oa.o_cksum;
@@ -1415,11 +1430,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
__u32 client_cksum, __u32 server_cksum, int nob,
u32 page_count, struct brw_page **pga,
- cksum_type_t client_cksum_type)
+ enum cksum_type client_cksum_type)
{
__u32 new_cksum;
char *msg;
- cksum_type_t cksum_type;
+ enum cksum_type cksum_type;
if (server_cksum == client_cksum) {
CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
@@ -1472,9 +1487,9 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
return rc;
}
- LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
+ LASSERTF(req->rq_repmsg, "rc = %d\n", rc);
body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
- if (body == NULL) {
+ if (!body) {
DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
return -EPROTO;
}
@@ -1538,7 +1553,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
if (rc != req->rq_bulk->bd_nob_transferred) {
CERROR("Unexpected rc %d (%d transferred)\n",
- rc, req->rq_bulk->bd_nob_transferred);
+ rc, req->rq_bulk->bd_nob_transferred);
return -EPROTO;
}
@@ -1550,7 +1565,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
__u32 server_cksum = body->oa.o_cksum;
char *via = "";
char *router = "";
- cksum_type_t cksum_type;
+ enum cksum_type cksum_type;
cksum_type = cksum_type_unpack(body->oa.o_valid&OBD_MD_FLFLAGS ?
body->oa.o_flags : 0);
@@ -1627,7 +1642,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
return rc;
list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- if (oap->oap_request != NULL) {
+ if (oap->oap_request) {
LASSERTF(request == oap->oap_request,
"request %p != oap_request %p\n",
request, oap->oap_request);
@@ -1638,12 +1653,14 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
}
}
/* New request takes over pga and oaps from old request.
- * Note that copying a list_head doesn't work, need to move it... */
+ * Note that copying a list_head doesn't work, need to move it...
+ */
aa->aa_resends++;
new_req->rq_interpret_reply = request->rq_interpret_reply;
new_req->rq_async_args = request->rq_async_args;
/* cap resend delay to the current request timeout, this is similar to
- * what ptlrpc does (see after_reply()) */
+ * what ptlrpc does (see after_reply())
+ */
if (aa->aa_resends > new_req->rq_timeout)
new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
else
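The resend backoff grows with the resend count but is clamped to one request timeout, mirroring ptlrpc's after_reply(). The clamp in isolation, with illustrative numbers:

#include <stdio.h>
#include <time.h>

int main(void)
{
        long rq_timeout = 30;   /* seconds, illustrative */
        long aa_resends = 45;   /* more resends than the timeout allows */
        time_t now = time(NULL);
        time_t rq_sent;

        /* cap the backoff at one request timeout, as above */
        if (aa_resends > rq_timeout)
                rq_sent = now + rq_timeout;
        else
                rq_sent = now + aa_resends;

        printf("resend in %ld s\n", (long)(rq_sent - now));     /* 30 */
        return 0;
}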
@@ -1669,7 +1686,8 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
/* XXX: This code will run into problem if we're going to support
* to add a series of BRW RPCs into a self-defined ptlrpc_request_set
* and wait for all of them to be finished. We should inherit request
- * set from old request. */
+ * set from old request.
+ */
ptlrpcd_add_req(new_req);
DEBUG_REQ(D_INFO, new_req, "new request");
@@ -1709,7 +1727,7 @@ static void sort_brw_pages(struct brw_page **array, int num)
static void osc_release_ppga(struct brw_page **ppga, u32 count)
{
- LASSERT(ppga != NULL);
+ LASSERT(ppga);
kfree(ppga);
}
@@ -1725,7 +1743,8 @@ static int brw_interpret(const struct lu_env *env,
rc = osc_brw_fini_request(req, rc);
CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
/* When server return -EINPROGRESS, client should always retry
- * regardless of the number of times the bulk was resent already. */
+ * regardless of the number of times the bulk was resent already.
+ */
if (osc_recoverable_error(rc)) {
if (req->rq_import_generation !=
req->rq_import->imp_generation) {
@@ -1748,7 +1767,7 @@ static int brw_interpret(const struct lu_env *env,
}
list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
- if (obj == NULL && rc == 0) {
+ if (!obj && rc == 0) {
obj = osc2cl(ext->oe_obj);
cl_object_get(obj);
}
@@ -1759,7 +1778,7 @@ static int brw_interpret(const struct lu_env *env,
LASSERT(list_empty(&aa->aa_exts));
LASSERT(list_empty(&aa->aa_oaps));
- if (obj != NULL) {
+ if (obj) {
struct obdo *oa = aa->aa_oa;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
unsigned long valid = 0;
@@ -1798,7 +1817,8 @@ static int brw_interpret(const struct lu_env *env,
client_obd_list_lock(&cli->cl_loi_list_lock);
/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
* is called so we know whether to go to sync BRWs or wait for more
- * RPCs to complete */
+ * RPCs to complete
+ */
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
cli->cl_w_in_flight--;
else
@@ -1857,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
oap->oap_count;
else
LASSERT(oap->oap_page_off + oap->oap_count ==
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
}
}
@@ -1871,13 +1891,13 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
}
pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
- if (pga == NULL) {
+ if (!pga) {
rc = -ENOMEM;
goto out;
}
- oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
- if (oa == NULL) {
+ oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
+ if (!oa) {
rc = -ENOMEM;
goto out;
}
@@ -1886,7 +1906,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
struct cl_page *page = oap2cl_page(oap);
- if (clerq == NULL) {
+ if (!clerq) {
clerq = cl_req_alloc(env, page, crt,
1 /* only 1-object rpcs for now */);
if (IS_ERR(clerq)) {
@@ -1907,7 +1927,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
}
/* always get the data for the obdo for the rpc */
- LASSERT(clerq != NULL);
+ LASSERT(clerq);
crattr->cra_oa = oa;
cl_req_attr_set(env, clerq, crattr, ~0ULL);
if (lock) {
@@ -1923,7 +1943,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
sort_brw_pages(pga, page_count);
rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
- pga, &req, 1, 0);
+ pga, &req, 1, 0);
if (rc != 0) {
CERROR("prep_req failed: %d\n", rc);
goto out;
@@ -1938,7 +1958,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
* we race with setattr (locally or in queue at OST). If OST gets
* later setattr before earlier BRW (as determined by the request xid),
* the OST will not use BRW timestamps. Sadly, there is no obvious
- * way to do this in a single call. bug 10150 */
+ * way to do this in a single call. bug 10150
+ */
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
crattr->cra_oa = &body->oa;
cl_req_attr_set(env, clerq, crattr,
@@ -1955,23 +1976,24 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
aa->aa_clerq = clerq;
/* queued sync pages can be torn down while the pages
- * were between the pending list and the rpc */
+ * were between the pending list and the rpc
+ */
tmp = NULL;
list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
/* only one oap gets a request reference */
- if (tmp == NULL)
+ if (!tmp)
tmp = oap;
if (oap->oap_interrupted && !req->rq_intr) {
CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
- oap, req);
+ oap, req);
ptlrpc_mark_interrupted(req);
}
}
- if (tmp != NULL)
+ if (tmp)
tmp->oap_request = ptlrpc_request_addref(req);
client_obd_list_lock(&cli->cl_loi_list_lock);
- starting_offset >>= PAGE_CACHE_SHIFT;
+ starting_offset >>= PAGE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@@ -2001,16 +2023,17 @@ out:
kfree(crattr);
if (rc != 0) {
- LASSERT(req == NULL);
+ LASSERT(!req);
if (oa)
kmem_cache_free(obdo_cachep, oa);
kfree(pga);
/* this should happen rarely and is pretty bad, it makes the
- * pending list not follow the dirty order */
+ * pending list not follow the dirty order
+ */
while (!list_empty(ext_list)) {
ext = list_entry(ext_list->next, struct osc_extent,
- oe_link);
+ oe_link);
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 0, rc);
}
@@ -2026,7 +2049,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
void *data = einfo->ei_cbdata;
int set = 0;
- LASSERT(lock != NULL);
LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
LASSERT(lock->l_resource->lr_type == einfo->ei_type);
LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
@@ -2035,7 +2057,7 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
lock_res_and_lock(lock);
spin_lock(&osc_ast_guard);
- if (lock->l_ast_data == NULL)
+ if (!lock->l_ast_data)
lock->l_ast_data = data;
if (lock->l_ast_data == data)
set = 1;
@@ -2052,7 +2074,7 @@ static int osc_set_data_with_check(struct lustre_handle *lockh,
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
int set = 0;
- if (lock != NULL) {
+ if (lock) {
set = osc_set_lock_data_with_check(lock, einfo);
LDLM_LOCK_PUT(lock);
} else
@@ -2064,7 +2086,8 @@ static int osc_set_data_with_check(struct lustre_handle *lockh,
/* find any ldlm lock of the inode in osc
* return 0 not find
* 1 find one
- * < 0 error */
+ * < 0 error
+ */
static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
ldlm_iterator_t replace, void *data)
{
@@ -2095,7 +2118,6 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
rep = req_capsule_server_get(&req->rq_pill,
&RMF_DLM_REP);
- LASSERT(rep != NULL);
rep->lock_policy_res1 =
ptlrpc_status_ntoh(rep->lock_policy_res1);
if (rep->lock_policy_res1)
@@ -2127,18 +2149,21 @@ static int osc_enqueue_interpret(const struct lu_env *env,
__u64 *flags = aa->oa_flags;
/* Make a local copy of a lock handle and a mode, because aa->oa_*
- * might be freed anytime after lock upcall has been called. */
+ * might be freed anytime after lock upcall has been called.
+ */
lustre_handle_copy(&handle, aa->oa_lockh);
mode = aa->oa_ei->ei_mode;
/* ldlm_cli_enqueue is holding a reference on the lock, so it must
- * be valid. */
+ * be valid.
+ */
lock = ldlm_handle2lock(&handle);
/* Take an additional reference so that a blocking AST that
* ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
* to arrive after an upcall has been executed by
- * osc_enqueue_fini(). */
+ * osc_enqueue_fini().
+ */
ldlm_lock_addref(&handle, mode);
/* Let CP AST to grant the lock first. */
@@ -2170,7 +2195,7 @@ static int osc_enqueue_interpret(const struct lu_env *env,
*/
ldlm_lock_decref(&handle, mode);
- LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
+ LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n",
aa->oa_lockh, req, aa);
ldlm_lock_decref(&handle, mode);
LDLM_LOCK_PUT(lock);
@@ -2185,7 +2210,8 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
* others may take a considerable amount of time in case of OST failure; and
* when a client does not release a lock needed by other sync requests, the client
* is excluded from the cluster -- such scenarios make life difficult, so
- * release locks just after they are obtained. */
+ * release locks just after they are obtained.
+ */
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
@@ -2198,11 +2224,12 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
struct ptlrpc_request *req = NULL;
int intent = *flags & LDLM_FL_HAS_INTENT;
__u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
- ldlm_mode_t mode;
+ enum ldlm_mode mode;
int rc;
/* Filesystem lock extents are extended to page boundaries so that
- * dealing with the page cache is a little smoother. */
+ * dealing with the page cache is a little smoother.
+ */
policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
policy->l_extent.end |= ~CFS_PAGE_MASK;
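CFS_PAGE_MASK clears the low PAGE_SIZE - 1 bits, so the two statements above round the extent start down and the extent end up to page boundaries. The masking worked through with an assumed 4 KiB page:

#include <stdio.h>

typedef unsigned long long u64;

#define PAGE_SIZE 4096ULL       /* assumed page size */
#define CFS_PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        u64 start = 5000, end = 12000;

        start -= start & ~CFS_PAGE_MASK;  /* round down: 4096 */
        end |= ~CFS_PAGE_MASK;            /* up to boundary - 1: 12287 */

        printf("extent [%llu, %llu]\n", start, end);
        return 0;
}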
@@ -2226,7 +2253,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
*
* At some point we should cancel the read lock instead of making them
* send us a blocking callback, but there are problems with canceling
- * locks out from other users right now, too. */
+ * locks out from other users right now, too.
+ */
mode = einfo->ei_mode;
if (einfo->ei_mode == LCK_PR)
mode |= LCK_PW;
@@ -2238,7 +2266,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
/* For AGL, if enqueue RPC is sent but the lock is not
* granted, then skip to process this stripe.
- * Return -ECANCELED to tell the caller. */
+ * Return -ECANCELED to tell the caller.
+ */
ldlm_lock_decref(lockh, mode);
LDLM_LOCK_PUT(matched);
return -ECANCELED;
@@ -2247,19 +2276,22 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
if (osc_set_lock_data_with_check(matched, einfo)) {
*flags |= LDLM_FL_LVB_READY;
/* addref the lock only if not async requests and PW
- * lock is matched whereas we asked for PR. */
+ * lock is matched whereas we asked for PR.
+ */
if (!rqset && einfo->ei_mode != mode)
ldlm_lock_addref(lockh, LCK_PR);
if (intent) {
/* I would like to be able to ASSERT here that
* rss <= kms, but I can't, for reasons which
- * are explained in lov_enqueue() */
+ * are explained in lov_enqueue()
+ */
}
/* We already have a lock, and it's referenced.
*
* At this point, the cl_lock::cll_state is CLS_QUEUING,
- * AGL upcall may change it to CLS_HELD directly. */
+ * AGL upcall may change it to CLS_HELD directly.
+ */
(*upcall)(cookie, ELDLM_OK);
if (einfo->ei_mode != mode)
@@ -2281,7 +2313,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_ENQUEUE_LVB);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
@@ -2341,27 +2373,29 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
{
struct obd_device *obd = exp->exp_obd;
__u64 lflags = *flags;
- ldlm_mode_t rc;
+ enum ldlm_mode rc;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
return -EIO;
/* Filesystem lock extents are extended to page boundaries so that
- * dealing with the page cache is a little smoother */
+ * dealing with the page cache is a little smoother
+ */
policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
policy->l_extent.end |= ~CFS_PAGE_MASK;
/* Next, search for already existing extent locks that will cover us */
/* If we're trying to read, we also search for an existing PW lock. The
* VFS and page cache already protect us locally, so lots of readers/
- * writers can share a single PW lock. */
+ * writers can share a single PW lock.
+ */
rc = mode;
if (mode == LCK_PR)
rc |= LCK_PW;
rc = ldlm_lock_match(obd->obd_namespace, lflags,
res_id, type, policy, rc, lockh, unref);
if (rc) {
- if (data != NULL) {
+ if (data) {
if (!osc_set_data_with_check(lockh, data)) {
if (!(lflags & LDLM_FL_TEST_LOCK))
ldlm_lock_decref(lockh, rc);
@@ -2398,8 +2432,9 @@ static int osc_statfs_interpret(const struct lu_env *env,
* due to issues at a higher level (LOV).
* Exit immediately since the caller is
* aware of the problem and takes care
- * of the clean up */
- return rc;
+ * of the clean up
+ */
+ return rc;
if ((rc == -ENOTCONN || rc == -EAGAIN) &&
(aa->aa_oi->oi_flags & OBD_STATFS_NODELAY)) {
@@ -2411,7 +2446,7 @@ static int osc_statfs_interpret(const struct lu_env *env,
goto out;
msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (msfs == NULL) {
+ if (!msfs) {
rc = -EPROTO;
goto out;
}
@@ -2436,9 +2471,10 @@ static int osc_statfs_async(struct obd_export *exp,
* extra calls into the filesystem if that isn't necessary (e.g.
* during mount that would help a bit). Having relative timestamps
* is not so great if request processing is slow, while absolute
- * timestamps are not ideal because they need time synchronization. */
+ * timestamps are not ideal because they need time synchronization.
+ */
req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
@@ -2474,8 +2510,9 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
struct obd_import *imp = NULL;
int rc;
- /*Since the request might also come from lprocfs, so we need
- *sync this with client_disconnect_export Bug15684*/
+ /* Since the request might also come from lprocfs, we need to
+ * sync this with client_disconnect_export Bug15684
+ */
down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import)
imp = class_import_get(obd->u.cli.cl_import);
@@ -2488,12 +2525,13 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
* extra calls into the filesystem if that isn't necessary (e.g.
* during mount that would help a bit). Having relative timestamps
* is not so great if request processing is slow, while absolute
- * timestamps are not ideal because they need time synchronization. */
+ * timestamps are not ideal because they need time synchronization.
+ */
req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
class_import_put(imp);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
@@ -2516,7 +2554,7 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
goto out;
msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
- if (msfs == NULL) {
+ if (!msfs) {
rc = -EPROTO;
goto out;
}
@@ -2534,7 +2572,8 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
* the maximum number of OST indices which will fit in the user buffer.
* lmm_magic must be LOV_MAGIC (we only use 1 slot here).
*/
-static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
+static int osc_getstripe(struct lov_stripe_md *lsm,
+ struct lov_user_md __user *lump)
{
/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
struct lov_user_md_v3 lum, *lumk;
@@ -2545,7 +2584,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
return -ENODATA;
/* we only need the header part from user space to get lmm_magic and
- * lmm_stripe_count, (the header part is common to v1 and v3) */
+ * lmm_stripe_count, (the header part is common to v1 and v3)
+ */
lum_size = sizeof(struct lov_user_md_v1);
if (copy_from_user(&lum, lump, lum_size))
return -EFAULT;
@@ -2560,7 +2600,8 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
/* we can use lov_mds_md_size() to compute lum_size
- * because lov_user_md_vX and lov_mds_md_vX have the same size */
+ * because lov_user_md_vX and lov_mds_md_vX have the same size
+ */
if (lum.lmm_stripe_count > 0) {
lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
lumk = kzalloc(lum_size, GFP_NOFS);
@@ -2591,14 +2632,15 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
}
static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
- void *karg, void *uarg)
+ void *karg, void __user *uarg)
{
struct obd_device *obd = exp->exp_obd;
struct obd_ioctl_data *data = karg;
int err = 0;
if (!try_module_get(THIS_MODULE)) {
- CERROR("Can't get module. Is it alive?");
+ CERROR("%s: cannot get module '%s'\n", obd->obd_name,
+ module_name(THIS_MODULE));
return -EINVAL;
}
switch (cmd) {
@@ -2700,7 +2742,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_OST_GET_INFO_LAST_ID);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
@@ -2721,7 +2763,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
goto out;
reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
- if (reply == NULL) {
+ if (!reply) {
rc = -EPROTO;
goto out;
}
@@ -2735,7 +2777,7 @@ out:
struct ldlm_res_id res_id;
ldlm_policy_data_t policy;
struct lustre_handle lockh;
- ldlm_mode_t mode = 0;
+ enum ldlm_mode mode = 0;
struct ptlrpc_request *req;
struct ll_user_fiemap *reply;
char *tmp;
@@ -2748,12 +2790,12 @@ out:
CFS_PAGE_MASK;
if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
- fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
+ fm_key->fiemap.fm_start + PAGE_SIZE - 1)
policy.l_extent.end = OBD_OBJECT_EOF;
else
policy.l_extent.end = (fm_key->fiemap.fm_start +
fm_key->fiemap.fm_length +
- PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
+ PAGE_SIZE - 1) & CFS_PAGE_MASK;
ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
@@ -2774,7 +2816,7 @@ out:
skip_locking:
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_OST_GET_INFO_FIEMAP);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto drop_lock;
}
@@ -2803,7 +2845,7 @@ skip_locking:
goto fini_req;
reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
- if (reply == NULL) {
+ if (!reply) {
rc = -EPROTO;
goto fini_req;
}
@@ -2852,7 +2894,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
if (KEY_IS(KEY_CACHE_SET)) {
struct client_obd *cli = &obd->u.cli;
- LASSERT(cli->cl_cache == NULL); /* only once */
+ LASSERT(!cli->cl_cache); /* only once */
cli->cl_cache = val;
atomic_inc(&cli->cl_cache->ccc_users);
cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
@@ -2880,16 +2922,17 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
return -EINVAL;
/* We pass all other commands directly to OST. Since nobody calls osc
- methods directly and everybody is supposed to go through LOV, we
- assume lov checked invalid values for us.
- The only recognised values so far are evict_by_nid and mds_conn.
- Even if something bad goes through, we'd get a -EINVAL from OST
- anyway. */
+ * methods directly and everybody is supposed to go through LOV, we
+ * assume lov checked invalid values for us.
+ * The only recognised values so far are evict_by_nid and mds_conn.
+ * Even if something bad goes through, we'd get a -EINVAL from OST
+ * anyway.
+ */
req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
&RQF_OST_SET_GRANT_INFO :
&RQF_OBD_SET_INFO);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
@@ -2916,7 +2959,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- oa = kmem_cache_alloc(obdo_cachep, GFP_NOFS | __GFP_ZERO);
+ oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
if (!oa) {
ptlrpc_req_finished(req);
return -ENOMEM;
@@ -2928,7 +2971,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
ptlrpc_request_set_replen(req);
if (!KEY_IS(KEY_GRANT_SHRINK)) {
- LASSERT(set != NULL);
+ LASSERT(set);
ptlrpc_set_add_req(set, req);
ptlrpc_check_set(NULL, set);
} else {
@@ -2946,7 +2989,7 @@ static int osc_reconnect(const struct lu_env *env,
{
struct client_obd *cli = &obd->u.cli;
- if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
+ if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
long lost_grant;
client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -2987,7 +3030,7 @@ static int osc_disconnect(struct obd_export *exp)
* So the osc should be disconnected from the shrink list, after we
* are sure the import has been destroyed. BUG18662
*/
- if (obd->u.cli.cl_import == NULL)
+ if (!obd->u.cli.cl_import)
osc_del_shrink_grant(&obd->u.cli);
return rc;
}
@@ -3024,7 +3067,8 @@ static int osc_import_event(struct obd_device *obd,
/* Reset grants */
cli = &obd->u.cli;
/* all pages go to failing rpcs due to the invalid
- * import */
+ * import
+ */
osc_io_unplug(env, cli, NULL);
ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
@@ -3206,13 +3250,13 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
return 0;
}
-int osc_cleanup(struct obd_device *obd)
+static int osc_cleanup(struct obd_device *obd)
{
struct client_obd *cli = &obd->u.cli;
int rc;
/* lru cleanup */
- if (cli->cl_cache != NULL) {
+ if (cli->cl_cache) {
LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
spin_lock(&cli->cl_cache->ccc_lru_lock);
list_del_init(&cli->cl_lru_osc);
@@ -3255,7 +3299,7 @@ static int osc_process_config(struct obd_device *obd, u32 len, void *buf)
return osc_process_config_base(obd, buf);
}
-struct obd_ops osc_obd_ops = {
+static struct obd_ops osc_obd_ops = {
.owner = THIS_MODULE,
.setup = osc_setup,
.precleanup = osc_precleanup,
@@ -3298,7 +3342,8 @@ static int __init osc_init(void)
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
- * symbols from modules.*/
+ * symbols from modules.
+ */
CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
rc = lu_kmem_init(osc_caches);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index efdda09507bf..cf3ac8eee9ee 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -145,7 +145,7 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
- if (desc == NULL)
+ if (!desc)
return NULL;
desc->bd_import_generation = req->rq_import_generation;
@@ -171,15 +171,15 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
struct page *page, int pageoffset, int len, int pin)
{
LASSERT(desc->bd_iov_count < desc->bd_max_iov);
- LASSERT(page != NULL);
+ LASSERT(page);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
- LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
+ LASSERT(pageoffset + len <= PAGE_SIZE);
desc->bd_nob += len;
if (pin)
- page_cache_get(page);
+ get_page(page);
ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
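page_cache_get() and page_cache_release() were thin aliases for get_page() and put_page() and were removed along with PAGE_CACHE_SIZE in 4.6, so this is another mechanical rename with identical refcounting behaviour:

/* before: page-cache aliases, removed in 4.6 */
page_cache_get(page);
page_cache_release(page);

/* after: the underlying page refcount helpers */
get_page(page);
put_page(page);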
@@ -193,7 +193,6 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
int i;
- LASSERT(desc != NULL);
LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
LASSERT(desc->bd_md_count == 0); /* network hands off */
LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
@@ -207,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
if (unpin) {
for (i = 0; i < desc->bd_iov_count; i++)
- page_cache_release(desc->bd_iov[i].kiov_page);
+ put_page(desc->bd_iov[i].kiov_page);
}
kfree(desc);
@@ -353,6 +352,7 @@ static int unpack_reply(struct ptlrpc_request *req)
* If anything goes wrong just ignore it - same as if it never happened
*/
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
+ __must_hold(&req->rq_lock)
{
struct ptlrpc_request *early_req;
time64_t olddl;
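__must_hold() is the sparse lock-context annotation from <linux/compiler.h>: it declares that the function is entered and exited with the named lock held, so sparse can flag unbalanced locking; it generates no code at runtime. A kernel-side sketch (struct foo and peek_state are invented for illustration):

#include <linux/spinlock.h>

struct foo {
        spinlock_t lock;
        int state;
};

/* Annotates that the caller enters and leaves with f->lock held. */
static int peek_state(struct foo *f)
        __must_hold(&f->lock)
{
        return f->state;
}

static int take_and_peek(struct foo *f)
{
        int s;

        spin_lock(&f->lock);
        s = peek_state(f);      /* balanced: sparse stays quiet */
        spin_unlock(&f->lock);
        return s;
}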
@@ -411,7 +411,7 @@ int ptlrpc_request_cache_init(void)
request_cache = kmem_cache_create("ptlrpc_cache",
sizeof(struct ptlrpc_request),
0, SLAB_HWCACHE_ALIGN, NULL);
- return request_cache == NULL ? -ENOMEM : 0;
+ return !request_cache ? -ENOMEM : 0;
}
void ptlrpc_request_cache_fini(void)
@@ -423,7 +423,7 @@ struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags)
{
struct ptlrpc_request *req;
- req = kmem_cache_alloc(request_cache, flags | __GFP_ZERO);
+ req = kmem_cache_zalloc(request_cache, flags);
return req;
}
@@ -441,8 +441,6 @@ void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
struct list_head *l, *tmp;
struct ptlrpc_request *req;
- LASSERT(pool != NULL);
-
spin_lock(&pool->prp_lock);
list_for_each_safe(l, tmp, &pool->prp_req_list) {
req = list_entry(l, struct ptlrpc_request, rq_list);
@@ -559,7 +557,7 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
}
request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
+ rq_list);
list_del_init(&request->rq_list);
spin_unlock(&pool->prp_lock);
@@ -724,10 +722,10 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
request = ptlrpc_prep_req_from_pool(pool);
if (request) {
- LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
+ LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp);
LASSERT(imp != LP_POISON);
- LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
- imp->imp_client);
+ LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
+ imp->imp_client);
LASSERT(imp->imp_client != LP_POISON);
request->rq_import = class_import_get(imp);
@@ -752,7 +750,7 @@ ptlrpc_request_alloc_internal(struct obd_import *imp,
struct ptlrpc_request *request;
request = __ptlrpc_request_alloc(imp, pool);
- if (request == NULL)
+ if (!request)
return NULL;
req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
@@ -898,8 +896,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
n++;
@@ -911,8 +908,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
list_del_init(&req->rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
@@ -951,10 +947,10 @@ void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
atomic_inc(&set->set_remaining);
req->rq_queued_time = cfs_time_current();
- if (req->rq_reqmsg != NULL)
+ if (req->rq_reqmsg)
lustre_msg_set_jobid(req->rq_reqmsg, NULL);
- if (set->set_producer != NULL)
+ if (set->set_producer)
/*
* If the request set has a producer callback, the RPC must be
* sent straight away
@@ -974,7 +970,7 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
struct ptlrpc_request_set *set = pc->pc_set;
int count, i;
- LASSERT(req->rq_set == NULL);
+ LASSERT(!req->rq_set);
LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
spin_lock(&set->set_new_req_lock);
@@ -1015,7 +1011,6 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
{
int delay = 0;
- LASSERT(status != NULL);
*status = 0;
if (req->rq_ctx_init || req->rq_ctx_fini) {
@@ -1078,7 +1073,7 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req)
__u32 opc;
int err;
- LASSERT(req->rq_reqmsg != NULL);
+ LASSERT(req->rq_reqmsg);
opc = lustre_msg_get_opc(req->rq_reqmsg);
/*
@@ -1167,7 +1162,7 @@ static int after_reply(struct ptlrpc_request *req)
struct timespec64 work_start;
long timediff;
- LASSERT(obd != NULL);
+ LASSERT(obd);
/* repbuf must be unlinked */
LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);
@@ -1247,7 +1242,7 @@ static int after_reply(struct ptlrpc_request *req)
ktime_get_real_ts64(&work_start);
timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
(work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
- if (obd->obd_svc_stats != NULL) {
+ if (obd->obd_svc_stats) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
timediff);
ptlrpc_lprocfs_rpc_sent(req, timediff);
@@ -1310,7 +1305,7 @@ static int after_reply(struct ptlrpc_request *req)
/* version recovery */
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
- } else if (req->rq_commit_cb != NULL &&
+ } else if (req->rq_commit_cb &&
list_empty(&req->rq_replay_list)) {
/*
* NB: don't call rq_commit_cb if it's already on
@@ -1334,8 +1329,8 @@ static int after_reply(struct ptlrpc_request *req)
struct ptlrpc_request *last;
last = list_entry(imp->imp_replay_list.prev,
- struct ptlrpc_request,
- rq_replay_list);
+ struct ptlrpc_request,
+ rq_replay_list);
/*
* Requests with rq_replay stay on the list even if no
* commit is expected.
@@ -1437,7 +1432,7 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
{
int remaining, rc;
- LASSERT(set->set_producer != NULL);
+ LASSERT(set->set_producer);
remaining = atomic_read(&set->set_remaining);
@@ -1478,8 +1473,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
INIT_LIST_HEAD(&comp_reqs);
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
struct obd_import *imp = req->rq_import;
int unregistered = 0;
int rc = 0;
@@ -1621,8 +1615,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
*/
list_del_init(&req->rq_list);
list_add_tail(&req->rq_list,
- &imp->
- imp_delayed_list);
+ &imp->imp_delayed_list);
spin_unlock(&imp->imp_lock);
continue;
}
@@ -1630,7 +1623,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
if (status != 0) {
req->rq_status = status;
ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
+ RQ_PHASE_INTERPRET);
spin_unlock(&imp->imp_lock);
goto interpret;
}
@@ -1645,7 +1638,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
list_del_init(&req->rq_list);
list_add_tail(&req->rq_list,
- &imp->imp_sending_list);
+ &imp->imp_sending_list);
spin_unlock(&imp->imp_lock);
@@ -1750,7 +1743,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
* process the reply. Similarly if the RPC returned
* an error, and therefore the bulk will never arrive.
*/
- if (req->rq_bulk == NULL || req->rq_status < 0) {
+ if (!req->rq_bulk || req->rq_status < 0) {
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
goto interpret;
}
@@ -1802,7 +1795,7 @@ interpret:
}
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
- CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
+ CDEBUG(req->rq_reqmsg ? D_RPCTRACE : 0,
"Completed RPC pname:cluuid:pid:xid:nid:opc %s:%s:%d:%llu:%s:%d\n",
current_comm(), imp->imp_obd->obd_uuid.uuid,
lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
@@ -1882,8 +1875,8 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
"timed out for sent delay" : "timed out for slow reply"),
(s64)req->rq_sent, (s64)req->rq_real_sent);
- if (imp != NULL && obd_debug_peer_on_timeout)
- LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
+ if (imp && obd_debug_peer_on_timeout)
+ LNetDebugPeer(imp->imp_connection->c_peer);
ptlrpc_unregister_reply(req, async_unlink);
ptlrpc_unregister_bulk(req, async_unlink);
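The old code routed the debug request through the ioctl-style LNetCtl() multiplexer; the series replaces that with the dedicated LNetDebugPeer() entry point, which takes the peer id directly (both lines as they appear in the hunk above):

/* before: multiplexed through the control ioctl */
LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);

/* after: dedicated call, takes the lnet_process_id_t by value */
LNetDebugPeer(imp->imp_connection->c_peer);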
@@ -1891,7 +1884,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
if (obd_dump_on_timeout)
libcfs_debug_dumplog();
- if (imp == NULL) {
+ if (!imp) {
DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
return 1;
}
@@ -1944,13 +1937,10 @@ int ptlrpc_expired_set(void *data)
struct list_head *tmp;
time64_t now = ktime_get_real_seconds();
- LASSERT(set != NULL);
-
/* A timeout expired. See which reqs it applies to... */
list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
/* don't expire request waiting for context */
if (req->rq_wait_ctx)
@@ -2002,13 +1992,11 @@ void ptlrpc_interrupted_set(void *data)
struct ptlrpc_request_set *set = data;
struct list_head *tmp;
- LASSERT(set != NULL);
CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);
list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase != RQ_PHASE_RPC &&
req->rq_phase != RQ_PHASE_UNREGISTERING)
@@ -2081,7 +2069,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
else
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ rq_set_chain);
if (req->rq_phase == RQ_PHASE_NEW)
(void)ptlrpc_send_new_req(req);
}
@@ -2155,7 +2143,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ rq_set_chain);
spin_lock(&req->rq_lock);
req->rq_invalid_rqset = 1;
spin_unlock(&req->rq_lock);
@@ -2174,7 +2162,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
rc = req->rq_status;
}
- if (set->set_interpret != NULL) {
+ if (set->set_interpret) {
int (*interpreter)(struct ptlrpc_request_set *set, void *, int) =
set->set_interpret;
rc = interpreter(set, set->set_arg, rc);
@@ -2206,10 +2194,10 @@ EXPORT_SYMBOL(ptlrpc_set_wait);
*/
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
- if (request == NULL)
+ if (!request)
return;
LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
- LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */
+ LASSERTF(!request->rq_rqbd, "req %p\n", request);/* client-side */
LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
@@ -2221,7 +2209,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
* We must take it off the imp_replay_list first. Otherwise, we'll set
* request->rq_reqmsg to NULL while osc_close is dereferencing it.
*/
- if (request->rq_import != NULL) {
+ if (request->rq_import) {
if (!locked)
spin_lock(&request->rq_import->imp_lock);
list_del_init(&request->rq_replay_list);
@@ -2236,20 +2224,20 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
LBUG();
}
- if (request->rq_repbuf != NULL)
+ if (request->rq_repbuf)
sptlrpc_cli_free_repbuf(request);
- if (request->rq_export != NULL) {
+ if (request->rq_export) {
class_export_put(request->rq_export);
request->rq_export = NULL;
}
- if (request->rq_import != NULL) {
+ if (request->rq_import) {
class_import_put(request->rq_import);
request->rq_import = NULL;
}
- if (request->rq_bulk != NULL)
+ if (request->rq_bulk)
ptlrpc_free_bulk_pin(request->rq_bulk);
- if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
+ if (request->rq_reqbuf || request->rq_clrbuf)
sptlrpc_cli_free_reqbuf(request);
if (request->rq_cli_ctx)
@@ -2269,7 +2257,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
*/
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
- if (request == NULL)
+ if (!request)
return 1;
if (request == LP_POISON ||
@@ -2351,7 +2339,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
* a chance to run reply_in_callback(), and to make sure we've
* unlinked before returning a req to the pool.
*/
- if (request->rq_set != NULL)
+ if (request->rq_set)
wq = &request->rq_set->set_waitq;
else
wq = &request->rq_reply_waitq;
@@ -2386,7 +2374,7 @@ static void ptlrpc_free_request(struct ptlrpc_request *req)
req->rq_replay = 0;
spin_unlock(&req->rq_lock);
- if (req->rq_commit_cb != NULL)
+ if (req->rq_commit_cb)
req->rq_commit_cb(req);
list_del_init(&req->rq_replay_list);
@@ -2427,7 +2415,6 @@ void ptlrpc_free_committed(struct obd_import *imp)
struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
bool skip_committed_list = true;
- LASSERT(imp != NULL);
assert_spin_locked(&imp->imp_lock);
if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
@@ -2575,8 +2562,7 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
ptlrpc_request_addref(req);
list_for_each_prev(tmp, &imp->imp_replay_list) {
struct ptlrpc_request *iter =
- list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
+ list_entry(tmp, struct ptlrpc_request, rq_replay_list);
/*
* We may have duplicate transnos if we create and then
@@ -2611,12 +2597,12 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req)
struct ptlrpc_request_set *set;
int rc;
- LASSERT(req->rq_set == NULL);
+ LASSERT(!req->rq_set);
LASSERT(!req->rq_receiving_reply);
set = ptlrpc_prep_set();
- if (set == NULL) {
- CERROR("Unable to allocate ptlrpc set.");
+ if (!set) {
+ CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
return -ENOMEM;
}
@@ -2847,12 +2833,9 @@ void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
struct list_head *tmp, *pos;
- LASSERT(set != NULL);
-
list_for_each_safe(pos, tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(pos, struct ptlrpc_request, rq_set_chain);
spin_lock(&req->rq_lock);
if (req->rq_phase != RQ_PHASE_RPC) {
@@ -2994,7 +2977,6 @@ static int work_interpreter(const struct lu_env *env,
struct ptlrpc_work_async_args *arg = data;
LASSERT(ptlrpcd_check_work(req));
- LASSERT(arg->cb != NULL);
rc = arg->cb(env, arg->cbdata);
@@ -3026,12 +3008,12 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
might_sleep();
- if (cb == NULL)
+ if (!cb)
return ERR_PTR(-EINVAL);
/* copy some code from deprecated fakereq. */
req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (req == NULL) {
+ if (!req) {
CERROR("ptlrpc: run out of memory!\n");
return ERR_PTR(-ENOMEM);
}
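ptlrpcd_alloc_work() reports failure through the returned pointer itself via ERR_PTR(); callers test with IS_ERR() and decode with PTR_ERR(). A minimal sketch of the pattern using the <linux/err.h> helpers (alloc_work and the allocation size are invented):

#include <linux/err.h>
#include <linux/slab.h>

/* Returns a valid pointer, or an errno encoded with ERR_PTR(). */
static void *alloc_work(void (*cb)(void *))
{
        void *req;

        if (!cb)
                return ERR_PTR(-EINVAL);

        req = kzalloc(64, GFP_NOFS);
        if (!req)
                return ERR_PTR(-ENOMEM);
        return req;
}

/* Caller side:
 *
 *      void *w = alloc_work(cb);
 *
 *      if (IS_ERR(w))
 *              return PTR_ERR(w);
 */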
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index da1f0b1ac3e3..a14daff3fca0 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -72,7 +72,8 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
* returned and may be compared against out object.
*/
/* In the function below, .hs_keycmp resolves to
- * conn_keycmp() */
+ * conn_keycmp()
+ */
/* coverity[overrun-buffer-val] */
conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
if (conn != conn2) {
@@ -172,7 +173,7 @@ conn_keycmp(const void *key, struct hlist_node *hnode)
struct ptlrpc_connection *conn;
const lnet_process_id_t *conn_key;
- LASSERT(key != NULL);
+ LASSERT(key);
conn_key = key;
conn = hlist_entry(hnode, struct ptlrpc_connection, c_hash);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 990156986986..47be21ac9f10 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -71,7 +71,8 @@ void request_out_callback(lnet_event_t *ev)
if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
/* Failed send: make it seem like the reply timed out, just
- * like failing sends in client.c does currently... */
+ * like failing sends in client.c does currently...
+ */
req->rq_net_err = 1;
ptlrpc_client_wake_req(req);
@@ -95,7 +96,8 @@ void reply_in_callback(lnet_event_t *ev)
LASSERT(ev->md.start == req->rq_repbuf);
LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
- for adaptive timeouts' early reply. */
+ * for adaptive timeouts' early reply.
+ */
LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
spin_lock(&req->rq_lock);
@@ -151,7 +153,8 @@ void reply_in_callback(lnet_event_t *ev)
req->rq_reply_off = ev->offset;
req->rq_nob_received = ev->mlength;
/* LNetMDUnlink can't be called under the LNET_LOCK,
- so we must unlink in ptlrpc_unregister_reply */
+ * so we must unlink in ptlrpc_unregister_reply
+ */
DEBUG_REQ(D_INFO, req,
"reply in flags=%x mlen=%u offset=%d replen=%d",
lustre_msg_get_flags(req->rq_reqmsg),
@@ -162,7 +165,8 @@ void reply_in_callback(lnet_event_t *ev)
out_wake:
/* NB don't unlock till after wakeup; req can disappear under us
- * since we don't have our own ref */
+ * since we don't have our own ref
+ */
ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
}
@@ -213,7 +217,8 @@ void client_bulk_callback(lnet_event_t *ev)
desc->bd_failure = 1;
/* NB don't unlock till after wakeup; desc can disappear under us
- * otherwise */
+ * otherwise
+ */
if (desc->bd_md_count == 0)
ptlrpc_client_wake_req(desc->bd_req);
@@ -250,7 +255,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
__u64 new_seq;
/* set sequence ID for request and add it to history list,
- * it must be called with hold svcpt::scp_lock */
+ * it must be called with hold svcpt::scp_lock
+ */
new_seq = (sec << REQS_SEC_SHIFT) |
(usec << REQS_USEC_SHIFT) |
@@ -258,7 +264,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
if (new_seq > svcpt->scp_hist_seq) {
/* This handles the initial case of scp_hist_seq == 0 or
- * we just jumped into a new time window */
+ * we just jumped into a new time window
+ */
svcpt->scp_hist_seq = new_seq;
} else {
LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
@@ -266,7 +273,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
* however, it's possible that we used up all bits for
* sequence and jumped into the next usec bucket (future time),
* then we hope there will be less RPCs per bucket at some
- * point, and sequence will catch up again */
+ * point, and sequence will catch up again
+ */
svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
new_seq = svcpt->scp_hist_seq;
}
@@ -302,7 +310,8 @@ void request_in_callback(lnet_event_t *ev)
* request buffer we can use the request object embedded in
* rqbd. Note that if we failed to allocate a request,
* we'd have to re-post the rqbd, which we can't do in this
- * context. */
+ * context.
+ */
req = &rqbd->rqbd_req;
memset(req, 0, sizeof(*req));
} else {
@@ -312,7 +321,7 @@ void request_in_callback(lnet_event_t *ev)
return;
}
req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
- if (req == NULL) {
+ if (!req) {
CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
service->srv_name,
libcfs_id2str(ev->initiator));
@@ -322,7 +331,8 @@ void request_in_callback(lnet_event_t *ev)
/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
* flags are reset and scalars are zero. We only set the message
- * size to non-zero if this was a successful receive. */
+ * size to non-zero if this was a successful receive.
+ */
req->rq_xid = ev->match_bits;
req->rq_reqbuf = ev->md.start + ev->offset;
if (ev->type == LNET_EVENT_PUT && ev->status == 0)
@@ -352,7 +362,8 @@ void request_in_callback(lnet_event_t *ev)
svcpt->scp_nrqbds_posted);
/* Normally, don't complain about 0 buffers posted; LNET won't
- * drop incoming reqs since we set the portal lazy */
+ * drop incoming reqs since we set the portal lazy
+ */
if (test_req_buffer_pressure &&
ev->type != LNET_EVENT_UNLINK &&
svcpt->scp_nrqbds_posted == 0)
@@ -369,7 +380,8 @@ void request_in_callback(lnet_event_t *ev)
svcpt->scp_nreqs_incoming++;
/* NB everything can disappear under us once the request
- * has been queued and we unlock, so do the wake now... */
+ * has been queued and we unlock, so do the wake now...
+ */
wake_up(&svcpt->scp_waitq);
spin_unlock(&svcpt->scp_lock);
@@ -390,7 +402,8 @@ void reply_out_callback(lnet_event_t *ev)
if (!rs->rs_difficult) {
/* 'Easy' replies have no further processing so I drop the
- * net's ref on 'rs' */
+ * net's ref on 'rs'
+ */
LASSERT(ev->unlinked);
ptlrpc_rs_decref(rs);
return;
@@ -400,7 +413,8 @@ void reply_out_callback(lnet_event_t *ev)
if (ev->unlinked) {
/* Last network callback. The net's ref on 'rs' stays put
- * until ptlrpc_handle_rs() is done with it */
+ * until ptlrpc_handle_rs() is done with it
+ */
spin_lock(&svcpt->scp_rep_lock);
spin_lock(&rs->rs_lock);
@@ -438,15 +452,12 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
__u32 best_order = 0;
int count = 0;
int rc = -ENOENT;
- int portals_compatibility;
int dist;
__u32 order;
lnet_nid_t dst_nid;
lnet_nid_t src_nid;
- portals_compatibility = LNetCtl(IOC_LIBCFS_PORTALS_COMPATIBILITY, NULL);
-
- peer->pid = LUSTRE_SRV_LNET_PID;
+ peer->pid = LNET_PID_LUSTRE;
/* Choose the matching UUID that's closest */
while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
@@ -466,14 +477,6 @@ int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
best_dist = dist;
best_order = order;
- if (portals_compatibility > 1) {
- /* Strong portals compatibility: Zero the nid's
- * NET, so if I'm reading new config logs, or
- * getting configured by (new) lconf I can
- * still talk to old servers. */
- dst_nid = LNET_MKNID(0, LNET_NIDADDR(dst_nid));
- src_nid = LNET_MKNID(0, LNET_NIDADDR(src_nid));
- }
peer->nid = dst_nid;
*self = src_nid;
rc = 0;
@@ -494,7 +497,8 @@ static void ptlrpc_ni_fini(void)
/* Wait for the event queue to become idle since there may still be
* messages in flight with pending events (i.e. the fire-and-forget
* messages == client requests and "non-difficult" server
- * replies */
+ * replies
+ */
for (retries = 0;; retries++) {
rc = LNetEQFree(ptlrpc_eq_h);
@@ -524,7 +528,7 @@ static lnet_pid_t ptl_get_pid(void)
{
lnet_pid_t pid;
- pid = LUSTRE_SRV_LNET_PID;
+ pid = LNET_PID_LUSTRE;
return pid;
}
@@ -544,11 +548,13 @@ static int ptlrpc_ni_init(void)
}
/* CAVEAT EMPTOR: how we process portals events is _radically_
- * different depending on... */
+ * different depending on...
+ */
/* kernel LNet calls our master callback when there are new event,
* because we are guaranteed to get every event via callback,
* so we just set EQ size to 0 to avoid overhead of serializing
- * enqueue/dequeue operations in LNet. */
+ * enqueue/dequeue operations in LNet.
+ */
rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
if (rc == 0)
return 0;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index f752c789bda0..cd94fed0ffdf 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -112,7 +112,8 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
* CLOSED. I would rather refcount the import and free it after
* disconnection like we do with exports. To do that, the client_obd
* will need to save the peer info somewhere other than in the import,
- * though. */
+ * though.
+ */
int ptlrpc_init_import(struct obd_import *imp)
{
spin_lock(&imp->imp_lock);
@@ -139,7 +140,7 @@ static void deuuidify(char *uuid, const char *prefix, char **uuid_start,
return;
if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
- UUID_STR, strlen(UUID_STR)))
+ UUID_STR, strlen(UUID_STR)))
*uuid_len -= strlen(UUID_STR);
}
@@ -282,11 +283,13 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
/* Wait forever until inflight == 0. We really can't do it another
* way because in some cases we need to wait for very long reply
* unlink. We can't do anything before that because there is really
- * no guarantee that some rdma transfer is not in progress right now. */
+ * no guarantee that some rdma transfer is not in progress right now.
+ */
do {
/* Calculate max timeout for waiting on rpcs to error
* out. Use obd_timeout if calculated value is smaller
- * than it. */
+ * than it.
+ */
if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
timeout = ptlrpc_inflight_timeout(imp);
timeout += timeout / 3;
@@ -304,7 +307,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
/* Wait for all requests to error out and call completion
* callbacks. Cap it at obd_timeout -- these should all
- * have been locally cancelled by ptlrpc_abort_inflight. */
+ * have been locally cancelled by ptlrpc_abort_inflight.
+ */
lwi = LWI_TIMEOUT_INTERVAL(
cfs_timeout_cap(cfs_time_seconds(timeout)),
(timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
@@ -328,28 +332,30 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
* maybe waiting for long reply unlink in
* sluggish nets). Let's check this. If there
* is no inflight and unregistering != 0, this
- * is bug. */
+ * is bug.
+ */
LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n",
count);
/* Let's save one loop as soon as inflight have
* dropped to zero. No new inflights possible at
- * this point. */
+ * this point.
+ */
rc = 0;
} else {
list_for_each_safe(tmp, n,
- &imp->imp_sending_list) {
+ &imp->imp_sending_list) {
req = list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
+ struct ptlrpc_request,
+ rq_list);
DEBUG_REQ(D_ERROR, req,
"still on sending list");
}
list_for_each_safe(tmp, n,
- &imp->imp_delayed_list) {
+ &imp->imp_delayed_list) {
req = list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
+ struct ptlrpc_request,
+ rq_list);
DEBUG_REQ(D_ERROR, req,
"still on delayed list");
}
@@ -427,7 +433,6 @@ EXPORT_SYMBOL(ptlrpc_fail_import);
int ptlrpc_reconnect_import(struct obd_import *imp)
{
-#ifdef ENABLE_PINGER
struct l_wait_info lwi;
int secs = cfs_time_seconds(obd_timeout);
int rc;
@@ -443,33 +448,6 @@ int ptlrpc_reconnect_import(struct obd_import *imp)
CDEBUG(D_HA, "%s: recovery finished s:%s\n", obd2cli_tgt(imp->imp_obd),
ptlrpc_import_state_name(imp->imp_state));
return rc;
-#else
- ptlrpc_set_import_discon(imp, 0);
- /* Force a new connect attempt */
- ptlrpc_invalidate_import(imp);
- /* Do a fresh connect next time by zeroing the handle */
- ptlrpc_disconnect_import(imp, 1);
- /* Wait for all invalidate calls to finish */
- if (atomic_read(&imp->imp_inval_count) > 0) {
- int rc;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
- rc = l_wait_event(imp->imp_recovery_waitq,
- (atomic_read(&imp->imp_inval_count) == 0),
- &lwi);
- if (rc)
- CERROR("Interrupted, inval=%d\n",
- atomic_read(&imp->imp_inval_count));
- }
-
- /* Allow reconnect attempts */
- imp->imp_obd->obd_no_recov = 0;
- /* Remove 'invalid' flag */
- ptlrpc_activate_import(imp);
- /* Attempt a new connect */
- ptlrpc_recover_import(imp, NULL, 0);
- return 0;
-#endif
}
EXPORT_SYMBOL(ptlrpc_reconnect_import);
@@ -501,18 +479,20 @@ static int import_select_connection(struct obd_import *imp)
conn->oic_last_attempt);
/* If we have not tried this connection since
- the last successful attempt, go with this one */
+ * the last successful attempt, go with this one
+ */
if ((conn->oic_last_attempt == 0) ||
cfs_time_beforeq_64(conn->oic_last_attempt,
- imp->imp_last_success_conn)) {
+ imp->imp_last_success_conn)) {
imp_conn = conn;
tried_all = 0;
break;
}
/* If all of the connections have already been tried
- since the last successful connection; just choose the
- least recently used */
+ * since the last successful connection; just choose the
+ * least recently used
+ */
if (!imp_conn)
imp_conn = conn;
else if (cfs_time_before_64(conn->oic_last_attempt,
@@ -529,10 +509,11 @@ static int import_select_connection(struct obd_import *imp)
LASSERT(imp_conn->oic_conn);
/* If we've tried everything, and we're back to the beginning of the
- list, increase our timeout and try again. It will be reset when
- we do finally connect. (FIXME: really we should wait for all network
- state associated with the last connection attempt to drain before
- trying to reconnect on it.) */
+ * list, increase our timeout and try again. It will be reset when
+ * we do finally connect. (FIXME: really we should wait for all network
+ * state associated with the last connection attempt to drain before
+ * trying to reconnect on it.)
+ */
if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;
@@ -553,7 +534,6 @@ static int import_select_connection(struct obd_import *imp)
imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
dlmexp = class_conn2export(&imp->imp_dlm_handle);
- LASSERT(dlmexp != NULL);
ptlrpc_connection_put(dlmexp->exp_connection);
dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
class_export_put(dlmexp);
@@ -590,7 +570,8 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
struct list_head *tmp;
/* The requests in committed_list always have smaller transnos than
- * the requests in replay_list */
+ * the requests in replay_list
+ */
if (!list_empty(&imp->imp_committed_list)) {
tmp = imp->imp_committed_list.next;
req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
@@ -674,7 +655,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
goto out;
/* Reset connect flags to the originally requested flags, in case
- * the server is updated on-the-fly we will get the new features. */
+ * the server is updated on-the-fly we will get the new features.
+ */
imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
/* Reset ocd_version each time so the server knows the exact versions */
imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
@@ -687,7 +669,7 @@ int ptlrpc_connect_import(struct obd_import *imp)
goto out;
request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
- if (request == NULL) {
+ if (!request) {
rc = -ENOMEM;
goto out;
}
@@ -700,7 +682,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
}
/* Report the rpc service time to the server so that it knows how long
- * to wait for clients to join recovery */
+ * to wait for clients to join recovery
+ */
lustre_msg_set_service_time(request->rq_reqmsg,
at_timeout2est(request->rq_timeout));
@@ -708,7 +691,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
* import_select_connection will increase the net latency on
* repeated reconnect attempts to cover slow networks.
* We override/ignore the server rpc completion estimate here,
- * which may be large if this is a reconnect attempt */
+ * which may be large if this is a reconnect attempt
+ */
request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
@@ -799,7 +783,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
if (rc) {
/* if this reconnect to busy export - not need select new target
- * for connecting*/
+ * for connecting
+ */
imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
spin_unlock(&imp->imp_lock);
ptlrpc_maybe_ping_import_soon(imp);
@@ -817,7 +802,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
ocd = req_capsule_server_sized_get(&request->rq_pill,
&RMF_CONNECT_DATA, ret);
- if (ocd == NULL) {
+ if (!ocd) {
CERROR("%s: no connect data from server\n",
imp->imp_obd->obd_name);
rc = -EPROTO;
@@ -851,7 +836,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
if (!exp) {
/* This could happen if export is cleaned during the
- connect attempt */
+ * connect attempt
+ */
CERROR("%s: missing export after connect\n",
imp->imp_obd->obd_name);
rc = -ENODEV;
@@ -877,14 +863,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
}
/* if applies, adjust the imp->imp_msg_magic here
- * according to reply flags */
+ * according to reply flags
+ */
imp->imp_remote_handle =
*lustre_msg_get_handle(request->rq_repmsg);
/* Initial connects are allowed for clients with non-random
* uuids when servers are in recovery. Simply signal the
- * servers replay is complete and wait in REPLAY_WAIT. */
+ * servers replay is complete and wait in REPLAY_WAIT.
+ */
if (msg_flags & MSG_CONNECT_RECOVERING) {
CDEBUG(D_HA, "connect to %s during recovery\n",
obd2cli_tgt(imp->imp_obd));
@@ -923,7 +911,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
* already erased all of our state because of previous
* eviction. If it is in recovery - we are safe to
* participate since we can reestablish all of our state
- * with server again */
+ * with server again
+ */
if ((msg_flags & MSG_CONNECT_RECOVERING)) {
CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n",
obd2cli_tgt(imp->imp_obd),
@@ -1015,8 +1004,7 @@ finish:
spin_lock(&imp->imp_lock);
list_del(&imp->imp_conn_current->oic_item);
- list_add(&imp->imp_conn_current->oic_item,
- &imp->imp_conn_list);
+ list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
imp->imp_last_success_conn =
imp->imp_conn_current->oic_last_attempt;
@@ -1039,7 +1027,8 @@ finish:
ocd->ocd_version < LUSTRE_VERSION_CODE -
LUSTRE_VERSION_OFFSET_WARN)) {
/* Sigh, some compilers do not like #ifdef in the middle
- of macro arguments */
+ * of macro arguments
+ */
const char *older = "older. Consider upgrading server or downgrading client"
;
const char *newer = "newer than client version. Consider upgrading client"
@@ -1061,7 +1050,8 @@ finish:
* fixup is version-limited, because we don't want to carry the
* OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
* need interop with unpatched 2.2 servers. For newer servers,
- * the client will do MNE swabbing only as needed. LU-1644 */
+ * the client will do MNE swabbing only as needed. LU-1644
+ */
if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
!(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
@@ -1079,7 +1069,8 @@ finish:
if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
/* We sent to the server ocd_cksum_types with bits set
* for algorithms we understand. The server masked off
- * the checksum types it doesn't support */
+ * the checksum types it doesn't support
+ */
if ((ocd->ocd_cksum_types &
cksum_types_supported_client()) == 0) {
LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
@@ -1093,14 +1084,15 @@ finish:
}
} else {
/* The server does not support OBD_CONNECT_CKSUM.
- * Enforce ADLER for backward compatibility*/
+ * Enforce ADLER for backward compatibility
+ */
cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
}
cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
cli->cl_max_pages_per_rpc =
- min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
+ min(ocd->ocd_brw_size >> PAGE_SHIFT,
cli->cl_max_pages_per_rpc);
else if (imp->imp_connect_op == MDS_CONNECT ||
imp->imp_connect_op == MGS_CONNECT)
@@ -1109,7 +1101,8 @@ finish:
/* Reset ns_connect_flags only for initial connect. It might be
* changed in while using FS and if we reset it in reconnect
* this leads to losing user settings done before such as
- * disable lru_resize, etc. */
+ * disable lru_resize, etc.
+ */
if (old_connect_flags != exp_connect_flags(exp) ||
aa->pcaa_initial_connect) {
CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n",
@@ -1123,13 +1116,14 @@ finish:
if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
(imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
/* We need a per-message support flag, because
- a. we don't know if the incoming connect reply
- supports AT or not (in reply_in_callback)
- until we unpack it.
- b. failovered server means export and flags are gone
- (in ptlrpc_send_reply).
- Can only be set when we know AT is supported at
- both ends */
+ * a. we don't know if the incoming connect reply
+ * supports AT or not (in reply_in_callback)
+ * until we unpack it.
+ * b. failovered server means export and flags are gone
+ * (in ptlrpc_send_reply).
+ * Can only be set when we know AT is supported at
+ * both ends
+ */
imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
else
imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
@@ -1162,7 +1156,7 @@ out:
struct obd_connect_data *ocd;
/* reply message might not be ready */
- if (request->rq_repmsg == NULL)
+ if (!request->rq_repmsg)
return -EPROTO;
ocd = req_capsule_server_get(&request->rq_pill,
@@ -1243,7 +1237,7 @@ static int signal_completed_replay(struct obd_import *imp)
req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING, LUSTRE_OBD_VERSION,
OBD_PING);
- if (req == NULL) {
+ if (!req) {
atomic_dec(&imp->imp_replay_inflight);
return -ENOMEM;
}
@@ -1337,12 +1331,13 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
{
struct task_struct *task;
/* bug 17802: XXX client_disconnect_export vs connect request
- * race. if client will evicted at this time, we start
+ * race. if client is evicted at this time, we start
* invalidate thread without reference to import and import can
- * be freed at same time. */
+ * be freed at same time.
+ */
class_import_get(imp);
task = kthread_run(ptlrpc_invalidate_import_thread, imp,
- "ll_imp_inval");
+ "ll_imp_inval");
if (IS_ERR(task)) {
class_import_put(imp);
CERROR("error starting invalidate thread: %d\n", rc);
@@ -1471,11 +1466,13 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
if (req) {
/* We are disconnecting, do not retry a failed DISCONNECT rpc if
* it fails. We can get through the above with a down server
- * if the client doesn't know the server is gone yet. */
+ * if the client doesn't know the server is gone yet.
+ */
req->rq_no_resend = 1;
/* We want client umounts to happen quickly, no matter the
- server state... */
+ * server state...
+ */
req->rq_timeout = min_t(int, req->rq_timeout,
INITIAL_CONNECT_TIMEOUT);
@@ -1507,9 +1504,10 @@ EXPORT_SYMBOL(ptlrpc_disconnect_import);
extern unsigned int at_min, at_max, at_history;
/* Bin into timeslices using AT_BINS bins.
- This gives us a max of the last binlimit*AT_BINS secs without the storage,
- but still smoothing out a return to normalcy from a slow response.
- (E.g. remember the maximum latency in each minute of the last 4 minutes.) */
+ * This gives us a max of the last binlimit*AT_BINS secs without the storage,
+ * but still smoothing out a return to normalcy from a slow response.
+ * (E.g. remember the maximum latency in each minute of the last 4 minutes.)
+ */
int at_measured(struct adaptive_timeout *at, unsigned int val)
{
unsigned int old = at->at_current;
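The comment above describes the adaptive-timeout history: samples are bucketed into AT_BINS time slices and the reported value is the maximum across the live slices, so a latency spike decays only once the window rolls past it. A self-contained userspace sketch of that windowed maximum (BINS, BINSPAN and the struct layout are invented, not the kernel's adaptive_timeout):

#include <stdio.h>

#define BINS    4
#define BINSPAN 60              /* seconds covered by one bin */

struct at_hist {
        unsigned int bin[BINS];
        long         start;     /* epoch second of the current bin */
        int          idx;
};

static unsigned int at_add(struct at_hist *h, long now, unsigned int val)
{
        unsigned int max = 0;
        int i, shift = (int)((now - h->start) / BINSPAN);

        /* Advance the window, zeroing bins that aged out. */
        for (i = 0; i < shift && i < BINS; i++) {
                h->idx = (h->idx + 1) % BINS;
                h->bin[h->idx] = 0;
        }
        if (shift)
                h->start += (long)shift * BINSPAN;

        if (val > h->bin[h->idx])
                h->bin[h->idx] = val;
        for (i = 0; i < BINS; i++)
                if (h->bin[i] > max)
                        max = h->bin[i];
        return max;             /* smoothed "recent worst case" */
}

int main(void)
{
        struct at_hist h = { {0}, 0, 0 };

        printf("%u\n", at_add(&h, 0, 7));    /* 7 */
        printf("%u\n", at_add(&h, 65, 3));   /* still 7: old bin kept */
        printf("%u\n", at_add(&h, 300, 2));  /* 2: the spike aged out */
        return 0;
}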
@@ -1523,7 +1521,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
if (val == 0)
/* 0's don't count, because we never want our timeout to
- drop to 0, and because 0 could mean an error */
+ * drop to 0, and because 0 could mean an error
+ */
return 0;
spin_lock(&at->at_lock);
@@ -1565,7 +1564,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
if (at->at_flags & AT_FLG_NOHIST)
/* Only keep last reported val; keeping the rest of the history
- for proc only */
+ * for debugfs only
+ */
at->at_current = val;
if (at_max > 0)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index c0e613c23854..5b06901e5729 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -118,25 +118,6 @@ static const struct req_msg_field *quotactl_only[] = {
&RMF_OBD_QUOTACTL
};
-static const struct req_msg_field *quota_body_only[] = {
- &RMF_PTLRPC_BODY,
- &RMF_QUOTA_BODY
-};
-
-static const struct req_msg_field *ldlm_intent_quota_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REQ,
- &RMF_LDLM_INTENT,
- &RMF_QUOTA_BODY
-};
-
-static const struct req_msg_field *ldlm_intent_quota_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_DLM_REP,
- &RMF_DLM_LVB,
- &RMF_QUOTA_BODY
-};
-
static const struct req_msg_field *mdt_close_client[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_EPOCH,
@@ -514,16 +495,6 @@ static const struct req_msg_field *mds_setattr_server[] = {
&RMF_CAPA2
};
-static const struct req_msg_field *mds_update_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_UPDATE,
-};
-
-static const struct req_msg_field *mds_update_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_UPDATE_REPLY,
-};
-
static const struct req_msg_field *llog_origin_handle_create_client[] = {
&RMF_PTLRPC_BODY,
&RMF_LLOGD_BODY,
@@ -551,16 +522,6 @@ static const struct req_msg_field *llog_origin_handle_next_block_server[] = {
&RMF_EADATA
};
-static const struct req_msg_field *obd_idx_read_client[] = {
- &RMF_PTLRPC_BODY,
- &RMF_IDX_INFO
-};
-
-static const struct req_msg_field *obd_idx_read_server[] = {
- &RMF_PTLRPC_BODY,
- &RMF_IDX_INFO
-};
-
static const struct req_msg_field *ost_body_only[] = {
&RMF_PTLRPC_BODY,
&RMF_OST_BODY
@@ -676,7 +637,6 @@ static const struct req_msg_field *mdt_hsm_request[] = {
static struct req_format *req_formats[] = {
&RQF_OBD_PING,
&RQF_OBD_SET_INFO,
- &RQF_OBD_IDX_READ,
&RQF_SEC_CTX,
&RQF_MGS_TARGET_REG,
&RQF_MGS_SET_INFO,
@@ -721,7 +681,6 @@ static struct req_format *req_formats[] = {
&RQF_MDS_HSM_ACTION,
&RQF_MDS_HSM_REQUEST,
&RQF_MDS_SWAP_LAYOUTS,
- &RQF_UPDATE_OBJ,
&RQF_QC_CALLBACK,
&RQF_OST_CONNECT,
&RQF_OST_DISCONNECT,
@@ -759,8 +718,6 @@ static struct req_format *req_formats[] = {
&RQF_LDLM_INTENT_CREATE,
&RQF_LDLM_INTENT_UNLINK,
&RQF_LDLM_INTENT_GETXATTR,
- &RQF_LDLM_INTENT_QUOTA,
- &RQF_QUOTA_DQACQ,
&RQF_LOG_CANCEL,
&RQF_LLOG_ORIGIN_HANDLE_CREATE,
&RQF_LLOG_ORIGIN_HANDLE_DESTROY,
@@ -899,11 +856,6 @@ struct req_msg_field RMF_OBD_QUOTACTL =
lustre_swab_obd_quotactl, NULL);
EXPORT_SYMBOL(RMF_OBD_QUOTACTL);
-struct req_msg_field RMF_QUOTA_BODY =
- DEFINE_MSGF("quota_body", 0,
- sizeof(struct quota_body), lustre_swab_quota_body, NULL);
-EXPORT_SYMBOL(RMF_QUOTA_BODY);
-
struct req_msg_field RMF_MDT_EPOCH =
DEFINE_MSGF("mdt_ioepoch", 0,
sizeof(struct mdt_ioepoch), lustre_swab_mdt_ioepoch, NULL);
@@ -938,12 +890,12 @@ EXPORT_SYMBOL(RMF_SYMTGT);
struct req_msg_field RMF_TGTUUID =
DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
- NULL);
+ NULL);
EXPORT_SYMBOL(RMF_TGTUUID);
struct req_msg_field RMF_CLUUID =
DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
- NULL);
+ NULL);
EXPORT_SYMBOL(RMF_CLUUID);
struct req_msg_field RMF_STRING =
@@ -1078,7 +1030,7 @@ EXPORT_SYMBOL(RMF_RCS);
struct req_msg_field RMF_EAVALS_LENS =
DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32),
- lustre_swab_generic_32s, NULL);
+ lustre_swab_generic_32s, NULL);
EXPORT_SYMBOL(RMF_EAVALS_LENS);
struct req_msg_field RMF_OBD_ID =
@@ -1105,10 +1057,6 @@ struct req_msg_field RMF_FIEMAP_VAL =
DEFINE_MSGF("fiemap", 0, -1, lustre_swab_fiemap, NULL);
EXPORT_SYMBOL(RMF_FIEMAP_VAL);
-struct req_msg_field RMF_IDX_INFO =
- DEFINE_MSGF("idx_info", 0, sizeof(struct idx_info),
- lustre_swab_idx_info, NULL);
-EXPORT_SYMBOL(RMF_IDX_INFO);
struct req_msg_field RMF_HSM_USER_STATE =
DEFINE_MSGF("hsm_user_state", 0, sizeof(struct hsm_user_state),
lustre_swab_hsm_user_state, NULL);
@@ -1145,15 +1093,6 @@ struct req_msg_field RMF_MDS_HSM_REQUEST =
lustre_swab_hsm_request, NULL);
EXPORT_SYMBOL(RMF_MDS_HSM_REQUEST);
-struct req_msg_field RMF_UPDATE = DEFINE_MSGF("update", 0, -1,
- lustre_swab_update_buf, NULL);
-EXPORT_SYMBOL(RMF_UPDATE);
-
-struct req_msg_field RMF_UPDATE_REPLY = DEFINE_MSGF("update_reply", 0, -1,
- lustre_swab_update_reply_buf,
- NULL);
-EXPORT_SYMBOL(RMF_UPDATE_REPLY);
-
struct req_msg_field RMF_SWAP_LAYOUTS =
DEFINE_MSGF("swap_layouts", 0, sizeof(struct mdc_swap_layouts),
lustre_swab_swap_layouts, NULL);
@@ -1196,29 +1135,23 @@ struct req_format RQF_OBD_SET_INFO =
DEFINE_REQ_FMT0("OBD_SET_INFO", obd_set_info_client, empty);
EXPORT_SYMBOL(RQF_OBD_SET_INFO);
-/* Read index file through the network */
-struct req_format RQF_OBD_IDX_READ =
- DEFINE_REQ_FMT0("OBD_IDX_READ",
- obd_idx_read_client, obd_idx_read_server);
-EXPORT_SYMBOL(RQF_OBD_IDX_READ);
-
struct req_format RQF_SEC_CTX =
DEFINE_REQ_FMT0("SEC_CTX", empty, empty);
EXPORT_SYMBOL(RQF_SEC_CTX);
struct req_format RQF_MGS_TARGET_REG =
DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only,
- mgs_target_info_only);
+ mgs_target_info_only);
EXPORT_SYMBOL(RQF_MGS_TARGET_REG);
struct req_format RQF_MGS_SET_INFO =
DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info,
- mgs_set_info);
+ mgs_set_info);
EXPORT_SYMBOL(RQF_MGS_SET_INFO);
struct req_format RQF_MGS_CONFIG_READ =
DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client,
- mgs_config_read_server);
+ mgs_config_read_server);
EXPORT_SYMBOL(RQF_MGS_CONFIG_READ);
struct req_format RQF_SEQ_QUERY =
@@ -1253,16 +1186,6 @@ struct req_format RQF_QC_CALLBACK =
DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty);
EXPORT_SYMBOL(RQF_QC_CALLBACK);
-struct req_format RQF_QUOTA_DQACQ =
- DEFINE_REQ_FMT0("QUOTA_DQACQ", quota_body_only, quota_body_only);
-EXPORT_SYMBOL(RQF_QUOTA_DQACQ);
-
-struct req_format RQF_LDLM_INTENT_QUOTA =
- DEFINE_REQ_FMT0("LDLM_INTENT_QUOTA",
- ldlm_intent_quota_client,
- ldlm_intent_quota_server);
-EXPORT_SYMBOL(RQF_LDLM_INTENT_QUOTA);
-
struct req_format RQF_MDS_GETSTATUS =
DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
@@ -1357,11 +1280,6 @@ struct req_format RQF_MDS_GET_INFO =
mds_getinfo_server);
EXPORT_SYMBOL(RQF_MDS_GET_INFO);
-struct req_format RQF_UPDATE_OBJ =
- DEFINE_REQ_FMT0("OBJECT_UPDATE_OBJ", mds_update_client,
- mds_update_server);
-EXPORT_SYMBOL(RQF_UPDATE_OBJ);
-
struct req_format RQF_LDLM_ENQUEUE =
DEFINE_REQ_FMT0("LDLM_ENQUEUE",
ldlm_enqueue_client, ldlm_enqueue_lvb_server);
@@ -1598,32 +1516,32 @@ EXPORT_SYMBOL(RQF_OST_STATFS);
struct req_format RQF_OST_SET_GRANT_INFO =
DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client,
- ost_body_only);
+ ost_body_only);
EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
struct req_format RQF_OST_GET_INFO_GENERIC =
DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
- ost_get_info_generic_server);
+ ost_get_info_generic_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC);
struct req_format RQF_OST_GET_INFO_LAST_ID =
DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
- ost_get_last_id_server);
+ ost_get_last_id_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
struct req_format RQF_OST_GET_INFO_LAST_FID =
DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client,
- ost_get_last_fid_server);
+ ost_get_last_fid_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
struct req_format RQF_OST_SET_INFO_LAST_FID =
DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client,
- empty);
+ empty);
EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID);
struct req_format RQF_OST_GET_INFO_FIEMAP =
DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client,
- ost_get_fiemap_server);
+ ost_get_fiemap_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP);
#if !defined(__REQ_LAYOUT_USER__)
@@ -1712,7 +1630,7 @@ void req_capsule_init(struct req_capsule *pill,
* high-priority RPC queue getting peeked at before ost_handle()
* handles an OST RPC.
*/
- if (req != NULL && pill == &req->rq_pill && req->rq_pill_init)
+ if (req && pill == &req->rq_pill && req->rq_pill_init)
return;
memset(pill, 0, sizeof(*pill));
@@ -1720,7 +1638,7 @@ void req_capsule_init(struct req_capsule *pill,
pill->rc_loc = location;
req_capsule_init_area(pill);
- if (req != NULL && pill == &req->rq_pill)
+ if (req && pill == &req->rq_pill)
req->rq_pill_init = 1;
}
EXPORT_SYMBOL(req_capsule_init);
@@ -1752,7 +1670,7 @@ static struct lustre_msg *__req_msg(const struct req_capsule *pill,
*/
void req_capsule_set(struct req_capsule *pill, const struct req_format *fmt)
{
- LASSERT(pill->rc_fmt == NULL || pill->rc_fmt == fmt);
+ LASSERT(!pill->rc_fmt || pill->rc_fmt == fmt);
LASSERT(__req_format_is_sane(fmt));
pill->rc_fmt = fmt;
@@ -1773,8 +1691,6 @@ int req_capsule_filled_sizes(struct req_capsule *pill,
const struct req_format *fmt = pill->rc_fmt;
int i;
- LASSERT(fmt != NULL);
-
for (i = 0; i < fmt->rf_fields[loc].nr; ++i) {
if (pill->rc_area[loc][i] == -1) {
pill->rc_area[loc][i] =
@@ -1810,15 +1726,15 @@ int req_capsule_server_pack(struct req_capsule *pill)
LASSERT(pill->rc_loc == RCL_SERVER);
fmt = pill->rc_fmt;
- LASSERT(fmt != NULL);
+ LASSERT(fmt);
count = req_capsule_filled_sizes(pill, RCL_SERVER);
rc = lustre_pack_reply(pill->rc_req, count,
pill->rc_area[RCL_SERVER], NULL);
if (rc != 0) {
DEBUG_REQ(D_ERROR, pill->rc_req,
- "Cannot pack %d fields in format `%s': ",
- count, fmt->rf_name);
+ "Cannot pack %d fields in format `%s': ",
+ count, fmt->rf_name);
}
return rc;
}
@@ -1835,9 +1751,8 @@ static int __req_capsule_offset(const struct req_capsule *pill,
int offset;
offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc];
- LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n",
- pill->rc_fmt->rf_name,
- field->rmf_name, offset, loc);
+ LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", pill->rc_fmt->rf_name,
+ field->rmf_name, offset, loc);
offset--;
LASSERT(0 <= offset && offset < REQ_MAX_FIELD_NR);
@@ -1865,7 +1780,7 @@ swabber_dumper_helper(struct req_capsule *pill,
swabber = swabber ?: field->rmf_swabber;
if (ptlrpc_buf_need_swab(pill->rc_req, inout, offset) &&
- swabber != NULL && value != NULL)
+ swabber && value)
do_swab = 1;
else
do_swab = 0;
@@ -1883,7 +1798,7 @@ swabber_dumper_helper(struct req_capsule *pill,
return;
swabber(value);
ptlrpc_buf_set_swabbed(pill->rc_req, inout, offset);
- if (dump) {
+ if (dump && field->rmf_dumper) {
CDEBUG(D_RPCTRACE, "Dump of swabbed field %s follows\n",
field->rmf_name);
field->rmf_dumper(value);
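This is the one behavioural fix in the hunk: rmf_dumper is an optional callback in struct req_msg_field, so a field that defines a swabber but no dumper would have called a NULL function pointer here whenever D_RPCTRACE tracing was enabled. Guarding the optional callback before the call is the general pattern (sketch with invented types):

struct msg_field {
        void (*rmf_dumper)(void *val);  /* optional; may be NULL */
};

static void maybe_dump(struct msg_field *f, void *val, int dump)
{
        /* Check the optional callback before invoking it; calling a
         * NULL function pointer oopses in the kernel. */
        if (dump && f->rmf_dumper)
                f->rmf_dumper(val);
}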
@@ -1947,17 +1862,15 @@ static void *__req_capsule_get(struct req_capsule *pill,
[RCL_SERVER] = "server"
};
- LASSERT(pill != NULL);
- LASSERT(pill != LP_POISON);
fmt = pill->rc_fmt;
- LASSERT(fmt != NULL);
+ LASSERT(fmt);
LASSERT(fmt != LP_POISON);
LASSERT(__req_format_is_sane(fmt));
offset = __req_capsule_offset(pill, field, loc);
msg = __req_msg(pill, loc);
- LASSERT(msg != NULL);
+ LASSERT(msg);
getter = (field->rmf_flags & RMF_F_STRING) ?
(typeof(getter))lustre_msg_string : lustre_msg_buf;
@@ -1980,7 +1893,7 @@ static void *__req_capsule_get(struct req_capsule *pill,
}
value = getter(msg, offset, len);
- if (value == NULL) {
+ if (!value) {
DEBUG_REQ(D_ERROR, pill->rc_req,
"Wrong buffer for field `%s' (%d of %d) in format `%s': %d vs. %d (%s)\n",
field->rmf_name, offset, lustre_msg_bufcount(msg),
@@ -2209,7 +2122,7 @@ void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt)
const struct req_format *old;
- LASSERT(pill->rc_fmt != NULL);
+ LASSERT(pill->rc_fmt);
LASSERT(__req_format_is_sane(fmt));
old = pill->rc_fmt;
@@ -2222,7 +2135,7 @@ void req_capsule_extend(struct req_capsule *pill, const struct req_format *fmt)
const struct req_msg_field *ofield = FMT_FIELD(old, i, j);
/* "opaque" fields can be transmogrified */
- if (ofield->rmf_swabber == NULL &&
+ if (!ofield->rmf_swabber &&
(ofield->rmf_flags & ~RMF_F_NO_SIZE_CHECK) == 0 &&
(ofield->rmf_size == -1 ||
ofield->rmf_flags == RMF_F_NO_SIZE_CHECK))
@@ -2289,7 +2202,7 @@ void req_capsule_shrink(struct req_capsule *pill,
int offset;
fmt = pill->rc_fmt;
- LASSERT(fmt != NULL);
+ LASSERT(fmt);
LASSERT(__req_format_is_sane(fmt));
LASSERT(req_capsule_has_field(pill, field, loc));
LASSERT(req_capsule_field_present(pill, field, loc));
@@ -2299,7 +2212,7 @@ void req_capsule_shrink(struct req_capsule *pill,
msg = __req_msg(pill, loc);
len = lustre_msg_buflen(msg, offset);
LASSERTF(newlen <= len, "%s:%s, oldlen=%d, newlen=%d\n",
- fmt->rf_name, field->rmf_name, len, newlen);
+ fmt->rf_name, field->rmf_name, len, newlen);
if (loc == RCL_CLIENT)
pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index e87702073f1f..a23ac5f9ae96 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -75,7 +75,8 @@
} while (0)
/* This is a callback from the llog_* functions.
- * Assumes caller has already pushed us into the kernel context. */
+ * Assumes caller has already pushed us into the kernel context.
+ */
static int llog_client_open(const struct lu_env *env,
struct llog_handle *lgh, struct llog_logid *logid,
char *name, enum llog_open_param open_param)
@@ -93,7 +94,7 @@ static int llog_client_open(const struct lu_env *env,
LASSERT(lgh);
req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto out;
}
@@ -130,7 +131,7 @@ static int llog_client_open(const struct lu_env *env,
goto out;
body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EFAULT;
goto out;
}
@@ -158,7 +159,7 @@ static int llog_client_next_block(const struct lu_env *env,
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
LUSTRE_LOG_VERSION,
LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto err_exit;
}
@@ -179,14 +180,14 @@ static int llog_client_next_block(const struct lu_env *env,
goto out;
body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EFAULT;
goto out;
}
/* The log records are swabbed as they are processed */
ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- if (ptr == NULL) {
+ if (!ptr) {
rc = -EFAULT;
goto out;
}
@@ -216,7 +217,7 @@ static int llog_client_prev_block(const struct lu_env *env,
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
LUSTRE_LOG_VERSION,
LLOG_ORIGIN_HANDLE_PREV_BLOCK);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto err_exit;
}
@@ -236,13 +237,13 @@ static int llog_client_prev_block(const struct lu_env *env,
goto out;
body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
- if (body == NULL) {
+ if (!body) {
rc = -EFAULT;
goto out;
}
ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA);
- if (ptr == NULL) {
+ if (!ptr) {
rc = -EFAULT;
goto out;
}
@@ -269,7 +270,7 @@ static int llog_client_read_header(const struct lu_env *env,
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
LUSTRE_LOG_VERSION,
LLOG_ORIGIN_HANDLE_READ_HEADER);
- if (req == NULL) {
+ if (!req) {
rc = -ENOMEM;
goto err_exit;
}
@@ -285,7 +286,7 @@ static int llog_client_read_header(const struct lu_env *env,
goto out;
hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
- if (hdr == NULL) {
+ if (!hdr) {
rc = -EFAULT;
goto out;
}
@@ -316,8 +317,9 @@ static int llog_client_close(const struct lu_env *env,
struct llog_handle *handle)
{
/* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because
- the servers all close the file at the end of every
- other LLOG_ RPC. */
+ * the servers all close the file at the end of every
+ * other LLOG_ RPC.
+ */
return 0;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
index dac66f5b39da..fbccb62213b5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
@@ -58,7 +58,7 @@ int llog_initiator_connect(struct llog_ctxt *ctxt)
LASSERT(ctxt);
new_imp = ctxt->loc_obd->u.cli.cl_import;
- LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
+ LASSERTF(!ctxt->loc_imp || ctxt->loc_imp == new_imp,
"%p - %p\n", ctxt->loc_imp, new_imp);
mutex_lock(&ctxt->loc_mutex);
if (ctxt->loc_imp != new_imp) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index cc55b7973721..c95a91ce26c9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -131,7 +131,6 @@ static struct ll_rpc_opcode {
{ SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
{ SEC_CTX_FINI, "sec_ctx_fini" },
{ FLD_QUERY, "fld_query" },
- { UPDATE_OBJ, "update_obj" },
};
static struct ll_eopcode {
@@ -192,15 +191,15 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir,
unsigned int svc_counter_config = LPROCFS_CNTR_AVGMINMAX |
LPROCFS_CNTR_STDDEV;
- LASSERT(*debugfs_root_ret == NULL);
- LASSERT(*stats_ret == NULL);
+ LASSERT(!*debugfs_root_ret);
+ LASSERT(!*stats_ret);
svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,
0);
- if (svc_stats == NULL)
+ if (!svc_stats)
return;
- if (dir != NULL) {
+ if (dir) {
svc_debugfs_entry = ldebugfs_register(dir, root, NULL, NULL);
if (IS_ERR(svc_debugfs_entry)) {
lprocfs_free_stats(&svc_stats);
@@ -246,11 +245,11 @@ ptlrpc_ldebugfs_register(struct dentry *root, char *dir,
rc = ldebugfs_register_stats(svc_debugfs_entry, name, svc_stats);
if (rc < 0) {
- if (dir != NULL)
+ if (dir)
ldebugfs_remove(&svc_debugfs_entry);
lprocfs_free_stats(&svc_stats);
} else {
- if (dir != NULL)
+ if (dir)
*debugfs_root_ret = svc_debugfs_entry;
*stats_ret = svc_stats;
}
@@ -307,8 +306,9 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
/* This sanity check is more of an insanity check; we can still
* hose a kernel by allowing the request history to grow too
- * far. */
- bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ * far.
+ */
+ bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (val > totalram_pages / (2 * bufpages))
return -ERANGE;
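This hunk also drops the PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT aliases (since removed from the kernel) in favor of PAGE_SIZE/PAGE_SHIFT. The bufpages expression is the standard round-up-to-pages idiom; a userspace sketch, assuming 4 KiB pages for the demo:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* bytes -> pages, rounding up: same form as the bufpages computation */
static unsigned long bytes_to_pages(unsigned long bytes)
{
        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
        assert(bytes_to_pages(1) == 1);
        assert(bytes_to_pages(PAGE_SIZE) == 1);
        assert(bytes_to_pages(PAGE_SIZE + 1) == 2);
        printf("ok\n");
        return 0;
}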
@@ -454,10 +454,8 @@ static const char *nrs_state2str(enum ptlrpc_nrs_pol_state state)
* \param[out] info Holds returned status information
*/
static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_pol_info *info)
+ struct ptlrpc_nrs_pol_info *info)
{
- LASSERT(policy != NULL);
- LASSERT(info != NULL);
assert_spin_locked(&policy->pol_nrs->nrs_lock);
memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
@@ -508,7 +506,7 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
spin_unlock(&nrs->nrs_lock);
infos = kcalloc(num_pols, sizeof(*infos), GFP_NOFS);
- if (infos == NULL) {
+ if (!infos) {
rc = -ENOMEM;
goto unlock;
}
@@ -520,8 +518,7 @@ again:
pol_idx = 0;
- list_for_each_entry(policy, &nrs->nrs_policy_list,
- pol_list) {
+ list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) {
LASSERT(pol_idx < num_pols);
nrs_policy_get_info_locked(policy, &tmp);
@@ -592,7 +589,7 @@ again:
* active: 0
*/
seq_printf(m, "%s\n",
- !hp ? "\nregular_requests:" : "high_priority_requests:");
+ !hp ? "\nregular_requests:" : "high_priority_requests:");
for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
seq_printf(m, " - name: %s\n"
@@ -676,7 +673,7 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
/**
* No [reg|hp] token has been specified
*/
- if (cmd == NULL)
+ if (!cmd)
goto default_queue;
/**
@@ -733,15 +730,15 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
struct list_head *e;
struct ptlrpc_request *req;
- if (srhi->srhi_req != NULL &&
- srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
+ if (srhi->srhi_req && srhi->srhi_seq > svcpt->scp_hist_seq_culled &&
srhi->srhi_seq <= seq) {
/* If srhi_req was set previously, hasn't been culled and
* we're searching for a seq on or after it (i.e. more
* recent), search from it onwards.
* Since the service history is LRU (i.e. culled reqs will
* be near the head), we shouldn't have to do long
- * re-scans */
+ * re-scans
+ */
LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
"%s:%d: seek seq %llu, request seq %llu\n",
svcpt->scp_service->srv_name, svcpt->scp_cpt,
@@ -919,7 +916,8 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
* here. The request could contain any old crap, so you
* must be just as careful as the service's request
* parser. Currently I only print stuff here I know is OK
- * to look at coz it was set up in request_in_callback()!!! */
+ * to look at because it was set up in request_in_callback()!!!
+ */
seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ",
req->rq_history_seq, nidstr,
libcfs_id2str(req->rq_peer), req->rq_xid,
@@ -927,7 +925,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
(s64)req->rq_arrival_time.tv_sec,
(long)(req->rq_sent - req->rq_arrival_time.tv_sec),
(long)(req->rq_sent - req->rq_deadline));
- if (svc->srv_ops.so_req_printer == NULL)
+ if (!svc->srv_ops.so_req_printer)
seq_putc(s, '\n');
else
svc->srv_ops.so_req_printer(s, srhi->srhi_req);
@@ -971,7 +969,7 @@ static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n)
if (AT_OFF) {
seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n",
- obd_timeout);
+ obd_timeout);
return 0;
}
@@ -982,8 +980,8 @@ static int ptlrpc_lprocfs_timeouts_seq_show(struct seq_file *m, void *n)
s2dhms(&ts, ktime_get_real_seconds() - worstt);
seq_printf(m, "%10s : cur %3u worst %3u (at %lld, "
- DHMS_FMT" ago) ", "service",
- cur, worst, (s64)worstt, DHMS_VARS(&ts));
+ DHMS_FMT " ago) ", "service",
+ cur, worst, (s64)worstt, DHMS_VARS(&ts));
lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate);
}
@@ -1103,7 +1101,7 @@ void ptlrpc_ldebugfs_register_service(struct dentry *entry,
"stats", &svc->srv_debugfs_entry,
&svc->srv_stats);
- if (svc->srv_debugfs_entry == NULL)
+ if (IS_ERR_OR_NULL(svc->srv_debugfs_entry))
return;
ldebugfs_add_vars(svc->srv_debugfs_entry, lproc_vars, NULL);
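ldebugfs_register() returns an ERR_PTR on failure rather than NULL, so a plain NULL test on srv_debugfs_entry misses the error case; IS_ERR_OR_NULL() covers both encodings. A hedged sketch of the same pattern against the stock debugfs API (the demo_* names are illustrative, not Lustre functions):

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct dentry *demo_dir;

static int demo_register(void)
{
        demo_dir = debugfs_create_dir("demo", NULL);
        if (IS_ERR_OR_NULL(demo_dir))   /* covers NULL and ERR_PTR */
                return demo_dir ? PTR_ERR(demo_dir) : -ENOMEM;
        return 0;
}

static void demo_unregister(void)
{
        if (!IS_ERR_OR_NULL(demo_dir))
                debugfs_remove(demo_dir);
}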
@@ -1129,7 +1127,7 @@ void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount)
int opc = opcode_offset(op);
svc_stats = req->rq_import->imp_obd->obd_svc_stats;
- if (svc_stats == NULL || opc <= 0)
+ if (!svc_stats || opc <= 0)
return;
LASSERT(opc < LUSTRE_MAX_OPCODES);
if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
@@ -1166,7 +1164,7 @@ EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
{
- if (svc->srv_debugfs_entry != NULL)
+ if (!IS_ERR_OR_NULL(svc->srv_debugfs_entry))
ldebugfs_remove(&svc->srv_debugfs_entry);
if (svc->srv_stats)
@@ -1198,7 +1196,7 @@ int lprocfs_wr_ping(struct file *file, const char __user *buffer,
req = ptlrpc_prep_ping(obd->u.cli.cl_import);
up_read(&obd->u.cli.cl_sem);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req->rq_send_state = LUSTRE_IMP_FULL;
@@ -1228,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
const char prefix[] = "connection=";
const int prefix_len = sizeof(prefix) - 1;
- if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
+ if (count > PAGE_SIZE - 1 || count <= prefix_len)
return -EINVAL;
kbuf = kzalloc(count + 1, GFP_NOFS);
@@ -1298,7 +1296,7 @@ int lprocfs_rd_pinger_recov(struct seq_file *m, void *n)
EXPORT_SYMBOL(lprocfs_rd_pinger_recov);
int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
struct client_obd *cli = &obd->u.cli;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index c5d7ff5cbd73..10b8fe82a342 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -56,7 +56,6 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
lnet_md_t md;
LASSERT(portal != 0);
- LASSERT(conn != NULL);
CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
md.start = base;
md.length = len;
@@ -88,7 +87,8 @@ static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
int rc2;
/* We're going to get an UNLINK event when I unlink below,
* which will complete just like any other failed send, so
- * I fall through and return success here! */
+ * I fall through and return success here!
+ */
CERROR("LNetPut(%s, %d, %lld) failed: %d\n",
libcfs_id2str(conn->c_peer), portal, xid, rc);
rc2 = LNetMDUnlink(*mdh);
@@ -130,7 +130,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
LASSERT(desc->bd_md_count == 0);
LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
- LASSERT(desc->bd_req != NULL);
+ LASSERT(desc->bd_req);
LASSERT(desc->bd_type == BULK_PUT_SINK ||
desc->bd_type == BULK_GET_SOURCE);
@@ -153,7 +153,8 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
* using the same RDMA match bits after an error.
*
* For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
- * first bulk XID is power-of-two aligned before rq_xid. LU-1431 */
+ * first bulk XID is power-of-two aligned before rq_xid. LU-1431
+ */
xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
LASSERTF(!(desc->bd_registered &&
req->rq_send_state != LUSTRE_IMP_REPLAY) ||
@@ -209,7 +210,8 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
}
/* Set rq_xid to matchbits of the final bulk so that server can
- * infer the number of bulks that were prepared */
+ * infer the number of bulks that were prepared
+ */
req->rq_xid = --xid;
LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
"bd_last_xid = x%llu, rq_xid = x%llu\n",
@@ -260,7 +262,8 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
/* the unlink ensures the callback happens ASAP and is the last
* one. If it fails, it must be because completion just happened,
* but we must still l_wait_event() in this case to give liblustre
- * a chance to run client_bulk_callback() */
+ * a chance to run client_bulk_callback()
+ */
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
@@ -273,14 +276,15 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
if (async)
return 0;
- if (req->rq_set != NULL)
+ if (req->rq_set)
wq = &req->rq_set->set_waitq;
else
wq = &req->rq_reply_waitq;
for (;;) {
/* Network access will complete in finite time but the HUGE
- * timeout lets us CWARN for visibility of sluggish NALs */
+ * timeout lets us CWARN for visibility of sluggish LNDs
+ */
lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
cfs_time_seconds(1), NULL, NULL);
rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
@@ -305,13 +309,13 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
req->rq_arrival_time.tv_sec, 1);
if (!(flags & PTLRPC_REPLY_EARLY) &&
- (req->rq_type != PTL_RPC_MSG_ERR) &&
- (req->rq_reqmsg != NULL) &&
+ (req->rq_type != PTL_RPC_MSG_ERR) && req->rq_reqmsg &&
!(lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_RESENT | MSG_REPLAY |
MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE))) {
/* early replies, errors and recovery requests don't count
- * toward our service time estimate */
+ * toward our service time estimate
+ */
int oldse = at_measured(&svcpt->scp_at_estimate, service_time);
if (oldse != 0) {
@@ -325,7 +329,8 @@ static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
lustre_msg_set_service_time(req->rq_repmsg, service_time);
/* Report service time estimate for future client reqs, but report 0
* (to be ignored by client) if it's a error reply during recovery.
- * (bz15815) */
+ * (bz15815)
+ */
if (req->rq_type == PTL_RPC_MSG_ERR && !req->rq_export)
lustre_msg_set_timeout(req->rq_repmsg, 0);
else
@@ -360,10 +365,10 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
* target_queue_final_reply().
*/
LASSERT(req->rq_no_reply == 0);
- LASSERT(req->rq_reqbuf != NULL);
- LASSERT(rs != NULL);
+ LASSERT(req->rq_reqbuf);
+ LASSERT(rs);
LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
- LASSERT(req->rq_repmsg != NULL);
+ LASSERT(req->rq_repmsg);
LASSERT(req->rq_repmsg == rs->rs_msg);
LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
LASSERT(rs->rs_cb_id.cbid_arg == rs);
@@ -403,12 +408,12 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
ptlrpc_at_set_reply(req, flags);
- if (req->rq_export == NULL || req->rq_export->exp_connection == NULL)
+ if (!req->rq_export || !req->rq_export->exp_connection)
conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL);
else
conn = ptlrpc_connection_addref(req->rq_export->exp_connection);
- if (unlikely(conn == NULL)) {
+ if (unlikely(!conn)) {
CERROR("not replying on NULL connection\n"); /* bug 9635 */
return -ENOTCONN;
}
@@ -498,14 +503,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
LASSERT(request->rq_wait_ctx == 0);
/* If this is a re-transmit, we're required to have disengaged
- * cleanly from the previous attempt */
+ * cleanly from the previous attempt
+ */
LASSERT(!request->rq_receiving_reply);
LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
- (request->rq_import->imp_state == LUSTRE_IMP_FULL)));
+ (request->rq_import->imp_state == LUSTRE_IMP_FULL)));
- if (unlikely(obd != NULL && obd->obd_fail)) {
+ if (unlikely(obd && obd->obd_fail)) {
CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
- obd->obd_name);
+ obd->obd_name);
/* this prevents us from waiting in ptlrpc_queue_wait */
spin_lock(&request->rq_lock);
request->rq_err = 1;
@@ -535,7 +541,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
goto out;
/* bulk register should be done after wrap_request() */
- if (request->rq_bulk != NULL) {
+ if (request->rq_bulk) {
rc = ptlrpc_register_bulk(request);
if (rc != 0)
goto out;
@@ -543,14 +549,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
if (!noreply) {
LASSERT(request->rq_replen != 0);
- if (request->rq_repbuf == NULL) {
- LASSERT(request->rq_repdata == NULL);
- LASSERT(request->rq_repmsg == NULL);
+ if (!request->rq_repbuf) {
+ LASSERT(!request->rq_repdata);
+ LASSERT(!request->rq_repmsg);
rc = sptlrpc_cli_alloc_repbuf(request,
request->rq_replen);
if (rc) {
/* this prevents us from looping in
- * ptlrpc_queue_wait */
+ * ptlrpc_queue_wait
+ */
spin_lock(&request->rq_lock);
request->rq_err = 1;
spin_unlock(&request->rq_lock);
@@ -602,7 +609,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
reply_md.eq_handle = ptlrpc_eq_h;
/* We must see the unlink callback to unset rq_reply_unlink,
- so we can't auto-unlink */
+ * so we can't auto-unlink
+ */
rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
&request->rq_reply_md_h);
if (rc != 0) {
@@ -623,7 +631,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
/* add references on request for request_out_callback */
ptlrpc_request_addref(request);
- if (obd != NULL && obd->obd_svc_stats != NULL)
+ if (obd && obd->obd_svc_stats)
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQACTIVE_CNTR,
atomic_read(&request->rq_import->imp_inflight));
@@ -632,7 +640,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
ktime_get_real_ts64(&request->rq_arrival_time);
request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
- add the network latency for our local timeout. */
+ * add the network latency for our local timeout.
+ */
request->rq_deadline = request->rq_sent + request->rq_timeout +
ptlrpc_at_get_net_latency(request);
@@ -656,7 +665,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
cleanup_me:
/* MEUnlink is safe; the PUT didn't even get off the ground, and
* nobody apart from the PUT's target has the right nid+XID to
- * access the reply buffer. */
+ * access the reply buffer.
+ */
rc2 = LNetMEUnlink(reply_me_h);
LASSERT(rc2 == 0);
/* UNLINKED callback called synchronously */
@@ -664,7 +674,8 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
cleanup_bulk:
/* We do sync unlink here as there was no real transfer here so
- * the chance to have long unlink to sluggish net is smaller here. */
+ * the chance to have long unlink to sluggish net is smaller here.
+ */
ptlrpc_unregister_bulk(request, 0);
out:
if (request->rq_memalloc)
@@ -692,7 +703,8 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
/* NB: CPT affinity service should use new LNet flag LNET_INS_LOCAL,
* which means buffer can only be attached on local CPT, and LND
- * threads can find it by grabbing a local lock */
+ * threads can find it by grabbing a local lock
+ */
rc = LNetMEAttach(service->srv_req_portal,
match_id, 0, ~0, LNET_UNLINK,
rqbd->rqbd_svcpt->scp_cpt >= 0 ?
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 7044e1ff6692..710fb806f122 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -13,10 +13,6 @@
* GNU General Public License version 2 for more details. A copy is
* included in the COPYING file that accompanied this code.
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* GPL HEADER END
*/
/*
@@ -47,9 +43,6 @@
#include "../../include/linux/libcfs/libcfs.h"
#include "ptlrpc_internal.h"
-/* XXX: This is just for liblustre. Remove the #if defined directive when the
- * "cfs_" prefix is dropped from cfs_list_head. */
-
/**
* NRS core object.
*/
@@ -57,7 +50,7 @@ struct nrs_core nrs_core;
static int nrs_policy_init(struct ptlrpc_nrs_policy *policy)
{
- return policy->pol_desc->pd_ops->op_policy_init != NULL ?
+ return policy->pol_desc->pd_ops->op_policy_init ?
policy->pol_desc->pd_ops->op_policy_init(policy) : 0;
}
@@ -66,7 +59,7 @@ static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy)
LASSERT(policy->pol_ref == 0);
LASSERT(policy->pol_req_queued == 0);
- if (policy->pol_desc->pd_ops->op_policy_fini != NULL)
+ if (policy->pol_desc->pd_ops->op_policy_fini)
policy->pol_desc->pd_ops->op_policy_fini(policy);
}
@@ -82,7 +75,7 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
if (policy->pol_state == NRS_POL_STATE_STOPPED)
return -ENODEV;
- return policy->pol_desc->pd_ops->op_policy_ctl != NULL ?
+ return policy->pol_desc->pd_ops->op_policy_ctl ?
policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) :
-ENOSYS;
}
@@ -91,7 +84,7 @@ static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
{
struct ptlrpc_nrs *nrs = policy->pol_nrs;
- if (policy->pol_desc->pd_ops->op_policy_stop != NULL) {
+ if (policy->pol_desc->pd_ops->op_policy_stop) {
spin_unlock(&nrs->nrs_lock);
policy->pol_desc->pd_ops->op_policy_stop(policy);
@@ -154,7 +147,7 @@ static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs)
{
struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary;
- if (tmp == NULL)
+ if (!tmp)
return;
nrs->nrs_policy_primary = NULL;
@@ -220,12 +213,12 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
* nrs_policy_flags::PTLRPC_NRS_FL_FALLBACK flag set can
* register with NRS core.
*/
- LASSERT(nrs->nrs_policy_fallback == NULL);
+ LASSERT(!nrs->nrs_policy_fallback);
} else {
/**
* Shouldn't start primary policy if w/o fallback policy.
*/
- if (nrs->nrs_policy_fallback == NULL)
+ if (!nrs->nrs_policy_fallback)
return -EPERM;
if (policy->pol_state == NRS_POL_STATE_STARTED)
@@ -311,7 +304,7 @@ static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy)
policy->pol_ref--;
if (unlikely(policy->pol_ref == 0 &&
- policy->pol_state == NRS_POL_STATE_STOPPING))
+ policy->pol_state == NRS_POL_STATE_STOPPING))
nrs_policy_stop0(policy);
}
@@ -326,7 +319,7 @@ static void nrs_policy_put(struct ptlrpc_nrs_policy *policy)
* Find and return a policy by name.
*/
static struct ptlrpc_nrs_policy *nrs_policy_find_locked(struct ptlrpc_nrs *nrs,
- char *name)
+ char *name)
{
struct ptlrpc_nrs_policy *tmp;
@@ -348,10 +341,10 @@ static void nrs_resource_put(struct ptlrpc_nrs_resource *res)
{
struct ptlrpc_nrs_policy *policy = res->res_policy;
- if (policy->pol_desc->pd_ops->op_res_put != NULL) {
+ if (policy->pol_desc->pd_ops->op_res_put) {
struct ptlrpc_nrs_resource *parent;
- for (; res != NULL; res = parent) {
+ for (; res; res = parent) {
parent = res->res_parent;
policy->pol_desc->pd_ops->op_res_put(policy, res);
}
@@ -390,12 +383,11 @@ struct ptlrpc_nrs_resource *nrs_resource_get(struct ptlrpc_nrs_policy *policy,
rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res,
&tmp, moving_req);
if (rc < 0) {
- if (res != NULL)
+ if (res)
nrs_resource_put(res);
return NULL;
}
- LASSERT(tmp != NULL);
tmp->res_parent = res;
tmp->res_policy = policy;
res = tmp;
@@ -445,7 +437,7 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs,
nrs_policy_get_locked(fallback);
primary = nrs->nrs_policy_primary;
- if (primary != NULL)
+ if (primary)
nrs_policy_get_locked(primary);
spin_unlock(&nrs->nrs_lock);
@@ -454,9 +446,9 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs,
* Obtain resource hierarchy references.
*/
resp[NRS_RES_FALLBACK] = nrs_resource_get(fallback, nrq, moving_req);
- LASSERT(resp[NRS_RES_FALLBACK] != NULL);
+ LASSERT(resp[NRS_RES_FALLBACK]);
- if (primary != NULL) {
+ if (primary) {
resp[NRS_RES_PRIMARY] = nrs_resource_get(primary, nrq,
moving_req);
/**
@@ -465,7 +457,7 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs,
* reference on the policy as it will not be used for this
* request.
*/
- if (resp[NRS_RES_PRIMARY] == NULL)
+ if (!resp[NRS_RES_PRIMARY])
nrs_policy_put(primary);
}
}
@@ -482,11 +474,10 @@ static void nrs_resource_get_safe(struct ptlrpc_nrs *nrs,
static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp)
{
struct ptlrpc_nrs_policy *pols[NRS_RES_MAX];
- struct ptlrpc_nrs *nrs = NULL;
int i;
for (i = 0; i < NRS_RES_MAX; i++) {
- if (resp[i] != NULL) {
+ if (resp[i]) {
pols[i] = resp[i]->res_policy;
nrs_resource_put(resp[i]);
resp[i] = NULL;
@@ -496,18 +487,9 @@ static void nrs_resource_put_safe(struct ptlrpc_nrs_resource **resp)
}
for (i = 0; i < NRS_RES_MAX; i++) {
- if (pols[i] == NULL)
- continue;
-
- if (nrs == NULL) {
- nrs = pols[i]->pol_nrs;
- spin_lock(&nrs->nrs_lock);
- }
- nrs_policy_put_locked(pols[i]);
+ if (pols[i])
+ nrs_policy_put(pols[i]);
}
-
- if (nrs != NULL)
- spin_unlock(&nrs->nrs_lock);
}
/**
@@ -536,7 +518,7 @@ struct ptlrpc_nrs_request *nrs_request_get(struct ptlrpc_nrs_policy *policy,
nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force);
- LASSERT(ergo(nrq != NULL, nrs_request_policy(nrq) == policy));
+ LASSERT(ergo(nrq, nrs_request_policy(nrq) == policy));
return nrq;
}
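ergo() is libcfs's logical-implication macro, so the assertion reads "if a request was returned, it belongs to this policy". A minimal sketch of the macro (the definition below matches my reading of libcfs) and its truth table:

#include <assert.h>
#include <stdio.h>

/* logical implication, as in libcfs: a -> b */
#define ergo(a, b) (!(a) || (b))

int main(void)
{
        assert(ergo(0, 0));     /* false -> anything holds */
        assert(ergo(0, 1));
        assert(!ergo(1, 0));    /* only true -> false fails */
        assert(ergo(1, 1));
        printf("ergo ok\n");
        return 0;
}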
@@ -562,7 +544,7 @@ static inline void nrs_request_enqueue(struct ptlrpc_nrs_request *nrq)
* the preferred choice.
*/
for (i = NRS_RES_MAX - 1; i >= 0; i--) {
- if (nrq->nr_res_ptrs[i] == NULL)
+ if (!nrq->nr_res_ptrs[i])
continue;
nrq->nr_res_idx = i;
@@ -632,7 +614,7 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
spin_lock(&nrs->nrs_lock);
policy = nrs_policy_find_locked(nrs, name);
- if (policy == NULL) {
+ if (!policy) {
rc = -ENOENT;
goto out;
}
@@ -654,7 +636,7 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
break;
}
out:
- if (policy != NULL)
+ if (policy)
nrs_policy_put_locked(policy);
spin_unlock(&nrs->nrs_lock);
@@ -679,7 +661,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
spin_lock(&nrs->nrs_lock);
policy = nrs_policy_find_locked(nrs, name);
- if (policy == NULL) {
+ if (!policy) {
spin_unlock(&nrs->nrs_lock);
CERROR("Can't find NRS policy %s\n", name);
@@ -712,7 +694,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
nrs_policy_fini(policy);
- LASSERT(policy->pol_private == NULL);
+ LASSERT(!policy->pol_private);
kfree(policy);
return 0;
@@ -736,18 +718,16 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
int rc;
- LASSERT(svcpt != NULL);
- LASSERT(desc->pd_ops != NULL);
- LASSERT(desc->pd_ops->op_res_get != NULL);
- LASSERT(desc->pd_ops->op_req_get != NULL);
- LASSERT(desc->pd_ops->op_req_enqueue != NULL);
- LASSERT(desc->pd_ops->op_req_dequeue != NULL);
- LASSERT(desc->pd_compat != NULL);
+ LASSERT(desc->pd_ops->op_res_get);
+ LASSERT(desc->pd_ops->op_req_get);
+ LASSERT(desc->pd_ops->op_req_enqueue);
+ LASSERT(desc->pd_ops->op_req_dequeue);
+ LASSERT(desc->pd_compat);
policy = kzalloc_node(sizeof(*policy), GFP_NOFS,
cfs_cpt_spread_node(svcpt->scp_service->srv_cptable,
svcpt->scp_cpt));
- if (policy == NULL)
+ if (!policy)
return -ENOMEM;
policy->pol_nrs = nrs;
@@ -767,7 +747,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
spin_lock(&nrs->nrs_lock);
tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name);
- if (tmp != NULL) {
+ if (tmp) {
CERROR("NRS policy %s has been registered, can't register it for %s\n",
policy->pol_desc->pd_name,
svcpt->scp_service->srv_name);
@@ -817,7 +797,7 @@ static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req)
*/
if (unlikely(list_empty(&policy->pol_list_queued)))
list_add_tail(&policy->pol_list_queued,
- &policy->pol_nrs->nrs_policy_queued);
+ &policy->pol_nrs->nrs_policy_queued);
}
/**
@@ -957,14 +937,14 @@ static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt)
/**
* Optionally allocate a high-priority NRS head.
*/
- if (svcpt->scp_service->srv_ops.so_hpreq_handler == NULL)
+ if (!svcpt->scp_service->srv_ops.so_hpreq_handler)
goto out;
svcpt->scp_nrs_hp =
kzalloc_node(sizeof(*svcpt->scp_nrs_hp), GFP_NOFS,
cfs_cpt_spread_node(svcpt->scp_service->srv_cptable,
svcpt->scp_cpt));
- if (svcpt->scp_nrs_hp == NULL) {
+ if (!svcpt->scp_nrs_hp) {
rc = -ENOMEM;
goto out;
}
@@ -998,8 +978,7 @@ again:
nrs = nrs_svcpt2nrs(svcpt, hp);
nrs->nrs_stopping = 1;
- list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list,
- pol_list) {
+ list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) {
rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
LASSERT(rc == 0);
}
@@ -1089,7 +1068,7 @@ again:
}
}
- if (desc->pd_ops->op_lprocfs_fini != NULL)
+ if (desc->pd_ops->op_lprocfs_fini)
desc->pd_ops->op_lprocfs_fini(svc);
}
@@ -1115,15 +1094,15 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
{
struct ptlrpc_service *svc;
struct ptlrpc_nrs_pol_desc *desc;
+ size_t len;
int rc = 0;
- LASSERT(conf != NULL);
- LASSERT(conf->nc_ops != NULL);
- LASSERT(conf->nc_compat != NULL);
+ LASSERT(conf->nc_ops);
+ LASSERT(conf->nc_compat);
LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one,
- conf->nc_compat_svc_name != NULL));
+ conf->nc_compat_svc_name));
LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0,
- conf->nc_owner != NULL));
+ conf->nc_owner));
conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0';
@@ -1146,7 +1125,7 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
mutex_lock(&nrs_core.nrs_mutex);
- if (nrs_policy_find_desc_locked(conf->nc_name) != NULL) {
+ if (nrs_policy_find_desc_locked(conf->nc_name)) {
CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n",
conf->nc_name);
rc = -EEXIST;
@@ -1159,7 +1138,12 @@ static int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
goto fail;
}
- strncpy(desc->pd_name, conf->nc_name, NRS_POL_NAME_MAX);
+ len = strlcpy(desc->pd_name, conf->nc_name, sizeof(desc->pd_name));
+ if (len >= sizeof(desc->pd_name)) {
+ kfree(desc);
+ rc = -E2BIG;
+ goto fail;
+ }
desc->pd_ops = conf->nc_ops;
desc->pd_compat = conf->nc_compat;
desc->pd_compat_svc_name = conf->nc_compat_svc_name;
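Unlike strncpy(), strlcpy() always NUL-terminates and returns the full source length, so truncation is detectable by comparing against the destination size; that is what the new -E2BIG path checks. A userspace sketch with a local implementation, since glibc has no strlcpy():

#include <stdio.h>
#include <string.h>

/* minimal strlcpy: truncating copy, always NUL-terminates,
 * returns strlen(src) so callers can detect truncation */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len >= size ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

int main(void)
{
        char name[8];
        size_t len = my_strlcpy(name, "a-policy-name-too-long", sizeof(name));

        if (len >= sizeof(name))
                printf("would return -E2BIG (truncated to \"%s\")\n", name);
        return 0;
}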
@@ -1224,7 +1208,7 @@ again:
* No need to take a reference to other modules here, as we
* will be calling from the module's init() function.
*/
- if (desc->pd_ops->op_lprocfs_init != NULL) {
+ if (desc->pd_ops->op_lprocfs_init) {
rc = desc->pd_ops->op_lprocfs_init(svc);
if (rc != 0) {
rc2 = nrs_policy_unregister_locked(desc);
@@ -1288,7 +1272,7 @@ int ptlrpc_service_nrs_setup(struct ptlrpc_service *svc)
if (!nrs_policy_compatible(svc, desc))
continue;
- if (desc->pd_ops->op_lprocfs_init != NULL) {
+ if (desc->pd_ops->op_lprocfs_init) {
rc = desc->pd_ops->op_lprocfs_init(svc);
if (rc != 0)
goto failed;
@@ -1329,7 +1313,7 @@ void ptlrpc_service_nrs_cleanup(struct ptlrpc_service *svc)
if (!nrs_policy_compatible(svc, desc))
continue;
- if (desc->pd_ops->op_lprocfs_fini != NULL)
+ if (desc->pd_ops->op_lprocfs_fini)
desc->pd_ops->op_lprocfs_fini(svc);
}
@@ -1376,7 +1360,8 @@ void ptlrpc_nrs_req_finalize(struct ptlrpc_request *req)
if (req->rq_nrq.nr_initialized) {
nrs_resource_put_safe(req->rq_nrq.nr_res_ptrs);
/* no protection on bit nr_initialized because no
- * contention at this late stage */
+ * contention at this late stage
+ */
req->rq_nrq.nr_finalized = 1;
}
}
@@ -1434,7 +1419,7 @@ static void nrs_request_removed(struct ptlrpc_nrs_policy *policy)
policy->pol_nrs->nrs_req_queued);
list_move_tail(&policy->pol_list_queued,
- &policy->pol_nrs->nrs_policy_queued);
+ &policy->pol_nrs->nrs_policy_queued);
}
}
@@ -1466,10 +1451,9 @@ ptlrpc_nrs_req_get_nolock0(struct ptlrpc_service_part *svcpt, bool hp,
* Always try to drain requests from all NRS polices even if they are
* inactive, because the user can change policy status at runtime.
*/
- list_for_each_entry(policy, &nrs->nrs_policy_queued,
- pol_list_queued) {
+ list_for_each_entry(policy, &nrs->nrs_policy_queued, pol_list_queued) {
nrq = nrs_request_get(policy, peek, force);
- if (nrq != NULL) {
+ if (nrq) {
if (likely(!peek)) {
nrq->nr_started = 1;
@@ -1619,8 +1603,7 @@ void ptlrpc_nrs_fini(void)
struct ptlrpc_nrs_pol_desc *desc;
struct ptlrpc_nrs_pol_desc *tmp;
- list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies,
- pd_list) {
+ list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, pd_list) {
list_del_init(&desc->pd_list);
kfree(desc);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
index 8e21f0cdc8f8..b123a93242ba 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c
@@ -13,10 +13,6 @@
* GNU General Public License version 2 for more details. A copy is
* included in the COPYING file that accompanied this code.
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* GPL HEADER END
*/
/*
@@ -83,7 +79,7 @@ static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy)
head = kzalloc_node(sizeof(*head), GFP_NOFS,
cfs_cpt_spread_node(nrs_pol2cptab(policy),
nrs_pol2cptid(policy)));
- if (head == NULL)
+ if (!head)
return -ENOMEM;
INIT_LIST_HEAD(&head->fh_list);
@@ -104,7 +100,7 @@ static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy)
{
struct nrs_fifo_head *head = policy->pol_private;
- LASSERT(head != NULL);
+ LASSERT(head);
LASSERT(list_empty(&head->fh_list));
kfree(head);
@@ -167,9 +163,9 @@ struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy,
nrq = unlikely(list_empty(&head->fh_list)) ? NULL :
list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
- nr_u.fifo.fr_list);
+ nr_u.fifo.fr_list);
- if (likely(!peek && nrq != NULL)) {
+ if (likely(!peek && nrq)) {
struct ptlrpc_request *req = container_of(nrq,
struct ptlrpc_request,
rq_nrq);
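The FIFO queue links only the embedded ptlrpc_nrs_request, and container_of() recovers the enclosing ptlrpc_request from it. A self-contained illustration with stand-in types (the struct names here are invented for the demo):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct nrs_request { int nr_started; };
struct request {
        int xid;
        struct nrs_request rq_nrq;      /* embedded, like rq_nrq above */
};

int main(void)
{
        struct request req = { .xid = 42 };
        struct nrs_request *nrq = &req.rq_nrq;
        struct request *back = container_of(nrq, struct request, rq_nrq);

        printf("xid=%d\n", back->xid);  /* prints 42 */
        return 0;
}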
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index f3cb5184fa85..492d63fad6f9 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -133,7 +133,8 @@ EXPORT_SYMBOL(lustre_msg_size_v2);
* NOTE: this should only be used for NEW requests, and should always be
* in the form of a v2 request. If this is a connection to a v1
* target then the first buffer will be stripped because the ptlrpc
- * data is part of the lustre_msg_v1 header. b=14043 */
+ * data is part of the lustre_msg_v1 header. b=14043
+ */
int lustre_msg_size(__u32 magic, int count, __u32 *lens)
{
__u32 size[] = { sizeof(struct ptlrpc_body) };
@@ -157,7 +158,8 @@ int lustre_msg_size(__u32 magic, int count, __u32 *lens)
EXPORT_SYMBOL(lustre_msg_size);
/* This is used to determine the size of a buffer that was already packed
- * and will correctly handle the different message formats. */
+ * and will correctly handle the different message formats.
+ */
int lustre_packed_msg_size(struct lustre_msg *msg)
{
switch (msg->lm_magic) {
@@ -183,7 +185,7 @@ void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
for (i = 0; i < count; i++)
msg->lm_buflens[i] = lens[i];
- if (bufs == NULL)
+ if (!bufs)
return;
ptr = (char *)msg + lustre_msg_hdr_size_v2(count);
@@ -267,7 +269,8 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
spin_unlock(&svcpt->scp_rep_lock);
/* If we cannot get anything for some long time, we better
- * bail out instead of waiting infinitely */
+ * bail out instead of waiting infinitely
+ */
lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
rc = l_wait_event(svcpt->scp_rep_waitq,
!list_empty(&svcpt->scp_rep_idle), &lwi);
@@ -277,7 +280,7 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
}
rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state, rs_list);
+ struct ptlrpc_reply_state, rs_list);
list_del(&rs->rs_list);
spin_unlock(&svcpt->scp_rep_lock);
@@ -306,7 +309,7 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
struct ptlrpc_reply_state *rs;
int msg_len, rc;
- LASSERT(req->rq_reply_state == NULL);
+ LASSERT(!req->rq_reply_state);
if ((flags & LPRFL_EARLY_REPLY) == 0) {
spin_lock(&req->rq_lock);
@@ -383,7 +386,6 @@ void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size)
{
int i, offset, buflen, bufcount;
- LASSERT(m != NULL);
LASSERT(n >= 0);
bufcount = m->lm_bufcount;
@@ -488,7 +490,7 @@ void lustre_free_reply_state(struct ptlrpc_reply_state *rs)
LASSERT(!rs->rs_difficult || rs->rs_handled);
LASSERT(!rs->rs_on_net);
LASSERT(!rs->rs_scheduled);
- LASSERT(rs->rs_export == NULL);
+ LASSERT(!rs->rs_export);
LASSERT(rs->rs_nlocks == 0);
LASSERT(list_empty(&rs->rs_exp_list));
LASSERT(list_empty(&rs->rs_obd_list));
@@ -677,7 +679,8 @@ int lustre_msg_buflen(struct lustre_msg *m, int n)
EXPORT_SYMBOL(lustre_msg_buflen);
/* NB return the bufcount for lustre_msg_v2 format, so if message is packed
- * in V1 format, the result is one bigger. (add struct ptlrpc_body). */
+ * in V1 format, the result is one bigger. (add struct ptlrpc_body).
+ */
int lustre_msg_bufcount(struct lustre_msg *m)
{
switch (m->lm_magic) {
@@ -705,7 +708,7 @@ char *lustre_msg_string(struct lustre_msg *m, int index, int max_len)
LASSERTF(0, "incorrect message magic: %08x\n", m->lm_magic);
}
- if (str == NULL) {
+ if (!str) {
CERROR("can't unpack string in msg %p buffer[%d]\n", m, index);
return NULL;
}
@@ -740,7 +743,6 @@ static inline void *__lustre_swab_buf(struct lustre_msg *msg, int index,
{
void *ptr = NULL;
- LASSERT(msg != NULL);
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2:
ptr = lustre_msg_buf_v2(msg, index, min_size);
@@ -799,7 +801,8 @@ __u32 lustre_msg_get_flags(struct lustre_msg *msg)
/* no break */
default:
/* flags might be printed in debug code while message
- * uninitialized */
+ * uninitialized
+ */
return 0;
}
}
@@ -1032,7 +1035,8 @@ int lustre_msg_get_status(struct lustre_msg *msg)
/* no break */
default:
/* status might be printed in debug code while message
- * uninitialized */
+ * uninitialized
+ */
return -EINVAL;
}
}
@@ -1368,7 +1372,8 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
struct ptlrpc_body *pb;
/* Don't set jobid for ldlm ast RPCs, they've been shrunk.
- * See the comment in ptlrpc_request_pack(). */
+ * See the comment in ptlrpc_request_pack().
+ */
if (!opc || opc == LDLM_BL_CALLBACK ||
opc == LDLM_CP_CALLBACK || opc == LDLM_GL_CALLBACK)
return;
@@ -1377,7 +1382,7 @@ void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid)
sizeof(struct ptlrpc_body));
LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
- if (jobid != NULL)
+ if (jobid)
memcpy(pb->pb_jobid, jobid, JOBSTATS_JOBID_SIZE);
else if (pb->pb_jobid[0] == '\0')
lustre_get_jobid(pb->pb_jobid);
@@ -1427,7 +1432,7 @@ int do_set_info_async(struct obd_import *imp,
int rc;
req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
@@ -1488,7 +1493,8 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
* clients and servers without ptlrpc_body_v2 (< 2.3)
* do not swab any fields beyond pb_jobid, as we are
* using this swab function for both ptlrpc_body
- * and ptlrpc_body_v2. */
+ * and ptlrpc_body_v2.
+ */
CLASSERT(offsetof(typeof(*b), pb_jobid) != 0);
}
EXPORT_SYMBOL(lustre_swab_ptlrpc_body);
@@ -1502,7 +1508,8 @@ void lustre_swab_connect(struct obd_connect_data *ocd)
__swab32s(&ocd->ocd_index);
__swab32s(&ocd->ocd_brw_size);
/* ocd_blocksize and ocd_inodespace don't need to be swabbed because
- * they are 8-byte values */
+ * they are 8-byte values
+ */
__swab16s(&ocd->ocd_grant_extent);
__swab32s(&ocd->ocd_unused);
__swab64s(&ocd->ocd_transno);
@@ -1512,7 +1519,8 @@ void lustre_swab_connect(struct obd_connect_data *ocd)
/* Fields after ocd_cksum_types are only accessible by the receiver
* if the corresponding flag in ocd_connect_flags is set. Accessing
* any field after ocd_maxbytes on the receiver without a valid flag
- * may result in out-of-bound memory access and kernel oops. */
+ * may result in out-of-bound memory access and kernel oops.
+ */
if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)
__swab32s(&ocd->ocd_max_easize);
if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
@@ -1848,20 +1856,6 @@ void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
}
EXPORT_SYMBOL(lustre_swab_fiemap);
-void lustre_swab_idx_info(struct idx_info *ii)
-{
- __swab32s(&ii->ii_magic);
- __swab32s(&ii->ii_flags);
- __swab16s(&ii->ii_count);
- __swab32s(&ii->ii_attrs);
- lustre_swab_lu_fid(&ii->ii_fid);
- __swab64s(&ii->ii_version);
- __swab64s(&ii->ii_hash_start);
- __swab64s(&ii->ii_hash_end);
- __swab16s(&ii->ii_keysize);
- __swab16s(&ii->ii_recsize);
-}
-
void lustre_swab_mdt_rec_reint (struct mdt_rec_reint *rr)
{
__swab32s(&rr->rr_opcode);
@@ -1914,7 +1908,7 @@ static void print_lum(struct lov_user_md *lum)
CDEBUG(D_OTHER, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
CDEBUG(D_OTHER, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
CDEBUG(D_OTHER, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
- lum->lmm_stripe_offset);
+ lum->lmm_stripe_offset);
}
static void lustre_swab_lmm_oi(struct ost_id *oi)
@@ -1986,7 +1980,8 @@ static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d)
{
/* the lock data is a union and the first two fields are always an
* extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
- * data the same way. */
+ * data the same way.
+ */
__swab64s(&d->l_extent.start);
__swab64s(&d->l_extent.end);
__swab64s(&d->l_extent.gid);
@@ -2035,16 +2030,6 @@ void lustre_swab_ldlm_reply(struct ldlm_reply *r)
}
EXPORT_SYMBOL(lustre_swab_ldlm_reply);
-void lustre_swab_quota_body(struct quota_body *b)
-{
- lustre_swab_lu_fid(&b->qb_fid);
- lustre_swab_lu_fid((struct lu_fid *)&b->qb_id);
- __swab32s(&b->qb_flags);
- __swab64s(&b->qb_count);
- __swab64s(&b->qb_usage);
- __swab64s(&b->qb_slv_ver);
-}
-
/* Dump functions */
void dump_ioo(struct obd_ioobj *ioo)
{
@@ -2288,24 +2273,6 @@ void lustre_swab_hsm_request(struct hsm_request *hr)
}
EXPORT_SYMBOL(lustre_swab_hsm_request);
-void lustre_swab_update_buf(struct update_buf *ub)
-{
- __swab32s(&ub->ub_magic);
- __swab32s(&ub->ub_count);
-}
-EXPORT_SYMBOL(lustre_swab_update_buf);
-
-void lustre_swab_update_reply_buf(struct update_reply *ur)
-{
- int i;
-
- __swab32s(&ur->ur_version);
- __swab32s(&ur->ur_count);
- for (i = 0; i < ur->ur_count; i++)
- __swab32s(&ur->ur_lens[i]);
-}
-EXPORT_SYMBOL(lustre_swab_update_reply_buf);
-
void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl)
{
__swab64s(&msl->msl_flags);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index fb2d5236a971..8a869315c258 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -68,7 +68,7 @@ int ptlrpc_obd_ping(struct obd_device *obd)
struct ptlrpc_request *req;
req = ptlrpc_prep_ping(obd->u.cli.cl_import);
- if (req == NULL)
+ if (!req)
return -ENOMEM;
req->rq_send_state = LUSTRE_IMP_FULL;
@@ -86,7 +86,7 @@ static int ptlrpc_ping(struct obd_import *imp)
struct ptlrpc_request *req;
req = ptlrpc_prep_ping(imp);
- if (req == NULL) {
+ if (!req) {
CERROR("OOM trying to ping %s->%s\n",
imp->imp_obd->obd_uuid.uuid,
obd2cli_tgt(imp->imp_obd));
@@ -242,7 +242,7 @@ static int ptlrpc_pinger_main(void *arg)
list_for_each(iter, &pinger_imports) {
struct obd_import *imp =
list_entry(iter, struct obd_import,
- imp_pinger_chain);
+ imp_pinger_chain);
ptlrpc_pinger_process_import(imp, this_ping);
/* obd_timeout might have changed */
@@ -257,11 +257,12 @@ static int ptlrpc_pinger_main(void *arg)
/* Wait until the next ping time, or until we're stopped. */
time_to_next_wake = pinger_check_timeout(this_ping);
/* The ping sent by ptlrpc_send_rpc may get sent out
- say .01 second after this.
- ptlrpc_pinger_sending_on_import will then set the
- next ping time to next_ping + .01 sec, which means
- we will SKIP the next ping at next_ping, and the
- ping will get sent 2 timeouts from now! Beware. */
+ * say .01 second after this.
+ * ptlrpc_pinger_sending_on_import will then set the
+ * next ping time to next_ping + .01 sec, which means
+ * we will SKIP the next ping at next_ping, and the
+ * ping will get sent 2 timeouts from now! Beware.
+ */
CDEBUG(D_INFO, "next wakeup in " CFS_DURATION_T " (%ld)\n",
time_to_next_wake,
cfs_time_add(this_ping,
@@ -293,6 +294,7 @@ static struct ptlrpc_thread pinger_thread;
int ptlrpc_start_pinger(void)
{
struct l_wait_info lwi = { 0 };
+ struct task_struct *task;
int rc;
if (!thread_is_init(&pinger_thread) &&
@@ -303,10 +305,11 @@ int ptlrpc_start_pinger(void)
strcpy(pinger_thread.t_name, "ll_ping");
- rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
- "%s", pinger_thread.t_name));
- if (IS_ERR_VALUE(rc)) {
- CERROR("cannot start thread: %d\n", rc);
+ task = kthread_run(ptlrpc_pinger_main, &pinger_thread, "%s",
+ pinger_thread.t_name);
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("cannot start pinger thread: rc = %d\n", rc);
return rc;
}
l_wait_event(pinger_thread.t_ctl_waitq,
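The old code funnelled the kthread_run() result through PTR_ERR() into an int and tested it with IS_ERR_VALUE(), which truncates the pointer on 64-bit and loses the error distinction; the fix keeps the task_struct pointer and applies IS_ERR() first. A sketch of the corrected pattern (the demo_* names are illustrative):

#include <linux/kthread.h>
#include <linux/err.h>

static int demo_thread_fn(void *data)
{
        return 0;
}

/* check the pointer with IS_ERR() before converting to an errno */
static int demo_start_thread(void)
{
        struct task_struct *task;

        task = kthread_run(demo_thread_fn, NULL, "demo_thread");
        if (IS_ERR(task))
                return PTR_ERR(task);
        return 0;
}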
@@ -401,7 +404,8 @@ EXPORT_SYMBOL(ptlrpc_pinger_del_import);
* be called when timeout happens.
*/
static struct timeout_item *ptlrpc_new_timeout(int time,
- enum timeout_event event, timeout_cb_t cb, void *data)
+ enum timeout_event event,
+ timeout_cb_t cb, void *data)
{
struct timeout_item *ti;
@@ -489,7 +493,6 @@ int ptlrpc_del_timeout_client(struct list_head *obd_list,
break;
}
}
- LASSERTF(ti != NULL, "ti is NULL !\n");
if (list_empty(&ti->ti_obd_list)) {
list_del(&ti->ti_chain);
kfree(ti);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index 8f67e0562b73..6ca26c98de1b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -101,8 +101,6 @@ struct nrs_core {
* registration/unregistration, and NRS core lprocfs operations.
*/
struct mutex nrs_mutex;
- /* XXX: This is just for liblustre. Remove the #if defined directive
- * when the * "cfs_" prefix is dropped from cfs_list_head. */
/**
* List of all policy descriptors registered with NRS core; protected
* by nrs_core::nrs_mutex.
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index c4f1d0f5deb2..a8ec0e9d7b2e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -162,8 +162,8 @@ static void __exit ptlrpc_exit(void)
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Request Processor and Lock Management");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
-MODULE_VERSION("1.0.0");
module_init(ptlrpc_init);
module_exit(ptlrpc_exit);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 60fb0ced7137..db003f5da09e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -163,8 +163,6 @@ void ptlrpcd_wake(struct ptlrpc_request *req)
{
struct ptlrpc_request_set *rq_set = req->rq_set;
- LASSERT(rq_set != NULL);
-
wake_up(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);
@@ -176,7 +174,7 @@ ptlrpcd_select_pc(struct ptlrpc_request *req)
int cpt;
int idx;
- if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
+ if (req && req->rq_send_state != LUSTRE_IMP_FULL)
return &ptlrpcd_rcv;
cpt = cfs_cpt_current(cfs_cpt_table, 1);
@@ -209,11 +207,10 @@ static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
if (likely(!list_empty(&src->set_new_requests))) {
list_for_each_safe(pos, tmp, &src->set_new_requests) {
req = list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
+ rq_set_chain);
req->rq_set = des;
}
- list_splice_init(&src->set_new_requests,
- &des->set_requests);
+ list_splice_init(&src->set_new_requests, &des->set_requests);
rc = atomic_read(&src->set_new_count);
atomic_add(rc, &des->set_remaining);
atomic_set(&src->set_new_count, 0);
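Stealing a request set is O(1): each entry is retargeted to the destination set, then list_splice_init() splices the whole source list in and reinitializes it. A userspace sketch of the splice semantics with a pared-down list_head:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(n) { &(n), &(n) }

static void list_splice_init(struct list_head *list, struct list_head *head)
{
        if (list->next != list) {               /* source non-empty */
                struct list_head *first = list->next;
                struct list_head *last = list->prev;

                first->prev = head;
                last->next = head->next;
                head->next->prev = last;
                head->next = first;
                list->next = list->prev = list; /* re-init source */
        }
}

int main(void)
{
        struct list_head src = LIST_HEAD_INIT(src);
        struct list_head dst = LIST_HEAD_INIT(dst);
        struct list_head node = { &src, &src };

        src.next = &node;                       /* src holds one node */
        src.prev = &node;
        list_splice_init(&src, &dst);
        printf("src empty: %d, node in dst: %d\n",
               src.next == &src, dst.next == &node);
        return 0;
}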
@@ -240,10 +237,11 @@ void ptlrpcd_add_req(struct ptlrpc_request *req)
req->rq_invalid_rqset = 0;
spin_unlock(&req->rq_lock);
- l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
+ l_wait_event(req->rq_set_waitq, !req->rq_set, &lwi);
} else if (req->rq_set) {
/* If we have a valid "rq_set", just reuse it to avoid double
- * linked. */
+ * linked.
+ */
LASSERT(req->rq_phase == RQ_PHASE_NEW);
LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);
@@ -286,9 +284,9 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
spin_lock(&set->set_new_req_lock);
if (likely(!list_empty(&set->set_new_requests))) {
list_splice_init(&set->set_new_requests,
- &set->set_requests);
+ &set->set_requests);
atomic_add(atomic_read(&set->set_new_count),
- &set->set_remaining);
+ &set->set_remaining);
atomic_set(&set->set_new_count, 0);
/*
* Need to calculate its timeout.
@@ -321,7 +319,8 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
rc |= ptlrpc_check_set(env, set);
/* NB: ptlrpc_check_set has already moved completed request at the
- * head of seq::set_requests */
+ * head of seq::set_requests
+ */
list_for_each_safe(pos, tmp, &set->set_requests) {
req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase != RQ_PHASE_COMPLETE)
@@ -339,7 +338,8 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
rc = atomic_read(&set->set_new_count);
/* If we have nothing to do, check whether we can take some
- * work from our partner threads. */
+ * work from our partner threads.
+ */
if (rc == 0 && pc->pc_npartners > 0) {
struct ptlrpcd_ctl *partner;
struct ptlrpc_request_set *ps;
@@ -349,12 +349,12 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
partner = pc->pc_partners[pc->pc_cursor++];
if (pc->pc_cursor >= pc->pc_npartners)
pc->pc_cursor = 0;
- if (partner == NULL)
+ if (!partner)
continue;
spin_lock(&partner->pc_lock);
ps = partner->pc_set;
- if (ps == NULL) {
+ if (!ps) {
spin_unlock(&partner->pc_lock);
continue;
}
@@ -422,7 +422,6 @@ static int ptlrpcd(void *arg)
complete(&pc->pc_starting);
/*
-
* This mainloop strongly resembles ptlrpc_set_wait() except that our
* set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
* there are requests in the set. New requests come in on the set's
@@ -580,7 +579,7 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc)
return 0;
out_set:
- if (pc->pc_set != NULL) {
+ if (pc->pc_set) {
struct ptlrpc_request_set *set = pc->pc_set;
spin_lock(&pc->pc_lock);
@@ -631,7 +630,7 @@ void ptlrpcd_free(struct ptlrpcd_ctl *pc)
out:
if (pc->pc_npartners > 0) {
- LASSERT(pc->pc_partners != NULL);
+ LASSERT(pc->pc_partners);
kfree(pc->pc_partners);
pc->pc_partners = NULL;
@@ -645,7 +644,7 @@ static void ptlrpcd_fini(void)
int i;
int j;
- if (ptlrpcds != NULL) {
+ if (ptlrpcds) {
for (i = 0; i < ptlrpcds_num; i++) {
if (!ptlrpcds[i])
break;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index db6626cab6f2..30d9a164e52d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -107,14 +107,14 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
/* Replay all the committed open requests on committed_list first */
if (!list_empty(&imp->imp_committed_list)) {
tmp = imp->imp_committed_list.prev;
- req = list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
/* The last request on committed_list hasn't been replayed */
if (req->rq_transno > last_transno) {
/* Since the imp_committed_list is immutable before
* all of its requests being replayed, it's safe to
- * use a cursor to accelerate the search */
+ * use a cursor to accelerate the search
+ */
imp->imp_replay_cursor = imp->imp_replay_cursor->next;
while (imp->imp_replay_cursor !=
@@ -137,8 +137,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
}
/* All the requests in committed list have been replayed, let's replay
- * the imp_replay_list */
- if (req == NULL) {
+ * the imp_replay_list
+ */
+ if (!req) {
list_for_each_safe(tmp, pos, &imp->imp_replay_list) {
req = list_entry(tmp, struct ptlrpc_request,
rq_replay_list);
@@ -152,15 +153,16 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
/* If need to resend the last sent transno (because a reconnect
* has occurred), then stop on the matching req and send it again.
* If, however, the last sent transno has been committed then we
- * continue replay from the next request. */
- if (req != NULL && imp->imp_resend_replay)
+ * continue replay from the next request.
+ */
+ if (req && imp->imp_resend_replay)
lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
spin_lock(&imp->imp_lock);
imp->imp_resend_replay = 0;
spin_unlock(&imp->imp_lock);
- if (req != NULL) {
+ if (req) {
rc = ptlrpc_replay_req(req);
if (rc) {
CERROR("recovery replay error %d for req %llu\n",
@@ -192,9 +194,8 @@ int ptlrpc_resend(struct obd_import *imp)
return -1;
}
- list_for_each_entry_safe(req, next, &imp->imp_sending_list,
- rq_list) {
- LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
+ list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
+ LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
if (!ptlrpc_no_resend(req))
@@ -249,7 +250,8 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
}
/* Wait for recovery to complete and resend. If evicted, then
- this request will be errored out later.*/
+ * this request will be errored out later.
+ */
spin_lock(&failed_req->rq_lock);
if (!failed_req->rq_no_resend)
failed_req->rq_resend = 1;
@@ -260,7 +262,7 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
* Administratively active/deactive a client.
* This should only be called by the ioctl interface, currently
* - the lctl deactivate and activate commands
- * - echo 0/1 >> /proc/osc/XXX/active
+ * - echo 0/1 >> /sys/fs/lustre/osc/XXX/active
* - client umount -f (ll_umount_begin)
*/
int ptlrpc_set_import_active(struct obd_import *imp, int active)
@@ -271,13 +273,15 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
LASSERT(obd);
/* When deactivating, mark import invalid, and abort in-flight
- * requests. */
+ * requests.
+ */
if (!active) {
LCONSOLE_WARN("setting import %s INACTIVE by administrator request\n",
obd2cli_tgt(imp->imp_obd));
/* set before invalidate to avoid messages about imp_inval
- * set without imp_deactive in ptlrpc_import_delay_req */
+ * set without imp_deactive in ptlrpc_import_delay_req
+ */
spin_lock(&imp->imp_lock);
imp->imp_deactive = 1;
spin_unlock(&imp->imp_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 39f5261c9854..187fd1d6898c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -94,7 +94,7 @@ int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
LASSERT(number < SPTLRPC_POLICY_MAX);
write_lock(&policy_lock);
- if (unlikely(policies[number] == NULL)) {
+ if (unlikely(!policies[number])) {
write_unlock(&policy_lock);
CERROR("%s: already unregistered\n", policy->sp_name);
return -EINVAL;
@@ -126,11 +126,11 @@ struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
policy = policies[number];
if (policy && !try_module_get(policy->sp_owner))
policy = NULL;
- if (policy == NULL)
+ if (!policy)
flag = atomic_read(&loaded);
read_unlock(&policy_lock);
- if (policy != NULL || flag != 0 ||
+ if (policy || flag != 0 ||
number != SPTLRPC_POLICY_GSS)
break;
@@ -327,7 +327,7 @@ static int import_sec_validate_get(struct obd_import *imp,
}
*sec = sptlrpc_import_sec_ref(imp);
- if (*sec == NULL) {
+ if (!*sec) {
CERROR("import %p (%s) with no sec\n",
imp, ptlrpc_import_state_name(imp->imp_state));
return -EACCES;
@@ -429,7 +429,7 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
reqmsg_size = req->rq_reqlen;
if (reqmsg_size != 0) {
reqmsg = libcfs_kvzalloc(reqmsg_size, GFP_NOFS);
- if (reqmsg == NULL)
+ if (!reqmsg)
return -ENOMEM;
memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
}
@@ -445,7 +445,8 @@ int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
/* alloc new request buffer
* we don't need to alloc reply buffer here, leave it to the
- * rest procedure of ptlrpc */
+ * rest procedure of ptlrpc
+ */
if (reqmsg_size != 0) {
rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
if (!rc) {
@@ -609,7 +610,7 @@ again:
if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
- req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
+ req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
req_off_ctx_list(req, ctx);
sptlrpc_req_replace_dead_ctx(req);
ctx = req->rq_cli_ctx;
@@ -798,7 +799,8 @@ void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
spin_unlock(&sec->ps_lock);
/* force SVC_NULL for context initiation rpc, SVC_INTG for context
- * destruction rpc */
+ * destruction rpc
+ */
if (unlikely(req->rq_ctx_init))
flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
else if (unlikely(req->rq_ctx_fini))
@@ -938,7 +940,7 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req)
LASSERT(ctx->cc_sec);
LASSERT(req->rq_repbuf);
LASSERT(req->rq_repdata);
- LASSERT(req->rq_repmsg == NULL);
+ LASSERT(!req->rq_repmsg);
req->rq_rep_swab_mask = 0;
@@ -1000,8 +1002,8 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req)
int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
{
LASSERT(req->rq_repbuf);
- LASSERT(req->rq_repdata == NULL);
- LASSERT(req->rq_repmsg == NULL);
+ LASSERT(!req->rq_repdata);
+ LASSERT(!req->rq_repmsg);
LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
if (req->rq_reply_off == 0 &&
@@ -1046,13 +1048,13 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
int rc;
early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (early_req == NULL)
+ if (!early_req)
return -ENOMEM;
early_size = req->rq_nob_received;
early_bufsz = size_roundup_power2(early_size);
early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS);
- if (early_buf == NULL) {
+ if (!early_buf) {
rc = -ENOMEM;
goto err_req;
}
@@ -1067,8 +1069,8 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
}
LASSERT(req->rq_repbuf);
- LASSERT(req->rq_repdata == NULL);
- LASSERT(req->rq_repmsg == NULL);
+ LASSERT(!req->rq_repdata);
+ LASSERT(!req->rq_repmsg);
if (req->rq_reply_off != 0) {
CERROR("early reply with offset %u\n", req->rq_reply_off);
@@ -1354,12 +1356,12 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
might_sleep();
- if (imp == NULL)
+ if (!imp)
return 0;
conn = imp->imp_connection;
- if (svc_ctx == NULL) {
+ if (!svc_ctx) {
struct client_obd *cliobd = &imp->imp_obd->u.cli;
/*
* normal import, determine flavor from rule set, except
@@ -1447,11 +1449,11 @@ static void import_flush_ctx_common(struct obd_import *imp,
{
struct ptlrpc_sec *sec;
- if (imp == NULL)
+ if (!imp)
return;
sec = sptlrpc_import_sec_ref(imp);
- if (sec == NULL)
+ if (!sec)
return;
sec_cop_flush_ctx_cache(sec, uid, grace, force);
@@ -1484,7 +1486,7 @@ int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
LASSERT(ctx);
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
- LASSERT(req->rq_reqmsg == NULL);
+ LASSERT(!req->rq_reqmsg);
LASSERT_ATOMIC_POS(&ctx->cc_refcount);
policy = ctx->cc_sec->ps_policy;
@@ -1515,7 +1517,7 @@ void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
LASSERT(ctx->cc_sec->ps_policy);
LASSERT_ATOMIC_POS(&ctx->cc_refcount);
- if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
+ if (!req->rq_reqbuf && !req->rq_clrbuf)
return;
policy = ctx->cc_sec->ps_policy;
@@ -1632,7 +1634,7 @@ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
LASSERT(ctx->cc_sec->ps_policy);
LASSERT_ATOMIC_POS(&ctx->cc_refcount);
- if (req->rq_repbuf == NULL)
+ if (!req->rq_repbuf)
return;
LASSERT(req->rq_repbuf_len);
@@ -1684,12 +1686,13 @@ int sptlrpc_target_export_check(struct obd_export *exp,
{
struct sptlrpc_flavor flavor;
- if (exp == NULL)
+ if (!exp)
return 0;
/* client side export has no imp_reverse, skip
- * FIXME maybe we should check flavor this as well??? */
- if (exp->exp_imp_reverse == NULL)
+ * FIXME maybe we should check this flavor as well???
+ */
+ if (!exp->exp_imp_reverse)
return 0;
/* don't care about ctx fini rpc */
@@ -1702,11 +1705,13 @@ int sptlrpc_target_export_check(struct obd_export *exp,
* the first req with the new flavor, then treat it as current flavor,
* adapt reverse sec according to it.
* note the first rpc with new flavor might not be with root ctx, in
- * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
+ * which case delay the sec_adapt by leaving exp_flvr_adapt == 1.
+ */
if (unlikely(exp->exp_flvr_changed) &&
flavor_allowed(&exp->exp_flvr_old[1], req)) {
/* make the new flavor as "current", and old ones as
- * about-to-expire */
+ * about-to-expire
+ */
CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
flavor = exp->exp_flvr_old[1];
@@ -1742,10 +1747,12 @@ int sptlrpc_target_export_check(struct obd_export *exp,
}
/* if it equals to the current flavor, we accept it, but need to
- * dealing with reverse sec/ctx */
+ * deal with reverse sec/ctx
+ */
if (likely(flavor_allowed(&exp->exp_flvr, req))) {
/* most cases should return here, we only interested in
- * gss root ctx init */
+ * gss root ctx init
+ */
if (!req->rq_auth_gss || !req->rq_ctx_init ||
(!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
!req->rq_auth_usr_ost)) {
@@ -1755,7 +1762,8 @@ int sptlrpc_target_export_check(struct obd_export *exp,
/* if flavor just changed, we should not proceed, just leave
* it and current flavor will be discovered and replaced
- * shortly, and let _this_ rpc pass through */
+ * shortly, and let _this_ rpc pass through
+ */
if (exp->exp_flvr_changed) {
LASSERT(exp->exp_flvr_adapt);
spin_unlock(&exp->exp_lock);
@@ -1809,7 +1817,8 @@ int sptlrpc_target_export_check(struct obd_export *exp,
}
/* now it doesn't match the current flavor, the only chance we can
- * accept it is match the old flavors which is not expired. */
+ * accept it is to match the old flavors which have not expired.
+ */
if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
@@ -1915,9 +1924,9 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
int rc;
LASSERT(msg);
- LASSERT(req->rq_reqmsg == NULL);
- LASSERT(req->rq_repmsg == NULL);
- LASSERT(req->rq_svc_ctx == NULL);
+ LASSERT(!req->rq_reqmsg);
+ LASSERT(!req->rq_repmsg);
+ LASSERT(!req->rq_svc_ctx);
req->rq_req_swab_mask = 0;
@@ -1986,15 +1995,15 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
if (svcpt->scp_service->srv_max_reply_size <
msglen + sizeof(struct ptlrpc_reply_state)) {
/* Just return failure if the size is too big */
- CERROR("size of message is too big (%zd), %d allowed",
- msglen + sizeof(struct ptlrpc_reply_state),
- svcpt->scp_service->srv_max_reply_size);
+ CERROR("size of message is too big (%zd), %d allowed\n",
+ msglen + sizeof(struct ptlrpc_reply_state),
+ svcpt->scp_service->srv_max_reply_size);
return -ENOMEM;
}
/* failed alloc, try emergency pool */
rs = lustre_get_emerg_rs(svcpt);
- if (rs == NULL)
+ if (!rs)
return -ENOMEM;
req->rq_reply_state = rs;
@@ -2059,7 +2068,7 @@ void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
{
struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
- if (ctx != NULL)
+ if (ctx)
atomic_inc(&ctx->sc_refcount);
}
@@ -2067,7 +2076,7 @@ void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
{
struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
- if (ctx == NULL)
+ if (!ctx)
return;
LASSERT_ATOMIC_POS(&ctx->sc_refcount);
@@ -2156,7 +2165,7 @@ int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
* in case of privacy mode, nob_transferred needs to be adjusted.
*/
if (desc->bd_nob != desc->bd_nob_transferred) {
- CERROR("nob %d doesn't match transferred nob %d",
+ CERROR("nob %d doesn't match transferred nob %d\n",
desc->bd_nob, desc->bd_nob_transferred);
return -EPROTO;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 6152c1b766c3..d3872b8c9a6e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -58,7 +58,7 @@
* bulk encryption page pools *
****************************************/
-#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
+#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL (POINTERS_PER_PAGE)
#define IDLE_IDX_MAX (100)
@@ -120,7 +120,7 @@ static struct ptlrpc_enc_page_pool {
} page_pools;
/*
- * /proc/fs/lustre/sptlrpc/encrypt_page_pools
+ * /sys/kernel/debug/lustre/sptlrpc/encrypt_page_pools
*/
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
{
@@ -195,7 +195,7 @@ static void enc_pools_release_free_pages(long npages)
while (npages--) {
LASSERT(page_pools.epp_pools[p_idx]);
- LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
+ LASSERT(page_pools.epp_pools[p_idx][g_idx]);
__free_page(page_pools.epp_pools[p_idx][g_idx]);
page_pools.epp_pools[p_idx][g_idx] = NULL;
@@ -304,7 +304,6 @@ static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
static inline void enc_pools_wakeup(void)
{
assert_spin_locked(&page_pools.epp_lock);
- LASSERT(page_pools.epp_waitqlen >= 0);
if (unlikely(page_pools.epp_waitqlen)) {
LASSERT(waitqueue_active(&page_pools.epp_waitq));
@@ -317,7 +316,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
int p_idx, g_idx;
int i;
- if (desc->bd_enc_iov == NULL)
+ if (!desc->bd_enc_iov)
return;
LASSERT(desc->bd_iov_count > 0);
@@ -332,9 +331,9 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
LASSERT(page_pools.epp_pools[p_idx]);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
+ LASSERT(desc->bd_enc_iov[i].kiov_page);
LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
- LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
+ LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
page_pools.epp_pools[p_idx][g_idx] =
desc->bd_enc_iov[i].kiov_page;
@@ -413,7 +412,7 @@ int sptlrpc_enc_pool_init(void)
page_pools.epp_st_max_wait = 0;
enc_pools_alloc();
- if (page_pools.epp_pools == NULL)
+ if (!page_pools.epp_pools)
return -ENOMEM;
register_shrinker(&pools_shrinker);
@@ -476,7 +475,7 @@ int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
int size = msg->lm_buflens[offset];
bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
- if (bsd == NULL) {
+ if (!bsd) {
CERROR("Invalid bulk sec desc: size %d\n", size);
return -EINVAL;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 4b0b81c115ee..a51b18bbfd34 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -78,7 +78,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr)
memset(flvr, 0, sizeof(*flvr));
- if (str == NULL || str[0] == '\0') {
+ if (!str || str[0] == '\0') {
flvr->sf_rpc = SPTLRPC_FLVR_INVALID;
return 0;
}
@@ -103,7 +103,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr)
* format: plain-hash:<hash_alg>
*/
alg = strchr(bulk, ':');
- if (alg == NULL)
+ if (!alg)
goto err_out;
*alg++ = '\0';
@@ -166,7 +166,7 @@ static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule)
sptlrpc_rule_init(rule);
flavor = strchr(param, '=');
- if (flavor == NULL) {
+ if (!flavor) {
CERROR("invalid param, no '='\n");
return -EINVAL;
}
@@ -216,7 +216,7 @@ static int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule)
static void sptlrpc_rule_set_free(struct sptlrpc_rule_set *rset)
{
LASSERT(rset->srs_nslot ||
- (rset->srs_nrule == 0 && rset->srs_rules == NULL));
+ (rset->srs_nrule == 0 && !rset->srs_rules));
if (rset->srs_nslot) {
kfree(rset->srs_rules);
@@ -241,7 +241,7 @@ static int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
/* better use realloc() if available */
rules = kcalloc(nslot, sizeof(*rset->srs_rules), GFP_NOFS);
- if (rules == NULL)
+ if (!rules)
return -ENOMEM;
if (rset->srs_nrule) {
@@ -450,7 +450,7 @@ static void target2fsname(const char *tgt, char *fsname, int buflen)
}
/* if we didn't find the pattern, treat the whole string as fsname */
- if (ptr == NULL)
+ if (!ptr)
len = strlen(tgt);
else
len = ptr - tgt;
@@ -467,7 +467,7 @@ static void sptlrpc_conf_free_rsets(struct sptlrpc_conf *conf)
sptlrpc_rule_set_free(&conf->sc_rset);
list_for_each_entry_safe(conf_tgt, conf_tgt_next,
- &conf->sc_tgts, sct_list) {
+ &conf->sc_tgts, sct_list) {
sptlrpc_rule_set_free(&conf_tgt->sct_rset);
list_del(&conf_tgt->sct_list);
kfree(conf_tgt);
@@ -517,6 +517,7 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname,
int create)
{
struct sptlrpc_conf *conf;
+ size_t len;
list_for_each_entry(conf, &sptlrpc_confs, sc_list) {
if (strcmp(conf->sc_fsname, fsname) == 0)
@@ -530,7 +531,11 @@ struct sptlrpc_conf *sptlrpc_conf_get(const char *fsname,
if (!conf)
return NULL;
- strcpy(conf->sc_fsname, fsname);
+ len = strlcpy(conf->sc_fsname, fsname, sizeof(conf->sc_fsname));
+ if (len >= sizeof(conf->sc_fsname)) {
+ kfree(conf);
+ return NULL;
+ }
sptlrpc_rule_set_init(&conf->sc_rset);
INIT_LIST_HEAD(&conf->sc_tgts);
list_add(&conf->sc_list, &sptlrpc_confs);
@@ -579,13 +584,13 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
int rc;
target = lustre_cfg_string(lcfg, 1);
- if (target == NULL) {
+ if (!target) {
CERROR("missing target name\n");
return -EINVAL;
}
param = lustre_cfg_string(lcfg, 2);
- if (param == NULL) {
+ if (!param) {
CERROR("missing parameter\n");
return -EINVAL;
}
@@ -603,12 +608,12 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
if (rc)
return -EINVAL;
- if (conf == NULL) {
+ if (!conf) {
target2fsname(target, fsname, sizeof(fsname));
mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
- if (conf == NULL) {
+ if (!conf) {
CERROR("can't find conf\n");
rc = -ENOMEM;
} else {
@@ -638,7 +643,7 @@ static int logname2fsname(const char *logname, char *buf, int buflen)
int len;
ptr = strrchr(logname, '-');
- if (ptr == NULL || strcmp(ptr, "-sptlrpc")) {
+ if (!ptr || strcmp(ptr, "-sptlrpc")) {
CERROR("%s is not a sptlrpc config log\n", logname);
return -EINVAL;
}
@@ -772,7 +777,7 @@ void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(name, 0);
- if (conf == NULL)
+ if (!conf)
goto out;
/* convert uuid name (supposed end with _UUID) to target name */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index 6e58d5f955d6..9082da06b28a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -109,7 +109,7 @@ static void sec_process_ctx_list(void)
while (!list_empty(&sec_gc_ctx_list)) {
ctx = list_entry(sec_gc_ctx_list.next,
- struct ptlrpc_cli_ctx, cc_gc_chain);
+ struct ptlrpc_cli_ctx, cc_gc_chain);
list_del_init(&ctx->cc_gc_chain);
spin_unlock(&sec_gc_ctx_list_lock);
@@ -131,7 +131,7 @@ static void sec_do_gc(struct ptlrpc_sec *sec)
if (unlikely(sec->ps_gc_next == 0)) {
CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
- sec, sec->ps_policy->sp_name);
+ sec, sec->ps_policy->sp_name);
return;
}
@@ -166,11 +166,13 @@ again:
* is not optimal. we perhaps want to use balanced binary tree
* to trace each sec as order of expiry time.
* another issue here is we wakeup as fixed interval instead of
- * according to each sec's expiry time */
+ * according to each sec's expiry time
+ */
mutex_lock(&sec_gc_mutex);
list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
/* if someone is waiting to be deleted, let it
- * proceed as soon as possible. */
+ * proceed as soon as possible.
+ */
if (atomic_read(&sec_gc_wait_del)) {
CDEBUG(D_SEC, "deletion pending, start over\n");
mutex_unlock(&sec_gc_mutex);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
index bda9a77af67a..e610a8ddd223 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
@@ -82,7 +82,7 @@ static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v)
if (cli->cl_import)
sec = sptlrpc_import_sec_ref(cli->cl_import);
- if (sec == NULL)
+ if (!sec)
goto out;
sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str));
@@ -121,7 +121,7 @@ static int sptlrpc_ctxs_lprocfs_seq_show(struct seq_file *seq, void *v)
if (cli->cl_import)
sec = sptlrpc_import_sec_ref(cli->cl_import);
- if (sec == NULL)
+ if (!sec)
goto out;
if (sec->ps_policy->sp_cops->display)
@@ -178,7 +178,7 @@ int sptlrpc_lproc_init(void)
{
int rc;
- LASSERT(sptlrpc_debugfs_dir == NULL);
+ LASSERT(!sptlrpc_debugfs_dir);
sptlrpc_debugfs_dir = ldebugfs_register("sptlrpc", debugfs_lustre_root,
sptlrpc_lprocfs_vars, NULL);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
index ebfa6092be14..40e5349de38c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_null.c
@@ -250,7 +250,7 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
alloc_size = size_roundup_power2(newmsg_size);
newbuf = libcfs_kvzalloc(alloc_size, GFP_NOFS);
- if (newbuf == NULL)
+ if (!newbuf)
return -ENOMEM;
/* Must lock this, so that otherwise unprotected change of
@@ -258,7 +258,8 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
* imp_replay_list traversing threads. See LU-3333
* This is a bandaid at best, we really need to deal with this
* in request enlarging code before unpacking that's already
- * there */
+ * there
+ */
if (req->rq_import)
spin_lock(&req->rq_import->imp_lock);
memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
@@ -319,7 +320,7 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
LASSERT(rs->rs_size >= rs_size);
} else {
rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
- if (rs == NULL)
+ if (!rs)
return -ENOMEM;
rs->rs_size = rs_size;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 905a41451ca3..6276bf59c3aa 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -104,7 +104,7 @@ static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
return -EPROTO;
bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
- if (bsd == NULL) {
+ if (!bsd) {
CERROR("bulk sec desc has short size %d\n",
lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
return -EPROTO;
@@ -227,7 +227,7 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
swabbed = ptlrpc_rep_need_swab(req);
phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
- if (phdr == NULL) {
+ if (!phdr) {
CERROR("missing plain header\n");
return -EPROTO;
}
@@ -264,7 +264,8 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
}
} else {
/* whether we sent with bulk or not, we expect the same
- * in reply, except for early reply */
+ * in reply, except for early reply
+ */
if (!req->rq_early &&
!equi(req->rq_pack_bulk == 1,
phdr->ph_flags & PLAIN_FL_BULK)) {
@@ -419,7 +420,7 @@ void plain_destroy_sec(struct ptlrpc_sec *sec)
LASSERT(sec->ps_import);
LASSERT(atomic_read(&sec->ps_refcount) == 0);
LASSERT(atomic_read(&sec->ps_nctx) == 0);
- LASSERT(plsec->pls_ctx == NULL);
+ LASSERT(!plsec->pls_ctx);
class_import_put(sec->ps_import);
@@ -468,7 +469,7 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
/* install ctx immediately if this is a reverse sec */
if (svc_ctx) {
ctx = plain_sec_install_ctx(plsec);
- if (ctx == NULL) {
+ if (!ctx) {
plain_destroy_sec(sec);
return NULL;
}
@@ -492,7 +493,7 @@ struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
atomic_inc(&ctx->cc_refcount);
read_unlock(&plsec->pls_lock);
- if (unlikely(ctx == NULL))
+ if (unlikely(!ctx))
ctx = plain_sec_install_ctx(plsec);
return ctx;
@@ -665,7 +666,7 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
newbuf_size = size_roundup_power2(newbuf_size);
newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS);
- if (newbuf == NULL)
+ if (!newbuf)
return -ENOMEM;
/* Must lock this, so that otherwise unprotected change of
@@ -673,7 +674,8 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
* imp_replay_list traversing threads. See LU-3333
* This is a bandaid at best, we really need to deal with this
* in request enlarging code before unpacking that's already
- * there */
+ * there
+ */
if (req->rq_import)
spin_lock(&req->rq_import->imp_lock);
@@ -732,7 +734,7 @@ int plain_accept(struct ptlrpc_request *req)
swabbed = ptlrpc_req_need_swab(req);
phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
- if (phdr == NULL) {
+ if (!phdr) {
CERROR("missing plain header\n");
return -EPROTO;
}
@@ -801,7 +803,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
LASSERT(rs->rs_size >= rs_size);
} else {
rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
- if (rs == NULL)
+ if (!rs)
return -ENOMEM;
rs->rs_size = rs_size;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 8598300a61d1..1bbd1d39ccf8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -77,7 +77,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
rqbd = kzalloc_node(sizeof(*rqbd), GFP_NOFS,
cfs_cpt_spread_node(svc->srv_cptable,
svcpt->scp_cpt));
- if (rqbd == NULL)
+ if (!rqbd)
return NULL;
rqbd->rqbd_svcpt = svcpt;
@@ -89,7 +89,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
svcpt->scp_cpt,
svc->srv_buf_size,
GFP_KERNEL);
- if (rqbd->rqbd_buffer == NULL) {
+ if (!rqbd->rqbd_buffer) {
kfree(rqbd);
return NULL;
}
@@ -144,13 +144,14 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
for (i = 0; i < svc->srv_nbuf_per_group; i++) {
/* NB: another thread might have recycled enough rqbds, we
- * need to make sure it wouldn't over-allocate, see LU-1212. */
+ * need to make sure it wouldn't over-allocate, see LU-1212.
+ */
if (svcpt->scp_nrqbds_posted >= svc->srv_nbuf_per_group)
break;
rqbd = ptlrpc_alloc_rqbd(svcpt);
- if (rqbd == NULL) {
+ if (!rqbd) {
CERROR("%s: Can't allocate request buffer\n",
svc->srv_name);
rc = -ENOMEM;
@@ -298,8 +299,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
}
rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
list_del(&rqbd->rqbd_list);
/* assume we will post successfully */
@@ -322,7 +323,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
/* Don't complain if no request buffers are posted right now; LNET
- * won't drop requests because we set the portal lazy! */
+ * won't drop requests because we set the portal lazy!
+ */
spin_unlock(&svcpt->scp_lock);
@@ -363,13 +365,15 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
init = max_t(int, init, tc->tc_nthrs_init);
/* NB: please see comments in lustre_lnet.h for definition
- * details of these members */
+ * details of these members
+ */
LASSERT(tc->tc_nthrs_max != 0);
if (tc->tc_nthrs_user != 0) {
/* In case there is a reason to test a service with many
* threads, we give a less strict check here, it can
- * be up to 8 * nthrs_max */
+ * be up to 8 * nthrs_max
+ */
total = min(tc->tc_nthrs_max * 8, tc->tc_nthrs_user);
nthrs = total / svc->srv_ncpts;
init = max(init, nthrs);
@@ -379,7 +383,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
total = tc->tc_nthrs_max;
if (tc->tc_nthrs_base == 0) {
/* don't care about base threads number per partition,
- * this is most for non-affinity service */
+ * this is mostly for non-affinity services
+ */
nthrs = total / svc->srv_ncpts;
goto out;
}
@@ -390,7 +395,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
/* NB: Increase the base number if it's single partition
* and total number of cores/HTs is larger or equal to 4.
- * result will always < 2 * nthrs_base */
+ * result will always be < 2 * nthrs_base
+ */
weight = cfs_cpt_weight(svc->srv_cptable, CFS_CPT_ANY);
for (i = 1; (weight >> (i + 1)) != 0 && /* >= 4 cores/HTs */
(tc->tc_nthrs_base >> i) != 0; i++)
@@ -490,7 +496,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
array->paa_reqs_array =
kzalloc_node(sizeof(struct list_head) * size, GFP_NOFS,
cfs_cpt_spread_node(svc->srv_cptable, cpt));
- if (array->paa_reqs_array == NULL)
+ if (!array->paa_reqs_array)
return -ENOMEM;
for (index = 0; index < size; index++)
@@ -499,14 +505,15 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
array->paa_reqs_count =
kzalloc_node(sizeof(__u32) * size, GFP_NOFS,
cfs_cpt_spread_node(svc->srv_cptable, cpt));
- if (array->paa_reqs_count == NULL)
+ if (!array->paa_reqs_count)
goto free_reqs_array;
setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
(unsigned long)svcpt);
/* At SOW, service time should be quick; 10s seems generous. If client
- * timeout is less than this, we'll be sending an early reply. */
+ * timeout is less than this, we'll be sending an early reply.
+ */
at_init(&svcpt->scp_at_estimate, 10, 0);
/* assign this before call ptlrpc_grow_req_bufs */
@@ -514,7 +521,8 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
/* Now allocate the request buffers, but don't post them now */
rc = ptlrpc_grow_req_bufs(svcpt, 0);
/* We shouldn't be under memory pressure at startup, so
- * fail if we can't allocate all our buffers at this time. */
+ * fail if we can't allocate all our buffers at this time.
+ */
if (rc != 0)
goto free_reqs_count;
@@ -556,14 +564,14 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
LASSERT(conf->psc_thr.tc_ctx_tags != 0);
cptable = cconf->cc_cptable;
- if (cptable == NULL)
+ if (!cptable)
cptable = cfs_cpt_table;
if (!conf->psc_thr.tc_cpu_affinity) {
ncpts = 1;
} else {
ncpts = cfs_cpt_number(cptable);
- if (cconf->cc_pattern != NULL) {
+ if (cconf->cc_pattern) {
struct cfs_expr_list *el;
rc = cfs_expr_list_parse(cconf->cc_pattern,
@@ -632,11 +640,11 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
if (!conf->psc_thr.tc_cpu_affinity)
cpt = CFS_CPT_ANY;
else
- cpt = cpts != NULL ? cpts[i] : i;
+ cpt = cpts ? cpts[i] : i;
svcpt = kzalloc_node(sizeof(*svcpt), GFP_NOFS,
cfs_cpt_spread_node(cptable, cpt));
- if (svcpt == NULL) {
+ if (!svcpt) {
rc = -ENOMEM;
goto failed;
}
@@ -696,7 +704,8 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req)
LASSERT(list_empty(&req->rq_timed_list));
/* DEBUG_REQ() assumes the reply state of a request with a valid
- * ref will not be destroyed until that reference is dropped. */
+ * ref will not be destroyed until that reference is dropped.
+ */
ptlrpc_req_drop_rs(req);
sptlrpc_svc_ctx_decref(req);
@@ -704,7 +713,8 @@ static void ptlrpc_server_free_request(struct ptlrpc_request *req)
if (req != &req->rq_rqbd->rqbd_req) {
/* NB request buffers use an embedded
* req if the incoming req unlinked the
- * MD; this isn't one of them! */
+ * MD; this isn't one of them!
+ */
ptlrpc_request_cache_free(req);
}
}
@@ -728,7 +738,8 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req)
if (req->rq_at_linked) {
spin_lock(&svcpt->scp_at_lock);
/* recheck with lock, in case it's unlinked by
- * ptlrpc_at_check_timed() */
+ * ptlrpc_at_check_timed()
+ */
if (likely(req->rq_at_linked))
ptlrpc_at_remove_timed(req);
spin_unlock(&svcpt->scp_at_lock);
@@ -755,20 +766,22 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req)
svcpt->scp_hist_nrqbds++;
/* cull some history?
- * I expect only about 1 or 2 rqbds need to be recycled here */
+ * I expect only about 1 or 2 rqbds need to be recycled here
+ */
while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
rqbd = list_entry(svcpt->scp_hist_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
list_del(&rqbd->rqbd_list);
svcpt->scp_hist_nrqbds--;
/* remove rqbd's reqs from svc's req history while
- * I've got the service lock */
+ * I've got the service lock
+ */
list_for_each(tmp, &rqbd->rqbd_reqs) {
req = list_entry(tmp, struct ptlrpc_request,
- rq_list);
+ rq_list);
/* Track the highest culled req seq */
if (req->rq_history_seq >
svcpt->scp_hist_seq_culled) {
@@ -782,8 +795,8 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req)
list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
req = list_entry(rqbd->rqbd_reqs.next,
- struct ptlrpc_request,
- rq_list);
+ struct ptlrpc_request,
+ rq_list);
list_del(&req->rq_list);
ptlrpc_server_free_request(req);
}
@@ -795,8 +808,7 @@ static void ptlrpc_server_drop_request(struct ptlrpc_request *req)
*/
LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
0);
- list_add_tail(&rqbd->rqbd_list,
- &svcpt->scp_rqbd_idle);
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
}
spin_unlock(&svcpt->scp_lock);
@@ -846,7 +858,7 @@ static void ptlrpc_server_finish_active_request(
ptlrpc_nrs_req_finalize(req);
- if (req->rq_export != NULL)
+ if (req->rq_export)
class_export_rpc_dec(req->rq_export);
ptlrpc_server_finish_request(svcpt, req);
@@ -869,13 +881,13 @@ static int ptlrpc_check_req(struct ptlrpc_request *req)
req->rq_export->exp_conn_cnt);
return -EEXIST;
}
- if (unlikely(obd == NULL || obd->obd_fail)) {
+ if (unlikely(!obd || obd->obd_fail)) {
/*
* Failing over, don't handle any more reqs, send
* error response instead.
*/
CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
- req, (obd != NULL) ? obd->obd_name : "unknown");
+ req, obd ? obd->obd_name : "unknown");
rc = -ENODEV;
} else if (lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_REPLAY | MSG_REQ_REPLAY_DONE)) {
@@ -942,13 +954,13 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
div_u64_rem(req->rq_deadline, array->paa_size, &index);
if (array->paa_reqs_count[index] > 0) {
/* latest rpcs will have the latest deadlines in the list,
- * so search backward. */
- list_for_each_entry_reverse(rq,
- &array->paa_reqs_array[index],
- rq_timed_list) {
+ * so search backward.
+ */
+ list_for_each_entry_reverse(rq, &array->paa_reqs_array[index],
+ rq_timed_list) {
if (req->rq_deadline >= rq->rq_deadline) {
list_add(&req->rq_timed_list,
- &rq->rq_timed_list);
+ &rq->rq_timed_list);
break;
}
}
@@ -956,8 +968,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
/* Add the request at the head of the list */
if (list_empty(&req->rq_timed_list))
- list_add(&req->rq_timed_list,
- &array->paa_reqs_array[index]);
+ list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);
spin_lock(&req->rq_lock);
req->rq_at_linked = 1;
@@ -1003,7 +1014,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
int rc;
/* deadline is when the client expects us to reply, margin is the
- difference between clients' and servers' expectations */
+ * difference between clients' and servers' expectations
+ */
DEBUG_REQ(D_ADAPTTO, req,
"%ssending early reply (deadline %+lds, margin %+lds) for %d+%d",
AT_OFF ? "AT off - not " : "",
@@ -1027,12 +1039,14 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
}
/* Fake our processing time into the future to ask the clients
- * for some extra amount of time */
+ * for some extra amount of time
+ */
at_measured(&svcpt->scp_at_estimate, at_extra +
ktime_get_real_seconds() - req->rq_arrival_time.tv_sec);
/* Check to see if we've actually increased the deadline -
- * we may be past adaptive_max */
+ * we may be past adaptive_max
+ */
if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate)) {
DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
@@ -1044,7 +1058,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate);
reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
- if (reqcopy == NULL)
+ if (!reqcopy)
return -ENOMEM;
reqmsg = libcfs_kvzalloc(req->rq_reqlen, GFP_NOFS);
if (!reqmsg) {
@@ -1074,7 +1088,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
/* Connection ref */
reqcopy->rq_export = class_conn2export(
lustre_msg_get_handle(reqcopy->rq_reqmsg));
- if (reqcopy->rq_export == NULL) {
+ if (!reqcopy->rq_export) {
rc = -ENODEV;
goto out;
}
@@ -1102,7 +1116,8 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
}
/* Free the (early) reply state from lustre_pack_reply.
- (ptlrpc_send_reply takes it's own rs ref, so this is safe here) */
+ * (ptlrpc_send_reply takes its own rs ref, so this is safe here)
+ */
ptlrpc_req_drop_rs(reqcopy);
out_put:
@@ -1117,8 +1132,9 @@ out_free:
}
/* Send early replies to everybody expiring within at_early_margin
- asking for at_extra time */
-static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
+ * asking for at_extra time
+ */
+static void ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
struct ptlrpc_request *rq, *n;
@@ -1132,14 +1148,14 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
spin_lock(&svcpt->scp_at_lock);
if (svcpt->scp_at_check == 0) {
spin_unlock(&svcpt->scp_at_lock);
- return 0;
+ return;
}
delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
svcpt->scp_at_check = 0;
if (array->paa_count == 0) {
spin_unlock(&svcpt->scp_at_lock);
- return 0;
+ return;
}
/* The timer went off, but maybe the nearest rpc already completed. */
@@ -1148,20 +1164,20 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
/* We've still got plenty of time. Reset the timer. */
ptlrpc_at_set_timer(svcpt);
spin_unlock(&svcpt->scp_at_lock);
- return 0;
+ return;
}
/* We're close to a timeout, and we don't know how much longer the
- server will take. Send early replies to everyone expiring soon. */
+ * server will take. Send early replies to everyone expiring soon.
+ */
INIT_LIST_HEAD(&work_list);
deadline = -1;
div_u64_rem(array->paa_deadline, array->paa_size, &index);
count = array->paa_count;
while (count > 0) {
count -= array->paa_reqs_count[index];
- list_for_each_entry_safe(rq, n,
- &array->paa_reqs_array[index],
- rq_timed_list) {
+ list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
+ rq_timed_list) {
if (rq->rq_deadline > now + at_early_margin) {
/* update the earliest deadline */
if (deadline == -1 ||
@@ -1194,7 +1210,8 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
first, at_extra, counter);
if (first < 0) {
/* We're already past request deadlines before we even get a
- chance to send early replies */
+ * chance to send early replies
+ */
LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
svcpt->scp_service->srv_name);
CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
@@ -1204,10 +1221,11 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
}
/* we took additional refcount so entries can't be deleted from list, no
- * locking is needed */
+ * locking is needed
+ */
while (!list_empty(&work_list)) {
rq = list_entry(work_list.next, struct ptlrpc_request,
- rq_timed_list);
+ rq_timed_list);
list_del_init(&rq->rq_timed_list);
if (ptlrpc_at_send_early_reply(rq) == 0)
@@ -1215,8 +1233,6 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
ptlrpc_server_drop_request(rq);
}
-
- return 1; /* return "did_something" for liblustre */
}
/**
@@ -1237,7 +1253,8 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
if (req->rq_export && req->rq_ops) {
/* Perform request specific check. We should do this check
* before the request is added into exp_hp_rpcs list otherwise
- * it may hit swab race at LU-1044. */
+ * it may hit swab race at LU-1044.
+ */
if (req->rq_ops->hpreq_check) {
rc = req->rq_ops->hpreq_check(req);
/**
@@ -1257,8 +1274,7 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
}
spin_lock_bh(&req->rq_export->exp_rpc_lock);
- list_add(&req->rq_exp_list,
- &req->rq_export->exp_hp_rpcs);
+ list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs);
spin_unlock_bh(&req->rq_export->exp_rpc_lock);
}
@@ -1272,7 +1288,8 @@ static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
{
if (req->rq_export && req->rq_ops) {
/* refresh lock timeout again so that client has more
- * room to send lock cancel RPC. */
+ * room to send lock cancel RPC.
+ */
if (req->rq_ops->hpreq_fini)
req->rq_ops->hpreq_fini(req);
@@ -1316,7 +1333,7 @@ static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
/* leave just 1 thread for normal RPCs */
running = PTLRPC_NTHRS_INIT;
- if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
+ if (svcpt->scp_service->srv_ops.so_hpreq_handler)
running += 1;
}
@@ -1355,7 +1372,7 @@ static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
/* leave just 1 thread for normal RPCs */
running = PTLRPC_NTHRS_INIT;
- if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL)
+ if (svcpt->scp_service->srv_ops.so_hpreq_handler)
running += 1;
}
@@ -1405,7 +1422,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
if (ptlrpc_server_high_pending(svcpt, force)) {
req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
- if (req != NULL) {
+ if (req) {
svcpt->scp_hreq_count++;
goto got_request;
}
@@ -1413,7 +1430,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
if (ptlrpc_server_normal_pending(svcpt, force)) {
req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
- if (req != NULL) {
+ if (req) {
svcpt->scp_hreq_count = 0;
goto got_request;
}
@@ -1457,11 +1474,12 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
}
req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
+ struct ptlrpc_request, rq_list);
list_del_init(&req->rq_list);
svcpt->scp_nreqs_incoming--;
/* Consider this still a "queued" request as far as stats are
- * concerned */
+ * concerned
+ */
spin_unlock(&svcpt->scp_lock);
/* go through security check/transform */
@@ -1598,7 +1616,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
int fail_opc = 0;
request = ptlrpc_server_request_get(svcpt, false);
- if (request == NULL)
+ if (!request)
return 0;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
@@ -1620,7 +1638,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
timediff = timespec64_sub(work_start, request->rq_arrival_time);
timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
timediff.tv_nsec / NSEC_PER_USEC;
- if (likely(svc->srv_stats != NULL)) {
+ if (likely(svc->srv_stats)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
timediff_usecs);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
@@ -1652,7 +1670,8 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
}
/* Discard requests queued for longer than the deadline.
- The deadline is increased if we send an early reply. */
+ * The deadline is increased if we send an early reply.
+ */
if (ktime_get_real_seconds() > request->rq_deadline) {
DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n",
libcfs_id2str(request->rq_peer),
@@ -1718,7 +1737,7 @@ put_conn:
request->rq_status,
(request->rq_repmsg ?
lustre_msg_get_status(request->rq_repmsg) : -999));
- if (likely(svc->srv_stats != NULL && request->rq_reqmsg != NULL)) {
+ if (likely(svc->srv_stats && request->rq_reqmsg)) {
__u32 op = lustre_msg_get_opc(request->rq_reqmsg);
int opc = opcode_offset(op);
@@ -1804,7 +1823,8 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
if (nlocks == 0 && !been_handled) {
/* If we see this, we should already have seen the warning
- * in mds_steal_ack_locks() */
+ * in mds_steal_ack_locks()
+ */
CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld o%d NID %s\n",
rs,
rs->rs_xid, rs->rs_transno, rs->rs_opc,
@@ -1858,7 +1878,8 @@ ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
/* CAVEAT EMPTOR: We might be allocating buffers here because we've
* allowed the request history to grow out of control. We could put a
* sanity check on that here and cull some history if we need the
- * space. */
+ * space.
+ */
if (avail <= low_water)
ptlrpc_grow_req_bufs(svcpt, 1);
@@ -1992,7 +2013,8 @@ static int ptlrpc_main(void *arg)
/* NB: we will call cfs_cpt_bind() for all threads, because we
* might want to run lustre server only on a subset of system CPUs,
- * in that case ->scp_cpt is CFS_CPT_ANY */
+ * in that case ->scp_cpt is CFS_CPT_ANY
+ */
rc = cfs_cpt_bind(svc->srv_cptable, svcpt->scp_cpt);
if (rc != 0) {
CWARN("%s: failed to bind %s on CPT %d\n",
@@ -2008,7 +2030,7 @@ static int ptlrpc_main(void *arg)
set_current_groups(ginfo);
put_group_info(ginfo);
- if (svc->srv_ops.so_thr_init != NULL) {
+ if (svc->srv_ops.so_thr_init) {
rc = svc->srv_ops.so_thr_init(thread);
if (rc)
goto out;
@@ -2035,7 +2057,7 @@ static int ptlrpc_main(void *arg)
continue;
CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
- svc->srv_name, svcpt->scp_cpt, rc);
+ svc->srv_name, svcpt->scp_cpt, rc);
goto out_srv_fini;
}
@@ -2057,7 +2079,8 @@ static int ptlrpc_main(void *arg)
/* SVC_STOPPING may already be set here if someone else is trying
* to stop the service while this new thread has been dynamically
* forked. We still set SVC_RUNNING to let our creator know that
- * we are now running, however we will exit as soon as possible */
+ * we are now running; however, we will exit as soon as possible
+ */
thread_add_flags(thread, SVC_RUNNING);
svcpt->scp_nthrs_running++;
spin_unlock(&svcpt->scp_lock);
@@ -2116,7 +2139,8 @@ static int ptlrpc_main(void *arg)
ptlrpc_server_post_idle_rqbds(svcpt) < 0) {
/* I just failed to repost request buffers.
* Wait for a timeout (unless something else
- * happens) before I try again */
+ * happens) before I try again
+ */
svcpt->scp_rqbd_timeout = cfs_time_seconds(1) / 10;
CDEBUG(D_RPCTRACE, "Posted buffers: %d\n",
svcpt->scp_nrqbds_posted);
@@ -2132,10 +2156,10 @@ out_srv_fini:
/*
* deconstruct service specific state created by ptlrpc_start_thread()
*/
- if (svc->srv_ops.so_thr_done != NULL)
+ if (svc->srv_ops.so_thr_done)
svc->srv_ops.so_thr_done(thread);
- if (env != NULL) {
+ if (env) {
lu_context_fini(&env->le_ctx);
kfree(env);
}
@@ -2183,7 +2207,7 @@ static int ptlrpc_hr_main(void *arg)
{
struct ptlrpc_hr_thread *hrt = arg;
struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
- LIST_HEAD (replies);
+ LIST_HEAD(replies);
char threadname[20];
int rc;
@@ -2206,9 +2230,8 @@ static int ptlrpc_hr_main(void *arg)
while (!list_empty(&replies)) {
struct ptlrpc_reply_state *rs;
- rs = list_entry(replies.prev,
- struct ptlrpc_reply_state,
- rs_list);
+ rs = list_entry(replies.prev, struct ptlrpc_reply_state,
+ rs_list);
list_del_init(&rs->rs_list);
ptlrpc_handle_rs(rs);
}
@@ -2229,18 +2252,18 @@ static void ptlrpc_stop_hr_threads(void)
ptlrpc_hr.hr_stopping = 1;
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- if (hrp->hrp_thrs == NULL)
+ if (!hrp->hrp_thrs)
continue; /* uninitialized */
for (j = 0; j < hrp->hrp_nthrs; j++)
wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
}
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- if (hrp->hrp_thrs == NULL)
+ if (!hrp->hrp_thrs)
continue; /* uninitialized */
wait_event(ptlrpc_hr.hr_waitq,
- atomic_read(&hrp->hrp_nstopped) ==
- atomic_read(&hrp->hrp_nstarted));
+ atomic_read(&hrp->hrp_nstopped) ==
+ atomic_read(&hrp->hrp_nstarted));
}
}
@@ -2255,24 +2278,26 @@ static int ptlrpc_start_hr_threads(void)
for (j = 0; j < hrp->hrp_nthrs; j++) {
struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
-
- rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
- &hrp->hrp_thrs[j],
- "ptlrpc_hr%02d_%03d",
- hrp->hrp_cpt,
- hrt->hrt_id));
- if (IS_ERR_VALUE(rc))
+ struct task_struct *task;
+
+ task = kthread_run(ptlrpc_hr_main,
+ &hrp->hrp_thrs[j],
+ "ptlrpc_hr%02d_%03d",
+ hrp->hrp_cpt, hrt->hrt_id);
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
break;
+ }
}
wait_event(ptlrpc_hr.hr_waitq,
- atomic_read(&hrp->hrp_nstarted) == j);
- if (!IS_ERR_VALUE(rc))
- continue;
+ atomic_read(&hrp->hrp_nstarted) == j);
- CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n",
- i, j, rc);
- ptlrpc_stop_hr_threads();
- return rc;
+ if (rc < 0) {
+ CERROR("cannot start reply handler thread %d:%d: rc = %d\n",
+ i, j, rc);
+ ptlrpc_stop_hr_threads();
+ return rc;
+ }
}
return 0;
}
@@ -2281,7 +2306,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
{
struct l_wait_info lwi = { 0 };
struct ptlrpc_thread *thread;
- LIST_HEAD (zombie);
+ LIST_HEAD(zombie);
CDEBUG(D_INFO, "Stopping threads for service %s\n",
svcpt->scp_service->srv_name);
@@ -2298,7 +2323,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
while (!list_empty(&svcpt->scp_threads)) {
thread = list_entry(svcpt->scp_threads.next,
- struct ptlrpc_thread, t_link);
+ struct ptlrpc_thread, t_link);
if (thread_is_stopped(thread)) {
list_del(&thread->t_link);
list_add(&thread->t_link, &zombie);
@@ -2333,7 +2358,7 @@ static void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
int i;
ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service != NULL)
+ if (svcpt->scp_service)
ptlrpc_svcpt_stop_threads(svcpt);
}
}
@@ -2374,10 +2399,9 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
struct l_wait_info lwi = { 0 };
struct ptlrpc_thread *thread;
struct ptlrpc_service *svc;
+ struct task_struct *task;
int rc;
- LASSERT(svcpt != NULL);
-
svc = svcpt->scp_service;
CDEBUG(D_RPCTRACE, "%s[%d] started %d min %d max %d\n",
@@ -2396,7 +2420,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
thread = kzalloc_node(sizeof(*thread), GFP_NOFS,
cfs_cpt_spread_node(svc->srv_cptable,
svcpt->scp_cpt));
- if (thread == NULL)
+ if (!thread)
return -ENOMEM;
init_waitqueue_head(&thread->t_ctl_waitq);
@@ -2409,7 +2433,8 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
if (svcpt->scp_nthrs_starting != 0) {
/* serialize starting because some modules (obdfilter)
- * might require unique and contiguous t_id */
+ * might require unique and contiguous t_id
+ */
LASSERT(svcpt->scp_nthrs_starting == 1);
spin_unlock(&svcpt->scp_lock);
kfree(thread);
@@ -2442,9 +2467,10 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
}
CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
- rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name));
- if (IS_ERR_VALUE(rc)) {
- CERROR("cannot start thread '%s': rc %d\n",
+ task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name);
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("cannot start thread '%s': rc = %d\n",
thread->t_name, rc);
spin_lock(&svcpt->scp_lock);
--svcpt->scp_nthrs_starting;
@@ -2488,7 +2514,7 @@ int ptlrpc_hr_init(void)
ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
sizeof(*hrp));
- if (ptlrpc_hr.hr_partitions == NULL)
+ if (!ptlrpc_hr.hr_partitions)
return -ENOMEM;
init_waitqueue_head(&ptlrpc_hr.hr_waitq);
@@ -2509,7 +2535,7 @@ int ptlrpc_hr_init(void)
kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
i));
- if (hrp->hrp_thrs == NULL) {
+ if (!hrp->hrp_thrs) {
rc = -ENOMEM;
goto out;
}
@@ -2537,7 +2563,7 @@ void ptlrpc_hr_fini(void)
struct ptlrpc_hr_partition *hrp;
int i;
- if (ptlrpc_hr.hr_partitions == NULL)
+ if (!ptlrpc_hr.hr_partitions)
return;
ptlrpc_stop_hr_threads();
@@ -2577,7 +2603,7 @@ ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
/* early disarm AT timer... */
ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service != NULL)
+ if (svcpt->scp_service)
del_timer(&svcpt->scp_at_timer);
}
}
@@ -2592,18 +2618,20 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
int i;
/* All history will be culled when the next request buffer is
- * freed in ptlrpc_service_purge_all() */
+ * freed in ptlrpc_service_purge_all()
+ */
svc->srv_hist_nrqbds_cpt_max = 0;
rc = LNetClearLazyPortal(svc->srv_req_portal);
LASSERT(rc == 0);
ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service == NULL)
+ if (!svcpt->scp_service)
break;
/* Unlink all the request buffers. This forces a 'final'
- * event with its 'unlink' flag set for each posted rqbd */
+ * event with its 'unlink' flag set for each posted rqbd
+ */
list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
rqbd_list) {
rc = LNetMDUnlink(rqbd->rqbd_md_h);
@@ -2612,17 +2640,19 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
}
ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service == NULL)
+ if (!svcpt->scp_service)
break;
/* Wait for the network to release any buffers
- * it's currently filling */
+ * it's currently filling
+ */
spin_lock(&svcpt->scp_lock);
while (svcpt->scp_nrqbds_posted != 0) {
spin_unlock(&svcpt->scp_lock);
/* Network access will complete in finite time but
* the HUGE timeout lets us CWARN for visibility
- * of sluggish NALs */
+ * of sluggish LNDs
+ */
lwi = LWI_TIMEOUT_INTERVAL(
cfs_time_seconds(LONG_UNLINK),
cfs_time_seconds(1), NULL, NULL);
@@ -2648,13 +2678,13 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc)
int i;
ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service == NULL)
+ if (!svcpt->scp_service)
break;
spin_lock(&svcpt->scp_rep_lock);
while (!list_empty(&svcpt->scp_rep_active)) {
rs = list_entry(svcpt->scp_rep_active.next,
- struct ptlrpc_reply_state, rs_list);
+ struct ptlrpc_reply_state, rs_list);
spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
spin_unlock(&rs->rs_lock);
@@ -2663,10 +2693,11 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc)
/* purge the request queue. NB No new replies (rqbds
* all unlinked) and no service threads, so I'm the only
- * thread noodling the request queue now */
+ * thread noodling the request queue now
+ */
while (!list_empty(&svcpt->scp_req_incoming)) {
req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
+ struct ptlrpc_request, rq_list);
list_del(&req->rq_list);
svcpt->scp_nreqs_incoming--;
@@ -2682,24 +2713,26 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc)
LASSERT(svcpt->scp_nreqs_incoming == 0);
LASSERT(svcpt->scp_nreqs_active == 0);
/* history should have been culled by
- * ptlrpc_server_finish_request */
+ * ptlrpc_server_finish_request
+ */
LASSERT(svcpt->scp_hist_nrqbds == 0);
/* Now free all the request buffers since nothing
- * references them any more... */
+ * references them any more...
+ */
while (!list_empty(&svcpt->scp_rqbd_idle)) {
rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
ptlrpc_free_rqbd(rqbd);
}
ptlrpc_wait_replies(svcpt);
while (!list_empty(&svcpt->scp_rep_idle)) {
rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state,
- rs_list);
+ struct ptlrpc_reply_state,
+ rs_list);
list_del(&rs->rs_list);
kvfree(rs);
}
@@ -2714,7 +2747,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc)
int i;
ptlrpc_service_for_each_part(svcpt, i, svc) {
- if (svcpt->scp_service == NULL)
+ if (!svcpt->scp_service)
break;
/* In case somebody rearmed this in the meantime */
@@ -2730,7 +2763,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc)
ptlrpc_service_for_each_part(svcpt, i, svc)
kfree(svcpt);
- if (svc->srv_cpts != NULL)
+ if (svc->srv_cpts)
cfs_expr_list_values_free(svc->srv_cpts, svc->srv_ncpts);
kfree(svc);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 61d9ca93c53a..3ffd2d91f274 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -333,17 +333,9 @@ void lustre_assert_wire_constants(void)
CLASSERT(LDLM_MAX_TYPE == 14);
CLASSERT(LUSTRE_RES_ID_SEQ_OFF == 0);
CLASSERT(LUSTRE_RES_ID_VER_OID_OFF == 1);
- LASSERTF(UPDATE_OBJ == 1000, "found %lld\n",
- (long long)UPDATE_OBJ);
- LASSERTF(UPDATE_LAST_OPC == 1001, "found %lld\n",
- (long long)UPDATE_LAST_OPC);
CLASSERT(LUSTRE_RES_ID_QUOTA_SEQ_OFF == 2);
CLASSERT(LUSTRE_RES_ID_QUOTA_VER_OID_OFF == 3);
CLASSERT(LUSTRE_RES_ID_HSH_OFF == 3);
- CLASSERT(LQUOTA_TYPE_USR == 0);
- CLASSERT(LQUOTA_TYPE_GRP == 1);
- CLASSERT(LQUOTA_RES_MD == 1);
- CLASSERT(LQUOTA_RES_DT == 2);
LASSERTF(OBD_PING == 400, "found %lld\n",
(long long)OBD_PING);
LASSERTF(OBD_LOG_CANCEL == 401, "found %lld\n",
@@ -437,30 +429,6 @@ void lustre_assert_wire_constants(void)
(unsigned)LMAC_NOT_IN_OI);
LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
(unsigned)LMAC_FID_ON_OST);
- LASSERTF(OBJ_CREATE == 1, "found %lld\n",
- (long long)OBJ_CREATE);
- LASSERTF(OBJ_DESTROY == 2, "found %lld\n",
- (long long)OBJ_DESTROY);
- LASSERTF(OBJ_REF_ADD == 3, "found %lld\n",
- (long long)OBJ_REF_ADD);
- LASSERTF(OBJ_REF_DEL == 4, "found %lld\n",
- (long long)OBJ_REF_DEL);
- LASSERTF(OBJ_ATTR_SET == 5, "found %lld\n",
- (long long)OBJ_ATTR_SET);
- LASSERTF(OBJ_ATTR_GET == 6, "found %lld\n",
- (long long)OBJ_ATTR_GET);
- LASSERTF(OBJ_XATTR_SET == 7, "found %lld\n",
- (long long)OBJ_XATTR_SET);
- LASSERTF(OBJ_XATTR_GET == 8, "found %lld\n",
- (long long)OBJ_XATTR_GET);
- LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n",
- (long long)OBJ_INDEX_LOOKUP);
- LASSERTF(OBJ_INDEX_LOOKUP == 9, "found %lld\n",
- (long long)OBJ_INDEX_LOOKUP);
- LASSERTF(OBJ_INDEX_INSERT == 10, "found %lld\n",
- (long long)OBJ_INDEX_INSERT);
- LASSERTF(OBJ_INDEX_DELETE == 11, "found %lld\n",
- (long long)OBJ_INDEX_DELETE);
/* Checks for struct ost_id */
LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
@@ -587,9 +555,6 @@ void lustre_assert_wire_constants(void)
(long long)LDF_COLLIDE);
LASSERTF(LU_PAGE_SIZE == 4096, "found %lld\n",
(long long)LU_PAGE_SIZE);
- /* Checks for union lu_page */
- LASSERTF((int)sizeof(union lu_page) == 4096, "found %lld\n",
- (long long)(int)sizeof(union lu_page));
/* Checks for struct lustre_handle */
LASSERTF((int)sizeof(struct lustre_handle) == 8, "found %lld\n",
@@ -1535,11 +1500,6 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
(long long)(int)sizeof(union lquota_id));
- LASSERTF(QUOTABLOCK_BITS == 10, "found %lld\n",
- (long long)QUOTABLOCK_BITS);
- LASSERTF(QUOTABLOCK_SIZE == 1024, "found %lld\n",
- (long long)QUOTABLOCK_SIZE);
-
/* Checks for struct obd_quotactl */
LASSERTF((int)sizeof(struct obd_quotactl) == 112, "found %lld\n",
(long long)(int)sizeof(struct obd_quotactl));
@@ -1642,138 +1602,6 @@ void lustre_assert_wire_constants(void)
LASSERTF(Q_FINVALIDATE == 0x800104, "found 0x%.8x\n",
Q_FINVALIDATE);
- /* Checks for struct lquota_acct_rec */
- LASSERTF((int)sizeof(struct lquota_acct_rec) == 16, "found %lld\n",
- (long long)(int)sizeof(struct lquota_acct_rec));
- LASSERTF((int)offsetof(struct lquota_acct_rec, bspace) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lquota_acct_rec, bspace));
- LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->bspace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_acct_rec *)0)->bspace));
- LASSERTF((int)offsetof(struct lquota_acct_rec, ispace) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lquota_acct_rec, ispace));
- LASSERTF((int)sizeof(((struct lquota_acct_rec *)0)->ispace) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_acct_rec *)0)->ispace));
-
- /* Checks for struct lquota_glb_rec */
- LASSERTF((int)sizeof(struct lquota_glb_rec) == 32, "found %lld\n",
- (long long)(int)sizeof(struct lquota_glb_rec));
- LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_hardlimit) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lquota_glb_rec, qbr_hardlimit));
- LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_hardlimit));
- LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_softlimit) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lquota_glb_rec, qbr_softlimit));
- LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_softlimit));
- LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_time) == 16, "found %lld\n",
- (long long)(int)offsetof(struct lquota_glb_rec, qbr_time));
- LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_time) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_time));
- LASSERTF((int)offsetof(struct lquota_glb_rec, qbr_granted) == 24, "found %lld\n",
- (long long)(int)offsetof(struct lquota_glb_rec, qbr_granted));
- LASSERTF((int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_glb_rec *)0)->qbr_granted));
-
- /* Checks for struct lquota_slv_rec */
- LASSERTF((int)sizeof(struct lquota_slv_rec) == 8, "found %lld\n",
- (long long)(int)sizeof(struct lquota_slv_rec));
- LASSERTF((int)offsetof(struct lquota_slv_rec, qsr_granted) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lquota_slv_rec, qsr_granted));
- LASSERTF((int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lquota_slv_rec *)0)->qsr_granted));
-
- /* Checks for struct idx_info */
- LASSERTF((int)sizeof(struct idx_info) == 80, "found %lld\n",
- (long long)(int)sizeof(struct idx_info));
- LASSERTF((int)offsetof(struct idx_info, ii_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_magic));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_magic));
- LASSERTF((int)offsetof(struct idx_info, ii_flags) == 4, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_flags));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_flags));
- LASSERTF((int)offsetof(struct idx_info, ii_count) == 8, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_count));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_count) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_count));
- LASSERTF((int)offsetof(struct idx_info, ii_pad0) == 10, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_pad0));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad0) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_pad0));
- LASSERTF((int)offsetof(struct idx_info, ii_attrs) == 12, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_attrs));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_attrs) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_attrs));
- LASSERTF((int)offsetof(struct idx_info, ii_fid) == 16, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_fid));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_fid));
- LASSERTF((int)offsetof(struct idx_info, ii_version) == 32, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_version));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_version) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_version));
- LASSERTF((int)offsetof(struct idx_info, ii_hash_start) == 40, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_hash_start));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_start) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_start));
- LASSERTF((int)offsetof(struct idx_info, ii_hash_end) == 48, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_hash_end));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_hash_end) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_hash_end));
- LASSERTF((int)offsetof(struct idx_info, ii_keysize) == 56, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_keysize));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_keysize) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_keysize));
- LASSERTF((int)offsetof(struct idx_info, ii_recsize) == 58, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_recsize));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_recsize) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_recsize));
- LASSERTF((int)offsetof(struct idx_info, ii_pad1) == 60, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_pad1));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad1) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_pad1));
- LASSERTF((int)offsetof(struct idx_info, ii_pad2) == 64, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_pad2));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad2) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_pad2));
- LASSERTF((int)offsetof(struct idx_info, ii_pad3) == 72, "found %lld\n",
- (long long)(int)offsetof(struct idx_info, ii_pad3));
- LASSERTF((int)sizeof(((struct idx_info *)0)->ii_pad3) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct idx_info *)0)->ii_pad3));
- CLASSERT(IDX_INFO_MAGIC == 0x3D37CC37);
-
- /* Checks for struct lu_idxpage */
- LASSERTF((int)sizeof(struct lu_idxpage) == 16, "found %lld\n",
- (long long)(int)sizeof(struct lu_idxpage));
- LASSERTF((int)offsetof(struct lu_idxpage, lip_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct lu_idxpage, lip_magic));
- LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_magic));
- LASSERTF((int)offsetof(struct lu_idxpage, lip_flags) == 4, "found %lld\n",
- (long long)(int)offsetof(struct lu_idxpage, lip_flags));
- LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_flags) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_flags));
- LASSERTF((int)offsetof(struct lu_idxpage, lip_nr) == 6, "found %lld\n",
- (long long)(int)offsetof(struct lu_idxpage, lip_nr));
- LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_nr) == 2, "found %lld\n",
- (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_nr));
- LASSERTF((int)offsetof(struct lu_idxpage, lip_pad0) == 8, "found %lld\n",
- (long long)(int)offsetof(struct lu_idxpage, lip_pad0));
- LASSERTF((int)sizeof(((struct lu_idxpage *)0)->lip_pad0) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct lu_idxpage *)0)->lip_pad0));
- CLASSERT(LIP_MAGIC == 0x8A6D6B6C);
- LASSERTF(LIP_HDR_SIZE == 16, "found %lld\n",
- (long long)LIP_HDR_SIZE);
- LASSERTF(II_FL_NOHASH == 1, "found %lld\n",
- (long long)II_FL_NOHASH);
- LASSERTF(II_FL_VARKEY == 2, "found %lld\n",
- (long long)II_FL_VARKEY);
- LASSERTF(II_FL_VARREC == 4, "found %lld\n",
- (long long)II_FL_VARREC);
- LASSERTF(II_FL_NONUNQ == 8, "found %lld\n",
- (long long)II_FL_NONUNQ);
-
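/*
 * Illustrative aside, not part of the patch: the LASSERTF lines above pin
 * down on-the-wire struct layouts with sizeof()/offsetof() so an ABI change
 * cannot slip in silently. A minimal standalone sketch of the same idiom;
 * struct demo_rec is hypothetical, not a Lustre type:
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct demo_rec {
	uint64_t bspace;	/* bytes  expected at offset 0 */
	uint64_t ispace;	/* inodes expected at offset 8 */
};

int main(void)
{
	assert(sizeof(struct demo_rec) == 16);
	assert(offsetof(struct demo_rec, bspace) == 0);
	assert(offsetof(struct demo_rec, ispace) == 8);
	return 0;
}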
/* Checks for struct niobuf_remote */
LASSERTF((int)sizeof(struct niobuf_remote) == 16, "found %lld\n",
(long long)(int)sizeof(struct niobuf_remote));
@@ -3753,50 +3581,6 @@ void lustre_assert_wire_constants(void)
LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n",
(long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap));
- /* Checks for struct quota_body */
- LASSERTF((int)sizeof(struct quota_body) == 112, "found %lld\n",
- (long long)(int)sizeof(struct quota_body));
- LASSERTF((int)offsetof(struct quota_body, qb_fid) == 0, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_fid));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_fid));
- LASSERTF((int)offsetof(struct quota_body, qb_id) == 16, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_id));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_id) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_id));
- LASSERTF((int)offsetof(struct quota_body, qb_flags) == 32, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_flags));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_flags) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_flags));
- LASSERTF((int)offsetof(struct quota_body, qb_padding) == 36, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_padding));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_padding));
- LASSERTF((int)offsetof(struct quota_body, qb_count) == 40, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_count));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_count) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_count));
- LASSERTF((int)offsetof(struct quota_body, qb_usage) == 48, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_usage));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_usage) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_usage));
- LASSERTF((int)offsetof(struct quota_body, qb_slv_ver) == 56, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_slv_ver));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_slv_ver) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_slv_ver));
- LASSERTF((int)offsetof(struct quota_body, qb_lockh) == 64, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_lockh));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_lockh) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_lockh));
- LASSERTF((int)offsetof(struct quota_body, qb_glb_lockh) == 72, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_glb_lockh));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_glb_lockh) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_glb_lockh));
- LASSERTF((int)offsetof(struct quota_body, qb_padding1[4]) == 112, "found %lld\n",
- (long long)(int)offsetof(struct quota_body, qb_padding1[4]));
- LASSERTF((int)sizeof(((struct quota_body *)0)->qb_padding1[4]) == 8, "found %lld\n",
- (long long)(int)sizeof(((struct quota_body *)0)->qb_padding1[4]));
-
/* Checks for struct mgs_target_info */
LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
(long long)(int)sizeof(struct mgs_target_info));
@@ -4431,60 +4215,4 @@ void lustre_assert_wire_constants(void)
LASSERTF(sizeof(((struct hsm_user_import *)0)->hui_archive_id) == 4,
"found %lld\n",
(long long)sizeof(((struct hsm_user_import *)0)->hui_archive_id));
-
- /* Checks for struct update_buf */
- LASSERTF((int)sizeof(struct update_buf) == 8, "found %lld\n",
- (long long)(int)sizeof(struct update_buf));
- LASSERTF((int)offsetof(struct update_buf, ub_magic) == 0, "found %lld\n",
- (long long)(int)offsetof(struct update_buf, ub_magic));
- LASSERTF((int)sizeof(((struct update_buf *)0)->ub_magic) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update_buf *)0)->ub_magic));
- LASSERTF((int)offsetof(struct update_buf, ub_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct update_buf, ub_count));
- LASSERTF((int)sizeof(((struct update_buf *)0)->ub_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update_buf *)0)->ub_count));
- LASSERTF((int)offsetof(struct update_buf, ub_bufs) == 8, "found %lld\n",
- (long long)(int)offsetof(struct update_buf, ub_bufs));
- LASSERTF((int)sizeof(((struct update_buf *)0)->ub_bufs) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct update_buf *)0)->ub_bufs));
-
- /* Checks for struct update_reply */
- LASSERTF((int)sizeof(struct update_reply) == 8, "found %lld\n",
- (long long)(int)sizeof(struct update_reply));
- LASSERTF((int)offsetof(struct update_reply, ur_version) == 0, "found %lld\n",
- (long long)(int)offsetof(struct update_reply, ur_version));
- LASSERTF((int)sizeof(((struct update_reply *)0)->ur_version) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update_reply *)0)->ur_version));
- LASSERTF((int)offsetof(struct update_reply, ur_count) == 4, "found %lld\n",
- (long long)(int)offsetof(struct update_reply, ur_count));
- LASSERTF((int)sizeof(((struct update_reply *)0)->ur_count) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update_reply *)0)->ur_count));
- LASSERTF((int)offsetof(struct update_reply, ur_lens) == 8, "found %lld\n",
- (long long)(int)offsetof(struct update_reply, ur_lens));
- LASSERTF((int)sizeof(((struct update_reply *)0)->ur_lens) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct update_reply *)0)->ur_lens));
-
- /* Checks for struct update */
- LASSERTF((int)sizeof(struct update) == 56, "found %lld\n",
- (long long)(int)sizeof(struct update));
- LASSERTF((int)offsetof(struct update, u_type) == 0, "found %lld\n",
- (long long)(int)offsetof(struct update, u_type));
- LASSERTF((int)sizeof(((struct update *)0)->u_type) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_type));
- LASSERTF((int)offsetof(struct update, u_batchid) == 4, "found %lld\n",
- (long long)(int)offsetof(struct update, u_batchid));
- LASSERTF((int)sizeof(((struct update *)0)->u_batchid) == 4, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_batchid));
- LASSERTF((int)offsetof(struct update, u_fid) == 8, "found %lld\n",
- (long long)(int)offsetof(struct update, u_fid));
- LASSERTF((int)sizeof(((struct update *)0)->u_fid) == 16, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_fid));
- LASSERTF((int)offsetof(struct update, u_lens) == 24, "found %lld\n",
- (long long)(int)offsetof(struct update, u_lens));
- LASSERTF((int)sizeof(((struct update *)0)->u_lens) == 32, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_lens));
- LASSERTF((int)offsetof(struct update, u_bufs) == 56, "found %lld\n",
- (long long)(int)offsetof(struct update, u_bufs));
- LASSERTF((int)sizeof(((struct update *)0)->u_bufs) == 0, "found %lld\n",
- (long long)(int)sizeof(((struct update *)0)->u_bufs));
}
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index d48a5c29c417..0078b6a92f0b 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -27,7 +27,11 @@ source "drivers/staging/media/davinci_vpfe/Kconfig"
source "drivers/staging/media/mn88472/Kconfig"
-source "drivers/staging/media/mn88473/Kconfig"
+source "drivers/staging/media/mx2/Kconfig"
+
+source "drivers/staging/media/mx3/Kconfig"
+
+source "drivers/staging/media/omap1/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index fb94f045c40f..91495882a36c 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -2,7 +2,9 @@ obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
+obj-$(CONFIG_VIDEO_MX2) += mx2/
+obj-$(CONFIG_VIDEO_MX3) += mx3/
+obj-$(CONFIG_VIDEO_OMAP1) += omap1/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_DVB_MN88472) += mn88472/
-obj-$(CONFIG_DVB_MN88473) += mn88473/
obj-$(CONFIG_VIDEO_TIMBERDALE) += timb/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 8fdf0ac4f287..abf330f92c0b 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -1828,17 +1828,14 @@ static int bcm2048_deinit(struct bcm2048_device *bdev)
err = bcm2048_set_audio_route(bdev, 0);
if (err < 0)
- goto exit;
+ return err;
err = bcm2048_set_dac_output(bdev, 0);
if (err < 0)
- goto exit;
+ return err;
err = bcm2048_set_power_state(bdev, BCM2048_POWER_OFF);
- if (err < 0)
- goto exit;
-exit:
return err;
}
diff --git a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
index 3cc9be776f8b..f4f35c9ad1ab 100644
--- a/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
+++ b/drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
@@ -538,8 +538,8 @@ struct vpfe_isif_raw_config {
};
/**********************************************************************
-* IPIPE API Structures
-**********************************************************************/
+ * IPIPE API Structures
+ **********************************************************************/
/* IPIPE module configurations */
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
index ac78ed2f8bcc..ff47a8f369fc 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
@@ -1350,21 +1350,16 @@ error:
*/
static long ipipe_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
- int ret = 0;
-
switch (cmd) {
case VIDIOC_VPFE_IPIPE_S_CONFIG:
- ret = ipipe_s_config(sd, arg);
- break;
+ return ipipe_s_config(sd, arg);
case VIDIOC_VPFE_IPIPE_G_CONFIG:
- ret = ipipe_g_config(sd, arg);
- break;
+ return ipipe_g_config(sd, arg);
default:
- ret = -ENOIOCTLCMD;
+ return -ENOIOCTLCMD;
}
- return ret;
}
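/*
 * Illustrative aside, not part of the patch: with every case returning
 * directly, the local "ret" in ipipe_ioctl() was dead weight. The same
 * dispatch shape, standalone (ENOTTY stands in for the kernel-internal
 * -ENOIOCTLCMD; command values are made up):
 */
#include <errno.h>
#include <stdio.h>

enum { CMD_SET = 1, CMD_GET = 2 };

static long do_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case CMD_SET:
		return 0;	/* would call the set-config handler */
	case CMD_GET:
		return 0;	/* would call the get-config handler */
	default:
		return -ENOTTY;	/* unknown command */
	}
}

int main(void)
{
	printf("%ld %ld\n", do_ioctl(CMD_SET), do_ioctl(99));
	return 0;
}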
void vpfe_ipipe_enable(struct vpfe_device *vpfe_dev, int en)
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
index b1d5e23ae6e0..958ef71ee4d5 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c
@@ -242,7 +242,7 @@ static int get_ipipe_mode(struct vpfe_ipipe_device *ipipe)
if (ipipeif_sink == IPIPEIF_INPUT_MEMORY)
return IPIPE_MODE_SINGLE_SHOT;
- else if (ipipeif_sink == IPIPEIF_INPUT_ISIF)
+ if (ipipeif_sink == IPIPEIF_INPUT_ISIF)
return IPIPE_MODE_CONTINUOUS;
return -EINVAL;
@@ -682,8 +682,10 @@ ipipe_set_rgb2rgb_regs(void __iomem *base_addr, unsigned int id,
ipipe_clock_enable(base_addr);
if (id == IPIPE_RGB2RGB_2) {
- /* For second RGB module, gain integer is 3 bits instead
- of 4, offset has 11 bits insread of 13 */
+ /*
+ * For second RGB module, gain integer is 3 bits instead
+	 * of 4, offset has 11 bits instead of 13
+ */
offset = RGB2_MUL_BASE;
integ_mask = 0x7;
offset_mask = RGB2RGB_2_OFST_MASK;
@@ -792,8 +794,10 @@ ipipe_set_3d_lut_regs(void __iomem *base_addr, void __iomem *isp5_base_addr,
	/* valid table */
tbl = lut_3d->table;
for (i = 0; i < VPFE_IPIPE_MAX_SIZE_3D_LUT; i++) {
- /* Each entry has 0-9 (B), 10-19 (G) and
- 20-29 R values */
+ /*
+ * Each entry has 0-9 (B), 10-19 (G) and
+ * 20-29 R values
+ */
val = tbl[i].b & D3_LUT_ENTRY_MASK;
val |= (tbl[i].g & D3_LUT_ENTRY_MASK) <<
D3_LUT_ENTRY_G_SHIFT;
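/*
 * Illustrative aside, not part of the patch: the packing described in the
 * comment above puts 10 bits of B at [9:0], G at [19:10] and R at [29:20].
 * A standalone round-trip of that layout; the mask and shifts mirror the
 * driver's D3_LUT_* constants but are written out here as assumptions:
 */
#include <stdint.h>
#include <stdio.h>

#define LUT_MASK	0x3ffu	/* 10 bits per component */
#define LUT_G_SHIFT	10
#define LUT_R_SHIFT	20

int main(void)
{
	uint32_t b = 0x155, g = 0x2aa, r = 0x0f0;
	uint32_t val = (b & LUT_MASK) |
		       ((g & LUT_MASK) << LUT_G_SHIFT) |
		       ((r & LUT_MASK) << LUT_R_SHIFT);

	printf("packed 0x%08x -> b 0x%03x g 0x%03x r 0x%03x\n",
	       (unsigned)val,
	       (unsigned)(val & LUT_MASK),
	       (unsigned)((val >> LUT_G_SHIFT) & LUT_MASK),
	       (unsigned)((val >> LUT_R_SHIFT) & LUT_MASK));
	return 0;
}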
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
index 2bf2f7a69173..7ee157233047 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.h
@@ -278,9 +278,10 @@
/* Resizer Rescale Parameters */
#define RSZ_EN_A 0x58
#define RSZ_EN_B 0xe8
-/* offset of the registers to be added with base register of
- either RSZ0 or RSZ1
-*/
+/*
+ * offset of the registers to be added with base register of
+ * either RSZ0 or RSZ1
+ */
#define RSZ_MODE 0x4
#define RSZ_420 0x8
#define RSZ_I_VPS 0xc
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
index 633d6456fdce..46fd2c7f69c3 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
@@ -94,7 +94,7 @@ static int get_oneshot_mode(enum ipipeif_input_entity input)
{
if (input == IPIPEIF_INPUT_MEMORY)
return IPIPEIF_MODE_ONE_SHOT;
- else if (input == IPIPEIF_INPUT_ISIF)
+ if (input == IPIPEIF_INPUT_ISIF)
return IPIPEIF_MODE_CONTINUOUS;
return -EINVAL;
@@ -641,8 +641,9 @@ ipipeif_try_format(struct vpfe_ipipeif_device *ipipeif,
}
static int
-ipipeif_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
+ipipeif_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
{
struct vpfe_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt format;
diff --git a/drivers/staging/media/davinci_vpfe/dm365_isif.c b/drivers/staging/media/davinci_vpfe/dm365_isif.c
index 99057892d88d..ae9202ded59f 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_isif.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_isif.c
@@ -282,7 +282,8 @@ isif_config_format(struct vpfe_device *vpfe_dev, unsigned int pad)
* @fmt: pointer to v4l2 subdev format structure
*/
static void
-isif_try_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg,
+isif_try_format(struct vpfe_isif_device *isif,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
unsigned int width = fmt->format.width;
@@ -625,21 +626,16 @@ static int isif_set_params(struct v4l2_subdev *sd, void *params)
*/
static long isif_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
- int ret;
-
switch (cmd) {
case VIDIOC_VPFE_ISIF_S_RAW_PARAMS:
- ret = isif_set_params(sd, arg);
- break;
+ return isif_set_params(sd, arg);
case VIDIOC_VPFE_ISIF_G_RAW_PARAMS:
- ret = isif_get_params(sd, arg);
- break;
+ return isif_get_params(sd, arg);
default:
- ret = -ENOIOCTLCMD;
+ return -ENOIOCTLCMD;
}
- return ret;
}
static void isif_config_gain_offset(struct vpfe_isif_device *isif)
@@ -1239,7 +1235,8 @@ static int isif_config_ycbcr(struct v4l2_subdev *sd, int mode)
* a lot of registers that we didn't touch
*/
/* start with all bits zero */
- ccdcfg = modeset = 0;
+ ccdcfg = 0;
+ modeset = 0;
pix_fmt = isif_get_pix_fmt(format->code);
if (pix_fmt < 0) {
pr_debug("Invalid pix_fmt(input mode)\n");
@@ -1398,8 +1395,9 @@ static int isif_set_stream(struct v4l2_subdev *sd, int enable)
* @which: wanted subdev format.
*/
static struct v4l2_mbus_framefmt *
-__isif_get_format(struct vpfe_isif_device *isif, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, enum v4l2_subdev_format_whence which)
+__isif_get_format(struct vpfe_isif_device *isif,
+ struct v4l2_subdev_pad_config *cfg, unsigned int pad,
+ enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY) {
struct v4l2_subdev_format fmt;
@@ -1570,7 +1568,7 @@ isif_pad_set_selection(struct v4l2_subdev *sd,
sel->r.height = format->height;
}
/* adjust the width to 16 pixel boundary */
- sel->r.width = ((sel->r.width + 15) & ~0xf);
+ sel->r.width = (sel->r.width + 15) & ~0xf;
vpfe_isif->crop = sel->r;
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
isif_set_image_window(vpfe_isif);
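/*
 * Illustrative aside, not part of the patch: the width adjustment above is
 * the standard align-up idiom; adding (16 - 1) and clearing the low four
 * bits rounds any width up to the next multiple of 16.
 */
#include <assert.h>

static unsigned int align16(unsigned int width)
{
	return (width + 15) & ~0xfu;
}

int main(void)
{
	assert(align16(1) == 16);
	assert(align16(16) == 16);
	assert(align16(17) == 32);
	return 0;
}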
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index a91395ce91e1..3cd56cc132c7 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -404,7 +404,7 @@ resizer_calculate_down_scale_f_div_param(struct device *dev,
param->f_div.pass[0].src_hsz = upper_h1 + o;
param->f_div.pass[1].o_hsz = h2 - 1;
param->f_div.pass[1].i_hps = 10 + (val1 * two_power);
- param->f_div.pass[1].h_phs = (val - (val1 << 8));
+ param->f_div.pass[1].h_phs = val - (val1 << 8);
param->f_div.pass[1].src_hps = upper_h1 - o;
param->f_div.pass[1].src_hsz = upper_h2 + o;
@@ -425,8 +425,8 @@ resizer_configure_common_in_params(struct vpfe_resizer_device *resizer)
param->rsz_common.hps = param->user_config.hst;
if (vpfe_ipipeif_decimation_enabled(vpfe_dev))
- param->rsz_common.hsz = (((informat->width - 1) *
- IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev));
+ param->rsz_common.hsz = ((informat->width - 1) *
+ IPIPEIF_RSZ_CONST) / vpfe_ipipeif_get_rsz(vpfe_dev);
else
param->rsz_common.hsz = informat->width - 1;
@@ -650,7 +650,7 @@ resizer_calculate_normal_f_div_param(struct device *dev, int input_width,
param->f_div.pass[0].src_hsz = (input_width >> 2) + o;
param->f_div.pass[1].o_hsz = h2 - 1;
param->f_div.pass[1].i_hps = val1;
- param->f_div.pass[1].h_phs = (val - (val1 << 8));
+ param->f_div.pass[1].h_phs = val - (val1 << 8);
param->f_div.pass[1].src_hps = (input_width >> 2) - o;
param->f_div.pass[1].src_hsz = (input_width >> 2) + o;
@@ -1387,8 +1387,9 @@ resizer_try_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int resizer_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
{
struct vpfe_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
@@ -1447,8 +1448,9 @@ static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_con
* @fmt: pointer to v4l2 subdev format structure
* return -EINVAL or zero on success
*/
-static int resizer_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int resizer_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
{
struct v4l2_mbus_framefmt *format;
@@ -1670,7 +1672,7 @@ static int resizer_link_setup(struct media_entity *entity,
resizer->crop_resizer.input =
RESIZER_CROP_INPUT_IPIPEIF;
else if (ipipe_source == IPIPE_OUTPUT_RESIZER)
- resizer->crop_resizer.input =
+ resizer->crop_resizer.input =
RESIZER_CROP_INPUT_IPIPE;
else
return -EINVAL;
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index ec46f366dd17..bf077f8342f6 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -442,8 +442,10 @@ static int vpfe_register_entities(struct vpfe_device *vpfe_dev)
/* create links now, starting with external(i2c) entities */
for (i = 0; i < vpfe_dev->num_ext_subdevs; i++)
- /* if entity has no pads (ex: amplifier),
- cant establish link */
+ /*
+ * if entity has no pads (ex: amplifier),
+	 * can't establish link
+ */
if (vpfe_dev->sd[i]->entity.num_pads) {
ret = media_create_pad_link(&vpfe_dev->sd[i]->entity,
0, &vpfe_dev->vpfe_isif.subdev.entity,
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index db49af90217e..b793c04028a3 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,21 +172,19 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
static int vpfe_update_pipe_state(struct vpfe_video_device *video)
{
struct vpfe_pipeline *pipe = &video->pipe;
	int ret;
	ret = vpfe_prepare_pipeline(video);
	if (ret)
		return ret;
- /* Find out if there is any input video
- if yes, it is single shot.
- */
+ /*
+	 * Find out if there is any input video;
+ * if yes, it is single shot.
+ */
if (pipe->input_num == 0) {
pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
		ret = vpfe_update_current_ext_subdev(video);
		if (ret) {
			pr_err("Invalid external subdev\n");
			return ret;
}
} else {
pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -460,7 +458,7 @@ void vpfe_video_schedule_next_buffer(struct vpfe_video_device *video)
video->next_frm = list_entry(video->dma_queue.next,
struct vpfe_cap_buffer, list);
- if (VPFE_PIPELINE_STREAM_SINGLESHOT == video->pipe.state)
+ if (video->pipe.state == VPFE_PIPELINE_STREAM_SINGLESHOT)
video->cur_frm = video->next_frm;
list_del(&video->next_frm->list);
@@ -529,10 +527,11 @@ static int vpfe_release(struct file *file)
if (fh->io_allowed) {
if (video->started) {
vpfe_stop_capture(video);
- /* mark pipe state as stopped in vpfe_release(),
- as app might call streamon() after streamoff()
- in which case driver has to start streaming.
- */
+ /*
+ * mark pipe state as stopped in vpfe_release(),
+ * as app might call streamon() after streamoff()
+ * in which case driver has to start streaming.
+ */
video->pipe.state = VPFE_PIPELINE_STREAM_STOPPED;
vb2_streamoff(&video->buffer_queue,
video->buffer_queue.type);
@@ -668,12 +667,13 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
struct v4l2_subdev *subdev;
struct v4l2_format format;
struct media_pad *remote;
	int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
- /* since already subdev pad format is set,
- only one pixel format is available */
+ /*
+	 * since the subdev pad format is already set,
+ * only one pixel format is available
+ */
if (fmt->index > 0) {
v4l2_err(&vpfe_dev->v4l2_dev, "Invalid index\n");
return -EINVAL;
@@ -695,11 +695,10 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
sd_fmt.pad = remote->index;
sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
/* get output format of remote subdev */
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
	if (ret) {
		v4l2_err(&vpfe_dev->v4l2_dev,
			"invalid remote subdev for video node\n");
		return ret;
}
/* convert to pix format */
mbus.code = sd_fmt.format.code;
@@ -726,7 +725,6 @@ static int vpfe_s_fmt(struct file *file, void *priv,
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
	int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
/* If streaming is started, return error */
@@ -735,9 +733,8 @@ static int vpfe_s_fmt(struct file *file, void *priv,
return -EBUSY;
}
/* get adjacent subdev's output pad format */
	ret = __vpfe_video_get_format(video, &format);
	if (ret)
		return ret;
*fmt = format;
video->fmt = *fmt;
return 0;
@@ -760,13 +757,11 @@ static int vpfe_try_fmt(struct file *file, void *priv,
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
	int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
/* get adjacent subdev's output pad format */
	ret = __vpfe_video_get_format(video, &format);
	if (ret)
		return ret;
*fmt = format;
return 0;
@@ -843,9 +838,8 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
	ret = mutex_lock_interruptible(&video->lock);
	if (ret)
		return ret;
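/*
 * Illustrative aside, not part of the patch: the "call once, keep the
 * result" shape kept above matters whenever the callee has side effects;
 * mutex_lock_interruptible() takes the lock on success, so calling it a
 * second time just to fetch a return value would try to lock twice. A toy
 * stand-in showing the safe shape:
 */
#include <stdio.h>

static int calls;

static int fake_lock(void)	/* stand-in for a side-effecting callee */
{
	return ++calls == 1 ? 0 : -1;	/* succeeds only on the first call */
}

int main(void)
{
	int ret = fake_lock();	/* evaluate once, reuse ret everywhere */

	if (ret)
		return ret;
	printf("locked, callee ran %d time(s)\n", calls);
	return 0;
}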
/*
* If streaming is started return device busy
* error
@@ -946,9 +940,8 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
/* Call decoder driver function to set the standard */
	ret = mutex_lock_interruptible(&video->lock);
	if (ret)
		return ret;
sdinfo = video->current_ext_subdev;
/* If streaming is started, return device busy error */
if (video->started) {
@@ -1328,15 +1321,14 @@ static int vpfe_reqbufs(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != req_buf->type) {
+ if (req_buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+	    req_buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
return -EINVAL;
}
	ret = mutex_lock_interruptible(&video->lock);
	if (ret)
		return ret;
if (video->io_usrs != 0) {
v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1362,11 +1354,10 @@ static int vpfe_reqbufs(struct file *file, void *priv,
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	ret = vb2_queue_init(q);
	if (ret) {
		v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
		vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
		return ret;
}
fh->io_allowed = 1;
@@ -1390,8 +1381,8 @@ static int vpfe_querybuf(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
return -EINVAL;
}
@@ -1417,8 +1408,8 @@ static int vpfe_qbuf(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != p->type) {
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ p->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
return -EINVAL;
}
@@ -1445,8 +1436,8 @@ static int vpfe_dqbuf(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != buf->type) {
+ if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
return -EINVAL;
}
@@ -1478,8 +1469,8 @@ static int vpfe_streamon(struct file *file, void *priv,
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type &&
- V4L2_BUF_TYPE_VIDEO_OUTPUT != buf_type) {
+ if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ buf_type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
return ret;
}
@@ -1495,7 +1486,7 @@ static int vpfe_streamon(struct file *file, void *priv,
return -EIO;
}
/* Validate the pipeline */
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE == buf_type) {
+ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
ret = vpfe_video_validate_pipeline(pipe);
if (ret < 0)
return ret;
@@ -1542,9 +1533,8 @@ static int vpfe_streamoff(struct file *file, void *priv,
return -EINVAL;
}
	ret = mutex_lock_interruptible(&video->lock);
	if (ret)
		return ret;
vpfe_stop_capture(video);
ret = vb2_streamoff(&video->buffer_queue, buf_type);
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index ce3b5f230e2e..3551aed589c0 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1680,9 +1680,7 @@ module_init(zilog_init);
module_exit(zilog_exit);
MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
-MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
- "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
- "Andy Walls");
+MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, Andy Walls");
MODULE_LICENSE("GPL");
/* for compat with old name, which isn't all that accurate anymore */
MODULE_ALIAS("lirc_pvr150");
diff --git a/drivers/staging/media/mn88472/mn88472.c b/drivers/staging/media/mn88472/mn88472.c
index cf2e96bcf395..7ea749cf19f9 100644
--- a/drivers/staging/media/mn88472/mn88472.c
+++ b/drivers/staging/media/mn88472/mn88472.c
@@ -96,9 +96,9 @@ static int mn88472_set_frontend(struct dvb_frontend *fe)
/* Calculate IF registers ( (1<<24)*IF / Xtal ) */
tmp = div_u64(if_frequency * (u64)(1<<24) + (dev->xtal / 2),
dev->xtal);
- if_val[0] = ((tmp >> 16) & 0xff);
- if_val[1] = ((tmp >> 8) & 0xff);
- if_val[2] = ((tmp >> 0) & 0xff);
+ if_val[0] = (tmp >> 16) & 0xff;
+ if_val[1] = (tmp >> 8) & 0xff;
+ if_val[2] = (tmp >> 0) & 0xff;
ret = regmap_write(dev->regmap[2], 0xfb, 0x13);
ret = regmap_write(dev->regmap[2], 0xef, 0x13);
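/*
 * Illustrative aside, not part of the patch: the IF register value computed
 * above is a 24-bit fixed-point ratio, rounded to nearest:
 * tmp = ((1 << 24) * IF + Xtal / 2) / Xtal, then split MSB-first into three
 * bytes. Standalone version; the 5.07 MHz IF and 20.5 MHz crystal are
 * assumed example values, not taken from the diff:
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t if_frequency = 5070000;	/* assumed IF, Hz */
	uint64_t xtal = 20500000;		/* assumed crystal, Hz */
	uint64_t tmp = (if_frequency * (1ULL << 24) + xtal / 2) / xtal;
	uint8_t if_val[3] = {
		(tmp >> 16) & 0xff,
		(tmp >> 8) & 0xff,
		tmp & 0xff,
	};

	printf("tmp=0x%06" PRIx64 " -> %02x %02x %02x\n",
	       tmp, if_val[0], if_val[1], if_val[2]);
	return 0;
}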
@@ -456,7 +456,7 @@ static int mn88472_probe(struct i2c_client *client,
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (dev == NULL) {
+ if (!dev) {
ret = -ENOMEM;
goto err;
}
@@ -483,7 +483,7 @@ static int mn88472_probe(struct i2c_client *client,
* 0x1a and 0x1c, in order to get own I2C client for each register page.
*/
dev->client[1] = i2c_new_dummy(client->adapter, 0x1a);
- if (dev->client[1] == NULL) {
+ if (!dev->client[1]) {
ret = -ENODEV;
dev_err(&client->dev, "I2C registration failed\n");
if (ret)
@@ -497,7 +497,7 @@ static int mn88472_probe(struct i2c_client *client,
i2c_set_clientdata(dev->client[1], dev);
dev->client[2] = i2c_new_dummy(client->adapter, 0x1c);
- if (dev->client[2] == NULL) {
+ if (!dev->client[2]) {
ret = -ENODEV;
dev_err(&client->dev, "2nd I2C registration failed\n");
if (ret)
diff --git a/drivers/staging/media/mn88473/Kconfig b/drivers/staging/media/mn88473/Kconfig
deleted file mode 100644
index 6c9ebf51c2c7..000000000000
--- a/drivers/staging/media/mn88473/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config DVB_MN88473
- tristate "Panasonic MN88473"
- depends on DVB_CORE && I2C
- select REGMAP_I2C
- default m if !MEDIA_SUBDRV_AUTOSELECT
- help
- Say Y when you want to support this frontend.
diff --git a/drivers/staging/media/mn88473/Makefile b/drivers/staging/media/mn88473/Makefile
deleted file mode 100644
index fac55410ce55..000000000000
--- a/drivers/staging/media/mn88473/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-obj-$(CONFIG_DVB_MN88473) += mn88473.o
-
-ccflags-y += -Idrivers/media/dvb-core/
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners/
diff --git a/drivers/staging/media/mn88473/TODO b/drivers/staging/media/mn88473/TODO
deleted file mode 100644
index b90a14be3beb..000000000000
--- a/drivers/staging/media/mn88473/TODO
+++ /dev/null
@@ -1,21 +0,0 @@
-Driver general quality is not good enough for mainline. Also, other
-device drivers (USB-bridge, tuner) needed for Astrometa receiver in
-question could need some changes. However, if that driver is mainlined
-due to some other device than Astrometa, unrelated TODOs could be
-skipped. In that case rtl28xxu driver needs module parameter to prevent
-driver loading.
-
-Required TODOs:
-* missing lock flags
-* I2C errors
-* tuner sensitivity
-
-*Do not* send any patch fixing checkpatch.pl issues. Currently it passes
-checkpatch.pl tests. I don't want waste my time to review this kind of
-trivial stuff. *Do not* add missing register I/O error checks. Those are
-missing for the reason it is much easier to compare I2C data sniffs when
-there is less lines. Those error checks are about the last thing to be added.
-
-Patches should be submitted to:
-linux-media@vger.kernel.org and Antti Palosaari <crope@iki.fi>
-
diff --git a/drivers/staging/media/mx2/Kconfig b/drivers/staging/media/mx2/Kconfig
new file mode 100644
index 000000000000..beaa885cf104
--- /dev/null
+++ b/drivers/staging/media/mx2/Kconfig
@@ -0,0 +1,15 @@
+config VIDEO_MX2
+ tristate "i.MX27 Camera Sensor Interface driver"
+ depends on VIDEO_DEV && SOC_CAMERA
+ depends on SOC_IMX27 || COMPILE_TEST
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a v4l2 driver for the i.MX27 Camera Sensor Interface
+
+ This driver is deprecated: it should become a stand-alone driver
+ instead of using the soc-camera framework.
+
+ Unless someone is willing to take this on (unlikely with such
+ ancient hardware) it is going to be removed from the kernel
+ soon.
diff --git a/drivers/staging/media/mx2/Makefile b/drivers/staging/media/mx2/Makefile
new file mode 100644
index 000000000000..fc5b2826a558
--- /dev/null
+++ b/drivers/staging/media/mx2/Makefile
@@ -0,0 +1,3 @@
+# Makefile for i.MX27 Camera Sensor driver
+
+obj-$(CONFIG_VIDEO_MX2) += mx2_camera.o
diff --git a/drivers/staging/media/mx2/TODO b/drivers/staging/media/mx2/TODO
new file mode 100644
index 000000000000..bc68fa443a3e
--- /dev/null
+++ b/drivers/staging/media/mx2/TODO
@@ -0,0 +1,10 @@
+This driver is deprecated: it should become a stand-alone driver instead of
+using the soc-camera framework.
+
+Unless someone is willing to take this on (unlikely with such ancient
+hardware) it is going to be removed from the kernel soon.
+
+Note that trivial patches will not be accepted anymore, only a full conversion.
+
+If you want to convert this driver, please contact the linux-media mailing list
+(see http://linuxtv.org/lists.php).
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/staging/media/mx2/mx2_camera.c
index 48dd5b7851b5..48dd5b7851b5 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/staging/media/mx2/mx2_camera.c
diff --git a/drivers/staging/media/mx3/Kconfig b/drivers/staging/media/mx3/Kconfig
new file mode 100644
index 000000000000..595d5fe7cad1
--- /dev/null
+++ b/drivers/staging/media/mx3/Kconfig
@@ -0,0 +1,15 @@
+config VIDEO_MX3
+ tristate "i.MX3x Camera Sensor Interface driver"
+ depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
+ depends on MX3_IPU || COMPILE_TEST
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a v4l2 driver for the i.MX3x Camera Sensor Interface
+
+ This driver is deprecated: it should become a stand-alone driver
+ instead of using the soc-camera framework.
+
+ Unless someone is willing to take this on (unlikely with such
+ ancient hardware) it is going to be removed from the kernel
+ soon.
diff --git a/drivers/staging/media/mx3/Makefile b/drivers/staging/media/mx3/Makefile
new file mode 100644
index 000000000000..6d91dcd80c1d
--- /dev/null
+++ b/drivers/staging/media/mx3/Makefile
@@ -0,0 +1,3 @@
+# Makefile for i.MX3x Camera Sensor driver
+
+obj-$(CONFIG_VIDEO_MX3) += mx3_camera.o
diff --git a/drivers/staging/media/mx3/TODO b/drivers/staging/media/mx3/TODO
new file mode 100644
index 000000000000..bc68fa443a3e
--- /dev/null
+++ b/drivers/staging/media/mx3/TODO
@@ -0,0 +1,10 @@
+This driver is deprecated: it should become a stand-alone driver instead of
+using the soc-camera framework.
+
+Unless someone is willing to take this on (unlikely with such ancient
+hardware) it is going to be removed from the kernel soon.
+
+Note that trivial patches will not be accepted anymore, only a full conversion.
+
+If you want to convert this driver, please contact the linux-media mailing list
+(see http://linuxtv.org/lists.php).
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/staging/media/mx3/mx3_camera.c
index 169ed1150226..aa39e9569b1a 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/staging/media/mx3/mx3_camera.c
@@ -146,8 +146,8 @@ static void mx3_cam_dma_done(void *arg)
struct idmac_channel *ichannel = to_idmac_chan(chan);
struct mx3_camera_dev *mx3_cam = ichannel->client;
- dev_dbg(chan->device->dev, "callback cookie %d, active DMA 0x%08x\n",
- desc->txd.cookie, mx3_cam->active ? sg_dma_address(&mx3_cam->active->sg) : 0);
+ dev_dbg(chan->device->dev, "callback cookie %d, active DMA %pad\n",
+ desc->txd.cookie, mx3_cam->active ? &sg_dma_address(&mx3_cam->active->sg) : NULL);
spin_lock(&mx3_cam->lock);
if (mx3_cam->active) {
@@ -314,8 +314,8 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
spin_unlock_irq(&mx3_cam->lock);
cookie = txd->tx_submit(txd);
- dev_dbg(icd->parent, "Submitted cookie %d DMA 0x%08x\n",
- cookie, sg_dma_address(&buf->sg));
+ dev_dbg(icd->parent, "Submitted cookie %d DMA %pad\n",
+ cookie, &sg_dma_address(&buf->sg));
if (cookie >= 0)
return;
@@ -344,8 +344,8 @@ static void mx3_videobuf_release(struct vb2_buffer *vb)
unsigned long flags;
dev_dbg(icd->parent,
- "Release%s DMA 0x%08x, queue %sempty\n",
- mx3_cam->active == buf ? " active" : "", sg_dma_address(&buf->sg),
+ "Release%s DMA %pad, queue %sempty\n",
+ mx3_cam->active == buf ? " active" : "", &sg_dma_address(&buf->sg),
list_empty(&buf->queue) ? "" : "not ");
spin_lock_irqsave(&mx3_cam->lock, flags);
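/*
 * Illustrative aside, not part of the patch: the dev_dbg() changes above
 * replace "0x%08x" with %pad because dma_addr_t may be 32 or 64 bits wide
 * depending on the kernel config; %pad takes a *pointer* to the dma_addr_t
 * and prints it at its native width. A userspace analogue of printing
 * through a pointer at the type's real width (the typedef here is an
 * assumption for the sketch):
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* width is config-dependent in-kernel */

static void print_dma(const dma_addr_t *addr)
{
	printf("DMA %" PRIx64 "\n", (uint64_t)*addr);
}

int main(void)
{
	dma_addr_t a = 0x12345678abcdULL;	/* needs more than 32 bits */

	print_dma(&a);
	return 0;
}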
diff --git a/drivers/staging/media/omap1/Kconfig b/drivers/staging/media/omap1/Kconfig
new file mode 100644
index 000000000000..6cfab3a04ae1
--- /dev/null
+++ b/drivers/staging/media/omap1/Kconfig
@@ -0,0 +1,13 @@
+config VIDEO_OMAP1
+ tristate "OMAP1 Camera Interface driver"
+ depends on VIDEO_DEV && SOC_CAMERA
+ depends on ARCH_OMAP1
+ depends on HAS_DMA
+ select VIDEOBUF_DMA_CONTIG
+ select VIDEOBUF_DMA_SG
+ ---help---
+ This is a v4l2 driver for the TI OMAP1 camera interface
+
+	  This driver is deprecated and will be removed soon unless someone
+	  starts the work to convert this driver to the vb2 framework
+	  and removes the soc-camera dependency.
diff --git a/drivers/staging/media/omap1/Makefile b/drivers/staging/media/omap1/Makefile
new file mode 100644
index 000000000000..2885622600f2
--- /dev/null
+++ b/drivers/staging/media/omap1/Makefile
@@ -0,0 +1,3 @@
+# Makefile for OMAP1 driver
+
+obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
diff --git a/drivers/staging/media/omap1/TODO b/drivers/staging/media/omap1/TODO
new file mode 100644
index 000000000000..1025f9f60ff0
--- /dev/null
+++ b/drivers/staging/media/omap1/TODO
@@ -0,0 +1,8 @@
+This driver is deprecated and will be removed soon unless someone starts
+the work to convert this driver to the vb2 framework and removes the
+soc-camera dependency.
+
+Note that trivial patches will not be accepted anymore, only a full conversion.
+
+If you want to convert this driver, please contact the linux-media mailing list
+(see http://linuxtv.org/lists.php).
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/staging/media/omap1/omap1_camera.c
index bd721e35474a..bd721e35474a 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/staging/media/omap1/omap1_camera.c
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index 30b473cfb020..c5a5138b3d3b 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -363,215 +363,6 @@ static irqreturn_t iss_isr(int irq, void *_iss)
}
/* -----------------------------------------------------------------------------
- * Pipeline power management
- *
- * Entities must be powered up when part of a pipeline that contains at least
- * one open video device node.
- *
- * To achieve this use the entity use_count field to track the number of users.
- * For entities corresponding to video device nodes the use_count field stores
- * the users count of the node. For entities corresponding to subdevs the
- * use_count field stores the total number of users of all video device nodes
- * in the pipeline.
- *
- * The omap4iss_pipeline_pm_use() function must be called in the open() and
- * close() handlers of video device nodes. It increments or decrements the use
- * count of all subdev entities in the pipeline.
- *
- * To react to link management on powered pipelines, the link setup notification
- * callback updates the use count of all entities in the source and sink sides
- * of the link.
- */
-
-/*
- * iss_pipeline_pm_use_count - Count the number of users of a pipeline
- * @entity: The entity
- *
- * Return the total number of users of all video device nodes in the pipeline.
- */
-static int iss_pipeline_pm_use_count(struct media_entity *entity,
- struct media_entity_graph *graph)
-{
- int use = 0;
-
- media_entity_graph_walk_start(graph, entity);
-
- while ((entity = media_entity_graph_walk_next(graph))) {
- if (is_media_entity_v4l2_io(entity))
- use += entity->use_count;
- }
-
- return use;
-}
-
-/*
- * iss_pipeline_pm_power_one - Apply power change to an entity
- * @entity: The entity
- * @change: Use count change
- *
- * Change the entity use count by @change. If the entity is a subdev update its
- * power state by calling the core::s_power operation when the use count goes
- * from 0 to != 0 or from != 0 to 0.
- *
- * Return 0 on success or a negative error code on failure.
- */
-static int iss_pipeline_pm_power_one(struct media_entity *entity, int change)
-{
- struct v4l2_subdev *subdev;
-
- subdev = is_media_entity_v4l2_subdev(entity)
- ? media_entity_to_v4l2_subdev(entity) : NULL;
-
- if (entity->use_count == 0 && change > 0 && subdev) {
- int ret;
-
- ret = v4l2_subdev_call(subdev, core, s_power, 1);
- if (ret < 0 && ret != -ENOIOCTLCMD)
- return ret;
- }
-
- entity->use_count += change;
- WARN_ON(entity->use_count < 0);
-
- if (entity->use_count == 0 && change < 0 && subdev)
- v4l2_subdev_call(subdev, core, s_power, 0);
-
- return 0;
-}
-
-/*
- * iss_pipeline_pm_power - Apply power change to all entities in a pipeline
- * @entity: The entity
- * @change: Use count change
- *
- * Walk the pipeline to update the use count and the power state of all non-node
- * entities.
- *
- * Return 0 on success or a negative error code on failure.
- */
-static int iss_pipeline_pm_power(struct media_entity *entity, int change,
- struct media_entity_graph *graph)
-{
- struct media_entity *first = entity;
- int ret = 0;
-
- if (!change)
- return 0;
-
- media_entity_graph_walk_start(graph, entity);
-
- while (!ret && (entity = media_entity_graph_walk_next(graph)))
- if (is_media_entity_v4l2_subdev(entity))
- ret = iss_pipeline_pm_power_one(entity, change);
-
- if (!ret)
- return 0;
-
- media_entity_graph_walk_start(graph, first);
-
- while ((first = media_entity_graph_walk_next(graph)) &&
- first != entity)
- if (is_media_entity_v4l2_subdev(first))
- iss_pipeline_pm_power_one(first, -change);
-
- return ret;
-}
-
-/*
- * omap4iss_pipeline_pm_use - Update the use count of an entity
- * @entity: The entity
- * @use: Use (1) or stop using (0) the entity
- *
- * Update the use count of all entities in the pipeline and power entities on or
- * off accordingly.
- *
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. No failure can occur when the use parameter is
- * set to 0.
- */
-int omap4iss_pipeline_pm_use(struct media_entity *entity, int use,
- struct media_entity_graph *graph)
-{
- int change = use ? 1 : -1;
- int ret;
-
- mutex_lock(&entity->graph_obj.mdev->graph_mutex);
-
- /* Apply use count to node. */
- entity->use_count += change;
- WARN_ON(entity->use_count < 0);
-
- /* Apply power change to connected non-nodes. */
- ret = iss_pipeline_pm_power(entity, change, graph);
- if (ret < 0)
- entity->use_count -= change;
-
- mutex_unlock(&entity->graph_obj.mdev->graph_mutex);
-
- return ret;
-}
-
-/*
- * iss_pipeline_link_notify - Link management notification callback
- * @link: The link
- * @flags: New link flags that will be applied
- *
- * React to link management on powered pipelines by updating the use count of
- * all entities in the source and sink sides of the link. Entities are powered
- * on or off accordingly.
- *
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. This function will not fail for disconnection
- * events.
- */
-static int iss_pipeline_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct media_entity_graph *graph =
- &container_of(link->graph_obj.mdev, struct iss_device,
- media_dev)->pm_count_graph;
- struct media_entity *source = link->source->entity;
- struct media_entity *sink = link->sink->entity;
- int source_use;
- int sink_use;
- int ret;
-
- if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) {
- ret = media_entity_graph_walk_init(graph,
- link->graph_obj.mdev);
- if (ret)
- return ret;
- }
-
- source_use = iss_pipeline_pm_use_count(source, graph);
- sink_use = iss_pipeline_pm_use_count(sink, graph);
-
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
- !(flags & MEDIA_LNK_FL_ENABLED)) {
- /* Powering off entities is assumed to never fail. */
- iss_pipeline_pm_power(source, -sink_use, graph);
- iss_pipeline_pm_power(sink, -source_use, graph);
- return 0;
- }
-
- if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
- (flags & MEDIA_LNK_FL_ENABLED)) {
- ret = iss_pipeline_pm_power(source, sink_use, graph);
- if (ret < 0)
- return ret;
-
- ret = iss_pipeline_pm_power(sink, source_use, graph);
- if (ret < 0)
- iss_pipeline_pm_power(source, -sink_use, graph);
- }
-
- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH)
- media_entity_graph_walk_cleanup(graph);
-
- return ret;
-}
-
-/* -----------------------------------------------------------------------------
* Pipeline stream management
*/
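/*
 * Illustrative aside, not part of the patch: the helpers deleted above are
 * superseded by v4l2_pipeline_pm_use()/v4l2_pipeline_link_notify() in the
 * V4L2 core, and all implement reference-counted power management: power an
 * entity on when its use count goes 0 -> 1 and off when it returns to 0.
 * A toy model of that rule (types and names are illustrative):
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct entity {
	int use_count;
	bool powered;
};

static void pm_use(struct entity *e, int change)
{
	if (e->use_count == 0 && change > 0)
		e->powered = true;		/* 0 -> !0: power on */
	e->use_count += change;
	assert(e->use_count >= 0);
	if (e->use_count == 0 && change < 0)
		e->powered = false;		/* back to 0: power off */
}

int main(void)
{
	struct entity e = { 0, false };

	pm_use(&e, 1);	/* first user powers the entity on */
	pm_use(&e, 1);	/* second user only bumps the count */
	pm_use(&e, -1);
	pm_use(&e, -1);	/* last user powers it off */
	printf("count=%d powered=%d\n", e.use_count, e.powered);
	return 0;
}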
@@ -1197,7 +988,7 @@ static int iss_register_entities(struct iss_device *iss)
strlcpy(iss->media_dev.model, "TI OMAP4 ISS",
sizeof(iss->media_dev.model));
iss->media_dev.hw_revision = iss->revision;
- iss->media_dev.link_notify = iss_pipeline_link_notify;
+ iss->media_dev.link_notify = v4l2_pipeline_link_notify;
ret = media_device_register(&iss->media_dev);
if (ret < 0) {
dev_err(iss->dev, "Media device registration failed (%d)\n",
@@ -1527,8 +1318,6 @@ error_modules:
error_iss:
omap4iss_put(iss);
error:
- platform_set_drvdata(pdev, NULL);
-
mutex_destroy(&iss->iss_mutex);
return ret;
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
index 05f08a3caa19..760ee27da704 100644
--- a/drivers/staging/media/omap4iss/iss.h
+++ b/drivers/staging/media/omap4iss/iss.h
@@ -15,6 +15,8 @@
#define _OMAP4_ISS_H_
#include <media/v4l2-device.h>
+#include <media/v4l2-mc.h>
+
#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -87,7 +89,6 @@ struct iss_reg {
struct iss_device {
struct v4l2_device v4l2_dev;
struct media_device media_dev;
- struct media_entity_graph pm_count_graph;
struct device *dev;
u32 revision;
@@ -152,9 +153,6 @@ void omap4iss_isp_subclk_enable(struct iss_device *iss,
void omap4iss_isp_subclk_disable(struct iss_device *iss,
enum iss_isp_subclk_resource res);
-int omap4iss_pipeline_pm_use(struct media_entity *entity, int use,
- struct media_entity_graph *graph);
-
int omap4iss_register_entities(struct platform_device *pdev,
struct v4l2_device *v4l2_dev);
void omap4iss_unregister_entities(struct platform_device *pdev);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 058233a9de67..f54349bce4de 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -19,8 +19,10 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
+
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
#include "iss_video.h"
#include "iss.h"
@@ -1009,13 +1011,7 @@ static int iss_video_open(struct file *file)
goto done;
}
- ret = media_entity_graph_walk_init(&handle->graph,
- &video->iss->media_dev);
- if (ret)
- goto done;
-
- ret = omap4iss_pipeline_pm_use(&video->video.entity, 1,
- &handle->graph);
+ ret = v4l2_pipeline_pm_use(&video->video.entity, 1);
if (ret < 0) {
omap4iss_put(video->iss);
goto done;
@@ -1054,7 +1050,6 @@ static int iss_video_open(struct file *file)
done:
if (ret < 0) {
v4l2_fh_del(&handle->vfh);
- media_entity_graph_walk_cleanup(&handle->graph);
kfree(handle);
}
@@ -1070,13 +1065,11 @@ static int iss_video_release(struct file *file)
/* Disable streaming and free the buffers queue resources. */
iss_video_streamoff(file, vfh, video->type);
- omap4iss_pipeline_pm_use(&video->video.entity, 0, &handle->graph);
+ v4l2_pipeline_pm_use(&video->video.entity, 0);
/* Release the videobuf2 queue */
vb2_queue_release(&handle->queue);
- /* Release the file handle. */
- media_entity_graph_walk_cleanup(&handle->graph);
v4l2_fh_del(vfh);
kfree(handle);
file->private_data = NULL;
diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h
index 34588b7176ca..c8bd2958a3f8 100644
--- a/drivers/staging/media/omap4iss/iss_video.h
+++ b/drivers/staging/media/omap4iss/iss_video.h
@@ -183,7 +183,6 @@ struct iss_video_fh {
struct vb2_queue queue;
struct v4l2_format format;
struct v4l2_fract timeperframe;
- struct media_entity_graph graph;
};
#define to_iss_video_fh(fh) container_of(fh, struct iss_video_fh, vfh)
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index dc3fb25b52aa..de4f76abfb47 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -32,6 +32,7 @@ static struct most_aim cdev_aim;
struct aim_channel {
wait_queue_head_t wq;
+ spinlock_t unlink; /* synchronization lock to unlink channels */
struct cdev cdev;
struct device *dev;
struct mutex io_mutex;
@@ -39,11 +40,9 @@ struct aim_channel {
struct most_channel_config *cfg;
unsigned int channel_id;
dev_t devno;
- bool keep_mbo;
- unsigned int mbo_offs;
- struct mbo *stacked_mbo;
+ size_t mbo_offs;
DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
- atomic_t access_ref;
+ int access_ref;
struct list_head list;
};
@@ -51,15 +50,26 @@ struct aim_channel {
static struct list_head channel_list;
static spinlock_t ch_list_lock;
+static inline bool ch_has_mbo(struct aim_channel *c)
+{
+ return channel_has_mbo(c->iface, c->channel_id, &cdev_aim) > 0;
+}
+
+static inline bool ch_get_mbo(struct aim_channel *c, struct mbo **mbo)
+{
+ *mbo = most_get_mbo(c->iface, c->channel_id, &cdev_aim);
+ return *mbo;
+}
+
static struct aim_channel *get_channel(struct most_interface *iface, int id)
{
- struct aim_channel *channel, *tmp;
+ struct aim_channel *c, *tmp;
unsigned long flags;
int found_channel = 0;
spin_lock_irqsave(&ch_list_lock, flags);
- list_for_each_entry_safe(channel, tmp, &channel_list, list) {
- if ((channel->iface == iface) && (channel->channel_id == id)) {
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ if ((c->iface == iface) && (c->channel_id == id)) {
found_channel = 1;
break;
}
@@ -67,7 +77,29 @@ static struct aim_channel *get_channel(struct most_interface *iface, int id)
spin_unlock_irqrestore(&ch_list_lock, flags);
if (!found_channel)
return NULL;
- return channel;
+ return c;
+}
+
+static void stop_channel(struct aim_channel *c)
+{
+ struct mbo *mbo;
+
+ while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
+ most_put_mbo(mbo);
+ most_stop_channel(c->iface, c->channel_id, &cdev_aim);
+}
+
+static void destroy_cdev(struct aim_channel *c)
+{
+ unsigned long flags;
+
+ device_destroy(aim_class, c->devno);
+ cdev_del(&c->cdev);
+ kfifo_free(&c->fifo);
+ spin_lock_irqsave(&ch_list_lock, flags);
+ list_del(&c->list);
+ spin_unlock_irqrestore(&ch_list_lock, flags);
+ ida_simple_remove(&minor_id, MINOR(c->devno));
}
/**
@@ -80,29 +112,38 @@ static struct aim_channel *get_channel(struct most_interface *iface, int id)
*/
static int aim_open(struct inode *inode, struct file *filp)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
int ret;
- channel = to_channel(inode->i_cdev);
- filp->private_data = channel;
+ c = to_channel(inode->i_cdev);
+ filp->private_data = c;
- if (((channel->cfg->direction == MOST_CH_RX) &&
+ if (((c->cfg->direction == MOST_CH_RX) &&
((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
- ((channel->cfg->direction == MOST_CH_TX) &&
+ ((c->cfg->direction == MOST_CH_TX) &&
((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
pr_info("WARN: Access flags mismatch\n");
return -EACCES;
}
- if (!atomic_inc_and_test(&channel->access_ref)) {
+
+ mutex_lock(&c->io_mutex);
+ if (!c->dev) {
+ pr_info("WARN: Device is destroyed\n");
+ mutex_unlock(&c->io_mutex);
+ return -EBUSY;
+ }
+
+ if (c->access_ref) {
pr_info("WARN: Device is busy\n");
- atomic_dec(&channel->access_ref);
+ mutex_unlock(&c->io_mutex);
return -EBUSY;
}
- ret = most_start_channel(channel->iface, channel->channel_id,
- &cdev_aim);
- if (ret)
- atomic_dec(&channel->access_ref);
+ c->mbo_offs = 0;
+ ret = most_start_channel(c->iface, c->channel_id, &cdev_aim);
+ if (!ret)
+ c->access_ref = 1;
+ mutex_unlock(&c->io_mutex);
return ret;
}
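
The open path above replaces the old atomic_inc_and_test() trick (which only worked because the counter was initialised to -1 in probe) with a plain int access_ref that is read and written only under io_mutex; the same critical section also checks c->dev, so an open racing a disconnect either sees the device gone or claims the channel atomically. A minimal, self-contained sketch of that single-open pattern, using pthreads in place of the kernel mutex (names are illustrative):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct chan {
        pthread_mutex_t io_mutex;
        bool present;      /* cleared by the disconnect path */
        int access_ref;    /* 0 = free, 1 = opened */
    };

    static int chan_open(struct chan *c)
    {
        int ret = 0;

        pthread_mutex_lock(&c->io_mutex);
        if (!c->present || c->access_ref)
            ret = -EBUSY;      /* device gone, or already open */
        else
            c->access_ref = 1; /* claimed while holding the lock */
        pthread_mutex_unlock(&c->io_mutex);
        return ret;
    }

    int main(void)
    {
        struct chan c = { PTHREAD_MUTEX_INITIALIZER, true, 0 };

        return chan_open(&c) || chan_open(&c) != -EBUSY;
    }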
@@ -115,33 +156,21 @@ static int aim_open(struct inode *inode, struct file *filp)
*/
static int aim_close(struct inode *inode, struct file *filp)
{
- int ret;
- struct mbo *mbo;
- struct aim_channel *channel = to_channel(inode->i_cdev);
-
- mutex_lock(&channel->io_mutex);
- if (!channel->dev) {
- mutex_unlock(&channel->io_mutex);
- atomic_dec(&channel->access_ref);
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- wake_up_interruptible(&channel->wq);
- kfree(channel);
- return 0;
+ struct aim_channel *c = to_channel(inode->i_cdev);
+
+ mutex_lock(&c->io_mutex);
+ spin_lock(&c->unlink);
+ c->access_ref = 0;
+ spin_unlock(&c->unlink);
+ if (c->dev) {
+ stop_channel(c);
+ mutex_unlock(&c->io_mutex);
+ } else {
+ destroy_cdev(c);
+ mutex_unlock(&c->io_mutex);
+ kfree(c);
}
- mutex_unlock(&channel->io_mutex);
-
- while (kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1))
- most_put_mbo(mbo);
- if (channel->keep_mbo)
- most_put_mbo(channel->stacked_mbo);
- ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim);
- atomic_dec(&channel->access_ref);
- wake_up_interruptible(&channel->wq);
- return ret;
+ return 0;
}
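
aim_close() no longer tears the cdev down itself in the common case: if the device is still registered it merely stops the channel, and only when aim_disconnect_channel() has already run (c->dev == NULL) does close perform destroy_cdev() and the final kfree(). The two paths form a "whoever finishes last frees" handshake, which this hedged userspace sketch models with two flags cleared under one lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
        pthread_mutex_t lock;
        bool device_present;  /* cleared by the disconnect path */
        bool file_open;       /* cleared by the close path */
    };

    /* Whichever side clears its flag second sees both false and frees. */
    static void release_side(struct obj *o, bool *my_flag)
    {
        bool last;

        pthread_mutex_lock(&o->lock);
        *my_flag = false;
        last = !o->device_present && !o->file_open;
        pthread_mutex_unlock(&o->lock);
        if (last)
            free(o);  /* destroy_cdev() + kfree() in the driver */
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        pthread_mutex_init(&o->lock, NULL);
        o->device_present = true;
        o->file_open = true;
        release_side(o, &o->device_present); /* disconnect first: no free */
        release_side(o, &o->file_open);      /* close runs last: frees */
        return 0;
    }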
/**
@@ -154,62 +183,48 @@ static int aim_close(struct inode *inode, struct file *filp)
static ssize_t aim_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offset)
{
- int ret, err;
- size_t actual_len = 0;
- size_t max_len = 0;
- ssize_t retval;
- struct mbo *mbo;
- struct aim_channel *channel = filp->private_data;
-
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
- return -EPIPE;
- }
- mutex_unlock(&channel->io_mutex);
+ int ret;
+ size_t actual_len;
+ size_t max_len;
+ struct mbo *mbo = NULL;
+ struct aim_channel *c = filp->private_data;
- mbo = most_get_mbo(channel->iface, channel->channel_id, &cdev_aim);
+ mutex_lock(&c->io_mutex);
+ while (c->dev && !ch_get_mbo(c, &mbo)) {
+ mutex_unlock(&c->io_mutex);
- if (!mbo) {
if ((filp->f_flags & O_NONBLOCK))
return -EAGAIN;
- if (wait_event_interruptible(
- channel->wq,
- (mbo = most_get_mbo(channel->iface,
- channel->channel_id,
- &cdev_aim)) ||
- (!channel->dev)))
+ if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
return -ERESTARTSYS;
+ mutex_lock(&c->io_mutex);
}
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
- err = -EPIPE;
- goto error;
+ if (unlikely(!c->dev)) {
+ ret = -EPIPE;
+ goto unlock;
}
- mutex_unlock(&channel->io_mutex);
- max_len = channel->cfg->buffer_size;
+ max_len = c->cfg->buffer_size;
actual_len = min(count, max_len);
mbo->buffer_length = actual_len;
- retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
- if (retval) {
- err = -EIO;
- goto error;
+ if (copy_from_user(mbo->virt_address, buf, mbo->buffer_length)) {
+ ret = -EFAULT;
+ goto put_mbo;
}
ret = most_submit_mbo(mbo);
- if (ret) {
- pr_info("submitting MBO to core failed\n");
- err = ret;
- goto error;
- }
- return actual_len - retval;
-error:
+ if (ret)
+ goto put_mbo;
+
+ mutex_unlock(&c->io_mutex);
+ return actual_len;
+put_mbo:
most_put_mbo(mbo);
- return err;
+unlock:
+ mutex_unlock(&c->io_mutex);
+ return ret;
}
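
The rewritten write path never sleeps holding io_mutex: the lock is dropped before blocking for a buffer and retaken afterwards, and c->dev is re-checked because the device can disappear while the caller sleeps; the tail of the function is a conventional put_mbo/unlock goto ladder. In the driver the sleep is wait_event_interruptible(); the same drop-wait-recheck shape, rendered as a runnable condition-variable sketch (illustrative names):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct chan {
        pthread_mutex_t io_mutex;
        pthread_cond_t wq;
        bool present;
        int free_buffers;
    };

    /* Returns 0 with a buffer reserved, -EPIPE if the device vanished. */
    static int get_buffer_blocking(struct chan *c)
    {
        pthread_mutex_lock(&c->io_mutex);
        while (c->present && c->free_buffers == 0)
            /* the lock is dropped for the duration of the sleep */
            pthread_cond_wait(&c->wq, &c->io_mutex);
        if (!c->present) {  /* re-check after reacquiring the lock */
            pthread_mutex_unlock(&c->io_mutex);
            return -EPIPE;
        }
        c->free_buffers--;
        pthread_mutex_unlock(&c->io_mutex);
        return 0;
    }

    int main(void)
    {
        struct chan c = { PTHREAD_MUTEX_INITIALIZER,
                          PTHREAD_COND_INITIALIZER, true, 1 };

        return get_buffer_blocking(&c); /* succeeds without sleeping */
    }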
/**
@@ -222,59 +237,46 @@ error:
static ssize_t
aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
- ssize_t retval;
- size_t not_copied, proc_len;
+ size_t to_copy, not_copied, copied;
struct mbo *mbo;
- struct aim_channel *channel = filp->private_data;
+ struct aim_channel *c = filp->private_data;
- if (channel->keep_mbo) {
- mbo = channel->stacked_mbo;
- channel->keep_mbo = false;
- goto start_copy;
- }
- while ((!kfifo_out(&channel->fifo, &mbo, 1)) && (channel->dev)) {
+ mutex_lock(&c->io_mutex);
+ while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
+ mutex_unlock(&c->io_mutex);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
- if (wait_event_interruptible(channel->wq,
- (!kfifo_is_empty(&channel->fifo) ||
- (!channel->dev))))
+ if (wait_event_interruptible(c->wq,
+ (!kfifo_is_empty(&c->fifo) ||
+ (!c->dev))))
return -ERESTARTSYS;
+ mutex_lock(&c->io_mutex);
}
-start_copy:
/* make sure we don't submit to gone devices */
- mutex_lock(&channel->io_mutex);
- if (unlikely(!channel->dev)) {
- mutex_unlock(&channel->io_mutex);
+ if (unlikely(!c->dev)) {
+ mutex_unlock(&c->io_mutex);
return -EIO;
}
- if (count < mbo->processed_length)
- channel->keep_mbo = true;
-
- proc_len = min((int)count,
- (int)(mbo->processed_length - channel->mbo_offs));
+ to_copy = min_t(size_t,
+ count,
+ mbo->processed_length - c->mbo_offs);
not_copied = copy_to_user(buf,
- mbo->virt_address + channel->mbo_offs,
- proc_len);
+ mbo->virt_address + c->mbo_offs,
+ to_copy);
- retval = not_copied ? proc_len - not_copied : proc_len;
+ copied = to_copy - not_copied;
- if (channel->keep_mbo) {
- channel->mbo_offs = retval;
- channel->stacked_mbo = mbo;
- } else {
+ c->mbo_offs += copied;
+ if (c->mbo_offs >= mbo->processed_length) {
+ kfifo_skip(&c->fifo);
most_put_mbo(mbo);
- channel->mbo_offs = 0;
+ c->mbo_offs = 0;
}
- mutex_unlock(&channel->io_mutex);
- return retval;
-}
-
-static inline bool __must_check IS_ERR_OR_FALSE(int x)
-{
- return x <= 0;
+ mutex_unlock(&c->io_mutex);
+ return copied;
}
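
aim_read() drops the keep_mbo/stacked_mbo bookkeeping entirely: a partially consumed buffer simply stays at the head of the fifo (kfifo_peek()) while mbo_offs records how much has been handed out, and only a fully drained buffer is discarded (kfifo_skip() plus most_put_mbo()); the one-off IS_ERR_OR_FALSE() helper goes away with it, since poll can use ch_has_mbo(). A sketch of the peek/advance/skip pattern over a trivial one-slot queue:

    #include <stddef.h>
    #include <string.h>

    struct buf { const char *data; size_t len; };

    struct reader {
        struct buf *head;  /* stand-in for the kfifo_peek() result */
        size_t offs;       /* bytes of head already handed out */
    };

    /* Copies up to count bytes; discards head only once fully drained. */
    static size_t read_some(struct reader *r, char *out, size_t count)
    {
        size_t to_copy = r->head->len - r->offs;

        if (count < to_copy)
            to_copy = count;
        memcpy(out, r->head->data + r->offs, to_copy);
        r->offs += to_copy;
        if (r->offs >= r->head->len) {
            r->head = NULL;  /* kfifo_skip() + most_put_mbo() here */
            r->offs = 0;
        }
        return to_copy;
    }

    int main(void)
    {
        struct buf b = { "hello world", 11 };
        struct reader r = { &b, 0 };
        char out[8];

        read_some(&r, out, 6);  /* "hello ": head kept, offs = 6 */
        read_some(&r, out, 6);  /* "world": head fully consumed */
        return 0;
    }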
static unsigned int aim_poll(struct file *filp, poll_table *wait)
@@ -288,7 +290,7 @@ static unsigned int aim_poll(struct file *filp, poll_table *wait)
if (!kfifo_is_empty(&c->fifo))
mask |= POLLIN | POLLRDNORM;
} else {
- if (!IS_ERR_OR_FALSE(channel_has_mbo(c->iface, c->channel_id)))
+ if (ch_has_mbo(c))
mask |= POLLOUT | POLLWRNORM;
}
return mask;
@@ -316,33 +318,29 @@ static const struct file_operations channel_fops = {
*/
static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
{
- struct aim_channel *channel;
- unsigned long flags;
+ struct aim_channel *c;
if (!iface) {
pr_info("Bad interface pointer\n");
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (!channel)
+ c = get_channel(iface, channel_id);
+ if (!c)
return -ENXIO;
- mutex_lock(&channel->io_mutex);
- channel->dev = NULL;
- mutex_unlock(&channel->io_mutex);
-
- if (atomic_read(&channel->access_ref)) {
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- spin_lock_irqsave(&ch_list_lock, flags);
- list_del(&channel->list);
- spin_unlock_irqrestore(&ch_list_lock, flags);
- kfree(channel);
+ mutex_lock(&c->io_mutex);
+ spin_lock(&c->unlink);
+ c->dev = NULL;
+ spin_unlock(&c->unlink);
+ if (c->access_ref) {
+ stop_channel(c);
+ wake_up_interruptible(&c->wq);
+ mutex_unlock(&c->io_mutex);
} else {
- wake_up_interruptible(&channel->wq);
+ destroy_cdev(c);
+ mutex_unlock(&c->io_mutex);
+ kfree(c);
}
return 0;
}
@@ -356,21 +354,27 @@ static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
*/
static int aim_rx_completion(struct mbo *mbo)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
if (!mbo)
return -EINVAL;
- channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
- if (!channel)
+ c = get_channel(mbo->ifp, mbo->hdm_channel_id);
+ if (!c)
return -ENXIO;
- kfifo_in(&channel->fifo, &mbo, 1);
+ spin_lock(&c->unlink);
+ if (!c->access_ref || !c->dev) {
+ spin_unlock(&c->unlink);
+ return -EFAULT;
+ }
+ kfifo_in(&c->fifo, &mbo, 1);
+ spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
- if (kfifo_is_full(&channel->fifo))
+ if (kfifo_is_full(&c->fifo))
pr_info("WARN: Fifo is full\n");
#endif
- wake_up_interruptible(&channel->wq);
+ wake_up_interruptible(&c->wq);
return 0;
}
@@ -383,7 +387,7 @@ static int aim_rx_completion(struct mbo *mbo)
*/
static int aim_tx_completion(struct most_interface *iface, int channel_id)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
if (!iface) {
pr_info("Bad interface pointer\n");
@@ -394,15 +398,13 @@ static int aim_tx_completion(struct most_interface *iface, int channel_id)
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (!channel)
+ c = get_channel(iface, channel_id);
+ if (!c)
return -ENXIO;
- wake_up_interruptible(&channel->wq);
+ wake_up_interruptible(&c->wq);
return 0;
}
-static struct most_aim cdev_aim;
-
/**
* aim_probe - probe function of the driver module
* @iface: pointer to interface instance
@@ -419,7 +421,7 @@ static int aim_probe(struct most_interface *iface, int channel_id,
struct most_channel_config *cfg,
struct kobject *parent, char *name)
{
- struct aim_channel *channel;
+ struct aim_channel *c;
unsigned long cl_flags;
int retval;
int current_minor;
@@ -428,60 +430,60 @@ static int aim_probe(struct most_interface *iface, int channel_id,
pr_info("Probing AIM with bad arguments");
return -EINVAL;
}
- channel = get_channel(iface, channel_id);
- if (channel)
+ c = get_channel(iface, channel_id);
+ if (c)
return -EEXIST;
current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
if (current_minor < 0)
return current_minor;
- channel = kzalloc(sizeof(*channel), GFP_KERNEL);
- if (!channel) {
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
retval = -ENOMEM;
goto error_alloc_channel;
}
- channel->devno = MKDEV(major, current_minor);
- cdev_init(&channel->cdev, &channel_fops);
- channel->cdev.owner = THIS_MODULE;
- cdev_add(&channel->cdev, channel->devno, 1);
- channel->iface = iface;
- channel->cfg = cfg;
- channel->channel_id = channel_id;
- channel->mbo_offs = 0;
- atomic_set(&channel->access_ref, -1);
- INIT_KFIFO(channel->fifo);
- retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
+ c->devno = MKDEV(major, current_minor);
+ cdev_init(&c->cdev, &channel_fops);
+ c->cdev.owner = THIS_MODULE;
+ cdev_add(&c->cdev, c->devno, 1);
+ c->iface = iface;
+ c->cfg = cfg;
+ c->channel_id = channel_id;
+ c->access_ref = 0;
+ spin_lock_init(&c->unlink);
+ INIT_KFIFO(c->fifo);
+ retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
if (retval) {
pr_info("failed to alloc channel kfifo");
goto error_alloc_kfifo;
}
- init_waitqueue_head(&channel->wq);
- mutex_init(&channel->io_mutex);
+ init_waitqueue_head(&c->wq);
+ mutex_init(&c->io_mutex);
spin_lock_irqsave(&ch_list_lock, cl_flags);
- list_add_tail(&channel->list, &channel_list);
+ list_add_tail(&c->list, &channel_list);
spin_unlock_irqrestore(&ch_list_lock, cl_flags);
- channel->dev = device_create(aim_class,
+ c->dev = device_create(aim_class,
NULL,
- channel->devno,
+ c->devno,
NULL,
"%s", name);
- retval = IS_ERR(channel->dev);
- if (retval) {
+ if (IS_ERR(c->dev)) {
+ retval = PTR_ERR(c->dev);
pr_info("failed to create new device node %s\n", name);
goto error_create_device;
}
- kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
+ kobject_uevent(&c->dev->kobj, KOBJ_ADD);
return 0;
error_create_device:
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
+ kfifo_free(&c->fifo);
+ list_del(&c->list);
error_alloc_kfifo:
- cdev_del(&channel->cdev);
- kfree(channel);
+ cdev_del(&c->cdev);
+ kfree(c);
error_alloc_channel:
ida_simple_remove(&minor_id, current_minor);
return retval;
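
Besides the rename, the probe hunk fixes a real bug: retval = IS_ERR(channel->dev) stored 1 for any error pointer, so callers got a meaningless positive value instead of the -Exxx code now propagated via PTR_ERR(). The kernel helpers are thin pointer casts, so the difference can be demonstrated in a self-contained sketch (definitions mirror include/linux/err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return IS_ERR_VALUE((unsigned long)ptr);
    }

    int main(void)
    {
        void *dev = ERR_PTR(-12);  /* device_create() failing: -ENOMEM */

        /* buggy form: any failure collapses to the value 1 */
        printf("IS_ERR as code: %d\n", IS_ERR(dev));
        /* fixed form: the caller gets the real -12 back */
        if (IS_ERR(dev))
            printf("PTR_ERR: %ld\n", PTR_ERR(dev));
        return 0;
    }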
@@ -526,19 +528,15 @@ free_cdev:
static void __exit mod_exit(void)
{
- struct aim_channel *channel, *tmp;
+ struct aim_channel *c, *tmp;
pr_info("exit module\n");
most_deregister_aim(&cdev_aim);
- list_for_each_entry_safe(channel, tmp, &channel_list, list) {
- device_destroy(aim_class, channel->devno);
- cdev_del(&channel->cdev);
- kfifo_free(&channel->fifo);
- list_del(&channel->list);
- ida_simple_remove(&minor_id, MINOR(channel->devno));
- kfree(channel);
+ list_for_each_entry_safe(c, tmp, &channel_list, list) {
+ destroy_cdev(c);
+ kfree(c);
}
class_destroy(aim_class);
unregister_chrdev_region(aim_devno, 1);
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 3c7beb03871d..2f42de44d051 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -431,6 +431,7 @@ static int aim_rx_data(struct mbo *mbo)
u32 len = mbo->processed_length;
struct sk_buff *skb;
struct net_device *dev;
+ unsigned int skb_len;
nd = get_net_dev_context(mbo->ifp);
if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
@@ -482,9 +483,13 @@ static int aim_rx_data(struct mbo *mbo)
memcpy(skb_put(skb, len), buf, len);
skb->protocol = eth_type_trans(skb, dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
+ skb_len = skb->len;
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb_len;
+ } else {
+ dev->stats.rx_dropped++;
+ }
out:
most_put_mbo(mbo);
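
netif_rx() takes ownership of the skb and may free it before returning, so the old dev->stats.rx_bytes += skb->len after the call was a latent use-after-free; the fix snapshots skb->len first and, as a bonus, only counts packets the stack actually accepted (NET_RX_SUCCESS), otherwise bumping rx_dropped. The snapshot-before-handoff rule in a runnable sketch:

    #include <stdio.h>
    #include <stdlib.h>

    struct pkt { size_t len; };

    /* Consumer that owns (and here frees) the packet, like netif_rx(). */
    static int deliver(struct pkt *p)
    {
        free(p);   /* the caller must not touch p after this call */
        return 0;  /* 0 = accepted, cf. NET_RX_SUCCESS */
    }

    int main(void)
    {
        unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
        struct pkt *p = malloc(sizeof(*p));
        size_t len;

        p->len = 42;
        len = p->len;           /* snapshot before ownership transfer */
        if (deliver(p) == 0) {  /* p is dead here; use the snapshot */
            rx_packets++;
            rx_bytes += len;
        } else {
            rx_dropped++;
        }
        printf("%lu packets, %lu bytes, %lu dropped\n",
               rx_packets, rx_bytes, rx_dropped);
        return 0;
    }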
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
index 172257596f1f..3c524506ee22 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.c
@@ -84,7 +84,7 @@ static inline bool dim_on_error(u8 error_id, const char *error_message)
struct lld_global_vars_t {
bool dim_is_initialized;
bool mcm_is_initialized;
- struct dim2_regs *dim2; /* DIM2 core base address */
+ struct dim2_regs __iomem *dim2; /* DIM2 core base address */
u32 dbr_map[DBR_MAP_SIZE];
};
@@ -650,7 +650,7 @@ static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
/* -------------------------------------------------------------------------- */
/* API */
-u8 dim_startup(void *dim_base_address, u32 mlb_clock)
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock)
{
g.dim_is_initialized = false;
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
index 48cdd9c8cde1..fc73d4f97734 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -16,6 +16,7 @@
#define _DIM2_HAL_H
#include <linux/types.h>
+#include "dim2_reg.h"
#ifdef __cplusplus
extern "C" {
@@ -65,7 +66,7 @@ struct dim_channel {
u16 done_sw_buffers_number; /*< Done software buffers number. */
};
-u8 dim_startup(void *dim_base_address, u32 mlb_clock);
+u8 dim_startup(struct dim2_regs __iomem *dim_base_address, u32 mlb_clock);
void dim_shutdown(void);
@@ -103,9 +104,9 @@ bool dim_enqueue_buffer(struct dim_channel *ch, u32 buffer_addr,
bool dim_detach_buffers(struct dim_channel *ch, u16 buffers_number);
-u32 dimcb_io_read(u32 *ptr32);
+u32 dimcb_io_read(u32 __iomem *ptr32);
-void dimcb_io_write(u32 *ptr32, u32 value);
+void dimcb_io_write(u32 __iomem *ptr32, u32 value);
void dimcb_on_error(u8 error_id, const char *error_message);
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index 327d738c7194..a36449551513 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -99,7 +99,7 @@ struct dim2_hdm {
struct most_channel_capability capabilities[DMA_CHANNELS];
struct most_interface most_iface;
char name[16 + sizeof "dim2-"];
- void *io_base;
+ void __iomem *io_base;
unsigned int irq_ahb0;
int clk_speed;
struct task_struct *netinfo_task;
@@ -138,9 +138,9 @@ bool dim2_sysfs_get_state_cb(void)
* dimcb_io_read - callback from HAL to read an I/O register
* @ptr32: register address
*/
-u32 dimcb_io_read(u32 *ptr32)
+u32 dimcb_io_read(u32 __iomem *ptr32)
{
- return __raw_readl(ptr32);
+ return readl(ptr32);
}
/**
@@ -148,9 +148,9 @@ u32 dimcb_io_read(u32 *ptr32)
* @ptr32: register address
* @value: value to write
*/
-void dimcb_io_write(u32 *ptr32, u32 value)
+void dimcb_io_write(u32 __iomem *ptr32, u32 value)
{
- __raw_writel(value, ptr32);
+ writel(value, ptr32);
}
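
Switching the HAL callbacks from __raw_readl()/__raw_writel() to readl()/writel() buys two things: the accessors are defined as little-endian (byte-swapping on big-endian CPUs, a no-op on the little-endian SoCs this driver targets) and they order MMIO against DMA, neither of which the __raw_* forms guarantee. The barrier half cannot be modelled portably, but the endianness half can, in a sketch that assumes glibc's <endian.h>:

    #include <endian.h>  /* glibc: le32toh()/htole32() */
    #include <stdint.h>
    #include <stdio.h>

    /* A register image as it sits on the bus: little-endian bytes. */
    static volatile uint32_t mmio_word;

    static uint32_t sketch_readl(const volatile uint32_t *p)
    {
        /* readl(): LE -> CPU conversion; the kernel adds barriers too */
        return le32toh(*p);
    }

    static void sketch_writel(volatile uint32_t *p, uint32_t v)
    {
        /* writel(): CPU -> LE conversion; the kernel adds barriers too */
        *p = htole32(v);
    }

    int main(void)
    {
        sketch_writel(&mmio_word, 0x12345678u);
        printf("0x%08x\n", (unsigned)sketch_readl(&mmio_word));
        return 0;
    }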
/**
@@ -251,7 +251,7 @@ static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
return -EAGAIN;
}
- mbo = list_entry(head->next, struct mbo, list);
+ mbo = list_first_entry(head, struct mbo, list);
buf_size = mbo->buffer_length;
BUG_ON(mbo->bus_address == 0);
@@ -362,7 +362,7 @@ static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
break;
}
- mbo = list_entry(head->next, struct mbo, list);
+ mbo = list_first_entry(head, struct mbo, list);
list_del(head->next);
spin_unlock_irqrestore(&dim_lock, flags);
@@ -495,7 +495,7 @@ static void complete_all_mbos(struct list_head *head)
break;
}
- mbo = list_entry(head->next, struct mbo, list);
+ mbo = list_first_entry(head, struct mbo, list);
list_del(head->next);
spin_unlock_irqrestore(&dim_lock, flags);
@@ -736,7 +736,7 @@ static int dim2_probe(struct platform_device *pdev)
int ret, i;
struct kobject *kobj;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -747,47 +747,31 @@ static int dim2_probe(struct platform_device *pdev)
test_dev = dev;
#else
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- pr_err("no memory region defined\n");
- ret = -ENOENT;
- goto err_free_dev;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- pr_err("failed to request mem region\n");
- ret = -EBUSY;
- goto err_free_dev;
- }
-
- dev->io_base = ioremap(res->start, resource_size(res));
- if (!dev->io_base) {
- pr_err("failed to ioremap\n");
- ret = -ENOMEM;
- goto err_release_mem;
- }
+ dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->io_base))
+ return PTR_ERR(dev->io_base);
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- pr_err("failed to get irq\n");
- goto err_unmap_io;
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
}
dev->irq_ahb0 = ret;
- ret = request_irq(dev->irq_ahb0, dim2_ahb_isr, 0, "mlb_ahb0", dev);
+ ret = devm_request_irq(&pdev->dev, dev->irq_ahb0, dim2_ahb_isr, 0,
+ "mlb_ahb0", dev);
if (ret) {
- pr_err("failed to request IRQ: %d, err: %d\n",
- dev->irq_ahb0, ret);
- goto err_unmap_io;
+ dev_err(&pdev->dev, "failed to request IRQ: %d, err: %d\n",
+ dev->irq_ahb0, ret);
+ return ret;
}
#endif
init_waitqueue_head(&dev->netinfo_waitq);
dev->deliver_netinfo = 0;
dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
"dim2_netinfo");
- if (IS_ERR(dev->netinfo_task)) {
- ret = PTR_ERR(dev->netinfo_task);
- goto err_free_irq;
- }
+ if (IS_ERR(dev->netinfo_task))
+ return PTR_ERR(dev->netinfo_task);
for (i = 0; i < DMA_CHANNELS; i++) {
struct most_channel_capability *cap = dev->capabilities + i;
@@ -833,7 +817,7 @@ static int dim2_probe(struct platform_device *pdev)
kobj = most_register_interface(&dev->most_iface);
if (IS_ERR(kobj)) {
ret = PTR_ERR(kobj);
- pr_err("failed to register MOST interface\n");
+ dev_err(&pdev->dev, "failed to register MOST interface\n");
goto err_stop_thread;
}
@@ -843,7 +827,7 @@ static int dim2_probe(struct platform_device *pdev)
ret = startup_dim(pdev);
if (ret) {
- pr_err("failed to initialize DIM2\n");
+ dev_err(&pdev->dev, "failed to initialize DIM2\n");
goto err_destroy_bus;
}
@@ -855,16 +839,6 @@ err_unreg_iface:
most_deregister_interface(&dev->most_iface);
err_stop_thread:
kthread_stop(dev->netinfo_task);
-err_free_irq:
-#if !defined(ENABLE_HDM_TEST)
- free_irq(dev->irq_ahb0, dev);
-err_unmap_io:
- iounmap(dev->io_base);
-err_release_mem:
- release_mem_region(res->start, resource_size(res));
-err_free_dev:
-#endif
- kfree(dev);
return ret;
}
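
dim2_probe() moves to device-managed allocation (devm_kzalloc(), devm_ioremap_resource(), devm_request_irq()): each resource is registered against the struct device and released automatically, in reverse order, on probe failure or removal, which is why the err_free_irq/err_unmap_io/err_release_mem/err_free_dev ladder can simply vanish. A toy devres engine, as a sketch of the mechanism (not the kernel implementation):

    #include <stdio.h>
    #include <stdlib.h>

    struct devres {
        struct devres *next;
        void (*release)(void *);
        void *data;
    };

    static struct devres *devres_head;  /* per-device list in the core */

    static void *devm_track(void *data, void (*release)(void *))
    {
        struct devres *dr = malloc(sizeof(*dr));

        dr->data = data;
        dr->release = release;
        dr->next = devres_head;  /* LIFO: freed in reverse order */
        devres_head = dr;
        return data;
    }

    /* Runs on probe failure and on device removal in the real core. */
    static void devres_release_all(void)
    {
        while (devres_head) {
            struct devres *dr = devres_head;

            devres_head = dr->next;
            dr->release(dr->data);
            free(dr);
        }
    }

    static void rel(void *d) { printf("release %s\n", (char *)d); }

    int main(void)
    {
        devm_track("iomap", rel);
        devm_track("irq", rel);
        devres_release_all();  /* "release irq", then "release iomap" */
        return 0;
    }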
@@ -878,7 +852,6 @@ err_free_dev:
static int dim2_remove(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct dim2_platform_data *pdata = pdev->dev.platform_data;
unsigned long flags;
@@ -892,13 +865,6 @@ static int dim2_remove(struct platform_device *pdev)
dim2_sysfs_destroy(&dev->bus);
most_deregister_interface(&dev->most_iface);
kthread_stop(dev->netinfo_task);
-#if !defined(ENABLE_HDM_TEST)
- free_irq(dev->irq_ahb0, dev);
- iounmap(dev->io_base);
- release_mem_region(res->start, resource_size(res));
-#endif
- kfree(dev);
- platform_set_drvdata(pdev, NULL);
/*
* break link to local platform_device_id struct
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/hdm-dim2/dim2_hdm.h
index 1c94e3355fcc..4050e7c764ed 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.h
@@ -18,7 +18,7 @@ struct device;
/* platform dependent data for dim2 interface */
struct dim2_platform_data {
- int (*init)(struct dim2_platform_data *pd, void *io_base,
+ int (*init)(struct dim2_platform_data *pd, void __iomem *io_base,
int clk_speed);
void (*destroy)(struct dim2_platform_data *pd);
void *priv;
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
index c5b10c7d2fac..2b28e4a51131 100644
--- a/drivers/staging/most/hdm-dim2/dim2_sysfs.c
+++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
@@ -63,7 +63,6 @@ static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
- ssize_t ret;
struct medialb_bus *bus =
container_of(kobj, struct medialb_bus, kobj_group);
struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
@@ -71,8 +70,7 @@ static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
if (!xattr->store)
return -EIO;
- ret = xattr->store(bus, buf, count);
- return ret;
+ return xattr->store(bus, buf, count);
}
static struct sysfs_ops const bus_kobj_sysfs_ops = {
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 41690f801fb8..aeae071f2823 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -40,7 +40,6 @@
#define MAX_SUFFIX_LEN 10
#define MAX_STRING_LEN 80
#define MAX_BUF_SIZE 0xFFFF
-#define CEILING(x, y) (((x) + (y) - 1) / (y))
#define USB_VENDOR_ID_SMSC 0x0424 /* VID: SMSC */
#define USB_DEV_ID_BRDG 0xC001 /* PID: USB Bridge */
@@ -137,7 +136,6 @@ struct most_dev {
#define to_mdev(d) container_of(d, struct most_dev, iface)
#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
-static struct workqueue_struct *schedule_usb_work;
static void wq_clear_halt(struct work_struct *wq_obj);
static void wq_netinfo(struct work_struct *wq_obj);
@@ -223,6 +221,7 @@ static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
}
spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
list_del(&anchor->list);
+ cancel_work_sync(&anchor->clear_work_obj);
kfree(anchor);
}
spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
@@ -411,7 +410,7 @@ static void hdm_write_completion(struct urb *urb)
mbo->status = MBO_E_INVAL;
usb_unlink_urb(urb);
INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
- queue_work(schedule_usb_work, &anchor->clear_work_obj);
+ schedule_work(&anchor->clear_work_obj);
return;
case -ENODEV:
case -EPROTO:
@@ -575,7 +574,7 @@ static void hdm_read_completion(struct urb *urb)
mbo->status = MBO_E_INVAL;
usb_unlink_urb(urb);
INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
- queue_work(schedule_usb_work, &anchor->clear_work_obj);
+ schedule_work(&anchor->clear_work_obj);
return;
case -ENODEV:
case -EPROTO:
@@ -785,7 +784,7 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
temp_size += tail_space;
/* calculate extra length to comply w/ HW padding */
- conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
+ conf->extra_len = (DIV_ROUND_UP(temp_size, USB_MTU) * USB_MTU)
- conf->buffer_size;
exit:
mdev->conf[channel] = *conf;
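
The driver-private CEILING() macro duplicated DIV_ROUND_UP() from <linux/kernel.h>, whose definition is exactly the (n + d - 1) / d idiom; multiplying the rounded-up quotient back by the divisor, as the hunk above does, rounds a size up to the next multiple of USB_MTU. A self-checking sketch with illustrative values:

    #include <assert.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int usb_mtu = 512, temp_size = 1300;  /* illustrative */
        unsigned int padded = DIV_ROUND_UP(temp_size, usb_mtu) * usb_mtu;

        assert(padded == 1536);                /* next multiple of 512 */
        assert(DIV_ROUND_UP(1024, 512) == 2);  /* exact multiples stay */
        return 0;
    }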
@@ -872,7 +871,7 @@ static void link_stat_timer_handler(unsigned long data)
{
struct most_dev *mdev = (struct most_dev *)data;
- queue_work(schedule_usb_work, &mdev->poll_work_obj);
+ schedule_work(&mdev->poll_work_obj);
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
add_timer(&mdev->link_stat_timer);
}
@@ -1299,7 +1298,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
MOST_CH_ISOC_AVP | MOST_CH_SYNC;
- if (ep_desc->bEndpointAddress & USB_DIR_IN)
+ if (usb_endpoint_dir_in(ep_desc))
tmp_cap->direction = MOST_CH_RX;
else
tmp_cap->direction = MOST_CH_TX;
@@ -1415,19 +1414,13 @@ static int __init hdm_usb_init(void)
pr_err("could not register hdm_usb driver\n");
return -EIO;
}
- schedule_usb_work = create_workqueue("hdmu_work");
- if (!schedule_usb_work) {
- pr_err("could not create workqueue\n");
- usb_deregister(&hdm_usb);
- return -ENOMEM;
- }
+
return 0;
}
static void __exit hdm_usb_exit(void)
{
pr_info("hdm_usb_exit()\n");
- destroy_workqueue(schedule_usb_work);
usb_deregister(&hdm_usb);
}
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index ed1ed25b6d1d..7c619feb12d3 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -35,7 +35,6 @@
static struct class *most_class;
static struct device *class_glue_dir;
static struct ida mdev_id;
-static int modref;
static int dummy_num_buffers;
struct most_c_aim_obj {
@@ -66,7 +65,6 @@ struct most_c_obj {
struct most_c_aim_obj aim1;
struct list_head trash_fifo;
struct task_struct *hdm_enqueue_task;
- struct mutex stop_task_mutex;
wait_queue_head_t hdm_fifo_wq;
};
@@ -74,7 +72,6 @@ struct most_c_obj {
struct most_inst_obj {
int dev_id;
- atomic_t tainted;
struct most_interface *iface;
struct list_head channel_list;
struct most_c_obj *channel[MAX_CHANNELS];
@@ -82,6 +79,14 @@ struct most_inst_obj {
struct list_head list;
};
+static const struct {
+ int most_ch_data_type;
+ char *name;
+} ch_data_type[] = { { MOST_CH_CONTROL, "control\n" },
+ { MOST_CH_ASYNC, "async\n" },
+ { MOST_CH_SYNC, "sync\n" },
+ { MOST_CH_ISOC_AVP, "isoc_avp\n"} };
+
#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
/**
@@ -95,8 +100,6 @@ struct most_inst_obj {
_mbo; \
})
-static struct mutex deregister_mutex;
-
/* ___ ___
* ___C H A N N E L___
*/
@@ -414,14 +417,12 @@ static ssize_t show_set_datatype(struct most_c_obj *c,
struct most_c_attr *attr,
char *buf)
{
- if (c->cfg.data_type & MOST_CH_CONTROL)
- return snprintf(buf, PAGE_SIZE, "control\n");
- else if (c->cfg.data_type & MOST_CH_ASYNC)
- return snprintf(buf, PAGE_SIZE, "async\n");
- else if (c->cfg.data_type & MOST_CH_SYNC)
- return snprintf(buf, PAGE_SIZE, "sync\n");
- else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
- return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
+ return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
+ }
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
@@ -430,15 +431,16 @@ static ssize_t store_set_datatype(struct most_c_obj *c,
const char *buf,
size_t count)
{
- if (!strcmp(buf, "control\n")) {
- c->cfg.data_type = MOST_CH_CONTROL;
- } else if (!strcmp(buf, "async\n")) {
- c->cfg.data_type = MOST_CH_ASYNC;
- } else if (!strcmp(buf, "sync\n")) {
- c->cfg.data_type = MOST_CH_SYNC;
- } else if (!strcmp(buf, "isoc_avp\n")) {
- c->cfg.data_type = MOST_CH_ISOC_AVP;
- } else {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
+ if (!strcmp(buf, ch_data_type[i].name)) {
+ c->cfg.data_type = ch_data_type[i].most_ch_data_type;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(ch_data_type)) {
pr_info("WARN: invalid attribute settings\n");
return -EINVAL;
}
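
show_set_datatype() and store_set_datatype() now share one ch_data_type[] table, so supporting a new channel type means adding a single array entry instead of editing two if/else ladders; the store loop relies on the C guarantee that i == ARRAY_SIZE() after a scan that never matched. The lookup shape as a runnable sketch (the bit values stand in for the MOST_CH_* flags):

    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const struct { int type; const char *name; } ch_data_type[] = {
        { 1 << 0, "control\n" },
        { 1 << 1, "async\n" },
        { 1 << 2, "sync\n" },
        { 1 << 3, "isoc_avp\n" },
    };

    static int parse_type(const char *buf)
    {
        size_t i;

        for (i = 0; i < ARRAY_SIZE(ch_data_type); i++)
            if (!strcmp(buf, ch_data_type[i].name))
                return ch_data_type[i].type;
        return -1;  /* loop ran off the end: -EINVAL in the driver */
    }

    int main(void)
    {
        printf("%d\n", parse_type("sync\n"));   /* 4 */
        printf("%d\n", parse_type("bogus\n"));  /* -1 */
        return 0;
    }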
@@ -551,29 +553,6 @@ create_most_c_obj(const char *name, struct kobject *parent)
return c;
}
-/**
- * destroy_most_c_obj - channel release function
- * @c: pointer to channel object
- *
- * This decrements the reference counter of the channel object.
- * If the reference count turns zero, its release function is called.
- */
-static void destroy_most_c_obj(struct most_c_obj *c)
-{
- if (c->aim0.ptr)
- c->aim0.ptr->disconnect_channel(c->iface, c->channel_id);
- if (c->aim1.ptr)
- c->aim1.ptr->disconnect_channel(c->iface, c->channel_id);
- c->aim0.ptr = NULL;
- c->aim1.ptr = NULL;
-
- mutex_lock(&deregister_mutex);
- flush_trash_fifo(c);
- flush_channel_fifos(c);
- mutex_unlock(&deregister_mutex);
- kobject_put(&c->kobj);
-}
-
/* ___ ___
* ___I N S T A N C E___
*/
@@ -761,12 +740,10 @@ static void destroy_most_inst_obj(struct most_inst_obj *inst)
{
struct most_c_obj *c, *tmp;
- /* need to destroy channels first, since
- * each channel incremented the
- * reference count of the inst->kobj
- */
list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
- destroy_most_c_obj(c);
+ flush_trash_fifo(c);
+ flush_channel_fifos(c);
+ kobject_put(&c->kobj);
}
kobject_put(&inst->kobj);
}
@@ -1006,11 +983,14 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
else
return -ENOSPC;
+ *aim_ptr = aim_obj->driver;
ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
&c->cfg, &c->kobj, mdev_devnod);
- if (ret)
+ if (ret) {
+ *aim_ptr = NULL;
return ret;
- *aim_ptr = aim_obj->driver;
+ }
+
return len;
}
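
store_add_link() now publishes *aim_ptr before calling probe_channel() and rolls it back on failure, so completions that fire while probe is still running already see the bound AIM; symmetrically, store_remove_link() below disconnects first and clears the pointers only if that succeeded, so a failed disconnect does not strand a half-unlinked channel. A single-threaded sketch of the publish-then-rollback ordering (the concurrency is implied, not modelled):

    #include <errno.h>
    #include <stdio.h>

    struct aim { int id; };

    static struct aim *bound_aim;  /* read by completion callbacks */

    static int start_channel_hw(void)
    {
        /* completions fired from here already observe bound_aim */
        printf("probe sees aim %p\n", (void *)bound_aim);
        return 0;  /* flip to an -Exxx value to exercise the rollback */
    }

    static int add_link(struct aim *drv)
    {
        int ret;

        bound_aim = drv;  /* publish before starting the channel */
        ret = start_channel_hw();
        if (ret)
            bound_aim = NULL;  /* roll back: the AIM is not half-linked */
        return ret;
    }

    int main(void)
    {
        struct aim drv = { 1 };

        return add_link(&drv);
    }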
@@ -1056,12 +1036,12 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
if (IS_ERR(c))
return -ENODEV;
+ if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
+ return -EIO;
if (c->aim0.ptr == aim_obj->driver)
c->aim0.ptr = NULL;
if (c->aim1.ptr == aim_obj->driver)
c->aim1.ptr = NULL;
- if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
- return -EIO;
return len;
}
@@ -1279,7 +1259,6 @@ static int arm_mbo_chain(struct most_c_obj *c, int dir,
for (i = 0; i < c->cfg.num_buffers; i++) {
mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
if (!mbo) {
- pr_info("WARN: Allocation of MBO failed.\n");
retval = i;
goto _exit;
}
@@ -1319,18 +1298,10 @@ _exit:
*/
int most_submit_mbo(struct mbo *mbo)
{
- struct most_c_obj *c;
- struct most_inst_obj *i;
-
if (unlikely((!mbo) || (!mbo->context))) {
pr_err("Bad MBO or missing channel reference\n");
return -EINVAL;
}
- c = mbo->context;
- i = c->inst;
-
- if (unlikely(atomic_read(&i->tainted)))
- return -ENODEV;
nq_hdm_mbo(mbo);
return 0;
@@ -1387,7 +1358,7 @@ most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
return i->channel[id];
}
-int channel_has_mbo(struct most_interface *iface, int id)
+int channel_has_mbo(struct most_interface *iface, int id, struct most_aim *aim)
{
struct most_c_obj *c = get_channel_by_iface(iface, id);
unsigned long flags;
@@ -1396,6 +1367,11 @@ int channel_has_mbo(struct most_interface *iface, int id)
if (unlikely(!c))
return -EINVAL;
+ if (c->aim0.refs && c->aim1.refs &&
+ ((aim == c->aim0.ptr && c->aim0.num_buffers <= 0) ||
+ (aim == c->aim1.ptr && c->aim1.num_buffers <= 0)))
+ return 0;
+
spin_lock_irqsave(&c->fifo_lock, flags);
empty = list_empty(&c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
@@ -1456,17 +1432,8 @@ EXPORT_SYMBOL_GPL(most_get_mbo);
*/
void most_put_mbo(struct mbo *mbo)
{
- struct most_c_obj *c;
- struct most_inst_obj *i;
-
- c = mbo->context;
- i = c->inst;
+ struct most_c_obj *c = mbo->context;
- if (unlikely(atomic_read(&i->tainted))) {
- mbo->status = MBO_E_CLOSE;
- trash_mbo(mbo);
- return;
- }
if (c->cfg.direction == MOST_CH_TX) {
arm_mbo(mbo);
return;
@@ -1546,7 +1513,6 @@ int most_start_channel(struct most_interface *iface, int id,
mutex_unlock(&c->start_mutex);
return -ENOLCK;
}
- modref++;
c->cfg.extra_len = 0;
if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
@@ -1588,7 +1554,6 @@ out:
error:
module_put(iface->mod);
- modref--;
mutex_unlock(&c->start_mutex);
return ret;
}
@@ -1616,24 +1581,12 @@ int most_stop_channel(struct most_interface *iface, int id,
if (c->aim0.refs + c->aim1.refs >= 2)
goto out;
- mutex_lock(&c->stop_task_mutex);
if (c->hdm_enqueue_task)
kthread_stop(c->hdm_enqueue_task);
c->hdm_enqueue_task = NULL;
- mutex_unlock(&c->stop_task_mutex);
- mutex_lock(&deregister_mutex);
- if (atomic_read(&c->inst->tainted)) {
- mutex_unlock(&deregister_mutex);
- mutex_unlock(&c->start_mutex);
- return -ENODEV;
- }
- mutex_unlock(&deregister_mutex);
-
- if (iface->mod && modref) {
+ if (iface->mod)
module_put(iface->mod);
- modref--;
- }
c->is_poisoned = true;
if (c->iface->poison_channel(c->iface, c->channel_id)) {
@@ -1762,6 +1715,7 @@ struct kobject *most_register_interface(struct most_interface *iface)
inst = create_most_inst_obj(name);
if (!inst) {
pr_info("Failed to allocate interface instance\n");
+ ida_simple_remove(&mdev_id, id);
return ERR_PTR(-ENOMEM);
}
@@ -1769,7 +1723,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
INIT_LIST_HEAD(&inst->channel_list);
inst->iface = iface;
inst->dev_id = id;
- atomic_set(&inst->tainted, 0);
list_add_tail(&inst->list, &instance_list);
for (i = 0; i < iface->num_channels; i++) {
@@ -1808,7 +1761,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
init_completion(&c->cleanup);
atomic_set(&c->mbo_ref, 0);
mutex_init(&c->start_mutex);
- mutex_init(&c->stop_task_mutex);
list_add_tail(&c->list, &inst->channel_list);
}
pr_info("registered new MOST device mdev%d (%s)\n",
@@ -1818,6 +1770,7 @@ struct kobject *most_register_interface(struct most_interface *iface)
free_instance:
pr_info("Failed allocate channel(s)\n");
list_del(&inst->list);
+ ida_simple_remove(&mdev_id, id);
destroy_most_inst_obj(inst);
return ERR_PTR(-ENOMEM);
}
@@ -1835,37 +1788,24 @@ void most_deregister_interface(struct most_interface *iface)
struct most_inst_obj *i = iface->priv;
struct most_c_obj *c;
- mutex_lock(&deregister_mutex);
if (unlikely(!i)) {
pr_info("Bad Interface\n");
- mutex_unlock(&deregister_mutex);
return;
}
pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
iface->description);
- atomic_set(&i->tainted, 1);
- mutex_unlock(&deregister_mutex);
-
- while (modref) {
- if (iface->mod && modref)
- module_put(iface->mod);
- modref--;
- }
-
list_for_each_entry(c, &i->channel_list, list) {
- if (c->aim0.refs + c->aim1.refs <= 0)
- continue;
-
- mutex_lock(&c->stop_task_mutex);
- if (c->hdm_enqueue_task)
- kthread_stop(c->hdm_enqueue_task);
- c->hdm_enqueue_task = NULL;
- mutex_unlock(&c->stop_task_mutex);
-
- if (iface->poison_channel(iface, c->channel_id))
- pr_err("Can't poison channel %d\n", c->channel_id);
+ if (c->aim0.ptr)
+ c->aim0.ptr->disconnect_channel(c->iface,
+ c->channel_id);
+ if (c->aim1.ptr)
+ c->aim1.ptr->disconnect_channel(c->iface,
+ c->channel_id);
+ c->aim0.ptr = NULL;
+ c->aim1.ptr = NULL;
}
+
ida_simple_remove(&mdev_id, i->dev_id);
list_del(&i->list);
destroy_most_inst_obj(i);
@@ -1913,41 +1853,52 @@ EXPORT_SYMBOL_GPL(most_resume_enqueue);
static int __init most_init(void)
{
+ int err;
+
pr_info("init()\n");
INIT_LIST_HEAD(&instance_list);
INIT_LIST_HEAD(&aim_list);
- mutex_init(&deregister_mutex);
ida_init(&mdev_id);
- if (bus_register(&most_bus)) {
+ err = bus_register(&most_bus);
+ if (err) {
pr_info("Cannot register most bus\n");
- goto exit;
+ return err;
}
most_class = class_create(THIS_MODULE, "most");
if (IS_ERR(most_class)) {
pr_info("No udev support.\n");
+ err = PTR_ERR(most_class);
goto exit_bus;
}
- if (driver_register(&mostcore)) {
+
+ err = driver_register(&mostcore);
+ if (err) {
pr_info("Cannot register core driver\n");
goto exit_class;
}
class_glue_dir =
device_create(most_class, NULL, 0, NULL, "mostcore");
- if (!class_glue_dir)
+ if (IS_ERR(class_glue_dir)) {
+ err = PTR_ERR(class_glue_dir);
goto exit_driver;
+ }
most_aim_kset =
kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
- if (!most_aim_kset)
+ if (!most_aim_kset) {
+ err = -ENOMEM;
goto exit_class_container;
+ }
most_inst_kset =
kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
- if (!most_inst_kset)
+ if (!most_inst_kset) {
+ err = -ENOMEM;
goto exit_driver_kset;
+ }
return 0;
@@ -1961,8 +1912,7 @@ exit_class:
class_destroy(most_class);
exit_bus:
bus_unregister(&most_bus);
-exit:
- return -ENOMEM;
+ return err;
}
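
most_init() previously collapsed every failure into a single return -ENOMEM; it now records the real cause at each step — the bus_register() return value, PTR_ERR() of the class and glue device, -ENOMEM for the ksets — and unwinds through the existing label ladder. The canonical shape of that pattern, as a sketch:

    #include <errno.h>

    static int step_a(void) { return 0; }        /* e.g. bus_register() */
    static int step_b(void) { return -ENODEV; }  /* e.g. class_create() */
    static void undo_a(void) { }                 /* e.g. bus_unregister() */

    static int init(void)
    {
        int err;

        err = step_a();
        if (err)
            return err;   /* nothing to undo yet */

        err = step_b();
        if (err)
            goto exit_a;  /* unwind in reverse order of setup */

        return 0;

    exit_a:
        undo_a();
        return err;       /* propagate the first real error code */
    }

    int main(void)
    {
        return init() ? 1 : 0;
    }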
static void __exit most_exit(void)
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
index bda3850d5435..60e018e499ef 100644
--- a/drivers/staging/most/mostcore/mostcore.h
+++ b/drivers/staging/most/mostcore/mostcore.h
@@ -310,7 +310,8 @@ int most_deregister_aim(struct most_aim *aim);
struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
struct most_aim *);
void most_put_mbo(struct mbo *mbo);
-int channel_has_mbo(struct most_interface *iface, int channel_idx);
+int channel_has_mbo(struct most_interface *iface, int channel_idx,
+ struct most_aim *aim);
int most_start_channel(struct most_interface *iface, int channel_idx,
struct most_aim *);
int most_stop_channel(struct most_interface *iface, int channel_idx,
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 197d1124733d..163f21a1298d 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -33,7 +33,7 @@ static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct spinand_info *info = nand_get_controller_data(chip);
- struct spinand_state *state = (struct spinand_state *)info->priv;
+ struct spinand_state *state = info->priv;
return state;
}
@@ -49,7 +49,6 @@ static struct nand_ecclayout spinand_oob_64 = {
17, 18, 19, 20, 21, 22,
33, 34, 35, 36, 37, 38,
49, 50, 51, 52, 53, 54, },
- .oobavail = 32,
.oobfree = {
{.offset = 8,
.length = 8},
@@ -63,8 +62,8 @@ static struct nand_ecclayout spinand_oob_64 = {
};
#endif
-/*
- * spinand_cmd - to process a command to send to the SPI Nand
+/**
+ * spinand_cmd - process a command to send to the SPI Nand
* Description:
* Set up the command buffer to send to the SPI controller.
* The command buffer has to be initialized to 0.
@@ -110,10 +109,10 @@ static int spinand_cmd(struct spi_device *spi, struct spinand_cmd *cmd)
return spi_sync(spi, &message);
}
-/*
- * spinand_read_id- Read SPI Nand ID
+/**
+ * spinand_read_id - Read SPI Nand ID
* Description:
- * Read ID: read two ID bytes from the SPI Nand device
+ * read two ID bytes from the SPI Nand device
*/
static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
{
@@ -135,8 +134,8 @@ static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
return retval;
}
-/*
- * spinand_read_status- send command 0xf to the SPI Nand status register
+/**
+ * spinand_read_status - send command 0xf to the SPI Nand status register
* Description:
* After read, write, or erase, the Nand device is expected to set the
* busy status.
@@ -175,7 +174,7 @@ static int wait_till_ready(struct spi_device *spi_nand)
retval = spinand_read_status(spi_nand, &stat);
if (retval < 0)
return -1;
- else if (!(stat & 0x1))
+ if (!(stat & 0x1))
break;
cond_resched();
@@ -188,7 +187,7 @@ static int wait_till_ready(struct spi_device *spi_nand)
}
/**
- * spinand_get_otp- send command 0xf to read the SPI Nand OTP register
+ * spinand_get_otp - send command 0xf to read the SPI Nand OTP register
* Description:
* There is one bit( bit 0x10 ) to set or to clear the internal ECC.
* Enable chip internal ECC, set the bit to 1
@@ -212,7 +211,7 @@ static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp)
}
/**
- * spinand_set_otp- send command 0x1f to write the SPI Nand OTP register
+ * spinand_set_otp - send command 0x1f to write the SPI Nand OTP register
* Description:
* There is one bit( bit 0x10 ) to set or to clear the internal ECC.
* Enable chip internal ECC, set the bit to 1
@@ -223,11 +222,11 @@ static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
int retval;
struct spinand_cmd cmd = {0};
- cmd.cmd = CMD_WRITE_REG,
- cmd.n_addr = 1,
- cmd.addr[0] = REG_OTP,
- cmd.n_tx = 1,
- cmd.tx_buf = otp,
+ cmd.cmd = CMD_WRITE_REG;
+ cmd.n_addr = 1;
+ cmd.addr[0] = REG_OTP;
+ cmd.n_tx = 1;
+ cmd.tx_buf = otp;
retval = spinand_cmd(spi_nand, &cmd);
if (retval < 0)
@@ -238,7 +237,7 @@ static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
#ifdef CONFIG_MTD_SPINAND_ONDIEECC
/**
- * spinand_enable_ecc- send command 0x1f to write the SPI Nand OTP register
+ * spinand_enable_ecc - send command 0x1f to write the SPI Nand OTP register
* Description:
* There is one bit( bit 0x10 ) to set or to clear the internal ECC.
* Enable chip internal ECC, set the bit to 1
@@ -283,7 +282,7 @@ static int spinand_disable_ecc(struct spi_device *spi_nand)
}
/**
- * spinand_write_enable- send command 0x06 to enable write or erase the
+ * spinand_write_enable - send command 0x06 to enable write or erase the
* Nand cells
* Description:
* Before write and erase the Nand cells, the write enable has to be set.
@@ -313,9 +312,9 @@ static int spinand_read_page_to_cache(struct spi_device *spi_nand, u16 page_id)
return spinand_cmd(spi_nand, &cmd);
}
-/*
- * spinand_read_from_cache- send command 0x03 to read out the data from the
- * cache register(2112 bytes max)
+/**
+ * spinand_read_from_cache - send command 0x03 to read out the data from the
+ * cache register (2112 bytes max)
* Description:
* The read can specify 1 to 2112 bytes of data read at the corresponding
* locations.
@@ -341,15 +340,15 @@ static int spinand_read_from_cache(struct spi_device *spi_nand, u16 page_id,
return spinand_cmd(spi_nand, &cmd);
}
-/*
- * spinand_read_page-to read a page with:
+/**
+ * spinand_read_page - read a page
* @page_id: the physical page number
* @offset: the location from 0 to 2111
* @len: number of bytes to read
* @rbuf: read buffer to hold @len bytes
*
* Description:
- * The read includes two commands to the Nand: 0x13 and 0x03 commands
+ * The read includes two commands to the Nand - 0x13 and 0x03 commands
* Poll to read status to wait for tRD time.
*/
static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
@@ -408,11 +407,11 @@ static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
return ret;
}
-/*
- * spinand_program_data_to_cache--to write a page to cache with:
+/**
+ * spinand_program_data_to_cache - write a page to cache
* @byte_id: the location to write to the cache
* @len: number of bytes to write
- * @rbuf: read buffer to hold @len bytes
+ * @wbuf: write buffer holding @len bytes
*
* Description:
* The write command used here is 0x84--indicating that the cache is
@@ -439,7 +438,7 @@ static int spinand_program_data_to_cache(struct spi_device *spi_nand,
}
/**
- * spinand_program_execute--to write a page from cache to the Nand array with
+ * spinand_program_execute - write a page from cache to the Nand array
* @page_id: the physical page location to write the page.
*
* Description:
@@ -462,11 +461,11 @@ static int spinand_program_execute(struct spi_device *spi_nand, u16 page_id)
}
/**
- * spinand_program_page--to write a page with:
+ * spinand_program_page - write a page
* @page_id: the physical page location to write the page.
* @offset: the location from the cache starting from 0 to 2111
* @len: the number of bytes to write
- * @wbuf: the buffer to hold the number of bytes
+ * @buf: the buffer holding @len bytes
*
* Description:
* The commands used here are 0x06, 0x84, and 0x10--indicating that
@@ -483,8 +482,11 @@ static int spinand_program_page(struct spi_device *spi_nand,
#ifdef CONFIG_MTD_SPINAND_ONDIEECC
unsigned int i, j;
- enable_read_hw_ecc = 0;
wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL);
+ if (!wbuf)
+ return -ENOMEM;
+
+ enable_read_hw_ecc = 0;
spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
for (i = offset, j = 0; i < len; i++, j++)
@@ -547,7 +549,7 @@ static int spinand_program_page(struct spi_device *spi_nand,
}
/**
- * spinand_erase_block_erase--to erase a page with:
+ * spinand_erase_block_erase - erase a page
* @block_id: the physical block location to erase.
*
* Description:
@@ -570,7 +572,7 @@ static int spinand_erase_block_erase(struct spi_device *spi_nand, u16 block_id)
}
/**
- * spinand_erase_block--to erase a page with:
+ * spinand_erase_block - erase a page
* @block_id: the physical block location to erase.
*
* Description:
@@ -746,7 +748,7 @@ static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command,
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct spinand_info *info = nand_get_controller_data(chip);
- struct spinand_state *state = (struct spinand_state *)info->priv;
+ struct spinand_state *state = info->priv;
switch (command) {
/*
@@ -810,7 +812,7 @@ static void spinand_cmdfunc(struct mtd_info *mtd, unsigned int command,
}
/**
- * spinand_lock_block- send write register 0x1f command to the Nand device
+ * spinand_lock_block - send write register 0x1f command to the Nand device
*
* Description:
* After power up, all the Nand blocks are locked. This function allows
@@ -837,12 +839,12 @@ static int spinand_lock_block(struct spi_device *spi_nand, u8 lock)
return ret;
}
-/*
+/**
* spinand_probe - [spinand Interface]
* @spi_nand: registered device driver.
*
* Description:
- * To set up the device driver parameters to make the device available.
+ * Set up the device driver parameters to make the device available.
*/
static int spinand_probe(struct spi_device *spi_nand)
{
@@ -890,7 +892,8 @@ static int spinand_probe(struct spi_device *spi_nand)
#else
chip->ecc.mode = NAND_ECC_SOFT;
if (spinand_disable_ecc(spi_nand) < 0)
- pr_info("%s: disable ecc failed!\n", __func__);
+ dev_info(&spi_nand->dev, "%s: disable ecc failed!\n",
+ __func__);
#endif
nand_set_flash_node(chip, spi_nand->dev.of_node);
@@ -916,12 +919,12 @@ static int spinand_probe(struct spi_device *spi_nand)
return mtd_device_register(mtd, NULL, 0);
}
-/*
- * spinand_remove: Remove the device driver
+/**
+ * spinand_remove - remove the device driver
* @spi: the spi device.
*
* Description:
- * To remove the device driver parameters and free up allocated memories.
+ * Remove the device driver parameters and free up allocated memories.
*/
static int spinand_remove(struct spi_device *spi)
{
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
index ae62975cf44a..457dc7ffdaf1 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.h
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.h
@@ -78,7 +78,6 @@
#define BL_ALL_UNLOCKED 0
struct spinand_info {
- struct nand_ecclayout *ecclayout;
struct spi_device *spi;
void *priv;
};
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
index 7806c2bc3af3..abf4c71ee66b 100644
--- a/drivers/staging/netlogic/platform_net.c
+++ b/drivers/staging/netlogic/platform_net.c
@@ -86,7 +86,8 @@ static void xlr_resource_init(struct resource *res, int offset, int irq)
res++;
res->name = "gmac";
- res->start = res->end = irq;
+ res->start = irq;
+ res->end = irq;
res->flags = IORESOURCE_IRQ;
}
@@ -121,8 +122,8 @@ static struct platform_device *gmac_controller2_init(void *gmac0_addr)
ndata1.phy_addr[mac] = mac + 4 + 0x10;
xlr_resource_init(&xlr_net1_res[mac * 2],
- xlr_gmac_offsets[mac + 4],
- xlr_gmac_irqs[mac + 4]);
+ xlr_gmac_offsets[mac + 4],
+ xlr_gmac_irqs[mac + 4]);
}
xlr_net_dev1.num_resources = 8;
@@ -169,7 +170,7 @@ static void xls_gmac_init(void)
xlr_net_dev0.num_resources = 2;
xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0],
- xlr_gmac_irqs[0]);
+ xlr_gmac_irqs[0]);
platform_device_register(&xlr_net_dev0);
/* second block is XAUI, not supported yet */
@@ -182,7 +183,7 @@ static void xls_gmac_init(void)
ndata0.phy_addr[mac] = mac + 0x10;
xlr_resource_init(&xlr_net0_res[mac * 2],
- xlr_gmac_offsets[mac],
+ xlr_gmac_offsets[mac],
xlr_gmac_irqs[mac]);
}
xlr_net_dev0.num_resources = 8;
@@ -208,7 +209,6 @@ static void xlr_gmac_init(void)
.gpio_addr = NULL,
};
-
static struct platform_device xlr_net_dev0 = {
.name = "xlr-net",
.id = 0,
@@ -223,7 +223,7 @@ static void xlr_gmac_init(void)
ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
ndata0.phy_addr[mac] = mac;
xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac],
- xlr_gmac_irqs[mac]);
+ xlr_gmac_irqs[mac]);
}
xlr_net_dev0.num_resources = 8;
xlr_net_dev0.resource = xlr_net0_res;
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 0b4e819f5164..aa1cdf602cf6 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -69,8 +69,7 @@ static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg)
return __raw_readl(base + reg);
}
-static inline void xlr_reg_update(u32 *base_addr,
- u32 off, u32 val, u32 mask)
+static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask)
{
u32 tmp;
@@ -100,7 +99,7 @@ static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr)
return 0;
} while (++num_try < 10000);
- pr_err("Send to RFR failed in RX path\n");
+ netdev_err(priv->ndev, "Send to RFR failed in RX path\n");
return ret;
}
@@ -122,8 +121,8 @@ static inline unsigned char *xlr_alloc_skb(void)
return skb->data;
}
-static void xlr_net_fmn_handler(int bkt, int src_stnid, int size,
- int code, struct nlm_fmn_msg *msg, void *arg)
+static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
+ struct nlm_fmn_msg *msg, void *arg)
{
struct sk_buff *skb;
void *skb_data = NULL;
@@ -131,13 +130,13 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size,
struct xlr_net_priv *priv;
u32 port, length;
unsigned char *addr;
- struct xlr_adapter *adapter = (struct xlr_adapter *) arg;
+ struct xlr_adapter *adapter = arg;
length = (msg->msg0 >> 40) & 0x3fff;
if (length == 0) {
addr = bus_to_virt(msg->msg0 & 0xffffffffffULL);
addr = addr - MAC_SKB_BACK_PTR_SIZE;
- skb = (struct sk_buff *) *(unsigned long *)addr;
+ skb = (struct sk_buff *)(*(unsigned long *)addr);
dev_kfree_skb_any((struct sk_buff *)addr);
} else {
addr = (unsigned char *)
@@ -145,9 +144,9 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size,
length = length - BYTE_OFFSET - MAC_CRC_LEN;
port = ((int)msg->msg0) & 0x0f;
addr = addr - MAC_SKB_BACK_PTR_SIZE;
- skb = (struct sk_buff *) *(unsigned long *)addr;
+ skb = (struct sk_buff *)(*(unsigned long *)addr);
skb->dev = adapter->netdev[port];
- if (skb->dev == NULL)
+ if (!skb->dev)
return;
ndev = skb->dev;
priv = netdev_priv(ndev);
@@ -207,15 +206,15 @@ static int xlr_net_fill_rx_ring(struct net_device *ndev)
struct xlr_net_priv *priv = netdev_priv(ndev);
int i;
- for (i = 0; i < MAX_FRIN_SPILL/4; i++) {
+ for (i = 0; i < MAX_FRIN_SPILL / 4; i++) {
skb_data = xlr_alloc_skb();
if (!skb_data) {
- pr_err("SKB allocation failed\n");
+ netdev_err(ndev, "SKB allocation failed\n");
return -ENOMEM;
}
send_to_rfr_fifo(priv, skb_data);
}
- pr_info("Rx ring setup done\n");
+ netdev_info(ndev, "Rx ring setup done\n");
return 0;
}
@@ -252,7 +251,7 @@ static int xlr_net_stop(struct net_device *ndev)
}
static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
unsigned long physkb = virt_to_phys(skb);
int cpu_core = nlm_core_id();
@@ -266,12 +265,13 @@ static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
((u64)fr_stn_id << 54) | /* Free back id */
(u64)0 << 40 | /* Set len to 0 */
((u64)physkb & 0xffffffff)); /* 32bit address */
- msg->msg2 = msg->msg3 = 0;
+ msg->msg2 = 0;
+ msg->msg3 = 0;
}
static void __maybe_unused xlr_wakeup_queue(unsigned long dev)
{
- struct net_device *ndev = (struct net_device *) dev;
+ struct net_device *ndev = (struct net_device *)dev;
struct xlr_net_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = xlr_get_phydev(priv);
@@ -280,7 +280,7 @@ static void __maybe_unused xlr_wakeup_queue(unsigned long dev)
}
static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+ struct net_device *ndev)
{
struct nlm_fmn_msg msg;
struct xlr_net_priv *priv = netdev_priv(ndev);
@@ -309,10 +309,10 @@ static void xlr_hw_set_mac_addr(struct net_device *ndev)
/* set mac station address */
xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0,
- ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
- (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
+ ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
+ (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1,
- ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));
+ ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));
xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff);
xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
@@ -320,12 +320,12 @@ static void xlr_hw_set_mac_addr(struct net_device *ndev)
xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG,
- (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
+ (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
+ (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
+ (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
}
@@ -406,7 +406,8 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
}
static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats
+ )
{
xlr_stats(ndev, stats);
return stats;
@@ -426,7 +427,7 @@ static struct net_device_ops xlr_netdev_ops = {
* Gmac init
*/
static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
- int reg_start_1, int reg_size, int size)
+ int reg_start_1, int reg_size, int size)
{
void *spill;
u32 *base;
@@ -436,13 +437,15 @@ static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
base = priv->base_addr;
spill_size = size;
spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_ATOMIC);
- if (!spill)
+ if (!spill) {
pr_err("Unable to allocate memory for spill area!\n");
+ return ZERO_SIZE_PTR;
+ }
spill = PTR_ALIGN(spill, SMP_CACHE_BYTES);
phys_addr = virt_to_phys(spill);
dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n",
- size, phys_addr);
+ size, phys_addr);
xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07);
xlr_nae_wreg(base, reg_size, spill_size);
@@ -511,19 +514,19 @@ static void xlr_config_pde(struct xlr_net_priv *priv)
xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff));
xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1,
- ((bkt_map >> 32) & 0xffffffff));
+ ((bkt_map >> 32) & 0xffffffff));
xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff));
xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1,
- ((bkt_map >> 32) & 0xffffffff));
+ ((bkt_map >> 32) & 0xffffffff));
xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff));
xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1,
- ((bkt_map >> 32) & 0xffffffff));
+ ((bkt_map >> 32) & 0xffffffff));
xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff));
xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1,
- ((bkt_map >> 32) & 0xffffffff));
+ ((bkt_map >> 32) & 0xffffffff));
}
/*
@@ -541,8 +544,8 @@ static int xlr_config_common(struct xlr_net_priv *priv)
/* Setting non-core MsgBktSize(0x321 - 0x325) */
for (i = start_stn_id; i <= end_stn_id; i++) {
xlr_nae_wreg(priv->base_addr,
- R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
- bucket_size[i]);
+ R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
+ bucket_size[i]);
}
/*
@@ -552,8 +555,8 @@ static int xlr_config_common(struct xlr_net_priv *priv)
for (i = 0; i < 8; i++) {
for (j = 0; j < 8; j++)
xlr_nae_wreg(priv->base_addr,
- (R_CC_CPU0_0 + (i * 8)) + j,
- gmac->credit_config[(i * 8) + j]);
+ (R_CC_CPU0_0 + (i * 8)) + j,
+ gmac->credit_config[(i * 8) + j]);
}
xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3);
@@ -567,7 +570,7 @@ static int xlr_config_common(struct xlr_net_priv *priv)
if (err)
return err;
nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler,
- priv->adapter);
+ priv->adapter);
return 0;
}
@@ -583,7 +586,7 @@ static void xlr_config_translate_table(struct xlr_net_priv *priv)
cpu_mask = priv->nd->cpu_mask;
pr_info("Using %s-based distribution\n",
- (use_bkt) ? "bucket" : "class");
+ (use_bkt) ? "bucket" : "class");
j = 0;
for (i = 0; i < 32; i++) {
if ((1 << i) & cpu_mask) {
@@ -614,7 +617,7 @@ static void xlr_config_translate_table(struct xlr_net_priv *priv)
val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
(c2 << 7) | (b2 << 1) | (use_bkt << 0));
dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n",
- i, b1, b2, c1, c2);
+ i, b1, b2, c1, c2);
xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val);
c1 = c2;
}
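Each translate-table word built above packs two (class, bucket) pairs plus the bucket/class selector bit. A hedged sketch of the layout implied by the shifts, with field widths inferred from the header's W_ macros (the helper is illustrative only):

	/*
	 *   bits [24:23] c1    bits [22:17] b1    bit [16] use_bkt
	 *   bits [ 8: 7] c2    bits [ 6: 1] b2    bit [ 0] use_bkt
	 */
	static inline u32 xlr_xlate_entry(u32 c1, u32 b1, u32 c2, u32 b2,
					  u32 use_bkt)
	{
		return (c1 << 23) | (b1 << 17) | (use_bkt << 16) |
		       (c2 << 7) | (b2 << 1) | (use_bkt << 0);
	}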
@@ -629,16 +632,16 @@ static void xlr_config_parser(struct xlr_net_priv *priv)
/* Use 7-bit CRC hash for flow classification, with 127 as the CRC polynomial */
xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG,
- ((0x7f << 8) | (1 << 1)));
+ ((0x7f << 8) | (1 << 1)));
/* Configure the parser; L2 type is configured in the bootloader */
/* extract IP: src, dest, protocol */
xlr_nae_wreg(priv->base_addr, R_L3CTABLE,
- (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
- (0x0800 << 0));
+ (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
+ (0x0800 << 0));
xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1,
- (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
- (16 << 4) | 4);
+ (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
+ (16 << 4) | 4);
/* Configure to extract SRC port and Dest port for TCP and UDP pkts */
xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6);
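For reference, the magic parser constant above decomposes onto the field offsets defined in xlr_net.h; a hedged rewrite, assuming the bits line up with the named fields:

	/* (0x7f << 8) | (1 << 1), spelled out with the header's offsets */
	u32 parser_cfg = (0x7f << O_PARSERCONFIGREG__CRCHASHPOLY) |
			 (1 << O_PARSERCONFIGREG__USEHASH);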
@@ -663,7 +666,7 @@ static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val)
xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum);
/* Write the data which starts the write cycle */
- xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32) val);
+ xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val);
/* poll for the write cycle to complete */
while (!timedout) {
@@ -692,11 +695,11 @@ static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum)
/* setup the phy reg to be used */
xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS,
- (phy_addr << 8) | (regnum << 0));
+ (phy_addr << 8) | (regnum << 0));
/* Issue the read command */
xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND,
- (1 << O_MII_MGMT_COMMAND__rstat));
+ (1 << O_MII_MGMT_COMMAND__rstat));
/* poll for the read cycle to complete */
while (!timedout) {
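Both MDIO paths first program R_MII_MGMT_ADDRESS with the PHY and register numbers packed into one word. A hedged sketch of that encoding (the 5-bit field masks are an assumption based on IEEE 802.3 clause 22; the driver code only guarantees the shift):

	static inline u32 xlr_mii_mgmt_addr(int phy_addr, int regnum)
	{
		return ((phy_addr & 0x1f) << 8) | (regnum & 0x1f);
	}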
@@ -724,7 +727,7 @@ static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val)
ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val);
dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n",
- phy_addr, regnum, val, ret);
+ phy_addr, regnum, val, ret);
return ret;
}
@@ -735,7 +738,7 @@ static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
ret = xlr_phy_read(priv->mii_addr, phy_addr, regnum);
dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n",
- phy_addr, regnum, ret);
+ phy_addr, regnum, ret);
return ret;
}
@@ -797,13 +800,16 @@ void xlr_set_gmac_speed(struct xlr_net_priv *priv)
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
if (speed == SPEED_10)
xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL, SGMII_SPEED_10);
+ R_INTERFACE_CONTROL,
+ SGMII_SPEED_10);
if (speed == SPEED_100)
xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL, SGMII_SPEED_100);
+ R_INTERFACE_CONTROL,
+ SGMII_SPEED_100);
if (speed == SPEED_1000)
xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL, SGMII_SPEED_1000);
+ R_INTERFACE_CONTROL,
+ SGMII_SPEED_1000);
}
if (speed == SPEED_10)
xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2);
@@ -864,7 +870,7 @@ static int xlr_mii_probe(struct xlr_net_priv *priv)
}
static int xlr_setup_mdio(struct xlr_net_priv *priv,
- struct platform_device *pdev)
+ struct platform_device *pdev)
{
int err;
@@ -877,7 +883,7 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
priv->mii_bus->priv = priv;
priv->mii_bus->name = "xlr-mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
- priv->mii_bus->name, priv->port_id);
+ priv->mii_bus->name, priv->port_id);
priv->mii_bus->read = xlr_mii_read;
priv->mii_bus->write = xlr_mii_write;
priv->mii_bus->parent = &pdev->dev;
@@ -910,25 +916,31 @@ static void xlr_port_enable(struct xlr_net_priv *priv)
/* Setup MAC_CONFIG reg if (xls & rgmii) */
if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) &&
- priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- (1 << O_RX_CONTROL__RGMII), (1 << O_RX_CONTROL__RGMII));
+ (1 << O_RX_CONTROL__RGMII),
+ (1 << O_RX_CONTROL__RGMII));
/* Rx Tx enable */
xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
- ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)),
- ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)));
+ ((1 << O_MAC_CONFIG_1__rxen) |
+ (1 << O_MAC_CONFIG_1__txen) |
+ (1 << O_MAC_CONFIG_1__rxfc) |
+ (1 << O_MAC_CONFIG_1__txfc)),
+ ((1 << O_MAC_CONFIG_1__rxen) |
+ (1 << O_MAC_CONFIG_1__txen) |
+ (1 << O_MAC_CONFIG_1__rxfc) |
+ (1 << O_MAC_CONFIG_1__txfc)));
/* Setup tx control reg */
xlr_reg_update(priv->base_addr, R_TX_CONTROL,
- ((1 << O_TX_CONTROL__TxEnable) |
- (512 << O_TX_CONTROL__TxThreshold)), 0x3fff);
+ ((1 << O_TX_CONTROL__TXENABLE) |
+ (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff);
/* Setup rx control reg */
xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- 1 << O_RX_CONTROL__RxEnable, 1 << O_RX_CONTROL__RxEnable);
+ 1 << O_RX_CONTROL__RXENABLE,
+ 1 << O_RX_CONTROL__RXENABLE);
}
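The enable and disable paths above and below both go through xlr_reg_update(base, reg, value, mask); its call sites (value == mask to set bits, value == 0 to clear them) suggest a masked read-modify-write. A hedged sketch of those semantics, where xlr_nae_rreg stands in for whatever read accessor the driver actually uses:

	static void xlr_reg_update_sketch(u32 *base, u32 reg, u32 val, u32 mask)
	{
		u32 tmp = xlr_nae_rreg(base, reg);	/* assumed read accessor */

		xlr_nae_wreg(base, reg, (tmp & ~mask) | (val & mask));
	}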
static void xlr_port_disable(struct xlr_net_priv *priv)
@@ -936,25 +948,26 @@ static void xlr_port_disable(struct xlr_net_priv *priv)
/* Setup MAC_CONFIG reg */
/* Rx Tx disable*/
xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
- ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) | (1 << O_MAC_CONFIG_1__txfc)),
- 0x0);
+ ((1 << O_MAC_CONFIG_1__rxen) |
+ (1 << O_MAC_CONFIG_1__txen) |
+ (1 << O_MAC_CONFIG_1__rxfc) |
+ (1 << O_MAC_CONFIG_1__txfc)), 0x0);
/* Setup tx control reg */
xlr_reg_update(priv->base_addr, R_TX_CONTROL,
- ((1 << O_TX_CONTROL__TxEnable) |
- (512 << O_TX_CONTROL__TxThreshold)), 0);
+ ((1 << O_TX_CONTROL__TXENABLE) |
+ (512 << O_TX_CONTROL__TXTHRESHOLD)), 0);
/* Setup rx control reg */
xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- 1 << O_RX_CONTROL__RxEnable, 0);
+ 1 << O_RX_CONTROL__RXENABLE, 0);
}
/*
* Initialization of gmac
*/
static int xlr_gmac_init(struct xlr_net_priv *priv,
- struct platform_device *pdev)
+ struct platform_device *pdev)
{
int ret;
@@ -963,9 +976,9 @@ static int xlr_gmac_init(struct xlr_net_priv *priv,
xlr_port_disable(priv);
xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL,
- (1 << O_DESC_PACK_CTRL__MaxEntry)
- | (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset)
- | (1600 << O_DESC_PACK_CTRL__RegularSize));
+ (1 << O_DESC_PACK_CTRL__MAXENTRY) |
+ (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) |
+ (1600 << O_DESC_PACK_CTRL__REGULARSIZE));
ret = xlr_setup_mdio(priv, pdev);
if (ret)
@@ -977,21 +990,14 @@ static int xlr_gmac_init(struct xlr_net_priv *priv,
/* speed 2.5 MHz */
xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02);
/* Setup Interrupt mask reg */
- xlr_nae_wreg(priv->base_addr, R_INTMASK,
- (1 << O_INTMASK__TxIllegal) |
- (1 << O_INTMASK__MDInt) |
- (1 << O_INTMASK__TxFetchError) |
- (1 << O_INTMASK__P2PSpillEcc) |
- (1 << O_INTMASK__TagFull) |
- (1 << O_INTMASK__Underrun) |
- (1 << O_INTMASK__Abort)
- );
+ xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) |
+ (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) |
+ (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) |
+ (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT));
/* Clear all stats */
- xlr_reg_update(priv->base_addr, R_STATCTRL,
- 0, 1 << O_STATCTRL__ClrCnt);
- xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2,
- 1 << 2);
+ xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT);
+ xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2);
return 0;
}
@@ -1019,10 +1025,11 @@ static int xlr_net_probe(struct platform_device *pdev)
* Each controller has 4 gmac ports; each controller is mapped
* under one parent device, with its 4 gmac ports under that device.
*/
- for (port = 0; port < pdev->num_resources/2; port++) {
+ for (port = 0; port < pdev->num_resources / 2; port++) {
ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32);
if (!ndev) {
- pr_err("Allocation of Ethernet device failed\n");
+ dev_err(&pdev->dev,
+ "Allocation of Ethernet device failed\n");
return -ENOMEM;
}
@@ -1032,13 +1039,6 @@ static int xlr_net_probe(struct platform_device *pdev)
priv->port_id = (pdev->id * 4) + port;
priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, port);
-
- if (res == NULL) {
- pr_err("No memory resource for MAC %d\n",
- priv->port_id);
- err = -ENODEV;
- goto err_gmac;
- }
priv->base_addr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->base_addr)) {
err = PTR_ERR(priv->base_addr);
@@ -1048,8 +1048,9 @@ static int xlr_net_probe(struct platform_device *pdev)
adapter->netdev[port] = ndev;
res = platform_get_resource(pdev, IORESOURCE_IRQ, port);
- if (res == NULL) {
- pr_err("No irq resource for MAC %d\n", priv->port_id);
+ if (!res) {
+ dev_err(&pdev->dev, "No irq resource for MAC %d\n",
+ priv->port_id);
err = -ENODEV;
goto err_gmac;
}
@@ -1084,7 +1085,8 @@ static int xlr_net_probe(struct platform_device *pdev)
if (strcmp(res->name, "gmac") == 0) {
err = xlr_gmac_init(priv, pdev);
if (err) {
- pr_err("gmac%d init failed\n", priv->port_id);
+ dev_err(&pdev->dev, "gmac%d init failed\n",
+ priv->port_id);
goto err_gmac;
}
}
@@ -1097,8 +1099,9 @@ static int xlr_net_probe(struct platform_device *pdev)
err = register_netdev(ndev);
if (err) {
- pr_err("Registering netdev failed for gmac%d\n",
- priv->port_id);
+ dev_err(&pdev->dev,
+ "Registering netdev failed for gmac%d\n",
+ priv->port_id);
goto err_netdev;
}
platform_set_drvdata(pdev, priv);
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
index 7ae8874daee8..f76e16cfd15d 100644
--- a/drivers/staging/netlogic/xlr_net.h
+++ b/drivers/staging/netlogic/xlr_net.h
@@ -277,332 +277,332 @@
#define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0
#define R_HASH_TABLE_VECTOR 0x30
#define R_TX_CONTROL 0x0A0
-#define O_TX_CONTROL__Tx15Halt 31
-#define O_TX_CONTROL__Tx14Halt 30
-#define O_TX_CONTROL__Tx13Halt 29
-#define O_TX_CONTROL__Tx12Halt 28
-#define O_TX_CONTROL__Tx11Halt 27
-#define O_TX_CONTROL__Tx10Halt 26
-#define O_TX_CONTROL__Tx9Halt 25
-#define O_TX_CONTROL__Tx8Halt 24
-#define O_TX_CONTROL__Tx7Halt 23
-#define O_TX_CONTROL__Tx6Halt 22
-#define O_TX_CONTROL__Tx5Halt 21
-#define O_TX_CONTROL__Tx4Halt 20
-#define O_TX_CONTROL__Tx3Halt 19
-#define O_TX_CONTROL__Tx2Halt 18
-#define O_TX_CONTROL__Tx1Halt 17
-#define O_TX_CONTROL__Tx0Halt 16
-#define O_TX_CONTROL__TxIdle 15
-#define O_TX_CONTROL__TxEnable 14
-#define O_TX_CONTROL__TxThreshold 0
-#define W_TX_CONTROL__TxThreshold 14
+#define O_TX_CONTROL__TX15HALT 31
+#define O_TX_CONTROL__TX14HALT 30
+#define O_TX_CONTROL__TX13HALT 29
+#define O_TX_CONTROL__TX12HALT 28
+#define O_TX_CONTROL__TX11HALT 27
+#define O_TX_CONTROL__TX10HALT 26
+#define O_TX_CONTROL__TX9HALT 25
+#define O_TX_CONTROL__TX8HALT 24
+#define O_TX_CONTROL__TX7HALT 23
+#define O_TX_CONTROL__TX6HALT 22
+#define O_TX_CONTROL__TX5HALT 21
+#define O_TX_CONTROL__TX4HALT 20
+#define O_TX_CONTROL__TX3HALT 19
+#define O_TX_CONTROL__TX2HALT 18
+#define O_TX_CONTROL__TX1HALT 17
+#define O_TX_CONTROL__TX0HALT 16
+#define O_TX_CONTROL__TXIDLE 15
+#define O_TX_CONTROL__TXENABLE 14
+#define O_TX_CONTROL__TXTHRESHOLD 0
+#define W_TX_CONTROL__TXTHRESHOLD 14
#define R_RX_CONTROL 0x0A1
#define O_RX_CONTROL__RGMII 10
-#define O_RX_CONTROL__SoftReset 2
-#define O_RX_CONTROL__RxHalt 1
-#define O_RX_CONTROL__RxEnable 0
+#define O_RX_CONTROL__SOFTRESET 2
+#define O_RX_CONTROL__RXHALT 1
+#define O_RX_CONTROL__RXENABLE 0
#define R_DESC_PACK_CTRL 0x0A2
-#define O_DESC_PACK_CTRL__ByteOffset 17
-#define W_DESC_PACK_CTRL__ByteOffset 3
-#define O_DESC_PACK_CTRL__PrePadEnable 16
-#define O_DESC_PACK_CTRL__MaxEntry 14
-#define W_DESC_PACK_CTRL__MaxEntry 2
-#define O_DESC_PACK_CTRL__RegularSize 0
-#define W_DESC_PACK_CTRL__RegularSize 14
+#define O_DESC_PACK_CTRL__BYTEOFFSET 17
+#define W_DESC_PACK_CTRL__BYTEOFFSET 3
+#define O_DESC_PACK_CTRL__PREPADENABLE 16
+#define O_DESC_PACK_CTRL__MAXENTRY 14
+#define W_DESC_PACK_CTRL__MAXENTRY 2
+#define O_DESC_PACK_CTRL__REGULARSIZE 0
+#define W_DESC_PACK_CTRL__REGULARSIZE 14
#define R_STATCTRL 0x0A3
-#define O_STATCTRL__OverFlowEn 4
+#define O_STATCTRL__OVERFLOWEN 4
#define O_STATCTRL__GIG 3
-#define O_STATCTRL__Sten 2
-#define O_STATCTRL__ClrCnt 1
-#define O_STATCTRL__AutoZ 0
+#define O_STATCTRL__STEN 2
+#define O_STATCTRL__CLRCNT 1
+#define O_STATCTRL__AUTOZ 0
#define R_L2ALLOCCTRL 0x0A4
-#define O_L2ALLOCCTRL__TxL2Allocate 9
-#define W_L2ALLOCCTRL__TxL2Allocate 9
-#define O_L2ALLOCCTRL__RxL2Allocate 0
-#define W_L2ALLOCCTRL__RxL2Allocate 9
+#define O_L2ALLOCCTRL__TXL2ALLOCATE 9
+#define W_L2ALLOCCTRL__TXL2ALLOCATE 9
+#define O_L2ALLOCCTRL__RXL2ALLOCATE 0
+#define W_L2ALLOCCTRL__RXL2ALLOCATE 9
#define R_INTMASK 0x0A5
-#define O_INTMASK__Spi4TxError 28
-#define O_INTMASK__Spi4RxError 27
-#define O_INTMASK__RGMIIHalfDupCollision 27
-#define O_INTMASK__Abort 26
-#define O_INTMASK__Underrun 25
-#define O_INTMASK__DiscardPacket 24
-#define O_INTMASK__AsyncFifoFull 23
-#define O_INTMASK__TagFull 22
-#define O_INTMASK__Class3Full 21
-#define O_INTMASK__C3EarlyFull 20
-#define O_INTMASK__Class2Full 19
-#define O_INTMASK__C2EarlyFull 18
-#define O_INTMASK__Class1Full 17
-#define O_INTMASK__C1EarlyFull 16
-#define O_INTMASK__Class0Full 15
-#define O_INTMASK__C0EarlyFull 14
-#define O_INTMASK__RxDataFull 13
-#define O_INTMASK__RxEarlyFull 12
-#define O_INTMASK__RFreeEmpty 9
-#define O_INTMASK__RFEarlyEmpty 8
-#define O_INTMASK__P2PSpillEcc 7
-#define O_INTMASK__FreeDescFull 5
-#define O_INTMASK__FreeEarlyFull 4
-#define O_INTMASK__TxFetchError 3
-#define O_INTMASK__StatCarry 2
-#define O_INTMASK__MDInt 1
-#define O_INTMASK__TxIllegal 0
+#define O_INTMASK__SPI4TXERROR 28
+#define O_INTMASK__SPI4RXERROR 27
+#define O_INTMASK__RGMIIHALFDUPCOLLISION 27
+#define O_INTMASK__ABORT 26
+#define O_INTMASK__UNDERRUN 25
+#define O_INTMASK__DISCARDPACKET 24
+#define O_INTMASK__ASYNCFIFOFULL 23
+#define O_INTMASK__TAGFULL 22
+#define O_INTMASK__CLASS3FULL 21
+#define O_INTMASK__C3EARLYFULL 20
+#define O_INTMASK__CLASS2FULL 19
+#define O_INTMASK__C2EARLYFULL 18
+#define O_INTMASK__CLASS1FULL 17
+#define O_INTMASK__C1EARLYFULL 16
+#define O_INTMASK__CLASS0FULL 15
+#define O_INTMASK__C0EARLYFULL 14
+#define O_INTMASK__RXDATAFULL 13
+#define O_INTMASK__RXEARLYFULL 12
+#define O_INTMASK__RFREEEMPTY 9
+#define O_INTMASK__RFEARLYEMPTY 8
+#define O_INTMASK__P2PSPILLECC 7
+#define O_INTMASK__FREEDESCFULL 5
+#define O_INTMASK__FREEEARLYFULL 4
+#define O_INTMASK__TXFETCHERROR 3
+#define O_INTMASK__STATCARRY 2
+#define O_INTMASK__MDINT 1
+#define O_INTMASK__TXILLEGAL 0
#define R_INTREG 0x0A6
-#define O_INTREG__Spi4TxError 28
-#define O_INTREG__Spi4RxError 27
-#define O_INTREG__RGMIIHalfDupCollision 27
-#define O_INTREG__Abort 26
-#define O_INTREG__Underrun 25
-#define O_INTREG__DiscardPacket 24
-#define O_INTREG__AsyncFifoFull 23
-#define O_INTREG__TagFull 22
-#define O_INTREG__Class3Full 21
-#define O_INTREG__C3EarlyFull 20
-#define O_INTREG__Class2Full 19
-#define O_INTREG__C2EarlyFull 18
-#define O_INTREG__Class1Full 17
-#define O_INTREG__C1EarlyFull 16
-#define O_INTREG__Class0Full 15
-#define O_INTREG__C0EarlyFull 14
-#define O_INTREG__RxDataFull 13
-#define O_INTREG__RxEarlyFull 12
-#define O_INTREG__RFreeEmpty 9
-#define O_INTREG__RFEarlyEmpty 8
-#define O_INTREG__P2PSpillEcc 7
-#define O_INTREG__FreeDescFull 5
-#define O_INTREG__FreeEarlyFull 4
-#define O_INTREG__TxFetchError 3
-#define O_INTREG__StatCarry 2
-#define O_INTREG__MDInt 1
-#define O_INTREG__TxIllegal 0
+#define O_INTREG__SPI4TXERROR 28
+#define O_INTREG__SPI4RXERROR 27
+#define O_INTREG__RGMIIHALFDUPCOLLISION 27
+#define O_INTREG__ABORT 26
+#define O_INTREG__UNDERRUN 25
+#define O_INTREG__DISCARDPACKET 24
+#define O_INTREG__ASYNCFIFOFULL 23
+#define O_INTREG__TAGFULL 22
+#define O_INTREG__CLASS3FULL 21
+#define O_INTREG__C3EARLYFULL 20
+#define O_INTREG__CLASS2FULL 19
+#define O_INTREG__C2EARLYFULL 18
+#define O_INTREG__CLASS1FULL 17
+#define O_INTREG__C1EARLYFULL 16
+#define O_INTREG__CLASS0FULL 15
+#define O_INTREG__C0EARLYFULL 14
+#define O_INTREG__RXDATAFULL 13
+#define O_INTREG__RXEARLYFULL 12
+#define O_INTREG__RFREEEMPTY 9
+#define O_INTREG__RFEARLYEMPTY 8
+#define O_INTREG__P2PSPILLECC 7
+#define O_INTREG__FREEDESCFULL 5
+#define O_INTREG__FREEEARLYFULL 4
+#define O_INTREG__TXFETCHERROR 3
+#define O_INTREG__STATCARRY 2
+#define O_INTREG__MDINT 1
+#define O_INTREG__TXILLEGAL 0
#define R_TXRETRY 0x0A7
-#define O_TXRETRY__CollisionRetry 6
-#define O_TXRETRY__BusErrorRetry 5
-#define O_TXRETRY__UnderRunRetry 4
-#define O_TXRETRY__Retries 0
-#define W_TXRETRY__Retries 4
+#define O_TXRETRY__COLLISIONRETRY 6
+#define O_TXRETRY__BUSERRORRETRY 5
+#define O_TXRETRY__UNDERRUNRETRY 4
+#define O_TXRETRY__RETRIES 0
+#define W_TXRETRY__RETRIES 4
#define R_CORECONTROL 0x0A8
-#define O_CORECONTROL__ErrorThread 4
-#define W_CORECONTROL__ErrorThread 7
-#define O_CORECONTROL__Shutdown 2
-#define O_CORECONTROL__Speed 0
-#define W_CORECONTROL__Speed 2
+#define O_CORECONTROL__ERRORTHREAD 4
+#define W_CORECONTROL__ERRORTHREAD 7
+#define O_CORECONTROL__SHUTDOWN 2
+#define O_CORECONTROL__SPEED 0
+#define W_CORECONTROL__SPEED 2
#define R_BYTEOFFSET0 0x0A9
#define R_BYTEOFFSET1 0x0AA
#define R_L2TYPE_0 0x0F0
-#define O_L2TYPE__ExtraHdrProtoSize 26
-#define W_L2TYPE__ExtraHdrProtoSize 5
-#define O_L2TYPE__ExtraHdrProtoOffset 20
-#define W_L2TYPE__ExtraHdrProtoOffset 6
-#define O_L2TYPE__ExtraHeaderSize 14
-#define W_L2TYPE__ExtraHeaderSize 6
-#define O_L2TYPE__ProtoOffset 8
-#define W_L2TYPE__ProtoOffset 6
-#define O_L2TYPE__L2HdrOffset 2
-#define W_L2TYPE__L2HdrOffset 6
-#define O_L2TYPE__L2Proto 0
-#define W_L2TYPE__L2Proto 2
+#define O_L2TYPE__EXTRAHDRPROTOSIZE 26
+#define W_L2TYPE__EXTRAHDRPROTOSIZE 5
+#define O_L2TYPE__EXTRAHDRPROTOOFFSET 20
+#define W_L2TYPE__EXTRAHDRPROTOOFFSET 6
+#define O_L2TYPE__EXTRAHEADERSIZE 14
+#define W_L2TYPE__EXTRAHEADERSIZE 6
+#define O_L2TYPE__PROTOOFFSET 8
+#define W_L2TYPE__PROTOOFFSET 6
+#define O_L2TYPE__L2HDROFFSET 2
+#define W_L2TYPE__L2HDROFFSET 6
+#define O_L2TYPE__L2PROTO 0
+#define W_L2TYPE__L2PROTO 2
#define R_L2TYPE_1 0xF0
#define R_L2TYPE_2 0xF0
#define R_L2TYPE_3 0xF0
#define R_PARSERCONFIGREG 0x100
-#define O_PARSERCONFIGREG__CRCHashPoly 8
-#define W_PARSERCONFIGREG__CRCHashPoly 7
-#define O_PARSERCONFIGREG__PrePadOffset 4
-#define W_PARSERCONFIGREG__PrePadOffset 4
-#define O_PARSERCONFIGREG__UseCAM 2
-#define O_PARSERCONFIGREG__UseHASH 1
-#define O_PARSERCONFIGREG__UseProto 0
+#define O_PARSERCONFIGREG__CRCHASHPOLY 8
+#define W_PARSERCONFIGREG__CRCHASHPOLY 7
+#define O_PARSERCONFIGREG__PREPADOFFSET 4
+#define W_PARSERCONFIGREG__PREPADOFFSET 4
+#define O_PARSERCONFIGREG__USECAM 2
+#define O_PARSERCONFIGREG__USEHASH 1
+#define O_PARSERCONFIGREG__USEPROTO 0
#define R_L3CTABLE 0x140
-#define O_L3CTABLE__Offset0 25
-#define W_L3CTABLE__Offset0 7
-#define O_L3CTABLE__Len0 21
-#define W_L3CTABLE__Len0 4
-#define O_L3CTABLE__Offset1 14
-#define W_L3CTABLE__Offset1 7
-#define O_L3CTABLE__Len1 10
-#define W_L3CTABLE__Len1 4
-#define O_L3CTABLE__Offset2 4
-#define W_L3CTABLE__Offset2 6
-#define O_L3CTABLE__Len2 0
-#define W_L3CTABLE__Len2 4
-#define O_L3CTABLE__L3HdrOffset 26
-#define W_L3CTABLE__L3HdrOffset 6
-#define O_L3CTABLE__L4ProtoOffset 20
-#define W_L3CTABLE__L4ProtoOffset 6
-#define O_L3CTABLE__IPChksumCompute 19
-#define O_L3CTABLE__L4Classify 18
-#define O_L3CTABLE__L2Proto 16
-#define W_L3CTABLE__L2Proto 2
-#define O_L3CTABLE__L3ProtoKey 0
-#define W_L3CTABLE__L3ProtoKey 16
+#define O_L3CTABLE__OFFSET0 25
+#define W_L3CTABLE__OFFSET0 7
+#define O_L3CTABLE__LEN0 21
+#define W_L3CTABLE__LEN0 4
+#define O_L3CTABLE__OFFSET1 14
+#define W_L3CTABLE__OFFSET1 7
+#define O_L3CTABLE__LEN1 10
+#define W_L3CTABLE__LEN1 4
+#define O_L3CTABLE__OFFSET2 4
+#define W_L3CTABLE__OFFSET2 6
+#define O_L3CTABLE__LEN2 0
+#define W_L3CTABLE__LEN2 4
+#define O_L3CTABLE__L3HDROFFSET 26
+#define W_L3CTABLE__L3HDROFFSET 6
+#define O_L3CTABLE__L4PROTOOFFSET 20
+#define W_L3CTABLE__L4PROTOOFFSET 6
+#define O_L3CTABLE__IPCHKSUMCOMPUTE 19
+#define O_L3CTABLE__L4CLASSIFY 18
+#define O_L3CTABLE__L2PROTO 16
+#define W_L3CTABLE__L2PROTO 2
+#define O_L3CTABLE__L3PROTOKEY 0
+#define W_L3CTABLE__L3PROTOKEY 16
#define R_L4CTABLE 0x160
-#define O_L4CTABLE__Offset0 21
-#define W_L4CTABLE__Offset0 6
-#define O_L4CTABLE__Len0 17
-#define W_L4CTABLE__Len0 4
-#define O_L4CTABLE__Offset1 11
-#define W_L4CTABLE__Offset1 6
-#define O_L4CTABLE__Len1 7
-#define W_L4CTABLE__Len1 4
-#define O_L4CTABLE__TCPChksumEnable 0
+#define O_L4CTABLE__OFFSET0 21
+#define W_L4CTABLE__OFFSET0 6
+#define O_L4CTABLE__LEN0 17
+#define W_L4CTABLE__LEN0 4
+#define O_L4CTABLE__OFFSET1 11
+#define W_L4CTABLE__OFFSET1 6
+#define O_L4CTABLE__LEN1 7
+#define W_L4CTABLE__LEN1 4
+#define O_L4CTABLE__TCPCHKSUMENABLE 0
#define R_CAM4X128TABLE 0x172
-#define O_CAM4X128TABLE__ClassId 7
-#define W_CAM4X128TABLE__ClassId 2
-#define O_CAM4X128TABLE__BucketId 1
-#define W_CAM4X128TABLE__BucketId 6
-#define O_CAM4X128TABLE__UseBucket 0
+#define O_CAM4X128TABLE__CLASSID 7
+#define W_CAM4X128TABLE__CLASSID 2
+#define O_CAM4X128TABLE__BUCKETID 1
+#define W_CAM4X128TABLE__BUCKETID 6
+#define O_CAM4X128TABLE__USEBUCKET 0
#define R_CAM4X128KEY 0x180
#define R_TRANSLATETABLE 0x1A0
#define R_DMACR0 0x200
-#define O_DMACR0__Data0WrMaxCr 27
-#define W_DMACR0__Data0WrMaxCr 3
-#define O_DMACR0__Data0RdMaxCr 24
-#define W_DMACR0__Data0RdMaxCr 3
-#define O_DMACR0__Data1WrMaxCr 21
-#define W_DMACR0__Data1WrMaxCr 3
-#define O_DMACR0__Data1RdMaxCr 18
-#define W_DMACR0__Data1RdMaxCr 3
-#define O_DMACR0__Data2WrMaxCr 15
-#define W_DMACR0__Data2WrMaxCr 3
-#define O_DMACR0__Data2RdMaxCr 12
-#define W_DMACR0__Data2RdMaxCr 3
-#define O_DMACR0__Data3WrMaxCr 9
-#define W_DMACR0__Data3WrMaxCr 3
-#define O_DMACR0__Data3RdMaxCr 6
-#define W_DMACR0__Data3RdMaxCr 3
-#define O_DMACR0__Data4WrMaxCr 3
-#define W_DMACR0__Data4WrMaxCr 3
-#define O_DMACR0__Data4RdMaxCr 0
-#define W_DMACR0__Data4RdMaxCr 3
+#define O_DMACR0__DATA0WRMAXCR 27
+#define W_DMACR0__DATA0WRMAXCR 3
+#define O_DMACR0__DATA0RDMAXCR 24
+#define W_DMACR0__DATA0RDMAXCR 3
+#define O_DMACR0__DATA1WRMAXCR 21
+#define W_DMACR0__DATA1WRMAXCR 3
+#define O_DMACR0__DATA1RDMAXCR 18
+#define W_DMACR0__DATA1RDMAXCR 3
+#define O_DMACR0__DATA2WRMAXCR 15
+#define W_DMACR0__DATA2WRMAXCR 3
+#define O_DMACR0__DATA2RDMAXCR 12
+#define W_DMACR0__DATA2RDMAXCR 3
+#define O_DMACR0__DATA3WRMAXCR 9
+#define W_DMACR0__DATA3WRMAXCR 3
+#define O_DMACR0__DATA3RDMAXCR 6
+#define W_DMACR0__DATA3RDMAXCR 3
+#define O_DMACR0__DATA4WRMAXCR 3
+#define W_DMACR0__DATA4WRMAXCR 3
+#define O_DMACR0__DATA4RDMAXCR 0
+#define W_DMACR0__DATA4RDMAXCR 3
#define R_DMACR1 0x201
-#define O_DMACR1__Data5WrMaxCr 27
-#define W_DMACR1__Data5WrMaxCr 3
-#define O_DMACR1__Data5RdMaxCr 24
-#define W_DMACR1__Data5RdMaxCr 3
-#define O_DMACR1__Data6WrMaxCr 21
-#define W_DMACR1__Data6WrMaxCr 3
-#define O_DMACR1__Data6RdMaxCr 18
-#define W_DMACR1__Data6RdMaxCr 3
-#define O_DMACR1__Data7WrMaxCr 15
-#define W_DMACR1__Data7WrMaxCr 3
-#define O_DMACR1__Data7RdMaxCr 12
-#define W_DMACR1__Data7RdMaxCr 3
-#define O_DMACR1__Data8WrMaxCr 9
-#define W_DMACR1__Data8WrMaxCr 3
-#define O_DMACR1__Data8RdMaxCr 6
-#define W_DMACR1__Data8RdMaxCr 3
-#define O_DMACR1__Data9WrMaxCr 3
-#define W_DMACR1__Data9WrMaxCr 3
-#define O_DMACR1__Data9RdMaxCr 0
-#define W_DMACR1__Data9RdMaxCr 3
+#define O_DMACR1__DATA5WRMAXCR 27
+#define W_DMACR1__DATA5WRMAXCR 3
+#define O_DMACR1__DATA5RDMAXCR 24
+#define W_DMACR1__DATA5RDMAXCR 3
+#define O_DMACR1__DATA6WRMAXCR 21
+#define W_DMACR1__DATA6WRMAXCR 3
+#define O_DMACR1__DATA6RDMAXCR 18
+#define W_DMACR1__DATA6RDMAXCR 3
+#define O_DMACR1__DATA7WRMAXCR 15
+#define W_DMACR1__DATA7WRMAXCR 3
+#define O_DMACR1__DATA7RDMAXCR 12
+#define W_DMACR1__DATA7RDMAXCR 3
+#define O_DMACR1__DATA8WRMAXCR 9
+#define W_DMACR1__DATA8WRMAXCR 3
+#define O_DMACR1__DATA8RDMAXCR 6
+#define W_DMACR1__DATA8RDMAXCR 3
+#define O_DMACR1__DATA9WRMAXCR 3
+#define W_DMACR1__DATA9WRMAXCR 3
+#define O_DMACR1__DATA9RDMAXCR 0
+#define W_DMACR1__DATA9RDMAXCR 3
#define R_DMACR2 0x202
-#define O_DMACR2__Data10WrMaxCr 27
-#define W_DMACR2__Data10WrMaxCr 3
-#define O_DMACR2__Data10RdMaxCr 24
-#define W_DMACR2__Data10RdMaxCr 3
-#define O_DMACR2__Data11WrMaxCr 21
-#define W_DMACR2__Data11WrMaxCr 3
-#define O_DMACR2__Data11RdMaxCr 18
-#define W_DMACR2__Data11RdMaxCr 3
-#define O_DMACR2__Data12WrMaxCr 15
-#define W_DMACR2__Data12WrMaxCr 3
-#define O_DMACR2__Data12RdMaxCr 12
-#define W_DMACR2__Data12RdMaxCr 3
-#define O_DMACR2__Data13WrMaxCr 9
-#define W_DMACR2__Data13WrMaxCr 3
-#define O_DMACR2__Data13RdMaxCr 6
-#define W_DMACR2__Data13RdMaxCr 3
-#define O_DMACR2__Data14WrMaxCr 3
-#define W_DMACR2__Data14WrMaxCr 3
-#define O_DMACR2__Data14RdMaxCr 0
-#define W_DMACR2__Data14RdMaxCr 3
+#define O_DMACR2__DATA10WRMAXCR 27
+#define W_DMACR2__DATA10WRMAXCR 3
+#define O_DMACR2__DATA10RDMAXCR 24
+#define W_DMACR2__DATA10RDMAXCR 3
+#define O_DMACR2__DATA11WRMAXCR 21
+#define W_DMACR2__DATA11WRMAXCR 3
+#define O_DMACR2__DATA11RDMAXCR 18
+#define W_DMACR2__DATA11RDMAXCR 3
+#define O_DMACR2__DATA12WRMAXCR 15
+#define W_DMACR2__DATA12WRMAXCR 3
+#define O_DMACR2__DATA12RDMAXCR 12
+#define W_DMACR2__DATA12RDMAXCR 3
+#define O_DMACR2__DATA13WRMAXCR 9
+#define W_DMACR2__DATA13WRMAXCR 3
+#define O_DMACR2__DATA13RDMAXCR 6
+#define W_DMACR2__DATA13RDMAXCR 3
+#define O_DMACR2__DATA14WRMAXCR 3
+#define W_DMACR2__DATA14WRMAXCR 3
+#define O_DMACR2__DATA14RDMAXCR 0
+#define W_DMACR2__DATA14RDMAXCR 3
#define R_DMACR3 0x203
-#define O_DMACR3__Data15WrMaxCr 27
-#define W_DMACR3__Data15WrMaxCr 3
-#define O_DMACR3__Data15RdMaxCr 24
-#define W_DMACR3__Data15RdMaxCr 3
-#define O_DMACR3__SpClassWrMaxCr 21
-#define W_DMACR3__SpClassWrMaxCr 3
-#define O_DMACR3__SpClassRdMaxCr 18
-#define W_DMACR3__SpClassRdMaxCr 3
-#define O_DMACR3__JumFrInWrMaxCr 15
-#define W_DMACR3__JumFrInWrMaxCr 3
-#define O_DMACR3__JumFrInRdMaxCr 12
-#define W_DMACR3__JumFrInRdMaxCr 3
-#define O_DMACR3__RegFrInWrMaxCr 9
-#define W_DMACR3__RegFrInWrMaxCr 3
-#define O_DMACR3__RegFrInRdMaxCr 6
-#define W_DMACR3__RegFrInRdMaxCr 3
-#define O_DMACR3__FrOutWrMaxCr 3
-#define W_DMACR3__FrOutWrMaxCr 3
-#define O_DMACR3__FrOutRdMaxCr 0
-#define W_DMACR3__FrOutRdMaxCr 3
+#define O_DMACR3__DATA15WRMAXCR 27
+#define W_DMACR3__DATA15WRMAXCR 3
+#define O_DMACR3__DATA15RDMAXCR 24
+#define W_DMACR3__DATA15RDMAXCR 3
+#define O_DMACR3__SPCLASSWRMAXCR 21
+#define W_DMACR3__SPCLASSWRMAXCR 3
+#define O_DMACR3__SPCLASSRDMAXCR 18
+#define W_DMACR3__SPCLASSRDMAXCR 3
+#define O_DMACR3__JUMFRINWRMAXCR 15
+#define W_DMACR3__JUMFRINWRMAXCR 3
+#define O_DMACR3__JUMFRINRDMAXCR 12
+#define W_DMACR3__JUMFRINRDMAXCR 3
+#define O_DMACR3__REGFRINWRMAXCR 9
+#define W_DMACR3__REGFRINWRMAXCR 3
+#define O_DMACR3__REGFRINRDMAXCR 6
+#define W_DMACR3__REGFRINRDMAXCR 3
+#define O_DMACR3__FROUTWRMAXCR 3
+#define W_DMACR3__FROUTWRMAXCR 3
+#define O_DMACR3__FROUTRDMAXCR 0
+#define W_DMACR3__FROUTRDMAXCR 3
#define R_REG_FRIN_SPILL_MEM_START_0 0x204
-#define O_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 0
-#define W_REG_FRIN_SPILL_MEM_START_0__RegFrInSpillMemStart0 32
+#define O_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 0
+#define W_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 32
#define R_REG_FRIN_SPILL_MEM_START_1 0x205
-#define O_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 0
-#define W_REG_FRIN_SPILL_MEM_START_1__RegFrInSpillMemStart1 3
+#define O_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 0
+#define W_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 3
#define R_REG_FRIN_SPILL_MEM_SIZE 0x206
-#define O_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 0
-#define W_REG_FRIN_SPILL_MEM_SIZE__RegFrInSpillMemSize 32
+#define O_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 0
+#define W_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 32
#define R_FROUT_SPILL_MEM_START_0 0x207
-#define O_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 0
-#define W_FROUT_SPILL_MEM_START_0__FrOutSpillMemStart0 32
+#define O_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 0
+#define W_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 32
#define R_FROUT_SPILL_MEM_START_1 0x208
-#define O_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 0
-#define W_FROUT_SPILL_MEM_START_1__FrOutSpillMemStart1 3
+#define O_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 0
+#define W_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 3
#define R_FROUT_SPILL_MEM_SIZE 0x209
-#define O_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 0
-#define W_FROUT_SPILL_MEM_SIZE__FrOutSpillMemSize 32
+#define O_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 0
+#define W_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 32
#define R_CLASS0_SPILL_MEM_START_0 0x20A
-#define O_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 0
-#define W_CLASS0_SPILL_MEM_START_0__Class0SpillMemStart0 32
+#define O_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 0
+#define W_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 32
#define R_CLASS0_SPILL_MEM_START_1 0x20B
-#define O_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 0
-#define W_CLASS0_SPILL_MEM_START_1__Class0SpillMemStart1 3
+#define O_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 0
+#define W_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 3
#define R_CLASS0_SPILL_MEM_SIZE 0x20C
-#define O_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 0
-#define W_CLASS0_SPILL_MEM_SIZE__Class0SpillMemSize 32
+#define O_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 0
+#define W_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 32
#define R_JUMFRIN_SPILL_MEM_START_0 0x20D
-#define O_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 0
-#define W_JUMFRIN_SPILL_MEM_START_0__JumFrInSpillMemStar0 32
+#define O_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 0
+#define W_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 32
#define R_JUMFRIN_SPILL_MEM_START_1 0x20E
-#define O_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 0
-#define W_JUMFRIN_SPILL_MEM_START_1__JumFrInSpillMemStart1 3
+#define O_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 0
+#define W_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 3
#define R_JUMFRIN_SPILL_MEM_SIZE 0x20F
-#define O_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 0
-#define W_JUMFRIN_SPILL_MEM_SIZE__JumFrInSpillMemSize 32
+#define O_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 0
+#define W_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 32
#define R_CLASS1_SPILL_MEM_START_0 0x210
-#define O_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 0
-#define W_CLASS1_SPILL_MEM_START_0__Class1SpillMemStart0 32
+#define O_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 0
+#define W_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 32
#define R_CLASS1_SPILL_MEM_START_1 0x211
-#define O_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 0
-#define W_CLASS1_SPILL_MEM_START_1__Class1SpillMemStart1 3
+#define O_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 0
+#define W_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 3
#define R_CLASS1_SPILL_MEM_SIZE 0x212
-#define O_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 0
-#define W_CLASS1_SPILL_MEM_SIZE__Class1SpillMemSize 32
+#define O_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 0
+#define W_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 32
#define R_CLASS2_SPILL_MEM_START_0 0x213
-#define O_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 0
-#define W_CLASS2_SPILL_MEM_START_0__Class2SpillMemStart0 32
+#define O_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 0
+#define W_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 32
#define R_CLASS2_SPILL_MEM_START_1 0x214
-#define O_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 0
-#define W_CLASS2_SPILL_MEM_START_1__Class2SpillMemStart1 3
+#define O_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 0
+#define W_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 3
#define R_CLASS2_SPILL_MEM_SIZE 0x215
-#define O_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 0
-#define W_CLASS2_SPILL_MEM_SIZE__Class2SpillMemSize 32
+#define O_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 0
+#define W_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 32
#define R_CLASS3_SPILL_MEM_START_0 0x216
-#define O_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 0
-#define W_CLASS3_SPILL_MEM_START_0__Class3SpillMemStart0 32
+#define O_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 0
+#define W_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 32
#define R_CLASS3_SPILL_MEM_START_1 0x217
-#define O_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 0
-#define W_CLASS3_SPILL_MEM_START_1__Class3SpillMemStart1 3
+#define O_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 0
+#define W_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 3
#define R_CLASS3_SPILL_MEM_SIZE 0x218
-#define O_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 0
-#define W_CLASS3_SPILL_MEM_SIZE__Class3SpillMemSize 32
+#define O_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 0
+#define W_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 32
#define R_REG_FRIN1_SPILL_MEM_START_0 0x219
#define R_REG_FRIN1_SPILL_MEM_START_1 0x21a
#define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b
@@ -679,244 +679,244 @@
#define O_SPISTRV3__EG_STRV_THRESH_15 0
#define W_SPISTRV3__EG_STRV_THRESH_15 7
#define R_TXDATAFIFO0 0x221
-#define O_TXDATAFIFO0__Tx0DataFifoStart 24
-#define W_TXDATAFIFO0__Tx0DataFifoStart 7
-#define O_TXDATAFIFO0__Tx0DataFifoSize 16
-#define W_TXDATAFIFO0__Tx0DataFifoSize 7
-#define O_TXDATAFIFO0__Tx1DataFifoStart 8
-#define W_TXDATAFIFO0__Tx1DataFifoStart 7
-#define O_TXDATAFIFO0__Tx1DataFifoSize 0
-#define W_TXDATAFIFO0__Tx1DataFifoSize 7
+#define O_TXDATAFIFO0__TX0DATAFIFOSTART 24
+#define W_TXDATAFIFO0__TX0DATAFIFOSTART 7
+#define O_TXDATAFIFO0__TX0DATAFIFOSIZE 16
+#define W_TXDATAFIFO0__TX0DATAFIFOSIZE 7
+#define O_TXDATAFIFO0__TX1DATAFIFOSTART 8
+#define W_TXDATAFIFO0__TX1DATAFIFOSTART 7
+#define O_TXDATAFIFO0__TX1DATAFIFOSIZE 0
+#define W_TXDATAFIFO0__TX1DATAFIFOSIZE 7
#define R_TXDATAFIFO1 0x222
-#define O_TXDATAFIFO1__Tx2DataFifoStart 24
-#define W_TXDATAFIFO1__Tx2DataFifoStart 7
-#define O_TXDATAFIFO1__Tx2DataFifoSize 16
-#define W_TXDATAFIFO1__Tx2DataFifoSize 7
-#define O_TXDATAFIFO1__Tx3DataFifoStart 8
-#define W_TXDATAFIFO1__Tx3DataFifoStart 7
-#define O_TXDATAFIFO1__Tx3DataFifoSize 0
-#define W_TXDATAFIFO1__Tx3DataFifoSize 7
+#define O_TXDATAFIFO1__TX2DATAFIFOSTART 24
+#define W_TXDATAFIFO1__TX2DATAFIFOSTART 7
+#define O_TXDATAFIFO1__TX2DATAFIFOSIZE 16
+#define W_TXDATAFIFO1__TX2DATAFIFOSIZE 7
+#define O_TXDATAFIFO1__TX3DATAFIFOSTART 8
+#define W_TXDATAFIFO1__TX3DATAFIFOSTART 7
+#define O_TXDATAFIFO1__TX3DATAFIFOSIZE 0
+#define W_TXDATAFIFO1__TX3DATAFIFOSIZE 7
#define R_TXDATAFIFO2 0x223
-#define O_TXDATAFIFO2__Tx4DataFifoStart 24
-#define W_TXDATAFIFO2__Tx4DataFifoStart 7
-#define O_TXDATAFIFO2__Tx4DataFifoSize 16
-#define W_TXDATAFIFO2__Tx4DataFifoSize 7
-#define O_TXDATAFIFO2__Tx5DataFifoStart 8
-#define W_TXDATAFIFO2__Tx5DataFifoStart 7
-#define O_TXDATAFIFO2__Tx5DataFifoSize 0
-#define W_TXDATAFIFO2__Tx5DataFifoSize 7
+#define O_TXDATAFIFO2__TX4DATAFIFOSTART 24
+#define W_TXDATAFIFO2__TX4DATAFIFOSTART 7
+#define O_TXDATAFIFO2__TX4DATAFIFOSIZE 16
+#define W_TXDATAFIFO2__TX4DATAFIFOSIZE 7
+#define O_TXDATAFIFO2__TX5DATAFIFOSTART 8
+#define W_TXDATAFIFO2__TX5DATAFIFOSTART 7
+#define O_TXDATAFIFO2__TX5DATAFIFOSIZE 0
+#define W_TXDATAFIFO2__TX5DATAFIFOSIZE 7
#define R_TXDATAFIFO3 0x224
-#define O_TXDATAFIFO3__Tx6DataFifoStart 24
-#define W_TXDATAFIFO3__Tx6DataFifoStart 7
-#define O_TXDATAFIFO3__Tx6DataFifoSize 16
-#define W_TXDATAFIFO3__Tx6DataFifoSize 7
-#define O_TXDATAFIFO3__Tx7DataFifoStart 8
-#define W_TXDATAFIFO3__Tx7DataFifoStart 7
-#define O_TXDATAFIFO3__Tx7DataFifoSize 0
-#define W_TXDATAFIFO3__Tx7DataFifoSize 7
+#define O_TXDATAFIFO3__TX6DATAFIFOSTART 24
+#define W_TXDATAFIFO3__TX6DATAFIFOSTART 7
+#define O_TXDATAFIFO3__TX6DATAFIFOSIZE 16
+#define W_TXDATAFIFO3__TX6DATAFIFOSIZE 7
+#define O_TXDATAFIFO3__TX7DATAFIFOSTART 8
+#define W_TXDATAFIFO3__TX7DATAFIFOSTART 7
+#define O_TXDATAFIFO3__TX7DATAFIFOSIZE 0
+#define W_TXDATAFIFO3__TX7DATAFIFOSIZE 7
#define R_TXDATAFIFO4 0x225
-#define O_TXDATAFIFO4__Tx8DataFifoStart 24
-#define W_TXDATAFIFO4__Tx8DataFifoStart 7
-#define O_TXDATAFIFO4__Tx8DataFifoSize 16
-#define W_TXDATAFIFO4__Tx8DataFifoSize 7
-#define O_TXDATAFIFO4__Tx9DataFifoStart 8
-#define W_TXDATAFIFO4__Tx9DataFifoStart 7
-#define O_TXDATAFIFO4__Tx9DataFifoSize 0
-#define W_TXDATAFIFO4__Tx9DataFifoSize 7
+#define O_TXDATAFIFO4__TX8DATAFIFOSTART 24
+#define W_TXDATAFIFO4__TX8DATAFIFOSTART 7
+#define O_TXDATAFIFO4__TX8DATAFIFOSIZE 16
+#define W_TXDATAFIFO4__TX8DATAFIFOSIZE 7
+#define O_TXDATAFIFO4__TX9DATAFIFOSTART 8
+#define W_TXDATAFIFO4__TX9DATAFIFOSTART 7
+#define O_TXDATAFIFO4__TX9DATAFIFOSIZE 0
+#define W_TXDATAFIFO4__TX9DATAFIFOSIZE 7
#define R_TXDATAFIFO5 0x226
-#define O_TXDATAFIFO5__Tx10DataFifoStart 24
-#define W_TXDATAFIFO5__Tx10DataFifoStart 7
-#define O_TXDATAFIFO5__Tx10DataFifoSize 16
-#define W_TXDATAFIFO5__Tx10DataFifoSize 7
-#define O_TXDATAFIFO5__Tx11DataFifoStart 8
-#define W_TXDATAFIFO5__Tx11DataFifoStart 7
-#define O_TXDATAFIFO5__Tx11DataFifoSize 0
-#define W_TXDATAFIFO5__Tx11DataFifoSize 7
+#define O_TXDATAFIFO5__TX10DATAFIFOSTART 24
+#define W_TXDATAFIFO5__TX10DATAFIFOSTART 7
+#define O_TXDATAFIFO5__TX10DATAFIFOSIZE 16
+#define W_TXDATAFIFO5__TX10DATAFIFOSIZE 7
+#define O_TXDATAFIFO5__TX11DATAFIFOSTART 8
+#define W_TXDATAFIFO5__TX11DATAFIFOSTART 7
+#define O_TXDATAFIFO5__TX11DATAFIFOSIZE 0
+#define W_TXDATAFIFO5__TX11DATAFIFOSIZE 7
#define R_TXDATAFIFO6 0x227
-#define O_TXDATAFIFO6__Tx12DataFifoStart 24
-#define W_TXDATAFIFO6__Tx12DataFifoStart 7
-#define O_TXDATAFIFO6__Tx12DataFifoSize 16
-#define W_TXDATAFIFO6__Tx12DataFifoSize 7
-#define O_TXDATAFIFO6__Tx13DataFifoStart 8
-#define W_TXDATAFIFO6__Tx13DataFifoStart 7
-#define O_TXDATAFIFO6__Tx13DataFifoSize 0
-#define W_TXDATAFIFO6__Tx13DataFifoSize 7
+#define O_TXDATAFIFO6__TX12DATAFIFOSTART 24
+#define W_TXDATAFIFO6__TX12DATAFIFOSTART 7
+#define O_TXDATAFIFO6__TX12DATAFIFOSIZE 16
+#define W_TXDATAFIFO6__TX12DATAFIFOSIZE 7
+#define O_TXDATAFIFO6__TX13DATAFIFOSTART 8
+#define W_TXDATAFIFO6__TX13DATAFIFOSTART 7
+#define O_TXDATAFIFO6__TX13DATAFIFOSIZE 0
+#define W_TXDATAFIFO6__TX13DATAFIFOSIZE 7
#define R_TXDATAFIFO7 0x228
-#define O_TXDATAFIFO7__Tx14DataFifoStart 24
-#define W_TXDATAFIFO7__Tx14DataFifoStart 7
-#define O_TXDATAFIFO7__Tx14DataFifoSize 16
-#define W_TXDATAFIFO7__Tx14DataFifoSize 7
-#define O_TXDATAFIFO7__Tx15DataFifoStart 8
-#define W_TXDATAFIFO7__Tx15DataFifoStart 7
-#define O_TXDATAFIFO7__Tx15DataFifoSize 0
-#define W_TXDATAFIFO7__Tx15DataFifoSize 7
+#define O_TXDATAFIFO7__TX14DATAFIFOSTART 24
+#define W_TXDATAFIFO7__TX14DATAFIFOSTART 7
+#define O_TXDATAFIFO7__TX14DATAFIFOSIZE 16
+#define W_TXDATAFIFO7__TX14DATAFIFOSIZE 7
+#define O_TXDATAFIFO7__TX15DATAFIFOSTART 8
+#define W_TXDATAFIFO7__TX15DATAFIFOSTART 7
+#define O_TXDATAFIFO7__TX15DATAFIFOSIZE 0
+#define W_TXDATAFIFO7__TX15DATAFIFOSIZE 7
#define R_RXDATAFIFO0 0x229
-#define O_RXDATAFIFO0__Rx0DataFifoStart 24
-#define W_RXDATAFIFO0__Rx0DataFifoStart 7
-#define O_RXDATAFIFO0__Rx0DataFifoSize 16
-#define W_RXDATAFIFO0__Rx0DataFifoSize 7
-#define O_RXDATAFIFO0__Rx1DataFifoStart 8
-#define W_RXDATAFIFO0__Rx1DataFifoStart 7
-#define O_RXDATAFIFO0__Rx1DataFifoSize 0
-#define W_RXDATAFIFO0__Rx1DataFifoSize 7
+#define O_RXDATAFIFO0__RX0DATAFIFOSTART 24
+#define W_RXDATAFIFO0__RX0DATAFIFOSTART 7
+#define O_RXDATAFIFO0__RX0DATAFIFOSIZE 16
+#define W_RXDATAFIFO0__RX0DATAFIFOSIZE 7
+#define O_RXDATAFIFO0__RX1DATAFIFOSTART 8
+#define W_RXDATAFIFO0__RX1DATAFIFOSTART 7
+#define O_RXDATAFIFO0__RX1DATAFIFOSIZE 0
+#define W_RXDATAFIFO0__RX1DATAFIFOSIZE 7
#define R_RXDATAFIFO1 0x22A
-#define O_RXDATAFIFO1__Rx2DataFifoStart 24
-#define W_RXDATAFIFO1__Rx2DataFifoStart 7
-#define O_RXDATAFIFO1__Rx2DataFifoSize 16
-#define W_RXDATAFIFO1__Rx2DataFifoSize 7
-#define O_RXDATAFIFO1__Rx3DataFifoStart 8
-#define W_RXDATAFIFO1__Rx3DataFifoStart 7
-#define O_RXDATAFIFO1__Rx3DataFifoSize 0
-#define W_RXDATAFIFO1__Rx3DataFifoSize 7
+#define O_RXDATAFIFO1__RX2DATAFIFOSTART 24
+#define W_RXDATAFIFO1__RX2DATAFIFOSTART 7
+#define O_RXDATAFIFO1__RX2DATAFIFOSIZE 16
+#define W_RXDATAFIFO1__RX2DATAFIFOSIZE 7
+#define O_RXDATAFIFO1__RX3DATAFIFOSTART 8
+#define W_RXDATAFIFO1__RX3DATAFIFOSTART 7
+#define O_RXDATAFIFO1__RX3DATAFIFOSIZE 0
+#define W_RXDATAFIFO1__RX3DATAFIFOSIZE 7
#define R_RXDATAFIFO2 0x22B
-#define O_RXDATAFIFO2__Rx4DataFifoStart 24
-#define W_RXDATAFIFO2__Rx4DataFifoStart 7
-#define O_RXDATAFIFO2__Rx4DataFifoSize 16
-#define W_RXDATAFIFO2__Rx4DataFifoSize 7
-#define O_RXDATAFIFO2__Rx5DataFifoStart 8
-#define W_RXDATAFIFO2__Rx5DataFifoStart 7
-#define O_RXDATAFIFO2__Rx5DataFifoSize 0
-#define W_RXDATAFIFO2__Rx5DataFifoSize 7
+#define O_RXDATAFIFO2__RX4DATAFIFOSTART 24
+#define W_RXDATAFIFO2__RX4DATAFIFOSTART 7
+#define O_RXDATAFIFO2__RX4DATAFIFOSIZE 16
+#define W_RXDATAFIFO2__RX4DATAFIFOSIZE 7
+#define O_RXDATAFIFO2__RX5DATAFIFOSTART 8
+#define W_RXDATAFIFO2__RX5DATAFIFOSTART 7
+#define O_RXDATAFIFO2__RX5DATAFIFOSIZE 0
+#define W_RXDATAFIFO2__RX5DATAFIFOSIZE 7
#define R_RXDATAFIFO3 0x22C
-#define O_RXDATAFIFO3__Rx6DataFifoStart 24
-#define W_RXDATAFIFO3__Rx6DataFifoStart 7
-#define O_RXDATAFIFO3__Rx6DataFifoSize 16
-#define W_RXDATAFIFO3__Rx6DataFifoSize 7
-#define O_RXDATAFIFO3__Rx7DataFifoStart 8
-#define W_RXDATAFIFO3__Rx7DataFifoStart 7
-#define O_RXDATAFIFO3__Rx7DataFifoSize 0
-#define W_RXDATAFIFO3__Rx7DataFifoSize 7
+#define O_RXDATAFIFO3__RX6DATAFIFOSTART 24
+#define W_RXDATAFIFO3__RX6DATAFIFOSTART 7
+#define O_RXDATAFIFO3__RX6DATAFIFOSIZE 16
+#define W_RXDATAFIFO3__RX6DATAFIFOSIZE 7
+#define O_RXDATAFIFO3__RX7DATAFIFOSTART 8
+#define W_RXDATAFIFO3__RX7DATAFIFOSTART 7
+#define O_RXDATAFIFO3__RX7DATAFIFOSIZE 0
+#define W_RXDATAFIFO3__RX7DATAFIFOSIZE 7
#define R_RXDATAFIFO4 0x22D
-#define O_RXDATAFIFO4__Rx8DataFifoStart 24
-#define W_RXDATAFIFO4__Rx8DataFifoStart 7
-#define O_RXDATAFIFO4__Rx8DataFifoSize 16
-#define W_RXDATAFIFO4__Rx8DataFifoSize 7
-#define O_RXDATAFIFO4__Rx9DataFifoStart 8
-#define W_RXDATAFIFO4__Rx9DataFifoStart 7
-#define O_RXDATAFIFO4__Rx9DataFifoSize 0
-#define W_RXDATAFIFO4__Rx9DataFifoSize 7
+#define O_RXDATAFIFO4__RX8DATAFIFOSTART 24
+#define W_RXDATAFIFO4__RX8DATAFIFOSTART 7
+#define O_RXDATAFIFO4__RX8DATAFIFOSIZE 16
+#define W_RXDATAFIFO4__RX8DATAFIFOSIZE 7
+#define O_RXDATAFIFO4__RX9DATAFIFOSTART 8
+#define W_RXDATAFIFO4__RX9DATAFIFOSTART 7
+#define O_RXDATAFIFO4__RX9DATAFIFOSIZE 0
+#define W_RXDATAFIFO4__RX9DATAFIFOSIZE 7
#define R_RXDATAFIFO5 0x22E
-#define O_RXDATAFIFO5__Rx10DataFifoStart 24
-#define W_RXDATAFIFO5__Rx10DataFifoStart 7
-#define O_RXDATAFIFO5__Rx10DataFifoSize 16
-#define W_RXDATAFIFO5__Rx10DataFifoSize 7
-#define O_RXDATAFIFO5__Rx11DataFifoStart 8
-#define W_RXDATAFIFO5__Rx11DataFifoStart 7
-#define O_RXDATAFIFO5__Rx11DataFifoSize 0
-#define W_RXDATAFIFO5__Rx11DataFifoSize 7
+#define O_RXDATAFIFO5__RX10DATAFIFOSTART 24
+#define W_RXDATAFIFO5__RX10DATAFIFOSTART 7
+#define O_RXDATAFIFO5__RX10DATAFIFOSIZE 16
+#define W_RXDATAFIFO5__RX10DATAFIFOSIZE 7
+#define O_RXDATAFIFO5__RX11DATAFIFOSTART 8
+#define W_RXDATAFIFO5__RX11DATAFIFOSTART 7
+#define O_RXDATAFIFO5__RX11DATAFIFOSIZE 0
+#define W_RXDATAFIFO5__RX11DATAFIFOSIZE 7
#define R_RXDATAFIFO6 0x22F
-#define O_RXDATAFIFO6__Rx12DataFifoStart 24
-#define W_RXDATAFIFO6__Rx12DataFifoStart 7
-#define O_RXDATAFIFO6__Rx12DataFifoSize 16
-#define W_RXDATAFIFO6__Rx12DataFifoSize 7
-#define O_RXDATAFIFO6__Rx13DataFifoStart 8
-#define W_RXDATAFIFO6__Rx13DataFifoStart 7
-#define O_RXDATAFIFO6__Rx13DataFifoSize 0
-#define W_RXDATAFIFO6__Rx13DataFifoSize 7
+#define O_RXDATAFIFO6__RX12DATAFIFOSTART 24
+#define W_RXDATAFIFO6__RX12DATAFIFOSTART 7
+#define O_RXDATAFIFO6__RX12DATAFIFOSIZE 16
+#define W_RXDATAFIFO6__RX12DATAFIFOSIZE 7
+#define O_RXDATAFIFO6__RX13DATAFIFOSTART 8
+#define W_RXDATAFIFO6__RX13DATAFIFOSTART 7
+#define O_RXDATAFIFO6__RX13DATAFIFOSIZE 0
+#define W_RXDATAFIFO6__RX13DATAFIFOSIZE 7
#define R_RXDATAFIFO7 0x230
-#define O_RXDATAFIFO7__Rx14DataFifoStart 24
-#define W_RXDATAFIFO7__Rx14DataFifoStart 7
-#define O_RXDATAFIFO7__Rx14DataFifoSize 16
-#define W_RXDATAFIFO7__Rx14DataFifoSize 7
-#define O_RXDATAFIFO7__Rx15DataFifoStart 8
-#define W_RXDATAFIFO7__Rx15DataFifoStart 7
-#define O_RXDATAFIFO7__Rx15DataFifoSize 0
-#define W_RXDATAFIFO7__Rx15DataFifoSize 7
+#define O_RXDATAFIFO7__RX14DATAFIFOSTART 24
+#define W_RXDATAFIFO7__RX14DATAFIFOSTART 7
+#define O_RXDATAFIFO7__RX14DATAFIFOSIZE 16
+#define W_RXDATAFIFO7__RX14DATAFIFOSIZE 7
+#define O_RXDATAFIFO7__RX15DATAFIFOSTART 8
+#define W_RXDATAFIFO7__RX15DATAFIFOSTART 7
+#define O_RXDATAFIFO7__RX15DATAFIFOSIZE 0
+#define W_RXDATAFIFO7__RX15DATAFIFOSIZE 7
#define R_XGMACPADCALIBRATION 0x231
#define R_FREEQCARVE 0x233
#define R_SPI4STATICDELAY0 0x240
-#define O_SPI4STATICDELAY0__DataLine7 28
-#define W_SPI4STATICDELAY0__DataLine7 4
-#define O_SPI4STATICDELAY0__DataLine6 24
-#define W_SPI4STATICDELAY0__DataLine6 4
-#define O_SPI4STATICDELAY0__DataLine5 20
-#define W_SPI4STATICDELAY0__DataLine5 4
-#define O_SPI4STATICDELAY0__DataLine4 16
-#define W_SPI4STATICDELAY0__DataLine4 4
-#define O_SPI4STATICDELAY0__DataLine3 12
-#define W_SPI4STATICDELAY0__DataLine3 4
-#define O_SPI4STATICDELAY0__DataLine2 8
-#define W_SPI4STATICDELAY0__DataLine2 4
-#define O_SPI4STATICDELAY0__DataLine1 4
-#define W_SPI4STATICDELAY0__DataLine1 4
-#define O_SPI4STATICDELAY0__DataLine0 0
-#define W_SPI4STATICDELAY0__DataLine0 4
+#define O_SPI4STATICDELAY0__DATALINE7 28
+#define W_SPI4STATICDELAY0__DATALINE7 4
+#define O_SPI4STATICDELAY0__DATALINE6 24
+#define W_SPI4STATICDELAY0__DATALINE6 4
+#define O_SPI4STATICDELAY0__DATALINE5 20
+#define W_SPI4STATICDELAY0__DATALINE5 4
+#define O_SPI4STATICDELAY0__DATALINE4 16
+#define W_SPI4STATICDELAY0__DATALINE4 4
+#define O_SPI4STATICDELAY0__DATALINE3 12
+#define W_SPI4STATICDELAY0__DATALINE3 4
+#define O_SPI4STATICDELAY0__DATALINE2 8
+#define W_SPI4STATICDELAY0__DATALINE2 4
+#define O_SPI4STATICDELAY0__DATALINE1 4
+#define W_SPI4STATICDELAY0__DATALINE1 4
+#define O_SPI4STATICDELAY0__DATALINE0 0
+#define W_SPI4STATICDELAY0__DATALINE0 4
#define R_SPI4STATICDELAY1 0x241
-#define O_SPI4STATICDELAY1__DataLine15 28
-#define W_SPI4STATICDELAY1__DataLine15 4
-#define O_SPI4STATICDELAY1__DataLine14 24
-#define W_SPI4STATICDELAY1__DataLine14 4
-#define O_SPI4STATICDELAY1__DataLine13 20
-#define W_SPI4STATICDELAY1__DataLine13 4
-#define O_SPI4STATICDELAY1__DataLine12 16
-#define W_SPI4STATICDELAY1__DataLine12 4
-#define O_SPI4STATICDELAY1__DataLine11 12
-#define W_SPI4STATICDELAY1__DataLine11 4
-#define O_SPI4STATICDELAY1__DataLine10 8
-#define W_SPI4STATICDELAY1__DataLine10 4
-#define O_SPI4STATICDELAY1__DataLine9 4
-#define W_SPI4STATICDELAY1__DataLine9 4
-#define O_SPI4STATICDELAY1__DataLine8 0
-#define W_SPI4STATICDELAY1__DataLine8 4
+#define O_SPI4STATICDELAY1__DATALINE15 28
+#define W_SPI4STATICDELAY1__DATALINE15 4
+#define O_SPI4STATICDELAY1__DATALINE14 24
+#define W_SPI4STATICDELAY1__DATALINE14 4
+#define O_SPI4STATICDELAY1__DATALINE13 20
+#define W_SPI4STATICDELAY1__DATALINE13 4
+#define O_SPI4STATICDELAY1__DATALINE12 16
+#define W_SPI4STATICDELAY1__DATALINE12 4
+#define O_SPI4STATICDELAY1__DATALINE11 12
+#define W_SPI4STATICDELAY1__DATALINE11 4
+#define O_SPI4STATICDELAY1__DATALINE10 8
+#define W_SPI4STATICDELAY1__DATALINE10 4
+#define O_SPI4STATICDELAY1__DATALINE9 4
+#define W_SPI4STATICDELAY1__DATALINE9 4
+#define O_SPI4STATICDELAY1__DATALINE8 0
+#define W_SPI4STATICDELAY1__DATALINE8 4
#define R_SPI4STATICDELAY2 0x242
-#define O_SPI4STATICDELAY0__TxStat1 8
-#define W_SPI4STATICDELAY0__TxStat1 4
-#define O_SPI4STATICDELAY0__TxStat0 4
-#define W_SPI4STATICDELAY0__TxStat0 4
-#define O_SPI4STATICDELAY0__RxControl 0
-#define W_SPI4STATICDELAY0__RxControl 4
+#define O_SPI4STATICDELAY0__TXSTAT1 8
+#define W_SPI4STATICDELAY0__TXSTAT1 4
+#define O_SPI4STATICDELAY0__TXSTAT0 4
+#define W_SPI4STATICDELAY0__TXSTAT0 4
+#define O_SPI4STATICDELAY0__RXCONTROL 0
+#define W_SPI4STATICDELAY0__RXCONTROL 4
#define R_SPI4CONTROL 0x243
-#define O_SPI4CONTROL__StaticDelay 2
+#define O_SPI4CONTROL__STATICDELAY 2
#define O_SPI4CONTROL__LVDS_LVTTL 1
-#define O_SPI4CONTROL__SPI4Enable 0
+#define O_SPI4CONTROL__SPI4ENABLE 0
#define R_CLASSWATERMARKS 0x244
-#define O_CLASSWATERMARKS__Class0Watermark 24
-#define W_CLASSWATERMARKS__Class0Watermark 5
-#define O_CLASSWATERMARKS__Class1Watermark 16
-#define W_CLASSWATERMARKS__Class1Watermark 5
-#define O_CLASSWATERMARKS__Class3Watermark 0
-#define W_CLASSWATERMARKS__Class3Watermark 5
+#define O_CLASSWATERMARKS__CLASS0WATERMARK 24
+#define W_CLASSWATERMARKS__CLASS0WATERMARK 5
+#define O_CLASSWATERMARKS__CLASS1WATERMARK 16
+#define W_CLASSWATERMARKS__CLASS1WATERMARK 5
+#define O_CLASSWATERMARKS__CLASS3WATERMARK 0
+#define W_CLASSWATERMARKS__CLASS3WATERMARK 5
#define R_RXWATERMARKS1 0x245
-#define O_RXWATERMARKS__Rx0DataWatermark 24
-#define W_RXWATERMARKS__Rx0DataWatermark 7
-#define O_RXWATERMARKS__Rx1DataWatermark 16
-#define W_RXWATERMARKS__Rx1DataWatermark 7
-#define O_RXWATERMARKS__Rx3DataWatermark 0
-#define W_RXWATERMARKS__Rx3DataWatermark 7
+#define O_RXWATERMARKS__RX0DATAWATERMARK 24
+#define W_RXWATERMARKS__RX0DATAWATERMARK 7
+#define O_RXWATERMARKS__RX1DATAWATERMARK 16
+#define W_RXWATERMARKS__RX1DATAWATERMARK 7
+#define O_RXWATERMARKS__RX3DATAWATERMARK 0
+#define W_RXWATERMARKS__RX3DATAWATERMARK 7
#define R_RXWATERMARKS2 0x246
-#define O_RXWATERMARKS__Rx4DataWatermark 24
-#define W_RXWATERMARKS__Rx4DataWatermark 7
-#define O_RXWATERMARKS__Rx5DataWatermark 16
-#define W_RXWATERMARKS__Rx5DataWatermark 7
-#define O_RXWATERMARKS__Rx6DataWatermark 8
-#define W_RXWATERMARKS__Rx6DataWatermark 7
-#define O_RXWATERMARKS__Rx7DataWatermark 0
-#define W_RXWATERMARKS__Rx7DataWatermark 7
+#define O_RXWATERMARKS__RX4DATAWATERMARK 24
+#define W_RXWATERMARKS__RX4DATAWATERMARK 7
+#define O_RXWATERMARKS__RX5DATAWATERMARK 16
+#define W_RXWATERMARKS__RX5DATAWATERMARK 7
+#define O_RXWATERMARKS__RX6DATAWATERMARK 8
+#define W_RXWATERMARKS__RX6DATAWATERMARK 7
+#define O_RXWATERMARKS__RX7DATAWATERMARK 0
+#define W_RXWATERMARKS__RX7DATAWATERMARK 7
#define R_RXWATERMARKS3 0x247
-#define O_RXWATERMARKS__Rx8DataWatermark 24
-#define W_RXWATERMARKS__Rx8DataWatermark 7
-#define O_RXWATERMARKS__Rx9DataWatermark 16
-#define W_RXWATERMARKS__Rx9DataWatermark 7
-#define O_RXWATERMARKS__Rx10DataWatermark 8
-#define W_RXWATERMARKS__Rx10DataWatermark 7
-#define O_RXWATERMARKS__Rx11DataWatermark 0
-#define W_RXWATERMARKS__Rx11DataWatermark 7
+#define O_RXWATERMARKS__RX8DATAWATERMARK 24
+#define W_RXWATERMARKS__RX8DATAWATERMARK 7
+#define O_RXWATERMARKS__RX9DATAWATERMARK 16
+#define W_RXWATERMARKS__RX9DATAWATERMARK 7
+#define O_RXWATERMARKS__RX10DATAWATERMARK 8
+#define W_RXWATERMARKS__RX10DATAWATERMARK 7
+#define O_RXWATERMARKS__RX11DATAWATERMARK 0
+#define W_RXWATERMARKS__RX11DATAWATERMARK 7
#define R_RXWATERMARKS4 0x248
-#define O_RXWATERMARKS__Rx12DataWatermark 24
-#define W_RXWATERMARKS__Rx12DataWatermark 7
-#define O_RXWATERMARKS__Rx13DataWatermark 16
-#define W_RXWATERMARKS__Rx13DataWatermark 7
-#define O_RXWATERMARKS__Rx14DataWatermark 8
-#define W_RXWATERMARKS__Rx14DataWatermark 7
-#define O_RXWATERMARKS__Rx15DataWatermark 0
-#define W_RXWATERMARKS__Rx15DataWatermark 7
+#define O_RXWATERMARKS__RX12DATAWATERMARK 24
+#define W_RXWATERMARKS__RX12DATAWATERMARK 7
+#define O_RXWATERMARKS__RX13DATAWATERMARK 16
+#define W_RXWATERMARKS__RX13DATAWATERMARK 7
+#define O_RXWATERMARKS__RX14DATAWATERMARK 8
+#define W_RXWATERMARKS__RX14DATAWATERMARK 7
+#define O_RXWATERMARKS__RX15DATAWATERMARK 0
+#define W_RXWATERMARKS__RX15DATAWATERMARK 7
#define R_FREEWATERMARKS 0x249
-#define O_FREEWATERMARKS__FreeOutWatermark 16
-#define W_FREEWATERMARKS__FreeOutWatermark 16
-#define O_FREEWATERMARKS__JumFrWatermark 8
-#define W_FREEWATERMARKS__JumFrWatermark 7
-#define O_FREEWATERMARKS__RegFrWatermark 0
-#define W_FREEWATERMARKS__RegFrWatermark 7
+#define O_FREEWATERMARKS__FREEOUTWATERMARK 16
+#define W_FREEWATERMARKS__FREEOUTWATERMARK 16
+#define O_FREEWATERMARKS__JUMFRWATERMARK 8
+#define W_FREEWATERMARKS__JUMFRWATERMARK 7
+#define O_FREEWATERMARKS__REGFRWATERMARK 0
+#define W_FREEWATERMARKS__REGFRWATERMARK 7
#define R_EGRESSFIFOCARVINGSLOTS 0x24a
#define CTRL_RES0 0
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index e5ae42a0b44a..e4d85d9b4681 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -3,6 +3,4 @@ ToDo list (incomplete, unordered)
- move half of the nvec init stuff to i2c-tegra.c
- move event handling to nvec_events
- finish suspend/resume support
- - modifiy the sync_write method to return the received
- message in a variable (and return the error code).
- add support for more device implementations
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 4ae44a5168f9..9fda136b8e05 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -14,8 +14,6 @@
*
*/
-/* #define DEBUG */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
@@ -40,18 +38,18 @@
#include "nvec.h"
#define I2C_CNFG 0x00
-#define I2C_CNFG_PACKET_MODE_EN (1 << 10)
-#define I2C_CNFG_NEW_MASTER_SFM (1 << 11)
+#define I2C_CNFG_PACKET_MODE_EN BIT(10)
+#define I2C_CNFG_NEW_MASTER_SFM BIT(11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
#define I2C_SL_CNFG 0x20
-#define I2C_SL_NEWSL (1 << 2)
-#define I2C_SL_NACK (1 << 1)
-#define I2C_SL_RESP (1 << 0)
-#define I2C_SL_IRQ (1 << 3)
-#define END_TRANS (1 << 4)
-#define RCVD (1 << 2)
-#define RNW (1 << 1)
+#define I2C_SL_NEWSL BIT(2)
+#define I2C_SL_NACK BIT(1)
+#define I2C_SL_RESP BIT(0)
+#define I2C_SL_IRQ BIT(3)
+#define END_TRANS BIT(4)
+#define RCVD BIT(2)
+#define RNW BIT(1)
#define I2C_SL_RCVD 0x24
#define I2C_SL_STATUS 0x28
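The BIT() conversions above are a straight substitution: the kernel helper from linux/bits.h expands to an unsigned-long shift, which also avoids the signed-overflow pitfall of (1 << 31):

	#define BIT(nr) (1UL << (nr))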
@@ -143,14 +141,14 @@ static int nvec_status_notifier(struct notifier_block *nb,
{
struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
nvec_status_notifier);
- unsigned char *msg = (unsigned char *)data;
+ unsigned char *msg = data;
if (event_type != NVEC_CNTL)
return NOTIFY_DONE;
dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
- msg, msg[1] + 2, true);
+ msg, msg[1] + 2, true);
return NOTIFY_OK;
}
@@ -259,7 +257,7 @@ static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
* occurred, the nvec driver may print an error.
*/
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
- short size)
+ short size)
{
struct nvec_msg *msg;
unsigned long flags;
@@ -288,46 +286,49 @@ EXPORT_SYMBOL(nvec_write_async);
* @nvec: An &struct nvec_chip
* @data: The data to write
* @size: The size of @data
+ * @msg: The response message received
*
* This is similar to nvec_write_async(), but waits for the
* request to be answered before returning. This function
* uses a mutex and can thus not be called from e.g.
* interrupt handlers.
*
- * Returns: A pointer to the response message on success,
- * %NULL on failure. Free with nvec_msg_free() once no longer
- * used.
+ * Returns: 0 on success, a negative error code on failure.
+ * The response message is returned in @msg. It shall be freed
+ * with nvec_msg_free() once no longer used.
+ *
*/
-struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
- const unsigned char *data, short size)
+int nvec_write_sync(struct nvec_chip *nvec,
+ const unsigned char *data, short size,
+ struct nvec_msg **msg)
{
- struct nvec_msg *msg;
-
mutex_lock(&nvec->sync_write_mutex);
+ *msg = NULL;
nvec->sync_write_pending = (data[1] << 8) + data[0];
if (nvec_write_async(nvec, data, size) < 0) {
mutex_unlock(&nvec->sync_write_mutex);
- return NULL;
+ return -ENOMEM;
}
dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
- nvec->sync_write_pending);
+ nvec->sync_write_pending);
if (!(wait_for_completion_timeout(&nvec->sync_write,
- msecs_to_jiffies(2000)))) {
- dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
+ msecs_to_jiffies(2000)))) {
+ dev_warn(nvec->dev,
+ "timeout waiting for sync write to complete\n");
mutex_unlock(&nvec->sync_write_mutex);
- return NULL;
+ return -ETIMEDOUT;
}
dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
- msg = nvec->last_sync_msg;
+ *msg = nvec->last_sync_msg;
mutex_unlock(&nvec->sync_write_mutex);
- return msg;
+ return 0;
}
EXPORT_SYMBOL(nvec_write_sync);
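With the new contract, callers test an integer result and receive the response through the out-parameter instead of NULL-checking a returned pointer. A minimal caller sketch under that contract (the request bytes are placeholders, not a real EC command):

	static int example_ec_query(struct nvec_chip *nvec)
	{
		static const unsigned char req[] = { 0x01, 0x02 };	/* placeholder */
		struct nvec_msg *msg;
		int err;

		err = nvec_write_sync(nvec, req, sizeof(req), &msg);
		if (err)
			return err;	/* -ENOMEM or -ETIMEDOUT; msg stays NULL */

		/* ... consume msg->data ... */
		nvec_msg_free(nvec, msg);	/* response is still caller-owned */
		return 0;
	}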
@@ -422,8 +423,8 @@ static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
print_hex_dump(KERN_WARNING, "ec system event ",
- DUMP_PREFIX_NONE, 16, 1, msg->data,
- msg->data[1] + 2, true);
+ DUMP_PREFIX_NONE, 16, 1, msg->data,
+ msg->data[1] + 2, true);
atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
msg->data);
@@ -493,8 +494,8 @@ static void nvec_rx_completed(struct nvec_chip *nvec)
{
if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
- (uint) nvec_msg_size(nvec->rx),
- (uint) nvec->rx->pos);
+ (uint)nvec_msg_size(nvec->rx),
+ (uint)nvec->rx->pos);
nvec_msg_free(nvec, nvec->rx);
nvec->state = 0;
@@ -508,8 +509,10 @@ static void nvec_rx_completed(struct nvec_chip *nvec)
spin_lock(&nvec->rx_lock);
- /* add the received data to the work list
- and move the ring buffer pointer to the next entry */
+ /*
+ * Add the received data to the work list and move the ring buffer
+ * pointer to the next entry.
+ */
list_add_tail(&nvec->rx->node, &nvec->rx_data);
spin_unlock(&nvec->rx_lock);
@@ -638,11 +641,9 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
nvec_msg_free(nvec, nvec->rx);
nvec->state = 3;
nvec_tx_set(nvec);
- BUG_ON(nvec->tx->size < 1);
to_send = nvec->tx->data[0];
nvec->tx->pos = 1;
} else if (status == (I2C_SL_IRQ)) {
- BUG_ON(nvec->rx == NULL);
nvec->rx->data[1] = received;
nvec->rx->pos = 2;
nvec->state = 4;
@@ -686,8 +687,8 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
if ((status & (RCVD | RNW)) == RCVD) {
if (received != nvec->i2c_addr)
dev_err(nvec->dev,
- "received address 0x%02x, expected 0x%02x\n",
- received, nvec->i2c_addr);
+ "received address 0x%02x, expected 0x%02x\n",
+ received, nvec->i2c_addr);
nvec->state = 1;
}
@@ -776,7 +777,7 @@ static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
}
if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
- &nvec->i2c_addr)) {
+ &nvec->i2c_addr)) {
dev_err(nvec->dev, "no i2c address specified");
return -ENODEV;
}
@@ -852,14 +853,14 @@ static int tegra_nvec_probe(struct platform_device *pdev)
INIT_WORK(&nvec->tx_work, nvec_request_master);
err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
- "nvec gpio");
+ "nvec gpio");
if (err < 0) {
dev_err(nvec->dev, "couldn't request gpio\n");
return -ENODEV;
}
err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
- "nvec", nvec);
+ "nvec", nvec);
if (err) {
dev_err(nvec->dev, "couldn't request irq\n");
return -ENODEV;
@@ -878,11 +879,13 @@ static int tegra_nvec_probe(struct platform_device *pdev)
pm_power_off = nvec_power_off;
/* Get Firmware Version */
- msg = nvec_write_sync(nvec, get_firmware_version, 2);
+ err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
- if (msg) {
- dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
- msg->data[4], msg->data[5], msg->data[6], msg->data[7]);
+ if (!err) {
+ dev_warn(nvec->dev,
+ "ec firmware version %02x.%02x.%02x / %02x\n",
+ msg->data[4], msg->data[5],
+ msg->data[6], msg->data[7]);
nvec_msg_free(nvec, msg);
}
@@ -924,6 +927,7 @@ static int tegra_nvec_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
+ int err;
struct platform_device *pdev = to_platform_device(dev);
struct nvec_chip *nvec = platform_get_drvdata(pdev);
struct nvec_msg *msg;
@@ -934,8 +938,9 @@ static int nvec_suspend(struct device *dev)
/* keep these sync or you'll break suspend */
nvec_toggle_global_events(nvec, false);
- msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
- nvec_msg_free(nvec, msg);
+ err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
+ if (!err)
+ nvec_msg_free(nvec, msg);
nvec_disable_i2c_slave(nvec);
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index 2ec9de906ca3..c03ca8d9572a 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -168,8 +168,9 @@ struct nvec_chip {
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
short size);
-struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
- const unsigned char *data, short size);
+int nvec_write_sync(struct nvec_chip *nvec,
+ const unsigned char *data, short size,
+ struct nvec_msg **msg);
int nvec_register_notifier(struct nvec_chip *nvec,
struct notifier_block *nb,
diff --git a/drivers/staging/nvec/nvec_paz00.c b/drivers/staging/nvec/nvec_paz00.c
index 68146bfee2b3..51dbeeb3320e 100644
--- a/drivers/staging/nvec/nvec_paz00.c
+++ b/drivers/staging/nvec/nvec_paz00.c
@@ -41,7 +41,6 @@ static void nvec_led_brightness_set(struct led_classdev *led_cdev,
nvec_write_async(led->nvec, buf, sizeof(buf));
led->cdev.brightness = value;
-
}
static int nvec_paz00_probe(struct platform_device *pdev)
@@ -63,7 +62,7 @@ static int nvec_paz00_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, led);
- ret = led_classdev_register(&pdev->dev, &led->cdev);
+ ret = devm_led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
return ret;
@@ -73,18 +72,8 @@ static int nvec_paz00_probe(struct platform_device *pdev)
return 0;
}
-static int nvec_paz00_remove(struct platform_device *pdev)
-{
- struct nvec_led *led = platform_get_drvdata(pdev);
-
- led_classdev_unregister(&led->cdev);
-
- return 0;
-}
-
static struct platform_driver nvec_paz00_driver = {
.probe = nvec_paz00_probe,
- .remove = nvec_paz00_remove,
.driver = {
.name = "nvec-paz00",
},
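devm_led_classdev_register() ties the classdev's lifetime to the
struct device, which is what allows the .remove callback to be deleted
outright. A sketch of the managed pattern (assuming the usual devres
semantics; LED name and ops omitted for brevity):

        static int example_probe(struct platform_device *pdev)
        {
                struct led_classdev *cdev;

                cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
                if (!cdev)
                        return -ENOMEM;
                /* Unregistered automatically when the device is unbound. */
                return devm_led_classdev_register(&pdev->dev, cdev);
        }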
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index 04a7402ae2df..b4a0545e8806 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -207,8 +207,10 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
case TYPE:
memcpy(power->bat_type, &res->plc, res->length - 2);
power->bat_type[res->length - 2] = '\0';
- /* this differs a little from the spec
- fill in more if you find some */
+ /*
+ * This differs a little from the spec; fill in more if you find
+ * some.
+ */
if (!strncmp(power->bat_type, "Li", 30))
power->bat_type_enum = POWER_SUPPLY_TECHNOLOGY_LION;
else
@@ -356,12 +358,14 @@ static void nvec_power_poll(struct work_struct *work)
if (counter >= ARRAY_SIZE(bat_iter))
counter = 0;
-/* AC status via sys req */
+ /* AC status via sys req */
nvec_write_async(power->nvec, buf, 2);
msleep(100);
-/* select a battery request function via round robin
- doing it all at once seems to overload the power supply */
+ /*
+ * Select a battery request function via round robin; doing it all
+ * at once seems to overload the power supply.
+ */
buf[0] = NVEC_BAT;
buf[1] = bat_iter[counter++];
nvec_write_async(power->nvec, buf, 2);
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index 0922dd3a08d3..a324322ee0ad 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -78,7 +78,7 @@ static int nvec_ps2_notifier(struct notifier_block *nb,
unsigned long event_type, void *data)
{
int i;
- unsigned char *msg = (unsigned char *)data;
+ unsigned char *msg = data;
switch (event_type) {
case NVEC_PS2_EVT:
diff --git a/drivers/staging/octeon-usb/TODO b/drivers/staging/octeon-usb/TODO
index cc58a7e88baf..2b29acca5caa 100644
--- a/drivers/staging/octeon-usb/TODO
+++ b/drivers/staging/octeon-usb/TODO
@@ -1,11 +1,8 @@
-This driver is functional and has been tested on EdgeRouter Lite with
-USB mass storage.
+This driver is functional and has been tested on EdgeRouter Lite,
+D-Link DSR-1000N and EBH5600 evaluation board with USB mass storage.
TODO:
- kernel coding style
- checkpatch warnings
- - dead code elimination
- - device tree bindings
- - possibly eliminate the extra "hardware abstraction layer"
Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 6f2871784ba5..17442b3ed849 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -43,29 +43,15 @@
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
*/
-#include <linux/kernel.h>
+
+#include <linux/usb.h>
+#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
+#include <linux/usb/hcd.h>
#include <linux/prefetch.h>
-#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/usb.h>
-
-#include <linux/time.h>
-#include <linux/delay.h>
-
-#include <asm/octeon/cvmx.h>
-#include <asm/octeon/cvmx-iob-defs.h>
-
-#include <linux/usb/hcd.h>
-
-#include <linux/err.h>
#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-sysinfo.h>
-#include <asm/octeon/cvmx-helper-board.h>
#include "octeon-hcd.h"
@@ -113,35 +99,35 @@ enum cvmx_usb_direction {
};
/**
- * enum cvmx_usb_complete - possible callback function status codes
+ * enum cvmx_usb_status - possible callback function status codes
*
- * @CVMX_USB_COMPLETE_SUCCESS: The transaction / operation finished without
+ * @CVMX_USB_STATUS_OK: The transaction / operation finished without
* any errors
- * @CVMX_USB_COMPLETE_SHORT: FIXME: This is currently not implemented
- * @CVMX_USB_COMPLETE_CANCEL: The transaction was canceled while in flight
+ * @CVMX_USB_STATUS_SHORT: FIXME: This is currently not implemented
+ * @CVMX_USB_STATUS_CANCEL: The transaction was canceled while in flight
* by a user call to cvmx_usb_cancel
- * @CVMX_USB_COMPLETE_ERROR: The transaction aborted with an unexpected
+ * @CVMX_USB_STATUS_ERROR: The transaction aborted with an unexpected
* error status
- * @CVMX_USB_COMPLETE_STALL: The transaction received a USB STALL response
+ * @CVMX_USB_STATUS_STALL: The transaction received a USB STALL response
* from the device
- * @CVMX_USB_COMPLETE_XACTERR: The transaction failed with an error from the
+ * @CVMX_USB_STATUS_XACTERR: The transaction failed with an error from the
* device even after a number of retries
- * @CVMX_USB_COMPLETE_DATATGLERR: The transaction failed with a data toggle
+ * @CVMX_USB_STATUS_DATATGLERR: The transaction failed with a data toggle
* error even after a number of retries
- * @CVMX_USB_COMPLETE_BABBLEERR: The transaction failed with a babble error
- * @CVMX_USB_COMPLETE_FRAMEERR: The transaction failed with a frame error
+ * @CVMX_USB_STATUS_BABBLEERR: The transaction failed with a babble error
+ * @CVMX_USB_STATUS_FRAMEERR: The transaction failed with a frame error
* even after a number of retries
*/
-enum cvmx_usb_complete {
- CVMX_USB_COMPLETE_SUCCESS,
- CVMX_USB_COMPLETE_SHORT,
- CVMX_USB_COMPLETE_CANCEL,
- CVMX_USB_COMPLETE_ERROR,
- CVMX_USB_COMPLETE_STALL,
- CVMX_USB_COMPLETE_XACTERR,
- CVMX_USB_COMPLETE_DATATGLERR,
- CVMX_USB_COMPLETE_BABBLEERR,
- CVMX_USB_COMPLETE_FRAMEERR,
+enum cvmx_usb_status {
+ CVMX_USB_STATUS_OK,
+ CVMX_USB_STATUS_SHORT,
+ CVMX_USB_STATUS_CANCEL,
+ CVMX_USB_STATUS_ERROR,
+ CVMX_USB_STATUS_STALL,
+ CVMX_USB_STATUS_XACTERR,
+ CVMX_USB_STATUS_DATATGLERR,
+ CVMX_USB_STATUS_BABBLEERR,
+ CVMX_USB_STATUS_FRAMEERR,
};
/**
@@ -160,13 +146,13 @@ enum cvmx_usb_complete {
* status call.
*/
struct cvmx_usb_port_status {
- uint32_t reserved : 25;
- uint32_t port_enabled : 1;
- uint32_t port_over_current : 1;
- uint32_t port_powered : 1;
+ u32 reserved : 25;
+ u32 port_enabled : 1;
+ u32 port_over_current : 1;
+ u32 port_powered : 1;
enum cvmx_usb_speed port_speed : 2;
- uint32_t connected : 1;
- uint32_t connect_change : 1;
+ u32 connected : 1;
+ u32 connect_change : 1;
};
/**
@@ -180,7 +166,7 @@ struct cvmx_usb_port_status {
struct cvmx_usb_iso_packet {
int offset;
int length;
- enum cvmx_usb_complete status;
+ enum cvmx_usb_status status;
};
/**
@@ -234,13 +220,13 @@ enum cvmx_usb_pipe_flags {
* The low level hardware can transfer a maximum of this number of bytes in each
* transfer. The field is 19 bits wide
*/
-#define MAX_TRANSFER_BYTES ((1<<19)-1)
+#define MAX_TRANSFER_BYTES ((1 << 19) - 1)
/*
* The low level hardware can transfer a maximum of this number of packets in
* each transfer. The field is 10 bits wide
*/
-#define MAX_TRANSFER_PACKETS ((1<<10)-1)
+#define MAX_TRANSFER_PACKETS ((1 << 10) - 1)
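Both limits are all-ones masks sized to the hardware field: an n-bit
field holds at most (1 << n) - 1, giving 524287 bytes for the 19-bit
transfer size and 1023 for the 10-bit packet count. Illustrative
helper (name hypothetical):

        #define N_BIT_MAX(n)    ((1u << (n)) - 1)
        /* N_BIT_MAX(19) == 524287 == MAX_TRANSFER_BYTES   */
        /* N_BIT_MAX(10) == 1023   == MAX_TRANSFER_PACKETS */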
/**
* Logical transactions may take numerous low level
@@ -284,9 +270,9 @@ enum cvmx_usb_stage {
struct cvmx_usb_transaction {
struct list_head node;
enum cvmx_usb_transfer type;
- uint64_t buffer;
+ u64 buffer;
int buffer_length;
- uint64_t control_header;
+ u64 control_header;
int iso_start_frame;
int iso_number_packets;
struct cvmx_usb_iso_packet *iso_packets;
@@ -328,36 +314,37 @@ struct cvmx_usb_transaction {
struct cvmx_usb_pipe {
struct list_head node;
struct list_head transactions;
- uint64_t interval;
- uint64_t next_tx_frame;
+ u64 interval;
+ u64 next_tx_frame;
enum cvmx_usb_pipe_flags flags;
enum cvmx_usb_speed device_speed;
enum cvmx_usb_transfer transfer_type;
enum cvmx_usb_direction transfer_dir;
int multi_count;
- uint16_t max_packet;
- uint8_t device_addr;
- uint8_t endpoint_num;
- uint8_t hub_device_addr;
- uint8_t hub_port;
- uint8_t pid_toggle;
- uint8_t channel;
- int8_t split_sc_frame;
+ u16 max_packet;
+ u8 device_addr;
+ u8 endpoint_num;
+ u8 hub_device_addr;
+ u8 hub_port;
+ u8 pid_toggle;
+ u8 channel;
+ s8 split_sc_frame;
};
struct cvmx_usb_tx_fifo {
struct {
int channel;
int size;
- uint64_t address;
- } entry[MAX_CHANNELS+1];
+ u64 address;
+ } entry[MAX_CHANNELS + 1];
int head;
int tail;
};
/**
- * struct cvmx_usb_state - the state of the USB block
+ * struct octeon_hcd - the state of the USB block
*
+ * lock: Serialization lock.
* init_flags: Flags passed to initialize.
* index: Which USB block this is for.
* idle_hardware_channels: Bit set for every idle hardware channel.
@@ -372,7 +359,8 @@ struct cvmx_usb_tx_fifo {
* frame_number: Increments every SOF interrupt for time keeping.
* active_split: Points to the current active split, or NULL.
*/
-struct cvmx_usb_state {
+struct octeon_hcd {
+ spinlock_t lock; /* serialization lock */
int init_flags;
int index;
int idle_hardware_channels;
@@ -382,23 +370,18 @@ struct cvmx_usb_state {
struct cvmx_usb_port_status port_status;
struct list_head idle_pipes;
struct list_head active_pipes[4];
- uint64_t frame_number;
+ u64 frame_number;
struct cvmx_usb_transaction *active_split;
struct cvmx_usb_tx_fifo periodic;
struct cvmx_usb_tx_fifo nonperiodic;
};
-struct octeon_hcd {
- spinlock_t lock;
- struct cvmx_usb_state usb;
-};
-
/* This macro spins on a register waiting for it to reach a condition. */
#define CVMX_WAIT_FOR_FIELD32(address, _union, cond, timeout_usec) \
({int result; \
do { \
- uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
- octeon_get_clock_rate() / 1000000; \
+ u64 done = cvmx_get_cycle() + (u64)timeout_usec * \
+ octeon_get_clock_rate() / 1000000; \
union _union c; \
\
while (1) { \
@@ -431,7 +414,7 @@ struct octeon_hcd {
/* Returns the IO address to push/pop stuff data from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) \
- (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
+ (CVMX_USBCX_GOTGCTL(usb_index) + ((channel) + 1) * 0x1000)
/**
* struct octeon_temp_buffer - a bounce buffer for USB transfers
@@ -447,11 +430,6 @@ struct octeon_temp_buffer {
u8 data[0];
};
-static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
-{
- return container_of(p, struct octeon_hcd, usb);
-}
-
static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
{
return container_of((void *)p, struct usb_hcd, hcd_priv);
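With cvmx_usb_state folded into struct octeon_hcd, the USB state is
the HCD private area itself, so the intermediate cvmx_usb_to_octeon()
step disappears and octeon_to_hcd() alone recovers the usb_hcd from
the hcd_priv storage that the USB core allocates.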
@@ -562,14 +540,12 @@ static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
*
* Returns: Result of the read
*/
-static inline uint32_t cvmx_usb_read_csr32(struct cvmx_usb_state *usb,
- uint64_t address)
+static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address)
{
- uint32_t result = cvmx_read64_uint32(address ^ 4);
+ u32 result = cvmx_read64_uint32(address ^ 4);
return result;
}
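As the kerneldoc above notes, the XOR with 4 is the address swizzle
for 32-bit CSRs that sit inside 64-bit register windows: it flips
which 32-bit half of the window is accessed. A hedged illustration of
the effect on the low address bits:

        u64 csr = 0x1000;               /* hypothetical 64-bit-aligned CSR */
        /* csr ^ 4       -> 0x1004 : one 32-bit half  */
        /* (csr + 4) ^ 4 -> 0x1000 : the other half   */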
-
/**
* Write a USB 32bit CSR. It performs the necessary address
* swizzle for 32bit CSRs and logs the value in a readable format
@@ -579,8 +555,8 @@ static inline uint32_t cvmx_usb_read_csr32(struct cvmx_usb_state *usb,
* @address: 64bit address to write
* @value: Value to write
*/
-static inline void cvmx_usb_write_csr32(struct cvmx_usb_state *usb,
- uint64_t address, uint32_t value)
+static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb,
+ u64 address, u32 value)
{
cvmx_write64_uint32(address ^ 4, value);
cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
@@ -595,14 +571,13 @@ static inline void cvmx_usb_write_csr32(struct cvmx_usb_state *usb,
*
* Returns: Non zero if we need to do split transactions
*/
-static inline int cvmx_usb_pipe_needs_split(struct cvmx_usb_state *usb,
+static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe)
{
return pipe->device_speed != CVMX_USB_SPEED_HIGH &&
usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH;
}
-
/**
* Trivial utility function to return the correct PID for a pipe
*
@@ -617,7 +592,7 @@ static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
return 0; /* Data0 */
}
-static void cvmx_fifo_setup(struct cvmx_usb_state *usb)
+static void cvmx_fifo_setup(struct octeon_hcd *usb)
{
union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
union cvmx_usbcx_gnptxfsiz npsiz;
@@ -675,7 +650,7 @@ static void cvmx_fifo_setup(struct cvmx_usb_state *usb)
*
* Returns: 0 or a negative error code.
*/
-static int cvmx_usb_shutdown(struct cvmx_usb_state *usb)
+static int cvmx_usb_shutdown(struct octeon_hcd *usb)
{
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
@@ -704,12 +679,12 @@ static int cvmx_usb_shutdown(struct cvmx_usb_state *usb)
* off in the disabled state.
*
* @dev: Pointer to struct device for logging purposes.
- * @usb: Pointer to struct cvmx_usb_state.
+ * @usb: Pointer to struct octeon_hcd.
*
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_initialize(struct device *dev,
- struct cvmx_usb_state *usb)
+ struct octeon_hcd *usb)
{
int channel;
int divisor;
@@ -975,7 +950,7 @@ retry:
*
* @usb: USB device state populated by cvmx_usb_initialize().
*/
-static void cvmx_usb_reset_port(struct cvmx_usb_state *usb)
+static void cvmx_usb_reset_port(struct octeon_hcd *usb)
{
usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HPRT(usb->index));
@@ -1002,7 +977,6 @@ static void cvmx_usb_reset_port(struct cvmx_usb_state *usb)
CVMX_USBCX_HPRT(usb->index));
}
-
/**
* Disable a USB port. After this call the USB port will not
* generate data transfers and will not generate events.
@@ -1013,7 +987,7 @@ static void cvmx_usb_reset_port(struct cvmx_usb_state *usb)
*
* Returns: 0 or a negative error code.
*/
-static int cvmx_usb_disable(struct cvmx_usb_state *usb)
+static int cvmx_usb_disable(struct octeon_hcd *usb)
{
/* Disable the port */
USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt,
@@ -1021,7 +995,6 @@ static int cvmx_usb_disable(struct cvmx_usb_state *usb)
return 0;
}
-
/**
* Get the current state of the USB port. Use this call to
* determine if the usb port has anything connected, is enabled,
@@ -1033,8 +1006,7 @@ static int cvmx_usb_disable(struct cvmx_usb_state *usb)
*
* Returns: Port status information
*/
-static struct cvmx_usb_port_status cvmx_usb_get_status(
- struct cvmx_usb_state *usb)
+static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb)
{
union cvmx_usbcx_hprt usbc_hprt;
struct cvmx_usb_port_status result;
@@ -1048,7 +1020,7 @@ static struct cvmx_usb_port_status cvmx_usb_get_status(
result.port_speed = usbc_hprt.s.prtspd;
result.connected = usbc_hprt.s.prtconnsts;
result.connect_change =
- (result.connected != usb->port_status.connected);
+ result.connected != usb->port_status.connected;
return result;
}
@@ -1105,7 +1077,7 @@ static struct cvmx_usb_port_status cvmx_usb_get_status(
*
* Returns: A non-NULL value is a pipe. NULL means an error.
*/
-static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
+static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb,
int device_addr,
int endpoint_num,
enum cvmx_usb_speed
@@ -1125,8 +1097,8 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
if (!pipe)
return NULL;
if ((device_speed == CVMX_USB_SPEED_HIGH) &&
- (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (transfer_type == CVMX_USB_TRANSFER_BULK))
+ (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (transfer_type == CVMX_USB_TRANSFER_BULK))
pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
pipe->device_addr = device_addr;
pipe->endpoint_num = endpoint_num;
@@ -1143,9 +1115,9 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
if (!interval)
interval = 1;
if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- pipe->interval = interval*8;
+ pipe->interval = interval * 8;
/* Force start splits to be schedule on uFrame 0 */
- pipe->next_tx_frame = ((usb->frame_number+7)&~7) +
+ pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) +
pipe->interval;
} else {
pipe->interval = interval;
@@ -1166,7 +1138,6 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
return pipe;
}
-
/**
* Poll the RX FIFOs and remove data as needed. This function is only used
* in non DMA mode. It is very important that this function be called quickly
@@ -1174,13 +1145,13 @@ static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct cvmx_usb_state *usb,
*
* @usb: USB device state populated by cvmx_usb_initialize().
*/
-static void cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
+static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb)
{
union cvmx_usbcx_grxstsph rx_status;
int channel;
int bytes;
- uint64_t address;
- uint32_t *ptr;
+ u64 address;
+ u32 *ptr;
rx_status.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_GRXSTSPH(usb->index));
@@ -1213,7 +1184,6 @@ static void cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
CVMX_SYNCW;
}
-
/**
* Fill the TX hardware fifo with data out of the software
* fifos
@@ -1225,7 +1195,7 @@ static void cvmx_usb_poll_rx_fifo(struct cvmx_usb_state *usb)
* Returns: Non zero if the hardware fifo was too small and needs
* to be serviced again.
*/
-static int cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
+static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb,
struct cvmx_usb_tx_fifo *fifo, int available)
{
/*
@@ -1234,9 +1204,9 @@ static int cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
*/
while (available && (fifo->head != fifo->tail)) {
int i = fifo->tail;
- const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
- uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
- usb->index) ^ 4;
+ const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
+ u64 csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel,
+ usb->index) ^ 4;
int words = available;
/* Limit the amount of data to what the SW fifo has */
@@ -1275,13 +1245,12 @@ static int cvmx_usb_fill_tx_hw(struct cvmx_usb_state *usb,
return fifo->head != fifo->tail;
}
-
/**
* Check the hardware FIFOs and fill them as needed
*
* @usb: USB device state populated by cvmx_usb_initialize().
*/
-static void cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb)
+static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb)
{
if (usb->periodic.head != usb->periodic.tail) {
union cvmx_usbcx_hptxsts tx_status;
@@ -1312,14 +1281,13 @@ static void cvmx_usb_poll_tx_fifo(struct cvmx_usb_state *usb)
}
}
-
/**
* Fill the TX FIFO with an outgoing packet
*
* @usb: USB device state populated by cvmx_usb_initialize().
* @channel: Channel number to get packet from
*/
-static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
+static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel)
{
union cvmx_usbcx_hccharx hcchar;
union cvmx_usbcx_hcspltx usbc_hcsplt;
@@ -1348,7 +1316,7 @@ static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
return;
if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
- (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
+ (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
fifo = &usb->periodic;
else
fifo = &usb->nonperiodic;
@@ -1357,7 +1325,7 @@ static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
fifo->entry[fifo->head].address =
cvmx_read64_uint64(CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) +
channel * 8);
- fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
+ fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2;
fifo->head++;
if (fifo->head > MAX_CHANNELS)
fifo->head = 0;
@@ -1373,12 +1341,11 @@ static void cvmx_usb_fill_tx_fifo(struct cvmx_usb_state *usb, int channel)
* @channel: Channel to setup
* @pipe: Pipe for control transaction
*/
-static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
+static void cvmx_usb_start_channel_control(struct octeon_hcd *usb,
int channel,
struct cvmx_usb_pipe *pipe)
{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
- struct usb_hcd *hcd = octeon_to_hcd(priv);
+ struct usb_hcd *hcd = octeon_to_hcd(usb);
struct device *dev = hcd->self.controller;
struct cvmx_usb_transaction *transaction =
list_first_entry(&pipe->transactions, typeof(*transaction),
@@ -1488,9 +1455,9 @@ static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
*/
packets_to_transfer = DIV_ROUND_UP(bytes_to_transfer,
pipe->max_packet);
- if (packets_to_transfer == 0)
+ if (packets_to_transfer == 0) {
packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) &&
+ } else if ((packets_to_transfer > 1) &&
(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
/*
* Limit to one packet when not using DMA. Channels must be
@@ -1515,7 +1482,6 @@ static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
usbc_hctsiz.u32);
}
-
/**
* Start a channel to perform the pipe's head transaction
*
@@ -1523,7 +1489,7 @@ static void cvmx_usb_start_channel_control(struct cvmx_usb_state *usb,
* @channel: Channel to setup
* @pipe: Pipe to start
*/
-static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
+static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel,
struct cvmx_usb_pipe *pipe)
{
struct cvmx_usb_transaction *transaction =
@@ -1539,7 +1505,7 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
pipe->flags |= CVMX_USB_PIPE_FLAGS_SCHEDULED;
/* Mark this channel as in use */
- usb->idle_hardware_channels &= ~(1<<channel);
+ usb->idle_hardware_channels &= ~(1 << channel);
/* Enable the channel interrupt bits */
{
@@ -1579,22 +1545,22 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
usbc_hcintmsk.s.xfercomplmsk = 1;
}
cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel, usb->index),
- usbc_hcintmsk.u32);
+ CVMX_USBCX_HCINTMSKX(channel, usb->index),
+ usbc_hcintmsk.u32);
/* Enable the channel interrupt to propagate */
usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb,
CVMX_USBCX_HAINTMSK(usb->index));
- usbc_haintmsk.s.haintmsk |= 1<<channel;
+ usbc_haintmsk.s.haintmsk |= 1 << channel;
cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index),
usbc_haintmsk.u32);
}
/* Setup the location the DMA engine uses. */
{
- uint64_t reg;
- uint64_t dma_address = transaction->buffer +
- transaction->actual_bytes;
+ u64 reg;
+ u64 dma_address = transaction->buffer +
+ transaction->actual_bytes;
if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
dma_address = transaction->buffer +
@@ -1636,15 +1602,16 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
* We only store the lower two bits since the time ahead
* can only be two frames
*/
- if ((transaction->stage&1) == 0) {
+ if ((transaction->stage & 1) == 0) {
if (transaction->type == CVMX_USB_TRANSFER_BULK)
pipe->split_sc_frame =
(usb->frame_number + 1) & 0x7f;
else
pipe->split_sc_frame =
(usb->frame_number + 2) & 0x7f;
- } else
+ } else {
pipe->split_sc_frame = -1;
+ }
usbc_hcsplt.s.spltena = 1;
usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
@@ -1666,10 +1633,9 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
* begin/middle/end of the data or all
*/
if (!usbc_hcsplt.s.compsplt &&
- (pipe->transfer_dir ==
- CVMX_USB_DIRECTION_OUT) &&
- (pipe->transfer_type ==
- CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (pipe->transfer_type ==
+ CVMX_USB_TRANSFER_ISOCHRONOUS)) {
/*
* Clear the split complete frame number as
* there isn't going to be a split complete
@@ -1732,11 +1698,11 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
*/
packets_to_transfer =
DIV_ROUND_UP(bytes_to_transfer, pipe->max_packet);
- if (packets_to_transfer == 0)
+ if (packets_to_transfer == 0) {
packets_to_transfer = 1;
- else if ((packets_to_transfer > 1) &&
- (usb->init_flags &
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ } else if ((packets_to_transfer > 1) &&
+ (usb->init_flags &
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
/*
* Limit to one packet when not using DMA. Channels must
* be restarted between every packet for IN
@@ -1783,7 +1749,7 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
* Set the startframe odd/even properly. This is only used for
* periodic
*/
- usbc_hcchar.s.oddfrm = usb->frame_number&1;
+ usbc_hcchar.s.oddfrm = usb->frame_number & 1;
/*
* Set the number of back to back packets allowed by this
@@ -1843,9 +1809,11 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
break;
}
{
- union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 =
+ union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 =
cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HCTSIZX(channel, usb->index))};
+ CVMX_USBCX_HCTSIZX(channel,
+ usb->index))
+ };
transaction->xfersize = usbc_hctsiz.s.xfersize;
transaction->pktcnt = usbc_hctsiz.s.pktcnt;
}
@@ -1858,21 +1826,19 @@ static void cvmx_usb_start_channel(struct cvmx_usb_state *usb, int channel,
cvmx_usb_fill_tx_fifo(usb, channel);
}
-
/**
* Find a pipe that is ready to be scheduled to hardware.
* @usb: USB device state populated by cvmx_usb_initialize().
- * @list: Pipe list to search
- * @current_frame:
- * Frame counter to use as a time reference.
+ * @xfer_type: Transfer type
*
* Returns: Pipe or NULL if none are ready
*/
static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
- struct cvmx_usb_state *usb,
- struct list_head *list,
- uint64_t current_frame)
+ struct octeon_hcd *usb,
+ enum cvmx_usb_transfer xfer_type)
{
+ struct list_head *list = usb->active_pipes + xfer_type;
+ u64 current_frame = usb->frame_number;
struct cvmx_usb_pipe *pipe;
list_for_each_entry(pipe, list, node) {
@@ -1880,11 +1846,11 @@ static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
list_first_entry(&pipe->transactions, typeof(*t),
node);
if (!(pipe->flags & CVMX_USB_PIPE_FLAGS_SCHEDULED) && t &&
- (pipe->next_tx_frame <= current_frame) &&
- ((pipe->split_sc_frame == -1) ||
- ((((int)current_frame - (int)pipe->split_sc_frame)
- & 0x7f) < 0x40)) &&
- (!usb->active_split || (usb->active_split == t))) {
+ (pipe->next_tx_frame <= current_frame) &&
+ ((pipe->split_sc_frame == -1) ||
+ ((((int)current_frame - pipe->split_sc_frame) & 0x7f) <
+ 0x40)) &&
+ (!usb->active_split || (usb->active_split == t))) {
prefetch(t);
return pipe;
}
@@ -1892,6 +1858,32 @@ static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
return NULL;
}
+static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb,
+ int is_sof)
+{
+ struct cvmx_usb_pipe *pipe;
+
+ /* Find a pipe needing service. */
+ if (is_sof) {
+ /*
+ * Only process periodic pipes on SOF interrupts. This way we
+ * are sure that the periodic data is sent in the beginning of
+ * the frame.
+ */
+ pipe = cvmx_usb_find_ready_pipe(usb,
+ CVMX_USB_TRANSFER_ISOCHRONOUS);
+ if (pipe)
+ return pipe;
+ pipe = cvmx_usb_find_ready_pipe(usb,
+ CVMX_USB_TRANSFER_INTERRUPT);
+ if (pipe)
+ return pipe;
+ }
+ pipe = cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_CONTROL);
+ if (pipe)
+ return pipe;
+ return cvmx_usb_find_ready_pipe(usb, CVMX_USB_TRANSFER_BULK);
+}
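The new helper makes the scheduling policy explicit: on an SOF
interrupt the periodic classes are tried first, so the effective
priority order is isochronous, then interrupt, then control, then
bulk, with the periodic classes considered only at frame start. This
is behaviour-preserving; the open-coded version removed further down
walked the same four lists in the same order.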
/**
* Called whenever a pipe might need to be scheduled to the
@@ -1900,7 +1892,7 @@ static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(
* @usb: USB device state populated by cvmx_usb_initialize().
* @is_sof: True if this schedule was called on a SOF interrupt.
*/
-static void cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof)
+static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof)
{
int channel;
struct cvmx_usb_pipe *pipe;
@@ -1922,7 +1914,7 @@ static void cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof)
CVMX_USBCX_HFIR(usb->index))
};
- if (hfnum.s.frrem < hfir.s.frint/4)
+ if (hfnum.s.frrem < hfir.s.frint / 4)
goto done;
}
@@ -1932,35 +1924,7 @@ static void cvmx_usb_schedule(struct cvmx_usb_state *usb, int is_sof)
if (unlikely(channel > 7))
break;
- /* Find a pipe needing service */
- pipe = NULL;
- if (is_sof) {
- /*
- * Only process periodic pipes on SOF interrupts. This
- * way we are sure that the periodic data is sent in the
- * beginning of the frame
- */
- pipe = cvmx_usb_find_ready_pipe(usb,
- usb->active_pipes +
- CVMX_USB_TRANSFER_ISOCHRONOUS,
- usb->frame_number);
- if (likely(!pipe))
- pipe = cvmx_usb_find_ready_pipe(usb,
- usb->active_pipes +
- CVMX_USB_TRANSFER_INTERRUPT,
- usb->frame_number);
- }
- if (likely(!pipe)) {
- pipe = cvmx_usb_find_ready_pipe(usb,
- usb->active_pipes +
- CVMX_USB_TRANSFER_CONTROL,
- usb->frame_number);
- if (likely(!pipe))
- pipe = cvmx_usb_find_ready_pipe(usb,
- usb->active_pipes +
- CVMX_USB_TRANSFER_BULK,
- usb->frame_number);
- }
+ pipe = cvmx_usb_next_pipe(usb, is_sof);
if (!pipe)
break;
@@ -1974,7 +1938,7 @@ done:
*/
need_sof = 0;
for (ttype = CVMX_USB_TRANSFER_CONTROL;
- ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
+ ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
list_for_each_entry(pipe, &usb->active_pipes[ttype], node) {
if (pipe->next_tx_frame > usb->frame_number) {
need_sof = 1;
@@ -1986,19 +1950,18 @@ done:
cvmx_usbcx_gintmsk, sofmsk, need_sof);
}
-static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
- enum cvmx_usb_complete status,
+static void octeon_usb_urb_complete_callback(struct octeon_hcd *usb,
+ enum cvmx_usb_status status,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction
*transaction,
int bytes_transferred,
struct urb *urb)
{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
- struct usb_hcd *hcd = octeon_to_hcd(priv);
+ struct usb_hcd *hcd = octeon_to_hcd(usb);
struct device *dev = hcd->self.controller;
- if (likely(status == CVMX_USB_COMPLETE_SUCCESS))
+ if (likely(status == CVMX_USB_STATUS_OK))
urb->actual_length = bytes_transferred;
else
urb->actual_length = 0;
@@ -2006,7 +1969,8 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
urb->hcpriv = NULL;
/* For Isochronous transactions we need to update the URB packet status
- list from data in our private copy */
+ * list from data in our private copy
+ */
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
int i;
/*
@@ -2014,12 +1978,11 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
* field.
*/
struct cvmx_usb_iso_packet *iso_packet =
- (struct cvmx_usb_iso_packet *) urb->setup_packet;
+ (struct cvmx_usb_iso_packet *)urb->setup_packet;
/* Recalculate the transfer size by adding up each packet */
urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++) {
- if (iso_packet[i].status ==
- CVMX_USB_COMPLETE_SUCCESS) {
+ if (iso_packet[i].status == CVMX_USB_STATUS_OK) {
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length =
iso_packet[i].length;
@@ -2039,41 +2002,41 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
}
switch (status) {
- case CVMX_USB_COMPLETE_SUCCESS:
+ case CVMX_USB_STATUS_OK:
urb->status = 0;
break;
- case CVMX_USB_COMPLETE_CANCEL:
+ case CVMX_USB_STATUS_CANCEL:
if (urb->status == 0)
urb->status = -ENOENT;
break;
- case CVMX_USB_COMPLETE_STALL:
+ case CVMX_USB_STATUS_STALL:
dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d\n",
pipe, transaction, bytes_transferred);
urb->status = -EPIPE;
break;
- case CVMX_USB_COMPLETE_BABBLEERR:
+ case CVMX_USB_STATUS_BABBLEERR:
dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d\n",
pipe, transaction, bytes_transferred);
urb->status = -EPIPE;
break;
- case CVMX_USB_COMPLETE_SHORT:
+ case CVMX_USB_STATUS_SHORT:
dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d\n",
pipe, transaction, bytes_transferred);
urb->status = -EREMOTEIO;
break;
- case CVMX_USB_COMPLETE_ERROR:
- case CVMX_USB_COMPLETE_XACTERR:
- case CVMX_USB_COMPLETE_DATATGLERR:
- case CVMX_USB_COMPLETE_FRAMEERR:
+ case CVMX_USB_STATUS_ERROR:
+ case CVMX_USB_STATUS_XACTERR:
+ case CVMX_USB_STATUS_DATATGLERR:
+ case CVMX_USB_STATUS_FRAMEERR:
dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d\n",
status, pipe, transaction, bytes_transferred);
urb->status = -EPROTO;
break;
}
- usb_hcd_unlink_urb_from_ep(octeon_to_hcd(priv), urb);
- spin_unlock(&priv->lock);
- usb_hcd_giveback_urb(octeon_to_hcd(priv), urb, urb->status);
- spin_lock(&priv->lock);
+ usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb);
+ spin_unlock(&usb->lock);
+ usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status);
+ spin_lock(&usb->lock);
}
/**
@@ -2087,10 +2050,10 @@ static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
* @complete_code:
* Completion code
*/
-static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
- struct cvmx_usb_pipe *pipe,
- struct cvmx_usb_transaction *transaction,
- enum cvmx_usb_complete complete_code)
+static void cvmx_usb_complete(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_status complete_code)
{
/* If this was a split then clear our split in progress marker */
if (usb->active_split == transaction)
@@ -2110,7 +2073,7 @@ static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
* next one
*/
if ((transaction->iso_number_packets > 1) &&
- (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
+ (complete_code == CVMX_USB_STATUS_OK)) {
/* No bytes transferred for this packet as of yet */
transaction->actual_bytes = 0;
/* One less ISO waiting to transfer */
@@ -2133,7 +2096,6 @@ static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
kfree(transaction);
}
-
/**
* Submit a usb transaction to a pipe. Called for all types
* of transactions.
@@ -2157,12 +2119,12 @@ static void cvmx_usb_perform_complete(struct cvmx_usb_state *usb,
* Returns: Transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
- struct cvmx_usb_state *usb,
+ struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
enum cvmx_usb_transfer type,
- uint64_t buffer,
+ u64 buffer,
int buffer_length,
- uint64_t control_header,
+ u64 control_header,
int iso_start_frame,
int iso_number_packets,
struct cvmx_usb_iso_packet *iso_packets,
@@ -2208,7 +2170,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
return transaction;
}
-
/**
* Call to submit a USB Bulk transfer to a pipe.
*
@@ -2219,7 +2180,7 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_transaction(
* Returns: A submitted transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
- struct cvmx_usb_state *usb,
+ struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct urb *urb)
{
@@ -2233,7 +2194,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
urb);
}
-
/**
* Call to submit a USB Interrupt transfer to a pipe.
*
@@ -2244,7 +2204,7 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_bulk(
* Returns: A submitted transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
- struct cvmx_usb_state *usb,
+ struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct urb *urb)
{
@@ -2259,7 +2219,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
urb);
}
-
/**
* Call to submit a USB Control transfer to a pipe.
*
@@ -2270,12 +2229,12 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt(
* Returns: A submitted transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_control(
- struct cvmx_usb_state *usb,
+ struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct urb *urb)
{
int buffer_length = urb->transfer_buffer_length;
- uint64_t control_header = urb->setup_dma;
+ u64 control_header = urb->setup_dma;
struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header);
if ((header->bRequestType & USB_DIR_IN) == 0)
@@ -2291,7 +2250,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_control(
urb);
}
-
/**
* Call to submit a USB Isochronous transfer to a pipe.
*
@@ -2302,13 +2260,13 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_control(
* Returns: A submitted transaction or NULL on failure.
*/
static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
- struct cvmx_usb_state *usb,
+ struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct urb *urb)
{
struct cvmx_usb_iso_packet *packets;
- packets = (struct cvmx_usb_iso_packet *) urb->setup_packet;
+ packets = (struct cvmx_usb_iso_packet *)urb->setup_packet;
return cvmx_usb_submit_transaction(usb, pipe,
CVMX_USB_TRANSFER_ISOCHRONOUS,
urb->transfer_dma,
@@ -2319,7 +2277,6 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
packets, urb);
}
-
/**
* Cancel one outstanding request in a pipe. Canceling a request
* can fail if the transaction has already completed before cancel
@@ -2333,7 +2290,7 @@ static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous(
*
* Returns: 0 or a negative error code.
*/
-static int cvmx_usb_cancel(struct cvmx_usb_state *usb,
+static int cvmx_usb_cancel(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe,
struct cvmx_usb_transaction *transaction)
{
@@ -2359,17 +2316,15 @@ static int cvmx_usb_cancel(struct cvmx_usb_state *usb,
if (usbc_hcchar.s.chena) {
usbc_hcchar.s.chdis = 1;
cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(pipe->channel,
- usb->index),
- usbc_hcchar.u32);
+ CVMX_USBCX_HCCHARX(pipe->channel,
+ usb->index),
+ usbc_hcchar.u32);
}
}
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_CANCEL);
+ cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_CANCEL);
return 0;
}
-
/**
* Cancel all outstanding requests in a pipe. Logically all this
* does is call cvmx_usb_cancel() in a loop.
@@ -2379,7 +2334,7 @@ static int cvmx_usb_cancel(struct cvmx_usb_state *usb,
*
* Returns: 0 or a negative error code.
*/
-static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb,
+static int cvmx_usb_cancel_all(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe)
{
struct cvmx_usb_transaction *transaction, *next;
@@ -2394,7 +2349,6 @@ static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb,
return 0;
}
-
/**
* Close a pipe created with cvmx_usb_open_pipe().
*
@@ -2404,7 +2358,7 @@ static int cvmx_usb_cancel_all(struct cvmx_usb_state *usb,
* Returns: 0 or a negative error code. EBUSY is returned if the pipe has
* outstanding transfers.
*/
-static int cvmx_usb_close_pipe(struct cvmx_usb_state *usb,
+static int cvmx_usb_close_pipe(struct octeon_hcd *usb,
struct cvmx_usb_pipe *pipe)
{
/* Fail if the pipe has pending transactions */
@@ -2425,7 +2379,7 @@ static int cvmx_usb_close_pipe(struct cvmx_usb_state *usb,
*
* Returns: USB frame number
*/
-static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
+static int cvmx_usb_get_frame_number(struct octeon_hcd *usb)
{
int frame_number;
union cvmx_usbcx_hfnum usbc_hfnum;
@@ -2436,6 +2390,197 @@ static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
return frame_number;
}
+static void cvmx_usb_transfer_control(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ union cvmx_usbcx_hccharx usbc_hcchar,
+ int buffer_space_left,
+ int bytes_in_last_packet)
+{
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ /* This should be impossible */
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_ERROR);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ pipe->pid_toggle = 1;
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->stage =
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+ } else {
+ struct usb_ctrlrequest *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->wLength)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ {
+ struct usb_ctrlrequest *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->wLength)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA:
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+ /*
+ * For setup OUT data that are splits,
+ * the hardware doesn't appear to count
+ * transferred data. Here we manually
+ * update the data transferred
+ */
+ if (!usbc_hcchar.s.epdir) {
+ if (buffer_space_left < pipe->max_packet)
+ transaction->actual_bytes +=
+ buffer_space_left;
+ else
+ transaction->actual_bytes +=
+ pipe->max_packet;
+ }
+ } else if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ } else {
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ }
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ if (cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage =
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+ else
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+ break;
+ }
+}
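The helper encodes the standard control-transfer sequence, inserting
an extra *_SPLIT_COMPLETE step after each stage when the pipe sits
behind a transaction translator. Ignoring splits, the progression is:

        SETUP -> DATA (only if wLength != 0) -> STATUS -> complete

which matches the stage updates above.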
+
+static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ union cvmx_usbcx_hcintx usbc_hcint,
+ int buffer_space_left,
+ int bytes_in_last_packet)
+{
+ /*
+ * The only time a bulk transfer isn't complete when it finishes with
+ * an ACK is during a split transaction. For splits we need to continue
+ * the transfer if more data is needed.
+ */
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ else if (buffer_space_left &&
+ (bytes_in_last_packet == pipe->max_packet))
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ else
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ } else {
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (usbc_hcint.s.nak))
+ pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
+ if (!buffer_space_left ||
+ (bytes_in_last_packet < pipe->max_packet))
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ }
+}
+
+static void cvmx_usb_transfer_intr(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ int buffer_space_left,
+ int bytes_in_last_packet)
+{
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL) {
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ } else if (buffer_space_left &&
+ (bytes_in_last_packet == pipe->max_packet)) {
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ } else {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ }
+ } else if (!buffer_space_left ||
+ (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+ }
+}
+
+static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ int buffer_space_left,
+ int bytes_in_last_packet,
+ int bytes_this_transfer)
+{
+ if (cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISOCHRONOUS OUT splits don't require a complete split stage.
+ * Instead they use a sequence of begin OUT splits to transfer
+ * the data 188 bytes at a time. Once the transfer is complete,
+ * the pipe sleeps until the next schedule interval.
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ /*
+ * If no space left or this wasn't a max size packet
+ * then this transfer is complete. Otherwise start it
+ * again to send the next 188 bytes
+ */
+ if (!buffer_space_left || (bytes_this_transfer < 188)) {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ }
+ return;
+ }
+ if (transaction->stage ==
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
+ /*
+ * We are in the incoming data phase. Keep getting data
+ * until we run out of space or get a small packet
+ */
+ if ((buffer_space_left == 0) ||
+ (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_OK);
+ }
+ } else {
+ transaction->stage =
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ }
+ } else {
+ pipe->next_tx_frame += pipe->interval;
+ cvmx_usb_complete(usb, pipe, transaction, CVMX_USB_STATUS_OK);
+ }
+}
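The 188-byte threshold reflects the USB 2.0 split-transaction budget:
a start-split can carry at most 188 bytes of full-speed isochronous
data per microframe through the hub's transaction translator, so the
driver keeps issuing begin-OUT splits until a shorter chunk marks the
end of the buffer.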
/**
* Poll a channel for status
@@ -2445,10 +2590,9 @@ static int cvmx_usb_get_frame_number(struct cvmx_usb_state *usb)
*
* Returns: Zero on success
*/
-static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
+static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel)
{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
- struct usb_hcd *hcd = octeon_to_hcd(priv);
+ struct usb_hcd *hcd = octeon_to_hcd(usb);
struct device *dev = hcd->self.controller;
union cvmx_usbcx_hcintx usbc_hcint;
union cvmx_usbcx_hctsizx usbc_hctsiz;
@@ -2475,9 +2619,9 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* write of HCCHARX without changing things
*/
cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel,
- usb->index),
- usbc_hcchar.u32);
+ CVMX_USBCX_HCCHARX(channel,
+ usb->index),
+ usbc_hcchar.u32);
return 0;
}
@@ -2492,14 +2636,12 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
hcintmsk.u32 = 0;
hcintmsk.s.chhltdmsk = 1;
cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCINTMSKX(channel,
- usb->index),
- hcintmsk.u32);
+ CVMX_USBCX_HCINTMSKX(channel, usb->index),
+ hcintmsk.u32);
usbc_hcchar.s.chdis = 1;
cvmx_usb_write_csr32(usb,
- CVMX_USBCX_HCCHARX(channel,
- usb->index),
- usbc_hcchar.u32);
+ CVMX_USBCX_HCCHARX(channel, usb->index),
+ usbc_hcchar.u32);
return 0;
} else if (usbc_hcint.s.xfercompl) {
/*
@@ -2523,7 +2665,7 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
/* Disable the channel interrupts now that it is done */
cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
- usb->idle_hardware_channels |= (1<<channel);
+ usb->idle_hardware_channels |= (1 << channel);
/* Make sure this channel is tied to a valid pipe */
pipe = usb->pipe_for_channel[channel];
@@ -2593,7 +2735,7 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* transferred
*/
if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
- (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
+ (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
bytes_this_transfer = 0;
/*
@@ -2621,8 +2763,8 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* will clear this flag
*/
if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
pipe->flags |= CVMX_USB_PIPE_FLAGS_NEED_PING;
if (unlikely(WARN_ON_ONCE(bytes_this_transfer < 0))) {
@@ -2631,8 +2773,8 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
 * keeps subtracting the same byte count over and over again. In
 * such a case we just need to fail every transaction.
*/
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_ERROR);
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_ERROR);
return 0;
}
@@ -2644,24 +2786,24 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* the actual bytes transferred
*/
pipe->pid_toggle = 0;
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_STALL);
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_STALL);
} else if (usbc_hcint.s.xacterr) {
/*
* XactErr as a response means the device signaled
* something wrong with the transfer. For example, PID
* toggle errors cause these.
*/
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_XACTERR);
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_XACTERR);
} else if (usbc_hcint.s.bblerr) {
/* Babble Error (BblErr) */
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_BABBLEERR);
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_BABBLEERR);
} else if (usbc_hcint.s.datatglerr) {
/* Data toggle error */
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_DATATGLERR);
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_DATATGLERR);
} else if (usbc_hcint.s.nyet) {
/*
* NYET as a response is only allowed in three cases: as a
@@ -2676,10 +2818,10 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* again. Otherwise this transaction is complete
*/
if ((buffer_space_left == 0) ||
- (bytes_in_last_packet < pipe->max_packet))
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
+ (bytes_in_last_packet < pipe->max_packet))
+ cvmx_usb_complete(usb, pipe,
+ transaction,
+ CVMX_USB_STATUS_OK);
} else {
/*
* Split transactions retry the split complete 4 times
@@ -2713,205 +2855,26 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
switch (transaction->type) {
case CVMX_USB_TRANSFER_CONTROL:
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- /* This should be impossible */
- cvmx_usb_perform_complete(usb, pipe,
- transaction, CVMX_USB_COMPLETE_ERROR);
- break;
- case CVMX_USB_STAGE_SETUP:
- pipe->pid_toggle = 1;
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage =
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
- else {
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->wLength)
- transaction->stage =
- CVMX_USB_STAGE_DATA;
- else
- transaction->stage =
- CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- {
- struct usb_ctrlrequest *header =
- cvmx_phys_to_ptr(transaction->control_header);
- if (header->wLength)
- transaction->stage =
- CVMX_USB_STAGE_DATA;
- else
- transaction->stage =
- CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA:
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage =
- CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
- /*
- * For setup OUT data that are splits,
- * the hardware doesn't appear to count
- * transferred data. Here we manually
- * update the data transferred
- */
- if (!usbc_hcchar.s.epdir) {
- if (buffer_space_left < pipe->max_packet)
- transaction->actual_bytes +=
- buffer_space_left;
- else
- transaction->actual_bytes +=
- pipe->max_packet;
- }
- } else if ((buffer_space_left == 0) ||
- (bytes_in_last_packet <
- pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage =
- CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet <
- pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage =
- CVMX_USB_STAGE_STATUS;
- } else {
- transaction->stage =
- CVMX_USB_STAGE_DATA;
- }
- break;
- case CVMX_USB_STAGE_STATUS:
- if (cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage =
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
- else
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- break;
- }
+ cvmx_usb_transfer_control(usb, pipe, transaction,
+ usbc_hcchar,
+ buffer_space_left,
+ bytes_in_last_packet);
break;
case CVMX_USB_TRANSFER_BULK:
+ cvmx_usb_transfer_bulk(usb, pipe, transaction,
+ usbc_hcint, buffer_space_left,
+ bytes_in_last_packet);
+ break;
case CVMX_USB_TRANSFER_INTERRUPT:
- /*
- * The only time a bulk transfer isn't complete when it
- * finishes with an ACK is during a split transaction.
- * For splits we need to continue the transfer if more
- * data is needed
- */
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL)
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- else {
- if (buffer_space_left &&
- (bytes_in_last_packet ==
- pipe->max_packet))
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL;
- else {
- if (transaction->type ==
- CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame +=
- pipe->interval;
- cvmx_usb_perform_complete(
- usb,
- pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- } else {
- if ((pipe->device_speed ==
- CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type ==
- CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir ==
- CVMX_USB_DIRECTION_OUT) &&
- (usbc_hcint.s.nak))
- pipe->flags |=
- CVMX_USB_PIPE_FLAGS_NEED_PING;
- if (!buffer_space_left ||
- (bytes_in_last_packet <
- pipe->max_packet)) {
- if (transaction->type ==
- CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame +=
- pipe->interval;
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- }
+ cvmx_usb_transfer_intr(usb, pipe, transaction,
+ buffer_space_left,
+ bytes_in_last_packet);
break;
case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (cvmx_usb_pipe_needs_split(usb, pipe)) {
- /*
- * ISOCHRONOUS OUT splits don't require a
- * complete split stage. Instead they use a
- * sequence of begin OUT splits to transfer the
- * data 188 bytes at a time. Once the transfer
- * is complete, the pipe sleeps until the next
- * schedule interval
- */
- if (pipe->transfer_dir ==
- CVMX_USB_DIRECTION_OUT) {
- /*
- * If no space left or this wasn't a max
- * size packet then this transfer is
- * complete. Otherwise start it again to
- * send the next 188 bytes
- */
- if (!buffer_space_left ||
- (bytes_this_transfer < 188)) {
- pipe->next_tx_frame +=
- pipe->interval;
- cvmx_usb_perform_complete(usb,
- pipe, transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- } else {
- if (transaction->stage ==
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
- /*
- * We are in the incoming data
- * phase. Keep getting data
- * until we run out of space or
- * get a small packet
- */
- if ((buffer_space_left == 0) ||
- (bytes_in_last_packet <
- pipe->max_packet)) {
- pipe->next_tx_frame +=
- pipe->interval;
- cvmx_usb_perform_complete(
- usb,
- pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
- } else
- transaction->stage =
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- }
- } else {
- pipe->next_tx_frame += pipe->interval;
- cvmx_usb_perform_complete(usb, pipe,
- transaction,
- CVMX_USB_COMPLETE_SUCCESS);
- }
+ cvmx_usb_transfer_isoc(usb, pipe, transaction,
+ buffer_space_left,
+ bytes_in_last_packet,
+ bytes_this_transfer);
break;
}
} else if (usbc_hcint.s.nak) {
@@ -2946,20 +2909,18 @@ static int cvmx_usb_poll_channel(struct cvmx_usb_state *usb, int channel)
* We get channel halted interrupts with no result bits
* set when the cable is unplugged
*/
- cvmx_usb_perform_complete(usb, pipe, transaction,
- CVMX_USB_COMPLETE_ERROR);
+ cvmx_usb_complete(usb, pipe, transaction,
+ CVMX_USB_STATUS_ERROR);
}
}
return 0;
}
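/*
 * Each transfer helper above ends with the same completion rule: the
 * transfer is finished once the buffer is exhausted or the last packet
 * came back short. A minimal sketch of that shared test (the helper
 * name is hypothetical, not part of this driver):
 */
static inline bool cvmx_usb_xfer_done(u32 buffer_space_left,
				      u32 bytes_in_last_packet,
				      u32 max_packet)
{
	/* Either the whole buffer was consumed... */
	if (!buffer_space_left)
		return true;
	/* ...or a short packet terminated the transfer early. */
	return bytes_in_last_packet < max_packet;
}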
-static void octeon_usb_port_callback(struct cvmx_usb_state *usb)
+static void octeon_usb_port_callback(struct octeon_hcd *usb)
{
- struct octeon_hcd *priv = cvmx_usb_to_octeon(usb);
-
- spin_unlock(&priv->lock);
- usb_hcd_poll_rh_status(octeon_to_hcd(priv));
- spin_lock(&priv->lock);
+ spin_unlock(&usb->lock);
+ usb_hcd_poll_rh_status(octeon_to_hcd(usb));
+ spin_lock(&usb->lock);
}
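/*
 * The unlock/relock around usb_hcd_poll_rh_status() is deliberate: the
 * USB core may call straight back into this driver's hub_status_data
 * hook, which takes usb->lock itself, so holding the lock across the
 * call would recurse on the spinlock.
 */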
/**
@@ -2972,7 +2933,7 @@ static void octeon_usb_port_callback(struct cvmx_usb_state *usb)
*
* Returns: 0 or a negative error code.
*/
-static int cvmx_usb_poll(struct cvmx_usb_state *usb)
+static int cvmx_usb_poll(struct octeon_hcd *usb)
{
union cvmx_usbcx_hfnum usbc_hfnum;
union cvmx_usbcx_gintsts usbc_gintsts;
@@ -2981,7 +2942,7 @@ static int cvmx_usb_poll(struct cvmx_usb_state *usb)
/* Update the frame counter */
usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
+ if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum)
usb->frame_number += 0x4000;
usb->frame_number &= ~0x3fffull;
usb->frame_number |= usbc_hfnum.s.frnum;
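/*
 * HFNUM.FrNum is only 14 bits wide, so the code above extends it into
 * the 64-bit usb->frame_number by detecting wraparound: if the stored
 * low bits are already ahead of the hardware value, the counter must
 * have wrapped since the last read. Worked example:
 *
 *   usb->frame_number == 0x7ffe   (low 14 bits: 0x3ffe)
 *   usbc_hfnum.s.frnum == 0x0001  (0x3ffe > 0x0001 -> wrapped)
 *   += 0x4000                     -> 0xbffe
 *   clear low bits, OR in FrNum   -> 0x8001
 */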
@@ -3028,8 +2989,8 @@ static int cvmx_usb_poll(struct cvmx_usb_state *usb)
*/
octeon_usb_port_callback(usb);
/* Clear the port change bits */
- usbc_hprt.u32 = cvmx_usb_read_csr32(usb,
- CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.u32 =
+ cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
usbc_hprt.s.prtena = 0;
cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index),
usbc_hprt.u32);
@@ -3056,7 +3017,7 @@ static int cvmx_usb_poll(struct cvmx_usb_state *usb)
channel = __fls(usbc_haint.u32);
cvmx_usb_poll_channel(usb, channel);
- usbc_haint.u32 ^= 1<<channel;
+ usbc_haint.u32 ^= 1 << channel;
}
}
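/*
 * HAINT carries one pending bit per host channel, and the loop above
 * walks the set bits from highest to lowest. For example, with
 * usbc_haint.u32 == 0x24 (channels 2 and 5 pending):
 *
 *   __fls(0x24) == 5 -> poll channel 5, mask becomes 0x04
 *   __fls(0x04) == 2 -> poll channel 2, mask becomes 0x00, loop ends
 */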
@@ -3073,12 +3034,12 @@ static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd)
static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_poll(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ cvmx_usb_poll(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
return IRQ_HANDLED;
}
@@ -3095,16 +3056,16 @@ static void octeon_usb_stop(struct usb_hcd *hcd)
static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
- return cvmx_usb_get_frame_number(&priv->usb);
+ return cvmx_usb_get_frame_number(usb);
}
static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
struct device *dev = hcd->self.controller;
struct cvmx_usb_transaction *transaction = NULL;
struct cvmx_usb_pipe *pipe;
@@ -3114,11 +3075,11 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
int rc;
urb->status = 0;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
rc = usb_hcd_link_urb_to_ep(hcd, urb);
if (rc) {
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&usb->lock, flags);
return rc;
}
@@ -3184,7 +3145,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
dev = dev->parent;
}
}
- pipe = cvmx_usb_open_pipe(&priv->usb, usb_pipedevice(urb->pipe),
+ pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe), speed,
le16_to_cpu(ep->desc.wMaxPacketSize)
& 0x7ff,
@@ -3198,7 +3159,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
split_device, split_port);
if (!pipe) {
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&usb->lock, flags);
dev_dbg(dev, "Failed to create pipe\n");
return -ENOMEM;
}
@@ -3227,8 +3188,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
urb->iso_frame_desc[i].offset;
iso_packet[i].length =
urb->iso_frame_desc[i].length;
- iso_packet[i].status =
- CVMX_USB_COMPLETE_ERROR;
+ iso_packet[i].status = CVMX_USB_STATUS_ERROR;
}
/*
* Store a pointer to the list in the URB setup_packet
@@ -3236,7 +3196,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
* this saves us a bunch of logic.
*/
urb->setup_packet = (char *)iso_packet;
- transaction = cvmx_usb_submit_isochronous(&priv->usb,
+ transaction = cvmx_usb_submit_isochronous(usb,
pipe, urb);
/*
* If submit failed we need to free our private packet
@@ -3252,29 +3212,29 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
dev_dbg(dev, "Submit interrupt to %d.%d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_interrupt(&priv->usb, pipe, urb);
+ transaction = cvmx_usb_submit_interrupt(usb, pipe, urb);
break;
case PIPE_CONTROL:
dev_dbg(dev, "Submit control to %d.%d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_control(&priv->usb, pipe, urb);
+ transaction = cvmx_usb_submit_control(usb, pipe, urb);
break;
case PIPE_BULK:
dev_dbg(dev, "Submit bulk to %d.%d\n",
usb_pipedevice(urb->pipe),
usb_pipeendpoint(urb->pipe));
- transaction = cvmx_usb_submit_bulk(&priv->usb, pipe, urb);
+ transaction = cvmx_usb_submit_bulk(usb, pipe, urb);
break;
}
if (!transaction) {
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&usb->lock, flags);
dev_dbg(dev, "Failed to submit\n");
return -ENOMEM;
}
urb->hcpriv = transaction;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&usb->lock, flags);
return 0;
}
@@ -3282,24 +3242,24 @@ static int octeon_usb_urb_dequeue(struct usb_hcd *hcd,
struct urb *urb,
int status)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
unsigned long flags;
int rc;
if (!urb->dev)
return -EINVAL;
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc)
goto out;
urb->status = status;
- cvmx_usb_cancel(&priv->usb, urb->ep->hcpriv, urb->hcpriv);
+ cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv);
out:
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&usb->lock, flags);
return rc;
}
@@ -3310,28 +3270,28 @@ static void octeon_usb_endpoint_disable(struct usb_hcd *hcd,
struct device *dev = hcd->self.controller;
if (ep->hcpriv) {
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
struct cvmx_usb_pipe *pipe = ep->hcpriv;
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_cancel_all(&priv->usb, pipe);
- if (cvmx_usb_close_pipe(&priv->usb, pipe))
+ spin_lock_irqsave(&usb->lock, flags);
+ cvmx_usb_cancel_all(usb, pipe);
+ if (cvmx_usb_close_pipe(usb, pipe))
dev_dbg(dev, "Closing pipe %p failed\n", pipe);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&usb->lock, flags);
ep->hcpriv = NULL;
}
}
static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
struct cvmx_usb_port_status port_status;
unsigned long flags;
- spin_lock_irqsave(&priv->lock, flags);
- port_status = cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
buf[0] = 0;
buf[0] = port_status.connect_change << 1;
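/*
 * The buffer filled in by hub_status_data is the standard hub change
 * bitmap: bit 0 covers the hub itself and bit N covers port N, which is
 * why the single root port's connect-change flag lands in bit 1. Note
 * that the preceding "buf[0] = 0" is immediately overwritten and has no
 * effect.
 */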
@@ -3339,12 +3299,11 @@ static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
}
static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
- u16 wIndex, char *buf, u16 wLength)
+ u16 wIndex, char *buf, u16 wLength)
{
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
struct device *dev = hcd->self.controller;
struct cvmx_usb_port_status usb_port_status;
- struct cvmx_usb_state *usb = &priv->usb;
int port_status;
struct usb_hub_descriptor *desc;
unsigned long flags;
@@ -3371,9 +3330,9 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
dev_dbg(dev, " ENABLE\n");
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_disable(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ cvmx_usb_disable(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
break;
case USB_PORT_FEAT_SUSPEND:
dev_dbg(dev, " SUSPEND\n");
@@ -3390,20 +3349,18 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_C_CONNECTION:
dev_dbg(dev, " C_CONNECTION\n");
/* Clears drivers internal connect status change flag */
- spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status =
- cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ usb->port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
break;
case USB_PORT_FEAT_C_RESET:
dev_dbg(dev, " C_RESET\n");
/*
* Clears the driver's internal Port Reset Change flag.
*/
- spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status =
- cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ usb->port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
break;
case USB_PORT_FEAT_C_ENABLE:
dev_dbg(dev, " C_ENABLE\n");
@@ -3411,10 +3368,9 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* Clears the driver's internal Port Enable/Disable
* Change flag.
*/
- spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status =
- cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ usb->port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
break;
case USB_PORT_FEAT_C_SUSPEND:
dev_dbg(dev, " C_SUSPEND\n");
@@ -3427,10 +3383,9 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_C_OVER_CURRENT:
dev_dbg(dev, " C_OVER_CURRENT\n");
/* Clears the driver's overcurrent Change flag */
- spin_lock_irqsave(&priv->lock, flags);
- priv->usb.port_status =
- cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ usb->port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
break;
default:
dev_dbg(dev, " UNKNOWN\n");
@@ -3451,7 +3406,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetHubStatus:
dev_dbg(dev, "GetHubStatus\n");
- *(__le32 *) buf = 0;
+ *(__le32 *)buf = 0;
break;
case GetPortStatus:
dev_dbg(dev, "GetPortStatus\n");
@@ -3460,9 +3415,9 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return -EINVAL;
}
- spin_lock_irqsave(&priv->lock, flags);
- usb_port_status = cvmx_usb_get_status(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ usb_port_status = cvmx_usb_get_status(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
port_status = 0;
if (usb_port_status.connect_change) {
@@ -3503,7 +3458,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dev_dbg(dev, " LOWSPEED\n");
}
- *((__le32 *) buf) = cpu_to_le32(port_status);
+ *((__le32 *)buf) = cpu_to_le32(port_status);
break;
case SetHubFeature:
dev_dbg(dev, "SetHubFeature\n");
@@ -3525,16 +3480,16 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/*
* Program the port power bit to drive VBUS on the USB.
*/
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index),
cvmx_usbcx_hprt, prtpwr, 1);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&usb->lock, flags);
return 0;
case USB_PORT_FEAT_RESET:
dev_dbg(dev, " RESET\n");
- spin_lock_irqsave(&priv->lock, flags);
- cvmx_usb_reset_port(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ cvmx_usb_reset_port(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
return 0;
case USB_PORT_FEAT_INDICATOR:
dev_dbg(dev, " INDICATOR\n");
@@ -3579,23 +3534,26 @@ static int octeon_usb_probe(struct platform_device *pdev)
struct device_node *usbn_node;
int irq = platform_get_irq(pdev, 0);
struct device *dev = &pdev->dev;
- struct octeon_hcd *priv;
+ struct octeon_hcd *usb;
struct usb_hcd *hcd;
u32 clock_rate = 48000000;
bool is_crystal_clock = false;
const char *clock_type;
int i;
- if (dev->of_node == NULL) {
+ if (!dev->of_node) {
dev_err(dev, "Error: empty of_node\n");
return -ENXIO;
}
usbn_node = dev->of_node->parent;
i = of_property_read_u32(usbn_node,
- "refclk-frequency", &clock_rate);
+ "clock-frequency", &clock_rate);
+ if (i)
+ i = of_property_read_u32(usbn_node,
+ "refclk-frequency", &clock_rate);
if (i) {
- dev_err(dev, "No USBN \"refclk-frequency\"\n");
+ dev_err(dev, "No USBN \"clock-frequency\"\n");
return -ENXIO;
}
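/*
 * The probe now prefers the generic "clock-frequency" property and only
 * falls back to the legacy Octeon-specific "refclk-frequency" name, so
 * old device trees keep working. The same fallback, factored into a
 * sketch (the helper name is hypothetical):
 */
static int octeon_usb_get_clock_rate(struct device_node *np, u32 *rate)
{
	int ret;

	/* Try the standard binding first... */
	ret = of_property_read_u32(np, "clock-frequency", rate);
	if (ret)
		/* ...then the legacy property for older device trees. */
		ret = of_property_read_u32(np, "refclk-frequency", rate);
	return ret;
}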
switch (clock_rate) {
@@ -3609,14 +3567,16 @@ static int octeon_usb_probe(struct platform_device *pdev)
initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
break;
default:
- dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n",
- clock_rate);
+ dev_err(dev, "Illegal USBN \"clock-frequency\" %u\n",
+ clock_rate);
return -ENXIO;
-
}
i = of_property_read_string(usbn_node,
- "refclk-type", &clock_type);
+ "cavium,refclk-type", &clock_type);
+ if (i)
+ i = of_property_read_string(usbn_node,
+ "refclk-type", &clock_type);
if (!i && strcmp("crystal", clock_type) == 0)
is_crystal_clock = true;
@@ -3627,7 +3587,7 @@ static int octeon_usb_probe(struct platform_device *pdev)
initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
+ if (!res_mem) {
dev_err(dev, "found no memory resource\n");
return -ENXIO;
}
@@ -3673,31 +3633,31 @@ static int octeon_usb_probe(struct platform_device *pdev)
return -1;
}
hcd->uses_new_polling = 1;
- priv = (struct octeon_hcd *)hcd->hcd_priv;
+ usb = (struct octeon_hcd *)hcd->hcd_priv;
- spin_lock_init(&priv->lock);
+ spin_lock_init(&usb->lock);
- priv->usb.init_flags = initialize_flags;
+ usb->init_flags = initialize_flags;
/* Initialize the USB state structure */
- priv->usb.index = usb_num;
- INIT_LIST_HEAD(&priv->usb.idle_pipes);
- for (i = 0; i < ARRAY_SIZE(priv->usb.active_pipes); i++)
- INIT_LIST_HEAD(&priv->usb.active_pipes[i]);
+ usb->index = usb_num;
+ INIT_LIST_HEAD(&usb->idle_pipes);
+ for (i = 0; i < ARRAY_SIZE(usb->active_pipes); i++)
+ INIT_LIST_HEAD(&usb->active_pipes[i]);
/* Due to an errata, CN31XX doesn't support DMA */
if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
- priv->usb.init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
+ usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
/* Only use one channel with non DMA */
- priv->usb.idle_hardware_channels = 0x1;
+ usb->idle_hardware_channels = 0x1;
} else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
/* CN5XXX have an errata with channel 3 */
- priv->usb.idle_hardware_channels = 0xf7;
+ usb->idle_hardware_channels = 0xf7;
} else {
- priv->usb.idle_hardware_channels = 0xff;
+ usb->idle_hardware_channels = 0xff;
}
- status = cvmx_usb_initialize(dev, &priv->usb);
+ status = cvmx_usb_initialize(dev, usb);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
kfree(hcd);
@@ -3722,13 +3682,13 @@ static int octeon_usb_remove(struct platform_device *pdev)
int status;
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct octeon_hcd *priv = hcd_to_octeon(hcd);
+ struct octeon_hcd *usb = hcd_to_octeon(hcd);
unsigned long flags;
usb_remove_hcd(hcd);
- spin_lock_irqsave(&priv->lock, flags);
- status = cvmx_usb_shutdown(&priv->usb);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_lock_irqsave(&usb->lock, flags);
+ status = cvmx_usb_shutdown(usb);
+ spin_unlock_irqrestore(&usb->lock, flags);
if (status)
dev_dbg(dev, "USB shutdown failed with %d\n", status);
@@ -3747,7 +3707,7 @@ MODULE_DEVICE_TABLE(of, octeon_usb_match);
static struct platform_driver octeon_usb_driver = {
.driver = {
- .name = "OcteonUSB",
+ .name = "octeon-hcd",
.of_match_table = octeon_usb_match,
},
.probe = octeon_usb_probe,
diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h
index 70e7fa5e37d9..3353aefe662e 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.h
+++ b/drivers/staging/octeon-usb/octeon-hcd.h
@@ -110,7 +110,7 @@
* initialization. Do not change this register after the initial programming.
*/
union cvmx_usbcx_gahbcfg {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_gahbcfg_s
* @ptxfemplvl: Periodic TxFIFO Empty Level (PTxFEmpLvl)
@@ -145,13 +145,13 @@ union cvmx_usbcx_gahbcfg {
* * 1'b1: Unmask the interrupt assertion to the application.
*/
struct cvmx_usbcx_gahbcfg_s {
- __BITFIELD_FIELD(uint32_t reserved_9_31 : 23,
- __BITFIELD_FIELD(uint32_t ptxfemplvl : 1,
- __BITFIELD_FIELD(uint32_t nptxfemplvl : 1,
- __BITFIELD_FIELD(uint32_t reserved_6_6 : 1,
- __BITFIELD_FIELD(uint32_t dmaen : 1,
- __BITFIELD_FIELD(uint32_t hbstlen : 4,
- __BITFIELD_FIELD(uint32_t glblintrmsk : 1,
+ __BITFIELD_FIELD(u32 reserved_9_31 : 23,
+ __BITFIELD_FIELD(u32 ptxfemplvl : 1,
+ __BITFIELD_FIELD(u32 nptxfemplvl : 1,
+ __BITFIELD_FIELD(u32 reserved_6_6 : 1,
+ __BITFIELD_FIELD(u32 dmaen : 1,
+ __BITFIELD_FIELD(u32 hbstlen : 4,
+ __BITFIELD_FIELD(u32 glblintrmsk : 1,
;)))))))
} s;
};
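/*
 * __BITFIELD_FIELD comes from the MIPS <asm/bitfield.h> and lets these
 * register layouts be written once, MSB first, yet map correctly on
 * either endianness: the declaration order is kept on big-endian
 * kernels and reversed on little-endian ones. Roughly:
 *
 *   #if defined(__MIPSEB__)
 *   #define __BITFIELD_FIELD(field, more)	field; more
 *   #else
 *   #define __BITFIELD_FIELD(field, more)	more field;
 *   #endif
 *
 * The nested invocations are also why each field list closes with a
 * lone ";)))...)" line: the innermost "more" argument is the bare
 * semicolon.
 */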
@@ -164,7 +164,7 @@ union cvmx_usbcx_gahbcfg {
* This register contains the configuration options of the O2P USB core.
*/
union cvmx_usbcx_ghwcfg3 {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_ghwcfg3_s
* @dfifodepth: DFIFO Depth (DfifoDepth)
@@ -212,16 +212,16 @@ union cvmx_usbcx_ghwcfg3 {
* * Others: Reserved
*/
struct cvmx_usbcx_ghwcfg3_s {
- __BITFIELD_FIELD(uint32_t dfifodepth : 16,
- __BITFIELD_FIELD(uint32_t reserved_13_15 : 3,
- __BITFIELD_FIELD(uint32_t ahbphysync : 1,
- __BITFIELD_FIELD(uint32_t rsttype : 1,
- __BITFIELD_FIELD(uint32_t optfeature : 1,
- __BITFIELD_FIELD(uint32_t vendor_control_interface_support : 1,
- __BITFIELD_FIELD(uint32_t i2c_selection : 1,
- __BITFIELD_FIELD(uint32_t otgen : 1,
- __BITFIELD_FIELD(uint32_t pktsizewidth : 3,
- __BITFIELD_FIELD(uint32_t xfersizewidth : 4,
+ __BITFIELD_FIELD(u32 dfifodepth : 16,
+ __BITFIELD_FIELD(u32 reserved_13_15 : 3,
+ __BITFIELD_FIELD(u32 ahbphysync : 1,
+ __BITFIELD_FIELD(u32 rsttype : 1,
+ __BITFIELD_FIELD(u32 optfeature : 1,
+ __BITFIELD_FIELD(u32 vendor_control_interface_support : 1,
+ __BITFIELD_FIELD(u32 i2c_selection : 1,
+ __BITFIELD_FIELD(u32 otgen : 1,
+ __BITFIELD_FIELD(u32 pktsizewidth : 3,
+ __BITFIELD_FIELD(u32 xfersizewidth : 4,
;))))))))))
} s;
};
@@ -238,7 +238,7 @@ union cvmx_usbcx_ghwcfg3 {
* Mask interrupt: 1'b0, Unmask interrupt: 1'b1
*/
union cvmx_usbcx_gintmsk {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_gintmsk_s
* @wkupintmsk: Resume/Remote Wakeup Detected Interrupt Mask
@@ -279,38 +279,38 @@ union cvmx_usbcx_gintmsk {
* @modemismsk: Mode Mismatch Interrupt Mask (ModeMisMsk)
*/
struct cvmx_usbcx_gintmsk_s {
- __BITFIELD_FIELD(uint32_t wkupintmsk : 1,
- __BITFIELD_FIELD(uint32_t sessreqintmsk : 1,
- __BITFIELD_FIELD(uint32_t disconnintmsk : 1,
- __BITFIELD_FIELD(uint32_t conidstschngmsk : 1,
- __BITFIELD_FIELD(uint32_t reserved_27_27 : 1,
- __BITFIELD_FIELD(uint32_t ptxfempmsk : 1,
- __BITFIELD_FIELD(uint32_t hchintmsk : 1,
- __BITFIELD_FIELD(uint32_t prtintmsk : 1,
- __BITFIELD_FIELD(uint32_t reserved_23_23 : 1,
- __BITFIELD_FIELD(uint32_t fetsuspmsk : 1,
- __BITFIELD_FIELD(uint32_t incomplpmsk : 1,
- __BITFIELD_FIELD(uint32_t incompisoinmsk : 1,
- __BITFIELD_FIELD(uint32_t oepintmsk : 1,
- __BITFIELD_FIELD(uint32_t inepintmsk : 1,
- __BITFIELD_FIELD(uint32_t epmismsk : 1,
- __BITFIELD_FIELD(uint32_t reserved_16_16 : 1,
- __BITFIELD_FIELD(uint32_t eopfmsk : 1,
- __BITFIELD_FIELD(uint32_t isooutdropmsk : 1,
- __BITFIELD_FIELD(uint32_t enumdonemsk : 1,
- __BITFIELD_FIELD(uint32_t usbrstmsk : 1,
- __BITFIELD_FIELD(uint32_t usbsuspmsk : 1,
- __BITFIELD_FIELD(uint32_t erlysuspmsk : 1,
- __BITFIELD_FIELD(uint32_t i2cint : 1,
- __BITFIELD_FIELD(uint32_t ulpickintmsk : 1,
- __BITFIELD_FIELD(uint32_t goutnakeffmsk : 1,
- __BITFIELD_FIELD(uint32_t ginnakeffmsk : 1,
- __BITFIELD_FIELD(uint32_t nptxfempmsk : 1,
- __BITFIELD_FIELD(uint32_t rxflvlmsk : 1,
- __BITFIELD_FIELD(uint32_t sofmsk : 1,
- __BITFIELD_FIELD(uint32_t otgintmsk : 1,
- __BITFIELD_FIELD(uint32_t modemismsk : 1,
- __BITFIELD_FIELD(uint32_t reserved_0_0 : 1,
+ __BITFIELD_FIELD(u32 wkupintmsk : 1,
+ __BITFIELD_FIELD(u32 sessreqintmsk : 1,
+ __BITFIELD_FIELD(u32 disconnintmsk : 1,
+ __BITFIELD_FIELD(u32 conidstschngmsk : 1,
+ __BITFIELD_FIELD(u32 reserved_27_27 : 1,
+ __BITFIELD_FIELD(u32 ptxfempmsk : 1,
+ __BITFIELD_FIELD(u32 hchintmsk : 1,
+ __BITFIELD_FIELD(u32 prtintmsk : 1,
+ __BITFIELD_FIELD(u32 reserved_23_23 : 1,
+ __BITFIELD_FIELD(u32 fetsuspmsk : 1,
+ __BITFIELD_FIELD(u32 incomplpmsk : 1,
+ __BITFIELD_FIELD(u32 incompisoinmsk : 1,
+ __BITFIELD_FIELD(u32 oepintmsk : 1,
+ __BITFIELD_FIELD(u32 inepintmsk : 1,
+ __BITFIELD_FIELD(u32 epmismsk : 1,
+ __BITFIELD_FIELD(u32 reserved_16_16 : 1,
+ __BITFIELD_FIELD(u32 eopfmsk : 1,
+ __BITFIELD_FIELD(u32 isooutdropmsk : 1,
+ __BITFIELD_FIELD(u32 enumdonemsk : 1,
+ __BITFIELD_FIELD(u32 usbrstmsk : 1,
+ __BITFIELD_FIELD(u32 usbsuspmsk : 1,
+ __BITFIELD_FIELD(u32 erlysuspmsk : 1,
+ __BITFIELD_FIELD(u32 i2cint : 1,
+ __BITFIELD_FIELD(u32 ulpickintmsk : 1,
+ __BITFIELD_FIELD(u32 goutnakeffmsk : 1,
+ __BITFIELD_FIELD(u32 ginnakeffmsk : 1,
+ __BITFIELD_FIELD(u32 nptxfempmsk : 1,
+ __BITFIELD_FIELD(u32 rxflvlmsk : 1,
+ __BITFIELD_FIELD(u32 sofmsk : 1,
+ __BITFIELD_FIELD(u32 otgintmsk : 1,
+ __BITFIELD_FIELD(u32 modemismsk : 1,
+ __BITFIELD_FIELD(u32 reserved_0_0 : 1,
;))))))))))))))))))))))))))))))))
} s;
};
@@ -331,7 +331,7 @@ union cvmx_usbcx_gintmsk {
* automatically.
*/
union cvmx_usbcx_gintsts {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_gintsts_s
* @wkupint: Resume/Remote Wakeup Detected Interrupt (WkUpInt)
@@ -509,38 +509,38 @@ union cvmx_usbcx_gintsts {
* * 1'b1: Host mode
*/
struct cvmx_usbcx_gintsts_s {
- __BITFIELD_FIELD(uint32_t wkupint : 1,
- __BITFIELD_FIELD(uint32_t sessreqint : 1,
- __BITFIELD_FIELD(uint32_t disconnint : 1,
- __BITFIELD_FIELD(uint32_t conidstschng : 1,
- __BITFIELD_FIELD(uint32_t reserved_27_27 : 1,
- __BITFIELD_FIELD(uint32_t ptxfemp : 1,
- __BITFIELD_FIELD(uint32_t hchint : 1,
- __BITFIELD_FIELD(uint32_t prtint : 1,
- __BITFIELD_FIELD(uint32_t reserved_23_23 : 1,
- __BITFIELD_FIELD(uint32_t fetsusp : 1,
- __BITFIELD_FIELD(uint32_t incomplp : 1,
- __BITFIELD_FIELD(uint32_t incompisoin : 1,
- __BITFIELD_FIELD(uint32_t oepint : 1,
- __BITFIELD_FIELD(uint32_t iepint : 1,
- __BITFIELD_FIELD(uint32_t epmis : 1,
- __BITFIELD_FIELD(uint32_t reserved_16_16 : 1,
- __BITFIELD_FIELD(uint32_t eopf : 1,
- __BITFIELD_FIELD(uint32_t isooutdrop : 1,
- __BITFIELD_FIELD(uint32_t enumdone : 1,
- __BITFIELD_FIELD(uint32_t usbrst : 1,
- __BITFIELD_FIELD(uint32_t usbsusp : 1,
- __BITFIELD_FIELD(uint32_t erlysusp : 1,
- __BITFIELD_FIELD(uint32_t i2cint : 1,
- __BITFIELD_FIELD(uint32_t ulpickint : 1,
- __BITFIELD_FIELD(uint32_t goutnakeff : 1,
- __BITFIELD_FIELD(uint32_t ginnakeff : 1,
- __BITFIELD_FIELD(uint32_t nptxfemp : 1,
- __BITFIELD_FIELD(uint32_t rxflvl : 1,
- __BITFIELD_FIELD(uint32_t sof : 1,
- __BITFIELD_FIELD(uint32_t otgint : 1,
- __BITFIELD_FIELD(uint32_t modemis : 1,
- __BITFIELD_FIELD(uint32_t curmod : 1,
+ __BITFIELD_FIELD(u32 wkupint : 1,
+ __BITFIELD_FIELD(u32 sessreqint : 1,
+ __BITFIELD_FIELD(u32 disconnint : 1,
+ __BITFIELD_FIELD(u32 conidstschng : 1,
+ __BITFIELD_FIELD(u32 reserved_27_27 : 1,
+ __BITFIELD_FIELD(u32 ptxfemp : 1,
+ __BITFIELD_FIELD(u32 hchint : 1,
+ __BITFIELD_FIELD(u32 prtint : 1,
+ __BITFIELD_FIELD(u32 reserved_23_23 : 1,
+ __BITFIELD_FIELD(u32 fetsusp : 1,
+ __BITFIELD_FIELD(u32 incomplp : 1,
+ __BITFIELD_FIELD(u32 incompisoin : 1,
+ __BITFIELD_FIELD(u32 oepint : 1,
+ __BITFIELD_FIELD(u32 iepint : 1,
+ __BITFIELD_FIELD(u32 epmis : 1,
+ __BITFIELD_FIELD(u32 reserved_16_16 : 1,
+ __BITFIELD_FIELD(u32 eopf : 1,
+ __BITFIELD_FIELD(u32 isooutdrop : 1,
+ __BITFIELD_FIELD(u32 enumdone : 1,
+ __BITFIELD_FIELD(u32 usbrst : 1,
+ __BITFIELD_FIELD(u32 usbsusp : 1,
+ __BITFIELD_FIELD(u32 erlysusp : 1,
+ __BITFIELD_FIELD(u32 i2cint : 1,
+ __BITFIELD_FIELD(u32 ulpickint : 1,
+ __BITFIELD_FIELD(u32 goutnakeff : 1,
+ __BITFIELD_FIELD(u32 ginnakeff : 1,
+ __BITFIELD_FIELD(u32 nptxfemp : 1,
+ __BITFIELD_FIELD(u32 rxflvl : 1,
+ __BITFIELD_FIELD(u32 sof : 1,
+ __BITFIELD_FIELD(u32 otgint : 1,
+ __BITFIELD_FIELD(u32 modemis : 1,
+ __BITFIELD_FIELD(u32 curmod : 1,
;))))))))))))))))))))))))))))))))
} s;
};
@@ -554,7 +554,7 @@ union cvmx_usbcx_gintsts {
* Non-Periodic TxFIFO.
*/
union cvmx_usbcx_gnptxfsiz {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_gnptxfsiz_s
* @nptxfdep: Non-Periodic TxFIFO Depth (NPTxFDep)
@@ -566,8 +566,8 @@ union cvmx_usbcx_gnptxfsiz {
* Transmit FIFO RAM.
*/
struct cvmx_usbcx_gnptxfsiz_s {
- __BITFIELD_FIELD(uint32_t nptxfdep : 16,
- __BITFIELD_FIELD(uint32_t nptxfstaddr : 16,
+ __BITFIELD_FIELD(u32 nptxfdep : 16,
+ __BITFIELD_FIELD(u32 nptxfstaddr : 16,
;))
} s;
};
@@ -581,7 +581,7 @@ union cvmx_usbcx_gnptxfsiz {
* Non-Periodic TxFIFO and the Non-Periodic Transmit Request Queue.
*/
union cvmx_usbcx_gnptxsts {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_gnptxsts_s
* @nptxqtop: Top of the Non-Periodic Transmit Request Queue (NPTxQTop)
@@ -617,10 +617,10 @@ union cvmx_usbcx_gnptxsts {
* * Others: Reserved
*/
struct cvmx_usbcx_gnptxsts_s {
- __BITFIELD_FIELD(uint32_t reserved_31_31 : 1,
- __BITFIELD_FIELD(uint32_t nptxqtop : 7,
- __BITFIELD_FIELD(uint32_t nptxqspcavail : 8,
- __BITFIELD_FIELD(uint32_t nptxfspcavail : 16,
+ __BITFIELD_FIELD(u32 reserved_31_31 : 1,
+ __BITFIELD_FIELD(u32 nptxqtop : 7,
+ __BITFIELD_FIELD(u32 nptxqspcavail : 8,
+ __BITFIELD_FIELD(u32 nptxfspcavail : 16,
;))))
} s;
};
@@ -634,7 +634,7 @@ union cvmx_usbcx_gnptxsts {
* the core.
*/
union cvmx_usbcx_grstctl {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_grstctl_s
* @ahbidle: AHB Master Idle (AHBIdle)
@@ -739,16 +739,16 @@ union cvmx_usbcx_grstctl {
* selected, the PHY domain has to be reset for proper operation.
*/
struct cvmx_usbcx_grstctl_s {
- __BITFIELD_FIELD(uint32_t ahbidle : 1,
- __BITFIELD_FIELD(uint32_t dmareq : 1,
- __BITFIELD_FIELD(uint32_t reserved_11_29 : 19,
- __BITFIELD_FIELD(uint32_t txfnum : 5,
- __BITFIELD_FIELD(uint32_t txfflsh : 1,
- __BITFIELD_FIELD(uint32_t rxfflsh : 1,
- __BITFIELD_FIELD(uint32_t intknqflsh : 1,
- __BITFIELD_FIELD(uint32_t frmcntrrst : 1,
- __BITFIELD_FIELD(uint32_t hsftrst : 1,
- __BITFIELD_FIELD(uint32_t csftrst : 1,
+ __BITFIELD_FIELD(u32 ahbidle : 1,
+ __BITFIELD_FIELD(u32 dmareq : 1,
+ __BITFIELD_FIELD(u32 reserved_11_29 : 19,
+ __BITFIELD_FIELD(u32 txfnum : 5,
+ __BITFIELD_FIELD(u32 txfflsh : 1,
+ __BITFIELD_FIELD(u32 rxfflsh : 1,
+ __BITFIELD_FIELD(u32 intknqflsh : 1,
+ __BITFIELD_FIELD(u32 frmcntrrst : 1,
+ __BITFIELD_FIELD(u32 hsftrst : 1,
+ __BITFIELD_FIELD(u32 csftrst : 1,
;))))))))))
} s;
};
@@ -762,7 +762,7 @@ union cvmx_usbcx_grstctl {
* RxFIFO.
*/
union cvmx_usbcx_grxfsiz {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_grxfsiz_s
* @rxfdep: RxFIFO Depth (RxFDep)
@@ -771,8 +771,8 @@ union cvmx_usbcx_grxfsiz {
* * Maximum value is 32768
*/
struct cvmx_usbcx_grxfsiz_s {
- __BITFIELD_FIELD(uint32_t reserved_16_31 : 16,
- __BITFIELD_FIELD(uint32_t rxfdep : 16,
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 rxfdep : 16,
;))
} s;
};
@@ -792,7 +792,7 @@ union cvmx_usbcx_grxfsiz {
* hardware.
*/
union cvmx_usbcx_grxstsph {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_grxstsph_s
* @pktsts: Packet Status (PktSts)
@@ -814,11 +814,11 @@ union cvmx_usbcx_grxstsph {
* packet belongs.
*/
struct cvmx_usbcx_grxstsph_s {
- __BITFIELD_FIELD(uint32_t reserved_21_31 : 11,
- __BITFIELD_FIELD(uint32_t pktsts : 4,
- __BITFIELD_FIELD(uint32_t dpid : 2,
- __BITFIELD_FIELD(uint32_t bcnt : 11,
- __BITFIELD_FIELD(uint32_t chnum : 4,
+ __BITFIELD_FIELD(u32 reserved_21_31 : 11,
+ __BITFIELD_FIELD(u32 pktsts : 4,
+ __BITFIELD_FIELD(u32 dpid : 2,
+ __BITFIELD_FIELD(u32 bcnt : 11,
+ __BITFIELD_FIELD(u32 chnum : 4,
;)))))
} s;
};
@@ -835,7 +835,7 @@ union cvmx_usbcx_grxstsph {
* to this register after the initial programming.
*/
union cvmx_usbcx_gusbcfg {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_gusbcfg_s
* @otgi2csel: UTMIFS or I2C Interface Select (OtgI2CSel)
@@ -895,19 +895,19 @@ union cvmx_usbcx_gusbcfg {
* * One 48-MHz PHY clock = 0.25 bit times
*/
struct cvmx_usbcx_gusbcfg_s {
- __BITFIELD_FIELD(uint32_t reserved_17_31 : 15,
- __BITFIELD_FIELD(uint32_t otgi2csel : 1,
- __BITFIELD_FIELD(uint32_t phylpwrclksel : 1,
- __BITFIELD_FIELD(uint32_t reserved_14_14 : 1,
- __BITFIELD_FIELD(uint32_t usbtrdtim : 4,
- __BITFIELD_FIELD(uint32_t hnpcap : 1,
- __BITFIELD_FIELD(uint32_t srpcap : 1,
- __BITFIELD_FIELD(uint32_t ddrsel : 1,
- __BITFIELD_FIELD(uint32_t physel : 1,
- __BITFIELD_FIELD(uint32_t fsintf : 1,
- __BITFIELD_FIELD(uint32_t ulpi_utmi_sel : 1,
- __BITFIELD_FIELD(uint32_t phyif : 1,
- __BITFIELD_FIELD(uint32_t toutcal : 3,
+ __BITFIELD_FIELD(u32 reserved_17_31 : 15,
+ __BITFIELD_FIELD(u32 otgi2csel : 1,
+ __BITFIELD_FIELD(u32 phylpwrclksel : 1,
+ __BITFIELD_FIELD(u32 reserved_14_14 : 1,
+ __BITFIELD_FIELD(u32 usbtrdtim : 4,
+ __BITFIELD_FIELD(u32 hnpcap : 1,
+ __BITFIELD_FIELD(u32 srpcap : 1,
+ __BITFIELD_FIELD(u32 ddrsel : 1,
+ __BITFIELD_FIELD(u32 physel : 1,
+ __BITFIELD_FIELD(u32 fsintf : 1,
+ __BITFIELD_FIELD(u32 ulpi_utmi_sel : 1,
+ __BITFIELD_FIELD(u32 phyif : 1,
+ __BITFIELD_FIELD(u32 toutcal : 3,
;)))))))))))))
} s;
};
@@ -925,15 +925,15 @@ union cvmx_usbcx_gusbcfg {
* in the corresponding Host Channel-n Interrupt register.
*/
union cvmx_usbcx_haint {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_haint_s
* @haint: Channel Interrupts (HAINT)
* One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15
*/
struct cvmx_usbcx_haint_s {
- __BITFIELD_FIELD(uint32_t reserved_16_31 : 16,
- __BITFIELD_FIELD(uint32_t haint : 16,
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 haint : 16,
;))
} s;
};
@@ -950,15 +950,15 @@ union cvmx_usbcx_haint {
* Mask interrupt: 1'b0 Unmask interrupt: 1'b1
*/
union cvmx_usbcx_haintmsk {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_haintmsk_s
* @haintmsk: Channel Interrupt Mask (HAINTMsk)
* One bit per channel: Bit 0 for channel 0, bit 15 for channel 15
*/
struct cvmx_usbcx_haintmsk_s {
- __BITFIELD_FIELD(uint32_t reserved_16_31 : 16,
- __BITFIELD_FIELD(uint32_t haintmsk : 16,
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 haintmsk : 16,
;))
} s;
};
@@ -970,7 +970,7 @@ union cvmx_usbcx_haintmsk {
*
*/
union cvmx_usbcx_hccharx {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hccharx_s
* @chena: Channel Enable (ChEna)
@@ -1028,17 +1028,17 @@ union cvmx_usbcx_hccharx {
* Indicates the maximum packet size of the associated endpoint.
*/
struct cvmx_usbcx_hccharx_s {
- __BITFIELD_FIELD(uint32_t chena : 1,
- __BITFIELD_FIELD(uint32_t chdis : 1,
- __BITFIELD_FIELD(uint32_t oddfrm : 1,
- __BITFIELD_FIELD(uint32_t devaddr : 7,
- __BITFIELD_FIELD(uint32_t ec : 2,
- __BITFIELD_FIELD(uint32_t eptype : 2,
- __BITFIELD_FIELD(uint32_t lspddev : 1,
- __BITFIELD_FIELD(uint32_t reserved_16_16 : 1,
- __BITFIELD_FIELD(uint32_t epdir : 1,
- __BITFIELD_FIELD(uint32_t epnum : 4,
- __BITFIELD_FIELD(uint32_t mps : 11,
+ __BITFIELD_FIELD(u32 chena : 1,
+ __BITFIELD_FIELD(u32 chdis : 1,
+ __BITFIELD_FIELD(u32 oddfrm : 1,
+ __BITFIELD_FIELD(u32 devaddr : 7,
+ __BITFIELD_FIELD(u32 ec : 2,
+ __BITFIELD_FIELD(u32 eptype : 2,
+ __BITFIELD_FIELD(u32 lspddev : 1,
+ __BITFIELD_FIELD(u32 reserved_16_16 : 1,
+ __BITFIELD_FIELD(u32 epdir : 1,
+ __BITFIELD_FIELD(u32 epnum : 4,
+ __BITFIELD_FIELD(u32 mps : 11,
;)))))))))))
} s;
};
@@ -1052,7 +1052,7 @@ union cvmx_usbcx_hccharx {
* register after initializing the host.
*/
union cvmx_usbcx_hcfg {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hcfg_s
* @fslssupp: FS- and LS-Only Support (FSLSSupp)
@@ -1084,9 +1084,9 @@ union cvmx_usbcx_hcfg {
* * 2'b11: Reserved
*/
struct cvmx_usbcx_hcfg_s {
- __BITFIELD_FIELD(uint32_t reserved_3_31 : 29,
- __BITFIELD_FIELD(uint32_t fslssupp : 1,
- __BITFIELD_FIELD(uint32_t fslspclksel : 2,
+ __BITFIELD_FIELD(u32 reserved_3_31 : 29,
+ __BITFIELD_FIELD(u32 fslssupp : 1,
+ __BITFIELD_FIELD(u32 fslspclksel : 2,
;)))
} s;
};
@@ -1106,7 +1106,7 @@ union cvmx_usbcx_hcfg {
* HAINT and GINTSTS registers.
*/
union cvmx_usbcx_hcintx {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hcintx_s
* @datatglerr: Data Toggle Error (DataTglErr)
@@ -1126,18 +1126,18 @@ union cvmx_usbcx_hcintx {
* Transfer completed normally without any errors.
*/
struct cvmx_usbcx_hcintx_s {
- __BITFIELD_FIELD(uint32_t reserved_11_31 : 21,
- __BITFIELD_FIELD(uint32_t datatglerr : 1,
- __BITFIELD_FIELD(uint32_t frmovrun : 1,
- __BITFIELD_FIELD(uint32_t bblerr : 1,
- __BITFIELD_FIELD(uint32_t xacterr : 1,
- __BITFIELD_FIELD(uint32_t nyet : 1,
- __BITFIELD_FIELD(uint32_t ack : 1,
- __BITFIELD_FIELD(uint32_t nak : 1,
- __BITFIELD_FIELD(uint32_t stall : 1,
- __BITFIELD_FIELD(uint32_t ahberr : 1,
- __BITFIELD_FIELD(uint32_t chhltd : 1,
- __BITFIELD_FIELD(uint32_t xfercompl : 1,
+ __BITFIELD_FIELD(u32 reserved_11_31 : 21,
+ __BITFIELD_FIELD(u32 datatglerr : 1,
+ __BITFIELD_FIELD(u32 frmovrun : 1,
+ __BITFIELD_FIELD(u32 bblerr : 1,
+ __BITFIELD_FIELD(u32 xacterr : 1,
+ __BITFIELD_FIELD(u32 nyet : 1,
+ __BITFIELD_FIELD(u32 ack : 1,
+ __BITFIELD_FIELD(u32 nak : 1,
+ __BITFIELD_FIELD(u32 stall : 1,
+ __BITFIELD_FIELD(u32 ahberr : 1,
+ __BITFIELD_FIELD(u32 chhltd : 1,
+ __BITFIELD_FIELD(u32 xfercompl : 1,
;))))))))))))
} s;
};
@@ -1152,7 +1152,7 @@ union cvmx_usbcx_hcintx {
* Mask interrupt: 1'b0 Unmask interrupt: 1'b1
*/
union cvmx_usbcx_hcintmskx {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hcintmskx_s
* @datatglerrmsk: Data Toggle Error Mask (DataTglErrMsk)
@@ -1168,18 +1168,18 @@ union cvmx_usbcx_hcintmskx {
* @xfercomplmsk: Transfer Completed Mask (XferComplMsk)
*/
struct cvmx_usbcx_hcintmskx_s {
- __BITFIELD_FIELD(uint32_t reserved_11_31 : 21,
- __BITFIELD_FIELD(uint32_t datatglerrmsk : 1,
- __BITFIELD_FIELD(uint32_t frmovrunmsk : 1,
- __BITFIELD_FIELD(uint32_t bblerrmsk : 1,
- __BITFIELD_FIELD(uint32_t xacterrmsk : 1,
- __BITFIELD_FIELD(uint32_t nyetmsk : 1,
- __BITFIELD_FIELD(uint32_t ackmsk : 1,
- __BITFIELD_FIELD(uint32_t nakmsk : 1,
- __BITFIELD_FIELD(uint32_t stallmsk : 1,
- __BITFIELD_FIELD(uint32_t ahberrmsk : 1,
- __BITFIELD_FIELD(uint32_t chhltdmsk : 1,
- __BITFIELD_FIELD(uint32_t xfercomplmsk : 1,
+ __BITFIELD_FIELD(u32 reserved_11_31 : 21,
+ __BITFIELD_FIELD(u32 datatglerrmsk : 1,
+ __BITFIELD_FIELD(u32 frmovrunmsk : 1,
+ __BITFIELD_FIELD(u32 bblerrmsk : 1,
+ __BITFIELD_FIELD(u32 xacterrmsk : 1,
+ __BITFIELD_FIELD(u32 nyetmsk : 1,
+ __BITFIELD_FIELD(u32 ackmsk : 1,
+ __BITFIELD_FIELD(u32 nakmsk : 1,
+ __BITFIELD_FIELD(u32 stallmsk : 1,
+ __BITFIELD_FIELD(u32 ahberrmsk : 1,
+ __BITFIELD_FIELD(u32 chhltdmsk : 1,
+ __BITFIELD_FIELD(u32 xfercomplmsk : 1,
;))))))))))))
} s;
};
@@ -1191,7 +1191,7 @@ union cvmx_usbcx_hcintmskx {
*
*/
union cvmx_usbcx_hcspltx {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hcspltx_s
* @spltena: Split Enable (SpltEna)
@@ -1219,12 +1219,12 @@ union cvmx_usbcx_hcspltx {
* translator.
*/
struct cvmx_usbcx_hcspltx_s {
- __BITFIELD_FIELD(uint32_t spltena : 1,
- __BITFIELD_FIELD(uint32_t reserved_17_30 : 14,
- __BITFIELD_FIELD(uint32_t compsplt : 1,
- __BITFIELD_FIELD(uint32_t xactpos : 2,
- __BITFIELD_FIELD(uint32_t hubaddr : 7,
- __BITFIELD_FIELD(uint32_t prtaddr : 7,
+ __BITFIELD_FIELD(u32 spltena : 1,
+ __BITFIELD_FIELD(u32 reserved_17_30 : 14,
+ __BITFIELD_FIELD(u32 compsplt : 1,
+ __BITFIELD_FIELD(u32 xactpos : 2,
+ __BITFIELD_FIELD(u32 hubaddr : 7,
+ __BITFIELD_FIELD(u32 prtaddr : 7,
;))))))
} s;
};
@@ -1236,7 +1236,7 @@ union cvmx_usbcx_hcspltx {
*
*/
union cvmx_usbcx_hctsizx {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hctsizx_s
* @dopng: Do Ping (DoPng)
@@ -1265,10 +1265,10 @@ union cvmx_usbcx_hctsizx {
* size for IN transactions (periodic and non-periodic).
*/
struct cvmx_usbcx_hctsizx_s {
- __BITFIELD_FIELD(uint32_t dopng : 1,
- __BITFIELD_FIELD(uint32_t pid : 2,
- __BITFIELD_FIELD(uint32_t pktcnt : 10,
- __BITFIELD_FIELD(uint32_t xfersize : 19,
+ __BITFIELD_FIELD(u32 dopng : 1,
+ __BITFIELD_FIELD(u32 pid : 2,
+ __BITFIELD_FIELD(u32 pktcnt : 10,
+ __BITFIELD_FIELD(u32 xfersize : 19,
;))))
} s;
};
@@ -1282,7 +1282,7 @@ union cvmx_usbcx_hctsizx {
* which the O2P USB core has enumerated.
*/
union cvmx_usbcx_hfir {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hfir_s
* @frint: Frame Interval (FrInt)
@@ -1303,8 +1303,8 @@ union cvmx_usbcx_hfir {
* * 1 ms (PHY clock frequency for FS/LS)
*/
struct cvmx_usbcx_hfir_s {
- __BITFIELD_FIELD(uint32_t reserved_16_31 : 16,
- __BITFIELD_FIELD(uint32_t frint : 16,
+ __BITFIELD_FIELD(u32 reserved_16_31 : 16,
+ __BITFIELD_FIELD(u32 frint : 16,
;))
} s;
};
@@ -1319,7 +1319,7 @@ union cvmx_usbcx_hfir {
* in the current (micro)frame.
*/
union cvmx_usbcx_hfnum {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hfnum_s
* @frrem: Frame Time Remaining (FrRem)
@@ -1333,8 +1333,8 @@ union cvmx_usbcx_hfnum {
* USB, and is reset to 0 when it reaches 16'h3FFF.
*/
struct cvmx_usbcx_hfnum_s {
- __BITFIELD_FIELD(uint32_t frrem : 16,
- __BITFIELD_FIELD(uint32_t frnum : 16,
+ __BITFIELD_FIELD(u32 frrem : 16,
+ __BITFIELD_FIELD(u32 frnum : 16,
;))
} s;
};
@@ -1355,7 +1355,7 @@ union cvmx_usbcx_hfnum {
* the application must write a 1 to the bit to clear the interrupt.
*/
union cvmx_usbcx_hprt {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hprt_s
* @prtspd: Port Speed (PrtSpd)
@@ -1461,21 +1461,21 @@ union cvmx_usbcx_hprt {
* * 1: A device is attached to the port.
*/
struct cvmx_usbcx_hprt_s {
- __BITFIELD_FIELD(uint32_t reserved_19_31 : 13,
- __BITFIELD_FIELD(uint32_t prtspd : 2,
- __BITFIELD_FIELD(uint32_t prttstctl : 4,
- __BITFIELD_FIELD(uint32_t prtpwr : 1,
- __BITFIELD_FIELD(uint32_t prtlnsts : 2,
- __BITFIELD_FIELD(uint32_t reserved_9_9 : 1,
- __BITFIELD_FIELD(uint32_t prtrst : 1,
- __BITFIELD_FIELD(uint32_t prtsusp : 1,
- __BITFIELD_FIELD(uint32_t prtres : 1,
- __BITFIELD_FIELD(uint32_t prtovrcurrchng : 1,
- __BITFIELD_FIELD(uint32_t prtovrcurract : 1,
- __BITFIELD_FIELD(uint32_t prtenchng : 1,
- __BITFIELD_FIELD(uint32_t prtena : 1,
- __BITFIELD_FIELD(uint32_t prtconndet : 1,
- __BITFIELD_FIELD(uint32_t prtconnsts : 1,
+ __BITFIELD_FIELD(u32 reserved_19_31 : 13,
+ __BITFIELD_FIELD(u32 prtspd : 2,
+ __BITFIELD_FIELD(u32 prttstctl : 4,
+ __BITFIELD_FIELD(u32 prtpwr : 1,
+ __BITFIELD_FIELD(u32 prtlnsts : 2,
+ __BITFIELD_FIELD(u32 reserved_9_9 : 1,
+ __BITFIELD_FIELD(u32 prtrst : 1,
+ __BITFIELD_FIELD(u32 prtsusp : 1,
+ __BITFIELD_FIELD(u32 prtres : 1,
+ __BITFIELD_FIELD(u32 prtovrcurrchng : 1,
+ __BITFIELD_FIELD(u32 prtovrcurract : 1,
+ __BITFIELD_FIELD(u32 prtenchng : 1,
+ __BITFIELD_FIELD(u32 prtena : 1,
+ __BITFIELD_FIELD(u32 prtconndet : 1,
+ __BITFIELD_FIELD(u32 prtconnsts : 1,
;)))))))))))))))
} s;
};
@@ -1489,7 +1489,7 @@ union cvmx_usbcx_hprt {
* TxFIFO, as shown in Figures 310 and 311.
*/
union cvmx_usbcx_hptxfsiz {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hptxfsiz_s
* @ptxfsize: Host Periodic TxFIFO Depth (PTxFSize)
@@ -1499,8 +1499,8 @@ union cvmx_usbcx_hptxfsiz {
* @ptxfstaddr: Host Periodic TxFIFO Start Address (PTxFStAddr)
*/
struct cvmx_usbcx_hptxfsiz_s {
- __BITFIELD_FIELD(uint32_t ptxfsize : 16,
- __BITFIELD_FIELD(uint32_t ptxfstaddr : 16,
+ __BITFIELD_FIELD(u32 ptxfsize : 16,
+ __BITFIELD_FIELD(u32 ptxfstaddr : 16,
;))
} s;
};
@@ -1514,7 +1514,7 @@ union cvmx_usbcx_hptxfsiz {
* TxFIFO and the Periodic Transmit Request Queue
*/
union cvmx_usbcx_hptxsts {
- uint32_t u32;
+ u32 u32;
/**
* struct cvmx_usbcx_hptxsts_s
* @ptxqtop: Top of the Periodic Transmit Request Queue (PTxQTop)
@@ -1555,9 +1555,9 @@ union cvmx_usbcx_hptxsts {
* * Others: Reserved
*/
struct cvmx_usbcx_hptxsts_s {
- __BITFIELD_FIELD(uint32_t ptxqtop : 8,
- __BITFIELD_FIELD(uint32_t ptxqspcavail : 8,
- __BITFIELD_FIELD(uint32_t ptxfspcavail : 16,
+ __BITFIELD_FIELD(u32 ptxqtop : 8,
+ __BITFIELD_FIELD(u32 ptxqspcavail : 8,
+ __BITFIELD_FIELD(u32 ptxfspcavail : 16,
;)))
} s;
};
@@ -1571,7 +1571,7 @@ union cvmx_usbcx_hptxsts {
* hreset and phy_rst signals.
*/
union cvmx_usbnx_clk_ctl {
- uint64_t u64;
+ u64 u64;
/**
* struct cvmx_usbnx_clk_ctl_s
* @divide2: The 'hclk' used by the USB subsystem is derived
@@ -1661,21 +1661,21 @@ union cvmx_usbnx_clk_ctl {
* until AFTER this field is set and then read.
*/
struct cvmx_usbnx_clk_ctl_s {
- __BITFIELD_FIELD(uint64_t reserved_20_63 : 44,
- __BITFIELD_FIELD(uint64_t divide2 : 2,
- __BITFIELD_FIELD(uint64_t hclk_rst : 1,
- __BITFIELD_FIELD(uint64_t p_x_on : 1,
- __BITFIELD_FIELD(uint64_t p_rtype : 2,
- __BITFIELD_FIELD(uint64_t p_com_on : 1,
- __BITFIELD_FIELD(uint64_t p_c_sel : 2,
- __BITFIELD_FIELD(uint64_t cdiv_byp : 1,
- __BITFIELD_FIELD(uint64_t sd_mode : 2,
- __BITFIELD_FIELD(uint64_t s_bist : 1,
- __BITFIELD_FIELD(uint64_t por : 1,
- __BITFIELD_FIELD(uint64_t enable : 1,
- __BITFIELD_FIELD(uint64_t prst : 1,
- __BITFIELD_FIELD(uint64_t hrst : 1,
- __BITFIELD_FIELD(uint64_t divide : 3,
+ __BITFIELD_FIELD(u64 reserved_20_63 : 44,
+ __BITFIELD_FIELD(u64 divide2 : 2,
+ __BITFIELD_FIELD(u64 hclk_rst : 1,
+ __BITFIELD_FIELD(u64 p_x_on : 1,
+ __BITFIELD_FIELD(u64 p_rtype : 2,
+ __BITFIELD_FIELD(u64 p_com_on : 1,
+ __BITFIELD_FIELD(u64 p_c_sel : 2,
+ __BITFIELD_FIELD(u64 cdiv_byp : 1,
+ __BITFIELD_FIELD(u64 sd_mode : 2,
+ __BITFIELD_FIELD(u64 s_bist : 1,
+ __BITFIELD_FIELD(u64 por : 1,
+ __BITFIELD_FIELD(u64 enable : 1,
+ __BITFIELD_FIELD(u64 prst : 1,
+ __BITFIELD_FIELD(u64 hrst : 1,
+ __BITFIELD_FIELD(u64 divide : 3,
;)))))))))))))))
} s;
};
@@ -1688,7 +1688,7 @@ union cvmx_usbnx_clk_ctl {
* Contains general control and status information for the USBN block.
*/
union cvmx_usbnx_usbp_ctl_status {
- uint64_t u64;
+ u64 u64;
/**
* struct cvmx_usbnx_usbp_ctl_status_s
* @txrisetune: HS Transmitter Rise/Fall Time Adjustment
@@ -1804,41 +1804,41 @@ union cvmx_usbnx_usbp_ctl_status {
* de-assertion.
*/
struct cvmx_usbnx_usbp_ctl_status_s {
- __BITFIELD_FIELD(uint64_t txrisetune : 1,
- __BITFIELD_FIELD(uint64_t txvreftune : 4,
- __BITFIELD_FIELD(uint64_t txfslstune : 4,
- __BITFIELD_FIELD(uint64_t txhsxvtune : 2,
- __BITFIELD_FIELD(uint64_t sqrxtune : 3,
- __BITFIELD_FIELD(uint64_t compdistune : 3,
- __BITFIELD_FIELD(uint64_t otgtune : 3,
- __BITFIELD_FIELD(uint64_t otgdisable : 1,
- __BITFIELD_FIELD(uint64_t portreset : 1,
- __BITFIELD_FIELD(uint64_t drvvbus : 1,
- __BITFIELD_FIELD(uint64_t lsbist : 1,
- __BITFIELD_FIELD(uint64_t fsbist : 1,
- __BITFIELD_FIELD(uint64_t hsbist : 1,
- __BITFIELD_FIELD(uint64_t bist_done : 1,
- __BITFIELD_FIELD(uint64_t bist_err : 1,
- __BITFIELD_FIELD(uint64_t tdata_out : 4,
- __BITFIELD_FIELD(uint64_t siddq : 1,
- __BITFIELD_FIELD(uint64_t txpreemphasistune : 1,
- __BITFIELD_FIELD(uint64_t dma_bmode : 1,
- __BITFIELD_FIELD(uint64_t usbc_end : 1,
- __BITFIELD_FIELD(uint64_t usbp_bist : 1,
- __BITFIELD_FIELD(uint64_t tclk : 1,
- __BITFIELD_FIELD(uint64_t dp_pulld : 1,
- __BITFIELD_FIELD(uint64_t dm_pulld : 1,
- __BITFIELD_FIELD(uint64_t hst_mode : 1,
- __BITFIELD_FIELD(uint64_t tuning : 4,
- __BITFIELD_FIELD(uint64_t tx_bs_enh : 1,
- __BITFIELD_FIELD(uint64_t tx_bs_en : 1,
- __BITFIELD_FIELD(uint64_t loop_enb : 1,
- __BITFIELD_FIELD(uint64_t vtest_enb : 1,
- __BITFIELD_FIELD(uint64_t bist_enb : 1,
- __BITFIELD_FIELD(uint64_t tdata_sel : 1,
- __BITFIELD_FIELD(uint64_t taddr_in : 4,
- __BITFIELD_FIELD(uint64_t tdata_in : 8,
- __BITFIELD_FIELD(uint64_t ate_reset : 1,
+ __BITFIELD_FIELD(u64 txrisetune : 1,
+ __BITFIELD_FIELD(u64 txvreftune : 4,
+ __BITFIELD_FIELD(u64 txfslstune : 4,
+ __BITFIELD_FIELD(u64 txhsxvtune : 2,
+ __BITFIELD_FIELD(u64 sqrxtune : 3,
+ __BITFIELD_FIELD(u64 compdistune : 3,
+ __BITFIELD_FIELD(u64 otgtune : 3,
+ __BITFIELD_FIELD(u64 otgdisable : 1,
+ __BITFIELD_FIELD(u64 portreset : 1,
+ __BITFIELD_FIELD(u64 drvvbus : 1,
+ __BITFIELD_FIELD(u64 lsbist : 1,
+ __BITFIELD_FIELD(u64 fsbist : 1,
+ __BITFIELD_FIELD(u64 hsbist : 1,
+ __BITFIELD_FIELD(u64 bist_done : 1,
+ __BITFIELD_FIELD(u64 bist_err : 1,
+ __BITFIELD_FIELD(u64 tdata_out : 4,
+ __BITFIELD_FIELD(u64 siddq : 1,
+ __BITFIELD_FIELD(u64 txpreemphasistune : 1,
+ __BITFIELD_FIELD(u64 dma_bmode : 1,
+ __BITFIELD_FIELD(u64 usbc_end : 1,
+ __BITFIELD_FIELD(u64 usbp_bist : 1,
+ __BITFIELD_FIELD(u64 tclk : 1,
+ __BITFIELD_FIELD(u64 dp_pulld : 1,
+ __BITFIELD_FIELD(u64 dm_pulld : 1,
+ __BITFIELD_FIELD(u64 hst_mode : 1,
+ __BITFIELD_FIELD(u64 tuning : 4,
+ __BITFIELD_FIELD(u64 tx_bs_enh : 1,
+ __BITFIELD_FIELD(u64 tx_bs_en : 1,
+ __BITFIELD_FIELD(u64 loop_enb : 1,
+ __BITFIELD_FIELD(u64 vtest_enb : 1,
+ __BITFIELD_FIELD(u64 bist_enb : 1,
+ __BITFIELD_FIELD(u64 tdata_sel : 1,
+ __BITFIELD_FIELD(u64 taddr_in : 4,
+ __BITFIELD_FIELD(u64 tdata_in : 8,
+ __BITFIELD_FIELD(u64 ate_reset : 1,
;)))))))))))))))))))))))))))))))))))
} s;
};
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index fd9b3d899c1f..e13a4ab46977 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -118,13 +118,20 @@ void cvm_oct_adjust_link(struct net_device *dev)
struct octeon_ethernet *priv = netdev_priv(dev);
cvmx_helper_link_info_t link_info;
+ link_info.u64 = 0;
+ link_info.s.link_up = priv->phydev->link ? 1 : 0;
+ link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
+ link_info.s.speed = priv->phydev->speed;
+ priv->link_info = link_info.u64;
+
+ /*
+ * The polling task needs to know about link status changes.
+ */
+ if (priv->poll)
+ priv->poll(dev);
+
if (priv->last_link != priv->phydev->link) {
priv->last_link = priv->phydev->link;
- link_info.u64 = 0;
- link_info.s.link_up = priv->last_link ? 1 : 0;
- link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
- link_info.s.speed = priv->phydev->speed;
-
cvmx_helper_link_set(priv->port, link_info);
cvm_oct_note_carrier(priv, link_info);
}
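/*
 * Caching the snapshot in priv->link_info (and kicking priv->poll on
 * every adjust_link callback) is what lets the RGMII preamble check
 * further down run without touching the MDIO bus: it reads the state
 * the PHY layer just recorded instead of re-querying the hardware.
 */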
@@ -174,13 +181,22 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
goto no_phy;
phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
+ if (!phy_node && of_phy_is_fixed_link(priv->of_node)) {
+ int rc;
+
+ rc = of_phy_register_fixed_link(priv->of_node);
+ if (rc)
+ return rc;
+
+ phy_node = of_node_get(priv->of_node);
+ }
if (!phy_node)
goto no_phy;
priv->phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
PHY_INTERFACE_MODE_GMII);
- if (priv->phydev == NULL)
+ if (!priv->phydev)
return -ENODEV;
priv->last_link = 0;
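/*
 * For ports described with a "fixed-link" subnode instead of a
 * "phy-handle", of_phy_register_fixed_link() creates a software PHY
 * that always reports the configured speed and duplex; passing the
 * port's own node to of_phy_connect() then attaches to that emulated
 * PHY exactly as it would to a real one.
 */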
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 5a5cdb3cd740..d6172e4dace5 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -34,7 +34,7 @@ static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
while (freed) {
struct sk_buff *skb = dev_alloc_skb(size + 256);
- if (unlikely(skb == NULL))
+ if (unlikely(!skb))
break;
skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f));
*(struct sk_buff **)(skb->data - sizeof(void *)) = skb;
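/*
 * The reserve above places skb->data on a 128-byte boundary somewhere
 * inside the 256 bytes of extra headroom, presumably to satisfy the FPA
 * pool's alignment requirement, while leaving room for the sk_buff
 * back-pointer stashed just before the data. Worked value:
 *
 *   skb->data == ...0x1234 -> 0x1234 & 0x7f == 0x34
 *   skb_reserve(skb, 256 - 0x34) -> data == ...0x1400 (128-aligned)
 */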
@@ -98,7 +98,7 @@ static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
* just before the block.
*/
memory = kmalloc(size + 256, GFP_ATOMIC);
- if (unlikely(memory == NULL)) {
+ if (unlikely(!memory)) {
pr_warn("Unable to allocate %u bytes for FPA pool %d\n",
elements * size, pool);
break;
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index 1055ee14b66a..91b148cfcbdb 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -30,8 +30,6 @@
static DEFINE_SPINLOCK(global_register_lock);
-static int number_rgmii_ports;
-
static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
{
union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
@@ -63,251 +61,106 @@ static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
gmxx_rxx_int_reg.u64);
}
-static void cvm_oct_rgmii_poll(struct net_device *dev)
+static void cvm_oct_check_preamble_errors(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- unsigned long flags = 0;
cvmx_helper_link_info_t link_info;
- int use_global_register_lock = (priv->phydev == NULL);
+ unsigned long flags;
+
+ link_info.u64 = priv->link_info;
- BUG_ON(in_interrupt());
- if (use_global_register_lock) {
+ /*
+ * Take the global register lock since we are going to
+ * touch registers that affect more than one port.
+ */
+ spin_lock_irqsave(&global_register_lock, flags);
+
+ if (link_info.s.speed == 10 && priv->last_speed == 10) {
/*
- * Take the global register lock since we are going to
- * touch registers that affect more than one port.
+ * Read the GMXX_RXX_INT_REG[PCTERR] bit and see if we are
+ * getting preamble errors.
*/
- spin_lock_irqsave(&global_register_lock, flags);
- } else {
- mutex_lock(&priv->phydev->mdio.bus->mdio_lock);
- }
+ int interface = INTERFACE(priv->port);
+ int index = INDEX(priv->port);
+ union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
- link_info = cvmx_helper_link_get(priv->port);
- if (link_info.u64 == priv->link_info) {
- if (link_info.s.speed == 10) {
+ gmxx_rxx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
+ (index, interface));
+ if (gmxx_rxx_int_reg.s.pcterr) {
/*
- * Read the GMXX_RXX_INT_REG[PCTERR] bit and
- * see if we are getting preamble errors.
+ * We are getting preamble errors at 10Mbps. Most
+ * likely the PHY is giving us packets with misaligned
+ * preambles. In order to get these packets we need to
+ * disable preamble checking and do it in software.
*/
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
- union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
-
- gmxx_rxx_int_reg.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
- (index, interface));
- if (gmxx_rxx_int_reg.s.pcterr) {
- /*
- * We are getting preamble errors at
- * 10Mbps. Most likely the PHY is
- * giving us packets with mis aligned
- * preambles. In order to get these
- * packets we need to disable preamble
- * checking and do it in software.
- */
- cvm_oct_set_hw_preamble(priv, false);
- printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
- dev->name);
- }
+ cvm_oct_set_hw_preamble(priv, false);
+ printk_ratelimited("%s: Using 10Mbps with software preamble removal\n",
+ dev->name);
}
-
- if (use_global_register_lock)
- spin_unlock_irqrestore(&global_register_lock, flags);
- else
- mutex_unlock(&priv->phydev->mdio.bus->mdio_lock);
- return;
- }
-
- /* Since the 10Mbps preamble workaround is allowed we need to enable
- * preamble checking, FCS stripping, and clear error bits on
- * every speed change. If errors occur during 10Mbps operation
- * the above code will change this stuff
- */
- cvm_oct_set_hw_preamble(priv, true);
-
- if (priv->phydev == NULL) {
- link_info = cvmx_helper_link_autoconf(priv->port);
- priv->link_info = link_info.u64;
- }
-
- if (use_global_register_lock)
- spin_unlock_irqrestore(&global_register_lock, flags);
- else
- mutex_unlock(&priv->phydev->mdio.bus->mdio_lock);
-
- if (priv->phydev == NULL) {
- /* Tell core. */
- if (link_info.s.link_up) {
- if (!netif_carrier_ok(dev))
- netif_carrier_on(dev);
- } else if (netif_carrier_ok(dev)) {
- netif_carrier_off(dev);
- }
- cvm_oct_note_carrier(priv, link_info);
+ } else {
+ /*
+ * Since the 10Mbps preamble workaround is allowed, we need to
+ * re-enable preamble checking, FCS stripping, and clear the error
+ * bits on every speed change. If errors show up again during
+ * 10Mbps operation, the check above will disable hardware preamble
+ * checking once more.
+ */
+ if (priv->last_speed != link_info.s.speed)
+ cvm_oct_set_hw_preamble(priv, true);
+ priv->last_speed = link_info.s.speed;
}
+ spin_unlock_irqrestore(&global_register_lock, flags);
}
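/*
 * The workaround only kicks in once the link has reported 10Mbps on two
 * consecutive checks (priv->last_speed and the current snapshot), and
 * any speed change re-arms hardware preamble checking, so a later
 * renegotiation to 100 or 1000 Mbps quietly undoes the software
 * fallback.
 */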
-static int cmv_oct_rgmii_gmx_interrupt(int interface)
+static void cvm_oct_rgmii_poll(struct net_device *dev)
{
- int index;
- int count = 0;
-
- /* Loop through every port of this interface */
- for (index = 0;
- index < cvmx_helper_ports_on_interface(interface);
- index++) {
- union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
+ struct octeon_ethernet *priv = netdev_priv(dev);
+ cvmx_helper_link_info_t link_info;
+ bool status_change;
- /* Read the GMX interrupt status bits */
- gmx_rx_int_reg.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
- (index, interface));
- gmx_rx_int_reg.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
- (index, interface));
+ link_info = cvmx_helper_link_autoconf(priv->port);
+ status_change = priv->link_info != link_info.u64;
+ priv->link_info = link_info.u64;
- /* Poll the port if inband status changed */
- if (gmx_rx_int_reg.s.phy_dupx || gmx_rx_int_reg.s.phy_link ||
- gmx_rx_int_reg.s.phy_spd) {
- struct net_device *dev =
- cvm_oct_device[cvmx_helper_get_ipd_port
- (interface, index)];
- struct octeon_ethernet *priv = netdev_priv(dev);
+ cvm_oct_check_preamble_errors(dev);
- if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
- queue_work(cvm_oct_poll_queue,
- &priv->port_work);
+ if (likely(!status_change))
+ return;
- gmx_rx_int_reg.u64 = 0;
- gmx_rx_int_reg.s.phy_dupx = 1;
- gmx_rx_int_reg.s.phy_link = 1;
- gmx_rx_int_reg.s.phy_spd = 1;
- cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
- gmx_rx_int_reg.u64);
- count++;
- }
+ /* Tell core. */
+ if (link_info.s.link_up) {
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else if (netif_carrier_ok(dev)) {
+ netif_carrier_off(dev);
}
- return count;
-}
-
-static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
-{
- union cvmx_npi_rsl_int_blocks rsl_int_blocks;
- int count = 0;
-
- rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
-
- /* Check and see if this interrupt was caused by the GMX0 block */
- if (rsl_int_blocks.s.gmx0)
- count += cmv_oct_rgmii_gmx_interrupt(0);
-
- /* Check and see if this interrupt was caused by the GMX1 block */
- if (rsl_int_blocks.s.gmx1)
- count += cmv_oct_rgmii_gmx_interrupt(1);
-
- return count ? IRQ_HANDLED : IRQ_NONE;
+ cvm_oct_note_carrier(priv, link_info);
}
int cvm_oct_rgmii_open(struct net_device *dev)
{
- return cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
-}
-
-static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
-{
- struct octeon_ethernet *priv =
- container_of(work, struct octeon_ethernet, port_work);
- cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
-}
-
-int cvm_oct_rgmii_init(struct net_device *dev)
-{
struct octeon_ethernet *priv = netdev_priv(dev);
- int r;
-
- cvm_oct_common_init(dev);
- INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
- /*
- * Due to GMX errata in CN3XXX series chips, it is necessary
- * to take the link down immediately when the PHY changes
- * state. In order to do this we call the poll function every
- * time the RGMII inband status changes. This may cause
- * problems if the PHY doesn't implement inband status
- * properly.
- */
- if (number_rgmii_ports == 0) {
- r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
- IRQF_SHARED, "RGMII", &number_rgmii_ports);
- if (r != 0)
- return r;
- }
- number_rgmii_ports++;
-
- /*
- * Only true RGMII ports need to be polled. In GMII mode, port
- * 0 is really a RGMII port.
- */
- if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
- && (priv->port == 0))
- || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
+ int ret;
- if (!octeon_is_simulation()) {
+ ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll);
+ if (ret)
+ return ret;
- union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- /*
- * Enable interrupts on inband status changes
- * for this port.
- */
- gmx_rx_int_en.u64 = 0;
- gmx_rx_int_en.s.phy_dupx = 1;
- gmx_rx_int_en.s.phy_link = 1;
- gmx_rx_int_en.s.phy_spd = 1;
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
- gmx_rx_int_en.u64);
+ if (priv->phydev) {
+ /*
+		 * In phydev mode, we still need periodic polling for preamble
+		 * error checking, and we also need to call this function on
+		 * every link state change.
+		 *
+		 * Only true RGMII ports need to be polled. In GMII mode, port
+		 * 0 is really an RGMII port.
+ */
+ if ((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII &&
+ priv->port == 0) ||
+ (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
+ priv->poll = cvm_oct_check_preamble_errors;
+ cvm_oct_check_preamble_errors(dev);
}
}
return 0;
}
-
-void cvm_oct_rgmii_uninit(struct net_device *dev)
-{
- struct octeon_ethernet *priv = netdev_priv(dev);
-
- cvm_oct_common_uninit(dev);
-
- /*
- * Only true RGMII ports need to be polled. In GMII mode, port
- * 0 is really a RGMII port.
- */
- if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
- && (priv->port == 0))
- || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
-
- if (!octeon_is_simulation()) {
-
- union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
- int interface = INTERFACE(priv->port);
- int index = INDEX(priv->port);
-
- /*
- * Disable interrupts on inband status changes
- * for this port.
- */
- gmx_rx_int_en.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
- (index, interface));
- gmx_rx_int_en.s.phy_dupx = 0;
- gmx_rx_int_en.s.phy_link = 0;
- gmx_rx_int_en.s.phy_spd = 0;
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
- gmx_rx_int_en.u64);
- }
- }
-
- /* Remove the interrupt handler when the last port is removed. */
- number_rgmii_ports--;
- if (number_rgmii_ports == 0)
- free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
- cancel_work_sync(&priv->port_work);
-}
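
[Annotation: taken together, the ethernet-rgmii.c changes drop the shared RML interrupt
handler and the dedicated work item in favour of the driver's existing periodic poll
hook. A minimal sketch of that pattern, assuming only the per-port poll callback and the
standard kernel workqueue API (the demo_* names are hypothetical):

	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct demo_port {
		struct net_device *dev;
		struct delayed_work periodic;
		void (*poll)(struct net_device *dev);	/* e.g. cvm_oct_rgmii_poll */
	};

	static void demo_periodic_worker(struct work_struct *work)
	{
		struct delayed_work *dw = to_delayed_work(work);
		struct demo_port *p = container_of(dw, struct demo_port, periodic);

		if (p->poll)
			p->poll(p->dev);	/* check link, update carrier state */
		schedule_delayed_work(&p->periodic, HZ);	/* re-arm, 1s period */
	}
]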
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 6aed3cf6c0b4..b6993b0b8170 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -26,8 +26,6 @@
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
-#include <linux/atomic.h>
-
#include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
@@ -103,7 +101,6 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
gmxx_rxx_frm_ctl.u64 =
cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
-
u8 *ptr =
cvmx_phys_to_ptr(work->packet_ptr.s.addr);
int i = 0;
@@ -116,17 +113,11 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
}
if (*ptr == 0xd5) {
- /*
- printk_ratelimited("Port %d received 0xd5 preamble\n",
- port);
- */
+ /* Port received 0xd5 preamble */
work->packet_ptr.s.addr += i + 1;
work->word1.len -= i + 5;
} else if ((*ptr & 0xf) == 0xd) {
- /*
- printk_ratelimited("Port %d received 0x?d preamble\n",
- port);
- */
+			/* Port received 0x?d preamble */
work->packet_ptr.s.addr += i;
work->word1.len -= i + 4;
for (i = 0; i < work->word1.len; i++) {
@@ -138,9 +129,6 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
} else {
printk_ratelimited("Port %d unknown preamble, packet dropped\n",
port);
- /*
- cvmx_helper_dump_packet(work);
- */
cvm_oct_free_work(work);
return 1;
}
@@ -211,7 +199,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
prefetch(work);
did_work_request = 0;
- if (work == NULL) {
+ if (!work) {
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
1ull << pow_receive_group);
@@ -227,7 +215,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
}
break;
}
- pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
+ pskb = (struct sk_buff **)
+ (cvm_oct_get_buffer_ptr(work->packet_ptr) -
sizeof(void *));
prefetch(pskb);
@@ -309,7 +298,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
while (segments--) {
union cvmx_buf_ptr next_ptr =
- *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
+ *(union cvmx_buf_ptr *)
+ cvmx_phys_to_ptr(
+ segment_ptr.s.addr - 8);
/*
* Octeon Errata PKI-100: The segment size is
@@ -333,7 +324,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
segment_size = len;
/* Copy the data into the packet */
memcpy(skb_put(skb, segment_size),
- cvmx_phys_to_ptr(segment_ptr.s.addr),
+ cvmx_phys_to_ptr(
+ segment_ptr.s.addr),
segment_size);
len -= segment_size;
segment_ptr = next_ptr;
@@ -364,32 +356,16 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
/* Increment RX stats for virtual ports */
if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
-#ifdef CONFIG_64BIT
- atomic64_add(1,
- (atomic64_t *)&priv->stats.rx_packets);
- atomic64_add(skb->len,
- (atomic64_t *)&priv->stats.rx_bytes);
-#else
- atomic_add(1,
- (atomic_t *)&priv->stats.rx_packets);
- atomic_add(skb->len,
- (atomic_t *)&priv->stats.rx_bytes);
-#endif
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += skb->len;
}
netif_receive_skb(skb);
} else {
- /* Drop any packet received for a device that isn't up */
/*
- printk_ratelimited("%s: Device not up, packet dropped\n",
- dev->name);
- */
-#ifdef CONFIG_64BIT
- atomic64_add(1,
- (atomic64_t *)&priv->stats.rx_dropped);
-#else
- atomic_add(1,
- (atomic_t *)&priv->stats.rx_dropped);
-#endif
+ * Drop any packet received for a device that
+ * isn't up.
+ */
+ priv->stats.rx_dropped++;
dev_kfree_skb_irq(skb);
}
} else {
@@ -433,7 +409,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
}
cvm_oct_rx_refill_pool(0);
- if (rx_count < budget && napi != NULL) {
+ if (rx_count < budget && napi) {
/* No more work */
napi_complete(napi);
enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
@@ -466,7 +442,7 @@ void cvm_oct_rx_initialize(void)
}
}
- if (NULL == dev_for_napi)
+ if (!dev_for_napi)
panic("No net_devices were allocated.");
netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
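
[Annotation: the poll-completion hunk above follows the standard NAPI contract:
napi_complete() and the device interrupt re-enable happen only when fewer packets
than the budget were processed. A condensed sketch of that contract (the demo_*
helpers are placeholders, not Octeon API):

	#include <linux/netdevice.h>

	static bool demo_rx_one(void)		/* hypothetical: pull one packet */
	{
		return false;			/* stub: ring empty */
	}

	static void demo_enable_rx_irq(void)	/* hypothetical: unmask RX IRQ */
	{
	}

	static int demo_napi_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

		while (done < budget && demo_rx_one())
			done++;

		if (done < budget) {
			/* Ring drained below budget: stop polling, re-arm IRQ. */
			napi_complete(napi);
			demo_enable_rx_irq();
		}
		return done;
	}
]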
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 2ae1944b3a1b..063dcd07557b 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -167,9 +167,7 @@ static void cvm_oct_spi_poll(struct net_device *dev)
int interface;
for (interface = 0; interface < 2; interface++) {
-
if ((priv->port == interface * 16) && need_retrain[interface]) {
-
if (cvmx_spi_restart_interface
(interface, CVMX_SPI_MODE_DUPLEX, 10) == 0) {
need_retrain[interface] = 0;
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index c053c4a47a7e..ffe9bd77a7bb 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -95,12 +95,10 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
for (qos = 0; qos < queues_per_port; qos++) {
if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
continue;
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4,
+ skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
MAX_SKB_TO_FREE);
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau+qos*4);
-
-
+ priv->fau + qos * 4);
total_freed += skb_to_free;
if (skb_to_free > 0) {
struct sk_buff *to_free_list = NULL;
@@ -126,7 +124,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
}
total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
}
- if (total_freed >= 0 && netif_queue_stopped(dev))
+ if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
netif_wake_queue(dev);
if (total_remaining)
cvm_oct_kick_tx_poll_watchdog();
@@ -176,8 +174,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
qos = 0;
else if (qos >= cvmx_pko_get_num_queues(priv->port))
qos = 0;
- } else
+ } else {
qos = 0;
+ }
if (USE_ASYNC_IOBDMA) {
/* Save scratch in case userspace is using it */
@@ -309,55 +308,38 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
#if REUSE_SKBUFFS_WITHOUT_FREE
fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
if (unlikely(skb->data < fpa_head)) {
- /*
- * printk("TX buffer beginning can't meet FPA
- * alignment constraints\n");
- */
+ /* TX buffer beginning can't meet FPA alignment constraints */
goto dont_put_skbuff_in_hw;
}
if (unlikely
((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
- /*
- printk("TX buffer isn't large enough for the FPA\n");
- */
+ /* TX buffer isn't large enough for the FPA */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb_shared(skb))) {
- /*
- printk("TX buffer sharing data with someone else\n");
- */
+ /* TX buffer sharing data with someone else */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb_cloned(skb))) {
- /*
- printk("TX buffer has been cloned\n");
- */
+ /* TX buffer has been cloned */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb_header_cloned(skb))) {
- /*
- printk("TX buffer header has been cloned\n");
- */
+ /* TX buffer header has been cloned */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb->destructor)) {
- /*
- printk("TX buffer has a destructor\n");
- */
+ /* TX buffer has a destructor */
goto dont_put_skbuff_in_hw;
}
if (unlikely(skb_shinfo(skb)->nr_frags)) {
- /*
- printk("TX buffer has fragments\n");
- */
+ /* TX buffer has fragments */
goto dont_put_skbuff_in_hw;
}
if (unlikely
(skb->truesize !=
sizeof(*skb) + skb_end_offset(skb))) {
- /*
- printk("TX buffer truesize has been changed\n");
- */
+ /* TX buffer truesize has been changed */
goto dont_put_skbuff_in_hw;
}
@@ -403,7 +385,7 @@ dont_put_skbuff_in_hw:
((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
(ip_hdr(skb)->protocol == IPPROTO_UDP))) {
/* Use hardware checksum calc */
- pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
+ pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
}
if (USE_ASYNC_IOBDMA) {
@@ -419,7 +401,8 @@ dont_put_skbuff_in_hw:
cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
}
- skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+ skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
+ priv->fau + qos * 4);
/*
* If we're sending faster than the receive can free them then
@@ -430,7 +413,7 @@ dont_put_skbuff_in_hw:
if (pko_command.s.dontfree) {
queue_type = QUEUE_CORE;
- pko_command.s.reg0 = priv->fau+qos*4;
+ pko_command.s.reg0 = priv->fau + qos * 4;
} else {
queue_type = QUEUE_HW;
}
@@ -443,7 +426,6 @@ dont_put_skbuff_in_hw:
/* Drop this packet if we have too many already queued to the HW */
if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
MAX_OUT_QUEUE_DEPTH)) {
-
if (dev->tx_queue_len != 0) {
/* Drop the lock when notifying the core. */
spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
@@ -559,7 +541,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a packet buffer */
packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
- if (unlikely(packet_buffer == NULL)) {
+ if (unlikely(!packet_buffer)) {
printk_ratelimited("%s: Failed to allocate a packet buffer\n",
dev->name);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
@@ -617,8 +599,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
work->word2.s.dec_ipcomp = 0; /* FIXME */
#endif
work->word2.s.tcp_or_udp =
- (ip_hdr(skb)->protocol == IPPROTO_TCP)
- || (ip_hdr(skb)->protocol == IPPROTO_UDP);
+ (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+ (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
/* FIXME */
work->word2.s.dec_ipsec = 0;
@@ -629,8 +611,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* No error, packet is internal */
work->word2.s.L4_error = 0;
#endif
- work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
- || (ip_hdr(skb)->frag_off ==
+ work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
+ (ip_hdr(skb)->frag_off ==
1 << 14));
#if 0
/* Assume Linux is sending a good packet */
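
[Annotation: one behavioural fix in ethernet-tx.c deserves a note: the hardware-checksum
hunk derives the IP header offset from skb_network_offset() instead of a hard-coded
sizeof(struct ethhdr). A small illustration of why, assuming a standard skb layout
(demo_l3_offset is hypothetical):

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	/*
	 * For an untagged frame skb_network_offset() equals
	 * sizeof(struct ethhdr) (14), but with an 802.1Q tag the IP header
	 * starts at 18, so the old constant pointed 4 bytes short.
	 */
	static int demo_l3_offset(const struct sk_buff *skb)
	{
		return skb_network_offset(skb);
	}
]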
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index f69fb5cc7cb8..271e1b8d8506 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -86,10 +86,6 @@ int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
-/*
- * cvm_oct_poll_queue - Workqueue for polling operations.
- */
-struct workqueue_struct *cvm_oct_poll_queue;
/*
* cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
@@ -121,8 +117,7 @@ static void cvm_oct_rx_refill_worker(struct work_struct *work)
cvm_oct_rx_refill_pool(num_packet_buffers / 2);
if (!atomic_read(&cvm_oct_poll_queue_stopping))
- queue_delayed_work(cvm_oct_poll_queue,
- &cvm_oct_rx_refill_work, HZ);
+ schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}
static void cvm_oct_periodic_worker(struct work_struct *work)
@@ -138,8 +133,7 @@ static void cvm_oct_periodic_worker(struct work_struct *work)
cvm_oct_device[priv->port]);
if (!atomic_read(&cvm_oct_poll_queue_stopping))
- queue_delayed_work(cvm_oct_poll_queue,
- &priv->port_periodic_work, HZ);
+ schedule_delayed_work(&priv->port_periodic_work, HZ);
}
static void cvm_oct_configure_common_hw(void)
@@ -226,18 +220,7 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
priv->stats.multicast += rx_status.multicast_packets;
priv->stats.rx_crc_errors += rx_status.inb_errors;
priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
-
- /*
- * The drop counter must be incremented atomically
- * since the RX tasklet also increments it.
- */
-#ifdef CONFIG_64BIT
- atomic64_add(rx_status.dropped_packets,
- (atomic64_t *)&priv->stats.rx_dropped);
-#else
- atomic_add(rx_status.dropped_packets,
- (atomic_t *)&priv->stats.rx_dropped);
-#endif
+ priv->stats.rx_dropped += rx_status.dropped_packets;
}
return &priv->stats;
@@ -265,22 +248,22 @@ static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
* Limit the MTU to make sure the ethernet packets are between
* 64 bytes and 65535 bytes.
*/
- if ((new_mtu + 14 + 4 + vlan_bytes < 64)
- || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
+ if ((new_mtu + 14 + 4 + vlan_bytes < 64) ||
+ (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
pr_err("MTU must be between %d and %d.\n",
64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
return -EINVAL;
}
dev->mtu = new_mtu;
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
/* Add ethernet header and FCS, and VLAN if configured. */
int max_packet = new_mtu + 14 + 4 + vlan_bytes;
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/* Signal errors on packets larger than the MTU */
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
max_packet);
@@ -319,8 +302,8 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
union cvmx_gmxx_rxx_adr_ctl control;
@@ -371,8 +354,8 @@ static int cvm_oct_set_mac_filter(struct net_device *dev)
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
- if ((interface < 2)
- && (cvmx_helper_interface_get_mode(interface) !=
+ if ((interface < 2) &&
+ (cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
int i;
u8 *ptr = dev->dev_addr;
@@ -445,8 +428,8 @@ int cvm_oct_common_init(struct net_device *dev)
* Force the interface to use the POW send if always_use_pow
* was specified or it is in the pow send list.
*/
- if ((pow_send_group != -1)
- && (always_use_pow || strstr(pow_send_list, dev->name)))
+ if ((pow_send_group != -1) &&
+ (always_use_pow || strstr(pow_send_list, dev->name)))
priv->queue = -1;
if (priv->queue != -1)
@@ -557,6 +540,7 @@ static const struct net_device_ops cvm_oct_npi_netdev_ops = {
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
.ndo_init = cvm_oct_common_init,
.ndo_uninit = cvm_oct_common_uninit,
@@ -572,6 +556,7 @@ static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
.ndo_init = cvm_oct_sgmii_init,
.ndo_uninit = cvm_oct_common_uninit,
@@ -587,6 +572,7 @@ static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
.ndo_init = cvm_oct_spi_init,
.ndo_uninit = cvm_oct_spi_uninit,
@@ -600,9 +586,10 @@ static const struct net_device_ops cvm_oct_spi_netdev_ops = {
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
- .ndo_init = cvm_oct_rgmii_init,
- .ndo_uninit = cvm_oct_rgmii_uninit,
+ .ndo_init = cvm_oct_common_init,
+ .ndo_uninit = cvm_oct_common_uninit,
.ndo_open = cvm_oct_rgmii_open,
.ndo_stop = cvm_oct_common_stop,
.ndo_start_xmit = cvm_oct_xmit,
@@ -615,6 +602,7 @@ static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
+
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
.ndo_init = cvm_oct_common_init,
.ndo_start_xmit = cvm_oct_xmit_pow,
@@ -677,11 +665,6 @@ static int cvm_oct_probe(struct platform_device *pdev)
return -EINVAL;
}
- cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
- if (!cvm_oct_poll_queue) {
- pr_err("octeon-ethernet: Cannot create workqueue");
- return -ENOMEM;
- }
cvm_oct_configure_common_hw();
@@ -790,7 +773,6 @@ static int cvm_oct_probe(struct platform_device *pdev)
cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
switch (priv->imode) {
-
/* These types don't support ports to IPD/PKO */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
@@ -840,8 +822,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
fau -=
cvmx_pko_get_num_queues(priv->port) *
sizeof(u32);
- queue_delayed_work(cvm_oct_poll_queue,
- &priv->port_periodic_work, HZ);
+ schedule_delayed_work(&priv->port_periodic_work, HZ);
}
}
}
@@ -854,7 +835,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
*/
cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
- queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
+ schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
return 0;
}
@@ -897,7 +878,6 @@ static int cvm_oct_remove(struct platform_device *pdev)
}
}
- destroy_workqueue(cvm_oct_poll_queue);
cvmx_pko_shutdown();
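
[Annotation: the ethernet.c hunks retire the driver's private single-threaded workqueue
and queue the refill and periodic work on the system workqueue instead. A minimal sketch
of the resulting pattern, using only standard workqueue API (the demo_* names are
hypothetical):

	#include <linux/workqueue.h>

	static struct delayed_work demo_refill_work;

	static void demo_refill(struct work_struct *work)
	{
		/* ... top up the RX buffer pool ... */
		schedule_delayed_work(&demo_refill_work, HZ);	/* system wq */
	}

	static int demo_init(void)
	{
		INIT_DELAYED_WORK(&demo_refill_work, demo_refill);
		schedule_delayed_work(&demo_refill_work, HZ);
		return 0;	/* no private workqueue to create or destroy */
	}
]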
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index fdf24d120e77..6275c15e0035 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -41,20 +41,18 @@ struct octeon_ethernet {
/* Device statistics */
struct net_device_stats stats;
struct phy_device *phydev;
+ unsigned int last_speed;
unsigned int last_link;
/* Last negotiated link state */
u64 link_info;
/* Called periodically to check link status */
void (*poll)(struct net_device *dev);
struct delayed_work port_periodic_work;
- struct work_struct port_work; /* may be unused. */
struct device_node *of_node;
};
int cvm_oct_free_work(void *work_queue_entry);
-int cvm_oct_rgmii_init(struct net_device *dev);
-void cvm_oct_rgmii_uninit(struct net_device *dev);
int cvm_oct_rgmii_open(struct net_device *dev);
int cvm_oct_sgmii_init(struct net_device *dev);
@@ -78,7 +76,6 @@ extern int pow_send_group;
extern int pow_receive_group;
extern char pow_send_list[];
extern struct net_device *cvm_oct_device[];
-extern struct workqueue_struct *cvm_oct_poll_queue;
extern atomic_t cvm_oct_poll_queue_stopping;
extern u64 cvm_oct_tx_poll_interval;
diff --git a/drivers/staging/panel/Kconfig b/drivers/staging/panel/Kconfig
deleted file mode 100644
index 3defa0133f2e..000000000000
--- a/drivers/staging/panel/Kconfig
+++ /dev/null
@@ -1,278 +0,0 @@
-config PANEL
- tristate "Parallel port LCD/Keypad Panel support"
- depends on PARPORT
- ---help---
- Say Y here if you have an HD44780 or KS-0074 LCD connected to your
- parallel port. This driver also features 4 and 6-key keypads. The LCD
- is accessible through the /dev/lcd char device (10, 156), and the
- keypad through /dev/keypad (10, 185). Both require misc device to be
- enabled. This code can either be compiled as a module, or linked into
- the kernel and started at boot. If you don't understand what all this
- is about, say N.
-
-config PANEL_PARPORT
- int "Default parallel port number (0=LPT1)"
- depends on PANEL
- range 0 255
- default "0"
- ---help---
- This is the index of the parallel port the panel is connected to. One
- driver instance only supports one parallel port, so if your keypad
- and LCD are connected to two separate ports, you have to start two
- modules with different arguments. Numbering starts with '0' for LPT1,
- and so on.
-
-config PANEL_PROFILE
- int "Default panel profile (0-5, 0=custom)"
- depends on PANEL
- range 0 5
- default "5"
- ---help---
- To ease configuration, the driver supports different configuration
- profiles for past and recent wirings. These profiles can also be
- used to define an approximative configuration, completed by a few
- other options. Here are the profiles :
-
- 0 = custom (see further)
- 1 = 2x16 parallel LCD, old keypad
- 2 = 2x16 serial LCD (KS-0074), new keypad
- 3 = 2x16 parallel LCD (Hantronix), no keypad
- 4 = 2x16 parallel LCD (Nexcom NSA1045) with Nexcom's keypad
- 5 = 2x40 parallel LCD (old one), with old keypad
-
- Custom configurations allow you to define how your display is
- wired to the parallel port, and how it works. This is only intended
- for experts.
-
-config PANEL_KEYPAD
- depends on PANEL && PANEL_PROFILE="0"
- int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
- range 0 3
- default 0
- ---help---
- This enables and configures a keypad connected to the parallel port.
- The keys will be read from character device 10,185. Valid values are :
-
- 0 : do not enable this driver
- 1 : old 6 keys keypad
- 2 : new 6 keys keypad, as used on the server at www.ant-computing.com
- 3 : Nexcom NSA1045's 4 keys keypad
-
- New profiles can be described in the driver source. The driver also
- supports simultaneous keys pressed when the keypad supports them.
-
-config PANEL_LCD
- depends on PANEL && PANEL_PROFILE="0"
- int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
- range 0 5
- default 0
- ---help---
- This enables and configures an LCD connected to the parallel port.
- The driver includes an interpreter for escape codes starting with
- '\e[L' which are specific to the LCD, and a few ANSI codes. The
- driver will be registered as character device 10,156, usually
- under the name '/dev/lcd'. There are a total of 6 supported types :
-
- 0 : do not enable the driver
- 1 : custom configuration and wiring (see further)
- 2 : 2x16 & 2x40 parallel LCD (old wiring)
- 3 : 2x16 serial LCD (KS-0074 based)
- 4 : 2x16 parallel LCD (Hantronix wiring)
- 5 : 2x16 parallel LCD (Nexcom wiring)
-
- When type '1' is specified, other options will appear to configure
- more precise aspects (wiring, dimensions, protocol, ...). Please note
- that those values changed from the 2.4 driver for better consistency.
-
-config PANEL_LCD_HEIGHT
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Number of lines on the LCD (1-2)"
- range 1 2
- default 2
- ---help---
- This is the number of visible character lines on the LCD in custom profile.
- It can either be 1 or 2.
-
-config PANEL_LCD_WIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Number of characters per line on the LCD (1-40)"
- range 1 40
- default 40
- ---help---
- This is the number of characters per line on the LCD in custom profile.
- Common values are 16,20,24,40.
-
-config PANEL_LCD_BWIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Internal LCD line width (1-40, 40 by default)"
- range 1 40
- default 40
- ---help---
- Most LCDs use a standard controller which supports hardware lines of 40
- characters, although sometimes only 16, 20 or 24 of them are really wired
- to the terminal. This results in some non-visible but addressable characters,
- and is the case for most parallel LCDs. Other LCDs, and some serial ones,
- however, use the same line width internally as what is visible. The KS0074
- for example, uses 16 characters per line for 16 visible characters per line.
-
- This option lets you configure the value used by your LCD in 'custom' profile.
- If you don't know, put '40' here.
-
-config PANEL_LCD_HWIDTH
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Hardware LCD line width (1-64, 64 by default)"
- range 1 64
- default 64
- ---help---
- Most LCDs use a single address bit to differentiate line 0 and line 1. Since
- some of them need to be able to address 40 chars with the lower bits, they
- often use the immediately superior power of 2, which is 64, to address the
- next line.
-
- If you don't know what your LCD uses, in doubt let 16 here for a 2x16, and
- 64 here for a 2x40.
-
-config PANEL_LCD_CHARSET
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "LCD character set (0=normal, 1=KS0074)"
- range 0 1
- default 0
- ---help---
- Some controllers such as the KS0074 use a somewhat strange character set
- where many symbols are at unusual places. The driver knows how to map
- 'standard' ASCII characters to the character sets used by these controllers.
- Valid values are :
-
- 0 : normal (untranslated) character set
- 1 : KS0074 character set
-
- If you don't know, use the normal one (0).
-
-config PANEL_LCD_PROTO
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "LCD communication mode (0=parallel 8 bits, 1=serial)"
- range 0 1
- default 0
- ---help---
- This driver now supports any serial or parallel LCD wired to a parallel
- port. But before assigning signals, the driver needs to know if it will
- be driving a serial LCD or a parallel one. Serial LCDs only use 2 wires
- (SDA/SCL), while parallel ones use 2 or 3 wires for the control signals
- (E, RS, sometimes RW), and 4 or 8 for the data. Use 0 here for a 8 bits
- parallel LCD, and 1 for a serial LCD.
-
-config PANEL_LCD_PIN_E
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
- range -17 17
- default 14
- ---help---
- This describes the number of the parallel port pin to which the LCD 'E'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'E' pin in custom profile is '14' (AUTOFEED).
-
-config PANEL_LCD_PIN_RS
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
- range -17 17
- default 17
- ---help---
- This describes the number of the parallel port pin to which the LCD 'RS'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'RS' pin in custom profile is '17' (SELECT IN).
-
-config PANEL_LCD_PIN_RW
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
- int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
- range -17 17
- default 16
- ---help---
- This describes the number of the parallel port pin to which the LCD 'RW'
- signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'RW' pin in custom profile is '16' (INIT).
-
-config PANEL_LCD_PIN_SCL
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
- int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
- range -17 17
- default 1
- ---help---
- This describes the number of the parallel port pin to which the serial
- LCD 'SCL' signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'SCL' pin in custom profile is '1' (STROBE).
-
-config PANEL_LCD_PIN_SDA
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
- int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
- range -17 17
- default 2
- ---help---
- This describes the number of the parallel port pin to which the serial
- LCD 'SDA' signal has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'SDA' pin in custom profile is '2' (D0).
-
-config PANEL_LCD_PIN_BL
- depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
- int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
- range -17 17
- default 0
- ---help---
- This describes the number of the parallel port pin to which the LCD 'BL' signal
- has been connected. It can be :
-
- 0 : no connection (eg: connected to ground)
- 1..17 : directly connected to any of these pins on the DB25 plug
- -1..-17 : connected to the same pin through an inverter (eg: transistor).
-
- Default for the 'BL' pin in custom profile is '0' (uncontrolled).
-
-config PANEL_CHANGE_MESSAGE
- depends on PANEL
- bool "Change LCD initialization message ?"
- default "n"
- ---help---
- This allows you to replace the boot message indicating the kernel version
- and the driver version with a custom message. This is useful on appliances
- where a simple 'Starting system' message can be enough to stop a customer
- from worrying.
-
- If you say 'Y' here, you'll be able to choose a message yourself. Otherwise,
- say 'N' and keep the default message with the version.
-
-config PANEL_BOOT_MESSAGE
- depends on PANEL && PANEL_CHANGE_MESSAGE="y"
- string "New initialization message"
- default ""
- ---help---
- This allows you to replace the boot message indicating the kernel version
- and the driver version with a custom message. This is useful on appliances
- where a simple 'Starting system' message can be enough to stop a customer
- from worrying.
-
- An empty message will only clear the display at driver init time. Any other
- printf()-formatted message is valid with newline and escape codes.
diff --git a/drivers/staging/panel/Makefile b/drivers/staging/panel/Makefile
deleted file mode 100644
index 747c238b82f9..000000000000
--- a/drivers/staging/panel/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_PANEL) += panel.o
diff --git a/drivers/staging/panel/TODO b/drivers/staging/panel/TODO
deleted file mode 100644
index 2db3f994b632..000000000000
--- a/drivers/staging/panel/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - review major/minor usages
- - review userspace api
- - see if all of this could be easier done in userspace instead.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Willy Tarreau <willy@meta-x.org>
diff --git a/drivers/staging/panel/lcd-panel-cgram.txt b/drivers/staging/panel/lcd-panel-cgram.txt
deleted file mode 100644
index 7f82c905763d..000000000000
--- a/drivers/staging/panel/lcd-panel-cgram.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Some LCDs allow you to define up to 8 characters, mapped to ASCII
-characters 0 to 7. The escape code to define a new character is
-'\e[LG' followed by one digit from 0 to 7, representing the character
-number, and up to 8 couples of hex digits terminated by a semi-colon
-(';'). Each couple of digits represents a line, with 1-bits for each
-illuminated pixel with LSB on the right. Lines are numbered from the
-top of the character to the bottom. On a 5x7 matrix, only the 5 lower
-bits of the 7 first bytes are used for each character. If the string
-is incomplete, only complete lines will be redefined. Here are some
-examples :
-
- printf "\e[LG0010101050D1F0C04;" => 0 = [enter]
- printf "\e[LG1040E1F0000000000;" => 1 = [up]
- printf "\e[LG2000000001F0E0400;" => 2 = [down]
- printf "\e[LG3040E1F001F0E0400;" => 3 = [up-down]
- printf "\e[LG40002060E1E0E0602;" => 4 = [left]
- printf "\e[LG500080C0E0F0E0C08;" => 5 = [right]
- printf "\e[LG60016051516141400;" => 6 = "IP"
-
- printf "\e[LG00103071F1F070301;" => big speaker
- printf "\e[LG00002061E1E060200;" => small speaker
-
-Willy
-
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig
index fd25078ee923..a925fb0db706 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/staging/rdma/hfi1/Kconfig
@@ -1,6 +1,8 @@
config INFINIBAND_HFI1
tristate "Intel OPA Gen1 support"
- depends on X86_64
+ depends on X86_64 && INFINIBAND_RDMAVT
+ select MMU_NOTIFIER
+ select CRC32
default m
---help---
This is a low-level driver for Intel OPA Gen1 adapter.
@@ -25,13 +27,3 @@ config SDMA_VERBOSITY
---help---
This is a configuration flag to enable verbose
SDMA debug
-config PRESCAN_RXQ
- bool "Enable prescanning of the RX queue for ECNs"
- depends on INFINIBAND_HFI1
- default n
- ---help---
- This option toggles the prescanning of the receive queue for
- Explicit Congestion Notifications. If an ECN is detected, it
- is processed as quickly as possible, the ECN is toggled off.
- After the prescanning step, the receive queue is processed as
- usual.
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/staging/rdma/hfi1/Makefile
index 68c5a315e557..8dc59382ee96 100644
--- a/drivers/staging/rdma/hfi1/Makefile
+++ b/drivers/staging/rdma/hfi1/Makefile
@@ -7,10 +7,12 @@
#
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
-hfi1-y := chip.o cq.o device.o diag.o dma.o driver.o efivar.o eprom.o file_ops.o firmware.o \
- init.o intr.o keys.o mad.o mmap.o mr.o pcie.o pio.o pio_copy.o \
- qp.o qsfp.o rc.o ruc.o sdma.o srq.o sysfs.o trace.o twsi.o \
- uc.o ud.o user_pages.o user_sdma.o verbs_mcast.o verbs.o
+hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \
+ eprom.o file_ops.o firmware.o \
+ init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
+ qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
+ uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
+ verbs_txreq.o
hfi1-$(CONFIG_DEBUG_FS) += debugfs.o
CFLAGS_trace.o = -I$(src)
diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/staging/rdma/hfi1/affinity.c
new file mode 100644
index 000000000000..2cb8ca77f876
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/affinity.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/topology.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+
+#include "hfi.h"
+#include "affinity.h"
+#include "sdma.h"
+#include "trace.h"
+
+struct cpu_mask_set {
+ struct cpumask mask;
+ struct cpumask used;
+ uint gen;
+};
+
+struct hfi1_affinity {
+ struct cpu_mask_set def_intr;
+ struct cpu_mask_set rcv_intr;
+ struct cpu_mask_set proc;
+ /* spin lock to protect affinity struct */
+ spinlock_t lock;
+};
+
+/* Names of IRQ types, indexed by enum irq_type */
+static const char * const irq_type_names[] = {
+ "SDMA",
+ "RCVCTXT",
+ "GENERAL",
+ "OTHER",
+};
+
+static inline void init_cpu_mask_set(struct cpu_mask_set *set)
+{
+ cpumask_clear(&set->mask);
+ cpumask_clear(&set->used);
+ set->gen = 0;
+}
+
+/*
+ * Interrupt affinity.
+ *
+ * The non-receive interrupts get a default mask that starts from
+ * the possible CPUs, with the hyper-threaded siblings and each
+ * receive-interrupt CPU removed.
+ *
+ * The receive interrupts are placed starting at node-relative
+ * CPU 1, wrapping back to node-relative CPU 1 as necessary.
+ *
+ */
+int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+{
+ int node = pcibus_to_node(dd->pcidev->bus);
+ struct hfi1_affinity *info;
+ const struct cpumask *local_mask;
+ int curr_cpu, possible, i, ht;
+
+ if (node < 0)
+ node = numa_node_id();
+ dd->node = node;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ spin_lock_init(&info->lock);
+
+ init_cpu_mask_set(&info->def_intr);
+ init_cpu_mask_set(&info->rcv_intr);
+ init_cpu_mask_set(&info->proc);
+
+ local_mask = cpumask_of_node(dd->node);
+ if (cpumask_first(local_mask) >= nr_cpu_ids)
+ local_mask = topology_core_cpumask(0);
+ /* use local mask as default */
+ cpumask_copy(&info->def_intr.mask, local_mask);
+ /*
+ * Remove HT cores from the default mask. Do this in two steps below.
+ */
+ possible = cpumask_weight(&info->def_intr.mask);
+ ht = cpumask_weight(topology_sibling_cpumask(
+ cpumask_first(&info->def_intr.mask)));
+ /*
+ * Step 1. Skip over the first N HT siblings and use them as the
+ * "real" cores. Assumes that HT cores are not enumerated in
+ * succession (except in the single core case).
+ */
+ curr_cpu = cpumask_first(&info->def_intr.mask);
+ for (i = 0; i < possible / ht; i++)
+ curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
+ /*
+ * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
+ * skip any gaps.
+ */
+ for (; i < possible; i++) {
+ cpumask_clear_cpu(curr_cpu, &info->def_intr.mask);
+ curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
+ }
+
+ /* fill in the receive list */
+ possible = cpumask_weight(&info->def_intr.mask);
+ curr_cpu = cpumask_first(&info->def_intr.mask);
+ if (possible == 1) {
+ /* only one CPU, everyone will use it */
+ cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask);
+ } else {
+ /*
+ * Retain the first CPU in the default list for the control
+ * context.
+ */
+ curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
+ /*
+ * Remove the remaining kernel receive queues from
+ * the default list and add them to the receive list.
+ */
+ for (i = 0; i < dd->n_krcv_queues - 1; i++) {
+ cpumask_clear_cpu(curr_cpu, &info->def_intr.mask);
+ cpumask_set_cpu(curr_cpu, &info->rcv_intr.mask);
+ curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
+ if (curr_cpu >= nr_cpu_ids)
+ break;
+ }
+ }
+
+ cpumask_copy(&info->proc.mask, cpu_online_mask);
+ dd->affinity = info;
+ return 0;
+}
+
+void hfi1_dev_affinity_free(struct hfi1_devdata *dd)
+{
+ kfree(dd->affinity);
+}
+
+int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
+{
+ int ret;
+ cpumask_var_t diff;
+ struct cpu_mask_set *set;
+ struct sdma_engine *sde = NULL;
+ struct hfi1_ctxtdata *rcd = NULL;
+ char extra[64];
+ int cpu = -1;
+
+ extra[0] = '\0';
+ cpumask_clear(&msix->mask);
+
+ ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ switch (msix->type) {
+ case IRQ_SDMA:
+ sde = (struct sdma_engine *)msix->arg;
+ scnprintf(extra, 64, "engine %u", sde->this_idx);
+ /* fall through */
+ case IRQ_GENERAL:
+ set = &dd->affinity->def_intr;
+ break;
+ case IRQ_RCVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ if (rcd->ctxt == HFI1_CTRL_CTXT) {
+ set = &dd->affinity->def_intr;
+ cpu = cpumask_first(&set->mask);
+ } else {
+ set = &dd->affinity->rcv_intr;
+ }
+ scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
+ break;
+ default:
+ dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
+ return -EINVAL;
+ }
+
+ /*
+ * The control receive context is placed on a particular CPU, which
+ * is set above. Skip accounting for it. Everything else finds its
+ * CPU here.
+ */
+ if (cpu == -1) {
+ spin_lock(&dd->affinity->lock);
+ if (cpumask_equal(&set->mask, &set->used)) {
+ /*
+ * We've used up all the CPUs, bump up the generation
+ * and reset the 'used' map
+ */
+ set->gen++;
+ cpumask_clear(&set->used);
+ }
+ cpumask_andnot(diff, &set->mask, &set->used);
+ cpu = cpumask_first(diff);
+ cpumask_set_cpu(cpu, &set->used);
+ spin_unlock(&dd->affinity->lock);
+ }
+
+ switch (msix->type) {
+ case IRQ_SDMA:
+ sde->cpu = cpu;
+ break;
+ case IRQ_GENERAL:
+ case IRQ_RCVCTXT:
+ case IRQ_OTHER:
+ break;
+ }
+
+ cpumask_set_cpu(cpu, &msix->mask);
+ dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
+ msix->msix.vector, irq_type_names[msix->type],
+ extra, cpu);
+ irq_set_affinity_hint(msix->msix.vector, &msix->mask);
+
+ free_cpumask_var(diff);
+ return 0;
+}
+
+void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
+ struct hfi1_msix_entry *msix)
+{
+ struct cpu_mask_set *set = NULL;
+ struct hfi1_ctxtdata *rcd;
+
+ switch (msix->type) {
+ case IRQ_SDMA:
+ case IRQ_GENERAL:
+ set = &dd->affinity->def_intr;
+ break;
+ case IRQ_RCVCTXT:
+ rcd = (struct hfi1_ctxtdata *)msix->arg;
+ /* only do accounting for non control contexts */
+ if (rcd->ctxt != HFI1_CTRL_CTXT)
+ set = &dd->affinity->rcv_intr;
+ break;
+ default:
+ return;
+ }
+
+ if (set) {
+ spin_lock(&dd->affinity->lock);
+ cpumask_andnot(&set->used, &set->used, &msix->mask);
+ if (cpumask_empty(&set->used) && set->gen) {
+ set->gen--;
+ cpumask_copy(&set->used, &set->mask);
+ }
+ spin_unlock(&dd->affinity->lock);
+ }
+
+ irq_set_affinity_hint(msix->msix.vector, NULL);
+ cpumask_clear(&msix->mask);
+}
+
+int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
+{
+ int cpu = -1, ret;
+ cpumask_var_t diff, mask, intrs;
+ const struct cpumask *node_mask,
+ *proc_mask = tsk_cpus_allowed(current);
+ struct cpu_mask_set *set = &dd->affinity->proc;
+ char buf[1024];
+
+ /*
+ * check whether process/context affinity has already
+ * been set
+ */
+ if (cpumask_weight(proc_mask) == 1) {
+ scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
+ current->pid, current->comm, buf);
+ /*
+ * Mark the pre-set CPU as used. This is atomic so we don't
+ * need the lock
+ */
+ cpu = cpumask_first(proc_mask);
+ cpumask_set_cpu(cpu, &set->used);
+ goto done;
+ } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+ scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
+ current->pid, current->comm, buf);
+ goto done;
+ }
+
+ /*
+ * The process does not have a preset CPU affinity so find one to
+ * recommend. We prefer CPUs on the same NUMA as the device.
+ */
+
+ ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
+ if (!ret)
+ goto done;
+ ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ if (!ret)
+ goto free_diff;
+ ret = zalloc_cpumask_var(&intrs, GFP_KERNEL);
+ if (!ret)
+ goto free_mask;
+
+ spin_lock(&dd->affinity->lock);
+ /*
+ * If we've used all available CPUs, clear the mask and start
+ * overloading.
+ */
+ if (cpumask_equal(&set->mask, &set->used)) {
+ set->gen++;
+ cpumask_clear(&set->used);
+ }
+
+ /* CPUs used by interrupt handlers */
+ cpumask_copy(intrs, (dd->affinity->def_intr.gen ?
+ &dd->affinity->def_intr.mask :
+ &dd->affinity->def_intr.used));
+ cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
+ &dd->affinity->rcv_intr.mask :
+ &dd->affinity->rcv_intr.used));
+ scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
+ hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+
+ /*
+	 * If no NUMA node was requested, prefer the device's NUMA node.
+ */
+ if (node == -1)
+ node = dd->node;
+ node_mask = cpumask_of_node(node);
+ scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
+ hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+
+ /* diff will hold all unused cpus */
+ cpumask_andnot(diff, &set->mask, &set->used);
+ scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
+ hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+
+ /* get cpumask of available CPUs on preferred NUMA */
+ cpumask_and(mask, diff, node_mask);
+ scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
+ hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+
+ /*
+ * At first, we don't want to place processes on the same
+ * CPUs as interrupt handlers.
+ */
+ cpumask_andnot(diff, mask, intrs);
+ if (!cpumask_empty(diff))
+ cpumask_copy(mask, diff);
+
+ /*
+	 * If we don't have a CPU on the preferred NUMA node, fall back
+	 * to the list of the remaining available CPUs.
+ */
+ if (cpumask_empty(mask)) {
+ cpumask_andnot(diff, &set->mask, &set->used);
+ cpumask_andnot(mask, diff, node_mask);
+ }
+ scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
+ hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+
+ cpu = cpumask_first(mask);
+ if (cpu >= nr_cpu_ids) /* empty */
+ cpu = -1;
+ else
+ cpumask_set_cpu(cpu, &set->used);
+ spin_unlock(&dd->affinity->lock);
+
+ free_cpumask_var(intrs);
+free_mask:
+ free_cpumask_var(mask);
+free_diff:
+ free_cpumask_var(diff);
+done:
+ return cpu;
+}
+
+void hfi1_put_proc_affinity(struct hfi1_devdata *dd, int cpu)
+{
+ struct cpu_mask_set *set = &dd->affinity->proc;
+
+ if (cpu < 0)
+ return;
+ spin_lock(&dd->affinity->lock);
+ cpumask_clear_cpu(cpu, &set->used);
+ if (cpumask_empty(&set->used) && set->gen) {
+ set->gen--;
+ cpumask_copy(&set->used, &set->mask);
+ }
+ spin_unlock(&dd->affinity->lock);
+}
+
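
[Annotation: the allocator at the heart of affinity.c is a small generational scheme:
CPUs are handed out from `mask` and recorded in `used`; once every CPU has been used,
`gen` is bumped, `used` is cleared, and allocation starts overloading. A condensed
sketch of just that pick step, reusing the struct cpu_mask_set defined above and
assuming the caller holds the affinity lock, as the driver does:

	#include <linux/cpumask.h>

	/* Sketch of the generational pick used by hfi1_get_irq_affinity(). */
	static int demo_pick_cpu(struct cpu_mask_set *set, cpumask_var_t scratch)
	{
		int cpu;

		if (cpumask_equal(&set->mask, &set->used)) {
			/* Every CPU handed out once: start the next generation. */
			set->gen++;
			cpumask_clear(&set->used);
		}
		cpumask_andnot(scratch, &set->mask, &set->used);
		cpu = cpumask_first(scratch);
		cpumask_set_cpu(cpu, &set->used);
		return cpu;
	}
]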
diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/staging/rdma/hfi1/affinity.h
new file mode 100644
index 000000000000..b287e4963024
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/affinity.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _HFI1_AFFINITY_H
+#define _HFI1_AFFINITY_H
+
+#include "hfi.h"
+
+enum irq_type {
+ IRQ_SDMA,
+ IRQ_RCVCTXT,
+ IRQ_GENERAL,
+ IRQ_OTHER
+};
+
+/* Can be used for both memory and cpu */
+enum affinity_flags {
+ AFF_AUTO,
+ AFF_NUMA_LOCAL,
+ AFF_DEV_LOCAL,
+ AFF_IRQ_LOCAL
+};
+
+struct hfi1_msix_entry;
+
+/* Initialize driver affinity data */
+int hfi1_dev_affinity_init(struct hfi1_devdata *);
+/* Free driver affinity data */
+void hfi1_dev_affinity_free(struct hfi1_devdata *);
+/*
+ * Set IRQ affinity to a CPU. The function will determine the
+ * CPU and set the affinity to it.
+ */
+int hfi1_get_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
+/*
+ * Remove the IRQ's CPU affinity. This function also updates
+ * any internal CPU tracking data
+ */
+void hfi1_put_irq_affinity(struct hfi1_devdata *, struct hfi1_msix_entry *);
+/*
+ * Determine a CPU affinity for a user process, if the process does not
+ * have an affinity set yet.
+ */
+int hfi1_get_proc_affinity(struct hfi1_devdata *, int);
+/* Release a CPU used by a user process. */
+void hfi1_put_proc_affinity(struct hfi1_devdata *, int);
+
+#endif /* _HFI1_AFFINITY_H */
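
[Annotation: the interface above is deliberately symmetric: every successful get is
balanced by a put so the `used` accounting stays consistent. A hedged usage sketch,
with error paths trimmed (the demo_* wrappers are hypothetical; only the hfi1_*
functions come from the header above):

	#include "affinity.h"

	static int demo_setup_irq(struct hfi1_devdata *dd,
				  struct hfi1_msix_entry *me)
	{
		int ret = hfi1_get_irq_affinity(dd, me);	/* pick CPU, set hint */

		if (ret)
			return ret;
		/* ... request_irq(), enable the source ... */
		return 0;
	}

	static void demo_teardown_irq(struct hfi1_devdata *dd,
				      struct hfi1_msix_entry *me)
	{
		/* ... disable the source, free_irq() ... */
		hfi1_put_irq_affinity(dd, me);	/* drop accounting and hint */
	}
]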
diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/staging/rdma/hfi1/aspm.h
new file mode 100644
index 000000000000..0d58fe3b49b5
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/aspm.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _ASPM_H
+#define _ASPM_H
+
+#include "hfi.h"
+
+extern uint aspm_mode;
+
+enum aspm_mode {
+ ASPM_MODE_DISABLED = 0, /* ASPM always disabled, performance mode */
+ ASPM_MODE_ENABLED = 1, /* ASPM always enabled, power saving mode */
+ ASPM_MODE_DYNAMIC = 2, /* ASPM enabled/disabled dynamically */
+};
+
+/* Time after which the timer interrupt will re-enable ASPM */
+#define ASPM_TIMER_MS 1000
+/* Time for which interrupts are ignored after a timer has been scheduled */
+#define ASPM_RESCHED_TIMER_MS (ASPM_TIMER_MS / 2)
+/* Two interrupts within this time trigger ASPM disable */
+#define ASPM_TRIGGER_MS 1
+#define ASPM_TRIGGER_NS (ASPM_TRIGGER_MS * 1000 * 1000ull)
+#define ASPM_L1_SUPPORTED(reg) \
+ (((reg & PCI_EXP_LNKCAP_ASPMS) >> 10) & 0x2)
+
+static inline bool aspm_hw_l1_supported(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent = dd->pcidev->bus->self;
+ u32 up, dn;
+
+ /*
+ * If the driver does not have access to the upstream component,
+ * it cannot support ASPM L1 at all.
+ */
+ if (!parent)
+ return false;
+
+ pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &dn);
+ dn = ASPM_L1_SUPPORTED(dn);
+
+ pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &up);
+ up = ASPM_L1_SUPPORTED(up);
+
+ /* ASPM works on A-step but is reported as not supported */
+ return (!!dn || is_ax(dd)) && !!up;
+}
+
+/* Set L1 entrance latency for slower entry to L1 */
+static inline void aspm_hw_set_l1_ent_latency(struct hfi1_devdata *dd)
+{
+ u32 l1_ent_lat = 0x4u;
+ u32 reg32;
+
+ pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, &reg32);
+ reg32 &= ~PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK;
+ reg32 |= l1_ent_lat << PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT;
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL3, reg32);
+}
+
+static inline void aspm_hw_enable_l1(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent = dd->pcidev->bus->self;
+
+ /*
+ * If the driver does not have access to the upstream component,
+ * it cannot support ASPM L1 at all.
+ */
+ if (!parent)
+ return;
+
+ /* Enable ASPM L1 first in upstream component and then downstream */
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ PCI_EXP_LNKCTL_ASPM_L1);
+ pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ PCI_EXP_LNKCTL_ASPM_L1);
+}
+
+static inline void aspm_hw_disable_l1(struct hfi1_devdata *dd)
+{
+ struct pci_dev *parent = dd->pcidev->bus->self;
+
+ /* Disable ASPM L1 first in downstream component and then upstream */
+ pcie_capability_clear_and_set_word(dd->pcidev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0x0);
+ if (parent)
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, 0x0);
+}
+
+static inline void aspm_enable(struct hfi1_devdata *dd)
+{
+ if (dd->aspm_enabled || aspm_mode == ASPM_MODE_DISABLED ||
+ !dd->aspm_supported)
+ return;
+
+ aspm_hw_enable_l1(dd);
+ dd->aspm_enabled = true;
+}
+
+static inline void aspm_disable(struct hfi1_devdata *dd)
+{
+ if (!dd->aspm_enabled || aspm_mode == ASPM_MODE_ENABLED)
+ return;
+
+ aspm_hw_disable_l1(dd);
+ dd->aspm_enabled = false;
+}
+
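+/*
+ * aspm_disable_inc()/aspm_enable_dec() keep a disable reference count:
+ * each path that needs ASPM off takes a reference, and L1 is only
+ * re-enabled when the last reference is dropped, so overlapping
+ * disable requests cannot re-enable ASPM early.
+ */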
+static inline void aspm_disable_inc(struct hfi1_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->aspm_lock, flags);
+ aspm_disable(dd);
+ atomic_inc(&dd->aspm_disabled_cnt);
+ spin_unlock_irqrestore(&dd->aspm_lock, flags);
+}
+
+static inline void aspm_enable_dec(struct hfi1_devdata *dd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->aspm_lock, flags);
+ if (atomic_dec_and_test(&dd->aspm_disabled_cnt))
+ aspm_enable(dd);
+ spin_unlock_irqrestore(&dd->aspm_lock, flags);
+}
+
+/* ASPM processing for each receive context interrupt */
+static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd)
+{
+ bool restart_timer;
+ bool close_interrupts;
+ unsigned long flags;
+ ktime_t now, prev;
+
+ /* Quickest exit for minimum impact */
+ if (!rcd->aspm_intr_supported)
+ return;
+
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ /* PSM contexts are open */
+ if (!rcd->aspm_intr_enable)
+ goto unlock;
+
+ prev = rcd->aspm_ts_last_intr;
+ now = ktime_get();
+ rcd->aspm_ts_last_intr = now;
+
+ /* An interrupt pair close together in time */
+ close_interrupts = ktime_to_ns(ktime_sub(now, prev)) < ASPM_TRIGGER_NS;
+
+ /* Don't push out our timer till this much time has elapsed */
+ restart_timer = ktime_to_ns(ktime_sub(now, rcd->aspm_ts_timer_sched)) >
+ ASPM_RESCHED_TIMER_MS * NSEC_PER_MSEC;
+ restart_timer = restart_timer && close_interrupts;
+
+ /* Disable ASPM and schedule timer */
+ if (rcd->aspm_enabled && close_interrupts) {
+ aspm_disable_inc(rcd->dd);
+ rcd->aspm_enabled = false;
+ restart_timer = true;
+ }
+
+ if (restart_timer) {
+ mod_timer(&rcd->aspm_timer,
+ jiffies + msecs_to_jiffies(ASPM_TIMER_MS));
+ rcd->aspm_ts_timer_sched = now;
+ }
+unlock:
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+}
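+
+/*
+ * Worked example of the heuristic above, with the constants defined
+ * earlier: two interrupts under 1 ms apart (ASPM_TRIGGER_NS) disable
+ * L1 and arm a 1 s timer (ASPM_TIMER_MS); while disabled, further
+ * close pairs push the timer out only if at least 500 ms
+ * (ASPM_RESCHED_TIMER_MS) have passed since it was last scheduled;
+ * once no close pair arrives for a full second, the timer fires and
+ * re-enables ASPM.
+ */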
+
+/* Timer function for re-enabling ASPM in the absence of interrupt activity */
+static inline void aspm_ctx_timer_function(unsigned long data)
+{
+ struct hfi1_ctxtdata *rcd = (struct hfi1_ctxtdata *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ aspm_enable_dec(rcd->dd);
+ rcd->aspm_enabled = true;
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+}
+
+/* Disable interrupt processing for verbs contexts when PSM contexts are open */
+static inline void aspm_disable_all(struct hfi1_devdata *dd)
+{
+ struct hfi1_ctxtdata *rcd;
+ unsigned long flags;
+ unsigned i;
+
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ rcd = dd->rcd[i];
+ del_timer_sync(&rcd->aspm_timer);
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ rcd->aspm_intr_enable = false;
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+ }
+
+ aspm_disable(dd);
+ atomic_set(&dd->aspm_disabled_cnt, 0);
+}
+
+/* Re-enable interrupt processing for verbs contexts */
+static inline void aspm_enable_all(struct hfi1_devdata *dd)
+{
+ struct hfi1_ctxtdata *rcd;
+ unsigned long flags;
+ unsigned i;
+
+ aspm_enable(dd);
+
+ if (aspm_mode != ASPM_MODE_DYNAMIC)
+ return;
+
+ for (i = 0; i < dd->first_user_ctxt; i++) {
+ rcd = dd->rcd[i];
+ spin_lock_irqsave(&rcd->aspm_lock, flags);
+ rcd->aspm_intr_enable = true;
+ rcd->aspm_enabled = true;
+ spin_unlock_irqrestore(&rcd->aspm_lock, flags);
+ }
+}
+
+static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
+{
+ spin_lock_init(&rcd->aspm_lock);
+ setup_timer(&rcd->aspm_timer, aspm_ctx_timer_function,
+ (unsigned long)rcd);
+ rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
+ aspm_mode == ASPM_MODE_DYNAMIC &&
+ rcd->ctxt < rcd->dd->first_user_ctxt;
+}
+
+static inline void aspm_init(struct hfi1_devdata *dd)
+{
+ unsigned i;
+
+ spin_lock_init(&dd->aspm_lock);
+ dd->aspm_supported = aspm_hw_l1_supported(dd);
+
+ for (i = 0; i < dd->first_user_ctxt; i++)
+ aspm_ctx_init(dd->rcd[i]);
+
+ /* Start with ASPM disabled */
+ aspm_hw_set_l1_ent_latency(dd);
+ dd->aspm_enabled = false;
+ aspm_hw_disable_l1(dd);
+
+ /* Now turn on ASPM if configured */
+ aspm_enable_all(dd);
+}
+
+static inline void aspm_exit(struct hfi1_devdata *dd)
+{
+ aspm_disable_all(dd);
+
+ /* Turn on ASPM on exit to conserve power */
+ aspm_enable(dd);
+}
+
+#endif /* _ASPM_H */
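The header above is self-contained; for orientation, here is a minimal, hypothetical sketch of where these entry points are meant to be called from. The example_* function names are illustrative and not part of this patch; the real driver wires these into its init/teardown and PSM context open/close paths:

	#include "aspm.h"

	static int example_init(struct hfi1_devdata *dd)
	{
		/* detect L1 support, set up per-context timers, apply aspm_mode */
		aspm_init(dd);
		return 0;
	}

	static void example_psm_contexts_opened(struct hfi1_devdata *dd)
	{
		/* PSM contexts are latency sensitive: force ASPM off */
		aspm_disable_all(dd);
	}

	static void example_psm_contexts_closed(struct hfi1_devdata *dd)
	{
		/* resume dynamic ASPM handling on the verbs contexts */
		aspm_enable_all(dd);
	}

	static void example_teardown(struct hfi1_devdata *dd)
	{
		/* quiesce timers; leave ASPM on to conserve power */
		aspm_exit(dd);
	}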
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
index bbe5ad85cec0..16eb653903e0 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/staging/rdma/hfi1/chip.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -64,6 +61,8 @@
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
+#include "platform.h"
+#include "aspm.h"
#define NUM_IB_PORTS 1
@@ -420,10 +419,10 @@ static struct flag_table pio_err_status_flags[] = {
SEC_SPC_FREEZE,
SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/ FLAG_ENTRY("PioWriteQwValidParity",
- SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
+ SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/ FLAG_ENTRY("PioBlockQwCountParity",
- SEC_WRITE_DROPPED|SEC_SPC_FREEZE,
+ SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/ FLAG_ENTRY("PioVlfVlLenParity",
SEC_SPC_FREEZE,
@@ -509,6 +508,12 @@ static struct flag_table sdma_err_status_flags[] = {
| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
+/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
+#define PORT_DISCARD_EGRESS_ERRS \
+ (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
+ | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
+ | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
+
/*
* TXE Egress Error flags
*/
@@ -936,7 +941,7 @@ static struct flag_table dc8051_err_flags[] = {
FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
- D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
+ D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
@@ -950,7 +955,7 @@ static struct flag_table dc8051_info_err_flags[] = {
FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
FLAG_ENTRY0("Serdes internal loopback failure",
- FAILED_SERDES_INTERNAL_LOOPBACK),
+ FAILED_SERDES_INTERNAL_LOOPBACK),
FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
@@ -958,7 +963,8 @@ static struct flag_table dc8051_info_err_flags[] = {
FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
- FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT)
+ FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
+ FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT)
};
/*
@@ -978,7 +984,6 @@ static struct flag_table dc8051_info_host_msg_flags[] = {
FLAG_ENTRY0("Link going down", 0x0100),
};
-
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
@@ -1140,11 +1145,8 @@ struct cntr_entry {
/*
* accessor for stat element, context either dd or ppd
*/
- u64 (*rw_cntr)(const struct cntr_entry *,
- void *context,
- int vl,
- int mode,
- u64 data);
+ u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
+ int mode, u64 data);
};
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
@@ -1188,7 +1190,7 @@ CNTR_ELEM(#name, \
#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
- (RCV_HDR_OVFL_CNT + ctx*0x100), \
+ (RCV_HDR_OVFL_CNT + ctx * 0x100), \
0, CNTR_NORMAL, port_access_u64_csr)
/* 32bit TXE */
@@ -1250,11 +1252,8 @@ CNTR_ELEM(#name, \
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
- u64 val;
-
if (dd->flags & HFI1_PRESENT) {
- val = readq((void __iomem *)dd->kregbase + offset);
- return val;
+ return readq((void __iomem *)dd->kregbase + offset);
}
return -1;
}
@@ -1277,7 +1276,6 @@ static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
{
u64 ret;
-
if (mode == CNTR_MODE_R) {
ret = read_csr(dd, csr);
} else if (mode == CNTR_MODE_W) {
@@ -1294,17 +1292,65 @@ static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
/* Dev Access */
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
+ void *context, int vl, int mode, u64 data)
{
struct hfi1_devdata *dd = context;
+ u64 csr = entry->csr;
- if (vl != CNTR_INVALID_VL)
- return 0;
- return read_write_csr(dd, entry->csr, mode, data);
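+ /*
+ * For CNTR_SDMA counters the vl argument carries the SDMA engine
+ * index, and the per-engine CSRs are spaced 0x100 apart.
+ */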
+ if (entry->flags & CNTR_SDMA) {
+ if (vl == CNTR_INVALID_VL)
+ return 0;
+ csr += 0x100 * vl;
+ } else {
+ if (vl != CNTR_INVALID_VL)
+ return 0;
+ }
+ return read_write_csr(dd, csr, mode, data);
+}
+
+static u64 access_sde_err_cnt(const struct cntr_entry *entry,
+ void *context, int idx, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (dd->per_sdma && idx < dd->num_sdma)
+ return dd->per_sdma[idx].err_cnt;
+ return 0;
+}
+
+static u64 access_sde_int_cnt(const struct cntr_entry *entry,
+ void *context, int idx, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (dd->per_sdma && idx < dd->num_sdma)
+ return dd->per_sdma[idx].sdma_int_cnt;
+ return 0;
+}
+
+static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
+ void *context, int idx, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (dd->per_sdma && idx < dd->num_sdma)
+ return dd->per_sdma[idx].idle_int_cnt;
+ return 0;
+}
+
+static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
+ void *context, int idx, int mode,
+ u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ if (dd->per_sdma && idx < dd->num_sdma)
+ return dd->per_sdma[idx].progress_int_cnt;
+ return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
+ int vl, int mode, u64 data)
{
struct hfi1_devdata *dd = context;
@@ -1325,7 +1371,7 @@ static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
+ int vl, int mode, u64 data)
{
struct hfi1_devdata *dd = context;
u32 csr = entry->csr;
@@ -1349,7 +1395,7 @@ static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
/* Port Access */
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
+ int vl, int mode, u64 data)
{
struct hfi1_pportdata *ppd = context;
@@ -1359,7 +1405,7 @@ static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
}
static u64 port_access_u64_csr(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
+ void *context, int vl, int mode, u64 data)
{
struct hfi1_pportdata *ppd = context;
u64 val;
@@ -1399,7 +1445,7 @@ static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
}
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
+ int vl, int mode, u64 data)
{
struct hfi1_pportdata *ppd = context;
@@ -1409,7 +1455,7 @@ static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
}
static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
- int vl, int mode, u64 data)
+ int vl, int mode, u64 data)
{
struct hfi1_pportdata *ppd = context;
@@ -1430,18 +1476,25 @@ static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
+ void *context, int vl, int mode, u64 data)
{
- struct hfi1_pportdata *ppd = context;
+ struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
+ u64 zero = 0;
+ u64 *counter;
- if (vl != CNTR_INVALID_VL)
- return 0;
+ if (vl == CNTR_INVALID_VL)
+ counter = &ppd->port_xmit_discards;
+ else if (vl >= 0 && vl < C_VL_COUNT)
+ counter = &ppd->port_xmit_discards_vl[vl];
+ else
+ counter = &zero;
- return read_write_sw(ppd->dd, &ppd->port_xmit_discards, mode, data);
+ return read_write_sw(ppd->dd, counter, mode, data);
}
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
+ void *context, int vl, int mode,
+ u64 data)
{
struct hfi1_pportdata *ppd = context;
@@ -1453,7 +1506,7 @@ static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
}
static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
+ void *context, int vl, int mode, u64 data)
{
struct hfi1_pportdata *ppd = context;
@@ -1478,7 +1531,6 @@ static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
u64 __percpu *cntr,
int vl, int mode, u64 data)
{
-
u64 ret = 0;
if (vl != CNTR_INVALID_VL)
@@ -1510,7 +1562,7 @@ static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
}
static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
+ void *context, int vl, int mode, u64 data)
{
struct hfi1_devdata *dd = context;
@@ -1526,6 +1578,14 @@ static u64 access_sw_pio_wait(const struct cntr_entry *entry,
return dd->verbs_dev.n_piowait;
}
+static u64 access_sw_pio_drain(const struct cntr_entry *entry,
+ void *context, int vl, int mode, u64 data)
+{
+ struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
+
+ return dd->verbs_dev.n_piodrain;
+}
+
static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
void *context, int vl, int mode, u64 data)
{
@@ -1543,11 +1603,12 @@ static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
}
static u64 access_sw_send_schedule(const struct cntr_entry *entry,
- void *context, int vl, int mode, u64 data)
+ void *context, int vl, int mode, u64 data)
{
struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
- return dd->verbs_dev.n_send_schedule;
+ return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
+ mode, data);
}
/* Software counters for the error status bits within MISC_ERR_STATUS */
@@ -3885,8 +3946,8 @@ static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
void *context, int vl, int mode, u64 data) \
{ \
struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
- return read_write_cpu(ppd->dd, &ppd->ibport_data.z_ ##cntr, \
- ppd->ibport_data.cntr, vl, \
+ return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
+ ppd->ibport_data.rvp.cntr, vl, \
mode, data); \
}
@@ -3903,7 +3964,7 @@ static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
if (vl != CNTR_INVALID_VL) \
return 0; \
\
- return read_write_sw(ppd->dd, &ppd->ibport_data.n_ ##cntr, \
+ return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
mode, data); \
}
@@ -4066,10 +4127,28 @@ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
access_sw_pio_wait),
+[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
+ access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
access_sw_send_schedule),
+[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
+ SEND_DMA_DESC_FETCHED_CNT, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ dev_access_u32_csr),
+[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ access_sde_int_cnt),
+[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ access_sde_err_cnt),
+[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ access_sde_idle_int_cnt),
+[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
+ CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
+ access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
CNTR_NORMAL,
@@ -4879,28 +4958,28 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
- CNTR_SYNTH | CNTR_VL),
+ CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
- CNTR_SYNTH | CNTR_VL),
+ CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
- CNTR_SYNTH | CNTR_VL),
+ CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
- access_sw_link_dn_cnt),
+ access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
- access_sw_link_up_cnt),
+ access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
- access_sw_xmit_discards),
+ access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
- CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
- access_sw_xmit_discards),
+ CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
+ access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
- access_xmit_constraint_errs),
+ access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
- access_rcv_constraint_errs),
+ access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
@@ -4916,9 +4995,9 @@ static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
- access_sw_cpu_rc_qacks),
+ access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
- access_sw_cpu_rc_delayed_comp),
+ access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
@@ -5067,7 +5146,7 @@ done:
* the buffer. End in '*' if the buffer is too short.
*/
static char *flag_string(char *buf, int buf_len, u64 flags,
- struct flag_table *table, int table_size)
+ struct flag_table *table, int table_size)
{
char extra[32];
char *p = buf;
@@ -5128,10 +5207,8 @@ static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
if (source < ARRAY_SIZE(cce_misc_names))
strncpy(buf, cce_misc_names[source], bsize);
else
- snprintf(buf,
- bsize,
- "Reserved%u",
- source + IS_GENERAL_ERR_START);
+ snprintf(buf, bsize, "Reserved%u",
+ source + IS_GENERAL_ERR_START);
return buf;
}
@@ -5170,7 +5247,7 @@ static char *is_various_name(char *buf, size_t bsize, unsigned int source)
if (source < ARRAY_SIZE(various_names))
strncpy(buf, various_names[source], bsize);
else
- snprintf(buf, bsize, "Reserved%u", source+IS_VARIOUS_START);
+ snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
return buf;
}
@@ -5255,51 +5332,56 @@ static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
- cce_err_status_flags, ARRAY_SIZE(cce_err_status_flags));
+ cce_err_status_flags,
+ ARRAY_SIZE(cce_err_status_flags));
}
static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
- rxe_err_status_flags, ARRAY_SIZE(rxe_err_status_flags));
+ rxe_err_status_flags,
+ ARRAY_SIZE(rxe_err_status_flags));
}
static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags, misc_err_status_flags,
- ARRAY_SIZE(misc_err_status_flags));
+ ARRAY_SIZE(misc_err_status_flags));
}
static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
- pio_err_status_flags, ARRAY_SIZE(pio_err_status_flags));
+ pio_err_status_flags,
+ ARRAY_SIZE(pio_err_status_flags));
}
static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
- sdma_err_status_flags,
- ARRAY_SIZE(sdma_err_status_flags));
+ sdma_err_status_flags,
+ ARRAY_SIZE(sdma_err_status_flags));
}
static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
- egress_err_status_flags, ARRAY_SIZE(egress_err_status_flags));
+ egress_err_status_flags,
+ ARRAY_SIZE(egress_err_status_flags));
}
static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
- egress_err_info_flags, ARRAY_SIZE(egress_err_info_flags));
+ egress_err_info_flags,
+ ARRAY_SIZE(egress_err_info_flags));
}
static char *send_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
- send_err_status_flags,
- ARRAY_SIZE(send_err_status_flags));
+ send_err_status_flags,
+ ARRAY_SIZE(send_err_status_flags));
}
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
@@ -5312,7 +5394,7 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
* report or record it.
*/
dd_dev_info(dd, "CCE Error: %s\n",
- cce_err_status_string(buf, sizeof(buf), reg));
+ cce_err_status_string(buf, sizeof(buf), reg));
if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
@@ -5342,14 +5424,14 @@ static void update_rcverr_timer(unsigned long opaque)
u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
- ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
+ ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
- set_link_down_reason(ppd,
- OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
- OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
+ set_link_down_reason(
+ ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
+ OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
}
- dd->rcv_ovfl_cnt = (u32) cur_ovfl_cnt;
+ dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
}
@@ -5375,7 +5457,7 @@ static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
int i = 0;
dd_dev_info(dd, "Receive Error: %s\n",
- rxe_err_status_string(buf, sizeof(buf), reg));
+ rxe_err_status_string(buf, sizeof(buf), reg));
if (reg & ALL_RXE_FREEZE_ERR) {
int flags = 0;
@@ -5402,7 +5484,7 @@ static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
int i = 0;
dd_dev_info(dd, "Misc Error: %s",
- misc_err_status_string(buf, sizeof(buf), reg));
+ misc_err_status_string(buf, sizeof(buf), reg));
for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
if (reg & (1ull << i))
incr_cntr64(&dd->misc_err_status_cnt[i]);
@@ -5415,7 +5497,7 @@ static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
int i = 0;
dd_dev_info(dd, "PIO Error: %s\n",
- pio_err_status_string(buf, sizeof(buf), reg));
+ pio_err_status_string(buf, sizeof(buf), reg));
if (reg & ALL_PIO_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);
@@ -5432,7 +5514,7 @@ static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
int i = 0;
dd_dev_info(dd, "SDMA Error: %s\n",
- sdma_err_status_string(buf, sizeof(buf), reg));
+ sdma_err_status_string(buf, sizeof(buf), reg));
if (reg & ALL_SDMA_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);
@@ -5443,12 +5525,14 @@ static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
}
}
-static void count_port_inactive(struct hfi1_devdata *dd)
+static inline void __count_port_discards(struct hfi1_pportdata *ppd)
{
- struct hfi1_pportdata *ppd = dd->pport;
+ incr_cntr64(&ppd->port_xmit_discards);
+}
- if (ppd->port_xmit_discards < ~(u64)0)
- ppd->port_xmit_discards++;
+static void count_port_inactive(struct hfi1_devdata *dd)
+{
+ __count_port_discards(dd->pport);
}
/*
@@ -5460,7 +5544,8 @@ static void count_port_inactive(struct hfi1_devdata *dd)
* egress error if more than one packet fails the same integrity check
* since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
*/
-static void handle_send_egress_err_info(struct hfi1_devdata *dd)
+static void handle_send_egress_err_info(struct hfi1_devdata *dd,
+ int vl)
{
struct hfi1_pportdata *ppd = dd->pport;
u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
@@ -5471,14 +5556,44 @@ static void handle_send_egress_err_info(struct hfi1_devdata *dd)
write_csr(dd, SEND_EGRESS_ERR_INFO, info);
dd_dev_info(dd,
- "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
- info, egress_err_info_string(buf, sizeof(buf), info), src);
+ "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
+ info, egress_err_info_string(buf, sizeof(buf), info), src);
/* Eventually add other counters for each bit */
+ if (info & PORT_DISCARD_EGRESS_ERRS) {
+ int weight, i;
- if (info & SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK) {
- if (ppd->port_xmit_discards < ~(u64)0)
- ppd->port_xmit_discards++;
+ /*
+ * Count all applicable bits as individual errors and
+ * attribute them to the packet that triggered this handler.
+ * This may not be completely accurate due to limitations
+ * on the available hardware error information. There is
+ * a single information register and any number of error
+ * packets may have occurred and contributed to it before
+ * this routine is called. This means that:
+ * a) If multiple packets with the same error occur before
+ * this routine is called, earlier packets are missed.
+ * There is only a single bit for each error type.
+ * b) Errors may not be attributed to the correct VL.
+ * The driver is attributing all bits in the info register
+ * to the packet that triggered this call, but bits
+ * could be an accumulation of different packets with
+ * different VLs.
+ * c) A single error packet may have multiple counts attached
+ * to it. There is no way for the driver to know if
+ * multiple bits set in the info register are due to a
+ * single packet or multiple packets. The driver assumes
+ * multiple packets.
+ */
+ weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
+ for (i = 0; i < weight; i++) {
+ __count_port_discards(ppd);
+ if (vl >= 0 && vl < TXE_NUM_DATA_VL)
+ incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
+ else if (vl == 15)
+ incr_cntr64(&ppd->port_xmit_discards_vl
+ [C_VL_15]);
+ }
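+ /*
+ * Example of the attribution above: two discard-class bits set
+ * here with vl == 2 bump both the port-wide discard counter and
+ * port_xmit_discards_vl[2] twice, even if the bits came from
+ * different packets.
+ */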
}
}
@@ -5496,12 +5611,71 @@ static inline int port_inactive_err(u64 posn)
* Input value is a bit position within the SEND_EGRESS_ERR_STATUS
* register. Does it represent a 'disallowed packet' error?
*/
-static inline int disallowed_pkt_err(u64 posn)
+static inline int disallowed_pkt_err(int posn)
{
return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
}
+/*
+ * Input value is a bit position of one of the SDMA engine disallowed
+ * packet errors. Return which engine. Use of this must be guarded by
+ * disallowed_pkt_err().
+ */
+static inline int disallowed_pkt_engine(int posn)
+{
+ return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
+}
+
+/*
+ * Translate an SDMA engine to a VL. Return -1 if the translation cannot
+ * be done.
+ */
+static int engine_to_vl(struct hfi1_devdata *dd, int engine)
+{
+ struct sdma_vl_map *m;
+ int vl;
+
+ /* range check */
+ if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
+ return -1;
+
+ rcu_read_lock();
+ m = rcu_dereference(dd->sdma_map);
+ vl = m->engine_to_vl[engine];
+ rcu_read_unlock();
+
+ return vl;
+}
+
+/*
+ * Translate the send context (software index) into a VL. Return -1 if the
+ * translation cannot be done.
+ */
+static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
+{
+ struct send_context_info *sci;
+ struct send_context *sc;
+ int i;
+
+ sci = &dd->send_contexts[sw_index];
+
+ /* there is no information for user (PSM) and ack contexts */
+ if (sci->type != SC_KERNEL)
+ return -1;
+
+ sc = sci->sc;
+ if (!sc)
+ return -1;
+ if (dd->vld[15].sc == sc)
+ return 15;
+ for (i = 0; i < num_vls; i++)
+ if (dd->vld[i].sc == sc)
+ return i;
+
+ return -1;
+}
+
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
u64 reg_copy = reg, handled = 0;
@@ -5510,34 +5684,34 @@ static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
start_freeze_handling(dd->pport, 0);
- if (is_ax(dd) && (reg &
- SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK)
- && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
+ else if (is_ax(dd) &&
+ (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
+ (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
start_freeze_handling(dd->pport, 0);
while (reg_copy) {
int posn = fls64(reg_copy);
- /*
- * fls64() returns a 1-based offset, but we generally
- * want 0-based offsets.
- */
+ /* fls64() returns a 1-based offset; we want it zero based */
int shift = posn - 1;
+ u64 mask = 1ULL << shift;
if (port_inactive_err(shift)) {
count_port_inactive(dd);
- handled |= (1ULL << shift);
+ handled |= mask;
} else if (disallowed_pkt_err(shift)) {
- handle_send_egress_err_info(dd);
- handled |= (1ULL << shift);
+ int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
+
+ handle_send_egress_err_info(dd, vl);
+ handled |= mask;
}
- clear_bit(shift, (unsigned long *)&reg_copy);
+ reg_copy &= ~mask;
}
reg &= ~handled;
if (reg)
dd_dev_info(dd, "Egress Error: %s\n",
- egress_err_status_string(buf, sizeof(buf), reg));
+ egress_err_status_string(buf, sizeof(buf), reg));
for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
if (reg & (1ull << i))
@@ -5551,7 +5725,7 @@ static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
int i = 0;
dd_dev_info(dd, "Send Error: %s\n",
- send_err_status_string(buf, sizeof(buf), reg));
+ send_err_status_string(buf, sizeof(buf), reg));
for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
if (reg & (1ull << i))
@@ -5597,7 +5771,7 @@ static void interrupt_clear_down(struct hfi1_devdata *dd,
u64 mask;
dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
- eri->desc, reg);
+ eri->desc, reg);
/*
* Read-modify-write so any other masked bits
* remain masked.
@@ -5621,14 +5795,15 @@ static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
interrupt_clear_down(dd, 0, eri);
} else {
dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
- source);
+ source);
}
}
static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
{
return flag_string(buf, buf_len, flags,
- sc_err_status_flags, ARRAY_SIZE(sc_err_status_flags));
+ sc_err_status_flags,
+ ARRAY_SIZE(sc_err_status_flags));
}
/*
@@ -5653,15 +5828,15 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
sw_index = dd->hw_to_sw[hw_context];
if (sw_index >= dd->num_send_contexts) {
dd_dev_err(dd,
- "out of range sw index %u for send context %u\n",
- sw_index, hw_context);
+ "out of range sw index %u for send context %u\n",
+ sw_index, hw_context);
return;
}
sci = &dd->send_contexts[sw_index];
sc = sci->sc;
if (!sc) {
dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
- sw_index, hw_context);
+ sw_index, hw_context);
return;
}
@@ -5671,10 +5846,11 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
- send_context_err_status_string(flags, sizeof(flags), status));
+ send_context_err_status_string(flags, sizeof(flags),
+ status));
if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
- handle_send_egress_err_info(dd);
+ handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
/*
* Automatically restart halted kernel contexts out of interrupt
@@ -5707,6 +5883,7 @@ static void handle_sdma_eng_err(struct hfi1_devdata *dd,
dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
sde->this_idx, source, (unsigned long long)status);
#endif
+ sde->err_cnt++;
sdma_engine_error(sde, status);
/*
@@ -5755,23 +5932,22 @@ static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
interrupt_clear_down(dd, 0, eri);
else
dd_dev_info(dd,
- "%s: Unimplemented/reserved interrupt %d\n",
- __func__, source);
+ "%s: Unimplemented/reserved interrupt %d\n",
+ __func__, source);
}
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
{
- /* source is always zero */
+ /* src_ctx is always zero */
struct hfi1_pportdata *ppd = dd->pport;
unsigned long flags;
u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
if (reg & QSFP_HFI0_MODPRST_N) {
-
- dd_dev_info(dd, "%s: ModPresent triggered QSFP interrupt\n",
- __func__);
-
if (!qsfp_mod_present(ppd)) {
+ dd_dev_info(dd, "%s: QSFP module removed\n",
+ __func__);
+
ppd->driver_link_ready = 0;
/*
* Cable removed, reset all our information about the
@@ -5784,14 +5960,23 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
* an interrupt when a cable is inserted
*/
ppd->qsfp_info.cache_valid = 0;
- ppd->qsfp_info.qsfp_interrupt_functional = 0;
+ ppd->qsfp_info.reset_needed = 0;
+ ppd->qsfp_info.limiting_active = 0;
spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
- flags);
- write_csr(dd,
- dd->hfi1_id ?
- ASIC_QSFP2_INVERT :
- ASIC_QSFP1_INVERT,
- qsfp_int_mgmt);
+ flags);
+ /* Invert the ModPresent pin now to detect plug-in */
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
+ ASIC_QSFP1_INVERT, qsfp_int_mgmt);
+
+ if ((ppd->offline_disabled_reason >
+ HFI1_ODR_MASK(
+ OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
+ (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(
+ OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
+
if (ppd->host_link_state == HLS_DN_POLL) {
/*
* The link is still in POLL. This means
@@ -5802,28 +5987,33 @@ static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
queue_work(ppd->hfi1_wq, &ppd->link_down_work);
}
} else {
+ dd_dev_info(dd, "%s: QSFP module inserted\n",
+ __func__);
+
spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
ppd->qsfp_info.cache_valid = 0;
ppd->qsfp_info.cache_refresh_required = 1;
spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
- flags);
+ flags);
+ /*
+ * Stop inversion of ModPresent pin to detect
+ * removal of the cable
+ */
qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
- write_csr(dd,
- dd->hfi1_id ?
- ASIC_QSFP2_INVERT :
- ASIC_QSFP1_INVERT,
- qsfp_int_mgmt);
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
+ ASIC_QSFP1_INVERT, qsfp_int_mgmt);
+
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
}
}
if (reg & QSFP_HFI0_INT_N) {
-
- dd_dev_info(dd, "%s: IntN triggered QSFP interrupt\n",
- __func__);
+ dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
+ __func__);
spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
ppd->qsfp_info.check_interrupt_flags = 1;
- ppd->qsfp_info.qsfp_interrupt_functional = 1;
spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
}
@@ -5837,11 +6027,11 @@ static int request_host_lcb_access(struct hfi1_devdata *dd)
int ret;
ret = do_8051_command(dd, HCMD_MISC,
- (u64)HCMD_MISC_REQUEST_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
- NULL);
+ (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
+ LOAD_DATA_FIELD_ID_SHIFT, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd, "%s: command failed with error %d\n",
- __func__, ret);
+ __func__, ret);
}
return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}
@@ -5851,11 +6041,11 @@ static int request_8051_lcb_access(struct hfi1_devdata *dd)
int ret;
ret = do_8051_command(dd, HCMD_MISC,
- (u64)HCMD_MISC_GRANT_LCB_ACCESS << LOAD_DATA_FIELD_ID_SHIFT,
- NULL);
+ (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
+ LOAD_DATA_FIELD_ID_SHIFT, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd, "%s: command failed with error %d\n",
- __func__, ret);
+ __func__, ret);
}
return ret == HCMD_SUCCESS ? 0 : -EBUSY;
}
@@ -5867,8 +6057,8 @@ static int request_8051_lcb_access(struct hfi1_devdata *dd)
static inline void set_host_lcb_access(struct hfi1_devdata *dd)
{
write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
- DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK
- | DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
+ DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
+ DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
}
/*
@@ -5878,7 +6068,7 @@ static inline void set_host_lcb_access(struct hfi1_devdata *dd)
static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
{
write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
- DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
+ DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
}
/*
@@ -5912,7 +6102,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
/* this access is valid only when the link is up */
if ((ppd->host_link_state & HLS_UP) == 0) {
dd_dev_info(dd, "%s: link state %s not up\n",
- __func__, link_state_name(ppd->host_link_state));
+ __func__, link_state_name(ppd->host_link_state));
ret = -EBUSY;
goto done;
}
@@ -5921,8 +6111,8 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
ret = request_host_lcb_access(dd);
if (ret) {
dd_dev_err(dd,
- "%s: unable to acquire LCB access, err %d\n",
- __func__, ret);
+ "%s: unable to acquire LCB access, err %d\n",
+ __func__, ret);
goto done;
}
set_host_lcb_access(dd);
@@ -5959,7 +6149,7 @@ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
if (dd->lcb_access_count == 0) {
dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
- __func__);
+ __func__);
goto done;
}
@@ -5968,8 +6158,8 @@ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
ret = request_8051_lcb_access(dd);
if (ret) {
dd_dev_err(dd,
- "%s: unable to release LCB access, err %d\n",
- __func__, ret);
+ "%s: unable to release LCB access, err %d\n",
+ __func__, ret);
/* restore host access if the grant didn't work */
set_host_lcb_access(dd);
goto done;
@@ -6001,19 +6191,26 @@ static void init_lcb_access(struct hfi1_devdata *dd)
static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
{
write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
- DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK
- | (u64)return_code << DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT
- | (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
+ DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
+ (u64)return_code <<
+ DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
+ (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
}
/*
- * Handle requests from the 8051.
+ * Handle host requests from the 8051.
+ *
+ * This is a work-queue function run outside of interrupt context.
*/
-static void handle_8051_request(struct hfi1_devdata *dd)
+void handle_8051_request(struct work_struct *work)
{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ dc_host_req_work);
+ struct hfi1_devdata *dd = ppd->dd;
u64 reg;
- u16 data;
- u8 type;
+ u16 data = 0;
+ u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
+ u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
@@ -6034,12 +6231,46 @@ static void handle_8051_request(struct hfi1_devdata *dd)
case HREQ_READ_CONFIG:
case HREQ_SET_TX_EQ_ABS:
case HREQ_SET_TX_EQ_REL:
- case HREQ_ENABLE:
dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
- type);
+ type);
hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
break;
+ case HREQ_ENABLE:
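+ /*
+ * Request data layout, as decoded below: bits 3:0 select the
+ * lanes to act on, bit 9 asks for TX CDR on, bit 11 for RX CDR
+ * on. In the QSFP CDR control byte the TX enables occupy bits
+ * 7:4 and the RX enables bits 3:0, one bit per lane.
+ */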
+ lanes = data & 0xF;
+ for (i = 0; lanes; lanes >>= 1, i++) {
+ if (!(lanes & 1))
+ continue;
+ if (data & 0x200) {
+ /* enable TX CDR */
+ if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
+ cache[QSFP_CDR_INFO_OFFS] & 0x80)
+ cdr_ctrl_byte |= (1 << (i + 4));
+ } else {
+ /* disable TX CDR */
+ if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
+ cache[QSFP_CDR_INFO_OFFS] & 0x80)
+ cdr_ctrl_byte &= ~(1 << (i + 4));
+ }
+
+ if (data & 0x800) {
+ /* enable RX CDR */
+ if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
+ cache[QSFP_CDR_INFO_OFFS] & 0x40)
+ cdr_ctrl_byte |= (1 << i);
+ } else {
+ /* disable RX CDR */
+ if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
+ cache[QSFP_CDR_INFO_OFFS] & 0x40)
+ cdr_ctrl_byte &= ~(1 << i);
+ }
+ }
+ one_qsfp_write(ppd, dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
+ &cdr_ctrl_byte, 1);
+ hreq_response(dd, HREQ_SUCCESS, data);
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+ break;
+
case HREQ_CONFIG_DONE:
hreq_response(dd, HREQ_SUCCESS, 0);
break;
@@ -6059,11 +6290,11 @@ static void write_global_credit(struct hfi1_devdata *dd,
u8 vau, u16 total, u16 shared)
{
write_csr(dd, SEND_CM_GLOBAL_CREDIT,
- ((u64)total
- << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
- | ((u64)shared
- << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
- | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
+ ((u64)total <<
+ SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
+ ((u64)shared <<
+ SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
+ ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
}
/*
@@ -6100,7 +6331,7 @@ void reset_link_credits(struct hfi1_devdata *dd)
/* remove all previous VL credit limits */
for (i = 0; i < TXE_NUM_DATA_VL; i++)
- write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
+ write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
write_csr(dd, SEND_CM_CREDIT_VL15, 0);
write_global_credit(dd, 0, 0, 0);
/* reset the CM block */
@@ -6142,15 +6373,14 @@ static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
write_csr(dd, DC_LCB_CFG_RUN, 0);
/* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
- 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
+ 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
/* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
reg = read_csr(dd, DCC_CFG_RESET);
- write_csr(dd, DCC_CFG_RESET,
- reg
- | (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT)
- | (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
- (void) read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
+ write_csr(dd, DCC_CFG_RESET, reg |
+ (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
+ (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
+ (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
if (!abort) {
udelay(1); /* must hold for the longer of 16cclks or 20ns */
write_csr(dd, DCC_CFG_RESET, reg);
@@ -6179,14 +6409,18 @@ static void dc_shutdown(struct hfi1_devdata *dd)
spin_unlock_irqrestore(&dd->dc8051_lock, flags);
/* Shutdown the LCB */
lcb_shutdown(dd, 1);
- /* Going to OFFLINE would have causes the 8051 to put the
+ /*
+ * Going to OFFLINE would have caused the 8051 to put the
* SerDes into reset already. Just need to shut down the 8051,
- * itself. */
+ * itself.
+ */
write_csr(dd, DC_DC8051_CFG_RST, 0x1);
}
-/* Calling this after the DC has been brought out of reset should not
- * do any damage. */
+/*
+ * Calling this after the DC has been brought out of reset should not
+ * do any damage.
+ */
static void dc_start(struct hfi1_devdata *dd)
{
unsigned long flags;
@@ -6202,7 +6436,7 @@ static void dc_start(struct hfi1_devdata *dd)
ret = wait_fm_ready(dd, TIMEOUT_8051_START);
if (ret) {
dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
- __func__);
+ __func__);
}
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, 0x10);
@@ -6295,7 +6529,7 @@ static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
/* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
- DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
+ DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
}
@@ -6312,8 +6546,10 @@ void handle_sma_message(struct work_struct *work)
u64 msg;
int ret;
- /* msg is bytes 1-4 of the 40-bit idle message - the command code
- is stripped off */
+ /*
+ * msg is bytes 1-4 of the 40-bit idle message - the command code
+ * is stripped off
+ */
ret = read_idle_sma(dd, &msg);
if (ret)
return;
@@ -6339,8 +6575,8 @@ void handle_sma_message(struct work_struct *work)
*
* Can activate the node. Discard otherwise.
*/
- if (ppd->host_link_state == HLS_UP_ARMED
- && ppd->is_active_optimize_enabled) {
+ if (ppd->host_link_state == HLS_UP_ARMED &&
+ ppd->is_active_optimize_enabled) {
ppd->neighbor_normal = 1;
ret = set_link_state(ppd, HLS_UP_ACTIVE);
if (ret)
@@ -6352,8 +6588,8 @@ void handle_sma_message(struct work_struct *work)
break;
default:
dd_dev_err(dd,
- "%s: received unexpected SMA idle message 0x%llx\n",
- __func__, msg);
+ "%s: received unexpected SMA idle message 0x%llx\n",
+ __func__, msg);
break;
}
}
@@ -6445,10 +6681,9 @@ static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
if (time_after(jiffies, timeout)) {
dd_dev_err(dd,
- "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
- freeze ? "" : "un",
- reg & ALL_FROZE,
- freeze ? ALL_FROZE : 0ull);
+ "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
+ freeze ? "" : "un", reg & ALL_FROZE,
+ freeze ? ALL_FROZE : 0ull);
return;
}
usleep_range(80, 120);
@@ -6478,11 +6713,17 @@ static void rxe_freeze(struct hfi1_devdata *dd)
*/
static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
{
+ u32 rcvmask;
int i;
/* enable all kernel contexts */
- for (i = 0; i < dd->n_krcv_queues; i++)
- hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, i);
+ for (i = 0; i < dd->n_krcv_queues; i++) {
+ rcvmask = HFI1_RCVCTRL_CTXT_ENB;
+ /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
+ rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
+ HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
+ hfi1_rcvctrl(dd, rcvmask, i);
+ }
/* enable port */
add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -6567,7 +6808,7 @@ void handle_freeze(struct work_struct *work)
void handle_link_up(struct work_struct *work)
{
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- link_up_work);
+ link_up_work);
set_link_state(ppd, HLS_UP_INIT);
/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
@@ -6586,17 +6827,20 @@ void handle_link_up(struct work_struct *work)
if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
/* oops - current speed is not enabled, bounce */
dd_dev_err(ppd->dd,
- "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
- ppd->link_speed_active, ppd->link_speed_enabled);
+ "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
+ ppd->link_speed_active, ppd->link_speed_enabled);
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
- OPA_LINKDOWN_REASON_SPEED_POLICY);
+ OPA_LINKDOWN_REASON_SPEED_POLICY);
set_link_state(ppd, HLS_DN_OFFLINE);
+ tune_serdes(ppd);
start_link(ppd);
}
}
-/* Several pieces of LNI information were cached for SMA in ppd.
- * Reset these on link down */
+/*
+ * Several pieces of LNI information were cached for SMA in ppd.
+ * Reset these on link down
+ */
static void reset_neighbor_info(struct hfi1_pportdata *ppd)
{
ppd->neighbor_guid = 0;
@@ -6616,7 +6860,13 @@ void handle_link_down(struct work_struct *work)
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
link_down_work);
- /* go offline first, then deal with reasons */
+ if ((ppd->host_link_state &
+ (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
+ ppd->port_type == PORT_TYPE_FIXED)
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
+
+ /* Go offline first, then deal with reading/writing through 8051 */
set_link_state(ppd, HLS_DN_OFFLINE);
lcl_reason = 0;
@@ -6636,12 +6886,16 @@ void handle_link_down(struct work_struct *work)
/* disable the port */
clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
- /* If there is no cable attached, turn the DC off. Otherwise,
- * start the link bring up. */
- if (!qsfp_mod_present(ppd))
+ /*
+ * If there is no cable attached, turn the DC off. Otherwise,
+ * start the link bring up.
+ */
+ if (!qsfp_mod_present(ppd)) {
dc_shutdown(ppd->dd);
- else
+ } else {
+ tune_serdes(ppd);
start_link(ppd);
+ }
}
void handle_link_bounce(struct work_struct *work)
@@ -6654,10 +6908,11 @@ void handle_link_bounce(struct work_struct *work)
*/
if (ppd->host_link_state & HLS_UP) {
set_link_state(ppd, HLS_DN_OFFLINE);
+ tune_serdes(ppd);
start_link(ppd);
} else {
dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
- __func__, link_state_name(ppd->host_link_state));
+ __func__, link_state_name(ppd->host_link_state));
}
}
@@ -6754,7 +7009,7 @@ static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
case 3: return OPA_LINK_WIDTH_3X;
default:
dd_dev_info(dd, "%s: invalid width %d, using 4\n",
- __func__, width);
+ __func__, width);
/* fall through */
case 4: return OPA_LINK_WIDTH_4X;
}
@@ -6766,6 +7021,7 @@ static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
static const u8 bit_counts[16] = {
0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};
+
static inline u8 nibble_to_count(u8 nibble)
{
return bit_counts[nibble & 0xf];
@@ -6791,7 +7047,7 @@ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
/* read the active lanes */
read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
- &rx_polarity_inversion, &max_rate);
+ &rx_polarity_inversion, &max_rate);
read_local_lni(dd, &enable_lane_rx);
/* convert to counts */
@@ -6803,8 +7059,8 @@ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
* handle_verify_cap(). The ASIC 8051 firmware does not correctly
* set the max_rate field in handle_verify_cap until v0.19.
*/
- if ((dd->icode == ICODE_RTL_SILICON)
- && (dd->dc8051_ver < dc8051_ver(0, 19))) {
+ if ((dd->icode == ICODE_RTL_SILICON) &&
+ (dd->dc8051_ver < dc8051_ver(0, 19))) {
/* max_rate: 0 = 12.5G, 1 = 25G */
switch (max_rate) {
case 0:
@@ -6812,8 +7068,8 @@ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
break;
default:
dd_dev_err(dd,
- "%s: unexpected max rate %d, using 25Gb\n",
- __func__, (int)max_rate);
+ "%s: unexpected max rate %d, using 25Gb\n",
+ __func__, (int)max_rate);
/* fall through */
case 1:
dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
@@ -6822,8 +7078,8 @@ static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
}
dd_dev_info(dd,
- "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
- enable_lane_tx, tx, enable_lane_rx, rx);
+ "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
+ enable_lane_tx, tx, enable_lane_rx, rx);
*tx_width = link_width_to_bits(dd, tx);
*rx_width = link_width_to_bits(dd, rx);
}
@@ -6926,13 +7182,8 @@ void handle_verify_cap(struct work_struct *work)
*/
read_vc_remote_phy(dd, &power_management, &continious);
- read_vc_remote_fabric(
- dd,
- &vau,
- &z,
- &vcu,
- &vl15buf,
- &partner_supported_crc);
+ read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
+ &partner_supported_crc);
read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
read_remote_device_id(dd, &device_id, &device_rev);
/*
@@ -6943,19 +7194,16 @@ void handle_verify_cap(struct work_struct *work)
/* print the active widths */
get_link_widths(dd, &active_tx, &active_rx);
dd_dev_info(dd,
- "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
- (int)power_management, (int)continious);
+ "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
+ (int)power_management, (int)continious);
dd_dev_info(dd,
- "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
- (int)vau,
- (int)z,
- (int)vcu,
- (int)vl15buf,
- (int)partner_supported_crc);
+ "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
+ (int)vau, (int)z, (int)vcu, (int)vl15buf,
+ (int)partner_supported_crc);
dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
- (u32)remote_tx_rate, (u32)link_widths);
+ (u32)remote_tx_rate, (u32)link_widths);
dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
- (u32)device_id, (u32)device_rev);
+ (u32)device_id, (u32)device_rev);
/*
* The peer vAU value just read is the peer receiver value. HFI does
* not support a transmit vAU of 0 (AU == 8). We advertised that
@@ -6990,10 +7238,10 @@ void handle_verify_cap(struct work_struct *work)
reg = read_csr(dd, SEND_CM_CTRL);
if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
write_csr(dd, SEND_CM_CTRL,
- reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
+ reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
} else {
write_csr(dd, SEND_CM_CTRL,
- reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
+ reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
}
ppd->link_speed_active = 0; /* invalid value */
@@ -7018,7 +7266,7 @@ void handle_verify_cap(struct work_struct *work)
}
if (ppd->link_speed_active == 0) {
dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
- __func__, (int)remote_tx_rate);
+ __func__, (int)remote_tx_rate);
ppd->link_speed_active = OPA_LINK_SPEED_25G;
}
@@ -7074,9 +7322,9 @@ void handle_verify_cap(struct work_struct *work)
read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
dd_dev_info(dd,
- "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
- ppd->neighbor_guid, ppd->neighbor_type,
- ppd->mgmt_allowed, ppd->neighbor_fm_security);
+ "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
+ ppd->neighbor_guid, ppd->neighbor_type,
+ ppd->mgmt_allowed, ppd->neighbor_fm_security);
if (ppd->mgmt_allowed)
add_full_mgmt_pkey(ppd);
@@ -7130,28 +7378,27 @@ retry:
/* bounce if not at starting active width */
if ((ppd->link_width_active !=
- ppd->link_width_downgrade_tx_active)
- || (ppd->link_width_active !=
- ppd->link_width_downgrade_rx_active)) {
+ ppd->link_width_downgrade_tx_active) ||
+ (ppd->link_width_active !=
+ ppd->link_width_downgrade_rx_active)) {
dd_dev_err(ppd->dd,
- "Link downgrade is disabled and link has downgraded, downing link\n");
+ "Link downgrade is disabled and link has downgraded, downing link\n");
dd_dev_err(ppd->dd,
- " original 0x%x, tx active 0x%x, rx active 0x%x\n",
- ppd->link_width_active,
- ppd->link_width_downgrade_tx_active,
- ppd->link_width_downgrade_rx_active);
+ " original 0x%x, tx active 0x%x, rx active 0x%x\n",
+ ppd->link_width_active,
+ ppd->link_width_downgrade_tx_active,
+ ppd->link_width_downgrade_rx_active);
do_bounce = 1;
}
- } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0
- || (lwde & ppd->link_width_downgrade_rx_active) == 0) {
+ } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
+ (lwde & ppd->link_width_downgrade_rx_active) == 0) {
/* Tx or Rx is outside the enabled policy */
dd_dev_err(ppd->dd,
- "Link is outside of downgrade allowed, downing link\n");
+ "Link is outside of downgrade allowed, downing link\n");
dd_dev_err(ppd->dd,
- " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
- lwde,
- ppd->link_width_downgrade_tx_active,
- ppd->link_width_downgrade_rx_active);
+ " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
+ lwde, ppd->link_width_downgrade_tx_active,
+ ppd->link_width_downgrade_rx_active);
do_bounce = 1;
}
@@ -7160,8 +7407,9 @@ done:
if (do_bounce) {
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
- OPA_LINKDOWN_REASON_WIDTH_POLICY);
+ OPA_LINKDOWN_REASON_WIDTH_POLICY);
set_link_state(ppd, HLS_DN_OFFLINE);
+ tune_serdes(ppd);
start_link(ppd);
}
}
@@ -7242,9 +7490,10 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
& (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
queue_link_down = 1;
dd_dev_info(dd, "Link error: %s\n",
- dc8051_info_err_string(buf,
- sizeof(buf),
- err & FAILED_LNI));
+ dc8051_info_err_string(buf,
+ sizeof(buf),
+ err &
+ FAILED_LNI));
}
err &= ~(u64)FAILED_LNI;
}
@@ -7256,7 +7505,8 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
if (err) {
/* report remaining errors, but do not do anything */
dd_dev_err(dd, "8051 info error: %s\n",
- dc8051_info_err_string(buf, sizeof(buf), err));
+ dc8051_info_err_string(buf, sizeof(buf),
+ err));
}
/*
@@ -7284,7 +7534,7 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
host_msg &= ~(u64)LINKUP_ACHIEVED;
}
if (host_msg & EXT_DEVICE_CFG_REQ) {
- handle_8051_request(dd);
+ queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
}
if (host_msg & VERIFY_CAP_FRAME) {
@@ -7309,8 +7559,9 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
if (host_msg) {
/* report remaining messages, but do not do anything */
dd_dev_info(dd, "8051 info host message: %s\n",
- dc8051_info_host_msg_string(buf, sizeof(buf),
- host_msg));
+ dc8051_info_host_msg_string(buf,
+ sizeof(buf),
+ host_msg));
}
reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
@@ -7323,25 +7574,27 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
*/
dd_dev_err(dd, "Lost 8051 heartbeat\n");
write_csr(dd, DC_DC8051_ERR_EN,
- read_csr(dd, DC_DC8051_ERR_EN)
- & ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
+ read_csr(dd, DC_DC8051_ERR_EN) &
+ ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
}
if (reg) {
/* report the error, but do not do anything */
dd_dev_err(dd, "8051 error: %s\n",
- dc8051_err_string(buf, sizeof(buf), reg));
+ dc8051_err_string(buf, sizeof(buf), reg));
}
if (queue_link_down) {
- /* if the link is already going down or disabled, do not
- * queue another */
- if ((ppd->host_link_state
- & (HLS_GOING_OFFLINE|HLS_LINK_COOLDOWN))
- || ppd->link_enabled == 0) {
+ /*
+ * if the link is already going down or disabled, do not
+ * queue another
+ */
+ if ((ppd->host_link_state &
+ (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
+ ppd->link_enabled == 0) {
dd_dev_info(dd, "%s: not queuing link down\n",
- __func__);
+ __func__);
} else {
queue_work(ppd->hfi1_wq, &ppd->link_down_work);
}
@@ -7483,8 +7736,10 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
/* set status bit */
dd->err_info_rcvport.status_and_code |=
OPA_EI_STATUS_SMASK;
- /* save first 2 flits in the packet that caused
- * the error */
+ /*
+ * save first 2 flits in the packet that caused
+ * the error
+ */
dd->err_info_rcvport.packet_flit1 = hdr0;
dd->err_info_rcvport.packet_flit2 = hdr1;
}
@@ -7517,7 +7772,7 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
/* just report this */
dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
dd_dev_info(dd, " hdr0 0x%llx, hdr1 0x%llx\n",
- hdr0, hdr1);
+ hdr0, hdr1);
reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
}
@@ -7536,7 +7791,7 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
/* report any remaining errors */
if (reg)
dd_dev_info(dd, "DCC Error: %s\n",
- dcc_err_string(buf, sizeof(buf), reg));
+ dcc_err_string(buf, sizeof(buf), reg));
if (lcl_reason == 0)
lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
@@ -7553,7 +7808,7 @@ static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
char buf[96];
dd_dev_info(dd, "LCB Error: %s\n",
- lcb_err_string(buf, sizeof(buf), reg));
+ lcb_err_string(buf, sizeof(buf), reg));
}
/*
@@ -7643,7 +7898,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
err_detail = "out of range";
}
dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
- err_detail, source);
+ err_detail, source);
}
/*
@@ -7669,7 +7924,7 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
err_detail = "out of range";
}
dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
- err_detail, source);
+ err_detail, source);
}
/*
@@ -7680,12 +7935,14 @@ static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
char name[64];
dd_dev_err(dd, "unexpected %s interrupt\n",
- is_reserved_name(name, sizeof(name), source));
+ is_reserved_name(name, sizeof(name), source));
}
static const struct is_table is_table[] = {
-/* start end
- name func interrupt func */
+/*
+ * start end
+ * name func interrupt func
+ */
{ IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
is_misc_err_name, is_misc_err_int },
{ IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
@@ -7756,7 +8013,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
/* phase 2: call the appropriate handler */
for_each_set_bit(bit, (unsigned long *)&regs[0],
- CCE_NUM_INT_CSRS*64) {
+ CCE_NUM_INT_CSRS * 64) {
is_interrupt(dd, bit);
}
@@ -7779,27 +8036,27 @@ static irqreturn_t sdma_interrupt(int irq, void *data)
/* This read_csr is really bad in the hot path */
status = read_csr(dd,
- CCE_INT_STATUS + (8*(IS_SDMA_START/64)))
- & sde->imask;
+ CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
+ & sde->imask;
if (likely(status)) {
/* clear the interrupt(s) */
write_csr(dd,
- CCE_INT_CLEAR + (8*(IS_SDMA_START/64)),
- status);
+ CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
+ status);
/* handle the interrupt(s) */
sdma_engine_interrupt(sde, status);
} else
dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
- sde->this_idx);
+ sde->this_idx);
return IRQ_HANDLED;
}
/*
- * Clear the receive interrupt, forcing the write and making sure
- * we have data from the chip, pushing everything in front of it
- * back to the host.
+ * Clear the receive interrupt. Use a read of the interrupt clear CSR
+ * to ensure that the write completed. This does NOT guarantee that
+ * queued DMA writes to memory from the chip are pushed.
*/
static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
{
@@ -7813,27 +8070,45 @@ static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
}
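/*
 * A minimal sketch of the write-then-read-back pattern described above,
 * assuming the read_csr()/write_csr() accessors used throughout this file:
 * the read forces the posted CSR write to complete at the device before
 * continuing, but does not flush DMA the chip has already queued toward
 * host memory.
 *
 *	write_csr(rcd->dd, CCE_INT_CLEAR + (8 * rcd->ireg), rcd->imask);
 *	(void)read_csr(rcd->dd, CCE_INT_CLEAR + (8 * rcd->ireg));
 */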
/* force the receive interrupt */
-static inline void force_recv_intr(struct hfi1_ctxtdata *rcd)
+void force_recv_intr(struct hfi1_ctxtdata *rcd)
{
write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
}
-/* return non-zero if a packet is present */
+/*
+ * Return non-zero if a packet is present.
+ *
+ * This routine is called when rechecking for packets after the RcvAvail
+ * interrupt has been cleared down. First, do a quick check of memory for
+ * a packet present. If not found, use an expensive CSR read of the context
+ * tail to determine the actual tail. The CSR read is necessary because there
+ * is no method to push pending DMAs to memory other than an interrupt and we
+ * are trying to determine if we need to force an interrupt.
+ */
static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
{
+ u32 tail;
+ int present;
+
if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
- return (rcd->seq_cnt ==
+ present = (rcd->seq_cnt ==
rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
+ else /* is RDMA rtail */
+ present = (rcd->head != get_rcvhdrtail(rcd));
- /* else is RDMA rtail */
- return (rcd->head != get_rcvhdrtail(rcd));
+ if (present)
+ return 1;
+
+ /* fall back to a CSR read, correct independent of DMA_RTAIL */
+ tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
+ return rcd->head != tail;
}
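/*
 * Sketch of the recheck sequence this enables, assuming the helpers above:
 * after clearing the interrupt, look again for a packet that may have
 * arrived in the race window and force the interrupt so it is not lost.
 *
 *	clear_recv_intr(rcd);
 *	if (check_packet_present(rcd))
 *		force_recv_intr(rcd);
 */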
/*
* Receive packet IRQ handler. This routine expects to be on its own IRQ.
* This routine will try to handle packets immediately (latency), but if
 * it finds too many, it will invoke the thread handler (bandwidth). The
- * chip receive interupt is *not* cleared down until this or the thread (if
+ * chip receive interrupt is *not* cleared down until this or the thread (if
* invoked) is finished. The intent is to avoid extra interrupts while we
* are processing packets anyway.
*/
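/*
 * A minimal sketch of that latency/bandwidth split, assuming do_interrupt()
 * reports (e.g. via a value such as RCV_PKT_LIMIT) that too many packets
 * remain for interrupt context:
 *
 *	disposition = rcd->do_interrupt(rcd, 0);
 *	if (disposition == RCV_PKT_LIMIT)
 *		return IRQ_WAKE_THREAD;	/* thread clears the interrupt */
 *	clear_recv_intr(rcd);
 *	return IRQ_HANDLED;
 */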
@@ -7846,6 +8121,7 @@ static irqreturn_t receive_context_interrupt(int irq, void *data)
trace_hfi1_receive_interrupt(dd, rcd->ctxt);
this_cpu_inc(*dd->int_counter);
+ aspm_ctx_disable(rcd);
/* receive interrupt remains blocked while processing packets */
disposition = rcd->do_interrupt(rcd, 0);
@@ -7912,7 +8188,7 @@ u32 read_physical_state(struct hfi1_devdata *dd)
& DC_DC8051_STS_CUR_STATE_PORT_MASK;
}
-static u32 read_logical_state(struct hfi1_devdata *dd)
+u32 read_logical_state(struct hfi1_devdata *dd)
{
u64 reg;
@@ -8160,8 +8436,8 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
-static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
{
u64 data;
int ret;
@@ -8172,8 +8448,8 @@ static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
- "load 8051 config: field id %d, lane %d, err %d\n",
- (int)field_id, (int)lane_id, ret);
+ "load 8051 config: field id %d, lane %d, err %d\n",
+ (int)field_id, (int)lane_id, ret);
}
return ret;
}
@@ -8183,8 +8459,8 @@ static int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
* set the result, even on error.
* Return 0 on success, -errno on failure
*/
-static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
- u32 *result)
+int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
+ u32 *result)
{
u64 big_data;
u32 addr;
@@ -8210,7 +8486,7 @@ static int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
} else {
*result = 0;
dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
- __func__, lane_id, field_id);
+ __func__, lane_id, field_id);
}
return ret;
@@ -8247,7 +8523,7 @@ static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
u32 frame;
read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
- &frame);
+ &frame);
*misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
*flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
@@ -8329,7 +8605,7 @@ static void read_vc_remote_link_width(struct hfi1_devdata *dd,
u32 frame;
read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
- &frame);
+ &frame);
*remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
& REMOTE_TX_RATE_MASK;
*link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
@@ -8369,7 +8645,7 @@ void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
*link_quality = 0;
if (dd->pport->host_link_state & HLS_UP) {
ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
- &frame);
+ &frame);
if (ret == 0)
*link_quality = (frame >> LINK_QUALITY_SHIFT)
& LINK_QUALITY_MASK;
@@ -8429,10 +8705,9 @@ static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
for (lane = 0; lane < 4; lane++) {
ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
if (ret) {
- dd_dev_err(
- dd,
- "Unable to read lane %d firmware details\n",
- lane);
+ dd_dev_err(dd,
+ "Unable to read lane %d firmware details\n",
+ lane);
continue;
}
version = (frame >> SPICO_ROM_VERSION_SHIFT)
@@ -8440,8 +8715,8 @@ static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
& SPICO_ROM_PROD_ID_MASK;
dd_dev_info(dd,
- "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
- lane, version, prod_id);
+ "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
+ lane, version, prod_id);
}
}
@@ -8454,11 +8729,10 @@ static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
{
int ret;
- ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG,
- type, data_out);
+ ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd, "read idle message: type %d, err %d\n",
- (u32)type, ret);
+ (u32)type, ret);
return -EINVAL;
}
dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
@@ -8475,8 +8749,8 @@ static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
*/
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
{
- return read_idle_message(dd,
- (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT, data);
+ return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
+ data);
}
/*
@@ -8492,7 +8766,7 @@ static int send_idle_message(struct hfi1_devdata *dd, u64 data)
ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
- data, ret);
+ data, ret);
return -EINVAL;
}
return 0;
@@ -8507,8 +8781,8 @@ int send_idle_sma(struct hfi1_devdata *dd, u64 message)
{
u64 data;
- data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT)
- | ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
+ data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
+ ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
return send_idle_message(dd, data);
}
@@ -8530,7 +8804,7 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
/* LCB_CFG_LOOPBACK.VAL = 2 */
/* LCB_CFG_LANE_WIDTH.VAL = 0 */
write_csr(dd, DC_LCB_CFG_LOOPBACK,
- IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
+ IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
}
@@ -8542,25 +8816,24 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
/* LCB_CFG_RUN.EN = 1 */
write_csr(dd, DC_LCB_CFG_RUN,
- 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
+ 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
timeout = jiffies + msecs_to_jiffies(10);
while (1) {
- reg = read_csr(dd,
- DC_LCB_STS_LINK_TRANSFER_ACTIVE);
+ reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
if (reg)
break;
if (time_after(jiffies, timeout)) {
dd_dev_err(dd,
- "timeout waiting for LINK_TRANSFER_ACTIVE\n");
+ "timeout waiting for LINK_TRANSFER_ACTIVE\n");
return -ETIMEDOUT;
}
udelay(2);
}
write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
- 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
+ 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
}
if (!loopback) {
@@ -8572,10 +8845,9 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
* done with LCB set up before resuming.
*/
dd_dev_err(dd,
- "Pausing for peer to be finished with LCB set up\n");
+ "Pausing for peer to be finished with LCB set up\n");
msleep(5000);
- dd_dev_err(dd,
- "Continuing with quick linkup\n");
+ dd_dev_err(dd, "Continuing with quick linkup\n");
}
write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
@@ -8589,8 +8861,8 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
- "%s: set physical link state to quick LinkUp failed with return %d\n",
- __func__, ret);
+ "%s: set physical link state to quick LinkUp failed with return %d\n",
+ __func__, ret);
set_host_lcb_access(dd);
write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
@@ -8615,8 +8887,8 @@ static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
if (ret == HCMD_SUCCESS)
return 0;
dd_dev_err(dd,
- "Set physical link state to SerDes Loopback failed with return %d\n",
- ret);
+ "Set physical link state to SerDes Loopback failed with return %d\n",
+ ret);
if (ret >= 0)
ret = -EINVAL;
return ret;
@@ -8631,7 +8903,7 @@ static int init_loopback(struct hfi1_devdata *dd)
/* all loopbacks should disable self GUID check */
write_csr(dd, DC_DC8051_CFG_MODE,
- (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
+ (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
/*
* The simulator has only one loopback option - LCB. Switch
@@ -8639,10 +8911,9 @@ static int init_loopback(struct hfi1_devdata *dd)
*
* Accept all valid loopback values.
*/
- if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
- && (loopback == LOOPBACK_SERDES
- || loopback == LOOPBACK_LCB
- || loopback == LOOPBACK_CABLE)) {
+ if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
+ (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
+ loopback == LOOPBACK_CABLE)) {
loopback = LOOPBACK_LCB;
quick_linkup = 1;
return 0;
@@ -8663,7 +8934,7 @@ static int init_loopback(struct hfi1_devdata *dd)
/* not supported in emulation due to emulation RTL changes */
if (dd->icode == ICODE_FPGA_EMULATION) {
dd_dev_err(dd,
- "LCB loopback not supported in emulation\n");
+ "LCB loopback not supported in emulation\n");
return -EINVAL;
}
return 0;
@@ -8690,10 +8961,10 @@ static u16 opa_to_vc_link_widths(u16 opa_widths)
u16 from;
u16 to;
} opa_link_xlate[] = {
- { OPA_LINK_WIDTH_1X, 1 << (1-1) },
- { OPA_LINK_WIDTH_2X, 1 << (2-1) },
- { OPA_LINK_WIDTH_3X, 1 << (3-1) },
- { OPA_LINK_WIDTH_4X, 1 << (4-1) },
+ { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
+ { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
+ { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
+ { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
};
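	/*
	 * For example, OPA_LINK_WIDTH_3X maps to 1 << (3 - 1) == 0x4:
	 * the VC bit position is simply the link width minus one.
	 */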
for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
@@ -8719,7 +8990,7 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
/* set the local tx rate - need to read-modify-write */
ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
- &rx_polarity_inversion, &ppd->local_tx_rate);
+ &rx_polarity_inversion, &ppd->local_tx_rate);
if (ret)
goto set_local_link_attributes_fail;
@@ -8740,15 +9011,16 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
enable_lane_tx = 0xF; /* enable all four lanes */
ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
- rx_polarity_inversion, ppd->local_tx_rate);
+ rx_polarity_inversion, ppd->local_tx_rate);
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
/*
* DC supports continuous updates.
*/
- ret = write_vc_local_phy(dd, 0 /* no power management */,
- 1 /* continuous updates */);
+ ret = write_vc_local_phy(dd,
+ 0 /* no power management */,
+ 1 /* continuous updates */);
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
@@ -8759,7 +9031,8 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
goto set_local_link_attributes_fail;
ret = write_vc_local_link_width(dd, 0, 0,
- opa_to_vc_link_widths(ppd->link_width_enabled));
+ opa_to_vc_link_widths(
+ ppd->link_width_enabled));
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
@@ -8770,8 +9043,8 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
set_local_link_attributes_fail:
dd_dev_err(dd,
- "Failed to set local link attributes, return 0x%x\n",
- ret);
+ "Failed to set local link attributes, return 0x%x\n",
+ ret);
return ret;
}
@@ -8784,54 +9057,101 @@ int start_link(struct hfi1_pportdata *ppd)
{
if (!ppd->link_enabled) {
dd_dev_info(ppd->dd,
- "%s: stopping link start because link is disabled\n",
- __func__);
+ "%s: stopping link start because link is disabled\n",
+ __func__);
return 0;
}
if (!ppd->driver_link_ready) {
dd_dev_info(ppd->dd,
- "%s: stopping link start because driver is not ready\n",
- __func__);
+ "%s: stopping link start because driver is not ready\n",
+ __func__);
return 0;
}
if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
- loopback == LOOPBACK_LCB ||
- ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ loopback == LOOPBACK_LCB ||
+ ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
return set_link_state(ppd, HLS_DN_POLL);
dd_dev_info(ppd->dd,
- "%s: stopping link start because no cable is present\n",
- __func__);
+ "%s: stopping link start because no cable is present\n",
+ __func__);
return -EAGAIN;
}
-static void reset_qsfp(struct hfi1_pportdata *ppd)
+static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 mask;
+ unsigned long timeout;
+
+ /*
+ * Check for QSFP interrupt for t_init (SFF 8679)
+ */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ while (1) {
+ mask = read_csr(dd, dd->hfi1_id ?
+ ASIC_QSFP2_IN : ASIC_QSFP1_IN);
+ if (!(mask & QSFP_HFI0_INT_N)) {
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
+ ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
+ break;
+ }
+ if (time_after(jiffies, timeout)) {
+ dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
+ __func__);
+ break;
+ }
+ udelay(2);
+ }
+}
+
+static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u64 mask;
+
+ mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
+ if (enable)
+ mask |= (u64)QSFP_HFI0_INT_N;
+ else
+ mask &= ~(u64)QSFP_HFI0_INT_N;
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
+}
+
+void reset_qsfp(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
u64 mask, qsfp_mask;
+ /* Disable INT_N from triggering QSFP interrupts */
+ set_qsfp_int_n(ppd, 0);
+
+ /* Reset the QSFP */
mask = (u64)QSFP_HFI0_RESET_N;
- qsfp_mask = read_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
+ qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
qsfp_mask |= mask;
- write_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE,
- qsfp_mask);
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
qsfp_mask = read_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
qsfp_mask &= ~mask;
write_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
- qsfp_mask);
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
udelay(10);
qsfp_mask |= mask;
write_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT,
- qsfp_mask);
+ dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
+
+ wait_for_qsfp_init(ppd);
+
+ /*
+ * Allow INT_N to trigger the QSFP interrupt to watch
+ * for alarms and warnings
+ */
+ set_qsfp_int_n(ppd, 1);
}
static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
@@ -8840,102 +9160,86 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
struct hfi1_devdata *dd = ppd->dd;
if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
- (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
- dd_dev_info(dd,
- "%s: QSFP cable on fire\n",
- __func__);
+ (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
+ dd_dev_info(dd, "%s: QSFP cable on fire\n",
+ __func__);
if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
- (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
- dd_dev_info(dd,
- "%s: QSFP cable temperature too low\n",
- __func__);
+ (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
+ dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
+ __func__);
if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
- (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
- dd_dev_info(dd,
- "%s: QSFP supply voltage too high\n",
- __func__);
+ (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
+ dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
+ __func__);
if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
- (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
- dd_dev_info(dd,
- "%s: QSFP supply voltage too low\n",
- __func__);
+ (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
+ dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
+ __func__);
/* Byte 2 is vendor specific */
if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
- (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd,
- "%s: Cable RX channel 1/2 power too high\n",
- __func__);
+ (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
+ __func__);
if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
- (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd,
- "%s: Cable RX channel 1/2 power too low\n",
- __func__);
+ (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
+ dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
+ __func__);
if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
- (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd,
- "%s: Cable RX channel 3/4 power too high\n",
- __func__);
+ (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
+ __func__);
if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
- (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd,
- "%s: Cable RX channel 3/4 power too low\n",
- __func__);
+ (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
+ dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
+ __func__);
if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
- (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
- dd_dev_info(dd,
- "%s: Cable TX channel 1/2 bias too high\n",
- __func__);
+ (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
+ dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
+ __func__);
if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
- (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
- dd_dev_info(dd,
- "%s: Cable TX channel 1/2 bias too low\n",
- __func__);
+ (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
+ dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
+ __func__);
if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
- (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
- dd_dev_info(dd,
- "%s: Cable TX channel 3/4 bias too high\n",
- __func__);
+ (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
+ dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
+ __func__);
if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
- (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
- dd_dev_info(dd,
- "%s: Cable TX channel 3/4 bias too low\n",
- __func__);
+ (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
+ dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
+ __func__);
if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
- (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd,
- "%s: Cable TX channel 1/2 power too high\n",
- __func__);
+ (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
+ __func__);
if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
- (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd,
- "%s: Cable TX channel 1/2 power too low\n",
- __func__);
+ (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
+ dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
+ __func__);
if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
- (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
- dd_dev_info(dd,
- "%s: Cable TX channel 3/4 power too high\n",
- __func__);
+ (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
+ dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
+ __func__);
if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
- (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
- dd_dev_info(dd,
- "%s: Cable TX channel 3/4 power too low\n",
- __func__);
+ (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
+ dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
+ __func__);
/* Bytes 9-10 and 11-12 are reserved */
/* Bytes 13-15 are vendor specific */
@@ -8943,35 +9247,8 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
return 0;
}
-static int do_pre_lni_host_behaviors(struct hfi1_pportdata *ppd)
-{
- refresh_qsfp_cache(ppd, &ppd->qsfp_info);
-
- return 0;
-}
-
-static int do_qsfp_intr_fallback(struct hfi1_pportdata *ppd)
-{
- struct hfi1_devdata *dd = ppd->dd;
- u8 qsfp_interrupt_status = 0;
-
- if (qsfp_read(ppd, dd->hfi1_id, 2, &qsfp_interrupt_status, 1)
- != 1) {
- dd_dev_info(dd,
- "%s: Failed to read status of QSFP module\n",
- __func__);
- return -EIO;
- }
-
- /* We don't care about alarms & warnings with a non-functional INT_N */
- if (!(qsfp_interrupt_status & QSFP_DATA_NOT_READY))
- do_pre_lni_host_behaviors(ppd);
-
- return 0;
-}
-
/* This routine will only be scheduled if the QSFP module is present */
-static void qsfp_event(struct work_struct *work)
+void qsfp_event(struct work_struct *work)
{
struct qsfp_data *qd;
struct hfi1_pportdata *ppd;
@@ -8993,76 +9270,75 @@ static void qsfp_event(struct work_struct *work)
dc_start(dd);
if (qd->cache_refresh_required) {
- msleep(3000);
- reset_qsfp(ppd);
+ set_qsfp_int_n(ppd, 0);
+
+ wait_for_qsfp_init(ppd);
- /* Check for QSFP interrupt after t_init (SFF 8679)
- * + extra
+ /*
+ * Allow INT_N to trigger the QSFP interrupt to watch
+ * for alarms and warnings
*/
- msleep(3000);
- if (!qd->qsfp_interrupt_functional) {
- if (do_qsfp_intr_fallback(ppd) < 0)
- dd_dev_info(dd, "%s: QSFP fallback failed\n",
- __func__);
- ppd->driver_link_ready = 1;
- start_link(ppd);
- }
+ set_qsfp_int_n(ppd, 1);
+
+ tune_serdes(ppd);
+
+ start_link(ppd);
}
if (qd->check_interrupt_flags) {
u8 qsfp_interrupt_status[16] = {0,};
- if (qsfp_read(ppd, dd->hfi1_id, 6,
- &qsfp_interrupt_status[0], 16) != 16) {
+ if (one_qsfp_read(ppd, dd->hfi1_id, 6,
+ &qsfp_interrupt_status[0], 16) != 16) {
dd_dev_info(dd,
- "%s: Failed to read status of QSFP module\n",
- __func__);
+ "%s: Failed to read status of QSFP module\n",
+ __func__);
} else {
unsigned long flags;
- u8 data_status;
+ handle_qsfp_error_conditions(
+ ppd, qsfp_interrupt_status);
spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
ppd->qsfp_info.check_interrupt_flags = 0;
spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
- flags);
-
- if (qsfp_read(ppd, dd->hfi1_id, 2, &data_status, 1)
- != 1) {
- dd_dev_info(dd,
- "%s: Failed to read status of QSFP module\n",
- __func__);
- }
- if (!(data_status & QSFP_DATA_NOT_READY)) {
- do_pre_lni_host_behaviors(ppd);
- start_link(ppd);
- } else
- handle_qsfp_error_conditions(ppd,
- qsfp_interrupt_status);
+ flags);
}
}
}
-void init_qsfp(struct hfi1_pportdata *ppd)
+static void init_qsfp_int(struct hfi1_devdata *dd)
{
- struct hfi1_devdata *dd = ppd->dd;
- u64 qsfp_mask;
+ struct hfi1_pportdata *ppd = dd->pport;
+ u64 qsfp_mask, cce_int_mask;
+ const int qsfp1_int_smask = QSFP1_INT % 64;
+ const int qsfp2_int_smask = QSFP2_INT % 64;
- if (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
- ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
- ppd->driver_link_ready = 1;
- return;
+ /*
+ * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
+ * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
+ * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
+ * the index of the appropriate CSR in the CCEIntMask CSR array
+ */
+ cce_int_mask = read_csr(dd, CCE_INT_MASK +
+ (8 * (QSFP1_INT / 64)));
+ if (dd->hfi1_id) {
+ cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
+ write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
+ cce_int_mask);
+ } else {
+ cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
+ write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
+ cce_int_mask);
}
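/*
 * Worked example of the indexing above, assuming 64 interrupt sources per
 * CCE_INT_MASK CSR: source bit N lives in CSR number N / 64 (byte offset
 * 8 * (N / 64)) at bit position N % 64. Since Qsfp1Int and Qsfp2Int are
 * adjacent, either constant selects the same CSR:
 *
 *	u64 m = read_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)));
 *	m &= ~((u64)1 << (QSFP2_INT % 64));
 *	write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)), m);
 */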
- ppd->qsfp_info.ppd = ppd;
- INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
-
qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
/* Clear current status to avoid spurious interrupts */
- write_csr(dd,
- dd->hfi1_id ?
- ASIC_QSFP2_CLEAR :
- ASIC_QSFP1_CLEAR,
- qsfp_mask);
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
+ qsfp_mask);
+ write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
+ qsfp_mask);
+
+ set_qsfp_int_n(ppd, 0);
/* Handle active low nature of INT_N and MODPRST_N pins */
if (qsfp_mod_present(ppd))
@@ -9070,29 +9346,6 @@ void init_qsfp(struct hfi1_pportdata *ppd)
write_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
qsfp_mask);
-
- /* Allow only INT_N and MODPRST_N to trigger QSFP interrupts */
- qsfp_mask |= (u64)QSFP_HFI0_MODPRST_N;
- write_csr(dd,
- dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
- qsfp_mask);
-
- if (qsfp_mod_present(ppd)) {
- msleep(3000);
- reset_qsfp(ppd);
-
- /* Check for QSFP interrupt after t_init (SFF 8679)
- * + extra
- */
- msleep(3000);
- if (!ppd->qsfp_info.qsfp_interrupt_functional) {
- if (do_qsfp_intr_fallback(ppd) < 0)
- dd_dev_info(dd,
- "%s: QSFP fallback failed\n",
- __func__);
- ppd->driver_link_ready = 1;
- }
- }
}
/*
@@ -9100,6 +9353,10 @@ void init_qsfp(struct hfi1_pportdata *ppd)
*/
static void init_lcb(struct hfi1_devdata *dd)
{
+ /* simulator does not correctly handle LCB cclk loopback, skip */
+ if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
+ return;
+
/* the DC has been reset earlier in the driver load */
/* set LCB for cclk loopback on the port */
@@ -9128,8 +9385,6 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
ppd->guid = guid;
}
- /* the link defaults to enabled */
- ppd->link_enabled = 1;
/* Set linkinit_reason on power up per OPA spec */
ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
@@ -9142,6 +9397,12 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
return ret;
}
+ /*
+ * Tune the SerDes to a ballpark setting for optimal signal and bit
+ * error rate. This needs to be done before starting the link.
+ */
+ tune_serdes(ppd);
+
return start_link(ppd);
}
@@ -9159,8 +9420,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
ppd->driver_link_ready = 0;
ppd->link_enabled = 0;
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
- OPA_LINKDOWN_REASON_SMA_DISABLED);
+ OPA_LINKDOWN_REASON_SMA_DISABLED);
set_link_state(ppd, HLS_DN_OFFLINE);
/* disable the port */
@@ -9174,14 +9437,14 @@ static inline int init_cpu_counters(struct hfi1_devdata *dd)
ppd = (struct hfi1_pportdata *)(dd + 1);
for (i = 0; i < dd->num_pports; i++, ppd++) {
- ppd->ibport_data.rc_acks = NULL;
- ppd->ibport_data.rc_qacks = NULL;
- ppd->ibport_data.rc_acks = alloc_percpu(u64);
- ppd->ibport_data.rc_qacks = alloc_percpu(u64);
- ppd->ibport_data.rc_delayed_comp = alloc_percpu(u64);
- if ((ppd->ibport_data.rc_acks == NULL) ||
- (ppd->ibport_data.rc_delayed_comp == NULL) ||
- (ppd->ibport_data.rc_qacks == NULL))
+ ppd->ibport_data.rvp.rc_acks = NULL;
+ ppd->ibport_data.rvp.rc_qacks = NULL;
+ ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
+ ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
+ ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
+ if (!ppd->ibport_data.rvp.rc_acks ||
+ !ppd->ibport_data.rvp.rc_delayed_comp ||
+ !ppd->ibport_data.rvp.rc_qacks)
return -ENOMEM;
}
@@ -9216,8 +9479,8 @@ void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
pa = 0;
} else if (type > PT_INVALID) {
dd_dev_err(dd,
- "unexpected receive array type %u for index %u, not handled\n",
- type, index);
+ "unexpected receive array type %u for index %u, not handled\n",
+ type, index);
goto done;
}
@@ -9432,12 +9695,15 @@ static void set_send_length(struct hfi1_pportdata *ppd)
/* all kernel receive contexts have the same hdrqentsize */
for (i = 0; i < ppd->vls_supported; i++) {
sc_set_cr_threshold(dd->vld[i].sc,
- sc_mtu_to_threshold(dd->vld[i].sc, dd->vld[i].mtu,
- dd->rcd[0]->rcvhdrqentsize));
+ sc_mtu_to_threshold(dd->vld[i].sc,
+ dd->vld[i].mtu,
+ dd->rcd[0]->
+ rcvhdrqentsize));
}
sc_set_cr_threshold(dd->vld[15].sc,
- sc_mtu_to_threshold(dd->vld[15].sc, dd->vld[15].mtu,
- dd->rcd[0]->rcvhdrqentsize));
+ sc_mtu_to_threshold(dd->vld[15].sc,
+ dd->vld[15].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
/* Adjust maximum MTU for the port in DC */
dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
@@ -9463,7 +9729,7 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
| DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
- << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT)|
+ << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
<< DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
@@ -9498,8 +9764,8 @@ static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
break;
if (time_after(jiffies, timeout)) {
dd_dev_err(dd,
- "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
- state, curr_state);
+ "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
+ state, curr_state);
return -ETIMEDOUT;
}
usleep_range(1950, 2050); /* sleep 2ms-ish */
@@ -9542,17 +9808,18 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
if (do_transition) {
ret = set_physical_link_state(dd,
- PLS_OFFLINE | (rem_reason << 8));
+ (rem_reason << 8) | PLS_OFFLINE);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
- "Failed to transition to Offline link state, return %d\n",
- ret);
+ "Failed to transition to Offline link state, return %d\n",
+ ret);
return -EINVAL;
}
- if (ppd->offline_disabled_reason == OPA_LINKDOWN_REASON_NONE)
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
ppd->offline_disabled_reason =
- OPA_LINKDOWN_REASON_TRANSIENT;
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
}
if (do_wait) {
@@ -9573,6 +9840,22 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
+ if (ppd->port_type == PORT_TYPE_QSFP &&
+ ppd->qsfp_info.limiting_active &&
+ qsfp_mod_present(ppd)) {
+ int ret;
+
+ ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
+ if (ret == 0) {
+ set_qsfp_tx(ppd, 0);
+ release_chip_resource(dd, qsfp_resource(dd));
+ } else {
+ /* not fatal, but should warn */
+ dd_dev_err(dd,
+ "Unable to acquire lock to turn off QSFP TX\n");
+ }
+ }
+
/*
* The LNI has a mandatory wait time after the physical state
* moves to Offline.Quiet. The wait time may be different
@@ -9585,7 +9868,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
ret = wait_fm_ready(dd, 7000);
if (ret) {
dd_dev_err(dd,
- "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
+ "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
/* state is really offline, so make it so */
ppd->host_link_state = HLS_DN_OFFLINE;
return ret;
@@ -9608,8 +9891,8 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
read_last_local_state(dd, &last_local_state);
read_last_remote_state(dd, &last_remote_state);
dd_dev_err(dd,
- "LNI failure last states: local 0x%08x, remote 0x%08x\n",
- last_local_state, last_remote_state);
+ "LNI failure last states: local 0x%08x, remote 0x%08x\n",
+ last_local_state, last_remote_state);
}
/* the active link width (downgrade) is 0 on link down */
@@ -9757,14 +10040,14 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
state = dd->link_default;
/* interpret poll -> poll as a link bounce */
- poll_bounce = ppd->host_link_state == HLS_DN_POLL
- && state == HLS_DN_POLL;
+ poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
+ state == HLS_DN_POLL;
dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
- link_state_name(ppd->host_link_state),
- link_state_name(orig_new_state),
- poll_bounce ? "(bounce) " : "",
- link_state_reason_name(ppd, state));
+ link_state_name(ppd->host_link_state),
+ link_state_name(orig_new_state),
+ poll_bounce ? "(bounce) " : "",
+ link_state_reason_name(ppd, state));
was_up = !!(ppd->host_link_state & HLS_UP);
@@ -9785,8 +10068,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
switch (state) {
case HLS_UP_INIT:
- if (ppd->host_link_state == HLS_DN_POLL && (quick_linkup
- || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
+ if (ppd->host_link_state == HLS_DN_POLL &&
+ (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
/*
* Quick link up jumps from polling to here.
*
@@ -9794,7 +10077,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
* simulator jumps from polling to link up.
* Accept that here.
*/
- /* OK */;
+ /* OK */
} else if (ppd->host_link_state != HLS_GOING_UP) {
goto unexpected;
}
@@ -9805,8 +10088,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
/* logical state didn't change, stay at going_up */
ppd->host_link_state = HLS_GOING_UP;
dd_dev_err(dd,
- "%s: logical state did not change to INIT\n",
- __func__);
+ "%s: logical state did not change to INIT\n",
+ __func__);
} else {
/* clear old transient LINKINIT_REASON code */
if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
@@ -9830,8 +10113,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
/* logical state didn't change, stay at init */
ppd->host_link_state = HLS_UP_INIT;
dd_dev_err(dd,
- "%s: logical state did not change to ARMED\n",
- __func__);
+ "%s: logical state did not change to ARMED\n",
+ __func__);
}
/*
* The simulator does not currently implement SMA messages,
@@ -9852,15 +10135,14 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
/* logical state didn't change, stay at armed */
ppd->host_link_state = HLS_UP_ARMED;
dd_dev_err(dd,
- "%s: logical state did not change to ACTIVE\n",
- __func__);
+ "%s: logical state did not change to ACTIVE\n",
+ __func__);
} else {
-
/* tell all engines to go running */
sdma_all_running(dd);
/* Signal the IB layer that the port has gone active */
- event.device = &dd->verbs_dev.ibdev;
+ event.device = &dd->verbs_dev.rdi.ibdev;
event.element.port_num = ppd->port;
event.event = IB_EVENT_PORT_ACTIVE;
}
@@ -9887,6 +10169,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
ppd->link_enabled = 1;
}
+ set_all_slowpath(ppd->dd);
ret = set_local_link_attributes(ppd);
if (ret)
break;
@@ -9901,12 +10184,13 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
ret1 = set_physical_link_state(dd, PLS_POLLING);
if (ret1 != HCMD_SUCCESS) {
dd_dev_err(dd,
- "Failed to transition to Polling link state, return 0x%x\n",
- ret1);
+ "Failed to transition to Polling link state, return 0x%x\n",
+ ret1);
ret = -EINVAL;
}
}
- ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
/*
* If an error occurred above, go back to offline. The
* caller may reschedule another attempt.
@@ -9931,8 +10215,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
ret1 = set_physical_link_state(dd, PLS_DISABLED);
if (ret1 != HCMD_SUCCESS) {
dd_dev_err(dd,
- "Failed to transition to Disabled link state, return 0x%x\n",
- ret1);
+ "Failed to transition to Disabled link state, return 0x%x\n",
+ ret1);
ret = -EINVAL;
break;
}
@@ -9960,8 +10244,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
ret1 = set_physical_link_state(dd, PLS_LINKUP);
if (ret1 != HCMD_SUCCESS) {
dd_dev_err(dd,
- "Failed to transition to link up state, return 0x%x\n",
- ret1);
+ "Failed to transition to link up state, return 0x%x\n",
+ ret1);
ret = -EINVAL;
break;
}
@@ -9972,7 +10256,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
default:
dd_dev_info(dd, "%s: state 0x%x: not supported\n",
- __func__, state);
+ __func__, state);
ret = -EINVAL;
break;
}
@@ -9992,8 +10276,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
unexpected:
dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
- __func__, link_state_name(ppd->host_link_state),
- link_state_name(state));
+ __func__, link_state_name(ppd->host_link_state),
+ link_state_name(state));
ret = -EINVAL;
done:
@@ -10019,7 +10303,7 @@ int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
* The VL Arbitrator high limit is sent in units of 4k
* bytes, while HFI stores it in units of 64 bytes.
*/
- val *= 4096/64;
+ val *= 4096 / 64;
reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
<< SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
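/* e.g. an incoming limit of 2 (8 KB) becomes 2 * (4096 / 64) = 128 units of 64 bytes */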
@@ -10034,12 +10318,6 @@ int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
ppd->vls_operational = val;
if (!ppd->port)
ret = -EINVAL;
- else
- ret = sdma_map_init(
- ppd->dd,
- ppd->port - 1,
- val,
- NULL);
}
break;
/*
@@ -10087,8 +10365,8 @@ int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
default:
if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
dd_dev_info(ppd->dd,
- "%s: which %s, val 0x%x: not implemented\n",
- __func__, ib_cfg_name(which), val);
+ "%s: which %s, val 0x%x: not implemented\n",
+ __func__, ib_cfg_name(which), val);
break;
}
return ret;
@@ -10155,6 +10433,7 @@ static int vl_arb_match_cache(struct vl_arb_cache *cache,
{
return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}
+
/* end functions related to vl arbitration table caching */
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
@@ -10242,7 +10521,7 @@ static int get_buffer_control(struct hfi1_devdata *dd,
/* OPA and HFI have a 1-1 mapping */
for (i = 0; i < TXE_NUM_DATA_VL; i++)
- read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8*i), &bc->vl[i]);
+ read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
/* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
@@ -10296,41 +10575,41 @@ static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
- DC_SC_VL_VAL(15_0,
- 0, dp->vlnt[0] & 0xf,
- 1, dp->vlnt[1] & 0xf,
- 2, dp->vlnt[2] & 0xf,
- 3, dp->vlnt[3] & 0xf,
- 4, dp->vlnt[4] & 0xf,
- 5, dp->vlnt[5] & 0xf,
- 6, dp->vlnt[6] & 0xf,
- 7, dp->vlnt[7] & 0xf,
- 8, dp->vlnt[8] & 0xf,
- 9, dp->vlnt[9] & 0xf,
- 10, dp->vlnt[10] & 0xf,
- 11, dp->vlnt[11] & 0xf,
- 12, dp->vlnt[12] & 0xf,
- 13, dp->vlnt[13] & 0xf,
- 14, dp->vlnt[14] & 0xf,
- 15, dp->vlnt[15] & 0xf));
+ DC_SC_VL_VAL(15_0,
+ 0, dp->vlnt[0] & 0xf,
+ 1, dp->vlnt[1] & 0xf,
+ 2, dp->vlnt[2] & 0xf,
+ 3, dp->vlnt[3] & 0xf,
+ 4, dp->vlnt[4] & 0xf,
+ 5, dp->vlnt[5] & 0xf,
+ 6, dp->vlnt[6] & 0xf,
+ 7, dp->vlnt[7] & 0xf,
+ 8, dp->vlnt[8] & 0xf,
+ 9, dp->vlnt[9] & 0xf,
+ 10, dp->vlnt[10] & 0xf,
+ 11, dp->vlnt[11] & 0xf,
+ 12, dp->vlnt[12] & 0xf,
+ 13, dp->vlnt[13] & 0xf,
+ 14, dp->vlnt[14] & 0xf,
+ 15, dp->vlnt[15] & 0xf));
write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
- DC_SC_VL_VAL(31_16,
- 16, dp->vlnt[16] & 0xf,
- 17, dp->vlnt[17] & 0xf,
- 18, dp->vlnt[18] & 0xf,
- 19, dp->vlnt[19] & 0xf,
- 20, dp->vlnt[20] & 0xf,
- 21, dp->vlnt[21] & 0xf,
- 22, dp->vlnt[22] & 0xf,
- 23, dp->vlnt[23] & 0xf,
- 24, dp->vlnt[24] & 0xf,
- 25, dp->vlnt[25] & 0xf,
- 26, dp->vlnt[26] & 0xf,
- 27, dp->vlnt[27] & 0xf,
- 28, dp->vlnt[28] & 0xf,
- 29, dp->vlnt[29] & 0xf,
- 30, dp->vlnt[30] & 0xf,
- 31, dp->vlnt[31] & 0xf));
+ DC_SC_VL_VAL(31_16,
+ 16, dp->vlnt[16] & 0xf,
+ 17, dp->vlnt[17] & 0xf,
+ 18, dp->vlnt[18] & 0xf,
+ 19, dp->vlnt[19] & 0xf,
+ 20, dp->vlnt[20] & 0xf,
+ 21, dp->vlnt[21] & 0xf,
+ 22, dp->vlnt[22] & 0xf,
+ 23, dp->vlnt[23] & 0xf,
+ 24, dp->vlnt[24] & 0xf,
+ 25, dp->vlnt[25] & 0xf,
+ 26, dp->vlnt[26] & 0xf,
+ 27, dp->vlnt[27] & 0xf,
+ 28, dp->vlnt[28] & 0xf,
+ 29, dp->vlnt[29] & 0xf,
+ 30, dp->vlnt[30] & 0xf,
+ 31, dp->vlnt[31] & 0xf));
}
static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
@@ -10338,7 +10617,7 @@ static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
{
if (limit != 0)
dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
- what, (int)limit, idx);
+ what, (int)limit, idx);
}
/* change only the shared limit portion of SendCmGLobalCredit */
@@ -10416,14 +10695,14 @@ static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
}
dd_dev_err(dd,
- "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
- which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
+ "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
+ which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
/*
* If this occurs, it is likely there was a credit loss on the link.
* The only recovery from that is a link bounce.
*/
dd_dev_err(dd,
- "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
+ "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
}
/*
@@ -10450,13 +10729,15 @@ static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
* raise = if the new limit is higher than the current value (may be changed
* earlier in the algorithm), set the new limit to the new value
*/
-static int set_buffer_control(struct hfi1_devdata *dd,
- struct buffer_control *new_bc)
+int set_buffer_control(struct hfi1_pportdata *ppd,
+ struct buffer_control *new_bc)
{
+ struct hfi1_devdata *dd = ppd->dd;
u64 changing_mask, ld_mask, stat_mask;
int change_count;
int i, use_all_mask;
int this_shared_changing;
+ int vl_count = 0, ret;
/*
* A0: add the variable any_shared_limit_changing below and in the
* algorithm above. If removing A0 support, it can be removed.
@@ -10481,7 +10762,6 @@ static int set_buffer_control(struct hfi1_devdata *dd,
#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
#define NUM_USABLE_VLS 16 /* look at VL15 and less */
-
/* find the new total credits, do sanity check on unused VLs */
for (i = 0; i < OPA_MAX_VLS; i++) {
if (valid_vl(i)) {
@@ -10489,9 +10769,9 @@ static int set_buffer_control(struct hfi1_devdata *dd,
continue;
}
nonzero_msg(dd, i, "dedicated",
- be16_to_cpu(new_bc->vl[i].dedicated));
+ be16_to_cpu(new_bc->vl[i].dedicated));
nonzero_msg(dd, i, "shared",
- be16_to_cpu(new_bc->vl[i].shared));
+ be16_to_cpu(new_bc->vl[i].shared));
new_bc->vl[i].dedicated = 0;
new_bc->vl[i].shared = 0;
}
@@ -10505,8 +10785,10 @@ static int set_buffer_control(struct hfi1_devdata *dd,
*/
memset(changing, 0, sizeof(changing));
memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
- /* NOTE: Assumes that the individual VL bits are adjacent and in
- increasing order */
+ /*
+ * NOTE: Assumes that the individual VL bits are adjacent and in
+ * increasing order
+ */
stat_mask =
SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
changing_mask = 0;
@@ -10520,8 +10802,8 @@ static int set_buffer_control(struct hfi1_devdata *dd,
!= cur_bc.vl[i].shared;
if (this_shared_changing)
any_shared_limit_changing = 1;
- if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated
- || this_shared_changing) {
+ if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
+ this_shared_changing) {
changing[i] = 1;
changing_mask |= stat_mask;
change_count++;
@@ -10560,7 +10842,7 @@ static int set_buffer_control(struct hfi1_devdata *dd,
}
wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
- "shared");
+ "shared");
if (change_count > 0) {
for (i = 0; i < NUM_USABLE_VLS; i++) {
@@ -10569,7 +10851,8 @@ static int set_buffer_control(struct hfi1_devdata *dd,
if (lowering_dedicated[i]) {
set_vl_dedicated(dd, i,
- be16_to_cpu(new_bc->vl[i].dedicated));
+ be16_to_cpu(new_bc->
+ vl[i].dedicated));
cur_bc.vl[i].dedicated =
new_bc->vl[i].dedicated;
}
@@ -10585,7 +10868,8 @@ static int set_buffer_control(struct hfi1_devdata *dd,
if (be16_to_cpu(new_bc->vl[i].dedicated) >
be16_to_cpu(cur_bc.vl[i].dedicated))
set_vl_dedicated(dd, i,
- be16_to_cpu(new_bc->vl[i].dedicated));
+ be16_to_cpu(new_bc->
+ vl[i].dedicated));
}
}
@@ -10601,13 +10885,35 @@ static int set_buffer_control(struct hfi1_devdata *dd,
/* finally raise the global shared */
if (be16_to_cpu(new_bc->overall_shared_limit) >
- be16_to_cpu(cur_bc.overall_shared_limit))
+ be16_to_cpu(cur_bc.overall_shared_limit))
set_global_shared(dd,
- be16_to_cpu(new_bc->overall_shared_limit));
+ be16_to_cpu(new_bc->overall_shared_limit));
/* bracket the credit change with a total adjustment */
if (new_total < cur_total)
set_global_limit(dd, new_total);
+
+ /*
+ * Determine the actual number of operational VLs using the number of
+ * dedicated and shared credits for each VL.
+ */
+ if (change_count > 0) {
+ for (i = 0; i < TXE_NUM_DATA_VL; i++)
+ if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
+ be16_to_cpu(new_bc->vl[i].shared) > 0)
+ vl_count++;
+ ppd->actual_vls_operational = vl_count;
+ ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
+ ppd->actual_vls_operational :
+ ppd->vls_operational,
+ NULL);
+ if (ret == 0)
+ ret = pio_map_init(dd, ppd->port - 1, vl_count ?
+ ppd->actual_vls_operational :
+ ppd->vls_operational, NULL);
+ if (ret)
+ return ret;
+ }
return 0;
}
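/*
 * Illustration of the accounting above: if only VL0-VL3 carry dedicated
 * or shared credits, vl_count ends up 4 and the SDMA and PIO maps are
 * rebuilt for 4 operational VLs; with no credited data VLs, the maps
 * fall back to the configured ppd->vls_operational.
 */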
@@ -10699,7 +11005,7 @@ int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
VL_ARB_LOW_PRIO_TABLE_SIZE, t);
break;
case FM_TBL_BUFFER_CONTROL:
- ret = set_buffer_control(ppd->dd, t);
+ ret = set_buffer_control(ppd, t);
break;
case FM_TBL_SC2VLNT:
set_sc2vlnt(ppd->dd, t);
@@ -10849,10 +11155,13 @@ static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
}
rcd->rcvavail_timeout = timeout;
- /* timeout cannot be larger than rcv_intr_timeout_csr which has already
- been verified to be in range */
+ /*
+ * timeout cannot be larger than rcv_intr_timeout_csr, which has already
+ * been verified to be in range
+ */
write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
- (u64)timeout << RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
+ (u64)timeout <<
+ RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}
void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
@@ -10918,16 +11227,16 @@ u32 hdrqempty(struct hfi1_ctxtdata *rcd)
static u32 encoded_size(u32 size)
{
switch (size) {
- case 4*1024: return 0x1;
- case 8*1024: return 0x2;
- case 16*1024: return 0x3;
- case 32*1024: return 0x4;
- case 64*1024: return 0x5;
- case 128*1024: return 0x6;
- case 256*1024: return 0x7;
- case 512*1024: return 0x8;
- case 1*1024*1024: return 0x9;
- case 2*1024*1024: return 0xa;
+ case 4 * 1024: return 0x1;
+ case 8 * 1024: return 0x2;
+ case 16 * 1024: return 0x3;
+ case 32 * 1024: return 0x4;
+ case 64 * 1024: return 0x5;
+ case 128 * 1024: return 0x6;
+ case 256 * 1024: return 0x7;
+ case 512 * 1024: return 0x8;
+ case 1 * 1024 * 1024: return 0x9;
+ case 2 * 1024 * 1024: return 0xa;
}
return 0x1; /* if invalid, go with the minimum size */
}
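/*
 * For example, encoded_size(64 * 1024) returns 0x5; each doubling of the
 * eager buffer size bumps the encoding by one, starting from 4 KB == 0x1.
 */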
@@ -10946,8 +11255,8 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
/* if the context already enabled, don't do the extra steps */
- if ((op & HFI1_RCVCTRL_CTXT_ENB)
- && !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
+ if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
+ !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
/* reset the tail and hdr addresses, and sequence count */
write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
rcd->rcvhdrq_phys);
@@ -11021,6 +11330,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
if (dd->rcvhdrtail_dummy_physaddr) {
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
dd->rcvhdrtail_dummy_physaddr);
+ /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
}
@@ -11032,15 +11342,20 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
- if (op & HFI1_RCVCTRL_TAILUPD_DIS)
- rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
+ /* See comment on RcvCtxtCtrl.TailUpd above */
+ if (!(op & HFI1_RCVCTRL_CTXT_DIS))
+ rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ }
if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
- /* In one-packet-per-eager mode, the size comes from
- the RcvArray entry. */
+ /*
+ * In one-packet-per-eager mode, the size comes from
+ * the RcvArray entry.
+ */
rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
}
@@ -11059,19 +11374,19 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
/* work around sticky RcvCtxtStatus.BlockedRHQFull */
- if (did_enable
- && (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
+ if (did_enable &&
+ (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
if (reg != 0) {
dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
- ctxt, reg);
+ ctxt, reg);
read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
- ctxt, reg, reg == 0 ? "not" : "still");
+ ctxt, reg, reg == 0 ? "not" : "still");
}
}
@@ -11082,7 +11397,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
*/
/* set interrupt timeout */
write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
- (u64)rcd->rcvavail_timeout <<
+ (u64)rcd->rcvavail_timeout <<
RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
/* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
@@ -11100,28 +11415,19 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
dd->rcvhdrtail_dummy_physaddr);
}
-u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
+u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
{
int ret;
u64 val = 0;
if (namep) {
ret = dd->cntrnameslen;
- if (pos != 0) {
- dd_dev_err(dd, "read_cntrs does not support indexing");
- return 0;
- }
*namep = dd->cntrnames;
} else {
const struct cntr_entry *entry;
int i, j;
ret = (dd->ndevcntrs) * sizeof(u64);
- if (pos != 0) {
- dd_dev_err(dd, "read_cntrs does not support indexing");
- return 0;
- }
/* Get the start of the block of counters */
*cntrp = dd->cntrs;
@@ -11150,6 +11456,20 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
dd->cntrs[entry->offset + j] =
val;
}
+ } else if (entry->flags & CNTR_SDMA) {
+ hfi1_cdbg(CNTR,
+ "\t Per SDMA Engine\n");
+ for (j = 0; j < dd->chip_sdma_engines;
+ j++) {
+ val =
+ entry->rw_cntr(entry, dd, j,
+ CNTR_MODE_R, 0);
+ hfi1_cdbg(CNTR,
+ "\t\tRead 0x%llx for %d\n",
+ val, j);
+ dd->cntrs[entry->offset + j] =
+ val;
+ }
} else {
val = entry->rw_cntr(entry, dd,
CNTR_INVALID_VL,
@@ -11166,30 +11486,19 @@ u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
/*
* Used by sysfs to create files for hfi stats to read
*/
-u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
+u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
{
int ret;
u64 val = 0;
if (namep) {
- ret = dd->portcntrnameslen;
- if (pos != 0) {
- dd_dev_err(dd, "index not supported");
- return 0;
- }
- *namep = dd->portcntrnames;
+ ret = ppd->dd->portcntrnameslen;
+ *namep = ppd->dd->portcntrnames;
} else {
const struct cntr_entry *entry;
- struct hfi1_pportdata *ppd;
int i, j;
- ret = (dd->nportcntrs) * sizeof(u64);
- if (pos != 0) {
- dd_dev_err(dd, "indexing not supported");
- return 0;
- }
- ppd = (struct hfi1_pportdata *)(dd + 1 + port);
+ ret = ppd->dd->nportcntrs * sizeof(u64);
*cntrp = ppd->cntrs;
for (i = 0; i < PORT_CNTR_LAST; i++) {
@@ -11238,14 +11547,14 @@ static void free_cntrs(struct hfi1_devdata *dd)
for (i = 0; i < dd->num_pports; i++, ppd++) {
kfree(ppd->cntrs);
kfree(ppd->scntrs);
- free_percpu(ppd->ibport_data.rc_acks);
- free_percpu(ppd->ibport_data.rc_qacks);
- free_percpu(ppd->ibport_data.rc_delayed_comp);
+ free_percpu(ppd->ibport_data.rvp.rc_acks);
+ free_percpu(ppd->ibport_data.rvp.rc_qacks);
+ free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
ppd->cntrs = NULL;
ppd->scntrs = NULL;
- ppd->ibport_data.rc_acks = NULL;
- ppd->ibport_data.rc_qacks = NULL;
- ppd->ibport_data.rc_delayed_comp = NULL;
+ ppd->ibport_data.rvp.rc_acks = NULL;
+ ppd->ibport_data.rvp.rc_qacks = NULL;
+ ppd->ibport_data.rvp.rc_delayed_comp = NULL;
}
kfree(dd->portcntrnames);
dd->portcntrnames = NULL;
@@ -11513,11 +11822,13 @@ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
#define C_MAX_NAME 13 /* 12 chars + one for \0 */
static int init_cntrs(struct hfi1_devdata *dd)
{
- int i, rcv_ctxts, index, j;
+ int i, rcv_ctxts, j;
size_t sz;
char *p;
char name[C_MAX_NAME];
struct hfi1_pportdata *ppd;
+ const char *bit_type_32 = ",32";
+ const int bit_type_32_sz = strlen(bit_type_32);
/* set up the stats timer; the add_timer is done at the end */
setup_timer(&dd->synth_stats_timer, update_synth_timer,
@@ -11530,49 +11841,57 @@ static int init_cntrs(struct hfi1_devdata *dd)
/* size names and determine how many we have */
dd->ndevcntrs = 0;
sz = 0;
- index = 0;
for (i = 0; i < DEV_CNTR_LAST; i++) {
- hfi1_dbg_early("Init cntr %s\n", dev_cntrs[i].name);
if (dev_cntrs[i].flags & CNTR_DISABLED) {
hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
continue;
}
if (dev_cntrs[i].flags & CNTR_VL) {
- hfi1_dbg_early("\tProcessing VL cntr\n");
- dev_cntrs[i].offset = index;
+ dev_cntrs[i].offset = dd->ndevcntrs;
for (j = 0; j < C_VL_COUNT; j++) {
- memset(name, '\0', C_MAX_NAME);
snprintf(name, C_MAX_NAME, "%s%d",
- dev_cntrs[i].name,
- vl_from_idx(j));
+ dev_cntrs[i].name, vl_from_idx(j));
+ sz += strlen(name);
+ /* Add ",32" for 32-bit counters */
+ if (dev_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
+ sz++;
+ dd->ndevcntrs++;
+ }
+ } else if (dev_cntrs[i].flags & CNTR_SDMA) {
+ dev_cntrs[i].offset = dd->ndevcntrs;
+ for (j = 0; j < dd->chip_sdma_engines; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name, j);
sz += strlen(name);
+ /* Add ",32" for 32-bit counters */
+ if (dev_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
sz++;
- hfi1_dbg_early("\t\t%s\n", name);
dd->ndevcntrs++;
- index++;
}
} else {
- /* +1 for newline */
+ /* +1 for newline. */
sz += strlen(dev_cntrs[i].name) + 1;
+ /* Add ",32" for 32-bit counters */
+ if (dev_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
+ dev_cntrs[i].offset = dd->ndevcntrs;
dd->ndevcntrs++;
- dev_cntrs[i].offset = index;
- index++;
- hfi1_dbg_early("\tAdding %s\n", dev_cntrs[i].name);
}
}
/* allocate space for the counter values */
- dd->cntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
+ dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
if (!dd->cntrs)
goto bail;
- dd->scntrs = kcalloc(index, sizeof(u64), GFP_KERNEL);
+ dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
if (!dd->scntrs)
goto bail;
-
/* allocate space for the counter names */
dd->cntrnameslen = sz;
dd->cntrnames = kmalloc(sz, GFP_KERNEL);
@@ -11580,27 +11899,51 @@ static int init_cntrs(struct hfi1_devdata *dd)
goto bail;
/* fill in the names */
- for (p = dd->cntrnames, i = 0, index = 0; i < DEV_CNTR_LAST; i++) {
+ for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
if (dev_cntrs[i].flags & CNTR_DISABLED) {
/* Nothing */
- } else {
- if (dev_cntrs[i].flags & CNTR_VL) {
- for (j = 0; j < C_VL_COUNT; j++) {
- memset(name, '\0', C_MAX_NAME);
- snprintf(name, C_MAX_NAME, "%s%d",
- dev_cntrs[i].name,
- vl_from_idx(j));
- memcpy(p, name, strlen(name));
- p += strlen(name);
- *p++ = '\n';
+ } else if (dev_cntrs[i].flags & CNTR_VL) {
+ for (j = 0; j < C_VL_COUNT; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name,
+ vl_from_idx(j));
+ memcpy(p, name, strlen(name));
+ p += strlen(name);
+
+ /* Counter is 32 bits */
+ if (dev_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
}
- } else {
- memcpy(p, dev_cntrs[i].name,
- strlen(dev_cntrs[i].name));
- p += strlen(dev_cntrs[i].name);
+
*p++ = '\n';
}
- index++;
+ } else if (dev_cntrs[i].flags & CNTR_SDMA) {
+ for (j = 0; j < dd->chip_sdma_engines; j++) {
+ snprintf(name, C_MAX_NAME, "%s%d",
+ dev_cntrs[i].name, j);
+ memcpy(p, name, strlen(name));
+ p += strlen(name);
+
+ /* Counter is 32 bits */
+ if (dev_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
+ *p++ = '\n';
+ }
+ } else {
+ memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
+ p += strlen(dev_cntrs[i].name);
+
+ /* Counter is 32 bits */
+ if (dev_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
+ *p++ = '\n';
}
}
@@ -11623,31 +11966,31 @@ static int init_cntrs(struct hfi1_devdata *dd)
sz = 0;
dd->nportcntrs = 0;
for (i = 0; i < PORT_CNTR_LAST; i++) {
- hfi1_dbg_early("Init pcntr %s\n", port_cntrs[i].name);
if (port_cntrs[i].flags & CNTR_DISABLED) {
hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
continue;
}
if (port_cntrs[i].flags & CNTR_VL) {
- hfi1_dbg_early("\tProcessing VL cntr\n");
port_cntrs[i].offset = dd->nportcntrs;
for (j = 0; j < C_VL_COUNT; j++) {
- memset(name, '\0', C_MAX_NAME);
snprintf(name, C_MAX_NAME, "%s%d",
- port_cntrs[i].name,
- vl_from_idx(j));
+ port_cntrs[i].name, vl_from_idx(j));
sz += strlen(name);
+ /* Add ",32" for 32-bit counters */
+ if (port_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
sz++;
- hfi1_dbg_early("\t\t%s\n", name);
dd->nportcntrs++;
}
} else {
- /* +1 for newline */
+ /* +1 for newline */
sz += strlen(port_cntrs[i].name) + 1;
+ /* Add ",32" for 32-bit counters */
+ if (port_cntrs[i].flags & CNTR_32BIT)
+ sz += bit_type_32_sz;
port_cntrs[i].offset = dd->nportcntrs;
dd->nportcntrs++;
- hfi1_dbg_early("\tAdding %s\n", port_cntrs[i].name);
}
}
@@ -11664,18 +12007,30 @@ static int init_cntrs(struct hfi1_devdata *dd)
if (port_cntrs[i].flags & CNTR_VL) {
for (j = 0; j < C_VL_COUNT; j++) {
- memset(name, '\0', C_MAX_NAME);
snprintf(name, C_MAX_NAME, "%s%d",
- port_cntrs[i].name,
- vl_from_idx(j));
+ port_cntrs[i].name, vl_from_idx(j));
memcpy(p, name, strlen(name));
p += strlen(name);
+
+ /* Counter is 32 bits */
+ if (port_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
*p++ = '\n';
}
} else {
memcpy(p, port_cntrs[i].name,
strlen(port_cntrs[i].name));
p += strlen(port_cntrs[i].name);
+
+ /* Counter is 32 bits */
+ if (port_cntrs[i].flags & CNTR_32BIT) {
+ memcpy(p, bit_type_32, bit_type_32_sz);
+ p += bit_type_32_sz;
+ }
+
*p++ = '\n';
}
}
@@ -11703,14 +12058,13 @@ bail:
return -ENOMEM;
}
-
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
{
switch (chip_lstate) {
default:
dd_dev_err(dd,
- "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
- chip_lstate);
+ "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
+ chip_lstate);
/* fall through */
case LSTATE_DOWN:
return IB_PORT_DOWN;
@@ -11729,7 +12083,7 @@ u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
switch (chip_pstate & 0xf0) {
default:
dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
- chip_pstate);
+ chip_pstate);
/* fall through */
case PLS_DISABLED:
return IB_PORTPHYSSTATE_DISABLED;
@@ -11795,7 +12149,7 @@ u32 get_logical_state(struct hfi1_pportdata *ppd)
new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
if (new_state != ppd->lstate) {
dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
- opa_lstate_name(new_state), new_state);
+ opa_lstate_name(new_state), new_state);
ppd->lstate = new_state;
}
/*
@@ -11854,18 +12208,17 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
{
- static u32 remembered_state = 0xff;
u32 pstate;
u32 ib_pstate;
pstate = read_physical_state(ppd->dd);
ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
- if (remembered_state != ib_pstate) {
+ if (ppd->last_pstate != ib_pstate) {
dd_dev_info(ppd->dd,
- "%s: physical state changed to %s (0x%x), phy 0x%x\n",
- __func__, opa_pstate_name(ib_pstate), ib_pstate,
- pstate);
- remembered_state = ib_pstate;
+ "%s: physical state changed to %s (0x%x), phy 0x%x\n",
+ __func__, opa_pstate_name(ib_pstate), ib_pstate,
+ pstate);
+ ppd->last_pstate = ib_pstate;
}
return ib_pstate;
}
@@ -11909,7 +12262,7 @@ u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
int hfi1_init_ctxt(struct send_context *sc)
{
- if (sc != NULL) {
+ if (sc) {
struct hfi1_devdata *dd = sc->dd;
u64 reg;
u8 set = (sc->type == SC_USER ?
@@ -11966,34 +12319,14 @@ void set_intr_state(struct hfi1_devdata *dd, u32 enable)
* In HFI, the mask needs to be 1 to allow interrupts.
*/
if (enable) {
- u64 cce_int_mask;
- const int qsfp1_int_smask = QSFP1_INT % 64;
- const int qsfp2_int_smask = QSFP2_INT % 64;
-
/* enable all interrupts */
for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8*i), ~(u64)0);
+ write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
- /*
- * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
- * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
- * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
- * the index of the appropriate CSR in the CCEIntMask CSR array
- */
- cce_int_mask = read_csr(dd, CCE_INT_MASK +
- (8*(QSFP1_INT/64)));
- if (dd->hfi1_id) {
- cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
- write_csr(dd, CCE_INT_MASK + (8*(QSFP1_INT/64)),
- cce_int_mask);
- } else {
- cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
- write_csr(dd, CCE_INT_MASK + (8*(QSFP2_INT/64)),
- cce_int_mask);
- }
+ init_qsfp_int(dd);
} else {
for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
+ write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
}
}
@@ -12005,7 +12338,7 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
int i;
for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_CLEAR + (8*i), ~(u64)0);
+ write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
@@ -12040,10 +12373,9 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
struct hfi1_msix_entry *me = dd->msix_entries;
for (i = 0; i < dd->num_msix_entries; i++, me++) {
- if (me->arg == NULL) /* => no irq, no affinity */
- break;
- irq_set_affinity_hint(dd->msix_entries[i].msix.vector,
- NULL);
+ if (!me->arg) /* => no irq, no affinity */
+ continue;
+ hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
free_irq(me->msix.vector, me->arg);
}
} else {
@@ -12064,8 +12396,6 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
}
/* clean structures */
- for (i = 0; i < dd->num_msix_entries; i++)
- free_cpumask_var(dd->msix_entries[i].mask);
kfree(dd->msix_entries);
dd->msix_entries = NULL;
dd->num_msix_entries = 0;
@@ -12088,10 +12418,10 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
/* direct the chip source to the given MSI-X interrupt */
m = isrc / 8;
n = isrc % 8;
- reg = read_csr(dd, CCE_INT_MAP + (8*m));
- reg &= ~((u64)0xff << (8*n));
- reg |= ((u64)msix_intr & 0xff) << (8*n);
- write_csr(dd, CCE_INT_MAP + (8*m), reg);
+ reg = read_csr(dd, CCE_INT_MAP + (8 * m));
+ reg &= ~((u64)0xff << (8 * n));
+ reg |= ((u64)msix_intr & 0xff) << (8 * n);
+ write_csr(dd, CCE_INT_MAP + (8 * m), reg);
}
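
A worked example of the mapping above: chip interrupt source isrc = 13 gives m = 13 / 8 = 1 and n = 13 % 8 = 5, so byte 5 of the CCE_INT_MAP CSR at index 1 receives the MSI-X vector number; each map CSR therefore steers eight chip sources, one per byte.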
static void remap_sdma_interrupts(struct hfi1_devdata *dd,
@@ -12104,12 +12434,12 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
* SDMAProgress
* SDMAIdle
*/
- remap_intr(dd, IS_SDMA_START + 0*TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 1*TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 2*TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
+ remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
+ msix_intr);
+ remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
+ msix_intr);
+ remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
+ msix_intr);
}
static int request_intx_irq(struct hfi1_devdata *dd)
@@ -12119,10 +12449,10 @@ static int request_intx_irq(struct hfi1_devdata *dd)
snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
dd->unit);
ret = request_irq(dd->pcidev->irq, general_interrupt,
- IRQF_SHARED, dd->intx_name, dd);
+ IRQF_SHARED, dd->intx_name, dd);
if (ret)
dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
- ret);
+ ret);
else
dd->requested_intx_irq = 1;
return ret;
@@ -12130,70 +12460,20 @@ static int request_intx_irq(struct hfi1_devdata *dd)
static int request_msix_irqs(struct hfi1_devdata *dd)
{
- const struct cpumask *local_mask;
- cpumask_var_t def, rcv;
- bool def_ret, rcv_ret;
int first_general, last_general;
int first_sdma, last_sdma;
int first_rx, last_rx;
- int first_cpu, curr_cpu;
- int rcv_cpu, sdma_cpu;
- int i, ret = 0, possible;
- int ht;
+ int i, ret = 0;
/* calculate the ranges we are going to use */
first_general = 0;
- first_sdma = last_general = first_general + 1;
- first_rx = last_sdma = first_sdma + dd->num_sdma;
+ last_general = first_general + 1;
+ first_sdma = last_general;
+ last_sdma = first_sdma + dd->num_sdma;
+ first_rx = last_sdma;
last_rx = first_rx + dd->n_krcv_queues;
/*
- * Interrupt affinity.
- *
- * non-rcv avail gets a default mask that
- * starts as possible cpus with threads reset
- * and each rcv avail reset.
- *
- * rcv avail gets node relative 1 wrapping back
- * to the node relative 1 as necessary.
- *
- */
- local_mask = cpumask_of_pcibus(dd->pcidev->bus);
- /* if first cpu is invalid, use NUMA 0 */
- if (cpumask_first(local_mask) >= nr_cpu_ids)
- local_mask = topology_core_cpumask(0);
-
- def_ret = zalloc_cpumask_var(&def, GFP_KERNEL);
- rcv_ret = zalloc_cpumask_var(&rcv, GFP_KERNEL);
- if (!def_ret || !rcv_ret)
- goto bail;
- /* use local mask as default */
- cpumask_copy(def, local_mask);
- possible = cpumask_weight(def);
- /* disarm threads from default */
- ht = cpumask_weight(
- topology_sibling_cpumask(cpumask_first(local_mask)));
- for (i = possible/ht; i < possible; i++)
- cpumask_clear_cpu(i, def);
- /* def now has full cores on chosen node*/
- first_cpu = cpumask_first(def);
- if (nr_cpu_ids >= first_cpu)
- first_cpu++;
- curr_cpu = first_cpu;
-
- /* One context is reserved as control context */
- for (i = first_cpu; i < dd->n_krcv_queues + first_cpu - 1; i++) {
- cpumask_clear_cpu(curr_cpu, def);
- cpumask_set_cpu(curr_cpu, rcv);
- curr_cpu = cpumask_next(curr_cpu, def);
- if (curr_cpu >= nr_cpu_ids)
- break;
- }
- /* def mask has non-rcv, rcv has recv mask */
- rcv_cpu = cpumask_first(rcv);
- sdma_cpu = cpumask_first(def);
-
- /*
* Sanity check - the code expects all SDMA chip source
* interrupts to be in the same CSR, starting at bit 0. Verify
* that this is true by checking the bit location of the start.
@@ -12218,6 +12498,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
snprintf(me->name, sizeof(me->name),
DRIVER_NAME "_%d", dd->unit);
err_info = "general";
+ me->type = IRQ_GENERAL;
} else if (first_sdma <= i && i < last_sdma) {
idx = i - first_sdma;
sde = &dd->per_sdma[idx];
@@ -12227,6 +12508,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
DRIVER_NAME "_%d sdma%d", dd->unit, idx);
err_info = "sdma";
remap_sdma_interrupts(dd, idx, i);
+ me->type = IRQ_SDMA;
} else if (first_rx <= i && i < last_rx) {
idx = i - first_rx;
rcd = dd->rcd[idx];
@@ -12237,9 +12519,9 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
* Set the interrupt register and mask for this
* context's interrupt.
*/
- rcd->ireg = (IS_RCVAVAIL_START+idx) / 64;
+ rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
rcd->imask = ((u64)1) <<
- ((IS_RCVAVAIL_START+idx) % 64);
+ ((IS_RCVAVAIL_START + idx) % 64);
handler = receive_context_interrupt;
thread = receive_context_thread;
arg = rcd;
@@ -12247,25 +12529,27 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
err_info = "receive context";
remap_intr(dd, IS_RCVAVAIL_START + idx, i);
+ me->type = IRQ_RCVCTXT;
} else {
/* not in our expected range - complain, then
- ignore it */
+ * ignore it
+ */
dd_dev_err(dd,
- "Unexpected extra MSI-X interrupt %d\n", i);
+ "Unexpected extra MSI-X interrupt %d\n", i);
continue;
}
/* no argument, no interrupt */
- if (arg == NULL)
+ if (!arg)
continue;
/* make sure the name is terminated */
- me->name[sizeof(me->name)-1] = 0;
+ me->name[sizeof(me->name) - 1] = 0;
ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
- me->name, arg);
+ me->name, arg);
if (ret) {
dd_dev_err(dd,
- "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
- err_info, me->msix.vector, idx, ret);
+ "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
+ err_info, me->msix.vector, idx, ret);
return ret;
}
/*
@@ -12274,52 +12558,13 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
*/
me->arg = arg;
- if (!zalloc_cpumask_var(
- &dd->msix_entries[i].mask,
- GFP_KERNEL))
- goto bail;
- if (handler == sdma_interrupt) {
- dd_dev_info(dd, "sdma engine %d cpu %d\n",
- sde->this_idx, sdma_cpu);
- sde->cpu = sdma_cpu;
- cpumask_set_cpu(sdma_cpu, dd->msix_entries[i].mask);
- sdma_cpu = cpumask_next(sdma_cpu, def);
- if (sdma_cpu >= nr_cpu_ids)
- sdma_cpu = cpumask_first(def);
- } else if (handler == receive_context_interrupt) {
- dd_dev_info(dd, "rcv ctxt %d cpu %d\n", rcd->ctxt,
- (rcd->ctxt == HFI1_CTRL_CTXT) ?
- cpumask_first(def) : rcv_cpu);
- if (rcd->ctxt == HFI1_CTRL_CTXT) {
- /* map to first default */
- cpumask_set_cpu(cpumask_first(def),
- dd->msix_entries[i].mask);
- } else {
- cpumask_set_cpu(rcv_cpu,
- dd->msix_entries[i].mask);
- rcv_cpu = cpumask_next(rcv_cpu, rcv);
- if (rcv_cpu >= nr_cpu_ids)
- rcv_cpu = cpumask_first(rcv);
- }
- } else {
- /* otherwise first def */
- dd_dev_info(dd, "%s cpu %d\n",
- err_info, cpumask_first(def));
- cpumask_set_cpu(
- cpumask_first(def), dd->msix_entries[i].mask);
- }
- irq_set_affinity_hint(
- dd->msix_entries[i].msix.vector,
- dd->msix_entries[i].mask);
+ ret = hfi1_get_irq_affinity(dd, me);
+ if (ret)
+ dd_dev_err(dd,
+ "unable to pin IRQ %d\n", ret);
}
-out:
- free_cpumask_var(def);
- free_cpumask_var(rcv);
return ret;
-bail:
- ret = -ENOMEM;
- goto out;
}
/*
@@ -12336,7 +12581,7 @@ static void reset_interrupts(struct hfi1_devdata *dd)
/* all chip interrupts map to MSI-X 0 */
for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
- write_csr(dd, CCE_INT_MAP + (8*i), 0);
+ write_csr(dd, CCE_INT_MAP + (8 * i), 0);
}
static int set_up_interrupts(struct hfi1_devdata *dd)
@@ -12445,7 +12690,7 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
*/
num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
else
- num_kernel_contexts = num_online_nodes();
+ num_kernel_contexts = num_online_nodes() + 1;
num_kernel_contexts =
max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
/*
@@ -12486,13 +12731,14 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
dd->num_rcv_contexts = total_contexts;
dd->n_krcv_queues = num_kernel_contexts;
dd->first_user_ctxt = num_kernel_contexts;
+ dd->num_user_contexts = num_user_contexts;
dd->freectxts = num_user_contexts;
dd_dev_info(dd,
- "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
- (int)dd->chip_rcv_contexts,
- (int)dd->num_rcv_contexts,
- (int)dd->n_krcv_queues,
- (int)dd->num_rcv_contexts - dd->n_krcv_queues);
+ "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
+ (int)dd->chip_rcv_contexts,
+ (int)dd->num_rcv_contexts,
+ (int)dd->n_krcv_queues,
+ (int)dd->num_rcv_contexts - dd->n_krcv_queues);
/*
* Receive array allocation:
@@ -12518,8 +12764,8 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
dd->rcv_entries.group_size;
dd_dev_info(dd,
- "RcvArray group count too high, change to %u\n",
- dd->rcv_entries.ngroups);
+ "RcvArray group count too high, change to %u\n",
+ dd->rcv_entries.ngroups);
dd->rcv_entries.nctxt_extra = 0;
}
/*
@@ -12585,7 +12831,7 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
/* CceIntMap */
for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
- write_csr(dd, CCE_INT_MAP+(8*i), 0);
+ write_csr(dd, CCE_INT_MAP + (8 * i), 0);
/* SendCtxtCreditReturnAddr */
for (i = 0; i < dd->chip_send_contexts; i++)
@@ -12593,8 +12839,10 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
/* PIO Send buffers */
/* SDMA Send buffers */
- /* These are not normally read, and (presently) have no method
- to be read, so are not pre-initialized */
+ /*
+ * These are not normally read, and (presently) have no method
+ * to be read, so are not pre-initialized
+ */
/* RcvHdrAddr */
/* RcvHdrTailAddr */
@@ -12603,13 +12851,13 @@ static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
- write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE+(8*j), 0);
+ write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
}
/* RcvArray */
for (i = 0; i < dd->chip_rcv_array_count; i++)
- write_csr(dd, RCV_ARRAY + (8*i),
- RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
+ write_csr(dd, RCV_ARRAY + (8 * i),
+ RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
/* RcvQPMapTable */
for (i = 0; i < 32; i++)
@@ -12641,8 +12889,8 @@ static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
return;
if (time_after(jiffies, timeout)) {
dd_dev_err(dd,
- "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
- status_bits, reg & status_bits);
+ "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
+ status_bits, reg & status_bits);
return;
}
udelay(1);
@@ -12674,7 +12922,7 @@ static void reset_cce_csrs(struct hfi1_devdata *dd)
for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
- CCE_MSIX_TABLE_UPPER_RESETCSR);
+ CCE_MSIX_TABLE_UPPER_RESETCSR);
}
for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
/* CCE_MSIX_PBA read-only */
@@ -12694,91 +12942,6 @@ static void reset_cce_csrs(struct hfi1_devdata *dd)
write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
}
-/* set ASIC CSRs to chip reset defaults */
-static void reset_asic_csrs(struct hfi1_devdata *dd)
-{
- int i;
-
- /*
- * If the HFIs are shared between separate nodes or VMs,
- * then more will need to be done here. One idea is a module
- * parameter that returns early, letting the first power-on or
- * a known first load do the reset and blocking all others.
- */
-
- if (!(dd->flags & HFI1_DO_INIT_ASIC))
- return;
-
- if (dd->icode != ICODE_FPGA_EMULATION) {
- /* emulation does not have an SBus - leave these alone */
- /*
- * All writes to ASIC_CFG_SBUS_REQUEST do something.
- * Notes:
- * o The reset is not zero if aimed at the core. See the
- * SBus documentation for details.
- * o If the SBus firmware has been updated (e.g. by the BIOS),
- * will the reset revert that?
- */
- /* ASIC_CFG_SBUS_REQUEST leave alone */
- write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
- }
- /* ASIC_SBUS_RESULT read-only */
- write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
- for (i = 0; i < ASIC_NUM_SCRATCH; i++)
- write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
- write_csr(dd, ASIC_CFG_MUTEX, 0); /* this will clear it */
-
- /* We might want to retain this state across FLR if we ever use it */
- write_csr(dd, ASIC_CFG_DRV_STR, 0);
-
- /* ASIC_CFG_THERM_POLL_EN leave alone */
- /* ASIC_STS_THERM read-only */
- /* ASIC_CFG_RESET leave alone */
-
- write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
- /* ASIC_PCIE_SD_HOST_STATUS read-only */
- write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
- write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
- /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
- write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
- /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
- /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
- for (i = 0; i < 16; i++)
- write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
-
- /* ASIC_GPIO_IN read-only */
- write_csr(dd, ASIC_GPIO_OE, 0);
- write_csr(dd, ASIC_GPIO_INVERT, 0);
- write_csr(dd, ASIC_GPIO_OUT, 0);
- write_csr(dd, ASIC_GPIO_MASK, 0);
- /* ASIC_GPIO_STATUS read-only */
- write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
- /* ASIC_GPIO_FORCE leave alone */
-
- /* ASIC_QSFP1_IN read-only */
- write_csr(dd, ASIC_QSFP1_OE, 0);
- write_csr(dd, ASIC_QSFP1_INVERT, 0);
- write_csr(dd, ASIC_QSFP1_OUT, 0);
- write_csr(dd, ASIC_QSFP1_MASK, 0);
- /* ASIC_QSFP1_STATUS read-only */
- write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
- /* ASIC_QSFP1_FORCE leave alone */
-
- /* ASIC_QSFP2_IN read-only */
- write_csr(dd, ASIC_QSFP2_OE, 0);
- write_csr(dd, ASIC_QSFP2_INVERT, 0);
- write_csr(dd, ASIC_QSFP2_OUT, 0);
- write_csr(dd, ASIC_QSFP2_MASK, 0);
- /* ASIC_QSFP2_STATUS read-only */
- write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
- /* ASIC_QSFP2_FORCE leave alone */
-
- write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
- /* this also writes a NOP command, clearing paging mode */
- write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
- write_csr(dd, ASIC_EEP_DATA, 0);
-}
-
/* set MISC CSRs to chip reset defaults */
static void reset_misc_csrs(struct hfi1_devdata *dd)
{
@@ -12789,8 +12952,10 @@ static void reset_misc_csrs(struct hfi1_devdata *dd)
write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
}
- /* MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
- only be written 128-byte chunks */
+ /*
+ * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
+ * only be written 128-byte chunks
+ */
/* init RSA engine to clear lingering errors */
write_csr(dd, MISC_CFG_RSA_CMD, 1);
write_csr(dd, MISC_CFG_RSA_MU, 0);
@@ -12846,18 +13011,17 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
write_csr(dd, SEND_ERR_CLEAR, ~0ull);
/* SEND_ERR_FORCE read-only */
for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
- write_csr(dd, SEND_LOW_PRIORITY_LIST + (8*i), 0);
+ write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
- write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8*i), 0);
- for (i = 0; i < dd->chip_send_contexts/NUM_CONTEXTS_PER_SET; i++)
- write_csr(dd, SEND_CONTEXT_SET_CTRL + (8*i), 0);
+ write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
+ for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
+ write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
- write_csr(dd, SEND_COUNTER_ARRAY32 + (8*i), 0);
+ write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
- write_csr(dd, SEND_COUNTER_ARRAY64 + (8*i), 0);
+ write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
- write_csr(dd, SEND_CM_GLOBAL_CREDIT,
- SEND_CM_GLOBAL_CREDIT_RESETCSR);
+ write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
/* SEND_CM_CREDIT_USED_STATUS read-only */
write_csr(dd, SEND_CM_TIMER_CTRL, 0);
write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
@@ -12865,7 +13029,7 @@ static void reset_txe_csrs(struct hfi1_devdata *dd)
write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
for (i = 0; i < TXE_NUM_DATA_VL; i++)
- write_csr(dd, SEND_CM_CREDIT_VL + (8*i), 0);
+ write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
write_csr(dd, SEND_CM_CREDIT_VL15, 0);
/* SEND_CM_CREDIT_USED_VL read-only */
/* SEND_CM_CREDIT_USED_VL15 read-only */
@@ -12951,8 +13115,8 @@ static void init_rbufs(struct hfi1_devdata *dd)
*/
if (count++ > 500) {
dd_dev_err(dd,
- "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
- __func__, reg);
+ "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
+ __func__, reg);
break;
}
udelay(2); /* do not busy-wait the CSR */
@@ -12981,8 +13145,8 @@ static void init_rbufs(struct hfi1_devdata *dd)
/* give up after 100us - slowest possible at 33MHz is 73us */
if (count++ > 50) {
dd_dev_err(dd,
- "%s: RcvStatus.RxRbufInit not set, continuing\n",
- __func__);
+ "%s: RcvStatus.RxRbufInit not set, continuing\n",
+ __func__);
break;
}
}
@@ -13008,7 +13172,7 @@ static void reset_rxe_csrs(struct hfi1_devdata *dd)
write_csr(dd, RCV_VL15, 0);
/* this is a clear-down */
write_csr(dd, RCV_ERR_INFO,
- RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
+ RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
/* RCV_ERR_STATUS read-only */
write_csr(dd, RCV_ERR_MASK, 0);
write_csr(dd, RCV_ERR_CLEAR, ~0ull);
@@ -13054,8 +13218,8 @@ static void reset_rxe_csrs(struct hfi1_devdata *dd)
write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
/* RCV_EGR_OFFSET_TAIL read-only */
for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
- write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j),
- 0);
+ write_uctxt_csr(dd, i,
+ RCV_TID_FLOW_TABLE + (8 * j), 0);
}
}
}
@@ -13157,7 +13321,7 @@ static void init_chip(struct hfi1_devdata *dd)
write_csr(dd, RCV_CTXT_CTRL, 0);
/* mask all interrupt sources */
for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8*i), 0ull);
+ write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
/*
* DC Reset: do a full DC reset before the register clear.
@@ -13166,7 +13330,7 @@ static void init_chip(struct hfi1_devdata *dd)
* across the clear.
*/
write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
- (void) read_csr(dd, CCE_DC_CTRL);
+ (void)read_csr(dd, CCE_DC_CTRL);
if (use_flr) {
/*
@@ -13187,22 +13351,19 @@ static void init_chip(struct hfi1_devdata *dd)
hfi1_pcie_flr(dd);
restore_pci_variables(dd);
}
-
- reset_asic_csrs(dd);
} else {
dd_dev_info(dd, "Resetting CSRs with writes\n");
reset_cce_csrs(dd);
reset_txe_csrs(dd);
reset_rxe_csrs(dd);
- reset_asic_csrs(dd);
reset_misc_csrs(dd);
}
/* clear the DC reset */
write_csr(dd, CCE_DC_CTRL, 0);
/* Set the LED off */
- if (is_ax(dd))
- setextled(dd, 0);
+ setextled(dd, 0);
+
/*
* Clear the QSFP reset.
* An FLR enforces a 0 on all out pins. The driver does not touch
@@ -13215,6 +13376,7 @@ static void init_chip(struct hfi1_devdata *dd)
*/
write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
+ init_chip_resources(dd);
}
static void init_early_variables(struct hfi1_devdata *dd)
@@ -13255,12 +13417,12 @@ static void init_kdeth_qp(struct hfi1_devdata *dd)
kdeth_qp = DEFAULT_KDETH_QP;
write_csr(dd, SEND_BTH_QP,
- (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK)
- << SEND_BTH_QP_KDETH_QP_SHIFT);
+ (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
+ SEND_BTH_QP_KDETH_QP_SHIFT);
write_csr(dd, RCV_BTH_QP,
- (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK)
- << RCV_BTH_QP_KDETH_QP_SHIFT);
+ (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
+ RCV_BTH_QP_KDETH_QP_SHIFT);
}
/**
@@ -13385,22 +13547,21 @@ static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
/* add rule0 */
write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
- RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK
- << RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
- 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
+ RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK <<
+ RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
+ 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
- LRH_BTH_MATCH_OFFSET
- << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
- LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
- LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
- ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
- QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
- ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
+ LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
+ LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
+ LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
+ ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
+ QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
+ ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
- LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
- LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
- LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
- LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
+ LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
+ LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
+ LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
+ LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
/* Enable RSM */
add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
kfree(rsmmap);
@@ -13418,9 +13579,8 @@ static void init_rxe(struct hfi1_devdata *dd)
/* enable all receive errors */
write_csr(dd, RCV_ERR_MASK, ~0ull);
/* setup QPN map table - start where VL15 context leaves off */
- init_qos(
- dd,
- dd->n_krcv_queues > MIN_KERNEL_KCTXTS ? MIN_KERNEL_KCTXTS : 0);
+ init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ?
+ MIN_KERNEL_KCTXTS : 0);
/*
* make sure RcvCtrl.RcvWcb <= PCIe Device Control
* Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
@@ -13457,36 +13617,33 @@ static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
u32 csr0to3, u32 csr4to7)
{
write_csr(dd, csr0to3,
- 0ull <<
- SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT
- | 1ull <<
- SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT
- | 2ull * cu <<
- SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT
- | 4ull * cu <<
- SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
+ 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
+ 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
+ 2ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
+ 4ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
write_csr(dd, csr4to7,
- 8ull * cu <<
- SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT
- | 16ull * cu <<
- SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT
- | 32ull * cu <<
- SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT
- | 64ull * cu <<
- SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
-
+ 8ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
+ 16ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
+ 32ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
+ 64ull * cu <<
+ SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}
static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
- SEND_CM_LOCAL_AU_TABLE4_TO7);
+ SEND_CM_LOCAL_AU_TABLE4_TO7);
}
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
- SEND_CM_REMOTE_AU_TABLE4_TO7);
+ SEND_CM_REMOTE_AU_TABLE4_TO7);
}
static void init_txe(struct hfi1_devdata *dd)
@@ -13537,7 +13694,6 @@ int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
/*
* Enable send-side J_KEY integrity check, unless this is A0 h/w
- * (due to A0 erratum).
*/
if (!is_ax(dd)) {
reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
@@ -13590,9 +13746,9 @@ int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
int ret = 0;
u64 reg;
- if (ctxt < dd->num_rcv_contexts)
+ if (ctxt < dd->num_rcv_contexts) {
rcd = dd->rcd[ctxt];
- else {
+ } else {
ret = -EINVAL;
goto done;
}
@@ -13618,9 +13774,9 @@ int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
int ret = 0;
u64 reg;
- if (ctxt < dd->num_rcv_contexts)
+ if (ctxt < dd->num_rcv_contexts) {
rcd = dd->rcd[ctxt];
- else {
+ } else {
ret = -EINVAL;
goto done;
}
@@ -13643,24 +13799,26 @@ done:
*/
void hfi1_start_cleanup(struct hfi1_devdata *dd)
{
+ aspm_exit(dd);
free_cntrs(dd);
free_rcverr(dd);
clean_up_interrupts(dd);
+ finish_chip_resources(dd);
}
#define HFI_BASE_GUID(dev) \
((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
/*
- * Certain chip functions need to be initialized only once per asic
- * instead of per-device. This function finds the peer device and
- * checks whether that chip initialization needs to be done by this
- * device.
+ * Information can be shared between the two HFIs on the same ASIC
+ * in the same OS. This function finds the peer device and sets
+ * up a shared structure.
*/
-static void asic_should_init(struct hfi1_devdata *dd)
+static int init_asic_data(struct hfi1_devdata *dd)
{
unsigned long flags;
struct hfi1_devdata *tmp, *peer = NULL;
+ int ret = 0;
spin_lock_irqsave(&hfi1_devs_lock, flags);
/* Find our peer device */
@@ -13672,13 +13830,21 @@ static void asic_should_init(struct hfi1_devdata *dd)
}
}
- /*
- * "Claim" the ASIC for initialization if it hasn't been
- " "claimed" yet.
- */
- if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
- dd->flags |= HFI1_DO_INIT_ASIC;
+ if (peer) {
+ dd->asic_data = peer->asic_data;
+ } else {
+ dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+ if (!dd->asic_data) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ mutex_init(&dd->asic_data->asic_resource_mutex);
+ }
+ dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
+
+done:
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+ return ret;
}
/*
@@ -13698,7 +13864,7 @@ static int obtain_boardname(struct hfi1_devdata *dd)
ret = read_hfi1_efi_var(dd, "description", &size,
(void **)&dd->boardname);
if (ret) {
- dd_dev_err(dd, "Board description not found\n");
+ dd_dev_info(dd, "Board description not found\n");
/* use generic description */
dd->boardname = kstrdup(generic, GFP_KERNEL);
if (!dd->boardname)
@@ -13707,6 +13873,50 @@ static int obtain_boardname(struct hfi1_devdata *dd)
return 0;
}
+/*
+ * Check the interrupt registers to make sure that they are mapped correctly.
+ * It is intended to help the user identify any mismapping by the VMM when
+ * the driver is running in a VM. This function should only be called before
+ * interrupts are set up properly.
+ *
+ * Return 0 on success, -EINVAL on failure.
+ */
+static int check_int_registers(struct hfi1_devdata *dd)
+{
+ u64 reg;
+ u64 all_bits = ~(u64)0;
+ u64 mask;
+
+ /* Clear CceIntMask[0] to avoid raising any interrupts */
+ mask = read_csr(dd, CCE_INT_MASK);
+ write_csr(dd, CCE_INT_MASK, 0ull);
+ reg = read_csr(dd, CCE_INT_MASK);
+ if (reg)
+ goto err_exit;
+
+ /* Clear all interrupt status bits */
+ write_csr(dd, CCE_INT_CLEAR, all_bits);
+ reg = read_csr(dd, CCE_INT_STATUS);
+ if (reg)
+ goto err_exit;
+
+ /* Set all interrupt status bits */
+ write_csr(dd, CCE_INT_FORCE, all_bits);
+ reg = read_csr(dd, CCE_INT_STATUS);
+ if (reg != all_bits)
+ goto err_exit;
+
+ /* Restore the interrupt mask */
+ write_csr(dd, CCE_INT_CLEAR, all_bits);
+ write_csr(dd, CCE_INT_MASK, mask);
+
+ return 0;
+err_exit:
+ write_csr(dd, CCE_INT_MASK, mask);
+ dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
+ return -EINVAL;
+}
+
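The call site added later in this patch (see the hfi1_init_dd hunk below) gates this check on the absence of an upstream PCI component, which is the heuristic for running under a VMM. In sketch form:

	struct pci_dev *parent = pdev->bus->self;

	if (!parent) {	/* no upstream bridge visible: likely in a VM */
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}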
/**
* Allocate and initialize the device structure for the hfi.
* @dev: the pci_dev for hfi1_ib device
@@ -13731,9 +13941,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
"RTL FPGA emulation",
"Functional simulator"
};
+ struct pci_dev *parent = pdev->bus->self;
- dd = hfi1_alloc_devdata(pdev,
- NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
+ dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
+ sizeof(struct hfi1_pportdata));
if (IS_ERR(dd))
goto bail;
ppd = dd->pport;
@@ -13754,8 +13965,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* link width active is 0 when link is down */
/* link width downgrade active is 0 when link is down */
- if (num_vls < HFI1_MIN_VLS_SUPPORTED
- || num_vls > HFI1_MAX_VLS_SUPPORTED) {
+ if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
+ num_vls > HFI1_MAX_VLS_SUPPORTED) {
hfi1_early_err(&pdev->dev,
"Invalid num_vls %u, using %u VLs\n",
num_vls, HFI1_MAX_VLS_SUPPORTED);
@@ -13763,6 +13974,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
}
ppd->vls_supported = num_vls;
ppd->vls_operational = ppd->vls_supported;
+ ppd->actual_vls_operational = ppd->vls_supported;
/* Set the default MTU. */
for (vl = 0; vl < num_vls; vl++)
dd->vld[vl].mtu = hfi1_max_mtu;
@@ -13782,6 +13994,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* start in offline */
ppd->host_link_state = HLS_DN_OFFLINE;
init_vl_arb_caches(ppd);
+ ppd->last_pstate = 0xff; /* invalid value */
}
dd->link_default = HLS_DN_POLL;
@@ -13807,8 +14020,21 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
& CCE_REVISION_CHIP_REV_MINOR_MASK;
- /* obtain the hardware ID - NOT related to unit, which is a
- software enumeration */
+ /*
+ * Check interrupt registers mapping if the driver has no access to
+ * the upstream component. In this case, it is likely that the driver
+ * is running in a VM.
+ */
+ if (!parent) {
+ ret = check_int_registers(dd);
+ if (ret)
+ goto bail_cleanup;
+ }
+
+ /*
+ * obtain the hardware ID - NOT related to unit, which is a
+ * software enumeration
+ */
reg = read_csr(dd, CCE_REVISION2);
dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
& CCE_REVISION2_HFI_ID_MASK;
@@ -13816,8 +14042,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
- dd->icode < ARRAY_SIZE(inames) ? inames[dd->icode] : "unknown",
- (int)dd->irev);
+ dd->icode < ARRAY_SIZE(inames) ?
+ inames[dd->icode] : "unknown", (int)dd->irev);
/* speeds the hardware can support */
dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
@@ -13846,6 +14072,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
num_vls, dd->chip_sdma_engines);
num_vls = dd->chip_sdma_engines;
ppd->vls_supported = dd->chip_sdma_engines;
+ ppd->vls_operational = ppd->vls_supported;
}
/*
@@ -13867,8 +14094,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* needs to be done before we look for the peer device */
read_guid(dd);
- /* should this device init the ASIC block? */
- asic_should_init(dd);
+ /* set up shared ASIC data with peer device */
+ ret = init_asic_data(dd);
+ if (ret)
+ goto bail_cleanup;
/* obtain chip sizes, reset chip CSRs */
init_chip(dd);
@@ -13878,6 +14107,9 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_cleanup;
+ /* Needs to be called before hfi1_firmware_init */
+ get_platform_config(dd);
+
/* read in firmware */
ret = hfi1_firmware_init(dd);
if (ret)
@@ -13929,6 +14161,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* set up KDETH QP prefix in both RX and TX CSRs */
init_kdeth_qp(dd);
+ ret = hfi1_dev_affinity_init(dd);
+ if (ret)
+ goto bail_cleanup;
+
/* send contexts must be set up before receive contexts */
ret = init_send_contexts(dd);
if (ret)
@@ -14026,7 +14262,6 @@ static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
return (u16)delta_cycles;
}
-
/**
* create_pbc - build a pbc for transmission
* @flags: special case flags or-ed in built pbc
@@ -14082,10 +14317,15 @@ static int thermal_init(struct hfi1_devdata *dd)
int ret = 0;
if (dd->icode != ICODE_RTL_SILICON ||
- !(dd->flags & HFI1_DO_INIT_ASIC))
+ check_chip_resource(dd, CR_THERM_INIT, NULL))
return ret;
- acquire_hw_mutex(dd);
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ THERM_FAILURE(dd, ret, "Acquire SBus");
+ return ret;
+ }
+
dd_dev_info(dd, "Initializing thermal sensor\n");
/* Disable polling of thermal readings */
write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
@@ -14132,8 +14372,14 @@ static int thermal_init(struct hfi1_devdata *dd)
/* Enable polling of thermal readings */
write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
+
+ /* Set initialized flag */
+ ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
+ if (ret)
+ THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
+
done:
- release_hw_mutex(dd);
+ release_chip_resource(dd, CR_SBUS);
return ret;
}
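
Note the set-once semantics here: CR_THERM_INIT is taken with a zero wait only after the sensor setup succeeds, and it is never released, so once either HFI completes initialization every later thermal_init() call returns early at the check_chip_resource() test.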
@@ -14148,7 +14394,7 @@ static void handle_temp_err(struct hfi1_devdata *dd)
dd_dev_emerg(dd,
"Critical temperature reached! Forcing device into freeze mode!\n");
dd->flags |= HFI1_FORCED_FREEZE;
- start_freeze_handling(ppd, FREEZE_SELF|FREEZE_ABORT);
+ start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
/*
* Shut DC down as much and as quickly as possible.
*
@@ -14162,8 +14408,8 @@ static void handle_temp_err(struct hfi1_devdata *dd)
*/
ppd->driver_link_ready = 0;
ppd->link_enabled = 0;
- set_physical_link_state(dd, PLS_OFFLINE |
- (OPA_LINKDOWN_REASON_SMA_DISABLED << 8));
+ set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
+ PLS_OFFLINE);
/*
* Step 2: Shutdown LCB and 8051
* After shutdown, do not restore DC_CFG_RESET value.
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/staging/rdma/hfi1/chip.h
index 5b375ddc345d..4f3b878e43eb 100644
--- a/drivers/staging/rdma/hfi1/chip.h
+++ b/drivers/staging/rdma/hfi1/chip.h
@@ -1,14 +1,13 @@
#ifndef _CHIP_H
#define _CHIP_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -79,8 +76,10 @@
#define PIO_CMASK 0x7ff /* counter mask for free and fill counters */
#define MAX_EAGER_ENTRIES 2048 /* max receive eager entries */
#define MAX_TID_PAIR_ENTRIES 1024 /* max receive expected pairs */
-/* Virtual? Allocation Unit, defined as AU = 8*2^vAU, 64 bytes, AU is fixed
- at 64 bytes for all generation one devices */
+/*
+ * Virtual Allocation Unit (vAU), defined as AU = 8*2^vAU bytes. AU is fixed
+ * at 64 bytes (vAU = 3) for all generation one devices
+ */
#define CM_VAU 3
/* HFI link credit count, AKA receive buffer depth (RBUF_DEPTH) */
#define CM_GLOBAL_CREDITS 0x940
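Worked example for the formula above: with CM_VAU = 3, AU = 8 * 2^3 = 64 bytes. A hypothetical convenience macro (not defined by the driver) makes the relationship explicit:

#define AU_BYTES(vau)	(8u << (vau))	/* AU_BYTES(CM_VAU) == 64 */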
@@ -93,15 +92,15 @@
#define TXE_PIO_SEND (TXE + TXE_PIO_SEND_OFFSET)
/* PBC flags */
-#define PBC_INTR (1ull << 31)
+#define PBC_INTR BIT_ULL(31)
#define PBC_DC_INFO_SHIFT (30)
-#define PBC_DC_INFO (1ull << PBC_DC_INFO_SHIFT)
-#define PBC_TEST_EBP (1ull << 29)
-#define PBC_PACKET_BYPASS (1ull << 28)
-#define PBC_CREDIT_RETURN (1ull << 25)
-#define PBC_INSERT_BYPASS_ICRC (1ull << 24)
-#define PBC_TEST_BAD_ICRC (1ull << 23)
-#define PBC_FECN (1ull << 22)
+#define PBC_DC_INFO BIT_ULL(PBC_DC_INFO_SHIFT)
+#define PBC_TEST_EBP BIT_ULL(29)
+#define PBC_PACKET_BYPASS BIT_ULL(28)
+#define PBC_CREDIT_RETURN BIT_ULL(25)
+#define PBC_INSERT_BYPASS_ICRC BIT_ULL(24)
+#define PBC_TEST_BAD_ICRC BIT_ULL(23)
+#define PBC_FECN BIT_ULL(22)
/* PbcInsertHcrc field settings */
#define PBC_IHCRC_LKDETH 0x0 /* insert @ local KDETH offset */
@@ -212,7 +211,7 @@
#define PLS_CONFIGPHY_DEBOUCE 0x40
#define PLS_CONFIGPHY_ESTCOMM 0x41
#define PLS_CONFIGPHY_ESTCOMM_TXRX_HUNT 0x42
-#define PLS_CONFIGPHY_ESTcOMM_LOCAL_COMPLETE 0x43
+#define PLS_CONFIGPHY_ESTCOMM_LOCAL_COMPLETE 0x43
#define PLS_CONFIGPHY_OPTEQ 0x44
#define PLS_CONFIGPHY_OPTEQ_OPTIMIZING 0x44
#define PLS_CONFIGPHY_OPTEQ_LOCAL_COMPLETE 0x45
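The BIT()/BIT_ULL() conversions in this hunk are behavior-preserving: the kernel's BIT_ULL(nr) macro expands to (1ULL << (nr)) (see linux/bitops.h), so for example:

#define PBC_INTR	BIT_ULL(31)	/* identical to (1ull << 31) */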
@@ -242,36 +241,37 @@
#define HCMD_SUCCESS 2
/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR - error flags */
-#define SPICO_ROM_FAILED (1 << 0)
-#define UNKNOWN_FRAME (1 << 1)
-#define TARGET_BER_NOT_MET (1 << 2)
-#define FAILED_SERDES_INTERNAL_LOOPBACK (1 << 3)
-#define FAILED_SERDES_INIT (1 << 4)
-#define FAILED_LNI_POLLING (1 << 5)
-#define FAILED_LNI_DEBOUNCE (1 << 6)
-#define FAILED_LNI_ESTBCOMM (1 << 7)
-#define FAILED_LNI_OPTEQ (1 << 8)
-#define FAILED_LNI_VERIFY_CAP1 (1 << 9)
-#define FAILED_LNI_VERIFY_CAP2 (1 << 10)
-#define FAILED_LNI_CONFIGLT (1 << 11)
+#define SPICO_ROM_FAILED BIT(0)
+#define UNKNOWN_FRAME BIT(1)
+#define TARGET_BER_NOT_MET BIT(2)
+#define FAILED_SERDES_INTERNAL_LOOPBACK BIT(3)
+#define FAILED_SERDES_INIT BIT(4)
+#define FAILED_LNI_POLLING BIT(5)
+#define FAILED_LNI_DEBOUNCE BIT(6)
+#define FAILED_LNI_ESTBCOMM BIT(7)
+#define FAILED_LNI_OPTEQ BIT(8)
+#define FAILED_LNI_VERIFY_CAP1 BIT(9)
+#define FAILED_LNI_VERIFY_CAP2 BIT(10)
+#define FAILED_LNI_CONFIGLT BIT(11)
+#define HOST_HANDSHAKE_TIMEOUT BIT(12)
#define FAILED_LNI (FAILED_LNI_POLLING | FAILED_LNI_DEBOUNCE \
| FAILED_LNI_ESTBCOMM | FAILED_LNI_OPTEQ \
| FAILED_LNI_VERIFY_CAP1 \
| FAILED_LNI_VERIFY_CAP2 \
- | FAILED_LNI_CONFIGLT)
+ | FAILED_LNI_CONFIGLT | HOST_HANDSHAKE_TIMEOUT)
/* DC_DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG - host message flags */
-#define HOST_REQ_DONE (1 << 0)
-#define BC_PWR_MGM_MSG (1 << 1)
-#define BC_SMA_MSG (1 << 2)
-#define BC_BCC_UNKOWN_MSG (1 << 3)
-#define BC_IDLE_UNKNOWN_MSG (1 << 4)
-#define EXT_DEVICE_CFG_REQ (1 << 5)
-#define VERIFY_CAP_FRAME (1 << 6)
-#define LINKUP_ACHIEVED (1 << 7)
-#define LINK_GOING_DOWN (1 << 8)
-#define LINK_WIDTH_DOWNGRADED (1 << 9)
+#define HOST_REQ_DONE BIT(0)
+#define BC_PWR_MGM_MSG BIT(1)
+#define BC_SMA_MSG BIT(2)
+#define BC_BCC_UNKNOWN_MSG BIT(3)
+#define BC_IDLE_UNKNOWN_MSG BIT(4)
+#define EXT_DEVICE_CFG_REQ BIT(5)
+#define VERIFY_CAP_FRAME BIT(6)
+#define LINKUP_ACHIEVED BIT(7)
+#define LINK_GOING_DOWN BIT(8)
+#define LINK_WIDTH_DOWNGRADED BIT(9)
/* DC_DC8051_CFG_EXT_DEV_1.REQ_TYPE - 8051 host requests */
#define HREQ_LOAD_CONFIG 0x01
@@ -335,14 +335,14 @@
* the CSR fields hold multiples of this value.
*/
#define RCV_SHIFT 3
-#define RCV_INCREMENT (1 << RCV_SHIFT)
+#define RCV_INCREMENT BIT(RCV_SHIFT)
/*
* Receive header queue entry increment - the CSR holds multiples of
* this value.
*/
#define HDRQ_SIZE_SHIFT 5
-#define HDRQ_INCREMENT (1 << HDRQ_SIZE_SHIFT)
+#define HDRQ_INCREMENT BIT(HDRQ_SIZE_SHIFT)
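To sketch how these increments are used (illustrative, not code from this patch): the CSR field stores the value divided by the increment, so encoding is a right shift:

	/* e.g. a 128-byte header queue entry size in CSR units */
	u64 hdrq_ent_csr = 128 >> HDRQ_SIZE_SHIFT; /* 128 / HDRQ_INCREMENT == 4 */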
/*
* Freeze handling flags
@@ -371,6 +371,9 @@
#define NUM_LANE_FIELDS 0x8
/* 8051 general register Field IDs */
+#define LINK_OPTIMIZATION_SETTINGS 0x00
+#define LINK_TUNING_PARAMETERS 0x02
+#define DC_HOST_COMM_SETTINGS 0x03
#define TX_SETTINGS 0x06
#define VERIFY_CAP_LOCAL_PHY 0x07
#define VERIFY_CAP_LOCAL_FABRIC 0x08
@@ -387,6 +390,10 @@
#define LINK_QUALITY_INFO 0x14
#define REMOTE_DEVICE_ID 0x15
+/* 8051 lane specific register field IDs */
+#define TX_EQ_SETTINGS 0x00
+#define CHANNEL_LOSS_SETTINGS 0x05
+
/* Lane ID for general configuration registers */
#define GENERAL_CONFIG 4
@@ -511,8 +518,10 @@ enum {
#define LCB_CRC_48B 0x2 /* 48b CRC */
#define LCB_CRC_12B_16B_PER_LANE 0x3 /* 12b-16b per lane CRC */
-/* the following enum is (almost) a copy/paste of the definition
- * in the OPA spec, section 20.2.2.6.8 (PortInfo) */
+/*
+ * the following enum is (almost) a copy/paste of the definition
+ * in the OPA spec, section 20.2.2.6.8 (PortInfo)
+ */
enum {
PORT_LTP_CRC_MODE_NONE = 0,
PORT_LTP_CRC_MODE_14 = 1, /* 14-bit LTP CRC mode (optional) */
@@ -614,6 +623,8 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64, int, u32, u32);
#define NUM_PCIE_SERDES 16 /* number of PCIe serdes on the SBus */
extern const u8 pcie_serdes_broadcast[];
extern const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES];
+extern uint platform_config_load;
+
/* SBus commands */
#define RESET_SBUS_RECEIVER 0x20
#define WRITE_SBUS_RECEIVER 0x21
@@ -629,6 +640,42 @@ int load_firmware(struct hfi1_devdata *dd);
void dispose_firmware(void);
int acquire_hw_mutex(struct hfi1_devdata *dd);
void release_hw_mutex(struct hfi1_devdata *dd);
+
+/*
+ * Bitmask of dynamic access for ASIC block chip resources. Each HFI has its
+ * own range of bits for the resource so it can clear its own bits on
+ * starting and exiting. If either HFI has the resource bit set, the
+ * resource is in use. The separate bit ranges are:
+ * HFI0 bits 7:0
+ * HFI1 bits 15:8
+ */
+#define CR_SBUS 0x01 /* SBUS, THERM, and PCIE registers */
+#define CR_EPROM 0x02 /* EEP, GPIO registers */
+#define CR_I2C1 0x04 /* QSFP1_OE register */
+#define CR_I2C2 0x08 /* QSFP2_OE register */
+#define CR_DYN_SHIFT 8 /* dynamic flag shift */
+#define CR_DYN_MASK ((1ull << CR_DYN_SHIFT) - 1)
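
A plausible helper over this layout (an assumed implementation detail, not shown in this hunk) shifts a resource bit into the owning HFI's byte before it is set in the shared ASIC state:

static inline u64 resource_mask(u32 hfi1_id, u32 resource)
{
	/* HFI0 owns bits 7:0 as-is; HFI1 shifts into bits 15:8 */
	return (u64)resource << (hfi1_id ? CR_DYN_SHIFT : 0);
}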
+
+/*
+ * Bitmask of static ASIC states; these are outside of the dynamic ASIC
+ * block chip resources above. They are to be set once and never cleared.
+ * The SBus dynamic flag must be held when setting them.
+ */
+#define CR_THERM_INIT 0x010000
+
+int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait);
+void release_chip_resource(struct hfi1_devdata *dd, u32 resource);
+bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
+ const char *func);
+void init_chip_resources(struct hfi1_devdata *dd);
+void finish_chip_resources(struct hfi1_devdata *dd);
+
+/* ms wait time for access to an SBus resource */
+#define SBUS_TIMEOUT 4000 /* long enough for a FW download and SBR */
+
+/* ms wait time for a qsfp (i2c) chain to become available */
+#define QSFP_WAIT 20000 /* long enough for FW update to the F4 uc */
+
void fabric_serdes_reset(struct hfi1_devdata *dd);
int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result);
@@ -644,13 +691,17 @@ void handle_verify_cap(struct work_struct *work);
void handle_freeze(struct work_struct *work);
void handle_link_up(struct work_struct *work);
void handle_link_down(struct work_struct *work);
+void handle_8051_request(struct work_struct *work);
void handle_link_downgrade(struct work_struct *work);
void handle_link_bounce(struct work_struct *work);
void handle_sma_message(struct work_struct *work);
+void reset_qsfp(struct hfi1_pportdata *ppd);
+void qsfp_event(struct work_struct *work);
void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
int send_idle_sma(struct hfi1_devdata *dd, u64 message);
+int load_8051_config(struct hfi1_devdata *, u8, u8, u32);
+int read_8051_config(struct hfi1_devdata *, u8, u8, u32 *);
int start_link(struct hfi1_pportdata *ppd);
-void init_qsfp(struct hfi1_pportdata *ppd);
int bringup_serdes(struct hfi1_pportdata *ppd);
void set_intr_state(struct hfi1_devdata *dd, u32 enable);
void apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
@@ -690,6 +741,8 @@ u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl);
u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data);
u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl);
u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data);
+u32 read_logical_state(struct hfi1_devdata *dd);
+void force_recv_intr(struct hfi1_ctxtdata *rcd);
/* Per VL indexes */
enum {
@@ -785,8 +838,14 @@ enum {
C_SW_CPU_RCV_LIM,
C_SW_VTX_WAIT,
C_SW_PIO_WAIT,
+ C_SW_PIO_DRAIN,
C_SW_KMEM_WAIT,
C_SW_SEND_SCHED,
+ C_SDMA_DESC_FETCHED_CNT,
+ C_SDMA_INT_CNT,
+ C_SDMA_ERR_CNT,
+ C_SDMA_IDLE_INT_CNT,
+ C_SDMA_PROGRESS_INT_CNT,
/* MISC_ERR_STATUS */
C_MISC_PLL_LOCK_FAIL_ERR,
C_MISC_MBIST_FAIL_ERR,
@@ -1275,10 +1334,8 @@ void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
u32 type, unsigned long pa, u16 order);
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd);
void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt);
-u32 hfi1_read_cntrs(struct hfi1_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp);
-u32 hfi1_read_portcntrs(struct hfi1_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp);
+u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp);
+u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp);
u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd);
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which);
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val);
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/staging/rdma/hfi1/chip_registers.h
index 701e9e1012a6..770f05c9b8de 100644
--- a/drivers/staging/rdma/hfi1/chip_registers.h
+++ b/drivers/staging/rdma/hfi1/chip_registers.h
@@ -2,14 +2,13 @@
#define DEF_CHIP_REG
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -21,8 +20,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -551,6 +548,17 @@
#define CCE_MSIX_TABLE_UPPER (CCE + 0x000000100008)
#define CCE_MSIX_TABLE_UPPER_RESETCSR 0x0000000100000000ull
#define CCE_MSIX_VEC_CLR_WITHOUT_INT (CCE + 0x000000110400)
+#define CCE_PCIE_CTRL (CCE + 0x0000000000C0)
+#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK 0x3ull
+#define CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT 0
+#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK 0xFull
+#define CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT 2
+#define CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT 8
+#define CCE_PCIE_CTRL_XMT_MARGIN_SHIFT 9
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK 0x1ull
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT 12
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK 0x7ull
+#define CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT 13
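The mask/shift pairs above are meant for the usual read-modify-write sequence
on CCE_PCIE_CTRL. A hedged sketch of programming the Gen1/Gen2 transmit
margin (the margin value is illustrative):

	u64 reg = read_csr(dd, CCE_PCIE_CTRL);
	u64 margin = 3;		/* illustrative value */

	reg &= ~(CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK
			<< CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT);
	reg |= (margin & CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK)
			<< CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT;
	reg |= CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK
			<< CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT;
	write_csr(dd, CCE_PCIE_CTRL, reg);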
#define CCE_REVISION (CCE + 0x000000000000)
#define CCE_REVISION2 (CCE + 0x000000000008)
#define CCE_REVISION2_HFI_ID_MASK 0x1ull
@@ -1270,6 +1278,9 @@
#define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SHIFT 0
#define SEND_STATIC_RATE_CONTROL_CSR_SRC_RELOAD_SMASK 0xFFFFull
#define PCIE_CFG_REG_PL2 (PCIE + 0x000000000708)
+#define PCIE_CFG_REG_PL3 (PCIE + 0x00000000070C)
+#define PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SHIFT 27
+#define PCIE_CFG_REG_PL3_L1_ENT_LATENCY_SMASK 0x38000000
#define PCIE_CFG_REG_PL102 (PCIE + 0x000000000898)
#define PCIE_CFG_REG_PL102_GEN3_EQ_POST_CURSOR_PSET_SHIFT 12
#define PCIE_CFG_REG_PL102_GEN3_EQ_CURSOR_PSET_SHIFT 6
@@ -1290,5 +1301,6 @@
#define CCE_INT_BLOCKED (CCE + 0x000000110C00)
#define SEND_DMA_IDLE_CNT (TXE + 0x000000200040)
#define SEND_DMA_DESC_FETCHED_CNT (TXE + 0x000000200058)
+#define CCE_MSIX_PBA_OFFSET 0x0110000
#endif /* DEF_CHIP_REG */
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/staging/rdma/hfi1/common.h
index 5dd92720faae..e9b6bb322025 100644
--- a/drivers/staging/rdma/hfi1/common.h
+++ b/drivers/staging/rdma/hfi1/common.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -341,19 +338,16 @@ struct hfi1_message_header {
#define FULL_MGMT_P_KEY 0xFFFF
#define DEFAULT_P_KEY LIM_MGMT_P_KEY
-#define HFI1_PERMISSIVE_LID 0xFFFF
#define HFI1_AETH_CREDIT_SHIFT 24
#define HFI1_AETH_CREDIT_MASK 0x1F
#define HFI1_AETH_CREDIT_INVAL 0x1F
#define HFI1_MSN_MASK 0xFFFFFF
-#define HFI1_QPN_MASK 0xFFFFFF
#define HFI1_FECN_SHIFT 31
#define HFI1_FECN_MASK 1
-#define HFI1_FECN_SMASK (1 << HFI1_FECN_SHIFT)
+#define HFI1_FECN_SMASK BIT(HFI1_FECN_SHIFT)
#define HFI1_BECN_SHIFT 30
#define HFI1_BECN_MASK 1
-#define HFI1_BECN_SMASK (1 << HFI1_BECN_SHIFT)
-#define HFI1_MULTICAST_LID_BASE 0xC000
+#define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT)
static inline __u64 rhf_to_cpu(const __le32 *rbuf)
{
diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/staging/rdma/hfi1/debugfs.c
index acd2269e9f14..dbab9d9cc288 100644
--- a/drivers/staging/rdma/hfi1/debugfs.c
+++ b/drivers/staging/rdma/hfi1/debugfs.c
@@ -1,13 +1,12 @@
#ifdef CONFIG_DEBUG_FS
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -19,8 +18,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -52,6 +49,7 @@
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/module.h>
#include "hfi.h"
#include "debugfs.h"
@@ -71,6 +69,7 @@ static const struct seq_operations _##name##_seq_ops = { \
.stop = _##name##_seq_stop, \
.show = _##name##_seq_show \
}
+
#define DEBUGFS_SEQ_FILE_OPEN(name) \
static int _##name##_open(struct inode *inode, struct file *s) \
{ \
@@ -102,7 +101,6 @@ do { \
pr_warn("create of %s failed\n", name); \
} while (0)
-
#define DEBUGFS_SEQ_FILE_CREATE(name, parent, data) \
DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
@@ -127,7 +125,6 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
return pos;
}
-
static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
__releases(RCU)
{
@@ -151,8 +148,8 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
if (!n_packets && !n_bytes)
return SEQ_SKIP;
seq_printf(s, "%02llx %llu/%llu\n", i,
- (unsigned long long) n_packets,
- (unsigned long long) n_bytes);
+ (unsigned long long)n_packets,
+ (unsigned long long)n_bytes);
return 0;
}
@@ -247,7 +244,7 @@ __acquires(RCU)
}
static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
- loff_t *pos)
+ loff_t *pos)
{
struct qp_iter *iter = iter_ptr;
@@ -308,7 +305,6 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
return pos;
}
-
static void _sdes_seq_stop(struct seq_file *s, void *v)
__releases(RCU)
{
@@ -341,7 +337,7 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
rcu_read_lock();
dd = private2dd(file);
- avail = hfi1_read_cntrs(dd, *ppos, NULL, &counters);
+ avail = hfi1_read_cntrs(dd, NULL, &counters);
rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
rcu_read_unlock();
return rval;
@@ -358,7 +354,7 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
rcu_read_lock();
dd = private2dd(file);
- avail = hfi1_read_cntrs(dd, *ppos, &names, NULL);
+ avail = hfi1_read_cntrs(dd, &names, NULL);
rval = simple_read_from_buffer(buf, count, ppos, names, avail);
rcu_read_unlock();
return rval;
@@ -385,8 +381,7 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
rcu_read_lock();
dd = private2dd(file);
- /* port number n/a here since names are constant */
- avail = hfi1_read_portcntrs(dd, *ppos, 0, &names, NULL);
+ avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
rval = simple_read_from_buffer(buf, count, ppos, names, avail);
rcu_read_unlock();
return rval;
@@ -394,28 +389,150 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
/* read the per-port counters */
static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
u64 *counters;
size_t avail;
- struct hfi1_devdata *dd;
struct hfi1_pportdata *ppd;
ssize_t rval;
rcu_read_lock();
ppd = private2ppd(file);
- dd = ppd->dd;
- avail = hfi1_read_portcntrs(dd, *ppos, ppd->port - 1, NULL, &counters);
+ avail = hfi1_read_portcntrs(ppd, NULL, &counters);
rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
rcu_read_unlock();
return rval;
}
+static void check_dyn_flag(u64 scratch0, char *p, int size, int *used,
+ int this_hfi, int hfi, u32 flag, const char *what)
+{
+ u32 mask;
+
+ mask = flag << (hfi ? CR_DYN_SHIFT : 0);
+ if (scratch0 & mask) {
+ *used += scnprintf(p + *used, size - *used,
+ " 0x%08x - HFI%d %s in use, %s device\n",
+ mask, hfi, what,
+ this_hfi == hfi ? "this" : "other");
+ }
+}
+
+static ssize_t asic_flags_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ u64 scratch0;
+ char *tmp;
+ int ret = 0;
+ int size;
+ int used;
+ int i;
+
+ rcu_read_lock();
+ ppd = private2ppd(file);
+ dd = ppd->dd;
+ size = PAGE_SIZE;
+ used = 0;
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (!tmp) {
+ rcu_read_unlock();
+ return -ENOMEM;
+ }
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ used += scnprintf(tmp + used, size - used,
+ "Resource flags: 0x%016llx\n", scratch0);
+
+ /* check permanent flag */
+ if (scratch0 & CR_THERM_INIT) {
+ used += scnprintf(tmp + used, size - used,
+ " 0x%08x - thermal monitoring initialized\n",
+ (u32)CR_THERM_INIT);
+ }
+
+ /* check each dynamic flag on each HFI */
+ for (i = 0; i < 2; i++) {
+ check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
+ CR_SBUS, "SBus");
+ check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
+ CR_EPROM, "EPROM");
+ check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
+ CR_I2C1, "i2c chain 1");
+ check_dyn_flag(scratch0, tmp, size, &used, dd->hfi1_id, i,
+ CR_I2C2, "i2c chain 2");
+ }
+ used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
+
+ ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
+ rcu_read_unlock();
+ kfree(tmp);
+ return ret;
+}
+
+static ssize_t asic_flags_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ char *buff;
+ int ret;
+ unsigned long long value;
+ u64 scratch0;
+ u64 clear;
+
+ rcu_read_lock();
+ ppd = private2ppd(file);
+ dd = ppd->dd;
+
+ buff = kmalloc(count + 1, GFP_KERNEL);
+ if (!buff) {
+ ret = -ENOMEM;
+ goto do_return;
+ }
+
+ ret = copy_from_user(buff, buf, count);
+ if (ret > 0) {
+ ret = -EFAULT;
+ goto do_free;
+ }
+
+ /* zero terminate and read the expected integer */
+ buff[count] = 0;
+ ret = kstrtoull(buff, 0, &value);
+ if (ret)
+ goto do_free;
+ clear = value;
+
+ /* obtain exclusive access */
+ mutex_lock(&dd->asic_data->asic_resource_mutex);
+ acquire_hw_mutex(dd);
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ scratch0 &= ~clear;
+ write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
+ /* force write to be visible to other HFI on another OS */
+ (void)read_csr(dd, ASIC_CFG_SCRATCH);
+
+ release_hw_mutex(dd);
+ mutex_unlock(&dd->asic_data->asic_resource_mutex);
+
+ /* return the number of bytes written */
+ ret = count;
+
+ do_free:
+ kfree(buff);
+ do_return:
+ rcu_read_unlock();
+ return ret;
+}
+
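As the read side prints ("Write bits to clear"), a stuck resource flag can be
cleared by writing its mask back to this file. A hypothetical user-space
sketch; the debugfs path and flag value are illustrative only:

	#include <fcntl.h>
	#include <unistd.h>

	static void clear_stale_sbus_flag(void)
	{
		/* 0x1 == CR_SBUS for HFI0; pick the bit that is actually stuck */
		int fd = open("/sys/kernel/debug/hfi1/hfi1_0/asic_flags", O_WRONLY);

		if (fd >= 0) {
			write(fd, "0x1", 3);
			close(fd);
		}
	}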
/*
* read the per-port QSFP data for ppd
*/
static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
struct hfi1_pportdata *ppd;
char *tmp;
@@ -439,7 +556,7 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
/* Do an i2c write operation on the chain for the given HFI. */
static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos, u32 target)
+ size_t count, loff_t *ppos, u32 target)
{
struct hfi1_pportdata *ppd;
char *buff;
@@ -451,6 +568,16 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
rcu_read_lock();
ppd = private2ppd(file);
+ /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
+ i2c_addr = (*ppos >> 16) & 0xffff;
+ offset = *ppos & 0xffff;
+
+ /* explicitly reject invalid address 0 to catch cp and cat */
+ if (i2c_addr == 0) {
+ ret = -EINVAL;
+ goto _return;
+ }
+
buff = kmalloc(count, GFP_KERNEL);
if (!buff) {
ret = -ENOMEM;
@@ -463,9 +590,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
goto _free;
}
- i2c_addr = (*ppos >> 16) & 0xff;
- offset = *ppos & 0xffff;
-
total_written = i2c_write(ppd, target, i2c_addr, offset, buff, count);
if (total_written < 0) {
ret = total_written;
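Both this write path and the read path below decode the target from the file
position as described in the comment. A sketch of how a user-space caller
would pack it (hypothetical helper, not part of the patch):

	/* [offsetSize][i2cAddr] in the upper 16 bits, offset in the lower 16 */
	static off_t i2c_file_pos(unsigned int i2c_addr, unsigned int offset)
	{
		return ((off_t)(i2c_addr & 0xffff) << 16) | (offset & 0xffff);
	}

	/* e.g. lseek(fd, i2c_file_pos(0x50, 0), SEEK_SET); then read()/write() */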
@@ -485,21 +609,21 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
/* Do an i2c write operation on chain for HFI 0. */
static ssize_t i2c1_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
return __i2c_debugfs_write(file, buf, count, ppos, 0);
}
/* Do an i2c write operation on chain for HFI 1. */
static ssize_t i2c2_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
return __i2c_debugfs_write(file, buf, count, ppos, 1);
}
/* Do an i2c read operation on the chain for the given HFI. */
static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos, u32 target)
+ size_t count, loff_t *ppos, u32 target)
{
struct hfi1_pportdata *ppd;
char *buff;
@@ -511,15 +635,22 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
rcu_read_lock();
ppd = private2ppd(file);
+ /* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
+ i2c_addr = (*ppos >> 16) & 0xffff;
+ offset = *ppos & 0xffff;
+
+ /* explicitly reject invalid address 0 to catch cp and cat */
+ if (i2c_addr == 0) {
+ ret = -EINVAL;
+ goto _return;
+ }
+
buff = kmalloc(count, GFP_KERNEL);
if (!buff) {
ret = -ENOMEM;
goto _return;
}
- i2c_addr = (*ppos >> 16) & 0xff;
- offset = *ppos & 0xffff;
-
total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
if (total_read < 0) {
ret = total_read;
@@ -545,21 +676,21 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
/* Do an i2c read operation on chain for HFI 0. */
static ssize_t i2c1_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
return __i2c_debugfs_read(file, buf, count, ppos, 0);
}
/* Do an i2c read operation on chain for HFI 1. */
static ssize_t i2c2_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
return __i2c_debugfs_read(file, buf, count, ppos, 1);
}
/* Do a QSFP write operation on the i2c chain for the given HFI. */
static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos, u32 target)
+ size_t count, loff_t *ppos, u32 target)
{
struct hfi1_pportdata *ppd;
char *buff;
@@ -605,21 +736,21 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
/* Do a QSFP write operation on i2c chain for HFI 0. */
static ssize_t qsfp1_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
return __qsfp_debugfs_write(file, buf, count, ppos, 0);
}
/* Do a QSFP write operation on i2c chain for HFI 1. */
static ssize_t qsfp2_debugfs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
return __qsfp_debugfs_write(file, buf, count, ppos, 1);
}
/* Do a QSFP read operation on the i2c chain for the given HFI. */
static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos, u32 target)
+ size_t count, loff_t *ppos, u32 target)
{
struct hfi1_pportdata *ppd;
char *buff;
@@ -665,18 +796,116 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
/* Do a QSFP read operation on i2c chain for HFI 0. */
static ssize_t qsfp1_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
return __qsfp_debugfs_read(file, buf, count, ppos, 0);
}
/* Do a QSFP read operation on i2c chain for HFI 1. */
static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+ size_t count, loff_t *ppos)
{
return __qsfp_debugfs_read(file, buf, count, ppos, 1);
}
+static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ ppd = private2ppd(fp);
+
+ ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
+ if (ret) /* failed - release the module */
+ module_put(THIS_MODULE);
+
+ return ret;
+}
+
+static int i2c1_debugfs_open(struct inode *in, struct file *fp)
+{
+ return __i2c_debugfs_open(in, fp, 0);
+}
+
+static int i2c2_debugfs_open(struct inode *in, struct file *fp)
+{
+ return __i2c_debugfs_open(in, fp, 1);
+}
+
+static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+
+ ppd = private2ppd(fp);
+
+ release_chip_resource(ppd->dd, i2c_target(target));
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static int i2c1_debugfs_release(struct inode *in, struct file *fp)
+{
+ return __i2c_debugfs_release(in, fp, 0);
+}
+
+static int i2c2_debugfs_release(struct inode *in, struct file *fp)
+{
+ return __i2c_debugfs_release(in, fp, 1);
+}
+
+static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ ppd = private2ppd(fp);
+
+ ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
+ if (ret) /* failed - release the module */
+ module_put(THIS_MODULE);
+
+ return ret;
+}
+
+static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
+{
+ return __qsfp_debugfs_open(in, fp, 0);
+}
+
+static int qsfp2_debugfs_open(struct inode *in, struct file *fp)
+{
+ return __qsfp_debugfs_open(in, fp, 1);
+}
+
+static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
+{
+ struct hfi1_pportdata *ppd;
+
+ ppd = private2ppd(fp);
+
+ release_chip_resource(ppd->dd, i2c_target(target));
+ module_put(THIS_MODULE);
+
+ return 0;
+}
+
+static int qsfp1_debugfs_release(struct inode *in, struct file *fp)
+{
+ return __qsfp_debugfs_release(in, fp, 0);
+}
+
+static int qsfp2_debugfs_release(struct inode *in, struct file *fp)
+{
+ return __qsfp_debugfs_release(in, fp, 1);
+}
+
#define DEBUGFS_OPS(nm, readroutine, writeroutine) \
{ \
.name = nm, \
@@ -687,6 +916,18 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
}, \
}
+#define DEBUGFS_XOPS(nm, readf, writef, openf, releasef) \
+{ \
+ .name = nm, \
+ .ops = { \
+ .read = readf, \
+ .write = writef, \
+ .llseek = generic_file_llseek, \
+ .open = openf, \
+ .release = releasef \
+ }, \
+}
+
static const struct counter_info cntr_ops[] = {
DEBUGFS_OPS("counter_names", dev_names_read, NULL),
DEBUGFS_OPS("counters", dev_counters_read, NULL),
@@ -695,11 +936,16 @@ static const struct counter_info cntr_ops[] = {
static const struct counter_info port_cntr_ops[] = {
DEBUGFS_OPS("port%dcounters", portcntrs_debugfs_read, NULL),
- DEBUGFS_OPS("i2c1", i2c1_debugfs_read, i2c1_debugfs_write),
- DEBUGFS_OPS("i2c2", i2c2_debugfs_read, i2c2_debugfs_write),
+ DEBUGFS_XOPS("i2c1", i2c1_debugfs_read, i2c1_debugfs_write,
+ i2c1_debugfs_open, i2c1_debugfs_release),
+ DEBUGFS_XOPS("i2c2", i2c2_debugfs_read, i2c2_debugfs_write,
+ i2c2_debugfs_open, i2c2_debugfs_release),
DEBUGFS_OPS("qsfp_dump%d", qsfp_debugfs_dump, NULL),
- DEBUGFS_OPS("qsfp1", qsfp1_debugfs_read, qsfp1_debugfs_write),
- DEBUGFS_OPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write),
+ DEBUGFS_XOPS("qsfp1", qsfp1_debugfs_read, qsfp1_debugfs_write,
+ qsfp1_debugfs_open, qsfp1_debugfs_release),
+ DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
+ qsfp2_debugfs_open, qsfp2_debugfs_release),
+ DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
};
void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
@@ -747,8 +993,8 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
ibd->hfi1_ibdev_dbg,
ppd,
&port_cntr_ops[i].ops,
- port_cntr_ops[i].ops.write == NULL ?
- S_IRUGO : S_IRUGO|S_IWUSR);
+ !port_cntr_ops[i].ops.write ?
+ S_IRUGO : S_IRUGO | S_IWUSR);
}
}
diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/staging/rdma/hfi1/debugfs.h
index 92d6fe146714..b6fb6814f1b8 100644
--- a/drivers/staging/rdma/hfi1/debugfs.h
+++ b/drivers/staging/rdma/hfi1/debugfs.h
@@ -1,14 +1,13 @@
#ifndef _HFI1_DEBUGFS_H
#define _HFI1_DEBUGFS_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c
index 58472e5ac4e5..c05c39da83b1 100644
--- a/drivers/staging/rdma/hfi1/device.c
+++ b/drivers/staging/rdma/hfi1/device.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h
index 2850ff739d81..5bb3e83cf2da 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/staging/rdma/hfi1/device.h
@@ -1,14 +1,13 @@
#ifndef _HFI1_DEVICE_H
#define _HFI1_DEVICE_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
index 0c8831705664..c5b520bf610e 100644
--- a/drivers/staging/rdma/hfi1/diag.c
+++ b/drivers/staging/rdma/hfi1/diag.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -70,6 +67,7 @@
#include "hfi.h"
#include "device.h"
#include "common.h"
+#include "verbs_txreq.h"
#include "trace.h"
#undef pr_fmt
@@ -80,15 +78,15 @@
/* Snoop option mask */
#define SNOOP_DROP_SEND BIT(0)
#define SNOOP_USE_METADATA BIT(1)
+#define SNOOP_SET_VL0TOVL15 BIT(2)
static u8 snoop_flags;
/*
* Extract packet length from LRH header.
- * Why & 0x7FF? Because len is only 11 bits in case it wasn't 0'd we throw the
- * bogus bits away. This is in Dwords so multiply by 4 to get size in bytes
+ * The length is in dwords, so multiply by 4 to get the size in bytes
*/
-#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0x7FF)) << 2)
+#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2)
enum hfi1_filter_status {
HFI1_FILTER_HIT,
@@ -257,7 +255,7 @@ static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
-static struct hfi1_filter_array hfi1_filters[] = {
+static const struct hfi1_filter_array hfi1_filters[] = {
{ hfi1_filter_lid },
{ hfi1_filter_dlid },
{ hfi1_filter_mad_mgmt_class },
@@ -860,7 +858,7 @@ static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
vl = sc4;
} else {
sl = (byte_two >> 4) & 0xf;
- ibp = to_iport(&dd->verbs_dev.ibdev, 1);
+ ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1);
sc5 = ibp->sl_to_sc[sl];
vl = sc_to_vlt(dd, sc5);
if (vl != sc4) {
@@ -966,6 +964,65 @@ static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
return ret;
}
+/**
+ * hfi1_assign_snoop_link_credits - Set up credits for VL15 and others
+ * @ppd: ptr to hfi1 port data
+ * @value: options from user space
+ *
+ * Assumes the rest of the CM credit registers are zero from a
+ * previous global or credit reset.
+ * Leave the shared count at zero for both global and all VLs;
+ * in snoop mode we ideally don't use shared credits.
+ * Reserve 8.5 KB for VL15.
+ * If the total credits are less than 8.5 KB, return an error.
+ * Divide the rest of the credits across VL0 to VL7, and if any of
+ * these levels ends up with fewer than 34 credits (at least
+ * 2048 + 128 bytes), return an error.
+ * The credit registers will be reset to zero on link negotiation or link
+ * up, so this function should be invoked from user space only after the
+ * port has gone past link negotiation and link up.
+ *
+ * Return: 0 if successful, else an error condition
+ *
+ */
+static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd,
+ int value)
+{
+#define OPA_MIN_PER_VL_CREDITS 34 /* 2048 + 128 bytes */
+ struct buffer_control t;
+ int i;
+ struct hfi1_devdata *dd = ppd->dd;
+ u16 total_credits = (value >> 16) & 0xffff;
+ u16 vl15_credits = dd->vl15_init / 2;
+ u16 per_vl_credits;
+ __be16 be_per_vl_credits;
+
+ if (!(ppd->host_link_state & HLS_UP))
+ goto err_exit;
+ if (total_credits < vl15_credits)
+ goto err_exit;
+
+ per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
+
+ if (per_vl_credits < OPA_MIN_PER_VL_CREDITS)
+ goto err_exit;
+
+ memset(&t, 0, sizeof(t));
+ be_per_vl_credits = cpu_to_be16(per_vl_credits);
+
+ for (i = 0; i < TXE_NUM_DATA_VL; i++)
+ t.vl[i].dedicated = be_per_vl_credits;
+
+ t.vl[15].dedicated = cpu_to_be16(vl15_credits);
+ return set_buffer_control(ppd, &t);
+
+err_exit:
+ snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d",
+ ppd->host_link_state, total_credits, vl15_credits);
+
+ return -EINVAL;
+}
+
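A worked example of the credit arithmetic above (numbers illustrative;
vl15_init is device-dependent):

	/* value >> 16 gives total_credits = 1000; assume vl15_init = 128 */
	u16 vl15_credits = 128 / 2;            /* 64 */
	u16 per_vl_credits = (1000 - 64) / 8;  /* VL0..VL7 -> 117 each */
	/* 117 >= OPA_MIN_PER_VL_CREDITS (34), so set_buffer_control is called */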
static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
struct hfi1_devdata *dd;
@@ -1192,6 +1249,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
snoop_flags |= SNOOP_DROP_SEND;
if (value & SNOOP_USE_METADATA)
snoop_flags |= SNOOP_USE_METADATA;
+ if (value & SNOOP_SET_VL0TOVL15) {
+ ppd = &dd->pport[0]; /* first port will do */
+ ret = hfi1_assign_snoop_link_credits(ppd, value);
+ }
break;
default:
return -ENOTTY;
@@ -1603,7 +1664,7 @@ int snoop_recv_handler(struct hfi1_packet *packet)
/*
* Handle snooping and capturing packets when sdma is being used.
*/
-int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
@@ -1616,20 +1677,19 @@ int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
* bypass packets. The only way to send a bypass packet currently is to use the
 * diagpkt interface. When that interface is enabled, snoop/capture is not.
*/
-int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
- struct ahg_ib_header *ahdr = qp->s_hdr;
u32 hdrwords = qp->s_hdrwords;
- struct hfi1_sge_state *ss = qp->s_cur_sge;
+ struct rvt_sge_state *ss = qp->s_cur_sge;
u32 len = qp->s_cur_size;
u32 dwords = (len + 3) >> 2;
u32 plen = hdrwords + dwords + 2; /* includes pbc */
struct hfi1_pportdata *ppd = ps->ppd;
struct snoop_packet *s_packet = NULL;
- u32 *hdr = (u32 *)&ahdr->ibh;
+ u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
u32 length = 0;
- struct hfi1_sge_state temp_ss;
+ struct rvt_sge_state temp_ss;
void *data = NULL;
void *data_start = NULL;
int ret;
@@ -1638,7 +1698,7 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
struct capture_md md;
u32 vl;
u32 hdr_len = hdrwords << 2;
- u32 tlen = HFI1_GET_PKT_LEN(&ahdr->ibh);
+ u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr);
md.u.pbc = 0;
@@ -1665,7 +1725,7 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
md.port = 1;
md.dir = PKT_DIR_EGRESS;
if (likely(pbc == 0)) {
- vl = be16_to_cpu(ahdr->ibh.lrh[0]) >> 12;
+ vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12;
md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
} else {
md.u.pbc = 0;
@@ -1727,7 +1787,7 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
ret = HFI1_FILTER_HIT;
} else {
ret = ppd->dd->hfi1_snoop.filter_callback(
- &ahdr->ibh,
+ &ps->s_txreq->phdr.hdr,
NULL,
ppd->dd->hfi1_snoop.filter_value);
}
@@ -1759,9 +1819,16 @@ int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
spin_unlock_irqrestore(&qp->s_lock, flags);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_rc_send_complete(qp, &ahdr->ibh);
+ hfi1_rc_send_complete(qp,
+ &ps->s_txreq->phdr.hdr);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
+
+ /*
+ * If snoop is dropping the packet we need to put the
+ * txreq back because no one else will.
+ */
+ hfi1_put_txreq(ps->s_txreq);
return 0;
}
break;
diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/staging/rdma/hfi1/dma.c
index e03bd735173c..7e8dab892848 100644
--- a/drivers/staging/rdma/hfi1/dma.c
+++ b/drivers/staging/rdma/hfi1/dma.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -52,7 +49,7 @@
#include "verbs.h"
-#define BAD_DMA_ADDRESS ((u64) 0)
+#define BAD_DMA_ADDRESS ((u64)0)
/*
* The following functions implement driver specific replacements
@@ -74,7 +71,7 @@ static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr,
if (WARN_ON(!valid_dma_direction(direction)))
return BAD_DMA_ADDRESS;
- return (u64) cpu_addr;
+ return (u64)cpu_addr;
}
static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
@@ -95,7 +92,7 @@ static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page,
if (offset + size > PAGE_SIZE)
return BAD_DMA_ADDRESS;
- addr = (u64) page_address(page);
+ addr = (u64)page_address(page);
if (addr)
addr += offset;
@@ -120,7 +117,7 @@ static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl,
return BAD_DMA_ADDRESS;
for_each_sg(sgl, sg, nents, i) {
- addr = (u64) page_address(sg_page(sg));
+ addr = (u64)page_address(sg_page(sg));
if (!addr) {
ret = 0;
break;
@@ -161,14 +158,14 @@ static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size,
if (p)
addr = page_address(p);
if (dma_handle)
- *dma_handle = (u64) addr;
+ *dma_handle = (u64)addr;
return addr;
}
static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size,
void *cpu_addr, u64 dma_handle)
{
- free_pages((unsigned long) cpu_addr, get_order(size));
+ free_pages((unsigned long)cpu_addr, get_order(size));
}
struct ib_dma_mapping_ops hfi1_dma_mapping_ops = {
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/staging/rdma/hfi1/driver.c
index 8485de1fce08..34511e5df1d5 100644
--- a/drivers/staging/rdma/hfi1/driver.c
+++ b/drivers/staging/rdma/hfi1/driver.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -56,6 +53,7 @@
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
+#include <rdma/ib_verbs.h>
#include "hfi.h"
#include "trace.h"
@@ -162,6 +160,22 @@ const char *get_unit_name(int unit)
return iname;
}
+const char *get_card_name(struct rvt_dev_info *rdi)
+{
+ struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
+ struct hfi1_devdata *dd = container_of(ibdev,
+ struct hfi1_devdata, verbs_dev);
+ return get_unit_name(dd->unit);
+}
+
+struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
+{
+ struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
+ struct hfi1_devdata *dd = container_of(ibdev,
+ struct hfi1_devdata, verbs_dev);
+ return dd->pcidev;
+}
+
/*
* Return count of units with at least one port ACTIVE.
*/
@@ -246,7 +260,7 @@ static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
*/
inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
- if (unlikely(!IS_ALIGNED(size, PAGE_SIZE)))
+ if (unlikely(!PAGE_ALIGNED(size)))
return 0;
if (unlikely(size < MIN_EAGER_BUFFER))
return 0;
@@ -265,6 +279,8 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
u32 rte = rhf_rcv_type_err(packet->rhf);
int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
struct hfi1_ibport *ibp = &ppd->ibport_data;
+ struct hfi1_devdata *dd = ppd->dd;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
return;
@@ -283,9 +299,9 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
goto drop;
/* Check for GRH */
- if (lnh == HFI1_LRH_BTH)
+ if (lnh == HFI1_LRH_BTH) {
ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH) {
+ } else if (lnh == HFI1_LRH_GRH) {
u32 vtf;
ohdr = &hdr->u.l.oth;
@@ -295,17 +311,17 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
goto drop;
rcv_flags |= HFI1_HAS_GRH;
- } else
+ } else {
goto drop;
-
+ }
/* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
- if (lid < HFI1_MULTICAST_LID_BASE) {
- struct hfi1_qp *qp;
+ qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
+ struct rvt_qp *qp;
unsigned long flags;
rcu_read_lock();
- qp = hfi1_lookup_qpn(ibp, qp_num);
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
if (!qp) {
rcu_read_unlock();
goto drop;
@@ -318,9 +334,9 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
spin_lock_irqsave(&qp->r_lock, flags);
/* Check for valid receive state. */
- if (!(ib_hfi1_state_ops[qp->state] &
- HFI1_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
+ if (!(ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
}
switch (qp->ibqp.qp_type) {
@@ -352,7 +368,7 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
if (rhf_use_egr_bfr(packet->rhf))
ebuf = packet->ebuf;
- if (ebuf == NULL)
+ if (!ebuf)
goto drop; /* this should never happen */
if (lnh == HFI1_LRH_BTH)
@@ -368,9 +384,9 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
if (opcode == IB_OPCODE_CNP) {
/*
* Only in pre-B0 h/w is the CNP_OPCODE handled
- * via this code path (errata 291394).
+ * via this code path.
*/
- struct hfi1_qp *qp = NULL;
+ struct rvt_qp *qp = NULL;
u32 lqpn, rqpn;
u16 rlid;
u8 svc_type, sl, sc5;
@@ -380,10 +396,10 @@ static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
sc5 |= 0x10;
sl = ibp->sc_to_sl[sc5];
- lqpn = be32_to_cpu(bth[1]) & HFI1_QPN_MASK;
+ lqpn = be32_to_cpu(bth[1]) & RVT_QPN_MASK;
rcu_read_lock();
- qp = hfi1_lookup_qpn(ibp, lqpn);
- if (qp == NULL) {
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
+ if (!qp) {
rcu_read_unlock();
goto drop;
}
@@ -419,9 +435,8 @@ drop:
}
static inline void init_packet(struct hfi1_ctxtdata *rcd,
- struct hfi1_packet *packet)
+ struct hfi1_packet *packet)
{
-
packet->rsize = rcd->rcvhdrqentsize; /* words */
packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
packet->rcd = rcd;
@@ -434,12 +449,7 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
packet->rcv_flags = 0;
}
-#ifndef CONFIG_PRESCAN_RXQ
-static void prescan_rxq(struct hfi1_packet *packet) {}
-#else /* !CONFIG_PRESCAN_RXQ */
-static int prescan_receive_queue;
-
-static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
+static void process_ecn(struct rvt_qp *qp, struct hfi1_ib_header *hdr,
struct hfi1_other_headers *ohdr,
u64 rhf, u32 bth1, struct ib_grh *grh)
{
@@ -453,7 +463,7 @@ static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
case IB_QPT_GSI:
case IB_QPT_UD:
rlid = be16_to_cpu(hdr->lrh[3]);
- rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
+ rqpn = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
svc_type = IB_CC_SVCTYPE_UD;
break;
case IB_QPT_UC:
@@ -483,7 +493,7 @@ static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
if (bth1 & HFI1_BECN_SMASK) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 lqpn = bth1 & HFI1_QPN_MASK;
+ u32 lqpn = bth1 & RVT_QPN_MASK;
u8 sl = ibp->sc_to_sl[sc5];
process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
@@ -562,26 +572,31 @@ static inline void update_ps_mdata(struct ps_mdata *mdata,
 * containing Explicit Congestion Notifications (FECNs or BECNs).
* When an ECN is found, process the Congestion Notification, and toggle
* it off.
+ * This is declared as a macro so the per-port enable can be checked
+ * quickly, avoiding the overhead of a function call when prescanning
+ * is disabled.
*/
-static void prescan_rxq(struct hfi1_packet *packet)
+#define prescan_rxq(rcd, packet) \
+ do { \
+ if (rcd->ppd->cc_prescan) \
+ __prescan_rxq(packet); \
+ } while (0)
+static void __prescan_rxq(struct hfi1_packet *packet)
{
struct hfi1_ctxtdata *rcd = packet->rcd;
struct ps_mdata mdata;
- if (!prescan_receive_queue)
- return;
-
init_ps_mdata(&mdata, packet);
while (1) {
struct hfi1_devdata *dd = rcd->dd;
struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
- __le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head +
+ __le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
dd->rhf_offset;
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
struct hfi1_ib_header *hdr;
struct hfi1_other_headers *ohdr;
struct ib_grh *grh = NULL;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
u64 rhf = rhf_to_cpu(rhf_addr);
u32 etype = rhf_rcv_type(rhf), qpn, bth1;
int is_ecn = 0;
@@ -600,25 +615,25 @@ static void prescan_rxq(struct hfi1_packet *packet)
hfi1_get_msgheader(dd, rhf_addr);
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_BTH)
+ if (lnh == HFI1_LRH_BTH) {
ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH) {
+ } else if (lnh == HFI1_LRH_GRH) {
ohdr = &hdr->u.l.oth;
grh = &hdr->u.l.grh;
- } else
+ } else {
goto next; /* just in case */
-
+ }
bth1 = be32_to_cpu(ohdr->bth[1]);
is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
if (!is_ecn)
goto next;
- qpn = bth1 & HFI1_QPN_MASK;
+ qpn = bth1 & RVT_QPN_MASK;
rcu_read_lock();
- qp = hfi1_lookup_qpn(ibp, qpn);
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
- if (qp == NULL) {
+ if (!qp) {
rcu_read_unlock();
goto next;
}
@@ -633,7 +648,6 @@ next:
update_ps_mdata(&mdata, rcd);
}
}
-#endif /* CONFIG_PRESCAN_RXQ */
static inline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
@@ -683,8 +697,9 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
* The +2 is the size of the RHF.
*/
prefetch_range(packet->ebuf,
- packet->tlen - ((packet->rcd->rcvhdrqentsize -
- (rhf_hdrq_offset(packet->rhf)+2)) * 4));
+ packet->tlen - ((packet->rcd->rcvhdrqentsize -
+ (rhf_hdrq_offset(packet->rhf)
+ + 2)) * 4));
}
/*
@@ -712,7 +727,7 @@ static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
}
}
- packet->rhf_addr = (__le32 *) packet->rcd->rcvhdrq + packet->rhqoff +
+ packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
packet->rcd->dd->rhf_offset;
packet->rhf = rhf_to_cpu(packet->rhf_addr);
@@ -737,7 +752,6 @@ static inline void process_rcv_update(int last, struct hfi1_packet *packet)
static inline void finish_packet(struct hfi1_packet *packet)
{
-
/*
* Nothing we need to free for the packet.
*
@@ -746,14 +760,12 @@ static inline void finish_packet(struct hfi1_packet *packet)
*/
update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
packet->etail, rcv_intr_dynamic, packet->numpkt);
-
}
static inline void process_rcv_qp_work(struct hfi1_packet *packet)
{
-
struct hfi1_ctxtdata *rcd;
- struct hfi1_qp *qp, *nqp;
+ struct rvt_qp *qp, *nqp;
rcd = packet->rcd;
rcd->head = packet->rhqoff;
@@ -764,17 +776,17 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
*/
list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
list_del_init(&qp->rspwait);
- if (qp->r_flags & HFI1_R_RSP_DEFERED_ACK) {
- qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
+ if (qp->r_flags & RVT_R_RSP_NAK) {
+ qp->r_flags &= ~RVT_R_RSP_NAK;
hfi1_send_rc_ack(rcd, qp, 0);
}
- if (qp->r_flags & HFI1_R_RSP_SEND) {
+ if (qp->r_flags & RVT_R_RSP_SEND) {
unsigned long flags;
- qp->r_flags &= ~HFI1_R_RSP_SEND;
+ qp->r_flags &= ~RVT_R_RSP_SEND;
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_hfi1_state_ops[qp->state] &
- HFI1_PROCESS_OR_FLUSH_SEND)
+ if (ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_OR_FLUSH_SEND)
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
@@ -799,7 +811,7 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
goto bail;
}
- prescan_rxq(&packet);
+ prescan_rxq(rcd, &packet);
while (last == RCV_PKT_OK) {
last = process_rcv_packet(&packet, thread);
@@ -830,7 +842,7 @@ int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
}
smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
- prescan_rxq(&packet);
+ prescan_rxq(rcd, &packet);
while (last == RCV_PKT_OK) {
last = process_rcv_packet(&packet, thread);
@@ -862,6 +874,37 @@ static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
&handle_receive_interrupt_dma_rtail;
}
+void set_all_slowpath(struct hfi1_devdata *dd)
+{
+ int i;
+
+ /* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
+ for (i = HFI1_CTRL_CTXT + 1; i < dd->first_user_ctxt; i++)
+ dd->rcd[i]->do_interrupt = &handle_receive_interrupt;
+}
+
+static inline int set_armed_to_active(struct hfi1_ctxtdata *rcd,
+ struct hfi1_packet packet,
+ struct hfi1_devdata *dd)
+{
+ struct work_struct *lsaw = &rcd->ppd->linkstate_active_work;
+ struct hfi1_message_header *hdr = hfi1_get_msgheader(packet.rcd->dd,
+ packet.rhf_addr);
+
+ if (hdr2sc(hdr, packet.rhf) != 0xf) {
+ int hwstate = read_logical_state(dd);
+
+ if (hwstate != LSTATE_ACTIVE) {
+ dd_dev_info(dd, "Unexpected link state %d\n", hwstate);
+ return 0;
+ }
+
+ queue_work(rcd->ppd->hfi1_wq, lsaw);
+ return 1;
+ }
+ return 0;
+}
+
/*
* handle_receive_interrupt - receive a packet
* @rcd: the context
@@ -910,17 +953,17 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
}
}
- prescan_rxq(&packet);
+ prescan_rxq(rcd, &packet);
while (last == RCV_PKT_OK) {
-
- if (unlikely(dd->do_drop && atomic_xchg(&dd->drop_packet,
- DROP_PACKET_OFF) == DROP_PACKET_ON)) {
+ if (unlikely(dd->do_drop &&
+ atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
+ DROP_PACKET_ON)) {
dd->do_drop = 0;
/* On to the next packet */
packet.rhqoff += packet.rsize;
- packet.rhf_addr = (__le32 *) rcd->rcvhdrq +
+ packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
packet.rhqoff +
dd->rhf_offset;
packet.rhf = rhf_to_cpu(packet.rhf_addr);
@@ -929,6 +972,11 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
last = skip_rcv_packet(&packet, thread);
skip_pkt = 0;
} else {
+ /* Auto activate link on non-SC15 packet receive */
+ if (unlikely(rcd->ppd->host_link_state ==
+ HLS_UP_ARMED) &&
+ set_armed_to_active(rcd, packet, dd))
+ goto bail;
last = process_rcv_packet(&packet, thread);
}
@@ -940,8 +988,7 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
if (seq != rcd->seq_cnt)
last = RCV_PKT_DONE;
if (needset) {
- dd_dev_info(dd,
- "Switching to NO_DMA_RTAIL\n");
+ dd_dev_info(dd, "Switching to NO_DMA_RTAIL\n");
set_all_nodma_rtail(dd);
needset = 0;
}
@@ -984,6 +1031,42 @@ bail:
}
/*
+ * We may discover in the interrupt that the hardware link state has
+ * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet),
+ * and we need to update the driver's notion of the link state. We cannot
+ * run set_link_state from interrupt context, so we queue this function on
+ * a workqueue.
+ *
+ * We delay the regular interrupt processing until after the state changes
+ * so that the link will be in the correct state by the time any application
+ * we wake up attempts to send a reply to any message it received.
+ * (Subsequent receive interrupts may force the wakeup before we
+ * update the link state.)
+ *
+ * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
+ * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
+ * so we're safe from use-after-free of the rcd.
+ */
+void receive_interrupt_work(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ linkstate_active_work);
+ struct hfi1_devdata *dd = ppd->dd;
+ int i;
+
+ /* Received non-SC15 packet implies neighbor_normal */
+ ppd->neighbor_normal = 1;
+ set_link_state(ppd, HLS_UP_ACTIVE);
+
+ /*
+ * Interrupt all kernel contexts that could have had an
+ * interrupt during auto activation.
+ */
+ for (i = HFI1_CTRL_CTXT; i < dd->first_user_ctxt; i++)
+ force_recv_intr(dd->rcd[i]);
+}
+
+/*
* Convert a given MTU size to the on-wire MAD packet enumeration.
* Return -1 if the size is invalid.
*/
@@ -1037,9 +1120,9 @@ int set_mtu(struct hfi1_pportdata *ppd)
ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);
mutex_lock(&ppd->hls_lock);
- if (ppd->host_link_state == HLS_UP_INIT
- || ppd->host_link_state == HLS_UP_ARMED
- || ppd->host_link_state == HLS_UP_ACTIVE)
+ if (ppd->host_link_state == HLS_UP_INIT ||
+ ppd->host_link_state == HLS_UP_ARMED ||
+ ppd->host_link_state == HLS_UP_ACTIVE)
is_up = 1;
drain = !is_ax(dd) && is_up;
@@ -1082,79 +1165,80 @@ int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
return 0;
}
-/*
- * Following deal with the "obviously simple" task of overriding the state
- * of the LEDs, which normally indicate link physical and logical status.
- * The complications arise in dealing with different hardware mappings
- * and the board-dependent routine being called from interrupts.
- * and then there's the requirement to _flash_ them.
- */
-#define LED_OVER_FREQ_SHIFT 8
-#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
-/* Below is "non-zero" to force override, but both actual LEDs are off */
-#define LED_OVER_BOTH_OFF (8)
+void shutdown_led_override(struct hfi1_pportdata *ppd)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+
+ /*
+ * This pairs with the memory barrier in hfi1_start_led_override to
+ * ensure that we read the correct state of LED beaconing represented
+ * by led_override_timer_active
+ */
+ smp_rmb();
+ if (atomic_read(&ppd->led_override_timer_active)) {
+ del_timer_sync(&ppd->led_override_timer);
+ atomic_set(&ppd->led_override_timer_active, 0);
+ /* Ensure the atomic_set is visible to all CPUs */
+ smp_wmb();
+ }
+
+ /* Hand control of the LED to the DC for normal operation */
+ write_csr(dd, DCC_CFG_LED_CNTRL, 0);
+}
static void run_led_override(unsigned long opaque)
{
struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
struct hfi1_devdata *dd = ppd->dd;
- int timeoff;
- int ph_idx;
+ unsigned long timeout;
+ int phase_idx;
if (!(dd->flags & HFI1_INITTED))
return;
- ph_idx = ppd->led_override_phase++ & 1;
- ppd->led_override = ppd->led_override_vals[ph_idx];
- timeoff = ppd->led_override_timeoff;
+ phase_idx = ppd->led_override_phase & 1;
- /*
- * don't re-fire the timer if user asked for it to be off; we let
- * it fire one more time after they turn it off to simplify
- */
- if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
- mod_timer(&ppd->led_override_timer, jiffies + timeoff);
+ setextled(dd, phase_idx);
+
+ timeout = ppd->led_override_vals[phase_idx];
+
+ /* Set up for next phase */
+ ppd->led_override_phase = !ppd->led_override_phase;
+
+ mod_timer(&ppd->led_override_timer, jiffies + timeout);
}
-void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val)
+/*
+ * To have the LED blink in a particular pattern, provide timeon and timeoff
+ * in milliseconds.
+ * To turn off custom blinking and return to normal operation, use
+ * shutdown_led_override()
+ */
+void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
+ unsigned int timeoff)
{
- struct hfi1_devdata *dd = ppd->dd;
- int timeoff, freq;
-
- if (!(dd->flags & HFI1_INITTED))
+ if (!(ppd->dd->flags & HFI1_INITTED))
return;
- /* First check if we are blinking. If not, use 1HZ polling */
- timeoff = HZ;
- freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
+ /* Convert to jiffies for direct use in timer */
+ ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
+ ppd->led_override_vals[1] = msecs_to_jiffies(timeon);
- if (freq) {
- /* For blink, set each phase from one nybble of val */
- ppd->led_override_vals[0] = val & 0xF;
- ppd->led_override_vals[1] = (val >> 4) & 0xF;
- timeoff = (HZ << 4)/freq;
- } else {
- /* Non-blink set both phases the same. */
- ppd->led_override_vals[0] = val & 0xF;
- ppd->led_override_vals[1] = val & 0xF;
- }
- ppd->led_override_timeoff = timeoff;
+ /* Arbitrarily start from LED on phase */
+ ppd->led_override_phase = 1;
/*
* If the timer has not already been started, do so. Use a "quick"
- * timeout so the function will be called soon, to look at our request.
+ * timeout so the handler will be called soon to look at our request.
*/
- if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
- /* Need to start timer */
+ if (!timer_pending(&ppd->led_override_timer)) {
setup_timer(&ppd->led_override_timer, run_led_override,
- (unsigned long)ppd);
-
+ (unsigned long)ppd);
ppd->led_override_timer.expires = jiffies + 1;
add_timer(&ppd->led_override_timer);
- } else {
- if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
- mod_timer(&ppd->led_override_timer, jiffies + 1);
- atomic_dec(&ppd->led_override_timer_active);
+ atomic_set(&ppd->led_override_timer_active, 1);
+ /* Ensure the atomic_set is visible to all CPUs */
+ smp_wmb();
}
}
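A sketch of the intended calling pattern for the new interface (durations
illustrative):

	/* Blink at roughly 1 Hz: 500 ms on, 500 ms off */
	hfi1_start_led_override(ppd, 500, 500);
	/* ... locate/identify the port ... */
	shutdown_led_override(ppd);	/* hand LEDs back to the DC */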
@@ -1184,8 +1268,8 @@ int hfi1_reset_device(int unit)
if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
dd_dev_info(dd,
- "Invalid unit number %u or not initialized or not present\n",
- unit);
+ "Invalid unit number %u or not initialized or not present\n",
+ unit);
ret = -ENXIO;
goto bail;
}
@@ -1203,14 +1287,8 @@ int hfi1_reset_device(int unit)
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
- if (atomic_read(&ppd->led_override_timer_active)) {
- /* Need to stop LED timer, _then_ shut off LEDs */
- del_timer_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- }
- /* Shut off LEDs after we are sure timer is not running */
- ppd->led_override = LED_OVER_BOTH_OFF;
+ shutdown_led_override(ppd);
}
if (dd->flags & HFI1_HAS_SEND_DMA)
sdma_exit(dd);
@@ -1221,11 +1299,11 @@ int hfi1_reset_device(int unit)
if (ret)
dd_dev_err(dd,
- "Reinitialize unit %u after reset failed with %d\n",
- unit, ret);
+ "Reinitialize unit %u after reset failed with %d\n",
+ unit, ret);
else
dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
- unit);
+ unit);
bail:
return ret;
@@ -1282,7 +1360,7 @@ int process_receive_bypass(struct hfi1_packet *packet)
handle_eflags(packet);
dd_dev_err(packet->rcd->dd,
- "Bypass packets are not supported in normal operation. Dropping\n");
+ "Bypass packets are not supported in normal operation. Dropping\n");
return RHF_RCV_CONTINUE;
}
@@ -1320,6 +1398,6 @@ int kdeth_process_eager(struct hfi1_packet *packet)
int process_receive_invalid(struct hfi1_packet *packet)
{
dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
- rhf_rcv_type(packet->rhf));
+ rhf_rcv_type(packet->rhf));
return RHF_RCV_CONTINUE;
}
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/staging/rdma/hfi1/efivar.c
index 7dc5bae220e0..106349fc1fb9 100644
--- a/drivers/staging/rdma/hfi1/efivar.c
+++ b/drivers/staging/rdma/hfi1/efivar.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -83,8 +80,7 @@ static int read_efi_var(const char *name, unsigned long *size,
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return -EOPNOTSUPP;
- uni_name = kzalloc(sizeof(efi_char16_t) * (strlen(name) + 1),
- GFP_KERNEL);
+ uni_name = kcalloc(strlen(name) + 1, sizeof(efi_char16_t), GFP_KERNEL);
temp_buffer = kzalloc(EFI_DATA_SIZE, GFP_KERNEL);
if (!uni_name || !temp_buffer) {
@@ -128,13 +124,12 @@ static int read_efi_var(const char *name, unsigned long *size,
* temporary buffer. Now allocate a correctly sized
* buffer.
*/
- data = kmalloc(temp_size, GFP_KERNEL);
+ data = kmemdup(temp_buffer, temp_size, GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
goto fail;
}
- memcpy(data, temp_buffer, temp_size);
*size = temp_size;
*return_data = data;
diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/staging/rdma/hfi1/efivar.h
index 070706225c51..94e9e70de568 100644
--- a/drivers/staging/rdma/hfi1/efivar.h
+++ b/drivers/staging/rdma/hfi1/efivar.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c
index fb620c97f592..bd8771570f81 100644
--- a/drivers/staging/rdma/hfi1/eprom.c
+++ b/drivers/staging/rdma/hfi1/eprom.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -99,17 +96,17 @@
/* sleep length while waiting for controller */
#define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */
-#define COUNT_DELAY_SEC(n) ((n) * (1000000/WAIT_SLEEP_US))
+#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US))
/* GPIO pins */
-#define EPROM_WP_N (1ull << 14) /* EPROM write line */
+#define EPROM_WP_N BIT_ULL(14) /* EPROM write line */
/*
- * Use the EP mutex to guard against other callers from within the driver.
- * Also covers usage of eprom_available.
+ * How long to wait for the EPROM to become available, in ms.
+ * The spec 32 Mb EPROM takes around 40s to erase then write.
+ * Double it for safety.
*/
-static DEFINE_MUTEX(eprom_mutex);
-static int eprom_available; /* default: not available */
+#define EPROM_TIMEOUT 80000 /* ms */
/*
* Turn on external enable line that allows writing on the flash.
@@ -117,11 +114,9 @@ static int eprom_available; /* default: not available */
static void write_enable(struct hfi1_devdata *dd)
{
/* raise signal */
- write_csr(dd, ASIC_GPIO_OUT,
- read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
+ write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
/* raise enable */
- write_csr(dd, ASIC_GPIO_OE,
- read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
+ write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
}
/*
@@ -130,11 +125,9 @@ static void write_enable(struct hfi1_devdata *dd)
static void write_disable(struct hfi1_devdata *dd)
{
/* lower signal */
- write_csr(dd, ASIC_GPIO_OUT,
- read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
+ write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
/* lower enable */
- write_csr(dd, ASIC_GPIO_OE,
- read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
+ write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
}
/*
@@ -212,8 +205,8 @@ static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
/* check the end points for the minimum erase */
if ((start & MASK_4KB) || (end & MASK_4KB)) {
dd_dev_err(dd,
- "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
- __func__, start, end);
+ "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
+ __func__, start, end);
return -EINVAL;
}
@@ -256,7 +249,7 @@ static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
int i;
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
- for (i = 0; i < EP_PAGE_SIZE/sizeof(u32); i++)
+ for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++)
result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
}
@@ -267,7 +260,7 @@ static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
{
u32 offset;
- u32 buffer[EP_PAGE_SIZE/sizeof(u32)];
+ u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
int ret = 0;
/* reject anything not on an EPROM page boundary */
@@ -277,7 +270,7 @@ static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
read_page(dd, start + offset, buffer);
if (copy_to_user((void __user *)(addr + offset),
- buffer, EP_PAGE_SIZE)) {
+ buffer, EP_PAGE_SIZE)) {
ret = -EFAULT;
goto done;
}
@@ -298,7 +291,7 @@ static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
write_csr(dd, ASIC_EEP_DATA, data[0]);
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset));
- for (i = 1; i < EP_PAGE_SIZE/sizeof(u32); i++)
+ for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++)
write_csr(dd, ASIC_EEP_DATA, data[i]);
/* will close the open page */
return wait_for_not_busy(dd);
@@ -310,7 +303,7 @@ static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
{
u32 offset;
- u32 buffer[EP_PAGE_SIZE/sizeof(u32)];
+ u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
int ret = 0;
/* reject anything not on an EPROM page boundary */
@@ -321,7 +314,7 @@ static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
if (copy_from_user(buffer, (void __user *)(addr + offset),
- EP_PAGE_SIZE)) {
+ EP_PAGE_SIZE)) {
ret = -EFAULT;
goto done;
}
@@ -353,44 +346,42 @@ static inline u32 extract_rstart(u32 composite)
*
* Return 0 on success, -ERRNO on error
*/
-int handle_eprom_command(const struct hfi1_cmd *cmd)
+int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
{
struct hfi1_devdata *dd;
u32 dev_id;
u32 rlen; /* range length */
u32 rstart; /* range start */
+ int i_minor;
int ret = 0;
/*
- * The EPROM is per-device, so use unit 0 as that will always
- * exist.
+ * Map the device file to device data using the relative minor.
+ * The device file minor number is the unit number + 1. 0 is
+ * the generic device file - reject it.
*/
- dd = hfi1_lookup(0);
+ i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
+ if (i_minor <= 0)
+ return -EINVAL;
+ dd = hfi1_lookup(i_minor - 1);
if (!dd) {
- pr_err("%s: cannot find unit 0!\n", __func__);
+ pr_err("%s: cannot find unit %d!\n", __func__, i_minor);
return -EINVAL;
}
- /* lock against other callers touching the ASIC block */
- mutex_lock(&eprom_mutex);
-
- /* some platforms do not have an EPROM */
- if (!eprom_available) {
- ret = -ENOSYS;
- goto done_asic;
- }
+ /* some devices do not have an EPROM */
+ if (!dd->eprom_available)
+ return -EOPNOTSUPP;
- /* lock against the other HFI on another OS */
- ret = acquire_hw_mutex(dd);
+ ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
if (ret) {
- dd_dev_err(dd,
- "%s: unable to acquire hw mutex, no EPROM support\n",
- __func__);
+ dd_dev_err(dd, "%s: unable to acquire EPROM resource\n",
+ __func__);
goto done_asic;
}
dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
- __func__, cmd->type, cmd->len, cmd->addr);
+ __func__, cmd->type, cmd->len, cmd->addr);
switch (cmd->type) {
case HFI1_CMD_EP_INFO:
@@ -401,7 +392,7 @@ int handle_eprom_command(const struct hfi1_cmd *cmd)
dev_id = read_device_id(dd);
/* addr points to a u32 user buffer */
if (copy_to_user((void __user *)cmd->addr, &dev_id,
- sizeof(u32)))
+ sizeof(u32)))
ret = -EFAULT;
break;
@@ -429,14 +420,13 @@ int handle_eprom_command(const struct hfi1_cmd *cmd)
default:
dd_dev_err(dd, "%s: unexpected command %d\n",
- __func__, cmd->type);
+ __func__, cmd->type);
ret = -EINVAL;
break;
}
- release_hw_mutex(dd);
+ release_chip_resource(dd, CR_EPROM);
done_asic:
- mutex_unlock(&eprom_mutex);
return ret;
}
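
handle_eprom_command() now derives the unit from the opened device node instead of hard-coding unit 0. The mapping, restated as a sketch assembled from the hunk above:

/*
 * minor = HFI1_USER_MINOR_BASE + unit + 1; offset 0 is the generic
 * /dev node, which carries no unit number and is rejected
 */
int i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;

if (i_minor <= 0)
        return -EINVAL;                 /* generic node or bad minor */
dd = hfi1_lookup(i_minor - 1);          /* unit = minor offset - 1 */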
@@ -447,44 +437,35 @@ int eprom_init(struct hfi1_devdata *dd)
{
int ret = 0;
- /* only the discrete chip has an EPROM, nothing to do */
+ /* only the discrete chip has an EPROM */
if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
return 0;
- /* lock against other callers */
- mutex_lock(&eprom_mutex);
- if (eprom_available) /* already initialized */
- goto done_asic;
-
/*
- * Lock against the other HFI on another OS - the mutex above
- * would have caught anything in this driver. It is OK if
- * both OSes reset the EPROM - as long as they don't do it at
- * the same time.
+ * It is OK if both HFIs reset the EPROM as long as they don't
+ * do it at the same time.
*/
- ret = acquire_hw_mutex(dd);
+ ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
if (ret) {
dd_dev_err(dd,
- "%s: unable to acquire hw mutex, no EPROM support\n",
- __func__);
+ "%s: unable to acquire EPROM resource, no EPROM support\n",
+ __func__);
goto done_asic;
}
/* reset EPROM to be sure it is in a good state */
/* set reset */
- write_csr(dd, ASIC_EEP_CTL_STAT,
- ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
+ write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
/* clear reset, set speed */
write_csr(dd, ASIC_EEP_CTL_STAT,
- EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
+ EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
/* wake the device with command "release powerdown NoID" */
write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
- eprom_available = 1;
- release_hw_mutex(dd);
+ dd->eprom_available = true;
+ release_chip_resource(dd, CR_EPROM);
done_asic:
- mutex_unlock(&eprom_mutex);
return ret;
}
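
The eprom_init()/handle_eprom_command() rework drops the driver-global eprom_mutex and eprom_available pair in favor of a per-device flag plus the shared CR_EPROM chip resource. The resulting caller contract, as a sketch using the fields and constants from the hunks above:

/* set once by eprom_init() while holding the chip resource */
if (!dd->eprom_available)
        return -EOPNOTSUPP;

ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
if (ret)        /* -EBUSY after the timeout, -EIO if the mutex is stuck */
        return ret;
/* ... issue EPROM commands ... */
release_chip_resource(dd, CR_EPROM);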
diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/staging/rdma/hfi1/eprom.h
index 64a64276be81..d41f0b1afb15 100644
--- a/drivers/staging/rdma/hfi1/eprom.h
+++ b/drivers/staging/rdma/hfi1/eprom.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -52,4 +49,4 @@ struct hfi1_cmd;
struct hfi1_devdata;
int eprom_init(struct hfi1_devdata *dd);
-int handle_eprom_command(const struct hfi1_cmd *cmd);
+int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd);
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index d57d549052c8..8396dc5fb6c1 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -60,6 +57,8 @@
#include "user_sdma.h"
#include "user_exp_rcv.h"
#include "eprom.h"
+#include "aspm.h"
+#include "mmu_rb.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -96,9 +95,6 @@ static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
-static int exp_tid_setup(struct file *, struct hfi1_tid_info *);
-static int exp_tid_free(struct file *, struct hfi1_tid_info *);
-static void unlock_exp_tids(struct hfi1_ctxtdata *);
static const struct file_operations hfi1_file_ops = {
.owner = THIS_MODULE,
@@ -164,7 +160,6 @@ enum mmap_types {
#define dbg(fmt, ...) \
pr_info(fmt, ##__VA_ARGS__)
-
static inline int is_valid_mmap(u64 token)
{
return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
@@ -188,6 +183,7 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
struct hfi1_cmd cmd;
struct hfi1_user_info uinfo;
struct hfi1_tid_info tinfo;
+ unsigned long addr;
ssize_t consumed = 0, copy = 0, ret = 0;
void *dest = NULL;
__u64 user_val = 0;
@@ -219,6 +215,7 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
break;
case HFI1_CMD_TID_UPDATE:
case HFI1_CMD_TID_FREE:
+ case HFI1_CMD_TID_INVAL_READ:
copy = sizeof(tinfo);
dest = &tinfo;
break;
@@ -294,9 +291,8 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
sc_return_credits(uctxt->sc);
break;
case HFI1_CMD_TID_UPDATE:
- ret = exp_tid_setup(fp, &tinfo);
+ ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
if (!ret) {
- unsigned long addr;
/*
* Copy the number of tidlist entries we used
* and the length of the buffer we registered.
@@ -311,8 +307,25 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
ret = -EFAULT;
}
break;
+ case HFI1_CMD_TID_INVAL_READ:
+ ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
+ if (ret)
+ break;
+ addr = (unsigned long)cmd.addr +
+ offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ ret = -EFAULT;
+ break;
case HFI1_CMD_TID_FREE:
- ret = exp_tid_free(fp, &tinfo);
+ ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
+ if (ret)
+ break;
+ addr = (unsigned long)cmd.addr +
+ offsetof(struct hfi1_tid_info, tidcnt);
+ if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+ sizeof(tinfo.tidcnt)))
+ ret = -EFAULT;
break;
case HFI1_CMD_RECV_CTRL:
ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
@@ -373,8 +386,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
break;
}
if (dd->flags & HFI1_FORCED_FREEZE) {
- /* Don't allow context reset if we are into
- * forced freeze */
+ /*
+ * Don't allow context reset if we are in
+ * forced freeze
+ */
ret = -ENODEV;
break;
}
@@ -382,8 +397,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
ret = sc_enable(sc);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
uctxt->ctxt);
- } else
+ } else {
ret = sc_restart(sc);
+ }
if (!ret)
sc_return_credits(sc);
break;
@@ -393,7 +409,7 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
case HFI1_CMD_EP_ERASE_RANGE:
case HFI1_CMD_EP_READ_RANGE:
case HFI1_CMD_EP_WRITE_RANGE:
- ret = handle_eprom_command(&cmd);
+ ret = handle_eprom_command(fp, &cmd);
break;
}
@@ -487,8 +503,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
* Map only the amount allocated to the context, not the
* entire available context's PIO space.
*/
- memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE,
- PAGE_SIZE);
+ memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
flags &= ~VM_MAYREAD;
flags |= VM_DONTCOPY | VM_DONTEXPAND;
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
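
The PAGE_ALIGN() conversions in this file change no behavior; PAGE_ALIGN(x) is simply ALIGN(x, PAGE_SIZE) from linux/mm.h:

memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE, PAGE_SIZE);
memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE); /* identical */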
@@ -638,7 +653,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
goto done;
}
memaddr = (u64)cq->comps;
- memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
+ memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
flags |= VM_IO | VM_DONTEXPAND;
vmf = 1;
break;
@@ -733,6 +748,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
/* drain user sdma queue */
hfi1_user_sdma_free_queues(fdata);
+ /* release the cpu */
+ hfi1_put_proc_affinity(dd, fdata->rec_cpu_num);
+
/*
* Clear any left over, unhandled events so the next process that
* gets this context doesn't get confused.
@@ -756,6 +774,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
HFI1_RCVCTRL_TIDFLOW_DIS |
HFI1_RCVCTRL_INTRAVAIL_DIS |
+ HFI1_RCVCTRL_TAILUPD_DIS |
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
@@ -778,14 +797,12 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
uctxt->pionowait = 0;
uctxt->event_flags = 0;
- hfi1_clear_tids(uctxt);
+ hfi1_user_exp_rcv_free(fdata);
hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
- if (uctxt->tid_pg_list)
- unlock_exp_tids(uctxt);
-
hfi1_stats.sps_ctxts--;
- dd->freectxts++;
+ if (++dd->freectxts == dd->num_user_contexts)
+ aspm_enable_all(dd);
mutex_unlock(&hfi1_mutex);
hfi1_free_ctxtdata(dd, uctxt);
done:
@@ -827,8 +844,16 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
mutex_lock(&hfi1_mutex);
/* First, lets check if we need to setup a shared context? */
- if (uinfo->subctxt_cnt)
+ if (uinfo->subctxt_cnt) {
+ struct hfi1_filedata *fd = fp->private_data;
+
ret = find_shared_ctxt(fp, uinfo);
+ if (ret < 0)
+ goto done_unlock;
+ if (ret)
+ fd->rec_cpu_num = hfi1_get_proc_affinity(
+ fd->uctxt->dd, fd->uctxt->numa_id);
+ }
/*
* We execute the following block if we couldn't find a
@@ -838,6 +863,7 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
ret = get_user_context(fp, uinfo, i_minor - 1, alg);
}
+done_unlock:
mutex_unlock(&hfi1_mutex);
done:
return ret;
@@ -963,7 +989,7 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt;
unsigned ctxt;
- int ret;
+ int ret, numa;
if (dd->flags & HFI1_FROZEN) {
/*
@@ -983,17 +1009,26 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
if (ctxt == dd->num_rcv_contexts)
return -EBUSY;
- uctxt = hfi1_create_ctxtdata(dd->pport, ctxt);
+ fd->rec_cpu_num = hfi1_get_proc_affinity(dd, -1);
+ if (fd->rec_cpu_num != -1)
+ numa = cpu_to_node(fd->rec_cpu_num);
+ else
+ numa = numa_node_id();
+ uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa);
if (!uctxt) {
dd_dev_err(dd,
"Unable to allocate ctxtdata memory, failing open\n");
return -ENOMEM;
}
+ hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
+ uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
+ uctxt->numa_id);
+
/*
* Allocate and enable a PIO send context.
*/
uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
- uctxt->numa_id);
+ uctxt->dd->node);
if (!uctxt->sc)
return -ENOMEM;
@@ -1027,7 +1062,12 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
INIT_LIST_HEAD(&uctxt->sdma_queues);
spin_lock_init(&uctxt->sdma_qlock);
hfi1_stats.sps_ctxts++;
- dd->freectxts--;
+ /*
+ * Disable ASPM when there are open user/PSM contexts to avoid
+ * issues with ASPM L1 exit latency
+ */
+ if (dd->freectxts-- == dd->num_user_contexts)
+ aspm_disable_all(dd);
fd->uctxt = uctxt;
return 0;
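
The open and close paths above pair up to gate ASPM on user-context usage: opening the first user context disables ASPM, closing the last one re-enables it. The counting idiom in isolation (both sides run under hfi1_mutex, so the test-and-modify pairs are safe):

/* open: freectxts counts down from num_user_contexts */
if (dd->freectxts-- == dd->num_user_contexts)
        aspm_disable_all(dd);   /* first user context just opened */

/* close: re-enable only when every context is free again */
if (++dd->freectxts == dd->num_user_contexts)
        aspm_enable_all(dd);    /* last user context just closed */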
@@ -1036,22 +1076,19 @@ static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
const struct hfi1_user_info *uinfo)
{
- int ret = 0;
unsigned num_subctxts;
num_subctxts = uinfo->subctxt_cnt;
- if (num_subctxts > HFI1_MAX_SHARED_CTXTS) {
- ret = -EINVAL;
- goto bail;
- }
+ if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
+ return -EINVAL;
uctxt->subctxt_cnt = uinfo->subctxt_cnt;
uctxt->subctxt_id = uinfo->subctxt_id;
uctxt->active_slaves = 1;
uctxt->redirect_seq_cnt = 1;
set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
-bail:
- return ret;
+
+ return 0;
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
@@ -1106,10 +1143,10 @@ static int user_init(struct file *fp)
* has done it.
*/
if (fd->subctxt) {
- ret = wait_event_interruptible(uctxt->wait,
- !test_bit(HFI1_CTXT_MASTER_UNINIT,
- &uctxt->event_flags));
- goto done;
+ ret = wait_event_interruptible(uctxt->wait, !test_bit(
+ HFI1_CTXT_MASTER_UNINIT,
+ &uctxt->event_flags));
+ goto expected;
}
/* initialize poll variables... */
@@ -1147,8 +1184,16 @@ static int user_init(struct file *fp)
rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
+ /*
+ * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
+ * We can't rely on the correct value to be set from prior
+ * uses of the chip or ctxt. Therefore, add the rcvctrl op
+ * for both cases.
+ */
if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
+ else
+ rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
/* Notify any waiting slaves */
@@ -1156,8 +1201,18 @@ static int user_init(struct file *fp)
clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
wake_up(&uctxt->wait);
}
- ret = 0;
+expected:
+ /*
+ * Expected receive has to be setup for all processes (including
+ * shared contexts). However, it has to be done after the master
+ * context has been fully configured as it depends on the
+ * eager/expected split of the RcvArray entries.
+ * Setting it up here ensures that the subcontexts will be waiting
+ * (due to the above wait_event_interruptible()) until the master
+ * is set up.
+ */
+ ret = hfi1_user_exp_rcv_init(fp);
done:
return ret;
}
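
The TailUpd hunk above writes the control bit in both directions because the chip may hold a stale value from a prior user of the context; relying on a default would leave tail updates in an unknown state. The same reasoning explains the HFI1_RCVCTRL_TAILUPD_DIS added to the file-close path earlier in this file. The open-side pattern in isolation:

/*
 * always emit an explicit enable or disable op so the register
 * reflects this context's choice, not the previous user's
 */
if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
        rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
else
        rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;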
@@ -1227,46 +1282,6 @@ static int setup_ctxt(struct file *fp)
if (ret)
goto done;
}
- /* Setup Expected Rcv memories */
- uctxt->tid_pg_list = vzalloc(uctxt->expected_count *
- sizeof(struct page **));
- if (!uctxt->tid_pg_list) {
- ret = -ENOMEM;
- goto done;
- }
- uctxt->physshadow = vzalloc(uctxt->expected_count *
- sizeof(*uctxt->physshadow));
- if (!uctxt->physshadow) {
- ret = -ENOMEM;
- goto done;
- }
- /* allocate expected TID map and initialize the cursor */
- atomic_set(&uctxt->tidcursor, 0);
- uctxt->numtidgroups = uctxt->expected_count /
- dd->rcv_entries.group_size;
- uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG +
- !!(uctxt->numtidgroups % BITS_PER_LONG);
- uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt *
- sizeof(*uctxt->tidusemap),
- GFP_KERNEL, uctxt->numa_id);
- if (!uctxt->tidusemap) {
- ret = -ENOMEM;
- goto done;
- }
- /*
- * In case that the number of groups is not a multiple of
- * 64 (the number of groups in a tidusemap element), mark
- * the extra ones as used. This will effectively make them
- * permanently used and should never be assigned. Otherwise,
- * the code which checks how many free groups we have will
- * get completely confused about the state of the bits.
- */
- if (uctxt->numtidgroups % BITS_PER_LONG)
- uctxt->tidusemap[uctxt->tidmapcnt - 1] =
- ~((1ULL << (uctxt->numtidgroups %
- BITS_PER_LONG)) - 1);
- trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0,
- uctxt->tidusemap, uctxt->tidmapcnt);
}
ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
if (ret)
@@ -1392,8 +1407,9 @@ static unsigned int poll_next(struct file *fp,
set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
pollflag = 0;
- } else
+ } else {
pollflag = POLLIN | POLLRDNORM;
+ }
spin_unlock_irq(&dd->uctxt_lock);
return pollflag;
@@ -1471,8 +1487,9 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
if (uctxt->rcvhdrtail_kvaddr)
clear_rcvhdrtail(uctxt);
rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
- } else
+ } else {
rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
+ }
hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
/* always; new head should be equal to new tail; see above */
bail:
@@ -1505,367 +1522,6 @@ static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
return 0;
}
-#define num_user_pages(vaddr, len) \
- (1 + (((((unsigned long)(vaddr) + \
- (unsigned long)(len) - 1) & PAGE_MASK) - \
- ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
-
-/**
- * tzcnt - count the number of trailing zeros in a 64bit value
- * @value: the value to be examined
- *
- * Returns the number of trailing least significant zeros in the
- * the input value. If the value is zero, return the number of
- * bits of the value.
- */
-static inline u8 tzcnt(u64 value)
-{
- return value ? __builtin_ctzl(value) : sizeof(value) * 8;
-}
-
-static inline unsigned num_free_groups(unsigned long map, u16 *start)
-{
- unsigned free;
- u16 bitidx = *start;
-
- if (bitidx >= BITS_PER_LONG)
- return 0;
- /* "Turn off" any bits set before our bit index */
- map &= ~((1ULL << bitidx) - 1);
- free = tzcnt(map) - bitidx;
- while (!free && bitidx < BITS_PER_LONG) {
- /* Zero out the last set bit so we look at the rest */
- map &= ~(1ULL << bitidx);
- /*
- * Account for the previously checked bits and advance
- * the bit index. We don't have to check for bitidx
- * getting bigger than BITS_PER_LONG here as it would
- * mean extra instructions that we don't need. If it
- * did happen, it would push free to a negative value
- * which will break the loop.
- */
- free = tzcnt(map) - ++bitidx;
- }
- *start = bitidx;
- return free;
-}
-
-static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
-{
- int ret = 0;
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned tid, mapped = 0, npages, ngroups, exp_groups,
- tidpairs = uctxt->expected_count / 2;
- struct page **pages;
- unsigned long vaddr, tidmap[uctxt->tidmapcnt];
- dma_addr_t *phys;
- u32 tidlist[tidpairs], pairidx = 0, tidcursor;
- u16 useidx, idx, bitidx, tidcnt = 0;
-
- vaddr = tinfo->vaddr;
-
- if (offset_in_page(vaddr)) {
- ret = -EINVAL;
- goto bail;
- }
-
- npages = num_user_pages(vaddr, tinfo->length);
- if (!npages) {
- ret = -EINVAL;
- goto bail;
- }
- if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
- npages * PAGE_SIZE)) {
- dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
- (void *)vaddr, npages);
- ret = -EFAULT;
- goto bail;
- }
-
- memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt);
- memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs);
-
- exp_groups = uctxt->expected_count / dd->rcv_entries.group_size;
- /* which group set do we look at first? */
- tidcursor = atomic_read(&uctxt->tidcursor);
- useidx = (tidcursor >> 16) & 0xffff;
- bitidx = tidcursor & 0xffff;
-
- /*
- * Keep going until we've mapped all pages or we've exhausted all
- * RcvArray entries.
- * This iterates over the number of tidmaps + 1
- * (idx <= uctxt->tidmapcnt) so we check the bitmap which we
- * started from one more time for any free bits before the
- * starting point bit.
- */
- for (mapped = 0, idx = 0;
- mapped < npages && idx <= uctxt->tidmapcnt;) {
- u64 i, offset = 0;
- unsigned free, pinned, pmapped = 0, bits_used;
- u16 grp;
-
- /*
- * "Reserve" the needed group bits under lock so other
- * processes can't step in the middle of it. Once
- * reserved, we don't need the lock anymore since we
- * are guaranteed the groups.
- */
- spin_lock(&uctxt->exp_lock);
- if (uctxt->tidusemap[useidx] == -1ULL ||
- bitidx >= BITS_PER_LONG) {
- /* no free groups in the set, use the next */
- useidx = (useidx + 1) % uctxt->tidmapcnt;
- idx++;
- bitidx = 0;
- spin_unlock(&uctxt->exp_lock);
- continue;
- }
- ngroups = ((npages - mapped) / dd->rcv_entries.group_size) +
- !!((npages - mapped) % dd->rcv_entries.group_size);
-
- /*
- * If we've gotten here, the current set of groups does have
- * one or more free groups.
- */
- free = num_free_groups(uctxt->tidusemap[useidx], &bitidx);
- if (!free) {
- /*
- * Despite the check above, free could still come back
- * as 0 because we don't check the entire bitmap but
- * we start from bitidx.
- */
- spin_unlock(&uctxt->exp_lock);
- continue;
- }
- bits_used = min(free, ngroups);
- tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx;
- uctxt->tidusemap[useidx] |= tidmap[useidx];
- spin_unlock(&uctxt->exp_lock);
-
- /*
- * At this point, we know where in the map we have free bits.
- * properly offset into the various "shadow" arrays and compute
- * the RcvArray entry index.
- */
- offset = ((useidx * BITS_PER_LONG) + bitidx) *
- dd->rcv_entries.group_size;
- pages = uctxt->tid_pg_list + offset;
- phys = uctxt->physshadow + offset;
- tid = uctxt->expected_base + offset;
-
- /* Calculate how many pages we can pin based on free bits */
- pinned = min((bits_used * dd->rcv_entries.group_size),
- (npages - mapped));
- /*
- * Now that we know how many free RcvArray entries we have,
- * we can pin that many user pages.
- */
- ret = hfi1_acquire_user_pages(vaddr + (mapped * PAGE_SIZE),
- pinned, true, pages);
- if (ret) {
- /*
- * We can't continue because the pages array won't be
- * initialized. This should never happen,
- * unless perhaps the user has mpin'ed the pages
- * themselves.
- */
- dd_dev_info(dd,
- "Failed to lock addr %p, %u pages: errno %d\n",
- (void *) vaddr, pinned, -ret);
- /*
- * Let go of the bits that we reserved since we are not
- * going to use them.
- */
- spin_lock(&uctxt->exp_lock);
- uctxt->tidusemap[useidx] &=
- ~(((1ULL << bits_used) - 1) << bitidx);
- spin_unlock(&uctxt->exp_lock);
- goto done;
- }
- /*
- * How many groups do we need based on how many pages we have
- * pinned?
- */
- ngroups = (pinned / dd->rcv_entries.group_size) +
- !!(pinned % dd->rcv_entries.group_size);
- /*
- * Keep programming RcvArray entries for all the <ngroups> free
- * groups.
- */
- for (i = 0, grp = 0; grp < ngroups; i++, grp++) {
- unsigned j;
- u32 pair_size = 0, tidsize;
- /*
- * This inner loop will program an entire group or the
- * array of pinned pages (which ever limit is hit
- * first).
- */
- for (j = 0; j < dd->rcv_entries.group_size &&
- pmapped < pinned; j++, pmapped++, tid++) {
- tidsize = PAGE_SIZE;
- phys[pmapped] = hfi1_map_page(dd->pcidev,
- pages[pmapped], 0,
- tidsize, PCI_DMA_FROMDEVICE);
- trace_hfi1_exp_rcv_set(uctxt->ctxt,
- fd->subctxt,
- tid, vaddr,
- phys[pmapped],
- pages[pmapped]);
- /*
- * Each RcvArray entry is programmed with one
- * page * worth of memory. This will handle
- * the 8K MTU as well as anything smaller
- * due to the fact that both entries in the
- * RcvTidPair are programmed with a page.
- * PSM currently does not handle anything
- * bigger than 8K MTU, so should we even worry
- * about 10K here?
- */
- hfi1_put_tid(dd, tid, PT_EXPECTED,
- phys[pmapped],
- ilog2(tidsize >> PAGE_SHIFT) + 1);
- pair_size += tidsize >> PAGE_SHIFT;
- EXP_TID_RESET(tidlist[pairidx], LEN, pair_size);
- if (!(tid % 2)) {
- tidlist[pairidx] |=
- EXP_TID_SET(IDX,
- (tid - uctxt->expected_base)
- / 2);
- tidlist[pairidx] |=
- EXP_TID_SET(CTRL, 1);
- tidcnt++;
- } else {
- tidlist[pairidx] |=
- EXP_TID_SET(CTRL, 2);
- pair_size = 0;
- pairidx++;
- }
- }
- /*
- * We've programmed the entire group (or as much of the
- * group as we'll use. Now, it's time to push it out...
- */
- flush_wc();
- }
- mapped += pinned;
- atomic_set(&uctxt->tidcursor,
- (((useidx & 0xffffff) << 16) |
- ((bitidx + bits_used) & 0xffffff)));
- }
- trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 0, uctxt->tidusemap,
- uctxt->tidmapcnt);
-
-done:
- /* If we've mapped anything, copy relevant info to user */
- if (mapped) {
- if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
- tidlist, sizeof(tidlist[0]) * tidcnt)) {
- ret = -EFAULT;
- goto done;
- }
- /* copy TID info to user */
- if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap,
- tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt))
- ret = -EFAULT;
- }
-bail:
- /*
- * Calculate mapped length. New Exp TID protocol does not "unwind" and
- * report an error if it can't map the entire buffer. It just reports
- * the length that was mapped.
- */
- tinfo->length = mapped * PAGE_SIZE;
- tinfo->tidcnt = tidcnt;
- return ret;
-}
-
-static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
-{
- struct hfi1_filedata *fd = fp->private_data;
- struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned long tidmap[uctxt->tidmapcnt];
- struct page **pages;
- dma_addr_t *phys;
- u16 idx, bitidx, tid;
- int ret = 0;
-
- if (copy_from_user(&tidmap, (void __user *)(unsigned long)
- tinfo->tidmap,
- sizeof(tidmap[0]) * uctxt->tidmapcnt)) {
- ret = -EFAULT;
- goto done;
- }
- for (idx = 0; idx < uctxt->tidmapcnt; idx++) {
- unsigned long map;
-
- bitidx = 0;
- if (!tidmap[idx])
- continue;
- map = tidmap[idx];
- while ((bitidx = tzcnt(map)) < BITS_PER_LONG) {
- int i, pcount = 0;
- struct page *pshadow[dd->rcv_entries.group_size];
- unsigned offset = ((idx * BITS_PER_LONG) + bitidx) *
- dd->rcv_entries.group_size;
-
- pages = uctxt->tid_pg_list + offset;
- phys = uctxt->physshadow + offset;
- tid = uctxt->expected_base + offset;
- for (i = 0; i < dd->rcv_entries.group_size;
- i++, tid++) {
- if (pages[i]) {
- hfi1_put_tid(dd, tid, PT_INVALID,
- 0, 0);
- trace_hfi1_exp_rcv_free(uctxt->ctxt,
- fd->subctxt,
- tid, phys[i],
- pages[i]);
- pci_unmap_page(dd->pcidev, phys[i],
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
- pshadow[pcount] = pages[i];
- pages[i] = NULL;
- pcount++;
- phys[i] = 0;
- }
- }
- flush_wc();
- hfi1_release_user_pages(pshadow, pcount, true);
- clear_bit(bitidx, &uctxt->tidusemap[idx]);
- map &= ~(1ULL<<bitidx);
- }
- }
- trace_hfi1_exp_tid_map(uctxt->ctxt, fd->subctxt, 1, uctxt->tidusemap,
- uctxt->tidmapcnt);
-done:
- return ret;
-}
-
-static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
-{
- struct hfi1_devdata *dd = uctxt->dd;
- unsigned tid;
-
- dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n",
- uctxt->ctxt);
- for (tid = 0; tid < uctxt->expected_count; tid++) {
- struct page *p = uctxt->tid_pg_list[tid];
- dma_addr_t phys;
-
- if (!p)
- continue;
-
- phys = uctxt->physshadow[tid];
- uctxt->physshadow[tid] = 0;
- uctxt->tid_pg_list[tid] = NULL;
- pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
- hfi1_release_user_pages(&p, 1, true);
- }
-}
-
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
u16 pkey)
{
@@ -1934,10 +1590,9 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
return filp->f_pos;
}
-
/* NOTE: assumes unsigned long is 8 bytes */
static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
- loff_t *f_pos)
+ loff_t *f_pos)
{
struct hfi1_devdata *dd = filp->private_data;
void __iomem *base = dd->kregbase;
@@ -1973,12 +1628,12 @@ static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
* them. These registers are defined as having a read value
* of 0.
*/
- else if (csr_off == ASIC_GPIO_CLEAR
- || csr_off == ASIC_GPIO_FORCE
- || csr_off == ASIC_QSFP1_CLEAR
- || csr_off == ASIC_QSFP1_FORCE
- || csr_off == ASIC_QSFP2_CLEAR
- || csr_off == ASIC_QSFP2_FORCE)
+ else if (csr_off == ASIC_GPIO_CLEAR ||
+ csr_off == ASIC_GPIO_FORCE ||
+ csr_off == ASIC_QSFP1_CLEAR ||
+ csr_off == ASIC_QSFP1_FORCE ||
+ csr_off == ASIC_QSFP2_CLEAR ||
+ csr_off == ASIC_QSFP2_FORCE)
data = 0;
else if (csr_off >= barlen) {
/*
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/staging/rdma/hfi1/firmware.c
index 28ae42faa018..3040162cb326 100644
--- a/drivers/staging/rdma/hfi1/firmware.c
+++ b/drivers/staging/rdma/hfi1/firmware.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -77,7 +74,13 @@ static uint fw_8051_load = 1;
static uint fw_fabric_serdes_load = 1;
static uint fw_pcie_serdes_load = 1;
static uint fw_sbus_load = 1;
-static uint platform_config_load = 1;
+
+/*
+ * Accessed from platform.c as well. Tracks whether the platform
+ * config was fetched via the fallback option.
+ */
+uint platform_config_load;
/* Firmware file names get set in hfi1_firmware_init() based on the above */
static char *fw_8051_name;
@@ -107,6 +110,7 @@ struct css_header {
u32 exponent_size; /* in DWORDs */
u32 reserved[22];
};
+
/* expected field values */
#define CSS_MODULE_TYPE 0x00000006
#define CSS_HEADER_LEN 0x000000a1
@@ -166,6 +170,7 @@ enum fw_state {
FW_FINAL,
FW_ERR
};
+
static enum fw_state fw_state = FW_EMPTY;
static int fw_err;
static struct firmware_details fw_8051;
@@ -193,7 +198,7 @@ static const struct firmware *platform_config;
#define RSA_ENGINE_TIMEOUT 100 /* ms */
/* hardware mutex timeout, in ms */
-#define HM_TIMEOUT 4000 /* 4 s */
+#define HM_TIMEOUT 10 /* ms */
/* 8051 memory access timeout, in us */
#define DC8051_ACCESS_TIMEOUT 100 /* us */
@@ -233,6 +238,8 @@ static const u8 all_pcie_serdes_broadcast = 0xe0;
/* forwards */
static void dispose_one_firmware(struct firmware_details *fdet);
+static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
+ struct firmware_details *fdet);
/*
* Read a single 64-bit value from 8051 data memory.
@@ -372,8 +379,8 @@ static int invalid_header(struct hfi1_devdata *dd, const char *what,
return 0;
dd_dev_err(dd,
- "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
- what, expected, actual);
+ "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
+ what, expected, actual);
return 1;
}
@@ -383,19 +390,19 @@ static int invalid_header(struct hfi1_devdata *dd, const char *what,
static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
{
/* verify CSS header fields (most sizes are in DW, so add /4) */
- if (invalid_header(dd, "module_type", css->module_type, CSS_MODULE_TYPE)
- || invalid_header(dd, "header_len", css->header_len,
- (sizeof(struct firmware_file)/4))
- || invalid_header(dd, "header_version",
- css->header_version, CSS_HEADER_VERSION)
- || invalid_header(dd, "module_vendor",
- css->module_vendor, CSS_MODULE_VENDOR)
- || invalid_header(dd, "key_size",
- css->key_size, KEY_SIZE/4)
- || invalid_header(dd, "modulus_size",
- css->modulus_size, KEY_SIZE/4)
- || invalid_header(dd, "exponent_size",
- css->exponent_size, EXPONENT_SIZE/4)) {
+ if (invalid_header(dd, "module_type", css->module_type,
+ CSS_MODULE_TYPE) ||
+ invalid_header(dd, "header_len", css->header_len,
+ (sizeof(struct firmware_file) / 4)) ||
+ invalid_header(dd, "header_version", css->header_version,
+ CSS_HEADER_VERSION) ||
+ invalid_header(dd, "module_vendor", css->module_vendor,
+ CSS_MODULE_VENDOR) ||
+ invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
+ invalid_header(dd, "modulus_size", css->modulus_size,
+ KEY_SIZE / 4) ||
+ invalid_header(dd, "exponent_size", css->exponent_size,
+ EXPONENT_SIZE / 4)) {
return -EINVAL;
}
return 0;
@@ -410,8 +417,8 @@ static int payload_check(struct hfi1_devdata *dd, const char *name,
/* make sure we have some payload */
if (prefix_size >= file_size) {
dd_dev_err(dd,
- "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
- name, file_size, prefix_size);
+ "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
+ name, file_size, prefix_size);
return -EINVAL;
}
@@ -433,8 +440,8 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
if (ret) {
- dd_dev_err(dd, "cannot find firmware \"%s\", err %d\n",
- name, ret);
+ dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
+ name, ret);
return ret;
}
@@ -480,14 +487,14 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
ret = verify_css_header(dd, css);
if (ret) {
dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
- } else if ((css->size*4) == fdet->fw->size) {
+ } else if ((css->size * 4) == fdet->fw->size) {
/* non-augmented firmware file */
struct firmware_file *ff = (struct firmware_file *)
fdet->fw->data;
/* make sure there are bytes in the payload */
ret = payload_check(dd, name, fdet->fw->size,
- sizeof(struct firmware_file));
+ sizeof(struct firmware_file));
if (ret == 0) {
fdet->css_header = css;
fdet->modulus = ff->modulus;
@@ -505,14 +512,14 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
ret = -EINVAL;
}
- } else if ((css->size*4) + AUGMENT_SIZE == fdet->fw->size) {
+ } else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) {
/* augmented firmware file */
struct augmented_firmware_file *aff =
(struct augmented_firmware_file *)fdet->fw->data;
/* make sure there are bytes in the payload */
ret = payload_check(dd, name, fdet->fw->size,
- sizeof(struct augmented_firmware_file));
+ sizeof(struct augmented_firmware_file));
if (ret == 0) {
fdet->css_header = css;
fdet->modulus = aff->modulus;
@@ -527,9 +534,10 @@ static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
} else {
/* css->size check failed */
dd_dev_err(dd,
- "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
- fdet->fw->size/4, (fdet->fw->size - AUGMENT_SIZE)/4,
- css->size);
+ "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
+ fdet->fw->size / 4,
+ (fdet->fw->size - AUGMENT_SIZE) / 4,
+ css->size);
ret = -EINVAL;
}
@@ -572,7 +580,7 @@ retry:
* We tried the original and it failed. Move to the
* alternate.
*/
- dd_dev_info(dd, "using alternate firmware names\n");
+ dd_dev_warn(dd, "using alternate firmware names\n");
/*
* Let others run. Some systems, when missing firmware, do
* something that holds for 30 seconds. If we do that twice
@@ -593,27 +601,27 @@ retry:
fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
}
- if (fw_8051_load) {
- err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
+ if (fw_sbus_load) {
+ err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
if (err)
goto done;
}
- if (fw_fabric_serdes_load) {
- err = obtain_one_firmware(dd, fw_fabric_serdes_name,
- &fw_fabric);
+ if (fw_pcie_serdes_load) {
+ err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
if (err)
goto done;
}
- if (fw_sbus_load) {
- err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
+ if (fw_fabric_serdes_load) {
+ err = obtain_one_firmware(dd, fw_fabric_serdes_name,
+ &fw_fabric);
if (err)
goto done;
}
- if (fw_pcie_serdes_load) {
- err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
+ if (fw_8051_load) {
+ err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
if (err)
goto done;
}
@@ -621,16 +629,18 @@ retry:
done:
if (err) {
/* oops, had problems obtaining a firmware */
- if (fw_state == FW_EMPTY) {
- /* retry with alternate */
+ if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
+ /* retry with alternate (RTL only) */
fw_state = FW_TRY;
goto retry;
}
+ dd_dev_err(dd, "unable to obtain working firmware\n");
fw_state = FW_ERR;
fw_err = -ENOENT;
} else {
/* success */
- if (fw_state == FW_EMPTY)
+ if (fw_state == FW_EMPTY &&
+ dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
fw_state = FW_TRY; /* may retry later */
else
fw_state = FW_FINAL; /* cannot try again */
@@ -673,10 +683,15 @@ static int obtain_firmware(struct hfi1_devdata *dd)
}
/* not in FW_TRY state */
- if (fw_state == FW_FINAL)
+ if (fw_state == FW_FINAL) {
+ if (platform_config) {
+ dd->platform_config.data = platform_config->data;
+ dd->platform_config.size = platform_config->size;
+ }
goto done; /* already acquired */
- else if (fw_state == FW_ERR)
+ } else if (fw_state == FW_ERR) {
goto done; /* already tried and failed */
+ }
/* fw_state is FW_EMPTY */
/* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
@@ -685,9 +700,13 @@ static int obtain_firmware(struct hfi1_devdata *dd)
if (platform_config_load) {
platform_config = NULL;
err = request_firmware(&platform_config, platform_config_name,
- &dd->pcidev->dev);
- if (err)
+ &dd->pcidev->dev);
+ if (err) {
platform_config = NULL;
+ goto done;
+ }
+ dd->platform_config.data = platform_config->data;
+ dd->platform_config.size = platform_config->size;
}
done:
@@ -761,7 +780,7 @@ static int retry_firmware(struct hfi1_devdata *dd, int load_result)
static void write_rsa_data(struct hfi1_devdata *dd, int what,
const u8 *data, int nbytes)
{
- int qw_size = nbytes/8;
+ int qw_size = nbytes / 8;
int i;
if (((unsigned long)data & 0x7) == 0) {
@@ -769,14 +788,14 @@ static void write_rsa_data(struct hfi1_devdata *dd, int what,
u64 *ptr = (u64 *)data;
for (i = 0; i < qw_size; i++, ptr++)
- write_csr(dd, what + (8*i), *ptr);
+ write_csr(dd, what + (8 * i), *ptr);
} else {
/* not aligned */
for (i = 0; i < qw_size; i++, data += 8) {
u64 value;
memcpy(&value, data, 8);
- write_csr(dd, what + (8*i), value);
+ write_csr(dd, what + (8 * i), value);
}
}
}
@@ -789,7 +808,7 @@ static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
const u8 *data, int nbytes)
{
u64 *ptr = (u64 *)data;
- int qw_size = nbytes/8;
+ int qw_size = nbytes / 8;
for (; qw_size > 0; qw_size--, ptr++)
write_csr(dd, what, *ptr);
@@ -822,7 +841,7 @@ static int run_rsa(struct hfi1_devdata *dd, const char *who,
>> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
if (status != RSA_STATUS_IDLE) {
dd_dev_err(dd, "%s security engine not idle - giving up\n",
- who);
+ who);
return -EBUSY;
}
@@ -859,7 +878,7 @@ static int run_rsa(struct hfi1_devdata *dd, const char *who,
if (status == RSA_STATUS_IDLE) {
/* should not happen */
dd_dev_err(dd, "%s firmware security bad idle state\n",
- who);
+ who);
ret = -EINVAL;
break;
} else if (status == RSA_STATUS_DONE) {
@@ -893,19 +912,20 @@ static int run_rsa(struct hfi1_devdata *dd, const char *who,
* is not keeping the error high.
*/
write_csr(dd, MISC_ERR_CLEAR,
- MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK
- | MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
+ MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
+ MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
/*
- * All that is left are the current errors. Print failure details,
- * if any.
+ * All that is left are the current errors. Print warnings on
+ * authorization failure details, if any. Firmware authorization
+ * can be retried, so these are only warnings.
*/
reg = read_csr(dd, MISC_ERR_STATUS);
if (ret) {
if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
- dd_dev_err(dd, "%s firmware authorization failed\n",
- who);
+ dd_dev_warn(dd, "%s firmware authorization failed\n",
+ who);
if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
- dd_dev_err(dd, "%s firmware key mismatch\n", who);
+ dd_dev_warn(dd, "%s firmware key mismatch\n", who);
}
return ret;
@@ -922,7 +942,8 @@ static void load_security_variables(struct hfi1_devdata *dd,
write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
/* Security variables d. Write the header */
write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
- (u8 *)fdet->css_header, sizeof(struct css_header));
+ (u8 *)fdet->css_header,
+ sizeof(struct css_header));
}
/* return the 8051 firmware state */
@@ -1002,7 +1023,7 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
/* Firmware load steps 3-5 */
ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
- fdet->firmware_len);
+ fdet->firmware_len);
if (ret)
return ret;
@@ -1029,13 +1050,13 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
ret = wait_fm_ready(dd, TIMEOUT_8051_START);
if (ret) { /* timed out */
dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
- get_firmware_state(dd));
+ get_firmware_state(dd));
return -ETIMEDOUT;
}
read_misc_status(dd, &ver_a, &ver_b);
dd_dev_info(dd, "8051 firmware version %d.%d\n",
- (int)ver_b, (int)ver_a);
+ (int)ver_b, (int)ver_a);
dd->dc8051_ver = dc8051_ver(ver_b, ver_a);
return 0;
@@ -1050,11 +1071,11 @@ void sbus_request(struct hfi1_devdata *dd,
u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
{
write_csr(dd, ASIC_CFG_SBUS_REQUEST,
- ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT)
- | ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT)
- | ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT)
- | ((u64)receiver_addr
- << ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
+ ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
+ ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
+ ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
+ ((u64)receiver_addr <<
+ ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
}
/*
@@ -1072,14 +1093,14 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
return;
dd_dev_info(dd, "Turning off spicos:%s%s\n",
- flags & SPICO_SBUS ? " SBus" : "",
- flags & SPICO_FABRIC ? " fabric" : "");
+ flags & SPICO_SBUS ? " SBus" : "",
+ flags & SPICO_FABRIC ? " fabric" : "");
write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
/* disable SBus spico */
if (flags & SPICO_SBUS)
sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
- WRITE_SBUS_RECEIVER, 0x00000040);
+ WRITE_SBUS_RECEIVER, 0x00000040);
/* disable the fabric serdes spicos */
if (flags & SPICO_FABRIC)
@@ -1089,29 +1110,60 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
}
/*
- * Reset all of the fabric serdes for our HFI.
+ * Reset all of the fabric serdes for this HFI in preparation to take the
+ * link to Polling.
+ *
+ * To do a reset, we need to write to the serdes registers. Unfortunately,
+ * the fabric serdes download to the other HFI on the ASIC will have turned
+ * off the firmware validation on this HFI. This means we can't write to the
+ * registers to reset the serdes. Work around this by performing a complete
+ * re-download and validation of the fabric serdes firmware. This, as a
+ * by-product, will reset the serdes. NOTE: the re-download requires that
+ * the 8051 be in the Offline state. I.e. not actively trying to use the
+ * serdes. This routine is called at the point where the link is Offline and
+ * is getting ready to go to Polling.
*/
void fabric_serdes_reset(struct hfi1_devdata *dd)
{
- u8 ra;
+ int ret;
- if (dd->icode != ICODE_RTL_SILICON) /* only for RTL */
+ if (!fw_fabric_serdes_load)
return;
- ra = fabric_serdes_broadcast[dd->hfi1_id];
-
- acquire_hw_mutex(dd);
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd,
+ "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n");
+ return;
+ }
set_sbus_fast_mode(dd);
- /* place SerDes in reset and disable SPICO */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
- /* wait 100 refclk cycles @ 156.25MHz => 640ns */
- udelay(1);
- /* remove SerDes reset */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
- /* turn SPICO enable on */
- sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
+
+ if (is_ax(dd)) {
+ /* A0 serdes do not work with a re-download */
+ u8 ra = fabric_serdes_broadcast[dd->hfi1_id];
+
+ /* place SerDes in reset and disable SPICO */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
+ /* wait 100 refclk cycles @ 156.25MHz => 640ns */
+ udelay(1);
+ /* remove SerDes reset */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
+ /* turn SPICO enable on */
+ sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
+ } else {
+ turn_off_spicos(dd, SPICO_FABRIC);
+ /*
+ * No need for firmware retry - what to download has already
+ * been decided.
+ * No need to pay attention to the load return - the only
+ * failure is a validation failure, which has already been
+ * checked by the initial download.
+ */
+ (void)load_fabric_serdes_firmware(dd, &fw_fabric);
+ }
+
clear_sbus_fast_mode(dd);
- release_hw_mutex(dd);
+ release_chip_resource(dd, CR_SBUS);
}
/* Access to the SBus in this routine should probably be serialized */
@@ -1120,6 +1172,9 @@ int sbus_request_slow(struct hfi1_devdata *dd,
{
u64 reg, count = 0;
+ /* make sure fast mode is clear */
+ clear_sbus_fast_mode(dd);
+
sbus_request(dd, receiver_addr, data_addr, command, data_in);
write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
@@ -1177,7 +1232,7 @@ static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
/* step 5: download SerDes machine code */
for (i = 0; i < fdet->firmware_len; i += 4) {
sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
- *(u32 *)&fdet->firmware_ptr[i]);
+ *(u32 *)&fdet->firmware_ptr[i]);
}
/* step 6: IMEM override off */
sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
@@ -1216,7 +1271,7 @@ static int load_sbus_firmware(struct hfi1_devdata *dd,
/* step 5: download the SBus Master machine code */
for (i = 0; i < fdet->firmware_len; i += 4) {
sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
- *(u32 *)&fdet->firmware_ptr[i]);
+ *(u32 *)&fdet->firmware_ptr[i]);
}
/* step 6: set IMEM_CNTL_EN off */
sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
@@ -1249,19 +1304,23 @@ static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
/* step 3: enable XDMEM access */
sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
/* step 4: load firmware into SBus Master XDMEM */
- /* NOTE: the dmem address, write_en, and wdata are all pre-packed,
- we only need to pick up the bytes and write them */
+ /*
+ * NOTE: the dmem address, write_en, and wdata are all pre-packed,
+ * we only need to pick up the bytes and write them
+ */
for (i = 0; i < fdet->firmware_len; i += 4) {
sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
- *(u32 *)&fdet->firmware_ptr[i]);
+ *(u32 *)&fdet->firmware_ptr[i]);
}
/* step 5: disable XDMEM access */
sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
/* step 6: allow SBus Spico to run */
sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);
- /* steps 7-11: run RSA, if it succeeds, firmware is available to
- be swapped */
+ /*
+ * steps 7-11: run RSA, if it succeeds, firmware is available to
+ * be swapped
+ */
return run_rsa(dd, "PCIe serdes", fdet->signature);
}
@@ -1285,7 +1344,7 @@ static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
* 23:16 BROADCAST_GROUP_2 (default 0xff)
*/
sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
- (u32)bg1 << 4 | (u32)bg2 << 16);
+ (u32)bg1 << 4 | (u32)bg2 << 16);
}
}
@@ -1310,8 +1369,8 @@ retry:
/* timed out */
dd_dev_err(dd,
- "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
- (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
+ "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
+ (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");
if (try == 0) {
/* break mutex and retry */
@@ -1328,10 +1387,197 @@ void release_hw_mutex(struct hfi1_devdata *dd)
write_csr(dd, ASIC_CFG_MUTEX, 0);
}
+/* return the given resource bit(s) as a mask for the given HFI */
+static inline u64 resource_mask(u32 hfi1_id, u32 resource)
+{
+ return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0);
+}
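
resource_mask() gives each HFI its own copy of the dynamic bits by shifting the second HFI's bits up by CR_DYN_SHIFT. For example, with CR_EPROM as used above:

/*
 * the same logical resource lives at different bit positions per HFI,
 * so "busy" means either HFI's bit is set in the scratch register
 */
u64 all_bits = resource_mask(0, CR_EPROM) | resource_mask(1, CR_EPROM);
u64 my_bit   = resource_mask(dd->hfi1_id, CR_EPROM);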
+
+static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
+ const char *func)
+{
+ dd_dev_err(dd,
+ "%s: hardware mutex stuck - suggest rebooting the machine\n",
+ func);
+}
+
+/*
+ * Acquire access to a chip resource.
+ *
+ * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed.
+ */
+static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
+{
+ u64 scratch0, all_bits, my_bit;
+ int ret;
+
+ if (resource & CR_DYN_MASK) {
+ /* a dynamic resource is in use if either HFI has set the bit */
+ all_bits = resource_mask(0, resource) |
+ resource_mask(1, resource);
+ my_bit = resource_mask(dd->hfi1_id, resource);
+ } else {
+ /* non-dynamic resources are not split between HFIs */
+ all_bits = resource;
+ my_bit = resource;
+ }
+
+ /* lock against other callers within the driver wanting a resource */
+ mutex_lock(&dd->asic_data->asic_resource_mutex);
+
+ ret = acquire_hw_mutex(dd);
+ if (ret) {
+ fail_mutex_acquire_message(dd, __func__);
+ ret = -EIO;
+ goto done;
+ }
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ if (scratch0 & all_bits) {
+ ret = -EBUSY;
+ } else {
+ write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
+ /* force write to be visible to other HFI on another OS */
+ (void)read_csr(dd, ASIC_CFG_SCRATCH);
+ }
+
+ release_hw_mutex(dd);
+
+done:
+ mutex_unlock(&dd->asic_data->asic_resource_mutex);
+ return ret;
+}
+
+/*
+ * Acquire access to a chip resource, wait up to mswait milliseconds for
+ * the resource to become available.
+ *
+ * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex
+ * acquire failed.
+ */
+int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
+{
+ unsigned long timeout;
+ int ret;
+
+ timeout = jiffies + msecs_to_jiffies(mswait);
+ while (1) {
+ ret = __acquire_chip_resource(dd, resource);
+ if (ret != -EBUSY)
+ return ret;
+ /* resource is busy, check our timeout */
+ if (time_after_eq(jiffies, timeout))
+ return -EBUSY;
+ usleep_range(80, 120); /* arbitrary delay */
+ }
+}
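
acquire_chip_resource() polls with a deadline rather than blocking on a waitqueue, presumably because the contending owner may be the other HFI running under a different OS, which can only signal release through the shared scratch register. Caller-side usage, mirroring the load_firmware() hunk below (SBUS_TIMEOUT is defined elsewhere in the patch):

ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
if (ret)        /* -EBUSY: still held after the wait; -EIO: mutex stuck */
        return ret;
set_sbus_fast_mode(dd);
/* ... SBus requests ... */
clear_sbus_fast_mode(dd);
release_chip_resource(dd, CR_SBUS);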
+
+/*
+ * Release access to a chip resource
+ */
+void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
+{
+ u64 scratch0, bit;
+
+ /* only dynamic resources should ever be cleared */
+ if (!(resource & CR_DYN_MASK)) {
+ dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
+ resource);
+ return;
+ }
+ bit = resource_mask(dd->hfi1_id, resource);
+
+ /* lock against other callers within the driver wanting a resource */
+ mutex_lock(&dd->asic_data->asic_resource_mutex);
+
+ if (acquire_hw_mutex(dd)) {
+ fail_mutex_acquire_message(dd, __func__);
+ goto done;
+ }
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ if ((scratch0 & bit) != 0) {
+ scratch0 &= ~bit;
+ write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
+ /* force write to be visible to other HFI on another OS */
+ (void)read_csr(dd, ASIC_CFG_SCRATCH);
+ } else {
+ dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
+ __func__, dd->hfi1_id, resource);
+ }
+
+ release_hw_mutex(dd);
+
+done:
+ mutex_unlock(&dd->asic_data->asic_resource_mutex);
+}
+
+/*
+ * Return true if resource is set, false otherwise. Print a warning
+ * if not set and a function is supplied.
+ */
+bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
+ const char *func)
+{
+ u64 scratch0, bit;
+
+ if (resource & CR_DYN_MASK)
+ bit = resource_mask(dd->hfi1_id, resource);
+ else
+ bit = resource;
+
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ if ((scratch0 & bit) == 0) {
+ if (func)
+ dd_dev_warn(dd,
+ "%s: id %d, resource 0x%x, not acquired!\n",
+ func, dd->hfi1_id, resource);
+ return false;
+ }
+ return true;
+}
+
+static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
+{
+ u64 scratch0;
+
+ /* lock against other callers within the driver wanting a resource */
+ mutex_lock(&dd->asic_data->asic_resource_mutex);
+
+ if (acquire_hw_mutex(dd)) {
+ fail_mutex_acquire_message(dd, func);
+ goto done;
+ }
+
+ /* clear all dynamic access bits for this HFI */
+ scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
+ scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
+ write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
+ /* force write to be visible to other HFI on another OS */
+ (void)read_csr(dd, ASIC_CFG_SCRATCH);
+
+ release_hw_mutex(dd);
+
+done:
+ mutex_unlock(&dd->asic_data->asic_resource_mutex);
+}
+
+void init_chip_resources(struct hfi1_devdata *dd)
+{
+ /* clear any holds left by us */
+ clear_chip_resources(dd, __func__);
+}
+
+void finish_chip_resources(struct hfi1_devdata *dd)
+{
+ /* clear any holds left by us */
+ clear_chip_resources(dd, __func__);
+}
+
void set_sbus_fast_mode(struct hfi1_devdata *dd)
{
write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
- ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
+ ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
}
void clear_sbus_fast_mode(struct hfi1_devdata *dd)
@@ -1354,23 +1600,23 @@ int load_firmware(struct hfi1_devdata *dd)
int ret;
if (fw_fabric_serdes_load) {
- ret = acquire_hw_mutex(dd);
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
if (ret)
return ret;
set_sbus_fast_mode(dd);
set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
- fabric_serdes_broadcast[dd->hfi1_id],
- fabric_serdes_addrs[dd->hfi1_id],
- NUM_FABRIC_SERDES);
+ fabric_serdes_broadcast[dd->hfi1_id],
+ fabric_serdes_addrs[dd->hfi1_id],
+ NUM_FABRIC_SERDES);
turn_off_spicos(dd, SPICO_FABRIC);
do {
ret = load_fabric_serdes_firmware(dd, &fw_fabric);
} while (retry_firmware(dd, ret));
clear_sbus_fast_mode(dd);
- release_hw_mutex(dd);
+ release_chip_resource(dd, CR_SBUS);
if (ret)
return ret;
}
@@ -1419,18 +1665,57 @@ int hfi1_firmware_init(struct hfi1_devdata *dd)
return obtain_firmware(dd);
}
+/*
+ * This function is a helper function for parse_platform_config(...) and
+ * does not check for validity of the platform configuration cache
+ * (because we know it is invalid as we are building up the cache).
+ * As such, this should not be called from anywhere other than
+ * parse_platform_config
+ */
+static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
+{
+ u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask;
+ struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
+
+ if (!system_table)
+ return -EINVAL;
+
+ meta_ver_meta =
+ *(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata
+ + SYSTEM_TABLE_META_VERSION);
+
+ mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
+ ver_start = meta_ver_meta & mask;
+
+ meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT;
+
+ mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
+ ver_len = meta_ver_meta & mask;
+
+ ver_start /= 8;
+ meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
+
+ if (meta_ver < 5) {
+ dd_dev_info(dd, "%s: Please update platform config\n", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
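
The metadata word packs a field's bit offset and bit length into one u32. A standalone sketch of the same unpacking, with assumed field widths (the real METADATA_TABLE_* constants are not shown in this patch), makes the arithmetic easier to follow:

    #include <stdint.h>
    #include <stdio.h>

    #define START_LEN_BITS 15  /* assumption: width of the start field */
    #define LEN_SHIFT      16  /* assumption: where the length field begins */
    #define LEN_LEN_BITS   16  /* assumption: width of the length field */

    int main(void)
    {
        /* a made-up metadata word: start = 40 bits, length = 4 bits */
        uint32_t meta = (4u << LEN_SHIFT) | 40u;

        uint32_t start = meta & ((1u << START_LEN_BITS) - 1);
        uint32_t len   = (meta >> LEN_SHIFT) & ((1u << LEN_LEN_BITS) - 1);

        /* the driver then reads the version byte at start/8 and masks it */
        printf("start=%u bits (byte %u), len=%u bits, mask=0x%x\n",
               start, start / 8, len, (1u << len) - 1);
        return 0;
    }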
+
int parse_platform_config(struct hfi1_devdata *dd)
{
struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
u32 *ptr = NULL;
- u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0;
+ u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
+ int ret = -EINVAL; /* assume failure */
- if (platform_config == NULL) {
+ if (!dd->platform_config.data) {
dd_dev_info(dd, "%s: Missing config file\n", __func__);
goto bail;
}
- ptr = (u32 *)platform_config->data;
+ ptr = (u32 *)dd->platform_config.data;
magic_num = *ptr;
ptr++;
@@ -1439,12 +1724,32 @@ int parse_platform_config(struct hfi1_devdata *dd)
goto bail;
}
- while (ptr < (u32 *)(platform_config->data + platform_config->size)) {
+ /* Field is file size in DWORDs */
+ file_length = (*ptr) * 4;
+ ptr++;
+
+ if (file_length > dd->platform_config.size) {
+ dd_dev_info(dd, "%s:File claims to be larger than read size\n",
+ __func__);
+ goto bail;
+ } else if (file_length < dd->platform_config.size) {
+ dd_dev_info(dd,
+ "%s:File claims to be smaller than read size, continuing\n",
+ __func__);
+ }
+ /* exactly equal, perfection */
+
+ /*
+ * In both cases where we proceed, using the self-reported file length
+ * is the safer option
+ */
+ while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
header1 = *ptr;
header2 = *(ptr + 1);
if (header1 != ~header2) {
dd_dev_info(dd, "%s: Failed validation at offset %ld\n",
- __func__, (ptr - (u32 *)platform_config->data));
+ __func__, (ptr - (u32 *)
+ dd->platform_config.data));
goto bail;
}
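
The size check above accepts three outcomes: an exact match, a file that claims less than was read (only the claimed length is parsed), and a file that claims more than was read (rejected). A small stand-alone sketch of the same decision:

    #include <stdint.h>
    #include <stdio.h>

    /* mirror of the size check: claimed length in DWORDs vs bytes read */
    static int check_length(uint32_t claimed_dwords, size_t read_bytes)
    {
        size_t file_length = (size_t)claimed_dwords * 4;

        if (file_length > read_bytes)
            return -1;     /* truncated read: do not trust the file */
        if (file_length < read_bytes)
            printf("trailing bytes ignored\n"); /* parse only file_length */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_length(256, 1024)); /* exact match: OK */
        printf("%d\n", check_length(300, 1024)); /* claims more: reject */
        return 0;
    }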
@@ -1467,6 +1772,9 @@ int parse_platform_config(struct hfi1_devdata *dd)
case PLATFORM_CONFIG_SYSTEM_TABLE:
pcfgcache->config_tables[table_type].num_table =
1;
+ ret = check_meta_version(dd, ptr);
+ if (ret)
+ goto bail;
break;
case PLATFORM_CONFIG_PORT_TABLE:
pcfgcache->config_tables[table_type].num_table =
@@ -1484,9 +1792,10 @@ int parse_platform_config(struct hfi1_devdata *dd)
break;
default:
dd_dev_info(dd,
- "%s: Unknown data table %d, offset %ld\n",
- __func__, table_type,
- (ptr - (u32 *)platform_config->data));
+ "%s: Unknown data table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr - (u32 *)
+ dd->platform_config.data));
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table = ptr;
@@ -1507,9 +1816,10 @@ int parse_platform_config(struct hfi1_devdata *dd)
break;
default:
dd_dev_info(dd,
- "%s: Unknown metadata table %d, offset %ld\n",
- __func__, table_type,
- (ptr - (u32 *)platform_config->data));
+ "%s: Unknown meta table %d, offset %ld\n",
+ __func__, table_type,
+ (ptr -
+ (u32 *)dd->platform_config.data));
goto bail; /* We don't trust this file now */
}
pcfgcache->config_tables[table_type].table_metadata =
@@ -1518,14 +1828,16 @@ int parse_platform_config(struct hfi1_devdata *dd)
/* Calculate and check table crc */
crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
- (table_length_dwords * 4));
+ (table_length_dwords * 4));
crc ^= ~(u32)0;
/* Jump the table */
ptr += table_length_dwords;
if (crc != *ptr) {
dd_dev_info(dd, "%s: Failed CRC check at offset %ld\n",
- __func__, (ptr - (u32 *)platform_config->data));
+ __func__, (ptr -
+ (u32 *)
+ dd->platform_config.data));
goto bail;
}
/* Jump the CRC DWORD */
@@ -1536,11 +1848,12 @@ int parse_platform_config(struct hfi1_devdata *dd)
return 0;
bail:
memset(pcfgcache, 0, sizeof(struct platform_config_cache));
- return -EINVAL;
+ return ret;
}
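
Each record the parser walks is laid out as header1, header2 (the bitwise complement of header1), table_length_dwords of payload, then one CRC dword computed as crc32_le(~0, payload, len * 4) ^ ~0. A minimal sketch of the header integrity test:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative record prefix; payload and CRC dwords follow it */
    struct record {
        uint32_t header1;  /* type/length/index fields */
        uint32_t header2;  /* must equal ~header1 */
    };

    static int headers_ok(uint32_t h1, uint32_t h2)
    {
        return h1 == ~h2;  /* same integrity test as the parser */
    }

    int main(void)
    {
        uint32_t h1 = 0x00030001;

        printf("valid: %d\n", headers_ok(h1, ~h1));  /* 1 */
        printf("corrupt: %d\n", headers_ok(h1, h1)); /* 0 */
        return 0;
    }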
static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
- int field, u32 *field_len_bits, u32 *field_start_bits)
+ int field, u32 *field_len_bits,
+ u32 *field_start_bits)
{
struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
u32 *src_ptr = NULL;
@@ -1600,8 +1913,9 @@ static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
* @len: length of memory pointed by @data in bytes.
*/
int get_platform_config_field(struct hfi1_devdata *dd,
- enum platform_config_table_type_encoding table_type,
- int table_index, int field_index, u32 *data, u32 len)
+ enum platform_config_table_type_encoding
+ table_type, int table_index, int field_index,
+ u32 *data, u32 len)
{
int ret = 0, wlen = 0, seek = 0;
u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
@@ -1613,7 +1927,8 @@ int get_platform_config_field(struct hfi1_devdata *dd,
return -EINVAL;
ret = get_platform_fw_field_metadata(dd, table_type, field_index,
- &field_len_bits, &field_start_bits);
+ &field_len_bits,
+ &field_start_bits);
if (ret)
return -EINVAL;
@@ -1629,19 +1944,21 @@ int get_platform_config_field(struct hfi1_devdata *dd,
if (len < field_len_bits)
return -EINVAL;
- seek = field_start_bits/8;
- wlen = field_len_bits/8;
+ seek = field_start_bits / 8;
+ wlen = field_len_bits / 8;
src_ptr = (u32 *)((u8 *)src_ptr + seek);
- /* We expect the field to be byte aligned and whole byte
- * lengths if we are here */
+ /*
+ * We expect the field to be byte aligned and whole byte
+ * lengths if we are here
+ */
memcpy(data, src_ptr, wlen);
return 0;
}
break;
case PLATFORM_CONFIG_PORT_TABLE:
- /* Port table is 4 DWORDS in META_VERSION 0 */
+ /* Port table is 4 DWORDS */
src_ptr = dd->hfi1_id ?
pcfgcache->config_tables[table_type].table + 4 :
pcfgcache->config_tables[table_type].table;
@@ -1669,7 +1986,7 @@ int get_platform_config_field(struct hfi1_devdata *dd,
if (!src_ptr || len < field_len_bits)
return -EINVAL;
- src_ptr += (field_start_bits/32);
+ src_ptr += (field_start_bits / 32);
*data = (*src_ptr >> (field_start_bits % 32)) &
((1 << field_len_bits) - 1);
@@ -1680,7 +1997,7 @@ int get_platform_config_field(struct hfi1_devdata *dd,
* Download the firmware needed for the Gen3 PCIe SerDes. An update
* to the SBus firmware is needed before updating the PCIe firmware.
*
- * Note: caller must be holding the HW mutex.
+ * Note: caller must be holding the SBus resource.
*/
int load_pcie_firmware(struct hfi1_devdata *dd)
{
@@ -1701,9 +2018,9 @@ int load_pcie_firmware(struct hfi1_devdata *dd)
if (fw_pcie_serdes_load) {
dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
- pcie_serdes_broadcast[dd->hfi1_id],
- pcie_serdes_addrs[dd->hfi1_id],
- NUM_PCIE_SERDES);
+ pcie_serdes_broadcast[dd->hfi1_id],
+ pcie_serdes_addrs[dd->hfi1_id],
+ NUM_PCIE_SERDES);
do {
ret = load_pcie_serdes_firmware(dd, &fw_pcie);
} while (retry_firmware(dd, ret));
@@ -1724,9 +2041,9 @@ void read_guid(struct hfi1_devdata *dd)
{
/* Take the DC out of reset to get a valid GUID value */
write_csr(dd, CCE_DC_CTRL, 0);
- (void) read_csr(dd, CCE_DC_CTRL);
+ (void)read_csr(dd, CCE_DC_CTRL);
dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
dd_dev_info(dd, "GUID %llx",
- (unsigned long long)dd->base_guid);
+ (unsigned long long)dd->base_guid);
}
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h
index 2611bb2e764d..16cbdc4073e0 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/staging/rdma/hfi1/hfi.h
@@ -1,14 +1,13 @@
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -65,6 +62,7 @@
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <rdma/rdma_vt.h>
#include "chip_registers.h"
#include "common.h"
@@ -73,7 +71,8 @@
#include "chip.h"
#include "mad.h"
#include "qsfp.h"
-#include "platform_config.h"
+#include "platform.h"
+#include "affinity.h"
/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U
@@ -98,6 +97,8 @@ extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
HFI1_CAP_MISC_MASK)
+/* Offline Disabled Reason is 4-bits */
+#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)
/*
* Control context is always 0 and handles the error packets.
@@ -177,6 +178,11 @@ struct ctxt_eager_bufs {
} *rcvtids;
};
+struct exp_tid_set {
+ struct list_head list;
+ u32 count;
+};
+
struct hfi1_ctxtdata {
/* shadow the ctxt's RcvCtrl register */
u64 rcvctrl;
@@ -233,20 +239,13 @@ struct hfi1_ctxtdata {
u32 expected_count;
/* index of first expected TID entry. */
u32 expected_base;
- /* cursor into the exp group sets */
- atomic_t tidcursor;
- /* number of exp TID groups assigned to the ctxt */
- u16 numtidgroups;
- /* size of exp TID group fields in tidusemap */
- u16 tidmapcnt;
- /* exp TID group usage bitfield array */
- unsigned long *tidusemap;
- /* pinned pages for exp sends, allocated at open */
- struct page **tid_pg_list;
- /* dma handles for exp tid pages */
- dma_addr_t *physshadow;
+
+ struct exp_tid_set tid_group_list;
+ struct exp_tid_set tid_used_list;
+ struct exp_tid_set tid_full_list;
+
/* lock protecting all Expected TID data */
- spinlock_t exp_lock;
+ struct mutex exp_lock;
/* number of pio bufs for this ctxt (all procs, if shared) */
u32 piocnt;
/* first pio buffer for this ctxt */
@@ -311,8 +310,24 @@ struct hfi1_ctxtdata {
*/
struct task_struct *progress;
struct list_head sdma_queues;
+ /* protect sdma queues */
spinlock_t sdma_qlock;
+ /* Is ASPM interrupt supported for this context */
+ bool aspm_intr_supported;
+ /* ASPM state (enabled/disabled) for this context */
+ bool aspm_enabled;
+ /* Timer for re-enabling ASPM if interrupt activity quietens down */
+ struct timer_list aspm_timer;
+ /* Lock to serialize between intr, timer intr and user threads */
+ spinlock_t aspm_lock;
+ /* Is ASPM processing enabled for this context (in intr context) */
+ bool aspm_intr_enable;
+ /* Last interrupt timestamp */
+ ktime_t aspm_ts_last_intr;
+ /* Last timestamp at which we scheduled a timer for this context */
+ ktime_t aspm_ts_timer_sched;
+
/*
* The interrupt handler for a particular receive context can vary
 * throughout its lifetime. This is not a lock-protected data member so
@@ -335,7 +350,7 @@ struct hfi1_packet {
void *hdr;
struct hfi1_ctxtdata *rcd;
__le32 *rhf_addr;
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
struct hfi1_other_headers *ohdr;
u64 rhf;
u32 maxcnt;
@@ -363,6 +378,7 @@ struct hfi1_snoop_data {
int mode_flag;
struct cdev cdev;
struct device *class_dev;
+ /* protect snoop data */
spinlock_t snoop_lock;
struct list_head queue;
wait_queue_head_t waitq;
@@ -375,7 +391,7 @@ struct hfi1_snoop_data {
#define HFI1_PORT_SNOOP_MODE 1U
#define HFI1_PORT_CAPTURE_MODE 2U
-struct hfi1_sge_state;
+struct rvt_sge_state;
/*
* Get/Set IB link-level config parameters for f_get/set_ib_cfg()
@@ -424,17 +440,17 @@ struct hfi1_sge_state;
#define __HLS_GOING_OFFLINE_BP 9
#define __HLS_LINK_COOLDOWN_BP 10
-#define HLS_UP_INIT (1 << __HLS_UP_INIT_BP)
-#define HLS_UP_ARMED (1 << __HLS_UP_ARMED_BP)
-#define HLS_UP_ACTIVE (1 << __HLS_UP_ACTIVE_BP)
-#define HLS_DN_DOWNDEF (1 << __HLS_DN_DOWNDEF_BP) /* link down default */
-#define HLS_DN_POLL (1 << __HLS_DN_POLL_BP)
-#define HLS_DN_DISABLE (1 << __HLS_DN_DISABLE_BP)
-#define HLS_DN_OFFLINE (1 << __HLS_DN_OFFLINE_BP)
-#define HLS_VERIFY_CAP (1 << __HLS_VERIFY_CAP_BP)
-#define HLS_GOING_UP (1 << __HLS_GOING_UP_BP)
-#define HLS_GOING_OFFLINE (1 << __HLS_GOING_OFFLINE_BP)
-#define HLS_LINK_COOLDOWN (1 << __HLS_LINK_COOLDOWN_BP)
+#define HLS_UP_INIT BIT(__HLS_UP_INIT_BP)
+#define HLS_UP_ARMED BIT(__HLS_UP_ARMED_BP)
+#define HLS_UP_ACTIVE BIT(__HLS_UP_ACTIVE_BP)
+#define HLS_DN_DOWNDEF BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
+#define HLS_DN_POLL BIT(__HLS_DN_POLL_BP)
+#define HLS_DN_DISABLE BIT(__HLS_DN_DISABLE_BP)
+#define HLS_DN_OFFLINE BIT(__HLS_DN_OFFLINE_BP)
+#define HLS_VERIFY_CAP BIT(__HLS_VERIFY_CAP_BP)
+#define HLS_GOING_UP BIT(__HLS_GOING_UP_BP)
+#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
+#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
@@ -490,6 +506,7 @@ struct hfi1_sge_state;
#define CNTR_DISABLED 0x2 /* Disable this counter */
#define CNTR_32BIT 0x4 /* Simulate 64 bits for this counter */
#define CNTR_VL 0x8 /* Per VL counter */
+#define CNTR_SDMA 0x10
#define CNTR_INVALID_VL -1 /* Specifies invalid VL */
#define CNTR_MODE_W 0x0
#define CNTR_MODE_R 0x1
@@ -512,10 +529,11 @@ static inline void incr_cntr32(u32 *cntr)
#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
+ enum irq_type type;
struct msix_entry msix;
void *arg;
char name[MAX_NAME_SIZE];
- cpumask_var_t mask;
+ cpumask_t mask;
};
/* per-SL CCA information */
@@ -542,6 +560,7 @@ enum {
};
struct vl_arb_cache {
+ /* protect vl arb cache */
spinlock_t lock;
struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
};
@@ -561,7 +580,8 @@ struct hfi1_pportdata {
struct kobject sl2sc_kobj;
struct kobject vl2mtu_kobj;
- /* QSFP support */
+ /* PHY support */
+ u32 port_type;
struct qsfp_data qsfp_info;
/* GUID for this interface, in host order */
@@ -586,6 +606,7 @@ struct hfi1_pportdata {
struct work_struct link_vc_work;
struct work_struct link_up_work;
struct work_struct link_down_work;
+ struct work_struct dc_host_req_work;
struct work_struct sma_message_work;
struct work_struct freeze_work;
struct work_struct link_downgrade_work;
@@ -623,6 +644,7 @@ struct hfi1_pportdata {
u16 link_speed_active;
u8 vls_supported;
u8 vls_operational;
+ u8 actual_vls_operational;
/* LID mask control */
u8 lmc;
/* Rx Polarity inversion (compensate for ~tx on partner) */
@@ -642,19 +664,23 @@ struct hfi1_pportdata {
u8 link_enabled; /* link enabled? */
u8 linkinit_reason;
u8 local_tx_rate; /* rate given to 8051 firmware */
+ u8 last_pstate; /* info only */
/* placeholders for IB MAD packet settings */
u8 overrun_threshold;
u8 phy_error_threshold;
- /* used to override LED behavior */
- u8 led_override; /* Substituted for normal value, if non-zero */
- u16 led_override_timeoff; /* delta to next timer event */
- u8 led_override_vals[2]; /* Alternates per blink-frame */
- u8 led_override_phase; /* Just counts, LSB picks from vals[] */
+ /* Used to override LED behavior for things like maintenance beaconing */
+ /*
+ * Alternates per phase of blink
+ * [0] holds LED off duration, [1] holds LED on duration
+ */
+ unsigned long led_override_vals[2];
+ u8 led_override_phase; /* LSB picks from vals[] */
atomic_t led_override_timer_active;
/* Used to flash LEDs in override mode */
struct timer_list led_override_timer;
+
u32 sm_trap_qp;
u32 sa_qp;
@@ -689,10 +715,12 @@ struct hfi1_pportdata {
/* CA's max number of 64 entry units in the congestion control table */
u8 cc_max_table_entries;
- /* begin congestion log related entries
- * cc_log_lock protects all congestion log related data */
+ /*
+ * begin congestion log related entries
+ * cc_log_lock protects all congestion log related data
+ */
spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
- u8 threshold_cong_event_map[OPA_MAX_SLS/8];
+ u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
u16 threshold_event_counter;
struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
int cc_log_idx; /* index for logging events */
@@ -705,8 +733,9 @@ struct hfi1_pportdata {
u64 *cntrs;
/* port relative synthetic counter buffer */
u64 *scntrs;
- /* we synthesize port_xmit_discards from several egress errors */
+ /* port_xmit_discards are synthesized from different egress errors */
u64 port_xmit_discards;
+ u64 port_xmit_discards_vl[C_VL_COUNT];
u64 port_xmit_constraint_errors;
u64 port_rcv_constraint_errors;
/* count of 'link_err' interrupts from DC */
@@ -728,6 +757,9 @@ struct hfi1_pportdata {
u8 remote_link_down_reason;
/* Error events that will cause a port bounce. */
u32 port_error_action;
+ struct work_struct linkstate_active_work;
+ /* Does this port need to prescan for FECNs */
+ bool cc_prescan;
};
typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);
@@ -773,6 +805,12 @@ struct hfi1_temp {
u8 triggers; /* temperature triggers */
};
+/* common data between shared ASIC HFIs */
+struct hfi1_asic_data {
+ struct hfi1_devdata *dds[2]; /* back pointers */
+ struct mutex asic_resource_mutex;
+};
+
/* device data struct now contains only "general per-device" info.
* fields related to a physical IB port are in a hfi1_pportdata struct.
*/
@@ -782,6 +820,7 @@ struct sdma_vl_map;
#define BOARD_VERS_MAX 96 /* how long the version string can be */
#define SERIAL_MAX 16 /* length of the serial number */
+typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_devdata {
struct hfi1_ibdev verbs_dev; /* must be first */
struct list_head list;
@@ -811,6 +850,12 @@ struct hfi1_devdata {
spinlock_t sc_lock;
/* Per VL data. Enough for all VLs but not all elements are set/used. */
struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
+ /* lock for pio_map */
+ spinlock_t pio_map_lock;
+ /* array of kernel send contexts */
+ struct send_context **kernel_send_context;
+ /* array of vl maps */
+ struct pio_vl_map __rcu *pio_map;
/* seqlock for sc2vl */
seqlock_t sc2vl_lock;
u64 sc2vl[4];
@@ -841,6 +886,8 @@ struct hfi1_devdata {
wait_queue_head_t sdma_unfreeze_wq;
atomic_t sdma_unfreeze_count;
+ /* common data between shared ASIC HFIs in this OS */
+ struct hfi1_asic_data *asic_data;
/* hfi1_pportdata, points to array of (physical) port-specific
* data structs, indexed by pidx (0..n-1)
@@ -873,10 +920,11 @@ struct hfi1_devdata {
/* reset value */
u64 z_int_counter;
u64 z_rcv_limit;
+ u64 z_send_schedule;
/* percpu int_counter */
u64 __percpu *int_counter;
u64 __percpu *rcv_limit;
-
+ u64 __percpu *send_schedule;
/* number of receive contexts in use by the driver */
u32 num_rcv_contexts;
/* number of pio send contexts in use by the driver */
@@ -885,6 +933,8 @@ struct hfi1_devdata {
* number of ctxts available for PSM open
*/
u32 freectxts;
+ /* total number of available user/PSM contexts */
+ u32 num_user_contexts;
/* base receive interrupt timeout, in CSR units */
u32 rcv_intr_timeout_csr;
@@ -996,9 +1046,8 @@ struct hfi1_devdata {
u16 irev; /* implementation revision */
u16 dc8051_ver; /* 8051 firmware version */
+ struct platform_config platform_config;
struct platform_config_cache pcfg_cache;
- /* control high-level access to qsfp */
- struct mutex qsfp_i2c_mutex;
struct diag_client *diag_client;
spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
@@ -1008,8 +1057,6 @@ struct hfi1_devdata {
u16 psxmitwait_check_rate;
/* high volume overflow errors deferred to tasklet */
struct tasklet_struct error_tasklet;
- /* per device cq worker */
- struct kthread_worker *worker;
/* MSI-X information */
struct hfi1_msix_entry *msix_entries;
@@ -1090,10 +1137,8 @@ struct hfi1_devdata {
* Handlers for outgoing data so that snoop/capture does not
* have to have its hooks in the send path
*/
- int (*process_pio_send)(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
- int (*process_dma_send)(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc);
+ send_routine process_pio_send;
+ send_routine process_dma_send;
void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, const void *from, size_t count);
@@ -1105,7 +1150,6 @@ struct hfi1_devdata {
struct timer_list rcverr_timer;
u32 rcv_ovfl_cnt;
- int assigned_node_id;
wait_queue_head_t event_queue;
/* Save the enabled LCB error bits */
@@ -1115,6 +1159,16 @@ struct hfi1_devdata {
/* receive context tail dummy address */
__le64 *rcvhdrtail_dummy_kvaddr;
dma_addr_t rcvhdrtail_dummy_physaddr;
+
+ bool eprom_available; /* true if EPROM is available for this device */
+ bool aspm_supported; /* Does HW support ASPM */
+ bool aspm_enabled; /* ASPM state: enabled/disabled */
+ /* Serialize ASPM enable/disable between multiple verbs contexts */
+ spinlock_t aspm_lock;
+ /* Number of verbs contexts which have disabled ASPM */
+ atomic_t aspm_disabled_cnt;
+
+ struct hfi1_affinity *affinity;
};
/* 8051 firmware version helper */
@@ -1125,6 +1179,9 @@ struct hfi1_devdata {
#define PT_EAGER 1
#define PT_INVALID 2
+struct tid_rb_node;
+struct mmu_rb_node;
+
/* Private data for file operations */
struct hfi1_filedata {
struct hfi1_ctxtdata *uctxt;
@@ -1133,6 +1190,16 @@ struct hfi1_filedata {
struct hfi1_user_sdma_pkt_q *pq;
/* for cpu affinity; -1 if none */
int rec_cpu_num;
+ u32 tid_n_pinned;
+ struct rb_root tid_rb_root;
+ struct tid_rb_node **entry_to_rb;
+ spinlock_t tid_lock; /* protect tid_[limit,used] counters */
+ u32 tid_limit;
+ u32 tid_used;
+ u32 *invalid_tids;
+ u32 invalid_tid_idx;
+ /* protect invalid_tids array and invalid_tid_idx */
+ spinlock_t invalid_lock;
};
extern struct list_head hfi1_dev_list;
@@ -1156,7 +1223,7 @@ void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
int hfi1_create_rcvhdrq(struct hfi1_devdata *, struct hfi1_ctxtdata *);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *);
int hfi1_create_ctxts(struct hfi1_devdata *dd);
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *, u32);
+struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *, u32, int);
void hfi1_init_pportdata(struct pci_dev *, struct hfi1_pportdata *,
struct hfi1_devdata *, u8, u8);
void hfi1_free_ctxtdata(struct hfi1_devdata *, struct hfi1_ctxtdata *);
@@ -1164,6 +1231,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *, struct hfi1_ctxtdata *);
int handle_receive_interrupt(struct hfi1_ctxtdata *, int);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int);
+void set_all_slowpath(struct hfi1_devdata *dd);
/* receive packet handler dispositions */
#define RCV_PKT_OK 0x0 /* keep going */
@@ -1184,6 +1252,15 @@ static inline u32 driver_lstate(struct hfi1_pportdata *ppd)
return ppd->lstate; /* use the cached value */
}
+void receive_interrupt_work(struct work_struct *work);
+
+/* extract service channel from header and rhf */
+static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
+{
+ return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
+ ((!!(rhf & RHF_DC_INFO_MASK)) << 4);
+}
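
The service channel is 5 bits wide: SC[3:0] comes from LRH bits 15:12 and SC[4] from the RHF DC info bit. A stand-alone sketch with an assumed RHF_DC_INFO_MASK (the real mask lives in the chip headers):

    #include <stdint.h>
    #include <stdio.h>

    #define RHF_DC_INFO_MASK (1ull << 63) /* assumption: actual bit position differs */

    /* mirror of hdr2sc(); lrh0 is already in host byte order here */
    static int hdr2sc(uint16_t lrh0, uint64_t rhf)
    {
        return ((lrh0 >> 12) & 0xf) | ((!!(rhf & RHF_DC_INFO_MASK)) << 4);
    }

    int main(void)
    {
        /* SC[3:0] = 0x5 from the LRH, SC[4] = 1 from the RHF DC info bit */
        printf("sc = 0x%x\n", hdr2sc(0x5000, RHF_DC_INFO_MASK)); /* 0x15 */
        return 0;
    }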
+
static inline u16 generate_jkey(kuid_t uid)
{
return from_kuid(current_user_ns(), uid) & 0xffff;
@@ -1253,7 +1330,7 @@ static inline u32 egress_cycles(u32 len, u32 rate)
void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
u32 rqpn, u8 svc_type);
-void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
+void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh);
@@ -1424,6 +1501,7 @@ static inline int valid_ib_mtu(unsigned int mtu)
mtu == 1024 || mtu == 2048 ||
mtu == 4096;
}
+
static inline int valid_opa_max_mtu(unsigned int mtu)
{
return mtu >= 2048 &&
@@ -1445,12 +1523,13 @@ void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
int snoop_recv_handler(struct hfi1_packet *packet);
-int snoop_send_dma_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
-int snoop_send_pio_handler(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
u64 pbc, const void *from, size_t count);
+int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
{
@@ -1472,6 +1551,11 @@ static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
return container_of(ibp, struct hfi1_pportdata, ibport_data);
}
+static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
+{
+ return container_of(rdi, struct hfi1_ibdev, rdi);
+}
+
static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
@@ -1515,12 +1599,10 @@ static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
#define HFI1_HAS_SDMA_TIMEOUT 0x8
#define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
#define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
-#define HFI1_DO_INIT_ASIC 0x100 /* This device will init the ASIC */
/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
-
/* ctxt_flag bit offsets */
/* context has been setup */
#define HFI1_CTXT_SETUP_DONE 1
@@ -1538,14 +1620,10 @@ void hfi1_free_devdata(struct hfi1_devdata *);
void cc_state_reclaim(struct rcu_head *rcu);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
-/*
- * Set LED override, only the two LSBs have "public" meaning, but
- * any non-zero value substitutes them for the Link and LinkTrain
- * LED states.
- */
-#define HFI1_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
-#define HFI1_LED_LOG 2 /* Logical (link) YELLOW LED */
-void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val);
+/* LED beaconing functions */
+void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
+ unsigned int timeoff);
+void shutdown_led_override(struct hfi1_pportdata *ppd);
#define HFI1_CREDIT_RETURN_RATE (100)
@@ -1587,12 +1665,13 @@ void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val);
*/
#define DEFAULT_RCVHDR_ENTSIZE 32
+bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
-void hfi1_release_user_pages(struct page **, size_t, bool);
+void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
- *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
+ *((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
}
static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
@@ -1601,7 +1680,7 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
* volatile because it's a DMA target from the chip, routine is
* inlined, and don't want register caching or reordering.
*/
- return (u32) le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
+ return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
}
/*
@@ -1633,12 +1712,13 @@ void restore_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
- enum platform_config_table_type_encoding table_type,
- int table_index, int field_index, u32 *data, u32 len);
+ enum platform_config_table_type_encoding
+ table_type, int table_index, int field_index,
+ u32 *data, u32 len);
-dma_addr_t hfi1_map_page(struct pci_dev *, struct page *, unsigned long,
- size_t, int);
const char *get_unit_name(int unit);
+const char *get_card_name(struct rvt_dev_info *rdi);
+struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
/*
* Flush write combining store buffers (if present) and perform a write
@@ -1659,7 +1739,7 @@ int process_receive_invalid(struct hfi1_packet *packet);
extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
-void update_sge(struct hfi1_sge_state *ss, u32 length);
+void update_sge(struct rvt_sge_state *ss, u32 length);
/* global module parameter variables */
extern unsigned int hfi1_max_mtu;
@@ -1667,7 +1747,7 @@ extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
extern int num_user_contexts;
extern unsigned n_krcvqs;
-extern u8 krcvqs[];
+extern uint krcvqs[];
extern int krcvqsset;
extern uint kdeth_qp;
extern uint loopback;
@@ -1730,7 +1810,7 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
if (is_ax(dd))
- /* turn off send-side job key checks - A0 erratum */
+ /* turn off send-side job key checks - A0 */
return base_sc_integrity &
~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
return base_sc_integrity;
@@ -1757,7 +1837,7 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
if (is_ax(dd))
- /* turn off send-side job key checks - A0 erratum */
+ /* turn off send-side job key checks - A0 */
return base_sdma_integrity &
~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
return base_sdma_integrity;
@@ -1794,6 +1874,10 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
get_unit_name((dd)->unit), ##__VA_ARGS__)
+#define dd_dev_dbg(dd, fmt, ...) \
+ dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
+ get_unit_name((dd)->unit), ##__VA_ARGS__)
+
#define hfi1_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
get_unit_name((dd)->unit), (dd)->unit, (port), \
@@ -1825,13 +1909,14 @@ static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
dd->z_int_counter = get_all_cpu_total(dd->int_counter);
dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
+ dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);
ppd = (struct hfi1_pportdata *)(dd + 1);
for (i = 0; i < dd->num_pports; i++, ppd++) {
- ppd->ibport_data.z_rc_acks =
- get_all_cpu_total(ppd->ibport_data.rc_acks);
- ppd->ibport_data.z_rc_qacks =
- get_all_cpu_total(ppd->ibport_data.rc_qacks);
+ ppd->ibport_data.rvp.z_rc_acks =
+ get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
+ ppd->ibport_data.rvp.z_rc_qacks =
+ get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
}
}
@@ -1844,6 +1929,18 @@ static inline void setextled(struct hfi1_devdata *dd, u32 on)
write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
}
+/* return the i2c resource given the target */
+static inline u32 i2c_target(u32 target)
+{
+ return target ? CR_I2C2 : CR_I2C1;
+}
+
+/* return the i2c chain chip resource that this HFI uses for QSFP */
+static inline u32 qsfp_resource(struct hfi1_devdata *dd)
+{
+ return i2c_target(dd->hfi1_id);
+}
+
int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
#endif /* _HFI1_KERNEL_H */
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/staging/rdma/hfi1/init.c
index 4dd8051aba7e..cfcdc16b41c3 100644
--- a/drivers/staging/rdma/hfi1/init.c
+++ b/drivers/staging/rdma/hfi1/init.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -56,6 +53,7 @@
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
+#include <rdma/rdma_vt.h>
#include "hfi.h"
#include "device.h"
@@ -65,6 +63,7 @@
#include "sdma.h"
#include "debugfs.h"
#include "verbs.h"
+#include "aspm.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
@@ -75,6 +74,7 @@
#define HFI1_MIN_USER_CTXT_BUFCNT 7
#define HFI1_MIN_HDRQ_EGRBUF_CNT 2
+#define HFI1_MAX_HDRQ_EGRBUF_CNT 16352
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
@@ -87,9 +87,9 @@ module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
MODULE_PARM_DESC(
num_user_contexts, "Set max number of user contexts to use");
-u8 krcvqs[RXE_NUM_DATA_VL];
+uint krcvqs[RXE_NUM_DATA_VL];
int krcvqsset;
-module_param_array(krcvqs, byte, &krcvqsset, S_IRUGO);
+module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
/* computed based on above array */
@@ -128,16 +128,12 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
{
unsigned i;
int ret;
- int local_node_id = pcibus_to_node(dd->pcidev->bus);
/* Control context has to be always 0 */
BUILD_BUG_ON(HFI1_CTRL_CTXT != 0);
- if (local_node_id < 0)
- local_node_id = numa_node_id();
- dd->assigned_node_id = local_node_id;
-
- dd->rcd = kcalloc(dd->num_rcv_contexts, sizeof(*dd->rcd), GFP_KERNEL);
+ dd->rcd = kzalloc_node(dd->num_rcv_contexts * sizeof(*dd->rcd),
+ GFP_KERNEL, dd->node);
if (!dd->rcd)
goto nomem;
@@ -147,10 +143,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
struct hfi1_ctxtdata *rcd;
ppd = dd->pport + (i % dd->num_pports);
- rcd = hfi1_create_ctxtdata(ppd, i);
+ rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
if (!rcd) {
dd_dev_err(dd,
- "Unable to allocate kernel receive context, failing\n");
+ "Unable to allocate kernel receive context, failing\n");
goto nomem;
}
/*
@@ -171,7 +167,7 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
rcd->sc = sc_alloc(dd, SC_ACK, rcd->rcvhdrqentsize, dd->node);
if (!rcd->sc) {
dd_dev_err(dd,
- "Unable to allocate kernel send context, failing\n");
+ "Unable to allocate kernel send context, failing\n");
dd->rcd[rcd->ctxt] = NULL;
hfi1_free_ctxtdata(dd, rcd);
goto nomem;
@@ -189,6 +185,12 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
}
}
+ /*
+ * Initialize aspm, to be done after gen3 transition and setting up
+ * contexts and before enabling interrupts
+ */
+ aspm_init(dd);
+
return 0;
nomem:
ret = -ENOMEM;
@@ -201,7 +203,8 @@ bail:
/*
* Common code for user and kernel context setup.
*/
-struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
+struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
+ int numa)
{
struct hfi1_devdata *dd = ppd->dd;
struct hfi1_ctxtdata *rcd;
@@ -224,10 +227,10 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
rcd->cnt = 1;
rcd->ctxt = ctxt;
dd->rcd[ctxt] = rcd;
- rcd->numa_id = numa_node_id();
+ rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
- spin_lock_init(&rcd->exp_lock);
+ mutex_init(&rcd->exp_lock);
/*
* Calculate the context's RcvArray entry starting point.
@@ -260,7 +263,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
/* Validate and initialize Rcv Hdr Q variables */
if (rcvhdrcnt % HDRQ_INCREMENT) {
dd_dev_err(dd,
- "ctxt%u: header queue count %d must be divisible by %d\n",
+ "ctxt%u: header queue count %d must be divisible by %lu\n",
rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
goto bail;
}
@@ -332,7 +335,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt)
}
return rcd;
bail:
- kfree(rcd->opstats);
kfree(rcd->egrbufs.rcvtids);
kfree(rcd->egrbufs.buffers);
kfree(rcd);
@@ -380,7 +382,7 @@ void set_link_ipg(struct hfi1_pportdata *ppd)
cc_state = get_cc_state(ppd);
- if (cc_state == NULL)
+ if (!cc_state)
/*
* This should _never_ happen - rcu_read_lock() is held,
* and set_link_ipg() should not be called if cc_state
@@ -432,7 +434,7 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
cc_state = get_cc_state(ppd);
- if (cc_state == NULL) {
+ if (!cc_state) {
rcu_read_unlock();
return HRTIMER_NORESTART;
}
@@ -494,14 +496,19 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
INIT_WORK(&ppd->link_up_work, handle_link_up);
INIT_WORK(&ppd->link_down_work, handle_link_down);
+ INIT_WORK(&ppd->dc_host_req_work, handle_8051_request);
INIT_WORK(&ppd->freeze_work, handle_freeze);
INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
INIT_WORK(&ppd->sma_message_work, handle_sma_message);
INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+ INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
+ INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
+
mutex_init(&ppd->hls_lock);
spin_lock_init(&ppd->sdma_alllock);
spin_lock_init(&ppd->qsfp_info.qsfp_lock);
+ ppd->qsfp_info.ppd = ppd;
ppd->sm_trap_qp = 0x0;
ppd->sa_qp = 0x1;
@@ -583,8 +590,8 @@ static void enable_chip(struct hfi1_devdata *dd)
* Enable kernel ctxts' receive and receive interrupt.
* Other ctxts done as user opens and initializes them.
*/
- rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
for (i = 0; i < dd->first_user_ctxt; ++i) {
+ rcvmask = HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB;
rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
if (!HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, MULTI_PKT_EGR))
@@ -730,14 +737,14 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
lastfail = hfi1_setup_eagerbufs(rcd);
if (lastfail)
dd_dev_err(dd,
- "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
+ "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
}
if (lastfail)
ret = lastfail;
/* Allocate enough memory for user event notification. */
- len = ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
- sizeof(*dd->events), PAGE_SIZE);
+ len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
+ sizeof(*dd->events));
dd->events = vmalloc_user(len);
if (!dd->events)
dd_dev_err(dd, "Failed to allocate user events page\n");
@@ -763,7 +770,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
/* enable chip even if we have an error, so we can debug cause */
enable_chip(dd);
- ret = hfi1_cq_init(dd);
done:
/*
* Set status even if port serdes is not initialized
@@ -780,20 +786,15 @@ done:
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
- /* initialize the qsfp if it exists
- * Requires interrupts to be enabled so we are notified
- * when the QSFP completes reset, and has
- * to be done before bringing up the SERDES
+ /*
+ * start the serdes - must be after interrupts are
+ * enabled so we are notified when the link goes up
*/
- init_qsfp(ppd);
-
- /* start the serdes - must be after interrupts are
- enabled so we are notified when the link goes up */
lastfail = bringup_serdes(ppd);
if (lastfail)
dd_dev_info(dd,
- "Failed to bring up port %u\n",
- ppd->port);
+ "Failed to bring up port %u\n",
+ ppd->port);
/*
* Set status even if port serdes is not initialized
@@ -905,6 +906,8 @@ static void shutdown_device(struct hfi1_devdata *dd)
/* disable the send device */
pio_send_control(dd, PSC_GLOBAL_DISABLE);
+ shutdown_led_override(ppd);
+
/*
* Clear SerdesEnable.
* We can't count on interrupts since we are stopping.
@@ -962,17 +965,33 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
kfree(rcd->egrbufs.buffers);
sc_free(rcd->sc);
- vfree(rcd->physshadow);
- vfree(rcd->tid_pg_list);
vfree(rcd->user_event_mask);
vfree(rcd->subctxt_uregbase);
vfree(rcd->subctxt_rcvegrbuf);
vfree(rcd->subctxt_rcvhdr_base);
- kfree(rcd->tidusemap);
kfree(rcd->opstats);
kfree(rcd);
}
+/*
+ * Release our hold on the shared asic data. If we are the last one,
+ * free the structure. Must be holding hfi1_devs_lock.
+ */
+static void release_asic_data(struct hfi1_devdata *dd)
+{
+ int other;
+
+ if (!dd->asic_data)
+ return;
+ dd->asic_data->dds[dd->hfi1_id] = NULL;
+ other = dd->hfi1_id ? 0 : 1;
+ if (!dd->asic_data->dds[other]) {
+ /* we are the last holder, free it */
+ kfree(dd->asic_data);
+ }
+ dd->asic_data = NULL;
+}
+
void hfi1_free_devdata(struct hfi1_devdata *dd)
{
unsigned long flags;
@@ -980,12 +999,15 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
spin_lock_irqsave(&hfi1_devs_lock, flags);
idr_remove(&hfi1_unit_table, dd->unit);
list_del(&dd->list);
+ release_asic_data(dd);
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
- hfi1_dbg_ibdev_exit(&dd->verbs_dev);
+ free_platform_config(dd);
rcu_barrier(); /* wait for rcu callbacks to complete */
free_percpu(dd->int_counter);
free_percpu(dd->rcv_limit);
- ib_dealloc_device(&dd->verbs_dev.ibdev);
+ hfi1_dev_affinity_free(dd);
+ free_percpu(dd->send_schedule);
+ ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
}
/*
@@ -1000,19 +1022,19 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
unsigned long flags;
struct hfi1_devdata *dd;
- int ret;
+ int ret, nports;
+
+ /* extra is sizeof(struct hfi1_pportdata) * number of ports */
+ nports = extra / sizeof(struct hfi1_pportdata);
- dd = (struct hfi1_devdata *)ib_alloc_device(sizeof(*dd) + extra);
+ dd = (struct hfi1_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
+ nports);
if (!dd)
return ERR_PTR(-ENOMEM);
- /* extra is * number of ports */
- dd->num_pports = extra / sizeof(struct hfi1_pportdata);
+ dd->num_pports = nports;
dd->pport = (struct hfi1_pportdata *)(dd + 1);
INIT_LIST_HEAD(&dd->list);
- dd->node = dev_to_node(&pdev->dev);
- if (dd->node < 0)
- dd->node = 0;
idr_preload(GFP_KERNEL);
spin_lock_irqsave(&hfi1_devs_lock, flags);
@@ -1042,9 +1064,9 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
spin_lock_init(&dd->sc_init_lock);
spin_lock_init(&dd->dc8051_lock);
spin_lock_init(&dd->dc8051_memlock);
- mutex_init(&dd->qsfp_i2c_mutex);
seqlock_init(&dd->sc2vl_lock);
spin_lock_init(&dd->sde_map_lock);
+ spin_lock_init(&dd->pio_map_lock);
init_waitqueue_head(&dd->event_queue);
dd->int_counter = alloc_percpu(u64);
@@ -1063,6 +1085,14 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
goto bail;
}
+ dd->send_schedule = alloc_percpu(u64);
+ if (!dd->send_schedule) {
+ ret = -ENOMEM;
+ hfi1_early_err(&pdev->dev,
+ "Could not allocate per-cpu int_counter\n");
+ goto bail;
+ }
+
if (!hfi1_cpulist_count) {
u32 count = num_online_cpus();
@@ -1075,13 +1105,12 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
&pdev->dev,
"Could not alloc cpulist info, cpu affinity might be wrong\n");
}
- hfi1_dbg_ibdev_init(&dd->verbs_dev);
return dd;
bail:
if (!list_empty(&dd->list))
list_del_init(&dd->list);
- ib_dealloc_device(&dd->verbs_dev.ibdev);
+ ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
return ERR_PTR(ret);
}
@@ -1174,8 +1203,10 @@ static int __init hfi1_mod_init(void)
user_credit_return_threshold = 100;
compute_krcvqs();
- /* sanitize receive interrupt count, time must wait until after
- the hardware type is known */
+ /*
+ * sanitize receive interrupt count, time must wait until after
+ * the hardware type is known
+ */
if (rcv_intr_count > RCV_HDR_HEAD_COUNTER_MASK)
rcv_intr_count = RCV_HDR_HEAD_COUNTER_MASK;
/* reject invalid combinations */
@@ -1210,6 +1241,9 @@ static int __init hfi1_mod_init(void)
idr_init(&hfi1_unit_table);
hfi1_dbg_init();
+ ret = hfi1_wss_init();
+ if (ret < 0)
+ goto bail_wss;
ret = pci_register_driver(&hfi1_pci_driver);
if (ret < 0) {
pr_err("Unable to register driver: error %d\n", -ret);
@@ -1218,6 +1252,8 @@ static int __init hfi1_mod_init(void)
goto bail; /* all OK */
bail_dev:
+ hfi1_wss_exit();
+bail_wss:
hfi1_dbg_exit();
idr_destroy(&hfi1_unit_table);
dev_cleanup();
@@ -1233,6 +1269,7 @@ module_init(hfi1_mod_init);
static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
+ hfi1_wss_exit();
hfi1_dbg_exit();
hfi1_cpulist_count = 0;
kfree(hfi1_cpulist);
@@ -1304,16 +1341,18 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
}
}
kfree(tmp);
+ free_pio_map(dd);
/* must follow rcv context free - need to remove rcv's hooks */
for (ctxt = 0; ctxt < dd->num_send_contexts; ctxt++)
sc_free(dd->send_contexts[ctxt].sc);
dd->num_send_contexts = 0;
kfree(dd->send_contexts);
dd->send_contexts = NULL;
+ kfree(dd->hw_to_sw);
+ dd->hw_to_sw = NULL;
kfree(dd->boardname);
vfree(dd->events);
vfree(dd->status);
- hfi1_cq_exit(dd);
}
/*
@@ -1347,6 +1386,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = -EINVAL;
goto bail;
}
+ if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
+ hfi1_early_err(&pdev->dev,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
+ ret = -EINVAL;
+ goto bail;
+ }
/* use the encoding function as a sanitization check */
if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
@@ -1423,8 +1469,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* we still create devices, so diags, etc. can be used
* to determine cause of problem.
*/
- if (!initfail && !ret)
+ if (!initfail && !ret) {
dd->flags |= HFI1_INITTED;
+ /* create debugfs files after init and ib register */
+ hfi1_dbg_ibdev_init(&dd->verbs_dev);
+ }
j = hfi1_device_create(dd);
if (j)
@@ -1465,6 +1514,8 @@ static void remove_one(struct pci_dev *pdev)
{
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
+ /* close debugfs files before ib unregister */
+ hfi1_dbg_ibdev_exit(&dd->verbs_dev);
/* unregister from IB core */
hfi1_unregister_ib_device(dd);
@@ -1506,8 +1557,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
* rcvhdrqentsize is in DWs, so we have to convert to bytes
* (* sizeof(u32)).
*/
- amt = ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
- sizeof(u32), PAGE_SIZE);
+ amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
+ sizeof(u32));
gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
GFP_USER : GFP_KERNEL;
@@ -1517,18 +1568,11 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
if (!rcd->rcvhdrq) {
dd_dev_err(dd,
- "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
- amt, rcd->ctxt);
+ "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
+ amt, rcd->ctxt);
goto bail;
}
- /* Event mask is per device now and is in hfi1_devdata */
- /*if (rcd->ctxt >= dd->first_user_ctxt) {
- rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
- if (!rcd->user_event_mask)
- goto bail_free_hdrq;
- }*/
-
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
@@ -1569,8 +1613,8 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
bail_free:
dd_dev_err(dd,
- "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
- rcd->ctxt);
+ "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
+ rcd->ctxt);
vfree(rcd->user_event_mask);
rcd->user_event_mask = NULL;
dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
@@ -1660,7 +1704,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
if (rcd->egrbufs.rcvtid_size == round_mtu ||
!HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
- rcd->ctxt);
+ rcd->ctxt);
goto bail_rcvegrbuf_phys;
}
@@ -1695,8 +1739,9 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
rcd->egrbufs.buffers[j].len)) {
j++;
offset = 0;
- } else
+ } else {
offset += new_size;
+ }
}
rcd->egrbufs.rcvtid_size = new_size;
}
@@ -1709,7 +1754,6 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
rcd->ctxt, rcd->egrbufs.alloced, rcd->egrbufs.rcvtid_size,
rcd->egrbufs.size);
-
/*
* Set the contexts rcv array head update threshold to the closest
* power of 2 (so we can use a mask instead of modulo) below half
@@ -1743,14 +1787,14 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
- rcd->egrbufs.rcvtids[idx].phys, order);
+ rcd->egrbufs.rcvtids[idx].phys, order);
cond_resched();
}
goto bail;
bail_rcvegrbuf_phys:
for (idx = 0; idx < rcd->egrbufs.alloced &&
- rcd->egrbufs.buffers[idx].addr;
+ rcd->egrbufs.buffers[idx].addr;
idx++) {
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[idx].len,
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/staging/rdma/hfi1/intr.c
index 426582b9ab65..65348d16ab2f 100644
--- a/drivers/staging/rdma/hfi1/intr.c
+++ b/drivers/staging/rdma/hfi1/intr.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -98,7 +95,7 @@ static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
*/
if (!(dd->flags & HFI1_INITTED))
return;
- event.device = &dd->verbs_dev.ibdev;
+ event.device = &dd->verbs_dev.rdi.ibdev;
event.element.port_num = ppd->port;
event.event = ev;
ib_dispatch_event(&event);
@@ -131,28 +128,26 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
* NOTE: This uses this device's vAU, vCU, and vl15_init for
* the remote values. Both sides must be using the values.
*/
- if (quick_linkup
- || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
set_up_vl15(dd, dd->vau, dd->vl15_init);
assign_remote_cm_au_table(dd, dd->vcu);
ppd->neighbor_guid =
- read_csr(dd,
- DC_DC8051_STS_REMOTE_GUID);
+ read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
ppd->neighbor_type =
read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
ppd->neighbor_port_number =
read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
- DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
- dd_dev_info(dd,
- "Neighbor GUID: %llx Neighbor type %d\n",
- ppd->neighbor_guid,
- ppd->neighbor_type);
+ DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
+ dd_dev_info(dd, "Neighbor GUID: %llx Neighbor type %d\n",
+ ppd->neighbor_guid,
+ ppd->neighbor_type);
}
/* physical link went up */
ppd->linkup = 1;
- ppd->offline_disabled_reason = OPA_LINKDOWN_REASON_NONE;
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
/* link widths are not available until the link is fully up */
get_linkup_link_widths(ppd);
@@ -165,7 +160,7 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
reset_link_credits(dd);
/* freeze after a link down to guarantee a clean egress */
- start_freeze_handling(ppd, FREEZE_SELF|FREEZE_LINK_DOWN);
+ start_freeze_handling(ppd, FREEZE_SELF | FREEZE_LINK_DOWN);
ev = IB_EVENT_PORT_ERR;
@@ -177,8 +172,6 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
/* notify IB of the link change */
signal_ib_event(ppd, ev);
}
-
-
}
/*
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/staging/rdma/hfi1/iowait.h
index e8ba5606d08d..2ec6ef38d389 100644
--- a/drivers/staging/rdma/hfi1/iowait.h
+++ b/drivers/staging/rdma/hfi1/iowait.h
@@ -1,14 +1,13 @@
#ifndef _HFI1_IOWAIT_H
#define _HFI1_IOWAIT_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -54,6 +51,8 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include "sdma_txreq.h"
+
/*
* typedef (*restart_t)() - restart callback
* @work: pointer to work structure
@@ -67,9 +66,11 @@ struct sdma_engine;
* @list: used to add/insert into QP/PQ wait lists
* @tx_head: overflow list of sdma_txreq's
* @sleep: no space callback
- * @wakeup: space callback
+ * @wakeup: space callback wakeup
+ * @sdma_drained: sdma count drained
* @iowork: workqueue overhead
* @wait_dma: wait for sdma_busy == 0
+ * @wait_pio: wait for pio_busy == 0
* @sdma_busy: # of packets in flight
* @count: total number of descriptors in tx_head'ed list
* @tx_limit: limit for overflow queuing
@@ -101,9 +102,12 @@ struct iowait {
struct sdma_txreq *tx,
unsigned seq);
void (*wakeup)(struct iowait *wait, int reason);
+ void (*sdma_drained)(struct iowait *wait);
struct work_struct iowork;
wait_queue_head_t wait_dma;
+ wait_queue_head_t wait_pio;
atomic_t sdma_busy;
+ atomic_t pio_busy;
u32 count;
u32 tx_limit;
u32 tx_count;
@@ -117,7 +121,7 @@ struct iowait {
* @tx_limit: limit for overflow queuing
* @func: restart function for workqueue
* @sleep: sleep function for no space
- * @wakeup: wakeup function for no space
 + * @wakeup: wakeup function for no space
 + * @sdma_drained: callback when the sdma queue has drained
*
* This function initializes the iowait
* structure embedded in the QP or PQ.
@@ -133,17 +137,21 @@ static inline void iowait_init(
struct iowait *wait,
struct sdma_txreq *tx,
unsigned seq),
- void (*wakeup)(struct iowait *wait, int reason))
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait))
{
wait->count = 0;
INIT_LIST_HEAD(&wait->list);
INIT_LIST_HEAD(&wait->tx_head);
INIT_WORK(&wait->iowork, func);
init_waitqueue_head(&wait->wait_dma);
+ init_waitqueue_head(&wait->wait_pio);
atomic_set(&wait->sdma_busy, 0);
+ atomic_set(&wait->pio_busy, 0);
wait->tx_limit = tx_limit;
wait->sleep = sleep;
wait->wakeup = wakeup;
+ wait->sdma_drained = sdma_drained;
}
/**
@@ -174,6 +182,88 @@ static inline void iowait_sdma_drain(struct iowait *wait)
}
/**
+ * iowait_sdma_pending() - return sdma pending count
+ *
+ * @wait: iowait structure
+ *
+ */
+static inline int iowait_sdma_pending(struct iowait *wait)
+{
+ return atomic_read(&wait->sdma_busy);
+}
+
+/**
+ * iowait_sdma_inc - note sdma io pending
+ * @wait: iowait structure
+ */
+static inline void iowait_sdma_inc(struct iowait *wait)
+{
+ atomic_inc(&wait->sdma_busy);
+}
+
+/**
+ * iowait_sdma_add - add count to pending
 + * @wait: iowait structure
 + * @count: number of sdma ios to add to the pending count
+ */
+static inline void iowait_sdma_add(struct iowait *wait, int count)
+{
+ atomic_add(count, &wait->sdma_busy);
+}
+
+/**
+ * iowait_sdma_dec - note sdma complete
+ * @wait: iowait structure
+ */
+static inline int iowait_sdma_dec(struct iowait *wait)
+{
+ return atomic_dec_and_test(&wait->sdma_busy);
+}
+
+/**
+ * iowait_pio_drain() - wait for pios to drain
+ *
+ * @wait: iowait structure
+ *
+ * This will delay until the iowait pios have
+ * completed.
+ */
+static inline void iowait_pio_drain(struct iowait *wait)
+{
+ wait_event_timeout(wait->wait_pio,
+ !atomic_read(&wait->pio_busy),
+ HZ);
+}
+
+/**
+ * iowait_pio_pending() - return pio pending count
+ *
+ * @wait: iowait structure
+ *
+ */
+static inline int iowait_pio_pending(struct iowait *wait)
+{
+ return atomic_read(&wait->pio_busy);
+}
+
+/**
+ * iowait_pio_inc - note pio pending
+ * @wait: iowait structure
+ */
+static inline void iowait_pio_inc(struct iowait *wait)
+{
+ atomic_inc(&wait->pio_busy);
+}
+
+/**
 + * iowait_pio_dec - note pio complete
+ * @wait: iowait structure
+ */
+static inline int iowait_pio_dec(struct iowait *wait)
+{
+ return atomic_dec_and_test(&wait->pio_busy);
+}
+
+/**
* iowait_drain_wakeup() - trigger iowait_drain() waiter
*
* @wait: iowait structure
@@ -183,6 +273,28 @@ static inline void iowait_sdma_drain(struct iowait *wait)
static inline void iowait_drain_wakeup(struct iowait *wait)
{
wake_up(&wait->wait_dma);
+ wake_up(&wait->wait_pio);
+ if (wait->sdma_drained)
+ wait->sdma_drained(wait);
+}
+
+/**
 + * iowait_get_txhead() - get packet off the iowait list
 + *
 + * @wait: wait structure
+ */
+static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
+{
+ struct sdma_txreq *tx = NULL;
+
+ if (!list_empty(&wait->tx_head)) {
+ tx = list_first_entry(
+ &wait->tx_head,
+ struct sdma_txreq,
+ list);
+ list_del_init(&tx->list);
+ }
+ return tx;
}
#endif
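[Editor's note] The new pio_busy/wait_pio pair mirrors the existing sdma accounting above. A minimal usage sketch under assumed names (the example_* helpers are illustrative, not part of the driver):

	extern int example_hw_submit(void);	/* assumed hardware hook */

	/* submit side: count the pio before it is in flight */
	static int example_pio_submit(struct iowait *wait)
	{
		iowait_pio_inc(wait);
		if (example_hw_submit() < 0) {
			/* undo; iowait_pio_dec() returns true on last ref */
			if (iowait_pio_dec(wait))
				wake_up(&wait->wait_pio);
			return -EBUSY;
		}
		return 0;
	}

	/* completion side: the last completion wakes any drainer */
	static void example_pio_complete(struct iowait *wait)
	{
		if (iowait_pio_dec(wait))
			wake_up(&wait->wait_pio);
	}

A teardown path can then call iowait_pio_drain(wait), which sleeps until pio_busy reaches zero, with the HZ timeout as a backstop.
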
diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c
deleted file mode 100644
index cb4e6087dfdb..000000000000
--- a/drivers/staging/rdma/hfi1/keys.c
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "hfi.h"
-
-/**
- * hfi1_alloc_lkey - allocate an lkey
- * @mr: memory region that this lkey protects
- * @dma_region: 0->normal key, 1->restricted DMA key
- *
- * Returns 0 if successful, otherwise returns -errno.
- *
- * Increments mr reference count as required.
- *
- * Sets the lkey field mr for non-dma regions.
- *
- */
-
-int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region)
-{
- unsigned long flags;
- u32 r;
- u32 n;
- int ret = 0;
- struct hfi1_ibdev *dev = to_idev(mr->pd->device);
- struct hfi1_lkey_table *rkt = &dev->lk_table;
-
- hfi1_get_mr(mr);
- spin_lock_irqsave(&rkt->lock, flags);
-
- /* special case for dma_mr lkey == 0 */
- if (dma_region) {
- struct hfi1_mregion *tmr;
-
- tmr = rcu_access_pointer(dev->dma_mr);
- if (!tmr) {
- rcu_assign_pointer(dev->dma_mr, mr);
- mr->lkey_published = 1;
- } else {
- hfi1_put_mr(mr);
- }
- goto success;
- }
-
- /* Find the next available LKEY */
- r = rkt->next;
- n = r;
- for (;;) {
- if (!rcu_access_pointer(rkt->table[r]))
- break;
- r = (r + 1) & (rkt->max - 1);
- if (r == n)
- goto bail;
- }
- rkt->next = (r + 1) & (rkt->max - 1);
- /*
- * Make sure lkey is never zero which is reserved to indicate an
- * unrestricted LKEY.
- */
- rkt->gen++;
- /*
- * bits are capped in verbs.c to ensure enough bits for
- * generation number
- */
- mr->lkey = (r << (32 - hfi1_lkey_table_size)) |
- ((((1 << (24 - hfi1_lkey_table_size)) - 1) & rkt->gen)
- << 8);
- if (mr->lkey == 0) {
- mr->lkey |= 1 << 8;
- rkt->gen++;
- }
- rcu_assign_pointer(rkt->table[r], mr);
- mr->lkey_published = 1;
-success:
- spin_unlock_irqrestore(&rkt->lock, flags);
-out:
- return ret;
-bail:
- hfi1_put_mr(mr);
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = -ENOMEM;
- goto out;
-}
-
-/**
- * hfi1_free_lkey - free an lkey
- * @mr: mr to free from tables
- */
-void hfi1_free_lkey(struct hfi1_mregion *mr)
-{
- unsigned long flags;
- u32 lkey = mr->lkey;
- u32 r;
- struct hfi1_ibdev *dev = to_idev(mr->pd->device);
- struct hfi1_lkey_table *rkt = &dev->lk_table;
- int freed = 0;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (!mr->lkey_published)
- goto out;
- if (lkey == 0)
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- else {
- r = lkey >> (32 - hfi1_lkey_table_size);
- RCU_INIT_POINTER(rkt->table[r], NULL);
- }
- mr->lkey_published = 0;
- freed++;
-out:
- spin_unlock_irqrestore(&rkt->lock, flags);
- if (freed) {
- synchronize_rcu();
- hfi1_put_mr(mr);
- }
-}
-
-/**
- * hfi1_lkey_ok - check IB SGE for validity and initialize
- * @rkt: table containing lkey to check SGE against
- * @pd: protection domain
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Return 1 if valid and successful, otherwise returns 0.
- *
- * increments the reference count upon success
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd,
- struct hfi1_sge *isge, struct ib_sge *sge, int acc)
-{
- struct hfi1_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /*
- * We use LKEY == zero for kernel virtual addresses
- * (see hfi1_get_dma_mr and dma.c).
- */
- rcu_read_lock();
- if (sge->lkey == 0) {
- struct hfi1_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- atomic_inc(&mr->refcount);
- rcu_read_unlock();
-
- isge->mr = mr;
- isge->vaddr = (void *) sge->addr;
- isge->length = sge->length;
- isge->sge_length = sge->length;
- isge->m = 0;
- isge->n = 0;
- goto ok;
- }
- mr = rcu_dereference(
- rkt->table[(sge->lkey >> (32 - hfi1_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
- goto bail;
-
- off = sge->addr - mr->user_base;
- if (unlikely(sge->addr < mr->user_base ||
- off + sge->length > mr->length ||
- (mr->access_flags & acc) != acc))
- goto bail;
- atomic_inc(&mr->refcount);
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- page sizes are uniform power of 2 so no loop is necessary
- entries_spanned_by_off is the number of times the loop below
- would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off / HFI1_SEGSZ;
- n = entries_spanned_by_off % HFI1_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= HFI1_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- isge->mr = mr;
- isge->vaddr = mr->map[m]->segs[n].vaddr + off;
- isge->length = mr->map[m]->segs[n].length - off;
- isge->sge_length = sge->length;
- isge->m = m;
- isge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
-
-/**
- * hfi1_rkey_ok - check the IB virtual address, length, and RKEY
- * @qp: qp for validation
- * @sge: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- *
- * increments the reference count upon success
- */
-int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc)
-{
- struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct hfi1_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /*
- * We use RKEY == zero for kernel virtual addresses
- * (see hfi1_get_dma_mr and dma.c).
- */
- rcu_read_lock();
- if (rkey == 0) {
- struct hfi1_pd *pd = to_ipd(qp->ibqp.pd);
- struct hfi1_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- atomic_inc(&mr->refcount);
- rcu_read_unlock();
-
- sge->mr = mr;
- sge->vaddr = (void *) vaddr;
- sge->length = len;
- sge->sge_length = len;
- sge->m = 0;
- sge->n = 0;
- goto ok;
- }
-
- mr = rcu_dereference(
- rkt->table[(rkey >> (32 - hfi1_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
- goto bail;
-
- off = vaddr - mr->iova;
- if (unlikely(vaddr < mr->iova || off + len > mr->length ||
- (mr->access_flags & acc) == 0))
- goto bail;
- atomic_inc(&mr->refcount);
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- page sizes are uniform power of 2 so no loop is necessary
- entries_spanned_by_off is the number of times the loop below
- would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off / HFI1_SEGSZ;
- n = entries_spanned_by_off % HFI1_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= HFI1_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- sge->mr = mr;
- sge->vaddr = mr->map[m]->segs[n].vaddr + off;
- sge->length = mr->map[m]->segs[n].length - off;
- sge->sge_length = len;
- sge->m = m;
- sge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
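[Editor's note] For reference, the allocator deleted above (the MR/lkey machinery appears to be absorbed by rdmavt, as the rvt_* conversions elsewhere in this diff suggest) packed a table slot and a generation counter into the 32-bit lkey. A worked example of the packing, assuming hfi1_lkey_table_size == 16:

	u32 r = slot;				/* free table index */
	u32 gen = rkt_gen;			/* current generation counter */
	u32 lkey = (r << (32 - 16)) |		/* slot in the top 16 bits */
		   ((((1 << (24 - 16)) - 1) & gen) << 8); /* 8 gen bits at 8..15 */
	if (lkey == 0)		/* 0 is reserved for the unrestricted DMA key */
		lkey |= 1 << 8;	/* stay non-zero; gen is bumped for next pass */
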
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c
index 4f5dbd14b5de..d1e7f4d7cf6f 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/staging/rdma/hfi1/mad.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -55,6 +52,7 @@
#include "hfi.h"
#include "mad.h"
#include "trace.h"
+#include "qp.h"
/* the reset value from the FM is supposed to be 0xffff, handle both */
#define OPA_LINK_WIDTH_RESET_OLD 0x0fff
@@ -91,7 +89,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
int pkey_idx;
u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;
- agent = ibp->send_agent;
+ agent = ibp->rvp.send_agent;
if (!agent)
return;
@@ -100,7 +98,8 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
return;
/* o14-2 */
- if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
+ if (ibp->rvp.trap_timeout && time_before(jiffies,
+ ibp->rvp.trap_timeout))
return;
pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
@@ -121,42 +120,43 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
smp->class_version = OPA_SMI_CLASS_VERSION;
smp->method = IB_MGMT_METHOD_TRAP;
- ibp->tid++;
- smp->tid = cpu_to_be64(ibp->tid);
+ ibp->rvp.tid++;
+ smp->tid = cpu_to_be64(ibp->rvp.tid);
smp->attr_id = IB_SMP_ATTR_NOTICE;
/* o14-1: smp->mkey = 0; */
memcpy(smp->route.lid.data, data, len);
- spin_lock_irqsave(&ibp->lock, flags);
- if (!ibp->sm_ah) {
- if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ if (!ibp->rvp.sm_ah) {
+ if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
struct ib_ah *ah;
- ah = hfi1_create_qp0_ah(ibp, ibp->sm_lid);
- if (IS_ERR(ah))
+ ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
+ if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
- else {
+ } else {
send_buf->ah = ah;
- ibp->sm_ah = to_iah(ah);
+ ibp->rvp.sm_ah = ibah_to_rvtah(ah);
ret = 0;
}
- } else
+ } else {
ret = -EINVAL;
+ }
} else {
- send_buf->ah = &ibp->sm_ah->ibah;
+ send_buf->ah = &ibp->rvp.sm_ah->ibah;
ret = 0;
}
- spin_unlock_irqrestore(&ibp->lock, flags);
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
if (!ret)
ret = ib_post_send_mad(send_buf, NULL);
if (!ret) {
/* 4.096 usec. */
- timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
- ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
+ timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
+ ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
} else {
ib_free_send_mad(send_buf);
- ibp->trap_timeout = 0;
+ ibp->rvp.trap_timeout = 0;
}
}
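[Editor's note] The trap back-off computed above follows the IB subnet-timeout encoding: 4.096 µs (4096 ns) scaled by 2^subnet_timeout. A worked example with an illustrative timeout value:

	/* subnet_timeout = 18:  4096 ns * 2^18 = ~1.07 s */
	unsigned long usec = (4096 * (1UL << 18)) / 1000;	/* ~1073741 us */
	unsigned long expires = jiffies + usecs_to_jiffies(usec);
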
@@ -174,10 +174,10 @@ void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
memset(&data, 0, sizeof(data));
if (trap_num == OPA_TRAP_BAD_P_KEY)
- ibp->pkey_violations++;
+ ibp->rvp.pkey_violations++;
else
- ibp->qkey_violations++;
- ibp->n_pkt_drops++;
+ ibp->rvp.qkey_violations++;
+ ibp->rvp.n_pkt_drops++;
/* Send violation trap */
data.generic_type = IB_NOTICE_TYPE_SECURITY;
@@ -233,9 +233,12 @@ static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
/*
* Send a Port Capability Mask Changed trap (ch. 14.3.11).
*/
-void hfi1_cap_mask_chg(struct hfi1_ibport *ibp)
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
{
struct opa_mad_notice_attr data;
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+ struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
u32 lid = ppd_from_ibp(ibp)->lid;
memset(&data, 0, sizeof(data));
@@ -245,7 +248,7 @@ void hfi1_cap_mask_chg(struct hfi1_ibport *ibp)
data.trap_num = OPA_TRAP_CHANGE_CAPABILITY;
data.issuer_lid = cpu_to_be32(lid);
data.ntc_144.lid = data.issuer_lid;
- data.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
+ data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
send_trap(ibp, &data, sizeof(data));
}
@@ -407,37 +410,38 @@ static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
int ret = 0;
/* Is the mkey in the process of expiring? */
- if (ibp->mkey_lease_timeout &&
- time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
+ if (ibp->rvp.mkey_lease_timeout &&
+ time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
/* Clear timeout and mkey protection field. */
- ibp->mkey_lease_timeout = 0;
- ibp->mkeyprot = 0;
+ ibp->rvp.mkey_lease_timeout = 0;
+ ibp->rvp.mkeyprot = 0;
}
- if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 ||
- ibp->mkey == mkey)
+ if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
+ ibp->rvp.mkey == mkey)
valid_mkey = 1;
/* Unset lease timeout on any valid Get/Set/TrapRepress */
- if (valid_mkey && ibp->mkey_lease_timeout &&
+ if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
(mad->method == IB_MGMT_METHOD_GET ||
mad->method == IB_MGMT_METHOD_SET ||
mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
- ibp->mkey_lease_timeout = 0;
+ ibp->rvp.mkey_lease_timeout = 0;
if (!valid_mkey) {
switch (mad->method) {
case IB_MGMT_METHOD_GET:
/* Bad mkey not a violation below level 2 */
- if (ibp->mkeyprot < 2)
+ if (ibp->rvp.mkeyprot < 2)
break;
case IB_MGMT_METHOD_SET:
case IB_MGMT_METHOD_TRAP_REPRESS:
- if (ibp->mkey_violations != 0xFFFF)
- ++ibp->mkey_violations;
- if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
- ibp->mkey_lease_timeout = jiffies +
- ibp->mkey_lease_period * HZ;
+ if (ibp->rvp.mkey_violations != 0xFFFF)
+ ++ibp->rvp.mkey_violations;
+ if (!ibp->rvp.mkey_lease_timeout &&
+ ibp->rvp.mkey_lease_period)
+ ibp->rvp.mkey_lease_timeout = jiffies +
+ ibp->rvp.mkey_lease_period * HZ;
/* Generate a trap notice. */
bad_mkey(ibp, mad, mkey, dr_slid, return_path,
hop_cnt);
@@ -501,16 +505,6 @@ void read_ltp_rtt(struct hfi1_devdata *dd)
write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
}
-static u8 __opa_porttype(struct hfi1_pportdata *ppd)
-{
- if (qsfp_mod_present(ppd)) {
- if (ppd->qsfp_info.cache_valid)
- return OPA_PORT_TYPE_STANDARD;
- return OPA_PORT_TYPE_DISCONNECTED;
- }
- return OPA_PORT_TYPE_UNKNOWN;
-}
-
static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
@@ -522,6 +516,7 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
struct opa_port_info *pi = (struct opa_port_info *)data;
u8 mtu;
u8 credit_rate;
+ u8 is_beaconing_active;
u32 state;
u32 num_ports = OPA_AM_NPORT(am);
u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
@@ -538,8 +533,8 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
ppd = dd->pport + (port - 1);
ibp = &ppd->ibport_data;
- if (ppd->vls_supported/2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
- ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
+ if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
+ ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -548,14 +543,14 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
/* Only return the mkey if the protection field allows it. */
if (!(smp->method == IB_MGMT_METHOD_GET &&
- ibp->mkey != smp->mkey &&
- ibp->mkeyprot == 1))
- pi->mkey = ibp->mkey;
-
- pi->subnet_prefix = ibp->gid_prefix;
- pi->sm_lid = cpu_to_be32(ibp->sm_lid);
- pi->ib_cap_mask = cpu_to_be32(ibp->port_cap_flags);
- pi->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
+ ibp->rvp.mkey != smp->mkey &&
+ ibp->rvp.mkeyprot == 1))
+ pi->mkey = ibp->rvp.mkey;
+
+ pi->subnet_prefix = ibp->rvp.gid_prefix;
+ pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
+ pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
+ pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
pi->sa_qp = cpu_to_be32(ppd->sa_qp);
@@ -581,38 +576,45 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
if (start_of_sm_config && (state == IB_PORT_INIT))
ppd->is_sm_config_started = 1;
- pi->port_phys_conf = __opa_porttype(ppd) & 0xf;
+ pi->port_phys_conf = (ppd->port_type & 0xf);
#if PI_LED_ENABLE_SUP
pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
pi->port_states.ledenable_offlinereason |=
ppd->is_sm_config_started << 5;
+ /*
+ * This pairs with the memory barrier in hfi1_start_led_override to
+ * ensure that we read the correct state of LED beaconing represented
+ * by led_override_timer_active
+ */
+ smp_rmb();
+ is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
+ pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
pi->port_states.ledenable_offlinereason |=
- ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON;
+ ppd->offline_disabled_reason;
#else
pi->port_states.offline_reason = ppd->neighbor_normal << 4;
pi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
- pi->port_states.offline_reason |= ppd->offline_disabled_reason &
- OPA_PI_MASK_OFFLINE_REASON;
+ pi->port_states.offline_reason |= ppd->offline_disabled_reason;
#endif /* PI_LED_ENABLE_SUP */
pi->port_states.portphysstate_portstate =
(hfi1_ibphys_portstate(ppd) << 4) | state;
- pi->mkeyprotect_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
+ pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
for (i = 0; i < ppd->vls_supported; i++) {
mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
if ((i % 2) == 0)
- pi->neigh_mtu.pvlx_to_mtu[i/2] |= (mtu << 4);
+ pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
else
- pi->neigh_mtu.pvlx_to_mtu[i/2] |= mtu;
+ pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
}
/* don't forget VL 15 */
mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
- pi->neigh_mtu.pvlx_to_mtu[15/2] |= mtu;
- pi->smsl = ibp->sm_sl & OPA_PI_MASK_SMSL;
+ pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
+ pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
pi->partenforce_filterraw |=
(ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
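[Editor's note] The smp_rmb() added above is the read half of a barrier pair; the comment points at hfi1_start_led_override for the write half. A minimal sketch of the pairing, assuming the writer publishes the flag with a matching write barrier (the writer side is not shown in this diff):

	/* writer (assumed, in hfi1_start_led_override) */
	atomic_set(&ppd->led_override_timer_active, 1);
	smp_wmb();	/* order this store before later dependent stores */

	/* reader (this hunk) */
	smp_rmb();	/* pairs with the writer's smp_wmb() */
	is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
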
@@ -620,17 +622,17 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
- pi->mkey_violations = cpu_to_be16(ibp->mkey_violations);
+ pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
/* P_KeyViolations are counted by hardware. */
- pi->pkey_violations = cpu_to_be16(ibp->pkey_violations);
- pi->qkey_violations = cpu_to_be16(ibp->qkey_violations);
+ pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
+ pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
pi->vl.cap = ppd->vls_supported;
- pi->vl.high_limit = cpu_to_be16(ibp->vl_high_limit);
+ pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);
- pi->clientrereg_subnettimeout = ibp->subnet_timeout;
+ pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;
pi->port_link_mode = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
OPA_PORT_LINK_MODE_OPA << 5 |
@@ -701,8 +703,10 @@ static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
/* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);
- /* this counter is 16 bits wide, but the replay_depth.wire
- * variable is only 8 bits */
+ /*
+ * this counter is 16 bits wide, but the replay_depth.wire
+ * variable is only 8 bits
+ */
if (tmp > 0xff)
tmp = 0xff;
pi->replay_depth.wire = tmp;
@@ -749,7 +753,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
- n_blocks_avail = (u16) (npkeys/OPA_PARTITION_TABLE_BLK_SIZE) + 1;
+ n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);
@@ -763,7 +767,7 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
- p = (__be16 *) data;
+ p = (__be16 *)data;
q = (u16 *)data;
/* get the real pkeys if we are requesting the first block */
if (start_block == 0) {
@@ -772,9 +776,9 @@ static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
p[i] = cpu_to_be16(q[i]);
if (resp_len)
*resp_len += size;
- } else
+ } else {
smp->status |= IB_SMP_INVALID_FIELD;
-
+ }
return reply((struct ib_mad_hdr *)smp);
}
@@ -901,8 +905,8 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
u32 logical_old = driver_logical_state(ppd);
int ret, logical_allowed, physical_allowed;
- logical_allowed = ret =
- logical_transition_allowed(logical_old, logical_new);
+ ret = logical_transition_allowed(logical_old, logical_new);
+ logical_allowed = ret;
if (ret == HFI_TRANSITION_DISALLOWED ||
ret == HFI_TRANSITION_UNDEFINED) {
@@ -912,8 +916,8 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
return ret;
}
- physical_allowed = ret =
- physical_transition_allowed(physical_old, physical_new);
+ ret = physical_transition_allowed(physical_old, physical_new);
+ physical_allowed = ret;
if (ret == HFI_TRANSITION_DISALLOWED ||
ret == HFI_TRANSITION_UNDEFINED) {
@@ -928,6 +932,14 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
return HFI_TRANSITION_IGNORED;
/*
+ * A change request of Physical Port State from
+ * 'Offline' to 'Polling' should be ignored.
+ */
+ if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
+ (physical_new == IB_PORTPHYSSTATE_POLLING))
+ return HFI_TRANSITION_IGNORED;
+
+ /*
* Either physical_allowed or logical_allowed is
* HFI_TRANSITION_ALLOWED.
*/
@@ -972,16 +984,15 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
break;
/* FALLTHROUGH */
case IB_PORT_DOWN:
- if (phys_state == IB_PORTPHYSSTATE_NOP)
+ if (phys_state == IB_PORTPHYSSTATE_NOP) {
link_state = HLS_DN_DOWNDEF;
- else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
+ } else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
link_state = HLS_DN_POLL;
- set_link_down_reason(ppd,
- OPA_LINKDOWN_REASON_FM_BOUNCE, 0,
- OPA_LINKDOWN_REASON_FM_BOUNCE);
- } else if (phys_state == IB_PORTPHYSSTATE_DISABLED)
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
+ 0, OPA_LINKDOWN_REASON_FM_BOUNCE);
+ } else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
link_state = HLS_DN_DISABLE;
- else {
+ } else {
pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
phys_state);
smp->status |= IB_SMP_INVALID_FIELD;
@@ -991,11 +1002,11 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
set_link_state(ppd, link_state);
if (link_state == HLS_DN_DISABLE &&
(ppd->offline_disabled_reason >
- OPA_LINKDOWN_REASON_SMA_DISABLED ||
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
ppd->offline_disabled_reason ==
- OPA_LINKDOWN_REASON_NONE))
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
ppd->offline_disabled_reason =
- OPA_LINKDOWN_REASON_SMA_DISABLED;
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
/*
* Don't send a reply if the response would be sent
* through the disabled port.
@@ -1091,13 +1102,13 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
ls_old = driver_lstate(ppd);
- ibp->mkey = pi->mkey;
- ibp->gid_prefix = pi->subnet_prefix;
- ibp->mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
+ ibp->rvp.mkey = pi->mkey;
+ ibp->rvp.gid_prefix = pi->subnet_prefix;
+ ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
/* Must be a valid unicast LID address. */
if ((lid == 0 && ls_old > IB_PORT_INIT) ||
- lid >= HFI1_MULTICAST_LID_BASE) {
+ lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
smp->status |= IB_SMP_INVALID_FIELD;
pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
lid);
@@ -1130,23 +1141,23 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
/* Must be a valid unicast LID address. */
if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
- smlid >= HFI1_MULTICAST_LID_BASE) {
+ smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
smp->status |= IB_SMP_INVALID_FIELD;
pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
- } else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+ } else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
- spin_lock_irqsave(&ibp->lock, flags);
- if (ibp->sm_ah) {
- if (smlid != ibp->sm_lid)
- ibp->sm_ah->attr.dlid = smlid;
- if (msl != ibp->sm_sl)
- ibp->sm_ah->attr.sl = msl;
+ spin_lock_irqsave(&ibp->rvp.lock, flags);
+ if (ibp->rvp.sm_ah) {
+ if (smlid != ibp->rvp.sm_lid)
+ ibp->rvp.sm_ah->attr.dlid = smlid;
+ if (msl != ibp->rvp.sm_sl)
+ ibp->rvp.sm_ah->attr.sl = msl;
}
- spin_unlock_irqrestore(&ibp->lock, flags);
- if (smlid != ibp->sm_lid)
- ibp->sm_lid = smlid;
- if (msl != ibp->sm_sl)
- ibp->sm_sl = msl;
+ spin_unlock_irqrestore(&ibp->rvp.lock, flags);
+ if (smlid != ibp->rvp.sm_lid)
+ ibp->rvp.sm_lid = smlid;
+ if (msl != ibp->rvp.sm_sl)
+ ibp->rvp.sm_sl = msl;
event.event = IB_EVENT_SM_CHANGE;
ib_dispatch_event(&event);
}
@@ -1167,8 +1178,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
ppd->port_error_action = be32_to_cpu(pi->port_error_action);
lwe = be16_to_cpu(pi->link_width.enabled);
if (lwe) {
- if (lwe == OPA_LINK_WIDTH_RESET
- || lwe == OPA_LINK_WIDTH_RESET_OLD)
+ if (lwe == OPA_LINK_WIDTH_RESET ||
+ lwe == OPA_LINK_WIDTH_RESET_OLD)
set_link_width_enabled(ppd, ppd->link_width_supported);
else if ((lwe & ~ppd->link_width_supported) == 0)
set_link_width_enabled(ppd, lwe);
@@ -1177,19 +1188,21 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
}
lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
/* LWD.E is always applied - 0 means "disabled" */
- if (lwe == OPA_LINK_WIDTH_RESET
- || lwe == OPA_LINK_WIDTH_RESET_OLD) {
+ if (lwe == OPA_LINK_WIDTH_RESET ||
+ lwe == OPA_LINK_WIDTH_RESET_OLD) {
set_link_width_downgrade_enabled(ppd,
- ppd->link_width_downgrade_supported);
 +				ppd->link_width_downgrade_supported);
} else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
/* only set and apply if something changed */
if (lwe != ppd->link_width_downgrade_enabled) {
set_link_width_downgrade_enabled(ppd, lwe);
call_link_downgrade_policy = 1;
}
- } else
+ } else {
smp->status |= IB_SMP_INVALID_FIELD;
-
+ }
lse = be16_to_cpu(pi->link_speed.enabled);
if (lse) {
if (lse & be16_to_cpu(pi->link_speed.supported))
@@ -1198,22 +1211,24 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
smp->status |= IB_SMP_INVALID_FIELD;
}
- ibp->mkeyprot = (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
- ibp->vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
+ ibp->rvp.mkeyprot =
+ (pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
+ ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
- ibp->vl_high_limit);
+ ibp->rvp.vl_high_limit);
- if (ppd->vls_supported/2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
- ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
+ if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
+ ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
for (i = 0; i < ppd->vls_supported; i++) {
if ((i % 2) == 0)
- mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i/2] >> 4)
- & 0xF);
+ mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
+ 4) & 0xF);
else
- mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i/2] & 0xF);
+ mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
+ 0xF);
if (mtu == 0xffff) {
pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
mtu,
@@ -1223,8 +1238,8 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
}
if (dd->vld[i].mtu != mtu) {
dd_dev_info(dd,
- "MTU change on vl %d from %d to %d\n",
- i, dd->vld[i].mtu, mtu);
+ "MTU change on vl %d from %d to %d\n",
+ i, dd->vld[i].mtu, mtu);
dd->vld[i].mtu = mtu;
call_set_mtu++;
}
@@ -1232,13 +1247,13 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
/* As per OPAV1 spec: VL15 must support and be configured
* for operation with a 2048 or larger MTU.
*/
- mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15/2] & 0xF);
+ mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
if (mtu < 2048 || mtu == 0xffff)
mtu = 2048;
if (dd->vld[15].mtu != mtu) {
dd_dev_info(dd,
- "MTU change on vl 15 from %d to %d\n",
- dd->vld[15].mtu, mtu);
+ "MTU change on vl 15 from %d to %d\n",
+ dd->vld[15].mtu, mtu);
dd->vld[15].mtu = mtu;
call_set_mtu++;
}
@@ -1254,21 +1269,21 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
smp->status |= IB_SMP_INVALID_FIELD;
} else {
if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
- vls) == -EINVAL)
+ vls) == -EINVAL)
smp->status |= IB_SMP_INVALID_FIELD;
}
}
if (pi->mkey_violations == 0)
- ibp->mkey_violations = 0;
+ ibp->rvp.mkey_violations = 0;
if (pi->pkey_violations == 0)
- ibp->pkey_violations = 0;
+ ibp->rvp.pkey_violations = 0;
if (pi->qkey_violations == 0)
- ibp->qkey_violations = 0;
+ ibp->rvp.qkey_violations = 0;
- ibp->subnet_timeout =
+ ibp->rvp.subnet_timeout =
pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;
crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
@@ -1388,7 +1403,7 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.ibdev;
+ event.device = &dd->verbs_dev.rdi.ibdev;
event.element.port_num = port;
ib_dispatch_event(&event);
}
@@ -1402,7 +1417,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u32 n_blocks_sent = OPA_AM_NBLK(am);
u32 start_block = am & 0x7ff;
- u16 *p = (u16 *) data;
+ u16 *p = (u16 *)data;
__be16 *q = (__be16 *)data;
int i;
u16 n_blocks_avail;
@@ -1415,7 +1430,7 @@ static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
- n_blocks_avail = (u16)(npkeys/OPA_PARTITION_TABLE_BLK_SIZE) + 1;
+ n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
if (start_block + n_blocks_sent > n_blocks_avail ||
n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
@@ -1514,14 +1529,22 @@ static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
struct hfi1_ibport *ibp = to_iport(ibdev, port);
u8 *p = data;
int i;
+ u8 sc;
if (am) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
- ibp->sl_to_sc[i] = *p++;
+ for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
+ sc = *p++;
+ if (ibp->sl_to_sc[i] != sc) {
+ ibp->sl_to_sc[i] = sc;
+
+ /* Put all stale qps into error state */
+ hfi1_error_port_qps(ibp, i);
+ }
+ }
return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len);
}
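[Editor's note] Forcing QPs into the error state only for SL entries whose SC mapping actually changed keeps idempotent FM writes from disturbing traffic. The guard pattern above, restated in isolation:

	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++) {
		u8 sc = *p++;

		if (ibp->sl_to_sc[i] == sc)
			continue;	/* unchanged: QPs on this SL keep flowing */
		ibp->sl_to_sc[i] = sc;
		hfi1_error_port_qps(ibp, i);	/* flush QPs with a stale SC */
	}
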
@@ -1574,7 +1597,7 @@ static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
{
u32 n_blocks = OPA_AM_NBLK(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- void *vp = (void *) data;
+ void *vp = (void *)data;
size_t size = 4 * sizeof(u64);
if (n_blocks != 1) {
@@ -1597,7 +1620,7 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
u32 n_blocks = OPA_AM_NBLK(am);
int async_update = OPA_AM_ASYNC(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- void *vp = (void *) data;
+ void *vp = (void *)data;
struct hfi1_pportdata *ppd;
int lstate;
@@ -1609,8 +1632,10 @@ static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
/* IB numbers ports from 1, hw from 0 */
ppd = dd->pport + (port - 1);
lstate = driver_lstate(ppd);
- /* it's known that async_update is 0 by this point, but include
- * the explicit check for clarity */
+ /*
+ * it's known that async_update is 0 by this point, but include
+ * the explicit check for clarity
+ */
if (!async_update &&
(lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
smp->status |= IB_SMP_INVALID_FIELD;
@@ -1629,7 +1654,7 @@ static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
u32 n_blocks = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd;
- void *vp = (void *) data;
+ void *vp = (void *)data;
int size;
if (n_blocks != 1) {
@@ -1654,7 +1679,7 @@ static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
u32 n_blocks = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd;
- void *vp = (void *) data;
+ void *vp = (void *)data;
int lstate;
if (n_blocks != 1) {
@@ -1687,7 +1712,7 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
u32 lstate;
struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd;
- struct opa_port_state_info *psi = (struct opa_port_state_info *) data;
+ struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
if (nports != 1) {
smp->status |= IB_SMP_INVALID_FIELD;
@@ -1707,12 +1732,11 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
psi->port_states.ledenable_offlinereason |=
ppd->is_sm_config_started << 5;
psi->port_states.ledenable_offlinereason |=
- ppd->offline_disabled_reason & OPA_PI_MASK_OFFLINE_REASON;
+ ppd->offline_disabled_reason;
#else
psi->port_states.offline_reason = ppd->neighbor_normal << 4;
psi->port_states.offline_reason |= ppd->is_sm_config_started << 5;
- psi->port_states.offline_reason |= ppd->offline_disabled_reason &
- OPA_PI_MASK_OFFLINE_REASON;
+ psi->port_states.offline_reason |= ppd->offline_disabled_reason;
#endif /* PI_LED_ENABLE_SUP */
psi->port_states.portphysstate_portstate =
@@ -1737,7 +1761,7 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
u8 ls_new, ps_new;
struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd;
- struct opa_port_state_info *psi = (struct opa_port_state_info *) data;
+ struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
int ret, invalid = 0;
if (nports != 1) {
@@ -1782,14 +1806,16 @@ static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
u32 len = OPA_AM_CI_LEN(am) + 1;
int ret;
-#define __CI_PAGE_SIZE (1 << 7) /* 128 bytes */
+#define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
#define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
#define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
- /* check that addr is within spec, and
- * addr and (addr + len - 1) are on the same "page" */
+ /*
+ * check that addr is within spec, and
+ * addr and (addr + len - 1) are on the same "page"
+ */
if (addr >= 4096 ||
- (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
+ (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
@@ -1823,7 +1849,7 @@ static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
u32 num_ports = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd;
- struct buffer_control *p = (struct buffer_control *) data;
+ struct buffer_control *p = (struct buffer_control *)data;
int size;
if (num_ports != 1) {
@@ -1846,7 +1872,7 @@ static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
u32 num_ports = OPA_AM_NPORT(am);
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
struct hfi1_pportdata *ppd;
- struct buffer_control *p = (struct buffer_control *) data;
+ struct buffer_control *p = (struct buffer_control *)data;
if (num_ports != 1) {
smp->status |= IB_SMP_INVALID_FIELD;
@@ -1919,13 +1945,15 @@ static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
switch (section) {
case OPA_VLARB_LOW_ELEMENTS:
- (void) fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
+ (void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
break;
case OPA_VLARB_HIGH_ELEMENTS:
- (void) fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
+ (void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
break;
- /* neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX
- * can be changed from the default values */
+ /*
+ * neither OPA_VLARB_PREEMPT_ELEMENTS, or OPA_VLARB_PREEMPT_MATRIX
+ * can be changed from the default values
+ */
case OPA_VLARB_PREEMPT_ELEMENTS:
/* FALLTHROUGH */
case OPA_VLARB_PREEMPT_MATRIX:
@@ -2137,8 +2165,10 @@ struct opa_port_data_counters_msg {
};
struct opa_port_error_counters64_msg {
- /* Request contains first two fields, response contains the
- * whole magilla */
+ /*
+ * Request contains first two fields, response contains the
+ * whole magilla
+ */
__be64 port_select_mask[4];
__be32 vl_select_mask;
@@ -2172,7 +2202,6 @@ struct opa_port_error_info_msg {
__be32 error_info_select_mask;
__be32 reserved1;
struct _port_ei {
-
u8 port_number;
u8 reserved2[7];
@@ -2251,7 +2280,7 @@ enum error_info_selects {
};
static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
- struct ib_device *ibdev, u32 *resp_len)
+ struct ib_device *ibdev, u32 *resp_len)
{
struct opa_class_port_info *p =
(struct opa_class_port_info *)pmp->data;
@@ -2279,23 +2308,29 @@ static void a0_portstatus(struct hfi1_pportdata *ppd,
{
if (!is_bx(ppd->dd)) {
unsigned long vl;
- u64 max_vl_xmit_wait = 0, tmp;
+ u64 sum_vl_xmit_wait = 0;
u32 vl_all_mask = VL_MASK_ALL;
for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
8 * sizeof(vl_all_mask)) {
- tmp = read_port_cntr(ppd, C_TX_WAIT_VL,
- idx_from_vl(vl));
- if (tmp > max_vl_xmit_wait)
- max_vl_xmit_wait = tmp;
+ u64 tmp = sum_vl_xmit_wait +
+ read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
+ if (tmp < sum_vl_xmit_wait) {
+ /* we wrapped */
+ sum_vl_xmit_wait = (u64)~0;
+ break;
+ }
+ sum_vl_xmit_wait = tmp;
}
- rsp->port_xmit_wait = cpu_to_be64(max_vl_xmit_wait);
+ if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
+ rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
}
}
-
static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
+ struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
{
struct opa_port_status_req *req =
(struct opa_port_status_req *)pmp->data;
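[Editor's note] Note the accumulation pattern introduced in a0_portstatus above (and reused in a0_datacounters later in this diff): per-VL xmit-wait counts are summed with an explicit unsigned-wrap check, saturating to all-ones rather than wrapping silently. The pattern in isolation, with an assumed per-VL read helper:

	extern u64 read_vl_counter(int vl);	/* assumed helper */

	u64 sum = 0;
	int vl;

	for (vl = 0; vl < 8; vl++) {
		u64 next = sum + read_vl_counter(vl);

		if (next < sum) {	/* unsigned overflow detected */
			sum = (u64)~0;	/* saturate */
			break;
		}
		sum = next;
	}
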
@@ -2320,8 +2355,8 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
return reply((struct ib_mad_hdr *)pmp);
}
- if (nports != 1 || (port_num && port_num != port)
- || num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
+ if (nports != 1 || (port_num && port_num != port) ||
+ num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)pmp);
}
@@ -2351,7 +2386,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
CNTR_INVALID_VL));
rsp->port_multicast_xmit_pkts =
cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
- CNTR_INVALID_VL));
+ CNTR_INVALID_VL));
rsp->port_multicast_rcv_pkts =
cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
CNTR_INVALID_VL));
@@ -2380,7 +2415,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
}
tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
- CNTR_INVALID_VL);
+ CNTR_INVALID_VL);
if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
/* overflow/wrapped */
rsp->link_error_recovery = cpu_to_be32(~0);
@@ -2395,13 +2430,13 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
CNTR_INVALID_VL));
rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
- CNTR_INVALID_VL));
+ CNTR_INVALID_VL));
/* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
- vlinfo = &(rsp->vls[0]);
+ vlinfo = &rsp->vls[0];
vfi = 0;
/* The vl_select_mask has been checked above, and we know
* that it contains only entries which represent valid VLs.
@@ -2417,27 +2452,27 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
rsp->vls[vfi].port_vl_rcv_pkts =
cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_xmit_data =
cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_xmit_pkts =
cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_xmit_wait =
cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_rcv_fecn =
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_rcv_becn =
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
vlinfo++;
vfi++;
@@ -2467,7 +2502,7 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
CNTR_INVALID_VL);
error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
- CNTR_INVALID_VL);
+ CNTR_INVALID_VL);
/* local link integrity must be right-shifted by the lli resolution */
tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
tmp += read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
@@ -2477,10 +2512,10 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
error_counter_summary += (tmp >> res_ler);
error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
- CNTR_INVALID_VL);
+ CNTR_INVALID_VL);
error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
- CNTR_INVALID_VL);
+ CNTR_INVALID_VL);
/* ppd->link_downed is a 32-bit value */
error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
CNTR_INVALID_VL);
@@ -2491,21 +2526,22 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
return error_counter_summary;
}
-static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp,
+static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
u32 vl_select_mask)
{
- if (!is_bx(dd)) {
+ if (!is_bx(ppd->dd)) {
unsigned long vl;
- int vfi = 0;
u64 sum_vl_xmit_wait = 0;
+ u32 vl_all_mask = VL_MASK_ALL;
- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(vl_select_mask)) {
+ for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
+ 8 * sizeof(vl_all_mask)) {
u64 tmp = sum_vl_xmit_wait +
- be64_to_cpu(rsp->vls[vfi++].port_vl_xmit_wait);
+ read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
if (tmp < sum_vl_xmit_wait) {
/* we wrapped */
- sum_vl_xmit_wait = (u64) ~0;
+ sum_vl_xmit_wait = (u64)~0;
break;
}
sum_vl_xmit_wait = tmp;
@@ -2515,8 +2551,30 @@ static void a0_datacounters(struct hfi1_devdata *dd, struct _port_dctrs *rsp,
}
}
+static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
+ struct _port_dctrs *rsp)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+ rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_xmit_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
+ CNTR_INVALID_VL));
+ rsp->port_multicast_rcv_pkts =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
+ CNTR_INVALID_VL));
+}
+
static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
+ struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
{
struct opa_port_data_counters_msg *req =
(struct opa_port_data_counters_msg *)pmp->data;
@@ -2572,7 +2630,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
return reply((struct ib_mad_hdr *)pmp);
}
- rsp = (struct _port_dctrs *)&(req->port[0]);
+ rsp = &req->port[0];
memset(rsp, 0, sizeof(*rsp));
rsp->port_number = port;
@@ -2583,39 +2641,19 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
*/
hfi1_read_link_quality(dd, &lq);
rsp->link_quality_indicator = cpu_to_be32((u32)lq);
+ pma_get_opa_port_dctrs(ibdev, rsp);
- /* rsp->sw_port_congestion is 0 for HFIs */
- /* rsp->port_xmit_time_cong is 0 for HFIs */
- /* rsp->port_xmit_wasted_bw ??? */
- /* rsp->port_xmit_wait_data ??? */
- /* rsp->port_mark_fecn is 0 for HFIs */
-
- rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
- CNTR_INVALID_VL));
- rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
- CNTR_INVALID_VL));
- rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
- CNTR_INVALID_VL));
- rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
- CNTR_INVALID_VL));
- rsp->port_multicast_xmit_pkts =
- cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
- CNTR_INVALID_VL));
- rsp->port_multicast_rcv_pkts =
- cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
- CNTR_INVALID_VL));
rsp->port_xmit_wait =
cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL));
rsp->port_rcv_fecn =
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
rsp->port_rcv_becn =
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
-
rsp->port_error_counter_summary =
cpu_to_be64(get_error_counter_summary(ibdev, port,
res_lli, res_ler));
- vlinfo = &(rsp->vls[0]);
+ vlinfo = &rsp->vls[0];
vfi = 0;
/* The vl_select_mask has been checked above, and we know
* that it contains only entries which represent valid VLs.
@@ -2623,49 +2661,50 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
* any additional checks for vl.
*/
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
- 8 * sizeof(req->vl_select_mask)) {
+ 8 * sizeof(req->vl_select_mask)) {
memset(vlinfo, 0, sizeof(*vlinfo));
rsp->vls[vfi].port_vl_xmit_data =
cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_rcv_data =
cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_xmit_pkts =
cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_rcv_pkts =
cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_xmit_wait =
cpu_to_be64(read_port_cntr(ppd, C_TX_WAIT_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_rcv_fecn =
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
rsp->vls[vfi].port_vl_rcv_becn =
cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
- idx_from_vl(vl)));
+ idx_from_vl(vl)));
/* rsp->port_vl_xmit_time_cong is 0 for HFIs */
/* rsp->port_vl_xmit_wasted_bw ??? */
/* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
- * does this differ from rsp->vls[vfi].port_vl_xmit_wait */
+ * does this differ from rsp->vls[vfi].port_vl_xmit_wait
+ */
/*rsp->vls[vfi].port_vl_mark_fecn =
- cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
- + offset));
- */
+ * cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
+ * + offset));
+ */
vlinfo++;
vfi++;
}
- a0_datacounters(dd, rsp, vl_select_mask);
+ a0_datacounters(ppd, rsp, vl_select_mask);
if (resp_len)
*resp_len += response_data_size;
@@ -2673,12 +2712,88 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
return reply((struct ib_mad_hdr *)pmp);
}
+static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
+ pmp->data;
+ struct _port_dctrs rsp;
+
+ if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+
+ memset(&rsp, 0, sizeof(rsp));
+ pma_get_opa_port_dctrs(ibdev, &rsp);
+
+ p->port_xmit_data = rsp.port_xmit_data;
+ p->port_rcv_data = rsp.port_rcv_data;
+ p->port_xmit_packets = rsp.port_xmit_pkts;
+ p->port_rcv_packets = rsp.port_rcv_pkts;
+ p->port_unicast_xmit_packets = 0;
+ p->port_unicast_rcv_packets = 0;
+ p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
+ p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
+
+bail:
+ return reply((struct ib_mad_hdr *)pmp);
+}
+
+static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
+ struct _port_ectrs *rsp, u8 port)
+{
+ u64 tmp, tmp2;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+ tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
+ tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
+ CNTR_INVALID_VL);
+ if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
+ /* overflow/wrapped */
+ rsp->link_error_recovery = cpu_to_be32(~0);
+ } else {
+ rsp->link_error_recovery = cpu_to_be32(tmp2);
+ }
+
+ rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
+ rsp->port_rcv_remote_physical_errors =
+ cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_switch_relay_errors = 0;
+ rsp->port_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
+ CNTR_INVALID_VL));
+ rsp->port_xmit_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_constraint_errors =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
+ CNTR_INVALID_VL));
+ tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
+ tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
+ if (tmp2 < tmp) {
+ /* overflow/wrapped */
+ rsp->local_link_integrity_errors = cpu_to_be64(~0);
+ } else {
+ rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
+ }
+ rsp->excessive_buffer_overruns =
+ cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
+}
+
static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
+ struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
{
size_t response_data_size;
struct _port_ectrs *rsp;
- unsigned long port_num;
+ u8 port_num;
struct opa_port_error_counters64_msg *req;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u32 num_ports;
@@ -2688,7 +2803,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
struct hfi1_pportdata *ppd;
struct _vls_ectrs *vlinfo;
unsigned long vl;
- u64 port_mask, tmp, tmp2;
+ u64 port_mask, tmp;
u32 vl_select_mask;
int vfi;
@@ -2717,62 +2832,34 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask));
- if ((u8)port_num != port) {
+ if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)pmp);
}
- rsp = (struct _port_ectrs *)&(req->port[0]);
+ rsp = &req->port[0];
ibp = to_iport(ibdev, port_num);
ppd = ppd_from_ibp(ibp);
memset(rsp, 0, sizeof(*rsp));
- rsp->port_number = (u8)port_num;
+ rsp->port_number = port_num;
+
+ pma_get_opa_port_ectrs(ibdev, rsp, port_num);
- rsp->port_rcv_constraint_errors =
- cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
- CNTR_INVALID_VL));
- /* port_rcv_switch_relay_errors is 0 for HFIs */
- rsp->port_xmit_discards =
- cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
- CNTR_INVALID_VL));
rsp->port_rcv_remote_physical_errors =
cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
- CNTR_INVALID_VL));
- tmp = read_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_TX_REPLAY, CNTR_INVALID_VL);
- if (tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->local_link_integrity_errors = cpu_to_be64(~0);
- } else {
- rsp->local_link_integrity_errors = cpu_to_be64(tmp2);
- }
- tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
- tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
- CNTR_INVALID_VL);
- if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
- /* overflow/wrapped */
- rsp->link_error_recovery = cpu_to_be32(~0);
- } else {
- rsp->link_error_recovery = cpu_to_be32(tmp2);
- }
- rsp->port_xmit_constraint_errors =
- cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
- CNTR_INVALID_VL));
- rsp->excessive_buffer_overruns =
- cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
+ CNTR_INVALID_VL));
rsp->fm_config_errors =
cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
- CNTR_INVALID_VL));
- rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
- CNTR_INVALID_VL));
+ CNTR_INVALID_VL));
tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
+
rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
- vlinfo = (struct _vls_ectrs *)&(rsp->vls[0]);
+ vlinfo = &rsp->vls[0];
vfi = 0;
vl_select_mask = be32_to_cpu(req->vl_select_mask);
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
@@ -2789,8 +2876,94 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
return reply((struct ib_mad_hdr *)pmp);
}
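+/*
+ * Illustrative sketch (helper name is hypothetical): decoding the OPA
+ * port-select mask as done above. Only mask word [3] is meaningful for
+ * an HFI (ports 1..64), and the request is valid when the lowest set
+ * bit names the port the MAD arrived on.
+ */
+static u8 example_decode_port_select(const __be64 *port_select_mask)
+{
+ u64 mask = be64_to_cpu(port_select_mask[3]);
+
+ /* lowest set bit; scan all 8 * sizeof(mask) == 64 bits */
+ return find_first_bit((unsigned long *)&mask, 8 * sizeof(mask));
+}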
+static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
+ pmp->data;
+ struct _port_ectrs rsp;
+ u64 temp_link_overrun_errors;
+ u64 temp_64;
+ u32 temp_32;
+
+ memset(&rsp, 0, sizeof(rsp));
+ pma_get_opa_port_ectrs(ibdev, &rsp, port);
+
+ if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
+ goto bail;
+ }
+
+ p->symbol_error_counter = 0; /* N/A for OPA */
+
+ temp_32 = be32_to_cpu(rsp.link_error_recovery);
+ if (temp_32 > 0xFFUL)
+ p->link_error_recovery_counter = 0xFF;
+ else
+ p->link_error_recovery_counter = (u8)temp_32;
+
+ temp_32 = be32_to_cpu(rsp.link_downed);
+ if (temp_32 > 0xFFUL)
+ p->link_downed_counter = 0xFF;
+ else
+ p->link_downed_counter = (u8)temp_32;
+
+ temp_64 = be64_to_cpu(rsp.port_rcv_errors);
+ if (temp_64 > 0xFFFFUL)
+ p->port_rcv_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_errors = cpu_to_be16((u16)temp_64);
+
+ temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
+ if (temp_64 > 0xFFFFUL)
+ p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
+ else
+ p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
+
+ temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
+ p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
+
+ temp_64 = be64_to_cpu(rsp.port_xmit_discards);
+ if (temp_64 > 0xFFFFUL)
+ p->port_xmit_discards = cpu_to_be16(0xFFFF);
+ else
+ p->port_xmit_discards = cpu_to_be16((u16)temp_64);
+
+ temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
+ if (temp_64 > 0xFFUL)
+ p->port_xmit_constraint_errors = 0xFF;
+ else
+ p->port_xmit_constraint_errors = (u8)temp_64;
+
+ temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
+ if (temp_64 > 0xFFUL)
+ p->port_rcv_constraint_errors = 0xFF;
+ else
+ p->port_rcv_constraint_errors = (u8)temp_64;
+
+ /* LocalLink: 7:4, BufferOverrun: 3:0 */
+ temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
+ if (temp_64 > 0xFUL)
+ temp_64 = 0xFUL;
+
+ temp_link_overrun_errors = temp_64 << 4;
+
+ temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
+ if (temp_64 > 0xFUL)
+ temp_64 = 0xFUL;
+ temp_link_overrun_errors |= temp_64;
+
+ p->link_overrun_errors = (u8)temp_link_overrun_errors;
+
+ p->vl15_dropped = 0; /* N/A for OPA */
+
+bail:
+ return reply((struct ib_mad_hdr *)pmp);
+}
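+/*
+ * Illustrative sketch (helper names are hypothetical): the
+ * clamp-and-narrow pattern repeated above, folding 64-bit OPA counters
+ * into the 8- and 16-bit fields of the classic IB PortCounters
+ * attribute by saturating at each field's maximum.
+ */
+static inline u16 example_clamp_u16(u64 val)
+{
+ return val > 0xFFFFUL ? 0xFFFF : (u16)val;
+}
+
+static inline u8 example_clamp_u8(u64 val)
+{
+ return val > 0xFFUL ? 0xFF : (u8)val;
+}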
+
static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
+ struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
{
size_t response_data_size;
struct _port_ei *rsp;
@@ -2798,12 +2971,12 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u64 port_mask;
u32 num_ports;
- unsigned long port_num;
+ u8 port_num;
u8 num_pslm;
u64 reg;
req = (struct opa_port_error_info_msg *)pmp->data;
- rsp = (struct _port_ei *)&(req->port[0]);
+ rsp = &req->port[0];
num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
@@ -2831,7 +3004,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
port_num = find_first_bit((unsigned long *)&port_mask,
sizeof(port_mask));
- if ((u8)port_num != port) {
+ if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)pmp);
}
@@ -2840,15 +3013,17 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
rsp->port_rcv_ei.status_and_code =
dd->err_info_rcvport.status_and_code;
memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
- &dd->err_info_rcvport.packet_flit1, sizeof(u64));
+ &dd->err_info_rcvport.packet_flit1, sizeof(u64));
memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
- &dd->err_info_rcvport.packet_flit2, sizeof(u64));
+ &dd->err_info_rcvport.packet_flit2, sizeof(u64));
 /* ExcessiveBufferOverrunInfo */
reg = read_csr(dd, RCV_ERR_INFO);
if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
- /* if the RcvExcessBufferOverrun bit is set, save SC of
- * first pkt that encountered an excess buffer overrun */
+ /*
+ * if the RcvExcessBufferOverrun bit is set, save SC of
+ * first pkt that encountered an excess buffer overrun
+ */
u8 tmp = (u8)reg;
tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
@@ -2885,7 +3060,8 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
}
static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
+ struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
{
struct opa_clear_port_status *req =
(struct opa_clear_port_status *)pmp->data;
@@ -2944,8 +3120,9 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
/* Only applicable for switch */
- /*if (counter_select & CS_PORT_MARK_FECN)
- write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);*/
+ /* if (counter_select & CS_PORT_MARK_FECN)
+ * write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);
+ */
if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
@@ -2968,7 +3145,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
if (counter_select & CS_LINK_ERROR_RECOVERY) {
write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
- CNTR_INVALID_VL, 0);
+ CNTR_INVALID_VL, 0);
}
if (counter_select & CS_PORT_RCV_ERRORS)
@@ -2990,7 +3167,6 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
8 * sizeof(vl_select_mask)) {
-
if (counter_select & CS_PORT_XMIT_DATA)
write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
@@ -3019,9 +3195,9 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
if (counter_select & CS_PORT_RCV_BUBBLE)
write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
- /*if (counter_select & CS_PORT_MARK_FECN)
- write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
- */
+ /* if (counter_select & CS_PORT_MARK_FECN)
+ * write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
+ */
/* port_vl_xmit_discards ??? */
}
@@ -3032,19 +3208,20 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
}
static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
- struct ib_device *ibdev, u8 port, u32 *resp_len)
+ struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
{
struct _port_ei *rsp;
struct opa_port_error_info_msg *req;
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
u64 port_mask;
u32 num_ports;
- unsigned long port_num;
+ u8 port_num;
u8 num_pslm;
u32 error_info_select;
req = (struct opa_port_error_info_msg *)pmp->data;
- rsp = (struct _port_ei *)&(req->port[0]);
+ rsp = &req->port[0];
num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
@@ -3064,7 +3241,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
port_num = find_first_bit((unsigned long *)&port_mask,
sizeof(port_mask));
- if ((u8)port_num != port) {
+ if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)pmp);
}
@@ -3078,8 +3255,10 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
 /* ExcessiveBufferOverrunInfo */
if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
- /* status bit is essentially kept in the h/w - bit 5 of
- * RCV_ERR_INFO */
+ /*
+ * status bit is essentially kept in the h/w - bit 5 of
+ * RCV_ERR_INFO
+ */
write_csr(dd, RCV_ERR_INFO,
RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
@@ -3131,13 +3310,12 @@ static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
}
static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
- u8 *data,
- struct ib_device *ibdev,
- u8 port, u32 *resp_len)
+ u8 *data, struct ib_device *ibdev,
+ u8 port, u32 *resp_len)
{
int i;
struct opa_congestion_setting_attr *p =
- (struct opa_congestion_setting_attr *) data;
+ (struct opa_congestion_setting_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct opa_congestion_setting_entry_shadow *entries;
@@ -3147,7 +3325,7 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
cc_state = get_cc_state(ppd);
- if (cc_state == NULL) {
+ if (!cc_state) {
rcu_read_unlock();
return reply((struct ib_mad_hdr *)smp);
}
@@ -3176,7 +3354,7 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
u32 *resp_len)
{
struct opa_congestion_setting_attr *p =
- (struct opa_congestion_setting_attr *) data;
+ (struct opa_congestion_setting_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct opa_congestion_setting_entry_shadow *entries;
@@ -3238,7 +3416,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
continue;
memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
memcpy(cong_log->events[i].remote_qp_number_cn_entry,
- &cce->rqpn, 3);
+ &cce->rqpn, 3);
cong_log->events[i].sl_svc_type_cn_entry =
((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
cong_log->events[i].remote_lid_cn_entry =
@@ -3268,7 +3446,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
u32 *resp_len)
{
struct ib_cc_table_attr *cc_table_attr =
- (struct ib_cc_table_attr *) data;
+ (struct ib_cc_table_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 start_block = OPA_AM_START_BLK(am);
@@ -3289,7 +3467,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
cc_state = get_cc_state(ppd);
- if (cc_state == NULL) {
+ if (!cc_state) {
rcu_read_unlock();
return reply((struct ib_mad_hdr *)smp);
}
@@ -3309,7 +3487,7 @@ static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
rcu_read_unlock();
if (resp_len)
- *resp_len += sizeof(u16)*(IB_CCT_ENTRIES * n_blocks + 1);
+ *resp_len += sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
return reply((struct ib_mad_hdr *)smp);
}
@@ -3325,7 +3503,7 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
{
- struct ib_cc_table_attr *p = (struct ib_cc_table_attr *) data;
+ struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
struct hfi1_ibport *ibp = to_iport(ibdev, port);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 start_block = OPA_AM_START_BLK(am);
@@ -3355,14 +3533,14 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
}
new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
- if (new_cc_state == NULL)
+ if (!new_cc_state)
goto getit;
spin_lock(&ppd->cc_state_lock);
old_cc_state = get_cc_state(ppd);
- if (old_cc_state == NULL) {
+ if (!old_cc_state) {
spin_unlock(&ppd->cc_state_lock);
kfree(new_cc_state);
return reply((struct ib_mad_hdr *)smp);
@@ -3402,26 +3580,31 @@ struct opa_led_info {
};
#define OPA_LED_SHIFT 31
-#define OPA_LED_MASK (1 << OPA_LED_SHIFT)
+#define OPA_LED_MASK BIT(OPA_LED_SHIFT)
static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct opa_led_info *p = (struct opa_led_info *) data;
+ struct hfi1_pportdata *ppd = dd->pport;
+ struct opa_led_info *p = (struct opa_led_info *)data;
u32 nport = OPA_AM_NPORT(am);
- u64 reg;
+ u32 is_beaconing_active;
if (nport != 1) {
smp->status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)smp);
}
- reg = read_csr(dd, DCC_CFG_LED_CNTRL);
- if ((reg & DCC_CFG_LED_CNTRL_LED_CNTRL_SMASK) &&
- ((reg & DCC_CFG_LED_CNTRL_LED_SW_BLINK_RATE_SMASK) == 0xf))
- p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK);
+ /*
+ * This pairs with the memory barrier in hfi1_start_led_override to
+ * ensure that we read the correct state of LED beaconing represented
+ * by led_override_timer_active
+ */
+ smp_rmb();
+ is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
+ p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
if (resp_len)
*resp_len += sizeof(struct opa_led_info);
@@ -3434,7 +3617,7 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
u32 *resp_len)
{
struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct opa_led_info *p = (struct opa_led_info *) data;
+ struct opa_led_info *p = (struct opa_led_info *)data;
u32 nport = OPA_AM_NPORT(am);
int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
@@ -3443,7 +3626,10 @@ static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
- setextled(dd, on);
+ if (on)
+ hfi1_start_led_override(dd->pport, 2000, 1500);
+ else
+ shutdown_led_override(dd->pport);
return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len);
}
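+/*
+ * Illustrative sketch (the writer body is assumed, not shown in this
+ * hunk): the smp_rmb() in __subn_get_opa_led_info() pairs with a write
+ * barrier on the path that starts beaconing, so a reader observes
+ * led_override_timer_active only after the timer state it guards has
+ * been published.
+ */
+static void example_publish_beaconing(struct hfi1_pportdata *ppd)
+{
+ /* ... arm the LED override timer ... */
+ smp_wmb(); /* order timer setup before the flag update */
+ atomic_set(&ppd->led_override_timer_active, 1);
+}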
@@ -3486,7 +3672,7 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
break;
case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
- resp_len);
+ resp_len);
break;
case OPA_ATTRIB_ID_PORT_STATE_INFO:
ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
@@ -3525,9 +3711,9 @@ static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
resp_len);
break;
case IB_SMP_ATTR_SM_INFO:
- if (ibp->port_cap_flags & IB_PORT_SM_DISABLED)
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (ibp->port_cap_flags & IB_PORT_SM)
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM)
return IB_MAD_RESULT_SUCCESS;
/* FALLTHROUGH */
default:
@@ -3568,7 +3754,7 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
break;
case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
- resp_len);
+ resp_len);
break;
case OPA_ATTRIB_ID_PORT_STATE_INFO:
ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
@@ -3595,9 +3781,9 @@ static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
resp_len);
break;
case IB_SMP_ATTR_SM_INFO:
- if (ibp->port_cap_flags & IB_PORT_SM_DISABLED)
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- if (ibp->port_cap_flags & IB_PORT_SM)
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM)
return IB_MAD_RESULT_SUCCESS;
/* FALLTHROUGH */
default:
@@ -3647,14 +3833,13 @@ static int subn_get_opa_aggregate(struct opa_smp *smp,
/* zero the payload for this segment */
memset(next_smp + sizeof(*agg), 0, agg_data_len);
- (void) subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
+ (void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
ibdev, port, NULL);
if (smp->status & ~IB_SMP_DIRECTION) {
set_aggr_error(agg);
return reply((struct ib_mad_hdr *)smp);
}
next_smp += agg_size;
-
}
return reply((struct ib_mad_hdr *)smp);
@@ -3691,14 +3876,13 @@ static int subn_set_opa_aggregate(struct opa_smp *smp,
return reply((struct ib_mad_hdr *)smp);
}
- (void) subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
+ (void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
ibdev, port, NULL);
if (smp->status & ~IB_SMP_DIRECTION) {
set_aggr_error(agg);
return reply((struct ib_mad_hdr *)smp);
}
next_smp += agg_size;
-
}
return reply((struct ib_mad_hdr *)smp);
@@ -3816,7 +4000,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
if (smp->class_version != OPA_SMI_CLASS_VERSION) {
smp->status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_mad_hdr *)smp);
- goto bail;
+ return ret;
}
ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
smp->route.dr.dr_slid, smp->route.dr.return_path,
@@ -3836,13 +4020,13 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
smp->method == IB_MGMT_METHOD_SET) &&
port_num && port_num <= ibdev->phys_port_cnt &&
port != port_num)
- (void) check_mkey(to_iport(ibdev, port_num),
+ (void)check_mkey(to_iport(ibdev, port_num),
(struct ib_mad_hdr *)smp, 0,
smp->mkey, smp->route.dr.dr_slid,
smp->route.dr.return_path,
smp->hop_cnt);
ret = IB_MAD_RESULT_FAILURE;
- goto bail;
+ return ret;
}
*resp_len = opa_get_smp_header_size(smp);
@@ -3854,23 +4038,25 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
clear_opa_smp_data(smp);
ret = subn_get_opa_sma(attr_id, smp, am, data,
ibdev, port, resp_len);
- goto bail;
+ break;
case OPA_ATTRIB_ID_AGGREGATE:
ret = subn_get_opa_aggregate(smp, ibdev, port,
resp_len);
- goto bail;
+ break;
}
+ break;
case IB_MGMT_METHOD_SET:
switch (attr_id) {
default:
ret = subn_set_opa_sma(attr_id, smp, am, data,
ibdev, port, resp_len);
- goto bail;
+ break;
case OPA_ATTRIB_ID_AGGREGATE:
ret = subn_set_opa_aggregate(smp, ibdev, port,
resp_len);
- goto bail;
+ break;
}
+ break;
case IB_MGMT_METHOD_TRAP:
case IB_MGMT_METHOD_REPORT:
case IB_MGMT_METHOD_REPORT_RESP:
@@ -3881,13 +4067,13 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
* Just tell the caller to process it normally.
*/
ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
+ break;
default:
smp->status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_mad_hdr *)smp);
+ break;
}
-bail:
return ret;
}
@@ -3903,7 +4089,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
if (smp->class_version != 1) {
smp->status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_mad_hdr *)smp);
- goto bail;
+ return ret;
}
ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
@@ -3924,13 +4110,13 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
smp->method == IB_MGMT_METHOD_SET) &&
port_num && port_num <= ibdev->phys_port_cnt &&
port != port_num)
- (void) check_mkey(to_iport(ibdev, port_num),
- (struct ib_mad_hdr *)smp, 0,
- smp->mkey,
- (__force __be32)smp->dr_slid,
- smp->return_path, smp->hop_cnt);
+ (void)check_mkey(to_iport(ibdev, port_num),
+ (struct ib_mad_hdr *)smp, 0,
+ smp->mkey,
+ (__force __be32)smp->dr_slid,
+ smp->return_path, smp->hop_cnt);
ret = IB_MAD_RESULT_FAILURE;
- goto bail;
+ return ret;
}
switch (smp->method) {
@@ -3938,15 +4124,77 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
switch (smp->attr_id) {
case IB_SMP_ATTR_NODE_INFO:
ret = subn_get_nodeinfo(smp, ibdev, port);
- goto bail;
+ break;
default:
smp->status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_mad_hdr *)smp);
- goto bail;
+ break;
}
+ break;
+ }
+
+ return ret;
+}
+
+static int process_perf(struct ib_device *ibdev, u8 port,
+ const struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
+ struct ib_class_port_info *cpi = (struct ib_class_port_info *)
+ &pmp->data;
+ int ret = IB_MAD_RESULT_FAILURE;
+
+ *out_mad = *in_mad;
+ if (pmp->mad_hdr.class_version != 1) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ return ret;
+ }
+
+ switch (pmp->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ switch (pmp->mad_hdr.attr_id) {
+ case IB_PMA_PORT_COUNTERS:
+ ret = pma_get_ib_portcounters(pmp, ibdev, port);
+ break;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
+ break;
+ case IB_PMA_CLASS_PORT_INFO:
+ cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
+ }
+ break;
+
+ case IB_MGMT_METHOD_SET:
+ if (pmp->mad_hdr.attr_id) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ }
+ break;
+
+ case IB_MGMT_METHOD_TRAP:
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ break;
+
+ default:
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_mad_hdr *)pmp);
+ break;
}
-bail:
return ret;
}
@@ -3971,44 +4219,46 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,
switch (pmp->mad_hdr.attr_id) {
case IB_PMA_CLASS_PORT_INFO:
ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
- goto bail;
+ break;
case OPA_PM_ATTRIB_ID_PORT_STATUS:
ret = pma_get_opa_portstatus(pmp, ibdev, port,
- resp_len);
- goto bail;
+ resp_len);
+ break;
case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
ret = pma_get_opa_datacounters(pmp, ibdev, port,
- resp_len);
- goto bail;
+ resp_len);
+ break;
case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
ret = pma_get_opa_porterrors(pmp, ibdev, port,
- resp_len);
- goto bail;
+ resp_len);
+ break;
case OPA_PM_ATTRIB_ID_ERROR_INFO:
ret = pma_get_opa_errorinfo(pmp, ibdev, port,
- resp_len);
- goto bail;
+ resp_len);
+ break;
default:
pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_mad_hdr *)pmp);
- goto bail;
+ break;
}
+ break;
case IB_MGMT_METHOD_SET:
switch (pmp->mad_hdr.attr_id) {
case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
ret = pma_set_opa_portstatus(pmp, ibdev, port,
- resp_len);
- goto bail;
+ resp_len);
+ break;
case OPA_PM_ATTRIB_ID_ERROR_INFO:
ret = pma_set_opa_errorinfo(pmp, ibdev, port,
- resp_len);
- goto bail;
+ resp_len);
+ break;
default:
pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_mad_hdr *)pmp);
- goto bail;
+ break;
}
+ break;
case IB_MGMT_METHOD_TRAP:
case IB_MGMT_METHOD_GET_RESP:
@@ -4018,14 +4268,14 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,
* Just tell the caller to process it normally.
*/
ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
+ break;
default:
pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_mad_hdr *)pmp);
+ break;
}
-bail:
return ret;
}
@@ -4090,12 +4340,15 @@ static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
case IB_MGMT_CLASS_SUBN_LID_ROUTED:
ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
- goto bail;
+ break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ ret = process_perf(ibdev, port, in_mad, out_mad);
+ break;
default:
ret = IB_MAD_RESULT_SUCCESS;
+ break;
}
-bail:
return ret;
}
@@ -4147,66 +4400,3 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
return IB_MAD_RESULT_FAILURE;
}
-
-static void send_handler(struct ib_mad_agent *agent,
- struct ib_mad_send_wc *mad_send_wc)
-{
- ib_free_send_mad(mad_send_wc->send_buf);
-}
-
-int hfi1_create_agents(struct hfi1_ibdev *dev)
-{
- struct hfi1_devdata *dd = dd_from_dev(dev);
- struct ib_mad_agent *agent;
- struct hfi1_ibport *ibp;
- int p;
- int ret;
-
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
- NULL, 0, send_handler,
- NULL, NULL, 0);
- if (IS_ERR(agent)) {
- ret = PTR_ERR(agent);
- goto err;
- }
-
- ibp->send_agent = agent;
- }
-
- return 0;
-
-err:
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- if (ibp->send_agent) {
- agent = ibp->send_agent;
- ibp->send_agent = NULL;
- ib_unregister_mad_agent(agent);
- }
- }
-
- return ret;
-}
-
-void hfi1_free_agents(struct hfi1_ibdev *dev)
-{
- struct hfi1_devdata *dd = dd_from_dev(dev);
- struct ib_mad_agent *agent;
- struct hfi1_ibport *ibp;
- int p;
-
- for (p = 0; p < dd->num_pports; p++) {
- ibp = &dd->pport[p].ibport_data;
- if (ibp->send_agent) {
- agent = ibp->send_agent;
- ibp->send_agent = NULL;
- ib_unregister_mad_agent(agent);
- }
- if (ibp->sm_ah) {
- ib_destroy_ah(&ibp->sm_ah->ibah);
- ibp->sm_ah = NULL;
- }
- }
-}
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/staging/rdma/hfi1/mad.h
index f0317750e2fc..55ee08675333 100644
--- a/drivers/staging/rdma/hfi1/mad.h
+++ b/drivers/staging/rdma/hfi1/mad.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -51,8 +48,10 @@
#define _HFI1_MAD_H
#include <rdma/ib_pma.h>
-#define USE_PI_LED_ENABLE 1 /* use led enabled bit in struct
- * opa_port_states, if available */
+#define USE_PI_LED_ENABLE 1 /*
+ * use led enabled bit in struct
+ * opa_port_states, if available
+ */
#include <rdma/opa_smi.h>
#include <rdma/opa_port_info.h>
#ifndef PI_LED_ENABLE_SUP
@@ -235,7 +234,6 @@ struct ib_pma_portcounters_cong {
#define IB_CC_SVCTYPE_RD 0x2
#define IB_CC_SVCTYPE_UD 0x3
-
/*
* There should be an equivalent IB #define for the following, but
* I cannot find it.
@@ -267,7 +265,7 @@ struct opa_hfi1_cong_log {
u8 congestion_flags;
__be16 threshold_event_counter;
__be32 current_time_stamp;
- u8 threshold_cong_event_map[OPA_MAX_SLS/8];
+ u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
struct opa_hfi1_cong_log_event events[OPA_CONG_LOG_ELEMS];
} __packed;
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
new file mode 100644
index 000000000000..c7ad0164ea9a
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/list.h>
+#include <linux/mmu_notifier.h>
+#include <linux/interval_tree_generic.h>
+
+#include "mmu_rb.h"
+#include "trace.h"
+
+struct mmu_rb_handler {
+ struct list_head list;
+ struct mmu_notifier mn;
+ struct rb_root *root;
+ spinlock_t lock; /* protect the RB tree */
+ struct mmu_rb_ops *ops;
+};
+
+static LIST_HEAD(mmu_rb_handlers);
+static DEFINE_SPINLOCK(mmu_rb_lock); /* protect mmu_rb_handlers list */
+
+static unsigned long mmu_node_start(struct mmu_rb_node *);
+static unsigned long mmu_node_last(struct mmu_rb_node *);
+static struct mmu_rb_handler *find_mmu_handler(struct rb_root *);
+static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *,
+ unsigned long);
+static inline void mmu_notifier_range_start(struct mmu_notifier *,
+ struct mm_struct *,
+ unsigned long, unsigned long);
+static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
+ unsigned long, unsigned long);
+static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
+ unsigned long, unsigned long);
+
+static struct mmu_notifier_ops mn_opts = {
+ .invalidate_page = mmu_notifier_page,
+ .invalidate_range_start = mmu_notifier_range_start,
+};
+
+INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
+ mmu_node_start, mmu_node_last, static, __mmu_int_rb);
+
+static unsigned long mmu_node_start(struct mmu_rb_node *node)
+{
+ return node->addr & PAGE_MASK;
+}
+
+static unsigned long mmu_node_last(struct mmu_rb_node *node)
+{
+ return PAGE_ALIGN((node->addr & PAGE_MASK) + node->len) - 1;
+}
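+/*
+ * Note (illustrative): the INTERVAL_TREE_DEFINE() invocation above
+ * expands to a static interval-tree API over struct mmu_rb_node, keyed
+ * by the page-aligned [start, last] range from mmu_node_start() and
+ * mmu_node_last(), with __last caching each subtree's maximum:
+ *
+ * static void __mmu_int_rb_insert(struct mmu_rb_node *node,
+ * struct rb_root *root);
+ * static void __mmu_int_rb_remove(struct mmu_rb_node *node,
+ * struct rb_root *root);
+ * static struct mmu_rb_node *
+ * __mmu_int_rb_iter_first(struct rb_root *root, unsigned long start,
+ * unsigned long last);
+ * static struct mmu_rb_node *
+ * __mmu_int_rb_iter_next(struct mmu_rb_node *node, unsigned long start,
+ * unsigned long last);
+ *
+ * These are the functions the insert, search, and invalidate paths
+ * below rely on.
+ */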
+
+int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
+{
+ struct mmu_rb_handler *handlr;
+ unsigned long flags;
+
+ if (!ops->invalidate)
+ return -EINVAL;
+
+ handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
+ if (!handlr)
+ return -ENOMEM;
+
+ handlr->root = root;
+ handlr->ops = ops;
+ INIT_HLIST_NODE(&handlr->mn.hlist);
+ spin_lock_init(&handlr->lock);
+ handlr->mn.ops = &mn_opts;
+ spin_lock_irqsave(&mmu_rb_lock, flags);
+ list_add_tail(&handlr->list, &mmu_rb_handlers);
+ spin_unlock_irqrestore(&mmu_rb_lock, flags);
+
+ return mmu_notifier_register(&handlr->mn, current->mm);
+}
+
+void hfi1_mmu_rb_unregister(struct rb_root *root)
+{
+ struct mmu_rb_handler *handler = find_mmu_handler(root);
+ unsigned long flags;
+
+ if (!handler)
+ return;
+
+ spin_lock_irqsave(&mmu_rb_lock, flags);
+ list_del(&handler->list);
+ spin_unlock_irqrestore(&mmu_rb_lock, flags);
+
+ if (!RB_EMPTY_ROOT(root)) {
+ struct rb_node *node;
+ struct mmu_rb_node *rbnode;
+
+ while ((node = rb_first(root))) {
+ rbnode = rb_entry(node, struct mmu_rb_node, node);
+ rb_erase(node, root);
+ if (handler->ops->remove)
+ handler->ops->remove(root, rbnode, false);
+ }
+ }
+
+ if (current->mm)
+ mmu_notifier_unregister(&handler->mn, current->mm);
+ kfree(handler);
+}
+
+int hfi1_mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
+{
+ struct mmu_rb_handler *handler = find_mmu_handler(root);
+ struct mmu_rb_node *node;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!handler)
+ return -EINVAL;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ hfi1_cdbg(MMU, "Inserting node addr 0x%llx, len %u", mnode->addr,
+ mnode->len);
+ node = __mmu_rb_search(handler, mnode->addr, mnode->len);
+ if (node) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ __mmu_int_rb_insert(mnode, root);
+
+ if (handler->ops->insert) {
+ ret = handler->ops->insert(root, mnode);
+ if (ret)
+ __mmu_int_rb_remove(mnode, root);
+ }
+unlock:
+ spin_unlock_irqrestore(&handler->lock, flags);
+ return ret;
+}
+
+/* Caller must hold handler lock */
+static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
+ unsigned long addr,
+ unsigned long len)
+{
+ struct mmu_rb_node *node = NULL;
+
+ hfi1_cdbg(MMU, "Searching for addr 0x%llx, len %u", addr, len);
+ if (!handler->ops->filter) {
+ node = __mmu_int_rb_iter_first(handler->root, addr,
+ (addr + len) - 1);
+ } else {
+ for (node = __mmu_int_rb_iter_first(handler->root, addr,
+ (addr + len) - 1);
+ node;
+ node = __mmu_int_rb_iter_next(node, addr,
+ (addr + len) - 1)) {
+ if (handler->ops->filter(node, addr, len))
+ return node;
+ }
+ }
+ return node;
+}
+
+static void __mmu_rb_remove(struct mmu_rb_handler *handler,
+ struct mmu_rb_node *node, bool arg)
+{
+ /* Validity of handler and node pointers has been checked by caller. */
+ hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
+ node->len);
+ __mmu_int_rb_remove(node, handler->root);
+ if (handler->ops->remove)
+ handler->ops->remove(handler->root, node, arg);
+}
+
+struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
+ unsigned long len)
+{
+ struct mmu_rb_handler *handler = find_mmu_handler(root);
+ struct mmu_rb_node *node;
+ unsigned long flags;
+
+ if (!handler)
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&handler->lock, flags);
+ node = __mmu_rb_search(handler, addr, len);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ return node;
+}
+
+void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
+{
+ struct mmu_rb_handler *handler = find_mmu_handler(root);
+ unsigned long flags;
+
+ if (!handler || !node)
+ return;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, false);
+ spin_unlock_irqrestore(&handler->lock, flags);
+}
+
+static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
+{
+ struct mmu_rb_handler *handler;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmu_rb_lock, flags);
+ list_for_each_entry(handler, &mmu_rb_handlers, list) {
+ if (handler->root == root)
+ goto unlock;
+ }
+ handler = NULL;
+unlock:
+ spin_unlock_irqrestore(&mmu_rb_lock, flags);
+ return handler;
+}
+
+static inline void mmu_notifier_page(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long addr)
+{
+ mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
+}
+
+static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ mmu_notifier_mem_invalidate(mn, start, end);
+}
+
+static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
+ unsigned long start, unsigned long end)
+{
+ struct mmu_rb_handler *handler =
+ container_of(mn, struct mmu_rb_handler, mn);
+ struct rb_root *root = handler->root;
+ struct mmu_rb_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ for (node = __mmu_int_rb_iter_first(root, start, end - 1); node;
+ node = __mmu_int_rb_iter_next(node, start, end - 1)) {
+ hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
+ node->addr, node->len);
+ if (handler->ops->invalidate(root, node))
+ __mmu_rb_remove(handler, node, true);
+ }
+ spin_unlock_irqrestore(&handler->lock, flags);
+}
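+/*
+ * Illustrative sketch (hypothetical callback): an ops->invalidate
+ * implementation as driven by mmu_notifier_mem_invalidate() above. A
+ * non-zero return asks the handler to unlink the node, after which
+ * ops->remove is called with its flag set to true to signal an
+ * MMU-notifier-initiated removal.
+ */
+static int example_invalidate(struct rb_root *root, struct mmu_rb_node *node)
+{
+ /* e.g. mark the cached pinned-page range stale; 1 == evict */
+ return 1;
+}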
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
new file mode 100644
index 000000000000..f8523fdb8a18
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef _HFI1_MMU_RB_H
+#define _HFI1_MMU_RB_H
+
+#include "hfi.h"
+
+struct mmu_rb_node {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long __last;
+ struct rb_node node;
+};
+
+struct mmu_rb_ops {
+ bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
+ int (*insert)(struct rb_root *, struct mmu_rb_node *);
+ void (*remove)(struct rb_root *, struct mmu_rb_node *, bool);
+ int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
+};
+
+int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops);
+void hfi1_mmu_rb_unregister(struct rb_root *);
+int hfi1_mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
+void hfi1_mmu_rb_remove(struct rb_root *, struct mmu_rb_node *);
+struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *, unsigned long,
+ unsigned long);
+
+#endif /* _HFI1_MMU_RB_H */
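+/*
+ * Illustrative usage sketch (names prefixed "my_" are hypothetical): a
+ * client embeds struct mmu_rb_node in its own cache entry, supplies at
+ * least the mandatory ->invalidate callback, and registers its tree
+ * root with the handler.
+ */
+static int my_invalidate(struct rb_root *root, struct mmu_rb_node *node)
+{
+ return 1; /* evict whenever the range is invalidated */
+}
+
+static struct mmu_rb_ops my_ops = {
+ .invalidate = my_invalidate, /* required; registration fails without it */
+};
+
+static struct rb_root my_root = RB_ROOT;
+
+static int my_cache_init(void)
+{
+ return hfi1_mmu_rb_register(&my_root, &my_ops);
+}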
diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c
deleted file mode 100644
index a3f8b884fdd6..000000000000
--- a/drivers/staging/rdma/hfi1/mr.c
+++ /dev/null
@@ -1,473 +0,0 @@
-/*
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <rdma/ib_umem.h>
-#include <rdma/ib_smi.h>
-
-#include "hfi.h"
-
-/* Fast memory region */
-struct hfi1_fmr {
- struct ib_fmr ibfmr;
- struct hfi1_mregion mr; /* must be last */
-};
-
-static inline struct hfi1_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
- return container_of(ibfmr, struct hfi1_fmr, ibfmr);
-}
-
-static int init_mregion(struct hfi1_mregion *mr, struct ib_pd *pd,
- int count)
-{
- int m, i = 0;
- int rval = 0;
-
- m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
- for (; i < m; i++) {
- mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
- if (!mr->map[i])
- goto bail;
- }
- mr->mapsz = m;
- init_completion(&mr->comp);
- /* count returning the ptr to user */
- atomic_set(&mr->refcount, 1);
- mr->pd = pd;
- mr->max_segs = count;
-out:
- return rval;
-bail:
- while (i)
- kfree(mr->map[--i]);
- rval = -ENOMEM;
- goto out;
-}
-
-static void deinit_mregion(struct hfi1_mregion *mr)
-{
- int i = mr->mapsz;
-
- mr->mapsz = 0;
- while (i)
- kfree(mr->map[--i]);
-}
-
-
-/**
- * hfi1_get_dma_mr - get a DMA memory region
- * @pd: protection domain for this memory region
- * @acc: access flags
- *
- * Returns the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the
- * struct ib_dma_mapping_ops functions (see dma.c).
- */
-struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc)
-{
- struct hfi1_mr *mr = NULL;
- struct ib_mr *ret;
- int rval;
-
- if (to_ipd(pd)->user) {
- ret = ERR_PTR(-EPERM);
- goto bail;
- }
-
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- rval = init_mregion(&mr->mr, pd, 0);
- if (rval) {
- ret = ERR_PTR(rval);
- goto bail;
- }
-
-
- rval = hfi1_alloc_lkey(&mr->mr, 1);
- if (rval) {
- ret = ERR_PTR(rval);
- goto bail_mregion;
- }
-
- mr->mr.access_flags = acc;
- ret = &mr->ibmr;
-done:
- return ret;
-
-bail_mregion:
- deinit_mregion(&mr->mr);
-bail:
- kfree(mr);
- goto done;
-}
-
-static struct hfi1_mr *alloc_mr(int count, struct ib_pd *pd)
-{
- struct hfi1_mr *mr;
- int rval = -ENOMEM;
- int m;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (count + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
- mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
- if (!mr)
- goto bail;
-
- rval = init_mregion(&mr->mr, pd, count);
- if (rval)
- goto bail;
-
- rval = hfi1_alloc_lkey(&mr->mr, 0);
- if (rval)
- goto bail_mregion;
- mr->ibmr.lkey = mr->mr.lkey;
- mr->ibmr.rkey = mr->mr.lkey;
-done:
- return mr;
-
-bail_mregion:
- deinit_mregion(&mr->mr);
-bail:
- kfree(mr);
- mr = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * hfi1_reg_user_mr - register a userspace memory region
- * @pd: protection domain for this memory region
- * @start: starting userspace address
- * @length: length of region to register
- * @mr_access_flags: access flags for this memory region
- * @udata: unused by the driver
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata)
-{
- struct hfi1_mr *mr;
- struct ib_umem *umem;
- struct scatterlist *sg;
- int n, m, entry;
- struct ib_mr *ret;
-
- if (length == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- umem = ib_umem_get(pd->uobject->context, start, length,
- mr_access_flags, 0);
- if (IS_ERR(umem))
- return (void *) umem;
-
- n = umem->nmap;
-
- mr = alloc_mr(n, pd);
- if (IS_ERR(mr)) {
- ret = (struct ib_mr *)mr;
- ib_umem_release(umem);
- goto bail;
- }
-
- mr->mr.user_base = start;
- mr->mr.iova = virt_addr;
- mr->mr.length = length;
- mr->mr.offset = ib_umem_offset(umem);
- mr->mr.access_flags = mr_access_flags;
- mr->umem = umem;
-
- if (is_power_of_2(umem->page_size))
- mr->mr.page_shift = ilog2(umem->page_size);
- m = 0;
- n = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- void *vaddr;
-
- vaddr = page_address(sg_page(sg));
- if (!vaddr) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- mr->mr.map[m]->segs[n].vaddr = vaddr;
- mr->mr.map[m]->segs[n].length = umem->page_size;
- n++;
- if (n == HFI1_SEGSZ) {
- m++;
- n = 0;
- }
- }
- ret = &mr->ibmr;
-
-bail:
- return ret;
-}
-
-/**
- * hfi1_dereg_mr - unregister and free a memory region
- * @ibmr: the memory region to free
- *
- * Returns 0 on success.
- *
- * Note that this is called to free MRs created by hfi1_get_dma_mr()
- * or hfi1_reg_user_mr().
- */
-int hfi1_dereg_mr(struct ib_mr *ibmr)
-{
- struct hfi1_mr *mr = to_imr(ibmr);
- int ret = 0;
- unsigned long timeout;
-
- hfi1_free_lkey(&mr->mr);
-
- hfi1_put_mr(&mr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&mr->mr.comp,
- 5 * HZ);
- if (!timeout) {
- dd_dev_err(
- dd_from_ibdev(mr->mr.pd->device),
- "hfi1_dereg_mr timeout mr %p pd %p refcount %u\n",
- mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
- hfi1_get_mr(&mr->mr);
- ret = -EBUSY;
- goto out;
- }
- deinit_mregion(&mr->mr);
- if (mr->umem)
- ib_umem_release(mr->umem);
- kfree(mr);
-out:
- return ret;
-}
-
-/*
- * Allocate a memory region usable with the
- * IB_WR_REG_MR send work request.
- *
- * Return the memory region on success, otherwise return an errno.
- * FIXME: IB_WR_REG_MR is not supported
- */
-struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg)
-{
- struct hfi1_mr *mr;
-
- if (mr_type != IB_MR_TYPE_MEM_REG)
- return ERR_PTR(-EINVAL);
-
- mr = alloc_mr(max_num_sg, pd);
- if (IS_ERR(mr))
- return (struct ib_mr *)mr;
-
- return &mr->ibmr;
-}
-
-/**
- * hfi1_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr)
-{
- struct hfi1_fmr *fmr;
- int m;
- struct ib_fmr *ret;
- int rval = -ENOMEM;
-
- /* Allocate struct plus pointers to first level page tables. */
- m = (fmr_attr->max_pages + HFI1_SEGSZ - 1) / HFI1_SEGSZ;
- fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
- if (!fmr)
- goto bail;
-
- rval = init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
- if (rval)
- goto bail;
-
- /*
- * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
- * rkey.
- */
- rval = hfi1_alloc_lkey(&fmr->mr, 0);
- if (rval)
- goto bail_mregion;
- fmr->ibfmr.rkey = fmr->mr.lkey;
- fmr->ibfmr.lkey = fmr->mr.lkey;
- /*
- * Resources are allocated but no valid mapping (RKEY can't be
- * used).
- */
- fmr->mr.access_flags = mr_access_flags;
- fmr->mr.max_segs = fmr_attr->max_pages;
- fmr->mr.page_shift = fmr_attr->page_shift;
-
- ret = &fmr->ibfmr;
-done:
- return ret;
-
-bail_mregion:
- deinit_mregion(&fmr->mr);
-bail:
- kfree(fmr);
- ret = ERR_PTR(rval);
- goto done;
-}
-
-/**
- * hfi1_map_phys_fmr - set up a fast memory region
- * @ibmfr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- */
-
-int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova)
-{
- struct hfi1_fmr *fmr = to_ifmr(ibfmr);
- struct hfi1_lkey_table *rkt;
- unsigned long flags;
- int m, n, i;
- u32 ps;
- int ret;
-
- i = atomic_read(&fmr->mr.refcount);
- if (i > 2)
- return -EBUSY;
-
- if (list_len > fmr->mr.max_segs) {
- ret = -EINVAL;
- goto bail;
- }
- rkt = &to_idev(ibfmr->device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = iova;
- fmr->mr.iova = iova;
- ps = 1 << fmr->mr.page_shift;
- fmr->mr.length = list_len * ps;
- m = 0;
- n = 0;
- for (i = 0; i < list_len; i++) {
- fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
- fmr->mr.map[m]->segs[n].length = ps;
- if (++n == HFI1_SEGSZ) {
- m++;
- n = 0;
- }
- }
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * hfi1_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Returns 0 on success.
- */
-int hfi1_unmap_fmr(struct list_head *fmr_list)
-{
- struct hfi1_fmr *fmr;
- struct hfi1_lkey_table *rkt;
- unsigned long flags;
-
- list_for_each_entry(fmr, fmr_list, ibfmr.list) {
- rkt = &to_idev(fmr->ibfmr.device)->lk_table;
- spin_lock_irqsave(&rkt->lock, flags);
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- spin_unlock_irqrestore(&rkt->lock, flags);
- }
- return 0;
-}
-
-/**
- * hfi1_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Returns 0 on success.
- */
-int hfi1_dealloc_fmr(struct ib_fmr *ibfmr)
-{
- struct hfi1_fmr *fmr = to_ifmr(ibfmr);
- int ret = 0;
- unsigned long timeout;
-
- hfi1_free_lkey(&fmr->mr);
- hfi1_put_mr(&fmr->mr); /* will set completion if last */
- timeout = wait_for_completion_timeout(&fmr->mr.comp,
- 5 * HZ);
- if (!timeout) {
- hfi1_get_mr(&fmr->mr);
- ret = -EBUSY;
- goto out;
- }
- deinit_mregion(&fmr->mr);
- kfree(fmr);
-out:
- return ret;
-}
diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/staging/rdma/hfi1/opa_compat.h
index f64eec1c2951..6ef3c1cbdcd7 100644
--- a/drivers/staging/rdma/hfi1/opa_compat.h
+++ b/drivers/staging/rdma/hfi1/opa_compat.h
@@ -1,14 +1,13 @@
#ifndef _LINUX_H
#define _LINUX_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -111,19 +108,4 @@ enum opa_port_phys_state {
/* values 12-15 are reserved/ignored */
};
-/* OPA_PORT_TYPE_* definitions - these belong in opa_port_info.h */
-#define OPA_PORT_TYPE_UNKNOWN 0
-#define OPA_PORT_TYPE_DISCONNECTED 1
-/* port is not currently usable, CableInfo not available */
-#define OPA_PORT_TYPE_FIXED 2
-/* A fixed backplane port in a director class switch. All OPA ASICS */
-#define OPA_PORT_TYPE_VARIABLE 3
-/* A backplane port in a blade system, possibly mixed configuration */
-#define OPA_PORT_TYPE_STANDARD 4
-/* implies a SFF-8636 defined format for CableInfo (QSFP) */
-#define OPA_PORT_TYPE_SI_PHOTONICS 5
-/* A silicon photonics module implies TBD defined format for CableInfo
- * as defined by Intel SFO group */
-/* 6 - 15 are reserved */
-
#endif /* _LINUX_H */
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/staging/rdma/hfi1/pcie.c
index 8317b07d722a..0bac21e6a658 100644
--- a/drivers/staging/rdma/hfi1/pcie.c
+++ b/drivers/staging/rdma/hfi1/pcie.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -57,6 +54,7 @@
#include "hfi.h"
#include "chip_registers.h"
+#include "aspm.h"
/* link speed vector for Gen3 speed - not in Linux headers */
#define GEN1_SPEED_VECTOR 0x1
@@ -122,8 +120,9 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
goto bail;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else
+ } else {
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ }
if (ret) {
hfi1_early_err(&pdev->dev,
"Unable to set DMA consistent mask: %d\n", ret);
@@ -131,13 +130,7 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
}
pci_set_master(pdev);
- ret = pci_enable_pcie_error_reporting(pdev);
- if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to enable pcie error reporting: %d\n",
- ret);
- ret = 0;
- }
+ (void)pci_enable_pcie_error_reporting(pdev);
goto done;
bail:
@@ -222,10 +215,9 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &dd->pcie_lnkctl);
pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL2,
- &dd->pcie_devctl2);
+ &dd->pcie_devctl2);
pci_read_config_dword(dd->pcidev, PCI_CFG_MSIX0, &dd->pci_msix0);
- pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
- &dd->pci_lnkctl3);
+ pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, &dd->pci_lnkctl3);
pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
return 0;
@@ -238,7 +230,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev,
*/
void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
{
- u64 __iomem *base = (void __iomem *) dd->kregbase;
+ u64 __iomem *base = (void __iomem *)dd->kregbase;
dd->flags &= ~HFI1_PRESENT;
dd->kregbase = NULL;
@@ -247,8 +239,6 @@ void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd)
iounmap(dd->rcvarray_wc);
if (dd->piobase)
iounmap(dd->piobase);
-
- pci_set_drvdata(dd->pcidev, NULL);
}
/*
@@ -276,7 +266,7 @@ void hfi1_pcie_flr(struct hfi1_devdata *dd)
clear:
pcie_capability_set_word(dd->pcidev, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
+ PCI_EXP_DEVCTL_BCR_FLR);
/* PCIe spec requires the function to be back within 100ms */
msleep(100);
}
@@ -289,9 +279,11 @@ static void msix_setup(struct hfi1_devdata *dd, int pos, u32 *msixcnt,
struct msix_entry *msix_entry;
int i;
- /* We can't pass hfi1_msix_entry array to msix_setup
+ /*
+ * We can't pass hfi1_msix_entry array to msix_setup
* so use a dummy msix_entry array and copy the allocated
- * irq back to the hfi1_msix_entry array. */
+ * irq back to the hfi1_msix_entry array.
+ */
msix_entry = kmalloc_array(nvec, sizeof(*msix_entry), GFP_KERNEL);
if (!msix_entry) {
ret = -ENOMEM;
@@ -321,7 +313,6 @@ do_intx:
nvec, ret);
*msixcnt = 0;
hfi1_enable_intx(dd->pcidev);
-
}
/* return the PCIe link speed from the given link status */
@@ -369,6 +360,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
int pcie_speeds(struct hfi1_devdata *dd)
{
u32 linkcap;
+ struct pci_dev *parent = dd->pcidev->bus->self;
if (!pci_is_pcie(dd->pcidev)) {
dd_dev_err(dd, "Can't find PCI Express capability!\n");
@@ -381,15 +373,15 @@ int pcie_speeds(struct hfi1_devdata *dd)
pcie_capability_read_dword(dd->pcidev, PCI_EXP_LNKCAP, &linkcap);
if ((linkcap & PCI_EXP_LNKCAP_SLS) != GEN3_SPEED_VECTOR) {
dd_dev_info(dd,
- "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
- linkcap & PCI_EXP_LNKCAP_SLS);
+ "This HFI is not Gen3 capable, max speed 0x%x, need 0x3\n",
+ linkcap & PCI_EXP_LNKCAP_SLS);
dd->link_gen3_capable = 0;
}
/*
* bus->max_bus_speed is set from the bridge's linkcap Max Link Speed
*/
- if (dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
+ if (parent && dd->pcidev->bus->max_bus_speed != PCIE_SPEED_8_0GT) {
dd_dev_info(dd, "Parent PCIe bridge does not support Gen3\n");
dd->link_gen3_capable = 0;
}
@@ -397,9 +389,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
/* obtain the link width and current speed */
update_lbus_info(dd);
- /* check against expected pcie width and complain if "wrong" */
- if (dd->lbus_width < 16)
- dd_dev_err(dd, "PCIe width %u (x16 HFI)\n", dd->lbus_width);
+ dd_dev_info(dd, "%s\n", dd->lbus_info);
return 0;
}
@@ -438,23 +428,18 @@ void hfi1_enable_intx(struct pci_dev *pdev)
void restore_pci_variables(struct hfi1_devdata *dd)
{
pci_write_config_word(dd->pcidev, PCI_COMMAND, dd->pci_command);
- pci_write_config_dword(dd->pcidev,
- PCI_BASE_ADDRESS_0, dd->pcibar0);
- pci_write_config_dword(dd->pcidev,
- PCI_BASE_ADDRESS_1, dd->pcibar1);
- pci_write_config_dword(dd->pcidev,
- PCI_ROM_ADDRESS, dd->pci_rom);
+ pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0);
+ pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, dd->pcibar1);
+ pci_write_config_dword(dd->pcidev, PCI_ROM_ADDRESS, dd->pci_rom);
pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL, dd->pcie_devctl);
pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, dd->pcie_lnkctl);
pcie_capability_write_word(dd->pcidev, PCI_EXP_DEVCTL2,
- dd->pcie_devctl2);
+ dd->pcie_devctl2);
pci_write_config_dword(dd->pcidev, PCI_CFG_MSIX0, dd->pci_msix0);
- pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
- dd->pci_lnkctl3);
+ pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, dd->pci_lnkctl3);
pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
}
-
/*
* BIOS may not set PCIe bus-utilization parameters for best performance.
* Check and optionally adjust them to maximize our throughput.
@@ -463,6 +448,10 @@ static int hfi1_pcie_caps;
module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
+uint aspm_mode = ASPM_MODE_DISABLED;
+module_param_named(aspm, aspm_mode, uint, S_IRUGO);
+MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
+
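A sketch of decoding the packed pcie_caps value; the bit layout (payload exponent in bits 0..3, read-request exponent in bits 4..7, each meaning 128 << exp bytes) is an assumption read off the parameter description above, not taken from driver headers:

/* Hypothetical decode of the pcie_caps module parameter (layout assumed). */
static unsigned int caps_payload_bytes(int caps)
{
	return 128u << (caps & 0xf);		/* payload exponent, bits 0..3 */
}

static unsigned int caps_readreq_bytes(int caps)
{
	return 128u << ((caps >> 4) & 0xf);	/* readreq exponent, bits 4..7 */
}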
static void tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
@@ -481,6 +470,12 @@ static void tune_pcie_caps(struct hfi1_devdata *dd)
}
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
+ /*
+ * The driver cannot perform the tuning if it does not have
+ * access to the upstream component.
+ */
+ if (!parent)
+ return;
if (!pci_is_root_bus(parent->bus)) {
dd_dev_info(dd, "Parent not root\n");
return;
@@ -534,6 +529,7 @@ static void tune_pcie_caps(struct hfi1_devdata *dd)
pcie_set_readrq(dd->pcidev, ep_mrrs);
}
}
+
/* End of PCIe capability tuning */
/*
@@ -748,21 +744,22 @@ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
c0 = fs - (eq[i][PREC] / div) - (eq[i][POST] / div);
c_plus1 = eq[i][POST] / div;
pci_write_config_dword(pdev, PCIE_CFG_REG_PL102,
- eq_value(c_minus1, c0, c_plus1));
+ eq_value(c_minus1, c0, c_plus1));
/* check if these coefficients violate EQ rules */
pci_read_config_dword(dd->pcidev, PCIE_CFG_REG_PL105,
- &violation);
+ &violation);
if (violation
& PCIE_CFG_REG_PL105_GEN3_EQ_VIOLATE_COEF_RULES_SMASK){
if (hit_error == 0) {
dd_dev_err(dd,
- "Gen3 EQ Table Coefficient rule violations\n");
+ "Gen3 EQ Table Coefficient rule violations\n");
dd_dev_err(dd, " prec attn post\n");
}
dd_dev_err(dd, " p%02d: %02x %02x %02x\n",
- i, (u32)eq[i][0], (u32)eq[i][1], (u32)eq[i][2]);
+ i, (u32)eq[i][0], (u32)eq[i][1],
+ (u32)eq[i][2]);
dd_dev_err(dd, " %02x %02x %02x\n",
- (u32)c_minus1, (u32)c0, (u32)c_plus1);
+ (u32)c_minus1, (u32)c0, (u32)c_plus1);
hit_error = 1;
}
}
@@ -774,7 +771,7 @@ static int load_eq_table(struct hfi1_devdata *dd, const u8 eq[11][3], u8 fs,
/*
* Steps to be done after the PCIe firmware is downloaded and
* before the SBR for the Pcie Gen3.
- * The hardware mutex is already being held.
+ * The SBus resource is already being held.
*/
static void pcie_post_steps(struct hfi1_devdata *dd)
{
@@ -817,8 +814,8 @@ static int trigger_sbr(struct hfi1_devdata *dd)
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
if (pdev != dev) {
dd_dev_err(dd,
- "%s: another device is on the same bus\n",
- __func__);
+ "%s: another device is on the same bus\n",
+ __func__);
return -ENOTTY;
}
@@ -842,8 +839,8 @@ static void write_gasket_interrupt(struct hfi1_devdata *dd, int index,
u16 code, u16 data)
{
write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (index * 8),
- (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT)
- |((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
+ (((u64)code << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_CODE_SHIFT) |
+ ((u64)data << ASIC_PCIE_SD_INTRPT_LIST_INTRPT_DATA_SHIFT)));
}
/*
@@ -853,25 +850,101 @@ static void arm_gasket_logic(struct hfi1_devdata *dd)
{
u64 reg;
- reg = (((u64)1 << dd->hfi1_id)
- << ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT)
- | ((u64)pcie_serdes_broadcast[dd->hfi1_id]
- << ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT
- | ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK
- | ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK)
- << ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT
- );
+ reg = (((u64)1 << dd->hfi1_id) <<
+ ASIC_PCIE_SD_HOST_CMD_INTRPT_CMD_SHIFT) |
+ ((u64)pcie_serdes_broadcast[dd->hfi1_id] <<
+ ASIC_PCIE_SD_HOST_CMD_SBUS_RCVR_ADDR_SHIFT |
+ ASIC_PCIE_SD_HOST_CMD_SBR_MODE_SMASK |
+ ((u64)SBR_DELAY_US & ASIC_PCIE_SD_HOST_CMD_TIMER_MASK) <<
+ ASIC_PCIE_SD_HOST_CMD_TIMER_SHIFT);
write_csr(dd, ASIC_PCIE_SD_HOST_CMD, reg);
/* read back to push the write */
read_csr(dd, ASIC_PCIE_SD_HOST_CMD);
}
/*
+ * CCE_PCIE_CTRL long name helpers
+ * We redefine these shorter macros to use in the code while leaving
+ * chip_registers.h to be autogenerated from the hardware spec.
+ */
+#define LANE_BUNDLE_MASK CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_MASK
+#define LANE_BUNDLE_SHIFT CCE_PCIE_CTRL_PCIE_LANE_BUNDLE_SHIFT
+#define LANE_DELAY_MASK CCE_PCIE_CTRL_PCIE_LANE_DELAY_MASK
+#define LANE_DELAY_SHIFT CCE_PCIE_CTRL_PCIE_LANE_DELAY_SHIFT
+#define MARGIN_OVERWRITE_ENABLE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_OVERWRITE_ENABLE_SHIFT
+#define MARGIN_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_SHIFT
+#define MARGIN_G1_G2_OVERWRITE_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_MASK
+#define MARGIN_G1_G2_OVERWRITE_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_OVERWRITE_ENABLE_SHIFT
+#define MARGIN_GEN1_GEN2_MASK CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_MASK
+#define MARGIN_GEN1_GEN2_SHIFT CCE_PCIE_CTRL_XMT_MARGIN_GEN1_GEN2_SHIFT
+
+/*
+ * Write xmt_margin for full-swing (WFR-B) or half-swing (WFR-C).
+ */
+static void write_xmt_margin(struct hfi1_devdata *dd, const char *fname)
+{
+ u64 pcie_ctrl;
+ u64 xmt_margin;
+ u64 xmt_margin_oe;
+ u64 lane_delay;
+ u64 lane_bundle;
+
+ pcie_ctrl = read_csr(dd, CCE_PCIE_CTRL);
+
+ /*
+ * For Discrete, use full-swing.
+ * - PCIe TX defaults to full-swing.
+ * Leave this register as default.
+ * For Integrated, use half-swing
+ * - Copy xmt_margin and xmt_margin_oe
+ * from Gen1/Gen2 to Gen3.
+ */
+ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL1) { /* integrated */
+ /* extract initial fields */
+ xmt_margin = (pcie_ctrl >> MARGIN_GEN1_GEN2_SHIFT)
+ & MARGIN_GEN1_GEN2_MASK;
+ xmt_margin_oe = (pcie_ctrl >> MARGIN_G1_G2_OVERWRITE_SHIFT)
+ & MARGIN_G1_G2_OVERWRITE_MASK;
+ lane_delay = (pcie_ctrl >> LANE_DELAY_SHIFT) & LANE_DELAY_MASK;
+ lane_bundle = (pcie_ctrl >> LANE_BUNDLE_SHIFT)
+ & LANE_BUNDLE_MASK;
+
+ /*
+ * For A0, EFUSE values are not set. Override with the
+ * correct values.
+ */
+ if (is_ax(dd)) {
+ /*
+ * xmt_margin and OverwriteEnable should be the
+ * same for Gen1/Gen2 and Gen3
+ */
+ xmt_margin = 0x5;
+ xmt_margin_oe = 0x1;
+ lane_delay = 0xF; /* Delay 240ns. */
+ lane_bundle = 0x0; /* Set to 1 lane. */
+ }
+
+ /* overwrite existing values */
+ pcie_ctrl = (xmt_margin << MARGIN_GEN1_GEN2_SHIFT)
+ | (xmt_margin_oe << MARGIN_G1_G2_OVERWRITE_SHIFT)
+ | (xmt_margin << MARGIN_SHIFT)
+ | (xmt_margin_oe << MARGIN_OVERWRITE_ENABLE_SHIFT)
+ | (lane_delay << LANE_DELAY_SHIFT)
+ | (lane_bundle << LANE_BUNDLE_SHIFT);
+
+ write_csr(dd, CCE_PCIE_CTRL, pcie_ctrl);
+ }
+
+ dd_dev_dbg(dd, "%s: program XMT margin, CcePcieCtrl 0x%llx\n",
+ fname, pcie_ctrl);
+}
+
+/*
* Do all the steps needed to transition the PCIe link to Gen3 speed.
*/
int do_pcie_gen3_transition(struct hfi1_devdata *dd)
{
- struct pci_dev *parent;
+ struct pci_dev *parent = dd->pcidev->bus->self;
u64 fw_ctrl;
u64 reg, therm;
u32 reg32, fs, lf;
@@ -880,8 +953,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
int do_retry, retry_count = 0;
uint default_pset;
u16 target_vector, target_speed;
- u16 lnkctl, lnkctl2, vendor;
- u8 nsbr = 1;
+ u16 lnkctl2, vendor;
u8 div;
const u8 (*eq)[3];
int return_error = 0;
@@ -908,17 +980,21 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
/* if already at target speed, done (unless forced) */
if (dd->lbus_speed == target_speed) {
dd_dev_info(dd, "%s: PCIe already at gen%d, %s\n", __func__,
- pcie_target,
- pcie_force ? "re-doing anyway" : "skipping");
+ pcie_target,
+ pcie_force ? "re-doing anyway" : "skipping");
if (!pcie_force)
return 0;
}
/*
- * A0 needs an additional SBR
+ * The driver cannot do the transition if it has no access to the
+ * upstream component.
*/
- if (is_ax(dd))
- nsbr++;
+ if (!parent) {
+ dd_dev_info(dd, "%s: No upstream device, can't do Gen3 transition\n",
+ __func__);
+ return 0;
+ }
/*
* Do the Gen3 transition. Steps are those of the PCIe Gen3
@@ -934,10 +1010,13 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
goto done_no_mutex;
}
- /* hold the HW mutex across the firmware download and SBR */
- ret = acquire_hw_mutex(dd);
- if (ret)
+ /* hold the SBus resource across the firmware download and SBR */
+ ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd, "%s: unable to acquire SBus resource\n",
+ __func__);
return ret;
+ }
/* make sure thermal polling is not causing interrupts */
therm = read_csr(dd, ASIC_CFG_THERM_POLL_EN);
@@ -955,8 +1034,11 @@ retry:
/* step 4: download PCIe Gen3 SerDes firmware */
dd_dev_info(dd, "%s: downloading firmware\n", __func__);
ret = load_pcie_firmware(dd);
- if (ret)
+ if (ret) {
+ /* do not proceed if the firmware cannot be downloaded */
+ return_error = 1;
goto done;
+ }
/* step 5: set up device parameter settings */
dd_dev_info(dd, "%s: setting PCIe registers\n", __func__);
@@ -986,7 +1068,7 @@ retry:
* PcieCfgRegPl100 - Gen3 Control
*
* turn off PcieCfgRegPl100.Gen3ZRxDcNonCompl
- * turn on PcieCfgRegPl100.EqEieosCnt (erratum)
+ * turn on PcieCfgRegPl100.EqEieosCnt
* Everything else zero.
*/
reg32 = PCIE_CFG_REG_PL100_EQ_EIEOS_CNT_SMASK;
@@ -1016,8 +1098,10 @@ retry:
default_pset = DEFAULT_MCP_PSET;
}
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL101,
- (fs << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT)
- | (lf << PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
+ (fs <<
+ PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_FS_SHIFT) |
+ (lf <<
+ PCIE_CFG_REG_PL101_GEN3_EQ_LOCAL_LF_SHIFT));
ret = load_eq_table(dd, eq, fs, div);
if (ret)
goto done;
@@ -1031,15 +1115,15 @@ retry:
pcie_pset = default_pset;
if (pcie_pset > 10) { /* valid range is 0-10, inclusive */
dd_dev_err(dd, "%s: Invalid Eq Pset %u, setting to %d\n",
- __func__, pcie_pset, default_pset);
+ __func__, pcie_pset, default_pset);
pcie_pset = default_pset;
}
dd_dev_info(dd, "%s: using EQ Pset %u\n", __func__, pcie_pset);
pci_write_config_dword(dd->pcidev, PCIE_CFG_REG_PL106,
- ((1 << pcie_pset)
- << PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT)
- | PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK
- | PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
+ ((1 << pcie_pset) <<
+ PCIE_CFG_REG_PL106_GEN3_EQ_PSET_REQ_VEC_SHIFT) |
+ PCIE_CFG_REG_PL106_GEN3_EQ_EVAL2MS_DISABLE_SMASK |
+ PCIE_CFG_REG_PL106_GEN3_EQ_PHASE23_EXIT_MODE_SMASK);
/*
* step 5b: Do post firmware download steps via SBus
@@ -1064,17 +1148,15 @@ retry:
/*
* step 5d: program XMT margin
- * Right now, leave the default alone. To change, do a
- * read-modify-write of:
- * CcePcieCtrl.XmtMargin
- * CcePcieCtrl.XmitMarginOverwriteEnable
*/
+ write_xmt_margin(dd, __func__);
- /* step 5e: disable active state power management (ASPM) */
+ /*
+ * step 5e: disable active state power management (ASPM). It
+ * will be enabled if required later
+ */
dd_dev_info(dd, "%s: clearing ASPM\n", __func__);
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL, &lnkctl);
- lnkctl &= ~PCI_EXP_LNKCTL_ASPMC;
- pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL, lnkctl);
+ aspm_hw_disable_l1(dd);
/*
* step 5f: clear DirectSpeedChange
@@ -1093,16 +1175,15 @@ retry:
* that it is Gen3 capable earlier.
*/
dd_dev_info(dd, "%s: setting parent target link speed\n", __func__);
- parent = dd->pcidev->bus->self;
pcie_capability_read_word(parent, PCI_EXP_LNKCTL2, &lnkctl2);
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
+ (u32)lnkctl2);
/* only write to parent if target is not as high as ours */
if ((lnkctl2 & LNKCTL2_TARGET_LINK_SPEED_MASK) < target_vector) {
lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
+ (u32)lnkctl2);
pcie_capability_write_word(parent, PCI_EXP_LNKCTL2, lnkctl2);
} else {
dd_dev_info(dd, "%s: ..target speed is OK\n", __func__);
@@ -1111,17 +1192,17 @@ retry:
dd_dev_info(dd, "%s: setting target link speed\n", __func__);
pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKCTL2, &lnkctl2);
dd_dev_info(dd, "%s: ..old link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
+ (u32)lnkctl2);
lnkctl2 &= ~LNKCTL2_TARGET_LINK_SPEED_MASK;
lnkctl2 |= target_vector;
dd_dev_info(dd, "%s: ..new link control2: 0x%x\n", __func__,
- (u32)lnkctl2);
+ (u32)lnkctl2);
pcie_capability_write_word(dd->pcidev, PCI_EXP_LNKCTL2, lnkctl2);
/* step 5h: arm gasket logic */
/* hold DC in reset across the SBR */
write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
- (void) read_csr(dd, CCE_DC_CTRL); /* DC reset hold */
+ (void)read_csr(dd, CCE_DC_CTRL); /* DC reset hold */
/* save firmware control across the SBR */
fw_ctrl = read_csr(dd, MISC_CFG_FW_CTRL);
@@ -1152,8 +1233,8 @@ retry:
ret = pci_read_config_word(dd->pcidev, PCI_VENDOR_ID, &vendor);
if (ret) {
dd_dev_info(dd,
- "%s: read of VendorID failed after SBR, err %d\n",
- __func__, ret);
+ "%s: read of VendorID failed after SBR, err %d\n",
+ __func__, ret);
return_error = 1;
goto done;
}
@@ -1193,8 +1274,7 @@ retry:
write_csr(dd, CCE_DC_CTRL, 0);
/* Set the LED off */
- if (is_ax(dd))
- setextled(dd, 0);
+ setextled(dd, 0);
/* check for any per-lane errors */
pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE2, &reg32);
@@ -1205,8 +1285,8 @@ retry:
& ASIC_PCIE_SD_HOST_STATUS_FW_DNLD_STS_MASK;
if ((status & (1 << dd->hfi1_id)) == 0) {
dd_dev_err(dd,
- "%s: gasket status 0x%x, expecting 0x%x\n",
- __func__, status, 1 << dd->hfi1_id);
+ "%s: gasket status 0x%x, expecting 0x%x\n",
+ __func__, status, 1 << dd->hfi1_id);
ret = -EIO;
goto done;
}
@@ -1223,13 +1303,13 @@ retry:
/* update our link information cache */
update_lbus_info(dd);
dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
- dd->lbus_info);
+ dd->lbus_info);
if (dd->lbus_speed != target_speed) { /* not target */
/* maybe retry */
do_retry = retry_count < pcie_retry;
dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
- pcie_target, do_retry ? ", retrying" : "");
+ pcie_target, do_retry ? ", retrying" : "");
retry_count++;
if (do_retry) {
msleep(100); /* allow time to settle */
@@ -1245,7 +1325,7 @@ done:
dd_dev_info(dd, "%s: Re-enable therm polling\n",
__func__);
}
- release_hw_mutex(dd);
+ release_chip_resource(dd, CR_SBUS);
done_no_mutex:
/* return no error if it is OK to be at current speed */
if (ret && !return_error) {
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/staging/rdma/hfi1/pio.c
index b51a4416312b..c6849ce9e5eb 100644
--- a/drivers/staging/rdma/hfi1/pio.c
+++ b/drivers/staging/rdma/hfi1/pio.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -101,7 +98,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
/* Fall through */
case PSC_DATA_VL_ENABLE:
/* Disallow sending on VLs not enabled */
- mask = (((~0ull)<<num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK)<<
+ mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
SEND_CTRL_UNSUPPORTED_VL_SHIFT;
reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
break;
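A worked example of the mask arithmetic above, assuming an 8-bit unsupported-VL field for illustration:

/*
 * num_vls = 4:
 *   ~0ull << 4                       = ...11110000
 *   & SEND_CTRL_UNSUPPORTED_VL_MASK  = 0xF0 (VLs 4..7 flagged)
 *   << SEND_CTRL_UNSUPPORTED_VL_SHIFT places the field in SEND_CTRL
 */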
@@ -130,7 +127,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
if (write) {
write_csr(dd, SEND_CTRL, reg);
if (flush)
- (void) read_csr(dd, SEND_CTRL); /* flush write */
+ (void)read_csr(dd, SEND_CTRL); /* flush write */
}
spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
@@ -177,8 +174,10 @@ static struct mem_pool_config sc_mem_pool_config[NUM_SC_POOLS] = {
/* memory pool information, used when calculating final sizes */
struct mem_pool_info {
- int centipercent; /* 100th of 1% of memory to use, -1 if blocks
- already set */
+ int centipercent; /*
+ * 100th of 1% of memory to use, -1 if blocks
+ * already set
+ */
int count; /* count of contexts in the pool */
int blocks; /* block size of the pool */
int size; /* context size, in blocks */
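A worked example of the centipercent unit, with hypothetical pool numbers:

/*
 * centipercent = 5000 -> 5000 / 10000 = 50% of PIO memory; with a
 * hypothetical 64000 total blocks the pool gets 32000 blocks.
 * centipercent = -1   -> 'blocks' was specified directly instead.
 */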
@@ -312,7 +311,7 @@ int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
if (i == SC_ACK) {
count = dd->n_krcv_queues;
} else if (i == SC_KERNEL) {
- count = num_vls + 1 /* VL15 */;
+ count = (INIT_SC_PER_VL * num_vls) + 1 /* VL15 */;
} else if (count == SCC_PER_CPU) {
count = dd->num_rcv_contexts - dd->n_krcv_queues;
} else if (count < 0) {
@@ -509,7 +508,7 @@ static void sc_hw_free(struct hfi1_devdata *dd, u32 sw_index, u32 hw_context)
sci = &dd->send_contexts[sw_index];
if (!sci->allocated) {
dd_dev_err(dd, "%s: sw_index %u not allocated? hw_context %u\n",
- __func__, sw_index, hw_context);
+ __func__, sw_index, hw_context);
}
sci->allocated = 0;
dd->hw_to_sw[hw_context] = INVALID_SCI;
@@ -625,7 +624,7 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
& SC(CREDIT_CTRL_THRESHOLD_MASK))
<< SC(CREDIT_CTRL_THRESHOLD_SHIFT));
write_kctxt_csr(sc->dd, sc->hw_context,
- SC(CREDIT_CTRL), sc->credit_ctrl);
+ SC(CREDIT_CTRL), sc->credit_ctrl);
/* force a credit return on change to avoid a possible stall */
force_return = 1;
@@ -700,7 +699,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
if (dd->flags & HFI1_FROZEN)
return NULL;
- sc = kzalloc_node(sizeof(struct send_context), GFP_KERNEL, numa);
+ sc = kzalloc_node(sizeof(*sc), GFP_KERNEL, numa);
if (!sc)
return NULL;
@@ -763,9 +762,9 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
/* set the default partition key */
write_kctxt_csr(dd, hw_context, SC(CHECK_PARTITION_KEY),
- (DEFAULT_PKEY &
- SC(CHECK_PARTITION_KEY_VALUE_MASK))
- << SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
+ (SC(CHECK_PARTITION_KEY_VALUE_MASK) &
+ DEFAULT_PKEY) <<
+ SC(CHECK_PARTITION_KEY_VALUE_SHIFT));
/* per context type checks */
if (type == SC_USER) {
@@ -778,8 +777,8 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
/* set the send context check opcode mask and value */
write_kctxt_csr(dd, hw_context, SC(CHECK_OPCODE),
- ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
- ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
+ ((u64)opmask << SC(CHECK_OPCODE_MASK_SHIFT)) |
+ ((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
/* set up credit return */
reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
@@ -797,7 +796,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
thresh = sc_percent_to_threshold(sc, 50);
} else if (type == SC_USER) {
thresh = sc_percent_to_threshold(sc,
- user_credit_return_threshold);
+ user_credit_return_threshold);
} else { /* kernel */
thresh = sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize);
}
@@ -852,7 +851,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
sc->credit_ctrl,
thresh);
-
return sc;
}
@@ -971,11 +969,11 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
if (loop > 500) {
/* timed out - bounce the link */
dd_dev_err(dd,
- "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
- __func__, sc->sw_index,
- sc->hw_context, (u32)reg);
+ "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
+ __func__, sc->sw_index,
+ sc->hw_context, (u32)reg);
queue_work(dd->pport->hfi1_wq,
- &dd->pport->link_bounce_work);
+ &dd->pport->link_bounce_work);
break;
}
loop++;
@@ -1021,7 +1019,7 @@ int sc_restart(struct send_context *sc)
return -EINVAL;
dd_dev_info(dd, "restarting send context %u(%u)\n", sc->sw_index,
- sc->hw_context);
+ sc->hw_context);
/*
* Step 1: Wait for the context to actually halt.
@@ -1036,7 +1034,7 @@ int sc_restart(struct send_context *sc)
break;
if (loop > 100) {
dd_dev_err(dd, "%s: context %u(%u) not halting, skipping\n",
- __func__, sc->sw_index, sc->hw_context);
+ __func__, sc->sw_index, sc->hw_context);
return -ETIME;
}
loop++;
@@ -1062,9 +1060,9 @@ int sc_restart(struct send_context *sc)
break;
if (loop > 100) {
dd_dev_err(dd,
- "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
- __func__, sc->sw_index,
- sc->hw_context, count);
+ "%s: context %u(%u) timeout waiting for PIO buffers to zero, remaining %d\n",
+ __func__, sc->sw_index,
+ sc->hw_context, count);
}
loop++;
udelay(1);
@@ -1177,18 +1175,18 @@ void pio_reset_all(struct hfi1_devdata *dd)
if (ret == -EIO) {
/* clear the error */
write_csr(dd, SEND_PIO_ERR_CLEAR,
- SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
+ SEND_PIO_ERR_CLEAR_PIO_INIT_SM_IN_ERR_SMASK);
}
/* reset init all */
write_csr(dd, SEND_PIO_INIT_CTXT,
- SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
+ SEND_PIO_INIT_CTXT_PIO_ALL_CTXT_INIT_SMASK);
udelay(2);
ret = pio_init_wait_progress(dd);
if (ret < 0) {
dd_dev_err(dd,
- "PIO send context init %s while initializing all PIO blocks\n",
- ret == -ETIMEDOUT ? "is stuck" : "had an error");
+ "PIO send context init %s while initializing all PIO blocks\n",
+ ret == -ETIMEDOUT ? "is stuck" : "had an error");
}
}
@@ -1236,8 +1234,7 @@ int sc_enable(struct send_context *sc)
*/
reg = read_kctxt_csr(dd, sc->hw_context, SC(ERR_STATUS));
if (reg)
- write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR),
- reg);
+ write_kctxt_csr(dd, sc->hw_context, SC(ERR_CLEAR), reg);
/*
* The HW PIO initialization engine can handle only one init
@@ -1295,7 +1292,7 @@ void sc_return_credits(struct send_context *sc)
/* a 0->1 transition schedules a credit return */
write_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_FORCE),
- SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
+ SC(CREDIT_FORCE_FORCE_RETURN_SMASK));
/*
* Ensure that the write is flushed and the credit return is
* scheduled. We care more about the 0 -> 1 transition.
@@ -1321,7 +1318,7 @@ void sc_drop(struct send_context *sc)
return;
dd_dev_info(sc->dd, "%s: context %u(%u) - not implemented\n",
- __func__, sc->sw_index, sc->hw_context);
+ __func__, sc->sw_index, sc->hw_context);
}
/*
@@ -1346,7 +1343,7 @@ void sc_stop(struct send_context *sc, int flag)
wake_up(&sc->halt_wait);
}
-#define BLOCK_DWORDS (PIO_BLOCK_SIZE/sizeof(u32))
+#define BLOCK_DWORDS (PIO_BLOCK_SIZE / sizeof(u32))
#define dwords_to_blocks(x) DIV_ROUND_UP(x, BLOCK_DWORDS)
/*
@@ -1430,8 +1427,10 @@ retry:
next = head + 1;
if (next >= sc->sr_size)
next = 0;
- /* update the head - must be last! - the releaser can look at fields
- in pbuf once we move the head */
+ /*
+ * update the head - must be last! - the releaser can look at fields
+ * in pbuf once we move the head
+ */
smp_wmb();
sc->sr_head = next;
spin_unlock_irqrestore(&sc->alloc_lock, flags);
@@ -1469,7 +1468,7 @@ void sc_add_credit_return_intr(struct send_context *sc)
if (sc->credit_intr_count == 0) {
sc->credit_ctrl |= SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
write_kctxt_csr(sc->dd, sc->hw_context,
- SC(CREDIT_CTRL), sc->credit_ctrl);
+ SC(CREDIT_CTRL), sc->credit_ctrl);
}
sc->credit_intr_count++;
spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
@@ -1491,7 +1490,7 @@ void sc_del_credit_return_intr(struct send_context *sc)
if (sc->credit_intr_count == 0) {
sc->credit_ctrl &= ~SC(CREDIT_CTRL_CREDIT_INTR_SMASK);
write_kctxt_csr(sc->dd, sc->hw_context,
- SC(CREDIT_CTRL), sc->credit_ctrl);
+ SC(CREDIT_CTRL), sc->credit_ctrl);
}
spin_unlock_irqrestore(&sc->credit_ctrl_lock, flags);
}
@@ -1526,8 +1525,9 @@ static void sc_piobufavail(struct send_context *sc)
struct hfi1_devdata *dd = sc->dd;
struct hfi1_ibdev *dev = &dd->verbs_dev;
struct list_head *list;
- struct hfi1_qp *qps[PIO_WAIT_BATCH_SIZE];
- struct hfi1_qp *qp;
+ struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
unsigned long flags;
unsigned i, n = 0;
@@ -1545,24 +1545,28 @@ static void sc_piobufavail(struct send_context *sc)
struct iowait *wait;
if (n == ARRAY_SIZE(qps))
- goto full;
+ break;
wait = list_first_entry(list, struct iowait, list);
- qp = container_of(wait, struct hfi1_qp, s_iowait);
- list_del_init(&qp->s_iowait.list);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
/* refcount held until actual wake up */
qps[n++] = qp;
}
/*
- * Counting: only call wantpiobuf_intr() if there were waiters and they
- * are now all gone.
+ * If there had been waiters and there are more,
+ * ensure that we redo the force to avoid a potential hang.
*/
- if (n)
+ if (n) {
hfi1_sc_wantpiobuf_intr(sc, 0);
-full:
+ if (!list_empty(list))
+ hfi1_sc_wantpiobuf_intr(sc, 1);
+ }
write_sequnlock_irqrestore(&dev->iowait_lock, flags);
for (i = 0; i < n; i++)
- hfi1_qp_wakeup(qps[i], HFI1_S_WAIT_PIO);
+ hfi1_qp_wakeup(qps[i],
+ RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN);
}
/* translate a send credit update to a bit code of reasons */
@@ -1661,7 +1665,7 @@ void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
sw_index = dd->hw_to_sw[hw_context];
if (unlikely(sw_index >= dd->num_send_contexts)) {
dd_dev_err(dd, "%s: invalid hw (%u) to sw (%u) mapping\n",
- __func__, hw_context, sw_index);
+ __func__, hw_context, sw_index);
goto done;
}
sc = dd->send_contexts[sw_index].sc;
@@ -1674,8 +1678,8 @@ void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context)
sw_index = dd->hw_to_sw[gc];
if (unlikely(sw_index >= dd->num_send_contexts)) {
dd_dev_err(dd,
- "%s: invalid hw (%u) to sw (%u) mapping\n",
- __func__, hw_context, sw_index);
+ "%s: invalid hw (%u) to sw (%u) mapping\n",
+ __func__, hw_context, sw_index);
continue;
}
sc_release_update(dd->send_contexts[sw_index].sc);
@@ -1684,11 +1688,217 @@ done:
spin_unlock(&dd->sc_lock);
}
+/*
+ * pio_select_send_context_vl() - select send context
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @vl: this vl
+ *
+ * This function returns a send context based on the selector and a vl.
+ * The mapping fields are protected by RCU
+ */
+struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
+ u32 selector, u8 vl)
+{
+ struct pio_vl_map *m;
+ struct pio_map_elem *e;
+ struct send_context *rval;
+
+ /*
+ * NOTE: This should only happen if SC->VL changed after the initial
+ * checks on the QP/AH.
+ * The default below returns VL0's send context.
+ */
+ if (unlikely(vl >= num_vls)) {
+ rval = NULL;
+ goto done;
+ }
+
+ rcu_read_lock();
+ m = rcu_dereference(dd->pio_map);
+ if (unlikely(!m)) {
+ rcu_read_unlock();
+ return dd->vld[0].sc;
+ }
+ e = m->map[vl & m->mask];
+ rval = e->ksc[selector & e->mask];
+ rcu_read_unlock();
+
+done:
+ rval = !rval ? dd->vld[0].sc : rval;
+ return rval;
+}
+
+/*
+ * pio_select_send_context_sc() - select send context
+ * @dd: devdata
+ * @selector: a spreading factor
+ * @sc5: the 5 bit sc
+ *
+ * This function returns a send context based on the selector and an sc
+ */
+struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
+ u32 selector, u8 sc5)
+{
+ u8 vl = sc_to_vlt(dd, sc5);
+
+ return pio_select_send_context_vl(dd, selector, vl);
+}
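A hypothetical caller sketch (qpn and sc5 are stand-ins for values taken from the QP/AH being serviced); hashing a stable per-QP value into the selector keeps one QP on one context while different QPs spread across the contexts mapped to their VL:

struct send_context *sc = pio_select_send_context_sc(dd, qpn, sc5);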
+
+/*
+ * Free the indicated map struct
+ */
+static void pio_map_free(struct pio_vl_map *m)
+{
+ int i;
+
+ for (i = 0; m && i < m->actual_vls; i++)
+ kfree(m->map[i]);
+ kfree(m);
+}
+
+/*
+ * Handle RCU callback
+ */
+static void pio_map_rcu_callback(struct rcu_head *list)
+{
+ struct pio_vl_map *m = container_of(list, struct pio_vl_map, list);
+
+ pio_map_free(m);
+}
+
+/*
+ * pio_map_init - called when #vls change
+ * @dd: hfi1_devdata
+ * @port: port number
+ * @num_vls: number of vls
+ * @vl_scontexts: per vl send context mapping (optional)
+ *
+ * This routine changes the mapping based on the number of vls.
+ *
+ * vl_scontexts is used to specify a non-uniform vl/send context
+ * loading. NULL implies auto computing the loading and giving each
+ * VL a uniform distribution of send contexts per VL.
+ *
+ * The auto algorithm computes the sc_per_vl and the number of extra
+ * send contexts. Any extra send contexts are added from the last VL
+ * on down.
+ *
+ * rcu locking is used here to control access to the mapping fields.
+ *
+ * If either num_vls or num_send_contexts is not a power of 2, the
+ * array sizes in the struct pio_vl_map and the struct pio_map_elem are
+ * rounded up to the next highest power of 2 and the first entry is
+ * reused in a round robin fashion.
+ *
+ * If an error occurs, the map change is not done and the mapping is not
+ * changed.
+ *
+ */
+int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
+{
+ int i, j;
+ int extra, sc_per_vl;
+ int scontext = 1;
+ int num_kernel_send_contexts = 0;
+ u8 lvl_scontexts[OPA_MAX_VLS];
+ struct pio_vl_map *oldmap, *newmap;
+
+ if (!vl_scontexts) {
+ /* send context 0 reserved for VL15 */
+ for (i = 1; i < dd->num_send_contexts; i++)
+ if (dd->send_contexts[i].type == SC_KERNEL)
+ num_kernel_send_contexts++;
+ /* truncate divide */
+ sc_per_vl = num_kernel_send_contexts / num_vls;
+ /* extras */
+ extra = num_kernel_send_contexts % num_vls;
+ vl_scontexts = lvl_scontexts;
+ /* add extras from last vl down */
+ for (i = num_vls - 1; i >= 0; i--, extra--)
+ vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
+ }
+ /* build new map */
+ newmap = kzalloc(sizeof(*newmap) +
+ roundup_pow_of_two(num_vls) *
+ sizeof(struct pio_map_elem *),
+ GFP_KERNEL);
+ if (!newmap)
+ goto bail;
+ newmap->actual_vls = num_vls;
+ newmap->vls = roundup_pow_of_two(num_vls);
+ newmap->mask = (1 << ilog2(newmap->vls)) - 1;
+ for (i = 0; i < newmap->vls; i++) {
+ /* save for wrap around */
+ int first_scontext = scontext;
+
+ if (i < newmap->actual_vls) {
+ int sz = roundup_pow_of_two(vl_scontexts[i]);
+
+ /* only allocate once */
+ newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
+ sz * sizeof(struct send_context *),
+ GFP_KERNEL);
+ if (!newmap->map[i])
+ goto bail;
+ newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
+ /* assign send contexts */
+ for (j = 0; j < sz; j++) {
+ if (dd->kernel_send_context[scontext])
+ newmap->map[i]->ksc[j] =
+ dd->kernel_send_context[scontext];
+ if (++scontext >= first_scontext +
+ vl_scontexts[i])
+ /* wrap back to first send context */
+ scontext = first_scontext;
+ }
+ } else {
+ /* just re-use entry without allocating */
+ newmap->map[i] = newmap->map[i % num_vls];
+ }
+ scontext = first_scontext + vl_scontexts[i];
+ }
+ /* newmap in hand, save old map */
+ spin_lock_irq(&dd->pio_map_lock);
+ oldmap = rcu_dereference_protected(dd->pio_map,
+ lockdep_is_held(&dd->pio_map_lock));
+
+ /* publish newmap */
+ rcu_assign_pointer(dd->pio_map, newmap);
+
+ spin_unlock_irq(&dd->pio_map_lock);
+ /* success, free any old map after grace period */
+ if (oldmap)
+ call_rcu(&oldmap->list, pio_map_rcu_callback);
+ return 0;
+bail:
+ /* free any partial allocation */
+ pio_map_free(newmap);
+ return -ENOMEM;
+}
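A worked example of the auto distribution, using hypothetical counts:

/*
 * 9 kernel send contexts after VL15, num_vls = 4:
 *   sc_per_vl = 9 / 4 = 2, extra = 9 % 4 = 1
 *   extras are handed out from the last VL down, so
 *   vl_scontexts[] = { 2, 2, 2, 3 }
 * Each per-VL array is rounded up to a power of 2 and filled round
 * robin, so the VL with 3 contexts gets ksc[4] = { a, b, c, a }.
 */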
+
+void free_pio_map(struct hfi1_devdata *dd)
+{
+ /* Free PIO map if allocated */
+ if (rcu_access_pointer(dd->pio_map)) {
+ spin_lock_irq(&dd->pio_map_lock);
+ pio_map_free(rcu_access_pointer(dd->pio_map));
+ RCU_INIT_POINTER(dd->pio_map, NULL);
+ spin_unlock_irq(&dd->pio_map_lock);
+ synchronize_rcu();
+ }
+ kfree(dd->kernel_send_context);
+ dd->kernel_send_context = NULL;
+}
+
int init_pervl_scs(struct hfi1_devdata *dd)
{
int i;
- u64 mask, all_vl_mask = (u64) 0x80ff; /* VLs 0-7, 15 */
+ u64 mask, all_vl_mask = (u64)0x80ff; /* VLs 0-7, 15 */
+ u64 data_vls_mask = (u64)0x00ff; /* VLs 0-7 */
u32 ctxt;
+ struct hfi1_pportdata *ppd = dd->pport;
dd->vld[15].sc = sc_alloc(dd, SC_KERNEL,
dd->rcd[0]->rcvhdrqentsize, dd->node);
@@ -1696,6 +1906,12 @@ int init_pervl_scs(struct hfi1_devdata *dd)
goto nomem;
hfi1_init_ctxt(dd->vld[15].sc);
dd->vld[15].mtu = enum_to_mtu(OPA_MTU_2048);
+
+ dd->kernel_send_context = kmalloc_node(dd->num_send_contexts *
+ sizeof(struct send_context *),
+ GFP_KERNEL, dd->node);
+ dd->kernel_send_context[0] = dd->vld[15].sc;
+
for (i = 0; i < num_vls; i++) {
/*
* Since this function does not deal with a specific
@@ -1708,12 +1924,19 @@ int init_pervl_scs(struct hfi1_devdata *dd)
dd->rcd[0]->rcvhdrqentsize, dd->node);
if (!dd->vld[i].sc)
goto nomem;
-
+ dd->kernel_send_context[i + 1] = dd->vld[i].sc;
hfi1_init_ctxt(dd->vld[i].sc);
-
/* non VL15 start with the max MTU */
dd->vld[i].mtu = hfi1_max_mtu;
}
+ for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
+ dd->kernel_send_context[i + 1] =
+ sc_alloc(dd, SC_KERNEL, dd->rcd[0]->rcvhdrqentsize, dd->node);
+ if (!dd->kernel_send_context[i + 1])
+ goto nomem;
+ hfi1_init_ctxt(dd->kernel_send_context[i + 1]);
+ }
+
sc_enable(dd->vld[15].sc);
ctxt = dd->vld[15].sc->hw_context;
mask = all_vl_mask & ~(1LL << 15);
@@ -1721,17 +1944,29 @@ int init_pervl_scs(struct hfi1_devdata *dd)
dd_dev_info(dd,
"Using send context %u(%u) for VL15\n",
dd->vld[15].sc->sw_index, ctxt);
+
for (i = 0; i < num_vls; i++) {
sc_enable(dd->vld[i].sc);
ctxt = dd->vld[i].sc->hw_context;
- mask = all_vl_mask & ~(1LL << i);
+ mask = all_vl_mask & ~(data_vls_mask);
write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
}
+ for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++) {
+ sc_enable(dd->kernel_send_context[i + 1]);
+ ctxt = dd->kernel_send_context[i + 1]->hw_context;
+ mask = all_vl_mask & ~(data_vls_mask);
+ write_kctxt_csr(dd, ctxt, SC(CHECK_VL), mask);
+ }
+
+ if (pio_map_init(dd, ppd->port - 1, num_vls, NULL))
+ goto nomem;
return 0;
nomem:
sc_free(dd->vld[15].sc);
for (i = 0; i < num_vls; i++)
sc_free(dd->vld[i].sc);
+ for (i = num_vls; i < INIT_SC_PER_VL * num_vls; i++)
+ sc_free(dd->kernel_send_context[i + 1]);
return -ENOMEM;
}
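The resulting kernel_send_context layout, taking num_vls == 8 as an example (INIT_SC_PER_VL is 2):

/*
 * kernel_send_context[0]     = dd->vld[15].sc   (VL15)
 * kernel_send_context[1..8]  = dd->vld[0..7].sc (one per data VL)
 * kernel_send_context[9..16] = the extra SC_KERNEL contexts
 * pio_map_init() then spreads indices 1..16 across the data VLs.
 */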
@@ -1769,11 +2004,11 @@ int init_credit_return(struct hfi1_devdata *dd)
bytes,
&dd->cr_base[i].pa,
GFP_KERNEL);
- if (dd->cr_base[i].va == NULL) {
+ if (!dd->cr_base[i].va) {
set_dev_node(&dd->pcidev->dev, dd->node);
dd_dev_err(dd,
- "Unable to allocate credit return DMA range for NUMA %d\n",
- i);
+ "Unable to allocate credit return DMA range for NUMA %d\n",
+ i);
ret = -ENOMEM;
goto done;
}
@@ -1797,10 +2032,10 @@ void free_credit_return(struct hfi1_devdata *dd)
for (i = 0; i < num_numa; i++) {
if (dd->cr_base[i].va) {
dma_free_coherent(&dd->pcidev->dev,
- TXE_NUM_CONTEXTS
- * sizeof(struct credit_return),
- dd->cr_base[i].va,
- dd->cr_base[i].pa);
+ TXE_NUM_CONTEXTS *
+ sizeof(struct credit_return),
+ dd->cr_base[i].va,
+ dd->cr_base[i].pa);
}
}
kfree(dd->cr_base);
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/staging/rdma/hfi1/pio.h
index 53d3e0a79375..0026976ce4f6 100644
--- a/drivers/staging/rdma/hfi1/pio.h
+++ b/drivers/staging/rdma/hfi1/pio.h
@@ -1,14 +1,13 @@
#ifndef _PIO_H
#define _PIO_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -50,7 +47,6 @@
*
*/
-
/* send context types */
#define SC_KERNEL 0
#define SC_ACK 1
@@ -106,6 +102,7 @@ struct send_context {
struct hfi1_devdata *dd; /* device */
void __iomem *base_addr; /* start of PIO memory */
union pio_shadow_ring *sr; /* shadow ring */
+
volatile __le64 *hw_free; /* HW free counter */
struct work_struct halt_work; /* halted context work queue entry */
unsigned long flags; /* flags */
@@ -165,6 +162,112 @@ struct sc_config_sizes {
short int count;
};
+/*
+ * The diagram below details the relationship of the mapping structures
+ *
+ * Since the mapping now allows for non-uniform send contexts per vl, the
+ * number of send contexts for a vl is either the vl_scontexts[vl] or
+ * a computation based on num_kernel_send_contexts/num_vls:
+ *
+ * For example:
+ * nactual = vl_scontexts ? vl_scontexts[vl] : num_kernel_send_contexts/num_vls
+ *
+ * n = roundup to next highest power of 2 using nactual
+ *
+ * Where num_kernel_send_contexts/num_vls doesn't divide
+ * evenly, the extras are added from the last vl downward.
+ *
+ * For the case where n > nactual, the send contexts are assigned
+ * in a round robin fashion wrapping back to the first send context
+ * for a particular vl.
+ *
+ * dd->pio_map
+ * | pio_map_elem[0]
+ * | +--------------------+
+ * v | mask |
+ * pio_vl_map |--------------------|
+ * +--------------------------+ | ksc[0] -> sc 1 |
+ * | list (RCU) | |--------------------|
+ * |--------------------------| ->| ksc[1] -> sc 2 |
+ * | mask | --/ |--------------------|
+ * |--------------------------| -/ | * |
+ * | actual_vls (max 8) | -/ |--------------------|
+ * |--------------------------| --/ | ksc[n] -> sc n |
+ * | vls (max 8) | -/ +--------------------+
+ * |--------------------------| --/
+ * | map[0] |-/
+ * |--------------------------| +--------------------+
+ * | map[1] |--- | mask |
+ * |--------------------------| \---- |--------------------|
+ * | * | \-- | ksc[0] -> sc 1+n |
+ * | * | \---- |--------------------|
+ * | * | \->| ksc[1] -> sc 2+n |
+ * |--------------------------| |--------------------|
+ * | map[vls - 1] |- | * |
+ * +--------------------------+ \- |--------------------|
+ * \- | ksc[m] -> sc m+n |
+ * \ +--------------------+
+ * \-
+ * \
+ * \- +--------------------+
+ * \- | mask |
+ * \ |--------------------|
+ * \- | ksc[0] -> sc 1+m+n |
+ * \- |--------------------|
+ * >| ksc[1] -> sc 2+m+n |
+ * |--------------------|
+ * | * |
+ * |--------------------|
+ * | ksc[o] -> sc o+m+n |
+ * +--------------------+
+ *
+ */
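Both masks are a power of 2 minus 1, so the hot-path lookup reduces to two ANDs and two dependent loads; a sketch mirroring pio_select_send_context_vl():

struct pio_vl_map *m = rcu_dereference(dd->pio_map);
struct pio_map_elem *e = m->map[vl & m->mask];
struct send_context *sc = e->ksc[selector & e->mask];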
+
+/* Initial number of send contexts per VL */
+#define INIT_SC_PER_VL 2
+
+/*
+ * struct pio_map_elem - mapping for a vl
+ * @mask - selector mask
+ * @ksc - array of kernel send contexts for this vl
+ *
+ * The mask is used to "mod" the selector to
+ * produce index into the trailing array of
+ * kscs
+ */
+struct pio_map_elem {
+ u32 mask;
+ struct send_context *ksc[0];
+};
+
+/*
+ * struct pio_vl_map - mapping for a vl
+ * @list - rcu head for free callback
+ * @mask - vl mask to "mod" the vl to produce an index to map array
+ * @actual_vls - number of vls
+ * @vls - numbers of vls rounded to next power of 2
+ * @map - array of pio_map_elem entries
+ *
+ * This is the parent mapping structure. The trailing members of the
+ * struct point to pio_map_elem entries, which in turn point to an
+ * array of kscs for that vl.
+ */
+struct pio_vl_map {
+ struct rcu_head list;
+ u32 mask;
+ u8 actual_vls;
+ u8 vls;
+ struct pio_map_elem *map[0];
+};
+
+int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls,
+ u8 *vl_scontexts);
+void free_pio_map(struct hfi1_devdata *dd);
+struct send_context *pio_select_send_context_vl(struct hfi1_devdata *dd,
+ u32 selector, u8 vl);
+struct send_context *pio_select_send_context_sc(struct hfi1_devdata *dd,
+ u32 selector, u8 sc5);
+
/* send context functions */
int init_credit_return(struct hfi1_devdata *dd);
void free_credit_return(struct hfi1_devdata *dd);
@@ -183,7 +286,7 @@ void sc_flush(struct send_context *sc);
void sc_drop(struct send_context *sc);
void sc_stop(struct send_context *sc, int bit);
struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
- pio_release_cb cb, void *arg);
+ pio_release_cb cb, void *arg);
void sc_release_update(struct send_context *sc);
void sc_return_credits(struct send_context *sc);
void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
@@ -212,12 +315,11 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd);
void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl);
void pio_send_control(struct hfi1_devdata *dd, int op);
-
/* PIO copy routines */
void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
const void *from, size_t count);
void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
- const void *from, size_t nbytes);
+ const void *from, size_t nbytes);
void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
void seg_pio_copy_end(struct pio_buf *pbuf);
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/staging/rdma/hfi1/pio_copy.c
index ebb0bafc68cb..8c25e1b58849 100644
--- a/drivers/staging/rdma/hfi1/pio_copy.c
+++ b/drivers/staging/rdma/hfi1/pio_copy.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -52,9 +49,9 @@
/* additive distance between non-SOP and SOP space */
#define SOP_DISTANCE (TXE_PIO_SIZE / 2)
-#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE-1)
+#define PIO_BLOCK_MASK (PIO_BLOCK_SIZE - 1)
/* number of QUADWORDs in a block */
-#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE/sizeof(u64))
+#define PIO_BLOCK_QWS (PIO_BLOCK_SIZE / sizeof(u64))
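Address-math sketch (behavior inferred from the copy routines below): each PIO buffer is aliased in two MMIO ranges, and writes into the upper alias are tagged start-of-packet:

void __iomem *nosop = pbuf->start;			/* SOP = 0 space */
void __iomem *sop = pbuf->start + SOP_DISTANCE;		/* SOP = 1 space */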
/**
* pio_copy - copy data block to MMIO space
@@ -83,11 +80,13 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
dest += sizeof(u64);
/* calculate where the QWORD data ends - in SOP=1 space */
- dend = dest + ((count>>1) * sizeof(u64));
+ dend = dest + ((count >> 1) * sizeof(u64));
if (dend < send) {
- /* all QWORD data is within the SOP block, does *not*
- reach the end of the SOP block */
+ /*
+ * all QWORD data is within the SOP block, does *not*
+ * reach the end of the SOP block
+ */
while (dest < dend) {
writeq(*(u64 *)from, dest);
@@ -152,8 +151,10 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
writeq(val.val64, dest);
dest += sizeof(u64);
}
- /* fill in rest of block, no need to check pbuf->end
- as we only wrap on a block boundary */
+ /*
+ * fill in rest of block, no need to check pbuf->end
+ * as we only wrap on a block boundary
+ */
while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
writeq(0, dest);
dest += sizeof(u64);
@@ -177,7 +178,7 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
* "zero" shift - bit shift used to zero out upper bytes. Input is
* the count of LSB bytes to preserve.
*/
-#define zshift(x) (8 * (8-(x)))
+#define zshift(x) (8 * (8 - (x)))
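A worked example, preserving the 3 LSB bytes of a u64:

/*
 * zshift(3) = 8 * (8 - 3) = 40
 * (0x1122334455667788ull << 40) >> 40 == 0x0000000000667788
 */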
/*
* "merge" shift - bit shift used to merge with carry bytes. Input is
@@ -196,7 +197,7 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
* o nbytes must not span a QW boundary
*/
static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
- unsigned int nbytes)
+ unsigned int nbytes)
{
unsigned long off;
@@ -223,7 +224,7 @@ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
* o nbytes may span a QW boundary
*/
static inline void read_extra_bytes(struct pio_buf *pbuf,
- const void *from, unsigned int nbytes)
+ const void *from, unsigned int nbytes)
{
unsigned long off = (unsigned long)from & 0x7;
unsigned int room, xbytes;
@@ -235,7 +236,7 @@ static inline void read_extra_bytes(struct pio_buf *pbuf,
while (nbytes) {
/* find the number of bytes in this u64 */
room = 8 - off; /* this u64 has room for this many bytes */
- xbytes = nbytes > room ? room : nbytes;
+ xbytes = min(room, nbytes);
/*
* shift down to zero lower bytes, shift up to zero upper
@@ -244,7 +245,7 @@ static inline void read_extra_bytes(struct pio_buf *pbuf,
pbuf->carry.val64 |= (((*(u64 *)from)
>> mshift(off))
<< zshift(xbytes))
- >> zshift(xbytes+pbuf->carry_bytes);
+ >> zshift(xbytes + pbuf->carry_bytes);
off = 0;
pbuf->carry_bytes += xbytes;
nbytes -= xbytes;
@@ -362,7 +363,7 @@ static inline void jcopy(u8 *dest, const u8 *src, u32 n)
* o from may _not_ be u64 aligned.
*/
static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
- unsigned int nbytes)
+ unsigned int nbytes)
{
jcopy(&pbuf->carry.val8[0], from, nbytes);
pbuf->carry_bytes = nbytes;
@@ -377,7 +378,7 @@ static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
* o nbytes may span a QW boundary
*/
static inline void read_extra_bytes(struct pio_buf *pbuf,
- const void *from, unsigned int nbytes)
+ const void *from, unsigned int nbytes)
{
jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes);
pbuf->carry_bytes += nbytes;
@@ -411,7 +412,7 @@ static inline void merge_write8(
jcopy(&pbuf->carry.val8[pbuf->carry_bytes], src, remainder);
writeq(pbuf->carry.val64, dest);
- jcopy(&pbuf->carry.val8[0], src+remainder, pbuf->carry_bytes);
+ jcopy(&pbuf->carry.val8[0], src + remainder, pbuf->carry_bytes);
}
/*
@@ -433,7 +434,7 @@ static inline int carry_write8(struct pio_buf *pbuf, void *dest)
u64 zero = 0;
jcopy(&pbuf->carry.val8[pbuf->carry_bytes], (u8 *)&zero,
- 8 - pbuf->carry_bytes);
+ 8 - pbuf->carry_bytes);
writeq(pbuf->carry.val64, dest);
return 1;
}
@@ -453,7 +454,7 @@ static inline int carry_write8(struct pio_buf *pbuf, void *dest)
* @nbytes: bytes to copy
*/
void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
- const void *from, size_t nbytes)
+ const void *from, size_t nbytes)
{
void __iomem *dest = pbuf->start + SOP_DISTANCE;
void __iomem *send = dest + PIO_BLOCK_SIZE;
@@ -463,11 +464,13 @@ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
dest += sizeof(u64);
/* calculate where the QWORD data ends - in SOP=1 space */
- dend = dest + ((nbytes>>3) * sizeof(u64));
+ dend = dest + ((nbytes >> 3) * sizeof(u64));
if (dend < send) {
- /* all QWORD data is within the SOP block, does *not*
- reach the end of the SOP block */
+ /*
+ * all QWORD data is within the SOP block, does *not*
+ * reach the end of the SOP block
+ */
while (dest < dend) {
writeq(*(u64 *)from, dest);
@@ -562,10 +565,12 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
void __iomem *send; /* SOP end */
void __iomem *xend;
- /* calculate the end of data or end of block, whichever
- comes first */
+ /*
+ * calculate the end of data or end of block, whichever
+ * comes first
+ */
send = pbuf->start + PIO_BLOCK_SIZE;
- xend = send < dend ? send : dend;
+ xend = min(send, dend);
/* shift up to SOP=1 space */
dest += SOP_DISTANCE;
@@ -639,13 +644,13 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
* Must handle nbytes < 8.
*/
static void mid_copy_straight(struct pio_buf *pbuf,
- const void *from, size_t nbytes)
+ const void *from, size_t nbytes)
{
void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
void __iomem *dend; /* 8-byte data end */
/* calculate 8-byte data end */
- dend = dest + ((nbytes>>3) * sizeof(u64));
+ dend = dest + ((nbytes >> 3) * sizeof(u64));
if (pbuf->qw_written < PIO_BLOCK_QWS) {
/*
@@ -656,10 +661,12 @@ static void mid_copy_straight(struct pio_buf *pbuf,
void __iomem *send; /* SOP end */
void __iomem *xend;
- /* calculate the end of data or end of block, whichever
- comes first */
+ /*
+ * calculate the end of data or end of block, whichever
+ * comes first
+ */
send = pbuf->start + PIO_BLOCK_SIZE;
- xend = send < dend ? send : dend;
+ xend = min(send, dend);
/* shift up to SOP=1 space */
dest += SOP_DISTANCE;
@@ -713,7 +720,7 @@ static void mid_copy_straight(struct pio_buf *pbuf,
/* we know carry_bytes was zero on entry to this routine */
read_low_bytes(pbuf, from, nbytes & 0x7);
- pbuf->qw_written += nbytes>>3;
+ pbuf->qw_written += nbytes >> 3;
}
/*
diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/staging/rdma/hfi1/platform.c
new file mode 100644
index 000000000000..0a1d074583e4
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/platform.c
@@ -0,0 +1,893 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+#include "efivar.h"
+
+void get_platform_config(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+ unsigned long size = 0;
+ u8 *temp_platform_config = NULL;
+
+ ret = read_hfi1_efi_var(dd, "configuration", &size,
+ (void **)&temp_platform_config);
+ if (ret) {
+ dd_dev_info(dd,
+ "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
+ __func__);
+ /* fall back to request firmware */
+ platform_config_load = 1;
+ goto bail;
+ }
+
+ dd->platform_config.data = temp_platform_config;
+ dd->platform_config.size = size;
+
+bail:
+ /* exit */;
+}
+
+void free_platform_config(struct hfi1_devdata *dd)
+{
+ if (!platform_config_load) {
+ /*
+ * was loaded from EFI, release memory
+ * allocated by read_efi_var
+ */
+ kfree(dd->platform_config.data);
+ }
+ /*
+ * else do nothing, dispose_firmware will release
+ * struct firmware platform_config on driver exit
+ */
+}
+
+int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
+{
+ u8 tx_ctrl_byte = on ? 0x0 : 0xF;
+ int ret = 0;
+
+ ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
+ &tx_ctrl_byte, 1);
+ /* we expected 1, so consider 0 an error */
+ if (ret == 0)
+ ret = -EIO;
+ else if (ret == 1)
+ ret = 0;
+ return ret;
+}
+
+static int qual_power(struct hfi1_pportdata *ppd)
+{
+ u32 cable_power_class = 0, power_class_max = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+ int ret = 0;
+
+ ret = get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
+ SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
+ if (ret)
+ return ret;
+
+ if (QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]) != 4)
+ cable_power_class = QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]);
+ else
+ cable_power_class = QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]);
+
+ if (cable_power_class <= 3 && cable_power_class > (power_class_max - 1))
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
+ else if (cable_power_class > 4 && cable_power_class > (power_class_max))
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
+ /*
+ * cable_power_class will never have value 4 as this simply
+ * means the high power settings are unused
+ */
+
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: Port disabled due to system power restrictions\n",
+ __func__);
+ ret = -EPERM;
+ }
+ return ret;
+}
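A summary of the decode path above, hedged since the threshold comparisons are subtle:

/*
 * - a high-power field of 4 means "high power unused", so the legacy
 *   QSFP_PWR field supplies the class instead;
 * - a class that trips the power_class_max comparison sets
 *   OPA_LINKDOWN_REASON_POWER_POLICY and qual_power() returns -EPERM,
 *   leaving the port disabled.
 */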
+
+static int qual_bitrate(struct hfi1_pportdata *ppd)
+{
+ u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
+ cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
+
+ if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
+ cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);
+
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: Cable failed bitrate check, disabling port\n",
+ __func__);
+ return -EPERM;
+ }
+ return 0;
+}
+
+static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
+{
+ u8 cable_power_class = 0, power_ctrl_byte = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+ int ret;
+
+ if (QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]) != 4)
+ cable_power_class = QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]);
+ else
+ cable_power_class = QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]);
+
+ if (cable_power_class) {
+ power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
+
+ power_ctrl_byte |= 1;
+ power_ctrl_byte &= ~(0x2);
+
+ ret = qsfp_write(ppd, ppd->dd->hfi1_id,
+ QSFP_PWR_CTRL_BYTE_OFFS,
+ &power_ctrl_byte, 1);
+ if (ret != 1)
+ return -EIO;
+
+ if (cable_power_class > 3) {
+ /* > power class 4 */
+ power_ctrl_byte |= (1 << 2);
+ ret = qsfp_write(ppd, ppd->dd->hfi1_id,
+ QSFP_PWR_CTRL_BYTE_OFFS,
+ &power_ctrl_byte, 1);
+ if (ret != 1)
+ return -EIO;
+ }
+
+ /* SFF 8679 rev 1.7 LPMode Deassert time */
+ msleep(300);
+ }
+ return 0;
+}
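The bits being flipped above, assuming the SFF-8636 byte 93 layout for the power control byte:

/*
 * bit 0: power override    -> set: host controls the power mode
 * bit 1: power set (low)   -> cleared: module may leave low power
 * bit 2: high-power enable -> set for the highest power classes
 */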
+
+static void apply_rx_cdr(struct hfi1_pportdata *ppd,
+ u32 rx_preset_index,
+ u8 *cdr_ctrl_byte)
+{
+ u32 rx_preset;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
+ (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
+ return;
+
+ /* rx_preset is preset to zero to catch errors */
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
+ &rx_preset, 4);
+
+ if (!rx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: RX_CDR_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
+ &rx_preset, 4);
+
+ /* Expand cdr setting to all 4 lanes */
+ rx_preset = (rx_preset | (rx_preset << 1) |
+ (rx_preset << 2) | (rx_preset << 3));
+
+ if (rx_preset) {
+ *cdr_ctrl_byte |= rx_preset;
+ } else {
+ *cdr_ctrl_byte &= rx_preset;
+ /* Preserve current TX CDR status */
+ *cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
+ }
+}
+
+static void apply_tx_cdr(struct hfi1_pportdata *ppd,
+ u32 tx_preset_index,
+ u8 *ctr_ctrl_byte)
+{
+ u32 tx_preset = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
+ (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
+ return;
+
+ get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
+ TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);
+
+ if (!tx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: TX_CDR_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index,
+ TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);
+
+ /* Expand cdr setting to all 4 lanes */
+ tx_preset = (tx_preset | (tx_preset << 1) |
+ (tx_preset << 2) | (tx_preset << 3));
+
+ if (tx_preset)
+ *ctr_ctrl_byte |= (tx_preset << 4);
+ else
+ /* Preserve current/determined RX CDR status */
+ *ctr_ctrl_byte &= ((tx_preset << 4) | 0xF);
+}
+
+static void apply_cdr_settings(
+ struct hfi1_pportdata *ppd, u32 rx_preset_index,
+ u32 tx_preset_index)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
+
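+ /*
+ * The CDR control byte carries one enable bit per lane, RX in
+ * the low nibble and TX in the high nibble; the helpers below
+ * expand their presets accordingly.
+ */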
+ apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
+
+ apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
+
+ qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
+ &cdr_ctrl_byte, 1);
+}
+
+static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ u8 tx_eq;
+
+ if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
+ return;
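+ /*
+ * Cache index (128 * 3) + b is byte b of upper page 03, while
+ * qsfp_write() addresses the same byte as (256 * 3) + b, i.e.
+ * page * 256 + offset.
+ */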
+ /* Disable adaptive TX EQ if present */
+ tx_eq = cache[(128 * 3) + 241];
+ tx_eq &= 0xF0;
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
+}
+
+static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+ u32 tx_preset = 0;
+ u8 tx_eq;
+
+ if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
+ return;
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
+ &tx_preset, 4);
+ if (!tx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: TX_EQ_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
+ &tx_preset, 4);
+
+ if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: TX EQ %x unsupported\n",
+ __func__, tx_preset);
+
+ dd_dev_info(
+ ppd->dd,
+ "%s: Applying EQ %x\n",
+ __func__, cache[(128 * 3) + 224] & 0xF0);
+
+ tx_preset = (cache[(128 * 3) + 224] & 0xF0) >> 4;
+ }
+
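+ /*
+ * One nibble per lane, two lanes per byte: writing the same
+ * value to both EQ bytes programs all four TX lanes.
+ */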
+ tx_eq = tx_preset | (tx_preset << 4);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
+}
+
+static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
+{
+ u32 rx_preset = 0;
+ u8 rx_eq, *cache = ppd->qsfp_info.cache;
+
+ if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
+ return;
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
+ &rx_preset, 4);
+
+ if (!rx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: RX_EMP_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
+ &rx_preset, 4);
+
+ if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
+ dd_dev_info(
+ ppd->dd,
+ "%s: Requested RX EMP %x\n",
+ __func__, rx_preset);
+
+ dd_dev_info(
+ ppd->dd,
+ "%s: Applying supported EMP %x\n",
+ __func__, cache[(128 * 3) + 224] & 0xF);
+
+ rx_preset = cache[(128 * 3) + 224] & 0xF;
+ }
+
+ rx_eq = rx_preset | (rx_preset << 4);
+
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
+}
+
+static void apply_eq_settings(struct hfi1_pportdata *ppd,
+ u32 rx_preset_index, u32 tx_preset_index)
+{
+ u8 *cache = ppd->qsfp_info.cache;
+
+ /* no point going on w/o a page 3 */
+ if (cache[2] & 4) {
+ dd_dev_info(ppd->dd,
+ "%s: Upper page 03 not present\n",
+ __func__);
+ return;
+ }
+
+ apply_tx_eq_auto(ppd);
+
+ apply_tx_eq_prog(ppd, tx_preset_index);
+
+ apply_rx_eq_emp(ppd, rx_preset_index);
+}
+
+static void apply_rx_amplitude_settings(
+ struct hfi1_pportdata *ppd, u32 rx_preset_index,
+ u32 tx_preset_index)
+{
+ u32 rx_preset = 0;
+ u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;
+
+ /* no point going on w/o a page 3 */
+ if (cache[2] & 4) {
+ dd_dev_info(ppd->dd,
+ "%s: Upper page 03 not present\n",
+ __func__);
+ return;
+ }
+ if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
+ dd_dev_info(ppd->dd,
+ "%s: RX_AMP_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+
+ get_platform_config_field(ppd->dd,
+ PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index,
+ RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
+ &rx_preset, 4);
+
+ if (!rx_preset) {
+ dd_dev_info(ppd->dd,
+ "%s: RX_AMP_APPLY is set to disabled\n",
+ __func__);
+ return;
+ }
+ get_platform_config_field(ppd->dd,
+ PLATFORM_CONFIG_RX_PRESET_TABLE,
+ rx_preset_index,
+ RX_PRESET_TABLE_QSFP_RX_AMP,
+ &rx_preset, 4);
+
+ dd_dev_info(ppd->dd,
+ "%s: Requested RX AMP %x\n",
+ __func__,
+ rx_preset);
+
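+ /*
+ * Walk the supported-amplitude bitmap: stop at the requested
+ * setting if the module advertises it, otherwise settle for
+ * the highest advertised setting.
+ */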
+ for (i = 0; i < 4; i++) {
+ if (cache[(128 * 3) + 225] & (1 << i)) {
+ preferred = i;
+ if (preferred == rx_preset)
+ break;
+ }
+ }
+
+ /*
+ * Verify that the preferred RX amplitude is not merely the
+ * fall-through default of zero
+ */
+ if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
+ dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
+ return;
+ }
+
+ dd_dev_info(ppd->dd,
+ "%s: Applying RX AMP %x\n", __func__, preferred);
+
+ rx_amp = preferred | (preferred << 4);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
+ qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
+}
+
+#define OPA_INVALID_INDEX 0xFFF
+
+static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
+ u32 config_data, const char *message)
+{
+ u8 i;
+ int ret = HCMD_SUCCESS;
+
+ for (i = 0; i < 4; i++) {
+ ret = load_8051_config(ppd->dd, field_id, i, config_data);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(
+ ppd->dd,
+ "%s: %s for lane %u failed\n",
+ __func__, message, i);
+ }
+ }
+}
+
+static void apply_tunings(
+ struct hfi1_pportdata *ppd, u32 tx_preset_index,
+ u8 tuning_method, u32 total_atten, u8 limiting_active)
+{
+ int ret = 0;
+ u32 config_data = 0, tx_preset = 0;
+ u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ /* Enable external device config if channel is limiting active */
+ read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
+ GENERAL_CONFIG, &config_data);
+ config_data |= limiting_active;
+ ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
+ GENERAL_CONFIG, config_data);
+ if (ret != HCMD_SUCCESS)
+ dd_dev_err(
+ ppd->dd,
+ "%s: Failed to set enable external device config\n",
+ __func__);
+
+ config_data = 0; /* re-init */
+ /* Pass tuning method to 8051 */
+ read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
+ &config_data);
+ config_data |= tuning_method;
+ ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
+ config_data);
+ if (ret != HCMD_SUCCESS)
+ dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
+ __func__);
+
+ /* Set same channel loss for both TX and RX */
+ config_data = (total_atten << 16) | (total_atten << 24);
+ apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
+ "Setting channel loss");
+
+ /* Inform 8051 of cable capabilities */
+ if (ppd->qsfp_info.cache_valid) {
+ external_device_config =
+ ((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
+ ((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
+ ((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
+ (cache[QSFP_EQ_INFO_OFFS] & 0x4);
+ ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
+ GENERAL_CONFIG, &config_data);
+ /* Clear, then set the external device config field */
+ config_data &= ~(0xFF << 24);
+ config_data |= (external_device_config << 24);
+ ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
+ GENERAL_CONFIG, config_data);
+ if (ret != HCMD_SUCCESS)
+ dd_dev_info(ppd->dd,
+ "%s: Failed set ext device config params\n",
+ __func__);
+ }
+
+ if (tx_preset_index == OPA_INVALID_INDEX) {
+ if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
+ dd_dev_info(ppd->dd, "%s: Invalid Tx preset index\n",
+ __func__);
+ return;
+ }
+
+ /* Following for limiting active channels only */
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
+ TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
+ precur = tx_preset;
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
+ attn = tx_preset;
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
+ tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
+ postcur = tx_preset;
+
+ config_data = precur | (attn << 8) | (postcur << 16);
+
+ apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
+ "Applying TX settings");
+}
+
+static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
+ u32 *ptr_rx_preset, u32 *ptr_total_atten)
+{
+ int ret;
+ u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
+ u8 *cache = ppd->qsfp_info.cache;
+
+ ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT);
+ if (ret) {
+ dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
+ __func__, (int)ppd->dd->hfi1_id);
+ return ret;
+ }
+
+ ppd->qsfp_info.limiting_active = 1;
+
+ ret = set_qsfp_tx(ppd, 0);
+ if (ret)
+ goto bail_unlock;
+
+ ret = qual_power(ppd);
+ if (ret)
+ goto bail_unlock;
+
+ ret = qual_bitrate(ppd);
+ if (ret)
+ goto bail_unlock;
+
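+ /*
+ * First pass: leave the module alone but arm reset_needed so
+ * that a subsequent retune resets it and refreshes the cache.
+ */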
+ if (ppd->qsfp_info.reset_needed) {
+ reset_qsfp(ppd);
+ ppd->qsfp_info.reset_needed = 0;
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+ } else {
+ ppd->qsfp_info.reset_needed = 1;
+ }
+
+ ret = set_qsfp_high_power(ppd);
+ if (ret)
+ goto bail_unlock;
+
+ if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
+ ret = get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
+ ptr_tx_preset, 4);
+ if (ret) {
+ *ptr_tx_preset = OPA_INVALID_INDEX;
+ goto bail_unlock;
+ }
+ } else {
+ ret = get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
+ ptr_tx_preset, 4);
+ if (ret) {
+ *ptr_tx_preset = OPA_INVALID_INDEX;
+ goto bail_unlock;
+ }
+ }
+
+ ret = get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
+ if (ret) {
+ *ptr_rx_preset = OPA_INVALID_INDEX;
+ goto bail_unlock;
+ }
+
+ if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
+ else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);
+
+ apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
+
+ apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
+
+ apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);
+
+ ret = set_qsfp_tx(ppd, 1);
+
+bail_unlock:
+ release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
+ return ret;
+}
+
+static int tune_qsfp(struct hfi1_pportdata *ppd,
+ u32 *ptr_tx_preset, u32 *ptr_rx_preset,
+ u8 *ptr_tuning_method, u32 *ptr_total_atten)
+{
+ u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
+ u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
+ int ret = 0;
+ u8 *cache = ppd->qsfp_info.cache;
+
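+ /*
+ * Switch on the upper nibble of the technology byte: 0xA-0xB
+ * are passive copper, 0x0-0x9 optical and 0xC/0xE active
+ * equalized copper, 0xD/0xF unsupported (per SFF-8636).
+ */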
+ switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
+ case 0xA ... 0xB:
+ ret = get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_25G,
+ &platform_atten, 4);
+ if (ret)
+ return ret;
+
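+ /*
+ * Pick the attenuation byte by Nyquist frequency, roughly half
+ * the line rate: ~12.9 GHz for 25 Gb/s, ~7 GHz for 12.5 Gb/s.
+ */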
+ if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
+ cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
+ else if ((lss & OPA_LINK_SPEED_12_5G) &&
+ (lse & OPA_LINK_SPEED_12_5G))
+ cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];
+
+ /* Fall back to configured attenuation if cable memory is bad */
+ if (cable_atten == 0 || cable_atten > 36) {
+ ret = get_platform_config_field(
+ ppd->dd,
+ PLATFORM_CONFIG_SYSTEM_TABLE, 0,
+ SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
+ &cable_atten, 4);
+ if (ret)
+ return ret;
+ }
+
+ ret = get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
+ if (ret)
+ return ret;
+
+ *ptr_total_atten = platform_atten + cable_atten + remote_atten;
+
+ *ptr_tuning_method = OPA_PASSIVE_TUNING;
+ break;
+ case 0x0 ... 0x9: /* fallthrough */
+ case 0xC: /* fallthrough */
+ case 0xE:
+ ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
+ ptr_total_atten);
+ if (ret)
+ return ret;
+
+ *ptr_tuning_method = OPA_ACTIVE_TUNING;
+ break;
+ case 0xD: /* fallthrough */
+ case 0xF:
+ default:
+ dd_dev_info(ppd->dd, "%s: Unknown/unsupported cable\n",
+ __func__);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * This function communicates its success or failure via
+ * ppd->driver_link_ready. Thus, it depends on its association with
+ * start_link(...), which checks driver_link_ready before proceeding
+ * with the link negotiation and initialization process.
+ */
+void tune_serdes(struct hfi1_pportdata *ppd)
+{
+ int ret = 0;
+ u32 total_atten = 0;
+ u32 remote_atten = 0, platform_atten = 0;
+ u32 rx_preset_index, tx_preset_index;
+ u8 tuning_method = 0, limiting_active = 0;
+ struct hfi1_devdata *dd = ppd->dd;
+
+ rx_preset_index = OPA_INVALID_INDEX;
+ tx_preset_index = OPA_INVALID_INDEX;
+
+ /* the link defaults to enabled */
+ ppd->link_enabled = 1;
+ /* the driver link ready state defaults to not ready */
+ ppd->driver_link_ready = 0;
+ ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
+
+ /* Skip the tuning for testing (loopback != none) and simulations */
+ if (loopback != LOOPBACK_NONE ||
+ ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
+ ppd->driver_link_ready = 1;
+ return;
+ }
+
+ ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_PORT_TYPE, &ppd->port_type,
+ 4);
+ if (ret)
+ ppd->port_type = PORT_TYPE_UNKNOWN;
+
+ switch (ppd->port_type) {
+ case PORT_TYPE_DISCONNECTED:
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
+ dd_dev_info(dd, "%s: Port disconnected, disabling port\n",
+ __func__);
+ goto bail;
+ case PORT_TYPE_FIXED:
+ /* platform_atten, remote_atten pre-zeroed to catch error */
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
+
+ total_atten = platform_atten + remote_atten;
+
+ tuning_method = OPA_PASSIVE_TUNING;
+ break;
+ case PORT_TYPE_VARIABLE:
+ if (qsfp_mod_present(ppd)) {
+ /*
+ * platform_atten, remote_atten pre-zeroed to
+ * catch error
+ */
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_LOCAL_ATTEN_25G,
+ &platform_atten, 4);
+
+ get_platform_config_field(
+ ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_REMOTE_ATTEN_25G,
+ &remote_atten, 4);
+
+ total_atten = platform_atten + remote_atten;
+
+ tuning_method = OPA_PASSIVE_TUNING;
+ } else
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
+ break;
+ case PORT_TYPE_QSFP:
+ if (qsfp_mod_present(ppd)) {
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+
+ if (ppd->qsfp_info.cache_valid) {
+ ret = tune_qsfp(ppd,
+ &tx_preset_index,
+ &rx_preset_index,
+ &tuning_method,
+ &total_atten);
+
+ /*
+ * We may have modified the QSFP memory, so
+ * update the cache to reflect the changes
+ */
+ refresh_qsfp_cache(ppd, &ppd->qsfp_info);
+ if (ret)
+ goto bail;
+
+ limiting_active =
+ ppd->qsfp_info.limiting_active;
+ } else {
+ dd_dev_err(dd,
+ "%s: Reading QSFP memory failed\n",
+ __func__);
+ goto bail;
+ }
+ } else
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(
+ OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
+ break;
+ default:
+ dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
+ ppd->port_type = PORT_TYPE_UNKNOWN;
+ tuning_method = OPA_UNKNOWN_TUNING;
+ total_atten = 0;
+ limiting_active = 0;
+ tx_preset_index = OPA_INVALID_INDEX;
+ break;
+ }
+
+ if (ppd->offline_disabled_reason ==
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
+ apply_tunings(ppd, tx_preset_index, tuning_method,
+ total_atten, limiting_active);
+
+ if (!ret)
+ ppd->driver_link_ready = 1;
+
+ return;
+bail:
+ ppd->driver_link_ready = 0;
+}
diff --git a/drivers/staging/rdma/hfi1/platform_config.h b/drivers/staging/rdma/hfi1/platform.h
index 8a94a8342052..19620cf546d5 100644
--- a/drivers/staging/rdma/hfi1/platform_config.h
+++ b/drivers/staging/rdma/hfi1/platform.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -47,8 +44,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
-#ifndef __PLATFORM_CONFIG_H
-#define __PLATFORM_CONFIG_H
+#ifndef __PLATFORM_H
+#define __PLATFORM_H
#define METADATA_TABLE_FIELD_START_SHIFT 0
#define METADATA_TABLE_FIELD_START_LEN_BITS 15
@@ -94,17 +91,18 @@ enum platform_config_system_table_fields {
enum platform_config_port_table_fields {
PORT_TABLE_RESERVED,
PORT_TABLE_PORT_TYPE,
- PORT_TABLE_ATTENUATION_12G,
- PORT_TABLE_ATTENUATION_25G,
+ PORT_TABLE_LOCAL_ATTEN_12G,
+ PORT_TABLE_LOCAL_ATTEN_25G,
PORT_TABLE_LINK_SPEED_SUPPORTED,
PORT_TABLE_LINK_WIDTH_SUPPORTED,
+ PORT_TABLE_AUTO_LANE_SHEDDING_ENABLED,
+ PORT_TABLE_EXTERNAL_LOOPBACK_ALLOWED,
PORT_TABLE_VL_CAP,
PORT_TABLE_MTU_CAP,
PORT_TABLE_TX_LANE_ENABLE_MASK,
PORT_TABLE_LOCAL_MAX_TIMEOUT,
- PORT_TABLE_AUTO_LANE_SHEDDING_ENABLED,
- PORT_TABLE_EXTERNAL_LOOPBACK_ALLOWED,
- PORT_TABLE_TX_PRESET_IDX_PASSIVE_CU,
+ PORT_TABLE_REMOTE_ATTEN_12G,
+ PORT_TABLE_REMOTE_ATTEN_25G,
PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
PORT_TABLE_RX_PRESET_IDX,
@@ -115,10 +113,10 @@ enum platform_config_port_table_fields {
enum platform_config_rx_preset_table_fields {
RX_PRESET_TABLE_RESERVED,
RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
- RX_PRESET_TABLE_QSFP_RX_EQ_APPLY,
+ RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
RX_PRESET_TABLE_QSFP_RX_CDR,
- RX_PRESET_TABLE_QSFP_RX_EQ,
+ RX_PRESET_TABLE_QSFP_RX_EMP,
RX_PRESET_TABLE_QSFP_RX_AMP,
RX_PRESET_TABLE_MAX
};
@@ -149,6 +147,11 @@ enum platform_config_variable_settings_table_fields {
VARIABLE_SETTINGS_TABLE_MAX
};
+struct platform_config {
+ size_t size;
+ const u8 *data;
+};
+
struct platform_config_data {
u32 *table;
u32 *table_metadata;
@@ -179,9 +182,11 @@ static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
* fields defined for each table above
*/
-/*=====================================================
+/*
+ * =====================================================
* System table encodings
- *====================================================*/
+ * =====================================================
+ */
#define PLATFORM_CONFIG_MAGIC_NUM 0x3d4f5041
#define PLATFORM_CONFIG_MAGIC_NUMBER_LEN 4
@@ -199,12 +204,13 @@ enum platform_config_qsfp_power_class_encoding {
QSFP_POWER_CLASS_7
};
-
-/*=====================================================
+/*
+ * ====================================================
* Port table encodings
- *==================================================== */
+ * ====================================================
+ */
enum platform_config_port_type_encoding {
- PORT_TYPE_RESERVED,
+ PORT_TYPE_UNKNOWN,
PORT_TYPE_DISCONNECTED,
PORT_TYPE_FIXED,
PORT_TYPE_VARIABLE,
@@ -283,4 +289,16 @@ enum platform_config_local_max_timeout_encoding {
LOCAL_MAX_TIMEOUT_1000_S
};
-#endif /*__PLATFORM_CONFIG_H*/
+enum link_tuning_encoding {
+ OPA_PASSIVE_TUNING,
+ OPA_ACTIVE_TUNING,
+ OPA_UNKNOWN_TUNING
+};
+
+/* platform.c */
+void get_platform_config(struct hfi1_devdata *dd);
+void free_platform_config(struct hfi1_devdata *dd);
+int set_qsfp_tx(struct hfi1_pportdata *ppd, int on);
+void tune_serdes(struct hfi1_pportdata *ppd);
+
+#endif /*__PLATFORM_H*/
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index ce036810d576..29a5ad28019b 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -54,31 +51,32 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/seq_file.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
#include "hfi.h"
#include "qp.h"
#include "trace.h"
-#include "sdma.h"
+#include "verbs_txreq.h"
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
-
-static unsigned int hfi1_qp_table_size = 256;
+unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
-static void flush_tx_list(struct hfi1_qp *qp);
+static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
struct sdma_engine *sde,
struct iowait *wait,
struct sdma_txreq *stx,
unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
+static void iowait_sdma_drained(struct iowait *wait);
+static void qp_pio_drain(struct rvt_qp *qp);
-static inline unsigned mk_qpn(struct hfi1_qpn_table *qpt,
- struct qpn_map *map, unsigned off)
+static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off)
{
- return (map - qpt->map) * BITS_PER_PAGE + off;
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
/*
@@ -118,437 +116,15 @@ static const u16 credit_table[31] = {
32768 /* 1E */
};
-static void get_map_page(struct hfi1_qpn_table *qpt, struct qpn_map *map)
-{
- unsigned long page = get_zeroed_page(GFP_KERNEL);
-
- /*
- * Free the page if someone raced with us installing it.
- */
-
- spin_lock(&qpt->lock);
- if (map->page)
- free_page(page);
- else
- map->page = (void *)page;
- spin_unlock(&qpt->lock);
-}
-
-/*
- * Allocate the next available QPN or
- * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
- */
-static int alloc_qpn(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt,
- enum ib_qp_type type, u8 port)
-{
- u32 i, offset, max_scan, qpn;
- struct qpn_map *map;
- u32 ret;
-
- if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
- unsigned n;
-
- ret = type == IB_QPT_GSI;
- n = 1 << (ret + 2 * (port - 1));
- spin_lock(&qpt->lock);
- if (qpt->flags & n)
- ret = -EINVAL;
- else
- qpt->flags |= n;
- spin_unlock(&qpt->lock);
- goto bail;
- }
-
- qpn = qpt->last + qpt->incr;
- if (qpn >= QPN_MAX)
- qpn = qpt->incr | ((qpt->last & 1) ^ 1);
- /* offset carries bit 0 */
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
- max_scan = qpt->nmaps - !offset;
- for (i = 0;;) {
- if (unlikely(!map->page)) {
- get_map_page(qpt, map);
- if (unlikely(!map->page))
- break;
- }
- do {
- if (!test_and_set_bit(offset, map->page)) {
- qpt->last = qpn;
- ret = qpn;
- goto bail;
- }
- offset += qpt->incr;
- /*
- * This qpn might be bogus if offset >= BITS_PER_PAGE.
- * That is OK. It gets re-assigned below
- */
- qpn = mk_qpn(qpt, map, offset);
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
- /*
- * In order to keep the number of pages allocated to a
- * minimum, we scan the all existing pages before increasing
- * the size of the bitmap table.
- */
- if (++i > max_scan) {
- if (qpt->nmaps == QPNMAP_ENTRIES)
- break;
- map = &qpt->map[qpt->nmaps++];
- /* start at incr with current bit 0 */
- offset = qpt->incr | (offset & 1);
- } else if (map < &qpt->map[qpt->nmaps]) {
- ++map;
- /* start at incr with current bit 0 */
- offset = qpt->incr | (offset & 1);
- } else {
- map = &qpt->map[0];
- /* wrap to first map page, invert bit 0 */
- offset = qpt->incr | ((offset & 1) ^ 1);
- }
- /* there can be no bits at shift and below */
- WARN_ON(offset & (dd->qos_shift - 1));
- qpn = mk_qpn(qpt, map, offset);
- }
-
- ret = -ENOMEM;
-
-bail:
- return ret;
-}
-
-static void free_qpn(struct hfi1_qpn_table *qpt, u32 qpn)
-{
- struct qpn_map *map;
-
- map = qpt->map + qpn / BITS_PER_PAGE;
- if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
-}
-
-/*
- * Put the QP into the hash table.
- * The hash table holds a reference to the QP.
- */
-static void insert_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
-{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned long flags;
-
- atomic_inc(&qp->refcount);
- spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
-
- if (qp->ibqp.qp_num <= 1) {
- rcu_assign_pointer(ibp->qp[qp->ibqp.qp_num], qp);
- } else {
- u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
-
- qp->next = dev->qp_dev->qp_table[n];
- rcu_assign_pointer(dev->qp_dev->qp_table[n], qp);
- trace_hfi1_qpinsert(qp, n);
- }
-
- spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
-}
-
-/*
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void remove_qp(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
-{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
- unsigned long flags;
- int removed = 1;
-
- spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
-
- if (rcu_dereference_protected(ibp->qp[0],
- lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp[0], NULL);
- } else if (rcu_dereference_protected(ibp->qp[1],
- lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->qp[1], NULL);
- } else {
- struct hfi1_qp *q;
- struct hfi1_qp __rcu **qpp;
-
- removed = 0;
- qpp = &dev->qp_dev->qp_table[n];
- for (; (q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qp_dev->qpt_lock)))
- != NULL;
- qpp = &q->next)
- if (q == qp) {
- RCU_INIT_POINTER(*qpp,
- rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qp_dev->qpt_lock)));
- removed = 1;
- trace_hfi1_qpremove(qp, n);
- break;
- }
- }
-
- spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
- if (removed) {
- synchronize_rcu();
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-}
-
-/**
- * free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
- *
- * There should not be any QPs still in use.
- * Free memory for table.
- */
-static unsigned free_all_qps(struct hfi1_devdata *dd)
-{
- struct hfi1_ibdev *dev = &dd->verbs_dev;
- unsigned long flags;
- struct hfi1_qp *qp;
- unsigned n, qp_inuse = 0;
-
- for (n = 0; n < dd->num_pports; n++) {
- struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;
-
- if (!hfi1_mcast_tree_empty(ibp))
- qp_inuse++;
- rcu_read_lock();
- if (rcu_dereference(ibp->qp[0]))
- qp_inuse++;
- if (rcu_dereference(ibp->qp[1]))
- qp_inuse++;
- rcu_read_unlock();
- }
-
- if (!dev->qp_dev)
- goto bail;
- spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
- for (n = 0; n < dev->qp_dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(dev->qp_dev->qp_table[n],
- lockdep_is_held(&dev->qp_dev->qpt_lock));
- RCU_INIT_POINTER(dev->qp_dev->qp_table[n], NULL);
-
- for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qp_dev->qpt_lock)))
- qp_inuse++;
- }
- spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
- synchronize_rcu();
-bail:
- return qp_inuse;
-}
-
-/**
- * reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
- * @type: the QP type
- */
-static void reset_qp(struct hfi1_qp *qp, enum ib_qp_type type)
-{
- qp->remote_qpn = 0;
- qp->qkey = 0;
- qp->qp_access_flags = 0;
- iowait_init(
- &qp->s_iowait,
- 1,
- hfi1_do_send,
- iowait_sleep,
- iowait_wakeup);
- qp->s_flags &= HFI1_S_SIGNAL_REQ_WR;
- qp->s_hdrwords = 0;
- qp->s_wqe = NULL;
- qp->s_draining = 0;
- qp->s_next_psn = 0;
- qp->s_last_psn = 0;
- qp->s_sending_psn = 0;
- qp->s_sending_hpsn = 0;
- qp->s_psn = 0;
- qp->r_psn = 0;
- qp->r_msn = 0;
- if (type == IB_QPT_RC) {
- qp->s_state = IB_OPCODE_RC_SEND_LAST;
- qp->r_state = IB_OPCODE_RC_SEND_LAST;
- } else {
- qp->s_state = IB_OPCODE_UC_SEND_LAST;
- qp->r_state = IB_OPCODE_UC_SEND_LAST;
- }
- qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
- qp->r_nak_state = 0;
- qp->r_adefered = 0;
- qp->r_aflags = 0;
- qp->r_flags = 0;
- qp->s_head = 0;
- qp->s_tail = 0;
- qp->s_cur = 0;
- qp->s_acked = 0;
- qp->s_last = 0;
- qp->s_ssn = 1;
- qp->s_lsn = 0;
- clear_ahg(qp);
- qp->s_mig_state = IB_MIG_MIGRATED;
- memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
- qp->r_head_ack_queue = 0;
- qp->s_tail_ack_queue = 0;
- qp->s_num_rd_atomic = 0;
- if (qp->r_rq.wq) {
- qp->r_rq.wq->head = 0;
- qp->r_rq.wq->tail = 0;
- }
- qp->r_sge.num_sge = 0;
-}
-
-static void clear_mr_refs(struct hfi1_qp *qp, int clr_sends)
-{
- unsigned n;
-
- if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
- hfi1_put_ss(&qp->s_rdma_read_sge);
-
- hfi1_put_ss(&qp->r_sge);
-
- if (clr_sends) {
- while (qp->s_last != qp->s_head) {
- struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
- unsigned i;
-
- for (i = 0; i < wqe->wr.num_sge; i++) {
- struct hfi1_sge *sge = &wqe->sg_list[i];
-
- hfi1_put_mr(sge->mr);
- }
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- }
- if (qp->s_rdma_mr) {
- hfi1_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- }
-
- if (qp->ibqp.qp_type != IB_QPT_RC)
- return;
-
- for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
- struct hfi1_ack_entry *e = &qp->s_ack_queue[n];
-
- if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
- e->rdma_sge.mr) {
- hfi1_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- }
-}
-
-/**
- * hfi1_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err)
+static void flush_tx_list(struct rvt_qp *qp)
{
- struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_wc wc;
- int ret = 0;
-
- if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
- goto bail;
-
- qp->state = IB_QPS_ERR;
-
- if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
-
- if (qp->s_flags & HFI1_S_ANY_WAIT_SEND)
- qp->s_flags &= ~HFI1_S_ANY_WAIT_SEND;
-
- write_seqlock(&dev->iowait_lock);
- if (!list_empty(&qp->s_iowait.list) && !(qp->s_flags & HFI1_S_BUSY)) {
- qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
- list_del_init(&qp->s_iowait.list);
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
- write_sequnlock(&dev->iowait_lock);
-
- if (!(qp->s_flags & HFI1_S_BUSY)) {
- qp->s_hdrwords = 0;
- if (qp->s_rdma_mr) {
- hfi1_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- flush_tx_list(qp);
- }
-
- /* Schedule the sending tasklet to drain the send work queue. */
- if (qp->s_last != qp->s_head)
- hfi1_schedule_send(qp);
-
- clear_mr_refs(qp, 0);
-
- memset(&wc, 0, sizeof(wc));
- wc.qp = &qp->ibqp;
- wc.opcode = IB_WC_RECV;
-
- if (test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags)) {
- wc.wr_id = qp->r_wr_id;
- wc.status = err;
- hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wc.status = IB_WC_WR_FLUSH_ERR;
+ struct hfi1_qp_priv *priv = qp->priv;
- if (qp->r_rq.wq) {
- struct hfi1_rwq *wq;
- u32 head;
- u32 tail;
-
- spin_lock(&qp->r_rq.lock);
-
- /* sanity check pointers before trusting them */
- wq = qp->r_rq.wq;
- head = wq->head;
- if (head >= qp->r_rq.size)
- head = 0;
- tail = wq->tail;
- if (tail >= qp->r_rq.size)
- tail = 0;
- while (tail != head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
- if (++tail >= qp->r_rq.size)
- tail = 0;
- hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
- wq->tail = tail;
-
- spin_unlock(&qp->r_rq.lock);
- } else if (qp->ibqp.event_handler)
- ret = 1;
-
-bail:
- return ret;
-}
-
-static void flush_tx_list(struct hfi1_qp *qp)
-{
- while (!list_empty(&qp->s_iowait.tx_head)) {
+ while (!list_empty(&priv->s_iowait.tx_head)) {
struct sdma_txreq *tx;
tx = list_first_entry(
- &qp->s_iowait.tx_head,
+ &priv->s_iowait.tx_head,
struct sdma_txreq,
list);
list_del_init(&tx->list);
@@ -557,14 +133,15 @@ static void flush_tx_list(struct hfi1_qp *qp)
}
}
-static void flush_iowait(struct hfi1_qp *qp)
+static void flush_iowait(struct rvt_qp *qp)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
unsigned long flags;
write_seqlock_irqsave(&dev->iowait_lock, flags);
- if (!list_empty(&qp->s_iowait.list)) {
- list_del_init(&qp->s_iowait.list);
+ if (!list_empty(&priv->s_iowait.list)) {
+ list_del_init(&priv->s_iowait.list);
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
@@ -597,362 +174,106 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
return ib_mtu_enum_to_int(mtu);
}
-
-/**
- * hfi1_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair who's attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for libibverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata)
+int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
{
+ struct ib_qp *ibqp = &qp->ibqp;
struct hfi1_ibdev *dev = to_idev(ibqp->device);
- struct hfi1_qp *qp = to_iqp(ibqp);
- enum ib_qp_state cur_state, new_state;
- struct ib_event ev;
- int lastwqe = 0;
- int mig = 0;
- int ret;
- u32 pmtu = 0; /* for gcc warning only */
struct hfi1_devdata *dd = dd_from_dev(dev);
-
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
-
- cur_state = attr_mask & IB_QP_CUR_STATE ?
- attr->cur_qp_state : qp->state;
- new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_UNSPECIFIED))
- goto inval;
+ u8 sc;
if (attr_mask & IB_QP_AV) {
- u8 sc;
-
- if (attr->ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
- goto inval;
- if (hfi1_check_ah(qp->ibqp.device, &attr->ah_attr))
- goto inval;
sc = ah_to_sc(ibqp->device, &attr->ah_attr);
+ if (sc == 0xf)
+ return -EINVAL;
+
if (!qp_to_sdma_engine(qp, sc) &&
dd->flags & HFI1_HAS_SEND_DMA)
- goto inval;
+ return -EINVAL;
+
+ if (!qp_to_send_context(qp, sc))
+ return -EINVAL;
}
if (attr_mask & IB_QP_ALT_PATH) {
- u8 sc;
-
- if (attr->alt_ah_attr.dlid >= HFI1_MULTICAST_LID_BASE)
- goto inval;
- if (hfi1_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
- goto inval;
- if (attr->alt_pkey_index >= hfi1_get_npkeys(dd))
- goto inval;
sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
+ if (sc == 0xf)
+ return -EINVAL;
+
if (!qp_to_sdma_engine(qp, sc) &&
dd->flags & HFI1_HAS_SEND_DMA)
- goto inval;
- }
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- if (attr->pkey_index >= hfi1_get_npkeys(dd))
- goto inval;
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- if (attr->min_rnr_timer > 31)
- goto inval;
+ return -EINVAL;
- if (attr_mask & IB_QP_PORT)
- if (qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI ||
- attr->port_num == 0 ||
- attr->port_num > ibqp->device->phys_port_cnt)
- goto inval;
-
- if (attr_mask & IB_QP_DEST_QPN)
- if (attr->dest_qp_num > HFI1_QPN_MASK)
- goto inval;
+ if (!qp_to_send_context(qp, sc))
+ return -EINVAL;
+ }
- if (attr_mask & IB_QP_RETRY_CNT)
- if (attr->retry_cnt > 7)
- goto inval;
+ return 0;
+}
- if (attr_mask & IB_QP_RNR_RETRY)
- if (attr->rnr_retry > 7)
- goto inval;
+void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct hfi1_qp_priv *priv = qp->priv;
- /*
- * Don't allow invalid path_mtu values. OK to set greater
- * than the active mtu (or even the max_cap, if we have tuned
- * that to a small mtu. We'll set qp->path_mtu
- * to the lesser of requested attribute mtu and active,
- * for packetizing messages.
- * Note that the QP port has to be set in INIT and MTU in RTR.
- */
- if (attr_mask & IB_QP_PATH_MTU) {
- int mtu, pidx = qp->port_num - 1;
-
- dd = dd_from_dev(dev);
- mtu = verbs_mtu_enum_to_int(ibqp->device, attr->path_mtu);
- if (mtu == -1)
- goto inval;
-
- if (mtu > dd->pport[pidx].ibmtu)
- pmtu = mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
- else
- pmtu = attr->path_mtu;
+ if (attr_mask & IB_QP_AV) {
+ priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
+ priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
}
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- if (attr->path_mig_state == IB_MIG_REARM) {
- if (qp->s_mig_state == IB_MIG_ARMED)
- goto inval;
- if (new_state != IB_QPS_RTS)
- goto inval;
- } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
- if (qp->s_mig_state == IB_MIG_REARM)
- goto inval;
- if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
- goto inval;
- if (qp->s_mig_state == IB_MIG_ARMED)
- mig = 1;
- } else
- goto inval;
+ if (attr_mask & IB_QP_PATH_MIG_STATE &&
+ attr->path_mig_state == IB_MIG_MIGRATED &&
+ qp->s_mig_state == IB_MIG_ARMED) {
+ qp->s_flags |= RVT_S_AHG_CLEAR;
+ priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
+ priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
}
+}
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- if (attr->max_dest_rd_atomic > HFI1_MAX_RDMA_ATOMIC)
- goto inval;
-
- switch (new_state) {
- case IB_QPS_RESET:
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- flush_iowait(qp);
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- /* Stop the sending work queue and retry timer */
- cancel_work_sync(&qp->s_iowait.iowork);
- del_timer_sync(&qp->s_timer);
- iowait_sdma_drain(&qp->s_iowait);
- flush_tx_list(qp);
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
- clear_mr_refs(qp, 1);
- clear_ahg(qp);
- reset_qp(qp, ibqp->qp_type);
- }
- break;
-
- case IB_QPS_RTR:
- /* Allow event to re-trigger if QP set to RTR more than once */
- qp->r_flags &= ~HFI1_R_COMM_EST;
- qp->state = new_state;
- break;
-
- case IB_QPS_SQD:
- qp->s_draining = qp->s_last != qp->s_cur;
- qp->state = new_state;
- break;
+/**
+ * hfi1_check_send_wqe - validate wqe
+ * @qp: the qp
+ * @wqe: the built wqe
+ *
+ * Validate the wqe. This is called prior to inserting the wqe into
+ * the ring, but after the wqe has been set up.
+ *
+ * Returns -EINVAL on failure; on success returns 0, or 1 when the
+ * wqe length is at or below piothreshold.
+ *
+ */
+int hfi1_check_send_wqe(struct rvt_qp *qp,
+ struct rvt_swqe *wqe)
+{
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct rvt_ah *ah;
- case IB_QPS_SQE:
- if (qp->ibqp.qp_type == IB_QPT_RC)
- goto inval;
- qp->state = new_state;
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ if (wqe->length > 0x80000000U)
+ return -EINVAL;
break;
-
- case IB_QPS_ERR:
- lastwqe = hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ case IB_QPT_SMI:
+ ah = ibah_to_rvtah(wqe->ud_wr.ah);
+ if (wqe->length > (1 << ah->log_pmtu))
+ return -EINVAL;
break;
-
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ ah = ibah_to_rvtah(wqe->ud_wr.ah);
+ if (wqe->length > (1 << ah->log_pmtu))
+ return -EINVAL;
+ if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
+ return -EINVAL;
+ break;
default:
- qp->state = new_state;
break;
}
-
- if (attr_mask & IB_QP_PKEY_INDEX)
- qp->s_pkey_index = attr->pkey_index;
-
- if (attr_mask & IB_QP_PORT)
- qp->port_num = attr->port_num;
-
- if (attr_mask & IB_QP_DEST_QPN)
- qp->remote_qpn = attr->dest_qp_num;
-
- if (attr_mask & IB_QP_SQ_PSN) {
- qp->s_next_psn = attr->sq_psn & PSN_MODIFY_MASK;
- qp->s_psn = qp->s_next_psn;
- qp->s_sending_psn = qp->s_next_psn;
- qp->s_last_psn = qp->s_next_psn - 1;
- qp->s_sending_hpsn = qp->s_last_psn;
- }
-
- if (attr_mask & IB_QP_RQ_PSN)
- qp->r_psn = attr->rq_psn & PSN_MODIFY_MASK;
-
- if (attr_mask & IB_QP_ACCESS_FLAGS)
- qp->qp_access_flags = attr->qp_access_flags;
-
- if (attr_mask & IB_QP_AV) {
- qp->remote_ah_attr = attr->ah_attr;
- qp->s_srate = attr->ah_attr.static_rate;
- qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
- qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
- qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
- }
-
- if (attr_mask & IB_QP_ALT_PATH) {
- qp->alt_ah_attr = attr->alt_ah_attr;
- qp->s_alt_pkey_index = attr->alt_pkey_index;
- }
-
- if (attr_mask & IB_QP_PATH_MIG_STATE) {
- qp->s_mig_state = attr->path_mig_state;
- if (mig) {
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = qp->alt_ah_attr.port_num;
- qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= HFI1_S_AHG_CLEAR;
- qp->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
- qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
- }
- }
-
- if (attr_mask & IB_QP_PATH_MTU) {
- struct hfi1_ibport *ibp;
- u8 sc, vl;
- u32 mtu;
-
- dd = dd_from_dev(dev);
- ibp = &dd->pport[qp->port_num - 1].ibport_data;
-
- sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
- vl = sc_to_vlt(dd, sc);
-
- mtu = verbs_mtu_enum_to_int(ibqp->device, pmtu);
- if (vl < PER_VL_SEND_CONTEXTS)
- mtu = min_t(u32, mtu, dd->vld[vl].mtu);
- pmtu = mtu_to_enum(mtu, OPA_MTU_8192);
-
- qp->path_mtu = pmtu;
- qp->pmtu = mtu;
- }
-
- if (attr_mask & IB_QP_RETRY_CNT) {
- qp->s_retry_cnt = attr->retry_cnt;
- qp->s_retry = attr->retry_cnt;
- }
-
- if (attr_mask & IB_QP_RNR_RETRY) {
- qp->s_rnr_retry_cnt = attr->rnr_retry;
- qp->s_rnr_retry = attr->rnr_retry;
- }
-
- if (attr_mask & IB_QP_MIN_RNR_TIMER)
- qp->r_min_rnr_timer = attr->min_rnr_timer;
-
- if (attr_mask & IB_QP_TIMEOUT) {
- qp->timeout = attr->timeout;
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
- }
-
- if (attr_mask & IB_QP_QKEY)
- qp->qkey = attr->qkey;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
-
- if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
- qp->s_max_rd_atomic = attr->max_rd_atomic;
-
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
-
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
- insert_qp(dev, qp);
-
- if (lastwqe) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- if (mig) {
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
- }
- ret = 0;
- goto bail;
-
-inval:
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- ret = -EINVAL;
-
-bail:
- return ret;
-}
-
-int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr)
-{
- struct hfi1_qp *qp = to_iqp(ibqp);
-
- attr->qp_state = qp->state;
- attr->cur_qp_state = attr->qp_state;
- attr->path_mtu = qp->path_mtu;
- attr->path_mig_state = qp->s_mig_state;
- attr->qkey = qp->qkey;
- attr->rq_psn = mask_psn(qp->r_psn);
- attr->sq_psn = mask_psn(qp->s_next_psn);
- attr->dest_qp_num = qp->remote_qpn;
- attr->qp_access_flags = qp->qp_access_flags;
- attr->cap.max_send_wr = qp->s_size - 1;
- attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
- attr->cap.max_send_sge = qp->s_max_sge;
- attr->cap.max_recv_sge = qp->r_rq.max_sge;
- attr->cap.max_inline_data = 0;
- attr->ah_attr = qp->remote_ah_attr;
- attr->alt_ah_attr = qp->alt_ah_attr;
- attr->pkey_index = qp->s_pkey_index;
- attr->alt_pkey_index = qp->s_alt_pkey_index;
- attr->en_sqd_async_notify = 0;
- attr->sq_draining = qp->s_draining;
- attr->max_rd_atomic = qp->s_max_rd_atomic;
- attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
- attr->min_rnr_timer = qp->r_min_rnr_timer;
- attr->port_num = qp->port_num;
- attr->timeout = qp->timeout;
- attr->retry_cnt = qp->s_retry_cnt;
- attr->rnr_retry = qp->s_rnr_retry_cnt;
- attr->alt_port_num = qp->alt_ah_attr.port_num;
- attr->alt_timeout = qp->alt_timeout;
-
- init_attr->event_handler = qp->ibqp.event_handler;
- init_attr->qp_context = qp->ibqp.qp_context;
- init_attr->send_cq = qp->ibqp.send_cq;
- init_attr->recv_cq = qp->ibqp.recv_cq;
- init_attr->srq = qp->ibqp.srq;
- init_attr->cap = attr->cap;
- if (qp->s_flags & HFI1_S_SIGNAL_REQ_WR)
- init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- else
- init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr->qp_type = qp->ibqp.qp_type;
- init_attr->port_num = qp->port_num;
- return 0;
+ return wqe->length <= piothreshold;
}
/**
@@ -961,7 +282,7 @@ int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
*
* Returns the AETH.
*/
-__be32 hfi1_compute_aeth(struct hfi1_qp *qp)
+__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
u32 aeth = qp->r_msn & HFI1_MSN_MASK;
@@ -974,7 +295,7 @@ __be32 hfi1_compute_aeth(struct hfi1_qp *qp)
} else {
u32 min, max, x;
u32 credits;
- struct hfi1_rwq *wq = qp->r_rq.wq;
+ struct rvt_rwq *wq = qp->r_rq.wq;
u32 head;
u32 tail;
@@ -1004,12 +325,13 @@ __be32 hfi1_compute_aeth(struct hfi1_qp *qp)
x = (min + max) / 2;
if (credit_table[x] == credits)
break;
- if (credit_table[x] > credits)
+ if (credit_table[x] > credits) {
max = x;
- else if (min == x)
- break;
- else
+ } else {
+ if (min == x)
+ break;
min = x;
+ }
}
aeth |= x << HFI1_AETH_CREDIT_SHIFT;
}
@@ -1017,348 +339,58 @@ __be32 hfi1_compute_aeth(struct hfi1_qp *qp)
}
/**
- * hfi1_create_qp - create a queue pair for a device
- * @ibpd: the protection domain who's device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: user data for libibverbs.so
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
- struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
-{
- struct hfi1_qp *qp;
- int err;
- struct hfi1_swqe *swq = NULL;
- struct hfi1_ibdev *dev;
- struct hfi1_devdata *dd;
- size_t sz;
- size_t sg_list_sz;
- struct ib_qp *ret;
-
- if (init_attr->cap.max_send_sge > hfi1_max_sges ||
- init_attr->cap.max_send_wr > hfi1_max_qp_wrs ||
- init_attr->create_flags) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- /* Check receive queue parameters if no SRQ is specified. */
- if (!init_attr->srq) {
- if (init_attr->cap.max_recv_sge > hfi1_max_sges ||
- init_attr->cap.max_recv_wr > hfi1_max_qp_wrs) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- if (init_attr->cap.max_send_sge +
- init_attr->cap.max_send_wr +
- init_attr->cap.max_recv_sge +
- init_attr->cap.max_recv_wr == 0) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- }
-
- switch (init_attr->qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- if (init_attr->port_num == 0 ||
- init_attr->port_num > ibpd->device->phys_port_cnt) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
- case IB_QPT_UC:
- case IB_QPT_RC:
- case IB_QPT_UD:
- sz = sizeof(struct hfi1_sge) *
- init_attr->cap.max_send_sge +
- sizeof(struct hfi1_swqe);
- swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
- if (swq == NULL) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
- sz = sizeof(*qp);
- sg_list_sz = 0;
- if (init_attr->srq) {
- struct hfi1_srq *srq = to_isrq(init_attr->srq);
-
- if (srq->rq.max_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (srq->rq.max_sge - 1);
- } else if (init_attr->cap.max_recv_sge > 1)
- sg_list_sz = sizeof(*qp->r_sg_list) *
- (init_attr->cap.max_recv_sge - 1);
- qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
- if (!qp) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_swq;
- }
- RCU_INIT_POINTER(qp->next, NULL);
- qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
- if (!qp->s_hdr) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- qp->timeout_jiffies =
- usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
- 1000UL);
- if (init_attr->srq)
- sz = 0;
- else {
- qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
- qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
- sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
- sizeof(struct hfi1_rwqe);
- qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) +
- qp->r_rq.size * sz);
- if (!qp->r_rq.wq) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_qp;
- }
- }
-
- /*
- * ib_create_qp() will initialize qp->ibqp
- * except for qp->ibqp.qp_num.
- */
- spin_lock_init(&qp->r_lock);
- spin_lock_init(&qp->s_lock);
- spin_lock_init(&qp->r_rq.lock);
- atomic_set(&qp->refcount, 0);
- init_waitqueue_head(&qp->wait);
- init_timer(&qp->s_timer);
- qp->s_timer.data = (unsigned long)qp;
- INIT_LIST_HEAD(&qp->rspwait);
- qp->state = IB_QPS_RESET;
- qp->s_wq = swq;
- qp->s_size = init_attr->cap.max_send_wr + 1;
- qp->s_max_sge = init_attr->cap.max_send_sge;
- if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
- qp->s_flags = HFI1_S_SIGNAL_REQ_WR;
- dev = to_idev(ibpd->device);
- dd = dd_from_dev(dev);
- err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
- init_attr->port_num);
- if (err < 0) {
- ret = ERR_PTR(err);
- vfree(qp->r_rq.wq);
- goto bail_qp;
- }
- qp->ibqp.qp_num = err;
- qp->port_num = init_attr->port_num;
- reset_qp(qp, init_attr->qp_type);
-
- break;
-
- default:
- /* Don't support raw QPs */
- ret = ERR_PTR(-ENOSYS);
- goto bail;
- }
-
- init_attr->cap.max_inline_data = 0;
-
- /*
- * Return the address of the RWQ as the offset to mmap.
- * See hfi1_mmap() for details.
- */
- if (udata && udata->outlen >= sizeof(__u64)) {
- if (!qp->r_rq.wq) {
- __u64 offset = 0;
-
- err = ib_copy_to_udata(udata, &offset,
- sizeof(offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- } else {
- u32 s = sizeof(struct hfi1_rwq) + qp->r_rq.size * sz;
-
- qp->ip = hfi1_create_mmap_info(dev, s,
- ibpd->uobject->context,
- qp->r_rq.wq);
- if (!qp->ip) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- err = ib_copy_to_udata(udata, &(qp->ip->offset),
- sizeof(qp->ip->offset));
- if (err) {
- ret = ERR_PTR(err);
- goto bail_ip;
- }
- }
- }
-
- spin_lock(&dev->n_qps_lock);
- if (dev->n_qps_allocated == hfi1_max_qps) {
- spin_unlock(&dev->n_qps_lock);
- ret = ERR_PTR(-ENOMEM);
- goto bail_ip;
- }
-
- dev->n_qps_allocated++;
- spin_unlock(&dev->n_qps_lock);
-
- if (qp->ip) {
- spin_lock_irq(&dev->pending_lock);
- list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
- spin_unlock_irq(&dev->pending_lock);
- }
-
- ret = &qp->ibqp;
-
- /*
- * We have our QP and its good, now keep track of what types of opcodes
- * can be processed on this QP. We do this by keeping track of what the
- * 3 high order bits of the opcode are.
- */
- switch (init_attr->qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- case IB_QPT_UD:
- qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & OPCODE_QP_MASK;
- break;
- case IB_QPT_RC:
- qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & OPCODE_QP_MASK;
- break;
- case IB_QPT_UC:
- qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & OPCODE_QP_MASK;
- break;
- default:
- ret = ERR_PTR(-EINVAL);
- goto bail_ip;
- }
-
- goto bail;
-
-bail_ip:
- if (qp->ip)
- kref_put(&qp->ip->ref, hfi1_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
-bail_qp:
- kfree(qp->s_hdr);
- kfree(qp);
-bail_swq:
- vfree(swq);
-bail:
- return ret;
-}
-
-/**
- * hfi1_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
+ * _hfi1_schedule_send - schedule progress
+ * @qp: the QP
*
- * Returns 0 on success.
+ * This schedules qp progress w/o regard to the s_flags.
*
- * Note that this can be called while the QP is actively sending or
- * receiving!
+ * It is only used in the post-send path, which does not hold
+ * the s_lock.
*/
-int hfi1_destroy_qp(struct ib_qp *ibqp)
+void _hfi1_schedule_send(struct rvt_qp *qp)
{
- struct hfi1_qp *qp = to_iqp(ibqp);
- struct hfi1_ibdev *dev = to_idev(ibqp->device);
-
- /* Make sure HW and driver activity is stopped. */
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- flush_iowait(qp);
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_ANY_WAIT);
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
- cancel_work_sync(&qp->s_iowait.iowork);
- del_timer_sync(&qp->s_timer);
- iowait_sdma_drain(&qp->s_iowait);
- flush_tx_list(qp);
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_lock);
- clear_mr_refs(qp, 1);
- clear_ahg(qp);
- }
- spin_unlock(&qp->s_lock);
- spin_unlock_irq(&qp->r_lock);
-
- /* all user's cleaned up, mark it available */
- free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
- spin_lock(&dev->n_qps_lock);
- dev->n_qps_allocated--;
- spin_unlock(&dev->n_qps_lock);
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- if (qp->ip)
- kref_put(&qp->ip->ref, hfi1_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- vfree(qp->s_wq);
- kfree(qp->s_hdr);
- kfree(qp);
- return 0;
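+ /*
+ * Run on the SDMA engine's CPU when one is bound, otherwise on
+ * the first CPU of the device's NUMA node.
+ */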
+ iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
+ priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)));
}
-/**
- * init_qpn_table - initialize the QP number table for a device
- * @qpt: the QPN table
- */
-static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt)
+static void qp_pio_drain(struct rvt_qp *qp)
{
- u32 offset, qpn, i;
- struct qpn_map *map;
- int ret = 0;
+ struct hfi1_ibdev *dev;
+ struct hfi1_qp_priv *priv = qp->priv;
- spin_lock_init(&qpt->lock);
-
- qpt->last = 0;
- qpt->incr = 1 << dd->qos_shift;
-
- /* insure we don't assign QPs from KDETH 64K window */
- qpn = kdeth_qp << 16;
- qpt->nmaps = qpn / BITS_PER_PAGE;
- /* This should always be zero */
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpt->nmaps];
- dd_dev_info(dd, "Reserving QPNs for KDETH window from 0x%x to 0x%x\n",
- qpn, qpn + 65535);
- for (i = 0; i < 65536; i++) {
- if (!map->page) {
- get_map_page(qpt, map);
- if (!map->page) {
- ret = -ENOMEM;
- break;
- }
- }
- set_bit(offset, map->page);
- offset++;
- if (offset == BITS_PER_PAGE) {
- /* next page */
- qpt->nmaps++;
- map++;
- offset = 0;
- }
+ if (!priv->s_sendcontext)
+ return;
+ dev = to_idev(qp->ibqp.device);
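+ /*
+ * Arm the want-pio-buffer interrupt around each wait so that a
+ * freed buffer wakes the drain, then disarm before rechecking.
+ */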
+ while (iowait_pio_pending(&priv->s_iowait)) {
+ write_seqlock_irq(&dev->iowait_lock);
+ hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
+ write_sequnlock_irq(&dev->iowait_lock);
+ iowait_pio_drain(&priv->s_iowait);
+ write_seqlock_irq(&dev->iowait_lock);
+ hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
+ write_sequnlock_irq(&dev->iowait_lock);
}
- return ret;
}
/**
- * free_qpn_table - free the QP number table for a device
- * @qpt: the QPN table
+ * hfi1_schedule_send - schedule progress
+ * @qp: the QP
+ *
+ * This schedules qp progress; the caller should hold
+ * the s_lock.
*/
-static void free_qpn_table(struct hfi1_qpn_table *qpt)
+void hfi1_schedule_send(struct rvt_qp *qp)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
- free_page((unsigned long) qpt->map[i].page);
+ if (hfi1_send_ok(qp))
+ _hfi1_schedule_send(qp);
}
/**
@@ -1368,7 +400,7 @@ static void free_qpn_table(struct hfi1_qpn_table *qpt)
*
* The QP s_lock should be held.
*/
-void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth)
+void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
@@ -1378,27 +410,27 @@ void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth)
* honor the credit field.
*/
if (credit == HFI1_AETH_CREDIT_INVAL) {
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
- qp->s_flags |= HFI1_S_UNLIMITED_CREDIT;
- if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
+ qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp);
}
}
- } else if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT)) {
+ } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
/* Compute new LSN (i.e., MSN + credit) */
credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
if (cmp_msn(credit, qp->s_lsn) > 0) {
qp->s_lsn = credit;
- if (qp->s_flags & HFI1_S_WAIT_SSN_CREDIT) {
- qp->s_flags &= ~HFI1_S_WAIT_SSN_CREDIT;
+ if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
+ qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
hfi1_schedule_send(qp);
}
}
}
}
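
A worked sketch of the decode above, assuming the usual hfi1 AETH layout (a
5-bit credit code above a 24-bit MSN; the exact shift/mask constants live in
the driver headers):

	u32 aeth = (9 << HFI1_AETH_CREDIT_SHIFT) | 0x000100;
	u32 code = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;
	/* code == 9; new s_lsn = (0x000100 + credit_table[9]) & HFI1_MSN_MASK */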
-void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag)
+void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
unsigned long flags;
@@ -1421,16 +453,17 @@ static int iowait_sleep(
unsigned seq)
{
struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
+ struct hfi1_qp_priv *priv;
unsigned long flags;
int ret = 0;
struct hfi1_ibdev *dev;
qp = tx->qp;
+ priv = qp->priv;
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
-
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
/*
* If we couldn't queue the DMA request, save the info
* and try again later rather than destroying the
@@ -1442,18 +475,18 @@ static int iowait_sleep(
write_seqlock(&dev->iowait_lock);
if (sdma_progress(sde, seq, stx))
goto eagain;
- if (list_empty(&qp->s_iowait.list)) {
+ if (list_empty(&priv->s_iowait.list)) {
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
- ibp->n_dmawait++;
- qp->s_flags |= HFI1_S_WAIT_DMA_DESC;
- list_add_tail(&qp->s_iowait.list, &sde->dmawait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_DMA_DESC);
+ ibp->rvp.n_dmawait++;
+ qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+ list_add_tail(&priv->s_iowait.list, &sde->dmawait);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
atomic_inc(&qp->refcount);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EBUSY;
} else {
@@ -1470,61 +503,25 @@ eagain:
static void iowait_wakeup(struct iowait *wait, int reason)
{
- struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);
+ struct rvt_qp *qp = iowait_to_qp(wait);
WARN_ON(reason != SDMA_AVAIL_REASON);
- hfi1_qp_wakeup(qp, HFI1_S_WAIT_DMA_DESC);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}
-int hfi1_qp_init(struct hfi1_ibdev *dev)
+static void iowait_sdma_drained(struct iowait *wait)
{
- struct hfi1_devdata *dd = dd_from_dev(dev);
- int i;
- int ret = -ENOMEM;
-
- /* allocate parent object */
- dev->qp_dev = kzalloc(sizeof(*dev->qp_dev), GFP_KERNEL);
- if (!dev->qp_dev)
- goto nomem;
- /* allocate hash table */
- dev->qp_dev->qp_table_size = hfi1_qp_table_size;
- dev->qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size);
- dev->qp_dev->qp_table =
- kmalloc(dev->qp_dev->qp_table_size *
- sizeof(*dev->qp_dev->qp_table),
- GFP_KERNEL);
- if (!dev->qp_dev->qp_table)
- goto nomem;
- for (i = 0; i < dev->qp_dev->qp_table_size; i++)
- RCU_INIT_POINTER(dev->qp_dev->qp_table[i], NULL);
- spin_lock_init(&dev->qp_dev->qpt_lock);
- /* initialize qpn map */
- ret = init_qpn_table(dd, &dev->qp_dev->qpn_table);
- if (ret)
- goto nomem;
- return ret;
-nomem:
- if (dev->qp_dev) {
- kfree(dev->qp_dev->qp_table);
- free_qpn_table(&dev->qp_dev->qpn_table);
- kfree(dev->qp_dev);
- }
- return ret;
-}
+ struct rvt_qp *qp = iowait_to_qp(wait);
-void hfi1_qp_exit(struct hfi1_ibdev *dev)
-{
- struct hfi1_devdata *dd = dd_from_dev(dev);
- u32 qps_inuse;
-
- qps_inuse = free_all_qps(dd);
- if (qps_inuse)
- dd_dev_err(dd, "QP memory leak! %u still in use\n",
- qps_inuse);
- if (dev->qp_dev) {
- kfree(dev->qp_dev->qp_table);
- free_qpn_table(&dev->qp_dev->qpn_table);
- kfree(dev->qp_dev);
+ /*
+ * This happens when the send engine notes
+ * a QP in the error state and cannot
+ * do the flush work until that QP's
+ * sdma work has finished.
+ */
+ if (qp->s_flags & RVT_S_WAIT_DMA) {
+ qp->s_flags &= ~RVT_S_WAIT_DMA;
+ hfi1_schedule_send(qp);
}
}
@@ -1537,7 +534,7 @@ void hfi1_qp_exit(struct hfi1_ibdev *dev)
* Return:
* A send engine for the qp or NULL for SMI type qp.
*/
-struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5)
+struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct sdma_engine *sde;
@@ -1554,9 +551,33 @@ struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5)
return sde;
}
+/*
+ * qp_to_send_context - map a qp to a send context
+ * @qp: the QP
+ * @sc5: the 5-bit sc
+ *
+ * Return:
+ * A send context for the qp
+ */
+struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ /* SMA packets to VL15 */
+ return dd->vld[15].sc;
+ default:
+ break;
+ }
+
+ return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
+ sc5);
+}
+
struct qp_iter {
struct hfi1_ibdev *dev;
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
int specials;
int n;
};
@@ -1570,7 +591,7 @@ struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
return NULL;
iter->dev = dev;
- iter->specials = dev->ibdev.phys_port_cnt * 2;
+ iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;
if (qp_iter_next(iter)) {
kfree(iter);
return NULL;
@@ -1584,8 +605,8 @@ int qp_iter_next(struct qp_iter *iter)
struct hfi1_ibdev *dev = iter->dev;
int n = iter->n;
int ret = 1;
- struct hfi1_qp *pqp = iter->qp;
- struct hfi1_qp *qp;
+ struct rvt_qp *pqp = iter->qp;
+ struct rvt_qp *qp;
/*
* The approach is to consider the special qps
@@ -1597,11 +618,11 @@ int qp_iter_next(struct qp_iter *iter)
*
* n = 0..iter->specials is the special qp indices
*
- * n = iter->specials..dev->qp_dev->qp_table_size+iter->specials are
+ * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
* the potential hash bucket entries
*
*/
- for (; n < dev->qp_dev->qp_table_size + iter->specials; n++) {
+ for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
if (pqp) {
qp = rcu_dereference(pqp->next);
} else {
@@ -1610,17 +631,17 @@ int qp_iter_next(struct qp_iter *iter)
struct hfi1_ibport *ibp;
int pidx;
- pidx = n % dev->ibdev.phys_port_cnt;
+ pidx = n % dev->rdi.ibdev.phys_port_cnt;
ppd = &dd_from_dev(dev)->pport[pidx];
ibp = &ppd->ibport_data;
if (!(n & 1))
- qp = rcu_dereference(ibp->qp[0]);
+ qp = rcu_dereference(ibp->rvp.qp[0]);
else
- qp = rcu_dereference(ibp->qp[1]);
+ qp = rcu_dereference(ibp->rvp.qp[1]);
} else {
qp = rcu_dereference(
- dev->qp_dev->qp_table[
+ dev->rdi.qp_dev->qp_table[
(n - iter->specials)]);
}
}
@@ -1638,7 +659,7 @@ static const char * const qp_type_str[] = {
"SMI", "GSI", "RC", "UC", "UD",
};
-static int qp_idle(struct hfi1_qp *qp)
+static int qp_idle(struct rvt_qp *qp)
{
return
qp->s_last == qp->s_acked &&
@@ -1649,14 +670,17 @@ static int qp_idle(struct hfi1_qp *qp)
void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
- struct hfi1_swqe *wqe;
- struct hfi1_qp *qp = iter->qp;
+ struct rvt_swqe *wqe;
+ struct rvt_qp *qp = iter->qp;
+ struct hfi1_qp_priv *priv = qp->priv;
struct sdma_engine *sde;
+ struct send_context *send_context;
- sde = qp_to_sdma_engine(qp, qp->s_sc);
- wqe = get_swqe_ptr(qp, qp->s_last);
+ sde = qp_to_sdma_engine(qp, priv->s_sc);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ send_context = qp_to_send_context(qp, priv->s_sc);
seq_printf(s,
- "N %d %s QP%u R %u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x SL %u MTU %d %u %u %u SDE %p,%u\n",
+ "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
iter->n,
qp_idle(qp) ? "I" : "B",
qp->ibqp.qp_num,
@@ -1666,8 +690,9 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
wqe ? wqe->wr.opcode : 0,
qp->s_hdrwords,
qp->s_flags,
- atomic_read(&qp->s_iowait.sdma_busy),
- !list_empty(&qp->s_iowait.list),
+ iowait_sdma_pending(&priv->s_iowait),
+ iowait_pio_pending(&priv->s_iowait),
+ !list_empty(&priv->s_iowait.list),
qp->timeout,
wqe ? wqe->ssn : 0,
qp->s_lsn,
@@ -1676,20 +701,26 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
qp->s_sending_psn, qp->s_sending_hpsn,
qp->s_last, qp->s_acked, qp->s_cur,
qp->s_tail, qp->s_head, qp->s_size,
+ qp->s_avail,
qp->remote_qpn,
qp->remote_ah_attr.dlid,
qp->remote_ah_attr.sl,
qp->pmtu,
+ qp->s_retry,
qp->s_retry_cnt,
- qp->timeout,
qp->s_rnr_retry_cnt,
sde,
- sde ? sde->this_idx : 0);
+ sde ? sde->this_idx : 0,
+ send_context,
+ send_context ? send_context->sw_index : 0,
+ ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
+ ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
+ qp->pid);
}
-void qp_comm_est(struct hfi1_qp *qp)
+void qp_comm_est(struct rvt_qp *qp)
{
- qp->r_flags |= HFI1_R_COMM_EST;
+ qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -1700,24 +731,241 @@ void qp_comm_est(struct hfi1_qp *qp)
}
}
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ gfp_t gfp)
+{
+ struct hfi1_qp_priv *priv;
+
+ priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->owner = qp;
+
+ priv->s_hdr = kzalloc_node(sizeof(*priv->s_hdr), gfp, rdi->dparms.node);
+ if (!priv->s_hdr) {
+ kfree(priv);
+ return ERR_PTR(-ENOMEM);
+ }
+ setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
+ qp->s_timer.function = hfi1_rc_timeout;
+ return priv;
+}
+
+void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ kfree(priv->s_hdr);
+ kfree(priv);
+}
+
+unsigned free_all_qps(struct rvt_dev_info *rdi)
+{
+ struct hfi1_ibdev *verbs_dev = container_of(rdi,
+ struct hfi1_ibdev,
+ rdi);
+ struct hfi1_devdata *dd = container_of(verbs_dev,
+ struct hfi1_devdata,
+ verbs_dev);
+ int n;
+ unsigned qp_inuse = 0;
+
+ for (n = 0; n < dd->num_pports; n++) {
+ struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;
+
+ rcu_read_lock();
+ if (rcu_dereference(ibp->rvp.qp[0]))
+ qp_inuse++;
+ if (rcu_dereference(ibp->rvp.qp[1]))
+ qp_inuse++;
+ rcu_read_unlock();
+ }
+
+ return qp_inuse;
+}
+
+void flush_qp_waiters(struct rvt_qp *qp)
+{
+ flush_iowait(qp);
+ hfi1_stop_rc_timers(qp);
+}
+
+void stop_send_queue(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ cancel_work_sync(&priv->s_iowait.iowork);
+ hfi1_del_timers_sync(qp);
+}
+
+void quiesce_qp(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ iowait_sdma_drain(&priv->s_iowait);
+ qp_pio_drain(qp);
+ flush_tx_list(qp);
+}
+
+void notify_qp_reset(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ iowait_init(
+ &priv->s_iowait,
+ 1,
+ _hfi1_do_send,
+ iowait_sleep,
+ iowait_wakeup,
+ iowait_sdma_drained);
+ priv->r_adefered = 0;
+ clear_ahg(qp);
+}
+
/*
* Switch to alternate path.
* The QP s_lock should be held and interrupts disabled.
*/
-void hfi1_migrate_qp(struct hfi1_qp *qp)
+void hfi1_migrate_qp(struct rvt_qp *qp)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct ib_event ev;
qp->s_mig_state = IB_MIG_MIGRATED;
qp->remote_ah_attr = qp->alt_ah_attr;
qp->port_num = qp->alt_ah_attr.port_num;
qp->s_pkey_index = qp->s_alt_pkey_index;
- qp->s_flags |= HFI1_S_AHG_CLEAR;
- qp->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
- qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
+ qp->s_flags |= RVT_S_AHG_CLEAR;
+ priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
ev.event = IB_EVENT_PATH_MIG;
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
+
+int mtu_to_path_mtu(u32 mtu)
+{
+ return mtu_to_enum(mtu, OPA_MTU_8192);
+}
+
+u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
+{
+ u32 mtu;
+ struct hfi1_ibdev *verbs_dev = container_of(rdi,
+ struct hfi1_ibdev,
+ rdi);
+ struct hfi1_devdata *dd = container_of(verbs_dev,
+ struct hfi1_devdata,
+ verbs_dev);
+ struct hfi1_ibport *ibp;
+ u8 sc, vl;
+
+ ibp = &dd->pport[qp->port_num - 1].ibport_data;
+ sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
+ vl = sc_to_vlt(dd, sc);
+
+ mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
+ if (vl < PER_VL_SEND_CONTEXTS)
+ mtu = min_t(u32, mtu, dd->vld[vl].mtu);
+ return mtu;
+}
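
A numeric sketch of the clamp above (values illustrative): a path MTU enum
that decodes to 8192 bytes is still capped by the MTU provisioned on the VL
backing this QP's SC:

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu); /* e.g. 8192 */
	mtu = min_t(u32, mtu, dd->vld[vl].mtu);             /* e.g. 4096 wins */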
+
+int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr)
+{
+ int mtu, pidx = qp->port_num - 1;
+ struct hfi1_ibdev *verbs_dev = container_of(rdi,
+ struct hfi1_ibdev,
+ rdi);
+ struct hfi1_devdata *dd = container_of(verbs_dev,
+ struct hfi1_devdata,
+ verbs_dev);
+ mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
+ if (mtu == -1)
+ return -1; /* values less than 0 are error */
+
+ if (mtu > dd->pport[pidx].ibmtu)
+ return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
+ else
+ return attr->path_mtu;
+}
+
+void notify_error_qp(struct rvt_qp *qp)
+{
+ struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ write_seqlock(&dev->iowait_lock);
+ if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
+ qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
+ list_del_init(&priv->s_iowait.list);
+ if (atomic_dec_and_test(&qp->refcount))
+ wake_up(&qp->wait);
+ }
+ write_sequnlock(&dev->iowait_lock);
+
+ if (!(qp->s_flags & RVT_S_BUSY)) {
+ qp->s_hdrwords = 0;
+ if (qp->s_rdma_mr) {
+ rvt_put_mr(qp->s_rdma_mr);
+ qp->s_rdma_mr = NULL;
+ }
+ flush_tx_list(qp);
+ }
+}
+
+/**
+ * hfi1_error_port_qps - put a port's RC/UC qps into error state
+ * @ibp: the ibport.
+ * @sl: the service level.
+ *
+ * This function places all RC/UC qps with a given service level into error
+ * state. It is generally called to force upper layer apps to abandon stale qps
+ * after an sl->sc mapping change.
+ */
+void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
+{
+ struct rvt_qp *qp = NULL;
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
+ int n;
+ int lastwqe;
+ struct ib_event ev;
+
+ rcu_read_lock();
+
+ /* Deal only with RC/UC qps that use the given SL. */
+ for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
+ for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
+ qp = rcu_dereference(qp->next)) {
+ if (qp->port_num == ppd->port &&
+ (qp->ibqp.qp_type == IB_QPT_UC ||
+ qp->ibqp.qp_type == IB_QPT_RC) &&
+ qp->remote_ah_attr.sl == sl &&
+ (ib_rvt_state_ops[qp->state] &
+ RVT_POST_SEND_OK)) {
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ lastwqe = rvt_error_qp(qp,
+ IB_WC_WR_FLUSH_ERR);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+ if (lastwqe) {
+ ev.device = qp->ibqp.device;
+ ev.element.qp = &qp->ibqp;
+ ev.event =
+ IB_EVENT_QP_LAST_WQE_REACHED;
+ qp->ibqp.event_handler(&ev,
+ qp->ibqp.qp_context);
+ }
+ }
+ }
+ }
+
+ rcu_read_unlock();
+}
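
A hedged sketch of the intended call site (the loop and the new_map name are
illustrative): when the SM rewrites the sl->sc table, only the SLs that
actually changed are forced through this routine:

	for (sl = 0; sl < ARRAY_SIZE(ibp->sl_to_sc); sl++) {
		if (ibp->sl_to_sc[sl] != new_map[sl]) {
			ibp->sl_to_sc[sl] = new_map[sl];
			hfi1_error_port_qps(ibp, sl);
		}
	}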
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/staging/rdma/hfi1/qp.h
index 62a94c5d7dca..e7bc8d6cf681 100644
--- a/drivers/staging/rdma/hfi1/qp.h
+++ b/drivers/staging/rdma/hfi1/qp.h
@@ -1,14 +1,13 @@
#ifndef _QP_H
#define _QP_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -51,119 +48,33 @@
*/
#include <linux/hash.h>
+#include <rdma/rdmavt_qp.h>
#include "verbs.h"
#include "sdma.h"
-#define QPN_MAX (1 << 24)
-#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
+extern unsigned int hfi1_qp_table_size;
/*
- * QPN-map pages start out as NULL, they get allocated upon
- * first use and are never deallocated. This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
- void *page;
-};
-
-struct hfi1_qpn_table {
- spinlock_t lock; /* protect changes in this struct */
- unsigned flags; /* flags for QP0/1 allocated for each port */
- u32 last; /* last QP number allocated */
- u32 nmaps; /* size of the map table */
- u16 limit;
- u8 incr;
- /* bit map of free QP numbers other than 0/1 */
- struct qpn_map map[QPNMAP_ENTRIES];
-};
-
-struct hfi1_qp_ibdev {
- u32 qp_table_size;
- u32 qp_table_bits;
- struct hfi1_qp __rcu **qp_table;
- spinlock_t qpt_lock;
- struct hfi1_qpn_table qpn_table;
-};
-
-static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn)
-{
- return hash_32(qpn, dev->qp_table_bits);
-}
-
-/**
- * hfi1_lookup_qpn - return the QP with the given QPN
- * @ibp: the ibport
- * @qpn: the QP number to look up
- *
- * The caller must hold the rcu_read_lock(), and keep the lock until
- * the returned qp is no longer in use.
+ * clear_ahg - clear ahg from QP
*/
-static inline struct hfi1_qp *hfi1_lookup_qpn(struct hfi1_ibport *ibp,
- u32 qpn) __must_hold(RCU)
+static inline void clear_ahg(struct rvt_qp *qp)
{
- struct hfi1_qp *qp = NULL;
-
- if (unlikely(qpn <= 1)) {
- qp = rcu_dereference(ibp->qp[qpn]);
- } else {
- struct hfi1_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
- u32 n = qpn_hash(dev->qp_dev, qpn);
-
- for (qp = rcu_dereference(dev->qp_dev->qp_table[n]); qp;
- qp = rcu_dereference(qp->next))
- if (qp->ibqp.qp_num == qpn)
- break;
- }
- return qp;
-}
+ struct hfi1_qp_priv *priv = qp->priv;
-/**
- * clear_ahg - reset ahg status in qp
- * @qp - qp pointer
- */
-static inline void clear_ahg(struct hfi1_qp *qp)
-{
- qp->s_hdr->ahgcount = 0;
- qp->s_flags &= ~(HFI1_S_AHG_VALID | HFI1_S_AHG_CLEAR);
- if (qp->s_sde && qp->s_ahgidx >= 0)
- sdma_ahg_free(qp->s_sde, qp->s_ahgidx);
+ priv->s_hdr->ahgcount = 0;
+ qp->s_flags &= ~(RVT_S_AHG_VALID | RVT_S_AHG_CLEAR);
+ if (priv->s_sde && qp->s_ahgidx >= 0)
+ sdma_ahg_free(priv->s_sde, qp->s_ahgidx);
qp->s_ahgidx = -1;
}
/**
- * hfi1_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-int hfi1_error_qp(struct hfi1_qp *qp, enum ib_wc_status err);
-
-/**
- * hfi1_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair who's attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for libibverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int hfi1_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_udata *udata);
-
-int hfi1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
- int attr_mask, struct ib_qp_init_attr *init_attr);
-
-/**
* hfi1_compute_aeth - compute the AETH (syndrome + MSN)
* @qp: the queue pair to compute the AETH for
*
* Returns the AETH.
*/
-__be32 hfi1_compute_aeth(struct hfi1_qp *qp);
+__be32 hfi1_compute_aeth(struct rvt_qp *qp);
/**
* hfi1_create_qp - create a queue pair for a device
@@ -179,45 +90,23 @@ struct ib_qp *hfi1_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
/**
- * hfi1_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int hfi1_destroy_qp(struct ib_qp *ibqp);
-
-/**
* hfi1_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
* @aeth: the Acknowledge Extended Transport Header
*
* The QP s_lock should be held.
*/
-void hfi1_get_credit(struct hfi1_qp *qp, u32 aeth);
-
-/**
- * hfi1_qp_init - allocate QP tables
- * @dev: a pointer to the hfi1_ibdev
- */
-int hfi1_qp_init(struct hfi1_ibdev *dev);
-
-/**
- * hfi1_qp_exit - free the QP related structures
- * @dev: a pointer to the hfi1_ibdev
- */
-void hfi1_qp_exit(struct hfi1_ibdev *dev);
+void hfi1_get_credit(struct rvt_qp *qp, u32 aeth);
/**
* hfi1_qp_wakeup - wake up on the indicated event
* @qp: the QP
 * @flag: the flag on which the qp is stalled
*/
-void hfi1_qp_wakeup(struct hfi1_qp *qp, u32 flag);
+void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag);
-struct sdma_engine *qp_to_sdma_engine(struct hfi1_qp *qp, u8 sc5);
+struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5);
+struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5);
struct qp_iter;
@@ -244,43 +133,28 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
* qp_comm_est - handle trap with QP established
* @qp: the QP
*/
-void qp_comm_est(struct hfi1_qp *qp);
+void qp_comm_est(struct rvt_qp *qp);
-/**
- * _hfi1_schedule_send - schedule progress
- * @qp: the QP
- *
- * This schedules qp progress w/o regard to the s_flags.
- *
- * It is only used in the post send, which doesn't hold
- * the s_lock.
- */
-static inline void _hfi1_schedule_send(struct hfi1_qp *qp)
-{
- struct hfi1_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+void _hfi1_schedule_send(struct rvt_qp *qp);
+void hfi1_schedule_send(struct rvt_qp *qp);
- iowait_schedule(&qp->s_iowait, ppd->hfi1_wq,
- qp->s_sde ?
- qp->s_sde->cpu :
- cpumask_first(cpumask_of_node(dd->assigned_node_id)));
-}
-
-/**
- * hfi1_schedule_send - schedule progress
- * @qp: the QP
- *
- * This schedules qp progress and caller should hold
- * the s_lock.
- */
-static inline void hfi1_schedule_send(struct hfi1_qp *qp)
-{
- if (hfi1_send_ok(qp))
- _hfi1_schedule_send(qp);
-}
-
-void hfi1_migrate_qp(struct hfi1_qp *qp);
+void hfi1_migrate_qp(struct rvt_qp *qp);
+/*
+ * Functions provided by hfi1 driver for rdmavt to use
+ */
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ gfp_t gfp);
+void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
+unsigned free_all_qps(struct rvt_dev_info *rdi);
+void notify_qp_reset(struct rvt_qp *qp);
+int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ struct ib_qp_attr *attr);
+void flush_qp_waiters(struct rvt_qp *qp);
+void notify_error_qp(struct rvt_qp *qp);
+void stop_send_queue(struct rvt_qp *qp);
+void quiesce_qp(struct rvt_qp *qp);
+u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
+int mtu_to_path_mtu(u32 mtu);
+void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl);
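
These entry points are handed to rdmavt through its driver callback table. A
sketch of the wiring, assuming the rvt_driver_provided layout this series
targets (the actual assignments live in the driver's verbs registration
code):

	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

	rdi->driver_f.qp_priv_alloc = qp_priv_alloc;
	rdi->driver_f.qp_priv_free = qp_priv_free;
	rdi->driver_f.free_all_qps = free_all_qps;
	rdi->driver_f.notify_qp_reset = notify_qp_reset;
	rdi->driver_f.quiesce_qp = quiesce_qp;
	rdi->driver_f.stop_send_queue = stop_send_queue;
	rdi->driver_f.flush_qp_waiters = flush_qp_waiters;
	rdi->driver_f.notify_error_qp = notify_error_qp;
	rdi->driver_f.mtu_from_qp = mtu_from_qp;
	rdi->driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
	rdi->driver_f.get_pmtu_from_attr = get_pmtu_from_attr;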
#endif /* _QP_H */
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/staging/rdma/hfi1/qsfp.c
index 6326a915d7fd..9ed1963010fe 100644
--- a/drivers/staging/rdma/hfi1/qsfp.c
+++ b/drivers/staging/rdma/hfi1/qsfp.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -62,7 +59,7 @@
#define I2C_MAX_RETRY 4
/*
- * Unlocked i2c write. Must hold dd->qsfp_i2c_mutex.
+ * Raw i2c write. No set-up or lock checking.
*/
static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
int offset, void *bp, int len)
@@ -71,14 +68,6 @@ static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
int ret, cnt;
u8 *buff = bp;
- /* Make sure TWSI bus is in sane state. */
- ret = hfi1_twsi_reset(dd, target);
- if (ret) {
- hfi1_dev_porterr(dd, ppd->port,
- "I2C interface Reset for write failed\n");
- return -EIO;
- }
-
cnt = 0;
while (cnt < len) {
int wlen = len - cnt;
@@ -99,48 +88,45 @@ static int __i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
return cnt;
}
+/*
+ * Caller must hold the i2c chain resource.
+ */
int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
void *bp, int len)
{
- struct hfi1_devdata *dd = ppd->dd;
int ret;
- ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex);
- if (!ret) {
- ret = __i2c_write(ppd, target, i2c_addr, offset, bp, len);
- mutex_unlock(&dd->qsfp_i2c_mutex);
+ if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
+ return -EACCES;
+
+ /* make sure the TWSI bus is in a sane state */
+ ret = hfi1_twsi_reset(ppd->dd, target);
+ if (ret) {
+ hfi1_dev_porterr(ppd->dd, ppd->port,
+ "I2C chain %d write interface reset failed\n",
+ target);
+ return ret;
}
- return ret;
+ return __i2c_write(ppd, target, i2c_addr, offset, bp, len);
}
/*
- * Unlocked i2c read. Must hold dd->qsfp_i2c_mutex.
+ * Raw i2c read. No set-up or lock checking.
*/
static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
int offset, void *bp, int len)
{
struct hfi1_devdata *dd = ppd->dd;
int ret, cnt, pass = 0;
- int stuck = 0;
- u8 *buff = bp;
-
- /* Make sure TWSI bus is in sane state. */
- ret = hfi1_twsi_reset(dd, target);
- if (ret) {
- hfi1_dev_porterr(dd, ppd->port,
- "I2C interface Reset for read failed\n");
- ret = -EIO;
- stuck = 1;
- goto exit;
- }
+ int orig_offset = offset;
cnt = 0;
while (cnt < len) {
int rlen = len - cnt;
ret = hfi1_twsi_blk_rd(dd, target, i2c_addr, offset,
- buff + cnt, rlen);
+ bp + cnt, rlen);
 		/* Some QSFPs fail first try. Retry as experiment */
if (ret && cnt == 0 && ++pass < I2C_MAX_RETRY)
continue;
@@ -156,14 +142,11 @@ static int __i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr,
ret = cnt;
exit:
- if (stuck)
- dd_dev_err(dd, "I2C interface bus stuck non-idle\n");
-
- if (pass >= I2C_MAX_RETRY && ret)
+ if (ret < 0) {
hfi1_dev_porterr(dd, ppd->port,
- "I2C failed even retrying\n");
- else if (pass)
- hfi1_dev_porterr(dd, ppd->port, "I2C retries: %d\n", pass);
+ "I2C chain %d read failed, addr 0x%x, offset 0x%x, len %d\n",
+ target, i2c_addr, orig_offset, len);
+ }
/* Must wait min 20us between qsfp i2c transactions */
udelay(20);
@@ -171,21 +154,35 @@ exit:
return ret;
}
+/*
+ * Caller must hold the i2c chain resource.
+ */
int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
void *bp, int len)
{
- struct hfi1_devdata *dd = ppd->dd;
int ret;
- ret = mutex_lock_interruptible(&dd->qsfp_i2c_mutex);
- if (!ret) {
- ret = __i2c_read(ppd, target, i2c_addr, offset, bp, len);
- mutex_unlock(&dd->qsfp_i2c_mutex);
+ if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
+ return -EACCES;
+
+ /* make sure the TWSI bus is in a sane state */
+ ret = hfi1_twsi_reset(ppd->dd, target);
+ if (ret) {
+ hfi1_dev_porterr(ppd->dd, ppd->port,
+ "I2C chain %d read interface reset failed\n",
+ target);
+ return ret;
}
- return ret;
+ return __i2c_read(ppd, target, i2c_addr, offset, bp, len);
}
+/*
+ * Write page n, offset m of QSFP memory as defined by SFF 8636
+ * by writing @addr = ((256 * n) + m)
+ *
+ * Caller must hold the i2c chain resource.
+ */
int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len)
{
@@ -195,50 +192,81 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int ret;
u8 page;
- ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex);
- if (ret)
+ if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
+ return -EACCES;
+
+ /* make sure the TWSI bus is in a sane state */
+ ret = hfi1_twsi_reset(ppd->dd, target);
+ if (ret) {
+ hfi1_dev_porterr(ppd->dd, ppd->port,
+ "QSFP chain %d write interface reset failed\n",
+ target);
return ret;
+ }
while (count < len) {
/*
- * Set the qsfp page based on a zero-based addresss
+ * Set the qsfp page based on a zero-based address
* and a page size of QSFP_PAGESIZE bytes.
*/
page = (u8)(addr / QSFP_PAGESIZE);
- ret = __i2c_write(ppd, target, QSFP_DEV,
- QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
+ ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
+ QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
if (ret != 1) {
- hfi1_dev_porterr(
- ppd->dd,
- ppd->port,
- "can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret);
+ hfi1_dev_porterr(ppd->dd, ppd->port,
+ "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
+ target, ret);
ret = -EIO;
break;
}
- /* truncate write to end of page if crossing page boundary */
offset = addr % QSFP_PAGESIZE;
nwrite = len - count;
- if ((offset + nwrite) > QSFP_PAGESIZE)
- nwrite = QSFP_PAGESIZE - offset;
+ /* truncate write to boundary if crossing boundary */
+ if (((addr % QSFP_RW_BOUNDARY) + nwrite) > QSFP_RW_BOUNDARY)
+ nwrite = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);
- ret = __i2c_write(ppd, target, QSFP_DEV, offset, bp + count,
- nwrite);
- if (ret <= 0) /* stop on error or nothing read */
+ ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
+ offset, bp + count, nwrite);
+ if (ret <= 0) /* stop on error or nothing written */
break;
count += ret;
addr += ret;
}
- mutex_unlock(&ppd->dd->qsfp_i2c_mutex);
-
if (ret < 0)
return ret;
return count;
}
+/*
+ * Perform a stand-alone single QSFP write. Acquire the resource, do the
+ * write, then release the resource.
+ */
+int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 resource = qsfp_resource(dd);
+ int ret;
+
+ ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
+ if (ret)
+ return ret;
+ ret = qsfp_write(ppd, target, addr, bp, len);
+ release_chip_resource(dd, resource);
+
+ return ret;
+}
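
A usage sketch (target selection is device-specific; hfi1 derives it from
the HFI): select the lower page with a one-byte write, treating a short
return as failure:

	u8 page = 0;
	int ret;

	ret = one_qsfp_write(ppd, target, QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
	if (ret != 1)
		return ret < 0 ? ret : -EIO;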
+
+/*
+ * Access page n, offset m of QSFP memory as defined by SFF 8636
+ * by reading @addr = ((256 * n) + m)
+ *
+ * Caller must hold the i2c chain resource.
+ */
int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len)
{
@@ -248,9 +276,17 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int ret;
u8 page;
- ret = mutex_lock_interruptible(&ppd->dd->qsfp_i2c_mutex);
- if (ret)
+ if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
+ return -EACCES;
+
+ /* make sure the TWSI bus is in a sane state */
+ ret = hfi1_twsi_reset(ppd->dd, target);
+ if (ret) {
+ hfi1_dev_porterr(ppd->dd, ppd->port,
+ "QSFP chain %d read interface reset failed\n",
+ target);
return ret;
+ }
while (count < len) {
/*
@@ -258,25 +294,26 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
* and a page size of QSFP_PAGESIZE bytes.
*/
page = (u8)(addr / QSFP_PAGESIZE);
- ret = __i2c_write(ppd, target, QSFP_DEV,
- QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
+ ret = __i2c_write(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
+ QSFP_PAGE_SELECT_BYTE_OFFS, &page, 1);
if (ret != 1) {
- hfi1_dev_porterr(
- ppd->dd,
- ppd->port,
- "can't write QSFP_PAGE_SELECT_BYTE: %d\n", ret);
+ hfi1_dev_porterr(ppd->dd, ppd->port,
+ "QSFP chain %d can't write QSFP_PAGE_SELECT_BYTE: %d\n",
+ target, ret);
ret = -EIO;
break;
}
- /* truncate read to end of page if crossing page boundary */
offset = addr % QSFP_PAGESIZE;
nread = len - count;
- if ((offset + nread) > QSFP_PAGESIZE)
- nread = QSFP_PAGESIZE - offset;
-
- ret = __i2c_read(ppd, target, QSFP_DEV, offset, bp + count,
- nread);
+ /* truncate read to boundary if crossing boundary */
+ if (((addr % QSFP_RW_BOUNDARY) + nread) > QSFP_RW_BOUNDARY)
+ nread = QSFP_RW_BOUNDARY - (addr % QSFP_RW_BOUNDARY);
+
+ /* QSFPs require a 5-10msec delay after write operations */
+ mdelay(5);
+ ret = __i2c_read(ppd, target, QSFP_DEV | QSFP_OFFSET_SIZE,
+ offset, bp + count, nread);
if (ret <= 0) /* stop on error or nothing read */
break;
@@ -284,17 +321,40 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
addr += ret;
}
- mutex_unlock(&ppd->dd->qsfp_i2c_mutex);
-
if (ret < 0)
return ret;
return count;
}
/*
+ * Perform a stand-alone single QSFP read. Acquire the resource, do the
+ * read, then release the resource.
+ */
+int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len)
+{
+ struct hfi1_devdata *dd = ppd->dd;
+ u32 resource = qsfp_resource(dd);
+ int ret;
+
+ ret = acquire_chip_resource(dd, resource, QSFP_WAIT);
+ if (ret)
+ return ret;
+ ret = qsfp_read(ppd, target, addr, bp, len);
+ release_chip_resource(dd, resource);
+
+ return ret;
+}
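
A matching read sketch, fetching the identifier byte that the SFF 8636 notes
in qsfp.h call out (0x0c for QSFP, 0x0d for QSFP+):

	u8 id;
	int ret;

	ret = one_qsfp_read(ppd, target, QSFP_MOD_ID_OFFS, &id, 1);
	if (ret != 1)
		return ret < 0 ? ret : -EIO;
	if (id != 0x0c && id != 0x0d)
		return -ENODEV;	/* not a QSFP/QSFP+ module */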
+
+/*
* This function caches the QSFP memory range in 128 byte chunks.
* As an example, the next byte after address 255 is byte 128 from
* upper page 01H (if existing) rather than byte 0 from lower page 00H.
+ * Access page n, offset m of QSFP memory as defined by SFF 8636
+ * in the cache by reading byte ((128 * n) + m)
+ * The calls to qsfp_{read,write} in this function correctly handle the
+ * address map difference between this mapping and the mapping implemented
+ * by those functions
*/
int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
{
@@ -304,79 +364,84 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
u8 *cache = &cp->cache[0];
/* ensure sane contents on invalid reads, for cable swaps */
- memset(cache, 0, (QSFP_MAX_NUM_PAGES*128));
- dd_dev_info(ppd->dd, "%s: called\n", __func__);
+ memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
+ spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
+ ppd->qsfp_info.cache_valid = 0;
+ spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
+
if (!qsfp_mod_present(ppd)) {
ret = -ENODEV;
- goto bail;
+ goto bail_no_release;
}
- ret = qsfp_read(ppd, target, 0, cache, 256);
- if (ret != 256) {
+ ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT);
+ if (ret)
+ goto bail_no_release;
+
+ ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE);
+ if (ret != QSFP_PAGESIZE) {
dd_dev_info(ppd->dd,
- "%s: Read of pages 00H failed, expected 256, got %d\n",
- __func__, ret);
+ "%s: Page 0 read failed, expected %d, got %d\n",
+ __func__, QSFP_PAGESIZE, ret);
goto bail;
}
- if (cache[0] != 0x0C && cache[0] != 0x0D)
- goto bail;
-
/* Is paging enabled? */
if (!(cache[2] & 4)) {
-
/* Paging enabled, page 03 required */
if ((cache[195] & 0xC0) == 0xC0) {
/* all */
ret = qsfp_read(ppd, target, 384, cache + 256, 128);
if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
goto bail;
}
ret = qsfp_read(ppd, target, 640, cache + 384, 128);
if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
goto bail;
}
ret = qsfp_read(ppd, target, 896, cache + 512, 128);
if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
goto bail;
}
} else if ((cache[195] & 0x80) == 0x80) {
/* only page 2 and 3 */
ret = qsfp_read(ppd, target, 640, cache + 384, 128);
if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
goto bail;
}
ret = qsfp_read(ppd, target, 896, cache + 512, 128);
if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
goto bail;
}
} else if ((cache[195] & 0x40) == 0x40) {
/* only page 1 and 3 */
ret = qsfp_read(ppd, target, 384, cache + 256, 128);
if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
goto bail;
}
ret = qsfp_read(ppd, target, 896, cache + 512, 128);
if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
goto bail;
}
} else {
/* only page 3 */
ret = qsfp_read(ppd, target, 896, cache + 512, 128);
if (ret <= 0 || ret != 128) {
- dd_dev_info(ppd->dd, "%s: failed\n", __func__);
+ dd_dev_info(ppd->dd, "%s failed\n", __func__);
goto bail;
}
}
}
+ release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
+
spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
ppd->qsfp_info.cache_valid = 1;
ppd->qsfp_info.cache_refresh_required = 0;
@@ -385,7 +450,9 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
return 0;
bail:
- memset(cache, 0, (QSFP_MAX_NUM_PAGES*128));
+ release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
+bail_no_release:
+ memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
return ret;
}
@@ -434,7 +501,7 @@ int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr, u32 len,
if (port_num > dd->num_pports || port_num < 1) {
dd_dev_info(dd, "%s: Invalid port number %d\n",
- __func__, port_num);
+ __func__, port_num);
ret = -EINVAL;
goto set_zeroes;
}
@@ -485,7 +552,6 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
lenstr[1] = '\0';
if (ppd->qsfp_info.cache_valid) {
-
if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
@@ -529,7 +595,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK);
for (iidx = 0; iidx < QSFP_DUMP_CHUNK; ++iidx) {
- sofar += scnprintf(buf + sofar, len-sofar,
+ sofar += scnprintf(buf + sofar, len - sofar,
" %02X", bin_buff[iidx]);
}
sofar += scnprintf(buf + sofar, len - sofar, "\n");
diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/staging/rdma/hfi1/qsfp.h
index d30c2a6baa0b..831fe4cf1345 100644
--- a/drivers/staging/rdma/hfi1/qsfp.h
+++ b/drivers/staging/rdma/hfi1/qsfp.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -59,23 +56,28 @@
* Below are masks for QSFP pins. Pins are the same for HFI0 and HFI1.
* _N means asserted low
*/
-#define QSFP_HFI0_I2CCLK (1 << 0)
-#define QSFP_HFI0_I2CDAT (1 << 1)
-#define QSFP_HFI0_RESET_N (1 << 2)
-#define QSFP_HFI0_INT_N (1 << 3)
-#define QSFP_HFI0_MODPRST_N (1 << 4)
+#define QSFP_HFI0_I2CCLK BIT(0)
+#define QSFP_HFI0_I2CDAT BIT(1)
+#define QSFP_HFI0_RESET_N BIT(2)
+#define QSFP_HFI0_INT_N BIT(3)
+#define QSFP_HFI0_MODPRST_N BIT(4)
/* QSFP is paged at 256 bytes */
#define QSFP_PAGESIZE 256
+/* Reads/writes cannot cross 128 byte boundaries */
+#define QSFP_RW_BOUNDARY 128
+
+/* number of bytes in i2c offset for QSFP devices */
+#define __QSFP_OFFSET_SIZE 1 /* num address bytes */
+#define QSFP_OFFSET_SIZE (__QSFP_OFFSET_SIZE << 8) /* shifted value */
/* Defined fields that Intel requires of qualified cables */
/* Byte 0 is Identifier, not checked */
/* Byte 1 is reserved "status MSB" */
-/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
-/*
- * Rest of first 128 not used, although 127 is reserved for page select
- * if module is not "Flat memory".
- */
+#define QSFP_TX_CTRL_BYTE_OFFS 86
+#define QSFP_PWR_CTRL_BYTE_OFFS 93
+#define QSFP_CDR_CTRL_BYTE_OFFS 98
+
#define QSFP_PAGE_SELECT_BYTE_OFFS 127
/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
#define QSFP_MOD_ID_OFFS 128
@@ -87,7 +89,8 @@
/* Byte 130 is Connector type. Not Intel req'd */
/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
/* Byte 139 is encoding. code 0x01 is 8b10b. Not Intel req'd */
-/* byte 140 is nominal bit-rate, in units of 100Mbits/sec Not Intel req'd */
+/* Byte 140 is nominal bit-rate, in units of 100Mbits/sec */
+#define QSFP_NOM_BIT_RATE_100_OFFS 140
/* Byte 141 is Extended Rate Select. Not Intel req'd */
/* Bytes 142..145 are lengths for various fiber types. Not Intel req'd */
/* Byte 146 is length for Copper. Units of 1 meter */
@@ -135,11 +138,18 @@ extern const char *const hfi1_qsfp_devtech[16];
*/
#define QSFP_ATTEN_OFFS 186
#define QSFP_ATTEN_LEN 2
-/* Bytes 188,189 are Wavelength tolerance, not Intel req'd */
+/*
+ * Bytes 188,189 are Wavelength tolerance, if optical
+ * If copper, they are attenuation in dB:
+ * Byte 188 is at 12.5 Gb/s, Byte 189 at 25 Gb/s
+ */
+#define QSFP_CU_ATTEN_7G_OFFS 188
+#define QSFP_CU_ATTEN_12G_OFFS 189
/* Byte 190 is Max Case Temp. Not Intel req'd */
/* Byte 191 is LSB of sum of bytes 128..190. Not Intel req'd */
#define QSFP_CC_OFFS 191
-/* Bytes 192..195 are Options implemented in qsfp. Not Intel req'd */
+#define QSFP_EQ_INFO_OFFS 193
+#define QSFP_CDR_INFO_OFFS 194
/* Bytes 196..211 are Serial Number, String */
#define QSFP_SN_OFFS 196
#define QSFP_SN_LEN 16
@@ -150,6 +160,8 @@ extern const char *const hfi1_qsfp_devtech[16];
#define QSFP_LOT_OFFS 218
#define QSFP_LOT_LEN 2
/* Bytes 220, 221 indicate monitoring options, Not Intel req'd */
+/* Byte 222 indicates nominal bitrate in units of 250Mbits/sec */
+#define QSFP_NOM_BIT_RATE_250_OFFS 222
/* Byte 223 is LSB of sum of bytes 192..222 */
#define QSFP_CC_EXT_OFFS 223
@@ -191,6 +203,7 @@ extern const char *const hfi1_qsfp_devtech[16];
*/
#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_HIGH_PWR(pbyte) (((pbyte) & 3) | 4)
#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
@@ -198,10 +211,12 @@ struct qsfp_data {
/* Helps to find our way */
struct hfi1_pportdata *ppd;
struct work_struct qsfp_work;
- u8 cache[QSFP_MAX_NUM_PAGES*128];
+ u8 cache[QSFP_MAX_NUM_PAGES * 128];
+ /* protect qsfp data */
spinlock_t qsfp_lock;
u8 check_interrupt_flags;
- u8 qsfp_interrupt_functional;
+ u8 reset_needed;
+ u8 limiting_active;
u8 cache_valid;
u8 cache_refresh_required;
};
@@ -220,3 +235,7 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int len);
+int one_qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len);
+int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
+ int len);
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/staging/rdma/hfi1/rc.c
index 6f4a155f7931..0d7e1017f3cb 100644
--- a/drivers/staging/rdma/hfi1/rc.c
+++ b/drivers/staging/rdma/hfi1/rc.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -49,18 +46,151 @@
*/
#include <linux/io.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
#include "hfi.h"
#include "qp.h"
-#include "sdma.h"
+#include "verbs_txreq.h"
#include "trace.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
-static void rc_timeout(unsigned long arg);
+/**
+ * hfi1_add_retry_timer - add/start a retry timer
+ * @qp: the QP
+ *
+ * add a retry timer on the QP
+ */
+static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ qp->s_flags |= RVT_S_TIMER;
+ /* 4.096 usec. * (1 << qp->timeout) */
+ qp->s_timer.expires = jiffies + qp->timeout_jiffies +
+ rdi->busy_jiffies;
+ add_timer(&qp->s_timer);
+}
+
+/**
+ * hfi1_add_rnr_timer - add/start an rnr timer
+ * @qp: the QP
+ * @to: timeout in usecs
+ *
+ * add an rnr timer on the QP
+ */
+void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ qp->s_flags |= RVT_S_WAIT_RNR;
+ qp->s_timer.expires = jiffies + usecs_to_jiffies(to);
+ add_timer(&priv->s_rnr_timer);
+}
+
+/**
+ * hfi1_mod_retry_timer - mod a retry timer
+ * @qp: the QP
+ *
+ * Modify a potentially already running retry
+ * timer
+ */
+static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ qp->s_flags |= RVT_S_TIMER;
+ /* 4.096 usec. * (1 << qp->timeout) */
+ mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
+ rdi->busy_jiffies);
+}
+
+/**
+ * hfi1_stop_retry_timer - stop a retry timer
+ * @qp: the QP
+ *
+ * stop a retry timer and return whether the timer
+ * had been pending.
+ */
+static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
+{
+ int rval = 0;
+
+ /* Remove QP from retry */
+ if (qp->s_flags & RVT_S_TIMER) {
+ qp->s_flags &= ~RVT_S_TIMER;
+ rval = del_timer(&qp->s_timer);
+ }
+ return rval;
+}
+
+/**
+ * hfi1_stop_rc_timers - stop all timers
+ * @qp: the QP
+ *
+ * stop any pending timers
+ */
+void hfi1_stop_rc_timers(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ /* Remove QP from all timers */
+ if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
+ qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
+ del_timer(&qp->s_timer);
+ del_timer(&priv->s_rnr_timer);
+ }
+}
+
+/**
+ * hfi1_stop_rnr_timer - stop an rnr timer
+ * @qp: the QP
+ *
+ * stop an rnr timer and return whether the timer
+ * had been pending.
+ */
+static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
+{
+ int rval = 0;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ /* Remove QP from rnr timer */
+ if (qp->s_flags & RVT_S_WAIT_RNR) {
+ qp->s_flags &= ~RVT_S_WAIT_RNR;
+ rval = del_timer(&priv->s_rnr_timer);
+ }
+ return rval;
+}
+
+/**
+ * hfi1_del_timers_sync - wait for any timeout routines to exit
+ * @qp: the QP
+ */
+void hfi1_del_timers_sync(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ del_timer_sync(&qp->s_timer);
+ del_timer_sync(&priv->s_rnr_timer);
+}
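
The lifecycle these helpers cover, sketched (the real call sites are spread
through the RC send and receive paths):

	hfi1_add_retry_timer(qp);	/* arm on first unacked request */
	hfi1_mod_retry_timer(qp);	/* push out on forward progress */
	hfi1_stop_retry_timer(qp);	/* disarm when the ACK lands */
	hfi1_add_rnr_timer(qp, to);	/* arm instead after an RNR NAK */
	hfi1_stop_rc_timers(qp);	/* error/reset: disarm both */
	hfi1_del_timers_sync(qp);	/* teardown: wait out running handlers */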
-static u32 restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe,
+/* only opcode mask for adaptive pio */
+const u32 rc_only_opcode =
+ BIT(OP(SEND_ONLY) & 0x1f) |
+ BIT(OP(SEND_ONLY_WITH_IMMEDIATE) & 0x1f) |
+ BIT(OP(RDMA_WRITE_ONLY) & 0x1f) |
+ BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & 0x1f) |
+ BIT(OP(RDMA_READ_REQUEST) & 0x1f) |
+ BIT(OP(ACKNOWLEDGE) & 0x1f) |
+ BIT(OP(ATOMIC_ACKNOWLEDGE) & 0x1f) |
+ BIT(OP(COMPARE_SWAP) & 0x1f) |
+ BIT(OP(FETCH_ADD) & 0x1f);
+
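
The table is keyed by the low five opcode bits; a hedged sketch of the
membership test a consumer would perform (the helper name is hypothetical):

	static inline int rc_is_only_opcode(u8 opcode)
	{
		return (rc_only_opcode >> (opcode & 0x1f)) & 1;
	}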
+static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 psn, u32 pmtu)
{
u32 len;
@@ -74,38 +204,32 @@ static u32 restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe,
return wqe->length - len;
}
-static void start_timer(struct hfi1_qp *qp)
-{
- qp->s_flags |= HFI1_S_TIMER;
- qp->s_timer.function = rc_timeout;
- /* 4.096 usec. * (1 << qp->timeout) */
- qp->s_timer.expires = jiffies + qp->timeout_jiffies;
- add_timer(&qp->s_timer);
-}
-
/**
* make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
* @dev: the device for this QP
* @qp: a pointer to the QP
* @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
+ * @ps: the xmit packet state
*
* Return 1 if constructed; otherwise, return 0.
* Note that we are in the responder's side of the QP context.
* Note the QP s_lock must be held.
*/
-static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp,
- struct hfi1_other_headers *ohdr, u32 pmtu)
+static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
+ struct hfi1_other_headers *ohdr,
+ struct hfi1_pkt_state *ps)
{
- struct hfi1_ack_entry *e;
+ struct rvt_ack_entry *e;
u32 hwords;
u32 len;
u32 bth0;
u32 bth2;
int middle = 0;
+ u32 pmtu = qp->pmtu;
+ struct hfi1_qp_priv *priv = qp->priv;
/* Don't send an ACK if we aren't supposed to. */
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
goto bail;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
@@ -116,7 +240,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp,
case OP(RDMA_READ_RESPONSE_ONLY):
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
if (e->rdma_sge.mr) {
- hfi1_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
/* FALLTHROUGH */
@@ -133,7 +257,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp,
case OP(ACKNOWLEDGE):
/* Check for no next entry in the queue. */
if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & HFI1_S_ACK_PENDING)
+ if (qp->s_flags & RVT_S_ACK_PENDING)
goto normal;
goto bail;
}
@@ -152,9 +276,9 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp,
goto bail;
}
/* Copy SGE state in case we need to resend */
- qp->s_rdma_mr = e->rdma_sge.mr;
- if (qp->s_rdma_mr)
- hfi1_get_mr(qp->s_rdma_mr);
+ ps->s_txreq->mr = e->rdma_sge.mr;
+ if (ps->s_txreq->mr)
+ rvt_get_mr(ps->s_txreq->mr);
qp->s_ack_rdma_sge.sge = e->rdma_sge;
qp->s_ack_rdma_sge.num_sge = 1;
qp->s_cur_sge = &qp->s_ack_rdma_sge;
@@ -191,9 +315,9 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct hfi1_qp *qp,
/* FALLTHROUGH */
case OP(RDMA_READ_RESPONSE_MIDDLE):
qp->s_cur_sge = &qp->s_ack_rdma_sge;
- qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
- if (qp->s_rdma_mr)
- hfi1_get_mr(qp->s_rdma_mr);
+ ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
+ if (ps->s_txreq->mr)
+ rvt_get_mr(ps->s_txreq->mr);
len = qp->s_ack_rdma_sge.sge.sge_length;
if (len > pmtu) {
len = pmtu;
@@ -218,7 +342,7 @@ normal:
* (see above).
*/
qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~HFI1_S_ACK_PENDING;
+ qp->s_flags &= ~RVT_S_ACK_PENDING;
qp->s_cur_sge = NULL;
if (qp->s_nak_state)
ohdr->u.aeth =
@@ -234,20 +358,23 @@ normal:
}
qp->s_rdma_ack_cnt++;
qp->s_hdrwords = hwords;
+ ps->s_txreq->sde = priv->s_sde;
qp->s_cur_size = len;
- hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle);
+ hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
+ /* pbc */
+ ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
return 1;
bail:
qp->s_ack_state = OP(ACKNOWLEDGE);
/*
* Ensure s_rdma_ack_cnt changes are committed prior to resetting
- * HFI1_S_RESP_PENDING
+ * RVT_S_RESP_PENDING
*/
smp_wmb();
- qp->s_flags &= ~(HFI1_S_RESP_PENDING
- | HFI1_S_ACK_PENDING
- | HFI1_S_AHG_VALID);
+ qp->s_flags &= ~(RVT_S_RESP_PENDING
+ | RVT_S_ACK_PENDING
+ | RVT_S_AHG_VALID);
return 0;
}
@@ -255,14 +382,17 @@ bail:
* hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
* @qp: a pointer to the QP
*
+ * Assumes s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int hfi1_make_rc_req(struct hfi1_qp *qp)
+int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
struct hfi1_other_headers *ohdr;
- struct hfi1_sge_state *ss;
- struct hfi1_swqe *wqe;
+ struct rvt_sge_state *ss;
+ struct rvt_swqe *wqe;
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
u32 hwords = 5;
u32 len;
@@ -270,51 +400,48 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
u32 bth2;
u32 pmtu = qp->pmtu;
char newreq;
- unsigned long flags;
- int ret = 0;
int middle = 0;
int delta;
- ohdr = &qp->s_hdr->ibh.u.oth;
- if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr->ibh.u.l.oth;
+ ps->s_txreq = get_txreq(ps->dev, qp);
+ if (IS_ERR(ps->s_txreq))
+ goto bail_no_tx;
- /*
- * The lock is needed to synchronize between the sending tasklet,
- * the receive interrupt handler, and timeout re-sends.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
+ ohdr = &ps->s_txreq->phdr.hdr.u.oth;
+ if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
+ ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
/* Sending responses has higher priority over sending requests. */
- if ((qp->s_flags & HFI1_S_RESP_PENDING) &&
- make_rc_ack(dev, qp, ohdr, pmtu))
- goto done;
+ if ((qp->s_flags & RVT_S_RESP_PENDING) &&
+ make_rc_ack(dev, qp, ohdr, ps))
+ return 1;
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) {
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_iowait.sdma_busy)) {
- qp->s_flags |= HFI1_S_WAIT_DMA;
+ if (iowait_sdma_pending(&priv->s_iowait)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
clear_ahg(qp);
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
- goto done;
+ goto done_free_tx;
}
- if (qp->s_flags & (HFI1_S_WAIT_RNR | HFI1_S_WAIT_ACK))
+ if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
goto bail;
if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
- qp->s_flags |= HFI1_S_WAIT_PSN;
+ qp->s_flags |= RVT_S_WAIT_PSN;
goto bail;
}
qp->s_sending_psn = qp->s_psn;
@@ -322,10 +449,10 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
}
/* Send a request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
switch (qp->s_state) {
default:
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/*
* Resend an old request or start a new one.
@@ -347,11 +474,11 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
*/
if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
qp->s_num_rd_atomic) {
- qp->s_flags |= HFI1_S_WAIT_FENCE;
+ qp->s_flags |= RVT_S_WAIT_FENCE;
goto bail;
}
- wqe->psn = qp->s_next_psn;
newreq = 1;
+ qp->s_psn = wqe->psn;
}
/*
* Note that we have to be careful not to modify the
@@ -365,21 +492,19 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
case IB_WR_SEND:
case IB_WR_SEND_WITH_IMM:
/* If no credit, return. */
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
- wqe->lpsn = wqe->psn;
if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
qp->s_state = OP(SEND_FIRST);
len = pmtu;
break;
}
- if (wqe->wr.opcode == IB_WR_SEND)
+ if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_ONLY);
- else {
+ } else {
qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
@@ -393,14 +518,14 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
break;
case IB_WR_RDMA_WRITE:
- if (newreq && !(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
/* FALLTHROUGH */
case IB_WR_RDMA_WRITE_WITH_IMM:
/* If no credit, return. */
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT) &&
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
- qp->s_flags |= HFI1_S_WAIT_SSN_CREDIT;
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
goto bail;
}
ohdr->u.rc.reth.vaddr =
@@ -409,16 +534,14 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
cpu_to_be32(wqe->rdma_wr.rkey);
ohdr->u.rc.reth.length = cpu_to_be32(len);
hwords += sizeof(struct ib_reth) / sizeof(u32);
- wqe->lpsn = wqe->psn;
if (len > pmtu) {
- wqe->lpsn += (len - 1) / pmtu;
qp->s_state = OP(RDMA_WRITE_FIRST);
len = pmtu;
break;
}
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
+ } else {
qp->s_state =
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after RETH */
@@ -440,19 +563,12 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
if (newreq) {
if (qp->s_num_rd_atomic >=
qp->s_max_rd_atomic) {
- qp->s_flags |= HFI1_S_WAIT_RDMAR;
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
goto bail;
}
qp->s_num_rd_atomic++;
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- /*
- * Adjust s_next_psn to count the
- * expected number of responses.
- */
- if (len > pmtu)
- qp->s_next_psn += (len - 1) / pmtu;
- wqe->lpsn = qp->s_next_psn++;
}
ohdr->u.rc.reth.vaddr =
cpu_to_be64(wqe->rdma_wr.remote_addr);
@@ -477,13 +593,12 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
if (newreq) {
if (qp->s_num_rd_atomic >=
qp->s_max_rd_atomic) {
- qp->s_flags |= HFI1_S_WAIT_RDMAR;
+ qp->s_flags |= RVT_S_WAIT_RDMAR;
goto bail;
}
qp->s_num_rd_atomic++;
- if (!(qp->s_flags & HFI1_S_UNLIMITED_CREDIT))
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
qp->s_lsn++;
- wqe->lpsn = wqe->psn;
}
if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
qp->s_state = OP(COMPARE_SWAP);
@@ -526,11 +641,8 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
}
if (wqe->wr.opcode == IB_WR_RDMA_READ)
qp->s_psn = wqe->lpsn + 1;
- else {
+ else
qp->s_psn++;
- if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
- }
break;
case OP(RDMA_READ_RESPONSE_FIRST):
@@ -550,8 +662,6 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
/* FALLTHROUGH */
case OP(SEND_MIDDLE):
bth2 = mask_psn(qp->s_psn++);
- if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
len = qp->s_len;
if (len > pmtu) {
@@ -559,9 +669,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
middle = HFI1_CAP_IS_KSET(SDMA_AHG);
break;
}
- if (wqe->wr.opcode == IB_WR_SEND)
+ if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_LAST);
- else {
+ } else {
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
@@ -592,8 +702,6 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
/* FALLTHROUGH */
case OP(RDMA_WRITE_MIDDLE):
bth2 = mask_psn(qp->s_psn++);
- if (cmp_psn(qp->s_psn, qp->s_next_psn) > 0)
- qp->s_next_psn = qp->s_psn;
ss = &qp->s_sge;
len = qp->s_len;
if (len > pmtu) {
@@ -601,9 +709,9 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
middle = HFI1_CAP_IS_KSET(SDMA_AHG);
break;
}
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
qp->s_state = OP(RDMA_WRITE_LAST);
- else {
+ } else {
qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
@@ -648,13 +756,14 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
delta = delta_psn(bth2, wqe->psn);
if (delta && delta % HFI1_PSN_CREDIT == 0)
bth2 |= IB_BTH_REQ_ACK;
- if (qp->s_flags & HFI1_S_SEND_ONE) {
- qp->s_flags &= ~HFI1_S_SEND_ONE;
- qp->s_flags |= HFI1_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_SEND_ONE) {
+ qp->s_flags &= ~RVT_S_SEND_ONE;
+ qp->s_flags |= RVT_S_WAIT_ACK;
bth2 |= IB_BTH_REQ_ACK;
}
qp->s_len -= len;
qp->s_hdrwords = hwords;
+ ps->s_txreq->sde = priv->s_sde;
qp->s_cur_sge = ss;
qp->s_cur_size = len;
hfi1_make_ruc_header(
@@ -662,16 +771,25 @@ int hfi1_make_rc_req(struct hfi1_qp *qp)
ohdr,
bth0 | (qp->s_state << 24),
bth2,
- middle);
-done:
- ret = 1;
- goto unlock;
+ middle,
+ ps);
+ /* pbc: account for the 2 dword PBC prepended to the header */
+ ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
+ return 1;
+
+done_free_tx:
+ hfi1_put_txreq(ps->s_txreq);
+ ps->s_txreq = NULL;
+ return 1;
bail:
- qp->s_flags &= ~HFI1_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
+ hfi1_put_txreq(ps->s_txreq);
+
+bail_no_tx:
+ ps->s_txreq = NULL;
+ qp->s_flags &= ~RVT_S_BUSY;
+ qp->s_hdrwords = 0;
+ return 0;
}
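[editor's note] The restructured exits above split what used to be one bail label in two: "bail" still owns the pre-allocated tx request and must return it to the pool, while "bail_no_tx" only clears caller-visible state so no stale pointer escapes. A compilable sketch of the two-level bail, with stand-in names (make_request()/have_work are not driver functions):

#include <stdlib.h>

struct txreq { char hdr[64]; };

static int have_work;           /* stand-in for the QP send-state checks */

static int make_request(struct txreq **out)
{
    struct txreq *tx = malloc(sizeof(*tx));

    if (!tx)
        goto bail_no_tx;
    if (!have_work)
        goto bail;              /* owns tx: free before reporting idle */
    *out = tx;
    return 1;                   /* caller sends *out */
bail:
    free(tx);
bail_no_tx:
    *out = NULL;                /* never hand back a stale pointer */
    return 0;
}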
/**
@@ -682,7 +800,7 @@ unlock:
* Note that RDMA reads and atomics are handled in the
* send side QP state and tasklet.
*/
-void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct hfi1_qp *qp,
+void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
int is_fecn)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
@@ -700,7 +818,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct hfi1_qp *qp,
unsigned long flags;
/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
- if (qp->s_flags & HFI1_S_RESP_PENDING)
+ if (qp->s_flags & RVT_S_RESP_PENDING)
goto queue_ack;
/* Ensure s_rdma_ack_cnt changes are committed */
@@ -763,7 +881,7 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct hfi1_qp *qp,
goto queue_ack;
}
- trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);
+ trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);
/* write the pbc and data */
ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);
@@ -771,13 +889,13 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct hfi1_qp *qp,
return;
queue_ack:
- this_cpu_inc(*ibp->rc_qacks);
+ this_cpu_inc(*ibp->rvp.rc_qacks);
spin_lock_irqsave(&qp->s_lock, flags);
- qp->s_flags |= HFI1_S_ACK_PENDING | HFI1_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
qp->s_nak_state = qp->r_nak_state;
qp->s_ack_psn = qp->r_ack_psn;
if (is_fecn)
- qp->s_flags |= HFI1_S_ECN;
+ qp->s_flags |= RVT_S_ECN;
/* Schedule the send tasklet. */
hfi1_schedule_send(qp);
@@ -793,10 +911,10 @@ queue_ack:
* for the given QP.
* Called at interrupt level with the QP s_lock held.
*/
-static void reset_psn(struct hfi1_qp *qp, u32 psn)
+static void reset_psn(struct rvt_qp *qp, u32 psn)
{
u32 n = qp->s_acked;
- struct hfi1_swqe *wqe = get_swqe_ptr(qp, n);
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
u32 opcode;
qp->s_cur = n;
@@ -819,7 +937,7 @@ static void reset_psn(struct hfi1_qp *qp, u32 psn)
n = 0;
if (n == qp->s_tail)
break;
- wqe = get_swqe_ptr(qp, n);
+ wqe = rvt_get_swqe_ptr(qp, n);
diff = cmp_psn(psn, wqe->psn);
if (diff < 0)
break;
@@ -865,23 +983,23 @@ static void reset_psn(struct hfi1_qp *qp, u32 psn)
done:
qp->s_psn = psn;
/*
- * Set HFI1_S_WAIT_PSN as rc_complete() may start the timer
+ * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
* asynchronously before the send tasklet can get scheduled.
* Doing it in hfi1_make_rc_req() is too late.
*/
if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
(cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
- qp->s_flags |= HFI1_S_WAIT_PSN;
- qp->s_flags &= ~HFI1_S_AHG_VALID;
+ qp->s_flags |= RVT_S_WAIT_PSN;
+ qp->s_flags &= ~RVT_S_AHG_VALID;
}
/*
* Back up requester to resend the last un-ACKed request.
* The QP r_lock and s_lock should be held and interrupts disabled.
*/
-static void restart_rc(struct hfi1_qp *qp, u32 psn, int wait)
+static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
- struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
+ struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
struct hfi1_ibport *ibp;
if (qp->s_retry == 0) {
@@ -890,42 +1008,44 @@ static void restart_rc(struct hfi1_qp *qp, u32 psn, int wait)
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
- } else /* need to handle delayed completion */
+ } else { /* need to handle delayed completion */
return;
- } else
+ }
+ } else {
qp->s_retry--;
+ }
ibp = to_iport(qp->ibqp.device, qp->port_num);
if (wqe->wr.opcode == IB_WR_RDMA_READ)
- ibp->n_rc_resends++;
+ ibp->rvp.n_rc_resends++;
else
- ibp->n_rc_resends += delta_psn(qp->s_psn, psn);
+ ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
- qp->s_flags &= ~(HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR |
- HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_PSN |
- HFI1_S_WAIT_ACK);
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
+ RVT_S_WAIT_ACK);
if (wait)
- qp->s_flags |= HFI1_S_SEND_ONE;
+ qp->s_flags |= RVT_S_SEND_ONE;
reset_psn(qp, psn);
}
/*
* This is called from s_timer for missing responses.
*/
-static void rc_timeout(unsigned long arg)
+void hfi1_rc_timeout(unsigned long arg)
{
- struct hfi1_qp *qp = (struct hfi1_qp *)arg;
+ struct rvt_qp *qp = (struct rvt_qp *)arg;
struct hfi1_ibport *ibp;
unsigned long flags;
spin_lock_irqsave(&qp->r_lock, flags);
spin_lock(&qp->s_lock);
- if (qp->s_flags & HFI1_S_TIMER) {
+ if (qp->s_flags & RVT_S_TIMER) {
ibp = to_iport(qp->ibqp.device, qp->port_num);
- ibp->n_rc_timeouts++;
- qp->s_flags &= ~HFI1_S_TIMER;
+ ibp->rvp.n_rc_timeouts++;
+ qp->s_flags &= ~RVT_S_TIMER;
del_timer(&qp->s_timer);
trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1);
restart_rc(qp, qp->s_last_psn + 1, 1);
@@ -940,15 +1060,12 @@ static void rc_timeout(unsigned long arg)
*/
void hfi1_rc_rnr_retry(unsigned long arg)
{
- struct hfi1_qp *qp = (struct hfi1_qp *)arg;
+ struct rvt_qp *qp = (struct rvt_qp *)arg;
unsigned long flags;
spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & HFI1_S_WAIT_RNR) {
- qp->s_flags &= ~HFI1_S_WAIT_RNR;
- del_timer(&qp->s_timer);
- hfi1_schedule_send(qp);
- }
+ hfi1_stop_rnr_timer(qp);
+ hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
@@ -956,14 +1073,14 @@ void hfi1_rc_rnr_retry(unsigned long arg)
* Set qp->s_sending_psn to the next PSN after the given one.
* This would be psn+1 except when RDMA reads are present.
*/
-static void reset_sending_psn(struct hfi1_qp *qp, u32 psn)
+static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
- struct hfi1_swqe *wqe;
+ struct rvt_swqe *wqe;
u32 n = qp->s_last;
/* Find the work request corresponding to the given PSN. */
for (;;) {
- wqe = get_swqe_ptr(qp, n);
+ wqe = rvt_get_swqe_ptr(qp, n);
if (cmp_psn(psn, wqe->lpsn) <= 0) {
if (wqe->wr.opcode == IB_WR_RDMA_READ)
qp->s_sending_psn = wqe->lpsn + 1;
@@ -981,16 +1098,16 @@ static void reset_sending_psn(struct hfi1_qp *qp, u32 psn)
/*
* This should be called with the QP s_lock held and interrupts disabled.
*/
-void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr)
+void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
{
struct hfi1_other_headers *ohdr;
- struct hfi1_swqe *wqe;
+ struct rvt_swqe *wqe;
struct ib_wc wc;
unsigned i;
u32 opcode;
u32 psn;
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
/* Find out where the BTH is */
@@ -1016,22 +1133,30 @@ void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr)
*/
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
!(qp->s_flags &
- (HFI1_S_TIMER | HFI1_S_WAIT_RNR | HFI1_S_WAIT_PSN)) &&
- (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK))
- start_timer(qp);
+ (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
+ (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
+ hfi1_add_retry_timer(qp);
while (qp->s_last != qp->s_acked) {
- wqe = get_swqe_ptr(qp, qp->s_last);
+ u32 s_last;
+
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
+ s_last = qp->s_last;
+ if (++s_last >= qp->s_size)
+ s_last = 0;
+ qp->s_last = s_last;
+ /* see post_send() */
+ barrier();
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct hfi1_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- hfi1_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
/* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
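[editor's note] This hunk moves the s_last advance (with a compiler barrier, "see post_send()") ahead of the MR puts and the completion, so a poster woken by the CQE is guaranteed to observe the freed slot. A sketch of the same ordering with C11 release semantics standing in for barrier(); types and names are simplified stand-ins:

#include <stdatomic.h>

struct sq {
    _Atomic unsigned s_last;    /* freed-slot index, polled by the poster */
    unsigned size;
};

static void advance_then_complete(struct sq *q, void (*post_cqe)(void))
{
    unsigned last = atomic_load_explicit(&q->s_last, memory_order_relaxed);

    if (++last >= q->size)
        last = 0;
    /* publish the free slot before the completion becomes visible */
    atomic_store_explicit(&q->s_last, last, memory_order_release);
    post_cqe();
}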
@@ -1039,26 +1164,24 @@ void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr)
wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
- hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
}
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
}
/*
* If we were waiting for sends to complete before re-sending,
* and they are now complete, restart sending.
*/
trace_hfi1_rc_sendcomplete(qp, psn);
- if (qp->s_flags & HFI1_S_WAIT_PSN &&
+ if (qp->s_flags & RVT_S_WAIT_PSN &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- qp->s_flags &= ~HFI1_S_WAIT_PSN;
+ qp->s_flags &= ~RVT_S_WAIT_PSN;
qp->s_sending_psn = qp->s_psn;
qp->s_sending_hpsn = qp->s_psn - 1;
hfi1_schedule_send(qp);
}
}
-static inline void update_last_psn(struct hfi1_qp *qp, u32 psn)
+static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
qp->s_last_psn = psn;
}
@@ -1068,9 +1191,9 @@ static inline void update_last_psn(struct hfi1_qp *qp, u32 psn)
* This is similar to hfi1_send_complete but has to check to be sure
* that the SGEs are not being referenced if the SWQE is being resent.
*/
-static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp,
- struct hfi1_swqe *wqe,
- struct hfi1_ibport *ibp)
+static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
+ struct hfi1_ibport *ibp)
{
struct ib_wc wc;
unsigned i;
@@ -1082,13 +1205,21 @@ static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp,
*/
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+ u32 s_last;
+
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct hfi1_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- hfi1_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
+ s_last = qp->s_last;
+ if (++s_last >= qp->s_size)
+ s_last = 0;
+ qp->s_last = s_last;
+ /* see post_send() */
+ barrier();
/* Post a send completion queue entry if requested. */
- if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
@@ -1096,14 +1227,12 @@ static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp,
wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
wc.byte_len = wqe->length;
wc.qp = &qp->ibqp;
- hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
}
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
} else {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- this_cpu_inc(*ibp->rc_delayed_comp);
+ this_cpu_inc(*ibp->rvp.rc_delayed_comp);
/*
* If send progress not running attempt to progress
* SDMA queue.
@@ -1131,7 +1260,7 @@ static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp,
if (++qp->s_cur >= qp->s_size)
qp->s_cur = 0;
qp->s_acked = qp->s_cur;
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
if (qp->s_acked != qp->s_tail) {
qp->s_state = OP(SEND_LAST);
qp->s_psn = wqe->psn;
@@ -1141,7 +1270,7 @@ static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp,
qp->s_acked = 0;
if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
qp->s_draining = 0;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
}
return wqe;
}
@@ -1157,21 +1286,16 @@ static struct hfi1_swqe *do_rc_completion(struct hfi1_qp *qp,
* May be called at interrupt level, with the QP s_lock held.
* Returns 1 if OK, 0 if current operation should be aborted (NAK).
*/
-static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
+static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
u64 val, struct hfi1_ctxtdata *rcd)
{
struct hfi1_ibport *ibp;
enum ib_wc_status status;
- struct hfi1_swqe *wqe;
+ struct rvt_swqe *wqe;
int ret = 0;
u32 ack_psn;
int diff;
-
- /* Remove QP from retry timer */
- if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
+ unsigned long to;
/*
* Note that NAKs implicitly ACK outstanding SEND and RDMA write
@@ -1182,7 +1306,7 @@ static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
ack_psn = psn;
if (aeth >> 29)
ack_psn--;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
ibp = to_iport(qp->ibqp.device, qp->port_num);
/*
@@ -1200,7 +1324,7 @@ static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
diff == 0) {
ret = 1;
- goto bail;
+ goto bail_stop;
}
/*
* If this request is a RDMA read or atomic, and the ACK is
@@ -1217,11 +1341,11 @@ static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
(opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
/* Retry this request. */
- if (!(qp->r_flags & HFI1_R_RDMAR_SEQ)) {
- qp->r_flags |= HFI1_R_RDMAR_SEQ;
+ if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_SEND;
+ qp->r_flags |= RVT_R_RSP_SEND;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait,
&rcd->qp_wait_list);
@@ -1231,7 +1355,7 @@ static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
* No need to process the ACK/NAK since we are
* restarting an earlier request.
*/
- goto bail;
+ goto bail_stop;
}
if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
@@ -1244,14 +1368,14 @@ static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
qp->s_num_rd_atomic--;
/* Restart sending task if fence is complete */
- if ((qp->s_flags & HFI1_S_WAIT_FENCE) &&
+ if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
!qp->s_num_rd_atomic) {
- qp->s_flags &= ~(HFI1_S_WAIT_FENCE |
- HFI1_S_WAIT_ACK);
+ qp->s_flags &= ~(RVT_S_WAIT_FENCE |
+ RVT_S_WAIT_ACK);
hfi1_schedule_send(qp);
- } else if (qp->s_flags & HFI1_S_WAIT_RDMAR) {
- qp->s_flags &= ~(HFI1_S_WAIT_RDMAR |
- HFI1_S_WAIT_ACK);
+ } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
+ qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
+ RVT_S_WAIT_ACK);
hfi1_schedule_send(qp);
}
}
@@ -1262,40 +1386,43 @@ static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
switch (aeth >> 29) {
case 0: /* ACK */
- this_cpu_inc(*ibp->rc_acks);
+ this_cpu_inc(*ibp->rvp.rc_acks);
if (qp->s_acked != qp->s_tail) {
/*
* We are expecting more ACKs so
- * reset the re-transmit timer.
+ * mod the retry timer.
*/
- start_timer(qp);
+ hfi1_mod_retry_timer(qp);
/*
* We can stop re-sending the earlier packets and
* continue with the next packet the receiver wants.
*/
if (cmp_psn(qp->s_psn, psn) <= 0)
reset_psn(qp, psn + 1);
- } else if (cmp_psn(qp->s_psn, psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = psn + 1;
+ } else {
+ /* No more acks - kill all timers */
+ hfi1_stop_rc_timers(qp);
+ if (cmp_psn(qp->s_psn, psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = psn + 1;
+ }
}
- if (qp->s_flags & HFI1_S_WAIT_ACK) {
- qp->s_flags &= ~HFI1_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
hfi1_schedule_send(qp);
}
hfi1_get_credit(qp, aeth);
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
qp->s_retry = qp->s_retry_cnt;
update_last_psn(qp, psn);
- ret = 1;
- goto bail;
+ return 1;
case 1: /* RNR NAK */
- ibp->n_rnr_naks++;
+ ibp->rvp.n_rnr_naks++;
if (qp->s_acked == qp->s_tail)
- goto bail;
- if (qp->s_flags & HFI1_S_WAIT_RNR)
- goto bail;
+ goto bail_stop;
+ if (qp->s_flags & RVT_S_WAIT_RNR)
+ goto bail_stop;
if (qp->s_rnr_retry == 0) {
status = IB_WC_RNR_RETRY_EXC_ERR;
goto class_b;
@@ -1306,28 +1433,27 @@ static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
/* The last valid PSN is the previous PSN. */
update_last_psn(qp, psn - 1);
- ibp->n_rc_resends += delta_psn(qp->s_psn, psn);
+ ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
reset_psn(qp, psn);
- qp->s_flags &= ~(HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_ACK);
- qp->s_flags |= HFI1_S_WAIT_RNR;
- qp->s_timer.function = hfi1_rc_rnr_retry;
- qp->s_timer.expires = jiffies + usecs_to_jiffies(
+ qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
+ hfi1_stop_rc_timers(qp);
+ to =
ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
- HFI1_AETH_CREDIT_MASK]);
- add_timer(&qp->s_timer);
- goto bail;
+ HFI1_AETH_CREDIT_MASK];
+ hfi1_add_rnr_timer(qp, to);
+ return 0;
case 3: /* NAK */
if (qp->s_acked == qp->s_tail)
- goto bail;
+ goto bail_stop;
/* The last valid PSN is the previous PSN. */
update_last_psn(qp, psn - 1);
switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
HFI1_AETH_CREDIT_MASK) {
case 0: /* PSN sequence error */
- ibp->n_seq_naks++;
+ ibp->rvp.n_seq_naks++;
/*
* Back up to the responder's expected PSN.
* Note that we might get a NAK in the middle of an
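[editor's note] In the RNR NAK case above, the 5-bit timeout code is extracted from the AETH credit field and expanded to microseconds through ib_hfi1_rnr_table[], then handed to hfi1_add_rnr_timer() instead of arming s_timer by hand. A sketch of the decode; the shift and mask values here are illustrative — the real ones come from HFI1_AETH_CREDIT_SHIFT/MASK in the driver headers:

#include <stdint.h>

static uint32_t rnr_to_usecs(uint32_t aeth, const uint32_t rnr_table[32])
{
    return rnr_table[(aeth >> 24) & 0x1f];  /* 5-bit RNR timeout code */
}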
@@ -1340,21 +1466,21 @@ static int do_rc_ack(struct hfi1_qp *qp, u32 aeth, u32 psn, int opcode,
case 1: /* Invalid Request */
status = IB_WC_REM_INV_REQ_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
goto class_b;
case 2: /* Remote Access Error */
status = IB_WC_REM_ACCESS_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
goto class_b;
case 3: /* Remote Operation Error */
status = IB_WC_REM_OP_ERR;
- ibp->n_other_naks++;
+ ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
hfi1_send_complete(qp, wqe, status);
- hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1364,15 +1490,16 @@ class_b:
}
qp->s_retry = qp->s_retry_cnt;
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- goto bail;
+ goto bail_stop;
default: /* 2: reserved */
reserved:
/* Ignore reserved NAK codes. */
- goto bail;
+ goto bail_stop;
}
-
-bail:
+ return ret;
+bail_stop:
+ hfi1_stop_rc_timers(qp);
return ret;
}
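[editor's note] The rework above deletes the open-coded timer removal at the top of do_rc_ack() and instead funnels every error exit through one "bail_stop" label, so the RC timers are torn down exactly once while success paths return directly and keep them armed. A minimal sketch of the single-teardown-label idiom (stop_timers() is a stand-in for hfi1_stop_rc_timers()):

static void stop_timers(void) { /* stand-in for hfi1_stop_rc_timers() */ }

static int handle_ack(int good)
{
    int ret = 0;

    if (!good)
        goto bail_stop;
    return 1;                   /* ACK accepted, timers stay armed */
bail_stop:
    stop_timers();
    return ret;
}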
@@ -1380,18 +1507,15 @@ bail:
* We have seen an out of sequence RDMA read middle or last packet.
* This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
*/
-static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn,
+static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
struct hfi1_ctxtdata *rcd)
{
- struct hfi1_swqe *wqe;
+ struct rvt_swqe *wqe;
/* Remove QP from retry timer */
- if (qp->s_flags & (HFI1_S_TIMER | HFI1_S_WAIT_RNR)) {
- qp->s_flags &= ~(HFI1_S_TIMER | HFI1_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
+ hfi1_stop_rc_timers(qp);
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
while (cmp_psn(psn, wqe->lpsn) > 0) {
if (wqe->wr.opcode == IB_WR_RDMA_READ ||
@@ -1401,11 +1525,11 @@ static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn,
wqe = do_rc_completion(qp, wqe, ibp);
}
- ibp->n_rdma_seq++;
- qp->r_flags |= HFI1_R_RDMAR_SEQ;
+ ibp->rvp.n_rdma_seq++;
+ qp->r_flags |= RVT_R_RDMAR_SEQ;
restart_rc(qp, qp->s_last_psn + 1, 0);
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_SEND;
+ qp->r_flags |= RVT_R_RSP_SEND;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
@@ -1429,11 +1553,11 @@ static void rdma_seq_err(struct hfi1_qp *qp, struct hfi1_ibport *ibp, u32 psn,
*/
static void rc_rcv_resp(struct hfi1_ibport *ibp,
struct hfi1_other_headers *ohdr,
- void *data, u32 tlen, struct hfi1_qp *qp,
+ void *data, u32 tlen, struct rvt_qp *qp,
u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
struct hfi1_ctxtdata *rcd)
{
- struct hfi1_swqe *wqe;
+ struct rvt_swqe *wqe;
enum ib_wc_status status;
unsigned long flags;
int diff;
@@ -1446,7 +1570,8 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
trace_hfi1_rc_ack(qp, psn);
/* Ignore invalid responses. */
- if (cmp_psn(psn, qp->s_next_psn) >= 0)
+ smp_read_barrier_depends(); /* see post_one_send */
+ if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
/* Ignore duplicate responses. */
@@ -1465,15 +1590,15 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
* Skip everything other than the PSN we expect, if we are waiting
* for a reply to a restarted RDMA read or atomic op.
*/
- if (qp->r_flags & HFI1_R_RDMAR_SEQ) {
+ if (qp->r_flags & RVT_R_RDMAR_SEQ) {
if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
goto ack_done;
- qp->r_flags &= ~HFI1_R_RDMAR_SEQ;
+ qp->r_flags &= ~RVT_R_RDMAR_SEQ;
}
if (unlikely(qp->s_acked == qp->s_tail))
goto ack_done;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
status = IB_WC_SUCCESS;
switch (opcode) {
@@ -1484,14 +1609,15 @@ static void rc_rcv_resp(struct hfi1_ibport *ibp,
if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
__be32 *p = ohdr->u.at.atomic_ack_eth;
- val = ((u64) be32_to_cpu(p[0]) << 32) |
+ val = ((u64)be32_to_cpu(p[0]) << 32) |
be32_to_cpu(p[1]);
- } else
+ } else {
val = 0;
+ }
if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
opcode != OP(RDMA_READ_RESPONSE_FIRST))
goto ack_done;
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
goto ack_op_err;
/*
@@ -1519,10 +1645,10 @@ read_middle:
* We got a response so update the timeout.
* 4.096 usec. * (1 << qp->timeout)
*/
- qp->s_flags |= HFI1_S_TIMER;
+ qp->s_flags |= RVT_S_TIMER;
mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
- if (qp->s_flags & HFI1_S_WAIT_ACK) {
- qp->s_flags &= ~HFI1_S_WAIT_ACK;
+ if (qp->s_flags & RVT_S_WAIT_ACK) {
+ qp->s_flags &= ~RVT_S_WAIT_ACK;
hfi1_schedule_send(qp);
}
@@ -1536,7 +1662,7 @@ read_middle:
qp->s_rdma_read_len -= pmtu;
update_last_psn(qp, psn);
spin_unlock_irqrestore(&qp->s_lock, flags);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
goto bail;
case OP(RDMA_READ_RESPONSE_ONLY):
@@ -1556,7 +1682,7 @@ read_middle:
* have to be careful to copy the data to the right
* location.
*/
- wqe = get_swqe_ptr(qp, qp->s_acked);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
wqe, psn, pmtu);
goto read_last;
@@ -1580,9 +1706,9 @@ read_last:
if (unlikely(tlen != qp->s_rdma_read_len))
goto ack_len_err;
aeth = be32_to_cpu(ohdr->u.aeth);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+ hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
WARN_ON(qp->s_rdma_read_sge.num_sge);
- (void) do_rc_ack(qp, aeth, psn,
+ (void)do_rc_ack(qp, aeth, psn,
OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
goto ack_done;
}
@@ -1600,7 +1726,7 @@ ack_len_err:
ack_err:
if (qp->s_last == qp->s_acked) {
hfi1_send_complete(qp, wqe, status);
- hfi1_error_qp(qp, IB_WC_WR_FLUSH_ERR);
+ rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1609,22 +1735,24 @@ bail:
}
static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
- struct hfi1_qp *qp)
+ struct rvt_qp *qp)
{
if (list_empty(&qp->rspwait)) {
- qp->r_flags |= HFI1_R_RSP_DEFERED_ACK;
+ qp->r_flags |= RVT_R_RSP_NAK;
atomic_inc(&qp->refcount);
list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
}
}
-static inline void rc_cancel_ack(struct hfi1_qp *qp)
+static inline void rc_cancel_ack(struct rvt_qp *qp)
{
- qp->r_adefered = 0;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ priv->r_adefered = 0;
if (list_empty(&qp->rspwait))
return;
list_del_init(&qp->rspwait);
- qp->r_flags &= ~HFI1_R_RSP_DEFERED_ACK;
+ qp->r_flags &= ~RVT_R_RSP_NAK;
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
}
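[editor's note] rc_cancel_ack() now reaches the deferred-ACK counter through qp->priv: the field moved out of the generic QP into the driver-private blob so common rdmavt code never sees it. A sketch of the pattern with simplified stand-in types for struct rvt_qp / struct hfi1_qp_priv:

struct qp_stub   { void *priv; };
struct priv_stub { unsigned r_adefered; };

static void cancel_deferred_acks(struct qp_stub *qp)
{
    struct priv_stub *priv = qp->priv;

    priv->r_adefered = 0;       /* forget the coalesced ACK count */
}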
@@ -1645,11 +1773,11 @@ static inline void rc_cancel_ack(struct hfi1_qp *qp)
* schedule a response to be sent.
*/
static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
- struct hfi1_qp *qp, u32 opcode, u32 psn, int diff,
- struct hfi1_ctxtdata *rcd)
+ struct rvt_qp *qp, u32 opcode, u32 psn,
+ int diff, struct hfi1_ctxtdata *rcd)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct hfi1_ack_entry *e;
+ struct rvt_ack_entry *e;
unsigned long flags;
u8 i, prev;
int old_req;
@@ -1662,7 +1790,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
* Don't queue the NAK if we already sent one.
*/
if (!qp->r_nak_state) {
- ibp->n_rc_seqnak++;
+ ibp->rvp.n_rc_seqnak++;
qp->r_nak_state = IB_NAK_PSN_ERROR;
/* Use the expected PSN. */
qp->r_ack_psn = qp->r_psn;
@@ -1694,7 +1822,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
*/
e = NULL;
old_req = 1;
- ibp->n_rc_dupreq++;
+ ibp->rvp.n_rc_dupreq++;
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1747,7 +1875,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done;
if (e->rdma_sge.mr) {
- hfi1_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
if (len != 0) {
@@ -1755,8 +1883,8 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
u64 vaddr = be64_to_cpu(reth->vaddr);
int ok;
- ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
- IB_ACCESS_REMOTE_READ);
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
+ IB_ACCESS_REMOTE_READ);
if (unlikely(!ok))
goto unlock_done;
} else {
@@ -1778,7 +1906,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
* or the send tasklet is already backed up to send an
* earlier entry, we can ignore this request.
*/
- if (!e || e->opcode != (u8) opcode || old_req)
+ if (!e || e->opcode != (u8)opcode || old_req)
goto unlock_done;
qp->s_tail_ack_queue = prev;
break;
@@ -1810,7 +1938,7 @@ static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
break;
}
qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags |= HFI1_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
qp->r_nak_state = 0;
hfi1_schedule_send(qp);
@@ -1823,13 +1951,13 @@ send_ack:
return 0;
}
-void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err)
+void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
unsigned long flags;
int lastwqe;
spin_lock_irqsave(&qp->s_lock, flags);
- lastwqe = hfi1_error_qp(qp, err);
+ lastwqe = rvt_error_qp(qp, err);
spin_unlock_irqrestore(&qp->s_lock, flags);
if (lastwqe) {
@@ -1842,7 +1970,7 @@ void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err)
}
}
-static inline void update_ack_queue(struct hfi1_qp *qp, unsigned n)
+static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
unsigned next;
@@ -1864,14 +1992,14 @@ static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
spin_lock_irqsave(&ppd->cc_log_lock, flags);
- ppd->threshold_cong_event_map[sl/8] |= 1 << (sl % 8);
+ ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
ppd->threshold_event_counter++;
cc_event = &ppd->cc_events[ppd->cc_log_idx++];
if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
ppd->cc_log_idx = 0;
- cc_event->lqpn = lqpn & HFI1_QPN_MASK;
- cc_event->rqpn = rqpn & HFI1_QPN_MASK;
+ cc_event->lqpn = lqpn & RVT_QPN_MASK;
+ cc_event->rqpn = rqpn & RVT_QPN_MASK;
cc_event->sl = sl;
cc_event->svc_type = svc_type;
cc_event->rlid = rlid;
@@ -1897,7 +2025,7 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
cc_state = get_cc_state(ppd);
- if (cc_state == NULL)
+ if (!cc_state)
return;
/*
@@ -1957,7 +2085,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
u32 tlen = packet->tlen;
- struct hfi1_qp *qp = packet->qp;
+ struct rvt_qp *qp = packet->qp;
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_other_headers *ohdr = packet->ohdr;
@@ -1972,6 +2100,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
unsigned long flags;
u32 bth1;
int ret, is_fecn = 0;
+ int copy_last = 0;
bth0 = be32_to_cpu(ohdr->bth[0]);
if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
@@ -2054,13 +2183,13 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
break;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp_comm_est(qp);
/* OK, process the packet. */
switch (opcode) {
case OP(SEND_FIRST):
- ret = hfi1_get_rwqe(qp, 0);
+ ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2076,12 +2205,12 @@ send_middle:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, 1);
+ hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
/* consume RWQE */
- ret = hfi1_get_rwqe(qp, 1);
+ ret = hfi1_rvt_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2090,7 +2219,7 @@ send_middle:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
- ret = hfi1_get_rwqe(qp, 0);
+ ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2104,8 +2233,10 @@ send_last_imm:
wc.ex.imm_data = ohdr->u.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
goto send_last;
- case OP(SEND_LAST):
case OP(RDMA_WRITE_LAST):
+ copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
+ /* fall through */
+ case OP(SEND_LAST):
no_immediate_data:
wc.wc_flags = 0;
wc.ex.imm_data = 0;
@@ -2121,10 +2252,10 @@ send_last:
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
- hfi1_put_ss(&qp->r_sge);
+ hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
+ rvt_put_ss(&qp->r_sge);
qp->r_msn++;
- if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
break;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -2154,12 +2285,14 @@ send_last:
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
- hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (bth0 & IB_BTH_SOLICITED) != 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ (bth0 & IB_BTH_SOLICITED) != 0);
break;
- case OP(RDMA_WRITE_FIRST):
case OP(RDMA_WRITE_ONLY):
+ copy_last = 1;
+ /* fall through */
+ case OP(RDMA_WRITE_FIRST):
case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
goto nack_inv;
@@ -2174,8 +2307,8 @@ send_last:
int ok;
/* Check rkey & NAK */
- ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
- rkey, IB_ACCESS_REMOTE_WRITE);
+ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
+ rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto nack_acc;
qp->r_sge.num_sge = 1;
@@ -2190,7 +2323,7 @@ send_last:
goto send_middle;
else if (opcode == OP(RDMA_WRITE_ONLY))
goto no_immediate_data;
- ret = hfi1_get_rwqe(qp, 1);
+ ret = hfi1_rvt_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
if (!ret)
@@ -2200,7 +2333,7 @@ send_last:
goto send_last;
case OP(RDMA_READ_REQUEST): {
- struct hfi1_ack_entry *e;
+ struct rvt_ack_entry *e;
u32 len;
u8 next;
@@ -2218,7 +2351,7 @@ send_last:
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- hfi1_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
reth = &ohdr->u.rc.reth;
@@ -2229,8 +2362,8 @@ send_last:
int ok;
/* Check rkey & NAK */
- ok = hfi1_rkey_ok(qp, &e->rdma_sge, len, vaddr,
- rkey, IB_ACCESS_REMOTE_READ);
+ ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
+ rkey, IB_ACCESS_REMOTE_READ);
if (unlikely(!ok))
goto nack_acc_unlck;
/*
@@ -2261,7 +2394,7 @@ send_last:
qp->r_head_ack_queue = next;
/* Schedule the send tasklet. */
- qp->s_flags |= HFI1_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2273,7 +2406,7 @@ send_last:
case OP(COMPARE_SWAP):
case OP(FETCH_ADD): {
struct ib_atomic_eth *ateth;
- struct hfi1_ack_entry *e;
+ struct rvt_ack_entry *e;
u64 vaddr;
atomic64_t *maddr;
u64 sdata;
@@ -2293,29 +2426,29 @@ send_last:
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- hfi1_put_mr(e->rdma_sge.mr);
+ rvt_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
ateth = &ohdr->u.atomic_eth;
- vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
+ vaddr = ((u64)be32_to_cpu(ateth->vaddr[0]) << 32) |
be32_to_cpu(ateth->vaddr[1]);
if (unlikely(vaddr & (sizeof(u64) - 1)))
goto nack_inv_unlck;
rkey = be32_to_cpu(ateth->rkey);
/* Check rkey & NAK */
- if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- vaddr, rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ vaddr, rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
goto nack_acc_unlck;
/* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
sdata = be64_to_cpu(ateth->swap_data);
e->atomic_data = (opcode == OP(FETCH_ADD)) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ (u64)atomic64_add_return(sdata, maddr) - sdata :
+ (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
be64_to_cpu(ateth->compare_data),
sdata);
- hfi1_put_mr(qp->r_sge.sge.mr);
+ rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
e->opcode = opcode;
e->sent = 0;
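[editor's note] The responder above executes FETCH_ADD with atomic64_add_return() (recovering the pre-add value by subtracting sdata) and COMPARE_SWAP with cmpxchg() (which returns the prior contents either way). User-space analogues with C11 atomics; atomic_fetch_add() yields the old value directly, so no subtraction is needed:

#include <stdatomic.h>
#include <stdint.h>

static uint64_t verbs_fetch_add(_Atomic uint64_t *m, uint64_t sdata)
{
    return atomic_fetch_add(m, sdata);          /* value before the add */
}

static uint64_t verbs_cmp_swap(_Atomic uint64_t *m, uint64_t compare,
                               uint64_t swap)
{
    uint64_t old = compare;

    /* swap in 'swap' only if *m == compare; either way 'old' ends up
     * holding the prior contents, which go back in the ACK */
    atomic_compare_exchange_strong(m, &old, swap);
    return old;
}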
@@ -2328,7 +2461,7 @@ send_last:
qp->r_head_ack_queue = next;
/* Schedule the send tasklet. */
- qp->s_flags |= HFI1_S_RESP_PENDING;
+ qp->s_flags |= RVT_S_RESP_PENDING;
hfi1_schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2347,11 +2480,13 @@ send_last:
qp->r_nak_state = 0;
/* Send an ACK if requested or required. */
if (psn & IB_BTH_REQ_ACK) {
+ struct hfi1_qp_priv *priv = qp->priv;
+
if (packet->numpkt == 0) {
rc_cancel_ack(qp);
goto send_ack;
}
- if (qp->r_adefered >= HFI1_PSN_CREDIT) {
+ if (priv->r_adefered >= HFI1_PSN_CREDIT) {
rc_cancel_ack(qp);
goto send_ack;
}
@@ -2359,13 +2494,13 @@ send_last:
rc_cancel_ack(qp);
goto send_ack;
}
- qp->r_adefered++;
+ priv->r_adefered++;
rc_defered_ack(rcd, qp);
}
return;
rnr_nak:
- qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
+ qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
qp->r_ack_psn = qp->r_psn;
/* Queue RNR NAK for later */
rc_defered_ack(rcd, qp);
@@ -2403,7 +2538,7 @@ void hfi1_rc_hdrerr(
struct hfi1_ctxtdata *rcd,
struct hfi1_ib_header *hdr,
u32 rcv_flags,
- struct hfi1_qp *qp)
+ struct rvt_qp *qp)
{
int has_grh = rcv_flags & HFI1_HAS_GRH;
struct hfi1_other_headers *ohdr;
@@ -2428,7 +2563,7 @@ void hfi1_rc_hdrerr(
if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
diff = delta_psn(psn, qp->r_psn);
if (!qp->r_nak_state && diff >= 0) {
- ibp->n_rc_seqnak++;
+ ibp->rvp.n_rc_seqnak++;
qp->r_nak_state = IB_NAK_PSN_ERROR;
/* Use the expected PSN. */
qp->r_ack_psn = qp->r_psn;
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/staging/rdma/hfi1/ruc.c
index 4a91975b68d7..08813cdbd475 100644
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ b/drivers/staging/rdma/hfi1/ruc.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -53,7 +50,8 @@
#include "hfi.h"
#include "mad.h"
#include "qp.h"
-#include "sdma.h"
+#include "verbs_txreq.h"
+#include "trace.h"
/*
* Convert the AETH RNR timeout code into the number of microseconds.
@@ -97,16 +95,16 @@ const u32 ib_hfi1_rnr_table[32] = {
* Validate a RWQE and fill in the SGE state.
* Return 1 if OK.
*/
-static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
+static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
int i, j, ret;
struct ib_wc wc;
- struct hfi1_lkey_table *rkt;
- struct hfi1_pd *pd;
- struct hfi1_sge_state *ss;
+ struct rvt_lkey_table *rkt;
+ struct rvt_pd *pd;
+ struct rvt_sge_state *ss;
- rkt = &to_idev(qp->ibqp.device)->lk_table;
- pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
+ rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
+ pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
ss = &qp->r_sge;
ss->sg_list = qp->r_sg_list;
qp->r_len = 0;
@@ -114,8 +112,8 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
if (wqe->sg_list[i].length == 0)
continue;
/* Check LKEY */
- if (!hfi1_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
- &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
+ if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
+ &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
goto bad_lkey;
qp->r_len += wqe->sg_list[i].length;
j++;
@@ -127,9 +125,9 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
bad_lkey:
while (j) {
- struct hfi1_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
+ struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
- hfi1_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
ss->num_sge = 0;
memset(&wc, 0, sizeof(wc));
@@ -138,14 +136,14 @@ bad_lkey:
wc.opcode = IB_WC_RECV;
wc.qp = &qp->ibqp;
/* Signal solicited completion event. */
- hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
ret = 0;
bail:
return ret;
}
/**
- * hfi1_get_rwqe - copy the next RWQE into the QP's RWQE
+ * hfi1_rvt_get_rwqe - copy the next RWQE into the QP's RWQE
* @qp: the QP
* @wr_id_only: update qp->r_wr_id only, not qp->r_sge
*
@@ -154,19 +152,19 @@ bail:
*
* Can be called from interrupt level.
*/
-int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only)
+int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
{
unsigned long flags;
- struct hfi1_rq *rq;
- struct hfi1_rwq *wq;
- struct hfi1_srq *srq;
- struct hfi1_rwqe *wqe;
+ struct rvt_rq *rq;
+ struct rvt_rwq *wq;
+ struct rvt_srq *srq;
+ struct rvt_rwqe *wqe;
void (*handler)(struct ib_event *, void *);
u32 tail;
int ret;
if (qp->ibqp.srq) {
- srq = to_isrq(qp->ibqp.srq);
+ srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
handler = srq->ibsrq.event_handler;
rq = &srq->rq;
} else {
@@ -176,7 +174,7 @@ int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only)
}
spin_lock_irqsave(&rq->lock, flags);
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
ret = 0;
goto unlock;
}
@@ -192,7 +190,7 @@ int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only)
}
/* Make sure entry is read after head index is read. */
smp_rmb();
- wqe = get_rwqe_ptr(rq, tail);
+ wqe = rvt_get_rwqe_ptr(rq, tail);
/*
* Even though we update the tail index in memory, the verbs
* consumer is not supposed to post more entries until a
@@ -208,7 +206,7 @@ int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only)
qp->r_wr_id = wqe->wr_id;
ret = 1;
- set_bit(HFI1_R_WRID_VALID, &qp->r_aflags);
+ set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
if (handler) {
u32 n;
@@ -265,7 +263,7 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
* The s_lock will be acquired around the hfi1_migrate_qp() call.
*/
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
- int has_grh, struct hfi1_qp *qp, u32 bth0)
+ int has_grh, struct rvt_qp *qp, u32 bth0)
{
__be64 guid;
unsigned long flags;
@@ -279,11 +277,13 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
goto err;
guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
+ guid))
goto err;
- if (!gid_ok(&hdr->u.l.grh.sgid,
- qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
- qp->alt_ah_attr.grh.dgid.global.interface_id))
+ if (!gid_ok(
+ &hdr->u.l.grh.sgid,
+ qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->alt_ah_attr.grh.dgid.global.interface_id))
goto err;
}
if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
@@ -312,11 +312,13 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
goto err;
guid = get_sguid(ibp,
qp->remote_ah_attr.grh.sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
+ if (!gid_ok(&hdr->u.l.grh.dgid, ibp->rvp.gid_prefix,
+ guid))
goto err;
- if (!gid_ok(&hdr->u.l.grh.sgid,
- qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
- qp->remote_ah_attr.grh.dgid.global.interface_id))
+ if (!gid_ok(
+ &hdr->u.l.grh.sgid,
+ qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
+ qp->remote_ah_attr.grh.dgid.global.interface_id))
goto err;
}
if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), (u16)bth0,
@@ -355,12 +357,12 @@ err:
* receive interrupts since this is a connected protocol and all packets
* will pass through here.
*/
-static void ruc_loopback(struct hfi1_qp *sqp)
+static void ruc_loopback(struct rvt_qp *sqp)
{
struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct hfi1_qp *qp;
- struct hfi1_swqe *wqe;
- struct hfi1_sge *sge;
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_sge *sge;
unsigned long flags;
struct ib_wc wc;
u64 sdata;
@@ -368,6 +370,8 @@ static void ruc_loopback(struct hfi1_qp *sqp)
enum ib_wc_status send_status;
int release;
int ret;
+ int copy_last = 0;
+ u32 to;
rcu_read_lock();
@@ -375,25 +379,27 @@ static void ruc_loopback(struct hfi1_qp *sqp)
* Note that we check the responder QP state after
* checking the requester's state.
*/
- qp = hfi1_lookup_qpn(ibp, sqp->remote_qpn);
+ qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
+ sqp->remote_qpn);
spin_lock_irqsave(&sqp->s_lock, flags);
/* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT)) ||
- !(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
+ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
goto unlock;
- sqp->s_flags |= HFI1_S_BUSY;
+ sqp->s_flags |= RVT_S_BUSY;
again:
- if (sqp->s_last == sqp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (sqp->s_last == ACCESS_ONCE(sqp->s_head))
goto clr_busy;
- wqe = get_swqe_ptr(sqp, sqp->s_last);
+ wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
/* Return if it is not OK to start a new work request. */
- if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_hfi1_state_ops[sqp->state] & HFI1_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
goto clr_busy;
/* We are in the error state, flush the work request. */
send_status = IB_WC_WR_FLUSH_ERR;
@@ -411,9 +417,9 @@ again:
}
spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (!qp || !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) ||
+ if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
/*
* For RC, the requester would timeout and retry so
* shortcut the timeouts and just signal too many retries.
@@ -439,7 +445,7 @@ again:
wc.ex.imm_data = wqe->wr.ex.imm_data;
/* FALLTHROUGH */
case IB_WR_SEND:
- ret = hfi1_get_rwqe(qp, 0);
+ ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto op_err;
if (!ret)
@@ -451,21 +457,24 @@ again:
goto inv_err;
wc.wc_flags = IB_WC_WITH_IMM;
wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = hfi1_get_rwqe(qp, 1);
+ ret = hfi1_rvt_get_rwqe(qp, 1);
if (ret < 0)
goto op_err;
if (!ret)
goto rnr_nak;
- /* FALLTHROUGH */
+ /* skip the copy_last assignment and the qp_access_flags recheck */
+ goto do_write;
case IB_WR_RDMA_WRITE:
+ copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
goto inv_err;
+do_write:
if (wqe->length == 0)
break;
- if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
goto acc_err;
qp->r_sge.sg_list = NULL;
qp->r_sge.num_sge = 1;
@@ -475,10 +484,10 @@ again:
case IB_WR_RDMA_READ:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
goto inv_err;
- if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
+ if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_READ)))
goto acc_err;
release = 0;
sqp->s_sge.sg_list = NULL;
@@ -493,20 +502,20 @@ again:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
- if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->atomic_wr.remote_addr,
+ wqe->atomic_wr.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
goto acc_err;
/* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
+ maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
sdata = wqe->atomic_wr.compare_add;
- *(u64 *) sqp->s_sge.sge.vaddr =
+ *(u64 *)sqp->s_sge.sge.vaddr =
(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
+ (u64)atomic64_add_return(sdata, maddr) - sdata :
+ (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
sdata, wqe->atomic_wr.swap);
- hfi1_put_mr(qp->r_sge.sge.mr);
+ rvt_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
goto send_comp;
@@ -524,17 +533,17 @@ again:
if (len > sge->sge_length)
len = sge->sge_length;
WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release);
+ hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (!release)
- hfi1_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--sqp->s_sge.num_sge)
*sge = *sqp->s_sge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -547,9 +556,9 @@ again:
sqp->s_len -= len;
}
if (release)
- hfi1_put_ss(&qp->r_sge);
+ rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto send_comp;
if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
@@ -565,12 +574,12 @@ again:
wc.sl = qp->remote_ah_attr.sl;
wc.port_num = 1;
/* Signal completion event if the solicited bit is set. */
- hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
send_comp:
spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->n_loop_pkts++;
+ ibp->rvp.n_loop_pkts++;
flush_send:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
hfi1_send_complete(sqp, wqe, send_status);
@@ -580,7 +589,7 @@ rnr_nak:
/* Handle RNR NAK */
if (qp->ibqp.qp_type == IB_QPT_UC)
goto send_comp;
- ibp->n_rnr_naks++;
+ ibp->rvp.n_rnr_naks++;
/*
* Note: we don't need the s_lock held since the BUSY flag
* makes this single threaded.
@@ -592,13 +601,10 @@ rnr_nak:
if (sqp->s_rnr_retry_cnt < 7)
sqp->s_rnr_retry--;
spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_hfi1_state_ops[sqp->state] & HFI1_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
goto clr_busy;
- sqp->s_flags |= HFI1_S_WAIT_RNR;
- sqp->s_timer.function = hfi1_rc_rnr_retry;
- sqp->s_timer.expires = jiffies +
- usecs_to_jiffies(ib_hfi1_rnr_table[qp->r_min_rnr_timer]);
- add_timer(&sqp->s_timer);
+ to = ib_hfi1_rnr_table[qp->r_min_rnr_timer];
+ hfi1_add_rnr_timer(sqp, to);
goto clr_busy;
op_err:
@@ -622,9 +628,9 @@ serr:
spin_lock_irqsave(&sqp->s_lock, flags);
hfi1_send_complete(sqp, wqe, send_status);
if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = hfi1_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+ int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
- sqp->s_flags &= ~HFI1_S_BUSY;
+ sqp->s_flags &= ~RVT_S_BUSY;
spin_unlock_irqrestore(&sqp->s_lock, flags);
if (lastwqe) {
struct ib_event ev;
@@ -637,7 +643,7 @@ serr:
goto done;
}
clr_busy:
- sqp->s_flags &= ~HFI1_S_BUSY;
+ sqp->s_flags &= ~RVT_S_BUSY;
unlock:
spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
@@ -666,7 +672,7 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
hdr->next_hdr = IB_GRH_NEXT_HDR;
hdr->hop_limit = grh->hop_limit;
/* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
+ hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
hdr->sgid.global.interface_id =
grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
ibp->guids[grh->sgid_index - 1] :
@@ -690,29 +696,31 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
* Subsequent middles use the copied entry, editing the
* PSN with 1 or 2 edits.
*/
-static inline void build_ahg(struct hfi1_qp *qp, u32 npsn)
+static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
- if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (unlikely(qp->s_flags & RVT_S_AHG_CLEAR))
clear_ahg(qp);
- if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
+ if (!(qp->s_flags & RVT_S_AHG_VALID)) {
/* first middle that needs copy */
if (qp->s_ahgidx < 0)
- qp->s_ahgidx = sdma_ahg_alloc(qp->s_sde);
+ qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
if (qp->s_ahgidx >= 0) {
qp->s_ahgpsn = npsn;
- qp->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
+ priv->s_hdr->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
/* save to protect a change in another thread */
- qp->s_hdr->sde = qp->s_sde;
- qp->s_hdr->ahgidx = qp->s_ahgidx;
- qp->s_flags |= HFI1_S_AHG_VALID;
+ priv->s_hdr->sde = priv->s_sde;
+ priv->s_hdr->ahgidx = qp->s_ahgidx;
+ qp->s_flags |= RVT_S_AHG_VALID;
}
} else {
/* subsequent middle after valid */
if (qp->s_ahgidx >= 0) {
- qp->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG;
- qp->s_hdr->ahgidx = qp->s_ahgidx;
- qp->s_hdr->ahgcount++;
- qp->s_hdr->ahgdesc[0] =
+ priv->s_hdr->tx_flags |= SDMA_TXREQ_F_USE_AHG;
+ priv->s_hdr->ahgidx = qp->s_ahgidx;
+ priv->s_hdr->ahgcount++;
+ priv->s_hdr->ahgdesc[0] =
sdma_build_ahg_descriptor(
(__force u16)cpu_to_be16((u16)npsn),
BTH2_OFFSET,
@@ -720,8 +728,8 @@ static inline void build_ahg(struct hfi1_qp *qp, u32 npsn)
16);
if ((npsn & 0xffff0000) !=
(qp->s_ahgpsn & 0xffff0000)) {
- qp->s_hdr->ahgcount++;
- qp->s_hdr->ahgdesc[1] =
+ priv->s_hdr->ahgcount++;
+ priv->s_hdr->ahgdesc[1] =
sdma_build_ahg_descriptor(
(__force u16)cpu_to_be16(
(u16)(npsn >> 16)),
@@ -733,10 +741,12 @@ static inline void build_ahg(struct hfi1_qp *qp, u32 npsn)
}
}
-void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle)
+void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
+ u32 bth0, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps)
{
- struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ibport *ibp = ps->ibp;
u16 lrh0;
u32 nwords;
u32 extra_bytes;
@@ -747,13 +757,14 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
nwords = (qp->s_cur_size + extra_bytes) >> 2;
lrh0 = HFI1_LRH_BTH;
if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
- qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh,
- &qp->remote_ah_attr.grh,
- qp->s_hdrwords, nwords);
+ qp->s_hdrwords += hfi1_make_grh(ibp,
+ &ps->s_txreq->phdr.hdr.u.l.grh,
+ &qp->remote_ah_attr.grh,
+ qp->s_hdrwords, nwords);
lrh0 = HFI1_LRH_GRH;
middle = 0;
}
- lrh0 |= (qp->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
+ lrh0 |= (priv->s_sc & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
/*
* reset s_hdr/AHG fields
*
@@ -765,10 +776,10 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
* build_ahg() will modify as appropriate
* to use the AHG feature.
*/
- qp->s_hdr->tx_flags = 0;
- qp->s_hdr->ahgcount = 0;
- qp->s_hdr->ahgidx = 0;
- qp->s_hdr->sde = NULL;
+ priv->s_hdr->tx_flags = 0;
+ priv->s_hdr->ahgcount = 0;
+ priv->s_hdr->ahgidx = 0;
+ priv->s_hdr->sde = NULL;
if (qp->s_mig_state == IB_MIG_MIGRATED)
bth0 |= IB_BTH_MIG_REQ;
else
@@ -776,19 +787,19 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
if (middle)
build_ahg(qp, bth2);
else
- qp->s_flags &= ~HFI1_S_AHG_VALID;
- qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr->ibh.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
- qp->s_hdr->ibh.lrh[2] =
+ qp->s_flags &= ~RVT_S_AHG_VALID;
+ ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
+ ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
+ ps->s_txreq->phdr.hdr.lrh[2] =
cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- qp->s_hdr->ibh.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
+ ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
qp->remote_ah_attr.src_path_bits);
bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
bth0 |= extra_bytes << 20;
ohdr->bth[0] = cpu_to_be32(bth0);
bth1 = qp->remote_qpn;
- if (qp->s_flags & HFI1_S_ECN) {
- qp->s_flags &= ~HFI1_S_ECN;
+ if (qp->s_flags & RVT_S_ECN) {
+ qp->s_flags &= ~RVT_S_ECN;
/* we recently received a FECN, so return a BECN */
bth1 |= (HFI1_BECN_MASK << HFI1_BECN_SHIFT);
}
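[editor's note] The LRH word-0 assembly in this hunk packs the service class (now taken from the driver-private s_sc) into bits 15:12 and the service level into bits 7:4, on top of the link-next-header constant. A sketch of the packing, field positions per the code above:

#include <stdint.h>

static uint16_t pack_lrh0(uint16_t lnh, uint8_t sc, uint8_t sl)
{
    return lnh | ((uint16_t)(sc & 0xf) << 12) | ((sl & 0xf) << 4);
}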
@@ -799,6 +810,14 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
+void _hfi1_do_send(struct work_struct *work)
+{
+ struct iowait *wait = container_of(work, struct iowait, iowork);
+ struct rvt_qp *qp = iowait_to_qp(wait);
+
+ hfi1_do_send(qp);
+}
+
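Note: _hfi1_do_send() above is now just the workqueue shim; the body moved into hfi1_do_send(qp) so the send engine can also be entered directly with a QP in hand. A sketch of the assumed wiring (the field names follow the container_of() above; the real driver may route this through an iowait_init()-style helper):

	/* hypothetical: point the iowait work item at the new shim */
	INIT_WORK(&priv->s_iowait.iowork, _hfi1_do_send);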
/**
* hfi1_do_send - perform a send on a QP
* @work: contains a pointer to the QP
@@ -807,34 +826,45 @@ void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
* exhausted. Only allow one CPU to send a packet per QP (tasklet).
* Otherwise, two threads could send packets out of order.
*/
-void hfi1_do_send(struct work_struct *work)
+void hfi1_do_send(struct rvt_qp *qp)
{
- struct iowait *wait = container_of(work, struct iowait, iowork);
- struct hfi1_qp *qp = container_of(wait, struct hfi1_qp, s_iowait);
struct hfi1_pkt_state ps;
- int (*make_req)(struct hfi1_qp *qp);
+ struct hfi1_qp_priv *priv = qp->priv;
+ int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
unsigned long flags;
unsigned long timeout;
+ unsigned long timeout_int;
+ int cpu;
ps.dev = to_idev(qp->ibqp.device);
ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
ps.ppd = ppd_from_ibp(ps.ibp);
- if ((qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC) &&
- !loopback &&
- (qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc) - 1)) ==
- ps.ppd->lid) {
- ruc_loopback(qp);
- return;
- }
-
- if (qp->ibqp.qp_type == IB_QPT_RC)
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+		if (!loopback &&
+		    ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc) - 1)) ==
+		     ps.ppd->lid)) {
+ ruc_loopback(qp);
+ return;
+ }
make_req = hfi1_make_rc_req;
- else if (qp->ibqp.qp_type == IB_QPT_UC)
+ timeout_int = (qp->timeout_jiffies);
+ break;
+ case IB_QPT_UC:
+		if (!loopback &&
+		    ((qp->remote_ah_attr.dlid & ~((1 << ps.ppd->lmc) - 1)) ==
+		     ps.ppd->lid)) {
+ ruc_loopback(qp);
+ return;
+ }
make_req = hfi1_make_uc_req;
- else
+ timeout_int = SEND_RESCHED_TIMEOUT;
+ break;
+ default:
make_req = hfi1_make_ud_req;
+ timeout_int = SEND_RESCHED_TIMEOUT;
+ }
spin_lock_irqsave(&qp->s_lock, flags);
@@ -844,57 +874,83 @@ void hfi1_do_send(struct work_struct *work)
return;
}
- qp->s_flags |= HFI1_S_BUSY;
+ qp->s_flags |= RVT_S_BUSY;
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- timeout = jiffies + SEND_RESCHED_TIMEOUT;
+ timeout = jiffies + (timeout_int) / 8;
+ cpu = priv->s_sde ? priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(ps.ppd->dd->node));
+	/* ensure a pre-built packet is handled */
+ ps.s_txreq = get_waiting_verbs_txreq(qp);
do {
/* Check for a constructed packet to be sent. */
if (qp->s_hdrwords != 0) {
+ spin_unlock_irqrestore(&qp->s_lock, flags);
/*
* If the packet cannot be sent now, return and
* the send tasklet will be woken up later.
*/
if (hfi1_verbs_send(qp, &ps))
- break;
+ return;
/* Record that s_hdr is empty. */
qp->s_hdrwords = 0;
+ /* allow other tasks to run */
+ if (unlikely(time_after(jiffies, timeout))) {
+ if (workqueue_congested(cpu,
+ ps.ppd->hfi1_wq)) {
+ spin_lock_irqsave(&qp->s_lock, flags);
+ qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_schedule_send(qp);
+ spin_unlock_irqrestore(&qp->s_lock,
+ flags);
+ this_cpu_inc(
+ *ps.ppd->dd->send_schedule);
+ return;
+ }
+ if (!irqs_disabled()) {
+ cond_resched();
+ this_cpu_inc(
+ *ps.ppd->dd->send_schedule);
+ }
+ timeout = jiffies + (timeout_int) / 8;
+ }
+ spin_lock_irqsave(&qp->s_lock, flags);
}
+ } while (make_req(qp, &ps));
- /* allow other tasks to run */
- if (unlikely(time_after(jiffies, timeout))) {
- cond_resched();
- ps.ppd->dd->verbs_dev.n_send_schedule++;
- timeout = jiffies + SEND_RESCHED_TIMEOUT;
- }
- } while (make_req(qp));
+ spin_unlock_irqrestore(&qp->s_lock, flags);
}
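The rewritten loop above bounds how long hfi1_do_send() may monopolize a CPU: every timeout_int/8 jiffies it drops s_lock and either requeues itself (when the selected CPU's workqueue is congested) or yields with cond_resched(). The same cooperative-yield pattern in isolation, as a sketch (wq, my_work, and budget are illustrative; the calls are standard kernel primitives):

	if (unlikely(time_after(jiffies, timeout))) {
		if (workqueue_congested(cpu, wq)) {
			/* back off: requeue and let the queue drain */
			queue_work_on(cpu, wq, &my_work);
			return;
		}
		if (!irqs_disabled())
			cond_resched();	/* voluntarily yield the CPU */
		timeout = jiffies + budget;	/* re-arm the budget */
	}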
/*
* This should be called with s_lock held.
*/
-void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
+void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status)
{
u32 old_last, last;
unsigned i;
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_OR_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
return;
+ last = qp->s_last;
+ old_last = last;
+ if (++last >= qp->s_size)
+ last = 0;
+ qp->s_last = last;
+ /* See post_send() */
+ barrier();
for (i = 0; i < wqe->wr.num_sge; i++) {
- struct hfi1_sge *sge = &wqe->sg_list[i];
+ struct rvt_sge *sge = &wqe->sg_list[i];
- hfi1_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
}
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
/* See ch. 11.2.4.1 and 10.7.3.1 */
- if (!(qp->s_flags & HFI1_S_SIGNAL_REQ_WR) ||
+ if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED) ||
status != IB_WC_SUCCESS) {
struct ib_wc wc;
@@ -906,15 +962,10 @@ void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
wc.qp = &qp->ibqp;
if (status == IB_WC_SUCCESS)
wc.byte_len = wqe->length;
- hfi1_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
+ status != IB_WC_SUCCESS);
}
- last = qp->s_last;
- old_last = last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
if (qp->s_acked == old_last)
qp->s_acked = last;
if (qp->s_cur == old_last)
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c
index 9a15f1f32b45..abb8ebc1fcac 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/staging/rdma/hfi1/sdma.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -112,10 +109,10 @@ MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptor before interrupt");
| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
/* sdma_sendctrl operations */
-#define SDMA_SENDCTRL_OP_ENABLE (1U << 0)
-#define SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
-#define SDMA_SENDCTRL_OP_HALT (1U << 2)
-#define SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
+#define SDMA_SENDCTRL_OP_ENABLE BIT(0)
+#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
+#define SDMA_SENDCTRL_OP_HALT BIT(2)
+#define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
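For reference, these BIT()/BIT_ULL() conversions (and the matching ones in sdma.h below) are behavior-preserving; the kernel defines the macros as:

	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))

The only subtlety is that BIT() widens the old 1U constants to unsigned long, which is harmless for these low bit positions.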
/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
@@ -325,9 +322,9 @@ static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
if (lcnt++ > 500) {
/* timed out - bounce the link */
dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
- __func__, sde->this_idx, (u32)reg);
+ __func__, sde->this_idx, (u32)reg);
queue_work(dd->pport->hfi1_wq,
- &dd->pport->link_bounce_work);
+ &dd->pport->link_bounce_work);
break;
}
udelay(1);
@@ -361,6 +358,28 @@ static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
write_sde_csr(sde, SD(DESC_CNT), reg);
}
+static inline void complete_tx(struct sdma_engine *sde,
+ struct sdma_txreq *tx,
+ int res)
+{
+ /* protect against complete modifying */
+ struct iowait *wait = tx->wait;
+ callback_t complete = tx->complete;
+
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ trace_hfi1_sdma_out_sn(sde, tx->sn);
+ if (WARN_ON_ONCE(sde->head_sn != tx->sn))
+ dd_dev_err(sde->dd, "expected %llu got %llu\n",
+ sde->head_sn, tx->sn);
+ sde->head_sn++;
+#endif
+ sdma_txclean(sde->dd, tx);
+ if (complete)
+ (*complete)(tx, res);
+ if (iowait_sdma_dec(wait) && wait)
+ iowait_drain_wakeup(wait);
+}
+
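complete_tx() consolidates three previously duplicated completion paths (sdma_flush(), sdma_flush_descq(), and sdma_make_progress(), all converted below) and reflects the callback change from three arguments to two: the old "drained" flag is gone because draining is now detected inside the helper via iowait_sdma_dec(). A minimal callback under the new contract might look like this (example_complete is hypothetical):

	static void example_complete(struct sdma_txreq *tx, int status)
	{
		if (status != SDMA_TXREQ_S_OK)
			pr_debug("tx %p failed/aborted: %d\n", tx, status);
		/* per-request teardown goes here; there is no longer a
		 * "drained" argument to propagate
		 */
	}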
/*
* Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
*
@@ -395,27 +414,8 @@ static void sdma_flush(struct sdma_engine *sde)
}
spin_unlock_irqrestore(&sde->flushlist_lock, flags);
/* flush from flush list */
- list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
- int drained = 0;
- /* protect against complete modifying */
- struct iowait *wait = txp->wait;
-
- list_del_init(&txp->list);
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- trace_hfi1_sdma_out_sn(sde, txp->sn);
- if (WARN_ON_ONCE(sde->head_sn != txp->sn))
- dd_dev_err(sde->dd, "expected %llu got %llu\n",
- sde->head_sn, txp->sn);
- sde->head_sn++;
-#endif
- sdma_txclean(sde->dd, txp);
- if (wait)
- drained = atomic_dec_and_test(&wait->sdma_busy);
- if (txp->complete)
- (*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);
- if (wait && drained)
- iowait_drain_wakeup(wait);
- }
+ list_for_each_entry_safe(txp, txp_next, &flushlist, list)
+ complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
}
/*
@@ -455,8 +455,8 @@ static void sdma_err_halt_wait(struct work_struct *work)
break;
if (time_after(jiffies, timeout)) {
dd_dev_err(sde->dd,
- "SDMA engine %d - timeout waiting for engine to halt\n",
- sde->this_idx);
+ "SDMA engine %d - timeout waiting for engine to halt\n",
+ sde->this_idx);
/*
* Continue anyway. This could happen if there was
* an uncorrectable error in the wrong spot.
@@ -472,7 +472,6 @@ static void sdma_err_halt_wait(struct work_struct *work)
static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
-
unsigned index;
struct hfi1_devdata *dd = sde->dd;
@@ -531,7 +530,7 @@ static void sdma_err_progress_check(unsigned long data)
static void sdma_hw_clean_up_task(unsigned long opaque)
{
- struct sdma_engine *sde = (struct sdma_engine *) opaque;
+ struct sdma_engine *sde = (struct sdma_engine *)opaque;
u64 statuscsr;
while (1) {
@@ -577,31 +576,10 @@ static void sdma_flush_descq(struct sdma_engine *sde)
head = ++sde->descq_head & sde->sdma_mask;
/* if now past this txp's descs, do the callback */
if (txp && txp->next_descq_idx == head) {
- int drained = 0;
- /* protect against complete modifying */
- struct iowait *wait = txp->wait;
-
/* remove from list */
sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
- if (wait)
- drained = atomic_dec_and_test(&wait->sdma_busy);
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- trace_hfi1_sdma_out_sn(sde, txp->sn);
- if (WARN_ON_ONCE(sde->head_sn != txp->sn))
- dd_dev_err(sde->dd, "expected %llu got %llu\n",
- sde->head_sn, txp->sn);
- sde->head_sn++;
-#endif
- sdma_txclean(sde->dd, txp);
+ complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
trace_hfi1_sdma_progress(sde, head, tail, txp);
- if (txp->complete)
- (*txp->complete)(
- txp,
- SDMA_TXREQ_S_ABORTED,
- drained);
- if (wait && drained)
- iowait_drain_wakeup(wait);
- /* see if there is another txp */
txp = get_txhead(sde);
}
progress++;
@@ -612,7 +590,7 @@ static void sdma_flush_descq(struct sdma_engine *sde)
static void sdma_sw_clean_up_task(unsigned long opaque)
{
- struct sdma_engine *sde = (struct sdma_engine *) opaque;
+ struct sdma_engine *sde = (struct sdma_engine *)opaque;
unsigned long flags;
spin_lock_irqsave(&sde->tail_lock, flags);
@@ -627,7 +605,6 @@ static void sdma_sw_clean_up_task(unsigned long opaque)
* descq are ours to play with.
*/
-
/*
* In the error clean up sequence, software clean must be called
* before the hardware clean so we can use the hardware head in
@@ -676,7 +653,7 @@ static void sdma_start_hw_clean_up(struct sdma_engine *sde)
}
static void sdma_set_state(struct sdma_engine *sde,
- enum sdma_states next_state)
+ enum sdma_states next_state)
{
struct sdma_state *ss = &sde->state;
const struct sdma_set_state_action *action = sdma_action_table;
@@ -692,8 +669,8 @@ static void sdma_set_state(struct sdma_engine *sde,
ss->previous_op = ss->current_op;
ss->current_state = next_state;
- if (ss->previous_state != sdma_state_s99_running
- && next_state == sdma_state_s99_running)
+ if (ss->previous_state != sdma_state_s99_running &&
+ next_state == sdma_state_s99_running)
sdma_flush(sde);
if (action[next_state].op_enable)
@@ -890,6 +867,9 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
newmap->actual_vls = num_vls;
newmap->vls = roundup_pow_of_two(num_vls);
newmap->mask = (1 << ilog2(newmap->vls)) - 1;
+ /* initialize back-map */
+ for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
+ newmap->engine_to_vl[i] = -1;
for (i = 0; i < newmap->vls; i++) {
/* save for wrap around */
int first_engine = engine;
@@ -913,6 +893,9 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
/* wrap back to first engine */
engine = first_engine;
}
+ /* assign back-map */
+ for (j = 0; j < vl_engines[i]; j++)
+ newmap->engine_to_vl[first_engine + j] = i;
} else {
/* just re-use entry without allocating */
newmap->map[i] = newmap->map[i % num_vls];
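The engine_to_vl[] back-map (initialized to -1 above, then filled per engine) makes engine-to-vl reverse lookups O(1). A hypothetical helper built on it, assuming the usual RCU discipline around dd->sdma_map:

	static int engine_index_to_vl(struct hfi1_devdata *dd, u8 idx)
	{
		struct sdma_vl_map *m;
		int vl = -1;

		rcu_read_lock();
		m = rcu_dereference(dd->sdma_map);
		if (m && idx < TXE_NUM_SDMA_ENGINES)
			vl = m->engine_to_vl[idx];
		rcu_read_unlock();
		return vl;
	}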
@@ -922,7 +905,7 @@ int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
/* newmap in hand, save old map */
spin_lock_irq(&dd->sde_map_lock);
oldmap = rcu_dereference_protected(dd->sdma_map,
- lockdep_is_held(&dd->sde_map_lock));
+ lockdep_is_held(&dd->sde_map_lock));
/* publish newmap */
rcu_assign_pointer(dd->sdma_map, newmap);
@@ -983,7 +966,7 @@ static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
sde->tx_ring = NULL;
}
spin_lock_irq(&dd->sde_map_lock);
- kfree(rcu_access_pointer(dd->sdma_map));
+ sdma_map_free(rcu_access_pointer(dd->sdma_map));
RCU_INIT_POINTER(dd->sdma_map, NULL);
spin_unlock_irq(&dd->sde_map_lock);
synchronize_rcu();
@@ -1020,19 +1003,19 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
return 0;
}
if (mod_num_sdma &&
- /* can't exceed chip support */
- mod_num_sdma <= dd->chip_sdma_engines &&
- /* count must be >= vls */
- mod_num_sdma >= num_vls)
+ /* can't exceed chip support */
+ mod_num_sdma <= dd->chip_sdma_engines &&
+ /* count must be >= vls */
+ mod_num_sdma >= num_vls)
num_engines = mod_num_sdma;
dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
- dd->chip_sdma_mem_size);
+ dd->chip_sdma_mem_size);
per_sdma_credits =
- dd->chip_sdma_mem_size/(num_engines * SDMA_BLOCK_SIZE);
+ dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
/* set up freeze waitqueue */
init_waitqueue_head(&dd->sdma_unfreeze_wq);
@@ -1040,7 +1023,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
descq_cnt = sdma_get_descq_cnt();
dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
- num_engines, descq_cnt);
+ num_engines, descq_cnt);
/* alloc memory for array of send engines */
dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
@@ -1061,18 +1044,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
sde->desc_avail = sdma_descq_freecnt(sde);
sde->sdma_shift = ilog2(descq_cnt);
sde->sdma_mask = (1 << sde->sdma_shift) - 1;
- sde->descq_full_count = 0;
-
- /* Create a mask for all 3 chip interrupt sources */
- sde->imask = (u64)1 << (0*TXE_NUM_SDMA_ENGINES + this_idx)
- | (u64)1 << (1*TXE_NUM_SDMA_ENGINES + this_idx)
- | (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
- /* Create a mask specifically for sdma_idle */
- sde->idle_mask =
- (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
- /* Create a mask specifically for sdma_progress */
- sde->progress_mask =
- (u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx);
+
+ /* Create a mask specifically for each interrupt source */
+ sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
+ this_idx);
+ sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
+ this_idx);
+ sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
+ this_idx);
+ /* Create a combined mask to cover all 3 interrupt sources */
+ sde->imask = sde->int_mask | sde->progress_mask |
+ sde->idle_mask;
+
spin_lock_init(&sde->tail_lock);
seqlock_init(&sde->head_lock);
spin_lock_init(&sde->senddmactrl_lock);
@@ -1100,10 +1083,10 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
SDMA_DESC1_INT_REQ_FLAG;
tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
- (unsigned long)sde);
+ (unsigned long)sde);
tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
- (unsigned long)sde);
+ (unsigned long)sde);
INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
INIT_WORK(&sde->flush_worker, sdma_field_flush);
@@ -1251,11 +1234,10 @@ void sdma_exit(struct hfi1_devdata *dd)
for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
++this_idx) {
-
sde = &dd->per_sdma[this_idx];
if (!list_empty(&sde->dmawait))
dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
- sde->this_idx);
+ sde->this_idx);
sdma_process_event(sde, sdma_event_e00_go_hw_down);
del_timer_sync(&sde->err_progress_check_timer);
@@ -1358,8 +1340,8 @@ retry:
use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
(dd->flags & HFI1_HAS_SDMA_TIMEOUT);
hwhead = use_dmahead ?
- (u16) le64_to_cpu(*sde->head_dma) :
- (u16) read_sde_csr(sde, SD(HEAD));
+ (u16)le64_to_cpu(*sde->head_dma) :
+ (u16)read_sde_csr(sde, SD(HEAD));
if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
u16 cnt;
@@ -1385,9 +1367,9 @@ retry:
if (unlikely(!sane)) {
dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
- sde->this_idx,
- use_dmahead ? "dma" : "kreg",
- hwhead, swhead, swtail, cnt);
+ sde->this_idx,
+ use_dmahead ? "dma" : "kreg",
+ hwhead, swhead, swtail, cnt);
if (use_dmahead) {
/* try one more time, using csr */
use_dmahead = 0;
@@ -1464,7 +1446,7 @@ static void sdma_make_progress(struct sdma_engine *sde, u64 status)
{
struct sdma_txreq *txp = NULL;
int progress = 0;
- u16 hwhead, swhead, swtail;
+ u16 hwhead, swhead;
int idle_check_done = 0;
hwhead = sdma_gethead(sde);
@@ -1485,29 +1467,9 @@ retry:
/* if now past this txp's descs, do the callback */
if (txp && txp->next_descq_idx == swhead) {
- int drained = 0;
- /* protect against complete modifying */
- struct iowait *wait = txp->wait;
-
/* remove from list */
sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
- if (wait)
- drained = atomic_dec_and_test(&wait->sdma_busy);
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- trace_hfi1_sdma_out_sn(sde, txp->sn);
- if (WARN_ON_ONCE(sde->head_sn != txp->sn))
- dd_dev_err(sde->dd, "expected %llu got %llu\n",
- sde->head_sn, txp->sn);
- sde->head_sn++;
-#endif
- sdma_txclean(sde->dd, txp);
- if (txp->complete)
- (*txp->complete)(
- txp,
- SDMA_TXREQ_S_OK,
- drained);
- if (wait && drained)
- iowait_drain_wakeup(wait);
+ complete_tx(sde, txp, SDMA_TXREQ_S_OK);
/* see if there is another txp */
txp = get_txhead(sde);
}
@@ -1525,6 +1487,8 @@ retry:
* of sdma_make_progress(..) which is ensured by idle_check_done flag
*/
if ((status & sde->idle_mask) && !idle_check_done) {
+ u16 swtail;
+
swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
if (swtail != hwhead) {
hwhead = (u16)read_sde_csr(sde, SD(HEAD));
@@ -1552,6 +1516,12 @@ void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
trace_hfi1_sdma_engine_interrupt(sde, status);
write_seqlock(&sde->head_lock);
sdma_set_desc_cnt(sde, sdma_desct_intr);
+ if (status & sde->idle_mask)
+ sde->idle_int_cnt++;
+ else if (status & sde->progress_mask)
+ sde->progress_int_cnt++;
+ else if (status & sde->int_mask)
+ sde->sdma_int_cnt++;
sdma_make_progress(sde, status);
write_sequnlock(&sde->head_lock);
}
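With the masks built in sdma_init() above (for engine i and N = TXE_NUM_SDMA_ENGINES: status bit 0*N+i is the plain SDMA interrupt, 1*N+i progress, 2*N+i idle, and imask their union), the new counters classify each interrupt by source. The same classification as a standalone sketch:

	static const char *sdma_int_source(struct sdma_engine *sde, u64 status)
	{
		if (status & sde->idle_mask)
			return "idle";
		if (status & sde->progress_mask)
			return "progress";
		if (status & sde->int_mask)
			return "sdma";
		return "none";
	}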
@@ -1577,10 +1547,10 @@ void sdma_engine_error(struct sdma_engine *sde, u64 status)
__sdma_process_event(sde, sdma_event_e60_hw_halted);
if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
dd_dev_err(sde->dd,
- "SDMA (%u) engine error: 0x%llx state %s\n",
- sde->this_idx,
- (unsigned long long)status,
- sdma_state_names[sde->state.current_state]);
+ "SDMA (%u) engine error: 0x%llx state %s\n",
+ sde->this_idx,
+ (unsigned long long)status,
+ sdma_state_names[sde->state.current_state]);
dump_sdma_state(sde);
}
write_sequnlock(&sde->head_lock);
@@ -1624,8 +1594,8 @@ static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
if (op & SDMA_SENDCTRL_OP_CLEANUP)
write_sde_csr(sde, SD(CTRL),
- sde->p_senddmactrl |
- SD(CTRL_SDMA_CLEANUP_SMASK));
+ sde->p_senddmactrl |
+ SD(CTRL_SDMA_CLEANUP_SMASK));
else
write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
@@ -1649,12 +1619,10 @@ static void sdma_setlengen(struct sdma_engine *sde)
* generation counter.
*/
write_sde_csr(sde, SD(LEN_GEN),
- (sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT)
- );
+ (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
write_sde_csr(sde, SD(LEN_GEN),
- ((sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT))
- | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
- );
+ ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
+ (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
}
static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
@@ -1714,7 +1682,6 @@ static void set_sdma_integrity(struct sdma_engine *sde)
write_sde_csr(sde, SD(CHECK_ENABLE), reg);
}
-
static void init_sdma_regs(
struct sdma_engine *sde,
u32 credits,
@@ -1735,17 +1702,16 @@ static void init_sdma_regs(
write_sde_csr(sde, SD(DESC_CNT), 0);
write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
write_sde_csr(sde, SD(MEMORY),
- ((u64)credits <<
- SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
- ((u64)(credits * sde->this_idx) <<
- SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
+ ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
+ ((u64)(credits * sde->this_idx) <<
+ SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
set_sdma_integrity(sde);
opmask = OPCODE_CHECK_MASK_DISABLED;
opval = OPCODE_CHECK_VAL_DISABLED;
write_sde_csr(sde, SD(CHECK_OPCODE),
- (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
- (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
+ (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
+ (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
}
#ifdef CONFIG_SDMA_VERBOSITY
@@ -1824,12 +1790,9 @@ static void dump_sdma_state(struct sdma_engine *sde)
descq = sde->descq;
dd_dev_err(sde->dd,
- "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
- sde->this_idx,
- head,
- tail,
- cnt,
- !list_empty(&sde->flushlist));
+ "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
+ sde->this_idx, head, tail, cnt,
+ !list_empty(&sde->flushlist));
/* print info for each entry in the descriptor queue */
while (head != tail) {
@@ -1850,20 +1813,23 @@ static void dump_sdma_state(struct sdma_engine *sde)
len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
& SDMA_DESC0_BYTE_COUNT_MASK;
dd_dev_err(sde->dd,
- "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
- head, flags, addr, gen, len);
+ "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
+ head, flags, addr, gen, len);
dd_dev_err(sde->dd,
- "\tdesc0:0x%016llx desc1 0x%016llx\n",
- desc[0], desc[1]);
+ "\tdesc0:0x%016llx desc1 0x%016llx\n",
+ desc[0], desc[1]);
if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
dd_dev_err(sde->dd,
- "\taidx: %u amode: %u alen: %u\n",
- (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
- >> SDMA_DESC1_HEADER_INDEX_SHIFT),
- (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
- >> SDMA_DESC1_HEADER_MODE_SHIFT),
- (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
- >> SDMA_DESC1_HEADER_DWS_SHIFT));
+ "\taidx: %u amode: %u alen: %u\n",
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_INDEX_SMASK) >>
+ SDMA_DESC1_HEADER_INDEX_SHIFT),
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_MODE_SMASK) >>
+ SDMA_DESC1_HEADER_MODE_SHIFT),
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_DWS_SMASK) >>
+ SDMA_DESC1_HEADER_DWS_SHIFT));
head++;
head &= sde->sdma_mask;
}
@@ -1890,29 +1856,26 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
head = sde->descq_head & sde->sdma_mask;
tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
seq_printf(s, SDE_FMT, sde->this_idx,
- sde->cpu,
- sdma_state_name(sde->state.current_state),
- (unsigned long long)read_sde_csr(sde, SD(CTRL)),
- (unsigned long long)read_sde_csr(sde, SD(STATUS)),
- (unsigned long long)read_sde_csr(sde,
- SD(ENG_ERR_STATUS)),
- (unsigned long long)read_sde_csr(sde, SD(TAIL)),
- tail,
- (unsigned long long)read_sde_csr(sde, SD(HEAD)),
- head,
- (unsigned long long)le64_to_cpu(*sde->head_dma),
- (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
- (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
- (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
- (unsigned long long)sde->last_status,
- (unsigned long long)sde->ahg_bits,
- sde->tx_tail,
- sde->tx_head,
- sde->descq_tail,
- sde->descq_head,
+ sde->cpu,
+ sdma_state_name(sde->state.current_state),
+ (unsigned long long)read_sde_csr(sde, SD(CTRL)),
+ (unsigned long long)read_sde_csr(sde, SD(STATUS)),
+ (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
+ (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
+ (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
+ (unsigned long long)le64_to_cpu(*sde->head_dma),
+ (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
+ (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
+ (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
+ (unsigned long long)sde->last_status,
+ (unsigned long long)sde->ahg_bits,
+ sde->tx_tail,
+ sde->tx_head,
+ sde->descq_tail,
+ sde->descq_head,
!list_empty(&sde->flushlist),
- sde->descq_full_count,
- (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
+ sde->descq_full_count,
+ (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
/* print info for each entry in the descriptor queue */
while (head != tail) {
@@ -1933,14 +1896,16 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
& SDMA_DESC0_BYTE_COUNT_MASK;
seq_printf(s,
- "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
- head, flags, addr, gen, len);
+ "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
+ head, flags, addr, gen, len);
if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
- (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
- >> SDMA_DESC1_HEADER_INDEX_SHIFT),
- (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
- >> SDMA_DESC1_HEADER_MODE_SHIFT));
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_INDEX_SMASK) >>
+ SDMA_DESC1_HEADER_INDEX_SHIFT),
+ (u8)((desc[1] &
+ SDMA_DESC1_HEADER_MODE_SMASK) >>
+ SDMA_DESC1_HEADER_MODE_SHIFT));
head = (head + 1) & sde->sdma_mask;
}
}
@@ -2041,8 +2006,9 @@ static int sdma_check_progress(
ret = wait->sleep(sde, wait, tx, seq);
if (ret == -EAGAIN)
sde->desc_avail = sdma_descq_freecnt(sde);
- } else
+ } else {
ret = -EBUSY;
+ }
return ret;
}
@@ -2080,14 +2046,14 @@ retry:
goto nodesc;
tail = submit_tx(sde, tx);
if (wait)
- atomic_inc(&wait->sdma_busy);
+ iowait_sdma_inc(wait);
sdma_update_tail(sde, tail);
unlock:
spin_unlock_irqrestore(&sde->tail_lock, flags);
return ret;
unlock_noconn:
if (wait)
- atomic_inc(&wait->sdma_busy);
+ iowait_sdma_inc(wait);
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
tx->sn = sde->tail_sn++;
@@ -2132,13 +2098,12 @@ nodesc:
* side locking.
*
* Return:
- * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
- * (wait == NULL)
+ * > 0 - Success (value is number of sdma_txreq's submitted),
+ * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
-int sdma_send_txlist(struct sdma_engine *sde,
- struct iowait *wait,
- struct list_head *tx_list)
+int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
+ struct list_head *tx_list)
{
struct sdma_txreq *tx, *tx_next;
int ret = 0;
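Per the revised kernel-doc above, sdma_send_txlist() now returns the number of txreqs submitted on success instead of 0, so callers must treat any positive value as a count. A hypothetical call site under the new contract:

	ret = sdma_send_txlist(sde, wait, &tx_list);
	if (ret > 0)
		submitted += ret;	/* ret txreqs were accepted */
	else if (ret < 0 && ret != -EBUSY && ret != -EIOCBQUEUED)
		goto bail;		/* hard failure */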
@@ -2169,18 +2134,18 @@ retry:
}
update_tail:
if (wait)
- atomic_add(count, &wait->sdma_busy);
+ iowait_sdma_add(wait, count);
if (tail != INVALID_TAIL)
sdma_update_tail(sde, tail);
spin_unlock_irqrestore(&sde->tail_lock, flags);
- return ret;
+ return ret == 0 ? count : ret;
unlock_noconn:
spin_lock(&sde->flushlist_lock);
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
tx->wait = wait;
list_del_init(&tx->list);
if (wait)
- atomic_inc(&wait->sdma_busy);
+ iowait_sdma_inc(wait);
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
tx->sn = sde->tail_sn++;
@@ -2206,8 +2171,7 @@ nodesc:
goto update_tail;
}
-static void sdma_process_event(struct sdma_engine *sde,
- enum sdma_events event)
+static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
{
unsigned long flags;
@@ -2224,7 +2188,7 @@ static void sdma_process_event(struct sdma_engine *sde,
}
static void __sdma_process_event(struct sdma_engine *sde,
- enum sdma_events event)
+ enum sdma_events event)
{
struct sdma_state *ss = &sde->state;
int need_progress = 0;
@@ -2247,14 +2211,15 @@ static void __sdma_process_event(struct sdma_engine *sde,
* of link up, then we need to start up.
* This can happen when hw down is requested while
* bringing the link up with traffic active on
- * 7220, e.g. */
+ * 7220, e.g.
+ */
ss->go_s99_running = 1;
/* fall through and start dma engine */
case sdma_event_e10_go_hw_start:
/* This reference means the state machine is started */
sdma_get(&sde->state);
sdma_set_state(sde,
- sdma_state_s10_hw_start_up_halt_wait);
+ sdma_state_s10_hw_start_up_halt_wait);
break;
case sdma_event_e15_hw_halt_done:
break;
@@ -2292,7 +2257,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
break;
case sdma_event_e15_hw_halt_done:
sdma_set_state(sde,
- sdma_state_s15_hw_start_up_clean_wait);
+ sdma_state_s15_hw_start_up_clean_wait);
sdma_start_hw_clean_up(sde);
break;
case sdma_event_e25_hw_clean_up_done:
@@ -2767,7 +2732,7 @@ enomem:
* This function calls _extend_sdma_tx_descs to extend or allocate
 * coalesce buffer. If there is an allocated coalesce buffer, it will
* copy the input packet data into the coalesce buffer. It also adds
- * coalesce buffer descriptor once whe whole packet is received.
+ * coalesce buffer descriptor once the whole packet is received.
*
* Return:
* <0 - error
@@ -3030,7 +2995,8 @@ void sdma_freeze(struct hfi1_devdata *dd)
* continuing.
*/
ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
- atomic_read(&dd->sdma_unfreeze_count) <= 0);
+ atomic_read(&dd->sdma_unfreeze_count) <=
+ 0);
/* interrupted or count is negative, then unloading - just exit */
if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
return;
@@ -3047,7 +3013,7 @@ void sdma_freeze(struct hfi1_devdata *dd)
* software clean will read engine CSRs, so must be completed before
* the next step, which will clear the engine CSRs.
*/
- (void) wait_event_interruptible(dd->sdma_unfreeze_wq,
+ (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
atomic_read(&dd->sdma_unfreeze_count) <= 0);
/* no need to check results - done no matter what */
}
@@ -3067,7 +3033,7 @@ void sdma_unfreeze(struct hfi1_devdata *dd)
/* tell all engines start freeze clean up */
for (i = 0; i < dd->num_sdma; i++)
sdma_process_event(&dd->per_sdma[i],
- sdma_event_e82_hw_unfreeze);
+ sdma_event_e82_hw_unfreeze);
}
/**
@@ -3081,5 +3047,6 @@ void _sdma_engine_progress_schedule(
trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
/* assume we have selected a good cpu */
write_csr(sde->dd,
- CCE_INT_FORCE + (8*(IS_SDMA_START/64)), sde->progress_mask);
+ CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
+ sde->progress_mask);
}
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h
index da89e6458162..8f50c99fe711 100644
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ b/drivers/staging/rdma/hfi1/sdma.h
@@ -1,14 +1,13 @@
#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -58,15 +55,13 @@
#include "hfi.h"
#include "verbs.h"
+#include "sdma_txreq.h"
-/* increased for AHG */
-#define NUM_DESC 6
/* Hardware limit */
#define MAX_DESC 64
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)
-
#define SDMA_TXREQ_S_OK 0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED 2
@@ -109,8 +104,8 @@
/*
* Bits defined in the send DMA descriptor.
*/
-#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL << 63)
-#define SDMA_DESC0_LAST_DESC_FLAG (1ULL << 62)
+#define SDMA_DESC0_FIRST_DESC_FLAG BIT_ULL(63)
+#define SDMA_DESC0_LAST_DESC_FLAG BIT_ULL(62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
#define SDMA_DESC0_BYTE_COUNT_MASK \
@@ -154,8 +149,8 @@
((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
-#define SDMA_DESC1_INT_REQ_FLAG (1ULL << 1)
-#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL << 0)
+#define SDMA_DESC1_INT_REQ_FLAG BIT_ULL(1)
+#define SDMA_DESC1_HEAD_TO_HOST_FLAG BIT_ULL(0)
enum sdma_states {
sdma_state_s00_hw_down,
@@ -311,83 +306,6 @@ struct hw_sdma_desc {
__le64 qw[2];
};
-/*
- * struct sdma_desc - canonical fragment descriptor
- *
- * This is the descriptor carried in the tx request
- * corresponding to each fragment.
- *
- */
-struct sdma_desc {
- /* private: don't use directly */
- u64 qw[2];
-};
-
-struct sdma_txreq;
-typedef void (*callback_t)(struct sdma_txreq *, int, int);
-
-/**
- * struct sdma_txreq - the sdma_txreq structure (one per packet)
- * @list: for use by user and by queuing for wait
- *
- * This is the representation of a packet which consists of some
- * number of fragments. Storage is provided to within the structure.
- * for all fragments.
- *
- * The storage for the descriptors are automatically extended as needed
- * when the currently allocation is exceeded.
- *
- * The user (Verbs or PSM) may overload this structure with fields
- * specific to their use by putting this struct first in their struct.
- * The method of allocation of the overloaded structure is user dependent
- *
- * The list is the only public field in the structure.
- *
- */
-
-struct sdma_txreq {
- struct list_head list;
- /* private: */
- struct sdma_desc *descp;
- /* private: */
- void *coalesce_buf;
- /* private: */
- u16 coalesce_idx;
- /* private: */
- struct iowait *wait;
- /* private: */
- callback_t complete;
-#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
- u64 sn;
-#endif
- /* private: - used in coalesce/pad processing */
- u16 packet_len;
- /* private: - down-counted to trigger last */
- u16 tlen;
- /* private: flags */
- u16 flags;
- /* private: */
- u16 num_desc;
- /* private: */
- u16 desc_limit;
- /* private: */
- u16 next_descq_idx;
- /* private: */
- struct sdma_desc descs[NUM_DESC];
-};
-
-struct verbs_txreq {
- struct hfi1_pio_header phdr;
- struct sdma_txreq txreq;
- struct hfi1_qp *qp;
- struct hfi1_swqe *wqe;
- struct hfi1_mregion *mr;
- struct hfi1_sge_state *ss;
- struct sdma_engine *sde;
- u16 hdr_dwords;
- u16 hdr_inx;
-};
-
/**
* struct sdma_engine - Data pertaining to each SDMA engine.
* @dd: a back-pointer to the device data
@@ -409,6 +327,7 @@ struct sdma_engine {
u64 imask; /* clear interrupt mask */
u64 idle_mask;
u64 progress_mask;
+ u64 int_mask;
/* private: */
volatile __le64 *head_dma; /* DMA'ed by chip */
/* private: */
@@ -465,6 +384,12 @@ struct sdma_engine {
u16 tx_head;
/* private: */
u64 last_status;
+	/* private: */
+	u64 err_cnt;
+	/* private: */
+ u64 sdma_int_cnt;
+ u64 idle_int_cnt;
+ u64 progress_int_cnt;
/* private: */
struct list_head dmawait;
@@ -484,12 +409,12 @@ struct sdma_engine {
u32 progress_check_head;
/* private: */
struct work_struct flush_worker;
+ /* protect flush list */
spinlock_t flushlist_lock;
/* private: */
struct list_head flushlist;
};
-
int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
@@ -535,7 +460,6 @@ static inline int __sdma_running(struct sdma_engine *engine)
return engine->state.current_state == sdma_state_s99_running;
}
-
/**
* sdma_running() - state suitability test
* @engine: sdma engine
@@ -565,7 +489,6 @@ void _sdma_txreq_ahgadd(
u32 *ahg,
u8 ahg_hlen);
-
/**
* sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
* @tx: tx request to initialize
@@ -626,7 +549,7 @@ static inline int sdma_txinit_ahg(
u8 num_ahg,
u32 *ahg,
u8 ahg_hlen,
- void (*cb)(struct sdma_txreq *, int, int))
+ void (*cb)(struct sdma_txreq *, int))
{
if (tlen == 0)
return -ENODATA;
@@ -640,7 +563,8 @@ static inline int sdma_txinit_ahg(
tx->complete = cb;
tx->coalesce_buf = NULL;
tx->wait = NULL;
- tx->tlen = tx->packet_len = tlen;
+ tx->packet_len = tlen;
+ tx->tlen = tx->packet_len;
tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
tx->descs[0].qw[1] = 0;
if (flags & SDMA_TXREQ_F_AHG_COPY)
@@ -689,7 +613,7 @@ static inline int sdma_txinit(
struct sdma_txreq *tx,
u16 flags,
u16 tlen,
- void (*cb)(struct sdma_txreq *, int, int))
+ void (*cb)(struct sdma_txreq *, int))
{
return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
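Both sdma_txinit_ahg() and sdma_txinit() now take the two-argument completion callback. A minimal sketch of building and submitting a single-fragment packet under the new signature (done(), buf, and len are illustrative; sdma_txadd_kvaddr()/sdma_send_txreq() are assumed from this driver's existing descriptor API):

	static void done(struct sdma_txreq *tx, int status)
	{
		/* reclaim whatever owns tx */
	}

	...
		rc = sdma_txinit(&tx, 0, len, done);
		if (!rc)
			rc = sdma_txadd_kvaddr(sde->dd, &tx, buf, len);
		if (!rc)
			rc = sdma_send_txreq(sde, wait, &tx);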
@@ -753,7 +677,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
dd->default_desc1;
if (tx->flags & SDMA_TXREQ_F_URGENT)
tx->descp[tx->num_desc].qw[1] |=
- (SDMA_DESC1_HEAD_TO_HOST_FLAG|
+ (SDMA_DESC1_HEAD_TO_HOST_FLAG |
SDMA_DESC1_INT_REQ_FLAG);
}
@@ -1080,6 +1004,7 @@ struct sdma_map_elem {
/**
* struct sdma_map_el - mapping for a vl
+ * @engine_to_vl - map of an engine to a vl
* @list - rcu head for free callback
* @mask - vl mask to "mod" the vl to produce an index to map array
* @actual_vls - number of vls
@@ -1091,6 +1016,7 @@ struct sdma_map_elem {
* in turn point to an array of sde's for that vl.
*/
struct sdma_vl_map {
+ s8 engine_to_vl[TXE_NUM_SDMA_ENGINES];
struct rcu_head list;
u32 mask;
u8 actual_vls;
diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/staging/rdma/hfi1/sdma_txreq.h
new file mode 100644
index 000000000000..bf7d777d756e
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/sdma_txreq.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef HFI1_SDMA_TXREQ_H
+#define HFI1_SDMA_TXREQ_H
+
+/* increased for AHG */
+#define NUM_DESC 6
+
+/*
+ * struct sdma_desc - canonical fragment descriptor
+ *
+ * This is the descriptor carried in the tx request
+ * corresponding to each fragment.
+ *
+ */
+struct sdma_desc {
+ /* private: don't use directly */
+ u64 qw[2];
+};
+
+/**
+ * struct sdma_txreq - the sdma_txreq structure (one per packet)
+ * @list: for use by user and by queuing for wait
+ *
+ * This is the representation of a packet which consists of some
+ * number of fragments. Storage is provided within the structure
+ * for all fragments.
+ *
+ * The storage for the descriptors is automatically extended as needed
+ * when the current allocation is exceeded.
+ *
+ * The user (Verbs or PSM) may overload this structure with fields
+ * specific to their use by putting this struct first in their struct.
+ * The method of allocation of the overloaded structure is user dependent.
+ *
+ * The list is the only public field in the structure.
+ *
+ */
+
+#define SDMA_TXREQ_S_OK 0
+#define SDMA_TXREQ_S_SENDERROR 1
+#define SDMA_TXREQ_S_ABORTED 2
+#define SDMA_TXREQ_S_SHUTDOWN 3
+
+/* flags bits */
+#define SDMA_TXREQ_F_URGENT 0x0001
+#define SDMA_TXREQ_F_AHG_COPY 0x0002
+#define SDMA_TXREQ_F_USE_AHG 0x0004
+
+struct sdma_txreq;
+typedef void (*callback_t)(struct sdma_txreq *, int);
+
+struct iowait;
+struct sdma_txreq {
+ struct list_head list;
+ /* private: */
+ struct sdma_desc *descp;
+ /* private: */
+ void *coalesce_buf;
+ /* private: */
+ struct iowait *wait;
+ /* private: */
+ callback_t complete;
+#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
+ u64 sn;
+#endif
+ /* private: - used in coalesce/pad processing */
+ u16 packet_len;
+ /* private: - down-counted to trigger last */
+ u16 tlen;
+ /* private: */
+ u16 num_desc;
+ /* private: */
+ u16 desc_limit;
+ /* private: */
+ u16 next_descq_idx;
+ /* private: */
+ u16 coalesce_idx;
+ /* private: flags */
+ u16 flags;
+ /* private: */
+ struct sdma_desc descs[NUM_DESC];
+};
+
+static inline int sdma_txreq_built(struct sdma_txreq *tx)
+{
+ return tx->num_desc;
+}
+
+#endif /* HFI1_SDMA_TXREQ_H */
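The kernel-doc above spells out the intended embedding pattern: a user wraps sdma_txreq in its own request structure and recovers the wrapper in the completion callback. As a sketch (struct wrapped_tx is hypothetical):

	struct wrapped_tx {
		struct sdma_txreq txreq;	/* embedded per the doc above */
		void *user_data;
	};

	static void wrapped_complete(struct sdma_txreq *tx, int status)
	{
		struct wrapped_tx *w =
			container_of(tx, struct wrapped_tx, txreq);

		/* ... consume w->user_data, then free w ... */
	}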
diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/staging/rdma/hfi1/sysfs.c
index 1dd6727dd5ef..c7f1271190af 100644
--- a/drivers/staging/rdma/hfi1/sysfs.c
+++ b/drivers/staging/rdma/hfi1/sysfs.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -53,7 +50,6 @@
#include "mad.h"
#include "trace.h"
-
/*
* Start of per-port congestion control structures and support code
*/
@@ -62,8 +58,8 @@
* Congestion control table size followed by table entries
*/
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
int ret;
struct hfi1_pportdata *ppd =
@@ -84,7 +80,7 @@ static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
rcu_read_lock();
cc_state = get_cc_state(ppd);
- if (cc_state == NULL) {
+ if (!cc_state) {
rcu_read_unlock();
return -EINVAL;
}
@@ -99,10 +95,6 @@ static void port_release(struct kobject *kobj)
/* nothing to do since memory is freed by hfi1_free_devdata() */
}
-static struct kobj_type port_cc_ktype = {
- .release = port_release,
-};
-
static struct bin_attribute cc_table_bin_attr = {
.attr = {.name = "cc_table_bin", .mode = 0444},
.read = read_cc_table_bin,
@@ -115,8 +107,8 @@ static struct bin_attribute cc_table_bin_attr = {
* trigger threshold and the minimum injection rate delay.
*/
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
{
int ret;
struct hfi1_pportdata *ppd =
@@ -135,7 +127,7 @@ static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
rcu_read_lock();
cc_state = get_cc_state(ppd);
- if (cc_state == NULL) {
+ if (!cc_state) {
rcu_read_unlock();
return -EINVAL;
}
@@ -151,6 +143,68 @@ static struct bin_attribute cc_setting_bin_attr = {
.size = PAGE_SIZE,
};
+struct hfi1_port_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct hfi1_pportdata *, char *);
+ ssize_t (*store)(struct hfi1_pportdata *, const char *, size_t);
+};
+
+static ssize_t cc_prescan_show(struct hfi1_pportdata *ppd, char *buf)
+{
+ return sprintf(buf, "%s\n", ppd->cc_prescan ? "on" : "off");
+}
+
+static ssize_t cc_prescan_store(struct hfi1_pportdata *ppd, const char *buf,
+ size_t count)
+{
+ if (!memcmp(buf, "on", 2))
+ ppd->cc_prescan = true;
+ else if (!memcmp(buf, "off", 3))
+ ppd->cc_prescan = false;
+
+ return count;
+}
+
+static struct hfi1_port_attr cc_prescan_attr =
+ __ATTR(cc_prescan, 0600, cc_prescan_show, cc_prescan_store);
+
+static ssize_t cc_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct hfi1_port_attr *port_attr =
+ container_of(attr, struct hfi1_port_attr, attr);
+ struct hfi1_pportdata *ppd =
+ container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
+
+ return port_attr->show(ppd, buf);
+}
+
+static ssize_t cc_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hfi1_port_attr *port_attr =
+ container_of(attr, struct hfi1_port_attr, attr);
+ struct hfi1_pportdata *ppd =
+ container_of(kobj, struct hfi1_pportdata, pport_cc_kobj);
+
+ return port_attr->store(ppd, buf, count);
+}
+
+static const struct sysfs_ops port_cc_sysfs_ops = {
+ .show = cc_attr_show,
+ .store = cc_attr_store
+};
+
+static struct attribute *port_cc_default_attributes[] = {
+	&cc_prescan_attr.attr,
+	NULL	/* default_attrs arrays must be NULL-terminated */
+};
+
+static struct kobj_type port_cc_ktype = {
+ .release = port_release,
+ .sysfs_ops = &port_cc_sysfs_ops,
+ .default_attrs = port_cc_default_attributes
+};
+
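port_cc_ktype now dispatches show/store through the per-port sysfs_ops, so adding another CCMgtA file reduces to a hfi1_port_attr plus an entry in port_cc_default_attributes (from userspace, cc_prescan is then toggled with e.g. "echo on > .../CCMgtA/cc_prescan"). A hypothetical read-only attribute following the same pattern (some_counter is not a real hfi1_pportdata field):

	static ssize_t cc_example_show(struct hfi1_pportdata *ppd, char *buf)
	{
		return sprintf(buf, "%u\n", ppd->some_counter);
	}

	static struct hfi1_port_attr cc_example_attr =
		__ATTR(cc_example, 0444, cc_example_show, NULL);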
/* Start sc2vl */
#define HFI1_SC2VL_ATTR(N) \
static struct hfi1_sc2vl_attr hfi1_sc2vl_attr_##N = { \
@@ -196,7 +250,6 @@ HFI1_SC2VL_ATTR(29);
HFI1_SC2VL_ATTR(30);
HFI1_SC2VL_ATTR(31);
-
static struct attribute *sc2vl_default_attributes[] = {
&hfi1_sc2vl_attr_0.attr,
&hfi1_sc2vl_attr_1.attr,
@@ -302,7 +355,6 @@ HFI1_SL2SC_ATTR(29);
HFI1_SL2SC_ATTR(30);
HFI1_SL2SC_ATTR(31);
-
static struct attribute *sl2sc_default_attributes[] = {
&hfi1_sl2sc_attr_0.attr,
&hfi1_sl2sc_attr_1.attr,
@@ -435,7 +487,6 @@ static struct kobj_type hfi1_vl2mtu_ktype = {
.default_attrs = vl2mtu_default_attributes
};
-
/* end of per-port file structures and support code */
/*
@@ -446,7 +497,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, ibdev.dev);
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
@@ -455,7 +506,7 @@ static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, ibdev.dev);
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
int ret;
@@ -470,19 +521,18 @@ static ssize_t show_boardversion(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, ibdev.dev);
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
-
static ssize_t show_nctxts(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, ibdev.dev);
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
/*
@@ -497,10 +547,10 @@ static ssize_t show_nctxts(struct device *device,
}
static ssize_t show_nfreectxts(struct device *device,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, ibdev.dev);
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
/* Return the number of free user ports (contexts) available. */
@@ -511,11 +561,10 @@ static ssize_t show_serial(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, ibdev.dev);
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
-
}
static ssize_t store_chip_reset(struct device *device,
@@ -523,7 +572,7 @@ static ssize_t store_chip_reset(struct device *device,
size_t count)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, ibdev.dev);
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
int ret;
@@ -552,7 +601,7 @@ static ssize_t show_tempsense(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
- container_of(device, struct hfi1_ibdev, ibdev.dev);
+ container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
struct hfi1_devdata *dd = dd_from_dev(dev);
struct hfi1_temp temp;
int ret;
@@ -608,8 +657,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
if (!port_num || port_num > dd->num_pports) {
dd_dev_err(dd,
- "Skipping infiniband class with invalid port %u\n",
- port_num);
+ "Skipping infiniband class with invalid port %u\n",
+ port_num);
return -ENODEV;
}
ppd = &dd->pport[port_num - 1];
@@ -644,39 +693,36 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
}
kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
-
ret = kobject_init_and_add(&ppd->pport_cc_kobj, &port_cc_ktype,
kobj, "CCMgtA");
if (ret) {
dd_dev_err(dd,
- "Skipping Congestion Control sysfs info, (err %d) port %u\n",
- ret, port_num);
+ "Skipping Congestion Control sysfs info, (err %d) port %u\n",
+ ret, port_num);
goto bail_vl2mtu;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
- &cc_setting_bin_attr);
+ ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
if (ret) {
dd_dev_err(dd,
- "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
- ret, port_num);
+ "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
+ ret, port_num);
goto bail_cc;
}
- ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
- &cc_table_bin_attr);
+ ret = sysfs_create_bin_file(&ppd->pport_cc_kobj, &cc_table_bin_attr);
if (ret) {
dd_dev_err(dd,
- "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
- ret, port_num);
+ "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
+ ret, port_num);
goto bail_cc_entry_bin;
}
dd_dev_info(dd,
- "IB%u: Congestion Control Agent enabled for port %d\n",
- dd->unit, port_num);
+ "IB%u: Congestion Control Agent enabled for port %d\n",
+ dd->unit, port_num);
return 0;
@@ -700,7 +746,7 @@ bail:
*/
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
{
- struct ib_device *dev = &dd->verbs_dev.ibdev;
+ struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
int i, ret;
for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) {
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/staging/rdma/hfi1/trace.c
index 10122e84cb2f..8b62fefcf903 100644
--- a/drivers/staging/rdma/hfi1/trace.c
+++ b/drivers/staging/rdma/hfi1/trace.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -109,17 +106,17 @@ const char *parse_everbs_hdrs(
case OP(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
case OP(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE):
trace_seq_printf(p, IMM_PRN,
- be32_to_cpu(eh->imm_data));
+ be32_to_cpu(eh->imm_data));
break;
/* reth + imm */
case OP(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
case OP(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, RETH_PRN " " IMM_PRN,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->rc.reth.vaddr),
- be32_to_cpu(eh->rc.reth.rkey),
- be32_to_cpu(eh->rc.reth.length),
- be32_to_cpu(eh->rc.imm_data));
+ (unsigned long long)ib_u64_get(
+ (__be32 *)&eh->rc.reth.vaddr),
+ be32_to_cpu(eh->rc.reth.rkey),
+ be32_to_cpu(eh->rc.reth.length),
+ be32_to_cpu(eh->rc.imm_data));
break;
/* reth */
case OP(RC, RDMA_READ_REQUEST):
@@ -128,10 +125,10 @@ const char *parse_everbs_hdrs(
case OP(RC, RDMA_WRITE_ONLY):
case OP(UC, RDMA_WRITE_ONLY):
trace_seq_printf(p, RETH_PRN,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->rc.reth.vaddr),
- be32_to_cpu(eh->rc.reth.rkey),
- be32_to_cpu(eh->rc.reth.length));
+ (unsigned long long)ib_u64_get(
+ (__be32 *)&eh->rc.reth.vaddr),
+ be32_to_cpu(eh->rc.reth.rkey),
+ be32_to_cpu(eh->rc.reth.length));
break;
case OP(RC, RDMA_READ_RESPONSE_FIRST):
case OP(RC, RDMA_READ_RESPONSE_LAST):
@@ -154,19 +151,20 @@ const char *parse_everbs_hdrs(
case OP(RC, COMPARE_SWAP):
case OP(RC, FETCH_ADD):
trace_seq_printf(p, ATOMICETH_PRN,
- (unsigned long long)ib_u64_get(eh->atomic_eth.vaddr),
- eh->atomic_eth.rkey,
- (unsigned long long)ib_u64_get(
- (__be32 *)&eh->atomic_eth.swap_data),
- (unsigned long long) ib_u64_get(
+ (unsigned long long)ib_u64_get(
+ eh->atomic_eth.vaddr),
+ eh->atomic_eth.rkey,
+ (unsigned long long)ib_u64_get(
+ (__be32 *)&eh->atomic_eth.swap_data),
+ (unsigned long long)ib_u64_get(
(__be32 *)&eh->atomic_eth.compare_data));
break;
/* deth */
case OP(UD, SEND_ONLY):
case OP(UD, SEND_ONLY_WITH_IMMEDIATE):
trace_seq_printf(p, DETH_PRN,
- be32_to_cpu(eh->ud.deth[0]),
- be32_to_cpu(eh->ud.deth[1]) & HFI1_QPN_MASK);
+ be32_to_cpu(eh->ud.deth[0]),
+ be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
break;
}
trace_seq_putc(p, 0);
@@ -187,12 +185,12 @@ const char *parse_sdma_flags(
trace_seq_printf(p, "%s", flags);
if (desc0 & SDMA_DESC0_FIRST_DESC_FLAG)
trace_seq_printf(p, " amode:%u aidx:%u alen:%u",
- (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT)
- & SDMA_DESC1_HEADER_MODE_MASK),
- (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT)
- & SDMA_DESC1_HEADER_INDEX_MASK),
- (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT)
- & SDMA_DESC1_HEADER_DWS_MASK));
+ (u8)((desc1 >> SDMA_DESC1_HEADER_MODE_SHIFT) &
+ SDMA_DESC1_HEADER_MODE_MASK),
+ (u8)((desc1 >> SDMA_DESC1_HEADER_INDEX_SHIFT) &
+ SDMA_DESC1_HEADER_INDEX_MASK),
+ (u8)((desc1 >> SDMA_DESC1_HEADER_DWS_SHIFT) &
+ SDMA_DESC1_HEADER_DWS_MASK));
return ret;
}
@@ -234,3 +232,4 @@ __hfi1_trace_fn(DC8051);
__hfi1_trace_fn(FIRMWARE);
__hfi1_trace_fn(RCVCTRL);
__hfi1_trace_fn(TID);
+__hfi1_trace_fn(MMU);
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/staging/rdma/hfi1/trace.h
index 86c12ebfd4f0..963dc948c38a 100644
--- a/drivers/staging/rdma/hfi1/trace.h
+++ b/drivers/staging/rdma/hfi1/trace.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -76,304 +73,295 @@ __print_symbolic(etype, \
#define TRACE_SYSTEM hfi1_rx
TRACE_EVENT(hfi1_rcvhdr,
- TP_PROTO(struct hfi1_devdata *dd,
- u64 eflags,
- u32 ctxt,
- u32 etype,
- u32 hlen,
- u32 tlen,
- u32 updegr,
- u32 etail),
- TP_ARGS(dd, ctxt, eflags, etype, hlen, tlen, updegr, etail),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u64, eflags)
- __field(u32, ctxt)
- __field(u32, etype)
- __field(u32, hlen)
- __field(u32, tlen)
- __field(u32, updegr)
- __field(u32, etail)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->eflags = eflags;
- __entry->ctxt = ctxt;
- __entry->etype = etype;
- __entry->hlen = hlen;
- __entry->tlen = tlen;
- __entry->updegr = updegr;
- __entry->etail = etail;
- ),
- TP_printk(
-"[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
- __get_str(dev),
- __entry->ctxt,
- __entry->eflags,
- __entry->etype, show_packettype(__entry->etype),
- __entry->hlen,
- __entry->tlen,
- __entry->updegr,
- __entry->etail
- )
+ TP_PROTO(struct hfi1_devdata *dd,
+ u64 eflags,
+ u32 ctxt,
+ u32 etype,
+ u32 hlen,
+ u32 tlen,
+ u32 updegr,
+ u32 etail
+ ),
+ TP_ARGS(dd, eflags, ctxt, etype, hlen, tlen, updegr, etail),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u64, eflags)
+ __field(u32, ctxt)
+ __field(u32, etype)
+ __field(u32, hlen)
+ __field(u32, tlen)
+ __field(u32, updegr)
+ __field(u32, etail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->eflags = eflags;
+ __entry->ctxt = ctxt;
+ __entry->etype = etype;
+ __entry->hlen = hlen;
+ __entry->tlen = tlen;
+ __entry->updegr = updegr;
+ __entry->etail = etail;
+ ),
+ TP_printk(
+ "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->eflags,
+ __entry->etype, show_packettype(__entry->etype),
+ __entry->hlen,
+ __entry->tlen,
+ __entry->updegr,
+ __entry->etail
+ )
);
TRACE_EVENT(hfi1_receive_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
- TP_ARGS(dd, ctxt),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u32, ctxt)
- __field(u8, slow_path)
- __field(u8, dma_rtail)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt) {
- __entry->slow_path = 1;
- __entry->dma_rtail = 0xFF;
- } else if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt_dma_rtail){
- __entry->dma_rtail = 1;
- __entry->slow_path = 0;
- } else if (dd->rcd[ctxt]->do_interrupt ==
- &handle_receive_interrupt_nodma_rtail) {
- __entry->dma_rtail = 0;
- __entry->slow_path = 0;
- }
- ),
- TP_printk(
- "[%s] ctxt %d SlowPath: %d DmaRtail: %d",
- __get_str(dev),
- __entry->ctxt,
- __entry->slow_path,
- __entry->dma_rtail
- )
+ TP_PROTO(struct hfi1_devdata *dd, u32 ctxt),
+ TP_ARGS(dd, ctxt),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u32, ctxt)
+ __field(u8, slow_path)
+ __field(u8, dma_rtail)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt) {
+ __entry->slow_path = 1;
+ __entry->dma_rtail = 0xFF;
+ } else if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt_dma_rtail) {
+ __entry->dma_rtail = 1;
+ __entry->slow_path = 0;
+ } else if (dd->rcd[ctxt]->do_interrupt ==
+ &handle_receive_interrupt_nodma_rtail) {
+ __entry->dma_rtail = 0;
+ __entry->slow_path = 0;
+ }
+ ),
+ TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d",
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->slow_path,
+ __entry->dma_rtail
+ )
);
-const char *print_u64_array(struct trace_seq *, u64 *, int);
+TRACE_EVENT(hfi1_exp_tid_reg,
+ TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr,
+ u32 npages, unsigned long va, unsigned long pa,
+ dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
+ TP_STRUCT__entry(
+ __field(unsigned, ctxt)
+ __field(u16, subctxt)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(unsigned long, va)
+ __field(unsigned long, pa)
+ __field(dma_addr_t, dma)
+ ),
+ TP_fast_assign(
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->dma = dma;
+ ),
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->rarr,
+ __entry->npages,
+ __entry->pa,
+ __entry->va,
+ __entry->dma
+ )
+ );
-TRACE_EVENT(hfi1_exp_tid_map,
- TP_PROTO(unsigned ctxt, u16 subctxt, int dir,
- unsigned long *maps, u16 count),
- TP_ARGS(ctxt, subctxt, dir, maps, count),
+TRACE_EVENT(hfi1_exp_tid_unreg,
+ TP_PROTO(unsigned ctxt, u16 subctxt, u32 rarr, u32 npages,
+ unsigned long va, unsigned long pa, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
TP_STRUCT__entry(
__field(unsigned, ctxt)
__field(u16, subctxt)
- __field(int, dir)
- __field(u16, count)
- __dynamic_array(unsigned long, maps, sizeof(*maps) * count)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(unsigned long, va)
+ __field(unsigned long, pa)
+ __field(dma_addr_t, dma)
),
TP_fast_assign(
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
- __entry->dir = dir;
- __entry->count = count;
- memcpy(__get_dynamic_array(maps), maps,
- sizeof(*maps) * count);
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->va = va;
+ __entry->pa = pa;
+ __entry->dma = dma;
),
- TP_printk("[%3u:%02u] %s tidmaps %s",
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
__entry->ctxt,
__entry->subctxt,
- (__entry->dir ? ">" : "<"),
- print_u64_array(p, __get_dynamic_array(maps),
- __entry->count)
+ __entry->rarr,
+ __entry->npages,
+ __entry->pa,
+ __entry->va,
+ __entry->dma
)
);
-TRACE_EVENT(hfi1_exp_rcv_set,
- TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
- unsigned long vaddr, u64 phys_addr, void *page),
- TP_ARGS(ctxt, subctxt, tid, vaddr, phys_addr, page),
+TRACE_EVENT(hfi1_exp_tid_inval,
+ TP_PROTO(unsigned ctxt, u16 subctxt, unsigned long va, u32 rarr,
+ u32 npages, dma_addr_t dma),
+ TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
TP_STRUCT__entry(
__field(unsigned, ctxt)
__field(u16, subctxt)
- __field(u32, tid)
- __field(unsigned long, vaddr)
- __field(u64, phys_addr)
- __field(void *, page)
+ __field(unsigned long, va)
+ __field(u32, rarr)
+ __field(u32, npages)
+ __field(dma_addr_t, dma)
),
TP_fast_assign(
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
- __entry->tid = tid;
- __entry->vaddr = vaddr;
- __entry->phys_addr = phys_addr;
- __entry->page = page;
+ __entry->va = va;
+ __entry->rarr = rarr;
+ __entry->npages = npages;
+ __entry->dma = dma;
),
- TP_printk("[%u:%u] TID %u, vaddrs 0x%lx, physaddr 0x%llx, pgp %p",
+ TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
__entry->ctxt,
__entry->subctxt,
- __entry->tid,
- __entry->vaddr,
- __entry->phys_addr,
- __entry->page
+ __entry->rarr,
+ __entry->npages,
+ __entry->va,
+ __entry->dma
)
);
-TRACE_EVENT(hfi1_exp_rcv_free,
- TP_PROTO(unsigned ctxt, u16 subctxt, u32 tid,
- unsigned long phys, void *page),
- TP_ARGS(ctxt, subctxt, tid, phys, page),
+TRACE_EVENT(hfi1_mmu_invalidate,
+ TP_PROTO(unsigned ctxt, u16 subctxt, const char *type,
+ unsigned long start, unsigned long end),
+ TP_ARGS(ctxt, subctxt, type, start, end),
TP_STRUCT__entry(
__field(unsigned, ctxt)
__field(u16, subctxt)
- __field(u32, tid)
- __field(unsigned long, phys)
- __field(void *, page)
+ __string(type, type)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
),
TP_fast_assign(
__entry->ctxt = ctxt;
__entry->subctxt = subctxt;
- __entry->tid = tid;
- __entry->phys = phys;
- __entry->page = page;
+ __assign_str(type, type);
+ __entry->start = start;
+ __entry->end = end;
),
- TP_printk("[%u:%u] freeing TID %u, 0x%lx, pgp %p",
+ TP_printk("[%3u:%02u] MMU Invalidate (%s) 0x%lx - 0x%lx",
__entry->ctxt,
__entry->subctxt,
- __entry->tid,
- __entry->phys,
- __entry->page
+ __get_str(type),
+ __entry->start,
+ __entry->end
)
);
+
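
The new hfi1_exp_tid_* and hfi1_mmu_invalidate events replace the old per-TID map/set/free events. A hypothetical call site for the invalidate event, where ctxt, subctxt, start and end stand for the caller's locals and the "range" tag is illustrative (the actual callers are outside this hunk):

        trace_hfi1_mmu_invalidate(ctxt, subctxt, "range", start, end);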
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tx
TRACE_EVENT(hfi1_piofree,
- TP_PROTO(struct send_context *sc, int extra),
- TP_ARGS(sc, extra),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sc->dd)
- __field(u32, sw_index)
- __field(u32, hw_context)
- __field(int, extra)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sc->dd);
- __entry->sw_index = sc->sw_index;
- __entry->hw_context = sc->hw_context;
- __entry->extra = extra;
- ),
- TP_printk(
- "[%s] ctxt %u(%u) extra %d",
- __get_str(dev),
- __entry->sw_index,
- __entry->hw_context,
- __entry->extra
- )
+ TP_PROTO(struct send_context *sc, int extra),
+ TP_ARGS(sc, extra),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(int, extra)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->extra = extra;
+ ),
+ TP_printk("[%s] ctxt %u(%u) extra %d",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->extra
+ )
);
TRACE_EVENT(hfi1_wantpiointr,
- TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
- TP_ARGS(sc, needint, credit_ctrl),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sc->dd)
- __field(u32, sw_index)
- __field(u32, hw_context)
- __field(u32, needint)
- __field(u64, credit_ctrl)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sc->dd);
- __entry->sw_index = sc->sw_index;
- __entry->hw_context = sc->hw_context;
- __entry->needint = needint;
- __entry->credit_ctrl = credit_ctrl;
- ),
- TP_printk(
- "[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
- __get_str(dev),
- __entry->sw_index,
- __entry->hw_context,
- __entry->needint,
- (unsigned long long)__entry->credit_ctrl
- )
+ TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
+ TP_ARGS(sc, needint, credit_ctrl),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
+ __field(u32, sw_index)
+ __field(u32, hw_context)
+ __field(u32, needint)
+ __field(u64, credit_ctrl)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
+ __entry->sw_index = sc->sw_index;
+ __entry->hw_context = sc->hw_context;
+ __entry->needint = needint;
+ __entry->credit_ctrl = credit_ctrl;
+ ),
+ TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
+ __get_str(dev),
+ __entry->sw_index,
+ __entry->hw_context,
+ __entry->needint,
+ (unsigned long long)__entry->credit_ctrl
+ )
);
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
- TP_PROTO(struct hfi1_qp *qp, u32 flags),
- TP_ARGS(qp, flags),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, flags)
- __field(u32, s_flags)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->flags = flags;
- __entry->qpn = qp->ibqp.qp_num;
- __entry->s_flags = qp->s_flags;
- ),
- TP_printk(
- "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
- __get_str(dev),
- __entry->qpn,
- __entry->flags,
- __entry->s_flags
- )
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
+ TP_ARGS(qp, flags),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, flags)
+ __field(u32, s_flags)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->flags = flags;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x flags 0x%x s_flags 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->flags,
+ __entry->s_flags
+ )
);
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
- TP_PROTO(struct hfi1_qp *qp, u32 flags),
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
TP_ARGS(qp, flags));
DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
- TP_PROTO(struct hfi1_qp *qp, u32 flags),
+ TP_PROTO(struct rvt_qp *qp, u32 flags),
TP_ARGS(qp, flags));
#undef TRACE_SYSTEM
-#define TRACE_SYSTEM hfi1_qphash
-DECLARE_EVENT_CLASS(hfi1_qphash_template,
- TP_PROTO(struct hfi1_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, bucket)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->qpn = qp->ibqp.qp_num;
- __entry->bucket = bucket;
- ),
- TP_printk(
- "[%s] qpn 0x%x bucket %u",
- __get_str(dev),
- __entry->qpn,
- __entry->bucket
- )
-);
-
-DEFINE_EVENT(hfi1_qphash_template, hfi1_qpinsert,
- TP_PROTO(struct hfi1_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket));
-
-DEFINE_EVENT(hfi1_qphash_template, hfi1_qpremove,
- TP_PROTO(struct hfi1_qp *qp, u32 bucket),
- TP_ARGS(qp, bucket));
-
-#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_ibhdrs
u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr);
-const char *parse_everbs_hdrs(
- struct trace_seq *p,
- u8 opcode,
- void *ehdrs);
+const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
-const char *parse_sdma_flags(
- struct trace_seq *p,
- u64 desc0, u64 desc1);
+const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
-
#define lrh_name(lrh) { HFI1_##lrh, #lrh }
#define show_lnh(lrh) \
__print_symbolic(lrh, \
@@ -420,7 +408,6 @@ __print_symbolic(opcode, \
ib_opcode_name(UD_SEND_ONLY_WITH_IMMEDIATE), \
ib_opcode_name(CNP))
-
#define LRH_PRN "vl %d lver %d sl %d lnh %d,%s dlid %.4x len %d slid %.4x"
#define BTH_PRN \
"op 0x%.2x,%s se %d m %d pad %d tver %d pkey 0x%.4x " \
@@ -428,124 +415,130 @@ __print_symbolic(opcode, \
#define EHDR_PRN "%s"
DECLARE_EVENT_CLASS(hfi1_ibhdr_template,
- TP_PROTO(struct hfi1_devdata *dd,
- struct hfi1_ib_header *hdr),
- TP_ARGS(dd, hdr),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- /* LRH */
- __field(u8, vl)
- __field(u8, lver)
- __field(u8, sl)
- __field(u8, lnh)
- __field(u16, dlid)
- __field(u16, len)
- __field(u16, slid)
- /* BTH */
- __field(u8, opcode)
- __field(u8, se)
- __field(u8, m)
- __field(u8, pad)
- __field(u8, tver)
- __field(u16, pkey)
- __field(u8, f)
- __field(u8, b)
- __field(u32, qpn)
- __field(u8, a)
- __field(u32, psn)
- /* extended headers */
- __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
- ),
- TP_fast_assign(
- struct hfi1_other_headers *ohdr;
-
- DD_DEV_ASSIGN(dd);
- /* LRH */
- __entry->vl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
- __entry->lver =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
- __entry->sl =
- (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
- __entry->lnh =
- (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
- __entry->dlid =
- be16_to_cpu(hdr->lrh[1]);
- /* allow for larger len */
- __entry->len =
- be16_to_cpu(hdr->lrh[2]);
- __entry->slid =
- be16_to_cpu(hdr->lrh[3]);
- /* BTH */
- if (__entry->lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
- __entry->opcode =
- (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
- __entry->se =
- (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
- __entry->m =
- (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
- __entry->pad =
- (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- __entry->tver =
- (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
- __entry->pkey =
- be32_to_cpu(ohdr->bth[0]) & 0xffff;
- __entry->f =
- (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT)
- & HFI1_FECN_MASK;
- __entry->b =
- (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT)
- & HFI1_BECN_MASK;
- __entry->qpn =
- be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
- __entry->a =
- (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
- /* allow for larger PSN */
- __entry->psn =
- be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
- /* extended headers */
- memcpy(
- __get_dynamic_array(ehdrs),
- &ohdr->u,
- ibhdr_exhdr_len(hdr));
- ),
- TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
- __get_str(dev),
- /* LRH */
- __entry->vl,
- __entry->lver,
- __entry->sl,
- __entry->lnh, show_lnh(__entry->lnh),
- __entry->dlid,
- __entry->len,
- __entry->slid,
- /* BTH */
- __entry->opcode, show_ib_opcode(__entry->opcode),
- __entry->se,
- __entry->m,
- __entry->pad,
- __entry->tver,
- __entry->pkey,
- __entry->f,
- __entry->b,
- __entry->qpn,
- __entry->a,
- __entry->psn,
- /* extended headers */
- __parse_ib_ehdrs(
- __entry->opcode,
- (void *)__get_dynamic_array(ehdrs))
- )
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd)
+ /* LRH */
+ __field(u8, vl)
+ __field(u8, lver)
+ __field(u8, sl)
+ __field(u8, lnh)
+ __field(u16, dlid)
+ __field(u16, len)
+ __field(u16, slid)
+ /* BTH */
+ __field(u8, opcode)
+ __field(u8, se)
+ __field(u8, m)
+ __field(u8, pad)
+ __field(u8, tver)
+ __field(u16, pkey)
+ __field(u8, f)
+ __field(u8, b)
+ __field(u32, qpn)
+ __field(u8, a)
+ __field(u32, psn)
+ /* extended headers */
+ __dynamic_array(u8, ehdrs, ibhdr_exhdr_len(hdr))
+ ),
+ TP_fast_assign(
+ struct hfi1_other_headers *ohdr;
+
+ DD_DEV_ASSIGN(dd);
+ /* LRH */
+ __entry->vl =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 12);
+ __entry->lver =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 8) & 0xf;
+ __entry->sl =
+ (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
+ __entry->lnh =
+ (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
+ __entry->dlid =
+ be16_to_cpu(hdr->lrh[1]);
+ /* allow for larger len */
+ __entry->len =
+ be16_to_cpu(hdr->lrh[2]);
+ __entry->slid =
+ be16_to_cpu(hdr->lrh[3]);
+ /* BTH */
+ if (__entry->lnh == HFI1_LRH_BTH)
+ ohdr = &hdr->u.oth;
+ else
+ ohdr = &hdr->u.l.oth;
+ __entry->opcode =
+ (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
+ __entry->se =
+ (be32_to_cpu(ohdr->bth[0]) >> 23) & 1;
+ __entry->m =
+ (be32_to_cpu(ohdr->bth[0]) >> 22) & 1;
+ __entry->pad =
+ (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
+ __entry->tver =
+ (be32_to_cpu(ohdr->bth[0]) >> 16) & 0xf;
+ __entry->pkey =
+ be32_to_cpu(ohdr->bth[0]) & 0xffff;
+ __entry->f =
+ (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
+ HFI1_FECN_MASK;
+ __entry->b =
+ (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
+ HFI1_BECN_MASK;
+ __entry->qpn =
+ be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ __entry->a =
+ (be32_to_cpu(ohdr->bth[2]) >> 31) & 1;
+ /* allow for larger PSN */
+ __entry->psn =
+ be32_to_cpu(ohdr->bth[2]) & 0x7fffffff;
+ /* extended headers */
+ memcpy(__get_dynamic_array(ehdrs), &ohdr->u,
+ ibhdr_exhdr_len(hdr));
+ ),
+ TP_printk("[%s] " LRH_PRN " " BTH_PRN " " EHDR_PRN,
+ __get_str(dev),
+ /* LRH */
+ __entry->vl,
+ __entry->lver,
+ __entry->sl,
+ __entry->lnh, show_lnh(__entry->lnh),
+ __entry->dlid,
+ __entry->len,
+ __entry->slid,
+ /* BTH */
+ __entry->opcode, show_ib_opcode(__entry->opcode),
+ __entry->se,
+ __entry->m,
+ __entry->pad,
+ __entry->tver,
+ __entry->pkey,
+ __entry->f,
+ __entry->b,
+ __entry->qpn,
+ __entry->a,
+ __entry->psn,
+ /* extended headers */
+ __parse_ib_ehdrs(
+ __entry->opcode,
+ (void *)__get_dynamic_array(ehdrs))
+ )
);
DEFINE_EVENT(hfi1_ibhdr_template, input_ibhdr,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
TP_ARGS(dd, hdr));
-DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr,
+DEFINE_EVENT(hfi1_ibhdr_template, pio_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, ack_output_ibhdr,
+ TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
+ TP_ARGS(dd, hdr));
+
+DEFINE_EVENT(hfi1_ibhdr_template, sdma_output_ibhdr,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ib_header *hdr),
TP_ARGS(dd, hdr));
@@ -556,15 +549,14 @@ DEFINE_EVENT(hfi1_ibhdr_template, output_ibhdr,
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_snoop
-
TRACE_EVENT(snoop_capture,
- TP_PROTO(struct hfi1_devdata *dd,
- int hdr_len,
- struct hfi1_ib_header *hdr,
- int data_len,
- void *data),
- TP_ARGS(dd, hdr_len, hdr, data_len, data),
- TP_STRUCT__entry(
+ TP_PROTO(struct hfi1_devdata *dd,
+ int hdr_len,
+ struct hfi1_ib_header *hdr,
+ int data_len,
+ void *data),
+ TP_ARGS(dd, hdr_len, hdr, data_len, data),
+ TP_STRUCT__entry(
DD_DEV_ENTRY(dd)
__field(u16, slid)
__field(u16, dlid)
@@ -577,8 +569,8 @@ TRACE_EVENT(snoop_capture,
__field(u8, lnh)
__dynamic_array(u8, raw_hdr, hdr_len)
__dynamic_array(u8, raw_pkt, data_len)
- ),
- TP_fast_assign(
+ ),
+ TP_fast_assign(
struct hfi1_other_headers *ohdr;
__entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
@@ -589,7 +581,7 @@ TRACE_EVENT(snoop_capture,
DD_DEV_ASSIGN(dd);
__entry->slid = be16_to_cpu(hdr->lrh[3]);
__entry->dlid = be16_to_cpu(hdr->lrh[1]);
- __entry->qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
__entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
__entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
__entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
@@ -597,8 +589,9 @@ TRACE_EVENT(snoop_capture,
__entry->data_len = data_len;
memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
memcpy(__get_dynamic_array(raw_pkt), data, data_len);
- ),
- TP_printk("[%s] " SNOOP_PRN,
+ ),
+ TP_printk(
+ "[%s] " SNOOP_PRN,
__get_str(dev),
__entry->slid,
__entry->dlid,
@@ -609,7 +602,7 @@ TRACE_EVENT(snoop_capture,
__entry->pkey,
__entry->hdr_len,
__entry->data_len
- )
+ )
);
#undef TRACE_SYSTEM
@@ -621,41 +614,39 @@ TRACE_EVENT(snoop_capture,
TRACE_EVENT(hfi1_uctxtdata,
TP_PROTO(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt),
TP_ARGS(dd, uctxt),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(unsigned, ctxt)
- __field(u32, credits)
- __field(u64, hw_free)
- __field(u64, piobase)
- __field(u16, rcvhdrq_cnt)
- __field(u64, rcvhdrq_phys)
- __field(u32, eager_cnt)
- __field(u64, rcvegr_phys)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = uctxt->ctxt;
- __entry->credits = uctxt->sc->credits;
- __entry->hw_free = (u64)uctxt->sc->hw_free;
- __entry->piobase = (u64)uctxt->sc->base_addr;
- __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
- __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
- __entry->eager_cnt = uctxt->egrbufs.alloced;
- __entry->rcvegr_phys = uctxt->egrbufs.rcvtids[0].phys;
- ),
- TP_printk(
- "[%s] ctxt %u " UCTXT_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->credits,
- __entry->hw_free,
- __entry->piobase,
- __entry->rcvhdrq_cnt,
- __entry->rcvhdrq_phys,
- __entry->eager_cnt,
- __entry->rcvegr_phys
- )
- );
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(unsigned, ctxt)
+ __field(u32, credits)
+ __field(u64, hw_free)
+ __field(u64, piobase)
+ __field(u16, rcvhdrq_cnt)
+ __field(u64, rcvhdrq_phys)
+ __field(u32, eager_cnt)
+ __field(u64, rcvegr_phys)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = uctxt->ctxt;
+ __entry->credits = uctxt->sc->credits;
+ __entry->hw_free = (u64)uctxt->sc->hw_free;
+ __entry->piobase = (u64)uctxt->sc->base_addr;
+ __entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
+ __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
+ __entry->eager_cnt = uctxt->egrbufs.alloced;
+ __entry->rcvegr_phys =
+ uctxt->egrbufs.rcvtids[0].phys;
+ ),
+ TP_printk("[%s] ctxt %u " UCTXT_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->credits,
+ __entry->hw_free,
+ __entry->piobase,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_phys,
+ __entry->eager_cnt,
+ __entry->rcvegr_phys
+ )
+);
#define CINFO_FMT \
"egrtids:%u, egr_size:%u, hdrq_cnt:%u, hdrq_size:%u, sdma_ring_size:%u"
@@ -663,38 +654,35 @@ TRACE_EVENT(hfi1_ctxt_info,
TP_PROTO(struct hfi1_devdata *dd, unsigned ctxt, unsigned subctxt,
struct hfi1_ctxt_info cinfo),
TP_ARGS(dd, ctxt, subctxt, cinfo),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(unsigned, ctxt)
- __field(unsigned, subctxt)
- __field(u16, egrtids)
- __field(u16, rcvhdrq_cnt)
- __field(u16, rcvhdrq_size)
- __field(u16, sdma_ring_size)
- __field(u32, rcvegr_size)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->ctxt = ctxt;
- __entry->subctxt = subctxt;
- __entry->egrtids = cinfo.egrtids;
- __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
- __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
- __entry->sdma_ring_size = cinfo.sdma_ring_size;
- __entry->rcvegr_size = cinfo.rcvegr_size;
- ),
- TP_printk(
- "[%s] ctxt %u:%u " CINFO_FMT,
- __get_str(dev),
- __entry->ctxt,
- __entry->subctxt,
- __entry->egrtids,
- __entry->rcvegr_size,
- __entry->rcvhdrq_cnt,
- __entry->rcvhdrq_size,
- __entry->sdma_ring_size
- )
- );
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(unsigned, ctxt)
+ __field(unsigned, subctxt)
+ __field(u16, egrtids)
+ __field(u16, rcvhdrq_cnt)
+ __field(u16, rcvhdrq_size)
+ __field(u16, sdma_ring_size)
+ __field(u32, rcvegr_size)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->ctxt = ctxt;
+ __entry->subctxt = subctxt;
+ __entry->egrtids = cinfo.egrtids;
+ __entry->rcvhdrq_cnt = cinfo.rcvhdrq_cnt;
+ __entry->rcvhdrq_size = cinfo.rcvhdrq_entsize;
+ __entry->sdma_ring_size = cinfo.sdma_ring_size;
+ __entry->rcvegr_size = cinfo.rcvegr_size;
+ ),
+ TP_printk("[%s] ctxt %u:%u " CINFO_FMT,
+ __get_str(dev),
+ __entry->ctxt,
+ __entry->subctxt,
+ __entry->egrtids,
+ __entry->rcvegr_size,
+ __entry->rcvhdrq_cnt,
+ __entry->rcvhdrq_size,
+ __entry->sdma_ring_size
+ )
+);
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_sma
@@ -708,52 +696,48 @@ TRACE_EVENT(hfi1_ctxt_info,
)
DECLARE_EVENT_CLASS(hfi1_bct_template,
- TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
- TP_ARGS(dd, bc),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __dynamic_array(u8, bct, sizeof(*bc))
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- memcpy(
- __get_dynamic_array(bct),
- bc,
- sizeof(*bc));
- ),
- TP_printk(BCT_FORMAT,
- BCT(overall_shared_limit),
-
- BCT(vl[0].dedicated),
- BCT(vl[0].shared),
-
- BCT(vl[1].dedicated),
- BCT(vl[1].shared),
-
- BCT(vl[2].dedicated),
- BCT(vl[2].shared),
-
- BCT(vl[3].dedicated),
- BCT(vl[3].shared),
-
- BCT(vl[4].dedicated),
- BCT(vl[4].shared),
-
- BCT(vl[5].dedicated),
- BCT(vl[5].shared),
-
- BCT(vl[6].dedicated),
- BCT(vl[6].shared),
-
- BCT(vl[7].dedicated),
- BCT(vl[7].shared),
-
- BCT(vl[15].dedicated),
- BCT(vl[15].shared)
- )
+ TP_PROTO(struct hfi1_devdata *dd,
+ struct buffer_control *bc),
+ TP_ARGS(dd, bc),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __dynamic_array(u8, bct, sizeof(*bc))
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ memcpy(__get_dynamic_array(bct), bc,
+ sizeof(*bc));
+ ),
+ TP_printk(BCT_FORMAT,
+ BCT(overall_shared_limit),
+
+ BCT(vl[0].dedicated),
+ BCT(vl[0].shared),
+
+ BCT(vl[1].dedicated),
+ BCT(vl[1].shared),
+
+ BCT(vl[2].dedicated),
+ BCT(vl[2].shared),
+
+ BCT(vl[3].dedicated),
+ BCT(vl[3].shared),
+
+ BCT(vl[4].dedicated),
+ BCT(vl[4].shared),
+
+ BCT(vl[5].dedicated),
+ BCT(vl[5].shared),
+
+ BCT(vl[6].dedicated),
+ BCT(vl[6].shared),
+
+ BCT(vl[7].dedicated),
+ BCT(vl[7].shared),
+
+ BCT(vl[15].dedicated),
+ BCT(vl[15].shared)
+ )
);
-
DEFINE_EVENT(hfi1_bct_template, bct_set,
TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
TP_ARGS(dd, bc));
@@ -766,252 +750,209 @@ DEFINE_EVENT(hfi1_bct_template, bct_get,
#define TRACE_SYSTEM hfi1_sdma
TRACE_EVENT(hfi1_sdma_descriptor,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 desc0,
- u64 desc1,
- u16 e,
- void *descp),
+ TP_PROTO(struct sdma_engine *sde,
+ u64 desc0,
+ u64 desc1,
+ u16 e,
+ void *descp),
TP_ARGS(sde, desc0, desc1, e, descp),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sde->dd)
- __field(void *, descp)
- __field(u64, desc0)
- __field(u64, desc1)
- __field(u16, e)
- __field(u8, idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sde->dd);
- __entry->desc0 = desc0;
- __entry->desc1 = desc1;
- __entry->idx = sde->this_idx;
- __entry->descp = descp;
- __entry->e = e;
- ),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(void *, descp)
+ __field(u64, desc0)
+ __field(u64, desc1)
+ __field(u16, e)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->desc0 = desc0;
+ __entry->desc1 = desc1;
+ __entry->idx = sde->this_idx;
+ __entry->descp = descp;
+ __entry->e = e;
+ ),
TP_printk(
- "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
- __get_str(dev),
- __entry->idx,
- __parse_sdma_flags(__entry->desc0, __entry->desc1),
- (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT)
- & SDMA_DESC0_PHY_ADDR_MASK,
- (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT)
- & SDMA_DESC1_GENERATION_MASK),
- (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT)
- & SDMA_DESC0_BYTE_COUNT_MASK),
- __entry->desc0,
- __entry->desc1,
- __entry->descp,
- __entry->e
- )
+ "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
+ __get_str(dev),
+ __entry->idx,
+ __parse_sdma_flags(__entry->desc0, __entry->desc1),
+ (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
+ SDMA_DESC0_PHY_ADDR_MASK,
+ (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
+ SDMA_DESC1_GENERATION_MASK),
+ (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
+ SDMA_DESC0_BYTE_COUNT_MASK),
+ __entry->desc0,
+ __entry->desc1,
+ __entry->descp,
+ __entry->e
+ )
);
TRACE_EVENT(hfi1_sdma_engine_select,
- TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
- TP_ARGS(dd, sel, vl, idx),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __field(u32, sel)
- __field(u8, vl)
- __field(u8, idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd);
- __entry->sel = sel;
- __entry->vl = vl;
- __entry->idx = idx;
- ),
- TP_printk(
- "[%s] selecting SDE %u sel 0x%x vl %u",
- __get_str(dev),
- __entry->idx,
- __entry->sel,
- __entry->vl
- )
+ TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
+ TP_ARGS(dd, sel, vl, idx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __field(u32, sel)
+ __field(u8, vl)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd);
+ __entry->sel = sel;
+ __entry->vl = vl;
+ __entry->idx = idx;
+ ),
+ TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sel,
+ __entry->vl
+ )
);
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 status
- ),
- TP_ARGS(sde, status),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sde->dd)
- __field(u64, status)
- __field(u8, idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sde->dd);
- __entry->status = status;
- __entry->idx = sde->this_idx;
- ),
- TP_printk(
- "[%s] SDE(%u) status %llx",
- __get_str(dev),
- __entry->idx,
- (unsigned long long)__entry->status
- )
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, status)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->status = status;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk("[%s] SDE(%u) status %llx",
+ __get_str(dev),
+ __entry->idx,
+ (unsigned long long)__entry->status
+ )
);
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 status
- ),
- TP_ARGS(sde, status)
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status)
);
DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 status
- ),
- TP_ARGS(sde, status)
+ TP_PROTO(struct sdma_engine *sde, u64 status),
+ TP_ARGS(sde, status)
);
DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
- TP_PROTO(
- struct sdma_engine *sde,
- int aidx
- ),
- TP_ARGS(sde, aidx),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sde->dd)
- __field(int, aidx)
- __field(u8, idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sde->dd);
- __entry->idx = sde->this_idx;
- __entry->aidx = aidx;
- ),
- TP_printk(
- "[%s] SDE(%u) aidx %d",
- __get_str(dev),
- __entry->idx,
- __entry->aidx
- )
+ TP_PROTO(struct sdma_engine *sde, int aidx),
+ TP_ARGS(sde, aidx),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(int, aidx)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->idx = sde->this_idx;
+ __entry->aidx = aidx;
+ ),
+ TP_printk("[%s] SDE(%u) aidx %d",
+ __get_str(dev),
+ __entry->idx,
+ __entry->aidx
+ )
);
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
- TP_PROTO(
- struct sdma_engine *sde,
- int aidx
- ),
+ TP_PROTO(struct sdma_engine *sde, int aidx),
TP_ARGS(sde, aidx));
DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
- TP_PROTO(
- struct sdma_engine *sde,
- int aidx
- ),
+ TP_PROTO(struct sdma_engine *sde, int aidx),
TP_ARGS(sde, aidx));
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
- TP_PROTO(
- struct sdma_engine *sde,
- u16 hwhead,
- u16 swhead,
- struct sdma_txreq *txp
- ),
- TP_ARGS(sde, hwhead, swhead, txp),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sde->dd)
- __field(u64, sn)
- __field(u16, hwhead)
- __field(u16, swhead)
- __field(u16, txnext)
- __field(u16, tx_tail)
- __field(u16, tx_head)
- __field(u8, idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sde->dd);
- __entry->hwhead = hwhead;
- __entry->swhead = swhead;
- __entry->tx_tail = sde->tx_tail;
- __entry->tx_head = sde->tx_head;
- __entry->txnext = txp ? txp->next_descq_idx : ~0;
- __entry->idx = sde->this_idx;
- __entry->sn = txp ? txp->sn : ~0;
- ),
- TP_printk(
- "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
- __get_str(dev),
- __entry->idx,
- __entry->sn,
- __entry->hwhead,
- __entry->swhead,
- __entry->txnext,
- __entry->tx_head,
- __entry->tx_tail
- )
+ TP_PROTO(struct sdma_engine *sde,
+ u16 hwhead,
+ u16 swhead,
+ struct sdma_txreq *txp
+ ),
+ TP_ARGS(sde, hwhead, swhead, txp),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ __entry->sn = txp ? txp->sn : ~0;
+ ),
+ TP_printk(
+ "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
);
#else
TRACE_EVENT(hfi1_sdma_progress,
- TP_PROTO(
- struct sdma_engine *sde,
- u16 hwhead,
- u16 swhead,
- struct sdma_txreq *txp
+ TP_PROTO(struct sdma_engine *sde,
+ u16 hwhead, u16 swhead,
+ struct sdma_txreq *txp
),
TP_ARGS(sde, hwhead, swhead, txp),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sde->dd)
- __field(u16, hwhead)
- __field(u16, swhead)
- __field(u16, txnext)
- __field(u16, tx_tail)
- __field(u16, tx_head)
- __field(u8, idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sde->dd);
- __entry->hwhead = hwhead;
- __entry->swhead = swhead;
- __entry->tx_tail = sde->tx_tail;
- __entry->tx_head = sde->tx_head;
- __entry->txnext = txp ? txp->next_descq_idx : ~0;
- __entry->idx = sde->this_idx;
- ),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u16, hwhead)
+ __field(u16, swhead)
+ __field(u16, txnext)
+ __field(u16, tx_tail)
+ __field(u16, tx_head)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->hwhead = hwhead;
+ __entry->swhead = swhead;
+ __entry->tx_tail = sde->tx_tail;
+ __entry->tx_head = sde->tx_head;
+ __entry->txnext = txp ? txp->next_descq_idx : ~0;
+ __entry->idx = sde->this_idx;
+ ),
TP_printk(
- "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
- __get_str(dev),
- __entry->idx,
- __entry->hwhead,
- __entry->swhead,
- __entry->txnext,
- __entry->tx_head,
- __entry->tx_tail
- )
+ "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
+ __get_str(dev),
+ __entry->idx,
+ __entry->hwhead,
+ __entry->swhead,
+ __entry->txnext,
+ __entry->tx_head,
+ __entry->tx_tail
+ )
);
#endif
DECLARE_EVENT_CLASS(hfi1_sdma_sn,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 sn
- ),
- TP_ARGS(sde, sn),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sde->dd)
- __field(u64, sn)
- __field(u8, idx)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sde->dd);
- __entry->sn = sn;
- __entry->idx = sde->this_idx;
- ),
- TP_printk(
- "[%s] SDE(%u) sn %llu",
- __get_str(dev),
- __entry->idx,
- __entry->sn
- )
+ TP_PROTO(struct sdma_engine *sde, u64 sn),
+ TP_ARGS(sde, sn),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __field(u64, sn)
+ __field(u8, idx)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __entry->sn = sn;
+ __entry->idx = sde->this_idx;
+ ),
+ TP_printk("[%s] SDE(%u) sn %llu",
+ __get_str(dev),
+ __entry->idx,
+ __entry->sn
+ )
);
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
@@ -1023,10 +964,7 @@ DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
);
DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
- TP_PROTO(
- struct sdma_engine *sde,
- u64 sn
- ),
+ TP_PROTO(struct sdma_engine *sde, u64 sn),
TP_ARGS(sde, sn)
);
@@ -1227,88 +1165,85 @@ TRACE_EVENT(hfi1_sdma_user_header_ahg,
);
TRACE_EVENT(hfi1_sdma_state,
- TP_PROTO(
- struct sdma_engine *sde,
- const char *cstate,
- const char *nstate
- ),
- TP_ARGS(sde, cstate, nstate),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(sde->dd)
- __string(curstate, cstate)
- __string(newstate, nstate)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(sde->dd);
- __assign_str(curstate, cstate);
- __assign_str(newstate, nstate);
- ),
+ TP_PROTO(struct sdma_engine *sde,
+ const char *cstate,
+ const char *nstate
+ ),
+ TP_ARGS(sde, cstate, nstate),
+ TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
+ __string(curstate, cstate)
+ __string(newstate, nstate)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
+ __assign_str(curstate, cstate);
+ __assign_str(newstate, nstate);
+ ),
TP_printk("[%s] current state %s new state %s",
- __get_str(dev),
- __get_str(curstate),
- __get_str(newstate)
- )
+ __get_str(dev),
+ __get_str(curstate),
+ __get_str(newstate)
+ )
);
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_rc
DECLARE_EVENT_CLASS(hfi1_rc_template,
- TP_PROTO(struct hfi1_qp *qp, u32 psn),
- TP_ARGS(qp, psn),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
- __field(u32, qpn)
- __field(u32, s_flags)
- __field(u32, psn)
- __field(u32, s_psn)
- __field(u32, s_next_psn)
- __field(u32, s_sending_psn)
- __field(u32, s_sending_hpsn)
- __field(u32, r_psn)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
- __entry->qpn = qp->ibqp.qp_num;
- __entry->s_flags = qp->s_flags;
- __entry->psn = psn;
- __entry->s_psn = qp->s_psn;
- __entry->s_next_psn = qp->s_next_psn;
- __entry->s_sending_psn = qp->s_sending_psn;
- __entry->s_sending_hpsn = qp->s_sending_hpsn;
- __entry->r_psn = qp->r_psn;
- ),
- TP_printk(
- "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
- __get_str(dev),
- __entry->qpn,
- __entry->s_flags,
- __entry->psn,
- __entry->s_psn,
- __entry->s_next_psn,
- __entry->s_sending_psn,
- __entry->s_sending_hpsn,
- __entry->r_psn
- )
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
+ TP_ARGS(qp, psn),
+ TP_STRUCT__entry(
+ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(u32, s_flags)
+ __field(u32, psn)
+ __field(u32, s_psn)
+ __field(u32, s_next_psn)
+ __field(u32, s_sending_psn)
+ __field(u32, s_sending_hpsn)
+ __field(u32, r_psn)
+ ),
+ TP_fast_assign(
+ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->s_flags = qp->s_flags;
+ __entry->psn = psn;
+ __entry->s_psn = qp->s_psn;
+ __entry->s_next_psn = qp->s_next_psn;
+ __entry->s_sending_psn = qp->s_sending_psn;
+ __entry->s_sending_hpsn = qp->s_sending_hpsn;
+ __entry->r_psn = qp->r_psn;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x s_flags 0x%x psn 0x%x s_psn 0x%x s_next_psn 0x%x s_sending_psn 0x%x sending_hpsn 0x%x r_psn 0x%x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->s_flags,
+ __entry->psn,
+ __entry->s_psn,
+ __entry->s_next_psn,
+ __entry->s_sending_psn,
+ __entry->s_sending_hpsn,
+ __entry->r_psn
+ )
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rc_sendcomplete,
- TP_PROTO(struct hfi1_qp *qp, u32 psn),
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rc_ack,
- TP_PROTO(struct hfi1_qp *qp, u32 psn),
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rc_timeout,
- TP_PROTO(struct hfi1_qp *qp, u32 psn),
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error,
- TP_PROTO(struct hfi1_qp *qp, u32 psn),
+ TP_PROTO(struct rvt_qp *qp, u32 psn),
TP_ARGS(qp, psn)
);
@@ -1316,21 +1251,20 @@ DEFINE_EVENT(hfi1_rc_template, hfi1_rc_rcv_error,
#define TRACE_SYSTEM hfi1_misc
TRACE_EVENT(hfi1_interrupt,
- TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
- int src),
- TP_ARGS(dd, is_entry, src),
- TP_STRUCT__entry(
- DD_DEV_ENTRY(dd)
- __array(char, buf, 64)
- __field(int, src)
- ),
- TP_fast_assign(
- DD_DEV_ASSIGN(dd)
- is_entry->is_name(__entry->buf, 64, src - is_entry->start);
- __entry->src = src;
- ),
- TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
- __entry->src)
+ TP_PROTO(struct hfi1_devdata *dd, const struct is_table *is_entry,
+ int src),
+ TP_ARGS(dd, is_entry, src),
+ TP_STRUCT__entry(DD_DEV_ENTRY(dd)
+ __array(char, buf, 64)
+ __field(int, src)
+ ),
+ TP_fast_assign(DD_DEV_ASSIGN(dd)
+ is_entry->is_name(__entry->buf, 64,
+ src - is_entry->start);
+ __entry->src = src;
+ ),
+ TP_printk("[%s] source: %s [%d]", __get_str(dev), __entry->buf,
+ __entry->src)
);
/*
@@ -1345,21 +1279,21 @@ TRACE_EVENT(hfi1_interrupt,
#define MAX_MSG_LEN 512
DECLARE_EVENT_CLASS(hfi1_trace_template,
- TP_PROTO(const char *function, struct va_format *vaf),
- TP_ARGS(function, vaf),
- TP_STRUCT__entry(
- __string(function, function)
- __dynamic_array(char, msg, MAX_MSG_LEN)
- ),
- TP_fast_assign(
- __assign_str(function, function);
- WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
- MAX_MSG_LEN, vaf->fmt,
- *vaf->va) >= MAX_MSG_LEN);
- ),
- TP_printk("(%s) %s",
- __get_str(function),
- __get_str(msg))
+ TP_PROTO(const char *function, struct va_format *vaf),
+ TP_ARGS(function, vaf),
+ TP_STRUCT__entry(__string(function, function)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(__assign_str(function, function);
+ WARN_ON_ONCE(vsnprintf
+ (__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >=
+ MAX_MSG_LEN);
+ ),
+ TP_printk("(%s) %s",
+ __get_str(function),
+ __get_str(msg))
);
/*
@@ -1406,6 +1340,7 @@ __hfi1_trace_def(DC8051);
__hfi1_trace_def(FIRMWARE);
__hfi1_trace_def(RCVCTRL);
__hfi1_trace_def(TID);
+__hfi1_trace_def(MMU);
#define hfi1_cdbg(which, fmt, ...) \
__hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
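
With the MMU trace class now defined (__hfi1_trace_def(MMU) above), the hfi1_cdbg() wrapper just shown gives the call syntax on the debug path; a sketch, with an illustrative format string:

        hfi1_cdbg(MMU, "invalidate 0x%lx - 0x%lx", start, end);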
diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/staging/rdma/hfi1/twsi.c
index ea54fd2700ad..e82e52a63d35 100644
--- a/drivers/staging/rdma/hfi1/twsi.c
+++ b/drivers/staging/rdma/hfi1/twsi.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -119,9 +116,9 @@ static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit)
* Allow for slow slaves by simple
* delay for falling edge, sampling on rise.
*/
- if (!bit)
+ if (!bit) {
udelay(2);
- else {
+ } else {
int rise_usec;
for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
@@ -131,11 +128,24 @@ static void scl_out(struct hfi1_devdata *dd, u32 target, u8 bit)
}
if (rise_usec <= 0)
dd_dev_err(dd, "SCL interface stuck low > %d uSec\n",
- SCL_WAIT_USEC);
+ SCL_WAIT_USEC);
}
i2c_wait_for_writes(dd, target);
}
+static u8 scl_in(struct hfi1_devdata *dd, u32 target, int wait)
+{
+ u32 read_val, mask;
+
+ mask = QSFP_HFI0_I2CCLK;
+ /* SCL is meant to be open-drain, so never set "OUT", just DIR */
+ hfi1_gpio_mod(dd, target, 0, 0, mask);
+ read_val = hfi1_gpio_mod(dd, target, 0, 0, 0);
+ if (wait)
+ i2c_wait_for_writes(dd, target);
+ return (read_val & mask) >> GPIO_SCL_NUM;
+}
+
static void sda_out(struct hfi1_devdata *dd, u32 target, u8 bit)
{
u32 mask;
@@ -274,13 +284,12 @@ static void stop_cmd(struct hfi1_devdata *dd, u32 target)
/**
* hfi1_twsi_reset - reset I2C communication
* @dd: the hfi1_ib device
+ * Return: 0 if OK, -EIO on error
*/
-
int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target)
{
int clock_cycles_left = 9;
- int was_high = 0;
- u32 pins, mask;
+ u32 mask;
/* Both SCL and SDA should be high. If not, there
* is something wrong.
@@ -294,43 +303,23 @@ int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target)
*/
hfi1_gpio_mod(dd, target, 0, 0, mask);
- /*
- * Clock nine times to get all listeners into a sane state.
- * If SDA does not go high at any point, we are wedged.
- * One vendor recommends then issuing START followed by STOP.
- * we cannot use our "normal" functions to do that, because
- * if SCL drops between them, another vendor's part will
- * wedge, dropping SDA and keeping it low forever, at the end of
- * the next transaction (even if it was not the device addressed).
- * So our START and STOP take place with SCL held high.
+ /* If SCL is stuck low, a slave device is misbehaving and
+ * there is not much we can do.
+ */
+ if (!scl_in(dd, target, 0))
+ return -EIO;
+
+ /* If SDA is stuck low, clock SCL up to 9 times so the
+ * device releases SDA and frees the bus
*/
while (clock_cycles_left--) {
+ if (sda_in(dd, target, 0))
+ return 0;
scl_out(dd, target, 0);
scl_out(dd, target, 1);
- /* Note if SDA is high, but keep clocking to sync slave */
- was_high |= sda_in(dd, target, 0);
- }
-
- if (was_high) {
- /*
- * We saw a high, which we hope means the slave is sync'd.
- * Issue START, STOP, pause for T_BUF.
- */
-
- pins = hfi1_gpio_mod(dd, target, 0, 0, 0);
- if ((pins & mask) != mask)
- dd_dev_err(dd, "GPIO pins not at rest: %d\n",
- pins & mask);
- /* Drop SDA to issue START */
- udelay(1); /* Guarantee .6 uSec setup */
- sda_out(dd, target, 0);
- udelay(1); /* Guarantee .6 uSec hold */
- /* At this point, SCL is high, SDA low. Raise SDA for STOP */
- sda_out(dd, target, 1);
- udelay(TWSI_BUF_WAIT_USEC);
}
- return !was_high;
+ return -EIO;
}
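
The rewritten reset is the standard I2C bus-recovery sequence: if a slave left mid-transfer is holding SDA low, pulsing SCL up to nine times (one clock per remaining bit of the interrupted byte) lets it finish and release the data line. A minimal caller sketch, assuming qsfp_lock is held as twsi.h requires:

        if (hfi1_twsi_reset(dd, target))
                dd_dev_err(dd, "I2C bus recovery failed, bus still wedged\n");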
#define HFI1_TWSI_START 0x100
@@ -365,17 +354,25 @@ static int twsi_wr(struct hfi1_devdata *dd, u32 target, int data, int flags)
* HFI1_TWSI_NO_DEV and does the correct operation for the legacy part,
* which responded to all TWSI device codes, interpreting them as
* address within device. On all other devices found on board handled by
- * this driver, the device is followed by a one-byte "address" which selects
+ * this driver, the device is followed by an N-byte "address" which selects
* the "register" or "offset" within the device from which data should
* be read.
*/
int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
void *buffer, int len)
{
- int ret;
u8 *bp = buffer;
+ int ret = 1;
+ int i;
+ int offset_size;
+
+ /* obtain the offset size, strip it from the device address */
+ offset_size = (dev >> 8) & 0xff;
+ dev &= 0xff;
- ret = 1;
+ /* allow at most a 2 byte offset */
+ if (offset_size > 2)
+ goto bail;
if (dev == HFI1_TWSI_NO_DEV) {
/* legacy not-really-I2C */
@@ -383,34 +380,29 @@ int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
ret = twsi_wr(dd, target, addr, HFI1_TWSI_START);
} else {
/* Actual I2C */
- ret = twsi_wr(dd, target, dev | WRITE_CMD, HFI1_TWSI_START);
- if (ret) {
- stop_cmd(dd, target);
- ret = 1;
- goto bail;
- }
- /*
- * SFF spec claims we do _not_ stop after the addr
- * but simply issue a start with the "read" dev-addr.
- * Since we are implicitly waiting for ACK here,
- * we need t_buf (nominally 20uSec) before that start,
- * and cannot rely on the delay built in to the STOP
- */
- ret = twsi_wr(dd, target, addr, 0);
- udelay(TWSI_BUF_WAIT_USEC);
+ if (offset_size) {
+ ret = twsi_wr(dd, target,
+ dev | WRITE_CMD, HFI1_TWSI_START);
+ if (ret) {
+ stop_cmd(dd, target);
+ goto bail;
+ }
- if (ret) {
- dd_dev_err(dd,
- "Failed to write interface read addr %02X\n",
- addr);
- ret = 1;
- goto bail;
+ for (i = 0; i < offset_size; i++) {
+ ret = twsi_wr(dd, target,
+ (addr >> (i * 8)) & 0xff, 0);
+ udelay(TWSI_BUF_WAIT_USEC);
+ if (ret) {
+ dd_dev_err(dd, "Failed to write byte %d of offset 0x%04X\n",
+ i, addr);
+ goto bail;
+ }
+ }
}
ret = twsi_wr(dd, target, dev | READ_CMD, HFI1_TWSI_START);
}
if (ret) {
stop_cmd(dd, target);
- ret = 1;
goto bail;
}
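
With this rework, bits 15:8 of the dev argument carry the offset width in bytes (0, 1 or 2) and bits 7:0 the I2C device address. A hypothetical caller reading from a 16-bit register offset; the TWSI_OFFSET_SIZE macro and the 0xa0 device address are illustrative, not part of the patch:

        #define TWSI_OFFSET_SIZE(n)     (((n) & 0xff) << 8)     /* illustrative */

        /* read len bytes starting at 16-bit offset 0x0123 of device 0xa0 */
        ret = hfi1_twsi_blk_rd(dd, target, 0xa0 | TWSI_OFFSET_SIZE(2),
                               0x0123, buffer, len);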
@@ -442,76 +434,55 @@ bail:
* HFI1_TWSI_NO_DEV and does the correct operation for the legacy part,
* which responded to all TWSI device codes, interpreting them as
* address within device. On all other devices found on board handled by
- * this driver, the device is followed by a one-byte "address" which selects
+ * this driver, the device is followed by an N-byte "address" which selects
* the "register" or "offset" within the device to which data should
* be written.
*/
int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
const void *buffer, int len)
{
- int sub_len;
const u8 *bp = buffer;
- int max_wait_time, i;
int ret = 1;
+ int i;
+ int offset_size;
- while (len > 0) {
- if (dev == HFI1_TWSI_NO_DEV) {
- if (twsi_wr(dd, target, (addr << 1) | WRITE_CMD,
- HFI1_TWSI_START)) {
- goto failed_write;
- }
- } else {
- /* Real I2C */
- if (twsi_wr(dd, target,
- dev | WRITE_CMD, HFI1_TWSI_START))
- goto failed_write;
- ret = twsi_wr(dd, target, addr, 0);
- if (ret) {
- dd_dev_err(dd,
- "Failed to write interface write addr %02X\n",
- addr);
- goto failed_write;
- }
- }
-
- sub_len = min(len, 4);
- addr += sub_len;
- len -= sub_len;
+ /* obtain the offset size, strip it from the device address */
+ offset_size = (dev >> 8) & 0xff;
+ dev &= 0xff;
- for (i = 0; i < sub_len; i++)
- if (twsi_wr(dd, target, *bp++, 0))
- goto failed_write;
+ /* allow at most a 2 byte offset */
+ if (offset_size > 2)
+ goto bail;
- stop_cmd(dd, target);
+ if (dev == HFI1_TWSI_NO_DEV) {
+ if (twsi_wr(dd, target, (addr << 1) | WRITE_CMD,
+ HFI1_TWSI_START)) {
+ goto failed_write;
+ }
+ } else {
+ /* Real I2C */
+ if (twsi_wr(dd, target, dev | WRITE_CMD, HFI1_TWSI_START))
+ goto failed_write;
+ }
- /*
- * Wait for write complete by waiting for a successful
- * read (the chip replies with a zero after the write
- * cmd completes, and before it writes to the eeprom.
- * The startcmd for the read will fail the ack until
- * the writes have completed. We do this inline to avoid
- * the debug prints that are in the real read routine
- * if the startcmd fails.
- * We also use the proper device address, so it doesn't matter
- * whether we have real eeprom_dev. Legacy likes any address.
- */
- max_wait_time = 100;
- while (twsi_wr(dd, target,
- dev | READ_CMD, HFI1_TWSI_START)) {
- stop_cmd(dd, target);
- if (!--max_wait_time)
- goto failed_write;
+ for (i = 0; i < offset_size; i++) {
+ ret = twsi_wr(dd, target, (addr >> (i * 8)) & 0xff, 0);
+ udelay(TWSI_BUF_WAIT_USEC);
+ if (ret) {
+ dd_dev_err(dd, "Failed to write byte %d of offset 0x%04X\n",
+ i, addr);
+ goto bail;
}
- /* now read (and ignore) the resulting byte */
- rd_byte(dd, target, 1);
}
+ for (i = 0; i < len; i++)
+ if (twsi_wr(dd, target, *bp++, 0))
+ goto failed_write;
+
ret = 0;
- goto bail;
failed_write:
stop_cmd(dd, target);
- ret = 1;
bail:
return ret;
diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/staging/rdma/hfi1/twsi.h
index 5907e029613d..5b8a5b5e7eae 100644
--- a/drivers/staging/rdma/hfi1/twsi.h
+++ b/drivers/staging/rdma/hfi1/twsi.h
@@ -1,14 +1,13 @@
#ifndef _TWSI_H
#define _TWSI_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -54,8 +51,9 @@
struct hfi1_devdata;
-/* Bit position of SDA pin in ASIC_QSFP* registers */
+/* Bit position of SDA/SCL pins in ASIC_QSFP* registers */
#define GPIO_SDA_NUM 1
+#define GPIO_SCL_NUM 0
/* these functions must be called with qsfp_lock held */
int hfi1_twsi_reset(struct hfi1_devdata *dd, u32 target);
@@ -64,5 +62,4 @@ int hfi1_twsi_blk_rd(struct hfi1_devdata *dd, u32 target, int dev, int addr,
int hfi1_twsi_blk_wr(struct hfi1_devdata *dd, u32 target, int dev, int addr,
const void *buffer, int len);
-
#endif /* _TWSI_H */
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/staging/rdma/hfi1/uc.c
index 4f2a7889a852..df773d433297 100644
--- a/drivers/staging/rdma/hfi1/uc.c
+++ b/drivers/staging/rdma/hfi1/uc.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -49,71 +46,82 @@
*/
#include "hfi.h"
-#include "sdma.h"
+#include "verbs_txreq.h"
#include "qp.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
+/* only opcode mask for adaptive pio */
+const u32 uc_only_opcode =
+ BIT(OP(SEND_ONLY) & 0x1f) |
+ BIT(OP(SEND_ONLY_WITH_IMMEDIATE) & 0x1f) |
+ BIT(OP(RDMA_WRITE_ONLY) & 0x1f) |
+ BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & 0x1f);
+
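All UC opcodes share the IB_OPCODE_UC base (0x20), so masking with 0x1f maps each *_ONLY opcode to a distinct bit of a 32-bit word; because OP() token-pastes only its first argument, BIT(OP(x) & 0x1f) and BIT(OP(x & 0x1f)) expand identically. A consumer on the adaptive-PIO path (not shown in this hunk) would presumably test membership like:

        if (uc_only_opcode & BIT(opcode & 0x1f))
                ;       /* single-packet ("_ONLY") UC request */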
/**
* hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
*
+ * Assume s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int hfi1_make_uc_req(struct hfi1_qp *qp)
+int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_other_headers *ohdr;
- struct hfi1_swqe *wqe;
- unsigned long flags;
+ struct rvt_swqe *wqe;
u32 hwords = 5;
u32 bth0 = 0;
u32 len;
u32 pmtu = qp->pmtu;
- int ret = 0;
int middle = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
+ ps->s_txreq = get_txreq(ps->dev, qp);
+ if (IS_ERR(ps->s_txreq))
+ goto bail_no_tx;
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_SEND_OK)) {
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_iowait.sdma_busy)) {
- qp->s_flags |= HFI1_S_WAIT_DMA;
+ if (iowait_sdma_pending(&priv->s_iowait)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
clear_ahg(qp);
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
+ goto done_free_tx;
}
- ohdr = &qp->s_hdr->ibh.u.oth;
+ ohdr = &ps->s_txreq->phdr.hdr.u.oth;
if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
- ohdr = &qp->s_hdr->ibh.u.l.oth;
+ ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
/* Get the next send request. */
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
qp->s_wqe = NULL;
switch (qp->s_state) {
default:
- if (!(ib_hfi1_state_ops[qp->state] &
- HFI1_PROCESS_NEXT_SEND_OK))
+ if (!(ib_rvt_state_ops[qp->state] &
+ RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- if (qp->s_cur == qp->s_head) {
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
}
/*
* Start a new request.
*/
- wqe->psn = qp->s_next_psn;
- qp->s_psn = qp->s_next_psn;
+ qp->s_psn = wqe->psn;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
@@ -128,9 +136,9 @@ int hfi1_make_uc_req(struct hfi1_qp *qp)
len = pmtu;
break;
}
- if (wqe->wr.opcode == IB_WR_SEND)
+ if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_ONLY);
- else {
+ } else {
qp->s_state =
OP(SEND_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
@@ -157,9 +165,9 @@ int hfi1_make_uc_req(struct hfi1_qp *qp)
len = pmtu;
break;
}
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
+ } else {
qp->s_state =
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
/* Immediate data comes after the RETH */
@@ -188,9 +196,9 @@ int hfi1_make_uc_req(struct hfi1_qp *qp)
middle = HFI1_CAP_IS_KSET(SDMA_AHG);
break;
}
- if (wqe->wr.opcode == IB_WR_SEND)
+ if (wqe->wr.opcode == IB_WR_SEND) {
qp->s_state = OP(SEND_LAST);
- else {
+ } else {
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
ohdr->u.imm_data = wqe->wr.ex.imm_data;
@@ -213,9 +221,9 @@ int hfi1_make_uc_req(struct hfi1_qp *qp)
middle = HFI1_CAP_IS_KSET(SDMA_AHG);
break;
}
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
qp->s_state = OP(RDMA_WRITE_LAST);
- else {
+ } else {
qp->s_state =
OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
/* Immediate data comes after the BTH */
@@ -231,19 +239,28 @@ int hfi1_make_uc_req(struct hfi1_qp *qp)
}
qp->s_len -= len;
qp->s_hdrwords = hwords;
+ ps->s_txreq->sde = priv->s_sde;
qp->s_cur_sge = &qp->s_sge;
qp->s_cur_size = len;
hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- mask_psn(qp->s_next_psn++), middle);
-done:
- ret = 1;
- goto unlock;
+ mask_psn(qp->s_psn++), middle, ps);
+ /* pbc */
+ ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
+ return 1;
+
+done_free_tx:
+ hfi1_put_txreq(ps->s_txreq);
+ ps->s_txreq = NULL;
+ return 1;
bail:
- qp->s_flags &= ~HFI1_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
+ hfi1_put_txreq(ps->s_txreq);
+
+bail_no_tx:
+ ps->s_txreq = NULL;
+ qp->s_flags &= ~RVT_S_BUSY;
+ qp->s_hdrwords = 0;
+ return 0;
}
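/*
 * Editor's sketch, not part of this patch: the smp_read_barrier_depends()
 * and ACCESS_ONCE(qp->s_head) reads added above let the send engine
 * consume the send queue without taking the poster's lock. Assuming
 * post_one_send() publishes the WQE contents with a write barrier before
 * advancing s_head, the pairing looks like:
 *
 *   producer (post_one_send)             consumer (hfi1_make_uc_req)
 *   ------------------------             ---------------------------
 *   fill *rvt_get_swqe_ptr(qp, head);    smp_read_barrier_depends();
 *   <write barrier>                      head = ACCESS_ONCE(qp->s_head);
 *   qp->s_head = next;                   if (qp->s_cur == head)
 *                                                ; /* queue empty */
 */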
/**
@@ -266,7 +283,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
u32 tlen = packet->tlen;
- struct hfi1_qp *qp = packet->qp;
+ struct rvt_qp *qp = packet->qp;
struct hfi1_other_headers *ohdr = packet->ohdr;
u32 bth0, opcode;
u32 hdrsize = packet->hlen;
@@ -291,14 +308,14 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
u16 rlid = be16_to_cpu(hdr->lrh[3]);
u8 sl, sc5;
- lqpn = bth1 & HFI1_QPN_MASK;
+ lqpn = bth1 & RVT_QPN_MASK;
rqpn = qp->remote_qpn;
sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
sl = ibp->sc_to_sl[sc5];
process_becn(ppd, sl, rlid, lqpn, rqpn,
- IB_CC_SVCTYPE_UC);
+ IB_CC_SVCTYPE_UC);
}
if (bth1 & HFI1_FECN_SMASK) {
@@ -331,10 +348,11 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
inv:
if (qp->r_state == OP(SEND_FIRST) ||
qp->r_state == OP(SEND_MIDDLE)) {
- set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
- } else
- hfi1_put_ss(&qp->r_sge);
+ } else {
+ rvt_put_ss(&qp->r_sge);
+ }
qp->r_state = OP(SEND_LAST);
switch (opcode) {
case OP(SEND_FIRST):
@@ -381,7 +399,7 @@ inv:
goto inv;
}
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & HFI1_R_COMM_EST))
+ if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp_comm_est(qp);
/* OK, process the packet. */
@@ -390,10 +408,10 @@ inv:
case OP(SEND_ONLY):
case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
- if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
qp->r_sge = qp->s_rdma_read_sge;
- else {
- ret = hfi1_get_rwqe(qp, 0);
+ } else {
+ ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0)
goto op_err;
if (!ret)
@@ -417,7 +435,7 @@ send_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, 0);
+ hfi1_copy_sge(&qp->r_sge, data, pmtu, 0, 0);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
@@ -442,8 +460,8 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
- hfi1_copy_sge(&qp->r_sge, data, tlen, 0);
- hfi1_put_ss(&qp->s_rdma_read_sge);
+ hfi1_copy_sge(&qp->r_sge, data, tlen, 0, 0);
+ rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -468,9 +486,9 @@ last_imm:
wc.dlid_path_bits = 0;
wc.port_num = 0;
/* Signal completion event if the solicited bit is set. */
- hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
break;
case OP(RDMA_WRITE_FIRST):
@@ -491,8 +509,8 @@ rdma_first:
int ok;
/* Check rkey */
- ok = hfi1_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
- vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
+ ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
+ vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
if (unlikely(!ok))
goto drop;
qp->r_sge.num_sge = 1;
@@ -503,9 +521,9 @@ rdma_first:
qp->r_sge.sge.length = 0;
qp->r_sge.sge.sge_length = 0;
}
- if (opcode == OP(RDMA_WRITE_ONLY))
+ if (opcode == OP(RDMA_WRITE_ONLY)) {
goto rdma_last;
- else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
+ } else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
wc.ex.imm_data = ohdr->u.rc.imm_data;
goto rdma_last_imm;
}
@@ -517,7 +535,7 @@ rdma_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, 1);
+ hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -535,10 +553,10 @@ rdma_last_imm:
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- if (test_and_clear_bit(HFI1_R_REWIND_SGE, &qp->r_aflags))
- hfi1_put_ss(&qp->s_rdma_read_sge);
- else {
- ret = hfi1_get_rwqe(qp, 1);
+ if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
+ rvt_put_ss(&qp->s_rdma_read_sge);
+ } else {
+ ret = hfi1_rvt_get_rwqe(qp, 1);
if (ret < 0)
goto op_err;
if (!ret)
@@ -546,8 +564,8 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
- hfi1_put_ss(&qp->r_sge);
+ hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
+ rvt_put_ss(&qp->r_sge);
goto last_imm;
case OP(RDMA_WRITE_LAST):
@@ -562,8 +580,8 @@ rdma_last:
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- hfi1_copy_sge(&qp->r_sge, data, tlen, 1);
- hfi1_put_ss(&qp->r_sge);
+ hfi1_copy_sge(&qp->r_sge, data, tlen, 1, 0);
+ rvt_put_ss(&qp->r_sge);
break;
default:
@@ -575,14 +593,12 @@ rdma_last:
return;
rewind:
- set_bit(HFI1_R_REWIND_SGE, &qp->r_aflags);
+ set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
return;
op_err:
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return;
-
}
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
index bd1b402c1e14..ae8a70f703eb 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/staging/rdma/hfi1/ud.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -53,6 +50,7 @@
#include "hfi.h"
#include "mad.h"
+#include "verbs_txreq.h"
#include "qp.h"
/**
@@ -65,24 +63,25 @@
* Note that the receive interrupt handler may be calling hfi1_ud_rcv()
* while this is being called.
*/
-static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
+static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
struct hfi1_pportdata *ppd;
- struct hfi1_qp *qp;
+ struct rvt_qp *qp;
struct ib_ah_attr *ah_attr;
unsigned long flags;
- struct hfi1_sge_state ssge;
- struct hfi1_sge *sge;
+ struct rvt_sge_state ssge;
+ struct rvt_sge *sge;
struct ib_wc wc;
u32 length;
enum ib_qp_type sqptype, dqptype;
rcu_read_lock();
- qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
+ qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
+ swqe->ud_wr.remote_qpn);
if (!qp) {
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
rcu_read_unlock();
return;
}
@@ -93,12 +92,12 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
IB_QPT_UD : qp->ibqp.qp_type;
if (dqptype != sqptype ||
- !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) {
- ibp->n_pkt_drops++;
+ !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
+ ibp->rvp.n_pkt_drops++;
goto drop;
}
- ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
+ ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
ppd = ppd_from_ibp(ibp);
if (qp->ibqp.qp_num > 1) {
@@ -161,35 +160,36 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_flags & HFI1_R_REUSE_SGE)
- qp->r_flags &= ~HFI1_R_REUSE_SGE;
- else {
+ if (qp->r_flags & RVT_R_REUSE_SGE) {
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
+ } else {
int ret;
- ret = hfi1_get_rwqe(qp, 0);
+ ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0) {
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock;
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
- ibp->n_vl15_dropped++;
+ ibp->rvp.n_vl15_dropped++;
goto bail_unlock;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= HFI1_R_REUSE_SGE;
- ibp->n_pkt_drops++;
+ qp->r_flags |= RVT_R_REUSE_SGE;
+ ibp->rvp.n_pkt_drops++;
goto bail_unlock;
}
if (ah_attr->ah_flags & IB_AH_GRH) {
hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
- sizeof(struct ib_grh), 1);
+ sizeof(struct ib_grh), 1, 0);
wc.wc_flags |= IB_WC_GRH;
- } else
+ } else {
hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
+ }
ssge.sg_list = swqe->sg_list + 1;
ssge.sge = *swqe->sg_list;
ssge.num_sge = swqe->wr.num_sge;
@@ -202,7 +202,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
if (len > sge->sge_length)
len = sge->sge_length;
WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+ hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1, 0);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
@@ -210,7 +210,7 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
if (--ssge.num_sge)
*sge = *ssge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -222,8 +222,8 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
}
length -= len;
}
- hfi1_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ rvt_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -242,14 +242,14 @@ static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
/* Check for loopback when the port lid is not set */
if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
- wc.slid = HFI1_PERMISSIVE_LID;
+ wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
wc.sl = ah_attr->sl;
wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
- hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- swqe->wr.send_flags & IB_SEND_SOLICITED);
- ibp->n_loop_pkts++;
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ swqe->wr.send_flags & IB_SEND_SOLICITED);
+ ibp->rvp.n_loop_pkts++;
bail_unlock:
spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
@@ -260,47 +260,53 @@ drop:
* hfi1_make_ud_req - construct a UD request packet
* @qp: the QP
*
+ * Assume s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
-int hfi1_make_ud_req(struct hfi1_qp *qp)
+int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_other_headers *ohdr;
struct ib_ah_attr *ah_attr;
struct hfi1_pportdata *ppd;
struct hfi1_ibport *ibp;
- struct hfi1_swqe *wqe;
- unsigned long flags;
+ struct rvt_swqe *wqe;
u32 nwords;
u32 extra_bytes;
u32 bth0;
u16 lrh0;
u16 lid;
- int ret = 0;
int next_cur;
u8 sc5;
- spin_lock_irqsave(&qp->s_lock, flags);
+ ps->s_txreq = get_txreq(ps->dev, qp);
+ if (IS_ERR(ps->s_txreq))
+ goto bail_no_tx;
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&qp->s_iowait.sdma_busy)) {
- qp->s_flags |= HFI1_S_WAIT_DMA;
+ if (iowait_sdma_pending(&priv->s_iowait)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
- wqe = get_swqe_ptr(qp, qp->s_last);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
+ goto done_free_tx;
}
- if (qp->s_cur == qp->s_head)
+ /* see post_one_send() */
+ smp_read_barrier_depends();
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head))
goto bail;
- wqe = get_swqe_ptr(qp, qp->s_cur);
+ wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
next_cur = qp->s_cur + 1;
if (next_cur >= qp->s_size)
next_cur = 0;
@@ -308,13 +314,15 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
/* Construct the header. */
ibp = to_iport(qp->ibqp.device, qp->port_num);
ppd = ppd_from_ibp(ibp);
- ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
- if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE ||
- ah_attr->dlid == HFI1_PERMISSIVE_LID) {
+ ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
+ if (ah_attr->dlid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
+ ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
- if (unlikely(!loopback && (lid == ppd->lid ||
- (lid == HFI1_PERMISSIVE_LID &&
- qp->ibqp.qp_type == IB_QPT_GSI)))) {
+ if (unlikely(!loopback &&
+ (lid == ppd->lid ||
+ (lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
+ qp->ibqp.qp_type == IB_QPT_GSI)))) {
+ unsigned long flags;
/*
* If DMAs are in progress, we can't generate
* a completion for the loopback packet since
@@ -322,16 +330,17 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
* Instead of waiting, we could queue a
* zero length descriptor so we get a callback.
*/
- if (atomic_read(&qp->s_iowait.sdma_busy)) {
- qp->s_flags |= HFI1_S_WAIT_DMA;
+ if (iowait_sdma_pending(&priv->s_iowait)) {
+ qp->s_flags |= RVT_S_WAIT_DMA;
goto bail;
}
qp->s_cur = next_cur;
+ local_irq_save(flags);
spin_unlock_irqrestore(&qp->s_lock, flags);
ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, flags);
hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
- goto done;
+ goto done_free_tx;
}
}
@@ -353,11 +362,12 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
if (ah_attr->ah_flags & IB_AH_GRH) {
/* Header size in 32-bit words. */
- qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh,
- &ah_attr->grh,
- qp->s_hdrwords, nwords);
+ qp->s_hdrwords += hfi1_make_grh(ibp,
+ &ps->s_txreq->phdr.hdr.u.l.grh,
+ &ah_attr->grh,
+ qp->s_hdrwords, nwords);
lrh0 = HFI1_LRH_GRH;
- ohdr = &qp->s_hdr->ibh.u.l.oth;
+ ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;
/*
* Don't worry about sending to locally attached multicast
* QPs. It is unspecified by the spec what happens.
@@ -365,37 +375,42 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
} else {
/* Header size in 32-bit words. */
lrh0 = HFI1_LRH_BTH;
- ohdr = &qp->s_hdr->ibh.u.oth;
+ ohdr = &ps->s_txreq->phdr.hdr.u.oth;
}
if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
qp->s_hdrwords++;
ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
- } else
+ } else {
bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
+ }
sc5 = ibp->sl_to_sc[ah_attr->sl];
lrh0 |= (ah_attr->sl & 0xf) << 4;
if (qp->ibqp.qp_type == IB_QPT_SMI) {
lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
- qp->s_sc = 0xf;
+ priv->s_sc = 0xf;
} else {
lrh0 |= (sc5 & 0xf) << 12;
- qp->s_sc = sc5;
+ priv->s_sc = sc5;
}
- qp->s_sde = qp_to_sdma_engine(qp, qp->s_sc);
- qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
- qp->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
- qp->s_hdr->ibh.lrh[2] =
+ priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
+ ps->s_txreq->sde = priv->s_sde;
+ priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
+ ps->s_txreq->psc = priv->s_sendcontext;
+ ps->s_txreq->phdr.hdr.lrh[0] = cpu_to_be16(lrh0);
+ ps->s_txreq->phdr.hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);
+ ps->s_txreq->phdr.hdr.lrh[2] =
cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE))
- qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
- else {
+ if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
+ ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
+ } else {
lid = ppd->lid;
if (lid) {
lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
- qp->s_hdr->ibh.lrh[3] = cpu_to_be16(lid);
- } else
- qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
+ ps->s_txreq->phdr.hdr.lrh[3] = cpu_to_be16(lid);
+ } else {
+ ps->s_txreq->phdr.hdr.lrh[3] = IB_LID_PERMISSIVE;
+ }
}
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
bth0 |= IB_BTH_SOLICITED;
@@ -406,7 +421,7 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
- ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++));
+ ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
/*
* Qkeys with the high order bit set mean use the
* qkey from the QP context instead of the WR (see 10.2.5).
@@ -415,20 +430,28 @@ int hfi1_make_ud_req(struct hfi1_qp *qp)
qp->qkey : wqe->ud_wr.remote_qkey);
ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
/* disarm any ahg */
- qp->s_hdr->ahgcount = 0;
- qp->s_hdr->ahgidx = 0;
- qp->s_hdr->tx_flags = 0;
- qp->s_hdr->sde = NULL;
+ priv->s_hdr->ahgcount = 0;
+ priv->s_hdr->ahgidx = 0;
+ priv->s_hdr->tx_flags = 0;
+ priv->s_hdr->sde = NULL;
+ /* pbc */
+ ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
+
+ return 1;
-done:
- ret = 1;
- goto unlock;
+done_free_tx:
+ hfi1_put_txreq(ps->s_txreq);
+ ps->s_txreq = NULL;
+ return 1;
bail:
- qp->s_flags &= ~HFI1_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
+ hfi1_put_txreq(ps->s_txreq);
+
+bail_no_tx:
+ ps->s_txreq = NULL;
+ qp->s_flags &= ~RVT_S_BUSY;
+ qp->s_hdrwords = 0;
+ return 0;
}
/*
@@ -476,7 +499,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
return -1;
}
-void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
+void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh)
{
@@ -550,7 +573,7 @@ void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
* opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
*/
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
- struct hfi1_qp *qp, u16 slid, struct opa_smp *smp)
+ struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
@@ -607,7 +630,7 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
case IB_MGMT_METHOD_TRAP:
case IB_MGMT_METHOD_GET_RESP:
case IB_MGMT_METHOD_REPORT_RESP:
- if (ibp->port_cap_flags & IB_PORT_SM)
+ if (ibp->rvp.port_cap_flags & IB_PORT_SM)
return 0;
if (pkey == FULL_MGMT_P_KEY) {
smp->status |= IB_SMP_UNSUP_METHOD;
@@ -624,7 +647,6 @@ static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
return 0;
}
-
/**
* hfi1_ud_rcv - receive an incoming UD packet
* @ibp: the port the packet came in on
@@ -654,7 +676,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
u32 rcv_flags = packet->rcv_flags;
void *data = packet->ebuf;
u32 tlen = packet->tlen;
- struct hfi1_qp *qp = packet->qp;
+ struct rvt_qp *qp = packet->qp;
bool has_grh = rcv_flags & HFI1_HAS_GRH;
bool sc4_bit = has_sc4_bit(packet);
u8 sc;
@@ -663,18 +685,18 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
struct ib_grh *grh = NULL;
qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
+ src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
dlid = be16_to_cpu(hdr->lrh[1]);
- is_mcast = (dlid > HFI1_MULTICAST_LID_BASE) &&
- (dlid != HFI1_PERMISSIVE_LID);
+ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
bth1 = be32_to_cpu(ohdr->bth[1]);
if (unlikely(bth1 & HFI1_BECN_SMASK)) {
/*
* In pre-B0 h/w the CNP_OPCODE is handled via an
- * error path (errata 291394).
+ * error path.
*/
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
+ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
u8 sl, sc5;
sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
@@ -750,7 +772,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
if (mgmt_pkey_idx < 0)
goto drop;
-
}
if (unlikely(qkey != qp->qkey)) {
hfi1_bad_pqkey(ibp, OPA_TRAP_BAD_Q_KEY, qkey,
@@ -788,7 +809,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
if (mgmt_pkey_idx < 0)
goto drop;
-
}
if (qp->ibqp.qp_num > 1 &&
@@ -799,8 +819,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
- } else
+ } else {
goto drop;
+ }
/*
* A GRH is expected to precede the data even if not
@@ -811,36 +832,38 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
/*
* Get the next work request entry to find where to put the data.
*/
- if (qp->r_flags & HFI1_R_REUSE_SGE)
- qp->r_flags &= ~HFI1_R_REUSE_SGE;
- else {
+ if (qp->r_flags & RVT_R_REUSE_SGE) {
+ qp->r_flags &= ~RVT_R_REUSE_SGE;
+ } else {
int ret;
- ret = hfi1_get_rwqe(qp, 0);
+ ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0) {
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return;
}
if (!ret) {
if (qp->ibqp.qp_num == 0)
- ibp->n_vl15_dropped++;
+ ibp->rvp.n_vl15_dropped++;
return;
}
}
/* Silently drop packets which are too big. */
if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= HFI1_R_REUSE_SGE;
+ qp->r_flags |= RVT_R_REUSE_SGE;
goto drop;
}
if (has_grh) {
hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh), 1);
+ sizeof(struct ib_grh), 1, 0);
wc.wc_flags |= IB_WC_GRH;
- } else
+ } else {
hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
- hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
- hfi1_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
+ }
+ hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+ 1, 0);
+ rvt_put_ss(&qp->r_sge);
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
@@ -862,8 +885,9 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
}
}
wc.pkey_index = (unsigned)mgmt_pkey_idx;
- } else
+ } else {
wc.pkey_index = 0;
+ }
wc.slid = be16_to_cpu(hdr->lrh[3]);
sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
@@ -873,15 +897,15 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
*/
- wc.dlid_path_bits = dlid >= HFI1_MULTICAST_LID_BASE ? 0 :
+ wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
wc.port_num = qp->port_num;
/* Signal completion event if the solicited bit is set. */
- hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
- (ohdr->bth[0] &
- cpu_to_be32(IB_BTH_SOLICITED)) != 0);
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ (ohdr->bth[0] &
+ cpu_to_be32(IB_BTH_SOLICITED)) != 0);
return;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
}
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
new file mode 100644
index 000000000000..0861e095df8d
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -0,0 +1,1044 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <asm/page.h>
+
+#include "user_exp_rcv.h"
+#include "trace.h"
+#include "mmu_rb.h"
+
+struct tid_group {
+ struct list_head list;
+ unsigned base;
+ u8 size;
+ u8 used;
+ u8 map;
+};
+
+struct tid_rb_node {
+ struct mmu_rb_node mmu;
+ unsigned long phys;
+ struct tid_group *grp;
+ u32 rcventry;
+ dma_addr_t dma_addr;
+ bool freed;
+ unsigned npages;
+ struct page *pages[0];
+};
+
+struct tid_pageset {
+ u16 idx;
+ u16 count;
+};
+
+#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))
+
+#define num_user_pages(vaddr, len) \
+ (1 + (((((unsigned long)(vaddr) + \
+ (unsigned long)(len) - 1) & PAGE_MASK) - \
+ ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
+
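/*
 * Editor's example, not part of this patch: num_user_pages() counts every
 * page the buffer touches, including partial first and last pages. With
 * 4 KiB pages, vaddr = 0x10ffc and len = 16:
 *
 *   last byte  = 0x1100b
 *   first page = 0x10000, last page = 0x11000
 *   result     = 1 + ((0x11000 - 0x10000) >> 12) = 2
 *
 * i.e. a 16-byte buffer straddling a page boundary still pins two pages.
 */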
+static void unlock_exp_tids(struct hfi1_ctxtdata *, struct exp_tid_set *,
+ struct rb_root *);
+static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
+static int set_rcvarray_entry(struct file *, unsigned long, u32,
+ struct tid_group *, struct page **, unsigned);
+static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
+static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
+static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
+ struct tid_pageset *, unsigned, u16, struct page **,
+ u32 *, unsigned *, unsigned *);
+static int unprogram_rcvarray(struct file *, u32, struct tid_group **);
+static void clear_tid_node(struct hfi1_filedata *, u16, struct tid_rb_node *);
+
+static struct mmu_rb_ops tid_rb_ops = {
+ .insert = mmu_rb_insert,
+ .remove = mmu_rb_remove,
+ .invalidate = mmu_rb_invalidate
+};
+
+static inline u32 rcventry2tidinfo(u32 rcventry)
+{
+ u32 pair = rcventry & ~0x1;
+
+ return EXP_TID_SET(IDX, pair >> 1) |
+ EXP_TID_SET(CTRL, 1 << (rcventry - pair));
+}
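/*
 * Editor's example, not part of this patch: RcvArray entries are encoded
 * pairwise. IDX names the pair and CTRL selects the entry within it
 * (1 = even entry, 2 = odd entry; 3, meaning both, is rejected on the
 * decode side in unprogram_rcvarray() below):
 *
 *   rcventry = 4: pair = 4, IDX = 4 >> 1 = 2, CTRL = 1 << 0 = 1
 *   rcventry = 5: pair = 4, IDX = 2,          CTRL = 1 << 1 = 2
 */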
+
+static inline void exp_tid_group_init(struct exp_tid_set *set)
+{
+ INIT_LIST_HEAD(&set->list);
+ set->count = 0;
+}
+
+static inline void tid_group_remove(struct tid_group *grp,
+ struct exp_tid_set *set)
+{
+ list_del_init(&grp->list);
+ set->count--;
+}
+
+static inline void tid_group_add_tail(struct tid_group *grp,
+ struct exp_tid_set *set)
+{
+ list_add_tail(&grp->list, &set->list);
+ set->count++;
+}
+
+static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
+{
+ struct tid_group *grp =
+ list_first_entry(&set->list, struct tid_group, list);
+ list_del_init(&grp->list);
+ set->count--;
+ return grp;
+}
+
+static inline void tid_group_move(struct tid_group *group,
+ struct exp_tid_set *s1,
+ struct exp_tid_set *s2)
+{
+ tid_group_remove(group, s1);
+ tid_group_add_tail(group, s2);
+}
+
+/*
+ * Initialize context and file private data needed for Expected
+ * receive caching. This needs to be done after the context has
+ * been configured with the eager/expected RcvEntry counts.
+ */
+int hfi1_user_exp_rcv_init(struct file *fp)
+{
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned tidbase;
+ int i, ret = 0;
+
+ spin_lock_init(&fd->tid_lock);
+ spin_lock_init(&fd->invalid_lock);
+ fd->tid_rb_root = RB_ROOT;
+
+ if (!uctxt->subctxt_cnt || !fd->subctxt) {
+ exp_tid_group_init(&uctxt->tid_group_list);
+ exp_tid_group_init(&uctxt->tid_used_list);
+ exp_tid_group_init(&uctxt->tid_full_list);
+
+ tidbase = uctxt->expected_base;
+ for (i = 0; i < uctxt->expected_count /
+ dd->rcv_entries.group_size; i++) {
+ struct tid_group *grp;
+
+ grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+ if (!grp) {
+ /*
+ * If we fail here, the groups already
+ * allocated will be freed by the close
+ * call.
+ */
+ ret = -ENOMEM;
+ goto done;
+ }
+ grp->size = dd->rcv_entries.group_size;
+ grp->base = tidbase;
+ tid_group_add_tail(grp, &uctxt->tid_group_list);
+ tidbase += dd->rcv_entries.group_size;
+ }
+ }
+
+ fd->entry_to_rb = kcalloc(uctxt->expected_count,
+ sizeof(struct rb_node *),
+ GFP_KERNEL);
+ if (!fd->entry_to_rb)
+ return -ENOMEM;
+
+ if (!HFI1_CAP_IS_USET(TID_UNMAP)) {
+ fd->invalid_tid_idx = 0;
+ fd->invalid_tids = kzalloc(uctxt->expected_count *
+ sizeof(u32), GFP_KERNEL);
+ if (!fd->invalid_tids) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * Register MMU notifier callbacks. If the registration
+ * fails, continue but turn off the TID caching for
+ * all user contexts.
+ */
+ ret = hfi1_mmu_rb_register(&fd->tid_rb_root, &tid_rb_ops);
+ if (ret) {
+ dd_dev_info(dd,
+ "Failed MMU notifier registration %d\n",
+ ret);
+ HFI1_CAP_USET(TID_UNMAP);
+ ret = 0;
+ }
+ }
+
+ /*
+ * PSM does not have a good way to separate, count, and
+ * effectively enforce a limit on RcvArray entries used by
+ * subctxts (when context sharing is used) when TID caching
+ * is enabled. To help with that, we calculate a per-process
+ * RcvArray entry share and enforce that.
+ * If TID caching is not in use, PSM deals with usage on its
+ * own. In that case, we allow any subctxt to take all of the
+ * entries.
+ *
+ * Make sure that we set the tid counts only after successful
+ * init.
+ */
+ spin_lock(&fd->tid_lock);
+ if (uctxt->subctxt_cnt && !HFI1_CAP_IS_USET(TID_UNMAP)) {
+ u16 remainder;
+
+ fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
+ remainder = uctxt->expected_count % uctxt->subctxt_cnt;
+ if (remainder && fd->subctxt < remainder)
+ fd->tid_limit++;
+ } else {
+ fd->tid_limit = uctxt->expected_count;
+ }
+ spin_unlock(&fd->tid_lock);
+done:
+ return ret;
+}
+
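/*
 * Editor's example, not part of this patch: the tid_limit split in
 * hfi1_user_exp_rcv_init() hands the division remainder to the
 * lowest-numbered subcontexts. With expected_count = 2048 and
 * subctxt_cnt = 3:
 *
 *   2048 / 3 = 682, remainder 2
 *   subctxt 0: 683, subctxt 1: 683, subctxt 2: 682   (sums to 2048)
 */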
+int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct tid_group *grp, *gptr;
+
+ /*
+ * The notifier would have been removed when the process's mm
+ * was freed.
+ */
+ if (!HFI1_CAP_IS_USET(TID_UNMAP))
+ hfi1_mmu_rb_unregister(&fd->tid_rb_root);
+
+ kfree(fd->invalid_tids);
+
+ if (!uctxt->cnt) {
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_full_list,
+ &fd->tid_rb_root);
+ if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
+ unlock_exp_tids(uctxt, &uctxt->tid_used_list,
+ &fd->tid_rb_root);
+ list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
+ list) {
+ list_del_init(&grp->list);
+ kfree(grp);
+ }
+ hfi1_clear_tids(uctxt);
+ }
+
+ kfree(fd->entry_to_rb);
+ return 0;
+}
+
+/*
+ * Write an "empty" RcvArray entry.
+ * This function exists so the TID registration code can use it
+ * to write to unused/unneeded entries and still take advantage
+ * of the WC performance improvements. The HFI will ignore this
+ * write to the RcvArray entry.
+ */
+static inline void rcv_array_wc_fill(struct hfi1_devdata *dd, u32 index)
+{
+ /*
+ * Doing the WC fill writes only makes sense if the device is
+ * present and the RcvArray has been mapped as WC memory.
+ */
+ if ((dd->flags & HFI1_PRESENT) && dd->rcvarray_wc)
+ writeq(0, dd->rcvarray_wc + (index * 8));
+}
+
+/*
+ * RcvArray entry allocation for Expected Receives is done by the
+ * following algorithm:
+ *
+ * The context keeps 3 lists of groups of RcvArray entries:
+ * 1. List of empty groups - tid_group_list
+ * This list is created during user context creation and
+ * contains elements which describe sets (of 8) of empty
+ * RcvArray entries.
+ * 2. List of partially used groups - tid_used_list
+ * This list contains sets of RcvArray entries which are
+ * not completely used up. Another mapping request could
+ * use some or all of the remaining entries.
+ * 3. List of full groups - tid_full_list
+ * This is the list where sets that are completely used
+ * up go.
+ *
+ * An attempt to optimize the usage of RcvArray entries is
+ * made by finding all sets of physically contiguous pages in a
+ * user's buffer.
+ * These physically contiguous sets are further split into
+ * sizes supported by the receive engine of the HFI. The
+ * resulting sets of pages are stored in struct tid_pageset,
+ * which describes the sets as:
+ * * .count - number of pages in this set
+ * * .idx - starting index into struct page ** array
+ * of this set
+ *
+ * From this point on, the algorithm deals with the page sets
+ * described above. The number of pagesets is divided by the
+ * RcvArray group size to produce the number of full groups
+ * needed.
+ *
+ * Groups from the 3 lists are manipulated using the following
+ * rules:
+ * 1. For each set of 8 pagesets, a complete group from
+ * tid_group_list is taken, programmed, and moved to
+ * the tid_full_list list.
+ * 2. For all remaining pagesets:
+ * 2.1 If the tid_used_list is empty and the tid_group_list
+ * is empty, stop processing pagesets and return only
+ * what has been programmed up to this point.
+ * 2.2 If the tid_used_list is empty and the tid_group_list
+ * is not empty, move a group from tid_group_list to
+ * tid_used_list.
+ * 2.3 For each group in tid_used_list, program as much as
+ * can fit into the group. If the group becomes fully
+ * used, move it to tid_full_list.
+ */
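/*
 * Editor's walk-through, not part of this patch: with a group size of 8
 * and 19 pagesets to program, the function below proceeds as:
 *
 *   1. 19 / 8 = 2 full groups: two groups are popped from tid_group_list,
 *      fully programmed, and moved to tid_full_list (16 pagesets done).
 *   2. 3 pagesets remain: one group moves from tid_group_list to
 *      tid_used_list and 3 of its 8 entries are programmed; it stays on
 *      tid_used_list with used = 3.
 *
 * A later setup call fills the 5 free entries of that group first and
 * moves it to tid_full_list once used == size.
 */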
+int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
+{
+ int ret = 0, need_group = 0, pinned;
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ unsigned npages, ngroups, pageidx = 0, pageset_count, npagesets,
+ tididx = 0, mapped, mapped_pages = 0;
+ unsigned long vaddr = tinfo->vaddr;
+ struct page **pages = NULL;
+ u32 *tidlist = NULL;
+ struct tid_pageset *pagesets = NULL;
+
+ /* Get the number of pages the user buffer spans */
+ npages = num_user_pages(vaddr, tinfo->length);
+ if (!npages)
+ return -EINVAL;
+
+ if (npages > uctxt->expected_count) {
+ dd_dev_err(dd, "Expected buffer too big\n");
+ return -EINVAL;
+ }
+
+ /* Verify that access is OK for the user buffer */
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
+ npages * PAGE_SIZE)) {
+ dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
+ (void *)vaddr, npages);
+ return -EFAULT;
+ }
+
+ pagesets = kcalloc(uctxt->expected_count, sizeof(*pagesets),
+ GFP_KERNEL);
+ if (!pagesets)
+ return -ENOMEM;
+
+ /* Allocate the array of struct page pointers needed for pinning */
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
+ /*
+ * Pin all the pages of the user buffer. If we can't pin all the
+ * pages, accept the amount pinned so far and program only that.
+ * User space knows how to deal with partially programmed buffers.
+ */
+ if (!hfi1_can_pin_pages(dd, fd->tid_n_pinned, npages)) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+ pinned = hfi1_acquire_user_pages(vaddr, npages, true, pages);
+ if (pinned <= 0) {
+ ret = pinned;
+ goto bail;
+ }
+ fd->tid_n_pinned += pinned;
+
+ /* Find sets of physically contiguous pages */
+ npagesets = find_phys_blocks(pages, pinned, pagesets);
+
+ /*
+ * We don't need to access this under a lock since tid_used is per
+ * process and the same process cannot be in hfi1_user_exp_rcv_clear()
+ * and hfi1_user_exp_rcv_setup() at the same time.
+ */
+ spin_lock(&fd->tid_lock);
+ if (fd->tid_used + npagesets > fd->tid_limit)
+ pageset_count = fd->tid_limit - fd->tid_used;
+ else
+ pageset_count = npagesets;
+ spin_unlock(&fd->tid_lock);
+
+ if (!pageset_count)
+ goto bail;
+
+ ngroups = pageset_count / dd->rcv_entries.group_size;
+ tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
+ if (!tidlist) {
+ ret = -ENOMEM;
+ goto nomem;
+ }
+
+ tididx = 0;
+
+ /*
+ * From this point on, we are going to be using shared (between master
+ * and subcontexts) context resources. We need to take the lock.
+ */
+ mutex_lock(&uctxt->exp_lock);
+ /*
+ * The first step is to program the RcvArray entries which are complete
+ * groups.
+ */
+ while (ngroups && uctxt->tid_group_list.count) {
+ struct tid_group *grp =
+ tid_group_pop(&uctxt->tid_group_list);
+
+ ret = program_rcvarray(fp, vaddr, grp, pagesets,
+ pageidx, dd->rcv_entries.group_size,
+ pages, tidlist, &tididx, &mapped);
+ /*
+ * If there was a failure to program the RcvArray
+ * entries for the entire group, reset the grp fields
+ * and add the grp back to the free group list.
+ */
+ if (ret <= 0) {
+ tid_group_add_tail(grp, &uctxt->tid_group_list);
+ hfi1_cdbg(TID,
+ "Failed to program RcvArray group %d", ret);
+ goto unlock;
+ }
+
+ tid_group_add_tail(grp, &uctxt->tid_full_list);
+ ngroups--;
+ pageidx += ret;
+ mapped_pages += mapped;
+ }
+
+ while (pageidx < pageset_count) {
+ struct tid_group *grp, *ptr;
+ /*
+ * If we don't have any partially used tid groups, check
+ * if we have empty groups. If so, take one from there and
+ * put in the partially used list.
+ */
+ if (!uctxt->tid_used_list.count || need_group) {
+ if (!uctxt->tid_group_list.count)
+ goto unlock;
+
+ grp = tid_group_pop(&uctxt->tid_group_list);
+ tid_group_add_tail(grp, &uctxt->tid_used_list);
+ need_group = 0;
+ }
+ /*
+ * There is an optimization opportunity here - instead of
+ * fitting as many page sets as we can, check for a group
+ * later on in the list that could fit all of them.
+ */
+ list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
+ list) {
+ unsigned use = min_t(unsigned, pageset_count - pageidx,
+ grp->size - grp->used);
+
+ ret = program_rcvarray(fp, vaddr, grp, pagesets,
+ pageidx, use, pages, tidlist,
+ &tididx, &mapped);
+ if (ret < 0) {
+ hfi1_cdbg(TID,
+ "Failed to program RcvArray entries %d",
+ ret);
+ ret = -EFAULT;
+ goto unlock;
+ } else if (ret > 0) {
+ if (grp->used == grp->size)
+ tid_group_move(grp,
+ &uctxt->tid_used_list,
+ &uctxt->tid_full_list);
+ pageidx += ret;
+ mapped_pages += mapped;
+ need_group = 0;
+ /* Check if we are done so we break out early */
+ if (pageidx >= pageset_count)
+ break;
+ } else if (WARN_ON(ret == 0)) {
+ /*
+ * If ret is 0, we did not program any entries
+ * into this group, which can only happen if
+ * we've screwed up the accounting somewhere.
+ * Warn and try to continue.
+ */
+ need_group = 1;
+ }
+ }
+ }
+unlock:
+ mutex_unlock(&uctxt->exp_lock);
+nomem:
+ hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
+ mapped_pages, ret);
+ if (tididx) {
+ spin_lock(&fd->tid_lock);
+ fd->tid_used += tididx;
+ spin_unlock(&fd->tid_lock);
+ tinfo->tidcnt = tididx;
+ tinfo->length = mapped_pages * PAGE_SIZE;
+
+ if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
+ tidlist, sizeof(tidlist[0]) * tididx)) {
+ /*
+ * On failure to copy to the user level, we need to undo
+ * everything done so far so we don't leak resources.
+ */
+ tinfo->tidlist = (unsigned long)&tidlist;
+ hfi1_user_exp_rcv_clear(fp, tinfo);
+ tinfo->tidlist = 0;
+ ret = -EFAULT;
+ goto bail;
+ }
+ }
+
+ /*
+ * If not everything was mapped (due to insufficient RcvArray entries,
+ * for example), unpin all unmapped pages so we can pin them next time.
+ */
+ if (mapped_pages != pinned) {
+ hfi1_release_user_pages(current->mm, &pages[mapped_pages],
+ pinned - mapped_pages,
+ false);
+ fd->tid_n_pinned -= pinned - mapped_pages;
+ }
+bail:
+ kfree(pagesets);
+ kfree(pages);
+ kfree(tidlist);
+ return ret > 0 ? 0 : ret;
+}
+
+int hfi1_user_exp_rcv_clear(struct file *fp, struct hfi1_tid_info *tinfo)
+{
+ int ret = 0;
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ u32 *tidinfo;
+ unsigned tididx;
+
+ tidinfo = kcalloc(tinfo->tidcnt, sizeof(*tidinfo), GFP_KERNEL);
+ if (!tidinfo)
+ return -ENOMEM;
+
+ if (copy_from_user(tidinfo, (void __user *)(unsigned long)
+ tinfo->tidlist, sizeof(tidinfo[0]) *
+ tinfo->tidcnt)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ mutex_lock(&uctxt->exp_lock);
+ for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
+ ret = unprogram_rcvarray(fp, tidinfo[tididx], NULL);
+ if (ret) {
+ hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
+ ret);
+ break;
+ }
+ }
+ spin_lock(&fd->tid_lock);
+ fd->tid_used -= tididx;
+ spin_unlock(&fd->tid_lock);
+ tinfo->tidcnt = tididx;
+ mutex_unlock(&uctxt->exp_lock);
+done:
+ kfree(tidinfo);
+ return ret;
+}
+
+int hfi1_user_exp_rcv_invalid(struct file *fp, struct hfi1_tid_info *tinfo)
+{
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ unsigned long *ev = uctxt->dd->events +
+ (((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
+ HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
+ u32 *array;
+ int ret = 0;
+
+ if (!fd->invalid_tids)
+ return -EINVAL;
+
+ /*
+ * copy_to_user() can sleep, which will leave the invalid_lock
+ * locked and cause the MMU notifier to be blocked on the lock
+ * for a long time.
+ * Copy the data to a local buffer so we can release the lock.
+ */
+ array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ spin_lock(&fd->invalid_lock);
+ if (fd->invalid_tid_idx) {
+ memcpy(array, fd->invalid_tids, sizeof(*array) *
+ fd->invalid_tid_idx);
+ memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
+ fd->invalid_tid_idx);
+ tinfo->tidcnt = fd->invalid_tid_idx;
+ fd->invalid_tid_idx = 0;
+ /*
+ * Reset the user flag while still holding the lock.
+ * Otherwise, PSM can miss events.
+ */
+ clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
+ } else {
+ tinfo->tidcnt = 0;
+ }
+ spin_unlock(&fd->invalid_lock);
+
+ if (tinfo->tidcnt) {
+ if (copy_to_user((void __user *)tinfo->tidlist,
+ array, sizeof(*array) * tinfo->tidcnt))
+ ret = -EFAULT;
+ }
+ kfree(array);
+
+ return ret;
+}
+
+static u32 find_phys_blocks(struct page **pages, unsigned npages,
+ struct tid_pageset *list)
+{
+ unsigned pagecount, pageidx, setcount = 0, i;
+ unsigned long pfn, this_pfn;
+
+ if (!npages)
+ return 0;
+
+ /*
+ * Look for sets of physically contiguous pages in the user buffer.
+ * This will allow us to optimize Expected RcvArray entry usage by
+ * using the bigger supported sizes.
+ */
+ pfn = page_to_pfn(pages[0]);
+ for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
+ this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;
+
+ /*
+ * If the pfn's are not sequential, pages are not physically
+ * contiguous.
+ */
+ if (this_pfn != ++pfn) {
+ /*
+ * At this point we have to loop over the set of
+ * physically contiguous pages and break them down into
+ * sizes supported by the HW.
+ * There are two main constraints:
+ * 1. The max buffer size is MAX_EXPECTED_BUFFER.
+ * If the total set size is bigger than that
+ * program only a MAX_EXPECTED_BUFFER chunk.
+ * 2. The buffer size has to be a power of two. If
+ * it is not, round down to the closest power of
+ * 2 and program that size.
+ */
+ while (pagecount) {
+ int maxpages = pagecount;
+ u32 bufsize = pagecount * PAGE_SIZE;
+
+ if (bufsize > MAX_EXPECTED_BUFFER)
+ maxpages =
+ MAX_EXPECTED_BUFFER >>
+ PAGE_SHIFT;
+ else if (!is_power_of_2(bufsize))
+ maxpages =
+ rounddown_pow_of_two(bufsize) >>
+ PAGE_SHIFT;
+
+ list[setcount].idx = pageidx;
+ list[setcount].count = maxpages;
+ pagecount -= maxpages;
+ pageidx += maxpages;
+ setcount++;
+ }
+ pageidx = i;
+ pagecount = 1;
+ pfn = this_pfn;
+ } else {
+ pagecount++;
+ }
+ }
+ return setcount;
+}
+
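/*
 * Editor's example, not part of this patch: suppose find_phys_blocks()
 * finds a run of 13 physically contiguous 4 KiB pages and
 * MAX_EXPECTED_BUFFER (defined elsewhere in the driver) is larger than
 * 52 KiB. The splitting loop emits power-of-two chunks:
 *
 *   13 pages = 52 KiB -> round down to 32 KiB: set { idx = 0,  count = 8 }
 *    5 pages = 20 KiB -> round down to 16 KiB: set { idx = 8,  count = 4 }
 *    1 page  =  4 KiB -> already a power of 2: set { idx = 12, count = 1 }
 *
 * so one contiguous run becomes three HW-programmable pagesets.
 */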
+/**
+ * program_rcvarray() - program an RcvArray group with receive buffers
+ * @fp: file pointer
+ * @vaddr: starting user virtual address
+ * @grp: RcvArray group
+ * @sets: array of struct tid_pageset holding information on physically
+ * contiguous chunks from the user buffer
+ * @start: starting index into sets array
+ * @count: number of struct tid_pageset's to program
+ * @pages: an array of struct page * for the user buffer
+ * @tidlist: the array of u32 elements where the information about the
+ * programmed RcvArray entries is to be encoded.
+ * @tididx: starting offset into tidlist
+ * @pmapped: (output parameter) number of pages programmed into the RcvArray
+ * entries.
+ *
+ * This function will program up to 'count' number of RcvArray entries from the
+ * group 'grp'. To make best use of write-combining writes, the function will
+ * perform writes to the unused RcvArray entries which will be ignored by the
+ * HW. Each RcvArray entry will be programmed with a physically contiguous
+ * buffer chunk from the user's virtual buffer.
+ *
+ * Return:
+ * -EINVAL if the requested count is larger than the size of the group,
+ * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
+ * number of RcvArray entries programmed.
+ */
+static int program_rcvarray(struct file *fp, unsigned long vaddr,
+ struct tid_group *grp,
+ struct tid_pageset *sets,
+ unsigned start, u16 count, struct page **pages,
+ u32 *tidlist, unsigned *tididx, unsigned *pmapped)
+{
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ u16 idx;
+ u32 tidinfo = 0, rcventry, useidx = 0;
+ int mapped = 0;
+
+ /* Count should never be larger than the group size */
+ if (count > grp->size)
+ return -EINVAL;
+
+ /* Find the first unused entry in the group */
+ for (idx = 0; idx < grp->size; idx++) {
+ if (!(grp->map & (1 << idx))) {
+ useidx = idx;
+ break;
+ }
+ rcv_array_wc_fill(dd, grp->base + idx);
+ }
+
+ idx = 0;
+ while (idx < count) {
+ u16 npages, pageidx, setidx = start + idx;
+ int ret = 0;
+
+ /*
+ * If this entry in the group is used, move to the next one.
+ * If we go past the end of the group, exit the loop.
+ */
+ if (useidx >= grp->size) {
+ break;
+ } else if (grp->map & (1 << useidx)) {
+ rcv_array_wc_fill(dd, grp->base + useidx);
+ useidx++;
+ continue;
+ }
+
+ rcventry = grp->base + useidx;
+ npages = sets[setidx].count;
+ pageidx = sets[setidx].idx;
+
+ ret = set_rcvarray_entry(fp, vaddr + (pageidx * PAGE_SIZE),
+ rcventry, grp, pages + pageidx,
+ npages);
+ if (ret)
+ return ret;
+ mapped += npages;
+
+ tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
+ EXP_TID_SET(LEN, npages);
+ tidlist[(*tididx)++] = tidinfo;
+ grp->used++;
+ grp->map |= 1 << useidx++;
+ idx++;
+ }
+
+ /* Fill the rest of the group with "blank" writes */
+ for (; useidx < grp->size; useidx++)
+ rcv_array_wc_fill(dd, grp->base + useidx);
+ *pmapped = mapped;
+ return idx;
+}
+
+static int set_rcvarray_entry(struct file *fp, unsigned long vaddr,
+ u32 rcventry, struct tid_group *grp,
+ struct page **pages, unsigned npages)
+{
+ int ret;
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct tid_rb_node *node;
+ struct hfi1_devdata *dd = uctxt->dd;
+ struct rb_root *root = &fd->tid_rb_root;
+ dma_addr_t phys;
+
+ /*
+ * Allocate the node first so we can handle a potential
+ * failure before we've programmed anything.
+ */
+ node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
+ GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ phys = pci_map_single(dd->pcidev,
+ __va(page_to_phys(pages[0])),
+ npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev, phys)) {
+ dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
+ phys);
+ kfree(node);
+ return -EFAULT;
+ }
+
+ node->mmu.addr = vaddr;
+ node->mmu.len = npages * PAGE_SIZE;
+ node->phys = page_to_phys(pages[0]);
+ node->npages = npages;
+ node->rcventry = rcventry;
+ node->dma_addr = phys;
+ node->grp = grp;
+ node->freed = false;
+ memcpy(node->pages, pages, sizeof(struct page *) * npages);
+
+ if (HFI1_CAP_IS_USET(TID_UNMAP))
+ ret = mmu_rb_insert(root, &node->mmu);
+ else
+ ret = hfi1_mmu_rb_insert(root, &node->mmu);
+
+ if (ret) {
+ hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
+ node->rcventry, node->mmu.addr, node->phys, ret);
+ pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ kfree(node);
+ return -EFAULT;
+ }
+ hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
+ trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
+ node->mmu.addr, node->phys, phys);
+ return 0;
+}
+
+static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
+ struct tid_group **grp)
+{
+ struct hfi1_filedata *fd = fp->private_data;
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+ struct tid_rb_node *node;
+ u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
+ u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;
+
+ if (tididx >= uctxt->expected_count) {
+ dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
+ tididx, uctxt->ctxt);
+ return -EINVAL;
+ }
+
+ if (tidctrl == 0x3)
+ return -EINVAL;
+
+ rcventry = tididx + (tidctrl - 1);
+
+ node = fd->entry_to_rb[rcventry];
+ if (!node || node->rcventry != (uctxt->expected_base + rcventry))
+ return -EBADF;
+ if (HFI1_CAP_IS_USET(TID_UNMAP))
+ mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false);
+ else
+ hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
+
+ if (grp)
+ *grp = node->grp;
+ clear_tid_node(fd, fd->subctxt, node);
+ return 0;
+}
+
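/*
 * Editor's note, not part of this patch: unprogram_rcvarray() inverts
 * rcventry2tidinfo(). Continuing the earlier example, a tidinfo with
 * IDX = 2 and CTRL = 2 decodes as:
 *
 *   tididx   = 2 << 1 = 4
 *   rcventry = 4 + (2 - 1) = 5   (the odd entry of the pair)
 *
 * round-tripping the value that was encoded. tidctrl == 0x3 (both
 * entries of a pair) is rejected because each rb node describes exactly
 * one RcvArray entry.
 */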
+static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
+ struct tid_rb_node *node)
+{
+ struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ struct hfi1_devdata *dd = uctxt->dd;
+
+ trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
+ node->npages, node->mmu.addr, node->phys,
+ node->dma_addr);
+
+ hfi1_put_tid(dd, node->rcventry, PT_INVALID, 0, 0);
+ /*
+ * Make sure device has seen the write before we unpin the
+ * pages.
+ */
+ flush_wc();
+
+ pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
+ PCI_DMA_FROMDEVICE);
+ hfi1_release_user_pages(current->mm, node->pages, node->npages, true);
+ fd->tid_n_pinned -= node->npages;
+
+ node->grp->used--;
+ node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
+
+ if (node->grp->used == node->grp->size - 1)
+ tid_group_move(node->grp, &uctxt->tid_full_list,
+ &uctxt->tid_used_list);
+ else if (!node->grp->used)
+ tid_group_move(node->grp, &uctxt->tid_used_list,
+ &uctxt->tid_group_list);
+ kfree(node);
+}
+
+static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
+ struct exp_tid_set *set, struct rb_root *root)
+{
+ struct tid_group *grp, *ptr;
+ struct hfi1_filedata *fd = container_of(root, struct hfi1_filedata,
+ tid_rb_root);
+ int i;
+
+ list_for_each_entry_safe(grp, ptr, &set->list, list) {
+ list_del_init(&grp->list);
+
+ for (i = 0; i < grp->size; i++) {
+ if (grp->map & (1 << i)) {
+ u16 rcventry = grp->base + i;
+ struct tid_rb_node *node;
+
+ node = fd->entry_to_rb[rcventry -
+ uctxt->expected_base];
+ if (!node || node->rcventry != rcventry)
+ continue;
+ if (HFI1_CAP_IS_USET(TID_UNMAP))
+ mmu_rb_remove(&fd->tid_rb_root,
+ &node->mmu, false);
+ else
+ hfi1_mmu_rb_remove(&fd->tid_rb_root,
+ &node->mmu);
+ clear_tid_node(fd, -1, node);
+ }
+ }
+ }
+}
+
+static int mmu_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
+{
+ struct hfi1_filedata *fdata =
+ container_of(root, struct hfi1_filedata, tid_rb_root);
+ struct hfi1_ctxtdata *uctxt = fdata->uctxt;
+ struct tid_rb_node *node =
+ container_of(mnode, struct tid_rb_node, mmu);
+
+ if (node->freed)
+ return 0;
+
+ trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
+ node->rcventry, node->npages, node->dma_addr);
+ node->freed = true;
+
+ spin_lock(&fdata->invalid_lock);
+ if (fdata->invalid_tid_idx < uctxt->expected_count) {
+ fdata->invalid_tids[fdata->invalid_tid_idx] =
+ rcventry2tidinfo(node->rcventry - uctxt->expected_base);
+ fdata->invalid_tids[fdata->invalid_tid_idx] |=
+ EXP_TID_SET(LEN, node->npages);
+ if (!fdata->invalid_tid_idx) {
+ unsigned long *ev;
+
+ /*
+ * hfi1_set_uevent_bits() sets a user event flag
+ * for all processes. Because calling into the
+ * driver to process TID cache invalidations is
+ * expensive and TID cache invalidations are
+ * handled on a per-process basis, we can
+ * optimize this to set the flag only for the
+ * process in question.
+ */
+ ev = uctxt->dd->events +
+ (((uctxt->ctxt - uctxt->dd->first_user_ctxt) *
+ HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
+ set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
+ }
+ fdata->invalid_tid_idx++;
+ }
+ spin_unlock(&fdata->invalid_lock);
+ return 0;
+}
+
+static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
+{
+ struct hfi1_filedata *fdata =
+ container_of(root, struct hfi1_filedata, tid_rb_root);
+ struct tid_rb_node *tnode =
+ container_of(node, struct tid_rb_node, mmu);
+ u32 base = fdata->uctxt->expected_base;
+
+ fdata->entry_to_rb[tnode->rcventry - base] = tnode;
+ return 0;
+}
+
+static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
+ bool notifier)
+{
+ struct hfi1_filedata *fdata =
+ container_of(root, struct hfi1_filedata, tid_rb_root);
+ struct tid_rb_node *tnode =
+ container_of(node, struct tid_rb_node, mmu);
+ u32 base = fdata->uctxt->expected_base;
+
+ fdata->entry_to_rb[tnode->rcventry - base] = NULL;
+}
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/staging/rdma/hfi1/user_exp_rcv.h
index 4f4876e1d353..9bc8d9fba87e 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.h
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.h
@@ -1,14 +1,13 @@
#ifndef _HFI1_USER_EXP_RCV_H
#define _HFI1_USER_EXP_RCV_H
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -20,8 +19,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -50,6 +47,8 @@
*
*/
+#include "hfi.h"
+
#define EXP_TID_TIDLEN_MASK 0x7FFULL
#define EXP_TID_TIDLEN_SHIFT 0
#define EXP_TID_TIDCTRL_MASK 0x3ULL
@@ -71,4 +70,10 @@
(tid) |= EXP_TID_SET(field, (value)); \
} while (0)
+int hfi1_user_exp_rcv_init(struct file *);
+int hfi1_user_exp_rcv_free(struct hfi1_filedata *);
+int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *);
+int hfi1_user_exp_rcv_clear(struct file *, struct hfi1_tid_info *);
+int hfi1_user_exp_rcv_invalid(struct file *, struct hfi1_tid_info *);
+
#endif /* _HFI1_USER_EXP_RCV_H */
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c
index 692de658f0dc..88e10b5f55f1 100644
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ b/drivers/staging/rdma/hfi1/user_pages.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -51,36 +48,62 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/device.h>
+#include <linux/module.h>
#include "hfi.h"
-/**
- * hfi1_map_page - a safety wrapper around pci_map_page()
+static unsigned long cache_size = 256;
+module_param(cache_size, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
+
+/*
+ * Determine whether the caller can pin pages.
+ *
+ * This function should be used in the implementation of buffer caches.
+ * The cache implementation should call this function prior to attempting
+ * to pin buffer pages in order to determine whether they should do so.
+ * The function computes cache limits based on the configured ulimit and
+ * cache size. Use of this function is especially important for caches
+ * which are not limited in any other way (e.g. by HW resources) and, thus,
+ * could keep caching buffers.
*
*/
-dma_addr_t hfi1_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
+bool hfi1_can_pin_pages(struct hfi1_devdata *dd, u32 nlocked, u32 npages)
{
- dma_addr_t phys;
+ unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
+ size = (cache_size * (1UL << 20)); /* convert to bytes */
+ unsigned usr_ctxts = dd->num_rcv_contexts - dd->first_user_ctxt;
+ bool can_lock = capable(CAP_IPC_LOCK);
- phys = pci_map_page(hwdev, page, offset, size, direction);
+ /*
+ * Calculate per-cache size. The calculation below uses only a quarter
+ * of the available per-context limit. This leaves space for other
+ * pinning. Should we worry about shared ctxts?
+ */
+ cache_limit = (ulimit / usr_ctxts) / 4;
- return phys;
-}
+ /* If ulimit isn't "unlimited" and the per-context limit is smaller, use it. */
+ if (ulimit != (-1UL) && size > cache_limit)
+ size = cache_limit;
-int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
- struct page **pages)
-{
- unsigned long pinned, lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- bool can_lock = capable(CAP_IPC_LOCK);
- int ret;
+ /* Convert to number of pages */
+ size = DIV_ROUND_UP(size, PAGE_SIZE);
down_read(&current->mm->mmap_sem);
pinned = current->mm->pinned_vm;
up_read(&current->mm->mmap_sem);
- if (pinned + npages > lock_limit && !can_lock)
- return -ENOMEM;
+ /* First, check the absolute limit against all pinned pages. */
+ if (pinned + npages >= ulimit && !can_lock)
+ return false;
+
+ return ((nlocked + npages) <= size) || can_lock;
+}
+
+int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
+ struct page **pages)
+{
+ int ret;
ret = get_user_pages_fast(vaddr, npages, writable, pages);
if (ret < 0)
@@ -93,7 +116,8 @@ int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
return ret;
}
-void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
+void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+ size_t npages, bool dirty)
{
size_t i;
@@ -103,9 +127,9 @@ void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
put_page(p[i]);
}
- if (current->mm) { /* during close after signal, mm can be NULL */
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm -= npages;
- up_write(&current->mm->mmap_sem);
+ if (mm) { /* during close after signal, mm can be NULL */
+ down_write(&mm->mmap_sem);
+ mm->pinned_vm -= npages;
+ up_write(&mm->mmap_sem);
}
}
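To make the limit arithmetic in hfi1_can_pin_pages() easy to check, here is
the same computation as a stand-alone sketch (an editor's addition, not
driver code; all names are local to the example and PAGE_SIZE is assumed
to be 4 KiB). Worked numbers: with ulimit = 64 MiB, 4 user contexts and the
default cache_size of 256 MB, cache_limit = (64 MiB / 4) / 4 = 4 MiB, so
the cache is capped at 4 MiB = 1024 pages per context.

#include <stdbool.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096ULL

static bool example_can_pin(uint64_t ulimit, unsigned int usr_ctxts,
			    uint64_t cache_size_mb, uint64_t nlocked,
			    uint64_t npages, uint64_t pinned, bool can_lock)
{
	uint64_t size = cache_size_mb << 20;			/* MB -> bytes */
	uint64_t cache_limit = (ulimit / usr_ctxts) / 4;	/* quarter of share */

	if (ulimit != (uint64_t)-1 && size > cache_limit)
		size = cache_limit;
	size = (size + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;	/* bytes -> pages */

	/* Absolute rlimit check first, mirroring the driver. */
	if (pinned + npages >= ulimit && !can_lock)
		return false;
	return (nlocked + npages) <= size || can_lock;
}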
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index d3de771a0770..ab6b6a42000f 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -67,10 +64,10 @@
#include "hfi.h"
#include "sdma.h"
#include "user_sdma.h"
-#include "sdma.h"
#include "verbs.h" /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
#include "trace.h"
+#include "mmu_rb.h"
static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
@@ -147,7 +144,6 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
/* Last packet in the request */
#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
-#define TXREQ_FLAGS_IOVEC_LAST_PKT BIT(0)
#define SDMA_REQ_IN_USE 0
#define SDMA_REQ_FOR_THREAD 1
@@ -171,16 +167,28 @@ static unsigned initial_pkt_count = 8;
#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
struct user_sdma_iovec {
+ struct list_head list;
struct iovec iov;
/* number of pages in this vector */
unsigned npages;
/* array of pinned pages for this vector */
struct page **pages;
- /* offset into the virtual address space of the vector at
- * which we last left off. */
+ /*
+ * offset into the virtual address space of the vector at
+ * which we last left off.
+ */
u64 offset;
};
+struct sdma_mmu_node {
+ struct mmu_rb_node rb;
+ struct list_head list;
+ struct hfi1_user_sdma_pkt_q *pq;
+ atomic_t refcount;
+ struct page **pages;
+ unsigned npages;
+};
+
struct user_sdma_request {
struct sdma_req_info info;
struct hfi1_user_sdma_pkt_q *pq;
@@ -214,15 +222,6 @@ struct user_sdma_request {
*/
u8 omfactor;
/*
- * pointer to the user's mm_struct. We are going to
- * get a reference to it so it doesn't get freed
- * since we might not be in process context when we
- * are processing the iov's.
- * Using this mm_struct, we can get vma based on the
- * iov's address (find_vma()).
- */
- struct mm_struct *user_mm;
- /*
* We copy the iovs for this request (based on
* info.iovcnt). These are only the data vectors
*/
@@ -239,13 +238,12 @@ struct user_sdma_request {
u16 tididx;
u32 sent;
u64 seqnum;
+ u64 seqcomp;
+ u64 seqsubmitted;
struct list_head txps;
- spinlock_t txcmp_lock; /* protect txcmp list */
- struct list_head txcmp;
unsigned long flags;
/* status of the last txreq completed */
int status;
- struct work_struct worker;
};
/*
@@ -260,11 +258,6 @@ struct user_sdma_txreq {
struct sdma_txreq txreq;
struct list_head list;
struct user_sdma_request *req;
- struct {
- struct user_sdma_iovec *vec;
- u8 flags;
- } iovecs[3];
- int idx;
u16 flags;
unsigned busycount;
u64 seqnum;
@@ -280,21 +273,21 @@ struct user_sdma_txreq {
static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
static int num_user_pages(const struct iovec *);
-static void user_sdma_txreq_cb(struct sdma_txreq *, int, int);
-static void user_sdma_delayed_completion(struct work_struct *);
-static void user_sdma_free_request(struct user_sdma_request *);
+static void user_sdma_txreq_cb(struct sdma_txreq *, int);
+static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
+static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
-static void unpin_vector_pages(struct user_sdma_request *,
- struct user_sdma_iovec *);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
struct user_sdma_txreq *, u32);
static int set_txreq_header_ahg(struct user_sdma_request *,
struct user_sdma_txreq *, u32);
-static inline void set_comp_state(struct user_sdma_request *,
- enum hfi1_sdma_comp_state, int);
+static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *,
+ struct hfi1_user_sdma_comp_q *,
+ u16, enum hfi1_sdma_comp_state, int);
static inline u32 set_pkt_bth_psn(__be32, u8, u32);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
@@ -304,6 +297,17 @@ static int defer_packet_queue(
struct sdma_txreq *,
unsigned seq);
static void activate_packet_queue(struct iowait *, int);
+static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
+static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
+static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
+
+static struct mmu_rb_ops sdma_rb_ops = {
+ .filter = sdma_rb_filter,
+ .insert = sdma_rb_insert,
+ .remove = sdma_rb_remove,
+ .invalidate = sdma_rb_invalidate
+};
static int defer_packet_queue(
struct sdma_engine *sde,
@@ -346,7 +350,7 @@ static void activate_packet_queue(struct iowait *wait, int reason)
static void sdma_kmem_cache_ctor(void *obj)
{
- struct user_sdma_txreq *tx = (struct user_sdma_txreq *)obj;
+ struct user_sdma_txreq *tx = obj;
memset(tx, 0, sizeof(*tx));
}
@@ -381,7 +385,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
goto pq_nomem;
memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
- pq->reqs = kmalloc(memsize, GFP_KERNEL);
+ pq->reqs = kzalloc(memsize, GFP_KERNEL);
if (!pq->reqs)
goto pq_reqs_nomem;
@@ -393,9 +397,12 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
pq->state = SDMA_PKT_Q_INACTIVE;
atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait);
+ pq->sdma_rb_root = RB_ROOT;
+ INIT_LIST_HEAD(&pq->evict);
+ spin_lock_init(&pq->evict_lock);
iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
- activate_packet_queue);
+ activate_packet_queue, NULL);
pq->reqidx = 0;
snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
fd->subctxt);
@@ -414,8 +421,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
if (!cq)
goto cq_nomem;
- memsize = ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size,
- PAGE_SIZE);
+ memsize = PAGE_ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size);
cq->comps = vmalloc_user(memsize);
if (!cq->comps)
goto cq_comps_nomem;
@@ -423,6 +429,12 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
cq->nentries = hfi1_sdma_comp_ring_size;
fd->cq = cq;
+ ret = hfi1_mmu_rb_register(&pq->sdma_rb_root, &sdma_rb_ops);
+ if (ret) {
+ dd_dev_err(dd, "Failed to register with MMU %d", ret);
+ goto done;
+ }
+
spin_lock_irqsave(&uctxt->sdma_qlock, flags);
list_add(&pq->list, &uctxt->sdma_queues);
spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
@@ -452,6 +464,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
uctxt->ctxt, fd->subctxt);
pq = fd->pq;
if (pq) {
+ hfi1_mmu_rb_unregister(&pq->sdma_rb_root);
spin_lock_irqsave(&uctxt->sdma_qlock, flags);
if (!list_empty(&pq->list))
@@ -468,8 +481,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
fd->pq = NULL;
}
if (fd->cq) {
- if (fd->cq->comps)
- vfree(fd->cq->comps);
+ vfree(fd->cq->comps);
kfree(fd->cq);
fd->cq = NULL;
}
@@ -479,7 +491,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
unsigned long dim, unsigned long *count)
{
- int ret = 0, i = 0, sent;
+ int ret = 0, i = 0;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
@@ -505,9 +517,11 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
dd->unit, uctxt->ctxt, fd->subctxt, ret);
return -EFAULT;
}
+
trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
(u16 *)&info);
- if (cq->comps[info.comp_idx].status == QUEUED) {
+ if (cq->comps[info.comp_idx].status == QUEUED ||
+ test_bit(SDMA_REQ_IN_USE, &pq->reqs[info.comp_idx].flags)) {
hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in QUEUED state",
dd->unit, uctxt->ctxt, fd->subctxt,
info.comp_idx);
@@ -534,10 +548,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
req->cq = cq;
req->status = -1;
INIT_LIST_HEAD(&req->txps);
- INIT_LIST_HEAD(&req->txcmp);
- INIT_WORK(&req->worker, user_sdma_delayed_completion);
- spin_lock_init(&req->txcmp_lock);
memcpy(&req->info, &info, sizeof(info));
if (req_opcode(info.ctrl) == EXPECTED)
@@ -596,8 +607,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
}
req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
- /* Calculate the initial TID offset based on the values of
- KDETH.OFFSET and KDETH.OM that are passed in. */
+ /*
+ * Calculate the initial TID offset based on the values of
+ * KDETH.OFFSET and KDETH.OM that are passed in.
+ */
req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
(KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
KDETH_OM_LARGE : KDETH_OM_SMALL);
@@ -606,8 +619,13 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
/* Save all the IO vector structures */
while (i < req->data_iovs) {
+ INIT_LIST_HEAD(&req->iovs[i].list);
memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
- req->iovs[i].offset = 0;
+ ret = pin_vector_pages(req, &req->iovs[i]);
+ if (ret) {
+ req->status = ret;
+ goto free_req;
+ }
req->data_len += req->iovs[i++].iov.iov_len;
}
SDMA_DBG(req, "total data length %u", req->data_len);
@@ -671,52 +689,59 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
}
}
- set_comp_state(req, QUEUED, 0);
+ set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+ atomic_inc(&pq->n_reqs);
/* Send the first N packets in the request to buy us some time */
- sent = user_sdma_send_pkts(req, pcount);
- if (unlikely(sent < 0)) {
- if (sent != -EBUSY) {
- req->status = sent;
- set_comp_state(req, ERROR, req->status);
- return sent;
- } else
- sent = 0;
+ ret = user_sdma_send_pkts(req, pcount);
+ if (unlikely(ret < 0 && ret != -EBUSY)) {
+ req->status = ret;
+ goto free_req;
}
- atomic_inc(&pq->n_reqs);
- xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
- if (sent < req->info.npkts) {
- /*
- * This is a somewhat blocking send implementation.
- * The driver will block the caller until all packets of the
- * request have been submitted to the SDMA engine. However, it
- * will not wait for send completions.
- */
- while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
- ret = user_sdma_send_pkts(req, pcount);
- if (ret < 0) {
- if (ret != -EBUSY) {
- req->status = ret;
- return ret;
- }
- wait_event_interruptible_timeout(
- pq->busy.wait_dma,
- (pq->state == SDMA_PKT_Q_ACTIVE),
- msecs_to_jiffies(
- SDMA_IOWAIT_TIMEOUT));
+ /*
+ * It is possible that the SDMA engine would have processed all the
+ * submitted packets by the time we get here. Therefore, only set
+ * packet queue state to ACTIVE if there are still uncompleted
+ * requests.
+ */
+ if (atomic_read(&pq->n_reqs))
+ xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
+
+ /*
+ * This is a somewhat blocking send implementation.
+ * The driver will block the caller until all packets of the
+ * request have been submitted to the SDMA engine. However, it
+ * will not wait for send completions.
+ */
+ while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) {
+ ret = user_sdma_send_pkts(req, pcount);
+ if (ret < 0) {
+ if (ret != -EBUSY) {
+ req->status = ret;
+ set_bit(SDMA_REQ_DONE_ERROR, &req->flags);
+ if (ACCESS_ONCE(req->seqcomp) ==
+ req->seqsubmitted - 1)
+ goto free_req;
+ return ret;
}
+ wait_event_interruptible_timeout(
+ pq->busy.wait_dma,
+ (pq->state == SDMA_PKT_Q_ACTIVE),
+ msecs_to_jiffies(
+ SDMA_IOWAIT_TIMEOUT));
}
-
}
*count += idx;
return 0;
free_req:
- user_sdma_free_request(req);
+ user_sdma_free_request(req, true);
+ pq_update(pq);
+ set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
return ret;
}
static inline u32 compute_data_length(struct user_sdma_request *req,
- struct user_sdma_txreq *tx)
+ struct user_sdma_txreq *tx)
{
/*
* Determine the proper size of the packet data.
@@ -734,8 +759,10 @@ static inline u32 compute_data_length(struct user_sdma_request *req,
} else if (req_opcode(req->info.ctrl) == EXPECTED) {
u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
PAGE_SIZE;
- /* Get the data length based on the remaining space in the
- * TID pair. */
+ /*
+ * Get the data length based on the remaining space in the
+ * TID pair.
+ */
len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
/* If we've filled up the TID pair, move to the next one. */
if (unlikely(!len) && ++req->tididx < req->n_tids &&
@@ -745,12 +772,15 @@ static inline u32 compute_data_length(struct user_sdma_request *req,
req->tidoffset = 0;
len = min_t(u32, tidlen, req->info.fragsize);
}
- /* Since the TID pairs map entire pages, make sure that we
+ /*
+ * Since the TID pairs map entire pages, make sure that we
* are not going to try to send more data than we have
- * remaining. */
+ * remaining.
+ */
len = min(len, req->data_len - req->sent);
- } else
+ } else {
len = min(req->data_len - req->sent, (u32)req->info.fragsize);
+ }
SDMA_DBG(req, "Data Length = %u", len);
return len;
}
@@ -813,9 +843,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
tx->flags = 0;
tx->req = req;
tx->busycount = 0;
- tx->idx = -1;
INIT_LIST_HEAD(&tx->list);
- memset(tx->iovecs, 0, sizeof(tx->iovecs));
if (req->seqnum == req->info.npkts - 1)
tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;
@@ -836,18 +864,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
WARN_ON(iovec->offset);
}
- /*
- * This request might include only a header and no user
- * data, so pin pages only if there is data and it the
- * pages have not been pinned already.
- */
- if (unlikely(!iovec->pages && iovec->iov.iov_len)) {
- ret = pin_vector_pages(req, iovec);
- if (ret)
- goto free_tx;
- }
-
- tx->iovecs[++tx->idx].vec = iovec;
datalen = compute_data_length(req, tx);
if (!datalen) {
SDMA_DBG(req,
@@ -926,8 +942,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
unsigned pageidx, len;
base = (unsigned long)iovec->iov.iov_base;
- offset = ((base + iovec->offset + iov_offset) &
- ~PAGE_MASK);
+ offset = offset_in_page(base + iovec->offset +
+ iov_offset);
pageidx = (((iovec->offset + iov_offset +
base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
len = offset + req->info.fragsize > PAGE_SIZE ?
@@ -937,16 +953,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
iovec->pages[pageidx],
offset, len);
if (ret) {
- int i;
-
SDMA_DBG(req, "SDMA txreq add page failed %d\n",
ret);
- /* Mark all assigned vectors as complete so they
- * are unpinned in the callback. */
- for (i = tx->idx; i >= 0; i--) {
- tx->iovecs[i].flags |=
- TXREQ_FLAGS_IOVEC_LAST_PKT;
- }
goto free_txreq;
}
iov_offset += len;
@@ -954,19 +962,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
data_sent += len;
if (unlikely(queued < datalen &&
pageidx == iovec->npages &&
- req->iov_idx < req->data_iovs - 1 &&
- tx->idx < ARRAY_SIZE(tx->iovecs))) {
+ req->iov_idx < req->data_iovs - 1)) {
iovec->offset += iov_offset;
- tx->iovecs[tx->idx].flags |=
- TXREQ_FLAGS_IOVEC_LAST_PKT;
iovec = &req->iovs[++req->iov_idx];
- if (!iovec->pages) {
- ret = pin_vector_pages(req, iovec);
- if (ret)
- goto free_txreq;
- }
iov_offset = 0;
- tx->iovecs[++tx->idx].vec = iovec;
}
}
/*
@@ -977,28 +976,21 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (req_opcode(req->info.ctrl) == EXPECTED)
req->tidoffset += datalen;
req->sent += data_sent;
- if (req->data_len) {
- tx->iovecs[tx->idx].vec->offset += iov_offset;
- /* If we've reached the end of the io vector, mark it
- * so the callback can unpin the pages and free it. */
- if (tx->iovecs[tx->idx].vec->offset ==
- tx->iovecs[tx->idx].vec->iov.iov_len)
- tx->iovecs[tx->idx].flags |=
- TXREQ_FLAGS_IOVEC_LAST_PKT;
- }
-
+ if (req->data_len)
+ iovec->offset += iov_offset;
+ list_add_tail(&tx->txreq.list, &req->txps);
/*
* It is important to increment this here as it is used to
* generate the BTH.PSN and, therefore, can't be bulk-updated
* outside of the loop.
*/
tx->seqnum = req->seqnum++;
- list_add_tail(&tx->txreq.list, &req->txps);
npkts++;
}
dosend:
ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps);
- if (list_empty(&req->txps))
+ if (list_empty(&req->txps)) {
+ req->seqsubmitted = req->seqnum;
if (req->seqnum == req->info.npkts) {
set_bit(SDMA_REQ_SEND_DONE, &req->flags);
/*
@@ -1010,6 +1002,10 @@ dosend:
if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
sdma_ahg_free(req->sde, req->ahg_idx);
}
+ } else if (ret > 0) {
+ req->seqsubmitted += ret;
+ ret = 0;
+ }
return ret;
free_txreq:
@@ -1024,7 +1020,7 @@ free_tx:
*/
static inline int num_user_pages(const struct iovec *iov)
{
- const unsigned long addr = (unsigned long) iov->iov_base;
+ const unsigned long addr = (unsigned long)iov->iov_base;
const unsigned long len = iov->iov_len;
const unsigned long spage = addr & PAGE_MASK;
const unsigned long epage = (addr + len - 1) & PAGE_MASK;
@@ -1032,64 +1028,129 @@ static inline int num_user_pages(const struct iovec *iov)
return 1 + ((epage - spage) >> PAGE_SHIFT);
}
-static int pin_vector_pages(struct user_sdma_request *req,
- struct user_sdma_iovec *iovec) {
- int pinned, npages;
+/* Caller must hold pq->evict_lock */
+static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
+{
+ u32 cleared = 0;
+ struct sdma_mmu_node *node, *ptr;
- npages = num_user_pages(&iovec->iov);
- iovec->pages = kcalloc(npages, sizeof(*iovec->pages), GFP_KERNEL);
- if (!iovec->pages) {
- SDMA_DBG(req, "Failed page array alloc");
- return -ENOMEM;
+ list_for_each_entry_safe_reverse(node, ptr, &pq->evict, list) {
+ /* Make sure that no one is still using the node. */
+ if (!atomic_read(&node->refcount)) {
+ /*
+ * Need to use the page count now as the remove callback
+ * will free the node.
+ */
+ cleared += node->npages;
+ spin_unlock(&pq->evict_lock);
+ hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb);
+ spin_lock(&pq->evict_lock);
+ if (cleared >= npages)
+ break;
+ }
}
+ return cleared;
+}
- /*
- * Get a reference to the process's mm so we can use it when
- * unpinning the io vectors.
- */
- req->pq->user_mm = get_task_mm(current);
+static int pin_vector_pages(struct user_sdma_request *req,
+ struct user_sdma_iovec *iovec) {
+ int ret = 0, pinned, npages, cleared;
+ struct page **pages;
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct sdma_mmu_node *node = NULL;
+ struct mmu_rb_node *rb_node;
+
+ rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
+ (unsigned long)iovec->iov.iov_base,
+ iovec->iov.iov_len);
+ if (rb_node)
+ node = container_of(rb_node, struct sdma_mmu_node, rb);
+
+ if (!node) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
- pinned = hfi1_acquire_user_pages((unsigned long)iovec->iov.iov_base,
- npages, 0, iovec->pages);
+ node->rb.addr = (unsigned long)iovec->iov.iov_base;
+ node->rb.len = iovec->iov.iov_len;
+ node->pq = pq;
+ atomic_set(&node->refcount, 0);
+ INIT_LIST_HEAD(&node->list);
+ }
- if (pinned < 0)
- return pinned;
+ npages = num_user_pages(&iovec->iov);
+ if (node->npages < npages) {
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ SDMA_DBG(req, "Failed page array alloc");
+ ret = -ENOMEM;
+ goto bail;
+ }
+ memcpy(pages, node->pages, node->npages * sizeof(*pages));
+
+ npages -= node->npages;
+retry:
+ if (!hfi1_can_pin_pages(pq->dd, pq->n_locked, npages)) {
+ spin_lock(&pq->evict_lock);
+ cleared = sdma_cache_evict(pq, npages);
+ spin_unlock(&pq->evict_lock);
+ if (cleared >= npages)
+ goto retry;
+ }
+ pinned = hfi1_acquire_user_pages(
+ ((unsigned long)iovec->iov.iov_base +
+ (node->npages * PAGE_SIZE)), npages, 0,
+ pages + node->npages);
+ if (pinned < 0) {
+ kfree(pages);
+ ret = pinned;
+ goto bail;
+ }
+ if (pinned != npages) {
+ unpin_vector_pages(current->mm, pages, pinned);
+ ret = -EFAULT;
+ goto bail;
+ }
+ kfree(node->pages);
+ node->pages = pages;
+ node->npages += pinned;
+ npages = node->npages;
+ spin_lock(&pq->evict_lock);
+ if (!rb_node)
+ list_add(&node->list, &pq->evict);
+ else
+ list_move(&node->list, &pq->evict);
+ pq->n_locked += pinned;
+ spin_unlock(&pq->evict_lock);
+ }
+ iovec->pages = node->pages;
+ iovec->npages = npages;
- iovec->npages = pinned;
- if (pinned != npages) {
- SDMA_DBG(req, "Failed to pin pages (%d/%u)", pinned, npages);
- unpin_vector_pages(req, iovec);
- return -EFAULT;
+ if (!rb_node) {
+ ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
+ if (ret) {
+ spin_lock(&pq->evict_lock);
+ list_del(&node->list);
+ pq->n_locked -= node->npages;
+ spin_unlock(&pq->evict_lock);
+ ret = 0;
+ goto bail;
+ }
+ } else {
+ atomic_inc(&node->refcount);
}
return 0;
+bail:
+ if (!rb_node)
+ kfree(node);
+ return ret;
}
-static void unpin_vector_pages(struct user_sdma_request *req,
- struct user_sdma_iovec *iovec)
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+ unsigned npages)
{
- /*
- * Unpinning is done through the workqueue so use the
- * process's mm if we have a reference to it.
- */
- if ((current->flags & PF_KTHREAD) && req->pq->user_mm)
- use_mm(req->pq->user_mm);
-
- hfi1_release_user_pages(iovec->pages, iovec->npages, 0);
-
- /*
- * Unuse the user's mm (see above) and release the
- * reference to it.
- */
- if (req->pq->user_mm) {
- if (current->flags & PF_KTHREAD)
- unuse_mm(req->pq->user_mm);
- mmput(req->pq->user_mm);
- }
-
- kfree(iovec->pages);
- iovec->pages = NULL;
- iovec->npages = 0;
- iovec->offset = 0;
+ hfi1_release_user_pages(mm, pages, npages, 0);
+ kfree(pages);
}
static int check_header_template(struct user_sdma_request *req,
@@ -1212,7 +1273,6 @@ static int set_txreq_header(struct user_sdma_request *req,
if (ret)
return ret;
goto done;
-
}
hdr->bth[2] = cpu_to_be32(
@@ -1222,7 +1282,7 @@ static int set_txreq_header(struct user_sdma_request *req,
/* Set ACK request on last packet */
if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
- hdr->bth[2] |= cpu_to_be32(1UL<<31);
+ hdr->bth[2] |= cpu_to_be32(1UL << 31);
/* Set the new offset */
hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
@@ -1236,8 +1296,10 @@ static int set_txreq_header(struct user_sdma_request *req,
if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
PAGE_SIZE)) {
req->tidoffset = 0;
- /* Since we don't copy all the TIDs, all at once,
- * we have to check again. */
+ /*
+ * Since we don't copy all the TIDs, all at once,
+ * we have to check again.
+ */
if (++req->tididx > req->n_tids - 1 ||
!req->tids[req->tididx]) {
return -EINVAL;
@@ -1318,8 +1380,10 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
PAGE_SIZE)) {
req->tidoffset = 0;
- /* Since we don't copy all the TIDs, all at once,
- * we have to check again. */
+ /*
+ * Since we don't copy all the TIDs, all at once,
+ * we have to check again.
+ */
if (++req->tididx > req->n_tids - 1 ||
!req->tids[req->tididx]) {
return -EINVAL;
@@ -1343,8 +1407,9 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
INTR) >> 16);
val &= cpu_to_le16(~(1U << 13));
AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
- } else
+ } else {
AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
+ }
}
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
@@ -1359,113 +1424,62 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
* tx request have been processed by the DMA engine. Called in
* interrupt context.
*/
-static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status,
- int drain)
+static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
struct user_sdma_txreq *tx =
container_of(txreq, struct user_sdma_txreq, txreq);
struct user_sdma_request *req;
- bool defer;
- int i;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct hfi1_user_sdma_comp_q *cq;
+ u16 idx;
if (!tx->req)
return;
req = tx->req;
- /*
- * If this is the callback for the last packet of the request,
- * queue up the request for clean up.
- */
- defer = (tx->seqnum == req->info.npkts - 1);
-
- /*
- * If we have any io vectors associated with this txreq,
- * check whether they need to be 'freed'. We can't free them
- * here because the unpin function needs to be able to sleep.
- */
- for (i = tx->idx; i >= 0; i--) {
- if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT) {
- defer = true;
- break;
- }
- }
+ pq = req->pq;
+ cq = req->cq;
- req->status = status;
if (status != SDMA_TXREQ_S_OK) {
SDMA_DBG(req, "SDMA completion with error %d",
status);
set_bit(SDMA_REQ_HAS_ERROR, &req->flags);
- defer = true;
}
- /*
- * Defer the clean up of the iovectors and the request until later
- * so it can be done outside of interrupt context.
- */
- if (defer) {
- spin_lock(&req->txcmp_lock);
- list_add_tail(&tx->list, &req->txcmp);
- spin_unlock(&req->txcmp_lock);
- schedule_work(&req->worker);
+ req->seqcomp = tx->seqnum;
+ kmem_cache_free(pq->txreq_cache, tx);
+ tx = NULL;
+
+ idx = req->info.comp_idx;
+ if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
+ if (req->seqcomp == req->info.npkts - 1) {
+ req->status = 0;
+ user_sdma_free_request(req, false);
+ pq_update(pq);
+ set_comp_state(pq, cq, idx, COMPLETE, 0);
+ }
} else {
- kmem_cache_free(req->pq->txreq_cache, tx);
+ if (status != SDMA_TXREQ_S_OK)
+ req->status = status;
+ if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
+ (test_bit(SDMA_REQ_SEND_DONE, &req->flags) ||
+ test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) {
+ user_sdma_free_request(req, false);
+ pq_update(pq);
+ set_comp_state(pq, cq, idx, ERROR, req->status);
+ }
}
}
-static void user_sdma_delayed_completion(struct work_struct *work)
+static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
- struct user_sdma_request *req =
- container_of(work, struct user_sdma_request, worker);
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- struct user_sdma_txreq *tx = NULL;
- unsigned long flags;
- u64 seqnum;
- int i;
-
- while (1) {
- spin_lock_irqsave(&req->txcmp_lock, flags);
- if (!list_empty(&req->txcmp)) {
- tx = list_first_entry(&req->txcmp,
- struct user_sdma_txreq, list);
- list_del(&tx->list);
- }
- spin_unlock_irqrestore(&req->txcmp_lock, flags);
- if (!tx)
- break;
-
- for (i = tx->idx; i >= 0; i--)
- if (tx->iovecs[i].flags & TXREQ_FLAGS_IOVEC_LAST_PKT)
- unpin_vector_pages(req, tx->iovecs[i].vec);
-
- seqnum = tx->seqnum;
- kmem_cache_free(pq->txreq_cache, tx);
- tx = NULL;
-
- if (req->status != SDMA_TXREQ_S_OK) {
- if (seqnum == ACCESS_ONCE(req->seqnum) &&
- test_bit(SDMA_REQ_DONE_ERROR, &req->flags)) {
- atomic_dec(&pq->n_reqs);
- set_comp_state(req, ERROR, req->status);
- user_sdma_free_request(req);
- break;
- }
- } else {
- if (seqnum == req->info.npkts - 1) {
- atomic_dec(&pq->n_reqs);
- set_comp_state(req, COMPLETE, 0);
- user_sdma_free_request(req);
- break;
- }
- }
- }
-
- if (!atomic_read(&pq->n_reqs)) {
+ if (atomic_dec_and_test(&pq->n_reqs)) {
xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
wake_up(&pq->wait);
}
}
-static void user_sdma_free_request(struct user_sdma_request *req)
+static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
{
if (!list_empty(&req->txps)) {
struct sdma_txreq *t, *p;
@@ -1479,25 +1493,87 @@ static void user_sdma_free_request(struct user_sdma_request *req)
}
}
if (req->data_iovs) {
+ struct sdma_mmu_node *node;
+ struct mmu_rb_node *mnode;
int i;
- for (i = 0; i < req->data_iovs; i++)
- if (req->iovs[i].npages && req->iovs[i].pages)
- unpin_vector_pages(req, &req->iovs[i]);
+ for (i = 0; i < req->data_iovs; i++) {
+ mnode = hfi1_mmu_rb_search(
+ &req->pq->sdma_rb_root,
+ (unsigned long)req->iovs[i].iov.iov_base,
+ req->iovs[i].iov.iov_len);
+ if (!mnode)
+ continue;
+
+ node = container_of(mnode, struct sdma_mmu_node, rb);
+ if (unpin)
+ hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
+ &node->rb);
+ else
+ atomic_dec(&node->refcount);
+ }
}
kfree(req->tids);
clear_bit(SDMA_REQ_IN_USE, &req->flags);
}
-static inline void set_comp_state(struct user_sdma_request *req,
- enum hfi1_sdma_comp_state state,
- int ret)
+static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
+ struct hfi1_user_sdma_comp_q *cq,
+ u16 idx, enum hfi1_sdma_comp_state state,
+ int ret)
{
- SDMA_DBG(req, "Setting completion status %u %d", state, ret);
- req->cq->comps[req->info.comp_idx].status = state;
+ hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
+ pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
+ cq->comps[idx].status = state;
if (state == ERROR)
- req->cq->comps[req->info.comp_idx].errcode = -ret;
- trace_hfi1_sdma_user_completion(req->pq->dd, req->pq->ctxt,
- req->pq->subctxt, req->info.comp_idx,
- state, ret);
+ cq->comps[idx].errcode = -ret;
+ trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
+ idx, state, ret);
+}
+
+static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len)
+{
+ return (bool)(node->addr == addr);
+}
+
+static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+
+ atomic_inc(&node->refcount);
+ return 0;
+}
+
+static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
+ bool notifier)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+
+ spin_lock(&node->pq->evict_lock);
+ list_del(&node->list);
+ node->pq->n_locked -= node->npages;
+ spin_unlock(&node->pq->evict_lock);
+
+ unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+ node->npages);
+ /*
+ * If called by the MMU notifier, we have to adjust the pinned
+ * page count ourselves.
+ */
+ if (notifier)
+ current->mm->pinned_vm -= node->npages;
+ kfree(node);
+}
+
+static int sdma_rb_invalidate(struct rb_root *root, struct mmu_rb_node *mnode)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+
+ if (!atomic_read(&node->refcount))
+ return 1;
+ return 0;
}
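Taken together, pin_vector_pages(), sdma_cache_evict() and the sdma_rb_*
callbacks above implement a refcounted, LRU-evicted pin cache for SDMA
buffers. The sketch below is an editor's condensation of the reuse-or-pin
decision with the locking elided; the ex_* names are stand-ins, not driver
functions, and the real driver additionally pins only the missing tail when
a cached node covers part of the buffer.

#include <stdbool.h>
#include <stddef.h>

struct ex_node {
	unsigned long addr, len;
	int refcount;				/* in-flight users of the pinning */
};

struct ex_node *ex_search(unsigned long addr, unsigned long len);
bool ex_can_pin(unsigned long bytes);		/* under the cache limit? */
bool ex_evict_one(void);			/* free one node with refcount == 0 */
struct ex_node *ex_pin_and_insert(unsigned long addr, unsigned long len);

static struct ex_node *ex_acquire(unsigned long addr, unsigned long len)
{
	struct ex_node *node = ex_search(addr, len);	/* rb-tree lookup */

	if (node) {
		node->refcount++;	/* hit: the pages are already pinned */
		return node;
	}
	while (!ex_can_pin(len))	/* over budget: evict idle nodes */
		if (!ex_evict_one())
			return NULL;
	return ex_pin_and_insert(addr, len);
}

Completion of the last txreq using a vector drops the reference, and both
eviction and the MMU-notifier remove path only free a node once its
refcount has fallen back to zero.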
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/staging/rdma/hfi1/user_sdma.h
index 0afa28508a8a..b9240e351161 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.h
+++ b/drivers/staging/rdma/hfi1/user_sdma.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -69,7 +66,11 @@ struct hfi1_user_sdma_pkt_q {
struct iowait busy;
unsigned state;
wait_queue_head_t wait;
- struct mm_struct *user_mm;
+ unsigned long unpinned;
+ struct rb_root sdma_rb_root;
+ u32 n_locked;
+ struct list_head evict;
+ spinlock_t evict_lock; /* protect evict and n_locked */
};
struct hfi1_user_sdma_comp_q {
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 09b8d412ee90..89f2aad45c1b 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -63,9 +60,9 @@
#include "device.h"
#include "trace.h"
#include "qp.h"
-#include "sdma.h"
+#include "verbs_txreq.h"
-unsigned int hfi1_lkey_table_size = 16;
+static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
@@ -124,45 +121,181 @@ unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs supported");
+unsigned short piothreshold = 256;
+module_param(piothreshold, ushort, S_IRUGO);
+MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
+
+#define COPY_CACHELESS 1
+#define COPY_ADAPTIVE 2
+static unsigned int sge_copy_mode;
+module_param(sge_copy_mode, uint, S_IRUGO);
+MODULE_PARM_DESC(sge_copy_mode,
+ "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
+
static void verbs_sdma_complete(
struct sdma_txreq *cookie,
- int status,
- int drained);
+ int status);
+
+static int pio_wait(struct rvt_qp *qp,
+ struct send_context *sc,
+ struct hfi1_pkt_state *ps,
+ u32 flag);
/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
+static uint wss_threshold;
+module_param(wss_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
+static uint wss_clean_period = 256;
+module_param(wss_clean_period, uint, S_IRUGO);
+MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
+
+/* memory working set size */
+struct hfi1_wss {
+ unsigned long *entries;
+ atomic_t total_count;
+ atomic_t clean_counter;
+ atomic_t clean_entry;
+
+ int threshold;
+ int num_entries;
+ long pages_mask;
+};
+
+static struct hfi1_wss wss;
+
+int hfi1_wss_init(void)
+{
+ long llc_size;
+ long llc_bits;
+ long table_size;
+ long table_bits;
+
+ /* check for a valid percent range - default to 80 if none or invalid */
+ if (wss_threshold < 1 || wss_threshold > 100)
+ wss_threshold = 80;
+ /* reject a wildly large period */
+ if (wss_clean_period > 1000000)
+ wss_clean_period = 256;
+ /* reject a zero period */
+ if (wss_clean_period == 0)
+ wss_clean_period = 1;
+
+ /*
+ * Calculate the table size - the next power of 2 larger than the
+ * LLC size. LLC size is in KiB.
+ */
+ llc_size = wss_llc_size() * 1024;
+ table_size = roundup_pow_of_two(llc_size);
+
+ /* one bit per page in rounded up table */
+ llc_bits = llc_size / PAGE_SIZE;
+ table_bits = table_size / PAGE_SIZE;
+ wss.pages_mask = table_bits - 1;
+ wss.num_entries = table_bits / BITS_PER_LONG;
+
+ wss.threshold = (llc_bits * wss_threshold) / 100;
+ if (wss.threshold == 0)
+ wss.threshold = 1;
+
+ atomic_set(&wss.clean_counter, wss_clean_period);
+
+ wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
+ GFP_KERNEL);
+ if (!wss.entries) {
+ hfi1_wss_exit();
+ return -ENOMEM;
+ }
+
+ return 0;
+}
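/*
 * Editor's worked example for the sizing above (illustration only, not
 * part of this patch): with a 30 MiB LLC, 4 KiB pages and 64-bit longs,
 * llc_size = 30 MiB, table_size = roundup_pow_of_two(30 MiB) = 32 MiB,
 * llc_bits = 7680, table_bits = 8192, pages_mask = 8191, num_entries =
 * 8192 / 64 = 128, and the default 80% threshold gives wss.threshold =
 * 0.8 * 7680 = 6144 pages.
 */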
+
+void hfi1_wss_exit(void)
+{
+ /* coded to handle partially initialized and repeat callers */
+ kfree(wss.entries);
+ wss.entries = NULL;
+}
+
/*
- * Note that it is OK to post send work requests in the SQE and ERR
- * states; hfi1_do_send() will process them and generate error
- * completions as per IB 1.2 C10-96.
+ * Advance the clean counter. When the clean period has expired,
+ * clean an entry.
+ *
+ * This is implemented in atomics to avoid locking. Because multiple
+ * variables are involved, it can be racy, which can lead to slightly
+ * inaccurate information. Since this is only a heuristic, this is
+ * OK. Any inaccuracies will clean themselves out as the counter
+ * advances. That said, it is unlikely the entry clean operation will
+ * race - the next possible racer will not start until the next clean
+ * period.
+ *
+ * The clean counter is implemented as a decrement to zero. When zero
+ * is reached an entry is cleaned.
*/
-const int ib_hfi1_state_ops[IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = 0,
- [IB_QPS_INIT] = HFI1_POST_RECV_OK,
- [IB_QPS_RTR] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK,
- [IB_QPS_RTS] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
- HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK |
- HFI1_PROCESS_NEXT_SEND_OK,
- [IB_QPS_SQD] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
- HFI1_POST_SEND_OK | HFI1_PROCESS_SEND_OK,
- [IB_QPS_SQE] = HFI1_POST_RECV_OK | HFI1_PROCESS_RECV_OK |
- HFI1_POST_SEND_OK | HFI1_FLUSH_SEND,
- [IB_QPS_ERR] = HFI1_POST_RECV_OK | HFI1_FLUSH_RECV |
- HFI1_POST_SEND_OK | HFI1_FLUSH_SEND,
-};
+static void wss_advance_clean_counter(void)
+{
+ int entry;
+ int weight;
+ unsigned long bits;
-struct hfi1_ucontext {
- struct ib_ucontext ibucontext;
-};
+ /* become the cleaner if we decrement the counter to zero */
+ if (atomic_dec_and_test(&wss.clean_counter)) {
+ /*
+ * Set, not add, the clean period. This avoids an issue
+ * where the counter could decrement below the clean period.
+ * Doing a set can result in lost decrements, slowing the
+ * clean advance. Since this is a heuristic, this possible
+ * slowdown is OK.
+ *
+ * An alternative is to loop, advancing the counter by a
+ * clean period until the result is > 0. However, this could
+ * lead to several threads keeping another in the clean loop.
+ * This could be mitigated by limiting the number of times
+ * we stay in the loop.
+ */
+ atomic_set(&wss.clean_counter, wss_clean_period);
-static inline struct hfi1_ucontext *to_iucontext(struct ib_ucontext
- *ibucontext)
+ /*
+ * Uniquely grab the entry to clean and move to next.
+ * The current entry is always the lower bits of
+ * wss.clean_entry. The table size, wss.num_entries,
+ * is always a power-of-2.
+ */
+ entry = (atomic_inc_return(&wss.clean_entry) - 1)
+ & (wss.num_entries - 1);
+
+ /* clear the entry and count the bits */
+ bits = xchg(&wss.entries[entry], 0);
+ weight = hweight64((u64)bits);
+ /* only adjust the contended total count if needed */
+ if (weight)
+ atomic_sub(weight, &wss.total_count);
+ }
+}
+
+/*
+ * Insert the given address into the working set array.
+ */
+static void wss_insert(void *address)
{
- return container_of(ibucontext, struct hfi1_ucontext, ibucontext);
+ u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
+ u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
+ u32 nr = page & (BITS_PER_LONG - 1);
+
+ if (!test_and_set_bit(nr, &wss.entries[entry]))
+ atomic_inc(&wss.total_count);
+
+ wss_advance_clean_counter();
}
-static inline void _hfi1_schedule_send(struct hfi1_qp *qp);
+/*
+ * Is the working set larger than the threshold?
+ */
+static inline int wss_exceeds_threshold(void)
+{
+ return atomic_read(&wss.total_count) >= wss.threshold;
+}
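/*
 * Editor's sketch (not part of this patch): the decrement-to-zero clean
 * counter above, reduced to portable C11 atomics so the race tolerance
 * is easy to see. All names are local to this example.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ex_clean_counter = 256;
static const int ex_clean_period = 256;

static bool ex_become_cleaner(void)
{
	/* Like atomic_dec_and_test(): previous value 1 means we hit zero. */
	if (atomic_fetch_sub(&ex_clean_counter, 1) == 1) {
		/* Set, not add: lost decrements only slow cleaning down. */
		atomic_store(&ex_clean_counter, ex_clean_period);
		return true;	/* this caller cleans one table entry */
	}
	return false;
}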
/*
* Translate ib_wr_opcode into ib_wc_opcode.
@@ -274,14 +407,47 @@ __be64 ib_hfi1_sys_image_guid;
* @ss: the SGE state
* @data: the data to copy
* @length: the length of the data
+ * @copy_last: do a separate copy of the last 8 bytes
*/
void hfi1_copy_sge(
- struct hfi1_sge_state *ss,
+ struct rvt_sge_state *ss,
void *data, u32 length,
- int release)
+ int release,
+ int copy_last)
{
- struct hfi1_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
+ int in_last = 0;
+ int i;
+ int cacheless_copy = 0;
+
+ if (sge_copy_mode == COPY_CACHELESS) {
+ cacheless_copy = length >= PAGE_SIZE;
+ } else if (sge_copy_mode == COPY_ADAPTIVE) {
+ if (length >= PAGE_SIZE) {
+ /*
+ * NOTE: this *assumes*:
+ * o The first vaddr is the dest.
+ * o If multiple pages, then vaddr is sequential.
+ */
+ wss_insert(sge->vaddr);
+ if (length >= (2 * PAGE_SIZE))
+ wss_insert(sge->vaddr + PAGE_SIZE);
+
+ cacheless_copy = wss_exceeds_threshold();
+ } else {
+ wss_advance_clean_counter();
+ }
+ }
+ if (copy_last) {
+ if (length > 8) {
+ length -= 8;
+ } else {
+ copy_last = 0;
+ in_last = 1;
+ }
+ }
+again:
while (length) {
u32 len = sge->length;
@@ -290,17 +456,25 @@ void hfi1_copy_sge(
if (len > sge->sge_length)
len = sge->sge_length;
WARN_ON_ONCE(len == 0);
- memcpy(sge->vaddr, data, len);
+ if (unlikely(in_last)) {
+ /* enforce byte transfer ordering */
+ for (i = 0; i < len; i++)
+ ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
+ } else if (cacheless_copy) {
+ cacheless_memcpy(sge->vaddr, data, len);
+ } else {
+ memcpy(sge->vaddr, data, len);
+ }
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (release)
- hfi1_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -313,6 +487,13 @@ void hfi1_copy_sge(
data += len;
length -= len;
}
+
+ if (copy_last) {
+ copy_last = 0;
+ in_last = 1;
+ length = 8;
+ goto again;
+ }
}
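As a compact view of the adaptive path just added, the sketch below is an
editor's illustration (EX_PAGE_SIZE and the helper name are assumptions)
of how COPY_ADAPTIVE decides between memcpy() and the cacheless copy by
sampling destination pages into the working-set table:

static inline bool ex_use_cacheless_copy(void *dst, size_t length)
{
	if (length < EX_PAGE_SIZE) {
		wss_advance_clean_counter();	/* small copies stay cached */
		return false;
	}
	wss_insert(dst);			/* sample the first dest page */
	if (length >= 2 * EX_PAGE_SIZE)
		wss_insert((char *)dst + EX_PAGE_SIZE);
	return wss_exceeds_threshold();		/* WSS spilled out of the LLC? */
}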
/**
@@ -320,9 +501,9 @@ void hfi1_copy_sge(
* @ss: the SGE state
* @length: the number of bytes to skip
*/
-void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release)
+void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
- struct hfi1_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
while (length) {
u32 len = sge->length;
@@ -337,11 +518,11 @@ void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release)
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (release)
- hfi1_put_mr(sge->mr);
+ rvt_put_mr(sge->mr);
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
@@ -355,231 +536,6 @@ void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release)
}
}
-/**
- * post_one_send - post one RC, UC, or UD send work request
- * @qp: the QP to post on
- * @wr: the work request to send
- */
-static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
-{
- struct hfi1_swqe *wqe;
- u32 next;
- int i;
- int j;
- int acc;
- struct hfi1_lkey_table *rkt;
- struct hfi1_pd *pd;
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct hfi1_pportdata *ppd;
- struct hfi1_ibport *ibp;
-
- /* IB spec says that num_sge == 0 is OK. */
- if (unlikely(wr->num_sge > qp->s_max_sge))
- return -EINVAL;
-
- ppd = &dd->pport[qp->port_num - 1];
- ibp = &ppd->ibport_data;
-
- /*
- * Don't allow RDMA reads or atomic operations on UC or
- * undefined operations.
- * Make sure buffer is large enough to hold the result for atomics.
- */
- if (qp->ibqp.qp_type == IB_QPT_UC) {
- if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
- return -EINVAL;
- } else if (qp->ibqp.qp_type != IB_QPT_RC) {
- /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
- if (wr->opcode != IB_WR_SEND &&
- wr->opcode != IB_WR_SEND_WITH_IMM)
- return -EINVAL;
- /* Check UD destination address PD */
- if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
- return -EINVAL;
- } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
- return -EINVAL;
- else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
- (wr->num_sge == 0 ||
- wr->sg_list[0].length < sizeof(u64) ||
- wr->sg_list[0].addr & (sizeof(u64) - 1)))
- return -EINVAL;
- else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
- return -EINVAL;
-
- next = qp->s_head + 1;
- if (next >= qp->s_size)
- next = 0;
- if (next == qp->s_last)
- return -ENOMEM;
-
- rkt = &to_idev(qp->ibqp.device)->lk_table;
- pd = to_ipd(qp->ibqp.pd);
- wqe = get_swqe_ptr(qp, qp->s_head);
-
-
- if (qp->ibqp.qp_type != IB_QPT_UC &&
- qp->ibqp.qp_type != IB_QPT_RC)
- memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
- else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE ||
- wr->opcode == IB_WR_RDMA_READ)
- memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
- else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
- else
- memcpy(&wqe->wr, wr, sizeof(wqe->wr));
-
- wqe->length = 0;
- j = 0;
- if (wr->num_sge) {
- acc = wr->opcode >= IB_WR_RDMA_READ ?
- IB_ACCESS_LOCAL_WRITE : 0;
- for (i = 0; i < wr->num_sge; i++) {
- u32 length = wr->sg_list[i].length;
- int ok;
-
- if (length == 0)
- continue;
- ok = hfi1_lkey_ok(rkt, pd, &wqe->sg_list[j],
- &wr->sg_list[i], acc);
- if (!ok)
- goto bail_inval_free;
- wqe->length += length;
- j++;
- }
- wqe->wr.num_sge = j;
- }
- if (qp->ibqp.qp_type == IB_QPT_UC ||
- qp->ibqp.qp_type == IB_QPT_RC) {
- if (wqe->length > 0x80000000U)
- goto bail_inval_free;
- } else {
- struct hfi1_ah *ah = to_iah(ud_wr(wr)->ah);
-
- atomic_inc(&ah->refcount);
- }
- wqe->ssn = qp->s_ssn++;
- qp->s_head = next;
-
- return 0;
-
-bail_inval_free:
- /* release mr holds */
- while (j) {
- struct hfi1_sge *sge = &wqe->sg_list[--j];
-
- hfi1_put_mr(sge->mr);
- }
- return -EINVAL;
-}
-
-/**
- * post_send - post a send on a QP
- * @ibqp: the QP to post the send on
- * @wr: the list of work requests to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
- struct ib_send_wr **bad_wr)
-{
- struct hfi1_qp *qp = to_iqp(ibqp);
- int err = 0;
- int call_send;
- unsigned long flags;
- unsigned nreq = 0;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Check that state is OK to post send. */
- if (unlikely(!(ib_hfi1_state_ops[qp->state] & HFI1_POST_SEND_OK))) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return -EINVAL;
- }
-
- /* sq empty and not list -> call send */
- call_send = qp->s_head == qp->s_last && !wr->next;
-
- for (; wr; wr = wr->next) {
- err = post_one_send(qp, wr);
- if (unlikely(err)) {
- *bad_wr = wr;
- goto bail;
- }
- nreq++;
- }
-bail:
- spin_unlock_irqrestore(&qp->s_lock, flags);
- if (nreq && !call_send)
- _hfi1_schedule_send(qp);
- if (nreq && call_send)
- hfi1_do_send(&qp->s_iowait.iowork);
- return err;
-}
-
-/**
- * post_receive - post a receive on a QP
- * @ibqp: the QP to post the receive on
- * @wr: the WR to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr)
-{
- struct hfi1_qp *qp = to_iqp(ibqp);
- struct hfi1_rwq *wq = qp->r_rq.wq;
- unsigned long flags;
- int ret;
-
- /* Check that state is OK to post receive. */
- if (!(ib_hfi1_state_ops[qp->state] & HFI1_POST_RECV_OK) || !wq) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- for (; wr; wr = wr->next) {
- struct hfi1_rwqe *wqe;
- u32 next;
- int i;
-
- if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
- *bad_wr = wr;
- ret = -EINVAL;
- goto bail;
- }
-
- spin_lock_irqsave(&qp->r_rq.lock, flags);
- next = wq->head + 1;
- if (next >= qp->r_rq.size)
- next = 0;
- if (next == wq->tail) {
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- *bad_wr = wr;
- ret = -ENOMEM;
- goto bail;
- }
-
- wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
- wqe->wr_id = wr->wr_id;
- wqe->num_sge = wr->num_sge;
- for (i = 0; i < wr->num_sge; i++)
- wqe->sg_list[i] = wr->sg_list[i];
- /* Make sure queue entry is written before the head index. */
- smp_wmb();
- wq->head = next;
- spin_unlock_irqrestore(&qp->r_rq.lock, flags);
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
/*
* Make sure the QP is ready and able to accept the given opcode.
*/
@@ -587,18 +543,17 @@ static inline int qp_ok(int opcode, struct hfi1_packet *packet)
{
struct hfi1_ibport *ibp;
- if (!(ib_hfi1_state_ops[packet->qp->state] & HFI1_PROCESS_RECV_OK))
+ if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
goto dropit;
if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
(opcode == IB_OPCODE_CNP))
return 1;
dropit:
ibp = &packet->rcd->ppd->ibport_data;
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
return 0;
}
-
/**
* hfi1_ib_rcv - process an incoming packet
* @packet: data packet information
@@ -614,6 +569,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
u32 tlen = packet->tlen;
struct hfi1_pportdata *ppd = rcd->ppd;
struct hfi1_ibport *ibp = &ppd->ibport_data;
+ struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
unsigned long flags;
u32 qp_num;
int lnh;
@@ -622,9 +578,9 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
/* Check for GRH */
lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_BTH)
+ if (lnh == HFI1_LRH_BTH) {
packet->ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH) {
+ } else if (lnh == HFI1_LRH_GRH) {
u32 vtf;
packet->ohdr = &hdr->u.l.oth;
@@ -634,8 +590,9 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
goto drop;
packet->rcv_flags |= HFI1_HAS_GRH;
- } else
+ } else {
goto drop;
+ }
trace_input_ibhdr(rcd->dd, hdr);
@@ -643,17 +600,17 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
inc_opstats(tlen, &rcd->opstats->stats[opcode]);
/* Get the destination QP number. */
- qp_num = be32_to_cpu(packet->ohdr->bth[1]) & HFI1_QPN_MASK;
+ qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
lid = be16_to_cpu(hdr->lrh[1]);
- if (unlikely((lid >= HFI1_MULTICAST_LID_BASE) &&
- (lid != HFI1_PERMISSIVE_LID))) {
- struct hfi1_mcast *mcast;
- struct hfi1_mcast_qp *p;
+ if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
+ struct rvt_mcast *mcast;
+ struct rvt_mcast_qp *p;
if (lnh != HFI1_LRH_GRH)
goto drop;
- mcast = hfi1_mcast_find(ibp, &hdr->u.l.grh.dgid);
- if (mcast == NULL)
+ mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
+ if (!mcast)
goto drop;
list_for_each_entry_rcu(p, &mcast->qp_list, list) {
packet->qp = p->qp;
@@ -663,14 +620,14 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
}
/*
- * Notify hfi1_multicast_detach() if it is waiting for us
+ * Notify rvt_multicast_detach() if it is waiting for us
* to finish.
*/
if (atomic_dec_return(&mcast->refcount) <= 1)
wake_up(&mcast->wait);
} else {
rcu_read_lock();
- packet->qp = hfi1_lookup_qpn(ibp, qp_num);
+ packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
if (!packet->qp) {
rcu_read_unlock();
goto drop;
@@ -684,7 +641,7 @@ void hfi1_ib_rcv(struct hfi1_packet *packet)
return;
drop:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
}
/*
@@ -695,15 +652,17 @@ static void mem_timer(unsigned long data)
{
struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
struct list_head *list = &dev->memwait;
- struct hfi1_qp *qp = NULL;
+ struct rvt_qp *qp = NULL;
struct iowait *wait;
unsigned long flags;
+ struct hfi1_qp_priv *priv;
write_seqlock_irqsave(&dev->iowait_lock, flags);
if (!list_empty(list)) {
wait = list_first_entry(list, struct iowait, list);
- qp = container_of(wait, struct hfi1_qp, s_iowait);
- list_del_init(&qp->s_iowait.list);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
/* refcount held until actual wake up */
if (!list_empty(list))
mod_timer(&dev->mem_timer, jiffies + 1);
@@ -711,12 +670,12 @@ static void mem_timer(unsigned long data)
write_sequnlock_irqrestore(&dev->iowait_lock, flags);
if (qp)
- hfi1_qp_wakeup(qp, HFI1_S_WAIT_KMEM);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}
-void update_sge(struct hfi1_sge_state *ss, u32 length)
+void update_sge(struct rvt_sge_state *ss, u32 length)
{
- struct hfi1_sge *sge = &ss->sge;
+ struct rvt_sge *sge = &ss->sge;
sge->vaddr += length;
sge->length -= length;
@@ -725,7 +684,7 @@ void update_sge(struct hfi1_sge_state *ss, u32 length)
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= HFI1_SEGSZ) {
+ if (++sge->n >= RVT_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
return;
sge->n = 0;
@@ -735,143 +694,55 @@ void update_sge(struct hfi1_sge_state *ss, u32 length)
}
}
-static noinline struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
- struct hfi1_qp *qp)
-{
- struct verbs_txreq *tx;
- unsigned long flags;
-
- tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
- if (!tx) {
- spin_lock_irqsave(&qp->s_lock, flags);
- write_seqlock(&dev->iowait_lock);
- if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK &&
- list_empty(&qp->s_iowait.list)) {
- dev->n_txwait++;
- qp->s_flags |= HFI1_S_WAIT_TX;
- list_add_tail(&qp->s_iowait.list, &dev->txwait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_TX);
- atomic_inc(&qp->refcount);
- }
- qp->s_flags &= ~HFI1_S_BUSY;
- write_sequnlock(&dev->iowait_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- tx = ERR_PTR(-EBUSY);
- }
- return tx;
-}
-
-static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
- struct hfi1_qp *qp)
-{
- struct verbs_txreq *tx;
-
- tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
- if (!tx) {
- /* call slow path to get the lock */
- tx = __get_txreq(dev, qp);
- if (IS_ERR(tx))
- return tx;
- }
- tx->qp = qp;
- return tx;
-}
-
-void hfi1_put_txreq(struct verbs_txreq *tx)
-{
- struct hfi1_ibdev *dev;
- struct hfi1_qp *qp;
- unsigned long flags;
- unsigned int seq;
-
- qp = tx->qp;
- dev = to_idev(qp->ibqp.device);
-
- if (tx->mr) {
- hfi1_put_mr(tx->mr);
- tx->mr = NULL;
- }
- sdma_txclean(dd_from_dev(dev), &tx->txreq);
-
- /* Free verbs_txreq and return to slab cache */
- kmem_cache_free(dev->verbs_txreq_cache, tx);
-
- do {
- seq = read_seqbegin(&dev->iowait_lock);
- if (!list_empty(&dev->txwait)) {
- struct iowait *wait;
-
- write_seqlock_irqsave(&dev->iowait_lock, flags);
- /* Wake up first QP wanting a free struct */
- wait = list_first_entry(&dev->txwait, struct iowait,
- list);
- qp = container_of(wait, struct hfi1_qp, s_iowait);
- list_del_init(&qp->s_iowait.list);
- /* refcount held until actual wake up */
- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
- hfi1_qp_wakeup(qp, HFI1_S_WAIT_TX);
- break;
- }
- } while (read_seqretry(&dev->iowait_lock, seq));
-}
-
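The removed hfi1_put_txreq() wakeup loop (the txreq cache handling presumably moves to a common verbs_txreq file, per verbs_txreq_init()/verbs_txreq_exit() later in this patch) used the classic seqlock idiom: a lock-free read-side peek, retried if a writer raced, with the write lock taken only when there is actually a waiter to wake. A kernel-style sketch of that idiom:

#include <linux/list.h>
#include <linux/seqlock.h>

/* Peek read-side; take the write lock only when the list is non-empty. */
static void wake_first_waiter(seqlock_t *lock, struct list_head *waiters)
{
	unsigned int seq;
	unsigned long flags;

	do {
		seq = read_seqbegin(lock);
		if (list_empty(waiters))
			continue;	/* no waiters seen; retry only if raced */
		write_seqlock_irqsave(lock, flags);
		if (!list_empty(waiters))
			list_del_init(waiters->next);	/* claim the first entry */
		write_sequnlock_irqrestore(lock, flags);
		break;
	} while (read_seqretry(lock, seq));
}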
/*
* This is called with progress side lock held.
*/
/* New API */
static void verbs_sdma_complete(
struct sdma_txreq *cookie,
- int status,
- int drained)
+ int status)
{
struct verbs_txreq *tx =
container_of(cookie, struct verbs_txreq, txreq);
- struct hfi1_qp *qp = tx->qp;
+ struct rvt_qp *qp = tx->qp;
spin_lock(&qp->s_lock);
- if (tx->wqe)
+ if (tx->wqe) {
hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
- else if (qp->ibqp.qp_type == IB_QPT_RC) {
+ } else if (qp->ibqp.qp_type == IB_QPT_RC) {
struct hfi1_ib_header *hdr;
hdr = &tx->phdr.hdr;
hfi1_rc_send_complete(qp, hdr);
}
- if (drained) {
- /*
- * This happens when the send engine notes
- * a QP in the error state and cannot
- * do the flush work until that QP's
- * sdma work has finished.
- */
- if (qp->s_flags & HFI1_S_WAIT_DMA) {
- qp->s_flags &= ~HFI1_S_WAIT_DMA;
- hfi1_schedule_send(qp);
- }
- }
spin_unlock(&qp->s_lock);
hfi1_put_txreq(tx);
}
-static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
+static int wait_kmem(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp,
+ struct hfi1_pkt_state *ps)
{
+ struct hfi1_qp_priv *priv = qp->priv;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
- if (list_empty(&qp->s_iowait.list)) {
+ list_add_tail(&ps->s_txreq->txreq.list,
+ &priv->s_iowait.tx_head);
+ if (list_empty(&priv->s_iowait.list)) {
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= HFI1_S_WAIT_KMEM;
- list_add_tail(&qp->s_iowait.list, &dev->memwait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_KMEM);
+ qp->s_flags |= RVT_S_WAIT_KMEM;
+ list_add_tail(&priv->s_iowait.list, &dev->memwait);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
atomic_inc(&qp->refcount);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -884,14 +755,14 @@ static int wait_kmem(struct hfi1_ibdev *dev, struct hfi1_qp *qp)
*
* Add failures will revert the sge cursor
*/
-static int build_verbs_ulp_payload(
+static noinline int build_verbs_ulp_payload(
struct sdma_engine *sde,
- struct hfi1_sge_state *ss,
+ struct rvt_sge_state *ss,
u32 length,
struct verbs_txreq *tx)
{
- struct hfi1_sge *sg_list = ss->sg_list;
- struct hfi1_sge sge = ss->sge;
+ struct rvt_sge *sg_list = ss->sg_list;
+ struct rvt_sge sge = ss->sge;
u8 num_sge = ss->num_sge;
u32 len;
int ret = 0;
@@ -928,23 +799,21 @@ bail_txadd:
* NOTE: DMA mapping is held in the tx until completed in the ring or
* the tx desc is freed without having been submitted to the ring
*
- * This routine insures the following all the helper routine
- * calls succeed.
+ * This routine ensures all the helper routine calls succeed.
*/
/* New API */
static int build_verbs_tx_desc(
struct sdma_engine *sde,
- struct hfi1_sge_state *ss,
+ struct rvt_sge_state *ss,
u32 length,
struct verbs_txreq *tx,
struct ahg_ib_header *ahdr,
u64 pbc)
{
int ret = 0;
- struct hfi1_pio_header *phdr;
+ struct hfi1_pio_header *phdr = &tx->phdr;
u16 hdrbytes = tx->hdr_dwords << 2;
- phdr = &tx->phdr;
if (!ahdr->ahgcount) {
ret = sdma_txinit_ahg(
&tx->txreq,
@@ -958,29 +827,14 @@ static int build_verbs_tx_desc(
if (ret)
goto bail_txadd;
phdr->pbc = cpu_to_le64(pbc);
- memcpy(&phdr->hdr, &ahdr->ibh, hdrbytes - sizeof(phdr->pbc));
- /* add the header */
ret = sdma_txadd_kvaddr(
sde->dd,
&tx->txreq,
- &tx->phdr,
- tx->hdr_dwords << 2);
+ phdr,
+ hdrbytes);
if (ret)
goto bail_txadd;
} else {
- struct hfi1_other_headers *sohdr = &ahdr->ibh.u.oth;
- struct hfi1_other_headers *dohdr = &phdr->hdr.u.oth;
-
- /* needed in rc_send_complete() */
- phdr->hdr.lrh[0] = ahdr->ibh.lrh[0];
- if ((be16_to_cpu(phdr->hdr.lrh[0]) & 3) == HFI1_LRH_GRH) {
- sohdr = &ahdr->ibh.u.l.oth;
- dohdr = &phdr->hdr.u.l.oth;
- }
- /* opcode */
- dohdr->bth[0] = sohdr->bth[0];
- /* PSN/ACK */
- dohdr->bth[2] = sohdr->bth[2];
ret = sdma_txinit_ahg(
&tx->txreq,
ahdr->tx_flags,
@@ -1001,80 +855,75 @@ bail_txadd:
return ret;
}
-int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
- struct ahg_ib_header *ahdr = qp->s_hdr;
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct ahg_ib_header *ahdr = priv->s_hdr;
u32 hdrwords = qp->s_hdrwords;
- struct hfi1_sge_state *ss = qp->s_cur_sge;
+ struct rvt_sge_state *ss = qp->s_cur_sge;
u32 len = qp->s_cur_size;
u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
struct hfi1_ibdev *dev = ps->dev;
struct hfi1_pportdata *ppd = ps->ppd;
struct verbs_txreq *tx;
- struct sdma_txreq *stx;
u64 pbc_flags = 0;
- u8 sc5 = qp->s_sc;
+ u8 sc5 = priv->s_sc;
+
int ret;
- if (!list_empty(&qp->s_iowait.tx_head)) {
- stx = list_first_entry(
- &qp->s_iowait.tx_head,
- struct sdma_txreq,
- list);
- list_del_init(&stx->list);
- tx = container_of(stx, struct verbs_txreq, txreq);
- ret = sdma_send_txreq(tx->sde, &qp->s_iowait, stx);
- if (unlikely(ret == -ECOMM))
+ tx = ps->s_txreq;
+ if (!sdma_txreq_built(&tx->txreq)) {
+ if (likely(pbc == 0)) {
+ u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
+ /* No vl15 here */
+ /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
+ pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
+
+ pbc = create_pbc(ppd,
+ pbc_flags,
+ qp->srate_mbps,
+ vl,
+ plen);
+ }
+ tx->wqe = qp->s_wqe;
+ ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
+ if (unlikely(ret))
+ goto bail_build;
+ }
+ ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
+ if (unlikely(ret < 0)) {
+ if (ret == -ECOMM)
goto bail_ecomm;
return ret;
}
-
- tx = get_txreq(dev, qp);
- if (IS_ERR(tx))
- goto bail_tx;
-
- tx->sde = qp->s_sde;
-
- if (likely(pbc == 0)) {
- u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
- /* No vl15 here */
- /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
- pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
-
- pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
- }
- tx->wqe = qp->s_wqe;
- tx->mr = qp->s_rdma_mr;
- if (qp->s_rdma_mr)
- qp->s_rdma_mr = NULL;
- tx->hdr_dwords = hdrwords + 2;
- ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
- if (unlikely(ret))
- goto bail_build;
- trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
- ret = sdma_send_txreq(tx->sde, &qp->s_iowait, &tx->txreq);
- if (unlikely(ret == -ECOMM))
- goto bail_ecomm;
+ trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
+ &ps->s_txreq->phdr.hdr);
return ret;
bail_ecomm:
/* The current one got "sent" */
return 0;
bail_build:
- /* kmalloc or mapping fail */
- hfi1_put_txreq(tx);
- return wait_kmem(dev, qp);
-bail_tx:
- return PTR_ERR(tx);
+ ret = wait_kmem(dev, qp, ps);
+ if (!ret) {
+ /* free txreq - bad state */
+ hfi1_put_txreq(ps->s_txreq);
+ ps->s_txreq = NULL;
+ }
+ return ret;
}
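Two bits of arithmetic above are easy to get wrong: plen counts dwords (header dwords, payload rounded up to dwords, plus 2 for the 64-bit PBC), and the PBC_DC_INFO flag is simply SC[4] of the 5-bit SC. A standalone check of both (the shift value here is assumed for illustration only):

#include <stdint.h>
#include <stdio.h>

#define PBC_DC_INFO_SHIFT 30	/* assumed for illustration only */

int main(void)
{
	uint8_t sc5 = 0x13;		/* an SC value with bit 4 set */
	uint32_t hdrwords = 7, len = 9;	/* header dwords, payload bytes */

	/* plen = header dwords + payload rounded up to dwords + 2 for PBC */
	uint32_t plen = hdrwords + ((len + 3) >> 2) + 2;

	/* set PBC_DC_INFO (aka SC[4]) iff bit 4 of sc5 is set */
	uint64_t pbc_flags = (uint64_t)(!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;

	printf("plen=%u dc_info=%u\n", (unsigned)plen,
	       (unsigned)(pbc_flags >> PBC_DC_INFO_SHIFT));	/* 12 1 */
	return 0;
}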
/*
* If we are now in the error state, return zero to flush the
* send work request.
*/
-static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc)
+static int pio_wait(struct rvt_qp *qp,
+ struct send_context *sc,
+ struct hfi1_pkt_state *ps,
+ u32 flag)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_devdata *dd = sc->dd;
struct hfi1_ibdev *dev = &dd->verbs_dev;
unsigned long flags;
@@ -1087,74 +936,89 @@ static int no_bufs_available(struct hfi1_qp *qp, struct send_context *sc)
* enabling the PIO avail interrupt.
*/
spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK) {
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
- if (list_empty(&qp->s_iowait.list)) {
+ list_add_tail(&ps->s_txreq->txreq.list,
+ &priv->s_iowait.tx_head);
+ if (list_empty(&priv->s_iowait.list)) {
struct hfi1_ibdev *dev = &dd->verbs_dev;
int was_empty;
+ dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
+ dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
- dev->n_piowait++;
- qp->s_flags |= HFI1_S_WAIT_PIO;
+ qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
- list_add_tail(&qp->s_iowait.list, &sc->piowait);
- trace_hfi1_qpsleep(qp, HFI1_S_WAIT_PIO);
+ list_add_tail(&priv->s_iowait.list, &sc->piowait);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
atomic_inc(&qp->refcount);
/* counting: only call wantpiobuf_intr if first user */
if (was_empty)
hfi1_sc_wantpiobuf_intr(sc, 1);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~HFI1_S_BUSY;
+ qp->s_flags &= ~RVT_S_BUSY;
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
-struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5)
+static void verbs_pio_complete(void *arg, int code)
{
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct hfi1_pportdata *ppd = dd->pport + (qp->port_num - 1);
- u8 vl;
+ struct rvt_qp *qp = (struct rvt_qp *)arg;
+ struct hfi1_qp_priv *priv = qp->priv;
- vl = sc_to_vlt(dd, sc5);
- if (vl >= ppd->vls_supported && vl != 15)
- return NULL;
- return dd->vld[vl].sc;
+ if (iowait_pio_dec(&priv->s_iowait))
+ iowait_drain_wakeup(&priv->s_iowait);
}
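verbs_pio_complete() relies on iowait_pio_inc()/iowait_pio_dec() keeping a per-QP count of in-flight PIO buffers so that a drain can sleep until the count hits zero; a sketch of that counting scheme (struct and field names assumed):

#include <linux/atomic.h>
#include <linux/wait.h>

struct pio_track {
	atomic_t pio_busy;		/* in-flight PIO buffers */
	wait_queue_head_t wait_pio;	/* drainers sleep here */
};

static void pio_inc(struct pio_track *t)
{
	atomic_inc(&t->pio_busy);
}

/* Completion callback: the last buffer back wakes anyone draining this QP. */
static void pio_complete(struct pio_track *t)
{
	if (atomic_dec_and_test(&t->pio_busy))
		wake_up(&t->wait_pio);
}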
-int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc)
{
- struct ahg_ib_header *ahdr = qp->s_hdr;
+ struct hfi1_qp_priv *priv = qp->priv;
u32 hdrwords = qp->s_hdrwords;
- struct hfi1_sge_state *ss = qp->s_cur_sge;
+ struct rvt_sge_state *ss = qp->s_cur_sge;
u32 len = qp->s_cur_size;
u32 dwords = (len + 3) >> 2;
u32 plen = hdrwords + dwords + 2; /* includes pbc */
struct hfi1_pportdata *ppd = ps->ppd;
- u32 *hdr = (u32 *)&ahdr->ibh;
+ u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
u64 pbc_flags = 0;
- u32 sc5;
+ u8 sc5;
unsigned long flags = 0;
struct send_context *sc;
struct pio_buf *pbuf;
int wc_status = IB_WC_SUCCESS;
+ int ret = 0;
+ pio_release_cb cb = NULL;
+
+ /* only RC/UC use complete */
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ cb = verbs_pio_complete;
+ break;
+ default:
+ break;
+ }
/* vl15 special case taken care of in ud.c */
- sc5 = qp->s_sc;
- sc = qp_to_send_context(qp, sc5);
+ sc5 = priv->s_sc;
+ sc = ps->s_txreq->psc;
- if (!sc)
- return -EINVAL;
if (likely(pbc == 0)) {
- u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
+ u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
}
- pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
- if (unlikely(pbuf == NULL)) {
+ if (cb)
+ iowait_pio_inc(&priv->s_iowait);
+ pbuf = sc_buffer_alloc(sc, plen, cb, qp);
+ if (unlikely(!pbuf)) {
+ if (cb)
+ verbs_pio_complete(qp, 0);
if (ppd->host_link_state != HLS_UP_ACTIVE) {
/*
* If we have filled the PIO buffers to capacity and are
@@ -1174,7 +1038,12 @@ int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
* so lets continue to queue the request.
*/
hfi1_cdbg(PIO, "alloc failed. state active, queuing");
- return no_bufs_available(qp, sc);
+ ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
+ if (!ret)
+ /* txreq not queued - free */
+ goto bail;
+ /* tx consumed in wait */
+ return ret;
}
}
@@ -1182,7 +1051,7 @@ int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
} else {
if (ss) {
- seg_pio_copy_start(pbuf, pbc, hdr, hdrwords*4);
+ seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
while (len) {
void *addr = ss->sge.vaddr;
u32 slen = ss->sge.length;
@@ -1197,12 +1066,8 @@ int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
}
}
- trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &ahdr->ibh);
-
- if (qp->s_rdma_mr) {
- hfi1_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
+ trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
+ &ps->s_txreq->phdr.hdr);
pio_bail:
if (qp->s_wqe) {
@@ -1211,10 +1076,15 @@ pio_bail:
spin_unlock_irqrestore(&qp->s_lock, flags);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_rc_send_complete(qp, &ahdr->ibh);
+ hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
- return 0;
+
+ ret = 0;
+
+bail:
+ hfi1_put_txreq(ps->s_txreq);
+ return ret;
}
/*
@@ -1247,13 +1117,14 @@ static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
*/
static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
struct hfi1_ib_header *hdr,
- struct hfi1_qp *qp)
+ struct rvt_qp *qp)
{
+ struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_other_headers *ohdr;
struct hfi1_devdata *dd;
int i = 0;
u16 pkey;
- u8 lnh, sc5 = qp->s_sc;
+ u8 lnh, sc5 = priv->s_sc;
if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
return 0;
@@ -1271,14 +1142,14 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
goto bad;
-
/* Is the pkey = 0x0, or 0x8000? */
if ((pkey & PKEY_LOW_15_MASK) == 0)
goto bad;
/* The most likely matching pkey has index qp->s_pkey_index */
if (unlikely(!egress_pkey_matches_entry(pkey,
- ppd->pkeys[qp->s_pkey_index]))) {
+ ppd->pkeys
+ [qp->s_pkey_index]))) {
/* no match - try the entire table */
for (; i < MAX_PKEY_VALUES; i++) {
if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
@@ -1302,31 +1173,65 @@ bad:
}
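egress_pkey_matches_entry() itself is not shown in this hunk; the usual IB rule is that two PKeys match when their low 15 bits are equal and the table entry is valid (non-zero), with bit 15 carrying full/limited membership. A sketch under that assumption:

#include <stdint.h>
#include <stdio.h>

#define PKEY_LOW_15_MASK 0x7fff

/* Match on the low 15 bits; a zero entry is invalid and never matches. */
static int pkey_matches(uint16_t pkey, uint16_t ent)
{
	return (ent & PKEY_LOW_15_MASK) &&
	       ((pkey & PKEY_LOW_15_MASK) == (ent & PKEY_LOW_15_MASK));
}

int main(void)
{
	printf("%d %d\n",
	       pkey_matches(0x8001, 0x0001),	/* 1: same low 15 bits */
	       pkey_matches(0x8001, 0x0000));	/* 0: invalid entry */
	return 0;
}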
/**
+ * get_send_routine - choose an egress routine
+ *
+ * Choose an egress routine based on QP type
+ * and packet size
+ */
+static inline send_routine get_send_routine(struct rvt_qp *qp,
+ struct verbs_txreq *tx)
+{
+ struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_ib_header *h = &tx->phdr.hdr;
+
+ if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
+ return dd->process_pio_send;
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_SMI:
+ return dd->process_pio_send;
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ break;
+ case IB_QPT_RC:
+ if (piothreshold &&
+ qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
+ (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) &&
+ iowait_sdma_pending(&priv->s_iowait) == 0 &&
+ !sdma_txreq_built(&tx->txreq))
+ return dd->process_pio_send;
+ break;
+ case IB_QPT_UC:
+ if (piothreshold &&
+ qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
+ (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) &&
+ iowait_sdma_pending(&priv->s_iowait) == 0 &&
+ !sdma_txreq_built(&tx->txreq))
+ return dd->process_pio_send;
+ break;
+ default:
+ break;
+ }
+ return dd->process_dma_send;
+}
+
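The PIO-eligibility test in get_send_routine() encodes the allowed opcodes as a bitmask indexed by the low 5 opcode bits, so membership is one shift and one AND. A standalone demo of that test (the mask contents here are illustrative, not the real rc_only_opcode):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Illustrative mask: pretend only opcodes 0x00 and 0x04 may use PIO. */
static const uint32_t demo_only_opcode = BIT(0x00) | BIT(0x04);

static int pio_eligible(uint8_t opcode)
{
	/* same membership test as get_send_routine() above */
	return !!(BIT(opcode & 0x1f) & demo_only_opcode);
}

int main(void)
{
	printf("%d %d\n", pio_eligible(0x04), pio_eligible(0x0a));	/* 1 0 */
	return 0;
}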
+/**
* hfi1_verbs_send - send a packet
* @qp: the QP to send on
* @ps: the state of the packet to send
*
* Return zero if packet is sent or queued OK.
- * Return non-zero and clear qp->s_flags HFI1_S_BUSY otherwise.
+ * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
*/
-int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps)
+int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct ahg_ib_header *ahdr = qp->s_hdr;
+ struct hfi1_qp_priv *priv = qp->priv;
+ send_routine sr;
int ret;
- int pio = 0;
- unsigned long flags = 0;
-
- /*
- * VL15 packets (IB_QPT_SMI) will always use PIO, so we
- * can defer SDMA restart until link goes ACTIVE without
- * worrying about just how we got there.
- */
- if ((qp->ibqp.qp_type == IB_QPT_SMI) ||
- !(dd->flags & HFI1_HAS_SEND_DMA))
- pio = 1;
- ret = egress_pkey_check(dd->pport, &ahdr->ibh, qp);
+ sr = get_send_routine(qp, ps->s_txreq);
+ ret = egress_pkey_check(dd->pport, &ps->s_txreq->phdr.hdr, qp);
if (unlikely(ret)) {
/*
* The value we are returning here does not get propagated to
@@ -1336,7 +1241,9 @@ int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps)
* mechanism for handling the errors. So for SDMA we can just
* return.
*/
- if (pio) {
+ if (sr == dd->process_pio_send) {
+ unsigned long flags;
+
hfi1_cdbg(PIO, "%s() Failed. Completing with err",
__func__);
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1345,71 +1252,57 @@ int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps)
}
return -EINVAL;
}
-
- if (pio) {
- ret = dd->process_pio_send(qp, ps, 0);
- } else {
-#ifdef CONFIG_SDMA_VERBOSITY
- dd_dev_err(dd, "CONFIG SDMA %s:%d %s()\n",
- slashstrip(__FILE__), __LINE__, __func__);
- dd_dev_err(dd, "SDMA hdrwords = %u, len = %u\n", qp->s_hdrwords,
- qp->s_cur_size);
-#endif
- ret = dd->process_dma_send(qp, ps, 0);
- }
-
- return ret;
+ if (sr == dd->process_dma_send && iowait_pio_pending(&priv->s_iowait))
+ return pio_wait(qp,
+ ps->s_txreq->psc,
+ ps,
+ RVT_S_WAIT_PIO_DRAIN);
+ return sr(qp, ps, 0);
}
-static int query_device(struct ib_device *ibdev,
- struct ib_device_attr *props,
- struct ib_udata *uhw)
+/**
+ * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
+ * @dd: the device data structure
+ */
+static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_ibdev *dev = to_idev(ibdev);
-
- if (uhw->inlen || uhw->outlen)
- return -EINVAL;
- memset(props, 0, sizeof(*props));
-
- props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
- IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
-
- props->page_size_cap = PAGE_SIZE;
- props->vendor_id =
- dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
- props->vendor_part_id = dd->pcidev->device;
- props->hw_ver = dd->minrev;
- props->sys_image_guid = ib_hfi1_sys_image_guid;
- props->max_mr_size = ~0ULL;
- props->max_qp = hfi1_max_qps;
- props->max_qp_wr = hfi1_max_qp_wrs;
- props->max_sge = hfi1_max_sges;
- props->max_sge_rd = hfi1_max_sges;
- props->max_cq = hfi1_max_cqs;
- props->max_ah = hfi1_max_ahs;
- props->max_cqe = hfi1_max_cqes;
- props->max_mr = dev->lk_table.max;
- props->max_fmr = dev->lk_table.max;
- props->max_map_per_fmr = 32767;
- props->max_pd = hfi1_max_pds;
- props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
- props->max_qp_init_rd_atom = 255;
- /* props->max_res_rd_atom */
- props->max_srq = hfi1_max_srqs;
- props->max_srq_wr = hfi1_max_srq_wrs;
- props->max_srq_sge = hfi1_max_srq_sges;
- /* props->local_ca_ack_delay */
- props->atomic_cap = IB_ATOMIC_GLOB;
- props->max_pkeys = hfi1_get_npkeys(dd);
- props->max_mcast_grp = hfi1_max_mcast_grps;
- props->max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
- props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
- props->max_mcast_grp;
-
- return 0;
+ struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
+
+ memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
+
+ rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
+ IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
+ rdi->dparms.props.page_size_cap = PAGE_SIZE;
+ rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
+ rdi->dparms.props.vendor_part_id = dd->pcidev->device;
+ rdi->dparms.props.hw_ver = dd->minrev;
+ rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
+ rdi->dparms.props.max_mr_size = ~0ULL;
+ rdi->dparms.props.max_qp = hfi1_max_qps;
+ rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
+ rdi->dparms.props.max_sge = hfi1_max_sges;
+ rdi->dparms.props.max_sge_rd = hfi1_max_sges;
+ rdi->dparms.props.max_cq = hfi1_max_cqs;
+ rdi->dparms.props.max_ah = hfi1_max_ahs;
+ rdi->dparms.props.max_cqe = hfi1_max_cqes;
+ rdi->dparms.props.max_mr = rdi->lkey_table.max;
+ rdi->dparms.props.max_fmr = rdi->lkey_table.max;
+ rdi->dparms.props.max_map_per_fmr = 32767;
+ rdi->dparms.props.max_pd = hfi1_max_pds;
+ rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
+ rdi->dparms.props.max_qp_init_rd_atom = 255;
+ rdi->dparms.props.max_srq = hfi1_max_srqs;
+ rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
+ rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
+ rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
+ rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
+ rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
+ rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
+ rdi->dparms.props.max_total_mcast_qp_attach =
+ rdi->dparms.props.max_mcast_qp_attach *
+ rdi->dparms.props.max_mcast_grp;
}
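One derived field above is worth calling out: max_total_mcast_qp_attach is the product of the per-group attach limit and the group limit. A trivial standalone check (the two limits are assumed sample values, not the driver defaults):

#include <stdio.h>

int main(void)
{
	unsigned long max_mcast_grp = 16384, max_mcast_qp_attach = 16;

	/* total attach limit = per-group limit * number of groups */
	printf("%lu\n", max_mcast_qp_attach * max_mcast_grp);	/* 262144 */
	return 0;
}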
static inline u16 opa_speed_to_ib(u16 in)
@@ -1443,33 +1336,24 @@ static inline u16 opa_width_to_ib(u16 in)
}
}
-static int query_port(struct ib_device *ibdev, u8 port,
+static int query_port(struct rvt_dev_info *rdi, u8 port_num,
struct ib_port_attr *props)
{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+ struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
u16 lid = ppd->lid;
- memset(props, 0, sizeof(*props));
props->lid = lid ? lid : 0;
props->lmc = ppd->lmc;
- props->sm_lid = ibp->sm_lid;
- props->sm_sl = ibp->sm_sl;
/* OPA logical states match IB logical states */
props->state = driver_lstate(ppd);
props->phys_state = hfi1_ibphys_portstate(ppd);
- props->port_cap_flags = ibp->port_cap_flags;
props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
- props->max_msg_sz = 0x80000000;
- props->pkey_tbl_len = hfi1_get_npkeys(dd);
- props->bad_pkey_cntr = ibp->pkey_violations;
- props->qkey_viol_cntr = ibp->qkey_violations;
props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
/* see rate_show() in ib core/sysfs.c */
props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
props->max_vl_num = ppd->vls_supported;
- props->init_type_reply = 0;
/* Once we are a "first class" citizen and have added the OPA MTUs to
* the core we can advertise the larger MTU enum to the ULPs, for now
@@ -1483,27 +1367,6 @@ static int query_port(struct ib_device *ibdev, u8 port,
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
- props->subnet_timeout = ibp->subnet_timeout;
-
- return 0;
-}
-
-static int port_immutable(struct ib_device *ibdev, u8 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- err = query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- memset(immutable, 0, sizeof(*immutable));
-
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
- immutable->max_mad_size = OPA_MGMT_MAD_SIZE;
return 0;
}
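mtu_to_enum() (not shown here) maps a byte count onto the IB MTU enum with a fallback for non-IB sizes, which is why active_mtu falls back to max_mtu when ppd->ibmtu is not a valid IB MTU. A userspace sketch of that mapping (enum values mirror IB_MTU_256..IB_MTU_4096):

#include <stdio.h>

enum demo_ib_mtu { DEMO_MTU_256 = 1, DEMO_MTU_512, DEMO_MTU_1024,
		   DEMO_MTU_2048, DEMO_MTU_4096 };

/* Map an MTU in bytes onto the enum; fall back when out of range. */
static enum demo_ib_mtu mtu_to_enum_demo(unsigned mtu, enum demo_ib_mtu dflt)
{
	switch (mtu) {
	case 256:  return DEMO_MTU_256;
	case 512:  return DEMO_MTU_512;
	case 1024: return DEMO_MTU_1024;
	case 2048: return DEMO_MTU_2048;
	case 4096: return DEMO_MTU_4096;
	default:   return dflt;
	}
}

int main(void)
{
	printf("%d %d\n", mtu_to_enum_demo(4096, DEMO_MTU_2048),
	       mtu_to_enum_demo(10240, DEMO_MTU_2048));	/* 5 4 */
	return 0;
}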
@@ -1547,102 +1410,31 @@ bail:
return ret;
}
-static int modify_port(struct ib_device *ibdev, u8 port,
- int port_modify_mask, struct ib_port_modify *props)
-{
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- int ret = 0;
-
- ibp->port_cap_flags |= props->set_port_cap_mask;
- ibp->port_cap_flags &= ~props->clr_port_cap_mask;
- if (props->set_port_cap_mask || props->clr_port_cap_mask)
- hfi1_cap_mask_chg(ibp);
- if (port_modify_mask & IB_PORT_SHUTDOWN) {
- set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
- OPA_LINKDOWN_REASON_UNKNOWN);
- ret = set_link_state(ppd, HLS_DN_DOWNDEF);
- }
- if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
- ibp->qkey_violations = 0;
- return ret;
-}
-
-static int query_gid(struct ib_device *ibdev, u8 port,
- int index, union ib_gid *gid)
+static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- int ret = 0;
-
- if (!port || port > dd->num_pports)
- ret = -EINVAL;
- else {
- struct hfi1_ibport *ibp = to_iport(ibdev, port);
- struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
- gid->global.subnet_prefix = ibp->gid_prefix;
- if (index == 0)
- gid->global.interface_id = cpu_to_be64(ppd->guid);
- else if (index < HFI1_GUIDS_PER_PORT)
- gid->global.interface_id = ibp->guids[index - 1];
- else
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static struct ib_pd *alloc_pd(struct ib_device *ibdev,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-{
- struct hfi1_ibdev *dev = to_idev(ibdev);
- struct hfi1_pd *pd;
- struct ib_pd *ret;
-
- /*
- * This is actually totally arbitrary. Some correctness tests
- * assume there's a maximum number of PDs that can be allocated.
- * We don't actually have this limit, but we fail the test if
- * we allow allocations of more than we report for this value.
- */
-
- pd = kmalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock(&dev->n_pds_lock);
- if (dev->n_pds_allocated == hfi1_max_pds) {
- spin_unlock(&dev->n_pds_lock);
- kfree(pd);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_pds_allocated++;
- spin_unlock(&dev->n_pds_lock);
-
- /* ib_alloc_pd() will initialize pd->ibpd. */
- pd->user = udata != NULL;
-
- ret = &pd->ibpd;
+ struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
+ struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
+ struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
+ int ret;
-bail:
+ set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
+ OPA_LINKDOWN_REASON_UNKNOWN);
+ ret = set_link_state(ppd, HLS_DN_DOWNDEF);
return ret;
}
-static int dealloc_pd(struct ib_pd *ibpd)
+static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
+ int guid_index, __be64 *guid)
{
- struct hfi1_pd *pd = to_ipd(ibpd);
- struct hfi1_ibdev *dev = to_idev(ibpd->device);
-
- spin_lock(&dev->n_pds_lock);
- dev->n_pds_allocated--;
- spin_unlock(&dev->n_pds_lock);
+ struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- kfree(pd);
+ if (guid_index == 0)
+ *guid = cpu_to_be64(ppd->guid);
+ else if (guid_index < HFI1_GUIDS_PER_PORT)
+ *guid = ibp->guids[guid_index - 1];
+ else
+ return -EINVAL;
return 0;
}
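The GUID indexing convention above (index 0 is the port GUID from ppd->guid, indices 1..HFI1_GUIDS_PER_PORT-1 come from the per-port table) is easy to demo standalone (table size and values assumed):

#include <stdint.h>
#include <stdio.h>

#define DEMO_GUIDS_PER_PORT 3	/* assumed table size for illustration */

/* Index 0 is the port GUID; the rest come from the extra-GUID table. */
static int demo_get_guid(uint64_t port_guid, const uint64_t *extra,
			 int index, uint64_t *out)
{
	if (index == 0)
		*out = port_guid;
	else if (index < DEMO_GUIDS_PER_PORT)
		*out = extra[index - 1];
	else
		return -1;	/* out of range, like -EINVAL above */
	return 0;
}

int main(void)
{
	uint64_t extra[DEMO_GUIDS_PER_PORT - 1] = { 0x1111, 0x2222 };
	uint64_t g;

	if (!demo_get_guid(0xabcd, extra, 1, &g))
		printf("0x%llx\n", (unsigned long long)g);	/* 0x1111 */
	return 0;
}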
@@ -1657,101 +1449,57 @@ u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
return ibp->sl_to_sc[ah->sl];
}
-int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
+static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd;
struct hfi1_devdata *dd;
u8 sc5;
- /* A multicast address requires a GRH (see ch. 8.4.1). */
- if (ah_attr->dlid >= HFI1_MULTICAST_LID_BASE &&
- ah_attr->dlid != HFI1_PERMISSIVE_LID &&
- !(ah_attr->ah_flags & IB_AH_GRH))
- goto bail;
- if ((ah_attr->ah_flags & IB_AH_GRH) &&
- ah_attr->grh.sgid_index >= HFI1_GUIDS_PER_PORT)
- goto bail;
- if (ah_attr->dlid == 0)
- goto bail;
- if (ah_attr->port_num < 1 ||
- ah_attr->port_num > ibdev->phys_port_cnt)
- goto bail;
- if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
- ib_rate_to_mbps(ah_attr->static_rate) < 0)
- goto bail;
- if (ah_attr->sl >= OPA_MAX_SLS)
- goto bail;
/* test the mapping for validity */
ibp = to_iport(ibdev, ah_attr->port_num);
ppd = ppd_from_ibp(ibp);
sc5 = ibp->sl_to_sc[ah_attr->sl];
dd = dd_from_ppd(ppd);
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
- goto bail;
+ return -EINVAL;
return 0;
-bail:
- return -EINVAL;
}
-/**
- * create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- *
- * This may be called from interrupt context.
- */
-static struct ib_ah *create_ah(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr)
+static void hfi1_notify_new_ah(struct ib_device *ibdev,
+ struct ib_ah_attr *ah_attr,
+ struct rvt_ah *ah)
{
- struct hfi1_ah *ah;
- struct ib_ah *ret;
- struct hfi1_ibdev *dev = to_idev(pd->device);
- unsigned long flags;
-
- if (hfi1_check_ah(pd->device, ah_attr)) {
- ret = ERR_PTR(-EINVAL);
- goto bail;
- }
-
- ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- if (dev->n_ahs_allocated == hfi1_max_ahs) {
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
- kfree(ah);
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- dev->n_ahs_allocated++;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- /* ib_create_ah() will initialize ah->ibah. */
- ah->attr = *ah_attr;
- atomic_set(&ah->refcount, 0);
+ struct hfi1_ibport *ibp;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
+ u8 sc5;
- ret = &ah->ibah;
+ /*
+ * Do not trust reading anything from rvt_ah at this point as it is not
+ * fully set up yet. We can, however, modify the fields we need to set.
+ */
-bail:
- return ret;
+ ibp = to_iport(ibdev, ah_attr->port_num);
+ ppd = ppd_from_ibp(ibp);
+ sc5 = ibp->sl_to_sc[ah->attr.sl];
+ dd = dd_from_ppd(ppd);
+ ah->vl = sc_to_vlt(dd, sc5);
+ if (ah->vl < num_vls || ah->vl == 15)
+ ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}
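Caching ah->log_pmtu as ilog2() of the VL's MTU works because the valid MTUs are powers of two, so the base-2 log round-trips exactly. A quick userspace analog of ilog2():

#include <stdio.h>

/* ilog2 analog: floor(log2(x)) for x > 0. */
static unsigned demo_ilog2(unsigned x)
{
	unsigned r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	printf("%u %u\n", demo_ilog2(2048), demo_ilog2(4096));	/* 11 12 */
	return 0;
}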
struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
{
struct ib_ah_attr attr;
struct ib_ah *ah = ERR_PTR(-EINVAL);
- struct hfi1_qp *qp0;
+ struct rvt_qp *qp0;
memset(&attr, 0, sizeof(attr));
attr.dlid = dlid;
attr.port_num = ppd_from_ibp(ibp)->port;
rcu_read_lock();
- qp0 = rcu_dereference(ibp->qp[0]);
+ qp0 = rcu_dereference(ibp->rvp.qp[0]);
if (qp0)
ah = ib_create_ah(qp0->ibqp.pd, &attr);
rcu_read_unlock();
@@ -1759,51 +1507,6 @@ struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
}
/**
- * destroy_ah - destroy an address handle
- * @ibah: the AH to destroy
- *
- * This may be called from interrupt context.
- */
-static int destroy_ah(struct ib_ah *ibah)
-{
- struct hfi1_ibdev *dev = to_idev(ibah->device);
- struct hfi1_ah *ah = to_iah(ibah);
- unsigned long flags;
-
- if (atomic_read(&ah->refcount) != 0)
- return -EBUSY;
-
- spin_lock_irqsave(&dev->n_ahs_lock, flags);
- dev->n_ahs_allocated--;
- spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
- kfree(ah);
-
- return 0;
-}
-
-static int modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct hfi1_ah *ah = to_iah(ibah);
-
- if (hfi1_check_ah(ibah->device, ah_attr))
- return -EINVAL;
-
- ah->attr = *ah_attr;
-
- return 0;
-}
-
-static int query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
- struct hfi1_ah *ah = to_iah(ibah);
-
- *ah_attr = ah->attr;
-
- return 0;
-}
-
-/**
* hfi1_get_npkeys - return the size of the PKEY table for context 0
* @dd: the hfi1_ib device
*/
@@ -1812,54 +1515,6 @@ unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
return ARRAY_SIZE(dd->pport[0].pkeys);
}
-static int query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- int ret;
-
- if (index >= hfi1_get_npkeys(dd)) {
- ret = -EINVAL;
- goto bail;
- }
-
- *pkey = hfi1_get_pkey(to_iport(ibdev, port), index);
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * alloc_ucontext - allocate a ucontest
- * @ibdev: the infiniband device
- * @udata: not used by the driver
- */
-
-static struct ib_ucontext *alloc_ucontext(struct ib_device *ibdev,
- struct ib_udata *udata)
-{
- struct hfi1_ucontext *context;
- struct ib_ucontext *ret;
-
- context = kmalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
- ret = &context->ibucontext;
-
-bail:
- return ret;
-}
-
-static int dealloc_ucontext(struct ib_ucontext *context)
-{
- kfree(to_iucontext(context));
- return 0;
-}
-
static void init_ibport(struct hfi1_pportdata *ppd)
{
struct hfi1_ibport *ibp = &ppd->ibport_data;
@@ -1871,28 +1526,21 @@ static void init_ibport(struct hfi1_pportdata *ppd)
ibp->sc_to_sl[i] = i;
}
- spin_lock_init(&ibp->lock);
+ spin_lock_init(&ibp->rvp.lock);
/* Set the prefix to the default value (see ch. 4.1.1) */
- ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
- ibp->sm_lid = 0;
+ ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
+ ibp->rvp.sm_lid = 0;
/* Below should only set bits defined in OPA PortInfo.CapabilityMask */
- ibp->port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
+ ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
IB_PORT_CAP_MASK_NOTICE_SUP;
- ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
- ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
- ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
- ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
- RCU_INIT_POINTER(ibp->qp[0], NULL);
- RCU_INIT_POINTER(ibp->qp[1], NULL);
-}
-
-static void verbs_txreq_kmem_cache_ctor(void *obj)
-{
- struct verbs_txreq *tx = obj;
-
- memset(tx, 0, sizeof(*tx));
+ ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
+ ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
+ ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
+ ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
+ ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
+
+ RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
+ RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
/**
@@ -1903,76 +1551,26 @@ static void verbs_txreq_kmem_cache_ctor(void *obj)
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
struct hfi1_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->ibdev;
+ struct ib_device *ibdev = &dev->rdi.ibdev;
struct hfi1_pportdata *ppd = dd->pport;
- unsigned i, lk_tab_size;
+ unsigned i;
int ret;
size_t lcpysz = IB_DEVICE_NAME_MAX;
- u16 descq_cnt;
- char buf[TXREQ_NAME_LEN];
-
- ret = hfi1_qp_init(dev);
- if (ret)
- goto err_qp_init;
-
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
/* Only need to initialize non-zero fields. */
- spin_lock_init(&dev->n_pds_lock);
- spin_lock_init(&dev->n_ahs_lock);
- spin_lock_init(&dev->n_cqs_lock);
- spin_lock_init(&dev->n_qps_lock);
- spin_lock_init(&dev->n_srqs_lock);
- spin_lock_init(&dev->n_mcast_grps_lock);
- init_timer(&dev->mem_timer);
- dev->mem_timer.function = mem_timer;
- dev->mem_timer.data = (unsigned long) dev;
- /*
- * The top hfi1_lkey_table_size bits are used to index the
- * table. The lower 8 bits can be owned by the user (copied from
- * the LKEY). The remaining bits act as a generation number or tag.
- */
- spin_lock_init(&dev->lk_table.lock);
- dev->lk_table.max = 1 << hfi1_lkey_table_size;
- /* ensure generation is at least 4 bits (keys.c) */
- if (hfi1_lkey_table_size > MAX_LKEY_TABLE_BITS) {
- dd_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
- hfi1_lkey_table_size, MAX_LKEY_TABLE_BITS);
- hfi1_lkey_table_size = MAX_LKEY_TABLE_BITS;
- }
- lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- dev->lk_table.table = (struct hfi1_mregion __rcu **)
- vmalloc(lk_tab_size);
- if (dev->lk_table.table == NULL) {
- ret = -ENOMEM;
- goto err_lk;
- }
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- for (i = 0; i < dev->lk_table.max; i++)
- RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
- INIT_LIST_HEAD(&dev->pending_mmaps);
- spin_lock_init(&dev->pending_lock);
+ setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
+
seqlock_init(&dev->iowait_lock);
- dev->mmap_offset = PAGE_SIZE;
- spin_lock_init(&dev->mmap_offset_lock);
INIT_LIST_HEAD(&dev->txwait);
INIT_LIST_HEAD(&dev->memwait);
- descq_cnt = sdma_get_descq_cnt();
-
- snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
- /* SLAB_HWCACHE_ALIGN for AHG */
- dev->verbs_txreq_cache = kmem_cache_create(buf,
- sizeof(struct verbs_txreq),
- 0, SLAB_HWCACHE_ALIGN,
- verbs_txreq_kmem_cache_ctor);
- if (!dev->verbs_txreq_cache) {
- ret = -ENOMEM;
+ ret = verbs_txreq_init(dev);
+ if (ret)
goto err_verbs_txreq;
- }
/*
* The system image GUID is supposed to be the same for all
@@ -1985,142 +1583,119 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
ibdev->owner = THIS_MODULE;
ibdev->node_guid = cpu_to_be64(ppd->guid);
- ibdev->uverbs_abi_ver = HFI1_UVERBS_ABI_VERSION;
- ibdev->uverbs_cmd_mask =
- (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
- (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
- (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
- (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
- (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
- (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
- (1ull << IB_USER_VERBS_CMD_REG_MR) |
- (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
- (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
- (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
- (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
- (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
- (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
- (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
- (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
- (1ull << IB_USER_VERBS_CMD_POST_SEND) |
- (1ull << IB_USER_VERBS_CMD_POST_RECV) |
- (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
- ibdev->node_type = RDMA_NODE_IB_CA;
ibdev->phys_port_cnt = dd->num_pports;
- ibdev->num_comp_vectors = 1;
ibdev->dma_device = &dd->pcidev->dev;
- ibdev->query_device = query_device;
ibdev->modify_device = modify_device;
- ibdev->query_port = query_port;
- ibdev->modify_port = modify_port;
- ibdev->query_pkey = query_pkey;
- ibdev->query_gid = query_gid;
- ibdev->alloc_ucontext = alloc_ucontext;
- ibdev->dealloc_ucontext = dealloc_ucontext;
- ibdev->alloc_pd = alloc_pd;
- ibdev->dealloc_pd = dealloc_pd;
- ibdev->create_ah = create_ah;
- ibdev->destroy_ah = destroy_ah;
- ibdev->modify_ah = modify_ah;
- ibdev->query_ah = query_ah;
- ibdev->create_srq = hfi1_create_srq;
- ibdev->modify_srq = hfi1_modify_srq;
- ibdev->query_srq = hfi1_query_srq;
- ibdev->destroy_srq = hfi1_destroy_srq;
- ibdev->create_qp = hfi1_create_qp;
- ibdev->modify_qp = hfi1_modify_qp;
- ibdev->query_qp = hfi1_query_qp;
- ibdev->destroy_qp = hfi1_destroy_qp;
- ibdev->post_send = post_send;
- ibdev->post_recv = post_receive;
- ibdev->post_srq_recv = hfi1_post_srq_receive;
- ibdev->create_cq = hfi1_create_cq;
- ibdev->destroy_cq = hfi1_destroy_cq;
- ibdev->resize_cq = hfi1_resize_cq;
- ibdev->poll_cq = hfi1_poll_cq;
- ibdev->req_notify_cq = hfi1_req_notify_cq;
- ibdev->get_dma_mr = hfi1_get_dma_mr;
- ibdev->reg_user_mr = hfi1_reg_user_mr;
- ibdev->dereg_mr = hfi1_dereg_mr;
- ibdev->alloc_mr = hfi1_alloc_mr;
- ibdev->alloc_fmr = hfi1_alloc_fmr;
- ibdev->map_phys_fmr = hfi1_map_phys_fmr;
- ibdev->unmap_fmr = hfi1_unmap_fmr;
- ibdev->dealloc_fmr = hfi1_dealloc_fmr;
- ibdev->attach_mcast = hfi1_multicast_attach;
- ibdev->detach_mcast = hfi1_multicast_detach;
+
+ /* keep process_mad in the driver */
ibdev->process_mad = hfi1_process_mad;
- ibdev->mmap = hfi1_mmap;
- ibdev->dma_ops = &hfi1_dma_mapping_ops;
- ibdev->get_port_immutable = port_immutable;
strncpy(ibdev->node_desc, init_utsname()->nodename,
sizeof(ibdev->node_desc));
- ret = ib_register_device(ibdev, hfi1_create_port_files);
- if (ret)
- goto err_reg;
-
- ret = hfi1_create_agents(dev);
+ /*
+ * Fill in rvt info object.
+ */
+ dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
+ dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
+ dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
+ dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
+ dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
+ dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
+ dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
+ dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
+ dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
+ /*
+ * Fill in rvt info device attributes.
+ */
+ hfi1_fill_device_attr(dd);
+
+ /* queue pair */
+ dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
+ dd->verbs_dev.rdi.dparms.qpn_start = 0;
+ dd->verbs_dev.rdi.dparms.qpn_inc = 1;
+ dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
+ dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
+ dd->verbs_dev.rdi.dparms.qpn_res_end =
+ dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
+ dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
+ dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
+ dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
+ dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
+ dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
+ dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;
+
+ dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
+ dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
+ dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
+ dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
+ dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
+ dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
+ dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
+ dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
+ dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
+ dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
+ dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
+ dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
+ dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
+ dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
+ dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
+ dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
+ dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
+
+ /* completion queue */
+ snprintf(dd->verbs_dev.rdi.dparms.cq_name,
+ sizeof(dd->verbs_dev.rdi.dparms.cq_name),
+ "hfi1_cq%d", dd->unit);
+ dd->verbs_dev.rdi.dparms.node = dd->node;
+
+ /* misc settings */
+ dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
+ dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
+ dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
+ dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
+
+ ppd = dd->pport;
+ for (i = 0; i < dd->num_pports; i++, ppd++)
+ rvt_init_port(&dd->verbs_dev.rdi,
+ &ppd->ibport_data.rvp,
+ i,
+ ppd->pkeys);
+
+ ret = rvt_register_device(&dd->verbs_dev.rdi);
if (ret)
- goto err_agents;
+ goto err_verbs_txreq;
ret = hfi1_verbs_register_sysfs(dd);
if (ret)
goto err_class;
- goto bail;
+ return ret;
err_class:
- hfi1_free_agents(dev);
-err_agents:
- ib_unregister_device(ibdev);
-err_reg:
+ rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
- kmem_cache_destroy(dev->verbs_txreq_cache);
- vfree(dev->lk_table.table);
-err_lk:
- hfi1_qp_exit(dev);
-err_qp_init:
+ verbs_txreq_exit(dev);
dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
-bail:
return ret;
}
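The KDETH QPN reservation registered above carves out a contiguous 64K block starting at kdeth_qp << 16, which rdmavt must then skip when allocating QPNs. A standalone check of the range arithmetic (the kdeth_qp value is an assumed module-parameter setting):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t kdeth_qp = 0x80;	/* assumed module-parameter value */

	/* reserve a full 64K block of QPNs for KDETH */
	uint32_t res_start = kdeth_qp << 16;
	uint32_t res_end = res_start + 65535;

	printf("reserved QPNs: 0x%x..0x%x\n", res_start, res_end);
	/* prints: reserved QPNs: 0x800000..0x80ffff */
	return 0;
}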
void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
struct hfi1_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->ibdev;
hfi1_verbs_unregister_sysfs(dd);
- hfi1_free_agents(dev);
-
- ib_unregister_device(ibdev);
+ rvt_unregister_device(&dd->verbs_dev.rdi);
if (!list_empty(&dev->txwait))
dd_dev_err(dd, "txwait list not empty!\n");
if (!list_empty(&dev->memwait))
dd_dev_err(dd, "memwait list not empty!\n");
- if (dev->dma_mr)
- dd_dev_err(dd, "DMA MR not NULL!\n");
- hfi1_qp_exit(dev);
del_timer_sync(&dev->mem_timer);
- kmem_cache_destroy(dev->verbs_txreq_cache);
- vfree(dev->lk_table.table);
+ verbs_txreq_exit(dev);
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
@@ -2128,7 +1703,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_ib_header *hdr = packet->hdr;
- struct hfi1_qp *qp = packet->qp;
+ struct rvt_qp *qp = packet->qp;
u32 lqpn, rqpn = 0;
u16 rlid = 0;
u8 sl, sc5, sc4_bit, svc_type;
@@ -2151,7 +1726,7 @@ void hfi1_cnp_rcv(struct hfi1_packet *packet)
svc_type = IB_CC_SVCTYPE_UD;
break;
default:
- ibp->n_pkt_drops++;
+ ibp->rvp.n_pkt_drops++;
return;
}
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
index 286e468b0479..6c4670fffdbb 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/staging/rdma/hfi1/verbs.h
@@ -1,12 +1,11 @@
/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -18,8 +17,6 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -59,9 +56,13 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
+#include <linux/slab.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_mad.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
+#include <rdma/rdmavt_cq.h>
struct hfi1_ctxtdata;
struct hfi1_pportdata;
@@ -79,12 +80,6 @@ struct hfi1_packet;
*/
#define HFI1_UVERBS_ABI_VERSION 2
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
-
#define IB_SEQ_NAK (3 << 29)
/* AETH NAK opcode values */
@@ -95,17 +90,6 @@ struct hfi1_packet;
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST 0x64
-/* Flags for checking QP state (see ib_hfi1_state_ops[]) */
-#define HFI1_POST_SEND_OK 0x01
-#define HFI1_POST_RECV_OK 0x02
-#define HFI1_PROCESS_RECV_OK 0x04
-#define HFI1_PROCESS_SEND_OK 0x08
-#define HFI1_PROCESS_NEXT_SEND_OK 0x10
-#define HFI1_FLUSH_SEND 0x20
-#define HFI1_FLUSH_RECV 0x40
-#define HFI1_PROCESS_OR_FLUSH_SEND \
- (HFI1_PROCESS_SEND_OK | HFI1_FLUSH_SEND)
-
/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE 0x00
#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
@@ -208,341 +192,18 @@ struct hfi1_pio_header {
} __packed;
/*
- * used for force cacheline alignment for AHG
- */
-struct tx_pio_header {
- struct hfi1_pio_header phdr;
-} ____cacheline_aligned;
-
-/*
- * There is one struct hfi1_mcast for each multicast GID.
- * All attached QPs are then stored as a list of
- * struct hfi1_mcast_qp.
- */
-struct hfi1_mcast_qp {
- struct list_head list;
- struct hfi1_qp *qp;
-};
-
-struct hfi1_mcast {
- struct rb_node rb_node;
- union ib_gid mgid;
- struct list_head qp_list;
- wait_queue_head_t wait;
- atomic_t refcount;
- int n_attached;
-};
-
-/* Protection domain */
-struct hfi1_pd {
- struct ib_pd ibpd;
- int user; /* non-zero if created from user space */
-};
-
-/* Address Handle */
-struct hfi1_ah {
- struct ib_ah ibah;
- struct ib_ah_attr attr;
- atomic_t refcount;
-};
-
-/*
- * This structure is used by hfi1_mmap() to validate an offset
- * when an mmap() request is made. The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct hfi1_mmap_info {
- struct list_head pending_mmaps;
- struct ib_ucontext *context;
- void *obj;
- __u64 offset;
- struct kref ref;
- unsigned size;
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct hfi1_cq_wc {
- u32 head; /* index of next entry to fill */
- u32 tail; /* index of next ib_poll_cq() entry */
- union {
- /* these are actually size ibcq.cqe + 1 */
- struct ib_uverbs_wc uqueue[0];
- struct ib_wc kqueue[0];
- };
-};
-
-/*
- * The completion queue structure.
- */
-struct hfi1_cq {
- struct ib_cq ibcq;
- struct kthread_work comptask;
- struct hfi1_devdata *dd;
- spinlock_t lock; /* protect changes in this struct */
- u8 notify;
- u8 triggered;
- struct hfi1_cq_wc *queue;
- struct hfi1_mmap_info *ip;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * Used by the verbs layer.
- */
-struct hfi1_seg {
- void *vaddr;
- size_t length;
-};
-
-/* The number of hfi1_segs that fit in a page. */
-#define HFI1_SEGSZ (PAGE_SIZE / sizeof(struct hfi1_seg))
-
-struct hfi1_segarray {
- struct hfi1_seg segs[HFI1_SEGSZ];
-};
-
-struct hfi1_mregion {
- struct ib_pd *pd; /* shares refcnt of ibmr.pd */
- u64 user_base; /* User's address for this region */
- u64 iova; /* IB start address of this region */
- size_t length;
- u32 lkey;
- u32 offset; /* offset (bytes) to start of region */
- int access_flags;
- u32 max_segs; /* number of hfi1_segs in all the arrays */
- u32 mapsz; /* size of the map array */
- u8 page_shift; /* 0 - non unform/non powerof2 sizes */
- u8 lkey_published; /* in global table */
- struct completion comp; /* complete when refcount goes to zero */
- atomic_t refcount;
- struct hfi1_segarray *map[0]; /* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct hfi1_sge {
- struct hfi1_mregion *mr;
- void *vaddr; /* kernel virtual address of segment */
- u32 sge_length; /* length of the SGE */
- u32 length; /* remaining length of the segment */
- u16 m; /* current index: mr->map[m] */
- u16 n; /* current index: mr->map[m]->segs[n] */
-};
-
-/* Memory region */
-struct hfi1_mr {
- struct ib_mr ibmr;
- struct ib_umem *umem;
- struct hfi1_mregion mr; /* must be last */
-};
-
-/*
- * Send work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->s_max_sge.
- */
-struct hfi1_swqe {
- union {
- struct ib_send_wr wr; /* don't use wr.sg_list */
- struct ib_rdma_wr rdma_wr;
- struct ib_atomic_wr atomic_wr;
- struct ib_ud_wr ud_wr;
- };
- u32 psn; /* first packet sequence number */
- u32 lpsn; /* last packet sequence number */
- u32 ssn; /* send sequence number */
- u32 length; /* total length of data in sg_list */
- struct hfi1_sge sg_list[0];
-};
-
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct hfi1_rwqe {
- u64 wr_id;
- u8 num_sge;
- struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
+ * hfi1-specific data structures that will be hidden from rvt after the
+ * queue pair is made common
*/
-struct hfi1_rwq {
- u32 head; /* new work requests posted to the head */
- u32 tail; /* receives pull requests from here. */
- struct hfi1_rwqe wq[0];
-};
-
-struct hfi1_rq {
- struct hfi1_rwq *wq;
- u32 size; /* size of RWQE array */
- u8 max_sge;
- /* protect changes in this struct */
- spinlock_t lock ____cacheline_aligned_in_smp;
-};
-
-struct hfi1_srq {
- struct ib_srq ibsrq;
- struct hfi1_rq rq;
- struct hfi1_mmap_info *ip;
- /* send signal when number of RWQEs < limit */
- u32 limit;
-};
-
-struct hfi1_sge_state {
- struct hfi1_sge *sg_list; /* next SGE to be used if any */
- struct hfi1_sge sge; /* progress state for the current SGE */
- u32 total_len;
- u8 num_sge;
-};
-
-/*
- * This structure holds the information that the send tasklet needs
- * to send a RDMA read response or atomic operation.
- */
-struct hfi1_ack_entry {
- u8 opcode;
- u8 sent;
- u32 psn;
- u32 lpsn;
- union {
- struct hfi1_sge rdma_sge;
- u64 atomic_data;
- };
-};
-
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
- *
- * Common variables are protected by both r_rq.lock and s_lock in that order
- * which only happens in modify_qp() or changing the QP 'state'.
- */
-struct hfi1_qp {
- struct ib_qp ibqp;
- /* read mostly fields above and below */
- struct ib_ah_attr remote_ah_attr;
- struct ib_ah_attr alt_ah_attr;
- struct hfi1_qp __rcu *next; /* link list for QPN hash table */
- struct hfi1_swqe *s_wq; /* send work queue */
- struct hfi1_mmap_info *ip;
- struct ahg_ib_header *s_hdr; /* next packet header to send */
- /* sc for UC/RC QPs - based on ah for UD */
- u8 s_sc;
- unsigned long timeout_jiffies; /* computed from timeout */
-
- enum ib_mtu path_mtu;
- int srate_mbps; /* s_srate (below) converted to Mbit/s */
- u32 remote_qpn;
- u32 pmtu; /* decoded from path_mtu */
- u32 qkey; /* QKEY for this QP (for UD or RD) */
- u32 s_size; /* send work queue size */
- u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
- u32 s_ahgpsn; /* set to the psn in the copy of the header */
-
- u8 state; /* QP state */
- u8 allowed_ops; /* high order bits of allowed opcodes */
- u8 qp_access_flags;
- u8 alt_timeout; /* Alternate path timeout for this QP */
- u8 timeout; /* Timeout for this QP */
- u8 s_srate;
- u8 s_mig_state;
- u8 port_num;
- u8 s_pkey_index; /* PKEY index to use */
- u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
- u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
- u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
- u8 s_retry_cnt; /* number of times to retry */
- u8 s_rnr_retry_cnt;
- u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
- u8 s_max_sge; /* size of s_wq->sg_list */
- u8 s_draining;
-
- /* start of read/write fields */
- atomic_t refcount ____cacheline_aligned_in_smp;
- wait_queue_head_t wait;
-
-
- struct hfi1_ack_entry s_ack_queue[HFI1_MAX_RDMA_ATOMIC + 1]
- ____cacheline_aligned_in_smp;
- struct hfi1_sge_state s_rdma_read_sge;
-
- spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
- unsigned long r_aflags;
- u64 r_wr_id; /* ID for current receive WQE */
- u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
- u32 r_len; /* total length of r_sge */
- u32 r_rcv_len; /* receive data len processed */
- u32 r_psn; /* expected rcv packet sequence number */
- u32 r_msn; /* message sequence number */
-
- u8 r_adefered; /* number of acks defered */
- u8 r_state; /* opcode of last packet received */
- u8 r_flags;
- u8 r_head_ack_queue; /* index into s_ack_queue[] */
-
- struct list_head rspwait; /* link for waiting to respond */
-
- struct hfi1_sge_state r_sge; /* current receive data */
- struct hfi1_rq r_rq; /* receive work queue */
-
- spinlock_t s_lock ____cacheline_aligned_in_smp;
- struct hfi1_sge_state *s_cur_sge;
- u32 s_flags;
- struct hfi1_swqe *s_wqe;
- struct hfi1_sge_state s_sge; /* current send request data */
- struct hfi1_mregion *s_rdma_mr;
- struct sdma_engine *s_sde; /* current sde */
- u32 s_cur_size; /* size of send packet in bytes */
- u32 s_len; /* total length of s_sge */
- u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
- u32 s_next_psn; /* PSN for next request */
- u32 s_last_psn; /* last response PSN processed */
- u32 s_sending_psn; /* lowest PSN that is being sent */
- u32 s_sending_hpsn; /* highest PSN that is being sent */
- u32 s_psn; /* current packet sequence number */
- u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
- u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
- u32 s_head; /* new entries added here */
- u32 s_tail; /* next entry to process */
- u32 s_cur; /* current work queue entry */
- u32 s_acked; /* last un-ACK'ed entry */
- u32 s_last; /* last completed entry */
- u32 s_ssn; /* SSN of tail entry */
- u32 s_lsn; /* limit sequence number (credit) */
- u16 s_hdrwords; /* size of s_hdr in 32 bit words */
- u16 s_rdma_ack_cnt;
- s8 s_ahgidx;
- u8 s_state; /* opcode of last packet sent */
- u8 s_ack_state; /* opcode of packet to ACK */
- u8 s_nak_state; /* non-zero if NAK is pending */
- u8 r_nak_state; /* non-zero if NAK is pending */
- u8 s_retry; /* requester retry counter */
- u8 s_rnr_retry; /* requester RNR retry counter */
- u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
- u8 s_tail_ack_queue; /* index into s_ack_queue[] */
-
- struct hfi1_sge_state s_ack_rdma_sge;
- struct timer_list s_timer;
-
+struct hfi1_qp_priv {
+ struct ahg_ib_header *s_hdr; /* next header to send */
+ struct sdma_engine *s_sde; /* current sde */
+ struct send_context *s_sendcontext; /* current sendcontext */
+ u8 s_sc; /* SC[0..4] for next packet */
+ u8 r_adefered; /* number of acks deferred */
struct iowait s_iowait;
-
- struct hfi1_sge r_sg_list[0] /* verified SGEs */
- ____cacheline_aligned_in_smp;
+ struct timer_list s_rnr_timer;
+ struct rvt_qp *owner;
};
/*
@@ -553,123 +214,11 @@ struct hfi1_pkt_state {
struct hfi1_ibdev *dev;
struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd;
+ struct verbs_txreq *s_txreq;
};
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define HFI1_R_WRID_VALID 0
-#define HFI1_R_REWIND_SGE 1
-
-/*
- * Bit definitions for r_flags.
- */
-#define HFI1_R_REUSE_SGE 0x01
-#define HFI1_R_RDMAR_SEQ 0x02
-/* defer ack until end of interrupt session */
-#define HFI1_R_RSP_DEFERED_ACK 0x04
-/* relay ack to send engine */
-#define HFI1_R_RSP_SEND 0x08
-#define HFI1_R_COMM_EST 0x10
-
-/*
- * Bit definitions for s_flags.
- *
- * HFI1_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
- * HFI1_S_BUSY - send tasklet is processing the QP
- * HFI1_S_TIMER - the RC retry timer is active
- * HFI1_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
- * HFI1_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
- * before processing the next SWQE
- * HFI1_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete
- * before processing the next SWQE
- * HFI1_S_WAIT_RNR - waiting for RNR timeout
- * HFI1_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * HFI1_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- * next send completion entry not via send DMA
- * HFI1_S_WAIT_PIO - waiting for a send buffer to be available
- * HFI1_S_WAIT_TX - waiting for a struct verbs_txreq to be available
- * HFI1_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
- * HFI1_S_WAIT_KMEM - waiting for kernel memory to be available
- * HFI1_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
- * HFI1_S_WAIT_ACK - waiting for an ACK packet before sending more requests
- * HFI1_S_SEND_ONE - send one packet, request ACK, then wait for ACK
- * HFI1_S_ECN - a BECN was queued to the send engine
- */
-#define HFI1_S_SIGNAL_REQ_WR 0x0001
-#define HFI1_S_BUSY 0x0002
-#define HFI1_S_TIMER 0x0004
-#define HFI1_S_RESP_PENDING 0x0008
-#define HFI1_S_ACK_PENDING 0x0010
-#define HFI1_S_WAIT_FENCE 0x0020
-#define HFI1_S_WAIT_RDMAR 0x0040
-#define HFI1_S_WAIT_RNR 0x0080
-#define HFI1_S_WAIT_SSN_CREDIT 0x0100
-#define HFI1_S_WAIT_DMA 0x0200
-#define HFI1_S_WAIT_PIO 0x0400
-#define HFI1_S_WAIT_TX 0x0800
-#define HFI1_S_WAIT_DMA_DESC 0x1000
-#define HFI1_S_WAIT_KMEM 0x2000
-#define HFI1_S_WAIT_PSN 0x4000
-#define HFI1_S_WAIT_ACK 0x8000
-#define HFI1_S_SEND_ONE 0x10000
-#define HFI1_S_UNLIMITED_CREDIT 0x20000
-#define HFI1_S_AHG_VALID 0x40000
-#define HFI1_S_AHG_CLEAR 0x80000
-#define HFI1_S_ECN 0x100000
-
-/*
- * Wait flags that would prevent any packet type from being sent.
- */
-#define HFI1_S_ANY_WAIT_IO (HFI1_S_WAIT_PIO | HFI1_S_WAIT_TX | \
- HFI1_S_WAIT_DMA_DESC | HFI1_S_WAIT_KMEM)
-
-/*
- * Wait flags that would prevent send work requests from making progress.
- */
-#define HFI1_S_ANY_WAIT_SEND (HFI1_S_WAIT_FENCE | HFI1_S_WAIT_RDMAR | \
- HFI1_S_WAIT_RNR | HFI1_S_WAIT_SSN_CREDIT | HFI1_S_WAIT_DMA | \
- HFI1_S_WAIT_PSN | HFI1_S_WAIT_ACK)
-
-#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | HFI1_S_ANY_WAIT_SEND)
-
#define HFI1_PSN_CREDIT 16
-/*
- * Since struct hfi1_swqe is not a fixed size, we can't simply index into
- * struct hfi1_qp.s_wq. This function does the array index computation.
- */
-static inline struct hfi1_swqe *get_swqe_ptr(struct hfi1_qp *qp,
- unsigned n)
-{
- return (struct hfi1_swqe *)((char *)qp->s_wq +
- (sizeof(struct hfi1_swqe) +
- qp->s_max_sge *
- sizeof(struct hfi1_sge)) * n);
-}
-
-/*
- * Since struct hfi1_rwqe is not a fixed size, we can't simply index into
- * struct hfi1_rwq.wq. This function does the array index computation.
- */
-static inline struct hfi1_rwqe *get_rwqe_ptr(struct hfi1_rq *rq, unsigned n)
-{
- return (struct hfi1_rwqe *)
- ((char *) rq->wq->wq +
- (sizeof(struct hfi1_rwqe) +
- rq->max_sge * sizeof(struct ib_sge)) * n);
-}
-
-#define MAX_LKEY_TABLE_BITS 23
-
-struct hfi1_lkey_table {
- spinlock_t lock; /* protect changes in this struct */
- u32 next; /* next unused index (speeds search) */
- u32 gen; /* generation count */
- u32 max; /* size of the table */
- struct hfi1_mregion __rcu **table;
-};
-
struct hfi1_opcode_stats {
u64 n_packets; /* number of packets */
u64 n_bytes; /* total number of bytes */
@@ -690,75 +239,20 @@ static inline void inc_opstats(
}
struct hfi1_ibport {
- struct hfi1_qp __rcu *qp[2];
- struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
- struct hfi1_ah *sm_ah;
- struct hfi1_ah *smi_ah;
- struct rb_root mcast_tree;
- spinlock_t lock; /* protect changes in this struct */
-
- /* non-zero when timer is set */
- unsigned long mkey_lease_timeout;
- unsigned long trap_timeout;
- __be64 gid_prefix; /* in network order */
- __be64 mkey;
+ struct rvt_qp __rcu *qp[2];
+ struct rvt_ibport rvp;
+
__be64 guids[HFI1_GUIDS_PER_PORT - 1]; /* writable GUIDs */
- u64 tid; /* TID for traps */
- u64 n_rc_resends;
- u64 n_seq_naks;
- u64 n_rdma_seq;
- u64 n_rnr_naks;
- u64 n_other_naks;
- u64 n_loop_pkts;
- u64 n_pkt_drops;
- u64 n_vl15_dropped;
- u64 n_rc_timeouts;
- u64 n_dmawait;
- u64 n_unaligned;
- u64 n_rc_dupreq;
- u64 n_rc_seqnak;
-
- /* Hot-path per CPU counters to avoid cacheline trading to update */
- u64 z_rc_acks;
- u64 z_rc_qacks;
- u64 z_rc_delayed_comp;
- u64 __percpu *rc_acks;
- u64 __percpu *rc_qacks;
- u64 __percpu *rc_delayed_comp;
-
- u32 port_cap_flags;
- u32 pma_sample_start;
- u32 pma_sample_interval;
- __be16 pma_counter_select[5];
- u16 pma_tag;
- u16 pkey_violations;
- u16 qkey_violations;
- u16 mkey_violations;
- u16 mkey_lease_period;
- u16 sm_lid;
- u16 repress_traps;
- u8 sm_sl;
- u8 mkeyprot;
- u8 subnet_timeout;
- u8 vl_high_limit;
+
/* the first 16 entries are sl_to_vl for !OPA */
u8 sl_to_sc[32];
u8 sc_to_sl[32];
};
-
-struct hfi1_qp_ibdev;
struct hfi1_ibdev {
- struct ib_device ibdev;
- struct list_head pending_mmaps;
- spinlock_t mmap_offset_lock; /* protect mmap_offset */
- u32 mmap_offset;
- struct hfi1_mregion __rcu *dma_mr;
-
- struct hfi1_qp_ibdev *qp_dev;
+ struct rvt_dev_info rdi; /* Must be first */
/* QP numbers are shared by all IB ports */
- struct hfi1_lkey_table lk_table;
/* protect wait lists */
seqlock_t iowait_lock;
struct list_head txwait; /* list for wait verbs_txreq */
@@ -767,26 +261,11 @@ struct hfi1_ibdev {
struct kmem_cache *verbs_txreq_cache;
struct timer_list mem_timer;
- /* other waiters */
- spinlock_t pending_lock;
-
u64 n_piowait;
+ u64 n_piodrain;
u64 n_txwait;
u64 n_kmem_wait;
- u64 n_send_schedule;
-
- u32 n_pds_allocated; /* number of PDs allocated for device */
- spinlock_t n_pds_lock;
- u32 n_ahs_allocated; /* number of AHs allocated for device */
- spinlock_t n_ahs_lock;
- u32 n_cqs_allocated; /* number of CQs allocated for device */
- spinlock_t n_cqs_lock;
- u32 n_qps_allocated; /* number of QPs allocated for device */
- spinlock_t n_qps_lock;
- u32 n_srqs_allocated; /* number of SRQs allocated for device */
- spinlock_t n_srqs_lock;
- u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
- spinlock_t n_mcast_grps_lock;
+
#ifdef CONFIG_DEBUG_FS
/* per HFI debugfs */
struct dentry *hfi1_ibdev_dbg;
@@ -795,66 +274,31 @@ struct hfi1_ibdev {
#endif
};
-struct hfi1_verbs_counters {
- u64 symbol_error_counter;
- u64 link_error_recovery_counter;
- u64 link_downed_counter;
- u64 port_rcv_errors;
- u64 port_rcv_remphys_errors;
- u64 port_xmit_discards;
- u64 port_xmit_data;
- u64 port_rcv_data;
- u64 port_xmit_packets;
- u64 port_rcv_packets;
- u32 local_link_integrity_errors;
- u32 excessive_buffer_overrun_errors;
- u32 vl15_dropped;
-};
-
-static inline struct hfi1_mr *to_imr(struct ib_mr *ibmr)
-{
- return container_of(ibmr, struct hfi1_mr, ibmr);
-}
-
-static inline struct hfi1_pd *to_ipd(struct ib_pd *ibpd)
-{
- return container_of(ibpd, struct hfi1_pd, ibpd);
-}
-
-static inline struct hfi1_ah *to_iah(struct ib_ah *ibah)
-{
- return container_of(ibah, struct hfi1_ah, ibah);
-}
-
-static inline struct hfi1_cq *to_icq(struct ib_cq *ibcq)
+static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
{
- return container_of(ibcq, struct hfi1_cq, ibcq);
-}
+ struct rvt_dev_info *rdi;
-static inline struct hfi1_srq *to_isrq(struct ib_srq *ibsrq)
-{
- return container_of(ibsrq, struct hfi1_srq, ibsrq);
+ rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
+ return container_of(rdi, struct hfi1_ibdev, rdi);
}
-static inline struct hfi1_qp *to_iqp(struct ib_qp *ibqp)
+static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
{
- return container_of(ibqp, struct hfi1_qp, ibqp);
-}
+ struct hfi1_qp_priv *priv;
-static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
-{
- return container_of(ibdev, struct hfi1_ibdev, ibdev);
+ priv = container_of(s_iowait, struct hfi1_qp_priv, s_iowait);
+ return priv->owner;
}
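
The two helpers above are the rvt conversion idiom: to_idev() steps from the ib_device embedded inside rvt_dev_info up to the hfi1_ibdev that embeds it, and iowait_to_qp() steps from s_iowait into hfi1_qp_priv and then follows the owner back-pointer. A minimal userspace sketch of the container_of() arithmetic they rely on, with stand-in struct names rather than the real rvt/hfi1 types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer { int tag; struct inner member; };

int main(void)
{
	struct outer o = { .tag = 42 };
	struct inner *ip = &o.member;
	/* walk back from the embedded member to the enclosing object */
	struct outer *op = container_of(ip, struct outer, member);

	printf("tag=%d\n", op->tag);	/* prints tag=42 */
	return 0;
}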
/*
* Send if not busy or waiting for I/O and either
* a RC response is pending or we can process send work requests.
*/
-static inline int hfi1_send_ok(struct hfi1_qp *qp)
+static inline int hfi1_send_ok(struct rvt_qp *qp)
{
- return !(qp->s_flags & (HFI1_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
- (qp->s_hdrwords || (qp->s_flags & HFI1_S_RESP_PENDING) ||
- !(qp->s_flags & HFI1_S_ANY_WAIT_SEND));
+ return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
+ (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
}
/*
@@ -862,7 +306,7 @@ static inline int hfi1_send_ok(struct hfi1_qp *qp)
*/
void hfi1_bad_pqkey(struct hfi1_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
u32 qp1, u32 qp2, u16 lid1, u16 lid2);
-void hfi1_cap_mask_chg(struct hfi1_ibport *ibp);
+void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num);
void hfi1_sys_guid_chg(struct hfi1_ibport *ibp);
void hfi1_node_desc_chg(struct hfi1_ibport *ibp);
int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
@@ -870,8 +314,6 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
const struct ib_mad_hdr *in_mad, size_t in_mad_size,
struct ib_mad_hdr *out_mad, size_t *out_mad_size,
u16 *out_mad_pkey_index);
-int hfi1_create_agents(struct hfi1_ibdev *dev);
-void hfi1_free_agents(struct hfi1_ibdev *dev);
/*
* The PSN_MASK and PSN_SHIFT allow for
@@ -901,7 +343,7 @@ void hfi1_free_agents(struct hfi1_ibdev *dev);
*/
static inline int cmp_msn(u32 a, u32 b)
{
- return (((int) a) - ((int) b)) << 8;
+ return (((int)a) - ((int)b)) << 8;
}
/*
@@ -910,7 +352,7 @@ static inline int cmp_msn(u32 a, u32 b)
*/
static inline int cmp_psn(u32 a, u32 b)
{
- return (((int) a) - ((int) b)) << PSN_SHIFT;
+ return (((int)a) - ((int)b)) << PSN_SHIFT;
}
/*
@@ -929,23 +371,15 @@ static inline u32 delta_psn(u32 a, u32 b)
return (((int)a - (int)b) << PSN_SHIFT) >> PSN_SHIFT;
}
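
Both comparisons depend on the PSN being 24 bits wide: shifting the 32-bit difference left by PSN_SHIFT (8) promotes PSN bit 23 into the sign bit, so ordering survives wraparound exactly as in serial-number arithmetic. A standalone sketch (the left shift of a possibly negative value mirrors what the kernel code itself does):

#include <stdio.h>

#define PSN_SHIFT 8

static int cmp_psn(unsigned int a, unsigned int b)
{
	return (((int)a) - ((int)b)) << PSN_SHIFT;
}

int main(void)
{
	/* 0xfffffe precedes 0x000001 across the 24-bit wrap */
	printf("%d\n", cmp_psn(0xfffffe, 0x000001) < 0);	/* 1 */
	printf("%d\n", cmp_psn(0x000001, 0xfffffe) > 0);	/* 1 */
	return 0;
}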
-struct hfi1_mcast *hfi1_mcast_find(struct hfi1_ibport *ibp, union ib_gid *mgid);
-
-int hfi1_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int hfi1_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int hfi1_mcast_tree_empty(struct hfi1_ibport *ibp);
-
struct verbs_txreq;
void hfi1_put_txreq(struct verbs_txreq *tx);
-int hfi1_verbs_send(struct hfi1_qp *qp, struct hfi1_pkt_state *ps);
+int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-void hfi1_copy_sge(struct hfi1_sge_state *ss, void *data, u32 length,
- int release);
+void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
+ int release, int copy_last);
-void hfi1_skip_sge(struct hfi1_sge_state *ss, u32 length, int release);
+void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release);
void hfi1_cnp_rcv(struct hfi1_packet *packet);
@@ -957,147 +391,75 @@ void hfi1_rc_hdrerr(
struct hfi1_ctxtdata *rcd,
struct hfi1_ib_header *hdr,
u32 rcv_flags,
- struct hfi1_qp *qp);
+ struct rvt_qp *qp);
u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
-int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
-
struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid);
void hfi1_rc_rnr_retry(unsigned long arg);
+void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to);
+void hfi1_rc_timeout(unsigned long arg);
+void hfi1_del_timers_sync(struct rvt_qp *qp);
+void hfi1_stop_rc_timers(struct rvt_qp *qp);
-void hfi1_rc_send_complete(struct hfi1_qp *qp, struct hfi1_ib_header *hdr);
+void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr);
-void hfi1_rc_error(struct hfi1_qp *qp, enum ib_wc_status err);
+void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
void hfi1_ud_rcv(struct hfi1_packet *packet);
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
-int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region);
-
-void hfi1_free_lkey(struct hfi1_mregion *mr);
-
-int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd,
- struct hfi1_sge *isge, struct ib_sge *sge, int acc);
-
-int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc);
-
-int hfi1_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
- struct ib_recv_wr **bad_wr);
-
-struct ib_srq *hfi1_create_srq(struct ib_pd *ibpd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
-
-int hfi1_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
- enum ib_srq_attr_mask attr_mask,
- struct ib_udata *udata);
-
-int hfi1_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-
-int hfi1_destroy_srq(struct ib_srq *ibsrq);
-
-int hfi1_cq_init(struct hfi1_devdata *dd);
-
-void hfi1_cq_exit(struct hfi1_devdata *dd);
-
-void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int sig);
-
-int hfi1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *hfi1_create_cq(
- struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-
-int hfi1_destroy_cq(struct ib_cq *ibcq);
-
-int hfi1_req_notify_cq(
- struct ib_cq *ibcq,
- enum ib_cq_notify_flags notify_flags);
-
-int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
-struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc);
-
-struct ib_mr *hfi1_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata);
+int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only);
-int hfi1_dereg_mr(struct ib_mr *ibmr);
+void hfi1_migrate_qp(struct rvt_qp *qp);
-struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_entries);
+int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
-struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
+void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
-int hfi1_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
- int list_len, u64 iova);
+int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
-int hfi1_unmap_fmr(struct list_head *fmr_list);
+extern const u32 rc_only_opcode;
+extern const u32 uc_only_opcode;
-int hfi1_dealloc_fmr(struct ib_fmr *ibfmr);
-
-static inline void hfi1_get_mr(struct hfi1_mregion *mr)
-{
- atomic_inc(&mr->refcount);
-}
-
-static inline void hfi1_put_mr(struct hfi1_mregion *mr)
+static inline u8 get_opcode(struct hfi1_ib_header *h)
{
- if (unlikely(atomic_dec_and_test(&mr->refcount)))
- complete(&mr->comp);
-}
+ u16 lnh = be16_to_cpu(h->lrh[0]) & 3;
-static inline void hfi1_put_ss(struct hfi1_sge_state *ss)
-{
- while (ss->num_sge) {
- hfi1_put_mr(ss->sge.mr);
- if (--ss->num_sge)
- ss->sge = *ss->sg_list++;
- }
+ if (lnh == IB_LNH_IBA_LOCAL)
+ return be32_to_cpu(h->u.oth.bth[0]) >> 24;
+ else
+ return be32_to_cpu(h->u.l.oth.bth[0]) >> 24;
}
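
get_opcode() only has to choose the right BTH location: LNH == IB_LNH_IBA_LOCAL means the BTH immediately follows the LRH, otherwise a GRH sits between them; either way the opcode is the most significant byte of bth[0]. A hypothetical raw-word illustration (0x04 is the IBA RC SEND_ONLY opcode):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl/ntohl */

int main(void)
{
	/* bth[0] as it arrives off the wire, in network byte order,
	 * with opcode 0x04 in the top byte */
	uint32_t bth0 = htonl(0x04000000);

	printf("opcode=0x%02x\n", ntohl(bth0) >> 24);	/* opcode=0x04 */
	return 0;
}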
-void hfi1_release_mmap_info(struct kref *ref);
-
-struct hfi1_mmap_info *hfi1_create_mmap_info(struct hfi1_ibdev *dev, u32 size,
- struct ib_ucontext *context,
- void *obj);
-
-void hfi1_update_mmap_info(struct hfi1_ibdev *dev, struct hfi1_mmap_info *ip,
- u32 size, void *obj);
-
-int hfi1_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int hfi1_get_rwqe(struct hfi1_qp *qp, int wr_id_only);
-
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_ib_header *hdr,
- int has_grh, struct hfi1_qp *qp, u32 bth0);
+ int has_grh, struct rvt_qp *qp, u32 bth0);
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
struct ib_global_route *grh, u32 hwords, u32 nwords);
-void hfi1_make_ruc_header(struct hfi1_qp *qp, struct hfi1_other_headers *ohdr,
- u32 bth0, u32 bth2, int middle);
+void hfi1_make_ruc_header(struct rvt_qp *qp, struct hfi1_other_headers *ohdr,
+ u32 bth0, u32 bth2, int middle,
+ struct hfi1_pkt_state *ps);
-void hfi1_do_send(struct work_struct *work);
+void _hfi1_do_send(struct work_struct *work);
-void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
+void hfi1_do_send(struct rvt_qp *qp);
+
+void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
enum ib_wc_status status);
-void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct hfi1_qp *qp, int is_fecn);
+void hfi1_send_rc_ack(struct hfi1_ctxtdata *, struct rvt_qp *qp, int is_fecn);
-int hfi1_make_rc_req(struct hfi1_qp *qp);
+int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-int hfi1_make_uc_req(struct hfi1_qp *qp);
+int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-int hfi1_make_ud_req(struct hfi1_qp *qp);
+int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
int hfi1_register_ib_device(struct hfi1_devdata *);
@@ -1107,24 +469,42 @@ void hfi1_ib_rcv(struct hfi1_packet *packet);
unsigned hfi1_get_npkeys(struct hfi1_devdata *);
-int hfi1_verbs_send_dma(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
-int hfi1_verbs_send_pio(struct hfi1_qp *qp, struct hfi1_pkt_state *ps,
+int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
-struct send_context *qp_to_send_context(struct hfi1_qp *qp, u8 sc5);
+int hfi1_wss_init(void);
+void hfi1_wss_exit(void);
+
+/* platform specific: return the lowest level cache (llc) size, in KiB */
+static inline int wss_llc_size(void)
+{
+ /* assume that the boot CPU value is universal for all CPUs */
+ return boot_cpu_data.x86_cache_size;
+}
+
+/* platform specific: cacheless copy */
+static inline void cacheless_memcpy(void *dst, void *src, size_t n)
+{
+ /*
+ * Use the only available X64 cacheless copy. Add a __user cast
+ * to quiet sparse. The src argument is already in the kernel so
+ * there are no security issues. The extra fault recovery machinery
+ * is not invoked.
+ */
+ __copy_user_nocache(dst, (void __user *)src, n, 0);
+}
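
A hedged sketch of how such a helper is usually gated: copy through the cache for small transfers and go cacheless only once a write-set heuristic says the payload would evict more than it is worth. The over_wss flag and function name are illustrative, not the driver's actual interface:

#include <linux/types.h>
#include <linux/string.h>

static void copy_payload(void *dst, void *src, size_t n, bool over_wss)
{
	if (over_wss)
		cacheless_memcpy(dst, src, n);	/* bypass the caches */
	else
		memcpy(dst, src, n);		/* normal cached copy */
}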
extern const enum ib_wc_opcode ib_hfi1_wc_opcode[];
extern const u8 hdr_len_by_opcode[];
-extern const int ib_hfi1_state_ops[];
+extern const int ib_rvt_state_ops[];
extern __be64 ib_hfi1_sys_image_guid; /* in network order */
-extern unsigned int hfi1_lkey_table_size;
-
extern unsigned int hfi1_max_cqes;
extern unsigned int hfi1_max_cqs;
@@ -1145,8 +525,8 @@ extern unsigned int hfi1_max_srq_sges;
extern unsigned int hfi1_max_srq_wrs;
-extern const u32 ib_hfi1_rnr_table[];
+extern unsigned short piothreshold;
-extern struct ib_dma_mapping_ops hfi1_dma_mapping_ops;
+extern const u32 ib_hfi1_rnr_table[];
#endif /* HFI1_VERBS_H */
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/staging/rdma/hfi1/verbs_txreq.c
new file mode 100644
index 000000000000..bc95c4112c61
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/verbs_txreq.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+#include "verbs_txreq.h"
+#include "qp.h"
+#include "trace.h"
+
+#define TXREQ_LEN 24
+
+void hfi1_put_txreq(struct verbs_txreq *tx)
+{
+ struct hfi1_ibdev *dev;
+ struct rvt_qp *qp;
+ unsigned long flags;
+ unsigned int seq;
+ struct hfi1_qp_priv *priv;
+
+ qp = tx->qp;
+ dev = to_idev(qp->ibqp.device);
+
+ if (tx->mr)
+ rvt_put_mr(tx->mr);
+
+ sdma_txclean(dd_from_dev(dev), &tx->txreq);
+
+ /* Free verbs_txreq and return to slab cache */
+ kmem_cache_free(dev->verbs_txreq_cache, tx);
+
+ do {
+ seq = read_seqbegin(&dev->iowait_lock);
+ if (!list_empty(&dev->txwait)) {
+ struct iowait *wait;
+
+ write_seqlock_irqsave(&dev->iowait_lock, flags);
+ wait = list_first_entry(&dev->txwait, struct iowait,
+ list);
+ qp = iowait_to_qp(wait);
+ priv = qp->priv;
+ list_del_init(&priv->s_iowait.list);
+ /* refcount held until actual wake up */
+ write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+ hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
+ break;
+ }
+ } while (read_seqretry(&dev->iowait_lock, seq));
+}
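
The loop above is the seqlock reader idiom applied to a wakeup list: poll under read_seqbegin()/read_seqretry() so the common empty case stays lock-free, and take the write lock only when a waiter is actually visible. A generic sketch of the pattern, assuming a seqlock-protected wait list and adding a recheck under the lock:

#include <linux/seqlock.h>
#include <linux/list.h>

static void wake_one_waiter(seqlock_t *lock, struct list_head *waitq)
{
	unsigned long flags;
	unsigned int seq;

	do {
		seq = read_seqbegin(lock);	/* optimistic, lock-free read */
		if (!list_empty(waitq)) {
			write_seqlock_irqsave(lock, flags);
			if (!list_empty(waitq))	/* recheck under the lock */
				list_del_init(waitq->next);
			write_sequnlock_irqrestore(lock, flags);
			break;
		}
	} while (read_seqretry(lock, seq));	/* a writer raced us: retry */
}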
+
+struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp)
+{
+ struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->s_lock, flags);
+ write_seqlock(&dev->iowait_lock);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+ struct hfi1_qp_priv *priv;
+
+ tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+ if (tx)
+ goto out;
+ priv = qp->priv;
+ if (list_empty(&priv->s_iowait.list)) {
+ dev->n_txwait++;
+ qp->s_flags |= RVT_S_WAIT_TX;
+ list_add_tail(&priv->s_iowait.list, &dev->txwait);
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
+ atomic_inc(&qp->refcount);
+ }
+ qp->s_flags &= ~RVT_S_BUSY;
+ }
+out:
+ write_sequnlock(&dev->iowait_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ return tx;
+}
+
+static void verbs_txreq_kmem_cache_ctor(void *obj)
+{
+ struct verbs_txreq *tx = (struct verbs_txreq *)obj;
+
+ memset(tx, 0, sizeof(*tx));
+}
+
+int verbs_txreq_init(struct hfi1_ibdev *dev)
+{
+ char buf[TXREQ_LEN];
+ struct hfi1_devdata *dd = dd_from_dev(dev);
+
+ snprintf(buf, sizeof(buf), "hfi1_%u_vtxreq_cache", dd->unit);
+ dev->verbs_txreq_cache = kmem_cache_create(buf,
+ sizeof(struct verbs_txreq),
+ 0, SLAB_HWCACHE_ALIGN,
+ verbs_txreq_kmem_cache_ctor);
+ if (!dev->verbs_txreq_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void verbs_txreq_exit(struct hfi1_ibdev *dev)
+{
+ kmem_cache_destroy(dev->verbs_txreq_cache);
+ dev->verbs_txreq_cache = NULL;
+}
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/staging/rdma/hfi1/verbs_txreq.h
new file mode 100644
index 000000000000..1cf69b2fe4a5
--- /dev/null
+++ b/drivers/staging/rdma/hfi1/verbs_txreq.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef HFI1_VERBS_TXREQ_H
+#define HFI1_VERBS_TXREQ_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "verbs.h"
+#include "sdma_txreq.h"
+#include "iowait.h"
+
+struct verbs_txreq {
+ struct hfi1_pio_header phdr;
+ struct sdma_txreq txreq;
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_mregion *mr;
+ struct rvt_sge_state *ss;
+ struct sdma_engine *sde;
+ struct send_context *psc;
+ u16 hdr_dwords;
+};
+
+struct hfi1_ibdev;
+struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp);
+
+static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
+ struct rvt_qp *qp)
+{
+ struct verbs_txreq *tx;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+ if (unlikely(!tx)) {
+ /* call slow path to get the lock */
+ tx = __get_txreq(dev, qp);
+ if (IS_ERR(tx))
+ return tx;
+ }
+ tx->qp = qp;
+ tx->mr = NULL;
+ tx->sde = priv->s_sde;
+ tx->psc = priv->s_sendcontext;
+ /* so that we can test if the sdma descriptors are there */
+ tx->txreq.num_desc = 0;
+ return tx;
+}
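
A hypothetical call site showing the fast/slow split: get_txreq() tries a lock-free kmem_cache_alloc() first, and only __get_txreq() takes qp->s_lock, when the cache is exhausted and the QP must be queued on dev->txwait:

static int send_one_packet(struct hfi1_ibdev *dev, struct rvt_qp *qp)
{
	struct verbs_txreq *tx;

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		return PTR_ERR(tx);	/* -EBUSY: QP parked on dev->txwait */

	/* ... build the headers and SDMA descriptors into tx ... */

	hfi1_put_txreq(tx);	/* back to the cache; wakes one waiter */
	return 0;
}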
+
+static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
+{
+ return &tx->txreq;
+}
+
+static inline struct verbs_txreq *get_waiting_verbs_txreq(struct rvt_qp *qp)
+{
+ struct sdma_txreq *stx;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ stx = iowait_get_txhead(&priv->s_iowait);
+ if (stx)
+ return container_of(stx, struct verbs_txreq, txreq);
+ return NULL;
+}
+
+void hfi1_put_txreq(struct verbs_txreq *tx);
+int verbs_txreq_init(struct hfi1_ibdev *dev);
+void verbs_txreq_exit(struct hfi1_ibdev *dev);
+
+#endif /* HFI1_VERBS_TXREQ_H */
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index ed723585b502..29b9834870fd 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -53,4 +53,4 @@ r8188eu-y := \
obj-$(CONFIG_R8188EU) := r8188eu.o
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/include
+ccflags-y += -D__CHECK_ENDIAN__ -I$(srctree)/$(src)/include
diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO
index b574b235b340..ce60f07b9977 100644
--- a/drivers/staging/rtl8188eu/TODO
+++ b/drivers/staging/rtl8188eu/TODO
@@ -15,5 +15,5 @@ TODO:
rcu_read_unlock();
Perhaps delete it, perhaps assign to some local variable.
-Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index e5d29fe9d446..012860b34651 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -76,90 +76,87 @@ static void update_BCNTIM(struct adapter *padapter)
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
unsigned char *pie = pnetwork_mlmeext->IEs;
+ u8 *p, *dst_ie, *premainder_ie = NULL;
+ u8 *pbackup_remainder_ie = NULL;
+ uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
/* update TIM IE */
- if (true) {
- u8 *p, *dst_ie, *premainder_ie = NULL;
- u8 *pbackup_remainder_ie = NULL;
- uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
-
- p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen,
- pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_);
- if (p != NULL && tim_ielen > 0) {
- tim_ielen += 2;
- premainder_ie = p+tim_ielen;
- tim_ie_offset = (int)(p - pie);
- remainder_ielen = pnetwork_mlmeext->IELength -
- tim_ie_offset - tim_ielen;
- /* append TIM IE from dst_ie offset */
- dst_ie = p;
- } else {
- tim_ielen = 0;
+ p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen,
+ pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_);
+ if (p != NULL && tim_ielen > 0) {
+ tim_ielen += 2;
+ premainder_ie = p+tim_ielen;
+ tim_ie_offset = (int)(p - pie);
+ remainder_ielen = pnetwork_mlmeext->IELength -
+ tim_ie_offset - tim_ielen;
+ /* append TIM IE from dst_ie offset */
+ dst_ie = p;
+ } else {
+ tim_ielen = 0;
- /* calculate head_len */
- offset = _FIXED_IE_LENGTH_;
- offset += pnetwork_mlmeext->Ssid.SsidLength + 2;
+ /* calculate head_len */
+ offset = _FIXED_IE_LENGTH_;
+ offset += pnetwork_mlmeext->Ssid.SsidLength + 2;
- /* get supported rates len */
- p = rtw_get_ie(pie + _BEACON_IE_OFFSET_,
- _SUPPORTEDRATES_IE_, &tmp_len,
- (pnetwork_mlmeext->IELength -
- _BEACON_IE_OFFSET_));
- if (p != NULL)
- offset += tmp_len+2;
+ /* get supported rates len */
+ p = rtw_get_ie(pie + _BEACON_IE_OFFSET_,
+ _SUPPORTEDRATES_IE_, &tmp_len,
+ (pnetwork_mlmeext->IELength -
+ _BEACON_IE_OFFSET_));
+ if (p != NULL)
+ offset += tmp_len+2;
- /* DS Parameter Set IE, len = 3 */
- offset += 3;
+ /* DS Parameter Set IE, len = 3 */
+ offset += 3;
- premainder_ie = pie + offset;
+ premainder_ie = pie + offset;
- remainder_ielen = pnetwork_mlmeext->IELength -
- offset - tim_ielen;
+ remainder_ielen = pnetwork_mlmeext->IELength -
+ offset - tim_ielen;
- /* append TIM IE from offset */
- dst_ie = pie + offset;
- }
+ /* append TIM IE from offset */
+ dst_ie = pie + offset;
+ }
- if (remainder_ielen > 0) {
- pbackup_remainder_ie = rtw_malloc(remainder_ielen);
- if (pbackup_remainder_ie && premainder_ie)
- memcpy(pbackup_remainder_ie,
- premainder_ie, remainder_ielen);
- }
- *dst_ie++ = _TIM_IE_;
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie,
+ premainder_ie, remainder_ielen);
+ }
+ *dst_ie++ = _TIM_IE_;
- if ((pstapriv->tim_bitmap&0xff00) &&
- (pstapriv->tim_bitmap&0x00fc))
- tim_ielen = 5;
- else
- tim_ielen = 4;
+ if ((pstapriv->tim_bitmap&0xff00) &&
+ (pstapriv->tim_bitmap&0x00fc))
+ tim_ielen = 5;
+ else
+ tim_ielen = 4;
- *dst_ie++ = tim_ielen;
+ *dst_ie++ = tim_ielen;
- *dst_ie++ = 0;/* DTIM count */
- *dst_ie++ = 1;/* DTIM period */
+ *dst_ie++ = 0;/* DTIM count */
+ *dst_ie++ = 1;/* DTIM period */
- if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */
- *dst_ie++ = BIT(0);/* bitmap ctrl */
- else
- *dst_ie++ = 0;
+ if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */
+ *dst_ie++ = BIT(0);/* bitmap ctrl */
+ else
+ *dst_ie++ = 0;
- if (tim_ielen == 4) {
- *dst_ie++ = pstapriv->tim_bitmap & 0xff;
- } else if (tim_ielen == 5) {
- put_unaligned_le16(pstapriv->tim_bitmap, dst_ie);
- dst_ie += 2;
- }
+ if (tim_ielen == 4) {
+ *dst_ie++ = pstapriv->tim_bitmap & 0xff;
+ } else if (tim_ielen == 5) {
+ put_unaligned_le16(pstapriv->tim_bitmap, dst_ie);
+ dst_ie += 2;
+ }
- /* copy remainder IE */
- if (pbackup_remainder_ie) {
- memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
- kfree(pbackup_remainder_ie);
- }
- offset = (uint)(dst_ie - pie);
- pnetwork_mlmeext->IELength = offset + remainder_ielen;
+ kfree(pbackup_remainder_ie);
}
+ offset = (uint)(dst_ie - pie);
+ pnetwork_mlmeext->IELength = offset + remainder_ielen;
set_tx_beacon_cmd(padapter);
}
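
With the always-true wrapper gone, the function reads as a straight construction of the TIM element. For reference, the bytes it emits follow the IEEE 802.11 TIM layout; the struct below is illustrative, not a type from the driver:

#include <linux/types.h>

struct tim_ie {
	u8 eid;			/* _TIM_IE_ */
	u8 len;			/* 4, or 5 when the high bitmap byte is used */
	u8 dtim_count;		/* 0: this driver treats every beacon as DTIM */
	u8 dtim_period;		/* 1 */
	u8 bitmap_ctrl;		/* BIT(0) set when bc/mc frames are buffered */
	u8 virtual_bitmap[];	/* 1 or 2 bytes of pstapriv->tim_bitmap */
} __packed;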
@@ -203,7 +200,7 @@ void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
if (bmatch)
dst_ie = p;
else
- dst_ie = (p+ielen);
+ dst_ie = p+ielen;
}
if (remainder_ielen > 0) {
@@ -569,7 +566,7 @@ static void update_bmc_sta(struct adapter *padapter)
psta->ieee8021x_blocked = 0;
- memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+ memset(&psta->sta_stats, 0, sizeof(struct stainfo_stats));
/* prepare for add_RATid */
supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
@@ -692,7 +689,7 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
/* todo: init other variables */
- memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+ memset(&psta->sta_stats, 0, sizeof(struct stainfo_stats));
spin_lock_bh(&psta->lock);
psta->state |= _FW_LINKED;
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 433b926ceae7..e5a6b7a70df7 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -69,23 +69,17 @@ exit:
return _SUCCESS;
}
-struct cmd_obj *rtw_dequeue_cmd(struct __queue *queue)
+struct cmd_obj *rtw_dequeue_cmd(struct __queue *queue)
{
unsigned long irqL;
struct cmd_obj *obj;
-
spin_lock_irqsave(&queue->lock, irqL);
- if (list_empty(&(queue->queue))) {
- obj = NULL;
- } else {
- obj = container_of((&queue->queue)->next, struct cmd_obj, list);
+ obj = list_first_entry_or_null(&queue->queue, struct cmd_obj, list);
+ if (obj)
list_del_init(&obj->list);
- }
-
spin_unlock_irqrestore(&queue->lock, irqL);
-
return obj;
}
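
The same conversion recurs throughout this series (_rtw_alloc_network() and _rtw_alloc_recvframe() below get the identical treatment): list_first_entry_or_null() folds the list_empty() test and the container_of() into one call. A generic sketch of the resulting dequeue shape:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	int payload;
};

static struct item *dequeue_item(spinlock_t *lock, struct list_head *q)
{
	struct item *it;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	it = list_first_entry_or_null(q, struct item, list);
	if (it)
		list_del_init(&it->list);	/* unlink, leave it reusable */
	spin_unlock_irqrestore(lock, flags);
	return it;
}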
@@ -400,9 +394,8 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
- RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
goto exit;
}
/* for IEs is fix buf size */
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 2c4afb80fc64..93e898d598fe 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -149,7 +149,7 @@ int proc_get_fwstate(char *page, char **start,
{
struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int len = 0;
@@ -184,7 +184,7 @@ int proc_get_mlmext_state(char *page, char **start,
struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
int len = 0;
@@ -200,7 +200,7 @@ int proc_get_qos_option(char *page, char **start,
{
struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int len = 0;
@@ -216,7 +216,7 @@ int proc_get_ht_option(char *page, char **start,
{
struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int len = 0;
@@ -247,9 +247,9 @@ int proc_get_ap_info(char *page, char **start,
struct sta_info *psta;
struct net_device *dev = data;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
struct sta_priv *pstapriv = &padapter->stapriv;
int len = 0;
@@ -851,7 +851,7 @@ int proc_get_all_sta_info(char *page, char **start,
spin_lock_bh(&pstapriv->sta_hash_lock);
for (i = 0; i < NUM_STA; i++) {
- phead = &(pstapriv->sta_hash[i]);
+ phead = &pstapriv->sta_hash[i];
plist = phead->next;
while (phead != plist) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 2320fb11af24..19f11d04d152 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -104,13 +104,11 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
u8 u1temp = 0;
efuseTbl = kzalloc(EFUSE_MAP_LEN_88E, GFP_KERNEL);
- if (efuseTbl == NULL) {
- DBG_88E("%s: alloc efuseTbl fail!\n", __func__);
+ if (!efuseTbl)
return;
- }
eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
- if (eFuseWord == NULL) {
+ if (!eFuseWord) {
DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
goto eFuseWord_failed;
}
@@ -394,7 +392,7 @@ u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_e
u8 badworden = 0x0F;
u8 tmpdata[8];
- memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE);
+ memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
if (!(word_en & BIT(0))) {
tmpaddr = start_addr;
@@ -495,13 +493,13 @@ int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data)
EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section);
- if (data == NULL)
+ if (!data)
return false;
if (offset > max_section)
return false;
- memset((void *)data, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
- memset((void *)tmpdata, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
+ memset(data, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
+ memset(tmpdata, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
/* <Roger_TODO> Efuse has been pre-programmed dummy 5Bytes at the end of Efuse by CP. */
/* Skip dummy parts to prevent unexpected data read from Efuse. */
@@ -572,7 +570,7 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
u16 efuse_addr = *pAddr;
u32 PgWriteSuccess = 0;
- memset((void *)originaldata, 0xff, 8);
+ memset(originaldata, 0xff, 8);
if (Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata)) {
/* check if data exist */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 742b29c590df..f4e4baf6054a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -207,8 +207,8 @@ inline u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
ie_data[0] = ttl;
ie_data[1] = flags;
- *(u16 *)(ie_data+2) = cpu_to_le16(reason);
- *(u16 *)(ie_data+4) = cpu_to_le16(precedence);
+ *(u16 *)(ie_data + 2) = cpu_to_le16(reason);
+ *(u16 *)(ie_data + 4) = cpu_to_le16(precedence);
return rtw_set_ie(buf, 0x118, 6, ie_data, buf_len);
}
@@ -268,18 +268,18 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
cnt = 0;
while (cnt < in_len) {
- if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt + 2], oui, oui_len))) {
target_ie = &in_ie[cnt];
if (ie)
- memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
+ memcpy(ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
if (ielen)
- *ielen = in_ie[cnt+1]+2;
+ *ielen = in_ie[cnt + 1] + 2;
break;
} else {
- cnt += in_ie[cnt+1]+2; /* goto next */
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
}
return target_ie;
@@ -530,8 +530,8 @@ int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwis
}
- if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
- (memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN)))
+ if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie + 1) != (u8)(wpa_ie_len - 2)) ||
+ (memcmp(wpa_ie + 2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN)))
return _FAIL;
pos = wpa_ie;
@@ -599,7 +599,7 @@ int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwi
}
- if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2)))
+ if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie + 1) != (u8)(rsn_ie_len - 2)))
return _FAIL;
pos = rsn_ie;
@@ -671,45 +671,45 @@ int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie,
while (cnt < in_len) {
authmode = in_ie[cnt];
- if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
+ if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4))) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
- sec_idx, in_ie[cnt+1]+2));
+ sec_idx, in_ie[cnt + 1] + 2));
if (wpa_ie) {
- memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+ memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
- for (i = 0; i < (in_ie[cnt+1]+2); i += 8) {
+ for (i = 0; i < (in_ie[cnt + 1] + 2); i += 8) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
- wpa_ie[i], wpa_ie[i+1], wpa_ie[i+2], wpa_ie[i+3], wpa_ie[i+4],
- wpa_ie[i+5], wpa_ie[i+6], wpa_ie[i+7]));
+ wpa_ie[i], wpa_ie[i + 1], wpa_ie[i + 2], wpa_ie[i + 3], wpa_ie[i + 4],
+ wpa_ie[i + 5], wpa_ie[i + 6], wpa_ie[i + 7]));
}
}
- *wpa_len = in_ie[cnt+1]+2;
- cnt += in_ie[cnt+1]+2; /* get next */
+ *wpa_len = in_ie[cnt + 1] + 2;
+ cnt += in_ie[cnt + 1] + 2; /* get next */
} else {
if (authmode == _WPA2_IE_ID_) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("\n get_rsn_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
- sec_idx, in_ie[cnt+1]+2));
+ sec_idx, in_ie[cnt + 1] + 2));
if (rsn_ie) {
- memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+ memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
- for (i = 0; i < (in_ie[cnt+1]+2); i += 8) {
+ for (i = 0; i < (in_ie[cnt + 1] + 2); i += 8) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
- rsn_ie[i], rsn_ie[i+1], rsn_ie[i+2], rsn_ie[i+3], rsn_ie[i+4],
- rsn_ie[i+5], rsn_ie[i+6], rsn_ie[i+7]));
+ rsn_ie[i], rsn_ie[i + 1], rsn_ie[i + 2], rsn_ie[i + 3], rsn_ie[i + 4],
+ rsn_ie[i + 5], rsn_ie[i + 6], rsn_ie[i + 7]));
}
}
- *rsn_len = in_ie[cnt+1]+2;
- cnt += in_ie[cnt+1]+2; /* get next */
+ *rsn_len = in_ie[cnt + 1] + 2;
+ cnt += in_ie[cnt + 1] + 2; /* get next */
} else {
- cnt += in_ie[cnt+1]+2; /* get next */
+ cnt += in_ie[cnt + 1] + 2; /* get next */
}
}
}
@@ -729,7 +729,7 @@ u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
eid = ie_ptr[0];
if ((eid == _WPA_IE_ID_) && (!memcmp(&ie_ptr[2], wps_oui, 4))) {
- *wps_ielen = ie_ptr[1]+2;
+ *wps_ielen = ie_ptr[1] + 2;
match = true;
}
return match;
@@ -761,20 +761,20 @@ u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
while (cnt < in_len) {
eid = in_ie[cnt];
- if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], wps_oui, 4))) {
+ if ((eid == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], wps_oui, 4))) {
wpsie_ptr = &in_ie[cnt];
if (wps_ie)
- memcpy(wps_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+ memcpy(wps_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
if (wps_ielen)
- *wps_ielen = in_ie[cnt+1]+2;
+ *wps_ielen = in_ie[cnt + 1] + 2;
- cnt += in_ie[cnt+1]+2;
+ cnt += in_ie[cnt + 1] + 2;
break;
} else {
- cnt += in_ie[cnt+1]+2; /* goto next */
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
}
}
return wpsie_ptr;
@@ -848,12 +848,12 @@ u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8
if (attr_ptr && attr_len) {
if (buf_content)
- memcpy(buf_content, attr_ptr+4, attr_len-4);
+ memcpy(buf_content, attr_ptr + 4, attr_len - 4);
if (len_content)
- *len_content = attr_len-4;
+ *len_content = attr_len - 4;
- return attr_ptr+4;
+ return attr_ptr + 4;
}
return NULL;
@@ -935,8 +935,8 @@ static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
}
break;
default:
- DBG_88E("unknown vendor specific information element ignored (vendor OUI %02x:%02x:%02x len=%lu)\n",
- pos[0], pos[1], pos[2], (unsigned long)elen);
+ DBG_88E("unknown vendor specific information element ignored (vendor OUI %3phC len=%lu)\n",
+ pos, (unsigned long)elen);
return -1;
}
return 0;
@@ -1106,9 +1106,9 @@ void dump_ies(u8 *buf, u32 buf_len)
u8 *pos = buf;
u8 id, len;
- while (pos-buf <= buf_len) {
+ while (pos - buf <= buf_len) {
id = *pos;
- len = *(pos+1);
+ len = *(pos + 1);
DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len);
dump_wps_ie(pos, len);
@@ -1130,11 +1130,11 @@ void dump_wps_ie(u8 *ie, u32 ie_len)
return;
pos += 6;
- while (pos-ie < ie_len) {
+ while (pos - ie < ie_len) {
id = get_unaligned_be16(pos);
len = get_unaligned_be16(pos + 2);
DBG_88E("%s ID:0x%04x, LEN:%u\n", __func__, id, len);
- pos += (4+len);
+ pos += (4 + len);
}
}
@@ -1188,11 +1188,11 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
unsigned char *pbuf;
int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
int ret = _FAIL;
- pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+ pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
if (pbuf && (wpa_ielen > 0)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_cipher_info: wpa_ielen: %d", wpa_ielen));
- if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
pnetwork->BcnInfo.is_8021x = is8021x;
@@ -1201,11 +1201,11 @@ static int rtw_get_cipher_info(struct wlan_network *pnetwork)
ret = _SUCCESS;
}
} else {
- pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+ pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength - 12);
if (pbuf && (wpa_ielen > 0)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n"));
- if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is8021x)) {
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE OK!!!\n"));
pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
pnetwork->BcnInfo.group_cipher = group_cipher;
@@ -1349,8 +1349,8 @@ int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *act
fc = le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)frame)->frame_ctl);
- if ((fc & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE)) !=
- (RTW_IEEE80211_FTYPE_MGMT|RTW_IEEE80211_STYPE_ACTION))
+ if ((fc & (RTW_IEEE80211_FCTL_FTYPE | RTW_IEEE80211_FCTL_STYPE)) !=
+ (RTW_IEEE80211_FTYPE_MGMT | RTW_IEEE80211_STYPE_ACTION))
return false;
c = frame_body[0];
diff --git a/drivers/staging/rtl8188eu/core/rtw_iol.c b/drivers/staging/rtl8188eu/core/rtw_iol.c
index cdcf0eacc0e0..2e2145caa56b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_iol.c
+++ b/drivers/staging/rtl8188eu/core/rtw_iol.c
@@ -11,21 +11,18 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
-#include<rtw_iol.h>
+#include <rtw_iol.h>
-bool rtw_IOL_applied(struct adapter *adapter)
+bool rtw_IOL_applied(struct adapter *adapter)
{
- if (1 == adapter->registrypriv.fw_iol)
+ if (adapter->registrypriv.fw_iol == 1)
return true;
- if ((2 == adapter->registrypriv.fw_iol) && (!adapter_to_dvobj(adapter)->ishighspeed))
+ if ((adapter->registrypriv.fw_iol == 2) &&
+ (!adapter_to_dvobj(adapter)->ishighspeed))
return true;
return false;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index abab854e6889..a645a620ebe2 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -122,31 +122,26 @@ void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
rtw_free_mlme_priv_ie_data(pmlmepriv);
- if (pmlmepriv) {
- if (pmlmepriv->free_bss_buf)
- vfree(pmlmepriv->free_bss_buf);
- }
+ if (pmlmepriv)
+ vfree(pmlmepriv->free_bss_buf);
}
-struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)
+ /* _queue *free_queue) */
{
- struct wlan_network *pnetwork;
+ struct wlan_network *pnetwork;
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
- struct list_head *plist = NULL;
spin_lock_bh(&free_queue->lock);
-
- if (list_empty(&free_queue->queue)) {
- pnetwork = NULL;
+ pnetwork = list_first_entry_or_null(&free_queue->queue,
+ struct wlan_network, list);
+ if (!pnetwork)
goto exit;
- }
- plist = free_queue->queue.next;
-
- pnetwork = container_of(plist, struct wlan_network, list);
list_del_init(&pnetwork->list);
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr=%p\n", plist));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("_rtw_alloc_network: ptr=%p\n", &pnetwork->list));
pnetwork->network_type = 0;
pnetwork->fixed = false;
pnetwork->last_scanned = jiffies;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 3eca6874b6df..591a9127b573 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -20,6 +20,7 @@
#define _RTW_MLME_EXT_C_
#include <linux/ieee80211.h>
+#include <asm/unaligned.h>
#include <osdep_service.h>
#include <drv_types.h>
@@ -1027,7 +1028,6 @@ static void issue_assocreq(struct adapter *padapter)
unsigned char *pframe, *p;
struct rtw_ieee80211_hdr *pwlanhdr;
__le16 *fctrl;
- __le16 le_tmp;
unsigned int i, j, ie_len, index = 0;
unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
struct ndis_802_11_var_ie *pIE;
@@ -1073,8 +1073,7 @@ static void issue_assocreq(struct adapter *padapter)
/* listen interval */
/* todo: listen interval for power saving */
- le_tmp = cpu_to_le16(3);
- memcpy(pframe , (unsigned char *)&le_tmp, 2);
+ put_unaligned_le16(3, pframe);
pframe += 2;
pattrib->pktlen += 2;
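
put_unaligned_le16() replaces the cpu_to_le16-into-a-temporary-then-memcpy sequence with one endian-aware store that also tolerates the unaligned frame pointer. A small sketch of the idiom (helper name hypothetical):

#include <linux/types.h>
#include <asm/unaligned.h>

static u8 *append_le16(u8 *pframe, u16 val)
{
	put_unaligned_le16(val, pframe);	/* cpu_to_le16 + unaligned store */
	return pframe + 2;
}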
@@ -1673,7 +1672,6 @@ static void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
fctrl = &(pwlanhdr->frame_ctl);
*(fctrl) = 0;
- /* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
memcpy(pwlanhdr->addr3, pnetwork->MacAddress, ETH_ALEN);
@@ -3653,7 +3651,7 @@ static unsigned int on_action_spct(struct adapter *padapter,
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
u8 category;
u8 action;
@@ -3740,10 +3738,10 @@ static unsigned int OnAction_back(struct adapter *padapter,
memcpy(&(pmlmeinfo->ADDBA_req), &(frame_body[2]), sizeof(struct ADDBA_request));
process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
- if (pmlmeinfo->bAcceptAddbaReq)
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
- else
- issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
+ /* 37 = reject ADDBA Req */
+ issue_action_BA(padapter, addr,
+ RTW_WLAN_ACTION_ADDBA_RESP,
+ pmlmeinfo->accept_addba_req ? 0 : 37);
break;
case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
status = get_unaligned_le16(&frame_body[3]);
@@ -4150,7 +4148,7 @@ int init_mlme_ext_priv(struct adapter *padapter)
pmlmeext->padapter = padapter;
init_mlme_ext_priv_value(padapter);
- pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
+ pmlmeinfo->accept_addba_req = pregistrypriv->accept_addba_req;
init_mlme_ext_timer(padapter);
@@ -5063,7 +5061,7 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
/* clear CAM */
flush_all_cam_entry(padapter);
- memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength));
pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
@@ -5122,7 +5120,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
pmlmeinfo->candidate_tid_bitmap = 0;
pmlmeinfo->bwmode_updated = false;
- memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ memcpy(pnetwork, pbuf, offsetof(struct wlan_bssid_ex, IELength));
pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 110b8c0b6cd7..5f53aa1cfd8a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -116,9 +116,7 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
rtw_free_uc_swdec_pending_queue(padapter);
- if (precvpriv->pallocated_frame_buf) {
- vfree(precvpriv->pallocated_frame_buf);
- }
+ vfree(precvpriv->pallocated_frame_buf);
rtw_hal_free_recv_priv(padapter);
@@ -127,29 +125,22 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
{
struct recv_frame *hdr;
- struct list_head *plist, *phead;
struct adapter *padapter;
struct recv_priv *precvpriv;
- if (list_empty(&pfree_recv_queue->queue)) {
- hdr = NULL;
- } else {
- phead = get_list_head(pfree_recv_queue);
-
- plist = phead->next;
-
- hdr = container_of(plist, struct recv_frame, list);
-
+ hdr = list_first_entry_or_null(&pfree_recv_queue->queue,
+ struct recv_frame, list);
+ if (hdr) {
list_del_init(&hdr->list);
padapter = hdr->adapter;
- if (padapter != NULL) {
+ if (padapter) {
precvpriv = &padapter->recvpriv;
if (pfree_recv_queue == &precvpriv->free_recv_queue)
precvpriv->free_recvframe_cnt--;
}
}
- return (struct recv_frame *)hdr;
+ return hdr;
}
struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
@@ -248,7 +239,7 @@ void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfre
plist = plist->next;
- rtw_free_recvframe((struct recv_frame *)hdr, pfree_recv_queue);
+ rtw_free_recvframe(hdr, pfree_recv_queue);
}
spin_unlock(&pframequeue->lock);
@@ -917,9 +908,8 @@ static int sta2ap_data_frame(struct adapter *adapter,
process_pwrbit_data(adapter, precv_frame);
- if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+ if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE)
process_wmmps_data(adapter, precv_frame);
- }
if (GetFrameSubType(ptr) & BIT(6)) {
/* No data, will not indicate to upper layer, temporarily count it here */
@@ -1274,32 +1264,25 @@ static int validate_recv_frame(struct adapter *adapter,
/* Dump rx packets */
rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
if (bDumpRxPkt == 1) {/* dump all rx packets */
- int i;
- DBG_88E("#############################\n");
-
- for (i = 0; i < 64; i += 8)
- DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
- *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
- DBG_88E("#############################\n");
+ if (_drv_err_ <= GlobalDebugLevel) {
+ pr_info(DRIVER_PREFIX "#############################\n");
+ print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
+ 16, 1, ptr, 64, false);
+ pr_info(DRIVER_PREFIX "#############################\n");
+ }
} else if (bDumpRxPkt == 2) {
- if (type == WIFI_MGT_TYPE) {
- int i;
- DBG_88E("#############################\n");
-
- for (i = 0; i < 64; i += 8)
- DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
- *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
- DBG_88E("#############################\n");
+ if ((_drv_err_ <= GlobalDebugLevel) && (type == WIFI_MGT_TYPE)) {
+ pr_info(DRIVER_PREFIX "#############################\n");
+ print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
+ 16, 1, ptr, 64, false);
+ pr_info(DRIVER_PREFIX "#############################\n");
}
} else if (bDumpRxPkt == 3) {
- if (type == WIFI_DATA_TYPE) {
- int i;
- DBG_88E("#############################\n");
-
- for (i = 0; i < 64; i += 8)
- DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
- *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
- DBG_88E("#############################\n");
+ if ((_drv_err_ <= GlobalDebugLevel) && (type == WIFI_DATA_TYPE)) {
+ pr_info(DRIVER_PREFIX "#############################\n");
+ print_hex_dump(KERN_INFO, DRIVER_PREFIX, DUMP_PREFIX_NONE,
+ 16, 1, ptr, 64, false);
+ pr_info(DRIVER_PREFIX "#############################\n");
}
}
switch (type) {
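
Each of the three branches above collapses a hand-rolled DBG_88E() hex loop into a single print_hex_dump() call, which prints the 64 bytes at 16 bytes per row. A sketch of the call as used here ("R8188EU: " stands in for the driver's DRIVER_PREFIX and is an assumption for illustration):

#include <linux/printk.h>
#include <linux/types.h>

/* Dump the first 64 bytes of a frame, 16 bytes per line, no offsets. */
static void dump_rx_frame(const u8 *ptr)
{
	print_hex_dump(KERN_INFO, "R8188EU: ", DUMP_PREFIX_NONE,
		       16, 1, ptr, 64, false);
}
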
@@ -1433,7 +1416,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
phead = get_list_head(defrag_q);
plist = phead->next;
pfhdr = container_of(plist, struct recv_frame, list);
- prframe = (struct recv_frame *)pfhdr;
+ prframe = pfhdr;
list_del_init(&(prframe->list));
if (curfragnum != pfhdr->attrib.frag_num) {
@@ -1453,7 +1436,7 @@ static struct recv_frame *recvframe_defrag(struct adapter *adapter,
while (phead != plist) {
pnfhdr = container_of(plist, struct recv_frame, list);
- pnextrframe = (struct recv_frame *)pnfhdr;
+ pnextrframe = pnfhdr;
/* check the fragment sequence (2nd ~n fragment frame) */
@@ -1541,10 +1524,9 @@ struct recv_frame *recvframe_chk_defrag(struct adapter *padapter,
if (pdefrag_q != NULL) {
if (fragnum == 0) {
/* the first fragment */
- if (!list_empty(&pdefrag_q->queue)) {
+ if (!list_empty(&pdefrag_q->queue))
/* free current defrag_q */
rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
- }
}
/* Then enqueue the 0~(n-1) fragment into the defrag_q */
@@ -1660,9 +1642,8 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
a_len -= nSubframe_Length;
if (a_len != 0) {
padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4-1));
- if (padding_len == 4) {
+ if (padding_len == 4)
padding_len = 0;
- }
if (a_len < padding_len) {
goto exit;
@@ -1798,7 +1779,7 @@ static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reor
/* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
prhdr = container_of(plist, struct recv_frame, list);
- prframe = (struct recv_frame *)prhdr;
+ prframe = prhdr;
pattrib = &prframe->attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
index 6983c572b358..4ad2d8f63acf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_rf.c
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -70,20 +70,3 @@ u32 rtw_ch2freq(u32 channel)
return freq;
}
-
-u32 rtw_freq2ch(u32 freq)
-{
- u8 i;
- u32 ch = 0;
-
- for (i = 0; i < ch_freq_map_num; i++) {
- if (freq == ch_freq_map[i].frequency) {
- ch = ch_freq_map[i].channel;
- break;
- }
- }
- if (i == ch_freq_map_num)
- ch = 1;
-
- return ch;
-}
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 22839d57dc8c..b781ccf45bc0 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -1081,13 +1081,13 @@ static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
frsubtype >>= 4;
- memset((void *)mic_iv, 0, 16);
- memset((void *)mic_header1, 0, 16);
- memset((void *)mic_header2, 0, 16);
- memset((void *)ctr_preload, 0, 16);
- memset((void *)chain_buffer, 0, 16);
- memset((void *)aes_out, 0, 16);
- memset((void *)padded_buffer, 0, 16);
+ memset(mic_iv, 0, 16);
+ memset(mic_header1, 0, 16);
+ memset(mic_header2, 0, 16);
+ memset(ctr_preload, 0, 16);
+ memset(chain_buffer, 0, 16);
+ memset(aes_out, 0, 16);
+ memset(padded_buffer, 0, 16);
if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
a4_exists = 0;
@@ -1279,13 +1279,13 @@ static int aes_decipher(u8 *key, uint hdrlen,
uint frsubtype = GetFrameSubType(pframe);
frsubtype >>= 4;
- memset((void *)mic_iv, 0, 16);
- memset((void *)mic_header1, 0, 16);
- memset((void *)mic_header2, 0, 16);
- memset((void *)ctr_preload, 0, 16);
- memset((void *)chain_buffer, 0, 16);
- memset((void *)aes_out, 0, 16);
- memset((void *)padded_buffer, 0, 16);
+ memset(mic_iv, 0, 16);
+ memset(mic_header1, 0, 16);
+ memset(mic_header2, 0, 16);
+ memset(ctr_preload, 0, 16);
+ memset(chain_buffer, 0, 16);
+ memset(aes_out, 0, 16);
+ memset(padded_buffer, 0, 16);
/* start to decrypt the payload */
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 1beeac46bfe7..78a9b9bf3b32 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -172,16 +172,15 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
spin_unlock_bh(&pstapriv->sta_hash_lock);
/*===============================*/
- if (pstapriv->pallocated_stainfo_buf)
- vfree(pstapriv->pallocated_stainfo_buf);
+ vfree(pstapriv->pallocated_stainfo_buf);
}
return _SUCCESS;
}
-struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
+struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- s32 index;
+ s32 index;
struct list_head *phash_list;
struct sta_info *psta;
struct __queue *pfree_sta_queue;
@@ -189,17 +188,15 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
int i = 0;
u16 wRxSeqInitialValue = 0xffff;
-
pfree_sta_queue = &pstapriv->free_sta_queue;
- spin_lock_bh(&(pfree_sta_queue->lock));
-
- if (list_empty(&pfree_sta_queue->queue)) {
+ spin_lock_bh(&pfree_sta_queue->lock);
+ psta = list_first_entry_or_null(&pfree_sta_queue->queue,
+ struct sta_info, list);
+ if (!psta) {
spin_unlock_bh(&pfree_sta_queue->lock);
- psta = NULL;
} else {
- psta = container_of((&pfree_sta_queue->queue)->next, struct sta_info, list);
- list_del_init(&(psta->list));
+ list_del_init(&psta->list);
spin_unlock_bh(&pfree_sta_queue->lock);
_rtw_init_stainfo(psta);
memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
@@ -210,14 +207,11 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
psta = NULL;
goto exit;
}
- phash_list = &(pstapriv->sta_hash[index]);
-
- spin_lock_bh(&(pstapriv->sta_hash_lock));
+ phash_list = &pstapriv->sta_hash[index];
+ spin_lock_bh(&pstapriv->sta_hash_lock);
list_add_tail(&psta->hash_list, phash_list);
-
pstapriv->asoc_sta_count++;
-
spin_unlock_bh(&pstapriv->sta_hash_lock);
/* Commented by Albert 2009/08/13 */
@@ -493,11 +487,9 @@ exit:
struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
{
- struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- psta = rtw_get_stainfo(pstapriv, bc_addr);
- return psta;
+ return rtw_get_stainfo(pstapriv, bc_addr);
}
u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 59b443255a90..83096696cd5b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -1374,7 +1374,7 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
epigram_vendor_flag = 1;
if (ralink_vendor_flag) {
DBG_88E("link to Tenda W311R AP\n");
- return HT_IOT_PEER_TENDA;
+ return HT_IOT_PEER_TENDA;
} else {
DBG_88E("Capture EPIGRAM_OUI\n");
}
@@ -1579,7 +1579,8 @@ void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
tid = (param>>2)&0x0f;
preorder_ctrl = &psta->recvreorder_ctrl[tid];
preorder_ctrl->indicate_seq = 0xffff;
- preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq) ? true : false;
+ preorder_ctrl->enable = (pmlmeinfo->accept_addba_req) ? true
+ : false;
}
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index e778132b73dc..f2dd7a60f67c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -247,11 +247,8 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
pxmitbuf++;
}
- if (pxmitpriv->pallocated_frame_buf)
- vfree(pxmitpriv->pallocated_frame_buf);
-
- if (pxmitpriv->pallocated_xmitbuf)
- vfree(pxmitpriv->pallocated_xmitbuf);
+ vfree(pxmitpriv->pallocated_frame_buf);
+ vfree(pxmitpriv->pallocated_xmitbuf);
/* free xmit extension buff */
pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
@@ -1216,40 +1213,24 @@ void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe,
struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
{
unsigned long irql;
- struct xmit_buf *pxmitbuf = NULL;
- struct list_head *plist, *phead;
+ struct xmit_buf *pxmitbuf;
struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
-
spin_lock_irqsave(&pfree_queue->lock, irql);
-
- if (list_empty(&pfree_queue->queue)) {
- pxmitbuf = NULL;
- } else {
- phead = get_list_head(pfree_queue);
-
- plist = phead->next;
-
- pxmitbuf = container_of(plist, struct xmit_buf, list);
-
- list_del_init(&(pxmitbuf->list));
- }
-
- if (pxmitbuf != NULL) {
+ pxmitbuf = list_first_entry_or_null(&pfree_queue->queue,
+ struct xmit_buf, list);
+ if (pxmitbuf) {
+ list_del_init(&pxmitbuf->list);
pxmitpriv->free_xmit_extbuf_cnt--;
-
pxmitbuf->priv_data = NULL;
/* pxmitbuf->ext_tag = true; */
-
if (pxmitbuf->sctx) {
DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
}
}
-
spin_unlock_irqrestore(&pfree_queue->lock, irql);
-
return pxmitbuf;
}
@@ -1278,28 +1259,16 @@ s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
{
unsigned long irql;
- struct xmit_buf *pxmitbuf = NULL;
- struct list_head *plist, *phead;
+ struct xmit_buf *pxmitbuf;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
-
/* DBG_88E("+rtw_alloc_xmitbuf\n"); */
spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irql);
-
- if (list_empty(&pfree_xmitbuf_queue->queue)) {
- pxmitbuf = NULL;
- } else {
- phead = get_list_head(pfree_xmitbuf_queue);
-
- plist = phead->next;
-
- pxmitbuf = container_of(plist, struct xmit_buf, list);
-
- list_del_init(&(pxmitbuf->list));
- }
-
- if (pxmitbuf != NULL) {
+ pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue,
+ struct xmit_buf, list);
+ if (pxmitbuf) {
+ list_del_init(&pxmitbuf->list);
pxmitpriv->free_xmitbuf_cnt--;
pxmitbuf->priv_data = NULL;
if (pxmitbuf->sctx) {
@@ -1309,7 +1278,6 @@ struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
}
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irql);
-
return pxmitbuf;
}
@@ -1355,38 +1323,33 @@ Must be very very cautious...
*/
-struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
+struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)
+ /* _queue *pfree_xmit_queue) */
{
/*
Please remember to use all the osdep_service api,
and lock/unlock or _enter/_exit critical to protect
pfree_xmit_queue
*/
-
- struct xmit_frame *pxframe = NULL;
- struct list_head *plist, *phead;
+ struct xmit_frame *pxframe;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
-
spin_lock_bh(&pfree_xmit_queue->lock);
-
- if (list_empty(&pfree_xmit_queue->queue)) {
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt));
- pxframe = NULL;
+ pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
+ struct xmit_frame, list);
+ if (!pxframe) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("rtw_alloc_xmitframe:%d\n",
+ pxmitpriv->free_xmitframe_cnt));
} else {
- phead = get_list_head(pfree_xmit_queue);
-
- plist = phead->next;
-
- pxframe = container_of(plist, struct xmit_frame, list);
+ list_del_init(&pxframe->list);
- list_del_init(&(pxframe->list));
- }
-
- if (pxframe != NULL) { /* default value setting */
+ /* default value setting */
pxmitpriv->free_xmitframe_cnt--;
- RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n",
+ pxmitpriv->free_xmitframe_cnt));
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
@@ -1402,10 +1365,8 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
pxframe->agg_num = 1;
pxframe->ack_report = 0;
}
-
spin_unlock_bh(&pfree_xmit_queue->lock);
-
return pxframe;
}
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index f58a8222c899..c2ad6a3b99da 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -598,18 +598,12 @@ static void rtl88e_phy_init_bb_rf_register_definition(struct adapter *adapter)
reg[RF_PATH_A] = &hal_data->PHYRegDef[RF_PATH_A];
reg[RF_PATH_B] = &hal_data->PHYRegDef[RF_PATH_B];
- reg[RF_PATH_C] = &hal_data->PHYRegDef[RF_PATH_C];
- reg[RF_PATH_D] = &hal_data->PHYRegDef[RF_PATH_D];
reg[RF_PATH_A]->rfintfs = rFPGA0_XAB_RFInterfaceSW;
reg[RF_PATH_B]->rfintfs = rFPGA0_XAB_RFInterfaceSW;
- reg[RF_PATH_C]->rfintfs = rFPGA0_XCD_RFInterfaceSW;
- reg[RF_PATH_D]->rfintfs = rFPGA0_XCD_RFInterfaceSW;
reg[RF_PATH_A]->rfintfi = rFPGA0_XAB_RFInterfaceRB;
reg[RF_PATH_B]->rfintfi = rFPGA0_XAB_RFInterfaceRB;
- reg[RF_PATH_C]->rfintfi = rFPGA0_XCD_RFInterfaceRB;
- reg[RF_PATH_D]->rfintfi = rFPGA0_XCD_RFInterfaceRB;
reg[RF_PATH_A]->rfintfo = rFPGA0_XA_RFInterfaceOE;
reg[RF_PATH_B]->rfintfo = rFPGA0_XB_RFInterfaceOE;
@@ -622,13 +616,9 @@ static void rtl88e_phy_init_bb_rf_register_definition(struct adapter *adapter)
reg[RF_PATH_A]->rfLSSI_Select = rFPGA0_XAB_RFParameter;
reg[RF_PATH_B]->rfLSSI_Select = rFPGA0_XAB_RFParameter;
- reg[RF_PATH_C]->rfLSSI_Select = rFPGA0_XCD_RFParameter;
- reg[RF_PATH_D]->rfLSSI_Select = rFPGA0_XCD_RFParameter;
reg[RF_PATH_A]->rfTxGainStage = rFPGA0_TxGainStage;
reg[RF_PATH_B]->rfTxGainStage = rFPGA0_TxGainStage;
- reg[RF_PATH_C]->rfTxGainStage = rFPGA0_TxGainStage;
- reg[RF_PATH_D]->rfTxGainStage = rFPGA0_TxGainStage;
reg[RF_PATH_A]->rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
reg[RF_PATH_B]->rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
@@ -638,43 +628,27 @@ static void rtl88e_phy_init_bb_rf_register_definition(struct adapter *adapter)
reg[RF_PATH_A]->rfSwitchControl = rFPGA0_XAB_SwitchControl;
reg[RF_PATH_B]->rfSwitchControl = rFPGA0_XAB_SwitchControl;
- reg[RF_PATH_C]->rfSwitchControl = rFPGA0_XCD_SwitchControl;
- reg[RF_PATH_D]->rfSwitchControl = rFPGA0_XCD_SwitchControl;
reg[RF_PATH_A]->rfAGCControl1 = rOFDM0_XAAGCCore1;
reg[RF_PATH_B]->rfAGCControl1 = rOFDM0_XBAGCCore1;
- reg[RF_PATH_C]->rfAGCControl1 = rOFDM0_XCAGCCore1;
- reg[RF_PATH_D]->rfAGCControl1 = rOFDM0_XDAGCCore1;
reg[RF_PATH_A]->rfAGCControl2 = rOFDM0_XAAGCCore2;
reg[RF_PATH_B]->rfAGCControl2 = rOFDM0_XBAGCCore2;
- reg[RF_PATH_C]->rfAGCControl2 = rOFDM0_XCAGCCore2;
- reg[RF_PATH_D]->rfAGCControl2 = rOFDM0_XDAGCCore2;
reg[RF_PATH_A]->rfRxIQImbalance = rOFDM0_XARxIQImbalance;
reg[RF_PATH_B]->rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
- reg[RF_PATH_C]->rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
- reg[RF_PATH_D]->rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
reg[RF_PATH_A]->rfRxAFE = rOFDM0_XARxAFE;
reg[RF_PATH_B]->rfRxAFE = rOFDM0_XBRxAFE;
- reg[RF_PATH_C]->rfRxAFE = rOFDM0_XCRxAFE;
- reg[RF_PATH_D]->rfRxAFE = rOFDM0_XDRxAFE;
reg[RF_PATH_A]->rfTxIQImbalance = rOFDM0_XATxIQImbalance;
reg[RF_PATH_B]->rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
- reg[RF_PATH_C]->rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
- reg[RF_PATH_D]->rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
reg[RF_PATH_A]->rfTxAFE = rOFDM0_XATxAFE;
reg[RF_PATH_B]->rfTxAFE = rOFDM0_XBTxAFE;
- reg[RF_PATH_C]->rfTxAFE = rOFDM0_XCTxAFE;
- reg[RF_PATH_D]->rfTxAFE = rOFDM0_XDTxAFE;
reg[RF_PATH_A]->rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
reg[RF_PATH_B]->rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
- reg[RF_PATH_C]->rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
- reg[RF_PATH_D]->rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
reg[RF_PATH_A]->rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
reg[RF_PATH_B]->rfLSSIReadBackPi = TransceiverB_HSPI_Readback;
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 4d72537644b3..656133c47426 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -75,16 +75,6 @@ static void _rtl88e_fw_block_write(struct adapter *adapt,
usb_write8(adapt, write_address, byte_buffer[i]);
}
-static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
- u32 i;
-
- for (i = *pfwlen; i < roundup(*pfwlen, 4); i++)
- pfwbuf[i] = 0;
-
- *pfwlen = i;
-}
-
static void _rtl88e_fw_page_write(struct adapter *adapt,
u32 page, const u8 *buffer, u32 size)
{
@@ -103,8 +93,6 @@ static void _rtl88e_write_fw(struct adapter *adapt, u8 *buffer, u32 size)
u32 page_no, remain;
u32 page, offset;
- _rtl88e_fill_dummy(buf_ptr, &size);
-
page_no = size / FW_8192C_PAGE_SIZE;
remain = size % FW_8192C_PAGE_SIZE;
@@ -170,14 +158,14 @@ exit:
int rtl88eu_download_fw(struct adapter *adapt)
{
- struct hal_data_8188e *rtlhal = GET_HAL_DATA(adapt);
struct dvobj_priv *dvobj = adapter_to_dvobj(adapt);
struct device *device = dvobj_to_dev(dvobj);
const struct firmware *fw;
const char fw_name[] = "rtlwifi/rtl8188eufw.bin";
struct rtl92c_firmware_header *pfwheader = NULL;
- u8 *pfwdata;
- u32 fwsize;
+ u8 *download_data, *fw_data;
+ size_t download_size;
+ unsigned int trailing_zeros_length;
if (request_firmware(&fw, fw_name, device)) {
dev_err(device, "Firmware %s not available\n", fw_name);
@@ -186,35 +174,43 @@ int rtl88eu_download_fw(struct adapter *adapt)
if (fw->size > FW_8188E_SIZE) {
dev_err(device, "Firmware size exceed 0x%X. Check it.\n",
- FW_8188E_SIZE);
+ FW_8188E_SIZE);
+ release_firmware(fw);
return -1;
}
- pfwdata = kzalloc(FW_8188E_SIZE, GFP_KERNEL);
- if (!pfwdata)
+ trailing_zeros_length = (4 - fw->size % 4) % 4;
+
+ fw_data = kmalloc(fw->size + trailing_zeros_length, GFP_KERNEL);
+ if (!fw_data) {
+ release_firmware(fw);
return -ENOMEM;
+ }
- rtlhal->pfirmware = pfwdata;
- memcpy(rtlhal->pfirmware, fw->data, fw->size);
- rtlhal->fwsize = fw->size;
- release_firmware(fw);
+ memcpy(fw_data, fw->data, fw->size);
+ memset(fw_data + fw->size, 0, trailing_zeros_length);
- fwsize = rtlhal->fwsize;
- pfwheader = (struct rtl92c_firmware_header *)pfwdata;
+ pfwheader = (struct rtl92c_firmware_header *)fw_data;
if (IS_FW_HEADER_EXIST(pfwheader)) {
- pfwdata = pfwdata + 32;
- fwsize = fwsize - 32;
+ download_data = fw_data + 32;
+ download_size = fw->size + trailing_zeros_length - 32;
+ } else {
+ download_data = fw_data;
+ download_size = fw->size + trailing_zeros_length;
}
+ release_firmware(fw);
+
if (usb_read8(adapt, REG_MCUFWDL) & RAM_DL_SEL) {
usb_write8(adapt, REG_MCUFWDL, 0);
rtl88e_firmware_selfreset(adapt);
}
_rtl88e_enable_fw_download(adapt, true);
usb_write8(adapt, REG_MCUFWDL, usb_read8(adapt, REG_MCUFWDL) | FWDL_ChkSum_rpt);
- _rtl88e_write_fw(adapt, pfwdata, fwsize);
+ _rtl88e_write_fw(adapt, download_data, download_size);
_rtl88e_enable_fw_download(adapt, false);
+ kfree(fw_data);
return _rtl88e_fw_free_to_go(adapt);
}
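
The rewritten download path above replaces the removed _rtl88e_fill_dummy() helper: the image is padded to a 4-byte boundary up front, and release_firmware() is now called on every exit path, including the oversize-image error return. The pad computation is the usual round-up remainder; a small sketch:

#include <linux/kernel.h>

/*
 * Bytes needed to round len up to a multiple of 4:
 * len = 6 -> 2, len = 8 -> 0. Equivalent to round_up(len, 4) - len.
 */
static inline unsigned int fw_pad_len(size_t len)
{
	return (4 - len % 4) % 4;
}
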
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 2c25d3b02036..8d2316b9e6e5 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -255,9 +255,6 @@ void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def Cmn
case ODM_CMNINFO_CUT_VER:
pDM_Odm->CutVersion = (u8)Value;
break;
- case ODM_CMNINFO_FAB_VER:
- pDM_Odm->FabVersion = (u8)Value;
- break;
case ODM_CMNINFO_RF_TYPE:
pDM_Odm->RFType = (u8)Value;
break;
@@ -477,7 +474,6 @@ void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface=%d\n", pDM_Odm->SupportInterface));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType=0x%x\n", pDM_Odm->SupportICType));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion=%d\n", pDM_Odm->CutVersion));
- ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("FabVersion=%d\n", pDM_Odm->FabVersion));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RFType=%d\n", pDM_Odm->RFType));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType=%d\n", pDM_Odm->BoardType));
ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA=%d\n", pDM_Odm->ExtLNA));
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index d3e8a8ea1829..ae42b4492c77 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -180,32 +180,6 @@ static void get_tx_power_index(struct adapter *adapt, u8 channel, u8 *cck_pwr,
hal_data->BW20_24G_Diff[TxCount][RF_PATH_A]+
hal_data->BW20_24G_Diff[TxCount][index];
bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index];
- } else if (TxCount == RF_PATH_C) {
- cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index];
- ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_B][index]+
- hal_data->BW20_24G_Diff[TxCount][index];
-
- bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_B][index]+
- hal_data->BW20_24G_Diff[TxCount][index];
- bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index];
- } else if (TxCount == RF_PATH_D) {
- cck_pwr[TxCount] = hal_data->Index24G_CCK_Base[TxCount][index];
- ofdm_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_B][index]+
- hal_data->BW20_24G_Diff[RF_PATH_C][index]+
- hal_data->BW20_24G_Diff[TxCount][index];
-
- bw20_pwr[TxCount] = hal_data->Index24G_BW40_Base[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_A][index]+
- hal_data->BW20_24G_Diff[RF_PATH_B][index]+
- hal_data->BW20_24G_Diff[RF_PATH_C][index]+
- hal_data->BW20_24G_Diff[TxCount][index];
- bw40_pwr[TxCount] = hal_data->Index24G_BW40_Base[TxCount][index];
}
}
}
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index 3e60b23819ae..b76b0f5d6220 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -23,8 +23,8 @@
/* This routine deals with the Power Configuration CMDs parsing
* for RTL8723/RTL8188E Series IC.
*/
-u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
- u8 ifacetype, struct wl_pwr_cfg pwrseqcmd[])
+u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
+ struct wl_pwr_cfg pwrseqcmd[])
{
struct wl_pwr_cfg pwrcfgcmd = {0};
u8 poll_bit = false;
@@ -39,21 +39,16 @@ u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
RT_TRACE(_module_hal_init_c_, _drv_info_,
("rtl88eu_pwrseqcmdparsing: offset(%#x) cut_msk(%#x)"
- "fab_msk(%#x) interface_msk(%#x) base(%#x) cmd(%#x)"
+ " cmd(%#x)"
"msk(%#x) value(%#x)\n",
GET_PWR_CFG_OFFSET(pwrcfgcmd),
GET_PWR_CFG_CUT_MASK(pwrcfgcmd),
- GET_PWR_CFG_FAB_MASK(pwrcfgcmd),
- GET_PWR_CFG_INTF_MASK(pwrcfgcmd),
- GET_PWR_CFG_BASE(pwrcfgcmd),
GET_PWR_CFG_CMD(pwrcfgcmd),
GET_PWR_CFG_MASK(pwrcfgcmd),
GET_PWR_CFG_VALUE(pwrcfgcmd)));
- /* Only Handle the command whose FAB, CUT, and Interface are matched */
- if ((GET_PWR_CFG_FAB_MASK(pwrcfgcmd) & fab_vers) &&
- (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) &&
- (GET_PWR_CFG_INTF_MASK(pwrcfgcmd) & ifacetype)) {
+ /* Only Handle the command whose CUT is matched */
+ if (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) {
switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
case PWR_CMD_READ:
RT_TRACE(_module_hal_init_c_, _drv_info_,
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index 199a77acd7a9..f9919a94a77e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -50,7 +50,7 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &hal_data->dmpriv;
struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
- u8 cut_ver, fab_ver;
+ u8 cut_ver;
/* Init Value */
memset(dm_odm, 0, sizeof(*dm_odm));
@@ -61,10 +61,8 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E);
- fab_ver = ODM_TSMC;
cut_ver = ODM_CUT_A;
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_FAB_VER, fab_ver);
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_CUT_VER, cut_ver);
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_MP_TEST_CHIP, hal_data->VersionID.ChipType == NORMAL_CHIP ? true : false);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index e04303ce80af..c96d80487a56 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -415,7 +415,7 @@ static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
{
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- u32 len = 0;
+ u32 len;
/* no consider fragement */
len = pattrib->hdrlen + pattrib->iv_len +
@@ -614,7 +614,7 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
- s32 res = _SUCCESS;
+ s32 res;
res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
if (res == _SUCCESS)
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 5789e1e23f0a..07a61b8271f0 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -108,7 +108,6 @@ static u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
return _SUCCESS;
if (!rtl88eu_pwrseqcmdparsing(adapt, PWR_CUT_ALL_MSK,
- PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,
Rtl8188E_NIC_PWR_ON_FLOW)) {
DBG_88E(KERN_ERR "%s: run power on flow fail\n", __func__);
return _FAIL;
@@ -926,7 +925,6 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
/* Run LPS WL RFOFF flow */
rtl88eu_pwrseqcmdparsing(Adapter, PWR_CUT_ALL_MSK,
- PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,
Rtl8188E_NIC_LPS_ENTER_FLOW);
/* 2. 0x1F[7:0] = 0 turn off RF */
@@ -949,7 +947,6 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
/* Card disable power action flow */
rtl88eu_pwrseqcmdparsing(Adapter, PWR_CUT_ALL_MSK,
- PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,
Rtl8188E_NIC_DISABLE_FLOW);
/* Reset MCU IO Wrapper */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index e058162fe2ba..2670d6b6a79e 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -69,13 +69,11 @@ enum hw90_block {
enum rf_radio_path {
RF_PATH_A = 0, /* Radio Path A */
RF_PATH_B = 1, /* Radio Path B */
- RF_PATH_C = 2, /* Radio Path C */
- RF_PATH_D = 3, /* Radio Path D */
};
#define MAX_PG_GROUP 13
-#define RF_PATH_MAX 3
+#define RF_PATH_MAX 2
#define MAX_RF_PATH RF_PATH_MAX
#define MAX_TX_COUNT 4 /* path numbers */
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 6a2a147e6d15..3fb691daa5af 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -23,10 +23,6 @@
#include <linux/types.h>
#define NDIS_OID uint
-typedef void (*proc_t)(void *);
-
-#define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field)
-
/* port from fw */
/* TODO: Macros Below are Sync from SD7-Driver. It is necessary
* to check correctness */
@@ -46,31 +42,6 @@ typedef void (*proc_t)(void *);
#define EF4BYTE(_val) \
(le32_to_cpu(_val))
-/* Read data from memory */
-#define READEF1BYTE(_ptr) \
- EF1BYTE(*((u8 *)(_ptr)))
-/* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr) \
- EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr) \
- EF4BYTE(*(_ptr))
-
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val) \
- do { \
- (*((u8 *)(_ptr))) = EF1BYTE(_val) \
- } while (0)
-/* Write le data to memory in host ordering */
-#define WRITEEF2BYTE(_ptr, _val) \
- do { \
- (*((u16 *)(_ptr))) = EF2BYTE(_val) \
- } while (0)
-
-#define WRITEEF4BYTE(_ptr, _val) \
- do { \
- (*((u32 *)(_ptr))) = EF2BYTE(_val) \
- } while (0)
-
/* Create a bit mask
* Examples:
* BIT_LEN_MASK_32(0) => 0x00000000
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 0729bd40b02a..dcb032b6c3a7 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -110,7 +110,7 @@ struct registry_priv {
u8 wifi_spec;/* !turbo_mode */
u8 channel_plan;
- bool bAcceptAddbaReq;
+ bool accept_addba_req; /* true = accept AP's Add BA req */
u8 antdiv_cfg;
u8 antdiv_type;
@@ -135,9 +135,9 @@ struct registry_priv {
};
/* For registry parameters */
-#define RGTRY_OFT(field) ((u32)FIELD_OFFSET(struct registry_priv, field))
+#define RGTRY_OFT(field) ((u32)offsetof(struct registry_priv, field))
#define RGTRY_SZ(field) sizeof(((struct registry_priv *)0)->field)
-#define BSSID_OFT(field) ((u32)FIELD_OFFSET(struct wlan_bssid_ex, field))
+#define BSSID_OFT(field) ((u32)offsetof(struct wlan_bssid_ex, field))
#define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *)0)->field)
#define MAX_CONTINUAL_URB_ERR 4
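
FIELD_OFFSET() was a driver-local clone of the standard offsetof() macro, implemented by dereferencing a null pointer; the patch switches all users to the kernel's offsetof() from <linux/stddef.h>. A compile-time sketch of the equivalence, using an illustrative struct:

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/bug.h>

struct demo {
	u32 first;
	u8 second;
};

static inline void offsetof_demo(void)
{
	/* offsetof() yields the byte offset of a member within its struct. */
	BUILD_BUG_ON(offsetof(struct demo, second) != sizeof(u32));
}
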
@@ -176,8 +176,6 @@ static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
};
struct adapter {
- u16 chip_type;
-
struct dvobj_priv *dvobj;
struct mlme_priv mlmepriv;
struct mlme_ext_priv mlmeextpriv;
diff --git a/drivers/staging/rtl8188eu/include/ieee80211_ext.h b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
deleted file mode 100644
index 15e53d380ad0..000000000000
--- a/drivers/staging/rtl8188eu/include/ieee80211_ext.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
- ******************************************************************************/
-#ifndef __IEEE80211_EXT_H
-#define __IEEE80211_EXT_H
-
-#include <osdep_service.h>
-#include <drv_types.h>
-
-#define WMM_OUI_TYPE 2
-#define WMM_OUI_SUBTYPE_INFORMATION_ELEMENT 0
-#define WMM_OUI_SUBTYPE_PARAMETER_ELEMENT 1
-#define WMM_OUI_SUBTYPE_TSPEC_ELEMENT 2
-#define WMM_VERSION 1
-
-#define WPA_PROTO_WPA BIT(0)
-#define WPA_PROTO_RSN BIT(1)
-
-#define WPA_KEY_MGMT_IEEE8021X BIT(0)
-#define WPA_KEY_MGMT_PSK BIT(1)
-#define WPA_KEY_MGMT_NONE BIT(2)
-#define WPA_KEY_MGMT_IEEE8021X_NO_WPA BIT(3)
-#define WPA_KEY_MGMT_WPA_NONE BIT(4)
-
-
-#define WPA_CAPABILITY_PREAUTH BIT(0)
-#define WPA_CAPABILITY_MGMT_FRAME_PROTECTION BIT(6)
-#define WPA_CAPABILITY_PEERKEY_ENABLED BIT(9)
-
-
-#define PMKID_LEN 16
-
-
-struct wpa_ie_hdr {
- u8 elem_id;
- u8 len;
- u8 oui[4]; /* 24-bit OUI followed by 8-bit OUI type */
- u8 version[2]; /* little endian */
-} __packed;
-
-struct rsn_ie_hdr {
- u8 elem_id; /* WLAN_EID_RSN */
- u8 len;
- u8 version[2]; /* little endian */
-} __packed;
-
-struct wme_ac_parameter {
-#if defined(__LITTLE_ENDIAN)
- /* byte 1 */
- u8 aifsn:4,
- acm:1,
- aci:2,
- reserved:1;
-
- /* byte 2 */
- u8 eCWmin:4,
- eCWmax:4;
-#elif defined(__BIG_ENDIAN)
- /* byte 1 */
- u8 reserved:1,
- aci:2,
- acm:1,
- aifsn:4;
-
- /* byte 2 */
- u8 eCWmax:4,
- eCWmin:4;
-#else
-#error "Please fix <endian.h>"
-#endif
-
- /* bytes 3 & 4 */
- u16 txopLimit;
-} __packed;
-
-struct wme_parameter_element {
- /* required fields for WME version 1 */
- u8 oui[3];
- u8 oui_type;
- u8 oui_subtype;
- u8 version;
- u8 acInfo;
- u8 reserved;
- struct wme_ac_parameter ac[4];
-
-} __packed;
-
-#define WPA_PUT_LE16(a, val) \
- do { \
- (a)[1] = ((u16)(val)) >> 8; \
- (a)[0] = ((u16)(val)) & 0xff; \
- } while (0)
-
-#define WPA_PUT_BE32(a, val) \
- do { \
- (a)[0] = (u8)((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8)((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8)((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8)(((u32) (val)) & 0xff); \
- } while (0)
-
-#define WPA_PUT_LE32(a, val) \
- do { \
- (a)[3] = (u8)((((u32) (val)) >> 24) & 0xff); \
- (a)[2] = (u8)((((u32) (val)) >> 16) & 0xff); \
- (a)[1] = (u8)((((u32) (val)) >> 8) & 0xff); \
- (a)[0] = (u8)(((u32) (val)) & 0xff); \
- } while (0)
-
-#define RSN_SELECTOR_PUT(a, val) WPA_PUT_BE32((u8 *)(a), (val))
-
-/* Action category code */
-enum ieee80211_category {
- WLAN_CATEGORY_SPECTRUM_MGMT = 0,
- WLAN_CATEGORY_QOS = 1,
- WLAN_CATEGORY_DLS = 2,
- WLAN_CATEGORY_BACK = 3,
- WLAN_CATEGORY_HT = 7,
- WLAN_CATEGORY_WMM = 17,
-};
-
-/* SPECTRUM_MGMT action code */
-enum ieee80211_spectrum_mgmt_actioncode {
- WLAN_ACTION_SPCT_MSR_REQ = 0,
- WLAN_ACTION_SPCT_MSR_RPRT = 1,
- WLAN_ACTION_SPCT_TPC_REQ = 2,
- WLAN_ACTION_SPCT_TPC_RPRT = 3,
- WLAN_ACTION_SPCT_CHL_SWITCH = 4,
- WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
-};
-
-/* BACK action code */
-enum ieee80211_back_actioncode {
- WLAN_ACTION_ADDBA_REQ = 0,
- WLAN_ACTION_ADDBA_RESP = 1,
- WLAN_ACTION_DELBA = 2,
-};
-
-/* HT features action code */
-enum ieee80211_ht_actioncode {
- WLAN_ACTION_NOTIFY_CH_WIDTH = 0,
- WLAN_ACTION_SM_PS = 1,
- WLAN_ACTION_PSPM = 2,
- WLAN_ACTION_PCO_PHASE = 3,
- WLAN_ACTION_MIMO_CSI_MX = 4,
- WLAN_ACTION_MIMO_NONCP_BF = 5,
- WLAN_ACTION_MIMP_CP_BF = 6,
- WLAN_ACTION_ASEL_INDICATES_FB = 7,
- WLAN_ACTION_HI_INFO_EXCHG = 8,
-};
-
-/* BACK (block-ack) parties */
-enum ieee80211_back_parties {
- WLAN_BACK_RECIPIENT = 0,
- WLAN_BACK_INITIATOR = 1,
- WLAN_BACK_TIMER = 2,
-};
-
-struct ieee80211_mgmt {
- u16 frame_control;
- u16 duration;
- u8 da[6];
- u8 sa[6];
- u8 bssid[6];
- u16 seq_ctrl;
- union {
- struct {
- u16 auth_alg;
- u16 auth_transaction;
- u16 status_code;
- /* possibly followed by Challenge text */
- u8 variable[0];
- } __packed auth;
- struct {
- u16 reason_code;
- } __packed deauth;
- struct {
- u16 capab_info;
- u16 listen_interval;
- /* followed by SSID and Supported rates */
- u8 variable[0];
- } __packed assoc_req;
- struct {
- u16 capab_info;
- u16 status_code;
- u16 aid;
- /* followed by Supported rates */
- u8 variable[0];
- } __packed assoc_resp, reassoc_resp;
- struct {
- u16 capab_info;
- u16 listen_interval;
- u8 current_ap[6];
- /* followed by SSID and Supported rates */
- u8 variable[0];
- } __packed reassoc_req;
- struct {
- u16 reason_code;
- } __packed disassoc;
- struct {
- __le64 timestamp;
- u16 beacon_int;
- u16 capab_info;
- /* followed by some of SSID, Supported rates,
- * FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
- } __packed beacon;
- struct {
- /* only variable items: SSID, Supported rates */
- u8 variable[0];
- } __packed probe_req;
- struct {
- __le64 timestamp;
- u16 beacon_int;
- u16 capab_info;
- /* followed by some of SSID, Supported rates,
- * FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
- } __packed probe_resp;
- struct {
- u8 category;
- union {
- struct {
- u8 action_code;
- u8 dialog_token;
- u8 status_code;
- u8 variable[0];
- } __packed wme_action;
- struct {
- u8 action_code;
- u8 dialog_token;
- u16 capab;
- u16 timeout;
- u16 start_seq_num;
- } __packed addba_req;
- struct {
- u8 action_code;
- u8 dialog_token;
- u16 status;
- u16 capab;
- u16 timeout;
- } __packed addba_resp;
- struct {
- u8 action_code;
- u16 params;
- u16 reason_code;
- } __packed delba;
- struct {
- u8 action_code;
- /* capab_info for open and confirm,
- * reason for close
- */
- u16 aux;
- /* Followed in plink_confirm by status
- * code, AID and supported rates,
- * and directly by supported rates in
- * plink_open and plink_close
- */
- u8 variable[0];
- } __packed plink_action;
- struct{
- u8 action_code;
- u8 variable[0];
- } __packed mesh_action;
- } __packed u;
- } __packed action;
- } __packed u;
-} __packed;
-
-/* mgmt header + 1 byte category code */
-#define IEEE80211_MIN_ACTION_SIZE \
- FIELD_OFFSET(struct ieee80211_mgmt, u.action.u)
-
-#endif
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index bc970caf7eda..af781c7cd3a5 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -348,7 +348,6 @@ enum odm_common_info_def {
ODM_CMNINFO_MP_TEST_CHIP,
ODM_CMNINFO_IC_TYPE, /* ODM_IC_TYPE_E */
ODM_CMNINFO_CUT_VER, /* ODM_CUT_VERSION_E */
- ODM_CMNINFO_FAB_VER, /* ODM_FAB_E */
ODM_CMNINFO_RF_TYPE, /* ODM_RF_PATH_E or ODM_RF_TYPE_E? */
ODM_CMNINFO_BOARD_TYPE, /* ODM_BOARD_TYPE_E */
ODM_CMNINFO_EXT_LNA, /* true */
@@ -451,12 +450,6 @@ enum odm_cut_version {
ODM_CUT_TEST = 7,
};
-/* ODM_CMNINFO_FAB_VER */
-enum odm_fab_Version {
- ODM_TSMC = 0,
- ODM_UMC = 1,
-};
-
/* ODM_CMNINFO_RF_TYPE */
/* For example 1T2R (A+AB = BIT0|BIT4|BIT5) */
enum odm_rf_path {
@@ -752,8 +745,6 @@ struct odm_dm_struct {
u32 SupportICType;
/* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
u8 CutVersion;
- /* Fab Version TSMC/UMC = 0/1 */
- u8 FabVersion;
/* RF Type 4T4R/3T3R/2T2R/1T2R/1T1R/... */
u8 RFType;
/* Board Type Normal/HighPower/MiniCard/SLIM/Combo/. = 0/1/2/3/4/. */
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index 62a00498e473..ef792bfd535e 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -69,7 +69,7 @@ struct phy_rx_agc_info {
};
struct phy_status_rpt {
- struct phy_rx_agc_info path_agc[3];
+ struct phy_rx_agc_info path_agc[RF_PATH_MAX];
u8 ch_corr[2];
u8 cck_sig_qual_ofdm_pwdb_all;
u8 cck_agc_rpt_ofdm_cfosho_a;
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index 8c876c6c7a4f..9dbf8435f147 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -60,254 +60,172 @@
#define RTL8188E_TRANS_CARDEMU_TO_ACT \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value
+ * { offset, cut_msk, cmd, msk, value
* },
* comment here
*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
/* wait till 0x04[17] = 1 power ready*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) | BIT(1), 0}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(0) | BIT(1), 0}, \
/* 0x02[1:0] = 0 reset BB*/ \
- {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \
+ {0x0026, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), BIT(7)}, \
/*0x24[23] = 2b'01 schmit trigger */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), 0}, \
/* 0x04[15] = 0 disable HWPDN (control by DRV)*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4) | BIT(3), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4) | BIT(3), 0}, \
/*0x04[12:11] = 2b'00 disable WL suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
/*0x04[8] = 1 polling until return 0*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, BIT(0), 0}, \
/*wait till 0x04[8] = 0*/ \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
- /*LDO normal mode*/ \
- {0x0074, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
- /*SDIO Driving*/
+ {0x0023, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), 0}, \
+ /*LDO normal mode*/
#define RTL8188E_TRANS_ACT_TO_CARDEMU \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value
+ * { offset, cut_msk, cmd, msk, value
* },
* comments here
*/ \
- {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0}, \
/*0x1F[7:0] = 0 turn off RF*/ \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
/*LDO Sleep mode*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
/*0x04[9] = 1 turn off MAC by HW state machine*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, BIT(1), 0}, \
/*wait till 0x04[9] = 0 polling until return 0 to disable*/
#define RTL8188E_TRANS_CARDEMU_TO_SUS \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
- PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
- PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \
/* 0x04[12:11] = 2b'01enable WL suspend */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)}, \
- /* 0x04[12:11] = 2b'11enable WL suspend for PCIe */ \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
- PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
- PWR_CMD_WRITE, 0xFF, BIT(7)}, \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, BIT(7)}, \
/* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */\
- {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
- PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
- PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0041, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), 0}, \
/*Clear SIC_EN register 0x40[12] = 1'b0 */ \
- {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
- PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
- PWR_CMD_WRITE, BIT(4), BIT(4)}, \
- /*Set USB suspend enable local register 0xfe10[4]=1 */ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
- /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, \
- /*wait power state to suspend*/
+ {0xfe10, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ /*Set USB suspend enable local register 0xfe10[4]=1 */
#define RTL8188E_TRANS_SUS_TO_CARDEMU \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
- /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
- /*wait power state to suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, \
/*0x04[12:11] = 2b'01enable WL suspend*/
#define RTL8188E_TRANS_CARDEMU_TO_CARDDIS \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \
+ {0x0026, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), BIT(7)}, \
/*0x24[23] = 2b'01 schmit trigger */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
- PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
- PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, \
/*0x04[12:11] = 2b'01 enable WL suspend*/ \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
- PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
- PWR_CMD_WRITE, 0xFF, 0}, \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0}, \
/* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */\
- {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, \
- PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, \
- PWR_CMD_WRITE, BIT(4), 0}, \
+ {0x0041, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), 0}, \
/*Clear SIC_EN register 0x40[12] = 1'b0 */ \
- {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
- /*Set USB suspend enable local register 0xfe10[4]=1 */ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, \
- /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, \
- /*wait power state to suspend*/
+ {0xfe10, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), BIT(4)}, \
+ /*Set USB suspend enable local register 0xfe10[4]=1 */
#define RTL8188E_TRANS_CARDDIS_TO_CARDEMU \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, \
- /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, \
- /*wait power state to suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, \
/*0x04[12:11] = 2b'01enable WL suspend*/
#define RTL8188E_TRANS_CARDEMU_TO_PDN \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(0), 0}, \
/* 0x04[16] = 0*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), BIT(7)}, \
/* 0x04[15] = 1*/
#define RTL8188E_TRANS_PDN_TO_CARDEMU \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(7), 0}, \
/* 0x04[15] = 0*/
/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */
#define RTL8188E_TRANS_ACT_TO_LPS \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
- {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, 0xFF, 0}, \
/*Should be zero if no packet is transmitting*/ \
- {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, 0xFF, 0}, \
/*Should be zero if no packet is transmitting*/ \
- {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, 0xFF, 0}, \
/*Should be zero if no packet is transmitting*/ \
- {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0}, \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, 0xFF, 0}, \
/*Should be zero if no packet is transmitting*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(0), 0}, \
/*CCK and OFDM are disabled,and clock are gated*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, \
- PWRSEQ_DELAY_US},/*Delay 1us*/ \
- {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \
- {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},/*check if removed later*/\
- {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}, \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US}, \
+ /*Delay 1us*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0x3F}, \
+ /*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(1), 0}, \
+ /*check if removed later*/\
+ {0x0553, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(5), BIT(5)}, \
/*Respond TxOK to scheduler*/
#define RTL8188E_TRANS_LPS_TO_ACT \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, \
- PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/ \
- {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/ \
- {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/ \
- {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, \
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0x84}, \
+ /*USB RPWM*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, \
+ /*Delay*/ \
+ {0x0008, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(4), 0}, \
/* 0x08[4] = 0 switch TSF to 40M */ \
- {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, \
+ {0x0109, PWR_CUT_ALL_MSK, PWR_CMD_POLLING, BIT(7), 0}, \
/* Polling 0x109[7]=0 TSF in 40M */ \
- {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6) | BIT(7), 0}, \
+ {0x0029, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(6) | BIT(7), 0}, \
/* 0x29[7:6] = 2b'00 enable BB clock */ \
- {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, BIT(1), BIT(1)}, \
/* 0x101[1] = 1 */ \
- {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0xFF}, \
/* 0x100[7:0] = 0xFF enable WMAC TRX */ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \
+ {0x0002, PWR_CUT_ALL_MSK, \
+ PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \
/* 0x02[1:0] = 2b'11 enable BB macro */ \
- {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, \
- PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+ {0x0522, PWR_CUT_ALL_MSK, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
#define RTL8188E_TRANS_END \
/* format
- * { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk,
+ * { offset, cut_msk, cmd, msk,
* value },
* comments here
*/ \
- {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, \
- PWR_CMD_END, 0, 0},
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_CMD_END, 0, 0},
extern struct wl_pwr_cfg rtl8188E_power_on_flow
diff --git a/drivers/staging/rtl8188eu/include/pwrseqcmd.h b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
index 980a49769157..468a3fb28e00 100644
--- a/drivers/staging/rtl8188eu/include/pwrseqcmd.h
+++ b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
@@ -29,24 +29,6 @@
#define PWR_CMD_DELAY 0x03
#define PWR_CMD_END 0x04
-/* The value of base: 4 bits */
-/* define the base address of each block */
-#define PWR_BASEADDR_MAC 0x00
-#define PWR_BASEADDR_USB 0x01
-#define PWR_BASEADDR_PCIE 0x02
-#define PWR_BASEADDR_SDIO 0x03
-
-/* The value of interface_msk: 4 bits */
-#define PWR_INTF_SDIO_MSK BIT(0)
-#define PWR_INTF_USB_MSK BIT(1)
-#define PWR_INTF_PCI_MSK BIT(2)
-#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
-
-/* The value of fab_msk: 4 bits */
-#define PWR_FAB_TSMC_MSK BIT(0)
-#define PWR_FAB_UMC_MSK BIT(1)
-#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
-
/* The value of cut_msk: 8 bits */
#define PWR_CUT_TESTCHIP_MSK BIT(0)
#define PWR_CUT_A_MSK BIT(1)
@@ -67,9 +49,6 @@ enum pwrseq_cmd_delat_unit {
struct wl_pwr_cfg {
u16 offset;
u8 cut_msk;
- u8 fab_msk:4;
- u8 interface_msk:4;
- u8 base:4;
u8 cmd:4;
u8 msk;
u8 value;
@@ -77,14 +56,11 @@ struct wl_pwr_cfg {
#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
-#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
-#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
-#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
-u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
- u8 ifacetype, struct wl_pwr_cfg pwrcfgCmd[]);
+u8 rtl88eu_pwrseqcmdparsing(struct adapter *padapter, u8 cut_vers,
+ struct wl_pwr_cfg pwrcfgCmd[]);
#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index cbad364f189c..9f5050e6f6ab 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -191,8 +191,6 @@ struct txpowerinfo24g {
struct hal_data_8188e {
struct HAL_VERSION VersionID;
u16 CustomerID;
- u8 *pfirmware;
- u32 fwsize;
u16 FirmwareVersion;
u16 FirmwareVersionRev;
u16 FirmwareSubVersion;
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 9093a5f94d32..44711332b90c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -348,7 +348,7 @@ struct mlme_ext_info {
u8 candidate_tid_bitmap;
u8 dialogToken;
/* Accept ADDBA Request */
- bool bAcceptAddbaReq;
+ bool accept_addba_req;
u8 bwmode_updated;
u8 hidden_ssid_mode;
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 2df88370de59..35f61be12acd 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -140,7 +140,6 @@ enum rt_rf_type_def {
};
u32 rtw_ch2freq(u32 ch);
-u32 rtw_freq2ch(u32 freq);
#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index a076ede50b22..911980495fb2 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -403,7 +403,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial);
pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
if (pwep == NULL) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n"));
@@ -1677,7 +1677,7 @@ static int rtw_wx_set_enc(struct net_device *dev,
if (erq->length > 0) {
wep.KeyLength = erq->length <= 5 ? 5 : 13;
- wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ wep.Length = wep.KeyLength + offsetof(struct ndis_802_11_wep, KeyMaterial);
} else {
wep.KeyLength = 0;
@@ -1907,7 +1907,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
memset(param, 0, param_len);
param->cmd = IEEE_CMD_SET_ENCRYPTION;
- memset(param->sta_addr, 0xff, ETH_ALEN);
+ eth_broadcast_addr(param->sta_addr);
switch (pext->alg) {
case IW_ENCODE_ALG_NONE:
@@ -2277,7 +2277,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
- wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ wep_total_len = wep_key_len + offsetof(struct ndis_802_11_wep, KeyMaterial);
pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
if (pwep == NULL) {
DBG_88E(" r871x_set_encryption: pwep allocate fail !!!\n");
@@ -3095,7 +3095,6 @@ struct iw_handler_def rtw_handlers_def = {
.get_wireless_stats = rtw_get_wireless_stats,
};
-#include <rtw_android.h>
int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct iwreq *wrq = (struct iwreq *)rq;
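FIELD_OFFSET was a local reimplementation of what the kernel already provides: offsetof() yields the byte offset of KeyMaterial inside struct ndis_802_11_wep, so a header-plus-variable-key allocation sizes correctly; the memset(..., 0xff, ETH_ALEN) call is likewise replaced by its one-line equivalent eth_broadcast_addr(). A self-contained sketch of the offsetof() sizing (the struct layout here is illustrative, not the driver's exact definition):

#include <linux/stddef.h>
#include <linux/types.h>

/* Illustrative layout only -- not the driver's exact definition. */
struct ndis_wep_example {
	u32 Length;
	u32 KeyIndex;
	u32 KeyLength;
	u8  KeyMaterial[1];	/* variable-length key follows the header */
};

static u32 wep_total_len(u32 key_len)
{
	/* Bytes up to the flexible tail, plus the key itself. */
	return offsetof(struct ndis_wep_example, KeyMaterial) + key_len;
}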
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 9201b94d017c..7986e678521a 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -101,8 +101,6 @@ static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
static int rtw_low_power;
static int rtw_wifi_spec;
static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
-/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
-static int rtw_AcceptAddbaReq = true;
static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */
@@ -593,7 +591,7 @@ static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->low_power = (u8)rtw_low_power;
registry_par->wifi_spec = (u8)rtw_wifi_spec;
registry_par->channel_plan = (u8)rtw_channel_plan;
- registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
+ registry_par->accept_addba_req = true;
registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
registry_par->antdiv_type = (u8)rtw_antdiv_type;
registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode;
@@ -1157,7 +1155,6 @@ int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
static int netdev_close(struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- struct hal_data_8188e *rtlhal = GET_HAL_DATA(padapter);
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));
@@ -1190,9 +1187,6 @@ static int netdev_close(struct net_device *pnetdev)
rtw_led_control(padapter, LED_CTL_POWER_OFF);
}
- kfree(rtlhal->pfirmware);
- rtlhal->pfirmware = NULL;
-
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
return 0;
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index d87b54711c0d..f090bef59594 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -41,10 +41,7 @@ inline int RTW_STATUS_CODE(int error_code)
u8 *_rtw_malloc(u32 sz)
{
- u8 *pbuf = NULL;
-
- pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
- return pbuf;
+ return kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}
void *rtw_malloc2d(int h, int w, int size)
@@ -67,8 +64,7 @@ u32 _rtw_down_sema(struct semaphore *sema)
{
if (down_interruptible(sema))
return _FAIL;
- else
- return _SUCCESS;
+ return _SUCCESS;
}
void _rtw_init_queue(struct __queue *pqueue)
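_rtw_malloc() now simply selects the GFP flags from context: GFP_ATOMIC where sleeping is forbidden, GFP_KERNEL otherwise. A minimal sketch of the idiom:

#include <linux/interrupt.h>
#include <linux/slab.h>

static void *ctx_alloc(size_t sz)
{
	/*
	 * GFP_ATOMIC in interrupt context, GFP_KERNEL otherwise. Note that
	 * in_interrupt() does not detect every atomic context (a held
	 * spinlock, for instance), so this remains a heuristic.
	 */
	return kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
}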
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 01d50f7c1667..794cc114348c 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -360,7 +360,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
padapter->bDriverStopped = true;
mutex_init(&padapter->hw_init_mutex);
- padapter->chip_type = RTL8188E;
pnetdev = rtw_init_netdev(padapter);
if (pnetdev == NULL)
@@ -442,7 +441,7 @@ free_adapter:
if (status != _SUCCESS) {
if (pnetdev)
rtw_free_netdev(pnetdev);
- else if (padapter)
+ else
vfree(padapter);
padapter = NULL;
}
@@ -474,8 +473,7 @@ static void rtw_usb_if1_deinit(struct adapter *if1)
pr_debug("+r871xu_dev_remove, hw_init_completed=%d\n",
if1->hw_init_completed);
rtw_free_drv_sw(if1);
- if (pnetdev)
- rtw_free_netdev(pnetdev);
+ rtw_free_netdev(pnetdev);
}
static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
@@ -483,24 +481,20 @@ static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device
struct adapter *if1 = NULL;
struct dvobj_priv *dvobj;
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_init\n"));
-
/* Initialize dvobj_priv */
dvobj = usb_dvobj_init(pusb_intf);
- if (dvobj == NULL) {
+ if (!dvobj) {
RT_TRACE(_module_hci_intfs_c_, _drv_err_,
("initialize device object priv Failed!\n"));
goto exit;
}
if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
- if (if1 == NULL) {
+ if (!if1) {
pr_debug("rtw_init_primarystruct adapter Failed!\n");
goto free_dvobj;
}
- RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-871x_drv - drv_init, success!\n"));
-
return 0;
free_dvobj:
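The probe path keeps the kernel's usual goto-unwind shape: each failure branches to the label that releases exactly what was acquired before it. A generic skeleton of the pattern, with kmalloc() standing in for the driver's real allocators (hypothetical, for illustration only):

#include <linux/errno.h>
#include <linux/slab.h>

/* kmalloc() stands in for usb_dvobj_init()/rtw_usb_if1_init(). */
static int demo_probe(void)
{
	void *dvobj, *if1;

	dvobj = kmalloc(64, GFP_KERNEL);
	if (!dvobj)
		goto exit;

	if1 = kmalloc(64, GFP_KERNEL);
	if (!if1)
		goto free_dvobj;

	return 0;		/* success: both resources held */

free_dvobj:
	kfree(dvobj);		/* release only what was acquired */
exit:
	return -ENOMEM;
}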
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 2c19054cf027..735a199ebdcf 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -17,8 +17,6 @@
#include "rtllib.h"
-
-
struct chnl_txpow_triple {
u8 FirstChnl;
u8 NumChnls;
@@ -42,7 +40,6 @@ enum dot11d_state {
*/
struct rt_dot11d_info {
-
bool bEnabled;
u16 CountryIeLen;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index e9c4f973bba9..ba64a4f1b3a8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -680,7 +680,7 @@ static void _rtl92e_hwconfig(struct net_device *dev)
rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
{
- u32 ratr_value = 0;
+ u32 ratr_value;
ratr_value = regRATR;
if (priv->rf_type == RF_1T2R)
@@ -1000,7 +1000,7 @@ void rtl92e_link_change(struct net_device *dev)
_rtl92e_update_msr(dev);
if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
- u32 reg = 0;
+ u32 reg;
reg = rtl92e_readl(dev, RCR);
if (priv->rtllib->state == RTLLIB_LINKED) {
@@ -1186,7 +1186,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
struct r8192_priv *priv = rtllib_priv(dev);
dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
- struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
+ struct tx_fwinfo_8190pci *pTxFwInfo;
pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
memset(pTxFwInfo, 0, sizeof(struct tx_fwinfo_8190pci));
@@ -2235,7 +2235,7 @@ void rtl92e_disable_irq(struct net_device *dev)
void rtl92e_clear_irq(struct net_device *dev)
{
- u32 tmp = 0;
+ u32 tmp;
tmp = rtl92e_readl(dev, ISR);
rtl92e_writel(dev, ISR, tmp);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 8f989a95a019..9b7cc7dc7cb8 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -249,7 +249,7 @@ bool rtl92e_set_rf_state(struct net_device *dev,
if (StateToSet == eRfOn) {
if (bConnectBySSID && priv->blinked_ingpio) {
- queue_delayed_work_rsl(ieee->wq,
+ schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
priv->blinked_ingpio = false;
}
@@ -288,7 +288,7 @@ static void _rtl92e_tx_timeout(struct net_device *dev)
void rtl92e_irq_enable(struct net_device *dev)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
priv->irq_enabled = 1;
@@ -297,7 +297,7 @@ void rtl92e_irq_enable(struct net_device *dev)
void rtl92e_irq_disable(struct net_device *dev)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
priv->ops->irq_disable(dev);
@@ -306,7 +306,7 @@ void rtl92e_irq_disable(struct net_device *dev)
static void _rtl92e_set_chan(struct net_device *dev, short ch)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
RT_TRACE(COMP_CH, "=====>%s()====ch:%d\n", __func__, ch);
if (priv->chan_forced)
@@ -437,7 +437,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
network->qos_data.old_param_count =
network->qos_data.param_count;
priv->rtllib->wmm_acm = network->qos_data.wmm_acm;
- queue_work_rsl(priv->priv_wq, &priv->qos_activate);
+ schedule_work(&priv->qos_activate);
RT_TRACE(COMP_QOS,
"QoS parameters change call qos_activate\n");
}
@@ -446,7 +446,7 @@ static int _rtl92e_qos_handle_probe_response(struct r8192_priv *priv,
&def_qos_parameters, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
- queue_work_rsl(priv->priv_wq, &priv->qos_activate);
+ schedule_work(&priv->qos_activate);
RT_TRACE(COMP_QOS,
"QoS was disabled call qos_activate\n");
}
@@ -465,7 +465,7 @@ static int _rtl92e_handle_beacon(struct net_device *dev,
_rtl92e_qos_handle_probe_response(priv, 1, network);
- queue_delayed_work_rsl(priv->priv_wq, &priv->update_beacon_wq, 0);
+ schedule_delayed_work(&priv->update_beacon_wq, 0);
return 0;
}
@@ -512,7 +512,7 @@ static int _rtl92e_qos_assoc_resp(struct r8192_priv *priv,
network->flags, priv->rtllib->current_network.qos_data.active);
if (set_qos_param == 1) {
rtl92e_dm_init_edca_turbo(priv->rtllib->dev);
- queue_work_rsl(priv->priv_wq, &priv->qos_activate);
+ schedule_work(&priv->qos_activate);
}
return 0;
}
@@ -1002,7 +1002,6 @@ static void _rtl92e_init_priv_task(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- priv->priv_wq = create_workqueue(DRV_NAME);
INIT_WORK_RSL(&priv->reset_wq, (void *)_rtl92e_restart, dev);
INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)rtl92e_ips_leave_wq,
dev);
@@ -1327,7 +1326,7 @@ RESET_START:
ieee->set_chan(ieee->dev,
ieee->current_network.channel);
- queue_work_rsl(ieee->wq, &ieee->associate_complete_wq);
+ schedule_work(&ieee->associate_complete_wq);
} else if (ieee->state == RTLLIB_LINKED && ieee->iw_mode ==
IW_MODE_ADHOC) {
@@ -1499,7 +1498,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
if (!(ieee->rtllib_ap_sec_type(ieee) &
(SEC_ALG_CCMP|SEC_ALG_TKIP)))
- queue_delayed_work_rsl(ieee->wq,
+ schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
priv->check_roaming_cnt = 0;
@@ -1536,7 +1535,7 @@ static void _rtl92e_watchdog_timer_cb(unsigned long data)
{
struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
- queue_delayed_work_rsl(priv->priv_wq, &priv->watch_dog_wq, 0);
+ schedule_delayed_work(&priv->watch_dog_wq, 0);
mod_timer(&priv->watch_dog_timer, jiffies +
msecs_to_jiffies(RTLLIB_WATCH_DOG_TIME));
}
@@ -1546,14 +1545,14 @@ static void _rtl92e_watchdog_timer_cb(unsigned long data)
*****************************************************************************/
void rtl92e_rx_enable(struct net_device *dev)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
priv->ops->rx_enable(dev);
}
void rtl92e_tx_enable(struct net_device *dev)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
priv->ops->tx_enable(dev);
@@ -1612,7 +1611,7 @@ static void _rtl92e_free_tx_ring(struct net_device *dev, unsigned int prio)
static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
int rate)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
int ret;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
@@ -1643,7 +1642,7 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
int ret;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
@@ -1676,7 +1675,7 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void _rtl92e_tx_isr(struct net_device *dev, int prio)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
struct rtl8192_tx_ring *ring = &priv->tx_ring[prio];
@@ -1850,7 +1849,7 @@ static short _rtl92e_alloc_rx_ring(struct net_device *dev)
static int _rtl92e_alloc_tx_ring(struct net_device *dev, unsigned int prio,
unsigned int entries)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
struct tx_desc *ring;
dma_addr_t dma;
int i;
@@ -1944,7 +1943,7 @@ void rtl92e_reset_desc_ring(struct net_device *dev)
void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
struct rtllib_rx_stats *stats)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
if (stats->bIsAMPDU && !stats->bFirstMPDU)
stats->mac_time = priv->LastRxDescTSF;
@@ -2022,7 +2021,7 @@ void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
static void _rtl92e_rx_normal(struct net_device *dev)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_hdr_1addr *rtllib_hdr = NULL;
bool unicast_packet = false;
bool bLedBlinking = true;
@@ -2128,7 +2127,7 @@ done:
static void _rtl92e_tx_resume(struct net_device *dev)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
struct sk_buff *skb;
int queue_index;
@@ -2161,8 +2160,8 @@ static void _rtl92e_irq_rx_tasklet(struct r8192_priv *priv)
*****************************************************************************/
static void _rtl92e_cancel_deferred_work(struct r8192_priv *priv)
{
- cancel_delayed_work(&priv->watch_dog_wq);
- cancel_delayed_work(&priv->update_beacon_wq);
+ cancel_delayed_work_sync(&priv->watch_dog_wq);
+ cancel_delayed_work_sync(&priv->update_beacon_wq);
cancel_delayed_work(&priv->rtllib->hw_sleep_wq);
cancel_work_sync(&priv->reset_wq);
cancel_work_sync(&priv->qos_activate);
@@ -2279,7 +2278,7 @@ static int _rtl92e_set_mac_adr(struct net_device *dev, void *mac)
/* based on ipw2200 driver */
static int _rtl92e_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct r8192_priv *priv = rtllib_priv(dev);
struct iwreq *wrq = (struct iwreq *)rq;
int ret = -1;
struct rtllib_device *ieee = priv->rtllib;
@@ -2402,8 +2401,8 @@ out:
static irqreturn_t _rtl92e_irq(int irq, void *netdev)
{
- struct net_device *dev = (struct net_device *) netdev;
- struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
+ struct net_device *dev = netdev;
+ struct r8192_priv *priv = rtllib_priv(dev);
unsigned long flags;
u32 inta;
u32 intb;
@@ -2693,7 +2692,7 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
priv = rtllib_priv(dev);
del_timer_sync(&priv->gpio_polling_timer);
- cancel_delayed_work(&priv->gpio_change_rf_wq);
+ cancel_delayed_work_sync(&priv->gpio_change_rf_wq);
priv->polling_timer_on = 0;
_rtl92e_down(dev, true);
rtl92e_dm_deinit(dev);
@@ -2701,7 +2700,6 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
vfree(priv->pFirmware);
priv->pFirmware = NULL;
}
- destroy_workqueue(priv->priv_wq);
_rtl92e_free_rx_ring(dev);
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
_rtl92e_free_tx_ring(dev, i);
@@ -2783,7 +2781,7 @@ void rtl92e_check_rfctrl_gpio_timer(unsigned long data)
priv->polling_timer_on = 1;
- queue_delayed_work_rsl(priv->priv_wq, &priv->gpio_change_rf_wq, 0);
+ schedule_delayed_work(&priv->gpio_change_rf_wq, 0);
mod_timer(&priv->gpio_polling_timer, jiffies +
msecs_to_jiffies(RTLLIB_WATCH_DOG_TIME));
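All of the queue_*_rsl() wrappers above targeted the driver-private workqueue; with that queue gone, work items go to the shared system workqueue, where schedule_delayed_work(w, d) is shorthand for queue_delayed_work(system_wq, w, d). A minimal sketch of the lifecycle, including the cancel_delayed_work_sync() teardown this patch also switches to (which, unlike the non-_sync variant, waits for a callback that is already running):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct delayed_work poll_work;	/* hypothetical work item */

static void poll_fn(struct work_struct *work)
{
	/* ... periodic work, then re-arm on the system workqueue ... */
	schedule_delayed_work(&poll_work, msecs_to_jiffies(2000));
}

static void poll_start(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, 0);
}

static void poll_stop(void)
{
	/* The _sync variant also waits out a callback already running. */
	cancel_delayed_work_sync(&poll_work);
}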
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index a7777a319c02..f627fdc15a58 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -338,8 +338,6 @@ struct r8192_priv {
struct delayed_work rfpath_check_wq;
struct delayed_work gpio_change_rf_wq;
- struct workqueue_struct *priv_wq;
-
struct channel_access_setting ChannelAccessSetting;
struct rtl819x_ops *ops;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index ef03242113be..9bc284812c30 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -994,8 +994,7 @@ static void _rtl92e_dm_check_tx_power_tracking_tssi(struct net_device *dev)
if (tx_power_track_counter >= 180) {
- queue_delayed_work_rsl(priv->priv_wq,
- &priv->txpower_tracking_wq, 0);
+ schedule_delayed_work(&priv->txpower_tracking_wq, 0);
tx_power_track_counter = 0;
}
@@ -1028,7 +1027,7 @@ static void _rtl92e_dm_check_tx_power_tracking_thermal(struct net_device *dev)
return;
}
netdev_info(dev, "===============>Schedule TxPowerTrackingWorkItem\n");
- queue_delayed_work_rsl(priv->priv_wq, &priv->txpower_tracking_wq, 0);
+ schedule_delayed_work(&priv->txpower_tracking_wq, 0);
TM_Trigger = 0;
}
@@ -1875,7 +1874,7 @@ void rtl92e_dm_rf_pathcheck_wq(void *data)
struct r8192_priv,
rfpath_check_wq);
struct net_device *dev = priv->rtllib->dev;
- u8 rfpath = 0, i;
+ u8 rfpath, i;
rfpath = rtl92e_readb(dev, 0xc04);
@@ -2121,7 +2120,7 @@ static void _rtl92e_dm_check_rx_path_selection(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- queue_delayed_work_rsl(priv->priv_wq, &priv->rfpath_check_wq, 0);
+ schedule_delayed_work(&priv->rfpath_check_wq, 0);
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index b2b5ada69e73..9e04dc29fbbb 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -27,7 +27,7 @@ int rtl92e_suspend(struct pci_dev *pdev, pm_message_t state)
netdev_info(dev, "============> r8192E suspend call.\n");
del_timer_sync(&priv->gpio_polling_timer);
- cancel_delayed_work(&priv->gpio_change_rf_wq);
+ cancel_delayed_work_sync(&priv->gpio_change_rf_wq);
priv->polling_timer_on = 0;
if (!netif_running(dev)) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 9a4d1bcb881d..98e4d88d0e73 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -63,9 +63,8 @@ void rtl92e_hw_wakeup(struct net_device *dev)
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
RT_TRACE(COMP_DBG,
"rtl92e_hw_wakeup(): RF Change in progress!\n");
- queue_delayed_work_rsl(priv->rtllib->wq,
- &priv->rtllib->hw_wakeup_wq,
- msecs_to_jiffies(10));
+ schedule_delayed_work(&priv->rtllib->hw_wakeup_wq,
+ msecs_to_jiffies(10));
return;
}
spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
@@ -111,10 +110,8 @@ void rtl92e_enter_sleep(struct net_device *dev, u64 time)
return;
}
tmp = time - jiffies;
- queue_delayed_work_rsl(priv->rtllib->wq,
- &priv->rtllib->hw_wakeup_wq, tmp);
- queue_delayed_work_rsl(priv->rtllib->wq,
- (void *)&priv->rtllib->hw_sleep_wq, 0);
+ schedule_delayed_work(&priv->rtllib->hw_wakeup_wq, tmp);
+ schedule_delayed_work(&priv->rtllib->hw_sleep_wq, 0);
spin_unlock_irqrestore(&priv->ps_lock, flags);
}
@@ -203,8 +200,7 @@ void rtl92e_rtllib_ips_leave_wq(struct net_device *dev)
}
netdev_info(dev, "=========>%s(): rtl92e_ips_leave\n",
__func__);
- queue_work_rsl(priv->rtllib->wq,
- &priv->rtllib->ips_leave_wq);
+ schedule_work(&priv->rtllib->ips_leave_wq);
}
}
}
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index c04a020f6d6c..c7fd1b1653d6 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -189,7 +189,7 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
struct ba_record *pBA)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = rtllib_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ);
@@ -204,7 +204,7 @@ static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
struct ba_record *pBA, u16 StatusCode)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = rtllib_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP);
if (skb)
@@ -217,7 +217,7 @@ static void rtllib_send_DELBA(struct rtllib_device *ieee, u8 *dst,
struct ba_record *pBA, enum tr_select TxRxSelect,
u16 ReasonCode)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = rtllib_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode);
if (skb)
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 563ac12f0b2c..776e179d5bfd 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -76,7 +76,7 @@
#define container_of_work_rsl(x, y, z) container_of(x, y, z)
#define container_of_dwork_rsl(x, y, z) \
- container_of(container_of(x, struct delayed_work, work), y, z)
+ container_of(to_delayed_work(x), y, z)
#define iwe_stream_add_event_rsl(info, start, stop, iwe, len) \
iwe_stream_add_event(info, start, stop, iwe, len)
@@ -1728,7 +1728,6 @@ struct rtllib_device {
struct delayed_work link_change_wq;
struct work_struct wx_sync_scan_wq;
- struct workqueue_struct *wq;
union {
struct rtllib_rxb *RfdArray[REORDER_WIN_SIZE];
struct rtllib_rxb *stats_IndicateArray[REORDER_WIN_SIZE];
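to_delayed_work() is the standard helper for recovering the struct delayed_work from the struct work_struct pointer the workqueue core passes to a callback; container_of() then climbs to the enclosing private structure, which is what the container_of_dwork_rsl() macro above now expands to. A sketch with a hypothetical container type:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_priv {			/* hypothetical container */
	int channel;
	struct delayed_work scan_wq;
};

static void demo_scan_cb(struct work_struct *work)
{
	struct demo_priv *priv =
		container_of(to_delayed_work(work), struct demo_priv, scan_wq);

	/* priv->channel is now reachable from inside the callback. */
}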
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index 496de4f6a7bc..bc45cf098b04 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -233,7 +233,7 @@ static int rtllib_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len,
b0, b, s0);
- blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
+ blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
last = data_len % AES_BLOCK_LEN;
for (i = 1; i <= blocks; i++) {
@@ -319,7 +319,7 @@ static int rtllib_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
xor_block(mic, b, CCMP_MIC_LEN);
- blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
+ blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
last = data_len % AES_BLOCK_LEN;
for (i = 1; i <= blocks; i++) {
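DIV_ROUND_UP(n, d) from <linux/kernel.h> expands to (((n) + (d) - 1) / (d)), i.e. exactly the open-coded ceiling division it replaces, so the computed block count is unchanged:

#include <linux/kernel.h>

/* e.g. 33 bytes -> 3 AES blocks, 32 bytes -> 2, 0 bytes -> 0 */
static size_t ccmp_blocks(size_t data_len)
{
	return DIV_ROUND_UP(data_len, 16);	/* AES_BLOCK_LEN == 16 */
}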
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 2096d78913bd..ae103b0b7a2a 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -9,6 +9,8 @@
* more details.
*/
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -18,7 +20,6 @@
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/string.h>
-#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
@@ -48,10 +49,10 @@ struct rtllib_tkip_data {
u32 dot11RSNAStatsTKIPLocalMICFailures;
int key_idx;
- struct crypto_blkcipher *rx_tfm_arc4;
- struct crypto_hash *rx_tfm_michael;
- struct crypto_blkcipher *tx_tfm_arc4;
- struct crypto_hash *tx_tfm_michael;
+ struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_ahash *rx_tfm_michael;
+ struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_ahash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16];
u8 tx_hdr[16];
@@ -65,32 +66,32 @@ static void *rtllib_tkip_init(int key_idx)
if (priv == NULL)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->tx_tfm_arc4 = NULL;
goto fail;
}
- priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_michael)) {
pr_debug("Could not allocate crypto API michael_mic\n");
priv->tx_tfm_michael = NULL;
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->rx_tfm_arc4 = NULL;
goto fail;
}
- priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+ CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_michael)) {
pr_debug("Could not allocate crypto API michael_mic\n");
priv->rx_tfm_michael = NULL;
@@ -100,14 +101,10 @@ static void *rtllib_tkip_init(int key_idx)
fail:
if (priv) {
- if (priv->tx_tfm_michael)
- crypto_free_hash(priv->tx_tfm_michael);
- if (priv->tx_tfm_arc4)
- crypto_free_blkcipher(priv->tx_tfm_arc4);
- if (priv->rx_tfm_michael)
- crypto_free_hash(priv->rx_tfm_michael);
- if (priv->rx_tfm_arc4)
- crypto_free_blkcipher(priv->rx_tfm_arc4);
+ crypto_free_ahash(priv->tx_tfm_michael);
+ crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_ahash(priv->rx_tfm_michael);
+ crypto_free_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -120,14 +117,10 @@ static void rtllib_tkip_deinit(void *priv)
struct rtllib_tkip_data *_priv = priv;
if (_priv) {
- if (_priv->tx_tfm_michael)
- crypto_free_hash(_priv->tx_tfm_michael);
- if (_priv->tx_tfm_arc4)
- crypto_free_blkcipher(_priv->tx_tfm_arc4);
- if (_priv->rx_tfm_michael)
- crypto_free_hash(_priv->rx_tfm_michael);
- if (_priv->rx_tfm_arc4)
- crypto_free_blkcipher(_priv->rx_tfm_arc4);
+ crypto_free_ahash(_priv->tx_tfm_michael);
+ crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_ahash(_priv->rx_tfm_michael);
+ crypto_free_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -301,7 +294,6 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct rtllib_hdr_4addr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
int ret = 0;
u8 rc4key[16], *icv;
u32 crc;
@@ -347,6 +339,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
+ SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
icv[0] = crc;
@@ -357,8 +351,12 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, len+4);
- crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+ crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
}
tkey->tx_iv16++;
@@ -369,8 +367,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
if (!tcb_desc->bHwSec)
return ret;
- else
- return 0;
+ return 0;
}
@@ -384,12 +381,12 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
struct rtllib_hdr_4addr *hdr;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
u8 rc4key[16];
u8 icv[4];
u32 crc;
struct scatterlist sg;
int plen;
+ int err;
if (skb->len < hdr_len + 8 + 4)
return -1;
@@ -425,6 +422,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
+ SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+
if ((iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
tkey->initialized) {
@@ -450,8 +449,13 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, plen+4);
- crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+ crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+ err = crypto_skcipher_decrypt(req);
+ skcipher_request_zero(req);
+ if (err) {
if (net_ratelimit()) {
netdev_dbg(skb->dev,
"Failed to decrypt received packet from %pM\n",
@@ -500,11 +504,12 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
-static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
u8 *data, size_t data_len, u8 *mic)
{
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, tfm_michael);
struct scatterlist sg[2];
+ int err;
if (tfm_michael == NULL) {
pr_warn("michael_mic: tfm_michael == NULL\n");
@@ -514,12 +519,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
sg_set_buf(&sg[0], hdr, 16);
sg_set_buf(&sg[1], data, data_len);
- if (crypto_hash_setkey(tfm_michael, key, 8))
+ if (crypto_ahash_setkey(tfm_michael, key, 8))
return -1;
- desc.tfm = tfm_michael;
- desc.flags = 0;
- return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+ ahash_request_set_tfm(req, tfm_michael);
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_crypt(req, sg, mic, data_len + 16);
+ err = crypto_ahash_digest(req);
+ ahash_request_zero(req);
+ return err;
}
static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -655,10 +663,10 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
{
struct rtllib_tkip_data *tkey = priv;
int keyidx;
- struct crypto_hash *tfm = tkey->tx_tfm_michael;
- struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
- struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+ struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+ struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
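This is the mechanical blkcipher-to-skcipher conversion: allocate a synchronous tfm (the CRYPTO_ALG_ASYNC mask filters out async implementations), build an on-stack request, run the cipher in place over a single scatterlist, then wipe the request. A condensed, self-contained sketch of the same sequence under those assumptions:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int arc4_crypt_buf(const u8 *key, unsigned int klen,
			  u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	/* Mask CRYPTO_ALG_ASYNC: only synchronous implementations qualify. */
	tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, klen);
	if (!err) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, len);	/* in-place: src == dst */
		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, NULL);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe keyed state */
	}
	crypto_free_skcipher(tfm);
	return err;
}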
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index 21d7eee4c9a9..b3343a5d0fd6 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -9,6 +9,7 @@
* more details.
*/
+#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -17,8 +18,6 @@
#include <linux/string.h>
#include "rtllib.h"
-#include <linux/crypto.h>
-
#include <linux/scatterlist.h>
#include <linux/crc32.h>
@@ -28,8 +27,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_blkcipher *tx_tfm;
- struct crypto_blkcipher *rx_tfm;
+ struct crypto_skcipher *tx_tfm;
+ struct crypto_skcipher *rx_tfm;
};
@@ -42,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->tx_tfm = NULL;
goto fail;
}
- priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->rx_tfm = NULL;
@@ -62,10 +61,8 @@ static void *prism2_wep_init(int keyidx)
fail:
if (priv) {
- if (priv->tx_tfm)
- crypto_free_blkcipher(priv->tx_tfm);
- if (priv->rx_tfm)
- crypto_free_blkcipher(priv->rx_tfm);
+ crypto_free_skcipher(priv->tx_tfm);
+ crypto_free_skcipher(priv->rx_tfm);
kfree(priv);
}
return NULL;
@@ -77,10 +74,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- if (_priv->tx_tfm)
- crypto_free_blkcipher(_priv->tx_tfm);
- if (_priv->rx_tfm)
- crypto_free_blkcipher(_priv->rx_tfm);
+ crypto_free_skcipher(_priv->tx_tfm);
+ crypto_free_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -99,10 +94,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 *pos;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
u32 crc;
u8 *icv;
struct scatterlist sg;
+ int err;
if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
skb->len < hdr_len){
@@ -140,6 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
+ SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -150,8 +146,13 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
sg_init_one(&sg, pos, len+4);
- crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
- return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+ crypto_skcipher_setkey(wep->tx_tfm, key, klen);
+ skcipher_request_set_tfm(req, wep->tx_tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+ err = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ return err;
}
return 0;
@@ -173,10 +174,10 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 keyidx, *pos;
struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
u32 crc;
u8 icv[4];
struct scatterlist sg;
+ int err;
if (skb->len < hdr_len + 8)
return -1;
@@ -198,9 +199,16 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
+ SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+
sg_init_one(&sg, pos, plen+4);
- crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
- if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+ crypto_skcipher_setkey(wep->rx_tfm, key, klen);
+ skcipher_request_set_tfm(req, wep->rx_tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+ err = crypto_skcipher_decrypt(req);
+ skcipher_request_zero(req);
+ if (err)
return -7;
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 113fbf7fbb17..f4f318abb299 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -45,15 +45,11 @@
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <net/arp.h>
-
#include "rtllib.h"
-
u32 rt_global_debug_component = COMP_ERR;
EXPORT_SYMBOL(rt_global_debug_component);
-
-
static inline int rtllib_networks_allocate(struct rtllib_device *ieee)
{
if (ieee->networks)
@@ -110,7 +106,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
}
rtllib_networks_initialize(ieee);
-
/* Default fragmentation threshold is maximum payload size */
ieee->fts = DEFAULT_FTS;
ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 37343ec3b484..c743182b933e 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -467,7 +467,7 @@ static bool AddReorderEntry(struct rx_ts_record *pTS,
else if (SN_EQUAL(pReorderEntry->SeqNum,
((struct rx_reorder_entry *)list_entry(pList->next,
struct rx_reorder_entry, List))->SeqNum))
- return false;
+ return false;
else
break;
}
@@ -905,7 +905,7 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
{
struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
- size_t hdrlen = 0;
+ size_t hdrlen;
hdrlen = rtllib_get_hdrlen(fc);
if (HTCCheck(ieee, skb->data)) {
@@ -1829,7 +1829,6 @@ static inline void rtllib_extract_country_ie(
if (IS_EQUAL_CIE_SRC(ieee, addr2))
UPDATE_CIE_WATCHDOG(ieee);
}
-
}
static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
@@ -1902,7 +1901,6 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
info_element->data,
network->bssht.bdHTInfoLen);
}
-
}
}
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index d0fedb0ff132..cfab715495ad 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -355,9 +355,9 @@ static inline struct sk_buff *rtllib_probe_req(struct rtllib_device *ieee)
req->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_PROBE_REQ);
req->header.duration_id = 0;
- memset(req->header.addr1, 0xff, ETH_ALEN);
+ eth_broadcast_addr(req->header.addr1);
ether_addr_copy(req->header.addr2, ieee->dev->dev_addr);
- memset(req->header.addr3, 0xff, ETH_ALEN);
+ eth_broadcast_addr(req->header.addr3);
tag = (u8 *) skb_put(skb, len + 2 + rate_len);
@@ -615,8 +615,8 @@ static void rtllib_softmac_scan_wq(void *data)
if (ieee->active_channel_map[ieee->current_network.channel] == 1)
rtllib_send_probe_requests(ieee, 0);
- queue_delayed_work_rsl(ieee->wq, &ieee->softmac_scan_wq,
- msecs_to_jiffies(RTLLIB_SOFTMAC_SCAN_TIME));
+ schedule_delayed_work(&ieee->softmac_scan_wq,
+ msecs_to_jiffies(RTLLIB_SOFTMAC_SCAN_TIME));
up(&ieee->scan_sem);
return;
@@ -689,7 +689,7 @@ static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
ieee->scanning_continue = 0;
ieee->actscanning = false;
- cancel_delayed_work(&ieee->softmac_scan_wq);
+ cancel_delayed_work_sync(&ieee->softmac_scan_wq);
}
up(&ieee->scan_sem);
@@ -745,8 +745,7 @@ static void rtllib_start_scan(struct rtllib_device *ieee)
if (ieee->scanning_continue == 0) {
ieee->actscanning = true;
ieee->scanning_continue = 1;
- queue_delayed_work_rsl(ieee->wq,
- &ieee->softmac_scan_wq, 0);
+ schedule_delayed_work(&ieee->softmac_scan_wq, 0);
}
} else {
if (ieee->rtllib_start_hw_scan)
@@ -776,7 +775,7 @@ inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon,
{
struct sk_buff *skb;
struct rtllib_authentication *auth;
- int len = 0;
+ int len;
len = sizeof(struct rtllib_authentication) + challengelen +
ieee->tx_headroom + 4;
@@ -1428,8 +1427,8 @@ static void rtllib_associate_abort(struct rtllib_device *ieee)
ieee->state = RTLLIB_ASSOCIATING_RETRY;
- queue_delayed_work_rsl(ieee->wq, &ieee->associate_retry_wq,
- RTLLIB_SOFTMAC_ASSOC_RETRY_TIME);
+ schedule_delayed_work(&ieee->associate_retry_wq,
+ RTLLIB_SOFTMAC_ASSOC_RETRY_TIME);
spin_unlock_irqrestore(&ieee->lock, flags);
}
@@ -1580,7 +1579,7 @@ static void rtllib_associate_complete(struct rtllib_device *ieee)
ieee->state = RTLLIB_LINKED;
rtllib_sta_send_associnfo(ieee);
- queue_work_rsl(ieee->wq, &ieee->associate_complete_wq);
+ schedule_work(&ieee->associate_complete_wq);
}
static void rtllib_associate_procedure_wq(void *data)
@@ -1729,7 +1728,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
if (ieee->LedControlHandler != NULL)
ieee->LedControlHandler(ieee->dev,
LED_CTL_START_TO_LINK);
- queue_delayed_work_rsl(ieee->wq,
+ schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
} else {
if (rtllib_is_54g(&ieee->current_network) &&
@@ -2283,7 +2282,7 @@ inline int rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
"Association response status code 0x%x\n",
errcode);
if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
- queue_delayed_work_rsl(ieee->wq,
+ schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
else
rtllib_associate_abort(ieee);
@@ -2393,7 +2392,7 @@ inline int rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
if (!(ieee->rtllib_ap_sec_type(ieee) &
(SEC_ALG_CCMP|SEC_ALG_TKIP)))
- queue_delayed_work_rsl(ieee->wq,
+ schedule_delayed_work(
&ieee->associate_procedure_wq, 5);
}
return 0;
@@ -2538,12 +2537,6 @@ void rtllib_wake_all_queues(struct rtllib_device *ieee)
netif_tx_wake_all_queues(ieee->dev);
}
-inline void rtllib_randomize_cell(struct rtllib_device *ieee)
-{
-
- random_ether_addr(ieee->current_network.bssid);
-}
-
/* called in user context only */
static void rtllib_start_master_bss(struct rtllib_device *ieee)
{
@@ -2634,7 +2627,7 @@ static void rtllib_start_ibss_wq(void *data)
netdev_info(ieee->dev, "creating new IBSS cell\n");
ieee->current_network.channel = ieee->IbssStartChnl;
if (!ieee->wap_set)
- rtllib_randomize_cell(ieee);
+ eth_random_addr(ieee->current_network.bssid);
if (ieee->modulation & RTLLIB_CCK_MODULATION) {
@@ -2715,8 +2708,7 @@ static void rtllib_start_ibss_wq(void *data)
inline void rtllib_start_ibss(struct rtllib_device *ieee)
{
- queue_delayed_work_rsl(ieee->wq, &ieee->start_ibss_wq,
- msecs_to_jiffies(150));
+ schedule_delayed_work(&ieee->start_ibss_wq, msecs_to_jiffies(150));
}
/* this is called only in user context, with wx_sem held */
@@ -2770,7 +2762,7 @@ void rtllib_disassociate(struct rtllib_device *ieee)
ieee->is_set_key = false;
ieee->wap_set = 0;
- queue_delayed_work_rsl(ieee->wq, &ieee->link_change_wq, 0);
+ schedule_delayed_work(&ieee->link_change_wq, 0);
notify_wx_assoc_event(ieee);
}
@@ -2882,9 +2874,9 @@ void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
rtllib_stop_send_beacons(ieee);
del_timer_sync(&ieee->associate_timer);
- cancel_delayed_work(&ieee->associate_retry_wq);
- cancel_delayed_work(&ieee->start_ibss_wq);
- cancel_delayed_work(&ieee->link_change_wq);
+ cancel_delayed_work_sync(&ieee->associate_retry_wq);
+ cancel_delayed_work_sync(&ieee->start_ibss_wq);
+ cancel_delayed_work_sync(&ieee->link_change_wq);
rtllib_stop_scan(ieee);
if (ieee->state <= RTLLIB_ASSOCIATING_AUTHENTICATED)
@@ -3027,9 +3019,6 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
rtllib_send_beacon_cb,
(unsigned long) ieee);
-
- ieee->wq = create_workqueue(DRV_NAME);
-
INIT_DELAYED_WORK_RSL(&ieee->link_change_wq,
(void *)rtllib_link_change_wq, ieee);
INIT_DELAYED_WORK_RSL(&ieee->start_ibss_wq,
@@ -3065,8 +3054,16 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
ieee->pDot11dInfo = NULL;
del_timer_sync(&ieee->associate_timer);
- cancel_delayed_work(&ieee->associate_retry_wq);
- destroy_workqueue(ieee->wq);
+ cancel_delayed_work_sync(&ieee->associate_retry_wq);
+ cancel_delayed_work_sync(&ieee->associate_procedure_wq);
+ cancel_delayed_work_sync(&ieee->softmac_scan_wq);
+ cancel_delayed_work_sync(&ieee->start_ibss_wq);
+ cancel_delayed_work_sync(&ieee->hw_wakeup_wq);
+ cancel_delayed_work_sync(&ieee->hw_sleep_wq);
+ cancel_delayed_work_sync(&ieee->link_change_wq);
+ cancel_work_sync(&ieee->associate_complete_wq);
+ cancel_work_sync(&ieee->ips_leave_wq);
+ cancel_work_sync(&ieee->wx_sync_scan_wq);
up(&ieee->wx_sem);
tasklet_kill(&ieee->ps_task);
}
@@ -3328,7 +3325,7 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
goto done;
}
new_crypt->ops = ops;
- if (new_crypt->ops)
+ if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv =
new_crypt->ops->init(param->u.crypt.idx);
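Guarding ops->init() with try_module_get(ops->owner) closes a small unload race: the crypto module cannot be removed while a context created from it is live, and the reference is dropped with module_put() when the crypt data is freed. A sketch of the guard with an abbreviated ops type (illustrative, not rtllib's exact definition):

#include <linux/errno.h>
#include <linux/module.h>

struct demo_crypto_ops {	/* abbreviated; mirrors rtllib's ops table */
	struct module *owner;
	void *(*init)(int keyidx);
};

static int crypt_attach(struct demo_crypto_ops *ops, int idx, void **ppriv)
{
	if (!try_module_get(ops->owner))
		return -ENOENT;		/* module already unloading */

	*ppriv = ops->init(idx);
	if (!*ppriv) {
		module_put(ops->owner);	/* drop the reference on failure */
		return -ENOMEM;
	}
	return 0;
}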
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 86f52ac7d33e..61ed8b0413e4 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -243,7 +243,7 @@ int rtllib_wx_get_rate(struct rtllib_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- u32 tmp_rate = 0;
+ u32 tmp_rate;
tmp_rate = TxCountToDataRate(ieee,
ieee->softmac_stats.CurrentShowTxate);
@@ -429,7 +429,7 @@ int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
}
if (ieee->state == RTLLIB_LINKED) {
- queue_work_rsl(ieee->wq, &ieee->wx_sync_scan_wq);
+ schedule_work(&ieee->wx_sync_scan_wq);
/* intentionally forget to up sem */
return 0;
}
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 80f7a099dff1..84e6272f28cd 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -623,7 +623,7 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
goto done;
}
new_crypt->ops = ops;
- if (new_crypt->ops)
+ if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv = new_crypt->ops->init(idx);
if (new_crypt->priv == NULL) {
diff --git a/drivers/staging/rtl8192u/ieee80211/dot11d.c b/drivers/staging/rtl8192u/ieee80211/dot11d.c
index 82d60380bb40..00b6052fbbac 100644
--- a/drivers/staging/rtl8192u/ieee80211/dot11d.c
+++ b/drivers/staging/rtl8192u/ieee80211/dot11d.c
@@ -103,7 +103,7 @@ u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
u8 MaxTxPwrInDbm = 255;
- if (MAX_CHANNEL_NUMBER < Channel) {
+ if (Channel > MAX_CHANNEL_NUMBER) {
netdev_err(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
return MaxTxPwrInDbm;
}
@@ -139,7 +139,7 @@ int IsLegalChannel(struct ieee80211_device *dev, u8 channel)
{
PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
- if (MAX_CHANNEL_NUMBER < channel) {
+ if (channel > MAX_CHANNEL_NUMBER) {
netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return 0;
}
@@ -162,7 +162,7 @@ int ToLegalChannel(struct ieee80211_device *dev, u8 channel)
}
}
- if (MAX_CHANNEL_NUMBER < channel) {
+ if (channel > MAX_CHANNEL_NUMBER) {
netdev_err(dev->dev, "IsLegalChannel(): Invalid Channel\n");
return default_chn;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 967ef9a98fc0..68931e5ecd8f 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -238,8 +238,6 @@ typedef struct cb_desc {
#define ieee80211_tkip_null ieee80211_tkip_null_rsl
-#define ieee80211_wep_null ieee80211_wep_null_rsl
-
#define free_ieee80211 free_ieee80211_rsl
#define alloc_ieee80211 alloc_ieee80211_rsl
@@ -329,9 +327,6 @@ typedef struct ieee_param {
// linux under 2.6.9 release may not support it, so modify it for common use
-#define MSECS(t) msecs_to_jiffies(t)
-#define msleep_interruptible_rsl msleep_interruptible
-
#define IEEE80211_DATA_LEN 2304
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6.2.1.1.2.
@@ -2260,7 +2255,6 @@ void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
/* ieee80211_crypt_ccmp&tkip&wep.c */
void ieee80211_tkip_null(void);
-void ieee80211_wep_null(void);
void ieee80211_ccmp_null(void);
int ieee80211_crypto_init(void);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
index 3995620b3442..9cf90d040cfe 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c
@@ -176,8 +176,7 @@ struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name)
if (found_alg)
return found_alg->ops;
- else
- return NULL;
+ return NULL;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index 27ce4817faeb..2dc25cc2c726 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -242,7 +242,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
- blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
+ blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
last = data_len % AES_BLOCK_LEN;
for (i = 1; i <= blocks; i++) {
@@ -331,7 +331,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
xor_block(mic, b, CCMP_MIC_LEN);
- blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
+ blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
last = data_len % AES_BLOCK_LEN;
for (i = 1; i <= blocks; i++) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 908bc2eb4d29..6fa96d57d316 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -21,7 +21,8 @@
#include "ieee80211.h"
-#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>
@@ -52,10 +53,10 @@ struct ieee80211_tkip_data {
int key_idx;
- struct crypto_blkcipher *rx_tfm_arc4;
- struct crypto_hash *rx_tfm_michael;
- struct crypto_blkcipher *tx_tfm_arc4;
- struct crypto_hash *tx_tfm_michael;
+ struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_ahash *rx_tfm_michael;
+ struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_ahash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16], tx_hdr[16];
@@ -70,7 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+ priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -79,7 +80,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+ priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -88,7 +89,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+ priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -97,7 +98,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+ priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -110,14 +111,10 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
- if (priv->tx_tfm_michael)
- crypto_free_hash(priv->tx_tfm_michael);
- if (priv->tx_tfm_arc4)
- crypto_free_blkcipher(priv->tx_tfm_arc4);
- if (priv->rx_tfm_michael)
- crypto_free_hash(priv->rx_tfm_michael);
- if (priv->rx_tfm_arc4)
- crypto_free_blkcipher(priv->rx_tfm_arc4);
+ crypto_free_ahash(priv->tx_tfm_michael);
+ crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_ahash(priv->rx_tfm_michael);
+ crypto_free_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -130,14 +127,10 @@ static void ieee80211_tkip_deinit(void *priv)
struct ieee80211_tkip_data *_priv = priv;
if (_priv) {
- if (_priv->tx_tfm_michael)
- crypto_free_hash(_priv->tx_tfm_michael);
- if (_priv->tx_tfm_arc4)
- crypto_free_blkcipher(_priv->tx_tfm_arc4);
- if (_priv->rx_tfm_michael)
- crypto_free_hash(_priv->rx_tfm_michael);
- if (_priv->rx_tfm_arc4)
- crypto_free_blkcipher(_priv->rx_tfm_arc4);
+ crypto_free_ahash(_priv->tx_tfm_michael);
+ crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_ahash(_priv->rx_tfm_michael);
+ crypto_free_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -312,7 +305,6 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 *pos;
struct rtl_80211_hdr_4addr *hdr;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
int ret = 0;
u8 rc4key[16], *icv;
u32 crc;
@@ -357,15 +349,21 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
+ SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, len+4);
- ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+ skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+ ret = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
}
tkey->tx_iv16++;
@@ -390,12 +388,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u16 iv16;
struct rtl_80211_hdr_4addr *hdr;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
u8 rc4key[16];
u8 icv[4];
u32 crc;
struct scatterlist sg;
int plen;
+ int err;
if (skb->len < hdr_len + 8 + 4)
return -1;
@@ -429,6 +427,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec) {
+ SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
if (net_ratelimit()) {
@@ -449,10 +449,16 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, plen+4);
- if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+ skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+
+ err = crypto_skcipher_decrypt(req);
+ skcipher_request_zero(req);
+ if (err) {
if (net_ratelimit()) {
printk(KERN_DEBUG ": TKIP: failed to decrypt "
"received packet from %pM\n",
@@ -501,11 +507,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return keyidx;
}
-static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
u8 *data, size_t data_len, u8 *mic)
{
- struct hash_desc desc;
+ AHASH_REQUEST_ON_STACK(req, tfm_michael);
struct scatterlist sg[2];
+ int err;
if (tfm_michael == NULL) {
printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
@@ -516,12 +523,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
sg_set_buf(&sg[0], hdr, 16);
sg_set_buf(&sg[1], data, data_len);
- if (crypto_hash_setkey(tfm_michael, key, 8))
+ if (crypto_ahash_setkey(tfm_michael, key, 8))
return -1;
- desc.tfm = tfm_michael;
- desc.flags = 0;
- return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+ ahash_request_set_tfm(req, tfm_michael);
+ ahash_request_set_callback(req, 0, NULL, NULL);
+ ahash_request_set_crypt(req, sg, mic, data_len + 16);
+ err = crypto_ahash_digest(req);
+ ahash_request_zero(req);
+ return err;
}
static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -660,10 +670,10 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
- struct crypto_hash *tfm = tkey->tx_tfm_michael;
- struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
- struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+ struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+ struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index 681611dc93d3..1999bc5cbbc1 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -18,7 +18,7 @@
#include "ieee80211.h"
-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>
@@ -32,8 +32,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_blkcipher *tx_tfm;
- struct crypto_blkcipher *rx_tfm;
+ struct crypto_skcipher *tx_tfm;
+ struct crypto_skcipher *rx_tfm;
};
@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
return NULL;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm))
goto free_priv;
- priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm))
goto free_tx;
@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
return priv;
free_tx:
- crypto_free_blkcipher(priv->tx_tfm);
+ crypto_free_skcipher(priv->tx_tfm);
free_priv:
kfree(priv);
return NULL;
@@ -70,10 +70,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- if (_priv->tx_tfm)
- crypto_free_blkcipher(_priv->tx_tfm);
- if (_priv->rx_tfm)
- crypto_free_blkcipher(_priv->rx_tfm);
+ crypto_free_skcipher(_priv->tx_tfm);
+ crypto_free_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
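The WEP teardown also loses its explicit NULL checks: crypto_free_skcipher() funnels into crypto_destroy_tfm(), which in this kernel generation returns early for a NULL tfm, so the guards were redundant. The matching allocate/unwind shape, sketched with goto-based error handling as in prism2_wep_init() above:

    /* Sketch: paired tx/rx tfm allocation with goto unwinding. */
    static void *example_wep_init(void)
    {
            struct prism2_wep_data *priv;

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return NULL;

            priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(priv->tx_tfm))
                    goto free_priv;
            priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(priv->rx_tfm))
                    goto free_tx;
            return priv;

    free_tx:
            crypto_free_skcipher(priv->tx_tfm); /* known valid here */
    free_priv:
            kfree(priv);
            return NULL;
    }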
@@ -91,10 +89,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 key[WEP_KEY_LEN + 3];
u8 *pos;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
u32 crc;
u8 *icv;
struct scatterlist sg;
+ int err;
if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
skb->len < hdr_len)
@@ -129,6 +127,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
+ SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
icv = skb_put(skb, 4);
@@ -137,10 +137,16 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
+ crypto_skcipher_setkey(wep->tx_tfm, key, klen);
sg_init_one(&sg, pos, len+4);
- return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+ skcipher_request_set_tfm(req, wep->tx_tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+
+ err = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
+ return err;
}
return 0;
@@ -161,10 +167,10 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
u8 key[WEP_KEY_LEN + 3];
u8 keyidx, *pos;
cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
u32 crc;
u8 icv[4];
struct scatterlist sg;
+ int err;
if (skb->len < hdr_len + 8)
return -1;
@@ -186,10 +192,18 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
+ SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+
+ crypto_skcipher_setkey(wep->rx_tfm, key, klen);
sg_init_one(&sg, pos, plen+4);
- if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+ skcipher_request_set_tfm(req, wep->rx_tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+
+ err = crypto_skcipher_decrypt(req);
+ skcipher_request_zero(req);
+ if (err)
return -7;
crc = ~crc32_le(~0, pos, plen);
@@ -274,6 +288,3 @@ void __exit ieee80211_crypto_wep_exit(void)
ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}
-void ieee80211_wep_null(void)
-{
-}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index 425b2ddfc916..30fff6c5696b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -177,7 +177,6 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
/* These functions were added to autoload the crypto modules */
ieee80211_tkip_null();
- ieee80211_wep_null();
ieee80211_ccmp_null();
return dev;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 130c852ffa02..f18fc0b6775b 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -460,10 +460,8 @@ static int is_duplicate_packet(struct ieee80211_device *ieee,
// if (memcmp(entry->mac, mac, ETH_ALEN)){
if (p == &ieee->ibss_mac_hash[index]) {
entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC);
- if (!entry) {
- printk(KERN_WARNING "Cannot malloc new mac entry\n");
+ if (!entry)
return 0;
- }
memcpy(entry->mac, mac, ETH_ALEN);
entry->seq_num[tid] = seq;
entry->frag_num[tid] = frag;
@@ -594,12 +592,18 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
{
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
PRX_REORDER_ENTRY pReorderEntry = NULL;
- struct ieee80211_rxb *prxbIndicateArray[REORDER_WIN_SIZE];
+ struct ieee80211_rxb **prxbIndicateArray;
u8 WinSize = pHTInfo->RxReorderWinSize;
u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096;
u8 index = 0;
bool bMatchWinStart = false, bPktInBuf = false;
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__func__,SeqNum,pTS->RxIndicateSeq,WinSize);
+
+ prxbIndicateArray = kmalloc(sizeof(struct ieee80211_rxb *) *
+ REORDER_WIN_SIZE, GFP_KERNEL);
+ if (!prxbIndicateArray)
+ return;
+
/* Rx Reorder initialize condition.*/
if (pTS->RxIndicateSeq == 0xffff) {
pTS->RxIndicateSeq = SeqNum;
@@ -618,6 +622,8 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
kfree(prxb);
prxb = NULL;
}
+
+ kfree(prxbIndicateArray);
return;
}
@@ -741,6 +747,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
// Indicate packets
if(index>REORDER_WIN_SIZE){
IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!!\n");
+ kfree(prxbIndicateArray);
return;
}
ieee80211_indicate_packets(ieee, prxbIndicateArray, index);
@@ -752,9 +759,12 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq;
if(timer_pending(&pTS->RxPktPendingTimer))
del_timer_sync(&pTS->RxPktPendingTimer);
- pTS->RxPktPendingTimer.expires = jiffies + MSECS(pHTInfo->RxReorderPendingTime);
+ pTS->RxPktPendingTimer.expires = jiffies +
+ msecs_to_jiffies(pHTInfo->RxReorderPendingTime);
add_timer(&pTS->RxPktPendingTimer);
}
+
+ kfree(prxbIndicateArray);
}
static u8 parse_subframe(struct sk_buff *skb,
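Moving prxbIndicateArray off the stack matters because REORDER_WIN_SIZE pointers made for a large frame deep in the receive path; the trade-off is that every return must now kfree() the buffer, which is exactly what the three added kfree() calls do. One hedged caveat: GFP_KERNEL may sleep, so this assumes the reorder path runs in process context; if it can be entered from softirq or under a spinlock, GFP_ATOMIC would be needed instead. Condensed:

    /* Sketch: heap scratch array, freed on every exit path. */
    static void example_reorder_indicate(struct ieee80211_device *ieee,
                                         bool early_exit)
    {
            struct ieee80211_rxb **array;

            array = kmalloc(sizeof(*array) * REORDER_WIN_SIZE, GFP_KERNEL);
            if (!array)
                    return;

            if (early_exit) {
                    kfree(array); /* each return path owns the free */
                    return;
            }

            /* ... fill array[] and indicate the packets upward ... */

            kfree(array);
    }

A later cleanup would likely spell the allocation kmalloc_array(REORDER_WIN_SIZE, sizeof(*array), ...) to get overflow checking for free.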
@@ -897,7 +907,6 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
//added by amy for reorder
#ifdef NOT_YET
struct net_device *wds = NULL;
- struct sk_buff *skb2 = NULL;
struct net_device *wds = NULL;
int from_assoc_ap = 0;
void *sta = NULL;
@@ -1277,11 +1286,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
payload = skb->data + hdrlen;
//ethertype = (payload[6] << 8) | payload[7];
rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
- if (rxb == NULL)
- {
- IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__func__);
+ if (!rxb)
goto rx_dropped;
- }
/* to parse amsdu packets */
/* qos data packets & reserved bit is 1 */
if (parse_subframe(skb, rx_stats, rxb, src, dst) == 0) {
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 38c3eb78094e..ae1274cfb392 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -103,12 +103,12 @@ static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- *tag++ = MFIE_TYPE_GENERIC; //0
+ *tag++ = MFIE_TYPE_GENERIC; /* 0 */
*tag++ = 7;
*tag++ = 0x00;
*tag++ = 0x50;
*tag++ = 0xf2;
- *tag++ = 0x02;//5
+ *tag++ = 0x02; /* 5 */
*tag++ = 0x00;
*tag++ = 0x01;
#ifdef SUPPORT_USPD
@@ -128,12 +128,12 @@ static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- *tag++ = MFIE_TYPE_GENERIC; //0
+ *tag++ = MFIE_TYPE_GENERIC; /* 0 */
*tag++ = 7;
*tag++ = 0x00;
*tag++ = 0xe0;
*tag++ = 0x4c;
- *tag++ = 0x01;//5
+ *tag++ = 0x01; /* 5 */
*tag++ = 0x02;
*tag++ = 0x11;
*tag++ = 0x00;
@@ -186,14 +186,14 @@ static u8 MgntQuery_MgntFrameTxRate(struct ieee80211_device *ieee)
PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
u8 rate;
- // 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M.
+ /* 2008/01/25 MH For broadcom, MGNT frame set as OFDM 6M. */
if(pHTInfo->IOTAction & HT_IOT_ACT_MGNT_USE_CCK_6M)
rate = 0x0c;
else
rate = ieee->basic_rate & 0x7f;
if (rate == 0) {
- // 2005.01.26, by rcnjko.
+ /* 2005.01.26, by rcnjko. */
if(ieee->mode == IEEE_A||
ieee->mode== IEEE_N_5G||
(ieee->mode== IEEE_N_24G&&!pHTInfo->bCurSuppCCK))
@@ -340,11 +340,11 @@ inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request));
req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- req->header.duration_id = 0; //FIXME: is this OK ?
+ req->header.duration_id = 0; /* FIXME: is this OK? */
- memset(req->header.addr1, 0xff, ETH_ALEN);
+ eth_broadcast_addr(req->header.addr1);
memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
- memset(req->header.addr3, 0xff, ETH_ALEN);
+ eth_broadcast_addr(req->header.addr3);
tag = (u8 *) skb_put(skb,len+2+rate_len);
@@ -380,7 +380,8 @@ static void ieee80211_send_beacon(struct ieee80211_device *ieee)
if (ieee->beacon_txing && ieee->ieee_up) {
// if(!timer_pending(&ieee->beacon_timer))
// add_timer(&ieee->beacon_timer);
- mod_timer(&ieee->beacon_timer,jiffies+(MSECS(ieee->current_network.beacon_interval-5)));
+ mod_timer(&ieee->beacon_timer,
+ jiffies + msecs_to_jiffies(ieee->current_network.beacon_interval-5));
}
//spin_unlock_irqrestore(&ieee->beacon_lock,flags);
}
@@ -468,7 +469,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
if (ieee->state >= IEEE80211_LINKED && ieee->sync_scan_hurryup)
goto out;
- msleep_interruptible_rsl(IEEE80211_SOFTMAC_SCAN_TIME);
+ msleep_interruptible(IEEE80211_SOFTMAC_SCAN_TIME);
}
out:
@@ -487,7 +488,7 @@ EXPORT_SYMBOL(ieee80211_softmac_scan_syncro);
static void ieee80211_softmac_scan_wq(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
static short watchdog;
u8 channel_map[MAX_CHANNEL_NUMBER+1];
@@ -514,7 +515,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
ieee80211_send_probe_requests(ieee);
- queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
+ schedule_delayed_work(&ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
up(&ieee->scan_sem);
return;
@@ -613,7 +614,7 @@ static void ieee80211_start_scan(struct ieee80211_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_SCAN){
if (ieee->scanning == 0) {
ieee->scanning = 1;
- queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0);
+ schedule_delayed_work(&ieee->softmac_scan_wq, 0);
}
}else
ieee->start_scan(ieee->dev);
@@ -672,7 +673,7 @@ inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *be
else if(ieee->auth_mode == 1)
auth->algorithm = cpu_to_le16(WLAN_AUTH_SHARED_KEY);
else if(ieee->auth_mode == 2)
- auth->algorithm = WLAN_AUTH_OPEN;//0x80;
+ auth->algorithm = WLAN_AUTH_OPEN; /* 0x80; */
printk("=================>%s():auth->algorithm is %d\n",__func__,auth->algorithm);
auth->transaction = cpu_to_le16(ieee->associate_seq);
ieee->associate_seq++;
@@ -727,7 +728,7 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
- //HT ralated element
+ /* HT related element */
tmp_ht_cap_buf =(u8 *) &(ieee->pHTInfo->SelfHTCap);
tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
tmp_ht_info_buf =(u8 *) &(ieee->pHTInfo->SelfHTInfo);
@@ -765,13 +766,13 @@ static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
memcpy (beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy (beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
- beacon_buf->header.duration_id = 0; //FIXME
+ beacon_buf->header.duration_id = 0; /* FIXME */
beacon_buf->beacon_interval =
cpu_to_le16(ieee->current_network.beacon_interval);
beacon_buf->capability =
cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS);
beacon_buf->capability |=
- cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); //add short preamble here
+ cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE); /* add short preamble here */
if(ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
@@ -1012,7 +1013,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
crypt = ieee->crypt[ieee->tx_keyidx];
encrypt = ieee->host_encrypt && crypt && crypt->ops && ((0 == strcmp(crypt->ops->name,"WEP") || wpa_ie_len));
- //Include High Throuput capability && Realtek proprietary
+ /* Include High Throughput capability && Realtek proprietary */
if (ieee->pHTInfo->bCurrentHTSupport&&ieee->pHTInfo->bEnableHT)
{
ht_cap_buf = (u8 *)&(ieee->pHTInfo->SelfHTCap);
@@ -1044,8 +1045,8 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
#ifdef THOMAS_TURBO
len = sizeof(struct ieee80211_assoc_request_frame)+ 2
- + beacon->ssid_len//essid tagged val
- + rate_len//rates tagged val
+ + beacon->ssid_len /* essid tagged val */
+ + rate_len /* rates tagged val */
+ wpa_ie_len
+ wmm_info_len
+ turbo_info_len
@@ -1057,8 +1058,8 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
+ ieee->tx_headroom;
#else
len = sizeof(struct ieee80211_assoc_request_frame)+ 2
- + beacon->ssid_len//essid tagged val
- + rate_len//rates tagged val
+ + beacon->ssid_len /* essid tagged val */
+ + rate_len /* rates tagged val */
+ wpa_ie_len
+ wmm_info_len
+ ht_cap_len
@@ -1240,7 +1241,7 @@ void ieee80211_associate_abort(struct ieee80211_device *ieee)
ieee->state = IEEE80211_ASSOCIATING_RETRY;
- queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, \
+ schedule_delayed_work(&ieee->associate_retry_wq, \
IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
spin_unlock_irqrestore(&ieee->lock, flags);
@@ -1381,7 +1382,7 @@ static void ieee80211_associate_complete(struct ieee80211_device *ieee)
ieee->state = IEEE80211_LINKED;
//ieee->UpdateHalRATRTableHandler(dev, ieee->dot11HTOperationalRateSet);
- queue_work(ieee->wq, &ieee->associate_complete_wq);
+ schedule_work(&ieee->associate_complete_wq);
}
static void ieee80211_associate_procedure_wq(struct work_struct *work)
@@ -1482,7 +1483,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
}
ieee->state = IEEE80211_ASSOCIATING;
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
+ schedule_work(&ieee->associate_procedure_wq);
}else{
if(ieee80211_is_54g(&ieee->current_network) &&
(ieee->modulation & IEEE80211_OFDM_MODULATION)){
@@ -1735,10 +1736,12 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
if(dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST)& ieee->ps))
return 2;
- if(!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout)))
+ if(!time_after(jiffies,
+ ieee->dev->trans_start + msecs_to_jiffies(timeout)))
return 0;
- if(!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
+ if(!time_after(jiffies,
+ ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
return 0;
if((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE ) &&
@@ -2041,7 +2044,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
"Association response status code 0x%x\n",
errcode);
if(ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT) {
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
+ schedule_work(&ieee->associate_procedure_wq);
} else {
ieee80211_associate_abort(ieee);
}
@@ -2097,7 +2100,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
notify_wx_assoc_event(ieee);
//HTSetConnectBwMode(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT);
RemovePeerTS(ieee, header->addr2);
- queue_work(ieee->wq, &ieee->associate_procedure_wq);
+ schedule_work(&ieee->associate_procedure_wq);
}
break;
case IEEE80211_STYPE_MANAGE_ACT:
@@ -2284,12 +2287,6 @@ void ieee80211_stop_queue(struct ieee80211_device *ieee)
}
EXPORT_SYMBOL(ieee80211_stop_queue);
-inline void ieee80211_randomize_cell(struct ieee80211_device *ieee)
-{
-
- random_ether_addr(ieee->current_network.bssid);
-}
-
/* called in user context only */
void ieee80211_start_master_bss(struct ieee80211_device *ieee)
{
@@ -2330,7 +2327,7 @@ static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
static void ieee80211_start_ibss_wq(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
/* iwconfig mode ad-hoc will schedule this and return
* on the other hand this will block further iwconfig SET
@@ -2379,7 +2376,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
if (ieee->state == IEEE80211_NOLINK) {
printk("creating new IBSS cell\n");
if(!ieee->wap_set)
- ieee80211_randomize_cell(ieee);
+ random_ether_addr(ieee->current_network.bssid);
if(ieee->modulation & IEEE80211_CCK_MODULATION){
@@ -2439,7 +2436,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
{
- queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 150);
+ schedule_delayed_work(&ieee->start_ibss_wq, 150);
}
/* this is called only in user context, with wx_sem held */
@@ -2504,7 +2501,7 @@ EXPORT_SYMBOL(ieee80211_disassociate);
static void ieee80211_associate_retry_wq(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
unsigned long flags;
@@ -2722,7 +2719,6 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb,
(unsigned long)ieee);
- ieee->wq = create_workqueue(DRV_NAME);
INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
INIT_WORK(&ieee->associate_complete_wq, ieee80211_associate_complete_wq);
@@ -2752,7 +2748,6 @@ void ieee80211_softmac_free(struct ieee80211_device *ieee)
del_timer_sync(&ieee->associate_timer);
cancel_delayed_work(&ieee->associate_retry_wq);
- destroy_workqueue(ieee->wq);
up(&ieee->wx_sem);
}
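The softmac changes retire the driver-private workqueue: the create_workqueue(DRV_NAME)/destroy_workqueue() pair goes away and every queue_work()/queue_delayed_work() on ieee->wq becomes schedule_work()/schedule_delayed_work() on the shared system workqueue; the open-coded container_of(work, struct delayed_work, work) is likewise replaced by the to_delayed_work() helper. The resulting handler shape, sketched with illustrative names:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    struct example_dev {
            struct delayed_work scan_wq;
            /* ... */
    };

    static void example_scan_worker(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);
            struct example_dev *dev = container_of(dwork, struct example_dev,
                                                   scan_wq);

            /* ... perform one scan step, then re-arm ... */
            schedule_delayed_work(&dev->scan_wq, msecs_to_jiffies(100));
    }

    /* setup: INIT_DELAYED_WORK(&dev->scan_wq, example_scan_worker);
     * no create_workqueue()/destroy_workqueue() pair is required. */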
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 3bde744604c2..28737ec65186 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -19,7 +19,7 @@ static void ActivateBAEntry(struct ieee80211_device *ieee, PBA_RECORD pBA, u16 T
{
pBA->bValid = true;
if(Time != 0)
- mod_timer(&pBA->Timer, jiffies + MSECS(Time));
+ mod_timer(&pBA->Timer, jiffies + msecs_to_jiffies(Time));
}
/********************************************************************************************************************
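Throughout these files the driver-private MSECS() macro is replaced by the standard msecs_to_jiffies() conversion, which scales milliseconds by the running kernel's HZ instead of baking in an assumption about tick length. The timer-arming idiom then reads, in sketch form:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    /* Sketch: (re-)arm a timer to fire 'ms' milliseconds from now. */
    static void example_arm_timer(struct timer_list *t, unsigned int ms)
    {
            mod_timer(t, jiffies + msecs_to_jiffies(ms));
    }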
@@ -254,7 +254,7 @@ static struct sk_buff *ieee80211_DELBA(
static void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee,
u8 *dst, PBA_RECORD pBA)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = ieee80211_ADDBA(ieee, dst, pBA, 0, ACT_ADDBAREQ); //construct ACT_ADDBAREQ frames, with the status code set to zero.
if (skb)
@@ -282,7 +282,7 @@ static void ieee80211_send_ADDBAReq(struct ieee80211_device *ieee,
static void ieee80211_send_ADDBARsp(struct ieee80211_device *ieee, u8 *dst,
PBA_RECORD pBA, u16 StatusCode)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = ieee80211_ADDBA(ieee, dst, pBA, StatusCode, ACT_ADDBARSP); //construct ACT_ADDBARSP frames
if (skb)
{
@@ -311,7 +311,7 @@ static void ieee80211_send_DELBA(struct ieee80211_device *ieee, u8 *dst,
PBA_RECORD pBA, TR_SELECT TxRxSelect,
u16 ReasonCode)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
skb = ieee80211_DELBA(ieee, dst, pBA, TxRxSelect, ReasonCode); //construct DELBA frames
if (skb)
{
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index f33c74342cf3..148d0d45547b 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -35,9 +35,7 @@ static void RxPktPendingTimeout(unsigned long data)
u8 index = 0;
bool bPktInBuf = false;
-
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
- //PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"==================>%s()\n",__func__);
if(pRxTs->RxTimeoutIndicateSeq != 0xffff)
{
@@ -87,10 +85,10 @@ static void RxPktPendingTimeout(unsigned long data)
if(bPktInBuf && (pRxTs->RxTimeoutIndicateSeq==0xffff))
{
pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
- mod_timer(&pRxTs->RxPktPendingTimer, jiffies + MSECS(ieee->pHTInfo->RxReorderPendingTime));
+ mod_timer(&pRxTs->RxPktPendingTimer,
+ jiffies + msecs_to_jiffies(ieee->pHTInfo->RxReorderPendingTime));
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
- //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
}
/********************************************************************************************************************
@@ -212,7 +210,8 @@ static void AdmitTS(struct ieee80211_device *ieee,
del_timer_sync(&pTsCommonInfo->InactTimer);
if(InactTime!=0)
- mod_timer(&pTsCommonInfo->InactTimer, jiffies + MSECS(InactTime));
+ mod_timer(&pTsCommonInfo->InactTimer,
+ jiffies + msecs_to_jiffies(InactTime));
}
@@ -469,7 +468,6 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
while(!list_empty(&pRxTS->RxPendingPktList))
{
- // PlatformAcquireSpinLock(Adapter, RT_RX_SPINLOCK);
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
//pRxReorderEntry = list_entry(&pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
pRxReorderEntry = (PRX_REORDER_ENTRY)list_entry(pRxTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
@@ -489,7 +487,6 @@ static void RemoveTsEntry(struct ieee80211_device *ieee, PTS_COMMON_INFO pTs,
prxb = NULL;
}
list_add_tail(&pRxReorderEntry->List,&ieee->RxReorder_Unused_List);
- //PlatformReleaseSpinLock(Adapter, RT_RX_SPINLOCK);
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
@@ -590,7 +587,8 @@ void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD pTxTS)
if(pTxTS->bAddBaReqDelayed)
{
IEEE80211_DEBUG(IEEE80211_DL_BA, "TsStartAddBaProcess(): Delayed Start ADDBA after 60 sec!!\n");
- mod_timer(&pTxTS->TsAddBaTimer, jiffies + MSECS(TS_ADDBA_DELAY));
+ mod_timer(&pTxTS->TsAddBaTimer,
+ jiffies + msecs_to_jiffies(TS_ADDBA_DELAY));
}
else
{
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index e00032947e0f..5c3bb3be2720 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -1,12 +1,11 @@
/*
- This is part of the rtl8192 driver
- released under the GPL (See file COPYING for details).
-
- This files contains programming code for the rtl8256
- radio frontend.
-
- *Many* thanks to Realtek Corp. for their great support!
-
+* This is part of the rtl8192 driver
+* released under the GPL (See file COPYING for details).
+*
+* This file contains programming code for the rtl8256
+* radio frontend.
+*
+* *Many* thanks to Realtek Corp. for their great support!
*/
#include "r8192U.h"
@@ -22,7 +21,8 @@
* Output: NONE
* Return: NONE
* Note: 8226 support both 20M and 40 MHz
- *---------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth)
{
u8 eRFPath;
@@ -83,7 +83,8 @@ void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth)
* Input: struct net_device* dev
* Output: NONE
* Return: NONE
- *---------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
void PHY_RF8256_Config(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -100,7 +101,8 @@ void PHY_RF8256_Config(struct net_device *dev)
* Input: struct net_device* dev
* Output: NONE
* Return: NONE
- *---------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
void phy_RF8256_Config_ParaFile(struct net_device *dev)
{
u32 u4RegValue = 0;
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index f4a4eae72aa4..849a95ef723c 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -1092,10 +1092,17 @@ static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void rtl8192_tx_isr(struct urb *tx_urb)
{
struct sk_buff *skb = (struct sk_buff *)tx_urb->context;
- struct net_device *dev = (struct net_device *)(skb->cb);
+ struct net_device *dev;
struct r8192_priv *priv = NULL;
- cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
- u8 queue_index = tcb_desc->queue_index;
+ cb_desc *tcb_desc;
+ u8 queue_index;
+
+ if (!skb)
+ return;
+
+ dev = (struct net_device *)(skb->cb);
+ tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
+ queue_index = tcb_desc->queue_index;
priv = ieee80211_priv(dev);
@@ -1113,11 +1120,9 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
}
/* free skb and tx_urb */
- if (skb != NULL) {
- dev_kfree_skb_any(skb);
- usb_free_urb(tx_urb);
- atomic_dec(&priv->tx_pending[queue_index]);
- }
+ dev_kfree_skb_any(skb);
+ usb_free_urb(tx_urb);
+ atomic_dec(&priv->tx_pending[queue_index]);
/*
* Handle HW Beacon:
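This tx_isr rework fixes a dereference-before-check: the old initializers read skb->cb before the `if (skb != NULL)` test ran, so the test could never protect anything. Checking first, then deriving dev and the cb descriptor, also lets the tail free the skb unconditionally. In miniature (mirroring the driver's habit of stashing the net_device pointer in skb->cb):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/usb.h>

    /* Sketch: validate the pointer before any use derived from it. */
    static void example_tx_isr(struct urb *tx_urb)
    {
            struct sk_buff *skb = tx_urb->context;
            struct net_device *dev;

            if (!skb) /* bail out before touching skb->cb */
                    return;

            dev = (struct net_device *)skb->cb;
            netdev_dbg(dev, "tx complete\n");
            dev_kfree_skb_any(skb); /* known non-NULL here */
            usb_free_urb(tx_urb);
    }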
@@ -1371,7 +1376,7 @@ short rtl819xU_tx_cmd(struct net_device *dev, struct sk_buff *skb)
*/
static u8 MapHwQueueToFirmwareQueue(u8 QueueID)
{
- u8 QueueSelect = 0x0; /* defualt set to */
+ u8 QueueSelect = 0x0; /* default set to */
switch (QueueID) {
case BE_QUEUE:
@@ -1727,7 +1732,7 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
priv->rx_urb = kmalloc(sizeof(struct urb *) * (MAX_RX_URB + 1),
GFP_KERNEL);
- if (priv->rx_urb == NULL)
+ if (!priv->rx_urb)
return -ENOMEM;
#ifndef JACKSON_NEW_RX
@@ -1957,7 +1962,7 @@ static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
network->qos_data.param_count)) {
network->qos_data.old_param_count =
network->qos_data.param_count;
- queue_work(priv->priv_wq, &priv->qos_activate);
+ schedule_work(&priv->qos_activate);
RT_TRACE(COMP_QOS,
"QoS parameters change call qos_activate\n");
}
@@ -1966,7 +1971,7 @@ static int rtl8192_qos_handle_probe_response(struct r8192_priv *priv,
&def_qos_parameters, size);
if ((network->qos_data.active == 1) && (active_network == 1)) {
- queue_work(priv->priv_wq, &priv->qos_activate);
+ schedule_work(&priv->qos_activate);
RT_TRACE(COMP_QOS,
"QoS was disabled call qos_activate\n");
}
@@ -1985,7 +1990,7 @@ static int rtl8192_handle_beacon(struct net_device *dev,
struct r8192_priv *priv = ieee80211_priv(dev);
rtl8192_qos_handle_probe_response(priv, 1, network);
- queue_delayed_work(priv->priv_wq, &priv->update_beacon_wq, 0);
+ schedule_delayed_work(&priv->update_beacon_wq, 0);
return 0;
}
@@ -2037,7 +2042,7 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
network->flags,
priv->ieee80211->current_network.qos_data.active);
if (set_qos_param == 1)
- queue_work(priv->priv_wq, &priv->qos_activate);
+ schedule_work(&priv->qos_activate);
return 0;
@@ -2382,7 +2387,6 @@ static void rtl8192_init_priv_task(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- priv->priv_wq = create_workqueue(DRV_NAME);
INIT_WORK(&priv->reset_wq, rtl8192_restart);
@@ -3436,8 +3440,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
static void rtl819x_watchdog_wqcallback(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work,
- struct delayed_work, work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct r8192_priv *priv = container_of(dwork,
struct r8192_priv, watch_dog_wq);
struct net_device *dev = priv->ieee80211->dev;
@@ -3514,7 +3517,7 @@ static void watch_dog_timer_callback(unsigned long data)
{
struct r8192_priv *priv = ieee80211_priv((struct net_device *)data);
- queue_delayed_work(priv->priv_wq, &priv->watch_dog_wq, 0);
+ schedule_delayed_work(&priv->watch_dog_wq, 0);
mod_timer(&priv->watch_dog_timer,
jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME));
}
@@ -4297,7 +4300,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
if (is_cck_rate) {
/* (1)Hardware does not provide RSSI for CCK */
- /* (2)PWDB, Average PWDB cacluated by hardware
+ /* (2)PWDB, Average PWDB calculated by hardware
* (for rate adaptive)
*/
u8 report;
@@ -4398,7 +4401,7 @@ static void rtl8192_query_rxphystatus(struct r8192_priv *priv,
}
- /* (2)PWDB, Average PWDB cacluated by hardware
+ /* (2)PWDB, Average PWDB calculated by hardware
* (for rate adaptive)
*/
rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
@@ -5018,7 +5021,6 @@ fail2:
kfree(priv->pFirmware);
priv->pFirmware = NULL;
rtl8192_usb_deleteendpoints(dev);
- destroy_workqueue(priv->priv_wq);
mdelay(10);
fail:
free_ieee80211(dev);
@@ -5056,7 +5058,6 @@ static void rtl8192_usb_disconnect(struct usb_interface *intf)
kfree(priv->pFirmware);
priv->pFirmware = NULL;
rtl8192_usb_deleteendpoints(dev);
- destroy_workqueue(priv->priv_wq);
mdelay(10);
}
free_ieee80211(dev);
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 375ec96b9469..1e0e53c9c314 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -767,7 +767,7 @@ static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
void dm_txpower_trackingcallback(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct r8192_priv *priv = container_of(dwork, struct r8192_priv, txpower_tracking_wq);
struct net_device *dev = priv->ieee80211->dev;
@@ -1628,47 +1628,75 @@ static void dm_bb_initialgain_backup(struct net_device *dev)
void dm_change_dynamic_initgain_thresh(struct net_device *dev, u32 dm_type,
u32 dm_value)
{
- if (dm_type == DIG_TYPE_THRESH_HIGH) {
+ switch (dm_type) {
+ case DIG_TYPE_THRESH_HIGH:
dm_digtable.rssi_high_thresh = dm_value;
- } else if (dm_type == DIG_TYPE_THRESH_LOW) {
+ break;
+
+ case DIG_TYPE_THRESH_LOW:
dm_digtable.rssi_low_thresh = dm_value;
- } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_HIGH) {
+ break;
+
+ case DIG_TYPE_THRESH_HIGHPWR_HIGH:
dm_digtable.rssi_high_power_highthresh = dm_value;
- } else if (dm_type == DIG_TYPE_THRESH_HIGHPWR_LOW) {
+ break;
+
+ case DIG_TYPE_THRESH_HIGHPWR_LOW:
dm_digtable.rssi_high_power_lowthresh = dm_value;
- } else if (dm_type == DIG_TYPE_ENABLE) {
+ break;
+
+ case DIG_TYPE_ENABLE:
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_enable_flag = true;
- } else if (dm_type == DIG_TYPE_DISABLE) {
+ break;
+
+ case DIG_TYPE_DISABLE:
dm_digtable.dig_state = DM_STA_DIG_MAX;
dm_digtable.dig_enable_flag = false;
- } else if (dm_type == DIG_TYPE_DBG_MODE) {
+ break;
+
+ case DIG_TYPE_DBG_MODE:
if (dm_value >= DM_DBG_MAX)
dm_value = DM_DBG_OFF;
dm_digtable.dbg_mode = (u8)dm_value;
- } else if (dm_type == DIG_TYPE_RSSI) {
+ break;
+
+ case DIG_TYPE_RSSI:
if (dm_value > 100)
dm_value = 30;
dm_digtable.rssi_val = (long)dm_value;
- } else if (dm_type == DIG_TYPE_ALGORITHM) {
+ break;
+
+ case DIG_TYPE_ALGORITHM:
if (dm_value >= DIG_ALGO_MAX)
dm_value = DIG_ALGO_BY_FALSE_ALARM;
if (dm_digtable.dig_algorithm != (u8)dm_value)
dm_digtable.dig_algorithm_switch = 1;
dm_digtable.dig_algorithm = (u8)dm_value;
- } else if (dm_type == DIG_TYPE_BACKOFF) {
+ break;
+
+ case DIG_TYPE_BACKOFF:
if (dm_value > 30)
dm_value = 30;
dm_digtable.backoff_val = (u8)dm_value;
- } else if (dm_type == DIG_TYPE_RX_GAIN_MIN) {
+ break;
+
+ case DIG_TYPE_RX_GAIN_MIN:
if (dm_value == 0)
dm_value = 0x1;
dm_digtable.rx_gain_range_min = (u8)dm_value;
- } else if (dm_type == DIG_TYPE_RX_GAIN_MAX) {
+ break;
+
+ case DIG_TYPE_RX_GAIN_MAX:
if (dm_value > 0x50)
dm_value = 0x50;
dm_digtable.rx_gain_range_max = (u8)dm_value;
+ break;
+
+ default:
+ break;
}
+
} /* DM_ChangeDynamicInitGainThresh */
/*-----------------------------------------------------------------------------
@@ -2412,7 +2440,7 @@ static void dm_check_pbc_gpio(struct net_device *dev)
*---------------------------------------------------------------------------*/
void dm_rf_pathcheck_workitemcallback(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct r8192_priv *priv = container_of(dwork, struct r8192_priv, rfpath_check_wq);
struct net_device *dev = priv->ieee80211->dev;
/*bool bactually_set = false;*/
@@ -2769,12 +2797,14 @@ void dm_fsync_timer_callback(unsigned long data)
if (bDoubleTimeInterval) {
if (timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
+ priv->fsync_timer.expires = jiffies +
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
add_timer(&priv->fsync_timer);
} else {
if (timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
+ priv->fsync_timer.expires = jiffies +
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
add_timer(&priv->fsync_timer);
}
} else {
@@ -2847,7 +2877,8 @@ static void dm_StartSWFsync(struct net_device *dev)
}
if (timer_pending(&priv->fsync_timer))
del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies + MSECS(priv->ieee80211->fsync_time_interval);
+ priv->fsync_timer.expires = jiffies +
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
add_timer(&priv->fsync_timer);
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
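The fsync path keeps the legacy re-arm sequence: del_timer_sync() if pending, assign ->expires, then add_timer(). mod_timer() performs that update atomically in one call, and since dm_fsync_timer_callback() is re-arming its own timer the sync wait could never trigger anyway, so each block could plausibly collapse to (a hedged simplification, not what the patch does):

    static void example_rearm_fsync(struct r8192_priv *priv)
    {
            /* one atomic re-arm; mod_timer() copes with a pending timer */
            mod_timer(&priv->fsync_timer, jiffies +
                      msecs_to_jiffies(priv->ieee80211->fsync_time_interval));
    }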
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 4911fef2e2e5..f828e6441f2d 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -1,21 +1,23 @@
-/*
- This file contains wireless extension handlers.
-
- This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
- Released under the terms of GPL (General Public Licence)
-
- Parts of this driver are based on the GPL part
- of the official realtek driver.
-
- Parts of this driver are based on the rtl8180 driver skeleton
- from Patric Schenke & Andres Salomon.
-
- Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
-
- We want to thank the Authors of those projects and the Ndiswrapper
- project Authors.
-*/
+/******************************************************************************
+ *
+ * This file contains wireless extension handlers.
+ *
+ * This is part of rtl8180 OpenSource driver.
+ * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
+ * Released under the terms of GPL (General Public Licence)
+ *
+ * Parts of this driver are based on the GPL part
+ * of the official realtek driver.
+ *
+ * Parts of this driver are based on the rtl8180 driver skeleton
+ * from Patric Schenke & Andres Salomon.
+ *
+ * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
+ *
+ * We want to thank the Authors of those projects and the Ndiswrapper
+ * project Authors.
+ *
+ *****************************************************************************/
#include <linux/string.h>
#include "r8192U.h"
diff --git a/drivers/staging/rtl8192u/r819xU_phy.c b/drivers/staging/rtl8192u/r819xU_phy.c
index f264d88364a1..696df3440077 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.c
+++ b/drivers/staging/rtl8192u/r819xU_phy.c
@@ -1683,8 +1683,7 @@ void InitialGain819xUsb(struct net_device *dev, u8 Operation)
void InitialGainOperateWorkItemCallBack(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct r8192_priv *priv = container_of(dwork, struct r8192_priv,
initialgain_operate_wq);
struct net_device *dev = priv->ieee80211->dev;
diff --git a/drivers/staging/rtl8712/TODO b/drivers/staging/rtl8712/TODO
index d8dfe5bfe702..847c8c41f4f7 100644
--- a/drivers/staging/rtl8712/TODO
+++ b/drivers/staging/rtl8712/TODO
@@ -4,10 +4,10 @@ TODO:
- switch to use MAC80211
- checkpatch.pl fixes - only a few remain
-Please send any patches to Greg Kroah-Hartman <greg@kroah.com>,
-Larry Finger <Larry.Finger@lwfinger.net> and
-Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
-
-
-
+A replacement for this driver with MAC80211 support is available
+at https://github.com/chunkeey/rtl8192su
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
+Larry Finger <Larry.Finger@lwfinger.net>,
+Florian Schilhabel <florian.c.schilhabel@googlemail.com> and
+Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>.
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 3d64feeb80e7..29e47e1501c5 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -159,6 +159,7 @@ struct _adapter {
struct mp_priv mppriv;
s32 bDriverStopped;
s32 bSurpriseRemoved;
+ s32 bSuspended;
u32 IsrContent;
u32 ImrContent;
u8 EepromAddressSize;
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index 974ca021ccef..d13b4d53c256 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -376,7 +376,7 @@ int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
uint cnt;
/*Search required WPA or WPA2 IE and copy to sec_ie[ ]*/
- cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
+ cnt = _TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_;
while (cnt < in_len) {
authmode = in_ie[cnt];
if ((authmode == _WPA_IE_ID_) &&
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index d374824c4f33..67ab58084e8a 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -12,8 +12,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
@@ -61,7 +60,6 @@
#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
-
#define IEEE_CRYPT_ALG_NAME_LEN 16
#define WPA_CIPHER_NONE BIT(0)
@@ -70,8 +68,6 @@
#define WPA_CIPHER_TKIP BIT(3)
#define WPA_CIPHER_CCMP BIT(4)
-
-
#define WPA_SELECTOR_LEN 4
#define RSN_HEADER_LEN 4
@@ -88,7 +84,6 @@ enum NETWORK_TYPE {
WIRELESS_11BGN = (WIRELESS_11B | WIRELESS_11G | WIRELESS_11N),
};
-
struct ieee_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
@@ -161,7 +156,6 @@ struct ieee80211_hdr_3addr {
u16 seq_ctl;
} __packed;
-
struct ieee80211_hdr_qos {
u16 frame_ctl;
u16 duration_id;
@@ -191,7 +185,6 @@ struct eapol {
u16 length;
} __packed;
-
enum eap_type {
EAP_PACKET = 0,
EAPOL_START,
@@ -255,7 +248,6 @@ enum eap_type {
#define IEEE80211_STYPE_CFPOLL 0x0060
#define IEEE80211_STYPE_CFACKPOLL 0x0070
#define IEEE80211_QOS_DATAGRP 0x0080
-#define IEEE80211_QoS_DATAGRP IEEE80211_QOS_DATAGRP
#define IEEE80211_SCTL_FRAG 0x000F
#define IEEE80211_SCTL_SEQ 0xFFF0
@@ -305,15 +297,15 @@ struct ieee80211_snap_hdr {
#define WLAN_AUTH_CHALLENGE_LEN 128
-#define WLAN_CAPABILITY_BSS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
-#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_PBCC (1<<6)
-#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
-#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
+#define WLAN_CAPABILITY_BSS BIT(0)
+#define WLAN_CAPABILITY_IBSS BIT(1)
+#define WLAN_CAPABILITY_CF_POLLABLE BIT(2)
+#define WLAN_CAPABILITY_CF_POLL_REQUEST BIT(3)
+#define WLAN_CAPABILITY_PRIVACY BIT(4)
+#define WLAN_CAPABILITY_SHORT_PREAMBLE BIT(5)
+#define WLAN_CAPABILITY_PBCC BIT(6)
+#define WLAN_CAPABILITY_CHANNEL_AGILITY BIT(7)
+#define WLAN_CAPABILITY_SHORT_SLOT BIT(10)
/* Information Element IDs */
#define WLAN_EID_SSID 0
@@ -331,24 +323,21 @@ struct ieee80211_snap_hdr {
#define IEEE80211_DATA_HDR3_LEN 24
#define IEEE80211_DATA_HDR4_LEN 30
-
-#define IEEE80211_STATMASK_SIGNAL (1<<0)
-#define IEEE80211_STATMASK_RSSI (1<<1)
-#define IEEE80211_STATMASK_NOISE (1<<2)
-#define IEEE80211_STATMASK_RATE (1<<3)
+#define IEEE80211_STATMASK_SIGNAL BIT(0)
+#define IEEE80211_STATMASK_RSSI BIT(1)
+#define IEEE80211_STATMASK_NOISE BIT(2)
+#define IEEE80211_STATMASK_RATE BIT(3)
#define IEEE80211_STATMASK_WEMASK 0x7
+#define IEEE80211_CCK_MODULATION BIT(0)
+#define IEEE80211_OFDM_MODULATION BIT(1)
-#define IEEE80211_CCK_MODULATION (1<<0)
-#define IEEE80211_OFDM_MODULATION (1<<1)
-
-#define IEEE80211_24GHZ_BAND (1<<0)
-#define IEEE80211_52GHZ_BAND (1<<1)
+#define IEEE80211_24GHZ_BAND BIT(0)
+#define IEEE80211_52GHZ_BAND BIT(1)
#define IEEE80211_CCK_RATE_LEN 4
#define IEEE80211_NUM_OFDM_RATESLEN 8
-
#define IEEE80211_CCK_RATE_1MB 0x02
#define IEEE80211_CCK_RATE_2MB 0x04
#define IEEE80211_CCK_RATE_5MB 0x0B
@@ -364,18 +353,18 @@ struct ieee80211_snap_hdr {
#define IEEE80211_OFDM_RATE_54MB 0x6C
#define IEEE80211_BASIC_RATE_MASK 0x80
-#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
-#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
-#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
-#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
-#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
-#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
-#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
-#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
-#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
-#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
-#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
-#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
+#define IEEE80211_CCK_RATE_1MB_MASK BIT(0)
+#define IEEE80211_CCK_RATE_2MB_MASK BIT(1)
+#define IEEE80211_CCK_RATE_5MB_MASK BIT(2)
+#define IEEE80211_CCK_RATE_11MB_MASK BIT(3)
+#define IEEE80211_OFDM_RATE_6MB_MASK BIT(4)
+#define IEEE80211_OFDM_RATE_9MB_MASK BIT(5)
+#define IEEE80211_OFDM_RATE_12MB_MASK BIT(6)
+#define IEEE80211_OFDM_RATE_18MB_MASK BIT(7)
+#define IEEE80211_OFDM_RATE_24MB_MASK BIT(8)
+#define IEEE80211_OFDM_RATE_36MB_MASK BIT(9)
+#define IEEE80211_OFDM_RATE_48MB_MASK BIT(10)
+#define IEEE80211_OFDM_RATE_54MB_MASK BIT(11)
#define IEEE80211_CCK_RATES_MASK 0x0000000F
#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
@@ -401,9 +390,6 @@ struct ieee80211_snap_hdr {
#define IEEE80211_NUM_CCK_RATES 4
#define IEEE80211_OFDM_SHIFT_MASK_A 4
-
-
-
/* NOTE: This data is for statistical purposes; not all hardware provides this
* information for frames received. Not setting these will not cause
* any adverse affects.
@@ -481,15 +467,15 @@ struct ieee80211_softmac_stats {
uint swtxawake;
};
-#define SEC_KEY_1 (1<<0)
-#define SEC_KEY_2 (1<<1)
-#define SEC_KEY_3 (1<<2)
-#define SEC_KEY_4 (1<<3)
-#define SEC_ACTIVE_KEY (1<<4)
-#define SEC_AUTH_MODE (1<<5)
-#define SEC_UNICAST_GROUP (1<<6)
-#define SEC_LEVEL (1<<7)
-#define SEC_ENABLED (1<<8)
+#define SEC_KEY_1 BIT(0)
+#define SEC_KEY_2 BIT(1)
+#define SEC_KEY_3 BIT(2)
+#define SEC_KEY_4 BIT(3)
+#define SEC_ACTIVE_KEY BIT(4)
+#define SEC_AUTH_MODE BIT(5)
+#define SEC_UNICAST_GROUP BIT(6)
+#define SEC_LEVEL BIT(7)
+#define SEC_ENABLED BIT(8)
#define SEC_LEVEL_0 0 /* None */
#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
@@ -645,9 +631,9 @@ struct ieee80211_txb {
#define MAX_WPA_IE_LEN 128
-#define NETWORK_EMPTY_ESSID (1<<0)
-#define NETWORK_HAS_OFDM (1<<1)
-#define NETWORK_HAS_CCK (1<<2)
+#define NETWORK_EMPTY_ESSID BIT(0)
+#define NETWORK_HAS_OFDM BIT(1)
+#define NETWORK_HAS_CCK BIT(2)
#define IEEE80211_DTIM_MBCAST 4
#define IEEE80211_DTIM_UCAST 2
@@ -699,15 +685,15 @@ enum ieee80211_state {
#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
#define DEFAULT_FTS 2346
-#define CFG_IEEE80211_RESERVE_FCS (1<<0)
-#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+#define CFG_IEEE80211_RESERVE_FCS BIT(0)
+#define CFG_IEEE80211_COMPUTE_FCS BIT(1)
#define MAXTID 16
-#define IEEE_A (1<<0)
-#define IEEE_B (1<<1)
-#define IEEE_G (1<<2)
-#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+#define IEEE_A BIT(0)
+#define IEEE_B BIT(1)
+#define IEEE_G BIT(2)
+#define IEEE_MODE_MASK (IEEE_A | IEEE_B | IEEE_G)
static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
{
@@ -757,7 +743,7 @@ unsigned char *r8712_get_wpa_ie(unsigned char *pie, int *rsn_ie_len, int limit);
unsigned char *r8712_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len,
int limit);
int r8712_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
- int *pairwise_cipher);
+ int *pairwise_cipher);
int r8712_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
int *pairwise_cipher);
int r8712_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
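The header churn swaps hand-rolled (1<<n) masks for the BIT() macro from <linux/bitops.h>. Besides reading better, BIT(n) expands to (1UL << (n)), so a single-bit mask at position 31 and above stays well-defined where a plain int shift would overflow. For orientation:

    #include <linux/bitops.h>   /* provides: #define BIT(nr) (1UL << (nr)) */

    #define WLAN_CAPABILITY_SHORT_SLOT  BIT(10) /* == (1 << 10) == 0x0400 */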
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index b89e2d3c4fe1..ab19112eae13 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -269,7 +269,6 @@ void r8712_stop_drv_timers(struct _adapter *padapter)
static u8 init_default_value(struct _adapter *padapter)
{
- u8 ret = _SUCCESS;
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -302,7 +301,7 @@ static u8 init_default_value(struct _adapter *padapter)
r8712_init_registrypriv_dev_network(padapter);
r8712_update_registrypriv_dev_network(padapter);
/*misc.*/
- return ret;
+ return _SUCCESS;
}
u8 r8712_init_drv_sw(struct _adapter *padapter)
diff --git a/drivers/staging/rtl8712/recv_linux.c b/drivers/staging/rtl8712/recv_linux.c
index 2f5460dbda8b..735a0eadd98c 100644
--- a/drivers/staging/rtl8712/recv_linux.c
+++ b/drivers/staging/rtl8712/recv_linux.c
@@ -44,7 +44,8 @@
int r8712_os_recv_resource_alloc(struct _adapter *padapter,
union recv_frame *precvframe)
{
- precvframe->u.hdr.pkt_newalloc = precvframe->u.hdr.pkt = NULL;
+ precvframe->u.hdr.pkt_newalloc = NULL;
+ precvframe->u.hdr.pkt = NULL;
return _SUCCESS;
}
@@ -56,7 +57,7 @@ int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter,
precvbuf->irp_pending = false;
precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
- if (precvbuf->purb == NULL)
+ if (!precvbuf->purb)
res = _FAIL;
precvbuf->pskb = NULL;
precvbuf->reuse = false;
@@ -114,7 +115,7 @@ void r8712_recv_indicatepkt(struct _adapter *padapter,
precvpriv = &(padapter->recvpriv);
pfree_recv_queue = &(precvpriv->free_recv_queue);
skb = precv_frame->u.hdr.pkt;
- if (skb == NULL)
+ if (!skb)
goto _recv_indicatepkt_drop;
skb->data = precv_frame->u.hdr.rx_data;
skb->len = precv_frame->u.hdr.len;
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 9b9160947943..50f400234593 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -293,7 +293,7 @@ u8 r8712_fw_cmd(struct _adapter *pAdapter, u32 cmd)
r8712_write32(pAdapter, IOCMD_CTRL_REG, cmd);
msleep(100);
- while ((0 != r8712_read32(pAdapter, IOCMD_CTRL_REG)) &&
+ while ((r8712_read32(pAdapter, IOCMD_CTRL_REG) != 0) &&
(pollingcnts > 0)) {
pollingcnts--;
msleep(20);
@@ -317,7 +317,7 @@ int r8712_cmd_thread(void *context)
unsigned int cmdsz, wr_sz, *pcmdbuf;
struct tx_desc *pdesc;
void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct _adapter *padapter = (struct _adapter *)context;
+ struct _adapter *padapter = context;
struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
allow_signal(SIGTERM);
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index eaa93fbb95a3..76f60ba5ee9b 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -161,7 +161,7 @@ static u8 efuse_is_empty(struct _adapter *padapter, u8 *empty)
/* read one byte to check if E-Fuse is empty */
if (efuse_one_byte_rw(padapter, true, 0, &value)) {
- if (0xFF == value)
+ if (value == 0xFF)
*empty = true;
else
*empty = false;
@@ -345,7 +345,7 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr)
ret = false;
} else if (pkt.data[i * 2] != value) {
ret = false;
- if (0xFF == value) /* write again */
+ if (value == 0xFF) /* write again */
efuse_one_byte_write(padapter, addr,
pkt.data[i * 2]);
}
@@ -353,7 +353,7 @@ static u8 fix_header(struct _adapter *padapter, u8 header, u16 header_addr)
ret = false;
} else if (pkt.data[i * 2 + 1] != value) {
ret = false;
- if (0xFF == value) /* write again */
+ if (value == 0xFF) /* write again */
efuse_one_byte_write(padapter, addr + 1,
pkt.data[i * 2 +
1]);
@@ -420,7 +420,7 @@ u8 r8712_efuse_pg_packet_write(struct _adapter *padapter, const u8 offset,
}
/* write header fail */
bResult = false;
- if (0xFF == efuse_data)
+ if (efuse_data == 0xFF)
return bResult; /* nothing damaged. */
/* call rescue procedure */
if (!fix_header(padapter, efuse_data, efuse_addr))
diff --git a/drivers/staging/rtl8712/rtl8712_io.c b/drivers/staging/rtl8712/rtl8712_io.c
index 4148d48ece62..391eff37f573 100644
--- a/drivers/staging/rtl8712/rtl8712_io.c
+++ b/drivers/staging/rtl8712/rtl8712_io.c
@@ -36,109 +36,76 @@
u8 r8712_read8(struct _adapter *adapter, u32 addr)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
- u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- _read8 = pintfhdl->io_ops._read8;
- return _read8(pintfhdl, addr);
+ return hdl->io_ops._read8(hdl, addr);
}
u16 r8712_read16(struct _adapter *adapter, u32 addr)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
- u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- _read16 = pintfhdl->io_ops._read16;
- return _read16(pintfhdl, addr);
+ return hdl->io_ops._read16(hdl, addr);
}
u32 r8712_read32(struct _adapter *adapter, u32 addr)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
- u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- _read32 = pintfhdl->io_ops._read32;
- return _read32(pintfhdl, addr);
+ return hdl->io_ops._read32(hdl, addr);
}
void r8712_write8(struct _adapter *adapter, u32 addr, u8 val)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
- void (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- _write8 = pintfhdl->io_ops._write8;
- _write8(pintfhdl, addr, val);
+ hdl->io_ops._write8(hdl, addr, val);
}
void r8712_write16(struct _adapter *adapter, u32 addr, u16 val)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
- void (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- _write16 = pintfhdl->io_ops._write16;
- _write16(pintfhdl, addr, val);
+ hdl->io_ops._write16(hdl, addr, val);
}
void r8712_write32(struct _adapter *adapter, u32 addr, u32 val)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- void (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
-
- _write32 = pintfhdl->io_ops._write32;
- _write32(pintfhdl, addr, val);
+ hdl->io_ops._write32(hdl, addr, val);
}
void r8712_read_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- _read_mem = pintfhdl->io_ops._read_mem;
- _read_mem(pintfhdl, addr, cnt, pmem);
+
+ hdl->io_ops._read_mem(hdl, addr, cnt, pmem);
}
void r8712_write_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
- void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- _write_mem = pintfhdl->io_ops._write_mem;
- _write_mem(pintfhdl, addr, cnt, pmem);
+ hdl->io_ops._write_mem(hdl, addr, cnt, pmem);
}
void r8712_read_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
return;
- _read_port = pintfhdl->io_ops._read_port;
- _read_port(pintfhdl, addr, cnt, pmem);
+
+ hdl->io_ops._read_port(hdl, addr, cnt, pmem);
}
void r8712_write_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
{
- struct io_queue *pio_queue = adapter->pio_queue;
- struct intf_hdl *pintfhdl = &(pio_queue->intf);
+ struct intf_hdl *hdl = &adapter->pio_queue->intf;
- u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
- _write_port = pintfhdl->io_ops._write_port;
- _write_port(pintfhdl, addr, cnt, pmem);
+ hdl->io_ops._write_port(hdl, addr, cnt, pmem);
}
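The rtl8712_io.c rewrite removes a layer of ceremony: instead of copying each io_ops member into a local function-pointer variable and calling through that, the wrappers now call straight through the ops table. The general shape, with illustrative types standing in for the driver's:

    #include <linux/types.h>

    struct example_hdl; /* forward declaration for the ops table */

    struct example_io_ops {
            u8 (*_read8)(struct example_hdl *hdl, u32 addr);
    };

    struct example_hdl {
            struct example_io_ops io_ops;
    };

    /* Sketch: thin accessor dispatching straight through the ops table. */
    static u8 example_read8(struct example_hdl *hdl, u32 addr)
    {
            return hdl->io_ops._read8(hdl, addr); /* no local fn-ptr temporary */
    }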
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 562a10203127..86136cc73672 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -136,15 +136,12 @@ static struct cmd_obj *_dequeue_cmd(struct __queue *queue)
unsigned long irqL;
struct cmd_obj *obj;
- spin_lock_irqsave(&(queue->lock), irqL);
- if (list_empty(&(queue->queue))) {
- obj = NULL;
- } else {
- obj = LIST_CONTAINOR(queue->queue.next,
- struct cmd_obj, list);
+ spin_lock_irqsave(&queue->lock, irqL);
+ obj = list_first_entry_or_null(&queue->queue,
+ struct cmd_obj, list);
+ if (obj)
list_del_init(&obj->list);
- }
- spin_unlock_irqrestore(&(queue->lock), irqL);
+ spin_unlock_irqrestore(&queue->lock, irqL);
return obj;
}
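list_first_entry_or_null() folds the list_empty() test and the LIST_CONTAINOR() (i.e. container_of()) extraction into a single call that yields NULL on an empty list, which is what makes the shorter _dequeue_cmd() above possible. The locked-dequeue idiom in general, using the driver's types:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Sketch: pop the head of a spinlock-protected list, or NULL if empty. */
    static struct cmd_obj *example_dequeue(struct __queue *queue)
    {
            unsigned long flags;
            struct cmd_obj *obj;

            spin_lock_irqsave(&queue->lock, flags);
            obj = list_first_entry_or_null(&queue->queue, struct cmd_obj, list);
            if (obj)
                    list_del_init(&obj->list); /* detach while still locked */
            spin_unlock_irqrestore(&queue->lock, flags);
            return obj;
    }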
@@ -318,27 +315,6 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
return _SUCCESS;
}
-/* power tracking mechanism setting */
-u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type)
-{
- struct cmd_obj *ph2c;
- struct writePTM_parm *pwriteptmparm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
- return _FAIL;
- pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
- if (pwriteptmparm == NULL) {
- kfree(ph2c);
- return _FAIL;
- }
- init_h2fwcmd_w_parm_no_rsp(ph2c, pwriteptmparm, GEN_CMD_CODE(_SetPT));
- pwriteptmparm->type = type;
- r8712_enqueue_cmd(pcmdpriv, ph2c);
- return _SUCCESS;
-}
-
u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
{
struct cmd_obj *ph2c;
@@ -733,32 +709,6 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter,
return _SUCCESS;
}
-u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
-{
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct cmd_obj *ph2c;
- struct readTSSI_parm *prdtssiparm;
-
- ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
- return _FAIL;
- prdtssiparm = kmalloc(sizeof(*prdtssiparm), GFP_ATOMIC);
- if (prdtssiparm == NULL) {
- kfree(ph2c);
- return _FAIL;
- }
- INIT_LIST_HEAD(&ph2c->list);
- ph2c->cmdcode = GEN_CMD_CODE(_ReadTSSI);
- ph2c->parmbuf = (unsigned char *)prdtssiparm;
- ph2c->cmdsz = sizeof(struct readTSSI_parm);
- ph2c->rsp = pval;
- ph2c->rspsz = sizeof(struct readTSSI_rsp);
-
- prdtssiparm->offset = offset;
- r8712_enqueue_cmd(pcmdpriv, ph2c);
- return _SUCCESS;
-}
-
u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
{
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index 818cd8807a38..e4a2a50c85de 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -736,8 +736,6 @@ u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode);
u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val);
u8 r8712_setrttbl_cmd(struct _adapter *padapter,
struct setratable_parm *prate_table);
-u8 r8712_gettssi_cmd(struct _adapter *padapter, u8 offset, u8 *pval);
-u8 r8712_setptm_cmd(struct _adapter *padapter, u8 type);
u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type);
u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type);
u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid);
diff --git a/drivers/staging/rtl8712/rtl871x_io.c b/drivers/staging/rtl8712/rtl871x_io.c
index fbbc63570eab..3a10940db9b7 100644
--- a/drivers/staging/rtl8712/rtl871x_io.c
+++ b/drivers/staging/rtl8712/rtl871x_io.c
@@ -113,7 +113,7 @@ uint r8712_alloc_io_queue(struct _adapter *adapter)
struct io_req *pio_req;
pio_queue = kmalloc(sizeof(*pio_queue), GFP_ATOMIC);
- if (pio_queue == NULL)
+ if (!pio_queue)
goto alloc_io_queue_fail;
INIT_LIST_HEAD(&pio_queue->free_ioreqs);
INIT_LIST_HEAD(&pio_queue->processing);
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index edfc6805e012..1b9e24900477 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -137,7 +137,7 @@ static inline void handle_group_key(struct ieee_param *param,
}
}
-static inline char *translate_scan(struct _adapter *padapter,
+static noinline_for_stack char *translate_scan(struct _adapter *padapter,
struct iw_request_info *info,
struct wlan_network *pnetwork,
char *start, char *stop)
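
noinline_for_stack is a kernel alias for noinline (include/linux/compiler.h); applied to translate_scan(), it keeps that helper's sizeable locals out of the ioctl handler's stack frame instead of letting the inliner merge them. A hedged sketch with illustrative names:

/* Illustrative only: a formatting helper whose large scratch buffer
 * stays in its own frame because the compiler may not inline it. */
static noinline_for_stack int format_status(char *buf, size_t len, int rssi)
{
        char scratch[256];

        scnprintf(scratch, sizeof(scratch), "rssi=%d", rssi);
        return scnprintf(buf, len, "%s", scratch);
}
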
@@ -398,12 +398,9 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
wep_key_idx = 0;
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
- pwep = kmalloc((u32)(wep_key_len +
- FIELD_OFFSET(struct NDIS_802_11_WEP,
- KeyMaterial)), GFP_ATOMIC);
+ pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC);
if (pwep == NULL)
return -ENOMEM;
- memset(pwep, 0, sizeof(struct NDIS_802_11_WEP));
pwep->KeyLength = wep_key_len;
pwep->Length = wep_key_len +
FIELD_OFFSET(struct NDIS_802_11_WEP,
@@ -1964,7 +1961,7 @@ static int r871x_get_ap_info(struct net_device *dev,
struct list_head *plist, *phead;
unsigned char *pbuf;
u8 bssid[ETH_ALEN];
- char data[32];
+ char data[33];
if (padapter->bDriverStopped || (pdata == NULL))
return -EINVAL;
@@ -1979,6 +1976,7 @@ static int r871x_get_ap_info(struct net_device *dev,
if (pdata->length >= 32) {
if (copy_from_user(data, pdata->pointer, 32))
return -EINVAL;
+ data[32] = 0;
} else {
return -EINVAL;
}
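
The buffer change above is a classic off-by-one fix: data[] grows from 32 to 33 bytes so the 32-byte copy_from_user() can always be NUL-terminated before being treated as a string. The general shape, where user_ptr stands in for the ioctl payload pointer (shown with the conventional -EFAULT return; this driver keeps -EINVAL):

char buf[33];

if (copy_from_user(buf, user_ptr, 32))
        return -EFAULT;
buf[32] = '\0';         /* the terminator fits: buf holds 33 bytes */
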
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
index 7c346a405a20..c7f2e5167cb7 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_rtl.c
@@ -49,8 +49,7 @@ uint oid_rt_get_signal_quality_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != QUERY_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -66,8 +65,7 @@ uint oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != QUERY_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -83,8 +81,7 @@ uint oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != QUERY_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -115,8 +112,7 @@ uint oid_rt_get_rx_retry_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != QUERY_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -147,8 +143,7 @@ uint oid_rt_get_tx_beacon_err_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != QUERY_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -172,8 +167,7 @@ uint oid_rt_set_encryption_algorithm_hdl(struct oid_par_priv
uint oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
u32 preamblemode = 0;
if (poid_par_priv->type_of_oid != QUERY_OID)
@@ -202,8 +196,7 @@ uint oid_rt_get_ap_ip_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
struct eeprom_priv *peeprompriv = &padapter->eeprompriv;
if (poid_par_priv->type_of_oid != QUERY_OID)
@@ -216,8 +209,7 @@ uint oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv)
uint oid_rt_set_channelplan_hdl(struct oid_par_priv
*poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
struct eeprom_priv *peeprompriv = &padapter->eeprompriv;
if (poid_par_priv->type_of_oid != SET_OID)
@@ -229,8 +221,7 @@ uint oid_rt_set_channelplan_hdl(struct oid_par_priv
uint oid_rt_set_preamble_mode_hdl(struct oid_par_priv
*poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
u32 preamblemode = 0;
if (poid_par_priv->type_of_oid != SET_OID)
@@ -267,8 +258,7 @@ uint oid_rt_dedicate_probe_hdl(struct oid_par_priv
uint oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv
*poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != QUERY_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -285,8 +275,7 @@ uint oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv
uint oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv
*poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != QUERY_OID)
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -325,8 +314,7 @@ uint oid_rt_get_enc_key_match_count_hdl(struct oid_par_priv
uint oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct NDIS_802_11_CONFIGURATION *pnic_Config;
u32 channelnum;
@@ -449,8 +437,7 @@ uint oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv*
poid_par_priv)
{
uint status = RNDIS_STATUS_SUCCESS;
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *Adapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != SET_OID) /* QUERY_OID */
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -470,8 +457,7 @@ uint oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv*
uint oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv)
{
uint status = RNDIS_STATUS_SUCCESS;
- struct _adapter *Adapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *Adapter = poid_par_priv->adapter_context;
if (poid_par_priv->type_of_oid != SET_OID) /* QUERY_OID */
return RNDIS_STATUS_NOT_ACCEPTED;
@@ -516,8 +502,7 @@ enum _CONNECT_STATE_ {
uint oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv)
{
- struct _adapter *padapter = (struct _adapter *)
- (poid_par_priv->adapter_context);
+ struct _adapter *padapter = poid_par_priv->adapter_context;
struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
u32 ulInfo;
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 04f727fc95ea..62d4ae85af15 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -64,7 +64,7 @@ static sint _init_mlme_priv(struct _adapter *padapter)
memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
pbuf = kmalloc_array(MAX_BSS_CNT, sizeof(struct wlan_network),
GFP_ATOMIC);
- if (pbuf == NULL)
+ if (!pbuf)
return _FAIL;
pmlmepriv->free_bss_buf = pbuf;
pnetwork = (struct wlan_network *)pbuf;
@@ -87,16 +87,15 @@ struct wlan_network *_r8712_alloc_network(struct mlme_priv *pmlmepriv)
unsigned long irqL;
struct wlan_network *pnetwork;
struct __queue *free_queue = &pmlmepriv->free_bss_pool;
- struct list_head *plist = NULL;
- if (list_empty(&free_queue->queue))
- return NULL;
spin_lock_irqsave(&free_queue->lock, irqL);
- plist = free_queue->queue.next;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
- list_del_init(&pnetwork->list);
- pnetwork->last_scanned = jiffies;
- pmlmepriv->num_of_scanned++;
+ pnetwork = list_first_entry_or_null(&free_queue->queue,
+ struct wlan_network, list);
+ if (pnetwork) {
+ list_del_init(&pnetwork->list);
+ pnetwork->last_scanned = jiffies;
+ pmlmepriv->num_of_scanned++;
+ }
spin_unlock_irqrestore(&free_queue->lock, irqL);
return pnetwork;
}
@@ -469,8 +468,7 @@ static int is_desired_network(struct _adapter *adapter,
pnetwork->network.IELength, wps_ie,
&wps_ielen))
return true;
- else
- return false;
+ return false;
}
if ((psecuritypriv->PrivacyAlgrthm != _NO_PRIVACY_) &&
(pnetwork->network.Privacy == 0))
@@ -1203,7 +1201,7 @@ sint r8712_set_auth(struct _adapter *adapter,
struct setauth_parm *psetauthparm;
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL;
psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC);
@@ -1233,7 +1231,7 @@ sint r8712_set_key(struct _adapter *adapter,
sint ret = _SUCCESS;
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL;
psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_ATOMIC);
if (psetkeyparm == NULL) {
diff --git a/drivers/staging/rtl8712/rtl871x_mp.c b/drivers/staging/rtl8712/rtl871x_mp.c
index 44da4fe89381..5e4fda1890f5 100644
--- a/drivers/staging/rtl8712/rtl871x_mp.c
+++ b/drivers/staging/rtl8712/rtl871x_mp.c
@@ -235,7 +235,7 @@ static u8 set_bb_reg(struct _adapter *pAdapter,
if (bitmask != bMaskDWord) {
org_value = r8712_bb_reg_read(pAdapter, offset);
bit_shift = bitshift(bitmask);
- new_value = ((org_value & (~bitmask)) | (value << bit_shift));
+ new_value = (org_value & (~bitmask)) | (value << bit_shift);
} else {
new_value = value;
}
@@ -260,7 +260,7 @@ static u8 set_rf_reg(struct _adapter *pAdapter, u8 path, u8 offset, u32 bitmask,
if (bitmask != bMaskDWord) {
org_value = r8712_rf_reg_read(pAdapter, path, offset);
bit_shift = bitshift(bitmask);
- new_value = ((org_value & (~bitmask)) | (value << bit_shift));
+ new_value = (org_value & (~bitmask)) | (value << bit_shift);
} else {
new_value = value;
}
@@ -281,10 +281,10 @@ void r8712_SetChannel(struct _adapter *pAdapter)
u16 code = GEN_CMD_CODE(_SetChannel);
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return;
pparm = kmalloc(sizeof(*pparm), GFP_ATOMIC);
- if (pparm == NULL) {
+ if (!pparm) {
kfree(pcmd);
return;
}
@@ -327,10 +327,10 @@ void r8712_SetTxAGCOffset(struct _adapter *pAdapter, u32 ulTxAGCOffset)
{
u32 TxAGCOffset_B, TxAGCOffset_C, TxAGCOffset_D, tmpAGC;
- TxAGCOffset_B = (ulTxAGCOffset & 0x000000ff);
+ TxAGCOffset_B = ulTxAGCOffset & 0x000000ff;
TxAGCOffset_C = (ulTxAGCOffset & 0x0000ff00) >> 8;
TxAGCOffset_D = (ulTxAGCOffset & 0x00ff0000) >> 16;
- tmpAGC = (TxAGCOffset_D << 8 | TxAGCOffset_C << 4 | TxAGCOffset_B);
+ tmpAGC = TxAGCOffset_D << 8 | TxAGCOffset_C << 4 | TxAGCOffset_B;
set_bb_reg(pAdapter, rFPGA0_TxGainStage,
(bXBTxAGC | bXCTxAGC | bXDTxAGC), tmpAGC);
}
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
index 8e7c7f8b69f9..8dc898024e07 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.h
@@ -150,90 +150,90 @@ uint oid_rt_get_power_mode_hdl(
#ifdef _RTL871X_MP_IOCTL_C_ /* CAUTION!!! */
/* This ifdef _MUST_ be left in!! */
static const struct oid_obj_priv oid_rtl_seg_81_80_00[] = {
- {1, &oid_null_function}, /*0x00 OID_RT_PRO_RESET_DUT */
- {1, &oid_rt_pro_set_data_rate_hdl}, /*0x01*/
- {1, &oid_rt_pro_start_test_hdl},/*0x02*/
- {1, &oid_rt_pro_stop_test_hdl}, /*0x03*/
- {1, &oid_null_function}, /*0x04 OID_RT_PRO_SET_PREAMBLE*/
- {1, &oid_null_function}, /*0x05 OID_RT_PRO_SET_SCRAMBLER*/
- {1, &oid_null_function}, /*0x06 OID_RT_PRO_SET_FILTER_BB*/
- {1, &oid_null_function}, /*0x07
+ {1, oid_null_function}, /*0x00 OID_RT_PRO_RESET_DUT */
+ {1, oid_rt_pro_set_data_rate_hdl}, /*0x01*/
+ {1, oid_rt_pro_start_test_hdl}, /*0x02*/
+ {1, oid_rt_pro_stop_test_hdl}, /*0x03*/
+ {1, oid_null_function}, /*0x04 OID_RT_PRO_SET_PREAMBLE*/
+ {1, oid_null_function}, /*0x05 OID_RT_PRO_SET_SCRAMBLER*/
+ {1, oid_null_function}, /*0x06 OID_RT_PRO_SET_FILTER_BB*/
+ {1, oid_null_function}, /*0x07
* OID_RT_PRO_SET_MANUAL_DIVERS_BB*/
- {1, &oid_rt_pro_set_channel_direct_call_hdl}, /*0x08*/
- {1, &oid_null_function}, /*0x09
+ {1, oid_rt_pro_set_channel_direct_call_hdl}, /*0x08*/
+ {1, oid_null_function}, /*0x09
* OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL*/
- {1, &oid_null_function}, /*0x0A
+ {1, oid_null_function}, /*0x0A
* OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL*/
- {1, &oid_rt_pro_set_continuous_tx_hdl}, /*0x0B
+ {1, oid_rt_pro_set_continuous_tx_hdl}, /*0x0B
* OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL*/
- {1, &oid_rt_pro_set_single_carrier_tx_hdl}, /*0x0C
+ {1, oid_rt_pro_set_single_carrier_tx_hdl}, /*0x0C
* OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS*/
- {1, &oid_null_function}, /*0x0D
+ {1, oid_null_function}, /*0x0D
* OID_RT_PRO_SET_TX_ANTENNA_BB*/
- {1, &oid_rt_pro_set_antenna_bb_hdl}, /*0x0E*/
- {1, &oid_null_function}, /*0x0F OID_RT_PRO_SET_CR_SCRAMBLER*/
- {1, &oid_null_function}, /*0x10 OID_RT_PRO_SET_CR_NEW_FILTER*/
- {1, &oid_rt_pro_set_tx_power_control_hdl}, /*0x11
+ {1, oid_rt_pro_set_antenna_bb_hdl}, /*0x0E*/
+ {1, oid_null_function}, /*0x0F OID_RT_PRO_SET_CR_SCRAMBLER*/
+ {1, oid_null_function}, /*0x10 OID_RT_PRO_SET_CR_NEW_FILTER*/
+ {1, oid_rt_pro_set_tx_power_control_hdl}, /*0x11
* OID_RT_PRO_SET_TX_POWER_CONTROL*/
- {1, &oid_null_function}, /*0x12 OID_RT_PRO_SET_CR_TX_CONFIG*/
- {1, &oid_null_function}, /*0x13
+ {1, oid_null_function}, /*0x12 OID_RT_PRO_SET_CR_TX_CONFIG*/
+ {1, oid_null_function}, /*0x13
* OID_RT_PRO_GET_TX_POWER_CONTROL*/
- {1, &oid_null_function}, /*0x14
+ {1, oid_null_function}, /*0x14
* OID_RT_PRO_GET_CR_SIGNAL_QUALITY*/
- {1, &oid_null_function}, /*0x15 OID_RT_PRO_SET_CR_SETPOINT*/
- {1, &oid_null_function}, /*0x16 OID_RT_PRO_SET_INTEGRATOR*/
- {1, &oid_null_function}, /*0x17 OID_RT_PRO_SET_SIGNAL_QUALITY*/
- {1, &oid_null_function}, /*0x18 OID_RT_PRO_GET_INTEGRATOR*/
- {1, &oid_null_function}, /*0x19 OID_RT_PRO_GET_SIGNAL_QUALITY*/
- {1, &oid_null_function}, /*0x1A OID_RT_PRO_QUERY_EEPROM_TYPE*/
- {1, &oid_null_function}, /*0x1B OID_RT_PRO_WRITE_MAC_ADDRESS*/
- {1, &oid_null_function}, /*0x1C OID_RT_PRO_READ_MAC_ADDRESS*/
- {1, &oid_null_function}, /*0x1D OID_RT_PRO_WRITE_CIS_DATA*/
- {1, &oid_null_function}, /*0x1E OID_RT_PRO_READ_CIS_DATA*/
- {1, &oid_null_function} /*0x1F OID_RT_PRO_WRITE_POWER_CONTROL*/
+ {1, oid_null_function}, /*0x15 OID_RT_PRO_SET_CR_SETPOINT*/
+ {1, oid_null_function}, /*0x16 OID_RT_PRO_SET_INTEGRATOR*/
+ {1, oid_null_function}, /*0x17 OID_RT_PRO_SET_SIGNAL_QUALITY*/
+ {1, oid_null_function}, /*0x18 OID_RT_PRO_GET_INTEGRATOR*/
+ {1, oid_null_function}, /*0x19 OID_RT_PRO_GET_SIGNAL_QUALITY*/
+ {1, oid_null_function}, /*0x1A OID_RT_PRO_QUERY_EEPROM_TYPE*/
+ {1, oid_null_function}, /*0x1B OID_RT_PRO_WRITE_MAC_ADDRESS*/
+ {1, oid_null_function}, /*0x1C OID_RT_PRO_READ_MAC_ADDRESS*/
+ {1, oid_null_function}, /*0x1D OID_RT_PRO_WRITE_CIS_DATA*/
+ {1, oid_null_function}, /*0x1E OID_RT_PRO_READ_CIS_DATA*/
+ {1, oid_null_function} /*0x1F OID_RT_PRO_WRITE_POWER_CONTROL*/
};
static const struct oid_obj_priv oid_rtl_seg_81_80_20[] = {
- {1, &oid_null_function}, /*0x20 OID_RT_PRO_READ_POWER_CONTROL*/
- {1, &oid_null_function}, /*0x21 OID_RT_PRO_WRITE_EEPROM*/
- {1, &oid_null_function}, /*0x22 OID_RT_PRO_READ_EEPROM*/
- {1, &oid_rt_pro_reset_tx_packet_sent_hdl}, /*0x23*/
- {1, &oid_rt_pro_query_tx_packet_sent_hdl}, /*0x24*/
- {1, &oid_rt_pro_reset_rx_packet_received_hdl}, /*0x25*/
- {1, &oid_rt_pro_query_rx_packet_received_hdl}, /*0x26*/
- {1, &oid_rt_pro_query_rx_packet_crc32_error_hdl},/*0x27*/
- {1, &oid_null_function}, /*0x28
+ {1, oid_null_function}, /*0x20 OID_RT_PRO_READ_POWER_CONTROL*/
+ {1, oid_null_function}, /*0x21 OID_RT_PRO_WRITE_EEPROM*/
+ {1, oid_null_function}, /*0x22 OID_RT_PRO_READ_EEPROM*/
+ {1, oid_rt_pro_reset_tx_packet_sent_hdl}, /*0x23*/
+ {1, oid_rt_pro_query_tx_packet_sent_hdl}, /*0x24*/
+ {1, oid_rt_pro_reset_rx_packet_received_hdl}, /*0x25*/
+ {1, oid_rt_pro_query_rx_packet_received_hdl}, /*0x26*/
+ {1, oid_rt_pro_query_rx_packet_crc32_error_hdl},/*0x27*/
+ {1, oid_null_function}, /*0x28
*OID_RT_PRO_QUERY_CURRENT_ADDRESS*/
- {1, &oid_null_function}, /*0x29
+ {1, oid_null_function}, /*0x29
*OID_RT_PRO_QUERY_PERMANENT_ADDRESS*/
- {1, &oid_null_function}, /*0x2A
+ {1, oid_null_function}, /*0x2A
*OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS*/
- {1, &oid_rt_pro_set_carrier_suppression_tx_hdl},/*0x2B
+ {1, oid_rt_pro_set_carrier_suppression_tx_hdl},/*0x2B
*OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX*/
- {1, &oid_null_function}, /*0x2C OID_RT_PRO_RECEIVE_PACKET*/
- {1, &oid_null_function}, /*0x2D OID_RT_PRO_WRITE_EEPROM_BYTE*/
- {1, &oid_null_function}, /*0x2E OID_RT_PRO_READ_EEPROM_BYTE*/
- {1, &oid_rt_pro_set_modulation_hdl} /*0x2F*/
+ {1, oid_null_function}, /*0x2C OID_RT_PRO_RECEIVE_PACKET*/
+ {1, oid_null_function}, /*0x2D OID_RT_PRO_WRITE_EEPROM_BYTE*/
+ {1, oid_null_function}, /*0x2E OID_RT_PRO_READ_EEPROM_BYTE*/
+ {1, oid_rt_pro_set_modulation_hdl} /*0x2F*/
};
static const struct oid_obj_priv oid_rtl_seg_81_80_40[] = {
- {1, &oid_null_function}, /*0x40*/
- {1, &oid_null_function}, /*0x41*/
- {1, &oid_null_function}, /*0x42*/
- {1, &oid_rt_pro_set_single_tone_tx_hdl}, /*0x43*/
- {1, &oid_null_function}, /*0x44*/
- {1, &oid_null_function} /*0x45*/
+ {1, oid_null_function}, /*0x40*/
+ {1, oid_null_function}, /*0x41*/
+ {1, oid_null_function}, /*0x42*/
+ {1, oid_rt_pro_set_single_tone_tx_hdl}, /*0x43*/
+ {1, oid_null_function}, /*0x44*/
+ {1, oid_null_function} /*0x45*/
};
static const struct oid_obj_priv oid_rtl_seg_81_80_80[] = {
- {1, &oid_null_function}, /*0x80 OID_RT_DRIVER_OPTION*/
- {1, &oid_null_function}, /*0x81 OID_RT_RF_OFF*/
- {1, &oid_null_function} /*0x82 OID_RT_AUTH_STATUS*/
+ {1, oid_null_function}, /*0x80 OID_RT_DRIVER_OPTION*/
+ {1, oid_null_function}, /*0x81 OID_RT_RF_OFF*/
+ {1, oid_null_function} /*0x82 OID_RT_AUTH_STATUS*/
};
static const struct oid_obj_priv oid_rtl_seg_81_85[] = {
- {1, &oid_rt_wireless_mode_hdl} /*0x00 OID_RT_WIRELESS_MODE*/
+ {1, oid_rt_wireless_mode_hdl} /*0x00 OID_RT_WIRELESS_MODE*/
};
#else /* _RTL871X_MP_IOCTL_C_ */
@@ -384,7 +384,7 @@ static struct mp_ioctl_handler mp_ioctl_hdl[] = {
oid_rt_pro_write_rf_reg_hdl,
OID_RT_PRO_RF_WRITE_REGISTRY},
{sizeof(struct rfintfs_parm), NULL, 0},
- {0, &mp_ioctl_xmit_packet_hdl, 0},/*12*/
+ {0, mp_ioctl_xmit_packet_hdl, 0},/*12*/
{sizeof(struct psmode_param), NULL, 0},/*13*/
{sizeof(struct eeprom_rw_param), NULL, 0},/*14*/
{sizeof(struct eeprom_rw_param), NULL, 0},/*15*/
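
The churn in these tables is purely syntactic: in C a function designator decays to a pointer, so &fn and fn yield the same value, and the redundant address-of operators are dropped. A sketch with illustrative types:

typedef unsigned int (*oid_fn)(void *priv);

struct oid_entry {
        int dbg;
        oid_fn handler;
};

static unsigned int example_hdl(void *priv)
{
        return 0;
}

static const struct oid_entry tbl[] = {
        { 1, &example_hdl },    /* old style */
        { 1, example_hdl },     /* new style; the pointers compare equal */
};
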
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 4ff530155187..616ca3965919 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -72,14 +72,12 @@ sint _r8712_init_recv_priv(struct recv_priv *precvpriv,
_init_queue(&precvpriv->recv_pending_queue);
precvpriv->adapter = padapter;
precvpriv->free_recvframe_cnt = NR_RECVFRAME;
- precvpriv->pallocated_frame_buf = kmalloc(NR_RECVFRAME *
+ precvpriv->pallocated_frame_buf = kzalloc(NR_RECVFRAME *
sizeof(union recv_frame) + RXFRAME_ALIGN_SZ,
GFP_ATOMIC);
if (precvpriv->pallocated_frame_buf == NULL)
return _FAIL;
kmemleak_not_leak(precvpriv->pallocated_frame_buf);
- memset(precvpriv->pallocated_frame_buf, 0, NR_RECVFRAME *
- sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
precvpriv->precv_frame_buf = precvpriv->pallocated_frame_buf +
RXFRAME_ALIGN_SZ -
((addr_t)(precvpriv->pallocated_frame_buf) &
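
kzalloc() folds the allocation and the memset() into one call, which also removes the risk of the two size expressions drifting apart. The equivalence, schematically:

/* Before: allocate, then zero by hand. */
buf = kmalloc(len, GFP_ATOMIC);
if (buf)
        memset(buf, 0, len);

/* After: one call, same result. */
buf = kzalloc(len, GFP_ATOMIC);
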
@@ -103,21 +101,17 @@ void _r8712_free_recv_priv(struct recv_priv *precvpriv)
r8712_free_recv_priv(precvpriv);
}
-union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
+union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
{
unsigned long irqL;
union recv_frame *precvframe;
- struct list_head *plist, *phead;
struct _adapter *padapter;
struct recv_priv *precvpriv;
spin_lock_irqsave(&pfree_recv_queue->lock, irqL);
- if (list_empty(&pfree_recv_queue->queue)) {
- precvframe = NULL;
- } else {
- phead = &pfree_recv_queue->queue;
- plist = phead->next;
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ precvframe = list_first_entry_or_null(&pfree_recv_queue->queue,
+ union recv_frame, u.hdr.list);
+ if (precvframe) {
list_del_init(&precvframe->u.hdr.list);
padapter = precvframe->u.hdr.adapter;
if (padapter != NULL) {
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 162e61c6ea06..e90c00de7499 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -53,7 +53,7 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv)
pstapriv->pallocated_stainfo_buf = kmalloc(sizeof(struct sta_info) *
NUM_STA + 4, GFP_ATOMIC);
- if (pstapriv->pallocated_stainfo_buf == NULL)
+ if (!pstapriv->pallocated_stainfo_buf)
return _FAIL;
pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
((addr_t)(pstapriv->pallocated_stainfo_buf) & 3);
@@ -89,16 +89,11 @@ static void mfree_all_stainfo(struct sta_priv *pstapriv)
spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL);
}
-
-static void mfree_sta_priv_lock(struct sta_priv *pstapriv)
-{
- mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */
-}
-
u32 _r8712_free_sta_priv(struct sta_priv *pstapriv)
{
if (pstapriv) {
- mfree_sta_priv_lock(pstapriv);
+ /* must be done before freeing sta_hash_lock */
+ mfree_all_stainfo(pstapriv);
kfree(pstapriv->pallocated_stainfo_buf);
}
return _SUCCESS;
@@ -116,13 +111,11 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
unsigned long flags;
pfree_sta_queue = &pstapriv->free_sta_queue;
- spin_lock_irqsave(&(pfree_sta_queue->lock), flags);
- if (list_empty(&pfree_sta_queue->queue)) {
- psta = NULL;
- } else {
- psta = LIST_CONTAINOR(pfree_sta_queue->queue.next,
- struct sta_info, list);
- list_del_init(&(psta->list));
+ spin_lock_irqsave(&pfree_sta_queue->lock, flags);
+ psta = list_first_entry_or_null(&pfree_sta_queue->queue,
+ struct sta_info, list);
+ if (psta) {
+ list_del_init(&psta->list);
_init_stainfo(psta);
memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
index = wifi_mac_hash(hwaddr);
@@ -130,7 +123,7 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
psta = NULL;
goto exit;
}
- phash_list = &(pstapriv->sta_hash[index]);
+ phash_list = &pstapriv->sta_hash[index];
list_add_tail(&psta->hash_list, phash_list);
pstapriv->asoc_sta_count++;
@@ -154,7 +147,7 @@ struct sta_info *r8712_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
}
}
exit:
- spin_unlock_irqrestore(&(pfree_sta_queue->lock), flags);
+ spin_unlock_irqrestore(&pfree_sta_queue->lock, flags);
return psta;
}
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index 68d65d230fe3..c6d952f5d8f9 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -89,7 +89,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
*/
pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4,
GFP_ATOMIC);
- if (pxmitpriv->pallocated_frame_buf == NULL) {
+ if (!pxmitpriv->pallocated_frame_buf) {
pxmitpriv->pxmit_frame_buf = NULL;
return _FAIL;
}
@@ -128,7 +128,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
_init_queue(&pxmitpriv->pending_xmitbuf_queue);
pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4,
GFP_ATOMIC);
- if (pxmitpriv->pallocated_xmitbuf == NULL)
+ if (!pxmitpriv->pallocated_xmitbuf)
return _FAIL;
pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
@@ -137,7 +137,7 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
INIT_LIST_HEAD(&pxmitbuf->list);
pxmitbuf->pallocated_buf = kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ,
GFP_ATOMIC);
- if (pxmitbuf->pallocated_buf == NULL)
+ if (!pxmitbuf->pallocated_buf)
return _FAIL;
pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
((addr_t) (pxmitbuf->pallocated_buf) &
@@ -241,7 +241,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
}
/* r8712_xmitframe_coalesce() overwrite this!*/
pattrib->pktlen = pktfile.pkt_len;
- if (ETH_P_IP == pattrib->ether_type) {
+ if (pattrib->ether_type == ETH_P_IP) {
/* The following is for DHCP and ARP packet, we use cck1M to
* tx these packets and let LPS awake some time
* to prevent DHCP protocol fail */
@@ -250,7 +250,7 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
_r8712_pktfile_read(&pktfile, &tmp[0], 24);
pattrib->dhcp_pkt = 0;
if (pktfile.pkt_len > 282) {/*MINIMUM_DHCP_PACKET_SIZE)*/
- if (ETH_P_IP == pattrib->ether_type) {/* IP header*/
+ if (pattrib->ether_type == ETH_P_IP) {/* IP header*/
if (((tmp[21] == 68) && (tmp[23] == 67)) ||
((tmp[21] == 67) && (tmp[23] == 68))) {
/* 68 : UDP BOOTP client
@@ -741,21 +741,16 @@ void r8712_update_protection(struct _adapter *padapter, u8 *ie, uint ie_len)
struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
{
unsigned long irqL;
- struct xmit_buf *pxmitbuf = NULL;
- struct list_head *plist, *phead;
+ struct xmit_buf *pxmitbuf;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
- if (list_empty(&pfree_xmitbuf_queue->queue)) {
- pxmitbuf = NULL;
- } else {
- phead = &pfree_xmitbuf_queue->queue;
- plist = phead->next;
- pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
- list_del_init(&(pxmitbuf->list));
- }
- if (pxmitbuf != NULL)
+ pxmitbuf = list_first_entry_or_null(&pfree_xmitbuf_queue->queue,
+ struct xmit_buf, list);
+ if (pxmitbuf) {
+ list_del_init(&pxmitbuf->list);
pxmitpriv->free_xmitbuf_cnt--;
+ }
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
return pxmitbuf;
}
@@ -795,20 +790,14 @@ struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv)
pfree_xmit_queue
*/
unsigned long irqL;
- struct xmit_frame *pxframe = NULL;
- struct list_head *plist, *phead;
+ struct xmit_frame *pxframe;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
spin_lock_irqsave(&pfree_xmit_queue->lock, irqL);
- if (list_empty(&pfree_xmit_queue->queue)) {
- pxframe = NULL;
- } else {
- phead = &pfree_xmit_queue->queue;
- plist = phead->next;
- pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
- list_del_init(&(pxframe->list));
- }
- if (pxframe != NULL) {
+ pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
+ struct xmit_frame, list);
+ if (pxframe) {
+ list_del_init(&pxframe->list);
pxmitpriv->free_xmitframe_cnt--;
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
@@ -954,7 +943,7 @@ static void alloc_hwxmits(struct _adapter *padapter)
pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
pxmitpriv->hwxmits = kmalloc_array(pxmitpriv->hwxmit_entry,
sizeof(struct hw_xmit), GFP_ATOMIC);
- if (pxmitpriv->hwxmits == NULL)
+ if (!pxmitpriv->hwxmits)
return;
hwxmits = pxmitpriv->hwxmits;
if (pxmitpriv->hwxmit_entry == 5) {
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index c71333fbe823..c1a0ca490546 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -205,12 +205,15 @@ struct drv_priv {
static int r871x_suspend(struct usb_interface *pusb_intf, pm_message_t state)
{
struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
+ struct _adapter *padapter = netdev_priv(pnetdev);
netdev_info(pnetdev, "Suspending...\n");
if (!pnetdev || !netif_running(pnetdev)) {
netdev_info(pnetdev, "Unable to suspend\n");
return 0;
}
+ padapter->bSuspended = true;
+ rtl871x_intf_stop(padapter);
if (pnetdev->netdev_ops->ndo_stop)
pnetdev->netdev_ops->ndo_stop(pnetdev);
mdelay(10);
@@ -218,9 +221,16 @@ static int r871x_suspend(struct usb_interface *pusb_intf, pm_message_t state)
return 0;
}
+static void rtl871x_intf_resume(struct _adapter *padapter)
+{
+ if (padapter->dvobjpriv.inirp_init)
+ padapter->dvobjpriv.inirp_init(padapter);
+}
+
static int r871x_resume(struct usb_interface *pusb_intf)
{
struct net_device *pnetdev = usb_get_intfdata(pusb_intf);
+ struct _adapter *padapter = netdev_priv(pnetdev);
netdev_info(pnetdev, "Resuming...\n");
if (!pnetdev || !netif_running(pnetdev)) {
@@ -230,6 +240,8 @@ static int r871x_resume(struct usb_interface *pusb_intf)
netif_device_attach(pnetdev);
if (pnetdev->netdev_ops->ndo_open)
pnetdev->netdev_ops->ndo_open(pnetdev);
+ padapter->bSuspended = false;
+ rtl871x_intf_resume(padapter);
return 0;
}
@@ -387,11 +399,11 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
pnetdev->dev.type = &wlan_type;
/* step 2. */
- padapter->dvobj_init = &r8712_usb_dvobj_init;
- padapter->dvobj_deinit = &r8712_usb_dvobj_deinit;
- padapter->halpriv.hal_bus_init = &r8712_usb_hal_bus_init;
- padapter->dvobjpriv.inirp_init = &r8712_usb_inirp_init;
- padapter->dvobjpriv.inirp_deinit = &r8712_usb_inirp_deinit;
+ padapter->dvobj_init = r8712_usb_dvobj_init;
+ padapter->dvobj_deinit = r8712_usb_dvobj_deinit;
+ padapter->halpriv.hal_bus_init = r8712_usb_hal_bus_init;
+ padapter->dvobjpriv.inirp_init = r8712_usb_inirp_init;
+ padapter->dvobjpriv.inirp_deinit = r8712_usb_inirp_deinit;
/* step 3.
* initialize the dvobj_priv
*/
diff --git a/drivers/staging/rtl8712/usb_ops.c b/drivers/staging/rtl8712/usb_ops.c
index 856f257bb77e..9172400efe9a 100644
--- a/drivers/staging/rtl8712/usb_ops.c
+++ b/drivers/staging/rtl8712/usb_ops.c
@@ -179,22 +179,22 @@ static void usb_intf_hdl_close(u8 *priv)
void r8712_usb_set_intf_funs(struct intf_hdl *pintf_hdl)
{
- pintf_hdl->intf_hdl_init = &usb_intf_hdl_init;
- pintf_hdl->intf_hdl_unload = &usb_intf_hdl_unload;
- pintf_hdl->intf_hdl_open = &usb_intf_hdl_open;
- pintf_hdl->intf_hdl_close = &usb_intf_hdl_close;
+ pintf_hdl->intf_hdl_init = usb_intf_hdl_init;
+ pintf_hdl->intf_hdl_unload = usb_intf_hdl_unload;
+ pintf_hdl->intf_hdl_open = usb_intf_hdl_open;
+ pintf_hdl->intf_hdl_close = usb_intf_hdl_close;
}
void r8712_usb_set_intf_ops(struct _io_ops *pops)
{
memset((u8 *)pops, 0, sizeof(struct _io_ops));
- pops->_read8 = &usb_read8;
- pops->_read16 = &usb_read16;
- pops->_read32 = &usb_read32;
- pops->_read_port = &r8712_usb_read_port;
- pops->_write8 = &usb_write8;
- pops->_write16 = &usb_write16;
- pops->_write32 = &usb_write32;
- pops->_write_mem = &r8712_usb_write_mem;
- pops->_write_port = &r8712_usb_write_port;
+ pops->_read8 = usb_read8;
+ pops->_read16 = usb_read16;
+ pops->_read32 = usb_read32;
+ pops->_read_port = r8712_usb_read_port;
+ pops->_write8 = usb_write8;
+ pops->_write16 = usb_write16;
+ pops->_write32 = usb_write32;
+ pops->_write_mem = r8712_usb_write_mem;
+ pops->_write_port = r8712_usb_write_port;
}
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 489a9e6d52fc..454cdf6c7fa1 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -232,9 +232,14 @@ static void r8712_usb_read_port_complete(struct urb *purb)
case -EPIPE:
case -ENODEV:
case -ESHUTDOWN:
- case -ENOENT:
padapter->bDriverStopped = true;
break;
+ case -ENOENT:
+ if (!padapter->bSuspended) {
+ padapter->bDriverStopped = true;
+ break;
+ }
+ /* Fall through. */
case -EPROTO:
precvbuf->reuse = true;
r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
@@ -329,7 +334,7 @@ void r8712_usb_read_port_cancel(struct _adapter *padapter)
void r8712_xmit_bh(void *priv)
{
int ret = false;
- struct _adapter *padapter = (struct _adapter *)priv;
+ struct _adapter *padapter = priv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
if (padapter->bDriverStopped ||
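
The new -ENOENT case exists because usb_kill_urb() completes an outstanding URB with status -ENOENT: during suspend that completion is expected and should recycle the receive buffer rather than latch bDriverStopped. Condensed logic of the completion handler as patched (driver symbols as in the hunk above; the final r8712_read_port() argument is assumed from the driver):

switch (purb->status) {
case -ENOENT:                   /* URB killed, e.g. by suspend */
        if (!padapter->bSuspended) {
                padapter->bDriverStopped = true;
                break;
        }
        /* suspended: treat like a soft error and resubmit */
case -EPROTO:
        precvbuf->reuse = true;
        r8712_read_port(padapter, precvpriv->ff_hwaddr, 0,
                        (unsigned char *)precvbuf);
        break;
}
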
diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c
index d3981836ce26..695f9b9fc749 100644
--- a/drivers/staging/rtl8712/xmit_linux.c
+++ b/drivers/staging/rtl8712/xmit_linux.c
@@ -70,10 +70,7 @@ uint _r8712_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
sint r8712_endofpktfile(struct pkt_file *pfile)
{
- if (pfile->pkt_len == 0)
- return true;
- else
- return false;
+ return (pfile->pkt_len == 0);
}
@@ -131,7 +128,7 @@ int r8712_xmit_resource_alloc(struct _adapter *padapter,
for (i = 0; i < 8; i++) {
pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (pxmitbuf->pxmit_urb[i] == NULL) {
+ if (!pxmitbuf->pxmit_urb[i]) {
netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n");
return _FAIL;
}
@@ -164,19 +161,15 @@ int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev)
struct xmit_frame *pxmitframe = NULL;
struct _adapter *padapter = netdev_priv(pnetdev);
struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
- int ret = 0;
if (!r8712_if_up(padapter)) {
- ret = 0;
goto _xmit_entry_drop;
}
pxmitframe = r8712_alloc_xmitframe(pxmitpriv);
- if (pxmitframe == NULL) {
- ret = 0;
+ if (!pxmitframe) {
goto _xmit_entry_drop;
}
if ((!r8712_update_attrib(padapter, pkt, &pxmitframe->attrib))) {
- ret = 0;
goto _xmit_entry_drop;
}
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_TX);
@@ -188,11 +181,11 @@ int r8712_xmit_entry(_pkt *pkt, struct net_device *pnetdev)
}
pxmitpriv->tx_pkts++;
pxmitpriv->tx_bytes += pxmitframe->attrib.last_txcmdsz;
- return ret;
+ return 0;
_xmit_entry_drop:
if (pxmitframe)
r8712_free_xmitframe(pxmitpriv, pxmitframe);
pxmitpriv->tx_drop++;
dev_kfree_skb_any(pkt);
- return ret;
+ return 0;
}
diff --git a/drivers/staging/rtl8723au/TODO b/drivers/staging/rtl8723au/TODO
index 175a0ceb7421..42b86e478df8 100644
--- a/drivers/staging/rtl8723au/TODO
+++ b/drivers/staging/rtl8723au/TODO
@@ -9,5 +9,8 @@ TODO:
- merge Realtek's bugfixes and new features into the driver
- switch to use MAC80211
-Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
+A mac80211 driver for this hardware has already been merged at
+drivers/net/wireless/realtek/rtl8xxxu/
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
Jes Sorensen <Jes.Sorensen@redhat.com>, and Larry Finger <Larry.Finger@lwfinger.net>.
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index 1aa9b267c30e..f68e2770255d 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -171,24 +171,20 @@ static u8 chk_sta_is_alive(struct sta_info *psta)
return ret;
}
-void expire_timeout_chk23a(struct rtw_adapter *padapter)
+void expire_timeout_chk23a(struct rtw_adapter *padapter)
{
- struct list_head *phead, *plist, *ptmp;
+ struct list_head *phead;
u8 updated = 0;
- struct sta_info *psta;
+ struct sta_info *psta, *ptmp;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 chk_alive_num = 0;
struct sta_info *chk_alive_list[NUM_STA];
int i;
spin_lock_bh(&pstapriv->auth_list_lock);
-
phead = &pstapriv->auth_list;
-
/* check auth_queue */
- list_for_each_safe(plist, ptmp, phead) {
- psta = container_of(plist, struct sta_info, auth_list);
-
+ list_for_each_entry_safe(psta, ptmp, phead, auth_list) {
if (psta->expire_to > 0) {
psta->expire_to--;
if (psta->expire_to == 0) {
@@ -206,19 +202,13 @@ void expire_timeout_chk23a(struct rtw_adapter *padapter)
spin_lock_bh(&pstapriv->auth_list_lock);
}
}
-
}
-
spin_unlock_bh(&pstapriv->auth_list_lock);
spin_lock_bh(&pstapriv->asoc_list_lock);
-
phead = &pstapriv->asoc_list;
-
/* check asoc_queue */
- list_for_each_safe(plist, ptmp, phead) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
+ list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
if (chk_sta_is_alive(psta) || !psta->expire_to) {
psta->expire_to = pstapriv->expire_to;
psta->keep_alive_trycnt = 0;
@@ -283,7 +273,6 @@ void expire_timeout_chk23a(struct rtw_adapter *padapter)
}
}
}
-
spin_unlock_bh(&pstapriv->asoc_list_lock);
if (chk_alive_num) {
@@ -1057,103 +1046,6 @@ void rtw_set_macaddr_acl23a(struct rtw_adapter *padapter, int mode)
pacl_list->mode = mode;
}
-int rtw_acl_add_sta23a(struct rtw_adapter *padapter, u8 *addr)
-{
- struct list_head *plist, *phead;
- u8 added = false;
- int i, ret = 0;
- struct rtw_wlan_acl_node *paclnode;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
-
- DBG_8723A("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, addr);
-
- if ((NUM_ACL-1) < pacl_list->num)
- return -1;
-
- spin_lock_bh(&pacl_node_q->lock);
-
- phead = get_list_head(pacl_node_q);
-
- list_for_each(plist, phead) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
-
- if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
- if (paclnode->valid == true) {
- added = true;
- DBG_8723A("%s, sta has been added\n", __func__);
- break;
- }
- }
- }
-
- spin_unlock_bh(&pacl_node_q->lock);
-
- if (added)
- return ret;
-
- spin_lock_bh(&pacl_node_q->lock);
-
- for (i = 0; i < NUM_ACL; i++) {
- paclnode = &pacl_list->aclnode[i];
-
- if (!paclnode->valid) {
- INIT_LIST_HEAD(&paclnode->list);
-
- memcpy(paclnode->addr, addr, ETH_ALEN);
-
- paclnode->valid = true;
-
- list_add_tail(&paclnode->list, get_list_head(pacl_node_q));
-
- pacl_list->num++;
-
- break;
- }
- }
-
- DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num);
-
- spin_unlock_bh(&pacl_node_q->lock);
- return ret;
-}
-
-int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr)
-{
- struct list_head *plist, *phead, *ptmp;
- struct rtw_wlan_acl_node *paclnode;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- struct rtw_queue *pacl_node_q = &pacl_list->acl_node_q;
-
- DBG_8723A("%s(acl_num =%d) = %pM\n", __func__, pacl_list->num, addr);
-
- spin_lock_bh(&pacl_node_q->lock);
-
- phead = get_list_head(pacl_node_q);
-
- list_for_each_safe(plist, ptmp, phead) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
-
- if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
- if (paclnode->valid) {
- paclnode->valid = false;
-
- list_del_init(&paclnode->list);
-
- pacl_list->num--;
- }
- }
- }
-
- spin_unlock_bh(&pacl_node_q->lock);
-
- DBG_8723A("%s, acl_num =%d\n", __func__, pacl_list->num);
-
- return 0;
-}
-
static void update_bcn_erpinfo_ie(struct rtw_adapter *padapter)
{
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1354,20 +1246,14 @@ void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated)
{
/* update associated stations cap. */
if (updated == true) {
- struct list_head *phead, *plist, *ptmp;
- struct sta_info *psta;
+ struct list_head *phead;
+ struct sta_info *psta, *ptmp;
struct sta_priv *pstapriv = &padapter->stapriv;
spin_lock_bh(&pstapriv->asoc_list_lock);
-
phead = &pstapriv->asoc_list;
-
- list_for_each_safe(plist, ptmp, phead) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
+ list_for_each_entry_safe(psta, ptmp, phead, asoc_list)
VCS_update23a(padapter, psta);
- }
-
spin_unlock_bh(&pstapriv->asoc_list_lock);
}
}
@@ -1625,41 +1511,10 @@ u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool acti
return beacon_updated;
}
-int rtw_ap_inform_ch_switch23a(struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset)
-{
- struct list_head *phead, *plist;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- if ((pmlmeinfo->state&0x03) != MSR_AP)
- return 0;
-
- DBG_8723A("%s(%s): with ch:%u, offset:%u\n", __func__,
- padapter->pnetdev->name, new_ch, ch_offset);
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
-
- list_for_each(plist, phead) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
- issue_action_spct_ch_switch23a(padapter, psta->hwaddr, new_ch, ch_offset);
- psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- issue_action_spct_ch_switch23a(padapter, bc_addr, new_ch, ch_offset);
-
- return 0;
-}
-
int rtw_sta_flush23a(struct rtw_adapter *padapter)
{
- struct list_head *phead, *plist, *ptmp;
- struct sta_info *psta;
+ struct list_head *phead;
+ struct sta_info *psta, *ptmp;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -1675,10 +1530,7 @@ int rtw_sta_flush23a(struct rtw_adapter *padapter)
spin_lock_bh(&pstapriv->asoc_list_lock);
phead = &pstapriv->asoc_list;
-
- list_for_each_safe(plist, ptmp, phead) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
+ list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
/* Remove sta from asoc_list */
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
@@ -1744,9 +1596,9 @@ void rtw_ap_restore_network(struct rtw_adapter *padapter)
struct mlme_priv *mlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- struct sta_info *psta;
+ struct sta_info *psta, *ptmp;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct list_head *phead, *plist, *ptmp;
+ struct list_head *phead;
u8 chk_alive_num = 0;
struct sta_info *chk_alive_list[NUM_STA];
int i;
@@ -1775,15 +1627,9 @@ void rtw_ap_restore_network(struct rtw_adapter *padapter)
}
spin_lock_bh(&pstapriv->asoc_list_lock);
-
phead = &pstapriv->asoc_list;
-
- list_for_each_safe(plist, ptmp, phead) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
+ list_for_each_entry_safe(psta, ptmp, phead, asoc_list)
chk_alive_list[chk_alive_num++] = psta;
- }
-
spin_unlock_bh(&pstapriv->asoc_list_lock);
for (i = 0; i < chk_alive_num; i++) {
@@ -1841,8 +1687,8 @@ void start_ap_mode23a(struct rtw_adapter *padapter)
void stop_ap_mode23a(struct rtw_adapter *padapter)
{
- struct list_head *phead, *plist, *ptmp;
- struct rtw_wlan_acl_node *paclnode;
+ struct list_head *phead;
+ struct rtw_wlan_acl_node *paclnode, *ptmp;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1864,15 +1710,10 @@ void stop_ap_mode23a(struct rtw_adapter *padapter)
/* for ACL */
spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
-
- list_for_each_safe(plist, ptmp, phead) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
-
+ list_for_each_entry_safe(paclnode, ptmp, phead, list) {
if (paclnode->valid == true) {
paclnode->valid = false;
-
list_del_init(&paclnode->list);
-
pacl_list->num--;
}
}
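
The iterator rewrites in this file all follow one pattern: list_for_each_entry_safe() folds container_of() into the loop macro and caches the next pointer, so the body may unlink the current entry. The shape after conversion, with an illustrative condition:

struct sta_info *psta, *tmp;

list_for_each_entry_safe(psta, tmp, &pstapriv->asoc_list, asoc_list) {
        if (psta->expire_to == 0)       /* illustrative condition */
                list_del_init(&psta->asoc_list);
}
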
diff --git a/drivers/staging/rtl8723au/core/rtw_cmd.c b/drivers/staging/rtl8723au/core/rtw_cmd.c
index 3035bb864c39..cd4e0f05d82f 100644
--- a/drivers/staging/rtl8723au/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723au/core/rtw_cmd.c
@@ -295,8 +295,7 @@ static void rtw_cmd_work(struct work_struct *work)
post_process:
/* call callback function for post-processed */
- if (pcmd->cmdcode < (sizeof(rtw_cmd_callback) /
- sizeof(struct _cmd_callback))) {
+ if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
if (!pcmd_callback) {
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_,
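
ARRAY_SIZE() (include/linux/kernel.h) replaces the open-coded sizeof division and adds a compile-time check that its argument really is an array:

/* ARRAY_SIZE(a) == sizeof(a) / sizeof((a)[0]), plus __must_be_array() */
if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback))
        pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
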
diff --git a/drivers/staging/rtl8723au/core/rtw_efuse.c b/drivers/staging/rtl8723au/core/rtw_efuse.c
index f174b4d1a018..359ef4197e94 100644
--- a/drivers/staging/rtl8723au/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723au/core/rtw_efuse.c
@@ -269,8 +269,8 @@ u8 EFUSE_Read1Byte23a(struct rtw_adapter *Adapter, u16 Address)
}
data = rtl8723au_read8(Adapter, EFUSE_CTRL);
return data;
- } else
- return 0xFF;
+ }
+ return 0xFF;
}
/* Read one byte from real Efuse. */
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme.c b/drivers/staging/rtl8723au/core/rtw_mlme.c
index 3c09ea9b7348..a786fc4bdb53 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme.c
@@ -171,21 +171,15 @@ exit:
void rtw_free_network_queue23a(struct rtw_adapter *padapter)
{
- struct list_head *phead, *plist, *ptmp;
- struct wlan_network *pnetwork;
+ struct list_head *phead;
+ struct wlan_network *pnetwork, *ptmp;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct rtw_queue *scanned_queue = &pmlmepriv->scanned_queue;
spin_lock_bh(&scanned_queue->lock);
-
phead = get_list_head(scanned_queue);
-
- list_for_each_safe(plist, ptmp, phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
+ list_for_each_entry_safe(pnetwork, ptmp, phead, list)
_rtw_free_network23a(pmlmepriv, pnetwork);
- }
-
spin_unlock_bh(&scanned_queue->lock);
}
@@ -329,15 +323,12 @@ int is_same_network23a(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
struct wlan_network *
rtw_get_oldest_wlan_network23a(struct rtw_queue *scanned_queue)
{
- struct list_head *plist, *phead;
+ struct list_head *phead;
struct wlan_network *pwlan;
struct wlan_network *oldest = NULL;
phead = get_list_head(scanned_queue);
-
- list_for_each(plist, phead) {
- pwlan = container_of(plist, struct wlan_network, list);
-
+ list_for_each_entry(pwlan, phead, list) {
if (pwlan->fixed != true) {
if (!oldest || time_after(oldest->last_scanned,
pwlan->last_scanned))
@@ -445,7 +436,6 @@ static void rtw_update_scanned_network(struct rtw_adapter *adapter,
spin_lock_bh(&queue->lock);
phead = get_list_head(queue);
-
list_for_each(plist, phead) {
pnetwork = container_of(plist, struct wlan_network, list);
@@ -710,21 +700,17 @@ rtw_surveydone_event_callback23a(struct rtw_adapter *adapter, const u8 *pbuf)
static void free_scanqueue(struct mlme_priv *pmlmepriv)
{
- struct wlan_network *pnetwork;
+ struct wlan_network *pnetwork, *ptemp;
struct rtw_queue *scan_queue = &pmlmepriv->scanned_queue;
- struct list_head *plist, *phead, *ptemp;
+ struct list_head *phead;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, "+free_scanqueue\n");
spin_lock_bh(&scan_queue->lock);
-
phead = get_list_head(scan_queue);
-
- list_for_each_safe(plist, ptemp, phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
+ list_for_each_entry_safe(pnetwork, ptemp, phead, list) {
pnetwork->fixed = false;
_rtw_free_network23a(pmlmepriv, pnetwork);
}
-
spin_unlock_bh(&scan_queue->lock);
}
@@ -1625,27 +1611,16 @@ exit:
static struct wlan_network *
rtw_select_candidate_from_queue(struct mlme_priv *pmlmepriv)
{
- struct wlan_network *pnetwork, *candidate = NULL;
+ struct wlan_network *pnetwork, *ptmp, *candidate = NULL;
struct rtw_queue *queue = &pmlmepriv->scanned_queue;
- struct list_head *phead, *plist, *ptmp;
+ struct list_head *phead;
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
phead = get_list_head(queue);
-
- list_for_each_safe(plist, ptmp, phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
- if (!pnetwork) {
- RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
- "%s: return _FAIL:(pnetwork == NULL)\n",
- __func__);
- goto exit;
- }
-
+ list_for_each_entry_safe(pnetwork, ptmp, phead, list)
rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
- }
-
-exit:
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+
return candidate;
}
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
index d28f29a93810..f4fff385aeb2 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -2154,8 +2154,7 @@ OnAction23a(struct rtw_adapter *padapter, struct recv_frame *precv_frame)
category = mgmt->u.action.category;
- for (i = 0;
- i < sizeof(OnAction23a_tbl) / sizeof(struct action_handler); i++) {
+ for (i = 0; i < ARRAY_SIZE(OnAction23a_tbl); i++) {
ptable = &OnAction23a_tbl[i];
if (category == ptable->num)
@@ -2656,8 +2655,6 @@ static void issue_probersp(struct rtw_adapter *padapter, unsigned char *da)
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe23a(padapter, pmgntframe);
-
- return;
}
static int _issue_probereq(struct rtw_adapter *padapter,
@@ -2957,8 +2954,6 @@ static void issue_auth(struct rtw_adapter *padapter, struct sta_info *psta,
rtw_wep_encrypt23a(padapter, pmgntframe);
DBG_8723A("%s\n", __func__);
dump_mgntframe23a(padapter, pmgntframe);
-
- return;
}
#ifdef CONFIG_8723AU_AP_MODE
@@ -3338,8 +3333,6 @@ exit:
}
} else
kfree(pmlmepriv->assoc_req);
-
- return;
}
/* when wait_ack is true, this function should be called at process context */
@@ -4102,8 +4095,6 @@ static void rtw_site_survey(struct rtw_adapter *padapter)
pmlmeext->chan_scan_time = SURVEY_TO;
pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
}
-
- return;
}
/* collect bss info from Beacon and Probe request/response frames. */
@@ -4759,8 +4750,6 @@ void report_survey_event23a(struct rtw_adapter *padapter,
rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
pmlmeext->sitesurvey_res.bss_cnt++;
-
- return;
}
void report_surveydone_event23a(struct rtw_adapter *padapter)
@@ -4802,8 +4791,6 @@ void report_surveydone_event23a(struct rtw_adapter *padapter)
DBG_8723A("survey done event(%x)\n", psurveydone_evt->bss_cnt);
rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
- return;
}
void report_join_res23a(struct rtw_adapter *padapter, int res)
@@ -4850,8 +4837,6 @@ void report_join_res23a(struct rtw_adapter *padapter, int res)
rtw_joinbss_event_prehandle23a(padapter, (u8 *)&pjoinbss_evt->network);
rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
- return;
}
void report_del_sta_event23a(struct rtw_adapter *padapter,
@@ -4906,8 +4891,6 @@ void report_del_sta_event23a(struct rtw_adapter *padapter,
DBG_8723A("report_del_sta_event23a: delete STA, mac_id =%d\n", mac_id);
rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
- return;
}
void report_add_sta_event23a(struct rtw_adapter *padapter,
@@ -4951,8 +4934,6 @@ void report_add_sta_event23a(struct rtw_adapter *padapter,
DBG_8723A("report_add_sta_event23a: add STA\n");
rtw_enqueue_cmd23a(pcmdpriv, pcmd_obj);
-
- return;
}
/****************************************************************************
@@ -5394,8 +5375,6 @@ static void link_timer_hdl(unsigned long data)
issue_assocreq(padapter);
set_link_timer(pmlmeext, REASSOC_TO);
}
-
- return;
}
static void addba_timer_hdl(unsigned long data)
@@ -6082,10 +6061,10 @@ int tx_beacon_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
#ifdef CONFIG_8723AU_AP_MODE
else { /* tx bc/mc frames after update TIM */
struct sta_info *psta_bmc;
- struct list_head *plist, *phead, *ptmp;
- struct xmit_frame *pxmitframe;
+ struct list_head *phead;
+ struct xmit_frame *pxmitframe, *ptmp;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
/* for BC/MC Frames */
psta_bmc = rtw_get_bcmc_stainfo23a(padapter);
@@ -6099,10 +6078,8 @@ int tx_beacon_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
phead = get_list_head(&psta_bmc->sleep_q);
- list_for_each_safe(plist, ptmp, phead) {
- pxmitframe = container_of(plist,
- struct xmit_frame,
- list);
+ list_for_each_entry_safe(pxmitframe, ptmp,
+ phead, list) {
list_del_init(&pxmitframe->list);
@@ -6119,7 +6096,6 @@ int tx_beacon_hdl23a(struct rtw_adapter *padapter, const u8 *pbuf)
rtl8723au_hal_xmitframe_enqueue(padapter,
pxmitframe);
}
-
/* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
}
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 404b61898d08..989ed0726817 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -85,16 +85,15 @@ int _rtw_init_recv_priv23a(struct recv_priv *precvpriv,
return res;
}
-void _rtw_free_recv_priv23a (struct recv_priv *precvpriv)
+void _rtw_free_recv_priv23a(struct recv_priv *precvpriv)
{
struct rtw_adapter *padapter = precvpriv->adapter;
- struct recv_frame *precvframe;
- struct list_head *plist, *ptmp;
+ struct recv_frame *precvframe, *ptmp;
rtw_free_uc_swdec_pending_queue23a(padapter);
- list_for_each_safe(plist, ptmp, &precvpriv->free_recv_queue.queue) {
- precvframe = container_of(plist, struct recv_frame, list);
+ list_for_each_entry_safe(precvframe, ptmp,
+ &precvpriv->free_recv_queue.queue, list) {
list_del_init(&precvframe->list);
kfree(precvframe);
}
@@ -105,21 +104,14 @@ void _rtw_free_recv_priv23a (struct recv_priv *precvpriv)
struct recv_frame *rtw_alloc_recvframe23a(struct rtw_queue *pfree_recv_queue)
{
struct recv_frame *pframe;
- struct list_head *plist, *phead;
struct rtw_adapter *padapter;
struct recv_priv *precvpriv;
spin_lock_bh(&pfree_recv_queue->lock);
- if (list_empty(&pfree_recv_queue->queue))
- pframe = NULL;
- else {
- phead = get_list_head(pfree_recv_queue);
-
- plist = phead->next;
-
- pframe = container_of(plist, struct recv_frame, list);
-
+ pframe = list_first_entry_or_null(&pfree_recv_queue->queue,
+ struct recv_frame, list);
+ if (pframe) {
list_del_init(&pframe->list);
padapter = pframe->adapter;
if (padapter) {
@@ -195,19 +187,13 @@ using spinlock to protect
static void rtw_free_recvframe23a_queue(struct rtw_queue *pframequeue)
{
- struct recv_frame *hdr;
- struct list_head *plist, *phead, *ptmp;
+ struct recv_frame *hdr, *ptmp;
+ struct list_head *phead;
spin_lock(&pframequeue->lock);
-
phead = get_list_head(pframequeue);
- plist = phead->next;
-
- list_for_each_safe(plist, ptmp, phead) {
- hdr = container_of(plist, struct recv_frame, list);
+ list_for_each_entry_safe(hdr, ptmp, phead, list)
rtw_free_recvframe23a(hdr);
- }
-
spin_unlock(&pframequeue->lock);
}
@@ -254,21 +240,13 @@ struct recv_buf *rtw_dequeue_recvbuf23a (struct rtw_queue *queue)
{
unsigned long irqL;
struct recv_buf *precvbuf;
- struct list_head *plist, *phead;
spin_lock_irqsave(&queue->lock, irqL);
- if (list_empty(&queue->queue)) {
- precvbuf = NULL;
- } else {
- phead = get_list_head(queue);
-
- plist = phead->next;
-
- precvbuf = container_of(plist, struct recv_buf, list);
-
+ precvbuf = list_first_entry_or_null(&queue->queue,
+ struct recv_buf, list);
+ if (precvbuf)
list_del_init(&precvbuf->list);
- }
spin_unlock_irqrestore(&queue->lock, irqL);
@@ -286,7 +264,6 @@ int recvframe_chkmic(struct rtw_adapter *adapter,
u8 bmic_err = false, brpt_micerror = true;
u8 *pframe, *payload, *pframemic;
u8 *mickey;
- /* u8 *iv, rxdata_key_idx = 0; */
struct sta_info *stainfo;
struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &adapter->securitypriv;
@@ -361,33 +338,19 @@ int recvframe_chkmic(struct rtw_adapter *adapter,
int i;
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "*(pframemic-8)-*(pframemic-1) =0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- *(pframemic - 8), *(pframemic - 7),
- *(pframemic - 6), *(pframemic - 5),
- *(pframemic - 4), *(pframemic - 3),
- *(pframemic - 2), *(pframemic - 1));
+ "*(pframemic-8)-*(pframemic-1) =%*phC\n",
+ 8, pframemic - 8);
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
- "*(pframemic-16)-*(pframemic-9) =0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- *(pframemic - 16), *(pframemic - 15),
- *(pframemic - 14), *(pframemic - 13),
- *(pframemic - 12), *(pframemic - 11),
- *(pframemic - 10), *(pframemic - 9));
+ "*(pframemic-16)-*(pframemic-9) =%*phC\n",
+ 8, pframemic - 16);
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
"====== demp packet (len =%d) ======\n",
precvframe->pkt->len);
for (i = 0; i < precvframe->pkt->len; i = i + 8) {
RT_TRACE(_module_rtl871x_recv_c_,
- _drv_err_,
- "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
- *(precvframe->pkt->data+i),
- *(precvframe->pkt->data+i+1),
- *(precvframe->pkt->data+i+2),
- *(precvframe->pkt->data+i+3),
- *(precvframe->pkt->data+i+4),
- *(precvframe->pkt->data+i+5),
- *(precvframe->pkt->data+i+6),
- *(precvframe->pkt->data+i+7));
+ _drv_err_, "%*phC\n",
+ 8, precvframe->pkt->data + i);
}
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
"====== demp packet end [len =%d]======\n",
@@ -1100,22 +1063,17 @@ static int validate_recv_ctrl_frame(struct rtw_adapter *padapter,
if ((psta->state & WIFI_SLEEP_STATE) &&
(pstapriv->sta_dz_bitmap & CHKBIT(psta->aid))) {
- struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct list_head *xmitframe_phead;
struct xmit_frame *pxmitframe;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
spin_lock_bh(&pxmitpriv->lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
- xmitframe_plist = xmitframe_phead->next;
-
- if (!list_empty(xmitframe_phead)) {
- pxmitframe = container_of(xmitframe_plist,
- struct xmit_frame,
- list);
-
- xmitframe_plist = xmitframe_plist->next;
-
+ pxmitframe = list_first_entry_or_null(xmitframe_phead,
+ struct xmit_frame,
+ list);
+ if (pxmitframe) {
list_del_init(&pxmitframe->list);
psta->sleepq_len--;
@@ -1127,30 +1085,20 @@ static int validate_recv_ctrl_frame(struct rtw_adapter *padapter,
pxmitframe->attrib.triggered = 1;
- /* DBG_8723A("handling ps-poll, q_len =%d, tim =%x\n", psta->sleepq_len, pstapriv->tim_bitmap); */
-
rtl8723au_hal_xmitframe_enqueue(padapter,
pxmitframe);
if (psta->sleepq_len == 0) {
pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
-
- /* DBG_8723A("after handling ps-poll, tim =%x\n", pstapriv->tim_bitmap); */
-
- /* update BCN for TIM IE */
- /* update_BCNTIM(padapter); */
update_beacon23a(padapter, WLAN_EID_TIM,
NULL, false);
}
- /* spin_unlock_bh(&psta->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
} else {
- /* spin_unlock_bh(&psta->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
- /* DBG_8723A("no buffered packets to xmit\n"); */
if (pstapriv->tim_bitmap & CHKBIT(psta->aid)) {
if (psta->sleepq_len == 0) {
DBG_8723A("no buffered packets "
@@ -1169,8 +1117,6 @@ static int validate_recv_ctrl_frame(struct rtw_adapter *padapter,
pstapriv->tim_bitmap &= ~CHKBIT(psta->aid);
- /* update BCN for TIM IE */
- /* update_BCNTIM(padapter); */
update_beacon23a(padapter, WLAN_EID_TIM,
NULL, false);
}
@@ -1190,7 +1136,6 @@ static int validate_recv_mgnt_frame(struct rtw_adapter *padapter,
struct sta_info *psta;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- /* struct mlme_priv *pmlmepriv = &adapter->mlmepriv; */
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
"+validate_recv_mgnt_frame\n");
@@ -1298,8 +1243,6 @@ static int validate_recv_data_frame(struct rtw_adapter *adapter,
goto exit;
}
- /* psta->rssi = prxcmd->rssi; */
- /* psta->signal_quality = prxcmd->sq; */
precv_frame->psta = psta;
pattrib->hdrlen = sizeof(struct ieee80211_hdr_3addr);
@@ -1402,11 +1345,7 @@ static void dump_rx_pkt(struct sk_buff *skb, u16 type, int level)
DBG_8723A("#############################\n");
for (i = 0; i < 64; i = i + 8)
- DBG_8723A("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n",
- *(ptr + i), *(ptr + i + 1), *(ptr + i + 2),
- *(ptr + i + 3), *(ptr + i + 4),
- *(ptr + i + 5), *(ptr + i + 6),
- *(ptr + i + 7));
+ DBG_8723A("%*phC:\n", 8, ptr + i);
DBG_8723A("#############################\n");
}
}
@@ -1513,7 +1452,6 @@ static int wlanhdr_to_ethhdr (struct recv_frame *precvframe)
psnap = ptr + hdrlen;
eth_type = (psnap[6] << 8) | psnap[7];
/* convert hdr + possible LLC headers into Ethernet header */
- /* eth_type = (psnap_type[0] << 8) | psnap_type[1]; */
if ((ether_addr_equal(psnap, rfc1042_header) &&
eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
ether_addr_equal(psnap, bridge_tunnel_header)) {
@@ -1567,22 +1505,19 @@ struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
struct rtw_queue *defrag_q)
{
- struct list_head *plist, *phead, *ptmp;
- u8 *data, wlanhdr_offset;
- u8 curfragnum;
- struct recv_frame *pnfhdr;
+ struct list_head *phead;
+ u8 wlanhdr_offset;
+ u8 curfragnum;
+ struct recv_frame *pnfhdr, *ptmp;
struct recv_frame *prframe, *pnextrframe;
- struct rtw_queue *pfree_recv_queue;
+ struct rtw_queue *pfree_recv_queue;
struct sk_buff *skb;
-
-
curfragnum = 0;
pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
phead = get_list_head(defrag_q);
- plist = phead->next;
- prframe = container_of(plist, struct recv_frame, list);
+ prframe = list_first_entry(phead, struct recv_frame, list);
list_del_init(&prframe->list);
skb = prframe->pkt;
@@ -1597,12 +1532,7 @@ struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
curfragnum++;
- phead = get_list_head(defrag_q);
-
- data = prframe->pkt->data;
-
- list_for_each_safe(plist, ptmp, phead) {
- pnfhdr = container_of(plist, struct recv_frame, list);
+ list_for_each_entry_safe(pnfhdr, ptmp, phead, list) {
pnextrframe = (struct recv_frame *)pnfhdr;
/* check the fragment sequence (2nd ~n fragment frame) */
@@ -1644,8 +1574,6 @@ struct recv_frame *recvframe_defrag(struct rtw_adapter *adapter,
RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
"Performance defrag!!!!!\n");
-
-
return prframe;
}
@@ -1844,11 +1772,6 @@ static int enqueue_reorder_recvframe23a(struct recv_reorder_ctrl *preorder_ctrl,
struct rx_pkt_attrib *pnextattrib;
ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
- /* DbgPrint("+enqueue_reorder_recvframe23a()\n"); */
-
- /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */
- /* spin_lock_ex(&ppending_recvframe_queue->lock); */
-
phead = get_list_head(ppending_recvframe_queue);
list_for_each_safe(plist, ptmp, phead) {
@@ -1859,26 +1782,17 @@ static int enqueue_reorder_recvframe23a(struct recv_reorder_ctrl *preorder_ctrl,
continue;
} else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num)) {
/* Duplicate entry is found!! Do not insert current entry. */
-
- /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */
return false;
} else {
break;
}
- /* DbgPrint("enqueue_reorder_recvframe23a():while\n"); */
}
- /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */
- /* spin_lock_ex(&ppending_recvframe_queue->lock); */
-
list_del_init(&prframe->list);
list_add_tail(&prframe->list, plist);
- /* spin_unlock_ex(&ppending_recvframe_queue->lock); */
- /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */
-
return true;
}
@@ -1889,30 +1803,21 @@ int recv_indicatepkts_in_order(struct rtw_adapter *padapter,
struct recv_reorder_ctrl *preorder_ctrl,
int bforced)
{
- /* u8 bcancelled; */
struct list_head *phead, *plist;
struct recv_frame *prframe;
struct rx_pkt_attrib *pattrib;
- /* u8 index = 0; */
int bPktInBuf = false;
struct recv_priv *precvpriv;
struct rtw_queue *ppending_recvframe_queue;
precvpriv = &padapter->recvpriv;
ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
- /* DbgPrint("+recv_indicatepkts_in_order\n"); */
-
- /* spin_lock_irqsave(&ppending_recvframe_queue->lock); */
- /* spin_lock_ex(&ppending_recvframe_queue->lock); */
-
phead = get_list_head(ppending_recvframe_queue);
plist = phead->next;
/* Handling some condition for forced indicate case. */
if (bforced) {
if (list_empty(phead)) {
- /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */
- /* spin_unlock_ex(&ppending_recvframe_queue->lock); */
return true;
}
@@ -1962,12 +1867,8 @@ int recv_indicatepkts_in_order(struct rtw_adapter *padapter,
break;
}
- /* DbgPrint("recv_indicatepkts_in_order():while\n"); */
}
- /* spin_unlock_ex(&ppending_recvframe_queue->lock); */
- /* spin_unlock_irqrestore(&ppending_recvframe_queue->lock); */
-
return bPktInBuf;
}
@@ -2083,8 +1984,6 @@ void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext)
return;
}
- /* DBG_8723A("+rtw_reordering_ctrl_timeout_handler23a() =>\n"); */
-
spin_lock_bh(&ppending_recvframe_queue->lock);
if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true) {
@@ -2101,14 +2000,10 @@ int process_recv_indicatepkts(struct rtw_adapter *padapter,
struct recv_frame *prframe)
{
int retval = _SUCCESS;
- /* struct recv_priv *precvpriv = &padapter->recvpriv; */
- /* struct rx_pkt_attrib *pattrib = &prframe->attrib; */
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
if (phtpriv->ht_option == true) { /* B/G/N Mode */
- /* prframe->preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
-
/* including perform A-MPDU Rx Ordering Buffer Control */
if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
if ((padapter->bDriverStopped == false) &&
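
Several hunks in rtw_recv.c collapse the list_empty()/phead->next/container_of sequence into a single list_first_entry_or_null(), which yields NULL for an empty list and an already-typed pointer otherwise. A minimal sketch of the dequeue-under-lock shape, with a hypothetical struct item:

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
};

static struct item *dequeue(struct list_head *head, spinlock_t *lock)
{
	struct item *it;

	spin_lock_bh(lock);
	it = list_first_entry_or_null(head, struct item, list);
	if (it)
		list_del_init(&it->list);	/* claim the entry while locked */
	spin_unlock_bh(lock);

	return it;	/* NULL when the queue was empty */
}
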
diff --git a/drivers/staging/rtl8723au/core/rtw_security.c b/drivers/staging/rtl8723au/core/rtw_security.c
index 038b57b3afe2..5a4cfdf1ebd4 100644
--- a/drivers/staging/rtl8723au/core/rtw_security.c
+++ b/drivers/staging/rtl8723au/core/rtw_security.c
@@ -634,7 +634,7 @@ int rtw_tkip_encrypt23a(struct rtw_adapter *padapter,
&pattrib->ra[0]);
}
- if (stainfo == NULL) {
+ if (!stainfo) {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
"%s: stainfo == NULL!!!\n", __func__);
DBG_8723A("%s, psta == NUL\n", __func__);
@@ -731,7 +731,7 @@ int rtw_tkip_decrypt23a(struct rtw_adapter *padapter,
stainfo = rtw_get_stainfo23a(&padapter->stapriv,
&prxattrib->ta[0]);
- if (stainfo == NULL) {
+ if (!stainfo) {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
"%s: stainfo == NULL!!!\n", __func__);
return _FAIL;
@@ -1617,9 +1617,9 @@ exit:
return res;
}
-void rtw_use_tkipkey_handler23a(void *FunctionContext)
+void rtw_use_tkipkey_handler23a(void *function_context)
{
- struct rtw_adapter *padapter = (struct rtw_adapter *)FunctionContext;
+ struct rtw_adapter *padapter = function_context;
RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
"^^^%s ^^^\n", __func__);
diff --git a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
index b06bff74502a..a9b778c45d44 100644
--- a/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723au/core/rtw_sta_mgt.c
@@ -83,8 +83,8 @@ int _rtw_init_sta_priv23a(struct sta_priv *pstapriv)
int _rtw_free_sta_priv23a(struct sta_priv *pstapriv)
{
- struct list_head *phead, *plist, *ptmp;
- struct sta_info *psta;
+ struct list_head *phead;
+ struct sta_info *psta, *ptmp;
struct recv_reorder_ctrl *preorder_ctrl;
int index;
@@ -93,12 +93,9 @@ int _rtw_free_sta_priv23a(struct sta_priv *pstapriv)
spin_lock_bh(&pstapriv->sta_hash_lock);
for (index = 0; index < NUM_STA; index++) {
phead = &pstapriv->sta_hash[index];
-
- list_for_each_safe(plist, ptmp, phead) {
+ list_for_each_entry_safe(psta, ptmp, phead, hash_list) {
int i;
- psta = container_of(plist, struct sta_info,
- hash_list);
for (i = 0; i < 16 ; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i];
del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
@@ -203,7 +200,7 @@ int rtw_free_stainfo23a(struct rtw_adapter *padapter, struct sta_info *psta)
struct hw_xmit *phwxmit;
int i;
- if (psta == NULL)
+ if (!psta)
goto exit;
spin_lock_bh(&psta->lock);
@@ -325,8 +322,8 @@ exit:
/* free all stainfo which in sta_hash[all] */
void rtw_free_all_stainfo23a(struct rtw_adapter *padapter)
{
- struct list_head *plist, *phead, *ptmp;
- struct sta_info *psta;
+ struct list_head *phead;
+ struct sta_info *psta, *ptmp;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo23a(padapter);
s32 index;
@@ -335,13 +332,9 @@ void rtw_free_all_stainfo23a(struct rtw_adapter *padapter)
return;
spin_lock_bh(&pstapriv->sta_hash_lock);
-
for (index = 0; index < NUM_STA; index++) {
phead = &pstapriv->sta_hash[index];
-
- list_for_each_safe(plist, ptmp, phead) {
- psta = container_of(plist, struct sta_info, hash_list);
-
+ list_for_each_entry_safe(psta, ptmp, phead, hash_list) {
if (pbcmc_stainfo != psta)
rtw_free_stainfo23a(padapter, psta);
}
@@ -352,12 +345,12 @@ void rtw_free_all_stainfo23a(struct rtw_adapter *padapter)
/* any station allocated can be searched by hash list */
struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr)
{
- struct list_head *plist, *phead;
- struct sta_info *psta = NULL;
- u32 index;
+ struct list_head *phead;
+ struct sta_info *pos, *psta = NULL;
+ u32 index;
const u8 *addr;
- if (hwaddr == NULL)
+ if (!hwaddr)
return NULL;
if (is_multicast_ether_addr(hwaddr))
@@ -368,11 +361,9 @@ struct sta_info *rtw_get_stainfo23a(struct sta_priv *pstapriv, const u8 *hwaddr)
index = wifi_mac_hash(addr);
spin_lock_bh(&pstapriv->sta_hash_lock);
-
phead = &pstapriv->sta_hash[index];
-
- list_for_each(plist, phead) {
- psta = container_of(plist, struct sta_info, hash_list);
+ list_for_each_entry(pos, phead, hash_list) {
+ psta = pos;
/* if found the matched address */
if (ether_addr_equal(psta->hwaddr, addr))
@@ -392,7 +383,7 @@ int rtw_init_bcmc_stainfo23a(struct rtw_adapter *padapter)
int res = _SUCCESS;
psta = rtw_alloc_stainfo23a(pstapriv, bc_addr, GFP_KERNEL);
- if (psta == NULL) {
+ if (!psta) {
res = _FAIL;
RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_,
"rtw_alloc_stainfo23a fail\n");
@@ -418,7 +409,7 @@ bool rtw_access_ctrl23a(struct rtw_adapter *padapter, u8 *mac_addr)
{
bool res = true;
#ifdef CONFIG_8723AU_AP_MODE
- struct list_head *plist, *phead;
+ struct list_head *phead;
struct rtw_wlan_acl_node *paclnode;
bool match = false;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -427,10 +418,7 @@ bool rtw_access_ctrl23a(struct rtw_adapter *padapter, u8 *mac_addr)
spin_lock_bh(&pacl_node_q->lock);
phead = get_list_head(pacl_node_q);
-
- list_for_each(plist, phead) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
-
+ list_for_each_entry(paclnode, phead, list) {
if (ether_addr_equal(paclnode->addr, mac_addr)) {
if (paclnode->valid) {
match = true;
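
Note the split in rtw_sta_mgt.c: walks that may free the current entry use list_for_each_entry_safe, while pure lookups such as rtw_get_stainfo23a and rtw_access_ctrl23a use plain list_for_each_entry, which needs no temporary cursor. A sketch of the read-only form, again with hypothetical types:

#include <linux/list.h>
#include <linux/string.h>
#include <linux/types.h>

struct node {
	u8 addr[6];
	struct list_head hash_list;
};

static struct node *find_node(struct list_head *bucket, const u8 *addr)
{
	struct node *n;

	/* Read-only walk: nothing is unlinked, so no _safe cursor needed. */
	list_for_each_entry(n, bucket, hash_list) {
		if (!memcmp(n->addr, addr, 6))
			return n;
	}
	return NULL;
}
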
diff --git a/drivers/staging/rtl8723au/core/rtw_xmit.c b/drivers/staging/rtl8723au/core/rtw_xmit.c
index a4b6bb6c79a9..3de40cfa5f3b 100644
--- a/drivers/staging/rtl8723au/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723au/core/rtw_xmit.c
@@ -193,39 +193,38 @@ fail:
goto exit;
}
-void _rtw_free_xmit_priv23a (struct xmit_priv *pxmitpriv)
+void _rtw_free_xmit_priv23a(struct xmit_priv *pxmitpriv)
{
struct rtw_adapter *padapter = pxmitpriv->adapter;
- struct xmit_frame *pxframe;
- struct xmit_buf *pxmitbuf;
- struct list_head *plist, *ptmp;
+ struct xmit_frame *pxframe, *ptmp;
+ struct xmit_buf *pxmitbuf, *ptmp2;
- list_for_each_safe(plist, ptmp, &pxmitpriv->free_xmit_queue.queue) {
- pxframe = container_of(plist, struct xmit_frame, list);
+ list_for_each_entry_safe(pxframe, ptmp,
+ &pxmitpriv->free_xmit_queue.queue, list) {
list_del_init(&pxframe->list);
rtw_os_xmit_complete23a(padapter, pxframe);
kfree(pxframe);
}
- list_for_each_safe(plist, ptmp, &pxmitpriv->xmitbuf_list) {
- pxmitbuf = container_of(plist, struct xmit_buf, list2);
+ list_for_each_entry_safe(pxmitbuf, ptmp2,
+ &pxmitpriv->xmitbuf_list, list2) {
list_del_init(&pxmitbuf->list2);
rtw_os_xmit_resource_free23a(padapter, pxmitbuf);
kfree(pxmitbuf);
}
/* free xframe_ext queue, the same count as extbuf */
- list_for_each_safe(plist, ptmp,
- &pxmitpriv->free_xframe_ext_queue.queue) {
- pxframe = container_of(plist, struct xmit_frame, list);
+ list_for_each_entry_safe(pxframe, ptmp,
+ &pxmitpriv->free_xframe_ext_queue.queue,
+ list) {
list_del_init(&pxframe->list);
rtw_os_xmit_complete23a(padapter, pxframe);
kfree(pxframe);
}
/* free xmit extension buff */
- list_for_each_safe(plist, ptmp, &pxmitpriv->xmitextbuf_list) {
- pxmitbuf = container_of(plist, struct xmit_buf, list2);
+ list_for_each_entry_safe(pxmitbuf, ptmp2,
+ &pxmitpriv->xmitextbuf_list, list2) {
list_del_init(&pxmitbuf->list2);
rtw_os_xmit_resource_free23a(padapter, pxmitbuf);
kfree(pxmitbuf);
@@ -1444,24 +1443,18 @@ Must be very very cautious...
*/
static struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)
{
- struct xmit_frame *pxframe = NULL;
- struct list_head *plist, *phead;
+ struct xmit_frame *pxframe;
struct rtw_queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
spin_lock_bh(&pfree_xmit_queue->lock);
- if (list_empty(&pfree_xmit_queue->queue)) {
+ pxframe = list_first_entry_or_null(&pfree_xmit_queue->queue,
+ struct xmit_frame, list);
+ if (!pxframe) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
"rtw_alloc_xmitframe:%d\n",
pxmitpriv->free_xmitframe_cnt);
- pxframe = NULL;
} else {
- phead = get_list_head(pfree_xmit_queue);
-
- plist = phead->next;
-
- pxframe = container_of(plist, struct xmit_frame, list);
-
list_del_init(&pxframe->list);
pxmitpriv->free_xmitframe_cnt--;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
@@ -1478,22 +1471,18 @@ static struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)
struct xmit_frame *rtw_alloc_xmitframe23a_ext(struct xmit_priv *pxmitpriv)
{
- struct xmit_frame *pxframe = NULL;
- struct list_head *plist, *phead;
+ struct xmit_frame *pxframe;
struct rtw_queue *queue = &pxmitpriv->free_xframe_ext_queue;
spin_lock_bh(&queue->lock);
- if (list_empty(&queue->queue)) {
+ pxframe = list_first_entry_or_null(&queue->queue,
+ struct xmit_frame, list);
+ if (!pxframe) {
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
"rtw_alloc_xmitframe23a_ext:%d\n",
pxmitpriv->free_xframe_ext_cnt);
- pxframe = NULL;
} else {
- phead = get_list_head(queue);
- plist = phead->next;
- pxframe = container_of(plist, struct xmit_frame, list);
-
list_del_init(&pxframe->list);
pxmitpriv->free_xframe_ext_cnt--;
RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
@@ -1563,18 +1552,13 @@ exit:
void rtw_free_xmitframe_queue23a(struct xmit_priv *pxmitpriv,
struct rtw_queue *pframequeue)
{
- struct list_head *plist, *phead, *ptmp;
- struct xmit_frame *pxmitframe;
+ struct list_head *phead;
+ struct xmit_frame *pxmitframe, *ptmp;
spin_lock_bh(&pframequeue->lock);
-
phead = get_list_head(pframequeue);
-
- list_for_each_safe(plist, ptmp, phead) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
+ list_for_each_entry_safe(pxmitframe, ptmp, phead, list)
rtw_free_xmitframe23a(pxmitpriv, pxmitframe);
- }
spin_unlock_bh(&pframequeue->lock);
}
@@ -1612,9 +1596,9 @@ struct xmit_frame *
rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i,
int entry)
{
- struct list_head *sta_plist, *sta_phead, *ptmp;
+ struct list_head *sta_phead;
struct hw_xmit *phwxmit;
- struct tx_servq *ptxservq = NULL;
+ struct tx_servq *ptxservq = NULL, *ptmp;
struct rtw_queue *pframe_queue = NULL;
struct xmit_frame *pxmitframe = NULL;
struct rtw_adapter *padapter = pxmitpriv->adapter;
@@ -1638,11 +1622,8 @@ rtw_dequeue_xframe23a(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i,
phwxmit = phwxmit_i + inx[i];
sta_phead = get_list_head(phwxmit->sta_queue);
-
- list_for_each_safe(sta_plist, ptmp, sta_phead) {
- ptxservq = container_of(sta_plist, struct tx_servq,
- tx_pending);
-
+ list_for_each_entry_safe(ptxservq, ptmp, sta_phead,
+ tx_pending) {
pframe_queue = &ptxservq->sta_pending;
pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
@@ -2052,18 +2033,15 @@ dequeue_xmitframes_to_sleeping_queue(struct rtw_adapter *padapter,
struct rtw_queue *pframequeue)
{
int ret;
- struct list_head *plist, *phead, *ptmp;
- u8 ac_index;
+ struct list_head *phead;
+ u8 ac_index;
struct tx_servq *ptxservq;
- struct pkt_attrib *pattrib;
- struct xmit_frame *pxmitframe;
- struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+ struct pkt_attrib *pattrib;
+ struct xmit_frame *pxmitframe, *ptmp;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
phead = get_list_head(pframequeue);
-
- list_for_each_safe(plist, ptmp, phead) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
+ list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
ret = xmitframe_enqueue_for_sleeping_sta23a(padapter, pxmitframe);
if (ret == true) {
@@ -2124,17 +2102,14 @@ void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta)
{
u8 update_mask = 0, wmmps_ac = 0;
struct sta_info *psta_bmc;
- struct list_head *plist, *phead, *ptmp;
- struct xmit_frame *pxmitframe = NULL;
+ struct list_head *phead;
+ struct xmit_frame *pxmitframe = NULL, *ptmp;
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
spin_lock_bh(&pxmitpriv->lock);
-
phead = get_list_head(&psta->sleep_q);
-
- list_for_each_safe(plist, ptmp, phead) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
+ list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
list_del_init(&pxmitframe->list);
switch (pxmitframe->attrib.priority) {
@@ -2194,7 +2169,6 @@ void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta)
pstapriv->sta_dz_bitmap &= ~CHKBIT(psta->aid);
}
-
/* spin_unlock_bh(&psta->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
@@ -2206,13 +2180,8 @@ void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta)
if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) {
/* no any sta in ps mode */
spin_lock_bh(&pxmitpriv->lock);
-
phead = get_list_head(&psta_bmc->sleep_q);
-
- list_for_each_safe(plist, ptmp, phead) {
- pxmitframe = container_of(plist, struct xmit_frame,
- list);
-
+ list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
list_del_init(&pxmitframe->list);
psta_bmc->sleepq_len--;
@@ -2232,7 +2201,6 @@ void wakeup_sta_to_xmit23a(struct rtw_adapter *padapter, struct sta_info *psta)
/* update_BCNTIM(padapter); */
update_mask |= BIT(1);
}
-
/* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
spin_unlock_bh(&pxmitpriv->lock);
}
@@ -2245,19 +2213,15 @@ void xmit_delivery_enabled_frames23a(struct rtw_adapter *padapter,
struct sta_info *psta)
{
u8 wmmps_ac = 0;
- struct list_head *plist, *phead, *ptmp;
- struct xmit_frame *pxmitframe;
+ struct list_head *phead;
+ struct xmit_frame *pxmitframe, *ptmp;
struct sta_priv *pstapriv = &padapter->stapriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
/* spin_lock_bh(&psta->sleep_q.lock); */
spin_lock_bh(&pxmitpriv->lock);
-
phead = get_list_head(&psta->sleep_q);
-
- list_for_each_safe(plist, ptmp, phead) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
+ list_for_each_entry_safe(pxmitframe, ptmp, phead, list) {
switch (pxmitframe->attrib.priority) {
case 1:
case 2:
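
One wrinkle visible in _rtw_free_xmit_priv23a above: the typed temporary must match the entry type, so a function draining lists of two different types now needs two cursors (ptmp, ptmp2) where a single generic struct list_head * pair used to cover both. Sketch with hypothetical types:

#include <linux/list.h>
#include <linux/slab.h>

struct frame { struct list_head list;  };
struct fbuf  { struct list_head list2; };

static void drain_both(struct list_head *frames, struct list_head *bufs)
{
	struct frame *f, *ftmp;	/* one typed cursor pair per entry type */
	struct fbuf *b, *btmp;

	list_for_each_entry_safe(f, ftmp, frames, list) {
		list_del_init(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(b, btmp, bufs, list2) {
		list_del_init(&b->list2);
		kfree(b);
	}
}
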
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
index e8cab9e97385..8d3ea6c0cbe6 100644
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_BB.c
@@ -219,7 +219,7 @@ void ODM_ReadAndConfig_AGC_TAB_1T_8723A(struct dm_odm_t *pDM_Odm)
u32 i;
u8 platform = 0x04;
u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = sizeof(Array_AGC_TAB_1T_8723A)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_AGC_TAB_1T_8723A);
u32 *Array = Array_AGC_TAB_1T_8723A;
hex = board;
@@ -467,7 +467,7 @@ void ODM_ReadAndConfig_PHY_REG_1T_8723A(struct dm_odm_t *pDM_Odm)
u32 i = 0;
u8 platform = 0x04;
u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = sizeof(Array_PHY_REG_1T_8723A)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_PHY_REG_1T_8723A);
u32 *Array = Array_PHY_REG_1T_8723A;
hex += board;
@@ -523,7 +523,7 @@ void ODM_ReadAndConfig_PHY_REG_MP_8723A(struct dm_odm_t *pDM_Odm)
u32 i;
u8 platform = 0x04;
u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = sizeof(Array_PHY_REG_MP_8723A)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_PHY_REG_MP_8723A);
u32 *Array = Array_PHY_REG_MP_8723A;
hex += board;
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
index 93b2d183d694..9bf685905e68 100644
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_MAC.c
@@ -145,7 +145,7 @@ void ODM_ReadAndConfig_MAC_REG_8723A(struct dm_odm_t *pDM_Odm)
u32 i = 0;
u8 platform = 0x04;
u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = sizeof(Array_MAC_REG_8723A)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_MAC_REG_8723A);
u32 *Array = Array_MAC_REG_8723A;
hex += board;
diff --git a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
index dbf571e8b908..286f3ea3d263 100644
--- a/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
+++ b/drivers/staging/rtl8723au/hal/HalHWImg8723A_RF.c
@@ -215,7 +215,7 @@ void ODM_ReadAndConfig_RadioA_1T_8723A(struct dm_odm_t *pDM_Odm)
u32 i = 0;
u8 platform = 0x04;
u8 board = pDM_Odm->BoardType;
- u32 ArrayLen = sizeof(Array_RadioA_1T_8723A)/sizeof(u32);
+ u32 ArrayLen = ARRAY_SIZE(Array_RadioA_1T_8723A);
u32 *Array = Array_RadioA_1T_8723A;
hex += board;
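
The HalHWImg hunks swap hand-written sizeof(arr)/sizeof(u32) for ARRAY_SIZE(), which derives the element size from the array itself and, in the kernel's definition, fails to build if handed a pointer rather than an array. A small sketch with a hypothetical register table:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

static const u32 reg_table[] = {
	0x800, 0x0000000c,
	0x804, 0x00000001,
};

static void write_table(void)
{
	u32 i;

	/* Count stays correct even if the element type later changes. */
	for (i = 0; i < ARRAY_SIZE(reg_table); i += 2)
		pr_info("reg 0x%x <- 0x%08x\n", reg_table[i], reg_table[i + 1]);
}
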
diff --git a/drivers/staging/rtl8723au/hal/hal_com.c b/drivers/staging/rtl8723au/hal/hal_com.c
index 530db57e8842..9d7b11b63957 100644
--- a/drivers/staging/rtl8723au/hal/hal_com.c
+++ b/drivers/staging/rtl8723au/hal/hal_com.c
@@ -328,7 +328,7 @@ int c2h_evt_read23a(struct rtw_adapter *adapter, u8 *buf)
if (trigger == C2H_EVT_HOST_CLOSE)
goto exit; /* Not ready */
- else if (trigger != C2H_EVT_FW_CLOSE)
+ if (trigger != C2H_EVT_FW_CLOSE)
goto clear_evt; /* Not a valid value */
c2h_evt = (struct c2h_evt_hdr *)buf;
diff --git a/drivers/staging/rtl8723au/hal/odm.c b/drivers/staging/rtl8723au/hal/odm.c
index 6b9dbeffafcb..e279c34b3fc6 100644
--- a/drivers/staging/rtl8723au/hal/odm.c
+++ b/drivers/staging/rtl8723au/hal/odm.c
@@ -185,7 +185,6 @@ void odm_CCKPacketDetectionThresh23a(struct dm_odm_t *pDM_Odm);
/* START-------BB POWER SAVE----------------------- */
void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm);
-void odm_DynamicBBPowerSaving23a(struct dm_odm_t *pDM_Odm);
/* END---------BB POWER SAVE----------------------- */
@@ -270,7 +269,6 @@ void ODM_DMWatchdog23a(struct rtw_adapter *adapter)
odm_RefreshRateAdaptiveMask(pDM_Odm);
- odm_DynamicBBPowerSaving23a(pDM_Odm);
odm_EdcaTurboCheck23a(pDM_Odm);
}
@@ -894,10 +892,6 @@ void odm23a_DynBBPSInit(struct dm_odm_t *pDM_Odm)
pDM_PSTable->initialize = 0;
}
-void odm_DynamicBBPowerSaving23a(struct dm_odm_t *pDM_Odm)
-{
- return;
-}
void ODM_RF_Saving23a(struct dm_odm_t *pDM_Odm, u8 bForceInNormal)
{
@@ -1274,7 +1268,7 @@ static void odm_RSSIMonitorCheck(struct dm_odm_t *pDM_Odm)
for (i = 0; i < sta_cnt; i++) {
if (PWDB_rssi[i] != (0))
- rtl8723a_set_rssi_cmd(Adapter, (u8 *)&PWDB_rssi[i]);
+ rtl8723a_set_rssi_cmd(Adapter, PWDB_rssi[i]);
}
pdmpriv->EntryMaxUndecoratedSmoothedPWDB = MaxDB;
diff --git a/drivers/staging/rtl8723au/hal/odm_HWConfig.c b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
index 7b9799e3dbda..0562f61bd1dc 100644
--- a/drivers/staging/rtl8723au/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8723au/hal/odm_HWConfig.c
@@ -270,10 +270,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct dm_odm_t *pDM_Odm,
}
}
-void odm_Init_RSSIForDM23a(struct dm_odm_t *pDM_Odm)
-{
-}
-
static void odm_Process_RSSIForDM(struct dm_odm_t *pDM_Odm,
struct phy_info *pPhyInfo,
struct odm_packet_info *pPktinfo)
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
index d5c48a56d4ac..bfcbd7a349cf 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_bt-coexist.c
@@ -77,12 +77,6 @@ if ((BTCoexDbgLevel == _bt_dbg_on_)) {\
#define PlatformZeroMemory(ptr, sz) memset(ptr, 0, sz)
-#define PlatformProcessHCICommands(...)
-#define PlatformTxBTQueuedPackets(...)
-#define PlatformIndicateBTACLData(...) (RT_STATUS_SUCCESS)
-#define PlatformAcquireSpinLock(padapter, type)
-#define PlatformReleaseSpinLock(padapter, type)
-
#define GET_UNDECORATED_AVERAGE_RSSI(padapter) \
(GET_HAL_DATA(padapter)->dmpriv.EntryMinUndecoratedSmoothedPWDB)
#define RT_RF_CHANGE_SOURCE u32
@@ -798,11 +792,7 @@ bthci_IndicateEvent(
u32 dataLen
)
{
- enum rt_status rt_status;
-
- rt_status = PlatformIndicateBTEvent(padapter, pEvntData, dataLen);
-
- return rt_status;
+ return PlatformIndicateBTEvent(padapter, pEvntData, dataLen);
}
static void
@@ -1454,21 +1444,11 @@ bthci_StartBeaconAndConnect(
}
if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_CREATOR) {
- snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, "AMP-%02x-%02x-%02x-%02x-%02x-%02x",
- padapter->eeprompriv.mac_addr[0],
- padapter->eeprompriv.mac_addr[1],
- padapter->eeprompriv.mac_addr[2],
- padapter->eeprompriv.mac_addr[3],
- padapter->eeprompriv.mac_addr[4],
- padapter->eeprompriv.mac_addr[5]);
+ snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32,
+ "AMP-%pMF", padapter->eeprompriv.mac_addr);
} else if (pBTInfo->BtAsocEntry[CurrentAssocNum].AMPRole == AMP_BTAP_JOINER) {
- snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32, "AMP-%02x-%02x-%02x-%02x-%02x-%02x",
- pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[0],
- pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[1],
- pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[2],
- pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[3],
- pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[4],
- pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr[5]);
+ snprintf((char *)pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 32,
+ "AMP-%pMF", pBTInfo->BtAsocEntry[CurrentAssocNum].BTRemoteMACAddr);
}
FillOctetString(pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsid, pBTInfo->BtAsocEntry[CurrentAssocNum].BTSsidBuf, 21);
@@ -2909,16 +2889,13 @@ bthci_CmdCreatePhysicalLink(
struct packet_irp_hcicmd_data *pHciCmd
)
{
- enum hci_status status;
struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
pBtDbg->dbgHciInfo.hciCmdCntCreatePhyLink++;
- status = bthci_BuildPhysicalLink(padapter,
+ return bthci_BuildPhysicalLink(padapter,
pHciCmd, HCI_CREATE_PHYSICAL_LINK);
-
- return status;
}
static enum hci_status
@@ -3184,16 +3161,13 @@ static enum hci_status
bthci_CmdAcceptPhysicalLink(struct rtw_adapter *padapter,
struct packet_irp_hcicmd_data *pHciCmd)
{
- enum hci_status status;
struct bt_30info *pBTInfo = GET_BT_INFO(padapter);
struct bt_dgb *pBtDbg = &pBTInfo->BtDbg;
pBtDbg->dbgHciInfo.hciCmdCntAcceptPhyLink++;
- status = bthci_BuildPhysicalLink(padapter,
+ return bthci_BuildPhysicalLink(padapter,
pHciCmd, HCI_ACCEPT_PHYSICAL_LINK);
-
- return status;
}
static enum hci_status
@@ -9475,10 +9449,8 @@ static void BTDM_Display8723ABtCoexInfo(struct rtw_adapter *padapter)
psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm1Ant.curPsTdma;
else
psTdmaCase = pHalData->bt_coexist.halCoex8723.btdm2Ant.curPsTdma;
- snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x case-%d", "PS TDMA(0x3a)", \
- pHalData->bt_coexist.fw3aVal[0], pHalData->bt_coexist.fw3aVal[1],
- pHalData->bt_coexist.fw3aVal[2], pHalData->bt_coexist.fw3aVal[3],
- pHalData->bt_coexist.fw3aVal[4], psTdmaCase);
+ snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %*ph case-%d",
+ "PS TDMA(0x3a)", 5, pHalData->bt_coexist.fw3aVal, psTdmaCase);
DCMD_Printf(btCoexDbgBuf);
snprintf(btCoexDbgBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d ", "Decrease Bt Power", \
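
The hex-dump and MAC-address rewrites above rely on kernel printk extensions: %*ph prints up to 64 bytes as space-separated hex (the field width gives the byte count), %*phC separates with colons, %pM prints a 6-byte MAC address with colons, and %pMF with dashes — the form used for the AMP SSID strings. A sketch with made-up data:

#include <linux/kernel.h>
#include <linux/types.h>

static void dump_examples(void)
{
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	pr_info("mac =%pM\n", mac);	/* 00:11:22:33:44:55 */
	pr_info("mac =%pMF\n", mac);	/* 00-11-22-33-44-55 */
	pr_info("buf =%*ph\n", 8, buf);	/* 01 02 03 04 05 06 07 08 */
	pr_info("buf =%*phC\n", 8, buf);	/* 01:02:03:04:05:06:07:08 */
}
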
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
index 1662c03c1323..2230f4c539ec 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_cmd.c
@@ -113,11 +113,11 @@ exit:
return ret;
}
-int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param)
+int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u32 param)
{
- *((u32 *)param) = cpu_to_le32(*((u32 *)param));
+ __le32 cmd = cpu_to_le32(param);
- FillH2CCmd(padapter, RSSI_SETTING_EID, 3, param);
+ FillH2CCmd(padapter, RSSI_SETTING_EID, 3, (void *)&cmd);
return _SUCCESS;
}
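
The rtl8723a_set_rssi_cmd change is more than style: the old code converted the value in place through a pointer cast, mutating the caller's buffer as a side effect. Taking the parameter by value and building a local __le32 makes the wire-endianness conversion explicit and side-effect free. A minimal sketch, with a hypothetical send_cmd() standing in for FillH2CCmd:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical transport: pushes 'len' raw bytes to the device. */
void send_cmd(const void *payload, int len);

static void set_param(u32 param)
{
	__le32 cmd = cpu_to_le32(param);	/* device expects little endian */

	send_cmd(&cmd, sizeof(cmd));	/* caller's 'param' stays untouched */
}
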
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index ecf54ee47f7c..e81301fcb01d 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -399,10 +399,8 @@ hal_ReadEFuse_WiFi(struct rtw_adapter *padapter,
}
efuseTbl = kmalloc(EFUSE_MAP_LEN_8723A, GFP_KERNEL);
- if (efuseTbl == NULL) {
- DBG_8723A("%s: alloc efuseTbl fail!\n", __func__);
+ if (!efuseTbl)
return;
- }
/* 0xff will be efuse default value instead of 0x00. */
memset(efuseTbl, 0xFF, EFUSE_MAP_LEN_8723A);
@@ -491,10 +489,8 @@ hal_ReadEFuse_BT(struct rtw_adapter *padapter,
}
efuseTbl = kmalloc(EFUSE_BT_MAP_LEN, GFP_KERNEL);
- if (efuseTbl == NULL) {
- DBG_8723A("%s: efuseTbl malloc fail!\n", __func__);
+ if (!efuseTbl)
return;
- }
/* 0xff will be efuse default value instead of 0x00. */
memset(efuseTbl, 0xFF, EFUSE_BT_MAP_LEN);
@@ -1044,7 +1040,7 @@ void rtl8723a_InitAntenna_Selection(struct rtw_adapter *padapter)
u8 val;
val = rtl8723au_read8(padapter, REG_LEDCFG2);
- /* Let 8051 take control antenna settting */
+ /* Let 8051 take control antenna setting */
val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
rtl8723au_write8(padapter, REG_LEDCFG2, val);
}
@@ -1054,7 +1050,7 @@ void rtl8723a_CheckAntenna_Selection(struct rtw_adapter *padapter)
u8 val;
val = rtl8723au_read8(padapter, REG_LEDCFG2);
- /* Let 8051 take control antenna settting */
+ /* Let 8051 take control antenna setting */
if (!(val & BIT(7))) {
val |= BIT(7); /* DPDT_SEL_EN, 0x4C[23] */
rtl8723au_write8(padapter, REG_LEDCFG2, val);
@@ -1066,7 +1062,7 @@ void rtl8723a_DeinitAntenna_Selection(struct rtw_adapter *padapter)
u8 val;
val = rtl8723au_read8(padapter, REG_LEDCFG2);
- /* Let 8051 take control antenna settting */
+ /* Let 8051 take control antenna setting */
val &= ~BIT(7); /* DPDT_SEL_EN, clear 0x4C[23] */
rtl8723au_write8(padapter, REG_LEDCFG2, val);
}
@@ -1297,7 +1293,7 @@ static void _ResetDigitalProcedure1_92C(struct rtw_adapter *padapter,
/* If we want to SS mode, we can not reset 8051. */
if ((val8 & BIT(1)) && padapter->bFWReady) {
/* IF fw in RAM code, do reset */
- /* 2010/08/25 MH Accordign to RD alfred's
+ /* 2010/08/25 MH According to RD alfred's
suggestion, we need to disable other */
/* HRCV INT to influence 8051 reset. */
rtl8723au_write8(padapter, REG_FWIMR, 0x20);
diff --git a/drivers/staging/rtl8723au/hal/usb_halinit.c b/drivers/staging/rtl8723au/hal/usb_halinit.c
index 9926b0790e75..fa47aebf8b98 100644
--- a/drivers/staging/rtl8723au/hal/usb_halinit.c
+++ b/drivers/staging/rtl8723au/hal/usb_halinit.c
@@ -736,8 +736,7 @@ int rtl8723au_hal_init(struct rtw_adapter *Adapter)
rtl8723a_InitHalDm(Adapter);
- val8 = (WiFiNavUpperUs + HAL_8723A_NAV_UPPER_UNIT - 1) /
- HAL_8723A_NAV_UPPER_UNIT;
+ val8 = DIV_ROUND_UP(WiFiNavUpperUs, HAL_8723A_NAV_UPPER_UNIT);
rtl8723au_write8(Adapter, REG_NAV_UPPER, val8);
/* 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, but we need to find the root cause. */
@@ -1021,10 +1020,8 @@ static void Hal_EfuseParseMACAddr_8723AU(struct rtw_adapter *padapter,
}
RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
- "Hal_EfuseParseMACAddr_8723AU: Permanent Address =%02x:%02x:%02x:%02x:%02x:%02x\n",
- pEEPROM->mac_addr[0], pEEPROM->mac_addr[1],
- pEEPROM->mac_addr[2], pEEPROM->mac_addr[3],
- pEEPROM->mac_addr[4], pEEPROM->mac_addr[5]);
+ "Hal_EfuseParseMACAddr_8723AU: Permanent Address =%pM\n",
+ pEEPROM->mac_addr);
}
static void readAdapterInfo(struct rtw_adapter *padapter)
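
(x + unit - 1) / unit is the usual round-up-division idiom; DIV_ROUND_UP(x, unit) from linux/kernel.h states the intent directly and avoids transcription slips. A small sketch with a hypothetical unit size:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

#define UNIT_US 128	/* hypothetical hardware time unit */

static unsigned int us_to_units(unsigned int us)
{
	/* 129 us -> 2 units; plain division would round down to 1 */
	return DIV_ROUND_UP(us, UNIT_US);
}
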
diff --git a/drivers/staging/rtl8723au/hal/usb_ops_linux.c b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
index 371e6b373420..5c81ff48252e 100644
--- a/drivers/staging/rtl8723au/hal/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/hal/usb_ops_linux.c
@@ -256,12 +256,8 @@ static void usb_read_interrupt_complete(struct urb *purb)
c2w = kmalloc(sizeof(struct evt_work),
GFP_ATOMIC);
- if (!c2w) {
- printk(KERN_WARNING "%s: unable to "
- "allocate work buffer\n",
- __func__);
+ if (!c2w)
goto urb_submit;
- }
c2w->adapter = padapter;
INIT_WORK(&c2w->work, rtw_evt_work);
diff --git a/drivers/staging/rtl8723au/include/odm_HWConfig.h b/drivers/staging/rtl8723au/include/odm_HWConfig.h
index ce7abe770b5a..c748d5fb47fa 100644
--- a/drivers/staging/rtl8723au/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8723au/include/odm_HWConfig.h
@@ -142,8 +142,6 @@ struct phy_status_rpt_8195 {
};
-void odm_Init_RSSIForDM23a(struct dm_odm_t *pDM_Odm);
-
void
ODM_PhyStatusQuery23a(
struct dm_odm_t *pDM_Odm,
diff --git a/drivers/staging/rtl8723au/include/osdep_service.h b/drivers/staging/rtl8723au/include/osdep_service.h
index dedb41874de5..98250b12e9f2 100644
--- a/drivers/staging/rtl8723au/include/osdep_service.h
+++ b/drivers/staging/rtl8723au/include/osdep_service.h
@@ -29,10 +29,10 @@
#include <linux/kref.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
+#include <linux/atomic.h>
+#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/sem.h>
#include <linux/sched.h>
diff --git a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
index 014c02edded6..f95535a915ab 100644
--- a/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
+++ b/drivers/staging/rtl8723au/include/rtl8723a_cmd.h
@@ -149,7 +149,7 @@ void rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(struct rtw_adapter *padapter);
#else
#define rtl8723a_set_BTCoex_AP_mode_FwRsvdPkt_cmd(padapter) do {} while(0)
#endif
-int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u8 *param);
+int rtl8723a_set_rssi_cmd(struct rtw_adapter *padapter, u32 param);
int rtl8723a_set_raid_cmd(struct rtw_adapter *padapter, u32 mask, u8 arg);
void rtl8723a_add_rateatid(struct rtw_adapter *padapter, u32 bitmap, u8 arg, u8 rssi_level);
diff --git a/drivers/staging/rtl8723au/include/rtw_ap.h b/drivers/staging/rtl8723au/include/rtw_ap.h
index 9f8d235c992f..55a708f9fc5b 100644
--- a/drivers/staging/rtl8723au/include/rtw_ap.h
+++ b/drivers/staging/rtl8723au/include/rtw_ap.h
@@ -36,8 +36,6 @@ int rtw_check_beacon_data23a(struct rtw_adapter *padapter,
struct ieee80211_mgmt *mgmt, unsigned int len);
void rtw_ap_restore_network(struct rtw_adapter *padapter);
void rtw_set_macaddr_acl23a(struct rtw_adapter *padapter, int mode);
-int rtw_acl_add_sta23a(struct rtw_adapter *padapter, u8 *addr);
-int rtw_acl_remove_sta23a(struct rtw_adapter *padapter, u8 *addr);
void associated_clients_update23a(struct rtw_adapter *padapter, u8 updated);
void bss_cap_update_on_sta_join23a(struct rtw_adapter *padapter, struct sta_info *psta);
@@ -46,7 +44,6 @@ void sta_info_update23a(struct rtw_adapter *padapter, struct sta_info *psta);
void ap_sta_info_defer_update23a(struct rtw_adapter *padapter, struct sta_info *psta);
u8 ap_free_sta23a(struct rtw_adapter *padapter, struct sta_info *psta, bool active, u16 reason);
int rtw_sta_flush23a(struct rtw_adapter *padapter);
-int rtw_ap_inform_ch_switch23a(struct rtw_adapter *padapter, u8 new_ch, u8 ch_offset);
void start_ap_mode23a(struct rtw_adapter *padapter);
void stop_ap_mode23a(struct rtw_adapter *padapter);
#endif /* end of CONFIG_8723AU_AP_MODE */
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 0ae2180a35b7..12d18440e824 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -458,7 +458,7 @@ static int set_group_key(struct rtw_adapter *padapter, struct key_params *parms,
pcmd->cmdcode = _SetKey_CMD_;
pcmd->parmbuf = (u8 *) psetkeyparm;
- pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->cmdsz = sizeof(struct setkey_parm);
pcmd->rsp = NULL;
pcmd->rspsz = 0;
@@ -543,7 +543,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
memcpy(psecuritypriv->
dot118021XGrpKey[key_index].skey,
keyparms->key,
- (key_len > 16 ? 16 : key_len));
+ (min(16, key_len)));
/* set mic key */
memcpy(psecuritypriv->
@@ -565,7 +565,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
memcpy(psecuritypriv->
dot118021XGrpKey[key_index].skey,
keyparms->key,
- (key_len > 16 ? 16 : key_len));
+ (min(16, key_len)));
} else {
DBG_8723A("%s, set group_key, none\n",
__func__);
@@ -603,7 +603,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
if (set_tx == 1) {
/* pairwise key */
memcpy(psta->dot118021x_UncstKey.skey,
- keyparms->key, (key_len > 16 ? 16 : key_len));
+ keyparms->key, (min(16, key_len)));
if (keyparms->cipher == WLAN_CIPHER_SUITE_WEP40 ||
keyparms->cipher == WLAN_CIPHER_SUITE_WEP104) {
@@ -661,7 +661,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
memcpy(psecuritypriv->
dot118021XGrpKey[key_index].skey,
keyparms->key,
- (key_len > 16 ? 16 : key_len));
+ (min(16, key_len)));
/* set mic key */
memcpy(psecuritypriv->
@@ -679,7 +679,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, u8 key_index,
memcpy(psecuritypriv->
dot118021XGrpKey[key_index].skey,
keyparms->key,
- (key_len > 16 ? 16 : key_len));
+ (min(16, key_len)));
} else {
psecuritypriv->dot118021XGrpPrivacy = 0;
}
@@ -789,7 +789,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, u8 key_index,
memcpy(psta->dot118021x_UncstKey.skey,
keyparms->key,
- (key_len > 16 ? 16 : key_len));
+ (min(16, key_len)));
if (keyparms->cipher ==
WLAN_CIPHER_SUITE_TKIP) {
@@ -812,7 +812,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, u8 key_index,
memcpy(padapter->securitypriv.
dot118021XGrpKey[key_index].skey,
keyparms->key,
- (key_len > 16 ? 16 : key_len));
+ (min(16, key_len)));
memcpy(padapter->securitypriv.
dot118021XGrptxmickey[key_index].
skey, &keyparms->key[16], 8);
@@ -1270,18 +1270,14 @@ void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter)
{
- struct list_head *plist, *phead, *ptmp;
+ struct list_head *phead;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct rtw_queue *queue = &pmlmepriv->scanned_queue;
- struct wlan_network *pnetwork;
+ struct wlan_network *pnetwork, *ptmp;
spin_lock_bh(&pmlmepriv->scanned_queue.lock);
-
phead = get_list_head(queue);
-
- list_for_each_safe(plist, ptmp, phead) {
- pnetwork = container_of(plist, struct wlan_network, list);
-
+ list_for_each_entry_safe(pnetwork, ptmp, phead, list) {
/* report network only if the current channel set
contains the channel to which this network belongs */
if (rtw_ch_set_search_ch23a
@@ -1289,7 +1285,6 @@ void rtw_cfg80211_surveydone_event_callback(struct rtw_adapter *padapter)
pnetwork->network.DSConfig) >= 0)
rtw_cfg80211_inform_bss(padapter, pnetwork);
}
-
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* call this after other things have been done */
@@ -2202,7 +2197,7 @@ static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
struct wireless_dev *wdev, int *dbm)
{
DBG_8723A("%s\n", __func__);
- *dbm = (12);
+ *dbm = 12;
return 0;
}
@@ -2615,8 +2610,6 @@ static int rtw_cfg80211_add_monitor_if(struct rtw_adapter *padapter, char *name,
/* wdev */
mon_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!mon_wdev) {
- DBG_8723A("%s(%s): allocate mon_wdev fail\n", __func__,
- padapter->pnetdev->name);
ret = -ENOMEM;
goto out;
}
@@ -2850,9 +2843,9 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy,
{
const u8 *mac = params->mac;
int ret = 0;
- struct list_head *phead, *plist, *ptmp;
+ struct list_head *phead;
u8 updated = 0;
- struct sta_info *psta;
+ struct sta_info *psta, *ptmp;
struct rtw_adapter *padapter = netdev_priv(ndev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -2881,13 +2874,9 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy,
return -EINVAL;
spin_lock_bh(&pstapriv->asoc_list_lock);
-
phead = &pstapriv->asoc_list;
-
/* check asoc_queue */
- list_for_each_safe(plist, ptmp, phead) {
- psta = container_of(plist, struct sta_info, asoc_list);
-
+ list_for_each_entry_safe(psta, ptmp, phead, asoc_list) {
if (ether_addr_equal(mac, psta->hwaddr)) {
if (psta->dot8021xalg == 1 &&
psta->bpairwise_key_installed == false) {
@@ -2912,7 +2901,6 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy,
}
}
}
-
spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update23a(padapter, updated);
@@ -3272,7 +3260,6 @@ int rtw_wdev_alloc(struct rtw_adapter *padapter, struct device *dev)
/* wdev */
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!wdev) {
- DBG_8723A("Couldn't allocate wireless device\n");
ret = -ENOMEM;
goto free_wiphy;
}
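
The key_len > 16 ? 16 : key_len ternaries in ioctl_cfg80211.c become min(16, key_len), clamping the copy to the 16-byte key field. The kernel's min() type-checks both operands; when the two sides differ in type, min_t() is the usual escape hatch. A sketch with hypothetical names:

#include <linux/kernel.h>	/* min(), min_t() */
#include <linux/string.h>
#include <linux/types.h>

#define KEY_FIELD_LEN 16

static void store_key(u8 *dst, const u8 *key, int key_len)
{
	/* Never copy past the fixed 16-byte key field. */
	memcpy(dst, key, min(KEY_FIELD_LEN, key_len));

	/* If key_len were unsigned, force a common type instead: */
	/* memcpy(dst, key, min_t(u32, KEY_FIELD_LEN, key_len)); */
}
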
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
index 0cdaef0a8c24..cf4a50618670 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -210,22 +210,21 @@ exit:
void rtl8723au_write_port_cancel(struct rtw_adapter *padapter)
{
struct xmit_buf *pxmitbuf;
- struct list_head *plist;
int j;
DBG_8723A("%s\n", __func__);
padapter->bWritePortCancel = true;
- list_for_each(plist, &padapter->xmitpriv.xmitbuf_list) {
- pxmitbuf = container_of(plist, struct xmit_buf, list2);
+ list_for_each_entry(pxmitbuf, &padapter->xmitpriv.xmitbuf_list,
+ list2) {
for (j = 0; j < 8; j++) {
if (pxmitbuf->pxmit_urb[j])
usb_kill_urb(pxmitbuf->pxmit_urb[j]);
}
}
- list_for_each(plist, &padapter->xmitpriv.xmitextbuf_list) {
- pxmitbuf = container_of(plist, struct xmit_buf, list2);
+ list_for_each_entry(pxmitbuf, &padapter->xmitpriv.xmitextbuf_list,
+ list2) {
for (j = 0; j < 8; j++) {
if (pxmitbuf->pxmit_urb[j])
usb_kill_urb(pxmitbuf->pxmit_urb[j]);
diff --git a/drivers/staging/rtl8723au/os_dep/xmit_linux.c b/drivers/staging/rtl8723au/os_dep/xmit_linux.c
index 9a14074ecec0..64be72ac38ee 100644
--- a/drivers/staging/rtl8723au/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/xmit_linux.c
@@ -37,7 +37,7 @@ int rtw_os_xmit_resource_alloc23a(struct rtw_adapter *padapter,
for (i = 0; i < 8; i++) {
pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
- if (pxmitbuf->pxmit_urb[i] == NULL) {
+ if (!pxmitbuf->pxmit_urb[i]) {
DBG_8723A("pxmitbuf->pxmit_urb[i]==NULL");
return _FAIL;
}
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index cdaa1aba50ed..a780185a3754 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -30,14 +30,14 @@
static inline void ms_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
ms_card->err_code = err_code;
}
static inline int ms_check_err_code(struct rtsx_chip *chip, u8 err_code)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
return (ms_card->err_code == err_code);
}
@@ -51,7 +51,7 @@ static int ms_parse_err_code(struct rtsx_chip *chip)
static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
u8 tpc, u8 cnt, u8 cfg)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
u8 *ptr;
@@ -185,7 +185,7 @@ static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode,
static int ms_write_bytes(struct rtsx_chip *chip,
u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
if (!data || (data_len < cnt)) {
@@ -255,7 +255,7 @@ static int ms_write_bytes(struct rtsx_chip *chip,
static int ms_read_bytes(struct rtsx_chip *chip,
u8 tpc, u8 cnt, u8 cfg, u8 *data, int data_len)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 *ptr;
@@ -369,7 +369,7 @@ static int ms_send_cmd(struct rtsx_chip *chip, u8 cmd, u8 cfg)
static int ms_set_init_para(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
if (CHK_HG8BIT(ms_card)) {
@@ -408,7 +408,7 @@ static int ms_set_init_para(struct rtsx_chip *chip)
static int ms_switch_clock(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
retval = select_card(chip, MS_CARD);
@@ -542,7 +542,7 @@ static int ms_pull_ctl_enable(struct rtsx_chip *chip)
static int ms_prepare_reset(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
u8 oc_mask = 0;
@@ -653,7 +653,7 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 val;
@@ -829,7 +829,7 @@ static int ms_switch_parallel_bus(struct rtsx_chip *chip)
static int ms_switch_8bit_bus(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 data[2];
@@ -873,7 +873,7 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip)
static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
for (i = 0; i < 3; i++) {
@@ -994,7 +994,7 @@ static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
static int ms_read_attribute_info(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 val, *buf, class_code, device_type, sub_class, data[16];
u16 total_blk = 0, blk_size = 0;
@@ -1039,7 +1039,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
}
buf = kmalloc(64 * 512, GFP_KERNEL);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return STATUS_ERROR;
}
@@ -1303,7 +1303,7 @@ static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
static int reset_ms_pro(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
#ifdef XC_POWERCLASS
u8 change_power_class;
@@ -1421,7 +1421,7 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
static int ms_read_extra_data(struct rtsx_chip *chip,
u16 block_addr, u8 page_num, u8 *buf, int buf_len)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 val, data[10];
@@ -1516,7 +1516,7 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
static int ms_write_extra_data(struct rtsx_chip *chip,
u16 block_addr, u8 page_num, u8 *buf, int buf_len)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 val, data[16];
@@ -1585,7 +1585,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip,
static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
u8 val, data[6];
@@ -1670,7 +1670,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
u8 val, data[8], extra[MS_EXTRA_SIZE];
@@ -1741,7 +1741,7 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i = 0;
u8 val, data[6];
@@ -1862,7 +1862,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
u16 log_blk, u8 start_page, u8 end_page)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
bool uncorrect_flag = false;
int retval, rty_cnt;
u8 extra[MS_EXTRA_SIZE], val, i, j, data[16];
@@ -2155,7 +2155,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
static int reset_ms(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
u16 i, reg_addr, block_size;
u8 val, extra[MS_EXTRA_SIZE], j, *ptr;
@@ -2394,7 +2394,7 @@ RE_SEARCH:
static int ms_init_l2p_tbl(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int size, i, seg_no, retval;
u16 defect_block, reg_addr;
u8 val1, val2;
@@ -2405,7 +2405,7 @@ static int ms_init_l2p_tbl(struct rtsx_chip *chip)
size = ms_card->segment_cnt * sizeof(struct zone_entry);
ms_card->segment = vzalloc(size);
- if (ms_card->segment == NULL) {
+ if (!ms_card->segment) {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2457,20 +2457,18 @@ static int ms_init_l2p_tbl(struct rtsx_chip *chip)
return STATUS_SUCCESS;
INIT_FAIL:
- if (ms_card->segment) {
- vfree(ms_card->segment);
- ms_card->segment = NULL;
- }
+ vfree(ms_card->segment);
+ ms_card->segment = NULL;
return STATUS_FAIL;
}
static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
- if (ms_card->segment == NULL)
+ if (!ms_card->segment)
return 0xFFFF;
segment = &(ms_card->segment[seg_no]);
@@ -2484,10 +2482,10 @@ static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
static void ms_set_l2p_tbl(struct rtsx_chip *chip,
int seg_no, u16 log_off, u16 phy_blk)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
- if (ms_card->segment == NULL)
+ if (!ms_card->segment)
return;
segment = &(ms_card->segment[seg_no]);
@@ -2497,7 +2495,7 @@ static void ms_set_l2p_tbl(struct rtsx_chip *chip,
static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
int seg_no;
@@ -2513,7 +2511,7 @@ static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk)
static u16 ms_get_unused_block(struct rtsx_chip *chip, int seg_no)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
u16 phy_blk;
@@ -2540,7 +2538,7 @@ static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
u16 log_off, u8 us1, u8 us2)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
int seg_no;
u16 tmp_blk;
@@ -2582,7 +2580,7 @@ static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
struct zone_entry *segment;
bool defect_flag;
int retval, table_size, disable_cnt, i;
@@ -2591,7 +2589,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
dev_dbg(rtsx_dev(chip), "ms_build_l2p_tbl: %d\n", seg_no);
- if (ms_card->segment == NULL) {
+ if (!ms_card->segment) {
retval = ms_init_l2p_tbl(chip);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
@@ -2612,18 +2610,18 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
segment = &(ms_card->segment[seg_no]);
- if (segment->l2p_table == NULL) {
+ if (!segment->l2p_table) {
segment->l2p_table = vmalloc(table_size * 2);
- if (segment->l2p_table == NULL) {
+ if (!segment->l2p_table) {
rtsx_trace(chip);
goto BUILD_FAIL;
}
}
memset((u8 *)(segment->l2p_table), 0xff, table_size * 2);
- if (segment->free_table == NULL) {
+ if (!segment->free_table) {
segment->free_table = vmalloc(MS_FREE_TABLE_CNT * 2);
- if (segment->free_table == NULL) {
+ if (!segment->free_table) {
rtsx_trace(chip);
goto BUILD_FAIL;
}
@@ -2803,14 +2801,10 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
BUILD_FAIL:
segment->build_flag = 0;
- if (segment->l2p_table) {
- vfree(segment->l2p_table);
- segment->l2p_table = NULL;
- }
- if (segment->free_table) {
- vfree(segment->free_table);
- segment->free_table = NULL;
- }
+ vfree(segment->l2p_table);
+ segment->l2p_table = NULL;
+ vfree(segment->free_table);
+ segment->free_table = NULL;
return STATUS_FAIL;
}
@@ -2818,7 +2812,7 @@ BUILD_FAIL:
int reset_ms_card(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
memset(ms_card, 0, sizeof(struct ms_info));
@@ -2905,7 +2899,7 @@ static int mspro_set_rw_cmd(struct rtsx_chip *chip,
void mspro_stop_seq_mode(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
if (ms_card->seq_mode) {
@@ -2923,7 +2917,7 @@ void mspro_stop_seq_mode(struct rtsx_chip *chip)
static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
if (chip->asic_code) {
@@ -2949,7 +2943,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
struct rtsx_chip *chip, u32 start_sector,
u16 sector_cnt)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
bool mode_2k = false;
int retval;
u16 count;
@@ -3092,7 +3086,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
static int mspro_read_format_progress(struct rtsx_chip *chip,
const int short_data_len)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u32 total_progress, cur_progress;
u8 cnt, tmp;
@@ -3214,7 +3208,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
void mspro_polling_format_status(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int i;
if (ms_card->pro_under_formatting &&
@@ -3232,7 +3226,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
int short_data_len, bool quick_format)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 buf[8], tmp;
u16 para;
@@ -3324,7 +3318,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
u8 *buf, unsigned int *index,
unsigned int *offset)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 extra[MS_EXTRA_SIZE], page_addr, val, trans_cfg, data[6];
u8 *ptr;
@@ -3508,7 +3502,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
u8 end_page, u8 *buf, unsigned int *index,
unsigned int *offset)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, i;
u8 page_addr, val, data[16];
u8 *ptr;
@@ -3729,7 +3723,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
u16 log_blk, u8 page_off)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval, seg_no;
retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
@@ -3775,7 +3769,7 @@ static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
#ifdef MS_DELAY_WRITE
int ms_delay_write(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
struct ms_delay_write_tag *delay_write = &(ms_card->delay_write);
int retval;
@@ -3814,7 +3808,7 @@ static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
u32 start_sector, u16 sector_cnt)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
unsigned int lun = SCSI_LUN(srb);
int retval, seg_no;
unsigned int index = 0, offset = 0;
@@ -4075,7 +4069,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
u32 start_sector, u16 sector_cnt)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
if (CHK_MSPRO(ms_card))
@@ -4091,19 +4085,15 @@ int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
void ms_free_l2p_tbl(struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int i = 0;
if (ms_card->segment != NULL) {
for (i = 0; i < ms_card->segment_cnt; i++) {
- if (ms_card->segment[i].l2p_table != NULL) {
- vfree(ms_card->segment[i].l2p_table);
- ms_card->segment[i].l2p_table = NULL;
- }
- if (ms_card->segment[i].free_table != NULL) {
- vfree(ms_card->segment[i].free_table);
- ms_card->segment[i].free_table = NULL;
- }
+ vfree(ms_card->segment[i].l2p_table);
+ ms_card->segment[i].l2p_table = NULL;
+ vfree(ms_card->segment[i].free_table);
+ ms_card->segment[i].free_table = NULL;
}
vfree(ms_card->segment);
ms_card->segment = NULL;
@@ -4351,7 +4341,7 @@ GetEKBFinish:
int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
int bufflen;
int i;
@@ -4435,7 +4425,7 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
int bufflen;
unsigned int lun = SCSI_LUN(srb);
@@ -4495,7 +4485,7 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
int i;
int bufflen;
@@ -4547,7 +4537,7 @@ int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
int bufflen;
unsigned int lun = SCSI_LUN(srb);
@@ -4604,7 +4594,7 @@ GetICVFinish:
int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- struct ms_info *ms_card = &(chip->ms_card);
+ struct ms_info *ms_card = &chip->ms_card;
int retval;
int bufflen;
#ifdef MG_SET_ICV_SLOW
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 1fe8e3e0a3fb..25d095a5ade7 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -320,7 +320,6 @@ static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
rtsx_do_before_power_down(chip, PM_S3);
if (dev->irq >= 0) {
- synchronize_irq(dev->irq);
free_irq(dev->irq, (void *)dev);
dev->irq = -1;
}
@@ -398,7 +397,6 @@ static void rtsx_shutdown(struct pci_dev *pci)
rtsx_do_before_power_down(chip, PM_S1);
if (dev->irq >= 0) {
- synchronize_irq(dev->irq);
free_irq(dev->irq, (void *)dev);
dev->irq = -1;
}
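/*
 * Rationale for dropping synchronize_irq() in the two hunks above:
 * free_irq() does not return until any executing handler for that IRQ
 * has completed, so the explicit synchronization before it was
 * redundant. Sketch of the resulting teardown (names as in the driver):
 */
#include <linux/interrupt.h>

static void irq_teardown_sketch(struct rtsx_dev *dev)
{
	if (dev->irq >= 0) {
		free_irq(dev->irq, dev);	/* waits for running handlers */
		dev->irq = -1;			/* mark the line as released */
	}
}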
@@ -658,9 +656,6 @@ static void rtsx_release_resources(struct rtsx_dev *dev)
if (dev->remap_addr)
iounmap(dev->remap_addr);
- pci_disable_device(dev->pci);
- pci_release_regions(dev->pci);
-
rtsx_release_chip(dev->chip);
kfree(dev->chip);
}
@@ -715,7 +710,7 @@ static void release_everything(struct rtsx_dev *dev)
/* Thread to carry out delayed SCSI-device scanning */
static int rtsx_scan_thread(void *__dev)
{
- struct rtsx_dev *dev = (struct rtsx_dev *)__dev;
+ struct rtsx_dev *dev = __dev;
struct rtsx_chip *chip = dev->chip;
/* Wait for the timeout to expire or for a disconnect */
@@ -852,7 +847,7 @@ static int rtsx_probe(struct pci_dev *pci,
dev_dbg(&pci->dev, "Realtek PCI-E card reader detected\n");
- err = pci_enable_device(pci);
+ err = pcim_enable_device(pci);
if (err < 0) {
dev_err(&pci->dev, "PCI enable device failed!\n");
return err;
@@ -862,7 +857,6 @@ static int rtsx_probe(struct pci_dev *pci,
if (err < 0) {
dev_err(&pci->dev, "PCI request regions for %s failed!\n",
CR_DRIVER_NAME);
- pci_disable_device(pci);
return err;
}
@@ -873,8 +867,6 @@ static int rtsx_probe(struct pci_dev *pci,
host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
if (!host) {
dev_err(&pci->dev, "Unable to allocate the scsi host\n");
- pci_release_regions(pci);
- pci_disable_device(pci);
return -ENOMEM;
}
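/*
 * The probe hunks above switch to managed PCI resources: after
 * pcim_enable_device(), the device is flagged as managed, so the PCI
 * core disables it and releases any requested regions automatically
 * when probe fails or the device is unbound. That is why the manual
 * pci_release_regions()/pci_disable_device() unwind calls can go.
 * Minimal sketch of the pattern (the name string is illustrative):
 */
#include <linux/pci.h>

static int managed_probe_sketch(struct pci_dev *pci,
				const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pci);
	if (err < 0)
		return err;	/* nothing to unwind */

	err = pci_request_regions(pci, "rts5208-example");
	if (err < 0)
		return err;	/* devres disables the device for us */

	return 0;
}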
@@ -882,7 +874,7 @@ static int rtsx_probe(struct pci_dev *pci,
memset(dev, 0, sizeof(struct rtsx_dev));
dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
- if (dev->chip == NULL) {
+ if (!dev->chip) {
err = -ENOMEM;
goto errout;
}
@@ -903,7 +895,7 @@ static int rtsx_probe(struct pci_dev *pci,
(unsigned int)pci_resource_len(pci, 0));
dev->addr = pci_resource_start(pci, 0);
dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
- if (dev->remap_addr == NULL) {
+ if (!dev->remap_addr) {
dev_err(&pci->dev, "ioremap error\n");
err = -ENXIO;
goto errout;
@@ -918,7 +910,7 @@ static int rtsx_probe(struct pci_dev *pci,
dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
&dev->rtsx_resv_buf_addr, GFP_KERNEL);
- if (dev->rtsx_resv_buf == NULL) {
+ if (!dev->rtsx_resv_buf) {
dev_err(&pci->dev, "alloc dma buffer fail\n");
err = -ENXIO;
goto errout;
@@ -1011,8 +1003,6 @@ static void rtsx_remove(struct pci_dev *pci)
quiesce_and_remove_host(dev);
release_everything(dev);
-
- pci_set_drvdata(pci, NULL);
}
/* PCI IDs */
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 60871f3022b1..d2031044ea34 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -507,7 +507,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
buf = vmalloc(scsi_bufflen(srb));
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -644,7 +644,7 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
buf = vmalloc(scsi_bufflen(srb));
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -792,7 +792,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#endif
buf = kmalloc(dataSize, GFP_KERNEL);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1017,7 +1017,7 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf_len = (scsi_bufflen(srb) > 12) ? 0x14 : 12;
buf = kmalloc(buf_len, GFP_KERNEL);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1096,7 +1096,7 @@ static int read_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
buf = kmalloc(8, GFP_KERNEL);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1206,7 +1206,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb),
len);
buf = vmalloc(len);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1315,7 +1315,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
buf = vmalloc(len);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1410,7 +1410,7 @@ static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
clear = srb->cmnd[2];
buf = vmalloc(scsi_bufflen(srb));
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -1931,20 +1931,15 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
static int suit_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
- int result;
-
switch (srb->cmnd[3]) {
case INIT_BATCHCMD:
case ADD_BATCHCMD:
case SEND_BATCHCMD:
case GET_BATCHRSP:
- result = rw_mem_cmd_buf(srb, chip);
- break;
+ return rw_mem_cmd_buf(srb, chip);
default:
- result = TRANSPORT_ERROR;
+ return TRANSPORT_ERROR;
}
-
- return result;
}
static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
@@ -2035,7 +2030,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len);
buf = vmalloc(len);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2191,7 +2186,7 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len = (unsigned short)min_t(unsigned int, scsi_bufflen(srb), len);
buf = vmalloc(len);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -2295,7 +2290,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
len = (u8)min_t(unsigned int, scsi_bufflen(srb), len);
buf = vmalloc(len);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index f27491e802ed..4d8e7c5c26d5 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -1,4 +1,5 @@
-/* Driver for Realtek PCI-Express card reader
+/*
+ * Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
@@ -30,74 +31,76 @@
* Scatter-gather transfer buffer access routines
***********************************************************************/
-/* Copy a buffer of length buflen to/from the srb's transfer buffer.
+/*
+ * Copy a buffer of length buflen to/from the srb's transfer buffer.
* (Note: for scatter-gather transfers (srb->use_sg > 0), srb->request_buffer
* points to a list of s-g entries and we ignore srb->request_bufflen.
* For non-scatter-gather transfers, srb->request_buffer points to the
* transfer buffer itself and srb->request_bufflen is the buffer's length.)
* Update the *index and *offset variables so that the next copy will
- * pick up from where this one left off. */
+ * pick up from where this one left off.
+ */
unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
- unsigned int *offset, enum xfer_buf_dir dir)
+ unsigned int buflen,
+ struct scsi_cmnd *srb,
+ unsigned int *index,
+ unsigned int *offset,
+ enum xfer_buf_dir dir)
{
unsigned int cnt;
- /* If not using scatter-gather, just transfer the data directly.
- * Make certain it will fit in the available buffer space. */
+ /* If not using scatter-gather, just transfer the data directly. */
if (scsi_sg_count(srb) == 0) {
+ unsigned char *sgbuffer;
+
if (*offset >= scsi_bufflen(srb))
return 0;
cnt = min(buflen, scsi_bufflen(srb) - *offset);
+
+ sgbuffer = (unsigned char *)scsi_sglist(srb) + *offset;
+
if (dir == TO_XFER_BUF)
- memcpy((unsigned char *) scsi_sglist(srb) + *offset,
- buffer, cnt);
+ memcpy(sgbuffer, buffer, cnt);
else
- memcpy(buffer, (unsigned char *) scsi_sglist(srb) +
- *offset, cnt);
+ memcpy(buffer, sgbuffer, cnt);
*offset += cnt;
- /* Using scatter-gather. We have to go through the list one entry
+ /*
+ * Using scatter-gather. We have to go through the list one entry
* at a time. Each s-g entry contains some number of pages, and
- * each page has to be kmap()'ed separately. If the page is already
- * in kernel-addressable memory then kmap() will return its address.
- * If the page is not directly accessible -- such as a user buffer
- * located in high memory -- then kmap() will map it to a temporary
- * position in the kernel's virtual address space. */
+ * each page has to be kmap()'ed separately.
+ */
} else {
struct scatterlist *sg =
- (struct scatterlist *) scsi_sglist(srb)
+ (struct scatterlist *)scsi_sglist(srb)
+ *index;
- /* This loop handles a single s-g list entry, which may
+ /*
+ * This loop handles a single s-g list entry, which may
* include multiple pages. Find the initial page structure
* and the starting offset within the page, and update
- * the *offset and *index values for the next loop. */
+ * the *offset and *index values for the next loop.
+ */
cnt = 0;
while (cnt < buflen && *index < scsi_sg_count(srb)) {
struct page *page = sg_page(sg) +
((sg->offset + *offset) >> PAGE_SHIFT);
- unsigned int poff =
- (sg->offset + *offset) & (PAGE_SIZE-1);
+ unsigned int poff = (sg->offset + *offset) &
+ (PAGE_SIZE - 1);
unsigned int sglen = sg->length - *offset;
if (sglen > buflen - cnt) {
-
/* Transfer ends within this s-g entry */
sglen = buflen - cnt;
*offset += sglen;
} else {
-
/* Transfer continues to next s-g entry */
*offset = 0;
++*index;
++sg;
}
- /* Transfer the data for all the pages in this
- * s-g entry. For each page: call kmap(), do the
- * transfer, and call kunmap() immediately after. */
while (sglen > 0) {
unsigned int plen = min(sglen, (unsigned int)
PAGE_SIZE - poff);
@@ -122,10 +125,12 @@ unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
return cnt;
}
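/*
 * Condensed sketch of the scatter-gather branch rewritten above: each
 * s-g entry may span several pages, and every page is kmap()'ed only
 * for the duration of its memcpy. The page/offset arithmetic matches
 * the loop body; bookkeeping and the copy direction switch are omitted,
 * and the helper is illustrative, not part of the driver.
 */
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void copy_one_sg_page(struct scatterlist *sg, unsigned int offset,
			     unsigned char *dst, unsigned int len)
{
	struct page *page = sg_page(sg) +
		((sg->offset + offset) >> PAGE_SHIFT);
	unsigned int poff = (sg->offset + offset) & (PAGE_SIZE - 1);
	unsigned char *vaddr = kmap(page);

	/* len must not extend past the end of this page */
	memcpy(dst, vaddr + poff, len);
	kunmap(page);
}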
-/* Store the contents of buffer into srb's transfer buffer and set the
-* SCSI residue. */
+/*
+ * Store the contents of buffer into srb's transfer buffer and set the
+ * SCSI residue.
+ */
void rtsx_stor_set_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb)
+ unsigned int buflen, struct scsi_cmnd *srb)
{
unsigned int index = 0, offset = 0;
@@ -136,7 +141,7 @@ void rtsx_stor_set_xfer_buf(unsigned char *buffer,
}
void rtsx_stor_get_xfer_buf(unsigned char *buffer,
- unsigned int buflen, struct scsi_cmnd *srb)
+ unsigned int buflen, struct scsi_cmnd *srb)
{
unsigned int index = 0, offset = 0;
@@ -146,12 +151,12 @@ void rtsx_stor_get_xfer_buf(unsigned char *buffer,
scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
}
-
/***********************************************************************
* Transport routines
***********************************************************************/
-/* Invoke the transport and basic error-handling/recovery methods
+/*
+ * Invoke the transport and basic error-handling/recovery methods
*
* This is used to send the message to the device and receive the response.
*/
@@ -161,20 +166,21 @@ void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
result = rtsx_scsi_handler(srb, chip);
- /* if the command gets aborted by the higher layers, we need to
- * short-circuit all other processing
+ /*
+ * if the command gets aborted by the higher layers, we need to
+ * short-circuit all other processing.
*/
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
dev_dbg(rtsx_dev(chip), "-- command was aborted\n");
srb->result = DID_ABORT << 16;
- goto Handle_Errors;
+ goto handle_errors;
}
/* if there is a transport error, reset and don't auto-sense */
if (result == TRANSPORT_ERROR) {
dev_dbg(rtsx_dev(chip), "-- transport indicates error, resetting\n");
srb->result = DID_ERROR << 16;
- goto Handle_Errors;
+ goto handle_errors;
}
srb->result = SAM_STAT_GOOD;
@@ -188,21 +194,18 @@ void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip)
/* set the result so the higher layers expect this data */
srb->result = SAM_STAT_CHECK_CONDITION;
memcpy(srb->sense_buffer,
- (unsigned char *)&(chip->sense_buffer[SCSI_LUN(srb)]),
- sizeof(struct sense_data_t));
+ (unsigned char *)&chip->sense_buffer[SCSI_LUN(srb)],
+ sizeof(struct sense_data_t));
}
return;
- /* Error and abort processing: try to resynchronize with the device
- * by issuing a port reset. If that fails, try a class-specific
- * device reset. */
-Handle_Errors:
+handle_errors:
return;
}
void rtsx_add_cmd(struct rtsx_chip *chip,
- u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
u32 *cb = (u32 *)(chip->host_cmds_ptr);
u32 val = 0;
@@ -221,7 +224,7 @@ void rtsx_add_cmd(struct rtsx_chip *chip,
void rtsx_send_cmd_no_wait(struct rtsx_chip *chip)
{
- u32 val = 1 << 31;
+ u32 val = BIT(31);
rtsx_writel(chip, RTSX_HCBAR, chip->host_cmds_addr);
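/*
 * Why BIT(31) instead of 1 << 31: the literal 1 is a signed int, and
 * left-shifting a 1 into the sign bit is undefined behaviour in C.
 * BIT(n) from <linux/bitops.h> expands to (1UL << (n)), which is well
 * defined; the low 32 bits assigned to val are the same 0x80000000.
 */
#include <linux/bitops.h>

static u32 trigger_bit_sketch(void)
{
	return BIT(31);		/* 1UL << 31, truncated to 0x80000000 */
}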
@@ -235,7 +238,7 @@ int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout)
{
struct rtsx_dev *rtsx = chip->rtsx;
struct completion trans_done;
- u32 val = 1 << 31;
+ u32 val = BIT(31);
long timeleft;
int err = 0;
@@ -321,9 +324,11 @@ static inline void rtsx_add_sg_tbl(
}
static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
- struct scatterlist *sg, int num_sg, unsigned int *index,
- unsigned int *offset, int size,
- enum dma_data_direction dma_dir, int timeout)
+ struct scatterlist *sg, int num_sg,
+ unsigned int *index,
+ unsigned int *offset, int size,
+ enum dma_data_direction dma_dir,
+ int timeout)
{
struct rtsx_dev *rtsx = chip->rtsx;
struct completion trans_done;
@@ -334,7 +339,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
struct scatterlist *sg_ptr;
u32 val = TRIG_DMA;
- if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
+ if (!sg || (num_sg <= 0) || !offset || !index)
return -EIO;
if (dma_dir == DMA_TO_DEVICE)
@@ -363,15 +368,16 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
spin_unlock_irq(&rtsx->reg_lock);
- sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+ sg_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
resid = size;
sg_ptr = sg;
chip->sgi = 0;
- /* Usually the next entry will be @sg@ + 1, but if this sg element
+ /*
+ * Usually the next entry will be @sg@ + 1, but if this sg element
* is part of a chained scatterlist, it could jump to the start of
* a new scatterlist array. So here we use sg_next to move to
- * the proper sg
+ * the proper sg.
*/
for (i = 0; i < *index; i++)
sg_ptr = sg_next(sg_ptr);
@@ -476,7 +482,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
out:
rtsx->done = NULL;
rtsx->trans_state = STATE_TRANS_NONE;
- dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+ dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
if (err < 0)
rtsx_stop_cmd(chip, card);
@@ -485,8 +491,9 @@ out:
}
static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
- struct scatterlist *sg, int num_sg,
- enum dma_data_direction dma_dir, int timeout)
+ struct scatterlist *sg, int num_sg,
+ enum dma_data_direction dma_dir,
+ int timeout)
{
struct rtsx_dev *rtsx = chip->rtsx;
struct completion trans_done;
@@ -496,7 +503,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
long timeleft;
struct scatterlist *sg_ptr;
- if ((sg == NULL) || (num_sg <= 0))
+ if (!sg || (num_sg <= 0))
return -EIO;
if (dma_dir == DMA_TO_DEVICE)
@@ -525,7 +532,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
spin_unlock_irq(&rtsx->reg_lock);
- buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+ buf_cnt = dma_map_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
sg_ptr = sg;
@@ -623,7 +630,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
out:
rtsx->done = NULL;
rtsx->trans_state = STATE_TRANS_NONE;
- dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
+ dma_unmap_sg(&rtsx->pci->dev, sg, num_sg, dma_dir);
if (err < 0)
rtsx_stop_cmd(chip, card);
@@ -632,17 +639,18 @@ out:
}
static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
- size_t len, enum dma_data_direction dma_dir, int timeout)
+ size_t len, enum dma_data_direction dma_dir,
+ int timeout)
{
struct rtsx_dev *rtsx = chip->rtsx;
struct completion trans_done;
dma_addr_t addr;
u8 dir;
int err = 0;
- u32 val = 1 << 31;
+ u32 val = BIT(31);
long timeleft;
- if ((buf == NULL) || (len <= 0))
+ if (!buf || (len <= 0))
return -EIO;
if (dma_dir == DMA_TO_DEVICE)
@@ -652,8 +660,8 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
else
return -ENXIO;
- addr = dma_map_single(&(rtsx->pci->dev), buf, len, dma_dir);
- if (!addr)
+ addr = dma_map_single(&rtsx->pci->dev, buf, len, dma_dir);
+ if (dma_mapping_error(&rtsx->pci->dev, addr))
return -ENOMEM;
if (card == SD_CARD)
@@ -706,7 +714,7 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
out:
rtsx->done = NULL;
rtsx->trans_state = STATE_TRANS_NONE;
- dma_unmap_single(&(rtsx->pci->dev), addr, len, dma_dir);
+ dma_unmap_single(&rtsx->pci->dev, addr, len, dma_dir);
if (err < 0)
rtsx_stop_cmd(chip, card);
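/*
 * The rewritten error check above is the substantive part of this hunk:
 * dma_map_single() does not return NULL on failure, and 0 can be a
 * valid bus address on some platforms, so the only portable failure
 * test is dma_mapping_error(). Sketch of the pattern:
 */
#include <linux/dma-mapping.h>

static int map_buf_sketch(struct device *dev, void *buf, size_t len,
			  dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* addr is unusable; nothing to unmap */

	*out = addr;
	return 0;
}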
@@ -715,9 +723,9 @@ out:
}
int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
- void *buf, size_t len, int use_sg, unsigned int *index,
- unsigned int *offset, enum dma_data_direction dma_dir,
- int timeout)
+ void *buf, size_t len, int use_sg,
+ unsigned int *index, unsigned int *offset,
+ enum dma_data_direction dma_dir, int timeout)
{
int err = 0;
@@ -725,13 +733,16 @@ int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
if (rtsx_chk_stat(chip, RTSX_STAT_ABORT))
return -EIO;
- if (use_sg)
- err = rtsx_transfer_sglist_adma_partial(chip, card,
- (struct scatterlist *)buf, use_sg,
- index, offset, (int)len, dma_dir, timeout);
- else
+ if (use_sg) {
+ struct scatterlist *sg = buf;
+
+ err = rtsx_transfer_sglist_adma_partial(chip, card, sg, use_sg,
+ index, offset, (int)len,
+ dma_dir, timeout);
+ } else {
err = rtsx_transfer_buf(chip, card,
buf, len, dma_dir, timeout);
+ }
if (err < 0) {
if (RTSX_TST_DELINK(chip)) {
RTSX_CLR_DELINK(chip);
@@ -744,7 +755,7 @@ int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
}
int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
- int use_sg, enum dma_data_direction dma_dir, int timeout)
+ int use_sg, enum dma_data_direction dma_dir, int timeout)
{
int err = 0;
@@ -756,8 +767,8 @@ int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
if (use_sg) {
err = rtsx_transfer_sglist_adma(chip, card,
- (struct scatterlist *)buf,
- use_sg, dma_dir, timeout);
+ (struct scatterlist *)buf,
+ use_sg, dma_dir, timeout);
} else {
err = rtsx_transfer_buf(chip, card, buf, len, dma_dir, timeout);
}
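/*
 * Reading note for the two wrappers above: use_sg doubles as both a
 * flag and the scatterlist entry count. When it is non-zero, buf
 * actually points at a struct scatterlist array (hence the local
 * variable introduced in the partial variant); when it is zero, buf is
 * a flat kernel buffer of len bytes handed to rtsx_transfer_buf().
 */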
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index d6c498209b2c..87d697623cba 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -303,7 +303,7 @@ static int sd_read_data(struct rtsx_chip *chip,
if (cmd_len) {
dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n", cmd[0] - 0x40);
- for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++)
+ for (i = 0; i < (min(cmd_len, 6)); i++)
rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0 + i,
0xFF, cmd[i]);
}
@@ -383,7 +383,7 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
if (cmd_len) {
dev_dbg(rtsx_dev(chip), "SD/MMC CMD %d\n", cmd[0] - 0x40);
- for (i = 0; i < (cmd_len < 6 ? cmd_len : 6); i++) {
+ for (i = 0; i < (min(cmd_len, 6)); i++) {
rtsx_add_cmd(chip, WRITE_REG_CMD,
REG_SD_CMD0 + i, 0xFF, cmd[i]);
}
@@ -4260,10 +4260,10 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
}
- if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) ||
- (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) ||
- (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) ||
- (0x64 != srb->cmnd[8])) {
+ if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
+ (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+ (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+ (srb->cmnd[8] != 0x64)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
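/*
 * The seven constants compared above are the ASCII bytes of "SD Card"
 * ('S' 0x53, 'D' 0x44, ' ' 0x20, 'C' 0x43, 'a' 0x61, 'r' 0x72,
 * 'd' 0x64); the patch only reorders each comparison into
 * constant-on-the-right form. An equivalent check, shown purely as a
 * sketch, could use memcmp():
 */
#include <linux/string.h>
#include <linux/types.h>

static bool pass_thru_signature_ok(const u8 *cmnd)
{
	return memcmp(&cmnd[2], "SD Card", 7) == 0;
}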
@@ -4284,7 +4284,7 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
}
- buf[5] = (1 == CHK_SD(sd_card)) ? 0x01 : 0x02;
+ buf[5] = (CHK_SD(sd_card) == 1) ? 0x01 : 0x02;
if (chip->card_wp & SD_CARD)
buf[5] |= 0x80;
@@ -4588,7 +4588,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
cmd[4] = srb->cmnd[6];
buf = kmalloc(data_len, GFP_KERNEL);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -4871,7 +4871,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
u8 *buf;
buf = kmalloc(data_len, GFP_KERNEL);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return TRANSPORT_ERROR;
}
@@ -5176,10 +5176,10 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
}
- if ((0x53 != srb->cmnd[2]) || (0x44 != srb->cmnd[3]) ||
- (0x20 != srb->cmnd[4]) || (0x43 != srb->cmnd[5]) ||
- (0x61 != srb->cmnd[6]) || (0x72 != srb->cmnd[7]) ||
- (0x64 != srb->cmnd[8])) {
+ if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
+ (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+ (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+ (srb->cmnd[8] != 0x64)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
rtsx_trace(chip);
return TRANSPORT_FAILED;
@@ -5188,7 +5188,7 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
switch (srb->cmnd[1] & 0x0F) {
case 0:
#ifdef SUPPORT_SD_LOCK
- if (0x64 == srb->cmnd[9])
+ if (srb->cmnd[9] == 0x64)
sd_card->sd_lock_status |= SD_SDR_RST;
#endif
retval = reset_sd_card(chip);
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index e67e7ecc2cbd..26eb2a184f91 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -420,7 +420,6 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
return STATUS_SUCCESS;
}
-
int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
{
int retval;
@@ -516,7 +515,6 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
return STATUS_SUCCESS;
}
-
int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
{
struct spi_info *spi = &(chip->spi);
@@ -664,7 +662,7 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
buf = kmalloc(SF_PAGE_LEN, GFP_KERNEL);
- if (buf == NULL) {
+ if (!buf) {
rtsx_trace(chip);
return STATUS_ERROR;
}
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 10fea7bb8f30..fc1dfe0991d4 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -903,14 +903,10 @@ static inline void free_zone(struct zone_entry *zone)
zone->set_index = 0;
zone->get_index = 0;
zone->unused_blk_cnt = 0;
- if (zone->l2p_table) {
- vfree(zone->l2p_table);
- zone->l2p_table = NULL;
- }
- if (zone->free_table) {
- vfree(zone->free_table);
- zone->free_table = NULL;
- }
+ vfree(zone->l2p_table);
+ zone->l2p_table = NULL;
+ vfree(zone->free_table);
+ zone->free_table = NULL;
}
static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
@@ -1435,7 +1431,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
if (zone->l2p_table == NULL) {
zone->l2p_table = vmalloc(2000);
- if (zone->l2p_table == NULL) {
+ if (!zone->l2p_table) {
rtsx_trace(chip);
goto Build_Fail;
}
@@ -1444,7 +1440,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
if (zone->free_table == NULL) {
zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
- if (zone->free_table == NULL) {
+ if (!zone->free_table) {
rtsx_trace(chip);
goto Build_Fail;
}
@@ -1588,14 +1584,10 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
return STATUS_SUCCESS;
Build_Fail:
- if (zone->l2p_table) {
- vfree(zone->l2p_table);
- zone->l2p_table = NULL;
- }
- if (zone->free_table) {
- vfree(zone->free_table);
- zone->free_table = NULL;
- }
+ vfree(zone->l2p_table);
+ zone->l2p_table = NULL;
+ vfree(zone->free_table);
+ zone->free_table = NULL;
return STATUS_FAIL;
}
@@ -2251,14 +2243,10 @@ void xd_free_l2p_tbl(struct rtsx_chip *chip)
if (xd_card->zone != NULL) {
for (i = 0; i < xd_card->zone_cnt; i++) {
- if (xd_card->zone[i].l2p_table != NULL) {
- vfree(xd_card->zone[i].l2p_table);
- xd_card->zone[i].l2p_table = NULL;
- }
- if (xd_card->zone[i].free_table != NULL) {
- vfree(xd_card->zone[i].free_table);
- xd_card->zone[i].free_table = NULL;
- }
+ vfree(xd_card->zone[i].l2p_table);
+ xd_card->zone[i].l2p_table = NULL;
+ vfree(xd_card->zone[i].free_table);
+ xd_card->zone[i].free_table = NULL;
}
vfree(xd_card->zone);
xd_card->zone = NULL;
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index bd1e15caae4e..e19ac4368651 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -1,3 +1,4 @@
+#include <linux/bitops.h>
#include "threefish_api.h"
void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
@@ -13,479 +14,479 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
b1 += k1 + t0;
b0 += b1 + k0;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k3;
b2 += b3 + k2 + t1;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k2 + t1;
b0 += b1 + k1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k4 + 1;
b2 += b3 + k3 + t2;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
b1 += k3 + t2;
b0 += b1 + k2;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k0 + 2;
b2 += b3 + k4 + t0;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k4 + t0;
b0 += b1 + k3;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k1 + 3;
b2 += b3 + k0 + t1;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
b1 += k0 + t1;
b0 += b1 + k4;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k2 + 4;
b2 += b3 + k1 + t2;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k1 + t2;
b0 += b1 + k0;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k3 + 5;
b2 += b3 + k2 + t0;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
b1 += k2 + t0;
b0 += b1 + k1;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k4 + 6;
b2 += b3 + k3 + t1;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k3 + t1;
b0 += b1 + k2;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k0 + 7;
b2 += b3 + k4 + t2;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
b1 += k4 + t2;
b0 += b1 + k3;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k1 + 8;
b2 += b3 + k0 + t0;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k0 + t0;
b0 += b1 + k4;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k2 + 9;
b2 += b3 + k1 + t1;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
b1 += k1 + t1;
b0 += b1 + k0;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k3 + 10;
b2 += b3 + k2 + t2;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k2 + t2;
b0 += b1 + k1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k4 + 11;
b2 += b3 + k3 + t0;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
b1 += k3 + t0;
b0 += b1 + k2;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k0 + 12;
b2 += b3 + k4 + t1;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k4 + t1;
b0 += b1 + k3;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k1 + 13;
b2 += b3 + k0 + t2;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
b1 += k0 + t2;
b0 += b1 + k4;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k2 + 14;
b2 += b3 + k1 + t0;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k1 + t0;
b0 += b1 + k0;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k3 + 15;
b2 += b3 + k2 + t1;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
b1 += k2 + t1;
b0 += b1 + k1;
- b1 = ((b1 << 14) | (b1 >> (64 - 14))) ^ b0;
+ b1 = rol64(b1, 14) ^ b0;
b3 += k4 + 16;
b2 += b3 + k3 + t2;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b2;
+ b3 = rol64(b3, 16) ^ b2;
b0 += b3;
- b3 = ((b3 << 52) | (b3 >> (64 - 52))) ^ b0;
+ b3 = rol64(b3, 52) ^ b0;
b2 += b1;
- b1 = ((b1 << 57) | (b1 >> (64 - 57))) ^ b2;
+ b1 = rol64(b1, 57) ^ b2;
b0 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b0;
+ b1 = rol64(b1, 23) ^ b0;
b2 += b3;
- b3 = ((b3 << 40) | (b3 >> (64 - 40))) ^ b2;
+ b3 = rol64(b3, 40) ^ b2;
b0 += b3;
- b3 = ((b3 << 5) | (b3 >> (64 - 5))) ^ b0;
+ b3 = rol64(b3, 5) ^ b0;
b2 += b1;
- b1 = ((b1 << 37) | (b1 >> (64 - 37))) ^ b2;
+ b1 = rol64(b1, 37) ^ b2;
b1 += k3 + t2;
b0 += b1 + k2;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b0;
+ b1 = rol64(b1, 25) ^ b0;
b3 += k0 + 17;
b2 += b3 + k4 + t0;
- b3 = ((b3 << 33) | (b3 >> (64 - 33))) ^ b2;
+ b3 = rol64(b3, 33) ^ b2;
b0 += b3;
- b3 = ((b3 << 46) | (b3 >> (64 - 46))) ^ b0;
+ b3 = rol64(b3, 46) ^ b0;
b2 += b1;
- b1 = ((b1 << 12) | (b1 >> (64 - 12))) ^ b2;
+ b1 = rol64(b1, 12) ^ b2;
b0 += b1;
- b1 = ((b1 << 58) | (b1 >> (64 - 58))) ^ b0;
+ b1 = rol64(b1, 58) ^ b0;
b2 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b2;
+ b3 = rol64(b3, 22) ^ b2;
b0 += b3;
- b3 = ((b3 << 32) | (b3 >> (64 - 32))) ^ b0;
+ b3 = rol64(b3, 32) ^ b0;
b2 += b1;
- b1 = ((b1 << 32) | (b1 >> (64 - 32))) ^ b2;
+ b1 = rol64(b1, 32) ^ b2;
output[0] = b0 + k3;
output[1] = b1 + k4 + t0;
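/*
 * Every rewrite in this file is the same mechanical substitution: the
 * open-coded 64-bit rotate becomes rol64() from <linux/bitops.h>,
 * hence the new include at the top. At the time of this patch rol64()
 * is defined essentially as below, so the generated code is unchanged:
 */
static inline u64 rol64_sketch(u64 word, unsigned int shift)
{
	/* equivalent to ((word << shift) | (word >> (64 - shift)))
	 * for the 0 < shift < 64 values Threefish uses */
	return (word << shift) | (word >> (64 - shift));
}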
@@ -1153,939 +1154,939 @@ void threefish_encrypt_512(struct threefish_key *key_ctx, u64 *input,
b1 += k1;
b0 += b1 + k0;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k3;
b2 += b3 + k2;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k5 + t0;
b4 += b5 + k4;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k7;
b6 += b7 + k6 + t1;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k2;
b0 += b1 + k1;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k4;
b2 += b3 + k3;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k6 + t1;
b4 += b5 + k5;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k8 + 1;
b6 += b7 + k7 + t2;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
b1 += k3;
b0 += b1 + k2;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k5;
b2 += b3 + k4;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k7 + t2;
b4 += b5 + k6;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k0 + 2;
b6 += b7 + k8 + t0;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k4;
b0 += b1 + k3;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k6;
b2 += b3 + k5;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k8 + t0;
b4 += b5 + k7;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k1 + 3;
b6 += b7 + k0 + t1;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
b1 += k5;
b0 += b1 + k4;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k7;
b2 += b3 + k6;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k0 + t1;
b4 += b5 + k8;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k2 + 4;
b6 += b7 + k1 + t2;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k6;
b0 += b1 + k5;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k8;
b2 += b3 + k7;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k1 + t2;
b4 += b5 + k0;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k3 + 5;
b6 += b7 + k2 + t0;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
b1 += k7;
b0 += b1 + k6;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k0;
b2 += b3 + k8;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k2 + t0;
b4 += b5 + k1;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k4 + 6;
b6 += b7 + k3 + t1;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k8;
b0 += b1 + k7;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k1;
b2 += b3 + k0;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k3 + t1;
b4 += b5 + k2;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k5 + 7;
b6 += b7 + k4 + t2;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
b1 += k0;
b0 += b1 + k8;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k2;
b2 += b3 + k1;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k4 + t2;
b4 += b5 + k3;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k6 + 8;
b6 += b7 + k5 + t0;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k1;
b0 += b1 + k0;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k3;
b2 += b3 + k2;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k5 + t0;
b4 += b5 + k4;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k7 + 9;
b6 += b7 + k6 + t1;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
b1 += k2;
b0 += b1 + k1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k4;
b2 += b3 + k3;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k6 + t1;
b4 += b5 + k5;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k8 + 10;
b6 += b7 + k7 + t2;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k3;
b0 += b1 + k2;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k5;
b2 += b3 + k4;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k7 + t2;
b4 += b5 + k6;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k0 + 11;
b6 += b7 + k8 + t0;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
b1 += k4;
b0 += b1 + k3;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k6;
b2 += b3 + k5;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k8 + t0;
b4 += b5 + k7;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k1 + 12;
b6 += b7 + k0 + t1;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k5;
b0 += b1 + k4;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k7;
b2 += b3 + k6;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k0 + t1;
b4 += b5 + k8;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k2 + 13;
b6 += b7 + k1 + t2;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
b1 += k6;
b0 += b1 + k5;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k8;
b2 += b3 + k7;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k1 + t2;
b4 += b5 + k0;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k3 + 14;
b6 += b7 + k2 + t0;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k7;
b0 += b1 + k6;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k0;
b2 += b3 + k8;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k2 + t0;
b4 += b5 + k1;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k4 + 15;
b6 += b7 + k3 + t1;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
b1 += k8;
b0 += b1 + k7;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b0;
+ b1 = rol64(b1, 46) ^ b0;
b3 += k1;
b2 += b3 + k0;
- b3 = ((b3 << 36) | (b3 >> (64 - 36))) ^ b2;
+ b3 = rol64(b3, 36) ^ b2;
b5 += k3 + t1;
b4 += b5 + k2;
- b5 = ((b5 << 19) | (b5 >> (64 - 19))) ^ b4;
+ b5 = rol64(b5, 19) ^ b4;
b7 += k5 + 16;
b6 += b7 + k4 + t2;
- b7 = ((b7 << 37) | (b7 >> (64 - 37))) ^ b6;
+ b7 = rol64(b7, 37) ^ b6;
b2 += b1;
- b1 = ((b1 << 33) | (b1 >> (64 - 33))) ^ b2;
+ b1 = rol64(b1, 33) ^ b2;
b4 += b7;
- b7 = ((b7 << 27) | (b7 >> (64 - 27))) ^ b4;
+ b7 = rol64(b7, 27) ^ b4;
b6 += b5;
- b5 = ((b5 << 14) | (b5 >> (64 - 14))) ^ b6;
+ b5 = rol64(b5, 14) ^ b6;
b0 += b3;
- b3 = ((b3 << 42) | (b3 >> (64 - 42))) ^ b0;
+ b3 = rol64(b3, 42) ^ b0;
b4 += b1;
- b1 = ((b1 << 17) | (b1 >> (64 - 17))) ^ b4;
+ b1 = rol64(b1, 17) ^ b4;
b6 += b3;
- b3 = ((b3 << 49) | (b3 >> (64 - 49))) ^ b6;
+ b3 = rol64(b3, 49) ^ b6;
b0 += b5;
- b5 = ((b5 << 36) | (b5 >> (64 - 36))) ^ b0;
+ b5 = rol64(b5, 36) ^ b0;
b2 += b7;
- b7 = ((b7 << 39) | (b7 >> (64 - 39))) ^ b2;
+ b7 = rol64(b7, 39) ^ b2;
b6 += b1;
- b1 = ((b1 << 44) | (b1 >> (64 - 44))) ^ b6;
+ b1 = rol64(b1, 44) ^ b6;
b0 += b7;
- b7 = ((b7 << 9) | (b7 >> (64 - 9))) ^ b0;
+ b7 = rol64(b7, 9) ^ b0;
b2 += b5;
- b5 = ((b5 << 54) | (b5 >> (64 - 54))) ^ b2;
+ b5 = rol64(b5, 54) ^ b2;
b4 += b3;
- b3 = ((b3 << 56) | (b3 >> (64 - 56))) ^ b4;
+ b3 = rol64(b3, 56) ^ b4;
b1 += k0;
b0 += b1 + k8;
- b1 = ((b1 << 39) | (b1 >> (64 - 39))) ^ b0;
+ b1 = rol64(b1, 39) ^ b0;
b3 += k2;
b2 += b3 + k1;
- b3 = ((b3 << 30) | (b3 >> (64 - 30))) ^ b2;
+ b3 = rol64(b3, 30) ^ b2;
b5 += k4 + t2;
b4 += b5 + k3;
- b5 = ((b5 << 34) | (b5 >> (64 - 34))) ^ b4;
+ b5 = rol64(b5, 34) ^ b4;
b7 += k6 + 17;
b6 += b7 + k5 + t0;
- b7 = ((b7 << 24) | (b7 >> (64 - 24))) ^ b6;
+ b7 = rol64(b7, 24) ^ b6;
b2 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b2;
+ b1 = rol64(b1, 13) ^ b2;
b4 += b7;
- b7 = ((b7 << 50) | (b7 >> (64 - 50))) ^ b4;
+ b7 = rol64(b7, 50) ^ b4;
b6 += b5;
- b5 = ((b5 << 10) | (b5 >> (64 - 10))) ^ b6;
+ b5 = rol64(b5, 10) ^ b6;
b0 += b3;
- b3 = ((b3 << 17) | (b3 >> (64 - 17))) ^ b0;
+ b3 = rol64(b3, 17) ^ b0;
b4 += b1;
- b1 = ((b1 << 25) | (b1 >> (64 - 25))) ^ b4;
+ b1 = rol64(b1, 25) ^ b4;
b6 += b3;
- b3 = ((b3 << 29) | (b3 >> (64 - 29))) ^ b6;
+ b3 = rol64(b3, 29) ^ b6;
b0 += b5;
- b5 = ((b5 << 39) | (b5 >> (64 - 39))) ^ b0;
+ b5 = rol64(b5, 39) ^ b0;
b2 += b7;
- b7 = ((b7 << 43) | (b7 >> (64 - 43))) ^ b2;
+ b7 = rol64(b7, 43) ^ b2;
b6 += b1;
- b1 = ((b1 << 8) | (b1 >> (64 - 8))) ^ b6;
+ b1 = rol64(b1, 8) ^ b6;
b0 += b7;
- b7 = ((b7 << 35) | (b7 >> (64 - 35))) ^ b0;
+ b7 = rol64(b7, 35) ^ b0;
b2 += b5;
- b5 = ((b5 << 56) | (b5 >> (64 - 56))) ^ b2;
+ b5 = rol64(b5, 56) ^ b2;
b4 += b3;
- b3 = ((b3 << 22) | (b3 >> (64 - 22))) ^ b4;
+ b3 = rol64(b3, 22) ^ b4;
output[0] = b0 + k0;
output[1] = b1 + k1;
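Every hunk in this patch applies the same mechanical rewrite: the open-coded 64-bit rotate-left in each Threefish MIX step is replaced with the kernel's rol64() helper from <linux/bitops.h>, which expands to the same shift/or expression. A minimal sketch of the two forms follows; the helper names are illustrative only and are not part of the patch:

#include <linux/types.h>	/* u64 */
#include <linux/bitops.h>	/* rol64() */

/*
 * Form being removed: open-coded rotate-left, then XOR with the
 * partner word of the MIX pair. (64 - n) is well-defined here
 * because every Threefish rotation constant is nonzero.
 */
static inline u64 mix_open_coded(u64 x, u64 partner, unsigned int n)
{
	return ((x << n) | (x >> (64 - n))) ^ partner;
}

/* Form being introduced: the same operation via rol64(). */
static inline u64 mix_rol64(u64 x, u64 partner, unsigned int n)
{
	return rol64(x, n) ^ partner;
}

Because rol64() is defined as exactly this shift/or rotate, the rewrite is behavior-preserving; only the notation changes, round for round.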
@@ -3383,2083 +3384,2083 @@ void threefish_encrypt_1024(struct threefish_key *key_ctx, u64 *input,
b1 += k1;
b0 += b1 + k0;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k3;
b2 += b3 + k2;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k5;
b4 += b5 + k4;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k7;
b6 += b7 + k6;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k9;
b8 += b9 + k8;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k11;
b10 += b11 + k10;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k13 + t0;
b12 += b13 + k12;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k15;
b14 += b15 + k14 + t1;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k2;
b0 += b1 + k1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k4;
b2 += b3 + k3;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k6;
b4 += b5 + k5;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k8;
b6 += b7 + k7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k10;
b8 += b9 + k9;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k12;
b10 += b11 + k11;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k14 + t1;
b12 += b13 + k13;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k16 + 1;
b14 += b15 + k15 + t2;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k3;
b0 += b1 + k2;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k5;
b2 += b3 + k4;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k7;
b4 += b5 + k6;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k9;
b6 += b7 + k8;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k11;
b8 += b9 + k10;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k13;
b10 += b11 + k12;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k15 + t2;
b12 += b13 + k14;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k0 + 2;
b14 += b15 + k16 + t0;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k4;
b0 += b1 + k3;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k6;
b2 += b3 + k5;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k8;
b4 += b5 + k7;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k10;
b6 += b7 + k9;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k12;
b8 += b9 + k11;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k14;
b10 += b11 + k13;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k16 + t0;
b12 += b13 + k15;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k1 + 3;
b14 += b15 + k0 + t1;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k5;
b0 += b1 + k4;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k7;
b2 += b3 + k6;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k9;
b4 += b5 + k8;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k11;
b6 += b7 + k10;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k13;
b8 += b9 + k12;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k15;
b10 += b11 + k14;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k0 + t1;
b12 += b13 + k16;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k2 + 4;
b14 += b15 + k1 + t2;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k6;
b0 += b1 + k5;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k8;
b2 += b3 + k7;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k10;
b4 += b5 + k9;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k12;
b6 += b7 + k11;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k14;
b8 += b9 + k13;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k16;
b10 += b11 + k15;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k1 + t2;
b12 += b13 + k0;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k3 + 5;
b14 += b15 + k2 + t0;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k7;
b0 += b1 + k6;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k9;
b2 += b3 + k8;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k11;
b4 += b5 + k10;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k13;
b6 += b7 + k12;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k15;
b8 += b9 + k14;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k0;
b10 += b11 + k16;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k2 + t0;
b12 += b13 + k1;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k4 + 6;
b14 += b15 + k3 + t1;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k8;
b0 += b1 + k7;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k10;
b2 += b3 + k9;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k12;
b4 += b5 + k11;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k14;
b6 += b7 + k13;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k16;
b8 += b9 + k15;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k1;
b10 += b11 + k0;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k3 + t1;
b12 += b13 + k2;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k5 + 7;
b14 += b15 + k4 + t2;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k9;
b0 += b1 + k8;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k11;
b2 += b3 + k10;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k13;
b4 += b5 + k12;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k15;
b6 += b7 + k14;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k0;
b8 += b9 + k16;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k2;
b10 += b11 + k1;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k4 + t2;
b12 += b13 + k3;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k6 + 8;
b14 += b15 + k5 + t0;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k10;
b0 += b1 + k9;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k12;
b2 += b3 + k11;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k14;
b4 += b5 + k13;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k16;
b6 += b7 + k15;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k1;
b8 += b9 + k0;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k3;
b10 += b11 + k2;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k5 + t0;
b12 += b13 + k4;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k7 + 9;
b14 += b15 + k6 + t1;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k11;
b0 += b1 + k10;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k13;
b2 += b3 + k12;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k15;
b4 += b5 + k14;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k0;
b6 += b7 + k16;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k2;
b8 += b9 + k1;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k4;
b10 += b11 + k3;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k6 + t1;
b12 += b13 + k5;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k8 + 10;
b14 += b15 + k7 + t2;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k12;
b0 += b1 + k11;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k14;
b2 += b3 + k13;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k16;
b4 += b5 + k15;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k1;
b6 += b7 + k0;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k3;
b8 += b9 + k2;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k5;
b10 += b11 + k4;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k7 + t2;
b12 += b13 + k6;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k9 + 11;
b14 += b15 + k8 + t0;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k13;
b0 += b1 + k12;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k15;
b2 += b3 + k14;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k0;
b4 += b5 + k16;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k2;
b6 += b7 + k1;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k4;
b8 += b9 + k3;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k6;
b10 += b11 + k5;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k8 + t0;
b12 += b13 + k7;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k10 + 12;
b14 += b15 + k9 + t1;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k14;
b0 += b1 + k13;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k16;
b2 += b3 + k15;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k1;
b4 += b5 + k0;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k3;
b6 += b7 + k2;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k5;
b8 += b9 + k4;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k7;
b10 += b11 + k6;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k9 + t1;
b12 += b13 + k8;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k11 + 13;
b14 += b15 + k10 + t2;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k15;
b0 += b1 + k14;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k0;
b2 += b3 + k16;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k2;
b4 += b5 + k1;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k4;
b6 += b7 + k3;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k6;
b8 += b9 + k5;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k8;
b10 += b11 + k7;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k10 + t2;
b12 += b13 + k9;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k12 + 14;
b14 += b15 + k11 + t0;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k16;
b0 += b1 + k15;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k1;
b2 += b3 + k0;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k3;
b4 += b5 + k2;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k5;
b6 += b7 + k4;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k7;
b8 += b9 + k6;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k9;
b10 += b11 + k8;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k11 + t0;
b12 += b13 + k10;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k13 + 15;
b14 += b15 + k12 + t1;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k0;
b0 += b1 + k16;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k2;
b2 += b3 + k1;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k4;
b4 += b5 + k3;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k6;
b6 += b7 + k5;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k8;
b8 += b9 + k7;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k10;
b10 += b11 + k9;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k12 + t1;
b12 += b13 + k11;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k14 + 16;
b14 += b15 + k13 + t2;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k1;
b0 += b1 + k0;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k3;
b2 += b3 + k2;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k5;
b4 += b5 + k4;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k7;
b6 += b7 + k6;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k9;
b8 += b9 + k8;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k11;
b10 += b11 + k10;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k13 + t2;
b12 += b13 + k12;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k15 + 17;
b14 += b15 + k14 + t0;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
b1 += k2;
b0 += b1 + k1;
- b1 = ((b1 << 24) | (b1 >> (64 - 24))) ^ b0;
+ b1 = rol64(b1, 24) ^ b0;
b3 += k4;
b2 += b3 + k3;
- b3 = ((b3 << 13) | (b3 >> (64 - 13))) ^ b2;
+ b3 = rol64(b3, 13) ^ b2;
b5 += k6;
b4 += b5 + k5;
- b5 = ((b5 << 8) | (b5 >> (64 - 8))) ^ b4;
+ b5 = rol64(b5, 8) ^ b4;
b7 += k8;
b6 += b7 + k7;
- b7 = ((b7 << 47) | (b7 >> (64 - 47))) ^ b6;
+ b7 = rol64(b7, 47) ^ b6;
b9 += k10;
b8 += b9 + k9;
- b9 = ((b9 << 8) | (b9 >> (64 - 8))) ^ b8;
+ b9 = rol64(b9, 8) ^ b8;
b11 += k12;
b10 += b11 + k11;
- b11 = ((b11 << 17) | (b11 >> (64 - 17))) ^ b10;
+ b11 = rol64(b11, 17) ^ b10;
b13 += k14 + t0;
b12 += b13 + k13;
- b13 = ((b13 << 22) | (b13 >> (64 - 22))) ^ b12;
+ b13 = rol64(b13, 22) ^ b12;
b15 += k16 + 18;
b14 += b15 + k15 + t1;
- b15 = ((b15 << 37) | (b15 >> (64 - 37))) ^ b14;
+ b15 = rol64(b15, 37) ^ b14;
b0 += b9;
- b9 = ((b9 << 38) | (b9 >> (64 - 38))) ^ b0;
+ b9 = rol64(b9, 38) ^ b0;
b2 += b13;
- b13 = ((b13 << 19) | (b13 >> (64 - 19))) ^ b2;
+ b13 = rol64(b13, 19) ^ b2;
b6 += b11;
- b11 = ((b11 << 10) | (b11 >> (64 - 10))) ^ b6;
+ b11 = rol64(b11, 10) ^ b6;
b4 += b15;
- b15 = ((b15 << 55) | (b15 >> (64 - 55))) ^ b4;
+ b15 = rol64(b15, 55) ^ b4;
b10 += b7;
- b7 = ((b7 << 49) | (b7 >> (64 - 49))) ^ b10;
+ b7 = rol64(b7, 49) ^ b10;
b12 += b3;
- b3 = ((b3 << 18) | (b3 >> (64 - 18))) ^ b12;
+ b3 = rol64(b3, 18) ^ b12;
b14 += b5;
- b5 = ((b5 << 23) | (b5 >> (64 - 23))) ^ b14;
+ b5 = rol64(b5, 23) ^ b14;
b8 += b1;
- b1 = ((b1 << 52) | (b1 >> (64 - 52))) ^ b8;
+ b1 = rol64(b1, 52) ^ b8;
b0 += b7;
- b7 = ((b7 << 33) | (b7 >> (64 - 33))) ^ b0;
+ b7 = rol64(b7, 33) ^ b0;
b2 += b5;
- b5 = ((b5 << 4) | (b5 >> (64 - 4))) ^ b2;
+ b5 = rol64(b5, 4) ^ b2;
b4 += b3;
- b3 = ((b3 << 51) | (b3 >> (64 - 51))) ^ b4;
+ b3 = rol64(b3, 51) ^ b4;
b6 += b1;
- b1 = ((b1 << 13) | (b1 >> (64 - 13))) ^ b6;
+ b1 = rol64(b1, 13) ^ b6;
b12 += b15;
- b15 = ((b15 << 34) | (b15 >> (64 - 34))) ^ b12;
+ b15 = rol64(b15, 34) ^ b12;
b14 += b13;
- b13 = ((b13 << 41) | (b13 >> (64 - 41))) ^ b14;
+ b13 = rol64(b13, 41) ^ b14;
b8 += b11;
- b11 = ((b11 << 59) | (b11 >> (64 - 59))) ^ b8;
+ b11 = rol64(b11, 59) ^ b8;
b10 += b9;
- b9 = ((b9 << 17) | (b9 >> (64 - 17))) ^ b10;
+ b9 = rol64(b9, 17) ^ b10;
b0 += b15;
- b15 = ((b15 << 5) | (b15 >> (64 - 5))) ^ b0;
+ b15 = rol64(b15, 5) ^ b0;
b2 += b11;
- b11 = ((b11 << 20) | (b11 >> (64 - 20))) ^ b2;
+ b11 = rol64(b11, 20) ^ b2;
b6 += b13;
- b13 = ((b13 << 48) | (b13 >> (64 - 48))) ^ b6;
+ b13 = rol64(b13, 48) ^ b6;
b4 += b9;
- b9 = ((b9 << 41) | (b9 >> (64 - 41))) ^ b4;
+ b9 = rol64(b9, 41) ^ b4;
b14 += b1;
- b1 = ((b1 << 47) | (b1 >> (64 - 47))) ^ b14;
+ b1 = rol64(b1, 47) ^ b14;
b8 += b5;
- b5 = ((b5 << 28) | (b5 >> (64 - 28))) ^ b8;
+ b5 = rol64(b5, 28) ^ b8;
b10 += b3;
- b3 = ((b3 << 16) | (b3 >> (64 - 16))) ^ b10;
+ b3 = rol64(b3, 16) ^ b10;
b12 += b7;
- b7 = ((b7 << 25) | (b7 >> (64 - 25))) ^ b12;
+ b7 = rol64(b7, 25) ^ b12;
b1 += k3;
b0 += b1 + k2;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b0;
+ b1 = rol64(b1, 41) ^ b0;
b3 += k5;
b2 += b3 + k4;
- b3 = ((b3 << 9) | (b3 >> (64 - 9))) ^ b2;
+ b3 = rol64(b3, 9) ^ b2;
b5 += k7;
b4 += b5 + k6;
- b5 = ((b5 << 37) | (b5 >> (64 - 37))) ^ b4;
+ b5 = rol64(b5, 37) ^ b4;
b7 += k9;
b6 += b7 + k8;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b6;
+ b7 = rol64(b7, 31) ^ b6;
b9 += k11;
b8 += b9 + k10;
- b9 = ((b9 << 12) | (b9 >> (64 - 12))) ^ b8;
+ b9 = rol64(b9, 12) ^ b8;
b11 += k13;
b10 += b11 + k12;
- b11 = ((b11 << 47) | (b11 >> (64 - 47))) ^ b10;
+ b11 = rol64(b11, 47) ^ b10;
b13 += k15 + t1;
b12 += b13 + k14;
- b13 = ((b13 << 44) | (b13 >> (64 - 44))) ^ b12;
+ b13 = rol64(b13, 44) ^ b12;
b15 += k0 + 19;
b14 += b15 + k16 + t2;
- b15 = ((b15 << 30) | (b15 >> (64 - 30))) ^ b14;
+ b15 = rol64(b15, 30) ^ b14;
b0 += b9;
- b9 = ((b9 << 16) | (b9 >> (64 - 16))) ^ b0;
+ b9 = rol64(b9, 16) ^ b0;
b2 += b13;
- b13 = ((b13 << 34) | (b13 >> (64 - 34))) ^ b2;
+ b13 = rol64(b13, 34) ^ b2;
b6 += b11;
- b11 = ((b11 << 56) | (b11 >> (64 - 56))) ^ b6;
+ b11 = rol64(b11, 56) ^ b6;
b4 += b15;
- b15 = ((b15 << 51) | (b15 >> (64 - 51))) ^ b4;
+ b15 = rol64(b15, 51) ^ b4;
b10 += b7;
- b7 = ((b7 << 4) | (b7 >> (64 - 4))) ^ b10;
+ b7 = rol64(b7, 4) ^ b10;
b12 += b3;
- b3 = ((b3 << 53) | (b3 >> (64 - 53))) ^ b12;
+ b3 = rol64(b3, 53) ^ b12;
b14 += b5;
- b5 = ((b5 << 42) | (b5 >> (64 - 42))) ^ b14;
+ b5 = rol64(b5, 42) ^ b14;
b8 += b1;
- b1 = ((b1 << 41) | (b1 >> (64 - 41))) ^ b8;
+ b1 = rol64(b1, 41) ^ b8;
b0 += b7;
- b7 = ((b7 << 31) | (b7 >> (64 - 31))) ^ b0;
+ b7 = rol64(b7, 31) ^ b0;
b2 += b5;
- b5 = ((b5 << 44) | (b5 >> (64 - 44))) ^ b2;
+ b5 = rol64(b5, 44) ^ b2;
b4 += b3;
- b3 = ((b3 << 47) | (b3 >> (64 - 47))) ^ b4;
+ b3 = rol64(b3, 47) ^ b4;
b6 += b1;
- b1 = ((b1 << 46) | (b1 >> (64 - 46))) ^ b6;
+ b1 = rol64(b1, 46) ^ b6;
b12 += b15;
- b15 = ((b15 << 19) | (b15 >> (64 - 19))) ^ b12;
+ b15 = rol64(b15, 19) ^ b12;
b14 += b13;
- b13 = ((b13 << 42) | (b13 >> (64 - 42))) ^ b14;
+ b13 = rol64(b13, 42) ^ b14;
b8 += b11;
- b11 = ((b11 << 44) | (b11 >> (64 - 44))) ^ b8;
+ b11 = rol64(b11, 44) ^ b8;
b10 += b9;
- b9 = ((b9 << 25) | (b9 >> (64 - 25))) ^ b10;
+ b9 = rol64(b9, 25) ^ b10;
b0 += b15;
- b15 = ((b15 << 9) | (b15 >> (64 - 9))) ^ b0;
+ b15 = rol64(b15, 9) ^ b0;
b2 += b11;
- b11 = ((b11 << 48) | (b11 >> (64 - 48))) ^ b2;
+ b11 = rol64(b11, 48) ^ b2;
b6 += b13;
- b13 = ((b13 << 35) | (b13 >> (64 - 35))) ^ b6;
+ b13 = rol64(b13, 35) ^ b6;
b4 += b9;
- b9 = ((b9 << 52) | (b9 >> (64 - 52))) ^ b4;
+ b9 = rol64(b9, 52) ^ b4;
b14 += b1;
- b1 = ((b1 << 23) | (b1 >> (64 - 23))) ^ b14;
+ b1 = rol64(b1, 23) ^ b14;
b8 += b5;
- b5 = ((b5 << 31) | (b5 >> (64 - 31))) ^ b8;
+ b5 = rol64(b5, 31) ^ b8;
b10 += b3;
- b3 = ((b3 << 37) | (b3 >> (64 - 37))) ^ b10;
+ b3 = rol64(b3, 37) ^ b10;
b12 += b7;
- b7 = ((b7 << 20) | (b7 >> (64 - 20))) ^ b12;
+ b7 = rol64(b7, 20) ^ b12;
output[0] = b0 + k3;
output[1] = b1 + k4;
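
Note: rol64() is the kernel's 64-bit rotate-left helper from <linux/bitops.h>;
each pair of changed lines above is one Threefish MIX step rewritten with it.
A minimal sketch of the equivalence (the word pointers and rotation constant
are illustrative, not the driver's actual interface):

	#include <linux/bitops.h>	/* rol64(): 64-bit rotate left */
	#include <linux/types.h>

	/*
	 * One Threefish MIX step: add the partner word, rotate it by a
	 * round-dependent constant, then XOR with the updated sum. The
	 * open-coded form and the rol64() form are bit-for-bit identical.
	 */
	static inline void threefish_mix_sketch(u64 *x0, u64 *x1, unsigned int r)
	{
		*x0 += *x1;
		/* was: *x1 = ((*x1 << r) | (*x1 >> (64 - r))) ^ *x0; */
		*x1 = rol64(*x1, r) ^ *x0;
	}
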
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
index c95b3abad646..cc0afeeb68c1 100644
--- a/drivers/staging/slicoss/slic.h
+++ b/drivers/staging/slicoss/slic.h
@@ -478,6 +478,8 @@ struct adapter {
u32 max_isr_xmits;
u32 rcv_interrupt_yields;
u32 intagg_period;
+ u32 intagg_delay;
+ u32 dynamic_intagg;
struct inicpm_state *inicpm_info;
void *pinicpm_info;
struct slic_ifevents if_events;
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index b23a2d1f61a2..6d50fc4fd02e 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -58,9 +58,9 @@
#define DEBUG_MICROCODE 1
#define DBG 1
#define SLIC_INTERRUPT_PROCESS_LIMIT 1
-#define SLIC_OFFLOAD_IP_CHECKSUM 1
-#define STATS_TIMER_INTERVAL 2
-#define PING_TIMER_INTERVAL 1
+#define SLIC_OFFLOAD_IP_CHECKSUM 1
+#define STATS_TIMER_INTERVAL 2
+#define PING_TIMER_INTERVAL 1
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
@@ -102,8 +102,7 @@ static char *slic_banner = "Alacritech SLIC Technology(tm) Server and Storage Ac
static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00";
static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL };
-static int intagg_delay = 100;
-static u32 dynamic_intagg;
+#define DEFAULT_INTAGG_DELAY 100
static unsigned int rcv_count;
#define DRV_NAME "slicoss"
@@ -119,17 +118,14 @@ MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("Dual BSD/GPL");
-module_param(dynamic_intagg, int, 0);
-MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
-module_param(intagg_delay, int, 0);
-MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
-
static const struct pci_device_id slic_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
{ 0 }
};
+static struct ethtool_ops slic_ethtool_ops;
+
MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush)
@@ -549,14 +545,6 @@ static int slic_card_download(struct adapter *adapter)
instruction = *(u32 *)(fw->data + index);
index += 4;
- /* Check SRAM location zero. If it is non-zero. Abort.*/
- /*
- * failure = readl((u32 __iomem *)&slic_regs->slic_reset);
- * if (failure) {
- * release_firmware(fw);
- * return -EIO;
- * }
- */
}
}
release_firmware(fw);
@@ -796,7 +784,6 @@ static bool slic_mac_filter(struct adapter *adapter,
return true;
}
return false;
-
}
static int slic_mac_set_address(struct net_device *dev, void *ptr)
@@ -884,7 +871,7 @@ static int slic_upr_queue_request(struct adapter *adapter,
struct slic_upr *upr;
struct slic_upr *uprqueue;
- upr = kmalloc(sizeof(struct slic_upr), GFP_ATOMIC);
+ upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
if (!upr)
return -ENOMEM;
@@ -911,11 +898,6 @@ static void slic_upr_start(struct adapter *adapter)
{
struct slic_upr *upr;
__iomem struct slic_regs *slic_regs = adapter->slic_regs;
-/*
- * char * ptr1;
- * char * ptr2;
- * uint cmdoffset;
- */
upr = adapter->upr_list;
if (!upr)
return;
@@ -1773,7 +1755,6 @@ static void slic_init_cleanup(struct adapter *adapter)
if (adapter->intrregistered) {
adapter->intrregistered = 0;
free_irq(adapter->netdev->irq, adapter->netdev);
-
}
if (adapter->pshmem) {
pci_free_consistent(adapter->pcidev,
@@ -1810,8 +1791,8 @@ static int slic_mcast_add_list(struct adapter *adapter, char *address)
}
/* Doesn't already exist. Allocate a structure to hold it */
- mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
- if (mcaddr == NULL)
+ mcaddr = kmalloc(sizeof(*mcaddr), GFP_ATOMIC);
+ if (!mcaddr)
return 1;
ether_addr_copy(mcaddr->address, address);
@@ -1892,7 +1873,7 @@ static void slic_xmit_fail(struct adapter *adapter,
{
if (adapter->xmitq_full)
netif_stop_queue(adapter->netdev);
- if ((cmd == NULL) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
+ if ((!cmd) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
switch (status) {
case XMIT_FAIL_LINK_STATE:
dev_err(&adapter->netdev->dev,
@@ -2860,7 +2841,7 @@ static int slic_card_init(struct sliccard *card, struct adapter *adapter)
if (slic_global.dynamic_intagg)
slic_intagg_set(adapter, 0);
else
- slic_intagg_set(adapter, intagg_delay);
+ slic_intagg_set(adapter, adapter->intagg_delay);
/*
* Initialize ping status to "ok"
@@ -2881,6 +2862,26 @@ card_init_err:
return status;
}
+static int slic_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct adapter *adapter = netdev_priv(dev);
+
+       coalesce->rx_coalesce_usecs = adapter->intagg_delay;
+       coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg;
+ return 0;
+}
+
+static int slic_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coalesce)
+{
+ struct adapter *adapter = netdev_priv(dev);
+
+       adapter->intagg_delay = coalesce->rx_coalesce_usecs;
+       adapter->dynamic_intagg = coalesce->use_adaptive_rx_coalesce;
+ return 0;
+}
+
static void slic_init_driver(void)
{
if (slic_first_init) {
@@ -2907,9 +2908,8 @@ static void slic_init_adapter(struct net_device *netdev,
adapter->functionnumber = (pcidev->devfn & 0x7);
adapter->slic_regs = memaddr;
adapter->irq = pcidev->irq;
-/* adapter->netdev = netdev;*/
adapter->chipid = chip_idx;
- adapter->port = 0; /*adapter->functionnumber;*/
+ adapter->port = 0;
adapter->cardindex = adapter->port;
spin_lock_init(&adapter->upr_lock);
spin_lock_init(&adapter->bit64reglock);
@@ -2982,8 +2982,8 @@ static u32 slic_card_locate(struct adapter *adapter)
/* Initialize a new card structure if need be */
if (card_hostid == SLIC_HOSTID_DEFAULT) {
- card = kzalloc(sizeof(struct sliccard), GFP_KERNEL);
- if (card == NULL)
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card)
return -ENOMEM;
card->next = slic_global.slic_card;
@@ -3033,7 +3033,7 @@ static u32 slic_card_locate(struct adapter *adapter)
}
if (!physcard) {
/* no structure allocated for this physical card yet */
- physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC);
+ physcard = kzalloc(sizeof(*physcard), GFP_ATOMIC);
if (!physcard) {
if (card_hostid == SLIC_HOSTID_DEFAULT)
kfree(card);
@@ -3069,8 +3069,6 @@ static int slic_entry_probe(struct pci_dev *pcidev,
struct sliccard *card = NULL;
int pci_using_dac = 0;
- slic_global.dynamic_intagg = dynamic_intagg;
-
err = pci_enable_device(pcidev);
if (err)
@@ -3112,19 +3110,20 @@ static int slic_entry_probe(struct pci_dev *pcidev,
goto err_out_exit_slic_probe;
}
+ netdev->ethtool_ops = &slic_ethtool_ops;
SET_NETDEV_DEV(netdev, &pcidev->dev);
pci_set_drvdata(pcidev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pcidev = pcidev;
+ slic_global.dynamic_intagg = adapter->dynamic_intagg;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
mmio_start = pci_resource_start(pcidev, 0);
mmio_len = pci_resource_len(pcidev, 0);
-/* memmapped_ioaddr = (u32)ioremap_nocache(mmio_start, mmio_len);*/
memmapped_ioaddr = ioremap(mmio_start, mmio_len);
if (!memmapped_ioaddr) {
dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
@@ -3204,5 +3203,10 @@ static void __exit slic_module_cleanup(void)
pci_unregister_driver(&slic_driver);
}
+static struct ethtool_ops slic_ethtool_ops = {
+ .get_coalesce = slic_get_coalesce,
+ .set_coalesce = slic_set_coalesce
+};
+
module_init(slic_module_init);
module_exit(slic_module_cleanup);
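
Note: slic_get_coalesce()/slic_set_coalesce() follow the standard ethtool
convention: the get hook reports driver state to user space (ethtool -c ethX)
and the set hook applies user-supplied values (ethtool -C ethX rx-usecs N
adaptive-rx on). A minimal sketch of that convention, with a hypothetical
example_priv structure standing in for the driver's adapter state:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	struct example_priv {
		u32 rx_usecs;		/* current aggregation delay */
		u32 adaptive_rx;	/* adaptive coalescing on/off */
	};

	/* report current settings to user space (ethtool -c) */
	static int example_get_coalesce(struct net_device *dev,
					struct ethtool_coalesce *ec)
	{
		struct example_priv *priv = netdev_priv(dev);

		ec->rx_coalesce_usecs = priv->rx_usecs;
		ec->use_adaptive_rx_coalesce = priv->adaptive_rx;
		return 0;
	}

	/* apply user-supplied settings (ethtool -C) */
	static int example_set_coalesce(struct net_device *dev,
					struct ethtool_coalesce *ec)
	{
		struct example_priv *priv = netdev_priv(dev);

		priv->rx_usecs = ec->rx_coalesce_usecs;
		priv->adaptive_rx = ec->use_adaptive_rx_coalesce;
		return 0;
	}
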
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 0331d34458ae..95f7cae3cc23 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
#include <linux/sizes.h>
#include "ddk750_help.h"
@@ -5,6 +6,10 @@
#include "ddk750_chip.h"
#include "ddk750_power.h"
+/* n / d + 1 / 2 = (2n + d) / 2d */
+#define roundedDiv(num, denom) ((2 * (num) + (denom)) / (2 * (denom)))
+#define MHz(x) ((x) * 1000000)
+
logical_chip_type_t getChipType(void)
{
unsigned short physicalID;
@@ -36,10 +41,10 @@ static unsigned int get_mxclk_freq(void)
return MHz(130);
pll_reg = PEEK32(MXCLK_PLL_CTRL);
- M = FIELD_GET(pll_reg, PANEL_PLL_CTRL, M);
- N = FIELD_GET(pll_reg, PANEL_PLL_CTRL, N);
- OD = FIELD_GET(pll_reg, PANEL_PLL_CTRL, OD);
- POD = FIELD_GET(pll_reg, PANEL_PLL_CTRL, POD);
+ M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
+       N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
+ OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
+ POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
return DEFAULT_INPUT_CLOCK * M / N / (1 << OD) / (1 << POD);
}
@@ -79,7 +84,7 @@ static void setChipClock(unsigned int frequency)
static void setMemoryClock(unsigned int frequency)
{
- unsigned int ulReg, divisor;
+ unsigned int reg, divisor;
/* Cheok_0509: For SM750LE, the memory clock is fixed. Nothing to set. */
if (getChipType() == SM750LE)
@@ -95,24 +100,24 @@ static void setMemoryClock(unsigned int frequency)
divisor = roundedDiv(get_mxclk_freq(), frequency);
/* Set the corresponding divisor in the register. */
- ulReg = PEEK32(CURRENT_GATE);
+ reg = PEEK32(CURRENT_GATE) & ~CURRENT_GATE_M2XCLK_MASK;
switch (divisor) {
default:
case 1:
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_1);
+ reg |= CURRENT_GATE_M2XCLK_DIV_1;
break;
case 2:
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_2);
+ reg |= CURRENT_GATE_M2XCLK_DIV_2;
break;
case 3:
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_3);
+ reg |= CURRENT_GATE_M2XCLK_DIV_3;
break;
case 4:
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, M2XCLK, DIV_4);
+ reg |= CURRENT_GATE_M2XCLK_DIV_4;
break;
}
- setCurrentGate(ulReg);
+ setCurrentGate(reg);
}
}
@@ -126,7 +131,7 @@ static void setMemoryClock(unsigned int frequency)
*/
static void setMasterClock(unsigned int frequency)
{
- unsigned int ulReg, divisor;
+ unsigned int reg, divisor;
/* Cheok_0509: For SM750LE, the memory clock is fixed. Nothing to set. */
if (getChipType() == SM750LE)
@@ -142,24 +147,24 @@ static void setMasterClock(unsigned int frequency)
divisor = roundedDiv(get_mxclk_freq(), frequency);
/* Set the corresponding divisor in the register. */
- ulReg = PEEK32(CURRENT_GATE);
+ reg = PEEK32(CURRENT_GATE) & ~CURRENT_GATE_MCLK_MASK;
switch (divisor) {
default:
case 3:
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_3);
+ reg |= CURRENT_GATE_MCLK_DIV_3;
break;
case 4:
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_4);
+ reg |= CURRENT_GATE_MCLK_DIV_4;
break;
case 6:
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_6);
+ reg |= CURRENT_GATE_MCLK_DIV_6;
break;
case 8:
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, MCLK, DIV_8);
+ reg |= CURRENT_GATE_MCLK_DIV_8;
break;
}
- setCurrentGate(ulReg);
+ setCurrentGate(reg);
}
}
@@ -174,11 +179,11 @@ unsigned int ddk750_getVMSize(void)
/* for 750,always use power mode0*/
reg = PEEK32(MODE0_GATE);
- reg = FIELD_SET(reg, MODE0_GATE, GPIO, ON);
+ reg |= MODE0_GATE_GPIO;
POKE32(MODE0_GATE, reg);
/* get frame buffer size from GPIO */
- reg = FIELD_GET(PEEK32(MISC_CTRL), MISC_CTRL, LOCALMEM_SIZE);
+ reg = PEEK32(MISC_CTRL) & MISC_CTRL_LOCALMEM_SIZE_MASK;
switch (reg) {
case MISC_CTRL_LOCALMEM_SIZE_8M:
data = SZ_8M; break; /* 8 Mega byte */
@@ -197,24 +202,22 @@ unsigned int ddk750_getVMSize(void)
int ddk750_initHw(initchip_param_t *pInitParam)
{
- unsigned int ulReg;
+ unsigned int reg;
if (pInitParam->powerMode != 0)
pInitParam->powerMode = 0;
setPowerMode(pInitParam->powerMode);
/* Enable display power gate & LOCALMEM power gate*/
- ulReg = PEEK32(CURRENT_GATE);
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, DISPLAY, ON);
- ulReg = FIELD_SET(ulReg, CURRENT_GATE, LOCALMEM, ON);
- setCurrentGate(ulReg);
+ reg = PEEK32(CURRENT_GATE);
+ reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM);
+ setCurrentGate(reg);
if (getChipType() != SM750LE) {
/* set panel pll and graphic mode via mmio_88 */
- ulReg = PEEK32(VGA_CONFIGURATION);
- ulReg = FIELD_SET(ulReg, VGA_CONFIGURATION, PLL, PANEL);
- ulReg = FIELD_SET(ulReg, VGA_CONFIGURATION, MODE, GRAPHIC);
- POKE32(VGA_CONFIGURATION, ulReg);
+ reg = PEEK32(VGA_CONFIGURATION);
+ reg |= (VGA_CONFIGURATION_PLL | VGA_CONFIGURATION_MODE);
+ POKE32(VGA_CONFIGURATION, reg);
} else {
#if defined(__i386__) || defined(__x86_64__)
/* set graphic mode via IO method */
@@ -238,36 +241,36 @@ int ddk750_initHw(initchip_param_t *pInitParam)
The memory should be resetted after changing the MXCLK.
*/
if (pInitParam->resetMemory == 1) {
- ulReg = PEEK32(MISC_CTRL);
- ulReg = FIELD_SET(ulReg, MISC_CTRL, LOCALMEM_RESET, RESET);
- POKE32(MISC_CTRL, ulReg);
+ reg = PEEK32(MISC_CTRL);
+ reg &= ~MISC_CTRL_LOCALMEM_RESET;
+ POKE32(MISC_CTRL, reg);
- ulReg = FIELD_SET(ulReg, MISC_CTRL, LOCALMEM_RESET, NORMAL);
- POKE32(MISC_CTRL, ulReg);
+ reg |= MISC_CTRL_LOCALMEM_RESET;
+ POKE32(MISC_CTRL, reg);
}
if (pInitParam->setAllEngOff == 1) {
enable2DEngine(0);
/* Disable Overlay, if a former application left it on */
- ulReg = PEEK32(VIDEO_DISPLAY_CTRL);
- ulReg = FIELD_SET(ulReg, VIDEO_DISPLAY_CTRL, PLANE, DISABLE);
- POKE32(VIDEO_DISPLAY_CTRL, ulReg);
+ reg = PEEK32(VIDEO_DISPLAY_CTRL);
+ reg &= ~DISPLAY_CTRL_PLANE;
+ POKE32(VIDEO_DISPLAY_CTRL, reg);
/* Disable video alpha, if a former application left it on */
- ulReg = PEEK32(VIDEO_ALPHA_DISPLAY_CTRL);
- ulReg = FIELD_SET(ulReg, VIDEO_ALPHA_DISPLAY_CTRL, PLANE, DISABLE);
- POKE32(VIDEO_ALPHA_DISPLAY_CTRL, ulReg);
+ reg = PEEK32(VIDEO_ALPHA_DISPLAY_CTRL);
+ reg &= ~DISPLAY_CTRL_PLANE;
+ POKE32(VIDEO_ALPHA_DISPLAY_CTRL, reg);
/* Disable alpha plane, if a former application left it on */
- ulReg = PEEK32(ALPHA_DISPLAY_CTRL);
- ulReg = FIELD_SET(ulReg, ALPHA_DISPLAY_CTRL, PLANE, DISABLE);
- POKE32(ALPHA_DISPLAY_CTRL, ulReg);
+ reg = PEEK32(ALPHA_DISPLAY_CTRL);
+ reg &= ~DISPLAY_CTRL_PLANE;
+ POKE32(ALPHA_DISPLAY_CTRL, reg);
/* Disable DMA Channel, if a former application left it on */
- ulReg = PEEK32(DMA_ABORT_INTERRUPT);
- ulReg = FIELD_SET(ulReg, DMA_ABORT_INTERRUPT, ABORT_1, ABORT);
- POKE32(DMA_ABORT_INTERRUPT, ulReg);
+ reg = PEEK32(DMA_ABORT_INTERRUPT);
+ reg |= DMA_ABORT_INTERRUPT_ABORT_1;
+ POKE32(DMA_ABORT_INTERRUPT, reg);
/* Disable DMA Power, if a former application left it on */
enableDMA(0);
@@ -337,7 +340,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
unsigned int diff;
tmpClock = pll->inputFreq * M / N / X;
- diff = absDiff(tmpClock, request_orig);
+ diff = abs(tmpClock - request_orig);
if (diff < mini_diff) {
pll->M = M;
pll->N = N;
@@ -356,24 +359,29 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
unsigned int formatPllReg(pll_value_t *pPLL)
{
- unsigned int ulPllReg = 0;
-
- /* Note that all PLL's have the same format. Here, we just use Panel PLL parameter
- to work out the bit fields in the register.
- On returning a 32 bit number, the value can be applied to any PLL in the calling function.
- */
- ulPllReg =
- FIELD_SET(0, PANEL_PLL_CTRL, BYPASS, OFF)
- | FIELD_SET(0, PANEL_PLL_CTRL, POWER, ON)
- | FIELD_SET(0, PANEL_PLL_CTRL, INPUT, OSC)
#ifndef VALIDATION_CHIP
- | FIELD_VALUE(0, PANEL_PLL_CTRL, POD, pPLL->POD)
+ unsigned int POD = pPLL->POD;
+#endif
+ unsigned int OD = pPLL->OD;
+ unsigned int M = pPLL->M;
+ unsigned int N = pPLL->N;
+ unsigned int reg = 0;
+
+ /*
+        * Note that all PLLs have the same format. Here, we just use
+ * Panel PLL parameter to work out the bit fields in the
+ * register. On returning a 32 bit number, the value can be
+ * applied to any PLL in the calling function.
+ */
+ reg = PLL_CTRL_POWER |
+#ifndef VALIDATION_CHIP
+ ((POD << PLL_CTRL_POD_SHIFT) & PLL_CTRL_POD_MASK) |
#endif
- | FIELD_VALUE(0, PANEL_PLL_CTRL, OD, pPLL->OD)
- | FIELD_VALUE(0, PANEL_PLL_CTRL, N, pPLL->N)
- | FIELD_VALUE(0, PANEL_PLL_CTRL, M, pPLL->M);
+ ((OD << PLL_CTRL_OD_SHIFT) & PLL_CTRL_OD_MASK) |
+ ((N << PLL_CTRL_N_SHIFT) & PLL_CTRL_N_MASK) |
+ ((M << PLL_CTRL_M_SHIFT) & PLL_CTRL_M_MASK);
- return ulPllReg;
+ return reg;
}
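
Note: the roundedDiv() macro added above is round-to-nearest integer
division, using the identity n/d + 1/2 = (2n + d)/2d. A worked check with
illustrative values (a 130 MHz MXCLK and a 48 MHz memory-clock request):

	/* n / d + 1 / 2 = (2n + d) / 2d, in pure integer arithmetic */
	#define roundedDiv(num, denom) ((2 * (num) + (denom)) / (2 * (denom)))

	/*
	 * 130 / 48 = 2.708..., which should round to 3:
	 * roundedDiv(130, 48) = (260 + 48) / 96 = 308 / 96 = 3.
	 * Truncating division would have picked 2 instead.
	 */
	static unsigned int pick_divisor_sketch(void)
	{
		return roundedDiv(130, 48);	/* == 3 */
	}
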
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 84f6e8b8c0e2..ca4973ee49e4 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -9,111 +9,55 @@
static void setDisplayControl(int ctrl, int disp_state)
{
/* state != 0 means turn on both timing & plane en_bit */
- unsigned long ulDisplayCtrlReg, ulReservedBits;
- int cnt;
+ unsigned long reg, val, reserved;
+ int cnt = 0;
- cnt = 0;
-
- /* Set the primary display control */
if (!ctrl) {
- ulDisplayCtrlReg = PEEK32(PANEL_DISPLAY_CTRL);
- /* Turn on/off the Panel display control */
- if (disp_state) {
- /* Timing should be enabled first before enabling the plane
- * because changing at the same time does not guarantee that
- * the plane will also enabled or disabled.
- */
- ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
- PANEL_DISPLAY_CTRL, TIMING, ENABLE);
- POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
-
- ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
- PANEL_DISPLAY_CTRL, PLANE, ENABLE);
-
- /* Added some masks to mask out the reserved bits.
- * Sometimes, the reserved bits are set/reset randomly when
- * writing to the PRIMARY_DISPLAY_CTRL, therefore, the register
- * reserved bits are needed to be masked out.
- */
- ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
- FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
- FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE);
-
- /* Somehow the register value on the plane is not set
- * until a few delay. Need to write
- * and read it a couple times
- */
- do {
- cnt++;
- POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
- } while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) !=
- (ulDisplayCtrlReg & ~ulReservedBits));
- printk("Set Panel Plane enbit:after tried %d times\n", cnt);
- } else {
- /* When turning off, there is no rule on the programming
- * sequence since whenever the clock is off, then it does not
- * matter whether the plane is enabled or disabled.
- * Note: Modifying the plane bit will take effect on the
- * next vertical sync. Need to find out if it is necessary to
- * wait for 1 vsync before modifying the timing enable bit.
- * */
- ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
- PANEL_DISPLAY_CTRL, PLANE, DISABLE);
- POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
-
- ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
- PANEL_DISPLAY_CTRL, TIMING, DISABLE);
- POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
- }
-
+ reg = PANEL_DISPLAY_CTRL;
+ reserved = PANEL_DISPLAY_CTRL_RESERVED_MASK;
} else {
- /* Set the secondary display control */
- ulDisplayCtrlReg = PEEK32(CRT_DISPLAY_CTRL);
-
- if (disp_state) {
- /* Timing should be enabled first before enabling the plane because changing at the
- same time does not guarantee that the plane will also enabled or disabled.
- */
- ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
- CRT_DISPLAY_CTRL, TIMING, ENABLE);
- POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
-
- ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
- CRT_DISPLAY_CTRL, PLANE, ENABLE);
-
- /* Added some masks to mask out the reserved bits.
- * Sometimes, the reserved bits are set/reset randomly when
- * writing to the PRIMARY_DISPLAY_CTRL, therefore, the register
- * reserved bits are needed to be masked out.
- */
-
- ulReservedBits = FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
- FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
- FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE) |
- FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_4_MASK, ENABLE);
+ reg = CRT_DISPLAY_CTRL;
+ reserved = CRT_DISPLAY_CTRL_RESERVED_MASK;
+ }
- do {
- cnt++;
- POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
- } while ((PEEK32(CRT_DISPLAY_CTRL) & ~ulReservedBits) !=
- (ulDisplayCtrlReg & ~ulReservedBits));
- printk("Set Crt Plane enbit:after tried %d times\n", cnt);
- } else {
- /* When turning off, there is no rule on the programming
- * sequence since whenever the clock is off, then it does not
- * matter whether the plane is enabled or disabled.
- * Note: Modifying the plane bit will take effect on the next
- * vertical sync. Need to find out if it is necessary to
- * wait for 1 vsync before modifying the timing enable bit.
- */
- ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
- CRT_DISPLAY_CTRL, PLANE, DISABLE);
- POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
-
- ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
- CRT_DISPLAY_CTRL, TIMING, DISABLE);
- POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
- }
+ val = PEEK32(reg);
+ if (disp_state) {
+ /*
+ * Timing should be enabled first before enabling the
+ * plane because changing at the same time does not
+                * guarantee that the plane will also be enabled or
+                * disabled.
+ */
+ val |= DISPLAY_CTRL_TIMING;
+ POKE32(reg, val);
+
+ val |= DISPLAY_CTRL_PLANE;
+
+ /*
+ * Somehow the register value on the plane is not set
+                * until after a short delay. Need to write and read
+                * it a couple of times.
+ */
+ do {
+ cnt++;
+ POKE32(reg, val);
+ } while ((PEEK32(reg) & ~reserved) != (val & ~reserved));
+ pr_debug("Set Plane enbit:after tried %d times\n", cnt);
+ } else {
+ /*
+ * When turning off, there is no rule on the
+ * programming sequence since whenever the clock is
+ * off, then it does not matter whether the plane is
+ * enabled or disabled. Note: Modifying the plane bit
+ * will take effect on the next vertical sync. Need to
+ * find out if it is necessary to wait for 1 vsync
+ * before modifying the timing enable bit.
+ */
+ val &= ~DISPLAY_CTRL_PLANE;
+ POKE32(reg, val);
+
+ val &= ~DISPLAY_CTRL_TIMING;
+ POKE32(reg, val);
}
}
@@ -126,54 +70,42 @@ static void waitNextVerticalSync(int ctrl, int delay)
/* Do not wait when the Primary PLL is off or display control is already off.
This will prevent the software to wait forever. */
- if ((FIELD_GET(PEEK32(PANEL_PLL_CTRL), PANEL_PLL_CTRL, POWER) ==
- PANEL_PLL_CTRL_POWER_OFF) ||
- (FIELD_GET(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, TIMING) ==
- PANEL_DISPLAY_CTRL_TIMING_DISABLE)) {
+ if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
+ !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
return;
}
while (delay-- > 0) {
/* Wait for end of vsync. */
do {
- status = FIELD_GET(PEEK32(SYSTEM_CTRL),
- SYSTEM_CTRL,
- PANEL_VSYNC);
- } while (status == SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
+ status = PEEK32(SYSTEM_CTRL);
+ } while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
/* Wait for start of vsync. */
do {
- status = FIELD_GET(PEEK32(SYSTEM_CTRL),
- SYSTEM_CTRL,
- PANEL_VSYNC);
- } while (status == SYSTEM_CTRL_PANEL_VSYNC_INACTIVE);
+ status = PEEK32(SYSTEM_CTRL);
+ } while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
}
} else {
/* Do not wait when the Primary PLL is off or display control is already off.
This will prevent the software to wait forever. */
- if ((FIELD_GET(PEEK32(CRT_PLL_CTRL), CRT_PLL_CTRL, POWER) ==
- CRT_PLL_CTRL_POWER_OFF) ||
- (FIELD_GET(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, TIMING) ==
- CRT_DISPLAY_CTRL_TIMING_DISABLE)) {
+ if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) ||
+ !(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
return;
}
while (delay-- > 0) {
/* Wait for end of vsync. */
do {
- status = FIELD_GET(PEEK32(SYSTEM_CTRL),
- SYSTEM_CTRL,
- CRT_VSYNC);
- } while (status == SYSTEM_CTRL_CRT_VSYNC_ACTIVE);
+ status = PEEK32(SYSTEM_CTRL);
+                       } while (status & SYSTEM_CTRL_CRT_VSYNC_ACTIVE);
/* Wait for start of vsync. */
do {
- status = FIELD_GET(PEEK32(SYSTEM_CTRL),
- SYSTEM_CTRL,
- CRT_VSYNC);
- } while (status == SYSTEM_CTRL_CRT_VSYNC_INACTIVE);
+ status = PEEK32(SYSTEM_CTRL);
+                       } while (!(status & SYSTEM_CTRL_CRT_VSYNC_ACTIVE));
}
}
}
@@ -184,22 +116,22 @@ static void swPanelPowerSequence(int disp, int delay)
/* disp should be 1 to open sequence */
reg = PEEK32(PANEL_DISPLAY_CTRL);
- reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, FPEN, disp);
+ reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
primaryWaitVerticalSync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
- reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, DATA, disp);
+ reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
primaryWaitVerticalSync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
- reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, VBIASEN, disp);
+ reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
primaryWaitVerticalSync(delay);
reg = PEEK32(PANEL_DISPLAY_CTRL);
- reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, FPEN, disp);
+ reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
POKE32(PANEL_DISPLAY_CTRL, reg);
primaryWaitVerticalSync(delay);
@@ -212,16 +144,20 @@ void ddk750_setLogicalDispOut(disp_output_t output)
if (output & PNL_2_USAGE) {
/* set panel path controller select */
reg = PEEK32(PANEL_DISPLAY_CTRL);
- reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, SELECT, (output & PNL_2_MASK)>>PNL_2_OFFSET);
+ reg &= ~PANEL_DISPLAY_CTRL_SELECT_MASK;
+ reg |= (((output & PNL_2_MASK) >> PNL_2_OFFSET) <<
+ PANEL_DISPLAY_CTRL_SELECT_SHIFT);
POKE32(PANEL_DISPLAY_CTRL, reg);
}
if (output & CRT_2_USAGE) {
/* set crt path controller select */
reg = PEEK32(CRT_DISPLAY_CTRL);
- reg = FIELD_VALUE(reg, CRT_DISPLAY_CTRL, SELECT, (output & CRT_2_MASK)>>CRT_2_OFFSET);
+ reg &= ~CRT_DISPLAY_CTRL_SELECT_MASK;
+ reg |= (((output & CRT_2_MASK) >> CRT_2_OFFSET) <<
+ CRT_DISPLAY_CTRL_SELECT_SHIFT);
/*se blank off */
- reg = FIELD_SET(reg, CRT_DISPLAY_CTRL, BLANK, OFF);
+ reg &= ~CRT_DISPLAY_CTRL_BLANK;
POKE32(CRT_DISPLAY_CTRL, reg);
}
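
Note: the rewritten setDisplayControl() keeps the original quirk handling:
reserved bits in the display-control registers can change on write, so the
readback comparison masks them out, and the plane-enable write is retried
until it sticks. The idiom, sketched as a standalone helper (the function
name is illustrative; PEEK32/POKE32 come from ddk750_help.h):

	#include <linux/printk.h>
	#include "ddk750_help.h"	/* PEEK32()/POKE32() */

	static void write_until_it_sticks(unsigned long reg, unsigned long val,
					  unsigned long reserved)
	{
		int cnt = 0;

		/* repeat the write until the readback, ignoring reserved
		 * bits, matches what was written */
		do {
			cnt++;
			POKE32(reg, val);
		} while ((PEEK32(reg) & ~reserved) != (val & ~reserved));
		pr_debug("register stuck after %d writes\n", cnt);
	}
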
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index a7a23514ac39..a4a255007c8d 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -53,44 +53,6 @@ int dviInit(
return -1; /* error */
}
-
-/*
- * dviGetVendorID
- * This function gets the vendor ID of the DVI controller chip.
- *
- * Output:
- * Vendor ID
- */
-unsigned short dviGetVendorID(void)
-{
- dvi_ctrl_device_t *pCurrentDviCtrl;
-
- pCurrentDviCtrl = g_dcftSupportedDviController;
- if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
- return pCurrentDviCtrl->pfnGetVendorId();
-
- return 0x0000;
-}
-
-
-/*
- * dviGetDeviceID
- * This function gets the device ID of the DVI controller chip.
- *
- * Output:
- * Device ID
- */
-unsigned short dviGetDeviceID(void)
-{
- dvi_ctrl_device_t *pCurrentDviCtrl;
-
- pCurrentDviCtrl = g_dcftSupportedDviController;
- if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
- return pCurrentDviCtrl->pfnGetDeviceId();
-
- return 0x0000;
-}
-
#endif
diff --git a/drivers/staging/sm750fb/ddk750_dvi.h b/drivers/staging/sm750fb/ddk750_dvi.h
index e1d4c9a2d50a..677939cb5130 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.h
+++ b/drivers/staging/sm750fb/ddk750_dvi.h
@@ -55,8 +55,5 @@ int dviInit(
unsigned char pllFilterValue
);
-unsigned short dviGetVendorID(void);
-unsigned short dviGetDeviceID(void);
-
#endif
diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h
index 5be814eed735..009db9213a73 100644
--- a/drivers/staging/sm750fb/ddk750_help.h
+++ b/drivers/staging/sm750fb/ddk750_help.h
@@ -6,7 +6,6 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#include "sm750_help.h"
/* software control endianness */
#define PEEK32(addr) readl(addr + mmio750)
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index 7be2111284f4..39c3e1cdbc0c 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -17,8 +17,7 @@ unsigned char bus_speed_mode
/* Enable GPIO 30 & 31 as IIC clock & data */
value = PEEK32(GPIO_MUX);
- value = FIELD_SET(value, GPIO_MUX, 30, I2C) |
- FIELD_SET(0, GPIO_MUX, 31, I2C);
+ value |= (GPIO_MUX_30 | GPIO_MUX_31);
POKE32(GPIO_MUX, value);
/* Enable Hardware I2C power.
@@ -27,12 +26,10 @@ unsigned char bus_speed_mode
enableI2C(1);
/* Enable the I2C Controller and set the bus speed mode */
- value = PEEK32(I2C_CTRL);
- if (bus_speed_mode == 0)
- value = FIELD_SET(value, I2C_CTRL, MODE, STANDARD);
- else
- value = FIELD_SET(value, I2C_CTRL, MODE, FAST);
- value = FIELD_SET(value, I2C_CTRL, EN, ENABLE);
+ value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN);
+ if (bus_speed_mode)
+ value |= I2C_CTRL_MODE;
+ value |= I2C_CTRL_EN;
POKE32(I2C_CTRL, value);
return 0;
@@ -43,8 +40,7 @@ void sm750_hw_i2c_close(void)
unsigned int value;
/* Disable I2C controller */
- value = PEEK32(I2C_CTRL);
- value = FIELD_SET(value, I2C_CTRL, EN, DISABLE);
+ value = PEEK32(I2C_CTRL) & ~I2C_CTRL_EN;
POKE32(I2C_CTRL, value);
/* Disable I2C Power */
@@ -52,8 +48,8 @@ void sm750_hw_i2c_close(void)
/* Set GPIO 30 & 31 back as GPIO pins */
value = PEEK32(GPIO_MUX);
- value = FIELD_SET(value, GPIO_MUX, 30, GPIO);
- value = FIELD_SET(value, GPIO_MUX, 31, GPIO);
+ value &= ~GPIO_MUX_30;
+ value &= ~GPIO_MUX_31;
POKE32(GPIO_MUX, value);
}
@@ -63,13 +59,11 @@ static long hw_i2c_wait_tx_done(void)
/* Wait until the transfer is completed. */
timeout = HWI2C_WAIT_TIMEOUT;
- while ((FIELD_GET(PEEK32(I2C_STATUS),
- I2C_STATUS, TX) != I2C_STATUS_TX_COMPLETED) &&
- (timeout != 0))
+ while (!(PEEK32(I2C_STATUS) & I2C_STATUS_TX) && (timeout != 0))
timeout--;
if (timeout == 0)
- return (-1);
+ return -1;
return 0;
}
@@ -121,14 +115,13 @@ static unsigned int hw_i2c_write_data(
POKE32(I2C_DATA0 + i, *buf++);
/* Start the I2C */
- POKE32(I2C_CTRL,
- FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
+ POKE32(I2C_CTRL, PEEK32(I2C_CTRL) | I2C_CTRL_CTRL);
/* Wait until the transfer is completed. */
if (hw_i2c_wait_tx_done() != 0)
break;
- /* Substract length */
+ /* Subtract length */
length -= (count + 1);
/* Total byte written */
@@ -184,8 +177,7 @@ static unsigned int hw_i2c_read_data(
POKE32(I2C_BYTE_COUNT, count);
/* Start the I2C */
- POKE32(I2C_CTRL,
- FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
+ POKE32(I2C_CTRL, PEEK32(I2C_CTRL) | I2C_CTRL_CTRL);
/* Wait until transaction done. */
if (hw_i2c_wait_tx_done() != 0)
@@ -195,7 +187,7 @@ static unsigned int hw_i2c_read_data(
for (i = 0; i <= count; i++)
*buf++ = PEEK32(I2C_DATA0 + i);
- /* Substract length by 16 */
+ /* Subtract length by 16 */
length -= (count + 1);
/* Number of bytes read. */
@@ -256,7 +248,7 @@ int sm750_hw_i2c_write_reg(
if (hw_i2c_write_data(addr, 2, value) == 2)
return 0;
- return (-1);
+ return -1;
}
#endif
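
Note: the series replaces the driver-private FIELD_GET()/FIELD_SET() helpers
with explicit mask-and-shift arithmetic throughout. A minimal sketch of the
recurring read-modify-write pattern, reusing the PLL_CTRL_M_* defines from
ddk750_reg.h (the helper names and new_m value are illustrative):

	#include "ddk750_help.h"	/* PEEK32()/POKE32() */
	#include "ddk750_reg.h"	/* PANEL_PLL_CTRL, PLL_CTRL_M_* */

	static void set_pll_m_sketch(unsigned int new_m)
	{
		unsigned int val = PEEK32(PANEL_PLL_CTRL);

		/* clear the field, then OR in the shifted, masked value */
		val &= ~PLL_CTRL_M_MASK;
		val |= (new_m << PLL_CTRL_M_SHIFT) & PLL_CTRL_M_MASK;
		POKE32(PANEL_PLL_CTRL, val);
	}

	static unsigned int get_pll_m_sketch(void)
	{
		/* extract the field: mask first, then shift down */
		return (PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_M_MASK) >>
			PLL_CTRL_M_SHIFT;
	}
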
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index fa35926680ab..ccb4e067661a 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -25,13 +25,12 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
Note that normal SM750/SM718 only use those two register for
auto-centering mode.
*/
- POKE32(CRT_AUTO_CENTERING_TL,
- FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, TOP, 0)
- | FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, LEFT, 0));
+ POKE32(CRT_AUTO_CENTERING_TL, 0);
POKE32(CRT_AUTO_CENTERING_BR,
- FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, BOTTOM, y-1)
- | FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, RIGHT, x-1));
+ (((y - 1) << CRT_AUTO_CENTERING_BR_BOTTOM_SHIFT) &
+ CRT_AUTO_CENTERING_BR_BOTTOM_MASK) |
+ ((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
/* Assume common fields in dispControl have been properly set before
calling this function.
@@ -39,33 +38,32 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
*/
/* Clear bit 29:27 of display control register */
- dispControl &= FIELD_CLEAR(CRT_DISPLAY_CTRL, CLK);
+ dispControl &= ~CRT_DISPLAY_CTRL_CLK_MASK;
/* Set bit 29:27 of display control register for the right clock */
- /* Note that SM750LE only need to supported 7 resoluitons. */
+       /* Note that SM750LE only needs to support 7 resolutions. */
if (x == 800 && y == 600)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL41);
+ dispControl |= CRT_DISPLAY_CTRL_CLK_PLL41;
else if (x == 1024 && y == 768)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL65);
+ dispControl |= CRT_DISPLAY_CTRL_CLK_PLL65;
else if (x == 1152 && y == 864)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
+ dispControl |= CRT_DISPLAY_CTRL_CLK_PLL80;
else if (x == 1280 && y == 768)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
+ dispControl |= CRT_DISPLAY_CTRL_CLK_PLL80;
else if (x == 1280 && y == 720)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL74);
+ dispControl |= CRT_DISPLAY_CTRL_CLK_PLL74;
else if (x == 1280 && y == 960)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
+ dispControl |= CRT_DISPLAY_CTRL_CLK_PLL108;
else if (x == 1280 && y == 1024)
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
+ dispControl |= CRT_DISPLAY_CTRL_CLK_PLL108;
else /* default to VGA clock */
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL25);
+ dispControl |= CRT_DISPLAY_CTRL_CLK_PLL25;
/* Set bit 25:24 of display controller */
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CRTSELECT, CRT);
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, RGBBIT, 24BIT);
+ dispControl |= (CRT_DISPLAY_CTRL_CRTSELECT | CRT_DISPLAY_CTRL_RGBBIT);
/* Set bit 14 of display controller */
- dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLOCK_PHASE, ACTIVE_LOW);
+       dispControl |= DISPLAY_CTRL_CLOCK_PHASE;
POKE32(CRT_DISPLAY_CTRL, dispControl);
@@ -79,85 +77,105 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
{
int ret = 0;
int cnt = 0;
- unsigned int ulTmpValue, ulReg;
+ unsigned int tmp, reg;
if (pll->clockType == SECONDARY_PLL) {
/* programe secondary pixel clock */
POKE32(CRT_PLL_CTRL, formatPllReg(pll));
POKE32(CRT_HORIZONTAL_TOTAL,
- FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
- | FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
+ (((pModeParam->horizontal_total - 1) <<
+ CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
+ CRT_HORIZONTAL_TOTAL_TOTAL_MASK) |
+ ((pModeParam->horizontal_display_end - 1) &
+ CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK));
POKE32(CRT_HORIZONTAL_SYNC,
- FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
- | FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
+ ((pModeParam->horizontal_sync_width <<
+ CRT_HORIZONTAL_SYNC_WIDTH_SHIFT) &
+ CRT_HORIZONTAL_SYNC_WIDTH_MASK) |
+ ((pModeParam->horizontal_sync_start - 1) &
+ CRT_HORIZONTAL_SYNC_START_MASK));
POKE32(CRT_VERTICAL_TOTAL,
- FIELD_VALUE(0, CRT_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
- | FIELD_VALUE(0, CRT_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
+ (((pModeParam->vertical_total - 1) <<
+ CRT_VERTICAL_TOTAL_TOTAL_SHIFT) &
+ CRT_VERTICAL_TOTAL_TOTAL_MASK) |
+ ((pModeParam->vertical_display_end - 1) &
+ CRT_VERTICAL_TOTAL_DISPLAY_END_MASK));
POKE32(CRT_VERTICAL_SYNC,
- FIELD_VALUE(0, CRT_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
- | FIELD_VALUE(0, CRT_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
+ ((pModeParam->vertical_sync_height <<
+ CRT_VERTICAL_SYNC_HEIGHT_SHIFT) &
+ CRT_VERTICAL_SYNC_HEIGHT_MASK) |
+ ((pModeParam->vertical_sync_start - 1) &
+ CRT_VERTICAL_SYNC_START_MASK));
- ulTmpValue = FIELD_VALUE(0, CRT_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)|
- FIELD_VALUE(0, CRT_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)|
- FIELD_SET(0, CRT_DISPLAY_CTRL, TIMING, ENABLE)|
- FIELD_SET(0, CRT_DISPLAY_CTRL, PLANE, ENABLE);
-
+ tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
+ if (pModeParam->vertical_sync_polarity)
+ tmp |= DISPLAY_CTRL_VSYNC_PHASE;
+ if (pModeParam->horizontal_sync_polarity)
+ tmp |= DISPLAY_CTRL_HSYNC_PHASE;
if (getChipType() == SM750LE) {
- displayControlAdjust_SM750LE(pModeParam, ulTmpValue);
+ displayControlAdjust_SM750LE(pModeParam, tmp);
} else {
- ulReg = PEEK32(CRT_DISPLAY_CTRL)
- & FIELD_CLEAR(CRT_DISPLAY_CTRL, VSYNC_PHASE)
- & FIELD_CLEAR(CRT_DISPLAY_CTRL, HSYNC_PHASE)
- & FIELD_CLEAR(CRT_DISPLAY_CTRL, TIMING)
- & FIELD_CLEAR(CRT_DISPLAY_CTRL, PLANE);
+ reg = PEEK32(CRT_DISPLAY_CTRL) &
+ ~(DISPLAY_CTRL_VSYNC_PHASE |
+ DISPLAY_CTRL_HSYNC_PHASE |
+ DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE);
- POKE32(CRT_DISPLAY_CTRL, ulTmpValue|ulReg);
+ POKE32(CRT_DISPLAY_CTRL, tmp | reg);
}
} else if (pll->clockType == PRIMARY_PLL) {
- unsigned int ulReservedBits;
+ unsigned int reserved;
POKE32(PANEL_PLL_CTRL, formatPllReg(pll));
- POKE32(PANEL_HORIZONTAL_TOTAL,
- FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
- | FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
+ reg = ((pModeParam->horizontal_total - 1) <<
+ PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
+ PANEL_HORIZONTAL_TOTAL_TOTAL_MASK;
+ reg |= ((pModeParam->horizontal_display_end - 1) &
+ PANEL_HORIZONTAL_TOTAL_DISPLAY_END_MASK);
+ POKE32(PANEL_HORIZONTAL_TOTAL, reg);
POKE32(PANEL_HORIZONTAL_SYNC,
- FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
- | FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
+ ((pModeParam->horizontal_sync_width <<
+ PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT) &
+ PANEL_HORIZONTAL_SYNC_WIDTH_MASK) |
+ ((pModeParam->horizontal_sync_start - 1) &
+ PANEL_HORIZONTAL_SYNC_START_MASK));
POKE32(PANEL_VERTICAL_TOTAL,
- FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
- | FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
+ (((pModeParam->vertical_total - 1) <<
+ PANEL_VERTICAL_TOTAL_TOTAL_SHIFT) &
+ PANEL_VERTICAL_TOTAL_TOTAL_MASK) |
+ ((pModeParam->vertical_display_end - 1) &
+ PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK));
POKE32(PANEL_VERTICAL_SYNC,
- FIELD_VALUE(0, PANEL_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
- | FIELD_VALUE(0, PANEL_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
-
- ulTmpValue = FIELD_VALUE(0, PANEL_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)|
- FIELD_VALUE(0, PANEL_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)|
- FIELD_VALUE(0, PANEL_DISPLAY_CTRL, CLOCK_PHASE, pModeParam->clock_phase_polarity)|
- FIELD_SET(0, PANEL_DISPLAY_CTRL, TIMING, ENABLE)|
- FIELD_SET(0, PANEL_DISPLAY_CTRL, PLANE, ENABLE);
-
- ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
- FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
- FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE)|
- FIELD_SET(0, PANEL_DISPLAY_CTRL, VSYNC, ACTIVE_LOW);
-
- ulReg = (PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, CLOCK_PHASE)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, VSYNC_PHASE)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, HSYNC_PHASE)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, TIMING)
- & FIELD_CLEAR(PANEL_DISPLAY_CTRL, PLANE);
-
+ ((pModeParam->vertical_sync_height <<
+ PANEL_VERTICAL_SYNC_HEIGHT_SHIFT) &
+ PANEL_VERTICAL_SYNC_HEIGHT_MASK) |
+ ((pModeParam->vertical_sync_start - 1) &
+ PANEL_VERTICAL_SYNC_START_MASK));
+
+ tmp = DISPLAY_CTRL_TIMING | DISPLAY_CTRL_PLANE;
+ if (pModeParam->vertical_sync_polarity)
+ tmp |= DISPLAY_CTRL_VSYNC_PHASE;
+ if (pModeParam->horizontal_sync_polarity)
+ tmp |= DISPLAY_CTRL_HSYNC_PHASE;
+ if (pModeParam->clock_phase_polarity)
+ tmp |= DISPLAY_CTRL_CLOCK_PHASE;
+
+ reserved = PANEL_DISPLAY_CTRL_RESERVED_MASK |
+ PANEL_DISPLAY_CTRL_VSYNC;
+
+ reg = (PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) &
+ ~(DISPLAY_CTRL_CLOCK_PHASE | DISPLAY_CTRL_VSYNC_PHASE |
+ DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING |
+ DISPLAY_CTRL_PLANE);
/* May a hardware bug or just my test chip (not confirmed).
* PANEL_DISPLAY_CTRL register seems requiring few writes
@@ -167,13 +185,14 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
* next vertical sync to turn on/off the plane.
*/
- POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg);
+ POKE32(PANEL_DISPLAY_CTRL, tmp | reg);
- while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) != (ulTmpValue|ulReg)) {
+ while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) !=
+ (tmp | reg)) {
cnt++;
if (cnt > 1000)
break;
- POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg);
+ POKE32(PANEL_DISPLAY_CTRL, tmp | reg);
}
} else {
ret = -1;
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 667e4f822544..b3c3791b95bd 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -7,12 +7,12 @@ void ddk750_setDPMS(DPMS_t state)
unsigned int value;
if (getChipType() == SM750LE) {
- value = PEEK32(CRT_DISPLAY_CTRL);
- POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(value, CRT_DISPLAY_CTRL,
- DPMS, state));
+ value = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_DPMS_MASK;
+ value |= (state << CRT_DISPLAY_CTRL_DPMS_SHIFT);
+ POKE32(CRT_DISPLAY_CTRL, value);
} else {
value = PEEK32(SYSTEM_CTRL);
- value = FIELD_VALUE(value, SYSTEM_CTRL, DPMS, state);
+               value = (value & ~SYSTEM_CTRL_DPMS_MASK) |
+                       (state << SYSTEM_CTRL_DPMS_SHIFT);
POKE32(SYSTEM_CTRL, value);
}
}
@@ -21,7 +21,7 @@ static unsigned int getPowerMode(void)
{
if (getChipType() == SM750LE)
return 0;
- return FIELD_GET(PEEK32(POWER_MODE_CTRL), POWER_MODE_CTRL, MODE);
+ return PEEK32(POWER_MODE_CTRL) & POWER_MODE_CTRL_MODE_MASK;
}
@@ -33,25 +33,22 @@ void setPowerMode(unsigned int powerMode)
{
unsigned int control_value = 0;
- control_value = PEEK32(POWER_MODE_CTRL);
+ control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
if (getChipType() == SM750LE)
return;
switch (powerMode) {
case POWER_MODE_CTRL_MODE_MODE0:
- control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
- MODE0);
+ control_value |= POWER_MODE_CTRL_MODE_MODE0;
break;
case POWER_MODE_CTRL_MODE_MODE1:
- control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
- MODE1);
+ control_value |= POWER_MODE_CTRL_MODE_MODE1;
break;
case POWER_MODE_CTRL_MODE_SLEEP:
- control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
- SLEEP);
+ control_value |= POWER_MODE_CTRL_MODE_SLEEP;
break;
default:
@@ -60,17 +57,15 @@ void setPowerMode(unsigned int powerMode)
/* Set up other fields in Power Control Register */
if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) {
- control_value =
+ control_value &= ~POWER_MODE_CTRL_OSC_INPUT;
#ifdef VALIDATION_CHIP
- FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, OFF) |
+ control_value &= ~POWER_MODE_CTRL_336CLK;
#endif
- FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT, OFF);
} else {
- control_value =
+ control_value |= POWER_MODE_CTRL_OSC_INPUT;
#ifdef VALIDATION_CHIP
- FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, ON) |
+ control_value |= POWER_MODE_CTRL_336CLK;
#endif
- FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT, ON);
}
/* Program new power mode. */
@@ -111,13 +106,10 @@ void enable2DEngine(unsigned int enable)
u32 gate;
gate = PEEK32(CURRENT_GATE);
- if (enable) {
- gate = FIELD_SET(gate, CURRENT_GATE, DE, ON);
- gate = FIELD_SET(gate, CURRENT_GATE, CSC, ON);
- } else {
- gate = FIELD_SET(gate, CURRENT_GATE, DE, OFF);
- gate = FIELD_SET(gate, CURRENT_GATE, CSC, OFF);
- }
+ if (enable)
+ gate |= (CURRENT_GATE_DE | CURRENT_GATE_CSC);
+ else
+ gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC);
setCurrentGate(gate);
}
@@ -129,9 +121,9 @@ void enableDMA(unsigned int enable)
/* Enable DMA Gate */
gate = PEEK32(CURRENT_GATE);
if (enable)
- gate = FIELD_SET(gate, CURRENT_GATE, DMA, ON);
+ gate |= CURRENT_GATE_DMA;
else
- gate = FIELD_SET(gate, CURRENT_GATE, DMA, OFF);
+ gate &= ~CURRENT_GATE_DMA;
setCurrentGate(gate);
}
@@ -146,9 +138,9 @@ void enableGPIO(unsigned int enable)
/* Enable GPIO Gate */
gate = PEEK32(CURRENT_GATE);
if (enable)
- gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
+ gate |= CURRENT_GATE_GPIO;
else
- gate = FIELD_SET(gate, CURRENT_GATE, GPIO, OFF);
+ gate &= ~CURRENT_GATE_GPIO;
setCurrentGate(gate);
}
@@ -163,9 +155,9 @@ void enableI2C(unsigned int enable)
/* Enable I2C Gate */
gate = PEEK32(CURRENT_GATE);
if (enable)
- gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON);
+ gate |= CURRENT_GATE_I2C;
else
- gate = FIELD_SET(gate, CURRENT_GATE, I2C, OFF);
+ gate &= ~CURRENT_GATE_I2C;
setCurrentGate(gate);
}
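
Editor's note: after this conversion, enable2DEngine(), enableDMA(),
enableGPIO() and enableI2C() all reduce to the same set-or-clear idiom on
CURRENT_GATE. If further consolidation were wanted, they could share a helper
along these lines (a sketch only; set_gate_bits() is not in the driver):

	static void set_gate_bits(u32 bits, unsigned int enable)
	{
		u32 gate = PEEK32(CURRENT_GATE);

		if (enable)
			gate |= bits;
		else
			gate &= ~bits;
		setCurrentGate(gate);
	}
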
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 6e804d990cff..5963691f9a68 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -9,13 +9,10 @@ typedef enum _DPMS_t {
}
DPMS_t;
-#define setDAC(off) \
- { \
- POKE32(MISC_CTRL, FIELD_VALUE(PEEK32(MISC_CTRL), \
- MISC_CTRL, \
- DAC_POWER, \
- off)); \
- }
+#define setDAC(off) { \
+ POKE32(MISC_CTRL, \
+ (PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
+}
void ddk750_setDPMS(DPMS_t);
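
Editor's note: in the rewritten setDAC() macro, the caller's argument is ORed
in after the power-off bit is cleared, so the expected usage is presumably to
pass either 0 or the bit itself:

	setDAC(0);				/* DAC powered on */
	setDAC(MISC_CTRL_DAC_POWER_OFF);	/* DAC powered off */
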
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 16a01c25442c..955247979aaa 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -3,1865 +3,1149 @@
/* New register for SM750LE */
#define DE_STATE1 0x100054
-#define DE_STATE1_DE_ABORT 0:0
-#define DE_STATE1_DE_ABORT_OFF 0
-#define DE_STATE1_DE_ABORT_ON 1
+#define DE_STATE1_DE_ABORT BIT(0)
#define DE_STATE2 0x100058
-#define DE_STATE2_DE_FIFO 3:3
-#define DE_STATE2_DE_FIFO_NOTEMPTY 0
-#define DE_STATE2_DE_FIFO_EMPTY 1
-#define DE_STATE2_DE_STATUS 2:2
-#define DE_STATE2_DE_STATUS_IDLE 0
-#define DE_STATE2_DE_STATUS_BUSY 1
-#define DE_STATE2_DE_MEM_FIFO 1:1
-#define DE_STATE2_DE_MEM_FIFO_NOTEMPTY 0
-#define DE_STATE2_DE_MEM_FIFO_EMPTY 1
-#define DE_STATE2_DE_RESERVED 0:0
-
-
+#define DE_STATE2_DE_FIFO_EMPTY BIT(3)
+#define DE_STATE2_DE_STATUS_BUSY BIT(2)
+#define DE_STATE2_DE_MEM_FIFO_EMPTY BIT(1)
#define SYSTEM_CTRL 0x000000
-#define SYSTEM_CTRL_DPMS 31:30
-#define SYSTEM_CTRL_DPMS_VPHP 0
-#define SYSTEM_CTRL_DPMS_VPHN 1
-#define SYSTEM_CTRL_DPMS_VNHP 2
-#define SYSTEM_CTRL_DPMS_VNHN 3
-#define SYSTEM_CTRL_PCI_BURST 29:29
-#define SYSTEM_CTRL_PCI_BURST_OFF 0
-#define SYSTEM_CTRL_PCI_BURST_ON 1
-#define SYSTEM_CTRL_PCI_MASTER 25:25
-#define SYSTEM_CTRL_PCI_MASTER_OFF 0
-#define SYSTEM_CTRL_PCI_MASTER_ON 1
-#define SYSTEM_CTRL_LATENCY_TIMER 24:24
-#define SYSTEM_CTRL_LATENCY_TIMER_ON 0
-#define SYSTEM_CTRL_LATENCY_TIMER_OFF 1
-#define SYSTEM_CTRL_DE_FIFO 23:23
-#define SYSTEM_CTRL_DE_FIFO_NOTEMPTY 0
-#define SYSTEM_CTRL_DE_FIFO_EMPTY 1
-#define SYSTEM_CTRL_DE_STATUS 22:22
-#define SYSTEM_CTRL_DE_STATUS_IDLE 0
-#define SYSTEM_CTRL_DE_STATUS_BUSY 1
-#define SYSTEM_CTRL_DE_MEM_FIFO 21:21
-#define SYSTEM_CTRL_DE_MEM_FIFO_NOTEMPTY 0
-#define SYSTEM_CTRL_DE_MEM_FIFO_EMPTY 1
-#define SYSTEM_CTRL_CSC_STATUS 20:20
-#define SYSTEM_CTRL_CSC_STATUS_IDLE 0
-#define SYSTEM_CTRL_CSC_STATUS_BUSY 1
-#define SYSTEM_CTRL_CRT_VSYNC 19:19
-#define SYSTEM_CTRL_CRT_VSYNC_INACTIVE 0
-#define SYSTEM_CTRL_CRT_VSYNC_ACTIVE 1
-#define SYSTEM_CTRL_PANEL_VSYNC 18:18
-#define SYSTEM_CTRL_PANEL_VSYNC_INACTIVE 0
-#define SYSTEM_CTRL_PANEL_VSYNC_ACTIVE 1
-#define SYSTEM_CTRL_CURRENT_BUFFER 17:17
-#define SYSTEM_CTRL_CURRENT_BUFFER_NORMAL 0
-#define SYSTEM_CTRL_CURRENT_BUFFER_FLIP_PENDING 1
-#define SYSTEM_CTRL_DMA_STATUS 16:16
-#define SYSTEM_CTRL_DMA_STATUS_IDLE 0
-#define SYSTEM_CTRL_DMA_STATUS_BUSY 1
-#define SYSTEM_CTRL_PCI_BURST_READ 15:15
-#define SYSTEM_CTRL_PCI_BURST_READ_OFF 0
-#define SYSTEM_CTRL_PCI_BURST_READ_ON 1
-#define SYSTEM_CTRL_DE_ABORT 13:13
-#define SYSTEM_CTRL_DE_ABORT_OFF 0
-#define SYSTEM_CTRL_DE_ABORT_ON 1
-#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK 11:11
-#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK_OFF 0
-#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK_ON 1
-#define SYSTEM_CTRL_PCI_RETRY 7:7
-#define SYSTEM_CTRL_PCI_RETRY_ON 0
-#define SYSTEM_CTRL_PCI_RETRY_OFF 1
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE 5:4
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_1 0
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_2 1
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_4 2
-#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_8 3
-#define SYSTEM_CTRL_CRT_TRISTATE 3:3
-#define SYSTEM_CTRL_CRT_TRISTATE_OFF 0
-#define SYSTEM_CTRL_CRT_TRISTATE_ON 1
-#define SYSTEM_CTRL_PCIMEM_TRISTATE 2:2
-#define SYSTEM_CTRL_PCIMEM_TRISTATE_OFF 0
-#define SYSTEM_CTRL_PCIMEM_TRISTATE_ON 1
-#define SYSTEM_CTRL_LOCALMEM_TRISTATE 1:1
-#define SYSTEM_CTRL_LOCALMEM_TRISTATE_OFF 0
-#define SYSTEM_CTRL_LOCALMEM_TRISTATE_ON 1
-#define SYSTEM_CTRL_PANEL_TRISTATE 0:0
-#define SYSTEM_CTRL_PANEL_TRISTATE_OFF 0
-#define SYSTEM_CTRL_PANEL_TRISTATE_ON 1
+#define SYSTEM_CTRL_DPMS_MASK (0x3 << 30)
+#define SYSTEM_CTRL_DPMS_VPHP (0x0 << 30)
+#define SYSTEM_CTRL_DPMS_VPHN (0x1 << 30)
+#define SYSTEM_CTRL_DPMS_VNHP (0x2 << 30)
+#define SYSTEM_CTRL_DPMS_VNHN (0x3 << 30)
+#define SYSTEM_CTRL_PCI_BURST BIT(29)
+#define SYSTEM_CTRL_PCI_MASTER BIT(25)
+#define SYSTEM_CTRL_LATENCY_TIMER_OFF BIT(24)
+#define SYSTEM_CTRL_DE_FIFO_EMPTY BIT(23)
+#define SYSTEM_CTRL_DE_STATUS_BUSY BIT(22)
+#define SYSTEM_CTRL_DE_MEM_FIFO_EMPTY BIT(21)
+#define SYSTEM_CTRL_CSC_STATUS_BUSY BIT(20)
+#define SYSTEM_CTRL_CRT_VSYNC_ACTIVE BIT(19)
+#define SYSTEM_CTRL_PANEL_VSYNC_ACTIVE BIT(18)
+#define SYSTEM_CTRL_CURRENT_BUFFER_FLIP_PENDING BIT(17)
+#define SYSTEM_CTRL_DMA_STATUS_BUSY BIT(16)
+#define SYSTEM_CTRL_PCI_BURST_READ BIT(15)
+#define SYSTEM_CTRL_DE_ABORT BIT(13)
+#define SYSTEM_CTRL_PCI_SUBSYS_ID_LOCK BIT(11)
+#define SYSTEM_CTRL_PCI_RETRY_OFF BIT(7)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_MASK (0x3 << 4)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_1 (0x0 << 4)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_2 (0x1 << 4)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_4 (0x2 << 4)
+#define SYSTEM_CTRL_PCI_SLAVE_BURST_READ_SIZE_8 (0x3 << 4)
+#define SYSTEM_CTRL_CRT_TRISTATE BIT(3)
+#define SYSTEM_CTRL_PCIMEM_TRISTATE BIT(2)
+#define SYSTEM_CTRL_LOCALMEM_TRISTATE BIT(1)
+#define SYSTEM_CTRL_PANEL_TRISTATE BIT(0)
#define MISC_CTRL 0x000004
-#define MISC_CTRL_DRAM_RERESH_COUNT 27:27
-#define MISC_CTRL_DRAM_RERESH_COUNT_1ROW 0
-#define MISC_CTRL_DRAM_RERESH_COUNT_3ROW 1
-#define MISC_CTRL_DRAM_REFRESH_TIME 26:25
-#define MISC_CTRL_DRAM_REFRESH_TIME_8 0
-#define MISC_CTRL_DRAM_REFRESH_TIME_16 1
-#define MISC_CTRL_DRAM_REFRESH_TIME_32 2
-#define MISC_CTRL_DRAM_REFRESH_TIME_64 3
-#define MISC_CTRL_INT_OUTPUT 24:24
-#define MISC_CTRL_INT_OUTPUT_NORMAL 0
-#define MISC_CTRL_INT_OUTPUT_INVERT 1
-#define MISC_CTRL_PLL_CLK_COUNT 23:23
-#define MISC_CTRL_PLL_CLK_COUNT_OFF 0
-#define MISC_CTRL_PLL_CLK_COUNT_ON 1
-#define MISC_CTRL_DAC_POWER 20:20
-#define MISC_CTRL_DAC_POWER_ON 0
-#define MISC_CTRL_DAC_POWER_OFF 1
-#define MISC_CTRL_CLK_SELECT 16:16
-#define MISC_CTRL_CLK_SELECT_OSC 0
-#define MISC_CTRL_CLK_SELECT_TESTCLK 1
-#define MISC_CTRL_DRAM_COLUMN_SIZE 15:14
-#define MISC_CTRL_DRAM_COLUMN_SIZE_256 0
-#define MISC_CTRL_DRAM_COLUMN_SIZE_512 1
-#define MISC_CTRL_DRAM_COLUMN_SIZE_1024 2
-#define MISC_CTRL_LOCALMEM_SIZE 13:12
-#define MISC_CTRL_LOCALMEM_SIZE_8M 3
-#define MISC_CTRL_LOCALMEM_SIZE_16M 0
-#define MISC_CTRL_LOCALMEM_SIZE_32M 1
-#define MISC_CTRL_LOCALMEM_SIZE_64M 2
-#define MISC_CTRL_DRAM_TWTR 11:11
-#define MISC_CTRL_DRAM_TWTR_2CLK 0
-#define MISC_CTRL_DRAM_TWTR_1CLK 1
-#define MISC_CTRL_DRAM_TWR 10:10
-#define MISC_CTRL_DRAM_TWR_3CLK 0
-#define MISC_CTRL_DRAM_TWR_2CLK 1
-#define MISC_CTRL_DRAM_TRP 9:9
-#define MISC_CTRL_DRAM_TRP_3CLK 0
-#define MISC_CTRL_DRAM_TRP_4CLK 1
-#define MISC_CTRL_DRAM_TRFC 8:8
-#define MISC_CTRL_DRAM_TRFC_12CLK 0
-#define MISC_CTRL_DRAM_TRFC_14CLK 1
-#define MISC_CTRL_DRAM_TRAS 7:7
-#define MISC_CTRL_DRAM_TRAS_7CLK 0
-#define MISC_CTRL_DRAM_TRAS_8CLK 1
-#define MISC_CTRL_LOCALMEM_RESET 6:6
-#define MISC_CTRL_LOCALMEM_RESET_RESET 0
-#define MISC_CTRL_LOCALMEM_RESET_NORMAL 1
-#define MISC_CTRL_LOCALMEM_STATE 5:5
-#define MISC_CTRL_LOCALMEM_STATE_ACTIVE 0
-#define MISC_CTRL_LOCALMEM_STATE_INACTIVE 1
-#define MISC_CTRL_CPU_CAS_LATENCY 4:4
-#define MISC_CTRL_CPU_CAS_LATENCY_2CLK 0
-#define MISC_CTRL_CPU_CAS_LATENCY_3CLK 1
-#define MISC_CTRL_DLL 3:3
-#define MISC_CTRL_DLL_ON 0
-#define MISC_CTRL_DLL_OFF 1
-#define MISC_CTRL_DRAM_OUTPUT 2:2
-#define MISC_CTRL_DRAM_OUTPUT_LOW 0
-#define MISC_CTRL_DRAM_OUTPUT_HIGH 1
-#define MISC_CTRL_LOCALMEM_BUS_SIZE 1:1
-#define MISC_CTRL_LOCALMEM_BUS_SIZE_32 0
-#define MISC_CTRL_LOCALMEM_BUS_SIZE_64 1
-#define MISC_CTRL_EMBEDDED_LOCALMEM 0:0
-#define MISC_CTRL_EMBEDDED_LOCALMEM_ON 0
-#define MISC_CTRL_EMBEDDED_LOCALMEM_OFF 1
+#define MISC_CTRL_DRAM_RERESH_COUNT BIT(27)
+#define MISC_CTRL_DRAM_REFRESH_TIME_MASK (0x3 << 25)
+#define MISC_CTRL_DRAM_REFRESH_TIME_8 (0x0 << 25)
+#define MISC_CTRL_DRAM_REFRESH_TIME_16 (0x1 << 25)
+#define MISC_CTRL_DRAM_REFRESH_TIME_32 (0x2 << 25)
+#define MISC_CTRL_DRAM_REFRESH_TIME_64 (0x3 << 25)
+#define MISC_CTRL_INT_OUTPUT_INVERT BIT(24)
+#define MISC_CTRL_PLL_CLK_COUNT BIT(23)
+#define MISC_CTRL_DAC_POWER_OFF BIT(20)
+#define MISC_CTRL_CLK_SELECT_TESTCLK BIT(16)
+#define MISC_CTRL_DRAM_COLUMN_SIZE_MASK (0x3 << 14)
+#define MISC_CTRL_DRAM_COLUMN_SIZE_256 (0x0 << 14)
+#define MISC_CTRL_DRAM_COLUMN_SIZE_512 (0x1 << 14)
+#define MISC_CTRL_DRAM_COLUMN_SIZE_1024 (0x2 << 14)
+#define MISC_CTRL_LOCALMEM_SIZE_MASK (0x3 << 12)
+#define MISC_CTRL_LOCALMEM_SIZE_8M (0x3 << 12)
+#define MISC_CTRL_LOCALMEM_SIZE_16M (0x0 << 12)
+#define MISC_CTRL_LOCALMEM_SIZE_32M (0x1 << 12)
+#define MISC_CTRL_LOCALMEM_SIZE_64M (0x2 << 12)
+#define MISC_CTRL_DRAM_TWTR BIT(11)
+#define MISC_CTRL_DRAM_TWR BIT(10)
+#define MISC_CTRL_DRAM_TRP BIT(9)
+#define MISC_CTRL_DRAM_TRFC BIT(8)
+#define MISC_CTRL_DRAM_TRAS BIT(7)
+#define MISC_CTRL_LOCALMEM_RESET BIT(6)
+#define MISC_CTRL_LOCALMEM_STATE_INACTIVE BIT(5)
+#define MISC_CTRL_CPU_CAS_LATENCY BIT(4)
+#define MISC_CTRL_DLL_OFF BIT(3)
+#define MISC_CTRL_DRAM_OUTPUT_HIGH BIT(2)
+#define MISC_CTRL_LOCALMEM_BUS_SIZE BIT(1)
+#define MISC_CTRL_EMBEDDED_LOCALMEM_OFF BIT(0)
#define GPIO_MUX 0x000008
-#define GPIO_MUX_31 31:31
-#define GPIO_MUX_31_GPIO 0
-#define GPIO_MUX_31_I2C 1
-#define GPIO_MUX_30 30:30
-#define GPIO_MUX_30_GPIO 0
-#define GPIO_MUX_30_I2C 1
-#define GPIO_MUX_29 29:29
-#define GPIO_MUX_29_GPIO 0
-#define GPIO_MUX_29_SSP1 1
-#define GPIO_MUX_28 28:28
-#define GPIO_MUX_28_GPIO 0
-#define GPIO_MUX_28_SSP1 1
-#define GPIO_MUX_27 27:27
-#define GPIO_MUX_27_GPIO 0
-#define GPIO_MUX_27_SSP1 1
-#define GPIO_MUX_26 26:26
-#define GPIO_MUX_26_GPIO 0
-#define GPIO_MUX_26_SSP1 1
-#define GPIO_MUX_25 25:25
-#define GPIO_MUX_25_GPIO 0
-#define GPIO_MUX_25_SSP1 1
-#define GPIO_MUX_24 24:24
-#define GPIO_MUX_24_GPIO 0
-#define GPIO_MUX_24_SSP0 1
-#define GPIO_MUX_23 23:23
-#define GPIO_MUX_23_GPIO 0
-#define GPIO_MUX_23_SSP0 1
-#define GPIO_MUX_22 22:22
-#define GPIO_MUX_22_GPIO 0
-#define GPIO_MUX_22_SSP0 1
-#define GPIO_MUX_21 21:21
-#define GPIO_MUX_21_GPIO 0
-#define GPIO_MUX_21_SSP0 1
-#define GPIO_MUX_20 20:20
-#define GPIO_MUX_20_GPIO 0
-#define GPIO_MUX_20_SSP0 1
-#define GPIO_MUX_19 19:19
-#define GPIO_MUX_19_GPIO 0
-#define GPIO_MUX_19_PWM 1
-#define GPIO_MUX_18 18:18
-#define GPIO_MUX_18_GPIO 0
-#define GPIO_MUX_18_PWM 1
-#define GPIO_MUX_17 17:17
-#define GPIO_MUX_17_GPIO 0
-#define GPIO_MUX_17_PWM 1
-#define GPIO_MUX_16 16:16
-#define GPIO_MUX_16_GPIO_ZVPORT 0
-#define GPIO_MUX_16_TEST_DATA 1
-#define GPIO_MUX_15 15:15
-#define GPIO_MUX_15_GPIO_ZVPORT 0
-#define GPIO_MUX_15_TEST_DATA 1
-#define GPIO_MUX_14 14:14
-#define GPIO_MUX_14_GPIO_ZVPORT 0
-#define GPIO_MUX_14_TEST_DATA 1
-#define GPIO_MUX_13 13:13
-#define GPIO_MUX_13_GPIO_ZVPORT 0
-#define GPIO_MUX_13_TEST_DATA 1
-#define GPIO_MUX_12 12:12
-#define GPIO_MUX_12_GPIO_ZVPORT 0
-#define GPIO_MUX_12_TEST_DATA 1
-#define GPIO_MUX_11 11:11
-#define GPIO_MUX_11_GPIO_ZVPORT 0
-#define GPIO_MUX_11_TEST_DATA 1
-#define GPIO_MUX_10 10:10
-#define GPIO_MUX_10_GPIO_ZVPORT 0
-#define GPIO_MUX_10_TEST_DATA 1
-#define GPIO_MUX_9 9:9
-#define GPIO_MUX_9_GPIO_ZVPORT 0
-#define GPIO_MUX_9_TEST_DATA 1
-#define GPIO_MUX_8 8:8
-#define GPIO_MUX_8_GPIO_ZVPORT 0
-#define GPIO_MUX_8_TEST_DATA 1
-#define GPIO_MUX_7 7:7
-#define GPIO_MUX_7_GPIO_ZVPORT 0
-#define GPIO_MUX_7_TEST_DATA 1
-#define GPIO_MUX_6 6:6
-#define GPIO_MUX_6_GPIO_ZVPORT 0
-#define GPIO_MUX_6_TEST_DATA 1
-#define GPIO_MUX_5 5:5
-#define GPIO_MUX_5_GPIO_ZVPORT 0
-#define GPIO_MUX_5_TEST_DATA 1
-#define GPIO_MUX_4 4:4
-#define GPIO_MUX_4_GPIO_ZVPORT 0
-#define GPIO_MUX_4_TEST_DATA 1
-#define GPIO_MUX_3 3:3
-#define GPIO_MUX_3_GPIO_ZVPORT 0
-#define GPIO_MUX_3_TEST_DATA 1
-#define GPIO_MUX_2 2:2
-#define GPIO_MUX_2_GPIO_ZVPORT 0
-#define GPIO_MUX_2_TEST_DATA 1
-#define GPIO_MUX_1 1:1
-#define GPIO_MUX_1_GPIO_ZVPORT 0
-#define GPIO_MUX_1_TEST_DATA 1
-#define GPIO_MUX_0 0:0
-#define GPIO_MUX_0_GPIO_ZVPORT 0
-#define GPIO_MUX_0_TEST_DATA 1
+#define GPIO_MUX_31 BIT(31)
+#define GPIO_MUX_30 BIT(30)
+#define GPIO_MUX_29 BIT(29)
+#define GPIO_MUX_28 BIT(28)
+#define GPIO_MUX_27 BIT(27)
+#define GPIO_MUX_26 BIT(26)
+#define GPIO_MUX_25 BIT(25)
+#define GPIO_MUX_24 BIT(24)
+#define GPIO_MUX_23 BIT(23)
+#define GPIO_MUX_22 BIT(22)
+#define GPIO_MUX_21 BIT(21)
+#define GPIO_MUX_20 BIT(20)
+#define GPIO_MUX_19 BIT(19)
+#define GPIO_MUX_18 BIT(18)
+#define GPIO_MUX_17 BIT(17)
+#define GPIO_MUX_16 BIT(16)
+#define GPIO_MUX_15 BIT(15)
+#define GPIO_MUX_14 BIT(14)
+#define GPIO_MUX_13 BIT(13)
+#define GPIO_MUX_12 BIT(12)
+#define GPIO_MUX_11 BIT(11)
+#define GPIO_MUX_10 BIT(10)
+#define GPIO_MUX_9 BIT(9)
+#define GPIO_MUX_8 BIT(8)
+#define GPIO_MUX_7 BIT(7)
+#define GPIO_MUX_6 BIT(6)
+#define GPIO_MUX_5 BIT(5)
+#define GPIO_MUX_4 BIT(4)
+#define GPIO_MUX_3 BIT(3)
+#define GPIO_MUX_2 BIT(2)
+#define GPIO_MUX_1 BIT(1)
+#define GPIO_MUX_0 BIT(0)
#define LOCALMEM_ARBITRATION 0x00000C
-#define LOCALMEM_ARBITRATION_ROTATE 28:28
-#define LOCALMEM_ARBITRATION_ROTATE_OFF 0
-#define LOCALMEM_ARBITRATION_ROTATE_ON 1
-#define LOCALMEM_ARBITRATION_VGA 26:24
-#define LOCALMEM_ARBITRATION_VGA_OFF 0
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_1 1
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_2 2
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_3 3
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_4 4
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_5 5
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_6 6
-#define LOCALMEM_ARBITRATION_VGA_PRIORITY_7 7
-#define LOCALMEM_ARBITRATION_DMA 22:20
-#define LOCALMEM_ARBITRATION_DMA_OFF 0
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_1 1
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_2 2
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_3 3
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_4 4
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_5 5
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_6 6
-#define LOCALMEM_ARBITRATION_DMA_PRIORITY_7 7
-#define LOCALMEM_ARBITRATION_ZVPORT1 18:16
-#define LOCALMEM_ARBITRATION_ZVPORT1_OFF 0
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_1 1
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_2 2
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_3 3
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_4 4
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_5 5
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_6 6
-#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_7 7
-#define LOCALMEM_ARBITRATION_ZVPORT0 14:12
-#define LOCALMEM_ARBITRATION_ZVPORT0_OFF 0
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_1 1
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_2 2
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_3 3
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_4 4
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_5 5
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_6 6
-#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_7 7
-#define LOCALMEM_ARBITRATION_VIDEO 10:8
-#define LOCALMEM_ARBITRATION_VIDEO_OFF 0
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_1 1
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_2 2
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_3 3
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_4 4
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_5 5
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_6 6
-#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_7 7
-#define LOCALMEM_ARBITRATION_PANEL 6:4
-#define LOCALMEM_ARBITRATION_PANEL_OFF 0
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_1 1
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_2 2
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_3 3
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_4 4
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_5 5
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_6 6
-#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_7 7
-#define LOCALMEM_ARBITRATION_CRT 2:0
-#define LOCALMEM_ARBITRATION_CRT_OFF 0
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_1 1
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_2 2
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_3 3
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_4 4
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_5 5
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_6 6
-#define LOCALMEM_ARBITRATION_CRT_PRIORITY_7 7
+#define LOCALMEM_ARBITRATION_ROTATE BIT(28)
+#define LOCALMEM_ARBITRATION_VGA_MASK (0x7 << 24)
+#define LOCALMEM_ARBITRATION_VGA_OFF (0x0 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_1 (0x1 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_2 (0x2 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_3 (0x3 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_4 (0x4 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_5 (0x5 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_6 (0x6 << 24)
+#define LOCALMEM_ARBITRATION_VGA_PRIORITY_7 (0x7 << 24)
+#define LOCALMEM_ARBITRATION_DMA_MASK (0x7 << 20)
+#define LOCALMEM_ARBITRATION_DMA_OFF (0x0 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_1 (0x1 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_2 (0x2 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_3 (0x3 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_4 (0x4 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_5 (0x5 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_6 (0x6 << 20)
+#define LOCALMEM_ARBITRATION_DMA_PRIORITY_7 (0x7 << 20)
+#define LOCALMEM_ARBITRATION_ZVPORT1_MASK (0x7 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_OFF (0x0 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_1 (0x1 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_2 (0x2 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_3 (0x3 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_4 (0x4 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_5 (0x5 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_6 (0x6 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT1_PRIORITY_7 (0x7 << 16)
+#define LOCALMEM_ARBITRATION_ZVPORT0_MASK (0x7 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_OFF (0x0 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_1 (0x1 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_2 (0x2 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_3 (0x3 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_4 (0x4 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_5 (0x5 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_6 (0x6 << 12)
+#define LOCALMEM_ARBITRATION_ZVPORT0_PRIORITY_7 (0x7 << 12)
+#define LOCALMEM_ARBITRATION_VIDEO_MASK (0x7 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_OFF (0x0 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_1 (0x1 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_2 (0x2 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_3 (0x3 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_4 (0x4 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_5 (0x5 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_6 (0x6 << 8)
+#define LOCALMEM_ARBITRATION_VIDEO_PRIORITY_7 (0x7 << 8)
+#define LOCALMEM_ARBITRATION_PANEL_MASK (0x7 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_OFF (0x0 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_1 (0x1 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_2 (0x2 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_3 (0x3 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_4 (0x4 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_5 (0x5 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_6 (0x6 << 4)
+#define LOCALMEM_ARBITRATION_PANEL_PRIORITY_7 (0x7 << 4)
+#define LOCALMEM_ARBITRATION_CRT_MASK 0x7
+#define LOCALMEM_ARBITRATION_CRT_OFF 0x0
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_1 0x1
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_2 0x2
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_3 0x3
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_4 0x4
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_5 0x5
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_6 0x6
+#define LOCALMEM_ARBITRATION_CRT_PRIORITY_7 0x7
#define PCIMEM_ARBITRATION 0x000010
-#define PCIMEM_ARBITRATION_ROTATE 28:28
-#define PCIMEM_ARBITRATION_ROTATE_OFF 0
-#define PCIMEM_ARBITRATION_ROTATE_ON 1
-#define PCIMEM_ARBITRATION_VGA 26:24
-#define PCIMEM_ARBITRATION_VGA_OFF 0
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_1 1
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_2 2
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_3 3
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_4 4
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_5 5
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_6 6
-#define PCIMEM_ARBITRATION_VGA_PRIORITY_7 7
-#define PCIMEM_ARBITRATION_DMA 22:20
-#define PCIMEM_ARBITRATION_DMA_OFF 0
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_1 1
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_2 2
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_3 3
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_4 4
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_5 5
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_6 6
-#define PCIMEM_ARBITRATION_DMA_PRIORITY_7 7
-#define PCIMEM_ARBITRATION_ZVPORT1 18:16
-#define PCIMEM_ARBITRATION_ZVPORT1_OFF 0
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_1 1
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_2 2
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_3 3
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_4 4
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_5 5
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_6 6
-#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_7 7
-#define PCIMEM_ARBITRATION_ZVPORT0 14:12
-#define PCIMEM_ARBITRATION_ZVPORT0_OFF 0
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_1 1
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_2 2
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_3 3
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_4 4
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_5 5
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_6 6
-#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_7 7
-#define PCIMEM_ARBITRATION_VIDEO 10:8
-#define PCIMEM_ARBITRATION_VIDEO_OFF 0
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_1 1
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_2 2
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_3 3
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_4 4
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_5 5
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_6 6
-#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_7 7
-#define PCIMEM_ARBITRATION_PANEL 6:4
-#define PCIMEM_ARBITRATION_PANEL_OFF 0
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_1 1
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_2 2
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_3 3
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_4 4
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_5 5
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_6 6
-#define PCIMEM_ARBITRATION_PANEL_PRIORITY_7 7
-#define PCIMEM_ARBITRATION_CRT 2:0
-#define PCIMEM_ARBITRATION_CRT_OFF 0
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_1 1
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_2 2
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_3 3
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_4 4
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_5 5
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_6 6
-#define PCIMEM_ARBITRATION_CRT_PRIORITY_7 7
+#define PCIMEM_ARBITRATION_ROTATE BIT(28)
+#define PCIMEM_ARBITRATION_VGA_MASK (0x7 << 24)
+#define PCIMEM_ARBITRATION_VGA_OFF (0x0 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_1 (0x1 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_2 (0x2 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_3 (0x3 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_4 (0x4 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_5 (0x5 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_6 (0x6 << 24)
+#define PCIMEM_ARBITRATION_VGA_PRIORITY_7 (0x7 << 24)
+#define PCIMEM_ARBITRATION_DMA_MASK (0x7 << 20)
+#define PCIMEM_ARBITRATION_DMA_OFF (0x0 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_1 (0x1 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_2 (0x2 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_3 (0x3 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_4 (0x4 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_5 (0x5 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_6 (0x6 << 20)
+#define PCIMEM_ARBITRATION_DMA_PRIORITY_7 (0x7 << 20)
+#define PCIMEM_ARBITRATION_ZVPORT1_MASK (0x7 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_OFF (0x0 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_1 (0x1 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_2 (0x2 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_3 (0x3 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_4 (0x4 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_5 (0x5 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_6 (0x6 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT1_PRIORITY_7 (0x7 << 16)
+#define PCIMEM_ARBITRATION_ZVPORT0_MASK (0x7 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_OFF (0x0 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_1 (0x1 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_2 (0x2 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_3 (0x3 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_4 (0x4 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_5 (0x5 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_6 (0x6 << 12)
+#define PCIMEM_ARBITRATION_ZVPORT0_PRIORITY_7 (0x7 << 12)
+#define PCIMEM_ARBITRATION_VIDEO_MASK (0x7 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_OFF (0x0 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_1 (0x1 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_2 (0x2 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_3 (0x3 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_4 (0x4 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_5 (0x5 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_6 (0x6 << 8)
+#define PCIMEM_ARBITRATION_VIDEO_PRIORITY_7 (0x7 << 8)
+#define PCIMEM_ARBITRATION_PANEL_MASK (0x7 << 4)
+#define PCIMEM_ARBITRATION_PANEL_OFF (0x0 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_1 (0x1 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_2 (0x2 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_3 (0x3 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_4 (0x4 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_5 (0x5 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_6 (0x6 << 4)
+#define PCIMEM_ARBITRATION_PANEL_PRIORITY_7 (0x7 << 4)
+#define PCIMEM_ARBITRATION_CRT_MASK 0x7
+#define PCIMEM_ARBITRATION_CRT_OFF 0x0
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_1 0x1
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_2 0x2
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_3 0x3
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_4 0x4
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_5 0x5
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_6 0x6
+#define PCIMEM_ARBITRATION_CRT_PRIORITY_7 0x7
#define RAW_INT 0x000020
-#define RAW_INT_ZVPORT1_VSYNC 4:4
-#define RAW_INT_ZVPORT1_VSYNC_INACTIVE 0
-#define RAW_INT_ZVPORT1_VSYNC_ACTIVE 1
-#define RAW_INT_ZVPORT1_VSYNC_CLEAR 1
-#define RAW_INT_ZVPORT0_VSYNC 3:3
-#define RAW_INT_ZVPORT0_VSYNC_INACTIVE 0
-#define RAW_INT_ZVPORT0_VSYNC_ACTIVE 1
-#define RAW_INT_ZVPORT0_VSYNC_CLEAR 1
-#define RAW_INT_CRT_VSYNC 2:2
-#define RAW_INT_CRT_VSYNC_INACTIVE 0
-#define RAW_INT_CRT_VSYNC_ACTIVE 1
-#define RAW_INT_CRT_VSYNC_CLEAR 1
-#define RAW_INT_PANEL_VSYNC 1:1
-#define RAW_INT_PANEL_VSYNC_INACTIVE 0
-#define RAW_INT_PANEL_VSYNC_ACTIVE 1
-#define RAW_INT_PANEL_VSYNC_CLEAR 1
-#define RAW_INT_VGA_VSYNC 0:0
-#define RAW_INT_VGA_VSYNC_INACTIVE 0
-#define RAW_INT_VGA_VSYNC_ACTIVE 1
-#define RAW_INT_VGA_VSYNC_CLEAR 1
+#define RAW_INT_ZVPORT1_VSYNC BIT(4)
+#define RAW_INT_ZVPORT0_VSYNC BIT(3)
+#define RAW_INT_CRT_VSYNC BIT(2)
+#define RAW_INT_PANEL_VSYNC BIT(1)
+#define RAW_INT_VGA_VSYNC BIT(0)
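
Editor's note: the dropped *_CLEAR values (all 1) suggest RAW_INT is a
write-one-to-clear status register; if so, acknowledging an interrupt with the
new single-bit defines would presumably look like:

	POKE32(RAW_INT, RAW_INT_PANEL_VSYNC);	/* ack panel vsync by writing 1 */
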
#define INT_STATUS 0x000024
-#define INT_STATUS_GPIO31 31:31
-#define INT_STATUS_GPIO31_INACTIVE 0
-#define INT_STATUS_GPIO31_ACTIVE 1
-#define INT_STATUS_GPIO30 30:30
-#define INT_STATUS_GPIO30_INACTIVE 0
-#define INT_STATUS_GPIO30_ACTIVE 1
-#define INT_STATUS_GPIO29 29:29
-#define INT_STATUS_GPIO29_INACTIVE 0
-#define INT_STATUS_GPIO29_ACTIVE 1
-#define INT_STATUS_GPIO28 28:28
-#define INT_STATUS_GPIO28_INACTIVE 0
-#define INT_STATUS_GPIO28_ACTIVE 1
-#define INT_STATUS_GPIO27 27:27
-#define INT_STATUS_GPIO27_INACTIVE 0
-#define INT_STATUS_GPIO27_ACTIVE 1
-#define INT_STATUS_GPIO26 26:26
-#define INT_STATUS_GPIO26_INACTIVE 0
-#define INT_STATUS_GPIO26_ACTIVE 1
-#define INT_STATUS_GPIO25 25:25
-#define INT_STATUS_GPIO25_INACTIVE 0
-#define INT_STATUS_GPIO25_ACTIVE 1
-#define INT_STATUS_I2C 12:12
-#define INT_STATUS_I2C_INACTIVE 0
-#define INT_STATUS_I2C_ACTIVE 1
-#define INT_STATUS_PWM 11:11
-#define INT_STATUS_PWM_INACTIVE 0
-#define INT_STATUS_PWM_ACTIVE 1
-#define INT_STATUS_DMA1 10:10
-#define INT_STATUS_DMA1_INACTIVE 0
-#define INT_STATUS_DMA1_ACTIVE 1
-#define INT_STATUS_DMA0 9:9
-#define INT_STATUS_DMA0_INACTIVE 0
-#define INT_STATUS_DMA0_ACTIVE 1
-#define INT_STATUS_PCI 8:8
-#define INT_STATUS_PCI_INACTIVE 0
-#define INT_STATUS_PCI_ACTIVE 1
-#define INT_STATUS_SSP1 7:7
-#define INT_STATUS_SSP1_INACTIVE 0
-#define INT_STATUS_SSP1_ACTIVE 1
-#define INT_STATUS_SSP0 6:6
-#define INT_STATUS_SSP0_INACTIVE 0
-#define INT_STATUS_SSP0_ACTIVE 1
-#define INT_STATUS_DE 5:5
-#define INT_STATUS_DE_INACTIVE 0
-#define INT_STATUS_DE_ACTIVE 1
-#define INT_STATUS_ZVPORT1_VSYNC 4:4
-#define INT_STATUS_ZVPORT1_VSYNC_INACTIVE 0
-#define INT_STATUS_ZVPORT1_VSYNC_ACTIVE 1
-#define INT_STATUS_ZVPORT0_VSYNC 3:3
-#define INT_STATUS_ZVPORT0_VSYNC_INACTIVE 0
-#define INT_STATUS_ZVPORT0_VSYNC_ACTIVE 1
-#define INT_STATUS_CRT_VSYNC 2:2
-#define INT_STATUS_CRT_VSYNC_INACTIVE 0
-#define INT_STATUS_CRT_VSYNC_ACTIVE 1
-#define INT_STATUS_PANEL_VSYNC 1:1
-#define INT_STATUS_PANEL_VSYNC_INACTIVE 0
-#define INT_STATUS_PANEL_VSYNC_ACTIVE 1
-#define INT_STATUS_VGA_VSYNC 0:0
-#define INT_STATUS_VGA_VSYNC_INACTIVE 0
-#define INT_STATUS_VGA_VSYNC_ACTIVE 1
+#define INT_STATUS_GPIO31 BIT(31)
+#define INT_STATUS_GPIO30 BIT(30)
+#define INT_STATUS_GPIO29 BIT(29)
+#define INT_STATUS_GPIO28 BIT(28)
+#define INT_STATUS_GPIO27 BIT(27)
+#define INT_STATUS_GPIO26 BIT(26)
+#define INT_STATUS_GPIO25 BIT(25)
+#define INT_STATUS_I2C BIT(12)
+#define INT_STATUS_PWM BIT(11)
+#define INT_STATUS_DMA1 BIT(10)
+#define INT_STATUS_DMA0 BIT(9)
+#define INT_STATUS_PCI BIT(8)
+#define INT_STATUS_SSP1 BIT(7)
+#define INT_STATUS_SSP0 BIT(6)
+#define INT_STATUS_DE BIT(5)
+#define INT_STATUS_ZVPORT1_VSYNC BIT(4)
+#define INT_STATUS_ZVPORT0_VSYNC BIT(3)
+#define INT_STATUS_CRT_VSYNC BIT(2)
+#define INT_STATUS_PANEL_VSYNC BIT(1)
+#define INT_STATUS_VGA_VSYNC BIT(0)
#define INT_MASK 0x000028
-#define INT_MASK_GPIO31 31:31
-#define INT_MASK_GPIO31_DISABLE 0
-#define INT_MASK_GPIO31_ENABLE 1
-#define INT_MASK_GPIO30 30:30
-#define INT_MASK_GPIO30_DISABLE 0
-#define INT_MASK_GPIO30_ENABLE 1
-#define INT_MASK_GPIO29 29:29
-#define INT_MASK_GPIO29_DISABLE 0
-#define INT_MASK_GPIO29_ENABLE 1
-#define INT_MASK_GPIO28 28:28
-#define INT_MASK_GPIO28_DISABLE 0
-#define INT_MASK_GPIO28_ENABLE 1
-#define INT_MASK_GPIO27 27:27
-#define INT_MASK_GPIO27_DISABLE 0
-#define INT_MASK_GPIO27_ENABLE 1
-#define INT_MASK_GPIO26 26:26
-#define INT_MASK_GPIO26_DISABLE 0
-#define INT_MASK_GPIO26_ENABLE 1
-#define INT_MASK_GPIO25 25:25
-#define INT_MASK_GPIO25_DISABLE 0
-#define INT_MASK_GPIO25_ENABLE 1
-#define INT_MASK_I2C 12:12
-#define INT_MASK_I2C_DISABLE 0
-#define INT_MASK_I2C_ENABLE 1
-#define INT_MASK_PWM 11:11
-#define INT_MASK_PWM_DISABLE 0
-#define INT_MASK_PWM_ENABLE 1
-#define INT_MASK_DMA1 10:10
-#define INT_MASK_DMA1_DISABLE 0
-#define INT_MASK_DMA1_ENABLE 1
-#define INT_MASK_DMA 9:9
-#define INT_MASK_DMA_DISABLE 0
-#define INT_MASK_DMA_ENABLE 1
-#define INT_MASK_PCI 8:8
-#define INT_MASK_PCI_DISABLE 0
-#define INT_MASK_PCI_ENABLE 1
-#define INT_MASK_SSP1 7:7
-#define INT_MASK_SSP1_DISABLE 0
-#define INT_MASK_SSP1_ENABLE 1
-#define INT_MASK_SSP0 6:6
-#define INT_MASK_SSP0_DISABLE 0
-#define INT_MASK_SSP0_ENABLE 1
-#define INT_MASK_DE 5:5
-#define INT_MASK_DE_DISABLE 0
-#define INT_MASK_DE_ENABLE 1
-#define INT_MASK_ZVPORT1_VSYNC 4:4
-#define INT_MASK_ZVPORT1_VSYNC_DISABLE 0
-#define INT_MASK_ZVPORT1_VSYNC_ENABLE 1
-#define INT_MASK_ZVPORT0_VSYNC 3:3
-#define INT_MASK_ZVPORT0_VSYNC_DISABLE 0
-#define INT_MASK_ZVPORT0_VSYNC_ENABLE 1
-#define INT_MASK_CRT_VSYNC 2:2
-#define INT_MASK_CRT_VSYNC_DISABLE 0
-#define INT_MASK_CRT_VSYNC_ENABLE 1
-#define INT_MASK_PANEL_VSYNC 1:1
-#define INT_MASK_PANEL_VSYNC_DISABLE 0
-#define INT_MASK_PANEL_VSYNC_ENABLE 1
-#define INT_MASK_VGA_VSYNC 0:0
-#define INT_MASK_VGA_VSYNC_DISABLE 0
-#define INT_MASK_VGA_VSYNC_ENABLE 1
+#define INT_MASK_GPIO31 BIT(31)
+#define INT_MASK_GPIO30 BIT(30)
+#define INT_MASK_GPIO29 BIT(29)
+#define INT_MASK_GPIO28 BIT(28)
+#define INT_MASK_GPIO27 BIT(27)
+#define INT_MASK_GPIO26 BIT(26)
+#define INT_MASK_GPIO25 BIT(25)
+#define INT_MASK_I2C BIT(12)
+#define INT_MASK_PWM BIT(11)
+#define INT_MASK_DMA1 BIT(10)
+#define INT_MASK_DMA BIT(9)
+#define INT_MASK_PCI BIT(8)
+#define INT_MASK_SSP1 BIT(7)
+#define INT_MASK_SSP0 BIT(6)
+#define INT_MASK_DE BIT(5)
+#define INT_MASK_ZVPORT1_VSYNC BIT(4)
+#define INT_MASK_ZVPORT0_VSYNC BIT(3)
+#define INT_MASK_CRT_VSYNC BIT(2)
+#define INT_MASK_PANEL_VSYNC BIT(1)
+#define INT_MASK_VGA_VSYNC BIT(0)
#define CURRENT_GATE 0x000040
-#define CURRENT_GATE_MCLK 15:14
+#define CURRENT_GATE_MCLK_MASK (0x3 << 14)
#ifdef VALIDATION_CHIP
- #define CURRENT_GATE_MCLK_112MHZ 0
- #define CURRENT_GATE_MCLK_84MHZ 1
- #define CURRENT_GATE_MCLK_56MHZ 2
- #define CURRENT_GATE_MCLK_42MHZ 3
+ #define CURRENT_GATE_MCLK_112MHZ (0x0 << 14)
+ #define CURRENT_GATE_MCLK_84MHZ (0x1 << 14)
+ #define CURRENT_GATE_MCLK_56MHZ (0x2 << 14)
+ #define CURRENT_GATE_MCLK_42MHZ (0x3 << 14)
#else
- #define CURRENT_GATE_MCLK_DIV_3 0
- #define CURRENT_GATE_MCLK_DIV_4 1
- #define CURRENT_GATE_MCLK_DIV_6 2
- #define CURRENT_GATE_MCLK_DIV_8 3
+ #define CURRENT_GATE_MCLK_DIV_3 (0x0 << 14)
+ #define CURRENT_GATE_MCLK_DIV_4 (0x1 << 14)
+ #define CURRENT_GATE_MCLK_DIV_6 (0x2 << 14)
+ #define CURRENT_GATE_MCLK_DIV_8 (0x3 << 14)
#endif
-#define CURRENT_GATE_M2XCLK 13:12
+#define CURRENT_GATE_M2XCLK_MASK (0x3 << 12)
#ifdef VALIDATION_CHIP
- #define CURRENT_GATE_M2XCLK_336MHZ 0
- #define CURRENT_GATE_M2XCLK_168MHZ 1
- #define CURRENT_GATE_M2XCLK_112MHZ 2
- #define CURRENT_GATE_M2XCLK_84MHZ 3
+ #define CURRENT_GATE_M2XCLK_336MHZ (0x0 << 12)
+ #define CURRENT_GATE_M2XCLK_168MHZ (0x1 << 12)
+ #define CURRENT_GATE_M2XCLK_112MHZ (0x2 << 12)
+ #define CURRENT_GATE_M2XCLK_84MHZ (0x3 << 12)
#else
- #define CURRENT_GATE_M2XCLK_DIV_1 0
- #define CURRENT_GATE_M2XCLK_DIV_2 1
- #define CURRENT_GATE_M2XCLK_DIV_3 2
- #define CURRENT_GATE_M2XCLK_DIV_4 3
+ #define CURRENT_GATE_M2XCLK_DIV_1 (0x0 << 12)
+ #define CURRENT_GATE_M2XCLK_DIV_2 (0x1 << 12)
+ #define CURRENT_GATE_M2XCLK_DIV_3 (0x2 << 12)
+ #define CURRENT_GATE_M2XCLK_DIV_4 (0x3 << 12)
#endif
-#define CURRENT_GATE_VGA 10:10
-#define CURRENT_GATE_VGA_OFF 0
-#define CURRENT_GATE_VGA_ON 1
-#define CURRENT_GATE_PWM 9:9
-#define CURRENT_GATE_PWM_OFF 0
-#define CURRENT_GATE_PWM_ON 1
-#define CURRENT_GATE_I2C 8:8
-#define CURRENT_GATE_I2C_OFF 0
-#define CURRENT_GATE_I2C_ON 1
-#define CURRENT_GATE_SSP 7:7
-#define CURRENT_GATE_SSP_OFF 0
-#define CURRENT_GATE_SSP_ON 1
-#define CURRENT_GATE_GPIO 6:6
-#define CURRENT_GATE_GPIO_OFF 0
-#define CURRENT_GATE_GPIO_ON 1
-#define CURRENT_GATE_ZVPORT 5:5
-#define CURRENT_GATE_ZVPORT_OFF 0
-#define CURRENT_GATE_ZVPORT_ON 1
-#define CURRENT_GATE_CSC 4:4
-#define CURRENT_GATE_CSC_OFF 0
-#define CURRENT_GATE_CSC_ON 1
-#define CURRENT_GATE_DE 3:3
-#define CURRENT_GATE_DE_OFF 0
-#define CURRENT_GATE_DE_ON 1
-#define CURRENT_GATE_DISPLAY 2:2
-#define CURRENT_GATE_DISPLAY_OFF 0
-#define CURRENT_GATE_DISPLAY_ON 1
-#define CURRENT_GATE_LOCALMEM 1:1
-#define CURRENT_GATE_LOCALMEM_OFF 0
-#define CURRENT_GATE_LOCALMEM_ON 1
-#define CURRENT_GATE_DMA 0:0
-#define CURRENT_GATE_DMA_OFF 0
-#define CURRENT_GATE_DMA_ON 1
+#define CURRENT_GATE_VGA BIT(10)
+#define CURRENT_GATE_PWM BIT(9)
+#define CURRENT_GATE_I2C BIT(8)
+#define CURRENT_GATE_SSP BIT(7)
+#define CURRENT_GATE_GPIO BIT(6)
+#define CURRENT_GATE_ZVPORT BIT(5)
+#define CURRENT_GATE_CSC BIT(4)
+#define CURRENT_GATE_DE BIT(3)
+#define CURRENT_GATE_DISPLAY BIT(2)
+#define CURRENT_GATE_LOCALMEM BIT(1)
+#define CURRENT_GATE_DMA BIT(0)
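
Editor's note: because the MCLK field values are now pre-shifted to bits
15:14, selecting a divider is a plain read-modify-write against the mask; a
sketch (using the non-VALIDATION_CHIP names):

	u32 gate = PEEK32(CURRENT_GATE);

	gate &= ~CURRENT_GATE_MCLK_MASK;	/* clear bits 15:14 */
	gate |= CURRENT_GATE_MCLK_DIV_4;	/* value already carries the shift */
	setCurrentGate(gate);
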
#define MODE0_GATE 0x000044
-#define MODE0_GATE_MCLK 15:14
-#define MODE0_GATE_MCLK_112MHZ 0
-#define MODE0_GATE_MCLK_84MHZ 1
-#define MODE0_GATE_MCLK_56MHZ 2
-#define MODE0_GATE_MCLK_42MHZ 3
-#define MODE0_GATE_M2XCLK 13:12
-#define MODE0_GATE_M2XCLK_336MHZ 0
-#define MODE0_GATE_M2XCLK_168MHZ 1
-#define MODE0_GATE_M2XCLK_112MHZ 2
-#define MODE0_GATE_M2XCLK_84MHZ 3
-#define MODE0_GATE_VGA 10:10
-#define MODE0_GATE_VGA_OFF 0
-#define MODE0_GATE_VGA_ON 1
-#define MODE0_GATE_PWM 9:9
-#define MODE0_GATE_PWM_OFF 0
-#define MODE0_GATE_PWM_ON 1
-#define MODE0_GATE_I2C 8:8
-#define MODE0_GATE_I2C_OFF 0
-#define MODE0_GATE_I2C_ON 1
-#define MODE0_GATE_SSP 7:7
-#define MODE0_GATE_SSP_OFF 0
-#define MODE0_GATE_SSP_ON 1
-#define MODE0_GATE_GPIO 6:6
-#define MODE0_GATE_GPIO_OFF 0
-#define MODE0_GATE_GPIO_ON 1
-#define MODE0_GATE_ZVPORT 5:5
-#define MODE0_GATE_ZVPORT_OFF 0
-#define MODE0_GATE_ZVPORT_ON 1
-#define MODE0_GATE_CSC 4:4
-#define MODE0_GATE_CSC_OFF 0
-#define MODE0_GATE_CSC_ON 1
-#define MODE0_GATE_DE 3:3
-#define MODE0_GATE_DE_OFF 0
-#define MODE0_GATE_DE_ON 1
-#define MODE0_GATE_DISPLAY 2:2
-#define MODE0_GATE_DISPLAY_OFF 0
-#define MODE0_GATE_DISPLAY_ON 1
-#define MODE0_GATE_LOCALMEM 1:1
-#define MODE0_GATE_LOCALMEM_OFF 0
-#define MODE0_GATE_LOCALMEM_ON 1
-#define MODE0_GATE_DMA 0:0
-#define MODE0_GATE_DMA_OFF 0
-#define MODE0_GATE_DMA_ON 1
+#define MODE0_GATE_MCLK_MASK (0x3 << 14)
+#define MODE0_GATE_MCLK_112MHZ (0x0 << 14)
+#define MODE0_GATE_MCLK_84MHZ (0x1 << 14)
+#define MODE0_GATE_MCLK_56MHZ (0x2 << 14)
+#define MODE0_GATE_MCLK_42MHZ (0x3 << 14)
+#define MODE0_GATE_M2XCLK_MASK (0x3 << 12)
+#define MODE0_GATE_M2XCLK_336MHZ (0x0 << 12)
+#define MODE0_GATE_M2XCLK_168MHZ (0x1 << 12)
+#define MODE0_GATE_M2XCLK_112MHZ (0x2 << 12)
+#define MODE0_GATE_M2XCLK_84MHZ (0x3 << 12)
+#define MODE0_GATE_VGA BIT(10)
+#define MODE0_GATE_PWM BIT(9)
+#define MODE0_GATE_I2C BIT(8)
+#define MODE0_GATE_SSP BIT(7)
+#define MODE0_GATE_GPIO BIT(6)
+#define MODE0_GATE_ZVPORT BIT(5)
+#define MODE0_GATE_CSC BIT(4)
+#define MODE0_GATE_DE BIT(3)
+#define MODE0_GATE_DISPLAY BIT(2)
+#define MODE0_GATE_LOCALMEM BIT(1)
+#define MODE0_GATE_DMA BIT(0)
#define MODE1_GATE 0x000048
-#define MODE1_GATE_MCLK 15:14
-#define MODE1_GATE_MCLK_112MHZ 0
-#define MODE1_GATE_MCLK_84MHZ 1
-#define MODE1_GATE_MCLK_56MHZ 2
-#define MODE1_GATE_MCLK_42MHZ 3
-#define MODE1_GATE_M2XCLK 13:12
-#define MODE1_GATE_M2XCLK_336MHZ 0
-#define MODE1_GATE_M2XCLK_168MHZ 1
-#define MODE1_GATE_M2XCLK_112MHZ 2
-#define MODE1_GATE_M2XCLK_84MHZ 3
-#define MODE1_GATE_VGA 10:10
-#define MODE1_GATE_VGA_OFF 0
-#define MODE1_GATE_VGA_ON 1
-#define MODE1_GATE_PWM 9:9
-#define MODE1_GATE_PWM_OFF 0
-#define MODE1_GATE_PWM_ON 1
-#define MODE1_GATE_I2C 8:8
-#define MODE1_GATE_I2C_OFF 0
-#define MODE1_GATE_I2C_ON 1
-#define MODE1_GATE_SSP 7:7
-#define MODE1_GATE_SSP_OFF 0
-#define MODE1_GATE_SSP_ON 1
-#define MODE1_GATE_GPIO 6:6
-#define MODE1_GATE_GPIO_OFF 0
-#define MODE1_GATE_GPIO_ON 1
-#define MODE1_GATE_ZVPORT 5:5
-#define MODE1_GATE_ZVPORT_OFF 0
-#define MODE1_GATE_ZVPORT_ON 1
-#define MODE1_GATE_CSC 4:4
-#define MODE1_GATE_CSC_OFF 0
-#define MODE1_GATE_CSC_ON 1
-#define MODE1_GATE_DE 3:3
-#define MODE1_GATE_DE_OFF 0
-#define MODE1_GATE_DE_ON 1
-#define MODE1_GATE_DISPLAY 2:2
-#define MODE1_GATE_DISPLAY_OFF 0
-#define MODE1_GATE_DISPLAY_ON 1
-#define MODE1_GATE_LOCALMEM 1:1
-#define MODE1_GATE_LOCALMEM_OFF 0
-#define MODE1_GATE_LOCALMEM_ON 1
-#define MODE1_GATE_DMA 0:0
-#define MODE1_GATE_DMA_OFF 0
-#define MODE1_GATE_DMA_ON 1
+#define MODE1_GATE_MCLK_MASK (0x3 << 14)
+#define MODE1_GATE_MCLK_112MHZ (0x0 << 14)
+#define MODE1_GATE_MCLK_84MHZ (0x1 << 14)
+#define MODE1_GATE_MCLK_56MHZ (0x2 << 14)
+#define MODE1_GATE_MCLK_42MHZ (0x3 << 14)
+#define MODE1_GATE_M2XCLK_MASK (0x3 << 12)
+#define MODE1_GATE_M2XCLK_336MHZ (0x0 << 12)
+#define MODE1_GATE_M2XCLK_168MHZ (0x1 << 12)
+#define MODE1_GATE_M2XCLK_112MHZ (0x2 << 12)
+#define MODE1_GATE_M2XCLK_84MHZ (0x3 << 12)
+#define MODE1_GATE_VGA BIT(10)
+#define MODE1_GATE_PWM BIT(9)
+#define MODE1_GATE_I2C BIT(8)
+#define MODE1_GATE_SSP BIT(7)
+#define MODE1_GATE_GPIO BIT(6)
+#define MODE1_GATE_ZVPORT BIT(5)
+#define MODE1_GATE_CSC BIT(4)
+#define MODE1_GATE_DE BIT(3)
+#define MODE1_GATE_DISPLAY BIT(2)
+#define MODE1_GATE_LOCALMEM BIT(1)
+#define MODE1_GATE_DMA BIT(0)
#define POWER_MODE_CTRL 0x00004C
#ifdef VALIDATION_CHIP
- #define POWER_MODE_CTRL_336CLK 4:4
- #define POWER_MODE_CTRL_336CLK_OFF 0
- #define POWER_MODE_CTRL_336CLK_ON 1
+ #define POWER_MODE_CTRL_336CLK BIT(4)
#endif
-#define POWER_MODE_CTRL_OSC_INPUT 3:3
-#define POWER_MODE_CTRL_OSC_INPUT_OFF 0
-#define POWER_MODE_CTRL_OSC_INPUT_ON 1
-#define POWER_MODE_CTRL_ACPI 2:2
-#define POWER_MODE_CTRL_ACPI_OFF 0
-#define POWER_MODE_CTRL_ACPI_ON 1
-#define POWER_MODE_CTRL_MODE 1:0
-#define POWER_MODE_CTRL_MODE_MODE0 0
-#define POWER_MODE_CTRL_MODE_MODE1 1
-#define POWER_MODE_CTRL_MODE_SLEEP 2
+#define POWER_MODE_CTRL_OSC_INPUT BIT(3)
+#define POWER_MODE_CTRL_ACPI BIT(2)
+#define POWER_MODE_CTRL_MODE_MASK (0x3 << 0)
+#define POWER_MODE_CTRL_MODE_MODE0 (0x0 << 0)
+#define POWER_MODE_CTRL_MODE_MODE1 (0x1 << 0)
+#define POWER_MODE_CTRL_MODE_SLEEP (0x2 << 0)
#define PCI_MASTER_BASE 0x000050
-#define PCI_MASTER_BASE_ADDRESS 7:0
+#define PCI_MASTER_BASE_ADDRESS_MASK 0xff
#define DEVICE_ID 0x000054
-#define DEVICE_ID_DEVICE_ID 31:16
-#define DEVICE_ID_REVISION_ID 7:0
+#define DEVICE_ID_DEVICE_ID_MASK (0xffff << 16)
+#define DEVICE_ID_REVISION_ID_MASK 0xff
#define PLL_CLK_COUNT 0x000058
-#define PLL_CLK_COUNT_COUNTER 15:0
+#define PLL_CLK_COUNT_COUNTER_MASK 0xffff
#define PANEL_PLL_CTRL 0x00005C
-#define PANEL_PLL_CTRL_BYPASS 18:18
-#define PANEL_PLL_CTRL_BYPASS_OFF 0
-#define PANEL_PLL_CTRL_BYPASS_ON 1
-#define PANEL_PLL_CTRL_POWER 17:17
-#define PANEL_PLL_CTRL_POWER_OFF 0
-#define PANEL_PLL_CTRL_POWER_ON 1
-#define PANEL_PLL_CTRL_INPUT 16:16
-#define PANEL_PLL_CTRL_INPUT_OSC 0
-#define PANEL_PLL_CTRL_INPUT_TESTCLK 1
+#define PLL_CTRL_BYPASS BIT(18)
+#define PLL_CTRL_POWER BIT(17)
+#define PLL_CTRL_INPUT BIT(16)
#ifdef VALIDATION_CHIP
- #define PANEL_PLL_CTRL_OD 15:14
+ #define PLL_CTRL_OD_SHIFT 14
+ #define PLL_CTRL_OD_MASK (0x3 << 14)
#else
- #define PANEL_PLL_CTRL_POD 15:14
- #define PANEL_PLL_CTRL_OD 13:12
+ #define PLL_CTRL_POD_SHIFT 14
+ #define PLL_CTRL_POD_MASK (0x3 << 14)
+ #define PLL_CTRL_OD_SHIFT 12
+ #define PLL_CTRL_OD_MASK (0x3 << 12)
#endif
-#define PANEL_PLL_CTRL_N 11:8
-#define PANEL_PLL_CTRL_M 7:0
+#define PLL_CTRL_N_SHIFT 8
+#define PLL_CTRL_N_MASK (0xf << 8)
+#define PLL_CTRL_M_SHIFT 0
+#define PLL_CTRL_M_MASK 0xff
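
Editor's note: since the PANEL/CRT/VGA PLL control registers now share the
single PLL_CTRL_* layout, decoding works the same regardless of which register
is read; a sketch of extracting the M and N dividers:

	u32 pll = PEEK32(PANEL_PLL_CTRL);
	unsigned int m = (pll & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
	unsigned int n = (pll & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
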
#define CRT_PLL_CTRL 0x000060
-#define CRT_PLL_CTRL_BYPASS 18:18
-#define CRT_PLL_CTRL_BYPASS_OFF 0
-#define CRT_PLL_CTRL_BYPASS_ON 1
-#define CRT_PLL_CTRL_POWER 17:17
-#define CRT_PLL_CTRL_POWER_OFF 0
-#define CRT_PLL_CTRL_POWER_ON 1
-#define CRT_PLL_CTRL_INPUT 16:16
-#define CRT_PLL_CTRL_INPUT_OSC 0
-#define CRT_PLL_CTRL_INPUT_TESTCLK 1
-#ifdef VALIDATION_CHIP
- #define CRT_PLL_CTRL_OD 15:14
-#else
- #define CRT_PLL_CTRL_POD 15:14
- #define CRT_PLL_CTRL_OD 13:12
-#endif
-#define CRT_PLL_CTRL_N 11:8
-#define CRT_PLL_CTRL_M 7:0
#define VGA_PLL0_CTRL 0x000064
-#define VGA_PLL0_CTRL_BYPASS 18:18
-#define VGA_PLL0_CTRL_BYPASS_OFF 0
-#define VGA_PLL0_CTRL_BYPASS_ON 1
-#define VGA_PLL0_CTRL_POWER 17:17
-#define VGA_PLL0_CTRL_POWER_OFF 0
-#define VGA_PLL0_CTRL_POWER_ON 1
-#define VGA_PLL0_CTRL_INPUT 16:16
-#define VGA_PLL0_CTRL_INPUT_OSC 0
-#define VGA_PLL0_CTRL_INPUT_TESTCLK 1
-#ifdef VALIDATION_CHIP
- #define VGA_PLL0_CTRL_OD 15:14
-#else
- #define VGA_PLL0_CTRL_POD 15:14
- #define VGA_PLL0_CTRL_OD 13:12
-#endif
-#define VGA_PLL0_CTRL_N 11:8
-#define VGA_PLL0_CTRL_M 7:0
#define VGA_PLL1_CTRL 0x000068
-#define VGA_PLL1_CTRL_BYPASS 18:18
-#define VGA_PLL1_CTRL_BYPASS_OFF 0
-#define VGA_PLL1_CTRL_BYPASS_ON 1
-#define VGA_PLL1_CTRL_POWER 17:17
-#define VGA_PLL1_CTRL_POWER_OFF 0
-#define VGA_PLL1_CTRL_POWER_ON 1
-#define VGA_PLL1_CTRL_INPUT 16:16
-#define VGA_PLL1_CTRL_INPUT_OSC 0
-#define VGA_PLL1_CTRL_INPUT_TESTCLK 1
-#ifdef VALIDATION_CHIP
- #define VGA_PLL1_CTRL_OD 15:14
-#else
- #define VGA_PLL1_CTRL_POD 15:14
- #define VGA_PLL1_CTRL_OD 13:12
-#endif
-#define VGA_PLL1_CTRL_N 11:8
-#define VGA_PLL1_CTRL_M 7:0
#define SCRATCH_DATA 0x00006c
#ifndef VALIDATION_CHIP
#define MXCLK_PLL_CTRL 0x000070
-#define MXCLK_PLL_CTRL_BYPASS 18:18
-#define MXCLK_PLL_CTRL_BYPASS_OFF 0
-#define MXCLK_PLL_CTRL_BYPASS_ON 1
-#define MXCLK_PLL_CTRL_POWER 17:17
-#define MXCLK_PLL_CTRL_POWER_OFF 0
-#define MXCLK_PLL_CTRL_POWER_ON 1
-#define MXCLK_PLL_CTRL_INPUT 16:16
-#define MXCLK_PLL_CTRL_INPUT_OSC 0
-#define MXCLK_PLL_CTRL_INPUT_TESTCLK 1
-#define MXCLK_PLL_CTRL_POD 15:14
-#define MXCLK_PLL_CTRL_OD 13:12
-#define MXCLK_PLL_CTRL_N 11:8
-#define MXCLK_PLL_CTRL_M 7:0
#define VGA_CONFIGURATION 0x000088
-#define VGA_CONFIGURATION_USER_DEFINE 5:4
-#define VGA_CONFIGURATION_PLL 2:2
-#define VGA_CONFIGURATION_PLL_VGA 0
-#define VGA_CONFIGURATION_PLL_PANEL 1
-#define VGA_CONFIGURATION_MODE 1:1
-#define VGA_CONFIGURATION_MODE_TEXT 0
-#define VGA_CONFIGURATION_MODE_GRAPHIC 1
+#define VGA_CONFIGURATION_USER_DEFINE_MASK (0x3 << 4)
+#define VGA_CONFIGURATION_PLL BIT(2)
+#define VGA_CONFIGURATION_MODE BIT(1)
#endif
#define GPIO_DATA 0x010000
-#define GPIO_DATA_31 31:31
-#define GPIO_DATA_30 30:30
-#define GPIO_DATA_29 29:29
-#define GPIO_DATA_28 28:28
-#define GPIO_DATA_27 27:27
-#define GPIO_DATA_26 26:26
-#define GPIO_DATA_25 25:25
-#define GPIO_DATA_24 24:24
-#define GPIO_DATA_23 23:23
-#define GPIO_DATA_22 22:22
-#define GPIO_DATA_21 21:21
-#define GPIO_DATA_20 20:20
-#define GPIO_DATA_19 19:19
-#define GPIO_DATA_18 18:18
-#define GPIO_DATA_17 17:17
-#define GPIO_DATA_16 16:16
-#define GPIO_DATA_15 15:15
-#define GPIO_DATA_14 14:14
-#define GPIO_DATA_13 13:13
-#define GPIO_DATA_12 12:12
-#define GPIO_DATA_11 11:11
-#define GPIO_DATA_10 10:10
-#define GPIO_DATA_9 9:9
-#define GPIO_DATA_8 8:8
-#define GPIO_DATA_7 7:7
-#define GPIO_DATA_6 6:6
-#define GPIO_DATA_5 5:5
-#define GPIO_DATA_4 4:4
-#define GPIO_DATA_3 3:3
-#define GPIO_DATA_2 2:2
-#define GPIO_DATA_1 1:1
-#define GPIO_DATA_0 0:0
+#define GPIO_DATA_31 BIT(31)
+#define GPIO_DATA_30 BIT(30)
+#define GPIO_DATA_29 BIT(29)
+#define GPIO_DATA_28 BIT(28)
+#define GPIO_DATA_27 BIT(27)
+#define GPIO_DATA_26 BIT(26)
+#define GPIO_DATA_25 BIT(25)
+#define GPIO_DATA_24 BIT(24)
+#define GPIO_DATA_23 BIT(23)
+#define GPIO_DATA_22 BIT(22)
+#define GPIO_DATA_21 BIT(21)
+#define GPIO_DATA_20 BIT(20)
+#define GPIO_DATA_19 BIT(19)
+#define GPIO_DATA_18 BIT(18)
+#define GPIO_DATA_17 BIT(17)
+#define GPIO_DATA_16 BIT(16)
+#define GPIO_DATA_15 BIT(15)
+#define GPIO_DATA_14 BIT(14)
+#define GPIO_DATA_13 BIT(13)
+#define GPIO_DATA_12 BIT(12)
+#define GPIO_DATA_11 BIT(11)
+#define GPIO_DATA_10 BIT(10)
+#define GPIO_DATA_9 BIT(9)
+#define GPIO_DATA_8 BIT(8)
+#define GPIO_DATA_7 BIT(7)
+#define GPIO_DATA_6 BIT(6)
+#define GPIO_DATA_5 BIT(5)
+#define GPIO_DATA_4 BIT(4)
+#define GPIO_DATA_3 BIT(3)
+#define GPIO_DATA_2 BIT(2)
+#define GPIO_DATA_1 BIT(1)
+#define GPIO_DATA_0 BIT(0)
#define GPIO_DATA_DIRECTION 0x010004
-#define GPIO_DATA_DIRECTION_31 31:31
-#define GPIO_DATA_DIRECTION_31_INPUT 0
-#define GPIO_DATA_DIRECTION_31_OUTPUT 1
-#define GPIO_DATA_DIRECTION_30 30:30
-#define GPIO_DATA_DIRECTION_30_INPUT 0
-#define GPIO_DATA_DIRECTION_30_OUTPUT 1
-#define GPIO_DATA_DIRECTION_29 29:29
-#define GPIO_DATA_DIRECTION_29_INPUT 0
-#define GPIO_DATA_DIRECTION_29_OUTPUT 1
-#define GPIO_DATA_DIRECTION_28 28:28
-#define GPIO_DATA_DIRECTION_28_INPUT 0
-#define GPIO_DATA_DIRECTION_28_OUTPUT 1
-#define GPIO_DATA_DIRECTION_27 27:27
-#define GPIO_DATA_DIRECTION_27_INPUT 0
-#define GPIO_DATA_DIRECTION_27_OUTPUT 1
-#define GPIO_DATA_DIRECTION_26 26:26
-#define GPIO_DATA_DIRECTION_26_INPUT 0
-#define GPIO_DATA_DIRECTION_26_OUTPUT 1
-#define GPIO_DATA_DIRECTION_25 25:25
-#define GPIO_DATA_DIRECTION_25_INPUT 0
-#define GPIO_DATA_DIRECTION_25_OUTPUT 1
-#define GPIO_DATA_DIRECTION_24 24:24
-#define GPIO_DATA_DIRECTION_24_INPUT 0
-#define GPIO_DATA_DIRECTION_24_OUTPUT 1
-#define GPIO_DATA_DIRECTION_23 23:23
-#define GPIO_DATA_DIRECTION_23_INPUT 0
-#define GPIO_DATA_DIRECTION_23_OUTPUT 1
-#define GPIO_DATA_DIRECTION_22 22:22
-#define GPIO_DATA_DIRECTION_22_INPUT 0
-#define GPIO_DATA_DIRECTION_22_OUTPUT 1
-#define GPIO_DATA_DIRECTION_21 21:21
-#define GPIO_DATA_DIRECTION_21_INPUT 0
-#define GPIO_DATA_DIRECTION_21_OUTPUT 1
-#define GPIO_DATA_DIRECTION_20 20:20
-#define GPIO_DATA_DIRECTION_20_INPUT 0
-#define GPIO_DATA_DIRECTION_20_OUTPUT 1
-#define GPIO_DATA_DIRECTION_19 19:19
-#define GPIO_DATA_DIRECTION_19_INPUT 0
-#define GPIO_DATA_DIRECTION_19_OUTPUT 1
-#define GPIO_DATA_DIRECTION_18 18:18
-#define GPIO_DATA_DIRECTION_18_INPUT 0
-#define GPIO_DATA_DIRECTION_18_OUTPUT 1
-#define GPIO_DATA_DIRECTION_17 17:17
-#define GPIO_DATA_DIRECTION_17_INPUT 0
-#define GPIO_DATA_DIRECTION_17_OUTPUT 1
-#define GPIO_DATA_DIRECTION_16 16:16
-#define GPIO_DATA_DIRECTION_16_INPUT 0
-#define GPIO_DATA_DIRECTION_16_OUTPUT 1
-#define GPIO_DATA_DIRECTION_15 15:15
-#define GPIO_DATA_DIRECTION_15_INPUT 0
-#define GPIO_DATA_DIRECTION_15_OUTPUT 1
-#define GPIO_DATA_DIRECTION_14 14:14
-#define GPIO_DATA_DIRECTION_14_INPUT 0
-#define GPIO_DATA_DIRECTION_14_OUTPUT 1
-#define GPIO_DATA_DIRECTION_13 13:13
-#define GPIO_DATA_DIRECTION_13_INPUT 0
-#define GPIO_DATA_DIRECTION_13_OUTPUT 1
-#define GPIO_DATA_DIRECTION_12 12:12
-#define GPIO_DATA_DIRECTION_12_INPUT 0
-#define GPIO_DATA_DIRECTION_12_OUTPUT 1
-#define GPIO_DATA_DIRECTION_11 11:11
-#define GPIO_DATA_DIRECTION_11_INPUT 0
-#define GPIO_DATA_DIRECTION_11_OUTPUT 1
-#define GPIO_DATA_DIRECTION_10 10:10
-#define GPIO_DATA_DIRECTION_10_INPUT 0
-#define GPIO_DATA_DIRECTION_10_OUTPUT 1
-#define GPIO_DATA_DIRECTION_9 9:9
-#define GPIO_DATA_DIRECTION_9_INPUT 0
-#define GPIO_DATA_DIRECTION_9_OUTPUT 1
-#define GPIO_DATA_DIRECTION_8 8:8
-#define GPIO_DATA_DIRECTION_8_INPUT 0
-#define GPIO_DATA_DIRECTION_8_OUTPUT 1
-#define GPIO_DATA_DIRECTION_7 7:7
-#define GPIO_DATA_DIRECTION_7_INPUT 0
-#define GPIO_DATA_DIRECTION_7_OUTPUT 1
-#define GPIO_DATA_DIRECTION_6 6:6
-#define GPIO_DATA_DIRECTION_6_INPUT 0
-#define GPIO_DATA_DIRECTION_6_OUTPUT 1
-#define GPIO_DATA_DIRECTION_5 5:5
-#define GPIO_DATA_DIRECTION_5_INPUT 0
-#define GPIO_DATA_DIRECTION_5_OUTPUT 1
-#define GPIO_DATA_DIRECTION_4 4:4
-#define GPIO_DATA_DIRECTION_4_INPUT 0
-#define GPIO_DATA_DIRECTION_4_OUTPUT 1
-#define GPIO_DATA_DIRECTION_3 3:3
-#define GPIO_DATA_DIRECTION_3_INPUT 0
-#define GPIO_DATA_DIRECTION_3_OUTPUT 1
-#define GPIO_DATA_DIRECTION_2 2:2
-#define GPIO_DATA_DIRECTION_2_INPUT 0
-#define GPIO_DATA_DIRECTION_2_OUTPUT 1
-#define GPIO_DATA_DIRECTION_1 131
-#define GPIO_DATA_DIRECTION_1_INPUT 0
-#define GPIO_DATA_DIRECTION_1_OUTPUT 1
-#define GPIO_DATA_DIRECTION_0 0:0
-#define GPIO_DATA_DIRECTION_0_INPUT 0
-#define GPIO_DATA_DIRECTION_0_OUTPUT 1
+#define GPIO_DATA_DIRECTION_31 BIT(31)
+#define GPIO_DATA_DIRECTION_30 BIT(30)
+#define GPIO_DATA_DIRECTION_29 BIT(29)
+#define GPIO_DATA_DIRECTION_28 BIT(28)
+#define GPIO_DATA_DIRECTION_27 BIT(27)
+#define GPIO_DATA_DIRECTION_26 BIT(26)
+#define GPIO_DATA_DIRECTION_25 BIT(25)
+#define GPIO_DATA_DIRECTION_24 BIT(24)
+#define GPIO_DATA_DIRECTION_23 BIT(23)
+#define GPIO_DATA_DIRECTION_22 BIT(22)
+#define GPIO_DATA_DIRECTION_21 BIT(21)
+#define GPIO_DATA_DIRECTION_20 BIT(20)
+#define GPIO_DATA_DIRECTION_19 BIT(19)
+#define GPIO_DATA_DIRECTION_18 BIT(18)
+#define GPIO_DATA_DIRECTION_17 BIT(17)
+#define GPIO_DATA_DIRECTION_16 BIT(16)
+#define GPIO_DATA_DIRECTION_15 BIT(15)
+#define GPIO_DATA_DIRECTION_14 BIT(14)
+#define GPIO_DATA_DIRECTION_13 BIT(13)
+#define GPIO_DATA_DIRECTION_12 BIT(12)
+#define GPIO_DATA_DIRECTION_11 BIT(11)
+#define GPIO_DATA_DIRECTION_10 BIT(10)
+#define GPIO_DATA_DIRECTION_9 BIT(9)
+#define GPIO_DATA_DIRECTION_8 BIT(8)
+#define GPIO_DATA_DIRECTION_7 BIT(7)
+#define GPIO_DATA_DIRECTION_6 BIT(6)
+#define GPIO_DATA_DIRECTION_5 BIT(5)
+#define GPIO_DATA_DIRECTION_4 BIT(4)
+#define GPIO_DATA_DIRECTION_3 BIT(3)
+#define GPIO_DATA_DIRECTION_2 BIT(2)
+#define GPIO_DATA_DIRECTION_1 BIT(1)
+#define GPIO_DATA_DIRECTION_0 BIT(0)
#define GPIO_INTERRUPT_SETUP 0x010008
-#define GPIO_INTERRUPT_SETUP_TRIGGER_31 22:22
-#define GPIO_INTERRUPT_SETUP_TRIGGER_31_EDGE 0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_31_LEVEL 1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_30 21:21
-#define GPIO_INTERRUPT_SETUP_TRIGGER_30_EDGE 0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_30_LEVEL 1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_29 20:20
-#define GPIO_INTERRUPT_SETUP_TRIGGER_29_EDGE 0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_29_LEVEL 1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_28 19:19
-#define GPIO_INTERRUPT_SETUP_TRIGGER_28_EDGE 0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_28_LEVEL 1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_27 18:18
-#define GPIO_INTERRUPT_SETUP_TRIGGER_27_EDGE 0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_27_LEVEL 1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_26 17:17
-#define GPIO_INTERRUPT_SETUP_TRIGGER_26_EDGE 0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_26_LEVEL 1
-#define GPIO_INTERRUPT_SETUP_TRIGGER_25 16:16
-#define GPIO_INTERRUPT_SETUP_TRIGGER_25_EDGE 0
-#define GPIO_INTERRUPT_SETUP_TRIGGER_25_LEVEL 1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_31 14:14
-#define GPIO_INTERRUPT_SETUP_ACTIVE_31_LOW 0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_31_HIGH 1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_30 13:13
-#define GPIO_INTERRUPT_SETUP_ACTIVE_30_LOW 0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_30_HIGH 1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_29 12:12
-#define GPIO_INTERRUPT_SETUP_ACTIVE_29_LOW 0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_29_HIGH 1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_28 11:11
-#define GPIO_INTERRUPT_SETUP_ACTIVE_28_LOW 0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_28_HIGH 1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_27 10:10
-#define GPIO_INTERRUPT_SETUP_ACTIVE_27_LOW 0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_27_HIGH 1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_26 9:9
-#define GPIO_INTERRUPT_SETUP_ACTIVE_26_LOW 0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_26_HIGH 1
-#define GPIO_INTERRUPT_SETUP_ACTIVE_25 8:8
-#define GPIO_INTERRUPT_SETUP_ACTIVE_25_LOW 0
-#define GPIO_INTERRUPT_SETUP_ACTIVE_25_HIGH 1
-#define GPIO_INTERRUPT_SETUP_ENABLE_31 6:6
-#define GPIO_INTERRUPT_SETUP_ENABLE_31_GPIO 0
-#define GPIO_INTERRUPT_SETUP_ENABLE_31_INTERRUPT 1
-#define GPIO_INTERRUPT_SETUP_ENABLE_30 5:5
-#define GPIO_INTERRUPT_SETUP_ENABLE_30_GPIO 0
-#define GPIO_INTERRUPT_SETUP_ENABLE_30_INTERRUPT 1
-#define GPIO_INTERRUPT_SETUP_ENABLE_29 4:4
-#define GPIO_INTERRUPT_SETUP_ENABLE_29_GPIO 0
-#define GPIO_INTERRUPT_SETUP_ENABLE_29_INTERRUPT 1
-#define GPIO_INTERRUPT_SETUP_ENABLE_28 3:3
-#define GPIO_INTERRUPT_SETUP_ENABLE_28_GPIO 0
-#define GPIO_INTERRUPT_SETUP_ENABLE_28_INTERRUPT 1
-#define GPIO_INTERRUPT_SETUP_ENABLE_27 2:2
-#define GPIO_INTERRUPT_SETUP_ENABLE_27_GPIO 0
-#define GPIO_INTERRUPT_SETUP_ENABLE_27_INTERRUPT 1
-#define GPIO_INTERRUPT_SETUP_ENABLE_26 1:1
-#define GPIO_INTERRUPT_SETUP_ENABLE_26_GPIO 0
-#define GPIO_INTERRUPT_SETUP_ENABLE_26_INTERRUPT 1
-#define GPIO_INTERRUPT_SETUP_ENABLE_25 0:0
-#define GPIO_INTERRUPT_SETUP_ENABLE_25_GPIO 0
-#define GPIO_INTERRUPT_SETUP_ENABLE_25_INTERRUPT 1
+#define GPIO_INTERRUPT_SETUP_TRIGGER_31 BIT(22)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_30 BIT(21)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_29 BIT(20)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_28 BIT(19)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_27 BIT(18)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_26 BIT(17)
+#define GPIO_INTERRUPT_SETUP_TRIGGER_25 BIT(16)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_31 BIT(14)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_30 BIT(13)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_29 BIT(12)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_28 BIT(11)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_27 BIT(10)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_26 BIT(9)
+#define GPIO_INTERRUPT_SETUP_ACTIVE_25 BIT(8)
+#define GPIO_INTERRUPT_SETUP_ENABLE_31 BIT(6)
+#define GPIO_INTERRUPT_SETUP_ENABLE_30 BIT(5)
+#define GPIO_INTERRUPT_SETUP_ENABLE_29 BIT(4)
+#define GPIO_INTERRUPT_SETUP_ENABLE_28 BIT(3)
+#define GPIO_INTERRUPT_SETUP_ENABLE_27 BIT(2)
+#define GPIO_INTERRUPT_SETUP_ENABLE_26 BIT(1)
+#define GPIO_INTERRUPT_SETUP_ENABLE_25 BIT(0)
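A minimal usage sketch for the new single-bit defines (peek32()/poke32() stand in for the driver's 32-bit MMIO accessors and are assumptions here): per the removed enumerations, a set TRIGGER bit selects level over edge, a set ACTIVE bit selects active-high, and a set ENABLE bit routes the pin to the interrupt logic.

	u32 reg = peek32(GPIO_INTERRUPT_SETUP);	/* hypothetical accessor */

	reg |= GPIO_INTERRUPT_SETUP_TRIGGER_31 |	/* level-triggered */
	       GPIO_INTERRUPT_SETUP_ACTIVE_31 |	/* active high */
	       GPIO_INTERRUPT_SETUP_ENABLE_31;	/* pin 31 raises interrupts */
	poke32(GPIO_INTERRUPT_SETUP, reg);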
#define GPIO_INTERRUPT_STATUS 0x01000C
-#define GPIO_INTERRUPT_STATUS_31 22:22
-#define GPIO_INTERRUPT_STATUS_31_INACTIVE 0
-#define GPIO_INTERRUPT_STATUS_31_ACTIVE 1
-#define GPIO_INTERRUPT_STATUS_31_RESET 1
-#define GPIO_INTERRUPT_STATUS_30 21:21
-#define GPIO_INTERRUPT_STATUS_30_INACTIVE 0
-#define GPIO_INTERRUPT_STATUS_30_ACTIVE 1
-#define GPIO_INTERRUPT_STATUS_30_RESET 1
-#define GPIO_INTERRUPT_STATUS_29 20:20
-#define GPIO_INTERRUPT_STATUS_29_INACTIVE 0
-#define GPIO_INTERRUPT_STATUS_29_ACTIVE 1
-#define GPIO_INTERRUPT_STATUS_29_RESET 1
-#define GPIO_INTERRUPT_STATUS_28 19:19
-#define GPIO_INTERRUPT_STATUS_28_INACTIVE 0
-#define GPIO_INTERRUPT_STATUS_28_ACTIVE 1
-#define GPIO_INTERRUPT_STATUS_28_RESET 1
-#define GPIO_INTERRUPT_STATUS_27 18:18
-#define GPIO_INTERRUPT_STATUS_27_INACTIVE 0
-#define GPIO_INTERRUPT_STATUS_27_ACTIVE 1
-#define GPIO_INTERRUPT_STATUS_27_RESET 1
-#define GPIO_INTERRUPT_STATUS_26 17:17
-#define GPIO_INTERRUPT_STATUS_26_INACTIVE 0
-#define GPIO_INTERRUPT_STATUS_26_ACTIVE 1
-#define GPIO_INTERRUPT_STATUS_26_RESET 1
-#define GPIO_INTERRUPT_STATUS_25 16:16
-#define GPIO_INTERRUPT_STATUS_25_INACTIVE 0
-#define GPIO_INTERRUPT_STATUS_25_ACTIVE 1
-#define GPIO_INTERRUPT_STATUS_25_RESET 1
+#define GPIO_INTERRUPT_STATUS_31 BIT(22)
+#define GPIO_INTERRUPT_STATUS_30 BIT(21)
+#define GPIO_INTERRUPT_STATUS_29 BIT(20)
+#define GPIO_INTERRUPT_STATUS_28 BIT(19)
+#define GPIO_INTERRUPT_STATUS_27 BIT(18)
+#define GPIO_INTERRUPT_STATUS_26 BIT(17)
+#define GPIO_INTERRUPT_STATUS_25 BIT(16)
#define PANEL_DISPLAY_CTRL 0x080000
-#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK 31:30
-#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE 0
-#define PANEL_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE 3
-#define PANEL_DISPLAY_CTRL_SELECT 29:28
-#define PANEL_DISPLAY_CTRL_SELECT_PANEL 0
-#define PANEL_DISPLAY_CTRL_SELECT_VGA 1
-#define PANEL_DISPLAY_CTRL_SELECT_CRT 2
-#define PANEL_DISPLAY_CTRL_FPEN 27:27
-#define PANEL_DISPLAY_CTRL_FPEN_LOW 0
-#define PANEL_DISPLAY_CTRL_FPEN_HIGH 1
-#define PANEL_DISPLAY_CTRL_VBIASEN 26:26
-#define PANEL_DISPLAY_CTRL_VBIASEN_LOW 0
-#define PANEL_DISPLAY_CTRL_VBIASEN_HIGH 1
-#define PANEL_DISPLAY_CTRL_DATA 25:25
-#define PANEL_DISPLAY_CTRL_DATA_DISABLE 0
-#define PANEL_DISPLAY_CTRL_DATA_ENABLE 1
-#define PANEL_DISPLAY_CTRL_FPVDDEN 24:24
-#define PANEL_DISPLAY_CTRL_FPVDDEN_LOW 0
-#define PANEL_DISPLAY_CTRL_FPVDDEN_HIGH 1
-#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK 23:20
-#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE 0
-#define PANEL_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE 15
-
-#define PANEL_DISPLAY_CTRL_TFT_DISP 19:18
-#define PANEL_DISPLAY_CTRL_TFT_DISP_24 0
-#define PANEL_DISPLAY_CTRL_TFT_DISP_36 1
-#define PANEL_DISPLAY_CTRL_TFT_DISP_18 2
-
-
-#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY 19:19
-#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY_DISABLE 0
-#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY_ENABLE 1
-#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL 18:18
-#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL_DISABLE 0
-#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL_ENABLE 1
-#define PANEL_DISPLAY_CTRL_FIFO 17:16
-#define PANEL_DISPLAY_CTRL_FIFO_1 0
-#define PANEL_DISPLAY_CTRL_FIFO_3 1
-#define PANEL_DISPLAY_CTRL_FIFO_7 2
-#define PANEL_DISPLAY_CTRL_FIFO_11 3
-#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK 15:15
-#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK_DISABLE 0
-#define PANEL_DISPLAY_CTRL_RESERVED_3_MASK_ENABLE 1
-#define PANEL_DISPLAY_CTRL_CLOCK_PHASE 14:14
-#define PANEL_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_HIGH 0
-#define PANEL_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_LOW 1
-#define PANEL_DISPLAY_CTRL_VSYNC_PHASE 13:13
-#define PANEL_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_HIGH 0
-#define PANEL_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_LOW 1
-#define PANEL_DISPLAY_CTRL_HSYNC_PHASE 12:12
-#define PANEL_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_HIGH 0
-#define PANEL_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_LOW 1
-#define PANEL_DISPLAY_CTRL_VSYNC 11:11
-#define PANEL_DISPLAY_CTRL_VSYNC_ACTIVE_HIGH 0
-#define PANEL_DISPLAY_CTRL_VSYNC_ACTIVE_LOW 1
-#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING 10:10
-#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING_DISABLE 0
-#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING_ENABLE 1
-#define PANEL_DISPLAY_CTRL_COLOR_KEY 9:9
-#define PANEL_DISPLAY_CTRL_COLOR_KEY_DISABLE 0
-#define PANEL_DISPLAY_CTRL_COLOR_KEY_ENABLE 1
-#define PANEL_DISPLAY_CTRL_TIMING 8:8
-#define PANEL_DISPLAY_CTRL_TIMING_DISABLE 0
-#define PANEL_DISPLAY_CTRL_TIMING_ENABLE 1
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR 7:7
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR_DOWN 0
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR_UP 1
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN 6:6
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DISABLE 0
-#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_ENABLE 1
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR 5:5
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR_RIGHT 0
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR_LEFT 1
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN 4:4
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DISABLE 0
-#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_ENABLE 1
-#define PANEL_DISPLAY_CTRL_GAMMA 3:3
-#define PANEL_DISPLAY_CTRL_GAMMA_DISABLE 0
-#define PANEL_DISPLAY_CTRL_GAMMA_ENABLE 1
-#define PANEL_DISPLAY_CTRL_PLANE 2:2
-#define PANEL_DISPLAY_CTRL_PLANE_DISABLE 0
-#define PANEL_DISPLAY_CTRL_PLANE_ENABLE 1
-#define PANEL_DISPLAY_CTRL_FORMAT 1:0
-#define PANEL_DISPLAY_CTRL_FORMAT_8 0
-#define PANEL_DISPLAY_CTRL_FORMAT_16 1
-#define PANEL_DISPLAY_CTRL_FORMAT_32 2
+#define PANEL_DISPLAY_CTRL_RESERVED_MASK 0xc0f08000
+#define PANEL_DISPLAY_CTRL_SELECT_SHIFT 28
+#define PANEL_DISPLAY_CTRL_SELECT_MASK (0x3 << 28)
+#define PANEL_DISPLAY_CTRL_SELECT_PANEL (0x0 << 28)
+#define PANEL_DISPLAY_CTRL_SELECT_VGA (0x1 << 28)
+#define PANEL_DISPLAY_CTRL_SELECT_CRT (0x2 << 28)
+#define PANEL_DISPLAY_CTRL_FPEN BIT(27)
+#define PANEL_DISPLAY_CTRL_VBIASEN BIT(26)
+#define PANEL_DISPLAY_CTRL_DATA BIT(25)
+#define PANEL_DISPLAY_CTRL_FPVDDEN BIT(24)
+#define PANEL_DISPLAY_CTRL_DUAL_DISPLAY BIT(19)
+#define PANEL_DISPLAY_CTRL_DOUBLE_PIXEL BIT(18)
+#define PANEL_DISPLAY_CTRL_FIFO (0x3 << 16)
+#define PANEL_DISPLAY_CTRL_FIFO_1 (0x0 << 16)
+#define PANEL_DISPLAY_CTRL_FIFO_3 (0x1 << 16)
+#define PANEL_DISPLAY_CTRL_FIFO_7 (0x2 << 16)
+#define PANEL_DISPLAY_CTRL_FIFO_11 (0x3 << 16)
+#define DISPLAY_CTRL_CLOCK_PHASE BIT(14)
+#define DISPLAY_CTRL_VSYNC_PHASE BIT(13)
+#define DISPLAY_CTRL_HSYNC_PHASE BIT(12)
+#define PANEL_DISPLAY_CTRL_VSYNC BIT(11)
+#define PANEL_DISPLAY_CTRL_CAPTURE_TIMING BIT(10)
+#define PANEL_DISPLAY_CTRL_COLOR_KEY BIT(9)
+#define DISPLAY_CTRL_TIMING BIT(8)
+#define PANEL_DISPLAY_CTRL_VERTICAL_PAN_DIR BIT(7)
+#define PANEL_DISPLAY_CTRL_VERTICAL_PAN BIT(6)
+#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN_DIR BIT(5)
+#define PANEL_DISPLAY_CTRL_HORIZONTAL_PAN BIT(4)
+#define DISPLAY_CTRL_GAMMA BIT(3)
+#define DISPLAY_CTRL_PLANE BIT(2)
+#define PANEL_DISPLAY_CTRL_FORMAT (0x3 << 0)
+#define PANEL_DISPLAY_CTRL_FORMAT_8 (0x0 << 0)
+#define PANEL_DISPLAY_CTRL_FORMAT_16 (0x1 << 0)
+#define PANEL_DISPLAY_CTRL_FORMAT_32 (0x2 << 0)
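For multi-bit fields the conversion pairs a *_MASK (plus a *_SHIFT where callers compute values) with pre-shifted value macros, so updating a field becomes a plain read-modify-write instead of going through the old helpers that parsed the hi:lo notation. A sketch, again assuming hypothetical peek32()/poke32() accessors:

	u32 ctrl = peek32(PANEL_DISPLAY_CTRL);

	ctrl &= ~PANEL_DISPLAY_CTRL_SELECT_MASK;	/* clear the 2-bit field */
	ctrl |= PANEL_DISPLAY_CTRL_SELECT_CRT;	/* or in the pre-shifted value */
	poke32(PANEL_DISPLAY_CTRL, ctrl);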
#define PANEL_PAN_CTRL 0x080004
-#define PANEL_PAN_CTRL_VERTICAL_PAN 31:24
-#define PANEL_PAN_CTRL_VERTICAL_VSYNC 21:16
-#define PANEL_PAN_CTRL_HORIZONTAL_PAN 15:8
-#define PANEL_PAN_CTRL_HORIZONTAL_VSYNC 5:0
+#define PANEL_PAN_CTRL_VERTICAL_PAN_MASK (0xff << 24)
+#define PANEL_PAN_CTRL_VERTICAL_VSYNC_MASK (0x3f << 16)
+#define PANEL_PAN_CTRL_HORIZONTAL_PAN_MASK (0xff << 8)
+#define PANEL_PAN_CTRL_HORIZONTAL_VSYNC_MASK 0x3f
#define PANEL_COLOR_KEY 0x080008
-#define PANEL_COLOR_KEY_MASK 31:16
-#define PANEL_COLOR_KEY_VALUE 15:0
+#define PANEL_COLOR_KEY_MASK_MASK (0xffff << 16)
+#define PANEL_COLOR_KEY_VALUE_MASK 0xffff
#define PANEL_FB_ADDRESS 0x08000C
-#define PANEL_FB_ADDRESS_STATUS 31:31
-#define PANEL_FB_ADDRESS_STATUS_CURRENT 0
-#define PANEL_FB_ADDRESS_STATUS_PENDING 1
-#define PANEL_FB_ADDRESS_EXT 27:27
-#define PANEL_FB_ADDRESS_EXT_LOCAL 0
-#define PANEL_FB_ADDRESS_EXT_EXTERNAL 1
-#define PANEL_FB_ADDRESS_ADDRESS 25:0
+#define PANEL_FB_ADDRESS_STATUS BIT(31)
+#define PANEL_FB_ADDRESS_EXT BIT(27)
+#define PANEL_FB_ADDRESS_ADDRESS_MASK 0x3ffffff
#define PANEL_FB_WIDTH 0x080010
-#define PANEL_FB_WIDTH_WIDTH 29:16
-#define PANEL_FB_WIDTH_OFFSET 13:0
+#define PANEL_FB_WIDTH_WIDTH_SHIFT 16
+#define PANEL_FB_WIDTH_WIDTH_MASK (0x3fff << 16)
+#define PANEL_FB_WIDTH_OFFSET_MASK 0x3fff
#define PANEL_WINDOW_WIDTH 0x080014
-#define PANEL_WINDOW_WIDTH_WIDTH 27:16
-#define PANEL_WINDOW_WIDTH_X 11:0
+#define PANEL_WINDOW_WIDTH_WIDTH_SHIFT 16
+#define PANEL_WINDOW_WIDTH_WIDTH_MASK (0xfff << 16)
+#define PANEL_WINDOW_WIDTH_X_MASK 0xfff
#define PANEL_WINDOW_HEIGHT 0x080018
-#define PANEL_WINDOW_HEIGHT_HEIGHT 27:16
-#define PANEL_WINDOW_HEIGHT_Y 11:0
+#define PANEL_WINDOW_HEIGHT_HEIGHT_SHIFT 16
+#define PANEL_WINDOW_HEIGHT_HEIGHT_MASK (0xfff << 16)
+#define PANEL_WINDOW_HEIGHT_Y_MASK 0xfff
#define PANEL_PLANE_TL 0x08001C
-#define PANEL_PLANE_TL_TOP 26:16
-#define PANEL_PLANE_TL_LEFT 10:0
+#define PANEL_PLANE_TL_TOP_SHIFT 16
+#define PANEL_PLANE_TL_TOP_MASK (0x7ff << 16)
+#define PANEL_PLANE_TL_LEFT_MASK 0x7ff
#define PANEL_PLANE_BR 0x080020
-#define PANEL_PLANE_BR_BOTTOM 26:16
-#define PANEL_PLANE_BR_RIGHT 10:0
+#define PANEL_PLANE_BR_BOTTOM_SHIFT 16
+#define PANEL_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+#define PANEL_PLANE_BR_RIGHT_MASK 0x7ff
#define PANEL_HORIZONTAL_TOTAL 0x080024
-#define PANEL_HORIZONTAL_TOTAL_TOTAL 27:16
-#define PANEL_HORIZONTAL_TOTAL_DISPLAY_END 11:0
+#define PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
+#define PANEL_HORIZONTAL_TOTAL_TOTAL_MASK (0xfff << 16)
+#define PANEL_HORIZONTAL_TOTAL_DISPLAY_END_MASK 0xfff
#define PANEL_HORIZONTAL_SYNC 0x080028
-#define PANEL_HORIZONTAL_SYNC_WIDTH 23:16
-#define PANEL_HORIZONTAL_SYNC_START 11:0
+#define PANEL_HORIZONTAL_SYNC_WIDTH_SHIFT 16
+#define PANEL_HORIZONTAL_SYNC_WIDTH_MASK (0xff << 16)
+#define PANEL_HORIZONTAL_SYNC_START_MASK 0xfff
#define PANEL_VERTICAL_TOTAL 0x08002C
-#define PANEL_VERTICAL_TOTAL_TOTAL 26:16
-#define PANEL_VERTICAL_TOTAL_DISPLAY_END 10:0
+#define PANEL_VERTICAL_TOTAL_TOTAL_SHIFT 16
+#define PANEL_VERTICAL_TOTAL_TOTAL_MASK (0x7ff << 16)
+#define PANEL_VERTICAL_TOTAL_DISPLAY_END_MASK 0x7ff
#define PANEL_VERTICAL_SYNC 0x080030
-#define PANEL_VERTICAL_SYNC_HEIGHT 21:16
-#define PANEL_VERTICAL_SYNC_START 10:0
+#define PANEL_VERTICAL_SYNC_HEIGHT_SHIFT 16
+#define PANEL_VERTICAL_SYNC_HEIGHT_MASK (0x3f << 16)
+#define PANEL_VERTICAL_SYNC_START_MASK 0x7ff
#define PANEL_CURRENT_LINE 0x080034
-#define PANEL_CURRENT_LINE_LINE 10:0
+#define PANEL_CURRENT_LINE_LINE_MASK 0x7ff
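Reading a field back is the mirror image: mask, then shift down when the field is not bottom-aligned. A sketch with the same hypothetical peek32():

	/* Bottom-aligned field: the mask alone suffices. */
	unsigned int line = peek32(PANEL_CURRENT_LINE) &
			    PANEL_CURRENT_LINE_LINE_MASK;

	/* Shifted field: mask first, then shift down. */
	unsigned int width = (peek32(PANEL_FB_WIDTH) &
			      PANEL_FB_WIDTH_WIDTH_MASK) >>
			     PANEL_FB_WIDTH_WIDTH_SHIFT;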
/* Video Control */
#define VIDEO_DISPLAY_CTRL 0x080040
-#define VIDEO_DISPLAY_CTRL_LINE_BUFFER 18:18
-#define VIDEO_DISPLAY_CTRL_LINE_BUFFER_DISABLE 0
-#define VIDEO_DISPLAY_CTRL_LINE_BUFFER_ENABLE 1
-#define VIDEO_DISPLAY_CTRL_FIFO 17:16
-#define VIDEO_DISPLAY_CTRL_FIFO_1 0
-#define VIDEO_DISPLAY_CTRL_FIFO_3 1
-#define VIDEO_DISPLAY_CTRL_FIFO_7 2
-#define VIDEO_DISPLAY_CTRL_FIFO_11 3
-#define VIDEO_DISPLAY_CTRL_BUFFER 15:15
-#define VIDEO_DISPLAY_CTRL_BUFFER_0 0
-#define VIDEO_DISPLAY_CTRL_BUFFER_1 1
-#define VIDEO_DISPLAY_CTRL_CAPTURE 14:14
-#define VIDEO_DISPLAY_CTRL_CAPTURE_DISABLE 0
-#define VIDEO_DISPLAY_CTRL_CAPTURE_ENABLE 1
-#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER 13:13
-#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER_DISABLE 0
-#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER_ENABLE 1
-#define VIDEO_DISPLAY_CTRL_BYTE_SWAP 12:12
-#define VIDEO_DISPLAY_CTRL_BYTE_SWAP_DISABLE 0
-#define VIDEO_DISPLAY_CTRL_BYTE_SWAP_ENABLE 1
-#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE 11:11
-#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE_NORMAL 0
-#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE_HALF 1
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE 10:10
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE_NORMAL 0
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE_HALF 1
-#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE 9:9
-#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE_REPLICATE 0
-#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE_INTERPOLATE 1
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE 8:8
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE_REPLICATE 0
-#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE_INTERPOLATE 1
-#define VIDEO_DISPLAY_CTRL_PIXEL 7:4
-#define VIDEO_DISPLAY_CTRL_GAMMA 3:3
-#define VIDEO_DISPLAY_CTRL_GAMMA_DISABLE 0
-#define VIDEO_DISPLAY_CTRL_GAMMA_ENABLE 1
-#define VIDEO_DISPLAY_CTRL_PLANE 2:2
-#define VIDEO_DISPLAY_CTRL_PLANE_DISABLE 0
-#define VIDEO_DISPLAY_CTRL_PLANE_ENABLE 1
-#define VIDEO_DISPLAY_CTRL_FORMAT 1:0
-#define VIDEO_DISPLAY_CTRL_FORMAT_8 0
-#define VIDEO_DISPLAY_CTRL_FORMAT_16 1
-#define VIDEO_DISPLAY_CTRL_FORMAT_32 2
-#define VIDEO_DISPLAY_CTRL_FORMAT_YUV 3
+#define VIDEO_DISPLAY_CTRL_LINE_BUFFER BIT(18)
+#define VIDEO_DISPLAY_CTRL_FIFO_MASK (0x3 << 16)
+#define VIDEO_DISPLAY_CTRL_FIFO_1 (0x0 << 16)
+#define VIDEO_DISPLAY_CTRL_FIFO_3 (0x1 << 16)
+#define VIDEO_DISPLAY_CTRL_FIFO_7 (0x2 << 16)
+#define VIDEO_DISPLAY_CTRL_FIFO_11 (0x3 << 16)
+#define VIDEO_DISPLAY_CTRL_BUFFER BIT(15)
+#define VIDEO_DISPLAY_CTRL_CAPTURE BIT(14)
+#define VIDEO_DISPLAY_CTRL_DOUBLE_BUFFER BIT(13)
+#define VIDEO_DISPLAY_CTRL_BYTE_SWAP BIT(12)
+#define VIDEO_DISPLAY_CTRL_VERTICAL_SCALE BIT(11)
+#define VIDEO_DISPLAY_CTRL_HORIZONTAL_SCALE BIT(10)
+#define VIDEO_DISPLAY_CTRL_VERTICAL_MODE BIT(9)
+#define VIDEO_DISPLAY_CTRL_HORIZONTAL_MODE BIT(8)
+#define VIDEO_DISPLAY_CTRL_PIXEL_MASK (0xf << 4)
+#define VIDEO_DISPLAY_CTRL_GAMMA BIT(3)
+#define VIDEO_DISPLAY_CTRL_FORMAT_MASK 0x3
+#define VIDEO_DISPLAY_CTRL_FORMAT_8 0x0
+#define VIDEO_DISPLAY_CTRL_FORMAT_16 0x1
+#define VIDEO_DISPLAY_CTRL_FORMAT_32 0x2
+#define VIDEO_DISPLAY_CTRL_FORMAT_YUV 0x3
#define VIDEO_FB_0_ADDRESS 0x080044
-#define VIDEO_FB_0_ADDRESS_STATUS 31:31
-#define VIDEO_FB_0_ADDRESS_STATUS_CURRENT 0
-#define VIDEO_FB_0_ADDRESS_STATUS_PENDING 1
-#define VIDEO_FB_0_ADDRESS_EXT 27:27
-#define VIDEO_FB_0_ADDRESS_EXT_LOCAL 0
-#define VIDEO_FB_0_ADDRESS_EXT_EXTERNAL 1
-#define VIDEO_FB_0_ADDRESS_ADDRESS 25:0
+#define VIDEO_FB_0_ADDRESS_STATUS BIT(31)
+#define VIDEO_FB_0_ADDRESS_EXT BIT(27)
+#define VIDEO_FB_0_ADDRESS_ADDRESS_MASK 0x3ffffff
#define VIDEO_FB_WIDTH 0x080048
-#define VIDEO_FB_WIDTH_WIDTH 29:16
-#define VIDEO_FB_WIDTH_OFFSET 13:0
+#define VIDEO_FB_WIDTH_WIDTH_MASK (0x3fff << 16)
+#define VIDEO_FB_WIDTH_OFFSET_MASK 0x3fff
#define VIDEO_FB_0_LAST_ADDRESS 0x08004C
-#define VIDEO_FB_0_LAST_ADDRESS_EXT 27:27
-#define VIDEO_FB_0_LAST_ADDRESS_EXT_LOCAL 0
-#define VIDEO_FB_0_LAST_ADDRESS_EXT_EXTERNAL 1
-#define VIDEO_FB_0_LAST_ADDRESS_ADDRESS 25:0
+#define VIDEO_FB_0_LAST_ADDRESS_EXT BIT(27)
+#define VIDEO_FB_0_LAST_ADDRESS_ADDRESS_MASK 0x3ffffff
#define VIDEO_PLANE_TL 0x080050
-#define VIDEO_PLANE_TL_TOP 26:16
-#define VIDEO_PLANE_TL_LEFT 10:0
+#define VIDEO_PLANE_TL_TOP_MASK (0x7ff << 16)
+#define VIDEO_PLANE_TL_LEFT_MASK 0x7ff
#define VIDEO_PLANE_BR 0x080054
-#define VIDEO_PLANE_BR_BOTTOM 26:16
-#define VIDEO_PLANE_BR_RIGHT 10:0
+#define VIDEO_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+#define VIDEO_PLANE_BR_RIGHT_MASK 0x7ff
#define VIDEO_SCALE 0x080058
-#define VIDEO_SCALE_VERTICAL_MODE 31:31
-#define VIDEO_SCALE_VERTICAL_MODE_EXPAND 0
-#define VIDEO_SCALE_VERTICAL_MODE_SHRINK 1
-#define VIDEO_SCALE_VERTICAL_SCALE 27:16
-#define VIDEO_SCALE_HORIZONTAL_MODE 15:15
-#define VIDEO_SCALE_HORIZONTAL_MODE_EXPAND 0
-#define VIDEO_SCALE_HORIZONTAL_MODE_SHRINK 1
-#define VIDEO_SCALE_HORIZONTAL_SCALE 11:0
+#define VIDEO_SCALE_VERTICAL_MODE BIT(31)
+#define VIDEO_SCALE_VERTICAL_SCALE_MASK (0xfff << 16)
+#define VIDEO_SCALE_HORIZONTAL_MODE BIT(15)
+#define VIDEO_SCALE_HORIZONTAL_SCALE_MASK 0xfff
#define VIDEO_INITIAL_SCALE 0x08005C
-#define VIDEO_INITIAL_SCALE_FB_1 27:16
-#define VIDEO_INITIAL_SCALE_FB_0 11:0
+#define VIDEO_INITIAL_SCALE_FB_1_MASK (0xfff << 16)
+#define VIDEO_INITIAL_SCALE_FB_0_MASK 0xfff
#define VIDEO_YUV_CONSTANTS 0x080060
-#define VIDEO_YUV_CONSTANTS_Y 31:24
-#define VIDEO_YUV_CONSTANTS_R 23:16
-#define VIDEO_YUV_CONSTANTS_G 15:8
-#define VIDEO_YUV_CONSTANTS_B 7:0
+#define VIDEO_YUV_CONSTANTS_Y_MASK (0xff << 24)
+#define VIDEO_YUV_CONSTANTS_R_MASK (0xff << 16)
+#define VIDEO_YUV_CONSTANTS_G_MASK (0xff << 8)
+#define VIDEO_YUV_CONSTANTS_B_MASK 0xff
#define VIDEO_FB_1_ADDRESS 0x080064
-#define VIDEO_FB_1_ADDRESS_STATUS 31:31
-#define VIDEO_FB_1_ADDRESS_STATUS_CURRENT 0
-#define VIDEO_FB_1_ADDRESS_STATUS_PENDING 1
-#define VIDEO_FB_1_ADDRESS_EXT 27:27
-#define VIDEO_FB_1_ADDRESS_EXT_LOCAL 0
-#define VIDEO_FB_1_ADDRESS_EXT_EXTERNAL 1
-#define VIDEO_FB_1_ADDRESS_ADDRESS 25:0
+#define VIDEO_FB_1_ADDRESS_STATUS BIT(31)
+#define VIDEO_FB_1_ADDRESS_EXT BIT(27)
+#define VIDEO_FB_1_ADDRESS_ADDRESS_MASK 0x3ffffff
#define VIDEO_FB_1_LAST_ADDRESS 0x080068
-#define VIDEO_FB_1_LAST_ADDRESS_EXT 27:27
-#define VIDEO_FB_1_LAST_ADDRESS_EXT_LOCAL 0
-#define VIDEO_FB_1_LAST_ADDRESS_EXT_EXTERNAL 1
-#define VIDEO_FB_1_LAST_ADDRESS_ADDRESS 25:0
+#define VIDEO_FB_1_LAST_ADDRESS_EXT BIT(27)
+#define VIDEO_FB_1_LAST_ADDRESS_ADDRESS_MASK 0x3ffffff
/* Video Alpha Control */
#define VIDEO_ALPHA_DISPLAY_CTRL 0x080080
-#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT 28:28
-#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT_PER_PIXEL 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT_ALPHA 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_ALPHA 27:24
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO 17:16
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_1 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_3 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_7 2
-#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_11 3
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE 11:11
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE_NORMAL 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE_HALF 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE 10:10
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE_NORMAL 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE_HALF 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE 9:9
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE_REPLICATE 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE_INTERPOLATE 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE 8:8
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE_REPLICATE 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE_INTERPOLATE 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_PIXEL 7:4
-#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY 3:3
-#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY_DISABLE 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY_ENABLE 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE 2:2
-#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE_DISABLE 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_PLANE_ENABLE 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT 1:0
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_8 0
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_16 1
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4 2
-#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4 3
+#define VIDEO_ALPHA_DISPLAY_CTRL_SELECT BIT(28)
+#define VIDEO_ALPHA_DISPLAY_CTRL_ALPHA_MASK (0xf << 24)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_MASK (0x3 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_1 (0x0 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_3 (0x1 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_7 (0x2 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FIFO_11 (0x3 << 16)
+#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_SCALE BIT(11)
+#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_SCALE BIT(10)
+#define VIDEO_ALPHA_DISPLAY_CTRL_VERT_MODE BIT(9)
+#define VIDEO_ALPHA_DISPLAY_CTRL_HORZ_MODE BIT(8)
+#define VIDEO_ALPHA_DISPLAY_CTRL_PIXEL_MASK (0xf << 4)
+#define VIDEO_ALPHA_DISPLAY_CTRL_CHROMA_KEY BIT(3)
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_MASK 0x3
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_8 0x0
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_16 0x1
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4 0x2
+#define VIDEO_ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4 0x3
#define VIDEO_ALPHA_FB_ADDRESS 0x080084
-#define VIDEO_ALPHA_FB_ADDRESS_STATUS 31:31
-#define VIDEO_ALPHA_FB_ADDRESS_STATUS_CURRENT 0
-#define VIDEO_ALPHA_FB_ADDRESS_STATUS_PENDING 1
-#define VIDEO_ALPHA_FB_ADDRESS_EXT 27:27
-#define VIDEO_ALPHA_FB_ADDRESS_EXT_LOCAL 0
-#define VIDEO_ALPHA_FB_ADDRESS_EXT_EXTERNAL 1
-#define VIDEO_ALPHA_FB_ADDRESS_ADDRESS 25:0
+#define VIDEO_ALPHA_FB_ADDRESS_STATUS BIT(31)
+#define VIDEO_ALPHA_FB_ADDRESS_EXT BIT(27)
+#define VIDEO_ALPHA_FB_ADDRESS_ADDRESS_MASK 0x3ffffff
#define VIDEO_ALPHA_FB_WIDTH 0x080088
-#define VIDEO_ALPHA_FB_WIDTH_WIDTH 29:16
-#define VIDEO_ALPHA_FB_WIDTH_OFFSET 13:0
+#define VIDEO_ALPHA_FB_WIDTH_WIDTH_MASK (0x3fff << 16)
+#define VIDEO_ALPHA_FB_WIDTH_OFFSET_MASK 0x3fff
#define VIDEO_ALPHA_FB_LAST_ADDRESS 0x08008C
-#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT 27:27
-#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT_LOCAL 0
-#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT_EXTERNAL 1
-#define VIDEO_ALPHA_FB_LAST_ADDRESS_ADDRESS 25:0
+#define VIDEO_ALPHA_FB_LAST_ADDRESS_EXT BIT(27)
+#define VIDEO_ALPHA_FB_LAST_ADDRESS_ADDRESS_MASK 0x3ffffff
#define VIDEO_ALPHA_PLANE_TL 0x080090
-#define VIDEO_ALPHA_PLANE_TL_TOP 26:16
-#define VIDEO_ALPHA_PLANE_TL_LEFT 10:0
+#define VIDEO_ALPHA_PLANE_TL_TOP_MASK (0x7ff << 16)
+#define VIDEO_ALPHA_PLANE_TL_LEFT_MASK 0x7ff
#define VIDEO_ALPHA_PLANE_BR 0x080094
-#define VIDEO_ALPHA_PLANE_BR_BOTTOM 26:16
-#define VIDEO_ALPHA_PLANE_BR_RIGHT 10:0
+#define VIDEO_ALPHA_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+#define VIDEO_ALPHA_PLANE_BR_RIGHT_MASK 0x7ff
#define VIDEO_ALPHA_SCALE 0x080098
-#define VIDEO_ALPHA_SCALE_VERTICAL_MODE 31:31
-#define VIDEO_ALPHA_SCALE_VERTICAL_MODE_EXPAND 0
-#define VIDEO_ALPHA_SCALE_VERTICAL_MODE_SHRINK 1
-#define VIDEO_ALPHA_SCALE_VERTICAL_SCALE 27:16
-#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE 15:15
-#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE_EXPAND 0
-#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE_SHRINK 1
-#define VIDEO_ALPHA_SCALE_HORIZONTAL_SCALE 11:0
+#define VIDEO_ALPHA_SCALE_VERTICAL_MODE BIT(31)
+#define VIDEO_ALPHA_SCALE_VERTICAL_SCALE_MASK (0xfff << 16)
+#define VIDEO_ALPHA_SCALE_HORIZONTAL_MODE BIT(15)
+#define VIDEO_ALPHA_SCALE_HORIZONTAL_SCALE_MASK 0xfff
#define VIDEO_ALPHA_INITIAL_SCALE 0x08009C
-#define VIDEO_ALPHA_INITIAL_SCALE_VERTICAL 27:16
-#define VIDEO_ALPHA_INITIAL_SCALE_HORIZONTAL 11:0
+#define VIDEO_ALPHA_INITIAL_SCALE_VERTICAL_MASK (0xfff << 16)
+#define VIDEO_ALPHA_INITIAL_SCALE_HORIZONTAL_MASK 0xfff
#define VIDEO_ALPHA_CHROMA_KEY 0x0800A0
-#define VIDEO_ALPHA_CHROMA_KEY_MASK 31:16
-#define VIDEO_ALPHA_CHROMA_KEY_VALUE 15:0
+#define VIDEO_ALPHA_CHROMA_KEY_MASK_MASK (0xffff << 16)
+#define VIDEO_ALPHA_CHROMA_KEY_VALUE_MASK 0xffff
#define VIDEO_ALPHA_COLOR_LOOKUP_01 0x0800A4
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_1 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_RED 31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_GREEN 26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_BLUE 20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_0 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_RED 15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_GREEN 10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_BLUE 4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_MASK (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_RED_MASK (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_GREEN_MASK (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_1_BLUE_MASK (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_MASK 0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_RED_MASK (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_GREEN_MASK (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_01_0_BLUE_MASK 0x1f
#define VIDEO_ALPHA_COLOR_LOOKUP_23 0x0800A8
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_3 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_RED 31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_GREEN 26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_BLUE 20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_RED 15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_GREEN 10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_BLUE 4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_MASK (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_RED_MASK (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_GREEN_MASK (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_3_BLUE_MASK (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_MASK 0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_RED_MASK (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_GREEN_MASK (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_23_2_BLUE_MASK 0x1f
#define VIDEO_ALPHA_COLOR_LOOKUP_45 0x0800AC
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_5 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_RED 31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_GREEN 26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_BLUE 20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_4 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_RED 15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_GREEN 10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_BLUE 4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_MASK (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_RED_MASK (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_GREEN_MASK (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_5_BLUE_MASK (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_MASK 0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_RED_MASK (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_GREEN_MASK (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_45_4_BLUE_MASK 0x1f
#define VIDEO_ALPHA_COLOR_LOOKUP_67 0x0800B0
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_7 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_RED 31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_GREEN 26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_BLUE 20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_6 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_RED 15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_GREEN 10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_BLUE 4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_MASK (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_RED_MASK (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_GREEN_MASK (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_7_BLUE_MASK (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_MASK 0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_RED_MASK (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_GREEN_MASK (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_67_6_BLUE_MASK 0x1f
#define VIDEO_ALPHA_COLOR_LOOKUP_89 0x0800B4
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_9 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_RED 31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_GREEN 26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_BLUE 20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_8 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_RED 15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_GREEN 10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_BLUE 4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_MASK (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_RED_MASK (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_GREEN_MASK (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_9_BLUE_MASK (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_MASK 0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_RED_MASK (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_GREEN_MASK (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_89_8_BLUE_MASK 0x1f
#define VIDEO_ALPHA_COLOR_LOOKUP_AB 0x0800B8
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_RED 31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_GREEN 26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_BLUE 20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_RED 15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_GREEN 10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_BLUE 4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_MASK (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_RED_MASK (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_GREEN_MASK (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_B_BLUE_MASK (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_MASK 0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_RED_MASK (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_GREEN_MASK (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_AB_A_BLUE_MASK 0x1f
#define VIDEO_ALPHA_COLOR_LOOKUP_CD 0x0800BC
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_RED 31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_GREEN 26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_BLUE 20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_RED 15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_GREEN 10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_BLUE 4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_MASK (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_RED_MASK (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_GREEN_MASK (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_D_BLUE_MASK (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_MASK 0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_RED_MASK (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_GREEN_MASK (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_CD_C_BLUE_MASK 0x1f
#define VIDEO_ALPHA_COLOR_LOOKUP_EF 0x0800C0
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F 31:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_RED 31:27
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_GREEN 26:21
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_BLUE 20:16
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E 15:0
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_RED 15:11
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_GREEN 10:5
-#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_BLUE 4:0
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_MASK (0xffff << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_RED_MASK (0x1f << 27)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_GREEN_MASK (0x3f << 21)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_F_BLUE_MASK (0x1f << 16)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_MASK 0xffff
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_RED_MASK (0x1f << 11)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_GREEN_MASK (0x3f << 5)
+#define VIDEO_ALPHA_COLOR_LOOKUP_EF_E_BLUE_MASK 0x1f
/* Panel Cursor Control */
#define PANEL_HWC_ADDRESS 0x0800F0
-#define PANEL_HWC_ADDRESS_ENABLE 31:31
-#define PANEL_HWC_ADDRESS_ENABLE_DISABLE 0
-#define PANEL_HWC_ADDRESS_ENABLE_ENABLE 1
-#define PANEL_HWC_ADDRESS_EXT 27:27
-#define PANEL_HWC_ADDRESS_EXT_LOCAL 0
-#define PANEL_HWC_ADDRESS_EXT_EXTERNAL 1
-#define PANEL_HWC_ADDRESS_ADDRESS 25:0
+#define PANEL_HWC_ADDRESS_ENABLE BIT(31)
+#define PANEL_HWC_ADDRESS_EXT BIT(27)
+#define PANEL_HWC_ADDRESS_ADDRESS_MASK 0x3ffffff
#define PANEL_HWC_LOCATION 0x0800F4
-#define PANEL_HWC_LOCATION_TOP 27:27
-#define PANEL_HWC_LOCATION_TOP_INSIDE 0
-#define PANEL_HWC_LOCATION_TOP_OUTSIDE 1
-#define PANEL_HWC_LOCATION_Y 26:16
-#define PANEL_HWC_LOCATION_LEFT 11:11
-#define PANEL_HWC_LOCATION_LEFT_INSIDE 0
-#define PANEL_HWC_LOCATION_LEFT_OUTSIDE 1
-#define PANEL_HWC_LOCATION_X 10:0
+#define PANEL_HWC_LOCATION_TOP BIT(27)
+#define PANEL_HWC_LOCATION_Y_MASK (0x7ff << 16)
+#define PANEL_HWC_LOCATION_LEFT BIT(11)
+#define PANEL_HWC_LOCATION_X_MASK 0x7ff
#define PANEL_HWC_COLOR_12 0x0800F8
-#define PANEL_HWC_COLOR_12_2_RGB565 31:16
-#define PANEL_HWC_COLOR_12_1_RGB565 15:0
+#define PANEL_HWC_COLOR_12_2_RGB565_MASK (0xffff << 16)
+#define PANEL_HWC_COLOR_12_1_RGB565_MASK 0xffff
#define PANEL_HWC_COLOR_3 0x0800FC
-#define PANEL_HWC_COLOR_3_RGB565 15:0
+#define PANEL_HWC_COLOR_3_RGB565_MASK 0xffff
/* Old Definitions +++ */
#define PANEL_HWC_COLOR_01 0x0800F8
-#define PANEL_HWC_COLOR_01_1_RED 31:27
-#define PANEL_HWC_COLOR_01_1_GREEN 26:21
-#define PANEL_HWC_COLOR_01_1_BLUE 20:16
-#define PANEL_HWC_COLOR_01_0_RED 15:11
-#define PANEL_HWC_COLOR_01_0_GREEN 10:5
-#define PANEL_HWC_COLOR_01_0_BLUE 4:0
+#define PANEL_HWC_COLOR_01_1_RED_MASK (0x1f << 27)
+#define PANEL_HWC_COLOR_01_1_GREEN_MASK (0x3f << 21)
+#define PANEL_HWC_COLOR_01_1_BLUE_MASK (0x1f << 16)
+#define PANEL_HWC_COLOR_01_0_RED_MASK (0x1f << 11)
+#define PANEL_HWC_COLOR_01_0_GREEN_MASK (0x3f << 5)
+#define PANEL_HWC_COLOR_01_0_BLUE_MASK 0x1f
#define PANEL_HWC_COLOR_2 0x0800FC
-#define PANEL_HWC_COLOR_2_RED 15:11
-#define PANEL_HWC_COLOR_2_GREEN 10:5
-#define PANEL_HWC_COLOR_2_BLUE 4:0
+#define PANEL_HWC_COLOR_2_RED_MASK (0x1f << 11)
+#define PANEL_HWC_COLOR_2_GREEN_MASK (0x3f << 5)
+#define PANEL_HWC_COLOR_2_BLUE_MASK 0x1f
/* Old Definitions --- */
/* Alpha Control */
#define ALPHA_DISPLAY_CTRL 0x080100
-#define ALPHA_DISPLAY_CTRL_SELECT 28:28
-#define ALPHA_DISPLAY_CTRL_SELECT_PER_PIXEL 0
-#define ALPHA_DISPLAY_CTRL_SELECT_ALPHA 1
-#define ALPHA_DISPLAY_CTRL_ALPHA 27:24
-#define ALPHA_DISPLAY_CTRL_FIFO 17:16
-#define ALPHA_DISPLAY_CTRL_FIFO_1 0
-#define ALPHA_DISPLAY_CTRL_FIFO_3 1
-#define ALPHA_DISPLAY_CTRL_FIFO_7 2
-#define ALPHA_DISPLAY_CTRL_FIFO_11 3
-#define ALPHA_DISPLAY_CTRL_PIXEL 7:4
-#define ALPHA_DISPLAY_CTRL_CHROMA_KEY 3:3
-#define ALPHA_DISPLAY_CTRL_CHROMA_KEY_DISABLE 0
-#define ALPHA_DISPLAY_CTRL_CHROMA_KEY_ENABLE 1
-#define ALPHA_DISPLAY_CTRL_PLANE 2:2
-#define ALPHA_DISPLAY_CTRL_PLANE_DISABLE 0
-#define ALPHA_DISPLAY_CTRL_PLANE_ENABLE 1
-#define ALPHA_DISPLAY_CTRL_FORMAT 1:0
-#define ALPHA_DISPLAY_CTRL_FORMAT_16 1
-#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4 2
-#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4 3
+#define ALPHA_DISPLAY_CTRL_SELECT BIT(28)
+#define ALPHA_DISPLAY_CTRL_ALPHA_MASK (0xf << 24)
+#define ALPHA_DISPLAY_CTRL_FIFO_MASK (0x3 << 16)
+#define ALPHA_DISPLAY_CTRL_FIFO_1 (0x0 << 16)
+#define ALPHA_DISPLAY_CTRL_FIFO_3 (0x1 << 16)
+#define ALPHA_DISPLAY_CTRL_FIFO_7 (0x2 << 16)
+#define ALPHA_DISPLAY_CTRL_FIFO_11 (0x3 << 16)
+#define ALPHA_DISPLAY_CTRL_PIXEL_MASK (0xf << 4)
+#define ALPHA_DISPLAY_CTRL_CHROMA_KEY BIT(3)
+#define ALPHA_DISPLAY_CTRL_FORMAT_MASK 0x3
+#define ALPHA_DISPLAY_CTRL_FORMAT_16 0x1
+#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4 0x2
+#define ALPHA_DISPLAY_CTRL_FORMAT_ALPHA_4_4_4_4 0x3
#define ALPHA_FB_ADDRESS 0x080104
-#define ALPHA_FB_ADDRESS_STATUS 31:31
-#define ALPHA_FB_ADDRESS_STATUS_CURRENT 0
-#define ALPHA_FB_ADDRESS_STATUS_PENDING 1
-#define ALPHA_FB_ADDRESS_EXT 27:27
-#define ALPHA_FB_ADDRESS_EXT_LOCAL 0
-#define ALPHA_FB_ADDRESS_EXT_EXTERNAL 1
-#define ALPHA_FB_ADDRESS_ADDRESS 25:0
+#define ALPHA_FB_ADDRESS_STATUS BIT(31)
+#define ALPHA_FB_ADDRESS_EXT BIT(27)
+#define ALPHA_FB_ADDRESS_ADDRESS_MASK 0x3ffffff
#define ALPHA_FB_WIDTH 0x080108
-#define ALPHA_FB_WIDTH_WIDTH 29:16
-#define ALPHA_FB_WIDTH_OFFSET 13:0
+#define ALPHA_FB_WIDTH_WIDTH_MASK (0x3fff << 16)
+#define ALPHA_FB_WIDTH_OFFSET_MASK 0x3fff
#define ALPHA_PLANE_TL 0x08010C
-#define ALPHA_PLANE_TL_TOP 26:16
-#define ALPHA_PLANE_TL_LEFT 10:0
+#define ALPHA_PLANE_TL_TOP_MASK (0x7ff << 16)
+#define ALPHA_PLANE_TL_LEFT_MASK 0x7ff
#define ALPHA_PLANE_BR 0x080110
-#define ALPHA_PLANE_BR_BOTTOM 26:16
-#define ALPHA_PLANE_BR_RIGHT 10:0
+#define ALPHA_PLANE_BR_BOTTOM_MASK (0x7ff << 16)
+#define ALPHA_PLANE_BR_RIGHT_MASK 0x7ff
#define ALPHA_CHROMA_KEY 0x080114
-#define ALPHA_CHROMA_KEY_MASK 31:16
-#define ALPHA_CHROMA_KEY_VALUE 15:0
+#define ALPHA_CHROMA_KEY_MASK_MASK (0xffff << 16)
+#define ALPHA_CHROMA_KEY_VALUE_MASK 0xffff
#define ALPHA_COLOR_LOOKUP_01 0x080118
-#define ALPHA_COLOR_LOOKUP_01_1 31:16
-#define ALPHA_COLOR_LOOKUP_01_1_RED 31:27
-#define ALPHA_COLOR_LOOKUP_01_1_GREEN 26:21
-#define ALPHA_COLOR_LOOKUP_01_1_BLUE 20:16
-#define ALPHA_COLOR_LOOKUP_01_0 15:0
-#define ALPHA_COLOR_LOOKUP_01_0_RED 15:11
-#define ALPHA_COLOR_LOOKUP_01_0_GREEN 10:5
-#define ALPHA_COLOR_LOOKUP_01_0_BLUE 4:0
+#define ALPHA_COLOR_LOOKUP_01_1_MASK (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_01_1_RED_MASK (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_01_1_GREEN_MASK (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_01_1_BLUE_MASK (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_01_0_MASK 0xffff
+#define ALPHA_COLOR_LOOKUP_01_0_RED_MASK (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_01_0_GREEN_MASK (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_01_0_BLUE_MASK 0x1f
#define ALPHA_COLOR_LOOKUP_23 0x08011C
-#define ALPHA_COLOR_LOOKUP_23_3 31:16
-#define ALPHA_COLOR_LOOKUP_23_3_RED 31:27
-#define ALPHA_COLOR_LOOKUP_23_3_GREEN 26:21
-#define ALPHA_COLOR_LOOKUP_23_3_BLUE 20:16
-#define ALPHA_COLOR_LOOKUP_23_2 15:0
-#define ALPHA_COLOR_LOOKUP_23_2_RED 15:11
-#define ALPHA_COLOR_LOOKUP_23_2_GREEN 10:5
-#define ALPHA_COLOR_LOOKUP_23_2_BLUE 4:0
+#define ALPHA_COLOR_LOOKUP_23_3_MASK (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_23_3_RED_MASK (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_23_3_GREEN_MASK (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_23_3_BLUE_MASK (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_23_2_MASK 0xffff
+#define ALPHA_COLOR_LOOKUP_23_2_RED_MASK (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_23_2_GREEN_MASK (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_23_2_BLUE_MASK 0x1f
#define ALPHA_COLOR_LOOKUP_45 0x080120
-#define ALPHA_COLOR_LOOKUP_45_5 31:16
-#define ALPHA_COLOR_LOOKUP_45_5_RED 31:27
-#define ALPHA_COLOR_LOOKUP_45_5_GREEN 26:21
-#define ALPHA_COLOR_LOOKUP_45_5_BLUE 20:16
-#define ALPHA_COLOR_LOOKUP_45_4 15:0
-#define ALPHA_COLOR_LOOKUP_45_4_RED 15:11
-#define ALPHA_COLOR_LOOKUP_45_4_GREEN 10:5
-#define ALPHA_COLOR_LOOKUP_45_4_BLUE 4:0
+#define ALPHA_COLOR_LOOKUP_45_5_MASK (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_45_5_RED_MASK (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_45_5_GREEN_MASK (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_45_5_BLUE_MASK (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_45_4_MASK 0xffff
+#define ALPHA_COLOR_LOOKUP_45_4_RED_MASK (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_45_4_GREEN_MASK (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_45_4_BLUE_MASK 0x1f
#define ALPHA_COLOR_LOOKUP_67 0x080124
-#define ALPHA_COLOR_LOOKUP_67_7 31:16
-#define ALPHA_COLOR_LOOKUP_67_7_RED 31:27
-#define ALPHA_COLOR_LOOKUP_67_7_GREEN 26:21
-#define ALPHA_COLOR_LOOKUP_67_7_BLUE 20:16
-#define ALPHA_COLOR_LOOKUP_67_6 15:0
-#define ALPHA_COLOR_LOOKUP_67_6_RED 15:11
-#define ALPHA_COLOR_LOOKUP_67_6_GREEN 10:5
-#define ALPHA_COLOR_LOOKUP_67_6_BLUE 4:0
+#define ALPHA_COLOR_LOOKUP_67_7_MASK (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_67_7_RED_MASK (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_67_7_GREEN_MASK (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_67_7_BLUE_MASK (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_67_6_MASK 0xffff
+#define ALPHA_COLOR_LOOKUP_67_6_RED_MASK (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_67_6_GREEN_MASK (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_67_6_BLUE_MASK 0x1f
#define ALPHA_COLOR_LOOKUP_89 0x080128
-#define ALPHA_COLOR_LOOKUP_89_9 31:16
-#define ALPHA_COLOR_LOOKUP_89_9_RED 31:27
-#define ALPHA_COLOR_LOOKUP_89_9_GREEN 26:21
-#define ALPHA_COLOR_LOOKUP_89_9_BLUE 20:16
-#define ALPHA_COLOR_LOOKUP_89_8 15:0
-#define ALPHA_COLOR_LOOKUP_89_8_RED 15:11
-#define ALPHA_COLOR_LOOKUP_89_8_GREEN 10:5
-#define ALPHA_COLOR_LOOKUP_89_8_BLUE 4:0
+#define ALPHA_COLOR_LOOKUP_89_9_MASK (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_89_9_RED_MASK (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_89_9_GREEN_MASK (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_89_9_BLUE_MASK (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_89_8_MASK 0xffff
+#define ALPHA_COLOR_LOOKUP_89_8_RED_MASK (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_89_8_GREEN_MASK (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_89_8_BLUE_MASK 0x1f
#define ALPHA_COLOR_LOOKUP_AB 0x08012C
-#define ALPHA_COLOR_LOOKUP_AB_B 31:16
-#define ALPHA_COLOR_LOOKUP_AB_B_RED 31:27
-#define ALPHA_COLOR_LOOKUP_AB_B_GREEN 26:21
-#define ALPHA_COLOR_LOOKUP_AB_B_BLUE 20:16
-#define ALPHA_COLOR_LOOKUP_AB_A 15:0
-#define ALPHA_COLOR_LOOKUP_AB_A_RED 15:11
-#define ALPHA_COLOR_LOOKUP_AB_A_GREEN 10:5
-#define ALPHA_COLOR_LOOKUP_AB_A_BLUE 4:0
+#define ALPHA_COLOR_LOOKUP_AB_B_MASK (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_AB_B_RED_MASK (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_AB_B_GREEN_MASK (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_AB_B_BLUE_MASK (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_AB_A_MASK 0xffff
+#define ALPHA_COLOR_LOOKUP_AB_A_RED_MASK (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_AB_A_GREEN_MASK (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_AB_A_BLUE_MASK 0x1f
#define ALPHA_COLOR_LOOKUP_CD 0x080130
-#define ALPHA_COLOR_LOOKUP_CD_D 31:16
-#define ALPHA_COLOR_LOOKUP_CD_D_RED 31:27
-#define ALPHA_COLOR_LOOKUP_CD_D_GREEN 26:21
-#define ALPHA_COLOR_LOOKUP_CD_D_BLUE 20:16
-#define ALPHA_COLOR_LOOKUP_CD_C 15:0
-#define ALPHA_COLOR_LOOKUP_CD_C_RED 15:11
-#define ALPHA_COLOR_LOOKUP_CD_C_GREEN 10:5
-#define ALPHA_COLOR_LOOKUP_CD_C_BLUE 4:0
+#define ALPHA_COLOR_LOOKUP_CD_D_MASK (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_CD_D_RED_MASK (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_CD_D_GREEN_MASK (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_CD_D_BLUE_MASK (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_CD_C_MASK 0xffff
+#define ALPHA_COLOR_LOOKUP_CD_C_RED_MASK (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_CD_C_GREEN_MASK (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_CD_C_BLUE_MASK 0x1f
#define ALPHA_COLOR_LOOKUP_EF 0x080134
-#define ALPHA_COLOR_LOOKUP_EF_F 31:16
-#define ALPHA_COLOR_LOOKUP_EF_F_RED 31:27
-#define ALPHA_COLOR_LOOKUP_EF_F_GREEN 26:21
-#define ALPHA_COLOR_LOOKUP_EF_F_BLUE 20:16
-#define ALPHA_COLOR_LOOKUP_EF_E 15:0
-#define ALPHA_COLOR_LOOKUP_EF_E_RED 15:11
-#define ALPHA_COLOR_LOOKUP_EF_E_GREEN 10:5
-#define ALPHA_COLOR_LOOKUP_EF_E_BLUE 4:0
+#define ALPHA_COLOR_LOOKUP_EF_F_MASK (0xffff << 16)
+#define ALPHA_COLOR_LOOKUP_EF_F_RED_MASK (0x1f << 27)
+#define ALPHA_COLOR_LOOKUP_EF_F_GREEN_MASK (0x3f << 21)
+#define ALPHA_COLOR_LOOKUP_EF_F_BLUE_MASK (0x1f << 16)
+#define ALPHA_COLOR_LOOKUP_EF_E_MASK 0xffff
+#define ALPHA_COLOR_LOOKUP_EF_E_RED_MASK (0x1f << 11)
+#define ALPHA_COLOR_LOOKUP_EF_E_GREEN_MASK (0x3f << 5)
+#define ALPHA_COLOR_LOOKUP_EF_E_BLUE_MASK 0x1f
/* CRT Graphics Control */
#define CRT_DISPLAY_CTRL 0x080200
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK 31:27
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE 0
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE 0x1F
+#define CRT_DISPLAY_CTRL_RESERVED_MASK 0xfb008200
/* SM750LE definition */
-#define CRT_DISPLAY_CTRL_DPMS 31:30
-#define CRT_DISPLAY_CTRL_DPMS_0 0
-#define CRT_DISPLAY_CTRL_DPMS_1 1
-#define CRT_DISPLAY_CTRL_DPMS_2 2
-#define CRT_DISPLAY_CTRL_DPMS_3 3
-#define CRT_DISPLAY_CTRL_CLK 29:27
-#define CRT_DISPLAY_CTRL_CLK_PLL25 0
-#define CRT_DISPLAY_CTRL_CLK_PLL41 1
-#define CRT_DISPLAY_CTRL_CLK_PLL62 2
-#define CRT_DISPLAY_CTRL_CLK_PLL65 3
-#define CRT_DISPLAY_CTRL_CLK_PLL74 4
-#define CRT_DISPLAY_CTRL_CLK_PLL80 5
-#define CRT_DISPLAY_CTRL_CLK_PLL108 6
-#define CRT_DISPLAY_CTRL_CLK_RESERVED 7
-#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC 26:26
-#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_DISABLE 1
-#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_ENABLE 0
-
-
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK 25:24
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE 3
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE 0
+#define CRT_DISPLAY_CTRL_DPMS_SHIFT 30
+#define CRT_DISPLAY_CTRL_DPMS_MASK (0x3 << 30)
+#define CRT_DISPLAY_CTRL_DPMS_0 (0x0 << 30)
+#define CRT_DISPLAY_CTRL_DPMS_1 (0x1 << 30)
+#define CRT_DISPLAY_CTRL_DPMS_2 (0x2 << 30)
+#define CRT_DISPLAY_CTRL_DPMS_3 (0x3 << 30)
+#define CRT_DISPLAY_CTRL_CLK_MASK (0x7 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL25 (0x0 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL41 (0x1 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL62 (0x2 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL65 (0x3 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL74 (0x4 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL80 (0x5 << 27)
+#define CRT_DISPLAY_CTRL_CLK_PLL108 (0x6 << 27)
+#define CRT_DISPLAY_CTRL_CLK_RESERVED (0x7 << 27)
+#define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC BIT(26)
/* SM750LE definition */
-#define CRT_DISPLAY_CTRL_CRTSELECT 25:25
-#define CRT_DISPLAY_CTRL_CRTSELECT_VGA 0
-#define CRT_DISPLAY_CTRL_CRTSELECT_CRT 1
-#define CRT_DISPLAY_CTRL_RGBBIT 24:24
-#define CRT_DISPLAY_CTRL_RGBBIT_24BIT 0
-#define CRT_DISPLAY_CTRL_RGBBIT_12BIT 1
-
-
-#define CRT_DISPLAY_CTRL_RESERVED_3_MASK 15:15
-#define CRT_DISPLAY_CTRL_RESERVED_3_MASK_DISABLE 0
-#define CRT_DISPLAY_CTRL_RESERVED_3_MASK_ENABLE 1
-
-#define CRT_DISPLAY_CTRL_RESERVED_4_MASK 9:9
-#define CRT_DISPLAY_CTRL_RESERVED_4_MASK_DISABLE 0
-#define CRT_DISPLAY_CTRL_RESERVED_4_MASK_ENABLE 1
+#define CRT_DISPLAY_CTRL_CRTSELECT BIT(25)
+#define CRT_DISPLAY_CTRL_RGBBIT BIT(24)
#ifndef VALIDATION_CHIP
- #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC 26:26
- #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_DISABLE 1
- #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_ENABLE 0
- #define CRT_DISPLAY_CTRL_CENTERING 24:24
- #define CRT_DISPLAY_CTRL_CENTERING_DISABLE 0
- #define CRT_DISPLAY_CTRL_CENTERING_ENABLE 1
+ #define CRT_DISPLAY_CTRL_CENTERING BIT(24)
#endif
-#define CRT_DISPLAY_CTRL_LOCK_TIMING 23:23
-#define CRT_DISPLAY_CTRL_LOCK_TIMING_DISABLE 0
-#define CRT_DISPLAY_CTRL_LOCK_TIMING_ENABLE 1
-#define CRT_DISPLAY_CTRL_EXPANSION 22:22
-#define CRT_DISPLAY_CTRL_EXPANSION_DISABLE 0
-#define CRT_DISPLAY_CTRL_EXPANSION_ENABLE 1
-#define CRT_DISPLAY_CTRL_VERTICAL_MODE 21:21
-#define CRT_DISPLAY_CTRL_VERTICAL_MODE_REPLICATE 0
-#define CRT_DISPLAY_CTRL_VERTICAL_MODE_INTERPOLATE 1
-#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE 20:20
-#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE_REPLICATE 0
-#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE_INTERPOLATE 1
-#define CRT_DISPLAY_CTRL_SELECT 19:18
-#define CRT_DISPLAY_CTRL_SELECT_PANEL 0
-#define CRT_DISPLAY_CTRL_SELECT_VGA 1
-#define CRT_DISPLAY_CTRL_SELECT_CRT 2
-#define CRT_DISPLAY_CTRL_FIFO 17:16
-#define CRT_DISPLAY_CTRL_FIFO_1 0
-#define CRT_DISPLAY_CTRL_FIFO_3 1
-#define CRT_DISPLAY_CTRL_FIFO_7 2
-#define CRT_DISPLAY_CTRL_FIFO_11 3
-#define CRT_DISPLAY_CTRL_CLOCK_PHASE 14:14
-#define CRT_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_HIGH 0
-#define CRT_DISPLAY_CTRL_CLOCK_PHASE_ACTIVE_LOW 1
-#define CRT_DISPLAY_CTRL_VSYNC_PHASE 13:13
-#define CRT_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_HIGH 0
-#define CRT_DISPLAY_CTRL_VSYNC_PHASE_ACTIVE_LOW 1
-#define CRT_DISPLAY_CTRL_HSYNC_PHASE 12:12
-#define CRT_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_HIGH 0
-#define CRT_DISPLAY_CTRL_HSYNC_PHASE_ACTIVE_LOW 1
-#define CRT_DISPLAY_CTRL_BLANK 10:10
-#define CRT_DISPLAY_CTRL_BLANK_OFF 0
-#define CRT_DISPLAY_CTRL_BLANK_ON 1
-#define CRT_DISPLAY_CTRL_TIMING 8:8
-#define CRT_DISPLAY_CTRL_TIMING_DISABLE 0
-#define CRT_DISPLAY_CTRL_TIMING_ENABLE 1
-#define CRT_DISPLAY_CTRL_PIXEL 7:4
-#define CRT_DISPLAY_CTRL_GAMMA 3:3
-#define CRT_DISPLAY_CTRL_GAMMA_DISABLE 0
-#define CRT_DISPLAY_CTRL_GAMMA_ENABLE 1
-#define CRT_DISPLAY_CTRL_PLANE 2:2
-#define CRT_DISPLAY_CTRL_PLANE_DISABLE 0
-#define CRT_DISPLAY_CTRL_PLANE_ENABLE 1
-#define CRT_DISPLAY_CTRL_FORMAT 1:0
-#define CRT_DISPLAY_CTRL_FORMAT_8 0
-#define CRT_DISPLAY_CTRL_FORMAT_16 1
-#define CRT_DISPLAY_CTRL_FORMAT_32 2
-#define CRT_DISPLAY_CTRL_RESERVED_BITS_MASK 0xFF000200
+#define CRT_DISPLAY_CTRL_LOCK_TIMING BIT(23)
+#define CRT_DISPLAY_CTRL_EXPANSION BIT(22)
+#define CRT_DISPLAY_CTRL_VERTICAL_MODE BIT(21)
+#define CRT_DISPLAY_CTRL_HORIZONTAL_MODE BIT(20)
+#define CRT_DISPLAY_CTRL_SELECT_SHIFT 18
+#define CRT_DISPLAY_CTRL_SELECT_MASK (0x3 << 18)
+#define CRT_DISPLAY_CTRL_SELECT_PANEL (0x0 << 18)
+#define CRT_DISPLAY_CTRL_SELECT_VGA (0x1 << 18)
+#define CRT_DISPLAY_CTRL_SELECT_CRT (0x2 << 18)
+#define CRT_DISPLAY_CTRL_FIFO_MASK (0x3 << 16)
+#define CRT_DISPLAY_CTRL_FIFO_1 (0x0 << 16)
+#define CRT_DISPLAY_CTRL_FIFO_3 (0x1 << 16)
+#define CRT_DISPLAY_CTRL_FIFO_7 (0x2 << 16)
+#define CRT_DISPLAY_CTRL_FIFO_11 (0x3 << 16)
+#define CRT_DISPLAY_CTRL_BLANK BIT(10)
+#define CRT_DISPLAY_CTRL_PIXEL_MASK (0xf << 4)
+#define CRT_DISPLAY_CTRL_FORMAT_MASK (0x3 << 0)
+#define CRT_DISPLAY_CTRL_FORMAT_8 (0x0 << 0)
+#define CRT_DISPLAY_CTRL_FORMAT_16 (0x1 << 0)
+#define CRT_DISPLAY_CTRL_FORMAT_32 (0x2 << 0)
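When the field value is computed at run time there is no pre-shifted macro to reach for; the *_SHIFT define does the positioning. A sketch for the SM750LE DPMS field, where dpms_state is a hypothetical value in 0..3 and peek32()/poke32() remain assumed accessors:

	u32 ctrl = peek32(CRT_DISPLAY_CTRL);

	ctrl &= ~CRT_DISPLAY_CTRL_DPMS_MASK;
	ctrl |= (dpms_state << CRT_DISPLAY_CTRL_DPMS_SHIFT) &
		CRT_DISPLAY_CTRL_DPMS_MASK;
	poke32(CRT_DISPLAY_CTRL, ctrl);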
#define CRT_FB_ADDRESS 0x080204
-#define CRT_FB_ADDRESS_STATUS 31:31
-#define CRT_FB_ADDRESS_STATUS_CURRENT 0
-#define CRT_FB_ADDRESS_STATUS_PENDING 1
-#define CRT_FB_ADDRESS_EXT 27:27
-#define CRT_FB_ADDRESS_EXT_LOCAL 0
-#define CRT_FB_ADDRESS_EXT_EXTERNAL 1
-#define CRT_FB_ADDRESS_ADDRESS 25:0
+#define CRT_FB_ADDRESS_STATUS BIT(31)
+#define CRT_FB_ADDRESS_EXT BIT(27)
+#define CRT_FB_ADDRESS_ADDRESS_MASK 0x3ffffff
#define CRT_FB_WIDTH 0x080208
-#define CRT_FB_WIDTH_WIDTH 29:16
-#define CRT_FB_WIDTH_OFFSET 13:0
+#define CRT_FB_WIDTH_WIDTH_SHIFT 16
+#define CRT_FB_WIDTH_WIDTH_MASK (0x3fff << 16)
+#define CRT_FB_WIDTH_OFFSET_MASK 0x3fff
#define CRT_HORIZONTAL_TOTAL 0x08020C
-#define CRT_HORIZONTAL_TOTAL_TOTAL 27:16
-#define CRT_HORIZONTAL_TOTAL_DISPLAY_END 11:0
+#define CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT 16
+#define CRT_HORIZONTAL_TOTAL_TOTAL_MASK (0xfff << 16)
+#define CRT_HORIZONTAL_TOTAL_DISPLAY_END_MASK 0xfff
#define CRT_HORIZONTAL_SYNC 0x080210
-#define CRT_HORIZONTAL_SYNC_WIDTH 23:16
-#define CRT_HORIZONTAL_SYNC_START 11:0
+#define CRT_HORIZONTAL_SYNC_WIDTH_SHIFT 16
+#define CRT_HORIZONTAL_SYNC_WIDTH_MASK (0xff << 16)
+#define CRT_HORIZONTAL_SYNC_START_MASK 0xfff
#define CRT_VERTICAL_TOTAL 0x080214
-#define CRT_VERTICAL_TOTAL_TOTAL 26:16
-#define CRT_VERTICAL_TOTAL_DISPLAY_END 10:0
+#define CRT_VERTICAL_TOTAL_TOTAL_SHIFT 16
+#define CRT_VERTICAL_TOTAL_TOTAL_MASK (0x7ff << 16)
+#define CRT_VERTICAL_TOTAL_DISPLAY_END_MASK 0x7ff
#define CRT_VERTICAL_SYNC 0x080218
-#define CRT_VERTICAL_SYNC_HEIGHT 21:16
-#define CRT_VERTICAL_SYNC_START 10:0
+#define CRT_VERTICAL_SYNC_HEIGHT_SHIFT 16
+#define CRT_VERTICAL_SYNC_HEIGHT_MASK (0x3f << 16)
+#define CRT_VERTICAL_SYNC_START_MASK 0x7ff
#define CRT_SIGNATURE_ANALYZER 0x08021C
-#define CRT_SIGNATURE_ANALYZER_STATUS 31:16
-#define CRT_SIGNATURE_ANALYZER_ENABLE 3:3
-#define CRT_SIGNATURE_ANALYZER_ENABLE_DISABLE 0
-#define CRT_SIGNATURE_ANALYZER_ENABLE_ENABLE 1
-#define CRT_SIGNATURE_ANALYZER_RESET 2:2
-#define CRT_SIGNATURE_ANALYZER_RESET_NORMAL 0
-#define CRT_SIGNATURE_ANALYZER_RESET_RESET 1
-#define CRT_SIGNATURE_ANALYZER_SOURCE 1:0
+#define CRT_SIGNATURE_ANALYZER_STATUS_MASK (0xffff << 16)
+#define CRT_SIGNATURE_ANALYZER_ENABLE BIT(3)
+#define CRT_SIGNATURE_ANALYZER_RESET BIT(2)
+#define CRT_SIGNATURE_ANALYZER_SOURCE_MASK 0x3
#define CRT_SIGNATURE_ANALYZER_SOURCE_RED 0
#define CRT_SIGNATURE_ANALYZER_SOURCE_GREEN 1
#define CRT_SIGNATURE_ANALYZER_SOURCE_BLUE 2
#define CRT_CURRENT_LINE 0x080220
-#define CRT_CURRENT_LINE_LINE 10:0
+#define CRT_CURRENT_LINE_LINE_MASK 0x7ff
#define CRT_MONITOR_DETECT 0x080224
-#define CRT_MONITOR_DETECT_VALUE 25:25
-#define CRT_MONITOR_DETECT_VALUE_DISABLE 0
-#define CRT_MONITOR_DETECT_VALUE_ENABLE 1
-#define CRT_MONITOR_DETECT_ENABLE 24:24
-#define CRT_MONITOR_DETECT_ENABLE_DISABLE 0
-#define CRT_MONITOR_DETECT_ENABLE_ENABLE 1
-#define CRT_MONITOR_DETECT_RED 23:16
-#define CRT_MONITOR_DETECT_GREEN 15:8
-#define CRT_MONITOR_DETECT_BLUE 7:0
+#define CRT_MONITOR_DETECT_VALUE BIT(25)
+#define CRT_MONITOR_DETECT_ENABLE BIT(24)
+#define CRT_MONITOR_DETECT_RED_MASK (0xff << 16)
+#define CRT_MONITOR_DETECT_GREEN_MASK (0xff << 8)
+#define CRT_MONITOR_DETECT_BLUE_MASK 0xff
#define CRT_SCALE 0x080228
-#define CRT_SCALE_VERTICAL_MODE 31:31
-#define CRT_SCALE_VERTICAL_MODE_EXPAND 0
-#define CRT_SCALE_VERTICAL_MODE_SHRINK 1
-#define CRT_SCALE_VERTICAL_SCALE 27:16
-#define CRT_SCALE_HORIZONTAL_MODE 15:15
-#define CRT_SCALE_HORIZONTAL_MODE_EXPAND 0
-#define CRT_SCALE_HORIZONTAL_MODE_SHRINK 1
-#define CRT_SCALE_HORIZONTAL_SCALE 11:0
+#define CRT_SCALE_VERTICAL_MODE BIT(31)
+#define CRT_SCALE_VERTICAL_SCALE_MASK (0xfff << 16)
+#define CRT_SCALE_HORIZONTAL_MODE BIT(15)
+#define CRT_SCALE_HORIZONTAL_SCALE_MASK 0xfff
/* CRT Cursor Control */
#define CRT_HWC_ADDRESS 0x080230
-#define CRT_HWC_ADDRESS_ENABLE 31:31
-#define CRT_HWC_ADDRESS_ENABLE_DISABLE 0
-#define CRT_HWC_ADDRESS_ENABLE_ENABLE 1
-#define CRT_HWC_ADDRESS_EXT 27:27
-#define CRT_HWC_ADDRESS_EXT_LOCAL 0
-#define CRT_HWC_ADDRESS_EXT_EXTERNAL 1
-#define CRT_HWC_ADDRESS_ADDRESS 25:0
+#define CRT_HWC_ADDRESS_ENABLE BIT(31)
+#define CRT_HWC_ADDRESS_EXT BIT(27)
+#define CRT_HWC_ADDRESS_ADDRESS_MASK 0x3ffffff
#define CRT_HWC_LOCATION 0x080234
-#define CRT_HWC_LOCATION_TOP 27:27
-#define CRT_HWC_LOCATION_TOP_INSIDE 0
-#define CRT_HWC_LOCATION_TOP_OUTSIDE 1
-#define CRT_HWC_LOCATION_Y 26:16
-#define CRT_HWC_LOCATION_LEFT 11:11
-#define CRT_HWC_LOCATION_LEFT_INSIDE 0
-#define CRT_HWC_LOCATION_LEFT_OUTSIDE 1
-#define CRT_HWC_LOCATION_X 10:0
+#define CRT_HWC_LOCATION_TOP BIT(27)
+#define CRT_HWC_LOCATION_Y_MASK (0x7ff << 16)
+#define CRT_HWC_LOCATION_LEFT BIT(11)
+#define CRT_HWC_LOCATION_X_MASK 0x7ff
#define CRT_HWC_COLOR_12 0x080238
-#define CRT_HWC_COLOR_12_2_RGB565 31:16
-#define CRT_HWC_COLOR_12_1_RGB565 15:0
+#define CRT_HWC_COLOR_12_2_RGB565_MASK (0xffff << 16)
+#define CRT_HWC_COLOR_12_1_RGB565_MASK 0xffff
#define CRT_HWC_COLOR_3 0x08023C
-#define CRT_HWC_COLOR_3_RGB565 15:0
+#define CRT_HWC_COLOR_3_RGB565_MASK 0xffff
/* The vertical expansion registers below span 0x080240 ~ 0x080264 */
#define CRT_VERTICAL_EXPANSION 0x080240
#ifndef VALIDATION_CHIP
- #define CRT_VERTICAL_CENTERING_VALUE 31:24
+ #define CRT_VERTICAL_CENTERING_VALUE_MASK (0xff << 24)
#endif
-#define CRT_VERTICAL_EXPANSION_COMPARE_VALUE 23:16
-#define CRT_VERTICAL_EXPANSION_LINE_BUFFER 15:12
-#define CRT_VERTICAL_EXPANSION_SCALE_FACTOR 11:0
+#define CRT_VERTICAL_EXPANSION_COMPARE_VALUE_MASK (0xff << 16)
+#define CRT_VERTICAL_EXPANSION_LINE_BUFFER_MASK (0xf << 12)
+#define CRT_VERTICAL_EXPANSION_SCALE_FACTOR_MASK 0xfff
/* The horizontal expansion registers below span 0x080268 ~ 0x08027C */
#define CRT_HORIZONTAL_EXPANSION 0x080268
#ifndef VALIDATION_CHIP
- #define CRT_HORIZONTAL_CENTERING_VALUE 31:24
+ #define CRT_HORIZONTAL_CENTERING_VALUE_MASK (0xff << 24)
#endif
-#define CRT_HORIZONTAL_EXPANSION_COMPARE_VALUE 23:16
-#define CRT_HORIZONTAL_EXPANSION_SCALE_FACTOR 11:0
+#define CRT_HORIZONTAL_EXPANSION_COMPARE_VALUE_MASK (0xff << 16)
+#define CRT_HORIZONTAL_EXPANSION_SCALE_FACTOR_MASK 0xfff
#ifndef VALIDATION_CHIP
/* Auto Centering */
#define CRT_AUTO_CENTERING_TL 0x080280
- #define CRT_AUTO_CENTERING_TL_TOP 26:16
- #define CRT_AUTO_CENTERING_TL_LEFT 10:0
+ #define CRT_AUTO_CENTERING_TL_TOP_MASK (0x7ff << 16)
+ #define CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7ff
#define CRT_AUTO_CENTERING_BR 0x080284
- #define CRT_AUTO_CENTERING_BR_BOTTOM 26:16
- #define CRT_AUTO_CENTERING_BR_RIGHT 10:0
+ #define CRT_AUTO_CENTERING_BR_BOTTOM_SHIFT 16
+ #define CRT_AUTO_CENTERING_BR_BOTTOM_MASK (0x7ff << 16)
+ #define CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7ff
#endif
/* sm750le new register to control panel output */
@@ -1877,155 +1161,106 @@
/* Color Space Conversion registers. */
#define CSC_Y_SOURCE_BASE 0x1000C8
-#define CSC_Y_SOURCE_BASE_EXT 27:27
-#define CSC_Y_SOURCE_BASE_EXT_LOCAL 0
-#define CSC_Y_SOURCE_BASE_EXT_EXTERNAL 1
-#define CSC_Y_SOURCE_BASE_CS 26:26
-#define CSC_Y_SOURCE_BASE_CS_0 0
-#define CSC_Y_SOURCE_BASE_CS_1 1
-#define CSC_Y_SOURCE_BASE_ADDRESS 25:0
+#define CSC_Y_SOURCE_BASE_EXT BIT(27)
+#define CSC_Y_SOURCE_BASE_CS BIT(26)
+#define CSC_Y_SOURCE_BASE_ADDRESS_MASK 0x3ffffff
#define CSC_CONSTANTS 0x1000CC
-#define CSC_CONSTANTS_Y 31:24
-#define CSC_CONSTANTS_R 23:16
-#define CSC_CONSTANTS_G 15:8
-#define CSC_CONSTANTS_B 7:0
+#define CSC_CONSTANTS_Y_MASK (0xff << 24)
+#define CSC_CONSTANTS_R_MASK (0xff << 16)
+#define CSC_CONSTANTS_G_MASK (0xff << 8)
+#define CSC_CONSTANTS_B_MASK 0xff
#define CSC_Y_SOURCE_X 0x1000D0
-#define CSC_Y_SOURCE_X_INTEGER 26:16
-#define CSC_Y_SOURCE_X_FRACTION 15:3
+#define CSC_Y_SOURCE_X_INTEGER_MASK (0x7ff << 16)
+#define CSC_Y_SOURCE_X_FRACTION_MASK (0x1fff << 3)
#define CSC_Y_SOURCE_Y 0x1000D4
-#define CSC_Y_SOURCE_Y_INTEGER 27:16
-#define CSC_Y_SOURCE_Y_FRACTION 15:3
+#define CSC_Y_SOURCE_Y_INTEGER_MASK (0xfff << 16)
+#define CSC_Y_SOURCE_Y_FRACTION_MASK (0x1fff << 3)
#define CSC_U_SOURCE_BASE 0x1000D8
-#define CSC_U_SOURCE_BASE_EXT 27:27
-#define CSC_U_SOURCE_BASE_EXT_LOCAL 0
-#define CSC_U_SOURCE_BASE_EXT_EXTERNAL 1
-#define CSC_U_SOURCE_BASE_CS 26:26
-#define CSC_U_SOURCE_BASE_CS_0 0
-#define CSC_U_SOURCE_BASE_CS_1 1
-#define CSC_U_SOURCE_BASE_ADDRESS 25:0
+#define CSC_U_SOURCE_BASE_EXT BIT(27)
+#define CSC_U_SOURCE_BASE_CS BIT(26)
+#define CSC_U_SOURCE_BASE_ADDRESS_MASK 0x3ffffff
#define CSC_V_SOURCE_BASE 0x1000DC
-#define CSC_V_SOURCE_BASE_EXT 27:27
-#define CSC_V_SOURCE_BASE_EXT_LOCAL 0
-#define CSC_V_SOURCE_BASE_EXT_EXTERNAL 1
-#define CSC_V_SOURCE_BASE_CS 26:26
-#define CSC_V_SOURCE_BASE_CS_0 0
-#define CSC_V_SOURCE_BASE_CS_1 1
-#define CSC_V_SOURCE_BASE_ADDRESS 25:0
+#define CSC_V_SOURCE_BASE_EXT BIT(27)
+#define CSC_V_SOURCE_BASE_CS BIT(26)
+#define CSC_V_SOURCE_BASE_ADDRESS_MASK 0x3ffffff
#define CSC_SOURCE_DIMENSION 0x1000E0
-#define CSC_SOURCE_DIMENSION_X 31:16
-#define CSC_SOURCE_DIMENSION_Y 15:0
+#define CSC_SOURCE_DIMENSION_X_MASK (0xffff << 16)
+#define CSC_SOURCE_DIMENSION_Y_MASK 0xffff
#define CSC_SOURCE_PITCH 0x1000E4
-#define CSC_SOURCE_PITCH_Y 31:16
-#define CSC_SOURCE_PITCH_UV 15:0
+#define CSC_SOURCE_PITCH_Y_MASK (0xffff << 16)
+#define CSC_SOURCE_PITCH_UV_MASK 0xffff
#define CSC_DESTINATION 0x1000E8
-#define CSC_DESTINATION_WRAP 31:31
-#define CSC_DESTINATION_WRAP_DISABLE 0
-#define CSC_DESTINATION_WRAP_ENABLE 1
-#define CSC_DESTINATION_X 27:16
-#define CSC_DESTINATION_Y 11:0
+#define CSC_DESTINATION_WRAP BIT(31)
+#define CSC_DESTINATION_X_MASK (0xfff << 16)
+#define CSC_DESTINATION_Y_MASK 0xfff
#define CSC_DESTINATION_DIMENSION 0x1000EC
-#define CSC_DESTINATION_DIMENSION_X 31:16
-#define CSC_DESTINATION_DIMENSION_Y 15:0
+#define CSC_DESTINATION_DIMENSION_X_MASK (0xffff << 16)
+#define CSC_DESTINATION_DIMENSION_Y_MASK 0xffff
#define CSC_DESTINATION_PITCH 0x1000F0
-#define CSC_DESTINATION_PITCH_X 31:16
-#define CSC_DESTINATION_PITCH_Y 15:0
+#define CSC_DESTINATION_PITCH_X_MASK (0xffff << 16)
+#define CSC_DESTINATION_PITCH_Y_MASK 0xffff
#define CSC_SCALE_FACTOR 0x1000F4
-#define CSC_SCALE_FACTOR_HORIZONTAL 31:16
-#define CSC_SCALE_FACTOR_VERTICAL 15:0
+#define CSC_SCALE_FACTOR_HORIZONTAL_MASK (0xffff << 16)
+#define CSC_SCALE_FACTOR_VERTICAL_MASK 0xffff
#define CSC_DESTINATION_BASE 0x1000F8
-#define CSC_DESTINATION_BASE_EXT 27:27
-#define CSC_DESTINATION_BASE_EXT_LOCAL 0
-#define CSC_DESTINATION_BASE_EXT_EXTERNAL 1
-#define CSC_DESTINATION_BASE_CS 26:26
-#define CSC_DESTINATION_BASE_CS_0 0
-#define CSC_DESTINATION_BASE_CS_1 1
-#define CSC_DESTINATION_BASE_ADDRESS 25:0
+#define CSC_DESTINATION_BASE_EXT BIT(27)
+#define CSC_DESTINATION_BASE_CS BIT(26)
+#define CSC_DESTINATION_BASE_ADDRESS_MASK 0x3ffffff
#define CSC_CONTROL 0x1000FC
-#define CSC_CONTROL_STATUS 31:31
-#define CSC_CONTROL_STATUS_STOP 0
-#define CSC_CONTROL_STATUS_START 1
-#define CSC_CONTROL_SOURCE_FORMAT 30:28
-#define CSC_CONTROL_SOURCE_FORMAT_YUV422 0
-#define CSC_CONTROL_SOURCE_FORMAT_YUV420I 1
-#define CSC_CONTROL_SOURCE_FORMAT_YUV420 2
-#define CSC_CONTROL_SOURCE_FORMAT_YVU9 3
-#define CSC_CONTROL_SOURCE_FORMAT_IYU1 4
-#define CSC_CONTROL_SOURCE_FORMAT_IYU2 5
-#define CSC_CONTROL_SOURCE_FORMAT_RGB565 6
-#define CSC_CONTROL_SOURCE_FORMAT_RGB8888 7
-#define CSC_CONTROL_DESTINATION_FORMAT 27:26
-#define CSC_CONTROL_DESTINATION_FORMAT_RGB565 0
-#define CSC_CONTROL_DESTINATION_FORMAT_RGB8888 1
-#define CSC_CONTROL_HORIZONTAL_FILTER 25:25
-#define CSC_CONTROL_HORIZONTAL_FILTER_DISABLE 0
-#define CSC_CONTROL_HORIZONTAL_FILTER_ENABLE 1
-#define CSC_CONTROL_VERTICAL_FILTER 24:24
-#define CSC_CONTROL_VERTICAL_FILTER_DISABLE 0
-#define CSC_CONTROL_VERTICAL_FILTER_ENABLE 1
-#define CSC_CONTROL_BYTE_ORDER 23:23
-#define CSC_CONTROL_BYTE_ORDER_YUYV 0
-#define CSC_CONTROL_BYTE_ORDER_UYVY 1
+#define CSC_CONTROL_STATUS BIT(31)
+#define CSC_CONTROL_SOURCE_FORMAT_MASK (0x7 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_YUV422 (0x0 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_YUV420I (0x1 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_YUV420 (0x2 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_YVU9 (0x3 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_IYU1 (0x4 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_IYU2 (0x5 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_RGB565 (0x6 << 28)
+#define CSC_CONTROL_SOURCE_FORMAT_RGB8888 (0x7 << 28)
+#define CSC_CONTROL_DESTINATION_FORMAT_MASK (0x3 << 26)
+#define CSC_CONTROL_DESTINATION_FORMAT_RGB565 (0x0 << 26)
+#define CSC_CONTROL_DESTINATION_FORMAT_RGB8888 (0x1 << 26)
+#define CSC_CONTROL_HORIZONTAL_FILTER BIT(25)
+#define CSC_CONTROL_VERTICAL_FILTER BIT(24)
+#define CSC_CONTROL_BYTE_ORDER BIT(23)
#define DE_DATA_PORT 0x110000
#define I2C_BYTE_COUNT 0x010040
-#define I2C_BYTE_COUNT_COUNT 3:0
+#define I2C_BYTE_COUNT_COUNT_MASK 0xf
#define I2C_CTRL 0x010041
-#define I2C_CTRL_INT 4:4
-#define I2C_CTRL_INT_DISABLE 0
-#define I2C_CTRL_INT_ENABLE 1
-#define I2C_CTRL_DIR 3:3
-#define I2C_CTRL_DIR_WR 0
-#define I2C_CTRL_DIR_RD 1
-#define I2C_CTRL_CTRL 2:2
-#define I2C_CTRL_CTRL_STOP 0
-#define I2C_CTRL_CTRL_START 1
-#define I2C_CTRL_MODE 1:1
-#define I2C_CTRL_MODE_STANDARD 0
-#define I2C_CTRL_MODE_FAST 1
-#define I2C_CTRL_EN 0:0
-#define I2C_CTRL_EN_DISABLE 0
-#define I2C_CTRL_EN_ENABLE 1
+#define I2C_CTRL_INT BIT(4)
+#define I2C_CTRL_DIR BIT(3)
+#define I2C_CTRL_CTRL BIT(2)
+#define I2C_CTRL_MODE BIT(1)
+#define I2C_CTRL_EN BIT(0)
#define I2C_STATUS 0x010042
-#define I2C_STATUS_TX 3:3
-#define I2C_STATUS_TX_PROGRESS 0
-#define I2C_STATUS_TX_COMPLETED 1
-#define I2C_TX_DONE 0x08
-#define I2C_STATUS_ERR 2:2
-#define I2C_STATUS_ERR_NORMAL 0
-#define I2C_STATUS_ERR_ERROR 1
-#define I2C_STATUS_ERR_CLEAR 0
-#define I2C_STATUS_ACK 1:1
-#define I2C_STATUS_ACK_RECEIVED 0
-#define I2C_STATUS_ACK_NOT 1
-#define I2C_STATUS_BSY 0:0
-#define I2C_STATUS_BSY_IDLE 0
-#define I2C_STATUS_BSY_BUSY 1
+#define I2C_STATUS_TX BIT(3)
+#define I2C_STATUS_ERR BIT(2)
+#define I2C_STATUS_ACK BIT(1)
+#define I2C_STATUS_BSY BIT(0)
#define I2C_RESET 0x010042
-#define I2C_RESET_BUS_ERROR 2:2
-#define I2C_RESET_BUS_ERROR_CLEAR 0
+#define I2C_RESET_BUS_ERROR BIT(2)
#define I2C_SLAVE_ADDRESS 0x010043
-#define I2C_SLAVE_ADDRESS_ADDRESS 7:1
-#define I2C_SLAVE_ADDRESS_RW 0:0
-#define I2C_SLAVE_ADDRESS_RW_W 0
-#define I2C_SLAVE_ADDRESS_RW_R 1
+#define I2C_SLAVE_ADDRESS_ADDRESS_MASK (0x7f << 1)
+#define I2C_SLAVE_ADDRESS_RW BIT(0)
#define I2C_DATA0 0x010044
#define I2C_DATA1 0x010045
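
Since the source-format values above now carry their shift, programming a multi-bit field becomes a plain read-modify-write against its `_MASK`. A hedged sketch using the driver's PEEK32/POKE32 register accessors (the function name is illustrative):

/* Sketch: select YUV422 input for the CSC engine. */
static void csc_select_yuv422(void)
{
	unsigned int reg = PEEK32(CSC_CONTROL);

	reg &= ~CSC_CONTROL_SOURCE_FORMAT_MASK;		/* drop the old format */
	reg |= CSC_CONTROL_SOURCE_FORMAT_YUV422;	/* value is pre-shifted */
	POKE32(CSC_CONTROL, reg);
}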
@@ -2046,120 +1281,59 @@
#define ZV0_CAPTURE_CTRL 0x090000
-#define ZV0_CAPTURE_CTRL_FIELD_INPUT 27:27
-#define ZV0_CAPTURE_CTRL_FIELD_INPUT_EVEN_FIELD 0
-#define ZV0_CAPTURE_CTRL_FIELD_INPUT_ODD_FIELD 1
-#define ZV0_CAPTURE_CTRL_SCAN 26:26
-#define ZV0_CAPTURE_CTRL_SCAN_PROGRESSIVE 0
-#define ZV0_CAPTURE_CTRL_SCAN_INTERLACE 1
-#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER 25:25
-#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER_0 0
-#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER_1 1
-#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC 24:24
-#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC_INACTIVE 0
-#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC_ACTIVE 1
-#define ZV0_CAPTURE_CTRL_ADJ 19:19
-#define ZV0_CAPTURE_CTRL_ADJ_NORMAL 0
-#define ZV0_CAPTURE_CTRL_ADJ_DELAY 1
-#define ZV0_CAPTURE_CTRL_HA 18:18
-#define ZV0_CAPTURE_CTRL_HA_DISABLE 0
-#define ZV0_CAPTURE_CTRL_HA_ENABLE 1
-#define ZV0_CAPTURE_CTRL_VSK 17:17
-#define ZV0_CAPTURE_CTRL_VSK_DISABLE 0
-#define ZV0_CAPTURE_CTRL_VSK_ENABLE 1
-#define ZV0_CAPTURE_CTRL_HSK 16:16
-#define ZV0_CAPTURE_CTRL_HSK_DISABLE 0
-#define ZV0_CAPTURE_CTRL_HSK_ENABLE 1
-#define ZV0_CAPTURE_CTRL_FD 15:15
-#define ZV0_CAPTURE_CTRL_FD_RISING 0
-#define ZV0_CAPTURE_CTRL_FD_FALLING 1
-#define ZV0_CAPTURE_CTRL_VP 14:14
-#define ZV0_CAPTURE_CTRL_VP_HIGH 0
-#define ZV0_CAPTURE_CTRL_VP_LOW 1
-#define ZV0_CAPTURE_CTRL_HP 13:13
-#define ZV0_CAPTURE_CTRL_HP_HIGH 0
-#define ZV0_CAPTURE_CTRL_HP_LOW 1
-#define ZV0_CAPTURE_CTRL_CP 12:12
-#define ZV0_CAPTURE_CTRL_CP_HIGH 0
-#define ZV0_CAPTURE_CTRL_CP_LOW 1
-#define ZV0_CAPTURE_CTRL_UVS 11:11
-#define ZV0_CAPTURE_CTRL_UVS_DISABLE 0
-#define ZV0_CAPTURE_CTRL_UVS_ENABLE 1
-#define ZV0_CAPTURE_CTRL_BS 10:10
-#define ZV0_CAPTURE_CTRL_BS_DISABLE 0
-#define ZV0_CAPTURE_CTRL_BS_ENABLE 1
-#define ZV0_CAPTURE_CTRL_CS 9:9
-#define ZV0_CAPTURE_CTRL_CS_16 0
-#define ZV0_CAPTURE_CTRL_CS_8 1
-#define ZV0_CAPTURE_CTRL_CF 8:8
-#define ZV0_CAPTURE_CTRL_CF_YUV 0
-#define ZV0_CAPTURE_CTRL_CF_RGB 1
-#define ZV0_CAPTURE_CTRL_FS 7:7
-#define ZV0_CAPTURE_CTRL_FS_DISABLE 0
-#define ZV0_CAPTURE_CTRL_FS_ENABLE 1
-#define ZV0_CAPTURE_CTRL_WEAVE 6:6
-#define ZV0_CAPTURE_CTRL_WEAVE_DISABLE 0
-#define ZV0_CAPTURE_CTRL_WEAVE_ENABLE 1
-#define ZV0_CAPTURE_CTRL_BOB 5:5
-#define ZV0_CAPTURE_CTRL_BOB_DISABLE 0
-#define ZV0_CAPTURE_CTRL_BOB_ENABLE 1
-#define ZV0_CAPTURE_CTRL_DB 4:4
-#define ZV0_CAPTURE_CTRL_DB_DISABLE 0
-#define ZV0_CAPTURE_CTRL_DB_ENABLE 1
-#define ZV0_CAPTURE_CTRL_CC 3:3
-#define ZV0_CAPTURE_CTRL_CC_CONTINUE 0
-#define ZV0_CAPTURE_CTRL_CC_CONDITION 1
-#define ZV0_CAPTURE_CTRL_RGB 2:2
-#define ZV0_CAPTURE_CTRL_RGB_DISABLE 0
-#define ZV0_CAPTURE_CTRL_RGB_ENABLE 1
-#define ZV0_CAPTURE_CTRL_656 1:1
-#define ZV0_CAPTURE_CTRL_656_DISABLE 0
-#define ZV0_CAPTURE_CTRL_656_ENABLE 1
-#define ZV0_CAPTURE_CTRL_CAP 0:0
-#define ZV0_CAPTURE_CTRL_CAP_DISABLE 0
-#define ZV0_CAPTURE_CTRL_CAP_ENABLE 1
+#define ZV0_CAPTURE_CTRL_FIELD_INPUT BIT(27)
+#define ZV0_CAPTURE_CTRL_SCAN BIT(26)
+#define ZV0_CAPTURE_CTRL_CURRENT_BUFFER BIT(25)
+#define ZV0_CAPTURE_CTRL_VERTICAL_SYNC BIT(24)
+#define ZV0_CAPTURE_CTRL_ADJ BIT(19)
+#define ZV0_CAPTURE_CTRL_HA BIT(18)
+#define ZV0_CAPTURE_CTRL_VSK BIT(17)
+#define ZV0_CAPTURE_CTRL_HSK BIT(16)
+#define ZV0_CAPTURE_CTRL_FD BIT(15)
+#define ZV0_CAPTURE_CTRL_VP BIT(14)
+#define ZV0_CAPTURE_CTRL_HP BIT(13)
+#define ZV0_CAPTURE_CTRL_CP BIT(12)
+#define ZV0_CAPTURE_CTRL_UVS BIT(11)
+#define ZV0_CAPTURE_CTRL_BS BIT(10)
+#define ZV0_CAPTURE_CTRL_CS BIT(9)
+#define ZV0_CAPTURE_CTRL_CF BIT(8)
+#define ZV0_CAPTURE_CTRL_FS BIT(7)
+#define ZV0_CAPTURE_CTRL_WEAVE BIT(6)
+#define ZV0_CAPTURE_CTRL_BOB BIT(5)
+#define ZV0_CAPTURE_CTRL_DB BIT(4)
+#define ZV0_CAPTURE_CTRL_CC BIT(3)
+#define ZV0_CAPTURE_CTRL_RGB BIT(2)
+#define ZV0_CAPTURE_CTRL_656 BIT(1)
+#define ZV0_CAPTURE_CTRL_CAP BIT(0)
#define ZV0_CAPTURE_CLIP 0x090004
-#define ZV0_CAPTURE_CLIP_YCLIP_EVEN_FIELD 25:16
-#define ZV0_CAPTURE_CLIP_YCLIP 25:16
-#define ZV0_CAPTURE_CLIP_XCLIP 9:0
+#define ZV0_CAPTURE_CLIP_EYCLIP_MASK (0x3ff << 16)
+#define ZV0_CAPTURE_CLIP_XCLIP_MASK 0x3ff
#define ZV0_CAPTURE_SIZE 0x090008
-#define ZV0_CAPTURE_SIZE_HEIGHT 26:16
-#define ZV0_CAPTURE_SIZE_WIDTH 10:0
+#define ZV0_CAPTURE_SIZE_HEIGHT_MASK (0x7ff << 16)
+#define ZV0_CAPTURE_SIZE_WIDTH_MASK 0x7ff
#define ZV0_CAPTURE_BUF0_ADDRESS 0x09000C
-#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS 31:31
-#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS_CURRENT 0
-#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS_PENDING 1
-#define ZV0_CAPTURE_BUF0_ADDRESS_EXT 27:27
-#define ZV0_CAPTURE_BUF0_ADDRESS_EXT_LOCAL 0
-#define ZV0_CAPTURE_BUF0_ADDRESS_EXT_EXTERNAL 1
-#define ZV0_CAPTURE_BUF0_ADDRESS_CS 26:26
-#define ZV0_CAPTURE_BUF0_ADDRESS_CS_0 0
-#define ZV0_CAPTURE_BUF0_ADDRESS_CS_1 1
-#define ZV0_CAPTURE_BUF0_ADDRESS_ADDRESS 25:0
+#define ZV0_CAPTURE_BUF0_ADDRESS_STATUS BIT(31)
+#define ZV0_CAPTURE_BUF0_ADDRESS_EXT BIT(27)
+#define ZV0_CAPTURE_BUF0_ADDRESS_CS BIT(26)
+#define ZV0_CAPTURE_BUF0_ADDRESS_ADDRESS_MASK 0x3ffffff
#define ZV0_CAPTURE_BUF1_ADDRESS 0x090010
-#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS 31:31
-#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS_CURRENT 0
-#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS_PENDING 1
-#define ZV0_CAPTURE_BUF1_ADDRESS_EXT 27:27
-#define ZV0_CAPTURE_BUF1_ADDRESS_EXT_LOCAL 0
-#define ZV0_CAPTURE_BUF1_ADDRESS_EXT_EXTERNAL 1
-#define ZV0_CAPTURE_BUF1_ADDRESS_CS 26:26
-#define ZV0_CAPTURE_BUF1_ADDRESS_CS_0 0
-#define ZV0_CAPTURE_BUF1_ADDRESS_CS_1 1
-#define ZV0_CAPTURE_BUF1_ADDRESS_ADDRESS 25:0
+#define ZV0_CAPTURE_BUF1_ADDRESS_STATUS BIT(31)
+#define ZV0_CAPTURE_BUF1_ADDRESS_EXT BIT(27)
+#define ZV0_CAPTURE_BUF1_ADDRESS_CS BIT(26)
+#define ZV0_CAPTURE_BUF1_ADDRESS_ADDRESS_MASK 0x3ffffff
#define ZV0_CAPTURE_BUF_OFFSET 0x090014
#ifndef VALIDATION_CHIP
- #define ZV0_CAPTURE_BUF_OFFSET_YCLIP_ODD_FIELD 25:16
+ #define ZV0_CAPTURE_BUF_OFFSET_YCLIP_ODD_FIELD (0x3ff << 16)
#endif
-#define ZV0_CAPTURE_BUF_OFFSET_OFFSET 15:0
+#define ZV0_CAPTURE_BUF_OFFSET_OFFSET_MASK 0xffff
#define ZV0_CAPTURE_FIFO_CTRL 0x090018
-#define ZV0_CAPTURE_FIFO_CTRL_FIFO 2:0
+#define ZV0_CAPTURE_FIFO_CTRL_FIFO_MASK 0x7
#define ZV0_CAPTURE_FIFO_CTRL_FIFO_0 0
#define ZV0_CAPTURE_FIFO_CTRL_FIFO_1 1
#define ZV0_CAPTURE_FIFO_CTRL_FIFO_2 2
@@ -2170,130 +1344,68 @@
#define ZV0_CAPTURE_FIFO_CTRL_FIFO_7 7
#define ZV0_CAPTURE_YRGB_CONST 0x09001C
-#define ZV0_CAPTURE_YRGB_CONST_Y 31:24
-#define ZV0_CAPTURE_YRGB_CONST_R 23:16
-#define ZV0_CAPTURE_YRGB_CONST_G 15:8
-#define ZV0_CAPTURE_YRGB_CONST_B 7:0
+#define ZV0_CAPTURE_YRGB_CONST_Y_MASK (0xff << 24)
+#define ZV0_CAPTURE_YRGB_CONST_R_MASK (0xff << 16)
+#define ZV0_CAPTURE_YRGB_CONST_G_MASK (0xff << 8)
+#define ZV0_CAPTURE_YRGB_CONST_B_MASK 0xff
#define ZV0_CAPTURE_LINE_COMP 0x090020
-#define ZV0_CAPTURE_LINE_COMP_LC 10:0
+#define ZV0_CAPTURE_LINE_COMP_LC_MASK 0x7ff
/* ZV1 */
#define ZV1_CAPTURE_CTRL 0x098000
-#define ZV1_CAPTURE_CTRL_FIELD_INPUT 27:27
-#define ZV1_CAPTURE_CTRL_FIELD_INPUT_EVEN_FIELD 0
-#define ZV1_CAPTURE_CTRL_FIELD_INPUT_ODD_FIELD 0
-#define ZV1_CAPTURE_CTRL_SCAN 26:26
-#define ZV1_CAPTURE_CTRL_SCAN_PROGRESSIVE 0
-#define ZV1_CAPTURE_CTRL_SCAN_INTERLACE 1
-#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER 25:25
-#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER_0 0
-#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER_1 1
-#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC 24:24
-#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC_INACTIVE 0
-#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC_ACTIVE 1
-#define ZV1_CAPTURE_CTRL_PANEL 20:20
-#define ZV1_CAPTURE_CTRL_PANEL_DISABLE 0
-#define ZV1_CAPTURE_CTRL_PANEL_ENABLE 1
-#define ZV1_CAPTURE_CTRL_ADJ 19:19
-#define ZV1_CAPTURE_CTRL_ADJ_NORMAL 0
-#define ZV1_CAPTURE_CTRL_ADJ_DELAY 1
-#define ZV1_CAPTURE_CTRL_HA 18:18
-#define ZV1_CAPTURE_CTRL_HA_DISABLE 0
-#define ZV1_CAPTURE_CTRL_HA_ENABLE 1
-#define ZV1_CAPTURE_CTRL_VSK 17:17
-#define ZV1_CAPTURE_CTRL_VSK_DISABLE 0
-#define ZV1_CAPTURE_CTRL_VSK_ENABLE 1
-#define ZV1_CAPTURE_CTRL_HSK 16:16
-#define ZV1_CAPTURE_CTRL_HSK_DISABLE 0
-#define ZV1_CAPTURE_CTRL_HSK_ENABLE 1
-#define ZV1_CAPTURE_CTRL_FD 15:15
-#define ZV1_CAPTURE_CTRL_FD_RISING 0
-#define ZV1_CAPTURE_CTRL_FD_FALLING 1
-#define ZV1_CAPTURE_CTRL_VP 14:14
-#define ZV1_CAPTURE_CTRL_VP_HIGH 0
-#define ZV1_CAPTURE_CTRL_VP_LOW 1
-#define ZV1_CAPTURE_CTRL_HP 13:13
-#define ZV1_CAPTURE_CTRL_HP_HIGH 0
-#define ZV1_CAPTURE_CTRL_HP_LOW 1
-#define ZV1_CAPTURE_CTRL_CP 12:12
-#define ZV1_CAPTURE_CTRL_CP_HIGH 0
-#define ZV1_CAPTURE_CTRL_CP_LOW 1
-#define ZV1_CAPTURE_CTRL_UVS 11:11
-#define ZV1_CAPTURE_CTRL_UVS_DISABLE 0
-#define ZV1_CAPTURE_CTRL_UVS_ENABLE 1
-#define ZV1_CAPTURE_CTRL_BS 10:10
-#define ZV1_CAPTURE_CTRL_BS_DISABLE 0
-#define ZV1_CAPTURE_CTRL_BS_ENABLE 1
-#define ZV1_CAPTURE_CTRL_CS 9:9
-#define ZV1_CAPTURE_CTRL_CS_16 0
-#define ZV1_CAPTURE_CTRL_CS_8 1
-#define ZV1_CAPTURE_CTRL_CF 8:8
-#define ZV1_CAPTURE_CTRL_CF_YUV 0
-#define ZV1_CAPTURE_CTRL_CF_RGB 1
-#define ZV1_CAPTURE_CTRL_FS 7:7
-#define ZV1_CAPTURE_CTRL_FS_DISABLE 0
-#define ZV1_CAPTURE_CTRL_FS_ENABLE 1
-#define ZV1_CAPTURE_CTRL_WEAVE 6:6
-#define ZV1_CAPTURE_CTRL_WEAVE_DISABLE 0
-#define ZV1_CAPTURE_CTRL_WEAVE_ENABLE 1
-#define ZV1_CAPTURE_CTRL_BOB 5:5
-#define ZV1_CAPTURE_CTRL_BOB_DISABLE 0
-#define ZV1_CAPTURE_CTRL_BOB_ENABLE 1
-#define ZV1_CAPTURE_CTRL_DB 4:4
-#define ZV1_CAPTURE_CTRL_DB_DISABLE 0
-#define ZV1_CAPTURE_CTRL_DB_ENABLE 1
-#define ZV1_CAPTURE_CTRL_CC 3:3
-#define ZV1_CAPTURE_CTRL_CC_CONTINUE 0
-#define ZV1_CAPTURE_CTRL_CC_CONDITION 1
-#define ZV1_CAPTURE_CTRL_RGB 2:2
-#define ZV1_CAPTURE_CTRL_RGB_DISABLE 0
-#define ZV1_CAPTURE_CTRL_RGB_ENABLE 1
-#define ZV1_CAPTURE_CTRL_656 1:1
-#define ZV1_CAPTURE_CTRL_656_DISABLE 0
-#define ZV1_CAPTURE_CTRL_656_ENABLE 1
-#define ZV1_CAPTURE_CTRL_CAP 0:0
-#define ZV1_CAPTURE_CTRL_CAP_DISABLE 0
-#define ZV1_CAPTURE_CTRL_CAP_ENABLE 1
+#define ZV1_CAPTURE_CTRL_FIELD_INPUT BIT(27)
+#define ZV1_CAPTURE_CTRL_SCAN BIT(26)
+#define ZV1_CAPTURE_CTRL_CURRENT_BUFFER BIT(25)
+#define ZV1_CAPTURE_CTRL_VERTICAL_SYNC BIT(24)
+#define ZV1_CAPTURE_CTRL_PANEL BIT(20)
+#define ZV1_CAPTURE_CTRL_ADJ BIT(19)
+#define ZV1_CAPTURE_CTRL_HA BIT(18)
+#define ZV1_CAPTURE_CTRL_VSK BIT(17)
+#define ZV1_CAPTURE_CTRL_HSK BIT(16)
+#define ZV1_CAPTURE_CTRL_FD BIT(15)
+#define ZV1_CAPTURE_CTRL_VP BIT(14)
+#define ZV1_CAPTURE_CTRL_HP BIT(13)
+#define ZV1_CAPTURE_CTRL_CP BIT(12)
+#define ZV1_CAPTURE_CTRL_UVS BIT(11)
+#define ZV1_CAPTURE_CTRL_BS BIT(10)
+#define ZV1_CAPTURE_CTRL_CS BIT(9)
+#define ZV1_CAPTURE_CTRL_CF BIT(8)
+#define ZV1_CAPTURE_CTRL_FS BIT(7)
+#define ZV1_CAPTURE_CTRL_WEAVE BIT(6)
+#define ZV1_CAPTURE_CTRL_BOB BIT(5)
+#define ZV1_CAPTURE_CTRL_DB BIT(4)
+#define ZV1_CAPTURE_CTRL_CC BIT(3)
+#define ZV1_CAPTURE_CTRL_RGB BIT(2)
+#define ZV1_CAPTURE_CTRL_656 BIT(1)
+#define ZV1_CAPTURE_CTRL_CAP BIT(0)
#define ZV1_CAPTURE_CLIP 0x098004
-#define ZV1_CAPTURE_CLIP_YCLIP 25:16
-#define ZV1_CAPTURE_CLIP_XCLIP 9:0
+#define ZV1_CAPTURE_CLIP_YCLIP_MASK (0x3ff << 16)
+#define ZV1_CAPTURE_CLIP_XCLIP_MASK 0x3ff
#define ZV1_CAPTURE_SIZE 0x098008
-#define ZV1_CAPTURE_SIZE_HEIGHT 26:16
-#define ZV1_CAPTURE_SIZE_WIDTH 10:0
+#define ZV1_CAPTURE_SIZE_HEIGHT_MASK (0x7ff << 16)
+#define ZV1_CAPTURE_SIZE_WIDTH_MASK 0x7ff
#define ZV1_CAPTURE_BUF0_ADDRESS 0x09800C
-#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS 31:31
-#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS_CURRENT 0
-#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS_PENDING 1
-#define ZV1_CAPTURE_BUF0_ADDRESS_EXT 27:27
-#define ZV1_CAPTURE_BUF0_ADDRESS_EXT_LOCAL 0
-#define ZV1_CAPTURE_BUF0_ADDRESS_EXT_EXTERNAL 1
-#define ZV1_CAPTURE_BUF0_ADDRESS_CS 26:26
-#define ZV1_CAPTURE_BUF0_ADDRESS_CS_0 0
-#define ZV1_CAPTURE_BUF0_ADDRESS_CS_1 1
-#define ZV1_CAPTURE_BUF0_ADDRESS_ADDRESS 25:0
+#define ZV1_CAPTURE_BUF0_ADDRESS_STATUS BIT(31)
+#define ZV1_CAPTURE_BUF0_ADDRESS_EXT BIT(27)
+#define ZV1_CAPTURE_BUF0_ADDRESS_CS BIT(26)
+#define ZV1_CAPTURE_BUF0_ADDRESS_ADDRESS_MASK 0x3ffffff
#define ZV1_CAPTURE_BUF1_ADDRESS 0x098010
-#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS 31:31
-#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS_CURRENT 0
-#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS_PENDING 1
-#define ZV1_CAPTURE_BUF1_ADDRESS_EXT 27:27
-#define ZV1_CAPTURE_BUF1_ADDRESS_EXT_LOCAL 0
-#define ZV1_CAPTURE_BUF1_ADDRESS_EXT_EXTERNAL 1
-#define ZV1_CAPTURE_BUF1_ADDRESS_CS 26:26
-#define ZV1_CAPTURE_BUF1_ADDRESS_CS_0 0
-#define ZV1_CAPTURE_BUF1_ADDRESS_CS_1 1
-#define ZV1_CAPTURE_BUF1_ADDRESS_ADDRESS 25:0
+#define ZV1_CAPTURE_BUF1_ADDRESS_STATUS BIT(31)
+#define ZV1_CAPTURE_BUF1_ADDRESS_EXT BIT(27)
+#define ZV1_CAPTURE_BUF1_ADDRESS_CS BIT(26)
+#define ZV1_CAPTURE_BUF1_ADDRESS_ADDRESS_MASK 0x3ffffff
#define ZV1_CAPTURE_BUF_OFFSET 0x098014
-#define ZV1_CAPTURE_BUF_OFFSET_OFFSET 15:0
+#define ZV1_CAPTURE_BUF_OFFSET_OFFSET_MASK 0xffff
#define ZV1_CAPTURE_FIFO_CTRL 0x098018
-#define ZV1_CAPTURE_FIFO_CTRL_FIFO 2:0
+#define ZV1_CAPTURE_FIFO_CTRL_FIFO_MASK 0x7
#define ZV1_CAPTURE_FIFO_CTRL_FIFO_0 0
#define ZV1_CAPTURE_FIFO_CTRL_FIFO_1 1
#define ZV1_CAPTURE_FIFO_CTRL_FIFO_2 2
@@ -2304,52 +1416,30 @@
#define ZV1_CAPTURE_FIFO_CTRL_FIFO_7 7
#define ZV1_CAPTURE_YRGB_CONST 0x09801C
-#define ZV1_CAPTURE_YRGB_CONST_Y 31:24
-#define ZV1_CAPTURE_YRGB_CONST_R 23:16
-#define ZV1_CAPTURE_YRGB_CONST_G 15:8
-#define ZV1_CAPTURE_YRGB_CONST_B 7:0
+#define ZV1_CAPTURE_YRGB_CONST_Y_MASK (0xff << 24)
+#define ZV1_CAPTURE_YRGB_CONST_R_MASK (0xff << 16)
+#define ZV1_CAPTURE_YRGB_CONST_G_MASK (0xff << 8)
+#define ZV1_CAPTURE_YRGB_CONST_B_MASK 0xff
#define DMA_1_SOURCE 0x0D0010
-#define DMA_1_SOURCE_ADDRESS_EXT 27:27
-#define DMA_1_SOURCE_ADDRESS_EXT_LOCAL 0
-#define DMA_1_SOURCE_ADDRESS_EXT_EXTERNAL 1
-#define DMA_1_SOURCE_ADDRESS_CS 26:26
-#define DMA_1_SOURCE_ADDRESS_CS_0 0
-#define DMA_1_SOURCE_ADDRESS_CS_1 1
-#define DMA_1_SOURCE_ADDRESS 25:0
+#define DMA_1_SOURCE_ADDRESS_EXT BIT(27)
+#define DMA_1_SOURCE_ADDRESS_CS BIT(26)
+#define DMA_1_SOURCE_ADDRESS_MASK 0x3ffffff
#define DMA_1_DESTINATION 0x0D0014
-#define DMA_1_DESTINATION_ADDRESS_EXT 27:27
-#define DMA_1_DESTINATION_ADDRESS_EXT_LOCAL 0
-#define DMA_1_DESTINATION_ADDRESS_EXT_EXTERNAL 1
-#define DMA_1_DESTINATION_ADDRESS_CS 26:26
-#define DMA_1_DESTINATION_ADDRESS_CS_0 0
-#define DMA_1_DESTINATION_ADDRESS_CS_1 1
-#define DMA_1_DESTINATION_ADDRESS 25:0
+#define DMA_1_DESTINATION_ADDRESS_EXT BIT(27)
+#define DMA_1_DESTINATION_ADDRESS_CS BIT(26)
+#define DMA_1_DESTINATION_ADDRESS_MASK 0x3ffffff
#define DMA_1_SIZE_CONTROL 0x0D0018
-#define DMA_1_SIZE_CONTROL_STATUS 31:31
-#define DMA_1_SIZE_CONTROL_STATUS_IDLE 0
-#define DMA_1_SIZE_CONTROL_STATUS_ACTIVE 1
-#define DMA_1_SIZE_CONTROL_SIZE 23:0
+#define DMA_1_SIZE_CONTROL_STATUS BIT(31)
+#define DMA_1_SIZE_CONTROL_SIZE_MASK 0xffffff
#define DMA_ABORT_INTERRUPT 0x0D0020
-#define DMA_ABORT_INTERRUPT_ABORT_1 5:5
-#define DMA_ABORT_INTERRUPT_ABORT_1_ENABLE 0
-#define DMA_ABORT_INTERRUPT_ABORT_1_ABORT 1
-#define DMA_ABORT_INTERRUPT_ABORT_0 4:4
-#define DMA_ABORT_INTERRUPT_ABORT_0_ENABLE 0
-#define DMA_ABORT_INTERRUPT_ABORT_0_ABORT 1
-#define DMA_ABORT_INTERRUPT_INT_1 1:1
-#define DMA_ABORT_INTERRUPT_INT_1_CLEAR 0
-#define DMA_ABORT_INTERRUPT_INT_1_FINISHED 1
-#define DMA_ABORT_INTERRUPT_INT_0 0:0
-#define DMA_ABORT_INTERRUPT_INT_0_CLEAR 0
-#define DMA_ABORT_INTERRUPT_INT_0_FINISHED 1
-
-
-
-
+#define DMA_ABORT_INTERRUPT_ABORT_1 BIT(5)
+#define DMA_ABORT_INTERRUPT_ABORT_0 BIT(4)
+#define DMA_ABORT_INTERRUPT_INT_1 BIT(1)
+#define DMA_ABORT_INTERRUPT_INT_0 BIT(0)
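
BIT(n), from the kernel's bitops headers, expands to (1UL << (n)), so each former `_ENABLE`/`_DISABLE` pair collapses into a single name: OR the bit in to assert it, AND it out to clear it. For instance (sketch only; whether the abort bit self-clears is a datasheet question):

/* Sketch: request an abort of DMA channel 0. */
static void dma0_abort(void)
{
	unsigned int val = PEEK32(DMA_ABORT_INTERRUPT);

	POKE32(DMA_ABORT_INTERRUPT, val | DMA_ABORT_INTERRUPT_ABORT_0);
}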
/* Default i2c clock and data GPIO pins */
#define DEFAULT_I2C_SCL 30
@@ -2357,16 +1447,12 @@
#define GPIO_DATA_SM750LE 0x020018
-#define GPIO_DATA_SM750LE_1 1:1
-#define GPIO_DATA_SM750LE_0 0:0
+#define GPIO_DATA_SM750LE_1 BIT(1)
+#define GPIO_DATA_SM750LE_0 BIT(0)
#define GPIO_DATA_DIRECTION_SM750LE 0x02001C
-#define GPIO_DATA_DIRECTION_SM750LE_1 1:1
-#define GPIO_DATA_DIRECTION_SM750LE_1_INPUT 0
-#define GPIO_DATA_DIRECTION_SM750LE_1_OUTPUT 1
-#define GPIO_DATA_DIRECTION_SM750LE_0 0:0
-#define GPIO_DATA_DIRECTION_SM750LE_0_INPUT 0
-#define GPIO_DATA_DIRECTION_SM750LE_0_OUTPUT 1
+#define GPIO_DATA_DIRECTION_SM750LE_1 BIT(1)
+#define GPIO_DATA_DIRECTION_SM750LE_0 BIT(0)
#endif
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 241b77b927ee..67f36e71da7e 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -14,8 +14,8 @@
#define i2cWriteReg sm750_hw_i2c_write_reg
#define i2cReadReg sm750_hw_i2c_read_reg
#else
- #define i2cWriteReg swI2CWriteReg
- #define i2cReadReg swI2CReadReg
+ #define i2cWriteReg sm750_sw_i2c_write_reg
+ #define i2cReadReg sm750_sw_i2c_read_reg
#endif
/* SII164 Vendor and Device ID */
@@ -236,7 +236,7 @@ long sii164InitChip(
}
/* Return -1 if initialization fails. */
- return (-1);
+ return -1;
}
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index f2610c90eeb4..664ad089f753 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -39,7 +39,10 @@ unsigned char sii164IsConnected(void);
unsigned char sii164CheckInterrupt(void);
void sii164ClearInterrupt(void);
#endif
-/* below register definination is used for Silicon Image SiI164 DVI controller chip */
+/*
+ * below register definition is used for
+ * Silicon Image SiI164 DVI controller chip
+ */
/*
* Vendor ID registers
*/
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index c78421b5b0e7..6ed004e40855 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -13,8 +13,6 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/screen_info.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
#include <linux/console.h>
#include <asm/fb.h>
#include "sm750.h"
@@ -189,7 +187,7 @@ static void lynxfb_ops_fillrect(struct fb_info *info,
 * Without spin_lock, the system will die if the user loads and
 * immediately unloads the driver repeatedly (dual head)
*/
- if (sm750_dev->dual)
+ if (sm750_dev->fb_count > 1)
spin_lock(&sm750_dev->slock);
sm750_dev->accel.de_fillrect(&sm750_dev->accel,
@@ -197,7 +195,7 @@ static void lynxfb_ops_fillrect(struct fb_info *info,
region->dx, region->dy,
region->width, region->height,
color, rop);
- if (sm750_dev->dual)
+ if (sm750_dev->fb_count > 1)
spin_unlock(&sm750_dev->slock);
}
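
The `dual` flag is on its way out in favor of `fb_count` (see the sm750.h hunk further down): instead of caching the module parameter, the driver counts successfully registered framebuffers, and `fb_count > 1` holds exactly when two heads share the single 2D engine and the accel paths need serializing. A hypothetical wrapper that states the intent (not part of the patch):

static void sm750_accel_lock(struct sm750_dev *sm750_dev)
{
	/* Only dual-head setups race on the one 2D engine. */
	if (sm750_dev->fb_count > 1)
		spin_lock(&sm750_dev->slock);
}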
@@ -223,7 +221,7 @@ static void lynxfb_ops_copyarea(struct fb_info *info,
 * Without spin_lock, the system will die if the user loads and
 * immediately unloads the driver repeatedly (dual head)
*/
- if (sm750_dev->dual)
+ if (sm750_dev->fb_count > 1)
spin_lock(&sm750_dev->slock);
sm750_dev->accel.de_copyarea(&sm750_dev->accel,
@@ -231,7 +229,7 @@ static void lynxfb_ops_copyarea(struct fb_info *info,
base, pitch, Bpp, region->dx, region->dy,
region->width, region->height,
HW_ROP2_COPY);
- if (sm750_dev->dual)
+ if (sm750_dev->fb_count > 1)
spin_unlock(&sm750_dev->slock);
}
@@ -272,7 +270,7 @@ static void lynxfb_ops_imageblit(struct fb_info *info,
 * Without spin_lock, the system will die if the user loads and
 * immediately unloads the driver repeatedly (dual head)
*/
- if (sm750_dev->dual)
+ if (sm750_dev->fb_count > 1)
spin_lock(&sm750_dev->slock);
sm750_dev->accel.de_imageblit(&sm750_dev->accel,
@@ -281,7 +279,7 @@ static void lynxfb_ops_imageblit(struct fb_info *info,
image->dx, image->dy,
image->width, image->height,
fgcol, bgcol, HW_ROP2_COPY);
- if (sm750_dev->dual)
+ if (sm750_dev->fb_count > 1)
spin_unlock(&sm750_dev->slock);
}
@@ -319,7 +317,7 @@ static int lynxfb_ops_set_par(struct fb_info *info)
var = &info->var;
fix = &info->fix;
- /* fix structur is not so FIX ... */
+ /* fix structure is not so FIX ... */
line_length = var->xres_virtual * var->bits_per_pixel / 8;
line_length = ALIGN(line_length, crtc->line_pad);
fix->line_length = line_length;
@@ -420,14 +418,16 @@ static int lynxfb_suspend(struct pci_dev *pdev, pm_message_t mesg)
ret = pci_save_state(pdev);
if (ret) {
- pr_err("error:%d occurred in pci_save_state\n", ret);
+ dev_err(&pdev->dev,
+ "error:%d occurred in pci_save_state\n", ret);
return ret;
}
- pci_disable_device(pdev);
ret = pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
if (ret) {
- pr_err("error:%d occurred in pci_set_power_state\n", ret);
+ dev_err(&pdev->dev,
+ "error:%d occurred in pci_set_power_state\n",
+ ret);
return ret;
}
}
@@ -455,7 +455,8 @@ static int lynxfb_resume(struct pci_dev *pdev)
ret = pci_set_power_state(pdev, PCI_D0);
if (ret) {
- pr_err("error:%d occurred in pci_set_power_state\n", ret);
+ dev_err(&pdev->dev,
+ "error:%d occurred in pci_set_power_state\n", ret);
return ret;
}
@@ -463,7 +464,9 @@ static int lynxfb_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret) {
- pr_err("error:%d occurred in pci_enable_device\n", ret);
+ dev_err(&pdev->dev,
+ "error:%d occurred in pci_enable_device\n",
+ ret);
return ret;
}
pci_set_master(pdev);
@@ -650,8 +653,10 @@ static int sm750fb_set_drv(struct lynxfb_par *par)
output = &par->output;
crtc = &par->crtc;
- crtc->vidmem_size = (sm750_dev->dual) ? sm750_dev->vidmem_size >> 1 :
- sm750_dev->vidmem_size;
+ crtc->vidmem_size = sm750_dev->vidmem_size;
+ if (sm750_dev->fb_count > 1)
+ crtc->vidmem_size >>= 1;
+
/* setup crtc and output member */
sm750_dev->hwCursor = g_hwcursor;
@@ -981,7 +986,7 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
NO_PARAM:
if (sm750_dev->revid != SM750LE_REVISION_ID) {
- if (sm750_dev->dual) {
+ if (sm750_dev->fb_count > 1) {
if (swap)
sm750_dev->dataflow = sm750_dual_swap;
else
@@ -1000,35 +1005,75 @@ NO_PARAM:
}
}
+static void sm750fb_frambuffer_release(struct sm750_dev *sm750_dev)
+{
+ struct fb_info *fb_info;
+
+ while (sm750_dev->fb_count) {
+ fb_info = sm750_dev->fbinfo[sm750_dev->fb_count - 1];
+ unregister_framebuffer(fb_info);
+ framebuffer_release(fb_info);
+ sm750_dev->fb_count--;
+ }
+}
+
+static int sm750fb_frambuffer_alloc(struct sm750_dev *sm750_dev, int fbidx)
+{
+ struct fb_info *fb_info;
+ struct lynxfb_par *par;
+ int err;
+
+ fb_info = framebuffer_alloc(sizeof(struct lynxfb_par),
+ &sm750_dev->pdev->dev);
+ if (!fb_info)
+ return -ENOMEM;
+
+ sm750_dev->fbinfo[fbidx] = fb_info;
+ par = fb_info->par;
+ par->dev = sm750_dev;
+
+ err = lynxfb_set_fbinfo(fb_info, fbidx);
+ if (err)
+ goto release_fb;
+
+ err = register_framebuffer(fb_info);
+ if (err < 0)
+ goto release_fb;
+
+ sm750_dev->fb_count++;
+
+ return 0;
+
+release_fb:
+ framebuffer_release(fb_info);
+ return err;
+}
+
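
These two helpers replace the probe function's goto ladder with a pair that composes: `fb_count` counts only framebuffers that registered successfully, so the release helper unwinds exactly what the alloc helper completed, newest first. Their intended use, repeated from the probe hunk below:

	for (fbidx = 0; fbidx < max_fb; fbidx++) {
		err = sm750fb_frambuffer_alloc(sm750_dev, fbidx);
		if (err)
			goto release_fb;	/* undoes the earlier iterations */
	}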
static int lynxfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- struct fb_info *info[] = {NULL, NULL};
struct sm750_dev *sm750_dev = NULL;
+ int max_fb;
int fbidx;
+ int err;
/* enable device */
- if (pci_enable_device(pdev)) {
- pr_err("can not enable device.\n");
- goto err_enable;
- }
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
- sm750_dev = kzalloc(sizeof(*sm750_dev), GFP_KERNEL);
- if (!sm750_dev) {
- pr_err("Could not allocate memory for share.\n");
- goto err_share;
- }
+ err = -ENOMEM;
+ sm750_dev = devm_kzalloc(&pdev->dev, sizeof(*sm750_dev), GFP_KERNEL);
+ if (!sm750_dev)
+ return err;
sm750_dev->fbinfo[0] = sm750_dev->fbinfo[1] = NULL;
sm750_dev->devid = pdev->device;
sm750_dev->revid = pdev->revision;
-
- pr_info("share->revid = %02x\n", sm750_dev->revid);
sm750_dev->pdev = pdev;
sm750_dev->mtrr_off = g_nomtrr;
sm750_dev->mtrr.vram = 0;
sm750_dev->accel_off = g_noaccel;
- sm750_dev->dual = g_dualview;
spin_lock_init(&sm750_dev->slock);
if (!sm750_dev->accel_off) {
@@ -1042,19 +1087,15 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
sm750_dev->accel.de_fillrect = hw_fillrect;
sm750_dev->accel.de_copyarea = hw_copyarea;
sm750_dev->accel.de_imageblit = hw_imageblit;
- pr_info("enable 2d acceleration\n");
- } else {
- pr_info("disable 2d acceleration\n");
}
/* call chip specific setup routine */
sm750fb_setup(sm750_dev, g_settings);
/* call chip specific mmap routine */
- if (hw_sm750_map(sm750_dev, pdev)) {
- pr_err("Memory map failed\n");
- goto err_map;
- }
+ err = hw_sm750_map(sm750_dev, pdev);
+ if (err)
+ return err;
if (!sm750_dev->mtrr_off)
sm750_dev->mtrr.vram = arch_phys_wc_add(sm750_dev->vidmem_start,
@@ -1062,107 +1103,38 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
memset_io(sm750_dev->pvMem, 0, sm750_dev->vidmem_size);
- pr_info("sm%3x mmio address = %p\n", sm750_dev->devid,
- sm750_dev->pvReg);
-
pci_set_drvdata(pdev, sm750_dev);
/* call chipInit routine */
hw_sm750_inithw(sm750_dev, pdev);
- /* allocate frame buffer info structor according to g_dualview */
- fbidx = 0;
-ALLOC_FB:
- info[fbidx] = framebuffer_alloc(sizeof(struct lynxfb_par), &pdev->dev);
- if (!info[fbidx]) {
- pr_err("Could not allocate framebuffer #%d.\n", fbidx);
- if (fbidx == 0)
- goto err_info0_alloc;
- else
- goto err_info1_alloc;
- } else {
- struct lynxfb_par *par;
- int errno;
-
- pr_info("framebuffer #%d alloc okay\n", fbidx);
- sm750_dev->fbinfo[fbidx] = info[fbidx];
- par = info[fbidx]->par;
- par->dev = sm750_dev;
-
- /* set fb_info structure */
- if (lynxfb_set_fbinfo(info[fbidx], fbidx)) {
- pr_err("Failed to initial fb_info #%d.\n", fbidx);
- if (fbidx == 0)
- goto err_info0_set;
- else
- goto err_info1_set;
- }
-
- /* register frame buffer */
- pr_info("Ready to register framebuffer #%d.\n", fbidx);
- errno = register_framebuffer(info[fbidx]);
- if (errno < 0) {
- pr_err("Failed to register fb_info #%d. err %d\n",
- fbidx,
- errno);
- if (fbidx == 0)
- goto err_register0;
- else
- goto err_register1;
- }
- pr_info("Accomplished register framebuffer #%d.\n", fbidx);
+ /* allocate frame buffer info structures according to g_dualview */
+ max_fb = g_dualview ? 2 : 1;
+ for (fbidx = 0; fbidx < max_fb; fbidx++) {
+ err = sm750fb_frambuffer_alloc(sm750_dev, fbidx);
+ if (err)
+ goto release_fb;
}
- /* no dual view by far */
- fbidx++;
- if (sm750_dev->dual && fbidx < 2)
- goto ALLOC_FB;
-
return 0;
-err_register1:
-err_info1_set:
- framebuffer_release(info[1]);
-err_info1_alloc:
- unregister_framebuffer(info[0]);
-err_register0:
-err_info0_set:
- framebuffer_release(info[0]);
-err_info0_alloc:
-err_map:
- kfree(sm750_dev);
-err_share:
-err_enable:
- return -ENODEV;
+release_fb:
+ sm750fb_frambuffer_release(sm750_dev);
+ return err;
}
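
The probe path now leans on managed resources: pcim_enable_device() and devm_kzalloc() register their cleanup against &pdev->dev, which is what lets the err_enable/err_share/err_map labels and the kfree() disappear. Note that the MMIO mappings set up by hw_sm750_map() are still undone by hand in lynxfb_pci_remove() below, so only the enable and the allocation moved to devres. The idiom, distilled as a sketch:

	err = pcim_enable_device(pdev);		/* undone automatically on detach */
	if (err)
		return err;			/* nothing to unwind by hand */

	sm750_dev = devm_kzalloc(&pdev->dev, sizeof(*sm750_dev), GFP_KERNEL);
	if (!sm750_dev)
		return -ENOMEM;			/* allocation is device-managed too */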
static void lynxfb_pci_remove(struct pci_dev *pdev)
{
- struct fb_info *info;
struct sm750_dev *sm750_dev;
- struct lynxfb_par *par;
- int cnt;
- cnt = 2;
sm750_dev = pci_get_drvdata(pdev);
- while (cnt-- > 0) {
- info = sm750_dev->fbinfo[cnt];
- if (!info)
- continue;
- par = info->par;
-
- unregister_framebuffer(info);
- /* release frame buffer */
- framebuffer_release(info);
- }
+ sm750fb_frambuffer_release(sm750_dev);
arch_phys_wc_del(sm750_dev->mtrr.vram);
iounmap(sm750_dev->pvReg);
iounmap(sm750_dev->pvMem);
kfree(g_settings);
- kfree(sm750_dev);
- pci_set_drvdata(pdev, NULL);
}
static int __init lynxfb_setup(char *options)
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index b0a93cdc7292..8e70ce0d6da4 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -53,7 +53,7 @@ struct lynx_accel {
/* base virtual address of de data port */
volatile unsigned char __iomem *dpPortBase;
- /* function fointers */
+ /* function pointers */
void (*de_init)(struct lynx_accel *);
int (*de_wait)(void); /* check whether the hardware is ready to work */
@@ -79,7 +79,7 @@ struct sm750_dev {
struct fb_info *fbinfo[2];
struct lynx_accel accel;
int accel_off;
- int dual;
+ int fb_count;
int mtrr_off;
struct{
int vram;
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 43e59725920c..9aa4066ac86d 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -17,7 +17,6 @@
#include "sm750.h"
#include "sm750_accel.h"
-#include "sm750_help.h"
static inline void write_dpr(struct lynx_accel *accel, int offset, u32 regValue)
{
writel(regValue, accel->dprBase + offset);
@@ -41,20 +40,16 @@ void hw_de_init(struct lynx_accel *accel)
write_dpr(accel, DE_MASKS, 0xFFFFFFFF);
/* dpr1c */
- reg = FIELD_SET(0, DE_STRETCH_FORMAT, PATTERN_XY, NORMAL)|
- FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_Y, 0)|
- FIELD_VALUE(0, DE_STRETCH_FORMAT, PATTERN_X, 0)|
- FIELD_SET(0, DE_STRETCH_FORMAT, ADDRESSING, XY)|
- FIELD_VALUE(0, DE_STRETCH_FORMAT, SOURCE_HEIGHT, 3);
+ reg = 0x3;
- clr = FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_XY)&
- FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_Y)&
- FIELD_CLEAR(DE_STRETCH_FORMAT, PATTERN_X)&
- FIELD_CLEAR(DE_STRETCH_FORMAT, ADDRESSING)&
- FIELD_CLEAR(DE_STRETCH_FORMAT, SOURCE_HEIGHT);
+ clr = DE_STRETCH_FORMAT_PATTERN_XY | DE_STRETCH_FORMAT_PATTERN_Y_MASK |
+ DE_STRETCH_FORMAT_PATTERN_X_MASK |
+ DE_STRETCH_FORMAT_ADDRESSING_MASK |
+ DE_STRETCH_FORMAT_SOURCE_HEIGHT_MASK;
- /* DE_STRETCH bpp format need be initilized in setMode routine */
- write_dpr(accel, DE_STRETCH_FORMAT, (read_dpr(accel, DE_STRETCH_FORMAT) & clr) | reg);
+ /* DE_STRETCH bpp format needs to be initialized in the setMode routine */
+ write_dpr(accel, DE_STRETCH_FORMAT,
+ (read_dpr(accel, DE_STRETCH_FORMAT) & ~clr) | reg);
/* disable clipping and transparent */
write_dpr(accel, DE_CLIP_TL, 0); /* dpr2c */
@@ -63,16 +58,11 @@ void hw_de_init(struct lynx_accel *accel)
write_dpr(accel, DE_COLOR_COMPARE_MASK, 0); /* dpr24 */
write_dpr(accel, DE_COLOR_COMPARE, 0);
- reg = FIELD_SET(0, DE_CONTROL, TRANSPARENCY, DISABLE)|
- FIELD_SET(0, DE_CONTROL, TRANSPARENCY_MATCH, OPAQUE)|
- FIELD_SET(0, DE_CONTROL, TRANSPARENCY_SELECT, SOURCE);
-
- clr = FIELD_CLEAR(DE_CONTROL, TRANSPARENCY)&
- FIELD_CLEAR(DE_CONTROL, TRANSPARENCY_MATCH)&
- FIELD_CLEAR(DE_CONTROL, TRANSPARENCY_SELECT);
+ clr = DE_CONTROL_TRANSPARENCY | DE_CONTROL_TRANSPARENCY_MATCH |
+ DE_CONTROL_TRANSPARENCY_SELECT;
/* dpr0c */
- write_dpr(accel, DE_CONTROL, (read_dpr(accel, DE_CONTROL)&clr)|reg);
+ write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr);
}
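
A subtlety in the hunk above: the old FIELD_CLEAR() returned the complement of a field mask, so the original code ANDed `clr` in directly, while the new `clr` is the plain OR of the masks, hence the `& ~clr` (De Morgan: ~a & ~b == ~(a | b)). Dropping the `| reg` term is safe as well, because every FIELD_SET() it replaced selected the zero-valued option (TRANSPARENCY DISABLE, MATCH OPAQUE, SELECT SOURCE). In sketch form:

	u32 ctrl = read_dpr(accel, DE_CONTROL);
	u32 clr  = DE_CONTROL_TRANSPARENCY_MATCH |
		   DE_CONTROL_TRANSPARENCY_SELECT |
		   DE_CONTROL_TRANSPARENCY;

	/* "& ~clr" equals the old FIELD_CLEAR() chain; nothing to OR back in. */
	write_dpr(accel, DE_CONTROL, ctrl & ~clr);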
/* set2dformat is only called from the setmode functions
@@ -85,7 +75,9 @@ void hw_set2dformat(struct lynx_accel *accel, int fmt)
/* fmt=0,1,2 for 8,16,32,bpp on sm718/750/502 */
reg = read_dpr(accel, DE_STRETCH_FORMAT);
- reg = FIELD_VALUE(reg, DE_STRETCH_FORMAT, PIXEL_FORMAT, fmt);
+ reg &= ~DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK;
+ reg |= ((fmt << DE_STRETCH_FORMAT_PIXEL_FORMAT_SHIFT) &
+ DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK);
write_dpr(accel, DE_STRETCH_FORMAT, reg);
}
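
hw_set2dformat() shows the insert-a-value idiom that recurs through the rest of the file: clear the field, shift the value into place, and re-mask it for safety. Were the driver to grow many such call sites, a small helper could express it once (hypothetical, not introduced by this patch):

static inline u32 field_put(u32 reg, u32 mask, unsigned int shift, u32 val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

With it, the body above would reduce to reg = field_put(reg, DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK, DE_STRETCH_FORMAT_PIXEL_FORMAT_SHIFT, fmt);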
@@ -105,31 +97,28 @@ int hw_fillrect(struct lynx_accel *accel,
write_dpr(accel, DE_WINDOW_DESTINATION_BASE, base); /* dpr40 */
write_dpr(accel, DE_PITCH,
- FIELD_VALUE(0, DE_PITCH, DESTINATION, pitch/Bpp)|
- FIELD_VALUE(0, DE_PITCH, SOURCE, pitch/Bpp)); /* dpr10 */
+ ((pitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
+ DE_PITCH_DESTINATION_MASK) |
+ (pitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
write_dpr(accel, DE_WINDOW_WIDTH,
- FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, pitch/Bpp)|
- FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, pitch/Bpp)); /* dpr44 */
+ ((pitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) &
+ DE_WINDOW_WIDTH_DST_MASK) |
+ (pitch / Bpp & DE_WINDOW_WIDTH_SRC_MASK)); /* dpr44 */
write_dpr(accel, DE_FOREGROUND, color); /* DPR14 */
write_dpr(accel, DE_DESTINATION,
- FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE)|
- FIELD_VALUE(0, DE_DESTINATION, X, x)|
- FIELD_VALUE(0, DE_DESTINATION, Y, y)); /* dpr4 */
+ ((x << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) |
+ (y & DE_DESTINATION_Y_MASK)); /* dpr4 */
write_dpr(accel, DE_DIMENSION,
- FIELD_VALUE(0, DE_DIMENSION, X, width)|
- FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr8 */
+ ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) |
+ (height & DE_DIMENSION_Y_ET_MASK)); /* dpr8 */
- deCtrl =
- FIELD_SET(0, DE_CONTROL, STATUS, START)|
- FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)|
- FIELD_SET(0, DE_CONTROL, LAST_PIXEL, ON)|
- FIELD_SET(0, DE_CONTROL, COMMAND, RECTANGLE_FILL)|
- FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2)|
- FIELD_VALUE(0, DE_CONTROL, ROP, rop); /* dpr0xc */
+ deCtrl = DE_CONTROL_STATUS | DE_CONTROL_LAST_PIXEL |
+ DE_CONTROL_COMMAND_RECTANGLE_FILL | DE_CONTROL_ROP_SELECT |
+ (rop & DE_CONTROL_ROP_MASK); /* dpr0xc */
write_dpr(accel, DE_CONTROL, deCtrl);
return 0;
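
The pitch handed to hw_fillrect() is in bytes while DE_PITCH wants pixels, hence the division by Bpp before the value lands in both halves of the register. A worked example with illustrative numbers:

	/* 800x600 @ 32bpp: pitch = 3200 bytes, Bpp = 4 */
	u32 pixels = 3200 / 4;				/* 800 == 0x320 */
	u32 dpr10  = ((pixels << DE_PITCH_DESTINATION_SHIFT) &
		      DE_PITCH_DESTINATION_MASK) |
		     (pixels & DE_PITCH_SOURCE_MASK);	/* 0x03200320 */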
@@ -237,18 +226,18 @@ unsigned int rop2) /* ROP value */
Note that the input pitch is a byte value, but the 2D Pitch register
uses pixel values, so a byte-to-pixel conversion is needed.
*/
- {
- write_dpr(accel, DE_PITCH,
- FIELD_VALUE(0, DE_PITCH, DESTINATION, (dPitch/Bpp)) |
- FIELD_VALUE(0, DE_PITCH, SOURCE, (sPitch/Bpp))); /* dpr10 */
- }
+ write_dpr(accel, DE_PITCH,
+ ((dPitch / Bpp << DE_PITCH_DESTINATION_SHIFT) &
+ DE_PITCH_DESTINATION_MASK) |
+ (sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
/* Screen Window width in Pixels.
2D engine uses this value to calculate the linear address in frame buffer for a given point.
*/
write_dpr(accel, DE_WINDOW_WIDTH,
- FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/Bpp)) |
- FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, (sPitch/Bpp))); /* dpr3c */
+ ((dPitch / Bpp << DE_WINDOW_WIDTH_DST_SHIFT) &
+ DE_WINDOW_WIDTH_DST_MASK) |
+ (sPitch / Bpp & DE_WINDOW_WIDTH_SRC_MASK)); /* dpr3c */
if (accel->de_wait() != 0)
return -1;
@@ -256,24 +245,18 @@ unsigned int rop2) /* ROP value */
{
write_dpr(accel, DE_SOURCE,
- FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
- FIELD_VALUE(0, DE_SOURCE, X_K1, sx) |
- FIELD_VALUE(0, DE_SOURCE, Y_K2, sy)); /* dpr0 */
+ ((sx << DE_SOURCE_X_K1_SHIFT) & DE_SOURCE_X_K1_MASK) |
+ (sy & DE_SOURCE_Y_K2_MASK)); /* dpr0 */
write_dpr(accel, DE_DESTINATION,
- FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
- FIELD_VALUE(0, DE_DESTINATION, X, dx) |
- FIELD_VALUE(0, DE_DESTINATION, Y, dy)); /* dpr04 */
+ ((dx << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) |
+ (dy & DE_DESTINATION_Y_MASK)); /* dpr04 */
write_dpr(accel, DE_DIMENSION,
- FIELD_VALUE(0, DE_DIMENSION, X, width) |
- FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
-
- de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
- FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
- FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) |
- ((nDirection == RIGHT_TO_LEFT) ?
- FIELD_SET(0, DE_CONTROL, DIRECTION, RIGHT_TO_LEFT)
- : FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)) |
- FIELD_SET(0, DE_CONTROL, STATUS, START);
+ ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) |
+ (height & DE_DIMENSION_Y_ET_MASK)); /* dpr08 */
+
+ de_ctrl = (rop2 & DE_CONTROL_ROP_MASK) | DE_CONTROL_ROP_SELECT |
+ ((nDirection == RIGHT_TO_LEFT) ? DE_CONTROL_DIRECTION : 0) |
+ DE_CONTROL_COMMAND_BITBLT | DE_CONTROL_STATUS;
write_dpr(accel, DE_CONTROL, de_ctrl); /* dpr0c */
}
@@ -287,10 +270,8 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
de_ctrl = read_dpr(accel, DE_CONTROL);
- de_ctrl &=
- FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) |
- FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT)|
- FIELD_MASK(DE_CONTROL_TRANSPARENCY);
+ de_ctrl &= (DE_CONTROL_TRANSPARENCY_MATCH |
+ DE_CONTROL_TRANSPARENCY_SELECT | DE_CONTROL_TRANSPARENCY);
return de_ctrl;
}
@@ -305,7 +286,7 @@ int hw_imageblit(struct lynx_accel *accel,
u32 dx,
u32 dy, /* Starting coordinate of destination surface */
u32 width,
- u32 height, /* width and height of rectange in pixel value */
+ u32 height, /* width and height of rectangle in pixel value */
u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data) */
u32 bColor, /* Background color (corresponding to a 0 in the monochrome data) */
u32 rop2) /* ROP value */
@@ -338,42 +319,39 @@ int hw_imageblit(struct lynx_accel *accel,
Note that the input pitch is a byte value, but the 2D Pitch register
uses pixel values, so a byte-to-pixel conversion is needed.
*/
- {
- write_dpr(accel, DE_PITCH,
- FIELD_VALUE(0, DE_PITCH, DESTINATION, dPitch/bytePerPixel) |
- FIELD_VALUE(0, DE_PITCH, SOURCE, dPitch/bytePerPixel)); /* dpr10 */
- }
+ write_dpr(accel, DE_PITCH,
+ ((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) &
+ DE_PITCH_DESTINATION_MASK) |
+ (dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */
/* Screen Window width in Pixels.
2D engine uses this value to calculate the linear address in frame buffer for a given point.
*/
write_dpr(accel, DE_WINDOW_WIDTH,
- FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/bytePerPixel)) |
- FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE, (dPitch/bytePerPixel)));
+ ((dPitch / bytePerPixel << DE_WINDOW_WIDTH_DST_SHIFT) &
+ DE_WINDOW_WIDTH_DST_MASK) |
+ (dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK));
/* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, and Y_K2 field is not used.
For mono bitmap, use startBit for X_K1. */
write_dpr(accel, DE_SOURCE,
- FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
- FIELD_VALUE(0, DE_SOURCE, X_K1_MONO, startBit)); /* dpr00 */
+ (startBit << DE_SOURCE_X_K1_SHIFT) &
+ DE_SOURCE_X_K1_MONO_MASK); /* dpr00 */
write_dpr(accel, DE_DESTINATION,
- FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
- FIELD_VALUE(0, DE_DESTINATION, X, dx) |
- FIELD_VALUE(0, DE_DESTINATION, Y, dy)); /* dpr04 */
+ ((dx << DE_DESTINATION_X_SHIFT) & DE_DESTINATION_X_MASK) |
+ (dy & DE_DESTINATION_Y_MASK)); /* dpr04 */
write_dpr(accel, DE_DIMENSION,
- FIELD_VALUE(0, DE_DIMENSION, X, width) |
- FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
+ ((width << DE_DIMENSION_X_SHIFT) & DE_DIMENSION_X_MASK) |
+ (height & DE_DIMENSION_Y_ET_MASK)); /* dpr08 */
write_dpr(accel, DE_FOREGROUND, fColor);
write_dpr(accel, DE_BACKGROUND, bColor);
- de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
- FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
- FIELD_SET(0, DE_CONTROL, COMMAND, HOST_WRITE) |
- FIELD_SET(0, DE_CONTROL, HOST, MONO) |
- FIELD_SET(0, DE_CONTROL, STATUS, START);
+ de_ctrl = (rop2 & DE_CONTROL_ROP_MASK) |
+ DE_CONTROL_ROP_SELECT | DE_CONTROL_COMMAND_HOST_WRITE |
+ DE_CONTROL_HOST | DE_CONTROL_STATUS;
write_dpr(accel, DE_CONTROL, de_ctrl | deGetTransparency(accel));
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index f252e47d5ee9..d59d005e0add 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -21,212 +21,162 @@
#define DE_PORT_ADDR_TYPE3 0x100000
#define DE_SOURCE 0x0
-#define DE_SOURCE_WRAP 31:31
-#define DE_SOURCE_WRAP_DISABLE 0
-#define DE_SOURCE_WRAP_ENABLE 1
-#define DE_SOURCE_X_K1 29:16
-#define DE_SOURCE_Y_K2 15:0
-#define DE_SOURCE_X_K1_MONO 20:16
+#define DE_SOURCE_WRAP BIT(31)
+#define DE_SOURCE_X_K1_SHIFT 16
+#define DE_SOURCE_X_K1_MASK (0x3fff << 16)
+#define DE_SOURCE_X_K1_MONO_MASK (0x1f << 16)
+#define DE_SOURCE_Y_K2_MASK 0xffff
#define DE_DESTINATION 0x4
-#define DE_DESTINATION_WRAP 31:31
-#define DE_DESTINATION_WRAP_DISABLE 0
-#define DE_DESTINATION_WRAP_ENABLE 1
-#define DE_DESTINATION_X 28:16
-#define DE_DESTINATION_Y 15:0
+#define DE_DESTINATION_WRAP BIT(31)
+#define DE_DESTINATION_X_SHIFT 16
+#define DE_DESTINATION_X_MASK (0x1fff << 16)
+#define DE_DESTINATION_Y_MASK 0xffff
#define DE_DIMENSION 0x8
-#define DE_DIMENSION_X 28:16
-#define DE_DIMENSION_Y_ET 15:0
+#define DE_DIMENSION_X_SHIFT 16
+#define DE_DIMENSION_X_MASK (0x1fff << 16)
+#define DE_DIMENSION_Y_ET_MASK 0xffff
#define DE_CONTROL 0xC
-#define DE_CONTROL_STATUS 31:31
-#define DE_CONTROL_STATUS_STOP 0
-#define DE_CONTROL_STATUS_START 1
-#define DE_CONTROL_PATTERN 30:30
-#define DE_CONTROL_PATTERN_MONO 0
-#define DE_CONTROL_PATTERN_COLOR 1
-#define DE_CONTROL_UPDATE_DESTINATION_X 29:29
-#define DE_CONTROL_UPDATE_DESTINATION_X_DISABLE 0
-#define DE_CONTROL_UPDATE_DESTINATION_X_ENABLE 1
-#define DE_CONTROL_QUICK_START 28:28
-#define DE_CONTROL_QUICK_START_DISABLE 0
-#define DE_CONTROL_QUICK_START_ENABLE 1
-#define DE_CONTROL_DIRECTION 27:27
-#define DE_CONTROL_DIRECTION_LEFT_TO_RIGHT 0
-#define DE_CONTROL_DIRECTION_RIGHT_TO_LEFT 1
-#define DE_CONTROL_MAJOR 26:26
-#define DE_CONTROL_MAJOR_X 0
-#define DE_CONTROL_MAJOR_Y 1
-#define DE_CONTROL_STEP_X 25:25
-#define DE_CONTROL_STEP_X_POSITIVE 1
-#define DE_CONTROL_STEP_X_NEGATIVE 0
-#define DE_CONTROL_STEP_Y 24:24
-#define DE_CONTROL_STEP_Y_POSITIVE 1
-#define DE_CONTROL_STEP_Y_NEGATIVE 0
-#define DE_CONTROL_STRETCH 23:23
-#define DE_CONTROL_STRETCH_DISABLE 0
-#define DE_CONTROL_STRETCH_ENABLE 1
-#define DE_CONTROL_HOST 22:22
-#define DE_CONTROL_HOST_COLOR 0
-#define DE_CONTROL_HOST_MONO 1
-#define DE_CONTROL_LAST_PIXEL 21:21
-#define DE_CONTROL_LAST_PIXEL_OFF 0
-#define DE_CONTROL_LAST_PIXEL_ON 1
-#define DE_CONTROL_COMMAND 20:16
-#define DE_CONTROL_COMMAND_BITBLT 0
-#define DE_CONTROL_COMMAND_RECTANGLE_FILL 1
-#define DE_CONTROL_COMMAND_DE_TILE 2
-#define DE_CONTROL_COMMAND_TRAPEZOID_FILL 3
-#define DE_CONTROL_COMMAND_ALPHA_BLEND 4
-#define DE_CONTROL_COMMAND_RLE_STRIP 5
-#define DE_CONTROL_COMMAND_SHORT_STROKE 6
-#define DE_CONTROL_COMMAND_LINE_DRAW 7
-#define DE_CONTROL_COMMAND_HOST_WRITE 8
-#define DE_CONTROL_COMMAND_HOST_READ 9
-#define DE_CONTROL_COMMAND_HOST_WRITE_BOTTOM_UP 10
-#define DE_CONTROL_COMMAND_ROTATE 11
-#define DE_CONTROL_COMMAND_FONT 12
-#define DE_CONTROL_COMMAND_TEXTURE_LOAD 15
-#define DE_CONTROL_ROP_SELECT 15:15
-#define DE_CONTROL_ROP_SELECT_ROP3 0
-#define DE_CONTROL_ROP_SELECT_ROP2 1
-#define DE_CONTROL_ROP2_SOURCE 14:14
-#define DE_CONTROL_ROP2_SOURCE_BITMAP 0
-#define DE_CONTROL_ROP2_SOURCE_PATTERN 1
-#define DE_CONTROL_MONO_DATA 13:12
-#define DE_CONTROL_MONO_DATA_NOT_PACKED 0
-#define DE_CONTROL_MONO_DATA_8_PACKED 1
-#define DE_CONTROL_MONO_DATA_16_PACKED 2
-#define DE_CONTROL_MONO_DATA_32_PACKED 3
-#define DE_CONTROL_REPEAT_ROTATE 11:11
-#define DE_CONTROL_REPEAT_ROTATE_DISABLE 0
-#define DE_CONTROL_REPEAT_ROTATE_ENABLE 1
-#define DE_CONTROL_TRANSPARENCY_MATCH 10:10
-#define DE_CONTROL_TRANSPARENCY_MATCH_OPAQUE 0
-#define DE_CONTROL_TRANSPARENCY_MATCH_TRANSPARENT 1
-#define DE_CONTROL_TRANSPARENCY_SELECT 9:9
-#define DE_CONTROL_TRANSPARENCY_SELECT_SOURCE 0
-#define DE_CONTROL_TRANSPARENCY_SELECT_DESTINATION 1
-#define DE_CONTROL_TRANSPARENCY 8:8
-#define DE_CONTROL_TRANSPARENCY_DISABLE 0
-#define DE_CONTROL_TRANSPARENCY_ENABLE 1
-#define DE_CONTROL_ROP 7:0
+#define DE_CONTROL_STATUS BIT(31)
+#define DE_CONTROL_PATTERN BIT(30)
+#define DE_CONTROL_UPDATE_DESTINATION_X BIT(29)
+#define DE_CONTROL_QUICK_START BIT(28)
+#define DE_CONTROL_DIRECTION BIT(27)
+#define DE_CONTROL_MAJOR BIT(26)
+#define DE_CONTROL_STEP_X BIT(25)
+#define DE_CONTROL_STEP_Y BIT(24)
+#define DE_CONTROL_STRETCH BIT(23)
+#define DE_CONTROL_HOST BIT(22)
+#define DE_CONTROL_LAST_PIXEL BIT(21)
+#define DE_CONTROL_COMMAND_SHIFT 16
+#define DE_CONTROL_COMMAND_MASK (0x1f << 16)
+#define DE_CONTROL_COMMAND_BITBLT (0x0 << 16)
+#define DE_CONTROL_COMMAND_RECTANGLE_FILL (0x1 << 16)
+#define DE_CONTROL_COMMAND_DE_TILE (0x2 << 16)
+#define DE_CONTROL_COMMAND_TRAPEZOID_FILL (0x3 << 16)
+#define DE_CONTROL_COMMAND_ALPHA_BLEND (0x4 << 16)
+#define DE_CONTROL_COMMAND_RLE_STRIP (0x5 << 16)
+#define DE_CONTROL_COMMAND_SHORT_STROKE (0x6 << 16)
+#define DE_CONTROL_COMMAND_LINE_DRAW (0x7 << 16)
+#define DE_CONTROL_COMMAND_HOST_WRITE (0x8 << 16)
+#define DE_CONTROL_COMMAND_HOST_READ (0x9 << 16)
+#define DE_CONTROL_COMMAND_HOST_WRITE_BOTTOM_UP (0xa << 16)
+#define DE_CONTROL_COMMAND_ROTATE (0xb << 16)
+#define DE_CONTROL_COMMAND_FONT (0xc << 16)
+#define DE_CONTROL_COMMAND_TEXTURE_LOAD (0xf << 16)
+#define DE_CONTROL_ROP_SELECT BIT(15)
+#define DE_CONTROL_ROP2_SOURCE BIT(14)
+#define DE_CONTROL_MONO_DATA_SHIFT 12
+#define DE_CONTROL_MONO_DATA_MASK (0x3 << 12)
+#define DE_CONTROL_MONO_DATA_NOT_PACKED (0x0 << 12)
+#define DE_CONTROL_MONO_DATA_8_PACKED (0x1 << 12)
+#define DE_CONTROL_MONO_DATA_16_PACKED (0x2 << 12)
+#define DE_CONTROL_MONO_DATA_32_PACKED (0x3 << 12)
+#define DE_CONTROL_REPEAT_ROTATE BIT(11)
+#define DE_CONTROL_TRANSPARENCY_MATCH BIT(10)
+#define DE_CONTROL_TRANSPARENCY_SELECT BIT(9)
+#define DE_CONTROL_TRANSPARENCY BIT(8)
+#define DE_CONTROL_ROP_MASK 0xff
/* Pseudo fields. */
-#define DE_CONTROL_SHORT_STROKE_DIR 27:24
-#define DE_CONTROL_SHORT_STROKE_DIR_225 0
-#define DE_CONTROL_SHORT_STROKE_DIR_135 1
-#define DE_CONTROL_SHORT_STROKE_DIR_315 2
-#define DE_CONTROL_SHORT_STROKE_DIR_45 3
-#define DE_CONTROL_SHORT_STROKE_DIR_270 4
-#define DE_CONTROL_SHORT_STROKE_DIR_90 5
-#define DE_CONTROL_SHORT_STROKE_DIR_180 8
-#define DE_CONTROL_SHORT_STROKE_DIR_0 10
-#define DE_CONTROL_ROTATION 25:24
-#define DE_CONTROL_ROTATION_0 0
-#define DE_CONTROL_ROTATION_270 1
-#define DE_CONTROL_ROTATION_90 2
-#define DE_CONTROL_ROTATION_180 3
+#define DE_CONTROL_SHORT_STROKE_DIR_MASK (0xf << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_225 (0x0 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_135 (0x1 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_315 (0x2 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_45 (0x3 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_270 (0x4 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_90 (0x5 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_180 (0x8 << 24)
+#define DE_CONTROL_SHORT_STROKE_DIR_0 (0xa << 24)
+#define DE_CONTROL_ROTATION_MASK (0x3 << 24)
+#define DE_CONTROL_ROTATION_0 (0x0 << 24)
+#define DE_CONTROL_ROTATION_270 (0x1 << 24)
+#define DE_CONTROL_ROTATION_90 (0x2 << 24)
+#define DE_CONTROL_ROTATION_180 (0x3 << 24)
#define DE_PITCH 0x000010
-#define DE_PITCH_DESTINATION 28:16
-#define DE_PITCH_SOURCE 12:0
+#define DE_PITCH_DESTINATION_SHIFT 16
+#define DE_PITCH_DESTINATION_MASK (0x1fff << 16)
+#define DE_PITCH_SOURCE_MASK 0x1fff
#define DE_FOREGROUND 0x000014
-#define DE_FOREGROUND_COLOR 31:0
+#define DE_FOREGROUND_COLOR_MASK 0xffffffff
#define DE_BACKGROUND 0x000018
-#define DE_BACKGROUND_COLOR 31:0
+#define DE_BACKGROUND_COLOR_MASK 0xffffffff
#define DE_STRETCH_FORMAT 0x00001C
-#define DE_STRETCH_FORMAT_PATTERN_XY 30:30
-#define DE_STRETCH_FORMAT_PATTERN_XY_NORMAL 0
-#define DE_STRETCH_FORMAT_PATTERN_XY_OVERWRITE 1
-#define DE_STRETCH_FORMAT_PATTERN_Y 29:27
-#define DE_STRETCH_FORMAT_PATTERN_X 25:23
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT 21:20
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT_8 0
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT_16 1
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT_32 2
-#define DE_STRETCH_FORMAT_PIXEL_FORMAT_24 3
-
-#define DE_STRETCH_FORMAT_ADDRESSING 19:16
-#define DE_STRETCH_FORMAT_ADDRESSING_XY 0
-#define DE_STRETCH_FORMAT_ADDRESSING_LINEAR 15
-#define DE_STRETCH_FORMAT_SOURCE_HEIGHT 11:0
+#define DE_STRETCH_FORMAT_PATTERN_XY BIT(30)
+#define DE_STRETCH_FORMAT_PATTERN_Y_SHIFT 27
+#define DE_STRETCH_FORMAT_PATTERN_Y_MASK (0x7 << 27)
+#define DE_STRETCH_FORMAT_PATTERN_X_SHIFT 23
+#define DE_STRETCH_FORMAT_PATTERN_X_MASK (0x7 << 23)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_SHIFT 20
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_MASK (0x3 << 20)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_8 (0x0 << 20)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_16 (0x1 << 20)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_32 (0x2 << 20)
+#define DE_STRETCH_FORMAT_PIXEL_FORMAT_24 (0x3 << 20)
+#define DE_STRETCH_FORMAT_ADDRESSING_SHIFT 16
+#define DE_STRETCH_FORMAT_ADDRESSING_MASK (0xf << 16)
+#define DE_STRETCH_FORMAT_ADDRESSING_XY (0x0 << 16)
+#define DE_STRETCH_FORMAT_ADDRESSING_LINEAR (0xf << 16)
+#define DE_STRETCH_FORMAT_SOURCE_HEIGHT_MASK 0xfff
#define DE_COLOR_COMPARE 0x000020
-#define DE_COLOR_COMPARE_COLOR 23:0
+#define DE_COLOR_COMPARE_COLOR_MASK 0xffffff
#define DE_COLOR_COMPARE_MASK 0x000024
-#define DE_COLOR_COMPARE_MASK_MASKS 23:0
+#define DE_COLOR_COMPARE_MASK_MASK 0xffffff
#define DE_MASKS 0x000028
-#define DE_MASKS_BYTE_MASK 31:16
-#define DE_MASKS_BIT_MASK 15:0
+#define DE_MASKS_BYTE_MASK (0xffff << 16)
+#define DE_MASKS_BIT_MASK 0xffff
#define DE_CLIP_TL 0x00002C
-#define DE_CLIP_TL_TOP 31:16
-#define DE_CLIP_TL_STATUS 13:13
-#define DE_CLIP_TL_STATUS_DISABLE 0
-#define DE_CLIP_TL_STATUS_ENABLE 1
-#define DE_CLIP_TL_INHIBIT 12:12
-#define DE_CLIP_TL_INHIBIT_OUTSIDE 0
-#define DE_CLIP_TL_INHIBIT_INSIDE 1
-#define DE_CLIP_TL_LEFT 11:0
+#define DE_CLIP_TL_TOP_MASK (0xffff << 16)
+#define DE_CLIP_TL_STATUS BIT(13)
+#define DE_CLIP_TL_INHIBIT BIT(12)
+#define DE_CLIP_TL_LEFT_MASK 0xfff
#define DE_CLIP_BR 0x000030
-#define DE_CLIP_BR_BOTTOM 31:16
-#define DE_CLIP_BR_RIGHT 12:0
+#define DE_CLIP_BR_BOTTOM_MASK (0xffff << 16)
+#define DE_CLIP_BR_RIGHT_MASK 0x1fff
#define DE_MONO_PATTERN_LOW 0x000034
-#define DE_MONO_PATTERN_LOW_PATTERN 31:0
+#define DE_MONO_PATTERN_LOW_PATTERN_MASK 0xffffffff
#define DE_MONO_PATTERN_HIGH 0x000038
-#define DE_MONO_PATTERN_HIGH_PATTERN 31:0
+#define DE_MONO_PATTERN_HIGH_PATTERN_MASK 0xffffffff
#define DE_WINDOW_WIDTH 0x00003C
-#define DE_WINDOW_WIDTH_DESTINATION 28:16
-#define DE_WINDOW_WIDTH_SOURCE 12:0
+#define DE_WINDOW_WIDTH_DST_SHIFT 16
+#define DE_WINDOW_WIDTH_DST_MASK (0x1fff << 16)
+#define DE_WINDOW_WIDTH_SRC_MASK 0x1fff
#define DE_WINDOW_SOURCE_BASE 0x000040
-#define DE_WINDOW_SOURCE_BASE_EXT 27:27
-#define DE_WINDOW_SOURCE_BASE_EXT_LOCAL 0
-#define DE_WINDOW_SOURCE_BASE_EXT_EXTERNAL 1
-#define DE_WINDOW_SOURCE_BASE_CS 26:26
-#define DE_WINDOW_SOURCE_BASE_CS_0 0
-#define DE_WINDOW_SOURCE_BASE_CS_1 1
-#define DE_WINDOW_SOURCE_BASE_ADDRESS 25:0
+#define DE_WINDOW_SOURCE_BASE_EXT BIT(27)
+#define DE_WINDOW_SOURCE_BASE_CS BIT(26)
+#define DE_WINDOW_SOURCE_BASE_ADDRESS_MASK 0x3ffffff
#define DE_WINDOW_DESTINATION_BASE 0x000044
-#define DE_WINDOW_DESTINATION_BASE_EXT 27:27
-#define DE_WINDOW_DESTINATION_BASE_EXT_LOCAL 0
-#define DE_WINDOW_DESTINATION_BASE_EXT_EXTERNAL 1
-#define DE_WINDOW_DESTINATION_BASE_CS 26:26
-#define DE_WINDOW_DESTINATION_BASE_CS_0 0
-#define DE_WINDOW_DESTINATION_BASE_CS_1 1
-#define DE_WINDOW_DESTINATION_BASE_ADDRESS 25:0
+#define DE_WINDOW_DESTINATION_BASE_EXT BIT(27)
+#define DE_WINDOW_DESTINATION_BASE_CS BIT(26)
+#define DE_WINDOW_DESTINATION_BASE_ADDRESS_MASK 0x3ffffff
#define DE_ALPHA 0x000048
-#define DE_ALPHA_VALUE 7:0
+#define DE_ALPHA_VALUE_MASK 0xff
#define DE_WRAP 0x00004C
-#define DE_WRAP_X 31:16
-#define DE_WRAP_Y 15:0
+#define DE_WRAP_X_MASK (0xffff << 16)
+#define DE_WRAP_Y_MASK 0xffff
#define DE_STATUS 0x000050
-#define DE_STATUS_CSC 1:1
-#define DE_STATUS_CSC_CLEAR 0
-#define DE_STATUS_CSC_NOT_ACTIVE 0
-#define DE_STATUS_CSC_ACTIVE 1
-#define DE_STATUS_2D 0:0
-#define DE_STATUS_2D_CLEAR 0
-#define DE_STATUS_2D_NOT_ACTIVE 0
-#define DE_STATUS_2D_ACTIVE 1
-
-
+#define DE_STATUS_CSC BIT(1)
+#define DE_STATUS_2D BIT(0)
/* blt direction */
#define TOP_TO_BOTTOM 0
@@ -268,7 +218,7 @@ int hw_imageblit(struct lynx_accel *accel,
u32 dx,
u32 dy, /* Starting coordinate of destination surface */
u32 width,
- u32 height, /* width and height of rectange in pixel value */
+ u32 height, /* width and height of rectangle in pixel value */
u32 fColor, /* Foreground color (corresponding to a 1 in the monochrome data) */
u32 bColor, /* Background color (corresponding to a 0 in the monochrome data) */
u32 rop2);
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index 3b7ce9275f51..d622d65b6cee 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -16,45 +16,34 @@
#include <linux/screen_info.h>
#include "sm750.h"
-#include "sm750_help.h"
#include "sm750_cursor.h"
-#define PEEK32(addr) \
-readl(cursor->mmio + (addr))
#define POKE32(addr, data) \
writel((data), cursor->mmio + (addr))
/* cursor control for voyager and 718/750*/
#define HWC_ADDRESS 0x0
-#define HWC_ADDRESS_ENABLE 31:31
-#define HWC_ADDRESS_ENABLE_DISABLE 0
-#define HWC_ADDRESS_ENABLE_ENABLE 1
-#define HWC_ADDRESS_EXT 27:27
-#define HWC_ADDRESS_EXT_LOCAL 0
-#define HWC_ADDRESS_EXT_EXTERNAL 1
-#define HWC_ADDRESS_CS 26:26
-#define HWC_ADDRESS_CS_0 0
-#define HWC_ADDRESS_CS_1 1
-#define HWC_ADDRESS_ADDRESS 25:0
+#define HWC_ADDRESS_ENABLE BIT(31)
+#define HWC_ADDRESS_EXT BIT(27)
+#define HWC_ADDRESS_CS BIT(26)
+#define HWC_ADDRESS_ADDRESS_MASK 0x3ffffff
#define HWC_LOCATION 0x4
-#define HWC_LOCATION_TOP 27:27
-#define HWC_LOCATION_TOP_INSIDE 0
-#define HWC_LOCATION_TOP_OUTSIDE 1
-#define HWC_LOCATION_Y 26:16
-#define HWC_LOCATION_LEFT 11:11
-#define HWC_LOCATION_LEFT_INSIDE 0
-#define HWC_LOCATION_LEFT_OUTSIDE 1
-#define HWC_LOCATION_X 10:0
+#define HWC_LOCATION_TOP BIT(27)
+#define HWC_LOCATION_Y_SHIFT 16
+#define HWC_LOCATION_Y_MASK (0x7ff << 16)
+#define HWC_LOCATION_LEFT BIT(11)
+#define HWC_LOCATION_X_MASK 0x7ff
#define HWC_COLOR_12 0x8
-#define HWC_COLOR_12_2_RGB565 31:16
-#define HWC_COLOR_12_1_RGB565 15:0
+#define HWC_COLOR_12_2_RGB565_SHIFT 16
+#define HWC_COLOR_12_2_RGB565_MASK (0xffff << 16)
+#define HWC_COLOR_12_1_RGB565_MASK 0xffff
#define HWC_COLOR_3 0xC
-#define HWC_COLOR_3_RGB565 15:0
+#define HWC_COLOR_3_RGB565_MASK 0xffff
/* hw_cursor_xxx works for voyager, 718 and 750 */
@@ -62,9 +51,7 @@ void hw_cursor_enable(struct lynx_cursor *cursor)
{
u32 reg;
- reg = FIELD_VALUE(0, HWC_ADDRESS, ADDRESS, cursor->offset)|
- FIELD_SET(0, HWC_ADDRESS, EXT, LOCAL)|
- FIELD_SET(0, HWC_ADDRESS, ENABLE, ENABLE);
+ reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE;
POKE32(HWC_ADDRESS, reg);
}
void hw_cursor_disable(struct lynx_cursor *cursor)
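
For reference, the removed FIELD_* expression and its replacement in
hw_cursor_enable() compute the same value. A minimal sketch of the reduction
(hwc_enable_value() is a hypothetical helper, assuming kernel u32 types):

/* FIELD_VALUE(0, HWC_ADDRESS, ADDRESS, off) confines off to bits 25:0,
 * FIELD_SET(0, HWC_ADDRESS, ENABLE, ENABLE) sets bit 31, and EXT=LOCAL
 * leaves bit 27 clear, so the whole expression reduces to: */
static inline u32 hwc_enable_value(u32 offset)
{
	return (offset & 0x3ffffffU) | (1U << 31); /* ADDRESS_MASK | ENABLE */
}
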
@@ -83,14 +70,17 @@ void hw_cursor_setPos(struct lynx_cursor *cursor,
{
u32 reg;
- reg = FIELD_VALUE(0, HWC_LOCATION, Y, y)|
- FIELD_VALUE(0, HWC_LOCATION, X, x);
+ reg = (((y << HWC_LOCATION_Y_SHIFT) & HWC_LOCATION_Y_MASK) |
+ (x & HWC_LOCATION_X_MASK));
POKE32(HWC_LOCATION, reg);
}
void hw_cursor_setColor(struct lynx_cursor *cursor,
u32 fg, u32 bg)
{
- POKE32(HWC_COLOR_12, (fg<<16)|(bg&0xffff));
+ u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) &
+ HWC_COLOR_12_2_RGB565_MASK;
+
+ POKE32(HWC_COLOR_12, reg | (bg & HWC_COLOR_12_1_RGB565_MASK));
POKE32(HWC_COLOR_3, 0xffe0);
}
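
A worked example of the packing done in hw_cursor_setPos() above, with
invented coordinates y = 100, x = 200:

/* (100 << 16) & (0x7ff << 16) == 0x00640000   y lands in bits 26:16
 *  200         & 0x7ff        == 0x000000c8   x lands in bits 10:0
 *  reg                        == 0x006400c8 */

hw_cursor_setColor() packs its two RGB565 colors into HWC_COLOR_12 the same
way: foreground in bits 31:16, background in bits 15:0.
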
@@ -115,15 +105,6 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
pstart = cursor->vstart;
pbuffer = pstart;
-/*
- if(odd &1){
- hw_cursor_setData2(cursor,rop,pcol,pmsk);
- }
- odd++;
- if(odd > 0xfffffff0)
- odd=0;
-*/
-
for (i = 0; i < count; i++) {
color = *pcol++;
mask = *pmsk++;
@@ -143,8 +124,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
iowrite16(data, pbuffer);
/* assume pitch is 1,2,4,8,...*/
- if ((i+1) % pitch == 0)
- {
+ if ((i + 1) % pitch == 0) {
/* need a return */
pstart += offset;
pbuffer = pstart;
diff --git a/drivers/staging/sm750fb/sm750_help.h b/drivers/staging/sm750fb/sm750_help.h
deleted file mode 100644
index c070cf25a7d6..000000000000
--- a/drivers/staging/sm750fb/sm750_help.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef LYNX_HELP_H__
-#define LYNX_HELP_H__
-
-/* Internal macros */
-#define _F_START(f) (0 ? f)
-#define _F_END(f) (1 ? f)
-#define _F_SIZE(f) (1 + _F_END(f) - _F_START(f))
-#define _F_MASK(f) (((1 << _F_SIZE(f)) - 1) << _F_START(f))
-#define _F_NORMALIZE(v, f) (((v) & _F_MASK(f)) >> _F_START(f))
-#define _F_DENORMALIZE(v, f) (((v) << _F_START(f)) & _F_MASK(f))
-
-/* Global macros */
-#define FIELD_GET(x, reg, field) \
-( \
- _F_NORMALIZE((x), reg ## _ ## field) \
-)
-
-#define FIELD_SET(x, reg, field, value) \
-( \
- (x & ~_F_MASK(reg ## _ ## field)) \
- | _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \
-)
-
-#define FIELD_VALUE(x, reg, field, value) \
-( \
- (x & ~_F_MASK(reg ## _ ## field)) \
- | _F_DENORMALIZE(value, reg ## _ ## field) \
-)
-
-#define FIELD_CLEAR(reg, field) \
-( \
- ~_F_MASK(reg ## _ ## field) \
-)
-
-/* Field Macros */
-#define FIELD_START(field) (0 ? field)
-#define FIELD_END(field) (1 ? field)
-#define FIELD_SIZE(field) (1 + FIELD_END(field) - FIELD_START(field))
-#define FIELD_MASK(field) (((1 << (FIELD_SIZE(field)-1)) | ((1 << (FIELD_SIZE(field)-1)) - 1)) << FIELD_START(field))
-
-static inline unsigned int absDiff(unsigned int a, unsigned int b)
-{
- if (a < b)
- return b-a;
- else
- return a-b;
-}
-
-/* n / d + 1 / 2 = (2n + d) / 2d */
-#define roundedDiv(num, denom) ((2 * (num) + (denom)) / (2 * (denom)))
-#define MHz(x) ((x) * 1000000)
-
-
-
-
-#endif
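
The deleted helpers hinge on a preprocessor trick worth spelling out: a field
defined as 25:0 is the middle-and-right half of a ternary expression, so
_F_START(f) expands to (0 ? 25:0) and evaluates to the low bit (0), while
_F_END(f) expands to (1 ? 25:0) and evaluates to the high bit (25). A minimal
sketch (not part of the patch):

#define MY_FIELD	25:0
#define F_START(f)	(0 ? f)	/* (0 ? 25 : 0) -> 0  */
#define F_END(f)	(1 ? f)	/* (1 ? 25 : 0) -> 25 */
/* size = 1 + 25 - 0 = 26, so the mask is ((1 << 26) - 1) << 0 == 0x3ffffff,
 * exactly the *_ADDRESS_MASK constants introduced earlier in this series. */

This is why every "#define NAME hi:lo" in the driver could be converted
mechanically to a shift and a mask.
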
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 41822c6c0380..2daeedd88c30 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -1,4 +1,3 @@
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -108,65 +107,62 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
/* for sm718, enable PCI burst */
if (sm750_dev->devid == 0x718) {
POKE32(SYSTEM_CTRL,
- FIELD_SET(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, PCI_BURST, ON));
+ PEEK32(SYSTEM_CTRL) | SYSTEM_CTRL_PCI_BURST);
}
if (getChipType() != SM750LE) {
+ unsigned int val;
/* does user need CRT ?*/
if (sm750_dev->nocrt) {
POKE32(MISC_CTRL,
- FIELD_SET(PEEK32(MISC_CTRL),
- MISC_CTRL,
- DAC_POWER, OFF));
+ PEEK32(MISC_CTRL) | MISC_CTRL_DAC_POWER_OFF);
/* shut off dpms */
- POKE32(SYSTEM_CTRL,
- FIELD_SET(PEEK32(SYSTEM_CTRL),
- SYSTEM_CTRL,
- DPMS, VNHN));
+ val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK;
+ val |= SYSTEM_CTRL_DPMS_VPHN;
+ POKE32(SYSTEM_CTRL, val);
} else {
POKE32(MISC_CTRL,
- FIELD_SET(PEEK32(MISC_CTRL),
- MISC_CTRL,
- DAC_POWER, ON));
+ PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF);
/* turn on dpms */
- POKE32(SYSTEM_CTRL,
- FIELD_SET(PEEK32(SYSTEM_CTRL),
- SYSTEM_CTRL,
- DPMS, VPHP));
+ val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK;
+ val |= SYSTEM_CTRL_DPMS_VPHP;
+ POKE32(SYSTEM_CTRL, val);
}
+ val = PEEK32(PANEL_DISPLAY_CTRL) &
+ ~(PANEL_DISPLAY_CTRL_DUAL_DISPLAY |
+ PANEL_DISPLAY_CTRL_DOUBLE_PIXEL);
switch (sm750_dev->pnltype) {
- case sm750_doubleTFT:
case sm750_24TFT:
+ break;
+ case sm750_doubleTFT:
+ val |= PANEL_DISPLAY_CTRL_DOUBLE_PIXEL;
+ break;
case sm750_dualTFT:
- POKE32(PANEL_DISPLAY_CTRL,
- FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL),
- PANEL_DISPLAY_CTRL,
- TFT_DISP,
- sm750_dev->pnltype));
- break;
+ val |= PANEL_DISPLAY_CTRL_DUAL_DISPLAY;
+ break;
}
+ POKE32(PANEL_DISPLAY_CTRL, val);
} else {
- /* for 750LE ,no DVI chip initilization makes Monitor no signal */
+ /* for 750LE, no DVI chip initialization leaves the monitor with no signal */
/* Set up GPIO for software I2C to program DVI chip in the
Xilinx SP605 board, in order to have video signal.
*/
- sm750_sw_i2c_init(0, 1);
+ sm750_sw_i2c_init(0, 1);
-
- /* Customer may NOT use CH7301 DVI chip, which has to be
- initialized differently.
- */
- if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
+ /* Customer may NOT use CH7301 DVI chip, which has to be
+ initialized differently.
+ */
+ if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
/* The following register values for CH7301 are from
Chrontel app note and our experiment.
*/
pr_info("yes,CH7301 DVI chip found\n");
- sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
- sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
- sm750_sw_i2c_write_reg(0xec, 0x49, 0xC0);
+ sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
+ sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
+ sm750_sw_i2c_write_reg(0xec, 0x49, 0xC0);
pr_info("okay,CH7301 DVI chip setup done\n");
- }
+ }
}
/* init 2d engine */
@@ -310,53 +306,51 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
if (crtc->channel != sm750_secondary) {
/* set pitch, offset ,width,start address ,etc... */
POKE32(PANEL_FB_ADDRESS,
- FIELD_SET(0, PANEL_FB_ADDRESS, STATUS, CURRENT)|
- FIELD_SET(0, PANEL_FB_ADDRESS, EXT, LOCAL)|
- FIELD_VALUE(0, PANEL_FB_ADDRESS, ADDRESS, crtc->oScreen));
+ crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK);
reg = var->xres * (var->bits_per_pixel >> 3);
/* crtc->channel is not numerically equal to par->index; be aware of that */
reg = ALIGN(reg, crtc->line_pad);
-
- POKE32(PANEL_FB_WIDTH,
- FIELD_VALUE(0, PANEL_FB_WIDTH, WIDTH, reg)|
- FIELD_VALUE(0, PANEL_FB_WIDTH, OFFSET, fix->line_length));
-
- POKE32(PANEL_WINDOW_WIDTH,
- FIELD_VALUE(0, PANEL_WINDOW_WIDTH, WIDTH, var->xres - 1)|
- FIELD_VALUE(0, PANEL_WINDOW_WIDTH, X, var->xoffset));
-
- POKE32(PANEL_WINDOW_HEIGHT,
- FIELD_VALUE(0, PANEL_WINDOW_HEIGHT, HEIGHT, var->yres_virtual - 1)|
- FIELD_VALUE(0, PANEL_WINDOW_HEIGHT, Y, var->yoffset));
+ reg = (reg << PANEL_FB_WIDTH_WIDTH_SHIFT) &
+ PANEL_FB_WIDTH_WIDTH_MASK;
+ reg |= (fix->line_length & PANEL_FB_WIDTH_OFFSET_MASK);
+ POKE32(PANEL_FB_WIDTH, reg);
+
+ reg = ((var->xres - 1) << PANEL_WINDOW_WIDTH_WIDTH_SHIFT) &
+ PANEL_WINDOW_WIDTH_WIDTH_MASK;
+ reg |= (var->xoffset & PANEL_WINDOW_WIDTH_X_MASK);
+ POKE32(PANEL_WINDOW_WIDTH, reg);
+
+ reg = ((var->yres_virtual - 1) <<
+ PANEL_WINDOW_HEIGHT_HEIGHT_SHIFT);
+ reg &= PANEL_WINDOW_HEIGHT_HEIGHT_MASK;
+ reg |= (var->yoffset & PANEL_WINDOW_HEIGHT_Y_MASK);
+ POKE32(PANEL_WINDOW_HEIGHT, reg);
POKE32(PANEL_PLANE_TL, 0);
- POKE32(PANEL_PLANE_BR,
- FIELD_VALUE(0, PANEL_PLANE_BR, BOTTOM, var->yres - 1)|
- FIELD_VALUE(0, PANEL_PLANE_BR, RIGHT, var->xres - 1));
+ reg = ((var->yres - 1) << PANEL_PLANE_BR_BOTTOM_SHIFT) &
+ PANEL_PLANE_BR_BOTTOM_MASK;
+ reg |= ((var->xres - 1) & PANEL_PLANE_BR_RIGHT_MASK);
+ POKE32(PANEL_PLANE_BR, reg);
/* set pixel format */
reg = PEEK32(PANEL_DISPLAY_CTRL);
- POKE32(PANEL_DISPLAY_CTRL,
- FIELD_VALUE(reg,
- PANEL_DISPLAY_CTRL, FORMAT,
- (var->bits_per_pixel >> 4)
- ));
+ POKE32(PANEL_DISPLAY_CTRL, reg | (var->bits_per_pixel >> 4));
} else {
/* not implemented now */
POKE32(CRT_FB_ADDRESS, crtc->oScreen);
reg = var->xres * (var->bits_per_pixel >> 3);
/* crtc->channel is not numerically equal to par->index; be aware of that */
- reg = ALIGN(reg, crtc->line_pad);
-
- POKE32(CRT_FB_WIDTH,
- FIELD_VALUE(0, CRT_FB_WIDTH, WIDTH, reg)|
- FIELD_VALUE(0, CRT_FB_WIDTH, OFFSET, fix->line_length));
+ reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT;
+ reg &= CRT_FB_WIDTH_WIDTH_MASK;
+ reg |= (fix->line_length & CRT_FB_WIDTH_OFFSET_MASK);
+ POKE32(CRT_FB_WIDTH, reg);
/* SET PIXEL FORMAT */
reg = PEEK32(CRT_DISPLAY_CTRL);
- reg = FIELD_VALUE(reg, CRT_DISPLAY_CTRL, FORMAT, var->bits_per_pixel >> 4);
+ reg |= ((var->bits_per_pixel >> 4) &
+ CRT_DISPLAY_CTRL_FORMAT_MASK);
POKE32(CRT_DISPLAY_CTRL, reg);
}
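
The pixel-format writes above lean on a numeric coincidence the driver has
always used: var->bits_per_pixel >> 4 maps 8 -> 0, 16 -> 1 and 32 -> 2, which
the driver takes as the FORMAT field's codes. A sketch with the mapping made
explicit (bpp_to_format() is a hypothetical name):

static inline u32 bpp_to_format(u32 bpp)
{
	return bpp >> 4;	/* 8bpp -> 0, 16bpp -> 1, 32bpp -> 2 */
}
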
@@ -382,31 +376,36 @@ int hw_sm750le_setBLANK(struct lynxfb_output *output, int blank)
switch (blank) {
case FB_BLANK_UNBLANK:
dpms = CRT_DISPLAY_CTRL_DPMS_0;
- crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
+ crtdb = 0;
break;
case FB_BLANK_NORMAL:
dpms = CRT_DISPLAY_CTRL_DPMS_0;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ crtdb = CRT_DISPLAY_CTRL_BLANK;
break;
case FB_BLANK_VSYNC_SUSPEND:
dpms = CRT_DISPLAY_CTRL_DPMS_2;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ crtdb = CRT_DISPLAY_CTRL_BLANK;
break;
case FB_BLANK_HSYNC_SUSPEND:
dpms = CRT_DISPLAY_CTRL_DPMS_1;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ crtdb = CRT_DISPLAY_CTRL_BLANK;
break;
case FB_BLANK_POWERDOWN:
dpms = CRT_DISPLAY_CTRL_DPMS_3;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ crtdb = CRT_DISPLAY_CTRL_BLANK;
break;
default:
return -EINVAL;
}
if (output->paths & sm750_crt) {
- POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, DPMS, dpms));
- POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb));
+ unsigned int val;
+
+ val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_DPMS_MASK;
+ POKE32(CRT_DISPLAY_CTRL, val | dpms);
+
+ val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_BLANK;
+ POKE32(CRT_DISPLAY_CTRL, val | crtdb);
}
return 0;
}
@@ -419,42 +418,45 @@ int hw_sm750_setBLANK(struct lynxfb_output *output, int blank)
switch (blank) {
case FB_BLANK_UNBLANK:
- pr_info("flag = FB_BLANK_UNBLANK\n");
+ pr_debug("flag = FB_BLANK_UNBLANK\n");
dpms = SYSTEM_CTRL_DPMS_VPHP;
- pps = PANEL_DISPLAY_CTRL_DATA_ENABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
+ pps = PANEL_DISPLAY_CTRL_DATA;
break;
case FB_BLANK_NORMAL:
- pr_info("flag = FB_BLANK_NORMAL\n");
+ pr_debug("flag = FB_BLANK_NORMAL\n");
dpms = SYSTEM_CTRL_DPMS_VPHP;
- pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ crtdb = CRT_DISPLAY_CTRL_BLANK;
break;
case FB_BLANK_VSYNC_SUSPEND:
dpms = SYSTEM_CTRL_DPMS_VNHP;
- pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ crtdb = CRT_DISPLAY_CTRL_BLANK;
break;
case FB_BLANK_HSYNC_SUSPEND:
dpms = SYSTEM_CTRL_DPMS_VPHN;
- pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ crtdb = CRT_DISPLAY_CTRL_BLANK;
break;
case FB_BLANK_POWERDOWN:
dpms = SYSTEM_CTRL_DPMS_VNHN;
- pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
- crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+ crtdb = CRT_DISPLAY_CTRL_BLANK;
break;
}
if (output->paths & sm750_crt) {
+ unsigned int val = PEEK32(SYSTEM_CTRL) & ~SYSTEM_CTRL_DPMS_MASK;
- POKE32(SYSTEM_CTRL, FIELD_VALUE(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, DPMS, dpms));
- POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb));
+ POKE32(SYSTEM_CTRL, val | dpms);
+
+ val = PEEK32(CRT_DISPLAY_CTRL) & ~CRT_DISPLAY_CTRL_BLANK;
+ POKE32(CRT_DISPLAY_CTRL, val | crtdb);
}
- if (output->paths & sm750_panel)
- POKE32(PANEL_DISPLAY_CTRL, FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, DATA, pps));
+ if (output->paths & sm750_panel) {
+ unsigned int val = PEEK32(PANEL_DISPLAY_CTRL);
+
+ val &= ~PANEL_DISPLAY_CTRL_DATA;
+ val |= pps;
+ POKE32(PANEL_DISPLAY_CTRL, val);
+ }
return 0;
}
@@ -468,21 +470,21 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
if (getChipType() == SM750LE) {
reg = PEEK32(DE_STATE1);
- reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, ON);
+ reg |= DE_STATE1_DE_ABORT;
POKE32(DE_STATE1, reg);
reg = PEEK32(DE_STATE1);
- reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, OFF);
+ reg &= ~DE_STATE1_DE_ABORT;
POKE32(DE_STATE1, reg);
} else {
/* engine reset */
reg = PEEK32(SYSTEM_CTRL);
- reg = FIELD_SET(reg, SYSTEM_CTRL, DE_ABORT, ON);
+ reg |= SYSTEM_CTRL_DE_ABORT;
POKE32(SYSTEM_CTRL, reg);
reg = PEEK32(SYSTEM_CTRL);
- reg = FIELD_SET(reg, SYSTEM_CTRL, DE_ABORT, OFF);
+ reg &= ~SYSTEM_CTRL_DE_ABORT;
POKE32(SYSTEM_CTRL, reg);
}
@@ -493,15 +495,15 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
int hw_sm750le_deWait(void)
{
int i = 0x10000000;
+ unsigned int mask = DE_STATE2_DE_STATUS_BUSY | DE_STATE2_DE_FIFO_EMPTY |
+ DE_STATE2_DE_MEM_FIFO_EMPTY;
while (i--) {
- unsigned int dwVal = PEEK32(DE_STATE2);
+ unsigned int val = PEEK32(DE_STATE2);
- if ((FIELD_GET(dwVal, DE_STATE2, DE_STATUS) == DE_STATE2_DE_STATUS_IDLE) &&
- (FIELD_GET(dwVal, DE_STATE2, DE_FIFO) == DE_STATE2_DE_FIFO_EMPTY) &&
- (FIELD_GET(dwVal, DE_STATE2, DE_MEM_FIFO) == DE_STATE2_DE_MEM_FIFO_EMPTY)) {
+ if ((val & mask) ==
+ (DE_STATE2_DE_FIFO_EMPTY | DE_STATE2_DE_MEM_FIFO_EMPTY))
return 0;
- }
}
/* timeout error */
return -1;
@@ -511,15 +513,16 @@ int hw_sm750le_deWait(void)
int hw_sm750_deWait(void)
{
int i = 0x10000000;
+ unsigned int mask = SYSTEM_CTRL_DE_STATUS_BUSY |
+ SYSTEM_CTRL_DE_FIFO_EMPTY |
+ SYSTEM_CTRL_DE_MEM_FIFO_EMPTY;
while (i--) {
- unsigned int dwVal = PEEK32(SYSTEM_CTRL);
+ unsigned int val = PEEK32(SYSTEM_CTRL);
- if ((FIELD_GET(dwVal, SYSTEM_CTRL, DE_STATUS) == SYSTEM_CTRL_DE_STATUS_IDLE) &&
- (FIELD_GET(dwVal, SYSTEM_CTRL, DE_FIFO) == SYSTEM_CTRL_DE_FIFO_EMPTY) &&
- (FIELD_GET(dwVal, SYSTEM_CTRL, DE_MEM_FIFO) == SYSTEM_CTRL_DE_MEM_FIFO_EMPTY)) {
+ if ((val & mask) ==
+ (SYSTEM_CTRL_DE_FIFO_EMPTY | SYSTEM_CTRL_DE_MEM_FIFO_EMPTY))
return 0;
- }
}
/* timeout error */
return -1;
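
Both deWait() rewrites use the same polling idiom: OR the busy bit and both
FIFO-empty bits into one mask, then compare the masked status against the two
EMPTY bits alone. That single compare requires BUSY == 0 and both EMPTY == 1
at once. A self-contained sketch under invented ST_* bit names:

#include <linux/io.h>
#include <linux/bits.h>

#define ST_BUSY			BIT(2)
#define ST_FIFO_EMPTY		BIT(1)
#define ST_MEM_FIFO_EMPTY	BIT(0)

static int wait_idle(void __iomem *status_reg)
{
	unsigned int mask = ST_BUSY | ST_FIFO_EMPTY | ST_MEM_FIFO_EMPTY;
	int i = 0x10000000;

	while (i--) {
		unsigned int val = readl(status_reg);

		/* busy bit must read 0, both empty bits must read 1 */
		if ((val & mask) == (ST_FIFO_EMPTY | ST_MEM_FIFO_EMPTY))
			return 0;
	}
	return -1;	/* timeout */
}
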
@@ -541,12 +544,12 @@ int hw_sm750_pan_display(struct lynxfb_crtc *crtc,
total += crtc->oScreen;
if (crtc->channel == sm750_primary) {
POKE32(PANEL_FB_ADDRESS,
- FIELD_VALUE(PEEK32(PANEL_FB_ADDRESS),
- PANEL_FB_ADDRESS, ADDRESS, total));
+ PEEK32(PANEL_FB_ADDRESS) |
+ (total & PANEL_FB_ADDRESS_ADDRESS_MASK));
} else {
POKE32(CRT_FB_ADDRESS,
- FIELD_VALUE(PEEK32(CRT_FB_ADDRESS),
- CRT_FB_ADDRESS, ADDRESS, total));
+ PEEK32(CRT_FB_ADDRESS) |
+ (total & CRT_FB_ADDRESS_ADDRESS_MASK));
}
return 0;
}
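
One hedged observation on the pan_display conversion: the new code ORs the
masked address into the old register value without clearing the previous
ADDRESS bits, so stale bits already set in that field survive the update. A
field-safe read-modify-write (sketch only, reusing the driver's names) would
clear first:

	u32 reg = PEEK32(PANEL_FB_ADDRESS);

	reg &= ~PANEL_FB_ADDRESS_ADDRESS_MASK;		/* drop stale address bits */
	reg |= total & PANEL_FB_ADDRESS_ADDRESS_MASK;	/* install the new ones */
	POKE32(PANEL_FB_ADDRESS, reg);

The CRT_FB_ADDRESS branch has the same shape.
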
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index 8565c2343968..723d5df44221 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -27,7 +27,7 @@ void speakup_start_ttys(void)
for (i = 0; i < MAX_NR_CONSOLES; i++) {
if (speakup_console[i] && speakup_console[i]->tty_stopped)
continue;
- if ((vc_cons[i].d != NULL) && (vc_cons[i].d->port.tty != NULL))
+ if ((vc_cons[i].d) && (vc_cons[i].d->port.tty))
start_tty(vc_cons[i].d->port.tty);
}
}
@@ -38,7 +38,7 @@ static void speakup_stop_ttys(void)
int i;
for (i = 0; i < MAX_NR_CONSOLES; i++)
- if ((vc_cons[i].d != NULL) && (vc_cons[i].d->port.tty != NULL))
+ if ((vc_cons[i].d) && (vc_cons[i].d->port.tty))
stop_tty(vc_cons[i].d->port.tty);
}
diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c
index d1ffdf4c0c4b..84989711ae67 100644
--- a/drivers/staging/speakup/devsynth.c
+++ b/drivers/staging/speakup/devsynth.c
@@ -76,9 +76,9 @@ void speakup_register_devsynth(void)
if (misc_registered != 0)
return;
/* zero it so if register fails, deregister will not ref invalid ptrs */
- if (misc_register(&synth_device))
+ if (misc_register(&synth_device)) {
pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
- else {
+ } else {
pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
MISC_MAJOR, SYNTH_MINOR);
misc_registered = 1;
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 5e1f16c36b49..8f058b42f68d 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -12,10 +12,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/slab.h>
@@ -28,7 +24,7 @@
#define PRESSED 1
#define RELEASED 0
-static DEFINE_PER_CPU(bool, reporting_keystroke);
+static DEFINE_PER_CPU(int, reporting_keystroke);
static struct input_dev *virt_keyboard;
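
DEFINE_PER_CPU(type, name) gives every CPU its own instance of the variable,
accessed through the this_cpu_*() helpers. A hedged usage sketch
(set_reporting() is an invented helper):

#include <linux/percpu.h>

DEFINE_PER_CPU(int, reporting_keystroke);

static void set_reporting(int on)
{
	/* writes only the executing CPU's copy */
	this_cpu_write(reporting_keystroke, on);
}
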
diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c
index 12f880ed4ddf..8960079e4d60 100644
--- a/drivers/staging/speakup/i18n.c
+++ b/drivers/staging/speakup/i18n.c
@@ -393,10 +393,7 @@ static const int num_groups = ARRAY_SIZE(all_groups);
char *spk_msg_get(enum msg_index_t index)
{
- char *ch;
-
- ch = speakup_msgs[index];
- return ch;
+ return speakup_msgs[index];
}
/*
diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c
index 02d5c706aee7..ce94cb13e256 100644
--- a/drivers/staging/speakup/keyhelp.c
+++ b/drivers/staging/speakup/keyhelp.c
@@ -14,10 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/keyboard.h>
@@ -74,7 +70,7 @@ static void build_key_data(void)
for (i = 0; i < nstates; i++, kp++) {
if (!*kp)
continue;
- if ((state_tbl[i]&16) != 0 && *kp == SPK_KEY)
+ if ((state_tbl[i] & 16) != 0 && *kp == SPK_KEY)
continue;
counters[*kp]++;
}
@@ -83,7 +79,7 @@ static void build_key_data(void)
if (counters[i] == 0)
continue;
key_offsets[i] = offset;
- offset += (counters[i]+1);
+ offset += (counters[i] + 1);
if (offset >= MAXKEYS)
break;
}
@@ -97,7 +93,7 @@ static void build_key_data(void)
ch1 = *kp++;
if (!ch1)
continue;
- if ((state_tbl[i]&16) != 0 && ch1 == SPK_KEY)
+ if ((state_tbl[i] & 16) != 0 && ch1 == SPK_KEY)
continue;
key = (state_tbl[i] << 8) + ch;
counters[ch1]--;
@@ -130,14 +126,14 @@ static int help_init(void)
int i;
int num_funcs = MSG_FUNCNAMES_END - MSG_FUNCNAMES_START + 1;
- state_tbl = spk_our_keys[0]+SHIFT_TBL_SIZE+2;
+ state_tbl = spk_our_keys[0] + SHIFT_TBL_SIZE + 2;
for (i = 0; i < num_funcs; i++) {
char *cur_funcname = spk_msg_get(MSG_FUNCNAMES_START + i);
if (start == *cur_funcname)
continue;
start = *cur_funcname;
- letter_offsets[(start&31)-1] = i;
+ letter_offsets[(start & 31) - 1] = i;
}
return 0;
}
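
The (start & 31) - 1 expression above maps an ASCII letter to a 0-based
alphabet index regardless of case: 'a' is 0x61 and 'A' is 0x41, and both share
the same low five bits. A worked example:

/* 'k' == 0x6b; 0x6b & 31 == 11; 11 - 1 == 10,
 * so letter_offsets[10] records the first function name
 * beginning with 'k' or 'K'. */
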
@@ -160,12 +156,12 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
ch |= 32; /* lower case */
if (ch < 'a' || ch > 'z')
return -1;
- if (letter_offsets[ch-'a'] == -1) {
+ if (letter_offsets[ch - 'a'] == -1) {
synth_printf(spk_msg_get(MSG_NO_COMMAND), ch);
synth_printf("\n");
return 1;
}
- cur_item = letter_offsets[ch-'a'];
+ cur_item = letter_offsets[ch - 'a'];
} else if (type == KT_CUR) {
if (ch == 0
&& (MSG_FUNCNAMES_START + cur_item + 1) <=
@@ -186,7 +182,7 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
name = NULL;
if ((type != KT_SPKUP) && (key > 0) && (key <= num_key_names)) {
synth_printf("%s\n",
- spk_msg_get(MSG_KEYNAMES_START + key-1));
+ spk_msg_get(MSG_KEYNAMES_START + key - 1));
return 1;
}
for (i = 0; funcvals[i] != 0 && !name; i++) {
@@ -195,7 +191,7 @@ int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key)
}
if (!name)
return -1;
- kp = spk_our_keys[key]+1;
+ kp = spk_our_keys[key] + 1;
for (i = 0; i < nstates; i++) {
if (ch == kp[i])
break;
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index fdfeb42b2b8f..528cbdce4227 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -862,66 +862,66 @@ static struct kobj_attribute version_attribute =
__ATTR_RO(version);
static struct kobj_attribute delimiters_attribute =
- __ATTR(delimiters, S_IWUSR|S_IRUGO, punc_show, punc_store);
+ __ATTR(delimiters, S_IWUSR | S_IRUGO, punc_show, punc_store);
static struct kobj_attribute ex_num_attribute =
- __ATTR(ex_num, S_IWUSR|S_IRUGO, punc_show, punc_store);
+ __ATTR(ex_num, S_IWUSR | S_IRUGO, punc_show, punc_store);
static struct kobj_attribute punc_all_attribute =
- __ATTR(punc_all, S_IWUSR|S_IRUGO, punc_show, punc_store);
+ __ATTR(punc_all, S_IWUSR | S_IRUGO, punc_show, punc_store);
static struct kobj_attribute punc_most_attribute =
- __ATTR(punc_most, S_IWUSR|S_IRUGO, punc_show, punc_store);
+ __ATTR(punc_most, S_IWUSR | S_IRUGO, punc_show, punc_store);
static struct kobj_attribute punc_some_attribute =
- __ATTR(punc_some, S_IWUSR|S_IRUGO, punc_show, punc_store);
+ __ATTR(punc_some, S_IWUSR | S_IRUGO, punc_show, punc_store);
static struct kobj_attribute repeats_attribute =
- __ATTR(repeats, S_IWUSR|S_IRUGO, punc_show, punc_store);
+ __ATTR(repeats, S_IWUSR | S_IRUGO, punc_show, punc_store);
static struct kobj_attribute attrib_bleep_attribute =
- __ATTR(attrib_bleep, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(attrib_bleep, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute bell_pos_attribute =
- __ATTR(bell_pos, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(bell_pos, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute bleep_time_attribute =
- __ATTR(bleep_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(bleep_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute bleeps_attribute =
- __ATTR(bleeps, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(bleeps, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute cursor_time_attribute =
- __ATTR(cursor_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(cursor_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute key_echo_attribute =
- __ATTR(key_echo, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(key_echo, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute no_interrupt_attribute =
- __ATTR(no_interrupt, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(no_interrupt, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punc_level_attribute =
- __ATTR(punc_level, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(punc_level, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute reading_punc_attribute =
- __ATTR(reading_punc, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(reading_punc, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute say_control_attribute =
- __ATTR(say_control, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(say_control, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute say_word_ctl_attribute =
- __ATTR(say_word_ctl, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(say_word_ctl, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute spell_delay_attribute =
- __ATTR(spell_delay, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(spell_delay, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* These attributes are i18n related.
*/
static struct kobj_attribute announcements_attribute =
- __ATTR(announcements, S_IWUSR|S_IRUGO, message_show, message_store);
+ __ATTR(announcements, S_IWUSR | S_IRUGO, message_show, message_store);
static struct kobj_attribute characters_attribute =
- __ATTR(characters, S_IWUSR|S_IRUGO, chars_chartab_show,
+ __ATTR(characters, S_IWUSR | S_IRUGO, chars_chartab_show,
chars_chartab_store);
static struct kobj_attribute chartab_attribute =
- __ATTR(chartab, S_IWUSR|S_IRUGO, chars_chartab_show,
+ __ATTR(chartab, S_IWUSR | S_IRUGO, chars_chartab_show,
chars_chartab_store);
static struct kobj_attribute ctl_keys_attribute =
- __ATTR(ctl_keys, S_IWUSR|S_IRUGO, message_show, message_store);
+ __ATTR(ctl_keys, S_IWUSR | S_IRUGO, message_show, message_store);
static struct kobj_attribute colors_attribute =
- __ATTR(colors, S_IWUSR|S_IRUGO, message_show, message_store);
+ __ATTR(colors, S_IWUSR | S_IRUGO, message_show, message_store);
static struct kobj_attribute formatted_attribute =
- __ATTR(formatted, S_IWUSR|S_IRUGO, message_show, message_store);
+ __ATTR(formatted, S_IWUSR | S_IRUGO, message_show, message_store);
static struct kobj_attribute function_names_attribute =
- __ATTR(function_names, S_IWUSR|S_IRUGO, message_show, message_store);
+ __ATTR(function_names, S_IWUSR | S_IRUGO, message_show, message_store);
static struct kobj_attribute key_names_attribute =
- __ATTR(key_names, S_IWUSR|S_IRUGO, message_show, message_store);
+ __ATTR(key_names, S_IWUSR | S_IRUGO, message_show, message_store);
static struct kobj_attribute states_attribute =
- __ATTR(states, S_IWUSR|S_IRUGO, message_show, message_store);
+ __ATTR(states, S_IWUSR | S_IRUGO, message_show, message_store);
/*
* Create groups of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 30cf973f326d..a22fb07512a1 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
@@ -1192,7 +1188,8 @@ static void do_handle_latin(struct vc_data *vc, u_char value, char up_flag)
spin_lock_irqsave(&speakup_info.spinlock, flags);
if (up_flag) {
- spk_lastkey = spk_keydown = 0;
+ spk_lastkey = 0;
+ spk_keydown = 0;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return;
}
@@ -1666,7 +1663,8 @@ static void cursor_done(u_long data)
if (win_enabled) {
if (vc->vc_x >= win_left && vc->vc_x <= win_right &&
vc->vc_y >= win_top && vc->vc_y <= win_bottom) {
- spk_keydown = is_cursor = 0;
+ spk_keydown = 0;
+ is_cursor = 0;
goto out;
}
}
@@ -1676,7 +1674,8 @@ static void cursor_done(u_long data)
}
if (cursor_track == CT_Highlight) {
if (speak_highlight(vc)) {
- spk_keydown = is_cursor = 0;
+ spk_keydown = 0;
+ is_cursor = 0;
goto out;
}
}
@@ -1686,7 +1685,8 @@ static void cursor_done(u_long data)
say_line_from_to(vc, 0, vc->vc_cols, 0);
else
say_char(vc);
- spk_keydown = is_cursor = 0;
+ spk_keydown = 0;
+ is_cursor = 0;
out:
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
}
@@ -1866,8 +1866,10 @@ static void speakup_win_set(struct vc_data *vc)
static void speakup_win_clear(struct vc_data *vc)
{
- win_top = win_bottom = 0;
- win_left = win_right = 0;
+ win_top = 0;
+ win_bottom = 0;
+ win_left = 0;
+ win_right = 0;
win_start = 0;
synth_printf("%s\n", spk_msg_get(MSG_WINDOW_CLEARED));
}
@@ -2002,10 +2004,13 @@ static u_char key_speakup, spk_key_locked;
static void speakup_lock(struct vc_data *vc)
{
- if (!spk_key_locked)
- spk_key_locked = key_speakup = 16;
- else
- spk_key_locked = key_speakup = 0;
+ if (!spk_key_locked) {
+ spk_key_locked = 16;
+ key_speakup = 16;
+ } else {
+ spk_key_locked = 0;
+ key_speakup = 0;
+ }
}
typedef void (*spkup_hand) (struct vc_data *);
@@ -2269,7 +2274,7 @@ static void __exit speakup_exit(void)
unregister_vt_notifier(&vt_notifier_block);
speakup_unregister_devsynth();
speakup_cancel_paste();
- del_timer(&cursor_timer);
+ del_timer_sync(&cursor_timer);
kthread_stop(speakup_task);
speakup_task = NULL;
mutex_lock(&spk_mutex);
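
The del_timer() -> del_timer_sync() change matters on this exit path:
del_timer() only deactivates a pending timer, while del_timer_sync() also
waits for a handler already running on another CPU, so the module cannot be
unloaded out from under its own callback. A hedged sketch of the rule:

/* Sketch: on teardown, use the _sync variant before freeing anything
 * the timer callback may touch. */
static void __exit my_exit(void)
{
	del_timer_sync(&cursor_timer);	/* waits out a running handler */
	/* only now is it safe to stop threads and free callback data */
}
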
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index a5bbb338f275..c2c435cc3d63 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -8,7 +8,8 @@
#include <linux/serial_core.h>
/* WARNING: Do not change this to <linux/serial.h> without testing that
- * SERIAL_PORT_DFNS does get defined to the appropriate value. */
+ * SERIAL_PORT_DFNS does get defined to the appropriate value.
+ */
#include <asm/serial.h>
#ifndef SERIAL_PORT_DFNS
@@ -92,8 +93,6 @@ const struct old_serial_port *spk_serial_init(int index)
static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
{
unsigned long flags;
-/*printk(KERN_ERR "in irq\n"); */
-/*pr_warn("in IRQ\n"); */
int c;
spin_lock_irqsave(&speakup_info.spinlock, flags);
@@ -101,8 +100,6 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
c = inb_p(speakup_info.port_tts+UART_RX);
synth->read_buff_add((u_char) c);
-/*printk(KERN_ERR "c = %d\n", c); */
-/*pr_warn("C = %d\n", c); */
}
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
return IRQ_HANDLED;
@@ -175,9 +172,6 @@ int spk_wait_for_xmitr(void)
while (!((inb_p(speakup_info.port_tts + UART_MSR)) & UART_MSR_CTS)) {
/* CTS */
if (--tmout == 0) {
- /* pr_warn("%s: timed out (cts)\n",
- * synth->long_name);
- */
timeouts++;
return 0;
}
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index f418893928ec..efb791bb642b 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -14,11 +14,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
* This driver is for the Aicom Acent PC internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_acntsa.c b/drivers/staging/speakup/speakup_acntsa.c
index af2690f38950..34f45d3549b2 100644
--- a/drivers/staging/speakup/speakup_acntsa.c
+++ b/drivers/staging/speakup/speakup_acntsa.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 51788f7d4480..3cbc8a7ad1ef 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_audptr.c b/drivers/staging/speakup/speakup_audptr.c
index a9a687232955..7a12b8408b67 100644
--- a/drivers/staging/speakup/speakup_audptr.c
+++ b/drivers/staging/speakup/speakup_audptr.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_bns.c b/drivers/staging/speakup/speakup_bns.c
index 80f8358d4199..570f0c21745e 100644
--- a/drivers/staging/speakup/speakup_bns.c
+++ b/drivers/staging/speakup/speakup_bns.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index e0b5db9bb46e..1a5cf3d0a559 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
@@ -71,30 +67,30 @@ static struct var_t vars[] = {
* These attributes will appear in /sys/accessibility/speakup/decext.
*/
static struct kobj_attribute caps_start_attribute =
- __ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
- __ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
- __ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
- __ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
- __ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
- __ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(voice, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
- __ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
- __ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
- __ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
- __ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
- __ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
- __ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+ __ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 4893fef3f894..d6479bd2163b 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -24,10 +24,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 09063b82326f..764656759fbf 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index 345efd3344b0..38aa4013bf62 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
* This driver is for the RC Systems DoubleTalk PC internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_dummy.c b/drivers/staging/speakup/speakup_dummy.c
index f66811269475..87d2a8002b47 100644
--- a/drivers/staging/speakup/speakup_dummy.c
+++ b/drivers/staging/speakup/speakup_dummy.c
@@ -17,10 +17,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 6ea027365664..5e2170bf4a8b 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -13,10 +13,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
* This driver is for the Keynote Gold internal synthesizer.
diff --git a/drivers/staging/speakup/speakup_ltlk.c b/drivers/staging/speakup/speakup_ltlk.c
index cc4806be806b..b474e8b65f9a 100644
--- a/drivers/staging/speakup/speakup_ltlk.c
+++ b/drivers/staging/speakup/speakup_ltlk.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index b2eb5b133a5d..6b1d0f538bbd 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -14,9 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
 * this code is specifically written as a driver for the speakup screenreview
* package and is not a general device driver.
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index 1007a6168c3c..e449f2770c1f 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index 6c21e7112210..fd98d4ffcb3e 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -15,10 +15,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
 * specifically written as a driver for the speakup screenreview
 * package and is not a general device driver.
*/
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 9bb281d36556..98c4b6f0344a 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _SPEAKUP_PRIVATE_H
#define _SPEAKUP_PRIVATE_H
diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h
index 3116ef78c196..130e9cb0118b 100644
--- a/drivers/staging/speakup/spk_priv_keyinfo.h
+++ b/drivers/staging/speakup/spk_priv_keyinfo.h
@@ -16,10 +16,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _SPEAKUP_KEYINFO_H
diff --git a/drivers/staging/speakup/spkguide.txt b/drivers/staging/speakup/spkguide.txt
index b699de3c649f..c23549c54c3c 100644
--- a/drivers/staging/speakup/spkguide.txt
+++ b/drivers/staging/speakup/spkguide.txt
@@ -1179,7 +1179,6 @@ if desired.
Copyright (C) 2000,2001,2002 Free Software Foundation, Inc.
- 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 01eddab93c66..4f462c35fdd9 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -179,7 +179,7 @@ int spk_synth_is_alive_restart(struct spk_synth *synth)
{
if (synth->alive)
return 1;
- if (!synth->alive && spk_wait_for_xmitr() > 0) {
+ if (spk_wait_for_xmitr() > 0) {
/* restart */
synth->alive = 1;
synth_printf("%s", synth->init);
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index ab4fe8de415f..e1393d2a2b0f 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -176,7 +176,6 @@ struct punc_var_t *spk_get_punc_var(enum var_id_t var_id)
int spk_set_num_var(int input, struct st_var_header *var, int how)
{
int val;
- short ret = 0;
int *p_val = var->p_val;
int l;
char buf[32];
@@ -186,50 +185,51 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
if (!var_data)
return -ENODATA;
- if (how == E_NEW_DEFAULT) {
+ val = var_data->u.n.value;
+ switch (how) {
+ case E_NEW_DEFAULT:
if (input < var_data->u.n.low || input > var_data->u.n.high)
return -ERANGE;
var_data->u.n.default_val = input;
return 0;
- }
- if (how == E_DEFAULT) {
+ case E_DEFAULT:
val = var_data->u.n.default_val;
- ret = -ERESTART;
- } else {
- if (how == E_SET)
- val = input;
- else
- val = var_data->u.n.value;
- if (how == E_INC)
- val += input;
- else if (how == E_DEC)
- val -= input;
- if (val < var_data->u.n.low || val > var_data->u.n.high)
- return -ERANGE;
+ break;
+ case E_SET:
+ val = input;
+ break;
+ case E_INC:
+ val += input;
+ break;
+ case E_DEC:
+ val -= input;
+ break;
}
+
+ if (val < var_data->u.n.low || val > var_data->u.n.high)
+ return -ERANGE;
+
var_data->u.n.value = val;
if (var->var_type == VAR_TIME && p_val != NULL) {
*p_val = msecs_to_jiffies(val);
- return ret;
+ return 0;
}
if (p_val != NULL)
*p_val = val;
if (var->var_id == PUNC_LEVEL) {
spk_punc_mask = spk_punc_masks[val];
- return ret;
+ return 0;
}
if (var_data->u.n.multiplier != 0)
val *= var_data->u.n.multiplier;
val += var_data->u.n.offset;
if (var->var_id < FIRST_SYNTH_VAR || !synth)
- return ret;
- if (synth->synth_adjust) {
- int status = synth->synth_adjust(var);
+ return 0;
+ if (synth->synth_adjust)
+ return synth->synth_adjust(var);
- return (status != 0) ? status : ret;
- }
if (!var_data->u.n.synth_fmt)
- return ret;
+ return 0;
if (var->var_id == PITCH)
cp = spk_pitch_buff;
else
@@ -240,7 +240,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
l = sprintf(cp,
var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
synth_printf("%s", cp);
- return ret;
+ return 0;
}
int spk_set_string_var(const char *page, struct st_var_header *var, int len)
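
The spk_set_num_var() rewrite is a classic refactor: derive the candidate
value per operation in a switch, then range-check once, instead of repeating
the check per branch and threading a ret variable through every exit. A
self-contained sketch of the shape (apply_num_op() and the enum are
stand-ins for the driver's real definitions):

#include <linux/errno.h>

enum { E_SET, E_INC, E_DEC };	/* stand-ins for the var-change codes */

static int apply_num_op(int cur, int input, int how,
			int low, int high, int *out)
{
	int val = cur;

	switch (how) {
	case E_SET:
		val = input;
		break;
	case E_INC:
		val += input;
		break;
	case E_DEC:
		val -= input;
		break;
	}
	if (val < low || val > high)
		return -ERANGE;	/* one range check covers every path */
	*out = val;
	return 0;
}
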
diff --git a/drivers/staging/staging.c b/drivers/staging/staging.c
deleted file mode 100644
index 233e589c0932..000000000000
--- a/drivers/staging/staging.c
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-static int __init staging_init(void)
-{
- return 0;
-}
-
-static void __exit staging_exit(void)
-{
-}
-
-module_init(staging_init);
-module_exit(staging_exit);
-
-MODULE_AUTHOR("Greg Kroah-Hartman");
-MODULE_DESCRIPTION("Staging Core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/ste_rmi4/Kconfig b/drivers/staging/ste_rmi4/Kconfig
deleted file mode 100644
index e8679509e525..000000000000
--- a/drivers/staging/ste_rmi4/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config TOUCHSCREEN_SYNAPTICS_I2C_RMI4
- tristate "Synaptics i2c rmi4 touchscreen"
- depends on I2C && INPUT
- help
- Say Y here if you have a Synaptics RMI4 and
- want to enable support for the built-in touchscreen.
-
- To compile this driver as a module, choose M here: the
- module will be called synaptics_rmi4_ts.
diff --git a/drivers/staging/ste_rmi4/Makefile b/drivers/staging/ste_rmi4/Makefile
deleted file mode 100644
index 6cce2ed187ef..000000000000
--- a/drivers/staging/ste_rmi4/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-#
-# Makefile for the RMI4 touchscreen driver.
-#
-obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += synaptics_i2c_rmi4.o
diff --git a/drivers/staging/ste_rmi4/TODO b/drivers/staging/ste_rmi4/TODO
deleted file mode 100644
index 9be2437da85f..000000000000
--- a/drivers/staging/ste_rmi4/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO
-----
-
-Wait for the official upstream synaptics rmi4 clearpad drivers as promised over the past few months
-Merge any device support needed from this driver into it
-Delete this driver
-
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
deleted file mode 100644
index 824d460911ec..000000000000
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ /dev/null
@@ -1,1140 +0,0 @@
-/**
- *
- * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
- * Copyright (c) 2007-2010, Synaptics Incorporated
- *
- * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
- * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Copyright 2010 (c) ST-Ericsson AB
- */
-/*
- * This file is licensed under the GPL2 license.
- *
- *#############################################################################
- * GPL
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- *#############################################################################
- */
-
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/regulator/consumer.h>
-#include <linux/module.h>
-#include <linux/input/mt.h>
-#include "synaptics_i2c_rmi4.h"
-
-/* TODO: for multiple device support will need a per-device mutex */
-#define DRIVER_NAME "synaptics_rmi4_i2c"
-
-#define MAX_ERROR_REPORT 6
-#define MAX_TOUCH_MAJOR 15
-#define MAX_RETRY_COUNT 5
-#define STD_QUERY_LEN 21
-#define PAGE_LEN 2
-#define DATA_BUF_LEN 32
-#define BUF_LEN 37
-#define QUERY_LEN 9
-#define DATA_LEN 12
-#define HAS_TAP 0x01
-#define HAS_PALMDETECT 0x01
-#define HAS_ROTATE 0x02
-#define HAS_TAPANDHOLD 0x02
-#define HAS_DOUBLETAP 0x04
-#define HAS_EARLYTAP 0x08
-#define HAS_RELEASE 0x08
-#define HAS_FLICK 0x10
-#define HAS_PRESS 0x20
-#define HAS_PINCH 0x40
-
-#define MASK_16BIT 0xFFFF
-#define MASK_8BIT 0xFF
-#define MASK_7BIT 0x7F
-#define MASK_5BIT 0x1F
-#define MASK_4BIT 0x0F
-#define MASK_3BIT 0x07
-#define MASK_2BIT 0x03
-#define TOUCHPAD_CTRL_INTR 0x8
-#define PDT_START_SCAN_LOCATION (0x00E9)
-#define PDT_END_SCAN_LOCATION (0x000A)
-#define PDT_ENTRY_SIZE (0x0006)
-#define SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM (0x11)
-#define SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM (0x01)
-
-/**
- * struct synaptics_rmi4_fn_desc - contains the function descriptor information
- * @query_base_addr: base address for query
- * @cmd_base_addr: base address for command
- * @ctrl_base_addr: base address for control
- * @data_base_addr: base address for data
- * @intr_src_count: count for the interrupt source
- * @fn_number: function number
- *
- * This structure is used to give the function descriptor information
- * of the particular functionality.
- */
-struct synaptics_rmi4_fn_desc {
- unsigned char query_base_addr;
- unsigned char cmd_base_addr;
- unsigned char ctrl_base_addr;
- unsigned char data_base_addr;
- unsigned char intr_src_count;
- unsigned char fn_number;
-};
-
-/**
- * struct synaptics_rmi4_fn - contains the function information
- * @fn_number: function number
- * @num_of_data_sources: number of data sources
- * @num_of_data_points: number of fingers touched
- * @size_of_data_register_block: data register block size
- * @index_to_intr_reg: index for interrupt register
- * @intr_mask: interrupt mask value
- * @fn_desc: variable for function descriptor structure
- * @link: linked list for function descriptors
- *
- * This structure gives information about the number of data sources and
- * the number of data registers associated with the function.
- */
-struct synaptics_rmi4_fn {
- unsigned char fn_number;
- unsigned char num_of_data_sources;
- unsigned char num_of_data_points;
- unsigned char size_of_data_register_block;
- unsigned char index_to_intr_reg;
- unsigned char intr_mask;
- struct synaptics_rmi4_fn_desc fn_desc;
- struct list_head link;
-};
-
-/**
- * struct synaptics_rmi4_device_info - contains the rmi4 device information
- * @version_major: protocol major version number
- * @version_minor: protocol minor version number
- * @manufacturer_id: manufacturer identification byte
- * @product_props: product properties information
- * @product_info: product info array
- * @date_code: device manufacture date
- * @tester_id: tester id array
- * @serial_number: serial number for that device
- * @product_id_string: product id for the device
- * @support_fn_list: linked list for device information
- *
- * This structure gives information about the number of data sources and
- * the number of data registers associated with the function.
- */
-struct synaptics_rmi4_device_info {
- unsigned int version_major;
- unsigned int version_minor;
- unsigned char manufacturer_id;
- unsigned char product_props;
- unsigned char product_info[2];
- unsigned char date_code[3];
- unsigned short tester_id;
- unsigned short serial_number;
- unsigned char product_id_string[11];
- struct list_head support_fn_list;
-};
-
-/**
- * struct synaptics_rmi4_data - contains the rmi4 device data
- * @rmi4_mod_info: structure variable for rmi4 device info
- * @input_dev: pointer for input device
- * @i2c_client: pointer for i2c client
- * @board: constant pointer for touch platform data
- * @fn_list_mutex: mutex for function list
- * @rmi4_page_mutex: mutex for rmi4 page
- * @current_page: variable for integer
- * @number_of_interrupt_register: interrupt registers count
- * @fn01_ctrl_base_addr: control base address for fn01
- * @fn01_query_base_addr: query base address for fn01
- * @fn01_data_base_addr: data base address for fn01
- * @sensor_max_x: sensor maximum x value
- * @sensor_max_y: sensor maximum y value
- * @regulator: pointer to the regulator structure
- * @wait: wait queue structure variable
- * @touch_stopped: flag to stop the thread function
- * @fingers_supported: maximum supported fingers
- *
- * This structure gives the device data information.
- */
-struct synaptics_rmi4_data {
- struct synaptics_rmi4_device_info rmi4_mod_info;
- struct input_dev *input_dev;
- struct i2c_client *i2c_client;
- const struct synaptics_rmi4_platform_data *board;
- struct mutex fn_list_mutex;
- struct mutex rmi4_page_mutex;
- int current_page;
- unsigned int number_of_interrupt_register;
- unsigned short fn01_ctrl_base_addr;
- unsigned short fn01_query_base_addr;
- unsigned short fn01_data_base_addr;
- int sensor_max_x;
- int sensor_max_y;
- struct regulator *regulator;
- wait_queue_head_t wait;
- bool touch_stopped;
- unsigned char fingers_supported;
-};
-
-/**
- * synaptics_rmi4_set_page() - sets the page
- * @pdata: pointer to synaptics_rmi4_data structure
- * @address: set the address of the page
- *
- * This function is used to set the page and returns integer.
- */
-static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *pdata,
- unsigned int address)
-{
- unsigned char txbuf[PAGE_LEN];
- int retval;
- unsigned int page;
- struct i2c_client *i2c = pdata->i2c_client;
-
- page = ((address >> 8) & MASK_8BIT);
- if (page != pdata->current_page) {
- txbuf[0] = MASK_8BIT;
- txbuf[1] = page;
- retval = i2c_master_send(i2c, txbuf, PAGE_LEN);
- if (retval != PAGE_LEN)
- dev_err(&i2c->dev, "failed:%d\n", retval);
- else
- pdata->current_page = page;
- } else
- retval = PAGE_LEN;
- return retval;
-}
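
For context on the deleted helper: RMI4 splits a 16-bit register address into
a page (high byte) and an in-page offset (low byte), and the page is selected
by writing the page-select register, which sits at offset 0xFF in every page.
That is why txbuf[0] is MASK_8BIT here. A short worked example:

/* address 0x0123:
 *   page  = (0x0123 >> 8) & 0xff == 0x01  -> written via register 0xff
 *   index =  0x0123       & 0xff == 0x23  -> used as the SMBus offset
 * caching current_page skips the select when the page is unchanged */
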
-/**
- * synaptics_rmi4_i2c_block_read() - read the block of data
- * @pdata: pointer to synaptics_rmi4_data structure
- * @address: read the block of data from this offset
- * @valp: pointer to a buffer containing the data to be read
- * @size: number of bytes to read
- *
- * This function is to read the block of data and returns integer.
- */
-static int synaptics_rmi4_i2c_block_read(struct synaptics_rmi4_data *pdata,
- unsigned short address,
- unsigned char *valp, int size)
-{
- int retval = 0;
- int retry_count = 0;
- int index;
- struct i2c_client *i2c = pdata->i2c_client;
-
- mutex_lock(&(pdata->rmi4_page_mutex));
- retval = synaptics_rmi4_set_page(pdata, address);
- if (retval != PAGE_LEN)
- goto exit;
- index = address & MASK_8BIT;
-retry:
- retval = i2c_smbus_read_i2c_block_data(i2c, index, size, valp);
- if (retval != size) {
- if (++retry_count == MAX_RETRY_COUNT)
- dev_err(&i2c->dev,
- "%s:address 0x%04x size %d failed:%d\n",
- __func__, address, size, retval);
- else {
- synaptics_rmi4_set_page(pdata, address);
- goto retry;
- }
- }
-exit:
- mutex_unlock(&(pdata->rmi4_page_mutex));
- return retval;
-}
-
-/**
- * synaptics_rmi4_i2c_byte_write() - write the single byte data
- * @pdata: pointer to synaptics_rmi4_data structure
- * @address: offset to write the byte of data to
- * @data: data to be written
- *
- * This function writes a single byte of data and returns an integer result.
- */
-static int synaptics_rmi4_i2c_byte_write(struct synaptics_rmi4_data *pdata,
- unsigned short address,
- unsigned char data)
-{
- unsigned char txbuf[2];
- int retval = 0;
- struct i2c_client *i2c = pdata->i2c_client;
-
- /* Can't have anyone else changing the page behind our backs */
- mutex_lock(&(pdata->rmi4_page_mutex));
-
- retval = synaptics_rmi4_set_page(pdata, address);
- if (retval != PAGE_LEN)
- goto exit;
- txbuf[0] = address & MASK_8BIT;
- txbuf[1] = data;
- retval = i2c_master_send(pdata->i2c_client, txbuf, 2);
- /* Add in retry on writes only in certain error return values */
- if (retval != 2) {
- dev_err(&i2c->dev, "failed:%d\n", retval);
- retval = -EIO;
- } else
- retval = 1;
-exit:
- mutex_unlock(&(pdata->rmi4_page_mutex));
- return retval;
-}
-
-/**
- * synpatics_rmi4_touchpad_report() - reports for the rmi4 touchpad device
- * @pdata: pointer to synaptics_rmi4_data structure
- * @rfi: pointer to synaptics_rmi4_fn structure
- *
- * This function reports touch data for the rmi4 touchpad device.
- */
-static int synpatics_rmi4_touchpad_report(struct synaptics_rmi4_data *pdata,
- struct synaptics_rmi4_fn *rfi)
-{
- /* number of touch points - fingers down in this case */
- int touch_count = 0;
- int finger;
- int finger_registers;
- int reg;
- int finger_shift;
- int finger_status;
- int retval;
- int x, y;
- int wx, wy;
- unsigned short data_base_addr;
- unsigned short data_offset;
- unsigned char data_reg_blk_size;
- unsigned char values[2];
- unsigned char data[DATA_LEN];
- unsigned char fingers_supported = pdata->fingers_supported;
- struct i2c_client *client = pdata->i2c_client;
- struct input_dev *input_dev = pdata->input_dev;
-
- /* get 2D sensor finger data */
- /*
- * First get the finger status field - the size of the finger status
- * field is determined by the number of fingers supported - 2 bits per
- * finger, so the number of registers to read is:
- * registerCount = ceil(numberOfFingers/4).
- * Read the required number of registers and check each 2 bit field to
- * determine if a finger is down:
- * 00 = finger not present,
- * 01 = finger present and data accurate,
- * 10 = finger present but data may not be accurate,
- * 11 = reserved for product use.
- */
- finger_registers = (fingers_supported + 3)/4;
- data_base_addr = rfi->fn_desc.data_base_addr;
- retval = synaptics_rmi4_i2c_block_read(pdata, data_base_addr, values,
- finger_registers);
- if (retval != finger_registers) {
- dev_err(&client->dev, "%s:read status registers failed\n",
- __func__);
- return 0;
- }
- /*
- * For each finger present, read the proper number of registers
- * to get absolute data.
- */
- data_reg_blk_size = rfi->size_of_data_register_block;
- for (finger = 0; finger < fingers_supported; finger++) {
- /* determine which data byte the finger status is in */
- reg = finger/4;
- /* bit shift to get finger's status */
- finger_shift = (finger % 4) * 2;
- finger_status = (values[reg] >> finger_shift) & 3;
- /*
- * if finger status indicates a finger is present then
- * read the finger data and report it
- */
- input_mt_slot(input_dev, finger);
- input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
- finger_status != 0);
-
- if (finger_status) {
- /* Read the finger data */
- data_offset = data_base_addr +
- ((finger * data_reg_blk_size) +
- finger_registers);
- retval = synaptics_rmi4_i2c_block_read(pdata,
- data_offset, data,
- data_reg_blk_size);
- if (retval != data_reg_blk_size) {
- dev_err(&client->dev, "%s:read data failed\n",
- __func__);
- return 0;
- }
- x = (data[0] << 4) | (data[2] & MASK_4BIT);
- y = (data[1] << 4) | ((data[2] >> 4) & MASK_4BIT);
- wy = (data[3] >> 4) & MASK_4BIT;
- wx = (data[3] & MASK_4BIT);
-
- if (pdata->board->x_flip)
- x = pdata->sensor_max_x - x;
- if (pdata->board->y_flip)
- y = pdata->sensor_max_y - y;
-
- input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR,
- max(wx, wy));
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
-
- /* number of active touch points */
- touch_count++;
- }
- }
-
- /* sync after groups of events */
- input_mt_sync_frame(input_dev);
- input_sync(input_dev);
- /* return the number of touch points */
- return touch_count;
-}
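As a worked example of the decoding in synpatics_rmi4_touchpad_report() above, this standalone sketch runs the same bit operations on hand-made register values (2-bit status fields, 12-bit coordinates with packed LSBs); the values are illustrative, not from real hardware.

/* Standalone sketch of the F11 finger-status and coordinate decoding
 * above, using made-up register contents.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t status = 0x09;	/* binary 00 00 10 01: fingers 0 and 1 down */
	uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 }; /* one finger's block */
	int finger;

	for (finger = 0; finger < 4; finger++)
		printf("finger %d status %d\n",
		       finger, (status >> (finger * 2)) & 3);

	/* 12-bit coordinates: MSBs in data[0]/data[1], LSBs in data[2] */
	int x = (data[0] << 4) | (data[2] & 0x0f);
	int y = (data[1] << 4) | ((data[2] >> 4) & 0x0f);
	int wx = data[3] & 0x0f;
	int wy = (data[3] >> 4) & 0x0f;

	printf("x=%d y=%d wx=%d wy=%d\n", x, y, wx, wy);	/* 294 837 8 7 */
	return 0;
}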
-
-/**
- * synaptics_rmi4_report_device() - reports the rmi4 device
- * @pdata: pointer to synaptics_rmi4_data structure
- * @rfi: pointer to synaptics_rmi4_fn
- *
- * This function is used to call the report function of the rmi4 device.
- */
-static int synaptics_rmi4_report_device(struct synaptics_rmi4_data *pdata,
- struct synaptics_rmi4_fn *rfi)
-{
- int touch = 0;
- struct i2c_client *client = pdata->i2c_client;
- static int num_error_reports;
-
- if (rfi->fn_number != SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
- num_error_reports++;
- if (num_error_reports < MAX_ERROR_REPORT)
- dev_err(&client->dev, "%s:report not supported\n",
- __func__);
- } else
- touch = synpatics_rmi4_touchpad_report(pdata, rfi);
- return touch;
-}
-/**
- * synaptics_rmi4_sensor_report() - reports to input subsystem
- * @pdata: pointer to synaptics_rmi4_data structure
- *
- * This function reads in all data sources and reports
- * them to the input subsystem.
- */
-static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *pdata)
-{
- unsigned char intr_status[4];
- /* number of touch points - fingers or buttons */
- int touch = 0;
- int retval;
- struct synaptics_rmi4_fn *rfi;
- struct synaptics_rmi4_device_info *rmi;
- struct i2c_client *client = pdata->i2c_client;
-
- /*
- * Get the interrupt status from the function $01
- * control register+1 to find which source(s) were interrupting
- * so we can read the data from the source(s) (2D sensor, buttons..)
- */
- retval = synaptics_rmi4_i2c_block_read(pdata,
- pdata->fn01_data_base_addr + 1,
- intr_status,
- pdata->number_of_interrupt_register);
- if (retval != pdata->number_of_interrupt_register) {
- dev_err(&client->dev,
- "could not read interrupt status registers\n");
- return 0;
- }
- /*
- * check each function that has data sources and if the interrupt for
- * that triggered then call that RMI4 functions report() function to
- * gather data and report it to the input subsystem
- */
- rmi = &(pdata->rmi4_mod_info);
- list_for_each_entry(rfi, &rmi->support_fn_list, link) {
- if (rfi->num_of_data_sources) {
- if (intr_status[rfi->index_to_intr_reg] &
- rfi->intr_mask)
- touch = synaptics_rmi4_report_device(pdata,
- rfi);
- }
- }
- /* return the number of touch points */
- return touch;
-}
-
-/**
- * synaptics_rmi4_irq() - thread function for rmi4 attention line
- * @irq: irq value
- * @data: void pointer
- *
- * This function is the interrupt thread function. It just notifies the
- * application layer that attention is required.
- */
-static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
-{
- struct synaptics_rmi4_data *pdata = data;
- int touch_count;
-
- do {
- touch_count = synaptics_rmi4_sensor_report(pdata);
- if (touch_count)
- wait_event_timeout(pdata->wait, pdata->touch_stopped,
- msecs_to_jiffies(1));
- else
- break;
- } while (!pdata->touch_stopped);
- return IRQ_HANDLED;
-}
-
-/**
- * synpatics_rmi4_touchpad_detect() - detects the rmi4 touchpad device
- * @pdata: pointer to synaptics_rmi4_data structure
- * @rfi: pointer to synaptics_rmi4_fn structure
- * @fd: pointer to synaptics_rmi4_fn_desc structure
- * @interruptcount: number of interrupt sources counted so far
- *
- * This function detects the rmi4 touchpad device.
- */
-static int synpatics_rmi4_touchpad_detect(struct synaptics_rmi4_data *pdata,
- struct synaptics_rmi4_fn *rfi,
- struct synaptics_rmi4_fn_desc *fd,
- unsigned int interruptcount)
-{
- unsigned char queries[QUERY_LEN];
- unsigned short intr_offset;
- unsigned char abs_data_size;
- unsigned char abs_data_blk_size;
- unsigned char egr_0, egr_1;
- unsigned int all_data_blk_size;
- int has_pinch, has_flick, has_tap;
- int has_tapandhold, has_doubletap;
- int has_earlytap, has_press;
- int has_palmdetect, has_rotate;
- int has_rel;
- int i;
- int retval;
- struct i2c_client *client = pdata->i2c_client;
-
- rfi->fn_desc.query_base_addr = fd->query_base_addr;
- rfi->fn_desc.data_base_addr = fd->data_base_addr;
- rfi->fn_desc.intr_src_count = fd->intr_src_count;
- rfi->fn_desc.fn_number = fd->fn_number;
- rfi->fn_number = fd->fn_number;
- rfi->num_of_data_sources = fd->intr_src_count;
- rfi->fn_desc.ctrl_base_addr = fd->ctrl_base_addr;
- rfi->fn_desc.cmd_base_addr = fd->cmd_base_addr;
-
- /*
- * need to get number of fingers supported, data size, etc.
- * to be used when getting data since the number of registers to
- * read depends on the number of fingers supported and data size.
- */
- retval = synaptics_rmi4_i2c_block_read(pdata, fd->query_base_addr,
- queries,
- sizeof(queries));
- if (retval != sizeof(queries)) {
- dev_err(&client->dev, "%s:read function query registers failed\n",
- __func__);
- return retval;
- }
- /*
- * 2D data sources have only 3 bits for the number of fingers
- * supported - so the encoding is a bit weird.
- */
- if ((queries[1] & MASK_3BIT) <= 4)
- /* add 1 since zero based */
- rfi->num_of_data_points = (queries[1] & MASK_3BIT) + 1;
- else {
- /*
- * a value of 5 is up to 10 fingers - 6 and 7 are reserved
- * (shouldn't get these in a normal 2D source).
- */
- if ((queries[1] & MASK_3BIT) == 5)
- rfi->num_of_data_points = 10;
- }
- pdata->fingers_supported = rfi->num_of_data_points;
- /* Need to get interrupt info for handling interrupts */
- rfi->index_to_intr_reg = (interruptcount + 7)/8;
- if (rfi->index_to_intr_reg != 0)
- rfi->index_to_intr_reg -= 1;
- /*
- * loop through interrupts for each source in fn $11
- * and or in a bit to the interrupt mask for each.
- */
- intr_offset = interruptcount % 8;
- rfi->intr_mask = 0;
- for (i = intr_offset;
- i < ((fd->intr_src_count & MASK_3BIT) + intr_offset); i++)
- rfi->intr_mask |= 1 << i;
-
- /* Size of just the absolute data for one finger */
- abs_data_size = queries[5] & MASK_2BIT;
- /* One each for X and Y, one for LSB for X & Y, one for W, one for Z */
- abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
- rfi->size_of_data_register_block = abs_data_blk_size;
-
- /*
- * need to determine the size of data to read - this depends on
- * conditions such as whether Relative data is reported and if Gesture
- * data is reported.
- */
- egr_0 = queries[7];
- egr_1 = queries[8];
-
- /*
- * Get info about what EGR data is supported, whether it has
- * Relative data supported, etc.
- */
- has_pinch = egr_0 & HAS_PINCH;
- has_flick = egr_0 & HAS_FLICK;
- has_tap = egr_0 & HAS_TAP;
- has_earlytap = egr_0 & HAS_EARLYTAP;
- has_press = egr_0 & HAS_PRESS;
- has_rotate = egr_1 & HAS_ROTATE;
- has_rel = queries[1] & HAS_RELEASE;
- has_tapandhold = egr_0 & HAS_TAPANDHOLD;
- has_doubletap = egr_0 & HAS_DOUBLETAP;
- has_palmdetect = egr_1 & HAS_PALMDETECT;
-
- /*
- * Size of all data including finger status, absolute data for each
- * finger, relative data and EGR data
- */
- all_data_blk_size =
- /* finger status, four fingers per register */
- ((rfi->num_of_data_points + 3) / 4) +
- /* absolute data, per finger times number of fingers */
- (abs_data_blk_size * rfi->num_of_data_points) +
- /*
- * two relative registers (if relative is being reported)
- */
- 2 * has_rel +
- /*
- * F11_2D_data8 is only present if the egr_0
- * register is non-zero.
- */
- !!(egr_0) +
- /*
- * F11_2D_data9 is only present if either egr_0 or
- * egr_1 registers are non-zero.
- */
- (egr_0 || egr_1) +
- /*
- * F11_2D_data10 is only present if EGR_PINCH or EGR_FLICK of
- * egr_0 reports as 1.
- */
- !!(has_pinch | has_flick) +
- /*
- * F11_2D_data11 and F11_2D_data12 are only present if
- * EGR_FLICK of egr_0 reports as 1.
- */
- 2 * !!(has_flick);
- return retval;
-}
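The F11 data-block sizing at the end of synpatics_rmi4_touchpad_detect() above is easiest to verify with concrete numbers. A standalone sketch, assuming an illustrative sensor with 5 fingers, 5-byte absolute blocks, relative reporting, and flick gestures:

/* Worked example of the F11 data-block sizing above, for an assumed
 * sensor: 5 fingers, 5-byte absolute blocks, relative and flick data.
 */
#include <stdio.h>

int main(void)
{
	int fingers = 5, abs_blk = 5;
	int has_rel = 1, egr_0 = 0x10, egr_1 = 0;	/* flick bit set */
	int has_pinch = 0, has_flick = 1;

	int size = (fingers + 3) / 4 +		/* finger status: 2 regs */
		   abs_blk * fingers +		/* absolute data: 25 regs */
		   2 * has_rel +		/* relative X/Y: 2 regs */
		   !!egr_0 +			/* F11_2D_data8 */
		   (egr_0 || egr_1) +		/* F11_2D_data9 */
		   !!(has_pinch | has_flick) +	/* F11_2D_data10 */
		   2 * !!has_flick;		/* F11_2D_data11/12 */

	printf("total 2D data block: %d registers\n", size);	/* 34 */
	return 0;
}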
-
-/**
- * synaptics_rmi4_touchpad_config() - configures the rmi4 touchpad device
- * @pdata: pointer to synaptics_rmi4_data structure
- * @rfi: pointer to synaptics_rmi4_fn structure
- *
- * This function configures the rmi4 touchpad device.
- */
-static int synaptics_rmi4_touchpad_config(struct synaptics_rmi4_data *pdata,
- struct synaptics_rmi4_fn *rfi)
-{
- /*
- * For the data source - print info and do any
- * source specific configuration.
- */
- unsigned char data[BUF_LEN];
- int retval = 0;
- struct i2c_client *client = pdata->i2c_client;
-
- /* Get and print some info about the data source... */
- /* To Query 2D devices we need to read from the address obtained
- * from the function descriptor stored in the RMI function info.
- */
- retval = synaptics_rmi4_i2c_block_read(pdata,
- rfi->fn_desc.query_base_addr,
- data, QUERY_LEN);
- if (retval != QUERY_LEN)
- dev_err(&client->dev, "%s:read query registers failed\n",
- __func__);
- else {
- retval = synaptics_rmi4_i2c_block_read(pdata,
- rfi->fn_desc.ctrl_base_addr,
- data, DATA_BUF_LEN);
- if (retval != DATA_BUF_LEN) {
- dev_err(&client->dev,
- "%s:read control registers failed\n",
- __func__);
- return retval;
- }
- /* Store these for use later */
- pdata->sensor_max_x = ((data[6] & MASK_8BIT) << 0) |
- ((data[7] & MASK_4BIT) << 8);
- pdata->sensor_max_y = ((data[8] & MASK_5BIT) << 0) |
- ((data[9] & MASK_4BIT) << 8);
- }
- return retval;
-}
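A standalone sketch of the control-register unpacking in synaptics_rmi4_touchpad_config() above, with made-up register contents; it mirrors the driver's masks exactly, including the 5-bit mask the driver applies to the low byte of max y.

/* Standalone sketch of the sensor-maximum unpacking above; register
 * values are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t data[10] = { 0 };

	data[6] = 0xff; data[7] = 0x04;	/* max x = 0x4ff = 1279 */
	data[8] = 0x1f; data[9] = 0x03;	/* max y = 0x31f = 799 */

	int max_x = (data[6] & 0xff) | ((data[7] & 0x0f) << 8);
	int max_y = (data[8] & 0x1f) | ((data[9] & 0x0f) << 8);

	printf("sensor max: x=%d y=%d\n", max_x, max_y);
	return 0;
}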
-
-/**
- * synaptics_rmi4_i2c_query_device() - query the rmi4 device
- * @pdata: pointer to synaptics_rmi4_data structure
- *
- * This function is used to query the rmi4 device.
- */
-static int synaptics_rmi4_i2c_query_device(struct synaptics_rmi4_data *pdata)
-{
- int i;
- int retval;
- unsigned char std_queries[STD_QUERY_LEN];
- unsigned char intr_count = 0;
- int data_sources = 0;
- unsigned int ctrl_offset;
- struct synaptics_rmi4_fn *rfi;
- struct synaptics_rmi4_fn_desc rmi_fd;
- struct synaptics_rmi4_device_info *rmi;
- struct i2c_client *client = pdata->i2c_client;
-
- /*
- * init the physical driver's RMI module
- * info list of functions
- */
- INIT_LIST_HEAD(&pdata->rmi4_mod_info.support_fn_list);
-
- /*
- * Read the Page Descriptor Table to determine what functions
- * are present
- */
- for (i = PDT_START_SCAN_LOCATION; i > PDT_END_SCAN_LOCATION;
- i -= PDT_ENTRY_SIZE) {
- retval = synaptics_rmi4_i2c_block_read(pdata, i,
- (unsigned char *)&rmi_fd,
- sizeof(rmi_fd));
- if (retval != sizeof(rmi_fd)) {
- /* failed to read next PDT entry */
- dev_err(&client->dev, "%s: read error\n", __func__);
- return -EIO;
- }
- rfi = NULL;
- if (rmi_fd.fn_number) {
- switch (rmi_fd.fn_number & MASK_8BIT) {
- case SYNAPTICS_RMI4_DEVICE_CONTROL_FUNC_NUM:
- pdata->fn01_query_base_addr =
- rmi_fd.query_base_addr;
- pdata->fn01_ctrl_base_addr =
- rmi_fd.ctrl_base_addr;
- pdata->fn01_data_base_addr =
- rmi_fd.data_base_addr;
- break;
- case SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM:
- if (rmi_fd.intr_src_count) {
- rfi = kmalloc(sizeof(*rfi),
- GFP_KERNEL);
- if (!rfi)
- return -ENOMEM;
- retval = synpatics_rmi4_touchpad_detect
- (pdata, rfi,
- &rmi_fd,
- intr_count);
- if (retval < 0) {
- kfree(rfi);
- return retval;
- }
- }
- break;
- }
- /* interrupt count for next iteration */
- intr_count += (rmi_fd.intr_src_count & MASK_3BIT);
- /*
- * We only want to add functions to the list
- * that have data associated with them.
- */
- if (rfi && rmi_fd.intr_src_count) {
- /* link this function info to the RMI module */
- mutex_lock(&(pdata->fn_list_mutex));
- list_add_tail(&rfi->link,
- &pdata->rmi4_mod_info.support_fn_list);
- mutex_unlock(&(pdata->fn_list_mutex));
- }
- } else {
- /*
- * A zero in the function number
- * signals the end of the PDT
- */
- dev_dbg(&client->dev,
- "%s:end of PDT\n", __func__);
- break;
- }
- }
- /*
- * calculate the interrupt register count - used in the
- * ISR to read the correct number of interrupt registers
- */
- pdata->number_of_interrupt_register = (intr_count + 7) / 8;
- /*
- * Function $01 will be used to query the product properties
- * and product ID, so we had to read the PDT above first to get
- * the Fn $01 query address prior to filling in the product
- * info. NOTE: Even an unflashed device will still have Fn $01.
- */
-
- /* Load up the standard queries and get the RMI4 module info */
- retval = synaptics_rmi4_i2c_block_read(pdata,
- pdata->fn01_query_base_addr,
- std_queries,
- sizeof(std_queries));
- if (retval != sizeof(std_queries)) {
- dev_err(&client->dev, "%s:Failed reading queries\n",
- __func__);
- return -EIO;
- }
-
- /* Currently supported RMI version is 4.0 */
- pdata->rmi4_mod_info.version_major = 4;
- pdata->rmi4_mod_info.version_minor = 0;
- /*
- * get manufacturer id, product_props, product info,
- * date code, tester id, serial num and product id (name)
- */
- pdata->rmi4_mod_info.manufacturer_id = std_queries[0];
- pdata->rmi4_mod_info.product_props = std_queries[1];
- pdata->rmi4_mod_info.product_info[0] = std_queries[2];
- pdata->rmi4_mod_info.product_info[1] = std_queries[3];
- /* year - 2001-2032 */
- pdata->rmi4_mod_info.date_code[0] = std_queries[4] & MASK_5BIT;
- /* month - 1-12 */
- pdata->rmi4_mod_info.date_code[1] = std_queries[5] & MASK_4BIT;
- /* day - 1-31 */
- pdata->rmi4_mod_info.date_code[2] = std_queries[6] & MASK_5BIT;
- pdata->rmi4_mod_info.tester_id = ((std_queries[7] & MASK_7BIT) << 8) |
- (std_queries[8] & MASK_7BIT);
- pdata->rmi4_mod_info.serial_number =
- ((std_queries[9] & MASK_7BIT) << 8) |
- (std_queries[10] & MASK_7BIT);
- memcpy(pdata->rmi4_mod_info.product_id_string, &std_queries[11], 10);
-
- /* Check if this is a Synaptics device - report if not. */
- if (pdata->rmi4_mod_info.manufacturer_id != 1)
- dev_err(&client->dev, "non-Synaptics mfg id:%d\n",
- pdata->rmi4_mod_info.manufacturer_id);
-
- list_for_each_entry(rfi, &pdata->rmi4_mod_info.support_fn_list, link)
- data_sources += rfi->num_of_data_sources;
- if (data_sources) {
- rmi = &(pdata->rmi4_mod_info);
- list_for_each_entry(rfi, &rmi->support_fn_list, link) {
- if (rfi->num_of_data_sources) {
- if (rfi->fn_number ==
- SYNAPTICS_RMI4_TOUCHPAD_FUNC_NUM) {
- retval = synaptics_rmi4_touchpad_config
- (pdata, rfi);
- if (retval < 0)
- return retval;
- } else
- dev_err(&client->dev,
- "%s:fn_number not supported\n",
- __func__);
- /*
- * Turn on interrupts for this
- * function's data sources.
- */
- ctrl_offset = pdata->fn01_ctrl_base_addr + 1 +
- rfi->index_to_intr_reg;
- retval = synaptics_rmi4_i2c_byte_write(pdata,
- ctrl_offset,
- rfi->intr_mask);
- if (retval < 0)
- return retval;
- }
- }
- }
- return 0;
-}
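The interrupt bookkeeping from the PDT scan above (which interrupt register a function's sources land in, the per-function mask, and the total register count read in the ISR) can be checked with a standalone sketch; the two functions and their source counts are illustrative:

/* Standalone sketch of the interrupt-register bookkeeping above.
 * fn_sources[] holds illustrative intr_src_count values, e.g. F01
 * with one source followed by F11 with two.
 */
#include <stdio.h>

int main(void)
{
	int fn_sources[] = { 1, 2 };
	int intr_count = 0;
	int i, b;

	for (i = 0; i < 2; i++) {
		int offset = intr_count % 8;	/* first bit for this fn */
		int index = (intr_count + 7) / 8;
		unsigned char mask = 0;

		if (index != 0)			/* same adjustment as above */
			index -= 1;
		for (b = offset; b < offset + fn_sources[i]; b++)
			mask |= 1 << b;
		printf("fn %d: intr reg %d, mask 0x%02x\n", i, index, mask);
		intr_count += fn_sources[i];
	}
	/* same formula the ISR uses to size its block read */
	printf("interrupt registers to read: %d\n", (intr_count + 7) / 8);
	return 0;
}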
-
-/*
- * Default platform data.
- * Used when the i2c client supplies no board-specific platform data.
- */
-static struct synaptics_rmi4_platform_data synaptics_rmi4_platformdata = {
- .irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
- .x_flip = false,
- .y_flip = true,
-};
-
-/**
- * synaptics_rmi4_probe() - Initialize the i2c-client touchscreen driver
- * @client: i2c client structure pointer
- * @dev_id: i2c device id pointer
- *
- * This function allocates and initializes the instance data, requests
- * the irq, and sets the instance data as the client's platform data. It
- * then registers the physical driver, which scans the rmi4 Physical
- * Device Table and enumerates any rmi4 functions that have data sources
- * associated with them.
- */
-static int synaptics_rmi4_probe(struct i2c_client *client,
-				const struct i2c_device_id *dev_id)
-{
- int retval;
- unsigned char intr_status[4];
- struct synaptics_rmi4_data *rmi4_data;
- const struct synaptics_rmi4_platform_data *platformdata =
- client->dev.platform_data;
-
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev, "i2c smbus byte data not supported\n");
- return -EIO;
- }
-
- if (!platformdata)
- platformdata = &synaptics_rmi4_platformdata;
-
- /* Allocate and initialize the instance data for this client */
- rmi4_data = kcalloc(2, sizeof(struct synaptics_rmi4_data),
- GFP_KERNEL);
- if (!rmi4_data)
- return -ENOMEM;
-
- rmi4_data->input_dev = input_allocate_device();
- if (rmi4_data->input_dev == NULL) {
- retval = -ENOMEM;
- goto err_input;
- }
-
- rmi4_data->regulator = regulator_get(&client->dev, "vdd");
- if (IS_ERR(rmi4_data->regulator)) {
- dev_err(&client->dev, "%s:get regulator failed\n",
- __func__);
- retval = PTR_ERR(rmi4_data->regulator);
- goto err_get_regulator;
- }
- retval = regulator_enable(rmi4_data->regulator);
- if (retval < 0) {
- dev_err(&client->dev, "%s:regulator enable failed\n",
- __func__);
- goto err_regulator_enable;
- }
- init_waitqueue_head(&rmi4_data->wait);
- /*
- * Copy the i2c_client pointer into rmi4_data's i2c_client pointer for
- * later use in rmi4_read, rmi4_write, etc.
- */
- rmi4_data->i2c_client = client;
- /* So we set the page correctly the first time */
- rmi4_data->current_page = MASK_16BIT;
- rmi4_data->board = platformdata;
- rmi4_data->touch_stopped = false;
-
- /* init the mutexes for maintaining the lists */
- mutex_init(&(rmi4_data->fn_list_mutex));
- mutex_init(&(rmi4_data->rmi4_page_mutex));
-
- /*
- * Register physical driver - this will call the detect function that
- * will then scan the device and determine the supported
- * rmi4 functions.
- */
- retval = synaptics_rmi4_i2c_query_device(rmi4_data);
- if (retval) {
- dev_err(&client->dev, "%s: rmi4 query device failed\n",
- __func__);
- goto err_query_dev;
- }
-
- /* Store the instance data in the i2c_client */
- i2c_set_clientdata(client, rmi4_data);
-
- /* initialize the input device parameters */
- rmi4_data->input_dev->name = DRIVER_NAME;
- rmi4_data->input_dev->phys = "Synaptics_Clearpad";
- rmi4_data->input_dev->id.bustype = BUS_I2C;
- rmi4_data->input_dev->dev.parent = &client->dev;
- input_set_drvdata(rmi4_data->input_dev, rmi4_data);
-
- /* Initialize the function handlers for rmi4 */
- set_bit(EV_SYN, rmi4_data->input_dev->evbit);
- set_bit(EV_KEY, rmi4_data->input_dev->evbit);
- set_bit(EV_ABS, rmi4_data->input_dev->evbit);
-
- input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_X, 0,
- rmi4_data->sensor_max_x, 0, 0);
- input_set_abs_params(rmi4_data->input_dev, ABS_MT_POSITION_Y, 0,
- rmi4_data->sensor_max_y, 0, 0);
- input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
- MAX_TOUCH_MAJOR, 0, 0);
- input_mt_init_slots(rmi4_data->input_dev,
- rmi4_data->fingers_supported, 0);
-
- /* Clear interrupts */
- synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1, intr_status,
- rmi4_data->number_of_interrupt_register);
- retval = request_threaded_irq(client->irq, NULL,
- synaptics_rmi4_irq,
- platformdata->irq_type,
- DRIVER_NAME, rmi4_data);
- if (retval) {
- dev_err(&client->dev, "Unable to get attn irq %d\n",
- client->irq);
- goto err_query_dev;
- }
-
- retval = input_register_device(rmi4_data->input_dev);
- if (retval) {
- dev_err(&client->dev, "%s:input register failed\n", __func__);
- goto err_free_irq;
- }
-
- return retval;
-
-err_free_irq:
- free_irq(client->irq, rmi4_data);
-err_query_dev:
- regulator_disable(rmi4_data->regulator);
-err_regulator_enable:
- regulator_put(rmi4_data->regulator);
-err_get_regulator:
- input_free_device(rmi4_data->input_dev);
- rmi4_data->input_dev = NULL;
-err_input:
- kfree(rmi4_data);
-
- return retval;
-}
-/**
- * synaptics_rmi4_remove() - Removes the i2c-client touchscreen driver
- * @client: i2c client structure pointer
- *
- * This function removes the i2c-client touchscreen driver and
- * returns an integer.
- */
-static int synaptics_rmi4_remove(struct i2c_client *client)
-{
- struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
-
- rmi4_data->touch_stopped = true;
- wake_up(&rmi4_data->wait);
- free_irq(client->irq, rmi4_data);
- input_unregister_device(rmi4_data->input_dev);
- regulator_disable(rmi4_data->regulator);
- regulator_put(rmi4_data->regulator);
- kfree(rmi4_data);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-/**
- * synaptics_rmi4_suspend() - suspend the touch screen controller
- * @dev: pointer to device structure
- *
- * This function suspends the touch panel controller and
- * returns an integer.
- */
-static int synaptics_rmi4_suspend(struct device *dev)
-{
- /* Touch sleep mode */
- int retval;
- unsigned char intr_status;
- struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
-
- rmi4_data->touch_stopped = true;
- disable_irq(rmi4_data->i2c_client->irq);
-
- retval = synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1,
- &intr_status,
- rmi4_data->number_of_interrupt_register);
- if (retval < 0)
- return retval;
-
- retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
- rmi4_data->fn01_ctrl_base_addr + 1,
- (intr_status & ~TOUCHPAD_CTRL_INTR));
- if (retval < 0)
- return retval;
-
- regulator_disable(rmi4_data->regulator);
-
- return 0;
-}
-/**
- * synaptics_rmi4_resume() - resume the touch screen controller
- * @dev: pointer to device structure
- *
- * This function resumes the touch panel controller and
- * returns an integer.
- */
-static int synaptics_rmi4_resume(struct device *dev)
-{
- int retval;
- unsigned char intr_status;
- struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
-
- retval = regulator_enable(rmi4_data->regulator);
- if (retval) {
- dev_err(dev, "Regulator enable failed (%d)\n", retval);
- return retval;
- }
-
- enable_irq(rmi4_data->i2c_client->irq);
- rmi4_data->touch_stopped = false;
-
- retval = synaptics_rmi4_i2c_block_read(rmi4_data,
- rmi4_data->fn01_data_base_addr + 1,
- &intr_status,
- rmi4_data->number_of_interrupt_register);
- if (retval < 0)
- return retval;
-
- retval = synaptics_rmi4_i2c_byte_write(rmi4_data,
- rmi4_data->fn01_ctrl_base_addr + 1,
- (intr_status | TOUCHPAD_CTRL_INTR));
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-#endif
-
-static SIMPLE_DEV_PM_OPS(synaptics_rmi4_dev_pm_ops, synaptics_rmi4_suspend,
- synaptics_rmi4_resume);
-
-static const struct i2c_device_id synaptics_rmi4_id_table[] = {
- { DRIVER_NAME, 0 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
-
-static struct i2c_driver synaptics_rmi4_driver = {
- .driver = {
- .name = DRIVER_NAME,
- .pm = &synaptics_rmi4_dev_pm_ops,
- },
- .probe = synaptics_rmi4_probe,
- .remove = synaptics_rmi4_remove,
- .id_table = synaptics_rmi4_id_table,
-};
-
-module_i2c_driver(synaptics_rmi4_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("naveen.gaddipati@stericsson.com, js.ha@stericsson.com");
-MODULE_DESCRIPTION("synaptics rmi4 i2c touch Driver");
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
deleted file mode 100644
index 8c9166ba71c7..000000000000
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- *
- * Synaptics Register Mapped Interface (RMI4) I2C Physical Layer Driver.
- * Copyright (c) 2007-2010, Synaptics Incorporated
- *
- * Author: Js HA <js.ha@stericsson.com> for ST-Ericsson
- * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
- * Copyright 2010 (c) ST-Ericsson AB
- */
-/*
- * This file is licensed under the GPL2 license.
- *
- *#############################################################################
- * GPL
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- *#############################################################################
- */
-
-#ifndef _SYNAPTICS_RMI4_H_INCLUDED_
-#define _SYNAPTICS_RMI4_H_INCLUDED_
-
-/**
- * struct synaptics_rmi4_platform_data - contains the rmi4 platform data
- * @irq_type: irq type
- * @x_flip: x flip flag
- * @y_flip: y flip flag
- *
- * This structure gives platform data for rmi4.
- */
-struct synaptics_rmi4_platform_data {
- int irq_type;
- bool x_flip;
- bool y_flip;
-};
-
-#endif
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
index c9cef0b91531..cc46e37e64c1 100644
--- a/drivers/staging/unisys/MAINTAINERS
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -1,5 +1,5 @@
Unisys s-Par drivers
-M: Ben Romer <sparmaintainer@unisys.com>
+M: David Kershner <sparmaintainer@unisys.com>
S: Maintained
F: Documentation/s-Par/overview.txt
F: Documentation/s-Par/proc-entries.txt
diff --git a/drivers/staging/unisys/include/guestlinuxdebug.h b/drivers/staging/unisys/include/guestlinuxdebug.h
index 82ee565395ba..b81287f5e2c3 100644
--- a/drivers/staging/unisys/include/guestlinuxdebug.h
+++ b/drivers/staging/unisys/include/guestlinuxdebug.h
@@ -17,9 +17,10 @@
#define __GUESTLINUXDEBUG_H__
/*
-* This file contains supporting interface for "vmcallinterface.h", particularly
-* regarding adding additional structure and functionality to linux
-* ISSUE_IO_VMCALL_POSTCODE_SEVERITY */
+ * This file contains supporting interface for "vmcallinterface.h", particularly
+ * regarding adding additional structure and functionality to linux
+ * ISSUE_IO_VMCALL_POSTCODE_SEVERITY
+ */
/******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
enum driver_pc { /* POSTCODE driver identifier tuples */
@@ -133,9 +134,9 @@ enum event_pc { /* POSTCODE event identifier tuples */
#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
-#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT /* TODO-> Info currently
- * doesn't show, so we
- * set info=warning */
+/* TODO-> Info currently doesn't show, so we set info=warning */
+#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT
+
/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
* Please also note that the resulting postcode is in hex, so if you are
* searching for the __LINE__ number, convert it first to decimal. The line
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 162ca187a66b..880d9f04cbcf 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -575,7 +575,7 @@ struct spar_io_channel_protocol {
* room)
*/
static inline u16
-add_physinfo_entries(u32 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
+add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
u16 max_pi_arr_entries, struct phys_info pi_arr[])
{
u32 len;
@@ -589,21 +589,19 @@ add_physinfo_entries(u32 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
pi_arr[index].pi_pfn = inp_pfn;
pi_arr[index].pi_off = (u16)inp_off;
pi_arr[index].pi_len = (u16)inp_len;
- return index + 1;
+ return index + 1;
}
- /* this entry spans multiple pages */
- for (len = inp_len, i = 0; len;
- len -= pi_arr[index + i].pi_len, i++) {
+ /* this entry spans multiple pages */
+ for (len = inp_len, i = 0; len;
+ len -= pi_arr[index + i].pi_len, i++) {
if (index + i >= max_pi_arr_entries)
return 0;
pi_arr[index + i].pi_pfn = inp_pfn + i;
if (i == 0) {
pi_arr[index].pi_off = inp_off;
pi_arr[index].pi_len = firstlen;
- }
-
- else {
+ } else {
pi_arr[index + i].pi_off = 0;
pi_arr[index + i].pi_len =
(u16)MINNUM(len, (u32)PI_PAGE_SIZE);
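The page-splitting performed by add_physinfo_entries() above can be illustrated with a standalone userspace sketch; PI_PAGE_SIZE and the phys_info layout here are simplified stand-ins for the s-Par definitions, and the input run is made up.

/* Userspace sketch of the page splitting in add_physinfo_entries():
 * one (pfn, offset, length) run becomes one entry per page touched.
 */
#include <stdint.h>
#include <stdio.h>

#define PI_PAGE_SIZE 4096u

struct phys_info {
	uint64_t pfn;
	uint16_t off;
	uint16_t len;
};

int main(void)
{
	uint64_t pfn = 100;
	uint16_t off = 3000;
	uint32_t len = 6000;			/* spans three pages */
	uint16_t firstlen = (uint16_t)(PI_PAGE_SIZE - off);
	struct phys_info pi[8];
	uint32_t remain = len;
	int i = 0, j;

	while (remain) {
		pi[i].pfn = pfn + i;
		pi[i].off = i ? 0 : off;
		pi[i].len = i == 0 ? firstlen :
			    (uint16_t)(remain < PI_PAGE_SIZE ?
				       remain : PI_PAGE_SIZE);
		remain -= pi[i].len;
		i++;
	}
	for (j = 0; j < i; j++)
		printf("entry %d: pfn %llu off %u len %u\n", j,
		       (unsigned long long)pi[j].pfn, pi[j].off, pi[j].len);
	return 0;
}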
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index ec25366b127c..03e36fb6a5a0 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -55,22 +55,25 @@
#define CONTROLVM_CRASHMSG_MAX 2
struct spar_segment_state {
- u16 enabled:1; /* Bit 0: May enter other states */
- u16 active:1; /* Bit 1: Assigned to active partition */
- u16 alive:1; /* Bit 2: Configure message sent to
- * service/server */
- u16 revoked:1; /* Bit 3: similar to partition state
- * ShuttingDown */
- u16 allocated:1; /* Bit 4: memory (device/port number)
- * has been selected by Command */
- u16 known:1; /* Bit 5: has been introduced to the
- * service/guest partition */
- u16 ready:1; /* Bit 6: service/Guest partition has
- * responded to introduction */
- u16 operating:1; /* Bit 7: resource is configured and
- * operating */
- /* Note: don't use high bit unless we need to switch to ushort
- * which is non-compliant */
+ /* Bit 0: May enter other states */
+ u16 enabled:1;
+ /* Bit 1: Assigned to active partition */
+ u16 active:1;
+ /* Bit 2: Configure message sent to service/server */
+ u16 alive:1;
+ /* Bit 3: similar to partition state ShuttingDown */
+ u16 revoked:1;
+ /* Bit 4: memory (device/port number) has been selected by Command */
+ u16 allocated:1;
+ /* Bit 5: has been introduced to the service/guest partition */
+ u16 known:1;
+ /* Bit 6: service/Guest partition has responded to introduction */
+ u16 ready:1;
+ /* Bit 7: resource is configured and operating */
+ u16 operating:1;
+/* Note: don't use high bit unless we need to switch to ushort
+ * which is non-compliant
+ */
};
static const struct spar_segment_state segment_state_running = {
@@ -177,53 +180,53 @@ struct controlvm_message_header {
/* For requests, indicates the message type. */
/* For responses, indicates the type of message we are responding to. */
- u32 message_size; /* Includes size of this struct + size
- * of message */
- u32 segment_index; /* Index of segment containing Vm
- * message/information */
- u32 completion_status; /* Error status code or result of
- * message completion */
+ /* Includes size of this struct + size of message */
+ u32 message_size;
+ /* Index of segment containing Vm message/information */
+ u32 segment_index;
+ /* Error status code or result of message completion */
+ u32 completion_status;
struct {
- u32 failed:1; /* =1 in a response to * signify
- * failure */
- u32 response_expected:1; /* =1 in all messages that expect a
- * response (Control ignores this
- * bit) */
- u32 server:1; /* =1 in all bus & device-related
- * messages where the message
- * receiver is to act as the bus or
- * device server */
- u32 test_message:1; /* =1 for testing use only
- * (Control and Command ignore this
- * bit) */
- u32 partial_completion:1; /* =1 if there are forthcoming
- * responses/acks associated
- * with this message */
- u32 preserve:1; /* =1 this is to let us know to
- * preserve channel contents
- * (for running guests)*/
- u32 writer_in_diag:1; /* =1 the DiagWriter is active in the
- * Diagnostic Partition*/
+ /* =1 in a response to signify failure */
+ u32 failed:1;
+ /* =1 in all messages that expect a response */
+ u32 response_expected:1;
+ /* =1 in all bus & device-related messages where the message
+ * receiver is to act as the bus or device server
+ */
+ u32 server:1;
+ /* =1 for testing use only (Control and Command ignore this) */
+ u32 test_message:1;
+ /* =1 if there are forthcoming responses/acks associated
+ * with this message
+ */
+ u32 partial_completion:1;
+ /* =1 this is to let us know to preserve channel contents */
+ u32 preserve:1;
+ /* =1 the DiagWriter is active in the Diagnostic Partition */
+ u32 writer_in_diag:1;
} flags;
- u32 reserved; /* Natural alignment */
- u64 message_handle; /* Identifies the particular message instance,
- * and is used to match particular */
+ /* Natural alignment */
+ u32 reserved;
+ /* Identifies the particular message instance */
+ u64 message_handle;
/* request instances with the corresponding response instance. */
- u64 payload_vm_offset; /* Offset of payload area from start of this
- * instance of ControlVm segment */
- u32 payload_max_bytes; /* Maximum bytes allocated in payload
- * area of ControlVm segment */
- u32 payload_bytes; /* Actual number of bytes of payload
- * area to copy between IO/Command; */
+ /* Offset of payload area from start of this instance */
+ u64 payload_vm_offset;
+ /* Maximum bytes allocated in payload area of ControlVm segment */
+ u32 payload_max_bytes;
+ /* Actual number of bytes of payload area to copy between IO/Command */
+ u32 payload_bytes;
/* if non-zero, there is a payload to copy. */
};
struct controlvm_packet_device_create {
u32 bus_no; /* bus # (0..n-1) from the msg receiver's end */
u32 dev_no; /* bus-relative (0..n-1) device number */
- u64 channel_addr; /* Guest physical address of the channel, which
- * can be dereferenced by the receiver of this
- * ControlVm command */
+ /* Guest physical address of the channel, which can be dereferenced by
+ * the receiver of this ControlVm command
+ */
+ u64 channel_addr;
u64 channel_bytes; /* specifies size of the channel in bytes */
uuid_le data_type_uuid; /* specifies format of data in channel */
uuid_le dev_inst_uuid; /* instance guid for the device */
@@ -231,8 +234,8 @@ struct controlvm_packet_device_create {
}; /* for CONTROLVM_DEVICE_CREATE */
struct controlvm_packet_device_configure {
- u32 bus_no; /* bus # (0..n-1) from the msg
- * receiver's perspective */
+ /* bus # (0..n-1) from the msg receiver's perspective */
+ u32 bus_no;
/* Control uses header SegmentIndex field to access bus number... */
u32 dev_no; /* bus-relative (0..n-1) device number */
} ; /* for CONTROLVM_DEVICE_CONFIGURE */
@@ -251,50 +254,50 @@ struct controlvm_message_device_configure {
struct controlvm_message_packet {
union {
struct {
- u32 bus_no; /* bus # (0..n-1) from the msg
- * receiver's perspective */
- u32 dev_count; /* indicates the max number of
- * devices on this bus */
- u64 channel_addr; /* Guest physical address of
- * the channel, which can be
- * dereferenced by the receiver
- * of this ControlVm command */
+ /* bus # (0..n-1) from the msg receiver's perspective */
+ u32 bus_no;
+ /* indicates the max number of devices on this bus */
+ u32 dev_count;
+ /* Guest physical address of the channel, which can be
+ * dereferenced by the receiver of this ControlVm command
+ */
+ u64 channel_addr;
u64 channel_bytes; /* size of the channel */
- uuid_le bus_data_type_uuid; /* indicates format of
- * data in bus channel*/
+ /* indicates format of data in bus channel */
+ uuid_le bus_data_type_uuid;
uuid_le bus_inst_uuid; /* instance uuid for the bus */
} create_bus; /* for CONTROLVM_BUS_CREATE */
struct {
- u32 bus_no; /* bus # (0..n-1) from the msg
- * receiver's perspective */
+ /* bus # (0..n-1) from the msg receiver's perspective */
+ u32 bus_no;
u32 reserved; /* Natural alignment purposes */
} destroy_bus; /* for CONTROLVM_BUS_DESTROY */
struct {
- u32 bus_no; /* bus # (0..n-1) from the receiver's
- * perspective */
+ /* bus # (0..n-1) from the receiver's perspective */
+ u32 bus_no;
u32 reserved1; /* for alignment purposes */
- u64 guest_handle; /* This is used to convert
- * guest physical address to
- * physical address */
+ /* This is used to convert guest physical address to physical address */
+ u64 guest_handle;
u64 recv_bus_irq_handle;
/* specifies interrupt info. It is used by SP
* to register to receive interrupts from the
* CP. This interrupt is used for bus level
* notifications. The corresponding
- * sendBusInterruptHandle is kept in CP. */
+ * sendBusInterruptHandle is kept in CP.
+ */
} configure_bus; /* for CONTROLVM_BUS_CONFIGURE */
/* for CONTROLVM_DEVICE_CREATE */
struct controlvm_packet_device_create create_device;
struct {
- u32 bus_no; /* bus # (0..n-1) from the msg
- * receiver's perspective */
+ /* bus # (0..n-1) from the msg receiver's perspective */
+ u32 bus_no;
u32 dev_no; /* bus-relative (0..n-1) device # */
} destroy_device; /* for CONTROLVM_DEVICE_DESTROY */
/* for CONTROLVM_DEVICE_CONFIGURE */
struct controlvm_packet_device_configure configure_device;
struct {
- u32 bus_no; /* bus # (0..n-1) from the msg
- * receiver's perspective */
+ /* bus # (0..n-1) from the msg receiver's perspective */
+ u32 bus_no;
u32 dev_no; /* bus-relative (0..n-1) device # */
} reconfigure_device; /* for CONTROLVM_DEVICE_RECONFIGURE */
struct {
@@ -307,8 +310,8 @@ struct controlvm_message_packet {
u32 dev_no;
struct spar_segment_state state;
struct {
- u32 phys_device:1; /* =1 if message is for
- * a physical device */
+ /* =1 if message is for a physical device */
+ u32 phys_device:1;
} flags;
u8 reserved[2]; /* Natural alignment purposes */
} device_change_state; /* for CONTROLVM_DEVICE_CHANGESTATE */
@@ -320,9 +323,10 @@ struct controlvm_message_packet {
} device_change_state_event;
/* for CONTROLVM_DEVICE_CHANGESTATE_EVENT */
struct {
- u32 bus_count; /* indicates the max number of busses */
- u32 switch_count; /* indicates the max number of
- * switches if a service partition */
+ /* indicates the max number of busses */
+ u32 bus_count;
+ /* indicates the max number of switches */
+ u32 switch_count;
enum ultra_chipset_feature features;
u32 platform_number; /* Platform Number */
} init_chipset; /* for CONTROLVM_CHIPSET_INIT */
@@ -330,11 +334,12 @@ struct controlvm_message_packet {
u32 options; /* reserved */
u32 test; /* bit 0 set to run embedded selftest */
} chipset_selftest; /* for CONTROLVM_CHIPSET_SELFTEST */
- u64 addr; /* a physical address of something, that can be
- * dereferenced by the receiver of this
- * ControlVm command (depends on command id) */
- u64 handle; /* a handle of something (depends on command
- * id) */
+ /* a physical address of something, that can be dereferenced
+ * by the receiver of this ControlVm command
+ */
+ u64 addr;
+ /* a handle of something (depends on command id) */
+ u64 handle;
};
};
@@ -357,8 +362,8 @@ struct spar_controlvm_channel_protocol {
u64 gp_nvram; /* guest phys addr of NVRAM channel */
u64 request_payload_offset; /* Offset to request payload area */
u64 event_payload_offset; /* Offset to event payload area */
- u32 request_payload_bytes; /* Bytes available in request payload
- * area */
+ /* Bytes available in request payload area */
+ u32 request_payload_bytes;
u32 event_payload_bytes;/* Bytes available in event payload area */
u32 control_channel_bytes;
u32 nvram_channel_bytes; /* Bytes in PartitionNvram segment */
@@ -384,41 +389,37 @@ struct spar_controlvm_channel_protocol {
u64 virtual_guest_image_size;
u64 prototype_control_channel_offset;
u64 virtual_guest_partition_handle;
-
- u16 restore_action; /* Restore Action field to restore the guest
- * partition */
- u16 dump_action; /* For Windows guests it shows if the visordisk
- * is running in dump mode */
+ /* Restore Action field to restore the guest partition */
+ u16 restore_action;
+ /* For Windows guests it shows if the visordisk is in dump mode */
+ u16 dump_action;
u16 nvram_fail_count;
u16 saved_crash_message_count; /* = CONTROLVM_CRASHMSG_MAX */
- u32 saved_crash_message_offset; /* Offset to request payload area needed
- * for crash dump */
- u32 installation_error; /* Type of error encountered during
- * installation */
+ /* Offset to request payload area needed for crash dump */
+ u32 saved_crash_message_offset;
+ /* Type of error encountered during installation */
+ u32 installation_error;
u32 installation_text_id; /* Id of string to display */
- u16 installation_remaining_steps;/* Number of remaining installation
- * steps (for progress bars) */
- u8 tool_action; /* ULTRA_TOOL_ACTIONS Installation Action
- * field */
+ /* Number of remaining installation steps (for progress bars) */
+ u16 installation_remaining_steps;
+ /* ULTRA_TOOL_ACTIONS Installation Action field */
+ u8 tool_action;
u8 reserved; /* alignment */
struct efi_spar_indication efi_spar_ind;
struct efi_spar_indication efi_spar_ind_supported;
u32 sp_reserved;
- u8 reserved2[28]; /* Force signals to begin on 128-byte cache
- * line */
- struct signal_queue_header request_queue;/* Service or guest partition
- * uses this queue to send
- * requests to Control */
- struct signal_queue_header response_queue;/* Control uses this queue to
- * respond to service or guest
- * partition requests */
- struct signal_queue_header event_queue; /* Control uses this queue to
- * send events to service or
- * guest partition */
- struct signal_queue_header event_ack_queue;/* Service or guest partition
- * uses this queue to ack
- * Control events */
-
+ /* Force signals to begin on 128-byte cache line */
+ u8 reserved2[28];
+ /* guest partition uses this queue to send requests to Control */
+ struct signal_queue_header request_queue;
+ /* Control uses this queue to respond to service or guest
+ * partition requests
+ */
+ struct signal_queue_header response_queue;
+ /* Control uses this queue to send events to guest partition */
+ struct signal_queue_header event_queue;
+ /* Service or guest partition uses this queue to ack Control events */
+ struct signal_queue_header event_ack_queue;
/* Request fixed-size message pool - does not include payload */
struct controlvm_message request_msg[CONTROLVM_MESSAGE_MAX];
diff --git a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
index f59fd8a523c4..abdab4ad0b36 100644
--- a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
+++ b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
@@ -62,7 +62,7 @@ vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
p++;
remain--;
chars++;
- } else if (p == NULL) {
+ } else if (!p) {
chars++;
}
nonprintable_streak = 0;
@@ -72,7 +72,7 @@ vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
p++;
remain--;
chars++;
- } else if (p == NULL) {
+ } else if (!p) {
chars++;
}
} else {
@@ -124,7 +124,8 @@ vbuschannel_itoa(char *p, int remain, int num)
}
if (remain < digits) {
/* not enough room left at <p> to hold number, so fill with
- * '?' */
+ * '?'
+ */
for (i = 0; i < remain; i++, p++)
*p = '?';
return remain;
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index eac97d22278a..533bb5b3d284 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -221,7 +221,6 @@ visorbus_release_busdevice(struct device *xdev)
{
struct visor_device *dev = dev_get_drvdata(xdev);
- dev_set_drvdata(xdev, NULL);
kfree(dev);
}
@@ -701,12 +700,10 @@ DRIVER_ATTR_version(struct device_driver *xdrv, char *buf)
static int
register_driver_attributes(struct visor_driver *drv)
{
- int rc;
struct driver_attribute version =
__ATTR(version, S_IRUGO, DRIVER_ATTR_version, NULL);
drv->version_attr = version;
- rc = driver_create_file(&drv->driver, &drv->version_attr);
- return rc;
+ return driver_create_file(&drv->driver, &drv->version_attr);
}
static void
@@ -771,7 +768,7 @@ visordriver_probe_device(struct device *xdev)
get_device(&dev->device);
if (!drv->probe) {
up(&dev->visordriver_callback_lock);
- rc = -1;
+ rc = -ENODEV;
goto away;
}
rc = drv->probe(dev);
@@ -973,7 +970,7 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
static int
create_visor_device(struct visor_device *dev)
{
- int rc = -1;
+ int rc;
u32 chipset_bus_no = dev->chipset_bus_no;
u32 chipset_dev_no = dev->chipset_dev_no;
@@ -995,6 +992,7 @@ create_visor_device(struct visor_device *dev)
if (!dev->periodic_work) {
POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no,
DIAG_SEVERITY_ERR);
+ rc = -EINVAL;
goto away;
}
@@ -1032,14 +1030,15 @@ create_visor_device(struct visor_device *dev)
if (rc < 0) {
POSTCODE_LINUX_3(DEVICE_REGISTER_FAILURE_PC, chipset_dev_no,
DIAG_SEVERITY_ERR);
- goto away_register;
+ goto away_unregister;
}
list_add_tail(&dev->list_all, &list_all_device_instances);
return 0;
-away_register:
+away_unregister:
device_unregister(&dev->device);
+
away:
put_device(&dev->device);
return rc;
@@ -1058,23 +1057,21 @@ static int
get_vbus_header_info(struct visorchannel *chan,
struct spar_vbus_headerinfo *hdr_info)
{
- int rc = -1;
-
if (!SPAR_VBUS_CHANNEL_OK_CLIENT(visorchannel_get_header(chan)))
- goto away;
+ return -EINVAL;
+
if (visorchannel_read(chan, sizeof(struct channel_header), hdr_info,
sizeof(*hdr_info)) < 0) {
- goto away;
+ return -EIO;
}
if (hdr_info->struct_bytes < sizeof(struct spar_vbus_headerinfo))
- goto away;
+ return -EINVAL;
+
if (hdr_info->device_info_struct_bytes <
sizeof(struct ultra_vbus_deviceinfo)) {
- goto away;
+ return -EINVAL;
}
- rc = 0;
-away:
- return rc;
+ return 0;
}
/* Write the contents of <info> to the struct
@@ -1197,17 +1194,14 @@ fix_vbus_dev_info(struct visor_device *visordev)
static int
create_bus_instance(struct visor_device *dev)
{
- int rc;
int id = dev->chipset_bus_no;
struct spar_vbus_headerinfo *hdr_info;
POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
- if (!hdr_info) {
- rc = -1;
- goto away;
- }
+ if (!hdr_info)
+ return -ENOMEM;
dev_set_name(&dev->device, "visorbus%d", id);
dev->device.bus = &visorbus_type;
@@ -1217,8 +1211,8 @@ create_bus_instance(struct visor_device *dev)
if (device_register(&dev->device) < 0) {
POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id,
POSTCODE_SEVERITY_ERR);
- rc = -1;
- goto away_mem;
+ kfree(hdr_info);
+ return -ENODEV;
}
if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) {
@@ -1234,11 +1228,6 @@ create_bus_instance(struct visor_device *dev)
list_add_tail(&dev->list_all, &list_all_bus_instances);
dev_set_drvdata(&dev->device, dev);
return 0;
-
-away_mem:
- kfree(hdr_info);
-away:
- return rc;
}
/** Remove a device instance for the visor bus itself.
@@ -1328,7 +1317,7 @@ chipset_bus_destroy(struct visor_device *dev)
static void
chipset_device_create(struct visor_device *dev_info)
{
- int rc = -1;
+ int rc;
u32 bus_no = dev_info->chipset_bus_no;
u32 dev_no = dev_info->chipset_dev_no;
@@ -1371,9 +1360,9 @@ pause_state_change_complete(struct visor_device *dev, int status)
return;
/* Notify the chipset driver that the pause is complete, which
- * will presumably want to send some sort of response to the
- * initiator.
- */
+ * will presumably want to send some sort of response to the
+ * initiator.
+ */
(*chipset_responders.device_pause) (dev, status);
}
@@ -1405,7 +1394,7 @@ resume_state_change_complete(struct visor_device *dev, int status)
static void
initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
{
- int rc = -1, x;
+ int rc;
struct visor_driver *drv = NULL;
void (*notify_func)(struct visor_device *dev, int response) = NULL;
@@ -1414,14 +1403,18 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
else
notify_func = chipset_responders.device_resume;
if (!notify_func)
- goto away;
+ return;
drv = to_visor_driver(dev->device.driver);
- if (!drv)
- goto away;
+ if (!drv) {
+ (*notify_func)(dev, -ENODEV);
+ return;
+ }
- if (dev->pausing || dev->resuming)
- goto away;
+ if (dev->pausing || dev->resuming) {
+ (*notify_func)(dev, -EBUSY);
+ return;
+ }
/* Note that even though both drv->pause() and drv->resume
* specify a callback function, it is NOT necessary for us to
@@ -1431,11 +1424,13 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
* visorbus while child function drivers are still running.
*/
if (is_pause) {
- if (!drv->pause)
- goto away;
+ if (!drv->pause) {
+ (*notify_func)(dev, -EINVAL);
+ return;
+ }
dev->pausing = true;
- x = drv->pause(dev, pause_state_change_complete);
+ rc = drv->pause(dev, pause_state_change_complete);
} else {
/* This should be done at BUS resume time, but an
* existing problem prevents us from ever getting a bus
@@ -1444,24 +1439,20 @@ initiate_chipset_device_pause_resume(struct visor_device *dev, bool is_pause)
* would never even get here in that case.
*/
fix_vbus_dev_info(dev);
- if (!drv->resume)
- goto away;
+ if (!drv->resume) {
+ (*notify_func)(dev, -EINVAL);
+ return;
+ }
dev->resuming = true;
- x = drv->resume(dev, resume_state_change_complete);
+ rc = drv->resume(dev, resume_state_change_complete);
}
- if (x < 0) {
+ if (rc < 0) {
if (is_pause)
dev->pausing = false;
else
dev->resuming = false;
- goto away;
- }
- rc = 0;
-away:
- if (rc < 0) {
- if (notify_func)
- (*notify_func)(dev, rc);
+ (*notify_func)(dev, -EINVAL);
}
}
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 891b8db7c5ec..b68a904ac617 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -73,7 +73,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
channel = kzalloc(sizeof(*channel), gfp);
if (!channel)
- goto cleanup;
+ return NULL;
channel->needs_lock = needs_lock;
spin_lock_init(&channel->insert_lock);
@@ -89,14 +89,14 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
if (!channel->requested) {
if (uuid_le_cmp(guid, spar_video_guid)) {
/* Not the video channel we care about this */
- goto cleanup;
+ goto err_destroy_channel;
}
}
channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
if (!channel->mapped) {
release_mem_region(physaddr, size);
- goto cleanup;
+ goto err_destroy_channel;
}
channel->physaddr = physaddr;
@@ -105,7 +105,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
err = visorchannel_read(channel, 0, &channel->chan_hdr,
sizeof(struct channel_header));
if (err)
- goto cleanup;
+ goto err_destroy_channel;
/* we had better be a CLIENT of this channel */
if (channel_bytes == 0)
@@ -122,7 +122,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
if (!channel->requested) {
if (uuid_le_cmp(guid, spar_video_guid)) {
/* Different we care about this */
- goto cleanup;
+ goto err_destroy_channel;
}
}
@@ -130,7 +130,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
MEMREMAP_WB);
if (!channel->mapped) {
release_mem_region(channel->physaddr, channel_bytes);
- goto cleanup;
+ goto err_destroy_channel;
}
channel->nbytes = channel_bytes;
@@ -139,7 +139,7 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
channel->guid = guid;
return channel;
-cleanup:
+err_destroy_channel:
visorchannel_destroy(channel);
return NULL;
}
@@ -293,14 +293,14 @@ visorchannel_clear(struct visorchannel *channel, ulong offset, u8 ch,
err = visorchannel_write(channel, offset + written,
buf, thisbytes);
if (err)
- goto cleanup;
+ goto out_free_page;
written += thisbytes;
nbytes -= thisbytes;
}
err = 0;
-cleanup:
+out_free_page:
free_page((unsigned long)buf);
return err;
}
@@ -461,7 +461,7 @@ signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
if (!sig_read_header(channel, queue, &sig_hdr))
return false;
- sig_hdr.head = ((sig_hdr.head + 1) % sig_hdr.max_slots);
+ sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
if (sig_hdr.head == sig_hdr.tail) {
sig_hdr.num_overflows++;
visorchannel_write(channel,
@@ -521,7 +521,7 @@ visorchannel_signalqueue_slots_avail(struct visorchannel *channel, u32 queue)
tail = sig_hdr.tail;
if (head < tail)
head = head + sig_hdr.max_slots;
- slots_used = (head - tail);
+ slots_used = head - tail;
slots_avail = sig_hdr.max_signals - slots_used;
return (int)slots_avail;
}
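The head/tail accounting in visorchannel_signalqueue_slots_avail() above handles a head that has wrapped past the tail by adding max_slots before subtracting; a standalone sketch with illustrative queue values:

/* Standalone sketch of the circular-queue math in
 * visorchannel_signalqueue_slots_avail(), with head wrapped past tail.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_slots = 8, max_signals = 7;
	unsigned int head = 2, tail = 6;	/* head already wrapped */
	unsigned int slots_used, slots_avail;

	if (head < tail)
		head += max_slots;		/* unwrap: 2 becomes 10 */
	slots_used = head - tail;		/* 4 slots occupied */
	slots_avail = max_signals - slots_used;	/* 3 signals still fit */

	printf("used=%u avail=%u\n", slots_used, slots_avail);
	return 0;
}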
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 07594f43853d..5fbda7b218c7 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -43,11 +43,10 @@
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
-#define MAX_CONTROLVM_PAYLOAD_BYTES (1024*128)
+#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000
-
#define UNISYS_SPAR_LEAF_ID 0x40000000
/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
@@ -62,6 +61,7 @@ static int visorchipset_major;
static int visorchipset_visorbusregwait = 1; /* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;
+static u32 dump_vhba_bus;
static int
visorchipset_open(struct inode *inode, struct file *file)
@@ -86,8 +86,8 @@ visorchipset_release(struct inode *inode, struct file *file)
*/
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
-static unsigned long most_recent_message_jiffies; /* when we got our last
- * controlvm message */
+/* when we got our last controlvm message */
+static unsigned long most_recent_message_jiffies;
static int visorbusregistered;
#define MAX_CHIPSET_EVENTS 2
@@ -103,7 +103,6 @@ struct parser_context {
};
static struct delayed_work periodic_controlvm_work;
-static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);
static struct cdev file_cdev;
@@ -120,7 +119,8 @@ static struct visorchannel *controlvm_channel;
struct visor_controlvm_payload_info {
u8 *ptr; /* pointer to base address of payload pool */
u64 offset; /* offset from beginning of controlvm
- * channel to beginning of payload * pool */
+ * channel to beginning of payload pool
+ */
u32 bytes; /* number of bytes in payload pool */
};
@@ -184,7 +184,8 @@ struct putfile_request {
* - this list is added to when controlvm messages come in that supply
* file data
* - this list is removed from via the hotplug program that is actually
- * consuming these buffers to write as file data */
+ * consuming these buffers to write as file data
+ */
struct list_head input_buffer_list;
spinlock_t req_list_lock; /* lock for input_buffer_list */
@@ -352,7 +353,6 @@ static void controlvm_respond_physdev_changestate(
struct controlvm_message_header *msg_hdr, int response,
struct spar_segment_state state);
-
static void parser_done(struct parser_context *ctx);
static struct parser_context *
@@ -377,7 +377,7 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
rc = NULL;
goto cleanup;
}
- ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY);
+ ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
if (!ctx) {
if (retry)
*retry = true;
@@ -397,24 +397,16 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
rc = NULL;
goto cleanup;
}
- p = __va((unsigned long) (addr));
+ p = __va((unsigned long)(addr));
memcpy(ctx->data, p, bytes);
} else {
- void *mapping;
-
- if (!request_mem_region(addr, bytes, "visorchipset")) {
- rc = NULL;
- goto cleanup;
- }
+ void *mapping = memremap(addr, bytes, MEMREMAP_WB);
- mapping = memremap(addr, bytes, MEMREMAP_WB);
if (!mapping) {
- release_mem_region(addr, bytes);
rc = NULL;
goto cleanup;
}
memcpy(ctx->data, mapping, bytes);
- release_mem_region(addr, bytes);
memunmap(mapping);
}
@@ -437,7 +429,7 @@ parser_id_get(struct parser_context *ctx)
{
struct spar_controlvm_parameters_header *phdr = NULL;
- if (ctx == NULL)
+ if (!ctx)
return NULL_UUID_LE;
phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
return phdr->id;
@@ -460,8 +452,9 @@ parser_param_start(struct parser_context *ctx,
{
struct spar_controlvm_parameters_header *phdr = NULL;
- if (ctx == NULL)
- goto Away;
+ if (!ctx)
+ return;
+
phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
switch (which_string) {
case PARSERSTRING_INITIATOR:
@@ -483,9 +476,6 @@ parser_param_start(struct parser_context *ctx,
default:
break;
}
-
-Away:
- return;
}
static void parser_done(struct parser_context *ctx)
@@ -520,16 +510,15 @@ parser_string_get(struct parser_context *ctx)
}
if (value_length < 0) /* '\0' was not included in the length */
value_length = nscan;
- value = kmalloc(value_length + 1, GFP_KERNEL|__GFP_NORETRY);
- if (value == NULL)
+ value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
+ if (!value)
return NULL;
if (value_length > 0)
memcpy(value, pscan, value_length);
- ((u8 *) (value))[value_length] = '\0';
+ ((u8 *)(value))[value_length] = '\0';
return value;
}
-
static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -537,8 +526,8 @@ static ssize_t toolaction_show(struct device *dev,
u8 tool_action;
visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- tool_action), &tool_action, sizeof(u8));
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action), &tool_action, sizeof(u8));
return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}
@@ -706,6 +695,7 @@ static int match_visorbus_dev_by_id(struct device *dev, void *data)
return 0;
}
+
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
struct visor_device *from)
{
@@ -788,13 +778,15 @@ chipset_init(struct controlvm_message *inmsg)
POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
/* Set features to indicate we support parahotplug (if Command
- * also supports it). */
+ * also supports it).
+ */
features =
inmsg->cmd.init_chipset.
features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
/* Set the "reply" bit so Command knows this is a
- * features-aware driver. */
+ * features-aware driver.
+ */
features |= ULTRA_CHIPSET_FEATURE_REPLY;
cleanup:
@@ -813,7 +805,7 @@ controlvm_init_response(struct controlvm_message *msg,
msg->hdr.payload_max_bytes = 0;
if (response < 0) {
msg->hdr.flags.failed = 1;
- msg->hdr.completion_status = (u32) (-response);
+ msg->hdr.completion_status = (u32)(-response);
}
}
@@ -868,11 +860,64 @@ enum crash_obj_type {
};
static void
+save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
+{
+ u32 local_crash_msg_offset;
+ u16 local_crash_msg_count;
+
+ if (visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_count),
+ &local_crash_msg_count, sizeof(u16)) < 0) {
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
+ POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
+ local_crash_msg_count,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (visorchannel_read(controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ saved_crash_message_offset),
+ &local_crash_msg_offset, sizeof(u32)) < 0) {
+ POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+
+ if (typ == CRASH_BUS) {
+ if (visorchannel_write(controlvm_channel,
+ local_crash_msg_offset,
+ msg,
+ sizeof(struct controlvm_message)) < 0) {
+ POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ } else {
+ local_crash_msg_offset += sizeof(struct controlvm_message);
+ if (visorchannel_write(controlvm_channel,
+ local_crash_msg_offset,
+ msg,
+ sizeof(struct controlvm_message)) < 0) {
+ POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
+ POSTCODE_SEVERITY_ERR);
+ return;
+ }
+ }
+}
+
+static void
bus_responder(enum controlvm_id cmd_id,
struct controlvm_message_header *pending_msg_hdr,
int response)
{
- if (pending_msg_hdr == NULL)
+ if (!pending_msg_hdr)
return; /* no controlvm response needed */
if (pending_msg_hdr->id != (u32)cmd_id)
@@ -890,7 +935,7 @@ device_changestate_responder(enum controlvm_id cmd_id,
u32 bus_no = p->chipset_bus_no;
u32 dev_no = p->chipset_dev_no;
- if (p->pending_msg_hdr == NULL)
+ if (!p->pending_msg_hdr)
return; /* no controlvm response needed */
if (p->pending_msg_hdr->id != cmd_id)
return;
@@ -911,7 +956,7 @@ device_responder(enum controlvm_id cmd_id,
struct controlvm_message_header *pending_msg_hdr,
int response)
{
- if (pending_msg_hdr == NULL)
+ if (!pending_msg_hdr)
return; /* no controlvm response needed */
if (pending_msg_hdr->id != (u32)cmd_id)
@@ -1127,6 +1172,10 @@ bus_create(struct controlvm_message *inmsg)
goto cleanup;
}
bus_info->visorchannel = visorchannel;
+ if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
+ dump_vhba_bus = bus_no;
+ save_crash_message(inmsg, CRASH_BUS);
+ }
POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
@@ -1177,7 +1226,7 @@ bus_configure(struct controlvm_message *inmsg,
POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- } else if (bus_info->pending_msg_hdr != NULL) {
+ } else if (bus_info->pending_msg_hdr) {
POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
@@ -1263,6 +1312,10 @@ my_device_create(struct controlvm_message *inmsg)
}
dev_info->visorchannel = visorchannel;
dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
+ if (uuid_le_cmp(cmd->create_device.data_type_uuid,
+ spar_vhba_channel_protocol_uuid) == 0)
+ save_crash_message(inmsg, CRASH_DEV);
+
POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
cleanup:
@@ -1913,8 +1966,7 @@ cleanup:
poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
}
- queue_delayed_work(periodic_controlvm_workqueue,
- &periodic_controlvm_work, poll_jiffies);
+ schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}
static void
@@ -2011,8 +2063,7 @@ cleanup:
poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
- queue_delayed_work(periodic_controlvm_workqueue,
- &periodic_controlvm_work, poll_jiffies);
+ schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}
static void
@@ -2197,7 +2248,7 @@ static inline int issue_vmcall_update_physical_time(u64 adjustment)
static long visorchipset_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- s64 adjustment;
+ u64 adjustment;
s64 vrtc_offset;
switch (cmd) {
@@ -2262,7 +2313,6 @@ visorchipset_init(struct acpi_device *acpi_device)
{
int rc = 0;
u64 addr;
- int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
addr = controlvm_get_channel_address();
@@ -2272,8 +2322,10 @@ visorchipset_init(struct acpi_device *acpi_device)
memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
- controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
+ controlvm_channel = visorchannel_create_with_lock(addr, 0,
GFP_KERNEL, uuid);
+ if (!controlvm_channel)
+ return -ENODEV;
if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
visorchannel_get_header(controlvm_channel))) {
initialize_controlvm_payload();
@@ -2299,29 +2351,15 @@ visorchipset_init(struct acpi_device *acpi_device)
else
INIT_DELAYED_WORK(&periodic_controlvm_work,
controlvm_periodic_work);
- periodic_controlvm_workqueue =
- create_singlethread_workqueue("visorchipset_controlvm");
- if (!periodic_controlvm_workqueue) {
- POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
- DIAG_SEVERITY_ERR);
- rc = -ENOMEM;
- goto cleanup;
- }
most_recent_message_jiffies = jiffies;
poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
- rc = queue_delayed_work(periodic_controlvm_workqueue,
- &periodic_controlvm_work, poll_jiffies);
- if (rc < 0) {
- POSTCODE_LINUX_2(QUEUE_DELAYED_WORK_PC,
- DIAG_SEVERITY_ERR);
- goto cleanup;
- }
+ schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
visorchipset_platform_device.dev.devt = major_dev;
if (platform_device_register(&visorchipset_platform_device) < 0) {
POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
- rc = -1;
+ rc = -ENODEV;
goto cleanup;
}
POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
@@ -2351,10 +2389,7 @@ visorchipset_exit(struct acpi_device *acpi_device)
visorbus_exit();
- cancel_delayed_work(&periodic_controlvm_work);
- flush_workqueue(periodic_controlvm_workqueue);
- destroy_workqueue(periodic_controlvm_workqueue);
- periodic_controlvm_workqueue = NULL;
+ cancel_delayed_work_sync(&periodic_controlvm_work);
destroy_controlvm_payload_info(&controlvm_payload_info);
memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
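The workqueue removal above relies on the fact that a single self-rearming delayed work item does not need a dedicated single-threaded workqueue; the system workqueue plus cancel_delayed_work_sync() on teardown gives the same guarantees. A condensed sketch, with an illustrative work function and poll interval:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work poll_work;	/* illustrative work item */

static void poll_fn(struct work_struct *work)
{
	/* ... service the channel, then rearm ... */
	schedule_delayed_work(&poll_work, msecs_to_jiffies(100));
}

static int demo_init(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, 0);	/* runs on system_wq */
	return 0;
}

static void demo_exit(void)
{
	/* cancels pending work and waits out a self-rearming instance */
	cancel_delayed_work_sync(&poll_work);
}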
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index d5178b44ba8c..e93bb1dbfd97 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -167,7 +167,7 @@ static int visor_thread_start(struct visor_thread_info *thrinfo,
{
/* used to stop the thread */
init_completion(&thrinfo->has_stopped);
- thrinfo->task = kthread_run(threadfn, thrcontext, name);
+ thrinfo->task = kthread_run(threadfn, thrcontext, "%s", name);
if (IS_ERR(thrinfo->task)) {
thrinfo->id = 0;
return PTR_ERR(thrinfo->task);
@@ -323,9 +323,9 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
goto err_del_scsipending_ent;
if (tasktype == TASK_MGMT_ABORT_TASK)
- scsicmd->result = (DID_ABORT << 16);
+ scsicmd->result = DID_ABORT << 16;
else
- scsicmd->result = (DID_RESET << 16);
+ scsicmd->result = DID_RESET << 16;
scsicmd->scsi_done(scsicmd);
@@ -1062,7 +1062,7 @@ static int visorhba_resume(struct visor_device *dev,
return -EINVAL;
if (devdata->serverdown && !devdata->serverchangingstate)
- devdata->serverchangingstate = 1;
+ devdata->serverchangingstate = true;
visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
devdata, "vhba_incming");
diff --git a/drivers/staging/unisys/visorinput/Kconfig b/drivers/staging/unisys/visorinput/Kconfig
index 3476d419d32c..655cd62433de 100644
--- a/drivers/staging/unisys/visorinput/Kconfig
+++ b/drivers/staging/unisys/visorinput/Kconfig
@@ -4,7 +4,7 @@
config UNISYS_VISORINPUT
tristate "Unisys visorinput driver"
- depends on UNISYSSPAR && UNISYS_VISORBUS && FB
+ depends on UNISYSSPAR && UNISYS_VISORBUS && FB && INPUT
---help---
The Unisys s-Par visorinput driver provides a virtualized system
console (keyboard and mouse) that is accessible through the
diff --git a/drivers/staging/unisys/visorinput/ultrainputreport.h b/drivers/staging/unisys/visorinput/ultrainputreport.h
index 3e6a52f4b6bf..1bc3d2064080 100644
--- a/drivers/staging/unisys/visorinput/ultrainputreport.h
+++ b/drivers/staging/unisys/visorinput/ultrainputreport.h
@@ -29,33 +29,40 @@ enum ultra_inputaction {
inputaction_mouse_button_up = 3, /* arg1: 1=left,2=center,3=right */
inputaction_mouse_button_click = 4, /* arg1: 1=left,2=center,3=right */
inputaction_mouse_button_dclick = 5, /* arg1: 1=left,2=center,
- 3=right */
+ * 3=right
+ */
inputaction_wheel_rotate_away = 6, /* arg1: wheel rotation away from
- user */
+ * user
+ */
inputaction_wheel_rotate_toward = 7, /* arg1: wheel rotation toward
- user */
+ * user
+ */
inputaction_set_max_xy = 8, /* set screen maxXY; arg1=x, arg2=y */
inputaction_key_down = 64, /* arg1: scancode, as follows:
- If arg1 <= 0xff, it's a 1-byte
- scancode and arg1 is that scancode.
- If arg1 > 0xff, it's a 2-byte
- scanecode, with the 1st byte in the
- low 8 bits, and the 2nd byte in the
- high 8 bits. E.g., the right ALT key
- would appear as x'38e0'. */
+ * If arg1 <= 0xff, it's a 1-byte
+ * scancode and arg1 is that scancode.
+ * If arg1 > 0xff, it's a 2-byte
+ * scancode, with the 1st byte in the
+ * low 8 bits, and the 2nd byte in the
+ * high 8 bits. E.g., the right ALT key
+ * would appear as x'38e0'.
+ */
inputaction_key_up = 65, /* arg1: scancode (in same format as
- inputaction_keyDown) */
+ * inputaction_keyDown)
+ */
inputaction_set_locking_key_state = 66,
/* arg1: scancode (in same format
- as inputaction_keyDown);
- MUST refer to one of the
- locking keys, like capslock,
- numlock, or scrolllock
- arg2: 1 iff locking key should be
- in the LOCKED position
- (e.g., light is ON) */
+ * as inputaction_keyDown);
+ * MUST refer to one of the
+ * locking keys, like capslock,
+ * numlock, or scrolllock
+ * arg2: 1 iff locking key should be
+ * in the LOCKED position
+ * (e.g., light is ON)
+ */
inputaction_key_down_up = 67, /* arg1: scancode (in same format
- as inputaction_keyDown) */
+ * as inputaction_keyDown)
+ */
inputaction_last
};
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 38d4d5b884df..13c0316112ac 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -222,8 +222,9 @@ static int visorinput_open(struct input_dev *visorinput_dev)
struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev);
if (!devdata) {
- pr_err("%s input_get_drvdata(%p) returned NULL\n",
- __func__, visorinput_dev);
+ dev_err(&visorinput_dev->dev,
+ "%s input_get_drvdata(%p) returned NULL\n",
+ __func__, visorinput_dev);
return -EINVAL;
}
dev_dbg(&visorinput_dev->dev, "%s opened\n", __func__);
@@ -236,8 +237,9 @@ static void visorinput_close(struct input_dev *visorinput_dev)
struct visorinput_devdata *devdata = input_get_drvdata(visorinput_dev);
if (!devdata) {
- pr_err("%s input_get_drvdata(%p) returned NULL\n",
- __func__, visorinput_dev);
+ dev_err(&visorinput_dev->dev,
+ "%s input_get_drvdata(%p) returned NULL\n",
+ __func__, visorinput_dev);
return;
}
dev_dbg(&visorinput_dev->dev, "%s closed\n", __func__);
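The pr_err()-to-dev_err() conversions above attach the failing device's name to the log line instead of emitting an anonymous message. A minimal sketch of the preferred form, assuming a hypothetical demo_open():

#include <linux/input.h>

static int demo_open(struct input_dev *idev)
{
	void *priv = input_get_drvdata(idev);

	if (!priv) {
		/* dev_err() prefixes the message with the device name */
		dev_err(&idev->dev, "%s: no driver data\n", __func__);
		return -EINVAL;
	}
	return 0;
}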
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 05194707278a..be0d057346c3 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -36,6 +36,7 @@
* = 163840 bytes
*/
#define MAX_BUF 163840
+#define NAPI_WEIGHT 64
static int visornic_probe(struct visor_device *dev);
static void visornic_remove(struct visor_device *dev);
@@ -58,8 +59,6 @@ static const struct file_operations debugfs_enable_ints_fops = {
.write = enable_ints_write,
};
-static struct workqueue_struct *visornic_timeout_reset_workqueue;
-
/* GUIDS for director channel type supported by this driver. */
static struct visor_channeltype_descriptor visornic_channel_types[] = {
/* Note that the only channel type we expect to be reported by the
@@ -376,8 +375,8 @@ visornic_serverdown(struct visornic_devdata *devdata,
__func__);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
return -EINVAL;
- } else
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ }
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
return 0;
}
@@ -761,9 +760,8 @@ static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
return devdata->chstat.sent_xmit -
devdata->chstat.got_xmit_done;
- else
- return (ULONG_MAX - devdata->chstat.got_xmit_done
- + devdata->chstat.sent_xmit + 1);
+ return (ULONG_MAX - devdata->chstat.got_xmit_done
+ + devdata->chstat.sent_xmit + 1);
}
/**
@@ -1028,7 +1026,7 @@ visornic_set_multi(struct net_device *netdev)
cmdrsp->net.type = NET_RCV_PROMISC;
cmdrsp->net.enbdis.context = netdev;
cmdrsp->net.enbdis.enable =
- (netdev->flags & IFF_PROMISC);
+ netdev->flags & IFF_PROMISC;
visorchannel_signalinsert(devdata->dev->visorchannel,
IOCHAN_TO_IOPART,
cmdrsp);
@@ -1069,7 +1067,7 @@ visornic_xmit_timeout(struct net_device *netdev)
spin_unlock_irqrestore(&devdata->priv_lock, flags);
return;
}
- queue_work(visornic_timeout_reset_workqueue, &devdata->timeout_reset);
+ schedule_work(&devdata->timeout_reset);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
}
@@ -1218,8 +1216,9 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
/* length rcvd is greater than firstfrag in this skb rcv buf */
skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that
- will be in
- frag_list */
+ * will be in
+ * frag_list
+ */
} else {
/* data fits in this skb - no chaining - do
* PRECAUTIONARY check
@@ -1315,12 +1314,14 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
}
if (found_mc)
break; /* accept packet, dest
- matches a multicast
- address */
+ * matches a multicast
+ * address
+ */
}
} else if (skb->pkt_type == PACKET_HOST) {
break; /* accept packet, h_dest must match vnic
- mac address */
+ * mac address
+ */
} else if (skb->pkt_type == PACKET_OTHERHOST) {
/* something is not right */
dev_err(&devdata->netdev->dev,
@@ -1363,7 +1364,6 @@ devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
{
if (!devdata)
return NULL;
- memset(devdata, '\0', sizeof(struct visornic_devdata));
devdata->dev = dev;
devdata->incarnation_id = get_jiffies_64();
return devdata;
@@ -1613,14 +1613,15 @@ drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
*/
static void
service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
- int *rx_work_done)
+ int *rx_work_done, int budget)
{
unsigned long flags;
struct net_device *netdev;
+ while (*rx_work_done < budget) {
/* TODO: CLIENT ACQUIRE -- Don't really need this at the
- * moment */
- for (;;) {
+ * moment
+ */
if (!visorchannel_signalremove(devdata->dev->visorchannel,
IOCHAN_FROM_IOPART,
cmdrsp))
@@ -1709,7 +1710,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
int rx_count = 0;
send_rcv_posts_if_needed(devdata);
- service_resp_queue(devdata->cmdrsp, devdata, &rx_count);
+ service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
/*
* If there aren't any more packets to receive
@@ -1768,7 +1769,7 @@ static int visornic_probe(struct visor_device *dev)
}
netdev->netdev_ops = &visornic_dev_ops;
- netdev->watchdog_timeo = (5 * HZ);
+ netdev->watchdog_timeo = 5 * HZ;
SET_NETDEV_DEV(netdev, &dev->device);
/* Get MAC address from channel and read it into the device. */
@@ -1893,6 +1894,16 @@ static int visornic_probe(struct visor_device *dev)
goto cleanup_napi_add;
}
+ /* Register the NAPI poll routine to process responses */
+ netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
+
+ /*
+ * Note: Interrupts have to be enabled before the while
+ * loop below because the napi routine is responsible for
+ * setting enab_dis_acked
+ */
+ visorbus_enable_channel_interrupts(dev);
+
err = register_netdev(netdev);
if (err) {
dev_err(&dev->device,
@@ -1984,7 +1995,7 @@ static void visornic_remove(struct visor_device *dev)
}
/* going_away prevents new items being added to the workqueues */
- flush_workqueue(visornic_timeout_reset_workqueue);
+ cancel_work_sync(&devdata->timeout_reset);
debugfs_remove_recursive(devdata->eth_debugfs_dir);
@@ -2103,21 +2114,10 @@ static int visornic_init(void)
if (!ret)
goto cleanup_debugfs;
- /* create workqueue for tx timeout reset */
- visornic_timeout_reset_workqueue =
- create_singlethread_workqueue("visornic_timeout_reset");
- if (!visornic_timeout_reset_workqueue)
- goto cleanup_workqueue;
-
err = visorbus_register_visor_driver(&visornic_driver);
if (!err)
return 0;
-cleanup_workqueue:
- if (visornic_timeout_reset_workqueue) {
- flush_workqueue(visornic_timeout_reset_workqueue);
- destroy_workqueue(visornic_timeout_reset_workqueue);
- }
cleanup_debugfs:
debugfs_remove_recursive(visornic_debugfs_dir);
@@ -2133,10 +2133,6 @@ static void visornic_cleanup(void)
{
visorbus_unregister_visor_driver(&visornic_driver);
- if (visornic_timeout_reset_workqueue) {
- flush_workqueue(visornic_timeout_reset_workqueue);
- destroy_workqueue(visornic_timeout_reset_workqueue);
- }
debugfs_remove_recursive(visornic_debugfs_dir);
}
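The budget plumbing added above is what keeps a NAPI poll routine honest: it must complete at most budget receive packets per invocation and only re-enable interrupts when it finishes under quota. A reduced sketch with hypothetical helpers:

#include <linux/netdevice.h>

struct demo_priv {
	struct napi_struct napi;
	/* ... */
};

static bool demo_rx_one(struct demo_priv *priv);	/* hypothetical */
static void demo_enable_irqs(struct demo_priv *priv);	/* hypothetical */

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
	int rx_done = 0;

	while (rx_done < budget && demo_rx_one(priv))
		rx_done++;

	if (rx_done < budget) {
		/* under quota: leave polling mode, re-enable interrupts */
		napi_complete_done(napi, rx_done);
		demo_enable_irqs(priv);
	}
	return rx_done;
}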
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 4f3cdbcedb3e..28a45689e2f4 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -215,11 +215,9 @@ static int pio2_probe(struct vme_dev *vdev)
u8 reg;
int vec;
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card) {
- retval = -ENOMEM;
- goto err_struct;
- }
+ card = devm_kzalloc(&vdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
card->id = vdev->num;
card->bus = bus[card->id];
@@ -232,8 +230,7 @@ static int pio2_probe(struct vme_dev *vdev)
for (i = 0; i < PIO2_VARIANT_LENGTH; i++) {
if (!isdigit(card->variant[i])) {
dev_err(&card->vdev->dev, "Variant invalid\n");
- retval = -EINVAL;
- goto err_variant;
+ return -EINVAL;
}
}
@@ -244,8 +241,7 @@ static int pio2_probe(struct vme_dev *vdev)
if (card->irq_vector & ~PIO2_VME_VECTOR_MASK) {
dev_err(&card->vdev->dev,
"Invalid VME IRQ Vector, vector must not use lower 4 bits\n");
- retval = -EINVAL;
- goto err_vector;
+ return -EINVAL;
}
/*
@@ -284,8 +280,7 @@ static int pio2_probe(struct vme_dev *vdev)
if (!card->window) {
dev_err(&card->vdev->dev,
"Unable to assign VME master resource\n");
- retval = -EIO;
- goto err_window;
+ return -EIO;
}
retval = vme_master_set(card->window, 1, card->base, 0x10000, VME_A24,
@@ -430,11 +425,6 @@ err_read:
vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
err_set:
vme_master_free(card->window);
-err_window:
-err_vector:
-err_variant:
- kfree(card);
-err_struct:
return retval;
}
@@ -466,8 +456,6 @@ static int pio2_remove(struct vme_dev *vdev)
vme_master_free(card->window);
- kfree(card);
-
return 0;
}
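The devm_kzalloc() conversion above is why four error labels and the kfree() in remove() could disappear: the allocation is bound to the device and released automatically on probe failure or unbind. A sketch using a generic struct device and a hypothetical demo_card:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_card { int id; };	/* hypothetical per-card state */

static int demo_probe(struct device *dev)
{
	struct demo_card *card;

	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	/* later error paths just return: devres frees card on probe
	 * failure and at unbind, so remove() needs no kfree() either
	 */
	dev_set_drvdata(dev, card);
	return 0;
}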
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index b6730a8068fd..3d338122b590 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -443,7 +443,6 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2);
MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
break;
-
}
MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
@@ -499,7 +498,6 @@ bool CARDbRadioPowerOn(struct vnt_private *priv)
MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 |
SOFTPWRCTL_SWPE3));
break;
-
}
priv->bRadioOff = false;
@@ -535,11 +533,9 @@ CARDvSafeResetTx(
}
/* set MAC TD pointer */
- MACvSetCurrTXDescAddr(TYPE_TXDMA0, priv->PortOffset,
- (priv->td0_pool_dma));
+ MACvSetCurrTXDescAddr(TYPE_TXDMA0, priv, priv->td0_pool_dma);
- MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv->PortOffset,
- (priv->td1_pool_dma));
+ MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv, priv->td1_pool_dma);
/* set MAC Beacon TX pointer */
MACvSetCurrBCNTxDescAddr(priv->PortOffset,
@@ -590,11 +586,9 @@ CARDvSafeResetRx(
MACvRx0PerPktMode(priv->PortOffset);
MACvRx1PerPktMode(priv->PortOffset);
/* set MAC RD pointer */
- MACvSetCurrRx0DescAddr(priv->PortOffset,
- priv->rd0_pool_dma);
+ MACvSetCurrRx0DescAddr(priv, priv->rd0_pool_dma);
- MACvSetCurrRx1DescAddr(priv->PortOffset,
- priv->rd1_pool_dma);
+ MACvSetCurrRx1DescAddr(priv, priv->rd1_pool_dma);
}
/*
@@ -816,7 +810,6 @@ bool CARDbIsOFDMinBasicRate(struct vnt_private *priv)
unsigned char CARDbyGetPktType(struct vnt_private *priv)
{
-
if (priv->byBBType == BB_TYPE_11A || priv->byBBType == BB_TYPE_11B)
return (unsigned char)priv->byBBType;
else if (CARDbIsOFDMinBasicRate((void *)priv))
@@ -839,8 +832,6 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv)
*/
void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode)
{
- void __iomem *dwIoBase = priv->PortOffset;
-
switch (wLoopbackMode) {
case CARD_LB_NONE:
case CARD_LB_MAC:
@@ -850,7 +841,7 @@ void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode
break;
}
/* set MAC loopback */
- MACvSetLoopbackMode(dwIoBase, LOBYTE(wLoopbackMode));
+ MACvSetLoopbackMode(priv, LOBYTE(wLoopbackMode));
/* set Baseband loopback */
}
@@ -867,9 +858,8 @@ void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode
*/
bool CARDbSoftwareReset(struct vnt_private *priv)
{
-
/* reset MAC */
- if (!MACbSafeSoftwareReset(priv->PortOffset))
+ if (!MACbSafeSoftwareReset(priv))
return false;
return true;
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 7a717828fa09..9ac1ef9d0d51 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -174,64 +174,63 @@ void vnt_init_bands(struct vnt_private *priv)
* Return Value: true if succeeded; false if failed.
*
*/
-bool set_channel(void *pDeviceHandler, struct ieee80211_channel *ch)
+bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
{
- struct vnt_private *pDevice = pDeviceHandler;
- bool bResult = true;
+ bool ret = true;
- if (pDevice->byCurrentCh == ch->hw_value)
- return bResult;
+ if (priv->byCurrentCh == ch->hw_value)
+ return ret;
/* Set VGA to max sensitivity */
- if (pDevice->bUpdateBBVGA &&
- pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
- pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+ if (priv->bUpdateBBVGA &&
+ priv->byBBVGACurrent != priv->abyBBVGA[0]) {
+ priv->byBBVGACurrent = priv->abyBBVGA[0];
- BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+ BBvSetVGAGainOffset(priv, priv->byBBVGACurrent);
}
/* clear NAV */
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
/* TX_PE will reserve 3 us for MAX2829 A mode only,
it is for better TX throughput */
- if (pDevice->byRFType == RF_AIROHA7230)
- RFbAL7230SelectChannelPostProcess(pDevice, pDevice->byCurrentCh,
+ if (priv->byRFType == RF_AIROHA7230)
+ RFbAL7230SelectChannelPostProcess(priv, priv->byCurrentCh,
ch->hw_value);
- pDevice->byCurrentCh = ch->hw_value;
- bResult &= RFbSelectChannel(pDevice, pDevice->byRFType,
- ch->hw_value);
+ priv->byCurrentCh = ch->hw_value;
+ ret &= RFbSelectChannel(priv, priv->byRFType,
+ ch->hw_value);
/* Init Synthesizer Table */
- if (pDevice->bEnablePSMode)
- RFvWriteWakeProgSyn(pDevice, pDevice->byRFType, ch->hw_value);
+ if (priv->bEnablePSMode)
+ RFvWriteWakeProgSyn(priv, priv->byRFType, ch->hw_value);
- BBvSoftwareReset(pDevice);
+ BBvSoftwareReset(priv);
- if (pDevice->byLocalID > REV_ID_VT3253_B1) {
+ if (priv->byLocalID > REV_ID_VT3253_B1) {
unsigned long flags;
- spin_lock_irqsave(&pDevice->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
/* set HW default power register */
- MACvSelectPage1(pDevice->PortOffset);
- RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWRCCK,
- pDevice->byCurPwr);
- RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
- VNSvOutPortB(pDevice->PortOffset + MAC_REG_PWROFDM,
- pDevice->byCurPwr);
- MACvSelectPage0(pDevice->PortOffset);
-
- spin_unlock_irqrestore(&pDevice->lock, flags);
+ MACvSelectPage1(priv->PortOffset);
+ RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
+ VNSvOutPortB(priv->PortOffset + MAC_REG_PWRCCK,
+ priv->byCurPwr);
+ RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
+ VNSvOutPortB(priv->PortOffset + MAC_REG_PWROFDM,
+ priv->byCurPwr);
+ MACvSelectPage0(priv->PortOffset);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
}
- if (pDevice->byBBType == BB_TYPE_11B)
- RFbSetPower(pDevice, RATE_1M, pDevice->byCurrentCh);
+ if (priv->byBBType == BB_TYPE_11B)
+ RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
else
- RFbSetPower(pDevice, RATE_6M, pDevice->byCurrentCh);
+ RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
- return bResult;
+ return ret;
}
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index e2be6fca5f26..2d613e7f169c 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -27,6 +27,6 @@
void vnt_init_bands(struct vnt_private *);
-bool set_channel(void *pDeviceHandler, struct ieee80211_channel *);
+bool set_channel(struct vnt_private *, struct ieee80211_channel *);
#endif /* _CHANNEL_H_ */
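The prototype change above swaps an opaque void *pDeviceHandler for a typed struct vnt_private *, so the compiler rejects mismatched callers instead of silently casting. In miniature (demo names, not the driver's):

#include <linux/types.h>

struct demo_priv;

/* before: any pointer compiles, errors surface only at run time */
bool demo_set_mode_old(void *handler, int mode);

/* after: passing the wrong pointer type is a compile-time error */
bool demo_set_mode(struct demo_priv *priv, int mode);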
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index fefbf826c622..c3eea07ca97e 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -211,11 +211,11 @@ static void device_init_registers(struct vnt_private *priv)
unsigned char byCCKPwrdBm = 0;
unsigned char byOFDMPwrdBm = 0;
- MACbShutdown(priv->PortOffset);
+ MACbShutdown(priv);
BBvSoftwareReset(priv);
/* Do MACbSoftwareReset in MACvInitialize */
- MACbSoftwareReset(priv->PortOffset);
+ MACbSoftwareReset(priv);
priv->bAES = false;
@@ -229,7 +229,7 @@ static void device_init_registers(struct vnt_private *priv)
priv->byTopCCKBasicRate = RATE_1M;
/* init MAC */
- MACvInitialize(priv->PortOffset);
+ MACvInitialize(priv);
/* Get Local ID */
VNSvInPortB(priv->PortOffset + MAC_REG_LOCALID, &priv->byLocalID);
@@ -357,8 +357,8 @@ static void device_init_registers(struct vnt_private *priv)
MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
/* set performance parameter by registry */
- MACvSetShortRetryLimit(priv->PortOffset, priv->byShortRetryLimit);
- MACvSetLongRetryLimit(priv->PortOffset, priv->byLongRetryLimit);
+ MACvSetShortRetryLimit(priv, priv->byShortRetryLimit);
+ MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
/* reset TSF counter */
VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
@@ -742,6 +742,11 @@ static bool device_alloc_rx_buf(struct vnt_private *priv,
dma_map_single(&priv->pcid->dev,
skb_put(rd_info->skb, skb_tailroom(rd_info->skb)),
priv->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pcid->dev, rd_info->skb_dma)) {
+ dev_kfree_skb(rd_info->skb);
+ rd_info->skb = NULL;
+ return false;
+ }
*((unsigned int *)&rd->rd0) = 0; /* FIX cast */
@@ -884,7 +889,7 @@ static void device_error(struct vnt_private *priv, unsigned short status)
if (status & ISR_FETALERR) {
dev_err(&priv->pcid->dev, "Hardware fatal error\n");
- MACbShutdown(priv->PortOffset);
+ MACbShutdown(priv);
return;
}
}
@@ -1012,7 +1017,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
if ((priv->op_mode == NL80211_IFTYPE_AP ||
priv->op_mode == NL80211_IFTYPE_ADHOC) &&
priv->vif->bss_conf.enable_beacon) {
- MACvOneShotTimer1MicroSec(priv->PortOffset,
+ MACvOneShotTimer1MicroSec(priv,
(priv->vif->bss_conf.beacon_int - MAKE_BEACON_RESERVED) << 10);
}
@@ -1166,7 +1171,7 @@ static int vnt_start(struct ieee80211_hw *hw)
if (!device_init_rings(priv))
return -ENOMEM;
- ret = request_irq(priv->pcid->irq, &vnt_interrupt,
+ ret = request_irq(priv->pcid->irq, vnt_interrupt,
IRQF_SHARED, "vt6655", priv);
if (ret) {
dev_dbg(&priv->pcid->dev, "failed to start irq\n");
@@ -1197,8 +1202,8 @@ static void vnt_stop(struct ieee80211_hw *hw)
cancel_work_sync(&priv->interrupt_work);
- MACbShutdown(priv->PortOffset);
- MACbSoftwareReset(priv->PortOffset);
+ MACbShutdown(priv);
+ MACbSoftwareReset(priv);
CARDbRadioPowerOff(priv);
device_free_td0_ring(priv);
@@ -1636,13 +1641,13 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);
/* do reset */
- if (!MACbSoftwareReset(priv->PortOffset)) {
+ if (!MACbSoftwareReset(priv)) {
dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
device_free_info(priv);
return -ENODEV;
}
/* initial to reload eeprom */
- MACvInitialize(priv->PortOffset);
+ MACvInitialize(priv);
MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
/* Get RFType */
@@ -1690,7 +1695,7 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
pci_save_state(pcid);
- MACbShutdown(priv->PortOffset);
+ MACbShutdown(priv);
pci_disable_device(pcid);
pci_set_power_state(pcid, pci_choose_state(pcid, state));
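The dma_mapping_error() check added to device_alloc_rx_buf() above closes a real hole: dma_map_single() can fail (for example under SWIOTLB exhaustion), and an unchecked handle would program a bogus bus address into the hardware. A sketch of the pattern with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static bool demo_map_rx(struct device *dev, struct sk_buff *skb,
			unsigned int len, dma_addr_t *out)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		dev_kfree_skb(skb);	/* drop the buffer, report failure */
		return false;
	}
	*out = addr;
	return true;
}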
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index f2b3fea90533..ffcaf25fdd8b 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -36,7 +36,7 @@ int vnt_key_init_table(struct vnt_private *priv)
u32 i;
for (i = 0; i < MAX_KEY_TABLE; i++)
- MACvDisableKeyEntry(priv->PortOffset, i);
+ MACvDisableKeyEntry(priv, i);
return 0;
}
@@ -104,7 +104,7 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
key->key[15] |= 0x80;
}
- MACvSetKeyEntry(priv->PortOffset, key_mode, entry, key_inx,
+ MACvSetKeyEntry(priv, key_mode, entry, key_inx,
bssid, (u32 *)key->key, priv->byLocalID);
return 0;
@@ -126,13 +126,13 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
switch (key->cipher) {
case 0:
for (u = 0 ; u < MAX_KEY_TABLE; u++)
- MACvDisableKeyEntry(priv->PortOffset, u);
+ MACvDisableKeyEntry(priv, u);
return ret;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
for (u = 0; u < MAX_KEY_TABLE; u++)
- MACvDisableKeyEntry(priv->PortOffset, u);
+ MACvDisableKeyEntry(priv, u);
vnt_set_keymode(hw, mac_addr,
key, VNT_KEY_DEFAULTKEY, KEY_CTL_WEP, true);
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 688c3be168d1..45196c6e9e12 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -61,7 +61,7 @@
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* byRegOfs - Offset of MAC Register
* byTestBits - Test bits
* Out:
@@ -70,13 +70,12 @@
* Return Value: true if all test bits On; otherwise false
*
*/
-bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs,
+bool MACbIsRegBitsOn(struct vnt_private *priv, unsigned char byRegOfs,
unsigned char byTestBits)
{
- unsigned char byData;
+ void __iomem *io_base = priv->PortOffset;
- VNSvInPortB(dwIoBase + byRegOfs, &byData);
- return (byData & byTestBits) == byTestBits;
+ return (ioread8(io_base + byRegOfs) & byTestBits) == byTestBits;
}
/*
@@ -85,7 +84,7 @@ bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs,
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* byRegOfs - Offset of MAC Register
* byTestBits - Test bits
* Out:
@@ -94,13 +93,12 @@ bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs,
* Return Value: true if all test bits Off; otherwise false
*
*/
-bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs,
+bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
unsigned char byTestBits)
{
- unsigned char byData;
+ void __iomem *io_base = priv->PortOffset;
- VNSvInPortB(dwIoBase + byRegOfs, &byData);
- return !(byData & byTestBits);
+ return !(ioread8(io_base + byRegOfs) & byTestBits);
}
/*
@@ -109,19 +107,18 @@ bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs,
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if interrupts are disabled; otherwise false
*
*/
-bool MACbIsIntDisable(void __iomem *dwIoBase)
+bool MACbIsIntDisable(struct vnt_private *priv)
{
- unsigned long dwData;
+ void __iomem *io_base = priv->PortOffset;
- VNSvInPortD(dwIoBase + MAC_REG_IMR, &dwData);
- if (dwData != 0)
+ if (ioread32(io_base + MAC_REG_IMR))
return false;
return true;
@@ -133,7 +130,7 @@ bool MACbIsIntDisable(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* byRetryLimit- Retry Limit
* Out:
* none
@@ -141,10 +138,11 @@ bool MACbIsIntDisable(void __iomem *dwIoBase)
* Return Value: none
*
*/
-void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
+void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
{
+ void __iomem *io_base = priv->PortOffset;
/* set SRT */
- VNSvOutPortB(dwIoBase + MAC_REG_SRT, byRetryLimit);
+ iowrite8(byRetryLimit, io_base + MAC_REG_SRT);
}
@@ -154,7 +152,7 @@ void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* byRetryLimit- Retry Limit
* Out:
* none
@@ -162,10 +160,11 @@ void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
* Return Value: none
*
*/
-void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
+void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
{
+ void __iomem *io_base = priv->PortOffset;
/* set LRT */
- VNSvOutPortB(dwIoBase + MAC_REG_LRT, byRetryLimit);
+ iowrite8(byRetryLimit, io_base + MAC_REG_LRT);
}
/*
@@ -174,7 +173,7 @@ void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* byLoopbackMode - Loopback Mode
* Out:
* none
@@ -182,16 +181,14 @@ void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit)
* Return Value: none
*
*/
-void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode)
+void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode)
{
- unsigned char byOrgValue;
+ void __iomem *io_base = priv->PortOffset;
byLoopbackMode <<= 6;
/* set TCR */
- VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue);
- byOrgValue = byOrgValue & 0x3F;
- byOrgValue = byOrgValue | byLoopbackMode;
- VNSvOutPortB(dwIoBase + MAC_REG_TEST, byOrgValue);
+ iowrite8((ioread8(io_base + MAC_REG_TEST) & 0x3f) | byLoopbackMode,
+ io_base + MAC_REG_TEST);
}
/*
@@ -200,29 +197,27 @@ void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
- * pbyCxtBuf - Context buffer
+ * cxt_buf - Context buffer
*
* Return Value: none
*
*/
-void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
+void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf)
{
- int ii;
+ void __iomem *io_base = priv->PortOffset;
/* read page0 register */
- for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE0; ii++)
- VNSvInPortB((dwIoBase + ii), (pbyCxtBuf + ii));
+ memcpy_fromio(cxt_buf, io_base, MAC_MAX_CONTEXT_SIZE_PAGE0);
- MACvSelectPage1(dwIoBase);
+ MACvSelectPage1(io_base);
/* read page1 register */
- for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++)
- VNSvInPortB((dwIoBase + ii),
- (pbyCxtBuf + MAC_MAX_CONTEXT_SIZE_PAGE0 + ii));
+ memcpy_fromio(cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0, io_base,
+ MAC_MAX_CONTEXT_SIZE_PAGE1);
- MACvSelectPage0(dwIoBase);
+ MACvSelectPage0(io_base);
}
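The mac.c conversions here replace the driver's VNSvInPortB()/VNSvOutPortB() wrappers with the generic ioread/iowrite accessors and collapse byte-at-a-time register loops into memcpy_fromio()/memcpy_toio(). A reduced sketch (the offsets and sizes are illustrative, not the chip's):

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_REG_CTL	0x10	/* illustrative register offset */
#define DEMO_CTX_SIZE	0x40	/* illustrative window size */

static void demo_save_regs(void __iomem *io_base, u8 *buf)
{
	/* bulk-copy a register window instead of a per-byte loop */
	memcpy_fromio(buf, io_base, DEMO_CTX_SIZE);
}

static void demo_set_ctl(void __iomem *io_base, u8 val)
{
	u8 cur = ioread8(io_base + DEMO_REG_CTL);

	/* read-modify-write in two accessor calls, no temporaries */
	iowrite8((cur & 0x3f) | val, io_base + DEMO_REG_CTL);
}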
/*
@@ -231,53 +226,50 @@ void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
- * pbyCxtBuf - Context buffer
+ * io_base - Base Address for MAC
+ * cxt_buf - Context buffer
* Out:
* none
*
* Return Value: none
*
*/
-void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
+void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf)
{
- int ii;
+ void __iomem *io_base = priv->PortOffset;
- MACvSelectPage1(dwIoBase);
+ MACvSelectPage1(io_base);
/* restore page1 */
- for (ii = 0; ii < MAC_MAX_CONTEXT_SIZE_PAGE1; ii++)
- VNSvOutPortB((dwIoBase + ii),
- *(pbyCxtBuf + MAC_MAX_CONTEXT_SIZE_PAGE0 + ii));
+ memcpy_toio(io_base, cxt_buf + MAC_MAX_CONTEXT_SIZE_PAGE0,
+ MAC_MAX_CONTEXT_SIZE_PAGE1);
- MACvSelectPage0(dwIoBase);
+ MACvSelectPage0(io_base);
/* restore RCR,TCR,IMR... */
- for (ii = MAC_REG_RCR; ii < MAC_REG_ISR; ii++)
- VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii));
+ memcpy_toio(io_base + MAC_REG_RCR, cxt_buf + MAC_REG_RCR,
+ MAC_REG_ISR - MAC_REG_RCR);
/* restore MAC Config. */
- for (ii = MAC_REG_LRT; ii < MAC_REG_PAGE1SEL; ii++)
- VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii));
+ memcpy_toio(io_base + MAC_REG_LRT, cxt_buf + MAC_REG_LRT,
+ MAC_REG_PAGE1SEL - MAC_REG_LRT);
- VNSvOutPortB(dwIoBase + MAC_REG_CFG, *(pbyCxtBuf + MAC_REG_CFG));
+ iowrite8(*(cxt_buf + MAC_REG_CFG), io_base + MAC_REG_CFG);
/* restore PS Config. */
- for (ii = MAC_REG_PSCFG; ii < MAC_REG_BBREGCTL; ii++)
- VNSvOutPortB(dwIoBase + ii, *(pbyCxtBuf + ii));
+ memcpy_toio(io_base + MAC_REG_PSCFG, cxt_buf + MAC_REG_PSCFG,
+ MAC_REG_BBREGCTL - MAC_REG_PSCFG);
/* restore CURR_RX_DESC_ADDR, CURR_TX_DESC_ADDR */
- VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0,
- *(unsigned long *)(pbyCxtBuf + MAC_REG_TXDMAPTR0));
- VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR,
- *(unsigned long *)(pbyCxtBuf + MAC_REG_AC0DMAPTR));
- VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR,
- *(unsigned long *)(pbyCxtBuf + MAC_REG_BCNDMAPTR));
-
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0,
- *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR0));
-
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1,
- *(unsigned long *)(pbyCxtBuf + MAC_REG_RXDMAPTR1));
+ iowrite32(*(u32 *)(cxt_buf + MAC_REG_TXDMAPTR0),
+ io_base + MAC_REG_TXDMAPTR0);
+ iowrite32(*(u32 *)(cxt_buf + MAC_REG_AC0DMAPTR),
+ io_base + MAC_REG_AC0DMAPTR);
+ iowrite32(*(u32 *)(cxt_buf + MAC_REG_BCNDMAPTR),
+ io_base + MAC_REG_BCNDMAPTR);
+ iowrite32(*(u32 *)(cxt_buf + MAC_REG_RXDMAPTR0),
+ io_base + MAC_REG_RXDMAPTR0);
+ iowrite32(*(u32 *)(cxt_buf + MAC_REG_RXDMAPTR1),
+ io_base + MAC_REG_RXDMAPTR1);
}
/*
@@ -286,24 +278,23 @@ void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if Reset Success; otherwise false
*
*/
-bool MACbSoftwareReset(void __iomem *dwIoBase)
+bool MACbSoftwareReset(struct vnt_private *priv)
{
- unsigned char byData;
+ void __iomem *io_base = priv->PortOffset;
unsigned short ww;
/* turn on HOSTCR_SOFTRST, just write 0x01 to reset */
- VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR, 0x01);
+ iowrite8(0x01, io_base + MAC_REG_HOSTCR);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData);
- if (!(byData & HOSTCR_SOFTRST))
+ if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_SOFTRST))
break;
}
if (ww == W_MAX_TIMEOUT)
@@ -317,14 +308,14 @@ bool MACbSoftwareReset(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeSoftwareReset(void __iomem *dwIoBase)
+bool MACbSafeSoftwareReset(struct vnt_private *priv)
{
unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1];
bool bRetVal;
@@ -334,11 +325,11 @@ bool MACbSafeSoftwareReset(void __iomem *dwIoBase)
* reset, then restore register's value
*/
/* save MAC context */
- MACvSaveContext(dwIoBase, abyTmpRegData);
+ MACvSaveContext(priv, abyTmpRegData);
/* do reset */
- bRetVal = MACbSoftwareReset(dwIoBase);
+ bRetVal = MACbSoftwareReset(priv);
/* restore MAC context, except CR0 */
- MACvRestoreContext(dwIoBase, abyTmpRegData);
+ MACvRestoreContext(priv, abyTmpRegData);
return bRetVal;
}
@@ -349,27 +340,25 @@ bool MACbSafeSoftwareReset(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeRxOff(void __iomem *dwIoBase)
+bool MACbSafeRxOff(struct vnt_private *priv)
{
+ void __iomem *io_base = priv->PortOffset;
unsigned short ww;
- unsigned long dwData;
- unsigned char byData;
/* turn off wow temp for turn off Rx safely */
/* Clear RX DMA0,1 */
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_CLRRUN);
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_CLRRUN);
+ iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_RXDMACTL0);
+ iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_RXDMACTL1);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData);
- if (!(dwData & DMACTL_RUN))
+ if (!(ioread32(io_base + MAC_REG_RXDMACTL0) & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
@@ -377,8 +366,7 @@ bool MACbSafeRxOff(void __iomem *dwIoBase)
return false;
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData);
- if (!(dwData & DMACTL_RUN))
+ if (!(ioread32(io_base + MAC_REG_RXDMACTL1) & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
@@ -387,11 +375,10 @@ bool MACbSafeRxOff(void __iomem *dwIoBase)
}
/* try to safe shutdown RX */
- MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_RXON);
+ MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_RXON);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData);
- if (!(byData & HOSTCR_RXONST))
+ if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_RXONST))
break;
}
if (ww == W_MAX_TIMEOUT) {
@@ -407,28 +394,26 @@ bool MACbSafeRxOff(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeTxOff(void __iomem *dwIoBase)
+bool MACbSafeTxOff(struct vnt_private *priv)
{
+ void __iomem *io_base = priv->PortOffset;
unsigned short ww;
- unsigned long dwData;
- unsigned char byData;
/* Clear TX DMA */
/* Tx0 */
- VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_CLRRUN);
+ iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_TXDMACTL0);
/* AC0 */
- VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_CLRRUN);
+ iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_AC0DMACTL);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData);
- if (!(dwData & DMACTL_RUN))
+ if (!(ioread32(io_base + MAC_REG_TXDMACTL0) & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
@@ -436,8 +421,7 @@ bool MACbSafeTxOff(void __iomem *dwIoBase)
return false;
}
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData);
- if (!(dwData & DMACTL_RUN))
+ if (!(ioread32(io_base + MAC_REG_AC0DMACTL) & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT) {
@@ -446,12 +430,11 @@ bool MACbSafeTxOff(void __iomem *dwIoBase)
}
/* try to safe shutdown TX */
- MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_TXON);
+ MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_TXON);
/* W_MAX_TIMEOUT is the timeout period */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_HOSTCR, &byData);
- if (!(byData & HOSTCR_TXONST))
+ if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_TXONST))
break;
}
if (ww == W_MAX_TIMEOUT) {
@@ -467,29 +450,31 @@ bool MACbSafeTxOff(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
-bool MACbSafeStop(void __iomem *dwIoBase)
+bool MACbSafeStop(struct vnt_private *priv)
{
- MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX);
+ void __iomem *io_base = priv->PortOffset;
- if (!MACbSafeRxOff(dwIoBase)) {
+ MACvRegBitsOff(io_base, MAC_REG_TCR, TCR_AUTOBCNTX);
+
+ if (!MACbSafeRxOff(priv)) {
pr_debug(" MACbSafeRxOff == false)\n");
- MACbSafeSoftwareReset(dwIoBase);
+ MACbSafeSoftwareReset(priv);
return false;
}
- if (!MACbSafeTxOff(dwIoBase)) {
+ if (!MACbSafeTxOff(priv)) {
pr_debug(" MACbSafeTxOff == false)\n");
- MACbSafeSoftwareReset(dwIoBase);
+ MACbSafeSoftwareReset(priv);
return false;
}
- MACvRegBitsOff(dwIoBase, MAC_REG_HOSTCR, HOSTCR_MACEN);
+ MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_MACEN);
return true;
}
@@ -500,24 +485,25 @@ bool MACbSafeStop(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
* none
*
* Return Value: true if success; otherwise false
*
*/
-bool MACbShutdown(void __iomem *dwIoBase)
+bool MACbShutdown(struct vnt_private *priv)
{
+ void __iomem *io_base = priv->PortOffset;
/* disable MAC IMR */
- MACvIntDisable(dwIoBase);
- MACvSetLoopbackMode(dwIoBase, MAC_LB_INTERNAL);
+ MACvIntDisable(io_base);
+ MACvSetLoopbackMode(priv, MAC_LB_INTERNAL);
/* stop the adapter */
- if (!MACbSafeStop(dwIoBase)) {
- MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE);
+ if (!MACbSafeStop(priv)) {
+ MACvSetLoopbackMode(priv, MAC_LB_NONE);
return false;
}
- MACvSetLoopbackMode(dwIoBase, MAC_LB_NONE);
+ MACvSetLoopbackMode(priv, MAC_LB_NONE);
return true;
}
@@ -527,28 +513,29 @@ bool MACbShutdown(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* Out:
* none
*
* Return Value: none
*
*/
-void MACvInitialize(void __iomem *dwIoBase)
+void MACvInitialize(struct vnt_private *priv)
{
+ void __iomem *io_base = priv->PortOffset;
/* clear sticky bits */
- MACvClearStckDS(dwIoBase);
+ MACvClearStckDS(io_base);
/* disable force PME-enable */
- VNSvOutPortB(dwIoBase + MAC_REG_PMC1, PME_OVR);
+ iowrite8(PME_OVR, io_base + MAC_REG_PMC1);
/* only 3253 A */
/* do reset */
- MACbSoftwareReset(dwIoBase);
+ MACbSoftwareReset(priv);
/* reset TSF counter */
- VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ iowrite8(TFTCTL_TSFCNTRST, io_base + MAC_REG_TFTCTL);
/* enable TSF counter */
- VNSvOutPortB(dwIoBase + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ iowrite8(TFTCTL_TSFCNTREN, io_base + MAC_REG_TFTCTL);
}
/*
@@ -557,33 +544,32 @@ void MACvInitialize(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
- * dwCurrDescAddr - Descriptor Address
+ * io_base - Base Address for MAC
+ * curr_desc_addr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
-void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr)
+void MACvSetCurrRx0DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
{
+ void __iomem *io_base = priv->PortOffset;
unsigned short ww;
- unsigned char byData;
- unsigned char byOrgDMACtl;
+ unsigned char org_dma_ctl;
- VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL0, &byOrgDMACtl);
- if (byOrgDMACtl & DMACTL_RUN)
- VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL0+2, DMACTL_RUN);
+ org_dma_ctl = ioread8(io_base + MAC_REG_RXDMACTL0);
+ if (org_dma_ctl & DMACTL_RUN)
+ iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL0 + 2);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL0, &byData);
- if (!(byData & DMACTL_RUN))
+ if (!(ioread8(io_base + MAC_REG_RXDMACTL0) & DMACTL_RUN))
break;
}
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, dwCurrDescAddr);
- if (byOrgDMACtl & DMACTL_RUN)
- VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN);
+ iowrite32(curr_desc_addr, io_base + MAC_REG_RXDMAPTR0);
+ if (org_dma_ctl & DMACTL_RUN)
+ iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL0);
}
/*
@@ -592,33 +578,32 @@ void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
- * dwCurrDescAddr - Descriptor Address
+ * io_base - Base Address for MAC
+ * curr_desc_addr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
-void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr)
+void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
{
+ void __iomem *io_base = priv->PortOffset;
unsigned short ww;
- unsigned char byData;
- unsigned char byOrgDMACtl;
+ unsigned char org_dma_ctl;
- VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL1, &byOrgDMACtl);
- if (byOrgDMACtl & DMACTL_RUN)
- VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL1+2, DMACTL_RUN);
+ org_dma_ctl = ioread8(io_base + MAC_REG_RXDMACTL1);
+ if (org_dma_ctl & DMACTL_RUN)
+ iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL1 + 2);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_RXDMACTL1, &byData);
- if (!(byData & DMACTL_RUN))
+ if (!(ioread8(io_base + MAC_REG_RXDMACTL1) & DMACTL_RUN))
break;
}
- VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, dwCurrDescAddr);
- if (byOrgDMACtl & DMACTL_RUN)
- VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_RUN);
+ iowrite32(curr_desc_addr, io_base + MAC_REG_RXDMAPTR1);
+ if (org_dma_ctl & DMACTL_RUN)
+ iowrite8(DMACTL_RUN, io_base + MAC_REG_RXDMACTL1);
}
@@ -628,34 +613,33 @@ void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase, unsigned long dwCurrDescAddr
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
- * dwCurrDescAddr - Descriptor Address
+ * io_base - Base Address for MAC
+ * curr_desc_addr - Descriptor Address
* Out:
* none
*
* Return Value: none
*
*/
-void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr)
+void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
+ u32 curr_desc_addr)
{
+ void __iomem *io_base = priv->PortOffset;
unsigned short ww;
- unsigned char byData;
- unsigned char byOrgDMACtl;
+ unsigned char org_dma_ctl;
- VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byOrgDMACtl);
- if (byOrgDMACtl & DMACTL_RUN)
- VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0+2, DMACTL_RUN);
+ org_dma_ctl = ioread8(io_base + MAC_REG_TXDMACTL0);
+ if (org_dma_ctl & DMACTL_RUN)
+ iowrite8(DMACTL_RUN, io_base + MAC_REG_TXDMACTL0 + 2);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_TXDMACTL0, &byData);
- if (!(byData & DMACTL_RUN))
+ if (!(ioread8(io_base + MAC_REG_TXDMACTL0) & DMACTL_RUN))
break;
}
- VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, dwCurrDescAddr);
- if (byOrgDMACtl & DMACTL_RUN)
- VNSvOutPortB(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_RUN);
+ iowrite32(curr_desc_addr, io_base + MAC_REG_TXDMAPTR0);
+ if (org_dma_ctl & DMACTL_RUN)
+ iowrite8(DMACTL_RUN, io_base + MAC_REG_TXDMACTL0);
}
/*
@@ -664,8 +648,8 @@ void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase,
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
- * dwCurrDescAddr - Descriptor Address
+ * io_base - Base Address for MAC
+ * curr_desc_addr - Descriptor Address
* Out:
* none
*
@@ -673,36 +657,35 @@ void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase,
*
*/
/* TxDMA1 = AC0DMA */
-void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr)
+void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
+ u32 curr_desc_addr)
{
+ void __iomem *io_base = priv->PortOffset;
unsigned short ww;
- unsigned char byData;
- unsigned char byOrgDMACtl;
+ unsigned char org_dma_ctl;
- VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byOrgDMACtl);
- if (byOrgDMACtl & DMACTL_RUN)
- VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL+2, DMACTL_RUN);
+ org_dma_ctl = ioread8(io_base + MAC_REG_AC0DMACTL);
+ if (org_dma_ctl & DMACTL_RUN)
+ iowrite8(DMACTL_RUN, io_base + MAC_REG_AC0DMACTL + 2);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_AC0DMACTL, &byData);
- if (!(byData & DMACTL_RUN))
+ if (!(ioread8(io_base + MAC_REG_AC0DMACTL) & DMACTL_RUN))
break;
}
if (ww == W_MAX_TIMEOUT)
pr_debug(" DBG_PORT80(0x26)\n");
- VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, dwCurrDescAddr);
- if (byOrgDMACtl & DMACTL_RUN)
- VNSvOutPortB(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN);
+ iowrite32(curr_desc_addr, io_base + MAC_REG_AC0DMAPTR);
+ if (org_dma_ctl & DMACTL_RUN)
+ iowrite8(DMACTL_RUN, io_base + MAC_REG_AC0DMACTL);
}
-void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr)
+void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv,
+ u32 curr_desc_addr)
{
if (iTxType == TYPE_AC0DMA)
- MACvSetCurrAC0DescAddrEx(dwIoBase, dwCurrDescAddr);
+ MACvSetCurrAC0DescAddrEx(priv, curr_desc_addr);
else if (iTxType == TYPE_TXDMA0)
- MACvSetCurrTx0DescAddrEx(dwIoBase, dwCurrDescAddr);
+ MACvSetCurrTx0DescAddrEx(priv, curr_desc_addr);
}
/*
@@ -711,7 +694,7 @@ void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase,
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* uDelay - Delay time (timer resolution is 4 us)
* Out:
* none
@@ -719,25 +702,26 @@ void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase,
* Return Value: none
*
*/
-void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay)
+void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
{
+ void __iomem *io_base = priv->PortOffset;
unsigned char byValue;
unsigned int uu, ii;
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
- VNSvOutPortD(dwIoBase + MAC_REG_TMDATA0, uDelay);
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, (TMCTL_TMD | TMCTL_TE));
+ iowrite8(0, io_base + MAC_REG_TMCTL0);
+ iowrite32(uDelay, io_base + MAC_REG_TMDATA0);
+ iowrite8((TMCTL_TMD | TMCTL_TE), io_base + MAC_REG_TMCTL0);
 	for (ii = 0; ii < 66; ii++) { /* assume max PCI clock is 66 MHz */
for (uu = 0; uu < uDelay; uu++) {
- VNSvInPortB(dwIoBase + MAC_REG_TMCTL0, &byValue);
+ byValue = ioread8(io_base + MAC_REG_TMCTL0);
if ((byValue == 0) ||
(byValue & TMCTL_TSUSP)) {
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
+ iowrite8(0, io_base + MAC_REG_TMCTL0);
return;
}
}
}
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL0, 0);
+ iowrite8(0, io_base + MAC_REG_TMCTL0);
}
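
MACvTimer0MicroSDelay arms hardware timer 0 with the requested count and then busy-polls TMCTL0 until the status byte reads back zero (expired) or the suspend bit is set; the outer loop of 66 iterations merely bounds the wait against the fastest expected PCI clock. A runnable model of that bounded double poll loop, with a stub standing in for the register read:

    #include <stdbool.h>
    #include <stdio.h>

    #define TMCTL_TSUSP 0x08    /* hypothetical suspend bit */

    /* stub for the TMCTL0 read: the timer "expires" after 100 polls */
    static unsigned char read_timer_status(void)
    {
        static int polls;

        return (++polls >= 100) ? 0 : 0x01;
    }

    /* Poll until the status byte reads 0 (expired) or suspend is set,
     * giving up after max_clocks * delay polls. */
    static bool wait_timer(unsigned int delay, unsigned int max_clocks)
    {
        unsigned int clk, tick;

        for (clk = 0; clk < max_clocks; clk++) {
            for (tick = 0; tick < delay; tick++) {
                unsigned char st = read_timer_status();

                if (st == 0 || (st & TMCTL_TSUSP))
                    return true;    /* timer done or suspended */
            }
        }
        return false;               /* bounded give-up */
    }

    int main(void)
    {
        printf("expired in time: %d\n", wait_timer(50, 66));
        return 0;
    }
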
/*
@@ -746,7 +730,7 @@ void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
* uDelay - Delay time
* Out:
* none
@@ -754,38 +738,41 @@ void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay)
* Return Value: none
*
*/
-void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime)
+void MACvOneShotTimer1MicroSec(struct vnt_private *priv, unsigned int uDelayTime)
{
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, 0);
- VNSvOutPortD(dwIoBase + MAC_REG_TMDATA1, uDelayTime);
- VNSvOutPortB(dwIoBase + MAC_REG_TMCTL1, (TMCTL_TMD | TMCTL_TE));
+ void __iomem *io_base = priv->PortOffset;
+
+ iowrite8(0, io_base + MAC_REG_TMCTL1);
+ iowrite32(uDelayTime, io_base + MAC_REG_TMDATA1);
+ iowrite8((TMCTL_TMD | TMCTL_TE), io_base + MAC_REG_TMCTL1);
}
-void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset,
- unsigned long dwData)
+void MACvSetMISCFifo(struct vnt_private *priv, unsigned short offset,
+ u32 data)
{
- if (wOffset > 273)
+ void __iomem *io_base = priv->PortOffset;
+
+ if (offset > 273)
return;
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
+ iowrite16(offset, io_base + MAC_REG_MISCFFNDEX);
+ iowrite32(data, io_base + MAC_REG_MISCFFDATA);
+ iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
}
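
MACvSetMISCFifo is the driver's indexed-FIFO access protocol in miniature: select the slot via the index register, load the data register, then strobe the control register to commit the write. A hedged sketch with invented register names, assuming the same three-step ordering:

    #include <linux/io.h>
    #include <linux/types.h>

    #define FF_NDEX  0x00    /* hypothetical index register */
    #define FF_DATA  0x04    /* hypothetical data register */
    #define FF_CTL   0x08    /* hypothetical control register */
    #define FF_WRITE 0x01    /* hypothetical write-commit strobe */

    /* Indexed FIFO write: select slot, load data, strobe the commit bit. */
    static void fifo_write(void __iomem *io, u16 index, u32 data)
    {
        iowrite16(index, io + FF_NDEX);
        iowrite32(data, io + FF_DATA);
        iowrite16(FF_WRITE, io + FF_CTL);
    }
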
-bool MACbPSWakeup(void __iomem *dwIoBase)
+bool MACbPSWakeup(struct vnt_private *priv)
{
- unsigned char byOrgValue;
+ void __iomem *io_base = priv->PortOffset;
unsigned int ww;
/* Read PSCTL */
- if (MACbIsRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PS))
+ if (MACbIsRegBitsOff(priv, MAC_REG_PSCTL, PSCTL_PS))
return true;
/* Disable PS */
- MACvRegBitsOff(dwIoBase, MAC_REG_PSCTL, PSCTL_PSEN);
+ MACvRegBitsOff(io_base, MAC_REG_PSCTL, PSCTL_PSEN);
/* Check if SyncFlushOK */
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(dwIoBase + MAC_REG_PSCTL, &byOrgValue);
- if (byOrgValue & PSCTL_WAKEDONE)
+ if (ioread8(io_base + MAC_REG_PSCTL) & PSCTL_WAKEDONE)
break;
}
if (ww == W_MAX_TIMEOUT) {
@@ -801,7 +788,7 @@ bool MACbPSWakeup(void __iomem *dwIoBase)
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
*
* Out:
* none
@@ -810,57 +797,58 @@ bool MACbPSWakeup(void __iomem *dwIoBase)
*
*/
-void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl,
+void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl,
unsigned int uEntryIdx, unsigned int uKeyIdx,
unsigned char *pbyAddr, u32 *pdwKey,
unsigned char byLocalID)
{
- unsigned short wOffset;
- u32 dwData;
+ void __iomem *io_base = priv->PortOffset;
+ unsigned short offset;
+ u32 data;
int ii;
if (byLocalID <= 1)
return;
pr_debug("MACvSetKeyEntry\n");
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
-
- dwData = 0;
- dwData |= wKeyCtl;
- dwData <<= 16;
- dwData |= MAKEWORD(*(pbyAddr+4), *(pbyAddr+5));
- pr_debug("1. wOffset: %d, Data: %X, KeyCtl:%X\n",
- wOffset, dwData, wKeyCtl);
-
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- wOffset++;
-
- dwData = 0;
- dwData |= *(pbyAddr+3);
- dwData <<= 8;
- dwData |= *(pbyAddr+2);
- dwData <<= 8;
- dwData |= *(pbyAddr+1);
- dwData <<= 8;
- dwData |= *(pbyAddr+0);
- pr_debug("2. wOffset: %d, Data: %X\n", wOffset, dwData);
-
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, dwData);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
- wOffset++;
-
- wOffset += (uKeyIdx * 4);
+ offset = MISCFIFO_KEYETRY0;
+ offset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
+
+ data = 0;
+ data |= wKeyCtl;
+ data <<= 16;
+ data |= MAKEWORD(*(pbyAddr + 4), *(pbyAddr + 5));
+ pr_debug("1. offset: %d, Data: %X, KeyCtl:%X\n",
+ offset, data, wKeyCtl);
+
+ iowrite16(offset, io_base + MAC_REG_MISCFFNDEX);
+ iowrite32(data, io_base + MAC_REG_MISCFFDATA);
+ iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
+ offset++;
+
+ data = 0;
+ data |= *(pbyAddr + 3);
+ data <<= 8;
+ data |= *(pbyAddr + 2);
+ data <<= 8;
+ data |= *(pbyAddr + 1);
+ data <<= 8;
+ data |= *pbyAddr;
+ pr_debug("2. offset: %d, Data: %X\n", offset, data);
+
+ iowrite16(offset, io_base + MAC_REG_MISCFFNDEX);
+ iowrite32(data, io_base + MAC_REG_MISCFFDATA);
+ iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
+ offset++;
+
+ offset += (uKeyIdx * 4);
for (ii = 0; ii < 4; ii++) {
/* always push 128 bits */
- pr_debug("3.(%d) wOffset: %d, Data: %X\n",
- ii, wOffset+ii, *pdwKey);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset+ii);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, *pdwKey++);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
+ pr_debug("3.(%d) offset: %d, Data: %X\n",
+ ii, offset + ii, *pdwKey);
+ iowrite16(offset + ii, io_base + MAC_REG_MISCFFNDEX);
+ iowrite32(*pdwKey++, io_base + MAC_REG_MISCFFDATA);
+ iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
}
}
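
The key-entry writer packs the 6-byte MAC address into two 32-bit FIFO words: word 0 carries the key-control field in its high half and address bytes 4-5 in its low half, word 1 holds bytes 3 down to 0, most significant byte first. A runnable illustration, assuming MAKEWORD combines its first argument as the low byte (matching the MAKE_WORD16 macro that appears later in this series):

    #include <stdint.h>
    #include <stdio.h>

    /* assumed byte order: first argument is the low byte */
    #define MAKEWORD(lo, hi) \
        ((uint16_t)((uint8_t)(lo) | ((uint16_t)(uint8_t)(hi) << 8)))

    int main(void)
    {
        const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint16_t key_ctl = 0xC000;    /* example control bits */
        uint32_t w0, w1;

        /* word 0: key control in the high half, addr[4..5] in the low half */
        w0 = ((uint32_t)key_ctl << 16) | MAKEWORD(addr[4], addr[5]);

        /* word 1: addr[3] down to addr[0], most significant byte first */
        w1 = ((uint32_t)addr[3] << 24) | ((uint32_t)addr[2] << 16) |
             ((uint32_t)addr[1] << 8) | addr[0];

        printf("w0=%08X w1=%08X\n", (unsigned)w0, (unsigned)w1);
        /* prints: w0=C0005544 w1=33221100 */
        return 0;
    }
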
@@ -870,7 +858,7 @@ void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl,
*
* Parameters:
* In:
- * dwIoBase - Base Address for MAC
+ * io_base - Base Address for MAC
*
* Out:
* none
@@ -878,14 +866,15 @@ void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl,
* Return Value: none
*
*/
-void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx)
+void MACvDisableKeyEntry(struct vnt_private *priv, unsigned int uEntryIdx)
{
- unsigned short wOffset;
+ void __iomem *io_base = priv->PortOffset;
+ unsigned short offset;
- wOffset = MISCFIFO_KEYETRY0;
- wOffset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
+ offset = MISCFIFO_KEYETRY0;
+ offset += (uEntryIdx * MISCFIFO_KEYENTRYSIZE);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, wOffset);
- VNSvOutPortD(dwIoBase + MAC_REG_MISCFFDATA, 0);
- VNSvOutPortW(dwIoBase + MAC_REG_MISCFFCTL, MISCFFCTL_WRITE);
+ iowrite16(offset, io_base + MAC_REG_MISCFFNDEX);
+ iowrite32(0, io_base + MAC_REG_MISCFFDATA);
+ iowrite16(MISCFFCTL_WRITE, io_base + MAC_REG_MISCFFCTL);
}
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 8e0200a78b19..030f529c339b 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -890,57 +890,57 @@ do { \
#define MACvSetRFLE_LatchBase(dwIoBase) \
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
-bool MACbIsRegBitsOn(void __iomem *dwIoBase, unsigned char byRegOfs,
+bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs,
unsigned char byTestBits);
-bool MACbIsRegBitsOff(void __iomem *dwIoBase, unsigned char byRegOfs,
+bool MACbIsRegBitsOff(struct vnt_private *, unsigned char byRegOfs,
unsigned char byTestBits);
-bool MACbIsIntDisable(void __iomem *dwIoBase);
+bool MACbIsIntDisable(struct vnt_private *);
-void MACvSetShortRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
+void MACvSetShortRetryLimit(struct vnt_private *, unsigned char byRetryLimit);
-void MACvSetLongRetryLimit(void __iomem *dwIoBase, unsigned char byRetryLimit);
-void MACvGetLongRetryLimit(void __iomem *dwIoBase,
+void MACvSetLongRetryLimit(struct vnt_private *, unsigned char byRetryLimit);
+void MACvGetLongRetryLimit(struct vnt_private *,
unsigned char *pbyRetryLimit);
-void MACvSetLoopbackMode(void __iomem *dwIoBase, unsigned char byLoopbackMode);
-
-void MACvSaveContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
-void MACvRestoreContext(void __iomem *dwIoBase, unsigned char *pbyCxtBuf);
-
-bool MACbSoftwareReset(void __iomem *dwIoBase);
-bool MACbSafeSoftwareReset(void __iomem *dwIoBase);
-bool MACbSafeRxOff(void __iomem *dwIoBase);
-bool MACbSafeTxOff(void __iomem *dwIoBase);
-bool MACbSafeStop(void __iomem *dwIoBase);
-bool MACbShutdown(void __iomem *dwIoBase);
-void MACvInitialize(void __iomem *dwIoBase);
-void MACvSetCurrRx0DescAddr(void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr);
-void MACvSetCurrRx1DescAddr(void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr);
-void MACvSetCurrTXDescAddr(int iTxType, void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr);
-void MACvSetCurrTx0DescAddrEx(void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr);
-void MACvSetCurrAC0DescAddrEx(void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr);
-void MACvSetCurrSyncDescAddrEx(void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr);
-void MACvSetCurrATIMDescAddrEx(void __iomem *dwIoBase,
- unsigned long dwCurrDescAddr);
-void MACvTimer0MicroSDelay(void __iomem *dwIoBase, unsigned int uDelay);
-void MACvOneShotTimer1MicroSec(void __iomem *dwIoBase, unsigned int uDelayTime);
-
-void MACvSetMISCFifo(void __iomem *dwIoBase, unsigned short wOffset,
- unsigned long dwData);
-
-bool MACbPSWakeup(void __iomem *dwIoBase);
-
-void MACvSetKeyEntry(void __iomem *dwIoBase, unsigned short wKeyCtl,
+void MACvSetLoopbackMode(struct vnt_private *, unsigned char byLoopbackMode);
+
+void MACvSaveContext(struct vnt_private *, unsigned char *pbyCxtBuf);
+void MACvRestoreContext(struct vnt_private *, unsigned char *pbyCxtBuf);
+
+bool MACbSoftwareReset(struct vnt_private *);
+bool MACbSafeSoftwareReset(struct vnt_private *);
+bool MACbSafeRxOff(struct vnt_private *);
+bool MACbSafeTxOff(struct vnt_private *);
+bool MACbSafeStop(struct vnt_private *);
+bool MACbShutdown(struct vnt_private *);
+void MACvInitialize(struct vnt_private *);
+void MACvSetCurrRx0DescAddr(struct vnt_private *,
+ u32 curr_desc_addr);
+void MACvSetCurrRx1DescAddr(struct vnt_private *,
+ u32 curr_desc_addr);
+void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *,
+ u32 curr_desc_addr);
+void MACvSetCurrTx0DescAddrEx(struct vnt_private *,
+ u32 curr_desc_addr);
+void MACvSetCurrAC0DescAddrEx(struct vnt_private *,
+ u32 curr_desc_addr);
+void MACvSetCurrSyncDescAddrEx(struct vnt_private *,
+ u32 curr_desc_addr);
+void MACvSetCurrATIMDescAddrEx(struct vnt_private *,
+ u32 curr_desc_addr);
+void MACvTimer0MicroSDelay(struct vnt_private *, unsigned int uDelay);
+void MACvOneShotTimer1MicroSec(struct vnt_private *, unsigned int uDelayTime);
+
+void MACvSetMISCFifo(struct vnt_private *, unsigned short wOffset,
+ u32 dwData);
+
+bool MACbPSWakeup(struct vnt_private *);
+
+void MACvSetKeyEntry(struct vnt_private *, unsigned short wKeyCtl,
unsigned int uEntryIdx, unsigned int uKeyIdx,
unsigned char *pbyAddr, u32 *pdwKey,
unsigned char byLocalID);
-void MACvDisableKeyEntry(void __iomem *dwIoBase, unsigned int uEntryIdx);
+void MACvDisableKeyEntry(struct vnt_private *, unsigned int uEntryIdx);
#endif /* __MAC_H__ */
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 06e6b9d871c4..bc8ca981a629 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -64,44 +64,43 @@
void
PSvEnablePowerSaving(
- void *hDeviceContext,
+ struct vnt_private *priv,
unsigned short wListenInterval
)
{
- struct vnt_private *pDevice = hDeviceContext;
- u16 wAID = pDevice->current_aid | BIT(14) | BIT(15);
+ u16 wAID = priv->current_aid | BIT(14) | BIT(15);
/* set period of power up before TBTT */
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_PWBT, C_PWBT);
- if (pDevice->op_mode != NL80211_IFTYPE_ADHOC) {
+ VNSvOutPortW(priv->PortOffset + MAC_REG_PWBT, C_PWBT);
+ if (priv->op_mode != NL80211_IFTYPE_ADHOC) {
/* set AID */
- VNSvOutPortW(pDevice->PortOffset + MAC_REG_AIDATIM, wAID);
+ VNSvOutPortW(priv->PortOffset + MAC_REG_AIDATIM, wAID);
} else {
/* set ATIM Window */
#if 0 /* TODO atim window */
- MACvWriteATIMW(pDevice->PortOffset, pMgmt->wCurrATIMWindow);
+ MACvWriteATIMW(priv->PortOffset, pMgmt->wCurrATIMWindow);
#endif
}
/* Set AutoSleep */
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
/* Set HWUTSF */
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
if (wListenInterval >= 2) {
/* clear always listen beacon */
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
/* first time set listen next beacon */
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
} else {
/* always listen beacon */
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
}
/* enable power saving hw function */
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
- pDevice->bEnablePSMode = true;
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
+ priv->bEnablePSMode = true;
- pDevice->bPWBitOn = true;
+ priv->bPWBitOn = true;
pr_debug("PS:Power Saving Mode Enable...\n");
}
@@ -117,23 +116,21 @@ PSvEnablePowerSaving(
void
PSvDisablePowerSaving(
- void *hDeviceContext
+ struct vnt_private *priv
)
{
- struct vnt_private *pDevice = hDeviceContext;
-
/* disable power saving hw function */
- MACbPSWakeup(pDevice->PortOffset);
+ MACbPSWakeup(priv);
/* clear AutoSleep */
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
/* clear HWUTSF */
- MACvRegBitsOff(pDevice->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
/* set always listen beacon */
- MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
- pDevice->bEnablePSMode = false;
+ priv->bEnablePSMode = false;
- pDevice->bPWBitOn = false;
+ priv->bPWBitOn = false;
}
@@ -149,27 +146,26 @@ PSvDisablePowerSaving(
bool
PSbIsNextTBTTWakeUp(
- void *hDeviceContext
+ struct vnt_private *priv
)
{
- struct vnt_private *pDevice = hDeviceContext;
- struct ieee80211_hw *hw = pDevice->hw;
+ struct ieee80211_hw *hw = priv->hw;
struct ieee80211_conf *conf = &hw->conf;
- bool bWakeUp = false;
+ bool wake_up = false;
if (conf->listen_interval > 1) {
- if (!pDevice->wake_up_count)
- pDevice->wake_up_count = conf->listen_interval;
+ if (!priv->wake_up_count)
+ priv->wake_up_count = conf->listen_interval;
- --pDevice->wake_up_count;
+ --priv->wake_up_count;
- if (pDevice->wake_up_count == 1) {
+ if (priv->wake_up_count == 1) {
/* Turn on wake up to listen next beacon */
- MACvRegBitsOn(pDevice->PortOffset,
+ MACvRegBitsOn(priv->PortOffset,
MAC_REG_PSCTL, PSCTL_LNBCN);
- bWakeUp = true;
+ wake_up = true;
}
}
- return bWakeUp;
+ return wake_up;
}
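
PSbIsNextTBTTWakeUp is a listen-interval countdown: the counter is re-armed from conf->listen_interval whenever it hits zero, decremented once per TBTT, and the function signals a wake-up one beacon early so the hardware can be told to listen for the next beacon. A small runnable model of the same logic:

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns true on the TBTT where the device must wake to catch
     * the next beacon; mirrors the countdown in PSbIsNextTBTTWakeUp. */
    static bool next_tbtt_wakeup(unsigned int *count, int listen_interval)
    {
        if (listen_interval <= 1)
            return false;              /* always listening; no countdown */

        if (*count == 0)
            *count = listen_interval;  /* re-arm the counter */

        --*count;
        return *count == 1;            /* wake one beacon early */
    }

    int main(void)
    {
        unsigned int count = 0;
        int tbtt;

        for (tbtt = 0; tbtt < 8; tbtt++)
            printf("TBTT %d: wake=%d\n", tbtt,
                   next_tbtt_wakeup(&count, 3));
        return 0;
    }
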
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index 538e68507bb0..d82dd8d6d68b 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -29,25 +29,27 @@
#ifndef __POWER_H__
#define __POWER_H__
+#include "device.h"
+
#define C_PWBT 1000 /* micro sec. power up before TBTT */
#define PS_FAST_INTERVAL 1 /* Fast power saving listen interval */
#define PS_MAX_INTERVAL 4 /* MAX power saving listen interval */
void
PSvDisablePowerSaving(
- void *hDeviceContext
+ struct vnt_private *
);
void
PSvEnablePowerSaving(
- void *hDeviceContext,
+ struct vnt_private *,
unsigned short wListenInterval
);
bool
PSbIsNextTBTTWakeUp(
- void *hDeviceContext
+ struct vnt_private *
);
#endif /* __POWER_H__ */
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 4c22bb318c79..ae10da21ddd0 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -420,9 +420,9 @@ static bool s_bAL7230Init(struct vnt_private *priv)
{
void __iomem *dwIoBase = priv->PortOffset;
int ii;
- bool bResult;
+ bool ret;
- bResult = true;
+ ret = true;
/* 3-wire control for normal mode */
VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
@@ -432,21 +432,21 @@ static bool s_bAL7230Init(struct vnt_private *priv)
BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
/* PLL On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* Calibration */
- MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */
+ MACvTimer0MicroSDelay(priv, 150);/* 150us */
/* TXDCOC:active, RCK:disable */
- bResult &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
- MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
+ ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+ MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:active */
- bResult &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
- MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
+ ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+ MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:disable */
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
@@ -459,7 +459,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
/* 3-wire control for power saving mode */
VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
- return bResult;
+ return ret;
}
/* Need to Pull PLLON low when writing channel registers through
@@ -467,27 +467,27 @@ static bool s_bAL7230Init(struct vnt_private *priv)
static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
void __iomem *dwIoBase = priv->PortOffset;
- bool bResult;
+ bool ret;
- bResult = true;
+ ret = true;
/* PLLON Off */
MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
/* PLLOn On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
- MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL7230);
+ MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230);
/* Set Channel[7] = 1 to tell H/W channel change is done. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
- return bResult;
+ return ret;
}
/*
@@ -540,9 +540,9 @@ static bool RFbAL2230Init(struct vnt_private *priv)
{
void __iomem *dwIoBase = priv->PortOffset;
int ii;
- bool bResult;
+ bool ret;
- bResult = true;
+ ret = true;
/* 3-wire control for normal mode */
VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
@@ -556,18 +556,18 @@ static bool RFbAL2230Init(struct vnt_private *priv)
IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
- bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
- MACvTimer0MicroSDelay(dwIoBase, 30); /* delay 30 us */
+ ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
+ MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
/* PLL On */
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */
- bResult &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
- MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
- bResult &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
- MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
- bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
+ MACvTimer0MicroSDelay(priv, 150);/* 150us */
+ ret &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ MACvTimer0MicroSDelay(priv, 30);/* 30us */
+ ret &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+ MACvTimer0MicroSDelay(priv, 30);/* 30us */
+ ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
@@ -577,26 +577,26 @@ static bool RFbAL2230Init(struct vnt_private *priv)
/* 3-wire control for power saving mode */
VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
- return bResult;
+ return ret;
}
static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
void __iomem *dwIoBase = priv->PortOffset;
- bool bResult;
+ bool ret;
- bResult = true;
+ ret = true;
- bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
- MACvTimer0MicroSDelay(dwIoBase, SWITCH_CHANNEL_DELAY_AL2230);
+ MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
/* Set Channel[7] = 1 to tell H/W channel change is done. */
VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
- return bResult;
+ return ret;
}
/*
@@ -612,30 +612,28 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
* Return Value: true if succeeded; false if failed.
*
*/
-bool RFbInit(
- struct vnt_private *priv
-)
+bool RFbInit(struct vnt_private *priv)
{
- bool bResult = true;
+ bool ret = true;
switch (priv->byRFType) {
case RF_AIROHA:
case RF_AL2230S:
priv->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
- bResult = RFbAL2230Init(priv);
+ ret = RFbAL2230Init(priv);
break;
case RF_AIROHA7230:
priv->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
- bResult = s_bAL7230Init(priv);
+ ret = s_bAL7230Init(priv);
break;
case RF_NOTHING:
- bResult = true;
+ ret = true;
break;
default:
- bResult = false;
+ ret = false;
break;
}
- return bResult;
+ return ret;
}
/*
@@ -654,26 +652,26 @@ bool RFbInit(
bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
u16 byChannel)
{
- bool bResult = true;
+ bool ret = true;
switch (byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- bResult = RFbAL2230SelectChannel(priv, byChannel);
+ ret = RFbAL2230SelectChannel(priv, byChannel);
break;
/*{{ RobertYu: 20050104 */
case RF_AIROHA7230:
- bResult = s_bAL7230SelectChannel(priv, byChannel);
+ ret = s_bAL7230SelectChannel(priv, byChannel);
break;
/*}} RobertYu */
case RF_NOTHING:
- bResult = true;
+ ret = true;
break;
default:
- bResult = false;
+ ret = false;
break;
}
- return bResult;
+ return ret;
}
/*
@@ -711,11 +709,11 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
return false;
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
- MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
- MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel-1]);
ii++;
- MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel-1]);
break;
/* Need to check, PLLON need to be low for channel setting */
@@ -728,17 +726,17 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
if (uChannel <= CB_MAX_CHANNEL_24G) {
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
- MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]);
} else {
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
- MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
}
- MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel-1]);
ii++;
- MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel-1]);
ii++;
- MACvSetMISCFifo(dwIoBase, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel-1]);
break;
case RF_NOTHING:
@@ -748,7 +746,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
return false;
}
- MACvSetMISCFifo(dwIoBase, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount));
+ MACvSetMISCFifo(priv, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount));
return true;
}
@@ -772,7 +770,7 @@ bool RFbSetPower(
u16 uCH
)
{
- bool bResult = true;
+ bool ret = true;
unsigned char byPwr = 0;
unsigned char byDec = 0;
@@ -818,11 +816,11 @@ bool RFbSetPower(
if (priv->byCurPwr == byPwr)
return true;
- bResult = RFbRawSetPower(priv, byPwr, rate);
- if (bResult)
+ ret = RFbRawSetPower(priv, byPwr, rate);
+ if (ret)
priv->byCurPwr = byPwr;
- return bResult;
+ return ret;
}
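
RFbSetPower short-circuits when the computed level matches the cached byCurPwr, so the slow embedded RF write happens only on a real change, and the cache is refreshed only after a successful write. The same memoization idiom in a self-contained sketch with a stubbed-out hardware write:

    #include <stdbool.h>
    #include <stdio.h>

    static int hw_writes;    /* counts slow hardware writes */

    /* stub for the slow embedded RF register write */
    static bool rf_raw_set_power(unsigned char pwr)
    {
        (void)pwr;
        hw_writes++;
        return true;
    }

    static unsigned char cur_pwr = 0xFF;    /* sentinel: nothing cached */

    /* Write only when the level changes; cache only after success. */
    static bool rf_set_power(unsigned char pwr)
    {
        bool ret;

        if (cur_pwr == pwr)
            return true;          /* cache hit: skip the write */

        ret = rf_raw_set_power(pwr);
        if (ret)
            cur_pwr = pwr;        /* remember last written level */

        return ret;
    }

    int main(void)
    {
        rf_set_power(10);
        rf_set_power(10);    /* suppressed by the cache */
        rf_set_power(12);
        printf("hardware writes: %d\n", hw_writes);    /* 2 */
        return 0;
    }
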
/*
@@ -845,7 +843,7 @@ bool RFbRawSetPower(
unsigned int rate
)
{
- bool bResult = true;
+ bool ret = true;
unsigned long dwMax7230Pwr = 0;
if (byPwr >= priv->byMaxPwrLevel)
@@ -853,22 +851,22 @@ bool RFbRawSetPower(
switch (priv->byRFType) {
case RF_AIROHA:
- bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (rate <= RATE_11M)
- bResult &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
else
- bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
break;
case RF_AL2230S:
- bResult &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
if (rate <= RATE_11M) {
- bResult &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- bResult &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
} else {
- bResult &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
- bResult &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+ ret &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
}
break;
@@ -879,13 +877,13 @@ bool RFbRawSetPower(
dwMax7230Pwr = 0x080C0B00 | ((byPwr) << 12) |
(BY_AL7230_REG_LEN << 3) | IFREGCTL_REGW;
- bResult &= IFRFbWriteEmbedded(priv, dwMax7230Pwr);
+ ret &= IFRFbWriteEmbedded(priv, dwMax7230Pwr);
break;
default:
break;
}
- return bResult;
+ return ret;
}
/*+
@@ -934,32 +932,32 @@ bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv,
u16 byOldChannel,
u16 byNewChannel)
{
- bool bResult;
+ bool ret;
- bResult = true;
+ ret = true;
/* if change between 11 b/g and 11a need to update the following
* register
* Channel Index 1~14 */
if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) {
/* Change from 2.4G to 5G [Reg] */
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]);
} else if ((byOldChannel > CB_MAX_CHANNEL_24G) && (byNewChannel <= CB_MAX_CHANNEL_24G)) {
/* Change from 5G to 2.4G [Reg] */
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]);
- bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]);
+ ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]);
}
- return bResult;
+ return ret;
}
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index b668db6a45fb..1a2dda09b69d 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1210,7 +1210,7 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
struct sk_buff *skb, u16 payload_len,
struct vnt_mic_hdr *mic_hdr)
{
- struct ieee80211_key_seq seq;
+ u64 pn64;
u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
/* strip header and icv len from payload */
@@ -1243,9 +1243,13 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer,
mic_hdr->payload_len = cpu_to_be16(payload_len);
ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
- ieee80211_get_key_tx_seq(tx_key, &seq);
-
- memcpy(mic_hdr->ccmp_pn, seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
+ pn64 = atomic64_read(&tx_key->tx_pn);
+ mic_hdr->ccmp_pn[5] = pn64;
+ mic_hdr->ccmp_pn[4] = pn64 >> 8;
+ mic_hdr->ccmp_pn[3] = pn64 >> 16;
+ mic_hdr->ccmp_pn[2] = pn64 >> 24;
+ mic_hdr->ccmp_pn[1] = pn64 >> 32;
+ mic_hdr->ccmp_pn[0] = pn64 >> 40;
if (ieee80211_has_a4(hdr->frame_control))
mic_hdr->hlen = cpu_to_be16(28);
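
ieee80211_get_key_tx_seq() is replaced here by a direct atomic64_read() of the key's tx_pn, and the 48-bit CCMP packet number is unpacked into ccmp_pn[0..5] most significant byte first; the identical change appears again in vt6656/rxtx.c further down. A runnable demonstration of the byte order:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pn64 = 0x0000AABBCCDDEEFFULL;    /* sample 48-bit PN */
        uint8_t pn[6];

        /* big-endian unpack: pn[0] is the most significant PN byte */
        pn[5] = pn64;
        pn[4] = pn64 >> 8;
        pn[3] = pn64 >> 16;
        pn[2] = pn64 >> 24;
        pn[1] = pn64 >> 32;
        pn[0] = pn64 >> 40;

        printf("%02X %02X %02X %02X %02X %02X\n",
               pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
        /* prints: AA BB CC DD EE FF */
        return 0;
    }
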
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index 76b5f4127f95..4832666cc580 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -259,8 +259,8 @@ enum {
};
/* flags for options */
-#define DEVICE_FLAGS_UNPLUG BIT(0)
-#define DEVICE_FLAGS_DISCONNECTED BIT(1)
+#define DEVICE_FLAGS_UNPLUG 0
+#define DEVICE_FLAGS_DISCONNECTED 1
struct vnt_private {
/* mac80211 */
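
The flag change from BIT(0)/BIT(1) to plain 0/1 is a correctness fix, not a cleanup: test_bit(), set_bit() and friends take a bit number, so passing BIT(1) (value 2) would have tested bit 2 instead. The test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags) call in the usbpipe.c hunk below depends on this. A tiny standalone illustration:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    /* minimal stand-in for the kernel's test_bit(): nr is a bit NUMBER */
    static int test_bit_nr(unsigned long nr, const unsigned long *word)
    {
        return (*word >> nr) & 1UL;
    }

    int main(void)
    {
        unsigned long flags = BIT(1);    /* bit number 1 is set */

        /* correct: pass the bit number (1) */
        printf("nr=1      -> %d\n", test_bit_nr(1, &flags));       /* 1 */

        /* wrong: BIT(1) == 2 as the number tests bit 2 instead */
        printf("nr=BIT(1) -> %d\n", test_bit_nr(BIT(1), &flags));  /* 0 */
        return 0;
    }
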
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ee8d1e1a24c2..f9afab77b79f 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -74,10 +74,10 @@ MODULE_PARM_DESC(tx_buffers, "Number of receive usb tx buffers");
#define LONG_RETRY_DEF 4
/* BasebandType[] baseband type selected
- 0: indicate 802.11a type
- 1: indicate 802.11b type
- 2: indicate 802.11g type
-*/
+ * 0: indicate 802.11a type
+ * 1: indicate 802.11b type
+ * 2: indicate 802.11g type
+ */
#define BBP_TYPE_DEF 2
@@ -284,7 +284,8 @@ static int vnt_init_registers(struct vnt_private *priv)
calib_rx_iq = priv->eeprom[EEP_OFS_CALIB_RX_IQ];
if (calib_tx_iq || calib_tx_dc || calib_rx_iq) {
/* CR255, enable TX/RX IQ and
- DC compensation mode */
+ * DC compensation mode
+ */
vnt_control_out_u8(priv,
MESSAGE_REQUEST_BBREG,
0xff,
@@ -306,7 +307,8 @@ static int vnt_init_registers(struct vnt_private *priv)
calib_rx_iq);
} else {
/* CR255, turn off
- BB Calibration compensation */
+ * BB Calibration compensation
+ */
vnt_control_out_u8(priv,
MESSAGE_REQUEST_BBREG,
0xff,
@@ -429,7 +431,7 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
for (ii = 0; ii < priv->num_tx_context; ii++) {
tx_context = kmalloc(sizeof(struct vnt_usb_send_context),
GFP_KERNEL);
- if (tx_context == NULL)
+ if (!tx_context)
goto free_tx;
priv->tx_context[ii] = tx_context;
@@ -437,7 +439,7 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
tx_context->pkt_no = ii;
/* allocate URBs */
- tx_context->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!tx_context->urb) {
dev_err(&priv->usb->dev, "alloc tx urb failed\n");
goto free_tx;
@@ -459,14 +461,14 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
rcb->priv = priv;
/* allocate URBs */
- rcb->urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (rcb->urb == NULL) {
+ rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rcb->urb) {
dev_err(&priv->usb->dev, "Failed to alloc rx urb\n");
goto free_rx_tx;
}
rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
- if (rcb->skb == NULL)
+ if (!rcb->skb)
goto free_rx_tx;
rcb->in_use = false;
@@ -476,14 +478,14 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
goto free_rx_tx;
}
- priv->interrupt_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (priv->interrupt_urb == NULL) {
+ priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!priv->interrupt_urb) {
dev_err(&priv->usb->dev, "Failed to alloc int urb\n");
goto free_rx_tx;
}
priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
- if (priv->int_buf.data_buf == NULL) {
+ if (!priv->int_buf.data_buf) {
usb_free_urb(priv->interrupt_urb);
goto free_rx_tx;
}
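
The GFP_ATOMIC to GFP_KERNEL switches above are safe because vnt_alloc_bufs() runs in process context on the start path, where allocations may sleep; GFP_ATOMIC remains necessary only where sleeping is forbidden, such as the completion handlers that still submit URBs with it. A sketch of the idiom, with a hypothetical helper name:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/usb.h>

    /* Process context: GFP_KERNEL may sleep and reclaim, so it is the
     * right choice on the probe/start path; GFP_ATOMIC stays reserved
     * for contexts that must not sleep, e.g. URB completion handlers. */
    static int alloc_rx_resources(struct urb **urb, void **buf, size_t len)
    {
        *urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!*urb)
            return -ENOMEM;

        *buf = kmalloc(len, GFP_KERNEL);
        if (!*buf) {
            usb_free_urb(*urb);    /* unwind the first allocation */
            return -ENOMEM;
        }
        return 0;
    }
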
diff --git a/drivers/staging/vt6656/power.c b/drivers/staging/vt6656/power.c
index c025dab0f62c..e322b7d8c617 100644
--- a/drivers/staging/vt6656/power.c
+++ b/drivers/staging/vt6656/power.c
@@ -103,7 +103,7 @@ void vnt_disable_power_saving(struct vnt_private *priv)
/* disable power saving hw function */
vnt_control_out(priv, MESSAGE_TYPE_DISABLE_PS, 0,
- 0, 0, NULL);
+ 0, 0, NULL);
/* clear AutoSleep */
vnt_mac_reg_bits_off(priv, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 816206c92f57..79a3108719a6 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -917,8 +917,8 @@ void vnt_rf_table_download(struct vnt_private *priv)
if (priv->rf_type == RF_AIROHA7230) {
length1 = CB_AL7230_INIT_SEQ * 3;
length2 = CB_MAX_CHANNEL * 3;
- addr1 = &(al7230_init_table_amode[0][0]);
- addr2 = &(al7230_channel_table2[0][0]);
+ addr1 = &al7230_init_table_amode[0][0];
+ addr2 = &al7230_channel_table2[0][0];
memcpy(array, addr1, length1);
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index a0c69b697901..b74e32001318 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -716,7 +716,7 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
u16 payload_len, struct vnt_mic_hdr *mic_hdr)
{
struct ieee80211_hdr *hdr = tx_context->hdr;
- struct ieee80211_key_seq seq;
+ u64 pn64;
u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb));
/* strip header and icv len from payload */
@@ -749,9 +749,13 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context,
mic_hdr->payload_len = cpu_to_be16(payload_len);
ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2);
- ieee80211_get_key_tx_seq(tx_key, &seq);
-
- memcpy(mic_hdr->ccmp_pn, seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
+ pn64 = atomic64_read(&tx_key->tx_pn);
+ mic_hdr->ccmp_pn[5] = pn64;
+ mic_hdr->ccmp_pn[4] = pn64 >> 8;
+ mic_hdr->ccmp_pn[3] = pn64 >> 16;
+ mic_hdr->ccmp_pn[2] = pn64 >> 24;
+ mic_hdr->ccmp_pn[1] = pn64 >> 32;
+ mic_hdr->ccmp_pn[0] = pn64 >> 40;
if (ieee80211_has_a4(hdr->frame_control))
mic_hdr->hlen = cpu_to_be16(28);
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 351a99f3d684..f546553de66f 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -101,9 +101,9 @@ void vnt_control_in_u8(struct vnt_private *priv, u8 reg, u8 reg_off, u8 *data)
static void vnt_start_interrupt_urb_complete(struct urb *urb)
{
struct vnt_private *priv = urb->context;
- int status;
+ int status = urb->status;
- switch (urb->status) {
+ switch (status) {
case 0:
case -ETIMEDOUT:
break;
@@ -116,9 +116,7 @@ static void vnt_start_interrupt_urb_complete(struct urb *urb)
break;
}
- status = urb->status;
-
- if (status != STATUS_SUCCESS) {
+ if (status) {
priv->int_buf.in_use = false;
dev_dbg(&priv->usb->dev, "%s status = %d\n", __func__, status);
@@ -207,10 +205,9 @@ static void vnt_submit_rx_urb_complete(struct urb *urb)
int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb)
{
int status = 0;
- struct urb *urb;
+ struct urb *urb = rcb->urb;
- urb = rcb->urb;
- if (rcb->skb == NULL) {
+ if (!rcb->skb) {
dev_dbg(&priv->usb->dev, "rcb->skb is null\n");
return status;
}
@@ -224,7 +221,7 @@ int vnt_submit_rx_urb(struct vnt_private *priv, struct vnt_rcb *rcb)
rcb);
status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status != 0) {
+ if (status) {
dev_dbg(&priv->usb->dev, "Submit Rx URB failed %d\n", status);
return STATUS_FAILURE;
}
@@ -269,15 +266,13 @@ int vnt_tx_context(struct vnt_private *priv,
struct vnt_usb_send_context *context)
{
int status;
- struct urb *urb;
+ struct urb *urb = context->urb;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags)) {
context->in_use = false;
return STATUS_RESOURCES;
}
- urb = context->urb;
-
usb_fill_bulk_urb(urb,
priv->usb,
usb_sndbulkpipe(priv->usb, 3),
@@ -287,7 +282,7 @@ int vnt_tx_context(struct vnt_private *priv,
context);
status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status != 0) {
+ if (status) {
dev_dbg(&priv->usb->dev, "Submit Tx URB failed %d\n", status);
context->in_use = false;
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index 20a5cb9d4f4c..acc3f3e8481b 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -1,11 +1,9 @@
obj-$(CONFIG_WILC1000) += wilc1000.o
-ccflags-y += -DSTA_FIRMWARE=\"atmel/wilc1000_fw.bin\" \
- -DAP_FIRMWARE=\"atmel/wilc1000_ap_fw.bin\" \
- -DP2P_CONCURRENCY_FIRMWARE=\"atmel/wilc1000_p2p_fw.bin\"
+ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
+ -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
ccflags-y += -I$(src)/ -DWILC_ASIC_A0 -DWILC_DEBUGFS
-#ccflags-y += -DTCP_ACK_FILTER
wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
wilc_msgqueue.o \
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 2d4d3f190c01..4b51c0ac27ac 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -1,22 +1,11 @@
-
-/*!
- * @file coreconfigurator.c
- * @brief
- * @author
- * @sa coreconfigurator.h
- * @date 1 Mar 2012
- * @version 1.0
- */
-
#include "coreconfigurator.h"
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
#include <linux/errno.h>
#include <linux/slab.h>
#define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \
- BEACON_INTERVAL_LEN + CAP_INFO_LEN)
+ BEACON_INTERVAL_LEN + CAP_INFO_LEN)
-/* Basic Frame Type Codes (2-bit) */
enum basic_frame_type {
FRAME_TYPE_CONTROL = 0x04,
FRAME_TYPE_DATA = 0x08,
@@ -25,7 +14,6 @@ enum basic_frame_type {
FRAME_TYPE_FORCE_32BIT = 0xFFFFFFFF
};
-/* Frame Type and Subtype Codes (6-bit) */
enum sub_frame_type {
ASSOC_REQ = 0x00,
ASSOC_RSP = 0x10,
@@ -65,7 +53,6 @@ enum sub_frame_type {
FRAME_SUBTYPE_FORCE_32BIT = 0xFFFFFFFF
};
-/* Element ID of various Information Elements */
enum info_element_id {
ISSID = 0, /* Service Set Identifier */
ISUPRATES = 1, /* Supported Rates */
@@ -109,8 +96,6 @@ enum info_element_id {
INFOELEM_ID_FORCE_32BIT = 0xFFFFFFFF
};
-/* This function extracts the beacon period field from the beacon or probe */
-/* response frame. */
static inline u16 get_beacon_period(u8 *data)
{
u16 bcn_per;
@@ -147,54 +132,36 @@ static inline u32 get_beacon_timestamp_hi(u8 *data)
return time_stamp;
}
-/* This function extracts the 'frame type and sub type' bits from the MAC */
-/* header of the input frame. */
-/* Returns the value in the LSB of the returned value. */
static inline enum sub_frame_type get_sub_type(u8 *header)
{
return ((enum sub_frame_type)(header[0] & 0xFC));
}
-/* This function extracts the 'to ds' bit from the MAC header of the input */
-/* frame. */
-/* Returns the value in the LSB of the returned value. */
static inline u8 get_to_ds(u8 *header)
{
return (header[1] & 0x01);
}
-/* This function extracts the 'from ds' bit from the MAC header of the input */
-/* frame. */
-/* Returns the value in the LSB of the returned value. */
static inline u8 get_from_ds(u8 *header)
{
return ((header[1] & 0x02) >> 1);
}
-/* This function extracts the MAC Address in 'address1' field of the MAC */
-/* header and updates the MAC Address in the allocated 'addr' variable. */
static inline void get_address1(u8 *pu8msa, u8 *addr)
{
memcpy(addr, pu8msa + 4, 6);
}
-/* This function extracts the MAC Address in 'address2' field of the MAC */
-/* header and updates the MAC Address in the allocated 'addr' variable. */
static inline void get_address2(u8 *pu8msa, u8 *addr)
{
memcpy(addr, pu8msa + 10, 6);
}
-/* This function extracts the MAC Address in 'address3' field of the MAC */
-/* header and updates the MAC Address in the allocated 'addr' variable. */
static inline void get_address3(u8 *pu8msa, u8 *addr)
{
memcpy(addr, pu8msa + 16, 6);
}
-/* This function extracts the BSSID from the incoming WLAN packet based on */
-/* the 'from ds' bit, and updates the MAC Address in the allocated 'addr' */
-/* variable. */
static inline void get_BSSID(u8 *data, u8 *bssid)
{
if (get_from_ds(data) == 1)
@@ -205,20 +172,15 @@ static inline void get_BSSID(u8 *data, u8 *bssid)
get_address3(data, bssid);
}
-/* This function extracts the SSID from a beacon/probe response frame */
static inline void get_ssid(u8 *data, u8 *ssid, u8 *p_ssid_len)
{
u8 len = 0;
u8 i = 0;
u8 j = 0;
- len = data[MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN +
- CAP_INFO_LEN + 1];
- j = MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN +
- CAP_INFO_LEN + 2;
+ len = data[TAG_PARAM_OFFSET + 1];
+ j = TAG_PARAM_OFFSET + 2;
- /* If the SSID length field is set wrongly to a value greater than the */
- /* allowed maximum SSID length limit, reset the length to 0 */
if (len >= MAX_SSID_LEN)
len = 0;
@@ -230,8 +192,6 @@ static inline void get_ssid(u8 *data, u8 *ssid, u8 *p_ssid_len)
*p_ssid_len = len;
}
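
The get_ssid() cleanup substitutes TAG_PARAM_OFFSET for the open-coded MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN sums, since the macro defined at the top of the file is exactly that expression. With the usual 802.11 management-frame field sizes (assumed here, they are not shown in this hunk) the tagged parameters start at byte 36:

    #include <stdio.h>

    /* usual 802.11 management-frame field sizes (assumed values) */
    #define MAC_HDR_LEN         24
    #define TIME_STAMP_LEN       8
    #define BEACON_INTERVAL_LEN  2
    #define CAP_INFO_LEN         2

    #define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \
                              BEACON_INTERVAL_LEN + CAP_INFO_LEN)

    int main(void)
    {
        /* SSID IE: id at +0, length at +1, SSID bytes start at +2 */
        printf("tagged params start at byte %d\n", TAG_PARAM_OFFSET); /* 36 */
        printf("SSID length byte at %d, SSID data at %d\n",
               TAG_PARAM_OFFSET + 1, TAG_PARAM_OFFSET + 2);
        return 0;
    }
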
-/* This function extracts the capability info field from the beacon or probe */
-/* response frame. */
static inline u16 get_cap_info(u8 *data)
{
u16 cap_info = 0;
@@ -240,8 +200,6 @@ static inline u16 get_cap_info(u8 *data)
st = get_sub_type(data);
- /* Location of the Capability field is different for Beacon and */
- /* Association frames. */
if ((st == BEACON) || (st == PROBE_RSP))
index += TIME_STAMP_LEN + BEACON_INTERVAL_LEN;
@@ -251,8 +209,6 @@ static inline u16 get_cap_info(u8 *data)
return cap_info;
}
-/* This function extracts the capability info field from the Association */
-/* response frame. */
static inline u16 get_assoc_resp_cap_info(u8 *data)
{
u16 cap_info;
@@ -263,8 +219,6 @@ static inline u16 get_assoc_resp_cap_info(u8 *data)
return cap_info;
}
-/* This function extracts the association status code from the incoming */
-/* association response frame and returns association status code */
static inline u16 get_asoc_status(u8 *data)
{
u16 asoc_status;
@@ -275,8 +229,6 @@ static inline u16 get_asoc_status(u8 *data)
return asoc_status;
}
-/* This function extracts association ID from the incoming association */
-/* response frame */
static inline u16 get_asoc_id(u8 *data)
{
u16 asoc_id;
@@ -287,347 +239,151 @@ static inline u16 get_asoc_id(u8 *data)
return asoc_id;
}
-static u8 *get_tim_elm(u8 *pu8msa, u16 u16RxLen, u16 u16TagParamOffset)
+static u8 *get_tim_elm(u8 *pu8msa, u16 rx_len, u16 tag_param_offset)
{
- u16 u16index;
-
- /*************************************************************************/
- /* Beacon Frame - Frame Body */
- /* --------------------------------------------------------------------- */
- /* |Timestamp |BeaconInt |CapInfo |SSID |SupRates |DSParSet |TIM elm | */
- /* --------------------------------------------------------------------- */
- /* |8 |2 |2 |2-34 |3-10 |3 |4-256 | */
- /* --------------------------------------------------------------------- */
- /* */
- /*************************************************************************/
-
- u16index = u16TagParamOffset;
-
- /* Search for the TIM Element Field and return if the element is found */
- while (u16index < (u16RxLen - FCS_LEN)) {
- if (pu8msa[u16index] == ITIM)
- return &pu8msa[u16index];
- u16index += (IE_HDR_LEN + pu8msa[u16index + 1]);
+ u16 index;
+
+ index = tag_param_offset;
+
+ while (index < (rx_len - FCS_LEN)) {
+ if (pu8msa[index] == ITIM)
+ return &pu8msa[index];
+ index += (IE_HDR_LEN + pu8msa[index + 1]);
}
return NULL;
}
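
The rewritten get_tim_elm() walks the tagged parameter area as a type/length/value list: byte 0 of each element is the ID, byte 1 the payload length, and the next element starts IE_HDR_LEN plus that length further on, with the scan stopping short of the trailing FCS. A standalone version of the same scan (with an explicit header-fits bounds check added for safety):

    #include <stdint.h>
    #include <stdio.h>

    #define IE_HDR_LEN 2    /* one id byte + one length byte */

    /* Scan a TLV-encoded IE buffer for element `id`; returns its offset
     * or -1 when absent. `len` excludes any trailing checksum. */
    static int find_ie(const uint8_t *buf, int len, uint8_t id)
    {
        int idx = 0;

        while (idx + IE_HDR_LEN <= len) {
            if (buf[idx] == id)
                return idx;
            idx += IE_HDR_LEN + buf[idx + 1];    /* skip to next IE */
        }
        return -1;
    }

    int main(void)
    {
        /* two IEs: id 0 (len 3), then id 5 "TIM" (len 4) */
        const uint8_t ies[] = { 0, 3, 'a', 'b', 'c', 5, 4, 1, 2, 0, 0 };

        printf("TIM at offset %d\n", find_ie(ies, sizeof(ies), 5)); /* 5 */
        return 0;
    }
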
-/* This function gets the current channel information from
- * the 802.11n beacon/probe response frame */
-static u8 get_current_channel_802_11n(u8 *pu8msa, u16 u16RxLen)
+static u8 get_current_channel_802_11n(u8 *pu8msa, u16 rx_len)
{
u16 index;
index = TAG_PARAM_OFFSET;
- while (index < (u16RxLen - FCS_LEN)) {
+ while (index < (rx_len - FCS_LEN)) {
if (pu8msa[index] == IDSPARMS)
return pu8msa[index + 2];
- /* Increment index by length information and header */
index += pu8msa[index + 1] + IE_HDR_LEN;
}
- /* Return current channel information from the MIB, if beacon/probe */
- /* response frame does not contain the DS parameter set IE */
- /* return (mget_CurrentChannel() + 1); */
- return 0; /* no MIB here */
+ return 0;
}
-/**
- * @brief parses the received 'N' message
- * @details
- * @param[in] pu8MsgBuffer The message to be parsed
- * @param[out] ppstrNetworkInfo pointer to pointer to the structure containing the parsed Network Info
- * @return Error code indicating success/failure
- * @note
- * @author mabubakr
- * @date 1 Mar 2012
- * @version 1.0
- */
-s32 wilc_parse_network_info(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo)
+s32 wilc_parse_network_info(u8 *msg_buffer,
+ struct network_info **ret_network_info)
{
- tstrNetworkInfo *pstrNetworkInfo = NULL;
- u8 u8MsgType = 0;
- u8 u8MsgID = 0;
- u16 u16MsgLen = 0;
+ struct network_info *network_info = NULL;
+ u8 msg_type = 0;
+ u8 msg_id = 0;
+ u16 msg_len = 0;
- u16 u16WidID = (u16)WID_NIL;
- u16 u16WidLen = 0;
- u8 *pu8WidVal = NULL;
+ u16 wid_id = (u16)WID_NIL;
+ u16 wid_len = 0;
+ u8 *wid_val = NULL;
- u8MsgType = pu8MsgBuffer[0];
+ msg_type = msg_buffer[0];
- /* Check whether the received message type is 'N' */
- if ('N' != u8MsgType) {
- PRINT_ER("Received Message format incorrect.\n");
+ if ('N' != msg_type)
return -EFAULT;
- }
-
- /* Extract message ID */
- u8MsgID = pu8MsgBuffer[1];
-
- /* Extract message Length */
- u16MsgLen = MAKE_WORD16(pu8MsgBuffer[2], pu8MsgBuffer[3]);
-
- /* Extract WID ID */
- u16WidID = MAKE_WORD16(pu8MsgBuffer[4], pu8MsgBuffer[5]);
-
- /* Extract WID Length */
- u16WidLen = MAKE_WORD16(pu8MsgBuffer[6], pu8MsgBuffer[7]);
- /* Assign a pointer to the WID value */
- pu8WidVal = &pu8MsgBuffer[8];
+ msg_id = msg_buffer[1];
+ msg_len = MAKE_WORD16(msg_buffer[2], msg_buffer[3]);
+ wid_id = MAKE_WORD16(msg_buffer[4], msg_buffer[5]);
+ wid_len = MAKE_WORD16(msg_buffer[6], msg_buffer[7]);
+ wid_val = &msg_buffer[8];
- /* parse the WID value of the WID "WID_NEWORK_INFO" */
{
- u8 *pu8msa = NULL;
- u16 u16RxLen = 0;
- u8 *pu8TimElm = NULL;
- u8 *pu8IEs = NULL;
- u16 u16IEsLen = 0;
- u8 u8index = 0;
- u32 u32Tsf_Lo;
- u32 u32Tsf_Hi;
-
- pstrNetworkInfo = kzalloc(sizeof(tstrNetworkInfo), GFP_KERNEL);
- if (!pstrNetworkInfo)
+ u8 *msa = NULL;
+ u16 rx_len = 0;
+ u8 *tim_elm = NULL;
+ u8 *ies = NULL;
+ u16 ies_len = 0;
+ u8 index = 0;
+ u32 tsf_lo;
+ u32 tsf_hi;
+
+ network_info = kzalloc(sizeof(*network_info), GFP_KERNEL);
+ if (!network_info)
return -ENOMEM;
- pstrNetworkInfo->s8rssi = pu8WidVal[0];
+ network_info->rssi = wid_val[0];
- /* Assign a pointer to msa "Mac Header Start Address" */
- pu8msa = &pu8WidVal[1];
+ msa = &wid_val[1];
- u16RxLen = u16WidLen - 1;
+ rx_len = wid_len - 1;
+ network_info->cap_info = get_cap_info(msa);
+ network_info->tsf_lo = get_beacon_timestamp_lo(msa);
- /* parse msa*/
+ tsf_lo = get_beacon_timestamp_lo(msa);
+ tsf_hi = get_beacon_timestamp_hi(msa);
- /* Get the cap_info */
- pstrNetworkInfo->u16CapInfo = get_cap_info(pu8msa);
- /* Get time-stamp [Low only 32 bit] */
- pstrNetworkInfo->u32Tsf = get_beacon_timestamp_lo(pu8msa);
- PRINT_D(CORECONFIG_DBG, "TSF :%x\n", pstrNetworkInfo->u32Tsf);
+ network_info->tsf_hi = tsf_lo | ((u64)tsf_hi << 32);
- /* Get full time-stamp [Low and High 64 bit] */
- u32Tsf_Lo = get_beacon_timestamp_lo(pu8msa);
- u32Tsf_Hi = get_beacon_timestamp_hi(pu8msa);
+ get_ssid(msa, network_info->ssid, &network_info->ssid_len);
+ get_BSSID(msa, network_info->bssid);
- pstrNetworkInfo->u64Tsf = u32Tsf_Lo | ((u64)u32Tsf_Hi << 32);
+ network_info->ch = get_current_channel_802_11n(msa,
+ rx_len + FCS_LEN);
- /* Get SSID */
- get_ssid(pu8msa, pstrNetworkInfo->au8ssid, &pstrNetworkInfo->u8SsidLen);
+ index = MAC_HDR_LEN + TIME_STAMP_LEN;
- /* Get BSSID */
- get_BSSID(pu8msa, pstrNetworkInfo->au8bssid);
+ network_info->beacon_period = get_beacon_period(msa + index);
- /*
- * Extract current channel information from
- * the beacon/probe response frame
- */
- pstrNetworkInfo->u8channel = get_current_channel_802_11n(pu8msa,
- u16RxLen + FCS_LEN);
+ index += BEACON_INTERVAL_LEN + CAP_INFO_LEN;
- /* Get beacon period */
- u8index = MAC_HDR_LEN + TIME_STAMP_LEN;
+ tim_elm = get_tim_elm(msa, rx_len + FCS_LEN, index);
+ if (tim_elm)
+ network_info->dtim_period = tim_elm[3];
+ ies = &msa[TAG_PARAM_OFFSET];
+ ies_len = rx_len - TAG_PARAM_OFFSET;
- pstrNetworkInfo->u16BeaconPeriod = get_beacon_period(pu8msa + u8index);
-
- u8index += BEACON_INTERVAL_LEN + CAP_INFO_LEN;
-
- /* Get DTIM Period */
- pu8TimElm = get_tim_elm(pu8msa, u16RxLen + FCS_LEN, u8index);
- if (pu8TimElm)
- pstrNetworkInfo->u8DtimPeriod = pu8TimElm[3];
- pu8IEs = &pu8msa[MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN];
- u16IEsLen = u16RxLen - (MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN);
-
- if (u16IEsLen > 0) {
- pstrNetworkInfo->pu8IEs = kmemdup(pu8IEs, u16IEsLen,
- GFP_KERNEL);
- if (!pstrNetworkInfo->pu8IEs)
+ if (ies_len > 0) {
+ network_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!network_info->ies) {
+ kfree(network_info);
return -ENOMEM;
+ }
}
- pstrNetworkInfo->u16IEsLen = u16IEsLen;
-
+ network_info->ies_len = ies_len;
}
- *ppstrNetworkInfo = pstrNetworkInfo;
+ *ret_network_info = network_info;
return 0;
}
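
Beyond the renames, the rewrite plugs a leak: the old code returned -ENOMEM when kmemdup() failed without freeing the just-allocated pstrNetworkInfo, while the new code kfree()s the container first; wilc_parse_assoc_resp_info() below gets the same treatment. The canonical shape of that error path, on a hypothetical reduced struct:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* hypothetical, reduced struct for illustration */
    struct info {
        u8 *ies;
        u16 ies_len;
    };

    static int parse_info(const u8 *src, u16 len, struct info **out)
    {
        struct info *info = kzalloc(sizeof(*info), GFP_KERNEL);

        if (!info)
            return -ENOMEM;

        info->ies = kmemdup(src, len, GFP_KERNEL);
        if (!info->ies) {
            kfree(info);    /* free the container: no leak */
            return -ENOMEM;
        }
        info->ies_len = len;

        *out = info;
        return 0;
    }
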
-/**
- * @brief Deallocates the parsed Network Info
- * @details
- * @param[in] pstrNetworkInfo Network Info to be deallocated
- * @return Error code indicating success/failure
- * @note
- * @author mabubakr
- * @date 1 Mar 2012
- * @version 1.0
- */
-s32 wilc_dealloc_network_info(tstrNetworkInfo *pstrNetworkInfo)
+s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
+ struct connect_resp_info **ret_connect_resp_info)
{
- s32 s32Error = 0;
-
- if (pstrNetworkInfo) {
- if (pstrNetworkInfo->pu8IEs) {
- kfree(pstrNetworkInfo->pu8IEs);
- pstrNetworkInfo->pu8IEs = NULL;
- } else {
- s32Error = -EFAULT;
- }
-
- kfree(pstrNetworkInfo);
- pstrNetworkInfo = NULL;
-
- } else {
- s32Error = -EFAULT;
- }
-
- return s32Error;
-}
+ struct connect_resp_info *connect_resp_info = NULL;
+ u16 assoc_resp_len = 0;
+ u8 *ies = NULL;
+ u16 ies_len = 0;
-/**
- * @brief parses the received Association Response frame
- * @details
- * @param[in] pu8Buffer The Association Response frame to be parsed
- * @param[out] ppstrConnectRespInfo pointer to pointer to the structure containing the parsed Association Response Info
- * @return Error code indicating success/failure
- * @note
- * @author mabubakr
- * @date 2 Apr 2012
- * @version 1.0
- */
-s32 wilc_parse_assoc_resp_info(u8 *pu8Buffer, u32 u32BufferLen,
- tstrConnectRespInfo **ppstrConnectRespInfo)
-{
- s32 s32Error = 0;
- tstrConnectRespInfo *pstrConnectRespInfo = NULL;
- u16 u16AssocRespLen = 0;
- u8 *pu8IEs = NULL;
- u16 u16IEsLen = 0;
-
- pstrConnectRespInfo = kzalloc(sizeof(tstrConnectRespInfo), GFP_KERNEL);
- if (!pstrConnectRespInfo)
+ connect_resp_info = kzalloc(sizeof(*connect_resp_info), GFP_KERNEL);
+ if (!connect_resp_info)
return -ENOMEM;
- /* u16AssocRespLen = pu8Buffer[0]; */
- u16AssocRespLen = (u16)u32BufferLen;
-
- /* get the status code */
- pstrConnectRespInfo->u16ConnectStatus = get_asoc_status(pu8Buffer);
- if (pstrConnectRespInfo->u16ConnectStatus == SUCCESSFUL_STATUSCODE) {
-
- /* get the capability */
- pstrConnectRespInfo->u16capability = get_assoc_resp_cap_info(pu8Buffer);
+ assoc_resp_len = (u16)buffer_len;
- /* get the Association ID */
- pstrConnectRespInfo->u16AssocID = get_asoc_id(pu8Buffer);
+ connect_resp_info->status = get_asoc_status(buffer);
+ if (connect_resp_info->status == SUCCESSFUL_STATUSCODE) {
+ connect_resp_info->capability = get_assoc_resp_cap_info(buffer);
+ connect_resp_info->assoc_id = get_asoc_id(buffer);
- /* get the Information Elements */
- pu8IEs = &pu8Buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN];
- u16IEsLen = u16AssocRespLen - (CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN);
+ ies = &buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN];
+ ies_len = assoc_resp_len - (CAP_INFO_LEN + STATUS_CODE_LEN +
+ AID_LEN);
- pstrConnectRespInfo->pu8RespIEs = kmemdup(pu8IEs, u16IEsLen, GFP_KERNEL);
- if (!pstrConnectRespInfo->pu8RespIEs)
+ connect_resp_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!connect_resp_info->ies) {
+ kfree(connect_resp_info);
return -ENOMEM;
-
- pstrConnectRespInfo->u16RespIEsLen = u16IEsLen;
- }
-
- *ppstrConnectRespInfo = pstrConnectRespInfo;
-
- return s32Error;
-}
-
-/**
- * @brief Deallocates the parsed Association Response Info
- * @details
- * @param[in] pstrNetworkInfo Network Info to be deallocated
- * @return Error code indicating success/failure
- * @note
- * @author mabubakr
- * @date 2 Apr 2012
- * @version 1.0
- */
-s32 wilc_dealloc_assoc_resp_info(tstrConnectRespInfo *pstrConnectRespInfo)
-{
- s32 s32Error = 0;
-
- if (pstrConnectRespInfo) {
- if (pstrConnectRespInfo->pu8RespIEs) {
- kfree(pstrConnectRespInfo->pu8RespIEs);
- pstrConnectRespInfo->pu8RespIEs = NULL;
- } else {
- s32Error = -EFAULT;
}
- kfree(pstrConnectRespInfo);
- pstrConnectRespInfo = NULL;
-
- } else {
- s32Error = -EFAULT;
+ connect_resp_info->ies_len = ies_len;
}
- return s32Error;
-}
-
-/**
- * @brief sends certain Configuration Packet based on the input WIDs pstrWIDs
- * using driver config layer
- *
- * @details
- * @param[in] pstrWIDs WIDs to be sent in the configuration packet
- * @param[in] u32WIDsCount number of WIDs to be sent in the configuration packet
- * @param[out] pu8RxResp The received Packet Response
- * @param[out] ps32RxRespLen Length of the received Packet Response
- * @return Error code indicating success/failure
- * @note
- * @author mabubakr
- * @date 1 Mar 2012
- * @version 1.0
- */
-s32 wilc_send_config_pkt(struct wilc *wilc, u8 mode, struct wid *wids,
- u32 count, u32 drv)
-{
- s32 counter = 0, ret = 0;
-
- if (mode == GET_CFG) {
- for (counter = 0; counter < count; counter++) {
- PRINT_INFO(CORECONFIG_DBG, "Sending CFG packet [%d][%d]\n", !counter,
- (counter == count - 1));
- if (!wilc_wlan_cfg_get(wilc, !counter,
- wids[counter].id,
- (counter == count - 1),
- drv)) {
- ret = -ETIMEDOUT;
- printk("[Sendconfigpkt]Get Timed out\n");
- break;
- }
- }
- counter = 0;
- for (counter = 0; counter < count; counter++) {
- wids[counter].size = wilc_wlan_cfg_get_val(
- wids[counter].id,
- wids[counter].val,
- wids[counter].size);
- }
- } else if (mode == SET_CFG) {
- for (counter = 0; counter < count; counter++) {
- PRINT_D(CORECONFIG_DBG, "Sending config SET PACKET WID:%x\n", wids[counter].id);
- if (!wilc_wlan_cfg_set(wilc, !counter,
- wids[counter].id,
- wids[counter].val,
- wids[counter].size,
- (counter == count - 1),
- drv)) {
- ret = -ETIMEDOUT;
- printk("[Sendconfigpkt]Set Timed out\n");
- break;
- }
- }
- }
+ *ret_connect_resp_info = connect_resp_info;
- return ret;
+ return 0;
}
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index fc43d04ca1da..076e06ac0d66 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -50,7 +50,7 @@
#define MAKE_WORD16(lsb, msb) ((((u16)(msb) << 8) & 0xFF00) | (lsb))
#define MAKE_WORD32(lsw, msw) ((((u32)(msw) << 16) & 0xFFFF0000) | (lsw))
-typedef enum {
+enum connect_status {
SUCCESSFUL_STATUSCODE = 0,
UNSPEC_FAIL = 1,
UNSUP_CAP = 10,
@@ -68,13 +68,6 @@ typedef enum {
SHORT_SLOT_UNSUP = 25,
OFDM_DSSS_UNSUP = 26,
CONNECT_STS_FORCE_16_BIT = 0xFFFF
-} tenuConnectSts;
-
-struct wid {
- u16 id;
- enum wid_type type;
- s32 size;
- s8 *val;
};
typedef struct {
@@ -83,58 +76,54 @@ typedef struct {
s8 as8RSSI[NUM_RSSI];
} tstrRSSI;
-typedef struct {
- s8 s8rssi;
- u16 u16CapInfo;
- u8 au8ssid[MAX_SSID_LEN];
- u8 u8SsidLen;
- u8 au8bssid[6];
- u16 u16BeaconPeriod;
- u8 u8DtimPeriod;
- u8 u8channel;
- unsigned long u32TimeRcvdInScanCached;
- unsigned long u32TimeRcvdInScan;
- bool bNewNetwork;
- u8 u8Found;
- u32 u32Tsf;
- u8 *pu8IEs;
- u16 u16IEsLen;
- void *pJoinParams;
- tstrRSSI strRssi;
- u64 u64Tsf;
-} tstrNetworkInfo;
+struct network_info {
+ s8 rssi;
+ u16 cap_info;
+ u8 ssid[MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 bssid[6];
+ u16 beacon_period;
+ u8 dtim_period;
+ u8 ch;
+ unsigned long time_scan_cached;
+ unsigned long time_scan;
+ bool new_network;
+ u8 found;
+ u32 tsf_lo;
+ u8 *ies;
+ u16 ies_len;
+ void *join_params;
+ tstrRSSI str_rssi;
+ u64 tsf_hi;
+};
-typedef struct {
- u16 u16capability;
- u16 u16ConnectStatus;
- u16 u16AssocID;
- u8 *pu8RespIEs;
- u16 u16RespIEsLen;
-} tstrConnectRespInfo;
+struct connect_resp_info {
+ u16 capability;
+ u16 status;
+ u16 assoc_id;
+ u8 *ies;
+ u16 ies_len;
+};
-typedef struct {
- u8 au8bssid[6];
- u8 *pu8ReqIEs;
- size_t ReqIEsLen;
- u8 *pu8RespIEs;
- u16 u16RespIEsLen;
- u16 u16ConnectStatus;
-} tstrConnectInfo;
+struct connect_info {
+ u8 bssid[6];
+ u8 *req_ies;
+ size_t req_ies_len;
+ u8 *resp_ies;
+ u16 resp_ies_len;
+ u16 status;
+};
-typedef struct {
- u16 u16reason;
+struct disconnect_info {
+ u16 reason;
u8 *ie;
size_t ie_len;
-} tstrDisconnectNotifInfo;
-
-s32 wilc_send_config_pkt(struct wilc *wilc, u8 mode, struct wid *wids,
- u32 count, u32 drv);
-s32 wilc_parse_network_info(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo);
-s32 wilc_dealloc_network_info(tstrNetworkInfo *pstrNetworkInfo);
+};
-s32 wilc_parse_assoc_resp_info(u8 *pu8Buffer, u32 u32BufferLen,
- tstrConnectRespInfo **ppstrConnectRespInfo);
-s32 wilc_dealloc_assoc_resp_info(tstrConnectRespInfo *pstrConnectRespInfo);
+s32 wilc_parse_network_info(u8 *msg_buffer,
+ struct network_info **ret_network_info);
+s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len,
+ struct connect_resp_info **ret_connect_resp_info);
void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
u32 u32Length);
void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 8c7752034032..0a922c7c7cbf 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -20,7 +20,6 @@
#define HOST_IF_MSG_SET_CHANNEL 7
#define HOST_IF_MSG_DISCONNECT 8
#define HOST_IF_MSG_GET_RSSI 9
-#define HOST_IF_MSG_GET_CHNL 10
#define HOST_IF_MSG_ADD_BEACON 11
#define HOST_IF_MSG_DEL_BEACON 12
#define HOST_IF_MSG_ADD_STATION 13
@@ -33,20 +32,17 @@
#define HOST_IF_MSG_REMAIN_ON_CHAN 20
#define HOST_IF_MSG_REGISTER_FRAME 21
#define HOST_IF_MSG_LISTEN_TIMER_FIRED 22
-#define HOST_IF_MSG_GET_LINKSPEED 23
#define HOST_IF_MSG_SET_WFIDRV_HANDLER 24
-#define HOST_IF_MSG_SET_MAC_ADDRESS 25
#define HOST_IF_MSG_GET_MAC_ADDRESS 26
#define HOST_IF_MSG_SET_OPERATION_MODE 27
#define HOST_IF_MSG_SET_IPADDRESS 28
#define HOST_IF_MSG_GET_IPADDRESS 29
-#define HOST_IF_MSG_FLUSH_CONNECT 30
#define HOST_IF_MSG_GET_STATISTICS 31
#define HOST_IF_MSG_SET_MULTICAST_FILTER 32
#define HOST_IF_MSG_DEL_BA_SESSION 34
-#define HOST_IF_MSG_Q_IDLE 35
#define HOST_IF_MSG_DEL_ALL_STA 36
-#define HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS 34
+#define HOST_IF_MSG_SET_TX_POWER 38
+#define HOST_IF_MSG_GET_TX_POWER 39
#define HOST_IF_MSG_EXIT 100
#define HOST_IF_SCAN_TIMEOUT 4000
@@ -57,9 +53,8 @@
#define BLOCK_ACK_REQ_SIZE 0x14
#define FALSE_FRMWR_CHANNEL 100
-struct cfg_param_attr {
- struct cfg_param_val cfg_attr_info;
-};
+#define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
+#define DEFAULT_LINK_SPEED 72
struct host_if_wpa_attr {
u8 *key;
@@ -163,6 +158,10 @@ struct sta_inactive_t {
u8 mac[6];
};
+struct tx_power {
+ u8 tx_pwr;
+};
+
union message_body {
struct scan_attr scan_info;
struct connect_attr con_info;
@@ -188,6 +187,7 @@ union message_body {
struct reg_frame reg_frame;
char *data;
struct del_all_sta del_all_sta_info;
+ struct tx_power tx_power;
};
struct host_if_msg {
@@ -201,7 +201,7 @@ struct join_bss_param {
u8 dtim_period;
u16 beacon_period;
u16 cap_info;
- u8 au8bssid[6];
+ u8 bssid[6];
char ssid[MAX_SSID_LEN];
u8 ssid_len;
u8 supp_rates[MAX_RATES_SUPPORTED + 1];
@@ -225,11 +225,11 @@ struct join_bss_param {
u8 start_time[4];
};
-struct host_if_drv *terminated_handle;
+static struct host_if_drv *terminated_handle;
bool wilc_optaining_ip;
static u8 P2P_LISTEN_STATE;
static struct task_struct *hif_thread_handler;
-static WILC_MsgQueueHandle hif_msg_q;
+static struct message_queue hif_msg_q;
static struct semaphore hif_sema_thread;
static struct semaphore hif_sema_driver;
static struct semaphore hif_sema_wait_response;
@@ -243,8 +243,6 @@ static u8 rcv_assoc_resp[MAX_ASSOC_RESP_FRAME_SIZE];
static bool scan_while_connected;
static s8 rssi;
-static s8 link_speed;
-static u8 ch_no;
static u8 set_ip[2][4];
static u8 get_ip[2][4];
static u32 inactive_time;
@@ -262,7 +260,8 @@ static struct wilc_vif *join_req_vif;
#define FLUSHED_JOIN_REQ 1
#define FLUSHED_BYTE_POS 79
-static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo);
+static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
+static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
/* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as
* special purpose in wilc device, so we add 1 to the index to starts from 1.
@@ -270,7 +269,7 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo);
*/
int wilc_get_vif_idx(struct wilc_vif *vif)
{
- return vif->u8IfIdx + 1;
+ return vif->idx + 1;
}
/* We need to minus 1 from idx which is from wilc device to get real index
@@ -288,10 +287,10 @@ static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
return wilc->vif[index];
}
-static s32 handle_set_channel(struct wilc_vif *vif,
- struct channel_attr *hif_set_ch)
+static void handle_set_channel(struct wilc_vif *vif,
+ struct channel_attr *hif_set_ch)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_CURRENT_CHANNEL;
@@ -299,17 +298,11 @@ static s32 handle_set_channel(struct wilc_vif *vif,
wid.val = (char *)&hif_set_ch->set_ch;
wid.size = sizeof(char);
- PRINT_D(HOSTINF_DBG, "Setting channel\n");
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
-
- if (result) {
- PRINT_ER("Failed to set channel\n");
- return -EINVAL;
- }
-
- return result;
+ if (ret)
+ netdev_err(vif->ndev, "Failed to set channel\n");
}
static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
@@ -319,18 +312,18 @@ static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
struct wid wid;
wid.id = (u16)WID_SET_DRV_HANDLER;
- wid.type = WID_INT;
- wid.val = (s8 *)&hif_drv_handler->handler;
- wid.size = sizeof(u32);
+ wid.type = WID_STR;
+ wid.val = (s8 *)hif_drv_handler;
+ wid.size = sizeof(*hif_drv_handler);
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
hif_drv_handler->handler);
if (!hif_drv_handler->handler)
up(&hif_sema_driver);
if (result) {
- PRINT_ER("Failed to set driver handler\n");
+ netdev_err(vif->ndev, "Failed to set driver handler\n");
return -EINVAL;
}
@@ -348,37 +341,29 @@ static s32 handle_set_operation_mode(struct wilc_vif *vif,
wid.val = (s8 *)&hif_op_mode->mode;
wid.size = sizeof(u32);
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if ((hif_op_mode->mode) == IDLE_MODE)
up(&hif_sema_driver);
if (result) {
- PRINT_ER("Failed to set driver handler\n");
+ netdev_err(vif->ndev, "Failed to set driver handler\n");
return -EINVAL;
}
return result;
}
-static s32 host_int_get_ipaddress(struct wilc_vif *vif,
- struct host_if_drv *hif_drv,
- u8 *u16ipadd, u8 idx);
-
static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
s32 result = 0;
struct wid wid;
char firmware_ip_addr[4] = {0};
- struct host_if_drv *hif_drv = vif->hif_drv;
if (ip_addr[0] < 192)
ip_addr[0] = 0;
- PRINT_INFO(HOSTINF_DBG, "Indx = %d, Handling set IP = %pI4\n",
- idx, ip_addr);
-
memcpy(set_ip[idx], ip_addr, IP_ALEN);
wid.id = (u16)WID_IP_ADDRESS;
@@ -386,18 +371,16 @@ static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
wid.val = (u8 *)ip_addr;
wid.size = IP_ALEN;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
- host_int_get_ipaddress(vif, hif_drv, firmware_ip_addr, idx);
+ host_int_get_ipaddress(vif, firmware_ip_addr, idx);
if (result) {
- PRINT_ER("Failed to set IP address\n");
+ netdev_err(vif->ndev, "Failed to set IP address\n");
return -EINVAL;
}
- PRINT_INFO(HOSTINF_DBG, "IP address set\n");
-
return result;
}
@@ -411,10 +394,8 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
wid.val = kmalloc(IP_ALEN, GFP_KERNEL);
wid.size = IP_ALEN;
- result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
-
- PRINT_INFO(HOSTINF_DBG, "%pI4\n", wid.val);
+ result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
memcpy(get_ip[idx], wid.val, IP_ALEN);
@@ -424,44 +405,10 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
wilc_setup_ipaddress(vif, set_ip[idx], idx);
if (result != 0) {
- PRINT_ER("Failed to get IP address\n");
+ netdev_err(vif->ndev, "Failed to get IP address\n");
return -EINVAL;
}
- PRINT_INFO(HOSTINF_DBG, "IP address retrieved:: u8IfIdx = %d\n", idx);
- PRINT_INFO(HOSTINF_DBG, "%pI4\n", get_ip[idx]);
- PRINT_INFO(HOSTINF_DBG, "\n");
-
- return result;
-}
-
-static s32 handle_set_mac_address(struct wilc_vif *vif,
- struct set_mac_addr *set_mac_addr)
-{
- s32 result = 0;
- struct wid wid;
- u8 *mac_buf = kmalloc(ETH_ALEN, GFP_KERNEL);
-
- if (!mac_buf) {
- PRINT_ER("No buffer to send mac address\n");
- return -EFAULT;
- }
- memcpy(mac_buf, set_mac_addr->mac_addr, ETH_ALEN);
-
- wid.id = (u16)WID_MAC_ADDR;
- wid.type = WID_STR;
- wid.val = mac_buf;
- wid.size = ETH_ALEN;
- PRINT_D(GENERIC_DBG, "mac addr = :%pM\n", wid.val);
-
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
- if (result) {
- PRINT_ER("Failed to set mac address\n");
- result = -EFAULT;
- }
-
- kfree(mac_buf);
return result;
}
@@ -476,11 +423,11 @@ static s32 handle_get_mac_address(struct wilc_vif *vif,
wid.val = get_mac_addr->mac_addr;
wid.size = ETH_ALEN;
- result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result) {
- PRINT_ER("Failed to get mac address\n");
+ netdev_err(vif->ndev, "Failed to get mac address\n");
result = -EFAULT;
}
up(&hif_sema_wait_response);
@@ -494,301 +441,294 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
s32 result = 0;
struct wid wid_list[32];
struct host_if_drv *hif_drv = vif->hif_drv;
- u8 wid_cnt = 0;
-
- down(&hif_drv->sem_cfg_values);
+ int i = 0;
- PRINT_D(HOSTINF_DBG, "Setting CFG params\n");
+ mutex_lock(&hif_drv->cfg_values_lock);
- if (cfg_param_attr->cfg_attr_info.flag & BSS_TYPE) {
- if (cfg_param_attr->cfg_attr_info.bss_type < 6) {
- wid_list[wid_cnt].id = WID_BSS_TYPE;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.bss_type;
- wid_list[wid_cnt].type = WID_CHAR;
- wid_list[wid_cnt].size = sizeof(char);
- hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->cfg_attr_info.bss_type;
+ if (cfg_param_attr->flag & BSS_TYPE) {
+ if (cfg_param_attr->bss_type < 6) {
+ wid_list[i].id = WID_BSS_TYPE;
+ wid_list[i].val = (s8 *)&cfg_param_attr->bss_type;
+ wid_list[i].type = WID_CHAR;
+ wid_list[i].size = sizeof(char);
+ hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->bss_type;
} else {
- PRINT_ER("check value 6 over\n");
+ netdev_err(vif->ndev, "check value 6 over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & AUTH_TYPE) {
- if (cfg_param_attr->cfg_attr_info.auth_type == 1 ||
- cfg_param_attr->cfg_attr_info.auth_type == 2 ||
- cfg_param_attr->cfg_attr_info.auth_type == 5) {
- wid_list[wid_cnt].id = WID_AUTH_TYPE;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.auth_type;
- wid_list[wid_cnt].type = WID_CHAR;
- wid_list[wid_cnt].size = sizeof(char);
- hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->cfg_attr_info.auth_type;
+ i++;
+ }
+ if (cfg_param_attr->flag & AUTH_TYPE) {
+ if (cfg_param_attr->auth_type == 1 ||
+ cfg_param_attr->auth_type == 2 ||
+ cfg_param_attr->auth_type == 5) {
+ wid_list[i].id = WID_AUTH_TYPE;
+ wid_list[i].val = (s8 *)&cfg_param_attr->auth_type;
+ wid_list[i].type = WID_CHAR;
+ wid_list[i].size = sizeof(char);
+ hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->auth_type;
} else {
- PRINT_ER("Impossible value \n");
+ netdev_err(vif->ndev, "Impossible value\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & AUTHEN_TIMEOUT) {
- if (cfg_param_attr->cfg_attr_info.auth_timeout > 0 &&
- cfg_param_attr->cfg_attr_info.auth_timeout < 65536) {
- wid_list[wid_cnt].id = WID_AUTH_TIMEOUT;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.auth_timeout;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.auth_timeout = cfg_param_attr->cfg_attr_info.auth_timeout;
+ i++;
+ }
+ if (cfg_param_attr->flag & AUTHEN_TIMEOUT) {
+ if (cfg_param_attr->auth_timeout > 0 &&
+ cfg_param_attr->auth_timeout < 65536) {
+ wid_list[i].id = WID_AUTH_TIMEOUT;
+ wid_list[i].val = (s8 *)&cfg_param_attr->auth_timeout;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.auth_timeout = cfg_param_attr->auth_timeout;
} else {
- PRINT_ER("Range(1 ~ 65535) over\n");
+ netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & POWER_MANAGEMENT) {
- if (cfg_param_attr->cfg_attr_info.power_mgmt_mode < 5) {
- wid_list[wid_cnt].id = WID_POWER_MANAGEMENT;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.power_mgmt_mode;
- wid_list[wid_cnt].type = WID_CHAR;
- wid_list[wid_cnt].size = sizeof(char);
- hif_drv->cfg_values.power_mgmt_mode = (u8)cfg_param_attr->cfg_attr_info.power_mgmt_mode;
+ i++;
+ }
+ if (cfg_param_attr->flag & POWER_MANAGEMENT) {
+ if (cfg_param_attr->power_mgmt_mode < 5) {
+ wid_list[i].id = WID_POWER_MANAGEMENT;
+ wid_list[i].val = (s8 *)&cfg_param_attr->power_mgmt_mode;
+ wid_list[i].type = WID_CHAR;
+ wid_list[i].size = sizeof(char);
+ hif_drv->cfg_values.power_mgmt_mode = (u8)cfg_param_attr->power_mgmt_mode;
} else {
- PRINT_ER("Invalide power mode\n");
+ netdev_err(vif->ndev, "Invalid power mode\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & RETRY_SHORT) {
- if (cfg_param_attr->cfg_attr_info.short_retry_limit > 0 &&
- cfg_param_attr->cfg_attr_info.short_retry_limit < 256) {
- wid_list[wid_cnt].id = WID_SHORT_RETRY_LIMIT;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.short_retry_limit;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.short_retry_limit = cfg_param_attr->cfg_attr_info.short_retry_limit;
+ i++;
+ }
+ if (cfg_param_attr->flag & RETRY_SHORT) {
+ if (cfg_param_attr->short_retry_limit > 0 &&
+ cfg_param_attr->short_retry_limit < 256) {
+ wid_list[i].id = WID_SHORT_RETRY_LIMIT;
+ wid_list[i].val = (s8 *)&cfg_param_attr->short_retry_limit;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.short_retry_limit = cfg_param_attr->short_retry_limit;
} else {
- PRINT_ER("Range(1~256) over\n");
+ netdev_err(vif->ndev, "Range(1~256) over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & RETRY_LONG) {
- if (cfg_param_attr->cfg_attr_info.long_retry_limit > 0 &&
- cfg_param_attr->cfg_attr_info.long_retry_limit < 256) {
- wid_list[wid_cnt].id = WID_LONG_RETRY_LIMIT;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.long_retry_limit;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.long_retry_limit = cfg_param_attr->cfg_attr_info.long_retry_limit;
+ i++;
+ }
+ if (cfg_param_attr->flag & RETRY_LONG) {
+ if (cfg_param_attr->long_retry_limit > 0 &&
+ cfg_param_attr->long_retry_limit < 256) {
+ wid_list[i].id = WID_LONG_RETRY_LIMIT;
+ wid_list[i].val = (s8 *)&cfg_param_attr->long_retry_limit;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.long_retry_limit = cfg_param_attr->long_retry_limit;
} else {
- PRINT_ER("Range(1~256) over\n");
+ netdev_err(vif->ndev, "Range(1~256) over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & FRAG_THRESHOLD) {
- if (cfg_param_attr->cfg_attr_info.frag_threshold > 255 &&
- cfg_param_attr->cfg_attr_info.frag_threshold < 7937) {
- wid_list[wid_cnt].id = WID_FRAG_THRESHOLD;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.frag_threshold;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.frag_threshold = cfg_param_attr->cfg_attr_info.frag_threshold;
+ i++;
+ }
+ if (cfg_param_attr->flag & FRAG_THRESHOLD) {
+ if (cfg_param_attr->frag_threshold > 255 &&
+ cfg_param_attr->frag_threshold < 7937) {
+ wid_list[i].id = WID_FRAG_THRESHOLD;
+ wid_list[i].val = (s8 *)&cfg_param_attr->frag_threshold;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.frag_threshold = cfg_param_attr->frag_threshold;
} else {
- PRINT_ER("Threshold Range fail\n");
+ netdev_err(vif->ndev, "Threshold Range fail\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & RTS_THRESHOLD) {
- if (cfg_param_attr->cfg_attr_info.rts_threshold > 255 &&
- cfg_param_attr->cfg_attr_info.rts_threshold < 65536) {
- wid_list[wid_cnt].id = WID_RTS_THRESHOLD;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.rts_threshold;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.rts_threshold = cfg_param_attr->cfg_attr_info.rts_threshold;
+ i++;
+ }
+ if (cfg_param_attr->flag & RTS_THRESHOLD) {
+ if (cfg_param_attr->rts_threshold > 255 &&
+ cfg_param_attr->rts_threshold < 65536) {
+ wid_list[i].id = WID_RTS_THRESHOLD;
+ wid_list[i].val = (s8 *)&cfg_param_attr->rts_threshold;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.rts_threshold = cfg_param_attr->rts_threshold;
} else {
- PRINT_ER("Threshold Range fail\n");
+ netdev_err(vif->ndev, "Threshold Range fail\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & PREAMBLE) {
- if (cfg_param_attr->cfg_attr_info.preamble_type < 3) {
- wid_list[wid_cnt].id = WID_PREAMBLE;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.preamble_type;
- wid_list[wid_cnt].type = WID_CHAR;
- wid_list[wid_cnt].size = sizeof(char);
- hif_drv->cfg_values.preamble_type = cfg_param_attr->cfg_attr_info.preamble_type;
+ i++;
+ }
+ if (cfg_param_attr->flag & PREAMBLE) {
+ if (cfg_param_attr->preamble_type < 3) {
+ wid_list[i].id = WID_PREAMBLE;
+ wid_list[i].val = (s8 *)&cfg_param_attr->preamble_type;
+ wid_list[i].type = WID_CHAR;
+ wid_list[i].size = sizeof(char);
+ hif_drv->cfg_values.preamble_type = cfg_param_attr->preamble_type;
} else {
- PRINT_ER("Preamle Range(0~2) over\n");
+			netdev_err(vif->ndev, "Preamble Range(0~2) over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & SHORT_SLOT_ALLOWED) {
- if (cfg_param_attr->cfg_attr_info.short_slot_allowed < 2) {
- wid_list[wid_cnt].id = WID_SHORT_SLOT_ALLOWED;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.short_slot_allowed;
- wid_list[wid_cnt].type = WID_CHAR;
- wid_list[wid_cnt].size = sizeof(char);
- hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->cfg_attr_info.short_slot_allowed;
+ i++;
+ }
+ if (cfg_param_attr->flag & SHORT_SLOT_ALLOWED) {
+ if (cfg_param_attr->short_slot_allowed < 2) {
+ wid_list[i].id = WID_SHORT_SLOT_ALLOWED;
+ wid_list[i].val = (s8 *)&cfg_param_attr->short_slot_allowed;
+ wid_list[i].type = WID_CHAR;
+ wid_list[i].size = sizeof(char);
+ hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->short_slot_allowed;
} else {
- PRINT_ER("Short slot(2) over\n");
+ netdev_err(vif->ndev, "Short slot(2) over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & TXOP_PROT_DISABLE) {
- if (cfg_param_attr->cfg_attr_info.txop_prot_disabled < 2) {
- wid_list[wid_cnt].id = WID_11N_TXOP_PROT_DISABLE;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.txop_prot_disabled;
- wid_list[wid_cnt].type = WID_CHAR;
- wid_list[wid_cnt].size = sizeof(char);
- hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->cfg_attr_info.txop_prot_disabled;
+ i++;
+ }
+ if (cfg_param_attr->flag & TXOP_PROT_DISABLE) {
+ if (cfg_param_attr->txop_prot_disabled < 2) {
+ wid_list[i].id = WID_11N_TXOP_PROT_DISABLE;
+ wid_list[i].val = (s8 *)&cfg_param_attr->txop_prot_disabled;
+ wid_list[i].type = WID_CHAR;
+ wid_list[i].size = sizeof(char);
+ hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->txop_prot_disabled;
} else {
- PRINT_ER("TXOP prot disable\n");
+ netdev_err(vif->ndev, "TXOP prot disable\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & BEACON_INTERVAL) {
- if (cfg_param_attr->cfg_attr_info.beacon_interval > 0 &&
- cfg_param_attr->cfg_attr_info.beacon_interval < 65536) {
- wid_list[wid_cnt].id = WID_BEACON_INTERVAL;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.beacon_interval;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.beacon_interval = cfg_param_attr->cfg_attr_info.beacon_interval;
+ i++;
+ }
+ if (cfg_param_attr->flag & BEACON_INTERVAL) {
+ if (cfg_param_attr->beacon_interval > 0 &&
+ cfg_param_attr->beacon_interval < 65536) {
+ wid_list[i].id = WID_BEACON_INTERVAL;
+ wid_list[i].val = (s8 *)&cfg_param_attr->beacon_interval;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.beacon_interval = cfg_param_attr->beacon_interval;
} else {
- PRINT_ER("Beacon interval(1~65535) fail\n");
+			netdev_err(vif->ndev, "Beacon interval(1~65535) fail\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & DTIM_PERIOD) {
- if (cfg_param_attr->cfg_attr_info.dtim_period > 0 &&
- cfg_param_attr->cfg_attr_info.dtim_period < 256) {
- wid_list[wid_cnt].id = WID_DTIM_PERIOD;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.dtim_period;
- wid_list[wid_cnt].type = WID_CHAR;
- wid_list[wid_cnt].size = sizeof(char);
- hif_drv->cfg_values.dtim_period = cfg_param_attr->cfg_attr_info.dtim_period;
+ i++;
+ }
+ if (cfg_param_attr->flag & DTIM_PERIOD) {
+ if (cfg_param_attr->dtim_period > 0 &&
+ cfg_param_attr->dtim_period < 256) {
+ wid_list[i].id = WID_DTIM_PERIOD;
+ wid_list[i].val = (s8 *)&cfg_param_attr->dtim_period;
+ wid_list[i].type = WID_CHAR;
+ wid_list[i].size = sizeof(char);
+ hif_drv->cfg_values.dtim_period = cfg_param_attr->dtim_period;
} else {
- PRINT_ER("DTIM range(1~255) fail\n");
+ netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & SITE_SURVEY) {
- if (cfg_param_attr->cfg_attr_info.site_survey_enabled < 3) {
- wid_list[wid_cnt].id = WID_SITE_SURVEY;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.site_survey_enabled;
- wid_list[wid_cnt].type = WID_CHAR;
- wid_list[wid_cnt].size = sizeof(char);
- hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->cfg_attr_info.site_survey_enabled;
+ i++;
+ }
+ if (cfg_param_attr->flag & SITE_SURVEY) {
+ if (cfg_param_attr->site_survey_enabled < 3) {
+ wid_list[i].id = WID_SITE_SURVEY;
+ wid_list[i].val = (s8 *)&cfg_param_attr->site_survey_enabled;
+ wid_list[i].type = WID_CHAR;
+ wid_list[i].size = sizeof(char);
+ hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->site_survey_enabled;
} else {
- PRINT_ER("Site survey disable\n");
+ netdev_err(vif->ndev, "Site survey disable\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & SITE_SURVEY_SCAN_TIME) {
- if (cfg_param_attr->cfg_attr_info.site_survey_scan_time > 0 &&
- cfg_param_attr->cfg_attr_info.site_survey_scan_time < 65536) {
- wid_list[wid_cnt].id = WID_SITE_SURVEY_SCAN_TIME;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.site_survey_scan_time;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->cfg_attr_info.site_survey_scan_time;
+ i++;
+ }
+ if (cfg_param_attr->flag & SITE_SURVEY_SCAN_TIME) {
+ if (cfg_param_attr->site_survey_scan_time > 0 &&
+ cfg_param_attr->site_survey_scan_time < 65536) {
+ wid_list[i].id = WID_SITE_SURVEY_SCAN_TIME;
+ wid_list[i].val = (s8 *)&cfg_param_attr->site_survey_scan_time;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->site_survey_scan_time;
} else {
- PRINT_ER("Site survey scan time(1~65535) over\n");
+ netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & ACTIVE_SCANTIME) {
- if (cfg_param_attr->cfg_attr_info.active_scan_time > 0 &&
- cfg_param_attr->cfg_attr_info.active_scan_time < 65536) {
- wid_list[wid_cnt].id = WID_ACTIVE_SCAN_TIME;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.active_scan_time;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.active_scan_time = cfg_param_attr->cfg_attr_info.active_scan_time;
+ i++;
+ }
+ if (cfg_param_attr->flag & ACTIVE_SCANTIME) {
+ if (cfg_param_attr->active_scan_time > 0 &&
+ cfg_param_attr->active_scan_time < 65536) {
+ wid_list[i].id = WID_ACTIVE_SCAN_TIME;
+ wid_list[i].val = (s8 *)&cfg_param_attr->active_scan_time;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.active_scan_time = cfg_param_attr->active_scan_time;
} else {
- PRINT_ER("Active scan time(1~65535) over\n");
+ netdev_err(vif->ndev, "Active time(1~65535) over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & PASSIVE_SCANTIME) {
- if (cfg_param_attr->cfg_attr_info.passive_scan_time > 0 &&
- cfg_param_attr->cfg_attr_info.passive_scan_time < 65536) {
- wid_list[wid_cnt].id = WID_PASSIVE_SCAN_TIME;
- wid_list[wid_cnt].val = (s8 *)&cfg_param_attr->cfg_attr_info.passive_scan_time;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
- hif_drv->cfg_values.passive_scan_time = cfg_param_attr->cfg_attr_info.passive_scan_time;
+ i++;
+ }
+ if (cfg_param_attr->flag & PASSIVE_SCANTIME) {
+ if (cfg_param_attr->passive_scan_time > 0 &&
+ cfg_param_attr->passive_scan_time < 65536) {
+ wid_list[i].id = WID_PASSIVE_SCAN_TIME;
+ wid_list[i].val = (s8 *)&cfg_param_attr->passive_scan_time;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
+ hif_drv->cfg_values.passive_scan_time = cfg_param_attr->passive_scan_time;
} else {
- PRINT_ER("Passive scan time(1~65535) over\n");
+ netdev_err(vif->ndev, "Passive time(1~65535) over\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
- }
- if (cfg_param_attr->cfg_attr_info.flag & CURRENT_TX_RATE) {
- enum CURRENT_TXRATE curr_tx_rate = cfg_param_attr->cfg_attr_info.curr_tx_rate;
-
- if (curr_tx_rate == AUTORATE || curr_tx_rate == MBPS_1
- || curr_tx_rate == MBPS_2 || curr_tx_rate == MBPS_5_5
- || curr_tx_rate == MBPS_11 || curr_tx_rate == MBPS_6
- || curr_tx_rate == MBPS_9 || curr_tx_rate == MBPS_12
- || curr_tx_rate == MBPS_18 || curr_tx_rate == MBPS_24
- || curr_tx_rate == MBPS_36 || curr_tx_rate == MBPS_48 || curr_tx_rate == MBPS_54) {
- wid_list[wid_cnt].id = WID_CURRENT_TX_RATE;
- wid_list[wid_cnt].val = (s8 *)&curr_tx_rate;
- wid_list[wid_cnt].type = WID_SHORT;
- wid_list[wid_cnt].size = sizeof(u16);
+ i++;
+ }
+ if (cfg_param_attr->flag & CURRENT_TX_RATE) {
+ enum CURRENT_TXRATE curr_tx_rate = cfg_param_attr->curr_tx_rate;
+
+ if (curr_tx_rate == AUTORATE || curr_tx_rate == MBPS_1 ||
+ curr_tx_rate == MBPS_2 || curr_tx_rate == MBPS_5_5 ||
+ curr_tx_rate == MBPS_11 || curr_tx_rate == MBPS_6 ||
+ curr_tx_rate == MBPS_9 || curr_tx_rate == MBPS_12 ||
+ curr_tx_rate == MBPS_18 || curr_tx_rate == MBPS_24 ||
+ curr_tx_rate == MBPS_36 || curr_tx_rate == MBPS_48 ||
+ curr_tx_rate == MBPS_54) {
+ wid_list[i].id = WID_CURRENT_TX_RATE;
+ wid_list[i].val = (s8 *)&curr_tx_rate;
+ wid_list[i].type = WID_SHORT;
+ wid_list[i].size = sizeof(u16);
hif_drv->cfg_values.curr_tx_rate = (u8)curr_tx_rate;
} else {
- PRINT_ER("out of TX rate\n");
+ netdev_err(vif->ndev, "out of TX rate\n");
result = -EINVAL;
goto ERRORHANDLER;
}
- wid_cnt++;
+ i++;
}
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, wid_list,
- wid_cnt, wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
+ i, wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Error in setting CFG params\n");
+ netdev_err(vif->ndev, "Error in setting CFG params\n");
ERRORHANDLER:
- up(&hif_drv->sem_cfg_values);
+ mutex_unlock(&hif_drv->cfg_values_lock);
return result;
}
-static void Handle_wait_msg_q_empty(void)
-{
- wilc_initialized = 0;
- up(&hif_sema_wait_response);
-}
-
static s32 Handle_ScanDone(struct wilc_vif *vif,
enum scan_event enuEvent);
@@ -804,50 +744,40 @@ static s32 Handle_Scan(struct wilc_vif *vif,
u8 *pu8HdnNtwrksWidVal = NULL;
struct host_if_drv *hif_drv = vif->hif_drv;
- PRINT_D(HOSTINF_DBG, "Setting SCAN params\n");
- PRINT_D(HOSTINF_DBG, "Scanning: In [%d] state\n", hif_drv->hif_state);
-
hif_drv->usr_scan_req.scan_result = pstrHostIFscanAttr->result;
hif_drv->usr_scan_req.arg = pstrHostIFscanAttr->arg;
if ((hif_drv->hif_state >= HOST_IF_SCANNING) &&
(hif_drv->hif_state < HOST_IF_CONNECTED)) {
- PRINT_D(GENERIC_DBG, "Don't scan already in [%d] state\n",
- hif_drv->hif_state);
- PRINT_ER("Already scan\n");
+		netdev_err(vif->ndev, "Already scanning\n");
result = -EBUSY;
goto ERRORHANDLER;
}
if (wilc_optaining_ip || wilc_connecting) {
- PRINT_D(GENERIC_DBG, "[handle_scan]: Don't do obss scan until IP adresss is obtained\n");
- PRINT_ER("Don't do obss scan\n");
+ netdev_err(vif->ndev, "Don't do obss scan\n");
result = -EBUSY;
goto ERRORHANDLER;
}
- PRINT_D(HOSTINF_DBG, "Setting SCAN params\n");
-
hif_drv->usr_scan_req.rcvd_ch_cnt = 0;
strWIDList[u32WidsCount].id = (u16)WID_SSID_PROBE_REQ;
strWIDList[u32WidsCount].type = WID_STR;
- for (i = 0; i < pstrHostIFscanAttr->hidden_network.u8ssidnum; i++)
- valuesize += ((pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen) + 1);
+ for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++)
+ valuesize += ((pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len) + 1);
pu8HdnNtwrksWidVal = kmalloc(valuesize + 1, GFP_KERNEL);
strWIDList[u32WidsCount].val = pu8HdnNtwrksWidVal;
if (strWIDList[u32WidsCount].val) {
pu8Buffer = strWIDList[u32WidsCount].val;
- *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.u8ssidnum;
+ *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.n_ssids;
- PRINT_D(HOSTINF_DBG, "In Handle_ProbeRequest number of ssid %d\n", pstrHostIFscanAttr->hidden_network.u8ssidnum);
-
- for (i = 0; i < pstrHostIFscanAttr->hidden_network.u8ssidnum; i++) {
- *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen;
- memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].pu8ssid, pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen);
- pu8Buffer += pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo[i].u8ssidlen;
+ for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) {
+ *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
+ memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.net_info[i].ssid, pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len);
+ pu8Buffer += pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
}
strWIDList[u32WidsCount].size = (s32)(valuesize + 1);
@@ -896,14 +826,12 @@ static s32 Handle_Scan(struct wilc_vif *vif,
else if (hif_drv->hif_state == HOST_IF_IDLE)
scan_while_connected = false;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList,
+ result = wilc_send_config_pkt(vif, SET_CFG, strWIDList,
u32WidsCount,
wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send scan paramters config packet\n");
- else
- PRINT_D(HOSTINF_DBG, "Successfully sent SCAN params config packet\n");
+ netdev_err(vif->ndev, "Failed to send scan parameters\n");
ERRORHANDLER:
if (result) {
@@ -916,8 +844,8 @@ ERRORHANDLER:
kfree(pstrHostIFscanAttr->ies);
pstrHostIFscanAttr->ies = NULL;
- kfree(pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo);
- pstrHostIFscanAttr->hidden_network.pstrHiddenNetworkInfo = NULL;
+ kfree(pstrHostIFscanAttr->hidden_network.net_info);
+ pstrHostIFscanAttr->hidden_network.net_info = NULL;
kfree(pu8HdnNtwrksWidVal);
@@ -932,27 +860,24 @@ static s32 Handle_ScanDone(struct wilc_vif *vif,
struct wid wid;
struct host_if_drv *hif_drv = vif->hif_drv;
- PRINT_D(HOSTINF_DBG, "in Handle_ScanDone()\n");
-
if (enuEvent == SCAN_EVENT_ABORTED) {
- PRINT_D(GENERIC_DBG, "Abort running scan\n");
u8abort_running_scan = 1;
wid.id = (u16)WID_ABORT_RUNNING_SCAN;
wid.type = WID_CHAR;
wid.val = (s8 *)&u8abort_running_scan;
wid.size = sizeof(char);
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result) {
- PRINT_ER("Failed to set abort running scan\n");
+			netdev_err(vif->ndev, "Failed to abort running scan\n");
result = -EFAULT;
}
}
if (!hif_drv) {
- PRINT_ER("Driver handler is NULL\n");
+ netdev_err(vif->ndev, "Driver handler is NULL\n");
return result;
}
@@ -976,35 +901,31 @@ static s32 Handle_Connect(struct wilc_vif *vif,
struct join_bss_param *ptstrJoinBssParam;
struct host_if_drv *hif_drv = vif->hif_drv;
- PRINT_D(GENERIC_DBG, "Handling connect request\n");
-
if (memcmp(pstrHostIFconnectAttr->bssid, wilc_connected_ssid, ETH_ALEN) == 0) {
result = 0;
- PRINT_ER("Trying to connect to an already connected AP, Discard connect request\n");
+ netdev_err(vif->ndev, "Discard connect request\n");
return result;
}
- PRINT_INFO(HOSTINF_DBG, "Saving connection parameters in global structure\n");
-
- ptstrJoinBssParam = (struct join_bss_param *)pstrHostIFconnectAttr->params;
+ ptstrJoinBssParam = pstrHostIFconnectAttr->params;
if (!ptstrJoinBssParam) {
- PRINT_ER("Required BSSID not found\n");
+ netdev_err(vif->ndev, "Required BSSID not found\n");
result = -ENOENT;
goto ERRORHANDLER;
}
if (pstrHostIFconnectAttr->bssid) {
- hif_drv->usr_conn_req.pu8bssid = kmalloc(6, GFP_KERNEL);
- memcpy(hif_drv->usr_conn_req.pu8bssid, pstrHostIFconnectAttr->bssid, 6);
+ hif_drv->usr_conn_req.bssid = kmalloc(6, GFP_KERNEL);
+ memcpy(hif_drv->usr_conn_req.bssid, pstrHostIFconnectAttr->bssid, 6);
}
hif_drv->usr_conn_req.ssid_len = pstrHostIFconnectAttr->ssid_len;
if (pstrHostIFconnectAttr->ssid) {
- hif_drv->usr_conn_req.pu8ssid = kmalloc(pstrHostIFconnectAttr->ssid_len + 1, GFP_KERNEL);
- memcpy(hif_drv->usr_conn_req.pu8ssid,
+ hif_drv->usr_conn_req.ssid = kmalloc(pstrHostIFconnectAttr->ssid_len + 1, GFP_KERNEL);
+ memcpy(hif_drv->usr_conn_req.ssid,
pstrHostIFconnectAttr->ssid,
pstrHostIFconnectAttr->ssid_len);
- hif_drv->usr_conn_req.pu8ssid[pstrHostIFconnectAttr->ssid_len] = '\0';
+ hif_drv->usr_conn_req.ssid[pstrHostIFconnectAttr->ssid_len] = '\0';
}
hif_drv->usr_conn_req.ies_len = pstrHostIFconnectAttr->ies_len;
@@ -1015,7 +936,7 @@ static s32 Handle_Connect(struct wilc_vif *vif,
pstrHostIFconnectAttr->ies_len);
}
- hif_drv->usr_conn_req.u8security = pstrHostIFconnectAttr->security;
+ hif_drv->usr_conn_req.security = pstrHostIFconnectAttr->security;
hif_drv->usr_conn_req.auth_type = pstrHostIFconnectAttr->auth_type;
hif_drv->usr_conn_req.conn_result = pstrHostIFconnectAttr->result;
hif_drv->usr_conn_req.arg = pstrHostIFconnectAttr->arg;
@@ -1055,13 +976,11 @@ static s32 Handle_Connect(struct wilc_vif *vif,
strWIDList[u32WidsCount].id = (u16)WID_11I_MODE;
strWIDList[u32WidsCount].type = WID_CHAR;
strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)&hif_drv->usr_conn_req.u8security;
+ strWIDList[u32WidsCount].val = (s8 *)&hif_drv->usr_conn_req.security;
u32WidsCount++;
if (memcmp("DIRECT-", pstrHostIFconnectAttr->ssid, 7))
- mode_11i = hif_drv->usr_conn_req.u8security;
-
- PRINT_INFO(HOSTINF_DBG, "Encrypt Mode = %x\n", hif_drv->usr_conn_req.u8security);
+ mode_11i = hif_drv->usr_conn_req.security;
strWIDList[u32WidsCount].id = (u16)WID_AUTH_TYPE;
strWIDList[u32WidsCount].type = WID_CHAR;
@@ -1072,11 +991,6 @@ static s32 Handle_Connect(struct wilc_vif *vif,
if (memcmp("DIRECT-", pstrHostIFconnectAttr->ssid, 7))
auth_type = (u8)hif_drv->usr_conn_req.auth_type;
- PRINT_INFO(HOSTINF_DBG, "Authentication Type = %x\n",
- hif_drv->usr_conn_req.auth_type);
- PRINT_D(HOSTINF_DBG, "Connecting to network of SSID %s on channel %d\n",
- hif_drv->usr_conn_req.pu8ssid, pstrHostIFconnectAttr->ch);
-
strWIDList[u32WidsCount].id = (u16)WID_JOIN_REQ_EXTENDED;
strWIDList[u32WidsCount].type = WID_STR;
strWIDList[u32WidsCount].size = 112;
@@ -1103,12 +1017,11 @@ static s32 Handle_Connect(struct wilc_vif *vif,
if ((pstrHostIFconnectAttr->ch >= 1) && (pstrHostIFconnectAttr->ch <= 14)) {
*(pu8CurrByte++) = pstrHostIFconnectAttr->ch;
} else {
- PRINT_ER("Channel out of range\n");
+ netdev_err(vif->ndev, "Channel out of range\n");
*(pu8CurrByte++) = 0xFF;
}
*(pu8CurrByte++) = (ptstrJoinBssParam->cap_info) & 0xFF;
*(pu8CurrByte++) = ((ptstrJoinBssParam->cap_info) >> 8) & 0xFF;
- PRINT_D(HOSTINF_DBG, "* Cap Info %0x*\n", (*(pu8CurrByte - 2) | ((*(pu8CurrByte - 1)) << 8)));
if (pstrHostIFconnectAttr->bssid)
memcpy(pu8CurrByte, pstrHostIFconnectAttr->bssid, 6);
@@ -1120,26 +1033,20 @@ static s32 Handle_Connect(struct wilc_vif *vif,
*(pu8CurrByte++) = (ptstrJoinBssParam->beacon_period) & 0xFF;
*(pu8CurrByte++) = ((ptstrJoinBssParam->beacon_period) >> 8) & 0xFF;
- PRINT_D(HOSTINF_DBG, "* Beacon Period %d*\n", (*(pu8CurrByte - 2) | ((*(pu8CurrByte - 1)) << 8)));
*(pu8CurrByte++) = ptstrJoinBssParam->dtim_period;
- PRINT_D(HOSTINF_DBG, "* DTIM Period %d*\n", (*(pu8CurrByte - 1)));
memcpy(pu8CurrByte, ptstrJoinBssParam->supp_rates, MAX_RATES_SUPPORTED + 1);
pu8CurrByte += (MAX_RATES_SUPPORTED + 1);
*(pu8CurrByte++) = ptstrJoinBssParam->wmm_cap;
- PRINT_D(HOSTINF_DBG, "* wmm cap%d*\n", (*(pu8CurrByte - 1)));
*(pu8CurrByte++) = ptstrJoinBssParam->uapsd_cap;
*(pu8CurrByte++) = ptstrJoinBssParam->ht_capable;
hif_drv->usr_conn_req.ht_capable = ptstrJoinBssParam->ht_capable;
*(pu8CurrByte++) = ptstrJoinBssParam->rsn_found;
- PRINT_D(HOSTINF_DBG, "* rsn found %d*\n", *(pu8CurrByte - 1));
*(pu8CurrByte++) = ptstrJoinBssParam->rsn_grp_policy;
- PRINT_D(HOSTINF_DBG, "* rsn group policy %0x*\n", (*(pu8CurrByte - 1)));
*(pu8CurrByte++) = ptstrJoinBssParam->mode_802_11i;
- PRINT_D(HOSTINF_DBG, "* mode_802_11i %d*\n", (*(pu8CurrByte - 1)));
memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_pcip_policy, sizeof(ptstrJoinBssParam->rsn_pcip_policy));
pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_pcip_policy);
@@ -1154,8 +1061,6 @@ static s32 Handle_Connect(struct wilc_vif *vif,
*(pu8CurrByte++) = ptstrJoinBssParam->noa_enabled;
if (ptstrJoinBssParam->noa_enabled) {
- PRINT_D(HOSTINF_DBG, "NOA present\n");
-
*(pu8CurrByte++) = (ptstrJoinBssParam->tsf) & 0xFF;
*(pu8CurrByte++) = ((ptstrJoinBssParam->tsf) >> 8) & 0xFF;
*(pu8CurrByte++) = ((ptstrJoinBssParam->tsf) >> 16) & 0xFF;
@@ -1177,8 +1082,7 @@ static s32 Handle_Connect(struct wilc_vif *vif,
memcpy(pu8CurrByte, ptstrJoinBssParam->start_time, sizeof(ptstrJoinBssParam->start_time));
pu8CurrByte += sizeof(ptstrJoinBssParam->start_time);
- } else
- PRINT_D(HOSTINF_DBG, "NOA not present\n");
+ }
pu8CurrByte = strWIDList[u32WidsCount].val;
u32WidsCount++;
@@ -1188,46 +1092,37 @@ static s32 Handle_Connect(struct wilc_vif *vif,
join_req_vif = vif;
}
- PRINT_D(GENERIC_DBG, "send HOST_IF_WAITING_CONN_RESP\n");
-
- if (pstrHostIFconnectAttr->bssid) {
+ if (pstrHostIFconnectAttr->bssid)
memcpy(wilc_connected_ssid,
pstrHostIFconnectAttr->bssid, ETH_ALEN);
- PRINT_D(GENERIC_DBG, "save Bssid = %pM\n",
- pstrHostIFconnectAttr->bssid);
- PRINT_D(GENERIC_DBG, "save bssid = %pM\n", wilc_connected_ssid);
- }
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList,
+ result = wilc_send_config_pkt(vif, SET_CFG, strWIDList,
u32WidsCount,
wilc_get_vif_idx(vif));
if (result) {
- PRINT_ER("failed to send config packet\n");
+ netdev_err(vif->ndev, "failed to send config packet\n");
result = -EFAULT;
goto ERRORHANDLER;
} else {
- PRINT_D(GENERIC_DBG, "set HOST_IF_WAITING_CONN_RESP\n");
hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP;
}
ERRORHANDLER:
if (result) {
- tstrConnectInfo strConnectInfo;
+ struct connect_info strConnectInfo;
del_timer(&hif_drv->connect_timer);
- PRINT_D(HOSTINF_DBG, "could not start wilc_connecting to the required network\n");
-
- memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+ memset(&strConnectInfo, 0, sizeof(struct connect_info));
if (pstrHostIFconnectAttr->result) {
if (pstrHostIFconnectAttr->bssid)
- memcpy(strConnectInfo.au8bssid, pstrHostIFconnectAttr->bssid, 6);
+ memcpy(strConnectInfo.bssid, pstrHostIFconnectAttr->bssid, 6);
if (pstrHostIFconnectAttr->ies) {
- strConnectInfo.ReqIEsLen = pstrHostIFconnectAttr->ies_len;
- strConnectInfo.pu8ReqIEs = kmalloc(pstrHostIFconnectAttr->ies_len, GFP_KERNEL);
- memcpy(strConnectInfo.pu8ReqIEs,
+ strConnectInfo.req_ies_len = pstrHostIFconnectAttr->ies_len;
+ strConnectInfo.req_ies = kmalloc(pstrHostIFconnectAttr->ies_len, GFP_KERNEL);
+ memcpy(strConnectInfo.req_ies,
pstrHostIFconnectAttr->ies,
pstrHostIFconnectAttr->ies_len);
}
@@ -1238,15 +1133,14 @@ ERRORHANDLER:
NULL,
pstrHostIFconnectAttr->arg);
hif_drv->hif_state = HOST_IF_IDLE;
- kfree(strConnectInfo.pu8ReqIEs);
- strConnectInfo.pu8ReqIEs = NULL;
+ kfree(strConnectInfo.req_ies);
+ strConnectInfo.req_ies = NULL;
} else {
- PRINT_ER("Connect callback function pointer is NULL\n");
+ netdev_err(vif->ndev, "Connect callback is NULL\n");
}
}
- PRINT_D(HOSTINF_DBG, "Deallocating connection parameters\n");
kfree(pstrHostIFconnectAttr->bssid);
pstrHostIFconnectAttr->bssid = NULL;
@@ -1260,63 +1154,16 @@ ERRORHANDLER:
return result;
}
-static s32 Handle_FlushConnect(struct wilc_vif *vif)
-{
- s32 result = 0;
- struct wid strWIDList[5];
- u32 u32WidsCount = 0;
- u8 *pu8CurrByte = NULL;
-
- strWIDList[u32WidsCount].id = WID_INFO_ELEMENT_ASSOCIATE;
- strWIDList[u32WidsCount].type = WID_BIN_DATA;
- strWIDList[u32WidsCount].val = info_element;
- strWIDList[u32WidsCount].size = info_element_size;
- u32WidsCount++;
-
- strWIDList[u32WidsCount].id = (u16)WID_11I_MODE;
- strWIDList[u32WidsCount].type = WID_CHAR;
- strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)(&(mode_11i));
- u32WidsCount++;
-
- strWIDList[u32WidsCount].id = (u16)WID_AUTH_TYPE;
- strWIDList[u32WidsCount].type = WID_CHAR;
- strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)(&auth_type);
- u32WidsCount++;
-
- strWIDList[u32WidsCount].id = (u16)WID_JOIN_REQ_EXTENDED;
- strWIDList[u32WidsCount].type = WID_STR;
- strWIDList[u32WidsCount].size = join_req_size;
- strWIDList[u32WidsCount].val = (s8 *)join_req;
- pu8CurrByte = strWIDList[u32WidsCount].val;
-
- pu8CurrByte += FLUSHED_BYTE_POS;
- *(pu8CurrByte) = FLUSHED_JOIN_REQ;
-
- u32WidsCount++;
-
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, strWIDList,
- u32WidsCount,
- wilc_get_vif_idx(join_req_vif));
- if (result) {
- PRINT_ER("failed to send config packet\n");
- result = -EINVAL;
- }
-
- return result;
-}
-
static s32 Handle_ConnectTimeout(struct wilc_vif *vif)
{
s32 result = 0;
- tstrConnectInfo strConnectInfo;
+ struct connect_info strConnectInfo;
struct wid wid;
u16 u16DummyReasonCode = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("Driver handler is NULL\n");
+ netdev_err(vif->ndev, "Driver handler is NULL\n");
return result;
}
@@ -1324,18 +1171,18 @@ static s32 Handle_ConnectTimeout(struct wilc_vif *vif)
scan_while_connected = false;
- memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+ memset(&strConnectInfo, 0, sizeof(struct connect_info));
if (hif_drv->usr_conn_req.conn_result) {
- if (hif_drv->usr_conn_req.pu8bssid) {
- memcpy(strConnectInfo.au8bssid,
- hif_drv->usr_conn_req.pu8bssid, 6);
+ if (hif_drv->usr_conn_req.bssid) {
+ memcpy(strConnectInfo.bssid,
+ hif_drv->usr_conn_req.bssid, 6);
}
if (hif_drv->usr_conn_req.ies) {
- strConnectInfo.ReqIEsLen = hif_drv->usr_conn_req.ies_len;
- strConnectInfo.pu8ReqIEs = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
- memcpy(strConnectInfo.pu8ReqIEs,
+ strConnectInfo.req_ies_len = hif_drv->usr_conn_req.ies_len;
+ strConnectInfo.req_ies = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
+ memcpy(strConnectInfo.req_ies,
hif_drv->usr_conn_req.ies,
hif_drv->usr_conn_req.ies_len);
}
@@ -1346,10 +1193,10 @@ static s32 Handle_ConnectTimeout(struct wilc_vif *vif)
NULL,
hif_drv->usr_conn_req.arg);
- kfree(strConnectInfo.pu8ReqIEs);
- strConnectInfo.pu8ReqIEs = NULL;
+ kfree(strConnectInfo.req_ies);
+ strConnectInfo.req_ies = NULL;
} else {
- PRINT_ER("Connect callback function pointer is NULL\n");
+ netdev_err(vif->ndev, "Connect callback is NULL\n");
}
wid.id = (u16)WID_DISCONNECT;
@@ -1357,18 +1204,16 @@ static s32 Handle_ConnectTimeout(struct wilc_vif *vif)
wid.val = (s8 *)&u16DummyReasonCode;
wid.size = sizeof(char);
- PRINT_D(HOSTINF_DBG, "Sending disconnect request\n");
-
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send dissconect config packet\n");
+		netdev_err(vif->ndev, "Failed to send disconnect\n");
hif_drv->usr_conn_req.ssid_len = 0;
- kfree(hif_drv->usr_conn_req.pu8ssid);
- hif_drv->usr_conn_req.pu8ssid = NULL;
- kfree(hif_drv->usr_conn_req.pu8bssid);
- hif_drv->usr_conn_req.pu8bssid = NULL;
+ kfree(hif_drv->usr_conn_req.ssid);
+ hif_drv->usr_conn_req.ssid = NULL;
+ kfree(hif_drv->usr_conn_req.bssid);
+ hif_drv->usr_conn_req.bssid = NULL;
hif_drv->usr_conn_req.ies_len = 0;
kfree(hif_drv->usr_conn_req.ies);
hif_drv->usr_conn_req.ies = NULL;
@@ -1394,33 +1239,30 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
u32 i;
bool bNewNtwrkFound;
s32 result = 0;
- tstrNetworkInfo *pstrNetworkInfo = NULL;
+ struct network_info *pstrNetworkInfo = NULL;
void *pJoinParams = NULL;
struct host_if_drv *hif_drv = vif->hif_drv;
bNewNtwrkFound = true;
- PRINT_INFO(HOSTINF_DBG, "Handling received network info\n");
if (hif_drv->usr_scan_req.scan_result) {
- PRINT_D(HOSTINF_DBG, "State: Scanning, parsing network information received\n");
wilc_parse_network_info(pstrRcvdNetworkInfo->buffer, &pstrNetworkInfo);
if ((!pstrNetworkInfo) ||
(!hif_drv->usr_scan_req.scan_result)) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
result = -EINVAL;
goto done;
}
for (i = 0; i < hif_drv->usr_scan_req.rcvd_ch_cnt; i++) {
- if ((hif_drv->usr_scan_req.net_info[i].au8bssid) &&
- (pstrNetworkInfo->au8bssid)) {
- if (memcmp(hif_drv->usr_scan_req.net_info[i].au8bssid,
- pstrNetworkInfo->au8bssid, 6) == 0) {
- if (pstrNetworkInfo->s8rssi <= hif_drv->usr_scan_req.net_info[i].s8rssi) {
- PRINT_D(HOSTINF_DBG, "Network previously discovered\n");
+ if ((hif_drv->usr_scan_req.net_info[i].bssid) &&
+ (pstrNetworkInfo->bssid)) {
+ if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid,
+ pstrNetworkInfo->bssid, 6) == 0) {
+ if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) {
goto done;
} else {
- hif_drv->usr_scan_req.net_info[i].s8rssi = pstrNetworkInfo->s8rssi;
+ hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi;
bNewNtwrkFound = false;
break;
}
@@ -1429,30 +1271,26 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
}
if (bNewNtwrkFound) {
- PRINT_D(HOSTINF_DBG, "New network found\n");
-
if (hif_drv->usr_scan_req.rcvd_ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
- hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].s8rssi = pstrNetworkInfo->s8rssi;
+ hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].rssi = pstrNetworkInfo->rssi;
- if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].au8bssid &&
- pstrNetworkInfo->au8bssid) {
- memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].au8bssid,
- pstrNetworkInfo->au8bssid, 6);
+ if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid &&
+ pstrNetworkInfo->bssid) {
+ memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid,
+ pstrNetworkInfo->bssid, 6);
hif_drv->usr_scan_req.rcvd_ch_cnt++;
- pstrNetworkInfo->bNewNetwork = true;
+ pstrNetworkInfo->new_network = true;
pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo);
hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
hif_drv->usr_scan_req.arg,
pJoinParams);
}
- } else {
- PRINT_WRN(HOSTINF_DBG, "Discovered networks exceeded max. limit\n");
}
} else {
- pstrNetworkInfo->bNewNetwork = false;
+ pstrNetworkInfo->new_network = false;
hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
hif_drv->usr_scan_req.arg, NULL);
}
@@ -1463,8 +1301,8 @@ done:
pstrRcvdNetworkInfo->buffer = NULL;
if (pstrNetworkInfo) {
- wilc_dealloc_network_info(pstrNetworkInfo);
- pstrNetworkInfo = NULL;
+ kfree(pstrNetworkInfo->ies);
+ kfree(pstrNetworkInfo);
}
return result;
@@ -1487,31 +1325,29 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
u8 u8MacStatus;
u8 u8MacStatusReasonCode;
u8 u8MacStatusAdditionalInfo;
- tstrConnectInfo strConnectInfo;
- tstrDisconnectNotifInfo strDisconnectNotifInfo;
+ struct connect_info strConnectInfo;
+ struct disconnect_info strDisconnectNotifInfo;
s32 s32Err = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("Driver handler is NULL\n");
+ netdev_err(vif->ndev, "Driver handler is NULL\n");
return -ENODEV;
}
- PRINT_D(GENERIC_DBG, "Current State = %d,Received state = %d\n",
- hif_drv->hif_state, pstrRcvdGnrlAsyncInfo->buffer[7]);
if ((hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) ||
(hif_drv->hif_state == HOST_IF_CONNECTED) ||
hif_drv->usr_scan_req.scan_result) {
if (!pstrRcvdGnrlAsyncInfo->buffer ||
!hif_drv->usr_conn_req.conn_result) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EINVAL;
}
u8MsgType = pstrRcvdGnrlAsyncInfo->buffer[0];
if ('I' != u8MsgType) {
- PRINT_ER("Received Message format incorrect.\n");
+			netdev_err(vif->ndev, "Received message incorrect\n");
return -EFAULT;
}
@@ -1522,14 +1358,11 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
u8MacStatus = pstrRcvdGnrlAsyncInfo->buffer[7];
u8MacStatusReasonCode = pstrRcvdGnrlAsyncInfo->buffer[8];
u8MacStatusAdditionalInfo = pstrRcvdGnrlAsyncInfo->buffer[9];
- PRINT_INFO(HOSTINF_DBG, "Recieved MAC status = %d with Reason = %d , Info = %d\n", u8MacStatus, u8MacStatusReasonCode, u8MacStatusAdditionalInfo);
if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
u32 u32RcvdAssocRespInfoLen = 0;
- tstrConnectRespInfo *pstrConnectRespInfo = NULL;
-
- PRINT_D(HOSTINF_DBG, "Recieved MAC status = %d with Reason = %d , Code = %d\n", u8MacStatus, u8MacStatusReasonCode, u8MacStatusAdditionalInfo);
+ struct connect_resp_info *pstrConnectRespInfo = NULL;
- memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+ memset(&strConnectInfo, 0, sizeof(struct connect_info));
if (u8MacStatus == MAC_CONNECTED) {
memset(rcv_assoc_resp, 0, MAX_ASSOC_RESP_FRAME_SIZE);
@@ -1539,59 +1372,54 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
MAX_ASSOC_RESP_FRAME_SIZE,
&u32RcvdAssocRespInfoLen);
- PRINT_INFO(HOSTINF_DBG, "Received association response with length = %d\n", u32RcvdAssocRespInfoLen);
-
if (u32RcvdAssocRespInfoLen != 0) {
- PRINT_D(HOSTINF_DBG, "Parsing association response\n");
s32Err = wilc_parse_assoc_resp_info(rcv_assoc_resp, u32RcvdAssocRespInfoLen,
&pstrConnectRespInfo);
if (s32Err) {
- PRINT_ER("wilc_parse_assoc_resp_info() returned error %d\n", s32Err);
+ netdev_err(vif->ndev, "wilc_parse_assoc_resp_info() returned error %d\n", s32Err);
} else {
- strConnectInfo.u16ConnectStatus = pstrConnectRespInfo->u16ConnectStatus;
-
- if (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE) {
- PRINT_INFO(HOSTINF_DBG, "Association response received : Successful connection status\n");
- if (pstrConnectRespInfo->pu8RespIEs) {
- strConnectInfo.u16RespIEsLen = pstrConnectRespInfo->u16RespIEsLen;
- strConnectInfo.pu8RespIEs = kmalloc(pstrConnectRespInfo->u16RespIEsLen, GFP_KERNEL);
- memcpy(strConnectInfo.pu8RespIEs, pstrConnectRespInfo->pu8RespIEs,
- pstrConnectRespInfo->u16RespIEsLen);
+ strConnectInfo.status = pstrConnectRespInfo->status;
+
+ if (strConnectInfo.status == SUCCESSFUL_STATUSCODE) {
+ if (pstrConnectRespInfo->ies) {
+ strConnectInfo.resp_ies_len = pstrConnectRespInfo->ies_len;
+ strConnectInfo.resp_ies = kmalloc(pstrConnectRespInfo->ies_len, GFP_KERNEL);
+ memcpy(strConnectInfo.resp_ies, pstrConnectRespInfo->ies,
+ pstrConnectRespInfo->ies_len);
}
}
if (pstrConnectRespInfo) {
- wilc_dealloc_assoc_resp_info(pstrConnectRespInfo);
- pstrConnectRespInfo = NULL;
+ kfree(pstrConnectRespInfo->ies);
+ kfree(pstrConnectRespInfo);
}
}
}
}
if ((u8MacStatus == MAC_CONNECTED) &&
- (strConnectInfo.u16ConnectStatus != SUCCESSFUL_STATUSCODE)) {
- PRINT_ER("Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
+ (strConnectInfo.status != SUCCESSFUL_STATUSCODE)) {
+ netdev_err(vif->ndev, "Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
eth_zero_addr(wilc_connected_ssid);
} else if (u8MacStatus == MAC_DISCONNECTED) {
- PRINT_ER("Received MAC status is MAC_DISCONNECTED\n");
+ netdev_err(vif->ndev, "Received MAC status is MAC_DISCONNECTED\n");
eth_zero_addr(wilc_connected_ssid);
}
- if (hif_drv->usr_conn_req.pu8bssid) {
- PRINT_D(HOSTINF_DBG, "Retrieving actual BSSID from AP\n");
- memcpy(strConnectInfo.au8bssid, hif_drv->usr_conn_req.pu8bssid, 6);
+ if (hif_drv->usr_conn_req.bssid) {
+ memcpy(strConnectInfo.bssid, hif_drv->usr_conn_req.bssid, 6);
if ((u8MacStatus == MAC_CONNECTED) &&
- (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE)) {
+ (strConnectInfo.status == SUCCESSFUL_STATUSCODE)) {
memcpy(hif_drv->assoc_bssid,
- hif_drv->usr_conn_req.pu8bssid, ETH_ALEN);
+ hif_drv->usr_conn_req.bssid, ETH_ALEN);
}
}
if (hif_drv->usr_conn_req.ies) {
- strConnectInfo.ReqIEsLen = hif_drv->usr_conn_req.ies_len;
- strConnectInfo.pu8ReqIEs = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
- memcpy(strConnectInfo.pu8ReqIEs,
+ strConnectInfo.req_ies_len = hif_drv->usr_conn_req.ies_len;
+ strConnectInfo.req_ies = kmalloc(hif_drv->usr_conn_req.ies_len, GFP_KERNEL);
+ memcpy(strConnectInfo.req_ies,
hif_drv->usr_conn_req.ies,
hif_drv->usr_conn_req.ies_len);
}
@@ -1604,48 +1432,42 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
hif_drv->usr_conn_req.arg);
if ((u8MacStatus == MAC_CONNECTED) &&
- (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE)) {
+ (strConnectInfo.status == SUCCESSFUL_STATUSCODE)) {
wilc_set_power_mgmt(vif, 0, 0);
- PRINT_D(HOSTINF_DBG, "MAC status : CONNECTED and Connect Status : Successful\n");
hif_drv->hif_state = HOST_IF_CONNECTED;
- PRINT_D(GENERIC_DBG, "Obtaining an IP, Disable Scan\n");
wilc_optaining_ip = true;
mod_timer(&wilc_during_ip_timer,
jiffies + msecs_to_jiffies(10000));
} else {
- PRINT_D(HOSTINF_DBG, "MAC status : %d and Connect Status : %d\n", u8MacStatus, strConnectInfo.u16ConnectStatus);
hif_drv->hif_state = HOST_IF_IDLE;
scan_while_connected = false;
}
- kfree(strConnectInfo.pu8RespIEs);
- strConnectInfo.pu8RespIEs = NULL;
+ kfree(strConnectInfo.resp_ies);
+ strConnectInfo.resp_ies = NULL;
- kfree(strConnectInfo.pu8ReqIEs);
- strConnectInfo.pu8ReqIEs = NULL;
+ kfree(strConnectInfo.req_ies);
+ strConnectInfo.req_ies = NULL;
hif_drv->usr_conn_req.ssid_len = 0;
- kfree(hif_drv->usr_conn_req.pu8ssid);
- hif_drv->usr_conn_req.pu8ssid = NULL;
- kfree(hif_drv->usr_conn_req.pu8bssid);
- hif_drv->usr_conn_req.pu8bssid = NULL;
+ kfree(hif_drv->usr_conn_req.ssid);
+ hif_drv->usr_conn_req.ssid = NULL;
+ kfree(hif_drv->usr_conn_req.bssid);
+ hif_drv->usr_conn_req.bssid = NULL;
hif_drv->usr_conn_req.ies_len = 0;
kfree(hif_drv->usr_conn_req.ies);
hif_drv->usr_conn_req.ies = NULL;
} else if ((u8MacStatus == MAC_DISCONNECTED) &&
(hif_drv->hif_state == HOST_IF_CONNECTED)) {
- PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW\n");
-
- memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
+ memset(&strDisconnectNotifInfo, 0, sizeof(struct disconnect_info));
if (hif_drv->usr_scan_req.scan_result) {
- PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running OBSS Scan >>\n\n");
del_timer(&hif_drv->scan_timer);
Handle_ScanDone(vif, SCAN_EVENT_ABORTED);
}
- strDisconnectNotifInfo.u16reason = 0;
+ strDisconnectNotifInfo.reason = 0;
strDisconnectNotifInfo.ie = NULL;
strDisconnectNotifInfo.ie_len = 0;
@@ -1659,16 +1481,16 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
&strDisconnectNotifInfo,
hif_drv->usr_conn_req.arg);
} else {
- PRINT_ER("Connect result callback function is NULL\n");
+ netdev_err(vif->ndev, "Connect result NULL\n");
}
eth_zero_addr(hif_drv->assoc_bssid);
hif_drv->usr_conn_req.ssid_len = 0;
- kfree(hif_drv->usr_conn_req.pu8ssid);
- hif_drv->usr_conn_req.pu8ssid = NULL;
- kfree(hif_drv->usr_conn_req.pu8bssid);
- hif_drv->usr_conn_req.pu8bssid = NULL;
+ kfree(hif_drv->usr_conn_req.ssid);
+ hif_drv->usr_conn_req.ssid = NULL;
+ kfree(hif_drv->usr_conn_req.bssid);
+ hif_drv->usr_conn_req.bssid = NULL;
hif_drv->usr_conn_req.ies_len = 0;
kfree(hif_drv->usr_conn_req.ies);
hif_drv->usr_conn_req.ies = NULL;
@@ -1688,9 +1510,6 @@ static s32 Handle_RcvdGnrlAsyncInfo(struct wilc_vif *vif,
} else if ((u8MacStatus == MAC_DISCONNECTED) &&
(hif_drv->usr_scan_req.scan_result)) {
- PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW while scanning\n");
- PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running Scan >>\n\n");
-
del_timer(&hif_drv->scan_timer);
if (hif_drv->usr_scan_req.scan_result)
Handle_ScanDone(vif, SCAN_EVENT_ABORTED);
@@ -1719,8 +1538,6 @@ static int Handle_Key(struct wilc_vif *vif,
case WEP:
if (pstrHostIFkeyAttr->action & ADDKEY_AP) {
- PRINT_D(HOSTINF_DBG, "Handling WEP key\n");
- PRINT_D(GENERIC_DBG, "ID Hostint is %d\n", pstrHostIFkeyAttr->attr.wep.index);
strWIDList[0].id = (u16)WID_11I_MODE;
strWIDList[0].type = WID_CHAR;
strWIDList[0].size = sizeof(char);
@@ -1731,39 +1548,32 @@ static int Handle_Key(struct wilc_vif *vif,
strWIDList[1].size = sizeof(char);
strWIDList[1].val = (s8 *)&pstrHostIFkeyAttr->attr.wep.auth_type;
- strWIDList[2].id = (u16)WID_KEY_ID;
- strWIDList[2].type = WID_CHAR;
-
- strWIDList[2].val = (s8 *)&pstrHostIFkeyAttr->attr.wep.index;
- strWIDList[2].size = sizeof(char);
-
- pu8keybuf = kmemdup(pstrHostIFkeyAttr->attr.wep.key,
- pstrHostIFkeyAttr->attr.wep.key_len,
+ pu8keybuf = kmalloc(pstrHostIFkeyAttr->attr.wep.key_len + 2,
GFP_KERNEL);
-
- if (pu8keybuf == NULL) {
- PRINT_ER("No buffer to send Key\n");
+ if (!pu8keybuf)
return -ENOMEM;
- }
+
+ pu8keybuf[0] = pstrHostIFkeyAttr->attr.wep.index;
+ pu8keybuf[1] = pstrHostIFkeyAttr->attr.wep.key_len;
+
+ memcpy(&pu8keybuf[2], pstrHostIFkeyAttr->attr.wep.key,
+ pstrHostIFkeyAttr->attr.wep.key_len);
kfree(pstrHostIFkeyAttr->attr.wep.key);
- strWIDList[3].id = (u16)WID_WEP_KEY_VALUE;
- strWIDList[3].type = WID_STR;
- strWIDList[3].size = pstrHostIFkeyAttr->attr.wep.key_len;
- strWIDList[3].val = (s8 *)pu8keybuf;
+ strWIDList[2].id = (u16)WID_WEP_KEY_VALUE;
+ strWIDList[2].type = WID_STR;
+ strWIDList[2].size = pstrHostIFkeyAttr->attr.wep.key_len + 2;
+ strWIDList[2].val = (s8 *)pu8keybuf;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG,
- strWIDList, 4,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG,
+ strWIDList, 3,
+ wilc_get_vif_idx(vif));
kfree(pu8keybuf);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
- PRINT_D(HOSTINF_DBG, "Handling WEP key\n");
pu8keybuf = kmalloc(pstrHostIFkeyAttr->attr.wep.key_len + 2, GFP_KERNEL);
- if (!pu8keybuf) {
- PRINT_ER("No buffer to send Key\n");
+ if (!pu8keybuf)
return -ENOMEM;
- }
pu8keybuf[0] = pstrHostIFkeyAttr->attr.wep.index;
memcpy(pu8keybuf + 1, &pstrHostIFkeyAttr->attr.wep.key_len, 1);
memcpy(pu8keybuf + 2, pstrHostIFkeyAttr->attr.wep.key,
@@ -1775,12 +1585,11 @@ static int Handle_Key(struct wilc_vif *vif,
wid.val = (s8 *)pu8keybuf;
wid.size = pstrHostIFkeyAttr->attr.wep.key_len + 2;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG,
- &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG,
+ &wid, 1,
+ wilc_get_vif_idx(vif));
kfree(pu8keybuf);
} else if (pstrHostIFkeyAttr->action & REMOVEKEY) {
- PRINT_D(HOSTINF_DBG, "Removing key\n");
wid.id = (u16)WID_REMOVE_WEP_KEY;
wid.type = WID_STR;
@@ -1788,20 +1597,18 @@ static int Handle_Key(struct wilc_vif *vif,
wid.val = s8idxarray;
wid.size = 1;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG,
- &wid, 1,
- wilc_get_vif_idx(vif));
- } else {
+ result = wilc_send_config_pkt(vif, SET_CFG,
+ &wid, 1,
+ wilc_get_vif_idx(vif));
+ } else if (pstrHostIFkeyAttr->action & DEFAULTKEY) {
wid.id = (u16)WID_KEY_ID;
wid.type = WID_CHAR;
wid.val = (s8 *)&pstrHostIFkeyAttr->attr.wep.index;
wid.size = sizeof(char);
- PRINT_D(HOSTINF_DBG, "Setting default key index\n");
-
- result = wilc_send_config_pkt(vif->wilc, SET_CFG,
- &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG,
+ &wid, 1,
+ wilc_get_vif_idx(vif));
}
up(&hif_drv->sem_test_key_block);
break;
@@ -1810,7 +1617,6 @@ static int Handle_Key(struct wilc_vif *vif,
if (pstrHostIFkeyAttr->action & ADDKEY_AP) {
pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
if (!pu8keybuf) {
- PRINT_ER("No buffer to send RxGTK Key\n");
ret = -ENOMEM;
goto _WPARxGtk_end_case_;
}
@@ -1833,18 +1639,15 @@ static int Handle_Key(struct wilc_vif *vif,
strWIDList[1].val = (s8 *)pu8keybuf;
strWIDList[1].size = RX_MIC_KEY_MSG_LEN;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG,
- strWIDList, 2,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG,
+ strWIDList, 2,
+ wilc_get_vif_idx(vif));
kfree(pu8keybuf);
up(&hif_drv->sem_test_key_block);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
- PRINT_D(HOSTINF_DBG, "Handling group key(Rx) function\n");
-
pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
if (pu8keybuf == NULL) {
- PRINT_ER("No buffer to send RxGTK Key\n");
ret = -ENOMEM;
goto _WPARxGtk_end_case_;
}
@@ -1852,7 +1655,7 @@ static int Handle_Key(struct wilc_vif *vif,
if (hif_drv->hif_state == HOST_IF_CONNECTED)
memcpy(pu8keybuf, hif_drv->assoc_bssid, ETH_ALEN);
else
- PRINT_ER("Couldn't handle WPARxGtk while state is not HOST_IF_CONNECTED\n");
+ netdev_err(vif->ndev, "Couldn't handle\n");
memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->attr.wpa.seq, 8);
memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->attr.wpa.index, 1);
@@ -1865,9 +1668,9 @@ static int Handle_Key(struct wilc_vif *vif,
wid.val = (s8 *)pu8keybuf;
wid.size = RX_MIC_KEY_MSG_LEN;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG,
- &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG,
+ &wid, 1,
+ wilc_get_vif_idx(vif));
kfree(pu8keybuf);
up(&hif_drv->sem_test_key_block);
@@ -1884,7 +1687,6 @@ _WPARxGtk_end_case_:
if (pstrHostIFkeyAttr->action & ADDKEY_AP) {
pu8keybuf = kmalloc(PTK_KEY_MSG_LEN + 1, GFP_KERNEL);
if (!pu8keybuf) {
- PRINT_ER("No buffer to send PTK Key\n");
ret = -ENOMEM;
goto _WPAPtk_end_case_;
}
@@ -1905,15 +1707,15 @@ _WPARxGtk_end_case_:
strWIDList[1].val = (s8 *)pu8keybuf;
strWIDList[1].size = PTK_KEY_MSG_LEN + 1;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG,
- strWIDList, 2,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG,
+ strWIDList, 2,
+ wilc_get_vif_idx(vif));
kfree(pu8keybuf);
up(&hif_drv->sem_test_key_block);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
pu8keybuf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL);
if (!pu8keybuf) {
- PRINT_ER("No buffer to send PTK Key\n");
+ netdev_err(vif->ndev, "No buffer send PTK\n");
ret = -ENOMEM;
goto _WPAPtk_end_case_;
}
@@ -1928,9 +1730,9 @@ _WPARxGtk_end_case_:
wid.val = (s8 *)pu8keybuf;
wid.size = PTK_KEY_MSG_LEN;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG,
- &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG,
+ &wid, 1,
+ wilc_get_vif_idx(vif));
kfree(pu8keybuf);
up(&hif_drv->sem_test_key_block);
}
@@ -1943,12 +1745,9 @@ _WPAPtk_end_case_:
break;
case PMKSA:
-
- PRINT_D(HOSTINF_DBG, "Handling PMKSA key\n");
-
pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL);
if (!pu8keybuf) {
- PRINT_ER("No buffer to send PMKSA Key\n");
+ netdev_err(vif->ndev, "No buffer to send PMKSA Key\n");
return -ENOMEM;
}
@@ -1964,15 +1763,15 @@ _WPAPtk_end_case_:
wid.val = (s8 *)pu8keybuf;
wid.size = (pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
kfree(pu8keybuf);
break;
}
if (result)
- PRINT_ER("Failed to send key config packet\n");
+ netdev_err(vif->ndev, "Failed to send key config packet\n");
return result;
}
@@ -1990,24 +1789,22 @@ static void Handle_Disconnect(struct wilc_vif *vif)
wid.val = (s8 *)&u16DummyReasonCode;
wid.size = sizeof(char);
- PRINT_D(HOSTINF_DBG, "Sending disconnect request\n");
-
wilc_optaining_ip = false;
wilc_set_power_mgmt(vif, 0, 0);
eth_zero_addr(wilc_connected_ssid);
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result) {
- PRINT_ER("Failed to send dissconect config packet\n");
+ netdev_err(vif->ndev, "Failed to send dissconect\n");
} else {
- tstrDisconnectNotifInfo strDisconnectNotifInfo;
+ struct disconnect_info strDisconnectNotifInfo;
- memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
+ memset(&strDisconnectNotifInfo, 0, sizeof(struct disconnect_info));
- strDisconnectNotifInfo.u16reason = 0;
+ strDisconnectNotifInfo.reason = 0;
strDisconnectNotifInfo.ie = NULL;
strDisconnectNotifInfo.ie_len = 0;
@@ -2021,10 +1818,8 @@ static void Handle_Disconnect(struct wilc_vif *vif)
}
if (hif_drv->usr_conn_req.conn_result) {
- if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
- PRINT_D(HOSTINF_DBG, "Upper layer requested termination of connection\n");
+ if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP)
del_timer(&hif_drv->connect_timer);
- }
hif_drv->usr_conn_req.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF,
NULL,
@@ -2032,7 +1827,7 @@ static void Handle_Disconnect(struct wilc_vif *vif)
&strDisconnectNotifInfo,
hif_drv->usr_conn_req.arg);
} else {
- PRINT_ER("usr_conn_req.conn_result = NULL\n");
+ netdev_err(vif->ndev, "conn_result = NULL\n");
}
scan_while_connected = false;
@@ -2042,10 +1837,10 @@ static void Handle_Disconnect(struct wilc_vif *vif)
eth_zero_addr(hif_drv->assoc_bssid);
hif_drv->usr_conn_req.ssid_len = 0;
- kfree(hif_drv->usr_conn_req.pu8ssid);
- hif_drv->usr_conn_req.pu8ssid = NULL;
- kfree(hif_drv->usr_conn_req.pu8bssid);
- hif_drv->usr_conn_req.pu8bssid = NULL;
+ kfree(hif_drv->usr_conn_req.ssid);
+ hif_drv->usr_conn_req.ssid = NULL;
+ kfree(hif_drv->usr_conn_req.bssid);
+ hif_drv->usr_conn_req.bssid = NULL;
hif_drv->usr_conn_req.ies_len = 0;
kfree(hif_drv->usr_conn_req.ies);
hif_drv->usr_conn_req.ies = NULL;
@@ -2069,36 +1864,8 @@ void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
if (!vif->hif_drv)
return;
if ((vif->hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) ||
- (vif->hif_drv->hif_state == HOST_IF_CONNECTING)) {
- PRINT_D(HOSTINF_DBG, "\n\n<< correcting Supplicant state machine >>\n\n");
+ (vif->hif_drv->hif_state == HOST_IF_CONNECTING))
wilc_disconnect(vif, 1);
- }
-}
-
-static s32 Handle_GetChnl(struct wilc_vif *vif)
-{
- s32 result = 0;
- struct wid wid;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- wid.id = (u16)WID_CURRENT_CHANNEL;
- wid.type = WID_CHAR;
- wid.val = (s8 *)&ch_no;
- wid.size = sizeof(char);
-
- PRINT_D(HOSTINF_DBG, "Getting channel value\n");
-
- result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
-
- if (result) {
- PRINT_ER("Failed to get channel number\n");
- result = -EFAULT;
- }
-
- up(&hif_drv->sem_get_chnl);
-
- return result;
}
static void Handle_GetRssi(struct wilc_vif *vif)
@@ -2111,43 +1878,16 @@ static void Handle_GetRssi(struct wilc_vif *vif)
wid.val = &rssi;
wid.size = sizeof(char);
- PRINT_D(HOSTINF_DBG, "Getting RSSI value\n");
-
- result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result) {
- PRINT_ER("Failed to get RSSI value\n");
+ netdev_err(vif->ndev, "Failed to get RSSI value\n");
result = -EFAULT;
}
up(&vif->hif_drv->sem_get_rssi);
}
-static void Handle_GetLinkspeed(struct wilc_vif *vif)
-{
- s32 result = 0;
- struct wid wid;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- link_speed = 0;
-
- wid.id = (u16)WID_LINKSPEED;
- wid.type = WID_CHAR;
- wid.val = &link_speed;
- wid.size = sizeof(char);
-
- PRINT_D(HOSTINF_DBG, "Getting LINKSPEED value\n");
-
- result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
- if (result) {
- PRINT_ER("Failed to get LINKSPEED value\n");
- result = -EFAULT;
- }
-
- up(&hif_drv->sem_get_link_speed);
-}
-
static s32 Handle_GetStatistics(struct wilc_vif *vif,
struct rf_info *pstrStatistics)
{
@@ -2184,14 +1924,21 @@ static s32 Handle_GetStatistics(struct wilc_vif *vif,
strWIDList[u32WidsCount].val = (s8 *)&pstrStatistics->tx_fail_cnt;
u32WidsCount++;
- result = wilc_send_config_pkt(vif->wilc, GET_CFG, strWIDList,
- u32WidsCount,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, GET_CFG, strWIDList,
+ u32WidsCount,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send scan paramters config packet\n");
+ netdev_err(vif->ndev, "Failed to send scan parameters\n");
- up(&hif_sema_wait_response);
+ if (pstrStatistics->link_speed > TCP_ACK_FILTER_LINK_SPEED_THRESH &&
+ pstrStatistics->link_speed != DEFAULT_LINK_SPEED)
+ wilc_enable_tcp_ack_filter(true);
+ else if (pstrStatistics->link_speed != DEFAULT_LINK_SPEED)
+ wilc_enable_tcp_ack_filter(false);
+
+ if (pstrStatistics != &vif->wilc->dummy_statistics)
+ up(&hif_sema_wait_response);
return 0;
}
@@ -2211,13 +1958,11 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
stamac = wid.val;
memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
- PRINT_D(CFG80211_DBG, "SETING STA inactive time\n");
-
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result) {
- PRINT_ER("Failed to SET incative time\n");
+ netdev_err(vif->ndev, "Failed to SET incative time\n");
return -EFAULT;
}
@@ -2226,16 +1971,14 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
wid.val = (s8 *)&inactive_time;
wid.size = sizeof(u32);
- result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result) {
- PRINT_ER("Failed to get incative time\n");
+ netdev_err(vif->ndev, "Failed to get incative time\n");
return -EFAULT;
}
- PRINT_D(CFG80211_DBG, "Getting inactive time : %d\n", inactive_time);
-
up(&hif_drv->sem_inactive_time);
return result;
@@ -2248,8 +1991,6 @@ static void Handle_AddBeacon(struct wilc_vif *vif,
struct wid wid;
u8 *pu8CurrByte;
- PRINT_D(HOSTINF_DBG, "Adding BEACON\n");
-
wid.id = (u16)WID_ADD_BEACON;
wid.type = WID_BIN;
wid.size = pstrSetBeaconParam->head_len + pstrSetBeaconParam->tail_len + 16;
@@ -2285,10 +2026,10 @@ static void Handle_AddBeacon(struct wilc_vif *vif,
memcpy(pu8CurrByte, pstrSetBeaconParam->tail, pstrSetBeaconParam->tail_len);
pu8CurrByte += pstrSetBeaconParam->tail_len;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send add beacon config packet\n");
+ netdev_err(vif->ndev, "Failed to send add beacon\n");
ERRORHANDLER:
kfree(wid.val);
@@ -2312,12 +2053,10 @@ static void Handle_DelBeacon(struct wilc_vif *vif)
pu8CurrByte = wid.val;
- PRINT_D(HOSTINF_DBG, "Deleting BEACON\n");
-
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send delete beacon config packet\n");
+ netdev_err(vif->ndev, "Failed to send delete beacon\n");
}
static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer,
@@ -2327,7 +2066,6 @@ static u32 WILC_HostIf_PackStaParam(u8 *pu8Buffer,
pu8CurrByte = pu8Buffer;
- PRINT_D(HOSTINF_DBG, "Packing STA params\n");
memcpy(pu8CurrByte, pstrStationParam->bssid, ETH_ALEN);
pu8CurrByte += ETH_ALEN;
@@ -2375,7 +2113,6 @@ static void Handle_AddStation(struct wilc_vif *vif,
struct wid wid;
u8 *pu8CurrByte;
- PRINT_D(HOSTINF_DBG, "Handling add station\n");
wid.id = (u16)WID_ADD_STA;
wid.type = WID_BIN;
wid.size = WILC_ADD_STA_LENGTH + pstrStationParam->rates_len;
@@ -2387,10 +2124,10 @@ static void Handle_AddStation(struct wilc_vif *vif,
pu8CurrByte = wid.val;
pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam);
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result != 0)
- PRINT_ER("Failed to send add station config packet\n");
+ netdev_err(vif->ndev, "Failed to send add station\n");
ERRORHANDLER:
kfree(pstrStationParam->rates);
@@ -2410,8 +2147,6 @@ static void Handle_DelAllSta(struct wilc_vif *vif,
wid.type = WID_STR;
wid.size = (pstrDelAllStaParam->assoc_sta * ETH_ALEN) + 1;
- PRINT_D(HOSTINF_DBG, "Handling delete station\n");
-
wid.val = kmalloc((pstrDelAllStaParam->assoc_sta * ETH_ALEN) + 1, GFP_KERNEL);
if (!wid.val)
goto ERRORHANDLER;
@@ -2429,10 +2164,10 @@ static void Handle_DelAllSta(struct wilc_vif *vif,
pu8CurrByte += ETH_ALEN;
}
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send add station config packet\n");
+ netdev_err(vif->ndev, "Failed to send add station\n");
ERRORHANDLER:
kfree(wid.val);
@@ -2451,8 +2186,6 @@ static void Handle_DelStation(struct wilc_vif *vif,
wid.type = WID_BIN;
wid.size = ETH_ALEN;
- PRINT_D(HOSTINF_DBG, "Handling delete station\n");
-
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
goto ERRORHANDLER;
@@ -2461,10 +2194,10 @@ static void Handle_DelStation(struct wilc_vif *vif,
memcpy(pu8CurrByte, pstrDelStaParam->mac_addr, ETH_ALEN);
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send add station config packet\n");
+ netdev_err(vif->ndev, "Failed to send add station\n");
ERRORHANDLER:
kfree(wid.val);
@@ -2481,7 +2214,6 @@ static void Handle_EditStation(struct wilc_vif *vif,
wid.type = WID_BIN;
wid.size = WILC_ADD_STA_LENGTH + pstrStationParam->rates_len;
- PRINT_D(HOSTINF_DBG, "Handling edit station\n");
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val)
goto ERRORHANDLER;
@@ -2489,10 +2221,10 @@ static void Handle_EditStation(struct wilc_vif *vif,
pu8CurrByte = wid.val;
pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam);
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send edit station config packet\n");
+ netdev_err(vif->ndev, "Failed to send edit station\n");
ERRORHANDLER:
kfree(pstrStationParam->rates);
@@ -2518,26 +2250,20 @@ static int Handle_RemainOnChan(struct wilc_vif *vif,
}
if (hif_drv->usr_scan_req.scan_result) {
- PRINT_INFO(GENERIC_DBG, "Required to remain on chan while scanning return\n");
hif_drv->remain_on_ch_pending = 1;
result = -EBUSY;
goto ERRORHANDLER;
}
if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) {
- PRINT_INFO(GENERIC_DBG, "Required to remain on chan while connecting return\n");
result = -EBUSY;
goto ERRORHANDLER;
}
if (wilc_optaining_ip || wilc_connecting) {
- PRINT_D(GENERIC_DBG, "[handle_scan]: Don't do obss scan until IP adresss is obtained\n");
result = -EBUSY;
goto ERRORHANDLER;
}
- PRINT_D(HOSTINF_DBG, "Setting channel :%d\n",
- pstrHostIfRemainOnChan->ch);
-
u8remain_on_chan_flag = true;
wid.id = (u16)WID_REMAIN_ON_CHAN;
wid.type = WID_STR;
@@ -2551,10 +2277,10 @@ static int Handle_RemainOnChan(struct wilc_vif *vif,
wid.val[0] = u8remain_on_chan_flag;
wid.val[1] = (s8)pstrHostIfRemainOnChan->ch;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result != 0)
- PRINT_ER("Failed to set remain on channel\n");
+ netdev_err(vif->ndev, "Failed to set remain on channel\n");
ERRORHANDLER:
{
@@ -2562,7 +2288,7 @@ ERRORHANDLER:
hif_drv->remain_on_ch_timer.data = (unsigned long)vif;
mod_timer(&hif_drv->remain_on_ch_timer,
jiffies +
- msecs_to_jiffies(pstrHostIfRemainOnChan->u32duration));
+ msecs_to_jiffies(pstrHostIfRemainOnChan->duration));
if (hif_drv->remain_on_ch.ready)
hif_drv->remain_on_ch.ready(hif_drv->remain_on_ch.arg);
@@ -2581,10 +2307,6 @@ static int Handle_RegisterFrame(struct wilc_vif *vif,
struct wid wid;
u8 *pu8CurrByte;
- PRINT_D(HOSTINF_DBG, "Handling frame register : %d FrameType: %d\n",
- pstrHostIfRegisterFrame->reg,
- pstrHostIfRegisterFrame->frame_type);
-
wid.id = (u16)WID_REGISTER_FRAME;
wid.type = WID_STR;
wid.val = kmalloc(sizeof(u16) + 2, GFP_KERNEL);
@@ -2599,10 +2321,10 @@ static int Handle_RegisterFrame(struct wilc_vif *vif,
wid.size = sizeof(u16) + 2;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result) {
- PRINT_ER("Failed to frame register config packet\n");
+ netdev_err(vif->ndev, "Failed to frame register\n");
result = -EINVAL;
}
@@ -2617,8 +2339,6 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif,
s32 result = 0;
struct host_if_drv *hif_drv = vif->hif_drv;
- PRINT_D(HOSTINF_DBG, "CANCEL REMAIN ON CHAN\n");
-
if (P2P_LISTEN_STATE) {
u8remain_on_chan_flag = false;
wid.id = (u16)WID_REMAIN_ON_CHAN;
@@ -2627,17 +2347,17 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif,
wid.val = kmalloc(wid.size, GFP_KERNEL);
if (!wid.val) {
- PRINT_ER("Failed to allocate memory\n");
+ netdev_err(vif->ndev, "Failed to allocate memory\n");
return -ENOMEM;
}
wid.val[0] = u8remain_on_chan_flag;
wid.val[1] = FALSE_FRMWR_CHANNEL;
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result != 0) {
- PRINT_ER("Failed to set remain on channel\n");
+ netdev_err(vif->ndev, "Failed to set remain channel\n");
goto _done_;
}
@@ -2647,7 +2367,7 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif,
}
P2P_LISTEN_STATE = 0;
} else {
- PRINT_D(GENERIC_DBG, "Not in listen state\n");
+ netdev_dbg(vif->ndev, "Not in listen state\n");
result = -EFAULT;
}
@@ -2670,7 +2390,7 @@ static void ListenTimerCB(unsigned long arg)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
}
static void Handle_PowerManagement(struct wilc_vif *vif,
@@ -2686,16 +2406,14 @@ static void Handle_PowerManagement(struct wilc_vif *vif,
s8PowerMode = MIN_FAST_PS;
else
s8PowerMode = NO_POWERSAVE;
- PRINT_D(HOSTINF_DBG, "Handling power mgmt to %d\n", s8PowerMode);
+
wid.val = &s8PowerMode;
wid.size = sizeof(char);
- PRINT_D(HOSTINF_DBG, "Handling Power Management\n");
-
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send power management config packet\n");
+ netdev_err(vif->ndev, "Failed to send power management\n");
}
static void Handle_SetMulticastFilter(struct wilc_vif *vif,
@@ -2705,8 +2423,6 @@ static void Handle_SetMulticastFilter(struct wilc_vif *vif,
struct wid wid;
u8 *pu8CurrByte;
- PRINT_D(HOSTINF_DBG, "Setup Multicast Filter\n");
-
wid.id = (u16)WID_SETUP_MULTICAST_FILTER;
wid.type = WID_BIN;
wid.size = sizeof(struct set_multicast) + ((strHostIfSetMulti->cnt) * ETH_ALEN);
@@ -2729,59 +2445,54 @@ static void Handle_SetMulticastFilter(struct wilc_vif *vif,
memcpy(pu8CurrByte, wilc_multicast_mac_addr_list,
((strHostIfSetMulti->cnt) * ETH_ALEN));
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result)
- PRINT_ER("Failed to send setup multicast config packet\n");
+ netdev_err(vif->ndev, "Failed to send setup multicast\n");
ERRORHANDLER:
kfree(wid.val);
}
-static s32 Handle_DelAllRxBASessions(struct wilc_vif *vif,
- struct ba_session_info *strHostIfBASessionInfo)
+static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr)
{
- s32 result = 0;
+ int ret;
struct wid wid;
- char *ptr = NULL;
- PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\n",
- strHostIfBASessionInfo->bssid[0],
- strHostIfBASessionInfo->bssid[1],
- strHostIfBASessionInfo->bssid[2],
- strHostIfBASessionInfo->tid);
+ wid.id = (u16)WID_TX_POWER;
+ wid.type = WID_CHAR;
+ wid.val = &tx_pwr;
+ wid.size = sizeof(char);
- wid.id = (u16)WID_DEL_ALL_RX_BA;
- wid.type = WID_STR;
- wid.val = kmalloc(BLOCK_ACK_REQ_SIZE, GFP_KERNEL);
- wid.size = BLOCK_ACK_REQ_SIZE;
- ptr = wid.val;
- *ptr++ = 0x14;
- *ptr++ = 0x3;
- *ptr++ = 0x2;
- memcpy(ptr, strHostIfBASessionInfo->bssid, ETH_ALEN);
- ptr += ETH_ALEN;
- *ptr++ = strHostIfBASessionInfo->tid;
- *ptr++ = 0;
- *ptr++ = 32;
-
- result = wilc_send_config_pkt(vif->wilc, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
- if (result)
- PRINT_D(HOSTINF_DBG, "Couldn't delete BA Session\n");
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
+ if (ret)
+ netdev_err(vif->ndev, "Failed to set TX PWR\n");
+}
- kfree(wid.val);
+static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
+{
+ int ret;
+ struct wid wid;
- up(&hif_sema_wait_response);
+ wid.id = (u16)WID_TX_POWER;
+ wid.type = WID_CHAR;
+ wid.val = (s8 *)tx_pwr;
+ wid.size = sizeof(char);
- return result;
+ ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
+ if (ret)
+ netdev_err(vif->ndev, "Failed to get TX PWR\n");
+
+ up(&hif_sema_wait_response);
}
static int hostIFthread(void *pvArg)
{
u32 u32Ret;
struct host_if_msg msg;
- struct wilc *wilc = (struct wilc*)pvArg;
+ struct wilc *wilc = pvArg;
struct wilc_vif *vif;
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -2789,13 +2500,10 @@ static int hostIFthread(void *pvArg)
while (1) {
wilc_mq_recv(&hif_msg_q, &msg, sizeof(struct host_if_msg), &u32Ret);
vif = msg.vif;
- if (msg.id == HOST_IF_MSG_EXIT) {
- PRINT_D(GENERIC_DBG, "THREAD: Exiting HostIfThread\n");
+ if (msg.id == HOST_IF_MSG_EXIT)
break;
- }
if ((!wilc_initialized)) {
- PRINT_D(GENERIC_DBG, "--WAIT--");
usleep_range(200 * 1000, 200 * 1000);
wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
continue;
@@ -2803,17 +2511,12 @@ static int hostIFthread(void *pvArg)
if (msg.id == HOST_IF_MSG_CONNECT &&
vif->hif_drv->usr_scan_req.scan_result) {
- PRINT_D(HOSTINF_DBG, "Requeue connect request till scan done received\n");
wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
usleep_range(2 * 1000, 2 * 1000);
continue;
}
switch (msg.id) {
- case HOST_IF_MSG_Q_IDLE:
- Handle_wait_msg_q_empty();
- break;
-
case HOST_IF_MSG_SCAN:
Handle_Scan(msg.vif, &msg.body.scan_info);
break;
@@ -2822,10 +2525,6 @@ static int hostIFthread(void *pvArg)
Handle_Connect(msg.vif, &msg.body.con_info);
break;
- case HOST_IF_MSG_FLUSH_CONNECT:
- Handle_FlushConnect(msg.vif);
- break;
-
case HOST_IF_MSG_RCVD_NTWRK_INFO:
Handle_RcvdNtwrkInfo(msg.vif, &msg.body.net_info);
break;
@@ -2853,7 +2552,6 @@ static int hostIFthread(void *pvArg)
case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
del_timer(&vif->hif_drv->scan_timer);
- PRINT_D(HOSTINF_DBG, "scan completed successfully\n");
if (!wilc_wlan_get_num_conn_ifcs(wilc))
wilc_chip_sleep_manually(wilc);
@@ -2870,19 +2568,11 @@ static int hostIFthread(void *pvArg)
Handle_GetRssi(msg.vif);
break;
- case HOST_IF_MSG_GET_LINKSPEED:
- Handle_GetLinkspeed(msg.vif);
- break;
-
case HOST_IF_MSG_GET_STATISTICS:
Handle_GetStatistics(msg.vif,
(struct rf_info *)msg.body.data);
break;
- case HOST_IF_MSG_GET_CHNL:
- Handle_GetChnl(msg.vif);
- break;
-
case HOST_IF_MSG_ADD_BEACON:
Handle_AddBeacon(msg.vif, &msg.body.beacon_info);
break;
@@ -2908,13 +2598,11 @@ static int hostIFthread(void *pvArg)
break;
case HOST_IF_MSG_SCAN_TIMER_FIRED:
- PRINT_D(HOSTINF_DBG, "Scan Timeout\n");
Handle_ScanDone(msg.vif, SCAN_EVENT_ABORTED);
break;
case HOST_IF_MSG_CONNECT_TIMER_FIRED:
- PRINT_D(HOSTINF_DBG, "Connect Timeout\n");
Handle_ConnectTimeout(msg.vif);
break;
@@ -2932,34 +2620,25 @@ static int hostIFthread(void *pvArg)
break;
case HOST_IF_MSG_SET_IPADDRESS:
- PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_IPADDRESS\n");
handle_set_ip_address(vif,
msg.body.ip_info.ip_addr,
msg.body.ip_info.idx);
break;
case HOST_IF_MSG_GET_IPADDRESS:
- PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_IPADDRESS\n");
handle_get_ip_address(vif, msg.body.ip_info.idx);
break;
- case HOST_IF_MSG_SET_MAC_ADDRESS:
- handle_set_mac_address(msg.vif,
- &msg.body.set_mac_info);
- break;
-
case HOST_IF_MSG_GET_MAC_ADDRESS:
handle_get_mac_address(msg.vif,
&msg.body.get_mac_info);
break;
case HOST_IF_MSG_REMAIN_ON_CHAN:
- PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_REMAIN_ON_CHAN\n");
Handle_RemainOnChan(msg.vif, &msg.body.remain_on_ch);
break;
case HOST_IF_MSG_REGISTER_FRAME:
- PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_REGISTER_FRAME\n");
Handle_RegisterFrame(msg.vif, &msg.body.reg_frame);
break;
@@ -2968,25 +2647,26 @@ static int hostIFthread(void *pvArg)
break;
case HOST_IF_MSG_SET_MULTICAST_FILTER:
- PRINT_D(HOSTINF_DBG, "HOST_IF_MSG_SET_MULTICAST_FILTER\n");
Handle_SetMulticastFilter(msg.vif, &msg.body.multicast_info);
break;
- case HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS:
- Handle_DelAllRxBASessions(msg.vif, &msg.body.session_info);
- break;
-
case HOST_IF_MSG_DEL_ALL_STA:
Handle_DelAllSta(msg.vif, &msg.body.del_all_sta_info);
break;
+ case HOST_IF_MSG_SET_TX_POWER:
+ handle_set_tx_pwr(msg.vif, msg.body.tx_power.tx_pwr);
+ break;
+
+ case HOST_IF_MSG_GET_TX_POWER:
+ handle_get_tx_pwr(msg.vif, &msg.body.tx_power.tx_pwr);
+ break;
default:
- PRINT_ER("[Host Interface] undefined Received Msg ID\n");
+ netdev_err(vif->ndev, "[Host Interface] undefined\n");
break;
}
}
- PRINT_D(HOSTINF_DBG, "Releasing thread exit semaphore\n");
up(&hif_sema_thread);
return 0;
}
@@ -3035,7 +2715,7 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
if (!hif_drv) {
result = -EFAULT;
- PRINT_ER("Failed to send setup multicast config packet\n");
+ netdev_err(vif->ndev, "Failed to send setup multicast\n");
return result;
}
@@ -3049,7 +2729,7 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending message queue : Request to remove WEP key\n");
+ netdev_err(vif->ndev, "Request to remove WEP key\n");
down(&hif_drv->sem_test_key_block);
return result;
@@ -3063,7 +2743,7 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
if (!hif_drv) {
result = -EFAULT;
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return result;
}
@@ -3077,7 +2757,7 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending message queue : Default key index\n");
+ netdev_err(vif->ndev, "Default key index\n");
down(&hif_drv->sem_test_key_block);
return result;
@@ -3091,7 +2771,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
@@ -3110,7 +2790,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending message queue :WEP Key\n");
+ netdev_err(vif->ndev, "STA - WEP Key\n");
down(&hif_drv->sem_test_key_block);
return result;
@@ -3122,19 +2802,14 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
- int i;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
- if (INFO) {
- for (i = 0; i < len; i++)
- PRINT_INFO(HOSTAPD_DBG, "KEY is %x\n", key[i]);
- }
msg.id = HOST_IF_MSG_KEY;
msg.body.key_info.type = WEP;
msg.body.key_info.action = ADDKEY_AP;
@@ -3151,7 +2826,7 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending message queue :WEP Key\n");
+ netdev_err(vif->ndev, "AP - WEP Key\n");
down(&hif_drv->sem_test_key_block);
return result;
@@ -3165,10 +2840,9 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
u8 key_len = ptk_key_len;
- int i;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
@@ -3193,20 +2867,11 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
if (!msg.body.key_info.attr.wpa.key)
return -ENOMEM;
- if (rx_mic) {
+ if (rx_mic)
memcpy(msg.body.key_info.attr.wpa.key + 16, rx_mic, RX_MIC_KEY_LEN);
- if (INFO) {
- for (i = 0; i < RX_MIC_KEY_LEN; i++)
- PRINT_INFO(CFG80211_DBG, "PairwiseRx[%d] = %x\n", i, rx_mic[i]);
- }
- }
- if (tx_mic) {
+
+ if (tx_mic)
memcpy(msg.body.key_info.attr.wpa.key + 24, tx_mic, TX_MIC_KEY_LEN);
- if (INFO) {
- for (i = 0; i < TX_MIC_KEY_LEN; i++)
- PRINT_INFO(CFG80211_DBG, "PairwiseTx[%d] = %x\n", i, tx_mic[i]);
- }
- }
msg.body.key_info.attr.wpa.key_len = key_len;
msg.body.key_info.attr.wpa.mac_addr = mac_addr;
@@ -3216,7 +2881,7 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending message queue: PTK Key\n");
+ netdev_err(vif->ndev, "PTK Key\n");
down(&hif_drv->sem_test_key_block);
@@ -3234,7 +2899,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 key_len = gtk_key_len;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3284,23 +2949,23 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending message queue: RX GTK\n");
+ netdev_err(vif->ndev, "RX GTK\n");
down(&hif_drv->sem_test_key_block);
return result;
}
-s32 wilc_set_pmkid_info(struct wilc_vif *vif,
- struct host_if_pmkid_attr *pu8PmkidInfoArray)
+int wilc_set_pmkid_info(struct wilc_vif *vif,
+ struct host_if_pmkid_attr *pmkid)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
- u32 i;
+ int i;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
@@ -3311,34 +2976,34 @@ s32 wilc_set_pmkid_info(struct wilc_vif *vif,
msg.body.key_info.action = ADDKEY;
msg.vif = vif;
- for (i = 0; i < pu8PmkidInfoArray->numpmkid; i++) {
+ for (i = 0; i < pmkid->numpmkid; i++) {
memcpy(msg.body.key_info.attr.pmkid.pmkidlist[i].bssid,
- &pu8PmkidInfoArray->pmkidlist[i].bssid, ETH_ALEN);
+ &pmkid->pmkidlist[i].bssid, ETH_ALEN);
memcpy(msg.body.key_info.attr.pmkid.pmkidlist[i].pmkid,
- &pu8PmkidInfoArray->pmkidlist[i].pmkid, PMKID_LEN);
+ &pmkid->pmkidlist[i].pmkid, PMKID_LEN);
}
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER(" Error in sending messagequeue: PMKID Info\n");
+ netdev_err(vif->ndev, "PMKID Info\n");
return result;
}
-s32 wilc_get_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress)
+int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_GET_MAC_ADDRESS;
- msg.body.get_mac_info.mac_addr = pu8MacAddress;
+ msg.body.get_mac_info.mac_addr = mac_addr;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result) {
- PRINT_ER("Failed to send get mac address\n");
+ netdev_err(vif->ndev, "Failed to send get mac address\n");
return -EFAULT;
}
@@ -3346,42 +3011,23 @@ s32 wilc_get_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress)
return result;
}
-s32 wilc_set_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress)
+int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
+ size_t ssid_len, const u8 *ies, size_t ies_len,
+ wilc_connect_result connect_result, void *user_arg,
+ u8 security, enum AUTHTYPE auth_type,
+ u8 channel, void *join_params)
{
- s32 result = 0;
- struct host_if_msg msg;
-
- PRINT_D(GENERIC_DBG, "mac addr = %x:%x:%x\n", pu8MacAddress[0], pu8MacAddress[1], pu8MacAddress[2]);
-
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_SET_MAC_ADDRESS;
- memcpy(msg.body.set_mac_info.mac_addr, pu8MacAddress, ETH_ALEN);
- msg.vif = vif;
-
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- if (result)
- PRINT_ER("Failed to send message queue: Set mac address\n");
-
- return result;
-}
-
-s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid,
- size_t ssidLen, const u8 *pu8IEs, size_t IEsLen,
- wilc_connect_result pfConnectResult, void *pvUserArg,
- u8 u8security, enum AUTHTYPE tenuAuth_type,
- u8 u8channel, void *pJoinParams)
-{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
- if (!hif_drv || !pfConnectResult) {
- PRINT_ER("Driver is null\n");
+ if (!hif_drv || !connect_result) {
+ netdev_err(vif->ndev, "Driver is null\n");
return -EFAULT;
}
- if (!pJoinParams) {
- PRINT_ER("Unable to Join - JoinParams is NULL\n");
+ if (!join_params) {
+ netdev_err(vif->ndev, "Unable to Join - JoinParams is NULL\n");
return -EFAULT;
}
@@ -3389,39 +3035,39 @@ s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid,
msg.id = HOST_IF_MSG_CONNECT;
- msg.body.con_info.security = u8security;
- msg.body.con_info.auth_type = tenuAuth_type;
- msg.body.con_info.ch = u8channel;
- msg.body.con_info.result = pfConnectResult;
- msg.body.con_info.arg = pvUserArg;
- msg.body.con_info.params = pJoinParams;
+ msg.body.con_info.security = security;
+ msg.body.con_info.auth_type = auth_type;
+ msg.body.con_info.ch = channel;
+ msg.body.con_info.result = connect_result;
+ msg.body.con_info.arg = user_arg;
+ msg.body.con_info.params = join_params;
msg.vif = vif;
- if (pu8bssid) {
- msg.body.con_info.bssid = kmalloc(6, GFP_KERNEL);
- memcpy(msg.body.con_info.bssid, pu8bssid, 6);
+ if (bssid) {
+ msg.body.con_info.bssid = kmemdup(bssid, 6, GFP_KERNEL);
+ if (!msg.body.con_info.bssid)
+ return -ENOMEM;
}
- if (pu8ssid) {
- msg.body.con_info.ssid_len = ssidLen;
- msg.body.con_info.ssid = kmalloc(ssidLen, GFP_KERNEL);
- memcpy(msg.body.con_info.ssid, pu8ssid, ssidLen);
+ if (ssid) {
+ msg.body.con_info.ssid_len = ssid_len;
+ msg.body.con_info.ssid = kmemdup(ssid, ssid_len, GFP_KERNEL);
+ if (!msg.body.con_info.ssid)
+ return -ENOMEM;
}
- if (pu8IEs) {
- msg.body.con_info.ies_len = IEsLen;
- msg.body.con_info.ies = kmalloc(IEsLen, GFP_KERNEL);
- memcpy(msg.body.con_info.ies, pu8IEs, IEsLen);
+ if (ies) {
+ msg.body.con_info.ies_len = ies_len;
+ msg.body.con_info.ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!msg.body.con_info.ies)
+ return -ENOMEM;
}
if (hif_drv->hif_state < HOST_IF_CONNECTING)
hif_drv->hif_state = HOST_IF_CONNECTING;
- else
- PRINT_D(GENERIC_DBG, "Don't set state to 'connecting' : %d\n",
- hif_drv->hif_state);
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result) {
- PRINT_ER("Failed to send message queue: Set join request\n");
+ netdev_err(vif->ndev, "send message: Set join request\n");
return -EFAULT;
}
@@ -3432,40 +3078,14 @@ s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid,
return result;
}
-s32 wilc_flush_join_req(struct wilc_vif *vif)
+int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
{
- s32 result = 0;
- struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!join_req)
- return -EFAULT;
-
- if (!hif_drv) {
- PRINT_ER("Driver is null\n");
- return -EFAULT;
- }
-
- msg.id = HOST_IF_MSG_FLUSH_CONNECT;
- msg.vif = vif;
-
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- if (result) {
- PRINT_ER("Failed to send message queue: Flush join request\n");
- return -EFAULT;
- }
-
- return result;
-}
-
-s32 wilc_disconnect(struct wilc_vif *vif, u16 u16ReasonCode)
-{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("Driver is null\n");
+ netdev_err(vif->ndev, "Driver is null\n");
return -EFAULT;
}
@@ -3476,7 +3096,7 @@ s32 wilc_disconnect(struct wilc_vif *vif, u16 u16ReasonCode)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Failed to send message queue: disconnect\n");
+ netdev_err(vif->ndev, "Failed to send message: disconnect\n");
down(&hif_drv->sem_test_disconn_block);
@@ -3493,7 +3113,7 @@ static s32 host_int_get_assoc_res_info(struct wilc_vif *vif,
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("Driver is null\n");
+ netdev_err(vif->ndev, "Driver is null\n");
return -EFAULT;
}
@@ -3502,16 +3122,15 @@ static s32 host_int_get_assoc_res_info(struct wilc_vif *vif,
wid.val = pu8AssocRespInfo;
wid.size = u32MaxAssocRespInfoLen;
- result = wilc_send_config_pkt(vif->wilc, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if (result) {
*pu32RcvdAssocRespInfoLen = 0;
- PRINT_ER("Failed to send association response config packet\n");
+ netdev_err(vif->ndev, "Failed to send association response\n");
return -EINVAL;
- } else {
- *pu32RcvdAssocRespInfoLen = wid.size;
}
+ *pu32RcvdAssocRespInfoLen = wid.size;
return result;
}
@@ -3522,7 +3141,7 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
@@ -3533,32 +3152,14 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result) {
- PRINT_ER("wilc mq send fail\n");
+ netdev_err(vif->ndev, "wilc mq send fail\n");
return -EINVAL;
}
return 0;
}
-int wilc_wait_msg_queue_idle(void)
-{
- int result = 0;
- struct host_if_msg msg;
-
- memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_Q_IDLE;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- if (result) {
- PRINT_ER("wilc mq send fail\n");
- result = -EINVAL;
- }
-
- down(&hif_sema_wait_response);
-
- return result;
-}
-
-int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index)
+int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx)
{
int result = 0;
struct host_if_msg msg;
@@ -3566,11 +3167,12 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index)
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_SET_WFIDRV_HANDLER;
msg.body.drv.handler = index;
+ msg.body.drv.mac_idx = mac_idx;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result) {
- PRINT_ER("wilc mq send fail\n");
+ netdev_err(vif->ndev, "wilc mq send fail\n");
result = -EINVAL;
}
@@ -3589,7 +3191,7 @@ int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result) {
- PRINT_ER("wilc mq send fail\n");
+ netdev_err(vif->ndev, "wilc mq send fail\n");
result = -EINVAL;
}
@@ -3604,7 +3206,7 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
@@ -3616,7 +3218,7 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Failed to send get host channel param's message queue ");
+ netdev_err(vif->ndev, "Failed to send get host ch param\n");
down(&hif_drv->sem_inactive_time);
@@ -3625,9 +3227,9 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
return result;
}
-s32 wilc_get_rssi(struct wilc_vif *vif, s8 *ps8Rssi)
+int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
@@ -3637,53 +3239,55 @@ s32 wilc_get_rssi(struct wilc_vif *vif, s8 *ps8Rssi)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result) {
- PRINT_ER("Failed to send get host channel param's message queue ");
+ netdev_err(vif->ndev, "Failed to send get host ch param\n");
return -EFAULT;
}
down(&hif_drv->sem_get_rssi);
- if (!ps8Rssi) {
- PRINT_ER("RSS pointer value is null");
+ if (!rssi_level) {
+ netdev_err(vif->ndev, "RSS pointer value is null\n");
return -EFAULT;
}
- *ps8Rssi = rssi;
+ *rssi_level = rssi;
return result;
}
-s32 wilc_get_statistics(struct wilc_vif *vif, struct rf_info *pstrStatistics)
+int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_GET_STATISTICS;
- msg.body.data = (char *)pstrStatistics;
+ msg.body.data = (char *)stats;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result) {
- PRINT_ER("Failed to send get host channel param's message queue ");
+ netdev_err(vif->ndev, "Failed to send get host channel\n");
return -EFAULT;
}
- down(&hif_sema_wait_response);
+ if (stats != &vif->wilc->dummy_statistics)
+ down(&hif_sema_wait_response);
return result;
}
-s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType,
- u8 *pu8ChnlFreqList, u8 u8ChnlListLen, const u8 *pu8IEs,
- size_t IEsLen, wilc_scan_result ScanResult, void *pvUserArg,
- struct hidden_network *pstrHiddenNetwork)
+int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
+ u8 *ch_freq_list, u8 ch_list_len, const u8 *ies,
+ size_t ies_len, wilc_scan_result scan_result, void *user_arg,
+ struct hidden_network *hidden_network)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
+ struct scan_attr *scan_info = &msg.body.scan_info;
struct host_if_drv *hif_drv = vif->hif_drv;
- if (!hif_drv || !ScanResult) {
- PRINT_ER("hif_drv or ScanResult = NULL\n");
+ if (!hif_drv || !scan_result) {
+ netdev_err(vif->ndev, "hif_drv or scan_result = NULL\n");
return -EFAULT;
}
@@ -3691,34 +3295,35 @@ s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType,
msg.id = HOST_IF_MSG_SCAN;
- if (pstrHiddenNetwork) {
- msg.body.scan_info.hidden_network.pstrHiddenNetworkInfo = pstrHiddenNetwork->pstrHiddenNetworkInfo;
- msg.body.scan_info.hidden_network.u8ssidnum = pstrHiddenNetwork->u8ssidnum;
-
- } else
- PRINT_D(HOSTINF_DBG, "pstrHiddenNetwork IS EQUAL TO NULL\n");
+ if (hidden_network) {
+ scan_info->hidden_network.net_info = hidden_network->net_info;
+ scan_info->hidden_network.n_ssids = hidden_network->n_ssids;
+ }
msg.vif = vif;
- msg.body.scan_info.src = u8ScanSource;
- msg.body.scan_info.type = u8ScanType;
- msg.body.scan_info.result = ScanResult;
- msg.body.scan_info.arg = pvUserArg;
-
- msg.body.scan_info.ch_list_len = u8ChnlListLen;
- msg.body.scan_info.ch_freq_list = kmalloc(u8ChnlListLen, GFP_KERNEL);
- memcpy(msg.body.scan_info.ch_freq_list, pu8ChnlFreqList, u8ChnlListLen);
+ scan_info->src = scan_source;
+ scan_info->type = scan_type;
+ scan_info->result = scan_result;
+ scan_info->arg = user_arg;
+
+ scan_info->ch_list_len = ch_list_len;
+ scan_info->ch_freq_list = kmemdup(ch_freq_list,
+ ch_list_len,
+ GFP_KERNEL);
+ if (!scan_info->ch_freq_list)
+ return -ENOMEM;
- msg.body.scan_info.ies_len = IEsLen;
- msg.body.scan_info.ies = kmalloc(IEsLen, GFP_KERNEL);
- memcpy(msg.body.scan_info.ies, pu8IEs, IEsLen);
+ scan_info->ies_len = ies_len;
+ scan_info->ies = kmemdup(ies, ies_len, GFP_KERNEL);
+ if (!scan_info->ies)
+ return -ENOMEM;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result) {
- PRINT_ER("Error in sending message queue\n");
+ netdev_err(vif->ndev, "Error in sending message queue\n");
return -EINVAL;
}
- PRINT_D(HOSTINF_DBG, ">> Starting the SCAN timer\n");
hif_drv->scan_timer.data = (unsigned long)vif;
mod_timer(&hif_drv->scan_timer,
jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT));
@@ -3726,21 +3331,21 @@ s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType,
return result;
}
-s32 wilc_hif_set_cfg(struct wilc_vif *vif,
- struct cfg_param_val *pstrCfgParamVal)
+int wilc_hif_set_cfg(struct wilc_vif *vif,
+ struct cfg_param_attr *cfg_param)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("hif_drv NULL\n");
+ netdev_err(vif->ndev, "hif_drv NULL\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_CFG_PARAMS;
- msg.body.cfg_info.cfg_attr_info = *pstrCfgParamVal;
+ msg.body.cfg_info = *cfg_param;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
@@ -3753,32 +3358,20 @@ static void GetPeriodicRSSI(unsigned long arg)
struct wilc_vif *vif = (struct wilc_vif *)arg;
if (!vif->hif_drv) {
- PRINT_ER("Driver handler is NULL\n");
+ netdev_err(vif->ndev, "Driver handler is NULL\n");
return;
}
- if (vif->hif_drv->hif_state == HOST_IF_CONNECTED) {
- s32 result = 0;
- struct host_if_msg msg;
+ if (vif->hif_drv->hif_state == HOST_IF_CONNECTED)
+ wilc_get_statistics(vif, &vif->wilc->dummy_statistics);
- memset(&msg, 0, sizeof(struct host_if_msg));
-
- msg.id = HOST_IF_MSG_GET_RSSI;
- msg.vif = vif;
-
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- if (result) {
- PRINT_ER("Failed to send get host channel param's message queue ");
- return;
- }
- }
periodic_rssi.data = (unsigned long)vif;
mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
}
-s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
+int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
{
- s32 result = 0;
+ int result = 0;
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
struct wilc *wilc;
@@ -3787,8 +3380,6 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
vif = netdev_priv(dev);
wilc = vif->wilc;
- PRINT_D(HOSTINF_DBG, "Initializing host interface for client %d\n", clients_count + 1);
-
scan_while_connected = false;
sema_init(&hif_sema_wait_response, 0);
@@ -3807,7 +3398,6 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
wilc_optaining_ip = false;
- PRINT_D(HOSTINF_DBG, "Global handle pointer value=%p\n", hif_drv);
if (clients_count == 0) {
sema_init(&hif_sema_thread, 0);
sema_init(&hif_sema_driver, 0);
@@ -3817,17 +3407,13 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
sema_init(&hif_drv->sem_test_key_block, 0);
sema_init(&hif_drv->sem_test_disconn_block, 0);
sema_init(&hif_drv->sem_get_rssi, 0);
- sema_init(&hif_drv->sem_get_link_speed, 0);
- sema_init(&hif_drv->sem_get_chnl, 0);
sema_init(&hif_drv->sem_inactive_time, 0);
- PRINT_D(HOSTINF_DBG, "INIT: CLIENT COUNT %d\n", clients_count);
-
if (clients_count == 0) {
result = wilc_mq_create(&hif_msg_q);
if (result < 0) {
- PRINT_ER("Failed to creat MQ\n");
+ netdev_err(vif->ndev, "Failed to creat MQ\n");
goto _fail_;
}
@@ -3835,7 +3421,7 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
"WILC_kthread");
if (IS_ERR(hif_thread_handler)) {
- PRINT_ER("Failed to creat Thread\n");
+ netdev_err(vif->ndev, "Failed to creat Thread\n");
result = -EFAULT;
goto _fail_mq_;
}
@@ -3848,8 +3434,8 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
setup_timer(&hif_drv->connect_timer, TimerCB_Connect, 0);
setup_timer(&hif_drv->remain_on_ch_timer, ListenTimerCB, 0);
- sema_init(&hif_drv->sem_cfg_values, 1);
- down(&hif_drv->sem_cfg_values);
+ mutex_init(&hif_drv->cfg_values_lock);
+ mutex_lock(&hif_drv->cfg_values_lock);
hif_drv->hif_state = HOST_IF_IDLE;
hif_drv->cfg_values.site_survey_enabled = SITE_SURVEY_OFF;
@@ -3860,14 +3446,7 @@ s32 wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
hif_drv->p2p_timeout = 0;
- PRINT_INFO(HOSTINF_DBG, "Initialization values, Site survey value: %d\n Scan source: %d\n Active scan time: %d\n Passive scan time: %d\nCurrent tx Rate = %d\n",
- hif_drv->cfg_values.site_survey_enabled,
- hif_drv->cfg_values.scan_source,
- hif_drv->cfg_values.active_scan_time,
- hif_drv->cfg_values.passive_scan_time,
- hif_drv->cfg_values.curr_tx_rate);
-
- up(&hif_drv->sem_cfg_values);
+ mutex_unlock(&hif_drv->cfg_values_lock);
clients_count++;
@@ -3879,34 +3458,27 @@ _fail_:
return result;
}
-s32 wilc_deinit(struct wilc_vif *vif)
+int wilc_deinit(struct wilc_vif *vif)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("hif_drv = NULL\n");
- return 0;
+ netdev_err(vif->ndev, "hif_drv = NULL\n");
+ return -EFAULT;
}
down(&hif_sema_deinit);
terminated_handle = hif_drv;
- PRINT_D(HOSTINF_DBG, "De-initializing host interface for client %d\n", clients_count);
-
- if (del_timer_sync(&hif_drv->scan_timer))
- PRINT_D(HOSTINF_DBG, ">> Scan timer is active\n");
-
- if (del_timer_sync(&hif_drv->connect_timer))
- PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
-
- if (del_timer_sync(&periodic_rssi))
- PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
+ del_timer_sync(&hif_drv->scan_timer);
+ del_timer_sync(&hif_drv->connect_timer);
+ del_timer_sync(&periodic_rssi);
del_timer_sync(&hif_drv->remain_on_ch_timer);
- wilc_set_wfi_drv_handler(vif, 0);
+ wilc_set_wfi_drv_handler(vif, 0, 0);
down(&hif_sema_driver);
if (hif_drv->usr_scan_req.scan_result) {
@@ -3922,15 +3494,13 @@ s32 wilc_deinit(struct wilc_vif *vif)
memset(&msg, 0, sizeof(struct host_if_msg));
if (clients_count == 1) {
- if (del_timer_sync(&periodic_rssi))
- PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
-
+ del_timer_sync(&periodic_rssi);
msg.id = HOST_IF_MSG_EXIT;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result != 0)
- PRINT_ER("Error in sending deinit's message queue message function: Error(%d)\n", result);
+ netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
down(&hif_sema_thread);
@@ -3961,7 +3531,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
hif_drv = vif->hif_drv;
if (!hif_drv || hif_drv == terminated_handle) {
- PRINT_ER("NetworkInfo received but driver not init[%p]\n", hif_drv);
+ netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv);
return;
}
@@ -3976,7 +3546,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending network info message queue message parameters: Error(%d)\n", result);
+ netdev_err(vif->ndev, "message parameters (%d)\n", result);
}
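
Note: nearly every exported helper below follows the same fill/post/log shape. A hedged sketch of that skeleton (post_event is a hypothetical name; the types and globals are the driver's own):

    static int post_event(struct wilc_vif *vif, int id)
    {
            struct host_if_msg msg;
            int result;

            memset(&msg, 0, sizeof(msg));
            msg.id = id;                /* e.g. HOST_IF_MSG_RCVD_NTWRK_INFO */
            msg.vif = vif;

            result = wilc_mq_send(&hif_msg_q, &msg, sizeof(msg));
            if (result)
                    netdev_err(vif->ndev, "mq send failed (%d)\n", result);
            return result;
    }
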
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
@@ -3998,16 +3568,14 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
}
hif_drv = vif->hif_drv;
- PRINT_D(HOSTINF_DBG, "General asynchronous info packet received\n");
if (!hif_drv || hif_drv == terminated_handle) {
- PRINT_D(HOSTINF_DBG, "Wifi driver handler is equal to NULL\n");
up(&hif_sema_deinit);
return;
}
if (!hif_drv->usr_conn_req.conn_result) {
- PRINT_ER("Received mac status is not needed when there is no current Connect Reques\n");
+ netdev_err(vif->ndev, "there is no current Connect Request\n");
up(&hif_sema_deinit);
return;
}
@@ -4023,7 +3591,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending message queue asynchronous message info: Error(%d)\n", result);
+ netdev_err(vif->ndev, "synchronous info (%d)\n", result);
up(&hif_sema_deinit);
}
@@ -4043,8 +3611,6 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
return;
hif_drv = vif->hif_drv;
- PRINT_D(GENERIC_DBG, "Scan notification received %p\n", hif_drv);
-
if (!hif_drv || hif_drv == terminated_handle)
return;
@@ -4056,24 +3622,22 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("Error in sending message queue scan complete parameters: Error(%d)\n", result);
+ netdev_err(vif->ndev, "complete param (%d)\n", result);
}
-
- return;
}
-s32 wilc_remain_on_channel(struct wilc_vif *vif, u32 u32SessionID,
- u32 u32duration, u16 chan,
- wilc_remain_on_chan_expired RemainOnChanExpired,
- wilc_remain_on_chan_ready RemainOnChanReady,
- void *pvUserArg)
+int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
+ u32 duration, u16 chan,
+ wilc_remain_on_chan_expired expired,
+ wilc_remain_on_chan_ready ready,
+ void *user_arg)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
@@ -4081,28 +3645,28 @@ s32 wilc_remain_on_channel(struct wilc_vif *vif, u32 u32SessionID,
msg.id = HOST_IF_MSG_REMAIN_ON_CHAN;
msg.body.remain_on_ch.ch = chan;
- msg.body.remain_on_ch.expired = RemainOnChanExpired;
- msg.body.remain_on_ch.ready = RemainOnChanReady;
- msg.body.remain_on_ch.arg = pvUserArg;
- msg.body.remain_on_ch.u32duration = u32duration;
- msg.body.remain_on_ch.id = u32SessionID;
+ msg.body.remain_on_ch.expired = expired;
+ msg.body.remain_on_ch.ready = ready;
+ msg.body.remain_on_ch.arg = user_arg;
+ msg.body.remain_on_ch.duration = duration;
+ msg.body.remain_on_ch.id = session_id;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc mq send fail\n");
+ netdev_err(vif->ndev, "wilc mq send fail\n");
return result;
}
-s32 wilc_listen_state_expired(struct wilc_vif *vif, u32 u32SessionID)
+int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
@@ -4111,104 +3675,98 @@ s32 wilc_listen_state_expired(struct wilc_vif *vif, u32 u32SessionID)
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_LISTEN_TIMER_FIRED;
msg.vif = vif;
- msg.body.remain_on_ch.id = u32SessionID;
+ msg.body.remain_on_ch.id = session_id;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc mq send fail\n");
+ netdev_err(vif->ndev, "wilc mq send fail\n");
return result;
}
-s32 wilc_frame_register(struct wilc_vif *vif, u16 u16FrameType, bool bReg)
+int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_REGISTER_FRAME;
- switch (u16FrameType) {
+ switch (frame_type) {
case ACTION:
- PRINT_D(HOSTINF_DBG, "ACTION\n");
msg.body.reg_frame.reg_id = ACTION_FRM_IDX;
break;
case PROBE_REQ:
- PRINT_D(HOSTINF_DBG, "PROBE REQ\n");
msg.body.reg_frame.reg_id = PROBE_REQ_IDX;
break;
default:
- PRINT_D(HOSTINF_DBG, "Not valid frame type\n");
break;
}
- msg.body.reg_frame.frame_type = u16FrameType;
- msg.body.reg_frame.reg = bReg;
+ msg.body.reg_frame.frame_type = frame_type;
+ msg.body.reg_frame.reg = reg;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc mq send fail\n");
+ netdev_err(vif->ndev, "wilc mq send fail\n");
return result;
}
-s32 wilc_add_beacon(struct wilc_vif *vif, u32 u32Interval, u32 u32DTIMPeriod,
- u32 u32HeadLen, u8 *pu8Head, u32 u32TailLen, u8 *pu8Tail)
+int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
+ u32 head_len, u8 *head, u32 tail_len, u8 *tail)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
- struct beacon_attr *pstrSetBeaconParam = &msg.body.beacon_info;
+ struct beacon_attr *beacon_info = &msg.body.beacon_info;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
- PRINT_D(HOSTINF_DBG, "Setting adding beacon message queue params\n");
-
msg.id = HOST_IF_MSG_ADD_BEACON;
msg.vif = vif;
- pstrSetBeaconParam->interval = u32Interval;
- pstrSetBeaconParam->dtim_period = u32DTIMPeriod;
- pstrSetBeaconParam->head_len = u32HeadLen;
- pstrSetBeaconParam->head = kmemdup(pu8Head, u32HeadLen, GFP_KERNEL);
- if (!pstrSetBeaconParam->head) {
+ beacon_info->interval = interval;
+ beacon_info->dtim_period = dtim_period;
+ beacon_info->head_len = head_len;
+ beacon_info->head = kmemdup(head, head_len, GFP_KERNEL);
+ if (!beacon_info->head) {
result = -ENOMEM;
goto ERRORHANDLER;
}
- pstrSetBeaconParam->tail_len = u32TailLen;
+ beacon_info->tail_len = tail_len;
- if (u32TailLen > 0) {
- pstrSetBeaconParam->tail = kmemdup(pu8Tail, u32TailLen,
- GFP_KERNEL);
- if (!pstrSetBeaconParam->tail) {
+ if (tail_len > 0) {
+ beacon_info->tail = kmemdup(tail, tail_len, GFP_KERNEL);
+ if (!beacon_info->tail) {
result = -ENOMEM;
goto ERRORHANDLER;
}
} else {
- pstrSetBeaconParam->tail = NULL;
+ beacon_info->tail = NULL;
}
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc mq send fail\n");
+ netdev_err(vif->ndev, "wilc mq send fail\n");
ERRORHANDLER:
if (result) {
- kfree(pstrSetBeaconParam->head);
+ kfree(beacon_info->head);
- kfree(pstrSetBeaconParam->tail);
+ kfree(beacon_info->tail);
}
return result;
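
Note: the beacon path duplicates both buffers with kmemdup() and unwinds through one label; because kfree(NULL) is a no-op, freeing both pointers unconditionally on failure is safe. A condensed sketch of that shape (names follow the diff, surrounding logic trimmed):

    beacon_info->head = kmemdup(head, head_len, GFP_KERNEL);
    if (!beacon_info->head) {
            result = -ENOMEM;
            goto ERRORHANDLER;
    }
    beacon_info->tail = tail_len ? kmemdup(tail, tail_len, GFP_KERNEL) : NULL;
    if (tail_len && !beacon_info->tail) {
            result = -ENOMEM;
            goto ERRORHANDLER;
    }
    ERRORHANDLER:
    if (result) {
            kfree(beacon_info->head);   /* kfree(NULL) is a no-op */
            kfree(beacon_info->tail);
    }
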
@@ -4221,17 +3779,16 @@ int wilc_del_beacon(struct wilc_vif *vif)
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
msg.id = HOST_IF_MSG_DEL_BEACON;
msg.vif = vif;
- PRINT_D(HOSTINF_DBG, "Setting deleting beacon message queue params\n");
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
}
@@ -4244,14 +3801,12 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
- PRINT_D(HOSTINF_DBG, "Setting adding station message queue params\n");
-
msg.id = HOST_IF_MSG_ADD_STATION;
msg.vif = vif;
@@ -4266,7 +3821,7 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
}
@@ -4278,14 +3833,12 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
- PRINT_D(HOSTINF_DBG, "Setting deleting station message queue params\n");
-
msg.id = HOST_IF_MSG_DEL_STATION;
msg.vif = vif;
@@ -4296,160 +3849,141 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
}
-s32 wilc_del_allstation(struct wilc_vif *vif, u8 pu8MacAddr[][ETH_ALEN])
+int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
- struct del_all_sta *pstrDelAllStationMsg = &msg.body.del_all_sta_info;
+ struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info;
struct host_if_drv *hif_drv = vif->hif_drv;
- u8 au8Zero_Buff[ETH_ALEN] = {0};
- u32 i;
- u8 u8AssocNumb = 0;
+ u8 zero_addr[ETH_ALEN] = {0};
+ int i;
+ u8 assoc_sta = 0;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
- PRINT_D(HOSTINF_DBG, "Setting deauthenticating station message queue params\n");
-
msg.id = HOST_IF_MSG_DEL_ALL_STA;
msg.vif = vif;
for (i = 0; i < MAX_NUM_STA; i++) {
- if (memcmp(pu8MacAddr[i], au8Zero_Buff, ETH_ALEN)) {
- memcpy(pstrDelAllStationMsg->del_all_sta[i], pu8MacAddr[i], ETH_ALEN);
- PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n",
- pstrDelAllStationMsg->del_all_sta[i][0],
- pstrDelAllStationMsg->del_all_sta[i][1],
- pstrDelAllStationMsg->del_all_sta[i][2],
- pstrDelAllStationMsg->del_all_sta[i][3],
- pstrDelAllStationMsg->del_all_sta[i][4],
- pstrDelAllStationMsg->del_all_sta[i][5]);
- u8AssocNumb++;
+ if (memcmp(mac_addr[i], zero_addr, ETH_ALEN)) {
+ memcpy(del_all_sta_info->del_all_sta[i], mac_addr[i], ETH_ALEN);
+ assoc_sta++;
}
}
- if (!u8AssocNumb) {
- PRINT_D(CFG80211_DBG, "NO ASSOCIATED STAS\n");
+ if (!assoc_sta)
return result;
- }
- pstrDelAllStationMsg->assoc_sta = u8AssocNumb;
+ del_all_sta_info->assoc_sta = assoc_sta;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
down(&hif_sema_wait_response);
return result;
}
-s32 wilc_edit_station(struct wilc_vif *vif,
- struct add_sta_param *pstrStaParams)
+int wilc_edit_station(struct wilc_vif *vif,
+ struct add_sta_param *sta_param)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
- struct add_sta_param *pstrAddStationMsg = &msg.body.add_sta_info;
+ struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
- PRINT_D(HOSTINF_DBG, "Setting editing station message queue params\n");
-
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_EDIT_STATION;
msg.vif = vif;
- memcpy(pstrAddStationMsg, pstrStaParams, sizeof(struct add_sta_param));
- if (pstrAddStationMsg->rates_len > 0) {
- u8 *rates = kmalloc(pstrAddStationMsg->rates_len, GFP_KERNEL);
-
- if (!rates)
+ memcpy(add_sta_info, sta_param, sizeof(struct add_sta_param));
+ if (add_sta_info->rates_len > 0) {
+ add_sta_info->rates = kmemdup(sta_param->rates,
+ add_sta_info->rates_len,
+ GFP_KERNEL);
+ if (!add_sta_info->rates)
return -ENOMEM;
-
- memcpy(rates, pstrStaParams->rates,
- pstrAddStationMsg->rates_len);
- pstrAddStationMsg->rates = rates;
}
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
}
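
Note: wilc_edit_station() now uses kmemdup() where it previously open-coded the copy; the two forms are equivalent (sketch, GFP flag as in the diff):

    /* kmemdup(src, len, GFP_KERNEL) collapses this older pair: */
    dst = kmalloc(len, GFP_KERNEL);
    if (dst)
            memcpy(dst, src, len);
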
-s32 wilc_set_power_mgmt(struct wilc_vif *vif, bool bIsEnabled, u32 u32Timeout)
+int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
- struct power_mgmt_param *pstrPowerMgmtParam = &msg.body.pwr_mgmt_info;
+ struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info;
struct host_if_drv *hif_drv = vif->hif_drv;
- PRINT_INFO(HOSTINF_DBG, "\n\n>> Setting PS to %d <<\n\n", bIsEnabled);
-
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
- PRINT_D(HOSTINF_DBG, "Setting Power management message queue params\n");
+ if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled)
+ return 0;
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_POWER_MGMT;
msg.vif = vif;
- pstrPowerMgmtParam->enabled = bIsEnabled;
- pstrPowerMgmtParam->timeout = u32Timeout;
+ pwr_mgmt_info->enabled = enabled;
+ pwr_mgmt_info->timeout = timeout;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
}
-s32 wilc_setup_multicast_filter(struct wilc_vif *vif, bool bIsEnabled,
- u32 u32count)
+int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
+ u32 count)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
- struct set_multicast *pstrMulticastFilterParam = &msg.body.multicast_info;
+ struct set_multicast *multicast_filter_param = &msg.body.multicast_info;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
- PRINT_D(HOSTINF_DBG, "Setting Multicast Filter params\n");
-
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_SET_MULTICAST_FILTER;
msg.vif = vif;
- pstrMulticastFilterParam->enabled = bIsEnabled;
- pstrMulticastFilterParam->cnt = u32count;
+ multicast_filter_param->enabled = enabled;
+ multicast_filter_param->cnt = count;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
}
-static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
+static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo)
{
struct join_bss_param *pNewJoinBssParam = NULL;
u8 *pu8IEs;
@@ -4464,17 +3998,18 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
u8 authTotalCount = 0;
u8 i, j;
- pu8IEs = ptstrNetworkInfo->pu8IEs;
- u16IEsLen = ptstrNetworkInfo->u16IEsLen;
+ pu8IEs = ptstrNetworkInfo->ies;
+ u16IEsLen = ptstrNetworkInfo->ies_len;
pNewJoinBssParam = kzalloc(sizeof(struct join_bss_param), GFP_KERNEL);
if (pNewJoinBssParam) {
- pNewJoinBssParam->dtim_period = ptstrNetworkInfo->u8DtimPeriod;
- pNewJoinBssParam->beacon_period = ptstrNetworkInfo->u16BeaconPeriod;
- pNewJoinBssParam->cap_info = ptstrNetworkInfo->u16CapInfo;
- memcpy(pNewJoinBssParam->au8bssid, ptstrNetworkInfo->au8bssid, 6);
- memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->au8ssid, ptstrNetworkInfo->u8SsidLen + 1);
- pNewJoinBssParam->ssid_len = ptstrNetworkInfo->u8SsidLen;
+ pNewJoinBssParam->dtim_period = ptstrNetworkInfo->dtim_period;
+ pNewJoinBssParam->beacon_period = ptstrNetworkInfo->beacon_period;
+ pNewJoinBssParam->cap_info = ptstrNetworkInfo->cap_info;
+ memcpy(pNewJoinBssParam->bssid, ptstrNetworkInfo->bssid, 6);
+ memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->ssid,
+ ptstrNetworkInfo->ssid_len + 1);
+ pNewJoinBssParam->ssid_len = ptstrNetworkInfo->ssid_len;
memset(pNewJoinBssParam->rsn_pcip_policy, 0xFF, 3);
memset(pNewJoinBssParam->rsn_auth_policy, 0xFF, 3);
@@ -4523,7 +4058,7 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
(pu8IEs[index + 5] == 0x09) && (pu8IEs[index + 6] == 0x0c)) {
u16 u16P2P_count;
- pNewJoinBssParam->tsf = ptstrNetworkInfo->u32Tsf;
+ pNewJoinBssParam->tsf = ptstrNetworkInfo->tsf_lo;
pNewJoinBssParam->noa_enabled = 1;
pNewJoinBssParam->idx = pu8IEs[index + 9];
@@ -4534,10 +4069,6 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
pNewJoinBssParam->opp_enabled = 0;
}
- PRINT_D(GENERIC_DBG, "P2P Dump\n");
- for (i = 0; i < pu8IEs[index + 7]; i++)
- PRINT_D(GENERIC_DBG, " %x\n", pu8IEs[index + 9 + i]);
-
pNewJoinBssParam->cnt = pu8IEs[index + 11];
u16P2P_count = index + 12;
@@ -4606,94 +4137,92 @@ static void *host_int_ParseJoinBssParam(tstrNetworkInfo *ptstrNetworkInfo)
return (void *)pNewJoinBssParam;
}
-void wilc_free_join_params(void *pJoinParams)
+int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
- if ((struct bss_param *)pJoinParams)
- kfree((struct bss_param *)pJoinParams);
- else
- PRINT_ER("Unable to FREE null pointer\n");
-}
-
-s32 wilc_del_all_rx_ba_session(struct wilc_vif *vif, char *pBSSID, char TID)
-{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
- struct ba_session_info *pBASessionInfo = &msg.body.session_info;
struct host_if_drv *hif_drv = vif->hif_drv;
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS;
+ msg.id = HOST_IF_MSG_SET_IPADDRESS;
- memcpy(pBASessionInfo->bssid, pBSSID, ETH_ALEN);
- pBASessionInfo->tid = TID;
+ msg.body.ip_info.ip_addr = ip_addr;
msg.vif = vif;
+ msg.body.ip_info.idx = idx;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
-
- down(&hif_sema_wait_response);
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
}
-s32 wilc_setup_ipaddress(struct wilc_vif *vif, u8 *u16ipadd, u8 idx)
+static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
- s32 result = 0;
+ int result = 0;
struct host_if_msg msg;
struct host_if_drv *hif_drv = vif->hif_drv;
- return 0;
-
if (!hif_drv) {
- PRINT_ER("driver is null\n");
+ netdev_err(vif->ndev, "driver is null\n");
return -EFAULT;
}
memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_SET_IPADDRESS;
+ msg.id = HOST_IF_MSG_GET_IPADDRESS;
- msg.body.ip_info.ip_addr = u16ipadd;
+ msg.body.ip_info.ip_addr = ip_addr;
msg.vif = vif;
msg.body.ip_info.idx = idx;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
return result;
}
-static s32 host_int_get_ipaddress(struct wilc_vif *vif,
- struct host_if_drv *hif_drv,
- u8 *u16ipadd, u8 idx)
+int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power)
{
- s32 result = 0;
+ int ret = 0;
struct host_if_msg msg;
- if (!hif_drv) {
- PRINT_ER("driver is null\n");
- return -EFAULT;
- }
-
memset(&msg, 0, sizeof(struct host_if_msg));
- msg.id = HOST_IF_MSG_GET_IPADDRESS;
+ msg.id = HOST_IF_MSG_SET_TX_POWER;
+ msg.body.tx_power.tx_pwr = tx_power;
+ msg.vif = vif;
+
+ ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ if (ret)
+ netdev_err(vif->ndev, "wilc_mq_send fail\n");
- msg.body.ip_info.ip_addr = u16ipadd;
+ return ret;
+}
+
+int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
+{
+ int ret = 0;
+ struct host_if_msg msg;
+
+ memset(&msg, 0, sizeof(struct host_if_msg));
+
+ msg.id = HOST_IF_MSG_GET_TX_POWER;
msg.vif = vif;
- msg.body.ip_info.idx = idx;
- result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
- if (result)
- PRINT_ER("wilc_mq_send fail\n");
+ ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+ if (ret)
+ netdev_err(vif->ndev, "Failed to get TX PWR\n");
- return result;
+ down(&hif_sema_wait_response);
+ *tx_power = msg.body.tx_power.tx_pwr;
+
+ return ret;
}
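
Note: wilc_get_tx_power() is synchronous: it posts the request, sleeps on hif_sema_wait_response until the handler thread calls up(), then reads the reply out of the message body. A hedged sketch of that request/response shape (blocking_query, queue_post, struct req and wait_response are hypothetical names):

    static int blocking_query(struct req *r)
    {
            int ret;

            ret = queue_post(r);         /* handler runs on another thread */
            if (ret)
                    return ret;
            down(&wait_response);        /* handler calls up() when done */
            return r->reply;             /* filled in by the handler */
    }
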
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 8faac27002e9..01f3222a4231 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -96,7 +96,7 @@ enum CURRENT_TXRATE {
MBPS_54 = 54
};
-struct cfg_param_val {
+struct cfg_param_attr {
u32 flag;
u8 ht_enable;
u8 bss_type;
@@ -144,8 +144,8 @@ enum cfg_param {
};
struct found_net_info {
- u8 au8bssid[6];
- s8 s8rssi;
+ u8 bssid[6];
+ s8 rssi;
};
enum scan_event {
@@ -168,13 +168,13 @@ enum KEY_TYPE {
PMKSA,
};
-typedef void (*wilc_scan_result)(enum scan_event, tstrNetworkInfo *,
- void *, void *);
+typedef void (*wilc_scan_result)(enum scan_event, struct network_info *,
+ void *, void *);
typedef void (*wilc_connect_result)(enum conn_event,
- tstrConnectInfo *,
+ struct connect_info *,
u8,
- tstrDisconnectNotifInfo *,
+ struct disconnect_info *,
void *);
typedef void (*wilc_remain_on_chan_expired)(void *, u32);
@@ -186,13 +186,13 @@ struct rcvd_net_info {
};
struct hidden_net_info {
- u8 *pu8ssid;
- u8 u8ssidlen;
+ u8 *ssid;
+ u8 ssid_len;
};
struct hidden_network {
- struct hidden_net_info *pstrHiddenNetworkInfo;
- u8 u8ssidnum;
+ struct hidden_net_info *net_info;
+ u8 n_ssids;
};
struct user_scan_req {
@@ -203,9 +203,9 @@ struct user_scan_req {
};
struct user_conn_req {
- u8 *pu8bssid;
- u8 *pu8ssid;
- u8 u8security;
+ u8 *bssid;
+ u8 *ssid;
+ u8 security;
enum AUTHTYPE auth_type;
size_t ssid_len;
u8 *ies;
@@ -217,6 +217,7 @@ struct user_conn_req {
struct drv_handler {
u32 handler;
+ u8 mac_idx;
};
struct op_mode {
@@ -240,7 +241,7 @@ struct ba_session_info {
struct remain_ch {
u16 ch;
- u32 u32duration;
+ u32 duration;
wilc_remain_on_chan_expired expired;
wilc_remain_on_chan_ready ready;
void *arg;
@@ -271,14 +272,12 @@ struct host_if_drv {
enum host_if_state hif_state;
u8 assoc_bssid[ETH_ALEN];
- struct cfg_param_val cfg_values;
+ struct cfg_param_attr cfg_values;
- struct semaphore sem_cfg_values;
+ struct mutex cfg_values_lock;
struct semaphore sem_test_key_block;
struct semaphore sem_test_disconn_block;
struct semaphore sem_get_rssi;
- struct semaphore sem_get_link_speed;
- struct semaphore sem_get_chnl;
struct semaphore sem_inactive_time;
struct timer_list scan_timer;
@@ -312,68 +311,60 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
u8 index);
int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
u8 index, u8 mode, enum AUTHTYPE auth_type);
-s32 wilc_add_ptk(struct wilc_vif *vif, const u8 *pu8Ptk, u8 u8PtkKeylen,
- const u8 *mac_addr, const u8 *pu8RxMic, const u8 *pu8TxMic,
- u8 mode, u8 u8Ciphermode, u8 u8Idx);
+int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
+ const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic,
+ u8 mode, u8 cipher_mode, u8 index);
s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
u32 *pu32InactiveTime);
-s32 wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *pu8RxGtk, u8 u8GtkKeylen,
- u8 u8KeyIdx, u32 u32KeyRSClen, const u8 *KeyRSC,
- const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode,
- u8 u8Ciphermode);
-s32 wilc_add_tx_gtk(struct host_if_drv *hWFIDrv, u8 u8KeyLen,
- u8 *pu8TxGtk, u8 u8KeyIdx);
-s32 wilc_set_pmkid_info(struct wilc_vif *vif,
- struct host_if_pmkid_attr *pu8PmkidInfoArray);
-s32 wilc_get_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress);
-s32 wilc_set_mac_address(struct wilc_vif *vif, u8 *pu8MacAddress);
-int wilc_wait_msg_queue_idle(void);
-s32 wilc_set_start_scan_req(struct host_if_drv *hWFIDrv, u8 scanSource);
-s32 wilc_set_join_req(struct wilc_vif *vif, u8 *pu8bssid, const u8 *pu8ssid,
- size_t ssidLen, const u8 *pu8IEs, size_t IEsLen,
- wilc_connect_result pfConnectResult, void *pvUserArg,
- u8 u8security, enum AUTHTYPE tenuAuth_type,
- u8 u8channel, void *pJoinParams);
-s32 wilc_flush_join_req(struct wilc_vif *vif);
-s32 wilc_disconnect(struct wilc_vif *vif, u16 u16ReasonCode);
+int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
+ u8 index, u32 key_rsc_len, const u8 *key_rsc,
+ const u8 *rx_mic, const u8 *tx_mic, u8 mode,
+ u8 cipher_mode);
+int wilc_set_pmkid_info(struct wilc_vif *vif,
+ struct host_if_pmkid_attr *pmkid);
+int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr);
+int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
+ size_t ssid_len, const u8 *ies, size_t ies_len,
+ wilc_connect_result connect_result, void *user_arg,
+ u8 security, enum AUTHTYPE auth_type,
+ u8 channel, void *join_params);
+int wilc_disconnect(struct wilc_vif *vif, u16 reason_code);
int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel);
-s32 wilc_get_rssi(struct wilc_vif *vif, s8 *ps8Rssi);
-s32 wilc_scan(struct wilc_vif *vif, u8 u8ScanSource, u8 u8ScanType,
- u8 *pu8ChnlFreqList, u8 u8ChnlListLen, const u8 *pu8IEs,
- size_t IEsLen, wilc_scan_result ScanResult, void *pvUserArg,
- struct hidden_network *pstrHiddenNetwork);
-s32 wilc_hif_set_cfg(struct wilc_vif *vif,
- struct cfg_param_val *pstrCfgParamVal);
-s32 wilc_init(struct net_device *dev, struct host_if_drv **phWFIDrv);
-s32 wilc_deinit(struct wilc_vif *vif);
-s32 wilc_add_beacon(struct wilc_vif *vif, u32 u32Interval, u32 u32DTIMPeriod,
- u32 u32HeadLen, u8 *pu8Head, u32 u32TailLen, u8 *pu8Tail);
+int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level);
+int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
+ u8 *ch_freq_list, u8 ch_list_len, const u8 *ies,
+ size_t ies_len, wilc_scan_result scan_result, void *user_arg,
+ struct hidden_network *hidden_network);
+int wilc_hif_set_cfg(struct wilc_vif *vif,
+ struct cfg_param_attr *cfg_param);
+int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler);
+int wilc_deinit(struct wilc_vif *vif);
+int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
+ u32 head_len, u8 *head, u32 tail_len, u8 *tail);
int wilc_del_beacon(struct wilc_vif *vif);
int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param);
-s32 wilc_del_allstation(struct wilc_vif *vif, u8 pu8MacAddr[][ETH_ALEN]);
+int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN]);
int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr);
-s32 wilc_edit_station(struct wilc_vif *vif,
- struct add_sta_param *pstrStaParams);
-s32 wilc_set_power_mgmt(struct wilc_vif *vif, bool bIsEnabled, u32 u32Timeout);
-s32 wilc_setup_multicast_filter(struct wilc_vif *vif, bool bIsEnabled,
- u32 u32count);
-s32 wilc_setup_ipaddress(struct wilc_vif *vif, u8 *u16ipadd, u8 idx);
-s32 wilc_del_all_rx_ba_session(struct wilc_vif *vif, char *pBSSID, char TID);
-s32 wilc_remain_on_channel(struct wilc_vif *vif, u32 u32SessionID,
- u32 u32duration, u16 chan,
- wilc_remain_on_chan_expired RemainOnChanExpired,
- wilc_remain_on_chan_ready RemainOnChanReady,
- void *pvUserArg);
-s32 wilc_listen_state_expired(struct wilc_vif *vif, u32 u32SessionID);
-s32 wilc_frame_register(struct wilc_vif *vif, u16 u16FrameType, bool bReg);
-int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index);
+int wilc_edit_station(struct wilc_vif *vif,
+ struct add_sta_param *sta_param);
+int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout);
+int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
+ u32 count);
+int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
+int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
+ u32 duration, u16 chan,
+ wilc_remain_on_chan_expired expired,
+ wilc_remain_on_chan_ready ready,
+ void *user_arg);
+int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id);
+int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg);
+int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx);
int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode);
-
-void wilc_free_join_params(void *pJoinParams);
-
-s32 wilc_get_statistics(struct wilc_vif *vif, struct rf_info *pstrStatistics);
+int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats);
void wilc_resolve_disconnect_aberration(struct wilc_vif *vif);
int wilc_get_vif_idx(struct wilc_vif *vif);
+int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
+int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
extern bool wilc_optaining_ip;
extern u8 wilc_connected_ssid[6];
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index e550027645b7..7d9e5ded8ff4 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -7,22 +7,20 @@
* @version 1.0
*/
#include "wilc_wfi_cfgoperations.h"
-#include "linux_wlan_common.h"
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
-
struct wilc_wfi_radiotap_hdr {
struct ieee80211_radiotap_header hdr;
u8 rate;
-} __attribute__((packed));
+} __packed;
struct wilc_wfi_radiotap_cb_hdr {
struct ieee80211_radiotap_header hdr;
u8 rate;
u8 dump;
u16 tx_flags;
-} __attribute__((packed));
+} __packed;
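
Note: __packed is the kernel's spelling of __attribute__((packed)); it keeps the compiler from inserting padding, which matters for wire-format headers like radiotap. A one-struct sketch (rt_example is illustrative, not the driver's layout):

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct rt_example {
            u8 version;
            __le16 len;      /* starts at byte 1: no padding inserted */
    } __packed;
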
static struct net_device *wilc_wfi_mon; /* global monitor netdev */
@@ -53,15 +51,11 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
struct wilc_wfi_radiotap_hdr *hdr;
struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
- PRINT_INFO(HOSTAPD_DBG, "In monitor interface receive function\n");
-
- if (wilc_wfi_mon == NULL)
+ if (!wilc_wfi_mon)
return;
- if (!netif_running(wilc_wfi_mon)) {
- PRINT_INFO(HOSTAPD_DBG, "Monitor interface already RUNNING\n");
+ if (!netif_running(wilc_wfi_mon))
return;
- }
/* Get WILC header */
memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
@@ -71,18 +65,15 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
pkt_offset = GET_PKT_OFFSET(header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
-
/* hostapd callback mgmt frame */
skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_cb_hdr));
- if (skb == NULL) {
- PRINT_INFO(HOSTAPD_DBG, "Monitor if : No memory to allocate skb");
+ if (!skb)
return;
- }
memcpy(skb_put(skb, size), buff, size);
- cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *) skb_push(skb, sizeof(*cb_hdr));
+ cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb, sizeof(*cb_hdr));
memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));
cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
@@ -103,29 +94,21 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
}
} else {
-
skb = dev_alloc_skb(size + sizeof(struct wilc_wfi_radiotap_hdr));
- if (skb == NULL) {
- PRINT_INFO(HOSTAPD_DBG, "Monitor if : No memory to allocate skb");
+ if (!skb)
return;
- }
memcpy(skb_put(skb, size), buff, size);
- hdr = (struct wilc_wfi_radiotap_hdr *) skb_push(skb, sizeof(*hdr));
+ hdr = (struct wilc_wfi_radiotap_hdr *)skb_push(skb, sizeof(*hdr));
memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr));
hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
- PRINT_INFO(HOSTAPD_DBG, "Radiotap len %d\n", hdr->hdr.it_len);
hdr->hdr.it_present = cpu_to_le32
(1 << IEEE80211_RADIOTAP_RATE); /* | */
- PRINT_INFO(HOSTAPD_DBG, "Presentflags %d\n", hdr->hdr.it_present);
hdr->rate = 5; /* txrate->bitrate / 5; */
-
}
-
-
skb->dev = wilc_wfi_mon;
skb_set_mac_header(skb, 0);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -134,8 +117,6 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
-
-
}
struct tx_complete_mon_data {
@@ -145,48 +126,30 @@ struct tx_complete_mon_data {
static void mgmt_tx_complete(void *priv, int status)
{
-
- struct tx_complete_mon_data *pv_data = (struct tx_complete_mon_data *)priv;
- u8 *buf = pv_data->buff;
-
-
-
- if (status == 1) {
- if (INFO || buf[0] == 0x10 || buf[0] == 0xb0)
- PRINT_INFO(HOSTAPD_DBG, "Packet sent successfully - Size = %d - Address = %p.\n", pv_data->size, pv_data->buff);
- } else {
- PRINT_INFO(HOSTAPD_DBG, "Couldn't send packet - Size = %d - Address = %p.\n", pv_data->size, pv_data->buff);
- }
-
-
+ struct tx_complete_mon_data *pv_data = priv;
/* incase of fully hosting mode, the freeing will be done in response to the cfg packet */
kfree(pv_data->buff);
kfree(pv_data);
}
+
static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len)
{
struct tx_complete_mon_data *mgmt_tx = NULL;
- if (dev == NULL) {
- PRINT_D(HOSTAPD_DBG, "ERROR: dev == NULL\n");
+ if (!dev)
return -EFAULT;
- }
netif_stop_queue(dev);
- mgmt_tx = kmalloc(sizeof(struct tx_complete_mon_data), GFP_ATOMIC);
- if (mgmt_tx == NULL) {
- PRINT_ER("Failed to allocate memory for mgmt_tx structure\n");
- return -EFAULT;
- }
+ mgmt_tx = kmalloc(sizeof(*mgmt_tx), GFP_ATOMIC);
+ if (!mgmt_tx)
+ return -ENOMEM;
mgmt_tx->buff = kmalloc(len, GFP_ATOMIC);
- if (mgmt_tx->buff == NULL) {
- PRINT_ER("Failed to allocate memory for mgmt_tx buff\n");
+ if (!mgmt_tx->buff) {
kfree(mgmt_tx);
- return -EFAULT;
-
+ return -ENOMEM;
}
mgmt_tx->size = len;
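
Note: allocation failures in mon_mgmt_tx() now return -ENOMEM instead of -EFAULT; -EFAULT means a bad address, not an exhausted allocator. The corrected shape, condensed from the hunk above:

    mgmt_tx->buff = kmalloc(len, GFP_ATOMIC);
    if (!mgmt_tx->buff) {
            kfree(mgmt_tx);      /* unwind the outer allocation */
            return -ENOMEM;      /* allocation failure, not -EFAULT */
    }
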
@@ -211,47 +174,30 @@ static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len)
static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- u32 rtap_len, i, ret = 0;
+ u32 rtap_len, ret = 0;
struct WILC_WFI_mon_priv *mon_priv;
struct sk_buff *skb2;
struct wilc_wfi_radiotap_cb_hdr *cb_hdr;
- if (wilc_wfi_mon == NULL)
+ if (!wilc_wfi_mon)
return -EFAULT;
mon_priv = netdev_priv(wilc_wfi_mon);
-
- if (mon_priv == NULL) {
- PRINT_ER("Monitor interface private structure is NULL\n");
+ if (!mon_priv)
return -EFAULT;
- }
-
-
rtap_len = ieee80211_get_radiotap_len(skb->data);
- if (skb->len < rtap_len) {
- PRINT_ER("Error in radiotap header\n");
+ if (skb->len < rtap_len)
return -1;
- }
- /* skip the radiotap header */
- PRINT_INFO(HOSTAPD_DBG, "Radiotap len: %d\n", rtap_len);
- if (INFO) {
- for (i = 0; i < rtap_len; i++)
- PRINT_INFO(HOSTAPD_DBG, "Radiotap_hdr[%d] %02x\n", i, skb->data[i]);
- }
- /* Skip the ratio tap header */
skb_pull(skb, rtap_len);
- if (skb->data[0] == 0xc0)
- PRINT_INFO(HOSTAPD_DBG, "%x:%x:%x:%x:%x%x\n", skb->data[4], skb->data[5], skb->data[6], skb->data[7], skb->data[8], skb->data[9]);
-
if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) {
skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr));
memcpy(skb_put(skb2, skb->len), skb->data, skb->len);
- cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *) skb_push(skb2, sizeof(*cb_hdr));
+ cb_hdr = (struct wilc_wfi_radiotap_cb_hdr *)skb_push(skb2, sizeof(*cb_hdr));
memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr));
cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
@@ -278,24 +224,19 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
}
skb->dev = mon_priv->real_ndev;
- PRINT_INFO(HOSTAPD_DBG, "Skipping the radiotap header\n");
-
-
-
- /* actual deliver of data is device-specific, and not shown here */
- PRINT_INFO(HOSTAPD_DBG, "SKB netdevice name = %s\n", skb->dev->name);
- PRINT_INFO(HOSTAPD_DBG, "MONITOR real dev name = %s\n", mon_priv->real_ndev->name);
-
/* Identify if Ethernet or MAC header (data or mgmt) */
memcpy(srcAdd, &skb->data[10], 6);
memcpy(bssid, &skb->data[16], 6);
/* if source address and bssid fields are equal>>Mac header */
/*send it to mgmt frames handler */
if (!(memcmp(srcAdd, bssid, 6))) {
- mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
+ ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
+ if (ret)
+ netdev_err(dev, "fail to mgmt tx\n");
dev_kfree_skb(skb);
- } else
+ } else {
ret = wilc_mac_xmit(skb, mon_priv->real_ndev);
+ }
return ret;
}
@@ -316,23 +257,16 @@ static const struct net_device_ops wilc_wfi_netdev_ops = {
*/
struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_device *real_dev)
{
-
-
u32 ret = 0;
struct WILC_WFI_mon_priv *priv;
/*If monitor interface is already initialized, return it*/
- if (wilc_wfi_mon) {
+ if (wilc_wfi_mon)
return wilc_wfi_mon;
- }
wilc_wfi_mon = alloc_etherdev(sizeof(struct WILC_WFI_mon_priv));
- if (!wilc_wfi_mon) {
- PRINT_ER("failed to allocate memory\n");
+ if (!wilc_wfi_mon)
return NULL;
-
- }
-
wilc_wfi_mon->type = ARPHRD_IEEE80211_RADIOTAP;
strncpy(wilc_wfi_mon->name, name, IFNAMSIZ);
wilc_wfi_mon->name[IFNAMSIZ - 1] = 0;
@@ -340,14 +274,12 @@ struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_devi
ret = register_netdevice(wilc_wfi_mon);
if (ret) {
- PRINT_ER(" register_netdevice failed (%d)\n", ret);
+ netdev_err(real_dev, "register_netdevice failed\n");
return NULL;
}
priv = netdev_priv(wilc_wfi_mon);
- if (priv == NULL) {
- PRINT_ER("private structure is NULL\n");
+ if (!priv)
return NULL;
- }
priv->real_ndev = real_dev;
@@ -367,14 +299,11 @@ int WILC_WFI_deinit_mon_interface(void)
{
bool rollback_lock = false;
- if (wilc_wfi_mon != NULL) {
- PRINT_D(HOSTAPD_DBG, "In Deinit monitor interface\n");
- PRINT_D(HOSTAPD_DBG, "RTNL is being locked\n");
+ if (wilc_wfi_mon) {
if (rtnl_is_locked()) {
rtnl_unlock();
rollback_lock = true;
}
- PRINT_D(HOSTAPD_DBG, "Unregister netdev\n");
unregister_netdev(wilc_wfi_mon);
if (rollback_lock) {
@@ -384,5 +313,4 @@ int WILC_WFI_deinit_mon_interface(void)
wilc_wfi_mon = NULL;
}
return 0;
-
}
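
Note: WILC_WFI_deinit_mon_interface() may be entered with the RTNL held, so it drops the lock before unregister_netdev() (which takes the RTNL itself) and retakes it afterwards. The pattern in isolation (dev stands for the monitor netdev):

    bool rollback_lock = false;

    if (rtnl_is_locked()) {
            rtnl_unlock();
            rollback_lock = true;
    }
    unregister_netdev(dev);
    if (rollback_lock)
            rtnl_lock();
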
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 54fe9d74b780..bfa754bb022d 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1,5 +1,4 @@
#include "wilc_wfi_cfgoperations.h"
-#include "linux_wlan_common.h"
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
@@ -13,7 +12,6 @@
#include <linux/kthread.h>
#include <linux/firmware.h>
-#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -25,7 +23,8 @@
#include <linux/semaphore.h>
-static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr);
+static int dev_state_ev_handler(struct notifier_block *this,
+ unsigned long event, void *ptr);
static struct notifier_block g_dev_notifier = {
.notifier_call = dev_state_ev_handler
@@ -57,9 +56,10 @@ static const struct net_device_ops wilc_netdev_ops = {
};
-static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr)
+static int dev_state_ev_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
{
- struct in_ifaddr *dev_iface = (struct in_ifaddr *)ptr;
+ struct in_ifaddr *dev_iface = ptr;
struct wilc_priv *priv;
struct host_if_drv *hif_drv;
struct net_device *dev;
@@ -68,66 +68,48 @@ static int dev_state_ev_handler(struct notifier_block *this, unsigned long event
u8 null_ip[4] = {0};
char wlan_dev_name[5] = "wlan0";
- if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev) {
- PRINT_D(GENERIC_DBG, "dev_iface = NULL\n");
+ if (!dev_iface || !dev_iface->ifa_dev || !dev_iface->ifa_dev->dev)
return NOTIFY_DONE;
- }
if (memcmp(dev_iface->ifa_label, "wlan0", 5) &&
- memcmp(dev_iface->ifa_label, "p2p0", 4)) {
- PRINT_D(GENERIC_DBG, "Interface is neither WLAN0 nor P2P0\n");
+ memcmp(dev_iface->ifa_label, "p2p0", 4))
return NOTIFY_DONE;
- }
dev = (struct net_device *)dev_iface->ifa_dev->dev;
- if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) {
- PRINT_D(GENERIC_DBG, "No Wireless registerd\n");
+ if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
return NOTIFY_DONE;
- }
+
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
- if (!priv) {
- PRINT_D(GENERIC_DBG, "No Wireless Priv\n");
+ if (!priv)
return NOTIFY_DONE;
- }
- hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv;
+
+ hif_drv = (struct host_if_drv *)priv->hif_drv;
vif = netdev_priv(dev);
- if (!vif || !hif_drv) {
- PRINT_D(GENERIC_DBG, "No Wireless Priv\n");
+ if (!vif || !hif_drv)
return NOTIFY_DONE;
- }
-
- PRINT_INFO(GENERIC_DBG, "dev_state_ev_handler +++\n");
switch (event) {
case NETDEV_UP:
- PRINT_D(GENERIC_DBG, "dev_state_ev_handler event=NETDEV_UP %p\n", dev);
-
- PRINT_INFO(GENERIC_DBG, "\n ============== IP Address Obtained ===============\n\n");
-
if (vif->iftype == STATION_MODE || vif->iftype == CLIENT_MODE) {
hif_drv->IFC_UP = 1;
wilc_optaining_ip = false;
del_timer(&wilc_during_ip_timer);
- PRINT_D(GENERIC_DBG, "IP obtained , enable scan\n");
}
if (wilc_enable_ps)
wilc_set_power_mgmt(vif, 1, 0);
- PRINT_D(GENERIC_DBG, "[%s] Up IP\n", dev_iface->ifa_label);
+ netdev_dbg(dev, "[%s] Up IP\n", dev_iface->ifa_label);
ip_addr_buf = (char *)&dev_iface->ifa_address;
- PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n",
- ip_addr_buf[0], ip_addr_buf[1],
- ip_addr_buf[2], ip_addr_buf[3]);
- wilc_setup_ipaddress(vif, ip_addr_buf, vif->u8IfIdx);
+ netdev_dbg(dev, "IP add=%d:%d:%d:%d\n",
+ ip_addr_buf[0], ip_addr_buf[1],
+ ip_addr_buf[2], ip_addr_buf[3]);
+ wilc_setup_ipaddress(vif, ip_addr_buf, vif->idx);
break;
case NETDEV_DOWN:
- PRINT_D(GENERIC_DBG, "dev_state_ev_handler event=NETDEV_DOWN %p\n", dev);
-
- PRINT_INFO(GENERIC_DBG, "\n ============== IP Address Released ===============\n\n");
if (vif->iftype == STATION_MODE || vif->iftype == CLIENT_MODE) {
hif_drv->IFC_UP = 0;
wilc_optaining_ip = false;
@@ -138,21 +120,18 @@ static int dev_state_ev_handler(struct notifier_block *this, unsigned long event
wilc_resolve_disconnect_aberration(vif);
- PRINT_D(GENERIC_DBG, "[%s] Down IP\n", dev_iface->ifa_label);
+ netdev_dbg(dev, "[%s] Down IP\n", dev_iface->ifa_label);
ip_addr_buf = null_ip;
- PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n",
- ip_addr_buf[0], ip_addr_buf[1],
- ip_addr_buf[2], ip_addr_buf[3]);
+ netdev_dbg(dev, "IP add=%d:%d:%d:%d\n",
+ ip_addr_buf[0], ip_addr_buf[1],
+ ip_addr_buf[2], ip_addr_buf[3]);
- wilc_setup_ipaddress(vif, ip_addr_buf, vif->u8IfIdx);
+ wilc_setup_ipaddress(vif, ip_addr_buf, vif->idx);
break;
default:
- PRINT_INFO(GENERIC_DBG, "dev_state_ev_handler event=default\n");
- PRINT_INFO(GENERIC_DBG, "[%s] unknown dev event: %lu\n", dev_iface->ifa_label, event);
-
break;
}
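
Note: dev_state_ev_handler() is wired up through g_dev_notifier as an inetaddr notifier, so it fires on NETDEV_UP/NETDEV_DOWN address events. A minimal registration sketch (the register/unregister calls are the standard kernel API; their placement at module init/exit is assumed):

    #include <linux/inetdevice.h>

    static struct notifier_block nb = {
            .notifier_call = dev_state_ev_handler,
    };

    register_inetaddr_notifier(&nb);    /* at init */
    unregister_inetaddr_notifier(&nb);  /* at exit */
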
@@ -163,14 +142,13 @@ static irqreturn_t isr_uh_routine(int irq, void *user_data)
{
struct wilc_vif *vif;
struct wilc *wilc;
- struct net_device *dev = (struct net_device *)user_data;
+ struct net_device *dev = user_data;
vif = netdev_priv(dev);
wilc = vif->wilc;
- PRINT_D(INT_DBG, "Interrupt received UH\n");
if (wilc->close) {
- PRINT_ER("Driver is CLOSING: Can't handle UH interrupt\n");
+ netdev_err(dev, "Can't handle UH interrupt\n");
return IRQ_HANDLED;
}
return IRQ_WAKE_THREAD;
@@ -180,16 +158,16 @@ static irqreturn_t isr_bh_routine(int irq, void *userdata)
{
struct wilc_vif *vif;
struct wilc *wilc;
+ struct net_device *dev = userdata;
vif = netdev_priv(userdata);
wilc = vif->wilc;
if (wilc->close) {
- PRINT_ER("Driver is CLOSING: Can't handle BH interrupt\n");
+ netdev_err(dev, "Can't handle BH interrupt\n");
return IRQ_HANDLED;
}
- PRINT_D(INT_DBG, "Interrupt received BH\n");
wilc_handle_isr(wilc);
return IRQ_HANDLED;
@@ -209,7 +187,7 @@ static int init_irq(struct net_device *dev)
wl->dev_irq_num = gpio_to_irq(wl->gpio);
} else {
ret = -1;
- PRINT_ER("could not obtain gpio for WILC_INTR\n");
+ netdev_err(dev, "could not obtain gpio for WILC_INTR\n");
}
if (ret != -1 && request_threaded_irq(wl->dev_irq_num,
@@ -217,12 +195,13 @@ static int init_irq(struct net_device *dev)
isr_bh_routine,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"WILC_IRQ", dev) < 0) {
- PRINT_ER("Failed to request IRQ for GPIO: %d\n", wl->gpio);
+ netdev_err(dev, "Failed to request IRQ GPIO: %d\n", wl->gpio);
gpio_free(wl->gpio);
ret = -1;
} else {
- PRINT_D(INIT_DBG, "IRQ request succeeded IRQ-NUM= %d on GPIO: %d\n",
- wl->dev_irq_num, wl->gpio);
+ netdev_dbg(dev,
+ "IRQ request succeeded IRQ-NUM= %d on GPIO: %d\n",
+ wl->dev_irq_num, wl->gpio);
}
return ret;
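
Note: init_irq() splits interrupt handling into a hard-IRQ top half (isr_uh_routine) and a threaded bottom half (isr_bh_routine); IRQF_ONESHOT keeps the line masked until the thread finishes. The request in isolation, condensed from the hunk above:

    ret = request_threaded_irq(wl->dev_irq_num,
                               isr_uh_routine, isr_bh_routine,
                               IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                               "WILC_IRQ", dev);
    if (ret < 0)
            netdev_err(dev, "Failed to request IRQ GPIO: %d\n", wl->gpio);
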
@@ -243,22 +222,14 @@ static void deinit_irq(struct net_device *dev)
}
}
-void wilc_dbg(u8 *buff)
-{
- PRINT_D(INIT_DBG, "%d\n", *buff);
-}
-
int wilc_lock_timeout(struct wilc *nic, void *vp, u32 timeout)
{
/* FIXME: replace with mutex_lock or wait_for_completion */
int error = -1;
- PRINT_D(LOCK_DBG, "Locking %p\n", vp);
if (vp)
- error = down_timeout((struct semaphore *)vp,
+ error = down_timeout(vp,
msecs_to_jiffies(timeout));
- else
- PRINT_ER("Failed, mutex is NULL\n");
return error;
}
@@ -275,8 +246,6 @@ void wilc_mac_indicate(struct wilc *wilc, int flag)
} else {
wilc->mac_status = status;
}
- } else if (flag == WILC_MAC_INDICATE_SCAN) {
- PRINT_D(GENERIC_DBG, "Scanning ...\n");
}
}
@@ -288,26 +257,19 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
bssid = mac_header + 10;
bssid1 = mac_header + 4;
- for (i = 0; i < wilc->vif_num; i++)
- if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN) ||
- !memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
- return wilc->vif[i]->ndev;
-
- PRINT_INFO(INIT_DBG, "Invalide handle\n");
- for (i = 0; i < 25; i++)
- PRINT_D(INIT_DBG, "%02x ", mac_header[i]);
- bssid = mac_header + 18;
- bssid1 = mac_header + 12;
- for (i = 0; i < wilc->vif_num; i++)
- if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN) ||
- !memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
- return wilc->vif[i]->ndev;
+ for (i = 0; i < wilc->vif_num; i++) {
+ if (wilc->vif[i]->mode == STATION_MODE)
+ if (!memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
+ return wilc->vif[i]->ndev;
+ if (wilc->vif[i]->mode == AP_MODE)
+ if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN))
+ return wilc->vif[i]->ndev;
+ }
- PRINT_INFO(INIT_DBG, "\n");
return NULL;
}
-int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid)
+int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
{
int i = 0;
int ret = -1;
@@ -320,6 +282,7 @@ int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid)
for (i = 0; i < wilc->vif_num; i++)
if (wilc->vif[i]->ndev == wilc_netdev) {
memcpy(wilc->vif[i]->bssid, bssid, 6);
+ wilc->vif[i]->mode = mode;
ret = 0;
break;
}
@@ -362,28 +325,21 @@ static int linux_wlan_txq_task(void *vp)
up(&wl->txq_thread_started);
while (1) {
- PRINT_D(TX_DBG, "txq_task Taking a nap :)\n");
down(&wl->txq_event);
- PRINT_D(TX_DBG, "txq_task Who waked me up :$\n");
if (wl->close) {
up(&wl->txq_thread_started);
while (!kthread_should_stop())
schedule();
-
- PRINT_D(TX_DBG, "TX thread stopped\n");
break;
}
- PRINT_D(TX_DBG, "txq_task handle the sending packet and let me go to sleep.\n");
#if !defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
ret = wilc_wlan_handle_txq(dev, &txq_count);
#else
do {
ret = wilc_wlan_handle_txq(dev, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- PRINT_D(TX_DBG, "Waking up queue\n");
-
if (netif_queue_stopped(wl->vif[0]->ndev))
netif_wake_queue(wl->vif[0]->ndev);
if (netif_queue_stopped(wl->vif[1]->ndev))
@@ -391,9 +347,6 @@ static int linux_wlan_txq_task(void *vp)
}
if (ret == WILC_TX_ERR_NO_BUF) {
- do {
- msleep(TX_BACKOFF_WEIGHT_UNIT_MS << backoff_weight);
- } while (0);
backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP;
if (backoff_weight > TX_BACKOFF_WEIGHT_MAX)
backoff_weight = TX_BACKOFF_WEIGHT_MAX;
@@ -410,43 +363,31 @@ static int linux_wlan_txq_task(void *vp)
return 0;
}
-void wilc_rx_complete(struct wilc *nic)
-{
- PRINT_D(RX_DBG, "RX completed\n");
-}
-
int wilc_wlan_get_firmware(struct net_device *dev)
{
struct wilc_vif *vif;
struct wilc *wilc;
- int ret = 0;
+ int chip_id, ret = 0;
const struct firmware *wilc_firmware;
char *firmware;
vif = netdev_priv(dev);
wilc = vif->wilc;
- if (vif->iftype == AP_MODE) {
- firmware = AP_FIRMWARE;
- } else if (vif->iftype == STATION_MODE) {
- firmware = STA_FIRMWARE;
- } else {
- PRINT_D(INIT_DBG, "Get P2P_CONCURRENCY_FIRMWARE\n");
- firmware = P2P_CONCURRENCY_FIRMWARE;
- }
+ chip_id = wilc_get_chipid(wilc, false);
- if (!vif) {
- PRINT_ER("vif is NULL\n");
- goto _fail_;
- }
+ if (chip_id < 0x1003a0)
+ firmware = FIRMWARE_1002;
+ else
+ firmware = FIRMWARE_1003;
- if (!(&vif->ndev->dev)) {
- PRINT_ER("&vif->ndev->dev is NULL\n");
+ netdev_info(dev, "loading firmware %s\n", firmware);
+
+ if (!(&vif->ndev->dev))
goto _fail_;
- }
if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
- PRINT_ER("%s - firmare not available\n", firmware);
+ netdev_err(dev, "%s - firmare not available\n", firmware);
ret = -1;
goto _fail_;
}
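
Note: wilc_wlan_get_firmware() now picks the image by chip revision instead of interface type; the load itself is the stock request_firmware() flow. A condensed sketch (error handling trimmed; the wilc->firmware assignment is inferred from the release path in wilc1000_firmware_download() below):

    chip_id = wilc_get_chipid(wilc, false);
    firmware = (chip_id < 0x1003a0) ? FIRMWARE_1002 : FIRMWARE_1003;

    if (request_firmware(&wilc_firmware, firmware, wilc->dev))
            return -1;              /* image missing from /lib/firmware */
    wilc->firmware = wilc_firmware; /* released after download */
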
@@ -466,20 +407,13 @@ static int linux_wlan_start_firmware(struct net_device *dev)
vif = netdev_priv(dev);
wilc = vif->wilc;
- PRINT_D(INIT_DBG, "Starting Firmware ...\n");
ret = wilc_wlan_start(wilc);
- if (ret < 0) {
- PRINT_ER("Failed to start Firmware\n");
+ if (ret < 0)
return ret;
- }
- PRINT_D(INIT_DBG, "Waiting for Firmware to get ready ...\n");
ret = wilc_lock_timeout(wilc, &wilc->sync_event, 5000);
- if (ret) {
- PRINT_D(INIT_DBG, "Firmware start timed out");
+ if (ret)
return ret;
- }
- PRINT_D(INIT_DBG, "Firmware successfully started\n");
return 0;
}
@@ -494,128 +428,123 @@ static int wilc1000_firmware_download(struct net_device *dev)
wilc = vif->wilc;
if (!wilc->firmware) {
- PRINT_ER("Firmware buffer is NULL\n");
+ netdev_err(dev, "Firmware buffer is NULL\n");
return -ENOBUFS;
}
- PRINT_D(INIT_DBG, "Downloading Firmware ...\n");
+
ret = wilc_wlan_firmware_download(wilc, wilc->firmware->data,
wilc->firmware->size);
if (ret < 0)
return ret;
- PRINT_D(INIT_DBG, "Freeing FW buffer ...\n");
- PRINT_D(INIT_DBG, "Releasing firmware\n");
release_firmware(wilc->firmware);
wilc->firmware = NULL;
- PRINT_D(INIT_DBG, "Download Succeeded\n");
+ netdev_dbg(dev, "Download Succeeded\n");
return 0;
}
static int linux_wlan_init_test_config(struct net_device *dev,
- struct wilc *wilc)
+ struct wilc_vif *vif)
{
unsigned char c_val[64];
unsigned char mac_add[] = {0x00, 0x80, 0xC2, 0x5E, 0xa2, 0xff};
-
+ struct wilc *wilc = vif->wilc;
struct wilc_priv *priv;
struct host_if_drv *hif_drv;
- PRINT_D(TX_DBG, "Start configuring Firmware\n");
- get_random_bytes(&mac_add[5], 1);
- get_random_bytes(&mac_add[4], 1);
+ netdev_dbg(dev, "Start configuring Firmware\n");
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
- hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv;
- PRINT_D(INIT_DBG, "Host = %p\n", hif_drv);
+ hif_drv = (struct host_if_drv *)priv->hif_drv;
+ netdev_dbg(dev, "Host = %p\n", hif_drv);
+ wilc_get_mac_address(vif, mac_add);
- PRINT_D(INIT_DBG, "MAC address is : %02x-%02x-%02x-%02x-%02x-%02x\n",
- mac_add[0], mac_add[1], mac_add[2],
- mac_add[3], mac_add[4], mac_add[5]);
- wilc_get_chipid(wilc, 0);
+ netdev_dbg(dev, "MAC address is : %pM\n", mac_add);
+ wilc_get_chipid(wilc, false);
*(int *)c_val = 1;
- if (!wilc_wlan_cfg_set(wilc, 1, WID_SET_DRV_HANDLER, c_val, 4, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 1, WID_SET_DRV_HANDLER, c_val, 4, 0, 0))
goto _fail_;
c_val[0] = 0;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_PC_TEST_MODE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_PC_TEST_MODE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = INFRASTRUCTURE;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_BSS_TYPE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_BSS_TYPE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = RATE_AUTO;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = G_MIXED_11B_2_MODE;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11G_OPERATING_MODE, c_val, 1, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, c_val, 1, 0,
0))
goto _fail_;
c_val[0] = 1;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_CURRENT_CHANNEL, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_CHANNEL, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = G_SHORT_PREAMBLE;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_PREAMBLE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = AUTO_PROT;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_PROT_MECH, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_PROT_MECH, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = ACTIVE_SCAN;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_SCAN_TYPE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_SCAN_TYPE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = SITE_SURVEY_OFF;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_SITE_SURVEY, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_SITE_SURVEY, c_val, 1, 0, 0))
goto _fail_;
*((int *)c_val) = 0xffff;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_RTS_THRESHOLD, c_val, 2, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_RTS_THRESHOLD, c_val, 2, 0, 0))
goto _fail_;
*((int *)c_val) = 2346;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_FRAG_THRESHOLD, c_val, 2, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_FRAG_THRESHOLD, c_val, 2, 0, 0))
goto _fail_;
c_val[0] = 0;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_BCAST_SSID, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_BCAST_SSID, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = 1;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_QOS_ENABLE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_QOS_ENABLE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = NO_POWERSAVE;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_POWER_MANAGEMENT, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_POWER_MANAGEMENT, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = NO_SECURITY; /* NO_ENCRYPT, 0x79 */
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11I_MODE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11I_MODE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = OPEN_SYSTEM;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_AUTH_TYPE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_AUTH_TYPE, c_val, 1, 0, 0))
goto _fail_;
strcpy(c_val, "123456790abcdef1234567890");
- if (!wilc_wlan_cfg_set(wilc, 0, WID_WEP_KEY_VALUE, c_val,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_WEP_KEY_VALUE, c_val,
(strlen(c_val) + 1), 0, 0))
goto _fail_;
strcpy(c_val, "12345678");
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11I_PSK, c_val, (strlen(c_val)), 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11I_PSK, c_val, (strlen(c_val)), 0,
0))
goto _fail_;
strcpy(c_val, "password");
- if (!wilc_wlan_cfg_set(wilc, 0, WID_1X_KEY, c_val, (strlen(c_val) + 1),
+ if (!wilc_wlan_cfg_set(vif, 0, WID_1X_KEY, c_val, (strlen(c_val) + 1),
0, 0))
goto _fail_;
@@ -623,106 +552,106 @@ static int linux_wlan_init_test_config(struct net_device *dev,
c_val[1] = 168;
c_val[2] = 1;
c_val[3] = 112;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_1X_SERV_ADDR, c_val, 4, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_1X_SERV_ADDR, c_val, 4, 0, 0))
goto _fail_;
c_val[0] = 3;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_LISTEN_INTERVAL, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_LISTEN_INTERVAL, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = 3;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_DTIM_PERIOD, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_DTIM_PERIOD, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = NORMAL_ACK;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_ACK_POLICY, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_ACK_POLICY, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = 0;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_USER_CONTROL_ON_TX_POWER, c_val, 1,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_USER_CONTROL_ON_TX_POWER, c_val, 1,
0, 0))
goto _fail_;
c_val[0] = 48;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_TX_POWER_LEVEL_11A, c_val, 1, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11A, c_val, 1, 0,
0))
goto _fail_;
c_val[0] = 28;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_TX_POWER_LEVEL_11B, c_val, 1, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11B, c_val, 1, 0,
0))
goto _fail_;
*((int *)c_val) = 100;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_BEACON_INTERVAL, c_val, 2, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_BEACON_INTERVAL, c_val, 2, 0, 0))
goto _fail_;
c_val[0] = REKEY_DISABLE;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_POLICY, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_POLICY, c_val, 1, 0, 0))
goto _fail_;
*((int *)c_val) = 84600;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_PERIOD, c_val, 4, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PERIOD, c_val, 4, 0, 0))
goto _fail_;
*((int *)c_val) = 500;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_REKEY_PACKET_COUNT, c_val, 4, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PACKET_COUNT, c_val, 4, 0,
0))
goto _fail_;
c_val[0] = 1;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_SHORT_SLOT_ALLOWED, c_val, 1, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_SHORT_SLOT_ALLOWED, c_val, 1, 0,
0))
goto _fail_;
c_val[0] = G_SELF_CTS_PROT;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_ERP_PROT_TYPE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ERP_PROT_TYPE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = 1;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_ENABLE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ENABLE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = HT_MIXED_MODE;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_OPERATING_MODE, c_val, 1, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OPERATING_MODE, c_val, 1, 0,
0))
goto _fail_;
c_val[0] = 1;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_TXOP_PROT_DISABLE, c_val, 1, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_TXOP_PROT_DISABLE, c_val, 1, 0,
0))
goto _fail_;
memcpy(c_val, mac_add, 6);
- if (!wilc_wlan_cfg_set(wilc, 0, WID_MAC_ADDR, c_val, 6, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_MAC_ADDR, c_val, 6, 0, 0))
goto _fail_;
c_val[0] = DETECT_PROTECT_REPORT;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
0, 0))
goto _fail_;
c_val[0] = RTS_CTS_NONHT_PROT;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_HT_PROT_TYPE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_HT_PROT_TYPE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = 0;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_RIFS_PROT_ENABLE, c_val, 1, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_RIFS_PROT_ENABLE, c_val, 1, 0,
0))
goto _fail_;
c_val[0] = MIMO_MODE;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_SMPS_MODE, c_val, 1, 0, 0))
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_SMPS_MODE, c_val, 1, 0, 0))
goto _fail_;
c_val[0] = 7;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_CURRENT_TX_MCS, c_val, 1, 0,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_CURRENT_TX_MCS, c_val, 1, 0,
0))
goto _fail_;
c_val[0] = 1;
- if (!wilc_wlan_cfg_set(wilc, 0, WID_11N_IMMEDIATE_BA_ENABLED, c_val, 1,
+ if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, c_val, 1,
1, 1))
goto _fail_;
@@ -748,7 +677,6 @@ void wilc1000_wlan_deinit(struct net_device *dev)
if (wl->initialized) {
netdev_info(dev, "Deinitializing wilc1000...\n");
- PRINT_D(INIT_DBG, "Disabling IRQ\n");
if (!wl->dev_irq_num &&
wl->hif_func->disable_interrupt) {
mutex_lock(&wl->hif_cs);
@@ -758,37 +686,26 @@ void wilc1000_wlan_deinit(struct net_device *dev)
if (&wl->txq_event)
up(&wl->txq_event);
- PRINT_D(INIT_DBG, "Deinitializing Threads\n");
wlan_deinitialize_threads(dev);
-
- PRINT_D(INIT_DBG, "Deinitializing IRQ\n");
deinit_irq(dev);
wilc_wlan_stop(wl);
-
- PRINT_D(INIT_DBG, "Deinitializing WILC Wlan\n");
wilc_wlan_cleanup(dev);
#if defined(PLAT_ALLWINNER_A20) || defined(PLAT_ALLWINNER_A23) || defined(PLAT_ALLWINNER_A31)
if (!wl->dev_irq_num &&
wl->hif_func->disable_interrupt) {
-
- PRINT_D(INIT_DBG, "Disabling IRQ 2\n");
-
mutex_lock(&wl->hif_cs);
wl->hif_func->disable_interrupt(wl);
mutex_unlock(&wl->hif_cs);
}
#endif
-
- PRINT_D(INIT_DBG, "Deinitializing Locks\n");
wlan_deinit_locks(dev);
wl->initialized = false;
- PRINT_D(INIT_DBG, "wilc1000 deinitialization Done\n");
-
+ netdev_dbg(dev, "wilc1000 deinitialization Done\n");
} else {
- PRINT_D(INIT_DBG, "wilc1000 is not initialized\n");
+ netdev_dbg(dev, "wilc1000 is not initialized\n");
}
}
@@ -800,8 +717,6 @@ static int wlan_init_locks(struct net_device *dev)
vif = netdev_priv(dev);
wl = vif->wilc;
- PRINT_D(INIT_DBG, "Initializing Locks ...\n");
-
mutex_init(&wl->hif_cs);
mutex_init(&wl->rxq_cs);
@@ -826,8 +741,6 @@ static int wlan_deinit_locks(struct net_device *dev)
vif = netdev_priv(dev);
wilc = vif->wilc;
- PRINT_D(INIT_DBG, "De-Initializing Locks\n");
-
if (&wilc->hif_cs)
mutex_destroy(&wilc->hif_cs);
@@ -845,12 +758,10 @@ static int wlan_initialize_threads(struct net_device *dev)
vif = netdev_priv(dev);
wilc = vif->wilc;
- PRINT_D(INIT_DBG, "Initializing Threads ...\n");
- PRINT_D(INIT_DBG, "Creating kthread for transmission\n");
wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev,
"K_TXQ_TASK");
if (!wilc->txq_thread) {
- PRINT_ER("couldn't create TXQ thread\n");
+ netdev_err(dev, "couldn't create TXQ thread\n");
wilc->close = 0;
return -ENOBUFS;
}
@@ -863,11 +774,11 @@ static void wlan_deinitialize_threads(struct net_device *dev)
{
struct wilc_vif *vif;
struct wilc *wl;
+
vif = netdev_priv(dev);
wl = vif->wilc;
wl->close = 1;
- PRINT_D(INIT_DBG, "Deinitializing Threads\n");
if (&wl->txq_event)
up(&wl->txq_event);
@@ -891,20 +802,17 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif)
ret = wilc_wlan_init(dev);
if (ret < 0) {
- PRINT_ER("Initializing WILC_Wlan FAILED\n");
ret = -EIO;
goto _fail_locks_;
}
if (wl->gpio >= 0 && init_irq(dev)) {
- PRINT_ER("couldn't initialize IRQ\n");
ret = -EIO;
goto _fail_locks_;
}
ret = wlan_initialize_threads(dev);
if (ret < 0) {
- PRINT_ER("Initializing Threads FAILED\n");
ret = -EIO;
goto _fail_wilc_wlan_;
}
@@ -912,45 +820,41 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif)
if (!wl->dev_irq_num &&
wl->hif_func->enable_interrupt &&
wl->hif_func->enable_interrupt(wl)) {
- PRINT_ER("couldn't initialize IRQ\n");
ret = -EIO;
goto _fail_irq_init_;
}
if (wilc_wlan_get_firmware(dev)) {
- PRINT_ER("Can't get firmware\n");
ret = -EIO;
goto _fail_irq_enable_;
}
ret = wilc1000_firmware_download(dev);
if (ret < 0) {
- PRINT_ER("Failed to download firmware\n");
ret = -EIO;
goto _fail_irq_enable_;
}
ret = linux_wlan_start_firmware(dev);
if (ret < 0) {
- PRINT_ER("Failed to start firmware\n");
ret = -EIO;
goto _fail_irq_enable_;
}
- if (wilc_wlan_cfg_get(wl, 1, WID_FIRMWARE_VERSION, 1, 0)) {
+ if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) {
int size;
- char Firmware_ver[20];
+ char firmware_ver[20];
- size = wilc_wlan_cfg_get_val(
- WID_FIRMWARE_VERSION,
- Firmware_ver, sizeof(Firmware_ver));
- Firmware_ver[size] = '\0';
- PRINT_D(INIT_DBG, "***** Firmware Ver = %s *******\n", Firmware_ver);
+ size = wilc_wlan_cfg_get_val(WID_FIRMWARE_VERSION,
+ firmware_ver,
+ sizeof(firmware_ver));
+ firmware_ver[size] = '\0';
+ netdev_dbg(dev, "Firmware Ver = %s\n", firmware_ver);
}
- ret = linux_wlan_init_test_config(dev, wl);
+ ret = linux_wlan_init_test_config(dev, vif);
if (ret < 0) {
- PRINT_ER("Failed to configure firmware\n");
+ netdev_err(dev, "Failed to configure firmware\n");
ret = -EIO;
goto _fail_fw_start_;
}
@@ -974,9 +878,9 @@ _fail_wilc_wlan_:
wilc_wlan_cleanup(dev);
_fail_locks_:
wlan_deinit_locks(dev);
- PRINT_ER("WLAN Iinitialization FAILED\n");
+ netdev_err(dev, "WLAN Iinitialization FAILED\n");
} else {
- PRINT_D(INIT_DBG, "wilc1000 already initialized\n");
+ netdev_dbg(dev, "wilc1000 already initialized\n");
}
return ret;
}
@@ -1003,7 +907,7 @@ int wilc_mac_open(struct net_device *ndev)
vif = netdev_priv(ndev);
wl = vif->wilc;
- if (!wl|| !wl->dev) {
+ if (!wl || !wl->dev) {
netdev_err(ndev, "wilc1000: SPI device not ready\n");
return -ENODEV;
}
@@ -1011,31 +915,45 @@ int wilc_mac_open(struct net_device *ndev)
vif = netdev_priv(ndev);
wilc = vif->wilc;
priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
- PRINT_D(INIT_DBG, "MAC OPEN[%p]\n", ndev);
+ netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
ret = wilc_init_host_int(ndev);
- if (ret < 0) {
- PRINT_ER("Failed to initialize host interface\n");
-
+ if (ret < 0)
return ret;
- }
- PRINT_D(INIT_DBG, "*** re-init ***\n");
ret = wilc1000_wlan_init(ndev, vif);
if (ret < 0) {
- PRINT_ER("Failed to initialize wilc1000\n");
wilc_deinit_host_int(ndev);
return ret;
}
- wilc_set_machw_change_vir_if(ndev, false);
-
- wilc_get_mac_address(vif, mac_add);
- PRINT_D(INIT_DBG, "Mac address: %pM\n", mac_add);
-
for (i = 0; i < wl->vif_num; i++) {
if (ndev == wl->vif[i]->ndev) {
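+			/*
+			 * Register this interface with the firmware: AP mode
+			 * passes 0, the no-connected-interfaces case passes
+			 * the open interface count, otherwise 0 or 1 is used
+			 * depending on whether the other vif's bssid still
+			 * equals its own MAC address.
+			 */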
+ if (vif->iftype == AP_MODE) {
+ wilc_set_wfi_drv_handler(vif,
+ wilc_get_vif_idx(vif),
+ 0);
+ } else if (!wilc_wlan_get_num_conn_ifcs(wilc)) {
+ wilc_set_wfi_drv_handler(vif,
+ wilc_get_vif_idx(vif),
+ wilc->open_ifcs);
+ } else {
+ if (memcmp(wilc->vif[i ^ 1]->bssid,
+ wilc->vif[i ^ 1]->src_addr, 6))
+ wilc_set_wfi_drv_handler(vif,
+ wilc_get_vif_idx(vif),
+ 0);
+ else
+ wilc_set_wfi_drv_handler(vif,
+ wilc_get_vif_idx(vif),
+ 1);
+ }
+ wilc_set_operation_mode(vif, vif->iftype);
+
+ wilc_get_mac_address(vif, mac_add);
+ netdev_dbg(ndev, "Mac address: %pM\n", mac_add);
memcpy(wl->vif[i]->src_addr, mac_add, ETH_ALEN);
+
break;
}
}
@@ -1043,7 +961,7 @@ int wilc_mac_open(struct net_device *ndev)
memcpy(ndev->dev_addr, wl->vif[i]->src_addr, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- PRINT_ER("Error: Wrong MAC address\n");
+ netdev_err(ndev, "Wrong MAC address\n");
wilc_deinit_host_int(ndev);
wilc1000_wlan_deinit(ndev);
return -EINVAL;
@@ -1065,7 +983,7 @@ int wilc_mac_open(struct net_device *ndev)
static struct net_device_stats *mac_stats(struct net_device *dev)
{
- struct wilc_vif *vif= netdev_priv(dev);
+ struct wilc_vif *vif = netdev_priv(dev);
return &vif->netstats;
}
@@ -1080,57 +998,41 @@ static void wilc_set_multicast_list(struct net_device *dev)
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
vif = netdev_priv(dev);
- hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv;
-
- if (!dev)
- return;
+ hif_drv = (struct host_if_drv *)priv->hif_drv;
- PRINT_D(INIT_DBG, "Setting Multicast List with count = %d.\n",
- dev->mc.count);
-
- if (dev->flags & IFF_PROMISC) {
- PRINT_D(INIT_DBG, "Set promiscuous mode ON, retrive all packets\n");
+ if (dev->flags & IFF_PROMISC)
return;
- }
if ((dev->flags & IFF_ALLMULTI) ||
(dev->mc.count) > WILC_MULTICAST_TABLE_SIZE) {
- PRINT_D(INIT_DBG, "Disable multicast filter, retrive all multicast packets\n");
wilc_setup_multicast_filter(vif, false, 0);
return;
}
if ((dev->mc.count) == 0) {
- PRINT_D(INIT_DBG, "Enable multicast filter, retrive directed packets only.\n");
wilc_setup_multicast_filter(vif, true, 0);
return;
}
netdev_for_each_mc_addr(ha, dev) {
memcpy(wilc_multicast_mac_addr_list[i], ha->addr, ETH_ALEN);
- PRINT_D(INIT_DBG, "Entry[%d]: %x:%x:%x:%x:%x:%x\n", i,
- wilc_multicast_mac_addr_list[i][0],
- wilc_multicast_mac_addr_list[i][1],
- wilc_multicast_mac_addr_list[i][2],
- wilc_multicast_mac_addr_list[i][3],
- wilc_multicast_mac_addr_list[i][4],
- wilc_multicast_mac_addr_list[i][5]);
+ netdev_dbg(dev, "Entry[%d]: %x:%x:%x:%x:%x:%x\n", i,
+ wilc_multicast_mac_addr_list[i][0],
+ wilc_multicast_mac_addr_list[i][1],
+ wilc_multicast_mac_addr_list[i][2],
+ wilc_multicast_mac_addr_list[i][3],
+ wilc_multicast_mac_addr_list[i][4],
+ wilc_multicast_mac_addr_list[i][5]);
i++;
}
wilc_setup_multicast_filter(vif, true, (dev->mc.count));
-
- return;
}
static void linux_wlan_tx_complete(void *priv, int status)
{
- struct tx_complete_data *pv_data = (struct tx_complete_data *)priv;
+ struct tx_complete_data *pv_data = priv;
- if (status == 1)
- PRINT_D(TX_DBG, "Packet sent successfully - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb);
- else
- PRINT_D(TX_DBG, "Couldn't send packet - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb);
dev_kfree_skb(pv_data->skb);
kfree(pv_data);
}
@@ -1148,16 +1050,13 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
vif = netdev_priv(ndev);
wilc = vif->wilc;
- PRINT_D(TX_DBG, "Sending packet just received from TCP/IP\n");
-
if (skb->dev != ndev) {
- PRINT_ER("Packet not destined to this device\n");
+ netdev_err(ndev, "Packet not destined to this device\n");
return 0;
}
tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
if (!tx_data) {
- PRINT_ER("Failed to allocate memory for tx_data structure\n");
dev_kfree_skb(skb);
netif_wake_queue(ndev);
return 0;
@@ -1169,21 +1068,19 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
eth_h = (struct ethhdr *)(skb->data);
if (eth_h->h_proto == 0x8e88)
- PRINT_D(INIT_DBG, "EAPOL transmitted\n");
+ netdev_dbg(ndev, "EAPOL transmitted\n");
ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
udp_buf = (char *)ih + sizeof(struct iphdr);
if ((udp_buf[1] == 68 && udp_buf[3] == 67) ||
(udp_buf[1] == 67 && udp_buf[3] == 68))
- PRINT_D(GENERIC_DBG, "DHCP Message transmitted, type:%x %x %x\n",
- udp_buf[248], udp_buf[249], udp_buf[250]);
+ netdev_dbg(ndev, "DHCP Message transmitted, type:%x %x %x\n",
+ udp_buf[248], udp_buf[249], udp_buf[250]);
- PRINT_D(TX_DBG, "Sending packet - Size = %d - Address = %p - SKB = %p\n", tx_data->size, tx_data->buff, tx_data->skb);
- PRINT_D(TX_DBG, "Adding tx packet to TX Queue\n");
vif->netstats.tx_packets++;
vif->netstats.tx_bytes += tx_data->size;
- tx_data->pBssid = wilc->vif[vif->u8IfIdx]->bssid;
+ tx_data->bssid = wilc->vif[vif->idx]->bssid;
queue_count = wilc_wlan_txq_add_net_pkt(ndev, (void *)tx_data,
tx_data->buff, tx_data->size,
linux_wlan_tx_complete);
@@ -1206,39 +1103,29 @@ int wilc_mac_close(struct net_device *ndev)
vif = netdev_priv(ndev);
if (!vif || !vif->ndev || !vif->ndev->ieee80211_ptr ||
- !vif->ndev->ieee80211_ptr->wiphy) {
- PRINT_ER("vif = NULL\n");
+ !vif->ndev->ieee80211_ptr->wiphy)
return 0;
- }
priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
wl = vif->wilc;
- if (!priv) {
- PRINT_ER("priv = NULL\n");
+ if (!priv)
return 0;
- }
- hif_drv = (struct host_if_drv *)priv->hWILCWFIDrv;
+ hif_drv = (struct host_if_drv *)priv->hif_drv;
- PRINT_D(GENERIC_DBG, "Mac close\n");
+ netdev_dbg(ndev, "Mac close\n");
- if (!wl) {
- PRINT_ER("wl = NULL\n");
+ if (!wl)
return 0;
- }
- if (!hif_drv) {
- PRINT_ER("hif_drv = NULL\n");
+ if (!hif_drv)
return 0;
- }
- if ((wl->open_ifcs) > 0) {
+ if ((wl->open_ifcs) > 0)
wl->open_ifcs--;
- } else {
- PRINT_ER("ERROR: MAC close called while number of opened interfaces is zero\n");
+ else
return 0;
- }
if (vif->ndev) {
netif_stop_queue(vif->ndev);
@@ -1247,7 +1134,7 @@ int wilc_mac_close(struct net_device *ndev)
}
if (wl->open_ifcs == 0) {
- PRINT_D(GENERIC_DBG, "Deinitializing wilc1000\n");
+ netdev_dbg(ndev, "Deinitializing wilc1000\n");
wl->close = 1;
wilc1000_wlan_deinit(ndev);
WILC_WFI_deinit_mon_interface();
@@ -1278,7 +1165,7 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
switch (cmd) {
case SIOCSIWPRIV:
{
- struct iwreq *wrq = (struct iwreq *) req;
+ struct iwreq *wrq = (struct iwreq *)req;
size = wrq->u.data.length;
@@ -1291,16 +1178,14 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
if (strncasecmp(buff, "RSSI", length) == 0) {
priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
ret = wilc_get_rssi(vif, &rssi);
- if (ret)
- PRINT_ER("Failed to send get rssi param's message queue ");
- PRINT_INFO(GENERIC_DBG, "RSSI :%d\n", rssi);
+ netdev_info(ndev, "RSSI :%d\n", rssi);
rssi += 5;
snprintf(buff, size, "rssi %d", rssi);
if (copy_to_user(wrq->u.data.pointer, buff, size)) {
- PRINT_ER("%s: failed to copy data to user buffer\n", __func__);
+ netdev_err(ndev, "failed to copy\n");
ret = -EFAULT;
goto done;
}
@@ -1311,7 +1196,7 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
default:
{
- PRINT_INFO(GENERIC_DBG, "Command - %d - has been received\n", cmd);
+ netdev_info(ndev, "Command - %d - has been received\n", cmd);
ret = -EOPNOTSUPP;
goto done;
}
@@ -1333,6 +1218,9 @@ void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset)
struct net_device *wilc_netdev;
struct wilc_vif *vif;
+ if (!wilc)
+ return;
+
wilc_netdev = get_if_handler(wilc, buff);
if (!wilc_netdev)
return;
@@ -1345,18 +1233,11 @@ void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset)
buff_to_send = buff;
skb = dev_alloc_skb(frame_len);
- if (!skb) {
- PRINT_ER("Low memory - packet droped\n");
+ if (!skb)
return;
- }
- if (!wilc || !wilc_netdev)
- PRINT_ER("wilc_netdev in wilc is NULL");
skb->dev = wilc_netdev;
- if (!skb->dev)
- PRINT_ER("skb->dev is NULL\n");
-
memcpy(skb_put(skb, frame_len), buff_to_send, frame_len);
skb->protocol = eth_type_trans(skb, wilc_netdev);
@@ -1364,7 +1245,7 @@ void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset)
vif->netstats.rx_bytes += frame_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
stats = netif_rx(skb);
- PRINT_D(RX_DBG, "netif_rx ret value is: %d\n", stats);
+ netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
}
}
@@ -1403,7 +1284,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
release_firmware(wilc->firmware);
if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
- wilc_lock_timeout(wilc, &close_exit_sync, 12 * 1000);
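+		/* wait up to 5s for closing interfaces to post close_exit_sync */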
+ wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
for (i = 0; i < NUM_CONCURRENT_IFC; i++)
if (wilc->vif[i]->ndev)
@@ -1424,7 +1305,7 @@ EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
int gpio, const struct wilc_hif_func *ops)
{
- int i;
+ int i, ret;
struct wilc_vif *vif;
struct net_device *ndev;
struct wilc *wl;
@@ -1444,10 +1325,8 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
ndev = alloc_etherdev(sizeof(struct wilc_vif));
- if (!ndev) {
- PRINT_ER("Failed to allocate ethernet dev\n");
- return -1;
- }
+ if (!ndev)
+ return -ENOMEM;
vif = netdev_priv(ndev);
memset(vif, 0, sizeof(struct wilc_vif));
@@ -1457,7 +1336,7 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
else
strcpy(ndev->name, "p2p%d");
- vif->u8IfIdx = wl->vif_num;
+ vif->idx = wl->vif_num;
vif->wilc = *wilc;
wl->vif[i] = vif;
wl->vif[wl->vif_num]->ndev = ndev;
@@ -1466,13 +1345,14 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
{
struct wireless_dev *wdev;
+
wdev = wilc_create_wiphy(ndev, dev);
if (dev)
SET_NETDEV_DEV(ndev, dev);
if (!wdev) {
- PRINT_ER("Can't register WILC Wiphy\n");
+ netdev_err(ndev, "Can't register WILC Wiphy\n");
return -1;
}
@@ -1485,11 +1365,9 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
vif->netstats.tx_bytes = 0;
}
- if (register_netdev(ndev)) {
- PRINT_ER("Device couldn't be registered - %s\n",
- ndev->name);
- return -1;
- }
+ ret = register_netdev(ndev);
+ if (ret)
+ return ret;
vif->iftype = STATION_MODE;
vif->mac_opened = 0;
diff --git a/drivers/staging/wilc1000/linux_wlan_common.h b/drivers/staging/wilc1000/linux_wlan_common.h
deleted file mode 100644
index 5d40f05124c1..000000000000
--- a/drivers/staging/wilc1000/linux_wlan_common.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef LINUX_WLAN_COMMON_H
-#define LINUX_WLAN_COMMON_H
-
-enum debug_region {
- Generic_debug = 0,
- Hostapd_debug,
- Hostinf_debug,
- CFG80211_debug,
- Coreconfig_debug,
- Interrupt_debug,
- TX_debug,
- RX_debug,
- Lock_debug,
- Tcp_enhance,
- Spin_debug,
-
- Init_debug,
- Bus_debug,
- Mem_debug,
- Firmware_debug,
- COMP = 0xFFFFFFFF,
-};
-
-#define GENERIC_DBG (1 << Generic_debug)
-#define HOSTAPD_DBG (1 << Hostapd_debug)
-#define HOSTINF_DBG (1 << Hostinf_debug)
-#define CORECONFIG_DBG (1 << Coreconfig_debug)
-#define CFG80211_DBG (1 << CFG80211_debug)
-#define INT_DBG (1 << Interrupt_debug)
-#define TX_DBG (1 << TX_debug)
-#define RX_DBG (1 << RX_debug)
-#define LOCK_DBG (1 << Lock_debug)
-#define TCP_ENH (1 << Tcp_enhance)
-#define SPIN_DEBUG (1 << Spin_debug)
-#define INIT_DBG (1 << Init_debug)
-#define BUS_DBG (1 << Bus_debug)
-#define MEM_DBG (1 << Mem_debug)
-#define FIRM_DBG (1 << Firmware_debug)
-
-#if defined (WILC_DEBUGFS)
-extern atomic_t WILC_REGION;
-extern atomic_t WILC_DEBUG_LEVEL;
-
-#define DEBUG BIT(0)
-#define INFO BIT(1)
-#define WRN BIT(2)
-#define ERR BIT(3)
-
-#define PRINT_D(region, ...) \
- do { \
- if ((atomic_read(&WILC_DEBUG_LEVEL) & DEBUG) && \
- ((atomic_read(&WILC_REGION)) & (region))) { \
- printk("DBG [%s: %d]", __func__, __LINE__); \
- printk(__VA_ARGS__); \
- } \
- } while (0)
-
-#define PRINT_INFO(region, ...) \
- do { \
- if ((atomic_read(&WILC_DEBUG_LEVEL) & INFO) && \
- ((atomic_read(&WILC_REGION)) & (region))) { \
- printk("INFO [%s]", __func__); \
- printk(__VA_ARGS__); \
- } \
- } while (0)
-
-#define PRINT_WRN(region, ...) \
- do { \
- if ((atomic_read(&WILC_DEBUG_LEVEL) & WRN) && \
- ((atomic_read(&WILC_REGION)) & (region))) { \
- printk("WRN [%s: %d]", __func__, __LINE__); \
- printk(__VA_ARGS__); \
- } \
- } while (0)
-
-#define PRINT_ER(...) \
- do { \
- if ((atomic_read(&WILC_DEBUG_LEVEL) & ERR)) { \
- printk("ERR [%s: %d]", __func__, __LINE__); \
- printk(__VA_ARGS__); \
- } \
- } while (0)
-
-#else
-
-#define REGION (INIT_DBG | GENERIC_DBG | CFG80211_DBG | FIRM_DBG | HOSTAPD_DBG)
-
-#define DEBUG 1
-#define INFO 0
-#define WRN 0
-
-#define PRINT_D(region, ...) \
- do { \
- if (DEBUG == 1 && ((REGION)&(region))) { \
- printk("DBG [%s: %d]", __func__, __LINE__); \
- printk(__VA_ARGS__); \
- } \
- } while (0)
-
-#define PRINT_INFO(region, ...) \
- do { \
- if (INFO == 1 && ((REGION)&(region))) { \
- printk("INFO [%s]", __func__); \
- printk(__VA_ARGS__); \
- } \
- } while (0)
-
-#define PRINT_WRN(region, ...) \
- do { \
- if (WRN == 1 && ((REGION)&(region))) { \
- printk("WRN [%s: %d]", __func__, __LINE__); \
- printk(__VA_ARGS__); \
- } \
- } while (0)
-
-#define PRINT_ER(...) \
- do { \
- printk("ERR [%s: %d]", __func__, __LINE__); \
- printk(__VA_ARGS__); \
- } while (0)
-
-#endif
-
-#define FN_IN /* PRINT_D(">>> \n") */
-#define FN_OUT /* PRINT_D("<<<\n") */
-
-#define LINUX_RX_SIZE (96 * 1024)
-#define LINUX_TX_SIZE (64 * 1024)
-
-
-#define WILC_MULTICAST_TABLE_SIZE 8
-
-#if defined (BEAGLE_BOARD)
- #define SPI_CHANNEL 4
-
- #if SPI_CHANNEL == 4
- #define MODALIAS "wilc_spi4"
- #define GPIO_NUM 162
- #else
- #define MODALIAS "wilc_spi3"
- #define GPIO_NUM 133
- #endif
-#elif defined(PLAT_WMS8304) /* rachel */
- #define MODALIAS "wilc_spi"
- #define GPIO_NUM 139
-#elif defined (PLAT_RKXXXX)
- #define MODALIAS "WILC_IRQ"
- #define GPIO_NUM RK30_PIN3_PD2 /* RK30_PIN3_PA1 */
-/* RK30_PIN3_PD2 */
-/* RK2928_PIN1_PA7 */
-
-#elif defined(CUSTOMER_PLATFORM)
-/*
- TODO : specify MODALIAS name and GPIO number. This is certainly necessary for SPI interface.
- *
- * ex)
- * #define MODALIAS "WILC_SPI"
- * #define GPIO_NUM 139
- */
-
-#else
-/* base on SAMA5D3_Xplained Board */
- #define MODALIAS "WILC_SPI"
- #define GPIO_NUM 0x44
-#endif
-#endif
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 27c653a0cdf9..fcbc95d19d8e 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -23,11 +23,12 @@ static struct dentry *wilc_dir;
/*
* --------------------------------------------------------------------------------
*/
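+/* log verbosity bits, formerly defined in the removed linux_wlan_common.h */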
+#define DEBUG BIT(0)
+#define INFO BIT(1)
+#define WRN BIT(2)
+#define ERR BIT(3)
-#define DBG_REGION_ALL (GENERIC_DBG | HOSTAPD_DBG | HOSTINF_DBG | CORECONFIG_DBG | CFG80211_DBG | INT_DBG | TX_DBG | RX_DBG | LOCK_DBG | INIT_DBG | BUS_DBG | MEM_DBG)
#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR)
-atomic_t WILC_REGION = ATOMIC_INIT(INIT_DBG | GENERIC_DBG | CFG80211_DBG | FIRM_DBG | HOSTAPD_DBG);
-EXPORT_SYMBOL_GPL(WILC_REGION);
atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
@@ -68,48 +69,9 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
atomic_set(&WILC_DEBUG_LEVEL, (int)flag);
if (flag == 0)
- printk("Debug-level disabled\n");
+ printk(KERN_INFO "Debug-level disabled\n");
else
- printk("Debug-level enabled\n");
-
- return count;
-}
-
-static ssize_t wilc_debug_region_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos)
-{
- char buf[128];
- int res = 0;
-
- /* only allow read from start */
- if (*ppos > 0)
- return 0;
-
- res = scnprintf(buf, sizeof(buf), "Debug region: %x\n", atomic_read(&WILC_REGION));
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, res);
-}
-
-static ssize_t wilc_debug_region_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
-{
- char buffer[128] = {};
- int flag;
-
- if (count > sizeof(buffer))
- return -EINVAL;
-
- if (copy_from_user(buffer, buf, count)) {
- return -EFAULT;
- }
-
- flag = buffer[0] - '0';
-
- if (flag > DBG_REGION_ALL) {
- printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_REGION));
- return -EFAULT;
- }
-
- atomic_set(&WILC_REGION, (int)flag);
- printk("new debug-region is %x\n", atomic_read(&WILC_REGION));
+ printk(KERN_INFO "Debug-level enabled\n");
return count;
}
@@ -130,12 +92,11 @@ struct wilc_debugfs_info_t {
const char *name;
int perm;
unsigned int data;
- struct file_operations fops;
+ const struct file_operations fops;
};
static struct wilc_debugfs_info_t debugfs_info[] = {
{ "wilc_debug_level", 0666, (DEBUG | ERR), FOPS(NULL, wilc_debug_level_read, wilc_debug_level_write, NULL), },
- { "wilc_debug_region", 0666, (INIT_DBG | GENERIC_DBG | CFG80211_DBG), FOPS(NULL, wilc_debug_region_read, wilc_debug_region_write, NULL), },
};
static int __init wilc_debugfs_init(void)
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.c b/drivers/staging/wilc1000/wilc_msgqueue.c
index 098390cdf319..6cb894e58f6d 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.c
+++ b/drivers/staging/wilc1000/wilc_msgqueue.c
@@ -1,7 +1,6 @@
#include "wilc_msgqueue.h"
#include <linux/spinlock.h>
-#include "linux_wlan_common.h"
#include <linux/errno.h>
#include <linux/slab.h>
@@ -11,13 +10,13 @@
 * @note copied from FLO glue implementation
* @version 1.0
*/
-int wilc_mq_create(WILC_MsgQueueHandle *pHandle)
+int wilc_mq_create(struct message_queue *mq)
{
- spin_lock_init(&pHandle->strCriticalSection);
- sema_init(&pHandle->hSem, 0);
- pHandle->pstrMessageList = NULL;
- pHandle->u32ReceiversCount = 0;
- pHandle->bExiting = false;
+ spin_lock_init(&mq->lock);
+ sema_init(&mq->sem, 0);
+ INIT_LIST_HEAD(&mq->msg_list);
+ mq->recv_count = 0;
+ mq->exiting = false;
return 0;
}
@@ -27,21 +26,22 @@ int wilc_mq_create(WILC_MsgQueueHandle *pHandle)
 * @note copied from FLO glue implementation
* @version 1.0
*/
-int wilc_mq_destroy(WILC_MsgQueueHandle *pHandle)
+int wilc_mq_destroy(struct message_queue *mq)
{
- pHandle->bExiting = true;
+ struct message *msg;
+
+ mq->exiting = true;
/* Release any waiting receiver thread. */
- while (pHandle->u32ReceiversCount > 0) {
- up(&pHandle->hSem);
- pHandle->u32ReceiversCount--;
+ while (mq->recv_count > 0) {
+ up(&mq->sem);
+ mq->recv_count--;
}
- while (pHandle->pstrMessageList) {
- Message *pstrMessge = pHandle->pstrMessageList->pstrNext;
-
- kfree(pHandle->pstrMessageList);
- pHandle->pstrMessageList = pstrMessge;
+ while (!list_empty(&mq->msg_list)) {
+ msg = list_first_entry(&mq->msg_list, struct message, list);
+ list_del(&msg->list);
+		kfree(msg->buf);
+		kfree(msg);
}
return 0;
@@ -53,53 +53,39 @@ int wilc_mq_destroy(WILC_MsgQueueHandle *pHandle)
 * @note copied from FLO glue implementation
* @version 1.0
*/
-int wilc_mq_send(WILC_MsgQueueHandle *pHandle,
- const void *pvSendBuffer, u32 u32SendBufferSize)
+int wilc_mq_send(struct message_queue *mq,
+ const void *send_buf, u32 send_buf_size)
{
unsigned long flags;
- Message *pstrMessage = NULL;
+ struct message *new_msg = NULL;
- if ((!pHandle) || (u32SendBufferSize == 0) || (!pvSendBuffer)) {
- PRINT_ER("pHandle or pvSendBuffer is null\n");
- return -EFAULT;
- }
+ if (!mq || (send_buf_size == 0) || !send_buf)
+ return -EINVAL;
- if (pHandle->bExiting) {
- PRINT_ER("pHandle fail\n");
+ if (mq->exiting)
return -EFAULT;
- }
/* construct a new message */
- pstrMessage = kmalloc(sizeof(Message), GFP_ATOMIC);
- if (!pstrMessage)
+ new_msg = kmalloc(sizeof(*new_msg), GFP_ATOMIC);
+ if (!new_msg)
return -ENOMEM;
- pstrMessage->u32Length = u32SendBufferSize;
- pstrMessage->pstrNext = NULL;
- pstrMessage->pvBuffer = kmemdup(pvSendBuffer, u32SendBufferSize,
- GFP_ATOMIC);
- if (!pstrMessage->pvBuffer) {
- kfree(pstrMessage);
+ new_msg->len = send_buf_size;
+ INIT_LIST_HEAD(&new_msg->list);
+ new_msg->buf = kmemdup(send_buf, send_buf_size, GFP_ATOMIC);
+ if (!new_msg->buf) {
+ kfree(new_msg);
return -ENOMEM;
}
- spin_lock_irqsave(&pHandle->strCriticalSection, flags);
+ spin_lock_irqsave(&mq->lock, flags);
/* add it to the message queue */
- if (!pHandle->pstrMessageList) {
- pHandle->pstrMessageList = pstrMessage;
- } else {
- Message *pstrTailMsg = pHandle->pstrMessageList;
+ list_add_tail(&new_msg->list, &mq->msg_list);
- while (pstrTailMsg->pstrNext)
- pstrTailMsg = pstrTailMsg->pstrNext;
-
- pstrTailMsg->pstrNext = pstrMessage;
- }
+ spin_unlock_irqrestore(&mq->lock, flags);
- spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
-
- up(&pHandle->hSem);
+ up(&mq->sem);
return 0;
}
@@ -110,62 +96,49 @@ int wilc_mq_send(WILC_MsgQueueHandle *pHandle,
 * @note copied from FLO glue implementation
* @version 1.0
*/
-int wilc_mq_recv(WILC_MsgQueueHandle *pHandle,
- void *pvRecvBuffer, u32 u32RecvBufferSize,
- u32 *pu32ReceivedLength)
+int wilc_mq_recv(struct message_queue *mq,
+ void *recv_buf, u32 recv_buf_size, u32 *recv_len)
{
- Message *pstrMessage;
+ struct message *msg;
unsigned long flags;
- if ((!pHandle) || (u32RecvBufferSize == 0)
- || (!pvRecvBuffer) || (!pu32ReceivedLength)) {
- PRINT_ER("pHandle or pvRecvBuffer is null\n");
+ if (!mq || (recv_buf_size == 0) || !recv_buf || !recv_len)
return -EINVAL;
- }
- if (pHandle->bExiting) {
- PRINT_ER("pHandle fail\n");
+ if (mq->exiting)
return -EFAULT;
- }
-
- spin_lock_irqsave(&pHandle->strCriticalSection, flags);
- pHandle->u32ReceiversCount++;
- spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
- down(&pHandle->hSem);
-
- if (pHandle->bExiting) {
- PRINT_ER("pHandle fail\n");
- return -EFAULT;
- }
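+	/* count this receiver so wilc_mq_destroy() knows to wake it */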
+ spin_lock_irqsave(&mq->lock, flags);
+ mq->recv_count++;
+ spin_unlock_irqrestore(&mq->lock, flags);
- spin_lock_irqsave(&pHandle->strCriticalSection, flags);
+ down(&mq->sem);
+ spin_lock_irqsave(&mq->lock, flags);
- pstrMessage = pHandle->pstrMessageList;
- if (!pstrMessage) {
- spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
- PRINT_ER("pstrMessage is null\n");
+ if (list_empty(&mq->msg_list)) {
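+		/* woken with nothing queued; give the semaphore token back */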
+ spin_unlock_irqrestore(&mq->lock, flags);
+ up(&mq->sem);
return -EFAULT;
}
/* check buffer size */
- if (u32RecvBufferSize < pstrMessage->u32Length) {
- spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
- up(&pHandle->hSem);
- PRINT_ER("u32RecvBufferSize overflow\n");
+ msg = list_first_entry(&mq->msg_list, struct message, list);
+ if (recv_buf_size < msg->len) {
+ spin_unlock_irqrestore(&mq->lock, flags);
+ up(&mq->sem);
return -EOVERFLOW;
}
/* consume the message */
- pHandle->u32ReceiversCount--;
- memcpy(pvRecvBuffer, pstrMessage->pvBuffer, pstrMessage->u32Length);
- *pu32ReceivedLength = pstrMessage->u32Length;
+ mq->recv_count--;
+ memcpy(recv_buf, msg->buf, msg->len);
+ *recv_len = msg->len;
- pHandle->pstrMessageList = pstrMessage->pstrNext;
+ list_del(&msg->list);
- kfree(pstrMessage->pvBuffer);
- kfree(pstrMessage);
+ kfree(msg->buf);
+ kfree(msg);
- spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
return 0;
}
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.h b/drivers/staging/wilc1000/wilc_msgqueue.h
index d7e0328bacee..846a4840e6e7 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.h
+++ b/drivers/staging/wilc1000/wilc_msgqueue.h
@@ -1,94 +1,28 @@
#ifndef __WILC_MSG_QUEUE_H__
#define __WILC_MSG_QUEUE_H__
-/*!
- * @file wilc_msgqueue.h
- * @brief Message Queue OS wrapper functionality
- * @author syounan
- * @sa wilc_oswrapper.h top level OS wrapper file
- * @date 30 Aug 2010
- * @version 1.0
- */
-
#include <linux/semaphore.h>
-
-/* Message Queue type is a structure */
-typedef struct __Message_struct {
- void *pvBuffer;
- u32 u32Length;
- struct __Message_struct *pstrNext;
-} Message;
-
-typedef struct __MessageQueue_struct {
- struct semaphore hSem;
- spinlock_t strCriticalSection;
- bool bExiting;
- u32 u32ReceiversCount;
- Message *pstrMessageList;
-} WILC_MsgQueueHandle;
-
-/*!
- * @brief Creates a new Message queue
- * @details Creates a new Message queue, if the feature
- * CONFIG_WILC_MSG_QUEUE_IPC_NAME is enabled and pstrAttrs->pcName
- * is not Null, then this message queue can be used for IPC with
- * any other message queue having the same name in the system
- * @param[in,out] pHandle handle to the message queue object
- * @param[in] pstrAttrs Optional attributes, NULL for default
- * @return Error code indicating success/failure
- * @author syounan
- * @date 30 Aug 2010
- * @version 1.0
- */
-int wilc_mq_create(WILC_MsgQueueHandle *pHandle);
-
-/*!
- * @brief Sends a message
- * @details Sends a message, this API will block until the message is
- * actually sent or until it is timedout (as long as the feature
- * CONFIG_WILC_MSG_QUEUE_TIMEOUT is enabled and pstrAttrs->u32Timeout
- * is not set to WILC_OS_INFINITY), zero timeout is a valid value
- * @param[in] pHandle handle to the message queue object
- * @param[in] pvSendBuffer pointer to the data to send
- * @param[in] u32SendBufferSize the size of the data to send
- * @param[in] pstrAttrs Optional attributes, NULL for default
- * @return Error code indicating success/failure
- * @author syounan
- * @date 30 Aug 2010
- * @version 1.0
- */
-int wilc_mq_send(WILC_MsgQueueHandle *pHandle,
- const void *pvSendBuffer, u32 u32SendBufferSize);
-
-/*!
- * @brief Receives a message
- * @details Receives a message, this API will block until a message is
- * received or until it is timedout (as long as the feature
- * CONFIG_WILC_MSG_QUEUE_TIMEOUT is enabled and pstrAttrs->u32Timeout
- * is not set to WILC_OS_INFINITY), zero timeout is a valid value
- * @param[in] pHandle handle to the message queue object
- * @param[out] pvRecvBuffer pointer to a buffer to fill with the received message
- * @param[in] u32RecvBufferSize the size of the receive buffer
- * @param[out] pu32ReceivedLength the length of received data
- * @param[in] pstrAttrs Optional attributes, NULL for default
- * @return Error code indicating success/failure
- * @author syounan
- * @date 30 Aug 2010
- * @version 1.0
- */
-int wilc_mq_recv(WILC_MsgQueueHandle *pHandle,
- void *pvRecvBuffer, u32 u32RecvBufferSize,
- u32 *pu32ReceivedLength);
-
-/*!
- * @brief Destroys an existing Message queue
- * @param[in] pHandle handle to the message queue object
- * @param[in] pstrAttrs Optional attributes, NULL for default
- * @return Error code indicating success/failure
- * @author syounan
- * @date 30 Aug 2010
- * @version 1.0
- */
-int wilc_mq_destroy(WILC_MsgQueueHandle *pHandle);
+#include <linux/list.h>
+
+struct message {
+ void *buf;
+ u32 len;
+ struct list_head list;
+};
+
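+/*
+ * sem counts queued messages, lock protects msg_list, and recv_count
+ * tracks blocked receivers so wilc_mq_destroy() can release them.
+ */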
+struct message_queue {
+ struct semaphore sem;
+ spinlock_t lock;
+ bool exiting;
+ u32 recv_count;
+ struct list_head msg_list;
+};
+
+int wilc_mq_create(struct message_queue *mq);
+int wilc_mq_send(struct message_queue *mq,
+ const void *send_buf, u32 send_buf_size);
+int wilc_mq_recv(struct message_queue *mq,
+ void *recv_buf, u32 recv_buf_size, u32 *recv_len);
+int wilc_mq_destroy(struct message_queue *mq);
#endif
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index e961b5004902..a839a7967dd8 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -30,18 +30,19 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
#define WILC_SDIO_BLOCK_SIZE 512
-typedef struct {
+struct wilc_sdio {
bool irq_gpio;
u32 block_size;
int nint;
#define MAX_NUN_INT_THRPT_ENH2 (5) /* Max num interrupts allowed in registers 0xf7, 0xf8 */
int has_thrpt_enh3;
-} wilc_sdio_t;
+};
-static wilc_sdio_t g_sdio;
+static struct wilc_sdio g_sdio;
static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data);
static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data);
+static int sdio_init(struct wilc *wilc, bool resume);
static void wilc_sdio_interrupt(struct sdio_func *func)
{
@@ -50,7 +51,7 @@ static void wilc_sdio_interrupt(struct sdio_func *func)
sdio_claim_host(func);
}
-static int wilc_sdio_cmd52(struct wilc *wilc, sdio_cmd52_t *cmd)
+static int wilc_sdio_cmd52(struct wilc *wilc, struct sdio_cmd52 *cmd)
{
struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
int ret;
@@ -80,7 +81,7 @@ static int wilc_sdio_cmd52(struct wilc *wilc, sdio_cmd52_t *cmd)
}
-static int wilc_sdio_cmd53(struct wilc *wilc, sdio_cmd53_t *cmd)
+static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
{
struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
int size, ret;
@@ -142,11 +143,82 @@ static void linux_sdio_remove(struct sdio_func *func)
wilc_netdev_cleanup(sdio_get_drvdata(func));
}
+static int sdio_reset(struct wilc *wilc)
+{
+ struct sdio_cmd52 cmd;
+ int ret;
+ struct sdio_func *func = dev_to_sdio_func(wilc->dev);
+
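+	/*
+	 * CMD52 write of 0x8 to function-0 register 0x6 (the CCCR I/O
+	 * Abort register) sets the RES bit and soft-resets the card.
+	 */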
+ cmd.read_write = 1;
+ cmd.function = 0;
+ cmd.raw = 0;
+ cmd.address = 0x6;
+ cmd.data = 0x8;
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev, "Fail cmd 52, reset cmd ...\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int wilc_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wilc *wilc = sdio_get_drvdata(func);
+ int ret;
+
+ dev_info(dev, "sdio suspend\n");
+ chip_wakeup(wilc);
+
+ if (!wilc->suspend_event) {
+ wilc_chip_sleep_manually(wilc);
+ } else {
+ host_sleep_notify(wilc);
+ chip_allow_sleep(wilc);
+ }
+
+ ret = sdio_reset(wilc);
+ if (ret) {
+ dev_err(&func->dev, "Fail reset sdio\n");
+ return ret;
+ }
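+	/* keep the host claimed while suspended; wilc_sdio_resume() releases it */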
+ sdio_claim_host(func);
+
+ return 0;
+}
+
+static int wilc_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wilc *wilc = sdio_get_drvdata(func);
+
+ dev_info(dev, "sdio resume\n");
+ sdio_release_host(func);
+ chip_wakeup(wilc);
+ sdio_init(wilc, true);
+
+ if (wilc->suspend_event)
+ host_wakeup_notify(wilc);
+
+ chip_allow_sleep(wilc);
+
+ return 0;
+}
+
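+/* system sleep callbacks, attached to the sdio driver's .drv.pm below */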
+static const struct dev_pm_ops wilc_sdio_pm_ops = {
+ .suspend = wilc_sdio_suspend,
+ .resume = wilc_sdio_resume,
+};
+
static struct sdio_driver wilc1000_sdio_driver = {
.name = SDIO_MODALIAS,
.id_table = wilc_sdio_ids,
.probe = linux_sdio_probe,
.remove = linux_sdio_remove,
+ .drv = {
+ .pm = &wilc_sdio_pm_ops,
+ }
};
module_driver(wilc1000_sdio_driver,
sdio_register_driver,
@@ -185,11 +257,6 @@ static void wilc_sdio_disable_interrupt(struct wilc *dev)
dev_info(&func->dev, "wilc_sdio_disable_interrupt OUT\n");
}
-static int wilc_sdio_init(void)
-{
- return 1;
-}
-
/********************************************
*
* Function 0
@@ -199,7 +266,7 @@ static int wilc_sdio_init(void)
static int sdio_set_func0_csa_address(struct wilc *wilc, u32 adr)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
int ret;
/**
@@ -240,7 +307,7 @@ _fail_:
static int sdio_set_func0_block_size(struct wilc *wilc, u32 block_size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
int ret;
cmd.read_write = 1;
@@ -276,7 +343,7 @@ _fail_:
static int sdio_set_func1_block_size(struct wilc *wilc, u32 block_size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
int ret;
cmd.read_write = 1;
@@ -315,7 +382,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
data = cpu_to_le32(data);
if ((addr >= 0xf0) && (addr <= 0xff)) {
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
cmd.read_write = 1;
cmd.function = 0;
@@ -329,7 +396,7 @@ static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
goto _fail_;
}
} else {
- sdio_cmd53_t cmd;
+ struct sdio_cmd53 cmd;
/**
* set the AHB address
@@ -364,7 +431,7 @@ static int sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
u32 block_size = g_sdio.block_size;
- sdio_cmd53_t cmd;
+ struct sdio_cmd53 cmd;
int nblk, nleft, ret;
cmd.read_write = 1;
@@ -455,7 +522,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
int ret;
if ((addr >= 0xf0) && (addr <= 0xff)) {
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
cmd.read_write = 0;
cmd.function = 0;
@@ -469,7 +536,7 @@ static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
}
*data = cmd.data;
} else {
- sdio_cmd53_t cmd;
+ struct sdio_cmd53 cmd;
if (!sdio_set_func0_csa_address(wilc, addr))
goto _fail_;
@@ -504,7 +571,7 @@ static int sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
u32 block_size = g_sdio.block_size;
- sdio_cmd53_t cmd;
+ struct sdio_cmd53 cmd;
int nblk, nleft, ret;
cmd.read_write = 0;
@@ -600,22 +667,16 @@ static int sdio_deinit(struct wilc *wilc)
return 1;
}
-static int sdio_init(struct wilc *wilc)
+static int sdio_init(struct wilc *wilc, bool resume)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
int loop, ret;
u32 chipid;
- memset(&g_sdio, 0, sizeof(wilc_sdio_t));
-
- g_sdio.irq_gpio = (wilc->dev_irq_num);
-
- if (!wilc_sdio_init()) {
- dev_err(&func->dev, "Failed io init bus...\n");
- return 0;
- } else {
- return 0;
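+	/* a fresh init clears the cached bus state; resume reuses it */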
+ if (!resume) {
+ memset(&g_sdio, 0, sizeof(struct wilc_sdio));
+ g_sdio.irq_gpio = wilc->dev_irq_num;
}
/**
@@ -706,16 +767,19 @@ static int sdio_init(struct wilc *wilc)
/**
* make sure can read back chip id correctly
**/
- if (!sdio_read_reg(wilc, 0x1000, &chipid)) {
- dev_err(&func->dev, "Fail cmd read chip id...\n");
- goto _fail_;
+ if (!resume) {
+ if (!sdio_read_reg(wilc, 0x1000, &chipid)) {
+ dev_err(&func->dev, "Fail cmd read chip id...\n");
+ goto _fail_;
+ }
+ dev_err(&func->dev, "chipid (%08x)\n", chipid);
+ if ((chipid & 0xfff) > 0x2a0)
+ g_sdio.has_thrpt_enh3 = 1;
+ else
+ g_sdio.has_thrpt_enh3 = 0;
+ dev_info(&func->dev, "has_thrpt_enh3 = %d...\n",
+ g_sdio.has_thrpt_enh3);
}
- dev_err(&func->dev, "chipid (%08x)\n", chipid);
- if ((chipid & 0xfff) > 0x2a0)
- g_sdio.has_thrpt_enh3 = 1;
- else
- g_sdio.has_thrpt_enh3 = 0;
- dev_info(&func->dev, "has_thrpt_enh3 = %d...\n", g_sdio.has_thrpt_enh3);
return 1;
@@ -727,7 +791,7 @@ _fail_:
static int sdio_read_size(struct wilc *wilc, u32 *size)
{
u32 tmp;
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
/**
* Read DMA count in words
@@ -756,7 +820,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
u32 tmp;
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
sdio_read_size(wilc, &tmp);
@@ -835,7 +899,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
if ((val & EN_VMM) == EN_VMM)
reg |= BIT(7);
if (reg) {
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
cmd.read_write = 1;
cmd.function = 0;
@@ -865,7 +929,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
ret = 1;
for (i = 0; i < g_sdio.nint; i++) {
if (flags & 1) {
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
cmd.read_write = 1;
cmd.function = 0;
@@ -913,7 +977,7 @@ static int sdio_clear_int_ext(struct wilc *wilc, u32 val)
vmm_ctl |= BIT(2);
if (vmm_ctl) {
- sdio_cmd52_t cmd;
+ struct sdio_cmd52 cmd;
cmd.read_write = 1;
cmd.function = 0;
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index 86de50c9f7f5..d41b8b6790af 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -18,19 +18,18 @@
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
-#include "linux_wlan_common.h"
#include <linux/string.h>
#include "wilc_wlan_if.h"
#include "wilc_wlan.h"
#include "wilc_wfi_netdevice.h"
-typedef struct {
+struct wilc_spi {
int crc_off;
int nint;
int has_thrpt_enh;
-} wilc_spi_t;
+};
-static wilc_spi_t g_spi;
+static struct wilc_spi g_spi;
static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32);
static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32);
@@ -120,8 +119,6 @@ static u8 crc7(u8 crc, const u8 *buffer, u32 len)
#define USE_SPI_DMA 0
-static const struct wilc1000_ops wilc1000_spi_ops;
-
static int wilc_bus_probe(struct spi_device *spi)
{
int ret, gpio;
@@ -153,7 +150,7 @@ static const struct of_device_id wilc1000_of_match[] = {
};
MODULE_DEVICE_TABLE(of, wilc1000_of_match);
-struct spi_driver wilc1000_spi_driver = {
+static struct spi_driver wilc1000_spi_driver = {
.driver = {
.name = MODALIAS,
.of_match_table = wilc1000_of_match,
@@ -382,9 +379,8 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
break;
}
- if (result != N_OK) {
+ if (result != N_OK)
return result;
- }
if (!g_spi.crc_off)
wb[len - 1] = (crc7(0x7f, (const u8 *)&wb[0], len - 1)) << 1;
@@ -421,9 +417,8 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
return result;
}
/* zero spi write buffers. */
- for (wix = len; wix < len2; wix++) {
+ for (wix = len; wix < len2; wix++)
wb[wix] = 0;
- }
rix = len;
if (wilc_spi_tx_rx(wilc, wb, rb, len2)) {
@@ -447,8 +442,9 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
/* } while(&rptr[1] <= &rb[len2]); */
if (rsp != cmd) {
- dev_err(&spi->dev, "Failed cmd response, cmd (%02x)"
- ", resp (%02x)\n", cmd, rsp);
+ dev_err(&spi->dev,
+ "Failed cmd response, cmd (%02x), resp (%02x)\n",
+ cmd, rsp);
result = N_FAIL;
return result;
}
@@ -516,7 +512,7 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
crc[0] = rb[rix++];
crc[1] = rb[rix++];
} else {
- dev_err(&spi->dev,"buffer overrun when reading crc.\n");
+ dev_err(&spi->dev, "buffer overrun when reading crc.\n");
result = N_FAIL;
return result;
}
@@ -525,9 +521,8 @@ static int spi_cmd_complete(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz,
int ix;
/* some data may be read in response to dummy bytes. */
- for (ix = 0; (rix < len2) && (ix < sz); ) {
+ for (ix = 0; (rix < len2) && (ix < sz); )
b[ix++] = rb[rix++];
- }
sz -= ix;
@@ -682,7 +677,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
**/
if (!g_spi.crc_off) {
if (wilc_spi_tx(wilc, crc, 2)) {
- dev_err(&spi->dev,"Failed data block crc write, bus error...\n");
+ dev_err(&spi->dev, "Failed data block crc write, bus error...\n");
result = N_FAIL;
break;
}
@@ -713,9 +708,8 @@ static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat)
dat = cpu_to_le32(dat);
result = spi_cmd_complete(wilc, CMD_INTERNAL_WRITE, adr, (u8 *)&dat, 4,
0);
- if (result != N_OK) {
+ if (result != N_OK)
dev_err(&spi->dev, "Failed internal write cmd...\n");
- }
return result;
}
@@ -758,9 +752,8 @@ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data)
}
result = spi_cmd_complete(wilc, cmd, addr, (u8 *)&data, 4, clockless);
- if (result != N_OK) {
+ if (result != N_OK)
dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr);
- }
return result;
}
@@ -788,9 +781,8 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
* Data
**/
result = spi_data_write(wilc, buf, size);
- if (result != N_OK) {
+ if (result != N_OK)
dev_err(&spi->dev, "Failed block data write...\n");
- }
return 1;
}
@@ -852,7 +844,7 @@ static int _wilc_spi_deinit(struct wilc *wilc)
return 1;
}
-static int wilc_spi_init(struct wilc *wilc)
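+/* the resume flag is not used here; SPI always performs a full re-init */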
+static int wilc_spi_init(struct wilc *wilc, bool resume)
{
struct spi_device *spi = to_spi_device(wilc->dev);
u32 reg;
@@ -869,7 +861,7 @@ static int wilc_spi_init(struct wilc *wilc)
return 1;
}
- memset(&g_spi, 0, sizeof(wilc_spi_t));
+ memset(&g_spi, 0, sizeof(struct wilc_spi));
/**
* configure protocol
@@ -1076,7 +1068,7 @@ static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val)
ret = wilc_spi_write_reg(wilc,
WILC_VMM_CORE_CTL, 1);
if (!ret) {
- dev_err(&spi->dev,"fail write reg vmm_core_ctl...\n");
+ dev_err(&spi->dev, "fail write reg vmm_core_ctl...\n");
goto _fail_;
}
}
@@ -1126,9 +1118,9 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
return 0;
}
- for (i = 0; (i < 5) && (nint > 0); i++, nint--) {
+ for (i = 0; (i < 5) && (nint > 0); i++, nint--)
reg |= (BIT((27 + i)));
- }
+
ret = wilc_spi_write_reg(wilc, WILC_INTR_ENABLE, reg);
if (!ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
@@ -1143,9 +1135,8 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
return 0;
}
- for (i = 0; (i < 3) && (nint > 0); i++, nint--) {
+ for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
- }
ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
if (!ret) {
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 53fb2d4bb0bd..448a5c8c4514 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -74,6 +74,10 @@ static const struct ieee80211_txrx_stypes
}
};
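+/* advertise wake-on-WLAN support with any trigger accepted */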
+static const struct wiphy_wowlan_support wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY
+};
+
#define WILC_WFI_DWELL_PASSIVE 100
#define WILC_WFI_DWELL_ACTIVE 40
@@ -89,7 +93,7 @@ static const struct ieee80211_txrx_stypes
extern int wilc_mac_open(struct net_device *ndev);
extern int wilc_mac_close(struct net_device *ndev);
-static tstrNetworkInfo last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
+static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
static u32 last_scanned_cnt;
struct timer_list wilc_during_ip_timer;
static struct timer_list hAgingTimer;
@@ -153,7 +157,7 @@ static u8 wlan_channel = INVALID_CHANNEL;
static u8 curr_channel;
static u8 p2p_oui[] = {0x50, 0x6f, 0x9A, 0x09};
static u8 p2p_local_random = 0x01;
-static u8 p2p_recv_random = 0x00;
+static u8 p2p_recv_random;
static u8 p2p_vendor_spec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
static bool wilc_ie;
@@ -188,29 +192,29 @@ static void clear_shadow_scan(void)
if (op_ifcs == 0) {
del_timer_sync(&hAgingTimer);
- PRINT_INFO(CORECONFIG_DBG, "destroy aging timer\n");
for (i = 0; i < last_scanned_cnt; i++) {
- if (last_scanned_shadow[last_scanned_cnt].pu8IEs) {
- kfree(last_scanned_shadow[i].pu8IEs);
- last_scanned_shadow[last_scanned_cnt].pu8IEs = NULL;
+ if (last_scanned_shadow[last_scanned_cnt].ies) {
+ kfree(last_scanned_shadow[i].ies);
+ last_scanned_shadow[last_scanned_cnt].ies = NULL;
}
- wilc_free_join_params(last_scanned_shadow[i].pJoinParams);
- last_scanned_shadow[i].pJoinParams = NULL;
+ kfree(last_scanned_shadow[i].join_params);
+ last_scanned_shadow[i].join_params = NULL;
}
last_scanned_cnt = 0;
}
}
-static u32 get_rssi_avg(tstrNetworkInfo *network_info)
+static u32 get_rssi_avg(struct network_info *network_info)
{
u8 i;
int rssi_v = 0;
- u8 num_rssi = (network_info->strRssi.u8Full) ? NUM_RSSI : (network_info->strRssi.u8Index);
+ u8 num_rssi = (network_info->str_rssi.u8Full) ?
+ NUM_RSSI : (network_info->str_rssi.u8Index);
for (i = 0; i < num_rssi; i++)
- rssi_v += network_info->strRssi.as8RSSI[i];
+ rssi_v += network_info->str_rssi.as8RSSI[i];
rssi_v /= num_rssi;
return rssi_v;
@@ -224,28 +228,36 @@ static void refresh_scan(void *user_void, u8 all, bool direct_scan)
int i;
int rssi = 0;
- priv = (struct wilc_priv *)user_void;
+ priv = user_void;
wiphy = priv->dev->ieee80211_ptr->wiphy;
for (i = 0; i < last_scanned_cnt; i++) {
- tstrNetworkInfo *network_info;
+ struct network_info *network_info;
network_info = &last_scanned_shadow[i];
- if (!network_info->u8Found || all) {
+ if (!network_info->found || all) {
s32 freq;
struct ieee80211_channel *channel;
if (network_info) {
- freq = ieee80211_channel_to_frequency((s32)network_info->u8channel, IEEE80211_BAND_2GHZ);
+ freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
rssi = get_rssi_avg(network_info);
- if (memcmp("DIRECT-", network_info->au8ssid, 7) ||
+ if (memcmp("DIRECT-", network_info->ssid, 7) ||
direct_scan) {
- bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, network_info->au8bssid, network_info->u64Tsf, network_info->u16CapInfo,
- network_info->u16BeaconPeriod, (const u8 *)network_info->pu8IEs,
- (size_t)network_info->u16IEsLen, (((s32)rssi) * 100), GFP_KERNEL);
+ bss = cfg80211_inform_bss(wiphy,
+ channel,
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ network_info->bssid,
+ network_info->tsf_hi,
+ network_info->cap_info,
+ network_info->beacon_period,
+ (const u8 *)network_info->ies,
+ (size_t)network_info->ies_len,
+ (s32)rssi * 100,
+ GFP_KERNEL);
cfg80211_put_bss(wiphy, bss);
}
}
@@ -258,7 +270,7 @@ static void reset_shadow_found(void)
int i;
for (i = 0; i < last_scanned_cnt; i++)
- last_scanned_shadow[i].u8Found = 0;
+ last_scanned_shadow[i].found = 0;
}
static void update_scan_time(void)
@@ -266,7 +278,7 @@ static void update_scan_time(void)
int i;
for (i = 0; i < last_scanned_cnt; i++)
- last_scanned_shadow[i].u32TimeRcvdInScan = jiffies;
+ last_scanned_shadow[i].time_scan = jiffies;
}
static void remove_network_from_shadow(unsigned long arg)
@@ -276,13 +288,12 @@ static void remove_network_from_shadow(unsigned long arg)
for (i = 0; i < last_scanned_cnt; i++) {
- if (time_after(now, last_scanned_shadow[i].u32TimeRcvdInScan + (unsigned long)(SCAN_RESULT_EXPIRE))) {
- PRINT_D(CFG80211_DBG, "Network expired in ScanShadow: %s\n", last_scanned_shadow[i].au8ssid);
-
- kfree(last_scanned_shadow[i].pu8IEs);
- last_scanned_shadow[i].pu8IEs = NULL;
+ if (time_after(now, last_scanned_shadow[i].time_scan +
+ (unsigned long)(SCAN_RESULT_EXPIRE))) {
+ kfree(last_scanned_shadow[i].ies);
+ last_scanned_shadow[i].ies = NULL;
- wilc_free_join_params(last_scanned_shadow[i].pJoinParams);
+ kfree(last_scanned_shadow[i].join_params);
for (j = i; (j < last_scanned_cnt - 1); j++)
last_scanned_shadow[j] = last_scanned_shadow[j + 1];
@@ -291,37 +302,31 @@ static void remove_network_from_shadow(unsigned long arg)
}
}
- PRINT_D(CFG80211_DBG, "Number of cached networks: %d\n",
- last_scanned_cnt);
if (last_scanned_cnt != 0) {
hAgingTimer.data = arg;
mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
- } else {
- PRINT_D(CFG80211_DBG, "No need to restart Aging timer\n");
}
}
static void clear_duringIP(unsigned long arg)
{
- PRINT_D(GENERIC_DBG, "GO:IP Obtained , enable scan\n");
wilc_optaining_ip = false;
}
-static int is_network_in_shadow(tstrNetworkInfo *pstrNetworkInfo,
+static int is_network_in_shadow(struct network_info *pstrNetworkInfo,
void *user_void)
{
int state = -1;
int i;
if (last_scanned_cnt == 0) {
- PRINT_D(CFG80211_DBG, "Starting Aging timer\n");
hAgingTimer.data = (unsigned long)user_void;
mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
state = -1;
} else {
for (i = 0; i < last_scanned_cnt; i++) {
- if (memcmp(last_scanned_shadow[i].au8bssid,
- pstrNetworkInfo->au8bssid, 6) == 0) {
+ if (memcmp(last_scanned_shadow[i].bssid,
+ pstrNetworkInfo->bssid, 6) == 0) {
state = i;
break;
}
@@ -330,58 +335,57 @@ static int is_network_in_shadow(tstrNetworkInfo *pstrNetworkInfo,
return state;
}
-static void add_network_to_shadow(tstrNetworkInfo *pstrNetworkInfo,
+static void add_network_to_shadow(struct network_info *pstrNetworkInfo,
void *user_void, void *pJoinParams)
{
int ap_found = is_network_in_shadow(pstrNetworkInfo, user_void);
u32 ap_index = 0;
u8 rssi_index = 0;
- if (last_scanned_cnt >= MAX_NUM_SCANNED_NETWORKS_SHADOW) {
- PRINT_D(CFG80211_DBG, "Shadow network reached its maximum limit\n");
+ if (last_scanned_cnt >= MAX_NUM_SCANNED_NETWORKS_SHADOW)
return;
- }
+
if (ap_found == -1) {
ap_index = last_scanned_cnt;
last_scanned_cnt++;
} else {
ap_index = ap_found;
}
- rssi_index = last_scanned_shadow[ap_index].strRssi.u8Index;
- last_scanned_shadow[ap_index].strRssi.as8RSSI[rssi_index++] = pstrNetworkInfo->s8rssi;
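+	/* samples fill a NUM_RSSI-deep ring; u8Full marks the first wrap */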
+ rssi_index = last_scanned_shadow[ap_index].str_rssi.u8Index;
+ last_scanned_shadow[ap_index].str_rssi.as8RSSI[rssi_index++] = pstrNetworkInfo->rssi;
if (rssi_index == NUM_RSSI) {
rssi_index = 0;
- last_scanned_shadow[ap_index].strRssi.u8Full = 1;
- }
- last_scanned_shadow[ap_index].strRssi.u8Index = rssi_index;
- last_scanned_shadow[ap_index].s8rssi = pstrNetworkInfo->s8rssi;
- last_scanned_shadow[ap_index].u16CapInfo = pstrNetworkInfo->u16CapInfo;
- last_scanned_shadow[ap_index].u8SsidLen = pstrNetworkInfo->u8SsidLen;
- memcpy(last_scanned_shadow[ap_index].au8ssid,
- pstrNetworkInfo->au8ssid, pstrNetworkInfo->u8SsidLen);
- memcpy(last_scanned_shadow[ap_index].au8bssid,
- pstrNetworkInfo->au8bssid, ETH_ALEN);
- last_scanned_shadow[ap_index].u16BeaconPeriod = pstrNetworkInfo->u16BeaconPeriod;
- last_scanned_shadow[ap_index].u8DtimPeriod = pstrNetworkInfo->u8DtimPeriod;
- last_scanned_shadow[ap_index].u8channel = pstrNetworkInfo->u8channel;
- last_scanned_shadow[ap_index].u16IEsLen = pstrNetworkInfo->u16IEsLen;
- last_scanned_shadow[ap_index].u64Tsf = pstrNetworkInfo->u64Tsf;
+ last_scanned_shadow[ap_index].str_rssi.u8Full = 1;
+ }
+ last_scanned_shadow[ap_index].str_rssi.u8Index = rssi_index;
+ last_scanned_shadow[ap_index].rssi = pstrNetworkInfo->rssi;
+ last_scanned_shadow[ap_index].cap_info = pstrNetworkInfo->cap_info;
+ last_scanned_shadow[ap_index].ssid_len = pstrNetworkInfo->ssid_len;
+ memcpy(last_scanned_shadow[ap_index].ssid,
+ pstrNetworkInfo->ssid, pstrNetworkInfo->ssid_len);
+ memcpy(last_scanned_shadow[ap_index].bssid,
+ pstrNetworkInfo->bssid, ETH_ALEN);
+ last_scanned_shadow[ap_index].beacon_period = pstrNetworkInfo->beacon_period;
+ last_scanned_shadow[ap_index].dtim_period = pstrNetworkInfo->dtim_period;
+ last_scanned_shadow[ap_index].ch = pstrNetworkInfo->ch;
+ last_scanned_shadow[ap_index].ies_len = pstrNetworkInfo->ies_len;
+ last_scanned_shadow[ap_index].tsf_hi = pstrNetworkInfo->tsf_hi;
if (ap_found != -1)
- kfree(last_scanned_shadow[ap_index].pu8IEs);
- last_scanned_shadow[ap_index].pu8IEs =
- kmalloc(pstrNetworkInfo->u16IEsLen, GFP_KERNEL);
- memcpy(last_scanned_shadow[ap_index].pu8IEs,
- pstrNetworkInfo->pu8IEs, pstrNetworkInfo->u16IEsLen);
- last_scanned_shadow[ap_index].u32TimeRcvdInScan = jiffies;
- last_scanned_shadow[ap_index].u32TimeRcvdInScanCached = jiffies;
- last_scanned_shadow[ap_index].u8Found = 1;
+ kfree(last_scanned_shadow[ap_index].ies);
+ last_scanned_shadow[ap_index].ies = kmalloc(pstrNetworkInfo->ies_len,
+ GFP_KERNEL);
+ memcpy(last_scanned_shadow[ap_index].ies,
+ pstrNetworkInfo->ies, pstrNetworkInfo->ies_len);
+ last_scanned_shadow[ap_index].time_scan = jiffies;
+ last_scanned_shadow[ap_index].time_scan_cached = jiffies;
+ last_scanned_shadow[ap_index].found = 1;
if (ap_found != -1)
- wilc_free_join_params(last_scanned_shadow[ap_index].pJoinParams);
- last_scanned_shadow[ap_index].pJoinParams = pJoinParams;
+ kfree(last_scanned_shadow[ap_index].join_params);
+ last_scanned_shadow[ap_index].join_params = pJoinParams;
}
static void CfgScanResult(enum scan_event scan_event,
- tstrNetworkInfo *network_info,
+ struct network_info *network_info,
void *user_void,
void *join_params)
{
@@ -391,7 +395,7 @@ static void CfgScanResult(enum scan_event scan_event,
struct ieee80211_channel *channel;
struct cfg80211_bss *bss = NULL;
- priv = (struct wilc_priv *)user_void;
+ priv = user_void;
if (priv->bCfgScanning) {
if (scan_event == SCAN_EVENT_NETWORK_FOUND) {
wiphy = priv->dev->ieee80211_ptr->wiphy;
@@ -400,67 +404,53 @@ static void CfgScanResult(enum scan_event scan_event,
return;
if (wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
- (((s32)network_info->s8rssi * 100) < 0 ||
- ((s32)network_info->s8rssi * 100) > 100)) {
- PRINT_ER("wiphy signal type fial\n");
+ (((s32)network_info->rssi * 100) < 0 ||
+ ((s32)network_info->rssi * 100) > 100))
return;
- }
if (network_info) {
- s32Freq = ieee80211_channel_to_frequency((s32)network_info->u8channel, IEEE80211_BAND_2GHZ);
+ s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, s32Freq);
if (!channel)
return;
- PRINT_INFO(CFG80211_DBG, "Network Info:: CHANNEL Frequency: %d, RSSI: %d, CapabilityInfo: %d,"
- "BeaconPeriod: %d\n", channel->center_freq, (((s32)network_info->s8rssi) * 100),
- network_info->u16CapInfo, network_info->u16BeaconPeriod);
-
- if (network_info->bNewNetwork) {
+ if (network_info->new_network) {
if (priv->u32RcvdChCount < MAX_NUM_SCANNED_NETWORKS) {
- PRINT_D(CFG80211_DBG, "Network %s found\n", network_info->au8ssid);
priv->u32RcvdChCount++;
- if (!join_params)
- PRINT_INFO(CORECONFIG_DBG, ">> Something really bad happened\n");
add_network_to_shadow(network_info, priv, join_params);
- if (!(memcmp("DIRECT-", network_info->au8ssid, 7))) {
- bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, network_info->au8bssid, network_info->u64Tsf, network_info->u16CapInfo,
- network_info->u16BeaconPeriod, (const u8 *)network_info->pu8IEs,
- (size_t)network_info->u16IEsLen, (((s32)network_info->s8rssi) * 100), GFP_KERNEL);
+ if (!(memcmp("DIRECT-", network_info->ssid, 7))) {
+ bss = cfg80211_inform_bss(wiphy,
+ channel,
+ CFG80211_BSS_FTYPE_UNKNOWN,
+ network_info->bssid,
+ network_info->tsf_hi,
+ network_info->cap_info,
+ network_info->beacon_period,
+ (const u8 *)network_info->ies,
+ (size_t)network_info->ies_len,
+ (s32)network_info->rssi * 100,
+ GFP_KERNEL);
cfg80211_put_bss(wiphy, bss);
}
-
-
- } else {
- PRINT_ER("Discovered networks exceeded the max limit\n");
}
} else {
u32 i;
for (i = 0; i < priv->u32RcvdChCount; i++) {
- if (memcmp(last_scanned_shadow[i].au8bssid, network_info->au8bssid, 6) == 0) {
- PRINT_D(CFG80211_DBG, "Update RSSI of %s\n", last_scanned_shadow[i].au8ssid);
-
- last_scanned_shadow[i].s8rssi = network_info->s8rssi;
- last_scanned_shadow[i].u32TimeRcvdInScan = jiffies;
+ if (memcmp(last_scanned_shadow[i].bssid, network_info->bssid, 6) == 0) {
+ last_scanned_shadow[i].rssi = network_info->rssi;
+ last_scanned_shadow[i].time_scan = jiffies;
break;
}
}
}
}
} else if (scan_event == SCAN_EVENT_DONE) {
- PRINT_D(CFG80211_DBG, "Scan Done[%p]\n", priv->dev);
- PRINT_D(CFG80211_DBG, "Refreshing Scan ...\n");
refresh_scan(priv, 1, false);
- if (priv->u32RcvdChCount > 0)
- PRINT_D(CFG80211_DBG, "%d Network(s) found\n", priv->u32RcvdChCount);
- else
- PRINT_D(CFG80211_DBG, "No networks found\n");
-
down(&(priv->hSemScanReq));
if (priv->pstrScanReq) {
@@ -473,7 +463,6 @@ static void CfgScanResult(enum scan_event scan_event,
} else if (scan_event == SCAN_EVENT_ABORTED) {
down(&(priv->hSemScanReq));
- PRINT_D(CFG80211_DBG, "Scan Aborted\n");
if (priv->pstrScanReq) {
update_scan_time();
refresh_scan(priv, 1, false);
@@ -490,9 +479,9 @@ static void CfgScanResult(enum scan_event scan_event,
int wilc_connecting;
static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
- tstrConnectInfo *pstrConnectInfo,
+ struct connect_info *pstrConnectInfo,
u8 u8MacStatus,
- tstrDisconnectNotifInfo *pstrDisconnectNotifInfo,
+ struct disconnect_info *pstrDisconnectNotifInfo,
void *pUserVoid)
{
struct wilc_priv *priv;
@@ -504,49 +493,47 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
wilc_connecting = 0;
- priv = (struct wilc_priv *)pUserVoid;
+ priv = pUserVoid;
dev = priv->dev;
vif = netdev_priv(dev);
wl = vif->wilc;
- pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
+ pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
if (enuConnDisconnEvent == CONN_DISCONN_EVENT_CONN_RESP) {
u16 u16ConnectStatus;
- u16ConnectStatus = pstrConnectInfo->u16ConnectStatus;
-
- PRINT_D(CFG80211_DBG, " Connection response received = %d\n", u8MacStatus);
+ u16ConnectStatus = pstrConnectInfo->status;
if ((u8MacStatus == MAC_DISCONNECTED) &&
- (pstrConnectInfo->u16ConnectStatus == SUCCESSFUL_STATUSCODE)) {
+ (pstrConnectInfo->status == SUCCESSFUL_STATUSCODE)) {
u16ConnectStatus = WLAN_STATUS_UNSPECIFIED_FAILURE;
- wilc_wlan_set_bssid(priv->dev, NullBssid);
+ wilc_wlan_set_bssid(priv->dev, NullBssid,
+ STATION_MODE);
eth_zero_addr(wilc_connected_ssid);
if (!pstrWFIDrv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
- PRINT_ER("Unspecified failure: Connection status %d : MAC status = %d\n", u16ConnectStatus, u8MacStatus);
+ netdev_err(dev, "Unspecified failure\n");
}
if (u16ConnectStatus == WLAN_STATUS_SUCCESS) {
bool bNeedScanRefresh = false;
u32 i;
- PRINT_INFO(CFG80211_DBG, "Connection Successful:: BSSID: %x%x%x%x%x%x\n", pstrConnectInfo->au8bssid[0],
- pstrConnectInfo->au8bssid[1], pstrConnectInfo->au8bssid[2], pstrConnectInfo->au8bssid[3], pstrConnectInfo->au8bssid[4], pstrConnectInfo->au8bssid[5]);
- memcpy(priv->au8AssociatedBss, pstrConnectInfo->au8bssid, ETH_ALEN);
+ memcpy(priv->au8AssociatedBss, pstrConnectInfo->bssid, ETH_ALEN);
for (i = 0; i < last_scanned_cnt; i++) {
- if (memcmp(last_scanned_shadow[i].au8bssid,
- pstrConnectInfo->au8bssid, ETH_ALEN) == 0) {
+ if (memcmp(last_scanned_shadow[i].bssid,
+ pstrConnectInfo->bssid,
+ ETH_ALEN) == 0) {
unsigned long now = jiffies;
if (time_after(now,
- last_scanned_shadow[i].u32TimeRcvdInScanCached + (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ)))) {
+ last_scanned_shadow[i].time_scan_cached +
+ (unsigned long)(nl80211_SCAN_RESULT_EXPIRE - (1 * HZ))))
bNeedScanRefresh = true;
- }
break;
}
@@ -556,34 +543,27 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
refresh_scan(priv, 1, true);
}
-
- PRINT_D(CFG80211_DBG, "Association request info elements length = %zu\n", pstrConnectInfo->ReqIEsLen);
-
- PRINT_D(CFG80211_DBG, "Association response info elements length = %d\n", pstrConnectInfo->u16RespIEsLen);
-
- cfg80211_connect_result(dev, pstrConnectInfo->au8bssid,
- pstrConnectInfo->pu8ReqIEs, pstrConnectInfo->ReqIEsLen,
- pstrConnectInfo->pu8RespIEs, pstrConnectInfo->u16RespIEsLen,
+ cfg80211_connect_result(dev, pstrConnectInfo->bssid,
+ pstrConnectInfo->req_ies, pstrConnectInfo->req_ies_len,
+ pstrConnectInfo->resp_ies, pstrConnectInfo->resp_ies_len,
u16ConnectStatus, GFP_KERNEL);
} else if (enuConnDisconnEvent == CONN_DISCONN_EVENT_DISCONN_NOTIF) {
wilc_optaining_ip = false;
- PRINT_ER("Received MAC_DISCONNECTED from firmware with reason %d on dev [%p]\n",
- pstrDisconnectNotifInfo->u16reason, priv->dev);
p2p_local_random = 0x01;
p2p_recv_random = 0x00;
wilc_ie = false;
eth_zero_addr(priv->au8AssociatedBss);
- wilc_wlan_set_bssid(priv->dev, NullBssid);
+ wilc_wlan_set_bssid(priv->dev, NullBssid, STATION_MODE);
eth_zero_addr(wilc_connected_ssid);
if (!pstrWFIDrv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
- pstrDisconnectNotifInfo->u16reason = 3;
+ pstrDisconnectNotifInfo->reason = 3;
} else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
- pstrDisconnectNotifInfo->u16reason = 1;
+ pstrDisconnectNotifInfo->reason = 1;
}
- cfg80211_disconnected(dev, pstrDisconnectNotifInfo->u16reason, pstrDisconnectNotifInfo->ie,
+ cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie,
pstrDisconnectNotifInfo->ie_len, false,
GFP_KERNEL);
}
@@ -601,13 +581,12 @@ static int set_channel(struct wiphy *wiphy,
vif = netdev_priv(priv->dev);
channelnum = ieee80211_frequency_to_channel(chandef->chan->center_freq);
- PRINT_D(CFG80211_DBG, "Setting channel %d with frequency %d\n", channelnum, chandef->chan->center_freq);
curr_channel = channelnum;
result = wilc_set_mac_chnl_num(vif, channelnum);
if (result != 0)
- PRINT_ER("Error in setting channel %d\n", channelnum);
+ netdev_err(priv->dev, "Error in setting channel\n");
return result;
}
@@ -628,38 +607,33 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
priv->u32RcvdChCount = 0;
- wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif));
reset_shadow_found();
priv->bCfgScanning = true;
if (request->n_channels <= MAX_NUM_SCANNED_NETWORKS) {
- for (i = 0; i < request->n_channels; i++) {
+ for (i = 0; i < request->n_channels; i++)
au8ScanChanList[i] = (u8)ieee80211_frequency_to_channel(request->channels[i]->center_freq);
- PRINT_INFO(CFG80211_DBG, "ScanChannel List[%d] = %d,", i, au8ScanChanList[i]);
- }
-
- PRINT_D(CFG80211_DBG, "Requested num of scan channel %d\n", request->n_channels);
- PRINT_D(CFG80211_DBG, "Scan Request IE len = %zu\n", request->ie_len);
-
- PRINT_D(CFG80211_DBG, "Number of SSIDs %d\n", request->n_ssids);
if (request->n_ssids >= 1) {
- strHiddenNetwork.pstrHiddenNetworkInfo = kmalloc(request->n_ssids * sizeof(struct hidden_network), GFP_KERNEL);
- strHiddenNetwork.u8ssidnum = request->n_ssids;
+ strHiddenNetwork.net_info =
+ kmalloc_array(request->n_ssids,
+ sizeof(struct hidden_network),
+ GFP_KERNEL);
+ if (!strHiddenNetwork.net_info)
+ return -ENOMEM;
+ strHiddenNetwork.n_ssids = request->n_ssids;
for (i = 0; i < request->n_ssids; i++) {
if (request->ssids[i].ssid &&
request->ssids[i].ssid_len != 0) {
- strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
- memcpy(strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
- strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen = request->ssids[i].ssid_len;
+ strHiddenNetwork.net_info[i].ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
+ memcpy(strHiddenNetwork.net_info[i].ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
+ strHiddenNetwork.net_info[i].ssid_len = request->ssids[i].ssid_len;
} else {
- PRINT_D(CFG80211_DBG, "Received one NULL SSID\n");
- strHiddenNetwork.u8ssidnum -= 1;
+ strHiddenNetwork.n_ssids -= 1;
}
}
- PRINT_D(CFG80211_DBG, "Trigger Scan Request\n");
s32Error = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
au8ScanChanList,
request->n_channels,
@@ -667,7 +641,6 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
request->ie_len, CfgScanResult,
(void *)priv, &strHiddenNetwork);
} else {
- PRINT_D(CFG80211_DBG, "Trigger Scan Request\n");
s32Error = wilc_scan(vif, USER_SCAN, ACTIVE_SCAN,
au8ScanChanList,
request->n_channels,
@@ -676,14 +649,11 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
(void *)priv, NULL);
}
} else {
- PRINT_ER("Requested num of scanned channels is greater than the max, supported"
- " channels\n");
+ netdev_err(priv->dev, "Requested scanned channels over\n");
}
- if (s32Error != 0) {
+ if (s32Error != 0)
s32Error = -EBUSY;
- PRINT_WRN(CFG80211_DBG, "Device is busy: Error(%d)\n", s32Error);
- }
return s32Error;
}
@@ -695,98 +665,52 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
u32 i;
u8 u8security = NO_ENCRYPT;
enum AUTHTYPE tenuAuth_type = ANY;
- char *pcgroup_encrypt_val = NULL;
- char *pccipher_group = NULL;
- char *pcwpa_version = NULL;
struct wilc_priv *priv;
struct host_if_drv *pstrWFIDrv;
- tstrNetworkInfo *pstrNetworkInfo = NULL;
+ struct network_info *pstrNetworkInfo = NULL;
struct wilc_vif *vif;
wilc_connecting = 1;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- pstrWFIDrv = (struct host_if_drv *)(priv->hWILCWFIDrv);
-
- wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif));
+ pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
- PRINT_D(CFG80211_DBG, "Connecting to SSID [%s] on netdev [%p] host if [%p]\n", sme->ssid, dev, priv->hWILCWFIDrv);
- if (!(strncmp(sme->ssid, "DIRECT-", 7))) {
- PRINT_D(CFG80211_DBG, "Connected to Direct network,OBSS disabled\n");
+ if (!(strncmp(sme->ssid, "DIRECT-", 7)))
pstrWFIDrv->p2p_connect = 1;
- } else {
+ else
pstrWFIDrv->p2p_connect = 0;
- }
- PRINT_INFO(CFG80211_DBG, "Required SSID = %s\n , AuthType = %d\n", sme->ssid, sme->auth_type);
for (i = 0; i < last_scanned_cnt; i++) {
- if ((sme->ssid_len == last_scanned_shadow[i].u8SsidLen) &&
- memcmp(last_scanned_shadow[i].au8ssid,
+ if ((sme->ssid_len == last_scanned_shadow[i].ssid_len) &&
+ memcmp(last_scanned_shadow[i].ssid,
sme->ssid,
sme->ssid_len) == 0) {
- PRINT_INFO(CFG80211_DBG, "Network with required SSID is found %s\n", sme->ssid);
- if (!sme->bssid) {
- PRINT_INFO(CFG80211_DBG, "BSSID is not passed from the user\n");
+ if (!sme->bssid)
break;
- } else {
- if (memcmp(last_scanned_shadow[i].au8bssid,
+ else
+ if (memcmp(last_scanned_shadow[i].bssid,
sme->bssid,
- ETH_ALEN) == 0) {
- PRINT_INFO(CFG80211_DBG, "BSSID is passed from the user and matched\n");
+ ETH_ALEN) == 0)
break;
- }
- }
}
}
if (i < last_scanned_cnt) {
- PRINT_D(CFG80211_DBG, "Required bss is in scan results\n");
-
pstrNetworkInfo = &last_scanned_shadow[i];
-
- PRINT_INFO(CFG80211_DBG, "network BSSID to be associated: %x%x%x%x%x%x\n",
- pstrNetworkInfo->au8bssid[0], pstrNetworkInfo->au8bssid[1],
- pstrNetworkInfo->au8bssid[2], pstrNetworkInfo->au8bssid[3],
- pstrNetworkInfo->au8bssid[4], pstrNetworkInfo->au8bssid[5]);
} else {
s32Error = -ENOENT;
- if (last_scanned_cnt == 0)
- PRINT_D(CFG80211_DBG, "No Scan results yet\n");
- else
- PRINT_D(CFG80211_DBG, "Required bss not in scan results: Error(%d)\n", s32Error);
-
- goto done;
+ wilc_connecting = 0;
+ return s32Error;
}
- priv->WILC_WFI_wep_default = 0;
memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key));
memset(priv->WILC_WFI_wep_key_len, 0, sizeof(priv->WILC_WFI_wep_key_len));
- PRINT_INFO(CFG80211_DBG, "sme->crypto.wpa_versions=%x\n", sme->crypto.wpa_versions);
- PRINT_INFO(CFG80211_DBG, "sme->crypto.cipher_group=%x\n", sme->crypto.cipher_group);
-
- PRINT_INFO(CFG80211_DBG, "sme->crypto.n_ciphers_pairwise=%d\n", sme->crypto.n_ciphers_pairwise);
-
- if (INFO) {
- for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++)
- PRINT_D(CORECONFIG_DBG, "sme->crypto.ciphers_pairwise[%d]=%x\n", i, sme->crypto.ciphers_pairwise[i]);
- }
-
if (sme->crypto.cipher_group != NO_ENCRYPT) {
- pcwpa_version = "Default";
- PRINT_D(CORECONFIG_DBG, ">> sme->crypto.wpa_versions: %x\n", sme->crypto.wpa_versions);
if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) {
u8security = ENCRYPT_ENABLED | WEP;
- pcgroup_encrypt_val = "WEP40";
- pccipher_group = "WLAN_CIPHER_SUITE_WEP40";
- PRINT_INFO(CFG80211_DBG, "WEP Default Key Idx = %d\n", sme->key_idx);
- if (INFO) {
- for (i = 0; i < sme->key_len; i++)
- PRINT_D(CORECONFIG_DBG, "WEP Key Value[%d] = %d\n", i, sme->key[i]);
- }
- priv->WILC_WFI_wep_default = sme->key_idx;
priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
@@ -801,10 +725,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
sme->key_idx);
} else if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104) {
u8security = ENCRYPT_ENABLED | WEP | WEP_EXTENDED;
- pcgroup_encrypt_val = "WEP104";
- pccipher_group = "WLAN_CIPHER_SUITE_WEP104";
- priv->WILC_WFI_wep_default = sme->key_idx;
priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
@@ -820,31 +741,21 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) {
u8security = ENCRYPT_ENABLED | WPA2 | TKIP;
- pcgroup_encrypt_val = "WPA2_TKIP";
- pccipher_group = "TKIP";
} else {
u8security = ENCRYPT_ENABLED | WPA2 | AES;
- pcgroup_encrypt_val = "WPA2_AES";
- pccipher_group = "AES";
}
- pcwpa_version = "WPA_VERSION_2";
} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) {
u8security = ENCRYPT_ENABLED | WPA | TKIP;
- pcgroup_encrypt_val = "WPA_TKIP";
- pccipher_group = "TKIP";
} else {
u8security = ENCRYPT_ENABLED | WPA | AES;
- pcgroup_encrypt_val = "WPA_AES";
- pccipher_group = "AES";
}
- pcwpa_version = "WPA_VERSION_1";
} else {
s32Error = -ENOTSUPP;
- PRINT_ER("Not supported cipher: Error(%d)\n", s32Error);
-
- goto done;
+ netdev_err(dev, "Not supported cipher\n");
+ wilc_connecting = 0;
+ return s32Error;
}
}
@@ -859,22 +770,17 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
}
}
- PRINT_D(CFG80211_DBG, "Adding key with cipher group = %x\n", sme->crypto.cipher_group);
-
- PRINT_D(CFG80211_DBG, "Authentication Type = %d\n", sme->auth_type);
switch (sme->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
- PRINT_D(CFG80211_DBG, "In OPEN SYSTEM\n");
tenuAuth_type = OPEN_SYSTEM;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
tenuAuth_type = SHARED_KEY;
- PRINT_D(CFG80211_DBG, "In SHARED KEY\n");
break;
default:
- PRINT_D(CFG80211_DBG, "Automatic Authentation type = %d\n", sme->auth_type);
+ break;
}
if (sme->crypto.n_akm_suites) {
@@ -888,33 +794,26 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
}
}
-
- PRINT_INFO(CFG80211_DBG, "Required Channel = %d\n", pstrNetworkInfo->u8channel);
-
- PRINT_INFO(CFG80211_DBG, "Group encryption value = %s\n Cipher Group = %s\n WPA version = %s\n",
- pcgroup_encrypt_val, pccipher_group, pcwpa_version);
-
- curr_channel = pstrNetworkInfo->u8channel;
+ curr_channel = pstrNetworkInfo->ch;
if (!pstrWFIDrv->p2p_connect)
- wlan_channel = pstrNetworkInfo->u8channel;
+ wlan_channel = pstrNetworkInfo->ch;
- wilc_wlan_set_bssid(dev, pstrNetworkInfo->au8bssid);
+ wilc_wlan_set_bssid(dev, pstrNetworkInfo->bssid, STATION_MODE);
- s32Error = wilc_set_join_req(vif, pstrNetworkInfo->au8bssid, sme->ssid,
+ s32Error = wilc_set_join_req(vif, pstrNetworkInfo->bssid, sme->ssid,
sme->ssid_len, sme->ie, sme->ie_len,
CfgConnectResult, (void *)priv,
u8security, tenuAuth_type,
- pstrNetworkInfo->u8channel,
- pstrNetworkInfo->pJoinParams);
+ pstrNetworkInfo->ch,
+ pstrNetworkInfo->join_params);
if (s32Error != 0) {
- PRINT_ER("wilc_set_join_req(): Error(%d)\n", s32Error);
+ netdev_err(dev, "wilc_set_join_req(): Error\n");
s32Error = -ENOENT;
- goto done;
+ wilc_connecting = 0;
+ return s32Error;
}
-done:
-
return s32Error;
}
@@ -930,12 +829,10 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
+ pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
if (!pstrWFIDrv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
- wilc_wlan_set_bssid(priv->dev, NullBssid);
-
- PRINT_D(CFG80211_DBG, "Disconnecting with reason code(%d)\n", reason_code);
+ wilc_wlan_set_bssid(priv->dev, NullBssid, STATION_MODE);
p2p_local_random = 0x01;
p2p_recv_random = 0x00;
@@ -944,7 +841,7 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co
s32Error = wilc_disconnect(vif, reason_code);
if (s32Error != 0) {
- PRINT_ER("Error in disconnecting: Error(%d)\n", s32Error);
+ netdev_err(priv->dev, "Error in disconnecting\n");
s32Error = -EINVAL;
}
@@ -957,7 +854,6 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
{
s32 s32Error = 0, KeyLen = params->key_len;
- u32 i;
struct wilc_priv *priv;
const u8 *pu8RxMic = NULL;
const u8 *pu8TxMic = NULL;
@@ -972,29 +868,13 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
vif = netdev_priv(netdev);
wl = vif->wilc;
- PRINT_D(CFG80211_DBG, "Adding key with cipher suite = %x\n", params->cipher);
-
- PRINT_D(CFG80211_DBG, "%p %p %d\n", wiphy, netdev, key_index);
-
- PRINT_D(CFG80211_DBG, "key %x %x %x\n", params->key[0],
- params->key[1],
- params->key[2]);
-
-
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (priv->wdev->iftype == NL80211_IFTYPE_AP) {
- priv->WILC_WFI_wep_default = key_index;
priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
- PRINT_D(CFG80211_DBG, "Adding AP WEP Default key Idx = %d\n", key_index);
- PRINT_D(CFG80211_DBG, "Adding AP WEP Key len= %d\n", params->key_len);
-
- for (i = 0; i < params->key_len; i++)
- PRINT_D(CFG80211_DBG, "WEP AP key val[%d] = %x\n", i, params->key[i]);
-
tenuAuth_type = OPEN_SYSTEM;
if (params->cipher == WLAN_CIPHER_SUITE_WEP40)
@@ -1008,16 +888,9 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
break;
}
if (memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) {
- priv->WILC_WFI_wep_default = key_index;
priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
- PRINT_D(CFG80211_DBG, "Adding WEP Default key Idx = %d\n", key_index);
- PRINT_D(CFG80211_DBG, "Adding WEP Key length = %d\n", params->key_len);
- if (INFO) {
- for (i = 0; i < params->key_len; i++)
- PRINT_INFO(CFG80211_DBG, "WEP key value[%d] = %d\n", i, params->key[i]);
- }
wilc_add_wep_key_bss_sta(vif, params->key,
params->key_len, key_index);
}
@@ -1068,22 +941,12 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
priv->wilc_gtk[key_index]->key_len = params->key_len;
priv->wilc_gtk[key_index]->seq_len = params->seq_len;
- if (INFO) {
- for (i = 0; i < params->key_len; i++)
- PRINT_INFO(CFG80211_DBG, "Adding group key value[%d] = %x\n", i, params->key[i]);
- for (i = 0; i < params->seq_len; i++)
- PRINT_INFO(CFG80211_DBG, "Adding group seq value[%d] = %x\n", i, params->seq[i]);
- }
-
-
wilc_add_rx_gtk(vif, params->key, KeyLen,
key_index, params->seq_len,
params->seq, pu8RxMic,
pu8TxMic, AP_MODE, u8gmode);
} else {
- PRINT_INFO(CFG80211_DBG, "STA Address: %x%x%x%x%x\n", mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4]);
-
if (params->cipher == WLAN_CIPHER_SUITE_TKIP)
u8pmode = ENCRYPT_ENABLED | WPA | TKIP;
else
@@ -1105,14 +968,6 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
if ((params->seq_len) > 0)
priv->wilc_ptk[key_index]->seq = kmalloc(params->seq_len, GFP_KERNEL);
- if (INFO) {
- for (i = 0; i < params->key_len; i++)
- PRINT_INFO(CFG80211_DBG, "Adding pairwise key value[%d] = %x\n", i, params->key[i]);
-
- for (i = 0; i < params->seq_len; i++)
- PRINT_INFO(CFG80211_DBG, "Adding group seq value[%d] = %x\n", i, params->seq[i]);
- }
-
memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len);
if ((params->seq_len) > 0)
@@ -1156,10 +1011,6 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
memcpy(g_key_gtk_params.seq, params->seq, params->seq_len);
}
g_key_gtk_params.cipher = params->cipher;
-
- PRINT_D(CFG80211_DBG, "key %x %x %x\n", g_key_gtk_params.key[0],
- g_key_gtk_params.key[1],
- g_key_gtk_params.key[2]);
g_gtk_keys_saved = true;
}
@@ -1193,27 +1044,18 @@ static int add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
memcpy(g_key_ptk_params.seq, params->seq, params->seq_len);
}
g_key_ptk_params.cipher = params->cipher;
-
- PRINT_D(CFG80211_DBG, "key %x %x %x\n", g_key_ptk_params.key[0],
- g_key_ptk_params.key[1],
- g_key_ptk_params.key[2]);
g_ptk_keys_saved = true;
}
wilc_add_ptk(vif, params->key, KeyLen,
mac_addr, pu8RxMic, pu8TxMic,
STATION_MODE, u8mode, key_index);
- PRINT_D(CFG80211_DBG, "Adding pairwise key\n");
- if (INFO) {
- for (i = 0; i < params->key_len; i++)
- PRINT_INFO(CFG80211_DBG, "Adding pairwise key value[%d] = %d\n", i, params->key[i]);
- }
}
}
break;
default:
- PRINT_ER("Not supported cipher: Error(%d)\n", s32Error);
+ netdev_err(netdev, "Not supported cipher\n");
s32Error = -ENOTSUPP;
}
@@ -1270,18 +1112,14 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
kfree(g_key_gtk_params.seq);
g_key_gtk_params.seq = NULL;
- wilc_set_machw_change_vir_if(netdev, false);
}
if (key_index >= 0 && key_index <= 3) {
memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
priv->WILC_WFI_wep_key_len[key_index] = 0;
-
- PRINT_D(CFG80211_DBG, "Removing WEP key with index = %d\n", key_index);
wilc_remove_wep_key(vif, key_index);
} else {
- PRINT_D(CFG80211_DBG, "Removing all installed keys\n");
- wilc_remove_key(priv->hWILCWFIDrv, mac_addr);
+ wilc_remove_key(priv->hif_drv, mac_addr);
}
return 0;
@@ -1293,26 +1131,17 @@ static int get_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index,
{
struct wilc_priv *priv;
struct key_params key_params;
- u32 i;
priv = wiphy_priv(wiphy);
if (!pairwise) {
- PRINT_D(CFG80211_DBG, "Getting group key idx: %x\n", key_index);
-
key_params.key = priv->wilc_gtk[key_index]->key;
key_params.cipher = priv->wilc_gtk[key_index]->cipher;
key_params.key_len = priv->wilc_gtk[key_index]->key_len;
key_params.seq = priv->wilc_gtk[key_index]->seq;
key_params.seq_len = priv->wilc_gtk[key_index]->seq_len;
- if (INFO) {
- for (i = 0; i < key_params.key_len; i++)
- PRINT_INFO(CFG80211_DBG, "Retrieved key value %x\n", key_params.key[i]);
- }
} else {
- PRINT_D(CFG80211_DBG, "Getting pairwise key\n");
-
key_params.key = priv->wilc_ptk[key_index]->key;
key_params.cipher = priv->wilc_ptk[key_index]->cipher;
key_params.key_len = priv->wilc_ptk[key_index]->key_len;
@@ -1334,11 +1163,7 @@ static int set_default_key(struct wiphy *wiphy, struct net_device *netdev, u8 ke
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- PRINT_D(CFG80211_DBG, "Setting default key with idx = %d\n", key_index);
-
- if (key_index != priv->WILC_WFI_wep_default) {
- wilc_set_wep_default_keyid(vif, key_index);
- }
+ wilc_set_wep_default_keyid(vif, key_index);
return 0;
}
@@ -1355,10 +1180,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
vif = netdev_priv(dev);
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
- PRINT_D(HOSTAPD_DBG, "Getting station parameters\n");
-
- PRINT_INFO(HOSTAPD_DBG, ": %x%x%x%x%x\n", mac[0], mac[1], mac[2], mac[3], mac[4]);
-
for (i = 0; i < NUM_STA_ASSOCIATED; i++) {
if (!(memcmp(mac, priv->assoc_stainfo.au8Sta_AssociatedBss[i], ETH_ALEN))) {
associatedsta = i;
@@ -1367,7 +1188,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
}
if (associatedsta == -1) {
- PRINT_ER("Station required is not associated\n");
+ netdev_err(dev, "sta required is not associated\n");
return -ENOENT;
}
@@ -1375,7 +1196,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
wilc_get_inactive_time(vif, mac, &inactive_time);
sinfo->inactive_time = 1000 * inactive_time;
- PRINT_D(CFG80211_DBG, "Inactive time %d\n", sinfo->inactive_time);
}
if (vif->iftype == STATION_MODE) {
@@ -1400,9 +1220,6 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
wilc_enable_tcp_ack_filter(true);
else if (strStatistics.link_speed != DEFAULT_LINK_SPEED)
wilc_enable_tcp_ack_filter(false);
-
- PRINT_D(CORECONFIG_DBG, "*** stats[%d][%d][%d][%d][%d]\n", sinfo->signal, sinfo->rx_packets, sinfo->tx_packets,
- sinfo->tx_failed, sinfo->txrate.legacy);
}
return 0;
}
@@ -1410,14 +1227,13 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
static int change_bss(struct wiphy *wiphy, struct net_device *dev,
struct bss_parameters *params)
{
- PRINT_D(CFG80211_DBG, "Changing Bss parametrs\n");
return 0;
}
static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
s32 s32Error = 0;
- struct cfg_param_val pstrCfgParamVal;
+ struct cfg_param_attr pstrCfgParamVal;
struct wilc_priv *priv;
struct wilc_vif *vif;
@@ -1425,37 +1241,28 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
vif = netdev_priv(priv->dev);
pstrCfgParamVal.flag = 0;
- PRINT_D(CFG80211_DBG, "Setting Wiphy params\n");
if (changed & WIPHY_PARAM_RETRY_SHORT) {
- PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RETRY_SHORT %d\n",
- priv->dev->ieee80211_ptr->wiphy->retry_short);
pstrCfgParamVal.flag |= RETRY_SHORT;
pstrCfgParamVal.short_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_short;
}
if (changed & WIPHY_PARAM_RETRY_LONG) {
- PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RETRY_LONG %d\n", priv->dev->ieee80211_ptr->wiphy->retry_long);
pstrCfgParamVal.flag |= RETRY_LONG;
pstrCfgParamVal.long_retry_limit = priv->dev->ieee80211_ptr->wiphy->retry_long;
}
if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
- PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_FRAG_THRESHOLD %d\n", priv->dev->ieee80211_ptr->wiphy->frag_threshold);
pstrCfgParamVal.flag |= FRAG_THRESHOLD;
pstrCfgParamVal.frag_threshold = priv->dev->ieee80211_ptr->wiphy->frag_threshold;
}
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
- PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RTS_THRESHOLD %d\n", priv->dev->ieee80211_ptr->wiphy->rts_threshold);
-
pstrCfgParamVal.flag |= RTS_THRESHOLD;
pstrCfgParamVal.rts_threshold = priv->dev->ieee80211_ptr->wiphy->rts_threshold;
}
- PRINT_D(CFG80211_DBG, "Setting CFG params in the host interface\n");
s32Error = wilc_hif_set_cfg(vif, &pstrCfgParamVal);
if (s32Error)
- PRINT_ER("Error in setting WIPHY PARAMS\n");
-
+ netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
return s32Error;
}
@@ -1470,19 +1277,16 @@ static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct wilc_priv *priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- PRINT_D(CFG80211_DBG, "Setting PMKSA\n");
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
ETH_ALEN)) {
flag = PMKID_FOUND;
- PRINT_D(CFG80211_DBG, "PMKID already exists\n");
break;
}
}
if (i < WILC_MAX_NUM_PMKIDS) {
- PRINT_D(CFG80211_DBG, "Setting PMKID in private structure\n");
memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid,
ETH_ALEN);
memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid,
@@ -1490,14 +1294,13 @@ static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
if (!(flag == PMKID_FOUND))
priv->pmkid_list.numpmkid++;
} else {
- PRINT_ER("Invalid PMKID index\n");
+ netdev_err(netdev, "Invalid PMKID index\n");
s32Error = -EINVAL;
}
- if (!s32Error) {
- PRINT_D(CFG80211_DBG, "Setting pmkid in the host interface\n");
+ if (!s32Error)
s32Error = wilc_set_pmkid_info(vif, &priv->pmkid_list);
- }
+
return s32Error;
}
@@ -1509,12 +1312,9 @@ static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct wilc_priv *priv = wiphy_priv(wiphy);
- PRINT_D(CFG80211_DBG, "Deleting PMKSA keys\n");
-
for (i = 0; i < priv->pmkid_list.numpmkid; i++) {
if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
ETH_ALEN)) {
- PRINT_D(CFG80211_DBG, "Reseting PMKID values\n");
memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(struct host_if_pmkid));
break;
}
@@ -1541,8 +1341,6 @@ static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
{
struct wilc_priv *priv = wiphy_priv(wiphy);
- PRINT_D(CFG80211_DBG, "Flushing PMKID key values\n");
-
memset(&priv->pmkid_list, 0, sizeof(struct host_if_pmkid_attr));
return 0;
@@ -1569,7 +1367,6 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
}
if (wlan_channel != INVALID_CHANNEL) {
if (channel_list_attr_index) {
- PRINT_D(GENERIC_DBG, "Modify channel list attribute\n");
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
@@ -1581,7 +1378,6 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
}
if (op_channel_attr_index) {
- PRINT_D(GENERIC_DBG, "Modify operating channel attribute\n");
buf[op_channel_attr_index + 6] = 0x51;
buf[op_channel_attr_index + 7] = wlan_channel;
}
@@ -1611,7 +1407,6 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp
}
if (wlan_channel != INVALID_CHANNEL && bOperChan) {
if (channel_list_attr_index) {
- PRINT_D(GENERIC_DBG, "Modify channel list attribute\n");
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
@@ -1623,14 +1418,13 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp
}
if (op_channel_attr_index) {
- PRINT_D(GENERIC_DBG, "Modify operating channel attribute\n");
buf[op_channel_attr_index + 6] = 0x51;
buf[op_channel_attr_index + 7] = wlan_channel;
}
}
}
-void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size)
+void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
{
struct wilc_priv *priv;
u32 header, pkt_offset;
@@ -1639,7 +1433,7 @@ void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size)
s32 s32Freq;
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
- pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
+ pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
@@ -1647,41 +1441,29 @@ void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size)
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
if (buff[FRAME_TYPE_ID] == IEEE80211_STYPE_PROBE_RESP) {
- PRINT_D(GENERIC_DBG, "Probe response ACK\n");
cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, true, GFP_KERNEL);
return;
} else {
- if (pkt_offset & IS_MGMT_STATUS_SUCCES) {
- PRINT_D(GENERIC_DBG, "Success Ack - Action frame category: %x Action Subtype: %d Dialog T: %x OR %x\n", buff[ACTION_CAT_ID], buff[ACTION_SUBTYPE_ID],
- buff[ACTION_SUBTYPE_ID + 1], buff[P2P_PUB_ACTION_SUBTYPE + 1]);
+ if (pkt_offset & IS_MGMT_STATUS_SUCCES)
cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, true, GFP_KERNEL);
- } else {
- PRINT_D(GENERIC_DBG, "Fail Ack - Action frame category: %x Action Subtype: %d Dialog T: %x OR %x\n", buff[ACTION_CAT_ID], buff[ACTION_SUBTYPE_ID],
- buff[ACTION_SUBTYPE_ID + 1], buff[P2P_PUB_ACTION_SUBTYPE + 1]);
+ else
cfg80211_mgmt_tx_status(priv->wdev, priv->u64tx_cookie, buff, size, false, GFP_KERNEL);
- }
return;
}
} else {
- PRINT_D(GENERIC_DBG, "Rx Frame Type:%x\n", buff[FRAME_TYPE_ID]);
-
s32Freq = ieee80211_channel_to_frequency(curr_channel, IEEE80211_BAND_2GHZ);
if (ieee80211_is_action(buff[FRAME_TYPE_ID])) {
- PRINT_D(GENERIC_DBG, "Rx Action Frame Type: %x %x\n", buff[ACTION_SUBTYPE_ID], buff[P2P_PUB_ACTION_SUBTYPE]);
-
if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) {
- PRINT_D(GENERIC_DBG, "Receiving action frames from wrong channels\n");
+ netdev_dbg(dev, "Receiving action wrong ch\n");
return;
}
if (buff[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
switch (buff[ACTION_SUBTYPE_ID]) {
case GAS_INTIAL_REQ:
- PRINT_D(GENERIC_DBG, "GAS INITIAL REQ %x\n", buff[ACTION_SUBTYPE_ID]);
break;
case GAS_INTIAL_RSP:
- PRINT_D(GENERIC_DBG, "GAS INITIAL RSP %x\n", buff[ACTION_SUBTYPE_ID]);
break;
case PUBLIC_ACT_VENDORSPEC:
@@ -1692,7 +1474,6 @@ void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size)
if (!memcmp(p2p_vendor_spec, &buff[i], 6)) {
p2p_recv_random = buff[i + 6];
wilc_ie = true;
- PRINT_D(GENERIC_DBG, "WILC Vendor specific IE:%02x\n", p2p_recv_random);
break;
}
}
@@ -1709,32 +1490,31 @@ void WILC_WFI_p2p_rx (struct net_device *dev, u8 *buff, u32 size)
}
}
} else {
- PRINT_D(GENERIC_DBG, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
+ netdev_dbg(dev, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
}
}
if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP) && (wilc_ie)) {
- PRINT_D(GENERIC_DBG, "Sending P2P to host without extra elemnt\n");
cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size - 7, 0);
return;
}
break;
default:
- PRINT_D(GENERIC_DBG, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buff[ACTION_SUBTYPE_ID]);
+ netdev_dbg(dev, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buff[ACTION_SUBTYPE_ID]);
break;
}
}
}
- cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size - 7, 0);
+ cfg80211_rx_mgmt(priv->wdev, s32Freq, 0, buff, size, 0);
}
}
static void WILC_WFI_mgmt_tx_complete(void *priv, int status)
{
- struct p2p_mgmt_data *pv_data = (struct p2p_mgmt_data *)priv;
+ struct p2p_mgmt_data *pv_data = priv;
kfree(pv_data->buff);
@@ -1745,9 +1525,7 @@ static void WILC_WFI_RemainOnChannelReady(void *pUserVoid)
{
struct wilc_priv *priv;
- priv = (struct wilc_priv *)pUserVoid;
-
- PRINT_D(HOSTINF_DBG, "Remain on channel ready\n");
+ priv = pUserVoid;
priv->bInP2PlistenState = true;
@@ -1762,20 +1540,15 @@ static void WILC_WFI_RemainOnChannelExpired(void *pUserVoid, u32 u32SessionID)
{
struct wilc_priv *priv;
- priv = (struct wilc_priv *)pUserVoid;
+ priv = pUserVoid;
if (u32SessionID == priv->strRemainOnChanParams.u32ListenSessionID) {
- PRINT_D(GENERIC_DBG, "Remain on channel expired\n");
-
priv->bInP2PlistenState = false;
cfg80211_remain_on_channel_expired(priv->wdev,
priv->strRemainOnChanParams.u64ListenCookie,
priv->strRemainOnChanParams.pstrListenChan,
GFP_KERNEL);
- } else {
- PRINT_D(GENERIC_DBG, "Received ID 0x%x Expected ID 0x%x (No match)\n", u32SessionID
- , priv->strRemainOnChanParams.u32ListenSessionID);
}
}
@@ -1791,11 +1564,8 @@ static int remain_on_channel(struct wiphy *wiphy,
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- PRINT_D(GENERIC_DBG, "Remaining on channel %d\n", chan->hw_value);
-
-
if (wdev->iftype == NL80211_IFTYPE_AP) {
- PRINT_D(GENERIC_DBG, "Required remain-on-channel while in AP mode");
+ netdev_dbg(vif->ndev, "Required while in AP mode\n");
return s32Error;
}
@@ -1826,8 +1596,6 @@ static int cancel_remain_on_channel(struct wiphy *wiphy,
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- PRINT_D(CFG80211_DBG, "Cancel remain on channel\n");
-
s32Error = wilc_listen_state_expired(vif, priv->strRemainOnChanParams.u32ListenSessionID);
return s32Error;
}
@@ -1851,7 +1619,7 @@ static int mgmt_tx(struct wiphy *wiphy,
vif = netdev_priv(wdev->netdev);
priv = wiphy_priv(wiphy);
- pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
+ pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
*cookie = (unsigned long)buf;
priv->u64tx_cookie = *cookie;
@@ -1859,49 +1627,36 @@ static int mgmt_tx(struct wiphy *wiphy,
if (ieee80211_is_mgmt(mgmt->frame_control)) {
mgmt_tx = kmalloc(sizeof(struct p2p_mgmt_data), GFP_KERNEL);
- if (!mgmt_tx) {
- PRINT_ER("Failed to allocate memory for mgmt_tx structure\n");
+ if (!mgmt_tx)
return -EFAULT;
- }
+
mgmt_tx->buff = kmalloc(buf_len, GFP_KERNEL);
if (!mgmt_tx->buff) {
- PRINT_ER("Failed to allocate memory for mgmt_tx buff\n");
kfree(mgmt_tx);
- return -EFAULT;
+ return -ENOMEM;
}
+
memcpy(mgmt_tx->buff, buf, len);
mgmt_tx->size = len;
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
- PRINT_D(GENERIC_DBG, "TX: Probe Response\n");
- PRINT_D(GENERIC_DBG, "Setting channel: %d\n", chan->hw_value);
wilc_set_mac_chnl_num(vif, chan->hw_value);
curr_channel = chan->hw_value;
} else if (ieee80211_is_action(mgmt->frame_control)) {
- PRINT_D(GENERIC_DBG, "ACTION FRAME:%x\n", (u16)mgmt->frame_control);
-
-
if (buf[ACTION_CAT_ID] == PUB_ACTION_ATTR_ID) {
if (buf[ACTION_SUBTYPE_ID] != PUBLIC_ACT_VENDORSPEC ||
buf[P2P_PUB_ACTION_SUBTYPE] != GO_NEG_CONF) {
- PRINT_D(GENERIC_DBG, "Setting channel: %d\n", chan->hw_value);
wilc_set_mac_chnl_num(vif,
chan->hw_value);
curr_channel = chan->hw_value;
}
switch (buf[ACTION_SUBTYPE_ID]) {
case GAS_INTIAL_REQ:
- {
- PRINT_D(GENERIC_DBG, "GAS INITIAL REQ %x\n", buf[ACTION_SUBTYPE_ID]);
break;
- }
case GAS_INTIAL_RSP:
- {
- PRINT_D(GENERIC_DBG, "GAS INITIAL RSP %x\n", buf[ACTION_SUBTYPE_ID]);
break;
- }
case PUBLIC_ACT_VENDORSPEC:
{
@@ -1916,8 +1671,6 @@ static int mgmt_tx(struct wiphy *wiphy,
if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP
|| buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
if (p2p_local_random > p2p_recv_random) {
- PRINT_D(GENERIC_DBG, "LOCAL WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
-
for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
if (buf[i] == P2PELEM_ATTR_ID && !(memcmp(p2p_oui, &buf[i + 2], 4))) {
if (buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)
@@ -1933,13 +1686,11 @@ static int mgmt_tx(struct wiphy *wiphy,
mgmt_tx->buff[len + sizeof(p2p_vendor_spec)] = p2p_local_random;
mgmt_tx->size = buf_len;
}
- } else {
- PRINT_D(GENERIC_DBG, "PEER WILL BE GO LocaRand=%02x RecvRand %02x\n", p2p_local_random, p2p_recv_random);
}
}
} else {
- PRINT_D(GENERIC_DBG, "Not a P2P public action frame\n");
+ netdev_dbg(vif->ndev, "Not a P2P public action frame\n");
}
break;
@@ -1947,24 +1698,18 @@ static int mgmt_tx(struct wiphy *wiphy,
default:
{
- PRINT_D(GENERIC_DBG, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buf[ACTION_SUBTYPE_ID]);
+ netdev_dbg(vif->ndev, "NOT HANDLED PUBLIC ACTION FRAME TYPE:%x\n", buf[ACTION_SUBTYPE_ID]);
break;
}
}
}
- PRINT_D(GENERIC_DBG, "TX: ACTION FRAME Type:%x : Chan:%d\n", buf[ACTION_SUBTYPE_ID], chan->hw_value);
pstrWFIDrv->p2p_timeout = (jiffies + msecs_to_jiffies(wait));
-
- PRINT_D(GENERIC_DBG, "Current Jiffies: %lu Timeout:%llu\n",
- jiffies, pstrWFIDrv->p2p_timeout);
}
wilc_wlan_txq_add_mgmt_pkt(wdev->netdev, mgmt_tx,
mgmt_tx->buff, mgmt_tx->size,
WILC_WFI_mgmt_tx_complete);
- } else {
- PRINT_D(GENERIC_DBG, "This function transmits only management frames\n");
}
return 0;
}
@@ -1977,10 +1722,7 @@ static int mgmt_tx_cancel_wait(struct wiphy *wiphy,
struct host_if_drv *pstrWFIDrv;
priv = wiphy_priv(wiphy);
- pstrWFIDrv = (struct host_if_drv *)priv->hWILCWFIDrv;
-
-
- PRINT_D(GENERIC_DBG, "Tx Cancel wait :%lu\n", jiffies);
+ pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
pstrWFIDrv->p2p_timeout = jiffies;
if (!priv->bInP2PlistenState) {
@@ -2007,7 +1749,6 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!frame_type)
return;
- PRINT_D(GENERIC_DBG, "Frame registering Frame Type: %x: Boolean: %d\n", frame_type, reg);
switch (frame_type) {
case PROBE_REQ:
{
@@ -2029,17 +1770,14 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
}
}
- if (!wl->initialized) {
- PRINT_D(GENERIC_DBG, "Return since mac is closed\n");
+ if (!wl->initialized)
return;
- }
wilc_frame_register(vif, frame_type, reg);
}
static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst)
{
- PRINT_D(CFG80211_DBG, "Setting CQM RSSi Function\n");
return 0;
}
@@ -2049,8 +1787,6 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
struct wilc_priv *priv;
struct wilc_vif *vif;
- PRINT_D(CFG80211_DBG, "Dumping station information\n");
-
if (idx != 0)
return -ENOENT;
@@ -2070,17 +1806,13 @@ static int set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
struct wilc_priv *priv;
struct wilc_vif *vif;
- PRINT_D(CFG80211_DBG, " Power save Enabled= %d , TimeOut = %d\n", enabled, timeout);
-
if (!wiphy)
return -ENOENT;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- if (!priv->hWILCWFIDrv) {
- PRINT_ER("Driver is NULL\n");
+ if (!priv->hif_drv)
return -EIO;
- }
if (wilc_enable_ps)
wilc_set_power_mgmt(vif, enabled, timeout);
@@ -2094,286 +1826,73 @@ static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev,
{
struct wilc_priv *priv;
struct wilc_vif *vif;
- u8 interface_type;
- u16 TID = 0;
- u8 i;
struct wilc *wl;
vif = netdev_priv(dev);
priv = wiphy_priv(wiphy);
wl = vif->wilc;
-
- PRINT_D(HOSTAPD_DBG, "In Change virtual interface function\n");
- PRINT_D(HOSTAPD_DBG, "Wireless interface name =%s\n", dev->name);
p2p_local_random = 0x01;
p2p_recv_random = 0x00;
wilc_ie = false;
wilc_optaining_ip = false;
del_timer(&wilc_during_ip_timer);
- PRINT_D(GENERIC_DBG, "Changing virtual interface, enable scan\n");
-
- if (g_ptk_keys_saved && g_gtk_keys_saved) {
- wilc_set_machw_change_vir_if(dev, true);
- }
switch (type) {
case NL80211_IFTYPE_STATION:
wilc_connecting = 0;
- PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_STATION\n");
-
dev->ieee80211_ptr->iftype = type;
priv->wdev->iftype = type;
vif->monitor_flag = 0;
vif->iftype = STATION_MODE;
+ wilc_set_operation_mode(vif, STATION_MODE);
memset(priv->assoc_stainfo.au8Sta_AssociatedBss, 0, MAX_NUM_STA * ETH_ALEN);
- interface_type = vif->iftype;
- vif->iftype = STATION_MODE;
-
- if (wl->initialized) {
- wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid,
- TID);
- wilc_wait_msg_queue_idle();
-
- up(&wl->cfg_event);
-
- wilc1000_wlan_deinit(dev);
- wilc1000_wlan_init(dev, vif);
- wilc_initialized = 1;
- vif->iftype = interface_type;
-
- wilc_set_wfi_drv_handler(vif,
- wilc_get_vif_idx(wl->vif[0]));
- wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr);
- wilc_set_operation_mode(vif, STATION_MODE);
-
- if (g_wep_keys_saved) {
- wilc_set_wep_default_keyid(wl->vif[0],
- g_key_wep_params.key_idx);
- wilc_add_wep_key_bss_sta(wl->vif[0],
- g_key_wep_params.key,
- g_key_wep_params.key_len,
- g_key_wep_params.key_idx);
- }
- wilc_flush_join_req(vif);
-
- if (g_ptk_keys_saved && g_gtk_keys_saved) {
- PRINT_D(CFG80211_DBG, "ptk %x %x %x\n", g_key_ptk_params.key[0],
- g_key_ptk_params.key[1],
- g_key_ptk_params.key[2]);
- PRINT_D(CFG80211_DBG, "gtk %x %x %x\n", g_key_gtk_params.key[0],
- g_key_gtk_params.key[1],
- g_key_gtk_params.key[2]);
- add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
- wl->vif[0]->ndev,
- g_add_ptk_key_params.key_idx,
- g_add_ptk_key_params.pairwise,
- g_add_ptk_key_params.mac_addr,
- (struct key_params *)(&g_key_ptk_params));
-
- add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
- wl->vif[0]->ndev,
- g_add_gtk_key_params.key_idx,
- g_add_gtk_key_params.pairwise,
- g_add_gtk_key_params.mac_addr,
- (struct key_params *)(&g_key_gtk_params));
- }
-
- if (wl->initialized) {
- for (i = 0; i < num_reg_frame; i++) {
- PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type,
- vif->g_struct_frame_reg[i].reg);
- wilc_frame_register(vif,
- vif->g_struct_frame_reg[i].frame_type,
- vif->g_struct_frame_reg[i].reg);
- }
- }
-
- wilc_enable_ps = true;
- wilc_set_power_mgmt(vif, 1, 0);
- }
+ wilc_enable_ps = true;
+ wilc_set_power_mgmt(vif, 1, 0);
break;
case NL80211_IFTYPE_P2P_CLIENT:
- wilc_enable_ps = false;
- wilc_set_power_mgmt(vif, 0, 0);
wilc_connecting = 0;
- PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_P2P_CLIENT\n");
-
- wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid, TID);
-
dev->ieee80211_ptr->iftype = type;
priv->wdev->iftype = type;
vif->monitor_flag = 0;
-
- PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n");
vif->iftype = CLIENT_MODE;
+ wilc_set_operation_mode(vif, STATION_MODE);
-
- if (wl->initialized) {
- wilc_wait_msg_queue_idle();
-
- wilc1000_wlan_deinit(dev);
- wilc1000_wlan_init(dev, vif);
- wilc_initialized = 1;
-
- wilc_set_wfi_drv_handler(vif,
- wilc_get_vif_idx(wl->vif[0]));
- wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr);
- wilc_set_operation_mode(vif, STATION_MODE);
-
- if (g_wep_keys_saved) {
- wilc_set_wep_default_keyid(wl->vif[0],
- g_key_wep_params.key_idx);
- wilc_add_wep_key_bss_sta(wl->vif[0],
- g_key_wep_params.key,
- g_key_wep_params.key_len,
- g_key_wep_params.key_idx);
- }
-
- wilc_flush_join_req(vif);
-
- if (g_ptk_keys_saved && g_gtk_keys_saved) {
- PRINT_D(CFG80211_DBG, "ptk %x %x %x\n", g_key_ptk_params.key[0],
- g_key_ptk_params.key[1],
- g_key_ptk_params.key[2]);
- PRINT_D(CFG80211_DBG, "gtk %x %x %x\n", g_key_gtk_params.key[0],
- g_key_gtk_params.key[1],
- g_key_gtk_params.key[2]);
- add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
- wl->vif[0]->ndev,
- g_add_ptk_key_params.key_idx,
- g_add_ptk_key_params.pairwise,
- g_add_ptk_key_params.mac_addr,
- (struct key_params *)(&g_key_ptk_params));
-
- add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
- wl->vif[0]->ndev,
- g_add_gtk_key_params.key_idx,
- g_add_gtk_key_params.pairwise,
- g_add_gtk_key_params.mac_addr,
- (struct key_params *)(&g_key_gtk_params));
- }
-
- refresh_scan(priv, 1, true);
- wilc_set_machw_change_vir_if(dev, false);
-
- if (wl->initialized) {
- for (i = 0; i < num_reg_frame; i++) {
- PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type,
- vif->g_struct_frame_reg[i].reg);
- wilc_frame_register(vif,
- vif->g_struct_frame_reg[i].frame_type,
- vif->g_struct_frame_reg[i].reg);
- }
- }
- }
+ wilc_enable_ps = false;
+ wilc_set_power_mgmt(vif, 0, 0);
break;
case NL80211_IFTYPE_AP:
wilc_enable_ps = false;
- PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_AP %d\n", type);
dev->ieee80211_ptr->iftype = type;
priv->wdev->iftype = type;
vif->iftype = AP_MODE;
- PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv);
-
- PRINT_D(HOSTAPD_DBG, "Downloading AP firmware\n");
- wilc_wlan_get_firmware(dev);
-
- if (wl->initialized) {
- vif->iftype = AP_MODE;
- wilc_mac_close(dev);
- wilc_mac_open(dev);
-
- for (i = 0; i < num_reg_frame; i++) {
- PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type,
- vif->g_struct_frame_reg[i].reg);
- wilc_frame_register(vif,
- vif->g_struct_frame_reg[i].frame_type,
- vif->g_struct_frame_reg[i].reg);
- }
+
+ if (wl->initialized) {
+ wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(vif),
+ 0);
+ wilc_set_operation_mode(vif, AP_MODE);
+ wilc_set_power_mgmt(vif, 0, 0);
}
break;
case NL80211_IFTYPE_P2P_GO:
- PRINT_D(GENERIC_DBG, "start duringIP timer\n");
-
wilc_optaining_ip = true;
mod_timer(&wilc_during_ip_timer,
jiffies + msecs_to_jiffies(during_ip_time));
- wilc_set_power_mgmt(vif, 0, 0);
- wilc_del_all_rx_ba_session(vif, wl->vif[0]->bssid, TID);
- wilc_enable_ps = false;
- PRINT_D(HOSTAPD_DBG, "Interface type = NL80211_IFTYPE_GO\n");
+ wilc_set_operation_mode(vif, AP_MODE);
dev->ieee80211_ptr->iftype = type;
priv->wdev->iftype = type;
-
- PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv);
-
- PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n");
-
-
vif->iftype = GO_MODE;
- wilc_wait_msg_queue_idle();
- wilc1000_wlan_deinit(dev);
- wilc1000_wlan_init(dev, vif);
- wilc_initialized = 1;
-
- wilc_set_wfi_drv_handler(vif, wilc_get_vif_idx(wl->vif[0]));
- wilc_set_mac_address(wl->vif[0], wl->vif[0]->src_addr);
- wilc_set_operation_mode(vif, AP_MODE);
-
- if (g_wep_keys_saved) {
- wilc_set_wep_default_keyid(wl->vif[0],
- g_key_wep_params.key_idx);
- wilc_add_wep_key_bss_sta(wl->vif[0],
- g_key_wep_params.key,
- g_key_wep_params.key_len,
- g_key_wep_params.key_idx);
- }
-
- wilc_flush_join_req(vif);
-
- if (g_ptk_keys_saved && g_gtk_keys_saved) {
- PRINT_D(CFG80211_DBG, "ptk %x %x %x cipher %x\n", g_key_ptk_params.key[0],
- g_key_ptk_params.key[1],
- g_key_ptk_params.key[2],
- g_key_ptk_params.cipher);
- PRINT_D(CFG80211_DBG, "gtk %x %x %x cipher %x\n", g_key_gtk_params.key[0],
- g_key_gtk_params.key[1],
- g_key_gtk_params.key[2],
- g_key_gtk_params.cipher);
- add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
- wl->vif[0]->ndev,
- g_add_ptk_key_params.key_idx,
- g_add_ptk_key_params.pairwise,
- g_add_ptk_key_params.mac_addr,
- (struct key_params *)(&g_key_ptk_params));
-
- add_key(wl->vif[0]->ndev->ieee80211_ptr->wiphy,
- wl->vif[0]->ndev,
- g_add_gtk_key_params.key_idx,
- g_add_gtk_key_params.pairwise,
- g_add_gtk_key_params.mac_addr,
- (struct key_params *)(&g_key_gtk_params));
- }
-
- if (wl->initialized) {
- for (i = 0; i < num_reg_frame; i++) {
- PRINT_D(INIT_DBG, "Frame registering Type: %x - Reg: %d\n", vif->g_struct_frame_reg[i].frame_type,
- vif->g_struct_frame_reg[i].reg);
- wilc_frame_register(vif,
- vif->g_struct_frame_reg[i].frame_type,
- vif->g_struct_frame_reg[i].reg);
- }
- }
+ wilc_enable_ps = false;
+ wilc_set_power_mgmt(vif, 0, 0);
break;
default:
- PRINT_ER("Unknown interface type= %d\n", type);
+ netdev_err(dev, "Unknown interface type= %d\n", type);
return -EINVAL;
}
@@ -2391,18 +1910,15 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
priv = wiphy_priv(wiphy);
vif = netdev_priv(dev);
- wl = vif ->wilc;
- PRINT_D(HOSTAPD_DBG, "Starting ap\n");
-
- PRINT_D(HOSTAPD_DBG, "Interval = %d\n DTIM period = %d\n Head length = %zu Tail length = %zu\n",
- settings->beacon_interval, settings->dtim_period, beacon->head_len, beacon->tail_len);
+ wl = vif->wilc;
s32Error = set_channel(wiphy, &settings->chandef);
if (s32Error != 0)
- PRINT_ER("Error in setting channel\n");
+ netdev_err(dev, "Error in setting channel\n");
- wilc_wlan_set_bssid(dev, wl->vif[0]->src_addr);
+ wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE);
+ wilc_set_power_mgmt(vif, 0, 0);
s32Error = wilc_add_beacon(vif, settings->beacon_interval,
settings->dtim_period, beacon->head_len,
@@ -2421,8 +1937,6 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- PRINT_D(HOSTAPD_DBG, "Setting beacon\n");
-
s32Error = wilc_add_beacon(vif, 0, 0, beacon->head_len,
(u8 *)beacon->head, beacon->tail_len,
@@ -2444,14 +1958,12 @@ static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
- PRINT_D(HOSTAPD_DBG, "Deleting beacon\n");
-
- wilc_wlan_set_bssid(dev, NullBssid);
+ wilc_wlan_set_bssid(dev, NullBssid, AP_MODE);
s32Error = wilc_del_beacon(vif);
if (s32Error)
- PRINT_ER("Host delete beacon fail\n");
+ netdev_err(dev, "Host delete beacon fail\n");
return s32Error;
}
@@ -2477,14 +1989,6 @@ static int add_station(struct wiphy *wiphy, struct net_device *dev,
strStaParams.rates_len = params->supported_rates_len;
strStaParams.rates = params->supported_rates;
- PRINT_D(CFG80211_DBG, "Adding station parameters %d\n", params->aid);
-
- PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n", priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][0], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][1], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][2], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][3], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][4],
- priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][5]);
- PRINT_D(HOSTAPD_DBG, "ASSOC ID = %d\n", strStaParams.aid);
- PRINT_D(HOSTAPD_DBG, "Number of supported rates = %d\n",
- strStaParams.rates_len);
-
if (!params->ht_capa) {
strStaParams.ht_supported = false;
} else {
@@ -2502,26 +2006,9 @@ static int add_station(struct wiphy *wiphy, struct net_device *dev,
strStaParams.flags_mask = params->sta_flags_mask;
strStaParams.flags_set = params->sta_flags_set;
- PRINT_D(HOSTAPD_DBG, "IS HT supported = %d\n",
- strStaParams.ht_supported);
- PRINT_D(HOSTAPD_DBG, "Capability Info = %d\n",
- strStaParams.ht_capa_info);
- PRINT_D(HOSTAPD_DBG, "AMPDU Params = %d\n",
- strStaParams.ht_ampdu_params);
- PRINT_D(HOSTAPD_DBG, "HT Extended params = %d\n",
- strStaParams.ht_ext_params);
- PRINT_D(HOSTAPD_DBG, "Tx Beamforming Cap = %d\n",
- strStaParams.ht_tx_bf_cap);
- PRINT_D(HOSTAPD_DBG, "Antenna selection info = %d\n",
- strStaParams.ht_ante_sel);
- PRINT_D(HOSTAPD_DBG, "Flag Mask = %d\n",
- strStaParams.flags_mask);
- PRINT_D(HOSTAPD_DBG, "Flag Set = %d\n",
- strStaParams.flags_set);
-
s32Error = wilc_add_station(vif, &strStaParams);
if (s32Error)
- PRINT_ER("Host add station fail\n");
+ netdev_err(dev, "Host add station failed\n");
}
return s32Error;
@@ -2542,21 +2029,14 @@ static int del_station(struct wiphy *wiphy, struct net_device *dev,
vif = netdev_priv(dev);
if (vif->iftype == AP_MODE || vif->iftype == GO_MODE) {
- PRINT_D(HOSTAPD_DBG, "Deleting station\n");
-
-
- if (!mac) {
- PRINT_D(HOSTAPD_DBG, "All associated stations\n");
+ if (!mac)
s32Error = wilc_del_allstation(vif,
priv->assoc_stainfo.au8Sta_AssociatedBss);
- } else {
- PRINT_D(HOSTAPD_DBG, "With mac address: %x%x%x%x%x%x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- }
s32Error = wilc_del_station(vif, mac);
if (s32Error)
- PRINT_ER("Host delete station fail\n");
+ netdev_err(dev, "Host delete station failed\n");
}
return s32Error;
}
@@ -2569,9 +2049,6 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
struct add_sta_param strStaParams = { {0} };
struct wilc_vif *vif;
-
- PRINT_D(HOSTAPD_DBG, "Change station paramters\n");
-
if (!wiphy)
return -EFAULT;
@@ -2584,14 +2061,6 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
strStaParams.rates_len = params->supported_rates_len;
strStaParams.rates = params->supported_rates;
- PRINT_D(HOSTAPD_DBG, "BSSID = %x%x%x%x%x%x\n",
- strStaParams.bssid[0], strStaParams.bssid[1],
- strStaParams.bssid[2], strStaParams.bssid[3],
- strStaParams.bssid[4], strStaParams.bssid[5]);
- PRINT_D(HOSTAPD_DBG, "ASSOC ID = %d\n", strStaParams.aid);
- PRINT_D(HOSTAPD_DBG, "Number of supported rates = %d\n",
- strStaParams.rates_len);
-
if (!params->ht_capa) {
strStaParams.ht_supported = false;
} else {
@@ -2609,26 +2078,9 @@ static int change_station(struct wiphy *wiphy, struct net_device *dev,
strStaParams.flags_mask = params->sta_flags_mask;
strStaParams.flags_set = params->sta_flags_set;
- PRINT_D(HOSTAPD_DBG, "IS HT supported = %d\n",
- strStaParams.ht_supported);
- PRINT_D(HOSTAPD_DBG, "Capability Info = %d\n",
- strStaParams.ht_capa_info);
- PRINT_D(HOSTAPD_DBG, "AMPDU Params = %d\n",
- strStaParams.ht_ampdu_params);
- PRINT_D(HOSTAPD_DBG, "HT Extended params = %d\n",
- strStaParams.ht_ext_params);
- PRINT_D(HOSTAPD_DBG, "Tx Beamforming Cap = %d\n",
- strStaParams.ht_tx_bf_cap);
- PRINT_D(HOSTAPD_DBG, "Antenna selection info = %d\n",
- strStaParams.ht_ante_sel);
- PRINT_D(HOSTAPD_DBG, "Flag Mask = %d\n",
- strStaParams.flags_mask);
- PRINT_D(HOSTAPD_DBG, "Flag Set = %d\n",
- strStaParams.flags_set);
-
s32Error = wilc_edit_station(vif, &strStaParams);
if (s32Error)
- PRINT_ER("Host edit station fail\n");
+ netdev_err(dev, "Host edit station failed\n");
}
return s32Error;
}
@@ -2645,34 +2097,94 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
struct net_device *new_ifc = NULL;
priv = wiphy_priv(wiphy);
-
-
-
- PRINT_D(HOSTAPD_DBG, "Adding monitor interface[%p]\n", priv->wdev->netdev);
-
vif = netdev_priv(priv->wdev->netdev);
if (type == NL80211_IFTYPE_MONITOR) {
- PRINT_D(HOSTAPD_DBG, "Monitor interface mode: Initializing mon interface virtual device driver\n");
- PRINT_D(HOSTAPD_DBG, "Adding monitor interface[%p]\n", vif->ndev);
new_ifc = WILC_WFI_init_mon_interface(name, vif->ndev);
if (new_ifc) {
- PRINT_D(HOSTAPD_DBG, "Setting monitor flag in private structure\n");
vif = netdev_priv(priv->wdev->netdev);
vif->monitor_flag = 1;
- } else
- PRINT_ER("Error in initializing monitor interface\n ");
+ }
}
return priv->wdev;
}
static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- PRINT_D(HOSTAPD_DBG, "Deleting virtual interface\n");
return 0;
}
+static int wilc_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
+{
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
+
+ if (!wow && wilc_wlan_get_num_conn_ifcs(vif->wilc))
+ vif->wilc->suspend_event = true;
+ else
+ vif->wilc->suspend_event = false;
+
+ return 0;
+}
+
+static int wilc_resume(struct wiphy *wiphy)
+{
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
+
+ netdev_info(vif->ndev, "cfg resume\n");
+ return 0;
+}
+
+static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
+{
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
+
+ netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
+}
+
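
A note on the hunk above: wilc_suspend() latches wilc->suspend_event only when the host suspends with no WoWLAN configuration while at least one interface is still connected; if userspace has installed wow triggers, the flag is cleared instead. A minimal user-space sketch of that decision, with the connected-interface count stubbed in for wilc_wlan_get_num_conn_ifcs() (the stub and its value are illustrative):

    /* sketch: the suspend_event decision from wilc_suspend(), stubbed */
    #include <stdbool.h>
    #include <stdio.h>

    static int num_conn_ifcs = 1;   /* stand-in for wilc_wlan_get_num_conn_ifcs() */

    static bool decide_suspend_event(bool wow_configured)
    {
        /* flag only when no WoW triggers exist but a link is still up */
        return !wow_configured && num_conn_ifcs > 0;
    }

    int main(void)
    {
        printf("no wow, connected -> %d\n", decide_suspend_event(false)); /* 1 */
        printf("wow configured    -> %d\n", decide_suspend_event(true));  /* 0 */
        return 0;
    }
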
+static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
+{
+ int ret;
+ s32 tx_power = MBM_TO_DBM(mbm);
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
+
+ if (tx_power < 0)
+ tx_power = 0;
+ else if (tx_power > 18)
+ tx_power = 18;
+ ret = wilc_set_tx_power(vif, tx_power);
+ if (ret)
+ netdev_err(vif->ndev, "Failed to set tx power\n");
+
+ return ret;
+}
+
+static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+ int *dbm)
+{
+ int ret;
+ struct wilc_priv *priv = wiphy_priv(wiphy);
+ struct wilc_vif *vif = netdev_priv(priv->dev);
+ struct wilc *wl;
+
+ wl = vif->wilc;
+
+ /* If firmware is not started, return. */
+ if (!wl->initialized)
+ return -EIO;
+
+ ret = wilc_get_tx_power(vif, (u8 *)dbm);
+ if (ret)
+ netdev_err(vif->ndev, "Failed to get tx power\n");
+
+ return ret;
+}
+
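
The new set_tx_power() above converts cfg80211's milli-Bel-milliwatt argument with MBM_TO_DBM() and clamps the result to the 0..18 dBm window the firmware accepts (the ceiling is taken from the hunk itself). The same conversion and clamp as a standalone sketch:

    /* sketch: mBm -> dBm conversion plus the 0..18 dBm clamp of set_tx_power() */
    #include <stdio.h>

    #define MBM_TO_DBM(gain) ((gain) / 100)   /* as defined by cfg80211 */

    static int clamp_tx_power_dbm(int mbm)
    {
        int dbm = MBM_TO_DBM(mbm);

        if (dbm < 0)
            dbm = 0;
        else if (dbm > 18)
            dbm = 18;
        return dbm;
    }

    int main(void)
    {
        printf("%d\n", clamp_tx_power_dbm(2500));  /* 25 dBm requested -> 18 */
        printf("%d\n", clamp_tx_power_dbm(-300));  /* negative -> 0 */
        return 0;
    }
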
static struct cfg80211_ops wilc_cfg80211_ops = {
.set_monitor_channel = set_channel,
.scan = scan,
@@ -2708,55 +2220,25 @@ static struct cfg80211_ops wilc_cfg80211_ops = {
.set_power_mgmt = set_power_mgmt,
.set_cqm_rssi_config = set_cqm_rssi_config,
-};
-
-int WILC_WFI_update_stats(struct wiphy *wiphy, u32 pktlen, u8 changed)
-{
- struct wilc_priv *priv;
+ .suspend = wilc_suspend,
+ .resume = wilc_resume,
+ .set_wakeup = wilc_set_wakeup,
+ .set_tx_power = set_tx_power,
+ .get_tx_power = get_tx_power,
- priv = wiphy_priv(wiphy);
- switch (changed) {
- case WILC_WFI_RX_PKT:
- {
- priv->netstats.rx_packets++;
- priv->netstats.rx_bytes += pktlen;
- priv->netstats.rx_time = get_jiffies_64();
- }
- break;
-
- case WILC_WFI_TX_PKT:
- {
- priv->netstats.tx_packets++;
- priv->netstats.tx_bytes += pktlen;
- priv->netstats.tx_time = get_jiffies_64();
-
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
+};
static struct wireless_dev *WILC_WFI_CfgAlloc(void)
{
struct wireless_dev *wdev;
-
- PRINT_D(CFG80211_DBG, "Allocating wireless device\n");
-
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- PRINT_ER("Cannot allocate wireless device\n");
+ if (!wdev)
goto _fail_;
- }
wdev->wiphy = wiphy_new(&wilc_cfg80211_ops, sizeof(struct wilc_priv));
- if (!wdev->wiphy) {
- PRINT_ER("Cannot allocate wiphy\n");
+ if (!wdev->wiphy)
goto _fail_mem_;
- }
WILC_WFI_band_2ghz.ht_cap.ht_supported = 1;
WILC_WFI_band_2ghz.ht_cap.cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
@@ -2780,11 +2262,9 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
struct wireless_dev *wdev;
s32 s32Error = 0;
- PRINT_D(CFG80211_DBG, "Registering wifi device\n");
-
wdev = WILC_WFI_CfgAlloc();
if (!wdev) {
- PRINT_ER("CfgAlloc Failed\n");
+ netdev_err(net, "wiphy allocation failed\n");
return NULL;
}
@@ -2792,9 +2272,10 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
sema_init(&(priv->SemHandleUpdateStats), 1);
priv->wdev = wdev;
wdev->wiphy->max_scan_ssids = MAX_NUM_PROBED_SSID;
+#ifdef CONFIG_PM
+ wdev->wiphy->wowlan = &wowlan_support;
+#endif
wdev->wiphy->max_num_pmkids = WILC_MAX_NUM_PMKIDS;
- PRINT_INFO(CFG80211_DBG, "Max number of PMKIDs = %d\n", wdev->wiphy->max_num_pmkids);
-
wdev->wiphy->max_scan_ie_len = 1000;
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wdev->wiphy->cipher_suites = cipher_suites;
@@ -2807,20 +2288,11 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wdev->iftype = NL80211_IFTYPE_STATION;
-
-
- PRINT_INFO(CFG80211_DBG, "Max scan ids = %d,Max scan IE len = %d,Signal Type = %d,Interface Modes = %d,Interface Type = %d\n",
- wdev->wiphy->max_scan_ssids, wdev->wiphy->max_scan_ie_len, wdev->wiphy->signal_type,
- wdev->wiphy->interface_modes, wdev->iftype);
-
set_wiphy_dev(wdev->wiphy, dev);
s32Error = wiphy_register(wdev->wiphy);
- if (s32Error) {
- PRINT_ER("Cannot register wiphy device\n");
- } else {
- PRINT_D(CFG80211_DBG, "Successful Registering\n");
- }
+ if (s32Error)
+ netdev_err(net, "Cannot register wiphy device\n");
priv->dev = net;
return wdev;
@@ -2832,26 +2304,21 @@ int wilc_init_host_int(struct net_device *net)
struct wilc_priv *priv;
- PRINT_D(INIT_DBG, "Host[%p][%p]\n", net, net->ieee80211_ptr);
priv = wdev_priv(net->ieee80211_ptr);
if (op_ifcs == 0) {
setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
setup_timer(&wilc_during_ip_timer, clear_duringIP, 0);
}
op_ifcs++;
- if (s32Error < 0) {
- PRINT_ER("Failed to creat refresh Timer\n");
- return s32Error;
- }
priv->gbAutoRateAdjusted = false;
priv->bInP2PlistenState = false;
sema_init(&(priv->hSemScanReq), 1);
- s32Error = wilc_init(net, &priv->hWILCWFIDrv);
+ s32Error = wilc_init(net, &priv->hif_drv);
if (s32Error)
- PRINT_ER("Error while initializing hostinterface\n");
+ netdev_err(net, "Error while initializing host interface\n");
return s32Error;
}
@@ -2874,39 +2341,28 @@ int wilc_deinit_host_int(struct net_device *net)
s32Error = wilc_deinit(vif);
clear_shadow_scan();
- if (op_ifcs == 0) {
- PRINT_D(CORECONFIG_DBG, "destroy during ip\n");
+ if (op_ifcs == 0)
del_timer_sync(&wilc_during_ip_timer);
- }
if (s32Error)
- PRINT_ER("Error while deintializing host interface\n");
+ netdev_err(net, "Error while deinitializing host interface\n");
return s32Error;
}
void wilc_free_wiphy(struct net_device *net)
{
- PRINT_D(CFG80211_DBG, "Unregistering wiphy\n");
-
- if (!net) {
- PRINT_D(INIT_DBG, "net_device is NULL\n");
+ if (!net)
return;
- }
- if (!net->ieee80211_ptr) {
- PRINT_D(INIT_DBG, "ieee80211_ptr is NULL\n");
+ if (!net->ieee80211_ptr)
return;
- }
- if (!net->ieee80211_ptr->wiphy) {
- PRINT_D(INIT_DBG, "wiphy is NULL\n");
+ if (!net->ieee80211_ptr->wiphy)
return;
- }
wiphy_unregister(net->ieee80211_ptr->wiphy);
- PRINT_D(INIT_DBG, "Freeing wiphy\n");
wiphy_free(net->ieee80211_ptr->wiphy);
kfree(net->ieee80211_ptr);
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
index ab53d9d59081..85a3810d7bb5 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
@@ -12,7 +12,6 @@
struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *dev);
void wilc_free_wiphy(struct net_device *net);
-int WILC_WFI_update_stats(struct wiphy *wiphy, u32 pktlen, u8 changed);
int wilc_deinit_host_int(struct net_device *net);
int wilc_init_host_int(struct net_device *net);
void WILC_WFI_monitor_rx(u8 *buff, u32 size);
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 98ac8ed04a06..4123cffe3a6e 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -35,8 +35,6 @@
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
-#include <linux/ieee80211.h>
-#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
@@ -121,10 +119,9 @@ struct wilc_priv {
spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
- struct host_if_drv *hWILCWFIDrv;
+ struct host_if_drv *hif_drv;
struct host_if_pmkid_attr pmkid_list;
struct WILC_WFI_stats netstats;
- u8 WILC_WFI_wep_default;
u8 WILC_WFI_wep_key[4][WLAN_KEY_LEN_WEP104];
u8 WILC_WFI_wep_key_len[4];
/* The real interface that the monitor is on */
@@ -149,7 +146,7 @@ typedef struct {
} struct_frame_reg;
struct wilc_vif {
- u8 u8IfIdx;
+ u8 idx;
u8 iftype;
int monitor_flag;
int mac_opened;
@@ -160,6 +157,7 @@ struct wilc_vif {
u8 bssid[ETH_ALEN];
struct host_if_drv *hif_drv;
struct net_device *ndev;
+ u8 mode;
};
struct wilc {
@@ -215,6 +213,9 @@ struct wilc {
const struct firmware *firmware;
struct device *dev;
+ bool suspend_event;
+
+ struct rf_info dummy_statistics;
};
struct WILC_WFI_mon_priv {
@@ -225,17 +226,13 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif);
void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc, int flag);
-void wilc_rx_complete(struct wilc *wilc);
-void wilc_dbg(u8 *buff);
-
int wilc_lock_timeout(struct wilc *wilc, void *, u32 timeout);
void wilc_netdev_cleanup(struct wilc *wilc);
int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio,
const struct wilc_hif_func *ops);
void wilc1000_wlan_deinit(struct net_device *dev);
void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size);
-u16 wilc_set_machw_change_vir_if(struct net_device *dev, bool value);
int wilc_wlan_get_firmware(struct net_device *dev);
-int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid);
+int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode);
#endif
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 83af51bb83e8..fd938fb43dd3 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -3,54 +3,24 @@
#include "wilc_wfi_netdevice.h"
#include "wilc_wlan_cfg.h"
-#ifdef WILC_OPTIMIZE_SLEEP_INT
-static inline void chip_allow_sleep(struct wilc *wilc);
-#endif
-static inline void chip_wakeup(struct wilc *wilc);
-static u32 dbgflag = N_INIT | N_ERR | N_INTR | N_TXQ | N_RXQ;
-
-/* FIXME: replace with dev_debug() */
-static void wilc_debug(u32 flag, char *fmt, ...)
-{
- char buf[256];
- va_list args;
-
- if (flag & dbgflag) {
- va_start(args, fmt);
- vsprintf(buf, fmt, args);
- va_end(args);
-
- wilc_dbg(buf);
- }
-}
-
static CHIP_PS_STATE_T chip_ps_state = CHIP_WAKEDUP;
static inline void acquire_bus(struct wilc *wilc, BUS_ACQUIRE_T acquire)
{
mutex_lock(&wilc->hif_cs);
- #ifndef WILC_OPTIMIZE_SLEEP_INT
- if (chip_ps_state != CHIP_WAKEDUP)
- #endif
- {
- if (acquire == ACQUIRE_AND_WAKEUP)
- chip_wakeup(wilc);
- }
+ if (acquire == ACQUIRE_AND_WAKEUP)
+ chip_wakeup(wilc);
}
static inline void release_bus(struct wilc *wilc, BUS_RELEASE_T release)
{
- #ifdef WILC_OPTIMIZE_SLEEP_INT
if (release == RELEASE_ALLOW_SLEEP)
chip_allow_sleep(wilc);
- #endif
mutex_unlock(&wilc->hif_cs);
}
-#ifdef TCP_ACK_FILTER
-static void wilc_wlan_txq_remove(struct txq_entry_t *tqe)
+static void wilc_wlan_txq_remove(struct wilc *wilc, struct txq_entry_t *tqe)
{
-
if (tqe == wilc->txq_head) {
wilc->txq_head = tqe->next;
if (wilc->txq_head)
@@ -65,7 +35,6 @@ static void wilc_wlan_txq_remove(struct txq_entry_t *tqe)
}
wilc->txq_entries -= 1;
}
-#endif
static struct txq_entry_t *
wilc_wlan_txq_remove_from_head(struct net_device *dev)
@@ -117,18 +86,18 @@ static void wilc_wlan_txq_add_to_tail(struct net_device *dev,
wilc->txq_tail = tqe;
}
wilc->txq_entries += 1;
- PRINT_D(TX_DBG, "Number of entries in TxQ = %d\n", wilc->txq_entries);
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
- PRINT_D(TX_DBG, "Wake the txq_handling\n");
-
up(&wilc->txq_event);
}
-static int wilc_wlan_txq_add_to_head(struct wilc *wilc, struct txq_entry_t *tqe)
+static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
+ struct txq_entry_t *tqe)
{
unsigned long flags;
+ struct wilc *wilc = vif->wilc;
+
if (wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
CFG_PKTS_TIMEOUT))
return -1;
@@ -147,17 +116,14 @@ static int wilc_wlan_txq_add_to_head(struct wilc *wilc, struct txq_entry_t *tqe)
wilc->txq_head = tqe;
}
wilc->txq_entries += 1;
- PRINT_D(TX_DBG, "Number of entries in TxQ = %d\n", wilc->txq_entries);
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
up(&wilc->txq_add_to_head_cs);
up(&wilc->txq_event);
- PRINT_D(TX_DBG, "Wake up the txq_handler\n");
return 0;
}
-#ifdef TCP_ACK_FILTER
struct ack_session_info;
struct ack_session_info {
u32 seq_num;
@@ -173,7 +139,6 @@ struct pending_acks_info {
struct txq_entry_t *txqe;
};
-
#define NOT_TCP_ACK (-1)
#define MAX_TCP_SESSION 25
@@ -192,19 +157,20 @@ static inline int init_tcp_tracking(void)
static inline int add_tcp_session(u32 src_prt, u32 dst_prt, u32 seq)
{
- ack_session_info[tcp_session].seq_num = seq;
- ack_session_info[tcp_session].bigger_ack_num = 0;
- ack_session_info[tcp_session].src_port = src_prt;
- ack_session_info[tcp_session].dst_port = dst_prt;
- tcp_session++;
-
- PRINT_D(TCP_ENH, "TCP Session %d to Ack %d\n", tcp_session, seq);
+ if (tcp_session < 2 * MAX_TCP_SESSION) {
+ ack_session_info[tcp_session].seq_num = seq;
+ ack_session_info[tcp_session].bigger_ack_num = 0;
+ ack_session_info[tcp_session].src_port = src_prt;
+ ack_session_info[tcp_session].dst_port = dst_prt;
+ tcp_session++;
+ }
return 0;
}
static inline int update_tcp_session(u32 index, u32 ack)
{
- if (ack > ack_session_info[index].bigger_ack_num)
+ if (index < 2 * MAX_TCP_SESSION &&
+ ack > ack_session_info[index].bigger_ack_num)
ack_session_info[index].bigger_ack_num = ack;
return 0;
}
@@ -212,7 +178,7 @@ static inline int update_tcp_session(u32 index, u32 ack)
static inline int add_tcp_pending_ack(u32 ack, u32 session_index,
struct txq_entry_t *txqe)
{
- if (pending_acks < MAX_PENDING_ACKS) {
+ if (pending_base + pending_acks < MAX_PENDING_ACKS) {
pending_acks_info[pending_base + pending_acks].ack_num = ack;
pending_acks_info[pending_base + pending_acks].txqe = txqe;
pending_acks_info[pending_base + pending_acks].session_index = session_index;
@@ -221,19 +187,9 @@ static inline int add_tcp_pending_ack(u32 ack, u32 session_index,
}
return 0;
}
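
The three helpers above now refuse to write past their fixed-size tables: add_tcp_session() stops at 2 * MAX_TCP_SESSION entries, update_tcp_session() validates the index, and add_tcp_pending_ack() checks pending_base + pending_acks against MAX_PENDING_ACKS. A minimal sketch of the guarded-append pattern they share (array size and field names illustrative):

    /* sketch: bounds-checked append, as add_tcp_session() now does */
    #include <stdio.h>

    #define MAX_SESSIONS 25

    struct session { unsigned seq; unsigned bigger_ack; };

    static struct session table[2 * MAX_SESSIONS];
    static unsigned nr_sessions;

    static void add_session(unsigned seq)
    {
        if (nr_sessions >= 2 * MAX_SESSIONS)
            return;                 /* silently drop, as the driver does */
        table[nr_sessions].seq = seq;
        table[nr_sessions].bigger_ack = 0;
        nr_sessions++;
    }

    int main(void)
    {
        for (int i = 0; i < 3 * MAX_SESSIONS; i++)
            add_session(1000 + i);
        printf("stored %u of %d attempts\n", nr_sessions, 3 * MAX_SESSIONS);
        return 0;
    }
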
-static inline int remove_TCP_related(struct wilc *wilc)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&wilc->txq_spinlock, flags);
-
- spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
- return 0;
-}
-static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
+static inline void tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
{
- int ret;
u8 *eth_hdr_ptr;
u8 *buffer = tqe->buffer;
unsigned short h_proto;
@@ -245,10 +201,11 @@ static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
vif = netdev_priv(dev);
wilc = vif->wilc;
+ spin_lock_irqsave(&wilc->txq_spinlock, flags);
eth_hdr_ptr = &buffer[0];
h_proto = ntohs(*((unsigned short *)&eth_hdr_ptr[12]));
- if (h_proto == 0x0800) {
+ if (h_proto == ETH_P_IP) {
u8 *ip_hdr_ptr;
u8 protocol;
@@ -278,7 +235,8 @@ static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
(u32)tcp_hdr_ptr[11];
for (i = 0; i < tcp_session; i++) {
- if (ack_session_info[i].seq_num == seq_no) {
+ if (i < 2 * MAX_TCP_SESSION &&
+ ack_session_info[i].seq_num == seq_no) {
update_tcp_session(i, ack_no);
break;
}
@@ -288,15 +246,9 @@ static inline int tcp_process(struct net_device *dev, struct txq_entry_t *tqe)
add_tcp_pending_ack(ack_no, i, tqe);
}
-
- } else {
- ret = 0;
}
- } else {
- ret = 0;
}
spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
- return ret;
}
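
tcp_process() walks the raw frame by hand: bytes 12..13 carry the EtherType, the low nibble of the first IP byte gives the IP header length, and the ACK number sits in bytes 8..11 of the TCP header. A user-space sketch of the same offset arithmetic (field offsets per RFC 791/793; the helper name is illustrative, and the unaligned 16-bit load mirrors what the driver itself does):

    /* sketch: pull the TCP ACK number out of an Ethernet frame the way
     * tcp_process() does, with manual offsets instead of header structs */
    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohs */

    #define ETH_P_IP 0x0800

    static int frame_tcp_ack(const uint8_t *buf, uint32_t *ack_no)
    {
        uint16_t h_proto = ntohs(*(const uint16_t *)&buf[12]);
        const uint8_t *ip, *tcp;
        unsigned ihl;

        if (h_proto != ETH_P_IP)
            return -1;
        ip = &buf[14];                /* skip the 14-byte Ethernet header */
        if (ip[9] != 6)               /* IPPROTO_TCP */
            return -1;
        ihl = (ip[0] & 0xf) * 4;      /* IP header length in bytes */
        tcp = ip + ihl;
        *ack_no = (uint32_t)tcp[8] << 24 | (uint32_t)tcp[9] << 16 |
                  (uint32_t)tcp[10] << 8 | (uint32_t)tcp[11];
        return 0;
    }
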
static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
@@ -311,14 +263,15 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
spin_lock_irqsave(&wilc->txq_spinlock, wilc->txq_spinlock_flags);
for (i = pending_base; i < (pending_base + pending_acks); i++) {
+ if (i >= MAX_PENDING_ACKS ||
+ pending_acks_info[i].session_index >= 2 * MAX_TCP_SESSION)
+ break;
if (pending_acks_info[i].ack_num < ack_session_info[pending_acks_info[i].session_index].bigger_ack_num) {
struct txq_entry_t *tqe;
- PRINT_D(TCP_ENH, "DROP ACK: %u\n",
- pending_acks_info[i].ack_num);
tqe = pending_acks_info[i].txqe;
if (tqe) {
- wilc_wlan_txq_remove(tqe);
+ wilc_wlan_txq_remove(wilc, tqe);
tqe->status = 1;
if (tqe->tx_complete_func)
tqe->tx_complete_func(tqe->priv,
@@ -345,50 +298,39 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
return 1;
}
-#endif
-static bool enabled = false;
+static bool enabled;
void wilc_enable_tcp_ack_filter(bool value)
{
enabled = value;
}
-#ifdef TCP_ACK_FILTER
-static bool is_tcp_ack_filter_enabled(void)
-{
- return enabled;
-}
-#endif
-
-static int wilc_wlan_txq_add_cfg_pkt(struct wilc *wilc, u8 *buffer, u32 buffer_size)
+static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
+ u32 buffer_size)
{
struct txq_entry_t *tqe;
+ struct wilc *wilc = vif->wilc;
- PRINT_D(TX_DBG, "Adding config packet ...\n");
+ netdev_dbg(vif->ndev, "Adding config packet ...\n");
if (wilc->quit) {
- PRINT_D(TX_DBG, "Return due to clear function\n");
+ netdev_dbg(vif->ndev, "Return due to clear function\n");
up(&wilc->cfg_event);
return 0;
}
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
- if (!tqe) {
- PRINT_ER("Failed to allocate memory\n");
+ if (!tqe)
return 0;
- }
tqe->type = WILC_CFG_PKT;
tqe->buffer = buffer;
tqe->buffer_size = buffer_size;
tqe->tx_complete_func = NULL;
tqe->priv = NULL;
-#ifdef TCP_ACK_FILTER
tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
-#endif
- PRINT_D(TX_DBG, "Adding the config packet at the Queue tail\n");
- if (wilc_wlan_txq_add_to_head(wilc, tqe))
+ if (wilc_wlan_txq_add_to_head(vif, tqe))
return 0;
return 1;
}
@@ -415,12 +357,9 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
tqe->tx_complete_func = func;
tqe->priv = priv;
- PRINT_D(TX_DBG, "Adding mgmt packet at the Queue tail\n");
-#ifdef TCP_ACK_FILTER
tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
- if (is_tcp_ack_filter_enabled())
+ if (enabled)
tcp_process(dev, tqe);
-#endif
wilc_wlan_txq_add_to_tail(dev, tqe);
return wilc->txq_entries;
}
@@ -446,10 +385,7 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
tqe->buffer_size = buffer_size;
tqe->tx_complete_func = func;
tqe->priv = priv;
-#ifdef TCP_ACK_FILTER
tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
-#endif
- PRINT_D(TX_DBG, "Adding Network packet at the Queue tail\n");
wilc_wlan_txq_add_to_tail(dev, tqe);
return 1;
}
@@ -483,32 +419,26 @@ static struct txq_entry_t *wilc_wlan_txq_get_next(struct wilc *wilc,
static int wilc_wlan_rxq_add(struct wilc *wilc, struct rxq_entry_t *rqe)
{
-
if (wilc->quit)
return 0;
mutex_lock(&wilc->rxq_cs);
if (!wilc->rxq_head) {
- PRINT_D(RX_DBG, "Add to Queue head\n");
rqe->next = NULL;
wilc->rxq_head = rqe;
wilc->rxq_tail = rqe;
} else {
- PRINT_D(RX_DBG, "Add to Queue tail\n");
wilc->rxq_tail->next = rqe;
rqe->next = NULL;
wilc->rxq_tail = rqe;
}
wilc->rxq_entries += 1;
- PRINT_D(RX_DBG, "Number of queue entries: %d\n", wilc->rxq_entries);
mutex_unlock(&wilc->rxq_cs);
return wilc->rxq_entries;
}
static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc)
{
-
- PRINT_D(RX_DBG, "Getting rxQ element\n");
if (wilc->rxq_head) {
struct rxq_entry_t *rqe;
@@ -516,29 +446,26 @@ static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc)
rqe = wilc->rxq_head;
wilc->rxq_head = wilc->rxq_head->next;
wilc->rxq_entries -= 1;
- PRINT_D(RX_DBG, "RXQ entries decreased\n");
mutex_unlock(&wilc->rxq_cs);
return rqe;
}
- PRINT_D(RX_DBG, "Nothing to get from Q\n");
return NULL;
}
-#ifdef WILC_OPTIMIZE_SLEEP_INT
-
-static inline void chip_allow_sleep(struct wilc *wilc)
+void chip_allow_sleep(struct wilc *wilc)
{
u32 reg = 0;
wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
+ wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
}
+EXPORT_SYMBOL_GPL(chip_allow_sleep);
-static inline void chip_wakeup(struct wilc *wilc)
+void chip_wakeup(struct wilc *wilc)
{
- u32 reg, clk_status_reg, trials = 0;
- u32 sleep_time;
+ u32 reg, clk_status_reg;
if ((wilc->io_type & 0x1) == HIF_SPI) {
do {
@@ -548,13 +475,12 @@ static inline void chip_wakeup(struct wilc *wilc)
do {
usleep_range(2 * 1000, 2 * 1000);
- if ((wilc_get_chipid(wilc, true) == 0))
- wilc_debug(N_ERR, "Couldn't read chip id. Wake up failed\n");
-
- } while ((wilc_get_chipid(wilc, true) == 0) && ((++trials % 3) == 0));
-
+ wilc_get_chipid(wilc, true);
+ } while (wilc_get_chipid(wilc, true) == 0);
} while (wilc_get_chipid(wilc, true) == 0);
} else if ((wilc->io_type & 0x1) == HIF_SDIO) {
+ wilc->hif_func->hif_write_reg(wilc, 0xfa, 1);
+ udelay(200);
wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
do {
wilc->hif_func->hif_write_reg(wilc, 0xf0,
@@ -562,14 +488,11 @@ static inline void chip_wakeup(struct wilc *wilc)
wilc->hif_func->hif_read_reg(wilc, 0xf1,
&clk_status_reg);
- while (((clk_status_reg & 0x1) == 0) && (((++trials) % 3) == 0)) {
+ while ((clk_status_reg & 0x1) == 0) {
usleep_range(2 * 1000, 2 * 1000);
wilc->hif_func->hif_read_reg(wilc, 0xf1,
&clk_status_reg);
-
- if ((clk_status_reg & 0x1) == 0)
- wilc_debug(N_ERR, "clocks still OFF. Wake up failed\n");
}
if ((clk_status_reg & 0x1) == 0) {
wilc->hif_func->hif_write_reg(wilc, 0xf0,
@@ -579,11 +502,7 @@ static inline void chip_wakeup(struct wilc *wilc)
}
if (chip_ps_state == CHIP_SLEEPING_MANUAL) {
- wilc->hif_func->hif_read_reg(wilc, 0x1C0C, &reg);
- reg &= ~BIT(0);
- wilc->hif_func->hif_write_reg(wilc, 0x1C0C, reg);
-
- if (wilc_get_chipid(wilc, false) >= 0x1002b0) {
+ if (wilc_get_chipid(wilc, false) < 0x1002b0) {
u32 val32;
wilc->hif_func->hif_read_reg(wilc, 0x1e1c, &val32);
@@ -597,71 +516,37 @@ static inline void chip_wakeup(struct wilc *wilc)
}
chip_ps_state = CHIP_WAKEDUP;
}
-#else
-static inline void chip_wakeup(struct wilc *wilc)
-{
- u32 reg, trials = 0;
-
- do {
- if ((wilc->io_type & 0x1) == HIF_SPI) {
- wilc->hif_func->hif_read_reg(wilc, 1, &reg);
- wilc->hif_func->hif_write_reg(wilc, 1, reg & ~BIT(1));
- wilc->hif_func->hif_write_reg(wilc, 1, reg | BIT(1));
- wilc->hif_func->hif_write_reg(wilc, 1, reg & ~BIT(1));
- } else if ((wilc->io_type & 0x1) == HIF_SDIO) {
- wilc->hif_func->hif_read_reg(wilc, 0xf0, &reg);
- wilc->hif_func->hif_write_reg(wilc, 0xf0,
- reg & ~BIT(0));
- wilc->hif_func->hif_write_reg(wilc, 0xf0,
- reg | BIT(0));
- wilc->hif_func->hif_write_reg(wilc, 0xf0,
- reg & ~BIT(0));
- }
-
- do {
- mdelay(3);
+EXPORT_SYMBOL_GPL(chip_wakeup);
- if ((wilc_get_chipid(wilc, true) == 0))
- wilc_debug(N_ERR, "Couldn't read chip id. Wake up failed\n");
-
- } while ((wilc_get_chipid(wilc, true) == 0) && ((++trials % 3) == 0));
-
- } while (wilc_get_chipid(wilc, true) == 0);
-
- if (chip_ps_state == CHIP_SLEEPING_MANUAL) {
- wilc->hif_func->hif_read_reg(wilc, 0x1C0C, &reg);
- reg &= ~BIT(0);
- wilc->hif_func->hif_write_reg(wilc, 0x1C0C, reg);
-
- if (wilc_get_chipid(wilc, false) >= 0x1002b0) {
- u32 val32;
-
- wilc->hif_func->hif_read_reg(wilc, 0x1e1c, &val32);
- val32 |= BIT(6);
- wilc->hif_func->hif_write_reg(wilc, 0x1e1c, val32);
-
- wilc->hif_func->hif_read_reg(wilc, 0x1e9c, &val32);
- val32 |= BIT(6);
- wilc->hif_func->hif_write_reg(wilc, 0x1e9c, val32);
- }
- }
- chip_ps_state = CHIP_WAKEDUP;
-}
-#endif
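
With the #else branch gone, the single remaining chip_wakeup() implements the wake handshake per bus. On SDIO it is: write 1 to register 0xfa, wait about 200 us, set bit 0 of 0xf0, then poll bit 0 of 0xf1 until the chip reports a stable clock. The effective sequence, sketched against a hypothetical register backend (reg_read/reg_write stand in for the hif_read_reg/hif_write_reg hooks):

    /* sketch: the SDIO wake handshake of chip_wakeup(); reg_read/reg_write
     * are hypothetical stand-ins for the wilc_hif_func register hooks */
    #include <stdint.h>
    #include <unistd.h>

    extern void reg_write(uint32_t addr, uint32_t val);
    extern void reg_read(uint32_t addr, uint32_t *val);

    static void sdio_chip_wakeup(void)
    {
        uint32_t reg, clk_status;

        reg_write(0xfa, 1);           /* announce host activity */
        usleep(200);
        reg_read(0xf0, &reg);
        reg_write(0xf0, reg | 1);     /* request wakeup via bit 0 */
        reg_read(0xf1, &clk_status);
        while ((clk_status & 1) == 0) {   /* clock not stable yet */
            usleep(2000);
            reg_read(0xf1, &clk_status);
        }
    }
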
void wilc_chip_sleep_manually(struct wilc *wilc)
{
if (chip_ps_state != CHIP_WAKEDUP)
return;
acquire_bus(wilc, ACQUIRE_ONLY);
-#ifdef WILC_OPTIMIZE_SLEEP_INT
chip_allow_sleep(wilc);
-#endif
wilc->hif_func->hif_write_reg(wilc, 0x10a8, 1);
chip_ps_state = CHIP_SLEEPING_MANUAL;
release_bus(wilc, RELEASE_ONLY);
}
+EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
+
+void host_wakeup_notify(struct wilc *wilc)
+{
+ acquire_bus(wilc, ACQUIRE_ONLY);
+ wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
+ release_bus(wilc, RELEASE_ONLY);
+}
+EXPORT_SYMBOL_GPL(host_wakeup_notify);
+
+void host_sleep_notify(struct wilc *wilc)
+{
+ acquire_bus(wilc, ACQUIRE_ONLY);
+ wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
+ release_bus(wilc, RELEASE_ONLY);
+}
+EXPORT_SYMBOL_GPL(host_sleep_notify);
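
host_wakeup_notify() and host_sleep_notify() are thin doorbell writes (1 to 0x10b0 on wakeup, 1 to 0x10ac on sleep) wrapped in the acquire_bus()/release_bus() bracket so they never interleave with other hif traffic. The bracketing pattern, sketched with a plain mutex (names and backend are illustrative):

    /* sketch: the lock/write/unlock bracket used by the notify helpers */
    #include <pthread.h>
    #include <stdint.h>

    extern void reg_write(uint32_t addr, uint32_t val);  /* hypothetical backend */

    static pthread_mutex_t hif_lock = PTHREAD_MUTEX_INITIALIZER;

    static void doorbell(uint32_t addr)
    {
        pthread_mutex_lock(&hif_lock);    /* acquire_bus(wilc, ACQUIRE_ONLY) */
        reg_write(addr, 1);
        pthread_mutex_unlock(&hif_lock);  /* release_bus(wilc, RELEASE_ONLY) */
    }
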
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
{
@@ -690,10 +575,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
CFG_PKTS_TIMEOUT);
-#ifdef TCP_ACK_FILTER
wilc_wlan_txq_filter_dup_tcp_ack(dev);
-#endif
- PRINT_D(TX_DBG, "Getting the head of the TxQ\n");
tqe = wilc_wlan_txq_get_first(wilc);
i = 0;
sum = 0;
@@ -709,65 +591,48 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
vmm_sz = HOST_HDR_OFFSET;
vmm_sz += tqe->buffer_size;
- PRINT_D(TX_DBG, "VMM Size before alignment = %d\n", vmm_sz);
+
if (vmm_sz & 0x3)
vmm_sz = (vmm_sz + 4) & ~0x3;
if ((sum + vmm_sz) > LINUX_TX_SIZE)
break;
- PRINT_D(TX_DBG, "VMM Size AFTER alignment = %d\n", vmm_sz);
vmm_table[i] = vmm_sz / 4;
- PRINT_D(TX_DBG, "VMMTable entry size = %d\n",
- vmm_table[i]);
-
- if (tqe->type == WILC_CFG_PKT) {
+ if (tqe->type == WILC_CFG_PKT)
vmm_table[i] |= BIT(10);
- PRINT_D(TX_DBG, "VMMTable entry changed for CFG packet = %d\n", vmm_table[i]);
- }
vmm_table[i] = cpu_to_le32(vmm_table[i]);
i++;
sum += vmm_sz;
- PRINT_D(TX_DBG, "sum = %d\n", sum);
tqe = wilc_wlan_txq_get_next(wilc, tqe);
} else {
break;
}
} while (1);
- if (i == 0) {
- PRINT_D(TX_DBG, "Nothing in TX-Q\n");
+ if (i == 0)
break;
- } else {
- PRINT_D(TX_DBG, "Mark the last entry in VMM table - number of previous entries = %d\n", i);
- vmm_table[i] = 0x0;
- }
+ vmm_table[i] = 0x0;
+
acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
counter = 0;
do {
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_TX_CTRL,
- &reg);
- if (!ret) {
- wilc_debug(N_ERR, "[wilc txq]: fail can't read reg vmm_tbl_entry..\n");
+ ret = wilc->hif_func->hif_read_reg(wilc,
+ WILC_HOST_TX_CTRL,
+ &reg);
+ if (!ret)
break;
- }
if ((reg & 0x1) == 0) {
- PRINT_D(TX_DBG, "Writing VMM table ... with Size = %d\n", ((i + 1) * 4));
break;
} else {
counter++;
if (counter > 200) {
counter = 0;
- PRINT_D(TX_DBG, "Looping in tx ctrl , forcce quit\n");
ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
break;
}
- PRINT_WRN(GENERIC_DBG, "[wilc txq]: warn, vmm table not clear yet, wait...\n");
- release_bus(wilc, RELEASE_ALLOW_SLEEP);
- usleep_range(3000, 3000);
- acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
}
} while (!wilc->quit);
@@ -777,32 +642,24 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
timeout = 200;
do {
ret = wilc->hif_func->hif_block_tx(wilc, WILC_VMM_TBL_RX_SHADOW_BASE, (u8 *)vmm_table, ((i + 1) * 4));
- if (!ret) {
- wilc_debug(N_ERR, "ERR block TX of VMM table.\n");
+ if (!ret)
break;
- }
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_VMM_CTL,
- 0x2);
- if (!ret) {
- wilc_debug(N_ERR, "[wilc txq]: fail can't write reg host_vmm_ctl..\n");
+ ret = wilc->hif_func->hif_write_reg(wilc,
+ WILC_HOST_VMM_CTL,
+ 0x2);
+ if (!ret)
break;
- }
do {
ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, &reg);
- if (!ret) {
- wilc_debug(N_ERR, "[wilc txq]: fail can't read reg host_vmm_ctl..\n");
+ if (!ret)
break;
- }
if ((reg >> 2) & 0x1) {
entries = ((reg >> 3) & 0x3f);
break;
} else {
release_bus(wilc, RELEASE_ALLOW_SLEEP);
- usleep_range(3000, 3000);
- acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
- PRINT_WRN(GENERIC_DBG, "Can't get VMM entery - reg = %2x\n", reg);
}
} while (--timeout);
if (timeout <= 0) {
@@ -814,19 +671,13 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
break;
if (entries == 0) {
- PRINT_WRN(GENERIC_DBG, "[wilc txq]: no more buffer in the chip (reg: %08x), retry later [[ %d, %x ]]\n", reg, i, vmm_table[i - 1]);
-
ret = wilc->hif_func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg);
- if (!ret) {
- wilc_debug(N_ERR, "[wilc txq]: fail can't read reg WILC_HOST_TX_CTRL..\n");
+ if (!ret)
break;
- }
reg &= ~BIT(0);
ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, reg);
- if (!ret) {
- wilc_debug(N_ERR, "[wilc txq]: fail can't write reg WILC_HOST_TX_CTRL..\n");
+ if (!ret)
break;
- }
break;
} else {
break;
@@ -866,7 +717,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (tqe->type == WILC_CFG_PKT) {
buffer_offset = ETH_CONFIG_PKT_HDR_OFFSET;
} else if (tqe->type == WILC_NET_PKT) {
- char *bssid = ((struct tx_complete_data *)(tqe->priv))->pBssid;
+ char *bssid = ((struct tx_complete_data *)(tqe->priv))->bssid;
buffer_offset = ETH_ETHERNET_HDR_OFFSET;
memcpy(&txb[offset + 4], bssid, 6);
@@ -882,10 +733,9 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (tqe->tx_complete_func)
tqe->tx_complete_func(tqe->priv,
tqe->status);
- #ifdef TCP_ACK_FILTER
- if (tqe->tcp_pending_ack_idx != NOT_TCP_ACK)
+ if (tqe->tcp_pending_ack_idx != NOT_TCP_ACK &&
+ tqe->tcp_pending_ack_idx < MAX_PENDING_ACKS)
pending_acks_info[tqe->tcp_pending_ack_idx].txqe = NULL;
- #endif
kfree(tqe);
} else {
break;
@@ -895,16 +745,12 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
ret = wilc->hif_func->hif_clear_int_ext(wilc, ENABLE_TX_VMM);
- if (!ret) {
- wilc_debug(N_ERR, "[wilc txq]: fail can't start tx VMM ...\n");
+ if (!ret)
goto _end_;
- }
ret = wilc->hif_func->hif_block_tx_ext(wilc, 0, txb, offset);
- if (!ret) {
- wilc_debug(N_ERR, "[wilc txq]: fail can't block tx ext...\n");
+ if (!ret)
goto _end_;
- }
_end_:
@@ -915,14 +761,13 @@ _end_:
up(&wilc->txq_add_to_head_cs);
wilc->txq_exit = 1;
- PRINT_D(TX_DBG, "THREAD: Exiting txq\n");
*txq_count = wilc->txq_entries;
return ret;
}
static void wilc_wlan_handle_rxq(struct wilc *wilc)
{
- int offset = 0, size, has_packet = 0;
+ int offset = 0, size;
u8 *buffer;
struct rxq_entry_t *rqe;
@@ -930,19 +775,15 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
do {
if (wilc->quit) {
- PRINT_D(RX_DBG, "exit 1st do-while due to Clean_UP function\n");
up(&wilc->cfg_event);
break;
}
rqe = wilc_wlan_rxq_remove(wilc);
- if (!rqe) {
- PRINT_D(RX_DBG, "nothing in the queue - exit 1st do-while\n");
+ if (!rqe)
break;
- }
+
buffer = rqe->buffer;
size = rqe->buffer_size;
- PRINT_D(RX_DBG, "rxQ entery Size = %d - Address = %p\n",
- size, buffer);
offset = 0;
do {
@@ -950,21 +791,16 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
u32 pkt_len, pkt_offset, tp_len;
int is_cfg_packet;
- PRINT_D(RX_DBG, "In the 2nd do-while\n");
memcpy(&header, &buffer[offset], 4);
header = cpu_to_le32(header);
- PRINT_D(RX_DBG, "Header = %04x - Offset = %d\n",
- header, offset);
is_cfg_packet = (header >> 31) & 0x1;
pkt_offset = (header >> 22) & 0x1ff;
tp_len = (header >> 11) & 0x7ff;
pkt_len = header & 0x7ff;
- if (pkt_len == 0 || tp_len == 0) {
- wilc_debug(N_RXQ, "[wilc rxq]: data corrupt, packet len or tp_len is 0 [%d][%d]\n", pkt_len, tp_len);
+ if (pkt_len == 0 || tp_len == 0)
break;
- }
#define IS_MANAGMEMENT 0x100
#define IS_MANAGMEMENT_CALLBACK 0x080
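
Each packet in the RX buffer starts with the 32-bit header the loop above unpacks by shifting: bit 31 flags a config packet, bits 22..30 hold the packet offset, bits 11..21 the transport length, and bits 0..10 the packet length; a zero pkt_len or tp_len aborts the walk as corrupt. The same unpacking as a standalone helper:

    /* sketch: unpack the per-packet RX header of wilc_wlan_handle_rxq() */
    #include <stdint.h>

    struct rx_hdr {
        unsigned is_cfg;      /* bit 31 */
        unsigned pkt_offset;  /* bits 22..30 */
        unsigned tp_len;      /* bits 11..21 */
        unsigned pkt_len;     /* bits 0..10 */
    };

    static int unpack_rx_hdr(uint32_t header, struct rx_hdr *h)
    {
        h->is_cfg     = (header >> 31) & 0x1;
        h->pkt_offset = (header >> 22) & 0x1ff;
        h->tp_len     = (header >> 11) & 0x7ff;
        h->pkt_len    = header & 0x7ff;
        return (h->pkt_len && h->tp_len) ? 0 : -1;  /* zero length = corrupt */
    }
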
@@ -983,14 +819,12 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
&buffer[offset],
pkt_len,
pkt_offset);
- has_packet = 1;
}
} else {
struct wilc_cfg_rsp rsp;
wilc_wlan_cfg_indicate_rx(wilc, &buffer[pkt_offset + offset], pkt_len, &rsp);
if (rsp.type == WILC_CFG_RSP) {
- PRINT_D(RX_DBG, "wilc->cfg_seq_no = %d - rsp.seq_no = %d\n", wilc->cfg_seq_no, rsp.seq_no);
if (wilc->cfg_seq_no == rsp.seq_no)
up(&wilc->cfg_event);
} else if (rsp.type == WILC_CFG_RSP_STATUS) {
@@ -1006,14 +840,9 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
break;
} while (1);
kfree(rqe);
-
- if (has_packet)
- wilc_rx_complete(wilc);
-
} while (1);
wilc->rxq_exit = 1;
- PRINT_D(RX_DBG, "THREAD: Exiting RX thread\n");
}
static void wilc_unknown_isr_ext(struct wilc *wilc)
@@ -1032,18 +861,13 @@ static void wilc_pllupdate_isr_ext(struct wilc *wilc, u32 int_stats)
else
mdelay(WILC_PLL_TO_SPI);
- while (!(ISWILC1000(wilc_get_chipid(wilc, true)) && --trials)) {
- PRINT_D(TX_DBG, "PLL update retrying\n");
+ while (!(ISWILC1000(wilc_get_chipid(wilc, true)) && --trials))
mdelay(1);
- }
}
static void wilc_sleeptimer_isr_ext(struct wilc *wilc, u32 int_stats1)
{
wilc->hif_func->hif_clear_int_ext(wilc, SLEEP_INT_CLR);
-#ifndef WILC_OPTIMIZE_SLEEP_INT
- chip_ps_state = CHIP_SLEEPING_AUTO;
-#endif
}
static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
@@ -1055,14 +879,11 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
int ret = 0;
struct rxq_entry_t *rqe;
- size = ((int_status & 0x7fff) << 2);
+ size = (int_status & 0x7fff) << 2;
while (!size && retries < 10) {
- u32 time = 0;
-
- wilc_debug(N_ERR, "RX Size equal zero ... Trying to read it again for %d time\n", time++);
wilc->hif_func->hif_read_size(wilc, &size);
- size = ((size & 0x7fff) << 2);
+ size = (size & 0x7fff) << 2;
retries++;
}
@@ -1070,21 +891,17 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
if (LINUX_RX_SIZE - offset < size)
offset = 0;
- if (wilc->rx_buffer) {
+ if (wilc->rx_buffer)
buffer = &wilc->rx_buffer[offset];
- } else {
- wilc_debug(N_ERR, "[wilc isr]: fail Rx Buffer is NULL...drop the packets (%d)\n", size);
+ else
goto _end_;
- }
wilc->hif_func->hif_clear_int_ext(wilc,
DATA_INT_CLR | ENABLE_RX_VMM);
ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
- if (!ret) {
- wilc_debug(N_ERR, "[wilc isr]: fail block rx...\n");
+ if (!ret)
goto _end_;
- }
_end_:
if (ret) {
offset += size;
@@ -1093,7 +910,6 @@ _end_:
if (rqe) {
rqe->buffer = buffer;
rqe->buffer_size = size;
- PRINT_D(RX_DBG, "rxq entery Size= %d - Address = %p\n", rqe->buffer_size, rqe->buffer);
wilc_wlan_rxq_add(wilc, rqe);
}
}
@@ -1111,23 +927,21 @@ void wilc_handle_isr(struct wilc *wilc)
if (int_status & PLL_INT_EXT)
wilc_pllupdate_isr_ext(wilc, int_status);
- if (int_status & DATA_INT_EXT) {
+ if (int_status & DATA_INT_EXT)
wilc_wlan_handle_isr_ext(wilc, int_status);
- #ifndef WILC_OPTIMIZE_SLEEP_INT
- chip_ps_state = CHIP_WAKEDUP;
- #endif
- }
+
if (int_status & SLEEP_INT_EXT)
wilc_sleeptimer_isr_ext(wilc, int_status);
- if (!(int_status & (ALL_INT_EXT))) {
+ if (!(int_status & (ALL_INT_EXT)))
wilc_unknown_isr_ext(wilc);
- }
+
release_bus(wilc, RELEASE_ALLOW_SLEEP);
}
EXPORT_SYMBOL_GPL(wilc_handle_isr);
-int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_size)
+int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
+ u32 buffer_size)
{
u32 offset;
u32 addr, size, size2, blksz;
@@ -1139,12 +953,9 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_
dma_buffer = kmalloc(blksz, GFP_KERNEL);
if (!dma_buffer) {
ret = -EIO;
- PRINT_ER("Can't allocate buffer for firmware download IO error\n ");
goto _fail_1;
}
- PRINT_D(INIT_DBG, "Downloading firmware size = %d ...\n", buffer_size);
-
offset = 0;
do {
memcpy(&addr, &buffer[offset], 4);
@@ -1160,8 +971,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_
size2 = blksz;
memcpy(dma_buffer, &buffer[offset], size2);
- ret = wilc->hif_func->hif_block_tx(wilc, addr, dma_buffer,
- size2);
+ ret = wilc->hif_func->hif_block_tx(wilc, addr,
+ dma_buffer, size2);
if (!ret)
break;
@@ -1173,10 +984,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_
if (!ret) {
ret = -EIO;
- PRINT_ER("Can't download firmware IO error\n ");
goto _fail_;
}
- PRINT_D(INIT_DBG, "Offset = %d\n", offset);
} while (offset < buffer_size);
_fail_:
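
The download loop above treats the firmware image as a sequence of records, each one a 4-byte load address, a 4-byte size, then that many payload bytes, streamed to the chip in blksz chunks. A sketch that walks the same record layout (layout inferred from the loop; the truncation check is an addition):

    /* sketch: iterate the (addr, size, payload) records of a wilc firmware blob */
    #include <stdint.h>
    #include <string.h>

    static int walk_fw_records(const uint8_t *buf, uint32_t len)
    {
        uint32_t offset = 0, addr, size;

        while (offset + 8 <= len) {
            memcpy(&addr, &buf[offset], 4);      /* image is little-endian */
            memcpy(&size, &buf[offset + 4], 4);
            offset += 8;
            if (size > len - offset)
                return -1;                       /* truncated record */
            /* payload spans buf[offset .. offset + size), sent blksz at a time */
            offset += size;
        }
        return 0;
    }
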
@@ -1203,7 +1012,6 @@ int wilc_wlan_start(struct wilc *wilc)
acquire_bus(wilc, ACQUIRE_ONLY);
ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg);
if (!ret) {
- wilc_debug(N_ERR, "[wilc start]: fail write reg vmm_core_cfg...\n");
release_bus(wilc, RELEASE_ONLY);
ret = -EIO;
return ret;
@@ -1226,7 +1034,7 @@ int wilc_wlan_start(struct wilc *wilc)
#ifdef WILC_EXT_PA_INV_TX_RX
reg |= WILC_HAVE_EXT_PA_INV_TX_RX;
#endif
-
+ reg |= WILC_HAVE_USE_IRQ_AS_HOST_WAKE;
reg |= WILC_HAVE_LEGACY_RF_SETTINGS;
#ifdef XTAL_24
reg |= WILC_HAVE_XTAL_24;
@@ -1237,7 +1045,6 @@ int wilc_wlan_start(struct wilc *wilc)
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg);
if (!ret) {
- wilc_debug(N_ERR, "[wilc start]: fail write WILC_GP_REG_1 ...\n");
release_bus(wilc, RELEASE_ONLY);
ret = -EIO;
return ret;
@@ -1247,7 +1054,6 @@ int wilc_wlan_start(struct wilc *wilc)
ret = wilc->hif_func->hif_read_reg(wilc, 0x1000, &chipid);
if (!ret) {
- wilc_debug(N_ERR, "[wilc start]: fail read reg 0x1000 ...\n");
release_bus(wilc, RELEASE_ONLY);
ret = -EIO;
return ret;
@@ -1268,22 +1074,16 @@ int wilc_wlan_start(struct wilc *wilc)
return (ret < 0) ? ret : 0;
}
-void wilc_wlan_global_reset(struct wilc *wilc)
-{
- acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
- wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, 0x0);
- release_bus(wilc, RELEASE_ONLY);
-}
int wilc_wlan_stop(struct wilc *wilc)
{
u32 reg = 0;
int ret;
u8 timeout = 10;
+
acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
if (!ret) {
- PRINT_ER("Error while reading reg\n");
release_bus(wilc, RELEASE_ALLOW_SLEEP);
return ret;
}
@@ -1291,40 +1091,32 @@ int wilc_wlan_stop(struct wilc *wilc)
reg &= ~BIT(10);
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg);
if (!ret) {
- PRINT_ER("Error while writing reg\n");
release_bus(wilc, RELEASE_ALLOW_SLEEP);
return ret;
}
do {
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+ ret = wilc->hif_func->hif_read_reg(wilc,
+ WILC_GLB_RESET_0, &reg);
if (!ret) {
- PRINT_ER("Error while reading reg\n");
release_bus(wilc, RELEASE_ALLOW_SLEEP);
return ret;
}
- PRINT_D(GENERIC_DBG, "Read RESET Reg %x : Retry%d\n",
- reg, timeout);
if ((reg & BIT(10))) {
- PRINT_D(GENERIC_DBG, "Bit 10 not reset : Retry %d\n",
- timeout);
reg &= ~BIT(10);
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0,
- reg);
+ ret = wilc->hif_func->hif_write_reg(wilc,
+ WILC_GLB_RESET_0,
+ reg);
timeout--;
} else {
- PRINT_D(GENERIC_DBG, "Bit 10 reset after : Retry %d\n",
- timeout);
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0,
- &reg);
+ ret = wilc->hif_func->hif_read_reg(wilc,
+ WILC_GLB_RESET_0,
+ &reg);
if (!ret) {
- PRINT_ER("Error while reading reg\n");
release_bus(wilc, RELEASE_ALLOW_SLEEP);
return ret;
}
- PRINT_D(GENERIC_DBG, "Read RESET Reg %x : Retry%d\n",
- reg, timeout);
break;
}
@@ -1379,23 +1171,22 @@ void wilc_wlan_cleanup(struct net_device *dev)
acquire_bus(wilc, ACQUIRE_AND_WAKEUP);
ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (!ret) {
- PRINT_ER("Error while reading reg\n");
+ if (!ret)
release_bus(wilc, RELEASE_ALLOW_SLEEP);
- }
- PRINT_ER("Writing ABORT reg\n");
+
ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
(reg | ABORT_INT));
- if (!ret) {
- PRINT_ER("Error while writing reg\n");
+ if (!ret)
release_bus(wilc, RELEASE_ALLOW_SLEEP);
- }
+
release_bus(wilc, RELEASE_ALLOW_SLEEP);
wilc->hif_func->hif_deinit(NULL);
}
-static int wilc_wlan_cfg_commit(struct wilc *wilc, int type, u32 drv_handler)
+static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type,
+ u32 drv_handler)
{
+ struct wilc *wilc = vif->wilc;
struct wilc_cfg_frame *cfg = &wilc->cfg_frame;
int total_len = wilc->cfg_frame_offset + 4 + DRIVER_HANDLER_SIZE;
int seq_no = wilc->cfg_seq_no % 256;
@@ -1414,17 +1205,18 @@ static int wilc_wlan_cfg_commit(struct wilc *wilc, int type, u32 drv_handler)
cfg->wid_header[7] = (u8)(driver_handler >> 24);
wilc->cfg_seq_no = seq_no;
- if (!wilc_wlan_txq_add_cfg_pkt(wilc, &cfg->wid_header[0], total_len))
+ if (!wilc_wlan_txq_add_cfg_pkt(vif, &cfg->wid_header[0], total_len))
return -1;
return 0;
}
-int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler)
{
u32 offset;
int ret_size;
+ struct wilc *wilc = vif->wilc;
if (wilc->cfg_frame_in_use)
return 0;
@@ -1439,17 +1231,18 @@ int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer,
wilc->cfg_frame_offset = offset;
if (commit) {
- PRINT_D(TX_DBG, "[WILC]PACKET Commit with sequence number %d\n",
- wilc->cfg_seq_no);
- PRINT_D(RX_DBG, "Processing cfg_set()\n");
+ netdev_dbg(vif->ndev,
+ "[WILC]PACKET Commit with sequence number %d\n",
+ wilc->cfg_seq_no);
+ netdev_dbg(vif->ndev, "Processing cfg_set()\n");
wilc->cfg_frame_in_use = 1;
- if (wilc_wlan_cfg_commit(wilc, WILC_CFG_SET, drv_handler))
+ if (wilc_wlan_cfg_commit(vif, WILC_CFG_SET, drv_handler))
ret_size = 0;
if (wilc_lock_timeout(wilc, &wilc->cfg_event,
CFG_PKTS_TIMEOUT)) {
- PRINT_D(TX_DBG, "Set Timed Out\n");
+ netdev_dbg(vif->ndev, "Set Timed Out\n");
ret_size = 0;
}
wilc->cfg_frame_in_use = 0;
@@ -1460,11 +1253,12 @@ int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer,
return ret_size;
}
-int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
u32 drv_handler)
{
u32 offset;
int ret_size;
+ struct wilc *wilc = vif->wilc;
if (wilc->cfg_frame_in_use)
return 0;
@@ -1481,15 +1275,14 @@ int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit,
if (commit) {
wilc->cfg_frame_in_use = 1;
- if (wilc_wlan_cfg_commit(wilc, WILC_CFG_QUERY, drv_handler))
+ if (wilc_wlan_cfg_commit(vif, WILC_CFG_QUERY, drv_handler))
ret_size = 0;
if (wilc_lock_timeout(wilc, &wilc->cfg_event,
CFG_PKTS_TIMEOUT)) {
- PRINT_D(TX_DBG, "Get Timed Out\n");
+ netdev_dbg(vif->ndev, "Get Timed Out\n");
ret_size = 0;
}
- PRINT_D(GENERIC_DBG, "[WILC]Get Response received\n");
wilc->cfg_frame_in_use = 0;
wilc->cfg_frame_offset = 0;
wilc->cfg_seq_no += 1;
@@ -1500,9 +1293,43 @@ int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit,
int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size)
{
- int ret;
+ return wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size);
+}
- ret = wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size);
+int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
+ u32 count, u32 drv)
+{
+ int i;
+ int ret = 0;
+
+ if (mode == GET_CFG) {
+ for (i = 0; i < count; i++) {
+ if (!wilc_wlan_cfg_get(vif, !i,
+ wids[i].id,
+ (i == count - 1),
+ drv)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+ for (i = 0; i < count; i++) {
+ wids[i].size = wilc_wlan_cfg_get_val(wids[i].id,
+ wids[i].val,
+ wids[i].size);
+ }
+ } else if (mode == SET_CFG) {
+ for (i = 0; i < count; i++) {
+ if (!wilc_wlan_cfg_set(vif, !i,
+ wids[i].id,
+ wids[i].val,
+ wids[i].size,
+ (i == count - 1),
+ drv)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+ }
return ret;
}
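
wilc_send_config_pkt() batches a WID array into one frame: the first element opens it (start = !i), the last one commits (i == count - 1), and for GET_CFG a second pass copies the returned values back through wilc_wlan_cfg_get_val(). A call-order sketch for querying a single WID (the WID id, buffer, and handle values are illustrative, and field types follow the driver headers rather than being spelled out here):

    /* call-order sketch, not a tested sequence: query one WID */
    struct wid wid_list[1];
    u8 ssid[34] = {0};

    wid_list[0].id = WID_SSID;        /* illustrative WID */
    wid_list[0].val = ssid;
    wid_list[0].size = sizeof(ssid);

    if (wilc_send_config_pkt(vif, GET_CFG, wid_list, 1, drv_handler))
        netdev_err(vif->ndev, "WID query timed out\n");
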
@@ -1524,18 +1351,18 @@ static u32 init_chip(struct net_device *dev)
if ((chipid & 0xfff) != 0xa0) {
ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
if (!ret) {
- wilc_debug(N_ERR, "[wilc start]: fail read reg 0x1118 ...\n");
+ netdev_err(dev, "failed to read reg 0x1118\n");
return ret;
}
reg |= BIT(0);
ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
if (!ret) {
- wilc_debug(N_ERR, "[wilc start]: fail write reg 0x1118 ...\n");
+ netdev_err(dev, "failed to write reg 0x1118\n");
return ret;
}
ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
if (!ret) {
- wilc_debug(N_ERR, "[wilc start]: fail write reg 0xc0000 ...\n");
+ netdev_err(dev, "failed to write reg 0xc0000\n");
return ret;
}
}
@@ -1545,36 +1372,31 @@ static u32 init_chip(struct net_device *dev)
return ret;
}
-u32 wilc_get_chipid(struct wilc *wilc, u8 update)
+u32 wilc_get_chipid(struct wilc *wilc, bool update)
{
static u32 chipid;
u32 tempchipid = 0;
- u32 rfrevid;
+ u32 rfrevid = 0;
- if (chipid == 0 || update != 0) {
+ if (chipid == 0 || update) {
wilc->hif_func->hif_read_reg(wilc, 0x1000, &tempchipid);
wilc->hif_func->hif_read_reg(wilc, 0x13f4, &rfrevid);
if (!ISWILC1000(tempchipid)) {
chipid = 0;
- goto _fail_;
+ return chipid;
}
if (tempchipid == 0x1002a0) {
- if (rfrevid == 0x1) {
- } else {
+ if (rfrevid != 0x1)
tempchipid = 0x1002a1;
- }
} else if (tempchipid == 0x1002b0) {
- if (rfrevid == 3) {
- } else if (rfrevid == 4) {
+ if (rfrevid == 0x4)
tempchipid = 0x1002b1;
- } else {
+ else if (rfrevid != 0x3)
tempchipid = 0x1002b2;
- }
}
chipid = tempchipid;
}
-_fail_:
return chipid;
}
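
wilc_get_chipid() caches its result and folds the RF revision register (0x13f4) into the raw id read from 0x1000: a 0x1002a0 part with rfrevid other than 1 is reported as 0x1002a1, while a 0x1002b0 part becomes 0x1002b1 for rfrevid 4 and 0x1002b2 for anything other than 3. The mapping as a pure function:

    /* sketch: the chip-id / rf-revision mapping, side-effect free */
    #include <stdint.h>

    static uint32_t map_chipid(uint32_t chipid, uint32_t rfrevid)
    {
        if (chipid == 0x1002a0 && rfrevid != 0x1)
            return 0x1002a1;
        if (chipid == 0x1002b0) {
            if (rfrevid == 0x4)
                return 0x1002b1;
            if (rfrevid != 0x3)
                return 0x1002b2;
        }
        return chipid;
    }
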
@@ -1586,34 +1408,31 @@ int wilc_wlan_init(struct net_device *dev)
wilc = vif->wilc;
- PRINT_D(INIT_DBG, "Initializing WILC_Wlan ...\n");
+ wilc->quit = 0;
- if (!wilc->hif_func->hif_init(wilc)) {
+ if (!wilc->hif_func->hif_init(wilc, false)) {
ret = -EIO;
goto _fail_;
}
- if (!wilc_wlan_cfg_init(wilc_debug)) {
+ if (!wilc_wlan_cfg_init()) {
ret = -ENOBUFS;
goto _fail_;
}
if (!wilc->tx_buffer)
wilc->tx_buffer = kmalloc(LINUX_TX_SIZE, GFP_KERNEL);
- PRINT_D(TX_DBG, "wilc->tx_buffer = %p\n", wilc->tx_buffer);
if (!wilc->tx_buffer) {
ret = -ENOBUFS;
- PRINT_ER("Can't allocate Tx Buffer");
goto _fail_;
}
if (!wilc->rx_buffer)
wilc->rx_buffer = kmalloc(LINUX_RX_SIZE, GFP_KERNEL);
- PRINT_D(TX_DBG, "wilc->rx_buffer =%p\n", wilc->rx_buffer);
+
if (!wilc->rx_buffer) {
ret = -ENOBUFS;
- PRINT_ER("Can't allocate Rx Buffer");
goto _fail_;
}
@@ -1621,9 +1440,7 @@ int wilc_wlan_init(struct net_device *dev)
ret = -EIO;
goto _fail_;
}
-#ifdef TCP_ACK_FILTER
init_tcp_tracking();
-#endif
return 1;
@@ -1636,35 +1453,3 @@ _fail_:
return ret;
}
-
-u16 wilc_set_machw_change_vir_if(struct net_device *dev, bool value)
-{
- u16 ret;
- u32 reg;
- struct wilc_vif *vif;
- struct wilc *wilc;
-
- vif = netdev_priv(dev);
- wilc = vif->wilc;
-
- mutex_lock(&wilc->hif_cs);
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_CHANGING_VIR_IF,
- &reg);
- if (!ret)
- PRINT_ER("Error while Reading reg WILC_CHANGING_VIR_IF\n");
-
- if (value)
- reg |= BIT(31);
- else
- reg &= ~BIT(31);
-
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_CHANGING_VIR_IF,
- reg);
-
- if (!ret)
- PRINT_ER("Error while writing reg WILC_CHANGING_VIR_IF\n");
-
- mutex_unlock(&wilc->hif_cs);
-
- return ret;
-}
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index 2edd7445f4a3..bcd4bfa5accc 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -106,6 +106,7 @@
#define WILC_HAVE_LEGACY_RF_SETTINGS BIT(5)
#define WILC_HAVE_XTAL_24 BIT(6)
#define WILC_HAVE_DISABLE_WILC_UART BIT(7)
+#define WILC_HAVE_USE_IRQ_AS_HOST_WAKE BIT(8)
/********************************************
*
@@ -127,6 +128,11 @@
#define WILC_PLL_TO_SPI 2
#define ABORT_INT BIT(31)
+#define LINUX_RX_SIZE (96 * 1024)
+#define LINUX_TX_SIZE (64 * 1024)
+
+#define MODALIAS "WILC_SPI"
+#define GPIO_NUM 0x44
/*******************************************/
/* E0 and later Interrupt flags. */
/*******************************************/
@@ -226,7 +232,7 @@ struct rxq_entry_t {
********************************************/
struct wilc;
struct wilc_hif_func {
- int (*hif_init)(struct wilc *);
+ int (*hif_init)(struct wilc *, bool resume);
int (*hif_deinit)(struct wilc *);
int (*hif_read_reg)(struct wilc *, u32, u32 *);
int (*hif_write_reg)(struct wilc *, u32, u32);
@@ -267,8 +273,10 @@ struct wilc_cfg_rsp {
};
struct wilc;
+struct wilc_vif;
-int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_size);
+int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
+ u32 buffer_size);
int wilc_wlan_start(struct wilc *);
int wilc_wlan_stop(struct wilc *);
int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
@@ -276,9 +284,9 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count);
void wilc_handle_isr(struct wilc *wilc);
void wilc_wlan_cleanup(struct net_device *dev);
-int wilc_wlan_cfg_set(struct wilc *wilc, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler);
-int wilc_wlan_cfg_get(struct wilc *wilc, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
u32 drv_handler);
int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size);
int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
@@ -292,9 +300,12 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
int wilc_mac_open(struct net_device *ndev);
int wilc_mac_close(struct net_device *ndev);
-int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *pBSSID);
void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
-
+void host_wakeup_notify(struct wilc *wilc);
+void host_sleep_notify(struct wilc *wilc);
extern bool wilc_enable_ps;
-
+void chip_allow_sleep(struct wilc *wilc);
+void chip_wakeup(struct wilc *wilc);
+int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
+ u32 count, u32 drv);
#endif
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index b72c77bb35f1..b3425b9cec94 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -18,10 +18,15 @@
* Global Data
*
********************************************/
+enum cfg_cmd_type {
+ CFG_BYTE_CMD = 0,
+ CFG_HWORD_CMD = 1,
+ CFG_WORD_CMD = 2,
+ CFG_STR_CMD = 3,
+ CFG_BIN_CMD = 4
+};
-typedef struct {
- wilc_debug_func dPrint;
-
+struct wilc_mac_cfg {
int mac_status;
u8 mac_address[7];
u8 ip_address[5];
@@ -31,7 +36,7 @@ typedef struct {
u8 supp_rate[24];
u8 wep_key[28];
u8 i_psk[66];
- u8 hardwareProductVersion[33];
+ u8 hw_product_version[33];
u8 phyversion[17];
u8 supp_username[21];
u8 supp_password[64];
@@ -40,11 +45,11 @@ typedef struct {
u8 firmware_info[8];
u8 scan_result[256];
u8 scan_result1[256];
-} wilc_mac_cfg_t;
+};
-static wilc_mac_cfg_t g_mac;
+static struct wilc_mac_cfg g_mac;
-static wilc_cfg_byte_t g_cfg_byte[] = {
+static struct wilc_cfg_byte g_cfg_byte[] = {
{WID_BSS_TYPE, 0},
{WID_CURRENT_TX_RATE, 0},
{WID_CURRENT_CHANNEL, 0},
@@ -87,7 +92,7 @@ static wilc_cfg_byte_t g_cfg_byte[] = {
{WID_NIL, 0}
};
-static wilc_cfg_hword_t g_cfg_hword[] = {
+static struct wilc_cfg_hword g_cfg_hword[] = {
{WID_LINK_LOSS_THRESHOLD, 0},
{WID_RTS_THRESHOLD, 0},
{WID_FRAG_THRESHOLD, 0},
@@ -108,7 +113,7 @@ static wilc_cfg_hword_t g_cfg_hword[] = {
{WID_NIL, 0}
};
-static wilc_cfg_word_t g_cfg_word[] = {
+static struct wilc_cfg_word g_cfg_word[] = {
{WID_FAILED_COUNT, 0},
{WID_RETRY_COUNT, 0},
{WID_MULTIPLE_RETRY_COUNT, 0},
@@ -131,25 +136,22 @@ static wilc_cfg_word_t g_cfg_word[] = {
};
-static wilc_cfg_str_t g_cfg_str[] = {
+static struct wilc_cfg_str g_cfg_str[] = {
{WID_SSID, g_mac.ssid}, /* 33 + 1 bytes */
{WID_FIRMWARE_VERSION, g_mac.firmware_version},
{WID_OPERATIONAL_RATE_SET, g_mac.supp_rate},
{WID_BSSID, g_mac.bssid}, /* 6 bytes */
{WID_WEP_KEY_VALUE, g_mac.wep_key}, /* 27 bytes */
{WID_11I_PSK, g_mac.i_psk}, /* 65 bytes */
- /* {WID_11E_P_ACTION_REQ, g_mac.action_req}, */
- {WID_HARDWARE_VERSION, g_mac.hardwareProductVersion},
+ {WID_HARDWARE_VERSION, g_mac.hw_product_version},
{WID_MAC_ADDR, g_mac.mac_address},
{WID_PHY_VERSION, g_mac.phyversion},
{WID_SUPP_USERNAME, g_mac.supp_username},
{WID_SUPP_PASSWORD, g_mac.supp_password},
{WID_SITE_SURVEY_RESULTS, g_mac.scan_result},
{WID_SITE_SURVEY_RESULTS, g_mac.scan_result1},
- /* {WID_RX_POWER_LEVEL, g_mac.channel_rssi}, */
{WID_ASSOC_REQ_INFO, g_mac.assoc_req},
{WID_ASSOC_RES_INFO, g_mac.assoc_rsp},
- /* {WID_11N_P_ACTION_REQ, g_mac.action_req}, */
{WID_FIRMWARE_INFO, g_mac.firmware_version},
{WID_IP_ADDRESS, g_mac.ip_address},
{WID_NIL, NULL}
@@ -270,13 +272,12 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
static void wilc_wlan_parse_response_frame(u8 *info, int size)
{
u32 wid, len = 0, i = 0;
- static int seq;
while (size > 0) {
i = 0;
wid = info[0] | (info[1] << 8);
wid = cpu_to_le32(wid);
- PRINT_INFO(GENERIC_DBG, "Processing response for %d seq %d\n", wid, seq++);
+
switch ((wid >> 12) & 0x7) {
case WID_CHAR:
do {
@@ -329,10 +330,6 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size)
if (wid == WID_SITE_SURVEY_RESULTS) {
static int toggle;
- PRINT_INFO(GENERIC_DBG, "Site survey results received[%d]\n",
- size);
-
- PRINT_INFO(GENERIC_DBG, "Site survey results value[%d]toggle[%d]\n", size, toggle);
i += toggle;
toggle ^= 1;
}
@@ -354,14 +351,14 @@ static void wilc_wlan_parse_response_frame(u8 *info, int size)
static int wilc_wlan_parse_info_frame(u8 *info, int size)
{
- wilc_mac_cfg_t *pd = &g_mac;
+ struct wilc_mac_cfg *pd = &g_mac;
u32 wid, len;
int type = WILC_CFG_RSP_STATUS;
wid = info[0] | (info[1] << 8);
len = info[2];
- PRINT_INFO(GENERIC_DBG, "Status Len = %d Id= %d\n", len, wid);
+
if ((len == 1) && (wid == WID_STATUS)) {
pd->mac_status = info[3];
type = WILC_CFG_RSP_STATUS;
@@ -381,21 +378,31 @@ int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size)
u8 type = (id >> 12) & 0xf;
int ret = 0;
- if (type == 0) { /* byte command */
+ switch (type) {
+ case CFG_BYTE_CMD:
if (size >= 1)
ret = wilc_wlan_cfg_set_byte(frame, offset, id, *buf);
- } else if (type == 1) { /* half word command */
+ break;
+
+ case CFG_HWORD_CMD:
if (size >= 2)
- ret = wilc_wlan_cfg_set_hword(frame, offset, id, *((u16 *)buf));
- } else if (type == 2) { /* word command */
+ ret = wilc_wlan_cfg_set_hword(frame, offset, id,
+ *((u16 *)buf));
+ break;
+
+ case CFG_WORD_CMD:
if (size >= 4)
- ret = wilc_wlan_cfg_set_word(frame, offset, id, *((u32 *)buf));
- } else if (type == 3) { /* string command */
+ ret = wilc_wlan_cfg_set_word(frame, offset, id,
+ *((u32 *)buf));
+ break;
+
+ case CFG_STR_CMD:
ret = wilc_wlan_cfg_set_str(frame, offset, id, buf, size);
- } else if (type == 4) { /* binary command */
+ break;
+
+ case CFG_BIN_CMD:
ret = wilc_wlan_cfg_set_bin(frame, offset, id, buf, size);
- } else {
- g_mac.dPrint(N_ERR, "illegal id\n");
+ break;
}
return ret;
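The case labels encode the WID numbering convention: bits 15:12 of the identifier carry the payload type, which is what the old type == 0..4 comparisons tested. A minimal decoder sketch; the CFG_*_CMD values are assumed to keep that 0..4 ordering:

	enum { CFG_BYTE_CMD, CFG_HWORD_CMD, CFG_WORD_CMD,
	       CFG_STR_CMD, CFG_BIN_CMD };	/* assumed values */

	static inline int wid_payload_type(u16 id)
	{
		return (id >> 12) & 0xf;	/* WID_SSID (0x3000) -> CFG_STR_CMD */
	}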
@@ -427,7 +434,7 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size)
}
i = 0;
- if (type == 0) { /* byte command */
+ if (type == CFG_BYTE_CMD) {
do {
if (g_cfg_byte[i].id == WID_NIL)
break;
@@ -439,7 +446,7 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size)
}
i++;
} while (1);
- } else if (type == 1) { /* half word command */
+ } else if (type == CFG_HWORD_CMD) {
do {
if (g_cfg_hword[i].id == WID_NIL)
break;
@@ -451,7 +458,7 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size)
}
i++;
} while (1);
- } else if (type == 2) { /* word command */
+ } else if (type == CFG_WORD_CMD) {
do {
if (g_cfg_word[i].id == WID_NIL)
break;
@@ -463,7 +470,7 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size)
}
i++;
} while (1);
- } else if (type == 3) { /* string command */
+ } else if (type == CFG_STR_CMD) {
do {
if (g_cfg_str[i].id == WID_NIL)
break;
@@ -475,8 +482,6 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size)
if (g_cfg_str[i].id == WID_SITE_SURVEY_RESULTS) {
static int toggle;
- PRINT_INFO(GENERIC_DBG, "Site survey results value[%d]\n",
- size);
i += toggle;
toggle ^= 1;
@@ -488,8 +493,6 @@ int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size)
}
i++;
} while (1);
- } else {
- g_mac.dPrint(N_ERR, "[CFG]: illegal type (%08x)\n", wid);
}
return ret;
@@ -522,7 +525,6 @@ int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
rsp->type = wilc_wlan_parse_info_frame(frame, size);
rsp->seq_no = msg_id;
/*call host interface info parse as well*/
- PRINT_INFO(RX_DBG, "Info message received\n");
wilc_gnrl_async_info_received(wilc, frame - 4, size + 4);
break;
@@ -532,14 +534,10 @@ int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
break;
case 'S':
- PRINT_INFO(RX_DBG, "Scan Notification Received\n");
wilc_scan_complete_received(wilc, frame - 4, size + 4);
break;
default:
- PRINT_INFO(RX_DBG, "Receive unknown message type[%d-%d-%d-%d-%d-%d-%d-%d]\n",
- frame[0], frame[1], frame[2], frame[3], frame[4],
- frame[5], frame[6], frame[7]);
rsp->type = 0;
rsp->seq_no = msg_id;
ret = 0;
@@ -549,9 +547,8 @@ int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
return ret;
}
-int wilc_wlan_cfg_init(wilc_debug_func func)
+int wilc_wlan_cfg_init(void)
{
- memset((void *)&g_mac, 0, sizeof(wilc_mac_cfg_t));
- g_mac.dPrint = func;
+ memset((void *)&g_mac, 0, sizeof(struct wilc_mac_cfg));
return 1;
}
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.h b/drivers/staging/wilc1000/wilc_wlan_cfg.h
index 5f74eb83562f..b8641a273547 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.h
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.h
@@ -10,25 +10,25 @@
#ifndef WILC_WLAN_CFG_H
#define WILC_WLAN_CFG_H
-typedef struct {
+struct wilc_cfg_byte {
u16 id;
u16 val;
-} wilc_cfg_byte_t;
+};
-typedef struct {
+struct wilc_cfg_hword {
u16 id;
u16 val;
-} wilc_cfg_hword_t;
+};
-typedef struct {
+struct wilc_cfg_word {
u32 id;
u32 val;
-} wilc_cfg_word_t;
+};
-typedef struct {
+struct wilc_cfg_str {
u32 id;
u8 *str;
-} wilc_cfg_str_t;
+};
struct wilc;
int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size);
@@ -36,6 +36,6 @@ int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id);
int wilc_wlan_cfg_get_wid_value(u16 wid, u8 *buffer, u32 buffer_size);
int wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size,
struct wilc_cfg_rsp *rsp);
-int wilc_wlan_cfg_init(wilc_debug_func func);
+int wilc_wlan_cfg_init(void);
#endif
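Dropping the *_t typedefs follows the kernel coding-style rule that a struct should be visibly a struct at every use site; callers change mechanically. A before/after sketch:

	wilc_cfg_word_t w;				/* before: nature of the type hidden */
	struct wilc_cfg_word w = { .id = WID_FAILED_COUNT };	/* after */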
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 618903caff54..83cf84dd63b5 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -11,7 +11,6 @@
#define WILC_WLAN_IF_H
#include <linux/semaphore.h>
-#include "linux_wlan_common.h"
#include <linux/netdevice.h>
/********************************************
@@ -51,26 +50,24 @@
*
********************************************/
-typedef struct {
+struct sdio_cmd52 {
u32 read_write: 1;
u32 function: 3;
u32 raw: 1;
u32 address: 17;
u32 data: 8;
-} sdio_cmd52_t;
+};
-typedef struct {
- /* struct { */
+struct sdio_cmd53 {
u32 read_write: 1;
u32 function: 3;
u32 block_mode: 1;
u32 increment: 1;
u32 address: 17;
u32 count: 9;
- /* } bit; */
u8 *buffer;
u32 block_size;
-} sdio_cmd53_t;
+};
#define WILC_MAC_INDICATE_STATUS 0x1
#define WILC_MAC_STATUS_INIT -1
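The bit widths in sdio_cmd52 mirror the SDIO CMD52 argument (R/W flag, function number, RAW flag, 17-bit register address, one data byte). A sketch of packing the same fields by hand, assuming the standard SDIO bit positions rather than relying on compiler bitfield layout:

	/* Assumed positions per the SDIO spec: bit 31 R/W,
	 * bits 30:28 function, bit 27 RAW, bits 25:9 address,
	 * bits 7:0 data.
	 */
	static u32 sdio_cmd52_arg(const struct sdio_cmd52 *c)
	{
		return ((u32)c->read_write << 31) |
		       ((u32)c->function << 28) |
		       ((u32)c->raw << 27) |
		       ((u32)c->address << 9) |
		       c->data;
	}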
@@ -82,7 +79,7 @@ typedef struct {
struct tx_complete_data {
int size;
void *buff;
- u8 *pBssid;
+ u8 *bssid;
struct sk_buff *skb;
};
@@ -95,12 +92,10 @@ typedef void (*wilc_tx_complete_func_t)(void *, int);
* Wlan Configuration ID
*
********************************************/
-
+#define WILC_MULTICAST_TABLE_SIZE 8
#define MAX_SSID_LEN 33
#define MAX_RATES_SUPPORTED 12
-#define INFINITE_SLEEP_TIME ((u32)0xFFFFFFFF)
-
typedef enum {
SUPP_RATES_IE = 1,
EXT_SUPP_RATES_IE = 50,
@@ -300,6 +295,13 @@ enum wid_type {
WID_TYPE_FORCE_32BIT = 0xFFFFFFFF
};
+struct wid {
+ u16 id;
+ enum wid_type type;
+ s32 size;
+ s8 *val;
+};
+
typedef enum {
WID_NIL = 0xffff,
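The new struct wid is the generic descriptor the host interface hands around: identifier, payload type, size, and value pointer. A usage sketch, assuming the WID_STR enumerator from enum wid_type and a hypothetical value:

	s8 ssid[] = "wilc-ap";		/* hypothetical */
	struct wid w = {
		.id   = WID_SSID,
		.type = WID_STR,
		.size = sizeof(ssid) - 1,
		.val  = ssid,
	};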
@@ -761,6 +763,7 @@ typedef enum {
WID_DEL_BEACON = 0x00CA,
WID_LOGTerminal_Switch = 0x00CD,
+ WID_TX_POWER = 0x00CE,
/* EMAC Short WID list */
/* RTS Threshold */
/*
@@ -832,7 +835,6 @@ typedef enum {
/* Custom Integer WID list */
WID_GET_INACTIVE_TIME = 0x2084,
- WID_SET_DRV_HANDLER = 0X2085,
WID_SET_OPERATION_MODE = 0X2086,
/* EMAC String WID list */
WID_SSID = 0x3000,
@@ -865,6 +867,7 @@ typedef enum {
WID_MODEL_NAME = 0x3027, /*Added for CAPI tool */
WID_MODEL_NUM = 0x3028, /*Added for CAPI tool */
WID_DEVICE_NAME = 0x3029, /*Added for CAPI tool */
+ WID_SET_DRV_HANDLER = 0x3030,
/* NMAC String WID list */
WID_11N_P_ACTION_REQ = 0x3080,
@@ -911,8 +914,6 @@ typedef enum {
struct wilc;
int wilc_wlan_init(struct net_device *dev);
-void wilc_bus_set_max_speed(void);
-void wilc_bus_set_default_speed(void);
-u32 wilc_get_chipid(struct wilc *wilc, u8 update);
+u32 wilc_get_chipid(struct wilc *wilc, bool update);
#endif
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 8c1e3f06a215..8bad018eda47 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -73,13 +73,13 @@ static int prism2_domibset_uint32(wlandevice_t *wlandev, u32 did, u32 data)
{
struct p80211msg_dot11req_mibset msg;
p80211item_uint32_t *mibitem =
- (p80211item_uint32_t *) &msg.mibattribute.data;
+ (p80211item_uint32_t *)&msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibset;
mibitem->did = did;
mibitem->data = data;
- return p80211req_dorequest(wlandev, (u8 *) &msg);
+ return p80211req_dorequest(wlandev, (u8 *)&msg);
}
static int prism2_domibset_pstr32(wlandevice_t *wlandev,
@@ -87,14 +87,14 @@ static int prism2_domibset_pstr32(wlandevice_t *wlandev,
{
struct p80211msg_dot11req_mibset msg;
p80211item_pstr32_t *mibitem =
- (p80211item_pstr32_t *) &msg.mibattribute.data;
+ (p80211item_pstr32_t *)&msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibset;
mibitem->did = did;
mibitem->data.len = len;
memcpy(mibitem->data.data, data, len);
- return p80211req_dorequest(wlandev, (u8 *) &msg);
+ return p80211req_dorequest(wlandev, (u8 *)&msg);
}
/* The interface functions, called by the cfg80211 layer */
@@ -239,7 +239,9 @@ static int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
int result = 0;
/* There is no direct way in the hardware (AFAIK) of removing
- a key, so we will cheat by setting the key to a bogus value */
+ * a key, so we will cheat by setting the key to a bogus value
+ */
+
/* send key to driver */
switch (key_index) {
case 0:
@@ -315,7 +317,7 @@ static int prism2_get_station(struct wiphy *wiphy, struct net_device *dev,
if (wlandev->mlmerequest == NULL)
return -EOPNOTSUPP;
- result = wlandev->mlmerequest(wlandev, (struct p80211msg *) &quality);
+ result = wlandev->mlmerequest(wlandev, (struct p80211msg *)&quality);
if (result == 0) {
sinfo->txrate.legacy = quality.txrate.data;
@@ -387,7 +389,7 @@ static int prism2_scan(struct wiphy *wiphy,
msg1.maxchanneltime.data = 250;
msg1.minchanneltime.data = 200;
- result = p80211req_dorequest(wlandev, (u8 *) &msg1);
+ result = p80211req_dorequest(wlandev, (u8 *)&msg1);
if (result) {
err = prism2_result2err(msg1.resultcode.data);
goto exit;
@@ -402,7 +404,7 @@ static int prism2_scan(struct wiphy *wiphy,
msg2.msgcode = DIDmsg_dot11req_scan_results;
msg2.bssindex.data = i;
- result = p80211req_dorequest(wlandev, (u8 *) &msg2);
+ result = p80211req_dorequest(wlandev, (u8 *)&msg2);
if ((result != 0) ||
(msg2.resultcode.data != P80211ENUM_resultcode_success)) {
break;
@@ -417,7 +419,7 @@ static int prism2_scan(struct wiphy *wiphy,
bss = cfg80211_inform_bss(wiphy,
ieee80211_get_channel(wiphy, freq),
CFG80211_BSS_FTYPE_UNKNOWN,
- (const u8 *) &(msg2.bssid.data.data),
+ (const u8 *)&(msg2.bssid.data.data),
msg2.timestamp.data, msg2.capinfo.data,
msg2.beaconperiod.data,
ie_buf,
@@ -558,12 +560,12 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
(u8 *)sme->key);
if (result)
goto exit;
-
}
/* Assume we should set privacy invoked and exclude unencrypted
- We could possibly use sme->privacy here, but the assumption
- seems reasonable anyway */
+ * We could possibly use sme->privacy here, but the assumption
+ * seems reasonable anyway
+ */
result = prism2_domibset_uint32(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
P80211ENUM_truth_true);
@@ -578,7 +580,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
} else {
/* Assume we should unset privacy invoked
- and exclude unencrypted */
+ * and exclude unencrypted
+ */
result = prism2_domibset_uint32(wlandev,
DIDmib_dot11smt_dot11PrivacyTable_dot11PrivacyInvoked,
P80211ENUM_truth_false);
@@ -590,17 +593,17 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
P80211ENUM_truth_false);
if (result)
goto exit;
-
}
/* Now do the actual join. Note there is no way that I can
- see to request a specific bssid */
+ * see to request a specific bssid
+ */
msg_join.msgcode = DIDmsg_lnxreq_autojoin;
memcpy(msg_join.ssid.data.data, sme->ssid, length);
msg_join.ssid.data.len = length;
- result = p80211req_dorequest(wlandev, (u8 *) &msg_join);
+ result = p80211req_dorequest(wlandev, (u8 *)&msg_join);
exit:
if (result)
@@ -623,7 +626,7 @@ static int prism2_disconnect(struct wiphy *wiphy, struct net_device *dev,
memcpy(msg_join.ssid.data.data, "---", 3);
msg_join.ssid.data.len = 3;
- result = p80211req_dorequest(wlandev, (u8 *) &msg_join);
+ result = p80211req_dorequest(wlandev, (u8 *)&msg_join);
if (result)
err = -EFAULT;
@@ -679,12 +682,12 @@ static int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
int result;
int err = 0;
- mibitem = (p80211item_uint32_t *) &msg.mibattribute.data;
+ mibitem = (p80211item_uint32_t *)&msg.mibattribute.data;
msg.msgcode = DIDmsg_dot11req_mibget;
mibitem->did =
DIDmib_dot11phy_dot11PhyTxPowerTable_dot11CurrentTxPowerLevel;
- result = p80211req_dorequest(wlandev, (u8 *) &msg);
+ result = p80211req_dorequest(wlandev, (u8 *)&msg);
if (result) {
err = -EFAULT;
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 8dfe4381ddf7..cec6d0ba3b65 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1360,7 +1360,6 @@ void hfa384x_destroy(hfa384x_t *hw);
int
hfa384x_corereset(hfa384x_t *hw, int holdtime, int settletime, int genesis);
-int hfa384x_drvr_commtallies(hfa384x_t *hw);
int hfa384x_drvr_disable(hfa384x_t *hw, u16 macport);
int hfa384x_drvr_enable(hfa384x_t *hw, u16 macport);
int hfa384x_drvr_flashdl_enable(hfa384x_t *hw);
@@ -1391,10 +1390,6 @@ static inline int hfa384x_drvr_setconfig16(hfa384x_t *hw, u16 rid, u16 val)
}
int
-hfa384x_drvr_getconfig_async(hfa384x_t *hw,
- u16 rid, ctlx_usercb_t usercb, void *usercb_data);
-
-int
hfa384x_drvr_setconfig_async(hfa384x_t *hw,
u16 rid,
void *buf,
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 7551ac25d89d..21a92df85931 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -126,8 +126,6 @@
#include <linux/usb.h>
#include <linux/byteorder/generic.h>
-#define SUBMIT_URB(u, f) usb_submit_urb(u, f)
-
#include "p80211types.h"
#include "p80211hdr.h"
#include "p80211mgmt.h"
@@ -145,11 +143,11 @@ enum cmd_mode {
DOASYNC
};
-#define THROTTLE_JIFFIES (HZ/8)
+#define THROTTLE_JIFFIES (HZ / 8)
#define URB_ASYNC_UNLINK 0
#define USB_QUEUE_BULK 0
-#define ROUNDUP64(a) (((a)+63)&~63)
+#define ROUNDUP64(a) (((a) + 63) & ~63)
#ifdef DEBUG_USB
static void dbprint_urb(struct urb *urb);
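ROUNDUP64 is the usual add-then-mask idiom for rounding up to a power-of-two multiple: adding 63 carries into bit 6 unless the low six bits were already zero, and the mask clears them again. Quick compile-time checks (valid inside any function):

	BUILD_BUG_ON(ROUNDUP64(0)  != 0);
	BUILD_BUG_ON(ROUNDUP64(1)  != 64);
	BUILD_BUG_ON(ROUNDUP64(64) != 64);
	BUILD_BUG_ON(ROUNDUP64(65) != 128);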
@@ -213,8 +211,6 @@ unlocked_usbctlx_cancel_async(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx);
-static void hfa384x_cb_rrid(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx);
-
static int
usbctlx_get_status(const hfa384x_usb_cmdresp_t *cmdresp,
hfa384x_cmdresult_t *result);
@@ -332,7 +328,7 @@ static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags)
int result;
skb = dev_alloc_skb(sizeof(hfa384x_usbin_t));
- if (skb == NULL) {
+ if (!skb) {
result = -ENOMEM;
goto done;
}
@@ -348,7 +344,7 @@ static int submit_rx_urb(hfa384x_t *hw, gfp_t memflags)
result = -ENOLINK;
if (!hw->wlandev->hwremoved &&
!test_bit(WORK_RX_HALT, &hw->usb_flags)) {
- result = SUBMIT_URB(&hw->rx_urb, memflags);
+ result = usb_submit_urb(&hw->rx_urb, memflags);
/* Check whether we need to reset the RX pipe */
if (result == -EPIPE) {
@@ -397,7 +393,7 @@ static int submit_tx_urb(hfa384x_t *hw, struct urb *tx_urb, gfp_t memflags)
if (netif_running(netdev)) {
if (!hw->wlandev->hwremoved &&
!test_bit(WORK_TX_HALT, &hw->usb_flags)) {
- result = SUBMIT_URB(tx_urb, memflags);
+ result = usb_submit_urb(tx_urb, memflags);
/* Test whether we need to reset the TX pipe */
if (result == -EPIPE) {
@@ -816,43 +812,6 @@ static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
}
}
-/*----------------------------------------------------------------
-* hfa384x_cb_rrid
-*
-* CTLX completion handler for async RRID type control exchanges.
-*
-* Note: If the handling is changed here, it should probably be
-* changed in dorrid as well.
-*
-* Arguments:
-* hw hw struct
-* ctlx completed CTLX
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
-static void hfa384x_cb_rrid(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
-{
- if (ctlx->usercb != NULL) {
- hfa384x_rridresult_t rridresult;
-
- if (ctlx->state != CTLX_COMPLETE) {
- memset(&rridresult, 0, sizeof(rridresult));
- rridresult.rid = le16_to_cpu(ctlx->outbuf.rridreq.rid);
- } else {
- usbctlx_get_rridresult(&ctlx->inbuf.rridresp,
- &rridresult);
- }
-
- ctlx->usercb(hw, &rridresult, ctlx->usercb_data);
- }
-}
-
static inline int hfa384x_docmd_wait(hfa384x_t *hw, hfa384x_metacmd_t *cmd)
{
return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
@@ -1012,7 +971,6 @@ int hfa384x_cmd_initialize(hfa384x_t *hw)
----------------------------------------------------------------*/
int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport)
{
- int result = 0;
hfa384x_metacmd_t cmd;
cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_DISABLE) |
@@ -1021,9 +979,7 @@ int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport)
cmd.parm1 = 0;
cmd.parm2 = 0;
- result = hfa384x_docmd_wait(hw, &cmd);
-
- return result;
+ return hfa384x_docmd_wait(hw, &cmd);
}
/*----------------------------------------------------------------
@@ -1048,7 +1004,6 @@ int hfa384x_cmd_disable(hfa384x_t *hw, u16 macport)
----------------------------------------------------------------*/
int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport)
{
- int result = 0;
hfa384x_metacmd_t cmd;
cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_ENABLE) |
@@ -1057,9 +1012,7 @@ int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport)
cmd.parm1 = 0;
cmd.parm2 = 0;
- result = hfa384x_docmd_wait(hw, &cmd);
-
- return result;
+ return hfa384x_docmd_wait(hw, &cmd);
}
/*----------------------------------------------------------------
@@ -1093,7 +1046,6 @@ int hfa384x_cmd_enable(hfa384x_t *hw, u16 macport)
----------------------------------------------------------------*/
int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable)
{
- int result = 0;
hfa384x_metacmd_t cmd;
cmd.cmd = HFA384x_CMD_CMDCODE_SET(HFA384x_CMDCODE_MONITOR) |
@@ -1102,9 +1054,7 @@ int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable)
cmd.parm1 = 0;
cmd.parm2 = 0;
- result = hfa384x_docmd_wait(hw, &cmd);
-
- return result;
+ return hfa384x_docmd_wait(hw, &cmd);
}
/*----------------------------------------------------------------
@@ -1148,7 +1098,6 @@ int hfa384x_cmd_monitor(hfa384x_t *hw, u16 enable)
int hfa384x_cmd_download(hfa384x_t *hw, u16 mode, u16 lowaddr,
u16 highaddr, u16 codelen)
{
- int result = 0;
hfa384x_metacmd_t cmd;
pr_debug("mode=%d, lowaddr=0x%04x, highaddr=0x%04x, codelen=%d\n",
@@ -1161,9 +1110,7 @@ int hfa384x_cmd_download(hfa384x_t *hw, u16 mode, u16 lowaddr,
cmd.parm1 = highaddr;
cmd.parm2 = codelen;
- result = hfa384x_docmd_wait(hw, &cmd);
-
- return result;
+ return hfa384x_docmd_wait(hw, &cmd);
}
/*----------------------------------------------------------------
@@ -1351,7 +1298,7 @@ hfa384x_docmd(hfa384x_t *hw,
hfa384x_usbctlx_t *ctlx;
ctlx = usbctlx_alloc();
- if (ctlx == NULL) {
+ if (!ctlx) {
result = -ENOMEM;
goto done;
}
@@ -1441,7 +1388,7 @@ hfa384x_dorrid(hfa384x_t *hw,
hfa384x_usbctlx_t *ctlx;
ctlx = usbctlx_alloc();
- if (ctlx == NULL) {
+ if (!ctlx) {
result = -ENOMEM;
goto done;
}
@@ -1522,7 +1469,7 @@ hfa384x_dowrid(hfa384x_t *hw,
hfa384x_usbctlx_t *ctlx;
ctlx = usbctlx_alloc();
- if (ctlx == NULL) {
+ if (!ctlx) {
result = -ENOMEM;
goto done;
}
@@ -1610,7 +1557,7 @@ hfa384x_dormem(hfa384x_t *hw,
hfa384x_usbctlx_t *ctlx;
ctlx = usbctlx_alloc();
- if (ctlx == NULL) {
+ if (!ctlx) {
result = -ENOMEM;
goto done;
}
@@ -1703,7 +1650,7 @@ hfa384x_dowmem(hfa384x_t *hw,
pr_debug("page=0x%04x offset=0x%04x len=%d\n", page, offset, len);
ctlx = usbctlx_alloc();
- if (ctlx == NULL) {
+ if (!ctlx) {
result = -ENOMEM;
goto done;
}
@@ -1747,37 +1694,6 @@ done:
}
/*----------------------------------------------------------------
-* hfa384x_drvr_commtallies
-*
-* Send a commtallies inquiry to the MAC. Note that this is an async
-* call that will result in an info frame arriving sometime later.
-*
-* Arguments:
-* hw device structure
-*
-* Returns:
-* zero success.
-*
-* Side effects:
-*
-* Call context:
-* process
-----------------------------------------------------------------*/
-int hfa384x_drvr_commtallies(hfa384x_t *hw)
-{
- hfa384x_metacmd_t cmd;
-
- cmd.cmd = HFA384x_CMDCODE_INQ;
- cmd.parm0 = HFA384x_IT_COMMTALLIES;
- cmd.parm1 = 0;
- cmd.parm2 = 0;
-
- hfa384x_docmd_async(hw, &cmd, NULL, NULL, NULL);
-
- return 0;
-}
-
-/*----------------------------------------------------------------
* hfa384x_drvr_disable
*
* Issues the disable command to stop communications on one of
@@ -2122,41 +2038,6 @@ int hfa384x_drvr_getconfig(hfa384x_t *hw, u16 rid, void *buf, u16 len)
}
/*----------------------------------------------------------------
- * hfa384x_drvr_getconfig_async
- *
- * Performs the sequence necessary to perform an async read of
- * of a config/info item.
- *
- * Arguments:
- * hw device structure
- * rid config/info record id (host order)
- * buf host side record buffer. Upon return it will
- * contain the body portion of the record (minus the
- * RID and len).
- * len buffer length (in bytes, should match record length)
- * cbfn caller supplied callback, called when the command
- * is done (successful or not).
- * cbfndata pointer to some caller supplied data that will be
- * passed in as an argument to the cbfn.
- *
- * Returns:
- * nothing the cbfn gets a status argument identifying if
- * any errors occur.
- * Side effects:
- * Queues an hfa384x_usbcmd_t for subsequent execution.
- *
- * Call context:
- * Any
- ----------------------------------------------------------------*/
-int
-hfa384x_drvr_getconfig_async(hfa384x_t *hw,
- u16 rid, ctlx_usercb_t usercb, void *usercb_data)
-{
- return hfa384x_dorrid_async(hw, rid, NULL, 0,
- hfa384x_cb_rrid, usercb, usercb_data);
-}
-
-/*----------------------------------------------------------------
* hfa384x_drvr_setconfig_async
*
* Performs the sequence necessary to write a config/info item.
@@ -2810,8 +2691,7 @@ void hfa384x_tx_timeout(wlandevice_t *wlandev)
static void hfa384x_usbctlx_reaper_task(unsigned long data)
{
hfa384x_t *hw = (hfa384x_t *)data;
- struct list_head *entry;
- struct list_head *temp;
+ hfa384x_usbctlx_t *ctlx, *temp;
unsigned long flags;
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -2819,10 +2699,7 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
/* This list is guaranteed to be empty if someone
* has unplugged the adapter.
*/
- list_for_each_safe(entry, temp, &hw->ctlxq.reapable) {
- hfa384x_usbctlx_t *ctlx;
-
- ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
+ list_for_each_entry_safe(ctlx, temp, &hw->ctlxq.reapable, list) {
list_del(&ctlx->list);
kfree(ctlx);
}
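list_for_each_entry_safe() folds the list_entry() call into the iterator and caches the next node up front, which is what makes freeing the current entry legal mid-walk. A self-contained sketch of the pattern:

	struct item {
		struct list_head list;
	};

	static void reap_all(struct list_head *head)
	{
		struct item *it, *tmp;

		/* 'tmp' already points at the next node, so
		 * kfree(it) cannot break the traversal.
		 */
		list_for_each_entry_safe(it, tmp, head, list) {
			list_del(&it->list);
			kfree(it);
		}
	}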
@@ -2847,8 +2724,7 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
static void hfa384x_usbctlx_completion_task(unsigned long data)
{
hfa384x_t *hw = (hfa384x_t *)data;
- struct list_head *entry;
- struct list_head *temp;
+ hfa384x_usbctlx_t *ctlx, *temp;
unsigned long flags;
int reap = 0;
@@ -2858,11 +2734,7 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
/* This list is guaranteed to be empty if someone
* has unplugged the adapter ...
*/
- list_for_each_safe(entry, temp, &hw->ctlxq.completing) {
- hfa384x_usbctlx_t *ctlx;
-
- ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
-
+ list_for_each_entry_safe(ctlx, temp, &hw->ctlxq.completing, list) {
/* Call the completion function that this
* command was assigned, assuming it has one.
*/
@@ -3051,7 +2923,7 @@ static void hfa384x_usbctlxq_run(hfa384x_t *hw)
hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK;
/* Now submit the URB and update the CTLX's state */
- result = SUBMIT_URB(&hw->ctlx_urb, GFP_ATOMIC);
+ result = usb_submit_urb(&hw->ctlx_urb, GFP_ATOMIC);
if (result == 0) {
/* This CTLX is now running on the active queue */
head->state = CTLX_REQ_SUBMITTED;
@@ -3574,7 +3446,7 @@ static void hfa384x_int_rxmonitor(wlandevice_t *wlandev,
}
skb = dev_alloc_skb(skblen);
- if (skb == NULL)
+ if (!skb)
return;
/* only prepend the prism header if in the right mode */
@@ -3985,8 +3857,7 @@ static void hfa384x_usb_throttlefn(unsigned long data)
pr_debug("flags=0x%lx\n", hw->usb_flags);
if (!hw->wlandev->hwremoved &&
((test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) &&
- !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags))
- |
+ !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags)) |
(test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) &&
!test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags))
)) {
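The single '|' joining the RX and TX clauses above is deliberate: '||' would short-circuit and skip the TX test_and_clear_bit()/test_and_set_bit() pair whenever the RX side fired, leaving THROTTLE_TX set. A sketch of an equivalent form that makes the intent explicit, assuming the driver's usb_work item:

	bool rx = test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) &&
		  !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags);
	bool tx = test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) &&
		  !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags);

	if (rx || tx)			/* both sides already evaluated */
		schedule_work(&hw->usb_work);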
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 1b02cdf9d1fa..0a8f3960d465 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -49,7 +49,8 @@
*
* --------------------------------------------------------------------
*
-*================================================================ */
+*================================================================
+*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -101,12 +102,12 @@ static u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
*
* Call context:
* May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
struct sk_buff *skb, union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
{
-
__le16 fc;
u16 proto;
struct wlan_ethhdr e_hdr;
@@ -148,11 +149,11 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
/* tack on SNAP */
e_snap =
- (struct wlan_snap *) skb_push(skb,
+ (struct wlan_snap *)skb_push(skb,
sizeof(struct wlan_snap));
e_snap->type = htons(proto);
- if (ethconv == WLAN_ETHCONV_8021h
- && p80211_stt_findproto(proto)) {
+ if (ethconv == WLAN_ETHCONV_8021h &&
+ p80211_stt_findproto(proto)) {
memcpy(e_snap->oui, oui_8021h,
WLAN_IEEE_OUI_LEN);
} else {
@@ -162,12 +163,11 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
/* tack on llc */
e_llc =
- (struct wlan_llc *) skb_push(skb,
+ (struct wlan_llc *)skb_push(skb,
sizeof(struct wlan_llc));
e_llc->dsap = 0xAA; /* SNAP, see IEEE 802 */
e_llc->ssap = 0xAA;
e_llc->ctl = 0x03;
-
}
}
@@ -202,8 +202,8 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
p80211_wep->data = NULL;
- if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED)
- && (wlandev->hostwep & HOSTWEP_ENCRYPT)) {
+ if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) &&
+ (wlandev->hostwep & HOSTWEP_ENCRYPT)) {
/* XXXX need to pick keynum other than default? */
p80211_wep->data = kmalloc(skb->len, GFP_ATOMIC);
@@ -215,8 +215,8 @@ int skb_ether_to_p80211(wlandevice_t *wlandev, u32 ethconv,
p80211_wep->iv, p80211_wep->icv);
if (foo) {
netdev_warn(wlandev->netdev,
- "Host en-WEP failed, dropping frame (%d).\n",
- foo);
+ "Host en-WEP failed, dropping frame (%d).\n",
+ foo);
return 2;
}
fc |= cpu_to_le16(WLAN_SET_FC_ISWEP(1));
@@ -238,10 +238,10 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
int i;
/* Gather wireless spy statistics: for each packet, compare the
- * source address with out list, and if match, get the stats... */
+ * source address with our list, and if it matches, get the stats...
+ */
for (i = 0; i < wlandev->spy_number; i++) {
-
if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) {
memcpy(wlandev->spy_address[i], mac, ETH_ALEN);
wlandev->spy_stat[i].level = rxmeta->signal;
@@ -273,7 +273,8 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
*
* Call context:
* May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
struct sk_buff *skb)
{
@@ -293,19 +294,19 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN;
payload_offset = WLAN_HDR_A3_LEN;
- w_hdr = (union p80211_hdr *) skb->data;
+ w_hdr = (union p80211_hdr *)skb->data;
/* setup some vars for convenience */
fc = le16_to_cpu(w_hdr->a3.fc);
if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
ether_addr_copy(daddr, w_hdr->a3.a1);
ether_addr_copy(saddr, w_hdr->a3.a2);
- } else if ((WLAN_GET_FC_TODS(fc) == 0)
- && (WLAN_GET_FC_FROMDS(fc) == 1)) {
+ } else if ((WLAN_GET_FC_TODS(fc) == 0) &&
+ (WLAN_GET_FC_FROMDS(fc) == 1)) {
ether_addr_copy(daddr, w_hdr->a3.a1);
ether_addr_copy(saddr, w_hdr->a3.a3);
- } else if ((WLAN_GET_FC_TODS(fc) == 1)
- && (WLAN_GET_FC_FROMDS(fc) == 0)) {
+ } else if ((WLAN_GET_FC_TODS(fc) == 1) &&
+ (WLAN_GET_FC_FROMDS(fc) == 0)) {
ether_addr_copy(daddr, w_hdr->a3.a3);
ether_addr_copy(saddr, w_hdr->a3.a2);
} else {
@@ -320,18 +321,19 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
}
/* perform de-wep if necessary.. */
- if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) && WLAN_GET_FC_ISWEP(fc)
- && (wlandev->hostwep & HOSTWEP_DECRYPT)) {
+ if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) &&
+ WLAN_GET_FC_ISWEP(fc) &&
+ (wlandev->hostwep & HOSTWEP_DECRYPT)) {
if (payload_length <= 8) {
netdev_err(netdev,
"WEP frame too short (%u).\n", skb->len);
return 1;
}
foo = wep_decrypt(wlandev, skb->data + payload_offset + 4,
- payload_length - 8, -1,
- skb->data + payload_offset,
- skb->data + payload_offset +
- payload_length - 4);
+ payload_length - 8, -1,
+ skb->data + payload_offset,
+ skb->data + payload_offset +
+ payload_length - 4);
if (foo) {
/* de-wep failed, drop skb. */
pr_debug("Host de-WEP failed, dropping frame (%d).\n",
@@ -350,11 +352,11 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
wlandev->rx.decrypt++;
}
- e_hdr = (struct wlan_ethhdr *) (skb->data + payload_offset);
+ e_hdr = (struct wlan_ethhdr *)(skb->data + payload_offset);
- e_llc = (struct wlan_llc *) (skb->data + payload_offset);
+ e_llc = (struct wlan_llc *)(skb->data + payload_offset);
e_snap =
- (struct wlan_snap *) (skb->data + payload_offset +
+ (struct wlan_snap *)(skb->data + payload_offset +
sizeof(struct wlan_llc));
/* Test for the various encodings */
@@ -369,7 +371,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* A bogus length ethfrm has been encap'd. */
/* Is someone trying an oflow attack? */
netdev_err(netdev, "ENCAP frame too large (%d > %d)\n",
- payload_length, netdev->mtu + ETH_HLEN);
+ payload_length, netdev->mtu + ETH_HLEN);
return 1;
}
@@ -379,15 +381,15 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
skb_trim(skb, skb->len - WLAN_CRC_LEN);
} else if ((payload_length >= sizeof(struct wlan_llc) +
- sizeof(struct wlan_snap))
- && (e_llc->dsap == 0xaa)
- && (e_llc->ssap == 0xaa)
- && (e_llc->ctl == 0x03)
- &&
- (((memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) == 0)
- && (ethconv == WLAN_ETHCONV_8021h)
- && (p80211_stt_findproto(le16_to_cpu(e_snap->type))))
- || (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) !=
+ sizeof(struct wlan_snap)) &&
+ (e_llc->dsap == 0xaa) &&
+ (e_llc->ssap == 0xaa) &&
+ (e_llc->ctl == 0x03) &&
+ (((memcmp(e_snap->oui, oui_rfc1042,
+ WLAN_IEEE_OUI_LEN) == 0) &&
+ (ethconv == WLAN_ETHCONV_8021h) &&
+ (p80211_stt_findproto(le16_to_cpu(e_snap->type)))) ||
+ (memcmp(e_snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN) !=
0))) {
pr_debug("SNAP+RFC1042 len: %d\n", payload_length);
/* it's a SNAP + RFC1042 frame && protocol is in STT */
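The restructured condition is the standard LLC/SNAP probe: dsap == ssap == 0xaa with ctl == 0x03 marks a SNAP header, and the OUI then separates RFC 1042 from 802.1h handling. A condensed sketch of the same tests:

	static bool is_snap_hdr(const struct wlan_llc *llc)
	{
		return llc->dsap == 0xaa && llc->ssap == 0xaa &&
		       llc->ctl == 0x03;
	}

	/* RFC 1042 encapsulation uses OUI 00:00:00; anything else
	 * (e.g. 00:00:f8) takes the 802.1h path here.
	 */
	static bool is_rfc1042(const struct wlan_snap *snap)
	{
		return !memcmp(snap->oui, oui_rfc1042, WLAN_IEEE_OUI_LEN);
	}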
@@ -398,7 +400,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* A bogus length ethfrm has been sent. */
/* Is someone trying an oflow attack? */
netdev_err(netdev, "SNAP frame too large (%d > %d)\n",
- payload_length, netdev->mtu);
+ payload_length, netdev->mtu);
return 1;
}
@@ -415,13 +417,14 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
skb_trim(skb, skb->len - WLAN_CRC_LEN);
} else if ((payload_length >= sizeof(struct wlan_llc) +
- sizeof(struct wlan_snap))
- && (e_llc->dsap == 0xaa)
- && (e_llc->ssap == 0xaa)
- && (e_llc->ctl == 0x03)) {
+ sizeof(struct wlan_snap)) &&
+ (e_llc->dsap == 0xaa) &&
+ (e_llc->ssap == 0xaa) &&
+ (e_llc->ctl == 0x03)) {
pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
/* it's an 802.1h frame || (an RFC1042 && protocol not in STT)
- build a DIXII + RFC894 */
+ * build a DIXII + RFC894
+ */
/* Test for an overlength frame */
if ((payload_length - sizeof(struct wlan_llc) -
@@ -430,9 +433,9 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* A bogus length ethfrm has been sent. */
/* Is someone trying an oflow attack? */
netdev_err(netdev, "DIXII frame too large (%ld > %d)\n",
- (long int)(payload_length -
- sizeof(struct wlan_llc) -
- sizeof(struct wlan_snap)), netdev->mtu);
+ (long int)(payload_length -
+ sizeof(struct wlan_llc) -
+ sizeof(struct wlan_snap)), netdev->mtu);
return 1;
}
@@ -465,7 +468,7 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* A bogus length ethfrm has been sent. */
/* Is someone trying an oflow attack? */
netdev_err(netdev, "OTHER frame too large (%d > %d)\n",
- payload_length, netdev->mtu);
+ payload_length, netdev->mtu);
return 1;
}
@@ -480,7 +483,6 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
/* chop off the 802.11 CRC */
skb_trim(skb, skb->len - WLAN_CRC_LEN);
-
}
/*
@@ -521,14 +523,15 @@ int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
*
* Call context:
* May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
int p80211_stt_findproto(u16 proto)
{
/* Always return found for now. This is the behavior used by the */
- /* Zoom Win95 driver when 802.1h mode is selected */
+ /* Zoom Win95 driver when 802.1h mode is selected */
/* TODO: If necessary, add an actual search we'll probably
- need this to match the CMAC's way of doing things.
- Need to do some testing to confirm.
+ * need this to match the CMAC's way of doing things.
+ * Need to do some testing to confirm.
*/
if (proto == ETH_P_AARP) /* APPLETALK */
@@ -551,24 +554,25 @@ int p80211_stt_findproto(u16 proto)
*
* Call context:
* May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
void p80211skb_rxmeta_detach(struct sk_buff *skb)
{
struct p80211_rxmeta *rxmeta;
struct p80211_frmmeta *frmmeta;
/* Sanity checks */
- if (skb == NULL) { /* bad skb */
+ if (!skb) { /* bad skb */
pr_debug("Called w/ null skb.\n");
return;
}
frmmeta = P80211SKB_FRMMETA(skb);
- if (frmmeta == NULL) { /* no magic */
+ if (!frmmeta) { /* no magic */
pr_debug("Called w/ bad frmmeta magic.\n");
return;
}
rxmeta = frmmeta->rx;
- if (rxmeta == NULL) { /* bad meta ptr */
+ if (!rxmeta) { /* bad meta ptr */
pr_debug("Called w/ bad rxmeta ptr.\n");
return;
}
@@ -595,7 +599,8 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb)
*
* Call context:
* May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
{
int result = 0;
@@ -603,7 +608,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
struct p80211_frmmeta *frmmeta;
/* If these already have metadata, we error out! */
- if (P80211SKB_RXMETA(skb) != NULL) {
+ if (P80211SKB_RXMETA(skb)) {
netdev_err(wlandev->netdev,
"%s: RXmeta already attached!\n", wlandev->name);
result = 0;
@@ -613,7 +618,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
/* Allocate the rxmeta */
rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC);
- if (rxmeta == NULL) {
+ if (!rxmeta) {
netdev_err(wlandev->netdev,
"%s: Failed to allocate rxmeta.\n", wlandev->name);
result = 1;
@@ -626,7 +631,7 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
/* Overlay a frmmeta_t onto skb->cb */
memset(skb->cb, 0, sizeof(struct p80211_frmmeta));
- frmmeta = (struct p80211_frmmeta *) (skb->cb);
+ frmmeta = (struct p80211_frmmeta *)(skb->cb);
frmmeta->magic = P80211_FRMMETA_MAGIC;
frmmeta->rx = rxmeta;
exit:
@@ -648,7 +653,8 @@ exit:
*
* Call context:
* May be called in interrupt or non-interrupt context
-----------------------------------------------------------------*/
+*----------------------------------------------------------------
+*/
void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
{
struct p80211_frmmeta *meta;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index a9c1e0bafa62..88255ce2871b 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -328,7 +328,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
p80211_wep.data = NULL;
- if (skb == NULL)
+ if (!skb)
return NETDEV_TX_OK;
if (wlandev->state != WLAN_DEVICE_OPEN) {
@@ -388,7 +388,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
}
- if (wlandev->txframe == NULL) {
+ if (!wlandev->txframe) {
result = 1;
goto failed;
}
@@ -736,7 +736,7 @@ int wlan_setup(wlandevice_t *wlandev, struct device *physdev)
/* Allocate and initialize the wiphy struct */
wiphy = wlan_create_wiphy(physdev, wlandev);
- if (wiphy == NULL) {
+ if (!wiphy) {
dev_err(physdev, "Failed to alloc wiphy.\n");
return 1;
}
@@ -744,7 +744,7 @@ int wlan_setup(wlandevice_t *wlandev, struct device *physdev)
/* Allocate and initialize the struct device */
netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d",
NET_NAME_UNKNOWN, ether_setup);
- if (netdev == NULL) {
+ if (!netdev) {
dev_err(physdev, "Failed to alloc netdev.\n");
wlan_free_wiphy(wiphy);
result = 1;
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index c363456d93a3..22c79703e328 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -140,8 +140,8 @@ int wep_change_key(wlandevice_t *wlandev, int keynum, u8 *key, int keylen)
}
/*
- 4-byte IV at start of buffer, 4-byte ICV at end of buffer.
- if successful, buf start is payload begin, length -= 8;
+ * 4-byte IV at start of buffer, 4-byte ICV at end of buffer.
+ * if successful, buf start is payload begin, length -= 8;
*/
int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override,
u8 *iv, u8 *icv)
@@ -188,7 +188,8 @@ int wep_decrypt(wlandevice_t *wlandev, u8 *buf, u32 len, int key_override,
/* Apply the RC4 to the data, update the CRC32 */
crc = ~0;
- i = j = 0;
+ i = 0;
+ j = 0;
for (k = 0; k < len; k++) {
i = (i + 1) & 0xff;
j = (j + s[i]) & 0xff;
@@ -260,7 +261,8 @@ int wep_encrypt(wlandevice_t *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
/* Update CRC32 then apply RC4 to the data */
crc = ~0;
- i = j = 0;
+ i = 0;
+ j = 0;
for (k = 0; k < len; k++) {
crc = wep_crc32_table[(crc ^ buf[k]) & 0xff] ^ (crc >> 8);
i = (i + 1) & 0xff;
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 8fc80df0b53e..8564d9eb918f 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -333,6 +333,10 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
/* Make the image chunks */
result = mkimage(fchunk, &nfchunks);
+ if (result) {
+ netdev_err(wlandev->netdev, "Failed to make image chunk.\n");
+ return 1;
+ }
/* Do any plugging */
result = plugimage(fchunk, nfchunks, s3plug, ns3plug, &pda);
@@ -538,7 +542,7 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
/* Allocate buffer space for chunks */
for (i = 0; i < *ccnt; i++) {
clist[i].data = kzalloc(clist[i].len, GFP_KERNEL);
- if (clist[i].data == NULL) {
+ if (!clist[i].data) {
pr_err("failed to allocate image space, exitting.\n");
return 1;
}
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 013a6240f193..d8ed9a05789c 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -375,7 +375,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
int count;
- req = (struct p80211msg_dot11req_scan_results *) msgp;
+ req = msgp;
req->resultcode.status = P80211ENUM_msgitem_status_data_ok;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 7a9f424607b7..e6472034da33 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -87,7 +87,6 @@ int prism2mgmt_autojoin(wlandevice_t *wlandev, void *msgp);
* Prism2 data types
---------------------------------------------------------------*/
/* byte area conversion functions*/
-void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr);
void prism2mgmt_bytearea2pstr(u8 *bytearea, p80211pstrd_t *pstr, int len);
/* byte string conversion functions*/
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index cdda07d1c268..fe914b1f904b 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -379,7 +379,7 @@ static int prism2mib_bytearea2pstr(struct mibrec *mib,
void *data)
{
int result;
- p80211pstrd_t *pstr = (p80211pstrd_t *) data;
+ p80211pstrd_t *pstr = data;
u8 bytebuf[MIB_TMP_MAXLEN];
if (isget) {
@@ -388,7 +388,7 @@ static int prism2mib_bytearea2pstr(struct mibrec *mib,
prism2mgmt_bytearea2pstr(bytebuf, pstr, mib->parm2);
} else {
memset(bytebuf, 0, mib->parm2);
- prism2mgmt_pstr2bytearea(bytebuf, pstr);
+ memcpy(bytebuf, pstr->data, pstr->len);
result =
hfa384x_drvr_setconfig(hw, mib->parm1, bytebuf, mib->parm2);
}
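The removed casts were redundant: C converts void * to any object pointer implicitly, so assignments like these compile as-is and the casts were pure noise:

	void *data = msg_payload;	/* hypothetical void * source */
	p80211pstrd_t *pstr = data;	/* implicit conversion, no cast */
	u32 *word = data;		/* likewise */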
@@ -428,7 +428,7 @@ static int prism2mib_uint32(struct mibrec *mib,
struct p80211msg_dot11req_mibset *msg, void *data)
{
int result;
- u32 *uint32 = (u32 *) data;
+ u32 *uint32 = data;
u8 bytebuf[MIB_TMP_MAXLEN];
u16 *wordbuf = (u16 *) bytebuf;
@@ -475,7 +475,7 @@ static int prism2mib_flag(struct mibrec *mib,
struct p80211msg_dot11req_mibset *msg, void *data)
{
int result;
- u32 *uint32 = (u32 *) data;
+ u32 *uint32 = data;
u8 bytebuf[MIB_TMP_MAXLEN];
u16 *wordbuf = (u16 *) bytebuf;
u32 flags;
@@ -533,7 +533,7 @@ static int prism2mib_wepdefaultkey(struct mibrec *mib,
void *data)
{
int result;
- p80211pstrd_t *pstr = (p80211pstrd_t *) data;
+ p80211pstrd_t *pstr = data;
u8 bytebuf[MIB_TMP_MAXLEN];
u16 len;
@@ -543,7 +543,7 @@ static int prism2mib_wepdefaultkey(struct mibrec *mib,
len = (pstr->len > 5) ? HFA384x_RID_CNFWEP128DEFAULTKEY_LEN :
HFA384x_RID_CNFWEPDEFAULTKEY_LEN;
memset(bytebuf, 0, len);
- prism2mgmt_pstr2bytearea(bytebuf, pstr);
+ memcpy(bytebuf, pstr->data, pstr->len);
result = hfa384x_drvr_setconfig(hw, mib->parm1, bytebuf, len);
}
@@ -660,7 +660,7 @@ static int prism2mib_fragmentationthreshold(struct mibrec *mib,
struct p80211msg_dot11req_mibset *msg,
void *data)
{
- u32 *uint32 = (u32 *) data;
+ u32 *uint32 = data;
if (!isget)
if ((*uint32) % 2) {
@@ -705,7 +705,7 @@ static int prism2mib_priv(struct mibrec *mib,
hfa384x_t *hw,
struct p80211msg_dot11req_mibset *msg, void *data)
{
- p80211pstrd_t *pstr = (p80211pstrd_t *) data;
+ p80211pstrd_t *pstr = data;
switch (mib->did) {
case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{
@@ -759,26 +759,6 @@ void prism2mgmt_pstr2bytestr(struct hfa384x_bytestr *bytestr,
}
/*----------------------------------------------------------------
-* prism2mgmt_pstr2bytearea
-*
-* Convert the pstr data in the WLAN message structure into an hfa384x
-* byte area format.
-*
-* Arguments:
-* bytearea hfa384x byte area data type
-* pstr wlan message data
-*
-* Returns:
-* Nothing
-*
-----------------------------------------------------------------*/
-
-void prism2mgmt_pstr2bytearea(u8 *bytearea, p80211pstrd_t *pstr)
-{
- memcpy(bytearea, pstr->data, pstr->len);
-}
-
-/*----------------------------------------------------------------
* prism2mgmt_bytestr2pstr
*
* Convert the data in an hfa384x byte string format into a
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index 131223afd918..64f90722b01b 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -242,7 +242,7 @@ static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
union p80211_hdr *p80211_hdr,
struct p80211_metawep *p80211_wep)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
/* If necessary, set the 802.11 WEP bit */
if ((wlandev->hostwep & (HOSTWEP_PRIVACYINVOKED | HOSTWEP_ENCRYPT)) ==
@@ -279,7 +279,7 @@ static int prism2sta_txframe(wlandevice_t *wlandev, struct sk_buff *skb,
*/
static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
int result = 0;
@@ -409,7 +409,7 @@ static int prism2sta_mlmerequest(wlandevice_t *wlandev, struct p80211msg *msg)
*/
u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
u32 result;
result = P80211ENUM_resultcode_implementation_failure;
@@ -583,7 +583,7 @@ u32 prism2sta_ifstate(wlandevice_t *wlandev, u32 ifstate)
static int prism2sta_getcardinfo(wlandevice_t *wlandev)
{
int result = 0;
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
u16 temp;
u8 snum[HFA384x_RID_NICSERIALNUMBER_LEN];
@@ -911,7 +911,7 @@ done:
*/
static int prism2sta_globalsetup(wlandevice_t *wlandev)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
/* Set the maximum frame size */
return hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFMAXDATALEN,
@@ -921,7 +921,7 @@ static int prism2sta_globalsetup(wlandevice_t *wlandev)
static int prism2sta_setmulticast(wlandevice_t *wlandev, netdevice_t *dev)
{
int result = 0;
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
u16 promisc;
@@ -985,7 +985,7 @@ static void prism2sta_inf_handover(wlandevice_t *wlandev,
static void prism2sta_inf_tallies(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
u16 *src16;
u32 *dst;
u32 *src32;
@@ -1032,7 +1032,7 @@ static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
int nbss;
hfa384x_ScanResult_t *sr = &(inf->info.scanresult);
int i;
@@ -1087,7 +1087,7 @@ static void prism2sta_inf_scanresults(wlandevice_t *wlandev,
static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
int nbss;
nbss = (inf->framelen - 3) / 32;
@@ -1128,7 +1128,7 @@ static void prism2sta_inf_hostscanresults(wlandevice_t *wlandev,
static void prism2sta_inf_chinforesults(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
unsigned int i, n;
hw->channel_info.results.scanchannels =
@@ -1441,7 +1441,7 @@ void prism2sta_processing_defer(struct work_struct *data)
static void prism2sta_inf_linkstatus(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
hw->link_status_new = le16_to_cpu(inf->info.linkstatus.linkstatus);
@@ -1469,7 +1469,7 @@ static void prism2sta_inf_linkstatus(wlandevice_t *wlandev,
static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
hfa384x_AssocStatus_t rec;
int i;
@@ -1530,7 +1530,7 @@ static void prism2sta_inf_assocstatus(wlandevice_t *wlandev,
static void prism2sta_inf_authreq(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
struct sk_buff *skb;
skb = dev_alloc_skb(sizeof(*inf));
@@ -1545,7 +1545,7 @@ static void prism2sta_inf_authreq(wlandevice_t *wlandev,
static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
hfa384x_authenticateStation_data_t rec;
int i, added, result, cnt;
@@ -1719,7 +1719,7 @@ static void prism2sta_inf_authreq_defer(wlandevice_t *wlandev,
static void prism2sta_inf_psusercnt(wlandevice_t *wlandev,
hfa384x_InfFrame_t *inf)
{
- hfa384x_t *hw = (hfa384x_t *) wlandev->priv;
+ hfa384x_t *hw = wlandev->priv;
hw->psusercount = le16_to_cpu(inf->info.psusercnt.usercnt);
}
@@ -1886,7 +1886,6 @@ static wlandevice_t *create_wlan(void)
hw = kzalloc(sizeof(hfa384x_t), GFP_KERNEL);
if (!wlandev || !hw) {
- pr_err("%s: Memory allocation failure.\n", dev_info);
kfree(wlandev);
kfree(hw);
return NULL;
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 8abf3f87a2d5..41358bbc6246 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -67,7 +67,7 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
dev = interface_to_usbdev(interface);
wlandev = create_wlan();
- if (wlandev == NULL) {
+ if (!wlandev) {
dev_err(&interface->dev, "Memory allocation failure.\n");
result = -EIO;
goto failed;
@@ -139,8 +139,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
wlandev = (wlandevice_t *)usb_get_intfdata(interface);
if (wlandev != NULL) {
LIST_HEAD(cleanlist);
- struct list_head *entry;
- struct list_head *temp;
+ hfa384x_usbctlx_t *ctlx, *temp;
unsigned long flags;
hfa384x_t *hw = wlandev->priv;
@@ -178,18 +177,15 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
tasklet_kill(&hw->completion_bh);
tasklet_kill(&hw->reaper_bh);
- flush_scheduled_work();
+ cancel_work_sync(&hw->link_bh);
+ cancel_work_sync(&hw->commsqual_bh);
/* Now we complete any outstanding commands
* and tell everyone who is waiting for their
* responses that we have shut down.
*/
- list_for_each(entry, &cleanlist) {
- hfa384x_usbctlx_t *ctlx;
-
- ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
+ list_for_each_entry(ctlx, &cleanlist, list)
complete(&ctlx->done);
- }
/* Give any outstanding synchronous commands
* a chance to complete. All they need to do
@@ -199,12 +195,8 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
msleep(100);
/* Now delete the CTLXs, because no-one else can now. */
- list_for_each_safe(entry, temp, &cleanlist) {
- hfa384x_usbctlx_t *ctlx;
-
- ctlx = list_entry(entry, hfa384x_usbctlx_t, list);
+ list_for_each_entry_safe(ctlx, temp, &cleanlist, list)
kfree(ctlx);
- }
/* Unhook the wlandev */
unregister_wlandev(wlandev);
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 89f5b55ed546..7eadf922b21f 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -226,7 +226,6 @@ void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
XGI_Pr->Part4Port = BaseAddr + SIS_CRT2_PORT_14;
/* 301 palette address port registers */
XGI_Pr->Part5Port = BaseAddr + SIS_CRT2_PORT_14 + 2;
-
}
/* ------------------ Internal helper routines ----------------- */
@@ -315,10 +314,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
if (XGIbios_mode[myindex].bpp > 8)
return -1;
}
-
}
goto check_memory;
-
}
/* FIXME: for now, all is valid on XG27 */
@@ -518,7 +515,6 @@ check_memory:
if (required_mem > xgifb_info->video_size)
return -1;
return myindex;
-
}
static void XGIfb_search_crt2type(const char *name)
@@ -655,26 +651,26 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
switch (xgifb_info->display2) {
case XGIFB_DISP_CRT:
- cr30 = (SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = SIS_VB_OUTPUT_CRT2 | SIS_SIMULTANEOUS_VIEW_ENABLE;
cr31 |= SIS_DRIVER_MODE;
break;
case XGIFB_DISP_LCD:
- cr30 = (SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = SIS_VB_OUTPUT_LCD | SIS_SIMULTANEOUS_VIEW_ENABLE;
cr31 |= SIS_DRIVER_MODE;
break;
case XGIFB_DISP_TV:
if (xgifb_info->TV_type == TVMODE_HIVISION)
- cr30 = (SIS_VB_OUTPUT_HIVISION
- | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = SIS_VB_OUTPUT_HIVISION
+ | SIS_SIMULTANEOUS_VIEW_ENABLE;
else if (xgifb_info->TV_plug == TVPLUG_SVIDEO)
- cr30 = (SIS_VB_OUTPUT_SVIDEO
- | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = SIS_VB_OUTPUT_SVIDEO
+ | SIS_SIMULTANEOUS_VIEW_ENABLE;
else if (xgifb_info->TV_plug == TVPLUG_COMPOSITE)
- cr30 = (SIS_VB_OUTPUT_COMPOSITE
- | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = SIS_VB_OUTPUT_COMPOSITE
+ | SIS_SIMULTANEOUS_VIEW_ENABLE;
else if (xgifb_info->TV_plug == TVPLUG_SCART)
- cr30 = (SIS_VB_OUTPUT_SCART
- | SIS_SIMULTANEOUS_VIEW_ENABLE);
+ cr30 = SIS_VB_OUTPUT_SCART
+ | SIS_SIMULTANEOUS_VIEW_ENABLE;
cr31 |= SIS_DRIVER_MODE;
if (XGIfb_tvmode == 1 || xgifb_info->TV_type == TVMODE_PAL)
@@ -2064,8 +2060,6 @@ static struct pci_driver xgifb_driver = {
.remove = xgifb_remove
};
-
-
/*****************************************************/
/* MODULE */
/*****************************************************/
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index d9524a2e9ce4..94e2e3c7c264 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -228,7 +228,6 @@
#define RES1280x960x85 0x46
#define RES1280x960x120 0x47
-
#define XG27_CR8F 0x0C
#define XG27_SR36 0x30
#define XG27_SR40 0x04
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 879a7e6751ac..26b539bc6faf 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -57,7 +57,8 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
data = xgifb_reg_get(pVBInfo->P3d4, 0x48);
/* HOTPLUG_SUPPORT */
/* for current XG20 & XG21, GPIOH is floating, driver will
- * fix DDR temporarily */
+ * fix DDR temporarily
+ */
/* DVI read GPIOH */
data &= 0x01; /* 1=DDRII, 0=DDR */
/* ~HOTPLUG_SUPPORT */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index c886dd2892a4..f97c77d88173 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -61,7 +61,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
pVBInfo->XGINew_CR97 = 0x80;
}
-
}
static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
@@ -155,7 +154,6 @@ static void XGI_ClearExt1Regs(struct vb_device_info *pVBInfo)
static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, 0x20);
xgifb_reg_set(pVBInfo->P3c4, 0x2B, XGI_VCLKData[0].SR2B);
xgifb_reg_set(pVBInfo->P3c4, 0x2C, XGI_VCLKData[0].SR2C);
@@ -274,12 +272,12 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
for (i = 0x01; i <= 0x04; i++) {
data = pVBInfo->TimingH.data[i];
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 1), data);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 1), data);
}
for (i = 0x05; i <= 0x06; i++) {
data = pVBInfo->TimingH.data[i];
- xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i + 6), data);
+ xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i + 6), data);
}
j = xgifb_reg_get(pVBInfo->P3c4, 0x0e);
@@ -325,17 +323,17 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
for (i = 0x00; i <= 0x01; i++) {
data = pVBInfo->TimingV.data[i];
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 6), data);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 6), data);
}
for (i = 0x02; i <= 0x03; i++) {
data = pVBInfo->TimingV.data[i];
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x0e), data);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 0x0e), data);
}
for (i = 0x04; i <= 0x05; i++) {
data = pVBInfo->TimingV.data[i];
- xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x11), data);
+ xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 0x11), data);
}
j = xgifb_reg_get(pVBInfo->P3c4, 0x0a);
@@ -433,7 +431,7 @@ static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
Temp2 |= 0x40; /* Temp2 + 0x40 */
Temp2 &= 0xFF;
- Tempax = (unsigned char) Temp2; /* Tempax: HRE[7:0] */
+ Tempax = (unsigned char)Temp2; /* Tempax: HRE[7:0] */
Tempax <<= 2; /* Tempax[7:2]: HRE[5:0] */
Tempdx >>= 6; /* Tempdx[7:6]->[1:0] HRS[9:8] */
Tempax |= Tempdx; /* HRE[5:0]HRS[9:8] */
@@ -483,11 +481,11 @@ static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
Temp2 |= 0x20; /* VRE + 0x20 */
Temp2 &= 0xFF;
- Tempax = (unsigned char) Temp2; /* Tempax: VRE[7:0] */
+ Tempax = (unsigned char)Temp2; /* Tempax: VRE[7:0] */
Tempax <<= 2; /* Tempax[7:0]; VRE[5:0]00 */
Temp1 &= 0x600; /* Temp1[10:9]: VRS[10:9] */
Temp1 >>= 9; /* Temp1[1:0]: VRS[10:9] */
- Tempbx = (unsigned char) Temp1;
+ Tempbx = (unsigned char)Temp1;
Tempax |= Tempbx; /* Tempax[7:0]: VRE[5:0]VRS[10:9] */
Tempax &= 0x7F;
/* SR3F D[7:2]->VRE D[1:0]->VRS */
@@ -592,7 +590,6 @@ static void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
/* SR09[7] enable FP output, SR09[6] 1: single 18bits, 0: 24bits */
xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
}
static void xgifb_set_lcd(int chip_id,
@@ -716,10 +713,10 @@ static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
data = xgifb_reg_get(pVBInfo->P3d4, 0x11);
data &= 0x7F;
xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
- xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short) (tempcx & 0xff));
+ xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff));
xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c,
- (unsigned short) ((tempcx & 0x0ff00) >> 10));
- xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short) (tempbx & 0xff));
+ (unsigned short)((tempcx & 0x0ff00) >> 10));
+ xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff));
tempax = 0;
tempbx >>= 8;
@@ -930,7 +927,6 @@ static void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
/* SR09[7] enable FP output, SR09[6] 1: single 18bits, 0: dual 12bits */
xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
}
static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
@@ -990,7 +986,6 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_and_or(pVBInfo->P3c4, 0x07, 0xFC, data2);
if (HwDeviceExtension->jChipType >= XG27)
xgifb_reg_and_or(pVBInfo->P3c4, 0x40, 0xFC, data2 & 0x03);
-
}
static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
@@ -1072,7 +1067,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
data = 0x6c;
xgifb_reg_set(pVBInfo->P3d4, 0x52, data);
}
-
}
static void XGI_WriteDAC(unsigned short dl,
@@ -1905,8 +1899,8 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
push <<= 8;
tempax = temp << 8;
tempbx = tempbx | tempax;
- temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
- | SetInSlaveMode | DisableCRT2Display);
+ temp = SetCRT2ToDualEdge | SetCRT2ToYPbPr525750 | XGI_SetCRT2ToLCDA
+ | SetInSlaveMode | DisableCRT2Display;
temp = 0xFFFF ^ temp;
tempbx &= temp;
@@ -2887,7 +2881,7 @@ static void XGI_SetGroup1(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part1Port, 0x0C, temp);
temp = tempcx & 0x00FF;
xgifb_reg_set(pVBInfo->Part1Port, 0x0D, temp);
- tempcx = (pVBInfo->VGAVT - 1);
+ tempcx = pVBInfo->VGAVT - 1;
temp = tempcx & 0x00FF;
xgifb_reg_set(pVBInfo->Part1Port, 0x0E, temp);
@@ -2925,7 +2919,7 @@ static void XGI_SetGroup1(unsigned short ModeIdIndex,
temp = tempbx & 0x00FF;
xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp);
temp = ((tempbx & 0xFF00) >> 8) << 4;
- temp = ((tempcx & 0x000F) | (temp));
+ temp = (tempcx & 0x000F) | (temp);
xgifb_reg_set(pVBInfo->Part1Port, 0x11, temp);
tempax = 0;
@@ -4080,7 +4074,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
tempcx |= 0x04000;
if (tempeax <= tempebx) {
- tempcx = (tempcx & (~0x4000));
+ tempcx = tempcx & (~0x4000);
tempeax = pVBInfo->VGAVDE;
} else {
tempeax -= tempebx;
@@ -4130,7 +4124,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
temp = (tempax & 0xFF00) >> 8;
temp = (temp & 0x0003) << 4;
xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
- temp = (tempax & 0x00FF);
+ temp = tempax & 0x00FF;
xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);
if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVision)) {
@@ -4932,7 +4926,7 @@ static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
tempcl -= ModeVGA;
if (tempcl >= 0) {
/* BT Color */
- tempah = (0x008 >> tempcl);
+ tempah = 0x008 >> tempcl;
if (tempah == 0)
tempah = 1;
tempah |= 0x040;
@@ -5073,7 +5067,6 @@ reg_and_or:
}
}
-
void XGI_UnLockCRT2(struct vb_device_info *pVBInfo)
{
xgifb_reg_and_or(pVBInfo->Part1Port, 0x2f, 0xFF, 0x01);
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index 0d27594554ca..2fd1a5935e1d 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -65,7 +65,6 @@ struct XGI330_TVDataTablStruct {
struct SiS_TVData const *DATAPTR;
};
-
struct XGI_TimingHStruct {
unsigned char data[8];
};
@@ -117,7 +116,6 @@ struct XGI_CRT1TableStruct {
unsigned char CR[16];
};
-
struct XGI301C_Tap4TimingStruct {
unsigned short DE;
unsigned char Reg[64]; /* C0-FF */
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index f17e5b9bd333..45f2c992cd44 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1140,7 +1140,6 @@ static const struct SiS_LVDSData XGI_LVDS1024x768Data_1[] = {
{1344, 806, 1344, 806} /* 06 (512x384,1024x768) */
};
-
static const struct SiS_LVDSData XGI_LVDS1024x768Data_2[] = {
{1344, 806, 1344, 806},
{1344, 806, 1344, 806},
@@ -1228,7 +1227,6 @@ static const struct SiS_LVDSData XGI_LVDS1024x768Data_1x75[] = {
{1312, 800, 1312, 800}, /* 06 (512x384,1024x768) */
};
-
static const struct SiS_LVDSData XGI_LVDS1024x768Data_2x75[] = {
{1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,640x200,640x400) */
{1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */
@@ -2314,7 +2312,6 @@ static const unsigned char TVAntiFlickList[] = {/* NTSCAntiFlicker */
0x00 /* ; 1 new anti-flicker ? */
};
-
static const unsigned char TVEdgeList[] = {
0x00, /* ; 0 NTSC No Edge enhance */
0x04, /* ; 1 NTSC Adaptive Edge enhance */
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 61fa10fd470f..de80e5c108dc 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -27,14 +27,16 @@ struct xgi_hw_device_info {
/* of Linear VGA memory */
unsigned long ulVideoMemorySize; /* size, in bytes, of the
- memory on the board */
+ * memory on the board
+ */
unsigned char jChipType; /* Used to Identify Graphics Chip */
/* defined in the data structure type */
/* "XGI_CHIP_TYPE" */
unsigned char jChipRevision; /* Used to Identify Graphics
- Chip Revision */
+ * Chip Revision
+ */
unsigned char ujVBChipID; /* the ID of video bridge */
/* defined in the data structure type */
@@ -46,4 +48,3 @@ struct xgi_hw_device_info {
/* Additional IOCTL for communication xgifb <> X driver */
/* If changing this, xgifb.h must also be changed (for xgifb) */
#endif
-
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 576a7a43470c..961202f4e9aa 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -16,9 +16,9 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <crypto/hash.h>
#include <linux/string.h>
#include <linux/kthread.h>
-#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -1190,7 +1190,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
static u32 iscsit_do_crypto_hash_sg(
- struct hash_desc *hash,
+ struct ahash_request *hash,
struct iscsi_cmd *cmd,
u32 data_offset,
u32 data_length,
@@ -1201,7 +1201,7 @@ static u32 iscsit_do_crypto_hash_sg(
struct scatterlist *sg;
unsigned int page_off;
- crypto_hash_init(hash);
+ crypto_ahash_init(hash);
sg = cmd->first_data_sg;
page_off = cmd->first_data_sg_off;
@@ -1209,7 +1209,8 @@ static u32 iscsit_do_crypto_hash_sg(
while (data_length) {
u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
- crypto_hash_update(hash, sg, cur_len);
+ ahash_request_set_crypt(hash, sg, NULL, cur_len);
+ crypto_ahash_update(hash);
data_length -= cur_len;
page_off = 0;
@@ -1221,33 +1222,34 @@ static u32 iscsit_do_crypto_hash_sg(
struct scatterlist pad_sg;
sg_init_one(&pad_sg, pad_bytes, padding);
- crypto_hash_update(hash, &pad_sg, padding);
+ ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
+ padding);
+ crypto_ahash_finup(hash);
+ } else {
+ ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
+ crypto_ahash_final(hash);
}
- crypto_hash_final(hash, (u8 *) &data_crc);
return data_crc;
}
static void iscsit_do_crypto_hash_buf(
- struct hash_desc *hash,
+ struct ahash_request *hash,
const void *buf,
u32 payload_length,
u32 padding,
u8 *pad_bytes,
u8 *data_crc)
{
- struct scatterlist sg;
+ struct scatterlist sg[2];
- crypto_hash_init(hash);
+ sg_init_table(sg, ARRAY_SIZE(sg));
+ sg_set_buf(sg, buf, payload_length);
+ sg_set_buf(sg + 1, pad_bytes, padding);
- sg_init_one(&sg, buf, payload_length);
- crypto_hash_update(hash, &sg, payload_length);
+ ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
- if (padding) {
- sg_init_one(&sg, pad_bytes, padding);
- crypto_hash_update(hash, &sg, padding);
- }
- crypto_hash_final(hash, data_crc);
+ crypto_ahash_digest(hash);
}
int
@@ -1422,7 +1424,7 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+ data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
be32_to_cpu(hdr->offset),
payload_length, padding,
cmd->pad_bytes);
@@ -1682,7 +1684,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
ping_data, payload_length,
padding, cmd->pad_bytes,
(u8 *)&data_crc);
@@ -2101,7 +2103,7 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
text_in, payload_length,
padding, (u8 *)&pad_bytes,
(u8 *)&data_crc);
@@ -2440,7 +2442,7 @@ static int iscsit_handle_immediate_data(
if (conn->conn_ops->DataDigest) {
u32 data_crc;
- data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+ data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
cmd->write_data_done, length, padding,
cmd->pad_bytes);
@@ -2553,7 +2555,7 @@ static int iscsit_send_conn_drop_async_message(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->tx_size += ISCSI_CRC_LEN;
@@ -2683,7 +2685,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2711,7 +2713,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
cmd->padding);
}
if (conn->conn_ops->DataDigest) {
- cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+ cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd,
datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
iov[iov_count].iov_base = &cmd->data_crc;
@@ -2857,7 +2859,7 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0],
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2915,7 +2917,7 @@ static int iscsit_send_unsolicited_nopin(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
tx_size += ISCSI_CRC_LEN;
@@ -2963,7 +2965,7 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2993,7 +2995,7 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
" padding bytes.\n", padding);
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
cmd->buf_ptr, cmd->buf_ptr_size,
padding, (u8 *)&cmd->pad_bytes,
(u8 *)&cmd->data_crc);
@@ -3049,7 +3051,7 @@ static int iscsit_send_r2t(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
@@ -3239,7 +3241,7 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
cmd->sense_buffer,
(cmd->se_cmd.scsi_sense_length + padding),
0, NULL, (u8 *)&cmd->data_crc);
@@ -3262,7 +3264,7 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3332,7 +3334,7 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
@@ -3601,7 +3603,7 @@ static int iscsit_send_text_rsp(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3611,7 +3613,7 @@ static int iscsit_send_text_rsp(
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
cmd->buf_ptr, text_length,
0, NULL, (u8 *)&cmd->data_crc);
@@ -3668,7 +3670,7 @@ static int iscsit_send_reject(
if (conn->conn_ops->HeaderDigest) {
u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3678,7 +3680,7 @@ static int iscsit_send_reject(
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr,
ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
iov[iov_count].iov_base = &cmd->data_crc;
@@ -4145,7 +4147,7 @@ int iscsi_target_rx_thread(void *arg)
goto transport_err;
}
- iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
buffer, ISCSI_HDR_LEN,
0, NULL, (u8 *)&checksum);
@@ -4359,10 +4361,14 @@ int iscsit_close_connection(
*/
iscsit_check_conn_usage_count(conn);
- if (conn->conn_rx_hash.tfm)
- crypto_free_hash(conn->conn_rx_hash.tfm);
- if (conn->conn_tx_hash.tfm)
- crypto_free_hash(conn->conn_tx_hash.tfm);
+ ahash_request_free(conn->conn_tx_hash);
+ if (conn->conn_rx_hash) {
+ struct crypto_ahash *tfm;
+
+ tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
+ ahash_request_free(conn->conn_rx_hash);
+ crypto_free_ahash(tfm);
+ }
free_cpumask_var(conn->conn_cpumask);
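For reference, a minimal sketch of the one-shot ahash pattern the conversion above targets; example_crc32c() is a hypothetical helper, not code from this patch. Passing CRYPTO_ALG_ASYNC as the mask selects a synchronous implementation, so crypto_ahash_digest() completes inline and its return value can be checked directly.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* hypothetical helper: CRC32C of a linear buffer via the ahash API */
static int example_crc32c(const void *buf, unsigned int len, u32 *crc)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}
	/* no completion callback: the sync backend finishes inline */
	ahash_request_set_callback(req, 0, NULL, NULL);

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, (u8 *)crc, len);
	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}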
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 47e249dccb5f..667406fcf4d3 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -16,9 +16,9 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -185,9 +185,8 @@ static int chap_server_compute_md5(
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
size_t compare_len;
struct iscsi_chap *chap = conn->auth_protocol;
- struct crypto_hash *tfm;
- struct hash_desc desc;
- struct scatterlist sg;
+ struct crypto_shash *tfm = NULL;
+ struct shash_desc *desc = NULL;
int auth_ret = -1, ret, challenge_len;
memset(identifier, 0, 10);
@@ -245,52 +244,47 @@ static int chap_server_compute_md5(
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
- tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(tfm)) {
- pr_err("Unable to allocate struct crypto_hash\n");
+ tfm = NULL;
+ pr_err("Unable to allocate struct crypto_shash\n");
goto out;
}
- desc.tfm = tfm;
- desc.flags = 0;
- ret = crypto_hash_init(&desc);
- if (ret < 0) {
- pr_err("crypto_hash_init() failed\n");
- crypto_free_hash(tfm);
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+ if (!desc) {
+ pr_err("Unable to allocate struct shash_desc\n");
goto out;
}
- sg_init_one(&sg, &chap->id, 1);
- ret = crypto_hash_update(&desc, &sg, 1);
+ desc->tfm = tfm;
+ desc->flags = 0;
+
+ ret = crypto_shash_init(desc);
if (ret < 0) {
- pr_err("crypto_hash_update() failed for id\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_init() failed\n");
goto out;
}
- sg_init_one(&sg, &auth->password, strlen(auth->password));
- ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+ ret = crypto_shash_update(desc, &chap->id, 1);
if (ret < 0) {
- pr_err("crypto_hash_update() failed for password\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_update() failed for id\n");
goto out;
}
- sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
- ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+ ret = crypto_shash_update(desc, (char *)&auth->password,
+ strlen(auth->password));
if (ret < 0) {
- pr_err("crypto_hash_update() failed for challenge\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_update() failed for password\n");
goto out;
}
- ret = crypto_hash_final(&desc, server_digest);
+ ret = crypto_shash_finup(desc, chap->challenge,
+ CHAP_CHALLENGE_LENGTH, server_digest);
if (ret < 0) {
- pr_err("crypto_hash_final() failed for server digest\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_finup() failed for challenge\n");
goto out;
}
- crypto_free_hash(tfm);
chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
pr_debug("[server] MD5 Server Digest: %s\n", response);
@@ -306,9 +300,8 @@ static int chap_server_compute_md5(
* authentication is not enabled.
*/
if (!auth->authenticate_target) {
- kfree(challenge);
- kfree(challenge_binhex);
- return 0;
+ auth_ret = 0;
+ goto out;
}
/*
* Get CHAP_I.
@@ -372,58 +365,37 @@ static int chap_server_compute_md5(
/*
* Generate CHAP_N and CHAP_R for mutual authentication.
*/
- tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(tfm)) {
- pr_err("Unable to allocate struct crypto_hash\n");
- goto out;
- }
- desc.tfm = tfm;
- desc.flags = 0;
-
- ret = crypto_hash_init(&desc);
+ ret = crypto_shash_init(desc);
if (ret < 0) {
- pr_err("crypto_hash_init() failed\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_init() failed\n");
goto out;
}
/* To handle both endiannesses */
id_as_uchar = id;
- sg_init_one(&sg, &id_as_uchar, 1);
- ret = crypto_hash_update(&desc, &sg, 1);
+ ret = crypto_shash_update(desc, &id_as_uchar, 1);
if (ret < 0) {
- pr_err("crypto_hash_update() failed for id\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_update() failed for id\n");
goto out;
}
- sg_init_one(&sg, auth->password_mutual,
- strlen(auth->password_mutual));
- ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+ ret = crypto_shash_update(desc, auth->password_mutual,
+ strlen(auth->password_mutual));
if (ret < 0) {
- pr_err("crypto_hash_update() failed for"
+ pr_err("crypto_shash_update() failed for"
" password_mutual\n");
- crypto_free_hash(tfm);
goto out;
}
/*
* Convert received challenge to binary hex.
*/
- sg_init_one(&sg, challenge_binhex, challenge_len);
- ret = crypto_hash_update(&desc, &sg, challenge_len);
+ ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
+ digest);
if (ret < 0) {
- pr_err("crypto_hash_update() failed for ma challenge\n");
- crypto_free_hash(tfm);
+ pr_err("crypto_shash_finup() failed for ma challenge\n");
goto out;
}
- ret = crypto_hash_final(&desc, digest);
- if (ret < 0) {
- pr_err("crypto_hash_final() failed for ma digest\n");
- crypto_free_hash(tfm);
- goto out;
- }
- crypto_free_hash(tfm);
/*
* Generate CHAP_N and CHAP_R.
*/
@@ -440,6 +412,8 @@ static int chap_server_compute_md5(
pr_debug("[server] Sending CHAP_R=0x%s\n", response);
auth_ret = 0;
out:
+ kzfree(desc);
+ crypto_free_shash(tfm);
kfree(challenge);
kfree(challenge_binhex);
return auth_ret;
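For comparison, a minimal sketch (hypothetical helper, same-era API as the patch) of the synchronous shash pattern chap_server_compute_md5() now follows: one tfm, one heap-allocated shash_desc sized by crypto_shash_descsize(), init/update/finup, then kzfree() of the descriptor.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* hypothetical helper: MD5 over two concatenated buffers */
static int example_md5(const u8 *a, unsigned int alen,
		       const u8 *b, unsigned int blen, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* descsize varies with the implementation backing the tfm */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, a, alen);
	if (!ret)
		ret = crypto_shash_finup(desc, b, blen, out);

	kzfree(desc);		/* descriptor may hold sensitive state */
	crypto_free_shash(tfm);
	return ret;
}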
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 2f821de63049..97e5b69e0668 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -771,41 +771,14 @@ static int lio_target_init_nodeacl(struct se_node_acl *se_nacl,
{
struct iscsi_node_acl *acl =
container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
- struct config_group *stats_cg = &se_nacl->acl_fabric_stat_group;
-
- stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!stats_cg->default_groups) {
- pr_err("Unable to allocate memory for"
- " stats_cg->default_groups\n");
- return -ENOMEM;
- }
- stats_cg->default_groups[0] = &acl->node_stat_grps.iscsi_sess_stats_group;
- stats_cg->default_groups[1] = NULL;
config_group_init_type_name(&acl->node_stat_grps.iscsi_sess_stats_group,
"iscsi_sess_stats", &iscsi_stat_sess_cit);
-
+ configfs_add_default_group(&acl->node_stat_grps.iscsi_sess_stats_group,
+ &se_nacl->acl_fabric_stat_group);
return 0;
}
-static void lio_target_cleanup_nodeacl( struct se_node_acl *se_nacl)
-{
- struct iscsi_node_acl *acl = container_of(se_nacl,
- struct iscsi_node_acl, se_node_acl);
- struct config_item *df_item;
- struct config_group *stats_cg;
- int i;
-
- stats_cg = &acl->se_node_acl.acl_fabric_stat_group;
- for (i = 0; stats_cg->default_groups[i]; i++) {
- df_item = &stats_cg->default_groups[i]->cg_item;
- stats_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(stats_cg->default_groups);
-}
-
/* End items for lio_target_acl_cit */
/* Start items for lio_target_tpg_attrib_cit */
@@ -1260,64 +1233,52 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
struct config_group *group,
const char *name)
{
- struct config_group *stats_cg;
struct iscsi_tiqn *tiqn;
tiqn = iscsit_add_tiqn((unsigned char *)name);
if (IS_ERR(tiqn))
return ERR_CAST(tiqn);
- /*
- * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
- */
- stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
-
- stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
- GFP_KERNEL);
- if (!stats_cg->default_groups) {
- pr_err("Unable to allocate memory for"
- " stats_cg->default_groups\n");
- iscsit_del_tiqn(tiqn);
- return ERR_PTR(-ENOMEM);
- }
- stats_cg->default_groups[0] = &tiqn->tiqn_stat_grps.iscsi_instance_group;
- stats_cg->default_groups[1] = &tiqn->tiqn_stat_grps.iscsi_sess_err_group;
- stats_cg->default_groups[2] = &tiqn->tiqn_stat_grps.iscsi_tgt_attr_group;
- stats_cg->default_groups[3] = &tiqn->tiqn_stat_grps.iscsi_login_stats_group;
- stats_cg->default_groups[4] = &tiqn->tiqn_stat_grps.iscsi_logout_stats_group;
- stats_cg->default_groups[5] = NULL;
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
+ pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
+ " %s\n", name);
+ return &tiqn->tiqn_wwn;
+}
+
+static void lio_target_add_wwn_groups(struct se_wwn *wwn)
+{
+ struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
+
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_instance_group,
"iscsi_instance", &iscsi_stat_instance_cit);
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_instance_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
+
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
"iscsi_sess_err", &iscsi_stat_sess_err_cit);
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_sess_err_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
+
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
"iscsi_tgt_attr", &iscsi_stat_tgt_attr_cit);
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_tgt_attr_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
+
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
"iscsi_login_stats", &iscsi_stat_login_cit);
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_login_stats_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
+
config_group_init_type_name(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
"iscsi_logout_stats", &iscsi_stat_logout_cit);
-
- pr_debug("LIO_Target_ConfigFS: REGISTER -> %s\n", tiqn->tiqn);
- pr_debug("LIO_Target_ConfigFS: REGISTER -> Allocated Node:"
- " %s\n", name);
- return &tiqn->tiqn_wwn;
+ configfs_add_default_group(&tiqn->tiqn_stat_grps.iscsi_logout_stats_group,
+ &tiqn->tiqn_wwn.fabric_stat_group);
}
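The conversion above follows a simple pairing, sketched below with illustrative names: each child group is initialized and attached with configfs_add_default_group(), and a single configfs_remove_default_groups() on the parent tears all of them down, replacing the hand-rolled default_groups[] arrays and their config_item_put() loops.

/* registration (sketch, illustrative names) */
config_group_init_type_name(&child_grp, "stats", &stats_cit);
configfs_add_default_group(&child_grp, &parent_grp);

/* teardown: one call releases every default group added above */
configfs_remove_default_groups(&parent_grp);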
static void lio_target_call_coredeltiqn(
struct se_wwn *wwn)
{
struct iscsi_tiqn *tiqn = container_of(wwn, struct iscsi_tiqn, tiqn_wwn);
- struct config_item *df_item;
- struct config_group *stats_cg;
- int i;
-
- stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
- for (i = 0; stats_cg->default_groups[i]; i++) {
- df_item = &stats_cg->default_groups[i]->cg_item;
- stats_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(stats_cg->default_groups);
pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s\n",
tiqn->tiqn);
@@ -1693,12 +1654,12 @@ const struct target_core_fabric_ops iscsi_ops = {
.aborted_task = lio_aborted_task,
.fabric_make_wwn = lio_target_call_coreaddtiqn,
.fabric_drop_wwn = lio_target_call_coredeltiqn,
+ .add_wwn_groups = lio_target_add_wwn_groups,
.fabric_make_tpg = lio_target_tiqn_addtpg,
.fabric_drop_tpg = lio_target_tiqn_deltpg,
.fabric_make_np = lio_target_call_addnptotpg,
.fabric_drop_np = lio_target_call_delnpfromtpg,
.fabric_init_nodeacl = lio_target_init_nodeacl,
- .fabric_cleanup_nodeacl = lio_target_cleanup_nodeacl,
.tfc_discovery_attrs = lio_target_discovery_auth_attrs,
.tfc_wwn_attrs = lio_target_wwn_attrs,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 96e78c823d13..8436d56c5f0c 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -16,9 +16,9 @@
* GNU General Public License for more details.
******************************************************************************/
+#include <crypto/hash.h>
#include <linux/string.h>
#include <linux/kthread.h>
-#include <linux/crypto.h>
#include <linux/idr.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
@@ -115,27 +115,36 @@ out_login:
*/
int iscsi_login_setup_crypto(struct iscsi_conn *conn)
{
+ struct crypto_ahash *tfm;
+
/*
* Set up slicing by CRC32C algorithm for RX and TX libcrypto contexts,
* which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fall back
* to software 1x8 byte slicing from crc32c.ko
*/
- conn->conn_rx_hash.flags = 0;
- conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(conn->conn_rx_hash.tfm)) {
- pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+ tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("crypto_alloc_ahash() failed\n");
return -ENOMEM;
}
- conn->conn_tx_hash.flags = 0;
- conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(conn->conn_tx_hash.tfm)) {
- pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
- crypto_free_hash(conn->conn_rx_hash.tfm);
+ conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!conn->conn_rx_hash) {
+ pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+ }
+ ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
+
+ conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!conn->conn_tx_hash) {
+ pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
+ ahash_request_free(conn->conn_rx_hash);
+ conn->conn_rx_hash = NULL;
+ crypto_free_ahash(tfm);
return -ENOMEM;
}
+ ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);
return 0;
}
@@ -1174,10 +1183,14 @@ old_sess_out:
iscsit_dec_session_usage_count(conn->sess);
}
- if (!IS_ERR(conn->conn_rx_hash.tfm))
- crypto_free_hash(conn->conn_rx_hash.tfm);
- if (!IS_ERR(conn->conn_tx_hash.tfm))
- crypto_free_hash(conn->conn_tx_hash.tfm);
+ ahash_request_free(conn->conn_tx_hash);
+ if (conn->conn_rx_hash) {
+ struct crypto_ahash *tfm;
+
+ tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
+ ahash_request_free(conn->conn_rx_hash);
+ crypto_free_ahash(tfm);
+ }
free_cpumask_var(conn->conn_cpumask);
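Note the asymmetric teardown above: both requests were allocated against a single crc32c tfm, so the tx request is freed unconditionally, while the shared tfm is recovered from the rx request with crypto_ahash_reqtfm() and freed exactly once. In condensed form:

ahash_request_free(conn->conn_tx_hash);	/* request only, not the tfm */
if (conn->conn_rx_hash) {
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);

	ahash_request_free(conn->conn_rx_hash);
	crypto_free_ahash(tfm);		/* shared tfm freed once, last */
}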
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index d41a5c300e31..0ad5ac541a7f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -802,58 +802,48 @@ static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
/* Start items for tcm_loop_nexus_cit */
+static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *p)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
+ struct tcm_loop_tpg, tl_se_tpg);
+
+ tl_tpg->tl_nexus = p;
+ return 0;
+}
+
static int tcm_loop_make_nexus(
struct tcm_loop_tpg *tl_tpg,
const char *name)
{
- struct se_portal_group *se_tpg;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
struct tcm_loop_nexus *tl_nexus;
- int ret = -ENOMEM;
+ int ret;
if (tl_tpg->tl_nexus) {
pr_debug("tl_tpg->tl_nexus already exists\n");
return -EEXIST;
}
- se_tpg = &tl_tpg->tl_se_tpg;
tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
if (!tl_nexus) {
pr_err("Unable to allocate struct tcm_loop_nexus\n");
return -ENOMEM;
}
- /*
- * Initialize the struct se_session pointer
- */
- tl_nexus->se_sess = transport_init_session(
- TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
+
+ tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
+ TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+ name, tl_nexus, tcm_loop_alloc_sess_cb);
if (IS_ERR(tl_nexus->se_sess)) {
ret = PTR_ERR(tl_nexus->se_sess);
- goto out;
- }
- /*
- * Since we are running in 'demo mode' this call with generate a
- * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
- * Initiator port name of the passed configfs group 'name'.
- */
- tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
- se_tpg, (unsigned char *)name);
- if (!tl_nexus->se_sess->se_node_acl) {
- transport_free_session(tl_nexus->se_sess);
- goto out;
+ kfree(tl_nexus);
+ return ret;
}
- /* Now, register the I_T Nexus as active. */
- transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
- tl_nexus->se_sess, tl_nexus);
- tl_tpg->tl_nexus = tl_nexus;
+
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
return 0;
-
-out:
- kfree(tl_nexus);
- return ret;
}
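A hedged sketch of the target_alloc_session() shape used above (demo_nexus and demo_alloc_sess_cb are illustrative names): the callback runs once the session and node ACL exist but before the session is registered, which is why tcm_loop can publish the nexus pointer there and why a non-zero return unwinds the whole allocation.

static int demo_alloc_sess_cb(struct se_portal_group *se_tpg,
			      struct se_session *se_sess, void *p)
{
	struct demo_nexus *nexus = p;	/* private data passed through */

	nexus->se_sess = se_sess;
	return 0;			/* non-zero aborts session setup */
}

/* tag_num/tag_size of 0 skip preallocating a command map */
se_sess = target_alloc_session(se_tpg, 0, 0, TARGET_PROT_NORMAL,
			       name, nexus, demo_alloc_sess_cb);
if (IS_ERR(se_sess))
	return PTR_ERR(se_sess);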
static int tcm_loop_drop_nexus(
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 3072f1aca8ec..c57e7884973d 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -196,45 +196,30 @@ static struct sbp_session *sbp_session_create(
struct sbp_session *sess;
int ret;
char guid_str[17];
- struct se_node_acl *se_nacl;
+
+ snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
sess = kmalloc(sizeof(*sess), GFP_KERNEL);
if (!sess) {
pr_err("failed to allocate session descriptor\n");
return ERR_PTR(-ENOMEM);
}
+ spin_lock_init(&sess->lock);
+ INIT_LIST_HEAD(&sess->login_list);
+ INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
+ sess->guid = guid;
- sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
+ sess->se_sess = target_alloc_session(&tpg->se_tpg, 128,
+ sizeof(struct sbp_target_request),
+ TARGET_PROT_NORMAL, guid_str,
+ sess, NULL);
if (IS_ERR(sess->se_sess)) {
pr_err("failed to init se_session\n");
-
ret = PTR_ERR(sess->se_sess);
kfree(sess);
return ERR_PTR(ret);
}
- snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
-
- se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
- if (!se_nacl) {
- pr_warn("Node ACL not found for %s\n", guid_str);
-
- transport_free_session(sess->se_sess);
- kfree(sess);
-
- return ERR_PTR(-EPERM);
- }
-
- sess->se_sess->se_node_acl = se_nacl;
-
- spin_lock_init(&sess->lock);
- INIT_LIST_HEAD(&sess->login_list);
- INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
-
- sess->guid = guid;
-
- transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
-
return sess;
}
@@ -908,7 +893,6 @@ static void tgt_agent_process_work(struct work_struct *work)
STATUS_BLOCK_SBP_STATUS(
SBP_STATUS_REQ_TYPE_NOTSUPP));
sbp_send_status(req);
- sbp_free_request(req);
return;
case 3: /* Dummy ORB */
req->status.status |= cpu_to_be32(
@@ -919,7 +903,6 @@ static void tgt_agent_process_work(struct work_struct *work)
STATUS_BLOCK_SBP_STATUS(
SBP_STATUS_DUMMY_ORB_COMPLETE));
sbp_send_status(req);
- sbp_free_request(req);
return;
default:
BUG();
@@ -938,6 +921,25 @@ static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
return active;
}
+static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
+ struct fw_card *card, u64 next_orb)
+{
+ struct se_session *se_sess = sess->se_sess;
+ struct sbp_target_request *req;
+ int tag;
+
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ if (tag < 0)
+ return ERR_PTR(-ENOMEM);
+
+ req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
+ memset(req, 0, sizeof(*req));
+ req->se_cmd.map_tag = tag;
+ req->se_cmd.tag = next_orb;
+
+ return req;
+}
+
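sbp_mgt_get_req() above adopts the standard tag-pool pattern: requests live in the session's preallocated sess_cmd_map and are handed out by percpu_ida tags instead of per-request kzalloc()/kfree(). In condensed form (mirroring the patch, error handling trimmed):

/* allocate: grab a tag and index into the preallocated map */
tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
if (tag < 0)
	return ERR_PTR(-ENOMEM);
req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
memset(req, 0, sizeof(*req));
req->se_cmd.map_tag = tag;

/* release: return the tag rather than kfree()ing the request */
percpu_ida_free(&se_sess->sess_tag_pool, req->se_cmd.map_tag);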
static void tgt_agent_fetch_work(struct work_struct *work)
{
struct sbp_target_agent *agent =
@@ -949,8 +951,8 @@ static void tgt_agent_fetch_work(struct work_struct *work)
u64 next_orb = agent->orb_pointer;
while (next_orb && tgt_agent_check_active(agent)) {
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req) {
+ req = sbp_mgt_get_req(sess, sess->card, next_orb);
+ if (IS_ERR(req)) {
spin_lock_bh(&agent->lock);
agent->state = AGENT_STATE_DEAD;
spin_unlock_bh(&agent->lock);
@@ -985,7 +987,6 @@ static void tgt_agent_fetch_work(struct work_struct *work)
spin_unlock_bh(&agent->lock);
sbp_send_status(req);
- sbp_free_request(req);
return;
}
@@ -1232,7 +1233,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
req->se_cmd.tag = req->orb_pointer;
if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
req->sense_buf, unpacked_lun, data_length,
- TCM_SIMPLE_TAG, data_dir, 0))
+ TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
goto err;
return;
@@ -1244,7 +1245,6 @@ err:
STATUS_BLOCK_LEN(1) |
STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
sbp_send_status(req);
- sbp_free_request(req);
}
/*
@@ -1343,22 +1343,29 @@ static int sbp_rw_data(struct sbp_target_request *req)
static int sbp_send_status(struct sbp_target_request *req)
{
- int ret, length;
+ int rc, ret = 0, length;
struct sbp_login_descriptor *login = req->login;
length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
- ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
+ rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
login->status_fifo_addr, &req->status, length);
- if (ret != RCODE_COMPLETE) {
- pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
- return -EIO;
+ if (rc != RCODE_COMPLETE) {
+ pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
+ ret = -EIO;
+ goto put_ref;
}
pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
req->orb_pointer);
-
- return 0;
+ /*
+ * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+ * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
+ * final se_cmd->cmd_kref put.
+ */
+put_ref:
+ target_put_sess_cmd(&req->se_cmd);
+ return ret;
}
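The TARGET_SCF_ACK_KREF flag added in sbp_handle_command() pairs with the target_put_sess_cmd() here: submission takes an extra se_cmd kref on the fabric's behalf, and the fabric drops it only after the status write has gone out, so the command cannot be freed underneath the in-flight status transaction. Schematically:

/* submit: ACK_KREF gives the fabric its own reference on se_cmd */
target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
		  req->sense_buf, unpacked_lun, data_length,
		  TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF);

/* ...later, once the status FIFO write completes... */
target_put_sess_cmd(&req->se_cmd);	/* drop the fabric's reference */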
static void sbp_sense_mangle(struct sbp_target_request *req)
@@ -1447,9 +1454,13 @@ static int sbp_send_sense(struct sbp_target_request *req)
static void sbp_free_request(struct sbp_target_request *req)
{
+ struct se_cmd *se_cmd = &req->se_cmd;
+ struct se_session *se_sess = se_cmd->se_sess;
+
kfree(req->pg_tbl);
kfree(req->cmd_buf);
- kfree(req);
+
+ percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
static void sbp_mgt_agent_process(struct work_struct *work)
@@ -1609,7 +1620,6 @@ static void sbp_mgt_agent_rw(struct fw_card *card,
rcode = RCODE_CONFLICT_ERROR;
goto out;
}
-
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
rcode = RCODE_CONFLICT_ERROR;
@@ -1815,8 +1825,7 @@ static int sbp_check_stop_free(struct se_cmd *se_cmd)
struct sbp_target_request *req = container_of(se_cmd,
struct sbp_target_request, se_cmd);
- transport_generic_free_cmd(&req->se_cmd, 0);
- return 1;
+ return transport_generic_free_cmd(&req->se_cmd, 0);
}
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 713c63d9681b..d498533f09ee 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -194,13 +194,11 @@ static struct config_group *target_core_register_fabric(
pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
&tf->tf_wwn_cit);
- tf->tf_group.default_groups = tf->tf_default_groups;
- tf->tf_group.default_groups[0] = &tf->tf_disc_group;
- tf->tf_group.default_groups[1] = NULL;
-
config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
+
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
&tf->tf_discovery_cit);
+ configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
@@ -216,9 +214,6 @@ static void target_core_deregister_fabric(
{
struct target_fabric_configfs *tf = container_of(
to_config_group(item), struct target_fabric_configfs, tf_group);
- struct config_group *tf_group;
- struct config_item *df_item;
- int i;
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
" tf list\n", config_item_name(item));
@@ -230,12 +225,7 @@ static void target_core_deregister_fabric(
pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
" %s\n", config_item_name(item));
- tf_group = &tf->tf_group;
- for (i = 0; tf_group->default_groups[i]; i++) {
- df_item = &tf_group->default_groups[i]->cg_item;
- tf_group->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+ configfs_remove_default_groups(&tf->tf_group);
config_item_put(item);
}
@@ -2151,7 +2141,6 @@ static void target_core_dev_release(struct config_item *item)
struct se_device *dev =
container_of(dev_cg, struct se_device, dev_group);
- kfree(dev_cg->default_groups);
target_free_device(dev);
}
@@ -2819,8 +2808,6 @@ static struct config_group *target_core_make_subdev(
struct se_hba *hba = item_to_hba(hba_ci);
struct target_backend *tb = hba->backend;
struct se_device *dev;
- struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
- struct config_group *dev_stat_grp = NULL;
int errno = -ENOMEM, ret;
ret = mutex_lock_interruptible(&hba->hba_access_mutex);
@@ -2831,73 +2818,52 @@ static struct config_group *target_core_make_subdev(
if (!dev)
goto out_unlock;
- dev_cg = &dev->dev_group;
-
- dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
- GFP_KERNEL);
- if (!dev_cg->default_groups)
- goto out_free_device;
+ config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
- config_group_init_type_name(dev_cg, name, &tb->tb_dev_cit);
config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
&tb->tb_dev_attrib_cit);
+ configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);
+
config_group_init_type_name(&dev->dev_pr_group, "pr",
&tb->tb_dev_pr_cit);
+ configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);
+
config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
&tb->tb_dev_wwn_cit);
+ configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
+ &dev->dev_group);
+
config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
+ configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
+ &dev->dev_group);
+
config_group_init_type_name(&dev->dev_stat_grps.stat_group,
"statistics", &tb->tb_dev_stat_cit);
+ configfs_add_default_group(&dev->dev_stat_grps.stat_group,
+ &dev->dev_group);
- dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
- dev_cg->default_groups[1] = &dev->dev_pr_group;
- dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
- dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
- dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
- dev_cg->default_groups[5] = NULL;
/*
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
if (!tg_pt_gp)
- goto out_free_dev_cg_default_groups;
+ goto out_free_device;
dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
- tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
- tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!tg_pt_gp_cg->default_groups) {
- pr_err("Unable to allocate tg_pt_gp_cg->"
- "default_groups\n");
- goto out_free_tg_pt_gp;
- }
-
config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
- tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
- tg_pt_gp_cg->default_groups[1] = NULL;
+ configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
+ &dev->t10_alua.alua_tg_pt_gps_group);
+
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
- dev_stat_grp = &dev->dev_stat_grps.stat_group;
- dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
- GFP_KERNEL);
- if (!dev_stat_grp->default_groups) {
- pr_err("Unable to allocate dev_stat_grp->default_groups\n");
- goto out_free_tg_pt_gp_cg_default_groups;
- }
target_stat_setup_dev_default_groups(dev);
mutex_unlock(&hba->hba_access_mutex);
- return dev_cg;
+ return &dev->dev_group;
-out_free_tg_pt_gp_cg_default_groups:
- kfree(tg_pt_gp_cg->default_groups);
-out_free_tg_pt_gp:
- core_alua_free_tg_pt_gp(tg_pt_gp);
-out_free_dev_cg_default_groups:
- kfree(dev_cg->default_groups);
out_free_device:
target_free_device(dev);
out_unlock:
@@ -2913,40 +2879,22 @@ static void target_core_drop_subdev(
struct se_device *dev =
container_of(dev_cg, struct se_device, dev_group);
struct se_hba *hba;
- struct config_item *df_item;
- struct config_group *tg_pt_gp_cg, *dev_stat_grp;
- int i;
hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
mutex_lock(&hba->hba_access_mutex);
- dev_stat_grp = &dev->dev_stat_grps.stat_group;
- for (i = 0; dev_stat_grp->default_groups[i]; i++) {
- df_item = &dev_stat_grp->default_groups[i]->cg_item;
- dev_stat_grp->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(dev_stat_grp->default_groups);
+ configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
+ configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);
- tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
- for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
- df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
- tg_pt_gp_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(tg_pt_gp_cg->default_groups);
/*
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
dev->t10_alua.default_tg_pt_gp = NULL;
- for (i = 0; dev_cg->default_groups[i]; i++) {
- df_item = &dev_cg->default_groups[i]->cg_item;
- dev_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+ configfs_remove_default_groups(dev_cg);
+
/*
* se_dev is released from target_core_dev_item_ops->release()
*/
@@ -3141,8 +3089,6 @@ void target_setup_backend_cits(struct target_backend *tb)
static int __init target_core_init_configfs(void)
{
- struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
- struct config_group *lu_gp_cg = NULL;
struct configfs_subsystem *subsys = &target_core_fabrics;
struct t10_alua_lu_gp *lu_gp;
int ret;
@@ -3161,51 +3107,24 @@ static int __init target_core_init_configfs(void)
* Create $CONFIGFS/target/core default group for HBA <-> Storage Object
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
- target_cg = &subsys->su_group;
- target_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!target_cg->default_groups) {
- pr_err("Unable to allocate target_cg->default_groups\n");
- ret = -ENOMEM;
- goto out_global;
- }
+ config_group_init_type_name(&target_core_hbagroup, "core",
+ &target_core_cit);
+ configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);
- config_group_init_type_name(&target_core_hbagroup,
- "core", &target_core_cit);
- target_cg->default_groups[0] = &target_core_hbagroup;
- target_cg->default_groups[1] = NULL;
/*
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
- hba_cg = &target_core_hbagroup;
- hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!hba_cg->default_groups) {
- pr_err("Unable to allocate hba_cg->default_groups\n");
- ret = -ENOMEM;
- goto out_global;
- }
- config_group_init_type_name(&alua_group,
- "alua", &target_core_alua_cit);
- hba_cg->default_groups[0] = &alua_group;
- hba_cg->default_groups[1] = NULL;
+ config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
+ configfs_add_default_group(&alua_group, &target_core_hbagroup);
+
/*
* Add ALUA Logical Unit Group and Target Port Group ConfigFS
* groups under /sys/kernel/config/target/core/alua/
*/
- alua_cg = &alua_group;
- alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!alua_cg->default_groups) {
- pr_err("Unable to allocate alua_cg->default_groups\n");
- ret = -ENOMEM;
- goto out_global;
- }
+ config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
+ &target_core_alua_lu_gps_cit);
+ configfs_add_default_group(&alua_lu_gps_group, &alua_group);
- config_group_init_type_name(&alua_lu_gps_group,
- "lu_gps", &target_core_alua_lu_gps_cit);
- alua_cg->default_groups[0] = &alua_lu_gps_group;
- alua_cg->default_groups[1] = NULL;
/*
* Add core/alua/lu_gps/default_lu_gp
*/
@@ -3215,20 +3134,12 @@ static int __init target_core_init_configfs(void)
goto out_global;
}
- lu_gp_cg = &alua_lu_gps_group;
- lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!lu_gp_cg->default_groups) {
- pr_err("Unable to allocate lu_gp_cg->default_groups\n");
- ret = -ENOMEM;
- goto out_global;
- }
-
config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
&target_core_alua_lu_gp_cit);
- lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
- lu_gp_cg->default_groups[1] = NULL;
+ configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);
+
default_lu_gp = lu_gp;
+
/*
* Register the target_core_mod subsystem with configfs.
*/
@@ -3267,55 +3178,21 @@ out_global:
core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL;
}
- if (lu_gp_cg)
- kfree(lu_gp_cg->default_groups);
- if (alua_cg)
- kfree(alua_cg->default_groups);
- if (hba_cg)
- kfree(hba_cg->default_groups);
- kfree(target_cg->default_groups);
release_se_kmem_caches();
return ret;
}
static void __exit target_core_exit_configfs(void)
{
- struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
- struct config_item *item;
- int i;
+ configfs_remove_default_groups(&alua_lu_gps_group);
+ configfs_remove_default_groups(&alua_group);
+ configfs_remove_default_groups(&target_core_hbagroup);
- lu_gp_cg = &alua_lu_gps_group;
- for (i = 0; lu_gp_cg->default_groups[i]; i++) {
- item = &lu_gp_cg->default_groups[i]->cg_item;
- lu_gp_cg->default_groups[i] = NULL;
- config_item_put(item);
- }
- kfree(lu_gp_cg->default_groups);
- lu_gp_cg->default_groups = NULL;
-
- alua_cg = &alua_group;
- for (i = 0; alua_cg->default_groups[i]; i++) {
- item = &alua_cg->default_groups[i]->cg_item;
- alua_cg->default_groups[i] = NULL;
- config_item_put(item);
- }
- kfree(alua_cg->default_groups);
- alua_cg->default_groups = NULL;
-
- hba_cg = &target_core_hbagroup;
- for (i = 0; hba_cg->default_groups[i]; i++) {
- item = &hba_cg->default_groups[i]->cg_item;
- hba_cg->default_groups[i] = NULL;
- config_item_put(item);
- }
- kfree(hba_cg->default_groups);
- hba_cg->default_groups = NULL;
/*
* We expect subsys->su_group.default_groups to be released
* by configfs subsystem provider logic..
*/
configfs_unregister_subsystem(&target_core_fabrics);
- kfree(target_core_fabrics.su_group.default_groups);
core_alua_free_lu_gp(default_lu_gp);
default_lu_gp = NULL;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index da457e25717a..a4046ca6e60d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -86,7 +86,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
se_cmd->lun_ref_active = true;
if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
- (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+ deve->lun_access_ro) {
pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08llx\n",
se_cmd->se_tfo->get_fabric_name(),
@@ -199,7 +199,7 @@ bool target_lun_is_rdonly(struct se_cmd *cmd)
rcu_read_lock();
deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
- ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
+ ret = deve && deve->lun_access_ro;
rcu_read_unlock();
return ret;
@@ -258,22 +258,15 @@ void core_free_device_list_for_node(
void core_update_device_list_access(
u64 mapped_lun,
- u32 lun_access,
+ bool lun_access_ro,
struct se_node_acl *nacl)
{
struct se_dev_entry *deve;
mutex_lock(&nacl->lun_entry_mutex);
deve = target_nacl_find_deve(nacl, mapped_lun);
- if (deve) {
- if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
- } else {
- deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
- }
- }
+ if (deve)
+ deve->lun_access_ro = lun_access_ro;
mutex_unlock(&nacl->lun_entry_mutex);
}
@@ -319,7 +312,7 @@ int core_enable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
u64 mapped_lun,
- u32 lun_access,
+ bool lun_access_ro,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
@@ -340,11 +333,7 @@ int core_enable_device_list_for_node(
kref_init(&new->pr_kref);
init_completion(&new->pr_comp);
- if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
- new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
- else
- new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
-
+ new->lun_access_ro = lun_access_ro;
new->creation_time = get_jiffies_64();
new->attach_count++;
@@ -433,7 +422,7 @@ void core_disable_device_list_for_node(
hlist_del_rcu(&orig->link);
clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
- orig->lun_flags = 0;
+ orig->lun_access_ro = false;
orig->creation_time = 0;
orig->attach_count--;
/*
@@ -558,8 +547,7 @@ int core_dev_add_lun(
{
int rc;
- rc = core_tpg_add_lun(tpg, lun,
- TRANSPORT_LUNFLAGS_READ_WRITE, dev);
+ rc = core_tpg_add_lun(tpg, lun, false, dev);
if (rc < 0)
return rc;
@@ -635,7 +623,7 @@ int core_dev_add_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl,
struct se_lun *lun,
- u32 lun_access)
+ bool lun_access_ro)
{
struct se_node_acl *nacl = lacl->se_lun_nacl;
/*
@@ -647,20 +635,19 @@ int core_dev_add_initiator_node_lun_acl(
if (!nacl)
return -EINVAL;
- if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
- (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
- lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ if (lun->lun_access_ro)
+ lun_access_ro = true;
lacl->se_lun = lun;
if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
- lun_access, nacl, tpg) < 0)
+ lun_access_ro, nacl, tpg) < 0)
return -EINVAL;
pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for "
" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
- (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
+ lun_access_ro ? "RO" : "RW",
nacl->initiatorname);
/*
* Check to see if there are any existing persistent reservation APTPL
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index f916d18ccb48..31a096aa16ab 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -78,7 +78,7 @@ static int target_fabric_mappedlun_link(
struct se_lun_acl, se_lun_group);
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
- int lun_access;
+ bool lun_access_ro;
if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
@@ -115,19 +115,18 @@ static int target_fabric_mappedlun_link(
}
/*
* If this struct se_node_acl was dynamically generated with
- * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
- * which be will write protected (READ-ONLY) when
+ * tpg_1/attrib/generate_node_acls=1, use the existing
+ * deve->lun_access_ro value, which will be true when
* tpg_1/attrib/demo_mode_write_protect=1
*/
rcu_read_lock();
deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
if (deve)
- lun_access = deve->lun_flags;
+ lun_access_ro = deve->lun_access_ro;
else
- lun_access =
+ lun_access_ro =
(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
- se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
- TRANSPORT_LUNFLAGS_READ_WRITE;
+ se_tpg)) ? true : false;
rcu_read_unlock();
/*
* Determine the actual mapped LUN value user wants..
@@ -135,7 +134,7 @@ static int target_fabric_mappedlun_link(
* This value is what the SCSI Initiator actually sees the
* $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
*/
- return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
+ return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
}
static int target_fabric_mappedlun_unlink(
@@ -167,8 +166,7 @@ static ssize_t target_fabric_mappedlun_write_protect_show(
rcu_read_lock();
deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
if (deve) {
- len = sprintf(page, "%d\n",
- (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+ len = sprintf(page, "%d\n", deve->lun_access_ro);
}
rcu_read_unlock();
@@ -181,25 +179,23 @@ static ssize_t target_fabric_mappedlun_write_protect_store(
struct se_lun_acl *lacl = item_to_lun_acl(item);
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
- unsigned long op;
+ unsigned long wp;
int ret;
- ret = kstrtoul(page, 0, &op);
+ ret = kstrtoul(page, 0, &wp);
if (ret)
return ret;
- if ((op != 1) && (op != 0))
+ if ((wp != 1) && (wp != 0))
return -EINVAL;
- core_update_device_list_access(lacl->mapped_lun, (op) ?
- TRANSPORT_LUNFLAGS_READ_ONLY :
- TRANSPORT_LUNFLAGS_READ_WRITE,
- lacl->se_lun_nacl);
+ /* wp=1 means lun_access_ro=true */
+ core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);
pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %llu Write Protect bit to %s\n",
se_tpg->se_tpg_tfo->get_fabric_name(),
- se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+ se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
return count;
@@ -273,18 +269,10 @@ static struct config_group *target_fabric_make_mappedlun(
struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_lun_acl *lacl = NULL;
- struct config_item *acl_ci;
- struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
char *buf;
unsigned long long mapped_lun;
int ret = 0;
- acl_ci = &group->cg_item;
- if (!acl_ci) {
- pr_err("Unable to locatel acl_ci\n");
- return NULL;
- }
-
buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (!buf) {
pr_err("Unable to allocate memory for name buf\n");
@@ -315,37 +303,19 @@ static struct config_group *target_fabric_make_mappedlun(
goto out;
}
- lacl_cg = &lacl->se_lun_group;
- lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!lacl_cg->default_groups) {
- pr_err("Unable to allocate lacl_cg->default_groups\n");
- ret = -ENOMEM;
- goto out;
- }
-
config_group_init_type_name(&lacl->se_lun_group, name,
&tf->tf_tpg_mappedlun_cit);
+
config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
"statistics", &tf->tf_tpg_mappedlun_stat_cit);
- lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
- lacl_cg->default_groups[1] = NULL;
-
- ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
- GFP_KERNEL);
- if (!ml_stat_grp->default_groups) {
- pr_err("Unable to allocate ml_stat_grp->default_groups\n");
- ret = -ENOMEM;
- goto out;
- }
+ configfs_add_default_group(&lacl->ml_stat_grps.stat_group,
+ &lacl->se_lun_group);
+
target_stat_setup_mappedlun_default_groups(lacl);
kfree(buf);
return &lacl->se_lun_group;
out:
- if (lacl_cg)
- kfree(lacl_cg->default_groups);
kfree(lacl);
kfree(buf);
return ERR_PTR(ret);
@@ -357,25 +327,9 @@ static void target_fabric_drop_mappedlun(
{
struct se_lun_acl *lacl = container_of(to_config_group(item),
struct se_lun_acl, se_lun_group);
- struct config_item *df_item;
- struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
- int i;
-
- ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- for (i = 0; ml_stat_grp->default_groups[i]; i++) {
- df_item = &ml_stat_grp->default_groups[i]->cg_item;
- ml_stat_grp->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(ml_stat_grp->default_groups);
- lacl_cg = &lacl->se_lun_group;
- for (i = 0; lacl_cg->default_groups[i]; i++) {
- df_item = &lacl_cg->default_groups[i]->cg_item;
- lacl_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(lacl_cg->default_groups);
+ configfs_remove_default_groups(&lacl->ml_stat_grps.stat_group);
+ configfs_remove_default_groups(&lacl->se_lun_group);
config_item_put(item);
}
@@ -384,10 +338,8 @@ static void target_fabric_nacl_base_release(struct config_item *item)
{
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
- struct target_fabric_configfs *tf = se_nacl->se_tpg->se_tpg_wwn->wwn_tf;
- if (tf->tf_ops->fabric_cleanup_nodeacl)
- tf->tf_ops->fabric_cleanup_nodeacl(se_nacl);
+ configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
core_tpg_del_initiator_node_acl(se_nacl);
}
@@ -424,38 +376,42 @@ static struct config_group *target_fabric_make_nodeacl(
struct se_portal_group, tpg_acl_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_node_acl *se_nacl;
- struct config_group *nacl_cg;
se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name);
if (IS_ERR(se_nacl))
return ERR_CAST(se_nacl);
- if (tf->tf_ops->fabric_init_nodeacl) {
- int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
- if (ret) {
- core_tpg_del_initiator_node_acl(se_nacl);
- return ERR_PTR(ret);
- }
- }
-
- nacl_cg = &se_nacl->acl_group;
- nacl_cg->default_groups = se_nacl->acl_default_groups;
- nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
- nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
- nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
- nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
- nacl_cg->default_groups[4] = NULL;
-
config_group_init_type_name(&se_nacl->acl_group, name,
&tf->tf_tpg_nacl_base_cit);
+
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
&tf->tf_tpg_nacl_attrib_cit);
+ configfs_add_default_group(&se_nacl->acl_attrib_group,
+ &se_nacl->acl_group);
+
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
&tf->tf_tpg_nacl_auth_cit);
+ configfs_add_default_group(&se_nacl->acl_auth_group,
+ &se_nacl->acl_group);
+
config_group_init_type_name(&se_nacl->acl_param_group, "param",
&tf->tf_tpg_nacl_param_cit);
+ configfs_add_default_group(&se_nacl->acl_param_group,
+ &se_nacl->acl_group);
+
config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
"fabric_statistics", &tf->tf_tpg_nacl_stat_cit);
+ configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
+ &se_nacl->acl_group);
+
+ if (tf->tf_ops->fabric_init_nodeacl) {
+ int ret = tf->tf_ops->fabric_init_nodeacl(se_nacl, name);
+ if (ret) {
+ configfs_remove_default_groups(&se_nacl->acl_fabric_stat_group);
+ core_tpg_del_initiator_node_acl(se_nacl);
+ return ERR_PTR(ret);
+ }
+ }
return &se_nacl->acl_group;
}
@@ -466,16 +422,9 @@ static void target_fabric_drop_nodeacl(
{
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
- struct config_item *df_item;
- struct config_group *nacl_cg;
- int i;
-
- nacl_cg = &se_nacl->acl_group;
- for (i = 0; nacl_cg->default_groups[i]; i++) {
- df_item = &nacl_cg->default_groups[i]->cg_item;
- nacl_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+
+ configfs_remove_default_groups(&se_nacl->acl_group);
+
/*
* struct se_node_acl free is done in target_fabric_nacl_base_release()
*/
@@ -795,7 +744,6 @@ static struct config_group *target_fabric_make_lun(
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_lun_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
unsigned long long unpacked_lun;
int errno;
@@ -812,31 +760,14 @@ static struct config_group *target_fabric_make_lun(
if (IS_ERR(lun))
return ERR_CAST(lun);
- lun_cg = &lun->lun_group;
- lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
- GFP_KERNEL);
- if (!lun_cg->default_groups) {
- pr_err("Unable to allocate lun_cg->default_groups\n");
- kfree(lun);
- return ERR_PTR(-ENOMEM);
- }
-
config_group_init_type_name(&lun->lun_group, name,
&tf->tf_tpg_port_cit);
+
config_group_init_type_name(&lun->port_stat_grps.stat_group,
"statistics", &tf->tf_tpg_port_stat_cit);
- lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
- lun_cg->default_groups[1] = NULL;
-
- port_stat_grp = &lun->port_stat_grps.stat_group;
- port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4,
- GFP_KERNEL);
- if (!port_stat_grp->default_groups) {
- pr_err("Unable to allocate port_stat_grp->default_groups\n");
- kfree(lun_cg->default_groups);
- kfree(lun);
- return ERR_PTR(-ENOMEM);
- }
+ configfs_add_default_group(&lun->port_stat_grps.stat_group,
+ &lun->lun_group);
+
target_stat_setup_port_default_groups(lun);
return &lun->lun_group;
@@ -848,25 +779,9 @@ static void target_fabric_drop_lun(
{
struct se_lun *lun = container_of(to_config_group(item),
struct se_lun, lun_group);
- struct config_item *df_item;
- struct config_group *lun_cg, *port_stat_grp;
- int i;
-
- port_stat_grp = &lun->port_stat_grps.stat_group;
- for (i = 0; port_stat_grp->default_groups[i]; i++) {
- df_item = &port_stat_grp->default_groups[i]->cg_item;
- port_stat_grp->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(port_stat_grp->default_groups);
- lun_cg = &lun->lun_group;
- for (i = 0; lun_cg->default_groups[i]; i++) {
- df_item = &lun_cg->default_groups[i]->cg_item;
- lun_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
- kfree(lun_cg->default_groups);
+ configfs_remove_default_groups(&lun->port_stat_grps.stat_group);
+ configfs_remove_default_groups(&lun->lun_group);
config_item_put(item);
}
@@ -922,32 +837,39 @@ static struct config_group *target_fabric_make_tpg(
se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
- /*
- * Setup default groups from pre-allocated se_tpg->tpg_default_groups
- */
- se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
- se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
- se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
- se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
- se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
- se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_auth_group;
- se_tpg->tpg_group.default_groups[5] = &se_tpg->tpg_param_group;
- se_tpg->tpg_group.default_groups[6] = NULL;
config_group_init_type_name(&se_tpg->tpg_group, name,
&tf->tf_tpg_base_cit);
+
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
&tf->tf_tpg_lun_cit);
+ configfs_add_default_group(&se_tpg->tpg_lun_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
&tf->tf_tpg_np_cit);
+ configfs_add_default_group(&se_tpg->tpg_np_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
&tf->tf_tpg_nacl_cit);
+ configfs_add_default_group(&se_tpg->tpg_acl_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
&tf->tf_tpg_attrib_cit);
+ configfs_add_default_group(&se_tpg->tpg_attrib_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
&tf->tf_tpg_auth_cit);
+ configfs_add_default_group(&se_tpg->tpg_auth_group,
+ &se_tpg->tpg_group);
+
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
&tf->tf_tpg_param_cit);
+ configfs_add_default_group(&se_tpg->tpg_param_group,
+ &se_tpg->tpg_group);
return &se_tpg->tpg_group;
}
@@ -958,19 +880,8 @@ static void target_fabric_drop_tpg(
{
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
- struct config_group *tpg_cg = &se_tpg->tpg_group;
- struct config_item *df_item;
- int i;
- /*
- * Release default groups, but do not release tpg_cg->default_groups
- * memory as it is statically allocated at se_tpg->tpg_default_groups.
- */
- for (i = 0; tpg_cg->default_groups[i]; i++) {
- df_item = &tpg_cg->default_groups[i]->cg_item;
- tpg_cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+ configfs_remove_default_groups(&se_tpg->tpg_group);
config_item_put(item);
}
@@ -980,6 +891,7 @@ static void target_fabric_release_wwn(struct config_item *item)
struct se_wwn, wwn_group);
struct target_fabric_configfs *tf = wwn->wwn_tf;
+ configfs_remove_default_groups(&wwn->fabric_stat_group);
tf->tf_ops->fabric_drop_wwn(wwn);
}
@@ -1026,17 +938,15 @@ static struct config_group *target_fabric_make_wwn(
return ERR_PTR(-EINVAL);
wwn->wwn_tf = tf;
- /*
- * Setup default groups from pre-allocated wwn->wwn_default_groups
- */
- wwn->wwn_group.default_groups = wwn->wwn_default_groups;
- wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
- wwn->wwn_group.default_groups[1] = NULL;
config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
+
config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
&tf->tf_wwn_fabric_stats_cit);
+ configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
+ if (tf->tf_ops->add_wwn_groups)
+ tf->tf_ops->add_wwn_groups(wwn);
return &wwn->wwn_group;
}
@@ -1046,16 +956,8 @@ static void target_fabric_drop_wwn(
{
struct se_wwn *wwn = container_of(to_config_group(item),
struct se_wwn, wwn_group);
- struct config_item *df_item;
- struct config_group *cg = &wwn->wwn_group;
- int i;
-
- for (i = 0; cg->default_groups[i]; i++) {
- df_item = &cg->default_groups[i]->cg_item;
- cg->default_groups[i] = NULL;
- config_item_put(df_item);
- }
+ configfs_remove_default_groups(&wwn->wwn_group);
config_item_put(item);
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index abe4eb997a84..026a758e5778 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -413,8 +413,39 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
}
static sense_reason_t
+iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ struct scatterlist *sg = &cmd->t_data_sg[0];
+ struct page *page = NULL;
+ int ret;
+
+ if (sg->offset) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return TCM_OUT_OF_RESOURCES;
+ sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page),
+ dev->dev_attrib.block_size);
+ }
+
+ ret = blkdev_issue_write_same(bdev,
+ target_to_linux_sector(dev, cmd->t_task_lba),
+ target_to_linux_sector(dev,
+ sbc_get_write_same_sectors(cmd)),
+ GFP_KERNEL, page ? page : sg_page(sg));
+ if (page)
+ __free_page(page);
+ if (ret)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
+ struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
struct iblock_req *ibr;
struct scatterlist *sg;
struct bio *bio;
@@ -439,6 +470,9 @@ iblock_execute_write_same(struct se_cmd *cmd)
return TCM_INVALID_CDB_FIELD;
}
+ if (bdev_write_same(bdev))
+ return iblock_execute_write_same_direct(bdev, cmd);
+
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
goto fail;
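The new iblock_execute_write_same_direct() hands WRITE SAME to the block layer whenever the backing device advertises support, instead of emulating it bio by bio. A hedged sketch of the call sequence, with bdev, lba, sectors and pattern_page assumed to be validated by the caller:

	if (bdev_write_same(bdev)) {	/* non-zero max_write_same_sectors */
		int err = blkdev_issue_write_same(bdev, lba, sectors,
						  GFP_KERNEL, pattern_page);
		if (err)
			return err;	/* caller may fall back to emulation */
	}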
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index db4412fe6b8a..86b4a8375628 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -22,7 +22,6 @@ struct target_fabric_configfs {
struct list_head tf_list;
struct config_group tf_group;
struct config_group tf_disc_group;
- struct config_group *tf_default_groups[2];
const struct target_core_fabric_ops *tf_ops;
struct config_item_type tf_discovery_cit;
@@ -60,10 +59,10 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
void target_pr_kref_release(struct kref *);
void core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
-void core_update_device_list_access(u64, u32, struct se_node_acl *);
+void core_update_device_list_access(u64, bool, struct se_node_acl *);
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
- u64, u32, struct se_node_acl *, struct se_portal_group *);
+ u64, bool, struct se_node_acl *, struct se_portal_group *);
void core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
struct se_node_acl *, struct se_portal_group *);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
@@ -73,7 +72,7 @@ void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
struct se_node_acl *, u64, int *);
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
- struct se_lun_acl *, struct se_lun *lun, u32);
+ struct se_lun_acl *, struct se_lun *lun, bool);
int core_dev_del_initiator_node_lun_acl(struct se_lun *,
struct se_lun_acl *);
void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
@@ -119,7 +118,7 @@ void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *,
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
- u32, struct se_device *);
+ bool, struct se_device *);
void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
const char *initiatorname);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 0aa47babd16c..2a91ed3ef380 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -997,7 +997,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
int length = 0;
int ret;
int i;
- bool read_only = target_lun_is_rdonly(cmd);;
memset(buf, 0, SE_MODE_PAGE_BUF);
@@ -1008,7 +1007,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
length = ten ? 3 : 2;
/* DEVICE-SPECIFIC PARAMETER */
- if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
+ if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
spc_modesense_write_protect(&buf[length], type);
/*
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 81a6b3e07687..1a39033d2bff 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -407,19 +407,20 @@ static struct config_item_type target_stat_scsi_lu_cit = {
*/
void target_stat_setup_dev_default_groups(struct se_device *dev)
{
- struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
-
config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
+ configfs_add_default_group(&dev->dev_stat_grps.scsi_dev_group,
+ &dev->dev_stat_grps.stat_group);
+
config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
+ configfs_add_default_group(&dev->dev_stat_grps.scsi_tgt_dev_group,
+ &dev->dev_stat_grps.stat_group);
+
config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
-
- dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
- dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
- dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
- dev_stat_grp->default_groups[3] = NULL;
+ configfs_add_default_group(&dev->dev_stat_grps.scsi_lu_group,
+ &dev->dev_stat_grps.stat_group);
}
/*
@@ -818,19 +819,20 @@ static struct config_item_type target_stat_scsi_transport_cit = {
*/
void target_stat_setup_port_default_groups(struct se_lun *lun)
{
- struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
-
config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
"scsi_port", &target_stat_scsi_port_cit);
+ configfs_add_default_group(&lun->port_stat_grps.scsi_port_group,
+ &lun->port_stat_grps.stat_group);
+
config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
+ configfs_add_default_group(&lun->port_stat_grps.scsi_tgt_port_group,
+ &lun->port_stat_grps.stat_group);
+
config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
"scsi_transport", &target_stat_scsi_transport_cit);
-
- port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
- port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
- port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
- port_stat_grp->default_groups[3] = NULL;
+ configfs_add_default_group(&lun->port_stat_grps.scsi_transport_group,
+ &lun->port_stat_grps.stat_group);
}
/*
@@ -1351,14 +1353,13 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = {
*/
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
{
- struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
-
config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
+ configfs_add_default_group(&lacl->ml_stat_grps.scsi_auth_intr_group,
+ &lacl->ml_stat_grps.stat_group);
+
config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
-
- ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
- ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
- ml_stat_grp->default_groups[2] = NULL;
+ configfs_add_default_group(&lacl->ml_stat_grps.scsi_att_intr_port_group,
+ &lacl->ml_stat_grps.stat_group);
}
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 82a663ba9800..4f229e711e1c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -177,7 +177,6 @@ void core_tmr_abort_task(
if (!__target_check_io_state(se_cmd, se_sess, 0)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- target_put_sess_cmd(se_cmd);
goto out;
}
list_del_init(&se_cmd->se_cmd_list);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 3608b1b5ecf7..ddf046080dc3 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -121,7 +121,7 @@ void core_tpg_add_node_to_devs(
struct se_portal_group *tpg,
struct se_lun *lun_orig)
{
- u32 lun_access = 0;
+ bool lun_access_ro = true;
struct se_lun *lun;
struct se_device *dev;
@@ -137,27 +137,26 @@ void core_tpg_add_node_to_devs(
* demo_mode_write_protect is ON, or READ_ONLY;
*/
if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
- lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ lun_access_ro = false;
} else {
/*
* Allow only optical drives to issue R/W in default RO
* demo mode.
*/
if (dev->transport->get_device_type(dev) == TYPE_DISK)
- lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ lun_access_ro = true;
else
- lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+ lun_access_ro = false;
}
pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
" access for LUN in Demo Mode\n",
tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
- (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
- "READ-WRITE" : "READ-ONLY");
+ lun_access_ro ? "READ-ONLY" : "READ-WRITE");
core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
- lun_access, acl, tpg);
+ lun_access_ro, acl, tpg);
/*
* Check to see if there are any existing persistent reservation
* APTPL pre-registrations that need to be enabled for this dynamic
@@ -522,7 +521,7 @@ int core_tpg_register(
return PTR_ERR(se_tpg->tpg_virt_lun0);
ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
- TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
+ true, g_lun0_dev);
if (ret < 0) {
kfree(se_tpg->tpg_virt_lun0);
return ret;
@@ -616,7 +615,7 @@ struct se_lun *core_tpg_alloc_lun(
int core_tpg_add_lun(
struct se_portal_group *tpg,
struct se_lun *lun,
- u32 lun_access,
+ bool lun_access_ro,
struct se_device *dev)
{
int ret;
@@ -644,9 +643,9 @@ int core_tpg_add_lun(
spin_unlock(&dev->se_port_lock);
if (dev->dev_flags & DF_READ_ONLY)
- lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ lun->lun_access_ro = true;
else
- lun->lun_access = lun_access;
+ lun->lun_access_ro = lun_access_ro;
if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
mutex_unlock(&tpg->tpg_lun_mutex);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 867bc6d0a68a..ab2bf12975e1 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -281,6 +281,17 @@ struct se_session *transport_init_session_tags(unsigned int tag_num,
struct se_session *se_sess;
int rc;
+ if (tag_num != 0 && !tag_size) {
+ pr_err("init_session_tags called with percpu-ida tag_num:"
+ " %u, but zero tag_size\n", tag_num);
+ return ERR_PTR(-EINVAL);
+ }
+ if (!tag_num && tag_size) {
+ pr_err("init_session_tags called with percpu-ida tag_size:"
+ " %u, but zero tag_num\n", tag_size);
+ return ERR_PTR(-EINVAL);
+ }
+
se_sess = transport_init_session(sup_prot_ops);
if (IS_ERR(se_sess))
return se_sess;
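The two guards make the tag parameters all-or-nothing: both must be non-zero for percpu_ida preallocation, and callers that want no preallocation use transport_init_session() instead (as target_alloc_session() below shows). Illustrative calls, with struct my_cmd standing in for a fabric's per-command descriptor:

	sess = transport_init_session_tags(128, sizeof(struct my_cmd),
					   TARGET_PROT_NORMAL);		/* valid pairing */
	sess = transport_init_session_tags(128, 0, TARGET_PROT_NORMAL);	/* now -EINVAL */
	sess = transport_init_session_tags(0, 64, TARGET_PROT_NORMAL);		/* now -EINVAL */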
@@ -374,6 +385,51 @@ void transport_register_session(
}
EXPORT_SYMBOL(transport_register_session);
+struct se_session *
+target_alloc_session(struct se_portal_group *tpg,
+ unsigned int tag_num, unsigned int tag_size,
+ enum target_prot_op prot_op,
+ const char *initiatorname, void *private,
+ int (*callback)(struct se_portal_group *,
+ struct se_session *, void *))
+{
+ struct se_session *sess;
+
+ /*
+ * If the fabric driver is using percpu-ida based pre-allocation
+ * of I/O descriptor tags, go ahead and perform that setup now.
+ */
+ if (tag_num != 0)
+ sess = transport_init_session_tags(tag_num, tag_size, prot_op);
+ else
+ sess = transport_init_session(prot_op);
+
+ if (IS_ERR(sess))
+ return sess;
+
+ sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
+ (unsigned char *)initiatorname);
+ if (!sess->se_node_acl) {
+ transport_free_session(sess);
+ return ERR_PTR(-EACCES);
+ }
+ /*
+ * Go ahead and perform any remaining fabric setup that is
+ * required before transport_register_session().
+ */
+ if (callback != NULL) {
+ int rc = callback(tpg, sess, private);
+ if (rc) {
+ transport_free_session(sess);
+ return ERR_PTR(rc);
+ }
+ }
+
+ transport_register_session(tpg, sess->se_node_acl, sess, private);
+ return sess;
+}
+EXPORT_SYMBOL(target_alloc_session);
+
static void target_release_session(struct kref *kref)
{
struct se_session *se_sess = container_of(kref,
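target_alloc_session() above folds session allocation, initiator ACL lookup, an optional fabric setup callback, and transport_register_session() into one call; the tcm_fc conversion at the end of this patch is its first user. A sketch of a caller, with my_setup_cb, struct my_cmd and the tag count purely illustrative:

	static int my_setup_cb(struct se_portal_group *tpg,
			       struct se_session *se_sess, void *priv)
	{
		/* fabric-private setup that must precede registration;
		 * a non-zero return frees the session and is propagated. */
		return 0;
	}

	...
	sess->se_sess = target_alloc_session(se_tpg, 128, sizeof(struct my_cmd),
					     TARGET_PROT_NORMAL, initiatorname,
					     sess, my_setup_cb);
	if (IS_ERR(sess->se_sess))
		return PTR_ERR(sess->se_sess);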
@@ -1941,6 +1997,9 @@ static void transport_complete_qf(struct se_cmd *cmd)
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
+ if (cmd->scsi_status)
+ goto queue_status;
+
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_data_in(cmd);
break;
@@ -1951,6 +2010,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
+queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
break;
@@ -2072,6 +2132,9 @@ static void target_complete_ok_work(struct work_struct *work)
queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
+ if (cmd->scsi_status)
+ goto queue_status;
+
atomic_long_add(cmd->data_length,
&cmd->se_lun->lun_stats.tx_data_octets);
/*
@@ -2111,6 +2174,7 @@ queue_rsp:
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
+queue_status:
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd);
if (ret == -EAGAIN || ret == -ENOMEM)
@@ -2596,8 +2660,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
list_for_each_entry_safe(se_cmd, tmp_cmd,
&se_sess->sess_wait_list, se_cmd_list) {
- list_del_init(&se_cmd->se_cmd_list);
-
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
" %d\n", se_cmd, se_cmd->t_state,
se_cmd->se_tfo->get_cmd_state(se_cmd));
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 94f5154ac788..62bf4fe5704a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/stringify.h>
+#include <linux/bitops.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -63,8 +64,11 @@
#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
+#define DATA_BLOCK_BITS 256
+#define DATA_BLOCK_SIZE 4096
+
#define CMDR_SIZE (16 * 4096)
-#define DATA_SIZE (257 * 4096)
+#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
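This replaces the old head/tail ring accounting for the data area with a fixed map of 256 blocks of 4 KiB each: one bit per block in the device bitmap, free space computed from the number of clear bits (spc_bitmap_free() below), and each command remembering its own blocks so completion can release exactly those by XOR-ing them back out (free_data_area()). A self-contained userspace model of that accounting, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	#define DATA_BLOCK_BITS	256
	#define DATA_BLOCK_SIZE	4096
	#define WORDS		(DATA_BLOCK_BITS / 64)

	static uint64_t dev_map[WORDS];		/* device-wide allocation map */

	static size_t bytes_free(const uint64_t *map)
	{
		int i, used = 0;

		for (i = 0; i < WORDS; i++)
			used += __builtin_popcountll(map[i]);
		return (size_t)(DATA_BLOCK_BITS - used) * DATA_BLOCK_SIZE;
	}

	int main(void)
	{
		uint64_t cmd_map[WORDS] = { 0 };
		int i;

		dev_map[0] = 0x3;		/* blocks 0-1 already in use */
		cmd_map[0] = 0xc;		/* this command takes blocks 2-3 */
		dev_map[0] |= cmd_map[0];
		printf("in flight: %zu bytes free\n", bytes_free(dev_map));

		/* completion: XOR clears exactly the command's blocks */
		for (i = 0; i < WORDS; i++)
			dev_map[i] ^= cmd_map[i];
		printf("completed: %zu bytes free\n", bytes_free(dev_map));
		return 0;
	}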
@@ -93,12 +97,11 @@ struct tcmu_dev {
u32 cmdr_size;
u32 cmdr_last_cleaned;
/* Offset of data ring from start of mb */
+ /* Must add data_off and mb_addr to get the address */
size_t data_off;
size_t data_size;
- /* Ring head + tail values. */
- /* Must add data_off and mb_addr to get the address */
- size_t data_head;
- size_t data_tail;
+
+ DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
wait_queue_head_t wait_cmdr;
/* TODO should this be a mutex? */
@@ -122,9 +125,9 @@ struct tcmu_cmd {
uint16_t cmd_id;
- /* Can't use se_cmd->data_length when cleaning up expired cmds, because if
+ /* Can't use se_cmd when cleaning up expired cmds, because if
cmd has been completed then accessing se_cmd is off limits */
- size_t data_length;
+ DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
unsigned long deadline;
@@ -168,13 +171,6 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- tcmu_cmd->data_length = se_cmd->data_length;
-
- if (se_cmd->se_cmd_flags & SCF_BIDI) {
- BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
- tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
- }
-
tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
idr_preload(GFP_KERNEL);
@@ -231,105 +227,126 @@ static inline size_t head_to_end(size_t head, size_t size)
return size - head;
}
+static inline void new_iov(struct iovec **iov, int *iov_cnt,
+ struct tcmu_dev *udev)
+{
+ struct iovec *iovec;
+
+ if (*iov_cnt != 0)
+ (*iov)++;
+ (*iov_cnt)++;
+
+ iovec = *iov;
+ memset(iovec, 0, sizeof(struct iovec));
+}
+
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
+/* offset is relative to mb_addr */
+static inline size_t get_block_offset(struct tcmu_dev *dev,
+ int block, int remaining)
+{
+ return dev->data_off + block * DATA_BLOCK_SIZE +
+ DATA_BLOCK_SIZE - remaining;
+}
+
+static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov)
+{
+ return (size_t)iov->iov_base + iov->iov_len;
+}
+
static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
struct scatterlist *data_sg, unsigned int data_nents,
struct iovec **iov, int *iov_cnt, bool copy_data)
{
- int i;
+ int i, block;
+ int block_remaining = 0;
void *from, *to;
- size_t copy_bytes;
+ size_t copy_bytes, to_offset;
struct scatterlist *sg;
for_each_sg(data_sg, sg, data_nents, i) {
- copy_bytes = min_t(size_t, sg->length,
- head_to_end(udev->data_head, udev->data_size));
+ int sg_remaining = sg->length;
from = kmap_atomic(sg_page(sg)) + sg->offset;
- to = (void *) udev->mb_addr + udev->data_off + udev->data_head;
-
- if (copy_data) {
- memcpy(to, from, copy_bytes);
- tcmu_flush_dcache_range(to, copy_bytes);
- }
-
- /* Even iov_base is relative to mb_addr */
- (*iov)->iov_len = copy_bytes;
- (*iov)->iov_base = (void __user *) udev->data_off +
- udev->data_head;
- (*iov_cnt)++;
- (*iov)++;
-
- UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
-
- /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
- if (sg->length != copy_bytes) {
- void *from_skip = from + copy_bytes;
-
- copy_bytes = sg->length - copy_bytes;
-
- (*iov)->iov_len = copy_bytes;
- (*iov)->iov_base = (void __user *) udev->data_off +
- udev->data_head;
-
+ while (sg_remaining > 0) {
+ if (block_remaining == 0) {
+ block = find_first_zero_bit(udev->data_bitmap,
+ DATA_BLOCK_BITS);
+ block_remaining = DATA_BLOCK_SIZE;
+ set_bit(block, udev->data_bitmap);
+ }
+ copy_bytes = min_t(size_t, sg_remaining,
+ block_remaining);
+ to_offset = get_block_offset(udev, block,
+ block_remaining);
+ to = (void *)udev->mb_addr + to_offset;
+ if (*iov_cnt != 0 &&
+ to_offset == iov_tail(udev, *iov)) {
+ (*iov)->iov_len += copy_bytes;
+ } else {
+ new_iov(iov, iov_cnt, udev);
+ (*iov)->iov_base = (void __user *) to_offset;
+ (*iov)->iov_len = copy_bytes;
+ }
if (copy_data) {
- to = (void *) udev->mb_addr +
- udev->data_off + udev->data_head;
- memcpy(to, from_skip, copy_bytes);
+ memcpy(to, from + sg->length - sg_remaining,
+ copy_bytes);
tcmu_flush_dcache_range(to, copy_bytes);
}
-
- (*iov_cnt)++;
- (*iov)++;
-
- UPDATE_HEAD(udev->data_head,
- copy_bytes, udev->data_size);
+ sg_remaining -= copy_bytes;
+ block_remaining -= copy_bytes;
}
-
kunmap_atomic(from - sg->offset);
}
}
-static void gather_and_free_data_area(struct tcmu_dev *udev,
- struct scatterlist *data_sg, unsigned int data_nents)
+static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
{
- int i;
+ bitmap_xor(udev->data_bitmap, udev->data_bitmap, cmd->data_bitmap,
+ DATA_BLOCK_BITS);
+}
+
+static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
+ struct scatterlist *data_sg, unsigned int data_nents)
+{
+ int i, block;
+ int block_remaining = 0;
void *from, *to;
- size_t copy_bytes;
+ size_t copy_bytes, from_offset;
struct scatterlist *sg;
- /* It'd be easier to look at entry's iovec again, but UAM */
for_each_sg(data_sg, sg, data_nents, i) {
- copy_bytes = min_t(size_t, sg->length,
- head_to_end(udev->data_tail, udev->data_size));
-
+ int sg_remaining = sg->length;
to = kmap_atomic(sg_page(sg)) + sg->offset;
- WARN_ON(sg->length + sg->offset > PAGE_SIZE);
- from = (void *) udev->mb_addr +
- udev->data_off + udev->data_tail;
- tcmu_flush_dcache_range(from, copy_bytes);
- memcpy(to, from, copy_bytes);
-
- UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
-
- /* Uh oh, wrapped the data buffer for this sg's data */
- if (sg->length != copy_bytes) {
- void *to_skip = to + copy_bytes;
-
- from = (void *) udev->mb_addr +
- udev->data_off + udev->data_tail;
- WARN_ON(udev->data_tail);
- copy_bytes = sg->length - copy_bytes;
+ while (sg_remaining > 0) {
+ if (block_remaining == 0) {
+ block = find_first_bit(cmd_bitmap,
+ DATA_BLOCK_BITS);
+ block_remaining = DATA_BLOCK_SIZE;
+ clear_bit(block, cmd_bitmap);
+ }
+ copy_bytes = min_t(size_t, sg_remaining,
+ block_remaining);
+ from_offset = get_block_offset(udev, block,
+ block_remaining);
+ from = (void *) udev->mb_addr + from_offset;
tcmu_flush_dcache_range(from, copy_bytes);
- memcpy(to_skip, from, copy_bytes);
+ memcpy(to + sg->length - sg_remaining, from,
+ copy_bytes);
- UPDATE_HEAD(udev->data_tail,
- copy_bytes, udev->data_size);
+ sg_remaining -= copy_bytes;
+ block_remaining -= copy_bytes;
}
kunmap_atomic(to - sg->offset);
}
}
+static inline size_t spc_bitmap_free(unsigned long *bitmap)
+{
+ return DATA_BLOCK_SIZE * (DATA_BLOCK_BITS -
+ bitmap_weight(bitmap, DATA_BLOCK_BITS));
+}
+
/*
* We can't queue a command until we have space available on the cmd ring *and*
* space available on the data ring.
@@ -339,9 +356,8 @@ static void gather_and_free_data_area(struct tcmu_dev *udev,
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
struct tcmu_mailbox *mb = udev->mb_addr;
- size_t space;
+ size_t space, cmd_needed;
u32 cmd_head;
- size_t cmd_needed;
tcmu_flush_dcache_range(mb, sizeof(*mb));
@@ -363,10 +379,10 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
return false;
}
- space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
+ space = spc_bitmap_free(udev->data_bitmap);
if (space < data_needed) {
- pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
- udev->data_tail, udev->data_size);
+ pr_debug("no data space: only %zu available, but ask for %zu\n",
+ space, data_needed);
return false;
}
@@ -385,6 +401,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
uint32_t cmd_head;
uint64_t cdb_off;
bool copy_to_data_area;
+ size_t data_length;
+ DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
return -EINVAL;
@@ -393,12 +411,12 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
* Must be a certain minimum size for response sense info, but
* also may be larger if the iov array is large.
*
- * iovs = sgl_nents+1, for end-of-ring case, plus another 1
- * b/c size == offsetof one-past-element.
+ * We prepare way too many iovs for potential uses here, because it's
+ * expensive to tell how many regions are free in the bitmap
*/
base_command_size = max(offsetof(struct tcmu_cmd_entry,
- req.iov[se_cmd->t_bidi_data_nents +
- se_cmd->t_data_nents + 2]),
+ req.iov[se_cmd->t_bidi_data_nents +
+ se_cmd->t_data_nents]),
sizeof(struct tcmu_cmd_entry));
command_size = base_command_size
+ round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -409,13 +427,18 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
mb = udev->mb_addr;
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+ data_length = se_cmd->data_length;
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+ data_length += se_cmd->t_bidi_data_sg->length;
+ }
if ((command_size > (udev->cmdr_size / 2))
- || tcmu_cmd->data_length > (udev->data_size - 1))
+ || data_length > udev->data_size)
pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
- "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
+ "cmd/data ring buffers\n", command_size, data_length,
udev->cmdr_size, udev->data_size);
- while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
+ while (!is_ring_space_avail(udev, command_size, data_length)) {
int ret;
DEFINE_WAIT(__wait);
@@ -462,6 +485,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry->hdr.kflags = 0;
entry->hdr.uflags = 0;
+ bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
+
/*
* Fix up iovecs, and handle if allocation in data ring wrapped.
*/
@@ -480,6 +505,10 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
entry->req.iov_bidi_cnt = iov_cnt;
+ /* cmd's data_bitmap is what changed in process */
+ bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
+ DATA_BLOCK_BITS);
+
/* All offsets relative to mb_addr, not start of entry! */
cdb_off = CMDR_OFF + cmd_head + base_command_size;
memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
@@ -530,35 +559,42 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
struct tcmu_dev *udev = cmd->tcmu_dev;
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
- /* cmd has been completed already from timeout, just reclaim data
- ring space */
- UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+ /*
+ * cmd has been completed already from timeout, just reclaim
+ * data ring space and free cmd
+ */
+ free_data_area(udev, cmd);
+
+ kmem_cache_free(tcmu_cmd_cache, cmd);
return;
}
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
- UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+ free_data_area(udev, cmd);
pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
cmd->se_cmd);
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
se_cmd->scsi_sense_length);
-
- UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+ free_data_area(udev, cmd);
} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
- /* Discard data_out buffer */
- UPDATE_HEAD(udev->data_tail,
- (size_t)se_cmd->t_data_sg->length, udev->data_size);
+ DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
- /* Get Data-In buffer */
- gather_and_free_data_area(udev,
+ /* Get Data-In buffer before clean up */
+ bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+ gather_data_area(udev, bitmap,
se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+ free_data_area(udev, cmd);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
- gather_and_free_data_area(udev,
+ DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+ bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+ gather_data_area(udev, bitmap,
se_cmd->t_data_sg, se_cmd->t_data_nents);
+ free_data_area(udev, cmd);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
- UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+ free_data_area(udev, cmd);
} else if (se_cmd->data_direction != DMA_NONE) {
pr_warn("TCMU: data direction was %d!\n",
se_cmd->data_direction);
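Note the ordering in the completion paths above: gather_data_area() consumes the bitmap it is handed (find_first_bit() plus clear_bit() as it walks the blocks), so a scratch copy is gathered from while cmd->data_bitmap stays intact for free_data_area(), which XORs those same bits out of the device map. Condensed, with sg/nents standing in for the command's data scatterlist:

	DECLARE_BITMAP(scratch, DATA_BLOCK_BITS);

	bitmap_copy(scratch, cmd->data_bitmap, DATA_BLOCK_BITS);
	gather_data_area(udev, scratch, sg, nents);	/* consumes 'scratch' */
	free_data_area(udev, cmd);			/* needs the original */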
@@ -894,11 +930,13 @@ static int tcmu_configure_device(struct se_device *dev)
mb = udev->mb_addr;
mb->version = TCMU_MAILBOX_VERSION;
+ mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
WARN_ON(!PAGE_ALIGNED(udev->data_off));
WARN_ON(udev->data_size % PAGE_SIZE);
+ WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
info->version = __stringify(TCMU_MAILBOX_VERSION);
@@ -942,12 +980,12 @@ err_vzalloc:
return ret;
}
-static int tcmu_check_pending_cmd(int id, void *p, void *data)
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
- struct tcmu_cmd *cmd = p;
-
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
return 0;
+ }
return -EINVAL;
}
@@ -962,6 +1000,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
int i;
del_timer_sync(&udev->timeout);
@@ -970,10 +1010,13 @@ static void tcmu_free_device(struct se_device *dev)
/* Upper layer should drain all requests before calling this */
spin_lock_irq(&udev->commands_lock);
- i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
+ idr_for_each_entry(&udev->commands, cmd, i) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
idr_destroy(&udev->commands);
spin_unlock_irq(&udev->commands_lock);
- WARN_ON(i);
+ WARN_ON(!all_expired);
/* Device was configured */
if (udev->uio_info.uio_dev) {
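Replacing idr_for_each() and its callback with the idr_for_each_entry() iterator lets the loop keep its result (all_expired) in a local variable instead of threading state through a void pointer. The general shape of the iterator:

	struct tcmu_cmd *cmd;
	int id;

	idr_for_each_entry(&udev->commands, cmd, id) {
		/* 'cmd' is each stored pointer, 'id' its IDR index */
	}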
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 064d6dfb5b6d..216e18cc9133 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -107,8 +107,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
int ft_check_stop_free(struct se_cmd *se_cmd)
{
- transport_generic_free_cmd(se_cmd, 0);
- return 1;
+ return transport_generic_free_cmd(se_cmd, 0);
}
/*
@@ -179,6 +178,12 @@ int ft_queue_status(struct se_cmd *se_cmd)
return -ENOMEM;
}
lport->tt.exch_done(cmd->seq);
+ /*
+ * Drop the extra ACK_KREF reference taken by target_submit_cmd()
+ * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+ * final se_cmd->cmd_kref put.
+ */
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
@@ -387,7 +392,7 @@ static void ft_send_tm(struct ft_cmd *cmd)
/* FIXME: Add referenced task tag for ABORT_TASK */
rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
- cmd, tm_func, GFP_KERNEL, 0, 0);
+ cmd, tm_func, GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
if (rc < 0)
ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}
@@ -422,6 +427,12 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
+ /*
+ * Drop the extra ACK_KREF reference taken by target_submit_tmr()
+ * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
+ * final se_cmd->cmd_kref put.
+ */
+ target_put_sess_cmd(&cmd->se_cmd);
}
void ft_aborted_task(struct se_cmd *se_cmd)
@@ -560,7 +571,8 @@ static void ft_send_work(struct work_struct *work)
*/
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
- ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+ ntohl(fcp->fc_dl), task_attr, data_dir,
+ TARGET_SCF_ACK_KREF))
goto err;
pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index e19f4c58c6fa..d0c3e1894c61 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -186,6 +186,20 @@ out:
return NULL;
}
+static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *p)
+{
+ struct ft_sess *sess = p;
+ struct ft_tport *tport = sess->tport;
+ struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
+
+ pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+ hlist_add_head_rcu(&sess->hash, head);
+ tport->sess_count++;
+
+ return 0;
+}
+
/*
* Allocate session and enter it in the hash for the local port.
* Caller holds ft_lport_lock.
@@ -194,7 +208,6 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
struct fc_rport_priv *rdata)
{
struct se_portal_group *se_tpg = &tport->tpg->se_tpg;
- struct se_node_acl *se_acl;
struct ft_sess *sess;
struct hlist_head *head;
unsigned char initiatorname[TRANSPORT_IQN_LEN];
@@ -210,31 +223,18 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
if (!sess)
return NULL;
- sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
- sizeof(struct ft_cmd),
- TARGET_PROT_NORMAL);
- if (IS_ERR(sess->se_sess)) {
- kfree(sess);
- return NULL;
- }
+ kref_init(&sess->kref); /* ref for table entry */
+ sess->tport = tport;
+ sess->port_id = port_id;
- se_acl = core_tpg_get_initiator_node_acl(se_tpg, &initiatorname[0]);
- if (!se_acl) {
- transport_free_session(sess->se_sess);
+ sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS,
+ sizeof(struct ft_cmd),
+ TARGET_PROT_NORMAL, &initiatorname[0],
+ sess, ft_sess_alloc_cb);
+ if (IS_ERR(sess->se_sess)) {
kfree(sess);
return NULL;
}
- sess->se_sess->se_node_acl = se_acl;
- sess->tport = tport;
- sess->port_id = port_id;
- kref_init(&sess->kref); /* ref for table entry */
- hlist_add_head_rcu(&sess->hash, head);
- tport->sess_count++;
-
- pr_debug("port_id %x sess %p\n", port_id, sess);
-
- transport_register_session(&tport->tpg->se_tpg, se_acl,
- sess->se_sess, sess);
return sess;
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 7c92c09be213..c37eedc35a24 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -178,6 +178,7 @@ config THERMAL_EMULATION
config HISI_THERMAL
tristate "Hisilicon thermal driver"
depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enable this to plug hisilicon's thermal sensor driver into the Linux
thermal framework. cpufreq is used as the cooling device to throttle
@@ -197,6 +198,7 @@ config IMX_THERMAL
config SPEAR_THERMAL
tristate "SPEAr thermal sensor driver"
depends on PLAT_SPEAR || COMPILE_TEST
+ depends on HAS_IOMEM
depends on OF
help
Enable this to plug the SPEAr thermal sensor driver into the Linux
@@ -206,6 +208,7 @@ config ROCKCHIP_THERMAL
tristate "Rockchip thermal driver"
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on RESET_CONTROLLER
+ depends on HAS_IOMEM
help
Rockchip thermal driver provides support for Temperature sensor
ADC (TS-ADC) found on Rockchip SoCs. It supports one critical
@@ -214,7 +217,7 @@ config ROCKCHIP_THERMAL
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_IOMEM
help
Enable this to plug the R-Car thermal sensor driver into the Linux
@@ -223,6 +226,7 @@ config RCAR_THERMAL
config KIRKWOOD_THERMAL
tristate "Temperature sensor on Marvell Kirkwood SoCs"
depends on MACH_KIRKWOOD || COMPILE_TEST
+ depends on HAS_IOMEM
depends on OF
help
Support for the Kirkwood thermal sensor driver into the Linux thermal
@@ -231,6 +235,7 @@ config KIRKWOOD_THERMAL
config DOVE_THERMAL
tristate "Temperature sensor on Marvell Dove SoCs"
depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST
+ depends on HAS_IOMEM
depends on OF
help
Support for the Dove thermal sensor driver in the Linux thermal
@@ -249,6 +254,7 @@ config DB8500_THERMAL
config ARMADA_THERMAL
tristate "Armada 370/XP thermal management"
depends on ARCH_MVEBU || COMPILE_TEST
+ depends on HAS_IOMEM
depends on OF
help
Enable this option if you want to have support for thermal management
@@ -266,7 +272,8 @@ config TEGRA_SOCTHERM
config DB8500_CPUFREQ_COOLING
tristate "DB8500 cpufreq cooling"
- depends on ARCH_U8500
+ depends on ARCH_U8500 || COMPILE_TEST
+ depends on HAS_IOMEM
depends on CPU_THERMAL
default y
help
@@ -365,8 +372,18 @@ config INTEL_PCH_THERMAL
Thermal reporting device will provide temperature reading,
programmable trip points and other information.
+config MTK_THERMAL
+ tristate "Temperature sensor driver for mediatek SoCs"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
+ default y
+ help
+ Enable this option if you want to have support for the thermal
+ management controller present in MediaTek SoCs.
+
menu "Texas Instruments thermal drivers"
depends on ARCH_HAS_BANDGAP || COMPILE_TEST
+depends on HAS_IOMEM
source "drivers/thermal/ti-soc-thermal/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index cfae6a654793..8e9cbc3b5679 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -48,3 +48,4 @@ obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
+obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 00d81af648b8..6a6ec1c95a7a 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -24,6 +24,7 @@
/* Intel PCH thermal Device IDs */
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
+#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
/* Wildcat Point-LP PCH Thermal registers */
#define WPT_TEMP 0x0000 /* Temperature */
@@ -201,6 +202,10 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev,
ptd->ops = &pch_dev_ops_wpt;
dev_name = "pch_wildcat_point";
break;
+ case PCH_THERMAL_DID_SKL:
+ ptd->ops = &pch_dev_ops_wpt;
+ dev_name = "pch_skylake";
+ break;
default:
dev_err(&pdev->dev, "unknown pch thermal device\n");
return -ENODEV;
@@ -266,6 +271,7 @@ static void intel_pch_thermal_remove(struct pci_dev *pdev)
static struct pci_device_id intel_pch_thermal_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
new file mode 100644
index 000000000000..3d93b1c07cee
--- /dev/null
+++ b/drivers/thermal/mtk_thermal.c
@@ -0,0 +1,625 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Hanyi Wu <hanyi.wu@mediatek.com>
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/thermal.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+/* AUXADC Registers */
+#define AUXADC_CON0_V 0x000
+#define AUXADC_CON1_V 0x004
+#define AUXADC_CON1_SET_V 0x008
+#define AUXADC_CON1_CLR_V 0x00c
+#define AUXADC_CON2_V 0x010
+#define AUXADC_DATA(channel) (0x14 + (channel) * 4)
+#define AUXADC_MISC_V 0x094
+
+#define AUXADC_CON1_CHANNEL(x) BIT(x)
+
+#define APMIXED_SYS_TS_CON1 0x604
+
+/* Thermal Controller Registers */
+#define TEMP_MONCTL0 0x000
+#define TEMP_MONCTL1 0x004
+#define TEMP_MONCTL2 0x008
+#define TEMP_MONIDET0 0x014
+#define TEMP_MONIDET1 0x018
+#define TEMP_MSRCTL0 0x038
+#define TEMP_AHBPOLL 0x040
+#define TEMP_AHBTO 0x044
+#define TEMP_ADCPNP0 0x048
+#define TEMP_ADCPNP1 0x04c
+#define TEMP_ADCPNP2 0x050
+#define TEMP_ADCPNP3 0x0b4
+
+#define TEMP_ADCMUX 0x054
+#define TEMP_ADCEN 0x060
+#define TEMP_PNPMUXADDR 0x064
+#define TEMP_ADCMUXADDR 0x068
+#define TEMP_ADCENADDR 0x074
+#define TEMP_ADCVALIDADDR 0x078
+#define TEMP_ADCVOLTADDR 0x07c
+#define TEMP_RDCTRL 0x080
+#define TEMP_ADCVALIDMASK 0x084
+#define TEMP_ADCVOLTAGESHIFT 0x088
+#define TEMP_ADCWRITECTRL 0x08c
+#define TEMP_MSR0 0x090
+#define TEMP_MSR1 0x094
+#define TEMP_MSR2 0x098
+#define TEMP_MSR3 0x0B8
+
+#define TEMP_SPARE0 0x0f0
+
+#define PTPCORESEL 0x400
+
+#define TEMP_MONCTL1_PERIOD_UNIT(x) ((x) & 0x3ff)
+
+#define TEMP_MONCTL2_FILTER_INTERVAL(x) (((x) & 0x3ff) << 16)
+#define TEMP_MONCTL2_SENSOR_INTERVAL(x) ((x) & 0x3ff)
+
+#define TEMP_AHBPOLL_ADC_POLL_INTERVAL(x) (x)
+
+#define TEMP_ADCWRITECTRL_ADC_PNP_WRITE BIT(0)
+#define TEMP_ADCWRITECTRL_ADC_MUX_WRITE BIT(1)
+
+#define TEMP_ADCVALIDMASK_VALID_HIGH BIT(5)
+#define TEMP_ADCVALIDMASK_VALID_POS(bit) (bit)
+
+#define MT8173_TS1 0
+#define MT8173_TS2 1
+#define MT8173_TS3 2
+#define MT8173_TS4 3
+#define MT8173_TSABB 4
+
+/* AUXADC channel 11 is used for the temperature sensors */
+#define MT8173_TEMP_AUXADC_CHANNEL 11
+
+/* The total number of temperature sensors in the MT8173 */
+#define MT8173_NUM_SENSORS 5
+
+/* The number of banks in the MT8173 */
+#define MT8173_NUM_ZONES 4
+
+/* The number of sensing points per bank */
+#define MT8173_NUM_SENSORS_PER_ZONE 4
+
+/* Layout of the fuses providing the calibration data */
+#define MT8173_CALIB_BUF0_VALID BIT(0)
+#define MT8173_CALIB_BUF1_ADC_GE(x) (((x) >> 22) & 0x3ff)
+#define MT8173_CALIB_BUF0_VTS_TS1(x) (((x) >> 17) & 0x1ff)
+#define MT8173_CALIB_BUF0_VTS_TS2(x) (((x) >> 8) & 0x1ff)
+#define MT8173_CALIB_BUF1_VTS_TS3(x) (((x) >> 0) & 0x1ff)
+#define MT8173_CALIB_BUF2_VTS_TS4(x) (((x) >> 23) & 0x1ff)
+#define MT8173_CALIB_BUF2_VTS_TSABB(x) (((x) >> 14) & 0x1ff)
+#define MT8173_CALIB_BUF0_DEGC_CALI(x) (((x) >> 1) & 0x3f)
+#define MT8173_CALIB_BUF0_O_SLOPE(x) (((x) >> 26) & 0x3f)
+
+#define THERMAL_NAME "mtk-thermal"
+
+struct mtk_thermal;
+
+struct mtk_thermal_bank {
+ struct mtk_thermal *mt;
+ int id;
+};
+
+struct mtk_thermal {
+ struct device *dev;
+ void __iomem *thermal_base;
+
+ struct clk *clk_peri_therm;
+ struct clk *clk_auxadc;
+
+ struct mtk_thermal_bank banks[MT8173_NUM_ZONES];
+
+ /* lock: for getting and putting banks */
+ struct mutex lock;
+
+ /* Calibration values */
+ s32 adc_ge;
+ s32 degc_cali;
+ s32 o_slope;
+ s32 vts[MT8173_NUM_SENSORS];
+
+ struct thermal_zone_device *tzd;
+};
+
+struct mtk_thermal_bank_cfg {
+ unsigned int num_sensors;
+ unsigned int sensors[MT8173_NUM_SENSORS_PER_ZONE];
+};
+
+static const int sensor_mux_values[MT8173_NUM_SENSORS] = { 0, 1, 2, 3, 16 };
+
+/*
+ * The MT8173 thermal controller has four banks. Each bank can read up to
+ * four temperature sensors simultaneously. The MT8173 has a total of 5
+ * temperature sensors. We use each bank to measure a certain area of the
+ * SoC. Since TS2 is located centrally in the SoC it is influenced by multiple
+ * areas, hence is used in different banks.
+ *
+ * The thermal core only gets the maximum temperature of all banks, so
+ * the bank concept wouldn't be necessary here. However, the SVS (Smart
+ * Voltage Scaling) unit makes its decisions based on the same bank
+ * data, and this indeed needs the temperatures of the individual banks
+ * for making better decisions.
+ */
+static const struct mtk_thermal_bank_cfg bank_data[] = {
+ {
+ .num_sensors = 2,
+ .sensors = { MT8173_TS2, MT8173_TS3 },
+ }, {
+ .num_sensors = 2,
+ .sensors = { MT8173_TS2, MT8173_TS4 },
+ }, {
+ .num_sensors = 3,
+ .sensors = { MT8173_TS1, MT8173_TS2, MT8173_TSABB },
+ }, {
+ .num_sensors = 1,
+ .sensors = { MT8173_TS2 },
+ },
+};
+
+struct mtk_thermal_sense_point {
+ int msr;
+ int adcpnp;
+};
+
+static const struct mtk_thermal_sense_point
+ sensing_points[MT8173_NUM_SENSORS_PER_ZONE] = {
+ {
+ .msr = TEMP_MSR0,
+ .adcpnp = TEMP_ADCPNP0,
+ }, {
+ .msr = TEMP_MSR1,
+ .adcpnp = TEMP_ADCPNP1,
+ }, {
+ .msr = TEMP_MSR2,
+ .adcpnp = TEMP_ADCPNP2,
+ }, {
+ .msr = TEMP_MSR3,
+ .adcpnp = TEMP_ADCPNP3,
+ },
+};
+
+/**
+ * raw_to_mcelsius - convert a raw ADC value to mcelsius
+ * @mt: The thermal controller
+ * @sensno: sensor number of the calibration data to use
+ * @raw: raw ADC value
+ *
+ * This converts the raw ADC value to mcelsius using the SoC specific
+ * calibration constants
+ */
+static int raw_to_mcelsius(struct mtk_thermal *mt, int sensno, s32 raw)
+{
+ s32 tmp;
+
+ raw &= 0xfff;
+
+ tmp = 203450520 << 3;
+ tmp /= 165 + mt->o_slope;
+ tmp /= 10000 + mt->adc_ge;
+ tmp *= raw - mt->vts[sensno] - 3350;
+ tmp >>= 3;
+
+ return mt->degc_cali * 500 - tmp;
+}
+
+/**
+ * mtk_thermal_get_bank - get bank
+ * @bank: The bank
+ *
+ * The bank registers are banked, we have to select a bank in the
+ * PTPCORESEL register to access it.
+ */
+static void mtk_thermal_get_bank(struct mtk_thermal_bank *bank)
+{
+ struct mtk_thermal *mt = bank->mt;
+ u32 val;
+
+ mutex_lock(&mt->lock);
+
+ val = readl(mt->thermal_base + PTPCORESEL);
+ val &= ~0xf;
+ val |= bank->id;
+ writel(val, mt->thermal_base + PTPCORESEL);
+}
+
+/**
+ * mtk_thermal_put_bank - release bank
+ * @bank: The bank
+ *
+ * release a bank previously taken with mtk_thermal_get_bank,
+ */
+static void mtk_thermal_put_bank(struct mtk_thermal_bank *bank)
+{
+ struct mtk_thermal *mt = bank->mt;
+
+ mutex_unlock(&mt->lock);
+}
+
+/**
+ * mtk_thermal_bank_temperature - get the temperature of a bank
+ * @bank: The bank
+ *
+ * The temperature of a bank is considered the maximum temperature of
+ * the sensors associated to the bank.
+ */
+static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
+{
+ struct mtk_thermal *mt = bank->mt;
+ int i, temp = INT_MIN, max = INT_MIN;
+ u32 raw;
+
+ for (i = 0; i < bank_data[bank->id].num_sensors; i++) {
+ raw = readl(mt->thermal_base + sensing_points[i].msr);
+
+ temp = raw_to_mcelsius(mt, bank_data[bank->id].sensors[i], raw);
+
+ /*
+ * The first read of a sensor often contains a very high bogus
+ * temperature value. Filter these out so that the system does
+ * not immediately shut down.
+ */
+ if (temp > 200000)
+ temp = 0;
+
+ if (temp > max)
+ max = temp;
+ }
+
+ return max;
+}
+
+static int mtk_read_temp(void *data, int *temperature)
+{
+ struct mtk_thermal *mt = data;
+ int i;
+ int tempmax = INT_MIN;
+
+ for (i = 0; i < MT8173_NUM_ZONES; i++) {
+ struct mtk_thermal_bank *bank = &mt->banks[i];
+
+ mtk_thermal_get_bank(bank);
+
+ tempmax = max(tempmax, mtk_thermal_bank_temperature(bank));
+
+ mtk_thermal_put_bank(bank);
+ }
+
+ *temperature = tempmax;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops mtk_thermal_ops = {
+ .get_temp = mtk_read_temp,
+};
+
+static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
+ u32 apmixed_phys_base, u32 auxadc_phys_base)
+{
+ struct mtk_thermal_bank *bank = &mt->banks[num];
+ const struct mtk_thermal_bank_cfg *cfg = &bank_data[num];
+ int i;
+
+ bank->id = num;
+ bank->mt = mt;
+
+ mtk_thermal_get_bank(bank);
+
+ /* bus clock 66M counting unit is 12 * 15.15ns * 256 = 46.540us */
+ writel(TEMP_MONCTL1_PERIOD_UNIT(12), mt->thermal_base + TEMP_MONCTL1);
+
+ /*
+ * filt interval is 1 * 46.540us = 46.54us,
+ * sen interval is 429 * 46.540us = 19.96ms
+ */
+ writel(TEMP_MONCTL2_FILTER_INTERVAL(1) |
+ TEMP_MONCTL2_SENSOR_INTERVAL(429),
+ mt->thermal_base + TEMP_MONCTL2);
+
+ /* poll is set to 10u */
+ writel(TEMP_AHBPOLL_ADC_POLL_INTERVAL(768),
+ mt->thermal_base + TEMP_AHBPOLL);
+
+ /* temperature sampling control, 1 sample */
+ writel(0x0, mt->thermal_base + TEMP_MSRCTL0);
+
+ /* if this polling time is exceeded, an IRQ is raised */
+ writel(0xffffffff, mt->thermal_base + TEMP_AHBTO);
+
+ /* number of interrupts per event, 1 is enough */
+ writel(0x0, mt->thermal_base + TEMP_MONIDET0);
+ writel(0x0, mt->thermal_base + TEMP_MONIDET1);
+
+ /*
+ * The MT8173 thermal controller does not have its own ADC. Instead it
+ * uses AHB bus accesses to control the AUXADC. To do this the thermal
+ * controller has to be programmed with the physical addresses of the
+ * AUXADC registers and with the various bit positions in the AUXADC.
+ * Also the thermal controller controls a mux in the APMIXEDSYS register
+ * space.
+ */
+
+ /*
+ * this value will be stored to TEMP_PNPMUXADDR (TEMP_SPARE0)
+ * automatically by the hardware
+ */
+ writel(BIT(MT8173_TEMP_AUXADC_CHANNEL), mt->thermal_base + TEMP_ADCMUX);
+
+ /* AHB address for auxadc mux selection */
+ writel(auxadc_phys_base + AUXADC_CON1_CLR_V,
+ mt->thermal_base + TEMP_ADCMUXADDR);
+
+ /* AHB address for pnp sensor mux selection */
+ writel(apmixed_phys_base + APMIXED_SYS_TS_CON1,
+ mt->thermal_base + TEMP_PNPMUXADDR);
+
+ /* AHB value for auxadc enable */
+ writel(BIT(MT8173_TEMP_AUXADC_CHANNEL), mt->thermal_base + TEMP_ADCEN);
+
+ /* AHB address for auxadc enable (channel 0 immediate mode selected) */
+ writel(auxadc_phys_base + AUXADC_CON1_SET_V,
+ mt->thermal_base + TEMP_ADCENADDR);
+
+ /* AHB address for auxadc valid bit */
+ writel(auxadc_phys_base + AUXADC_DATA(MT8173_TEMP_AUXADC_CHANNEL),
+ mt->thermal_base + TEMP_ADCVALIDADDR);
+
+ /* AHB address for auxadc voltage output */
+ writel(auxadc_phys_base + AUXADC_DATA(MT8173_TEMP_AUXADC_CHANNEL),
+ mt->thermal_base + TEMP_ADCVOLTADDR);
+
+ /* read valid & voltage are at the same register */
+ writel(0x0, mt->thermal_base + TEMP_RDCTRL);
+
+ /* indicate where the valid bit is */
+ writel(TEMP_ADCVALIDMASK_VALID_HIGH | TEMP_ADCVALIDMASK_VALID_POS(12),
+ mt->thermal_base + TEMP_ADCVALIDMASK);
+
+ /* no shift */
+ writel(0x0, mt->thermal_base + TEMP_ADCVOLTAGESHIFT);
+
+ /* enable auxadc mux write transaction */
+ writel(TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
+ mt->thermal_base + TEMP_ADCWRITECTRL);
+
+ for (i = 0; i < cfg->num_sensors; i++)
+ writel(sensor_mux_values[cfg->sensors[i]],
+ mt->thermal_base + sensing_points[i].adcpnp);
+
+ writel((1 << cfg->num_sensors) - 1, mt->thermal_base + TEMP_MONCTL0);
+
+ writel(TEMP_ADCWRITECTRL_ADC_PNP_WRITE |
+ TEMP_ADCWRITECTRL_ADC_MUX_WRITE,
+ mt->thermal_base + TEMP_ADCWRITECTRL);
+
+ mtk_thermal_put_bank(bank);
+}
+
+static u64 of_get_phys_base(struct device_node *np)
+{
+ u64 size64;
+ const __be32 *regaddr_p;
+
+ regaddr_p = of_get_address(np, 0, &size64, NULL);
+ if (!regaddr_p)
+ return OF_BAD_ADDR;
+
+ return of_translate_address(np, regaddr_p);
+}
+
+static int mtk_thermal_get_calibration_data(struct device *dev,
+ struct mtk_thermal *mt)
+{
+ struct nvmem_cell *cell;
+ u32 *buf;
+ size_t len;
+ int i, ret = 0;
+
+ /* Start with default values */
+ mt->adc_ge = 512;
+ for (i = 0; i < MT8173_NUM_SENSORS; i++)
+ mt->vts[i] = 260;
+ mt->degc_cali = 40;
+ mt->o_slope = 0;
+
+ cell = nvmem_cell_get(dev, "calibration-data");
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return PTR_ERR(cell);
+ return 0;
+ }
+
+ buf = (u32 *)nvmem_cell_read(cell, &len);
+
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ if (len < 3 * sizeof(u32)) {
+ dev_warn(dev, "invalid calibration data\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (buf[0] & MT8173_CALIB_BUF0_VALID) {
+ mt->adc_ge = MT8173_CALIB_BUF1_ADC_GE(buf[1]);
+ mt->vts[MT8173_TS1] = MT8173_CALIB_BUF0_VTS_TS1(buf[0]);
+ mt->vts[MT8173_TS2] = MT8173_CALIB_BUF0_VTS_TS2(buf[0]);
+ mt->vts[MT8173_TS3] = MT8173_CALIB_BUF1_VTS_TS3(buf[1]);
+ mt->vts[MT8173_TS4] = MT8173_CALIB_BUF2_VTS_TS4(buf[2]);
+ mt->vts[MT8173_TSABB] = MT8173_CALIB_BUF2_VTS_TSABB(buf[2]);
+ mt->degc_cali = MT8173_CALIB_BUF0_DEGC_CALI(buf[0]);
+ mt->o_slope = MT8173_CALIB_BUF0_O_SLOPE(buf[0]);
+ } else {
+ dev_info(dev, "Device not calibrated, using default calibration values\n");
+ }
+
+out:
+ kfree(buf);
+
+ return ret;
+}
+
+static int mtk_thermal_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ struct device_node *auxadc, *apmixedsys, *np = pdev->dev.of_node;
+ struct mtk_thermal *mt;
+ struct resource *res;
+ u64 auxadc_phys_base, apmixed_phys_base;
+
+ mt = devm_kzalloc(&pdev->dev, sizeof(*mt), GFP_KERNEL);
+ if (!mt)
+ return -ENOMEM;
+
+ mt->clk_peri_therm = devm_clk_get(&pdev->dev, "therm");
+ if (IS_ERR(mt->clk_peri_therm))
+ return PTR_ERR(mt->clk_peri_therm);
+
+ mt->clk_auxadc = devm_clk_get(&pdev->dev, "auxadc");
+ if (IS_ERR(mt->clk_auxadc))
+ return PTR_ERR(mt->clk_auxadc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mt->thermal_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mt->thermal_base))
+ return PTR_ERR(mt->thermal_base);
+
+ ret = mtk_thermal_get_calibration_data(&pdev->dev, mt);
+ if (ret)
+ return ret;
+
+ mutex_init(&mt->lock);
+
+ mt->dev = &pdev->dev;
+
+ auxadc = of_parse_phandle(np, "mediatek,auxadc", 0);
+ if (!auxadc) {
+ dev_err(&pdev->dev, "missing auxadc node\n");
+ return -ENODEV;
+ }
+
+ auxadc_phys_base = of_get_phys_base(auxadc);
+
+ of_node_put(auxadc);
+
+ if (auxadc_phys_base == OF_BAD_ADDR) {
+ dev_err(&pdev->dev, "Can't get auxadc phys address\n");
+ return -EINVAL;
+ }
+
+ apmixedsys = of_parse_phandle(np, "mediatek,apmixedsys", 0);
+ if (!apmixedsys) {
+ dev_err(&pdev->dev, "missing apmixedsys node\n");
+ return -ENODEV;
+ }
+
+ apmixed_phys_base = of_get_phys_base(apmixedsys);
+
+ of_node_put(apmixedsys);
+
+ if (apmixed_phys_base == OF_BAD_ADDR) {
+ dev_err(&pdev->dev, "Can't get apmixedsys phys address\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare_enable(mt->clk_auxadc);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable auxadc clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_reset(&pdev->dev);
+ if (ret)
+ goto err_disable_clk_auxadc;
+
+ ret = clk_prepare_enable(mt->clk_peri_therm);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable peri clk: %d\n", ret);
+ goto err_disable_clk_auxadc;
+ }
+
+ for (i = 0; i < MT8173_NUM_ZONES; i++)
+ mtk_thermal_init_bank(mt, i, apmixed_phys_base,
+ auxadc_phys_base);
+
+ platform_set_drvdata(pdev, mt);
+
+ mt->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
+ &mtk_thermal_ops);
+ if (IS_ERR(mt->tzd)) {
+ ret = PTR_ERR(mt->tzd);
+ goto err_register;
+ }
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(mt->clk_peri_therm);
+
+err_disable_clk_auxadc:
+ clk_disable_unprepare(mt->clk_auxadc);
+
+ return ret;
+}
+
+static int mtk_thermal_remove(struct platform_device *pdev)
+{
+ struct mtk_thermal *mt = platform_get_drvdata(pdev);
+
+ thermal_zone_of_sensor_unregister(&pdev->dev, mt->tzd);
+
+ clk_disable_unprepare(mt->clk_peri_therm);
+ clk_disable_unprepare(mt->clk_auxadc);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_thermal_of_match[] = {
+ {
+ .compatible = "mediatek,mt8173-thermal",
+ }, {
+ },
+};
+
+static struct platform_driver mtk_thermal_driver = {
+ .probe = mtk_thermal_probe,
+ .remove = mtk_thermal_remove,
+ .driver = {
+ .name = THERMAL_NAME,
+ .of_match_table = mtk_thermal_of_match,
+ },
+};
+
+module_platform_driver(mtk_thermal_driver);
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 9043f8f91852..49ac23d3e776 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -555,6 +555,87 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
}
EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_unregister);
+static void devm_thermal_zone_of_sensor_release(struct device *dev, void *res)
+{
+ thermal_zone_of_sensor_unregister(dev,
+ *(struct thermal_zone_device **)res);
+}
+
+static int devm_thermal_zone_of_sensor_match(struct device *dev, void *res,
+ void *data)
+{
+ struct thermal_zone_device **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+/**
+ * devm_thermal_zone_of_sensor_register - Resource managed version of
+ * thermal_zone_of_sensor_register()
+ * @dev: a valid struct device pointer of a sensor device. Must contain
+ * a valid .of_node, for the sensor node.
+ * @sensor_id: a sensor identifier, in case the sensor IP has more
+ * than one sensor
+ * @data: a private pointer (owned by the caller) that will be passed
+ * back, when a temperature reading is needed.
+ * @ops: struct thermal_zone_of_device_ops *. Must contain at least .get_temp.
+ *
+ * Refer to thermal_zone_of_sensor_register() for more details.
+ *
+ * Return: On success returns a valid struct thermal_zone_device,
+ * otherwise, it returns a corresponding ERR_PTR(). Caller must
+ * check the return value with help of IS_ERR() helper.
+ * The registered thermal_zone_device will automatically be
+ * released when the device is unbound.
+ */
+struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
+ struct device *dev, int sensor_id,
+ void *data, const struct thermal_zone_of_device_ops *ops)
+{
+ struct thermal_zone_device **ptr, *tzd;
+
+ ptr = devres_alloc(devm_thermal_zone_of_sensor_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ tzd = thermal_zone_of_sensor_register(dev, sensor_id, data, ops);
+ if (IS_ERR(tzd)) {
+ devres_free(ptr);
+ return tzd;
+ }
+
+ *ptr = tzd;
+ devres_add(dev, ptr);
+
+ return tzd;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_register);
+
+/**
+ * devm_thermal_zone_of_sensor_unregister - Resource managed version of
+ * thermal_zone_of_sensor_unregister().
+ * @dev: Device for which the resource was allocated.
+ * @tzd: a pointer to struct thermal_zone_device where the sensor is registered.
+ *
+ * This function removes the sensor callbacks and private data from the
+ * thermal zone device registered with devm_thermal_zone_of_sensor_register()
+ * API. It will also silence the zone by removing the .get_temp() and
+ * .get_trend() thermal zone device callbacks.
+ * Normally this function will not need to be called and the resource
+ * management code will ensure that the resource is freed.
+ */
+void devm_thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tzd)
+{
+ WARN_ON(devres_release(dev, devm_thermal_zone_of_sensor_release,
+ devm_thermal_zone_of_sensor_match, tzd));
+}
+EXPORT_SYMBOL_GPL(devm_thermal_zone_of_sensor_unregister);
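A minimal sketch of how a sensor driver might adopt the new helper; the foo_* names and the hardware read are hypothetical, only devm_thermal_zone_of_sensor_register() and the ops layout come from the code above:

struct foo_sensor { void __iomem *base; };

static int foo_read_mcelsius(struct foo_sensor *s);	/* hypothetical */

static int foo_get_temp(void *data, int *temp)
{
	struct foo_sensor *s = data;

	*temp = foo_read_mcelsius(s);
	return 0;
}

static const struct thermal_zone_of_device_ops foo_thermal_ops = {
	.get_temp = foo_get_temp,
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_sensor *s;
	struct thermal_zone_device *tzd;

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, s,
						   &foo_thermal_ops);
	if (IS_ERR(tzd))
		return PTR_ERR(tzd);

	/* no unregister needed in .remove; devres drops it on unbind */
	return 0;
}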
+
/*** functions parsing device tree nodes ***/
/**
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 0e735acea33a..82daba09e150 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -430,8 +430,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
struct rcar_thermal_priv *priv;
struct device *dev = &pdev->dev;
struct resource *res, *irq;
- const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev);
- unsigned long of_data = (unsigned long)of_id->data;
+ unsigned long of_data = (unsigned long)of_device_get_match_data(dev);
int mres = 0;
int i;
int ret = -ENODEV;
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index b58e3fb9b311..233a564442a0 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -58,8 +58,8 @@ enum sensor_id {
/**
* The conversion table has the adc value and temperature.
- * ADC_DECREMENT: the adc value is of diminishing.(e.g. v2_code_table)
- * ADC_INCREMENT: the adc value is incremental.(e.g. v3_code_table)
+ * ADC_DECREMENT: the adc value decreases as the temperature rises (e.g. rk3288_code_table)
+ * ADC_INCREMENT: the adc value increases as the temperature rises (e.g. rk3368_code_table)
*/
enum adc_sort_mode {
ADC_DECREMENT = 0,
@@ -135,7 +135,13 @@ struct rockchip_thermal_data {
enum tshut_polarity tshut_polarity;
};
-/* TSADC Sensor info define: */
+/**
+ * TSADC Sensor Register description:
+ *
+ * TSADCV2_* registers are used for RK3288 SoCs; other chips can reuse them.
+ * TSADCV3_* registers are used for SoCs newer than RK3288 (e.g. RK3228, RK3399).
+ */
#define TSADCV2_AUTO_CON 0x04
#define TSADCV2_INT_EN 0x08
#define TSADCV2_INT_PD 0x0c
@@ -149,13 +155,20 @@ struct rockchip_thermal_data {
#define TSADCV2_AUTO_EN BIT(0)
#define TSADCV2_AUTO_SRC_EN(chn) BIT(4 + (chn))
#define TSADCV2_AUTO_TSHUT_POLARITY_HIGH BIT(8)
+/**
+ * TSADCV3_AUTO_Q_SEL_EN:
+ * whether to select (1024 - tsadc_q) as the output
+ * 1'b0: use tsadc_q as the output (the temperature code is a rising sequence)
+ * 1'b1: use (1024 - tsadc_q) as the output (the temperature code is a falling sequence)
+ */
+#define TSADCV3_AUTO_Q_SEL_EN BIT(1)
#define TSADCV2_INT_SRC_EN(chn) BIT(chn)
#define TSADCV2_SHUT_2GPIO_SRC_EN(chn) BIT(4 + (chn))
#define TSADCV2_SHUT_2CRU_SRC_EN(chn) BIT(8 + (chn))
-#define TSADCV1_INT_PD_CLEAR_MASK ~BIT(16)
#define TSADCV2_INT_PD_CLEAR_MASK ~BIT(8)
+#define TSADCV3_INT_PD_CLEAR_MASK ~BIT(16)
#define TSADCV2_DATA_MASK 0xfff
#define TSADCV3_DATA_MASK 0x3ff
@@ -177,45 +190,46 @@ struct tsadc_table {
* linearly interpolated.
 * Code to Temperature mapping should be updated based on silicon results.
*/
-static const struct tsadc_table v1_code_table[] = {
- {TSADCV3_DATA_MASK, -40000},
- {436, -40000},
- {431, -35000},
- {426, -30000},
- {421, -25000},
- {416, -20000},
- {411, -15000},
- {406, -10000},
- {401, -5000},
- {395, 0},
- {390, 5000},
- {385, 10000},
- {380, 15000},
- {375, 20000},
- {370, 25000},
- {364, 30000},
- {359, 35000},
- {354, 40000},
- {349, 45000},
- {343, 50000},
- {338, 55000},
- {333, 60000},
- {328, 65000},
- {322, 70000},
- {317, 75000},
- {312, 80000},
- {307, 85000},
- {301, 90000},
- {296, 95000},
- {291, 100000},
- {286, 105000},
- {280, 110000},
- {275, 115000},
- {270, 120000},
- {264, 125000},
+static const struct tsadc_table rk3228_code_table[] = {
+ {0, -40000},
+ {588, -40000},
+ {593, -35000},
+ {598, -30000},
+ {603, -25000},
+ {608, -20000},
+ {613, -15000},
+ {618, -10000},
+ {623, -5000},
+ {629, 0},
+ {634, 5000},
+ {639, 10000},
+ {644, 15000},
+ {649, 20000},
+ {654, 25000},
+ {660, 30000},
+ {665, 35000},
+ {670, 40000},
+ {675, 45000},
+ {681, 50000},
+ {686, 55000},
+ {691, 60000},
+ {696, 65000},
+ {702, 70000},
+ {707, 75000},
+ {712, 80000},
+ {717, 85000},
+ {723, 90000},
+ {728, 95000},
+ {733, 100000},
+ {738, 105000},
+ {744, 110000},
+ {749, 115000},
+ {754, 120000},
+ {760, 125000},
+ {TSADCV2_DATA_MASK, 125000},
};
-static const struct tsadc_table v2_code_table[] = {
+static const struct tsadc_table rk3288_code_table[] = {
{TSADCV2_DATA_MASK, -40000},
{3800, -40000},
{3792, -35000},
@@ -253,7 +267,7 @@ static const struct tsadc_table v2_code_table[] = {
{3421, 125000},
};
-static const struct tsadc_table v3_code_table[] = {
+static const struct tsadc_table rk3368_code_table[] = {
{0, -40000},
{106, -40000},
{108, -35000},
@@ -292,42 +306,43 @@ static const struct tsadc_table v3_code_table[] = {
{TSADCV3_DATA_MASK, 125000},
};
-static const struct tsadc_table v4_code_table[] = {
- {TSADCV3_DATA_MASK, -40000},
- {431, -40000},
- {426, -35000},
- {421, -30000},
- {415, -25000},
- {410, -20000},
- {405, -15000},
- {399, -10000},
- {394, -5000},
- {389, 0},
- {383, 5000},
- {378, 10000},
- {373, 15000},
- {367, 20000},
- {362, 25000},
- {357, 30000},
- {351, 35000},
- {346, 40000},
- {340, 45000},
- {335, 50000},
- {330, 55000},
- {324, 60000},
- {319, 65000},
- {313, 70000},
- {308, 75000},
- {302, 80000},
- {297, 85000},
- {291, 90000},
- {286, 95000},
- {281, 100000},
- {275, 105000},
- {270, 110000},
- {264, 115000},
- {259, 120000},
- {253, 125000},
+static const struct tsadc_table rk3399_code_table[] = {
+ {0, -40000},
+ {593, -40000},
+ {598, -35000},
+ {603, -30000},
+ {609, -25000},
+ {614, -20000},
+ {619, -15000},
+ {625, -10000},
+ {630, -5000},
+ {635, 0},
+ {641, 5000},
+ {646, 10000},
+ {651, 15000},
+ {657, 20000},
+ {662, 25000},
+ {667, 30000},
+ {673, 35000},
+ {678, 40000},
+ {684, 45000},
+ {689, 50000},
+ {694, 55000},
+ {700, 60000},
+ {705, 65000},
+ {711, 70000},
+ {716, 75000},
+ {722, 80000},
+ {727, 85000},
+ {733, 90000},
+ {738, 95000},
+ {743, 100000},
+ {749, 105000},
+ {754, 110000},
+ {760, 115000},
+ {765, 120000},
+ {771, 125000},
+ {TSADCV3_DATA_MASK, 125000},
};
static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
@@ -411,7 +426,7 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
* temperature between 2 table entries is linear and interpolate
* to produce less granular result.
*/
- num = table.id[mid].temp - v2_code_table[mid - 1].temp;
+ num = table.id[mid].temp - table.id[mid - 1].temp;
num *= abs(table.id[mid - 1].code - code);
denom = abs(table.id[mid - 1].code - table.id[mid].code);
*temp = table.id[mid - 1].temp + (num / denom);
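The hunk above repairs the interpolation to stay inside the chip's own table rather than hard-coding v2_code_table. A userspace sketch of the same linear interpolation, with a plain scan standing in for the driver's table search and purely illustrative entries:

struct tsadc_entry { unsigned int code; int temp; /* millicelsius */ };

/* assumes an ADC_INCREMENT-style table sorted by rising code */
static int code_to_temp(const struct tsadc_entry *t, int len,
			unsigned int code)
{
	int mid, num, denom;

	/* clamp to the table bounds, like the sentinel first/last rows */
	if (code <= t[0].code)
		return t[0].temp;
	if (code >= t[len - 1].code)
		return t[len - 1].temp;

	for (mid = 1; mid < len - 1; mid++)
		if (code < t[mid].code)
			break;

	/* interpolate between the two surrounding entries */
	num = t[mid].temp - t[mid - 1].temp;
	num *= (int)(code - t[mid - 1].code);
	denom = (int)(t[mid].code - t[mid - 1].code);
	return t[mid - 1].temp + num / denom;
}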
@@ -453,20 +468,20 @@ static void rk_tsadcv2_initialize(void __iomem *regs,
regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
}
-static void rk_tsadcv1_irq_ack(void __iomem *regs)
+static void rk_tsadcv2_irq_ack(void __iomem *regs)
{
u32 val;
val = readl_relaxed(regs + TSADCV2_INT_PD);
- writel_relaxed(val & TSADCV1_INT_PD_CLEAR_MASK, regs + TSADCV2_INT_PD);
+ writel_relaxed(val & TSADCV2_INT_PD_CLEAR_MASK, regs + TSADCV2_INT_PD);
}
-static void rk_tsadcv2_irq_ack(void __iomem *regs)
+static void rk_tsadcv3_irq_ack(void __iomem *regs)
{
u32 val;
val = readl_relaxed(regs + TSADCV2_INT_PD);
- writel_relaxed(val & TSADCV2_INT_PD_CLEAR_MASK, regs + TSADCV2_INT_PD);
+ writel_relaxed(val & TSADCV3_INT_PD_CLEAR_MASK, regs + TSADCV2_INT_PD);
}
static void rk_tsadcv2_control(void __iomem *regs, bool enable)
@@ -482,6 +497,25 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}
+/**
+ * rk_tsadcv3_control - the TSADC controller works in auto mode; some SoCs
+ * need the tsadc_q_sel bit set in TSADCV2_AUTO_CON[1]. When that bit is
+ * enabled, (1024 - tsadc_q) is used as the output adc value.
+ */
+static void rk_tsadcv3_control(void __iomem *regs, bool enable)
+{
+ u32 val;
+
+ val = readl_relaxed(regs + TSADCV2_AUTO_CON);
+ if (enable)
+ val |= TSADCV2_AUTO_EN | TSADCV3_AUTO_Q_SEL_EN;
+ else
+ val &= ~TSADCV2_AUTO_EN;
+
+ writel_relaxed(val, regs + TSADCV2_AUTO_CON);
+}
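The driver-visible effect of the new bit: with tsadc_q_sel enabled the controller reports (1024 - tsadc_q) instead of the raw conversion, which inverts the code ordering; accordingly, the chips switched to rk_tsadcv3_control below also switch their tables to ADC_INCREMENT. A one-line sketch of the readback relation (the helper name is made up):

/* made-up helper: value the controller hands back for a raw tsadc_q */
static unsigned int tsadc_readback(unsigned int tsadc_q, int q_sel)
{
	return q_sel ? 1024 - tsadc_q : tsadc_q;
}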
+
static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
int chn, void __iomem *regs, int *temp)
{
@@ -531,17 +565,17 @@ static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
.tshut_temp = 95000,
.initialize = rk_tsadcv2_initialize,
- .irq_ack = rk_tsadcv1_irq_ack,
- .control = rk_tsadcv2_control,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
.get_temp = rk_tsadcv2_get_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
.table = {
- .id = v1_code_table,
- .length = ARRAY_SIZE(v1_code_table),
+ .id = rk3228_code_table,
+ .length = ARRAY_SIZE(rk3228_code_table),
.data_mask = TSADCV3_DATA_MASK,
- .mode = ADC_DECREMENT,
+ .mode = ADC_INCREMENT,
},
};
@@ -562,8 +596,8 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
.set_tshut_mode = rk_tsadcv2_tshut_mode,
.table = {
- .id = v2_code_table,
- .length = ARRAY_SIZE(v2_code_table),
+ .id = rk3288_code_table,
+ .length = ARRAY_SIZE(rk3288_code_table),
.data_mask = TSADCV2_DATA_MASK,
.mode = ADC_DECREMENT,
},
@@ -586,8 +620,8 @@ static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
.set_tshut_mode = rk_tsadcv2_tshut_mode,
.table = {
- .id = v3_code_table,
- .length = ARRAY_SIZE(v3_code_table),
+ .id = rk3368_code_table,
+ .length = ARRAY_SIZE(rk3368_code_table),
.data_mask = TSADCV3_DATA_MASK,
.mode = ADC_INCREMENT,
},
@@ -603,17 +637,17 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
.tshut_temp = 95000,
.initialize = rk_tsadcv2_initialize,
- .irq_ack = rk_tsadcv1_irq_ack,
- .control = rk_tsadcv2_control,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
.get_temp = rk_tsadcv2_get_temp,
.set_tshut_temp = rk_tsadcv2_tshut_temp,
.set_tshut_mode = rk_tsadcv2_tshut_mode,
.table = {
- .id = v4_code_table,
- .length = ARRAY_SIZE(v4_code_table),
+ .id = rk3399_code_table,
+ .length = ARRAY_SIZE(rk3399_code_table),
.data_mask = TSADCV3_DATA_MASK,
- .mode = ADC_DECREMENT,
+ .mode = ADC_INCREMENT,
},
};
@@ -693,15 +727,14 @@ static int rockchip_configure_from_dt(struct device *dev,
thermal->chip->tshut_temp);
thermal->tshut_temp = thermal->chip->tshut_temp;
} else {
+ if (shut_temp > INT_MAX) {
+ dev_err(dev, "Invalid tshut temperature specified: %u\n",
+ shut_temp);
+ return -ERANGE;
+ }
thermal->tshut_temp = shut_temp;
}
- if (thermal->tshut_temp > INT_MAX) {
- dev_err(dev, "Invalid tshut temperature specified: %d\n",
- thermal->tshut_temp);
- return -ERANGE;
- }
-
if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
dev_warn(dev,
"Missing tshut mode property, using default (%s)\n",
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index e0da3865e060..222e644169f0 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,7 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
depends on THERMAL_OF
+ depends on HAS_IOMEM
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index fa61eff88496..f3ce94ec73b5 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -184,6 +184,7 @@
* @temp_error2: fused value of the second point trim.
* @regulator: pointer to the TMU regulator structure.
* @reg_conf: pointer to structure to register with core thermal.
+ * @ntrip: number of supported trip points.
* @tmu_initialize: SoC specific TMU initialization method
* @tmu_control: SoC specific TMU control method
* @tmu_read: SoC specific TMU temperature read method
@@ -203,6 +204,7 @@ struct exynos_tmu_data {
u16 temp_error1, temp_error2;
struct regulator *regulator;
struct thermal_zone_device *tzd;
+ unsigned int ntrip;
int (*tmu_initialize)(struct platform_device *pdev);
void (*tmu_control)(struct platform_device *pdev, bool on);
@@ -346,6 +348,14 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
int ret;
+ if (of_thermal_get_ntrips(data->tzd) > data->ntrip) {
+ dev_info(&pdev->dev,
+ "More trip points than supported by this TMU.\n");
+ dev_info(&pdev->dev,
+ "%d trip points should be configured in polling mode.\n",
+ (of_thermal_get_ntrips(data->tzd) - data->ntrip));
+ }
+
mutex_lock(&data->lock);
clk_enable(data->clk);
if (!IS_ERR(data->clk_sec))
@@ -1210,6 +1220,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->tmu_control = exynos4210_tmu_control;
data->tmu_read = exynos4210_tmu_read;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ data->ntrip = 4;
break;
case SOC_ARCH_EXYNOS3250:
case SOC_ARCH_EXYNOS4412:
@@ -1222,6 +1233,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->tmu_read = exynos4412_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ data->ntrip = 4;
break;
case SOC_ARCH_EXYNOS5433:
data->tmu_initialize = exynos5433_tmu_initialize;
@@ -1229,6 +1241,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->tmu_read = exynos4412_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ data->ntrip = 8;
break;
case SOC_ARCH_EXYNOS5440:
data->tmu_initialize = exynos5440_tmu_initialize;
@@ -1236,6 +1249,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->tmu_read = exynos5440_tmu_read;
data->tmu_set_emulation = exynos5440_tmu_set_emulation;
data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
+ data->ntrip = 4;
break;
case SOC_ARCH_EXYNOS7:
data->tmu_initialize = exynos7_tmu_initialize;
@@ -1243,6 +1257,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
data->tmu_read = exynos7_tmu_read;
data->tmu_set_emulation = exynos4412_tmu_set_emulation;
data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+ data->ntrip = 8;
break;
default:
dev_err(&pdev->dev, "Platform not supported\n");
@@ -1295,7 +1310,7 @@ static int exynos_tmu_probe(struct platform_device *pdev)
* TODO: Add regulator as an SOC feature, so that regulator enable
* is a compulsory call.
*/
- data->regulator = devm_regulator_get(&pdev->dev, "vtmu");
+ data->regulator = devm_regulator_get_optional(&pdev->dev, "vtmu");
if (!IS_ERR(data->regulator)) {
ret = regulator_enable(data->regulator);
if (ret) {
@@ -1303,6 +1318,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
return ret;
}
} else {
+ if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(&pdev->dev, "Regulator node (vtmu) not found\n");
}
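The hunk above converges on the canonical optional-supply pattern; a hedged sketch, where everything except the regulator API calls is a made-up name:

/* sketch: claim an optional supply; defer if not ready, skip if absent */
static int foo_enable_optional_supply(struct device *dev,
				      struct regulator **out)
{
	struct regulator *reg;

	reg = devm_regulator_get_optional(dev, "vtmu");
	if (IS_ERR(reg)) {
		/* the supply exists but its driver is not ready yet */
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		*out = NULL;	/* truly absent: run without it */
		return 0;
	}

	*out = reg;
	return regulator_enable(reg);
}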
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
index 74ea5765938b..136975220c92 100644
--- a/drivers/thermal/tegra_soctherm.c
+++ b/drivers/thermal/tegra_soctherm.c
@@ -57,7 +57,7 @@
#define READBACK_VALUE_MASK 0xff00
#define READBACK_VALUE_SHIFT 8
#define READBACK_ADD_HALF BIT(7)
-#define READBACK_NEGATE BIT(1)
+#define READBACK_NEGATE BIT(0)
#define FUSE_TSENSOR8_CALIB 0x180
#define FUSE_SPARE_REALIGNMENT_REG_0 0x1fc
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index a0a8fd1235e2..d4b54653ecf8 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -454,6 +454,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
{
enum thermal_trip_type type;
+ /* Ignore disabled trip points */
+ if (test_bit(trip, &tz->trips_disabled))
+ return;
+
tz->ops->get_trip_type(tz, trip, &type);
if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
@@ -1800,6 +1804,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
{
struct thermal_zone_device *tz;
enum thermal_trip_type trip_type;
+ int trip_temp;
int result;
int count;
int passive = 0;
@@ -1871,9 +1876,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
goto unregister;
for (count = 0; count < trips; count++) {
- tz->ops->get_trip_type(tz, count, &trip_type);
+ if (tz->ops->get_trip_type(tz, count, &trip_type)) {
+ set_bit(count, &tz->trips_disabled);
+ continue;
+ }
if (trip_type == THERMAL_TRIP_PASSIVE)
passive = 1;
+ /* Mark trips with failed or bogus (zero) temperature reads */
+ if (tz->ops->get_trip_temp(tz, count, &trip_temp) ||
+ trip_temp == 0)
+ set_bit(count, &tz->trips_disabled);
}
if (!passive) {
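Taken together, the two hunks establish a simple contract: registration marks every trip whose type or temperature cannot be read, or whose temperature is zero, in a per-zone bitmap, and the trip handler consults that bitmap before acting. A condensed sketch, assuming trips_disabled is an unsigned long bitmap in struct thermal_zone_device:

/* registration: record unusable trips (condensed from the loop above) */
if (tz->ops->get_trip_temp(tz, count, &trip_temp) || trip_temp == 0)
	set_bit(count, &tz->trips_disabled);

/* handle_thermal_trip(): never act on a disabled trip */
if (test_bit(trip, &tz->trips_disabled))
	return;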
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 1e34a1efc554..06ea9766a70a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1265,7 +1265,7 @@ static
int ti_bandgap_probe(struct platform_device *pdev)
{
struct ti_bandgap *bgp;
- int clk_rate, ret = 0, i;
+ int clk_rate, ret, i;
bgp = ti_bandgap_build(pdev);
if (IS_ERR(bgp)) {
@@ -1288,16 +1288,14 @@ int ti_bandgap_probe(struct platform_device *pdev)
}
bgp->fclock = clk_get(NULL, bgp->conf->fclock_name);
- ret = IS_ERR(bgp->fclock);
- if (ret) {
+ if (IS_ERR(bgp->fclock)) {
dev_err(&pdev->dev, "failed to request fclock reference\n");
ret = PTR_ERR(bgp->fclock);
goto free_irqs;
}
bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name);
- ret = IS_ERR(bgp->div_clk);
- if (ret) {
+ if (IS_ERR(bgp->div_clk)) {
dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n");
ret = PTR_ERR(bgp->div_clk);
goto free_irqs;
@@ -1314,7 +1312,7 @@ int ti_bandgap_probe(struct platform_device *pdev)
* may not be accurate
*/
val = ti_bandgap_readl(bgp, tsr->bgap_efuse);
- if (ret || !val)
+ if (!val)
dev_info(&pdev->dev,
"Non-trimmed BGAP, Temp not accurate\n");
}
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index c01f45095877..82c4d2e45319 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -226,7 +226,7 @@ config CYCLADES
config CYZ_INTR
bool "Cyclades-Z interrupt mode operation"
- depends on CYCLADES
+ depends on CYCLADES && PCI
help
The Cyclades-Z family of multiport cards allows 2 (two) driver op
modes: polling and interrupt. In polling mode, the driver will check
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 2caaf5a2516d..eacf4c9f3b29 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -639,7 +639,7 @@ static void shutdown(struct tty_struct *tty, struct serial_state *info)
custom.adkcon = AC_UARTBRK;
mb();
- if (tty->termios.c_cflag & HUPCL)
+ if (C_HUPCL(tty))
info->MCR &= ~(SER_DTR|SER_RTS);
rtsdtr_ctrl(info->MCR);
@@ -965,8 +965,7 @@ static void rs_throttle(struct tty_struct * tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
#ifdef SERIAL_DEBUG_THROTTLE
- printk("throttle %s: %d....\n", tty_name(tty),
- tty->ldisc.chars_in_buffer(tty));
+ printk("throttle %s ....\n", tty_name(tty));
#endif
if (serial_paranoia_check(info, tty->name, "rs_throttle"))
@@ -975,7 +974,7 @@ static void rs_throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
rs_send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
info->MCR &= ~SER_RTS;
local_irq_save(flags);
@@ -988,8 +987,7 @@ static void rs_unthrottle(struct tty_struct * tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
#ifdef SERIAL_DEBUG_THROTTLE
- printk("unthrottle %s: %d....\n", tty_name(tty),
- tty->ldisc.chars_in_buffer(tty));
+ printk("unthrottle %s ....\n", tty_name(tty));
#endif
if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
@@ -1001,7 +999,7 @@ static void rs_unthrottle(struct tty_struct * tty)
else
rs_send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
info->MCR |= SER_RTS;
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
@@ -1334,8 +1332,7 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
change_speed(tty, info, old_termios);
/* Handle transition to B0 status */
- if ((old_termios->c_cflag & CBAUD) &&
- !(cflag & CBAUD)) {
+ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
info->MCR &= ~(SER_DTR|SER_RTS);
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
@@ -1343,21 +1340,17 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
}
/* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) &&
- (cflag & CBAUD)) {
+ if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
info->MCR |= SER_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
info->MCR |= SER_RTS;
- }
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
local_irq_restore(flags);
}
/* Handle turning off CRTSCTS */
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
rs_start(tty);
}
@@ -1369,8 +1362,7 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
* XXX It's not clear whether the current behavior is correct
* or not. Hence, this may change.....
*/
- if (!(old_termios->c_cflag & CLOCAL) &&
- (tty->termios.c_cflag & CLOCAL))
+ if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty))
wake_up_interruptible(&info->open_wait);
#endif
}
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index abbed201dc74..d67e542bab1c 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1440,7 +1440,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
info->port.xmit_buf = NULL;
free_page((unsigned long)temp);
}
- if (tty->termios.c_cflag & HUPCL)
+ if (C_HUPCL(tty))
cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR);
cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR);
@@ -1469,7 +1469,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
free_page((unsigned long)temp);
}
- if (tty->termios.c_cflag & HUPCL)
+ if (C_HUPCL(tty))
tty_port_lower_dtr_rts(&info->port);
set_bit(TTY_IO_ERROR, &tty->flags);
@@ -2795,8 +2795,7 @@ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
cy_set_line_char(info, tty);
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
cy_start(tty);
}
@@ -2807,8 +2806,7 @@ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
* XXX It's not clear whether the current behavior is correct
* or not. Hence, this may change.....
*/
- if (!(old_termios->c_cflag & CLOCAL) &&
- (tty->termios.c_cflag & CLOCAL))
+ if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty))
wake_up_interruptible(&info->port.open_wait);
#endif
} /* cy_set_termios */
@@ -2852,8 +2850,8 @@ static void cy_throttle(struct tty_struct *tty)
unsigned long flags;
#ifdef CY_DEBUG_THROTTLE
- printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty),
- tty->ldisc.chars_in_buffer(tty), info->line);
+ printk(KERN_DEBUG "cyc:throttle %s ...ttyC%d\n", tty_name(tty),
+ info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_throttle"))
@@ -2868,7 +2866,7 @@ static void cy_throttle(struct tty_struct *tty)
info->throttle = 1;
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
cyy_change_rts_dtr(info, 0, TIOCM_RTS);
@@ -2891,8 +2889,8 @@ static void cy_unthrottle(struct tty_struct *tty)
unsigned long flags;
#ifdef CY_DEBUG_THROTTLE
- printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n",
- tty_name(tty), tty_chars_in_buffer(tty), info->line);
+ printk(KERN_DEBUG "cyc:unthrottle %s ...ttyC%d\n",
+ tty_name(tty), info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_unthrottle"))
@@ -2905,7 +2903,7 @@ static void cy_unthrottle(struct tty_struct *tty)
cy_send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
card = info->card;
if (!cy_is_Z(card)) {
spin_lock_irqsave(&card->card_lock, flags);
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 342b36b9ad35..7ac9bcdf1e61 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -23,7 +23,6 @@
* byte channel used for the console is designated as the default tty.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
@@ -719,19 +718,6 @@ error:
return ret;
}
-static int ehv_bc_tty_remove(struct platform_device *pdev)
-{
- struct ehv_bc_data *bc = dev_get_drvdata(&pdev->dev);
-
- tty_unregister_device(ehv_bc_driver, bc - bcs);
-
- tty_port_destroy(&bc->port);
- irq_dispose_mapping(bc->tx_irq);
- irq_dispose_mapping(bc->rx_irq);
-
- return 0;
-}
-
static const struct of_device_id ehv_bc_tty_of_ids[] = {
{ .compatible = "epapr,hv-byte-channel" },
{}
@@ -741,15 +727,15 @@ static struct platform_driver ehv_bc_tty_driver = {
.driver = {
.name = "ehv-bc",
.of_match_table = ehv_bc_tty_of_ids,
+ .suppress_bind_attrs = true,
},
.probe = ehv_bc_tty_probe,
- .remove = ehv_bc_tty_remove,
};
/**
* ehv_bc_init - ePAPR hypervisor byte channel driver initialization
*
- * This function is called when this module is loaded.
+ * This function is called when this driver is loaded.
*/
static int __init ehv_bc_init(void)
{
@@ -814,24 +800,4 @@ error:
return ret;
}
-
-
-/**
- * ehv_bc_exit - ePAPR hypervisor byte channel driver termination
- *
- * This function is called when this driver is unloaded.
- */
-static void __exit ehv_bc_exit(void)
-{
- platform_driver_unregister(&ehv_bc_tty_driver);
- tty_unregister_driver(ehv_bc_driver);
- put_tty_driver(ehv_bc_driver);
- kfree(bcs);
-}
-
-module_init(ehv_bc_init);
-module_exit(ehv_bc_exit);
-
-MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
-MODULE_DESCRIPTION("ePAPR hypervisor byte channel driver");
-MODULE_LICENSE("GPL v2");
+device_initcall(ehv_bc_init);
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index 0f82c0b146f6..3fc912373adf 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -68,8 +68,7 @@ static void goldfish_tty_do_write(int line, const char *buf, unsigned count)
static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
{
- struct platform_device *pdev = dev_id;
- struct goldfish_tty *qtty = &goldfish_ttys[pdev->id];
+ struct goldfish_tty *qtty = dev_id;
void __iomem *base = qtty->base;
unsigned long irq_flags;
unsigned char *buf;
@@ -162,7 +161,7 @@ static int goldfish_tty_console_setup(struct console *co, char *options)
return 0;
}
-static struct tty_port_operations goldfish_port_ops = {
+static const struct tty_port_operations goldfish_port_ops = {
.activate = goldfish_tty_activate,
.shutdown = goldfish_tty_shutdown
};
@@ -233,6 +232,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
struct device *ttydev;
void __iomem *base;
u32 irq;
+ unsigned int line;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL)
@@ -248,10 +248,16 @@ static int goldfish_tty_probe(struct platform_device *pdev)
irq = r->start;
- if (pdev->id >= goldfish_tty_line_count)
- goto err_unmap;
-
mutex_lock(&goldfish_tty_lock);
+
+ if (pdev->id == PLATFORM_DEVID_NONE)
+ line = goldfish_tty_current_line_count;
+ else
+ line = pdev->id;
+
+ if (line >= goldfish_tty_line_count)
+ goto err_create_driver_failed;
+
if (goldfish_tty_current_line_count == 0) {
ret = goldfish_tty_create_driver();
if (ret)
@@ -259,7 +265,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
}
goldfish_tty_current_line_count++;
- qtty = &goldfish_ttys[pdev->id];
+ qtty = &goldfish_ttys[line];
spin_lock_init(&qtty->lock);
tty_port_init(&qtty->port);
qtty->port.ops = &goldfish_port_ops;
@@ -269,13 +275,13 @@ static int goldfish_tty_probe(struct platform_device *pdev)
writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_CMD);
ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED,
- "goldfish_tty", pdev);
+ "goldfish_tty", qtty);
if (ret)
goto err_request_irq_failed;
ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver,
- pdev->id, &pdev->dev);
+ line, &pdev->dev);
if (IS_ERR(ttydev)) {
ret = PTR_ERR(ttydev);
goto err_tty_register_device_failed;
@@ -286,8 +292,9 @@ static int goldfish_tty_probe(struct platform_device *pdev)
qtty->console.device = goldfish_tty_console_device;
qtty->console.setup = goldfish_tty_console_setup;
qtty->console.flags = CON_PRINTBUFFER;
- qtty->console.index = pdev->id;
+ qtty->console.index = line;
register_console(&qtty->console);
+ platform_set_drvdata(pdev, qtty);
mutex_unlock(&goldfish_tty_lock);
return 0;
@@ -307,13 +314,12 @@ err_unmap:
static int goldfish_tty_remove(struct platform_device *pdev)
{
- struct goldfish_tty *qtty;
+ struct goldfish_tty *qtty = platform_get_drvdata(pdev);
mutex_lock(&goldfish_tty_lock);
- qtty = &goldfish_ttys[pdev->id];
unregister_console(&qtty->console);
- tty_unregister_device(goldfish_tty_driver, pdev->id);
+ tty_unregister_device(goldfish_tty_driver, qtty->console.index);
iounmap(qtty->base);
qtty->base = NULL;
- free_irq(qtty->irq, pdev);
+ free_irq(qtty->irq, qtty);
@@ -324,11 +330,19 @@ static int goldfish_tty_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id goldfish_tty_of_match[] = {
+ { .compatible = "google,goldfish-tty", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, goldfish_tty_of_match);
+
static struct platform_driver goldfish_tty_platform_driver = {
.probe = goldfish_tty_probe,
.remove = goldfish_tty_remove,
.driver = {
- .name = "goldfish_tty"
+ .name = "goldfish_tty",
+ .of_match_table = goldfish_tty_of_match,
}
};
diff --git a/drivers/tty/hvc/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
index f575a9b5ede7..b05dc5086627 100644
--- a/drivers/tty/hvc/hvc_vio.c
+++ b/drivers/tty/hvc/hvc_vio.c
@@ -41,7 +41,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
-#include <linux/module.h>
#include <asm/hvconsole.h>
#include <asm/vio.h>
@@ -61,7 +60,6 @@ static struct vio_device_id hvc_driver_table[] = {
#endif
{ "", "" }
};
-MODULE_DEVICE_TABLE(vio, hvc_driver_table);
typedef enum hv_protocol {
HV_PROTOCOL_RAW,
@@ -363,26 +361,13 @@ static int hvc_vio_probe(struct vio_dev *vdev,
return 0;
}
-static int hvc_vio_remove(struct vio_dev *vdev)
-{
- struct hvc_struct *hp = dev_get_drvdata(&vdev->dev);
- int rc, termno;
-
- termno = hp->vtermno;
- rc = hvc_remove(hp);
- if (rc == 0) {
- if (hvterm_privs[termno] != &hvterm_priv0)
- kfree(hvterm_privs[termno]);
- hvterm_privs[termno] = NULL;
- }
- return rc;
-}
-
static struct vio_driver hvc_vio_driver = {
.id_table = hvc_driver_table,
.probe = hvc_vio_probe,
- .remove = hvc_vio_remove,
.name = hvc_driver_name,
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
};
static int __init hvc_vio_init(void)
@@ -394,13 +379,7 @@ static int __init hvc_vio_init(void)
return rc;
}
-module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
-
-static void __exit hvc_vio_exit(void)
-{
- vio_unregister_driver(&hvc_vio_driver);
-}
-module_exit(hvc_vio_exit);
+device_initcall(hvc_vio_init); /* after drivers/tty/hvc/hvc_console.c */
void __init hvc_vio_init_early(void)
{
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index fa816b7193b6..5e87e4866bcb 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -25,6 +25,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/xen/hypervisor.h>
@@ -162,7 +163,7 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
return recv;
}
-static struct hv_ops domU_hvc_ops = {
+static const struct hv_ops domU_hvc_ops = {
.get_chars = domU_read_console,
.put_chars = domU_write_console,
.notifier_add = notifier_add_irq,
@@ -188,7 +189,7 @@ static int dom0_write_console(uint32_t vtermno, const char *str, int len)
return len;
}
-static struct hv_ops dom0_hvc_ops = {
+static const struct hv_ops dom0_hvc_ops = {
.get_chars = dom0_read_console,
.put_chars = dom0_write_console,
.notifier_add = notifier_add_irq,
@@ -245,6 +246,18 @@ err:
return -ENODEV;
}
+static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
+{
+ info->evtchn = xen_start_info->console.domU.evtchn;
+ /* GFN == MFN for PV guest */
+ info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
+ info->vtermno = vtermno;
+
+ list_add_tail(&info->list, &xenconsoles);
+
+ return 0;
+}
+
static int xen_pv_console_init(void)
{
struct xencons_info *info;
@@ -264,13 +277,8 @@ static int xen_pv_console_init(void)
/* already configured */
return 0;
}
- info->evtchn = xen_start_info->console.domU.evtchn;
- /* GFN == MFN for PV guest */
- info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
- info->vtermno = HVC_COOKIE;
-
spin_lock(&xencons_lock);
- list_add_tail(&info->list, &xenconsoles);
+ xencons_info_pv_init(info, HVC_COOKIE);
spin_unlock(&xencons_lock);
return 0;
@@ -323,6 +331,7 @@ void xen_console_resume(void)
}
}
+#ifdef CONFIG_HVC_XEN_FRONTEND
static void xencons_disconnect_backend(struct xencons_info *info)
{
if (info->irq > 0)
@@ -363,7 +372,6 @@ static int xen_console_remove(struct xencons_info *info)
return 0;
}
-#ifdef CONFIG_HVC_XEN_FRONTEND
static int xencons_remove(struct xenbus_device *dev)
{
return xen_console_remove(dev_get_drvdata(&dev->dev));
@@ -597,15 +605,39 @@ static int xen_cons_init(void)
}
console_initcall(xen_cons_init);
+#ifdef CONFIG_X86
+static void xen_hvm_early_write(uint32_t vtermno, const char *str, int len)
+{
+ if (xen_cpuid_base())
+ outsb(0xe9, str, len);
+}
+#else
+static void xen_hvm_early_write(uint32_t vtermno, const char *str, int len) { }
+#endif
+
#ifdef CONFIG_EARLY_PRINTK
+static int __init xenboot_setup_console(struct console *console, char *string)
+{
+ static struct xencons_info xenboot;
+
+ if (xen_initial_domain())
+ return 0;
+ if (!xen_pv_domain())
+ return -ENODEV;
+
+ return xencons_info_pv_init(&xenboot, 0);
+}
+
static void xenboot_write_console(struct console *console, const char *string,
unsigned len)
{
unsigned int linelen, off = 0;
const char *pos;
- if (!xen_pv_domain())
+ if (!xen_pv_domain()) {
+ xen_hvm_early_write(0, string, len);
return;
+ }
dom0_write_console(0, string, len);
@@ -628,6 +660,7 @@ static void xenboot_write_console(struct console *console, const char *string,
struct console xenboot_console = {
.name = "xenboot",
.write = xenboot_write_console,
+ .setup = xenboot_setup_console,
.flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
.index = -1,
};
@@ -640,17 +673,10 @@ void xen_raw_console_write(const char *str)
if (xen_domain()) {
rc = dom0_write_console(0, str, len);
-#ifdef CONFIG_X86
- if (rc == -ENOSYS && xen_hvm_domain())
- goto outb_print;
-
- } else if (xen_cpuid_base()) {
- int i;
-outb_print:
- for (i = 0; i < len; i++)
- outb(str[i], 0xe9);
-#endif
+ if (rc != -ENOSYS || !xen_hvm_domain())
+ return;
}
+ xen_hvm_early_write(0, str, len);
}
void xen_raw_printk(const char *fmt, ...)
@@ -664,3 +690,18 @@ void xen_raw_printk(const char *fmt, ...)
xen_raw_console_write(buf);
}
+
+static void xenboot_earlycon_write(struct console *console,
+ const char *string,
+ unsigned len)
+{
+ dom0_write_console(0, string, len);
+}
+
+static int __init xenboot_earlycon_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ device->con->write = xenboot_earlycon_write;
+ return 0;
+}
+EARLYCON_DECLARE(xenboot, xenboot_earlycon_setup);
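With the earlycon hook in place, early boot output can be requested from the kernel command line; a minimal example (the console=hvc0 pairing is an assumption, not part of the patch):

earlycon=xenboot console=hvc0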
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 99875949bfb7..8bf67630018b 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -1204,8 +1204,7 @@ static void isicom_set_termios(struct tty_struct *tty,
isicom_config_port(tty);
spin_unlock_irqrestore(&port->card->card_lock, flags);
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
isicom_start(tty);
}
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 4c4a23674569..2f12bb9f4336 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -254,6 +254,7 @@ struct mxser_port {
int xmit_head;
int xmit_tail;
int xmit_cnt;
+ int closing;
struct ktermios normal_termios;
@@ -1081,6 +1082,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
return;
if (tty_port_close_start(port, tty, filp) == 0)
return;
+ info->closing = 1;
mutex_lock(&port->mutex);
mxser_close_port(port);
mxser_flush_buffer(tty);
@@ -1091,6 +1093,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
mxser_shutdown_port(port);
clear_bit(ASYNCB_INITIALIZED, &port->flags);
mutex_unlock(&port->mutex);
+ info->closing = 0;
/* Right now the tty_port set is done outside of the close_end helper
as we don't yet have everyone using refcounts */
tty_port_close_end(port, tty);
@@ -1864,7 +1867,7 @@ static void mxser_stoprx(struct tty_struct *tty)
}
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
info->MCR &= ~UART_MCR_RTS;
outb(info->MCR, info->ioaddr + UART_MCR);
}
@@ -1901,7 +1904,7 @@ static void mxser_unthrottle(struct tty_struct *tty)
}
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
info->MCR |= UART_MCR_RTS;
outb(info->MCR, info->ioaddr + UART_MCR);
}
@@ -1949,15 +1952,13 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
mxser_change_speed(tty, old_termios);
spin_unlock_irqrestore(&info->slock, flags);
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
mxser_start(tty);
}
/* Handle sw stopped */
- if ((old_termios->c_iflag & IXON) &&
- !(tty->termios.c_iflag & IXON)) {
+ if ((old_termios->c_iflag & IXON) && !I_IXON(tty)) {
tty->stopped = 0;
if (info->board->chip_flag) {
@@ -2255,10 +2256,8 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id)
break;
iir &= MOXA_MUST_IIR_MASK;
tty = tty_port_tty_get(&port->port);
- if (!tty ||
- (port->port.flags & ASYNC_CLOSING) ||
- !(port->port.flags &
- ASYNC_INITIALIZED)) {
+ if (!tty || port->closing ||
+ !(port->port.flags & ASYNC_INITIALIZED)) {
status = inb(port->ioaddr + UART_LSR);
outb(0x27, port->ioaddr + UART_FCR);
inb(port->ioaddr + UART_MSR);
@@ -2337,7 +2336,7 @@ static const struct tty_operations mxser_ops = {
.get_icount = mxser_get_icount,
};
-static struct tty_port_operations mxser_port_ops = {
+static const struct tty_port_operations mxser_port_ops = {
.carrier_raised = mxser_carrier_raised,
.dtr_rts = mxser_dtr_rts,
.activate = mxser_activate,
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index c3fe026d3168..c01620780f5b 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1066,7 +1066,7 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
/* Carrier drop -> hangup */
if (tty) {
if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
- if (!(tty->termios.c_cflag & CLOCAL))
+ if (!C_CLOCAL(tty))
tty_hangup(tty);
}
if (brk & 0x01)
@@ -2304,21 +2304,6 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
}
/**
- * gsmld_chars_in_buffer - report available bytes
- * @tty: tty device
- *
- * Report the number of characters buffered to be delivered to user
- * at this instant in time.
- *
- * Locking: gsm lock
- */
-
-static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
-{
- return 0;
-}
-
-/**
* gsmld_flush_buffer - clean input queue
* @tty: terminal device
*
@@ -2830,7 +2815,6 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
.open = gsmld_open,
.close = gsmld_close,
.flush_buffer = gsmld_flush_buffer,
- .chars_in_buffer = gsmld_chars_in_buffer,
.read = gsmld_read,
.write = gsmld_write,
.ioctl = gsmld_ioctl,
@@ -3132,7 +3116,7 @@ static void gsmtty_throttle(struct tty_struct *tty)
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return;
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
dlci->modem_tx &= ~TIOCM_DTR;
dlci->throttled = 1;
/* Send an MSC with DTR cleared */
@@ -3144,7 +3128,7 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
struct gsm_dlci *dlci = tty->driver_data;
if (dlci->state == DLCI_CLOSED)
return;
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
dlci->modem_tx |= TIOCM_DTR;
dlci->throttled = 0;
/* Send an MSC with DTR set */
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index bbc4ce66c2c1..bcaba17688f6 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -159,7 +159,6 @@ struct n_hdlc {
/*
* HDLC buffer list manipulation functions
*/
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
struct n_hdlc_buf *buf);
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
@@ -853,10 +852,10 @@ static struct n_hdlc *n_hdlc_alloc(void)
if (!n_hdlc)
return NULL;
- n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
- n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
- n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
- n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
+ spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
+ spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
+ spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
+ spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
/* allocate free rx buffer list */
for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
@@ -885,16 +884,6 @@ static struct n_hdlc *n_hdlc_alloc(void)
} /* end of n_hdlc_alloc() */
/**
- * n_hdlc_buf_list_init - initialize specified HDLC buffer list
- * @list - pointer to buffer list
- */
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
-{
- memset(list, 0, sizeof(*list));
- spin_lock_init(&list->spinlock);
-} /* end of n_hdlc_buf_list_init() */
-
-/**
* n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
* @list - pointer to buffer list
* @buf - pointer to buffer
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index b280abaad91b..fb76a7d80e7e 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -113,8 +113,6 @@ struct n_tty_data {
DECLARE_BITMAP(read_flags, N_TTY_BUF_SIZE);
unsigned char echo_buf[N_TTY_BUF_SIZE];
- int minimum_to_wake;
-
/* consumer-published */
size_t read_tail;
size_t line_start;
@@ -153,15 +151,6 @@ static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i)
return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
}
-static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
- unsigned char __user *ptr)
-{
- struct n_tty_data *ldata = tty->disc_data;
-
- tty_audit_add_data(tty, &x, 1, ldata->icanon);
- return put_user(x, ptr);
-}
-
static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
size_t tail, size_t n)
{
@@ -171,7 +160,7 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
int uncopied;
if (n > size) {
- tty_audit_add_data(tty, from, size, ldata->icanon);
+ tty_audit_add_data(tty, from, size);
uncopied = copy_to_user(to, from, size);
if (uncopied)
return uncopied;
@@ -180,7 +169,7 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
from = ldata->read_buf;
}
- tty_audit_add_data(tty, from, n, ldata->icanon);
+ tty_audit_add_data(tty, from, n);
return copy_to_user(to, from, n);
}
@@ -239,8 +228,8 @@ static ssize_t chars_in_buffer(struct tty_struct *tty)
static void n_tty_write_wakeup(struct tty_struct *tty)
{
- if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags))
- kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
}
static void n_tty_check_throttle(struct tty_struct *tty)
@@ -272,8 +261,6 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
return;
- if (!tty->count)
- return;
n_tty_kick_worker(tty);
tty_wakeup(tty->link);
return;
@@ -292,8 +279,6 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
break;
- if (!tty->count)
- break;
n_tty_kick_worker(tty);
unthrottled = tty_unthrottle_safe(tty);
if (!unthrottled)
@@ -381,28 +366,6 @@ static void n_tty_flush_buffer(struct tty_struct *tty)
}
/**
- * n_tty_chars_in_buffer - report available bytes
- * @tty: tty device
- *
- * Report the number of characters buffered to be delivered to user
- * at this instant in time.
- *
- * Locking: exclusive termios_rwsem
- */
-
-static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
-{
- ssize_t n;
-
- WARN_ONCE(1, "%s is deprecated and scheduled for removal.", __func__);
-
- down_write(&tty->termios_rwsem);
- n = chars_in_buffer(tty);
- up_write(&tty->termios_rwsem);
- return n;
-}
-
-/**
* is_utf8_continuation - utf8 multibyte check
* @c: byte to check
*
@@ -1561,8 +1524,6 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
flag = *fp++;
if (likely(flag == TTY_NORMAL))
n_tty_receive_char_closing(tty, *cp++);
- else
- n_tty_receive_char_flagged(tty, *cp++, flag);
}
}
@@ -1664,7 +1625,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
/* publish read_head to consumer */
smp_store_release(&ldata->commit_head, ldata->read_head);
- if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
+ if (read_cnt(ldata)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
wake_up_interruptible_poll(&tty->read_wait, POLLIN);
}
@@ -1785,12 +1746,6 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
return n_tty_receive_buf_common(tty, cp, fp, count, 1);
}
-int is_ignored(int sig)
-{
- return (sigismember(&current->blocked, sig) ||
- current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
-}
-
/**
* n_tty_set_termios - termios data changed
* @tty: terminal
@@ -1937,7 +1892,6 @@ static int n_tty_open(struct tty_struct *tty)
reset_buffer_flags(tty->disc_data);
ldata->column = 0;
ldata->canon_column = 0;
- ldata->minimum_to_wake = 1;
ldata->num_overrun = 0;
ldata->no_room = 0;
ldata->lnext = 0;
@@ -2015,7 +1969,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
retval = copy_to_user(*b, from, n);
n -= retval;
is_eof = n == 1 && *from == EOF_CHAR(tty);
- tty_audit_add_data(tty, from, n, ldata->icanon);
+ tty_audit_add_data(tty, from, n);
smp_store_release(&ldata->read_tail, ldata->read_tail + n);
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
@@ -2109,7 +2063,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
ldata->line_start = ldata->read_tail;
else
ldata->push = 0;
- tty_audit_push(tty);
+ tty_audit_push();
}
return 0;
}
@@ -2200,14 +2154,9 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
minimum = MIN_CHAR(tty);
if (minimum) {
time = (HZ / 10) * TIME_CHAR(tty);
- if (time)
- ldata->minimum_to_wake = 1;
- else if (!waitqueue_active(&tty->read_wait) ||
- (ldata->minimum_to_wake > minimum))
- ldata->minimum_to_wake = minimum;
} else {
timeout = (HZ / 10) * TIME_CHAR(tty);
- ldata->minimum_to_wake = minimum = 1;
+ minimum = 1;
}
}
@@ -2225,19 +2174,15 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
cs = tty->link->ctrl_status;
tty->link->ctrl_status = 0;
spin_unlock_irq(&tty->link->ctrl_lock);
- if (tty_put_user(tty, cs, b++)) {
+ if (put_user(cs, b)) {
retval = -EFAULT;
- b--;
break;
}
+ b++;
nr--;
break;
}
- if (((minimum - (b - buf)) < ldata->minimum_to_wake) &&
- ((minimum - (b - buf)) >= 1))
- ldata->minimum_to_wake = (minimum - (b - buf));
-
done = check_other_done(tty);
if (!input_available_p(tty, 0)) {
@@ -2275,11 +2220,11 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
/* Deal with packet mode. */
if (packet && b == buf) {
- if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
+ if (put_user(TIOCPKT_DATA, b)) {
retval = -EFAULT;
- b--;
break;
}
+ b++;
nr--;
}
@@ -2303,9 +2248,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
up_read(&tty->termios_rwsem);
remove_wait_queue(&tty->read_wait, &wait);
- if (!waitqueue_active(&tty->read_wait))
- ldata->minimum_to_wake = minimum;
-
mutex_unlock(&ldata->atomic_read_lock);
if (b - buf)
@@ -2417,7 +2359,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
}
break_out:
remove_wait_queue(&tty->write_wait, &wait);
- if (b - buf != nr && tty->fasync)
+ if (nr && tty->fasync)
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
up_read(&tty->termios_rwsem);
return (b - buf) ? b - buf : retval;
@@ -2440,7 +2382,6 @@ break_out:
static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- struct n_tty_data *ldata = tty->disc_data;
unsigned int mask = 0;
poll_wait(file, &tty->read_wait, wait);
@@ -2453,12 +2394,6 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
mask |= POLLPRI | POLLIN | POLLRDNORM;
if (tty_hung_up_p(file))
mask |= POLLHUP;
- if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
- if (MIN_CHAR(tty) && !TIME_CHAR(tty))
- ldata->minimum_to_wake = MIN_CHAR(tty);
- else
- ldata->minimum_to_wake = 1;
- }
if (tty->ops->write && !tty_is_writelocked(tty) &&
tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
tty_write_room(tty) > 0)
@@ -2507,25 +2442,12 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
}
}
-static void n_tty_fasync(struct tty_struct *tty, int on)
-{
- struct n_tty_data *ldata = tty->disc_data;
-
- if (!waitqueue_active(&tty->read_wait)) {
- if (on)
- ldata->minimum_to_wake = 1;
- else if (!tty->fasync)
- ldata->minimum_to_wake = N_TTY_BUF_SIZE;
- }
-}
-
-struct tty_ldisc_ops tty_ldisc_N_TTY = {
+static struct tty_ldisc_ops n_tty_ops = {
.magic = TTY_LDISC_MAGIC,
.name = "n_tty",
.open = n_tty_open,
.close = n_tty_close,
.flush_buffer = n_tty_flush_buffer,
- .chars_in_buffer = n_tty_chars_in_buffer,
.read = n_tty_read,
.write = n_tty_write,
.ioctl = n_tty_ioctl,
@@ -2533,7 +2455,6 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
.poll = n_tty_poll,
.receive_buf = n_tty_receive_buf,
.write_wakeup = n_tty_write_wakeup,
- .fasync = n_tty_fasync,
.receive_buf2 = n_tty_receive_buf2,
};
@@ -2541,14 +2462,18 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
* n_tty_inherit_ops - inherit N_TTY methods
* @ops: struct tty_ldisc_ops where to save N_TTY methods
*
- * Enables a 'subclass' line discipline to 'inherit' N_TTY
- * methods.
+ * Enables a 'subclass' line discipline to 'inherit' N_TTY methods.
*/
void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
- *ops = tty_ldisc_N_TTY;
+ *ops = n_tty_ops;
ops->owner = NULL;
ops->refcount = ops->flags = 0;
}
EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
+
+void __init n_tty_init(void)
+{
+ tty_register_ldisc(N_TTY, &n_tty_ops);
+}
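
With these changes the N_TTY ops table becomes static (n_tty_ops) and is registered from the new n_tty_init() at boot instead of being exported; external users are expected to go through n_tty_inherit_ops(). A minimal sketch of a subclass line discipline built on that hook — the N_MOUSE slot and "my_ldisc" name are illustrative only, and the tty_register_ldisc(int, ops) signature is the one from this kernel era:

#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_ldisc.h>

static struct tty_ldisc_ops my_ldisc_ops;

static int __init my_ldisc_init(void)
{
	/* copy the N_TTY methods, then override what we need */
	n_tty_inherit_ops(&my_ldisc_ops);
	my_ldisc_ops.name = "my_ldisc";
	my_ldisc_ops.owner = THIS_MODULE;
	return tty_register_ldisc(N_MOUSE, &my_ldisc_ops);
}

static void __exit my_ldisc_exit(void)
{
	tty_unregister_ldisc(N_MOUSE);
}

module_init(my_ldisc_init);
module_exit(my_ldisc_exit);
MODULE_LICENSE("GPL");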
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 80f9de907563..5cc80b80c82b 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
struct tty_struct *tty = tty_port_tty_get(&port->port);
int i, ret;
- read_mem32((u32 *) &size, addr, 4);
+ size = __le32_to_cpu(readl(addr));
/* DBG1( "%d bytes port: %d", size, index); */
if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
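
The nozomi fix replaces a 4-byte read_mem32() copy into a stack variable with a single MMIO read; readl() performs a 32-bit little-endian read and byte-swaps to CPU order on big-endian hosts. A one-line sketch of the idiom, where base and RX_SIZE_OFF are illustrative names rather than the driver's:

	u32 size = readl(base + RX_SIZE_OFF);	/* LE register -> CPU order */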
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 2348fa613707..0058d9fbf931 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -263,8 +263,7 @@ static void pty_set_termios(struct tty_struct *tty,
{
/* See if packet mode change of state. */
if (tty->link && tty->link->packet) {
- int extproc = (old_termios->c_lflag & EXTPROC) |
- (tty->termios.c_lflag & EXTPROC);
+ int extproc = (old_termios->c_lflag & EXTPROC) | L_EXTPROC(tty);
int old_flow = ((old_termios->c_iflag & IXON) &&
(old_termios->c_cc[VSTOP] == '\023') &&
(old_termios->c_cc[VSTART] == '\021'));
@@ -406,13 +405,8 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
if (legacy) {
/* We always use new tty termios data so we can do this
the easy way .. */
- retval = tty_init_termios(tty);
- if (retval)
- goto err_deinit_tty;
-
- retval = tty_init_termios(o_tty);
- if (retval)
- goto err_free_termios;
+ tty_init_termios(tty);
+ tty_init_termios(o_tty);
driver->other->ttys[idx] = o_tty;
driver->ttys[idx] = tty;
@@ -444,12 +438,7 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
tty->count++;
o_tty->count++;
return 0;
-err_free_termios:
- if (legacy)
- tty_free_termios(tty);
-err_deinit_tty:
- deinitialize_tty_struct(o_tty);
- free_tty_struct(o_tty);
+
err_put_module:
module_put(driver->other->owner);
err:
@@ -666,29 +655,22 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
return tty;
}
-/* We have no need to install and remove our tty objects as devpts does all
- the work for us */
-
static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
{
return pty_common_install(driver, tty, false);
}
-static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
-{
-}
-
/* this is called once with whichever end is closed last */
-static void pty_unix98_shutdown(struct tty_struct *tty)
+static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
- struct inode *ptmx_inode;
+ struct pts_fs_info *fsi;
if (tty->driver->subtype == PTY_TYPE_MASTER)
- ptmx_inode = tty->driver_data;
+ fsi = tty->driver_data;
else
- ptmx_inode = tty->link->driver_data;
- devpts_kill_index(ptmx_inode, tty->index);
- devpts_del_ref(ptmx_inode);
+ fsi = tty->link->driver_data;
+ devpts_kill_index(fsi, tty->index);
+ devpts_put_ref(fsi);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -704,7 +686,6 @@ static const struct tty_operations ptm_unix98_ops = {
.unthrottle = pty_unthrottle,
.ioctl = pty_unix98_ioctl,
.resize = pty_resize,
- .shutdown = pty_unix98_shutdown,
.cleanup = pty_cleanup
};
@@ -722,7 +703,6 @@ static const struct tty_operations pty_unix98_ops = {
.set_termios = pty_set_termios,
.start = pty_start,
.stop = pty_stop,
- .shutdown = pty_unix98_shutdown,
.cleanup = pty_cleanup,
};
@@ -740,6 +720,7 @@ static const struct tty_operations pty_unix98_ops = {
static int ptmx_open(struct inode *inode, struct file *filp)
{
+ struct pts_fs_info *fsi;
struct tty_struct *tty;
struct inode *slave_inode;
int retval;
@@ -754,47 +735,41 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
return retval;
+ fsi = devpts_get_ref(inode, filp);
+ retval = -ENODEV;
+ if (!fsi)
+ goto out_free_file;
+
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
- index = devpts_new_index(inode);
- if (index < 0) {
- retval = index;
- mutex_unlock(&devpts_mutex);
- goto err_file;
- }
-
+ index = devpts_new_index(fsi);
mutex_unlock(&devpts_mutex);
- mutex_lock(&tty_mutex);
- tty = tty_init_dev(ptm_driver, index);
+ retval = index;
+ if (index < 0)
+ goto out_put_ref;
- if (IS_ERR(tty)) {
- retval = PTR_ERR(tty);
- goto out;
- }
+ mutex_lock(&tty_mutex);
+ tty = tty_init_dev(ptm_driver, index);
/* The tty returned here is locked so we can safely
drop the mutex */
mutex_unlock(&tty_mutex);
- set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
- tty->driver_data = inode;
+ retval = PTR_ERR(tty);
+ if (IS_ERR(tty))
+ goto out;
/*
- * In the case where all references to ptmx inode are dropped and we
- * still have /dev/tty opened pointing to the master/slave pair (ptmx
- * is closed/released before /dev/tty), we must make sure that the inode
- * is still valid when we call the final pty_unix98_shutdown, thus we
- * hold an additional reference to the ptmx inode. For the same /dev/tty
- * last close case, we also need to make sure the super_block isn't
- * destroyed (devpts instance unmounted), before /dev/tty is closed and
- * on its release devpts_kill_index is called.
+ * From here on out, the tty is "live", and the index and
+ * fsi will be killed/put by the tty_release()
*/
- devpts_add_ref(inode);
+ set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ tty->driver_data = fsi;
tty_add_file(tty, filp);
- slave_inode = devpts_pty_new(inode,
+ slave_inode = devpts_pty_new(fsi,
MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
tty->link);
if (IS_ERR(slave_inode)) {
@@ -813,12 +788,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
return 0;
err_release:
tty_unlock(tty);
+ // This will also put-ref the fsi
tty_release(inode, filp);
return retval;
out:
- mutex_unlock(&tty_mutex);
- devpts_kill_index(inode, index);
-err_file:
+ devpts_kill_index(fsi, index);
+out_put_ref:
+ devpts_put_ref(fsi);
+out_free_file:
tty_free_file(filp);
return retval;
}
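
The rework above moves ptmx_open() from ptmx-inode reference games to a counted pts_fs_info reference (devpts_get_ref()/devpts_put_ref()), with the final release done in pty_unix98_remove() rather than a shutdown hook, and straightens the error paths into a reverse-order unwind ladder. A small userspace sketch of that acquire/unwind pairing — the helper names are stand-ins, not the devpts API:

#include <stdio.h>

/* Stand-ins for devpts_get_ref()/devpts_new_index() and their releases. */
static int get_ref(void)      { return 0; }	/* 0 = success */
static int new_index(void)    { return 7; }	/* >= 0 = allocated index */
static void kill_index(int i) { printf("kill index %d\n", i); }
static void put_ref(void)     { printf("put ref\n"); }

/* Acquire in order; on failure unwind in reverse, as ptmx_open() now does. */
static int open_master(int *index)
{
	int ret = get_ref();

	if (ret)
		goto out;

	*index = new_index();
	ret = *index;
	if (ret < 0)
		goto out_put_ref;

	return 0;	/* ref and index stay held until close */

out_put_ref:
	put_ref();
out:
	return ret;
}

/* The paired release, mirroring pty_unix98_remove(). */
static void close_master(int index)
{
	kill_index(index);
	put_ref();
}

int main(void)
{
	int index;

	if (open_master(&index) == 0)
		close_master(index);
	return 0;
}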
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 802eac7e561b..0b802cdd70d0 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -643,7 +643,6 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
info->chan = chan;
tty_port_init(&info->port);
info->port.ops = &rocket_port_ops;
- init_completion(&info->close_wait);
info->flags &= ~ROCKET_MODE_MASK;
switch (pc104[board][line]) {
case 422:
@@ -960,7 +959,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
tty->alt_speed = 460800;
configure_r_port(tty, info, NULL);
- if (tty->termios.c_cflag & CBAUD) {
+ if (C_BAUD(tty)) {
sSetDTR(cp);
sSetRTS(cp);
}
@@ -1043,13 +1042,12 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
}
}
spin_lock_irq(&port->lock);
- info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING | ASYNC_NORMAL_ACTIVE);
+ info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_NORMAL_ACTIVE);
tty->closing = 0;
spin_unlock_irq(&port->lock);
mutex_unlock(&port->mutex);
tty_port_tty_set(port, NULL);
- complete_all(&info->close_wait);
atomic_dec(&rp_num_ports_open);
#ifdef ROCKET_DEBUG_OPEN
@@ -1086,18 +1084,18 @@ static void rp_set_termios(struct tty_struct *tty,
cp = &info->channel;
/* Handle transition to B0 status */
- if ((old_termios->c_cflag & CBAUD) && !(tty->termios.c_cflag & CBAUD)) {
+ if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
sClrDTR(cp);
sClrRTS(cp);
}
/* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) && (tty->termios.c_cflag & CBAUD)) {
+ if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
sSetRTS(cp);
sSetDTR(cp);
}
- if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios.c_cflag & CRTSCTS))
+ if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty))
rp_start(tty);
}
@@ -1360,8 +1358,7 @@ static void rp_throttle(struct tty_struct *tty)
struct r_port *info = tty->driver_data;
#ifdef ROCKET_DEBUG_THROTTLE
- printk(KERN_INFO "throttle %s: %d....\n", tty->name,
- tty->ldisc.chars_in_buffer(tty));
+ printk(KERN_INFO "throttle %s ....\n", tty->name);
#endif
if (rocket_paranoia_check(info, "rp_throttle"))
@@ -1377,8 +1374,7 @@ static void rp_unthrottle(struct tty_struct *tty)
{
struct r_port *info = tty->driver_data;
#ifdef ROCKET_DEBUG_THROTTLE
- printk(KERN_INFO "unthrottle %s: %d....\n", tty->name,
- tty->ldisc.chars_in_buffer(tty));
+ printk(KERN_INFO "unthrottle %s ....\n", tty->name);
#endif
if (rocket_paranoia_check(info, "rp_unthrottle"))
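
The rocket changes are mostly a conversion to the tty core's termios test macros; the debug printks also stop poking tty->ldisc directly, which no longer offers a chars_in_buffer method to call. Assumed shape of the helpers used above (per include/linux/tty.h of this era):

	/* C_BAUD(tty) reads the CBAUD bits of the current termios: */
	#define _C_FLAG(tty, flag)	((tty)->termios.c_cflag & (flag))
	#define C_BAUD(tty)		_C_FLAG((tty), CBAUD)
	#define C_CRTSCTS(tty)		_C_FLAG((tty), CRTSCTS)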
diff --git a/drivers/tty/rocket_int.h b/drivers/tty/rocket_int.h
index 67e0f1e778a2..ef1e1be6b26d 100644
--- a/drivers/tty/rocket_int.h
+++ b/drivers/tty/rocket_int.h
@@ -1144,7 +1144,6 @@ struct r_port {
int read_status_mask;
int cps;
- struct completion close_wait; /* Not yet matching the core */
spinlock_t slock;
struct mutex write_mtx;
};
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
deleted file mode 100644
index 0982c1a44187..000000000000
--- a/drivers/tty/serial/68328serial.c
+++ /dev/null
@@ -1,1322 +0,0 @@
-/* 68328serial.c: Serial port driver for 68328 microcontroller
- *
- * Copyright (C) 1995 David S. Miller <davem@caip.rutgers.edu>
- * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
- * Copyright (C) 1998, 1999 D. Jeff Dionne <jeff@uclinux.org>
- * Copyright (C) 1999 Vladimir Gurevich <vgurevic@cisco.com>
- * Copyright (C) 2002-2003 David McCullough <davidm@snapgear.com>
- * Copyright (C) 2002 Greg Ungerer <gerg@snapgear.com>
- *
- * VZ Support/Fixes Evan Stawnyczy <e@lineo.ca>
- * Multiple UART support Daniel Potts <danielp@cse.unsw.edu.au>
- * Power management support Daniel Potts <danielp@cse.unsw.edu.au>
- * VZ Second Serial Port enable Phil Wilshire
- * 2.4/2.5 port David McCullough
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/serial.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/console.h>
-#include <linux/reboot.h>
-#include <linux/keyboard.h>
-#include <linux/init.h>
-#include <linux/pm.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/delay.h>
-#include <asm/uaccess.h>
-
-/* (es) */
-/* note: perhaps we can merge these files, so that you can just
- * define one of them, and they can sort that out for themselves
- */
-#if defined(CONFIG_M68EZ328)
-#include <asm/MC68EZ328.h>
-#else
-#if defined(CONFIG_M68VZ328)
-#include <asm/MC68VZ328.h>
-#else
-#include <asm/MC68328.h>
-#endif /* CONFIG_M68VZ328 */
-#endif /* CONFIG_M68EZ328 */
-
-/* Turn off usage of real serial interrupt code, to "support" Copilot */
-#ifdef CONFIG_XCOPILOT_BUGS
-#undef USE_INTS
-#else
-#define USE_INTS
-#endif
-
-/*
- * I believe this is the optimal setting that reduces the number of interrupts.
- * At high speeds the output might become a little "bursty" (use USTCNT_TXHE
- * if that bothers you), but in most cases it will not, since we try to
- * transmit characters every time rs_interrupt is called. Thus, quite often
- * you'll see that a receive interrupt occurs before the transmit one.
- * -- Vladimir Gurevich
- */
-#define USTCNT_TX_INTR_MASK (USTCNT_TXEE)
-
-/*
- * The 68328 and 68EZ328 UARTs are a little different. The EZ328 has a special
- * "old data" interrupt which occurs whenever data stays in the FIFO
- * longer than 30 bit-times. This allows us to use the FIFO without
- * compromising latency. The '328 does not have this feature, and without a
- * real 328-based board I would assume that RXRE is the safest setting.
- *
- * For the EZ328 I use the RXHE (half empty) interrupt to reduce the number
- * of interrupts. RXFE (receive queue full) causes the system to lose data
- * at least at 115200 baud.
- *
- * If your board is busy doing other stuff, you might consider using
- * RXRE (data ready interrupt) instead.
- *
- * The other option is to make these INTR masks run-time configurable, so
- * that people can dynamically adapt them according to the current usage.
- * -- Vladimir Gurevich
- */
-
-/* (es) */
-#if defined(CONFIG_M68EZ328) || defined(CONFIG_M68VZ328)
-#define USTCNT_RX_INTR_MASK (USTCNT_RXHE | USTCNT_ODEN)
-#elif defined(CONFIG_M68328)
-#define USTCNT_RX_INTR_MASK (USTCNT_RXRE)
-#else
-#error Please, define the Rx interrupt events for your CPU
-#endif
-/* (/es) */
-
-/*
- * This is our internal structure for each serial port's state.
- */
-struct m68k_serial {
- struct tty_port tport;
- char is_cons; /* Is this our console. */
- int magic;
- int baud_base;
- int port;
- int irq;
- int type; /* UART type */
- int custom_divisor;
- int x_char; /* xon/xoff character */
- int line;
- unsigned char *xmit_buf;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
-};
-
-#define SERIAL_MAGIC 0x5301
-
-/*
- * Define the number of ports supported and their irqs.
- */
-#define NR_PORTS 1
-
-static struct m68k_serial m68k_soft[NR_PORTS];
-
-static unsigned int uart_irqs[NR_PORTS] = { UART_IRQ_NUM };
-
-/* multiple ports are contiguous in memory */
-m68328_uart *uart_addr = (m68328_uart *)USTCNT_ADDR;
-
-struct tty_driver *serial_driver;
-
-static void change_speed(struct m68k_serial *info, struct tty_struct *tty);
-
-/*
- * Setup for console. Argument comes from the boot command line.
- */
-
-/* note: this is messy, but it works; again, perhaps it is defined somewhere else? */
-#ifdef CONFIG_M68VZ328
-#define CONSOLE_BAUD_RATE 19200
-#define DEFAULT_CBAUD B19200
-#endif
-
-
-#ifndef CONSOLE_BAUD_RATE
-#define CONSOLE_BAUD_RATE 9600
-#define DEFAULT_CBAUD B9600
-#endif
-
-
-static int m68328_console_initted;
-static int m68328_console_baud = CONSOLE_BAUD_RATE;
-static int m68328_console_cbaud = DEFAULT_CBAUD;
-
-
-static inline int serial_paranoia_check(struct m68k_serial *info,
- char *name, const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
- static const char *badmagic =
- "Warning: bad magic number for serial struct %s in %s\n";
- static const char *badinfo =
- "Warning: null m68k_serial for %s in %s\n";
-
- if (!info) {
- printk(badinfo, name, routine);
- return 1;
- }
- if (info->magic != SERIAL_MAGIC) {
- printk(badmagic, name, routine);
- return 1;
- }
-#endif
- return 0;
-}
-
-/*
- * This is used to figure out the divisor speeds and the timeouts
- */
-static int baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
- 9600, 19200, 38400, 57600, 115200, 0 };
-
-/* Utility routines */
-static inline int get_baud(struct m68k_serial *ss)
-{
- unsigned long result = 115200;
- unsigned short int baud = uart_addr[ss->line].ubaud;
- if (GET_FIELD(baud, UBAUD_PRESCALER) == 0x38) result = 38400;
- result >>= GET_FIELD(baud, UBAUD_DIVIDE);
-
- return result;
-}
-
-/*
- * ------------------------------------------------------------
- * rs_stop() and rs_start()
- *
- * These routines are called before setting or resetting tty->stopped.
- * They enable or disable transmitter interrupts, as necessary.
- * ------------------------------------------------------------
- */
-static void rs_stop(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
- m68328_uart *uart = &uart_addr[info->line];
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_stop"))
- return;
-
- local_irq_save(flags);
- uart->ustcnt &= ~USTCNT_TXEN;
- local_irq_restore(flags);
-}
-
-static int rs_put_char(char ch)
-{
- unsigned long flags;
- int loops = 0;
-
- local_irq_save(flags);
-
- while (!(UTX & UTX_TX_AVAIL) && (loops < 1000)) {
- loops++;
- udelay(5);
- }
-
- UTX_TXDATA = ch;
- udelay(5);
- local_irq_restore(flags);
- return 1;
-}
-
-static void rs_start(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
- m68328_uart *uart = &uart_addr[info->line];
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_start"))
- return;
-
- local_irq_save(flags);
- if (info->xmit_cnt && info->xmit_buf && !(uart->ustcnt & USTCNT_TXEN)) {
-#ifdef USE_INTS
- uart->ustcnt |= USTCNT_TXEN | USTCNT_TX_INTR_MASK;
-#else
- uart->ustcnt |= USTCNT_TXEN;
-#endif
- }
- local_irq_restore(flags);
-}
-
-static void receive_chars(struct m68k_serial *info, unsigned short rx)
-{
- m68328_uart *uart = &uart_addr[info->line];
- unsigned char ch, flag;
-
- /*
- * This do { } while() loop will get ALL chars out of the Rx FIFO
- */
-#ifndef CONFIG_XCOPILOT_BUGS
- do {
-#endif
- ch = GET_FIELD(rx, URX_RXDATA);
-
- if (info->is_cons) {
- if (URX_BREAK & rx) { /* whee, break received */
- return;
-#ifdef CONFIG_MAGIC_SYSRQ
- } else if (ch == 0x10) { /* ^P */
- show_state();
- show_free_areas(0);
- show_buffers();
-/* show_net_buffers(); */
- return;
- } else if (ch == 0x12) { /* ^R */
- emergency_restart();
- return;
-#endif /* CONFIG_MAGIC_SYSRQ */
- }
- }
-
- flag = TTY_NORMAL;
-
- if (rx & URX_PARITY_ERROR)
- flag = TTY_PARITY;
- else if (rx & URX_OVRUN)
- flag = TTY_OVERRUN;
- else if (rx & URX_FRAME_ERROR)
- flag = TTY_FRAME;
-
- tty_insert_flip_char(&info->tport, ch, flag);
-#ifndef CONFIG_XCOPILOT_BUGS
- } while ((rx = uart->urx.w) & URX_DATA_READY);
-#endif
-
- tty_schedule_flip(&info->tport);
-}
-
-static void transmit_chars(struct m68k_serial *info, struct tty_struct *tty)
-{
- m68328_uart *uart = &uart_addr[info->line];
-
- if (info->x_char) {
- /* Send next char */
- uart->utx.b.txdata = info->x_char;
- info->x_char = 0;
- goto clear_and_return;
- }
-
- if ((info->xmit_cnt <= 0) || !tty || tty->stopped) {
- /* That's peculiar... TX ints off */
- uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
- goto clear_and_return;
- }
-
- /* Send char */
- uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
- info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
- info->xmit_cnt--;
-
- if (info->xmit_cnt <= 0) {
- /* All done for now... TX ints off */
- uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
- goto clear_and_return;
- }
-
-clear_and_return:
- /* Clear interrupt (should be auto)*/
- return;
-}
-
-/*
- * This is the serial driver's generic interrupt routine
- */
-irqreturn_t rs_interrupt(int irq, void *dev_id)
-{
- struct m68k_serial *info = dev_id;
- struct tty_struct *tty = tty_port_tty_get(&info->tport);
- m68328_uart *uart;
- unsigned short rx;
- unsigned short tx;
-
- uart = &uart_addr[info->line];
- rx = uart->urx.w;
-
-#ifdef USE_INTS
- tx = uart->utx.w;
-
- if (rx & URX_DATA_READY)
- receive_chars(info, rx);
- if (tx & UTX_TX_AVAIL)
- transmit_chars(info, tty);
-#else
- receive_chars(info, rx);
-#endif
- tty_kref_put(tty);
-
- return IRQ_HANDLED;
-}
-
-static int startup(struct m68k_serial *info, struct tty_struct *tty)
-{
- m68328_uart *uart = &uart_addr[info->line];
- unsigned long flags;
-
- if (info->tport.flags & ASYNC_INITIALIZED)
- return 0;
-
- if (!info->xmit_buf) {
- info->xmit_buf = (unsigned char *) __get_free_page(GFP_KERNEL);
- if (!info->xmit_buf)
- return -ENOMEM;
- }
-
- local_irq_save(flags);
-
- /*
- * Clear the FIFO buffers and disable them
- * (they will be reenabled in change_speed())
- */
-
- uart->ustcnt = USTCNT_UEN;
- uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_TXEN;
- (void)uart->urx.w;
-
- /*
- * Finally, enable sequencing and interrupts
- */
-#ifdef USE_INTS
- uart->ustcnt = USTCNT_UEN | USTCNT_RXEN |
- USTCNT_RX_INTR_MASK | USTCNT_TX_INTR_MASK;
-#else
- uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
-#endif
-
- if (tty)
- clear_bit(TTY_IO_ERROR, &tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- /*
- * and set the speed of the serial port
- */
-
- change_speed(info, tty);
-
- info->tport.flags |= ASYNC_INITIALIZED;
- local_irq_restore(flags);
- return 0;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void shutdown(struct m68k_serial *info, struct tty_struct *tty)
-{
- m68328_uart *uart = &uart_addr[info->line];
- unsigned long flags;
-
- uart->ustcnt = 0; /* All off! */
- if (!(info->tport.flags & ASYNC_INITIALIZED))
- return;
-
- local_irq_save(flags);
-
- if (info->xmit_buf) {
- free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = 0;
- }
-
- if (tty)
- set_bit(TTY_IO_ERROR, &tty->flags);
-
- info->tport.flags &= ~ASYNC_INITIALIZED;
- local_irq_restore(flags);
-}
-
-struct {
- int divisor, prescale;
-}
-#ifndef CONFIG_M68VZ328
- hw_baud_table[18] = {
- {0, 0}, /* 0 */
- {0, 0}, /* 50 */
- {0, 0}, /* 75 */
- {0, 0}, /* 110 */
- {0, 0}, /* 134 */
- {0, 0}, /* 150 */
- {0, 0}, /* 200 */
- {7, 0x26}, /* 300 */
- {6, 0x26}, /* 600 */
- {5, 0x26}, /* 1200 */
- {0, 0}, /* 1800 */
- {4, 0x26}, /* 2400 */
- {3, 0x26}, /* 4800 */
- {2, 0x26}, /* 9600 */
- {1, 0x26}, /* 19200 */
- {0, 0x26}, /* 38400 */
- {1, 0x38}, /* 57600 */
- {0, 0x38}, /* 115200 */
-};
-#else
- hw_baud_table[18] = {
- {0, 0}, /* 0 */
- {0, 0}, /* 50 */
- {0, 0}, /* 75 */
- {0, 0}, /* 110 */
- {0, 0}, /* 134 */
- {0, 0}, /* 150 */
- {0, 0}, /* 200 */
- {0, 0}, /* 300 */
- {7, 0x26}, /* 600 */
- {6, 0x26}, /* 1200 */
- {0, 0}, /* 1800 */
- {5, 0x26}, /* 2400 */
- {4, 0x26}, /* 4800 */
- {3, 0x26}, /* 9600 */
- {2, 0x26}, /* 19200 */
- {1, 0x26}, /* 38400 */
- {0, 0x26}, /* 57600 */
- {1, 0x38}, /* 115200 */
-};
-#endif
-/* rate = 1036800 / ((65 - prescale) * (1<<divider)) */
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static void change_speed(struct m68k_serial *info, struct tty_struct *tty)
-{
- m68328_uart *uart = &uart_addr[info->line];
- unsigned short port;
- unsigned short ustcnt;
- unsigned cflag;
- int i;
-
- cflag = tty->termios.c_cflag;
- port = info->port;
- if (!port)
- return;
-
- ustcnt = uart->ustcnt;
- uart->ustcnt = ustcnt & ~USTCNT_TXEN;
-
- i = cflag & CBAUD;
- if (i & CBAUDEX) {
- i = (i & ~CBAUDEX) + B38400;
- }
-
- uart->ubaud = PUT_FIELD(UBAUD_DIVIDE, hw_baud_table[i].divisor) |
- PUT_FIELD(UBAUD_PRESCALER, hw_baud_table[i].prescale);
-
- ustcnt &= ~(USTCNT_PARITYEN | USTCNT_ODD_EVEN | USTCNT_STOP | USTCNT_8_7);
-
- if ((cflag & CSIZE) == CS8)
- ustcnt |= USTCNT_8_7;
-
- if (cflag & CSTOPB)
- ustcnt |= USTCNT_STOP;
-
- if (cflag & PARENB)
- ustcnt |= USTCNT_PARITYEN;
- if (cflag & PARODD)
- ustcnt |= USTCNT_ODD_EVEN;
-
-#ifdef CONFIG_SERIAL_68328_RTS_CTS
- if (cflag & CRTSCTS) {
- uart->utx.w &= ~UTX_NOCTS;
- } else {
- uart->utx.w |= UTX_NOCTS;
- }
-#endif
-
- ustcnt |= USTCNT_TXEN;
-
- uart->ustcnt = ustcnt;
- return;
-}
-
-/*
- * Fair output driver allows a process to speak.
- */
-static void rs_fair_output(void)
-{
- int left; /* Output no more than that */
- unsigned long flags;
- struct m68k_serial *info = &m68k_soft[0];
- char c;
-
- if (info == NULL) return;
- if (info->xmit_buf == NULL) return;
-
- local_irq_save(flags);
- left = info->xmit_cnt;
- while (left != 0) {
- c = info->xmit_buf[info->xmit_tail];
- info->xmit_tail = (info->xmit_tail+1) & (SERIAL_XMIT_SIZE-1);
- info->xmit_cnt--;
- local_irq_restore(flags);
-
- rs_put_char(c);
-
- local_irq_save(flags);
- left = min(info->xmit_cnt, left-1);
- }
-
- /* Last character is being transmitted now (hopefully). */
- udelay(5);
-
- local_irq_restore(flags);
- return;
-}
-
-/*
- * m68k_console_print is registered for printk.
- */
-void console_print_68328(const char *p)
-{
- char c;
-
- while ((c = *(p++)) != 0) {
- if (c == '\n')
- rs_put_char('\r');
- rs_put_char(c);
- }
-
- /* Comment this if you want to have a strict interrupt-driven output */
- rs_fair_output();
-
- return;
-}
-
-static void rs_set_ldisc(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_set_ldisc"))
- return;
-
- info->is_cons = (tty->termios.c_line == N_TTY);
-
- printk("ttyS%d console mode %s\n", info->line, info->is_cons ? "on" : "off");
-}
-
-static void rs_flush_chars(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
- m68328_uart *uart = &uart_addr[info->line];
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
- return;
-#ifndef USE_INTS
- for (;;) {
-#endif
-
- /* Enable transmitter */
- local_irq_save(flags);
-
- if (info->xmit_cnt <= 0 || tty->stopped || !info->xmit_buf) {
- local_irq_restore(flags);
- return;
- }
-
-#ifdef USE_INTS
- uart->ustcnt |= USTCNT_TXEN | USTCNT_TX_INTR_MASK;
-#else
- uart->ustcnt |= USTCNT_TXEN;
-#endif
-
-#ifdef USE_INTS
- if (uart->utx.w & UTX_TX_AVAIL) {
-#else
- if (1) {
-#endif
- /* Send char */
- uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
- info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
- info->xmit_cnt--;
- }
-
-#ifndef USE_INTS
- while (!(uart->utx.w & UTX_TX_AVAIL)) udelay(5);
- }
-#endif
- local_irq_restore(flags);
-}
-
-extern void console_printn(const char *b, int count);
-
-static int rs_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
-{
- int c, total = 0;
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
- m68328_uart *uart = &uart_addr[info->line];
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_write"))
- return 0;
-
- if (!tty || !info->xmit_buf)
- return 0;
-
- local_save_flags(flags);
- while (1) {
- local_irq_disable();
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
- local_irq_restore(flags);
-
- if (c <= 0)
- break;
-
- memcpy(info->xmit_buf + info->xmit_head, buf, c);
-
- local_irq_disable();
- info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
- info->xmit_cnt += c;
- local_irq_restore(flags);
- buf += c;
- count -= c;
- total += c;
- }
-
- if (info->xmit_cnt && !tty->stopped) {
- /* Enable transmitter */
- local_irq_disable();
-#ifndef USE_INTS
- while (info->xmit_cnt) {
-#endif
-
- uart->ustcnt |= USTCNT_TXEN;
-#ifdef USE_INTS
- uart->ustcnt |= USTCNT_TX_INTR_MASK;
-#else
- while (!(uart->utx.w & UTX_TX_AVAIL)) udelay(5);
-#endif
- if (uart->utx.w & UTX_TX_AVAIL) {
- uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
- info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
- info->xmit_cnt--;
- }
-
-#ifndef USE_INTS
- }
-#endif
- local_irq_restore(flags);
- }
-
- return total;
-}
-
-static int rs_write_room(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
- int ret;
-
- if (serial_paranoia_check(info, tty->name, "rs_write_room"))
- return 0;
- ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
- return ret;
-}
-
-static int rs_chars_in_buffer(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
- return 0;
- return info->xmit_cnt;
-}
-
-static void rs_flush_buffer(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
- return;
- local_irq_save(flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- local_irq_restore(flags);
- tty_wakeup(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_throttle()
- *
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- * ------------------------------------------------------------
- */
-static void rs_throttle(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_throttle"))
- return;
-
- if (I_IXOFF(tty))
- info->x_char = STOP_CHAR(tty);
-
- /* Turn off RTS line (do this atomic) */
-}
-
-static void rs_unthrottle(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
- return;
-
- if (I_IXOFF(tty)) {
- if (info->x_char)
- info->x_char = 0;
- else
- info->x_char = START_CHAR(tty);
- }
-
- /* Assert RTS line (do this atomic) */
-}
-
-/*
- * ------------------------------------------------------------
- * rs_ioctl() and friends
- * ------------------------------------------------------------
- */
-
-static int get_serial_info(struct m68k_serial *info,
- struct serial_struct *retinfo)
-{
- struct serial_struct tmp;
-
- if (!retinfo)
- return -EFAULT;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = info->type;
- tmp.line = info->line;
- tmp.port = info->port;
- tmp.irq = info->irq;
- tmp.flags = info->tport.flags;
- tmp.baud_base = info->baud_base;
- tmp.close_delay = info->tport.close_delay;
- tmp.closing_wait = info->tport.closing_wait;
- tmp.custom_divisor = info->custom_divisor;
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
-
- return 0;
-}
-
-static int set_serial_info(struct m68k_serial *info, struct tty_struct *tty,
- struct serial_struct *new_info)
-{
- struct tty_port *port = &info->tport;
- struct serial_struct new_serial;
- struct m68k_serial old_info;
- int retval = 0;
-
- if (!new_info)
- return -EFAULT;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
- old_info = *info;
-
- if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.baud_base != info->baud_base) ||
- (new_serial.type != info->type) ||
- (new_serial.close_delay != port->close_delay) ||
- ((new_serial.flags & ~ASYNC_USR_MASK) !=
- (port->flags & ~ASYNC_USR_MASK)))
- return -EPERM;
- port->flags = ((port->flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- info->custom_divisor = new_serial.custom_divisor;
- goto check_and_exit;
- }
-
- if (port->count > 1)
- return -EBUSY;
-
- /*
- * OK, past this point, all the error checking has been done.
- * At this point, we start making changes.....
- */
-
- info->baud_base = new_serial.baud_base;
- port->flags = ((port->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- info->type = new_serial.type;
- port->close_delay = new_serial.close_delay;
- port->closing_wait = new_serial.closing_wait;
-
-check_and_exit:
- retval = startup(info, tty);
- return retval;
-}
-
-/*
- * get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * is emptied. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when
- * the transmit shift register is empty, not when the
- * transmit holding register is empty. This functionality
- * allows an RS485 driver to be written in user space.
- */
-static int get_lsr_info(struct m68k_serial *info, unsigned int *value)
-{
-#ifdef CONFIG_SERIAL_68328_RTS_CTS
- m68328_uart *uart = &uart_addr[info->line];
-#endif
- unsigned char status;
- unsigned long flags;
-
- local_irq_save(flags);
-#ifdef CONFIG_SERIAL_68328_RTS_CTS
- status = (uart->utx.w & UTX_CTS_STAT) ? 1 : 0;
-#else
- status = 0;
-#endif
- local_irq_restore(flags);
- return put_user(status, value);
-}
-
-/*
- * This routine sends a break character out the serial port.
- */
-static void send_break(struct m68k_serial *info, unsigned int duration)
-{
- m68328_uart *uart = &uart_addr[info->line];
- unsigned long flags;
- if (!info->port)
- return;
- local_irq_save(flags);
-#ifdef USE_INTS
- uart->utx.w |= UTX_SEND_BREAK;
- msleep_interruptible(duration);
- uart->utx.w &= ~UTX_SEND_BREAK;
-#endif
- local_irq_restore(flags);
-}
-
-static int rs_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
- int retval;
-
- if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
- return -ENODEV;
-
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
- (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
- (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
- }
-
- switch (cmd) {
- case TCSBRK: /* SVID version: non-zero arg --> no break */
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- if (!arg)
- send_break(info, 250); /* 1/4 second */
- return 0;
- case TCSBRKP: /* support for POSIX tcsendbreak() */
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- send_break(info, arg ? arg*(100) : 250);
- return 0;
- case TIOCGSERIAL:
- return get_serial_info(info,
- (struct serial_struct *) arg);
- case TIOCSSERIAL:
- return set_serial_info(info, tty,
- (struct serial_struct *) arg);
- case TIOCSERGETLSR: /* Get line status register */
- return get_lsr_info(info, (unsigned int *) arg);
- case TIOCSERGSTRUCT:
- if (copy_to_user((struct m68k_serial *) arg,
- info, sizeof(struct m68k_serial)))
- return -EFAULT;
- return 0;
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
- change_speed(info, tty);
-
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios.c_cflag & CRTSCTS))
- rs_start(tty);
-
-}
-
-/*
- * ------------------------------------------------------------
- * rs_close()
- *
- * This routine is called when the serial port gets closed. First, we
- * wait for the last remaining data to be sent. Then, we unlink its
- * S structure from the interrupt chain if necessary, and we free
- * that IRQ if nothing is left in the chain.
- * ------------------------------------------------------------
- */
-static void rs_close(struct tty_struct *tty, struct file *filp)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
- struct tty_port *port = &info->tport;
- m68328_uart *uart = &uart_addr[info->line];
- unsigned long flags;
-
- if (serial_paranoia_check(info, tty->name, "rs_close"))
- return;
-
- local_irq_save(flags);
-
- if (tty_hung_up_p(filp)) {
- local_irq_restore(flags);
- return;
- }
-
- if ((tty->count == 1) && (port->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk("rs_close: bad serial port count; tty->count is 1, "
- "port->count is %d\n", port->count);
- port->count = 1;
- }
- if (--port->count < 0) {
- printk("rs_close: bad serial port count for ttyS%d: %d\n",
- info->line, port->count);
- port->count = 0;
- }
- if (port->count) {
- local_irq_restore(flags);
- return;
- }
- port->flags |= ASYNC_CLOSING;
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters.
- */
- tty->closing = 1;
- if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, port->closing_wait);
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
-
- uart->ustcnt &= ~USTCNT_RXEN;
- uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK);
-
- shutdown(info, tty);
- rs_flush_buffer(tty);
-
- tty_ldisc_flush(tty);
- tty->closing = 0;
- tty_port_tty_set(&info->tport, NULL);
-#warning "This is not and has never been valid so fix it"
-#if 0
- if (tty->ldisc.num != ldiscs[N_TTY].num) {
- if (tty->ldisc.close)
- (tty->ldisc.close)(tty);
- tty->ldisc = ldiscs[N_TTY];
- tty->termios.c_line = N_TTY;
- if (tty->ldisc.open)
- (tty->ldisc.open)(tty);
- }
-#endif
- if (port->blocked_open) {
- if (port->close_delay)
- msleep_interruptible(jiffies_to_msecs(port->close_delay));
- wake_up_interruptible(&port->open_wait);
- }
- port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
- local_irq_restore(flags);
-}
-
-/*
- * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-void rs_hangup(struct tty_struct *tty)
-{
- struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
- if (serial_paranoia_check(info, tty->name, "rs_hangup"))
- return;
-
- rs_flush_buffer(tty);
- shutdown(info, tty);
- info->tport.count = 0;
- info->tport.flags &= ~ASYNC_NORMAL_ACTIVE;
- tty_port_tty_set(&info->tport, NULL);
- wake_up_interruptible(&info->tport.open_wait);
-}
-
-/*
- * This routine is called whenever a serial port is opened. It
- * enables interrupts for the serial port, linking its S structure into
- * the IRQ chain. It also performs the serial-specific
- * initialization for the tty structure.
- */
-int rs_open(struct tty_struct *tty, struct file *filp)
-{
- struct m68k_serial *info;
- int retval;
-
- info = &m68k_soft[tty->index];
-
- if (serial_paranoia_check(info, tty->name, "rs_open"))
- return -ENODEV;
-
- info->tport.count++;
- tty->driver_data = info;
- tty_port_tty_set(&info->tport, tty);
-
- /*
- * Start up serial port
- */
- retval = startup(info, tty);
- if (retval)
- return retval;
-
- return tty_port_block_til_ready(&info->tport, tty, filp);
-}
-
-/* Finally, routines used to initialize the serial driver. */
-
-static void show_serial_version(void)
-{
- printk("MC68328 serial driver version 1.00\n");
-}
-
-static const struct tty_operations rs_ops = {
- .open = rs_open,
- .close = rs_close,
- .write = rs_write,
- .flush_chars = rs_flush_chars,
- .write_room = rs_write_room,
- .chars_in_buffer = rs_chars_in_buffer,
- .flush_buffer = rs_flush_buffer,
- .ioctl = rs_ioctl,
- .throttle = rs_throttle,
- .unthrottle = rs_unthrottle,
- .set_termios = rs_set_termios,
- .stop = rs_stop,
- .start = rs_start,
- .hangup = rs_hangup,
- .set_ldisc = rs_set_ldisc,
-};
-
-static const struct tty_port_operations rs_port_ops = {
-};
-
-/* rs_init inits the driver */
-static int __init
-rs68328_init(void)
-{
- unsigned long flags;
- int i;
- struct m68k_serial *info;
-
- serial_driver = alloc_tty_driver(NR_PORTS);
- if (!serial_driver)
- return -ENOMEM;
-
- show_serial_version();
-
- /* Initialize the tty_driver structure */
- /* SPARC: Not all of this is exactly right for us. */
-
- serial_driver->name = "ttyS";
- serial_driver->major = TTY_MAJOR;
- serial_driver->minor_start = 64;
- serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
- serial_driver->subtype = SERIAL_TYPE_NORMAL;
- serial_driver->init_termios = tty_std_termios;
- serial_driver->init_termios.c_cflag =
- m68328_console_cbaud | CS8 | CREAD | HUPCL | CLOCAL;
- serial_driver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(serial_driver, &rs_ops);
-
- local_irq_save(flags);
-
- for (i = 0; i < NR_PORTS; i++) {
-
- info = &m68k_soft[i];
- tty_port_init(&info->tport);
- info->tport.ops = &rs_port_ops;
- info->magic = SERIAL_MAGIC;
- info->port = (int) &uart_addr[i];
- info->irq = uart_irqs[i];
- info->custom_divisor = 16;
- info->x_char = 0;
- info->line = i;
- info->is_cons = 1; /* Means shortcuts work */
-
- printk("%s%d at 0x%08x (irq = %d)", serial_driver->name, info->line,
- info->port, info->irq);
- printk(" is a builtin MC68328 UART\n");
-
-#ifdef CONFIG_M68VZ328
- if (i > 0)
- PJSEL &= 0xCF; /* PSW enable second port output */
-#endif
-
- if (request_irq(uart_irqs[i],
- rs_interrupt,
- 0,
- "M68328_UART", info))
- panic("Unable to attach 68328 serial interrupt\n");
-
- tty_port_link_device(&info->tport, serial_driver, i);
- }
- local_irq_restore(flags);
-
- if (tty_register_driver(serial_driver)) {
- put_tty_driver(serial_driver);
- for (i = 0; i < NR_PORTS; i++)
- tty_port_destroy(&m68k_soft[i].tport);
- printk(KERN_ERR "Couldn't register serial driver\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-module_init(rs68328_init);
-
-
-
-static void m68328_set_baud(void)
-{
- unsigned short ustcnt;
- int i;
-
- ustcnt = USTCNT;
- USTCNT = ustcnt & ~USTCNT_TXEN;
-
-again:
- for (i = 0; i < ARRAY_SIZE(baud_table); i++)
- if (baud_table[i] == m68328_console_baud)
- break;
- if (i >= ARRAY_SIZE(baud_table)) {
- m68328_console_baud = 9600;
- goto again;
- }
-
- UBAUD = PUT_FIELD(UBAUD_DIVIDE, hw_baud_table[i].divisor) |
- PUT_FIELD(UBAUD_PRESCALER, hw_baud_table[i].prescale);
- ustcnt &= ~(USTCNT_PARITYEN | USTCNT_ODD_EVEN | USTCNT_STOP | USTCNT_8_7);
- ustcnt |= USTCNT_8_7;
- ustcnt |= USTCNT_TXEN;
- USTCNT = ustcnt;
- m68328_console_initted = 1;
- return;
-}
-
-
-int m68328_console_setup(struct console *cp, char *arg)
-{
- int i, n = CONSOLE_BAUD_RATE;
-
- if (!cp)
- return(-1);
-
- if (arg)
- n = simple_strtoul(arg, NULL, 0);
-
- for (i = 0; i < ARRAY_SIZE(baud_table); i++)
- if (baud_table[i] == n)
- break;
- if (i < ARRAY_SIZE(baud_table)) {
- m68328_console_baud = n;
- m68328_console_cbaud = 0;
- if (i > 15) {
- m68328_console_cbaud |= CBAUDEX;
- i -= 15;
- }
- m68328_console_cbaud |= i;
- }
-
- m68328_set_baud(); /* make sure baud rate changes */
- return 0;
-}
-
-
-static struct tty_driver *m68328_console_device(struct console *c, int *index)
-{
- *index = c->index;
- return serial_driver;
-}
-
-
-void m68328_console_write (struct console *co, const char *str,
- unsigned int count)
-{
- if (!m68328_console_initted)
- m68328_set_baud();
- while (count--) {
- if (*str == '\n')
- rs_put_char('\r');
- rs_put_char(*str++);
- }
-}
-
-
-static struct console m68328_driver = {
- .name = "ttyS",
- .write = m68328_console_write,
- .device = m68328_console_device,
- .setup = m68328_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-
-static int __init m68328_console_init(void)
-{
- register_console(&m68328_driver);
- return 0;
-}
-
-console_initcall(m68328_console_init);
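
For the record, the deleted driver's hw_baud_table encodes the formula noted inline: rate = 1036800 / ((65 - prescale) * (1 << divisor)). A standalone check of a few non-VZ entries (0x26 = 38, 0x38 = 56):

#include <stdio.h>

int main(void)
{
	/* {expected baud, divisor, prescale} from the non-VZ table above */
	struct { long baud; int divisor, prescale; } t[] = {
		{   9600, 2, 0x26 },	/* 1036800 / (27 * 4) */
		{  19200, 1, 0x26 },	/* 1036800 / (27 * 2) */
		{  57600, 1, 0x38 },	/* 1036800 / ( 9 * 2) */
		{ 115200, 0, 0x38 },	/* 1036800 / ( 9 * 1) */
	};
	unsigned i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("%6ld -> %ld\n", t[i].baud,
		       1036800L / ((65 - t[i].prescale) * (1L << t[i].divisor)));
	return 0;
}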
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index d54dcd87c67e..047a7ba6796a 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -92,6 +92,18 @@ struct serial8250_config {
#define SERIAL8250_SHARE_IRQS 0
#endif
+#define SERIAL8250_PORT_FLAGS(_base, _irq, _flags) \
+ { \
+ .iobase = _base, \
+ .irq = _irq, \
+ .uartclk = 1843200, \
+ .iotype = UPIO_PORT, \
+ .flags = UPF_BOOT_AUTOCONF | (_flags), \
+ }
+
+#define SERIAL8250_PORT(_base, _irq) SERIAL8250_PORT_FLAGS(_base, _irq, 0)
+
+
static inline int serial_in(struct uart_8250_port *up, int offset)
{
return up->port.serial_in(&up->port, offset);
@@ -117,6 +129,8 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
struct uart_8250_port *serial8250_get_port(int line);
void serial8250_rpm_get(struct uart_8250_port *p);
void serial8250_rpm_put(struct uart_8250_port *p);
+int serial8250_em485_init(struct uart_8250_port *p);
+void serial8250_em485_destroy(struct uart_8250_port *p);
#if defined(__alpha__) && !defined(CONFIG_PCI)
/*
diff --git a/drivers/tty/serial/8250/8250_accent.c b/drivers/tty/serial/8250/8250_accent.c
index 34b51c651192..522aeae05192 100644
--- a/drivers/tty/serial/8250/8250_accent.c
+++ b/drivers/tty/serial/8250/8250_accent.c
@@ -10,18 +10,11 @@
#include <linux/init.h>
#include <linux/serial_8250.h>
-#define PORT(_base,_irq) \
- { \
- .iobase = _base, \
- .irq = _irq, \
- .uartclk = 1843200, \
- .iotype = UPIO_PORT, \
- .flags = UPF_BOOT_AUTOCONF, \
- }
+#include "8250.h"
static struct plat_serial8250_port accent_data[] = {
- PORT(0x330, 4),
- PORT(0x338, 4),
+ SERIAL8250_PORT(0x330, 4),
+ SERIAL8250_PORT(0x338, 4),
{ },
};
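
The board files now share one macro instead of each carrying a private PORT() copy. Given the SERIAL8250_PORT_FLAGS() definition in 8250.h above, SERIAL8250_PORT(0x330, 4) expands to:

	{
		.iobase  = 0x330,
		.irq     = 4,
		.uartclk = 1843200,
		.iotype  = UPIO_PORT,
		.flags   = UPF_BOOT_AUTOCONF | (0),
	}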
diff --git a/drivers/tty/serial/8250/8250_acorn.c b/drivers/tty/serial/8250/8250_acorn.c
index 549aa07c0d27..402dfdd4940e 100644
--- a/drivers/tty/serial/8250/8250_acorn.c
+++ b/drivers/tty/serial/8250/8250_acorn.c
@@ -70,7 +70,7 @@ serial_card_probe(struct expansion_card *ec, const struct ecard_id *id)
uart.port.regshift = 2;
uart.port.dev = &ec->dev;
- for (i = 0; i < info->num_ports; i ++) {
+ for (i = 0; i < info->num_ports; i++) {
uart.port.membase = info->vaddr + type->offset[i];
uart.port.mapbase = bus_addr + type->offset[i];
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
new file mode 100644
index 000000000000..e10f1244409b
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -0,0 +1,146 @@
+/*
+ * Serial port driver for BCM2835AUX UART
+ *
+ * Copyright (C) 2016 Martin Sperl <kernel@martin.sperl.org>
+ *
+ * Based on 8250_lpc18xx.c:
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "8250.h"
+
+struct bcm2835aux_data {
+ struct uart_8250_port uart;
+ struct clk *clk;
+ int line;
+};
+
+static int bcm2835aux_serial_probe(struct platform_device *pdev)
+{
+ struct bcm2835aux_data *data;
+ struct resource *res;
+ int ret;
+
+ /* allocate the custom structure */
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* initialize data */
+ spin_lock_init(&data->uart.port.lock);
+ data->uart.capabilities = UART_CAP_FIFO;
+ data->uart.port.dev = &pdev->dev;
+ data->uart.port.regshift = 2;
+ data->uart.port.type = PORT_16550;
+ data->uart.port.iotype = UPIO_MEM;
+ data->uart.port.fifosize = 8;
+ data->uart.port.flags = UPF_SHARE_IRQ |
+ UPF_FIXED_PORT |
+ UPF_FIXED_TYPE |
+ UPF_SKIP_TEST;
+
+ /* get the clock - this also enables the HW */
+ data->clk = devm_clk_get(&pdev->dev, NULL);
+ ret = PTR_ERR_OR_ZERO(data->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+ return ret;
+ }
+
+ /* get the interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "irq not found - %i", ret);
+ return ret;
+ }
+ data->uart.port.irq = ret;
+
+ /* map the main registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "memory resource not found");
+ return -EINVAL;
+ }
+ data->uart.port.membase = devm_ioremap_resource(&pdev->dev, res);
+ ret = PTR_ERR_OR_ZERO(data->uart.port.membase);
+ if (ret)
+ return ret;
+
+ /* Check for a fixed line number */
+ ret = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (ret >= 0)
+ data->uart.port.line = ret;
+
+ /* enable the clock as a last step */
+ ret = clk_prepare_enable(data->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable uart clock - %d\n",
+ ret);
+ return ret;
+ }
+
+ /* the HW-clock divider for bcm2835aux is 8,
+ * but 8250 expects a divider of 16,
+ * so we have to multiply the actual clock by 2
+ * to get identical baudrates.
+ */
+ data->uart.port.uartclk = clk_get_rate(data->clk) * 2;
+
+ /* register the port */
+ ret = serial8250_register_8250_port(&data->uart);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register 8250 port - %d\n",
+ ret);
+ goto dis_clk;
+ }
+ data->line = ret;
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+
+dis_clk:
+ clk_disable_unprepare(data->clk);
+ return ret;
+}
+
+static int bcm2835aux_serial_remove(struct platform_device *pdev)
+{
+ struct bcm2835aux_data *data = platform_get_drvdata(pdev);
+
+ serial8250_unregister_port(data->uart.port.line);
+ clk_disable_unprepare(data->clk);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835aux_serial_match[] = {
+ { .compatible = "brcm,bcm2835-aux-uart" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm2835aux_serial_match);
+
+static struct platform_driver bcm2835aux_serial_driver = {
+ .driver = {
+ .name = "bcm2835-aux-uart",
+ .of_match_table = bcm2835aux_serial_match,
+ },
+ .probe = bcm2835aux_serial_probe,
+ .remove = bcm2835aux_serial_remove,
+};
+module_platform_driver(bcm2835aux_serial_driver);
+
+MODULE_DESCRIPTION("BCM2835 auxiliar UART driver");
+MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
+MODULE_LICENSE("GPL v2");
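
The uartclk doubling above can be checked with a little arithmetic: the 8250 core computes baud = uartclk / (16 * divisor), while the bcm2835aux hardware divides its input clock by 8, so reporting twice the real clock rate makes the two agree. A standalone sketch with an illustrative 500 MHz clock and divisor:

#include <stdio.h>

int main(void)
{
	unsigned long clk = 500000000UL;	/* assumed aux clock rate */
	unsigned long uartclk = clk * 2;	/* what the driver reports */
	unsigned long divisor = 3255;		/* illustrative 8250 divisor */

	/* both print the same baud rate (~19200 here) */
	printf("8250 core: %lu\n", uartclk / (16 * divisor));
	printf("hardware:  %lu\n", clk / (8 * divisor));
	return 0;
}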
diff --git a/drivers/tty/serial/8250/8250_boca.c b/drivers/tty/serial/8250/8250_boca.c
index d125dc107985..a63b5998e383 100644
--- a/drivers/tty/serial/8250/8250_boca.c
+++ b/drivers/tty/serial/8250/8250_boca.c
@@ -10,32 +10,25 @@
#include <linux/init.h>
#include <linux/serial_8250.h>
-#define PORT(_base,_irq) \
- { \
- .iobase = _base, \
- .irq = _irq, \
- .uartclk = 1843200, \
- .iotype = UPIO_PORT, \
- .flags = UPF_BOOT_AUTOCONF, \
- }
+#include "8250.h"
static struct plat_serial8250_port boca_data[] = {
- PORT(0x100, 12),
- PORT(0x108, 12),
- PORT(0x110, 12),
- PORT(0x118, 12),
- PORT(0x120, 12),
- PORT(0x128, 12),
- PORT(0x130, 12),
- PORT(0x138, 12),
- PORT(0x140, 12),
- PORT(0x148, 12),
- PORT(0x150, 12),
- PORT(0x158, 12),
- PORT(0x160, 12),
- PORT(0x168, 12),
- PORT(0x170, 12),
- PORT(0x178, 12),
+ SERIAL8250_PORT(0x100, 12),
+ SERIAL8250_PORT(0x108, 12),
+ SERIAL8250_PORT(0x110, 12),
+ SERIAL8250_PORT(0x118, 12),
+ SERIAL8250_PORT(0x120, 12),
+ SERIAL8250_PORT(0x128, 12),
+ SERIAL8250_PORT(0x130, 12),
+ SERIAL8250_PORT(0x138, 12),
+ SERIAL8250_PORT(0x140, 12),
+ SERIAL8250_PORT(0x148, 12),
+ SERIAL8250_PORT(0x150, 12),
+ SERIAL8250_PORT(0x158, 12),
+ SERIAL8250_PORT(0x160, 12),
+ SERIAL8250_PORT(0x168, 12),
+ SERIAL8250_PORT(0x170, 12),
+ SERIAL8250_PORT(0x178, 12),
{ },
};
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index c9720a97a977..2f4f5ee651db 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -597,6 +597,7 @@ static void univ8250_console_write(struct console *co, const char *s,
static int univ8250_console_setup(struct console *co, char *options)
{
struct uart_port *port;
+ int retval;
/*
* Check whether an invalid uart number has been specified, and
@@ -609,7 +610,10 @@ static int univ8250_console_setup(struct console *co, char *options)
/* link port to console */
port->cons = co;
- return serial8250_console_setup(port, options, false);
+ retval = serial8250_console_setup(port, options, false);
+ if (retval != 0)
+ port->cons = NULL;
+ return retval;
}
/**
@@ -687,7 +691,7 @@ static int __init univ8250_console_init(void)
}
console_initcall(univ8250_console_init);
-#define SERIAL8250_CONSOLE &univ8250_console
+#define SERIAL8250_CONSOLE (&univ8250_console)
#else
#define SERIAL8250_CONSOLE NULL
#endif
@@ -764,6 +768,7 @@ void serial8250_suspend_port(int line)
uart_suspend_port(&serial8250_reg, port);
}
+EXPORT_SYMBOL(serial8250_suspend_port);
/**
* serial8250_resume_port - resume one serial port
@@ -789,6 +794,7 @@ void serial8250_resume_port(int line)
}
uart_resume_port(&serial8250_reg, port);
}
+EXPORT_SYMBOL(serial8250_resume_port);
/*
* Register a set of serial devices attached to a platform device. The
@@ -1068,6 +1074,15 @@ void serial8250_unregister_port(int line)
struct uart_8250_port *uart = &serial8250_ports[line];
mutex_lock(&serial_mutex);
+
+ if (uart->em485) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&uart->port.lock, flags);
+ serial8250_em485_destroy(uart);
+ spin_unlock_irqrestore(&uart->port.lock, flags);
+ }
+
uart_remove_one_port(&serial8250_reg, &uart->port);
if (serial8250_isa_devs) {
uart->port.flags &= ~UPF_BOOT_AUTOCONF;
@@ -1093,9 +1108,8 @@ static int __init serial8250_init(void)
serial8250_isa_init_ports();
- printk(KERN_INFO "Serial: 8250/16550 driver, "
- "%d ports, IRQ sharing %sabled\n", nr_uarts,
- share_irqs ? "en" : "dis");
+ pr_info("Serial: 8250/16550 driver, %d ports, IRQ sharing %sabled\n",
+ nr_uarts, share_irqs ? "en" : "dis");
#ifdef CONFIG_SPARC
ret = sunserial_register_minors(&serial8250_reg, UART_NR);
@@ -1168,15 +1182,11 @@ static void __exit serial8250_exit(void)
module_init(serial8250_init);
module_exit(serial8250_exit);
-EXPORT_SYMBOL(serial8250_suspend_port);
-EXPORT_SYMBOL(serial8250_resume_port);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");
module_param(share_irqs, uint, 0644);
-MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
- " (unsafe)");
+MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices (unsafe)");
module_param(nr_uarts, uint, 0644);
MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
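
Two small hardening tweaks sit alongside the cleanups here: univ8250_console_setup() now clears port->cons again when setup fails, and the SERIAL8250_CONSOLE macro gains parentheses so it stays a single expression wherever it is expanded. A tiny illustration of why the parentheses matter (hypothetical demo, not kernel code):

#include <stdio.h>

struct console { int index; };
static struct console univ8250_console = { 1 };

/* parenthesized, as in the patch; without the parens the member access
 * below would parse as &(univ8250_console->index), applying -> to a
 * non-pointer and failing to compile */
#define SERIAL8250_CONSOLE (&univ8250_console)

int main(void)
{
	printf("%d\n", SERIAL8250_CONSOLE->index);
	return 0;
}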
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a5d319e4aae6..a3fb95d85d7c 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -68,12 +68,6 @@ struct dw8250_data {
unsigned int uart_16550_compatible:1;
};
-#define BYT_PRV_CLK 0x800
-#define BYT_PRV_CLK_EN (1 << 0)
-#define BYT_PRV_CLK_M_VAL_SHIFT 1
-#define BYT_PRV_CLK_N_VAL_SHIFT 16
-#define BYT_PRV_CLK_UPDATE (1 << 31)
-
static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
@@ -95,25 +89,45 @@ static void dw8250_force_idle(struct uart_port *p)
(void)p->serial_in(p, UART_RX);
}
-static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+static void dw8250_check_lcr(struct uart_port *p, int value)
{
- writeb(value, p->membase + (offset << p->regshift));
+ void __iomem *offset = p->membase + (UART_LCR << p->regshift);
+ int tries = 1000;
/* Make sure LCR write wasn't ignored */
- if (offset == UART_LCR) {
- int tries = 1000;
- while (tries--) {
- unsigned int lcr = p->serial_in(p, UART_LCR);
- if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
- return;
- dw8250_force_idle(p);
- writeb(value, p->membase + (UART_LCR << p->regshift));
- }
- /*
- * FIXME: this deadlocks if port->lock is already held
- * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
- */
+ while (tries--) {
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
+ return;
+
+ dw8250_force_idle(p);
+
+#ifdef CONFIG_64BIT
+ __raw_writeq(value & 0xff, offset);
+#else
+ if (p->iotype == UPIO_MEM32)
+ writel(value, offset);
+ else if (p->iotype == UPIO_MEM32BE)
+ iowrite32be(value, offset);
+ else
+ writeb(value, offset);
+#endif
}
+ /*
+ * FIXME: this deadlocks if port->lock is already held
+ * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+ */
+}
+
+static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+{
+ struct dw8250_data *d = p->private_data;
+
+ writeb(value, p->membase + (offset << p->regshift));
+
+ if (offset == UART_LCR && !d->uart_16550_compatible)
+ dw8250_check_lcr(p, value);
}
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
@@ -135,49 +149,26 @@ static unsigned int dw8250_serial_inq(struct uart_port *p, int offset)
static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
{
+ struct dw8250_data *d = p->private_data;
+
value &= 0xff;
__raw_writeq(value, p->membase + (offset << p->regshift));
/* Read back to ensure register write ordering. */
__raw_readq(p->membase + (UART_LCR << p->regshift));
- /* Make sure LCR write wasn't ignored */
- if (offset == UART_LCR) {
- int tries = 1000;
- while (tries--) {
- unsigned int lcr = p->serial_in(p, UART_LCR);
- if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
- return;
- dw8250_force_idle(p);
- __raw_writeq(value & 0xff,
- p->membase + (UART_LCR << p->regshift));
- }
- /*
- * FIXME: this deadlocks if port->lock is already held
- * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
- */
- }
+ if (offset == UART_LCR && !d->uart_16550_compatible)
+ dw8250_check_lcr(p, value);
}
#endif /* CONFIG_64BIT */
static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
{
+ struct dw8250_data *d = p->private_data;
+
writel(value, p->membase + (offset << p->regshift));
- /* Make sure LCR write wasn't ignored */
- if (offset == UART_LCR) {
- int tries = 1000;
- while (tries--) {
- unsigned int lcr = p->serial_in(p, UART_LCR);
- if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
- return;
- dw8250_force_idle(p);
- writel(value, p->membase + (UART_LCR << p->regshift));
- }
- /*
- * FIXME: this deadlocks if port->lock is already held
- * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
- */
- }
+ if (offset == UART_LCR && !d->uart_16550_compatible)
+ dw8250_check_lcr(p, value);
}
static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
@@ -187,14 +178,33 @@ static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
return dw8250_modify_msr(p, offset, value);
}
+static void dw8250_serial_out32be(struct uart_port *p, int offset, int value)
+{
+ struct dw8250_data *d = p->private_data;
+
+ iowrite32be(value, p->membase + (offset << p->regshift));
+
+ if (offset == UART_LCR && !d->uart_16550_compatible)
+ dw8250_check_lcr(p, value);
+}
+
+static unsigned int dw8250_serial_in32be(struct uart_port *p, int offset)
+{
+ unsigned int value = ioread32be(p->membase + (offset << p->regshift));
+
+ return dw8250_modify_msr(p, offset, value);
+}
+
static int dw8250_handle_irq(struct uart_port *p)
{
struct dw8250_data *d = p->private_data;
unsigned int iir = p->serial_in(p, UART_IIR);
- if (serial8250_handle_irq(p, iir)) {
+ if (serial8250_handle_irq(p, iir))
return 1;
- } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+
+ if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
/* Clear the USR */
(void)p->serial_in(p, d->usr_reg);
@@ -281,6 +291,11 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
data->skip_autocfg = true;
}
#endif
+ if (of_device_is_big_endian(p->dev->of_node)) {
+ p->iotype = UPIO_MEM32BE;
+ p->serial_in = dw8250_serial_in32be;
+ p->serial_out = dw8250_serial_out32be;
+ }
} else if (has_acpi_companion(p->dev)) {
p->iotype = UPIO_MEM32;
p->regshift = 2;
@@ -309,14 +324,20 @@ static void dw8250_setup_port(struct uart_port *p)
* If the Component Version Register returns zero, we know that
* ADDITIONAL_FEATURES are not enabled. No need to go any further.
*/
- reg = readl(p->membase + DW_UART_UCV);
+ if (p->iotype == UPIO_MEM32BE)
+ reg = ioread32be(p->membase + DW_UART_UCV);
+ else
+ reg = readl(p->membase + DW_UART_UCV);
if (!reg)
return;
dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
(reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
- reg = readl(p->membase + DW_UART_CPR);
+ if (p->iotype == UPIO_MEM32BE)
+ reg = ioread32be(p->membase + DW_UART_CPR);
+ else
+ reg = readl(p->membase + DW_UART_CPR);
if (!reg)
return;
@@ -463,10 +484,8 @@ static int dw8250_probe(struct platform_device *pdev)
dw8250_quirks(p, data);
/* If the Busy Functionality is not implemented, don't handle it */
- if (data->uart_16550_compatible) {
- p->serial_out = NULL;
+ if (data->uart_16550_compatible)
p->handle_irq = NULL;
- }
if (!data->skip_autocfg)
dw8250_setup_port(p);
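
The hunk above folds three copies of the DesignWare LCR workaround into a single dw8250_check_lcr() helper that all access widths share. As a rough, hedged illustration of the pattern (not the kernel code itself), the sketch below shows the write, read-back, force-idle, retry loop in plain C; reg_read(), reg_write() and force_idle() are hypothetical stand-ins for the iotype-specific accessors.

#include <stdbool.h>

#define UART_LCR_SPAR	0x20	/* stick parity bit, ignored in the compare */

extern unsigned int reg_read(int offset);		/* hypothetical */
extern void reg_write(int offset, unsigned int value);	/* hypothetical */
extern void force_idle(void);				/* hypothetical */

static bool lcr_write_verified(int lcr_offset, unsigned int value)
{
	int tries;

	reg_write(lcr_offset, value);
	for (tries = 0; tries < 1000; tries++) {
		unsigned int lcr = reg_read(lcr_offset);

		if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
			return true;	/* the write stuck */

		force_idle();		/* drain FIFOs, then try again */
		reg_write(lcr_offset, value);
	}
	return false;	/* caller decides how to report this */
}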
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index af62131af21e..8d08ff5c4e34 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -39,15 +39,17 @@
static unsigned int __init serial8250_early_in(struct uart_port *port, int offset)
{
+ offset <<= port->regshift;
+
switch (port->iotype) {
case UPIO_MEM:
return readb(port->membase + offset);
case UPIO_MEM16:
- return readw(port->membase + (offset << 1));
+ return readw(port->membase + offset);
case UPIO_MEM32:
- return readl(port->membase + (offset << 2));
+ return readl(port->membase + offset);
case UPIO_MEM32BE:
- return ioread32be(port->membase + (offset << 2));
+ return ioread32be(port->membase + offset);
case UPIO_PORT:
return inb(port->iobase + offset);
default:
@@ -57,18 +59,20 @@ static unsigned int __init serial8250_early_in(struct uart_port *port, int offse
static void __init serial8250_early_out(struct uart_port *port, int offset, int value)
{
+ offset <<= port->regshift;
+
switch (port->iotype) {
case UPIO_MEM:
writeb(value, port->membase + offset);
break;
case UPIO_MEM16:
- writew(value, port->membase + (offset << 1));
+ writew(value, port->membase + offset);
break;
case UPIO_MEM32:
- writel(value, port->membase + (offset << 2));
+ writel(value, port->membase + offset);
break;
case UPIO_MEM32BE:
- iowrite32be(value, port->membase + (offset << 2));
+ iowrite32be(value, port->membase + offset);
break;
case UPIO_PORT:
outb(value, port->iobase + offset);
@@ -145,3 +149,25 @@ EARLYCON_DECLARE(uart8250, early_serial8250_setup);
EARLYCON_DECLARE(uart, early_serial8250_setup);
OF_EARLYCON_DECLARE(ns16550, "ns16550", early_serial8250_setup);
OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
+OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup);
+
+#ifdef CONFIG_SERIAL_8250_OMAP
+
+static int __init early_omap8250_setup(struct earlycon_device *device,
+ const char *options)
+{
+ struct uart_port *port = &device->port;
+
+ if (!(device->port.membase || device->port.iobase))
+ return -ENODEV;
+
+ port->regshift = 2;
+ device->con->write = early_serial8250_write;
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
+OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
+OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
+
+#endif
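
The early-console change above replaces per-iotype hard-coded shifts with a single scaling by port->regshift, done once up front. A small, self-contained example of that offset arithmetic, under the usual assumption that regshift 0/1/2 corresponds to byte/half-word/word register spacing:

#include <stdio.h>

int main(void)
{
	const unsigned int uart_lcr = 3;	/* LCR index in the 8250 register map */
	unsigned int regshift;

	for (regshift = 0; regshift <= 2; regshift++)
		printf("regshift %u: LCR at byte offset 0x%x\n",
		       regshift, uart_lcr << regshift);
	return 0;	/* prints offsets 0x3, 0x6 and 0xc */
}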
diff --git a/drivers/tty/serial/8250/8250_exar_st16c554.c b/drivers/tty/serial/8250/8250_exar_st16c554.c
index bf53aabf9b5e..3a7cb8262bb9 100644
--- a/drivers/tty/serial/8250/8250_exar_st16c554.c
+++ b/drivers/tty/serial/8250/8250_exar_st16c554.c
@@ -13,20 +13,13 @@
#include <linux/init.h>
#include <linux/serial_8250.h>
-#define PORT(_base,_irq) \
- { \
- .iobase = _base, \
- .irq = _irq, \
- .uartclk = 1843200, \
- .iotype = UPIO_PORT, \
- .flags = UPF_BOOT_AUTOCONF, \
- }
+#include "8250.h"
static struct plat_serial8250_port exar_data[] = {
- PORT(0x100, 5),
- PORT(0x108, 5),
- PORT(0x110, 5),
- PORT(0x118, 5),
+ SERIAL8250_PORT(0x100, 5),
+ SERIAL8250_PORT(0x108, 5),
+ SERIAL8250_PORT(0x110, 5),
+ SERIAL8250_PORT(0x118, 5),
{ },
};
diff --git a/drivers/tty/serial/8250/8250_fourport.c b/drivers/tty/serial/8250/8250_fourport.c
index be1582609626..4045180a8cfc 100644
--- a/drivers/tty/serial/8250/8250_fourport.c
+++ b/drivers/tty/serial/8250/8250_fourport.c
@@ -10,24 +10,20 @@
#include <linux/init.h>
#include <linux/serial_8250.h>
-#define PORT(_base,_irq) \
- { \
- .iobase = _base, \
- .irq = _irq, \
- .uartclk = 1843200, \
- .iotype = UPIO_PORT, \
- .flags = UPF_BOOT_AUTOCONF | UPF_FOURPORT, \
- }
+#include "8250.h"
+
+#define SERIAL8250_FOURPORT(_base, _irq) \
+ SERIAL8250_PORT_FLAGS(_base, _irq, UPF_FOURPORT)
static struct plat_serial8250_port fourport_data[] = {
- PORT(0x1a0, 9),
- PORT(0x1a8, 9),
- PORT(0x1b0, 9),
- PORT(0x1b8, 9),
- PORT(0x2a0, 5),
- PORT(0x2a8, 5),
- PORT(0x2b0, 5),
- PORT(0x2b8, 5),
+ SERIAL8250_FOURPORT(0x1a0, 9),
+ SERIAL8250_FOURPORT(0x1a8, 9),
+ SERIAL8250_FOURPORT(0x1b0, 9),
+ SERIAL8250_FOURPORT(0x1b8, 9),
+ SERIAL8250_FOURPORT(0x2a0, 5),
+ SERIAL8250_FOURPORT(0x2a8, 5),
+ SERIAL8250_FOURPORT(0x2b0, 5),
+ SERIAL8250_FOURPORT(0x2b8, 5),
{ },
};
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 2e3ea1a70d7b..b1e6ae9f1ff9 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -42,7 +42,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
* the user what they're missing.
*/
if (parisc_parent(dev)->id.hw_type != HPHW_IOA)
- printk(KERN_INFO
+ dev_info(&dev->dev,
"Serial: device 0x%llx not configured.\n"
"Enable support for Wax, Lasi, Asp or Dino.\n",
(unsigned long long)dev->hpa.start);
@@ -66,8 +66,9 @@ static int __init serial_init_chip(struct parisc_device *dev)
err = serial8250_register_8250_port(&uart);
if (err < 0) {
- printk(KERN_WARNING
- "serial8250_register_8250_port returned error %d\n", err);
+ dev_warn(&dev->dev,
+ "serial8250_register_8250_port returned error %d\n",
+ err);
iounmap(uart.port.membase);
return err;
}
diff --git a/drivers/tty/serial/8250/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c
index 2891958cd842..38166db2b824 100644
--- a/drivers/tty/serial/8250/8250_hp300.c
+++ b/drivers/tty/serial/8250/8250_hp300.c
@@ -24,8 +24,7 @@
#endif
#ifdef CONFIG_HPAPCI
-struct hp300_port
-{
+struct hp300_port {
struct hp300_port *next; /* next port */
int line; /* line (tty) number */
};
@@ -111,7 +110,7 @@ int __init hp300_setup_serial_console(void)
/* Check for APCI console */
if (scode == 256) {
#ifdef CONFIG_HPAPCI
- printk(KERN_INFO "Serial console is HP APCI 1\n");
+ pr_info("Serial console is HP APCI 1\n");
port.uartclk = HPAPCI_BAUD_BASE * 16;
port.mapbase = (FRODO_BASE + FRODO_APCI_OFFSET(1));
@@ -119,7 +118,7 @@ int __init hp300_setup_serial_console(void)
port.regshift = 2;
add_preferred_console("ttyS", port.line, "9600n8");
#else
- printk(KERN_WARNING "Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n");
+ pr_warn("Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n");
return 0;
#endif
} else {
@@ -128,7 +127,7 @@ int __init hp300_setup_serial_console(void)
if (!pa)
return 0;
- printk(KERN_INFO "Serial console is HP DCA at select code %d\n", scode);
+ pr_info("Serial console is HP DCA at select code %d\n", scode);
port.uartclk = HPDCA_BAUD_BASE * 16;
port.mapbase = (pa + UART_OFFSET);
@@ -142,13 +141,13 @@ int __init hp300_setup_serial_console(void)
if (DIO_ID(pa + DIO_VIRADDRBASE) & 0x80)
add_preferred_console("ttyS", port.line, "9600n8");
#else
- printk(KERN_WARNING "Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n");
+ pr_warn("Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n");
return 0;
#endif
}
if (early_serial_setup(&port) < 0)
- printk(KERN_WARNING "hp300_setup_serial_console(): early_serial_setup() failed.\n");
+ pr_warn("%s: early_serial_setup() failed.\n", __func__);
return 0;
}
#endif /* CONFIG_SERIAL_8250_CONSOLE */
@@ -180,8 +179,9 @@ static int hpdca_init_one(struct dio_dev *d,
line = serial8250_register_8250_port(&uart);
if (line < 0) {
- printk(KERN_NOTICE "8250_hp300: register_serial() DCA scode %d"
- " irq %d failed\n", d->scode, uart.port.irq);
+ dev_notice(&d->dev,
+ "8250_hp300: register_serial() DCA scode %d irq %d failed\n",
+ d->scode, uart.port.irq);
return -ENOMEM;
}
@@ -249,8 +249,8 @@ static int __init hp300_8250_init(void)
/* Memory mapped I/O */
uart.port.iotype = UPIO_MEM;
- uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ \
- | UPF_BOOT_AUTOCONF;
+ uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ
+ | UPF_BOOT_AUTOCONF;
/* XXX - no interrupt support yet */
uart.port.irq = 0;
uart.port.uartclk = HPAPCI_BAUD_BASE * 16;
@@ -261,8 +261,9 @@ static int __init hp300_8250_init(void)
line = serial8250_register_8250_port(&uart);
if (line < 0) {
- printk(KERN_NOTICE "8250_hp300: register_serial() APCI"
- " %d irq %d failed\n", i, uart.port.irq);
+ dev_notice(uart.port.dev,
+ "8250_hp300: register_serial() APCI %d irq %d failed\n",
+ i, uart.port.irq);
kfree(port);
continue;
}
diff --git a/drivers/tty/serial/8250/8250_hub6.c b/drivers/tty/serial/8250/8250_hub6.c
index a5c778e83de0..27124e21eb96 100644
--- a/drivers/tty/serial/8250/8250_hub6.c
+++ b/drivers/tty/serial/8250/8250_hub6.c
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/serial_8250.h>
-#define HUB6(card,port) \
+#define HUB6(card, port) \
{ \
.iobase = 0x302, \
.irq = 3, \
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index d6e1ec9b4fde..b0677f610863 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -48,7 +48,7 @@ static const struct of_device_id of_match[];
#define UART_MCR_MDCE BIT(7)
#define UART_MCR_FCM BIT(6)
-#ifdef CONFIG_SERIAL_EARLYCON
+#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
static struct earlycon_device *early_device;
static uint8_t __init early_in(struct uart_port *port, int offset)
@@ -154,14 +154,18 @@ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
break;
case UART_IER:
- /* Enable receive timeout interrupt with the
- * receive line status interrupt */
+ /*
+ * Enable receive timeout interrupt with the receive line
+ * status interrupt.
+ */
value |= (value & 0x4) << 2;
break;
case UART_MCR:
- /* If we have enabled modem status IRQs we should enable modem
- * mode. */
+ /*
+ * If we have enabled modem status IRQs we should enable
+ * modem mode.
+ */
ier = p->serial_in(p, UART_IER);
if (ier & UART_IER_MSI)
diff --git a/drivers/tty/serial/8250/8250_moxa.c b/drivers/tty/serial/8250/8250_moxa.c
new file mode 100644
index 000000000000..26eb5393a263
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_moxa.c
@@ -0,0 +1,157 @@
+/*
+ * 8250_moxa.c - MOXA Smartio/Industio MUE multiport serial driver.
+ *
+ * Author: Mathieu OTHACEHE <m.othacehe@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "8250.h"
+
+#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
+#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
+#define PCI_DEVICE_ID_MOXA_CP104EL_A 0x1045
+#define PCI_DEVICE_ID_MOXA_CP114EL 0x1144
+#define PCI_DEVICE_ID_MOXA_CP116E_A_A 0x1160
+#define PCI_DEVICE_ID_MOXA_CP116E_A_B 0x1161
+#define PCI_DEVICE_ID_MOXA_CP118EL_A 0x1182
+#define PCI_DEVICE_ID_MOXA_CP118E_A_I 0x1183
+#define PCI_DEVICE_ID_MOXA_CP132EL 0x1322
+#define PCI_DEVICE_ID_MOXA_CP134EL_A 0x1342
+#define PCI_DEVICE_ID_MOXA_CP138E_A 0x1381
+#define PCI_DEVICE_ID_MOXA_CP168EL_A 0x1683
+
+#define MOXA_BASE_BAUD 921600
+#define MOXA_UART_OFFSET 0x200
+#define MOXA_BASE_BAR 1
+
+struct moxa8250_board {
+ unsigned int num_ports;
+ int line[0];
+};
+
+enum {
+ moxa8250_2p = 0,
+ moxa8250_4p,
+ moxa8250_8p
+};
+
+static struct moxa8250_board moxa8250_boards[] = {
+ [moxa8250_2p] = { .num_ports = 2},
+ [moxa8250_4p] = { .num_ports = 4},
+ [moxa8250_8p] = { .num_ports = 8},
+};
+
+static int moxa8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct uart_8250_port uart;
+ struct moxa8250_board *brd;
+ void __iomem *ioaddr;
+ resource_size_t baseaddr;
+ unsigned int i, nr_ports;
+ unsigned int offset;
+ int ret;
+
+ brd = &moxa8250_boards[id->driver_data];
+ nr_ports = brd->num_ports;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ brd = devm_kzalloc(&pdev->dev, sizeof(struct moxa8250_board) +
+ sizeof(unsigned int) * nr_ports, GFP_KERNEL);
+ if (!brd)
+		return -ENOMEM;
+	brd->num_ports = nr_ports;
+
+ memset(&uart, 0, sizeof(struct uart_8250_port));
+
+ uart.port.dev = &pdev->dev;
+ uart.port.irq = pdev->irq;
+ uart.port.uartclk = MOXA_BASE_BAUD * 16;
+ uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+
+ baseaddr = pci_resource_start(pdev, MOXA_BASE_BAR);
+ ioaddr = pcim_iomap(pdev, MOXA_BASE_BAR, 0);
+ if (!ioaddr)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_ports; i++) {
+
+ /*
+ * MOXA Smartio MUE boards with 4 ports have
+ * a different offset for port #3
+ */
+ if (nr_ports == 4 && i == 3)
+ offset = 7 * MOXA_UART_OFFSET;
+ else
+ offset = i * MOXA_UART_OFFSET;
+
+ uart.port.iotype = UPIO_MEM;
+ uart.port.iobase = 0;
+ uart.port.mapbase = baseaddr + offset;
+ uart.port.membase = ioaddr + offset;
+ uart.port.regshift = 0;
+
+ dev_dbg(&pdev->dev, "Setup PCI port: port %lx, irq %d, type %d\n",
+ uart.port.iobase, uart.port.irq, uart.port.iotype);
+
+ brd->line[i] = serial8250_register_8250_port(&uart);
+ if (brd->line[i] < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't register serial port %lx, irq %d, type %d, error %d\n",
+ uart.port.iobase, uart.port.irq,
+ uart.port.iotype, brd->line[i]);
+ break;
+ }
+ }
+
+ pci_set_drvdata(pdev, brd);
+ return 0;
+}
+
+static void moxa8250_remove(struct pci_dev *pdev)
+{
+ struct moxa8250_board *brd = pci_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < brd->num_ports; i++)
+ serial8250_unregister_port(brd->line[i]);
+}
+
+#define MOXA_DEVICE(id, data) { PCI_VDEVICE(MOXA, id), (kernel_ulong_t)data }
+
+static const struct pci_device_id pci_ids[] = {
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP102E, moxa8250_2p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP102EL, moxa8250_2p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP104EL_A, moxa8250_4p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP114EL, moxa8250_4p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP116E_A_A, moxa8250_8p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP116E_A_B, moxa8250_8p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP118EL_A, moxa8250_8p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP118E_A_I, moxa8250_8p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP132EL, moxa8250_2p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP134EL_A, moxa8250_4p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP138E_A, moxa8250_8p),
+ MOXA_DEVICE(PCI_DEVICE_ID_MOXA_CP168EL_A, moxa8250_8p),
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, pci_ids);
+
+static struct pci_driver moxa8250_pci_driver = {
+ .name = "8250_moxa",
+ .id_table = pci_ids,
+ .probe = moxa8250_probe,
+ .remove = moxa8250_remove,
+};
+
+module_pci_driver(moxa8250_pci_driver);
+
+MODULE_AUTHOR("Mathieu OTHACEHE");
+MODULE_DESCRIPTION("MOXA SmartIO MUE driver");
+MODULE_LICENSE("GPL v2");
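
A quick standalone check of the offset quirk in moxa8250_probe(): on 4-port MUE boards the fourth UART sits at the eighth 0x200 slot rather than the fourth. This prints the BAR offsets the probe loop computes:

#include <stdio.h>

#define MOXA_UART_OFFSET 0x200

int main(void)
{
	const unsigned int nr_ports = 4;
	unsigned int i, offset;

	for (i = 0; i < nr_ports; i++) {
		/* 4-port boards place port #3 at slot 7 */
		offset = (nr_ports == 4 && i == 3) ? 7 * MOXA_UART_OFFSET
						   : i * MOXA_UART_OFFSET;
		printf("port %u at BAR offset 0x%x\n", i, offset);
	}
	return 0;	/* prints 0x0, 0x200, 0x400, 0xe00 */
}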
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 0e590b233f03..3489fbcb7313 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -16,7 +16,7 @@
*/
#include <linux/clk.h>
#include <linux/io.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -41,12 +41,10 @@ static void
mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+ struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
unsigned int baud, quot;
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
-
serial8250_do_set_termios(port, termios, old);
/*
@@ -116,7 +114,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
tty_termios_encode_baud_rate(termios, baud, baud);
}
-static int mtk8250_runtime_suspend(struct device *dev)
+static int __maybe_unused mtk8250_runtime_suspend(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
@@ -126,7 +124,7 @@ static int mtk8250_runtime_suspend(struct device *dev)
return 0;
}
-static int mtk8250_runtime_resume(struct device *dev)
+static int __maybe_unused mtk8250_runtime_resume(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
int err;
@@ -245,8 +243,24 @@ static int mtk8250_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int mtk8250_suspend(struct device *dev)
+static int mtk8250_remove(struct platform_device *pdev)
+{
+ struct mtk8250_data *data = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ serial8250_unregister_port(data->line);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ mtk8250_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused mtk8250_suspend(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
@@ -255,7 +269,7 @@ static int mtk8250_suspend(struct device *dev)
return 0;
}
-static int mtk8250_resume(struct device *dev)
+static int __maybe_unused mtk8250_resume(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
@@ -263,7 +277,6 @@ static int mtk8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops mtk8250_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mtk8250_suspend, mtk8250_resume)
@@ -275,20 +288,20 @@ static const struct of_device_id mtk8250_of_match[] = {
{ .compatible = "mediatek,mt6577-uart" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mtk8250_of_match);
static struct platform_driver mtk8250_platform_driver = {
.driver = {
- .name = "mt6577-uart",
- .pm = &mtk8250_pm_ops,
- .of_match_table = mtk8250_of_match,
- .suppress_bind_attrs = true,
-
+ .name = "mt6577-uart",
+ .pm = &mtk8250_pm_ops,
+ .of_match_table = mtk8250_of_match,
},
.probe = mtk8250_probe,
+ .remove = mtk8250_remove,
};
-builtin_platform_driver(mtk8250_platform_driver);
+module_platform_driver(mtk8250_platform_driver);
-#ifdef CONFIG_SERIAL_8250_CONSOLE
+#if defined(CONFIG_SERIAL_8250_CONSOLE) && !defined(MODULE)
static int __init early_mtk8250_setup(struct earlycon_device *device,
const char *options)
{
@@ -302,3 +315,7 @@ static int __init early_mtk8250_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(mtk8250, "mediatek,mt6577-uart", early_mtk8250_setup);
#endif
+
+MODULE_AUTHOR("Matthias Brugger");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Mediatek 8250 serial port driver");
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 33021c1f7d55..c7ed3d2bc8b2 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -335,6 +335,7 @@ static struct platform_driver of_platform_serial_driver = {
.driver = {
.name = "of_serial",
.of_match_table = of_platform_serial_table,
+ .pm = &of_serial_pm_ops,
},
.probe = of_platform_serial_probe,
.remove = of_platform_serial_remove,
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index a2c0734c76e2..6f760510e46d 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -318,8 +318,7 @@ static void omap_8250_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
+ struct uart_8250_port *up = up_to_u8250p(port);
struct omap8250_priv *priv = up->port.private_data;
unsigned char cval = 0;
unsigned int baud;
@@ -682,9 +681,8 @@ static void omap_8250_shutdown(struct uart_port *port)
static void omap_8250_throttle(struct uart_port *port)
{
+ struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
pm_runtime_get_sync(port->dev);
@@ -697,11 +695,40 @@ static void omap_8250_throttle(struct uart_port *port)
pm_runtime_put_autosuspend(port->dev);
}
+static int omap_8250_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ /* Clamp the delays to [0, 100ms] */
+ rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+ rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
+ port->rs485 = *rs485;
+
+ /*
+ * Both serial8250_em485_init and serial8250_em485_destroy
+ * are idempotent
+ */
+ if (rs485->flags & SER_RS485_ENABLED) {
+ int ret = serial8250_em485_init(up);
+
+ if (ret) {
+ rs485->flags &= ~SER_RS485_ENABLED;
+ port->rs485.flags &= ~SER_RS485_ENABLED;
+ }
+ return ret;
+ }
+
+ serial8250_em485_destroy(up);
+
+ return 0;
+}
+
static void omap_8250_unthrottle(struct uart_port *port)
{
+ struct uart_8250_port *up = up_to_u8250p(port);
unsigned long flags;
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
pm_runtime_get_sync(port->dev);
@@ -1146,6 +1173,7 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.shutdown = omap_8250_shutdown;
up.port.throttle = omap_8250_throttle;
up.port.unthrottle = omap_8250_unthrottle;
+ up.port.rs485_config = omap_8250_rs485_config;
if (pdev->dev.of_node) {
const struct of_device_id *id;
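
For context, the new omap rs485_config hook is typically reached from user space through the standard TIOCSRS485 ioctl; the driver-side callback above then clamps the delays and switches the software emulation on or off. A hedged sketch of that consumer side follows; the device path is illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485 = { 0 };
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* illustrative path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	rs485.delay_rts_before_send = 1;	/* ms; the driver clamps to 100 */
	rs485.delay_rts_after_send = 1;		/* ms; the driver clamps to 100 */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0)
		perror("TIOCSRS485");
	return 0;
}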
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 7cd6f9a90542..98862aa5bb58 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -55,7 +55,6 @@ struct pci_serial_quirk {
struct serial_private {
struct pci_dev *dev;
unsigned int nr;
- void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES];
struct pci_serial_quirk *quirk;
int line[0];
};
@@ -85,15 +84,13 @@ setup_port(struct serial_private *priv, struct uart_8250_port *port,
return -EINVAL;
if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
- if (!priv->remapped_bar[bar])
- priv->remapped_bar[bar] = pci_ioremap_bar(dev, bar);
- if (!priv->remapped_bar[bar])
+ if (!pcim_iomap(dev, bar, 0) && !pcim_iomap_table(dev))
return -ENOMEM;
port->port.iotype = UPIO_MEM;
port->port.iobase = 0;
port->port.mapbase = pci_resource_start(dev, bar) + offset;
- port->port.membase = priv->remapped_bar[bar] + offset;
+ port->port.membase = pcim_iomap_table(dev)[bar] + offset;
port->port.regshift = regshift;
} else {
port->port.iotype = UPIO_PORT;
@@ -721,7 +718,7 @@ static int pci_ni8430_init(struct pci_dev *dev)
*/
pcibios_resource_to_bus(dev->bus, &region, &dev->resource[bar]);
device_window = ((region.start + MITE_IOWBSR1_WIN_OFFSET) & 0xffffff00)
- | MITE_IOWBSR1_WENAB | MITE_IOWBSR1_WSIZE;
+ | MITE_IOWBSR1_WENAB | MITE_IOWBSR1_WSIZE;
writel(device_window, p + MITE_IOWBSR1);
/* Set window access to go to RAMSEL IO address space */
@@ -803,12 +800,12 @@ static int pci_netmos_9900_numports(struct pci_dev *dev)
unsigned int pi;
unsigned short sub_serports;
- pi = (c & 0xff);
+ pi = c & 0xff;
- if (pi == 2) {
+ if (pi == 2)
return 1;
- } else if ((pi == 0) &&
- (dev->device == PCI_DEVICE_ID_NETMOS_9900)) {
+
+ if ((pi == 0) && (dev->device == PCI_DEVICE_ID_NETMOS_9900)) {
/* two possibilities: 0x30ps encodes number of parallel and
* serial ports, or 0x1000 indicates *something*. This is not
* immediately obvious, since the 2s1p+4s configuration seems
@@ -816,12 +813,12 @@ static int pci_netmos_9900_numports(struct pci_dev *dev)
* advertising the same function 3 as the 4s+2s1p config.
*/
sub_serports = dev->subsystem_device & 0xf;
- if (sub_serports > 0) {
+ if (sub_serports > 0)
return sub_serports;
- } else {
- dev_err(&dev->dev, "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
- return 0;
- }
+
+ dev_err(&dev->dev,
+ "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
+ return 0;
}
moan_device("unknown NetMos/Mostech program interface", dev);
@@ -842,21 +839,21 @@ static int pci_netmos_init(struct pci_dev *dev)
return 0;
switch (dev->device) { /* FALLTHROUGH on all */
- case PCI_DEVICE_ID_NETMOS_9904:
- case PCI_DEVICE_ID_NETMOS_9912:
- case PCI_DEVICE_ID_NETMOS_9922:
- case PCI_DEVICE_ID_NETMOS_9900:
- num_serial = pci_netmos_9900_numports(dev);
- break;
+ case PCI_DEVICE_ID_NETMOS_9904:
+ case PCI_DEVICE_ID_NETMOS_9912:
+ case PCI_DEVICE_ID_NETMOS_9922:
+ case PCI_DEVICE_ID_NETMOS_9900:
+ num_serial = pci_netmos_9900_numports(dev);
+ break;
- default:
- if (num_serial == 0 ) {
- moan_device("unknown NetMos/Mostech device", dev);
- }
+ default:
+ break;
}
- if (num_serial == 0)
+ if (num_serial == 0) {
+ moan_device("unknown NetMos/Mostech device", dev);
return -ENODEV;
+ }
return num_serial;
}
@@ -1198,8 +1195,9 @@ static int pci_quatech_has_qmcr(struct uart_8250_port *port)
static int pci_quatech_test(struct uart_8250_port *port)
{
- u8 reg;
- u8 qopr = pci_quatech_rqopr(port);
+ u8 reg, qopr;
+
+ qopr = pci_quatech_rqopr(port);
pci_quatech_wqopr(port, qopr & QPCR_TEST_FOR1);
reg = pci_quatech_rqopr(port) & 0xC0;
if (reg != QPCR_TEST_GET1)
@@ -1286,6 +1284,7 @@ static int pci_quatech_init(struct pci_dev *dev)
unsigned long base = pci_resource_start(dev, 0);
if (base) {
u32 tmp;
+
outl(inl(base + 0x38) | 0x00002000, base + 0x38);
tmp = inl(base + 0x3c);
outl(tmp | 0x01000000, base + 0x3c);
@@ -1334,29 +1333,6 @@ static int pci_default_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
-static int pci_pericom_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
-{
- unsigned int bar, offset = board->first_offset, maxnr;
-
- bar = FL_GET_BASE(board->flags);
- if (board->flags & FL_BASE_BARS)
- bar += idx;
- else
- offset += idx * board->uart_offset;
-
- maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
- (board->reg_shift + 3);
-
- if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
- return 1;
-
- port->port.uartclk = 14745600;
-
- return setup_port(priv, port, bar, offset, board->reg_shift);
-}
-
static int
ce4100_serial_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1541,10 +1517,9 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
static int pci_fintek_rs485_config(struct uart_port *port,
struct serial_rs485 *rs485)
{
+ struct pci_dev *pci_dev = to_pci_dev(port->dev);
u8 setting;
u8 *index = (u8 *) port->private_data;
- struct pci_dev *pci_dev = container_of(port->dev, struct pci_dev,
- dev);
pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting);
@@ -1766,7 +1741,7 @@ xr17v35x_has_slave(struct serial_private *priv)
const int dev_id = priv->dev->device;
return ((dev_id == PCI_DEVICE_ID_EXAR_XR17V4358) ||
- (dev_id == PCI_DEVICE_ID_EXAR_XR17V8358));
+ (dev_id == PCI_DEVICE_ID_EXAR_XR17V8358));
}
static int
@@ -1866,8 +1841,8 @@ pci_fastcom335_setup(struct serial_private *priv,
static int
pci_wch_ch353_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
{
port->port.flags |= UPF_FIXED_TYPE;
port->port.type = PORT_16550A;
@@ -1876,8 +1851,8 @@ pci_wch_ch353_setup(struct serial_private *priv,
static int
pci_wch_ch38x_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
{
port->port.flags |= UPF_FIXED_TYPE;
port->port.type = PORT_16850;
@@ -2246,16 +2221,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.exit = pci_plx9050_exit,
},
/*
- * Pericom
- */
- {
- .vendor = PCI_VENDOR_ID_PERICOM,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup,
- },
- /*
* PLX
*/
{
@@ -3733,15 +3698,10 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 921600,
.reg_shift = 2,
},
- /*
- * Intel BayTrail HSUART reference clock is 44.2368 MHz at power-on,
- * but is overridden by byt_set_termios.
- */
[pbn_byt] = {
.flags = FL_BASE0,
.num_ports = 1,
.base_baud = 2764800,
- .uart_offset = 0x80,
.reg_shift = 2,
},
[pbn_qrk] = {
@@ -3840,6 +3800,20 @@ static const struct pci_device_id blacklist[] = {
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
{ PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
+ /* Moxa Smartio MUE boards handled by 8250_moxa */
+ { PCI_VDEVICE(MOXA, 0x1024), },
+ { PCI_VDEVICE(MOXA, 0x1025), },
+ { PCI_VDEVICE(MOXA, 0x1045), },
+ { PCI_VDEVICE(MOXA, 0x1144), },
+ { PCI_VDEVICE(MOXA, 0x1160), },
+ { PCI_VDEVICE(MOXA, 0x1161), },
+ { PCI_VDEVICE(MOXA, 0x1182), },
+ { PCI_VDEVICE(MOXA, 0x1183), },
+ { PCI_VDEVICE(MOXA, 0x1322), },
+ { PCI_VDEVICE(MOXA, 0x1342), },
+ { PCI_VDEVICE(MOXA, 0x1381), },
+ { PCI_VDEVICE(MOXA, 0x1683), },
+
/* Intel platforms with MID UART */
{ PCI_VDEVICE(INTEL, 0x081b), },
{ PCI_VDEVICE(INTEL, 0x081c), },
@@ -4027,12 +4001,6 @@ void pciserial_remove_ports(struct serial_private *priv)
for (i = 0; i < priv->nr; i++)
serial8250_unregister_port(priv->line[i]);
- for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
- if (priv->remapped_bar[i])
- iounmap(priv->remapped_bar[i]);
- priv->remapped_bar[i] = NULL;
- }
-
/*
* Find the exit quirks.
*/
@@ -4104,7 +4072,7 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
board = &pci_boards[ent->driver_data];
- rc = pci_enable_device(dev);
+ rc = pcim_enable_device(dev);
pci_save_state(dev);
if (rc)
return rc;
@@ -4123,7 +4091,7 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
*/
rc = serial_pci_guess_board(dev, &tmp);
if (rc)
- goto disable;
+ return rc;
} else {
/*
* We matched an explicit entry. If we are able to
@@ -4139,16 +4107,11 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
}
priv = pciserial_init_ports(dev, board);
- if (!IS_ERR(priv)) {
- pci_set_drvdata(dev, priv);
- return 0;
- }
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- rc = PTR_ERR(priv);
-
- disable:
- pci_disable_device(dev);
- return rc;
+ pci_set_drvdata(dev, priv);
+ return 0;
}
static void pciserial_remove_one(struct pci_dev *dev)
@@ -4156,8 +4119,6 @@ static void pciserial_remove_one(struct pci_dev *dev)
struct serial_private *priv = pci_get_drvdata(dev);
pciserial_remove_ports(priv);
-
- pci_disable_device(dev);
}
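
The probe/remove changes above move 8250_pci onto managed (devres) PCI helpers, which is why the error unwind and the remapped_bar bookkeeping could be deleted outright. A minimal sketch of the pattern, assuming a hypothetical example_probe() with the board-specific setup elided:

#include <linux/pci.h>

static int example_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	void __iomem *base;
	int rc;

	rc = pcim_enable_device(dev);	/* devres: undone on driver detach */
	if (rc)
		return rc;

	if (!pcim_iomap(dev, 0, 0))	/* map BAR 0, full length */
		return -ENOMEM;		/* no manual unwind required */

	base = pcim_iomap_table(dev)[0];
	/* ... register ports against base; remove() only unregisters ... */
	(void)base;
	return 0;
}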
#ifdef CONFIG_PM_SLEEP
@@ -4538,7 +4499,7 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_921600 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI958,
- PCI_ANY_ID , PCI_ANY_ID, 0, 0,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b2_8_1152000 },
/*
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 658b392d1170..34f05ed78b68 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -357,8 +357,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
/* Fujitsu Wacom 1FGT Tablet PC device */
{ "FUJ02E9", 0 },
/*
- * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
- * disguise)
+ * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6
+ * in disguise).
*/
{ "LTS0001", 0 },
/* Rockwell's (PORALiNK) 33600 INT PNP */
@@ -367,12 +367,14 @@ static const struct pnp_device_id pnp_dev_table[] = {
{ "PNPCXXX", UNKNOWN_DEV },
/* More unknown PnP modems */
{ "PNPDXXX", UNKNOWN_DEV },
- /* Winbond CIR port, should not be probed. We should keep track
- of it to prevent the legacy serial driver from probing it */
+ /*
+ * Winbond CIR port, should not be probed. We should keep track of
+ * it to prevent the legacy serial driver from probing it.
+ */
{ "WEC1022", CIR_PORT },
/*
- * SMSC IrCC SIR/FIR port, should not be probed by serial driver
- * as well so its own driver can bind to it.
+ * SMSC IrCC SIR/FIR port, should not be probed by serial driver as
+ * well so its own driver can bind to it.
*/
{ "SMCF010", CIR_PORT },
{ "", 0 }
@@ -380,35 +382,35 @@ static const struct pnp_device_id pnp_dev_table[] = {
MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
-static char *modem_names[] = {
+static const char *modem_names[] = {
"MODEM", "Modem", "modem", "FAX", "Fax", "fax",
"56K", "56k", "K56", "33.6", "28.8", "14.4",
"33,600", "28,800", "14,400", "33.600", "28.800", "14.400",
"33600", "28800", "14400", "V.90", "V.34", "V.32", NULL
};
-static int check_name(char *name)
+static bool check_name(const char *name)
{
- char **tmp;
+ const char **tmp;
for (tmp = modem_names; *tmp; tmp++)
if (strstr(name, *tmp))
- return 1;
+ return true;
- return 0;
+ return false;
}
-static int check_resources(struct pnp_dev *dev)
+static bool check_resources(struct pnp_dev *dev)
{
- resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8};
- int i;
+ static const resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8};
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(base); i++) {
if (pnp_possible_config(dev, IORESOURCE_IO, base[i], 8))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
@@ -425,8 +427,8 @@ static int check_resources(struct pnp_dev *dev)
static int serial_pnp_guess_board(struct pnp_dev *dev)
{
if (!(check_name(pnp_dev_name(dev)) ||
- (dev->card && check_name(dev->card->name))))
- return -ENODEV;
+ (dev->card && check_name(dev->card->name))))
+ return -ENODEV;
if (check_resources(dev))
return 0;
@@ -462,11 +464,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
} else
return -ENODEV;
-#ifdef SERIAL_DEBUG_PNP
- printk(KERN_DEBUG
- "Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n",
- uart.port.iobase, uart.port.mapbase, uart.port.irq, uart.port.iotype);
-#endif
+ dev_dbg(&dev->dev,
+ "Setup PNP port: port %lx, mem %pa, irq %d, type %d\n",
+ uart.port.iobase, &uart.port.mapbase,
+ uart.port.irq, uart.port.iotype);
+
if (flags & CIR_PORT) {
uart.port.flags |= UPF_FIXED_PORT | UPF_FIXED_TYPE;
uart.port.type = PORT_8250_CIR;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8d262bce97e4..e213da01a3d7 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -37,6 +37,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
+#include <linux/timer.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -52,7 +53,7 @@
#define DEBUG_AUTOCONF(fmt...) do { } while (0)
#endif
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
/*
* Here we define the default xmit fifo size used for each type of UART.
@@ -250,9 +251,11 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_AFE,
},
-/* tx_loadsz is set to 63-bytes instead of 64-bytes to implement
-workaround of errata A-008006 which states that tx_loadsz should be
-configured less than Maximum supported fifo bytes */
+ /*
+ * tx_loadsz is set to 63-bytes instead of 64-bytes to implement
+ * workaround of errata A-008006 which states that tx_loadsz should
+ * be configured less than Maximum supported fifo bytes.
+ */
[PORT_16550A_FSL64] = {
.name = "16550A_FSL64",
.fifo_size = 64,
@@ -522,6 +525,20 @@ static void serial8250_clear_fifos(struct uart_8250_port *p)
}
}
+static inline void serial8250_em485_rts_after_send(struct uart_8250_port *p)
+{
+ unsigned char mcr = serial_in(p, UART_MCR);
+
+ if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
+ mcr |= UART_MCR_RTS;
+ else
+ mcr &= ~UART_MCR_RTS;
+ serial_out(p, UART_MCR, mcr);
+}
+
+static void serial8250_em485_handle_start_tx(unsigned long arg);
+static void serial8250_em485_handle_stop_tx(unsigned long arg);
+
void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
{
serial8250_clear_fifos(p);
@@ -546,6 +563,73 @@ void serial8250_rpm_put(struct uart_8250_port *p)
}
EXPORT_SYMBOL_GPL(serial8250_rpm_put);
+/**
+ * serial8250_em485_init() - put uart_8250_port into rs485 emulation mode
+ * @p: uart_8250_port instance
+ *
+ * The function is used to start rs485 software emulation on the
+ * &struct uart_8250_port* @p. Namely, RTS is switched before/after
+ * transmission. The function is idempotent, so it is safe to call it
+ * multiple times.
+ *
+ * The caller MUST enable an interrupt on an empty shift register before
+ * calling serial8250_em485_init(). This interrupt is not part of the
+ * 8250 standard, but is implementation defined.
+ *
+ * The function is supposed to be called from the .rs485_config callback
+ * or from any other callback protected with the p->port.lock spinlock.
+ *
+ * See also serial8250_em485_destroy().
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int serial8250_em485_init(struct uart_8250_port *p)
+{
+ if (p->em485 != NULL)
+ return 0;
+
+ p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_ATOMIC);
+ if (p->em485 == NULL)
+ return -ENOMEM;
+
+ setup_timer(&p->em485->stop_tx_timer,
+ serial8250_em485_handle_stop_tx, (unsigned long)p);
+ setup_timer(&p->em485->start_tx_timer,
+ serial8250_em485_handle_start_tx, (unsigned long)p);
+ p->em485->active_timer = NULL;
+
+ serial8250_em485_rts_after_send(p);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_em485_init);
+
+/**
+ * serial8250_em485_destroy() - put uart_8250_port into normal state
+ * @p: uart_8250_port port instance
+ *
+ * The function is used to stop rs485 software emulation on the
+ * &struct uart_8250_port* @p. The function is idempotent, so it is safe to
+ * call it multiple times.
+ *
+ * The function is supposed to be called from the .rs485_config callback
+ * or from any other callback protected with the p->port.lock spinlock.
+ *
+ * See also serial8250_em485_init().
+ */
+void serial8250_em485_destroy(struct uart_8250_port *p)
+{
+ if (p->em485 == NULL)
+ return;
+
+ del_timer(&p->em485->start_tx_timer);
+ del_timer(&p->em485->stop_tx_timer);
+
+ kfree(p->em485);
+ p->em485 = NULL;
+}
+EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
+
/*
* These two wrappers ensure that enable_runtime_pm_tx() can be called more than
* once and disable_runtime_pm_tx() will still disable RPM because the fifo is
@@ -731,22 +815,16 @@ static int size_fifo(struct uart_8250_port *up)
*/
static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
{
- unsigned char old_dll, old_dlm, old_lcr;
- unsigned int id;
+ unsigned char old_lcr;
+ unsigned int id, old_dl;
old_lcr = serial_in(p, UART_LCR);
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+ old_dl = serial_dl_read(p);
+ serial_dl_write(p, 0);
+ id = serial_dl_read(p);
+ serial_dl_write(p, old_dl);
- old_dll = serial_in(p, UART_DLL);
- old_dlm = serial_in(p, UART_DLM);
-
- serial_out(p, UART_DLL, 0);
- serial_out(p, UART_DLM, 0);
-
- id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
- serial_out(p, UART_DLL, old_dll);
- serial_out(p, UART_DLM, old_dlm);
serial_out(p, UART_LCR, old_lcr);
return id;
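
The autoconfig hunk above replaces raw DLL/DLM register pairs with the serial_dl_read()/serial_dl_write() helpers. The byte composition those helpers encapsulate is just the following:

#include <assert.h>

int main(void)
{
	unsigned int dll = 0x8e, dlm = 0x01;	/* low and high divisor bytes */
	unsigned int dl = dll | dlm << 8;	/* what serial_dl_read() yields */

	assert(dl == 0x018e);			/* a 16-bit divisor of 398 */
	return 0;
}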
@@ -1238,8 +1316,7 @@ static void autoconfig(struct uart_8250_port *up)
out_lock:
spin_unlock_irqrestore(&port->lock, flags);
if (up->capabilities != old_capabilities) {
- printk(KERN_WARNING
- "ttyS%d: detected caps %08x should be %08x\n",
+ pr_warn("ttyS%d: detected caps %08x should be %08x\n",
serial_index(port), old_capabilities,
up->capabilities);
}
@@ -1304,7 +1381,69 @@ static void autoconfig_irq(struct uart_8250_port *up)
port->irq = (irq > 0) ? irq : 0;
}
-static inline void __stop_tx(struct uart_8250_port *p)
+static void serial8250_stop_rx(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ serial8250_rpm_get(up);
+
+ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+ up->port.read_status_mask &= ~UART_LSR_DR;
+ serial_port_out(port, UART_IER, up->ier);
+
+ serial8250_rpm_put(up);
+}
+
+static void __do_stop_tx_rs485(struct uart_8250_port *p)
+{
+ if (!p->em485)
+ return;
+
+ serial8250_em485_rts_after_send(p);
+ /*
+ * Empty the RX FIFO, we are not interested in anything
+	 * Empty the RX FIFO; we are not interested in anything
+ */
+ if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
+ serial8250_clear_fifos(p);
+}
+
+static void serial8250_em485_handle_stop_tx(unsigned long arg)
+{
+ struct uart_8250_port *p = (struct uart_8250_port *)arg;
+ struct uart_8250_em485 *em485 = p->em485;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->port.lock, flags);
+ if (em485 &&
+ em485->active_timer == &em485->stop_tx_timer) {
+ __do_stop_tx_rs485(p);
+ em485->active_timer = NULL;
+ }
+ spin_unlock_irqrestore(&p->port.lock, flags);
+}
+
+static void __stop_tx_rs485(struct uart_8250_port *p)
+{
+ struct uart_8250_em485 *em485 = p->em485;
+
+ if (!em485)
+ return;
+
+ /*
+ * __do_stop_tx_rs485 is going to set RTS according to config
+ * AND flush RX FIFO if required.
+ */
+ if (p->port.rs485.delay_rts_after_send > 0) {
+ em485->active_timer = &em485->stop_tx_timer;
+ mod_timer(&em485->stop_tx_timer, jiffies +
+ p->port.rs485.delay_rts_after_send * HZ / 1000);
+ } else {
+ __do_stop_tx_rs485(p);
+ }
+}
+
+static inline void __do_stop_tx(struct uart_8250_port *p)
{
if (p->ier & UART_IER_THRI) {
p->ier &= ~UART_IER_THRI;
@@ -1313,6 +1452,28 @@ static inline void __stop_tx(struct uart_8250_port *p)
}
}
+static inline void __stop_tx(struct uart_8250_port *p)
+{
+ struct uart_8250_em485 *em485 = p->em485;
+
+ if (em485) {
+ unsigned char lsr = serial_in(p, UART_LSR);
+ /*
+		 * To provide the required timing and allow FIFO transfer,
+		 * __stop_tx_rs485 must be called only when both FIFO and
+		 * shift register are empty. It is up to the device driver
+		 * to enable the interrupt on TEMT.
+ */
+ if ((lsr & BOTH_EMPTY) != BOTH_EMPTY)
+ return;
+
+ del_timer(&em485->start_tx_timer);
+ em485->active_timer = NULL;
+ }
+ __do_stop_tx(p);
+ __stop_tx_rs485(p);
+}
+
static void serial8250_stop_tx(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -1330,12 +1491,10 @@ static void serial8250_stop_tx(struct uart_port *port)
serial8250_rpm_put(up);
}
-static void serial8250_start_tx(struct uart_port *port)
+static inline void __start_tx(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
- serial8250_rpm_get_tx(up);
-
if (up->dma && !up->dma->tx_dma(up))
return;
@@ -1345,6 +1504,7 @@ static void serial8250_start_tx(struct uart_port *port)
if (up->bugs & UART_BUG_TXEN) {
unsigned char lsr;
+
lsr = serial_in(up, UART_LSR);
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
if (lsr & UART_LSR_THRE)
@@ -1361,33 +1521,83 @@ static void serial8250_start_tx(struct uart_port *port)
}
}
-static void serial8250_throttle(struct uart_port *port)
+static inline void start_tx_rs485(struct uart_port *port)
{
- port->throttle(port);
+ struct uart_8250_port *up = up_to_u8250p(port);
+ struct uart_8250_em485 *em485 = up->em485;
+ unsigned char mcr;
+
+ if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
+ serial8250_stop_rx(&up->port);
+
+ del_timer(&em485->stop_tx_timer);
+ em485->active_timer = NULL;
+
+ mcr = serial_in(up, UART_MCR);
+ if (!!(up->port.rs485.flags & SER_RS485_RTS_ON_SEND) !=
+ !!(mcr & UART_MCR_RTS)) {
+ if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
+ mcr |= UART_MCR_RTS;
+ else
+ mcr &= ~UART_MCR_RTS;
+ serial_out(up, UART_MCR, mcr);
+
+ if (up->port.rs485.delay_rts_before_send > 0) {
+ em485->active_timer = &em485->start_tx_timer;
+ mod_timer(&em485->start_tx_timer, jiffies +
+ up->port.rs485.delay_rts_before_send * HZ / 1000);
+ return;
+ }
+ }
+
+ __start_tx(port);
}
-static void serial8250_unthrottle(struct uart_port *port)
+static void serial8250_em485_handle_start_tx(unsigned long arg)
{
- port->unthrottle(port);
+ struct uart_8250_port *p = (struct uart_8250_port *)arg;
+ struct uart_8250_em485 *em485 = p->em485;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->port.lock, flags);
+ if (em485 &&
+ em485->active_timer == &em485->start_tx_timer) {
+ __start_tx(&p->port);
+ em485->active_timer = NULL;
+ }
+ spin_unlock_irqrestore(&p->port.lock, flags);
}
-static void serial8250_stop_rx(struct uart_port *port)
+static void serial8250_start_tx(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
+ struct uart_8250_em485 *em485 = up->em485;
- serial8250_rpm_get(up);
+ serial8250_rpm_get_tx(up);
- up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
- up->port.read_status_mask &= ~UART_LSR_DR;
- serial_port_out(port, UART_IER, up->ier);
+ if (em485 &&
+ em485->active_timer == &em485->start_tx_timer)
+ return;
- serial8250_rpm_put(up);
+ if (em485)
+ start_tx_rs485(port);
+ else
+ __start_tx(port);
+}
+
+static void serial8250_throttle(struct uart_port *port)
+{
+ port->throttle(port);
+}
+
+static void serial8250_unthrottle(struct uart_port *port)
+{
+ port->unthrottle(port);
}
static void serial8250_disable_ms(struct uart_port *port)
{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
+ struct uart_8250_port *up = up_to_u8250p(port);
/* no MSR capabilities */
if (up->bugs & UART_BUG_NOMSR)
@@ -1412,81 +1622,85 @@ static void serial8250_enable_ms(struct uart_port *port)
serial8250_rpm_put(up);
}
+static void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
+{
+ struct uart_port *port = &up->port;
+ unsigned char ch;
+ char flag = TTY_NORMAL;
+
+ if (likely(lsr & UART_LSR_DR))
+ ch = serial_in(up, UART_RX);
+ else
+ /*
+		 * Intel 82571 has a Serial over LAN device that will
+ * set UART_LSR_BI without setting UART_LSR_DR when
+ * it receives a break. To avoid reading from the
+ * receive buffer without UART_LSR_DR bit set, we
+ * just force the read character to be 0
+ */
+ ch = 0;
+
+ port->icount.rx++;
+
+ lsr |= up->lsr_saved_flags;
+ up->lsr_saved_flags = 0;
+
+ if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+ if (lsr & UART_LSR_BI) {
+ lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+ port->icount.brk++;
+ /*
+ * We do the SysRQ and SAK checking
+ * here because otherwise the break
+ * may get masked by ignore_status_mask
+ * or read_status_mask.
+ */
+ if (uart_handle_break(port))
+ return;
+ } else if (lsr & UART_LSR_PE)
+ port->icount.parity++;
+ else if (lsr & UART_LSR_FE)
+ port->icount.frame++;
+ if (lsr & UART_LSR_OE)
+ port->icount.overrun++;
+
+ /*
+ * Mask off conditions which should be ignored.
+ */
+ lsr &= port->read_status_mask;
+
+ if (lsr & UART_LSR_BI) {
+ DEBUG_INTR("handling break....");
+ flag = TTY_BREAK;
+ } else if (lsr & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (lsr & UART_LSR_FE)
+ flag = TTY_FRAME;
+ }
+ if (uart_handle_sysrq_char(port, ch))
+ return;
+
+ uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
+}
+
/*
* serial8250_rx_chars: processes according to the passed in LSR
* value, and returns the remaining LSR bits not handled
* by this Rx routine.
*/
-unsigned char
-serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
{
struct uart_port *port = &up->port;
- unsigned char ch;
int max_count = 256;
- char flag;
do {
- if (likely(lsr & UART_LSR_DR))
- ch = serial_in(up, UART_RX);
- else
- /*
- * Intel 82571 has a Serial Over Lan device that will
- * set UART_LSR_BI without setting UART_LSR_DR when
- * it receives a break. To avoid reading from the
- * receive buffer without UART_LSR_DR bit set, we
- * just force the read character to be 0
- */
- ch = 0;
-
- flag = TTY_NORMAL;
- port->icount.rx++;
-
- lsr |= up->lsr_saved_flags;
- up->lsr_saved_flags = 0;
-
- if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
- if (lsr & UART_LSR_BI) {
- lsr &= ~(UART_LSR_FE | UART_LSR_PE);
- port->icount.brk++;
- /*
- * We do the SysRQ and SAK checking
- * here because otherwise the break
- * may get masked by ignore_status_mask
- * or read_status_mask.
- */
- if (uart_handle_break(port))
- goto ignore_char;
- } else if (lsr & UART_LSR_PE)
- port->icount.parity++;
- else if (lsr & UART_LSR_FE)
- port->icount.frame++;
- if (lsr & UART_LSR_OE)
- port->icount.overrun++;
-
- /*
- * Mask off conditions which should be ignored.
- */
- lsr &= port->read_status_mask;
-
- if (lsr & UART_LSR_BI) {
- DEBUG_INTR("handling break....");
- flag = TTY_BREAK;
- } else if (lsr & UART_LSR_PE)
- flag = TTY_PARITY;
- else if (lsr & UART_LSR_FE)
- flag = TTY_FRAME;
- }
- if (uart_handle_sysrq_char(port, ch))
- goto ignore_char;
-
- uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
-ignore_char:
+ serial8250_read_char(up, lsr);
+ if (--max_count == 0)
+ break;
lsr = serial_in(up, UART_LSR);
- } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (--max_count > 0));
- spin_unlock(&port->lock);
+ } while (lsr & (UART_LSR_DR | UART_LSR_BI));
+
tty_flip_buffer_push(&port->state->port);
- spin_lock(&port->lock);
return lsr;
}
EXPORT_SYMBOL_GPL(serial8250_rx_chars);
@@ -1519,11 +1733,9 @@ void serial8250_tx_chars(struct uart_8250_port *up)
port->icount.tx++;
if (uart_circ_empty(xmit))
break;
- if (up->capabilities & UART_CAP_HFIFO) {
- if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
- BOTH_EMPTY)
- break;
- }
+ if ((up->capabilities & UART_CAP_HFIFO) &&
+ (serial_in(up, UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+ break;
} while (--count > 0);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1752,6 +1964,7 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
/* Wait up to 1s for flow control if necessary */
if (up->port.flags & UPF_CONS_FLOW) {
unsigned int tmout;
+
for (tmout = 1000000; tmout; tmout--) {
unsigned int msr = serial_in(up, UART_MSR);
up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
@@ -1985,23 +2198,23 @@ int serial8250_do_startup(struct uart_port *port)
serial8250_set_mctrl(port, port->mctrl);
- /* Serial over Lan (SoL) hack:
- Intel 8257x Gigabit ethernet chips have a
- 16550 emulation, to be used for Serial Over Lan.
- Those chips take a longer time than a normal
- serial device to signalize that a transmission
- data was queued. Due to that, the above test generally
- fails. One solution would be to delay the reading of
- iir. However, this is not reliable, since the timeout
- is variable. So, let's just don't test if we receive
- TX irq. This way, we'll never enable UART_BUG_TXEN.
+ /*
+	 * Serial over LAN (SoL) hack:
+	 * Intel 8257x Gigabit ethernet chips have a 16550 emulation, to be
+	 * used for Serial over LAN. Those chips take longer than a normal
+	 * serial device to signal that transmission data has been queued.
+	 * Due to that, the above test generally fails. One solution would
+	 * be to delay the reading of iir. However, this is not reliable,
+	 * since the timeout is variable. So, let's just not test whether
+	 * we receive a TX irq. This way, we'll never enable UART_BUG_TXEN.
+	 */
if (up->port.flags & UPF_NO_TXEN_TEST)
goto dont_test_tx_en;
/*
- * Do a quick test to see if we receive an
- * interrupt when we enable the TX irq.
+ * Do a quick test to see if we receive an interrupt when we enable
+ * the TX irq.
*/
serial_port_out(port, UART_IER, UART_IER_THRI);
lsr = serial_port_in(port, UART_LSR);
@@ -2084,8 +2297,12 @@ void serial8250_do_shutdown(struct uart_port *port)
/*
* Disable interrupts from this port
*/
+ spin_lock_irqsave(&port->lock, flags);
up->ier = 0;
serial_port_out(port, UART_IER, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ synchronize_irq(port->irq);
if (up->dma)
serial8250_release_dma(up);
@@ -2251,9 +2468,9 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
serial_port_out(port, 0x2, quot_frac);
}
-static unsigned int
-serial8250_get_baud_rate(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+static unsigned int serial8250_get_baud_rate(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
{
unsigned int tolerance = port->uartclk / 100;
@@ -2270,7 +2487,7 @@ serial8250_get_baud_rate(struct uart_port *port, struct ktermios *termios,
void
serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
+ struct ktermios *old)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned char cval;
@@ -2583,8 +2800,7 @@ static int do_get_rxtrig(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = state->uart_port;
- struct uart_8250_port *up =
- container_of(uport, struct uart_8250_port, port);
+ struct uart_8250_port *up = up_to_u8250p(uport);
if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
return -EINVAL;
@@ -2620,8 +2836,7 @@ static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = state->uart_port;
- struct uart_8250_port *up =
- container_of(uport, struct uart_8250_port, port);
+ struct uart_8250_port *up = up_to_u8250p(uport);
int rxtrig;
if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 ||
@@ -2745,8 +2960,7 @@ serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
return 0;
}
-static const char *
-serial8250_type(struct uart_port *port)
+static const char *serial8250_type(struct uart_port *port)
{
int type = port->type;
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index bab6b3ae2540..1b7bd26555b7 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -35,7 +35,7 @@ struct uniphier8250_priv {
spinlock_t atomic_write_lock;
};
-#ifdef CONFIG_SERIAL_8250_CONSOLE
+#if defined(CONFIG_SERIAL_8250_CONSOLE) && !defined(MODULE)
static int __init uniphier_early_console_setup(struct earlycon_device *device,
const char *options)
{
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index b03cb5175113..64742a086ae3 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -262,7 +262,12 @@ config SERIAL_8250_RSA
bool "Support RSA serial ports"
depends on SERIAL_8250_EXTENDED
help
- ::: To be written :::
+ Say Y here if you have an IODATA RSA-DV II/S ISA card and
+ would like to use its >115 kbps speeds.
+ You will then need to supply the I/O addresses of the card via the
+ "probe_rsa" module parameter or the 8250.probe_rsa boot-time
+ parameter.
+
+ If you don't have such a card, or if unsure, say N.
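
As a usage sketch (the I/O addresses below are placeholders; use whatever your card is jumpered to): boot with

	8250.probe_rsa=0x230,0x338

on the kernel command line, or pass probe_rsa=0x230,0x338 as a module parameter when the 8250 core is built modular.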
config SERIAL_8250_ACORN
tristate "Acorn expansion card serial port support"
@@ -272,6 +277,30 @@ config SERIAL_8250_ACORN
system, say Y to this option. The driver can handle 1, 2, or 3 port
cards. If unsure, say N.
+config SERIAL_8250_BCM2835AUX
+ tristate "BCM2835 auxiliary mini UART support"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on SERIAL_8250 && SERIAL_8250_SHARE_IRQ
+ help
+ Support for the BCM2835 auxiliary mini UART.
+
+ Features and limitations of the UART are:
+ - Registers are similar to 16650 registers; set bits in the
+   control registers that are unsupported are ignored and read
+   back as 0
+ - 7/8 bit operation with 1 start and 1 stop bit
+ - 8 symbols deep FIFO for rx and tx
+ - SW controlled RTS and SW readable CTS
+ - Clock rate derived from system clock
+ - Uses 8 times oversampling (compared to 16 times for 16650)
+ - Missing break detection (but break generation)
+ - Missing framing error detection
+ - Missing parity bit
+ - Missing receive time-out interrupt
+ - Missing DCD, DSR, DTR and RI signals
+
+ If unsure, say N.
+
config SERIAL_8250_FSL
bool
depends on SERIAL_8250_CONSOLE
@@ -295,6 +324,7 @@ config SERIAL_8250_EM
config SERIAL_8250_RT288X
bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
depends on SERIAL_8250
+ depends on MIPS || COMPILE_TEST
default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
help
Selecting this option will add support for the alternate register
@@ -346,7 +376,7 @@ config SERIAL_8250_LPC18XX
serial port, say Y to this option. If unsure, say Y.
config SERIAL_8250_MT6577
- bool "Mediatek serial port support"
+ tristate "Mediatek serial port support"
depends on SERIAL_8250 && ARCH_MEDIATEK
help
If you have a Mediatek based board and want to use the
@@ -360,9 +390,10 @@ config SERIAL_8250_UNIPHIER
serial ports, say Y to this option. If unsure, say N.
config SERIAL_8250_INGENIC
- bool "Support for Ingenic SoC serial ports"
- depends on OF_FLATTREE
- select LIBFDT
+ tristate "Support for Ingenic SoC serial ports"
+ depends on SERIAL_8250
+ depends on (OF_FLATTREE && SERIAL_8250_CONSOLE) || !SERIAL_EARLYCON
+ depends on MIPS || COMPILE_TEST
help
If you have a system using an Ingenic SoC and wish to make use of
its UARTs, say Y to this option. If unsure, say N.
@@ -378,6 +409,16 @@ config SERIAL_8250_MID
present on the UART found on Intel Medfield SOC and various other
Intel platforms.
+config SERIAL_8250_MOXA
+ tristate "MOXA SmartIO MUE support"
+ depends on SERIAL_8250 && PCI
+ help
+ Say Y here if you have a Moxa SmartIO MUE multiport serial card.
+
+ This driver can also be built as a module. The module will be
+ called 8250_moxa. If you want to do that, say M here.
+
+ If unsure, say N.
+
config SERIAL_OF_PLATFORM
tristate "Devicetree based probing for 8250 ports"
depends on SERIAL_8250 && OF
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index b9b9bca5b6c3..c9a2d6ed87e9 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
+obj-$(CONFIG_SERIAL_8250_BCM2835AUX) += 8250_bcm2835aux.o
obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
+obj-$(CONFIG_SERIAL_8250_MOXA) += 8250_moxa.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += 8250_of.o
CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 4d180c9423ef..933c2688dd7e 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -28,7 +28,7 @@
and other provisions required by the GPL. If you do not delete
the provisions above, a recipient may use your version of this
file under either the MPL or the GPL.
-
+
======================================================================*/
#include <linux/module.h>
@@ -257,7 +257,7 @@ static const struct serial_quirk quirks[] = {
};
-static int serial_config(struct pcmcia_device * link);
+static int serial_config(struct pcmcia_device *link);
static void serial_remove(struct pcmcia_device *link)
@@ -309,7 +309,7 @@ static int serial_probe(struct pcmcia_device *link)
dev_dbg(&link->dev, "serial_attach()\n");
/* Create new serial device */
- info = kzalloc(sizeof (*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->p_dev = link;
@@ -339,7 +339,7 @@ static void serial_detach(struct pcmcia_device *link)
/*====================================================================*/
-static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
+static int setup_serial(struct pcmcia_device *handle, struct serial_info *info,
unsigned int iobase, int irq)
{
struct uart_8250_port uart;
@@ -441,16 +441,20 @@ static int simple_config(struct pcmcia_device *link)
struct serial_info *info = link->priv;
int i = -ENODEV, try;
- /* First pass: look for a config entry that looks normal.
- * Two tries: without IO aliases, then with aliases */
+ /*
+ * First pass: look for a config entry that looks normal.
+ * Two tries: without IO aliases, then with aliases.
+ */
link->config_flags |= CONF_AUTO_SET_VPP;
for (try = 0; try < 4; try++)
if (!pcmcia_loop_config(link, simple_config_check, &try))
goto found_port;
- /* Second pass: try to find an entry that isn't picky about
- its base address, then try to grab any standard serial port
- address, and finally try to get any free port. */
+ /*
+ * Second pass: try to find an entry that isn't picky about
+ * its base address, then try to grab any standard serial port
+ * address, and finally try to get any free port.
+ */
if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
goto found_port;
@@ -480,8 +484,10 @@ static int multi_config_check(struct pcmcia_device *p_dev, void *priv_data)
if (p_dev->resource[1]->end)
return -EINVAL;
- /* The quad port cards have bad CIS's, so just look for a
- window larger than 8 ports and assume it will be right */
+ /*
+ * The quad port cards have bad CIS's, so just look for a
+ * window larger than 8 ports and assume it will be right.
+ */
if (p_dev->resource[0]->end <= 8)
return -EINVAL;
@@ -527,8 +533,8 @@ static int multi_config(struct pcmcia_device *link)
info->multi = 2;
if (pcmcia_loop_config(link, multi_config_check_notpicky,
&base2)) {
- dev_warn(&link->dev, "no usable port range "
- "found, giving up\n");
+ dev_warn(&link->dev,
+ "no usable port range found, giving up\n");
return -ENODEV;
}
}
@@ -600,7 +606,7 @@ static int serial_check_for_multi(struct pcmcia_device *p_dev, void *priv_data)
}
-static int serial_config(struct pcmcia_device * link)
+static int serial_config(struct pcmcia_device *link)
{
struct serial_info *info = link->priv;
int i;
@@ -623,8 +629,10 @@ static int serial_config(struct pcmcia_device * link)
break;
}
- /* Another check for dual-serial cards: look for either serial or
- multifunction cards that ask for appropriate IO port ranges */
+ /*
+ * Another check for dual-serial cards: look for either serial or
+ * multifunction cards that ask for appropriate IO port ranges.
+ */
if ((info->multi == 0) &&
(link->has_func_id) &&
(link->socket->pcmcia_pfc == 0) &&
@@ -701,7 +709,7 @@ static const struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(1, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
- PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001", 0x18df0ba0, 0x831b1064),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
@@ -797,30 +805,30 @@ static const struct pcmcia_device_id serial_ids[] = {
PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
- PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
- PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
- PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490),
- PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232",0x19ca78af,0xb6bc0235),
- PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232",0x63f2e0bd,0xb9e175d3),
- PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232-5",0x63f2e0bd,0xfce33442),
- PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232",0x3beb8cf2,0x171e7190),
- PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232-5",0x3beb8cf2,0x20da4262),
- PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF428",0x3beb8cf2,0xea5dd57d),
- PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF500",0x3beb8cf2,0xd77255fa),
- PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: IC232",0x3beb8cf2,0x6a709903),
- PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: SL232",0x3beb8cf2,0x18430676),
- PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: XL232",0x3beb8cf2,0x6f933767),
- PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
- PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
- PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
- PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
- PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
- PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
- PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
- PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
- PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
- PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
- PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100 1.00.", 0x19ca78af, 0xf964f42b),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100", 0x19ca78af, 0x71d98e83),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232 1.00.", 0x19ca78af, 0x69fb7490),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232", 0x19ca78af, 0xb6bc0235),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.", "SERIAL CARD: CF232", 0x63f2e0bd, 0xb9e175d3),
+ PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.", "SERIAL CARD: CF232-5", 0x63f2e0bd, 0xfce33442),
+ PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF232", 0x3beb8cf2, 0x171e7190),
+ PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF232-5", 0x3beb8cf2, 0x20da4262),
+ PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF428", 0x3beb8cf2, 0xea5dd57d),
+ PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF500", 0x3beb8cf2, 0xd77255fa),
+ PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: IC232", 0x3beb8cf2, 0x6a709903),
+ PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: SL232", 0x3beb8cf2, 0x18430676),
+ PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: XL232", 0x3beb8cf2, 0x6f933767),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: CF332", 0x3beb8cf2, 0x16dc1ba7),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL332", 0x3beb8cf2, 0x19816c41),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL385", 0x3beb8cf2, 0x64112029),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4),
+ PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial+Parallel Port: SP230", 0x3beb8cf2, 0xdb9e58bc),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: CF332", 0x3beb8cf2, 0x16dc1ba7),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL332", 0x3beb8cf2, 0x19816c41),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL385", 0x3beb8cf2, 0x64112029),
+ PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4),
+ PCMCIA_MFC_DEVICE_PROD_ID12(2, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4),
+ PCMCIA_MFC_DEVICE_PROD_ID12(3, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4),
PCMCIA_DEVICE_MANF_CARD(0x0279, 0x950b),
/* too generic */
/* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 39721ec4f415..13d4ed6caac4 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -610,6 +610,7 @@ config SERIAL_UARTLITE_CONSOLE
bool "Support for console on Xilinx uartlite serial port"
depends on SERIAL_UARTLITE=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
help
Say Y here if you wish to use a Xilinx uartlite as the system
console (the system console is the device which receives all kernel
@@ -732,7 +733,7 @@ config SERIAL_IP22_ZILOG_CONSOLE
config SERIAL_SH_SCI
tristate "SuperH SCI(F) serial port support"
- depends on SUPERH || ARCH_SHMOBILE || H8300 || COMPILE_TEST
+ depends on SUPERH || ARCH_RENESAS || H8300 || COMPILE_TEST
select SERIAL_CORE
config SERIAL_SH_SCI_NR_UARTS
@@ -745,6 +746,12 @@ config SERIAL_SH_SCI_CONSOLE
depends on SERIAL_SH_SCI=y
select SERIAL_CORE_CONSOLE
+config SERIAL_SH_SCI_EARLYCON
+ bool "Support for early console on SuperH SCI(F)"
+ depends on SERIAL_SH_SCI=y
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+
config SERIAL_SH_SCI_DMA
bool "DMA support"
depends on SERIAL_SH_SCI && DMA_ENGINE
@@ -793,17 +800,6 @@ config SERIAL_CORE_CONSOLE
config CONSOLE_POLL
bool
-config SERIAL_68328
- bool "68328 serial support"
- depends on M68328 || M68EZ328 || M68VZ328
- help
- This driver supports the built-in serial port of the Motorola 68328
- (standard, EZ and VZ varieties).
-
-config SERIAL_68328_RTS_CTS
- bool "Support RTS/CTS on 68328 serial port"
- depends on SERIAL_68328
-
config SERIAL_MCF
bool "Coldfire serial support"
depends on COLDFIRE
@@ -1606,6 +1602,28 @@ config SERIAL_STM32_CONSOLE
depends on SERIAL_STM32=y
select SERIAL_CORE_CONSOLE
+config SERIAL_MVEBU_UART
+ bool "Marvell EBU serial port support"
+ select SERIAL_CORE
+ help
+ This driver is for the Marvell EBU SoC UART. If you have a machine
+ based on the Armada-3700 SoC and wish to use the on-board serial
+ port, say 'Y' here.
+ Otherwise, say 'N'.
+
+config SERIAL_MVEBU_CONSOLE
+ bool "Console on Marvell EBU serial port"
+ depends on SERIAL_MVEBU_UART
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+ default y
+ help
+ Say 'Y' here if you wish to use the Armada-3700 UART as the system
+ console (the system console is the device which receives all kernel
+ messages and warnings and which allows logins in single user mode).
+ Otherwise, say 'N'.
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index b391c9b31960..8c261adac04e 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX310X) += max310x.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
obj-$(CONFIG_SERIAL_MUX) += mux.o
-obj-$(CONFIG_SERIAL_68328) += 68328serial.o
obj-$(CONFIG_SERIAL_MCF) += mcf.o
obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
obj-$(CONFIG_SERIAL_HS_LPC32XX) += lpc32xx_hs.o
@@ -91,6 +90,7 @@ obj-$(CONFIG_SERIAL_CONEXANT_DIGICOLOR) += digicolor-usart.o
obj-$(CONFIG_SERIAL_MEN_Z135) += men_z135_uart.o
obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o
obj-$(CONFIG_SERIAL_STM32) += stm32-usart.o
+obj-$(CONFIG_SERIAL_MVEBU_UART) += mvebu-uart.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index c0da0ccbbcf5..7c198e0a3178 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -187,7 +187,7 @@ static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
[REG_DMACR] = ZX_UART011_DMACR,
};
-static struct vendor_data vendor_zte = {
+static struct vendor_data vendor_zte __maybe_unused = {
.reg_offset = pl011_zte_offsets,
.access_32b = true,
.ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
@@ -420,7 +420,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
/* Optionally make use of an RX channel as well */
chan = dma_request_slave_channel(dev, "rx");
- if (!chan && plat->dma_rx_param) {
+ if (!chan && plat && plat->dma_rx_param) {
chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
if (!chan) {
@@ -1167,7 +1167,7 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
/* Disable RX and TX DMA */
while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
- barrier();
+ cpu_relax();
spin_lock_irq(&uap->port.lock);
uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
@@ -1611,7 +1611,7 @@ static void pl011_put_poll_char(struct uart_port *port,
container_of(port, struct uart_amba_port, port);
while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
- barrier();
+ cpu_relax();
pl011_write(ch, uap, REG_DR);
}
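
All the conversions from barrier() to cpu_relax() in this file target the same polling idiom; an annotated version of it:

	/*
	 * cpu_relax() is still a compiler barrier, so the status register
	 * is genuinely re-read on every pass, but unlike a bare barrier()
	 * it can also hint the CPU (e.g. PAUSE on x86, yielding to an SMT
	 * sibling) while we spin.
	 */
	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);	/* FIFO has room: emit the char */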
@@ -1947,6 +1947,8 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
lcr_h |= UART01x_LCRH_PEN;
if (!(termios->c_cflag & PARODD))
lcr_h |= UART01x_LCRH_EPS;
+ if (termios->c_cflag & CMSPAR)
+ lcr_h |= UART011_LCRH_SPS;
}
if (uap->fifosize > 1)
lcr_h |= UART01x_LCRH_FEN;
@@ -2150,7 +2152,7 @@ static void pl011_console_putchar(struct uart_port *port, int ch)
container_of(port, struct uart_amba_port, port);
while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
- barrier();
+ cpu_relax();
pl011_write(ch, uap, REG_DR);
}
@@ -2158,7 +2160,7 @@ static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_amba_port *uap = amba_ports[co->index];
- unsigned int status, old_cr = 0, new_cr;
+ unsigned int old_cr = 0, new_cr;
unsigned long flags;
int locked = 1;
@@ -2188,9 +2190,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
* Finally, wait for transmitter to become empty
* and restore the TCR
*/
- do {
- status = pl011_read(uap, REG_FR);
- } while (status & UART01x_FR_BUSY);
+ while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
+ cpu_relax();
if (!uap->vendor->always_enabled)
pl011_write(old_cr, uap, REG_CR);
@@ -2302,13 +2303,13 @@ static struct console amba_console = {
static void pl011_putc(struct uart_port *port, int c)
{
while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
- ;
+ cpu_relax();
if (port->iotype == UPIO_MEM32)
writel(c, port->membase + UART01x_DR);
else
writeb(c, port->membase + UART01x_DR);
while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
- ;
+ cpu_relax();
}
static void pl011_early_write(struct console *con, const char *s, unsigned n)
@@ -2327,7 +2328,6 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
device->con->write = pl011_early_write;
return 0;
}
-EARLYCON_DECLARE(pl011, pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
#else
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 03ebe401fff7..3a1de5c87cb4 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -576,7 +576,6 @@ static int __init arc_early_console_setup(struct earlycon_device *dev,
dev->con->write = arc_early_serial_write;
return 0;
}
-EARLYCON_DECLARE(arc_uart, arc_early_console_setup);
OF_EARLYCON_DECLARE(arc_uart, "snps,arc-uart", arc_early_console_setup);
#endif /* CONFIG_SERIAL_ARC_CONSOLE */
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 1c0884d8ef32..d9439e6ab719 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -159,8 +159,9 @@ struct atmel_uart_port {
u32 rts_high;
u32 rts_low;
bool ms_irq_enabled;
- bool is_usart; /* usart or uart */
- struct timer_list uart_timer; /* uart timer */
+ u32 rtor; /* address of receiver timeout register if it exists */
+ bool has_hw_timer;
+ struct timer_list uart_timer;
bool suspended;
unsigned int pending;
@@ -1710,19 +1711,24 @@ static void atmel_get_ip_name(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int name = atmel_uart_readl(port, ATMEL_US_NAME);
u32 version;
- int usart, uart;
- /* usart and uart ascii */
- usart = 0x55534152;
- uart = 0x44424755;
-
- atmel_port->is_usart = false;
-
- if (name == usart) {
- dev_dbg(port->dev, "This is usart\n");
- atmel_port->is_usart = true;
- } else if (name == uart) {
- dev_dbg(port->dev, "This is uart\n");
- atmel_port->is_usart = false;
+ u32 usart, dbgu_uart, new_uart;
+ /* ASCII decoding for IP version */
+ usart = 0x55534152; /* USAR(T) */
+ dbgu_uart = 0x44424755; /* DBGU */
+ new_uart = 0x55415254; /* UART */
+
+ atmel_port->has_hw_timer = false;
+
+ if (name == new_uart) {
+ dev_dbg(port->dev, "Uart with hw timer");
+ atmel_port->has_hw_timer = true;
+ atmel_port->rtor = ATMEL_UA_RTOR;
+ } else if (name == usart) {
+ dev_dbg(port->dev, "Usart\n");
+ atmel_port->has_hw_timer = true;
+ atmel_port->rtor = ATMEL_US_RTOR;
+ } else if (name == dbgu_uart) {
+ dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
} else {
/* fallback for older SoCs: use version field */
version = atmel_uart_readl(port, ATMEL_US_VERSION);
@@ -1730,12 +1736,12 @@ static void atmel_get_ip_name(struct uart_port *port)
case 0x302:
case 0x10213:
dev_dbg(port->dev, "This version is usart\n");
- atmel_port->is_usart = true;
+ atmel_port->has_hw_timer = true;
+ atmel_port->rtor = ATMEL_US_RTOR;
break;
case 0x203:
case 0x10202:
dev_dbg(port->dev, "This version is uart\n");
- atmel_port->is_usart = false;
break;
default:
dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
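
The matching above works because the peripheral reports its name as ASCII packed big-endian into a 32-bit register; a self-contained illustration (hypothetical helper, plain C):

	#include <stdint.h>

	/* 0x55534152 is 'U','S','A','R' packed MSB-first. */
	static int name_is(uint32_t reg, const char tag[4])
	{
		return reg == ((uint32_t)tag[0] << 24 | (uint32_t)tag[1] << 16 |
			       (uint32_t)tag[2] << 8  | (uint32_t)tag[3]);
	}

	/* name_is(0x55534152, "USAR") == 1, the usart case above. */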
@@ -1835,12 +1841,13 @@ static int atmel_startup(struct uart_port *port)
if (atmel_use_pdc_rx(port)) {
/* set UART timeout */
- if (!atmel_port->is_usart) {
+ if (!atmel_port->has_hw_timer) {
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
} else {
- atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
+ atmel_uart_writel(port, atmel_port->rtor,
+ PDC_RX_TIMEOUT);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
atmel_uart_writel(port, ATMEL_US_IER,
@@ -1850,12 +1857,13 @@ static int atmel_startup(struct uart_port *port)
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
} else if (atmel_use_dma_rx(port)) {
/* set UART timeout */
- if (!atmel_port->is_usart) {
+ if (!atmel_port->has_hw_timer) {
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
} else {
- atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
+ atmel_uart_writel(port, atmel_port->rtor,
+ PDC_RX_TIMEOUT);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
atmel_uart_writel(port, ATMEL_US_IER,
@@ -2478,13 +2486,13 @@ static int __init atmel_console_init(void)
struct atmel_uart_data *pdata =
dev_get_platdata(&atmel_default_console_device->dev);
int id = pdata->num;
- struct atmel_uart_port *port = &atmel_ports[id];
+ struct atmel_uart_port *atmel_port = &atmel_ports[id];
- port->backup_imr = 0;
- port->uart.line = id;
+ atmel_port->backup_imr = 0;
+ atmel_port->uart.line = id;
add_preferred_console(ATMEL_DEVICENAME, id, NULL);
- ret = atmel_init_port(port, atmel_default_console_device);
+ ret = atmel_init_port(atmel_port, atmel_default_console_device);
if (ret)
return ret;
register_console(&atmel_console);
@@ -2599,23 +2607,23 @@ static int atmel_serial_resume(struct platform_device *pdev)
#define atmel_serial_resume NULL
#endif
-static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
+static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
{
- port->fifo_size = 0;
- port->rts_low = 0;
- port->rts_high = 0;
+ atmel_port->fifo_size = 0;
+ atmel_port->rts_low = 0;
+ atmel_port->rts_high = 0;
if (of_property_read_u32(pdev->dev.of_node,
"atmel,fifo-size",
- &port->fifo_size))
+ &atmel_port->fifo_size))
return;
- if (!port->fifo_size)
+ if (!atmel_port->fifo_size)
return;
- if (port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
- port->fifo_size = 0;
+ if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
+ atmel_port->fifo_size = 0;
dev_err(&pdev->dev, "Invalid FIFO size\n");
return;
}
@@ -2628,22 +2636,22 @@ static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
* Threshold to a reasonably high value respecting this 16 data
* empirical rule when possible.
*/
- port->rts_high = max_t(int, port->fifo_size >> 1,
- port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
- port->rts_low = max_t(int, port->fifo_size >> 2,
- port->fifo_size - ATMEL_RTS_LOW_OFFSET);
+ atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
+ atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
+ atmel_port->rts_low = max_t(int, atmel_port->fifo_size >> 2,
+ atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
dev_info(&pdev->dev, "Using FIFO (%u data)\n",
- port->fifo_size);
+ atmel_port->fifo_size);
dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
- port->rts_high);
+ atmel_port->rts_high);
dev_dbg(&pdev->dev, "RTS Low Threshold : %2u data\n",
- port->rts_low);
+ atmel_port->rts_low);
}
static int atmel_serial_probe(struct platform_device *pdev)
{
- struct atmel_uart_port *port;
+ struct atmel_uart_port *atmel_port;
struct device_node *np = pdev->dev.of_node;
struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
void *data;
@@ -2674,99 +2682,133 @@ static int atmel_serial_probe(struct platform_device *pdev)
goto err;
}
- port = &atmel_ports[ret];
- port->backup_imr = 0;
- port->uart.line = ret;
- atmel_serial_probe_fifos(port, pdev);
+ atmel_port = &atmel_ports[ret];
+ atmel_port->backup_imr = 0;
+ atmel_port->uart.line = ret;
+ atmel_serial_probe_fifos(atmel_port, pdev);
- spin_lock_init(&port->lock_suspended);
+ spin_lock_init(&atmel_port->lock_suspended);
- ret = atmel_init_port(port, pdev);
+ ret = atmel_init_port(atmel_port, pdev);
if (ret)
goto err_clear_bit;
- port->gpios = mctrl_gpio_init(&port->uart, 0);
- if (IS_ERR(port->gpios)) {
- ret = PTR_ERR(port->gpios);
+ atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
+ if (IS_ERR(atmel_port->gpios)) {
+ ret = PTR_ERR(atmel_port->gpios);
goto err_clear_bit;
}
- if (!atmel_use_pdc_rx(&port->uart)) {
+ if (!atmel_use_pdc_rx(&atmel_port->uart)) {
ret = -ENOMEM;
data = kmalloc(sizeof(struct atmel_uart_char)
* ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
if (!data)
goto err_alloc_ring;
- port->rx_ring.buf = data;
+ atmel_port->rx_ring.buf = data;
}
- rs485_enabled = port->uart.rs485.flags & SER_RS485_ENABLED;
+ rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
- ret = uart_add_one_port(&atmel_uart, &port->uart);
+ ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
if (ret)
goto err_add_port;
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
- if (atmel_is_console_port(&port->uart)
+ if (atmel_is_console_port(&atmel_port->uart)
&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
/*
* The serial core enabled the clock for us, so undo
* the clk_prepare_enable() in atmel_console_setup()
*/
- clk_disable_unprepare(port->clk);
+ clk_disable_unprepare(atmel_port->clk);
}
#endif
device_init_wakeup(&pdev->dev, 1);
- platform_set_drvdata(pdev, port);
+ platform_set_drvdata(pdev, atmel_port);
/*
* The peripheral clock has been disabled by atmel_init_port():
* enable it before accessing I/O registers
*/
- clk_prepare_enable(port->clk);
+ clk_prepare_enable(atmel_port->clk);
if (rs485_enabled) {
- atmel_uart_writel(&port->uart, ATMEL_US_MR,
+ atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
ATMEL_US_USMODE_NORMAL);
- atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN);
+ atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
+ ATMEL_US_RTSEN);
}
/*
* Get port name of usart or uart
*/
- atmel_get_ip_name(&port->uart);
+ atmel_get_ip_name(&atmel_port->uart);
/*
* The peripheral clock can now safely be disabled till the port
* is used
*/
- clk_disable_unprepare(port->clk);
+ clk_disable_unprepare(atmel_port->clk);
return 0;
err_add_port:
- kfree(port->rx_ring.buf);
- port->rx_ring.buf = NULL;
+ kfree(atmel_port->rx_ring.buf);
+ atmel_port->rx_ring.buf = NULL;
err_alloc_ring:
- if (!atmel_is_console_port(&port->uart)) {
- clk_put(port->clk);
- port->clk = NULL;
+ if (!atmel_is_console_port(&atmel_port->uart)) {
+ clk_put(atmel_port->clk);
+ atmel_port->clk = NULL;
}
err_clear_bit:
- clear_bit(port->uart.line, atmel_ports_in_use);
+ clear_bit(atmel_port->uart.line, atmel_ports_in_use);
err:
return ret;
}
+/*
+ * Even if the driver is not modular, it makes sense to be able to
+ * unbind a device: there can be many bound devices, and there are
+ * situations where dynamic binding and unbinding can be useful.
+ *
+ * For example, a connected device can require a specific firmware update
+ * protocol that needs bitbanging on IO lines, but use the regular serial
+ * port in the normal case.
+ */
+static int atmel_serial_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ int ret = 0;
+
+ tasklet_kill(&atmel_port->tasklet);
+
+ device_init_wakeup(&pdev->dev, 0);
+
+ ret = uart_remove_one_port(&atmel_uart, port);
+
+ kfree(atmel_port->rx_ring.buf);
+
+ /* "port" is allocated statically, so we shouldn't free it */
+
+ clear_bit(port->line, atmel_ports_in_use);
+
+ clk_put(atmel_port->clk);
+ atmel_port->clk = NULL;
+
+ return ret;
+}
+
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
+ .remove = atmel_serial_remove,
.suspend = atmel_serial_suspend,
.resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
- .suppress_bind_attrs = true,
},
};
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index b3a4e0cdddaa..5beafd2d2218 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -450,6 +450,7 @@ static int uart_clps711x_probe(struct platform_device *pdev)
struct clps711x_port *s;
struct resource *res;
struct clk *uart_clk;
+ int irq;
if (index < 0 || index >= UART_CLPS711X_NR)
return -EINVAL;
@@ -467,12 +468,13 @@ static int uart_clps711x_probe(struct platform_device *pdev)
if (IS_ERR(s->port.membase))
return PTR_ERR(s->port.membase);
- s->port.irq = platform_get_irq(pdev, 0);
- if (IS_ERR_VALUE(s->port.irq))
- return s->port.irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ s->port.irq = irq;
s->rx_irq = platform_get_irq(pdev, 1);
- if (IS_ERR_VALUE(s->rx_irq))
+ if (s->rx_irq < 0)
return s->rx_irq;
if (!np) {
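
Both IS_ERR_VALUE() removals in this file apply the same rule: platform_get_irq() returns a negative errno, and the port's irq field is unsigned, so the result must be tested in a signed local before it is stored. The general pattern:

	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */
	s->port.irq = irq;	/* only now store into the unsigned field */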
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index f13f2ebd215b..c0172bf54a9b 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -1413,9 +1413,8 @@ rs_stop(struct tty_struct *tty)
xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char,
STOP_CHAR(info->port.tty));
xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, stop);
- if (tty->termios.c_iflag & IXON ) {
+ if (I_IXON(tty))
xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
- }
*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
local_irq_restore(flags);
@@ -1436,9 +1435,8 @@ rs_start(struct tty_struct *tty)
info->xmit.tail,SERIAL_XMIT_SIZE)));
xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(tty));
xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
- if (tty->termios.c_iflag & IXON ) {
+ if (I_IXON(tty))
xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
- }
*((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
if (!info->uses_dma_out &&
@@ -2968,7 +2966,7 @@ static int rs_raw_write(struct tty_struct *tty,
local_save_flags(flags);
DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
- DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
+ DFLOW(DEBUG_LOG(info->line, "ldisc\n"));
/* The local_irq_disable/restore_flags pairs below are needed
@@ -3161,13 +3159,12 @@ rs_throttle(struct tty_struct * tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
#ifdef SERIAL_DEBUG_THROTTLE
- printk("throttle %s: %lu....\n", tty_name(tty),
- (unsigned long)tty->ldisc.chars_in_buffer(tty));
+ printk("throttle %s ....\n", tty_name(tty));
#endif
- DFLOW(DEBUG_LOG(info->line,"rs_throttle %lu\n", tty->ldisc.chars_in_buffer(tty)));
+ DFLOW(DEBUG_LOG(info->line,"rs_throttle\n"));
/* Do RTS before XOFF since XOFF might take some time */
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
/* Turn off RTS line */
e100_rts(info, 0);
}
@@ -3181,13 +3178,12 @@ rs_unthrottle(struct tty_struct * tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
#ifdef SERIAL_DEBUG_THROTTLE
- printk("unthrottle %s: %lu....\n", tty_name(tty),
- (unsigned long)tty->ldisc.chars_in_buffer(tty));
+ printk("unthrottle %s ....\n", tty_name(tty));
#endif
- DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc %d\n", tty->ldisc.chars_in_buffer(tty)));
+ DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc\n"));
DFLOW(DEBUG_LOG(info->line,"rs_unthrottle flip.count: %i\n", tty->flip.count));
/* Do RTS before XOFF since XOFF might take some time */
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
/* Assert RTS line */
e100_rts(info, 1);
}
@@ -3555,8 +3551,7 @@ rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
change_speed(info);
/* Handle turning off CRTSCTS */
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios.c_cflag & CRTSCTS))
+ if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty))
rs_start(tty);
}
@@ -3615,7 +3610,6 @@ rs_close(struct tty_struct *tty, struct file * filp)
local_irq_restore(flags);
return;
}
- info->port.flags |= ASYNC_CLOSING;
/*
* Now we wait for the transmit buffer to clear; and we notify
* the line discipline to only process XON/XOFF characters.
@@ -3654,7 +3648,7 @@ rs_close(struct tty_struct *tty, struct file * filp)
schedule_timeout_interruptible(info->port.close_delay);
wake_up_interruptible(&info->port.open_wait);
}
- info->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
+ info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
local_irq_restore(flags);
/* port closed */
@@ -3767,9 +3761,8 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
return 0;
}
- if (tty->termios.c_cflag & CLOCAL) {
- do_clocal = 1;
- }
+ if (C_CLOCAL(tty))
+ do_clocal = 1;
/*
* Block waiting for the carrier detect and the line to become
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index a80cdad114f3..02ad6953b167 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -453,7 +453,7 @@ static struct uart_driver digicolor_uart = {
static int digicolor_uart_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- int ret, index;
+ int irq, ret, index;
struct digicolor_port *dp;
struct resource *res;
struct clk *uart_clk;
@@ -481,9 +481,10 @@ static int digicolor_uart_probe(struct platform_device *pdev)
if (IS_ERR(dp->port.membase))
return PTR_ERR(dp->port.membase);
- dp->port.irq = platform_get_irq(pdev, 0);
- if (IS_ERR_VALUE(dp->port.irq))
- return dp->port.irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ dp->port.irq = irq;
dp->port.iotype = UPIO_MEM;
dp->port.uartclk = clk_get_rate(uart_clk);
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 3f2423690d01..067783f0523c 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -19,7 +19,8 @@
#include <linux/io.h>
#include <linux/serial_core.h>
#include <linux/sizes.h>
-#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#ifdef CONFIG_FIX_EARLYCON_MEM
#include <asm/fixmap.h>
@@ -28,22 +29,15 @@
#include <asm/serial.h>
static struct console early_con = {
- .name = "uart", /* 8250 console switch requires this name */
+ .name = "uart", /* fixed up at earlycon registration */
.flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1,
+ .index = 0,
};
static struct earlycon_device early_console_dev = {
.con = &early_con,
};
-extern struct earlycon_id __earlycon_table[];
-static const struct earlycon_id __earlycon_table_sentinel
- __used __section(__earlycon_table_end);
-
-static const struct of_device_id __earlycon_of_table_sentinel
- __used __section(__earlycon_of_table_end);
-
static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
{
void __iomem *base;
@@ -61,6 +55,39 @@ static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
return base;
}
+static void __init earlycon_init(struct earlycon_device *device,
+ const char *name)
+{
+ struct console *earlycon = device->con;
+ struct uart_port *port = &device->port;
+ const char *s;
+ size_t len;
+
+ /* scan backwards from end of string for first non-numeral */
+ for (s = name + strlen(name);
+ s > name && s[-1] >= '0' && s[-1] <= '9';
+ s--)
+ ;
+ if (*s)
+ earlycon->index = simple_strtoul(s, NULL, 10);
+ len = s - name;
+ strlcpy(earlycon->name, name, min(len + 1, sizeof(earlycon->name)));
+ earlycon->data = &early_console_dev;
+
+ if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM16 ||
+ port->iotype == UPIO_MEM32 || port->iotype == UPIO_MEM32BE)
+ pr_info("%s%d at MMIO%s %pa (options '%s')\n",
+ earlycon->name, earlycon->index,
+ (port->iotype == UPIO_MEM) ? "" :
+ (port->iotype == UPIO_MEM16) ? "16" :
+ (port->iotype == UPIO_MEM32) ? "32" : "32be",
+ &port->mapbase, device->options);
+ else
+ pr_info("%s%d at I/O port 0x%lx (options '%s')\n",
+ earlycon->name, earlycon->index,
+ port->iobase, device->options);
+}
+
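
The backwards scan above splits a console name such as "ttyS1" into a base name and an index; a standalone sketch of the same parse (plain C, hypothetical helper):

	#include <ctype.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* "ttyS1" -> base "ttyS", index 1; "uart" -> base "uart", index 0. */
	static void split_name(const char *name, char *base, size_t n, int *idx)
	{
		const char *s = name + strlen(name);

		while (s > name && isdigit((unsigned char)s[-1]))
			s--;			/* skip trailing digits */
		*idx = *s ? atoi(s) : 0;
		snprintf(base, n, "%.*s", (int)(s - name), name);
	}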
static int __init parse_options(struct earlycon_device *device, char *options)
{
struct uart_port *port = &device->port;
@@ -97,19 +124,6 @@ static int __init parse_options(struct earlycon_device *device, char *options)
strlcpy(device->options, options, length);
}
- if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM16 ||
- port->iotype == UPIO_MEM32 || port->iotype == UPIO_MEM32BE)
- pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n",
- (port->iotype == UPIO_MEM) ? "" :
- (port->iotype == UPIO_MEM16) ? "16" :
- (port->iotype == UPIO_MEM32) ? "32" : "32be",
- (unsigned long long)port->mapbase,
- device->options);
- else
- pr_info("Early serial console at I/O port 0x%lx (options '%s')\n",
- port->iobase,
- device->options);
-
return 0;
}
@@ -127,7 +141,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
if (port->mapbase)
port->membase = earlycon_map(port->mapbase, 64);
- early_console_dev.con->data = &early_console_dev;
+ earlycon_init(&early_console_dev, match->name);
err = match->setup(&early_console_dev, buf);
if (err < 0)
return err;
@@ -166,7 +180,7 @@ int __init setup_earlycon(char *buf)
if (early_con.flags & CON_ENABLED)
return -EALREADY;
- for (match = __earlycon_table; match->name[0]; match++) {
+ for (match = __earlycon_table; match < __earlycon_table_end; match++) {
size_t len = strlen(match->name);
if (strncmp(buf, match->name, len))
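
The sentinel entry is gone because the table is now walked between two linker-provided boundary symbols; schematically (symbol names as used above):

	extern struct earlycon_id __earlycon_table[];
	extern struct earlycon_id __earlycon_table_end[];	/* linker script */

	for (match = __earlycon_table; match < __earlycon_table_end; match++)
		/* ... match->name against the earlycon= argument ... */;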
@@ -204,20 +218,62 @@ static int __init param_setup_earlycon(char *buf)
}
early_param("earlycon", param_setup_earlycon);
-int __init of_setup_earlycon(unsigned long addr,
- int (*setup)(struct earlycon_device *, const char *))
+#ifdef CONFIG_OF_EARLY_FLATTREE
+
+int __init of_setup_earlycon(const struct earlycon_id *match,
+ unsigned long node,
+ const char *options)
{
int err;
struct uart_port *port = &early_console_dev.port;
+ const __be32 *val;
+ bool big_endian;
+ u64 addr;
spin_lock_init(&port->lock);
port->iotype = UPIO_MEM;
+ addr = of_flat_dt_translate_address(node);
+ if (addr == OF_BAD_ADDR) {
+ pr_warn("[%s] bad address\n", match->name);
+ return -ENXIO;
+ }
port->mapbase = addr;
port->uartclk = BASE_BAUD * 16;
- port->membase = earlycon_map(addr, SZ_4K);
+ port->membase = earlycon_map(port->mapbase, SZ_4K);
+
+ val = of_get_flat_dt_prop(node, "reg-offset", NULL);
+ if (val)
+ port->mapbase += be32_to_cpu(*val);
+ val = of_get_flat_dt_prop(node, "reg-shift", NULL);
+ if (val)
+ port->regshift = be32_to_cpu(*val);
+ big_endian = of_get_flat_dt_prop(node, "big-endian", NULL) != NULL ||
+ (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ of_get_flat_dt_prop(node, "native-endian", NULL) != NULL);
+ val = of_get_flat_dt_prop(node, "reg-io-width", NULL);
+ if (val) {
+ switch (be32_to_cpu(*val)) {
+ case 1:
+ port->iotype = UPIO_MEM;
+ break;
+ case 2:
+ port->iotype = UPIO_MEM16;
+ break;
+ case 4:
+ port->iotype = (big_endian) ? UPIO_MEM32BE : UPIO_MEM32;
+ break;
+ default:
+ pr_warn("[%s] unsupported reg-io-width\n", match->name);
+ return -EINVAL;
+ }
+ }
- early_console_dev.con->data = &early_console_dev;
- err = setup(&early_console_dev, NULL);
+ if (options) {
+ strlcpy(early_console_dev.options, options,
+ sizeof(early_console_dev.options));
+ }
+ earlycon_init(&early_console_dev, match->name);
+ err = match->setup(&early_console_dev, options);
if (err < 0)
return err;
if (!early_console_dev.con->write)
@@ -227,3 +283,5 @@ int __init of_setup_earlycon(unsigned long addr,
register_console(early_console_dev.con);
return 0;
}
+
+#endif /* CONFIG_OF_EARLY_FLATTREE */
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 88246f7e435a..2085a6cfa44b 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -395,8 +395,10 @@ static int ifx_spi_decode_spi_header(unsigned char *buffer, int *length,
if (h1 == 0 && h2 == 0) {
*received_cts = 0;
+ *more = 0;
return IFX_SPI_HEADER_0;
} else if (h1 == 0xffff && h2 == 0xffff) {
+ *more = 0;
/* spi_slave_cts remains as it was */
return IFX_SPI_HEADER_F;
}
@@ -688,6 +690,7 @@ static void ifx_spi_complete(void *ctx)
ifx_dev->rx_buffer + IFX_SPI_HEADER_OVERHEAD,
(size_t)actual_length);
} else {
+ more = 0;
dev_dbg(&ifx_dev->spi_dev->dev, "SPI transfer error %d",
ifx_dev->spi_msg.status);
}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9362f54c816c..231e7d5caf6c 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2166,7 +2166,8 @@ static int imx_serial_port_suspend(struct device *dev)
uart_suspend_port(&imx_reg, &sport->port);
- return 0;
+ /* Needed to enable clock in suspend_noirq */
+ return clk_prepare(sport->clk_ipg);
}
static int imx_serial_port_resume(struct device *dev)
@@ -2179,6 +2180,8 @@ static int imx_serial_port_resume(struct device *dev)
uart_resume_port(&imx_reg, &sport->port);
+ clk_unprepare(sport->clk_ipg);
+
return 0;
}
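
The asymmetry above relies on the two-stage clk API: clk_prepare() may sleep and so must run while interrupts are still enabled, whereas clk_enable()/clk_disable() are the atomic-safe halves. A schematic of the pairing being set up:

	clk_prepare(clk);	/* .suspend: sleeping allowed */
	clk_enable(clk);	/* .suspend_noirq: atomic-safe half */
	/* ... hardware access during the noirq phase ... */
	clk_disable(clk);
	clk_unprepare(clk);	/* .resume: sleeping allowed again */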
diff --git a/drivers/tty/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
index 524e86ab3cae..c5ddfe542451 100644
--- a/drivers/tty/serial/jsm/jsm_tty.c
+++ b/drivers/tty/serial/jsm/jsm_tty.c
@@ -529,7 +529,6 @@ void jsm_input(struct jsm_channel *ch)
int data_len;
unsigned long lock_flags;
int len = 0;
- int n = 0;
int s = 0;
int i = 0;
@@ -569,8 +568,7 @@ void jsm_input(struct jsm_channel *ch)
*If the device is not open, or CREAD is off, flush
*input data and return immediately.
*/
- if (!tp ||
- !(tp->termios.c_cflag & CREAD) ) {
+ if (!tp || !C_CREAD(tp)) {
jsm_dbg(READ, &ch->ch_bd->pci_dev,
"input. dropping %d bytes on port %d...\n",
@@ -598,16 +596,15 @@ void jsm_input(struct jsm_channel *ch)
jsm_dbg(READ, &ch->ch_bd->pci_dev, "start 2\n");
len = tty_buffer_request_room(port, data_len);
- n = len;
/*
- * n now contains the most amount of data we can copy,
+ * len now contains the most amount of data we can copy,
* bounded either by the flip buffer size or the amount
* of data the card actually has pending...
*/
- while (n) {
+ while (len) {
s = ((head >= tail) ? head : RQUEUESIZE) - tail;
- s = min(s, n);
+ s = min(s, len);
if (s <= 0)
break;
@@ -638,7 +635,7 @@ void jsm_input(struct jsm_channel *ch)
tty_insert_flip_string(port, ch->ch_rqueue + tail, s);
}
tail += s;
- n -= s;
+ len -= s;
/* Flip queue if needed */
tail &= rmask;
}
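
The copy loop above is the standard two-segment drain of a circular queue; annotated in generic form (names as in this function):

	while (len) {
		/* longest contiguous run: tail..head, or tail..end of ring */
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, len);		/* never copy more than requested */
		if (s <= 0)
			break;
		tty_insert_flip_string(port, ch->ch_rqueue + tail, s);
		tail += s;
		len -= s;
		tail &= rmask;			/* wrap back to the start */
	}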
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 0eeb64f2499c..68765f7c2645 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -47,59 +47,26 @@
#define BAUD_RATE 115200
#include <linux/serial_core.h>
-#include "m32r_sio.h"
#include "m32r_sio_reg.h"
-/*
- * Debugging.
- */
-#if 0
-#define DEBUG_AUTOCONF(fmt...) printk(fmt)
-#else
-#define DEBUG_AUTOCONF(fmt...) do { } while (0)
-#endif
-
-#if 0
-#define DEBUG_INTR(fmt...) printk(fmt)
-#else
-#define DEBUG_INTR(fmt...) do { } while (0)
-#endif
-
#define PASS_LIMIT 256
-#define BASE_BAUD 115200
-
/* Standard COM flags */
#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
-/*
- * SERIAL_PORT_DFNS tells us about built-in ports that have no
- * standard enumeration mechanism. Platforms that can find all
- * serial ports via mechanisms like ACPI or PCI need not supply it.
- */
+static const struct {
+ unsigned int port;
+ unsigned int irq;
+} old_serial_port[] = {
#if defined(CONFIG_PLAT_USRV)
-
-#define SERIAL_PORT_DFNS \
- /* UART CLK PORT IRQ FLAGS */ \
- { 0, BASE_BAUD, 0x3F8, PLD_IRQ_UART0, STD_COM_FLAGS }, /* ttyS0 */ \
- { 0, BASE_BAUD, 0x2F8, PLD_IRQ_UART1, STD_COM_FLAGS }, /* ttyS1 */
-
-#else /* !CONFIG_PLAT_USRV */
-
-#if defined(CONFIG_SERIAL_M32R_PLDSIO)
-#define SERIAL_PORT_DFNS \
- { 0, BASE_BAUD, ((unsigned long)PLD_ESIO0CR), PLD_IRQ_SIO0_RCV, \
- STD_COM_FLAGS }, /* ttyS0 */
+ /* PORT IRQ FLAGS */
+ { 0x3F8, PLD_IRQ_UART0 }, /* ttyS0 */
+ { 0x2F8, PLD_IRQ_UART1 }, /* ttyS1 */
+#elif defined(CONFIG_SERIAL_M32R_PLDSIO)
+ { ((unsigned long)PLD_ESIO0CR), PLD_IRQ_SIO0_RCV }, /* ttyS0 */
#else
-#define SERIAL_PORT_DFNS \
- { 0, BASE_BAUD, M32R_SIO_OFFSET, M32R_IRQ_SIO0_R, \
- STD_COM_FLAGS }, /* ttyS0 */
+ { M32R_SIO_OFFSET, M32R_IRQ_SIO0_R }, /* ttyS0 */
#endif
-
-#endif /* !CONFIG_PLAT_USRV */
-
-static struct old_serial_port old_serial_port[] = {
- SERIAL_PORT_DFNS
};
#define UART_NR ARRAY_SIZE(old_serial_port)
@@ -108,19 +75,7 @@ struct uart_sio_port {
struct uart_port port;
struct timer_list timer; /* "no irq" timer */
struct list_head list; /* ports on this IRQ */
- unsigned short rev;
- unsigned char acr;
unsigned char ier;
- unsigned char lcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
- unsigned char lsr_break_flag;
-
- /*
- * We provide a per-port pm hook.
- */
- void (*pm)(struct uart_port *port,
- unsigned int state, unsigned int old);
};
struct irq_info {
@@ -345,14 +300,8 @@ static void receive_chars(struct uart_sio_port *up, int *status)
*/
*status &= up->port.read_status_mask;
- if (up->port.line == up->port.cons->index) {
- /* Recover the break flag from console xmit */
- *status |= up->lsr_break_flag;
- up->lsr_break_flag = 0;
- }
-
if (*status & UART_LSR_BI) {
- DEBUG_INTR("handling break....");
+ pr_debug("handling break....\n");
flag = TTY_BREAK;
} else if (*status & UART_LSR_PE)
flag = TTY_PARITY;
@@ -413,7 +362,7 @@ static void transmit_chars(struct uart_sio_port *up)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&up->port);
- DEBUG_INTR("THRE...");
+ pr_debug("THRE...\n");
if (uart_circ_empty(xmit))
m32r_sio_stop_tx(&up->port);
@@ -425,7 +374,7 @@ static void transmit_chars(struct uart_sio_port *up)
static inline void m32r_sio_handle_port(struct uart_sio_port *up,
unsigned int status)
{
- DEBUG_INTR("status = %x...", status);
+ pr_debug("status = %x...\n", status);
if (status & 0x04)
receive_chars(up, &status);
@@ -453,7 +402,7 @@ static irqreturn_t m32r_sio_interrupt(int irq, void *dev_id)
struct list_head *l, *end = NULL;
int pass_counter = 0;
- DEBUG_INTR("m32r_sio_interrupt(%d)...", irq);
+ pr_debug("m32r_sio_interrupt(%d)...\n", irq);
#ifdef CONFIG_SERIAL_M32R_PLDSIO
// if (irq == PLD_IRQ_SIO0_SND)
@@ -493,7 +442,7 @@ static irqreturn_t m32r_sio_interrupt(int irq, void *dev_id)
spin_unlock(&i->lock);
- DEBUG_INTR("end.\n");
+ pr_debug("end.\n");
return IRQ_HANDLED;
}
@@ -782,20 +731,9 @@ static void m32r_sio_set_termios(struct uart_port *port,
serial_out(up, UART_IER, up->ier);
- up->lcr = cval; /* Save LCR */
spin_unlock_irqrestore(&up->port.lock, flags);
}
-static void m32r_sio_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate)
-{
- struct uart_sio_port *up =
- container_of(port, struct uart_sio_port, port);
-
- if (up->pm)
- up->pm(port, state, oldstate);
-}
-
/*
* Resource handling. This is complicated by the fact that resources
* depend on the port type. Maybe we should be claiming the standard
@@ -932,7 +870,6 @@ static struct uart_ops m32r_sio_pops = {
.startup = m32r_sio_startup,
.shutdown = m32r_sio_shutdown,
.set_termios = m32r_sio_set_termios,
- .pm = m32r_sio_pm,
.release_port = m32r_sio_release_port,
.request_port = m32r_sio_request_port,
.config_port = m32r_sio_config_port,
@@ -951,15 +888,14 @@ static void __init m32r_sio_init_ports(void)
return;
first = 0;
- for (i = 0, up = m32r_sio_ports; i < ARRAY_SIZE(old_serial_port);
- i++, up++) {
+ for (i = 0, up = m32r_sio_ports; i < UART_NR; i++, up++) {
up->port.iobase = old_serial_port[i].port;
up->port.irq = irq_canonicalize(old_serial_port[i].irq);
- up->port.uartclk = old_serial_port[i].baud_base * 16;
- up->port.flags = old_serial_port[i].flags;
- up->port.membase = old_serial_port[i].iomem_base;
- up->port.iotype = old_serial_port[i].io_type;
- up->port.regshift = old_serial_port[i].iomem_reg_shift;
+ up->port.uartclk = BAUD_RATE * 16;
+ up->port.flags = STD_COM_FLAGS;
+ up->port.membase = NULL;
+ up->port.iotype = 0;
+ up->port.regshift = 0;
up->port.ops = &m32r_sio_pops;
}
}
@@ -978,9 +914,6 @@ static void __init m32r_sio_register_ports(struct uart_driver *drv)
init_timer(&up->timer);
up->timer.function = m32r_sio_timeout;
- up->mcr_mask = ~0;
- up->mcr_force = 0;
-
uart_add_one_port(drv, &up->port);
}
}
@@ -1112,28 +1045,6 @@ static struct uart_driver m32r_sio_reg = {
.cons = M32R_SIO_CONSOLE,
};
-/**
- * m32r_sio_suspend_port - suspend one serial port
- * @line: serial line number
- *
- * Suspend one serial port.
- */
-void m32r_sio_suspend_port(int line)
-{
- uart_suspend_port(&m32r_sio_reg, &m32r_sio_ports[line].port);
-}
-
-/**
- * m32r_sio_resume_port - resume one serial port
- * @line: serial line number
- *
- * Resume one serial port.
- */
-void m32r_sio_resume_port(int line)
-{
- uart_resume_port(&m32r_sio_reg, &m32r_sio_ports[line].port);
-}
-
static int __init m32r_sio_init(void)
{
int ret, i;
@@ -1163,8 +1074,5 @@ static void __exit m32r_sio_exit(void)
module_init(m32r_sio_init);
module_exit(m32r_sio_exit);
-EXPORT_SYMBOL(m32r_sio_suspend_port);
-EXPORT_SYMBOL(m32r_sio_resume_port);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic M32R SIO serial driver");
diff --git a/drivers/tty/serial/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
deleted file mode 100644
index 8129824496c6..000000000000
--- a/drivers/tty/serial/m32r_sio.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * m32r_sio.h
- *
- * Driver for M32R serial ports
- *
- * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
- * Based on drivers/serial/8250.h.
- *
- * Copyright (C) 2001 Russell King.
- * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/pci.h>
-
-struct m32r_sio_probe {
- struct module *owner;
- int (*pci_init_one)(struct pci_dev *dev);
- void (*pci_remove_one)(struct pci_dev *dev);
- void (*pnp_init)(void);
-};
-
-int m32r_sio_register_probe(struct m32r_sio_probe *probe);
-void m32r_sio_unregister_probe(struct m32r_sio_probe *probe);
-void m32r_sio_get_irq_map(unsigned int *map);
-void m32r_sio_suspend_port(int line);
-void m32r_sio_resume_port(int line);
-
-struct old_serial_port {
- unsigned int uart;
- unsigned int baud_base;
- unsigned int port;
- unsigned int irq;
- unsigned int flags;
- unsigned char io_type;
- unsigned char __iomem *iomem_base;
- unsigned short iomem_reg_shift;
-};
-
-#define _INLINE_ inline
-
-#define PROBE_RSA (1 << 0)
-#define PROBE_ANY (~0)
-
-#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index b12a37bd37b6..024445aa0521 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -78,6 +78,7 @@
/* AML_UART_REG5 bits */
#define AML_UART_BAUD_MASK 0x7fffff
#define AML_UART_BAUD_USE BIT(23)
+#define AML_UART_BAUD_XTAL BIT(24)
#define AML_UART_PORT_NUM 6
#define AML_UART_DEV_NAME "ttyAML"
@@ -299,7 +300,12 @@ static void meson_uart_change_speed(struct uart_port *port, unsigned long baud)
val = readl(port->membase + AML_UART_REG5);
val &= ~AML_UART_BAUD_MASK;
- val = ((port->uartclk * 10 / (baud * 4) + 5) / 10) - 1;
+ if (port->uartclk == 24000000) {
+ val = ((port->uartclk / 3) / baud) - 1;
+ val |= AML_UART_BAUD_XTAL;
+ } else {
+ val = ((port->uartclk * 10 / (baud * 4) + 5) / 10) - 1;
+ }
val |= AML_UART_BAUD_USE;
writel(val, port->membase + AML_UART_REG5);
}
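
A worked check of the two divisor formulas above (clock values assumed for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int baud = 115200;
		unsigned int xtal = 24000000;	/* 24 MHz crystal path */
		unsigned int clk  = 48000000;	/* assumed generic clock */

		/* xtal path: fixed /3 prescaler, then a plain divide */
		unsigned int div_xtal = ((xtal / 3) / baud) - 1;	/* 68 */

		/* generic path: divide by 4*baud, round to nearest */
		unsigned int div_gen =
			((clk * 10 / (baud * 4) + 5) / 10) - 1;		/* 103 */

		printf("%u %u\n", div_xtal, div_gen);
		return 0;
	}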
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 8c3e51314470..3970d6a9aaca 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -346,7 +346,7 @@ static irqreturn_t mpc52xx_psc_handle_irq(struct uart_port *port)
return mpc5xxx_uart_process_int(port);
}
-static struct psc_ops mpc52xx_psc_ops = {
+static const struct psc_ops mpc52xx_psc_ops = {
.fifo_init = mpc52xx_psc_fifo_init,
.raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
.raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
@@ -376,7 +376,7 @@ static struct psc_ops mpc52xx_psc_ops = {
.get_mr1 = mpc52xx_psc_get_mr1,
};
-static struct psc_ops mpc5200b_psc_ops = {
+static const struct psc_ops mpc5200b_psc_ops = {
.fifo_init = mpc52xx_psc_fifo_init,
.raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
.raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
@@ -969,7 +969,7 @@ static u8 mpc5125_psc_get_mr1(struct uart_port *port)
return in_8(&PSC_5125(port)->mr1);
}
-static struct psc_ops mpc5125_psc_ops = {
+static const struct psc_ops mpc5125_psc_ops = {
.fifo_init = mpc5125_psc_fifo_init,
.raw_rx_rdy = mpc5125_psc_raw_rx_rdy,
.raw_tx_rdy = mpc5125_psc_raw_tx_rdy,
@@ -1004,7 +1004,7 @@ static struct psc_ops mpc5125_psc_ops = {
.get_mr1 = mpc5125_psc_get_mr1,
};
-static struct psc_ops mpc512x_psc_ops = {
+static const struct psc_ops mpc512x_psc_ops = {
.fifo_init = mpc512x_psc_fifo_init,
.raw_rx_rdy = mpc512x_psc_raw_rx_rdy,
.raw_tx_rdy = mpc512x_psc_raw_tx_rdy,
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index cadfd1cfae2b..4a3021bcc859 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -137,8 +137,6 @@ struct mpsc_port_info {
/* Internal driver state for this ctlr */
u8 ready;
u8 rcv_data;
- tcflag_t c_iflag; /* save termios->c_iflag */
- tcflag_t c_cflag; /* save termios->c_cflag */
/* Info passed in from platform */
u8 mirror_regs; /* Need to mirror regs? */
@@ -1407,9 +1405,6 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
ulong flags;
u32 chr_bits, stop_bits, par;
- pi->c_iflag = termios->c_iflag;
- pi->c_cflag = termios->c_cflag;
-
switch (termios->c_cflag & CSIZE) {
case CS5:
chr_bits = MPSC_MPCR_CL_5;
@@ -1870,12 +1865,12 @@ static int mpsc_shared_map_regs(struct platform_device *pd)
static void mpsc_shared_unmap_regs(void)
{
- if (!mpsc_shared_regs.mpsc_routing_base) {
+ if (mpsc_shared_regs.mpsc_routing_base) {
iounmap(mpsc_shared_regs.mpsc_routing_base);
release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
MPSC_ROUTING_REG_BLOCK_SIZE);
}
- if (!mpsc_shared_regs.sdma_intr_base) {
+ if (mpsc_shared_regs.sdma_intr_base) {
iounmap(mpsc_shared_regs.sdma_intr_base);
release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
MPSC_SDMA_INTR_REG_BLOCK_SIZE);
@@ -1891,44 +1886,39 @@ static void mpsc_shared_unmap_regs(void)
static int mpsc_shared_drv_probe(struct platform_device *dev)
{
struct mpsc_shared_pdata *pdata;
- int rc = -ENODEV;
-
- if (dev->id == 0) {
- rc = mpsc_shared_map_regs(dev);
- if (!rc) {
- pdata = (struct mpsc_shared_pdata *)
- dev_get_platdata(&dev->dev);
-
- mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
- mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
- mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
- mpsc_shared_regs.SDMA_INTR_CAUSE_m =
- pdata->intr_cause_val;
- mpsc_shared_regs.SDMA_INTR_MASK_m =
- pdata->intr_mask_val;
-
- rc = 0;
- }
- }
+ int rc;
- return rc;
+ if (dev->id != 0)
+ return -ENODEV;
+
+ rc = mpsc_shared_map_regs(dev);
+ if (rc)
+ return rc;
+
+ pdata = dev_get_platdata(&dev->dev);
+
+ mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
+ mpsc_shared_regs.MPSC_RCRR_m = pdata->rcrr_val;
+ mpsc_shared_regs.MPSC_TCRR_m = pdata->tcrr_val;
+ mpsc_shared_regs.SDMA_INTR_CAUSE_m = pdata->intr_cause_val;
+ mpsc_shared_regs.SDMA_INTR_MASK_m = pdata->intr_mask_val;
+
+ return 0;
}
static int mpsc_shared_drv_remove(struct platform_device *dev)
{
- int rc = -ENODEV;
+ if (dev->id != 0)
+ return -ENODEV;
- if (dev->id == 0) {
- mpsc_shared_unmap_regs();
- mpsc_shared_regs.MPSC_MRR_m = 0;
- mpsc_shared_regs.MPSC_RCRR_m = 0;
- mpsc_shared_regs.MPSC_TCRR_m = 0;
- mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
- mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
- rc = 0;
- }
+ mpsc_shared_unmap_regs();
+ mpsc_shared_regs.MPSC_MRR_m = 0;
+ mpsc_shared_regs.MPSC_RCRR_m = 0;
+ mpsc_shared_regs.MPSC_TCRR_m = 0;
+ mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
+ mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
- return rc;
+ return 0;
}
static struct platform_driver mpsc_shared_driver = {
@@ -1979,10 +1969,6 @@ static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
pi->sdma_base_p = r->start;
} else {
mpsc_resource_err("SDMA base");
- if (pi->mpsc_base) {
- iounmap(pi->mpsc_base);
- pi->mpsc_base = NULL;
- }
goto err;
}
@@ -1993,33 +1979,33 @@ static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
pi->brg_base_p = r->start;
} else {
mpsc_resource_err("BRG base");
- if (pi->mpsc_base) {
- iounmap(pi->mpsc_base);
- pi->mpsc_base = NULL;
- }
- if (pi->sdma_base) {
- iounmap(pi->sdma_base);
- pi->sdma_base = NULL;
- }
goto err;
}
return 0;
err:
+ if (pi->sdma_base) {
+ iounmap(pi->sdma_base);
+ pi->sdma_base = NULL;
+ }
+ if (pi->mpsc_base) {
+ iounmap(pi->mpsc_base);
+ pi->mpsc_base = NULL;
+ }
return -ENOMEM;
}
static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
{
- if (!pi->mpsc_base) {
+ if (pi->mpsc_base) {
iounmap(pi->mpsc_base);
release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
}
- if (!pi->sdma_base) {
+ if (pi->sdma_base) {
iounmap(pi->sdma_base);
release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
}
- if (!pi->brg_base) {
+ if (pi->brg_base) {
iounmap(pi->brg_base);
release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
}
@@ -2073,36 +2059,37 @@ static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
static int mpsc_drv_probe(struct platform_device *dev)
{
- struct mpsc_port_info *pi;
- int rc = -ENODEV;
-
- pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);
-
- if (dev->id < MPSC_NUM_CTLRS) {
- pi = &mpsc_ports[dev->id];
-
- rc = mpsc_drv_map_regs(pi, dev);
- if (!rc) {
- mpsc_drv_get_platform_data(pi, dev, dev->id);
- pi->port.dev = &dev->dev;
-
- rc = mpsc_make_ready(pi);
- if (!rc) {
- spin_lock_init(&pi->tx_lock);
- rc = uart_add_one_port(&mpsc_reg, &pi->port);
- if (!rc) {
- rc = 0;
- } else {
- mpsc_release_port((struct uart_port *)
- pi);
- mpsc_drv_unmap_regs(pi);
- }
- } else {
- mpsc_drv_unmap_regs(pi);
- }
- }
- }
+ struct mpsc_port_info *pi;
+ int rc;
+
+ dev_dbg(&dev->dev, "mpsc_drv_probe: Adding MPSC %d\n", dev->id);
+
+ if (dev->id >= MPSC_NUM_CTLRS)
+ return -ENODEV;
+
+ pi = &mpsc_ports[dev->id];
+
+ rc = mpsc_drv_map_regs(pi, dev);
+ if (rc)
+ return rc;
+ mpsc_drv_get_platform_data(pi, dev, dev->id);
+ pi->port.dev = &dev->dev;
+
+ rc = mpsc_make_ready(pi);
+ if (rc)
+ goto err_unmap;
+
+ spin_lock_init(&pi->tx_lock);
+ rc = uart_add_one_port(&mpsc_reg, &pi->port);
+ if (rc)
+ goto err_relport;
+
+ return 0;
+err_relport:
+ mpsc_release_port(&pi->port);
+err_unmap:
+ mpsc_drv_unmap_regs(pi);
return rc;
}
@@ -2124,19 +2111,22 @@ static int __init mpsc_drv_init(void)
memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
rc = uart_register_driver(&mpsc_reg);
- if (!rc) {
- rc = platform_driver_register(&mpsc_shared_driver);
- if (!rc) {
- rc = platform_driver_register(&mpsc_driver);
- if (rc) {
- platform_driver_unregister(&mpsc_shared_driver);
- uart_unregister_driver(&mpsc_reg);
- }
- } else {
- uart_unregister_driver(&mpsc_reg);
- }
- }
+ if (rc)
+ return rc;
+
+ rc = platform_driver_register(&mpsc_shared_driver);
+ if (rc)
+ goto err_unreg_uart;
+ rc = platform_driver_register(&mpsc_driver);
+ if (rc)
+ goto err_unreg_plat;
+
+ return 0;
+err_unreg_plat:
+ platform_driver_unregister(&mpsc_shared_driver);
+err_unreg_uart:
+ uart_unregister_driver(&mpsc_reg);
return rc;
}
device_initcall(mpsc_drv_init);
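
The probe, remove, and init rewrites above all follow the same early-return/goto-unwind idiom that replaces the nested success ladders. A minimal sketch of the pattern (step_a()/undo_a() and friends are hypothetical placeholders):

/* hypothetical stubs standing in for the real setup steps */
static int step_a(void) { return 0; }   /* e.g. map registers */
static int step_b(void) { return 0; }   /* e.g. make the port ready */
static int step_c(void) { return 0; }   /* e.g. register the port */
static void undo_a(void) { }
static void undo_b(void) { }

static int example_probe(void)
{
    int rc;

    rc = step_a();
    if (rc)
        return rc;      /* nothing to undo yet */

    rc = step_b();
    if (rc)
        goto err_undo_a;

    rc = step_c();
    if (rc)
        goto err_undo_b;

    return 0;

err_undo_b:
    undo_b();
err_undo_a:
    undo_a();
    return rc;
}

Each failure exit falls through exactly the undo steps for work already done, which is why the error labels are stacked in reverse order of setup.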
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index dcde955475dc..96d3ce8dc2dc 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1478,7 +1478,6 @@ msm_serial_early_console_setup(struct earlycon_device *device, const char *opt)
device->con->write = msm_serial_early_write;
return 0;
}
-EARLYCON_DECLARE(msm_serial, msm_serial_early_console_setup);
OF_EARLYCON_DECLARE(msm_serial, "qcom,msm-uart",
msm_serial_early_console_setup);
@@ -1500,7 +1499,6 @@ msm_serial_early_console_setup_dm(struct earlycon_device *device,
device->con->write = msm_serial_early_write_dm;
return 0;
}
-EARLYCON_DECLARE(msm_serial_dm, msm_serial_early_console_setup_dm);
OF_EARLYCON_DECLARE(msm_serial_dm, "qcom,msm-uartdm",
msm_serial_early_console_setup_dm);
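
Dropping the EARLYCON_DECLARE() lines works because, by this point (assuming the serial_core.h of this era), EARLYCON_DECLARE is just a thin wrapper that emits an OF_EARLYCON_DECLARE entry with an empty compatible string, so keeping both produced two table entries under the same name:

/* a sketch of the relationship, assuming the serial_core.h definition
 * of this era -- not code from this patch */
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)

Command-line matching such as earlycon=msm_serial,<mmio-addr> should therefore still resolve through the remaining OF entry by name.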
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
new file mode 100644
index 000000000000..0ff27818bb87
--- /dev/null
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -0,0 +1,650 @@
+/*
+* ***************************************************************************
+* Copyright (C) 2015 Marvell International Ltd.
+* ***************************************************************************
+* This program is free software: you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the Free
+* Software Foundation, either version 2 of the License, or any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+* ***************************************************************************
+*/
+
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+
+/* Register Map */
+#define UART_RBR 0x00
+#define RBR_BRK_DET BIT(15)
+#define RBR_FRM_ERR_DET BIT(14)
+#define RBR_PAR_ERR_DET BIT(13)
+#define RBR_OVR_ERR_DET BIT(12)
+
+#define UART_TSH 0x04
+
+#define UART_CTRL 0x08
+#define CTRL_SOFT_RST BIT(31)
+#define CTRL_TXFIFO_RST BIT(15)
+#define CTRL_RXFIFO_RST BIT(14)
+#define CTRL_ST_MIRR_EN BIT(13)
+#define CTRL_LPBK_EN BIT(12)
+#define CTRL_SND_BRK_SEQ BIT(11)
+#define CTRL_PAR_EN BIT(10)
+#define CTRL_TWO_STOP BIT(9)
+#define CTRL_TX_HFL_INT BIT(8)
+#define CTRL_RX_HFL_INT BIT(7)
+#define CTRL_TX_EMP_INT BIT(6)
+#define CTRL_TX_RDY_INT BIT(5)
+#define CTRL_RX_RDY_INT BIT(4)
+#define CTRL_BRK_DET_INT BIT(3)
+#define CTRL_FRM_ERR_INT BIT(2)
+#define CTRL_PAR_ERR_INT BIT(1)
+#define CTRL_OVR_ERR_INT BIT(0)
+#define CTRL_RX_INT (CTRL_RX_RDY_INT | CTRL_BRK_DET_INT |\
+ CTRL_FRM_ERR_INT | CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
+
+#define UART_STAT 0x0c
+#define STAT_TX_FIFO_EMP BIT(13)
+#define STAT_RX_FIFO_EMP BIT(12)
+#define STAT_TX_FIFO_FUL BIT(11)
+#define STAT_TX_FIFO_HFL BIT(10)
+#define STAT_RX_TOGL BIT(9)
+#define STAT_RX_FIFO_FUL BIT(8)
+#define STAT_RX_FIFO_HFL BIT(7)
+#define STAT_TX_EMP BIT(6)
+#define STAT_TX_RDY BIT(5)
+#define STAT_RX_RDY BIT(4)
+#define STAT_BRK_DET BIT(3)
+#define STAT_FRM_ERR BIT(2)
+#define STAT_PAR_ERR BIT(1)
+#define STAT_OVR_ERR BIT(0)
+#define STAT_BRK_ERR (STAT_BRK_DET | STAT_FRM_ERR \
+ | STAT_PAR_ERR | STAT_OVR_ERR)
+
+#define UART_BRDV 0x10
+
+#define MVEBU_NR_UARTS 1
+
+#define MVEBU_UART_TYPE "mvebu-uart"
+
+static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];
+
+struct mvebu_uart_data {
+ struct uart_port *port;
+ struct clk *clk;
+};
+
+/* Core UART Driver Operations */
+static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
+{
+ unsigned long flags;
+ unsigned int st;
+
+ spin_lock_irqsave(&port->lock, flags);
+ st = readl(port->membase + UART_STAT);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void mvebu_uart_set_mctrl(struct uart_port *port,
+ unsigned int mctrl)
+{
+/*
+ * Even if we do not support configuring the modem control lines, this
+ * function must be provided to the serial core
+ */
+}
+
+static void mvebu_uart_stop_tx(struct uart_port *port)
+{
+ unsigned int ctl = readl(port->membase + UART_CTRL);
+
+ ctl &= ~CTRL_TX_RDY_INT;
+ writel(ctl, port->membase + UART_CTRL);
+}
+
+static void mvebu_uart_start_tx(struct uart_port *port)
+{
+ unsigned int ctl = readl(port->membase + UART_CTRL);
+
+ ctl |= CTRL_TX_RDY_INT;
+ writel(ctl, port->membase + UART_CTRL);
+}
+
+static void mvebu_uart_stop_rx(struct uart_port *port)
+{
+ unsigned int ctl = readl(port->membase + UART_CTRL);
+
+ ctl &= ~CTRL_RX_INT;
+ writel(ctl, port->membase + UART_CTRL);
+}
+
+static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
+{
+ unsigned int ctl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ ctl = readl(port->membase + UART_CTRL);
+ if (brk == -1)
+ ctl |= CTRL_SND_BRK_SEQ;
+ else
+ ctl &= ~CTRL_SND_BRK_SEQ;
+ writel(ctl, port->membase + UART_CTRL);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
+{
+ struct tty_port *tport = &port->state->port;
+ unsigned char ch = 0;
+ char flag = 0;
+
+ do {
+ if (status & STAT_RX_RDY) {
+ ch = readl(port->membase + UART_RBR);
+ ch &= 0xff;
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (status & STAT_PAR_ERR)
+ port->icount.parity++;
+ }
+
+ if (status & STAT_BRK_DET) {
+ port->icount.brk++;
+ status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
+ if (uart_handle_break(port))
+ goto ignore_char;
+ }
+
+ if (status & STAT_OVR_ERR)
+ port->icount.overrun++;
+
+ if (status & STAT_FRM_ERR)
+ port->icount.frame++;
+
+ if (uart_handle_sysrq_char(port, ch))
+ goto ignore_char;
+
+ if (status & port->ignore_status_mask & STAT_PAR_ERR)
+ status &= ~STAT_RX_RDY;
+
+ status &= port->read_status_mask;
+
+ if (status & STAT_PAR_ERR)
+ flag = TTY_PARITY;
+
+ status &= ~port->ignore_status_mask;
+
+ if (status & STAT_RX_RDY)
+ tty_insert_flip_char(tport, ch, flag);
+
+ if (status & STAT_BRK_DET)
+ tty_insert_flip_char(tport, 0, TTY_BREAK);
+
+ if (status & STAT_FRM_ERR)
+ tty_insert_flip_char(tport, 0, TTY_FRAME);
+
+ if (status & STAT_OVR_ERR)
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+
+ignore_char:
+ status = readl(port->membase + UART_STAT);
+ } while (status & (STAT_RX_RDY | STAT_BRK_DET));
+
+ tty_flip_buffer_push(tport);
+}
+
+static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int count;
+ unsigned int st;
+
+ if (port->x_char) {
+ writel(port->x_char, port->membase + UART_TSH);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ mvebu_uart_stop_tx(port);
+ return;
+ }
+
+ for (count = 0; count < port->fifosize; count++) {
+ writel(xmit->buf[xmit->tail], port->membase + UART_TSH);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+
+ if (uart_circ_empty(xmit))
+ break;
+
+ st = readl(port->membase + UART_STAT);
+ if (st & STAT_TX_FIFO_FUL)
+ break;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ mvebu_uart_stop_tx(port);
+}
+
+static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
+{
+ struct uart_port *port = (struct uart_port *)dev_id;
+ unsigned int st = readl(port->membase + UART_STAT);
+
+ if (st & (STAT_RX_RDY | STAT_OVR_ERR | STAT_FRM_ERR | STAT_BRK_DET))
+ mvebu_uart_rx_chars(port, st);
+
+ if (st & STAT_TX_RDY)
+ mvebu_uart_tx_chars(port, st);
+
+ return IRQ_HANDLED;
+}
+
+static int mvebu_uart_startup(struct uart_port *port)
+{
+ int ret;
+
+ writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
+ port->membase + UART_CTRL);
+ udelay(1);
+ writel(CTRL_RX_INT, port->membase + UART_CTRL);
+
+ ret = request_irq(port->irq, mvebu_uart_isr, port->irqflags, "serial",
+ port);
+ if (ret) {
+ dev_err(port->dev, "failed to request irq\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mvebu_uart_shutdown(struct uart_port *port)
+{
+ writel(0, port->membase + UART_CTRL);
+}
+
+static void mvebu_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ port->read_status_mask = STAT_RX_RDY | STAT_OVR_ERR |
+ STAT_TX_RDY | STAT_TX_FIFO_FUL;
+
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;
+
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |=
+ STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;
+
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
+
+ if (old)
+ tty_termios_copy_hw(termios, old);
+
+ baud = uart_get_baud_rate(port, termios, old, 0, 460800);
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *mvebu_uart_type(struct uart_port *port)
+{
+ return MVEBU_UART_TYPE;
+}
+
+static void mvebu_uart_release_port(struct uart_port *port)
+{
+ /* Nothing to do here */
+}
+
+static int mvebu_uart_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int mvebu_uart_get_poll_char(struct uart_port *port)
+{
+ unsigned int st = readl(port->membase + UART_STAT);
+
+ if (!(st & STAT_RX_RDY))
+ return NO_POLL_CHAR;
+
+ return readl(port->membase + UART_RBR);
+}
+
+static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
+{
+ unsigned int st;
+
+ for (;;) {
+ st = readl(port->membase + UART_STAT);
+
+ if (!(st & STAT_TX_FIFO_FUL))
+ break;
+
+ udelay(1);
+ }
+
+ writel(c, port->membase + UART_TSH);
+}
+#endif
+
+static const struct uart_ops mvebu_uart_ops = {
+ .tx_empty = mvebu_uart_tx_empty,
+ .set_mctrl = mvebu_uart_set_mctrl,
+ .get_mctrl = mvebu_uart_get_mctrl,
+ .stop_tx = mvebu_uart_stop_tx,
+ .start_tx = mvebu_uart_start_tx,
+ .stop_rx = mvebu_uart_stop_rx,
+ .break_ctl = mvebu_uart_break_ctl,
+ .startup = mvebu_uart_startup,
+ .shutdown = mvebu_uart_shutdown,
+ .set_termios = mvebu_uart_set_termios,
+ .type = mvebu_uart_type,
+ .release_port = mvebu_uart_release_port,
+ .request_port = mvebu_uart_request_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = mvebu_uart_get_poll_char,
+ .poll_put_char = mvebu_uart_put_poll_char,
+#endif
+};
+
+/* Console Driver Operations */
+
+#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
+/* Early Console */
+static void mvebu_uart_putc(struct uart_port *port, int c)
+{
+ unsigned int st;
+
+ for (;;) {
+ st = readl(port->membase + UART_STAT);
+ if (!(st & STAT_TX_FIFO_FUL))
+ break;
+ }
+
+ writel(c, port->membase + UART_TSH);
+
+ for (;;) {
+ st = readl(port->membase + UART_STAT);
+ if (st & STAT_TX_FIFO_EMP)
+ break;
+ }
+}
+
+static void mvebu_uart_putc_early_write(struct console *con,
+ const char *s,
+ unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, mvebu_uart_putc);
+}
+
+static int __init
+mvebu_uart_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = mvebu_uart_putc_early_write;
+
+ return 0;
+}
+
+EARLYCON_DECLARE(ar3700_uart, mvebu_uart_early_console_setup);
+OF_EARLYCON_DECLARE(ar3700_uart, "marvell,armada-3700-uart",
+ mvebu_uart_early_console_setup);
+
+static void wait_for_xmitr(struct uart_port *port)
+{
+ u32 val;
+
+ readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+ (val & STAT_TX_EMP), 1, 10000);
+}
+
+static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
+{
+ wait_for_xmitr(port);
+ writel(ch, port->membase + UART_TSH);
+}
+
+static void mvebu_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &mvebu_uart_ports[co->index];
+ unsigned long flags;
+ unsigned int ier;
+ int locked = 1;
+
+ if (oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+
+ ier = readl(port->membase + UART_CTRL) &
+ (CTRL_RX_INT | CTRL_TX_RDY_INT);
+ writel(0, port->membase + UART_CTRL);
+
+ uart_console_write(port, s, count, mvebu_uart_console_putchar);
+
+ wait_for_xmitr(port);
+
+ if (ier)
+ writel(ier, port->membase + UART_CTRL);
+
+ if (locked)
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int mvebu_uart_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= MVEBU_NR_UARTS)
+ return -EINVAL;
+
+ port = &mvebu_uart_ports[co->index];
+
+ if (!port->mapbase || !port->membase) {
+ pr_debug("console on ttyMV%i not present\n", co->index);
+ return -ENODEV;
+ }
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver mvebu_uart_driver;
+
+static struct console mvebu_uart_console = {
+ .name = "ttyMV",
+ .write = mvebu_uart_console_write,
+ .device = uart_console_device,
+ .setup = mvebu_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &mvebu_uart_driver,
+};
+
+static int __init mvebu_uart_console_init(void)
+{
+ register_console(&mvebu_uart_console);
+ return 0;
+}
+
+console_initcall(mvebu_uart_console_init);
+
+#endif /* CONFIG_SERIAL_MVEBU_CONSOLE */
+
+static struct uart_driver mvebu_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "mvebu_serial",
+ .dev_name = "ttyMV",
+ .nr = MVEBU_NR_UARTS,
+#ifdef CONFIG_SERIAL_MVEBU_CONSOLE
+ .cons = &mvebu_uart_console,
+#endif
+};
+
+static int mvebu_uart_probe(struct platform_device *pdev)
+{
+ struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ struct uart_port *port;
+ struct mvebu_uart_data *data;
+ int ret;
+
+ if (!reg || !irq) {
+ dev_err(&pdev->dev, "no registers/irq defined\n");
+ return -EINVAL;
+ }
+
+ port = &mvebu_uart_ports[0];
+
+ spin_lock_init(&port->lock);
+
+ port->dev = &pdev->dev;
+ port->type = PORT_MVEBU;
+ port->ops = &mvebu_uart_ops;
+ port->regshift = 0;
+
+ port->fifosize = 32;
+ port->iotype = UPIO_MEM32;
+ port->flags = UPF_FIXED_PORT;
+ port->line = 0; /* single port: force line number to 0 */
+
+ port->irq = irq->start;
+ port->irqflags = 0;
+ port->mapbase = reg->start;
+
+ port->membase = devm_ioremap_resource(&pdev->dev, reg);
+ if (IS_ERR(port->membase))
+ return PTR_ERR(port->membase);
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->port = port;
+
+ port->private_data = data;
+ platform_set_drvdata(pdev, data);
+
+ ret = uart_add_one_port(&mvebu_uart_driver, port);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int mvebu_uart_remove(struct platform_device *pdev)
+{
+ struct mvebu_uart_data *data = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&mvebu_uart_driver, data->port);
+ data->port->private_data = NULL;
+ data->port->mapbase = 0;
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id mvebu_uart_of_match[] = {
+ { .compatible = "marvell,armada-3700-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mvebu_uart_of_match);
+
+static struct platform_driver mvebu_uart_platform_driver = {
+ .probe = mvebu_uart_probe,
+ .remove = mvebu_uart_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mvebu-uart",
+ .of_match_table = of_match_ptr(mvebu_uart_of_match),
+ },
+};
+
+static int __init mvebu_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&mvebu_uart_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&mvebu_uart_platform_driver);
+ if (ret)
+ uart_unregister_driver(&mvebu_uart_driver);
+
+ return ret;
+}
+
+static void __exit mvebu_uart_exit(void)
+{
+ platform_driver_unregister(&mvebu_uart_platform_driver);
+ uart_unregister_driver(&mvebu_uart_driver);
+}
+
+arch_initcall(mvebu_uart_init);
+module_exit(mvebu_uart_exit);
+
+MODULE_AUTHOR("Wilson Ding <dingwei@marvell.com>");
+MODULE_DESCRIPTION("Marvell Armada-3700 Serial Driver");
+MODULE_LICENSE("GPL");
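
Note that mvebu_uart_set_termios() calls tty_termios_copy_hw() and never writes UART_BRDV, so the line speed stays whatever the boot firmware programmed; user space can only toggle software-side settings. A minimal user-space smoke test, assuming the /dev/ttyMV0 node this driver creates ("ttyMV" is its dev_name, index 0 its only port):

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/ttyMV0", O_RDWR | O_NOCTTY);
    struct termios tio;

    if (fd < 0) {
        perror("open /dev/ttyMV0");
        return 1;
    }
    tcgetattr(fd, &tio);
    cfmakeraw(&tio);        /* baud is left to the firmware setting */
    tcsetattr(fd, TCSANOW, &tio);
    write(fd, "hello\r\n", 7);
    close(fd);
    return 0;
}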
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index fa49eb1e2fa2..a2a529994ba5 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1870,7 +1870,7 @@ static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
.driver = {
- .name = DRIVER_NAME,
+ .name = OMAP_SERIAL_DRIVER_NAME,
.pm = &serial_omap_dev_pm_ops,
.of_match_table = of_match_ptr(omap_serial_of_match),
},
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index d72cd736bdc6..ac7f8df54406 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -601,14 +601,21 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
unsigned int ufcon, ch, flag, ufstat, uerstat;
+ unsigned int fifocnt = 0;
int max_count = port->fifosize;
while (max_count-- > 0) {
- ufcon = rd_regl(port, S3C2410_UFCON);
- ufstat = rd_regl(port, S3C2410_UFSTAT);
-
- if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
- break;
+ /*
+ * Receive all characters known to be in FIFO
+ * before reading FIFO level again
+ */
+ if (fifocnt == 0) {
+ ufstat = rd_regl(port, S3C2410_UFSTAT);
+ fifocnt = s3c24xx_serial_rx_fifocnt(ourport, ufstat);
+ if (fifocnt == 0)
+ break;
+ }
+ fifocnt--;
uerstat = rd_regl(port, S3C2410_UERSTAT);
ch = rd_regb(port, S3C2410_URXH);
@@ -623,6 +630,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
}
} else {
if (txe) {
+ ufcon = rd_regl(port, S3C2410_UFCON);
ufcon |= S3C2410_UFCON_RESETRX;
wr_regl(port, S3C2410_UFCON, ufcon);
rx_enabled(port) = 1;
@@ -2451,7 +2459,6 @@ static int __init s3c2410_early_console_setup(struct earlycon_device *device,
}
OF_EARLYCON_DECLARE(s3c2410, "samsung,s3c2410-uart",
s3c2410_early_console_setup);
-EARLYCON_DECLARE(s3c2410, s3c2410_early_console_setup);
/* S3C2412, S3C2440, S3C64xx */
static struct samsung_early_console_data s3c2440_early_console_data = {
@@ -2470,9 +2477,6 @@ OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
s3c2440_early_console_setup);
OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
s3c2440_early_console_setup);
-EARLYCON_DECLARE(s3c2412, s3c2440_early_console_setup);
-EARLYCON_DECLARE(s3c2440, s3c2440_early_console_setup);
-EARLYCON_DECLARE(s3c6400, s3c2440_early_console_setup);
/* S5PV210, EXYNOS */
static struct samsung_early_console_data s5pv210_early_console_data = {
@@ -2489,8 +2493,6 @@ OF_EARLYCON_DECLARE(s5pv210, "samsung,s5pv210-uart",
s5pv210_early_console_setup);
OF_EARLYCON_DECLARE(exynos4210, "samsung,exynos4210-uart",
s5pv210_early_console_setup);
-EARLYCON_DECLARE(s5pv210, s5pv210_early_console_setup);
-EARLYCON_DECLARE(exynos4210, s5pv210_early_console_setup);
#endif
MODULE_ALIAS("platform:samsung-uart");
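
The reworked drain loop reads S3C2410_UFSTAT once per batch and then pops the number of characters it already knows are queued, rather than re-reading the FIFO level for every character; UFCON is likewise only read on the error path that actually needs it. A generic sketch of the batching pattern (rx_fifo_level()/rx_pop() are hypothetical stand-ins for rd_regl()/rd_regb()):

/* hypothetical register helpers, stubbed out for illustration */
static unsigned int rx_fifo_level(void) { return 0; }
static unsigned int rx_pop(void) { return 0; }

static void drain_rx(int max_count)
{
    unsigned int fifocnt = 0;

    while (max_count-- > 0) {
        /* refresh the level only when the known batch is used up */
        if (fifocnt == 0) {
            fifocnt = rx_fifo_level();
            if (fifocnt == 0)
                break;
        }
        fifocnt--;
        (void)rx_pop(); /* hand the character to the tty layer here */
    }
}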
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 13f8d5f70272..025a4264430e 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -196,14 +196,14 @@
* or (IO6)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_CTS_BIT (1 << 0) /* CTS */
-#define SC16IS7XX_MSR_DSR_BIT (1 << 1) /* DSR (IO4)
+#define SC16IS7XX_MSR_CTS_BIT (1 << 4) /* CTS */
+#define SC16IS7XX_MSR_DSR_BIT (1 << 5) /* DSR (IO4)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_RI_BIT (1 << 2) /* RI (IO7)
+#define SC16IS7XX_MSR_RI_BIT (1 << 6) /* RI (IO7)
* - only on 75x/76x
*/
-#define SC16IS7XX_MSR_CD_BIT (1 << 3) /* CD (IO6)
+#define SC16IS7XX_MSR_CD_BIT (1 << 7) /* CD (IO6)
* - only on 75x/76x
*/
#define SC16IS7XX_MSR_DELTA_MASK 0x0F /* Any of the delta bits! */
@@ -240,7 +240,7 @@
/* IOControl register bits (Only 750/760) */
#define SC16IS7XX_IOCONTROL_LATCH_BIT (1 << 0) /* Enable input latching */
-#define SC16IS7XX_IOCONTROL_GPIO_BIT (1 << 1) /* Enable GPIO[7:4] */
+#define SC16IS7XX_IOCONTROL_MODEM_BIT (1 << 1) /* Enable GPIO[7:4] as modem pins */
#define SC16IS7XX_IOCONTROL_SRESET_BIT (1 << 3) /* Software Reset */
/* EFCR register bits */
@@ -687,7 +687,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
case SC16IS7XX_IIR_CTSRTS_SRC:
msr = sc16is7xx_port_read(port, SC16IS7XX_MSR_REG);
uart_handle_cts_change(port,
- !!(msr & SC16IS7XX_MSR_CTS_BIT));
+ !!(msr & SC16IS7XX_MSR_DCTS_BIT));
break;
case SC16IS7XX_IIR_THRI_SRC:
sc16is7xx_handle_tx(port);
@@ -761,12 +761,20 @@ static void sc16is7xx_reg_proc(struct kthread_work *ws)
memset(&one->config, 0, sizeof(one->config));
spin_unlock_irqrestore(&one->port.lock, irqflags);
- if (config.flags & SC16IS7XX_RECONF_MD)
+ if (config.flags & SC16IS7XX_RECONF_MD) {
sc16is7xx_port_update(&one->port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_LOOP_BIT,
(one->port.mctrl & TIOCM_LOOP) ?
SC16IS7XX_MCR_LOOP_BIT : 0);
-
+ sc16is7xx_port_update(&one->port, SC16IS7XX_MCR_REG,
+ SC16IS7XX_MCR_RTS_BIT,
+ (one->port.mctrl & TIOCM_RTS) ?
+ SC16IS7XX_MCR_RTS_BIT : 0);
+ sc16is7xx_port_update(&one->port, SC16IS7XX_MCR_REG,
+ SC16IS7XX_MCR_DTR_BIT,
+ (one->port.mctrl & TIOCM_DTR) ?
+ SC16IS7XX_MCR_DTR_BIT : 0);
+ }
if (config.flags & SC16IS7XX_RECONF_IER)
sc16is7xx_port_update(&one->port, SC16IS7XX_IER_REG,
config.ier_clear, 0);
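
The corrected MSR bit positions follow the classic 16550 layout: bits 0-3 are the "delta" flags that latch a change since the last read (which is why SC16IS7XX_MSR_DELTA_MASK stays 0x0F), and bits 4-7 carry the current line states. A sketch of how the two halves work together (generic names, not the driver's):

#include <stdbool.h>
#include <stdint.h>

#define MSR_DCTS (1 << 0)   /* CTS changed since last MSR read */
#define MSR_CTS  (1 << 4)   /* current CTS level */

/* hypothetical helper: react to a modem-status interrupt */
static bool cts_changed(uint8_t msr, bool *cts_now)
{
    *cts_now = msr & MSR_CTS;   /* state lives in the high nibble */
    return msr & MSR_DCTS;      /* change notification in the low one */
}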
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index b1f54ab1818c..a126a603b083 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -171,14 +171,12 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
*/
uart_change_speed(tty, state, NULL);
- if (init_hw) {
- /*
- * Setup the RTS and DTR signals once the
- * port is open and ready to respond.
- */
- if (tty->termios.c_cflag & CBAUD)
- uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
- }
+ /*
+ * Setup the RTS and DTR signals once the
+ * port is open and ready to respond.
+ */
+ if (init_hw && C_BAUD(tty))
+ uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
}
/*
@@ -240,7 +238,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (uart_console(uport) && tty)
uport->cons->cflag = tty->termios.c_cflag;
- if (!tty || (tty->termios.c_cflag & HUPCL))
+ if (!tty || C_HUPCL(tty))
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
uart_port_shutdown(port);
@@ -485,12 +483,15 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
spin_unlock_irq(&uport->lock);
}
-static inline int __uart_put_char(struct uart_port *port,
- struct circ_buf *circ, unsigned char c)
+static int uart_put_char(struct tty_struct *tty, unsigned char c)
{
+ struct uart_state *state = tty->driver_data;
+ struct uart_port *port = state->uart_port;
+ struct circ_buf *circ;
unsigned long flags;
int ret = 0;
+ circ = &state->xmit;
if (!circ->buf)
return 0;
@@ -504,13 +505,6 @@ static inline int __uart_put_char(struct uart_port *port,
return ret;
}
-static int uart_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct uart_state *state = tty->driver_data;
-
- return __uart_put_char(state->uart_port, &state->xmit, ch);
-}
-
static void uart_flush_chars(struct tty_struct *tty)
{
uart_start(tty);
@@ -639,7 +633,7 @@ static void uart_throttle(struct tty_struct *tty)
if (I_IXOFF(tty))
mask |= UPSTAT_AUTOXOFF;
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
mask |= UPSTAT_AUTORTS;
if (port->status & mask) {
@@ -647,11 +641,11 @@ static void uart_throttle(struct tty_struct *tty)
mask &= ~port->status;
}
- if (mask & UPSTAT_AUTOXOFF)
- uart_send_xchar(tty, STOP_CHAR(tty));
-
if (mask & UPSTAT_AUTORTS)
uart_clear_mctrl(port, TIOCM_RTS);
+
+ if (mask & UPSTAT_AUTOXOFF)
+ uart_send_xchar(tty, STOP_CHAR(tty));
}
static void uart_unthrottle(struct tty_struct *tty)
@@ -662,7 +656,7 @@ static void uart_unthrottle(struct tty_struct *tty)
if (I_IXOFF(tty))
mask |= UPSTAT_AUTOXOFF;
- if (tty->termios.c_cflag & CRTSCTS)
+ if (C_CRTSCTS(tty))
mask |= UPSTAT_AUTORTS;
if (port->status & mask) {
@@ -670,21 +664,25 @@ static void uart_unthrottle(struct tty_struct *tty)
mask &= ~port->status;
}
- if (mask & UPSTAT_AUTOXOFF)
- uart_send_xchar(tty, START_CHAR(tty));
-
if (mask & UPSTAT_AUTORTS)
uart_set_mctrl(port, TIOCM_RTS);
+
+ if (mask & UPSTAT_AUTOXOFF)
+ uart_send_xchar(tty, START_CHAR(tty));
}
-static void do_uart_get_info(struct tty_port *port,
- struct serial_struct *retinfo)
+static void uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = state->uart_port;
memset(retinfo, 0, sizeof(*retinfo));
+ /*
+ * Ensure the state we copy is consistent and no hardware changes
+ * occur as we go
+ */
+ mutex_lock(&port->mutex);
retinfo->type = uport->type;
retinfo->line = uport->line;
retinfo->port = uport->iobase;
@@ -703,15 +701,6 @@ static void do_uart_get_info(struct tty_port *port,
retinfo->io_type = uport->iotype;
retinfo->iomem_reg_shift = uport->regshift;
retinfo->iomem_base = (void *)(unsigned long)uport->mapbase;
-}
-
-static void uart_get_info(struct tty_port *port,
- struct serial_struct *retinfo)
-{
- /* Ensure the state we copy is consistent and no hardware changes
- occur as we go */
- mutex_lock(&port->mutex);
- do_uart_get_info(port, retinfo);
mutex_unlock(&port->mutex);
}
@@ -719,6 +708,7 @@ static int uart_get_info_user(struct tty_port *port,
struct serial_struct __user *retinfo)
{
struct serial_struct tmp;
+
uart_get_info(port, &tmp);
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
@@ -1391,8 +1381,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
uport = state->uart_port;
port = &state->port;
-
- pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
+ pr_debug("uart_close(%d) called\n", tty->index);
if (!port->count || tty_port_close_start(port, tty, filp) == 0)
return;
@@ -1434,7 +1423,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
* Wake up anyone trying to open this port.
*/
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
- clear_bit(ASYNCB_CLOSING, &port->flags);
spin_unlock_irq(&port->lock);
wake_up_interruptible(&port->open_wait);
@@ -1510,7 +1498,7 @@ static void uart_hangup(struct tty_struct *tty)
struct tty_port *port = &state->port;
unsigned long flags;
- pr_debug("uart_hangup(%d)\n", state->uart_port->line);
+ pr_debug("uart_hangup(%d)\n", tty->index);
mutex_lock(&port->mutex);
if (port->flags & ASYNC_NORMAL_ACTIVE) {
@@ -1591,7 +1579,7 @@ static void uart_dtr_rts(struct tty_port *port, int onoff)
*/
static int uart_open(struct tty_struct *tty, struct file *filp)
{
- struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
+ struct uart_driver *drv = tty->driver->driver_state;
int retval, line = tty->index;
struct uart_state *state = drv->state + line;
struct tty_port *port = &state->port;
@@ -1633,15 +1621,12 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
/*
* If we succeeded, wait until the port is ready.
*/
+err_unlock:
mutex_unlock(&port->mutex);
if (retval == 0)
retval = tty_port_block_til_ready(port, tty, filp);
-
end:
return retval;
-err_unlock:
- mutex_unlock(&port->mutex);
- goto end;
}
static const char *uart_type(struct uart_port *port)
@@ -1700,17 +1685,13 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
seq_printf(m, " tx:%d rx:%d",
uport->icount.tx, uport->icount.rx);
if (uport->icount.frame)
- seq_printf(m, " fe:%d",
- uport->icount.frame);
+ seq_printf(m, " fe:%d", uport->icount.frame);
if (uport->icount.parity)
- seq_printf(m, " pe:%d",
- uport->icount.parity);
+ seq_printf(m, " pe:%d", uport->icount.parity);
if (uport->icount.brk)
- seq_printf(m, " brk:%d",
- uport->icount.brk);
+ seq_printf(m, " brk:%d", uport->icount.brk);
if (uport->icount.overrun)
- seq_printf(m, " oe:%d",
- uport->icount.overrun);
+ seq_printf(m, " oe:%d", uport->icount.overrun);
#define INFOBIT(bit, str) \
if (uport->mctrl & (bit)) \
@@ -1745,8 +1726,7 @@ static int uart_proc_show(struct seq_file *m, void *v)
struct uart_driver *drv = ttydrv->driver_state;
int i;
- seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n",
- "", "", "");
+ seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n", "", "", "");
for (i = 0; i < drv->nr; i++)
uart_line_info(m, drv, i);
return 0;
@@ -1895,26 +1875,6 @@ uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow)
}
EXPORT_SYMBOL_GPL(uart_parse_options);
-struct baud_rates {
- unsigned int rate;
- unsigned int cflag;
-};
-
-static const struct baud_rates baud_rates[] = {
- { 921600, B921600 },
- { 460800, B460800 },
- { 230400, B230400 },
- { 115200, B115200 },
- { 57600, B57600 },
- { 38400, B38400 },
- { 19200, B19200 },
- { 9600, B9600 },
- { 4800, B4800 },
- { 2400, B2400 },
- { 1200, B1200 },
- { 0, B38400 }
-};
-
/**
* uart_set_options - setup the serial console parameters
* @port: pointer to the serial ports uart_port structure
@@ -1930,7 +1890,6 @@ uart_set_options(struct uart_port *port, struct console *co,
{
struct ktermios termios;
static struct ktermios dummy;
- int i;
/*
* Ensure that the serial console lock is initialised
@@ -1945,16 +1904,8 @@ uart_set_options(struct uart_port *port, struct console *co,
memset(&termios, 0, sizeof(struct ktermios));
- termios.c_cflag = CREAD | HUPCL | CLOCAL;
-
- /*
- * Construct a cflag setting.
- */
- for (i = 0; baud_rates[i].rate; i++)
- if (baud_rates[i].rate <= baud)
- break;
-
- termios.c_cflag |= baud_rates[i].cflag;
+ termios.c_cflag |= CREAD | HUPCL | CLOCAL;
+ tty_termios_encode_baud_rate(&termios, baud, baud);
if (bits == 7)
termios.c_cflag |= CS7;
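
Replacing the baud_rates[] table with tty_termios_encode_baud_rate() also lifts the old limitation to the listed Bxxx constants: the helper selects a matching Bxxx flag when one exists and otherwise falls back to BOTHER with the exact rate kept in c_ispeed/c_ospeed. A minimal sketch of the call (kernel context assumed):

struct ktermios termios;

memset(&termios, 0, sizeof(termios));
termios.c_cflag |= CREAD | HUPCL | CLOCAL;

/* 115200 maps to B115200; a nonstandard rate such as 1500000 would be
 * carried verbatim via BOTHER and c_ispeed/c_ospeed */
tty_termios_encode_baud_rate(&termios, 115200, 115200);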
diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
index b4decf8787de..57f152394af5 100644
--- a/drivers/tty/serial/serial_ks8695.c
+++ b/drivers/tty/serial/serial_ks8695.c
@@ -554,7 +554,7 @@ static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = {
.uartclk = KS8695_CLOCK_RATE * 16,
.fifosize = 16,
.ops = &ks8695uart_pops,
- .flags = ASYNC_BOOT_AUTOCONF,
+ .flags = UPF_BOOT_AUTOCONF,
.line = 0,
}
};
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 226ad23b136c..02147361eaa9 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -20,6 +20,7 @@
#include <linux/gpio/consumer.h>
#include <linux/termios.h>
#include <linux/serial_core.h>
+#include <linux/module.h>
#include "serial_mctrl_gpio.h"
@@ -249,3 +250,5 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
}
}
EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4646a9f531ad..0130feb069ae 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -84,6 +84,22 @@ enum SCI_CLKS {
SCI_NUM_CLKS
};
+/* Bit x set means sampling rate x + 1 is supported */
+#define SCI_SR(x) BIT((x) - 1)
+#define SCI_SR_RANGE(x, y) GENMASK((y) - 1, (x) - 1)
+
+#define SCI_SR_SCIFAB SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \
+ SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \
+ SCI_SR(19) | SCI_SR(27)
+
+#define min_sr(_port) ffs((_port)->sampling_rate_mask)
+#define max_sr(_port) fls((_port)->sampling_rate_mask)
+
+/* Iterate over all supported sampling rates, from high to low */
+#define for_each_sr(_sr, _port) \
+ for ((_sr) = max_sr(_port); (_sr) >= min_sr(_port); (_sr)--) \
+ if ((_port)->sampling_rate_mask & SCI_SR((_sr)))
+
struct sci_port {
struct uart_port port;
@@ -93,7 +109,7 @@ struct sci_port {
unsigned int overrun_mask;
unsigned int error_mask;
unsigned int error_clear;
- unsigned int sampling_rate;
+ unsigned int sampling_rate_mask;
resource_size_t reg_size;
/* Break timer */
@@ -637,7 +653,8 @@ static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
}
}
-#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
+#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
+ defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
#ifdef CONFIG_CONSOLE_POLL
static int sci_poll_get_char(struct uart_port *port)
@@ -678,7 +695,8 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
serial_port_out(port, SCxTDR, c);
sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
-#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
+#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
+ CONFIG_SERIAL_SH_SCI_EARLYCON */
static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
@@ -1902,19 +1920,13 @@ static int sci_sck_calc(struct sci_port *s, unsigned int bps,
unsigned int *srr)
{
unsigned long freq = s->clk_rates[SCI_SCK];
- unsigned int min_sr, max_sr, sr;
int err, min_err = INT_MAX;
+ unsigned int sr;
- if (s->sampling_rate) {
- /* SCI(F) has a fixed sampling rate */
- min_sr = max_sr = s->sampling_rate / 2;
- } else {
- /* HSCIF has a variable 1/(8..32) sampling rate */
- min_sr = 8;
- max_sr = 32;
- }
+ if (s->port.type != PORT_HSCIF)
+ freq *= 2;
- for (sr = max_sr; sr >= min_sr; sr--) {
+ for_each_sr(sr, s) {
err = DIV_ROUND_CLOSEST(freq, sr) - bps;
if (abs(err) >= abs(min_err))
continue;
@@ -1935,19 +1947,13 @@ static int sci_brg_calc(struct sci_port *s, unsigned int bps,
unsigned long freq, unsigned int *dlr,
unsigned int *srr)
{
- unsigned int min_sr, max_sr, sr, dl;
int err, min_err = INT_MAX;
+ unsigned int sr, dl;
- if (s->sampling_rate) {
- /* SCIF has a fixed sampling rate */
- min_sr = max_sr = s->sampling_rate / 2;
- } else {
- /* HSCIF has a variable 1/(8..32) sampling rate */
- min_sr = 8;
- max_sr = 32;
- }
+ if (s->port.type != PORT_HSCIF)
+ freq *= 2;
- for (sr = max_sr; sr >= min_sr; sr--) {
+ for_each_sr(sr, s) {
dl = DIV_ROUND_CLOSEST(freq, sr * bps);
dl = clamp(dl, 1U, 65535U);
@@ -1973,19 +1979,12 @@ static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
unsigned int *brr, unsigned int *srr,
unsigned int *cks)
{
- unsigned int min_sr, max_sr, shift, sr, br, prediv, scrate, c;
unsigned long freq = s->clk_rates[SCI_FCK];
+ unsigned int sr, br, prediv, scrate, c;
int err, min_err = INT_MAX;
- if (s->sampling_rate) {
- min_sr = max_sr = s->sampling_rate;
- shift = 0;
- } else {
- /* HSCIF has a variable sample rate */
- min_sr = 8;
- max_sr = 32;
- shift = 1;
- }
+ if (s->port.type != PORT_HSCIF)
+ freq *= 2;
/*
* Find the combination of sample rate and clock select with the
@@ -2002,10 +2001,10 @@ static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
* (|D - 0.5| / N * (1 + F))|
* NOTE: Usually, treat D for 0.5, F is 0 by this calculation.
*/
- for (sr = max_sr; sr >= min_sr; sr--) {
+ for_each_sr(sr, s) {
for (c = 0; c <= 3; c++) {
/* integerized formulas from HSCIF documentation */
- prediv = sr * (1 << (2 * c + shift));
+ prediv = sr * (1 << (2 * c + 1));
/*
* We need to calculate:
@@ -2062,7 +2061,7 @@ static void sci_reset(struct uart_port *port)
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
- unsigned int baud, smr_val = 0, scr_val = 0, i;
+ unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i;
unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0;
unsigned int brr1 = 255, cks1 = 0, srr1 = 15, dl1 = 0;
struct sci_port *s = to_sci_port(port);
@@ -2096,8 +2095,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
for (i = 0; i < SCI_NUM_CLKS; i++)
max_freq = max(max_freq, s->clk_rates[i]);
- baud = uart_get_baud_rate(port, termios, old, 0,
- max_freq / max(s->sampling_rate, 8U));
+ baud = uart_get_baud_rate(port, termios, old, 0, max_freq / min_sr(s));
if (!baud)
goto done;
@@ -2185,6 +2183,17 @@ done:
uart_update_timeout(port, termios->c_cflag, baud);
if (best_clk >= 0) {
+ if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+ switch (srr + 1) {
+ case 5: smr_val |= SCSMR_SRC_5; break;
+ case 7: smr_val |= SCSMR_SRC_7; break;
+ case 11: smr_val |= SCSMR_SRC_11; break;
+ case 13: smr_val |= SCSMR_SRC_13; break;
+ case 16: smr_val |= SCSMR_SRC_16; break;
+ case 17: smr_val |= SCSMR_SRC_17; break;
+ case 19: smr_val |= SCSMR_SRC_19; break;
+ case 27: smr_val |= SCSMR_SRC_27; break;
+ }
smr_val |= cks;
dev_dbg(port->dev,
"SCR 0x%x SMR 0x%x BRR %u CKS 0x%x DL %u SRR %u\n",
@@ -2200,7 +2209,8 @@ done:
} else {
/* Don't touch the bit rate configuration */
scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
- smr_val |= serial_port_in(port, SCSMR) & SCSMR_CKS;
+ smr_val |= serial_port_in(port, SCSMR) &
+ (SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
dev_dbg(port->dev, "SCR 0x%x SMR 0x%x\n", scr_val, smr_val);
serial_port_out(port, SCSCR, scr_val);
serial_port_out(port, SCSMR, smr_val);
@@ -2232,6 +2242,16 @@ done:
scr_val |= s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0);
dev_dbg(port->dev, "SCSCR 0x%x\n", scr_val);
serial_port_out(port, SCSCR, scr_val);
+ if ((srr + 1 == 5) &&
+ (port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
+ /*
+ * In asynchronous mode, when the sampling rate is 1/5, first
+ * received data may become invalid on some SCIFA and SCIFB.
+ * To avoid this problem wait more than 1 serial data time (1
+ * bit time x serial data number) after setting SCSCR.RE = 1.
+ */
+ udelay(DIV_ROUND_UP(10 * 1000000, baud));
+ }
#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
@@ -2528,37 +2548,37 @@ static int sci_init_single(struct platform_device *dev,
port->fifosize = 256;
sci_port->overrun_reg = SCxSR;
sci_port->overrun_mask = SCIFA_ORER;
- sci_port->sampling_rate = 16;
+ sci_port->sampling_rate_mask = SCI_SR_SCIFAB;
break;
case PORT_HSCIF:
port->fifosize = 128;
sci_port->overrun_reg = SCLSR;
sci_port->overrun_mask = SCLSR_ORER;
- sci_port->sampling_rate = 0;
+ sci_port->sampling_rate_mask = SCI_SR_RANGE(8, 32);
break;
case PORT_SCIFA:
port->fifosize = 64;
sci_port->overrun_reg = SCxSR;
sci_port->overrun_mask = SCIFA_ORER;
- sci_port->sampling_rate = 16;
+ sci_port->sampling_rate_mask = SCI_SR_SCIFAB;
break;
case PORT_SCIF:
port->fifosize = 16;
if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) {
sci_port->overrun_reg = SCxSR;
sci_port->overrun_mask = SCIFA_ORER;
- sci_port->sampling_rate = 16;
+ sci_port->sampling_rate_mask = SCI_SR(16);
} else {
sci_port->overrun_reg = SCLSR;
sci_port->overrun_mask = SCLSR_ORER;
- sci_port->sampling_rate = 32;
+ sci_port->sampling_rate_mask = SCI_SR(32);
}
break;
default:
port->fifosize = 1;
sci_port->overrun_reg = SCxSR;
sci_port->overrun_mask = SCI_ORER;
- sci_port->sampling_rate = 32;
+ sci_port->sampling_rate_mask = SCI_SR(32);
break;
}
@@ -2567,7 +2587,7 @@ static int sci_init_single(struct platform_device *dev,
* data override the sampling rate for now.
*/
if (p->sampling_rate)
- sci_port->sampling_rate = p->sampling_rate;
+ sci_port->sampling_rate_mask = SCI_SR(p->sampling_rate);
if (!early) {
ret = sci_init_clocks(sci_port, &dev->dev);
@@ -2632,7 +2652,8 @@ static void sci_cleanup_single(struct sci_port *port)
pm_runtime_disable(port->port.dev);
}
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
+ defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
static void serial_console_putchar(struct uart_port *port, int ch)
{
sci_poll_put_char(port, ch);
@@ -2652,9 +2673,12 @@ static void serial_console_write(struct console *co, const char *s,
int locked = 1;
local_irq_save(flags);
+#if defined(SUPPORT_SYSRQ)
if (port->sysrq)
locked = 0;
- else if (oops_in_progress)
+ else
+#endif
+ if (oops_in_progress)
locked = spin_trylock(&port->lock);
else
spin_lock(&port->lock);
@@ -2764,7 +2788,7 @@ static inline int sci_probe_earlyprintk(struct platform_device *pdev)
#define SCI_CONSOLE NULL
-#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
+#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE || CONFIG_SERIAL_SH_SCI_EARLYCON */
static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";
@@ -2998,6 +3022,62 @@ static void __exit sci_exit(void)
early_platform_init_buffer("earlyprintk", &sci_driver,
early_serial_buf, ARRAY_SIZE(early_serial_buf));
#endif
+#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
+static struct plat_sci_port port_cfg __initdata;
+
+static int __init early_console_setup(struct earlycon_device *device,
+ int type)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->port.serial_in = sci_serial_in;
+ device->port.serial_out = sci_serial_out;
+ device->port.type = type;
+ memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
+ sci_ports[0].cfg = &port_cfg;
+ sci_ports[0].cfg->type = type;
+ sci_probe_regmap(sci_ports[0].cfg);
+ port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR) |
+ SCSCR_RE | SCSCR_TE;
+ sci_serial_out(&sci_ports[0].port, SCSCR, port_cfg.scscr);
+
+ device->con->write = serial_console_write;
+ return 0;
+}
+static int __init sci_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ return early_console_setup(device, PORT_SCI);
+}
+static int __init scif_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ return early_console_setup(device, PORT_SCIF);
+}
+static int __init scifa_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ return early_console_setup(device, PORT_SCIFA);
+}
+static int __init scifb_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ return early_console_setup(device, PORT_SCIFB);
+}
+static int __init hscif_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ return early_console_setup(device, PORT_HSCIF);
+}
+
+OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
+OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup);
+OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup);
+OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup);
+OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup);
+#endif /* CONFIG_SERIAL_SH_SCI_EARLYCON */
+
module_init(sci_init);
module_exit(sci_exit);
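
The sampling-rate rework encodes each supported rate n as bit n-1 of sampling_rate_mask, so ffs()/fls() recover the minimum and maximum rate and for_each_sr() walks the set from high to low. A small user-space demo of the encoding, mirroring the SCI_SR macros above (parenthesised here for macro hygiene):

#include <stdio.h>
#include <strings.h>    /* ffs() */

#define BIT(n)        (1U << (n))
#define SCI_SR(x)     BIT((x) - 1)
#define SCI_SR_SCIFAB (SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \
                       SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \
                       SCI_SR(19) | SCI_SR(27))

/* portable stand-in for the kernel's fls() */
static int fls_u(unsigned int v)
{
    int n = 0;

    while (v) {
        n++;
        v >>= 1;
    }
    return n;
}

int main(void)
{
    unsigned int mask = SCI_SR_SCIFAB;
    int sr;

    /* prints: 27 19 17 16 13 11 7 5 */
    for (sr = fls_u(mask); sr >= ffs(mask); sr--)
        if (mask & SCI_SR(sr))
            printf("%d ", sr);
    printf("\n");
    return 0;
}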
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index fb1760250421..7a4fa185b93e 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -35,12 +35,27 @@ enum {
/* SCSMR (Serial Mode Register) */
+#define SCSMR_C_A BIT(7) /* Communication Mode */
+#define SCSMR_CSYNC BIT(7) /* - Clocked synchronous mode */
+#define SCSMR_ASYNC 0 /* - Asynchronous mode */
#define SCSMR_CHR BIT(6) /* 7-bit Character Length */
#define SCSMR_PE BIT(5) /* Parity Enable */
#define SCSMR_ODD BIT(4) /* Odd Parity */
#define SCSMR_STOP BIT(3) /* Stop Bit Length */
#define SCSMR_CKS 0x0003 /* Clock Select */
+/* Serial Mode Register, SCIFA/SCIFB only bits */
+#define SCSMR_CKEDG BIT(12) /* Transmit/Receive Clock Edge Select */
+#define SCSMR_SRC_MASK 0x0700 /* Sampling Control */
+#define SCSMR_SRC_16 0x0000 /* Sampling rate 1/16 */
+#define SCSMR_SRC_5 0x0100 /* Sampling rate 1/5 */
+#define SCSMR_SRC_7 0x0200 /* Sampling rate 1/7 */
+#define SCSMR_SRC_11 0x0300 /* Sampling rate 1/11 */
+#define SCSMR_SRC_13 0x0400 /* Sampling rate 1/13 */
+#define SCSMR_SRC_17 0x0500 /* Sampling rate 1/17 */
+#define SCSMR_SRC_19 0x0600 /* Sampling rate 1/19 */
+#define SCSMR_SRC_27 0x0700 /* Sampling rate 1/27 */
+
/* Serial Control Register, SCIFA/SCIFB only bits */
#define SCSCR_TDRQE BIT(15) /* Tx Data Transfer Request Enable */
#define SCSCR_RDRQE BIT(14) /* Rx Data Transfer Request Enable */
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index ef26c4a60be4..18971063f95f 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -624,8 +624,6 @@ static int __init sprd_early_console_setup(
device->con->write = sprd_early_write;
return 0;
}
-
-EARLYCON_DECLARE(sprd_serial, sprd_early_console_setup);
OF_EARLYCON_DECLARE(sprd_serial, "sprd,sc9836-uart",
sprd_early_console_setup);
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index b1c6bd3d483f..c9fdfc8bf47f 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -28,7 +28,7 @@
#define ULITE_NAME "ttyUL"
#define ULITE_MAJOR 204
#define ULITE_MINOR 187
-#define ULITE_NR_UARTS 4
+#define ULITE_NR_UARTS 16
/* ---------------------------------------------------------------------
* Register definitions
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
iowrite32be(val, addr);
}
-static struct uartlite_reg_ops uartlite_be = {
+static const struct uartlite_reg_ops uartlite_be = {
.in = uartlite_inbe32,
.out = uartlite_outbe32,
};
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
iowrite32(val, addr);
}
-static struct uartlite_reg_ops uartlite_le = {
+static const struct uartlite_reg_ops uartlite_le = {
.in = uartlite_inle32,
.out = uartlite_outle32,
};
static inline u32 uart_in32(u32 offset, struct uart_port *port)
{
- struct uartlite_reg_ops *reg_ops = port->private_data;
+ const struct uartlite_reg_ops *reg_ops = port->private_data;
return reg_ops->in(port->membase + offset);
}
static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
{
- struct uartlite_reg_ops *reg_ops = port->private_data;
+ const struct uartlite_reg_ops *reg_ops = port->private_data;
reg_ops->out(val, port->membase + offset);
}
@@ -193,12 +193,15 @@ static int ulite_transmit(struct uart_port *port, int stat)
static irqreturn_t ulite_isr(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- int busy, n = 0;
+ int stat, busy, n = 0;
+ unsigned long flags;
do {
- int stat = uart_in32(ULITE_STATUS, port);
+ spin_lock_irqsave(&port->lock, flags);
+ stat = uart_in32(ULITE_STATUS, port);
busy = ulite_receive(port, stat);
busy |= ulite_transmit(port, stat);
+ spin_unlock_irqrestore(&port->lock, flags);
n++;
} while (busy);
@@ -259,7 +262,8 @@ static int ulite_startup(struct uart_port *port)
{
int ret;
- ret = request_irq(port->irq, ulite_isr, IRQF_SHARED, "uartlite", port);
+ ret = request_irq(port->irq, ulite_isr, IRQF_SHARED | IRQF_TRIGGER_RISING,
+ "uartlite", port);
if (ret)
return ret;
@@ -519,6 +523,47 @@ static int __init ulite_console_init(void)
console_initcall(ulite_console_init);
+static void early_uartlite_putc(struct uart_port *port, int c)
+{
+ /*
+ * Limit how many times we'll spin waiting for TX FIFO status.
+ * This prevents lockups if the base address is set incorrectly
+ * or the UARTLITE is otherwise broken.
+ * The limit is fairly arbitrary; unless we are running at about
+ * 10 baud, we will never time out on a working UART.
+ */
+
+ unsigned retries = 1000000;
+ /* read status bit - 0x8 offset */
+ while (--retries && (readl(port->membase + 8) & (1 << 3)))
+ ;
+
+ /* Only attempt the iowrite if we didn't timeout */
+ /* write to TX_FIFO - 0x4 offset */
+ if (retries)
+ writel(c & 0xff, port->membase + 4);
+}
+
+static void early_uartlite_write(struct console *console,
+ const char *s, unsigned n)
+{
+ struct earlycon_device *device = console->data;
+ uart_console_write(&device->port, s, n, early_uartlite_putc);
+}
+
+static int __init early_uartlite_setup(struct earlycon_device *device,
+ const char *options)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = early_uartlite_write;
+ return 0;
+}
+EARLYCON_DECLARE(uartlite, early_uartlite_setup);
+OF_EARLYCON_DECLARE(uartlite_b, "xlnx,opb-uartlite-1.00.b", early_uartlite_setup);
+OF_EARLYCON_DECLARE(uartlite_a, "xlnx,xps-uartlite-1.00.a", early_uartlite_setup);
+
#endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
static struct uart_driver ulite_uart_driver = {
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 009e0dbc12d2..cd46e64c4255 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -50,24 +50,24 @@ module_param(rx_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
/* Register offsets for the UART. */
-#define CDNS_UART_CR_OFFSET 0x00 /* Control Register */
-#define CDNS_UART_MR_OFFSET 0x04 /* Mode Register */
-#define CDNS_UART_IER_OFFSET 0x08 /* Interrupt Enable */
-#define CDNS_UART_IDR_OFFSET 0x0C /* Interrupt Disable */
-#define CDNS_UART_IMR_OFFSET 0x10 /* Interrupt Mask */
-#define CDNS_UART_ISR_OFFSET 0x14 /* Interrupt Status */
-#define CDNS_UART_BAUDGEN_OFFSET 0x18 /* Baud Rate Generator */
-#define CDNS_UART_RXTOUT_OFFSET 0x1C /* RX Timeout */
-#define CDNS_UART_RXWM_OFFSET 0x20 /* RX FIFO Trigger Level */
-#define CDNS_UART_MODEMCR_OFFSET 0x24 /* Modem Control */
-#define CDNS_UART_MODEMSR_OFFSET 0x28 /* Modem Status */
-#define CDNS_UART_SR_OFFSET 0x2C /* Channel Status */
-#define CDNS_UART_FIFO_OFFSET 0x30 /* FIFO */
-#define CDNS_UART_BAUDDIV_OFFSET 0x34 /* Baud Rate Divider */
-#define CDNS_UART_FLOWDEL_OFFSET 0x38 /* Flow Delay */
-#define CDNS_UART_IRRX_PWIDTH_OFFSET 0x3C /* IR Min Received Pulse Width */
-#define CDNS_UART_IRTX_PWIDTH_OFFSET 0x40 /* IR Transmitted pulse Width */
-#define CDNS_UART_TXWM_OFFSET 0x44 /* TX FIFO Trigger Level */
+#define CDNS_UART_CR 0x00 /* Control Register */
+#define CDNS_UART_MR 0x04 /* Mode Register */
+#define CDNS_UART_IER 0x08 /* Interrupt Enable */
+#define CDNS_UART_IDR 0x0C /* Interrupt Disable */
+#define CDNS_UART_IMR 0x10 /* Interrupt Mask */
+#define CDNS_UART_ISR 0x14 /* Interrupt Status */
+#define CDNS_UART_BAUDGEN 0x18 /* Baud Rate Generator */
+#define CDNS_UART_RXTOUT 0x1C /* RX Timeout */
+#define CDNS_UART_RXWM 0x20 /* RX FIFO Trigger Level */
+#define CDNS_UART_MODEMCR 0x24 /* Modem Control */
+#define CDNS_UART_MODEMSR 0x28 /* Modem Status */
+#define CDNS_UART_SR 0x2C /* Channel Status */
+#define CDNS_UART_FIFO 0x30 /* FIFO */
+#define CDNS_UART_BAUDDIV 0x34 /* Baud Rate Divider */
+#define CDNS_UART_FLOWDEL 0x38 /* Flow Delay */
+#define CDNS_UART_IRRX_PWIDTH 0x3C /* IR Min Received Pulse Width */
+#define CDNS_UART_IRTX_PWIDTH 0x40 /* IR Transmitted pulse Width */
+#define CDNS_UART_TXWM 0x44 /* TX FIFO Trigger Level */
/* Control Register Bit Definitions */
#define CDNS_UART_CR_STOPBRK 0x00000100 /* Stop TX break */
@@ -126,6 +126,10 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
#define CDNS_UART_IXR_MASK 0x00001FFF /* Valid bit mask */
+#define CDNS_UART_RX_IRQS (CDNS_UART_IXR_PARITY | CDNS_UART_IXR_FRAMING | \
+ CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_RXTRIG | \
+ CDNS_UART_IXR_TOUT)
+
/* Goes in read_status_mask for break detection as the HW doesn't do it*/
#define CDNS_UART_IXR_BRK 0x80000000
@@ -172,43 +176,22 @@ struct cdns_uart {
#define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \
clk_rate_change_nb);
-/**
- * cdns_uart_isr - Interrupt handler
- * @irq: Irq number
- * @dev_id: Id of the port
- *
- * Return: IRQHANDLED
- */
-static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
+static void cdns_uart_handle_rx(struct uart_port *port, unsigned int isrstatus)
{
- struct uart_port *port = (struct uart_port *)dev_id;
- unsigned long flags;
- unsigned int isrstatus, numbytes;
- unsigned int data;
- char status = TTY_NORMAL;
-
- spin_lock_irqsave(&port->lock, flags);
-
- /* Read the interrupt status register to determine which
- * interrupt(s) is/are active.
- */
- isrstatus = readl(port->membase + CDNS_UART_ISR_OFFSET);
-
/*
* There is no hardware break detection, so we interpret framing
* error with all-zeros data as a break sequence. Most of the time,
* there's another non-zero byte at the end of the sequence.
*/
if (isrstatus & CDNS_UART_IXR_FRAMING) {
- while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+ while (!(readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_RXEMPTY)) {
- if (!readl(port->membase + CDNS_UART_FIFO_OFFSET)) {
+ if (!readl(port->membase + CDNS_UART_FIFO)) {
port->read_status_mask |= CDNS_UART_IXR_BRK;
isrstatus &= ~CDNS_UART_IXR_FRAMING;
}
}
- writel(CDNS_UART_IXR_FRAMING,
- port->membase + CDNS_UART_ISR_OFFSET);
+ writel(CDNS_UART_IXR_FRAMING, port->membase + CDNS_UART_ISR);
}
/* drop byte with parity error if IGNPAR specified */
@@ -218,94 +201,106 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
isrstatus &= port->read_status_mask;
isrstatus &= ~port->ignore_status_mask;
- if ((isrstatus & CDNS_UART_IXR_TOUT) ||
- (isrstatus & CDNS_UART_IXR_RXTRIG)) {
- /* Receive Timeout Interrupt */
- while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
- CDNS_UART_SR_RXEMPTY)) {
- data = readl(port->membase + CDNS_UART_FIFO_OFFSET);
-
- /* Non-NULL byte after BREAK is garbage (99%) */
- if (data && (port->read_status_mask &
- CDNS_UART_IXR_BRK)) {
- port->read_status_mask &= ~CDNS_UART_IXR_BRK;
- port->icount.brk++;
- if (uart_handle_break(port))
- continue;
- }
+ if (!(isrstatus & (CDNS_UART_IXR_TOUT | CDNS_UART_IXR_RXTRIG)))
+ return;
-#ifdef SUPPORT_SYSRQ
- /*
- * uart_handle_sysrq_char() doesn't work if
- * spinlocked, for some reason
- */
- if (port->sysrq) {
- spin_unlock(&port->lock);
- if (uart_handle_sysrq_char(port,
- (unsigned char)data)) {
- spin_lock(&port->lock);
- continue;
- }
- spin_lock(&port->lock);
- }
-#endif
+ while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)) {
+ u32 data;
+ char status = TTY_NORMAL;
- port->icount.rx++;
+ data = readl(port->membase + CDNS_UART_FIFO);
- if (isrstatus & CDNS_UART_IXR_PARITY) {
- port->icount.parity++;
- status = TTY_PARITY;
- } else if (isrstatus & CDNS_UART_IXR_FRAMING) {
- port->icount.frame++;
- status = TTY_FRAME;
- } else if (isrstatus & CDNS_UART_IXR_OVERRUN) {
- port->icount.overrun++;
- }
+ /* Non-NULL byte after BREAK is garbage (99%) */
+ if (data && (port->read_status_mask & CDNS_UART_IXR_BRK)) {
+ port->read_status_mask &= ~CDNS_UART_IXR_BRK;
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (uart_handle_sysrq_char(port, data))
+ continue;
+
+ port->icount.rx++;
- uart_insert_char(port, isrstatus, CDNS_UART_IXR_OVERRUN,
- data, status);
+ if (isrstatus & CDNS_UART_IXR_PARITY) {
+ port->icount.parity++;
+ status = TTY_PARITY;
+ } else if (isrstatus & CDNS_UART_IXR_FRAMING) {
+ port->icount.frame++;
+ status = TTY_FRAME;
+ } else if (isrstatus & CDNS_UART_IXR_OVERRUN) {
+ port->icount.overrun++;
}
- spin_unlock(&port->lock);
- tty_flip_buffer_push(&port->state->port);
- spin_lock(&port->lock);
+
+ uart_insert_char(port, isrstatus, CDNS_UART_IXR_OVERRUN,
+ data, status);
}
+ tty_flip_buffer_push(&port->state->port);
+}
- /* Dispatch an appropriate handler */
- if ((isrstatus & CDNS_UART_IXR_TXEMPTY) == CDNS_UART_IXR_TXEMPTY) {
- if (uart_circ_empty(&port->state->xmit)) {
- writel(CDNS_UART_IXR_TXEMPTY,
- port->membase + CDNS_UART_IDR_OFFSET);
- } else {
- numbytes = port->fifosize;
- /* Break if no more data available in the UART buffer */
- while (numbytes--) {
- if (uart_circ_empty(&port->state->xmit))
- break;
- /* Get the data from the UART circular buffer
- * and write it to the cdns_uart's TX_FIFO
- * register.
- */
- writel(port->state->xmit.buf[
- port->state->xmit.tail],
- port->membase + CDNS_UART_FIFO_OFFSET);
-
- port->icount.tx++;
-
- /* Adjust the tail of the UART buffer and wrap
- * the buffer if it reaches limit.
- */
- port->state->xmit.tail =
- (port->state->xmit.tail + 1) &
- (UART_XMIT_SIZE - 1);
- }
+static void cdns_uart_handle_tx(struct uart_port *port)
+{
+ unsigned int numbytes;
- if (uart_circ_chars_pending(
- &port->state->xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
- }
+ if (uart_circ_empty(&port->state->xmit)) {
+ writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
+ return;
+ }
+
+ numbytes = port->fifosize;
+ while (numbytes && !uart_circ_empty(&port->state->xmit) &&
+ !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
+ /*
+ * Get the data from the UART circular buffer
+ * and write it to the cdns_uart's TX_FIFO
+ * register.
+ */
+ writel(port->state->xmit.buf[port->state->xmit.tail],
+ port->membase + CDNS_UART_FIFO);
+ port->icount.tx++;
+
+ /*
+ * Adjust the tail of the UART buffer and wrap
+ * the buffer if it reaches limit.
+ */
+ port->state->xmit.tail =
+ (port->state->xmit.tail + 1) & (UART_XMIT_SIZE - 1);
+
+ numbytes--;
}
- writel(isrstatus, port->membase + CDNS_UART_ISR_OFFSET);
+ if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+/**
+ * cdns_uart_isr - Interrupt handler
+ * @irq: Irq number
+ * @dev_id: Id of the port
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
+{
+ struct uart_port *port = (struct uart_port *)dev_id;
+ unsigned long flags;
+ unsigned int isrstatus;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * Read the interrupt status register to determine which
+ * interrupt(s) is/are active.
+ */
+ isrstatus = readl(port->membase + CDNS_UART_ISR);
+
+ if (isrstatus & CDNS_UART_RX_IRQS)
+ cdns_uart_handle_rx(port, isrstatus);
+
+ if ((isrstatus & CDNS_UART_IXR_TXEMPTY) == CDNS_UART_IXR_TXEMPTY)
+ cdns_uart_handle_tx(port);
+
+ writel(isrstatus, port->membase + CDNS_UART_ISR);
/* be sure to release the lock and tty before leaving */
spin_unlock_irqrestore(&port->lock, flags);
@@ -395,14 +390,14 @@ static unsigned int cdns_uart_set_baud_rate(struct uart_port *port,
&div8);
/* Write new divisors to hardware */
- mreg = readl(port->membase + CDNS_UART_MR_OFFSET);
+ mreg = readl(port->membase + CDNS_UART_MR);
if (div8)
mreg |= CDNS_UART_MR_CLKSEL;
else
mreg &= ~CDNS_UART_MR_CLKSEL;
- writel(mreg, port->membase + CDNS_UART_MR_OFFSET);
- writel(cd, port->membase + CDNS_UART_BAUDGEN_OFFSET);
- writel(bdiv, port->membase + CDNS_UART_BAUDDIV_OFFSET);
+ writel(mreg, port->membase + CDNS_UART_MR);
+ writel(cd, port->membase + CDNS_UART_BAUDGEN);
+ writel(bdiv, port->membase + CDNS_UART_BAUDDIV);
cdns_uart->baud = baud;
return calc_baud;
@@ -449,9 +444,9 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
spin_lock_irqsave(&cdns_uart->port->lock, flags);
/* Disable the TX and RX to set baud rate */
- ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
- writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
@@ -476,11 +471,11 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
spin_lock_irqsave(&cdns_uart->port->lock, flags);
/* Set TX/RX Reset */
- ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
- writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
- while (readl(port->membase + CDNS_UART_CR_OFFSET) &
+ while (readl(port->membase + CDNS_UART_CR) &
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
@@ -489,11 +484,11 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
* enable bit and RX enable bit to enable the transmitter and
* receiver.
*/
- writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
- ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+ writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
- writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
@@ -510,43 +505,28 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
*/
static void cdns_uart_start_tx(struct uart_port *port)
{
- unsigned int status, numbytes = port->fifosize;
+ unsigned int status;
- if (uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port))
+ if (uart_tx_stopped(port))
return;
- status = readl(port->membase + CDNS_UART_CR_OFFSET);
- /* Set the TX enable bit and clear the TX disable bit to enable the
+ /*
+ * Set the TX enable bit and clear the TX disable bit to enable the
* transmitter.
*/
- writel((status & ~CDNS_UART_CR_TX_DIS) | CDNS_UART_CR_TX_EN,
- port->membase + CDNS_UART_CR_OFFSET);
+ status = readl(port->membase + CDNS_UART_CR);
+ status &= ~CDNS_UART_CR_TX_DIS;
+ status |= CDNS_UART_CR_TX_EN;
+ writel(status, port->membase + CDNS_UART_CR);
- while (numbytes-- && ((readl(port->membase + CDNS_UART_SR_OFFSET) &
- CDNS_UART_SR_TXFULL)) != CDNS_UART_SR_TXFULL) {
- /* Break if no more data available in the UART buffer */
- if (uart_circ_empty(&port->state->xmit))
- break;
+ if (uart_circ_empty(&port->state->xmit))
+ return;
- /* Get the data from the UART circular buffer and
- * write it to the cdns_uart's TX_FIFO register.
- */
- writel(port->state->xmit.buf[port->state->xmit.tail],
- port->membase + CDNS_UART_FIFO_OFFSET);
- port->icount.tx++;
+ cdns_uart_handle_tx(port);
- /* Adjust the tail of the UART buffer and wrap
- * the buffer if it reaches limit.
- */
- port->state->xmit.tail = (port->state->xmit.tail + 1) &
- (UART_XMIT_SIZE - 1);
- }
- writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR_OFFSET);
+ writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR);
/* Enable the TX Empty interrupt */
- writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER_OFFSET);
-
- if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
+ writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER);
}
/**
@@ -557,10 +537,10 @@ static void cdns_uart_stop_tx(struct uart_port *port)
{
unsigned int regval;
- regval = readl(port->membase + CDNS_UART_CR_OFFSET);
+ regval = readl(port->membase + CDNS_UART_CR);
regval |= CDNS_UART_CR_TX_DIS;
/* Disable the transmitter */
- writel(regval, port->membase + CDNS_UART_CR_OFFSET);
+ writel(regval, port->membase + CDNS_UART_CR);
}
/**
@@ -571,10 +551,13 @@ static void cdns_uart_stop_rx(struct uart_port *port)
{
unsigned int regval;
- regval = readl(port->membase + CDNS_UART_CR_OFFSET);
- regval |= CDNS_UART_CR_RX_DIS;
+ /* Disable RX IRQs */
+ writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IDR);
+
/* Disable the receiver */
- writel(regval, port->membase + CDNS_UART_CR_OFFSET);
+ regval = readl(port->membase + CDNS_UART_CR);
+ regval |= CDNS_UART_CR_RX_DIS;
+ writel(regval, port->membase + CDNS_UART_CR);
}
/**
@@ -587,7 +570,7 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port)
{
unsigned int status;
- status = readl(port->membase + CDNS_UART_SR_OFFSET) &
+ status = readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_TXEMPTY;
return status ? TIOCSER_TEMT : 0;
}
@@ -605,15 +588,15 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
spin_lock_irqsave(&port->lock, flags);
- status = readl(port->membase + CDNS_UART_CR_OFFSET);
+ status = readl(port->membase + CDNS_UART_CR);
if (ctl == -1)
writel(CDNS_UART_CR_STARTBRK | status,
- port->membase + CDNS_UART_CR_OFFSET);
+ port->membase + CDNS_UART_CR);
else {
if ((status & CDNS_UART_CR_STOPBRK) == 0)
writel(CDNS_UART_CR_STOPBRK | status,
- port->membase + CDNS_UART_CR_OFFSET);
+ port->membase + CDNS_UART_CR);
}
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -636,18 +619,18 @@ static void cdns_uart_set_termios(struct uart_port *port,
spin_lock_irqsave(&port->lock, flags);
/* Wait for the transmit FIFO to empty before making changes */
- if (!(readl(port->membase + CDNS_UART_CR_OFFSET) &
+ if (!(readl(port->membase + CDNS_UART_CR) &
CDNS_UART_CR_TX_DIS)) {
- while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+ while (!(readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_TXEMPTY)) {
cpu_relax();
}
}
/* Disable the TX and RX to set baud rate */
- ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
- writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
/*
* Min baud rate = 6 bps and max baud rate is 10 Mbps for a 100 MHz clk
@@ -666,20 +649,20 @@ static void cdns_uart_set_termios(struct uart_port *port,
uart_update_timeout(port, termios->c_cflag, baud);
/* Set TX/RX Reset */
- ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
- writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
/*
* Clear the RX disable and TX disable bits and then set the TX enable
* bit and RX enable bit to enable the transmitter and receiver.
*/
- ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
- writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
- writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
+ writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
@@ -699,7 +682,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
CDNS_UART_IXR_TOUT | CDNS_UART_IXR_PARITY |
CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN;
- mode_reg = readl(port->membase + CDNS_UART_MR_OFFSET);
+ mode_reg = readl(port->membase + CDNS_UART_MR);
/* Handling Data Size */
switch (termios->c_cflag & CSIZE) {
@@ -740,7 +723,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
cval |= CDNS_UART_MR_PARITY_NONE;
}
cval |= mode_reg & 1;
- writel(cval, port->membase + CDNS_UART_MR_OFFSET);
+ writel(cval, port->membase + CDNS_UART_MR);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -753,63 +736,67 @@ static void cdns_uart_set_termios(struct uart_port *port,
*/
static int cdns_uart_startup(struct uart_port *port)
{
- unsigned int retval = 0, status = 0;
+ int ret;
+ unsigned long flags;
+ unsigned int status = 0;
- retval = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME,
- (void *)port);
- if (retval)
- return retval;
+ spin_lock_irqsave(&port->lock, flags);
/* Disable the TX and RX */
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
- port->membase + CDNS_UART_CR_OFFSET);
+ port->membase + CDNS_UART_CR);
/* Set the Control Register with TX/RX Enable, TX/RX Reset,
* no break chars.
*/
writel(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST,
- port->membase + CDNS_UART_CR_OFFSET);
-
- status = readl(port->membase + CDNS_UART_CR_OFFSET);
+ port->membase + CDNS_UART_CR);
- /* Clear the RX disable and TX disable bits and then set the TX enable
- * bit and RX enable bit to enable the transmitter and receiver.
+ /*
+ * Clear the RX disable bit and then set the RX enable bit to enable
+ * the receiver.
*/
- writel((status & ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS))
- | (CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN |
- CDNS_UART_CR_STOPBRK),
- port->membase + CDNS_UART_CR_OFFSET);
+ status = readl(port->membase + CDNS_UART_CR);
+ status &= ~CDNS_UART_CR_RX_DIS;
+ status |= CDNS_UART_CR_RX_EN;
+ writel(status, port->membase + CDNS_UART_CR);
/* Set the Mode Register with normal mode, 8 data bits, 1 stop bit,
* no parity.
*/
writel(CDNS_UART_MR_CHMODE_NORM | CDNS_UART_MR_STOPMODE_1_BIT
| CDNS_UART_MR_PARITY_NONE | CDNS_UART_MR_CHARLEN_8_BIT,
- port->membase + CDNS_UART_MR_OFFSET);
+ port->membase + CDNS_UART_MR);
/*
* Set the RX FIFO Trigger level to use most of the FIFO, but it
* can be tuned with a module parameter
*/
- writel(rx_trigger_level, port->membase + CDNS_UART_RXWM_OFFSET);
+ writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
/*
* Receive Timeout register is enabled but it
* can be tuned with a module parameter
*/
- writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
+ writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
/* Clear out any pending interrupts before enabling them */
- writel(readl(port->membase + CDNS_UART_ISR_OFFSET),
- port->membase + CDNS_UART_ISR_OFFSET);
+ writel(readl(port->membase + CDNS_UART_ISR),
+ port->membase + CDNS_UART_ISR);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
+ if (ret) {
+ dev_err(port->dev, "request_irq '%d' failed with %d\n",
+ port->irq, ret);
+ return ret;
+ }
/* Set the Interrupt Registers with desired interrupts */
- writel(CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_PARITY |
- CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN |
- CDNS_UART_IXR_RXTRIG | CDNS_UART_IXR_TOUT,
- port->membase + CDNS_UART_IER_OFFSET);
+ writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IER);
- return retval;
+ return 0;
}
/**
@@ -819,14 +806,21 @@ static int cdns_uart_startup(struct uart_port *port)
static void cdns_uart_shutdown(struct uart_port *port)
{
int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
/* Disable interrupts */
- status = readl(port->membase + CDNS_UART_IMR_OFFSET);
- writel(status, port->membase + CDNS_UART_IDR_OFFSET);
+ status = readl(port->membase + CDNS_UART_IMR);
+ writel(status, port->membase + CDNS_UART_IDR);
+ writel(0xffffffff, port->membase + CDNS_UART_ISR);
/* Disable the TX and RX */
writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
- port->membase + CDNS_UART_CR_OFFSET);
+ port->membase + CDNS_UART_CR);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
free_irq(port->irq, port);
}
@@ -928,7 +922,7 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
u32 val;
- val = readl(port->membase + CDNS_UART_MODEMCR_OFFSET);
+ val = readl(port->membase + CDNS_UART_MODEMCR);
val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR);
@@ -937,55 +931,46 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_DTR)
val |= CDNS_UART_MODEMCR_DTR;
- writel(val, port->membase + CDNS_UART_MODEMCR_OFFSET);
+ writel(val, port->membase + CDNS_UART_MODEMCR);
}
#ifdef CONFIG_CONSOLE_POLL
static int cdns_uart_poll_get_char(struct uart_port *port)
{
- u32 imr;
int c;
+ unsigned long flags;
- /* Disable all interrupts */
- imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
- writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
+ spin_lock_irqsave(&port->lock, flags);
/* Check if FIFO is empty */
- if (readl(port->membase + CDNS_UART_SR_OFFSET) & CDNS_UART_SR_RXEMPTY)
+ if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
c = NO_POLL_CHAR;
else /* Read a character */
- c = (unsigned char) readl(
- port->membase + CDNS_UART_FIFO_OFFSET);
+ c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
- /* Enable interrupts */
- writel(imr, port->membase + CDNS_UART_IER_OFFSET);
+ spin_unlock_irqrestore(&port->lock, flags);
return c;
}
static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
- u32 imr;
+ unsigned long flags;
- /* Disable all interrupts */
- imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
- writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
+ spin_lock_irqsave(&port->lock, flags);
/* Wait until FIFO is empty */
- while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
- CDNS_UART_SR_TXEMPTY))
+ while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
cpu_relax();
/* Write a character */
- writel(c, port->membase + CDNS_UART_FIFO_OFFSET);
+ writel(c, port->membase + CDNS_UART_FIFO);
/* Wait until FIFO is empty */
- while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
- CDNS_UART_SR_TXEMPTY))
+ while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
cpu_relax();
- /* Enable interrupts */
- writel(imr, port->membase + CDNS_UART_IER_OFFSET);
+ spin_unlock_irqrestore(&port->lock, flags);
return;
}
@@ -1059,8 +1044,7 @@ static struct uart_port *cdns_uart_get_port(int id)
*/
static void cdns_uart_console_wait_tx(struct uart_port *port)
{
- while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
- CDNS_UART_SR_TXEMPTY))
+ while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
barrier();
}
@@ -1072,7 +1056,7 @@ static void cdns_uart_console_wait_tx(struct uart_port *port)
static void cdns_uart_console_putchar(struct uart_port *port, int ch)
{
cdns_uart_console_wait_tx(port);
- writel(ch, port->membase + CDNS_UART_FIFO_OFFSET);
+ writel(ch, port->membase + CDNS_UART_FIFO);
}
static void __init cdns_early_write(struct console *con, const char *s,
@@ -1093,7 +1077,9 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
return 0;
}
-EARLYCON_DECLARE(cdns, cdns_early_console_setup);
+OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup);
+OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup);
+OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup);
/**
* cdns_uart_console_write - perform write operation
@@ -1109,30 +1095,33 @@ static void cdns_uart_console_write(struct console *co, const char *s,
unsigned int imr, ctrl;
int locked = 1;
- if (oops_in_progress)
+ if (port->sysrq)
+ locked = 0;
+ else if (oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
/* save and disable interrupt */
- imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
- writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
+ imr = readl(port->membase + CDNS_UART_IMR);
+ writel(imr, port->membase + CDNS_UART_IDR);
/*
* Make sure that the tx part is enabled. Set the TX enable bit and
* clear the TX disable bit to enable the transmitter.
*/
- ctrl = readl(port->membase + CDNS_UART_CR_OFFSET);
- writel((ctrl & ~CDNS_UART_CR_TX_DIS) | CDNS_UART_CR_TX_EN,
- port->membase + CDNS_UART_CR_OFFSET);
+ ctrl = readl(port->membase + CDNS_UART_CR);
+ ctrl &= ~CDNS_UART_CR_TX_DIS;
+ ctrl |= CDNS_UART_CR_TX_EN;
+ writel(ctrl, port->membase + CDNS_UART_CR);
uart_console_write(port, s, count, cdns_uart_console_putchar);
cdns_uart_console_wait_tx(port);
- writel(ctrl, port->membase + CDNS_UART_CR_OFFSET);
+ writel(ctrl, port->membase + CDNS_UART_CR);
/* restore interrupt state */
- writel(imr, port->membase + CDNS_UART_IER_OFFSET);
+ writel(imr, port->membase + CDNS_UART_IER);
if (locked)
spin_unlock_irqrestore(&port->lock, flags);
@@ -1244,14 +1233,13 @@ static int cdns_uart_suspend(struct device *device)
spin_lock_irqsave(&port->lock, flags);
/* Empty the receive FIFO first before making changes */
- while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+ while (!(readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_RXEMPTY))
- readl(port->membase + CDNS_UART_FIFO_OFFSET);
+ readl(port->membase + CDNS_UART_FIFO);
/* set RX trigger level to 1 */
- writel(1, port->membase + CDNS_UART_RXWM_OFFSET);
+ writel(1, port->membase + CDNS_UART_RXWM);
/* disable RX timeout interrupts */
- writel(CDNS_UART_IXR_TOUT,
- port->membase + CDNS_UART_IDR_OFFSET);
+ writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1290,30 +1278,28 @@ static int cdns_uart_resume(struct device *device)
spin_lock_irqsave(&port->lock, flags);
/* Set TX/RX Reset */
- ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
- writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
- while (readl(port->membase + CDNS_UART_CR_OFFSET) &
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
+ while (readl(port->membase + CDNS_UART_CR) &
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
/* restore rx timeout value */
- writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
+ writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
/* Enable Tx/Rx */
- ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+ ctrl_reg = readl(port->membase + CDNS_UART_CR);
ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
- writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+ writel(ctrl_reg, port->membase + CDNS_UART_CR);
spin_unlock_irqrestore(&port->lock, flags);
} else {
spin_lock_irqsave(&port->lock, flags);
/* restore original rx trigger level */
- writel(rx_trigger_level,
- port->membase + CDNS_UART_RXWM_OFFSET);
+ writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
/* enable RX timeout interrupt */
- writel(CDNS_UART_IXR_TOUT,
- port->membase + CDNS_UART_IER_OFFSET);
+ writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1406,27 +1392,30 @@ static int cdns_uart_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Cannot get uart_port structure\n");
rc = -ENODEV;
goto err_out_notif_unreg;
- } else {
- /* Register the port.
- * This function also registers this device with the tty layer
- * and triggers invocation of the config_port() entry point.
- */
- port->mapbase = res->start;
- port->irq = irq;
- port->dev = &pdev->dev;
- port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
- port->private_data = cdns_uart_data;
- cdns_uart_data->port = port;
- platform_set_drvdata(pdev, port);
- rc = uart_add_one_port(&cdns_uart_uart_driver, port);
- if (rc) {
- dev_err(&pdev->dev,
- "uart_add_one_port() failed; err=%i\n", rc);
- goto err_out_notif_unreg;
- }
- return 0;
}
+ /*
+ * Register the port.
+ * This function also registers this device with the tty layer
+ * and triggers invocation of the config_port() entry point.
+ */
+ port->mapbase = res->start;
+ port->irq = irq;
+ port->dev = &pdev->dev;
+ port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
+ port->private_data = cdns_uart_data;
+ cdns_uart_data->port = port;
+ platform_set_drvdata(pdev, port);
+
+ rc = uart_add_one_port(&cdns_uart_uart_driver, port);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "uart_add_one_port() failed; err=%i\n", rc);
+ goto err_out_notif_unreg;
+ }
+
+ return 0;
+
err_out_notif_unreg:
#ifdef CONFIG_COMMON_CLK
clk_notifier_unregister(cdns_uart_data->uartclk,
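Taken together, the xilinx_uartps hunks replace one monolithic ISR with small cdns_uart_handle_rx()/cdns_uart_handle_tx() helpers behind a thin dispatcher keyed off the new CDNS_UART_RX_IRQS mask. Reduced to a hedged sketch of that shape (the demo_* names and register values are illustrative only):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial_core.h>

#define DEMO_ISR	0x14		/* hypothetical latched-status reg */
#define DEMO_RX_IRQS	0x000000ff	/* hypothetical composite RX mask */
#define DEMO_TXEMPTY	0x00000100	/* hypothetical TX-empty bit */

static void demo_handle_rx(struct uart_port *port, u32 status);
static void demo_handle_tx(struct uart_port *port);

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&port->lock, flags);

	/* One read reports every pending cause... */
	status = readl(port->membase + DEMO_ISR);

	/* ...and each class of work gets its own helper. */
	if (status & DEMO_RX_IRQS)
		demo_handle_rx(port, status);
	if (status & DEMO_TXEMPTY)
		demo_handle_tx(port);

	/* Ack exactly the causes that were observed. */
	writel(status, port->membase + DEMO_ISR);

	spin_unlock_irqrestore(&port->lock, flags);
	return IRQ_HANDLED;
}

Note that the RX helper above now calls tty_flip_buffer_push() without dropping the port lock, where the old ISR unlocked around it; that is safe because the push merely schedules the flush work rather than running the line discipline inline.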
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 2b65bb7ffb8a..eeefd76a30da 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -1181,6 +1181,10 @@ static void zs_console_write(struct console *co, const char *s,
if (txint & TxINT_ENAB) {
zport->regs[1] |= TxINT_ENAB;
write_zsreg(zport, R1, zport->regs[1]);
+
+ /* Resume any transmission as the TxIP bit won't be set. */
+ if (!zport->tx_stopped)
+ zs_raw_transmit_chars(zport);
}
spin_unlock_irqrestore(&scc->zlock, flags);
}
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 6188059fd523..f5476e270734 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -2363,7 +2363,7 @@ static void mgsl_throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
mgsl_send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->irq_spinlock,flags);
info->serial_signals &= ~SerialSignal_RTS;
usc_set_serial_signals(info);
@@ -2397,7 +2397,7 @@ static void mgsl_unthrottle(struct tty_struct * tty)
mgsl_send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->irq_spinlock,flags);
info->serial_signals |= SerialSignal_RTS;
usc_set_serial_signals(info);
@@ -3039,30 +3039,25 @@ static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termio
mgsl_change_params(info);
/* Handle transition to B0 status */
- if (old_termios->c_cflag & CBAUD &&
- !(tty->termios.c_cflag & CBAUD)) {
+ if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
-
+
/* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) &&
- tty->termios.c_cflag & CBAUD) {
+ if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->serial_signals |= SerialSignal_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
info->serial_signals |= SerialSignal_RTS;
- }
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
-
+
/* Handle turning off CRTSCTS */
- if (old_termios->c_cflag & CRTSCTS &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
mgsl_start(tty);
}
@@ -3281,7 +3276,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
return 0;
}
- if (tty->termios.c_cflag & CLOCAL)
+ if (C_CLOCAL(tty))
do_clocal = true;
/* Wait for carrier detect and the line to become
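The synclink conversions in the hunks above and below are mechanical: the C_*() helpers test bits in tty->termios.c_cflag, matching the I_IXOFF()-style macros these files already use. Paraphrased from include/linux/tty.h:

#define _C_FLAG(tty, flag)	((tty)->termios.c_cflag & (flag))
#define C_BAUD(tty)		_C_FLAG((tty), CBAUD)
#define C_CLOCAL(tty)		_C_FLAG((tty), CLOCAL)
#define C_CRTSCTS(tty)		_C_FLAG((tty), CRTSCTS)

So "if (tty->termios.c_cflag & CRTSCTS)" and "if (C_CRTSCTS(tty))" are equivalent; only the spelling changes.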
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 5505ea842179..c0a2f5a1b1c2 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -774,8 +774,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
change_params(info);
/* Handle transition to B0 status */
- if (old_termios->c_cflag & CBAUD &&
- !(tty->termios.c_cflag & CBAUD)) {
+ if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
@@ -783,21 +782,17 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
}
/* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) &&
- tty->termios.c_cflag & CBAUD) {
+ if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->signals |= SerialSignal_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
info->signals |= SerialSignal_RTS;
- }
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
/* Handle turning off CRTSCTS */
- if (old_termios->c_cflag & CRTSCTS &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
tx_release(tty);
}
@@ -1362,7 +1357,7 @@ static void throttle(struct tty_struct * tty)
DBGINFO(("%s throttle\n", info->device_name));
if (I_IXOFF(tty))
send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals &= ~SerialSignal_RTS;
set_signals(info);
@@ -1387,7 +1382,7 @@ static void unthrottle(struct tty_struct * tty)
else
send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals |= SerialSignal_RTS;
set_signals(info);
@@ -3280,7 +3275,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
return 0;
}
- if (tty->termios.c_cflag & CLOCAL)
+ if (C_CLOCAL(tty))
do_clocal = true;
/* Wait for carrier detect and the line to become
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index fb00a06dfa4b..90da0c712262 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -549,8 +549,8 @@ static int tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static int set_break(struct tty_struct *tty, int break_state);
-static void add_device(SLMP_INFO *info);
-static void device_init(int adapter_num, struct pci_dev *pdev);
+static int add_device(SLMP_INFO *info);
+static int device_init(int adapter_num, struct pci_dev *pdev);
static int claim_resources(SLMP_INFO *info);
static void release_resources(SLMP_INFO *info);
@@ -871,8 +871,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
change_params(info);
/* Handle transition to B0 status */
- if (old_termios->c_cflag & CBAUD &&
- !(tty->termios.c_cflag & CBAUD)) {
+ if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
@@ -880,21 +879,17 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
}
/* Handle transition away from B0 status */
- if (!(old_termios->c_cflag & CBAUD) &&
- tty->termios.c_cflag & CBAUD) {
+ if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->serial_signals |= SerialSignal_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
- !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
info->serial_signals |= SerialSignal_RTS;
- }
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
/* Handle turning off CRTSCTS */
- if (old_termios->c_cflag & CRTSCTS &&
- !(tty->termios.c_cflag & CRTSCTS)) {
+ if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
tty->hw_stopped = 0;
tx_release(tty);
}
@@ -1472,7 +1467,7 @@ static void throttle(struct tty_struct * tty)
if (I_IXOFF(tty))
send_xchar(tty, STOP_CHAR(tty));
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->serial_signals &= ~SerialSignal_RTS;
set_signals(info);
@@ -1501,7 +1496,7 @@ static void unthrottle(struct tty_struct * tty)
send_xchar(tty, START_CHAR(tty));
}
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->serial_signals |= SerialSignal_RTS;
set_signals(info);
@@ -3297,7 +3292,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
return 0;
}
- if (tty->termios.c_cflag & CLOCAL)
+ if (C_CLOCAL(tty))
do_clocal = true;
/* Wait for carrier detect and the line to become
@@ -3693,7 +3688,7 @@ static void release_resources(SLMP_INFO *info)
/* Add the specified device instance data structure to the
* global linked list of devices and increment the device count.
*/
-static void add_device(SLMP_INFO *info)
+static int add_device(SLMP_INFO *info)
{
info->next_device = NULL;
info->line = synclinkmp_device_count;
@@ -3731,7 +3726,9 @@ static void add_device(SLMP_INFO *info)
info->max_frame_size );
#if SYNCLINK_GENERIC_HDLC
- hdlcdev_init(info);
+ return hdlcdev_init(info);
+#else
+ return 0;
#endif
}
@@ -3820,10 +3817,10 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
return info;
}
-static void device_init(int adapter_num, struct pci_dev *pdev)
+static int device_init(int adapter_num, struct pci_dev *pdev)
{
SLMP_INFO *port_array[SCA_MAX_PORTS];
- int port;
+ int port, rc;
/* allocate device instances for up to SCA_MAX_PORTS devices */
for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
@@ -3833,14 +3830,16 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
tty_port_destroy(&port_array[port]->port);
kfree(port_array[port]);
}
- return;
+ return -ENOMEM;
}
}
/* give copy of port_array to all ports and add to device list */
for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
memcpy(port_array[port]->port_array,port_array,sizeof(port_array));
- add_device( port_array[port] );
+ rc = add_device( port_array[port] );
+ if (rc)
+ goto err_add;
spin_lock_init(&port_array[port]->lock);
}
@@ -3860,21 +3859,30 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
alloc_dma_bufs(port_array[port]);
}
- if ( request_irq(port_array[0]->irq_level,
+ rc = request_irq(port_array[0]->irq_level,
synclinkmp_interrupt,
port_array[0]->irq_flags,
port_array[0]->device_name,
- port_array[0]) < 0 ) {
+ port_array[0]);
+ if ( rc ) {
printk( "%s(%d):%s Can't request interrupt, IRQ=%d\n",
__FILE__,__LINE__,
port_array[0]->device_name,
port_array[0]->irq_level );
+ goto err_irq;
}
- else {
- port_array[0]->irq_requested = true;
- adapter_test(port_array[0]);
- }
+ port_array[0]->irq_requested = true;
+ adapter_test(port_array[0]);
}
+ return 0;
+err_irq:
+ release_resources( port_array[0] );
+err_add:
+ for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
+ tty_port_destroy(&port_array[port]->port);
+ kfree(port_array[port]);
+ }
+ return rc;
}
static const struct tty_operations ops = {
@@ -5589,8 +5597,7 @@ static int synclinkmp_init_one (struct pci_dev *dev,
printk("error enabling pci device %p\n", dev);
return -EIO;
}
- device_init( ++synclinkmp_adapter_count, dev );
- return 0;
+ return device_init( ++synclinkmp_adapter_count, dev );
}
static void synclinkmp_remove_one (struct pci_dev *dev)
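device_init() and add_device() above switch from void returns to error propagation using the kernel's usual goto-unwind idiom: each failing step jumps to a label that releases only what was acquired before it, in reverse order. A generic sketch of that shape, with every demo_* name hypothetical and the helper declarations omitted:

#include <linux/interrupt.h>

static int demo_init(struct demo_adapter *dev)
{
	int rc;

	rc = demo_alloc_ports(dev);
	if (rc)
		return rc;		/* nothing acquired yet */

	rc = demo_add_devices(dev);
	if (rc)
		goto err_free_ports;

	rc = request_irq(dev->irq, demo_interrupt, 0, dev->name, dev);
	if (rc)
		goto err_del_devices;

	return 0;

err_del_devices:
	demo_del_devices(dev);
err_free_ports:
	demo_free_ports(dev);
	return rc;
}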
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 3d245cd3d8e6..df2d735338e2 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -14,16 +14,23 @@
#include <linux/tty.h>
struct tty_audit_buf {
- atomic_t count;
struct mutex mutex; /* Protects all data below */
- int major, minor; /* The TTY which the data is from */
+ dev_t dev; /* The TTY which the data is from */
unsigned icanon:1;
size_t valid;
unsigned char *data; /* Allocated size N_TTY_BUF_SIZE */
};
-static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
- unsigned icanon)
+static struct tty_audit_buf *tty_audit_buf_ref(void)
+{
+ struct tty_audit_buf *buf;
+
+ buf = current->signal->tty_audit_buf;
+ WARN_ON(buf == ERR_PTR(-ESRCH));
+ return buf;
+}
+
+static struct tty_audit_buf *tty_audit_buf_alloc(void)
{
struct tty_audit_buf *buf;
@@ -33,11 +40,9 @@ static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
if (!buf->data)
goto err_buf;
- atomic_set(&buf->count, 1);
mutex_init(&buf->mutex);
- buf->major = major;
- buf->minor = minor;
- buf->icanon = icanon;
+ buf->dev = MKDEV(0, 0);
+ buf->icanon = 0;
buf->valid = 0;
return buf;
@@ -54,13 +59,7 @@ static void tty_audit_buf_free(struct tty_audit_buf *buf)
kfree(buf);
}
-static void tty_audit_buf_put(struct tty_audit_buf *buf)
-{
- if (atomic_dec_and_test(&buf->count))
- tty_audit_buf_free(buf);
-}
-
-static void tty_audit_log(const char *description, int major, int minor,
+static void tty_audit_log(const char *description, dev_t dev,
unsigned char *data, size_t size)
{
struct audit_buffer *ab;
@@ -76,7 +75,7 @@ static void tty_audit_log(const char *description, int major, int minor,
audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
" minor=%d comm=", description, pid, uid,
- loginuid, sessionid, major, minor);
+ loginuid, sessionid, MAJOR(dev), MINOR(dev));
get_task_comm(name, tsk);
audit_log_untrustedstring(ab, name);
audit_log_format(ab, " data=");
@@ -99,7 +98,7 @@ static void tty_audit_buf_push(struct tty_audit_buf *buf)
buf->valid = 0;
return;
}
- tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid);
+ tty_audit_log("tty", buf->dev, buf->data, buf->valid);
buf->valid = 0;
}
@@ -108,21 +107,20 @@ static void tty_audit_buf_push(struct tty_audit_buf *buf)
*
* Make sure all buffered data is written out and deallocate the buffer.
* Only needs to be called if current->signal->tty_audit_buf != %NULL.
+ *
+ * The process is single-threaded at this point; no other threads share
+ * current->signal.
*/
void tty_audit_exit(void)
{
struct tty_audit_buf *buf;
- buf = current->signal->tty_audit_buf;
- current->signal->tty_audit_buf = NULL;
+ buf = xchg(&current->signal->tty_audit_buf, ERR_PTR(-ESRCH));
if (!buf)
return;
- mutex_lock(&buf->mutex);
tty_audit_buf_push(buf);
- mutex_unlock(&buf->mutex);
-
- tty_audit_buf_put(buf);
+ tty_audit_buf_free(buf);
}
/**
@@ -133,7 +131,6 @@ void tty_audit_exit(void)
void tty_audit_fork(struct signal_struct *sig)
{
sig->audit_tty = current->signal->audit_tty;
- sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd;
}
/**
@@ -141,123 +138,62 @@ void tty_audit_fork(struct signal_struct *sig)
*/
void tty_audit_tiocsti(struct tty_struct *tty, char ch)
{
- struct tty_audit_buf *buf;
- int major, minor, should_audit;
- unsigned long flags;
+ dev_t dev;
- spin_lock_irqsave(&current->sighand->siglock, flags);
- should_audit = current->signal->audit_tty;
- buf = current->signal->tty_audit_buf;
- if (buf)
- atomic_inc(&buf->count);
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
- major = tty->driver->major;
- minor = tty->driver->minor_start + tty->index;
- if (buf) {
- mutex_lock(&buf->mutex);
- if (buf->major == major && buf->minor == minor)
- tty_audit_buf_push(buf);
- mutex_unlock(&buf->mutex);
- tty_audit_buf_put(buf);
- }
-
- if (should_audit && audit_enabled) {
- kuid_t auid;
- unsigned int sessionid;
+ dev = MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
+ if (tty_audit_push())
+ return;
- auid = audit_get_loginuid(current);
- sessionid = audit_get_sessionid(current);
- tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1);
- }
+ if (audit_enabled)
+ tty_audit_log("ioctl=TIOCSTI", dev, &ch, 1);
}
/**
- * tty_audit_push_current - Flush current's pending audit data
+ * tty_audit_push - Flush current's pending audit data
*
- * Try to lock sighand and get a reference to the tty audit buffer if available.
- * Flush the buffer or return an appropriate error code.
+ * Returns 0 on success, -EPERM if tty audit is disabled
*/
-int tty_audit_push_current(void)
+int tty_audit_push(void)
{
- struct tty_audit_buf *buf = ERR_PTR(-EPERM);
- struct task_struct *tsk = current;
- unsigned long flags;
+ struct tty_audit_buf *buf;
- if (!lock_task_sighand(tsk, &flags))
- return -ESRCH;
+ if (~current->signal->audit_tty & AUDIT_TTY_ENABLE)
+ return -EPERM;
- if (tsk->signal->audit_tty) {
- buf = tsk->signal->tty_audit_buf;
- if (buf)
- atomic_inc(&buf->count);
+ buf = tty_audit_buf_ref();
+ if (!IS_ERR_OR_NULL(buf)) {
+ mutex_lock(&buf->mutex);
+ tty_audit_buf_push(buf);
+ mutex_unlock(&buf->mutex);
}
- unlock_task_sighand(tsk, &flags);
-
- /*
- * Return 0 when signal->audit_tty set
- * but tsk->signal->tty_audit_buf == NULL.
- */
- if (!buf || IS_ERR(buf))
- return PTR_ERR(buf);
-
- mutex_lock(&buf->mutex);
- tty_audit_buf_push(buf);
- mutex_unlock(&buf->mutex);
-
- tty_audit_buf_put(buf);
return 0;
}
/**
* tty_audit_buf_get - Get an audit buffer.
*
- * Get an audit buffer for @tty, allocate it if necessary. Return %NULL
- * if TTY auditing is disabled or out of memory. Otherwise, return a new
- * reference to the buffer.
+ * Get an audit buffer, allocate it if necessary. Return %NULL
+ * if out of memory or ERR_PTR(-ESRCH) if tty_audit_exit() has already
+ * occurred. Otherwise, return a new reference to the buffer.
*/
-static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
- unsigned icanon)
+static struct tty_audit_buf *tty_audit_buf_get(void)
{
- struct tty_audit_buf *buf, *buf2;
- unsigned long flags;
-
- buf = NULL;
- buf2 = NULL;
- spin_lock_irqsave(&current->sighand->siglock, flags);
- if (likely(!current->signal->audit_tty))
- goto out;
- buf = current->signal->tty_audit_buf;
- if (buf) {
- atomic_inc(&buf->count);
- goto out;
- }
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ struct tty_audit_buf *buf;
+
+ buf = tty_audit_buf_ref();
+ if (buf)
+ return buf;
- buf2 = tty_audit_buf_alloc(tty->driver->major,
- tty->driver->minor_start + tty->index,
- icanon);
- if (buf2 == NULL) {
+ buf = tty_audit_buf_alloc();
+ if (buf == NULL) {
audit_log_lost("out of memory in TTY auditing");
return NULL;
}
- spin_lock_irqsave(&current->sighand->siglock, flags);
- if (!current->signal->audit_tty)
- goto out;
- buf = current->signal->tty_audit_buf;
- if (!buf) {
- current->signal->tty_audit_buf = buf2;
- buf = buf2;
- buf2 = NULL;
- }
- atomic_inc(&buf->count);
- /* Fall through */
- out:
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- if (buf2)
- tty_audit_buf_free(buf2);
- return buf;
+ /* Race to use this buffer, free it if another wins */
+ if (cmpxchg(&current->signal->tty_audit_buf, NULL, buf) != NULL)
+ tty_audit_buf_free(buf);
+ return tty_audit_buf_ref();
}
/**
@@ -265,39 +201,36 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
*
* Audit @data of @size from @tty, if necessary.
*/
-void tty_audit_add_data(struct tty_struct *tty, const void *data,
- size_t size, unsigned icanon)
+void tty_audit_add_data(struct tty_struct *tty, const void *data, size_t size)
{
struct tty_audit_buf *buf;
- int major, minor;
- int audit_log_tty_passwd;
- unsigned long flags;
+ unsigned int icanon = !!L_ICANON(tty);
+ unsigned int audit_tty;
+ dev_t dev;
- if (unlikely(size == 0))
+ audit_tty = READ_ONCE(current->signal->audit_tty);
+ if (~audit_tty & AUDIT_TTY_ENABLE)
return;
- spin_lock_irqsave(&current->sighand->siglock, flags);
- audit_log_tty_passwd = current->signal->audit_tty_log_passwd;
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- if (!audit_log_tty_passwd && icanon && !L_ECHO(tty))
+ if (unlikely(size == 0))
return;
if (tty->driver->type == TTY_DRIVER_TYPE_PTY
&& tty->driver->subtype == PTY_TYPE_MASTER)
return;
- buf = tty_audit_buf_get(tty, icanon);
- if (!buf)
+ if ((~audit_tty & AUDIT_TTY_LOG_PASSWD) && icanon && !L_ECHO(tty))
+ return;
+
+ buf = tty_audit_buf_get();
+ if (IS_ERR_OR_NULL(buf))
return;
mutex_lock(&buf->mutex);
- major = tty->driver->major;
- minor = tty->driver->minor_start + tty->index;
- if (buf->major != major || buf->minor != minor
- || buf->icanon != icanon) {
+ dev = MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
+ if (buf->dev != dev || buf->icanon != icanon) {
tty_audit_buf_push(buf);
- buf->major = major;
- buf->minor = minor;
+ buf->dev = dev;
buf->icanon = icanon;
}
do {
@@ -314,38 +247,4 @@ void tty_audit_add_data(struct tty_struct *tty, const void *data,
tty_audit_buf_push(buf);
} while (size != 0);
mutex_unlock(&buf->mutex);
- tty_audit_buf_put(buf);
-}
-
-/**
- * tty_audit_push - Push buffered data out
- *
- * Make sure no audit data is pending for @tty on the current process.
- */
-void tty_audit_push(struct tty_struct *tty)
-{
- struct tty_audit_buf *buf;
- unsigned long flags;
-
- spin_lock_irqsave(&current->sighand->siglock, flags);
- if (likely(!current->signal->audit_tty)) {
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- return;
- }
- buf = current->signal->tty_audit_buf;
- if (buf)
- atomic_inc(&buf->count);
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
- if (buf) {
- int major, minor;
-
- major = tty->driver->major;
- minor = tty->driver->minor_start + tty->index;
- mutex_lock(&buf->mutex);
- if (buf->major == major && buf->minor == minor)
- tty_audit_buf_push(buf);
- mutex_unlock(&buf->mutex);
- tty_audit_buf_put(buf);
- }
}
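The tty_audit rewrite above trades the refcount-under-siglock scheme for a single install-once pointer: cmpxchg() settles allocation races, and tty_audit_exit() xchg()s in ERR_PTR(-ESRCH) so later lookups can distinguish "no buffer yet" (NULL) from "buffer gone for good". The install-once core, as a hedged sketch with hypothetical demo_* names:

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/slab.h>

struct demo_buf { char data[256]; };

static struct demo_buf *demo_get_buf(struct demo_buf **slot)
{
	struct demo_buf *buf = READ_ONCE(*slot);

	if (buf)
		return buf;	/* may be ERR_PTR(-ESRCH) after teardown */

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Lost the race? Another thread installed its buffer; use that. */
	if (cmpxchg(slot, NULL, buf) != NULL)
		kfree(buf);

	return READ_ONCE(*slot);
}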
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 3cd31e0d4bd9..a946e49a2626 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -435,25 +435,42 @@ int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
+/**
+ * tty_ldisc_receive_buf - forward data to line discipline
+ * @ld: line discipline to process input
+ * @p: char buffer
+ * @f: TTY_* flags buffer
+ * @count: number of bytes to process
+ *
+ * Callers other than flush_to_ldisc() need to exclude the kworker
+ * from concurrent use of the line discipline, see paste_selection().
+ *
+ * Returns the number of bytes processed
+ */
+int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+ char *f, int count)
+{
+ if (ld->ops->receive_buf2)
+ count = ld->ops->receive_buf2(ld->tty, p, f, count);
+ else {
+ count = min_t(int, count, ld->tty->receive_room);
+ if (count && ld->ops->receive_buf)
+ ld->ops->receive_buf(ld->tty, p, f, count);
+ }
+ return count;
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
static int
-receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
+receive_buf(struct tty_ldisc *ld, struct tty_buffer *head, int count)
{
- struct tty_ldisc *disc = tty->ldisc;
unsigned char *p = char_buf_ptr(head, head->read);
char *f = NULL;
if (~head->flags & TTYB_NORMAL)
f = flag_buf_ptr(head, head->read);
- if (disc->ops->receive_buf2)
- count = disc->ops->receive_buf2(tty, p, f, count);
- else {
- count = min_t(int, count, tty->receive_room);
- if (count && disc->ops->receive_buf)
- disc->ops->receive_buf(tty, p, f, count);
- }
- return count;
+ return tty_ldisc_receive_buf(ld, p, f, count);
}
/**
@@ -514,7 +531,7 @@ static void flush_to_ldisc(struct work_struct *work)
continue;
}
- count = receive_buf(tty, head, count);
+ count = receive_buf(disc, head, count);
if (!count)
break;
head->read += count;
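Exporting tty_ldisc_receive_buf() lets callers outside the flush worker reuse the receive_buf2-or-receive_buf fallback above. A hypothetical caller, honoring the kerneldoc's locking note and the fact that, after this series, tty_ldisc_ref_wait() can return NULL on a hung-up tty:

#include <linux/errno.h>
#include <linux/tty.h>

static int demo_push_bytes(struct tty_struct *tty, unsigned char *p, int n)
{
	struct tty_ldisc *ld;
	int taken;

	ld = tty_ldisc_ref_wait(tty);
	if (!ld)
		return -EIO;	/* tty was hung up meanwhile */

	/* NULL flags buffer => every byte is treated as TTY_NORMAL */
	taken = tty_ldisc_receive_buf(ld, p, NULL, n);
	tty_ldisc_deref(ld);
	return taken;
}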
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index a7eacef1bd22..9b04d72e752e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -123,7 +123,8 @@ struct ktermios tty_std_termios = { /* for the benefit of tty drivers */
ECHOCTL | ECHOKE | IEXTEN,
.c_cc = INIT_C_CC,
.c_ispeed = 38400,
- .c_ospeed = 38400
+ .c_ospeed = 38400,
+ /* .c_line = N_TTY, */
};
EXPORT_SYMBOL(tty_std_termios);
@@ -134,13 +135,8 @@ EXPORT_SYMBOL(tty_std_termios);
LIST_HEAD(tty_drivers); /* linked list of tty drivers */
-/* Mutex to protect creating and releasing a tty. This is shared with
- vt.c for deeply disgusting hack reasons */
+/* Mutex to protect creating and releasing a tty */
DEFINE_MUTEX(tty_mutex);
-EXPORT_SYMBOL(tty_mutex);
-
-/* Spinlock to protect the tty->tty_files list */
-DEFINE_SPINLOCK(tty_files_lock);
static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
@@ -168,10 +164,9 @@ static void release_tty(struct tty_struct *tty, int idx);
* Locking: none. Must be called after tty is definitely unused
*/
-void free_tty_struct(struct tty_struct *tty)
+static void free_tty_struct(struct tty_struct *tty)
{
- if (!tty)
- return;
+ tty_ldisc_deinit(tty);
put_device(tty->dev);
kfree(tty->write_buf);
tty->magic = 0xDEADDEAD;
@@ -204,9 +199,9 @@ void tty_add_file(struct tty_struct *tty, struct file *file)
priv->tty = tty;
priv->file = file;
- spin_lock(&tty_files_lock);
+ spin_lock(&tty->files_lock);
list_add(&priv->list, &tty->tty_files);
- spin_unlock(&tty_files_lock);
+ spin_unlock(&tty->files_lock);
}
/**
@@ -227,10 +222,11 @@ void tty_free_file(struct file *file)
static void tty_del_file(struct file *file)
{
struct tty_file_private *priv = file->private_data;
+ struct tty_struct *tty = priv->tty;
- spin_lock(&tty_files_lock);
+ spin_lock(&tty->files_lock);
list_del(&priv->list);
- spin_unlock(&tty_files_lock);
+ spin_unlock(&tty->files_lock);
tty_free_file(file);
}
@@ -288,11 +284,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
struct list_head *p;
int count = 0;
- spin_lock(&tty_files_lock);
+ spin_lock(&tty->files_lock);
list_for_each(p, &tty->tty_files) {
count++;
}
- spin_unlock(&tty_files_lock);
+ spin_unlock(&tty->files_lock);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_SLAVE &&
tty->link && tty->link->count)
@@ -383,6 +379,12 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
EXPORT_SYMBOL_GPL(tty_find_polling_driver);
#endif
+static int is_ignored(int sig)
+{
+ return (sigismember(&current->blocked, sig) ||
+ current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
+}
+
/**
* tty_check_change - check for POSIX terminal changes
* @tty: tty to check
@@ -466,6 +468,11 @@ static long hung_up_tty_compat_ioctl(struct file *file,
return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
}
+static int hung_up_tty_fasync(int fd, struct file *file, int on)
+{
+ return -ENOTTY;
+}
+
static const struct file_operations tty_fops = {
.llseek = no_llseek,
.read = tty_read,
@@ -498,6 +505,7 @@ static const struct file_operations hung_up_tty_fops = {
.unlocked_ioctl = hung_up_tty_ioctl,
.compat_ioctl = hung_up_tty_compat_ioctl,
.release = tty_release,
+ .fasync = hung_up_tty_fasync,
};
static DEFINE_SPINLOCK(redirect_lock);
@@ -709,7 +717,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
workqueue with the lock held */
check_tty_count(tty, "tty_hangup");
- spin_lock(&tty_files_lock);
+ spin_lock(&tty->files_lock);
/* This breaks for file handles being sent over AF_UNIX sockets ? */
list_for_each_entry(priv, &tty->tty_files, list) {
filp = priv->file;
@@ -721,14 +729,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
__tty_fasync(-1, filp, 0); /* can't block */
filp->f_op = &hung_up_tty_fops;
}
- spin_unlock(&tty_files_lock);
+ spin_unlock(&tty->files_lock);
refs = tty_signal_session_leader(tty, exit_session);
/* Account for the p->signal references we killed */
while (refs--)
tty_kref_put(tty);
- tty_ldisc_hangup(tty);
+ tty_ldisc_hangup(tty, cons_filp != NULL);
spin_lock_irq(&tty->ctrl_lock);
clear_bit(TTY_THROTTLED, &tty->flags);
@@ -753,10 +761,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
} else if (tty->ops->hangup)
tty->ops->hangup(tty);
/*
- * We don't want to have driver/ldisc interactions beyond
- * the ones we did here. The driver layer expects no
- * calls after ->hangup() from the ldisc side. However we
- * can't yet guarantee all that.
+ * We don't want to have driver/ldisc interactions beyond the ones
+ * we did here. The driver layer expects no calls after ->hangup()
+ * from the ldisc side, which is now guaranteed.
*/
set_bit(TTY_HUPPED, &tty->flags);
tty_unlock(tty);
@@ -1069,6 +1076,8 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
/* We want to wait for the line discipline to sort out in this
situation */
ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_read(file, buf, count, ppos);
if (ld->ops->read)
i = ld->ops->read(tty, file, buf, count);
else
@@ -1243,6 +1252,8 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
if (tty->ops->write_room == NULL)
tty_err(tty, "missing write_room method\n");
ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_write(file, buf, count, ppos);
if (!ld->ops->write)
ret = -EIO;
else
@@ -1378,7 +1389,7 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
* the tty_mutex currently so we can be relaxed about ordering.
*/
-int tty_init_termios(struct tty_struct *tty)
+void tty_init_termios(struct tty_struct *tty)
{
struct ktermios *tp;
int idx = tty->index;
@@ -1388,24 +1399,21 @@ int tty_init_termios(struct tty_struct *tty)
else {
/* Check for lazy saved data */
tp = tty->driver->termios[idx];
- if (tp != NULL)
+ if (tp != NULL) {
tty->termios = *tp;
- else
+ tty->termios.c_line = tty->driver->init_termios.c_line;
+ } else
tty->termios = tty->driver->init_termios;
}
/* Compatibility until drivers always set this */
tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
- return 0;
}
EXPORT_SYMBOL_GPL(tty_init_termios);
int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
{
- int ret = tty_init_termios(tty);
- if (ret)
- return ret;
-
+ tty_init_termios(tty);
tty_driver_kref_get(driver);
tty->count++;
driver->ttys[tty->index] = tty;
@@ -1442,7 +1450,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,
*
* Locking: tty_mutex for now
*/
-void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
+static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
{
if (driver->ops->remove)
driver->ops->remove(driver, tty);
@@ -1475,7 +1483,8 @@ static int tty_reopen(struct tty_struct *tty)
tty->count++;
- WARN_ON(!tty->ldisc);
+ if (!tty->ldisc)
+ return tty_ldisc_reinit(tty, tty->termios.c_line);
return 0;
}
@@ -1529,7 +1538,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
tty_lock(tty);
retval = tty_driver_install_tty(driver, tty);
if (retval < 0)
- goto err_deinit_tty;
+ goto err_free_tty;
if (!tty->port)
tty->port = driver->ports[idx];
@@ -1551,9 +1560,8 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
/* Return the tty locked so that it cannot vanish under the caller */
return tty;
-err_deinit_tty:
+err_free_tty:
tty_unlock(tty);
- deinitialize_tty_struct(tty);
free_tty_struct(tty);
err_module_put:
module_put(driver->owner);
@@ -1568,7 +1576,7 @@ err_release_tty:
return ERR_PTR(retval);
}
-void tty_free_termios(struct tty_struct *tty)
+static void tty_free_termios(struct tty_struct *tty)
{
struct ktermios *tp;
int idx = tty->index;
@@ -1587,7 +1595,6 @@ void tty_free_termios(struct tty_struct *tty)
}
*tp = tty->termios;
}
-EXPORT_SYMBOL(tty_free_termios);
/**
* tty_flush_works - flush all works of a tty/pty pair
@@ -1634,9 +1641,9 @@ static void release_one_tty(struct work_struct *work)
tty_driver_kref_put(driver);
module_put(owner);
- spin_lock(&tty_files_lock);
+ spin_lock(&tty->files_lock);
list_del_init(&tty->tty_files);
- spin_unlock(&tty_files_lock);
+ spin_unlock(&tty->files_lock);
put_pid(tty->pgrp);
put_pid(tty->session);
@@ -1967,7 +1974,7 @@ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
* Locking: tty_mutex protects get_tty_driver
*/
static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
- int *noctty, int *index)
+ int *index)
{
struct tty_driver *driver;
@@ -1977,7 +1984,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
extern struct tty_driver *console_driver;
driver = tty_driver_kref_get(console_driver);
*index = fg_console;
- *noctty = 1;
break;
}
#endif
@@ -1988,7 +1994,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
if (driver) {
/* Don't let /dev/console block */
filp->f_flags |= O_NONBLOCK;
- *noctty = 1;
break;
}
}
@@ -2004,6 +2009,68 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
}
/**
+ * tty_open_by_driver - open a tty device
+ * @device: dev_t of device to open
+ * @inode: inode of device file
+ * @filp: file pointer to tty
+ *
+ * Performs the driver lookup, checks for a reopen, and otherwise
+ * performs the first-time tty initialization.
+ *
+ * Returns the locked, initialized or re-opened &tty_struct
+ *
+ * Claims the global tty_mutex to serialize:
+ * - concurrent first-time tty initialization
+ * - concurrent tty driver removal w/ lookup
+ * - concurrent tty removal from driver table
+ */
+static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
+ struct file *filp)
+{
+ struct tty_struct *tty;
+ struct tty_driver *driver = NULL;
+ int index = -1;
+ int retval;
+
+ mutex_lock(&tty_mutex);
+ driver = tty_lookup_driver(device, filp, &index);
+ if (IS_ERR(driver)) {
+ mutex_unlock(&tty_mutex);
+ return ERR_CAST(driver);
+ }
+
+ /* check whether we're reopening an existing tty */
+ tty = tty_driver_lookup_tty(driver, inode, index);
+ if (IS_ERR(tty)) {
+ mutex_unlock(&tty_mutex);
+ goto out;
+ }
+
+ if (tty) {
+ mutex_unlock(&tty_mutex);
+ retval = tty_lock_interruptible(tty);
+ tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
+ if (retval) {
+ if (retval == -EINTR)
+ retval = -ERESTARTSYS;
+ tty = ERR_PTR(retval);
+ goto out;
+ }
+ retval = tty_reopen(tty);
+ if (retval < 0) {
+ tty_unlock(tty);
+ tty = ERR_PTR(retval);
+ }
+ } else { /* Returns with the tty_lock held for now */
+ tty = tty_init_dev(driver, index);
+ mutex_unlock(&tty_mutex);
+ }
+out:
+ tty_driver_kref_put(driver);
+ return tty;
+}
+
+/**
* tty_open - open a tty device
* @inode: inode of device file
* @filp: file pointer to tty
@@ -2031,8 +2098,6 @@ static int tty_open(struct inode *inode, struct file *filp)
{
struct tty_struct *tty;
int noctty, retval;
- struct tty_driver *driver = NULL;
- int index;
dev_t device = inode->i_rdev;
unsigned saved_flags = filp->f_flags;
@@ -2043,53 +2108,15 @@ retry_open:
if (retval)
return -ENOMEM;
- noctty = filp->f_flags & O_NOCTTY;
- index = -1;
- retval = 0;
-
tty = tty_open_current_tty(device, filp);
- if (!tty) {
- mutex_lock(&tty_mutex);
- driver = tty_lookup_driver(device, filp, &noctty, &index);
- if (IS_ERR(driver)) {
- retval = PTR_ERR(driver);
- goto err_unlock;
- }
-
- /* check whether we're reopening an existing tty */
- tty = tty_driver_lookup_tty(driver, inode, index);
- if (IS_ERR(tty)) {
- retval = PTR_ERR(tty);
- goto err_unlock;
- }
-
- if (tty) {
- mutex_unlock(&tty_mutex);
- retval = tty_lock_interruptible(tty);
- tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
- if (retval) {
- if (retval == -EINTR)
- retval = -ERESTARTSYS;
- goto err_unref;
- }
- retval = tty_reopen(tty);
- if (retval < 0) {
- tty_unlock(tty);
- tty = ERR_PTR(retval);
- }
- } else { /* Returns with the tty_lock held for now */
- tty = tty_init_dev(driver, index);
- mutex_unlock(&tty_mutex);
- }
-
- tty_driver_kref_put(driver);
- }
+ if (!tty)
+ tty = tty_open_by_driver(device, inode, filp);
if (IS_ERR(tty)) {
+ tty_free_file(filp);
retval = PTR_ERR(tty);
if (retval != -EAGAIN || signal_pending(current))
- goto err_file;
- tty_free_file(filp);
+ return retval;
schedule();
goto retry_open;
}
@@ -2097,10 +2124,6 @@ retry_open:
tty_add_file(tty, filp);
check_tty_count(tty, __func__);
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->driver->subtype == PTY_TYPE_MASTER)
- noctty = 1;
-
tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
if (tty->ops->open)
@@ -2133,6 +2156,12 @@ retry_open:
read_lock(&tasklist_lock);
spin_lock_irq(&current->sighand->siglock);
+ noctty = (filp->f_flags & O_NOCTTY) ||
+ (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) ||
+ device == MKDEV(TTYAUX_MAJOR, 1) ||
+ (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->driver->subtype == PTY_TYPE_MASTER);
+
if (!noctty &&
current->signal->leader &&
!current->signal->tty &&
@@ -2158,15 +2187,6 @@ retry_open:
read_unlock(&tasklist_lock);
tty_unlock(tty);
return 0;
-err_unlock:
- mutex_unlock(&tty_mutex);
-err_unref:
- /* after locks to avoid deadlock */
- if (!IS_ERR_OR_NULL(driver))
- tty_driver_kref_put(driver);
-err_file:
- tty_free_file(filp);
- return retval;
}
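The noctty computation a few hunks up now derives the controlling-terminal decision entirely at assignment time. A minimal userspace sketch of the visible semantics (the device path is only an example):

	/* Userspace sketch: O_NOCTTY and controlling-terminal acquisition.
	 * "/dev/ttyS0" is an example path only. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		setsid();	/* become a session leader with no controlling tty */

		/* With O_NOCTTY, tty_open() never makes this the controlling tty. */
		int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Without O_NOCTTY (and not a pty master, /dev/tty0 or
		 * /dev/console), the same open by a session leader would
		 * acquire the controlling terminal. */
		close(fd);
		return 0;
	}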
@@ -2193,6 +2213,8 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
return 0;
ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_poll(filp, wait);
if (ld->ops->poll)
ret = ld->ops->poll(tty, filp, wait);
tty_ldisc_deref(ld);
@@ -2202,7 +2224,6 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
static int __tty_fasync(int fd, struct file *filp, int on)
{
struct tty_struct *tty = file_tty(filp);
- struct tty_ldisc *ldisc;
unsigned long flags;
int retval = 0;
@@ -2213,13 +2234,6 @@ static int __tty_fasync(int fd, struct file *filp, int on)
if (retval <= 0)
goto out;
- ldisc = tty_ldisc_ref(tty);
- if (ldisc) {
- if (ldisc->ops->fasync)
- ldisc->ops->fasync(tty, on);
- tty_ldisc_deref(ldisc);
- }
-
if (on) {
enum pid_type type;
struct pid *pid;
@@ -2245,10 +2259,11 @@ out:
static int tty_fasync(int fd, struct file *filp, int on)
{
struct tty_struct *tty = file_tty(filp);
- int retval;
+ int retval = -ENOTTY;
tty_lock(tty);
- retval = __tty_fasync(fd, filp, on);
+ if (!tty_hung_up_p(filp))
+ retval = __tty_fasync(fd, filp, on);
tty_unlock(tty);
return retval;
@@ -2282,6 +2297,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
return -EFAULT;
tty_audit_tiocsti(tty, ch);
ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return -EIO;
ld->ops->receive_buf(tty, &ch, &mbz, 1);
tty_ldisc_deref(ld);
return 0;
@@ -2646,13 +2663,13 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
static int tiocsetd(struct tty_struct *tty, int __user *p)
{
- int ldisc;
+ int disc;
int ret;
- if (get_user(ldisc, p))
+ if (get_user(disc, p))
return -EFAULT;
- ret = tty_set_ldisc(tty, ldisc);
+ ret = tty_set_ldisc(tty, disc);
return ret;
}
@@ -2674,6 +2691,8 @@ static int tiocgetd(struct tty_struct *tty, int __user *p)
int ret;
ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return -EIO;
ret = put_user(ld->ops->num, p);
tty_ldisc_deref(ld);
return ret;
@@ -2971,6 +2990,8 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return retval;
}
ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_ioctl(file, cmd, arg);
retval = -EINVAL;
if (ld->ops->ioctl) {
retval = ld->ops->ioctl(tty, file, cmd, arg);
@@ -2999,6 +3020,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
}
ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return hung_up_tty_compat_ioctl(file, cmd, arg);
if (ld->ops->compat_ioctl)
retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
else
@@ -3149,6 +3172,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
mutex_init(&tty->atomic_write_lock);
spin_lock_init(&tty->ctrl_lock);
spin_lock_init(&tty->flow_lock);
+ spin_lock_init(&tty->files_lock);
INIT_LIST_HEAD(&tty->tty_files);
INIT_WORK(&tty->SAK_work, do_SAK_work);
@@ -3162,20 +3186,6 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
}
/**
- * deinitialize_tty_struct
- * @tty: tty to deinitialize
- *
- * This subroutine deinitializes a tty structure that has been newly
- * allocated but tty_release cannot be called on that yet.
- *
- * Locking: none - tty in question must not be exposed at this point
- */
-void deinitialize_tty_struct(struct tty_struct *tty)
-{
- tty_ldisc_deinit(tty);
-}
-
-/**
* tty_put_char - write one character to a tty
* @tty: tty
* @ch: character
@@ -3569,7 +3579,7 @@ void __init console_init(void)
initcall_t *call;
/* Setup the default TTY line discipline. */
- tty_ldisc_begin();
+ n_tty_init();
/*
* set up the console device so that later boot sequences can
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 0ea351388724..23bf5bb1d8bf 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -719,16 +719,16 @@ static int get_sgflags(struct tty_struct *tty)
{
int flags = 0;
- if (!(tty->termios.c_lflag & ICANON)) {
- if (tty->termios.c_lflag & ISIG)
+ if (!L_ICANON(tty)) {
+ if (L_ISIG(tty))
flags |= 0x02; /* cbreak */
else
flags |= 0x20; /* raw */
}
- if (tty->termios.c_lflag & ECHO)
+ if (L_ECHO(tty))
flags |= 0x08; /* echo */
- if (tty->termios.c_oflag & OPOST)
- if (tty->termios.c_oflag & ONLCR)
+ if (O_OPOST(tty))
+ if (O_ONLCR(tty))
flags |= 0x10; /* crmod */
return flags;
}
@@ -908,7 +908,7 @@ static int tty_change_softcar(struct tty_struct *tty, int arg)
tty->termios.c_cflag |= bit;
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old);
- if ((tty->termios.c_cflag & CLOCAL) != bit)
+ if (C_CLOCAL(tty) != bit)
ret = -EINVAL;
up_write(&tty->termios_rwsem);
return ret;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index a054d03c22e7..68947f6de5ad 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -140,9 +140,16 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
* @disc: ldisc number
*
* Takes a reference to a line discipline. Deals with refcounts and
- * module locking counts. Returns NULL if the discipline is not available.
- * Returns a pointer to the discipline and bumps the ref count if it is
- * available
+ * module locking counts.
+ *
+ * Returns: -EINVAL if the discipline index is not [N_TTY..NR_LDISCS] or
+ * if the discipline is not registered
+ * -EAGAIN if request_module() failed to load or register
+ * the discipline
+ * -ENOMEM on allocation failure
+ *
+ * Otherwise, returns a pointer to the discipline and bumps the
+ * ref count
*
* Locking:
* takes tty_ldiscs_lock to guard against ldisc races
@@ -250,19 +257,23 @@ const struct file_operations tty_ldiscs_proc_fops = {
* reference to it. If the line discipline is in flux then
* wait patiently until it changes.
*
+ * Returns: NULL if the tty has been hung up and not re-opened with
+ * a new file descriptor, otherwise a valid ldisc reference
+ *
* Note: Must not be called from an IRQ/timer context. The caller
* must also be careful not to hold other locks that will deadlock
* against a discipline change, such as an existing ldisc reference
* (which we check for)
*
- * Note: only callable from a file_operations routine (which
- * guarantees tty->ldisc != NULL when the lock is acquired).
+ * Note: a file_operations routine (read/poll/write) should use this
+ * function to wait for any ldisc lifetime events to finish.
*/
struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
- WARN_ON(!tty->ldisc);
+ if (!tty->ldisc)
+ ldsem_up_read(&tty->ldisc_sem);
return tty->ldisc;
}
EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
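Since tty_ldisc_ref_wait() can now return NULL, every file_operations caller follows the same shape; a sketch of the expected pattern, where hung_up_tty_fallback() stands in for the matching hung_up_tty_read/write/poll/ioctl handler and do_ldisc_work() for the real work:

	/* Sketch only: hung_up_tty_fallback() and do_ldisc_work() are
	 * placeholder names, not real callers. */
	static ssize_t example_tty_op(struct tty_struct *tty, struct file *file)
	{
		struct tty_ldisc *ld;
		ssize_t ret;

		ld = tty_ldisc_ref_wait(tty);
		if (!ld)		/* hung up; ldisc already destroyed */
			return hung_up_tty_fallback(file);

		ret = do_ldisc_work(tty, ld);
		tty_ldisc_deref(ld);	/* always drop the reference */
		return ret;
	}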
@@ -304,13 +315,13 @@ void tty_ldisc_deref(struct tty_ldisc *ld)
EXPORT_SYMBOL_GPL(tty_ldisc_deref);
-static inline int __lockfunc
+static inline int
__tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
{
return ldsem_down_write(&tty->ldisc_sem, timeout);
}
-static inline int __lockfunc
+static inline int
__tty_ldisc_lock_nested(struct tty_struct *tty, unsigned long timeout)
{
return ldsem_down_write_nested(&tty->ldisc_sem,
@@ -322,8 +333,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty)
ldsem_up_write(&tty->ldisc_sem);
}
-static int __lockfunc
-tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
{
int ret;
@@ -340,7 +350,7 @@ static void tty_ldisc_unlock(struct tty_struct *tty)
__tty_ldisc_unlock(tty);
}
-static int __lockfunc
+static int
tty_ldisc_lock_pair_timeout(struct tty_struct *tty, struct tty_struct *tty2,
unsigned long timeout)
{
@@ -376,14 +386,13 @@ tty_ldisc_lock_pair_timeout(struct tty_struct *tty, struct tty_struct *tty2,
return 0;
}
-static void __lockfunc
-tty_ldisc_lock_pair(struct tty_struct *tty, struct tty_struct *tty2)
+static void tty_ldisc_lock_pair(struct tty_struct *tty, struct tty_struct *tty2)
{
tty_ldisc_lock_pair_timeout(tty, tty2, MAX_SCHEDULE_TIMEOUT);
}
-static void __lockfunc tty_ldisc_unlock_pair(struct tty_struct *tty,
- struct tty_struct *tty2)
+static void tty_ldisc_unlock_pair(struct tty_struct *tty,
+ struct tty_struct *tty2)
{
__tty_ldisc_unlock(tty);
if (tty2)
@@ -411,7 +420,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
/**
* tty_set_termios_ldisc - set ldisc field
* @tty: tty structure
- * @num: line discipline number
+ * @disc: line discipline number
*
* This is probably overkill for real world processors but
* they are not on hot paths so a little discipline won't do
@@ -424,10 +433,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
* Locking: takes termios_rwsem
*/
-static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
+static void tty_set_termios_ldisc(struct tty_struct *tty, int disc)
{
down_write(&tty->termios_rwsem);
- tty->termios.c_line = num;
+ tty->termios.c_line = disc;
up_write(&tty->termios_rwsem);
tty->disc_data = NULL;
@@ -455,7 +464,7 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
if (ret)
clear_bit(TTY_LDISC_OPEN, &tty->flags);
- tty_ldisc_debug(tty, "%p: opened\n", tty->ldisc);
+ tty_ldisc_debug(tty, "%p: opened\n", ld);
return ret;
}
return 0;
@@ -476,7 +485,7 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
clear_bit(TTY_LDISC_OPEN, &tty->flags);
if (ld->ops->close)
ld->ops->close(tty);
- tty_ldisc_debug(tty, "%p: closed\n", tty->ldisc);
+ tty_ldisc_debug(tty, "%p: closed\n", ld);
}
/**
@@ -525,12 +534,12 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
* the close of one side of a tty/pty pair, and eventually hangup.
*/
-int tty_set_ldisc(struct tty_struct *tty, int ldisc)
+int tty_set_ldisc(struct tty_struct *tty, int disc)
{
int retval;
struct tty_ldisc *old_ldisc, *new_ldisc;
- new_ldisc = tty_ldisc_get(tty, ldisc);
+ new_ldisc = tty_ldisc_get(tty, disc);
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
@@ -539,8 +548,13 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (retval)
goto err;
+ if (!tty->ldisc) {
+ retval = -EIO;
+ goto out;
+ }
+
/* Check the no-op case */
- if (tty->ldisc->ops->num == ldisc)
+ if (tty->ldisc->ops->num == disc)
goto out;
if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -556,7 +570,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
/* Now set up the new line discipline. */
tty->ldisc = new_ldisc;
- tty_set_termios_ldisc(tty, ldisc);
+ tty_set_termios_ldisc(tty, disc);
retval = tty_ldisc_open(tty, new_ldisc);
if (retval < 0) {
@@ -590,6 +604,25 @@ err:
}
/**
+ * tty_ldisc_kill - teardown ldisc
+ * @tty: tty being released
+ *
+ * Perform final close of the ldisc and reset tty->ldisc
+ */
+static void tty_ldisc_kill(struct tty_struct *tty)
+{
+ if (!tty->ldisc)
+ return;
+ /*
+ * Now kill off the ldisc
+ */
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+}
+
+/**
* tty_reset_termios - reset terminal state
* @tty: tty to reset
*
@@ -609,28 +642,44 @@ static void tty_reset_termios(struct tty_struct *tty)
/**
* tty_ldisc_reinit - reinitialise the tty ldisc
* @tty: tty to reinit
- * @ldisc: line discipline to reinitialize
+ * @disc: line discipline to reinitialize
+ *
+ * Completely reinitialize the line discipline state by closing the
+ * current instance, if there is one, and opening a new instance. If
+ * an error occurs opening the new non-N_TTY instance, the instance
+ * is dropped and tty->ldisc reset to NULL. The caller can then retry
+ * with N_TTY instead.
*
- * Switch the tty to a line discipline and leave the ldisc
- * state closed
+ * Returns 0 if successful, otherwise error code < 0
*/
-static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+int tty_ldisc_reinit(struct tty_struct *tty, int disc)
{
- struct tty_ldisc *ld = tty_ldisc_get(tty, ldisc);
+ struct tty_ldisc *ld;
+ int retval;
- if (IS_ERR(ld))
- return -1;
+ ld = tty_ldisc_get(tty, disc);
+ if (IS_ERR(ld)) {
+ BUG_ON(disc == N_TTY);
+ return PTR_ERR(ld);
+ }
- tty_ldisc_close(tty, tty->ldisc);
- tty_ldisc_put(tty->ldisc);
- /*
- * Switch the line discipline back
- */
- tty->ldisc = ld;
- tty_set_termios_ldisc(tty, ldisc);
+ if (tty->ldisc) {
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ }
- return 0;
+ /* switch the line discipline */
+ tty->ldisc = ld;
+ tty_set_termios_ldisc(tty, disc);
+ retval = tty_ldisc_open(tty, tty->ldisc);
+ if (retval) {
+ if (!WARN_ON(disc == N_TTY)) {
+ tty_ldisc_put(tty->ldisc);
+ tty->ldisc = NULL;
+ }
+ }
+ return retval;
}
/**
@@ -648,13 +697,11 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
* tty itself so we must be careful about locking rules.
*/
-void tty_ldisc_hangup(struct tty_struct *tty)
+void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
{
struct tty_ldisc *ld;
- int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
- int err = 0;
- tty_ldisc_debug(tty, "%p: closing\n", tty->ldisc);
+ tty_ldisc_debug(tty, "%p: hangup\n", tty->ldisc);
ld = tty_ldisc_ref(tty);
if (ld != NULL) {
@@ -680,31 +727,17 @@ void tty_ldisc_hangup(struct tty_struct *tty)
*/
tty_ldisc_lock(tty, MAX_SCHEDULE_TIMEOUT);
- if (tty->ldisc) {
-
- /* At this point we have a halted ldisc; we want to close it and
- reopen a new ldisc. We could defer the reopen to the next
- open but it means auditing a lot of other paths so this is
- a FIXME */
- if (reset == 0) {
+ if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+ tty_reset_termios(tty);
- if (!tty_ldisc_reinit(tty, tty->termios.c_line))
- err = tty_ldisc_open(tty, tty->ldisc);
- else
- err = 1;
- }
- /* If the re-open fails or we reset then go to N_TTY. The
- N_TTY open cannot fail */
- if (reset || err) {
- BUG_ON(tty_ldisc_reinit(tty, N_TTY));
- WARN_ON(tty_ldisc_open(tty, tty->ldisc));
- }
+ if (tty->ldisc) {
+ if (reinit) {
+ if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0)
+ tty_ldisc_reinit(tty, N_TTY);
+ } else
+ tty_ldisc_kill(tty);
}
tty_ldisc_unlock(tty);
- if (reset)
- tty_reset_termios(tty);
-
- tty_ldisc_debug(tty, "%p: re-opened\n", tty->ldisc);
}
/**
@@ -719,44 +752,26 @@ void tty_ldisc_hangup(struct tty_struct *tty)
int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
{
- struct tty_ldisc *ld = tty->ldisc;
- int retval;
-
- retval = tty_ldisc_open(tty, ld);
+ int retval = tty_ldisc_open(tty, tty->ldisc);
if (retval)
return retval;
if (o_tty) {
retval = tty_ldisc_open(o_tty, o_tty->ldisc);
if (retval) {
- tty_ldisc_close(tty, ld);
+ tty_ldisc_close(tty, tty->ldisc);
return retval;
}
}
return 0;
}
-static void tty_ldisc_kill(struct tty_struct *tty)
-{
- /*
- * Now kill off the ldisc
- */
- tty_ldisc_close(tty, tty->ldisc);
- tty_ldisc_put(tty->ldisc);
- /* Force an oops if we mess this up */
- tty->ldisc = NULL;
-
- /* Ensure the next open requests the N_TTY ldisc */
- tty_set_termios_ldisc(tty, N_TTY);
-}
-
/**
* tty_ldisc_release - release line discipline
* @tty: tty being shut down (or one end of pty pair)
*
* Called during the final close of a tty or a pty pair in order to shut
- * down the line discpline layer. On exit, each ldisc assigned is N_TTY and
- * each ldisc has not been opened.
+ * down the line discipline layer. On exit, each tty's ldisc is NULL.
*/
void tty_ldisc_release(struct tty_struct *tty)
@@ -797,7 +812,7 @@ void tty_ldisc_init(struct tty_struct *tty)
}
/**
- * tty_ldisc_init - ldisc cleanup for new tty
+ * tty_ldisc_deinit - ldisc cleanup for new tty
* @tty: tty that was allocated recently
*
* The tty structure must not be completely set up (tty_ldisc_setup) when
@@ -805,12 +820,7 @@ void tty_ldisc_init(struct tty_struct *tty)
*/
void tty_ldisc_deinit(struct tty_struct *tty)
{
- tty_ldisc_put(tty->ldisc);
+ if (tty->ldisc)
+ tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
}
-
-void tty_ldisc_begin(void)
-{
- /* Setup the default TTY line discipline. */
- (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
-}
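n_tty_init() performs the tty_register_ldisc(N_TTY, ...) call that the removed wrapper used to make. For reference, a hedged sketch of how a line discipline registers itself with this era's API (N_EXAMPLE and the ops contents are illustrative only):

	#include <linux/tty.h>
	#include <linux/tty_ldisc.h>

	#define N_EXAMPLE 25	/* hypothetical, unused ldisc number */

	static struct tty_ldisc_ops example_ldisc_ops = {
		.magic	= TTY_LDISC_MAGIC,
		.name	= "n_example",
		/* open/close/read/write hooks would go here */
	};

	static int __init example_ldisc_init(void)
	{
		/* Makes the discipline selectable via TIOCSETD. */
		return tty_register_ldisc(N_EXAMPLE, &example_ldisc_ops);
	}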
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index dfa9ec03fa8e..d8bae67a6174 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -10,7 +10,7 @@
* Getting the big tty mutex.
*/
-void __lockfunc tty_lock(struct tty_struct *tty)
+void tty_lock(struct tty_struct *tty)
{
if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
return;
@@ -32,7 +32,7 @@ int tty_lock_interruptible(struct tty_struct *tty)
return ret;
}
-void __lockfunc tty_unlock(struct tty_struct *tty)
+void tty_unlock(struct tty_struct *tty)
{
if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty))
return;
@@ -41,13 +41,13 @@ void __lockfunc tty_unlock(struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_unlock);
-void __lockfunc tty_lock_slave(struct tty_struct *tty)
+void tty_lock_slave(struct tty_struct *tty)
{
if (tty && tty != tty->link)
tty_lock(tty);
}
-void __lockfunc tty_unlock_slave(struct tty_struct *tty)
+void tty_unlock_slave(struct tty_struct *tty)
{
if (tty && tty != tty->link)
tty_unlock(tty);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 846ed481c24f..dbcca30a54b1 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -370,7 +370,7 @@ int tty_port_block_til_ready(struct tty_port *port,
}
if (filp->f_flags & O_NONBLOCK) {
/* Indicate we are open */
- if (tty->termios.c_cflag & CBAUD)
+ if (C_BAUD(tty))
tty_port_raise_dtr_rts(port);
port->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
@@ -476,7 +476,6 @@ int tty_port_close_start(struct tty_port *port,
spin_unlock_irqrestore(&port->lock, flags);
return 0;
}
- set_bit(ASYNCB_CLOSING, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
tty->closing = 1;
@@ -510,14 +509,12 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
if (port->blocked_open) {
spin_unlock_irqrestore(&port->lock, flags);
- if (port->close_delay) {
- msleep_interruptible(
- jiffies_to_msecs(port->close_delay));
- }
+ if (port->close_delay)
+ msleep_interruptible(jiffies_to_msecs(port->close_delay));
spin_lock_irqsave(&port->lock, flags);
wake_up_interruptible(&port->open_wait);
}
- port->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
+ port->flags &= ~ASYNC_NORMAL_ACTIVE;
spin_unlock_irqrestore(&port->lock, flags);
}
EXPORT_SYMBOL(tty_port_close_end);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 6f0336fff501..f973bfce5d08 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1706,16 +1706,12 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
return -EINVAL;
if (ct) {
- dia = kmalloc(sizeof(struct kbdiacr) * ct,
- GFP_KERNEL);
- if (!dia)
- return -ENOMEM;
- if (copy_from_user(dia, a->kbdiacr,
- sizeof(struct kbdiacr) * ct)) {
- kfree(dia);
- return -EFAULT;
- }
+ dia = memdup_user(a->kbdiacr,
+ sizeof(struct kbdiacr) * ct);
+ if (IS_ERR(dia))
+ return PTR_ERR(dia);
+
}
spin_lock_irqsave(&kbd_event_lock, flags);
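memdup_user() folds the kmalloc()/copy_from_user()/kfree() sequence above into a single call returning either the new buffer or an ERR_PTR; a minimal sketch of the idiom:

	#include <linux/string.h>
	#include <linux/slab.h>
	#include <linux/err.h>

	static int example_copy_in(const void __user *uptr, size_t len)
	{
		void *buf = memdup_user(uptr, len);

		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* -EFAULT or -ENOMEM */
		/* ... use buf ... */
		kfree(buf);
		return 0;
	}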
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 381a2b13682c..4dd9dd2270a0 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -347,6 +347,8 @@ int paste_selection(struct tty_struct *tty)
console_unlock();
ld = tty_ldisc_ref_wait(tty);
+ if (!ld)
+ return -EIO; /* ldisc was hung up */
tty_buffer_lock_exclusive(&vc->port);
add_wait_queue(&vc->paste_wait, &wait);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index bd51bdd0a7bf..3e3c7575e92d 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -568,7 +568,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
vc->vc_cols - vc->vc_x);
}
-static int softcursor_original;
+static int softcursor_original = -1;
static void add_softcursor(struct vc_data *vc)
{
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index d5c57f1e98fd..dca78565eb55 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_USB_U132_HCD) += host/
obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_HWA_HCD) += host/
obj-$(CONFIG_USB_IMX21_HCD) += host/
-obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/
+obj-$(CONFIG_USB_FSL_USB2) += host/
obj-$(CONFIG_USB_FOTG210_HCD) += host/
obj-$(CONFIG_USB_MAX3421_HCD) += host/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 1173f9cbc137..0a866e90b49c 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -476,6 +476,8 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
return -EINVAL;
if (index < 0 || index > 0x7f)
return -EINVAL;
+ if (tmp < 0 || tmp > len - pos)
+ return -EINVAL;
pos += tmp;
/* skip trailing newline */
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index f14f4ab47ebb..9ce8c9f91674 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -28,6 +28,11 @@ struct ci_hdrc_imx_platform_flag {
bool runtime_pm;
};
+static const struct ci_hdrc_imx_platform_flag imx23_usb_data = {
+ .flags = CI_HDRC_TURN_VBUS_EARLY_ON |
+ CI_HDRC_DISABLE_STREAMING,
+};
+
static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
CI_HDRC_DISABLE_STREAMING,
};
@@ -66,6 +71,7 @@ static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
};
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
{ .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
{ .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
{ .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
@@ -244,7 +250,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
struct ci_hdrc_platform_data pdata = {
.name = dev_name(&pdev->dev),
.capoffset = DEF_CAPOFFSET,
- .flags = CI_HDRC_SET_NON_ZERO_TTHA,
};
int ret;
const struct of_device_id *of_id;
@@ -302,9 +307,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
&pdata);
if (IS_ERR(data->ci_pdev)) {
ret = PTR_ERR(data->ci_pdev);
- dev_err(&pdev->dev,
- "Can't register ci_hdrc platform device, err=%d\n",
- ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "ci_hdrc_add_device failed, err=%d\n", ret);
goto err_clk;
}
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 7404064b9bbc..69426e644d17 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -721,6 +721,9 @@ static int ci_get_platdata(struct device *dev,
return ret;
}
+ if (of_find_property(dev->of_node, "non-zero-ttctrl-ttha", NULL))
+ platdata->flags |= CI_HDRC_SET_NON_ZERO_TTHA;
+
ext_id = ERR_PTR(-ENODEV);
ext_vbus = ERR_PTR(-ENODEV);
if (of_property_read_bool(dev->of_node, "extcon")) {
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index df47110bad2d..6d23eede4d8c 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -175,7 +175,6 @@ static int ci_requests_show(struct seq_file *s, void *data)
{
struct ci_hdrc *ci = s->private;
unsigned long flags;
- struct list_head *ptr = NULL;
struct ci_hw_req *req = NULL;
struct td_node *node, *tmpnode;
unsigned i, j, qsize = sizeof(struct ci_hw_td)/sizeof(u32);
@@ -187,9 +186,7 @@ static int ci_requests_show(struct seq_file *s, void *data)
spin_lock_irqsave(&ci->lock, flags);
for (i = 0; i < ci->hw_ep_max; i++)
- list_for_each(ptr, &ci->ci_hw_ep[i].qh.queue) {
- req = list_entry(ptr, struct ci_hw_req, queue);
-
+ list_for_each_entry(req, &ci->ci_hw_ep[i].qh.queue, queue) {
list_for_each_entry_safe(node, tmpnode, &req->tds, td) {
seq_printf(s, "EP=%02i: TD=%08X %s\n",
i % (ci->hw_ep_max / 2),
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index ba90dc66703d..de8e22ec3902 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -66,6 +66,11 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr,
return count;
}
ci->fsm.a_bus_req = 1;
+ if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
+ ci->gadget.host_request_flag = 1;
+ mutex_unlock(&ci->fsm.lock);
+ return count;
+ }
}
ci_otg_queue_work(ci);
@@ -144,8 +149,14 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr,
mutex_lock(&ci->fsm.lock);
if (buf[0] == '0')
ci->fsm.b_bus_req = 0;
- else if (buf[0] == '1')
+ else if (buf[0] == '1') {
ci->fsm.b_bus_req = 1;
+ if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
+ ci->gadget.host_request_flag = 1;
+ mutex_unlock(&ci->fsm.lock);
+ return count;
+ }
+ }
ci_otg_queue_work(ci);
mutex_unlock(&ci->fsm.lock);
@@ -198,6 +209,7 @@ static unsigned otg_timer_ms[] = {
TA_AIDL_BDIS,
TB_ASE0_BRST,
TA_BIDL_ADIS,
+ TB_AIDL_BDIS,
TB_SE0_SRP,
TB_SRP_FAIL,
0,
@@ -309,6 +321,12 @@ static int a_bidl_adis_tmout(struct ci_hdrc *ci)
return 0;
}
+static int b_aidl_bdis_tmout(struct ci_hdrc *ci)
+{
+ ci->fsm.a_bus_suspend = 1;
+ return 0;
+}
+
static int b_se0_srp_tmout(struct ci_hdrc *ci)
{
ci->fsm.b_se0_srp = 1;
@@ -353,6 +371,7 @@ static int (*otg_timer_handlers[])(struct ci_hdrc *) = {
a_aidl_bdis_tmout, /* A_AIDL_BDIS */
b_ase0_brst_tmout, /* B_ASE0_BRST */
a_bidl_adis_tmout, /* A_BIDL_ADIS */
+ b_aidl_bdis_tmout, /* B_AIDL_BDIS */
b_se0_srp_tmout, /* B_SE0_SRP */
b_srp_fail_tmout, /* B_SRP_FAIL */
NULL, /* A_WAIT_ENUM */
@@ -644,9 +663,9 @@ static void ci_otg_fsm_event(struct ci_hdrc *ci)
break;
case OTG_STATE_B_PERIPHERAL:
if ((intr_sts & USBi_SLI) && port_conn && otg_bsess_vld) {
- fsm->a_bus_suspend = 1;
- ci_otg_queue_work(ci);
+ ci_otg_add_timer(ci, B_AIDL_BDIS);
} else if (intr_sts & USBi_PCI) {
+ ci_otg_del_timer(ci, B_AIDL_BDIS);
if (fsm->a_bus_suspend == 1)
fsm->a_bus_suspend = 0;
}
@@ -786,6 +805,10 @@ int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci)
ci->fsm.id = hw_read_otgsc(ci, OTGSC_ID) ? 1 : 0;
ci->fsm.otg->state = OTG_STATE_UNDEFINED;
ci->fsm.ops = &ci_otg_ops;
+ ci->gadget.hnp_polling_support = 1;
+ ci->fsm.host_req_flag = devm_kzalloc(ci->dev, 1, GFP_KERNEL);
+ if (!ci->fsm.host_req_flag)
+ return -ENOMEM;
mutex_init(&ci->fsm.lock);
diff --git a/drivers/usb/chipidea/otg_fsm.h b/drivers/usb/chipidea/otg_fsm.h
index 262d6ef8df7c..6366fe398ba6 100644
--- a/drivers/usb/chipidea/otg_fsm.h
+++ b/drivers/usb/chipidea/otg_fsm.h
@@ -62,6 +62,8 @@
/* SSEND time before SRP */
#define TB_SSEND_SRP (1500) /* minimum 1.5 sec, section:5.1.2 */
+#define TB_AIDL_BDIS (20) /* 4ms ~ 150ms, section 5.2.1 */
+
#if IS_ENABLED(CONFIG_USB_OTG_FSM)
int ci_hdrc_otg_fsm_init(struct ci_hdrc *ci);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 3eafa2c9a2ba..065f5d97aa67 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -819,7 +819,6 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
ci->ep0out : ci->ep0in;
if (!list_empty(&hwep->qh.queue)) {
_ep_nuke(hwep);
- retval = -EOVERFLOW;
dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
_usb_addr(hwep));
}
@@ -1068,7 +1067,8 @@ __acquires(ci->lock)
}
break;
case USB_REQ_GET_STATUS:
- if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
+ if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
+ le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
type != (USB_DIR_IN|USB_RECIP_INTERFACE))
goto delegate;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fa4e23930614..a6c4a1b895bd 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -713,9 +713,20 @@ static int acm_tty_write(struct tty_struct *tty,
}
if (acm->susp_count) {
+ if (acm->putbuffer) {
+ /* anchor the pending put_char buffer first to preserve write order */
+ usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
+ acm->putbuffer = NULL;
+ }
usb_anchor_urb(wb->urb, &acm->delayed);
spin_unlock_irqrestore(&acm->write_lock, flags);
return count;
+ } else {
+ if (acm->putbuffer) {
+ /* at this point there is no good way to handle errors */
+ acm_start_wb(acm, acm->putbuffer);
+ acm->putbuffer = NULL;
+ }
}
stat = acm_start_wb(acm, wb);
@@ -726,6 +737,64 @@ static int acm_tty_write(struct tty_struct *tty,
return count;
}
+static void acm_tty_flush_chars(struct tty_struct *tty)
+{
+ struct acm *acm = tty->driver_data;
+ struct acm_wb *cur = acm->putbuffer;
+ int err;
+ unsigned long flags;
+
+ if (!cur) /* nothing to do */
+ return;
+
+ acm->putbuffer = NULL;
+ err = usb_autopm_get_interface_async(acm->control);
+ spin_lock_irqsave(&acm->write_lock, flags);
+ if (err < 0) {
+ cur->use = 0;
+ acm->putbuffer = cur;
+ goto out;
+ }
+
+ if (acm->susp_count)
+ usb_anchor_urb(cur->urb, &acm->delayed);
+ else
+ acm_start_wb(acm, cur);
+out:
+ spin_unlock_irqrestore(&acm->write_lock, flags);
+ return;
+}
+
+static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct acm *acm = tty->driver_data;
+ struct acm_wb *cur;
+ int wbn;
+ unsigned long flags;
+
+overflow:
+ cur = acm->putbuffer;
+ if (!cur) {
+ spin_lock_irqsave(&acm->write_lock, flags);
+ wbn = acm_wb_alloc(acm);
+ if (wbn >= 0) {
+ cur = &acm->wb[wbn];
+ acm->putbuffer = cur;
+ }
+ spin_unlock_irqrestore(&acm->write_lock, flags);
+ if (!cur)
+ return 0;
+ }
+
+ if (cur->len == acm->writesize) {
+ acm_tty_flush_chars(tty);
+ goto overflow;
+ }
+
+ cur->buf[cur->len++] = ch;
+ return 1;
+}
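The new hooks slot into the core's single-character path; roughly, the core dispatch looks like this (a paraphrase of the core's tty_put_char(), not cdc-acm code):

	/* Paraphrase of the core dispatch for ->put_char()/->write(). */
	int tty_put_char(struct tty_struct *tty, unsigned char ch)
	{
		if (tty->ops->put_char)
			return tty->ops->put_char(tty, ch);	/* buffered path */
		return tty->ops->write(tty, &ch, 1);		/* fallback */
	}

The ldisc later calls ->flush_chars() to push out whatever put_char() accumulated, which is when acm_tty_flush_chars() submits or anchors the pending write buffer.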
+
static int acm_tty_write_room(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
@@ -1114,6 +1183,9 @@ static int acm_probe(struct usb_interface *intf,
if (quirks == NO_UNION_NORMAL) {
data_interface = usb_ifnum_to_if(usb_dev, 1);
control_interface = usb_ifnum_to_if(usb_dev, 0);
+ /* we would crash below dereferencing a missing interface */
+ if (!data_interface || !control_interface)
+ return -ENODEV;
goto skip_normal_probe;
}
@@ -1905,6 +1977,8 @@ static const struct tty_operations acm_ops = {
.cleanup = acm_tty_cleanup,
.hangup = acm_tty_hangup,
.write = acm_tty_write,
+ .put_char = acm_tty_put_char,
+ .flush_chars = acm_tty_flush_chars,
.write_room = acm_tty_write_room,
.ioctl = acm_tty_ioctl,
.throttle = acm_tty_throttle,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ccfaba9ab4e4..05ce308d5d2a 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -94,6 +94,7 @@ struct acm {
unsigned long read_urbs_free;
struct urb *read_urbs[ACM_NR];
struct acm_rb read_buffers[ACM_NR];
+ struct acm_wb *putbuffer; /* for acm_tty_put_char() */
int rx_buflimit;
int rx_endpoint;
spinlock_t read_lock;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 7a11a8263171..917a55c4480d 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/kref.h>
#include <linux/slab.h>
+#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/usb/tmc.h>
@@ -87,6 +88,23 @@ struct usbtmc_device_data {
u8 bTag_last_write; /* needed for abort */
u8 bTag_last_read; /* needed for abort */
+ /* data for interrupt in endpoint handling */
+ u8 bNotify1;
+ u8 bNotify2;
+ u16 ifnum;
+ u8 iin_bTag;
+ u8 *iin_buffer;
+ atomic_t iin_data_valid;
+ unsigned int iin_ep;
+ int iin_ep_present;
+ int iin_interval;
+ struct urb *iin_urb;
+ u16 iin_wMaxPacketSize;
+ atomic_t srq_asserted;
+
+ /* coalesced usb488_caps from usbtmc_dev_capabilities */
+ __u8 usb488_caps;
+
u8 rigol_quirk;
/* attributes from the USB TMC spec for this device */
@@ -99,6 +117,8 @@ struct usbtmc_device_data {
struct usbtmc_dev_capabilities capabilities;
struct kref kref;
struct mutex io_mutex; /* only one i/o function running at a time */
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
};
#define to_usbtmc_data(d) container_of(d, struct usbtmc_device_data, kref)
@@ -373,6 +393,142 @@ exit:
return rv;
}
+static int usbtmc488_ioctl_read_stb(struct usbtmc_device_data *data,
+ void __user *arg)
+{
+ struct device *dev = &data->intf->dev;
+ u8 *buffer;
+ u8 tag;
+ __u8 stb;
+ int rv;
+
+ dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
+ data->iin_ep_present);
+
+ buffer = kmalloc(8, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ atomic_set(&data->iin_data_valid, 0);
+
+ /* must issue read_stb before using poll or select */
+ atomic_set(&data->srq_asserted, 0);
+
+ rv = usb_control_msg(data->usb_dev,
+ usb_rcvctrlpipe(data->usb_dev, 0),
+ USBTMC488_REQUEST_READ_STATUS_BYTE,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ data->iin_bTag,
+ data->ifnum,
+ buffer, 0x03, USBTMC_TIMEOUT);
+ if (rv < 0) {
+ dev_err(dev, "stb usb_control_msg returned %d\n", rv);
+ goto exit;
+ }
+
+ if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+ dev_err(dev, "control status returned %x\n", buffer[0]);
+ rv = -EIO;
+ goto exit;
+ }
+
+ if (data->iin_ep_present) {
+ rv = wait_event_interruptible_timeout(
+ data->waitq,
+ atomic_read(&data->iin_data_valid) != 0,
+ USBTMC_TIMEOUT);
+ if (rv < 0) {
+ dev_dbg(dev, "wait interrupted %d\n", rv);
+ goto exit;
+ }
+
+ if (rv == 0) {
+ dev_dbg(dev, "wait timed out\n");
+ rv = -ETIME;
+ goto exit;
+ }
+
+ tag = data->bNotify1 & 0x7f;
+ if (tag != data->iin_bTag) {
+ dev_err(dev, "expected bTag %x got %x\n",
+ data->iin_bTag, tag);
+ }
+
+ stb = data->bNotify2;
+ } else {
+ stb = buffer[2];
+ }
+
+ rv = copy_to_user(arg, &stb, sizeof(stb));
+ if (rv)
+ rv = -EFAULT;
+
+ exit:
+ /* bump interrupt bTag */
+ data->iin_bTag += 1;
+ if (data->iin_bTag > 127)
+ /* 1 is reserved for SRQ; see USBTMC-USB488 subclass spec section 4.3.1 */
+ data->iin_bTag = 2;
+
+ kfree(buffer);
+ return rv;
+}
+
+static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
+ void __user *arg, unsigned int cmd)
+{
+ struct device *dev = &data->intf->dev;
+ __u8 val;
+ u8 *buffer;
+ u16 wValue;
+ int rv;
+
+ if (!(data->usb488_caps & USBTMC488_CAPABILITY_SIMPLE))
+ return -EINVAL;
+
+ buffer = kmalloc(8, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ if (cmd == USBTMC488_REQUEST_REN_CONTROL) {
+ rv = copy_from_user(&val, arg, sizeof(val));
+ if (rv) {
+ rv = -EFAULT;
+ goto exit;
+ }
+ wValue = val ? 1 : 0;
+ } else {
+ wValue = 0;
+ }
+
+ rv = usb_control_msg(data->usb_dev,
+ usb_rcvctrlpipe(data->usb_dev, 0),
+ cmd,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ wValue,
+ data->ifnum,
+ buffer, 0x01, USBTMC_TIMEOUT);
+ if (rv < 0) {
+ dev_err(dev, "simple usb_control_msg failed %d\n", rv);
+ goto exit;
+ } else if (rv != 1) {
+ dev_warn(dev, "simple usb_control_msg returned %d\n", rv);
+ rv = -EIO;
+ goto exit;
+ }
+
+ if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+ dev_err(dev, "simple control status returned %x\n", buffer[0]);
+ rv = -EIO;
+ goto exit;
+ }
+ rv = 0;
+
+ exit:
+ kfree(buffer);
+ return rv;
+}
+
/*
* Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-IN endpoint.
* @transfer_size: number of bytes to request from the device.
@@ -895,6 +1051,7 @@ static int get_capabilities(struct usbtmc_device_data *data)
data->capabilities.device_capabilities = buffer[5];
data->capabilities.usb488_interface_capabilities = buffer[14];
data->capabilities.usb488_device_capabilities = buffer[15];
+ data->usb488_caps = (buffer[14] & 0x07) | ((buffer[15] & 0x0f) << 4);
rv = 0;
err_out:
@@ -1069,6 +1226,33 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case USBTMC_IOCTL_ABORT_BULK_IN:
retval = usbtmc_ioctl_abort_bulk_in(data);
break;
+
+ case USBTMC488_IOCTL_GET_CAPS:
+ retval = copy_to_user((void __user *)arg,
+ &data->usb488_caps,
+ sizeof(data->usb488_caps));
+ if (retval)
+ retval = -EFAULT;
+ break;
+
+ case USBTMC488_IOCTL_READ_STB:
+ retval = usbtmc488_ioctl_read_stb(data, (void __user *)arg);
+ break;
+
+ case USBTMC488_IOCTL_REN_CONTROL:
+ retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
+ USBTMC488_REQUEST_REN_CONTROL);
+ break;
+
+ case USBTMC488_IOCTL_GOTO_LOCAL:
+ retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
+ USBTMC488_REQUEST_GOTO_LOCAL);
+ break;
+
+ case USBTMC488_IOCTL_LOCAL_LOCKOUT:
+ retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
+ USBTMC488_REQUEST_LOCAL_LOCKOUT);
+ break;
}
skip_io_on_zombie:
@@ -1076,6 +1260,34 @@ skip_io_on_zombie:
return retval;
}
+static int usbtmc_fasync(int fd, struct file *file, int on)
+{
+ struct usbtmc_device_data *data = file->private_data;
+
+ return fasync_helper(fd, file, on, &data->fasync);
+}
+
+static unsigned int usbtmc_poll(struct file *file, poll_table *wait)
+{
+ struct usbtmc_device_data *data = file->private_data;
+ unsigned int mask;
+
+ mutex_lock(&data->io_mutex);
+
+ if (data->zombie) {
+ mask = POLLHUP | POLLERR;
+ goto no_poll;
+ }
+
+ poll_wait(file, &data->waitq, wait);
+
+ mask = (atomic_read(&data->srq_asserted)) ? POLLIN | POLLRDNORM : 0;
+
+no_poll:
+ mutex_unlock(&data->io_mutex);
+ return mask;
+}
+
static const struct file_operations fops = {
.owner = THIS_MODULE,
.read = usbtmc_read,
@@ -1083,6 +1295,8 @@ static const struct file_operations fops = {
.open = usbtmc_open,
.release = usbtmc_release,
.unlocked_ioctl = usbtmc_ioctl,
+ .fasync = usbtmc_fasync,
+ .poll = usbtmc_poll,
.llseek = default_llseek,
};
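Together, the fasync/poll hooks and USBTMC488_IOCTL_READ_STB give userspace SRQ notification. A hedged userspace sketch of the intended sequence, assuming the ioctl names from this series' uapi header and an example device path:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/usb/tmc.h>

	int main(void)
	{
		unsigned char stb;
		struct pollfd pfd;

		pfd.fd = open("/dev/usbtmc0", O_RDWR);	/* example path */
		if (pfd.fd < 0)
			return 1;

		/* READ_STB must be issued before poll()/select() so that
		 * srq_asserted is (re)armed; see usbtmc488_ioctl_read_stb(). */
		ioctl(pfd.fd, USBTMC488_IOCTL_READ_STB, &stb);

		pfd.events = POLLIN;
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
			ioctl(pfd.fd, USBTMC488_IOCTL_READ_STB, &stb);
			printf("SRQ asserted, status byte 0x%02x\n", stb);
		}
		close(pfd.fd);
		return 0;
	}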
@@ -1092,6 +1306,67 @@ static struct usb_class_driver usbtmc_class = {
.minor_base = USBTMC_MINOR_BASE,
};
+static void usbtmc_interrupt(struct urb *urb)
+{
+ struct usbtmc_device_data *data = urb->context;
+ struct device *dev = &data->intf->dev;
+ int status = urb->status;
+ int rv;
+
+ dev_dbg(&data->intf->dev, "int status: %d len %d\n",
+ status, urb->actual_length);
+
+ switch (status) {
+ case 0: /* SUCCESS */
+ /* check for valid STB notification */
+ if (data->iin_buffer[0] > 0x81) {
+ data->bNotify1 = data->iin_buffer[0];
+ data->bNotify2 = data->iin_buffer[1];
+ atomic_set(&data->iin_data_valid, 1);
+ wake_up_interruptible(&data->waitq);
+ goto exit;
+ }
+ /* check for SRQ notification */
+ if (data->iin_buffer[0] == 0x81) {
+ if (data->fasync)
+ kill_fasync(&data->fasync,
+ SIGIO, POLL_IN);
+
+ atomic_set(&data->srq_asserted, 1);
+ wake_up_interruptible(&data->waitq);
+ goto exit;
+ }
+ dev_warn(dev, "invalid notification: %x\n", data->iin_buffer[0]);
+ break;
+ case -EOVERFLOW:
+ dev_err(dev, "overflow with length %d, actual length is %d\n",
+ data->iin_wMaxPacketSize, urb->actual_length);
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -EILSEQ:
+ case -ETIME:
+ /* urb terminated, clean up */
+ dev_dbg(dev, "urb terminated, status: %d\n", status);
+ return;
+ default:
+ dev_err(dev, "unknown status received: %d\n", status);
+ }
+exit:
+ rv = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rv)
+ dev_err(dev, "usb_submit_urb failed: %d\n", rv);
+}
+
+static void usbtmc_free_int(struct usbtmc_device_data *data)
+{
+ if (!data->iin_ep_present || !data->iin_urb)
+ return;
+ usb_kill_urb(data->iin_urb);
+ kfree(data->iin_buffer);
+ usb_free_urb(data->iin_urb);
+ kref_put(&data->kref, usbtmc_delete);
+}
static int usbtmc_probe(struct usb_interface *intf,
const struct usb_device_id *id)
@@ -1114,6 +1389,9 @@ static int usbtmc_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
kref_init(&data->kref);
mutex_init(&data->io_mutex);
+ init_waitqueue_head(&data->waitq);
+ atomic_set(&data->iin_data_valid, 0);
+ atomic_set(&data->srq_asserted, 0);
data->zombie = 0;
/* Determine if it is a Rigol or not */
@@ -1134,9 +1412,12 @@ static int usbtmc_probe(struct usb_interface *intf,
data->bTag = 1;
data->TermCharEnabled = 0;
data->TermChar = '\n';
+ /* 2 <= bTag <= 127 USBTMC-USB488 subclass specification 4.3.1 */
+ data->iin_bTag = 2;
/* USBTMC devices have only one setting, so use that */
iface_desc = data->intf->cur_altsetting;
+ data->ifnum = iface_desc->desc.bInterfaceNumber;
/* Find bulk in endpoint */
for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
@@ -1161,6 +1442,20 @@ static int usbtmc_probe(struct usb_interface *intf,
break;
}
}
+ /* Find int endpoint */
+ for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
+ endpoint = &iface_desc->endpoint[n].desc;
+
+ if (usb_endpoint_is_int_in(endpoint)) {
+ data->iin_ep_present = 1;
+ data->iin_ep = endpoint->bEndpointAddress;
+ data->iin_wMaxPacketSize = usb_endpoint_maxp(endpoint);
+ data->iin_interval = endpoint->bInterval;
+ dev_dbg(&intf->dev, "Found Int in endpoint at %u\n",
+ data->iin_ep);
+ break;
+ }
+ }
retcode = get_capabilities(data);
if (retcode)
@@ -1169,6 +1464,39 @@ static int usbtmc_probe(struct usb_interface *intf,
retcode = sysfs_create_group(&intf->dev.kobj,
&capability_attr_grp);
+ if (data->iin_ep_present) {
+ /* allocate int urb */
+ data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!data->iin_urb) {
+ dev_err(&intf->dev, "Failed to allocate int urb\n");
+ goto error_register;
+ }
+
+ /* will reference data in int urb */
+ kref_get(&data->kref);
+
+ /* allocate buffer for interrupt in */
+ data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
+ GFP_KERNEL);
+ if (!data->iin_buffer) {
+ dev_err(&intf->dev, "Failed to allocate int buf\n");
+ goto error_register;
+ }
+
+ /* fill interrupt urb */
+ usb_fill_int_urb(data->iin_urb, data->usb_dev,
+ usb_rcvintpipe(data->usb_dev, data->iin_ep),
+ data->iin_buffer, data->iin_wMaxPacketSize,
+ usbtmc_interrupt,
+ data, data->iin_interval);
+
+ retcode = usb_submit_urb(data->iin_urb, GFP_KERNEL);
+ if (retcode) {
+ dev_err(&intf->dev, "Failed to submit iin_urb\n");
+ goto error_register;
+ }
+ }
+
retcode = sysfs_create_group(&intf->dev.kobj, &data_attr_grp);
retcode = usb_register_dev(intf, &usbtmc_class);
@@ -1185,6 +1513,7 @@ static int usbtmc_probe(struct usb_interface *intf,
error_register:
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+ usbtmc_free_int(data);
kref_put(&data->kref, usbtmc_delete);
return retcode;
}
@@ -1201,7 +1530,9 @@ static void usbtmc_disconnect(struct usb_interface *intf)
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
mutex_lock(&data->io_mutex);
data->zombie = 1;
+ wake_up_all(&data->waitq);
mutex_unlock(&data->io_mutex);
+ usbtmc_free_int(data);
kref_put(&data->kref, usbtmc_delete);
}
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index e6ec125e4485..e3d01619d6b3 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -51,6 +51,7 @@ static const char *const speed_names[] = {
[USB_SPEED_HIGH] = "high-speed",
[USB_SPEED_WIRELESS] = "wireless",
[USB_SPEED_SUPER] = "super-speed",
+ [USB_SPEED_SUPER_PLUS] = "super-speed-plus",
};
const char *usb_speed_string(enum usb_device_speed speed)
@@ -64,18 +65,15 @@ EXPORT_SYMBOL_GPL(usb_speed_string);
enum usb_device_speed usb_get_maximum_speed(struct device *dev)
{
const char *maximum_speed;
- int err;
- int i;
+ int ret;
- err = device_property_read_string(dev, "maximum-speed", &maximum_speed);
- if (err < 0)
+ ret = device_property_read_string(dev, "maximum-speed", &maximum_speed);
+ if (ret < 0)
return USB_SPEED_UNKNOWN;
- for (i = 0; i < ARRAY_SIZE(speed_names); i++)
- if (strcmp(maximum_speed, speed_names[i]) == 0)
- return i;
+ ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
- return USB_SPEED_UNKNOWN;
+ return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
EXPORT_SYMBOL_GPL(usb_get_maximum_speed);
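match_string() returns the index of the first exact match or -EINVAL, which is what lets both helpers map a negative result straight to their UNKNOWN value; a small sketch of the semantics:

	/* Sketch of match_string() (lib/string.c) semantics. */
	static const char * const modes[] = { "off", "on", "auto" };

	static int example_parse_mode(const char *s)
	{
		int ret = match_string(modes, ARRAY_SIZE(modes), s);

		/* ret is the matching index (0..2), or -EINVAL if none. */
		return (ret < 0) ? 0 : ret;	/* treat unknown as "off" */
	}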
@@ -109,13 +107,10 @@ static const char *const usb_dr_modes[] = {
static enum usb_dr_mode usb_get_dr_mode_from_string(const char *str)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(usb_dr_modes); i++)
- if (!strcmp(usb_dr_modes[i], str))
- return i;
+ int ret;
- return USB_DR_MODE_UNKNOWN;
+ ret = match_string(usb_dr_modes, ARRAY_SIZE(usb_dr_modes), str);
+ return (ret < 0) ? USB_DR_MODE_UNKNOWN : ret;
}
enum usb_dr_mode usb_get_dr_mode(struct device *dev)
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 61d538aa2346..504708f59b93 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -78,6 +78,8 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
fsm->b_srp_done = 0;
break;
case OTG_STATE_B_PERIPHERAL:
+ if (fsm->otg->gadget)
+ fsm->otg->gadget->host_request_flag = 0;
break;
case OTG_STATE_B_WAIT_ACON:
otg_del_timer(fsm, B_ASE0_BRST);
@@ -107,6 +109,8 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
case OTG_STATE_A_PERIPHERAL:
otg_del_timer(fsm, A_BIDL_ADIS);
fsm->a_bidl_adis_tmout = 0;
+ if (fsm->otg->gadget)
+ fsm->otg->gadget->host_request_flag = 0;
break;
case OTG_STATE_A_WAIT_VFALL:
otg_del_timer(fsm, A_WAIT_VFALL);
@@ -120,6 +124,87 @@ static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
}
}
+static void otg_hnp_polling_work(struct work_struct *work)
+{
+ struct otg_fsm *fsm = container_of(to_delayed_work(work),
+ struct otg_fsm, hnp_polling_work);
+ struct usb_device *udev;
+ enum usb_otg_state state = fsm->otg->state;
+ u8 flag;
+ int retval;
+
+ if (state != OTG_STATE_A_HOST && state != OTG_STATE_B_HOST)
+ return;
+
+ udev = usb_hub_find_child(fsm->otg->host->root_hub, 1);
+ if (!udev) {
+ dev_err(fsm->otg->host->controller,
+ "no usb dev connected, can't start HNP polling\n");
+ return;
+ }
+
+ *fsm->host_req_flag = 0;
+ /* Get host request flag from connected USB device */
+ retval = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
+ USB_REQ_GET_STATUS,
+ USB_DIR_IN | USB_RECIP_DEVICE,
+ 0,
+ OTG_STS_SELECTOR,
+ fsm->host_req_flag,
+ 1,
+ USB_CTRL_GET_TIMEOUT);
+ if (retval != 1) {
+ dev_err(&udev->dev, "Get one byte OTG status failed\n");
+ return;
+ }
+
+ flag = *fsm->host_req_flag;
+ if (flag == 0) {
+ /* Continue HNP polling */
+ schedule_delayed_work(&fsm->hnp_polling_work,
+ msecs_to_jiffies(T_HOST_REQ_POLL));
+ return;
+ } else if (flag != HOST_REQUEST_FLAG) {
+ dev_err(&udev->dev, "host request flag %d is invalid\n", flag);
+ return;
+ }
+
+ /* Host request flag is set */
+ if (state == OTG_STATE_A_HOST) {
+ /* Set b_hnp_enable */
+ if (!fsm->otg->host->b_hnp_enable) {
+ retval = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_B_HNP_ENABLE,
+ 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (retval >= 0)
+ fsm->otg->host->b_hnp_enable = 1;
+ }
+ fsm->a_bus_req = 0;
+ } else if (state == OTG_STATE_B_HOST) {
+ fsm->b_bus_req = 0;
+ }
+
+ otg_statemachine(fsm);
+}
+
+static void otg_start_hnp_polling(struct otg_fsm *fsm)
+{
+ /*
+ * The memory for host_req_flag should be allocated by the
+ * controller driver; otherwise, HNP polling is not started.
+ */
+ if (!fsm->host_req_flag)
+ return;
+
+ INIT_DELAYED_WORK(&fsm->hnp_polling_work, otg_hnp_polling_work);
+ schedule_delayed_work(&fsm->hnp_polling_work,
+ msecs_to_jiffies(T_HOST_REQ_POLL));
+}
+
/* Called when entering a state */
static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
{
@@ -169,6 +254,7 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
otg_set_protocol(fsm, PROTO_HOST);
usb_bus_start_enum(fsm->otg->host,
fsm->otg->host->otg_port);
+ otg_start_hnp_polling(fsm);
break;
case OTG_STATE_A_IDLE:
otg_drv_vbus(fsm, 0);
@@ -203,6 +289,7 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
*/
if (!fsm->a_bus_req || fsm->a_suspend_req_inf)
otg_add_timer(fsm, A_WAIT_ENUM);
+ otg_start_hnp_polling(fsm);
break;
case OTG_STATE_A_SUSPEND:
otg_drv_vbus(fsm, 1);
diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile
index 2f6f93220046..9780877010b4 100644
--- a/drivers/usb/core/Makefile
+++ b/drivers/usb/core/Makefile
@@ -5,7 +5,7 @@
usbcore-y := usb.o hub.o hcd.o urb.o message.o driver.o
usbcore-y += config.o file.o buffer.o sysfs.o endpoint.o
usbcore-y += devio.o notify.o generic.o quirks.o devices.o
-usbcore-y += port.o
+usbcore-y += port.o of.o
usbcore-$(CONFIG_PCI) += hcd-pci.o
usbcore-$(CONFIG_ACPI) += usb-acpi.o
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 89f2e7765093..2741566ee4f2 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -62,8 +62,9 @@ int hcd_buffer_create(struct usb_hcd *hcd)
char name[16];
int i, size;
- if (!hcd->self.controller->dma_mask &&
- !(hcd->driver->flags & HCD_LOCAL_MEM))
+ if (!IS_ENABLED(CONFIG_HAS_DMA) ||
+ (!hcd->self.controller->dma_mask &&
+ !(hcd->driver->flags & HCD_LOCAL_MEM)))
return 0;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
@@ -93,6 +94,9 @@ void hcd_buffer_destroy(struct usb_hcd *hcd)
{
int i;
+ if (!IS_ENABLED(CONFIG_HAS_DMA))
+ return;
+
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
struct dma_pool *pool = hcd->pool[i];
@@ -119,8 +123,9 @@ void *hcd_buffer_alloc(
int i;
/* some USB hosts just use PIO */
- if (!bus->controller->dma_mask &&
- !(hcd->driver->flags & HCD_LOCAL_MEM)) {
+ if (!IS_ENABLED(CONFIG_HAS_DMA) ||
+ (!bus->controller->dma_mask &&
+ !(hcd->driver->flags & HCD_LOCAL_MEM))) {
*dma = ~(dma_addr_t) 0;
return kmalloc(size, mem_flags);
}
@@ -145,8 +150,9 @@ void hcd_buffer_free(
if (!addr)
return;
- if (!bus->controller->dma_mask &&
- !(hcd->driver->flags & HCD_LOCAL_MEM)) {
+ if (!IS_ENABLED(CONFIG_HAS_DMA) ||
+ (!bus->controller->dma_mask &&
+ !(hcd->driver->flags & HCD_LOCAL_MEM))) {
kfree(addr);
return;
}
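IS_ENABLED(CONFIG_HAS_DMA) is a compile-time 0 or 1, so on !HAS_DMA builds the DMA branches are dead code the compiler drops while everything still type-checks; a tiny sketch of the idiom (the dma-backed helper is a placeholder):

	#include <linux/kconfig.h>
	#include <linux/slab.h>

	static void *example_alloc(size_t size, gfp_t flags)
	{
		if (!IS_ENABLED(CONFIG_HAS_DMA))
			return kmalloc(size, flags);	/* PIO-only build */
		return dma_backed_alloc(size, flags);	/* placeholder */
	}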
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 5050760f5e17..31ccdccd7a04 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -43,6 +43,27 @@ static int find_next_descriptor(unsigned char *buffer, int size,
return buffer - buffer0;
}
+static void usb_parse_ssp_isoc_endpoint_companion(struct device *ddev,
+ int cfgno, int inum, int asnum, struct usb_host_endpoint *ep,
+ unsigned char *buffer, int size)
+{
+ struct usb_ssp_isoc_ep_comp_descriptor *desc;
+
+ /*
+ * The SuperSpeedPlus Isoc endpoint companion descriptor immediately
+ * follows the SuperSpeed Endpoint Companion descriptor
+ */
+ desc = (struct usb_ssp_isoc_ep_comp_descriptor *) buffer;
+ if (desc->bDescriptorType != USB_DT_SSP_ISOC_ENDPOINT_COMP ||
+ size < USB_DT_SSP_ISOC_EP_COMP_SIZE) {
+ dev_warn(ddev, "Invalid SuperSpeedPlus isoc endpoint companion "
+ "for config %d interface %d altsetting %d ep %d.\n",
+ cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ return;
+ }
+ memcpy(&ep->ssp_isoc_ep_comp, desc, USB_DT_SSP_ISOC_EP_COMP_SIZE);
+}
+
static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
int inum, int asnum, struct usb_host_endpoint *ep,
unsigned char *buffer, int size)
@@ -54,6 +75,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
* be the first thing immediately following the endpoint descriptor.
*/
desc = (struct usb_ss_ep_comp_descriptor *) buffer;
+
if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
size < USB_DT_SS_EP_COMP_SIZE) {
dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
@@ -76,7 +98,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
ep->desc.wMaxPacketSize;
return;
}
-
+ buffer += desc->bLength;
+ size -= desc->bLength;
memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE);
/* Check the various values */
@@ -112,6 +135,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bmAttributes = 16;
} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
+ !USB_SS_SSP_ISOC_COMP(desc->bmAttributes) &&
USB_SS_MULT(desc->bmAttributes) > 3) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
@@ -140,6 +164,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
max_tx);
ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx);
}
+ /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */
+ if (usb_endpoint_xfer_isoc(&ep->desc) &&
+ USB_SS_SSP_ISOC_COMP(desc->bmAttributes))
+ usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum,
+ ep, buffer, size);
}
static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
@@ -191,6 +220,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
if (usb_endpoint_xfer_int(d)) {
i = 1;
switch (to_usb_device(ddev)->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
/* Many device manufacturers are using full-speed
@@ -274,7 +304,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
}
/* Parse a possible SuperSpeed endpoint companion descriptor */
- if (to_usb_device(ddev)->speed == USB_SPEED_SUPER)
+ if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
inum, asnum, endpoint, buffer, size);
@@ -862,6 +892,9 @@ int usb_get_bos_descriptor(struct usb_device *dev)
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
break;
+ case USB_PTM_CAP_TYPE:
+ dev->bos->ptm_cap =
+ (struct usb_ptm_cap_descriptor *)buffer;
default:
break;
}
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index cffa0a0d7de2..ef04b50e6bbb 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -110,13 +110,6 @@ static const char format_endpt[] =
/* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */
"E: Ad=%02x(%c) Atr=%02x(%-4s) MxPS=%4d Ivl=%d%cs\n";
-
-/*
- * Need access to the driver and USB bus lists.
- * extern struct list_head usb_bus_list;
- * However, these will come from functions that return ptrs to each of them.
- */
-
/*
 * Wait for a connect/disconnect event to happen. We initialize
* the event counter with an odd number, and each event will increment
@@ -221,7 +214,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
break;
case USB_ENDPOINT_XFER_INT:
type = "Int.";
- if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
+ if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER)
interval = 1 << (desc->bInterval - 1);
else
interval = desc->bInterval;
@@ -230,7 +223,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
return start;
}
interval *= (speed == USB_SPEED_HIGH ||
- speed == USB_SPEED_SUPER) ? 125 : 1000;
+ speed >= USB_SPEED_SUPER) ? 125 : 1000;
if (interval % 1000)
unit = 'u';
else {
@@ -322,7 +315,7 @@ static char *usb_dump_config_descriptor(char *start, char *end,
if (start > end)
return start;
- if (speed == USB_SPEED_SUPER)
+ if (speed >= USB_SPEED_SUPER)
mul = 8;
else
mul = 2;
@@ -534,6 +527,8 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
speed = "480"; break;
case USB_SPEED_SUPER:
speed = "5000"; break;
+ case USB_SPEED_SUPER_PLUS:
+ speed = "10000"; break;
default:
speed = "??";
}
@@ -553,7 +548,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
/* super/high speed reserves 80%, full/low reserves 90% */
if (usbdev->speed == USB_SPEED_HIGH ||
- usbdev->speed == USB_SPEED_SUPER)
+ usbdev->speed >= USB_SPEED_SUPER)
max = 800;
else
max = FRAME_TIME_MAX_USECS_ALLOC;
@@ -616,6 +611,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
struct usb_bus *bus;
ssize_t ret, total_written = 0;
loff_t skip_bytes = *ppos;
+ int id;
if (*ppos < 0)
return -EINVAL;
@@ -624,9 +620,9 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
if (!access_ok(VERIFY_WRITE, buf, nbytes))
return -EFAULT;
- mutex_lock(&usb_bus_list_lock);
+ mutex_lock(&usb_bus_idr_lock);
/* print devices for all busses */
- list_for_each_entry(bus, &usb_bus_list, bus_list) {
+ idr_for_each_entry(&usb_bus_idr, bus, id) {
/* recurse through all children of the root hub */
if (!bus_to_hcd(bus)->rh_registered)
continue;
@@ -635,12 +631,12 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
bus->root_hub, bus, 0, 0, 0);
usb_unlock_device(bus->root_hub);
if (ret < 0) {
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
return ret;
}
total_written += ret;
}
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
return total_written;
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 59e7a3369084..52c4461dfccd 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -50,6 +50,7 @@
#include <linux/user_namespace.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>
+#include <linux/dma-mapping.h>
#include <asm/byteorder.h>
#include <linux/moduleparam.h>
@@ -69,6 +70,7 @@ struct usb_dev_state {
spinlock_t lock; /* protects the async urb lists */
struct list_head async_pending;
struct list_head async_completed;
+ struct list_head memory_list;
wait_queue_head_t wait; /* wake up if a request completed */
unsigned int discsignr;
struct pid *disc_pid;
@@ -77,6 +79,19 @@ struct usb_dev_state {
unsigned long ifclaimed;
u32 secid;
u32 disabled_bulk_eps;
+ bool privileges_dropped;
+ unsigned long interface_allowed_mask;
+};
+
+struct usb_memory {
+ struct list_head memlist;
+ int vma_use_count;
+ int urb_use_count;
+ u32 size;
+ void *mem;
+ dma_addr_t dma_handle;
+ unsigned long vm_start;
+ struct usb_dev_state *ps;
};
struct async {
@@ -89,6 +104,7 @@ struct async {
void __user *userbuffer;
void __user *userurb;
struct urb *urb;
+ struct usb_memory *usbm;
unsigned int mem_usage;
int status;
u32 secid;
@@ -162,6 +178,111 @@ static int connected(struct usb_dev_state *ps)
ps->dev->state != USB_STATE_NOTATTACHED);
}
+static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
+{
+ struct usb_dev_state *ps = usbm->ps;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ps->lock, flags);
+ --*count;
+ if (usbm->urb_use_count == 0 && usbm->vma_use_count == 0) {
+ list_del(&usbm->memlist);
+ spin_unlock_irqrestore(&ps->lock, flags);
+
+ usb_free_coherent(ps->dev, usbm->size, usbm->mem,
+ usbm->dma_handle);
+ usbfs_decrease_memory_usage(
+ usbm->size + sizeof(struct usb_memory));
+ kfree(usbm);
+ } else {
+ spin_unlock_irqrestore(&ps->lock, flags);
+ }
+}
+
+static void usbdev_vm_open(struct vm_area_struct *vma)
+{
+ struct usb_memory *usbm = vma->vm_private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&usbm->ps->lock, flags);
+ ++usbm->vma_use_count;
+ spin_unlock_irqrestore(&usbm->ps->lock, flags);
+}
+
+static void usbdev_vm_close(struct vm_area_struct *vma)
+{
+ struct usb_memory *usbm = vma->vm_private_data;
+
+ dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+}
+
+struct vm_operations_struct usbdev_vm_ops = {
+ .open = usbdev_vm_open,
+ .close = usbdev_vm_close
+};
+
+static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct usb_memory *usbm = NULL;
+ struct usb_dev_state *ps = file->private_data;
+ size_t size = vma->vm_end - vma->vm_start;
+ void *mem;
+ unsigned long flags;
+ dma_addr_t dma_handle;
+ int ret;
+
+ ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
+ if (ret)
+ goto error;
+
+ usbm = kzalloc(sizeof(struct usb_memory), GFP_KERNEL);
+ if (!usbm) {
+ ret = -ENOMEM;
+ goto error_decrease_mem;
+ }
+
+ mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto error_free_usbm;
+ }
+
+ memset(mem, 0, size);
+
+ usbm->mem = mem;
+ usbm->dma_handle = dma_handle;
+ usbm->size = size;
+ usbm->ps = ps;
+ usbm->vm_start = vma->vm_start;
+ usbm->vma_use_count = 1;
+ INIT_LIST_HEAD(&usbm->memlist);
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(usbm->mem) >> PAGE_SHIFT,
+ size, vma->vm_page_prot) < 0) {
+ dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
+ return -EAGAIN;
+ }
+
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_ops = &usbdev_vm_ops;
+ vma->vm_private_data = usbm;
+
+ spin_lock_irqsave(&ps->lock, flags);
+ list_add_tail(&usbm->memlist, &ps->memory_list);
+ spin_unlock_irqrestore(&ps->lock, flags);
+
+ return 0;
+
+error_free_usbm:
+ kfree(usbm);
+error_decrease_mem:
+ usbfs_decrease_memory_usage(size + sizeof(struct usb_memory));
+error:
+ return ret;
+}
+
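A struct usb_memory has to stay alive while either the VMA or any in-flight
URB still references it; dec_usb_memory_use_count() frees it only once both
counters reach zero under ps->lock. A standalone sketch of that rule, with
a pthread mutex standing in for the spinlock and every name invented (build
with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_mem {
	pthread_mutex_t lock;
	int vma_use_count;	/* mappings still referencing us */
	int urb_use_count;	/* in-flight transfers referencing us */
};

/* Drop one reference of either kind; free once both kinds are gone. */
static void demo_put(struct demo_mem *m, int *count)
{
	int dead;

	pthread_mutex_lock(&m->lock);
	--*count;
	dead = (m->vma_use_count == 0 && m->urb_use_count == 0);
	pthread_mutex_unlock(&m->lock);

	if (dead) {
		pthread_mutex_destroy(&m->lock);
		free(m);
	}
}

int main(void)
{
	struct demo_mem *m = calloc(1, sizeof(*m));

	pthread_mutex_init(&m->lock, NULL);
	m->vma_use_count = 1;	/* the initial mmap() reference */
	m->urb_use_count = 1;	/* one submitted transfer */

	demo_put(m, &m->urb_use_count);	/* transfer completes: still alive */
	demo_put(m, &m->vma_use_count);	/* mapping torn down: freed */
	printf("both references dropped\n");
	return 0;
}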
static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
@@ -278,8 +399,13 @@ static void free_async(struct async *as)
if (sg_page(&as->urb->sg[i]))
kfree(sg_virt(&as->urb->sg[i]));
}
+
kfree(as->urb->sg);
- kfree(as->urb->transfer_buffer);
+ if (as->usbm == NULL)
+ kfree(as->urb->transfer_buffer);
+ else
+ dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
+
kfree(as->urb->setup_packet);
usb_free_urb(as->urb);
usbfs_decrease_memory_usage(as->mem_usage);
@@ -624,6 +750,10 @@ static int claimintf(struct usb_dev_state *ps, unsigned int ifnum)
if (test_bit(ifnum, &ps->ifclaimed))
return 0;
+ if (ps->privileges_dropped &&
+ !test_bit(ifnum, &ps->interface_allowed_mask))
+ return -EACCES;
+
intf = usb_ifnum_to_if(dev, ifnum);
if (!intf)
err = -ENOENT;
@@ -848,7 +978,7 @@ static struct usb_device *usbdev_lookup_by_devt(dev_t devt)
(void *) (unsigned long) devt, match_devt);
if (!dev)
return NULL;
- return container_of(dev, struct usb_device, dev);
+ return to_usb_device(dev);
}
/*
@@ -861,7 +991,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
int ret;
ret = -ENOMEM;
- ps = kmalloc(sizeof(struct usb_dev_state), GFP_KERNEL);
+ ps = kzalloc(sizeof(struct usb_dev_state), GFP_KERNEL);
if (!ps)
goto out_free_ps;
@@ -889,16 +1019,15 @@ static int usbdev_open(struct inode *inode, struct file *file)
ps->dev = dev;
ps->file = file;
+ ps->interface_allowed_mask = 0xFFFFFFFF; /* 32 bits */
spin_lock_init(&ps->lock);
INIT_LIST_HEAD(&ps->list);
INIT_LIST_HEAD(&ps->async_pending);
INIT_LIST_HEAD(&ps->async_completed);
+ INIT_LIST_HEAD(&ps->memory_list);
init_waitqueue_head(&ps->wait);
- ps->discsignr = 0;
ps->disc_pid = get_pid(task_pid(current));
ps->cred = get_current_cred();
- ps->disccontext = NULL;
- ps->ifclaimed = 0;
security_task_getsecid(current, &ps->secid);
smp_wmb();
list_add_tail(&ps->list, &dev->filelist);
@@ -945,6 +1074,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
free_async(as);
as = async_getcompleted(ps);
}
+
kfree(ps);
return 0;
}
@@ -1198,6 +1328,28 @@ static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
static int proc_resetdevice(struct usb_dev_state *ps)
{
+ struct usb_host_config *actconfig = ps->dev->actconfig;
+ struct usb_interface *interface;
+ int i, number;
+
+ /* Don't allow a device reset if the process has dropped the
+ * privilege to do such things and any of the interfaces are
+ * currently claimed.
+ */
+ if (ps->privileges_dropped && actconfig) {
+ for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) {
+ interface = actconfig->interface[i];
+ number = interface->cur_altsetting->desc.bInterfaceNumber;
+ if (usb_interface_claimed(interface) &&
+ !test_bit(number, &ps->ifclaimed)) {
+ dev_warn(&ps->dev->dev,
+ "usbfs: interface %d claimed by %s while '%s' resets device\n",
+ number, interface->dev.driver->name, current->comm);
+ return -EACCES;
+ }
+ }
+ }
+
return usb_reset_device(ps->dev);
}
@@ -1266,6 +1418,31 @@ static int proc_setconfig(struct usb_dev_state *ps, void __user *arg)
return status;
}
+static struct usb_memory *
+find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb)
+{
+ struct usb_memory *usbm = NULL, *iter;
+ unsigned long flags;
+ unsigned long uurb_start = (unsigned long)uurb->buffer;
+
+ spin_lock_irqsave(&ps->lock, flags);
+ list_for_each_entry(iter, &ps->memory_list, memlist) {
+ if (uurb_start >= iter->vm_start &&
+ uurb_start < iter->vm_start + iter->size) {
+ if (uurb->buffer_length > iter->vm_start + iter->size -
+ uurb_start) {
+ usbm = ERR_PTR(-EINVAL);
+ } else {
+ usbm = iter;
+ usbm->urb_use_count++;
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ps->lock, flags);
+ return usbm;
+}
+
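find_memory_area() accepts a URB buffer only if it starts inside a mapped
region and its length fits in what remains of that region; the test is
written as "length > remaining" so the potentially wrapping sum
offset + length is never formed. A small standalone sketch of that bounds
check, under the same assumption that start + size does not wrap:

#include <stdint.h>
#include <stdio.h>

/* Return 1 if [off, off + len) lies inside [start, start + size). */
static int range_contains(uintptr_t start, size_t size,
			  uintptr_t off, size_t len)
{
	if (off < start || off >= start + size)
		return 0;
	/* compare len against the remainder instead of forming off + len */
	return len <= start + size - off;
}

int main(void)
{
	uintptr_t base = 0x1000;

	printf("%d\n", range_contains(base, 256, base + 200, 56)); /* 1 */
	printf("%d\n", range_contains(base, 256, base + 200, 57)); /* 0 */
	printf("%d\n", range_contains(base, 256, base - 1, 4));    /* 0 */
	return 0;
}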
static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb,
struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
void __user *arg)
@@ -1378,11 +1555,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
number_of_packets = uurb->number_of_packets;
isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
number_of_packets;
- isopkt = kmalloc(isofrmlen, GFP_KERNEL);
- if (!isopkt)
- return -ENOMEM;
- if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
- ret = -EFAULT;
+ isopkt = memdup_user(iso_frame_desc, isofrmlen);
+ if (IS_ERR(isopkt)) {
+ ret = PTR_ERR(isopkt);
+ isopkt = NULL;
goto error;
}
for (totlen = u = 0; u < number_of_packets; u++) {
@@ -1422,6 +1598,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
goto error;
}
+ as->usbm = find_memory_area(ps, uurb);
+ if (IS_ERR(as->usbm)) {
+ ret = PTR_ERR(as->usbm);
+ as->usbm = NULL;
+ goto error;
+ }
+
+ /* Do not use SG buffers when memory-mapped segments
+ * are in use.
+ */
+ if (as->usbm)
+ num_sgs = 0;
+
u += sizeof(struct async) + sizeof(struct urb) + uurb->buffer_length +
num_sgs * sizeof(struct scatterlist);
ret = usbfs_increase_memory_usage(u);
@@ -1459,29 +1648,35 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
totlen -= u;
}
} else if (uurb->buffer_length > 0) {
- as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
- GFP_KERNEL);
- if (!as->urb->transfer_buffer) {
- ret = -ENOMEM;
- goto error;
- }
+ if (as->usbm) {
+ unsigned long uurb_start = (unsigned long)uurb->buffer;
- if (!is_in) {
- if (copy_from_user(as->urb->transfer_buffer,
- uurb->buffer,
- uurb->buffer_length)) {
- ret = -EFAULT;
+ as->urb->transfer_buffer = as->usbm->mem +
+ (uurb_start - as->usbm->vm_start);
+ } else {
+ as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
+ GFP_KERNEL);
+ if (!as->urb->transfer_buffer) {
+ ret = -ENOMEM;
goto error;
}
- } else if (uurb->type == USBDEVFS_URB_TYPE_ISO) {
- /*
- * Isochronous input data may end up being
- * discontiguous if some of the packets are short.
- * Clear the buffer so that the gaps don't leak
- * kernel data to userspace.
- */
- memset(as->urb->transfer_buffer, 0,
- uurb->buffer_length);
+ if (!is_in) {
+ if (copy_from_user(as->urb->transfer_buffer,
+ uurb->buffer,
+ uurb->buffer_length)) {
+ ret = -EFAULT;
+ goto error;
+ }
+ } else if (uurb->type == USBDEVFS_URB_TYPE_ISO) {
+ /*
+ * Isochronous input data may end up being
+ * discontiguous if some of the packets are
+ * short. Clear the buffer so that the gaps
+ * don't leak kernel data to userspace.
+ */
+ memset(as->urb->transfer_buffer, 0,
+ uurb->buffer_length);
+ }
}
}
as->urb->dev = ps->dev;
@@ -1528,10 +1723,14 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
isopkt = NULL;
as->ps = ps;
as->userurb = arg;
- if (is_in && uurb->buffer_length > 0)
+ if (as->usbm) {
+ unsigned long uurb_start = (unsigned long)uurb->buffer;
+
+ as->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ as->urb->transfer_dma = as->usbm->dma_handle +
+ (uurb_start - as->usbm->vm_start);
+ } else if (is_in && uurb->buffer_length > 0)
as->userbuffer = uurb->buffer;
- else
- as->userbuffer = NULL;
as->signr = uurb->signr;
as->ifnum = ifnum;
as->pid = get_pid(task_pid(current));
@@ -1587,6 +1786,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
return 0;
error:
+ if (as && as->usbm)
+ dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
kfree(isopkt);
kfree(dr);
if (as)
@@ -1903,7 +2104,7 @@ static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg)
ret = releaseintf(ps, ifnum);
if (ret < 0)
return ret;
- destroy_async_on_interface (ps, ifnum);
+ destroy_async_on_interface(ps, ifnum);
return 0;
}
@@ -1915,6 +2116,9 @@ static int proc_ioctl(struct usb_dev_state *ps, struct usbdevfs_ioctl *ctl)
struct usb_interface *intf = NULL;
struct usb_driver *driver = NULL;
+ if (ps->privileges_dropped)
+ return -EACCES;
+
/* alloc buffer */
size = _IOC_SIZE(ctl->ioctl_code);
if (size > 0) {
@@ -2040,7 +2244,8 @@ static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg)
__u32 caps;
caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
- USBDEVFS_CAP_REAP_AFTER_DISCONNECT;
+ USBDEVFS_CAP_REAP_AFTER_DISCONNECT | USBDEVFS_CAP_MMAP |
+ USBDEVFS_CAP_DROP_PRIVILEGES;
if (!ps->dev->bus->no_stop_on_short)
caps |= USBDEVFS_CAP_BULK_CONTINUATION;
if (ps->dev->bus->sg_tablesize)
@@ -2067,6 +2272,9 @@ static int proc_disconnect_claim(struct usb_dev_state *ps, void __user *arg)
if (intf->dev.driver) {
struct usb_driver *driver = to_usb_driver(intf->dev.driver);
+ if (ps->privileges_dropped)
+ return -EACCES;
+
if ((dc.flags & USBDEVFS_DISCONNECT_CLAIM_IF_DRIVER) &&
strncmp(dc.driver, intf->dev.driver->name,
sizeof(dc.driver)) != 0)
@@ -2123,6 +2331,23 @@ static int proc_free_streams(struct usb_dev_state *ps, void __user *arg)
return r;
}
+static int proc_drop_privileges(struct usb_dev_state *ps, void __user *arg)
+{
+ u32 data;
+
+ if (copy_from_user(&data, arg, sizeof(data)))
+ return -EFAULT;
+
+ /* This is a one-way operation. Once privileges are
+ * dropped, they cannot be regained. The ioctl may, however,
+ * be reissued to shrink the allowed interface mask further.
+ */
+ ps->interface_allowed_mask &= data;
+ ps->privileges_dropped = true;
+
+ return 0;
+}
+
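The update above is AND-only and the flag is set-only, so a sequence of
calls can only ever narrow access. A tiny standalone sketch of that
monotone mask; the struct and function names are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_state {
	uint32_t allowed_mask;	/* bit n set => interface n usable */
	bool privileges_dropped;
};

/* AND-only update: no later call can set a bit that was cleared. */
static void demo_drop(struct demo_state *s, uint32_t mask)
{
	s->allowed_mask &= mask;
	s->privileges_dropped = true;
}

static bool demo_may_claim(const struct demo_state *s, unsigned int ifnum)
{
	if (!s->privileges_dropped)
		return true;
	return s->allowed_mask & (1u << ifnum);
}

int main(void)
{
	struct demo_state s = { .allowed_mask = 0xFFFFFFFF };

	demo_drop(&s, 0x0000000F);	/* keep interfaces 0-3 */
	demo_drop(&s, 0xFFFFFFFD);	/* additionally drop interface 1 */
	printf("if0:%d if1:%d if4:%d\n",
	       demo_may_claim(&s, 0), demo_may_claim(&s, 1),
	       demo_may_claim(&s, 4));	/* prints if0:1 if1:0 if4:0 */
	return 0;
}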
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
@@ -2311,6 +2536,9 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
case USBDEVFS_FREE_STREAMS:
ret = proc_free_streams(ps, p);
break;
+ case USBDEVFS_DROP_PRIVILEGES:
+ ret = proc_drop_privileges(ps, p);
+ break;
}
done:
@@ -2366,6 +2594,7 @@ const struct file_operations usbdev_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = usbdev_compat_ioctl,
#endif
+ .mmap = usbdev_mmap,
.open = usbdev_open,
.release = usbdev_release,
};
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 56593a9a8726..2057d91d8336 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -502,11 +502,15 @@ static int usb_unbind_interface(struct device *dev)
int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
- struct device *dev = &iface->dev;
+ struct device *dev;
struct usb_device *udev;
int retval = 0;
int lpm_disable_error;
+ if (!iface)
+ return -ENODEV;
+
+ dev = &iface->dev;
if (dev->driver)
return -EBUSY;
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index ea337a718cc1..822ced9639aa 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -19,6 +19,7 @@
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/usb.h>
#include "usb.h"
@@ -155,7 +156,6 @@ int usb_register_dev(struct usb_interface *intf,
int minor_base = class_driver->minor_base;
int minor;
char name[20];
- char *temp;
#ifdef CONFIG_USB_DYNAMIC_MINORS
/*
@@ -192,14 +192,9 @@ int usb_register_dev(struct usb_interface *intf,
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
- temp = strrchr(name, '/');
- if (temp && (temp[1] != '\0'))
- ++temp;
- else
- temp = name;
intf->usb_dev = device_create(usb_class->class, &intf->dev,
MKDEV(USB_MAJOR, minor), class_driver,
- "%s", temp);
+ "%s", kbasename(name));
if (IS_ERR(intf->usb_dev)) {
down_write(&minor_rwsem);
usb_minors[minor] = NULL;
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 9eb1cff28bd4..7859d738df41 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -28,7 +28,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
#include <asm/prom.h>
#endif
@@ -74,6 +73,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
if (companion->bus != pdev->bus ||
PCI_SLOT(companion->devfn) != slot)
continue;
+
+ /*
+ * The companion device should be a UHCI, OHCI, or EHCI host
+ * controller; otherwise skip it.
+ */
+ if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
+ companion->class != CL_EHCI)
+ continue;
+
companion_hcd = pci_get_drvdata(companion);
if (!companion_hcd || !companion_hcd->self.root_hub)
continue;
@@ -197,7 +205,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
* The xHCI driver has its own irq management
* make sure irq setup is not touched for xhci in generic hcd code
*/
- if ((driver->flags & HCD_MASK) != HCD_USB3) {
+ if ((driver->flags & HCD_MASK) < HCD_USB3) {
if (!dev->irq) {
dev_err(&dev->dev,
"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index df0e3b92533a..2ca2cef7f681 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -90,16 +90,15 @@ unsigned long usb_hcds_loaded;
EXPORT_SYMBOL_GPL(usb_hcds_loaded);
/* host controllers we manage */
-LIST_HEAD (usb_bus_list);
-EXPORT_SYMBOL_GPL (usb_bus_list);
+DEFINE_IDR (usb_bus_idr);
+EXPORT_SYMBOL_GPL (usb_bus_idr);
/* used when allocating bus numbers */
#define USB_MAXBUS 64
-static DECLARE_BITMAP(busmap, USB_MAXBUS);
/* used when updating list of hcds */
-DEFINE_MUTEX(usb_bus_list_lock); /* exported only for usbfs */
-EXPORT_SYMBOL_GPL (usb_bus_list_lock);
+DEFINE_MUTEX(usb_bus_idr_lock); /* exported only for usbfs */
+EXPORT_SYMBOL_GPL (usb_bus_idr_lock);
/* used for controlling access to virtual root hubs */
static DEFINE_SPINLOCK(hcd_root_hub_lock);
@@ -128,6 +127,27 @@ static inline int is_root_hub(struct usb_device *udev)
#define KERNEL_REL bin2bcd(((LINUX_VERSION_CODE >> 16) & 0x0ff))
#define KERNEL_VER bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff))
+/* usb 3.1 root hub device descriptor */
+static const u8 usb31_rh_dev_descriptor[18] = {
+ 0x12, /* __u8 bLength; */
+ USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
+ 0x10, 0x03, /* __le16 bcdUSB; v3.1 */
+
+ 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
+ 0x00, /* __u8 bDeviceSubClass; */
+ 0x03, /* __u8 bDeviceProtocol; USB 3 hub */
+ 0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
+
+ 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
+ 0x03, 0x00, /* __le16 idProduct; device 0x0003 */
+ KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
+
+ 0x03, /* __u8 iManufacturer; */
+ 0x02, /* __u8 iProduct; */
+ 0x01, /* __u8 iSerialNumber; */
+ 0x01 /* __u8 bNumConfigurations; */
+};
+
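The table is raw little-endian bytes, so bcdUSB 0x0310 is stored as
0x10, 0x03 and the version digits are binary-coded decimal. A quick
standalone sketch decoding that field from the bytes above:

#include <stdint.h>
#include <stdio.h>

#define OFF_BCD_USB 2	/* offset of bcdUSB in a device descriptor */

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* first four bytes of the usb31 root-hub descriptor above */
	const uint8_t desc[] = { 0x12, 0x01, 0x10, 0x03 };
	uint16_t bcd = get_le16(&desc[OFF_BCD_USB]);

	/* BCD: each nibble is one decimal digit, so 0x0310 => 3.10 */
	printf("bcdUSB=0x%04x => USB %x.%x%x\n",
	       bcd, bcd >> 8, (bcd >> 4) & 0xf, bcd & 0xf);
	return 0;
}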
/* usb 3.0 root hub device descriptor */
static const u8 usb3_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
@@ -557,6 +577,8 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
case USB_DT_DEVICE << 8:
switch (hcd->speed) {
case HCD_USB31:
+ bufp = usb31_rh_dev_descriptor;
+ break;
case HCD_USB3:
bufp = usb3_rh_dev_descriptor;
break;
@@ -645,9 +667,15 @@ nongeneric:
/* non-generic request */
switch (typeReq) {
case GetHubStatus:
- case GetPortStatus:
len = 4;
break;
+ case GetPortStatus:
+ if (wValue == HUB_PORT_STATUS)
+ len = 4;
+ else
+ /* other port status types return 8 bytes */
+ len = 8;
+ break;
case GetHubDescriptor:
len = sizeof (struct usb_hub_descriptor);
break;
@@ -967,8 +995,6 @@ static void usb_bus_init (struct usb_bus *bus)
bus->bandwidth_int_reqs = 0;
bus->bandwidth_isoc_reqs = 0;
mutex_init(&bus->usb_address0_mutex);
-
- INIT_LIST_HEAD (&bus->bus_list);
}
/*-------------------------------------------------------------------------*/
@@ -988,18 +1014,14 @@ static int usb_register_bus(struct usb_bus *bus)
int result = -E2BIG;
int busnum;
- mutex_lock(&usb_bus_list_lock);
- busnum = find_next_zero_bit(busmap, USB_MAXBUS, 1);
- if (busnum >= USB_MAXBUS) {
- printk (KERN_ERR "%s: too many buses\n", usbcore_name);
+ mutex_lock(&usb_bus_idr_lock);
+ busnum = idr_alloc(&usb_bus_idr, bus, 1, USB_MAXBUS, GFP_KERNEL);
+ if (busnum < 0) {
+ pr_err("%s: failed to get bus number\n", usbcore_name);
goto error_find_busnum;
}
- set_bit(busnum, busmap);
bus->busnum = busnum;
-
- /* Add it to the local list of buses */
- list_add (&bus->bus_list, &usb_bus_list);
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
usb_notify_add_bus(bus);
@@ -1008,7 +1030,7 @@ static int usb_register_bus(struct usb_bus *bus)
return 0;
error_find_busnum:
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
return result;
}
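The old bitmap-plus-list pair is replaced by a single IDR that both picks
the lowest free bus number in [1, USB_MAXBUS) and remembers the bus pointer
under that id. A userspace sketch of the same allocate-in-range idea, with
a toy fixed-size table standing in for the idr:

#include <stdio.h>

#define DEMO_MAXBUS 8

static void *buses[DEMO_MAXBUS];	/* slot n => bus with number n */

/* Bind ptr to the lowest free id in [lo, hi), mirroring idr_alloc();
 * return -1 when the range is exhausted. */
static int demo_idr_alloc(void *ptr, int lo, int hi)
{
	int id;

	for (id = lo; id < hi; id++) {
		if (!buses[id]) {
			buses[id] = ptr;
			return id;
		}
	}
	return -1;
}

static void demo_idr_remove(int id)
{
	buses[id] = NULL;
}

int main(void)
{
	int bus_a = 0, bus_b = 0, bus_c = 0;	/* stand-in bus objects */
	int a = demo_idr_alloc(&bus_a, 1, DEMO_MAXBUS);
	int b = demo_idr_alloc(&bus_b, 1, DEMO_MAXBUS);

	printf("a=%d b=%d\n", a, b);		/* a=1 b=2: ids start at 1 */
	demo_idr_remove(a);
	printf("reused=%d\n", demo_idr_alloc(&bus_c, 1, DEMO_MAXBUS)); /* 1 */
	return 0;
}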
@@ -1029,13 +1051,11 @@ static void usb_deregister_bus (struct usb_bus *bus)
* controller code, as well as having it call this when cleaning
* itself up
*/
- mutex_lock(&usb_bus_list_lock);
- list_del (&bus->bus_list);
- mutex_unlock(&usb_bus_list_lock);
+ mutex_lock(&usb_bus_idr_lock);
+ idr_remove(&usb_bus_idr, bus->busnum);
+ mutex_unlock(&usb_bus_idr_lock);
usb_notify_remove_bus(bus);
-
- clear_bit(bus->busnum, busmap);
}
/**
@@ -1063,12 +1083,12 @@ static int register_root_hub(struct usb_hcd *hcd)
set_bit (devnum, usb_dev->bus->devmap.devicemap);
usb_set_device_state(usb_dev, USB_STATE_ADDRESS);
- mutex_lock(&usb_bus_list_lock);
+ mutex_lock(&usb_bus_idr_lock);
usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
retval = usb_get_device_descriptor(usb_dev, USB_DT_DEVICE_SIZE);
if (retval != sizeof usb_dev->descriptor) {
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
dev_dbg (parent_dev, "can't read %s device descriptor %d\n",
dev_name(&usb_dev->dev), retval);
return (retval < 0) ? retval : -EMSGSIZE;
@@ -1078,8 +1098,8 @@ static int register_root_hub(struct usb_hcd *hcd)
retval = usb_get_bos_descriptor(usb_dev);
if (!retval) {
usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
- } else if (usb_dev->speed == USB_SPEED_SUPER) {
- mutex_unlock(&usb_bus_list_lock);
+ } else if (usb_dev->speed >= USB_SPEED_SUPER) {
+ mutex_unlock(&usb_bus_idr_lock);
dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
dev_name(&usb_dev->dev), retval);
return retval;
@@ -1099,7 +1119,7 @@ static int register_root_hub(struct usb_hcd *hcd)
if (HCD_DEAD(hcd))
usb_hc_died (hcd); /* This time clean up */
}
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
return retval;
}
@@ -1408,7 +1428,8 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
- if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
+ if (IS_ENABLED(CONFIG_HAS_DMA) &&
+ (urb->transfer_flags & URB_SETUP_MAP_SINGLE))
dma_unmap_single(hcd->self.controller,
urb->setup_dma,
sizeof(struct usb_ctrlrequest),
@@ -1440,17 +1461,20 @@ void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
usb_hcd_unmap_urb_setup_for_dma(hcd, urb);
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- if (urb->transfer_flags & URB_DMA_MAP_SG)
+ if (IS_ENABLED(CONFIG_HAS_DMA) &&
+ (urb->transfer_flags & URB_DMA_MAP_SG))
dma_unmap_sg(hcd->self.controller,
urb->sg,
urb->num_sgs,
dir);
- else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
+ else if (IS_ENABLED(CONFIG_HAS_DMA) &&
+ (urb->transfer_flags & URB_DMA_MAP_PAGE))
dma_unmap_page(hcd->self.controller,
urb->transfer_dma,
urb->transfer_buffer_length,
dir);
- else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
+ else if (IS_ENABLED(CONFIG_HAS_DMA) &&
+ (urb->transfer_flags & URB_DMA_MAP_SINGLE))
dma_unmap_single(hcd->self.controller,
urb->transfer_dma,
urb->transfer_buffer_length,
@@ -1492,7 +1516,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
if (hcd->self.uses_pio_for_control)
return ret;
- if (hcd->self.uses_dma) {
+ if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
urb->setup_dma = dma_map_single(
hcd->self.controller,
urb->setup_packet,
@@ -1518,7 +1542,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (urb->transfer_buffer_length != 0
&& !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
- if (hcd->self.uses_dma) {
+ if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
if (urb->num_sgs) {
int n;
@@ -2112,7 +2136,7 @@ int usb_alloc_streams(struct usb_interface *interface,
hcd = bus_to_hcd(dev->bus);
if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
return -EINVAL;
- if (dev->speed != USB_SPEED_SUPER)
+ if (dev->speed < USB_SPEED_SUPER)
return -EINVAL;
if (dev->state < USB_STATE_CONFIGURED)
return -ENODEV;
@@ -2160,7 +2184,7 @@ int usb_free_streams(struct usb_interface *interface,
dev = interface_to_usbdev(interface);
hcd = bus_to_hcd(dev->bus);
- if (dev->speed != USB_SPEED_SUPER)
+ if (dev->speed < USB_SPEED_SUPER)
return -EINVAL;
/* Double-free is not allowed */
@@ -2208,7 +2232,7 @@ int usb_hcd_get_frame_number (struct usb_device *udev)
int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
{
- struct usb_hcd *hcd = container_of(rhdev->bus, struct usb_hcd, self);
+ struct usb_hcd *hcd = bus_to_hcd(rhdev->bus);
int status;
int old_state = hcd->state;
@@ -2257,7 +2281,7 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
{
- struct usb_hcd *hcd = container_of(rhdev->bus, struct usb_hcd, self);
+ struct usb_hcd *hcd = bus_to_hcd(rhdev->bus);
int status;
int old_state = hcd->state;
@@ -2371,7 +2395,7 @@ int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num)
* boards with root hubs hooked up to internal devices (instead of
* just the OTG port) may need more attention to resetting...
*/
- hcd = container_of (bus, struct usb_hcd, self);
+ hcd = bus_to_hcd(bus);
if (port_num && hcd->driver->start_port_reset)
status = hcd->driver->start_port_reset(hcd, port_num);
@@ -2778,9 +2802,11 @@ int usb_add_hcd(struct usb_hcd *hcd,
rhdev->speed = USB_SPEED_WIRELESS;
break;
case HCD_USB3:
- case HCD_USB31:
rhdev->speed = USB_SPEED_SUPER;
break;
+ case HCD_USB31:
+ rhdev->speed = USB_SPEED_SUPER_PLUS;
+ break;
default:
retval = -EINVAL;
goto err_set_rh_speed;
@@ -2863,9 +2889,9 @@ error_create_attr_group:
#ifdef CONFIG_PM
cancel_work_sync(&hcd->wakeup_work);
#endif
- mutex_lock(&usb_bus_list_lock);
+ mutex_lock(&usb_bus_idr_lock);
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
err_register_root_hub:
hcd->rh_pollable = 0;
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2932,9 +2958,9 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->wakeup_work);
#endif
- mutex_lock(&usb_bus_list_lock);
+ mutex_lock(&usb_bus_idr_lock);
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
/*
* tasklet_kill() isn't needed here because:
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 51b436918f78..38cc4bae0a82 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -49,7 +49,7 @@ static void hub_event(struct work_struct *work);
DEFINE_MUTEX(usb_port_peer_mutex);
/* cycle leds on hubs that aren't blinking for attention */
-static bool blinkenlights = 0;
+static bool blinkenlights;
module_param(blinkenlights, bool, S_IRUGO);
MODULE_PARM_DESC(blinkenlights, "true to cycle leds on hubs");
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(initial_descriptor_timeout,
* otherwise the new scheme is used. If that fails and "use_both_schemes"
* is set, then the driver will make another attempt, using the other scheme.
*/
-static bool old_scheme_first = 0;
+static bool old_scheme_first;
module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(old_scheme_first,
"start with the old device initialization scheme");
@@ -298,7 +298,7 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
unsigned int hub_u1_del;
unsigned int hub_u2_del;
- if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
+ if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
return;
hub = usb_hub_to_struct_hub(udev->parent);
@@ -537,29 +537,34 @@ static int get_hub_status(struct usb_device *hdev,
/*
* USB 2.0 spec Section 11.24.2.7
+ * USB 3.1 additionally uses the wValue and wLength fields; see spec Section 10.16.2.6
*/
static int get_port_status(struct usb_device *hdev, int port1,
- struct usb_port_status *data)
+ void *data, u16 value, u16 length)
{
int i, status = -ETIMEDOUT;
for (i = 0; i < USB_STS_RETRIES &&
(status == -ETIMEDOUT || status == -EPIPE); i++) {
status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
- USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
- data, sizeof(*data), USB_STS_TIMEOUT);
+ USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, value,
+ port1, data, length, USB_STS_TIMEOUT);
}
return status;
}
-static int hub_port_status(struct usb_hub *hub, int port1,
- u16 *status, u16 *change)
+static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
+ u16 *status, u16 *change, u32 *ext_status)
{
int ret;
+ int len = 4;
+
+ if (type != HUB_PORT_STATUS)
+ len = 8;
mutex_lock(&hub->status_mutex);
- ret = get_port_status(hub->hdev, port1, &hub->status->port);
- if (ret < 4) {
+ ret = get_port_status(hub->hdev, port1, &hub->status->port, type, len);
+ if (ret < len) {
if (ret != -ENODEV)
dev_err(hub->intfdev,
"%s failed (err = %d)\n", __func__, ret);
@@ -568,13 +573,22 @@ static int hub_port_status(struct usb_hub *hub, int port1,
} else {
*status = le16_to_cpu(hub->status->port.wPortStatus);
*change = le16_to_cpu(hub->status->port.wPortChange);
-
+ if (type != HUB_PORT_STATUS && ext_status)
+ *ext_status = le32_to_cpu(
+ hub->status->port.dwExtPortStatus);
ret = 0;
}
mutex_unlock(&hub->status_mutex);
return ret;
}
+static int hub_port_status(struct usb_hub *hub, int port1,
+ u16 *status, u16 *change)
+{
+ return hub_ext_port_status(hub, port1, HUB_PORT_STATUS,
+ status, change, NULL);
+}
+
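get_port_status() above retries only on -ETIMEDOUT and -EPIPE and treats
every other outcome, including a successful transfer, as final. The shape
of that loop reduced to a standalone sketch, where demo_transfer() stands
in for usb_control_msg():

#include <errno.h>
#include <stdio.h>

#define DEMO_RETRIES 5

/* Fails twice with a transient error, then succeeds. */
static int demo_transfer(void)
{
	static int calls;

	return (++calls < 3) ? -ETIMEDOUT : 8;	/* 8 = bytes transferred */
}

static int demo_get_status(void)
{
	int i, status = -ETIMEDOUT;

	/* retry only the errors known to be transient */
	for (i = 0; i < DEMO_RETRIES &&
	     (status == -ETIMEDOUT || status == -EPIPE); i++)
		status = demo_transfer();
	return status;
}

int main(void)
{
	printf("status=%d\n", demo_get_status());	/* 8 after two retries */
	return 0;
}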
static void kick_hub_wq(struct usb_hub *hub)
{
struct usb_interface *intf;
@@ -2131,7 +2145,7 @@ static void hub_disconnect_children(struct usb_device *udev)
* Something got disconnected. Get rid of it and all of its children.
*
* If *pdev is a normal device then the parent hub must already be locked.
- * If *pdev is a root hub then the caller must hold the usb_bus_list_lock,
+ * If *pdev is a root hub then the caller must hold the usb_bus_idr_lock,
* which protects the set of root hubs as well as the list of buses.
*
* Only hub drivers (including virtual root hub drivers for host
@@ -2429,7 +2443,7 @@ static void set_usb_port_removable(struct usb_device *udev)
* enumerated. The device descriptor is available, but not descriptors
* for any device configuration. The caller must have locked either
* the parent hub (if udev is a normal device) or else the
- * usb_bus_list_lock (if udev is a root hub). The parent's pointer to
+ * usb_bus_idr_lock (if udev is a root hub). The parent's pointer to
* udev has already been installed, but udev is not yet visible through
* sysfs or other filesystem code.
*
@@ -2612,6 +2626,32 @@ out_authorized:
return result;
}
+/*
+ * Return 1 if the port speed is SuperSpeedPlus, 0 otherwise.
+ * The check uses the link protocol field of the sublink speed attribute
+ * whose speed ID matches the current one. The current speed ID comes from
+ * the extended port status request; the sublink speed attribute table is
+ * part of the hub's BOS SSP device capability descriptor.
+ */
+static int port_speed_is_ssp(struct usb_device *hdev, int speed_id)
+{
+ int ssa_count;
+ u32 ss_attr;
+ int i;
+ struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
+
+ if (!ssp_cap)
+ return 0;
+
+ ssa_count = le32_to_cpu(ssp_cap->bmAttributes) &
+ USB_SSP_SUBLINK_SPEED_ATTRIBS;
+
+ for (i = 0; i <= ssa_count; i++) {
+ ss_attr = le32_to_cpu(ssp_cap->bmSublinkSpeedAttr[i]);
+ if (speed_id == (ss_attr & USB_SSP_SUBLINK_SPEED_SSID))
+ return !!(ss_attr & USB_SSP_SUBLINK_SPEED_LP);
+ }
+ return 0;
+}
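port_speed_is_ssp() walks the sublink speed attribute table until the SSID
field matches the current speed ID, then reports the link protocol bits. A
standalone sketch assuming the same field layout as the USB_SSP_* masks
(SSID in bits 3:0, link protocol in bits 15:14); the table contents are
invented:

#include <stdint.h>
#include <stdio.h>

#define SSID_MASK	0x0000000f	/* bits 3:0  - sublink speed ID */
#define LP_MASK		0x0000c000	/* bits 15:14 - link protocol */

/* Return 1 if speed_id names a SuperSpeedPlus sublink, 0 otherwise;
 * a non-zero link protocol field marks the attribute as SSP. */
static int speed_id_is_ssp(const uint32_t *attrs, int count, int speed_id)
{
	int i;

	for (i = 0; i < count; i++)
		if ((attrs[i] & SSID_MASK) == (uint32_t)speed_id)
			return !!(attrs[i] & LP_MASK);
	return 0;
}

int main(void)
{
	/* invented table: ID 4 has LP=0 (SS), ID 5 has LP=1 (SSP) */
	const uint32_t attrs[] = { 0x00000004, 0x00004005 };

	printf("id4:%d id5:%d\n",
	       speed_id_is_ssp(attrs, 2, 4),
	       speed_id_is_ssp(attrs, 2, 5));	/* prints id4:0 id5:1 */
	return 0;
}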
/* Returns 1 if @hub is a WUSB root hub, 0 otherwise */
static unsigned hub_is_wusb(struct usb_hub *hub)
@@ -2619,7 +2659,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
struct usb_hcd *hcd;
if (hub->hdev->parent != NULL) /* not a root hub? */
return 0;
- hcd = container_of(hub->hdev->bus, struct usb_hcd, self);
+ hcd = bus_to_hcd(hub->hdev->bus);
return hcd->wireless;
}
@@ -2645,7 +2685,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
*/
static bool use_new_scheme(struct usb_device *udev, int retry)
{
- if (udev->speed == USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
return false;
return USE_NEW_SCHEME(retry);
@@ -2676,6 +2716,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
int delay_time, ret;
u16 portstatus;
u16 portchange;
+ u32 ext_portstatus = 0;
for (delay_time = 0;
delay_time < HUB_RESET_TIMEOUT;
@@ -2684,7 +2725,14 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
msleep(delay);
/* read and decode port status */
- ret = hub_port_status(hub, port1, &portstatus, &portchange);
+ if (hub_is_superspeedplus(hub->hdev))
+ ret = hub_ext_port_status(hub, port1,
+ HUB_EXT_PORT_STATUS,
+ &portstatus, &portchange,
+ &ext_portstatus);
+ else
+ ret = hub_port_status(hub, port1, &portstatus,
+ &portchange);
if (ret < 0)
return ret;
@@ -2727,6 +2775,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (hub_is_wusb(hub))
udev->speed = USB_SPEED_WIRELESS;
+ else if (hub_is_superspeedplus(hub->hdev) &&
+ port_speed_is_ssp(hub->hdev, ext_portstatus &
+ USB_EXT_PORT_STAT_RX_SPEED_ID))
+ udev->speed = USB_SPEED_SUPER_PLUS;
else if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
@@ -3989,7 +4041,7 @@ int usb_disable_lpm(struct usb_device *udev)
struct usb_hcd *hcd;
if (!udev || !udev->parent ||
- udev->speed != USB_SPEED_SUPER ||
+ udev->speed < USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_DEFAULT)
return 0;
@@ -4048,7 +4100,7 @@ void usb_enable_lpm(struct usb_device *udev)
struct usb_port *port_dev;
if (!udev || !udev->parent ||
- udev->speed != USB_SPEED_SUPER ||
+ udev->speed < USB_SPEED_SUPER ||
!udev->lpm_capable ||
udev->state < USB_STATE_DEFAULT)
return;
@@ -4292,7 +4344,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
- int i, j, retval;
+ int retries, operations, retval, i;
unsigned delay = HUB_SHORT_RESET_TIME;
enum usb_device_speed oldspeed = udev->speed;
const char *speed;
@@ -4323,7 +4375,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
retval = -ENODEV;
- if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
+ /* Don't allow speed changes at reset, except USB 3.0 devices moving to a faster speed */
+ if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed &&
+ !(oldspeed == USB_SPEED_SUPER && udev->speed > oldspeed)) {
dev_dbg(&udev->dev, "device reset changed speed!\n");
goto fail;
}
@@ -4335,6 +4389,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
*/
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_WIRELESS: /* fixed at 512 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
@@ -4361,7 +4416,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
else
speed = usb_speed_string(udev->speed);
- if (udev->speed != USB_SPEED_SUPER)
+ if (udev->speed < USB_SPEED_SUPER)
dev_info(&udev->dev,
"%s %s USB device number %d using %s\n",
(udev->config) ? "reset" : "new", speed,
@@ -4394,7 +4449,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* first 8 bytes of the device descriptor to get the ep0 maxpacket
* value.
*/
- for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
+ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
bool did_new_scheme = false;
if (use_new_scheme(udev, retry_counter)) {
@@ -4421,7 +4476,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* 255 is for WUSB devices, we actually need to use
* 512 (WUSB1.0[4.8.1]).
*/
- for (j = 0; j < 3; ++j) {
+ for (operations = 0; operations < 3; ++operations) {
buf->bMaxPacketSize0 = 0;
r = usb_control_msg(udev, usb_rcvaddr0pipe(),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
@@ -4441,7 +4496,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
r = -EPROTO;
break;
}
- if (r == 0)
+ /*
+ * Some devices time out if they are powered on
+ * while already connected. They need a second
+ * reset, but only on the first attempt, lest
+ * we get into a timeout/reset loop.
+ */
+ if (r == 0 || (r == -ETIMEDOUT && retries == 0))
break;
}
udev->descriptor.bMaxPacketSize0 =
@@ -4473,7 +4534,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* authorization will assign the final address.
*/
if (udev->wusb == 0) {
- for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
+ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
retval = hub_set_address(udev, devnum);
if (retval >= 0)
break;
@@ -4485,11 +4546,12 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
devnum, retval);
goto fail;
}
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
devnum = udev->devnum;
dev_info(&udev->dev,
- "%s SuperSpeed USB device number %d using %s\n",
+ "%s SuperSpeed%s USB device number %d using %s\n",
(udev->config) ? "reset" : "new",
+ (udev->speed == USB_SPEED_SUPER_PLUS) ? "Plus" : "",
devnum, udev->bus->controller->driver->name);
}
@@ -4528,7 +4590,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* got from those devices show they aren't superspeed devices. Warm
* reset the port attached by the devices can fix them.
*/
- if ((udev->speed == USB_SPEED_SUPER) &&
+ if ((udev->speed >= USB_SPEED_SUPER) &&
(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
dev_err(&udev->dev, "got a wrong device descriptor, "
"warm reset device\n");
@@ -4539,7 +4601,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
}
if (udev->descriptor.bMaxPacketSize0 == 0xff ||
- udev->speed == USB_SPEED_SUPER)
+ udev->speed >= USB_SPEED_SUPER)
i = 512;
else
i = udev->descriptor.bMaxPacketSize0;
@@ -4749,7 +4811,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
udev->level = hdev->level + 1;
udev->wusb = hub_is_wusb(hub);
- /* Only USB 3.0 devices are connected to SuperSpeed hubs. */
+ /* Devices connected to SuperSpeed hubs are USB 3.0 or later */
if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
else
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 45d070dd1d03..34c1a7e22aae 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -140,6 +140,13 @@ static inline int hub_is_superspeed(struct usb_device *hdev)
return hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS;
}
+static inline int hub_is_superspeedplus(struct usb_device *hdev)
+{
+ return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
+ le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
+ hdev->bos->ssp_cap);
+}
+
static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
{
unsigned delay = hub->descriptor->bPwrOn2PwrGood * 2;
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
new file mode 100644
index 000000000000..2289700c31d6
--- /dev/null
+++ b/drivers/usb/core/of.c
@@ -0,0 +1,47 @@
+/*
+ * of.c: helpers for HCD device tree support
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Author: Peter Chen <peter.chen@freescale.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+
+/**
+ * usb_of_get_child_node - Find the device node matching a port number
+ * @parent: the parent device node
+ * @portnum: the port number to which the device is connected
+ *
+ * Find the child node in the device tree according to its port number.
+ *
+ * Return: On success, a pointer to the device node, %NULL on failure.
+ */
+struct device_node *usb_of_get_child_node(struct device_node *parent,
+ int portnum)
+{
+ struct device_node *node;
+ u32 port;
+
+ for_each_child_of_node(parent, node) {
+ if (!of_property_read_u32(node, "reg", &port)) {
+ if (port == portnum)
+ return node;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_child_node);
+
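A hypothetical in-kernel caller of the new helper, sketched only to show
the reference semantics: for_each_child_of_node() takes a reference on each
node it visits, and returning from inside the loop hands that reference to
the caller, who must balance it with of_node_put(). The function name here
is invented:

#include <linux/of.h>

static int demo_lookup_port(struct device_node *controller, int portnum)
{
	struct device_node *child;

	child = usb_of_get_child_node(controller, portnum);
	if (!child)
		return -ENODEV;	/* no child with reg == portnum */

	/* ... read properties from child here ... */

	of_node_put(child);	/* drop the reference the helper returned */
	return 0;
}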
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 65b6e6b84043..c953a0f1c695 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -23,10 +23,12 @@ static ssize_t field##_show(struct device *dev, \
{ \
struct usb_device *udev; \
struct usb_host_config *actconfig; \
- ssize_t rc = 0; \
+ ssize_t rc; \
\
udev = to_usb_device(dev); \
- usb_lock_device(udev); \
+ rc = usb_lock_device_interruptible(udev); \
+ if (rc < 0) \
+ return -EINTR; \
actconfig = udev->actconfig; \
if (actconfig) \
rc = sprintf(buf, format_string, \
@@ -47,10 +49,12 @@ static ssize_t bMaxPower_show(struct device *dev,
{
struct usb_device *udev;
struct usb_host_config *actconfig;
- ssize_t rc = 0;
+ ssize_t rc;
udev = to_usb_device(dev);
- usb_lock_device(udev);
+ rc = usb_lock_device_interruptible(udev);
+ if (rc < 0)
+ return -EINTR;
actconfig = udev->actconfig;
if (actconfig)
rc = sprintf(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
@@ -64,10 +68,12 @@ static ssize_t configuration_show(struct device *dev,
{
struct usb_device *udev;
struct usb_host_config *actconfig;
- ssize_t rc = 0;
+ ssize_t rc;
udev = to_usb_device(dev);
- usb_lock_device(udev);
+ rc = usb_lock_device_interruptible(udev);
+ if (rc < 0)
+ return -EINTR;
actconfig = udev->actconfig;
if (actconfig && actconfig->string)
rc = sprintf(buf, "%s\n", actconfig->string);
@@ -84,11 +90,13 @@ static ssize_t bConfigurationValue_store(struct device *dev,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int config, value;
+ int config, value, rc;
if (sscanf(buf, "%d", &config) != 1 || config < -1 || config > 255)
return -EINVAL;
- usb_lock_device(udev);
+ rc = usb_lock_device_interruptible(udev);
+ if (rc < 0)
+ return -EINTR;
value = usb_set_configuration(udev, config);
usb_unlock_device(udev);
return (value < 0) ? value : count;
@@ -105,7 +113,9 @@ static ssize_t name##_show(struct device *dev, \
int retval; \
\
udev = to_usb_device(dev); \
- usb_lock_device(udev); \
+ retval = usb_lock_device_interruptible(udev); \
+ if (retval < 0) \
+ return -EINTR; \
retval = sprintf(buf, "%s\n", udev->name); \
usb_unlock_device(udev); \
return retval; \
@@ -141,6 +151,9 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
case USB_SPEED_SUPER:
speed = "5000";
break;
+ case USB_SPEED_SUPER_PLUS:
+ speed = "10000";
+ break;
default:
speed = "unknown";
}
@@ -224,11 +237,13 @@ static ssize_t avoid_reset_quirk_store(struct device *dev,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int val;
+ int val, rc;
if (sscanf(buf, "%d", &val) != 1 || val < 0 || val > 1)
return -EINVAL;
- usb_lock_device(udev);
+ rc = usb_lock_device_interruptible(udev);
+ if (rc < 0)
+ return -EINTR;
if (val)
udev->quirks |= USB_QUIRK_RESET;
else
@@ -294,7 +309,7 @@ static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int value;
+ int value, rc;
/* Hubs are always enabled for USB_PERSIST */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
@@ -303,7 +318,9 @@ static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
if (sscanf(buf, "%d", &value) != 1)
return -EINVAL;
- usb_lock_device(udev);
+ rc = usb_lock_device_interruptible(udev);
+ if (rc < 0)
+ return -EINTR;
udev->persist_enabled = !!value;
usb_unlock_device(udev);
return count;
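Each of these sysfs handlers now turns a signal received while waiting for
the device lock into -EINTR instead of blocking uninterruptibly. A rough
userspace analogue of that interruptible wait, using sem_wait(), which
fails with EINTR when a signal handler runs; names and timing are contrived
for the demo (build with -pthread):

#include <errno.h>
#include <semaphore.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static sem_t dev_lock;

static void on_alarm(int sig) { (void)sig; /* just interrupt sem_wait */ }

/* The interruptible-lock shape: block, but turn a delivered signal
 * into -EINTR instead of waiting forever. */
static int demo_lock_interruptible(void)
{
	if (sem_wait(&dev_lock) == -1 && errno == EINTR)
		return -EINTR;
	return 0;
}

int main(void)
{
	struct sigaction sa;
	int rc;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;	/* no SA_RESTART: sem_wait fails */
	sigaction(SIGALRM, &sa, NULL);

	sem_init(&dev_lock, 0, 0);	/* starts "locked": holder elsewhere */
	alarm(1);			/* the "signal" arrives in a second */
	rc = demo_lock_interruptible();
	printf("rc=%d\n", rc);		/* -EINTR: the caller bails out */
	return 0;
}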
@@ -420,13 +437,16 @@ static ssize_t level_store(struct device *dev, struct device_attribute *attr,
int len = count;
char *cp;
int rc = count;
+ int rv;
warn_level();
cp = memchr(buf, '\n', count);
if (cp)
len = cp - buf;
- usb_lock_device(udev);
+ rv = usb_lock_device_interruptible(udev);
+ if (rv < 0)
+ return -EINTR;
if (len == sizeof on_string - 1 &&
strncmp(buf, on_string, len) == 0)
@@ -466,7 +486,9 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
bool value;
int ret;
- usb_lock_device(udev);
+ ret = usb_lock_device_interruptible(udev);
+ if (ret < 0)
+ return -EINTR;
ret = strtobool(buf, &value);
@@ -536,8 +558,11 @@ static ssize_t usb3_hardware_lpm_u1_show(struct device *dev,
{
struct usb_device *udev = to_usb_device(dev);
const char *p;
+ int rc;
- usb_lock_device(udev);
+ rc = usb_lock_device_interruptible(udev);
+ if (rc < 0)
+ return -EINTR;
if (udev->usb3_lpm_u1_enabled)
p = "enabled";
@@ -555,8 +580,11 @@ static ssize_t usb3_hardware_lpm_u2_show(struct device *dev,
{
struct usb_device *udev = to_usb_device(dev);
const char *p;
+ int rc;
- usb_lock_device(udev);
+ rc = usb_lock_device_interruptible(udev);
+ if (rc < 0)
+ return -EINTR;
if (udev->usb3_lpm_u2_enabled)
p = "enabled";
@@ -822,7 +850,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
*/
- usb_lock_device(udev);
for (cfgno = -1; cfgno < udev->descriptor.bNumConfigurations &&
nleft > 0; ++cfgno) {
if (cfgno < 0) {
@@ -843,7 +870,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
- usb_unlock_device(udev);
return count - nleft;
}
@@ -969,7 +995,9 @@ static ssize_t supports_autosuspend_show(struct device *dev,
{
int s;
- device_lock(dev);
+ s = device_lock_interruptible(dev);
+ if (s < 0)
+ return -EINTR;
/* Devices will be autosuspended even when an interface isn't claimed */
s = (!dev->driver || to_usb_driver(dev->driver)->supports_autosuspend);
device_unlock(dev);
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 3d274778caaf..c601e25b609f 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -401,7 +401,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
/* SuperSpeed isoc endpoints have up to 16 bursts of up to
* 3 packets each
*/
- if (dev->speed == USB_SPEED_SUPER) {
+ if (dev->speed >= USB_SPEED_SUPER) {
int burst = 1 + ep->ss_ep_comp.bMaxBurst;
int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
max *= burst;
@@ -499,6 +499,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
}
/* too big? */
switch (dev->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER: /* units are 125us */
/* Handle up to 2^(16-1) microframes */
if (urb->interval > (1 << 15))
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index ebb29caa3fe4..dcb85e3cd5a7 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -36,6 +36,7 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/usb/of.h>
#include <asm/io.h>
#include <linux/scatterlist.h>
@@ -241,7 +242,7 @@ static int __each_dev(struct device *dev, void *data)
if (!is_usb_device(dev))
return 0;
- return arg->fn(container_of(dev, struct usb_device, dev), arg->data);
+ return arg->fn(to_usb_device(dev), arg->data);
}
/**
@@ -397,7 +398,7 @@ struct device_type usb_device_type = {
/* Returns 1 if @usb_bus is WUSB, 0 otherwise */
static unsigned usb_bus_is_wusb(struct usb_bus *bus)
{
- struct usb_hcd *hcd = container_of(bus, struct usb_hcd, self);
+ struct usb_hcd *hcd = bus_to_hcd(bus);
return hcd->wireless;
}
@@ -423,6 +424,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
struct usb_device *dev;
struct usb_hcd *usb_hcd = bus_to_hcd(bus);
unsigned root_hub = 0;
+ unsigned raw_port = port1;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -470,6 +472,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->route = 0;
dev->dev.parent = bus->controller;
+ dev->dev.of_node = bus->controller->of_node;
dev_set_name(&dev->dev, "usb%d", bus->busnum);
root_hub = 1;
} else {
@@ -494,6 +497,14 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->dev.parent = &parent->dev;
dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
+ if (!parent->parent) {
+ /* device under root hub's port */
+ raw_port = usb_hcd_find_raw_port_number(usb_hcd,
+ port1);
+ }
+ dev->dev.of_node = usb_of_get_child_node(parent->dev.of_node,
+ raw_port);
+
/* hub driver sets up TT records */
}
@@ -1115,6 +1126,7 @@ static void __exit usb_exit(void)
bus_unregister(&usb_bus_type);
usb_acpi_unregister();
usb_debugfs_cleanup();
+ idr_destroy(&usb_bus_idr);
}
subsys_initcall(usb_init);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 05b5e17abf92..53318126ed91 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -45,7 +45,7 @@ static inline unsigned usb_get_max_power(struct usb_device *udev,
struct usb_host_config *c)
{
/* SuperSpeed power is in 8 mA units; others are in 2 mA units */
- unsigned mul = (udev->speed == USB_SPEED_SUPER ? 8 : 2);
+ unsigned mul = (udev->speed >= USB_SPEED_SUPER ? 8 : 2);
return c->desc.bMaxPower * mul;
}
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index f0decc0d69b5..c1f29caa8990 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -2,6 +2,7 @@ config USB_DWC2
tristate "DesignWare USB2 DRD Core Support"
depends on HAS_DMA
depends on USB || USB_GADGET
+ depends on HAS_IOMEM
help
Say Y here if your system has a Dual Role Hi-Speed USB
controller based on the DesignWare HSOTG IP Core.
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 46c4ba75dc2a..4135a5ff67ca 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -56,189 +56,6 @@
#include "core.h"
#include "hcd.h"
-#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
-/**
- * dwc2_backup_host_registers() - Backup controller host registers.
- * When suspending usb bus, registers needs to be backuped
- * if controller power is disabled once suspended.
- *
- * @hsotg: Programming view of the DWC_otg controller
- */
-static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hregs_backup *hr;
- int i;
-
- dev_dbg(hsotg->dev, "%s\n", __func__);
-
- /* Backup Host regs */
- hr = &hsotg->hr_backup;
- hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
- hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
- for (i = 0; i < hsotg->core_params->host_channels; ++i)
- hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
-
- hr->hprt0 = dwc2_read_hprt0(hsotg);
- hr->hfir = dwc2_readl(hsotg->regs + HFIR);
- hr->valid = true;
-
- return 0;
-}
-
-/**
- * dwc2_restore_host_registers() - Restore controller host registers.
- * When resuming usb bus, device registers needs to be restored
- * if controller power were disabled.
- *
- * @hsotg: Programming view of the DWC_otg controller
- */
-static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_hregs_backup *hr;
- int i;
-
- dev_dbg(hsotg->dev, "%s\n", __func__);
-
- /* Restore host regs */
- hr = &hsotg->hr_backup;
- if (!hr->valid) {
- dev_err(hsotg->dev, "%s: no host registers to restore\n",
- __func__);
- return -EINVAL;
- }
- hr->valid = false;
-
- dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
- dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
-
- for (i = 0; i < hsotg->core_params->host_channels; ++i)
- dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
-
- dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
- dwc2_writel(hr->hfir, hsotg->regs + HFIR);
- hsotg->frame_number = 0;
-
- return 0;
-}
-#else
-static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
-{ return 0; }
-
-static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
-{ return 0; }
-#endif
-
-#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
- IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
-/**
- * dwc2_backup_device_registers() - Backup controller device registers.
- * When suspending usb bus, registers needs to be backuped
- * if controller power is disabled once suspended.
- *
- * @hsotg: Programming view of the DWC_otg controller
- */
-static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_dregs_backup *dr;
- int i;
-
- dev_dbg(hsotg->dev, "%s\n", __func__);
-
- /* Backup dev regs */
- dr = &hsotg->dr_backup;
-
- dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
- dr->dctl = dwc2_readl(hsotg->regs + DCTL);
- dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
- dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
- dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
-
- for (i = 0; i < hsotg->num_of_eps; i++) {
- /* Backup IN EPs */
- dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));
-
- /* Ensure DATA PID is correctly configured */
- if (dr->diepctl[i] & DXEPCTL_DPID)
- dr->diepctl[i] |= DXEPCTL_SETD1PID;
- else
- dr->diepctl[i] |= DXEPCTL_SETD0PID;
-
- dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
- dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));
-
- /* Backup OUT EPs */
- dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));
-
- /* Ensure DATA PID is correctly configured */
- if (dr->doepctl[i] & DXEPCTL_DPID)
- dr->doepctl[i] |= DXEPCTL_SETD1PID;
- else
- dr->doepctl[i] |= DXEPCTL_SETD0PID;
-
- dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
- dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
- }
- dr->valid = true;
- return 0;
-}
-
-/**
- * dwc2_restore_device_registers() - Restore controller device registers.
- * When resuming the USB bus, device registers need to be restored
- * if controller power was disabled.
- *
- * @hsotg: Programming view of the DWC_otg controller
- */
-static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_dregs_backup *dr;
- u32 dctl;
- int i;
-
- dev_dbg(hsotg->dev, "%s\n", __func__);
-
- /* Restore dev regs */
- dr = &hsotg->dr_backup;
- if (!dr->valid) {
- dev_err(hsotg->dev, "%s: no device registers to restore\n",
- __func__);
- return -EINVAL;
- }
- dr->valid = false;
-
- dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
- dwc2_writel(dr->dctl, hsotg->regs + DCTL);
- dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
- dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
- dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);
-
- for (i = 0; i < hsotg->num_of_eps; i++) {
- /* Restore IN EPs */
- dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
- dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
- dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));
-
- /* Restore OUT EPs */
- dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
- dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
- dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
- }
-
- /* Set the Power-On Programming done bit */
- dctl = dwc2_readl(hsotg->regs + DCTL);
- dctl |= DCTL_PWRONPRGDONE;
- dwc2_writel(dctl, hsotg->regs + DCTL);
-
- return 0;
-}
-#else
-static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
-{ return 0; }
-
-static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
-{ return 0; }
-#endif
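
One subtlety in the device-side backup above: DXEPCTL_DPID only reports the endpoint's current DATA PID and cannot be written back directly, so the saved image is patched with the matching SETD1PID/SETD0PID bit and the PID is re-armed when the image is restored. A self-contained sketch of that translation (the bit positions below are illustrative, not the real DXEPCTL layout):

#include <stdint.h>
#include <stdio.h>

#define DPID            (1u << 16)      /* status: current DATA PID */
#define SETD0PID        (1u << 28)      /* command: next PID = DATA0 */
#define SETD1PID        (1u << 29)      /* command: next PID = DATA1 */

/* Turn a read snapshot into an image that re-arms the same PID on write */
static uint32_t fixup_pid(uint32_t epctl)
{
        return epctl | ((epctl & DPID) ? SETD1PID : SETD0PID);
}

int main(void)
{
        printf("%#x\n", (unsigned)fixup_pid(DPID)); /* gains SETD1PID */
        printf("%#x\n", (unsigned)fixup_pid(0));    /* gains SETD0PID */
        return 0;
}
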
-
/**
* dwc2_backup_global_registers() - Backup global controller registers.
* When suspending the USB bus, registers need to be backed up
@@ -421,62 +238,6 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
return ret;
}
-/**
- * dwc2_enable_common_interrupts() - Initializes the common interrupts,
- * used in both device and host modes
- *
- * @hsotg: Programming view of the DWC_otg controller
- */
-static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
-{
- u32 intmsk;
-
- /* Clear any pending OTG Interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
-
- /* Clear any pending interrupts */
- dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
-
- /* Enable the interrupts in the GINTMSK */
- intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
-
- if (hsotg->core_params->dma_enable <= 0)
- intmsk |= GINTSTS_RXFLVL;
- if (hsotg->core_params->external_id_pin_ctl <= 0)
- intmsk |= GINTSTS_CONIDSTSCHNG;
-
- intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
- GINTSTS_SESSREQINT;
-
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
-}
-
-/*
- * Initializes the FSLSPClkSel field of the HCFG register depending on the
- * PHY type
- */
-static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
-{
- u32 hcfg, val;
-
- if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
- hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->core_params->ulpi_fs_ls > 0) ||
- hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
- /* Full speed PHY */
- val = HCFG_FSLSPCLKSEL_48_MHZ;
- } else {
- /* High speed PHY running at full speed or high speed */
- val = HCFG_FSLSPCLKSEL_30_60_MHZ;
- }
-
- dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
- hcfg = dwc2_readl(hsotg->regs + HCFG);
- hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
- hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
-}
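
dwc2_init_fs_ls_pclk_sel() is a textbook read-modify-write of a multi-bit register field: clear the field via its mask, then OR in the new value shifted into position. The same idiom recurs throughout this file; a generic standalone sketch (names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Replace the field selected by mask/shift inside reg with val */
static uint32_t field_replace(uint32_t reg, uint32_t mask,
                              unsigned int shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t hcfg = 0xffffffff;     /* pretend the register read all-ones */

        /* Set a 2-bit field at bits [1:0] to 1, leaving the rest untouched */
        printf("%#x\n", (unsigned)field_replace(hcfg, 0x3, 0, 1));
        return 0;
}
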
-
/*
* Do a soft reset of the core. Be careful with this because it
* resets all the internal state machines of the core.
@@ -646,1644 +407,6 @@ int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg)
return 0;
}
-static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
-{
- u32 usbcfg, i2cctl;
- int retval = 0;
-
- /*
- * core_init() is now called on every switch so only call the
- * following for the first time through
- */
- if (select_phy) {
- dev_dbg(hsotg->dev, "FS PHY selected\n");
-
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
- if (!(usbcfg & GUSBCFG_PHYSEL)) {
- usbcfg |= GUSBCFG_PHYSEL;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
-
- /* Reset after a PHY select */
- retval = dwc2_core_reset_and_force_dr_mode(hsotg);
-
- if (retval) {
- dev_err(hsotg->dev,
- "%s: Reset failed, aborting", __func__);
- return retval;
- }
- }
- }
-
- /*
- * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
- * do this on HNP Dev/Host mode switches (done in dev_init and
- * host_init).
- */
- if (dwc2_is_host_mode(hsotg))
- dwc2_init_fs_ls_pclk_sel(hsotg);
-
- if (hsotg->core_params->i2c_enable > 0) {
- dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
-
- /* Program GUSBCFG.OtgUtmiFsSel to I2C */
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
- usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
-
- /* Program GI2CCTL.I2CEn */
- i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
- i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
- i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
- i2cctl &= ~GI2CCTL_I2CEN;
- dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
- i2cctl |= GI2CCTL_I2CEN;
- dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
- }
-
- return retval;
-}
-
-static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
-{
- u32 usbcfg, usbcfg_old;
- int retval = 0;
-
- if (!select_phy)
- return 0;
-
- usbcfg = usbcfg_old = dwc2_readl(hsotg->regs + GUSBCFG);
-
- /*
- * HS PHY parameters. These parameters are preserved during soft reset
- * so only program the first time. Do a soft reset immediately after
- * setting phyif.
- */
- switch (hsotg->core_params->phy_type) {
- case DWC2_PHY_TYPE_PARAM_ULPI:
- /* ULPI interface */
- dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
- usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
- usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
- if (hsotg->core_params->phy_ulpi_ddr > 0)
- usbcfg |= GUSBCFG_DDRSEL;
- break;
- case DWC2_PHY_TYPE_PARAM_UTMI:
- /* UTMI+ interface */
- dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
- usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
- if (hsotg->core_params->phy_utmi_width == 16)
- usbcfg |= GUSBCFG_PHYIF16;
- break;
- default:
- dev_err(hsotg->dev, "FS PHY selected at HS!\n");
- break;
- }
-
- if (usbcfg != usbcfg_old) {
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
-
- /* Reset after setting the PHY parameters */
- retval = dwc2_core_reset_and_force_dr_mode(hsotg);
- if (retval) {
- dev_err(hsotg->dev,
- "%s: Reset failed, aborting", __func__);
- return retval;
- }
- }
-
- return retval;
-}
-
-static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
-{
- u32 usbcfg;
- int retval = 0;
-
- if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
- hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
- /* If FS mode with FS PHY */
- retval = dwc2_fs_phy_init(hsotg, select_phy);
- if (retval)
- return retval;
- } else {
- /* High speed PHY */
- retval = dwc2_hs_phy_init(hsotg, select_phy);
- if (retval)
- return retval;
- }
-
- if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
- hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
- hsotg->core_params->ulpi_fs_ls > 0) {
- dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
- usbcfg |= GUSBCFG_ULPI_FS_LS;
- usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
- } else {
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
- usbcfg &= ~GUSBCFG_ULPI_FS_LS;
- usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
- }
-
- return retval;
-}
-
-static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
-{
- u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
-
- switch (hsotg->hw_params.arch) {
- case GHWCFG2_EXT_DMA_ARCH:
- dev_err(hsotg->dev, "External DMA Mode not supported\n");
- return -EINVAL;
-
- case GHWCFG2_INT_DMA_ARCH:
- dev_dbg(hsotg->dev, "Internal DMA Mode\n");
- if (hsotg->core_params->ahbcfg != -1) {
- ahbcfg &= GAHBCFG_CTRL_MASK;
- ahbcfg |= hsotg->core_params->ahbcfg &
- ~GAHBCFG_CTRL_MASK;
- }
- break;
-
- case GHWCFG2_SLAVE_ONLY_ARCH:
- default:
- dev_dbg(hsotg->dev, "Slave Only Mode\n");
- break;
- }
-
- dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
- hsotg->core_params->dma_enable,
- hsotg->core_params->dma_desc_enable);
-
- if (hsotg->core_params->dma_enable > 0) {
- if (hsotg->core_params->dma_desc_enable > 0)
- dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
- else
- dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
- } else {
- dev_dbg(hsotg->dev, "Using Slave mode\n");
- hsotg->core_params->dma_desc_enable = 0;
- }
-
- if (hsotg->core_params->dma_enable > 0)
- ahbcfg |= GAHBCFG_DMA_EN;
-
- dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
-
- return 0;
-}
-
-static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
-{
- u32 usbcfg;
-
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
- usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
-
- switch (hsotg->hw_params.op_mode) {
- case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- if (hsotg->core_params->otg_cap ==
- DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
- usbcfg |= GUSBCFG_HNPCAP;
- if (hsotg->core_params->otg_cap !=
- DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
- usbcfg |= GUSBCFG_SRPCAP;
- break;
-
- case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- if (hsotg->core_params->otg_cap !=
- DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
- usbcfg |= GUSBCFG_SRPCAP;
- break;
-
- case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
- case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
- default:
- break;
- }
-
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
-}
-
-/**
- * dwc2_core_init() - Initializes the DWC_otg controller registers and
- * prepares the core for device mode or host mode operation
- *
- * @hsotg: Programming view of the DWC_otg controller
- * @initial_setup: If true then this is the first init for this instance.
- */
-int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
-{
- u32 usbcfg, otgctl;
- int retval;
-
- dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
-
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
-
- /* Set ULPI External VBUS bit if needed */
- usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
- if (hsotg->core_params->phy_ulpi_ext_vbus ==
- DWC2_PHY_ULPI_EXTERNAL_VBUS)
- usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
-
- /* Set external TS Dline pulsing bit if needed */
- usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
- if (hsotg->core_params->ts_dline > 0)
- usbcfg |= GUSBCFG_TERMSELDLPULSE;
-
- dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
-
- /*
- * Reset the Controller
- *
- * We only need to reset the controller if this is a re-init.
- * For the first init we know for sure that earlier code reset us (it
- * needed to in order to properly detect various parameters).
- */
- if (!initial_setup) {
- retval = dwc2_core_reset_and_force_dr_mode(hsotg);
- if (retval) {
- dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
- __func__);
- return retval;
- }
- }
-
- /*
- * This needs to happen in FS mode before any other programming occurs
- */
- retval = dwc2_phy_init(hsotg, initial_setup);
- if (retval)
- return retval;
-
- /* Program the GAHBCFG Register */
- retval = dwc2_gahbcfg_init(hsotg);
- if (retval)
- return retval;
-
- /* Program the GUSBCFG register */
- dwc2_gusbcfg_init(hsotg);
-
- /* Program the GOTGCTL register */
- otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
- otgctl &= ~GOTGCTL_OTGVER;
- if (hsotg->core_params->otg_ver > 0)
- otgctl |= GOTGCTL_OTGVER;
- dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
- dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
-
- /* Clear the SRP success bit for FS-I2c */
- hsotg->srp_success = 0;
-
- /* Enable common interrupts */
- dwc2_enable_common_interrupts(hsotg);
-
- /*
- * Do device or host initialization based on mode during PCD and
- * HCD initialization
- */
- if (dwc2_is_host_mode(hsotg)) {
- dev_dbg(hsotg->dev, "Host Mode\n");
- hsotg->op_state = OTG_STATE_A_HOST;
- } else {
- dev_dbg(hsotg->dev, "Device Mode\n");
- hsotg->op_state = OTG_STATE_B_PERIPHERAL;
- }
-
- return 0;
-}
-
-/**
- * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
- *
- * @hsotg: Programming view of DWC_otg controller
- */
-void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
-{
- u32 intmsk;
-
- dev_dbg(hsotg->dev, "%s()\n", __func__);
-
- /* Disable all interrupts */
- dwc2_writel(0, hsotg->regs + GINTMSK);
- dwc2_writel(0, hsotg->regs + HAINTMSK);
-
- /* Enable the common interrupts */
- dwc2_enable_common_interrupts(hsotg);
-
- /* Enable host mode interrupts without disturbing common interrupts */
- intmsk = dwc2_readl(hsotg->regs + GINTMSK);
- intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
-}
-
-/**
- * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
- *
- * @hsotg: Programming view of DWC_otg controller
- */
-void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
-{
- u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
-
- /* Disable host mode interrupts without disturbing common interrupts */
- intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
- GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
-}
-
-/*
- * dwc2_calculate_dynamic_fifo() - Calculates the default fifo size
- * for systems whose total FIFO depth is smaller than the default
- * RX + TX FIFO size.
- *
- * @hsotg: Programming view of DWC_otg controller
- */
-static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_core_params *params = hsotg->core_params;
- struct dwc2_hw_params *hw = &hsotg->hw_params;
- u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
-
- total_fifo_size = hw->total_fifo_size;
- rxfsiz = params->host_rx_fifo_size;
- nptxfsiz = params->host_nperio_tx_fifo_size;
- ptxfsiz = params->host_perio_tx_fifo_size;
-
- /*
- * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
- * allocation with support for high bandwidth endpoints. Synopsys
- * defines MPS (max packet size) as 1024 for a periodic EP and 512
- * for a non-periodic EP.
- */
- if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
- /*
- * For Buffer DMA mode/Scatter Gather DMA mode
- * 2 * ((Largest Packet size / 4) + 1 + 1) + n
- * with n = number of host channels.
- * 2 * ((1024/4) + 2) = 516
- */
- rxfsiz = 516 + hw->host_channels;
-
- /*
- * min non-periodic tx fifo depth
- * 2 * (largest non-periodic USB packet used / 4)
- * 2 * (512/4) = 256
- */
- nptxfsiz = 256;
-
- /*
- * min periodic tx fifo depth
- * (largest packet size*MC)/4
- * (1024 * 3)/4 = 768
- */
- ptxfsiz = 768;
-
- params->host_rx_fifo_size = rxfsiz;
- params->host_nperio_tx_fifo_size = nptxfsiz;
- params->host_perio_tx_fifo_size = ptxfsiz;
- }
-
- /*
- * If the summation of RX, NPTX and PTX fifo sizes is still
- * bigger than the total_fifo_size, then we have a problem.
- *
- * We won't be able to allocate as many endpoints. Right now,
- * we're just printing an error message, but ideally this FIFO
- * allocation algorithm would be improved in the future.
- *
- * FIXME improve this FIFO allocation algorithm.
- */
- if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
- dev_err(hsotg->dev, "invalid fifo sizes\n");
-}
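
To make the arithmetic in the comments above concrete, here is the fallback computation as a standalone check (host_channels = 16 is just an example value; all sizes are in 32-bit words):

#include <stdio.h>

int main(void)
{
        int host_channels = 16;                                 /* example */
        int rxfsiz = 2 * ((1024 / 4) + 2) + host_channels;      /* 516 + n */
        int nptxfsiz = 2 * (512 / 4);                           /* 256 */
        int ptxfsiz = (1024 * 3) / 4;                           /* 768 */

        printf("rx=%d nptx=%d ptx=%d total=%d words\n",
               rxfsiz, nptxfsiz, ptxfsiz, rxfsiz + nptxfsiz + ptxfsiz);
        return 0;
}
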
-
-static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
-{
- struct dwc2_core_params *params = hsotg->core_params;
- u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
-
- if (!params->enable_dynamic_fifo)
- return;
-
- dwc2_calculate_dynamic_fifo(hsotg);
-
- /* Rx FIFO */
- grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
- dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
- grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
- grxfsiz |= params->host_rx_fifo_size <<
- GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
- dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
- dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + GRXFSIZ));
-
- /* Non-periodic Tx FIFO */
- dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + GNPTXFSIZ));
- nptxfsiz = params->host_nperio_tx_fifo_size <<
- FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
- nptxfsiz |= params->host_rx_fifo_size <<
- FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
- dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
- dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + GNPTXFSIZ));
-
- /* Periodic Tx FIFO */
- dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + HPTXFSIZ));
- hptxfsiz = params->host_perio_tx_fifo_size <<
- FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
- hptxfsiz |= (params->host_rx_fifo_size +
- params->host_nperio_tx_fifo_size) <<
- FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
- dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
- dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
- dwc2_readl(hsotg->regs + HPTXFSIZ));
-
- if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
- hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
- /*
- * Global DFIFOCFG calculation for Host mode -
- * include RxFIFO, NPTXFIFO and HPTXFIFO
- */
- dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
- dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
- dfifocfg |= (params->host_rx_fifo_size +
- params->host_nperio_tx_fifo_size +
- params->host_perio_tx_fifo_size) <<
- GDFIFOCFG_EPINFOBASE_SHIFT &
- GDFIFOCFG_EPINFOBASE_MASK;
- dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
- }
-}
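
The writes above pack the three FIFOs back to back in FIFO RAM: RX at offset 0, the non-periodic TX FIFO starting where RX ends, and the periodic TX FIFO after both, with GDFIFOCFG.EPINFOBASE pointing just past all three. A quick sketch of the start-address math (depths in 32-bit words, values illustrative):

#include <stdio.h>

int main(void)
{
        unsigned int rx = 516, nptx = 256, ptx = 768;   /* example depths */

        printf("RX   start %4u depth %u\n", 0u, rx);
        printf("NPTX start %4u depth %u\n", rx, nptx);
        printf("PTX  start %4u depth %u\n", rx + nptx, ptx);
        printf("EPINFOBASE %4u\n", rx + nptx + ptx);
        return 0;
}
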
-
-/**
- * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
- * Host mode
- *
- * @hsotg: Programming view of DWC_otg controller
- *
- * This function flushes the Tx and Rx FIFOs and flushes any entries in the
- * request queues. Host channels are reset to ensure that they are ready for
- * performing transfers.
- */
-void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
-{
- u32 hcfg, hfir, otgctl;
-
- dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
-
- /* Restart the Phy Clock */
- dwc2_writel(0, hsotg->regs + PCGCTL);
-
- /* Initialize Host Configuration Register */
- dwc2_init_fs_ls_pclk_sel(hsotg);
- if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
- hcfg = dwc2_readl(hsotg->regs + HCFG);
- hcfg |= HCFG_FSLSSUPP;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
- }
-
- /*
- * This bit allows dynamic reloading of the HFIR register during
- * runtime. This bit needs to be programmed during initial configuration
- * and its value must not be changed during runtime.
- */
- if (hsotg->core_params->reload_ctl > 0) {
- hfir = dwc2_readl(hsotg->regs + HFIR);
- hfir |= HFIR_RLDCTRL;
- dwc2_writel(hfir, hsotg->regs + HFIR);
- }
-
- if (hsotg->core_params->dma_desc_enable > 0) {
- u32 op_mode = hsotg->hw_params.op_mode;
- if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
- !hsotg->hw_params.dma_desc_enable ||
- op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
- op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
- op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
- dev_err(hsotg->dev,
- "Hardware does not support descriptor DMA mode -\n");
- dev_err(hsotg->dev,
- "falling back to buffer DMA mode.\n");
- hsotg->core_params->dma_desc_enable = 0;
- } else {
- hcfg = dwc2_readl(hsotg->regs + HCFG);
- hcfg |= HCFG_DESCDMA;
- dwc2_writel(hcfg, hsotg->regs + HCFG);
- }
- }
-
- /* Configure data FIFO sizes */
- dwc2_config_fifos(hsotg);
-
- /* TODO - check this */
- /* Clear Host Set HNP Enable in the OTG Control Register */
- otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
- otgctl &= ~GOTGCTL_HSTSETHNPEN;
- dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
-
- /* Make sure the FIFOs are flushed */
- dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
- dwc2_flush_rx_fifo(hsotg);
-
- /* Clear Host Set HNP Enable in the OTG Control Register */
- otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
- otgctl &= ~GOTGCTL_HSTSETHNPEN;
- dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
-
- if (hsotg->core_params->dma_desc_enable <= 0) {
- int num_channels, i;
- u32 hcchar;
-
- /* Flush out any leftover queued requests */
- num_channels = hsotg->core_params->host_channels;
- for (i = 0; i < num_channels; i++) {
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
- hcchar &= ~HCCHAR_CHENA;
- hcchar |= HCCHAR_CHDIS;
- hcchar &= ~HCCHAR_EPDIR;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
- }
-
- /* Halt all channels to put them into a known state */
- for (i = 0; i < num_channels; i++) {
- int count = 0;
-
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
- hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
- hcchar &= ~HCCHAR_EPDIR;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
- dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
- __func__, i);
- do {
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
- if (++count > 1000) {
- dev_err(hsotg->dev,
- "Unable to clear enable on channel %d\n",
- i);
- break;
- }
- udelay(1);
- } while (hcchar & HCCHAR_CHENA);
- }
- }
-
- /* Turn on the vbus power */
- dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
- if (hsotg->op_state == OTG_STATE_A_HOST) {
- u32 hprt0 = dwc2_read_hprt0(hsotg);
-
- dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
- !!(hprt0 & HPRT0_PWR));
- if (!(hprt0 & HPRT0_PWR)) {
- hprt0 |= HPRT0_PWR;
- dwc2_writel(hprt0, hsotg->regs + HPRT0);
- }
- }
-
- dwc2_enable_host_interrupts(hsotg);
-}
-
-static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan)
-{
- u32 hcintmsk = HCINTMSK_CHHLTD;
-
- switch (chan->ep_type) {
- case USB_ENDPOINT_XFER_CONTROL:
- case USB_ENDPOINT_XFER_BULK:
- dev_vdbg(hsotg->dev, "control/bulk\n");
- hcintmsk |= HCINTMSK_XFERCOMPL;
- hcintmsk |= HCINTMSK_STALL;
- hcintmsk |= HCINTMSK_XACTERR;
- hcintmsk |= HCINTMSK_DATATGLERR;
- if (chan->ep_is_in) {
- hcintmsk |= HCINTMSK_BBLERR;
- } else {
- hcintmsk |= HCINTMSK_NAK;
- hcintmsk |= HCINTMSK_NYET;
- if (chan->do_ping)
- hcintmsk |= HCINTMSK_ACK;
- }
-
- if (chan->do_split) {
- hcintmsk |= HCINTMSK_NAK;
- if (chan->complete_split)
- hcintmsk |= HCINTMSK_NYET;
- else
- hcintmsk |= HCINTMSK_ACK;
- }
-
- if (chan->error_state)
- hcintmsk |= HCINTMSK_ACK;
- break;
-
- case USB_ENDPOINT_XFER_INT:
- if (dbg_perio())
- dev_vdbg(hsotg->dev, "intr\n");
- hcintmsk |= HCINTMSK_XFERCOMPL;
- hcintmsk |= HCINTMSK_NAK;
- hcintmsk |= HCINTMSK_STALL;
- hcintmsk |= HCINTMSK_XACTERR;
- hcintmsk |= HCINTMSK_DATATGLERR;
- hcintmsk |= HCINTMSK_FRMOVRUN;
-
- if (chan->ep_is_in)
- hcintmsk |= HCINTMSK_BBLERR;
- if (chan->error_state)
- hcintmsk |= HCINTMSK_ACK;
- if (chan->do_split) {
- if (chan->complete_split)
- hcintmsk |= HCINTMSK_NYET;
- else
- hcintmsk |= HCINTMSK_ACK;
- }
- break;
-
- case USB_ENDPOINT_XFER_ISOC:
- if (dbg_perio())
- dev_vdbg(hsotg->dev, "isoc\n");
- hcintmsk |= HCINTMSK_XFERCOMPL;
- hcintmsk |= HCINTMSK_FRMOVRUN;
- hcintmsk |= HCINTMSK_ACK;
-
- if (chan->ep_is_in) {
- hcintmsk |= HCINTMSK_XACTERR;
- hcintmsk |= HCINTMSK_BBLERR;
- }
- break;
- default:
- dev_err(hsotg->dev, "## Unknown EP type ##\n");
- break;
- }
-
- dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
-}
-
-static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan)
-{
- u32 hcintmsk = HCINTMSK_CHHLTD;
-
- /*
- * For Descriptor DMA mode core halts the channel on AHB error.
- * Interrupt is not required.
- */
- if (hsotg->core_params->dma_desc_enable <= 0) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "desc DMA disabled\n");
- hcintmsk |= HCINTMSK_AHBERR;
- } else {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "desc DMA enabled\n");
- if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
- hcintmsk |= HCINTMSK_XFERCOMPL;
- }
-
- if (chan->error_state && !chan->do_split &&
- chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "setting ACK\n");
- hcintmsk |= HCINTMSK_ACK;
- if (chan->ep_is_in) {
- hcintmsk |= HCINTMSK_DATATGLERR;
- if (chan->ep_type != USB_ENDPOINT_XFER_INT)
- hcintmsk |= HCINTMSK_NAK;
- }
- }
-
- dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
-}
-
-static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan)
-{
- u32 intmsk;
-
- if (hsotg->core_params->dma_enable > 0) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "DMA enabled\n");
- dwc2_hc_enable_dma_ints(hsotg, chan);
- } else {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "DMA disabled\n");
- dwc2_hc_enable_slave_ints(hsotg, chan);
- }
-
- /* Enable the top level host channel interrupt */
- intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
- intmsk |= 1 << chan->hc_num;
- dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
-
- /* Make sure host channel interrupts are enabled */
- intmsk = dwc2_readl(hsotg->regs + GINTMSK);
- intmsk |= GINTSTS_HCHINT;
- dwc2_writel(intmsk, hsotg->regs + GINTMSK);
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
-}
-
-/**
- * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
- * a specific endpoint
- *
- * @hsotg: Programming view of DWC_otg controller
- * @chan: Information needed to initialize the host channel
- *
- * The HCCHARn register is set up with the characteristics specified in chan.
- * Host channel interrupts that may need to be serviced while this transfer is
- * in progress are enabled.
- */
-void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
-{
- u8 hc_num = chan->hc_num;
- u32 hcintmsk;
- u32 hcchar;
- u32 hcsplt = 0;
-
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "%s()\n", __func__);
-
- /* Clear old interrupt conditions for this host channel */
- hcintmsk = 0xffffffff;
- hcintmsk &= ~HCINTMSK_RESERVED14_31;
- dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));
-
- /* Enable channel interrupts required for this transfer */
- dwc2_hc_enable_ints(hsotg, chan);
-
- /*
- * Program the HCCHARn register with the endpoint characteristics for
- * the current transfer
- */
- hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
- hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
- if (chan->ep_is_in)
- hcchar |= HCCHAR_EPDIR;
- if (chan->speed == USB_SPEED_LOW)
- hcchar |= HCCHAR_LSPDDEV;
- hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
- hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
- if (dbg_hc(chan)) {
- dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
- hc_num, hcchar);
-
- dev_vdbg(hsotg->dev, "%s: Channel %d\n",
- __func__, hc_num);
- dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
- chan->dev_addr);
- dev_vdbg(hsotg->dev, " Ep Num: %d\n",
- chan->ep_num);
- dev_vdbg(hsotg->dev, " Is In: %d\n",
- chan->ep_is_in);
- dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
- chan->speed == USB_SPEED_LOW);
- dev_vdbg(hsotg->dev, " Ep Type: %d\n",
- chan->ep_type);
- dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
- chan->max_packet);
- }
-
- /* Program the HCSPLT register for SPLITs */
- if (chan->do_split) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev,
- "Programming HC %d with split --> %s\n",
- hc_num,
- chan->complete_split ? "CSPLIT" : "SSPLIT");
- if (chan->complete_split)
- hcsplt |= HCSPLT_COMPSPLT;
- hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
- HCSPLT_XACTPOS_MASK;
- hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
- HCSPLT_HUBADDR_MASK;
- hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
- HCSPLT_PRTADDR_MASK;
- if (dbg_hc(chan)) {
- dev_vdbg(hsotg->dev, " comp split %d\n",
- chan->complete_split);
- dev_vdbg(hsotg->dev, " xact pos %d\n",
- chan->xact_pos);
- dev_vdbg(hsotg->dev, " hub addr %d\n",
- chan->hub_addr);
- dev_vdbg(hsotg->dev, " hub port %d\n",
- chan->hub_port);
- dev_vdbg(hsotg->dev, " is_in %d\n",
- chan->ep_is_in);
- dev_vdbg(hsotg->dev, " Max Pkt %d\n",
- chan->max_packet);
- dev_vdbg(hsotg->dev, " xferlen %d\n",
- chan->xfer_len);
- }
- }
-
- dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
-}
-
-/**
- * dwc2_hc_halt() - Attempts to halt a host channel
- *
- * @hsotg: Controller register interface
- * @chan: Host channel to halt
- * @halt_status: Reason for halting the channel
- *
- * This function should only be called in Slave mode or to abort a transfer in
- * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
- * controller halts the channel when the transfer is complete or a condition
- * occurs that requires application intervention.
- *
- * In slave mode, checks for a free request queue entry, then sets the Channel
- * Enable and Channel Disable bits of the Host Channel Characteristics
- * register of the specified channel to initiate the halt. If there is no free
- * request queue entry, sets only the Channel Disable bit of the HCCHARn
- * register to flush requests for this channel. In the latter case, sets a
- * flag to indicate that the host channel needs to be halted when a request
- * queue slot is open.
- *
- * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
- * HCCHARn register. The controller ensures there is space in the request
- * queue before submitting the halt request.
- *
- * Some time may elapse before the core flushes any posted requests for this
- * host channel and halts. The Channel Halted interrupt handler completes the
- * deactivation of the host channel.
- */
-void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
- enum dwc2_halt_status halt_status)
-{
- u32 nptxsts, hptxsts, hcchar;
-
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "%s()\n", __func__);
- if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
- dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
-
- if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
- halt_status == DWC2_HC_XFER_AHB_ERR) {
- /*
- * Disable all channel interrupts except Ch Halted. The QTD
- * and QH state associated with this transfer has been cleared
- * (in the case of URB_DEQUEUE), so the channel needs to be
- * shut down carefully to prevent crashes.
- */
- u32 hcintmsk = HCINTMSK_CHHLTD;
-
- dev_vdbg(hsotg->dev, "dequeue/error\n");
- dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
-
- /*
- * Make sure no other interrupts besides halt are currently
- * pending. Handling another interrupt could cause a crash due
- * to the QTD and QH state.
- */
- dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
-
- /*
- * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
- * even if the channel was already halted for some other
- * reason
- */
- chan->halt_status = halt_status;
-
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
- if (!(hcchar & HCCHAR_CHENA)) {
- /*
- * The channel is either already halted or it hasn't
- * started yet. In DMA mode, the transfer may halt if
- * it finishes normally or a condition occurs that
- * requires driver intervention. Don't want to halt
- * the channel again. In either Slave or DMA mode,
- * it's possible that the transfer has been assigned
- * to a channel, but not started yet when an URB is
- * dequeued. Don't want to halt a channel that hasn't
- * started yet.
- */
- return;
- }
- }
- if (chan->halt_pending) {
- /*
- * A halt has already been issued for this channel. This might
- * happen when a transfer is aborted by a higher level in
- * the stack.
- */
- dev_vdbg(hsotg->dev,
- "*** %s: Channel %d, chan->halt_pending already set ***\n",
- __func__, chan->hc_num);
- return;
- }
-
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
-
- /* No need to set the bit in DDMA for disabling the channel */
- /* TODO check it everywhere channel is disabled */
- if (hsotg->core_params->dma_desc_enable <= 0) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "desc DMA disabled\n");
- hcchar |= HCCHAR_CHENA;
- } else {
- if (dbg_hc(chan))
- dev_dbg(hsotg->dev, "desc DMA enabled\n");
- }
- hcchar |= HCCHAR_CHDIS;
-
- if (hsotg->core_params->dma_enable <= 0) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "DMA not enabled\n");
- hcchar |= HCCHAR_CHENA;
-
- /* Check for space in the request queue to issue the halt */
- if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
- chan->ep_type == USB_ENDPOINT_XFER_BULK) {
- dev_vdbg(hsotg->dev, "control/bulk\n");
- nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
- if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
- dev_vdbg(hsotg->dev, "Disabling channel\n");
- hcchar &= ~HCCHAR_CHENA;
- }
- } else {
- if (dbg_perio())
- dev_vdbg(hsotg->dev, "isoc/intr\n");
- hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
- if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
- hsotg->queuing_high_bandwidth) {
- if (dbg_perio())
- dev_vdbg(hsotg->dev, "Disabling channel\n");
- hcchar &= ~HCCHAR_CHENA;
- }
- }
- } else {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "DMA enabled\n");
- }
-
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
- chan->halt_status = halt_status;
-
- if (hcchar & HCCHAR_CHENA) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "Channel enabled\n");
- chan->halt_pending = 1;
- chan->halt_on_queue = 0;
- } else {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "Channel disabled\n");
- chan->halt_on_queue = 1;
- }
-
- if (dbg_hc(chan)) {
- dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
- chan->hc_num);
- dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
- hcchar);
- dev_vdbg(hsotg->dev, " halt_pending: %d\n",
- chan->halt_pending);
- dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
- chan->halt_on_queue);
- dev_vdbg(hsotg->dev, " halt_status: %d\n",
- chan->halt_status);
- }
-}
-
-/**
- * dwc2_hc_cleanup() - Clears the transfer state for a host channel
- *
- * @hsotg: Programming view of DWC_otg controller
- * @chan: Identifies the host channel to clean up
- *
- * This function is normally called after a transfer is done and the host
- * channel is being released
- */
-void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
-{
- u32 hcintmsk;
-
- chan->xfer_started = 0;
-
- /*
- * Clear channel interrupt enables and any unhandled channel interrupt
- * conditions
- */
- dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
- hcintmsk = 0xffffffff;
- hcintmsk &= ~HCINTMSK_RESERVED14_31;
- dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
-}
-
-/**
- * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
- * which frame a periodic transfer should occur
- *
- * @hsotg: Programming view of DWC_otg controller
- * @chan: Identifies the host channel to set up and its properties
- * @hcchar: Current value of the HCCHAR register for the specified host channel
- *
- * This function has no effect on non-periodic transfers
- */
-static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan, u32 *hcchar)
-{
- if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
- chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
- /* 1 if _next_ frame is odd, 0 if it's even */
- if (!(dwc2_hcd_get_frame_number(hsotg) & 0x1))
- *hcchar |= HCCHAR_ODDFRM;
- }
-}
-
-static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
-{
- /* Set up the initial PID for the transfer */
- if (chan->speed == USB_SPEED_HIGH) {
- if (chan->ep_is_in) {
- if (chan->multi_count == 1)
- chan->data_pid_start = DWC2_HC_PID_DATA0;
- else if (chan->multi_count == 2)
- chan->data_pid_start = DWC2_HC_PID_DATA1;
- else
- chan->data_pid_start = DWC2_HC_PID_DATA2;
- } else {
- if (chan->multi_count == 1)
- chan->data_pid_start = DWC2_HC_PID_DATA0;
- else
- chan->data_pid_start = DWC2_HC_PID_MDATA;
- }
- } else {
- chan->data_pid_start = DWC2_HC_PID_DATA0;
- }
-}
-
-/**
- * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
- * the Host Channel
- *
- * @hsotg: Programming view of DWC_otg controller
- * @chan: Information needed to initialize the host channel
- *
- * This function should only be called in Slave mode. For a channel associated
- * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
- * associated with a periodic EP, the periodic Tx FIFO is written.
- *
- * Upon return the xfer_buf and xfer_count fields in chan are incremented by
- * the number of bytes written to the Tx FIFO.
- */
-static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan)
-{
- u32 i;
- u32 remaining_count;
- u32 byte_count;
- u32 dword_count;
- u32 __iomem *data_fifo;
- u32 *data_buf = (u32 *)chan->xfer_buf;
-
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "%s()\n", __func__);
-
- data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
-
- remaining_count = chan->xfer_len - chan->xfer_count;
- if (remaining_count > chan->max_packet)
- byte_count = chan->max_packet;
- else
- byte_count = remaining_count;
-
- dword_count = (byte_count + 3) / 4;
-
- if (((unsigned long)data_buf & 0x3) == 0) {
- /* xfer_buf is DWORD aligned */
- for (i = 0; i < dword_count; i++, data_buf++)
- dwc2_writel(*data_buf, data_fifo);
- } else {
- /* xfer_buf is not DWORD aligned */
- for (i = 0; i < dword_count; i++, data_buf++) {
- u32 data = data_buf[0] | data_buf[1] << 8 |
- data_buf[2] << 16 | data_buf[3] << 24;
- dwc2_writel(data, data_fifo);
- }
- }
-
- chan->xfer_count += byte_count;
- chan->xfer_buf += byte_count;
-}
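
The unaligned branch of dwc2_hc_write_packet() above assembles each FIFO word from four consecutive bytes, least significant byte first. The same packing as a standalone function, using an explicit byte pointer for clarity:

#include <stdint.h>
#include <stdio.h>

/* Pack the next four bytes of buf into one little-endian 32-bit word */
static uint32_t pack_le32(const uint8_t *buf)
{
        return (uint32_t)buf[0] | (uint32_t)buf[1] << 8 |
               (uint32_t)buf[2] << 16 | (uint32_t)buf[3] << 24;
}

int main(void)
{
        uint8_t payload[4] = { 0x11, 0x22, 0x33, 0x44 };

        printf("%#x\n", (unsigned)pack_le32(payload)); /* 0x44332211 */
        return 0;
}
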
-
-/**
- * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
- * channel and starts the transfer
- *
- * @hsotg: Programming view of DWC_otg controller
- * @chan: Information needed to initialize the host channel. The xfer_len value
- * may be reduced to accommodate the max widths of the XferSize and
- * PktCnt fields in the HCTSIZn register. The multi_count value may be
- * changed to reflect the final xfer_len value.
- *
- * This function may be called in either Slave mode or DMA mode. In Slave mode,
- * the caller must ensure that there is sufficient space in the request queue
- * and Tx Data FIFO.
- *
- * For an OUT transfer in Slave mode, it loads a data packet into the
- * appropriate FIFO. If necessary, additional data packets are loaded in the
- * Host ISR.
- *
- * For an IN transfer in Slave mode, a data packet is requested. The data
- * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
- * additional data packets are requested in the Host ISR.
- *
- * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
- * register along with a packet count of 1 and the channel is enabled. This
- * causes a single PING transaction to occur. Other fields in HCTSIZ are
- * simply set to 0 since no data transfer occurs in this case.
- *
- * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
- * all the information required to perform the subsequent data transfer. In
- * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
- * controller performs the entire PING protocol, then starts the data
- * transfer.
- */
-void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan)
-{
- u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
- u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
- u32 hcchar;
- u32 hctsiz = 0;
- u16 num_packets;
- u32 ec_mc;
-
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "%s()\n", __func__);
-
- if (chan->do_ping) {
- if (hsotg->core_params->dma_enable <= 0) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "ping, no DMA\n");
- dwc2_hc_do_ping(hsotg, chan);
- chan->xfer_started = 1;
- return;
- } else {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "ping, DMA\n");
- hctsiz |= TSIZ_DOPNG;
- }
- }
-
- if (chan->do_split) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "split\n");
- num_packets = 1;
-
- if (chan->complete_split && !chan->ep_is_in)
- /*
- * For CSPLIT OUT Transfer, set the size to 0 so the
- * core doesn't expect any data written to the FIFO
- */
- chan->xfer_len = 0;
- else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
- chan->xfer_len = chan->max_packet;
- else if (!chan->ep_is_in && chan->xfer_len > 188)
- chan->xfer_len = 188;
-
- hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
- TSIZ_XFERSIZE_MASK;
-
- /* For split set ec_mc for immediate retries */
- if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
- chan->ep_type == USB_ENDPOINT_XFER_ISOC)
- ec_mc = 3;
- else
- ec_mc = 1;
- } else {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "no split\n");
- /*
- * Ensure that the transfer length and packet count will fit
- * in the widths allocated for them in the HCTSIZn register
- */
- if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
- chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
- /*
- * Make sure the transfer size is no larger than one
- * (micro)frame's worth of data. (A check was done
- * when the periodic transfer was accepted to ensure
- * that a (micro)frame's worth of data can be
- * programmed into a channel.)
- */
- u32 max_periodic_len =
- chan->multi_count * chan->max_packet;
-
- if (chan->xfer_len > max_periodic_len)
- chan->xfer_len = max_periodic_len;
- } else if (chan->xfer_len > max_hc_xfer_size) {
- /*
- * Make sure that xfer_len is a multiple of max packet
- * size
- */
- chan->xfer_len =
- max_hc_xfer_size - chan->max_packet + 1;
- }
-
- if (chan->xfer_len > 0) {
- num_packets = (chan->xfer_len + chan->max_packet - 1) /
- chan->max_packet;
- if (num_packets > max_hc_pkt_count) {
- num_packets = max_hc_pkt_count;
- chan->xfer_len = num_packets * chan->max_packet;
- }
- } else {
- /* Need 1 packet for transfer length of 0 */
- num_packets = 1;
- }
-
- if (chan->ep_is_in)
- /*
- * Always program an integral # of max packets for IN
- * transfers
- */
- chan->xfer_len = num_packets * chan->max_packet;
-
- if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
- chan->ep_type == USB_ENDPOINT_XFER_ISOC)
- /*
- * Make sure that the multi_count field matches the
- * actual transfer length
- */
- chan->multi_count = num_packets;
-
- if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
- dwc2_set_pid_isoc(chan);
-
- hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
- TSIZ_XFERSIZE_MASK;
-
- /* The ec_mc gets the multi_count for non-split */
- ec_mc = chan->multi_count;
- }
-
- chan->start_pkt_count = num_packets;
- hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
- hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
- TSIZ_SC_MC_PID_MASK;
- dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
- if (dbg_hc(chan)) {
- dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
- hctsiz, chan->hc_num);
-
- dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
- chan->hc_num);
- dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
- (hctsiz & TSIZ_XFERSIZE_MASK) >>
- TSIZ_XFERSIZE_SHIFT);
- dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
- (hctsiz & TSIZ_PKTCNT_MASK) >>
- TSIZ_PKTCNT_SHIFT);
- dev_vdbg(hsotg->dev, " Start PID: %d\n",
- (hctsiz & TSIZ_SC_MC_PID_MASK) >>
- TSIZ_SC_MC_PID_SHIFT);
- }
-
- if (hsotg->core_params->dma_enable > 0) {
- dma_addr_t dma_addr;
-
- if (chan->align_buf) {
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "align_buf\n");
- dma_addr = chan->align_buf;
- } else {
- dma_addr = chan->xfer_dma;
- }
- dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
- (unsigned long)dma_addr, chan->hc_num);
- }
-
- /* Start the split */
- if (chan->do_split) {
- u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
-
- hcsplt |= HCSPLT_SPLTENA;
- dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
- }
-
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
- hcchar &= ~HCCHAR_MULTICNT_MASK;
- hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
- dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
-
- if (hcchar & HCCHAR_CHDIS)
- dev_warn(hsotg->dev,
- "%s: chdis set, channel %d, hcchar 0x%08x\n",
- __func__, chan->hc_num, hcchar);
-
- /* Set host channel enable after all other setup is complete */
- hcchar |= HCCHAR_CHENA;
- hcchar &= ~HCCHAR_CHDIS;
-
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
- (hcchar & HCCHAR_MULTICNT_MASK) >>
- HCCHAR_MULTICNT_SHIFT);
-
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
- chan->hc_num);
-
- chan->xfer_started = 1;
- chan->requests++;
-
- if (hsotg->core_params->dma_enable <= 0 &&
- !chan->ep_is_in && chan->xfer_len > 0)
- /* Load OUT packet into the appropriate Tx FIFO */
- dwc2_hc_write_packet(hsotg, chan);
-}
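
The packet-count math in the non-split path above, extracted into standalone form (values illustrative): round the transfer length up to whole packets, clamp to what the PktCnt field can hold, and for IN endpoints round the length itself up to an integral number of max packets:

#include <stdio.h>

int main(void)
{
        unsigned int xfer_len = 3000, max_packet = 512;
        unsigned int max_hc_pkt_count = 1023;   /* example field-width limit */
        unsigned int num_packets;

        /* A zero-length transfer still needs one packet */
        num_packets = xfer_len ? (xfer_len + max_packet - 1) / max_packet : 1;
        if (num_packets > max_hc_pkt_count) {
                num_packets = max_hc_pkt_count;
                xfer_len = num_packets * max_packet;
        }
        /* IN transfers always program an integral number of max packets */
        xfer_len = num_packets * max_packet;

        printf("packets=%u len=%u\n", num_packets, xfer_len); /* 6, 3072 */
        return 0;
}
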
-
-/**
- * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
- * host channel and starts the transfer in Descriptor DMA mode
- *
- * @hsotg: Programming view of DWC_otg controller
- * @chan: Information needed to initialize the host channel
- *
- * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
- * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
- * with micro-frame bitmap.
- *
- * Initializes HCDMA register with descriptor list address and CTD value then
- * starts the transfer via enabling the channel.
- */
-void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan)
-{
- u32 hcchar;
- u32 hctsiz = 0;
-
- if (chan->do_ping)
- hctsiz |= TSIZ_DOPNG;
-
- if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
- dwc2_set_pid_isoc(chan);
-
- /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
- hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
- TSIZ_SC_MC_PID_MASK;
-
- /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
- hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
-
- /* Non-zero only for high-speed interrupt endpoints */
- hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
-
- if (dbg_hc(chan)) {
- dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
- chan->hc_num);
- dev_vdbg(hsotg->dev, " Start PID: %d\n",
- chan->data_pid_start);
- dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
- }
-
- dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
-
- dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
- chan->desc_list_sz, DMA_TO_DEVICE);
-
- dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));
-
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
- &chan->desc_list_addr, chan->hc_num);
-
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
- hcchar &= ~HCCHAR_MULTICNT_MASK;
- hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
- HCCHAR_MULTICNT_MASK;
-
- if (hcchar & HCCHAR_CHDIS)
- dev_warn(hsotg->dev,
- "%s: chdis set, channel %d, hcchar 0x%08x\n",
- __func__, chan->hc_num, hcchar);
-
- /* Set host channel enable after all other setup is complete */
- hcchar |= HCCHAR_CHENA;
- hcchar &= ~HCCHAR_CHDIS;
-
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
- (hcchar & HCCHAR_MULTICNT_MASK) >>
- HCCHAR_MULTICNT_SHIFT);
-
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
- chan->hc_num);
-
- chan->xfer_started = 1;
- chan->requests++;
-}
-
-/**
- * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
- * a previous call to dwc2_hc_start_transfer()
- *
- * @hsotg: Programming view of DWC_otg controller
- * @chan: Information needed to initialize the host channel
- *
- * The caller must ensure there is sufficient space in the request queue and Tx
- * Data FIFO. This function should only be called in Slave mode. In DMA mode,
- * the controller acts autonomously to complete transfers programmed to a host
- * channel.
- *
- * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
- * if there is any data remaining to be queued. For an IN transfer, another
- * data packet is always requested. For the SETUP phase of a control transfer,
- * this function does nothing.
- *
- * Return: 1 if a new request is queued, 0 if no more requests are required
- * for this transfer
- */
-int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan)
-{
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
- chan->hc_num);
-
- if (chan->do_split)
- /* SPLITs always queue just once per channel */
- return 0;
-
- if (chan->data_pid_start == DWC2_HC_PID_SETUP)
- /* SETUPs are queued only once since they can't be NAK'd */
- return 0;
-
- if (chan->ep_is_in) {
- /*
- * Always queue another request for other IN transfers. If
- * back-to-back INs are issued and NAKs are received for both,
- * the driver may still be processing the first NAK when the
- * second NAK is received. When the interrupt handler clears
- * the NAK interrupt for the first NAK, the second NAK will
- * not be seen. So we can't depend on the NAK interrupt
- * handler to requeue a NAK'd request. Instead, IN requests
- * are issued each time this function is called. When the
- * transfer completes, the extra requests for the channel will
- * be flushed.
- */
- u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
-
- dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
- hcchar |= HCCHAR_CHENA;
- hcchar &= ~HCCHAR_CHDIS;
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
- hcchar);
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
- chan->requests++;
- return 1;
- }
-
- /* OUT transfers */
-
- if (chan->xfer_count < chan->xfer_len) {
- if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
- chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
- u32 hcchar = dwc2_readl(hsotg->regs +
- HCCHAR(chan->hc_num));
-
- dwc2_hc_set_even_odd_frame(hsotg, chan,
- &hcchar);
- }
-
- /* Load OUT packet into the appropriate Tx FIFO */
- dwc2_hc_write_packet(hsotg, chan);
- chan->requests++;
- return 1;
- }
-
- return 0;
-}
-
-/**
- * dwc2_hc_do_ping() - Starts a PING transfer
- *
- * @hsotg: Programming view of DWC_otg controller
- * @chan: Information needed to initialize the host channel
- *
- * This function should only be called in Slave mode. The Do Ping bit is set in
- * the HCTSIZ register, then the channel is enabled.
- */
-void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
-{
- u32 hcchar;
- u32 hctsiz;
-
- if (dbg_hc(chan))
- dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
- chan->hc_num);
-
-
- hctsiz = TSIZ_DOPNG;
- hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
- dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
-
- hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
- hcchar |= HCCHAR_CHENA;
- hcchar &= ~HCCHAR_CHDIS;
- dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
-}
-
-/**
- * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
- * the HFIR register according to PHY type and speed
- *
- * @hsotg: Programming view of DWC_otg controller
- *
- * NOTE: The caller can modify the value of the HFIR register only after the
- * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
- * has been set
- */
-u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
-{
- u32 usbcfg;
- u32 hprt0;
- int clock = 60; /* default value */
-
- usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
- hprt0 = dwc2_readl(hsotg->regs + HPRT0);
-
- if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
- !(usbcfg & GUSBCFG_PHYIF16))
- clock = 60;
- if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
- GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
- clock = 48;
- if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
- !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
- clock = 30;
- if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
- !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
- clock = 60;
- if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
- !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
- clock = 48;
- if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
- hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
- clock = 48;
- if ((usbcfg & GUSBCFG_PHYSEL) &&
- hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
- clock = 48;
-
- if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
- /* High speed case */
- return 125 * clock;
- else
- /* FS/LS case */
- return 1000 * clock;
-}
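
The value returned above is simply the number of PHY clocks in one (micro)frame: a 125 us high speed microframe at a 60 MHz PHY clock gives 7500, and a 1 ms full/low speed frame at 48 MHz gives 48000. As plain arithmetic:

#include <stdio.h>

int main(void)
{
        printf("HS HFIR at 60 MHz: %d\n", 125 * 60);     /* 7500 clocks */
        printf("FS/LS HFIR at 48 MHz: %d\n", 1000 * 48); /* 48000 clocks */
        return 0;
}
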
-
-/**
- * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
- * buffer
- *
- * @hsotg: Programming view of DWC_otg controller
- * @dest: Destination buffer for the packet
- * @bytes: Number of bytes to copy to the destination
- */
-void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
-{
- u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
- u32 *data_buf = (u32 *)dest;
- int word_count = (bytes + 3) / 4;
- int i;
-
- /*
- * Todo: Account for the case where dest is not dword aligned. This
- * requires reading data from the FIFO into a u32 temp buffer, then
- * moving it into the data buffer.
- */
-
- dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
-
- for (i = 0; i < word_count; i++, data_buf++)
- *data_buf = dwc2_readl(fifo);
-}
-
/**
* dwc2_dump_host_registers() - Prints the host registers
*
@@ -3355,13 +1478,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
hw->max_transfer_size = (1 << (width + 11)) - 1;
- /*
- * Clip max_transfer_size to 65535. dwc2_hc_setup_align_buf() allocates
- * coherent buffers with this size, and if it's too large we can
- * exhaust the coherent DMA pool.
- */
- if (hw->max_transfer_size > 65535)
- hw->max_transfer_size = 65535;
width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
hw->max_packet_count = (1 << (width + 4)) - 1;
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 7fb6434f4639..3c58d633ce80 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -44,6 +44,26 @@
#include <linux/usb/phy.h>
#include "hw.h"
+/*
+ * Suggested defines for tracers:
+ * - no_printk: Disable tracing
+ * - pr_info: Print this info to the console
+ * - trace_printk: Print this info to trace buffer (good for verbose logging)
+ */
+
+#define DWC2_TRACE_SCHEDULER no_printk
+#define DWC2_TRACE_SCHEDULER_VB no_printk
+
+/* Detailed scheduler tracing, but won't overwhelm console */
+#define dwc2_sch_dbg(hsotg, fmt, ...) \
+ DWC2_TRACE_SCHEDULER(pr_fmt("%s: SCH: " fmt), \
+ dev_name(hsotg->dev), ##__VA_ARGS__)
+
+/* Verbose scheduler tracing */
+#define dwc2_sch_vdbg(hsotg, fmt, ...) \
+ DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
+ dev_name(hsotg->dev), ##__VA_ARGS__)
+
static inline u32 dwc2_readl(const void __iomem *addr)
{
u32 value = __raw_readl(addr);
@@ -572,6 +592,84 @@ struct dwc2_hregs_backup {
bool valid;
};
+/*
+ * Constants related to high speed periodic scheduling
+ *
+ * We have a periodic schedule that is DWC2_HS_SCHEDULE_UFRAMES long. From a
+ * reservation point of view it's assumed that the schedule goes right back to
+ * the beginning after the end of the schedule.
+ *
+ * What does that mean for scheduling things with a long interval? It means
+ * we'll reserve time for them in every possible microframe that they could
+ * ever be scheduled in. ...but we'll still only actually schedule them as
+ * often as they were requested.
+ *
+ * We keep our schedule in a "bitmap" structure. This simplifies having
+ * to keep track of and merge intervals: we just let the bitmap code do most
+ * of the heavy lifting. In a way scheduling is much like memory allocation.
+ *
+ * We schedule 100us per uframe or 80% of 125us (the maximum amount you're
+ * supposed to schedule for periodic transfers). That's according to spec.
+ *
+ * Note that though we only schedule 80% of each microframe, the bitmap that we
+ * keep the schedule in is tightly packed (AKA it doesn't have 125 us worth of
+ * space for each uFrame).
+ *
+ * Requirements:
+ * - DWC2_HS_SCHEDULE_UFRAMES must evenly divide 0x4000 (HFNUM_MAX_FRNUM + 1)
+ * - DWC2_HS_SCHEDULE_UFRAMES must be 8 times DWC2_LS_SCHEDULE_FRAMES (probably
+ * could be any multiple of 8 times DWC2_LS_SCHEDULE_FRAMES, but there might
+ * be bugs). The 8 comes from the USB spec: number of microframes per frame.
+ */
+#define DWC2_US_PER_UFRAME 125
+#define DWC2_HS_PERIODIC_US_PER_UFRAME 100
+
+#define DWC2_HS_SCHEDULE_UFRAMES 8
+#define DWC2_HS_SCHEDULE_US (DWC2_HS_SCHEDULE_UFRAMES * \
+ DWC2_HS_PERIODIC_US_PER_UFRAME)
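
Plugging the numbers in: the schedule spans 8 uframes x 100 schedulable us = 800 bitmap bits, the 80% of each 125 us uframe that may be reserved for periodic traffic; the hs_periodic_bitmap declared further down therefore needs DIV_ROUND_UP(800, 64) = 13 unsigned longs on a 64-bit build. A standalone check of that sizing:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        int us_per_uframe = 125, periodic_us = 100, uframes = 8;
        int schedule_us = uframes * periodic_us;

        printf("reserved share: %d%%\n", 100 * periodic_us / us_per_uframe);
        printf("schedule bits: %d, 64-bit longs: %d\n",
               schedule_us, DIV_ROUND_UP(schedule_us, 64)); /* 800, 13 */
        return 0;
}
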
+
+/*
+ * Constants related to low speed scheduling
+ *
+ * For high speed we schedule with 1 us granularity. For low speed that's overkill,
+ * so we make up a unit called a "slice" that's worth 25us. There are 40
+ * slices in a full frame and we can schedule 36 of those (90%) for periodic
+ * transfers.
+ *
+ * Our low speed schedule can be as short as 1 frame or could be longer. When
+ * we only schedule 1 frame it means that we'll need to reserve time every
+ * frame even for things that only transfer very rarely, so something that runs
+ * every 2048 frames will get time reserved in every frame. Our low speed
+ * schedule can be longer and we'll be able to handle more overlap, but that
+ * will come at increased memory cost and increased time to schedule.
+ *
+ * Note: one other advantage of a short low speed schedule is that if we mess
+ * up and miss scheduling we can jump in and use any of the slots that we
+ * happened to reserve.
+ *
+ * With 25 us per slice and 1 frame in the schedule, we only need 4 bytes for
+ * the schedule. There will be one schedule per TT.
+ *
+ * Requirements:
+ * - DWC2_US_PER_SLICE must evenly divide DWC2_LS_PERIODIC_US_PER_FRAME.
+ */
+#define DWC2_US_PER_SLICE 25
+#define DWC2_SLICES_PER_UFRAME (DWC2_US_PER_UFRAME / DWC2_US_PER_SLICE)
+
+#define DWC2_ROUND_US_TO_SLICE(us) \
+ (DIV_ROUND_UP((us), DWC2_US_PER_SLICE) * \
+ DWC2_US_PER_SLICE)
+
+#define DWC2_LS_PERIODIC_US_PER_FRAME \
+ 900
+#define DWC2_LS_PERIODIC_SLICES_PER_FRAME \
+ (DWC2_LS_PERIODIC_US_PER_FRAME / \
+ DWC2_US_PER_SLICE)
+
+#define DWC2_LS_SCHEDULE_FRAMES 1
+#define DWC2_LS_SCHEDULE_SLICES (DWC2_LS_SCHEDULE_FRAMES * \
+ DWC2_LS_PERIODIC_SLICES_PER_FRAME)
+
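Worked example of the arithmetic above: a 1 ms frame is 1000 us, or 40 slices, of which the 900 us defined here (36 slices, 90%) are schedulable; DWC2_ROUND_US_TO_SLICE(18) rounds an 18 us transfer up to one full 25 us slice, and with DWC2_LS_SCHEDULE_FRAMES = 1 the whole schedule is DWC2_LS_SCHEDULE_SLICES = 36 bits.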
/**
* struct dwc2_hsotg - Holds the state of the driver, including the non-periodic
* and periodic schedules
@@ -657,11 +755,14 @@ struct dwc2_hregs_backup {
* periodic_sched_ready because it must be rescheduled for
* the next frame. Otherwise, the item moves to
* periodic_sched_inactive.
+ * @split_order: List keeping track of channels doing splits, in order.
* @periodic_usecs: Total bandwidth claimed so far for periodic transfers.
* This value is in microseconds per (micro)frame. The
* assumption is that all periodic transfers may occur in
* the same (micro)frame.
- * @frame_usecs: Internal variable used by the microframe scheduler
+ * @hs_periodic_bitmap: Bitmap used by the microframe scheduler any time the
+ * host is in high speed mode; low speed schedules are
+ * stored elsewhere since we need one per TT.
* @frame_number: Frame number read from the core at SOF. The value ranges
* from 0 to HFNUM_MAX_FRNUM.
* @periodic_qh_count: Count of periodic QHs, if using several eps. Used for
@@ -780,16 +881,19 @@ struct dwc2_hsotg {
struct list_head periodic_sched_ready;
struct list_head periodic_sched_assigned;
struct list_head periodic_sched_queued;
+ struct list_head split_order;
u16 periodic_usecs;
- u16 frame_usecs[8];
+ unsigned long hs_periodic_bitmap[
+ DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)];
u16 frame_number;
u16 periodic_qh_count;
bool bus_suspended;
bool new_connection;
+ u16 last_frame_num;
+
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
#define FRAME_NUM_ARRAY_SIZE 1000
- u16 last_frame_num;
u16 *frame_num_array;
u16 *last_frame_num_array;
int frame_num_idx;
@@ -885,34 +989,11 @@ enum dwc2_halt_status {
*/
extern int dwc2_core_reset(struct dwc2_hsotg *hsotg);
extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg);
-extern void dwc2_core_host_init(struct dwc2_hsotg *hsotg);
extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg);
extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore);
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
-/*
- * Host core Functions.
- * The following functions support managing the DWC_otg controller in host
- * mode.
- */
-extern void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
-extern void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
- enum dwc2_halt_status halt_status);
-extern void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan);
-extern void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan);
-extern void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan);
-extern int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan);
-extern void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan);
-extern void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg);
-extern void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg);
-
-extern u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);
extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
/*
@@ -924,7 +1005,6 @@ extern void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes);
extern void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num);
extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg);
-extern int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup);
extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd);
extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
@@ -1191,6 +1271,8 @@ extern void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
extern void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
extern int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
#define dwc2_is_device_connected(hsotg) (hsotg->connected)
+int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
+int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg);
#else
static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2)
{ return 0; }
@@ -1208,22 +1290,37 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
int testmode)
{ return 0; }
#define dwc2_is_device_connected(hsotg) (0)
+static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
#endif
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg);
+extern int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us);
extern void dwc2_hcd_connect(struct dwc2_hsotg *hsotg);
extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force);
extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg);
+int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg);
+int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg);
#else
static inline int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
{ return 0; }
+static inline int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg,
+ int us)
+{ return 0; }
static inline void dwc2_hcd_connect(struct dwc2_hsotg *hsotg) {}
static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {}
static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {}
static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {}
static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
{ return 0; }
+static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+static inline int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+{ return 0; }
+
#endif
#endif /* __DWC2_CORE_H__ */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 422ab7da4eb5..818f158232bb 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2254,6 +2254,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
{
u32 intmsk;
u32 val;
+ u32 usbcfg;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2267,10 +2268,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
* set configuration.
*/
+ /* keep other bits untouched (so e.g. forced modes are not lost) */
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+ GUSBCFG_HNPCAP);
+
/* set the PLL on, remove the HNP/SRP and set the PHY */
val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG);
+ usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+ (val << GUSBCFG_USBTRDTIM_SHIFT);
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
dwc2_hsotg_init_fifo(hsotg);
@@ -3031,6 +3038,7 @@ static struct usb_ep_ops dwc2_hsotg_ep_ops = {
static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
{
u32 trdtim;
+ u32 usbcfg;
/* unmask subset of endpoint interrupts */
dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
@@ -3054,11 +3062,16 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
dwc2_hsotg_init_fifo(hsotg);
+ /* keep other bits untouched (so e.g. forced modes are not lost) */
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
+ GUSBCFG_HNPCAP);
+
/* set the PLL on, remove the HNP/SRP and set the PHY */
trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
- dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) |
- (trdtim << GUSBCFG_USBTRDTIM_SHIFT),
- hsotg->regs + GUSBCFG);
+ usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+ (trdtim << GUSBCFG_USBTRDTIM_SHIFT);
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
if (using_dma(hsotg))
__orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN);
@@ -3668,3 +3681,105 @@ int dwc2_hsotg_resume(struct dwc2_hsotg *hsotg)
return 0;
}
+
+/**
+ * dwc2_backup_device_registers() - Backup controller device registers.
+ * When suspending the USB bus, the registers need to be backed up
+ * if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_dregs_backup *dr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Backup dev regs */
+ dr = &hsotg->dr_backup;
+
+ dr->dcfg = dwc2_readl(hsotg->regs + DCFG);
+ dr->dctl = dwc2_readl(hsotg->regs + DCTL);
+ dr->daintmsk = dwc2_readl(hsotg->regs + DAINTMSK);
+ dr->diepmsk = dwc2_readl(hsotg->regs + DIEPMSK);
+ dr->doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
+
+ for (i = 0; i < hsotg->num_of_eps; i++) {
+ /* Backup IN EPs */
+ dr->diepctl[i] = dwc2_readl(hsotg->regs + DIEPCTL(i));
+
+ /* Ensure DATA PID is correctly configured */
+ if (dr->diepctl[i] & DXEPCTL_DPID)
+ dr->diepctl[i] |= DXEPCTL_SETD1PID;
+ else
+ dr->diepctl[i] |= DXEPCTL_SETD0PID;
+
+ dr->dieptsiz[i] = dwc2_readl(hsotg->regs + DIEPTSIZ(i));
+ dr->diepdma[i] = dwc2_readl(hsotg->regs + DIEPDMA(i));
+
+ /* Backup OUT EPs */
+ dr->doepctl[i] = dwc2_readl(hsotg->regs + DOEPCTL(i));
+
+ /* Ensure DATA PID is correctly configured */
+ if (dr->doepctl[i] & DXEPCTL_DPID)
+ dr->doepctl[i] |= DXEPCTL_SETD1PID;
+ else
+ dr->doepctl[i] |= DXEPCTL_SETD0PID;
+
+ dr->doeptsiz[i] = dwc2_readl(hsotg->regs + DOEPTSIZ(i));
+ dr->doepdma[i] = dwc2_readl(hsotg->regs + DOEPDMA(i));
+ }
+ dr->valid = true;
+ return 0;
+}
+
+/**
+ * dwc2_restore_device_registers() - Restore controller device registers.
+ * When resuming the USB bus, the device registers need to be restored
+ * if controller power was disabled.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_dregs_backup *dr;
+ u32 dctl;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Restore dev regs */
+ dr = &hsotg->dr_backup;
+ if (!dr->valid) {
+ dev_err(hsotg->dev, "%s: no device registers to restore\n",
+ __func__);
+ return -EINVAL;
+ }
+ dr->valid = false;
+
+ dwc2_writel(dr->dcfg, hsotg->regs + DCFG);
+ dwc2_writel(dr->dctl, hsotg->regs + DCTL);
+ dwc2_writel(dr->daintmsk, hsotg->regs + DAINTMSK);
+ dwc2_writel(dr->diepmsk, hsotg->regs + DIEPMSK);
+ dwc2_writel(dr->doepmsk, hsotg->regs + DOEPMSK);
+
+ for (i = 0; i < hsotg->num_of_eps; i++) {
+ /* Restore IN EPs */
+ dwc2_writel(dr->diepctl[i], hsotg->regs + DIEPCTL(i));
+ dwc2_writel(dr->dieptsiz[i], hsotg->regs + DIEPTSIZ(i));
+ dwc2_writel(dr->diepdma[i], hsotg->regs + DIEPDMA(i));
+
+ /* Restore OUT EPs */
+ dwc2_writel(dr->doepctl[i], hsotg->regs + DOEPCTL(i));
+ dwc2_writel(dr->doeptsiz[i], hsotg->regs + DOEPTSIZ(i));
+ dwc2_writel(dr->doepdma[i], hsotg->regs + DOEPDMA(i));
+ }
+
+ /* Set the Power-On Programming done bit */
+ dctl = dwc2_readl(hsotg->regs + DCTL);
+ dctl |= DCTL_PWRONPRGDONE;
+ dwc2_writel(dctl, hsotg->regs + DCTL);
+
+ return 0;
+}
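A minimal sketch of how the backup/restore pair could be used on a power-gated suspend/resume path (hypothetical callers; the actual entry points are the hibernation helpers wired up elsewhere in this series):

/* Hypothetical suspend path: save state before the controller loses power */
static int dwc2_example_suspend(struct dwc2_hsotg *hsotg)
{
	if (dwc2_is_host_mode(hsotg))
		return dwc2_backup_host_registers(hsotg);
	return dwc2_backup_device_registers(hsotg);
}

/* Hypothetical resume path: restore once power and clocks are back up */
static int dwc2_example_resume(struct dwc2_hsotg *hsotg)
{
	if (dwc2_is_host_mode(hsotg))
		return dwc2_restore_host_registers(hsotg);
	return dwc2_restore_device_registers(hsotg);
}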
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 8847c72e55f6..1f6255131857 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -54,6 +54,535 @@
#include "core.h"
#include "hcd.h"
+/*
+ * =========================================================================
+ * Host Core Layer Functions
+ * =========================================================================
+ */
+
+/**
+ * dwc2_enable_common_interrupts() - Initializes the common interrupts,
+ * used in both device and host modes
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
+{
+ u32 intmsk;
+
+ /* Clear any pending OTG Interrupts */
+ dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
+
+ /* Clear any pending interrupts */
+ dwc2_writel(0xffffffff, hsotg->regs + GINTSTS);
+
+ /* Enable the interrupts in the GINTMSK */
+ intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
+
+ if (hsotg->core_params->dma_enable <= 0)
+ intmsk |= GINTSTS_RXFLVL;
+ if (hsotg->core_params->external_id_pin_ctl <= 0)
+ intmsk |= GINTSTS_CONIDSTSCHNG;
+
+ intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
+ GINTSTS_SESSREQINT;
+
+ dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+}
+
+/*
+ * Initializes the FSLSPClkSel field of the HCFG register depending on the
+ * PHY type
+ */
+static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
+{
+ u32 hcfg, val;
+
+ if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ hsotg->core_params->ulpi_fs_ls > 0) ||
+ hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* Full speed PHY */
+ val = HCFG_FSLSPCLKSEL_48_MHZ;
+ } else {
+ /* High speed PHY running at full speed or high speed */
+ val = HCFG_FSLSPCLKSEL_30_60_MHZ;
+ }
+
+ dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
+ hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+ hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
+ dwc2_writel(hcfg, hsotg->regs + HCFG);
+}
+
+static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg, i2cctl;
+ int retval = 0;
+
+ /*
+ * core_init() is now called on every switch so only call the
+ * following for the first time through
+ */
+ if (select_phy) {
+ dev_dbg(hsotg->dev, "FS PHY selected\n");
+
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ if (!(usbcfg & GUSBCFG_PHYSEL)) {
+ usbcfg |= GUSBCFG_PHYSEL;
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+
+ /* Reset after a PHY select */
+ retval = dwc2_core_reset_and_force_dr_mode(hsotg);
+
+ if (retval) {
+ dev_err(hsotg->dev,
+ "%s: Reset failed, aborting", __func__);
+ return retval;
+ }
+ }
+ }
+
+ /*
+ * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48 MHz in FS. Also
+ * do this on HNP Dev/Host mode switches (done in dev_init and
+ * host_init).
+ */
+ if (dwc2_is_host_mode(hsotg))
+ dwc2_init_fs_ls_pclk_sel(hsotg);
+
+ if (hsotg->core_params->i2c_enable > 0) {
+ dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
+
+ /* Program GUSBCFG.OtgUtmiFsSel to I2C */
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg |= GUSBCFG_OTG_UTMI_FS_SEL;
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+
+ /* Program GI2CCTL.I2CEn */
+ i2cctl = dwc2_readl(hsotg->regs + GI2CCTL);
+ i2cctl &= ~GI2CCTL_I2CDEVADDR_MASK;
+ i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT;
+ i2cctl &= ~GI2CCTL_I2CEN;
+ dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
+ i2cctl |= GI2CCTL_I2CEN;
+ dwc2_writel(i2cctl, hsotg->regs + GI2CCTL);
+ }
+
+ return retval;
+}
+
+static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg, usbcfg_old;
+ int retval = 0;
+
+ if (!select_phy)
+ return 0;
+
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg_old = usbcfg;
+
+ /*
+ * HS PHY parameters. These parameters are preserved during soft reset
+ * so only program the first time. Do a soft reset immediately after
+ * setting phyif.
+ */
+ switch (hsotg->core_params->phy_type) {
+ case DWC2_PHY_TYPE_PARAM_ULPI:
+ /* ULPI interface */
+ dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
+ usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
+ usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
+ if (hsotg->core_params->phy_ulpi_ddr > 0)
+ usbcfg |= GUSBCFG_DDRSEL;
+ break;
+ case DWC2_PHY_TYPE_PARAM_UTMI:
+ /* UTMI+ interface */
+ dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
+ usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
+ if (hsotg->core_params->phy_utmi_width == 16)
+ usbcfg |= GUSBCFG_PHYIF16;
+ break;
+ default:
+ dev_err(hsotg->dev, "FS PHY selected at HS!\n");
+ break;
+ }
+
+ if (usbcfg != usbcfg_old) {
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+
+ /* Reset after setting the PHY parameters */
+ retval = dwc2_core_reset_and_force_dr_mode(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev,
+ "%s: Reset failed, aborting", __func__);
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
+{
+ u32 usbcfg;
+ int retval = 0;
+
+ if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
+ hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+ /* If FS mode with FS PHY */
+ retval = dwc2_fs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
+ } else {
+ /* High speed PHY */
+ retval = dwc2_hs_phy_init(hsotg, select_phy);
+ if (retval)
+ return retval;
+ }
+
+ if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ hsotg->core_params->ulpi_fs_ls > 0) {
+ dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg |= GUSBCFG_ULPI_FS_LS;
+ usbcfg |= GUSBCFG_ULPI_CLK_SUSP_M;
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ } else {
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg &= ~GUSBCFG_ULPI_FS_LS;
+ usbcfg &= ~GUSBCFG_ULPI_CLK_SUSP_M;
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+ }
+
+ return retval;
+}
+
+static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
+{
+ u32 ahbcfg = dwc2_readl(hsotg->regs + GAHBCFG);
+
+ switch (hsotg->hw_params.arch) {
+ case GHWCFG2_EXT_DMA_ARCH:
+ dev_err(hsotg->dev, "External DMA Mode not supported\n");
+ return -EINVAL;
+
+ case GHWCFG2_INT_DMA_ARCH:
+ dev_dbg(hsotg->dev, "Internal DMA Mode\n");
+ if (hsotg->core_params->ahbcfg != -1) {
+ ahbcfg &= GAHBCFG_CTRL_MASK;
+ ahbcfg |= hsotg->core_params->ahbcfg &
+ ~GAHBCFG_CTRL_MASK;
+ }
+ break;
+
+ case GHWCFG2_SLAVE_ONLY_ARCH:
+ default:
+ dev_dbg(hsotg->dev, "Slave Only Mode\n");
+ break;
+ }
+
+ dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
+ hsotg->core_params->dma_enable,
+ hsotg->core_params->dma_desc_enable);
+
+ if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->core_params->dma_desc_enable > 0)
+ dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
+ else
+ dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
+ } else {
+ dev_dbg(hsotg->dev, "Using Slave mode\n");
+ hsotg->core_params->dma_desc_enable = 0;
+ }
+
+ if (hsotg->core_params->dma_enable > 0)
+ ahbcfg |= GAHBCFG_DMA_EN;
+
+ dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
+
+ return 0;
+}
+
+static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
+{
+ u32 usbcfg;
+
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
+
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ if (hsotg->core_params->otg_cap ==
+ DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
+ usbcfg |= GUSBCFG_HNPCAP;
+ if (hsotg->core_params->otg_cap !=
+ DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
+ usbcfg |= GUSBCFG_SRPCAP;
+ break;
+
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ if (hsotg->core_params->otg_cap !=
+ DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
+ usbcfg |= GUSBCFG_SRPCAP;
+ break;
+
+ case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE:
+ case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST:
+ default:
+ break;
+ }
+
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+}
+
+/**
+ * dwc2_enable_host_interrupts() - Enables the Host mode interrupts
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_enable_host_interrupts(struct dwc2_hsotg *hsotg)
+{
+ u32 intmsk;
+
+ dev_dbg(hsotg->dev, "%s()\n", __func__);
+
+ /* Disable all interrupts */
+ dwc2_writel(0, hsotg->regs + GINTMSK);
+ dwc2_writel(0, hsotg->regs + HAINTMSK);
+
+ /* Enable the common interrupts */
+ dwc2_enable_common_interrupts(hsotg);
+
+ /* Enable host mode interrupts without disturbing common interrupts */
+ intmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ intmsk |= GINTSTS_DISCONNINT | GINTSTS_PRTINT | GINTSTS_HCHINT;
+ dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+}
+
+/**
+ * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
+{
+ u32 intmsk = dwc2_readl(hsotg->regs + GINTMSK);
+
+ /* Disable host mode interrupts without disturbing common interrupts */
+ intmsk &= ~(GINTSTS_SOF | GINTSTS_PRTINT | GINTSTS_HCHINT |
+ GINTSTS_PTXFEMP | GINTSTS_NPTXFEMP | GINTSTS_DISCONNINT);
+ dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+}
+
+/*
+ * dwc2_calculate_dynamic_fifo() - Calculates the default FIFO sizes
+ * for systems whose total FIFO depth is smaller than the default
+ * RX + TX FIFO size.
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ */
+static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *params = hsotg->core_params;
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
+
+ total_fifo_size = hw->total_fifo_size;
+ rxfsiz = params->host_rx_fifo_size;
+ nptxfsiz = params->host_nperio_tx_fifo_size;
+ ptxfsiz = params->host_perio_tx_fifo_size;
+
+ /*
+ * Will use Method 2 defined in the DWC2 spec: minimum FIFO depth
+ * allocation with support for high bandwidth endpoints. Synopsys
+ * defines MPS (max packet size) as 1024 for a periodic EP and 512
+ * for a non-periodic EP.
+ */
+ if (total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)) {
+ /*
+ * For Buffer DMA mode/Scatter Gather DMA mode
+ * 2 * ((Largest Packet size / 4) + 1 + 1) + n
+ * with n = number of host channel.
+ * 2 * ((1024/4) + 2) = 516
+ */
+ rxfsiz = 516 + hw->host_channels;
+
+ /*
+ * min non-periodic tx fifo depth
+ * 2 * (largest non-periodic USB packet used / 4)
+ * 2 * (512/4) = 256
+ */
+ nptxfsiz = 256;
+
+ /*
+ * min periodic tx fifo depth
+ * (largest packet size*MC)/4
+ * (1024 * 3)/4 = 768
+ */
+ ptxfsiz = 768;
+
+ params->host_rx_fifo_size = rxfsiz;
+ params->host_nperio_tx_fifo_size = nptxfsiz;
+ params->host_perio_tx_fifo_size = ptxfsiz;
+ }
+
+ /*
+ * If the summation of RX, NPTX and PTX fifo sizes is still
+ * bigger than the total_fifo_size, then we have a problem.
+ *
+ * We won't be able to allocate as many endpoints. Right now,
+ * we're just printing an error message, but ideally this FIFO
+ * allocation algorithm would be improved in the future.
+ *
+ * FIXME improve this FIFO allocation algorithm.
+ */
+ if (unlikely(total_fifo_size < (rxfsiz + nptxfsiz + ptxfsiz)))
+ dev_err(hsotg->dev, "invalid fifo sizes\n");
+}
+
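Worked example of the fallback sizing above, assuming a core with 16 host channels: rxfsiz = 516 + 16 = 532, nptxfsiz = 256 and ptxfsiz = 768, i.e. 1556 32-bit words in total; a core whose total_fifo_size is still below that sum trips the final dev_err().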
+static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *params = hsotg->core_params;
+ u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
+
+ if (!params->enable_dynamic_fifo)
+ return;
+
+ dwc2_calculate_dynamic_fifo(hsotg);
+
+ /* Rx FIFO */
+ grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
+ dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
+ grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
+ grxfsiz |= params->host_rx_fifo_size <<
+ GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
+ dwc2_writel(grxfsiz, hsotg->regs + GRXFSIZ);
+ dev_dbg(hsotg->dev, "new grxfsiz=%08x\n",
+ dwc2_readl(hsotg->regs + GRXFSIZ));
+
+ /* Non-periodic Tx FIFO */
+ dev_dbg(hsotg->dev, "initial gnptxfsiz=%08x\n",
+ dwc2_readl(hsotg->regs + GNPTXFSIZ));
+ nptxfsiz = params->host_nperio_tx_fifo_size <<
+ FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
+ nptxfsiz |= params->host_rx_fifo_size <<
+ FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
+ dwc2_writel(nptxfsiz, hsotg->regs + GNPTXFSIZ);
+ dev_dbg(hsotg->dev, "new gnptxfsiz=%08x\n",
+ dwc2_readl(hsotg->regs + GNPTXFSIZ));
+
+ /* Periodic Tx FIFO */
+ dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
+ dwc2_readl(hsotg->regs + HPTXFSIZ));
+ hptxfsiz = params->host_perio_tx_fifo_size <<
+ FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
+ hptxfsiz |= (params->host_rx_fifo_size +
+ params->host_nperio_tx_fifo_size) <<
+ FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
+ dwc2_writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
+ dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
+ dwc2_readl(hsotg->regs + HPTXFSIZ));
+
+ if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
+ hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
+ /*
+ * Global DFIFOCFG calculation for Host mode -
+ * include RxFIFO, NPTXFIFO and HPTXFIFO
+ */
+ dfifocfg = dwc2_readl(hsotg->regs + GDFIFOCFG);
+ dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
+ dfifocfg |= (params->host_rx_fifo_size +
+ params->host_nperio_tx_fifo_size +
+ params->host_perio_tx_fifo_size) <<
+ GDFIFOCFG_EPINFOBASE_SHIFT &
+ GDFIFOCFG_EPINFOBASE_MASK;
+ dwc2_writel(dfifocfg, hsotg->regs + GDFIFOCFG);
+ }
+}
+
+/**
+ * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for
+ * the HFIR register according to PHY type and speed
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ * NOTE: The caller can modify the value of the HFIR register only after the
+ * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort)
+ * has been set
+ */
+u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
+{
+ u32 usbcfg;
+ u32 hprt0;
+ int clock = 60; /* default value */
+
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+ hprt0 = dwc2_readl(hsotg->regs + HPRT0);
+
+ if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
+ !(usbcfg & GUSBCFG_PHYIF16))
+ clock = 60;
+ if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
+ GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
+ clock = 48;
+ if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
+ !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
+ clock = 30;
+ if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
+ !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && !(usbcfg & GUSBCFG_PHYIF16))
+ clock = 60;
+ if ((usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
+ !(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
+ clock = 48;
+ if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
+ clock = 48;
+ if ((usbcfg & GUSBCFG_PHYSEL) &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
+ clock = 48;
+
+ if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
+ /* High speed case */
+ return 125 * clock - 1;
+
+ /* FS/LS case */
+ return 1000 * clock - 1;
+}
+
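Worked example: with a 16-bit UTMI+ PHY the PHY clock is 30 MHz, so a high speed port gets HFIR = 125 * 30 - 1 = 3749 PHY clocks per 125 us microframe; a 48 MHz dedicated FS PHY gets 1000 * 48 - 1 = 47999 clocks per 1 ms frame.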
+/**
+ * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination
+ * buffer
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @dest: Destination buffer for the packet
+ * @bytes: Number of bytes to copy to the destination
+ */
+void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes)
+{
+ u32 __iomem *fifo = hsotg->regs + HCFIFO(0);
+ u32 *data_buf = (u32 *)dest;
+ int word_count = (bytes + 3) / 4;
+ int i;
+
+ /*
+ * Todo: Account for the case where dest is not dword aligned. This
+ * requires reading data from the FIFO into a u32 temp buffer, then
+ * moving it into the data buffer.
+ */
+
+ dev_vdbg(hsotg->dev, "%s(%p,%p,%d)\n", __func__, hsotg, dest, bytes);
+
+ for (i = 0; i < word_count; i++, data_buf++)
+ *data_buf = dwc2_readl(fifo);
+}
+
/**
* dwc2_dump_channel_info() - Prints the state of a host channel
*
@@ -77,7 +606,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
u32 hc_dma;
int i;
- if (chan == NULL)
+ if (!chan)
return;
hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
@@ -120,6 +649,1056 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
}
/*
+ * =========================================================================
+ * Low Level Host Channel Access Functions
+ * =========================================================================
+ */
+
+static void dwc2_hc_enable_slave_ints(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 hcintmsk = HCINTMSK_CHHLTD;
+
+ switch (chan->ep_type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ dev_vdbg(hsotg->dev, "control/bulk\n");
+ hcintmsk |= HCINTMSK_XFERCOMPL;
+ hcintmsk |= HCINTMSK_STALL;
+ hcintmsk |= HCINTMSK_XACTERR;
+ hcintmsk |= HCINTMSK_DATATGLERR;
+ if (chan->ep_is_in) {
+ hcintmsk |= HCINTMSK_BBLERR;
+ } else {
+ hcintmsk |= HCINTMSK_NAK;
+ hcintmsk |= HCINTMSK_NYET;
+ if (chan->do_ping)
+ hcintmsk |= HCINTMSK_ACK;
+ }
+
+ if (chan->do_split) {
+ hcintmsk |= HCINTMSK_NAK;
+ if (chan->complete_split)
+ hcintmsk |= HCINTMSK_NYET;
+ else
+ hcintmsk |= HCINTMSK_ACK;
+ }
+
+ if (chan->error_state)
+ hcintmsk |= HCINTMSK_ACK;
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "intr\n");
+ hcintmsk |= HCINTMSK_XFERCOMPL;
+ hcintmsk |= HCINTMSK_NAK;
+ hcintmsk |= HCINTMSK_STALL;
+ hcintmsk |= HCINTMSK_XACTERR;
+ hcintmsk |= HCINTMSK_DATATGLERR;
+ hcintmsk |= HCINTMSK_FRMOVRUN;
+
+ if (chan->ep_is_in)
+ hcintmsk |= HCINTMSK_BBLERR;
+ if (chan->error_state)
+ hcintmsk |= HCINTMSK_ACK;
+ if (chan->do_split) {
+ if (chan->complete_split)
+ hcintmsk |= HCINTMSK_NYET;
+ else
+ hcintmsk |= HCINTMSK_ACK;
+ }
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "isoc\n");
+ hcintmsk |= HCINTMSK_XFERCOMPL;
+ hcintmsk |= HCINTMSK_FRMOVRUN;
+ hcintmsk |= HCINTMSK_ACK;
+
+ if (chan->ep_is_in) {
+ hcintmsk |= HCINTMSK_XACTERR;
+ hcintmsk |= HCINTMSK_BBLERR;
+ }
+ break;
+ default:
+ dev_err(hsotg->dev, "## Unknown EP type ##\n");
+ break;
+ }
+
+ dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
+}
+
+static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 hcintmsk = HCINTMSK_CHHLTD;
+
+ /*
+ * For Descriptor DMA mode core halts the channel on AHB error.
+ * Interrupt is not required.
+ */
+ if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "desc DMA disabled\n");
+ hcintmsk |= HCINTMSK_AHBERR;
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "desc DMA enabled\n");
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ hcintmsk |= HCINTMSK_XFERCOMPL;
+ }
+
+ if (chan->error_state && !chan->do_split &&
+ chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "setting ACK\n");
+ hcintmsk |= HCINTMSK_ACK;
+ if (chan->ep_is_in) {
+ hcintmsk |= HCINTMSK_DATATGLERR;
+ if (chan->ep_type != USB_ENDPOINT_XFER_INT)
+ hcintmsk |= HCINTMSK_NAK;
+ }
+ }
+
+ dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "set HCINTMSK to %08x\n", hcintmsk);
+}
+
+static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 intmsk;
+
+ if (hsotg->core_params->dma_enable > 0) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA enabled\n");
+ dwc2_hc_enable_dma_ints(hsotg, chan);
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA disabled\n");
+ dwc2_hc_enable_slave_ints(hsotg, chan);
+ }
+
+ /* Enable the top level host channel interrupt */
+ intmsk = dwc2_readl(hsotg->regs + HAINTMSK);
+ intmsk |= 1 << chan->hc_num;
+ dwc2_writel(intmsk, hsotg->regs + HAINTMSK);
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "set HAINTMSK to %08x\n", intmsk);
+
+ /* Make sure host channel interrupts are enabled */
+ intmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ intmsk |= GINTSTS_HCHINT;
+ dwc2_writel(intmsk, hsotg->regs + GINTMSK);
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "set GINTMSK to %08x\n", intmsk);
+}
+
+/**
+ * dwc2_hc_init() - Prepares a host channel for transferring packets to/from
+ * a specific endpoint
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * The HCCHARn register is set up with the characteristics specified in chan.
+ * Host channel interrupts that may need to be serviced while this transfer is
+ * in progress are enabled.
+ */
+static void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
+{
+ u8 hc_num = chan->hc_num;
+ u32 hcintmsk;
+ u32 hcchar;
+ u32 hcsplt = 0;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ /* Clear old interrupt conditions for this host channel */
+ hcintmsk = 0xffffffff;
+ hcintmsk &= ~HCINTMSK_RESERVED14_31;
+ dwc2_writel(hcintmsk, hsotg->regs + HCINT(hc_num));
+
+ /* Enable channel interrupts required for this transfer */
+ dwc2_hc_enable_ints(hsotg, chan);
+
+ /*
+ * Program the HCCHARn register with the endpoint characteristics for
+ * the current transfer
+ */
+ hcchar = chan->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK;
+ hcchar |= chan->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK;
+ if (chan->ep_is_in)
+ hcchar |= HCCHAR_EPDIR;
+ if (chan->speed == USB_SPEED_LOW)
+ hcchar |= HCCHAR_LSPDDEV;
+ hcchar |= chan->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK;
+ hcchar |= chan->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK;
+ dwc2_writel(hcchar, hsotg->regs + HCCHAR(hc_num));
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
+ hc_num, hcchar);
+
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n",
+ __func__, hc_num);
+ dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
+ chan->dev_addr);
+ dev_vdbg(hsotg->dev, " Ep Num: %d\n",
+ chan->ep_num);
+ dev_vdbg(hsotg->dev, " Is In: %d\n",
+ chan->ep_is_in);
+ dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
+ chan->speed == USB_SPEED_LOW);
+ dev_vdbg(hsotg->dev, " Ep Type: %d\n",
+ chan->ep_type);
+ dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
+ chan->max_packet);
+ }
+
+ /* Program the HCSPLT register for SPLITs */
+ if (chan->do_split) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev,
+ "Programming HC %d with split --> %s\n",
+ hc_num,
+ chan->complete_split ? "CSPLIT" : "SSPLIT");
+ if (chan->complete_split)
+ hcsplt |= HCSPLT_COMPSPLT;
+ hcsplt |= chan->xact_pos << HCSPLT_XACTPOS_SHIFT &
+ HCSPLT_XACTPOS_MASK;
+ hcsplt |= chan->hub_addr << HCSPLT_HUBADDR_SHIFT &
+ HCSPLT_HUBADDR_MASK;
+ hcsplt |= chan->hub_port << HCSPLT_PRTADDR_SHIFT &
+ HCSPLT_PRTADDR_MASK;
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, " comp split %d\n",
+ chan->complete_split);
+ dev_vdbg(hsotg->dev, " xact pos %d\n",
+ chan->xact_pos);
+ dev_vdbg(hsotg->dev, " hub addr %d\n",
+ chan->hub_addr);
+ dev_vdbg(hsotg->dev, " hub port %d\n",
+ chan->hub_port);
+ dev_vdbg(hsotg->dev, " is_in %d\n",
+ chan->ep_is_in);
+ dev_vdbg(hsotg->dev, " Max Pkt %d\n",
+ chan->max_packet);
+ dev_vdbg(hsotg->dev, " xferlen %d\n",
+ chan->xfer_len);
+ }
+ }
+
+ dwc2_writel(hcsplt, hsotg->regs + HCSPLT(hc_num));
+}
+
+/**
+ * dwc2_hc_halt() - Attempts to halt a host channel
+ *
+ * @hsotg: Controller register interface
+ * @chan: Host channel to halt
+ * @halt_status: Reason for halting the channel
+ *
+ * This function should only be called in Slave mode or to abort a transfer in
+ * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the
+ * controller halts the channel when the transfer is complete or a condition
+ * occurs that requires application intervention.
+ *
+ * In slave mode, checks for a free request queue entry, then sets the Channel
+ * Enable and Channel Disable bits of the Host Channel Characteristics
+ * register of the specified channel to initiate the halt. If there is no free
+ * request queue entry, sets only the Channel Disable bit of the HCCHARn
+ * register to flush requests for this channel. In the latter case, sets a
+ * flag to indicate that the host channel needs to be halted when a request
+ * queue slot is open.
+ *
+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ * HCCHARn register. The controller ensures there is space in the request
+ * queue before submitting the halt request.
+ *
+ * Some time may elapse before the core flushes any posted requests for this
+ * host channel and halts. The Channel Halted interrupt handler completes the
+ * deactivation of the host channel.
+ */
+void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
+ enum dwc2_halt_status halt_status)
+{
+ u32 nptxsts, hptxsts, hcchar;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+ if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
+ dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
+
+ if (halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
+ halt_status == DWC2_HC_XFER_AHB_ERR) {
+ /*
+ * Disable all channel interrupts except Ch Halted. The QTD
+ * and QH state associated with this transfer has been cleared
+ * (in the case of URB_DEQUEUE), so the channel needs to be
+ * shut down carefully to prevent crashes.
+ */
+ u32 hcintmsk = HCINTMSK_CHHLTD;
+
+ dev_vdbg(hsotg->dev, "dequeue/error\n");
+ dwc2_writel(hcintmsk, hsotg->regs + HCINTMSK(chan->hc_num));
+
+ /*
+ * Make sure no other interrupts besides halt are currently
+ * pending. Handling another interrupt could cause a crash due
+ * to the QTD and QH state.
+ */
+ dwc2_writel(~hcintmsk, hsotg->regs + HCINT(chan->hc_num));
+
+ /*
+ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
+ * even if the channel was already halted for some other
+ * reason
+ */
+ chan->halt_status = halt_status;
+
+ hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ if (!(hcchar & HCCHAR_CHENA)) {
+ /*
+ * The channel is either already halted or it hasn't
+ * started yet. In DMA mode, the transfer may halt if
+ * it finishes normally or a condition occurs that
+ * requires driver intervention. Don't want to halt
+ * the channel again. In either Slave or DMA mode,
+ * it's possible that the transfer has been assigned
+ * to a channel, but not started yet when an URB is
+ * dequeued. Don't want to halt a channel that hasn't
+ * started yet.
+ */
+ return;
+ }
+ }
+ if (chan->halt_pending) {
+ /*
+ * A halt has already been issued for this channel. This might
+ * happen when a transfer is aborted by a higher level in
+ * the stack.
+ */
+ dev_vdbg(hsotg->dev,
+ "*** %s: Channel %d, chan->halt_pending already set ***\n",
+ __func__, chan->hc_num);
+ return;
+ }
+
+ hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+
+ /* No need to set the bit in DDMA for disabling the channel */
+ /* TODO check it everywhere channel is disabled */
+ if (hsotg->core_params->dma_desc_enable <= 0) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "desc DMA disabled\n");
+ hcchar |= HCCHAR_CHENA;
+ } else {
+ if (dbg_hc(chan))
+ dev_dbg(hsotg->dev, "desc DMA enabled\n");
+ }
+ hcchar |= HCCHAR_CHDIS;
+
+ if (hsotg->core_params->dma_enable <= 0) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA not enabled\n");
+ hcchar |= HCCHAR_CHENA;
+
+ /* Check for space in the request queue to issue the halt */
+ if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
+ chan->ep_type == USB_ENDPOINT_XFER_BULK) {
+ dev_vdbg(hsotg->dev, "control/bulk\n");
+ nptxsts = dwc2_readl(hsotg->regs + GNPTXSTS);
+ if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) {
+ dev_vdbg(hsotg->dev, "Disabling channel\n");
+ hcchar &= ~HCCHAR_CHENA;
+ }
+ } else {
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "isoc/intr\n");
+ hptxsts = dwc2_readl(hsotg->regs + HPTXSTS);
+ if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 ||
+ hsotg->queuing_high_bandwidth) {
+ if (dbg_perio())
+ dev_vdbg(hsotg->dev, "Disabling channel\n");
+ hcchar &= ~HCCHAR_CHENA;
+ }
+ }
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "DMA enabled\n");
+ }
+
+ dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ chan->halt_status = halt_status;
+
+ if (hcchar & HCCHAR_CHENA) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Channel enabled\n");
+ chan->halt_pending = 1;
+ chan->halt_on_queue = 0;
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Channel disabled\n");
+ chan->halt_on_queue = 1;
+ }
+
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+ dev_vdbg(hsotg->dev, " hcchar: 0x%08x\n",
+ hcchar);
+ dev_vdbg(hsotg->dev, " halt_pending: %d\n",
+ chan->halt_pending);
+ dev_vdbg(hsotg->dev, " halt_on_queue: %d\n",
+ chan->halt_on_queue);
+ dev_vdbg(hsotg->dev, " halt_status: %d\n",
+ chan->halt_status);
+ }
+}
+
+/**
+ * dwc2_hc_cleanup() - Clears the transfer state for a host channel
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Identifies the host channel to clean up
+ *
+ * This function is normally called after a transfer is done and the host
+ * channel is being released
+ */
+void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
+{
+ u32 hcintmsk;
+
+ chan->xfer_started = 0;
+
+ list_del_init(&chan->split_order_list_entry);
+
+ /*
+ * Clear channel interrupt enables and any unhandled channel interrupt
+ * conditions
+ */
+ dwc2_writel(0, hsotg->regs + HCINTMSK(chan->hc_num));
+ hcintmsk = 0xffffffff;
+ hcintmsk &= ~HCINTMSK_RESERVED14_31;
+ dwc2_writel(hcintmsk, hsotg->regs + HCINT(chan->hc_num));
+}
+
+/**
+ * dwc2_hc_set_even_odd_frame() - Sets the channel property that indicates in
+ * which frame a periodic transfer should occur
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Identifies the host channel to set up and its properties
+ * @hcchar: Current value of the HCCHAR register for the specified host channel
+ *
+ * This function has no effect on non-periodic transfers
+ */
+static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan, u32 *hcchar)
+{
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ int host_speed;
+ int xfer_ns;
+ int xfer_us;
+ int bytes_in_fifo;
+ u16 fifo_space;
+ u16 frame_number;
+ u16 wire_frame;
+
+ /*
+ * Try to figure out if we're an even or odd frame. If we set
+ * even and the current frame number is even then the transfer
+ * will happen immediately. Similarly if both are odd. If one is
+ * even and the other is odd then the transfer will happen when
+ * the frame number ticks.
+ *
+ * There's a bit of a balancing act to get this right.
+ * Sometimes we may want to send data in the current frame (AKA
+ * right away). We might want to do this if the frame number
+ * _just_ ticked, but we might also want to do this in order
+ * to continue a split transaction that happened late in a
+ * microframe (so we didn't know to queue the next transfer
+ * until the frame number had ticked). The problem is that we
+ * need a lot of knowledge to know if there's actually still
+ * time to send things or if it would be better to wait until
+ * the next frame.
+ *
+ * We can look at how much time is left in the current frame
+ * and make a guess about whether we'll have time to transfer.
+ * We'll do that.
+ */
+
+ /* Get speed host is running at */
+ host_speed = (chan->speed != USB_SPEED_HIGH &&
+ !chan->do_split) ? chan->speed : USB_SPEED_HIGH;
+
+ /* See how many bytes are in the periodic FIFO right now */
+ fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) &
+ TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
+ bytes_in_fifo = sizeof(u32) *
+ (hsotg->core_params->host_perio_tx_fifo_size -
+ fifo_space);
+
+ /*
+ * Roughly estimate bus time for everything in the periodic
+ * queue + our new transfer. This is "rough" because we're
+ * using a function that takes into account IN/OUT
+ * and INT/ISO and we're just slamming in one value for all
+ * transfers. This should be an over-estimate and that should
+ * be OK, but we can probably tighten it.
+ */
+ xfer_ns = usb_calc_bus_time(host_speed, false, false,
+ chan->xfer_len + bytes_in_fifo);
+ xfer_us = NS_TO_US(xfer_ns);
+
+ /* See what frame number we'll be at by the time we finish */
+ frame_number = dwc2_hcd_get_future_frame_number(hsotg, xfer_us);
+
+ /* This is when we were scheduled to be on the wire */
+ wire_frame = dwc2_frame_num_inc(chan->qh->next_active_frame, 1);
+
+ /*
+ * If we'd finish _after_ the frame we're scheduled in then
+ * it's hopeless. Just schedule right away and hope for the
+ * best. Note that it _might_ be wise to call back into the
+ * scheduler to pick a better frame, but this is better than
+ * nothing.
+ */
+ if (dwc2_frame_num_gt(frame_number, wire_frame)) {
+ dwc2_sch_vdbg(hsotg,
+ "QH=%p EO MISS fr=%04x=>%04x (%+d)\n",
+ chan->qh, wire_frame, frame_number,
+ dwc2_frame_num_dec(frame_number,
+ wire_frame));
+ wire_frame = frame_number;
+
+ /*
+ * We picked a different frame number; communicate this
+ * back to the scheduler so it doesn't try to schedule
+ * another in the same frame.
+ *
+ * Remember that next_active_frame is 1 before the wire
+ * frame.
+ */
+ chan->qh->next_active_frame =
+ dwc2_frame_num_dec(frame_number, 1);
+ }
+
+ if (wire_frame & 1)
+ *hcchar |= HCCHAR_ODDFRM;
+ else
+ *hcchar &= ~HCCHAR_ODDFRM;
+ }
+}
+
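For example, if next_active_frame is 0x1a3, the wire frame computed above is 0x1a4 (even), so HCCHAR_ODDFRM is cleared and the core starts the transfer in an even (micro)frame; if the bus-time estimate says we would only finish after 0x1a4, the code instead retargets wire_frame (and next_active_frame) to the later estimated frame.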
+static void dwc2_set_pid_isoc(struct dwc2_host_chan *chan)
+{
+ /* Set up the initial PID for the transfer */
+ if (chan->speed == USB_SPEED_HIGH) {
+ if (chan->ep_is_in) {
+ if (chan->multi_count == 1)
+ chan->data_pid_start = DWC2_HC_PID_DATA0;
+ else if (chan->multi_count == 2)
+ chan->data_pid_start = DWC2_HC_PID_DATA1;
+ else
+ chan->data_pid_start = DWC2_HC_PID_DATA2;
+ } else {
+ if (chan->multi_count == 1)
+ chan->data_pid_start = DWC2_HC_PID_DATA0;
+ else
+ chan->data_pid_start = DWC2_HC_PID_MDATA;
+ }
+ } else {
+ chan->data_pid_start = DWC2_HC_PID_DATA0;
+ }
+}
+
+/**
+ * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with
+ * the Host Channel
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * This function should only be called in Slave mode. For a channel associated
+ * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel
+ * associated with a periodic EP, the periodic Tx FIFO is written.
+ *
+ * Upon return the xfer_buf and xfer_count fields in chan are incremented by
+ * the number of bytes written to the Tx FIFO.
+ */
+static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 i;
+ u32 remaining_count;
+ u32 byte_count;
+ u32 dword_count;
+ u32 __iomem *data_fifo;
+ u32 *data_buf = (u32 *)chan->xfer_buf;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
+
+ remaining_count = chan->xfer_len - chan->xfer_count;
+ if (remaining_count > chan->max_packet)
+ byte_count = chan->max_packet;
+ else
+ byte_count = remaining_count;
+
+ dword_count = (byte_count + 3) / 4;
+
+ if (((unsigned long)data_buf & 0x3) == 0) {
+ /* xfer_buf is DWORD aligned */
+ for (i = 0; i < dword_count; i++, data_buf++)
+ dwc2_writel(*data_buf, data_fifo);
+ } else {
+ /* xfer_buf is not DWORD aligned */
+ for (i = 0; i < dword_count; i++, data_buf++) {
+ u32 data = data_buf[0] | data_buf[1] << 8 |
+ data_buf[2] << 16 | data_buf[3] << 24;
+ dwc2_writel(data, data_fifo);
+ }
+ }
+
+ chan->xfer_count += byte_count;
+ chan->xfer_buf += byte_count;
+}
+
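For example, a 188-byte SSPLIT OUT payload gives dword_count = (188 + 3) / 4 = 47 FIFO writes.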
+/**
+ * dwc2_hc_do_ping() - Starts a PING transfer
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * This function should only be called in Slave mode. The Do Ping bit is set in
+ * the HCTSIZ register, then the channel is enabled.
+ */
+static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 hcchar;
+ u32 hctsiz;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+
+ hctsiz = TSIZ_DOPNG;
+ hctsiz |= 1 << TSIZ_PKTCNT_SHIFT;
+ dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
+
+ hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar |= HCCHAR_CHENA;
+ hcchar &= ~HCCHAR_CHDIS;
+ dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+}
+
+/**
+ * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host
+ * channel and starts the transfer
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel. The xfer_len value
+ * may be reduced to accommodate the max widths of the XferSize and
+ * PktCnt fields in the HCTSIZn register. The multi_count value may be
+ * changed to reflect the final xfer_len value.
+ *
+ * This function may be called in either Slave mode or DMA mode. In Slave mode,
+ * the caller must ensure that there is sufficient space in the request queue
+ * and Tx Data FIFO.
+ *
+ * For an OUT transfer in Slave mode, it loads a data packet into the
+ * appropriate FIFO. If necessary, additional data packets are loaded in the
+ * Host ISR.
+ *
+ * For an IN transfer in Slave mode, a data packet is requested. The data
+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
+ * additional data packets are requested in the Host ISR.
+ *
+ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ * register along with a packet count of 1 and the channel is enabled. This
+ * causes a single PING transaction to occur. Other fields in HCTSIZ are
+ * simply set to 0 since no data transfer occurs in this case.
+ *
+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ * all the information required to perform the subsequent data transfer. In
+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ * controller performs the entire PING protocol, then starts the data
+ * transfer.
+ */
+static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
+ u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
+ u32 hcchar;
+ u32 hctsiz = 0;
+ u16 num_packets;
+ u32 ec_mc;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s()\n", __func__);
+
+ if (chan->do_ping) {
+ if (hsotg->core_params->dma_enable <= 0) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "ping, no DMA\n");
+ dwc2_hc_do_ping(hsotg, chan);
+ chan->xfer_started = 1;
+ return;
+ }
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "ping, DMA\n");
+
+ hctsiz |= TSIZ_DOPNG;
+ }
+
+ if (chan->do_split) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "split\n");
+ num_packets = 1;
+
+ if (chan->complete_split && !chan->ep_is_in)
+ /*
+ * For CSPLIT OUT Transfer, set the size to 0 so the
+ * core doesn't expect any data written to the FIFO
+ */
+ chan->xfer_len = 0;
+ else if (chan->ep_is_in || chan->xfer_len > chan->max_packet)
+ chan->xfer_len = chan->max_packet;
+ else if (!chan->ep_is_in && chan->xfer_len > 188)
+ chan->xfer_len = 188;
+
+ hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
+ TSIZ_XFERSIZE_MASK;
+
+ /* For split set ec_mc for immediate retries */
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ ec_mc = 3;
+ else
+ ec_mc = 1;
+ } else {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "no split\n");
+ /*
+ * Ensure that the transfer length and packet count will fit
+ * in the widths allocated for them in the HCTSIZn register
+ */
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ /*
+ * Make sure the transfer size is no larger than one
+ * (micro)frame's worth of data. (A check was done
+ * when the periodic transfer was accepted to ensure
+ * that a (micro)frame's worth of data can be
+ * programmed into a channel.)
+ */
+ u32 max_periodic_len =
+ chan->multi_count * chan->max_packet;
+
+ if (chan->xfer_len > max_periodic_len)
+ chan->xfer_len = max_periodic_len;
+ } else if (chan->xfer_len > max_hc_xfer_size) {
+ /*
+ * Make sure that xfer_len is a multiple of max packet
+ * size
+ */
+ chan->xfer_len =
+ max_hc_xfer_size - chan->max_packet + 1;
+ }
+
+ if (chan->xfer_len > 0) {
+ num_packets = (chan->xfer_len + chan->max_packet - 1) /
+ chan->max_packet;
+ if (num_packets > max_hc_pkt_count) {
+ num_packets = max_hc_pkt_count;
+ chan->xfer_len = num_packets * chan->max_packet;
+ }
+ } else {
+ /* Need 1 packet for transfer length of 0 */
+ num_packets = 1;
+ }
+
+ if (chan->ep_is_in)
+ /*
+ * Always program an integral # of max packets for IN
+ * transfers
+ */
+ chan->xfer_len = num_packets * chan->max_packet;
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ /*
+ * Make sure that the multi_count field matches the
+ * actual transfer length
+ */
+ chan->multi_count = num_packets;
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ dwc2_set_pid_isoc(chan);
+
+ hctsiz |= chan->xfer_len << TSIZ_XFERSIZE_SHIFT &
+ TSIZ_XFERSIZE_MASK;
+
+ /* The ec_mc gets the multi_count for non-split */
+ ec_mc = chan->multi_count;
+ }
+
+ chan->start_pkt_count = num_packets;
+ hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK;
+ hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
+ TSIZ_SC_MC_PID_MASK;
+ dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "Wrote %08x to HCTSIZ(%d)\n",
+ hctsiz, chan->hc_num);
+
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+ dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
+ (hctsiz & TSIZ_XFERSIZE_MASK) >>
+ TSIZ_XFERSIZE_SHIFT);
+ dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
+ (hctsiz & TSIZ_PKTCNT_MASK) >>
+ TSIZ_PKTCNT_SHIFT);
+ dev_vdbg(hsotg->dev, " Start PID: %d\n",
+ (hctsiz & TSIZ_SC_MC_PID_MASK) >>
+ TSIZ_SC_MC_PID_SHIFT);
+ }
+
+ if (hsotg->core_params->dma_enable > 0) {
+ dwc2_writel((u32)chan->xfer_dma,
+ hsotg->regs + HCDMA(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
+ (unsigned long)chan->xfer_dma, chan->hc_num);
+ }
+
+ /* Start the split */
+ if (chan->do_split) {
+ u32 hcsplt = dwc2_readl(hsotg->regs + HCSPLT(chan->hc_num));
+
+ hcsplt |= HCSPLT_SPLTENA;
+ dwc2_writel(hcsplt, hsotg->regs + HCSPLT(chan->hc_num));
+ }
+
+ hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar &= ~HCCHAR_MULTICNT_MASK;
+ hcchar |= (ec_mc << HCCHAR_MULTICNT_SHIFT) & HCCHAR_MULTICNT_MASK;
+ dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
+
+ if (hcchar & HCCHAR_CHDIS)
+ dev_warn(hsotg->dev,
+ "%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, chan->hc_num, hcchar);
+
+ /* Set host channel enable after all other setup is complete */
+ hcchar |= HCCHAR_CHENA;
+ hcchar &= ~HCCHAR_CHDIS;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
+ (hcchar & HCCHAR_MULTICNT_MASK) >>
+ HCCHAR_MULTICNT_SHIFT);
+
+ dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
+ chan->hc_num);
+
+ chan->xfer_started = 1;
+ chan->requests++;
+
+ if (hsotg->core_params->dma_enable <= 0 &&
+ !chan->ep_is_in && chan->xfer_len > 0)
+ /* Load OUT packet into the appropriate Tx FIFO */
+ dwc2_hc_write_packet(hsotg, chan);
+}
+
+/**
+ * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a
+ * host channel and starts the transfer in Descriptor DMA mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
+ * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field
+ * with micro-frame bitmap.
+ *
+ * Initializes HCDMA register with descriptor list address and CTD value then
+ * starts the transfer via enabling the channel.
+ */
+void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ u32 hcchar;
+ u32 hctsiz = 0;
+
+ if (chan->do_ping)
+ hctsiz |= TSIZ_DOPNG;
+
+ if (chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ dwc2_set_pid_isoc(chan);
+
+ /* Packet Count and Xfer Size are not used in Descriptor DMA mode */
+ hctsiz |= chan->data_pid_start << TSIZ_SC_MC_PID_SHIFT &
+ TSIZ_SC_MC_PID_MASK;
+
+ /* 0 - 1 descriptor, 1 - 2 descriptors, etc */
+ hctsiz |= (chan->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK;
+
+ /* Non-zero only for high-speed interrupt endpoints */
+ hctsiz |= chan->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK;
+
+ if (dbg_hc(chan)) {
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+ dev_vdbg(hsotg->dev, " Start PID: %d\n",
+ chan->data_pid_start);
+ dev_vdbg(hsotg->dev, " NTD: %d\n", chan->ntd - 1);
+ }
+
+ dwc2_writel(hctsiz, hsotg->regs + HCTSIZ(chan->hc_num));
+
+ dma_sync_single_for_device(hsotg->dev, chan->desc_list_addr,
+ chan->desc_list_sz, DMA_TO_DEVICE);
+
+ dwc2_writel(chan->desc_list_addr, hsotg->regs + HCDMA(chan->hc_num));
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %pad to HCDMA(%d)\n",
+ &chan->desc_list_addr, chan->hc_num);
+
+ hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+ hcchar &= ~HCCHAR_MULTICNT_MASK;
+ hcchar |= chan->multi_count << HCCHAR_MULTICNT_SHIFT &
+ HCCHAR_MULTICNT_MASK;
+
+ if (hcchar & HCCHAR_CHDIS)
+ dev_warn(hsotg->dev,
+ "%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, chan->hc_num, hcchar);
+
+ /* Set host channel enable after all other setup is complete */
+ hcchar |= HCCHAR_CHENA;
+ hcchar &= ~HCCHAR_CHDIS;
+
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
+ (hcchar & HCCHAR_MULTICNT_MASK) >>
+ HCCHAR_MULTICNT_SHIFT);
+
+ dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar,
+ chan->hc_num);
+
+ chan->xfer_started = 1;
+ chan->requests++;
+}
+
+/**
+ * dwc2_hc_continue_transfer() - Continues a data transfer that was started by
+ * a previous call to dwc2_hc_start_transfer()
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ * @chan: Information needed to initialize the host channel
+ *
+ * The caller must ensure there is sufficient space in the request queue and Tx
+ * Data FIFO. This function should only be called in Slave mode. In DMA mode,
+ * the controller acts autonomously to complete transfers programmed to a host
+ * channel.
+ *
+ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
+ * if there is any data remaining to be queued. For an IN transfer, another
+ * data packet is always requested. For the SETUP phase of a control transfer,
+ * this function does nothing.
+ *
+ * Return: 1 if a new request is queued, 0 if no more requests are required
+ * for this transfer
+ */
+static int dwc2_hc_continue_transfer(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan)
+{
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
+ chan->hc_num);
+
+ if (chan->do_split)
+ /* SPLITs always queue just once per channel */
+ return 0;
+
+ if (chan->data_pid_start == DWC2_HC_PID_SETUP)
+ /* SETUPs are queued only once since they can't be NAK'd */
+ return 0;
+
+ if (chan->ep_is_in) {
+ /*
+ * Always queue another request for other IN transfers. If
+ * back-to-back INs are issued and NAKs are received for both,
+ * the driver may still be processing the first NAK when the
+ * second NAK is received. When the interrupt handler clears
+ * the NAK interrupt for the first NAK, the second NAK will
+ * not be seen. So we can't depend on the NAK interrupt
+ * handler to requeue a NAK'd request. Instead, IN requests
+ * are issued each time this function is called. When the
+ * transfer completes, the extra requests for the channel will
+ * be flushed.
+ */
+ u32 hcchar = dwc2_readl(hsotg->regs + HCCHAR(chan->hc_num));
+
+ dwc2_hc_set_even_odd_frame(hsotg, chan, &hcchar);
+ hcchar |= HCCHAR_CHENA;
+ hcchar &= ~HCCHAR_CHDIS;
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, " IN xfer: hcchar = 0x%08x\n",
+ hcchar);
+ dwc2_writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
+ chan->requests++;
+ return 1;
+ }
+
+ /* OUT transfers */
+
+ if (chan->xfer_count < chan->xfer_len) {
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ u32 hcchar = dwc2_readl(hsotg->regs +
+ HCCHAR(chan->hc_num));
+
+ dwc2_hc_set_even_odd_frame(hsotg, chan,
+ &hcchar);
+ }
+
+ /* Load OUT packet into the appropriate Tx FIFO */
+ dwc2_hc_write_packet(hsotg, chan);
+ chan->requests++;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * =========================================================================
+ * HCD
+ * =========================================================================
+ */
+
+/*
* Processes all the URBs in a single list of QHs. Completes them with
* -ETIMEDOUT and frees the QTD.
*
@@ -164,6 +1743,9 @@ static void dwc2_qh_list_free(struct dwc2_hsotg *hsotg,
qtd_list_entry)
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
+ if (qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
+
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hcd_qh_free(hsotg, qh);
spin_lock_irqsave(&hsotg->lock, flags);
@@ -554,7 +2136,12 @@ static int dwc2_hcd_endpoint_disable(struct dwc2_hsotg *hsotg,
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
ep->hcpriv = NULL;
+
+ if (qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
+
spin_unlock_irqrestore(&hsotg->lock, flags);
+
dwc2_hcd_qh_free(hsotg, qh);
return 0;
@@ -580,6 +2167,224 @@ static int dwc2_hcd_endpoint_reset(struct dwc2_hsotg *hsotg,
return 0;
}
+/**
+ * dwc2_core_init() - Initializes the DWC_otg controller registers and
+ * prepares the core for device mode or host mode operation
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @initial_setup: If true then this is the first init for this instance.
+ */
+static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
+{
+ u32 usbcfg, otgctl;
+ int retval;
+
+ dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
+
+ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
+
+ /* Set ULPI External VBUS bit if needed */
+ usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
+ if (hsotg->core_params->phy_ulpi_ext_vbus ==
+ DWC2_PHY_ULPI_EXTERNAL_VBUS)
+ usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
+
+ /* Set external TS Dline pulsing bit if needed */
+ usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
+ if (hsotg->core_params->ts_dline > 0)
+ usbcfg |= GUSBCFG_TERMSELDLPULSE;
+
+ dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
+
+ /*
+ * Reset the Controller
+ *
+ * We only need to reset the controller if this is a re-init.
+ * For the first init we know for sure that earlier code reset us (it
+ * needed to in order to properly detect various parameters).
+ */
+ if (!initial_setup) {
+ retval = dwc2_core_reset_and_force_dr_mode(hsotg);
+ if (retval) {
+ dev_err(hsotg->dev, "%s(): Reset failed, aborting\n",
+ __func__);
+ return retval;
+ }
+ }
+
+ /*
+ * This needs to happen in FS mode before any other programming occurs
+ */
+ retval = dwc2_phy_init(hsotg, initial_setup);
+ if (retval)
+ return retval;
+
+ /* Program the GAHBCFG Register */
+ retval = dwc2_gahbcfg_init(hsotg);
+ if (retval)
+ return retval;
+
+ /* Program the GUSBCFG register */
+ dwc2_gusbcfg_init(hsotg);
+
+ /* Program the GOTGCTL register */
+ otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ otgctl &= ~GOTGCTL_OTGVER;
+ if (hsotg->core_params->otg_ver > 0)
+ otgctl |= GOTGCTL_OTGVER;
+ dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
+ dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
+
+ /* Clear the SRP success bit for FS-I2c */
+ hsotg->srp_success = 0;
+
+ /* Enable common interrupts */
+ dwc2_enable_common_interrupts(hsotg);
+
+ /*
+ * Do device or host initialization based on mode during PCD and
+ * HCD initialization
+ */
+ if (dwc2_is_host_mode(hsotg)) {
+ dev_dbg(hsotg->dev, "Host Mode\n");
+ hsotg->op_state = OTG_STATE_A_HOST;
+ } else {
+ dev_dbg(hsotg->dev, "Device Mode\n");
+ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
+ }
+
+ return 0;
+}
+
+/**
+ * dwc2_core_host_init() - Initializes the DWC_otg controller registers for
+ * Host mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ * This function flushes the Tx and Rx FIFOs and flushes any entries in the
+ * request queues. Host channels are reset to ensure that they are ready for
+ * performing transfers.
+ */
+static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
+{
+ u32 hcfg, hfir, otgctl;
+
+ dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
+
+ /* Restart the Phy Clock */
+ dwc2_writel(0, hsotg->regs + PCGCTL);
+
+ /* Initialize Host Configuration Register */
+ dwc2_init_fs_ls_pclk_sel(hsotg);
+ if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
+ hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg |= HCFG_FSLSSUPP;
+ dwc2_writel(hcfg, hsotg->regs + HCFG);
+ }
+
+ /*
+ * This bit allows dynamic reloading of the HFIR register during
+ * runtime. This bit needs to be programmed during initial configuration
+ * and its value must not be changed during runtime.
+ */
+ if (hsotg->core_params->reload_ctl > 0) {
+ hfir = dwc2_readl(hsotg->regs + HFIR);
+ hfir |= HFIR_RLDCTRL;
+ dwc2_writel(hfir, hsotg->regs + HFIR);
+ }
+
+ if (hsotg->core_params->dma_desc_enable > 0) {
+ u32 op_mode = hsotg->hw_params.op_mode;
+
+ if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
+ !hsotg->hw_params.dma_desc_enable ||
+ op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
+ op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
+ op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
+ dev_err(hsotg->dev,
+ "Hardware does not support descriptor DMA mode -\n");
+ dev_err(hsotg->dev,
+ "falling back to buffer DMA mode.\n");
+ hsotg->core_params->dma_desc_enable = 0;
+ } else {
+ hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hcfg |= HCFG_DESCDMA;
+ dwc2_writel(hcfg, hsotg->regs + HCFG);
+ }
+ }
+
+ /* Configure data FIFO sizes */
+ dwc2_config_fifos(hsotg);
+
+ /* TODO - check this */
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ otgctl &= ~GOTGCTL_HSTSETHNPEN;
+ dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
+
+ /* Make sure the FIFOs are flushed */
+ dwc2_flush_tx_fifo(hsotg, 0x10 /* all TX FIFOs */);
+ dwc2_flush_rx_fifo(hsotg);
+
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
+ otgctl &= ~GOTGCTL_HSTSETHNPEN;
+ dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
+
+ if (hsotg->core_params->dma_desc_enable <= 0) {
+ int num_channels, i;
+ u32 hcchar;
+
+ /* Flush out any leftover queued requests */
+ num_channels = hsotg->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
+ hcchar &= ~HCCHAR_CHENA;
+ hcchar |= HCCHAR_CHDIS;
+ hcchar &= ~HCCHAR_EPDIR;
+ dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
+ }
+
+ /* Halt all channels to put them into a known state */
+ for (i = 0; i < num_channels; i++) {
+ int count = 0;
+
+ hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
+ hcchar |= HCCHAR_CHENA | HCCHAR_CHDIS;
+ hcchar &= ~HCCHAR_EPDIR;
+ dwc2_writel(hcchar, hsotg->regs + HCCHAR(i));
+ dev_dbg(hsotg->dev, "%s: Halt channel %d\n",
+ __func__, i);
+ do {
+ hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
+ if (++count > 1000) {
+ dev_err(hsotg->dev,
+ "Unable to clear enable on channel %d\n",
+ i);
+ break;
+ }
+ udelay(1);
+ } while (hcchar & HCCHAR_CHENA);
+ }
+ }
+
+ /* Turn on the vbus power */
+ dev_dbg(hsotg->dev, "Init: Port Power? op_state=%d\n", hsotg->op_state);
+ if (hsotg->op_state == OTG_STATE_A_HOST) {
+ u32 hprt0 = dwc2_read_hprt0(hsotg);
+
+ dev_dbg(hsotg->dev, "Init: Power Port (%d)\n",
+ !!(hprt0 & HPRT0_PWR));
+ if (!(hprt0 & HPRT0_PWR)) {
+ hprt0 |= HPRT0_PWR;
+ dwc2_writel(hprt0, hsotg->regs + HPRT0);
+ }
+ }
+
+ dwc2_enable_host_interrupts(hsotg);
+}
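
The halt loop above bounds its busy-wait at 1000 iterations of udelay(1) so
a wedged channel cannot hang host initialization. A userspace sketch of the
same bounded-poll pattern (the register is simulated with a plain variable;
in the driver the read is dwc2_readl() and the delay is udelay()):

#include <stdio.h>
#include <unistd.h>

/* Poll until 'bit' clears in *reg, giving up after max_tries * ~1 us */
static int wait_bit_clear(volatile unsigned int *reg, unsigned int bit,
			  int max_tries)
{
	int count = 0;

	while (*reg & bit) {
		if (++count > max_tries)
			return -1;	/* timed out; report and move on */
		usleep(1);
	}
	return 0;
}

int main(void)
{
	unsigned int fake_hcchar = 0;	/* channel already disabled */

	/* Bit 31 mirrors HCCHAR_CHENA in the loop above */
	printf("halt %s\n",
	       wait_bit_clear(&fake_hcchar, 1U << 31, 1000) ?
	       "timed out" : "completed");
	return 0;
}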
+
/*
* Initializes dynamic portions of the DWC_otg HCD state
*
@@ -635,9 +2440,9 @@ static void dwc2_hc_init_split(struct dwc2_hsotg *hsotg,
chan->hub_port = (u8)hub_port;
}
-static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
- struct dwc2_host_chan *chan,
- struct dwc2_qtd *qtd, void *bufptr)
+static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan,
+ struct dwc2_qtd *qtd)
{
struct dwc2_hcd_urb *urb = qtd->urb;
struct dwc2_hcd_iso_packet_desc *frame_desc;
@@ -657,7 +2462,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
else
chan->xfer_buf = urb->setup_packet;
chan->xfer_len = 8;
- bufptr = NULL;
break;
case DWC2_CONTROL_DATA:
@@ -684,7 +2488,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
chan->xfer_dma = hsotg->status_buf_dma;
else
chan->xfer_buf = hsotg->status_buf;
- bufptr = NULL;
break;
}
break;
@@ -717,14 +2520,6 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
chan->xfer_len = frame_desc->length - qtd->isoc_split_offset;
- /* For non-dword aligned buffers */
- if (hsotg->core_params->dma_enable > 0 &&
- (chan->xfer_dma & 0x3))
- bufptr = (u8 *)urb->buf + frame_desc->offset +
- qtd->isoc_split_offset;
- else
- bufptr = NULL;
-
if (chan->xact_pos == DWC2_HCSPLT_XACTPOS_ALL) {
if (chan->xfer_len <= 188)
chan->xact_pos = DWC2_HCSPLT_XACTPOS_ALL;
@@ -733,63 +2528,93 @@ static void *dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
}
break;
}
+}
+
+#define DWC2_USB_DMA_ALIGN 4
+
+struct dma_aligned_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
+static void dwc2_free_dma_aligned_buffer(struct urb *urb)
+{
+ struct dma_aligned_buffer *temp;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
- return bufptr;
+ temp = container_of(urb->transfer_buffer,
+ struct dma_aligned_buffer, data);
+
+ if (usb_urb_dir_in(urb))
+ memcpy(temp->old_xfer_buffer, temp->data,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
-static int dwc2_hc_setup_align_buf(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
- struct dwc2_host_chan *chan,
- struct dwc2_hcd_urb *urb, void *bufptr)
+static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
{
- u32 buf_size;
- struct urb *usb_urb;
- struct usb_hcd *hcd;
+ struct dma_aligned_buffer *temp, *kmalloc_ptr;
+ size_t kmalloc_size;
- if (!qh->dw_align_buf) {
- if (chan->ep_type != USB_ENDPOINT_XFER_ISOC)
- buf_size = hsotg->core_params->max_transfer_size;
- else
- /* 3072 = 3 max-size Isoc packets */
- buf_size = 3072;
+ if (urb->num_sgs || urb->sg ||
+ urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
+ return 0;
- qh->dw_align_buf = kmalloc(buf_size, GFP_ATOMIC | GFP_DMA);
- if (!qh->dw_align_buf)
- return -ENOMEM;
- qh->dw_align_buf_size = buf_size;
- }
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
- if (chan->xfer_len) {
- dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
- usb_urb = urb->priv;
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
- if (usb_urb) {
- if (usb_urb->transfer_flags &
- (URB_SETUP_MAP_SINGLE | URB_DMA_MAP_SG |
- URB_DMA_MAP_PAGE | URB_DMA_MAP_SINGLE)) {
- hcd = dwc2_hsotg_to_hcd(hsotg);
- usb_hcd_unmap_urb_for_dma(hcd, usb_urb);
- }
- if (!chan->ep_is_in)
- memcpy(qh->dw_align_buf, bufptr,
- chan->xfer_len);
- } else {
- dev_warn(hsotg->dev, "no URB in dwc2_urb\n");
- }
- }
+ /* Position our struct dma_aligned_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
- qh->dw_align_buf_dma = dma_map_single(hsotg->dev,
- qh->dw_align_buf, qh->dw_align_buf_size,
- chan->ep_is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
- dev_err(hsotg->dev, "can't map align_buf\n");
- chan->align_buf = 0;
- return -EINVAL;
- }
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
- chan->align_buf = qh->dw_align_buf_dma;
return 0;
}
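
The PTR_ALIGN() arithmetic above is subtle: the +1/-1 are in units of the
struct size, so the header is placed such that data[] itself lands on the
alignment boundary, and container_of() can later recover the header from the
transfer_buffer pointer alone (as dwc2_free_dma_aligned_buffer() does). A
self-contained demonstration of both steps, with ptr_align() standing in for
the kernel's PTR_ALIGN():

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DWC2_USB_DMA_ALIGN 4

struct dma_aligned_buffer {
	void *kmalloc_ptr;
	void *old_xfer_buffer;
	uint8_t data[];
};

static void *ptr_align(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main(void)
{
	size_t length = 13;	/* hypothetical odd-sized transfer */
	struct dma_aligned_buffer *raw, *temp, *back;

	raw = malloc(length + sizeof(*raw) + DWC2_USB_DMA_ALIGN - 1);
	assert(raw);

	/* Align the end of one header, then step back one header */
	temp = (struct dma_aligned_buffer *)
		ptr_align(raw + 1, DWC2_USB_DMA_ALIGN) - 1;
	temp->kmalloc_ptr = raw;
	assert((uintptr_t)temp->data % DWC2_USB_DMA_ALIGN == 0);

	/* container_of() equivalent: header back from the data pointer */
	back = (struct dma_aligned_buffer *)((char *)temp->data -
		offsetof(struct dma_aligned_buffer, data));
	assert(back == temp);

	free(back->kmalloc_ptr);
	printf("data[] aligned, header recoverable\n");
	return 0;
}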
+static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ /* We assume setup_dma is always aligned; warn if not */
+ WARN_ON_ONCE(urb->setup_dma &&
+ (urb->setup_dma & (DWC2_USB_DMA_ALIGN - 1)));
+
+ ret = dwc2_alloc_dma_aligned_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ dwc2_free_dma_aligned_buffer(urb);
+
+ return ret;
+}
+
+static void dwc2_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ dwc2_free_dma_aligned_buffer(urb);
+}
+
/**
* dwc2_assign_and_init_hc() - Assigns transactions from a QTD to a free host
* channel and initializes the host channel to perform the transactions. The
@@ -804,7 +2629,6 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
struct dwc2_host_chan *chan;
struct dwc2_hcd_urb *urb;
struct dwc2_qtd *qtd;
- void *bufptr = NULL;
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s(%p,%p)\n", __func__, hsotg, qh);
@@ -866,16 +2690,10 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
!dwc2_hcd_is_pipe_in(&urb->pipe_info))
urb->actual_length = urb->length;
- if (hsotg->core_params->dma_enable > 0) {
+ if (hsotg->core_params->dma_enable > 0)
chan->xfer_dma = urb->dma + urb->actual_length;
-
- /* For non-dword aligned case */
- if (hsotg->core_params->dma_desc_enable <= 0 &&
- (chan->xfer_dma & 0x3))
- bufptr = (u8 *)urb->buf + urb->actual_length;
- } else {
+ else
chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
- }
chan->xfer_len = urb->length - urb->actual_length;
chan->xfer_count = 0;
@@ -887,27 +2705,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
chan->do_split = 0;
/* Set the transfer attributes */
- bufptr = dwc2_hc_init_xfer(hsotg, chan, qtd, bufptr);
-
- /* Non DWORD-aligned buffer case */
- if (bufptr) {
- dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
- if (dwc2_hc_setup_align_buf(hsotg, qh, chan, urb, bufptr)) {
- dev_err(hsotg->dev,
- "%s: Failed to allocate memory to handle non-dword aligned buffer\n",
- __func__);
- /* Add channel back to free list */
- chan->align_buf = 0;
- chan->multi_count = 0;
- list_add_tail(&chan->hc_list_entry,
- &hsotg->free_hc_list);
- qtd->in_process = 0;
- qh->channel = NULL;
- return -ENOMEM;
- }
- } else {
- chan->align_buf = 0;
- }
+ dwc2_hc_init_xfer(hsotg, chan, qtd);
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
@@ -968,7 +2766,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
* periodic assigned schedule
*/
qh_ptr = qh_ptr->next;
- list_move(&qh->qh_list_entry, &hsotg->periodic_sched_assigned);
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_assigned);
ret_val = DWC2_TRANSACTION_PERIODIC;
}
@@ -1001,8 +2800,8 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
* non-periodic active schedule
*/
qh_ptr = qh_ptr->next;
- list_move(&qh->qh_list_entry,
- &hsotg->non_periodic_sched_active);
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->non_periodic_sched_active);
if (ret_val == DWC2_TRANSACTION_NONE)
ret_val = DWC2_TRANSACTION_NON_PERIODIC;
@@ -1043,6 +2842,11 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
{
int retval = 0;
+ if (chan->do_split)
+ /* Put ourselves on the list to keep order straight */
+ list_move_tail(&chan->split_order_list_entry,
+ &hsotg->split_order);
+
if (hsotg->core_params->dma_enable > 0) {
if (hsotg->core_params->dma_desc_enable > 0) {
if (!chan->xfer_started ||
@@ -1102,10 +2906,14 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
u32 fspcavail;
u32 gintmsk;
int status;
- int no_queue_space = 0;
- int no_fifo_space = 0;
+ bool no_queue_space = false;
+ bool no_fifo_space = false;
u32 qspcavail;
+ /* If empty list then just adjust interrupt enables */
+ if (list_empty(&hsotg->periodic_sched_assigned))
+ goto exit;
+
if (dbg_perio())
dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
@@ -1175,50 +2983,40 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
* Move the QH from the periodic assigned schedule to
* the periodic queued schedule
*/
- list_move(&qh->qh_list_entry,
- &hsotg->periodic_sched_queued);
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_queued);
/* done queuing high bandwidth */
hsotg->queuing_high_bandwidth = 0;
}
}
- if (hsotg->core_params->dma_enable <= 0) {
- tx_status = dwc2_readl(hsotg->regs + HPTXSTS);
- qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
- TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
- TXSTS_FSPCAVAIL_SHIFT;
- if (dbg_perio()) {
- dev_vdbg(hsotg->dev,
- " P Tx Req Queue Space Avail (after queue): %d\n",
- qspcavail);
- dev_vdbg(hsotg->dev,
- " P Tx FIFO Space Avail (after queue): %d\n",
- fspcavail);
- }
-
- if (!list_empty(&hsotg->periodic_sched_assigned) ||
- no_queue_space || no_fifo_space) {
- /*
- * May need to queue more transactions as the request
- * queue or Tx FIFO empties. Enable the periodic Tx
- * FIFO empty interrupt. (Always use the half-empty
- * level to ensure that new requests are loaded as
- * soon as possible.)
- */
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+exit:
+ if (no_queue_space || no_fifo_space ||
+ (hsotg->core_params->dma_enable <= 0 &&
+ !list_empty(&hsotg->periodic_sched_assigned))) {
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the periodic Tx
+ * FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ if (!(gintmsk & GINTSTS_PTXFEMP)) {
gintmsk |= GINTSTS_PTXFEMP;
dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
- } else {
- /*
- * Disable the Tx FIFO empty interrupt since there are
- * no more transactions that need to be queued right
- * now. This function is called from interrupt
- * handlers to queue more transactions as transfer
- * states change.
- */
- gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ }
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+ if (gintmsk & GINTSTS_PTXFEMP) {
gintmsk &= ~GINTSTS_PTXFEMP;
dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
}
@@ -1365,9 +3163,8 @@ void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "Queue Transactions\n");
#endif
/* Process host channels associated with periodic transfers */
- if ((tr_type == DWC2_TRANSACTION_PERIODIC ||
- tr_type == DWC2_TRANSACTION_ALL) &&
- !list_empty(&hsotg->periodic_sched_assigned))
+ if (tr_type == DWC2_TRANSACTION_PERIODIC ||
+ tr_type == DWC2_TRANSACTION_ALL)
dwc2_process_periodic_channels(hsotg);
/* Process host channels associated with non-periodic transfers */
@@ -1947,6 +3744,35 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
}
+int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us)
+{
+ u32 hprt = dwc2_readl(hsotg->regs + HPRT0);
+ u32 hfir = dwc2_readl(hsotg->regs + HFIR);
+ u32 hfnum = dwc2_readl(hsotg->regs + HFNUM);
+ unsigned int us_per_frame;
+ unsigned int frame_number;
+ unsigned int remaining;
+ unsigned int interval;
+ unsigned int phy_clks;
+
+ /* High speed has 125 us per (micro)frame; others have 1 ms per frame */
+ us_per_frame = (hprt & HPRT0_SPD_MASK) ? 1000 : 125;
+
+ /* Extract fields */
+ frame_number = (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
+ remaining = (hfnum & HFNUM_FRREM_MASK) >> HFNUM_FRREM_SHIFT;
+ interval = (hfir & HFIR_FRINT_MASK) >> HFIR_FRINT_SHIFT;
+
+ /*
+ * Number of phy clocks that will have elapsed since the last frame
+ * number tick once "us" microseconds have passed.
+ */
+ phy_clks = (interval - remaining) +
+ DIV_ROUND_UP(interval * us, us_per_frame);
+
+ return dwc2_frame_num_inc(frame_number, phy_clks / interval);
+}
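
A worked example of the arithmetic above, with hypothetical full-speed
register snapshots (a 48 MHz PHY clock, so HFIR holds 48000 clocks per 1 ms
frame); starting 75% of the way through frame 100, looking 500 us ahead
lands one full frame later:

#include <stdio.h>

#define HFNUM_MAX_FRNUM 0x3fff

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

static unsigned int frame_num_inc(unsigned int frame, unsigned int inc)
{
	return (frame + inc) & HFNUM_MAX_FRNUM;
}

int main(void)
{
	/* Hypothetical FS snapshot: 48 MHz PHY, 1 ms frames */
	unsigned int us_per_frame = 1000;
	unsigned int interval = 48000;	/* HFIR: PHY clocks per frame */
	unsigned int remaining = 12000;	/* HFNUM: clocks left in frame */
	unsigned int frame_number = 100;
	unsigned int us = 500;

	unsigned int phy_clks = (interval - remaining) +
				div_round_up(interval * us, us_per_frame);

	/* (48000 - 12000) + 24000 = 60000 clocks -> 1 frame ahead */
	printf("future frame = %u\n",
	       frame_num_inc(frame_number, phy_clks / interval));
	return 0;
}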
+
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
{
return hsotg->op_state == OTG_STATE_B_HOST;
@@ -2223,6 +4049,90 @@ void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
*hub_port = urb->dev->ttport;
}
+/**
+ * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
+ *
+ * This will get the dwc2_tt structure (and ttport) associated with the given
+ * context (which is really just a struct urb pointer).
+ *
+ * The first time this is called for a given TT we allocate memory for our
+ * structure. When everyone is done and has called dwc2_host_put_tt_info()
+ * then the refcount for the structure will go to 0 and we'll free it.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: The QH structure.
+ * @context: The priv pointer from a struct dwc2_hcd_urb.
+ * @mem_flags: Flags for allocating memory.
+ * @ttport: We'll return this device's port number here. That's used to
+ * reference into the bitmap if we're on a multi_tt hub.
+ *
+ * Return: a pointer to a struct dwc2_tt. Don't forget to call
+ * dwc2_host_put_tt_info()! Returns NULL upon memory alloc failure.
+ */
+
+struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, void *context,
+ gfp_t mem_flags, int *ttport)
+{
+ struct urb *urb = context;
+ struct dwc2_tt *dwc_tt = NULL;
+
+ if (urb->dev->tt) {
+ *ttport = urb->dev->ttport;
+
+ dwc_tt = urb->dev->tt->hcpriv;
+ if (dwc_tt == NULL) {
+ size_t bitmap_size;
+
+ /*
+ * For single_tt we need one schedule. For multi_tt
+ * we need one per port.
+ */
+ bitmap_size = DWC2_ELEMENTS_PER_LS_BITMAP *
+ sizeof(dwc_tt->periodic_bitmaps[0]);
+ if (urb->dev->tt->multi)
+ bitmap_size *= urb->dev->tt->hub->maxchild;
+
+ dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size,
+ mem_flags);
+ if (dwc_tt == NULL)
+ return NULL;
+
+ dwc_tt->usb_tt = urb->dev->tt;
+ dwc_tt->usb_tt->hcpriv = dwc_tt;
+ }
+
+ dwc_tt->refcount++;
+ }
+
+ return dwc_tt;
+}
+
+/**
+ * dwc2_host_put_tt_info() - Put the dwc2_tt from dwc2_host_get_tt_info()
+ *
+ * Frees resources allocated by dwc2_host_get_tt_info() if all current holders
+ * of the structure are done.
+ *
+ * It's OK to call this with NULL.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @dwc_tt: The pointer returned by dwc2_host_get_tt_info.
+ */
+void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt)
+{
+ /* Modeled on kfree(): a put of NULL is a no-op */
+ if (dwc_tt == NULL)
+ return;
+
+ WARN_ON(dwc_tt->refcount < 1);
+
+ dwc_tt->refcount--;
+ if (!dwc_tt->refcount) {
+ dwc_tt->usb_tt->hcpriv = NULL;
+ kfree(dwc_tt);
+ }
+}
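
A minimal usage sketch of the get/put pairing (a hypothetical caller, not
part of this patch; qh->dwc_tt and qh->ttport are the struct dwc2_qh fields
added below, urb is a struct dwc2_hcd_urb whose priv holds the struct urb,
and mem_flags comes from the caller's context):

	int ttport;
	struct dwc2_tt *dwc_tt;

	/* May allocate on first use; always bumps the refcount */
	dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv, mem_flags, &ttport);
	qh->dwc_tt = dwc_tt;
	qh->ttport = ttport;

	/* ... consult dwc_tt->periodic_bitmaps while scheduling ... */

	/* On teardown; safe even if dwc_tt is NULL (no TT, or alloc failed) */
	dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
	qh->dwc_tt = NULL;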
+
int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
{
struct urb *urb = context;
@@ -2334,9 +4244,7 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
kfree(qtd->urb);
qtd->urb = NULL;
- spin_unlock(&hsotg->lock);
usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
- spin_lock(&hsotg->lock);
}
/*
@@ -2789,6 +4697,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
fail3:
dwc2_urb->priv = NULL;
usb_hcd_unlink_urb_from_ep(hcd, urb);
+ if (qh_allocated && qh->channel && qh->channel->qh == qh)
+ qh->channel->qh = NULL;
fail2:
spin_unlock_irqrestore(&hsotg->lock, flags);
urb->hcpriv = NULL;
@@ -2955,7 +4865,7 @@ static struct hc_driver dwc2_hc_driver = {
.hcd_priv_size = sizeof(struct wrapper_priv_data),
.irq = _dwc2_hcd_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
.start = _dwc2_hcd_start,
.stop = _dwc2_hcd_stop,
@@ -2971,6 +4881,9 @@ static struct hc_driver dwc2_hc_driver = {
.bus_suspend = _dwc2_hcd_suspend,
.bus_resume = _dwc2_hcd_resume,
+
+ .map_urb_for_dma = dwc2_map_urb_for_dma,
+ .unmap_urb_for_dma = dwc2_unmap_urb_for_dma,
};
/*
@@ -3081,8 +4994,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
FRAME_NUM_ARRAY_SIZE, GFP_KERNEL);
if (!hsotg->last_frame_num_array)
goto error1;
- hsotg->last_frame_num = HFNUM_MAX_FRNUM;
#endif
+ hsotg->last_frame_num = HFNUM_MAX_FRNUM;
/* Check if the bus driver or platform code has setup a dma_mask */
if (hsotg->core_params->dma_enable > 0 &&
@@ -3146,6 +5059,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
INIT_LIST_HEAD(&hsotg->periodic_sched_assigned);
INIT_LIST_HEAD(&hsotg->periodic_sched_queued);
+ INIT_LIST_HEAD(&hsotg->split_order);
+
/*
* Create a host channel descriptor for each host channel implemented
* in the controller. Initialize the channel descriptor array.
@@ -3159,12 +5074,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
if (channel == NULL)
goto error3;
channel->hc_num = i;
+ INIT_LIST_HEAD(&channel->split_order_list_entry);
hsotg->hc_ptr_array[i] = channel;
}
- if (hsotg->core_params->uframe_sched > 0)
- dwc2_hcd_init_usecs(hsotg);
-
/* Initialize hsotg start work */
INIT_DELAYED_WORK(&hsotg->start_work, dwc2_hcd_start_func);
@@ -3317,3 +5230,67 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
kfree(hsotg->frame_num_array);
#endif
}
+
+/**
+ * dwc2_backup_host_registers() - Backup controller host registers.
+ * When suspending the USB bus, the host registers need to be backed
+ * up if controller power is disabled once suspended.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hregs_backup *hr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Backup Host regs */
+ hr = &hsotg->hr_backup;
+ hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
+ hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
+ for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
+
+ hr->hprt0 = dwc2_read_hprt0(hsotg);
+ hr->hfir = dwc2_readl(hsotg->regs + HFIR);
+ hr->valid = true;
+
+ return 0;
+}
+
+/**
+ * dwc2_restore_host_registers() - Restore controller host registers.
+ * When resuming the USB bus, the host registers need to be restored
+ * if controller power was disabled.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ */
+int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hregs_backup *hr;
+ int i;
+
+ dev_dbg(hsotg->dev, "%s\n", __func__);
+
+ /* Restore host regs */
+ hr = &hsotg->hr_backup;
+ if (!hr->valid) {
+ dev_err(hsotg->dev, "%s: no host registers to restore\n",
+ __func__);
+ return -EINVAL;
+ }
+ hr->valid = false;
+
+ dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
+ dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
+
+ for (i = 0; i < hsotg->core_params->host_channels; ++i)
+ dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
+
+ dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
+ dwc2_writel(hr->hfir, hsotg->regs + HFIR);
+ hsotg->frame_number = 0;
+
+ return 0;
+}
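
A hypothetical pairing sketch (not from this patch) showing how the two
helpers bracket a power-gated suspend/resume cycle; the power-gating steps
are placeholders for platform-specific code:

static int example_host_suspend(struct dwc2_hsotg *hsotg)
{
	int ret = dwc2_backup_host_registers(hsotg);

	if (ret)
		return ret;

	/* ... platform-specific: gate controller power here ... */
	return 0;
}

static int example_host_resume(struct dwc2_hsotg *hsotg)
{
	/* ... platform-specific: ungate controller power here ... */

	/* Fails with -EINVAL if there is no valid backup to restore */
	return dwc2_restore_host_registers(hsotg);
}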
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 8f0a29cefdf7..89fa26cb25f4 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -75,8 +75,6 @@ struct dwc2_qh;
* (micro)frame
* @xfer_buf: Pointer to current transfer buffer position
* @xfer_dma: DMA address of xfer_buf
- * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
- * DWORD aligned
* @xfer_len: Total number of bytes to transfer
* @xfer_count: Number of bytes transferred so far
* @start_pkt_count: Packet count at start of transfer
@@ -108,6 +106,7 @@ struct dwc2_qh;
* @hc_list_entry: For linking to list of host channels
* @desc_list_addr: Current QH's descriptor list DMA address
* @desc_list_sz: Current QH's descriptor list size
+ * @split_order_list_entry: List entry for keeping track of the order of splits
*
* This structure represents the state of a single host channel when acting in
* host mode. It contains the data items needed to transfer packets to an
@@ -133,7 +132,6 @@ struct dwc2_host_chan {
u8 *xfer_buf;
dma_addr_t xfer_dma;
- dma_addr_t align_buf;
u32 xfer_len;
u32 xfer_count;
u16 start_pkt_count;
@@ -161,6 +159,7 @@ struct dwc2_host_chan {
struct list_head hc_list_entry;
dma_addr_t desc_list_addr;
u32 desc_list_sz;
+ struct list_head split_order_list_entry;
};
struct dwc2_hcd_pipe_info {
@@ -213,9 +212,47 @@ enum dwc2_transaction_type {
DWC2_TRANSACTION_ALL,
};
+/* The number of elements per LS bitmap (per port on multi_tt) */
+#define DWC2_ELEMENTS_PER_LS_BITMAP DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, \
+ BITS_PER_LONG)
+
+/**
+ * struct dwc2_tt - dwc2 data associated with a usb_tt
+ *
+ * @refcount: Number of Queue Heads (QHs) holding a reference.
+ * @usb_tt: Pointer back to the official usb_tt.
+ * @periodic_bitmaps: Bitmaps showing which parts of the 1 ms frame are
+ * already accounted for. Each is DWC2_ELEMENTS_PER_LS_BITMAP
+ * elements (so sizeof(long) times that in bytes).
+ *
+ * This structure is stored in the hcpriv of the official usb_tt.
+ */
+struct dwc2_tt {
+ int refcount;
+ struct usb_tt *usb_tt;
+ unsigned long periodic_bitmaps[];
+};
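
A quick sizing calculation for the flexible periodic_bitmaps[] array,
mirroring the allocation in dwc2_host_get_tt_info() above. The slice count
used here is purely illustrative; the real DWC2_LS_SCHEDULE_SLICES value is
defined elsewhere in hcd.h:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_PER_LONG		(8 * (int)sizeof(long))

/* Illustrative only; the real value comes from hcd.h */
#define DWC2_LS_SCHEDULE_SLICES	32

#define DWC2_ELEMENTS_PER_LS_BITMAP \
	DIV_ROUND_UP(DWC2_LS_SCHEDULE_SLICES, BITS_PER_LONG)

int main(void)
{
	/* Single TT: one schedule; multi TT: one schedule per port */
	size_t per_port = DWC2_ELEMENTS_PER_LS_BITMAP * sizeof(long);

	printf("single_tt: %zu bytes\n", per_port);
	printf("multi_tt, 4-port hub: %zu bytes\n", per_port * 4);
	return 0;
}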
+
+/**
+ * struct dwc2_hs_transfer_time - Info about a transfer on the high speed bus.
+ *
+ * @start_schedule_us: The start time on the main bus schedule. Note that
+ * the main bus schedule is tightly packed and this
+ * time should be interpreted as tightly packed (so
+ * uFrame 0 starts at 0 us, uFrame 1 starts at 100 us
+ * instead of 125 us).
+ * @duration_us: How long this transfer takes, in microseconds.
+ */
+
+struct dwc2_hs_transfer_time {
+ u32 start_schedule_us;
+ u16 duration_us;
+};
+
/**
* struct dwc2_qh - Software queue head structure
*
+ * @hsotg: The HCD state structure for the DWC OTG controller
* @ep_type: Endpoint type. One of the following values:
* - USB_ENDPOINT_XFER_CONTROL
* - USB_ENDPOINT_XFER_BULK
@@ -236,17 +273,35 @@ enum dwc2_transaction_type {
* @do_split: Full/low speed endpoint on high-speed hub requires split
* @td_first: Index of first activated isochronous transfer descriptor
* @td_last: Index of last activated isochronous transfer descriptor
- * @usecs: Bandwidth in microseconds per (micro)frame
- * @interval: Interval between transfers in (micro)frames
- * @sched_frame: (Micro)frame to initialize a periodic transfer.
- * The transfer executes in the following (micro)frame.
- * @frame_usecs: Internal variable used by the microframe scheduler
- * @start_split_frame: (Micro)frame at which last start split was initialized
+ * @host_us: Bandwidth in microseconds per transfer as seen by host
+ * @device_us: Bandwidth in microseconds per transfer as seen by device
+ * @host_interval: Interval between transfers as seen by the host. If
+ * the host is high speed and the device is low speed this
+ * will be 8 times device interval.
+ * @device_interval: Interval between transfers as seen by the device.
+ * @next_active_frame: (Micro)frame _before_ we next need to put something on
+ * the bus. We'll move the qh to active here. If the
+ * host is in high speed mode this will be a uframe. If
+ * the host is in low speed mode this will be a full frame.
+ * @start_active_frame: If we are partway through a split transfer, this will be
+ * what next_active_frame was when we started. Otherwise
+ * it should always be the same as next_active_frame.
+ * @num_hs_transfers: Number of transfers in hs_transfers.
+ * Normally this is 1 but can be more than one for splits.
+ * Always >= 1 unless the host is in low/full speed mode.
+ * @hs_transfers: Transfers that are scheduled as seen by the high speed
+ * bus. Not used if host is in low or full speed mode (but
+ * note that it IS USED if the device is low or full speed
+ * as long as the HOST is in high speed mode).
+ * @ls_start_schedule_slice: Start time (in slices) on the low speed bus
+ * schedule that's being used by this device. This
+ * will be on the periodic_bitmap in a
+ * "struct dwc2_tt". Not used if this device is high
+ * speed. Note that this is in "schedule slices",
+ * which are tightly packed.
+ * @ls_duration_us: Duration on the low speed bus schedule.
* @ntd: Actual number of transfer descriptors in a list
- * @dw_align_buf: Used instead of original buffer if its physical address
- * is not dword-aligned
- * @dw_align_buf_size: Size of dw_align_buf
- * @dw_align_buf_dma: DMA address for dw_align_buf
* @qtd_list: List of QTDs for this QH
* @channel: Host channel currently processing transfers for this QH
* @qh_list_entry: Entry for QH in either the periodic or non-periodic
@@ -257,13 +312,20 @@ enum dwc2_transaction_type {
* @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
* descriptor and indicates original XferSize value for the
* descriptor
+ * @unreserve_timer: Timer for releasing periodic reservation.
+ * @dwc_tt: Pointer to our tt info (or NULL if no tt).
+ * @ttport: Port number within our tt.
* @tt_buffer_dirty: True if clear_tt_buffer_complete is pending
+ * @unreserve_pending: True if we planned to unreserve but haven't yet.
+ * @schedule_low_speed: True if we have a low/full speed component (either the
+ * host is in low/full speed mode or do_split).
*
* A Queue Head (QH) holds the static characteristics of an endpoint and
* maintains a list of transfers (QTDs) for that endpoint. A QH structure may
* be entered in either the non-periodic or periodic schedule.
*/
struct dwc2_qh {
+ struct dwc2_hsotg *hsotg;
u8 ep_type;
u8 ep_is_in;
u16 maxp;
@@ -273,15 +335,16 @@ struct dwc2_qh {
u8 do_split;
u8 td_first;
u8 td_last;
- u16 usecs;
- u16 interval;
- u16 sched_frame;
- u16 frame_usecs[8];
- u16 start_split_frame;
+ u16 host_us;
+ u16 device_us;
+ u16 host_interval;
+ u16 device_interval;
+ u16 next_active_frame;
+ u16 start_active_frame;
+ s16 num_hs_transfers;
+ struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
+ u32 ls_start_schedule_slice;
u16 ntd;
- u8 *dw_align_buf;
- int dw_align_buf_size;
- dma_addr_t dw_align_buf_dma;
struct list_head qtd_list;
struct dwc2_host_chan *channel;
struct list_head qh_list_entry;
@@ -289,7 +352,12 @@ struct dwc2_qh {
dma_addr_t desc_list_dma;
u32 desc_list_sz;
u32 *n_bytes;
+ struct timer_list unreserve_timer;
+ struct dwc2_tt *dwc_tt;
+ int ttport;
unsigned tt_buffer_dirty:1;
+ unsigned unreserve_pending:1;
+ unsigned schedule_low_speed:1;
};
/**
@@ -362,6 +430,8 @@ struct hc_xfer_info {
};
#endif
+u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg);
+
/* Gets the struct usb_hcd that contains a struct dwc2_hsotg */
static inline struct usb_hcd *dwc2_hsotg_to_hcd(struct dwc2_hsotg *hsotg)
{
@@ -383,6 +453,12 @@ static inline void disable_hc_int(struct dwc2_hsotg *hsotg, int chnum, u32 intr)
dwc2_writel(mask, hsotg->regs + HCINTMSK(chnum));
}
+void dwc2_hc_cleanup(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan);
+void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
+ enum dwc2_halt_status halt_status);
+void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
+ struct dwc2_host_chan *chan);
+
/*
* Reads HPRT0 in preparation to modify. It keeps the WC bits 0 so that if they
* are read as 1, they won't clear when written back.
@@ -456,7 +532,6 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
/* Schedule Queue Functions */
/* Implemented in hcd_queue.c */
-extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
struct dwc2_hcd_urb *urb,
gfp_t mem_flags);
@@ -571,6 +646,11 @@ static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
return (frame + inc) & HFNUM_MAX_FRNUM;
}
+static inline u16 dwc2_frame_num_dec(u16 frame, u16 dec)
+{
+ return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
+}
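
The new dwc2_frame_num_dec() is the modular inverse of dwc2_frame_num_inc()
over the 14-bit frame space; adding HFNUM_MAX_FRNUM + 1 before subtracting
keeps the intermediate value non-negative. A quick standalone check:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HFNUM_MAX_FRNUM 0x3fff

static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
	return (frame + inc) & HFNUM_MAX_FRNUM;
}

static uint16_t frame_num_dec(uint16_t frame, uint16_t dec)
{
	return (frame + HFNUM_MAX_FRNUM + 1 - dec) & HFNUM_MAX_FRNUM;
}

int main(void)
{
	/* dec undoes inc, even across the 0x3fff -> 0 wrap */
	assert(frame_num_inc(0x3fff, 5) == 4);
	assert(frame_num_dec(4, 5) == 0x3fff);
	assert(frame_num_dec(2, 5) == 0x3ffd);
	printf("frame arithmetic wraps as expected\n");
	return 0;
}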
+
static inline u16 dwc2_full_frame_num(u16 frame)
{
return (frame & HFNUM_MAX_FRNUM) >> 3;
@@ -648,7 +728,7 @@ static inline u16 dwc2_hcd_get_ep_bandwidth(struct dwc2_hsotg *hsotg,
return 0;
}
- return qh->usecs;
+ return qh->host_us;
}
extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
@@ -717,6 +797,12 @@ extern void dwc2_host_start(struct dwc2_hsotg *hsotg);
extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
int *hub_addr, int *hub_port);
+extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
+ void *context, gfp_t mem_flags,
+ int *ttport);
+
+extern void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg,
+ struct dwc2_tt *dwc_tt);
extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
int status);
@@ -739,7 +825,7 @@ do { \
_qtd_ = list_entry((_qh_)->qtd_list.next, struct dwc2_qtd, \
qtd_list_entry); \
if (usb_pipeint(_qtd_->urb->pipe) && \
- (_qh_)->start_split_frame != 0 && !_qtd_->complete_split) { \
+ (_qh_)->start_active_frame != 0 && !_qtd_->complete_split) { \
_hfnum_.d32 = dwc2_readl((_hcd_)->regs + HFNUM); \
switch (_hfnum_.b.frnum & 0x7) { \
case 7: \
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index a41274aa52ad..0e1d42b5dec5 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -81,7 +81,7 @@ static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
return qh->dev_speed == USB_SPEED_HIGH ?
- (qh->interval + 8 - 1) / 8 : qh->interval;
+ (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
}
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
@@ -111,7 +111,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
dma_unmap_single(hsotg->dev, qh->desc_list_dma,
qh->desc_list_sz,
DMA_FROM_DEVICE);
- kfree(qh->desc_list);
+ kmem_cache_free(desc_cache, qh->desc_list);
qh->desc_list = NULL;
return -ENOMEM;
}
@@ -252,7 +252,7 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
chan = qh->channel;
inc = dwc2_frame_incr_val(qh);
if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
- i = dwc2_frame_list_idx(qh->sched_frame);
+ i = dwc2_frame_list_idx(qh->next_active_frame);
else
i = 0;
@@ -278,13 +278,13 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
return;
chan->schinfo = 0;
- if (chan->speed == USB_SPEED_HIGH && qh->interval) {
+ if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
j = 1;
/* TODO - check this */
- inc = (8 + qh->interval - 1) / qh->interval;
+ inc = (8 + qh->host_interval - 1) / qh->host_interval;
for (i = 0; i < inc; i++) {
chan->schinfo |= j;
- j = j << qh->interval;
+ j = j << qh->host_interval;
}
} else {
chan->schinfo = 0xff;
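
The schinfo loop above spreads a high-speed periodic transfer across the
eight microframes of a frame. For a hypothetical endpoint with
host_interval = 2 it sets every second bit, i.e. microframes 0, 2, 4 and 6;
the same loop, lifted into a runnable sketch:

#include <stdio.h>

int main(void)
{
	unsigned int host_interval = 2;	/* hypothetical HS interval */
	unsigned int inc = (8 + host_interval - 1) / host_interval;
	unsigned int schinfo = 0, j = 1, i;

	for (i = 0; i < inc; i++) {
		schinfo |= j;
		j = j << host_interval;
	}
	printf("schinfo = 0x%02x\n", schinfo);	/* prints 0x55 */
	return 0;
}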
@@ -431,7 +431,10 @@ static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
- /* sched_frame is always frame number (not uFrame) both in FS and HS! */
+ /*
+ * next_active_frame is always frame number (not uFrame) both in FS
+ * and HS!
+ */
/*
* skip_frames is used to limit activated descriptors number
@@ -514,13 +517,13 @@ static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
*/
fr_idx_tmp = dwc2_frame_list_idx(frame);
fr_idx = (FRLISTEN_64_SIZE +
- dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
- % dwc2_frame_incr_val(qh);
+ dwc2_frame_list_idx(qh->next_active_frame) -
+ fr_idx_tmp) % dwc2_frame_incr_val(qh);
fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
} else {
- qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
+ qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
&skip_frames);
- fr_idx = dwc2_frame_list_idx(qh->sched_frame);
+ fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
}
qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
@@ -583,7 +586,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
u16 next_idx;
idx = qh->td_last;
- inc = qh->interval;
+ inc = qh->host_interval;
hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
@@ -605,11 +608,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
}
}
- if (qh->interval) {
- ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
- qh->interval;
+ if (qh->host_interval) {
+ ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
+ qh->host_interval;
if (skip_frames && !qh->channel)
- ntd_max -= skip_frames / qh->interval;
+ ntd_max -= skip_frames / qh->host_interval;
}
max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
@@ -1029,7 +1032,7 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
idx);
if (rc < 0)
return;
- idx = dwc2_desclist_idx_inc(idx, qh->interval,
+ idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
chan->speed);
if (!rc)
continue;
@@ -1039,7 +1042,7 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
/* rc == DWC2_CMPL_STOP */
- if (qh->interval >= 32)
+ if (qh->host_interval >= 32)
goto stop_scan;
qh->td_first = idx;
@@ -1242,8 +1245,10 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
for (i = 0; i < qtd_desc_count; i++) {
if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
desc_num, halt_status,
- &xfer_done))
+ &xfer_done)) {
+ qtd = NULL;
goto stop_scan;
+ }
desc_num++;
}
@@ -1258,7 +1263,7 @@ stop_scan:
if (halt_status == DWC2_HC_XFER_STALL)
qh->data_toggle = DWC2_HC_PID_DATA0;
else
- dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
}
if (halt_status == DWC2_HC_XFER_COMPLETE) {
@@ -1326,8 +1331,8 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
dwc2_hcd_qh_unlink(hsotg, qh);
} else {
/* Keep in assigned schedule to continue transfer */
- list_move(&qh->qh_list_entry,
- &hsotg->periodic_sched_assigned);
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_assigned);
/*
* If channel has been halted during giveback of urb
* then prevent any new scheduling.
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index cadba8b13c48..906f223542ee 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -55,12 +55,16 @@
/* This function is for debug only */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
-#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
u16 curr_frame_number = hsotg->frame_number;
+ u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);
+
+ if (expected != curr_frame_number)
+ dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
+ expected, curr_frame_number);
+#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
- if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
- curr_frame_number) {
+ if (expected != curr_frame_number) {
hsotg->frame_num_array[hsotg->frame_num_idx] =
curr_frame_number;
hsotg->last_frame_num_array[hsotg->frame_num_idx] =
@@ -79,14 +83,15 @@ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
}
hsotg->dumped_frame_num_array = 1;
}
- hsotg->last_frame_num = curr_frame_number;
#endif
+ hsotg->last_frame_num = curr_frame_number;
}
static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
struct dwc2_host_chan *chan,
struct dwc2_qtd *qtd)
{
+ struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
struct urb *usb_urb;
if (!chan->qh)
@@ -102,6 +107,15 @@ static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
return;
+ /*
+ * The root hub doesn't really have a TT, but Linux thinks it
+ * does because how could you have a "high speed hub" that
+ * talks directly to low speed devices without a TT?
+ * It's all lies. Lies, I tell you.
+ */
+ if (usb_urb->dev->tt->hub == root_hub)
+ return;
+
if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
chan->qh->tt_buffer_dirty = 1;
if (usb_hub_clear_tt_buffer(usb_urb))
@@ -138,13 +152,19 @@ static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
while (qh_entry != &hsotg->periodic_sched_inactive) {
qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
qh_entry = qh_entry->next;
- if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
+ if (dwc2_frame_num_le(qh->next_active_frame,
+ hsotg->frame_number)) {
+ dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n",
+ qh, hsotg->frame_number,
+ qh->next_active_frame);
+
/*
* Move QH to the ready list to be executed next
* (micro)frame
*/
- list_move(&qh->qh_list_entry,
+ list_move_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_ready);
+ }
}
tr_type = dwc2_hcd_select_transactions(hsotg);
if (tr_type != DWC2_TRANSACTION_NONE)
@@ -472,18 +492,6 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
xfer_length = urb->length - urb->actual_length;
}
- /* Non DWORD-aligned buffer case handling */
- if (chan->align_buf && xfer_length) {
- dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
- dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
- chan->qh->dw_align_buf_size,
- chan->ep_is_in ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (chan->ep_is_in)
- memcpy(urb->buf + urb->actual_length,
- chan->qh->dw_align_buf, xfer_length);
- }
-
dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
urb->actual_length, xfer_length);
urb->actual_length += xfer_length;
@@ -573,21 +581,6 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
frame_desc->status = 0;
frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
chan, chnum, qtd, halt_status, NULL);
-
- /* Non DWORD-aligned buffer case handling */
- if (chan->align_buf && frame_desc->actual_length) {
- dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
- __func__);
- dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
- chan->qh->dw_align_buf_size,
- chan->ep_is_in ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (chan->ep_is_in)
- memcpy(urb->buf + frame_desc->offset +
- qtd->isoc_split_offset,
- chan->qh->dw_align_buf,
- frame_desc->actual_length);
- }
break;
case DWC2_HC_XFER_FRAME_OVERRUN:
urb->error_count++;
@@ -608,21 +601,6 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
chan, chnum, qtd, halt_status, NULL);
- /* Non DWORD-aligned buffer case handling */
- if (chan->align_buf && frame_desc->actual_length) {
- dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
- __func__);
- dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
- chan->qh->dw_align_buf_size,
- chan->ep_is_in ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (chan->ep_is_in)
- memcpy(urb->buf + frame_desc->offset +
- qtd->isoc_split_offset,
- chan->qh->dw_align_buf,
- frame_desc->actual_length);
- }
-
/* Skip whole frame */
if (chan->qh->do_split &&
chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
@@ -688,8 +666,6 @@ static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
}
no_qtd:
- if (qh->channel)
- qh->channel->align_buf = 0;
qh->channel = NULL;
dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
}
@@ -846,7 +822,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
* halt to be queued when the periodic schedule is
* processed.
*/
- list_move(&chan->qh->qh_list_entry,
+ list_move_tail(&chan->qh->qh_list_entry,
&hsotg->periodic_sched_assigned);
/*
@@ -954,14 +930,6 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc->actual_length += len;
- if (chan->align_buf) {
- dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
- dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
- chan->qh->dw_align_buf_size, DMA_FROM_DEVICE);
- memcpy(qtd->urb->buf + frame_desc->offset +
- qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
- }
-
qtd->isoc_split_offset += len;
if (frame_desc->actual_length >= frame_desc->length) {
@@ -1184,19 +1152,6 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
xfer_length = urb->length - urb->actual_length;
}
- /* Non DWORD-aligned buffer case handling */
- if (chan->align_buf && xfer_length && chan->ep_is_in) {
- dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
- dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
- chan->qh->dw_align_buf_size,
- chan->ep_is_in ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- if (chan->ep_is_in)
- memcpy(urb->buf + urb->actual_length,
- chan->qh->dw_align_buf,
- xfer_length);
- }
-
urb->actual_length += xfer_length;
hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
@@ -1416,14 +1371,50 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
- int frnum = dwc2_hcd_get_frame_number(hsotg);
+ struct dwc2_qh *qh = chan->qh;
+ bool past_end;
+
+ if (hsotg->core_params->uframe_sched <= 0) {
+ int frnum = dwc2_hcd_get_frame_number(hsotg);
+
+ /* Don't have num_hs_transfers; simple logic */
+ past_end = dwc2_full_frame_num(frnum) !=
+ dwc2_full_frame_num(qh->next_active_frame);
+ } else {
+ int end_frnum;
- if (dwc2_full_frame_num(frnum) !=
- dwc2_full_frame_num(chan->qh->sched_frame)) {
/*
- * No longer in the same full speed frame.
- * Treat this as a transaction error.
- */
+ * Figure out the end frame based on schedule.
+ *
+ * We don't want to go on trying again and again
+ * forever. Let's stop when we've done all the
+ * transfers that were scheduled.
+ *
+ * We're going to be comparing start_active_frame
+ * and next_active_frame, both of which are 1
+ * before the time the packet goes on the wire,
+ * so that cancels out. Basically if we had 1
+ * transfer and we saw 1 NYET then we're done.
+ * We're getting a NYET here so if next >=
+ * (start + num_transfers) we're done. The
+ * complexity is that for all but ISOC_OUT we
+ * skip one slot.
+ */
+ end_frnum = dwc2_frame_num_inc(
+ qh->start_active_frame,
+ qh->num_hs_transfers);
+
+ if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
+ qh->ep_is_in)
+ end_frnum =
+ dwc2_frame_num_inc(end_frnum, 1);
+
+ past_end = dwc2_frame_num_le(
+ end_frnum, qh->next_active_frame);
+ }
+
+ if (past_end) {
+ /* Treat this as a transaction error. */
#if 0
/*
* Todo: Fix system performance so this can
@@ -2008,6 +1999,16 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
}
dwc2_writel(hcint, hsotg->regs + HCINT(chnum));
+
+ /*
+ * If we got an interrupt after someone called
+ * dwc2_hcd_endpoint_disable() we don't want to crash below
+ */
+ if (!chan->qh) {
+ dev_warn(hsotg->dev, "Interrupt on disabled channel\n");
+ return;
+ }
+
chan->hcint = hcint;
hcint &= hcintmsk;
@@ -2130,6 +2131,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
{
u32 haint;
int i;
+ struct dwc2_host_chan *chan, *chan_tmp;
haint = dwc2_readl(hsotg->regs + HAINT);
if (dbg_perio()) {
@@ -2138,6 +2140,22 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
}
+ /*
+ * According to USB 2.0 spec section 11.18.8, a host must
+ * issue complete-split transactions in a microframe for a
+ * set of full-/low-speed endpoints in the same relative
+ * order as the start-splits were issued.
+ */
+ list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order,
+ split_order_list_entry) {
+ int hc_num = chan->hc_num;
+
+ if (haint & (1 << hc_num)) {
+ dwc2_hc_n_intr(hsotg, hc_num);
+ haint &= ~(1 << hc_num);
+ }
+ }
+
for (i = 0; i < hsotg->core_params->host_channels; i++) {
if (haint & (1 << i))
dwc2_hc_n_intr(hsotg, i);
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 27d402f680a3..7f634fd771c7 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -38,6 +38,7 @@
* This file contains the functions to manage Queue Heads and Queue
* Transfer Descriptors for Host mode
*/
+#include <linux/gcd.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -53,194 +54,8 @@
#include "core.h"
#include "hcd.h"
-/**
- * dwc2_qh_init() - Initializes a QH structure
- *
- * @hsotg: The HCD state structure for the DWC OTG controller
- * @qh: The QH to init
- * @urb: Holds the information about the device/endpoint needed to initialize
- * the QH
- */
-#define SCHEDULE_SLOP 10
-static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
- struct dwc2_hcd_urb *urb)
-{
- int dev_speed, hub_addr, hub_port;
- char *speed, *type;
-
- dev_vdbg(hsotg->dev, "%s()\n", __func__);
-
- /* Initialize QH */
- qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
- qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
-
- qh->data_toggle = DWC2_HC_PID_DATA0;
- qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
- INIT_LIST_HEAD(&qh->qtd_list);
- INIT_LIST_HEAD(&qh->qh_list_entry);
-
- /* FS/LS Endpoint on HS Hub, NOT virtual root hub */
- dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
-
- dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
-
- if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
- hub_addr != 0 && hub_addr != 1) {
- dev_vdbg(hsotg->dev,
- "QH init: EP %d: TT found at hub addr %d, for port %d\n",
- dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
- hub_port);
- qh->do_split = 1;
- }
-
- if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
- qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
- /* Compute scheduling parameters once and save them */
- u32 hprt, prtspd;
-
- /* Todo: Account for split transfers in the bus time */
- int bytecount =
- dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
-
- qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ?
- USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
- qh->ep_type == USB_ENDPOINT_XFER_ISOC,
- bytecount));
-
- /* Ensure frame_number corresponds to the reality */
- hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
- /* Start in a slightly future (micro)frame */
- qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
- SCHEDULE_SLOP);
- qh->interval = urb->interval;
-#if 0
- /* Increase interrupt polling rate for debugging */
- if (qh->ep_type == USB_ENDPOINT_XFER_INT)
- qh->interval = 8;
-#endif
- hprt = dwc2_readl(hsotg->regs + HPRT0);
- prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
- if (prtspd == HPRT0_SPD_HIGH_SPEED &&
- (dev_speed == USB_SPEED_LOW ||
- dev_speed == USB_SPEED_FULL)) {
- qh->interval *= 8;
- qh->sched_frame |= 0x7;
- qh->start_split_frame = qh->sched_frame;
- }
- dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
- }
-
- dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
- dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
- dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
- dwc2_hcd_get_dev_addr(&urb->pipe_info));
- dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
- dwc2_hcd_get_ep_num(&urb->pipe_info),
- dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
-
- qh->dev_speed = dev_speed;
-
- switch (dev_speed) {
- case USB_SPEED_LOW:
- speed = "low";
- break;
- case USB_SPEED_FULL:
- speed = "full";
- break;
- case USB_SPEED_HIGH:
- speed = "high";
- break;
- default:
- speed = "?";
- break;
- }
- dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);
-
- switch (qh->ep_type) {
- case USB_ENDPOINT_XFER_ISOC:
- type = "isochronous";
- break;
- case USB_ENDPOINT_XFER_INT:
- type = "interrupt";
- break;
- case USB_ENDPOINT_XFER_CONTROL:
- type = "control";
- break;
- case USB_ENDPOINT_XFER_BULK:
- type = "bulk";
- break;
- default:
- type = "?";
- break;
- }
-
- dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);
-
- if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
- dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
- qh->usecs);
- dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
- qh->interval);
- }
-}
-
-/**
- * dwc2_hcd_qh_create() - Allocates and initializes a QH
- *
- * @hsotg: The HCD state structure for the DWC OTG controller
- * @urb: Holds the information about the device/endpoint needed
- * to initialize the QH
- * @atomic_alloc: Flag to do atomic allocation if needed
- *
- * Return: Pointer to the newly allocated QH, or NULL on error
- */
-struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
- struct dwc2_hcd_urb *urb,
- gfp_t mem_flags)
-{
- struct dwc2_qh *qh;
-
- if (!urb->priv)
- return NULL;
-
- /* Allocate memory */
- qh = kzalloc(sizeof(*qh), mem_flags);
- if (!qh)
- return NULL;
-
- dwc2_qh_init(hsotg, qh, urb);
-
- if (hsotg->core_params->dma_desc_enable > 0 &&
- dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
- dwc2_hcd_qh_free(hsotg, qh);
- return NULL;
- }
-
- return qh;
-}
-
-/**
- * dwc2_hcd_qh_free() - Frees the QH
- *
- * @hsotg: HCD instance
- * @qh: The QH to free
- *
- * QH should already be removed from the list. QTD list should already be empty
- * if called from URB Dequeue.
- *
- * Must NOT be called with interrupt disabled or spinlock held
- */
-void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
-{
- if (qh->desc_list) {
- dwc2_hcd_qh_free_ddma(hsotg, qh);
- } else {
- /* kfree(NULL) is safe */
- kfree(qh->dw_align_buf);
- qh->dw_align_buf_dma = (dma_addr_t)0;
- }
- kfree(qh);
-}
+/* Wait this long before releasing periodic reservation */
+#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
/**
* dwc2_periodic_channel_available() - Checks that a channel is available for a
@@ -301,19 +116,19 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
* High speed mode
* Max periodic usecs is 80% x 125 usec = 100 usec
*/
- max_claimed_usecs = 100 - qh->usecs;
+ max_claimed_usecs = 100 - qh->host_us;
} else {
/*
* Full speed mode
* Max periodic usecs is 90% x 1000 usec = 900 usec
*/
- max_claimed_usecs = 900 - qh->usecs;
+ max_claimed_usecs = 900 - qh->host_us;
}
if (hsotg->periodic_usecs > max_claimed_usecs) {
dev_err(hsotg->dev,
"%s: already claimed usecs %d, required usecs %d\n",
- __func__, hsotg->periodic_usecs, qh->usecs);
+ __func__, hsotg->periodic_usecs, qh->host_us);
status = -ENOSPC;
}
@@ -321,113 +136,1177 @@ static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
}
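
For a concrete feel for this check, here is a minimal userspace sketch with invented numbers; the 100 us and 900 us budgets are the 80%-of-125-us and 90%-of-1-ms limits quoted in the context above.

```c
#include <stdio.h>

int main(void)
{
	int periodic_usecs = 80;	/* already claimed this microframe */
	int host_us = 30;		/* what this QH wants */
	int max_claimed_usecs = 100 - host_us;	/* high speed budget */

	if (periodic_usecs > max_claimed_usecs)
		printf("-ENOSPC: already claimed %d us, required %d us\n",
		       periodic_usecs, host_us);
	else
		printf("OK: %d us still free\n",
		       max_claimed_usecs - periodic_usecs);
	return 0;
}
```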
/**
- * Microframe scheduler
- * track the total use in hsotg->frame_usecs
- * keep each qh use in qh->frame_usecs
- * when surrendering the qh then donate the time back
+ * pmap_schedule() - Schedule time in a periodic bitmap (pmap).
+ *
+ * @map: The bitmap representing the schedule; will be updated
+ * upon success.
+ * @bits_per_period: The schedule represents several periods. This is how many
+ * bits are in each period. It's assumed that the beginning
+ * of the schedule will repeat after its end.
+ * @periods_in_map: The number of periods in the schedule.
+ * @num_bits: The number of bits we need per period we want to reserve
+ * in this function call.
+ * @interval: How often we need to be scheduled for the reservation this
+ * time. 1 means every period. 2 means every other period.
+ * ...you get the picture?
+ * @start: The bit number to start at. Normally 0. Must be within
+ * the interval or we return failure right away.
+ * @only_one_period: Normally we'll allow picking a start anywhere within the
+ * first interval, since we can still make all repetition
+ * requirements by doing that. However, if you pass true
+ * here then we'll return failure if we can't fit within
+ * the period that "start" is in.
+ *
+ * The idea here is that we want to schedule time for repeating events that all
+ * want the same resource. The resource is divided into fixed-sized periods
+ * and the events want to repeat every "interval" periods. The schedule
+ * granularity is one bit.
+ *
+ * To keep things "simple", we'll represent our schedule with a bitmap that
+ * contains a fixed number of periods. This gets rid of a lot of complexity
+ * but does mean that we need to handle things specially (and non-ideally) if
+ * the number of the periods in the schedule doesn't match well with the
+ * intervals that we're trying to schedule.
+ *
+ * Here's an explanation of the scheme we'll implement, assuming 8 periods.
+ * - If interval is 1, we need to take up space in each of the 8
+ * periods we're scheduling. Easy.
+ * - If interval is 2, we need to take up space in half of the
+ * periods. Again, easy.
+ * - If interval is 3, we actually need to fall back to interval 1.
+ * Why? Because we might need time in any period. AKA for the
+ * first 8 periods, we'll be in slot 0, 3, 6. Then we'll be
+ * in slot 1, 4, 7. Then we'll be in 2, 5. Then we'll be back to
+ * 0, 3, and 6. Since we could be in any frame we need to reserve
+ * for all of them. Sucks, but that's what you gotta do. Note that
+ * if we were instead scheduling 8 * 3 = 24 we'd do much better, but
+ * then we need more memory and time to do scheduling.
+ * - If interval is 4, easy.
+ * - If interval is 5, we again need interval 1. The schedule will be
+ * 0, 5, 2, 7, 4, 1, 6, 3, 0
+ * - If interval is 6, we need interval 2. 0, 6, 4, 2.
+ * - If interval is 7, we need interval 1.
+ * - If interval is 8, we need interval 8.
+ *
+ * If you do the math, you'll see that we need to pretend that interval is
+ * equal to the greatest_common_divisor(interval, periods_in_map).
+ *
+ * Note that at the moment this function tends to front-pack the schedule.
+ * In some cases that's really non-ideal (it's hard to schedule things that
+ * need to repeat every period). In other cases it's perfect (you can easily
+ * schedule bigger, less often repeating things).
+ *
+ * Here's the algorithm in action (8 periods, 5 bits per period):
+ * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
+ * |*****| ***|*****| ***|*****| ***|*****| ***| OK 3 bits, intv 3 at 2
+ * |*****|* ***|*****| ***|*****|* ***|*****| ***| OK 1 bits, intv 4 at 5
+ * |** |* |** | |** |* |** | | Remv 3 bits, intv 3 at 2
+ * |*** |* |*** | |*** |* |*** | | OK 1 bits, intv 6 at 2
+ * |**** |* * |**** | * |**** |* * |**** | * | OK 1 bits, intv 1 at 3
+ * |**** |**** |**** | *** |**** |**** |**** | *** | OK 2 bits, intv 2 at 6
+ * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 1 at 4
+ * |*****|*****|*****| ****|*****|*****|*****| ****| FAIL 1 bits, intv 1
+ * | ***|*****| ***| ****| ***|*****| ***| ****| Remv 2 bits, intv 2 at 0
+ * | ***| ****| ***| ****| ***| ****| ***| ****| Remv 1 bits, intv 4 at 5
+ * | **| ****| **| ****| **| ****| **| ****| Remv 1 bits, intv 6 at 2
+ * | *| ** *| *| ** *| *| ** *| *| ** *| Remv 1 bits, intv 1 at 3
+ * | *| *| *| *| *| *| *| *| Remv 2 bits, intv 2 at 6
+ * | | | | | | | | | Remv 1 bits, intv 1 at 4
+ * |** | |** | |** | |** | | OK 2 bits, intv 2 at 0
+ * |*** | |** | |*** | |** | | OK 1 bits, intv 4 at 2
+ * |*****| |** **| |*****| |** **| | OK 2 bits, intv 2 at 3
+ * |*****|* |** **| |*****|* |** **| | OK 1 bits, intv 4 at 5
+ * |*****|*** |** **| ** |*****|*** |** **| ** | OK 2 bits, intv 2 at 6
+ * |*****|*****|** **| ****|*****|*****|** **| ****| OK 2 bits, intv 2 at 8
+ * |*****|*****|*****| ****|*****|*****|*****| ****| OK 1 bits, intv 4 at 12
+ *
+ * This function is pretty generic and could be easily abstracted if anything
+ * needed similar scheduling.
+ *
+ * Returns either -ENOSPC or a >= 0 start bit which should be passed to the
+ * unschedule routine. The map bitmap will be updated on a non-error result.
*/
-static const unsigned short max_uframe_usecs[] = {
- 100, 100, 100, 100, 100, 100, 30, 0
-};
+static int pmap_schedule(unsigned long *map, int bits_per_period,
+ int periods_in_map, int num_bits,
+ int interval, int start, bool only_one_period)
+{
+ int interval_bits;
+ int to_reserve;
+ int first_end;
+ int i;
+
+ if (num_bits > bits_per_period)
+ return -ENOSPC;
+
+ /* Adjust interval as per description */
+ interval = gcd(interval, periods_in_map);
+
+ interval_bits = bits_per_period * interval;
+ to_reserve = periods_in_map / interval;
+
+ /* If start has gotten us past interval then we can't schedule */
+ if (start >= interval_bits)
+ return -ENOSPC;
+
+ if (only_one_period)
+ /* Must fit within same period as start; end at beginning of next */
+ first_end = (start / bits_per_period + 1) * bits_per_period;
+ else
+ /* Can fit anywhere in the first interval */
+ first_end = interval_bits;
+
+ /*
+ * We'll try to pick the first repetition, then see if that time
+ * is free for each of the subsequent repetitions. If it's not
+ * we'll adjust the start time for the next search of the first
+ * repetition.
+ */
+ while (start + num_bits <= first_end) {
+ int end;
+
+ /* Need to stay within this period */
+ end = (start / bits_per_period + 1) * bits_per_period;
+
+ /* Look for num_bits us in this microframe starting at start */
+ start = bitmap_find_next_zero_area(map, end, start, num_bits,
+ 0);
+
+ /*
+ * We should get start >= end if we fail. We might be
+ * able to check the next microframe depending on the
+ * interval, so continue on (start already updated).
+ */
+ if (start >= end) {
+ start = end;
+ continue;
+ }
+
+ /* At this point we have a valid point for first one */
+ for (i = 1; i < to_reserve; i++) {
+ int ith_start = start + interval_bits * i;
+ int ith_end = end + interval_bits * i;
+ int ret;
+
+ /* Use this as a dumb "check if bits are 0" */
+ ret = bitmap_find_next_zero_area(
+ map, ith_start + num_bits, ith_start, num_bits,
+ 0);
+
+ /* We got the right place, continue checking */
+ if (ret == ith_start)
+ continue;
+
+ /* Move start up for next time and exit for loop */
+ ith_start = bitmap_find_next_zero_area(
+ map, ith_end, ith_start, num_bits, 0);
+ if (ith_start >= ith_end)
+ /* Need a whole new period next time */
+ start = end;
+ else
+ start = ith_start - interval_bits * i;
+ break;
+ }
+
+ /* If didn't exit the for loop with a break, we have success */
+ if (i == to_reserve)
+ break;
+ }
-void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
+ if (start + num_bits > first_end)
+ return -ENOSPC;
+
+ for (i = 0; i < to_reserve; i++) {
+ int ith_start = start + interval_bits * i;
+
+ bitmap_set(map, ith_start, num_bits);
+ }
+
+ return start;
+}
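
The gcd() collapse described in the big comment above is easy to verify standalone; this sketch prints the effective interval for each requested interval with 8 periods in the map, matching the 3 -> 1, 5 -> 1, 6 -> 2, 7 -> 1 cases listed.

```c
#include <stdio.h>

static int gcd(int a, int b)
{
	while (b) {
		int t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	const int periods_in_map = 8;
	int interval;

	for (interval = 1; interval <= 8; interval++)
		printf("requested interval %d -> effective interval %d\n",
		       interval, gcd(interval, periods_in_map));
	return 0;
}
```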
+
+/**
+ * pmap_unschedule() - Undo work done by pmap_schedule()
+ *
+ * @map: See pmap_schedule().
+ * @bits_per_period: See pmap_schedule().
+ * @periods_in_map: See pmap_schedule().
+ * @num_bits: The number of bits that was passed to schedule.
+ * @interval: The interval that was passed to schedule.
+ * @start: The return value from pmap_schedule().
+ */
+static void pmap_unschedule(unsigned long *map, int bits_per_period,
+ int periods_in_map, int num_bits,
+ int interval, int start)
{
+ int interval_bits;
+ int to_release;
int i;
- for (i = 0; i < 8; i++)
- hsotg->frame_usecs[i] = max_uframe_usecs[i];
+ /* Adjust interval as per description in pmap_schedule() */
+ interval = gcd(interval, periods_in_map);
+
+ interval_bits = bits_per_period * interval;
+ to_release = periods_in_map / interval;
+
+ for (i = 0; i < to_release; i++) {
+ int ith_start = start + interval_bits * i;
+
+ bitmap_clear(map, ith_start, num_bits);
+ }
}
-static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+/*
+ * cat_printf() - A printf() + strcat() helper
+ *
+ * This is useful for concatenating a bunch of strings where each string is
+ * constructed using printf.
+ *
+ * @buf: The destination buffer; will be updated to point after the printed
+ * data.
+ * @size: The number of bytes in the buffer (includes space for '\0').
+ * @fmt: The format for printf.
+ * @...: The args for printf.
+ */
+static void cat_printf(char **buf, size_t *size, const char *fmt, ...)
{
- unsigned short utime = qh->usecs;
+ va_list args;
int i;
- for (i = 0; i < 8; i++) {
- /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
- if (utime <= hsotg->frame_usecs[i]) {
- hsotg->frame_usecs[i] -= utime;
- qh->frame_usecs[i] += utime;
- return i;
- }
+ if (*size == 0)
+ return;
+
+ va_start(args, fmt);
+ i = vsnprintf(*buf, *size, fmt, args);
+ va_end(args);
+
+ if (i >= *size) {
+ (*buf)[*size - 1] = '\0';
+ *buf += *size;
+ *size = 0;
+ } else {
+ *buf += i;
+ *size -= i;
}
- return -ENOSPC;
}
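
A userspace sketch of how cat_printf() gets used (the same pattern appears in pmap_print() below); the helper body mirrors the kernel version, with an explicit cast added since vsnprintf() returns int.

```c
#include <stdarg.h>
#include <stdio.h>

/* Userspace copy of cat_printf(): advances *buf and shrinks *size */
static void cat_printf(char **buf, size_t *size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (*size == 0)
		return;

	va_start(args, fmt);
	i = vsnprintf(*buf, *size, fmt, args);
	va_end(args);

	if (i >= (int)*size) {
		/* Output was truncated; mark the buffer exhausted */
		(*buf)[*size - 1] = '\0';
		*buf += *size;
		*size = 0;
	} else {
		*buf += i;
		*size -= i;
	}
}

int main(void)
{
	char tmp[32];
	char *buf = tmp;
	size_t size = sizeof(tmp);

	cat_printf(&buf, &size, "%s %d: ", "uFrame", 3);
	cat_printf(&buf, &size, "%d us -%3d us", 0, 24);
	printf("%s\n", tmp);	/* prints: uFrame 3: 0 us - 24 us */
	return 0;
}
```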
/*
- * use this for FS apps that can span multiple uframes
+ * pmap_print() - Print the given periodic map
+ *
+ * Will attempt to print out the periodic schedule.
+ *
+ * @map: See pmap_schedule().
+ * @bits_per_period: See pmap_schedule().
+ * @periods_in_map: See pmap_schedule().
+ * @period_name: The name of 1 period, like "uFrame"
+ * @units: The name of the units, like "us".
+ * @print_fn: The function to call for printing.
+ * @print_data: Opaque data to pass to the print function.
+ */
+static void pmap_print(unsigned long *map, int bits_per_period,
+ int periods_in_map, const char *period_name,
+ const char *units,
+ void (*print_fn)(const char *str, void *data),
+ void *print_data)
+{
+ int period;
+
+ for (period = 0; period < periods_in_map; period++) {
+ char tmp[64];
+ char *buf = tmp;
+ size_t buf_size = sizeof(tmp);
+ int period_start = period * bits_per_period;
+ int period_end = period_start + bits_per_period;
+ int start = 0;
+ int count = 0;
+ bool printed = false;
+ int i;
+
+ for (i = period_start; i < period_end + 1; i++) {
+ /* Handle case when ith bit is set */
+ if (i < period_end &&
+ bitmap_find_next_zero_area(map, i + 1,
+ i, 1, 0) != i) {
+ if (count == 0)
+ start = i - period_start;
+ count++;
+ continue;
+ }
+
+ /* ith bit isn't set; don't care if count == 0 */
+ if (count == 0)
+ continue;
+
+ if (!printed)
+ cat_printf(&buf, &buf_size, "%s %d: ",
+ period_name, period);
+ else
+ cat_printf(&buf, &buf_size, ", ");
+ printed = true;
+
+ cat_printf(&buf, &buf_size, "%d %s -%3d %s", start,
+ units, start + count - 1, units);
+ count = 0;
+ }
+
+ if (printed)
+ print_fn(tmp, print_data);
+ }
+}
+
+/**
+ * dwc2_get_ls_map() - Get the map used for the given qh
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ *
+ * We'll always get the periodic map out of our TT. Note that even if we're
+ * running the host straight in low speed / full speed mode it appears as if
+ * a TT is allocated for us, so we'll use it. If that ever changes we can
+ * add logic here to get a map out of "hsotg" if !qh->do_split.
+ *
+ * Returns: the map or NULL if a map couldn't be found.
*/
-static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
{
- unsigned short utime = qh->usecs;
- unsigned short xtime;
- int t_left;
+ unsigned long *map;
+
+ /* Don't expect to be missing a TT and be doing low speed scheduling */
+ if (WARN_ON(!qh->dwc_tt))
+ return NULL;
+
+ /* Get the map and adjust if this is a multi_tt hub */
+ map = qh->dwc_tt->periodic_bitmaps;
+ if (qh->dwc_tt->usb_tt->multi)
+ map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+
+ return map;
+}
+
+struct dwc2_qh_print_data {
+ struct dwc2_hsotg *hsotg;
+ struct dwc2_qh *qh;
+};
+
+/**
+ * dwc2_qh_print() - Helper function for dwc2_qh_schedule_print()
+ *
+ * @str: The string to print
+ * @data: A pointer to a struct dwc2_qh_print_data
+ */
+static void dwc2_qh_print(const char *str, void *data)
+{
+ struct dwc2_qh_print_data *print_data = data;
+
+ dwc2_sch_dbg(print_data->hsotg, "QH=%p ...%s\n", print_data->qh, str);
+}
+
+/**
+ * dwc2_qh_schedule_print() - Print the periodic schedule
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH to print.
+ */
+static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ struct dwc2_qh_print_data print_data = { hsotg, qh };
int i;
- int j;
- int k;
- for (i = 0; i < 8; i++) {
- if (hsotg->frame_usecs[i] <= 0)
+ /*
+ * The printing functions are quite slow and inefficient.
+ * If we don't have tracing turned on, don't run unless the special
+ * define is turned on.
+ */
+#ifndef DWC2_PRINT_SCHEDULE
+ return;
+#endif
+
+ if (qh->schedule_low_speed) {
+ unsigned long *map = dwc2_get_ls_map(hsotg, qh);
+
+ dwc2_sch_dbg(hsotg, "QH=%p LS/FS trans: %d=>%d us @ %d us",
+ qh, qh->device_us,
+ DWC2_ROUND_US_TO_SLICE(qh->device_us),
+ DWC2_US_PER_SLICE * qh->ls_start_schedule_slice);
+
+ if (map) {
+ dwc2_sch_dbg(hsotg,
+ "QH=%p Whole low/full speed map %p now:\n",
+ qh, map);
+ pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
+ DWC2_LS_SCHEDULE_FRAMES, "Frame ", "slices",
+ dwc2_qh_print, &print_data);
+ }
+ }
+
+ for (i = 0; i < qh->num_hs_transfers; i++) {
+ struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + i;
+ int uframe = trans_time->start_schedule_us /
+ DWC2_HS_PERIODIC_US_PER_UFRAME;
+ int rel_us = trans_time->start_schedule_us %
+ DWC2_HS_PERIODIC_US_PER_UFRAME;
+
+ dwc2_sch_dbg(hsotg,
+ "QH=%p HS trans #%d: %d us @ uFrame %d + %d us\n",
+ qh, i, trans_time->duration_us, uframe, rel_us);
+ }
+ if (qh->num_hs_transfers) {
+ dwc2_sch_dbg(hsotg, "QH=%p Whole high speed map now:\n", qh);
+ pmap_print(hsotg->hs_periodic_bitmap,
+ DWC2_HS_PERIODIC_US_PER_UFRAME,
+ DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
+ dwc2_qh_print, &print_data);
+ }
+}
+
+/**
+ * dwc2_ls_pmap_schedule() - Schedule a low speed QH
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ * @search_slice: We'll start trying to schedule at the passed slice.
+ * Remember that slices are the units of the low speed
+ * schedule (think 25us or so).
+ *
+ * Wraps pmap_schedule() with the right parameters for low speed scheduling.
+ *
+ * Normally we schedule low speed devices on the map associated with the TT.
+ *
+ * Returns: 0 for success or an error code.
+ */
+static int dwc2_ls_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ int search_slice)
+{
+ int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
+ unsigned long *map = dwc2_get_ls_map(hsotg, qh);
+ int slice;
+
+ if (map == NULL)
+ return -EINVAL;
+
+ /*
+ * Schedule on the proper low speed map with our low speed scheduling
+ * parameters. Note that we use the "device_interval" here since
+ * we want the low speed interval and the only way we'd be in this
+ * function is if the device is low speed.
+ *
+ * If we happen to be doing low speed and high speed scheduling for the
+ * same transaction (AKA we have a split) we always do low speed first.
+ * That means we can always pass "false" for only_one_period (that
+ * parameter is only useful when we're trying to get one schedule to
+ * match what we already planned in the other schedule).
+ */
+ slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
+ DWC2_LS_SCHEDULE_FRAMES, slices,
+ qh->device_interval, search_slice, false);
+
+ if (slice < 0)
+ return slice;
+
+ qh->ls_start_schedule_slice = slice;
+ return 0;
+}
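
The slice rounding here is plain DIV_ROUND_UP; a quick sketch with 25 us slices (the driver's DWC2_US_PER_SLICE) and a few invented durations:

```c
#include <stdio.h>

#define US_PER_SLICE		25	/* mirrors DWC2_US_PER_SLICE */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int device_us[] = { 13, 25, 30, 90 };
	unsigned int i;

	for (i = 0; i < sizeof(device_us) / sizeof(device_us[0]); i++)
		printf("%2d us -> %d slice(s)\n", device_us[i],
		       DIV_ROUND_UP(device_us[i], US_PER_SLICE));
	return 0;
}
```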
+
+/**
+ * dwc2_ls_pmap_unschedule() - Undo work done by dwc2_ls_pmap_schedule()
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static void dwc2_ls_pmap_unschedule(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ int slices = DIV_ROUND_UP(qh->device_us, DWC2_US_PER_SLICE);
+ unsigned long *map = dwc2_get_ls_map(hsotg, qh);
+
+ /* Schedule should have failed, so no worries about no error code */
+ if (map == NULL)
+ return;
+
+ pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
+ DWC2_LS_SCHEDULE_FRAMES, slices, qh->device_interval,
+ qh->ls_start_schedule_slice);
+}
+
+/**
+ * dwc2_hs_pmap_schedule - Schedule in the main high speed schedule
+ *
+ * This will schedule something on the main dwc2 schedule.
+ *
+ * We'll start looking in qh->hs_transfers[index].start_schedule_us. We'll
+ * update this with the result upon success. We also use the duration from
+ * the same structure.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ * @only_one_period: If true we will limit ourselves to just looking at
+ * one period (aka one 100us chunk). This is used if we have
+ * already scheduled something on the low speed schedule and
+ * need to find something that matches on the high speed one.
+ * @index: The index into qh->hs_transfers that we're working with.
+ *
+ * Returns: 0 for success or an error code. Upon success the
+ * dwc2_hs_transfer_time specified by "index" will be updated.
+ */
+static int dwc2_hs_pmap_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ bool only_one_period, int index)
+{
+ struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
+ int us;
+
+ us = pmap_schedule(hsotg->hs_periodic_bitmap,
+ DWC2_HS_PERIODIC_US_PER_UFRAME,
+ DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
+ qh->host_interval, trans_time->start_schedule_us,
+ only_one_period);
+
+ if (us < 0)
+ return us;
+
+ trans_time->start_schedule_us = us;
+ return 0;
+}
+
+/**
+ * dwc2_hs_pmap_unschedule() - Undo work done by dwc2_hs_pmap_schedule()
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static void dwc2_hs_pmap_unschedule(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh, int index)
+{
+ struct dwc2_hs_transfer_time *trans_time = qh->hs_transfers + index;
+
+ pmap_unschedule(hsotg->hs_periodic_bitmap,
+ DWC2_HS_PERIODIC_US_PER_UFRAME,
+ DWC2_HS_SCHEDULE_UFRAMES, trans_time->duration_us,
+ qh->host_interval, trans_time->start_schedule_us);
+}
+
+/**
+ * dwc2_uframe_schedule_split - Schedule a QH for a periodic split xfer.
+ *
+ * This is the most complicated thing in USB. We have to find matching time
+ * in both the global high speed schedule for the port and the low speed
+ * schedule for the TT associated with the given device.
+ *
+ * Being here means that the host must be running in high speed mode and the
+ * device is in low or full speed mode (and behind a hub).
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule_split(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh)
+{
+ int bytecount = dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
+ int ls_search_slice;
+ int err = 0;
+ int host_interval_in_sched;
+
+ /*
+ * The interval (how often to repeat) in the actual host schedule.
+ * See pmap_schedule() for gcd() explanation.
+ */
+ host_interval_in_sched = gcd(qh->host_interval,
+ DWC2_HS_SCHEDULE_UFRAMES);
+
+ /*
+ * We always try to find space in the low speed schedule first, then
+ * try to find high speed time that matches. If we don't, we'll bump
+ * up the place we start searching in the low speed schedule and try
+ * again. To start we'll look right at the beginning of the low speed
+ * schedule.
+ *
+ * Note that this will tend to front-load the high speed schedule.
+ * We may eventually want to try to avoid this by either considering
+ * both schedules together or doing some sort of round robin.
+ */
+ ls_search_slice = 0;
+
+ while (ls_search_slice < DWC2_LS_SCHEDULE_SLICES) {
+ int start_s_uframe;
+ int ssplit_s_uframe;
+ int second_s_uframe;
+ int rel_uframe;
+ int first_count;
+ int middle_count;
+ int end_count;
+ int first_data_bytes;
+ int other_data_bytes;
+ int i;
+
+ if (qh->schedule_low_speed) {
+ err = dwc2_ls_pmap_schedule(hsotg, qh, ls_search_slice);
+
+ /*
+ * If we got an error here there's no other magic we
+ * can do, so bail. All the looping above is only
+ * helpful to redo things if we got a low speed slot
+ * and then couldn't find a matching high speed slot.
+ */
+ if (err)
+ return err;
+ } else {
+ /* Must be missing the tt structure? Why? */
+ WARN_ON_ONCE(1);
+ }
+
+ /*
+ * This will give us a number 0 - 7 if
+ * DWC2_LS_SCHEDULE_FRAMES == 1, or 0 - 15 if == 2, or ...
+ */
+ start_s_uframe = qh->ls_start_schedule_slice /
+ DWC2_SLICES_PER_UFRAME;
+
+ /* Get a number that's always 0 - 7 */
+ rel_uframe = (start_s_uframe % 8);
+
+ /*
+ * If we were going to start in uframe 7 then we would need to
+ * issue a start split in uframe 6, which spec says is not OK.
+ * Move on to the next full frame (assuming there is one).
+ *
+ * See 11.18.4 Host Split Transaction Scheduling Requirements
+ * bullet 1.
+ */
+ if (rel_uframe == 7) {
+ if (qh->schedule_low_speed)
+ dwc2_ls_pmap_unschedule(hsotg, qh);
+ ls_search_slice =
+ (qh->ls_start_schedule_slice /
+ DWC2_LS_PERIODIC_SLICES_PER_FRAME + 1) *
+ DWC2_LS_PERIODIC_SLICES_PER_FRAME;
continue;
+ }
/*
- * we need n consecutive slots so use j as a start slot
- * j plus j+1 must be enough time (for now)
+ * For ISOC in:
+ * - start split (frame -1)
+ * - complete split w/ data (frame +1)
+ * - complete split w/ data (frame +2)
+ * - ...
+ * - complete split w/ data (frame +num_data_packets)
+ * - complete split w/ data (frame +num_data_packets+1)
+ * - complete split w/ data (frame +num_data_packets+2, max 8)
+ * ...though if frame was "0" then max is 7...
+ *
+ * For ISOC out we might need to do:
+ * - start split w/ data (frame -1)
+ * - start split w/ data (frame +0)
+ * - ...
+ * - start split w/ data (frame +num_data_packets-2)
+ *
+ * For INTERRUPT in we might need to do:
+ * - start split (frame -1)
+ * - complete split w/ data (frame +1)
+ * - complete split w/ data (frame +2)
+ * - complete split w/ data (frame +3, max 8)
+ *
+ * For INTERRUPT out we might need to do:
+ * - start split w/ data (frame -1)
+ * - complete split (frame +1)
+ * - complete split (frame +2)
+ * - complete split (frame +3, max 8)
+ *
+ * Start adjusting!
*/
- xtime = hsotg->frame_usecs[i];
- for (j = i + 1; j < 8; j++) {
- /*
- * if we add this frame remaining time to xtime we may
- * be OK, if not we need to test j for a complete frame
- */
- if (xtime + hsotg->frame_usecs[j] < utime) {
- if (hsotg->frame_usecs[j] <
- max_uframe_usecs[j])
- continue;
+ ssplit_s_uframe = (start_s_uframe +
+ host_interval_in_sched - 1) %
+ host_interval_in_sched;
+ if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in)
+ second_s_uframe = start_s_uframe;
+ else
+ second_s_uframe = start_s_uframe + 1;
+
+ /* First data transfer might not be all 188 bytes. */
+ first_data_bytes = 188 -
+ DIV_ROUND_UP(188 * (qh->ls_start_schedule_slice %
+ DWC2_SLICES_PER_UFRAME),
+ DWC2_SLICES_PER_UFRAME);
+ if (first_data_bytes > bytecount)
+ first_data_bytes = bytecount;
+ other_data_bytes = bytecount - first_data_bytes;
+
+ /*
+ * For now, skip OUT xfers where first xfer is partial
+ *
+ * Main dwc2 code assumes:
+ * - INT transfers never get split in two.
+ * - ISOC transfers can always transfer 188 bytes the first
+ * time.
+ *
+ * Until that code is fixed, try again if the first transfer
+ * couldn't transfer everything.
+ *
+ * This code can be removed if/when the rest of dwc2 handles
+ * the above cases. Until it's fixed we just won't be able
+ * to schedule quite as tightly.
+ */
+ if (!qh->ep_is_in &&
+ (first_data_bytes != min_t(int, 188, bytecount))) {
+ dwc2_sch_dbg(hsotg,
+ "QH=%p avoiding broken 1st xfer (%d, %d)\n",
+ qh, first_data_bytes, bytecount);
+ if (qh->schedule_low_speed)
+ dwc2_ls_pmap_unschedule(hsotg, qh);
+ ls_search_slice = (start_s_uframe + 1) *
+ DWC2_SLICES_PER_UFRAME;
+ continue;
+ }
+
+ /* Start by assuming transfers for the bytes */
+ qh->num_hs_transfers = 1 + DIV_ROUND_UP(other_data_bytes, 188);
+
+ /*
+ * Everything except ISOC OUT has extra transfers. Rules are
+ * complicated. See 11.18.4 Host Split Transaction Scheduling
+ * Requirements bullet 3.
+ */
+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ if (rel_uframe == 6)
+ qh->num_hs_transfers += 2;
+ else
+ qh->num_hs_transfers += 3;
+
+ if (qh->ep_is_in) {
+ /*
+ * First is start split, middle/end is data.
+ * Allocate full data bytes for all data.
+ */
+ first_count = 4;
+ middle_count = bytecount;
+ end_count = bytecount;
+ } else {
+ /*
+ * First is data, middle/end is complete.
+ * First transfer and second can have data.
+ * Rest should just have complete split.
+ */
+ first_count = first_data_bytes;
+ middle_count = max_t(int, 4, other_data_bytes);
+ end_count = 4;
}
- if (xtime >= utime) {
- t_left = utime;
- for (k = i; k < 8; k++) {
- t_left -= hsotg->frame_usecs[k];
- if (t_left <= 0) {
- qh->frame_usecs[k] +=
- hsotg->frame_usecs[k]
- + t_left;
- hsotg->frame_usecs[k] = -t_left;
- return i;
- } else {
- qh->frame_usecs[k] +=
- hsotg->frame_usecs[k];
- hsotg->frame_usecs[k] = 0;
- }
- }
+ } else {
+ if (qh->ep_is_in) {
+ int last;
+
+ /* Account for the start split */
+ qh->num_hs_transfers++;
+
+ /* Calculate "L" value from spec */
+ last = rel_uframe + qh->num_hs_transfers + 1;
+
+ /* Start with basic case */
+ if (last <= 6)
+ qh->num_hs_transfers += 2;
+ else
+ qh->num_hs_transfers += 1;
+
+ /* Adjust downwards */
+ if (last >= 6 && rel_uframe == 0)
+ qh->num_hs_transfers--;
+
+ /* 1st = start; rest can contain data */
+ first_count = 4;
+ middle_count = min_t(int, 188, bytecount);
+ end_count = middle_count;
+ } else {
+ /* All contain data, last might be smaller */
+ first_count = first_data_bytes;
+ middle_count = min_t(int, 188,
+ other_data_bytes);
+ end_count = other_data_bytes % 188;
}
- /* add the frame time to x time */
- xtime += hsotg->frame_usecs[j];
- /* we must have a fully available next frame or break */
- if (xtime < utime &&
- hsotg->frame_usecs[j] == max_uframe_usecs[j])
- continue;
}
+
+ /* Assign durations per uFrame */
+ qh->hs_transfers[0].duration_us = HS_USECS_ISO(first_count);
+ for (i = 1; i < qh->num_hs_transfers - 1; i++)
+ qh->hs_transfers[i].duration_us =
+ HS_USECS_ISO(middle_count);
+ if (qh->num_hs_transfers > 1)
+ qh->hs_transfers[qh->num_hs_transfers - 1].duration_us =
+ HS_USECS_ISO(end_count);
+
+ /*
+ * Assign start us. The call below to dwc2_hs_pmap_schedule()
+ * will start with these numbers but may adjust within the same
+ * microframe.
+ */
+ qh->hs_transfers[0].start_schedule_us =
+ ssplit_s_uframe * DWC2_HS_PERIODIC_US_PER_UFRAME;
+ for (i = 1; i < qh->num_hs_transfers; i++)
+ qh->hs_transfers[i].start_schedule_us =
+ ((second_s_uframe + i - 1) %
+ DWC2_HS_SCHEDULE_UFRAMES) *
+ DWC2_HS_PERIODIC_US_PER_UFRAME;
+
+ /* Try to schedule with filled in hs_transfers above */
+ for (i = 0; i < qh->num_hs_transfers; i++) {
+ err = dwc2_hs_pmap_schedule(hsotg, qh, true, i);
+ if (err)
+ break;
+ }
+
+ /* If we scheduled all w/out breaking out then we're all good */
+ if (i == qh->num_hs_transfers)
+ break;
+
+ for (; i >= 0; i--)
+ dwc2_hs_pmap_unschedule(hsotg, qh, i);
+
+ if (qh->schedule_low_speed)
+ dwc2_ls_pmap_unschedule(hsotg, qh);
+
+ /* Try again starting in the next microframe */
+ ls_search_slice = (start_s_uframe + 1) * DWC2_SLICES_PER_UFRAME;
}
- return -ENOSPC;
+
+ if (ls_search_slice >= DWC2_LS_SCHEDULE_SLICES)
+ return -ENOSPC;
+
+ return 0;
+}
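
The partial-first-transfer math in the loop above scales 188 bytes by how far into a microframe the low speed schedule starts. A sketch assuming 5 slices per microframe (DWC2_SLICES_PER_UFRAME in the driver; treat that value as an assumption of this example) and an invented bytecount:

```c
#include <stdio.h>

#define SLICES_PER_UFRAME	5	/* assumed DWC2_SLICES_PER_UFRAME */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int bytecount = 400;	/* made-up total transfer size */
	int slice;

	for (slice = 0; slice < SLICES_PER_UFRAME; slice++) {
		int first_data_bytes = 188 -
			DIV_ROUND_UP(188 * slice, SLICES_PER_UFRAME);

		if (first_data_bytes > bytecount)
			first_data_bytes = bytecount;
		printf("start slice %d -> first xfer %d bytes, rest %d\n",
		       slice, first_data_bytes, bytecount - first_data_bytes);
	}
	return 0;
}
```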
+
+/**
+ * dwc2_uframe_schedule_hs - Schedule a QH for a periodic high speed xfer.
+ *
+ * Basically this just wraps dwc2_hs_pmap_schedule() to provide a clean
+ * interface.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule_hs(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ /* In non-split host and device time are the same */
+ WARN_ON(qh->host_us != qh->device_us);
+ WARN_ON(qh->host_interval != qh->device_interval);
+ WARN_ON(qh->num_hs_transfers != 1);
+
+ /* We'll have one transfer; init start to 0 before calling scheduler */
+ qh->hs_transfers[0].start_schedule_us = 0;
+ qh->hs_transfers[0].duration_us = qh->host_us;
+
+ return dwc2_hs_pmap_schedule(hsotg, qh, false, 0);
+}
+
+/**
+ * dwc2_uframe_schedule_ls - Schedule a QH for a periodic low/full speed xfer.
+ *
+ * Basically this just wraps dwc2_ls_pmap_schedule() to provide a clean
+ * interface.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule_ls(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ /* In non-split host and device time are the same */
+ WARN_ON(qh->host_us != qh->device_us);
+ WARN_ON(qh->host_interval != qh->device_interval);
+ WARN_ON(!qh->schedule_low_speed);
+
+ /* Run on the main low speed schedule (no split = no hub = no TT) */
+ return dwc2_ls_pmap_schedule(hsotg, qh, 0);
}
-static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+/**
+ * dwc2_uframe_schedule - Schedule a QH for a periodic xfer.
+ *
+ * Calls one of the 3 sub-functions depending on what type of transfer this QH
+ * is for. Also adds some printing.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static int dwc2_uframe_schedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int ret;
- if (qh->dev_speed == USB_SPEED_HIGH) {
- /* if this is a hs transaction we need a full frame */
- ret = dwc2_find_single_uframe(hsotg, qh);
+ if (qh->dev_speed == USB_SPEED_HIGH)
+ ret = dwc2_uframe_schedule_hs(hsotg, qh);
+ else if (!qh->do_split)
+ ret = dwc2_uframe_schedule_ls(hsotg, qh);
+ else
+ ret = dwc2_uframe_schedule_split(hsotg, qh);
+
+ if (ret)
+ dwc2_sch_dbg(hsotg, "QH=%p Failed to schedule %d\n", qh, ret);
+ else
+ dwc2_qh_schedule_print(hsotg, qh);
+
+ return ret;
+}
+
+/**
+ * dwc2_uframe_unschedule - Undoes dwc2_uframe_schedule().
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh: QH for the periodic transfer.
+ */
+static void dwc2_uframe_unschedule(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int i;
+
+ for (i = 0; i < qh->num_hs_transfers; i++)
+ dwc2_hs_pmap_unschedule(hsotg, qh, i);
+
+ if (qh->schedule_low_speed)
+ dwc2_ls_pmap_unschedule(hsotg, qh);
+
+ dwc2_sch_dbg(hsotg, "QH=%p Unscheduled\n", qh);
+}
+
+/**
+ * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled
+ *
+ * Takes a qh that has already been scheduled (which means we know we have the
+ * bandwidth reserved for us) and sets the next_active_frame and the
+ * start_active_frame.
+ *
+ * This is expected to be called on qh's that weren't previously actively
+ * running. It just picks the next frame that we can fit into without any
+ * thought about the past.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for a periodic endpoint
+ *
+ */
+static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ u16 frame_number;
+ u16 earliest_frame;
+ u16 next_active_frame;
+ u16 relative_frame;
+ u16 interval;
+
+ /*
+ * Use the real frame number rather than the cached value as of the
+ * last SOF to give us a little extra slop.
+ */
+ frame_number = dwc2_hcd_get_frame_number(hsotg);
+
+ /*
+ * We wouldn't want to start any earlier than the next frame just in
+ * case the frame number ticks as we're doing this calculation.
+ *
+ * NOTE: if we could quantify how long till we actually get scheduled
+ * we might be able to avoid the "+ 1" by looking at the upper part of
+ * HFNUM (the FRREM field). For now we'll just use the + 1 though.
+ */
+ earliest_frame = dwc2_frame_num_inc(frame_number, 1);
+ next_active_frame = earliest_frame;
+
+ /* Get the "no microframe schduler" out of the way... */
+ if (hsotg->core_params->uframe_sched <= 0) {
+ if (qh->do_split)
+ /* Splits are active at microframe 0 minus 1 */
+ next_active_frame |= 0x7;
+ goto exit;
+ }
+
+ if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
+ /*
+ * We're either at high speed or we're doing a split (which
+ * means we're talking high speed to a hub). In any case
+ * the first frame should be based on when the first scheduled
+ * event is.
+ */
+ WARN_ON(qh->num_hs_transfers < 1);
+
+ relative_frame = qh->hs_transfers[0].start_schedule_us /
+ DWC2_HS_PERIODIC_US_PER_UFRAME;
+
+ /* Adjust interval as per high speed schedule */
+ interval = gcd(qh->host_interval, DWC2_HS_SCHEDULE_UFRAMES);
+
} else {
/*
- * if this is a fs transaction we may need a sequence
- * of frames
+ * Low or full speed directly on dwc2. Just about the same
+ * as high speed but on a different schedule and with slightly
+ * different adjustments. Note that this works because when
+ * the host and device are both low speed then frames in the
+ * controller tick at low speed.
*/
- ret = dwc2_find_multi_uframe(hsotg, qh);
+ relative_frame = qh->ls_start_schedule_slice /
+ DWC2_LS_PERIODIC_SLICES_PER_FRAME;
+ interval = gcd(qh->host_interval, DWC2_LS_SCHEDULE_FRAMES);
}
- return ret;
+
+ /* Scheduler messed up if frame is past interval */
+ WARN_ON(relative_frame >= interval);
+
+ /*
+ * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've
+ * done the gcd(), so it's safe to move to the beginning of the current
+ * interval like this.
+ *
+ * After this we might be before earliest_frame, but don't worry,
+ * we'll fix it...
+ */
+ next_active_frame = (next_active_frame / interval) * interval;
+
+ /*
+ * Actually choose to start at the frame number we've been
+ * scheduled for.
+ */
+ next_active_frame = dwc2_frame_num_inc(next_active_frame,
+ relative_frame);
+
+ /*
+ * We actually need 1 frame before since the next_active_frame is
+ * the frame number we'll be put on the ready list and we won't be on
+ * the bus until 1 frame later.
+ */
+ next_active_frame = dwc2_frame_num_dec(next_active_frame, 1);
+
+ /*
+ * By now we might actually be before the earliest_frame. Let's move
+ * up intervals until we're not.
+ */
+ while (dwc2_frame_num_gt(earliest_frame, next_active_frame))
+ next_active_frame = dwc2_frame_num_inc(next_active_frame,
+ interval);
+
+exit:
+ qh->next_active_frame = next_active_frame;
+ qh->start_active_frame = next_active_frame;
+
+ dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n",
+ qh, frame_number, qh->next_active_frame);
+}
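
The frame arithmetic above leans on helpers that wrap at the hardware's 14-bit frame counter (HFNUM_MAX_FRNUM, 0x3fff), which is why the code can freely step forward and back near the wrap point. A minimal sketch of that wrap behavior; the helper names are local to this example:

```c
#include <stdio.h>
#include <stdint.h>

#define FRNUM_MASK 0x3fff	/* HFNUM_MAX_FRNUM in the driver */

static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
	return (frame + inc) & FRNUM_MASK;
}

static uint16_t frame_num_dec(uint16_t frame, uint16_t dec)
{
	return (frame + FRNUM_MASK + 1 - dec) & FRNUM_MASK;
}

int main(void)
{
	printf("0x3ffe + 3 = 0x%04x\n", frame_num_inc(0x3ffe, 3)); /* 0x0001 */
	printf("0x0001 - 3 = 0x%04x\n", frame_num_dec(0x0001, 3)); /* 0x3ffe */
	return 0;
}
```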
+
+/**
+ * dwc2_do_reserve() - Make a periodic reservation
+ *
+ * Try to allocate space in the periodic schedule. Depending on parameters
+ * this might use the microframe scheduler or the dumb scheduler.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for the periodic transfer.
+ *
+ * Returns: 0 upon success; error upon failure.
+ */
+static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int status;
+
+ if (hsotg->core_params->uframe_sched > 0) {
+ status = dwc2_uframe_schedule(hsotg, qh);
+ } else {
+ status = dwc2_periodic_channel_available(hsotg);
+ if (status) {
+ dev_info(hsotg->dev,
+ "%s: No host channel available for periodic transfer\n",
+ __func__);
+ return status;
+ }
+
+ status = dwc2_check_periodic_bandwidth(hsotg, qh);
+ }
+
+ if (status) {
+ dev_dbg(hsotg->dev,
+ "%s: Insufficient periodic bandwidth for periodic transfer\n",
+ __func__);
+ return status;
+ }
+
+ if (hsotg->core_params->uframe_sched <= 0)
+ /* Reserve periodic channel */
+ hsotg->periodic_channels++;
+
+ /* Update claimed usecs per (micro)frame */
+ hsotg->periodic_usecs += qh->host_us;
+
+ dwc2_pick_first_frame(hsotg, qh);
+
+ return 0;
+}
+
+/**
+ * dwc2_do_unreserve() - Actually release the periodic reservation
+ *
+ * This function actually releases the periodic bandwidth that was reserved
+ * by the given qh.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for the periodic transfer.
+ */
+static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ assert_spin_locked(&hsotg->lock);
+
+ WARN_ON(!qh->unreserve_pending);
+
+ /* No more unreserve pending--we're doing it */
+ qh->unreserve_pending = false;
+
+ if (WARN_ON(!list_empty(&qh->qh_list_entry)))
+ list_del_init(&qh->qh_list_entry);
+
+ /* Update claimed usecs per (micro)frame */
+ hsotg->periodic_usecs -= qh->host_us;
+
+ if (hsotg->core_params->uframe_sched > 0) {
+ dwc2_uframe_unschedule(hsotg, qh);
+ } else {
+ /* Release periodic channel reservation */
+ hsotg->periodic_channels--;
+ }
+}
+
+/**
+ * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
+ *
+ * According to the kernel doc for usb_submit_urb() (specifically the part about
+ * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
+ * long as a device driver keeps submitting. Since we're using HCD_BH to give
+ * back the URB we need to give the driver a little bit of time before we
+ * release the reservation. This worker is called after the appropriate
+ * delay.
+ *
+ * @work: Pointer to a qh unreserve_work.
+ */
+static void dwc2_unreserve_timer_fn(unsigned long data)
+{
+ struct dwc2_qh *qh = (struct dwc2_qh *)data;
+ struct dwc2_hsotg *hsotg = qh->hsotg;
+ unsigned long flags;
+
+ /*
+ * Wait for the lock, or for us to be scheduled again. We
+ * could be scheduled again if:
+ * - We started executing but didn't get the lock yet.
+ * - A new reservation came in, but cancel didn't take effect
+ * because we already started executing.
+ * - The timer has been kicked again.
+ * In that case cancel and wait for the next call.
+ */
+ while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
+ if (timer_pending(&qh->unreserve_timer))
+ return;
+ }
+
+ /*
+ * Might be no more unreserve pending if:
+ * - We started executing but didn't get the lock yet.
+ * - A new reservation came in, but cancel didn't take effect
+ * because we already started executing.
+ *
+ * We can't put this in the loop above because unreserve_pending needs
+ * to be accessed under lock, so we can only check it once we got the
+ * lock.
+ */
+ if (qh->unreserve_pending)
+ dwc2_do_unreserve(hsotg, qh);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
}
/**
@@ -474,42 +1353,6 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
- if (hsotg->core_params->uframe_sched > 0) {
- int frame = -1;
-
- status = dwc2_find_uframe(hsotg, qh);
- if (status == 0)
- frame = 7;
- else if (status > 0)
- frame = status - 1;
-
- /* Set the new frame up */
- if (frame >= 0) {
- qh->sched_frame &= ~0x7;
- qh->sched_frame |= (frame & 7);
- }
-
- if (status > 0)
- status = 0;
- } else {
- status = dwc2_periodic_channel_available(hsotg);
- if (status) {
- dev_info(hsotg->dev,
- "%s: No host channel available for periodic transfer\n",
- __func__);
- return status;
- }
-
- status = dwc2_check_periodic_bandwidth(hsotg, qh);
- }
-
- if (status) {
- dev_dbg(hsotg->dev,
- "%s: Insufficient periodic bandwidth for periodic transfer\n",
- __func__);
- return status;
- }
-
status = dwc2_check_max_xfer_size(hsotg, qh);
if (status) {
dev_dbg(hsotg->dev,
@@ -518,6 +1361,35 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
return status;
}
+ /* Cancel pending unreserve; if canceled OK, unreserve was pending */
+ if (del_timer(&qh->unreserve_timer))
+ WARN_ON(!qh->unreserve_pending);
+
+ /*
+ * Only need to reserve if there's not an unreserve pending, since if an
+ * unreserve is pending then by definition our old reservation is still
+ * valid. Unreserve might still be pending even if we didn't cancel if
+ * dwc2_unreserve_timer_fn() already started. Code in the timer handles
+ * that case.
+ */
+ if (!qh->unreserve_pending) {
+ status = dwc2_do_reserve(hsotg, qh);
+ if (status)
+ return status;
+ } else {
+ /*
+ * It might have been a while, so make sure that frame_number
+ * is still good. Note: we could also try to use the similar
+ * dwc2_next_periodic_start() but that schedules much more
+ * tightly and we might need to hurry and queue things up.
+ */
+ if (dwc2_frame_num_le(qh->next_active_frame,
+ hsotg->frame_number))
+ dwc2_pick_first_frame(hsotg, qh);
+ }
+
+ qh->unreserve_pending = 0;
+
if (hsotg->core_params->dma_desc_enable > 0)
/* Don't rely on SOF and start in ready schedule */
list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
@@ -526,14 +1398,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
list_add_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_inactive);
- if (hsotg->core_params->uframe_sched <= 0)
- /* Reserve periodic channel */
- hsotg->periodic_channels++;
-
- /* Update claimed usecs per (micro)frame */
- hsotg->periodic_usecs += qh->usecs;
-
- return status;
+ return 0;
}
/**
@@ -546,25 +1411,231 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
- int i;
+ bool did_modify;
+
+ assert_spin_locked(&hsotg->lock);
+
+ /*
+ * Schedule the unreserve to happen in a little bit. Cases here:
+ * - Unreserve worker might be sitting there waiting to grab the lock.
+ * In this case it will notice it's been scheduled again and will
+ * quit.
+ * - Unreserve worker might not be scheduled.
+ *
+ * We should never already be scheduled since dwc2_schedule_periodic()
+ * should have canceled the scheduled unreserve timer (hence the
+ * warning on did_modify).
+ *
+ * We add + 1 to the timer to guarantee that at least 1 jiffy has
+ * passed (otherwise the jiffy counter might tick right after we
+ * read it and we'd get no delay).
+ */
+ did_modify = mod_timer(&qh->unreserve_timer,
+ jiffies + DWC2_UNRESERVE_DELAY + 1);
+ WARN_ON(did_modify);
+ qh->unreserve_pending = 1;
list_del_init(&qh->qh_list_entry);
+}
- /* Update claimed usecs per (micro)frame */
- hsotg->periodic_usecs -= qh->usecs;
+/**
+ * dwc2_qh_init() - Initializes a QH structure
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: The QH to init
+ * @urb: Holds the information about the device/endpoint needed to initialize
+ * the QH
+ * @mem_flags: Flags for allocating memory.
+ */
+static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
+ struct dwc2_hcd_urb *urb, gfp_t mem_flags)
+{
+ int dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
+ u8 ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+ bool ep_is_in = !!dwc2_hcd_is_pipe_in(&urb->pipe_info);
+ bool ep_is_isoc = (ep_type == USB_ENDPOINT_XFER_ISOC);
+ bool ep_is_int = (ep_type == USB_ENDPOINT_XFER_INT);
+ u32 hprt = dwc2_readl(hsotg->regs + HPRT0);
+ u32 prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
+ bool do_split = (prtspd == HPRT0_SPD_HIGH_SPEED &&
+ dev_speed != USB_SPEED_HIGH);
+ int maxp = dwc2_hcd_get_mps(&urb->pipe_info);
+ int bytecount = dwc2_hb_mult(maxp) * dwc2_max_packet(maxp);
+ char *speed, *type;
- if (hsotg->core_params->uframe_sched > 0) {
- for (i = 0; i < 8; i++) {
- hsotg->frame_usecs[i] += qh->frame_usecs[i];
- qh->frame_usecs[i] = 0;
+ /* Initialize QH */
+ qh->hsotg = hsotg;
+ setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
+ (unsigned long)qh);
+ qh->ep_type = ep_type;
+ qh->ep_is_in = ep_is_in;
+
+ qh->data_toggle = DWC2_HC_PID_DATA0;
+ qh->maxp = maxp;
+ INIT_LIST_HEAD(&qh->qtd_list);
+ INIT_LIST_HEAD(&qh->qh_list_entry);
+
+ qh->do_split = do_split;
+ qh->dev_speed = dev_speed;
+
+ if (ep_is_int || ep_is_isoc) {
+ /* Compute scheduling parameters once and save them */
+ int host_speed = do_split ? USB_SPEED_HIGH : dev_speed;
+ struct dwc2_tt *dwc_tt = dwc2_host_get_tt_info(hsotg, urb->priv,
+ mem_flags,
+ &qh->ttport);
+ int device_ns;
+
+ qh->dwc_tt = dwc_tt;
+
+ qh->host_us = NS_TO_US(usb_calc_bus_time(host_speed, ep_is_in,
+ ep_is_isoc, bytecount));
+ device_ns = usb_calc_bus_time(dev_speed, ep_is_in,
+ ep_is_isoc, bytecount);
+
+ if (do_split && dwc_tt)
+ device_ns += dwc_tt->usb_tt->think_time;
+ qh->device_us = NS_TO_US(device_ns);
+
+ qh->device_interval = urb->interval;
+ qh->host_interval = urb->interval * (do_split ? 8 : 1);
+
+ /*
+ * Schedule low speed if we're running the host in low or
+ * full speed OR if we've got a "TT" to deal with to access this
+ * device.
+ */
+ qh->schedule_low_speed = prtspd != HPRT0_SPD_HIGH_SPEED ||
+ dwc_tt;
+
+ if (do_split) {
+ /* We won't know num transfers until we schedule */
+ qh->num_hs_transfers = -1;
+ } else if (dev_speed == USB_SPEED_HIGH) {
+ qh->num_hs_transfers = 1;
+ } else {
+ qh->num_hs_transfers = 0;
}
- } else {
- /* Release periodic channel reservation */
- hsotg->periodic_channels--;
+
+ /* We'll schedule later when we have something to do */
+ }
+
+ switch (dev_speed) {
+ case USB_SPEED_LOW:
+ speed = "low";
+ break;
+ case USB_SPEED_FULL:
+ speed = "full";
+ break;
+ case USB_SPEED_HIGH:
+ speed = "high";
+ break;
+ default:
+ speed = "?";
+ break;
+ }
+
+ switch (qh->ep_type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ type = "isochronous";
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ type = "interrupt";
+ break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ type = "control";
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ type = "bulk";
+ break;
+ default:
+ type = "?";
+ break;
+ }
+
+ dwc2_sch_dbg(hsotg, "QH=%p Init %s, %s speed, %d bytes:\n", qh, type,
+ speed, bytecount);
+ dwc2_sch_dbg(hsotg, "QH=%p ...addr=%d, ep=%d, %s\n", qh,
+ dwc2_hcd_get_dev_addr(&urb->pipe_info),
+ dwc2_hcd_get_ep_num(&urb->pipe_info),
+ ep_is_in ? "IN" : "OUT");
+ if (ep_is_int || ep_is_isoc) {
+ dwc2_sch_dbg(hsotg,
+ "QH=%p ...duration: host=%d us, device=%d us\n",
+ qh, qh->host_us, qh->device_us);
+ dwc2_sch_dbg(hsotg, "QH=%p ...interval: host=%d, device=%d\n",
+ qh, qh->host_interval, qh->device_interval);
+ if (qh->schedule_low_speed)
+ dwc2_sch_dbg(hsotg, "QH=%p ...low speed schedule=%p\n",
+ qh, dwc2_get_ls_map(hsotg, qh));
}
}
/**
+ * dwc2_hcd_qh_create() - Allocates and initializes a QH
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @urb: Holds the information about the device/endpoint needed
+ * to initialize the QH
+ * @atomic_alloc: Flag to do atomic allocation if needed
+ *
+ * Return: Pointer to the newly allocated QH, or NULL on error
+ */
+struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+ struct dwc2_hcd_urb *urb,
+ gfp_t mem_flags)
+{
+ struct dwc2_qh *qh;
+
+ if (!urb->priv)
+ return NULL;
+
+ /* Allocate memory */
+ qh = kzalloc(sizeof(*qh), mem_flags);
+ if (!qh)
+ return NULL;
+
+ dwc2_qh_init(hsotg, qh, urb, mem_flags);
+
+ if (hsotg->core_params->dma_desc_enable > 0 &&
+ dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
+ dwc2_hcd_qh_free(hsotg, qh);
+ return NULL;
+ }
+
+ return qh;
+}
+
+/**
+ * dwc2_hcd_qh_free() - Frees the QH
+ *
+ * @hsotg: HCD instance
+ * @qh: The QH to free
+ *
+ * QH should already be removed from the list. QTD list should already be empty
+ * if called from URB Dequeue.
+ *
+ * Must NOT be called with interrupts disabled or a spinlock held
+ */
+void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ /* Make sure any unreserve work is finished. */
+ if (del_timer_sync(&qh->unreserve_timer)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_do_unreserve(hsotg, qh);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ }
+ dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
+
+ if (qh->desc_list)
+ dwc2_hcd_qh_free_ddma(hsotg, qh);
+ kfree(qh);
+}
+
+/**
* dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
* schedule if it is not already in the schedule. If the QH is already in
* the schedule, no action is taken.
@@ -586,16 +1657,12 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
/* QH already in a schedule */
return 0;
- if (!dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number) &&
- !hsotg->frame_number) {
- dev_dbg(hsotg->dev,
- "reset frame number counter\n");
- qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
- SCHEDULE_SLOP);
- }
-
/* Add the new QH to the appropriate schedule */
if (dwc2_qh_is_non_per(qh)) {
+ /* Schedule right away */
+ qh->start_active_frame = hsotg->frame_number;
+ qh->next_active_frame = qh->start_active_frame;
+
/* Always start in inactive schedule */
list_add_tail(&qh->qh_list_entry,
&hsotg->non_periodic_sched_inactive);
@@ -649,39 +1716,164 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
}
}
-/*
- * Schedule the next continuing periodic split transfer
+/**
+ * dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split.
+ *
+ * This is called for setting next_active_frame for periodic splits for all but
+ * the first packet of the split. Confusing? I thought so...
+ *
+ * Periodic splits are single low/full speed transfers that we end up splitting
+ * up into several high speed transfers. They always fit into one full (1 ms)
+ * frame but might be split over several microframes (125 us each). We need to
+ * put each of the parts on a very specific high speed frame.
+ *
+ * This function figures out where the next active uFrame needs to be.
+ *
+ * @hsotg: The HCD state structure
+ * @qh: QH for the periodic transfer.
+ * @frame_number: The current frame number.
+ *
+ * Return: number missed by (or 0 if we didn't miss).
*/
-static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
- struct dwc2_qh *qh, u16 frame_number,
- int sched_next_periodic_split)
+static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh, u16 frame_number)
{
+ u16 old_frame = qh->next_active_frame;
+ u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
+ int missed = 0;
u16 incr;
- if (sched_next_periodic_split) {
- qh->sched_frame = frame_number;
- incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
- if (dwc2_frame_num_le(frame_number, incr)) {
- /*
- * Allow one frame to elapse after start split
- * microframe before scheduling complete split, but
- * DON'T if we are doing the next start split in the
- * same frame for an ISOC out
- */
- if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
- qh->ep_is_in != 0) {
- qh->sched_frame =
- dwc2_frame_num_inc(qh->sched_frame, 1);
- }
- }
- } else {
- qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
- qh->interval);
- if (dwc2_frame_num_le(qh->sched_frame, frame_number))
- qh->sched_frame = frame_number;
- qh->sched_frame |= 0x7;
- qh->start_split_frame = qh->sched_frame;
+ /*
+ * See dwc2_uframe_schedule_split() for split scheduling.
+ *
+ * Basically: increment 1 normally, but 2 right after the start split
+ * (except for ISOC out).
+ */
+ if (old_frame == qh->start_active_frame &&
+ !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
+ incr = 2;
+ else
+ incr = 1;
+
+ qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);
+
+ /*
+ * Note that it's OK for frame_number to be 1 frame past
+ * next_active_frame. Remember that next_active_frame is supposed to
+ * be 1 frame _before_ when we want to be scheduled. If we're 1 frame
+ * past it just means schedule ASAP.
+ *
+ * It's _not_ OK, however, if we're more than one frame past.
+ */
+ if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
+ /*
+ * OOPS, we missed. That's actually pretty bad since
+ * the hub will be unhappy; try ASAP I guess.
+ */
+ missed = dwc2_frame_num_dec(prev_frame_number,
+ qh->next_active_frame);
+ qh->next_active_frame = frame_number;
}
+
+ return missed;
+}
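
A toy walk-through of the increment rule above, ignoring the frame-number wrap: the hop right after the start split advances by 2 (except for ISOC OUT), and every later hop advances by 1.

```c
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int start_active_frame = 0x0100;
	int next_active_frame = start_active_frame;
	bool isoc_out = false;	/* ISOC OUT would always increment by 1 */
	int step;

	for (step = 0; step < 3; step++) {
		int incr = (next_active_frame == start_active_frame &&
			    !isoc_out) ? 2 : 1;

		next_active_frame += incr;
		printf("step %d: next_active_frame = 0x%04x\n",
		       step, next_active_frame);
	}
	return 0;
}
```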
+
+/**
+ * dwc2_next_periodic_start() - Set next_active_frame for next transfer start
+ *
+ * This is called for setting next_active_frame for a periodic transfer for
+ * all cases other than midway through a periodic split. This will also update
+ * start_active_frame.
+ *
+ * Since we _always_ keep start_active_frame as the start of the previous
+ * transfer this is normally pretty easy: we just add our interval to
+ * start_active_frame and we've got our answer.
+ *
+ * The tricks come into play if we miss. In that case we'll look for the next
+ * slot we can fit into.
+ *
+ * @hsotg: The HCD state structure
+ * @qh: QH for the periodic transfer.
+ * @frame_number: The current frame number.
+ *
+ * Return: number missed by (or 0 if we didn't miss).
+ */
+static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh, u16 frame_number)
+{
+ int missed = 0;
+ u16 interval = qh->host_interval;
+ u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
+
+ qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
+ interval);
+
+ /*
+ * The dwc2_frame_num_gt() function used below won't work terribly well
+	 * if we just incremented by a really large interval since the
+ * frame counter only goes to 0x3fff. It's terribly unlikely that we
+ * will have missed in this case anyway. Just go to exit. If we want
+ * to try to do better we'll need to keep track of a bigger counter
+ * somewhere in the driver and handle overflows.
+ */
+ if (interval >= 0x1000)
+ goto exit;
+
+ /*
+ * Test for misses, which is when it's too late to schedule.
+ *
+ * A few things to note:
+ * - We compare against prev_frame_number since start_active_frame
+ * and next_active_frame are always 1 frame before we want things
+ * to be active and we assume we can still get scheduled in the
+ * current frame number.
+ * - It's possible for start_active_frame (now incremented) to be
+ * next_active_frame if we got an EO MISS (even_odd miss) which
+ * basically means that we detected there wasn't enough time for
+ * the last packet and dwc2_hc_set_even_odd_frame() rescheduled us
+ * at the last second. We want to make sure we don't schedule
+ * another transfer for the same frame. My test webcam doesn't seem
+ * terribly upset by missing a transfer but really doesn't like when
+ * we do two transfers in the same frame.
+	 *  - Some misses are expected.  Specifically, in order to work
+	 *    perfectly dwc2 has quite spectacular interrupt latency
+	 *    requirements: it needs to handle its interrupts completely
+	 *    within 125 us of them being asserted.  That not only means that
+	 *    the dwc2 interrupt handler needs to be fast but also that
+	 *    nothing else in the system can block dwc2 for long stretches of
+	 *    time.  We can help with the dwc2 parts of this, but it's hard
+	 *    to guarantee that a system will have interrupt latency < 125
+	 *    us, so we have to be robust to some misses.
+ */
+ if (qh->start_active_frame == qh->next_active_frame ||
+ dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
+ u16 ideal_start = qh->start_active_frame;
+ int periods_in_map;
+
+ /*
+ * Adjust interval as per gcd with map size.
+ * See pmap_schedule() for more details here.
+ */
+ if (qh->do_split || qh->dev_speed == USB_SPEED_HIGH)
+ periods_in_map = DWC2_HS_SCHEDULE_UFRAMES;
+ else
+ periods_in_map = DWC2_LS_SCHEDULE_FRAMES;
+ interval = gcd(interval, periods_in_map);
+
+ do {
+ qh->start_active_frame = dwc2_frame_num_inc(
+ qh->start_active_frame, interval);
+ } while (dwc2_frame_num_gt(prev_frame_number,
+ qh->start_active_frame));
+
+ missed = dwc2_frame_num_dec(qh->start_active_frame,
+ ideal_start);
+ }
+
+exit:
+ qh->next_active_frame = qh->start_active_frame;
+
+ return missed;
}
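A worked example of the recovery loop, with illustrative numbers (8 periods per map is hypothetical; the real sizes are the DWC2_*_SCHEDULE_* constants). Stepping by gcd(interval, periods_in_map) keeps landing on frames that reuse the slots already reserved in the repeating schedule:

#include <stdio.h>

static unsigned gcd(unsigned a, unsigned b)
{
	while (b) {
		unsigned t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned interval = 6, periods_in_map = 8;
	unsigned start = 100, prev_frame = 105;	/* we are 5 frames late */
	unsigned ideal = start;
	unsigned step = gcd(interval, periods_in_map);	/* = 2, not 6 */

	while (prev_frame > start)	/* wraparound ignored in the sketch */
		start += step;

	printf("rescheduled %u -> %u, missed by %u\n", ideal, start,
	       start - ideal);	/* prints: rescheduled 100 -> 106, missed by 6 */
	return 0;
}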
/*
@@ -700,7 +1892,9 @@ static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
int sched_next_periodic_split)
{
+ u16 old_frame = qh->next_active_frame;
u16 frame_number;
+ int missed;
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -713,33 +1907,44 @@ void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
return;
}
+ /*
+ * Use the real frame number rather than the cached value as of the
+ * last SOF just to get us a little closer to reality. Note that
+ * means we don't actually know if we've already handled the SOF
+ * interrupt for this frame.
+ */
frame_number = dwc2_hcd_get_frame_number(hsotg);
- if (qh->do_split) {
- dwc2_sched_periodic_split(hsotg, qh, frame_number,
- sched_next_periodic_split);
- } else {
- qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
- qh->interval);
- if (dwc2_frame_num_le(qh->sched_frame, frame_number))
- qh->sched_frame = frame_number;
- }
+ if (sched_next_periodic_split)
+ missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
+ else
+ missed = dwc2_next_periodic_start(hsotg, qh, frame_number);
+
+ dwc2_sch_vdbg(hsotg,
+ "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
+ qh, sched_next_periodic_split, frame_number, old_frame,
+ qh->next_active_frame,
+ dwc2_frame_num_dec(qh->next_active_frame, old_frame),
+ missed, missed ? "MISS" : "");
if (list_empty(&qh->qtd_list)) {
dwc2_hcd_qh_unlink(hsotg, qh);
return;
}
+
/*
* Remove from periodic_sched_queued and move to
* appropriate queue
+ *
+ * Note: we purposely use the frame_number from the "hsotg" structure
+ * since we know SOF interrupt will handle future frames.
*/
- if ((hsotg->core_params->uframe_sched > 0 &&
- dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
- (hsotg->core_params->uframe_sched <= 0 &&
- qh->sched_frame == frame_number))
- list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
+ if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_ready);
else
- list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
+ list_move_tail(&qh->qh_list_entry,
+ &hsotg->periodic_sched_inactive);
}
/**
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 690b9fd98b55..88629bed6614 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -126,10 +126,10 @@ static const struct dwc2_core_params params_rk3066 = {
.speed = -1,
.enable_dynamic_fifo = 1,
.en_multiple_tx_fifo = -1,
- .host_rx_fifo_size = 520, /* 520 DWORDs */
+ .host_rx_fifo_size = 525, /* 525 DWORDs */
.host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
.host_perio_tx_fifo_size = 256, /* 256 DWORDs */
- .max_transfer_size = 65535,
+ .max_transfer_size = -1,
.max_packet_count = -1,
.host_channels = -1,
.phy_type = -1,
@@ -149,6 +149,38 @@ static const struct dwc2_core_params params_rk3066 = {
.hibernation = -1,
};
+static const struct dwc2_core_params params_ltq = {
+ .otg_cap = 2, /* non-HNP/non-SRP */
+ .otg_ver = -1,
+ .dma_enable = -1,
+ .dma_desc_enable = -1,
+ .dma_desc_fs_enable = -1,
+ .speed = -1,
+ .enable_dynamic_fifo = -1,
+ .en_multiple_tx_fifo = -1,
+ .host_rx_fifo_size = 288, /* 288 DWORDs */
+ .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */
+ .host_perio_tx_fifo_size = 96, /* 96 DWORDs */
+ .max_transfer_size = 65535,
+ .max_packet_count = 511,
+ .host_channels = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .ts_dline = -1,
+ .reload_ctl = -1,
+ .ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
+ GAHBCFG_HBSTLEN_SHIFT,
+ .uframe_sched = -1,
+ .external_id_pin_ctl = -1,
+ .hibernation = -1,
+};
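As in the other per-SoC tables in this file, -1 entries are "no preference" sentinels: the core driver replaces them with defaults derived from the hardware's capability registers, while explicit values (the FIFO sizes and AHB burst length here) pin down what the Lantiq integration requires. The idea, as a sketch (pick_param is illustrative, not a driver function):

/* -1 acts as an "autodetect" sentinel for each parameter */
static int pick_param(int board_val, int hw_default)
{
	return board_val == -1 ? hw_default : board_val;
}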
+
/*
* Check the dr_mode against the module configuration and hardware
* capabilities.
@@ -428,6 +460,8 @@ static const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
{ .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
{ .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
+ { .compatible = "lantiq,arx100-usb", .data = &params_ltq },
+ { .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
{ .compatible = "snps,dwc2", .data = NULL },
{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
{},
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index de5e01f41bc2..fa20f5a99d12 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -67,23 +67,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
static int dwc3_core_soft_reset(struct dwc3 *dwc)
{
u32 reg;
+ int retries = 1000;
int ret;
- /* Before Resetting PHY, put Core in Reset */
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg |= DWC3_GCTL_CORESOFTRESET;
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-
- /* Assert USB3 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
- reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
-
- /* Assert USB2 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-
usb_phy_init(dwc->usb2_phy);
usb_phy_init(dwc->usb3_phy);
ret = phy_init(dwc->usb2_generic_phy);
@@ -95,26 +81,28 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
phy_exit(dwc->usb2_generic_phy);
return ret;
}
- mdelay(100);
- /* Clear USB3 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
- reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+ /*
+ * We're resetting only the device side because, if we're in host mode,
+	 * the xHCI driver will reset the host block. If dwc3 was configured for
+ * host-only mode, then we can return early.
+ */
+ if (dwc->dr_mode == USB_DR_MODE_HOST)
+ return 0;
- /* Clear USB2 PHY reset */
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CSFTRST;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
- mdelay(100);
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!(reg & DWC3_DCTL_CSFTRST))
+ return 0;
- /* After PHYs are stable we can take Core out of reset state */
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
- reg &= ~DWC3_GCTL_CORESOFTRESET;
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+ udelay(1);
+ } while (--retries);
- return 0;
+ return -ETIMEDOUT;
}
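The rewritten reset path swaps two fixed 100 ms delays for a bounded poll: set DCTL.CSFTRST and wait for the core to clear it, giving up after roughly 1 ms. The shape of the loop in isolation, as a kernel-style sketch (poll_bit_clear is not a driver function):

static int poll_bit_clear(void __iomem *reg, u32 mask)
{
	int retries = 1000;

	do {
		if (!(readl(reg) & mask))
			return 0;	/* hardware finished the reset */
		udelay(1);
	} while (--retries);

	return -ETIMEDOUT;		/* ~1 ms elapsed, give up */
}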
/**
@@ -962,10 +950,6 @@ static int dwc3_probe(struct platform_device *pdev)
fladj = pdata->fladj_value;
}
- /* default to superspeed if no maximum_speed passed */
- if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
- dwc->maximum_speed = USB_SPEED_SUPER;
-
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
@@ -1016,6 +1000,33 @@ static int dwc3_probe(struct platform_device *pdev)
goto err1;
}
+ /* Check the maximum_speed parameter */
+ switch (dwc->maximum_speed) {
+ case USB_SPEED_LOW:
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ break;
+ default:
+ dev_err(dev, "invalid maximum_speed parameter %d\n",
+ dwc->maximum_speed);
+ /* fall through */
+ case USB_SPEED_UNKNOWN:
+ /* default to superspeed */
+ dwc->maximum_speed = USB_SPEED_SUPER;
+
+ /*
+ * default to superspeed plus if we are capable.
+ */
+ if (dwc3_is_usb31(dwc) &&
+ (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+ DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
+ dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+
+ break;
+ }
+
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc, fladj);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e4f8b90d9627..6254b2ff9080 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -223,7 +223,8 @@
/* Global HWPARAMS3 Register */
#define DWC3_GHWPARAMS3_SSPHY_IFC(n) ((n) & 3)
#define DWC3_GHWPARAMS3_SSPHY_IFC_DIS 0
-#define DWC3_GHWPARAMS3_SSPHY_IFC_ENA 1
+#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN1 1
+#define DWC3_GHWPARAMS3_SSPHY_IFC_GEN2 2 /* DWC_usb31 only */
#define DWC3_GHWPARAMS3_HSPHY_IFC(n) (((n) & (3 << 2)) >> 2)
#define DWC3_GHWPARAMS3_HSPHY_IFC_DIS 0
#define DWC3_GHWPARAMS3_HSPHY_IFC_UTMI 1
@@ -249,6 +250,7 @@
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
#define DWC3_DCFG_SPEED_MASK (7 << 0)
+#define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DCFG_SUPERSPEED (4 << 0)
#define DWC3_DCFG_HIGHSPEED (0 << 0)
#define DWC3_DCFG_FULLSPEED2 (1 << 0)
@@ -339,6 +341,7 @@
#define DWC3_DSTS_CONNECTSPD (7 << 0)
+#define DWC3_DSTS_SUPERSPEED_PLUS (5 << 0) /* DWC_usb31 only */
#define DWC3_DSTS_SUPERSPEED (4 << 0)
#define DWC3_DSTS_HIGHSPEED (0 << 0)
#define DWC3_DSTS_FULLSPEED2 (1 << 0)
@@ -1024,6 +1027,12 @@ struct dwc3_gadget_ep_cmd_params {
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
+/* check whether we are on the DWC_usb31 core */
+static inline bool dwc3_is_usb31(struct dwc3 *dwc)
+{
+ return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
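The probe-time default earlier in this patch reads as a predicate on this helper plus GHWPARAMS3: the core must be a DWC_usb31 and its SuperSpeed PHY interface must report Gen2. A hypothetical combined check (dwc3_supports_ssp is not a driver function):

static bool dwc3_supports_ssp(struct dwc3 *dwc)
{
	return dwc3_is_usb31(dwc) &&
	       DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
	       DWC3_GHWPARAMS3_SSPHY_IFC_GEN2;
}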
#if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
int dwc3_host_init(struct dwc3 *dwc);
void dwc3_host_exit(struct dwc3 *dwc);
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 2be268d2423d..72664700b8a2 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -39,8 +39,6 @@
#define USBSS_IRQ_COREIRQ_EN BIT(0)
#define USBSS_IRQ_COREIRQ_CLR BIT(0)
-static u64 kdwc3_dma_mask;
-
struct dwc3_keystone {
struct device *dev;
struct clk *clk;
@@ -108,9 +106,6 @@ static int kdwc3_probe(struct platform_device *pdev)
if (IS_ERR(kdwc->usbss))
return PTR_ERR(kdwc->usbss);
- kdwc3_dma_mask = dma_get_mask(dev);
- dev->dma_mask = &kdwc3_dma_mask;
-
kdwc->clk = devm_clk_get(kdwc->dev, "usb");
error = clk_prepare_enable(kdwc->clk);
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 9c9f74155066..974335377d9f 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -42,6 +42,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ unsigned int count;
int ret;
int i;
@@ -49,11 +50,11 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (!simple)
return -ENOMEM;
- ret = of_clk_get_parent_count(np);
- if (ret < 0)
- return ret;
+ count = of_clk_get_parent_count(np);
+ if (!count)
+ return -ENOENT;
- simple->num_clocks = ret;
+ simple->num_clocks = count;
simple->clks = devm_kcalloc(dev, simple->num_clocks,
sizeof(struct clk *), GFP_KERNEL);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 009d83048c8c..adc1e8a624cb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -35,6 +35,7 @@
#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa
+#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
@@ -213,6 +214,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 8d6b75c2f53b..eca2e6d8e041 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -356,7 +356,8 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
*/
usb_status |= dwc->gadget.is_selfpowered;
- if (dwc->speed == DWC3_DSTS_SUPERSPEED) {
+ if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
+ (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (reg & DWC3_DCTL_INITU1ENA)
usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
@@ -426,7 +427,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
case USB_DEVICE_U1_ENABLE:
if (state != USB_STATE_CONFIGURED)
return -EINVAL;
- if (dwc->speed != DWC3_DSTS_SUPERSPEED)
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
return -EINVAL;
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -440,7 +442,8 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
case USB_DEVICE_U2_ENABLE:
if (state != USB_STATE_CONFIGURED)
return -EINVAL;
- if (dwc->speed != DWC3_DSTS_SUPERSPEED)
+ if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+ (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
return -EINVAL;
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2363bad45af8..d54a028cdfeb 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -463,7 +463,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
/* Burst size is only needed in SuperSpeed mode */
- if (dwc->gadget.speed == USB_SPEED_SUPER) {
+ if (dwc->gadget.speed >= USB_SPEED_SUPER) {
u32 burst = dep->endpoint.maxburst - 1;
params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
@@ -568,7 +568,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
if (!usb_endpoint_xfer_isoc(desc))
- return 0;
+ goto out;
/* Link TRB for ISOC. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
@@ -582,9 +582,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
}
+out:
switch (usb_endpoint_type(desc)) {
case USB_ENDPOINT_XFER_CONTROL:
- strlcat(dep->name, "-control", sizeof(dep->name));
+ /* don't change name */
break;
case USB_ENDPOINT_XFER_ISOC:
strlcat(dep->name, "-isoc", sizeof(dep->name));
@@ -1441,7 +1442,8 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
speed = reg & DWC3_DSTS_CONNECTSPD;
- if (speed == DWC3_DSTS_SUPERSPEED) {
+ if ((speed == DWC3_DSTS_SUPERSPEED) ||
+ (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n");
ret = -EINVAL;
goto out;
@@ -1666,10 +1668,16 @@ static int dwc3_gadget_start(struct usb_gadget *g,
case USB_SPEED_HIGH:
reg |= DWC3_DSTS_HIGHSPEED;
break;
- case USB_SPEED_SUPER: /* FALLTHROUGH */
- case USB_SPEED_UNKNOWN: /* FALTHROUGH */
+ case USB_SPEED_SUPER_PLUS:
+ reg |= DWC3_DSTS_SUPERSPEED_PLUS;
+ break;
default:
- reg |= DWC3_DSTS_SUPERSPEED;
+ dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
+ dwc->maximum_speed);
+ /* fall through */
+ case USB_SPEED_SUPER:
+ reg |= DWC3_DCFG_SUPERSPEED;
+ break;
}
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
@@ -2340,7 +2348,8 @@ static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
* this. Maybe it becomes part of the power saving plan.
*/
- if (speed != DWC3_DSTS_SUPERSPEED)
+ if ((speed != DWC3_DSTS_SUPERSPEED) &&
+ (speed != DWC3_DSTS_SUPERSPEED_PLUS))
return;
/*
@@ -2369,6 +2378,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_update_ram_clk_sel(dwc, speed);
switch (speed) {
+ case DWC3_DCFG_SUPERSPEED_PLUS:
+ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ dwc->gadget.ep0->maxpacket = 512;
+ dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
+ break;
case DWC3_DCFG_SUPERSPEED:
/*
* WORKAROUND: DWC3 revisions <1.90a have an issue which
@@ -2410,8 +2424,9 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
/* Enable USB2 LPM Capability */
- if ((dwc->revision > DWC3_REVISION_194A)
- && (speed != DWC3_DCFG_SUPERSPEED)) {
+ if ((dwc->revision > DWC3_REVISION_194A) &&
+ (speed != DWC3_DCFG_SUPERSPEED) &&
+ (speed != DWC3_DCFG_SUPERSPEED_PLUS)) {
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg |= DWC3_DCFG_LPM_CAP;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
@@ -2473,7 +2488,11 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
* implemented.
*/
- dwc->gadget_driver->resume(&dwc->gadget);
+ if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+ spin_unlock(&dwc->lock);
+ dwc->gadget_driver->resume(&dwc->gadget);
+ spin_lock(&dwc->lock);
+ }
}
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 8b14c2a13ac5..de9ffd60fcfa 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -54,6 +54,36 @@ static struct usb_gadget_strings **get_containers_gs(
}
/**
+ * function_descriptors() - get function descriptors for speed
+ * @f: the function
+ * @speed: the speed
+ *
+ * Returns the descriptors or NULL if not set.
+ */
+static struct usb_descriptor_header **
+function_descriptors(struct usb_function *f,
+ enum usb_device_speed speed)
+{
+ struct usb_descriptor_header **descriptors;
+
+ switch (speed) {
+ case USB_SPEED_SUPER_PLUS:
+ descriptors = f->ssp_descriptors;
+ break;
+ case USB_SPEED_SUPER:
+ descriptors = f->ss_descriptors;
+ break;
+ case USB_SPEED_HIGH:
+ descriptors = f->hs_descriptors;
+ break;
+ default:
+ descriptors = f->fs_descriptors;
+ }
+
+ return descriptors;
+}
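This helper replaces three copies of the same speed switch later in this patch (config_buf(), set_config(), and the setup path). Callers shrink to a lookup plus a NULL check, since a function may legitimately provide no descriptor set for a given speed:

	descriptors = function_descriptors(f, gadget->speed);
	if (!descriptors)
		continue;	/* this function has nothing at this speed */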
+
+/**
* next_ep_desc() - advance to the next EP descriptor
* @t: currect pointer within descriptor array
*
@@ -118,6 +148,13 @@ int config_ep_by_speed(struct usb_gadget *g,
/* select desired speed */
switch (g->speed) {
+ case USB_SPEED_SUPER_PLUS:
+ if (gadget_is_superspeed_plus(g)) {
+ speed_desc = f->ssp_descriptors;
+ want_comp_desc = 1;
+ break;
+ }
+		/* else: fall through */
case USB_SPEED_SUPER:
if (gadget_is_superspeed(g)) {
speed_desc = f->ss_descriptors;
@@ -161,7 +198,7 @@ ep_found:
(comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP))
return -EIO;
_ep->comp_desc = comp_desc;
- if (g->speed == USB_SPEED_SUPER) {
+ if (g->speed >= USB_SPEED_SUPER) {
switch (usb_endpoint_type(_ep->desc)) {
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
@@ -237,6 +274,8 @@ int usb_add_function(struct usb_configuration *config,
config->highspeed = true;
if (!config->superspeed && function->ss_descriptors)
config->superspeed = true;
+ if (!config->superspeed_plus && function->ssp_descriptors)
+ config->superspeed_plus = true;
done:
if (value)
@@ -417,17 +456,7 @@ static int config_buf(struct usb_configuration *config,
list_for_each_entry(f, &config->functions, list) {
struct usb_descriptor_header **descriptors;
- switch (speed) {
- case USB_SPEED_SUPER:
- descriptors = f->ss_descriptors;
- break;
- case USB_SPEED_HIGH:
- descriptors = f->hs_descriptors;
- break;
- default:
- descriptors = f->fs_descriptors;
- }
-
+ descriptors = function_descriptors(f, speed);
if (!descriptors)
continue;
status = usb_descriptor_fillbuf(next, len,
@@ -451,7 +480,7 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
u8 type = w_value >> 8;
enum usb_device_speed speed = USB_SPEED_UNKNOWN;
- if (gadget->speed == USB_SPEED_SUPER)
+ if (gadget->speed >= USB_SPEED_SUPER)
speed = gadget->speed;
else if (gadget_is_dualspeed(gadget)) {
int hs = 0;
@@ -482,6 +511,10 @@ static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
check_config:
/* ignore configs that won't work at this speed */
switch (speed) {
+ case USB_SPEED_SUPER_PLUS:
+ if (!c->superspeed_plus)
+ continue;
+ break;
case USB_SPEED_SUPER:
if (!c->superspeed)
continue;
@@ -509,18 +542,24 @@ static int count_configs(struct usb_composite_dev *cdev, unsigned type)
unsigned count = 0;
int hs = 0;
int ss = 0;
+ int ssp = 0;
if (gadget_is_dualspeed(gadget)) {
if (gadget->speed == USB_SPEED_HIGH)
hs = 1;
if (gadget->speed == USB_SPEED_SUPER)
ss = 1;
+ if (gadget->speed == USB_SPEED_SUPER_PLUS)
+ ssp = 1;
if (type == USB_DT_DEVICE_QUALIFIER)
hs = !hs;
}
list_for_each_entry(c, &cdev->configs, list) {
/* ignore configs that won't work at this speed */
- if (ss) {
+ if (ssp) {
+ if (!c->superspeed_plus)
+ continue;
+ } else if (ss) {
if (!c->superspeed)
continue;
} else if (hs) {
@@ -597,6 +636,50 @@ static int bos_desc(struct usb_composite_dev *cdev)
ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
+ /* The SuperSpeedPlus USB Device Capability descriptor */
+ if (gadget_is_superspeed_plus(cdev->gadget)) {
+ struct usb_ssp_cap_descriptor *ssp_cap;
+
+ ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ bos->bNumDeviceCaps++;
+
+ /*
+ * Report typical values.
+ */
+
+ le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(1));
+ ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
+ ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
+
+ /* SSAC = 1 (2 attributes) */
+ ssp_cap->bmAttributes = cpu_to_le32(1);
+
+ /* Min RX/TX Lane Count = 1 */
+ ssp_cap->wFunctionalitySupport =
+ cpu_to_le16((1 << 8) | (1 << 12));
+
+ /*
+ * bmSublinkSpeedAttr[0]:
+ * ST = Symmetric, RX
+ * LSE = 3 (Gbps)
+ * LP = 1 (SuperSpeedPlus)
+ * LSM = 10 (10 Gbps)
+ */
+ ssp_cap->bmSublinkSpeedAttr[0] =
+ cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
+ /*
+		 * bmSublinkSpeedAttr[1]:
+ * ST = Symmetric, TX
+ * LSE = 3 (Gbps)
+ * LP = 1 (SuperSpeedPlus)
+ * LSM = 10 (10 Gbps)
+ */
+ ssp_cap->bmSublinkSpeedAttr[1] =
+ cpu_to_le32((3 << 4) | (1 << 14) |
+ (0xa << 16) | (1 << 7));
+ }
+
return le16_to_cpu(bos->wTotalLength);
}
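Decoding the two attribute words above, per the USB 3.1 layout of bmSublinkSpeedAttr (macro names below are descriptive, not from kernel headers):

#define LSE_GBPS	(3 << 4)	/* lane speed exponent: Gb/s     */
#define ST_TX		(1 << 7)	/* sublink type: TX (0 means RX) */
#define LP_SSP		(1 << 14)	/* link protocol: SuperSpeedPlus */
#define LSM(m)		((m) << 16)	/* lane speed mantissa           */

/* both words describe a symmetric 10 Gb/s SuperSpeedPlus lane, RX then TX */
ssp_cap->bmSublinkSpeedAttr[0] = cpu_to_le32(LSE_GBPS | LP_SSP | LSM(10));
ssp_cap->bmSublinkSpeedAttr[1] = cpu_to_le32(LSE_GBPS | LP_SSP | LSM(10) | ST_TX);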
@@ -690,16 +773,7 @@ static int set_config(struct usb_composite_dev *cdev,
* function's setup callback instead of the current
* configuration's setup callback.
*/
- switch (gadget->speed) {
- case USB_SPEED_SUPER:
- descriptors = f->ss_descriptors;
- break;
- case USB_SPEED_HIGH:
- descriptors = f->hs_descriptors;
- break;
- default:
- descriptors = f->fs_descriptors;
- }
+ descriptors = function_descriptors(f, gadget->speed);
for (; *descriptors; ++descriptors) {
struct usb_endpoint_descriptor *ep;
@@ -819,8 +893,9 @@ int usb_add_config(struct usb_composite_dev *cdev,
} else {
unsigned i;
- DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
+ DBG(cdev, "cfg %d/%p speeds:%s%s%s%s\n",
config->bConfigurationValue, config,
+ config->superspeed_plus ? " superplus" : "",
config->superspeed ? " super" : "",
config->highspeed ? " high" : "",
config->fullspeed
@@ -1499,7 +1574,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
cdev->gadget->ep0->maxpacket;
if (gadget_is_superspeed(gadget)) {
if (gadget->speed >= USB_SPEED_SUPER) {
- cdev->desc.bcdUSB = cpu_to_le16(0x0300);
+ cdev->desc.bcdUSB = cpu_to_le16(0x0310);
cdev->desc.bMaxPacketSize0 = 9;
} else {
cdev->desc.bcdUSB = cpu_to_le16(0x0210);
@@ -1634,15 +1709,24 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
*((u8 *)req->buf) = value;
value = min(w_length, (u16) 1);
break;
-
- /*
- * USB 3.0 additions:
- * Function driver should handle get_status request. If such cb
- * wasn't supplied we respond with default value = 0
- * Note: function driver should supply such cb only for the first
- * interface of the function
- */
case USB_REQ_GET_STATUS:
+ if (gadget_is_otg(gadget) && gadget->hnp_polling_support &&
+ (w_index == OTG_STS_SELECTOR)) {
+ if (ctrl->bRequestType != (USB_DIR_IN |
+ USB_RECIP_DEVICE))
+ goto unknown;
+ *((u8 *)req->buf) = gadget->host_request_flag;
+ value = 1;
+ break;
+ }
+
+ /*
+ * USB 3.0 additions:
+ * Function driver should handle get_status request. If such cb
+ * wasn't supplied we respond with default value = 0
+ * Note: function driver should supply such cb only for the
+ * first interface of the function
+ */
if (!gadget_is_superspeed(gadget))
goto unknown;
if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 0fafa7a1b6f6..e6c0542a063b 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -163,7 +163,8 @@ EXPORT_SYMBOL_GPL(usb_copy_descriptors);
int usb_assign_descriptors(struct usb_function *f,
struct usb_descriptor_header **fs,
struct usb_descriptor_header **hs,
- struct usb_descriptor_header **ss)
+ struct usb_descriptor_header **ss,
+ struct usb_descriptor_header **ssp)
{
struct usb_gadget *g = f->config->cdev->gadget;
@@ -182,6 +183,11 @@ int usb_assign_descriptors(struct usb_function *f,
if (!f->ss_descriptors)
goto err;
}
+ if (ssp && gadget_is_superspeed_plus(g)) {
+ f->ssp_descriptors = usb_copy_descriptors(ssp);
+ if (!f->ssp_descriptors)
+ goto err;
+ }
return 0;
err:
usb_free_all_descriptors(f);
@@ -194,6 +200,7 @@ void usb_free_all_descriptors(struct usb_function *f)
usb_free_descriptors(f->fs_descriptors);
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->ss_descriptors);
+ usb_free_descriptors(f->ssp_descriptors);
}
EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
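Every usb_assign_descriptors() caller below gains a fourth argument. Most functions have no SuperSpeedPlus-specific descriptors and pass NULL; f_mass_storage passes its SuperSpeed set twice, presumably because the bulk-only descriptors are unchanged at Gen2 rates. The two call shapes (descriptor array names are placeholders):

/* typical function: nothing SSP-specific */
status = usb_assign_descriptors(f, fs_descs, hs_descs, ss_descs, NULL);

/* f_mass_storage: reuse the SuperSpeed set for SuperSpeedPlus */
status = usb_assign_descriptors(f, fs_descs, hs_descs, ss_descs, ss_descs);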
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 590c44989e5e..b6f60ca8a035 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -49,7 +49,6 @@ struct gadget_info {
struct config_group configs_group;
struct config_group strings_group;
struct config_group os_desc_group;
- struct config_group *default_groups[5];
struct mutex lock;
struct usb_gadget_strings *gstrings[MAX_USB_STRING_LANGS + 1];
@@ -71,7 +70,6 @@ static inline struct gadget_info *to_gadget_info(struct config_item *item)
struct config_usb_cfg {
struct config_group group;
struct config_group strings_group;
- struct config_group *default_groups[2];
struct list_head string_list;
struct usb_configuration c;
struct list_head func_list;
@@ -666,13 +664,12 @@ static struct config_group *config_desc_make(
INIT_LIST_HEAD(&cfg->string_list);
INIT_LIST_HEAD(&cfg->func_list);
- cfg->group.default_groups = cfg->default_groups;
- cfg->default_groups[0] = &cfg->strings_group;
-
config_group_init_type_name(&cfg->group, name,
&gadget_config_type);
+
config_group_init_type_name(&cfg->strings_group, "strings",
&gadget_config_name_strings_type);
+ configfs_add_default_group(&cfg->strings_group, &cfg->group);
ret = usb_add_config_only(&gi->cdev, &cfg->c);
if (ret)
@@ -1149,15 +1146,11 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
char **names,
struct module *owner)
{
- struct config_group **f_default_groups, *os_desc_group,
- **interface_groups;
+ struct config_group *os_desc_group;
struct config_item_type *os_desc_type, *interface_type;
vla_group(data_chunk);
- vla_item(data_chunk, struct config_group *, f_default_groups, 2);
vla_item(data_chunk, struct config_group, os_desc_group, 1);
- vla_item(data_chunk, struct config_group *, interface_groups,
- n_interf + 1);
vla_item(data_chunk, struct config_item_type, os_desc_type, 1);
vla_item(data_chunk, struct config_item_type, interface_type, 1);
@@ -1165,18 +1158,14 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
if (!vlabuf)
return -ENOMEM;
- f_default_groups = vla_ptr(vlabuf, data_chunk, f_default_groups);
os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group);
os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type);
- interface_groups = vla_ptr(vlabuf, data_chunk, interface_groups);
interface_type = vla_ptr(vlabuf, data_chunk, interface_type);
- parent->default_groups = f_default_groups;
os_desc_type->ct_owner = owner;
config_group_init_type_name(os_desc_group, "os_desc", os_desc_type);
- f_default_groups[0] = os_desc_group;
+ configfs_add_default_group(os_desc_group, parent);
- os_desc_group->default_groups = interface_groups;
interface_type->ct_group_ops = &interf_grp_ops;
interface_type->ct_attrs = interf_grp_attrs;
interface_type->ct_owner = owner;
@@ -1189,7 +1178,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
config_group_init_type_name(&d->group, "", interface_type);
config_item_set_name(&d->group.cg_item, "interface.%s",
names[n_interf]);
- interface_groups[n_interf] = &d->group;
+ configfs_add_default_group(&d->group, os_desc_group);
}
return 0;
@@ -1229,6 +1218,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
}
c->next_interface_id = 0;
memset(c->interface, 0, sizeof(c->interface));
+ c->superspeed_plus = 0;
c->superspeed = 0;
c->highspeed = 0;
c->fullspeed = 0;
@@ -1423,20 +1413,23 @@ static struct config_group *gadgets_make(
if (!gi)
return ERR_PTR(-ENOMEM);
- gi->group.default_groups = gi->default_groups;
- gi->group.default_groups[0] = &gi->functions_group;
- gi->group.default_groups[1] = &gi->configs_group;
- gi->group.default_groups[2] = &gi->strings_group;
- gi->group.default_groups[3] = &gi->os_desc_group;
+ config_group_init_type_name(&gi->group, name, &gadget_root_type);
config_group_init_type_name(&gi->functions_group, "functions",
&functions_type);
+ configfs_add_default_group(&gi->functions_group, &gi->group);
+
config_group_init_type_name(&gi->configs_group, "configs",
&config_desc_type);
+ configfs_add_default_group(&gi->configs_group, &gi->group);
+
config_group_init_type_name(&gi->strings_group, "strings",
&gadget_strings_strings_type);
+ configfs_add_default_group(&gi->strings_group, &gi->group);
+
config_group_init_type_name(&gi->os_desc_group, "os_desc",
&os_desc_type);
+ configfs_add_default_group(&gi->os_desc_group, &gi->group);
gi->composite.bind = configfs_do_nothing;
gi->composite.unbind = configfs_do_nothing;
@@ -1461,8 +1454,6 @@ static struct config_group *gadgets_make(
if (!gi->composite.gadget_driver.function)
goto err;
- config_group_init_type_name(&gi->group, name,
- &gadget_root_type);
return &gi->group;
err:
kfree(gi);
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 2fa1e80a3ce7..a30766ca4226 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -685,7 +685,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
- acm_ss_function);
+ acm_ss_function, NULL);
if (status)
goto fail;
@@ -777,10 +777,10 @@ static ssize_t f_acm_port_num_show(struct config_item *item, char *page)
return sprintf(page, "%u\n", to_f_serial_opts(item)->port_num);
}
-CONFIGFS_ATTR_RO(f_acm_port_, num);
+CONFIGFS_ATTR_RO(f_acm_, port_num);
static struct configfs_attribute *acm_attrs[] = {
- &f_acm_port_attr_num,
+ &f_acm_attr_port_num,
NULL,
};
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 7ad60ee41914..4c488d15b6f6 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -786,7 +786,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
fs_ecm_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function,
- ecm_ss_function);
+ ecm_ss_function, NULL);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index cad35a502d3f..d58bfc32be9e 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -309,7 +309,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function,
- eem_ss_function);
+ eem_ss_function, NULL);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index cf43e9e18368..e21ca2bd6839 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -684,44 +684,38 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
+ struct usb_request *req;
struct ffs_ep *ep;
char *data = NULL;
ssize_t ret, data_len = -EINVAL;
int halt;
/* Are we still active? */
- if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
- ret = -ENODEV;
- goto error;
- }
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+ return -ENODEV;
/* Wait for endpoint to be enabled */
ep = epfile->ep;
if (!ep) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- goto error;
- }
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
- if (ret) {
- ret = -EINTR;
- goto error;
- }
+ if (ret)
+ return -EINTR;
}
/* Do we halt? */
halt = (!io_data->read == !epfile->in);
- if (halt && epfile->isoc) {
- ret = -EINVAL;
- goto error;
- }
+ if (halt && epfile->isoc)
+ return -EINVAL;
/* Allocate & copy */
if (!halt) {
/*
* if we _do_ wait above, the epfile->ffs->gadget might be NULL
- * before the waiting completes, so do not assign to 'gadget' earlier
+ * before the waiting completes, so do not assign to 'gadget'
+ * earlier
*/
struct usb_gadget *gadget = epfile->ffs->gadget;
size_t copied;
@@ -763,17 +757,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
if (epfile->ep != ep) {
/* In the meantime, endpoint got disabled or changed. */
ret = -ESHUTDOWN;
- spin_unlock_irq(&epfile->ffs->eps_lock);
} else if (halt) {
/* Halt */
if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
usb_ep_set_halt(ep->ep);
- spin_unlock_irq(&epfile->ffs->eps_lock);
ret = -EBADMSG;
- } else {
- /* Fire the request */
- struct usb_request *req;
-
+ } else if (unlikely(data_len == -EINVAL)) {
/*
* Sanity Check: even though data_len can't be used
* uninitialized at the time I write this comment, some
@@ -785,80 +774,80 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* For such reason, we're adding this redundant sanity check
* here.
*/
- if (unlikely(data_len == -EINVAL)) {
- WARN(1, "%s: data_len == -EINVAL\n", __func__);
- ret = -EINVAL;
- goto error_lock;
- }
-
- if (io_data->aio) {
- req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
- if (unlikely(!req))
- goto error_lock;
-
- req->buf = data;
- req->length = data_len;
+ WARN(1, "%s: data_len == -EINVAL\n", __func__);
+ ret = -EINVAL;
+ } else if (!io_data->aio) {
+ DECLARE_COMPLETION_ONSTACK(done);
+ bool interrupted = false;
- io_data->buf = data;
- io_data->ep = ep->ep;
- io_data->req = req;
- io_data->ffs = epfile->ffs;
+ req = ep->req;
+ req->buf = data;
+ req->length = data_len;
- req->context = io_data;
- req->complete = ffs_epfile_async_io_complete;
+ req->context = &done;
+ req->complete = ffs_epfile_io_complete;
- ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
- if (unlikely(ret)) {
- usb_ep_free_request(ep->ep, req);
- goto error_lock;
- }
- ret = -EIOCBQUEUED;
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ if (unlikely(ret < 0))
+ goto error_lock;
- spin_unlock_irq(&epfile->ffs->eps_lock);
- } else {
- DECLARE_COMPLETION_ONSTACK(done);
+ spin_unlock_irq(&epfile->ffs->eps_lock);
- req = ep->req;
- req->buf = data;
- req->length = data_len;
+ if (unlikely(wait_for_completion_interruptible(&done))) {
+ /*
+			 * To avoid a race with ffs_epfile_io_complete,
+			 * dequeue the request first and then check its
+			 * status. The usb_ep_dequeue API should guarantee
+			 * no race with the req->complete callback.
+ */
+ usb_ep_dequeue(ep->ep, req);
+ interrupted = ep->status < 0;
+ }
- req->context = &done;
- req->complete = ffs_epfile_io_complete;
+ /*
+		 * XXX We may end up silently dropping data here. Since data_len
+		 * (i.e. req->length) may be bigger than len (after being
+		 * rounded up to maxpacketsize), we may end up with more data
+		 * than user space has space for.
+ */
+ ret = interrupted ? -EINTR : ep->status;
+ if (io_data->read && ret > 0) {
+ ret = copy_to_iter(data, ret, &io_data->data);
+ if (!ret)
+ ret = -EFAULT;
+ }
+ goto error_mutex;
+ } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ } else {
+ req->buf = data;
+ req->length = data_len;
- ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ io_data->buf = data;
+ io_data->ep = ep->ep;
+ io_data->req = req;
+ io_data->ffs = epfile->ffs;
- spin_unlock_irq(&epfile->ffs->eps_lock);
+ req->context = io_data;
+ req->complete = ffs_epfile_async_io_complete;
- if (unlikely(ret < 0)) {
- /* nop */
- } else if (unlikely(
- wait_for_completion_interruptible(&done))) {
- ret = -EINTR;
- usb_ep_dequeue(ep->ep, req);
- } else {
- /*
- * XXX We may end up silently droping data
- * here. Since data_len (i.e. req->length) may
- * be bigger than len (after being rounded up
- * to maxpacketsize), we may end up with more
- * data then user space has space for.
- */
- ret = ep->status;
- if (io_data->read && ret > 0) {
- ret = copy_to_iter(data, ret, &io_data->data);
- if (!ret)
- ret = -EFAULT;
- }
- }
- kfree(data);
+ ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+ if (unlikely(ret)) {
+ usb_ep_free_request(ep->ep, req);
+ goto error_lock;
}
- }
- mutex_unlock(&epfile->mutex);
- return ret;
+ ret = -EIOCBQUEUED;
+ /*
+ * Do not kfree the buffer in this function. It will be freed
+ * by ffs_user_copy_worker.
+ */
+ data = NULL;
+ }
error_lock:
spin_unlock_irq(&epfile->ffs->eps_lock);
+error_mutex:
mutex_unlock(&epfile->mutex);
error:
kfree(data);
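The synchronous branch now follows the queue/wait/dequeue pattern in one place. A condensed sketch of the flow (locking and buffer handling trimmed):

	DECLARE_COMPLETION_ONSTACK(done);
	bool interrupted = false;
	ssize_t ret;

	req->context  = &done;
	req->complete = ffs_epfile_io_complete;	/* calls complete(req->context) */

	ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
	if (ret == 0 && wait_for_completion_interruptible(&done)) {
		/* dequeue first, then read status: per the comment above,
		 * usb_ep_dequeue() must not race with req->complete */
		usb_ep_dequeue(ep->ep, req);
		interrupted = ep->status < 0;
	}
	if (ret == 0)
		ret = interrupted ? -EINTR : ep->status;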
@@ -1158,8 +1147,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
ffs->sb = sb;
data->ffs_data = NULL;
sb->s_fs_info = ffs;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = FUNCTIONFS_MAGIC;
sb->s_op = &ffs_sb_operations;
sb->s_time_gran = 1;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 99285b416308..51980c50546d 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -646,7 +646,7 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
hidg_fs_out_ep_desc.bEndpointAddress;
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, NULL);
+ hidg_hs_descriptors, NULL, NULL);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index ddc3aad886b7..3a9f8f9c77bd 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -211,7 +211,7 @@ autoconf_fail:
ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs,
- ss_loopback_descs);
+ ss_loopback_descs, NULL);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 223ccf89d226..acf210f16328 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -3093,7 +3093,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
- fsg_ss_function);
+ fsg_ss_function, fsg_ss_function);
if (ret)
goto autoconf_fail;
@@ -3484,12 +3484,12 @@ static struct usb_function_instance *fsg_alloc_inst(void)
opts->lun0.lun = opts->common->luns[0];
opts->lun0.lun_id = 0;
- config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
- opts->default_groups[0] = &opts->lun0.group;
- opts->func_inst.group.default_groups = opts->default_groups;
config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);
+ config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
+ configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group);
+
return &opts->func_inst;
release_buffers:
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index fb1fe96d3215..58fc199a18ec 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kfifo.h>
+#include <linux/spinlock.h>
#include <sound/core.h>
#include <sound/initval.h>
@@ -56,7 +57,7 @@ static const char f_midi_longname[] = "MIDI Gadget";
* USB <- IN endpoint <- rawmidi
*/
struct gmidi_in_port {
- struct f_midi *midi;
+ struct snd_rawmidi_substream *substream;
int active;
uint8_t cable;
uint8_t state;
@@ -78,9 +79,7 @@ struct f_midi {
struct snd_rawmidi *rmidi;
u8 ms_id;
- struct snd_rawmidi_substream *in_substream[MAX_PORTS];
struct snd_rawmidi_substream *out_substream[MAX_PORTS];
- struct gmidi_in_port *in_port[MAX_PORTS];
unsigned long out_triggered;
struct tasklet_struct tasklet;
@@ -91,7 +90,10 @@ struct f_midi {
unsigned int buflen, qlen;
/* This fifo is used as a buffer ring for pre-allocated IN usb_requests */
DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *);
+ spinlock_t transmit_lock;
unsigned int in_last_port;
+
+ struct gmidi_in_port in_ports_array[/* in_ports */];
};
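in_ports_array is a C99 flexible array member, so one kzalloc() now covers the struct and every port, replacing the MAX_PORTS pointer slots and the per-port allocations. The allocation site later in this patch:

	midi = kzalloc(sizeof(*midi) +
		       opts->in_ports * sizeof(*midi->in_ports_array),
		       GFP_KERNEL);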
static inline struct f_midi *func_to_midi(struct usb_function *f)
@@ -358,7 +360,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* allocate a bunch of read buffers and queue them all at once. */
for (i = 0; i < midi->qlen && err == 0; i++) {
struct usb_request *req =
- midi_alloc_ep_req(midi->out_ep, midi->buflen);
+ midi_alloc_ep_req(midi->out_ep,
+ max_t(unsigned, midi->buflen,
+ bulk_out_desc.wMaxPacketSize));
if (req == NULL)
return -ENOMEM;
@@ -518,98 +522,102 @@ static void f_midi_drop_out_substreams(struct f_midi *midi)
{
unsigned int i;
- for (i = 0; i < MAX_PORTS; i++) {
- struct gmidi_in_port *port = midi->in_port[i];
- struct snd_rawmidi_substream *substream = midi->in_substream[i];
+ for (i = 0; i < midi->in_ports; i++) {
+ struct gmidi_in_port *port = midi->in_ports_array + i;
+ struct snd_rawmidi_substream *substream = port->substream;
+ if (port->active && substream)
+ snd_rawmidi_drop_output(substream);
+ }
+}
- if (!port)
- break;
+static int f_midi_do_transmit(struct f_midi *midi, struct usb_ep *ep)
+{
+ struct usb_request *req = NULL;
+ unsigned int len, i;
+ bool active = false;
+ int err;
+
+ /*
+ * We peek the request in order to reuse it if it fails to enqueue on
+	 * its endpoint.
+ */
+ len = kfifo_peek(&midi->in_req_fifo, &req);
+ if (len != 1) {
+ ERROR(midi, "%s: Couldn't get usb request\n", __func__);
+ return -1;
+ }
+
+ /*
+ * If buffer overrun, then we ignore this transmission.
+ * IMPORTANT: This will cause the user-space rawmidi device to block
+ * until a) usb requests have been completed or b) snd_rawmidi_write()
+ * times out.
+ */
+ if (req->length > 0)
+ return 0;
+
+ for (i = midi->in_last_port; i < midi->in_ports; ++i) {
+ struct gmidi_in_port *port = midi->in_ports_array + i;
+ struct snd_rawmidi_substream *substream = port->substream;
if (!port->active || !substream)
continue;
- snd_rawmidi_drop_output(substream);
+ while (req->length + 3 < midi->buflen) {
+ uint8_t b;
+
+ if (snd_rawmidi_transmit(substream, &b, 1) != 1) {
+ port->active = 0;
+ break;
+ }
+ f_midi_transmit_byte(req, port, b);
+ }
+
+ active = !!port->active;
+ if (active)
+ break;
+ }
+ midi->in_last_port = active ? i : 0;
+
+ if (req->length <= 0)
+ goto done;
+
+ err = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (err < 0) {
+ ERROR(midi, "%s failed to queue req: %d\n",
+ midi->in_ep->name, err);
+ req->length = 0; /* Re-use request next time. */
+ } else {
+ /* Upon success, put request at the back of the queue. */
+ kfifo_skip(&midi->in_req_fifo);
+ kfifo_put(&midi->in_req_fifo, req);
}
+
+done:
+ return active;
}
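The request ring uses a peek/skip/put dance so a request only leaves the FIFO once it has been queued successfully; on failure it stays at the front for the next attempt. The pattern in isolation:

	struct usb_request *req;

	if (kfifo_peek(&midi->in_req_fifo, &req) != 1)	/* look, don't remove */
		return -1;

	if (usb_ep_queue(ep, req, GFP_ATOMIC) == 0) {
		kfifo_skip(&midi->in_req_fifo);		/* drop from the front... */
		kfifo_put(&midi->in_req_fifo, req);	/* ...and rotate to the back */
	}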
static void f_midi_transmit(struct f_midi *midi)
{
struct usb_ep *ep = midi->in_ep;
- bool active;
+ int ret;
+ unsigned long flags;
/* We only care about USB requests if IN endpoint is enabled */
if (!ep || !ep->enabled)
goto drop_out;
- do {
- struct usb_request *req = NULL;
- unsigned int len, i;
+ spin_lock_irqsave(&midi->transmit_lock, flags);
- active = false;
-
- /* We peek the request in order to reuse it if it fails
- * to enqueue on its endpoint */
- len = kfifo_peek(&midi->in_req_fifo, &req);
- if (len != 1) {
- ERROR(midi, "%s: Couldn't get usb request\n", __func__);
+ do {
+ ret = f_midi_do_transmit(midi, ep);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&midi->transmit_lock, flags);
goto drop_out;
}
+ } while (ret);
- /* If buffer overrun, then we ignore this transmission.
- * IMPORTANT: This will cause the user-space rawmidi device to block until a) usb
- * requests have been completed or b) snd_rawmidi_write() times out. */
- if (req->length > 0)
- return;
-
- for (i = midi->in_last_port; i < MAX_PORTS; i++) {
- struct gmidi_in_port *port = midi->in_port[i];
- struct snd_rawmidi_substream *substream = midi->in_substream[i];
-
- if (!port) {
- /* Reset counter when we reach the last available port */
- midi->in_last_port = 0;
- break;
- }
-
- if (!port->active || !substream)
- continue;
-
- while (req->length + 3 < midi->buflen) {
- uint8_t b;
-
- if (snd_rawmidi_transmit(substream, &b, 1) != 1) {
- port->active = 0;
- break;
- }
- f_midi_transmit_byte(req, port, b);
- }
-
- active = !!port->active;
- /* Check if last port is still active, which means that
- * there is still data on that substream but this current
- * request run out of space. */
- if (active) {
- midi->in_last_port = i;
- /* There is no need to re-iterate though midi ports. */
- break;
- }
- }
-
- if (req->length > 0) {
- int err;
-
- err = usb_ep_queue(ep, req, GFP_ATOMIC);
- if (err < 0) {
- ERROR(midi, "%s failed to queue req: %d\n",
- midi->in_ep->name, err);
- req->length = 0; /* Re-use request next time. */
- } else {
- /* Upon success, put request at the back of the queue. */
- kfifo_skip(&midi->in_req_fifo);
- kfifo_put(&midi->in_req_fifo, req);
- }
- }
- } while (active);
+ spin_unlock_irqrestore(&midi->transmit_lock, flags);
return;
@@ -626,13 +634,15 @@ static void f_midi_in_tasklet(unsigned long data)
static int f_midi_in_open(struct snd_rawmidi_substream *substream)
{
struct f_midi *midi = substream->rmidi->private_data;
+ struct gmidi_in_port *port;
- if (!midi->in_port[substream->number])
+ if (substream->number >= midi->in_ports)
return -EINVAL;
VDBG(midi, "%s()\n", __func__);
- midi->in_substream[substream->number] = substream;
- midi->in_port[substream->number]->state = STATE_UNKNOWN;
+ port = midi->in_ports_array + substream->number;
+ port->substream = substream;
+ port->state = STATE_UNKNOWN;
return 0;
}
@@ -648,11 +658,11 @@ static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up)
{
struct f_midi *midi = substream->rmidi->private_data;
- if (!midi->in_port[substream->number])
+ if (substream->number >= midi->in_ports)
return;
VDBG(midi, "%s() %d\n", __func__, up);
- midi->in_port[substream->number]->active = up;
+ midi->in_ports_array[substream->number].active = up;
if (up)
tasklet_hi_schedule(&midi->tasklet);
}
@@ -1128,14 +1138,11 @@ static void f_midi_free(struct usb_function *f)
{
struct f_midi *midi;
struct f_midi_opts *opts;
- int i;
midi = func_to_midi(f);
opts = container_of(f->fi, struct f_midi_opts, func_inst);
kfree(midi->id);
mutex_lock(&opts->lock);
- for (i = opts->in_ports - 1; i >= 0; --i)
- kfree(midi->in_port[i]);
kfifo_free(&midi->in_req_fifo);
kfree(midi);
--opts->refcnt;
@@ -1163,7 +1170,7 @@ static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
{
- struct f_midi *midi;
+ struct f_midi *midi = NULL;
struct f_midi_opts *opts;
int status, i;
@@ -1172,37 +1179,26 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
mutex_lock(&opts->lock);
/* sanity check */
if (opts->in_ports > MAX_PORTS || opts->out_ports > MAX_PORTS) {
- mutex_unlock(&opts->lock);
- return ERR_PTR(-EINVAL);
+ status = -EINVAL;
+ goto setup_fail;
}
/* allocate and initialize one new instance */
- midi = kzalloc(sizeof(*midi), GFP_KERNEL);
+ midi = kzalloc(
+ sizeof(*midi) + opts->in_ports * sizeof(*midi->in_ports_array),
+ GFP_KERNEL);
if (!midi) {
- mutex_unlock(&opts->lock);
- return ERR_PTR(-ENOMEM);
+ status = -ENOMEM;
+ goto setup_fail;
}
- for (i = 0; i < opts->in_ports; i++) {
- struct gmidi_in_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
-
- if (!port) {
- status = -ENOMEM;
- mutex_unlock(&opts->lock);
- goto setup_fail;
- }
-
- port->midi = midi;
- port->active = 0;
- port->cable = i;
- midi->in_port[i] = port;
- }
+ for (i = 0; i < opts->in_ports; i++)
+ midi->in_ports_array[i].cable = i;
/* set up ALSA midi devices */
midi->id = kstrdup(opts->id, GFP_KERNEL);
if (opts->id && !midi->id) {
status = -ENOMEM;
- mutex_unlock(&opts->lock);
goto setup_fail;
}
midi->in_ports = opts->in_ports;
@@ -1216,6 +1212,8 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
if (status)
goto setup_fail;
+ spin_lock_init(&midi->transmit_lock);
+
++opts->refcnt;
mutex_unlock(&opts->lock);
@@ -1229,8 +1227,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
return &midi->func;
setup_fail:
- for (--i; i >= 0; i--)
- kfree(midi->in_port[i]);
+ mutex_unlock(&opts->lock);
kfree(midi);
return ERR_PTR(status);
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 7ad798ace1e5..97f0a9bc84df 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1432,7 +1432,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
fs_ncm_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- NULL);
+ NULL, NULL);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index d6396e0909ee..d43e86cea74f 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -364,7 +364,8 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f)
obex_hs_ep_out_desc.bEndpointAddress =
obex_fs_ep_out_desc.bEndpointAddress;
- status = usb_assign_descriptors(f, fs_function, hs_function, NULL);
+ status = usb_assign_descriptors(f, fs_function, hs_function, NULL,
+ NULL);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 157441dbfeba..0473d619d5bf 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -541,7 +541,7 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
/* Do not try to bind Phonet twice... */
status = usb_assign_descriptors(f, fs_pn_function, hs_pn_function,
- NULL);
+ NULL, NULL);
if (status)
goto err;
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 26ccad5d8680..c45104e3a64b 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1051,7 +1051,7 @@ autoconf_fail:
ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_printer_function,
- hs_printer_function, ss_printer_function);
+ hs_printer_function, ss_printer_function, NULL);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index e587767e374c..c8005823b190 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -783,7 +783,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
- eth_ss_function);
+ eth_ss_function, NULL);
if (status)
goto fail;
@@ -889,7 +889,6 @@ static void rndis_free_inst(struct usb_function_instance *f)
free_netdev(opts->net);
}
- kfree(opts->rndis_os_desc.group.default_groups); /* single VLA chunk */
kfree(opts);
}
@@ -916,10 +915,10 @@ static struct usb_function_instance *rndis_alloc_inst(void)
descs[0] = &opts->rndis_os_desc;
names[0] = "rndis";
- usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
- names, THIS_MODULE);
config_group_init_type_name(&opts->func_inst.group, "",
&rndis_func_type);
+ usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
+ names, THIS_MODULE);
return &opts->func_inst;
}
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 6bb44d613bab..cb00ada21d9c 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -236,7 +236,7 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function,
- gser_ss_function);
+ gser_ss_function, NULL);
if (status)
goto fail;
dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 242ba5caffe5..df0189ddfdd5 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -437,7 +437,7 @@ no_iso:
ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_source_sink_descs,
- hs_source_sink_descs, ss_source_sink_descs);
+ hs_source_sink_descs, ss_source_sink_descs, NULL);
if (ret)
return ret;
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 829c78de9eba..434b983f3b4c 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -362,7 +362,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
fs_subset_out_desc.bEndpointAddress;
status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function,
- ss_eth_function);
+ ss_eth_function, NULL);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index bad007b5a190..2ace0295408e 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -41,13 +41,6 @@ static inline struct f_uas *to_f_uas(struct usb_function *f)
return container_of(f, struct f_uas, function);
}
-static void usbg_cmd_release(struct kref *);
-
-static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
-{
- kref_put(&cmd->ref, usbg_cmd_release);
-}
-
/* Start bot.c code */
static int bot_enqueue_cmd_cbw(struct f_uas *fu)
@@ -68,7 +61,7 @@ static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
struct usbg_cmd *cmd = req->context;
struct f_uas *fu = cmd->fu;
- usbg_cleanup_cmd(cmd);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
if (req->status < 0) {
pr_err("ERR %s(%d)\n", __func__, __LINE__);
return;
@@ -605,7 +598,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
break;
case UASP_QUEUE_COMMAND:
- usbg_cleanup_cmd(cmd);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
break;
@@ -615,7 +608,7 @@ static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
return;
cleanup:
- usbg_cleanup_cmd(cmd);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
}
static int uasp_send_status_response(struct usbg_cmd *cmd)
@@ -977,7 +970,7 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
return;
cleanup:
- usbg_cleanup_cmd(cmd);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
}
static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
@@ -1046,7 +1039,7 @@ static void usbg_cmd_work(struct work_struct *work)
struct se_cmd *se_cmd;
struct tcm_usbg_nexus *tv_nexus;
struct usbg_tpg *tpg;
- int dir;
+ int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
@@ -1060,9 +1053,9 @@ static void usbg_cmd_work(struct work_struct *work)
goto out;
}
- if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
- cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
+ if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
+ cmd->sense_iu.sense, cmd->unpacked_lun, 0,
+ cmd->prio_attr, dir, flags) < 0)
goto out;
return;
@@ -1070,42 +1063,64 @@ static void usbg_cmd_work(struct work_struct *work)
out:
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- usbg_cleanup_cmd(cmd);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
}
+static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
+ struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
+{
+ struct se_session *se_sess = tv_nexus->tvn_se_sess;
+ struct usbg_cmd *cmd;
+ int tag;
+
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+ if (tag < 0)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->se_cmd.map_tag = tag;
+ cmd->se_cmd.tag = cmd->tag = scsi_tag;
+ cmd->fu = fu;
+
+ return cmd;
+}
+
+static void usbg_release_cmd(struct se_cmd *);
+
static int usbg_submit_command(struct f_uas *fu,
void *cmdbuf, unsigned int len)
{
struct command_iu *cmd_iu = cmdbuf;
struct usbg_cmd *cmd;
- struct usbg_tpg *tpg;
- struct tcm_usbg_nexus *tv_nexus;
+ struct usbg_tpg *tpg = fu->tpg;
+ struct tcm_usbg_nexus *tv_nexus = tpg->tpg_nexus;
u32 cmd_len;
+ u16 scsi_tag;
if (cmd_iu->iu_id != IU_ID_COMMAND) {
pr_err("Unsupported type %d\n", cmd_iu->iu_id);
return -EINVAL;
}
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd)
- return -ENOMEM;
-
- cmd->fu = fu;
-
- /* XXX until I figure out why I can't free in on complete */
- kref_init(&cmd->ref);
- kref_get(&cmd->ref);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ pr_err("Missing nexus, ignoring command\n");
+ return -EINVAL;
+ }
- tpg = fu->tpg;
cmd_len = (cmd_iu->len & ~0x3) + 16;
if (cmd_len > USBG_MAX_CMD)
- goto err;
+ return -EINVAL;
+ scsi_tag = be16_to_cpup(&cmd_iu->tag);
+ cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
+ if (IS_ERR(cmd)) {
+ pr_err("usbg_get_cmd failed\n");
+ return -ENOMEM;
+ }
memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
- cmd->tag = be16_to_cpup(&cmd_iu->tag);
- cmd->se_cmd.tag = cmd->tag;
if (fu->flags & USBG_USE_STREAMS) {
if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
goto err;
@@ -1117,12 +1132,6 @@ static int usbg_submit_command(struct f_uas *fu,
cmd->stream = &fu->stream[0];
}
- tv_nexus = tpg->tpg_nexus;
- if (!tv_nexus) {
- pr_err("Missing nexus, ignoring command\n");
- goto err;
- }
-
switch (cmd_iu->prio_attr & 0x7) {
case UAS_HEAD_TAG:
cmd->prio_attr = TCM_HEAD_TAG;
@@ -1148,7 +1157,7 @@ static int usbg_submit_command(struct f_uas *fu,
return 0;
err:
- kfree(cmd);
+ usbg_release_cmd(&cmd->se_cmd);
return -EINVAL;
}
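
Together with usbg_release_cmd() further down, this replaces per-command kzalloc()/kfree() and the hand-rolled kref with the target core's preallocated per-session tag pool: TARGET_SCF_ACK_KREF makes the core take its own reference (dropped via usbg_check_stop_free() -> target_put_sess_cmd()), while the submitter's reference is dropped with transport_generic_free_cmd(). The allocate/release pairing, sketched with the patch's identifiers:

	/* borrow a preallocated slot; may fail under load, hence ERR_PTR */
	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
	cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
	cmd->se_cmd.map_tag = tag;	/* remembered so the slot can be returned */

	/* ...final reference dropped; usbg_release_cmd() then runs... */
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);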
@@ -1182,7 +1191,7 @@ static void bot_cmd_work(struct work_struct *work)
out:
transport_send_check_condition_and_sense(se_cmd,
TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- usbg_cleanup_cmd(cmd);
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
}
static int bot_submit_command(struct f_uas *fu,
@@ -1190,7 +1199,7 @@ static int bot_submit_command(struct f_uas *fu,
{
struct bulk_cb_wrap *cbw = cmdbuf;
struct usbg_cmd *cmd;
- struct usbg_tpg *tpg;
+ struct usbg_tpg *tpg = fu->tpg;
struct tcm_usbg_nexus *tv_nexus;
u32 cmd_len;
@@ -1207,28 +1216,20 @@ static int bot_submit_command(struct f_uas *fu,
if (cmd_len < 1 || cmd_len > 16)
return -EINVAL;
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
- if (!cmd)
- return -ENOMEM;
-
- cmd->fu = fu;
-
- /* XXX until I figure out why I can't free in on complete */
- kref_init(&cmd->ref);
- kref_get(&cmd->ref);
-
- tpg = fu->tpg;
-
- memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
-
- cmd->bot_tag = cbw->Tag;
-
tv_nexus = tpg->tpg_nexus;
if (!tv_nexus) {
pr_err("Missing nexus, ignoring command\n");
- goto err;
+ return -ENODEV;
}
+ cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
+ if (IS_ERR(cmd)) {
+ pr_err("usbg_get_cmd failed\n");
+ return -ENOMEM;
+ }
+ memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
+
+ cmd->bot_tag = cbw->Tag;
cmd->prio_attr = TCM_SIMPLE_TAG;
cmd->unpacked_lun = cbw->Lun;
cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
@@ -1239,9 +1240,6 @@ static int bot_submit_command(struct f_uas *fu,
queue_work(tpg->workqueue, &cmd->work);
return 0;
-err:
- kfree(cmd);
- return -EINVAL;
}
/* Start fabric.c code */
@@ -1282,20 +1280,14 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static void usbg_cmd_release(struct kref *ref)
-{
- struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
- ref);
-
- transport_generic_free_cmd(&cmd->se_cmd, 0);
-}
-
static void usbg_release_cmd(struct se_cmd *se_cmd)
{
struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
+
kfree(cmd->data_buf);
- kfree(cmd);
+ percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
static int usbg_shutdown_session(struct se_session *se_sess)
@@ -1579,55 +1571,48 @@ out:
return ret;
}
+static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *p)
+{
+ struct usbg_tpg *tpg = container_of(se_tpg,
+ struct usbg_tpg, se_tpg);
+
+ tpg->tpg_nexus = p;
+ return 0;
+}
+
static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
{
- struct se_portal_group *se_tpg;
struct tcm_usbg_nexus *tv_nexus;
- int ret;
+ int ret = 0;
mutex_lock(&tpg->tpg_mutex);
if (tpg->tpg_nexus) {
ret = -EEXIST;
pr_debug("tpg->tpg_nexus already exists\n");
- goto err_unlock;
+ goto out_unlock;
}
- se_tpg = &tpg->se_tpg;
- ret = -ENOMEM;
tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
- if (!tv_nexus)
- goto err_unlock;
- tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
- if (IS_ERR(tv_nexus->tvn_se_sess))
- goto err_free;
+ if (!tv_nexus) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
- /*
- * Since we are running in 'demo mode' this call with generate a
- * struct se_node_acl for the tcm_vhost struct se_portal_group with
- * the SCSI Initiator port name of the passed configfs group 'name'.
- */
- tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
- se_tpg, name);
- if (!tv_nexus->tvn_se_sess->se_node_acl) {
+ tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ USB_G_DEFAULT_SESSION_TAGS,
+ sizeof(struct usbg_cmd),
+ TARGET_PROT_NORMAL, name,
+ tv_nexus, usbg_alloc_sess_cb);
+ if (IS_ERR(tv_nexus->tvn_se_sess)) {
#define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
pr_debug(MAKE_NEXUS_MSG, name);
#undef MAKE_NEXUS_MSG
- goto err_session;
+ ret = PTR_ERR(tv_nexus->tvn_se_sess);
+ kfree(tv_nexus);
}
- /*
- * Now register the TCM vHost virtual I_T Nexus as active.
- */
- transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
- tv_nexus->tvn_se_sess, tv_nexus);
- tpg->tpg_nexus = tv_nexus;
- mutex_unlock(&tpg->tpg_mutex);
- return 0;
-err_session:
- transport_free_session(tv_nexus->tvn_se_sess);
-err_free:
- kfree(tv_nexus);
-err_unlock:
+out_unlock:
mutex_unlock(&tpg->tpg_mutex);
return ret;
}
@@ -1735,11 +1720,7 @@ static void usbg_port_unlink(struct se_portal_group *se_tpg,
static int usbg_check_stop_free(struct se_cmd *se_cmd)
{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
-
- kref_put(&cmd->ref, usbg_cmd_release);
- return 1;
+ return target_put_sess_cmd(se_cmd);
}
static const struct target_core_fabric_ops usbg_ops = {
@@ -2098,7 +2079,7 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, uasp_fs_function_desc,
- uasp_hs_function_desc, uasp_ss_function_desc);
+ uasp_hs_function_desc, uasp_ss_function_desc, NULL);
if (ret)
goto ep_fail;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 6a2346b99f55..f2ac0cbc29a4 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -721,7 +721,8 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
status = -ENOMEM;
/* copy descriptors, and track endpoint copies */
- status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL);
+ status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL,
+ NULL);
if (status)
goto fail;
return 0;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 044ca79d3cb5..186d4b162524 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1100,7 +1100,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
- ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL);
+ ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
+ NULL);
if (ret)
goto err;
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 70d3917cc003..943c21aafd3b 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -914,7 +914,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
params->media_state = RNDIS_MEDIA_STATE_DISCONNECTED;
params->resp_avail = resp_avail;
params->v = v;
- INIT_LIST_HEAD(&(params->resp_queue));
+ INIT_LIST_HEAD(&params->resp_queue);
pr_debug("%s: configNr = %d\n", __func__, i);
return params;
@@ -1006,13 +1006,10 @@ EXPORT_SYMBOL_GPL(rndis_add_hdr);
void rndis_free_response(struct rndis_params *params, u8 *buf)
{
- rndis_resp_t *r;
- struct list_head *act, *tmp;
+ rndis_resp_t *r, *n;
- list_for_each_safe(act, tmp, &(params->resp_queue))
- {
- r = list_entry(act, rndis_resp_t, list);
- if (r && r->buf == buf) {
+ list_for_each_entry_safe(r, n, &params->resp_queue, list) {
+ if (r->buf == buf) {
list_del(&r->list);
kfree(r);
}
@@ -1022,14 +1019,11 @@ EXPORT_SYMBOL_GPL(rndis_free_response);
u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
{
- rndis_resp_t *r;
- struct list_head *act, *tmp;
+ rndis_resp_t *r, *n;
if (!length) return NULL;
- list_for_each_safe(act, tmp, &(params->resp_queue))
- {
- r = list_entry(act, rndis_resp_t, list);
+ list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (!r->send) {
r->send = 1;
*length = r->length;
@@ -1053,7 +1047,7 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
r->length = length;
r->send = 0;
- list_add_tail(&r->list, &(params->resp_queue));
+ list_add_tail(&r->list, &params->resp_queue);
return r;
}
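
The walks above also shed a dead test: list_entry() is pointer arithmetic and never returns NULL, so the old `if (r && ...)` check could not fail. list_for_each_entry_safe() folds the entry lookup into the iterator and keeps deletion of the current node safe, because the second cursor already points past it:

	rndis_resp_t *r, *n;

	list_for_each_entry_safe(r, n, &params->resp_queue, list)
		if (r->buf == buf) {
			list_del(&r->list);	/* n is unaffected */
			kfree(r);
		}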
diff --git a/drivers/usb/gadget/function/tcm.h b/drivers/usb/gadget/function/tcm.h
index b75c6f3e1980..a27e6e34db0b 100644
--- a/drivers/usb/gadget/function/tcm.h
+++ b/drivers/usb/gadget/function/tcm.h
@@ -23,6 +23,8 @@ enum {
#define USB_G_ALT_INT_BBB 0
#define USB_G_ALT_INT_UAS 1
+#define USB_G_DEFAULT_SESSION_TAGS 128
+
struct tcm_usbg_nexus {
struct se_session *tvn_se_sess;
};
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index ad8c9b05572d..66753ba7a42e 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -272,11 +272,6 @@ static struct config_item_type uvcg_default_processing_type = {
/* struct uvcg_processing {}; */
-static struct config_group *uvcg_processing_default_groups[] = {
- &uvcg_default_processing.group,
- NULL,
-};
-
/* control/processing */
static struct uvcg_processing_grp {
struct config_group group;
@@ -394,11 +389,6 @@ static struct config_item_type uvcg_default_camera_type = {
/* struct uvcg_camera {}; */
-static struct config_group *uvcg_camera_default_groups[] = {
- &uvcg_default_camera.group,
- NULL,
-};
-
/* control/terminal/camera */
static struct uvcg_camera_grp {
struct config_group group;
@@ -477,11 +467,6 @@ static struct config_item_type uvcg_default_output_type = {
/* struct uvcg_output {}; */
-static struct config_group *uvcg_output_default_groups[] = {
- &uvcg_default_output.group,
- NULL,
-};
-
/* control/terminal/output */
static struct uvcg_output_grp {
struct config_group group;
@@ -491,12 +476,6 @@ static struct config_item_type uvcg_output_grp_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_group *uvcg_terminal_default_groups[] = {
- &uvcg_camera_grp.group,
- &uvcg_output_grp.group,
- NULL,
-};
-
/* control/terminal */
static struct uvcg_terminal_grp {
struct config_group group;
@@ -619,12 +598,6 @@ static struct config_item_type uvcg_control_class_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_group *uvcg_control_class_default_groups[] = {
- &uvcg_control_class_fs.group,
- &uvcg_control_class_ss.group,
- NULL,
-};
-
/* control/class */
static struct uvcg_control_class_grp {
struct config_group group;
@@ -634,14 +607,6 @@ static struct config_item_type uvcg_control_class_grp_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_group *uvcg_control_default_groups[] = {
- &uvcg_control_header_grp.group,
- &uvcg_processing_grp.group,
- &uvcg_terminal_grp.group,
- &uvcg_control_class_grp.group,
- NULL,
-};
-
/* control */
static struct uvcg_control_grp {
struct config_group group;
@@ -1780,11 +1745,6 @@ static struct config_item_type uvcg_default_color_matching_type = {
/* struct uvcg_color_matching {}; */
-static struct config_group *uvcg_color_matching_default_groups[] = {
- &uvcg_default_color_matching.group,
- NULL,
-};
-
/* streaming/color_matching */
static struct uvcg_color_matching_grp {
struct config_group group;
@@ -2145,13 +2105,6 @@ static struct config_item_type uvcg_streaming_class_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_group *uvcg_streaming_class_default_groups[] = {
- &uvcg_streaming_class_fs.group,
- &uvcg_streaming_class_hs.group,
- &uvcg_streaming_class_ss.group,
- NULL,
-};
-
/* streaming/class */
static struct uvcg_streaming_class_grp {
struct config_group group;
@@ -2161,15 +2114,6 @@ static struct config_item_type uvcg_streaming_class_grp_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_group *uvcg_streaming_default_groups[] = {
- &uvcg_streaming_header_grp.group,
- &uvcg_uncompressed_grp.group,
- &uvcg_mjpeg_grp.group,
- &uvcg_color_matching_grp.group,
- &uvcg_streaming_class_grp.group,
- NULL,
-};
-
/* streaming */
static struct uvcg_streaming_grp {
struct config_group group;
@@ -2179,12 +2123,6 @@ static struct config_item_type uvcg_streaming_grp_type = {
.ct_owner = THIS_MODULE,
};
-static struct config_group *uvcg_default_groups[] = {
- &uvcg_control_grp.group,
- &uvcg_streaming_grp.group,
- NULL,
-};
-
static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_uvc_opts,
@@ -2273,59 +2211,64 @@ static struct config_item_type uvc_func_type = {
.ct_owner = THIS_MODULE,
};
-static inline void uvcg_init_group(struct config_group *g,
- struct config_group **default_groups,
- const char *name,
- struct config_item_type *type)
-{
- g->default_groups = default_groups;
- config_group_init_type_name(g, name, type);
-}
-
int uvcg_attach_configfs(struct f_uvc_opts *opts)
{
config_group_init_type_name(&uvcg_control_header_grp.group,
"header",
&uvcg_control_header_grp_type);
+
config_group_init_type_name(&uvcg_default_processing.group,
- "default",
- &uvcg_default_processing_type);
- uvcg_init_group(&uvcg_processing_grp.group,
- uvcg_processing_default_groups,
- "processing",
- &uvcg_processing_grp_type);
+ "default", &uvcg_default_processing_type);
+ config_group_init_type_name(&uvcg_processing_grp.group,
+ "processing", &uvcg_processing_grp_type);
+ configfs_add_default_group(&uvcg_default_processing.group,
+ &uvcg_processing_grp.group);
+
config_group_init_type_name(&uvcg_default_camera.group,
- "default",
- &uvcg_default_camera_type);
- uvcg_init_group(&uvcg_camera_grp.group,
- uvcg_camera_default_groups,
- "camera",
- &uvcg_camera_grp_type);
+ "default", &uvcg_default_camera_type);
+ config_group_init_type_name(&uvcg_camera_grp.group,
+ "camera", &uvcg_camera_grp_type);
+ configfs_add_default_group(&uvcg_default_camera.group,
+ &uvcg_camera_grp.group);
+
config_group_init_type_name(&uvcg_default_output.group,
- "default",
- &uvcg_default_output_type);
- uvcg_init_group(&uvcg_output_grp.group,
- uvcg_output_default_groups,
- "output",
- &uvcg_output_grp_type);
- uvcg_init_group(&uvcg_terminal_grp.group,
- uvcg_terminal_default_groups,
- "terminal",
- &uvcg_terminal_grp_type);
+ "default", &uvcg_default_output_type);
+ config_group_init_type_name(&uvcg_output_grp.group,
+ "output", &uvcg_output_grp_type);
+ configfs_add_default_group(&uvcg_default_output.group,
+ &uvcg_output_grp.group);
+
+ config_group_init_type_name(&uvcg_terminal_grp.group,
+ "terminal", &uvcg_terminal_grp_type);
+ configfs_add_default_group(&uvcg_camera_grp.group,
+ &uvcg_terminal_grp.group);
+ configfs_add_default_group(&uvcg_output_grp.group,
+ &uvcg_terminal_grp.group);
+
config_group_init_type_name(&uvcg_control_class_fs.group,
- "fs",
- &uvcg_control_class_type);
+ "fs", &uvcg_control_class_type);
config_group_init_type_name(&uvcg_control_class_ss.group,
- "ss",
- &uvcg_control_class_type);
- uvcg_init_group(&uvcg_control_class_grp.group,
- uvcg_control_class_default_groups,
+ "ss", &uvcg_control_class_type);
+ config_group_init_type_name(&uvcg_control_class_grp.group,
"class",
&uvcg_control_class_grp_type);
- uvcg_init_group(&uvcg_control_grp.group,
- uvcg_control_default_groups,
+ configfs_add_default_group(&uvcg_control_class_fs.group,
+ &uvcg_control_class_grp.group);
+ configfs_add_default_group(&uvcg_control_class_ss.group,
+ &uvcg_control_class_grp.group);
+
+ config_group_init_type_name(&uvcg_control_grp.group,
"control",
&uvcg_control_grp_type);
+ configfs_add_default_group(&uvcg_control_header_grp.group,
+ &uvcg_control_grp.group);
+ configfs_add_default_group(&uvcg_processing_grp.group,
+ &uvcg_control_grp.group);
+ configfs_add_default_group(&uvcg_terminal_grp.group,
+ &uvcg_control_grp.group);
+ configfs_add_default_group(&uvcg_control_class_grp.group,
+ &uvcg_control_grp.group);
+
config_group_init_type_name(&uvcg_streaming_header_grp.group,
"header",
&uvcg_streaming_header_grp_type);
@@ -2338,30 +2281,47 @@ int uvcg_attach_configfs(struct f_uvc_opts *opts)
config_group_init_type_name(&uvcg_default_color_matching.group,
"default",
&uvcg_default_color_matching_type);
- uvcg_init_group(&uvcg_color_matching_grp.group,
- uvcg_color_matching_default_groups,
+ config_group_init_type_name(&uvcg_color_matching_grp.group,
"color_matching",
&uvcg_color_matching_grp_type);
+ configfs_add_default_group(&uvcg_default_color_matching.group,
+ &uvcg_color_matching_grp.group);
+
config_group_init_type_name(&uvcg_streaming_class_fs.group,
- "fs",
- &uvcg_streaming_class_type);
+ "fs", &uvcg_streaming_class_type);
config_group_init_type_name(&uvcg_streaming_class_hs.group,
- "hs",
- &uvcg_streaming_class_type);
+ "hs", &uvcg_streaming_class_type);
config_group_init_type_name(&uvcg_streaming_class_ss.group,
- "ss",
- &uvcg_streaming_class_type);
- uvcg_init_group(&uvcg_streaming_class_grp.group,
- uvcg_streaming_class_default_groups,
- "class",
- &uvcg_streaming_class_grp_type);
- uvcg_init_group(&uvcg_streaming_grp.group,
- uvcg_streaming_default_groups,
- "streaming",
- &uvcg_streaming_grp_type);
- uvcg_init_group(&opts->func_inst.group,
- uvcg_default_groups,
+ "ss", &uvcg_streaming_class_type);
+ config_group_init_type_name(&uvcg_streaming_class_grp.group,
+ "class", &uvcg_streaming_class_grp_type);
+ configfs_add_default_group(&uvcg_streaming_class_fs.group,
+ &uvcg_streaming_class_grp.group);
+ configfs_add_default_group(&uvcg_streaming_class_hs.group,
+ &uvcg_streaming_class_grp.group);
+ configfs_add_default_group(&uvcg_streaming_class_ss.group,
+ &uvcg_streaming_class_grp.group);
+
+ config_group_init_type_name(&uvcg_streaming_grp.group,
+ "streaming", &uvcg_streaming_grp_type);
+ configfs_add_default_group(&uvcg_streaming_header_grp.group,
+ &uvcg_streaming_grp.group);
+ configfs_add_default_group(&uvcg_uncompressed_grp.group,
+ &uvcg_streaming_grp.group);
+ configfs_add_default_group(&uvcg_mjpeg_grp.group,
+ &uvcg_streaming_grp.group);
+ configfs_add_default_group(&uvcg_color_matching_grp.group,
+ &uvcg_streaming_grp.group);
+ configfs_add_default_group(&uvcg_streaming_class_grp.group,
+ &uvcg_streaming_grp.group);
+
+ config_group_init_type_name(&opts->func_inst.group,
"",
&uvc_func_type);
+ configfs_add_default_group(&uvcg_control_grp.group,
+ &opts->func_inst.group);
+ configfs_add_default_group(&uvcg_streaming_grp.group,
+ &opts->func_inst.group);
+
return 0;
}
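
All the removed default_groups arrays above feed the same conversion: instead of a NULL-terminated array assigned to a group's default_groups pointer, the new configfs API links each child explicitly. A minimal before/after sketch with hypothetical parent/child groups:

	/* old style: static array wired up before registration */
	static struct config_group *parent_default_groups[] = {
		&child.group,
		NULL,
	};

	/* new style: initialize, then link children one by one */
	config_group_init_type_name(&parent.group, "parent", &parent_type);
	configfs_add_default_group(&child.group, &parent.group);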
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index a23d1b90454c..0b36878eb5fd 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -103,8 +103,7 @@ config USB_ETH
- CDC Ethernet Emulation Model (EEM) is a newer standard that has
a simpler interface that can be used by more USB hardware.
- RNDIS support is an additional option, more demanding than than
- subset.
+ RNDIS support is an additional option, more demanding than subset.
Within the USB device, this gadget driver exposes a network device
"usbX", where X depends on what other networking devices you have.
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 87fb0fd6aaab..e64479f882a5 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1699,28 +1699,6 @@ static struct usb_gadget_driver gadgetfs_driver = {
};
/*----------------------------------------------------------------------*/
-
-static void gadgetfs_nop(struct usb_gadget *arg) { }
-
-static int gadgetfs_probe(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
-{
- CHIP = gadget->name;
- return -EISNAM;
-}
-
-static struct usb_gadget_driver probe_driver = {
- .max_speed = USB_SPEED_HIGH,
- .bind = gadgetfs_probe,
- .unbind = gadgetfs_nop,
- .setup = (void *)gadgetfs_nop,
- .disconnect = gadgetfs_nop,
- .driver = {
- .name = "nop",
- },
-};
-
-
/* DEVICE INITIALIZATION
*
* fd = open ("/dev/gadget/$CHIP", O_RDWR)
@@ -1971,15 +1949,13 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
if (the_device)
return -ESRCH;
- /* fake probe to determine $CHIP */
- CHIP = NULL;
- usb_gadget_probe_driver(&probe_driver);
+ CHIP = usb_get_gadget_udc_name();
if (!CHIP)
return -ENODEV;
/* superblock */
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = GADGETFS_MAGIC;
sb->s_op = &gadget_fs_operations;
sb->s_time_gran = 1;
@@ -2034,6 +2010,8 @@ gadgetfs_kill_sb (struct super_block *sb)
put_dev (the_device);
the_device = NULL;
}
+ kfree(CHIP);
+ CHIP = NULL;
}
/*----------------------------------------------------------------------*/
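
usb_get_gadget_udc_name(), introduced alongside this series, returns a kmalloc()ed copy of the first registered UDC's name, or NULL when no UDC is present; that is why the fake probe driver can go and why gadgetfs_kill_sb() gains a kfree(). Usage as in the hunks above:

	CHIP = usb_get_gadget_udc_name();
	if (!CHIP)
		return -ENODEV;
	/* ... mount proceeds, $CHIP names the device file ... */
	kfree(CHIP);	/* the returned string is owned by the caller */
	CHIP = NULL;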
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 753c29bd11ad..7c289416f87d 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -74,7 +74,6 @@ config USB_BCM63XX_UDC
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
depends on FSL_SOC || ARCH_MXC
- select USB_FSL_MPH_DR_OF if OF
help
Some of Freescale PowerPC and i.MX processors have a High Speed
Dual-Role(DR) USB controller, which supports device mode.
@@ -128,6 +127,7 @@ config USB_OMAP
config USB_PXA25X
tristate "PXA 25x or IXP 4xx"
depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX
+ depends on HAS_IOMEM
help
Intel's PXA 25x series XScale ARM-5TE processors include
an integrated full speed USB 1.1 device controller. The
@@ -176,7 +176,7 @@ config USB_RENESAS_USBHS_UDC
config USB_RENESAS_USB3
tristate 'Renesas USB3.0 Peripheral controller'
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Renesas USB3.0 Peripheral controller is a USB peripheral controller
that supports super, high, and full speed USB 3.0 data transfers.
@@ -187,6 +187,7 @@ config USB_RENESAS_USB3
config USB_PXA27X
tristate "PXA 27x"
+ depends on HAS_IOMEM
help
Intel's PXA 27x series XScale ARM v5TE processors include
an integrated full speed USB 1.1 device controller.
@@ -244,6 +245,7 @@ config USB_MV_U3D
config USB_M66592
tristate "Renesas M66592 USB Peripheral Controller"
+ depends on HAS_IOMEM
help
M66592 is a discrete USB peripheral controller chip that
supports both full and high speed USB 2.0 data transfers.
@@ -287,6 +289,7 @@ config USB_FSL_QE
dynamically linked module called "fsl_qe_udc".
config USB_NET2272
+ depends on HAS_IOMEM
tristate "PLX NET2272"
help
PLX NET2272 is a USB peripheral controller which supports
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index cd8764150861..39d70b4a8958 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -3397,7 +3397,7 @@ err_pcidev:
static const struct pci_device_id pci_id[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
- .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = 0xffffffff,
},
{},
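
The repeated `(PCI_CLASS_SERIAL_USB << 8) | 0xfe` expression collapses into a named constant: 0x0c03 is the PCI serial-bus/USB class code and 0xfe the programming interface assigned to device-mode (peripheral) controllers, so the macro added to pci_ids.h is simply:

	#define PCI_CLASS_SERIAL_USB_DEVICE	0x0c03fe	/* (0x0c03 << 8) | 0xfe */

The same substitution repeats in goku_udc, net2280 and pch_udc below.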
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 8755b2c2aada..18569de06b04 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -17,7 +17,9 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/atmel_usba_udc.h>
@@ -25,8 +27,6 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
-#include <asm/gpio.h>
-
#include "atmel_usba_udc.h"
#ifdef CONFIG_USB_GADGET_DEBUG_FS
@@ -1045,20 +1045,6 @@ static void reset_all_endpoints(struct usba_udc *udc)
list_del_init(&req->queue);
request_complete(ep, req, -ECONNRESET);
}
-
- /* NOTE: normally, the next call to the gadget driver is in
- * charge of disabling endpoints... usually disconnect().
- * The exception would be entering a high speed test mode.
- *
- * FIXME remove this code ... and retest thoroughly.
- */
- list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
- if (ep->ep.desc) {
- spin_unlock(&udc->lock);
- usba_ep_disable(&ep->ep);
- spin_lock(&udc->lock);
- }
- }
}
static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
@@ -1888,20 +1874,15 @@ static int atmel_usba_stop(struct usb_gadget *gadget)
#ifdef CONFIG_OF
static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on)
{
- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
-
- if (is_on)
- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
- else
- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
+ is_on ? AT91_PMC_BIASEN : 0);
}
static void at91sam9g45_pulse_bias(struct usba_udc *udc)
{
- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR);
-
- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0);
+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
+ AT91_PMC_BIASEN);
}
static const struct usba_udc_errata at91sam9rl_errata = {
@@ -1938,6 +1919,9 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
return ERR_PTR(-EINVAL);
udc->errata = match->data;
+ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
+ if (udc->errata && IS_ERR(udc->pmc))
+ return ERR_CAST(udc->pmc);
udc->num_ep = 0;
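
Instead of the at91_pmc_read()/at91_pmc_write() pair, the PMC is now found as a syscon and driven through regmap, so the BIASEN read-modify-write happens atomically under the regmap's own lock:

	/* one locked read-modify-write replaces the open-coded pair */
	regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
			   is_on ? AT91_PMC_BIASEN : 0);

Only the errata callbacks touch the PMC, which is why probing fails on a missing PMC regmap only when udc->errata is set.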
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index ea448a344767..3e1c9d589dfa 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -354,6 +354,8 @@ struct usba_udc {
struct dentry *debugfs_root;
struct dentry *debugfs_regs;
#endif
+
+ struct regmap *pmc;
};
static inline struct usba_ep *to_usba_ep(struct usb_ep *ep)
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 7f77db5d1278..aae7458d8986 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -581,8 +581,13 @@ err0:
void bdc_udc_exit(struct bdc *bdc)
{
+ unsigned long flags;
+
dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
bdc_ep_disable(bdc->bdc_ep_array[1]);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
usb_del_gadget_udc(&bdc->gadget);
bdc_free_ep(bdc);
}
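
bdc_ep_disable() manipulates endpoint state that is otherwise only touched under bdc->lock, so calling it bare during teardown raced with the interrupt path; the fix is the standard irqsave pattern:

	unsigned long flags;

	spin_lock_irqsave(&bdc->lock, flags);	/* also masks local interrupts */
	bdc_ep_disable(bdc->bdc_ep_array[1]);
	spin_unlock_irqrestore(&bdc->lock, flags);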
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 1fdfec14a3ba..d2205d9e0c8b 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1846,7 +1846,7 @@ err:
/*-------------------------------------------------------------------------*/
static const struct pci_device_id pci_ids[] = { {
- .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = ~0,
.vendor = 0x102f, /* Toshiba */
.device = 0x0107, /* this UDC */
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 79fe6b77ee44..8f32b5ee7734 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -49,7 +49,6 @@
#endif
#include <mach/hardware.h>
-#include <mach/platform.h>
/*
* USB device configuration structure
@@ -147,9 +146,7 @@ struct lpc32xx_udc {
u32 io_p_size;
void __iomem *udp_baseaddr;
int udp_irq[4];
- struct clk *usb_pll_clk;
struct clk *usb_slv_clk;
- struct clk *usb_otg_clk;
/* DMA support */
u32 *udca_v_base;
@@ -210,16 +207,6 @@ static inline struct lpc32xx_udc *to_udc(struct usb_gadget *g)
#define UDCA_BUFF_SIZE (128)
-/* TODO: When the clock framework is introduced in LPC32xx, IO_ADDRESS will
- * be replaced with an inremap()ed pointer
- * */
-#define USB_CTRL IO_ADDRESS(LPC32XX_CLK_PM_BASE + 0x64)
-
-/* USB_CTRL bit defines */
-#define USB_SLAVE_HCLK_EN (1 << 24)
-#define USB_HOST_NEED_CLK_EN (1 << 21)
-#define USB_DEV_NEED_CLK_EN (1 << 22)
-
/**********************************************************************
* USB device controller register offsets
**********************************************************************/
@@ -639,9 +626,6 @@ static void isp1301_udc_configure(struct lpc32xx_udc *udc)
i2c_smbus_write_byte_data(udc->isp1301_i2c_client,
ISP1301_I2C_INTERRUPT_RISING, INT_VBUS_VLD);
- /* Enable usb_need_clk clock after transceiver is initialized */
- writel((readl(USB_CTRL) | USB_DEV_NEED_CLK_EN), USB_CTRL);
-
dev_info(udc->dev, "ISP1301 Vendor ID : 0x%04x\n",
i2c_smbus_read_word_data(udc->isp1301_i2c_client, 0x00));
dev_info(udc->dev, "ISP1301 Product ID : 0x%04x\n",
@@ -980,31 +964,13 @@ static void udc_clk_set(struct lpc32xx_udc *udc, int enable)
return;
udc->clocked = 1;
-
- /* 48MHz PLL up */
- clk_enable(udc->usb_pll_clk);
-
- /* Enable the USB device clock */
- writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN,
- USB_CTRL);
-
- clk_enable(udc->usb_otg_clk);
+ clk_prepare_enable(udc->usb_slv_clk);
} else {
if (!udc->clocked)
return;
udc->clocked = 0;
-
- /* Never disable the USB_HCLK during normal operation */
-
- /* 48MHz PLL dpwn */
- clk_disable(udc->usb_pll_clk);
-
- /* Disable the USB device clock */
- writel(readl(USB_CTRL) & ~USB_DEV_NEED_CLK_EN,
- USB_CTRL);
-
- clk_disable(udc->usb_otg_clk);
+ clk_disable_unprepare(udc->usb_slv_clk);
}
}
@@ -3125,58 +3091,21 @@ static int lpc32xx_udc_probe(struct platform_device *pdev)
goto io_map_fail;
}
- /* Enable AHB slave USB clock, needed for further USB clock control */
- writel(USB_SLAVE_HCLK_EN | (1 << 19), USB_CTRL);
-
- /* Get required clocks */
- udc->usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
- if (IS_ERR(udc->usb_pll_clk)) {
- dev_err(udc->dev, "failed to acquire USB PLL\n");
- retval = PTR_ERR(udc->usb_pll_clk);
- goto pll_get_fail;
- }
- udc->usb_slv_clk = clk_get(&pdev->dev, "ck_usbd");
+ /* Get USB device clock */
+ udc->usb_slv_clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(udc->usb_slv_clk)) {
dev_err(udc->dev, "failed to acquire USB device clock\n");
retval = PTR_ERR(udc->usb_slv_clk);
goto usb_clk_get_fail;
}
- udc->usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
- if (IS_ERR(udc->usb_otg_clk)) {
- dev_err(udc->dev, "failed to acquire USB otg clock\n");
- retval = PTR_ERR(udc->usb_otg_clk);
- goto usb_otg_clk_get_fail;
- }
-
- /* Setup PLL clock to 48MHz */
- retval = clk_enable(udc->usb_pll_clk);
- if (retval < 0) {
- dev_err(udc->dev, "failed to start USB PLL\n");
- goto pll_enable_fail;
- }
-
- retval = clk_set_rate(udc->usb_pll_clk, 48000);
- if (retval < 0) {
- dev_err(udc->dev, "failed to set USB clock rate\n");
- goto pll_set_fail;
- }
-
- writel(readl(USB_CTRL) | USB_DEV_NEED_CLK_EN, USB_CTRL);
/* Enable USB device clock */
- retval = clk_enable(udc->usb_slv_clk);
+ retval = clk_prepare_enable(udc->usb_slv_clk);
if (retval < 0) {
dev_err(udc->dev, "failed to start USB device clock\n");
goto usb_clk_enable_fail;
}
- /* Enable USB OTG clock */
- retval = clk_enable(udc->usb_otg_clk);
- if (retval < 0) {
- dev_err(udc->dev, "failed to start USB otg clock\n");
- goto usb_otg_clk_enable_fail;
- }
-
/* Setup deferred workqueue data */
udc->poweron = udc->pullup = 0;
INIT_WORK(&udc->pullup_job, pullup_work);
@@ -3287,19 +3216,10 @@ dma_alloc_fail:
dma_free_coherent(&pdev->dev, UDCA_BUFF_SIZE,
udc->udca_v_base, udc->udca_p_base);
i2c_fail:
- clk_disable(udc->usb_otg_clk);
-usb_otg_clk_enable_fail:
- clk_disable(udc->usb_slv_clk);
+ clk_disable_unprepare(udc->usb_slv_clk);
usb_clk_enable_fail:
-pll_set_fail:
- clk_disable(udc->usb_pll_clk);
-pll_enable_fail:
- clk_put(udc->usb_otg_clk);
-usb_otg_clk_get_fail:
clk_put(udc->usb_slv_clk);
usb_clk_get_fail:
- clk_put(udc->usb_pll_clk);
-pll_get_fail:
iounmap(udc->udp_baseaddr);
io_map_fail:
release_mem_region(udc->io_p_start, udc->io_p_size);
@@ -3336,12 +3256,9 @@ static int lpc32xx_udc_remove(struct platform_device *pdev)
free_irq(udc->udp_irq[IRQ_USB_HP], udc);
free_irq(udc->udp_irq[IRQ_USB_LP], udc);
- clk_disable(udc->usb_otg_clk);
- clk_put(udc->usb_otg_clk);
- clk_disable(udc->usb_slv_clk);
+ clk_disable_unprepare(udc->usb_slv_clk);
clk_put(udc->usb_slv_clk);
- clk_disable(udc->usb_pll_clk);
- clk_put(udc->usb_pll_clk);
+
iounmap(udc->udp_baseaddr);
release_mem_region(udc->io_p_start, udc->io_p_size);
kfree(udc);
@@ -3367,7 +3284,7 @@ static int lpc32xx_udc_suspend(struct platform_device *pdev, pm_message_t mesg)
udc->clocked = 1;
/* Kill global USB clock */
- clk_disable(udc->usb_slv_clk);
+ clk_disable_unprepare(udc->usb_slv_clk);
}
return 0;
@@ -3379,7 +3296,7 @@ static int lpc32xx_udc_resume(struct platform_device *pdev)
if (udc->clocked) {
/* Enable global USB clock */
- clk_enable(udc->usb_slv_clk);
+ clk_prepare_enable(udc->usb_slv_clk);
/* Enable clocking */
udc_clk_set(udc, 1);
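
Every clk_enable()/clk_disable() pair in this driver becomes clk_prepare_enable()/clk_disable_unprepare(): under the common clock framework a clock must be prepared (a step that may sleep) before it can be enabled, and the combined helpers keep the two halves paired. Sketch:

	ret = clk_prepare_enable(udc->usb_slv_clk);	/* prepare, then enable */
	if (ret < 0)
		return ret;
	/* ... */
	clk_disable_unprepare(udc->usb_slv_clk);	/* undo both, in order */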
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 6706aef907f4..c894b94b234b 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -3735,7 +3735,7 @@ static void net2280_shutdown(struct pci_dev *pdev)
/*-------------------------------------------------------------------------*/
static const struct pci_device_id pci_ids[] = { {
- .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_PLX_LEGACY,
.device = 0x2280,
@@ -3743,7 +3743,7 @@ static const struct pci_device_id pci_ids[] = { {
.subdevice = PCI_ANY_ID,
.driver_data = PLX_LEGACY | PLX_2280,
}, {
- .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_PLX_LEGACY,
.device = 0x2282,
@@ -3752,7 +3752,7 @@ static const struct pci_device_id pci_ids[] = { {
.driver_data = PLX_LEGACY,
},
{
- .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_PLX,
.device = 0x3380,
@@ -3761,7 +3761,7 @@ static const struct pci_device_id pci_ids[] = { {
.driver_data = PLX_SUPERSPEED,
},
{
- .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_PLX,
.device = 0x3382,
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 7a04157ff579..9571ef54b86b 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -3234,22 +3234,22 @@ static const struct pci_device_id pch_udc_pcidev_id[] = {
{
PCI_DEVICE(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
- .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = 0xffffffff,
},
{
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
- .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = 0xffffffff,
},
{
PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
- .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = 0xffffffff,
},
{
PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
- .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+ .class = PCI_CLASS_SERIAL_USB_DEVICE,
.class_mask = 0xffffffff,
},
{ 0 },
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index b82cb14850b6..a238da906115 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -48,18 +48,157 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
-/*
- * This driver is PXA25x only. Grab the right register definitions.
- */
-#ifdef CONFIG_ARCH_PXA
-#include <mach/pxa25x-udc.h>
-#include <mach/hardware.h>
-#endif
-
#ifdef CONFIG_ARCH_LUBBOCK
#include <mach/lubbock.h>
#endif
+#define UDCCR 0x0000 /* UDC Control Register */
+#define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */
+#define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */
+#define UDC_RES3 0x000C /* UDC Undocumented - Reserved3 */
+#define UDCCS0 0x0010 /* UDC Endpoint 0 Control/Status Register */
+#define UDCCS1 0x0014 /* UDC Endpoint 1 (IN) Control/Status Register */
+#define UDCCS2 0x0018 /* UDC Endpoint 2 (OUT) Control/Status Register */
+#define UDCCS3 0x001C /* UDC Endpoint 3 (IN) Control/Status Register */
+#define UDCCS4 0x0020 /* UDC Endpoint 4 (OUT) Control/Status Register */
+#define UDCCS5 0x0024 /* UDC Endpoint 5 (Interrupt) Control/Status Register */
+#define UDCCS6 0x0028 /* UDC Endpoint 6 (IN) Control/Status Register */
+#define UDCCS7 0x002C /* UDC Endpoint 7 (OUT) Control/Status Register */
+#define UDCCS8 0x0030 /* UDC Endpoint 8 (IN) Control/Status Register */
+#define UDCCS9 0x0034 /* UDC Endpoint 9 (OUT) Control/Status Register */
+#define UDCCS10 0x0038 /* UDC Endpoint 10 (Interrupt) Control/Status Register */
+#define UDCCS11 0x003C /* UDC Endpoint 11 (IN) Control/Status Register */
+#define UDCCS12 0x0040 /* UDC Endpoint 12 (OUT) Control/Status Register */
+#define UDCCS13 0x0044 /* UDC Endpoint 13 (IN) Control/Status Register */
+#define UDCCS14 0x0048 /* UDC Endpoint 14 (OUT) Control/Status Register */
+#define UDCCS15 0x004C /* UDC Endpoint 15 (Interrupt) Control/Status Register */
+#define UFNRH 0x0060 /* UDC Frame Number Register High */
+#define UFNRL 0x0064 /* UDC Frame Number Register Low */
+#define UBCR2 0x0068 /* UDC Byte Count Reg 2 */
+#define UBCR4 0x006c /* UDC Byte Count Reg 4 */
+#define UBCR7 0x0070 /* UDC Byte Count Reg 7 */
+#define UBCR9 0x0074 /* UDC Byte Count Reg 9 */
+#define UBCR12 0x0078 /* UDC Byte Count Reg 12 */
+#define UBCR14 0x007c /* UDC Byte Count Reg 14 */
+#define UDDR0 0x0080 /* UDC Endpoint 0 Data Register */
+#define UDDR1 0x0100 /* UDC Endpoint 1 Data Register */
+#define UDDR2 0x0180 /* UDC Endpoint 2 Data Register */
+#define UDDR3 0x0200 /* UDC Endpoint 3 Data Register */
+#define UDDR4 0x0400 /* UDC Endpoint 4 Data Register */
+#define UDDR5 0x00A0 /* UDC Endpoint 5 Data Register */
+#define UDDR6 0x0600 /* UDC Endpoint 6 Data Register */
+#define UDDR7 0x0680 /* UDC Endpoint 7 Data Register */
+#define UDDR8 0x0700 /* UDC Endpoint 8 Data Register */
+#define UDDR9 0x0900 /* UDC Endpoint 9 Data Register */
+#define UDDR10 0x00C0 /* UDC Endpoint 10 Data Register */
+#define UDDR11 0x0B00 /* UDC Endpoint 11 Data Register */
+#define UDDR12 0x0B80 /* UDC Endpoint 12 Data Register */
+#define UDDR13 0x0C00 /* UDC Endpoint 13 Data Register */
+#define UDDR14 0x0E00 /* UDC Endpoint 14 Data Register */
+#define UDDR15 0x00E0 /* UDC Endpoint 15 Data Register */
+
+#define UICR0 0x0050 /* UDC Interrupt Control Register 0 */
+#define UICR1 0x0054 /* UDC Interrupt Control Register 1 */
+
+#define USIR0 0x0058 /* UDC Status Interrupt Register 0 */
+#define USIR1 0x005C /* UDC Status Interrupt Register 1 */
+
+#define UDCCR_UDE (1 << 0) /* UDC enable */
+#define UDCCR_UDA (1 << 1) /* UDC active */
+#define UDCCR_RSM (1 << 2) /* Device resume */
+#define UDCCR_RESIR (1 << 3) /* Resume interrupt request */
+#define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */
+#define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */
+#define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */
+#define UDCCR_REM (1 << 7) /* Reset interrupt mask */
+
+#define UDCCS0_OPR (1 << 0) /* OUT packet ready */
+#define UDCCS0_IPR (1 << 1) /* IN packet ready */
+#define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */
+#define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */
+#define UDCCS0_SST (1 << 4) /* Sent stall */
+#define UDCCS0_FST (1 << 5) /* Force stall */
+#define UDCCS0_RNE (1 << 6) /* Receive FIFO not empty */
+#define UDCCS0_SA (1 << 7) /* Setup active */
+
+#define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */
+#define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */
+#define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */
+#define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */
+#define UDCCS_BI_SST (1 << 4) /* Sent stall */
+#define UDCCS_BI_FST (1 << 5) /* Force stall */
+#define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */
+
+#define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */
+#define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */
+#define UDCCS_BO_DME (1 << 3) /* DMA enable */
+#define UDCCS_BO_SST (1 << 4) /* Sent stall */
+#define UDCCS_BO_FST (1 << 5) /* Force stall */
+#define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */
+#define UDCCS_BO_RSP (1 << 7) /* Receive short packet */
+
+#define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */
+#define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */
+#define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */
+#define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */
+#define UDCCS_II_TSP (1 << 7) /* Transmit short packet */
+
+#define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */
+#define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */
+#ifdef CONFIG_ARCH_IXP4XX /* FIXME: is this right? datasheet says '2' */
+#define UDCCS_IO_ROF (1 << 3) /* Receive overflow */
+#endif
+#ifdef CONFIG_ARCH_PXA
+#define UDCCS_IO_ROF (1 << 2) /* Receive overflow */
+#endif
+#define UDCCS_IO_DME (1 << 3) /* DMA enable */
+#define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */
+#define UDCCS_IO_RSP (1 << 7) /* Receive short packet */
+
+#define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */
+#define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */
+#define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */
+#define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */
+#define UDCCS_INT_SST (1 << 4) /* Sent stall */
+#define UDCCS_INT_FST (1 << 5) /* Force stall */
+#define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */
+
+#define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
+#define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
+#define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
+#define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
+#define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
+#define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
+#define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
+#define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
+
+#define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
+#define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
+#define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
+#define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
+#define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
+#define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
+#define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
+#define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
+
+#define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
+#define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
+#define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
+#define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
+#define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
+#define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
+#define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
+#define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
+
+#define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
+#define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
+#define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
+#define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
+#define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
+#define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
+#define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
+#define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
+
/*
* This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
* series processors. The UDC for the IXP 4xx series is very similar.
@@ -150,25 +289,61 @@ static void pullup_on(void)
mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
}
-static void pio_irq_enable(int bEndpointAddress)
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+/*
+ * IXP4xx has its buses wired up in a way that relies on never doing any
+ * byte swaps, independent of whether it runs in big-endian or little-endian
+ * mode, as explained by Krzysztof Hałasa.
+ *
+ * We only support pxa25x in little-endian mode, but it is very likely
+ * that it works the same way.
+ */
+static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
+{
+ iowrite32be(val, dev->regs + reg);
+}
+
+static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
{
- bEndpointAddress &= 0xf;
+ return ioread32be(dev->regs + reg);
+}
+#else
+static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
+{
+ writel(val, dev->regs + reg);
+}
+
+static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
+{
+ return readl(dev->regs + reg);
+}
+#endif
+
+static void pio_irq_enable(struct pxa25x_ep *ep)
+{
+ u32 bEndpointAddress = ep->bEndpointAddress & 0xf;
+
if (bEndpointAddress < 8)
- UICR0 &= ~(1 << bEndpointAddress);
+ udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) &
+ ~(1 << bEndpointAddress));
else {
bEndpointAddress -= 8;
- UICR1 &= ~(1 << bEndpointAddress);
+ udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) &
+ ~(1 << bEndpointAddress));
}
}
-static void pio_irq_disable(int bEndpointAddress)
+static void pio_irq_disable(struct pxa25x_ep *ep)
{
- bEndpointAddress &= 0xf;
+ u32 bEndpointAddress = ep->bEndpointAddress & 0xf;
+
if (bEndpointAddress < 8)
- UICR0 |= 1 << bEndpointAddress;
+ udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) |
+ (1 << bEndpointAddress));
else {
bEndpointAddress -= 8;
- UICR1 |= 1 << bEndpointAddress;
+ udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) |
+ (1 << bEndpointAddress));
}
}
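
These accessors are the point of moving the register definitions into the driver: the same code must serve little-endian PXA25x and IXP4xx, whose bus performs no byte-lane swapping even with the CPU in big-endian mode, so all MMIO funnels through one helper pair selected at build time:

	#if defined(CONFIG_CPU_BIG_ENDIAN)
		iowrite32be(val, dev->regs + reg);	/* no swap on the bus */
	#else
		writel(val, dev->regs + reg);		/* LE CPU, LE bus */
	#endif

From here on every register access takes an offset into dev->regs rather than dereferencing a fixed virtual address from <mach/pxa25x-udc.h>.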
@@ -177,22 +352,61 @@ static void pio_irq_disable(int bEndpointAddress)
*/
#define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
-static inline void udc_set_mask_UDCCR(int mask)
+static inline void udc_set_mask_UDCCR(struct pxa25x_udc *dev, int mask)
{
- UDCCR = (UDCCR & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS);
+ u32 udccr = udc_get_reg(dev, UDCCR);
+
+ udc_set_reg(dev, UDCCR, (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
}
-static inline void udc_clear_mask_UDCCR(int mask)
+static inline void udc_clear_mask_UDCCR(struct pxa25x_udc *dev, int mask)
{
- UDCCR = (UDCCR & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS);
+ u32 udccr = udc_get_reg(dev, UDCCR);
+
+ udc_set_reg(dev, UDCCR, (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
}
-static inline void udc_ack_int_UDCCR(int mask)
+static inline void udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask)
{
/* udccr contains the bits we don't want to change */
- __u32 udccr = UDCCR & UDCCR_MASK_BITS;
+ u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS;
- UDCCR = udccr | (mask & ~UDCCR_MASK_BITS);
+ udc_set_reg(dev, UDCCR, udccr | (mask & ~UDCCR_MASK_BITS));
+}
+
+static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep)
+{
+ return udc_get_reg(ep->dev, ep->regoff_udccs);
+}
+
+static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 data)
+{
+ udc_set_reg(ep->dev, ep->regoff_udccs, data);
+}
+
+static inline u32 udc_ep0_get_UDCCS(struct pxa25x_udc *dev)
+{
+ return udc_get_reg(dev, UDCCS0);
+}
+
+static inline void udc_ep0_set_UDCCS(struct pxa25x_udc *dev, u32 data)
+{
+ udc_set_reg(dev, UDCCS0, data);
+}
+
+static inline u32 udc_ep_get_UDDR(struct pxa25x_ep *ep)
+{
+ return udc_get_reg(ep->dev, ep->regoff_uddr);
+}
+
+static inline void udc_ep_set_UDDR(struct pxa25x_ep *ep, u32 data)
+{
+ udc_set_reg(ep->dev, ep->regoff_uddr, data);
+}
+
+static inline u32 udc_ep_get_UBCR(struct pxa25x_ep *ep)
+{
+ return udc_get_reg(ep->dev, ep->regoff_ubcr);
}
/*
@@ -358,7 +572,7 @@ static inline void ep0_idle (struct pxa25x_udc *dev)
}
static int
-write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max)
+write_packet(struct pxa25x_ep *ep, struct pxa25x_request *req, unsigned max)
{
u8 *buf;
unsigned length, count;
@@ -372,7 +586,7 @@ write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max)
count = length;
while (likely(count--))
- *uddr = *buf++;
+ udc_ep_set_UDDR(ep, *buf++);
return length;
}
@@ -392,7 +606,7 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
unsigned count;
int is_last, is_short;
- count = write_packet(ep->reg_uddr, req, max);
+ count = write_packet(ep, req, max);
/* last packet is usually short (or a zlp) */
if (unlikely (count != max))
@@ -416,15 +630,15 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
* double buffering might work. TSP, TPC, and TFS
* bit values are the same for all normal IN endpoints.
*/
- *ep->reg_udccs = UDCCS_BI_TPC;
+ udc_ep_set_UDCCS(ep, UDCCS_BI_TPC);
if (is_short)
- *ep->reg_udccs = UDCCS_BI_TSP;
+ udc_ep_set_UDCCS(ep, UDCCS_BI_TSP);
/* requests complete when all IN data is in the FIFO */
if (is_last) {
done (ep, req, 0);
if (list_empty(&ep->queue))
- pio_irq_disable (ep->bEndpointAddress);
+ pio_irq_disable(ep);
return 1;
}
@@ -432,7 +646,7 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
// double buffering is off in the default fifo mode, which
// prevents TFS from being set here.
- } while (*ep->reg_udccs & UDCCS_BI_TFS);
+ } while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS);
return 0;
}
@@ -442,20 +656,21 @@ write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
static inline
void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
{
- UDCCS0 = flags|UDCCS0_SA|UDCCS0_OPR;
- USIR0 = USIR0_IR0;
+ udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR);
+ udc_set_reg(dev, USIR0, USIR0_IR0);
dev->req_pending = 0;
DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
- __func__, tag, UDCCS0, flags);
+ __func__, tag, udc_ep0_get_UDCCS(dev), flags);
}
static int
write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
{
+ struct pxa25x_udc *dev = ep->dev;
unsigned count;
int is_short;
- count = write_packet(&UDDR0, req, EP0_FIFO_SIZE);
+ count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE);
ep->dev->stats.write.bytes += count;
/* last packet "must be" short (or a zlp) */
@@ -468,7 +683,7 @@ write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
if (ep->dev->req_pending)
ep0start(ep->dev, UDCCS0_IPR, "short IN");
else
- UDCCS0 = UDCCS0_IPR;
+ udc_ep0_set_UDCCS(dev, UDCCS0_IPR);
count = req->req.length;
done (ep, req, 0);
@@ -484,9 +699,9 @@ write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
if (count >= EP0_FIFO_SIZE) {
count = 100;
do {
- if ((UDCCS0 & UDCCS0_OPR) != 0) {
+ if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) {
/* clear OPR, generate ack */
- UDCCS0 = UDCCS0_OPR;
+ udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
break;
}
count--;
@@ -521,7 +736,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
* UDCCS_{BO,IO}_RPC are all the same bit value.
* UDCCS_{BO,IO}_RNE are all the same bit value.
*/
- udccs = *ep->reg_udccs;
+ udccs = udc_ep_get_UDCCS(ep);
if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
break;
buf = req->req.buf + req->req.actual;
@@ -530,7 +745,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
/* read all bytes from this packet */
if (likely (udccs & UDCCS_BO_RNE)) {
- count = 1 + (0x0ff & *ep->reg_ubcr);
+ count = 1 + (0x0ff & udc_ep_get_UBCR(ep));
req->req.actual += min (count, bufferspace);
} else /* zlp */
count = 0;
@@ -540,7 +755,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
is_short ? "/S" : "",
req, req->req.actual, req->req.length);
while (likely (count-- != 0)) {
- u8 byte = (u8) *ep->reg_uddr;
+ u8 byte = (u8) udc_ep_get_UDDR(ep);
if (unlikely (bufferspace == 0)) {
/* this happens when the driver's buffer
@@ -556,7 +771,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
bufferspace--;
}
}
- *ep->reg_udccs = UDCCS_BO_RPC;
+ udc_ep_set_UDCCS(ep, UDCCS_BO_RPC);
/* RPC/RSP/RNE could now reflect the other packet buffer */
/* iso is one request per packet */
@@ -571,7 +786,7 @@ read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
if (is_short || req->req.actual == req->req.length) {
done (ep, req, 0);
if (list_empty(&ep->queue))
- pio_irq_disable (ep->bEndpointAddress);
+ pio_irq_disable(ep);
return 1;
}
@@ -595,7 +810,7 @@ read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
buf = req->req.buf + req->req.actual;
bufferspace = req->req.length - req->req.actual;
- while (UDCCS0 & UDCCS0_RNE) {
+ while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) {
byte = (u8) UDDR0;
if (unlikely (bufferspace == 0)) {
@@ -613,7 +828,7 @@ read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
}
}
- UDCCS0 = UDCCS0_OPR | UDCCS0_IPR;
+ udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR);
/* completion */
if (req->req.actual >= req->req.length)
@@ -687,8 +902,8 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
DBG(DBG_VERBOSE, "ep0 config ack%s\n",
dev->has_cfr ? "" : " raced");
if (dev->has_cfr)
- UDCCFR = UDCCFR_AREN|UDCCFR_ACM
- |UDCCFR_MB1;
+ udc_set_reg(dev, UDCCFR, UDCCFR_AREN |
+ UDCCFR_ACM | UDCCFR_MB1);
done(ep, req, 0);
dev->ep0state = EP0_END_XFER;
local_irq_restore (flags);
@@ -696,7 +911,7 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
}
if (dev->req_pending)
ep0start(dev, UDCCS0_IPR, "OUT");
- if (length == 0 || ((UDCCS0 & UDCCS0_RNE) != 0
+ if (length == 0 || ((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0
&& read_ep0_fifo(ep, req))) {
ep0_idle(dev);
done(ep, req, 0);
@@ -711,16 +926,16 @@ pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
}
/* can the FIFO satisfy the request immediately? */
} else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
- if ((*ep->reg_udccs & UDCCS_BI_TFS) != 0
+ if ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) != 0
&& write_fifo(ep, req))
req = NULL;
- } else if ((*ep->reg_udccs & UDCCS_BO_RFS) != 0
+ } else if ((udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) != 0
&& read_fifo(ep, req)) {
req = NULL;
}
if (likely(req && ep->ep.desc))
- pio_irq_enable(ep->bEndpointAddress);
+ pio_irq_enable(ep);
}
/* pio or dma irq handler advances the queue. */
@@ -747,7 +962,7 @@ static void nuke(struct pxa25x_ep *ep, int status)
done(ep, req, status);
}
if (ep->ep.desc)
- pio_irq_disable (ep->bEndpointAddress);
+ pio_irq_disable(ep);
}
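Note: the hunks above change pio_irq_enable()/pio_irq_disable() to take the endpoint itself rather than its bEndpointAddress, so the helpers can reach the device (and its mapped register base) through ep->dev. A minimal sketch of what the enable side plausibly looks like under that assumption — the helper name matches the diff, the body is illustrative:

```c
/* Sketch (assumption): endpoints 0-7 are masked in UICR0, 8-15 in
 * UICR1; enabling PIO interrupts means clearing the mask bit. */
static void pio_irq_enable(struct pxa25x_ep *ep)
{
	struct pxa25x_udc *dev = ep->dev;
	int ea = ep->bEndpointAddress & 0x0f;

	if (ea < 8)
		udc_set_reg(dev, UICR0,
			    udc_get_reg(dev, UICR0) & ~(1 << ea));
	else
		udc_set_reg(dev, UICR1,
			    udc_get_reg(dev, UICR1) & ~(1 << (ea - 8)));
}
```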
@@ -807,14 +1022,14 @@ static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
local_irq_save(flags);
if ((ep->bEndpointAddress & USB_DIR_IN) != 0
- && ((*ep->reg_udccs & UDCCS_BI_TFS) == 0
+ && ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) == 0
|| !list_empty(&ep->queue))) {
local_irq_restore(flags);
return -EAGAIN;
}
/* FST bit is the same for control, bulk in, bulk out, interrupt in */
- *ep->reg_udccs = UDCCS_BI_FST|UDCCS_BI_FTF;
+ udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF);
/* ep0 needs special care */
if (!ep->ep.desc) {
@@ -826,7 +1041,7 @@ static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
} else {
unsigned i;
for (i = 0; i < 1000; i += 20) {
- if (*ep->reg_udccs & UDCCS_BI_SST)
+ if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST)
break;
udelay(20);
}
@@ -850,10 +1065,10 @@ static int pxa25x_ep_fifo_status(struct usb_ep *_ep)
if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
return -EOPNOTSUPP;
if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
- || (*ep->reg_udccs & UDCCS_BO_RFS) == 0)
+ || (udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) == 0)
return 0;
else
- return (*ep->reg_ubcr & 0xfff) + 1;
+ return (udc_ep_get_UBCR(ep) & 0xfff) + 1;
}
static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
@@ -870,15 +1085,15 @@ static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
/* for OUT, just read and discard the FIFO contents. */
if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
- while (((*ep->reg_udccs) & UDCCS_BO_RNE) != 0)
- (void) *ep->reg_uddr;
+ while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0)
+ (void)udc_ep_get_UDDR(ep);
return;
}
/* most IN status is the same, but ISO can't stall */
- *ep->reg_udccs = UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
+ udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
| (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
- ? 0 : UDCCS_BI_SST);
+ ? 0 : UDCCS_BI_SST));
}
@@ -905,15 +1120,23 @@ static struct usb_ep_ops pxa25x_ep_ops = {
static int pxa25x_udc_get_frame(struct usb_gadget *_gadget)
{
- return ((UFNRH & 0x07) << 8) | (UFNRL & 0xff);
+ struct pxa25x_udc *dev;
+
+ dev = container_of(_gadget, struct pxa25x_udc, gadget);
+ return ((udc_get_reg(dev, UFNRH) & 0x07) << 8) |
+ (udc_get_reg(dev, UFNRL) & 0xff);
}
static int pxa25x_udc_wakeup(struct usb_gadget *_gadget)
{
+ struct pxa25x_udc *udc;
+
+ udc = container_of(_gadget, struct pxa25x_udc, gadget);
+
/* host may not have enabled remote wakeup */
- if ((UDCCS0 & UDCCS0_DRWF) == 0)
+ if ((udc_ep0_get_UDCCS(udc) & UDCCS0_DRWF) == 0)
return -EHOSTUNREACH;
- udc_set_mask_UDCCR(UDCCR_RSM);
+ udc_set_mask_UDCCR(udc, UDCCR_RSM);
return 0;
}
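Note: both gadget callbacks above recover the driver-private structure from the generic struct usb_gadget via container_of(); the header diff further down wraps the same pattern as to_pxa25x(). A minimal self-contained illustration of the idiom:

```c
#include <linux/kernel.h>
#include <linux/usb/gadget.h>

struct example_udc {
	struct usb_gadget gadget;	/* embedded, not a pointer */
	/* ... driver-private state ... */
};

/* container_of() maps a pointer to the embedded member back to the
 * enclosing structure, so no lookup table or extra field is needed */
static inline struct example_udc *to_example_udc(struct usb_gadget *g)
{
	return container_of(g, struct example_udc, gadget);
}
```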
@@ -1034,9 +1257,11 @@ udc_seq_show(struct seq_file *m, void *_d)
/* registers for device and ep0 */
seq_printf(m,
"uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
- UICR1, UICR0, USIR1, USIR0, UFNRH, UFNRL);
+ udc_get_reg(dev, UICR1), udc_get_reg(dev, UICR0),
+ udc_get_reg(dev, USIR1), udc_get_reg(dev, USIR0),
+ udc_get_reg(dev, UFNRH), udc_get_reg(dev, UFNRL));
- tmp = UDCCR;
+ tmp = udc_get_reg(dev, UDCCR);
seq_printf(m,
"udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
(tmp & UDCCR_REM) ? " rem" : "",
@@ -1048,7 +1273,7 @@ udc_seq_show(struct seq_file *m, void *_d)
(tmp & UDCCR_UDA) ? " uda" : "",
(tmp & UDCCR_UDE) ? " ude" : "");
- tmp = UDCCS0;
+ tmp = udc_ep0_get_UDCCS(dev);
seq_printf(m,
"udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
(tmp & UDCCS0_SA) ? " sa" : "",
@@ -1061,7 +1286,7 @@ udc_seq_show(struct seq_file *m, void *_d)
(tmp & UDCCS0_OPR) ? " opr" : "");
if (dev->has_cfr) {
- tmp = UDCCFR;
+ tmp = udc_get_reg(dev, UDCCFR);
seq_printf(m,
"udccfr %02X =%s%s\n", tmp,
(tmp & UDCCFR_AREN) ? " aren" : "",
@@ -1087,7 +1312,7 @@ udc_seq_show(struct seq_file *m, void *_d)
desc = ep->ep.desc;
if (!desc)
continue;
- tmp = *dev->ep [i].reg_udccs;
+ tmp = udc_ep_get_UDCCS(&dev->ep[i]);
seq_printf(m,
"%s max %d %s udccs %02x irqs %lu\n",
ep->ep.name, usb_endpoint_maxp(desc),
@@ -1151,14 +1376,15 @@ static const struct file_operations debug_fops = {
static void udc_disable(struct pxa25x_udc *dev)
{
/* block all irqs */
- udc_set_mask_UDCCR(UDCCR_SRM|UDCCR_REM);
- UICR0 = UICR1 = 0xff;
- UFNRH = UFNRH_SIM;
+ udc_set_mask_UDCCR(dev, UDCCR_SRM|UDCCR_REM);
+ udc_set_reg(dev, UICR0, 0xff);
+ udc_set_reg(dev, UICR1, 0xff);
+ udc_set_reg(dev, UFNRH, UFNRH_SIM);
/* if hardware supports it, disconnect from usb */
pullup_off();
- udc_clear_mask_UDCCR(UDCCR_UDE);
+ udc_clear_mask_UDCCR(dev, UDCCR_UDE);
ep0_idle (dev);
dev->gadget.speed = USB_SPEED_UNKNOWN;
@@ -1200,10 +1426,10 @@ static void udc_reinit(struct pxa25x_udc *dev)
*/
static void udc_enable (struct pxa25x_udc *dev)
{
- udc_clear_mask_UDCCR(UDCCR_UDE);
+ udc_clear_mask_UDCCR(dev, UDCCR_UDE);
/* try to clear these bits before we enable the udc */
- udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
+ udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
ep0_idle(dev);
dev->gadget.speed = USB_SPEED_UNKNOWN;
@@ -1215,15 +1441,15 @@ static void udc_enable (struct pxa25x_udc *dev)
* - if RESET is already in progress, ack interrupt
* - unmask reset interrupt
*/
- udc_set_mask_UDCCR(UDCCR_UDE);
- if (!(UDCCR & UDCCR_UDA))
- udc_ack_int_UDCCR(UDCCR_RSTIR);
+ udc_set_mask_UDCCR(dev, UDCCR_UDE);
+ if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA))
+ udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
if (dev->has_cfr /* UDC_RES2 is defined */) {
/* pxa255 (a0+) can avoid a set_config race that could
* prevent gadget drivers from configuring correctly
*/
- UDCCFR = UDCCFR_ACM | UDCCFR_MB1;
+ udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1);
} else {
/* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
* which could result in missing packets and interrupts.
@@ -1231,15 +1457,15 @@ static void udc_enable (struct pxa25x_udc *dev)
* double buffers or not; ACM/AREN bits fit into the holes.
* zero bits (like USIR0_IRx) disable double buffering.
*/
- UDC_RES1 = 0x00;
- UDC_RES2 = 0x00;
+ udc_set_reg(dev, UDC_RES1, 0x00);
+ udc_set_reg(dev, UDC_RES2, 0x00);
}
/* enable suspend/resume and reset irqs */
- udc_clear_mask_UDCCR(UDCCR_SRM | UDCCR_REM);
+ udc_clear_mask_UDCCR(dev, UDCCR_SRM | UDCCR_REM);
/* enable ep0 irqs */
- UICR0 &= ~UICR0_IM0;
+ udc_set_reg(dev, UICR0, udc_get_reg(dev, UICR0) & ~UICR0_IM0);
/* if hardware supports it, pullup D+ and wait for reset */
pullup_on();
@@ -1408,9 +1634,9 @@ static void udc_watchdog(unsigned long _dev)
local_irq_disable();
if (dev->ep0state == EP0_STALL
- && (UDCCS0 & UDCCS0_FST) == 0
- && (UDCCS0 & UDCCS0_SST) == 0) {
- UDCCS0 = UDCCS0_FST|UDCCS0_FTF;
+ && (udc_ep0_get_UDCCS(dev) & UDCCS0_FST) == 0
+ && (udc_ep0_get_UDCCS(dev) & UDCCS0_SST) == 0) {
+ udc_ep0_set_UDCCS(dev, UDCCS0_FST|UDCCS0_FTF);
DBG(DBG_VERBOSE, "ep0 re-stall\n");
start_watchdog(dev);
}
@@ -1419,7 +1645,7 @@ static void udc_watchdog(unsigned long _dev)
static void handle_ep0 (struct pxa25x_udc *dev)
{
- u32 udccs0 = UDCCS0;
+ u32 udccs0 = udc_ep0_get_UDCCS(dev);
struct pxa25x_ep *ep = &dev->ep [0];
struct pxa25x_request *req;
union {
@@ -1436,7 +1662,7 @@ static void handle_ep0 (struct pxa25x_udc *dev)
/* clear stall status */
if (udccs0 & UDCCS0_SST) {
nuke(ep, -EPIPE);
- UDCCS0 = UDCCS0_SST;
+ udc_ep0_set_UDCCS(dev, UDCCS0_SST);
del_timer(&dev->timer);
ep0_idle(dev);
}
@@ -1451,7 +1677,7 @@ static void handle_ep0 (struct pxa25x_udc *dev)
switch (dev->ep0state) {
case EP0_IDLE:
/* late-breaking status? */
- udccs0 = UDCCS0;
+ udccs0 = udc_ep0_get_UDCCS(dev);
/* start control request? */
if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
@@ -1462,14 +1688,14 @@ static void handle_ep0 (struct pxa25x_udc *dev)
/* read SETUP packet */
for (i = 0; i < 8; i++) {
- if (unlikely(!(UDCCS0 & UDCCS0_RNE))) {
+ if (unlikely(!(udc_ep0_get_UDCCS(dev) & UDCCS0_RNE))) {
bad_setup:
DMSG("SETUP %d!\n", i);
goto stall;
}
u.raw [i] = (u8) UDDR0;
}
- if (unlikely((UDCCS0 & UDCCS0_RNE) != 0))
+ if (unlikely((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0))
goto bad_setup;
got_setup:
@@ -1545,7 +1771,7 @@ config_change:
*/
}
DBG(DBG_VERBOSE, "protocol STALL, "
- "%02x err %d\n", UDCCS0, i);
+ "%02x err %d\n", udc_ep0_get_UDCCS(dev), i);
stall:
/* the watchdog timer helps deal with cases
* where udc seems to clear FST wrongly, and
@@ -1592,12 +1818,12 @@ stall:
* - IPR cleared
* - OPR got set, without SA (likely status stage)
*/
- UDCCS0 = udccs0 & (UDCCS0_SA|UDCCS0_OPR);
+ udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR));
}
break;
case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
if (udccs0 & UDCCS0_OPR) {
- UDCCS0 = UDCCS0_OPR|UDCCS0_FTF;
+ udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF);
DBG(DBG_VERBOSE, "ep0in premature status\n");
if (req)
done(ep, req, 0);
@@ -1631,14 +1857,14 @@ stall:
* also appears after some config change events.
*/
if (udccs0 & UDCCS0_OPR)
- UDCCS0 = UDCCS0_OPR;
+ udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
ep0_idle(dev);
break;
case EP0_STALL:
- UDCCS0 = UDCCS0_FST;
+ udc_ep0_set_UDCCS(dev, UDCCS0_FST);
break;
}
- USIR0 = USIR0_IR0;
+ udc_set_reg(dev, USIR0, USIR0_IR0);
}
static void handle_ep(struct pxa25x_ep *ep)
@@ -1658,14 +1884,14 @@ static void handle_ep(struct pxa25x_ep *ep)
// TODO check FST handling
- udccs = *ep->reg_udccs;
+ udccs = udc_ep_get_UDCCS(ep);
if (unlikely(is_in)) { /* irq from TPC, SST, or (ISO) TUR */
tmp = UDCCS_BI_TUR;
if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
tmp |= UDCCS_BI_SST;
tmp &= udccs;
if (likely (tmp))
- *ep->reg_udccs = tmp;
+ udc_ep_set_UDCCS(ep, tmp);
if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
completed = write_fifo(ep, req);
@@ -1676,13 +1902,13 @@ static void handle_ep(struct pxa25x_ep *ep)
tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
tmp &= udccs;
if (likely(tmp))
- *ep->reg_udccs = tmp;
+ udc_ep_set_UDCCS(ep, tmp);
/* fifos can hold packets, ready for reading... */
if (likely(req)) {
completed = read_fifo(ep, req);
} else
- pio_irq_disable (ep->bEndpointAddress);
+ pio_irq_disable(ep);
}
ep->pio_irqs++;
} while (completed);
@@ -1703,13 +1929,13 @@ pxa25x_udc_irq(int irq, void *_dev)
dev->stats.irqs++;
do {
- u32 udccr = UDCCR;
+ u32 udccr = udc_get_reg(dev, UDCCR);
handled = 0;
/* SUSpend Interrupt Request */
if (unlikely(udccr & UDCCR_SUSIR)) {
- udc_ack_int_UDCCR(UDCCR_SUSIR);
+ udc_ack_int_UDCCR(dev, UDCCR_SUSIR);
handled = 1;
DBG(DBG_VERBOSE, "USB suspend\n");
@@ -1722,7 +1948,7 @@ pxa25x_udc_irq(int irq, void *_dev)
/* RESume Interrupt Request */
if (unlikely(udccr & UDCCR_RESIR)) {
- udc_ack_int_UDCCR(UDCCR_RESIR);
+ udc_ack_int_UDCCR(dev, UDCCR_RESIR);
handled = 1;
DBG(DBG_VERBOSE, "USB resume\n");
@@ -1734,10 +1960,10 @@ pxa25x_udc_irq(int irq, void *_dev)
/* ReSeT Interrupt Request - USB reset */
if (unlikely(udccr & UDCCR_RSTIR)) {
- udc_ack_int_UDCCR(UDCCR_RSTIR);
+ udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
handled = 1;
- if ((UDCCR & UDCCR_UDA) == 0) {
+ if ((udc_get_reg(dev, UDCCR) & UDCCR_UDA) == 0) {
DBG(DBG_VERBOSE, "USB reset start\n");
/* reset driver and endpoints,
@@ -1753,8 +1979,10 @@ pxa25x_udc_irq(int irq, void *_dev)
}
} else {
- u32 usir0 = USIR0 & ~UICR0;
- u32 usir1 = USIR1 & ~UICR1;
+ u32 usir0 = udc_get_reg(dev, USIR0) &
+ ~udc_get_reg(dev, UICR0);
+ u32 usir1 = udc_get_reg(dev, USIR1) &
+ ~udc_get_reg(dev, UICR1);
int i;
if (unlikely (!usir0 && !usir1))
@@ -1775,13 +2003,15 @@ pxa25x_udc_irq(int irq, void *_dev)
if (i && (usir0 & tmp)) {
handle_ep(&dev->ep[i]);
- USIR0 |= tmp;
+ udc_set_reg(dev, USIR0,
+ udc_get_reg(dev, USIR0) | tmp);
handled = 1;
}
#ifndef CONFIG_USB_PXA25X_SMALL
if (usir1 & tmp) {
handle_ep(&dev->ep[i+8]);
- USIR1 |= tmp;
+ udc_set_reg(dev, USIR1,
+ udc_get_reg(dev, USIR1) | tmp);
handled = 1;
}
#endif
@@ -1826,8 +2056,8 @@ static struct pxa25x_udc memory = {
USB_EP_CAPS_DIR_ALL),
},
.dev = &memory,
- .reg_udccs = &UDCCS0,
- .reg_uddr = &UDDR0,
+ .regoff_udccs = UDCCS0,
+ .regoff_uddr = UDDR0,
},
/* first group of endpoints */
@@ -1843,8 +2073,8 @@ static struct pxa25x_udc memory = {
.fifo_size = BULK_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 1,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .reg_udccs = &UDCCS1,
- .reg_uddr = &UDDR1,
+ .regoff_udccs = UDCCS1,
+ .regoff_uddr = UDDR1,
},
.ep[2] = {
.ep = {
@@ -1858,9 +2088,9 @@ static struct pxa25x_udc memory = {
.fifo_size = BULK_FIFO_SIZE,
.bEndpointAddress = 2,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .reg_udccs = &UDCCS2,
- .reg_ubcr = &UBCR2,
- .reg_uddr = &UDDR2,
+ .regoff_udccs = UDCCS2,
+ .regoff_ubcr = UBCR2,
+ .regoff_uddr = UDDR2,
},
#ifndef CONFIG_USB_PXA25X_SMALL
.ep[3] = {
@@ -1875,8 +2105,8 @@ static struct pxa25x_udc memory = {
.fifo_size = ISO_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 3,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
- .reg_udccs = &UDCCS3,
- .reg_uddr = &UDDR3,
+ .regoff_udccs = UDCCS3,
+ .regoff_uddr = UDDR3,
},
.ep[4] = {
.ep = {
@@ -1890,9 +2120,9 @@ static struct pxa25x_udc memory = {
.fifo_size = ISO_FIFO_SIZE,
.bEndpointAddress = 4,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
- .reg_udccs = &UDCCS4,
- .reg_ubcr = &UBCR4,
- .reg_uddr = &UDDR4,
+ .regoff_udccs = UDCCS4,
+ .regoff_ubcr = UBCR4,
+ .regoff_uddr = UDDR4,
},
.ep[5] = {
.ep = {
@@ -1905,8 +2135,8 @@ static struct pxa25x_udc memory = {
.fifo_size = INT_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 5,
.bmAttributes = USB_ENDPOINT_XFER_INT,
- .reg_udccs = &UDCCS5,
- .reg_uddr = &UDDR5,
+ .regoff_udccs = UDCCS5,
+ .regoff_uddr = UDDR5,
},
/* second group of endpoints */
@@ -1922,8 +2152,8 @@ static struct pxa25x_udc memory = {
.fifo_size = BULK_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 6,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .reg_udccs = &UDCCS6,
- .reg_uddr = &UDDR6,
+ .regoff_udccs = UDCCS6,
+ .regoff_uddr = UDDR6,
},
.ep[7] = {
.ep = {
@@ -1937,9 +2167,9 @@ static struct pxa25x_udc memory = {
.fifo_size = BULK_FIFO_SIZE,
.bEndpointAddress = 7,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .reg_udccs = &UDCCS7,
- .reg_ubcr = &UBCR7,
- .reg_uddr = &UDDR7,
+ .regoff_udccs = UDCCS7,
+ .regoff_ubcr = UBCR7,
+ .regoff_uddr = UDDR7,
},
.ep[8] = {
.ep = {
@@ -1953,8 +2183,8 @@ static struct pxa25x_udc memory = {
.fifo_size = ISO_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 8,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
- .reg_udccs = &UDCCS8,
- .reg_uddr = &UDDR8,
+ .regoff_udccs = UDCCS8,
+ .regoff_uddr = UDDR8,
},
.ep[9] = {
.ep = {
@@ -1968,9 +2198,9 @@ static struct pxa25x_udc memory = {
.fifo_size = ISO_FIFO_SIZE,
.bEndpointAddress = 9,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
- .reg_udccs = &UDCCS9,
- .reg_ubcr = &UBCR9,
- .reg_uddr = &UDDR9,
+ .regoff_udccs = UDCCS9,
+ .regoff_ubcr = UBCR9,
+ .regoff_uddr = UDDR9,
},
.ep[10] = {
.ep = {
@@ -1983,8 +2213,8 @@ static struct pxa25x_udc memory = {
.fifo_size = INT_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 10,
.bmAttributes = USB_ENDPOINT_XFER_INT,
- .reg_udccs = &UDCCS10,
- .reg_uddr = &UDDR10,
+ .regoff_udccs = UDCCS10,
+ .regoff_uddr = UDDR10,
},
/* third group of endpoints */
@@ -2000,8 +2230,8 @@ static struct pxa25x_udc memory = {
.fifo_size = BULK_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 11,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .reg_udccs = &UDCCS11,
- .reg_uddr = &UDDR11,
+ .regoff_udccs = UDCCS11,
+ .regoff_uddr = UDDR11,
},
.ep[12] = {
.ep = {
@@ -2015,9 +2245,9 @@ static struct pxa25x_udc memory = {
.fifo_size = BULK_FIFO_SIZE,
.bEndpointAddress = 12,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
- .reg_udccs = &UDCCS12,
- .reg_ubcr = &UBCR12,
- .reg_uddr = &UDDR12,
+ .regoff_udccs = UDCCS12,
+ .regoff_ubcr = UBCR12,
+ .regoff_uddr = UDDR12,
},
.ep[13] = {
.ep = {
@@ -2031,8 +2261,8 @@ static struct pxa25x_udc memory = {
.fifo_size = ISO_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 13,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
- .reg_udccs = &UDCCS13,
- .reg_uddr = &UDDR13,
+ .regoff_udccs = UDCCS13,
+ .regoff_uddr = UDDR13,
},
.ep[14] = {
.ep = {
@@ -2046,9 +2276,9 @@ static struct pxa25x_udc memory = {
.fifo_size = ISO_FIFO_SIZE,
.bEndpointAddress = 14,
.bmAttributes = USB_ENDPOINT_XFER_ISOC,
- .reg_udccs = &UDCCS14,
- .reg_ubcr = &UBCR14,
- .reg_uddr = &UDDR14,
+ .regoff_udccs = UDCCS14,
+ .regoff_ubcr = UBCR14,
+ .regoff_uddr = UDDR14,
},
.ep[15] = {
.ep = {
@@ -2061,8 +2291,8 @@ static struct pxa25x_udc memory = {
.fifo_size = INT_FIFO_SIZE,
.bEndpointAddress = USB_DIR_IN | 15,
.bmAttributes = USB_ENDPOINT_XFER_INT,
- .reg_udccs = &UDCCS15,
- .reg_uddr = &UDDR15,
+ .regoff_udccs = UDCCS15,
+ .regoff_uddr = UDDR15,
},
#endif /* !CONFIG_USB_PXA25X_SMALL */
};
@@ -2109,6 +2339,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
struct pxa25x_udc *dev = &memory;
int retval, irq;
u32 chiprev;
+ struct resource *res;
pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
@@ -2154,6 +2385,11 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->regs))
+ return PTR_ERR(dev->regs);
+
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
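Note: the probe path now maps the controller's MMIO window instead of touching fixed global register addresses. devm_ioremap_resource() validates the resource, requests the region, and ties the mapping's lifetime to the device, so no explicit iounmap() is needed on error or removal. A minimal sketch of the idiom with a hypothetical driver:

```c
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	/* first MEM resource of the platform device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* handles res == NULL too */

	/* registers are then accessed as readl(regs + OFFSET) */
	return 0;
}
```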
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index 3fe5931dc21a..4b8b72d7ab37 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -56,9 +56,9 @@ struct pxa25x_ep {
* UDDR = UDC Endpoint Data Register (the fifo)
* DRCM = DMA Request Channel Map
*/
- volatile u32 *reg_udccs;
- volatile u32 *reg_ubcr;
- volatile u32 *reg_uddr;
+ u32 regoff_udccs;
+ u32 regoff_ubcr;
+ u32 regoff_uddr;
};
struct pxa25x_request {
@@ -125,6 +125,7 @@ struct pxa25x_udc {
#ifdef CONFIG_USB_GADGET_DEBUG_FS
struct dentry *debugfs_udc;
#endif
+ void __iomem *regs;
};
#define to_pxa25x(g) (container_of((g), struct pxa25x_udc, gadget))
@@ -197,6 +198,8 @@ dump_udccs0(const char *label)
(udccs0 & UDCCS0_OPR) ? " opr" : "");
}
+static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *);
+
static void __maybe_unused
dump_state(struct pxa25x_udc *dev)
{
@@ -228,7 +231,7 @@ dump_state(struct pxa25x_udc *dev)
for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++) {
if (dev->ep[i].ep.desc == NULL)
continue;
- DMSG ("udccs%d = %02x\n", i, *dev->ep->reg_udccs);
+ DMSG ("udccs%d = %02x\n", i, udc_ep_get_UDCCS(&dev->ep[i]));
}
}
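Note: the struct change above is why the whole diff rewrites register accesses. The static `memory` initializer cannot hold absolute register pointers once the base address is only known after probe() maps the MMIO window, so each endpoint stores an offset and the accessors add it to dev->regs at run time. A sketch of what udc_get_reg()/udc_set_reg() and the per-endpoint wrappers plausibly look like — the names follow the diff, the bodies are an assumption:

```c
/* Sketch (assumption): offset-based MMIO accessors replacing the old
 * 'volatile u32 *' register pointers. */
static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
{
	return readl(dev->regs + reg);
}

static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
{
	writel(val, dev->regs + reg);
}

static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep)
{
	return udc_get_reg(ep->dev, ep->regoff_udccs);
}

static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 val)
{
	udc_set_reg(ep->dev, ep->regoff_udccs, val);
}
```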
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index b86a6f03592e..e4e70e11d0f6 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -371,12 +371,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
INIT_WORK(&gadget->work, usb_gadget_state_work);
gadget->dev.parent = parent;
-#ifdef CONFIG_HAS_DMA
- dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
- gadget->dev.dma_parms = parent->dma_parms;
- gadget->dev.dma_mask = parent->dma_mask;
-#endif
-
if (release)
gadget->dev.release = release;
else
@@ -443,6 +437,36 @@ err1:
EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
/**
+ * usb_get_gadget_udc_name - get the name of the first UDC controller
+ * This functions returns the name of the first UDC controller in the system.
+ * Please note that this interface is usefull only for legacy drivers which
+ * assume that there is only one UDC controller in the system and they need to
+ * get its name before initialization. There is no guarantee that the UDC
+ * of the returned name will be still available, when gadget driver registers
+ * itself.
+ *
+ * Returns pointer to string with UDC controller name on success, NULL
+ * otherwise. Caller should kfree() returned string.
+ */
+char *usb_get_gadget_udc_name(void)
+{
+ struct usb_udc *udc;
+ char *name = NULL;
+
+ /* For now we take the first available UDC */
+ mutex_lock(&udc_lock);
+ list_for_each_entry(udc, &udc_list, list) {
+ if (!udc->driver) {
+ name = kstrdup(udc->gadget->name, GFP_KERNEL);
+ break;
+ }
+ }
+ mutex_unlock(&udc_lock);
+ return name;
+}
+EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
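Note: a hedged usage sketch for the new helper — a legacy gadget driver that assumes a single UDC fetches the name up front and records it before registering. The udc_name field and the surrounding driver are assumptions for illustration; per the kernel-doc above, the caller owns the returned string and must eventually kfree() it:

```c
static int example_bind_first_udc(struct usb_gadget_driver *drv)
{
	char *name = usb_get_gadget_udc_name();

	if (!name)
		return -ENODEV;		/* no unclaimed UDC present */

	drv->udc_name = name;		/* assumed legacy-driver field */
	/* ... register with usb_gadget_probe_driver(drv); the string
	 * must be kfree()d once it is no longer needed ... */
	return 0;
}
```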
+
+/**
* usb_add_gadget_udc - adds a new gadget to the udc class driver list
* @parent: the parent device to this udc. Usually the controller
* driver's device.
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 1f117c360ebb..3050b18b2447 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -5,6 +5,7 @@ comment "USB Host Controller Drivers"
config USB_C67X00_HCD
tristate "Cypress C67x00 HCD support"
+ depends on HAS_IOMEM
help
The Cypress C67x00 (EZ-Host/EZ-OTG) chips are dual-role
host/peripheral/OTG USB controllers.
@@ -17,6 +18,7 @@ config USB_C67X00_HCD
config USB_XHCI_HCD
tristate "xHCI HCD (USB 3.0) support"
+ depends on HAS_DMA && HAS_IOMEM
---help---
The eXtensible Host Controller Interface (xHCI) is standard for USB 3.0
"SuperSpeed" host controller hardware.
@@ -53,6 +55,7 @@ config USB_XHCI_MTK
config USB_XHCI_MVEBU
tristate "xHCI support for Marvell Armada 375/38x"
select USB_XHCI_PLATFORM
+ depends on HAS_IOMEM
depends on ARCH_MVEBU || COMPILE_TEST
---help---
Say 'Y' to enable the support for the xHCI host controller
@@ -61,7 +64,7 @@ config USB_XHCI_MVEBU
config USB_XHCI_RCAR
tristate "xHCI support for Renesas R-Car SoCs"
select USB_XHCI_PLATFORM
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
---help---
Say 'Y' to enable the support for the xHCI host controller
found in Renesas R-Car ARM SoCs.
@@ -70,6 +73,7 @@ endif # USB_XHCI_HCD
config USB_EHCI_HCD
tristate "EHCI HCD (USB 2.0) support"
+ depends on HAS_DMA && HAS_IOMEM
---help---
The Enhanced Host Controller Interface (EHCI) is standard for USB 2.0
"high speed" (480 Mbit/sec, 60 Mbyte/sec) host controller hardware.
@@ -121,9 +125,6 @@ config USB_EHCI_TT_NEWSCHED
If unsure, say Y.
-config USB_FSL_MPH_DR_OF
- tristate
-
if USB_EHCI_HCD
config USB_EHCI_PCI
@@ -156,7 +157,6 @@ config USB_EHCI_FSL
tristate "Support for Freescale PPC on-chip EHCI USB controller"
depends on FSL_SOC
select USB_EHCI_ROOT_HUB_TT
- select USB_FSL_MPH_DR_OF if OF
---help---
Variation of ARC USB block used in some Freescale chips.
@@ -328,6 +328,7 @@ endif # USB_EHCI_HCD
config USB_OXU210HP_HCD
tristate "OXU210HP HCD support"
+ depends on HAS_IOMEM
---help---
The OXU210HP is a USB host/OTG/device controller. Enable this
option if your board has this chip. If unsure, say N.
@@ -340,6 +341,7 @@ config USB_OXU210HP_HCD
config USB_ISP116X_HCD
tristate "ISP116X HCD support"
+ depends on HAS_IOMEM
---help---
The ISP1160 and ISP1161 chips are USB host controllers. Enable this
option if your board has this chip. If unsure, say N.
@@ -351,6 +353,7 @@ config USB_ISP116X_HCD
config USB_ISP1362_HCD
tristate "ISP1362 HCD support"
+ depends on HAS_IOMEM
---help---
Supports the Philips ISP1362 chip as a host controller
@@ -361,7 +364,7 @@ config USB_ISP1362_HCD
config USB_FOTG210_HCD
tristate "FOTG210 HCD support"
- depends on USB
+ depends on USB && HAS_DMA && HAS_IOMEM
---help---
Faraday FOTG210 is an OTG controller which can be configured as
a USB 2.0 host. It is designed to meet the USB 2.0 EHCI specification
@@ -383,6 +386,7 @@ config USB_MAX3421_HCD
config USB_OHCI_HCD
tristate "OHCI HCD (USB 1.1) support"
+ depends on HAS_DMA && HAS_IOMEM
---help---
The Open Host Controller Interface (OHCI) is a standard for accessing
USB 1.1 host controller hardware. It does more in hardware than Intel's
@@ -668,6 +672,7 @@ config USB_U132_HCD
config USB_SL811_HCD
tristate "SL811HS HCD support"
+ depends on HAS_IOMEM
help
The SL811HS is a single-port USB controller that supports either
host side or peripheral side roles. Enable this option if your
@@ -699,6 +704,7 @@ config USB_SL811_CS
config USB_R8A66597_HCD
tristate "R8A66597 HCD support"
+ depends on HAS_IOMEM
help
The R8A66597 is a USB 2.0 host and peripheral controller.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 65a06b4382bf..a9ddd3c9ec94 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -74,7 +74,8 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
-obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
+obj-$(CONFIG_USB_FSL_USB2) += fsl-mph-dr-of.o
+obj-$(CONFIG_USB_EHCI_FSL) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o
obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 291aaa2baed8..963e2d0e8f92 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -35,6 +35,7 @@ MODULE_DESCRIPTION("Common USB driver for BCMA Bus");
MODULE_LICENSE("GPL");
struct bcma_hcd_device {
+ struct bcma_device *core;
struct platform_device *ehci_dev;
struct platform_device *ohci_dev;
struct gpio_desc *gpio_desc;
@@ -244,7 +245,10 @@ static const struct usb_ehci_pdata ehci_pdata = {
static const struct usb_ohci_pdata ohci_pdata = {
};
-static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, bool ohci, u32 addr)
+static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev,
+ const char *name, u32 addr,
+ const void *data,
+ size_t size)
{
struct platform_device *hci_dev;
struct resource hci_res[2];
@@ -259,8 +263,7 @@ static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, boo
hci_res[1].start = dev->irq;
hci_res[1].flags = IORESOURCE_IRQ;
- hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
- "ehci-platform" , 0);
+ hci_dev = platform_device_alloc(name, 0);
if (!hci_dev)
return ERR_PTR(-ENOMEM);
@@ -271,12 +274,8 @@ static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, boo
ARRAY_SIZE(hci_res));
if (ret)
goto err_alloc;
- if (ohci)
- ret = platform_device_add_data(hci_dev, &ohci_pdata,
- sizeof(ohci_pdata));
- else
- ret = platform_device_add_data(hci_dev, &ehci_pdata,
- sizeof(ehci_pdata));
+ if (data)
+ ret = platform_device_add_data(hci_dev, data, size);
if (ret)
goto err_alloc;
ret = platform_device_add(hci_dev);
@@ -290,31 +289,16 @@ err_alloc:
return ERR_PTR(ret);
}
-static int bcma_hcd_probe(struct bcma_device *dev)
+static int bcma_hcd_usb20_init(struct bcma_hcd_device *usb_dev)
{
- int err;
+ struct bcma_device *dev = usb_dev->core;
+ struct bcma_chipinfo *chipinfo = &dev->bus->chipinfo;
u32 ohci_addr;
- struct bcma_hcd_device *usb_dev;
- struct bcma_chipinfo *chipinfo;
-
- chipinfo = &dev->bus->chipinfo;
-
- /* TODO: Probably need checks here; is the core connected? */
+ int err;
if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
return -EOPNOTSUPP;
- usb_dev = devm_kzalloc(&dev->dev, sizeof(struct bcma_hcd_device),
- GFP_KERNEL);
- if (!usb_dev)
- return -ENOMEM;
-
- if (dev->dev.of_node)
- usb_dev->gpio_desc = devm_get_gpiod_from_child(&dev->dev, "vcc",
- &dev->dev.of_node->fwnode);
- if (!IS_ERR_OR_NULL(usb_dev->gpio_desc))
- gpiod_direction_output(usb_dev->gpio_desc, 1);
-
switch (dev->id.id) {
case BCMA_CORE_NS_USB20:
bcma_hcd_init_chip_arm(dev);
@@ -333,17 +317,20 @@ static int bcma_hcd_probe(struct bcma_device *dev)
&& chipinfo->rev == 0)
ohci_addr = 0x18009000;
- usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, true, ohci_addr);
+ usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, "ohci-platform",
+ ohci_addr, &ohci_pdata,
+ sizeof(ohci_pdata));
if (IS_ERR(usb_dev->ohci_dev))
return PTR_ERR(usb_dev->ohci_dev);
- usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, false, dev->addr);
+ usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, "ehci-platform",
+ dev->addr, &ehci_pdata,
+ sizeof(ehci_pdata));
if (IS_ERR(usb_dev->ehci_dev)) {
err = PTR_ERR(usb_dev->ehci_dev);
goto err_unregister_ohci_dev;
}
- bcma_set_drvdata(dev, usb_dev);
return 0;
err_unregister_ohci_dev:
@@ -351,6 +338,40 @@ err_unregister_ohci_dev:
return err;
}
+static int bcma_hcd_probe(struct bcma_device *core)
+{
+ int err;
+ struct bcma_hcd_device *usb_dev;
+
+ /* TODO: Probably need checks here; is the core connected? */
+
+ usb_dev = devm_kzalloc(&core->dev, sizeof(struct bcma_hcd_device),
+ GFP_KERNEL);
+ if (!usb_dev)
+ return -ENOMEM;
+ usb_dev->core = core;
+
+ if (core->dev.of_node)
+ usb_dev->gpio_desc = devm_get_gpiod_from_child(&core->dev, "vcc",
+ &core->dev.of_node->fwnode);
+ if (!IS_ERR_OR_NULL(usb_dev->gpio_desc))
+ gpiod_direction_output(usb_dev->gpio_desc, 1);
+
+ switch (core->id.id) {
+ case BCMA_CORE_USB20_HOST:
+ case BCMA_CORE_NS_USB20:
+ err = bcma_hcd_usb20_init(usb_dev);
+ if (err)
+ return err;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ bcma_set_drvdata(core, usb_dev);
+ return 0;
+}
+
static void bcma_hcd_remove(struct bcma_device *dev)
{
struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
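Note: with the wrapper now stored via bcma_set_drvdata() only after initialization succeeds, remove() can unwind symmetrically. A sketch of the likely teardown, assuming the fields shown in struct bcma_hcd_device above:

```c
/* Sketch (assumption): unregister the child platform devices created
 * by bcma_hcd_create_pdev() during probe */
static void example_hcd_remove(struct bcma_device *dev)
{
	struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);

	if (usb_dev->ohci_dev)
		platform_device_unregister(usb_dev->ohci_dev);
	if (usb_dev->ehci_dev)
		platform_device_unregister(usb_dev->ehci_dev);
}
```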
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index be0964a801e8..7440722bfbf0 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -185,8 +185,7 @@ static int ehci_atmel_drv_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ehci_atmel_drv_suspend(struct device *dev)
+static int __maybe_unused ehci_atmel_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
@@ -200,7 +199,7 @@ static int ehci_atmel_drv_suspend(struct device *dev)
return 0;
}
-static int ehci_atmel_drv_resume(struct device *dev)
+static int __maybe_unused ehci_atmel_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct atmel_ehci_priv *atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
@@ -208,7 +207,6 @@ static int ehci_atmel_drv_resume(struct device *dev)
atmel_start_clock(atmel_ehci);
return ehci_resume(hcd, false);
}
-#endif
#ifdef CONFIG_OF
static const struct of_device_id atmel_ehci_dt_ids[] = {
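Note: replacing the #ifdef CONFIG_PM guard with __maybe_unused works because the callbacks are referenced only through SET_SYSTEM_SLEEP_PM_OPS(), which expands to nothing on !CONFIG_PM_SLEEP; the attribute then silences the resulting defined-but-unused warning without preprocessor clutter. A minimal sketch of the pairing, with hypothetical names:

```c
static int __maybe_unused example_drv_suspend(struct device *dev)
{
	/* quiesce the controller, stop clocks ... */
	return 0;
}

static int __maybe_unused example_drv_resume(struct device *dev)
{
	/* restart clocks, resume the controller ... */
	return 0;
}

/* wraps SET_SYSTEM_SLEEP_PM_OPS(); on !CONFIG_PM_SLEEP the callbacks
 * are simply never referenced, and __maybe_unused keeps gcc quiet */
static SIMPLE_DEV_PM_OPS(example_pm_ops,
			 example_drv_suspend, example_drv_resume);
```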
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index b7d623f1523c..79d12b2ba3c4 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -11,76 +11,73 @@
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* this file is part of ehci-hcd.c */
#ifdef CONFIG_DYNAMIC_DEBUG
-/* check the values in the HCSPARAMS register
+/*
+ * check the values in the HCSPARAMS register
* (host controller _Structural_ parameters)
* see EHCI spec, Table 2-4 for each value
*/
-static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
+static void dbg_hcs_params(struct ehci_hcd *ehci, char *label)
{
u32 params = ehci_readl(ehci, &ehci->caps->hcs_params);
- ehci_dbg (ehci,
+ ehci_dbg(ehci,
"%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n",
label, params,
- HCS_DEBUG_PORT (params),
- HCS_INDICATOR (params) ? " ind" : "",
- HCS_N_CC (params),
- HCS_N_PCC (params),
- HCS_PORTROUTED (params) ? "" : " ordered",
- HCS_PPC (params) ? "" : " !ppc",
- HCS_N_PORTS (params)
- );
+ HCS_DEBUG_PORT(params),
+ HCS_INDICATOR(params) ? " ind" : "",
+ HCS_N_CC(params),
+ HCS_N_PCC(params),
+ HCS_PORTROUTED(params) ? "" : " ordered",
+ HCS_PPC(params) ? "" : " !ppc",
+ HCS_N_PORTS(params));
/* Port routing, per EHCI 0.95 Spec, Section 2.2.5 */
- if (HCS_PORTROUTED (params)) {
+ if (HCS_PORTROUTED(params)) {
int i;
- char buf [46], tmp [7], byte;
+ char buf[46], tmp[7], byte;
buf[0] = 0;
- for (i = 0; i < HCS_N_PORTS (params); i++) {
- // FIXME MIPS won't readb() ...
- byte = readb (&ehci->caps->portroute[(i>>1)]);
+ for (i = 0; i < HCS_N_PORTS(params); i++) {
+ /* FIXME MIPS won't readb() ... */
+ byte = readb(&ehci->caps->portroute[(i >> 1)]);
sprintf(tmp, "%d ",
- ((i & 0x1) ? ((byte)&0xf) : ((byte>>4)&0xf)));
+ (i & 0x1) ? byte & 0xf : (byte >> 4) & 0xf);
strcat(buf, tmp);
}
- ehci_dbg (ehci, "%s portroute %s\n",
- label, buf);
+ ehci_dbg(ehci, "%s portroute %s\n", label, buf);
}
}
#else
-static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
+static inline void dbg_hcs_params(struct ehci_hcd *ehci, char *label) {}
#endif
#ifdef CONFIG_DYNAMIC_DEBUG
-/* check the values in the HCCPARAMS register
+/*
+ * check the values in the HCCPARAMS register
* (host controller _Capability_ parameters)
* see EHCI Spec, Table 2-5 for each value
- * */
-static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
+ */
+static void dbg_hcc_params(struct ehci_hcd *ehci, char *label)
{
u32 params = ehci_readl(ehci, &ehci->caps->hcc_params);
- if (HCC_ISOC_CACHE (params)) {
- ehci_dbg (ehci,
+ if (HCC_ISOC_CACHE(params)) {
+ ehci_dbg(ehci,
"%s hcc_params %04x caching frame %s%s%s\n",
label, params,
HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
HCC_CANPARK(params) ? " park" : "",
HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
} else {
- ehci_dbg (ehci,
+ ehci_dbg(ehci,
"%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
label,
params,
@@ -97,21 +94,21 @@ static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
}
#else
-static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
+static inline void dbg_hcc_params(struct ehci_hcd *ehci, char *label) {}
#endif
#ifdef CONFIG_DYNAMIC_DEBUG
static void __maybe_unused
-dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
+dbg_qtd(const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
{
ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
hc32_to_cpup(ehci, &qtd->hw_next),
hc32_to_cpup(ehci, &qtd->hw_alt_next),
hc32_to_cpup(ehci, &qtd->hw_token),
- hc32_to_cpup(ehci, &qtd->hw_buf [0]));
- if (qtd->hw_buf [1])
+ hc32_to_cpup(ehci, &qtd->hw_buf[0]));
+ if (qtd->hw_buf[1])
ehci_dbg(ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
hc32_to_cpup(ehci, &qtd->hw_buf[1]),
hc32_to_cpup(ehci, &qtd->hw_buf[2]),
@@ -120,22 +117,22 @@ dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
}
static void __maybe_unused
-dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
+dbg_qh(const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qh_hw *hw = qh->hw;
- ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
+ ehci_dbg(ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next);
}
static void __maybe_unused
-dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
+dbg_itd(const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
{
- ehci_dbg (ehci, "%s [%d] itd %p, next %08x, urb %p\n",
+ ehci_dbg(ehci, "%s [%d] itd %p, next %08x, urb %p\n",
label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next),
itd->urb);
- ehci_dbg (ehci,
+ ehci_dbg(ehci,
" trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
hc32_to_cpu(ehci, itd->hw_transaction[0]),
hc32_to_cpu(ehci, itd->hw_transaction[1]),
@@ -145,7 +142,7 @@ dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
hc32_to_cpu(ehci, itd->hw_transaction[5]),
hc32_to_cpu(ehci, itd->hw_transaction[6]),
hc32_to_cpu(ehci, itd->hw_transaction[7]));
- ehci_dbg (ehci,
+ ehci_dbg(ehci,
" buf: %08x %08x %08x %08x %08x %08x %08x\n",
hc32_to_cpu(ehci, itd->hw_bufp[0]),
hc32_to_cpu(ehci, itd->hw_bufp[1]),
@@ -154,19 +151,19 @@ dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
hc32_to_cpu(ehci, itd->hw_bufp[4]),
hc32_to_cpu(ehci, itd->hw_bufp[5]),
hc32_to_cpu(ehci, itd->hw_bufp[6]));
- ehci_dbg (ehci, " index: %d %d %d %d %d %d %d %d\n",
+ ehci_dbg(ehci, " index: %d %d %d %d %d %d %d %d\n",
itd->index[0], itd->index[1], itd->index[2],
itd->index[3], itd->index[4], itd->index[5],
itd->index[6], itd->index[7]);
}
static void __maybe_unused
-dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
+dbg_sitd(const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{
- ehci_dbg (ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
+ ehci_dbg(ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next),
sitd->urb);
- ehci_dbg (ehci,
+ ehci_dbg(ehci,
" addr %08x sched %04x result %08x buf %08x %08x\n",
hc32_to_cpu(ehci, sitd->hw_fullspeed_ep),
hc32_to_cpu(ehci, sitd->hw_uframe),
@@ -176,11 +173,11 @@ dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
}
static int __maybe_unused
-dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
- return scnprintf (buf, len,
+ return scnprintf(buf, len,
"%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
- label, label [0] ? " " : "", status,
+ label, label[0] ? " " : "", status,
(status & STS_PPCE_MASK) ? " PPCE" : "",
(status & STS_ASS) ? " Async" : "",
(status & STS_PSS) ? " Periodic" : "",
@@ -191,79 +188,83 @@ dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
(status & STS_FLR) ? " FLR" : "",
(status & STS_PCD) ? " PCD" : "",
(status & STS_ERR) ? " ERR" : "",
- (status & STS_INT) ? " INT" : ""
- );
+ (status & STS_INT) ? " INT" : "");
}
static int __maybe_unused
-dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
- return scnprintf (buf, len,
+ return scnprintf(buf, len,
"%s%sintrenable %02x%s%s%s%s%s%s%s",
- label, label [0] ? " " : "", enable,
+ label, label[0] ? " " : "", enable,
(enable & STS_PPCE_MASK) ? " PPCE" : "",
(enable & STS_IAA) ? " IAA" : "",
(enable & STS_FATAL) ? " FATAL" : "",
(enable & STS_FLR) ? " FLR" : "",
(enable & STS_PCD) ? " PCD" : "",
(enable & STS_ERR) ? " ERR" : "",
- (enable & STS_INT) ? " INT" : ""
- );
+ (enable & STS_INT) ? " INT" : "");
}
-static const char *const fls_strings [] =
- { "1024", "512", "256", "??" };
+static const char *const fls_strings[] = { "1024", "512", "256", "??" };
static int
-dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{
- return scnprintf (buf, len,
+ return scnprintf(buf, len,
"%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s "
"period=%s%s %s",
- label, label [0] ? " " : "", command,
+ label, label[0] ? " " : "", command,
(command & CMD_HIRD) ? " HIRD" : "",
(command & CMD_PPCEE) ? " PPCEE" : "",
(command & CMD_FSP) ? " FSP" : "",
(command & CMD_ASPE) ? " ASPE" : "",
(command & CMD_PSPE) ? " PSPE" : "",
(command & CMD_PARK) ? " park" : "(park)",
- CMD_PARK_CNT (command),
+ CMD_PARK_CNT(command),
(command >> 16) & 0x3f,
(command & CMD_LRESET) ? " LReset" : "",
(command & CMD_IAAD) ? " IAAD" : "",
(command & CMD_ASE) ? " Async" : "",
(command & CMD_PSE) ? " Periodic" : "",
- fls_strings [(command >> 2) & 0x3],
+ fls_strings[(command >> 2) & 0x3],
(command & CMD_RESET) ? " Reset" : "",
- (command & CMD_RUN) ? "RUN" : "HALT"
- );
+ (command & CMD_RUN) ? "RUN" : "HALT");
}
static int
-dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{
char *sig;
/* signaling state */
switch (status & (3 << 10)) {
- case 0 << 10: sig = "se0"; break;
- case 1 << 10: sig = "k"; break; /* low speed */
- case 2 << 10: sig = "j"; break;
- default: sig = "?"; break;
+ case 0 << 10:
+ sig = "se0";
+ break;
+ case 1 << 10: /* low speed */
+ sig = "k";
+ break;
+ case 2 << 10:
+ sig = "j";
+ break;
+ default:
+ sig = "?";
+ break;
}
- return scnprintf (buf, len,
+ return scnprintf(buf, len,
"%s%sport:%d status %06x %d %s%s%s%s%s%s "
"sig=%s%s%s%s%s%s%s%s%s%s%s",
- label, label [0] ? " " : "", port, status,
- status>>25,/*device address */
- (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
+ label, label[0] ? " " : "", port, status,
+ status >> 25, /* device address */
+ (status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_ACK ?
" ACK" : "",
- (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
+ (status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_NYET ?
" NYET" : "",
- (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
+ (status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_STALL ?
" STALL" : "",
- (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
+ (status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_ERR ?
" ERR" : "",
(status & PORT_POWER) ? " POWER" : "",
(status & PORT_OWNER) ? " OWNER" : "",
@@ -282,52 +283,68 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
#else
static inline void __maybe_unused
-dbg_qh (char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
+dbg_qh(char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{}
static inline int __maybe_unused
-dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
-{ return 0; }
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+ return 0;
+}
static inline int __maybe_unused
-dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
-{ return 0; }
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{
+ return 0;
+}
static inline int __maybe_unused
-dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
-{ return 0; }
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+ return 0;
+}
static inline int __maybe_unused
-dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
-{ return 0; }
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{
+ return 0;
+}
#endif /* CONFIG_DYNAMIC_DEBUG */
-/* functions have the "wrong" filename when they're output... */
-#define dbg_status(ehci, label, status) { \
- char _buf [80]; \
- dbg_status_buf (_buf, sizeof _buf, label, status); \
- ehci_dbg (ehci, "%s\n", _buf); \
+static inline void
+dbg_status(struct ehci_hcd *ehci, const char *label, u32 status)
+{
+ char buf[80];
+
+ dbg_status_buf(buf, sizeof(buf), label, status);
+ ehci_dbg(ehci, "%s\n", buf);
}
-#define dbg_cmd(ehci, label, command) { \
- char _buf [80]; \
- dbg_command_buf (_buf, sizeof _buf, label, command); \
- ehci_dbg (ehci, "%s\n", _buf); \
+static inline void
+dbg_cmd(struct ehci_hcd *ehci, const char *label, u32 command)
+{
+ char buf[80];
+
+ dbg_command_buf(buf, sizeof(buf), label, command);
+ ehci_dbg(ehci, "%s\n", buf);
}
-#define dbg_port(ehci, label, port, status) { \
- char _buf [80]; \
- dbg_port_buf (_buf, sizeof _buf, label, port, status); \
- ehci_dbg (ehci, "%s\n", _buf); \
+static inline void
+dbg_port(struct ehci_hcd *ehci, const char *label, int port, u32 status)
+{
+ char buf[80];
+
+ dbg_port_buf(buf, sizeof(buf), label, port, status);
+ ehci_dbg(ehci, "%s\n", buf);
}
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILES
+#ifndef CONFIG_DYNAMIC_DEBUG
-static inline void create_debug_files (struct ehci_hcd *bus) { }
-static inline void remove_debug_files (struct ehci_hcd *bus) { }
+static inline void create_debug_files(struct ehci_hcd *bus) { }
+static inline void remove_debug_files(struct ehci_hcd *bus) { }
#else
@@ -348,6 +365,7 @@ static const struct file_operations debug_async_fops = {
.release = debug_close,
.llseek = default_llseek,
};
+
static const struct file_operations debug_bandwidth_fops = {
.owner = THIS_MODULE,
.open = debug_bandwidth_open,
@@ -355,6 +373,7 @@ static const struct file_operations debug_bandwidth_fops = {
.release = debug_close,
.llseek = default_llseek,
};
+
static const struct file_operations debug_periodic_fops = {
.owner = THIS_MODULE,
.open = debug_periodic_open,
@@ -362,6 +381,7 @@ static const struct file_operations debug_periodic_fops = {
.release = debug_close,
.llseek = default_llseek,
};
+
static const struct file_operations debug_registers_fops = {
.owner = THIS_MODULE,
.open = debug_registers_open,
@@ -381,13 +401,19 @@ struct debug_buffer {
size_t alloc_size;
};
-#define speed_char(info1) ({ char tmp; \
- switch (info1 & (3 << 12)) { \
- case QH_FULL_SPEED: tmp = 'f'; break; \
- case QH_LOW_SPEED: tmp = 'l'; break; \
- case QH_HIGH_SPEED: tmp = 'h'; break; \
- default: tmp = '?'; break; \
- } tmp; })
+static inline char speed_char(u32 info1)
+{
+ switch (info1 & (3 << 12)) {
+ case QH_FULL_SPEED:
+ return 'f';
+ case QH_LOW_SPEED:
+ return 'l';
+ case QH_HIGH_SPEED:
+ return 'h';
+ default:
+ return '?';
+ }
+}
static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
{
@@ -397,18 +423,14 @@ static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
return '*';
if (v & QTD_STS_HALT)
return '-';
- if (!IS_SHORT_READ (v))
+ if (!IS_SHORT_READ(v))
return ' ';
/* tries to advance through hw_alt_next */
return '/';
}
-static void qh_lines (
- struct ehci_hcd *ehci,
- struct ehci_qh *qh,
- char **nextp,
- unsigned *sizep
-)
+static void qh_lines(struct ehci_hcd *ehci, struct ehci_qh *qh,
+ char **nextp, unsigned *sizep)
{
u32 scratch;
u32 hw_curr;
@@ -435,7 +457,7 @@ static void qh_lines (
}
scratch = hc32_to_cpup(ehci, &hw->hw_info1);
hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0;
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)"
" [cur %08x next %08x buf[0] %08x]",
qh, scratch & 0x007f,
@@ -453,46 +475,52 @@ static void qh_lines (
next += temp;
/* hc may be modifying the list as we read it ... */
- list_for_each (entry, &qh->qtd_list) {
- td = list_entry (entry, struct ehci_qtd, qtd_list);
+ list_for_each(entry, &qh->qtd_list) {
+ char *type;
+
+ td = list_entry(entry, struct ehci_qtd, qtd_list);
scratch = hc32_to_cpup(ehci, &td->hw_token);
mark = ' ';
- if (hw_curr == td->qtd_dma)
+ if (hw_curr == td->qtd_dma) {
mark = '*';
- else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
+ } else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) {
mark = '+';
- else if (QTD_LENGTH (scratch)) {
+ } else if (QTD_LENGTH(scratch)) {
if (td->hw_alt_next == ehci->async->hw->hw_alt_next)
mark = '#';
else if (td->hw_alt_next != list_end)
mark = '/';
}
- temp = snprintf (next, size,
+ switch ((scratch >> 8) & 0x03) {
+ case 0:
+ type = "out";
+ break;
+ case 1:
+ type = "in";
+ break;
+ case 2:
+ type = "setup";
+ break;
+ default:
+ type = "?";
+ break;
+ }
+ temp = scnprintf(next, size,
"\n\t%p%c%s len=%d %08x urb %p"
" [td %08x buf[0] %08x]",
- td, mark, ({ char *tmp;
- switch ((scratch>>8)&0x03) {
- case 0: tmp = "out"; break;
- case 1: tmp = "in"; break;
- case 2: tmp = "setup"; break;
- default: tmp = "?"; break;
- } tmp;}),
+ td, mark, type,
(scratch >> 16) & 0x7fff,
scratch,
td->urb,
(u32) td->qtd_dma,
hc32_to_cpup(ehci, &td->hw_buf[0]));
- if (size < temp)
- temp = size;
size -= temp;
next += temp;
if (temp == size)
goto done;
}
- temp = snprintf (next, size, "\n");
- if (size < temp)
- temp = size;
+ temp = scnprintf(next, size, "\n");
size -= temp;
next += temp;
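Note: the snprintf()-to-scnprintf() conversions above are what allow the manual "if (size < temp) temp = size;" clamps to be dropped. snprintf() returns the length that would have been written given unlimited space, so 'size' could underflow after truncation, while scnprintf() returns the number of bytes actually stored (excluding the NUL). A minimal sketch of the resulting accumulation idiom:

```c
#include <linux/kernel.h>

/* append two lines to buf without overrunning it */
static size_t example_fill(char *buf, size_t size, u32 status, u32 cmd)
{
	char *next = buf;
	size_t temp;

	temp = scnprintf(next, size, "status  %08x\n", status);
	size -= temp;
	next += temp;

	/* with snprintf() a truncated first line could have made 'size'
	 * wrap here; scnprintf() caps its return value at 'size' */
	temp = scnprintf(next, size, "command %08x\n", cmd);
	size -= temp;
	next += temp;

	return next - buf;
}
```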
@@ -511,19 +539,20 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
struct ehci_qh *qh;
hcd = bus_to_hcd(buf->bus);
- ehci = hcd_to_ehci (hcd);
+ ehci = hcd_to_ehci(hcd);
next = buf->output_buf;
size = buf->alloc_size;
*next = 0;
- /* dumps a snapshot of the async schedule.
+ /*
+ * dumps a snapshot of the async schedule.
* usually empty except for long-term bulk reads, or head.
* one QH per line, and TDs we know about
*/
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
- qh_lines (ehci, qh, &next, &size);
+ qh_lines(ehci, qh, &next, &size);
if (!list_empty(&ehci->async_unlink) && size > 0) {
temp = scnprintf(next, size, "\nunlink =\n");
size -= temp;
@@ -535,7 +564,7 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
qh_lines(ehci, qh, &next, &size);
}
}
- spin_unlock_irqrestore (&ehci->lock, flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
return strlen(buf->output_buf);
}
@@ -623,6 +652,33 @@ static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf)
return next - buf->output_buf;
}
+static unsigned output_buf_tds_dir(char *buf, struct ehci_hcd *ehci,
+ struct ehci_qh_hw *hw, struct ehci_qh *qh, unsigned size)
+{
+ u32 scratch = hc32_to_cpup(ehci, &hw->hw_info1);
+ struct ehci_qtd *qtd;
+ char *type = "";
+ unsigned temp = 0;
+
+ /* count tds, get ep direction */
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
+ temp++;
+ switch ((hc32_to_cpu(ehci, qtd->hw_token) >> 8) & 0x03) {
+ case 0:
+ type = "out";
+ continue;
+ case 1:
+ type = "in";
+ continue;
+ }
+ }
+
+ return scnprintf(buf, size, " (%c%d ep%d%s [%d/%d] q%d p%d)",
+ speed_char(scratch), scratch & 0x007f,
+ (scratch >> 8) & 0x000f, type, qh->ps.usecs,
+ qh->ps.c_usecs, temp, 0x7ff & (scratch >> 16));
+}
+
#define DBG_SCHED_LIMIT 64
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
@@ -635,31 +691,32 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
unsigned i;
__hc32 tag;
- seen = kmalloc(DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC);
+ seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
if (!seen)
return 0;
seen_count = 0;
hcd = bus_to_hcd(buf->bus);
- ehci = hcd_to_ehci (hcd);
+ ehci = hcd_to_ehci(hcd);
next = buf->output_buf;
size = buf->alloc_size;
- temp = scnprintf (next, size, "size = %d\n", ehci->periodic_size);
+ temp = scnprintf(next, size, "size = %d\n", ehci->periodic_size);
size -= temp;
next += temp;
- /* dump a snapshot of the periodic schedule.
+ /*
+ * dump a snapshot of the periodic schedule.
* iso changes, interrupt usually doesn't.
*/
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
for (i = 0; i < ehci->periodic_size; i++) {
- p = ehci->pshadow [i];
- if (likely (!p.ptr))
+ p = ehci->pshadow[i];
+ if (likely(!p.ptr))
continue;
- tag = Q_NEXT_TYPE(ehci, ehci->periodic [i]);
+ tag = Q_NEXT_TYPE(ehci, ehci->periodic[i]);
- temp = scnprintf (next, size, "%4d: ", i);
+ temp = scnprintf(next, size, "%4d: ", i);
size -= temp;
next += temp;
@@ -669,7 +726,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
hw = p.qh->hw;
- temp = scnprintf (next, size, " qh%d-%04x/%p",
+ temp = scnprintf(next, size, " qh%d-%04x/%p",
p.qh->ps.period,
hc32_to_cpup(ehci,
&hw->hw_info2)
@@ -680,10 +737,10 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
next += temp;
/* don't repeat what follows this qh */
for (temp = 0; temp < seen_count; temp++) {
- if (seen [temp].ptr != p.ptr)
+ if (seen[temp].ptr != p.ptr)
continue;
if (p.qh->qh_next.ptr) {
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
" ...");
size -= temp;
next += temp;
@@ -692,58 +749,32 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
}
/* show more info the first time around */
if (temp == seen_count) {
- u32 scratch = hc32_to_cpup(ehci,
- &hw->hw_info1);
- struct ehci_qtd *qtd;
- char *type = "";
-
- /* count tds, get ep direction */
- temp = 0;
- list_for_each_entry (qtd,
- &p.qh->qtd_list,
- qtd_list) {
- temp++;
- switch (0x03 & (hc32_to_cpu(
- ehci,
- qtd->hw_token) >> 8)) {
- case 0: type = "out"; continue;
- case 1: type = "in"; continue;
- }
- }
-
- temp = scnprintf (next, size,
- " (%c%d ep%d%s "
- "[%d/%d] q%d p%d)",
- speed_char (scratch),
- scratch & 0x007f,
- (scratch >> 8) & 0x000f, type,
- p.qh->ps.usecs,
- p.qh->ps.c_usecs,
- temp,
- 0x7ff & (scratch >> 16));
+ temp = output_buf_tds_dir(next, ehci,
+ hw, p.qh, size);
if (seen_count < DBG_SCHED_LIMIT)
- seen [seen_count++].qh = p.qh;
- } else
+ seen[seen_count++].qh = p.qh;
+ } else {
temp = 0;
+ }
tag = Q_NEXT_TYPE(ehci, hw->hw_next);
p = p.qh->qh_next;
break;
case Q_TYPE_FSTN:
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
" fstn-%8x/%p", p.fstn->hw_prev,
p.fstn);
tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next);
p = p.fstn->fstn_next;
break;
case Q_TYPE_ITD:
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
" itd/%p", p.itd);
tag = Q_NEXT_TYPE(ehci, p.itd->hw_next);
p = p.itd->itd_next;
break;
case Q_TYPE_SITD:
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
" sitd%d-%04x/%p",
p.sitd->stream->ps.period,
hc32_to_cpup(ehci, &p.sitd->hw_uframe)
@@ -757,12 +788,12 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
next += temp;
} while (p.ptr);
- temp = scnprintf (next, size, "\n");
+ temp = scnprintf(next, size, "\n");
size -= temp;
next += temp;
}
- spin_unlock_irqrestore (&ehci->lock, flags);
- kfree (seen);
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ kfree(seen);
return buf->alloc_size - size;
}
@@ -789,19 +820,19 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
struct ehci_hcd *ehci;
unsigned long flags;
unsigned temp, size, i;
- char *next, scratch [80];
- static char fmt [] = "%*s\n";
- static char label [] = "";
+ char *next, scratch[80];
+ static char fmt[] = "%*s\n";
+ static char label[] = "";
hcd = bus_to_hcd(buf->bus);
- ehci = hcd_to_ehci (hcd);
+ ehci = hcd_to_ehci(hcd);
next = buf->output_buf;
size = buf->alloc_size;
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd)) {
- size = scnprintf (next, size,
+ size = scnprintf(next, size,
"bus %s, device %s\n"
"%s\n"
"SUSPENDED (no register access)\n",
@@ -813,7 +844,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
/* Capability Registers */
i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
"bus %s, device %s\n"
"%s\n"
"EHCI %x.%02x, rh state %s\n",
@@ -829,16 +860,16 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
if (dev_is_pci(hcd->self.controller)) {
struct pci_dev *pdev;
u32 offset, cap, cap2;
- unsigned count = 256/4;
+ unsigned count = 256 / 4;
pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
offset = HCC_EXT_CAPS(ehci_readl(ehci,
&ehci->caps->hcc_params));
while (offset && count--) {
- pci_read_config_dword (pdev, offset, &cap);
+ pci_read_config_dword(pdev, offset, &cap);
switch (cap & 0xff) {
case 1:
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
"ownership %08x%s%s\n", cap,
(cap & (1 << 24)) ? " linux" : "",
(cap & (1 << 16)) ? " firmware" : "");
@@ -846,8 +877,8 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
next += temp;
offset += 4;
- pci_read_config_dword (pdev, offset, &cap2);
- temp = scnprintf (next, size,
+ pci_read_config_dword(pdev, offset, &cap2);
+ temp = scnprintf(next, size,
"SMI sts/enable 0x%08x\n", cap2);
size -= temp;
next += temp;
@@ -863,50 +894,50 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
}
#endif
- // FIXME interpret both types of params
+ /* FIXME interpret both types of params */
i = ehci_readl(ehci, &ehci->caps->hcs_params);
- temp = scnprintf (next, size, "structural params 0x%08x\n", i);
+ temp = scnprintf(next, size, "structural params 0x%08x\n", i);
size -= temp;
next += temp;
i = ehci_readl(ehci, &ehci->caps->hcc_params);
- temp = scnprintf (next, size, "capability params 0x%08x\n", i);
+ temp = scnprintf(next, size, "capability params 0x%08x\n", i);
size -= temp;
next += temp;
/* Operational Registers */
- temp = dbg_status_buf (scratch, sizeof scratch, label,
+ temp = dbg_status_buf(scratch, sizeof(scratch), label,
ehci_readl(ehci, &ehci->regs->status));
- temp = scnprintf (next, size, fmt, temp, scratch);
+ temp = scnprintf(next, size, fmt, temp, scratch);
size -= temp;
next += temp;
- temp = dbg_command_buf (scratch, sizeof scratch, label,
+ temp = dbg_command_buf(scratch, sizeof(scratch), label,
ehci_readl(ehci, &ehci->regs->command));
- temp = scnprintf (next, size, fmt, temp, scratch);
+ temp = scnprintf(next, size, fmt, temp, scratch);
size -= temp;
next += temp;
- temp = dbg_intr_buf (scratch, sizeof scratch, label,
+ temp = dbg_intr_buf(scratch, sizeof(scratch), label,
ehci_readl(ehci, &ehci->regs->intr_enable));
- temp = scnprintf (next, size, fmt, temp, scratch);
+ temp = scnprintf(next, size, fmt, temp, scratch);
size -= temp;
next += temp;
- temp = scnprintf (next, size, "uframe %04x\n",
+ temp = scnprintf(next, size, "uframe %04x\n",
ehci_read_frame_index(ehci));
size -= temp;
next += temp;
- for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) {
- temp = dbg_port_buf (scratch, sizeof scratch, label, i,
+ for (i = 1; i <= HCS_N_PORTS(ehci->hcs_params); i++) {
+ temp = dbg_port_buf(scratch, sizeof(scratch), label, i,
ehci_readl(ehci,
&ehci->regs->port_status[i - 1]));
- temp = scnprintf (next, size, fmt, temp, scratch);
+ temp = scnprintf(next, size, fmt, temp, scratch);
size -= temp;
next += temp;
if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) {
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
" debug control %08x\n",
ehci_readl(ehci,
&ehci->debug->control));
@@ -924,31 +955,31 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
}
#ifdef EHCI_STATS
- temp = scnprintf (next, size,
+ temp = scnprintf(next, size,
"irq normal %ld err %ld iaa %ld (lost %ld)\n",
ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
ehci->stats.lost_iaa);
size -= temp;
next += temp;
- temp = scnprintf (next, size, "complete %ld unlink %ld\n",
+ temp = scnprintf(next, size, "complete %ld unlink %ld\n",
ehci->stats.complete, ehci->stats.unlink);
size -= temp;
next += temp;
#endif
done:
- spin_unlock_irqrestore (&ehci->lock, flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
return buf->alloc_size - size;
}
static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
- ssize_t (*fill_func)(struct debug_buffer *))
+ ssize_t (*fill_func)(struct debug_buffer *))
{
struct debug_buffer *buf;
- buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (buf) {
buf->bus = bus;
@@ -984,7 +1015,7 @@ out:
}
static ssize_t debug_output(struct file *file, char __user *user_buf,
- size_t len, loff_t *offset)
+ size_t len, loff_t *offset)
{
struct debug_buffer *buf = file->private_data;
int ret = 0;
@@ -1004,7 +1035,6 @@ static ssize_t debug_output(struct file *file, char __user *user_buf,
out:
return ret;
-
}
static int debug_close(struct inode *inode, struct file *file)
@@ -1037,11 +1067,12 @@ static int debug_bandwidth_open(struct inode *inode, struct file *file)
static int debug_periodic_open(struct inode *inode, struct file *file)
{
struct debug_buffer *buf;
+
buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
if (!buf)
return -ENOMEM;
- buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+ buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8) * PAGE_SIZE;
file->private_data = buf;
return 0;
}
@@ -1054,7 +1085,7 @@ static int debug_registers_open(struct inode *inode, struct file *file)
return file->private_data ? 0 : -ENOMEM;
}
-static inline void create_debug_files (struct ehci_hcd *ehci)
+static inline void create_debug_files(struct ehci_hcd *ehci)
{
struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
@@ -1084,9 +1115,9 @@ file_error:
debugfs_remove_recursive(ehci->debug_dir);
}
-static inline void remove_debug_files (struct ehci_hcd *ehci)
+static inline void remove_debug_files(struct ehci_hcd *ehci)
{
debugfs_remove_recursive(ehci->debug_dir);
}
-#endif /* STUB_DEBUG_FILES */
+#endif /* CONFIG_DYNAMIC_DEBUG */
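
The debugfs hunks above keep ehci-dbg.c's long-standing fill pattern: every scnprintf() return advances `next` and shrinks `size`. A minimal userspace model of why that bookkeeping is safe — scnprintf(), unlike snprintf(), returns only the bytes actually stored — is sketched below; scnprintf_model() and the register values are invented for illustration.

#include <stdarg.h>
#include <stdio.h>

/* Userspace model of kernel scnprintf(): unlike snprintf(), it returns
 * the number of bytes actually written (excluding the NUL), so the
 * "size -= temp; next += temp;" bookkeeping can never overrun. */
static int scnprintf_model(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (ret < 0 || size == 0)
		return 0;
	return (size_t)ret < size ? ret : (int)(size - 1);
}

int main(void)
{
	char out[64], *next = out;
	size_t size = sizeof(out);
	int temp;

	temp = scnprintf_model(next, size, "structural params 0x%08x\n", 0x1234u);
	size -= temp;
	next += temp;
	temp = scnprintf_model(next, size, "capability params 0x%08x\n", 0xabcdu);
	size -= temp;
	next += temp;

	fputs(out, stdout);
	printf("used %zu of %zu bytes\n", sizeof(out) - size, sizeof(out));
	return 0;
}
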
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 3b6eb219de1a..9f5ffb629973 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -35,6 +35,7 @@
#include <linux/usb/otg.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
+#include <linux/of_platform.h>
#include "ehci.h"
#include "ehci-fsl.h"
@@ -241,7 +242,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
* to portsc
*/
if (pdata->check_phy_clk_valid) {
- if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID)) {
+ if (!(ioread32be(non_ehci + FSL_SOC_USB_CTRL) &
+ PHY_CLK_VALID)) {
dev_warn(hcd->self.controller,
"USB PHY clock invalid\n");
return -EINVAL;
@@ -273,9 +275,11 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
/* Setup Snooping for all the 4GB space */
/* SNOOP1 starts from 0x0, size 2G */
- out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
+ iowrite32be(0x0 | SNOOP_SIZE_2GB,
+ non_ehci + FSL_SOC_USB_SNOOP1);
/* SNOOP2 starts from 0x80000000, size 2G */
- out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
+ iowrite32be(0x80000000 | SNOOP_SIZE_2GB,
+ non_ehci + FSL_SOC_USB_SNOOP2);
}
/* Deal with USB erratum A-005275 */
@@ -309,13 +313,13 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
if (pdata->have_sysif_regs) {
#ifdef CONFIG_FSL_SOC_BOOKE
- out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008);
- out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080);
+ iowrite32be(0x00000008, non_ehci + FSL_SOC_USB_PRICTRL);
+ iowrite32be(0x00000080, non_ehci + FSL_SOC_USB_AGECNTTHRSH);
#else
- out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
- out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
+ iowrite32be(0x0000000c, non_ehci + FSL_SOC_USB_PRICTRL);
+ iowrite32be(0x00000040, non_ehci + FSL_SOC_USB_AGECNTTHRSH);
#endif
- out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
+ iowrite32be(0x00000001, non_ehci + FSL_SOC_USB_SICTRL);
}
return 0;
@@ -554,7 +558,7 @@ static int ehci_fsl_drv_suspend(struct device *dev)
if (!fsl_deep_sleep())
return 0;
- ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
+ ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
return 0;
}
@@ -577,7 +581,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
usb_root_hub_lost_power(hcd->self.root_hub);
/* Restore USB PHY settings and enable the controller. */
- out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
+ iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
ehci_reset(ehci);
ehci_fsl_reinit(ehci);
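
The ehci-fsl.c conversion swaps the PowerPC-only in_be32()/out_be32() accessors for the portable ioread32be()/iowrite32be(), which also flips the argument order to (value, address). A rough userspace model of the byte swap, assuming a little-endian CPU and the GCC/Clang __builtin_bswap32() builtin; the real accessors add the I/O ordering guarantees this sketch omits.

#include <stdint.h>
#include <stdio.h>

static inline uint32_t ioread32be_model(const volatile uint32_t *addr)
{
	return __builtin_bswap32(*addr);
}

/* Note the (value, address) order -- the reverse of out_be32(). */
static inline void iowrite32be_model(uint32_t val, volatile uint32_t *addr)
{
	*addr = __builtin_bswap32(val);
}

int main(void)
{
	uint32_t reg;				/* stands in for an MMIO word */

	iowrite32be_model(0x00000008, &reg);	/* like the PRICTRL write */
	printf("in memory: 0x%08x, read back: 0x%08x\n",
	       reg, ioread32be_model(&reg));
	return 0;
}
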
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 14178bbf0694..ae1b6e69eb96 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -306,9 +306,9 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
/*-------------------------------------------------------------------------*/
+static void end_iaa_cycle(struct ehci_hcd *ehci);
static void end_unlink_async(struct ehci_hcd *ehci);
static void unlink_empty_async(struct ehci_hcd *ehci);
-static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
@@ -565,6 +565,9 @@ static int ehci_init(struct usb_hcd *hcd)
/* Accept arbitrarily long scatter-gather lists */
if (!(hcd->driver->flags & HCD_LOCAL_MEM))
hcd->self.sg_tablesize = ~0;
+
+ /* Prepare for unlinking active QHs */
+ ehci->old_current = ~0;
return 0;
}
@@ -675,8 +678,10 @@ int ehci_setup(struct usb_hcd *hcd)
return retval;
retval = ehci_halt(ehci);
- if (retval)
+ if (retval) {
+ ehci_mem_cleanup(ehci);
return retval;
+ }
ehci_reset(ehci);
@@ -756,7 +761,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
if (ehci->iaa_in_progress)
COUNT(ehci->stats.iaa);
- end_unlink_async(ehci);
+ end_iaa_cycle(ehci);
}
/* remote wakeup [4.3.1] */
@@ -909,7 +914,7 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
*/
} else {
qh = (struct ehci_qh *) urb->hcpriv;
- qh->exception = 1;
+ qh->unlink_reason |= QH_UNLINK_REQUESTED;
switch (qh->qh_state) {
case QH_STATE_LINKED:
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
@@ -970,10 +975,13 @@ rescan:
goto done;
}
- qh->exception = 1;
+ qh->unlink_reason |= QH_UNLINK_REQUESTED;
switch (qh->qh_state) {
case QH_STATE_LINKED:
- WARN_ON(!list_empty(&qh->qtd_list));
+ if (list_empty(&qh->qtd_list))
+ qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
+ else
+ WARN_ON(1);
if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
start_unlink_async(ehci, qh);
else
@@ -1040,7 +1048,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
* re-linking will call qh_refresh().
*/
usb_settoggle(qh->ps.udev, epnum, is_out, 0);
- qh->exception = 1;
+ qh->unlink_reason |= QH_UNLINK_REQUESTED;
if (eptype == USB_ENDPOINT_XFER_BULK)
start_unlink_async(ehci, qh);
else
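
The qh->exception boolean becomes a qh->unlink_reason bitmask in the hunks above, so each unlink path records *why* it fired and end_unlink_async() can later tell which reasons guarantee the QH is already inactive. A compilable sketch of that flag usage follows; only the names come from the patch, the numeric values are illustrative.

#include <stdio.h>

#define QH_UNLINK_HALTED	0x01	/* HC halted the queue */
#define QH_UNLINK_SHORT_READ	0x02	/* recover from a short read */
#define QH_UNLINK_DUMMY_OVERLAY	0x04	/* QH overlaid the dummy qtd */
#define QH_UNLINK_SHUTDOWN	0x08	/* driver shut the HC down */
#define QH_UNLINK_QUEUE_EMPTY	0x10	/* QH was idle and empty */
#define QH_UNLINK_REQUESTED	0x20	/* dequeue, disable, or reset */

/* Mirrors the test in end_unlink_async(): some reasons by themselves
 * guarantee the hardware can no longer be executing this QH. */
static int unlink_reason_means_inactive(unsigned int reason)
{
	return reason & (QH_UNLINK_HALTED | QH_UNLINK_SHORT_READ |
			 QH_UNLINK_DUMMY_OVERLAY);
}

int main(void)
{
	unsigned int reason = 0;

	reason |= QH_UNLINK_REQUESTED;		/* e.g. ehci_urb_dequeue() */
	reason |= QH_UNLINK_HALTED;		/* e.g. a qtd error */
	printf("inactive guaranteed: %s\n",
	       unlink_reason_means_inactive(reason) ? "yes" : "no");
	return 0;
}
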
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 086a7115d263..ffc90295a95f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -33,6 +33,8 @@
#ifdef CONFIG_PM
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
+
static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
{
return !udev->maxchild && udev->persist_enabled &&
@@ -347,8 +349,10 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
goto done;
ehci->rh_state = EHCI_RH_SUSPENDED;
- end_unlink_async(ehci);
unlink_empty_async_suspended(ehci);
+
+ /* Any IAA cycle that started before the suspend is now invalid */
+ end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
ehci_handle_intr_unlinks(ehci);
end_free_itds(ehci);
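
unlink_empty_async_suspended() is only defined and called under CONFIG_PM after this series, so its forward declaration moves inside the same guard instead of relying on __maybe_unused. A toy program showing the scoping pattern, with invented names:

#include <stdio.h>

#define CONFIG_PM			/* drop this to mimic a non-PM build */

#ifdef CONFIG_PM
static void suspended_helper(void);	/* declared next to its only callers */
#endif

int main(void)
{
#ifdef CONFIG_PM
	suspended_helper();
#else
	puts("non-PM build: helper never declared, defined, or called");
#endif
	return 0;
}

#ifdef CONFIG_PM
static void suspended_helper(void)
{
	puts("compiled only when CONFIG_PM is set");
}
#endif
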
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index c23e2858c815..3e226ef6ca62 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -33,6 +33,7 @@
#include <linux/usb/msm_hsusb_hw.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/acpi.h>
#include "ehci.h"
@@ -55,12 +56,16 @@ static int ehci_msm_reset(struct usb_hcd *hcd)
if (retval)
return retval;
+ /* select ULPI phy and clear other status/control bits in PORTSC */
+ writel(PORTSC_PTS_ULPI, USB_PORTSC);
/* bursts of unspecified length. */
writel(0, USB_AHBBURST);
/* Use the AHB transactor, allow posted data writes */
writel(0x8, USB_AHBMODE);
/* Disable streaming mode and select host mode */
writel(0x13, USB_USBMODE);
+ /* Disable ULPI_TX_PKT_EN_CLR_FIX which is valid only for HSIC */
+ writel(readl(USB_GENCONFIG_2) & ~ULPI_TX_PKT_EN_CLR_FIX, USB_GENCONFIG_2);
return 0;
}
@@ -104,9 +109,9 @@ static int ehci_msm_probe(struct platform_device *pdev)
}
/*
- * OTG driver takes care of PHY initialization, clock management,
- * powering up VBUS, mapping of registers address space and power
- * management.
+	 * If there is an OTG driver, let it take care of PHY initialization,
+	 * clock management, powering up VBUS, mapping of the register address
+	 * space, and power management.
*/
if (pdev->dev.of_node)
phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
@@ -114,27 +119,35 @@ static int ehci_msm_probe(struct platform_device *pdev)
phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(phy)) {
- dev_err(&pdev->dev, "unable to find transceiver\n");
- ret = -EPROBE_DEFER;
- goto put_hcd;
- }
-
- ret = otg_set_host(phy->otg, &hcd->self);
- if (ret < 0) {
- dev_err(&pdev->dev, "unable to register with transceiver\n");
- goto put_hcd;
+ if (PTR_ERR(phy) == -EPROBE_DEFER) {
+ dev_err(&pdev->dev, "unable to find transceiver\n");
+ ret = -EPROBE_DEFER;
+ goto put_hcd;
+ }
+ phy = NULL;
}
hcd->usb_phy = phy;
device_init_wakeup(&pdev->dev, 1);
- /*
- * OTG device parent of HCD takes care of putting
- * hardware into low power mode.
- */
- pm_runtime_no_callbacks(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- /* FIXME: need to call usb_add_hcd() here? */
+ if (phy && phy->otg) {
+ /*
+ * MSM OTG driver takes care of adding the HCD and
+ * placing hardware into low power mode via runtime PM.
+ */
+ ret = otg_set_host(phy->otg, &hcd->self);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "unable to register with transceiver\n");
+ goto put_hcd;
+ }
+
+ pm_runtime_no_callbacks(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ } else {
+ ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ if (ret)
+ goto put_hcd;
+ }
return 0;
@@ -152,9 +165,10 @@ static int ehci_msm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
- otg_set_host(hcd->usb_phy->otg, NULL);
-
- /* FIXME: need to call usb_remove_hcd() here? */
+ if (hcd->usb_phy && hcd->usb_phy->otg)
+ otg_set_host(hcd->usb_phy->otg, NULL);
+ else
+ usb_remove_hcd(hcd);
usb_put_hcd(hcd);
@@ -191,6 +205,12 @@ static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
.resume = ehci_msm_pm_resume,
};
+static const struct acpi_device_id msm_ehci_acpi_ids[] = {
+ { "QCOM8040", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, msm_ehci_acpi_ids);
+
static const struct of_device_id msm_ehci_dt_match[] = {
{ .compatible = "qcom,ehci-host", },
{}
@@ -200,10 +220,12 @@ MODULE_DEVICE_TABLE(of, msm_ehci_dt_match);
static struct platform_driver ehci_msm_driver = {
.probe = ehci_msm_probe,
.remove = ehci_msm_remove,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "msm_hsusb_host",
.pm = &ehci_msm_dev_pm_ops,
.of_match_table = msm_ehci_dt_match,
+ .acpi_match_table = ACPI_PTR(msm_ehci_acpi_ids),
},
};
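
The reworked ehci-msm probe treats the PHY as optional: -EPROBE_DEFER still aborts the probe, but any other lookup failure now means "run the HCD without a transceiver". That one-return-value dance relies on the kernel's ERR_PTR convention, modelled below for userspace; get_phy() and the surrounding plumbing are stand-ins.

#include <stdio.h>

/* Userspace model of ERR_PTR/IS_ERR/PTR_ERR: an error code is carried
 * inside the pointer value, so one return covers both outcomes. */
#define MAX_ERRNO	4095
#define EPROBE_DEFER	517	/* kernel-internal errno: resource not ready */
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *get_phy(int ready)		/* stand-in for the PHY lookup */
{
	static int phy_object;

	return ready ? (void *)&phy_object : ERR_PTR(-EPROBE_DEFER);
}

int main(void)
{
	const char *stage[] = { "first probe", "retry" };
	int ready;

	for (ready = 0; ready <= 1; ready++) {
		void *phy = get_phy(ready);

		if (IS_ERR(phy)) {
			if (PTR_ERR(phy) == -EPROBE_DEFER) {
				printf("%s: defer until the PHY appears\n",
				       stage[ready]);
				continue;
			}
			phy = NULL;	/* optional PHY: carry on without it */
		}
		printf("%s: phy %s\n", stage[ready],
		       phy ? "present" : "absent");
	}
	return 0;
}
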
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 2a5d2fd76040..3b3649d88c5f 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -377,6 +377,12 @@ static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return usb_hcd_pci_probe(pdev, id);
}
+static void ehci_pci_remove(struct pci_dev *pdev)
+{
+ pci_clear_mwi(pdev);
+ usb_hcd_pci_remove(pdev);
+}
+
/* PCI driver selection metadata; PCI hotplugging uses this */
static const struct pci_device_id pci_ids [] = { {
/* handle any USB 2.0 EHCI controller */
@@ -396,7 +402,7 @@ static struct pci_driver ehci_pci_driver = {
.id_table = pci_ids,
.probe = ehci_pci_probe,
- .remove = usb_hcd_pci_remove,
+ .remove = ehci_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
#ifdef CONFIG_PM
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index bd7082f297bb..1757ebb471b6 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -345,8 +345,7 @@ static int ehci_platform_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
- struct platform_device *pdev =
- container_of(dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
int ret;
@@ -364,8 +363,7 @@ static int ehci_platform_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
- struct platform_device *pdev =
- container_of(dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(dev);
struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
if (pdata->power_on) {
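
to_platform_device(), used above (and again in ehci-st.c below) in place of the open-coded container_of(), is itself just a container_of() wrapper. A self-contained model of that pointer arithmetic, with invented struct layouts:

#include <stddef.h>
#include <stdio.h>

/* Userspace model of container_of(): recover the outer structure from
 * a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };
struct platform_device { const char *name; struct device dev; };

#define to_platform_device(d) container_of(d, struct platform_device, dev)

int main(void)
{
	struct platform_device pdev = {
		.name = "ehci-platform",
		.dev = { .id = 7 },
	};
	struct device *dev = &pdev.dev;	/* what the PM callback receives */

	printf("%s\n", to_platform_device(dev)->name);
	return 0;
}
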
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index aad0777240d3..eca3710d8fc4 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -394,6 +394,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
goto retry_xacterr;
}
stopped = 1;
+ qh->unlink_reason |= QH_UNLINK_HALTED;
/* magic dummy for some short reads; qh won't advance.
* that silicon quirk can kick in with this dummy too.
@@ -408,6 +409,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
&& !(qtd->hw_alt_next
& EHCI_LIST_END(ehci))) {
stopped = 1;
+ qh->unlink_reason |= QH_UNLINK_SHORT_READ;
}
/* stop scanning when we reach qtds the hc is using */
@@ -420,8 +422,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
stopped = 1;
/* cancel everything if we halt, suspend, etc */
- if (ehci->rh_state < EHCI_RH_RUNNING)
+ if (ehci->rh_state < EHCI_RH_RUNNING) {
last_status = -ESHUTDOWN;
+ qh->unlink_reason |= QH_UNLINK_SHUTDOWN;
+ }
/* this qtd is active; skip it unless a previous qtd
* for its urb faulted, or its urb was canceled.
@@ -538,10 +542,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
* except maybe high bandwidth ...
*/
if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
- qh->exception = 1;
+ qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY;
/* Let the caller know if the QH needs to be unlinked. */
- return qh->exception;
+ return qh->unlink_reason;
}
/*-------------------------------------------------------------------------*/
@@ -1003,7 +1007,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->qh_state = QH_STATE_LINKED;
qh->xacterrs = 0;
- qh->exception = 0;
+ qh->unlink_reason = 0;
/* qtd completions reported later by interrupt */
enable_async(ehci);
@@ -1279,17 +1283,13 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
static void start_iaa_cycle(struct ehci_hcd *ehci)
{
- /* Do nothing if an IAA cycle is already running */
- if (ehci->iaa_in_progress)
- return;
- ehci->iaa_in_progress = true;
-
/* If the controller isn't running, we don't have to wait for it */
if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
end_unlink_async(ehci);
- /* Otherwise start a new IAA cycle */
- } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+ /* Otherwise start a new IAA cycle if one isn't already running */
+ } else if (ehci->rh_state == EHCI_RH_RUNNING &&
+ !ehci->iaa_in_progress) {
/* Make sure the unlinks are all visible to the hardware */
wmb();
@@ -1297,17 +1297,13 @@ static void start_iaa_cycle(struct ehci_hcd *ehci)
ehci_writel(ehci, ehci->command | CMD_IAAD,
&ehci->regs->command);
ehci_readl(ehci, &ehci->regs->command);
+ ehci->iaa_in_progress = true;
ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
}
}
-/* the async qh for the qtds being unlinked are now gone from the HC */
-
-static void end_unlink_async(struct ehci_hcd *ehci)
+static void end_iaa_cycle(struct ehci_hcd *ehci)
{
- struct ehci_qh *qh;
- bool early_exit;
-
if (ehci->has_synopsys_hc_bug)
ehci_writel(ehci, (u32) ehci->async->qh_dma,
&ehci->regs->async_next);
@@ -1315,6 +1311,16 @@ static void end_unlink_async(struct ehci_hcd *ehci)
/* The current IAA cycle has ended */
ehci->iaa_in_progress = false;
+ end_unlink_async(ehci);
+}
+
+/* See if the async qh for the qtds being unlinked is now gone from the HC */
+
+static void end_unlink_async(struct ehci_hcd *ehci)
+{
+ struct ehci_qh *qh;
+ bool early_exit;
+
if (list_empty(&ehci->async_unlink))
return;
qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
@@ -1335,14 +1341,60 @@ static void end_unlink_async(struct ehci_hcd *ehci)
* after the IAA interrupt occurs. In self-defense, always go
* through two IAA cycles for each QH.
*/
- else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+ else if (qh->qh_state == QH_STATE_UNLINK) {
+ /*
+ * Second IAA cycle has finished. Process only the first
+ * waiting QH (NVIDIA (?) bug).
+ */
+ list_move_tail(&qh->unlink_node, &ehci->async_idle);
+ }
+
+ /*
+ * AMD/ATI (?) bug: The HC can continue to use an active QH long
+ * after the IAA interrupt occurs. To prevent problems, QHs that
+ * may still be active will wait until 2 ms have passed with no
+ * change to the hw_current and hw_token fields (this delay occurs
+ * between the two IAA cycles).
+ *
+ * The EHCI spec (4.8.2) says that active QHs must not be removed
+ * from the async schedule and recommends waiting until the QH
+ * goes inactive. This is ridiculous because the QH will _never_
+ * become inactive if the endpoint NAKs indefinitely.
+ */
+
+ /* Some reasons for unlinking guarantee the QH can't be active */
+ else if (qh->unlink_reason & (QH_UNLINK_HALTED |
+ QH_UNLINK_SHORT_READ | QH_UNLINK_DUMMY_OVERLAY))
+ goto DelayDone;
+
+ /* The QH can't be active if the queue was and still is empty... */
+ else if ((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) &&
+ list_empty(&qh->qtd_list))
+ goto DelayDone;
+
+ /* ... or if the QH has halted */
+ else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
+ goto DelayDone;
+
+ /* Otherwise we have to wait until the QH stops changing */
+ else {
+ __hc32 qh_current, qh_token;
+
+ qh_current = qh->hw->hw_current;
+ qh_token = qh->hw->hw_token;
+ if (qh_current != ehci->old_current ||
+ qh_token != ehci->old_token) {
+ ehci->old_current = qh_current;
+ ehci->old_token = qh_token;
+ ehci_enable_event(ehci,
+ EHCI_HRTIMER_ACTIVE_UNLINK, true);
+ return;
+ }
+ DelayDone:
qh->qh_state = QH_STATE_UNLINK;
early_exit = true;
}
-
- /* Otherwise process only the first waiting QH (NVIDIA bug?) */
- else
- list_move_tail(&qh->unlink_node, &ehci->async_idle);
+ ehci->old_current = ~0; /* Prepare for next QH */
/* Start a new IAA cycle if any QHs are waiting for it */
if (!list_empty(&ehci->async_unlink))
@@ -1395,6 +1447,7 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
/* If nothing else is being unlinked, unlink the last empty QH */
if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
+ qh_to_unlink->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
start_unlink_async(ehci, qh_to_unlink);
--count;
}
@@ -1406,8 +1459,10 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
}
}
+#ifdef CONFIG_PM
+
/* The root hub is suspended; unlink all the async QHs */
-static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
{
struct ehci_qh *qh;
@@ -1416,9 +1471,10 @@ static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
WARN_ON(!list_empty(&qh->qtd_list));
single_unlink_async(ehci, qh);
}
- start_iaa_cycle(ehci);
}
+#endif
+
/* makes sure the async qh will become idle */
/* caller must own ehci->lock */
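
The new wait step in end_unlink_async() samples hw_current and hw_token and only finishes the unlink once two samples taken 2 ms apart match, i.e. the controller has stopped advancing the QH. A toy model of that quiescence test, with invented register values:

#include <stdint.h>
#include <stdio.h>

struct qh_sample { uint32_t current_td, token; };

static uint32_t old_current = ~0u, old_token;	/* ~0 = "no sample yet" */

/* Returns 1 once two consecutive samples match; otherwise records the
 * sample and returns 0, meaning "re-arm the 2 ms timer and try again". */
static int qh_went_quiet(struct qh_sample s)
{
	if (s.current_td != old_current || s.token != old_token) {
		old_current = s.current_td;
		old_token = s.token;
		return 0;		/* still changing: keep waiting */
	}
	return 1;			/* stable: safe to finish the unlink */
}

int main(void)
{
	struct qh_sample s1 = { 0x1000, 0x80 };
	struct qh_sample s2 = { 0x1000, 0x80 };

	printf("first sample quiet? %d\n", qh_went_quiet(s1));	/* 0 */
	printf("second sample quiet? %d\n", qh_went_quiet(s2));	/* 1 */
	return 0;
}
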
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f9a332775c47..1dfe54f14737 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -34,7 +34,7 @@
* pre-calculated schedule data to make appending to the queue be quick.
*/
-static int ehci_get_frame (struct usb_hcd *hcd);
+static int ehci_get_frame(struct usb_hcd *hcd);
/*
* periodic_next_shadow - return "next" pointer on shadow list
@@ -52,7 +52,7 @@ periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
return &periodic->fstn->fstn_next;
case Q_TYPE_ITD:
return &periodic->itd->itd_next;
- // case Q_TYPE_SITD:
+ /* case Q_TYPE_SITD: */
default:
return &periodic->sitd->sitd_next;
}
@@ -73,7 +73,7 @@ shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
}
/* caller must hold ehci->lock */
-static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
+static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &ehci->pshadow[frame];
__hc32 *hw_p = &ehci->periodic[frame];
@@ -296,10 +296,9 @@ static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
if (x <= 125) {
budget_line[uf] = x;
break;
- } else {
- budget_line[uf] = 125;
- x -= 125;
}
+ budget_line[uf] = 125;
+ x -= 125;
}
}
}
@@ -330,7 +329,8 @@ static int __maybe_unused same_tt(struct usb_device *dev1,
*/
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
- unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
+ unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK;
+
if (!smask) {
ehci_err(ehci, "invalid empty smask!\n");
/* uframe 7 can't have bw so this will indicate failure */
@@ -346,7 +346,8 @@ max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
int i;
- for (i=0; i<7; i++) {
+
+ for (i = 0; i < 7; i++) {
if (max_tt_usecs[i] < tt_usecs[i]) {
tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
tt_usecs[i] = max_tt_usecs[i];
@@ -375,7 +376,7 @@ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
* limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
* since proper scheduling limits ssplits to less than 16 per uframe.
*/
-static int tt_available (
+static int tt_available(
struct ehci_hcd *ehci,
struct ehci_per_sched *ps,
struct ehci_tt *tt,
@@ -409,11 +410,11 @@ static int tt_available (
* must be empty, so as to not illegally delay
* already scheduled transactions
*/
- if (125 < usecs) {
+ if (usecs > 125) {
int ufs = (usecs / 125);
for (i = uframe; i < (uframe + ufs) && i < 8; i++)
- if (0 < tt_usecs[i])
+ if (tt_usecs[i] > 0)
return 0;
}
@@ -435,7 +436,7 @@ static int tt_available (
* for a periodic transfer starting at the specified frame, using
* all the uframes in the mask.
*/
-static int tt_no_collision (
+static int tt_no_collision(
struct ehci_hcd *ehci,
unsigned period,
struct usb_device *dev,
@@ -455,8 +456,8 @@ static int tt_no_collision (
__hc32 type;
struct ehci_qh_hw *hw;
- here = ehci->pshadow [frame];
- type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
+ here = ehci->pshadow[frame];
+ type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
while (here.ptr) {
switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_ITD:
@@ -479,7 +480,7 @@ static int tt_no_collision (
here = here.qh->qh_next;
continue;
case Q_TYPE_SITD:
- if (same_tt (dev, here.sitd->urb->dev)) {
+ if (same_tt(dev, here.sitd->urb->dev)) {
u16 mask;
mask = hc32_to_cpu(ehci, here.sitd
@@ -492,9 +493,9 @@ static int tt_no_collision (
type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
here = here.sitd->sitd_next;
continue;
- // case Q_TYPE_FSTN:
+ /* case Q_TYPE_FSTN: */
default:
- ehci_dbg (ehci,
+ ehci_dbg(ehci,
"periodic frame %d bogus type %d\n",
frame, type);
}
@@ -588,14 +589,14 @@ static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->qh_next = here;
if (here.qh)
qh->hw->hw_next = *hw_p;
- wmb ();
+ wmb();
prev->qh = qh;
- *hw_p = QH_NEXT (ehci, qh->qh_dma);
+ *hw_p = QH_NEXT(ehci, qh->qh_dma);
}
}
qh->qh_state = QH_STATE_LINKED;
qh->xacterrs = 0;
- qh->exception = 0;
+ qh->unlink_reason = 0;
/* update per-qh bandwidth for debugfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
@@ -633,7 +634,7 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
period = qh->ps.period ? : 1;
for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
- periodic_unlink (ehci, i, qh);
+ periodic_unlink(ehci, i, qh);
/* update per-qh bandwidth for debugfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
@@ -679,7 +680,7 @@ static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
/* if the qh is waiting for unlink, cancel it now */
cancel_unlink_wait_intr(ehci, qh);
- qh_unlink_periodic (ehci, qh);
+ qh_unlink_periodic(ehci, qh);
/* Make sure the unlinks are visible before starting the timer */
wmb();
@@ -763,7 +764,7 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
/*-------------------------------------------------------------------------*/
-static int check_period (
+static int check_period(
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
@@ -785,11 +786,11 @@ static int check_period (
return 0;
}
- // success!
+ /* success! */
return 1;
}
-static int check_intr_schedule (
+static int check_intr_schedule(
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
@@ -925,7 +926,7 @@ done:
return status;
}
-static int intr_submit (
+static int intr_submit(
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
@@ -940,7 +941,7 @@ static int intr_submit (
/* get endpoint and transfer/schedule data */
epnum = urb->ep->desc.bEndpointAddress;
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
@@ -951,20 +952,21 @@ static int intr_submit (
goto done_not_linked;
/* get qh and force any scheduling errors */
- INIT_LIST_HEAD (&empty);
+ INIT_LIST_HEAD(&empty);
qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
if (qh == NULL) {
status = -ENOMEM;
goto done;
}
if (qh->qh_state == QH_STATE_IDLE) {
- if ((status = qh_schedule (ehci, qh)) != 0)
+ status = qh_schedule(ehci, qh);
+ if (status)
goto done;
}
/* then queue the urb's tds to the qh */
qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
- BUG_ON (qh == NULL);
+ BUG_ON(qh == NULL);
/* stuff into the periodic schedule */
if (qh->qh_state == QH_STATE_IDLE) {
@@ -982,9 +984,9 @@ done:
if (unlikely(status))
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
- spin_unlock_irqrestore (&ehci->lock, flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
if (status)
- qtd_list_free (ehci, urb, qtd_list);
+ qtd_list_free(ehci, urb, qtd_list);
return status;
}
@@ -1022,12 +1024,12 @@ static void scan_intr(struct ehci_hcd *ehci)
/* ehci_iso_stream ops work with both ITD and SITD */
static struct ehci_iso_stream *
-iso_stream_alloc (gfp_t mem_flags)
+iso_stream_alloc(gfp_t mem_flags)
{
struct ehci_iso_stream *stream;
- stream = kzalloc(sizeof *stream, mem_flags);
- if (likely (stream != NULL)) {
+ stream = kzalloc(sizeof(*stream), mem_flags);
+ if (likely(stream != NULL)) {
INIT_LIST_HEAD(&stream->td_list);
INIT_LIST_HEAD(&stream->free_list);
stream->next_uframe = NO_FRAME;
@@ -1037,13 +1039,13 @@ iso_stream_alloc (gfp_t mem_flags)
}
static void
-iso_stream_init (
+iso_stream_init(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct urb *urb
)
{
- static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
+ static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
struct usb_device *dev = urb->dev;
u32 buf1;
@@ -1058,11 +1060,7 @@ iso_stream_init (
epnum = usb_pipeendpoint(urb->pipe);
is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
maxp = usb_endpoint_maxp(&urb->ep->desc);
- if (is_input) {
- buf1 = (1 << 11);
- } else {
- buf1 = 0;
- }
+ buf1 = is_input ? 1 << 11 : 0;
/* knows about ITD vs SITD */
if (dev->speed == USB_SPEED_HIGH) {
@@ -1111,7 +1109,7 @@ iso_stream_init (
think_time = dev->tt ? dev->tt->think_time : 0;
stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
dev->speed, is_input, 1, maxp));
- hs_transfers = max (1u, (maxp + 187) / 188);
+ hs_transfers = max(1u, (maxp + 187) / 188);
if (is_input) {
u32 tmp;
@@ -1151,7 +1149,7 @@ iso_stream_init (
}
static struct ehci_iso_stream *
-iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
+iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{
unsigned epnum;
struct ehci_iso_stream *stream;
@@ -1164,25 +1162,25 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
else
ep = urb->dev->ep_out[epnum];
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
stream = ep->hcpriv;
- if (unlikely (stream == NULL)) {
+ if (unlikely(stream == NULL)) {
stream = iso_stream_alloc(GFP_ATOMIC);
- if (likely (stream != NULL)) {
+ if (likely(stream != NULL)) {
ep->hcpriv = stream;
iso_stream_init(ehci, stream, urb);
}
/* if dev->ep [epnum] is a QH, hw is set */
- } else if (unlikely (stream->hw != NULL)) {
- ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
+ } else if (unlikely(stream->hw != NULL)) {
+ ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
urb->dev->devpath, epnum,
usb_pipein(urb->pipe) ? "in" : "out");
stream = NULL;
}
- spin_unlock_irqrestore (&ehci->lock, flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
return stream;
}
@@ -1191,16 +1189,16 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
/* ehci_iso_sched ops can be ITD-only or SITD-only */
static struct ehci_iso_sched *
-iso_sched_alloc (unsigned packets, gfp_t mem_flags)
+iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{
struct ehci_iso_sched *iso_sched;
- int size = sizeof *iso_sched;
+ int size = sizeof(*iso_sched);
- size += packets * sizeof (struct ehci_iso_packet);
+ size += packets * sizeof(struct ehci_iso_packet);
iso_sched = kzalloc(size, mem_flags);
- if (likely (iso_sched != NULL)) {
- INIT_LIST_HEAD (&iso_sched->td_list);
- }
+ if (likely(iso_sched != NULL))
+ INIT_LIST_HEAD(&iso_sched->td_list);
+
return iso_sched;
}
@@ -1222,17 +1220,17 @@ itd_sched_init(
* when we fit new itds into the schedule.
*/
for (i = 0; i < urb->number_of_packets; i++) {
- struct ehci_iso_packet *uframe = &iso_sched->packet [i];
+ struct ehci_iso_packet *uframe = &iso_sched->packet[i];
unsigned length;
dma_addr_t buf;
u32 trans;
- length = urb->iso_frame_desc [i].length;
- buf = dma + urb->iso_frame_desc [i].offset;
+ length = urb->iso_frame_desc[i].length;
+ buf = dma + urb->iso_frame_desc[i].offset;
trans = EHCI_ISOC_ACTIVE;
trans |= buf & 0x0fff;
- if (unlikely (((i + 1) == urb->number_of_packets))
+ if (unlikely(((i + 1) == urb->number_of_packets))
&& !(urb->transfer_flags & URB_NO_INTERRUPT))
trans |= EHCI_ITD_IOC;
trans |= length << 16;
@@ -1241,26 +1239,26 @@ itd_sched_init(
/* might need to cross a buffer page within a uframe */
uframe->bufp = (buf & ~(u64)0x0fff);
buf += length;
- if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
+ if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
uframe->cross = 1;
}
}
static void
-iso_sched_free (
+iso_sched_free(
struct ehci_iso_stream *stream,
struct ehci_iso_sched *iso_sched
)
{
if (!iso_sched)
return;
- // caller must hold ehci->lock!
- list_splice (&iso_sched->td_list, &stream->free_list);
- kfree (iso_sched);
+ /* caller must hold ehci->lock! */
+ list_splice(&iso_sched->td_list, &stream->free_list);
+ kfree(iso_sched);
}
static int
-itd_urb_transaction (
+itd_urb_transaction(
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
@@ -1274,8 +1272,8 @@ itd_urb_transaction (
struct ehci_iso_sched *sched;
unsigned long flags;
- sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
- if (unlikely (sched == NULL))
+ sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
+ if (unlikely(sched == NULL))
return -ENOMEM;
itd_sched_init(ehci, sched, stream, urb);
@@ -1286,7 +1284,7 @@ itd_urb_transaction (
num_itds = urb->number_of_packets;
/* allocate/init ITDs */
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
for (i = 0; i < num_itds; i++) {
/*
@@ -1298,14 +1296,14 @@ itd_urb_transaction (
struct ehci_itd, itd_list);
if (itd->frame == ehci->now_frame)
goto alloc_itd;
- list_del (&itd->itd_list);
+ list_del(&itd->itd_list);
itd_dma = itd->itd_dma;
} else {
alloc_itd:
- spin_unlock_irqrestore (&ehci->lock, flags);
- itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
&itd_dma);
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
if (!itd) {
iso_sched_free(stream, sched);
spin_unlock_irqrestore(&ehci->lock, flags);
@@ -1313,12 +1311,12 @@ itd_urb_transaction (
}
}
- memset (itd, 0, sizeof *itd);
+ memset(itd, 0, sizeof(*itd));
itd->itd_dma = itd_dma;
itd->frame = NO_FRAME;
- list_add (&itd->itd_list, &sched->td_list);
+ list_add(&itd->itd_list, &sched->td_list);
}
- spin_unlock_irqrestore (&ehci->lock, flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
/* temporarily store schedule info in hcpriv */
urb->hcpriv = sched;
@@ -1385,7 +1383,7 @@ static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
}
static inline int
-itd_slot_ok (
+itd_slot_ok(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
unsigned uframe
@@ -1405,7 +1403,7 @@ itd_slot_ok (
}
static inline int
-sitd_slot_ok (
+sitd_slot_ok(
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
unsigned uframe,
@@ -1492,7 +1490,7 @@ sitd_slot_ok (
*/
static int
-iso_stream_schedule (
+iso_stream_schedule(
struct ehci_hcd *ehci,
struct urb *urb,
struct ehci_iso_stream *stream
@@ -1693,9 +1691,9 @@ itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
/* it's been recently zeroed */
itd->hw_next = EHCI_LIST_END(ehci);
- itd->hw_bufp [0] = stream->buf0;
- itd->hw_bufp [1] = stream->buf1;
- itd->hw_bufp [2] = stream->buf2;
+ itd->hw_bufp[0] = stream->buf0;
+ itd->hw_bufp[1] = stream->buf1;
+ itd->hw_bufp[2] = stream->buf2;
for (i = 0; i < 8; i++)
itd->index[i] = -1;
@@ -1712,13 +1710,13 @@ itd_patch(
u16 uframe
)
{
- struct ehci_iso_packet *uf = &iso_sched->packet [index];
+ struct ehci_iso_packet *uf = &iso_sched->packet[index];
unsigned pg = itd->pg;
- // BUG_ON (pg == 6 && uf->cross);
+ /* BUG_ON(pg == 6 && uf->cross); */
uframe &= 0x07;
- itd->index [uframe] = index;
+ itd->index[uframe] = index;
itd->hw_transaction[uframe] = uf->transaction;
itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
@@ -1726,7 +1724,7 @@ itd_patch(
itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
/* iso_frame_desc[].offset must be strictly increasing */
- if (unlikely (uf->cross)) {
+ if (unlikely(uf->cross)) {
u64 bufp = uf->bufp + 4096;
itd->pg = ++pg;
@@ -1736,7 +1734,7 @@ itd_patch(
}
static inline void
-itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
+itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
union ehci_shadow *prev = &ehci->pshadow[frame];
__hc32 *hw_p = &ehci->periodic[frame];
@@ -1757,7 +1755,7 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
itd->hw_next = *hw_p;
prev->itd = itd;
itd->frame = frame;
- wmb ();
+ wmb();
*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
}
@@ -1776,7 +1774,7 @@ static void itd_link_urb(
next_uframe = stream->next_uframe & (mod - 1);
- if (unlikely (list_empty(&stream->td_list)))
+ if (unlikely(list_empty(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
@@ -1792,16 +1790,16 @@ static void itd_link_urb(
packet < urb->number_of_packets;) {
if (itd == NULL) {
/* ASSERT: we have all necessary itds */
- // BUG_ON (list_empty (&iso_sched->td_list));
+ /* BUG_ON(list_empty(&iso_sched->td_list)); */
/* ASSERT: no itds for this endpoint in this uframe */
- itd = list_entry (iso_sched->td_list.next,
+ itd = list_entry(iso_sched->td_list.next,
struct ehci_itd, itd_list);
- list_move_tail (&itd->itd_list, &stream->td_list);
+ list_move_tail(&itd->itd_list, &stream->td_list);
itd->stream = stream;
itd->urb = urb;
- itd_init (ehci, stream, itd);
+ itd_init(ehci, stream, itd);
}
uframe = next_uframe & 0x07;
@@ -1823,7 +1821,7 @@ static void itd_link_urb(
stream->next_uframe = next_uframe;
/* don't need that schedule data any more */
- iso_sched_free (stream, iso_sched);
+ iso_sched_free(stream, iso_sched);
urb->hcpriv = stream;
++ehci->isoc_count;
@@ -1855,19 +1853,19 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
/* for each uframe with a packet */
for (uframe = 0; uframe < 8; uframe++) {
- if (likely (itd->index[uframe] == -1))
+ if (likely(itd->index[uframe] == -1))
continue;
urb_index = itd->index[uframe];
- desc = &urb->iso_frame_desc [urb_index];
+ desc = &urb->iso_frame_desc[urb_index];
- t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
- itd->hw_transaction [uframe] = 0;
+ t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
+ itd->hw_transaction[uframe] = 0;
/* report transfer status */
- if (unlikely (t & ISO_ERRS)) {
+ if (unlikely(t & ISO_ERRS)) {
urb->error_count++;
if (t & EHCI_ISOC_BUF_ERR)
- desc->status = usb_pipein (urb->pipe)
+ desc->status = usb_pipein(urb->pipe)
? -ENOSR /* hc couldn't read */
: -ECOMM; /* hc couldn't write */
else if (t & EHCI_ISOC_BABBLE)
@@ -1880,7 +1878,7 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
desc->actual_length = EHCI_ITD_LENGTH(t);
urb->actual_length += desc->actual_length;
}
- } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
+ } else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
desc->status = 0;
desc->actual_length = EHCI_ITD_LENGTH(t);
urb->actual_length += desc->actual_length;
@@ -1891,12 +1889,13 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
}
/* handle completion now? */
- if (likely ((urb_index + 1) != urb->number_of_packets))
+ if (likely((urb_index + 1) != urb->number_of_packets))
goto done;
- /* ASSERT: it's really the last itd for this urb
- list_for_each_entry (itd, &stream->td_list, itd_list)
- BUG_ON (itd->urb == urb);
+ /*
+ * ASSERT: it's really the last itd for this urb
+ * list_for_each_entry (itd, &stream->td_list, itd_list)
+ * BUG_ON(itd->urb == urb);
*/
/* give urb back to the driver; completion often (re)submits */
@@ -1936,7 +1935,7 @@ done:
/*-------------------------------------------------------------------------*/
-static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
+static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
gfp_t mem_flags)
{
int status = -EINVAL;
@@ -1944,37 +1943,37 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
struct ehci_iso_stream *stream;
/* Get iso_stream head */
- stream = iso_stream_find (ehci, urb);
- if (unlikely (stream == NULL)) {
- ehci_dbg (ehci, "can't get iso stream\n");
+ stream = iso_stream_find(ehci, urb);
+ if (unlikely(stream == NULL)) {
+ ehci_dbg(ehci, "can't get iso stream\n");
return -ENOMEM;
}
if (unlikely(urb->interval != stream->uperiod)) {
- ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
+ ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
stream->uperiod, urb->interval);
goto done;
}
#ifdef EHCI_URB_TRACE
- ehci_dbg (ehci,
+ ehci_dbg(ehci,
"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
__func__, urb->dev->devpath, urb,
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out",
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
urb->transfer_buffer_length,
urb->number_of_packets, urb->interval,
stream);
#endif
/* allocate ITDs w/o locking anything */
- status = itd_urb_transaction (stream, ehci, urb, mem_flags);
- if (unlikely (status < 0)) {
- ehci_dbg (ehci, "can't init itds\n");
+ status = itd_urb_transaction(stream, ehci, urb, mem_flags);
+ if (unlikely(status < 0)) {
+ ehci_dbg(ehci, "can't init itds\n");
goto done;
}
/* schedule ... need to lock */
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
@@ -1984,7 +1983,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
if (likely(status == 0)) {
- itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
+ itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
} else if (status > 0) {
status = 0;
ehci_urb_done(ehci, urb, 0);
@@ -1992,7 +1991,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
}
done_not_linked:
- spin_unlock_irqrestore (&ehci->lock, flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
done:
return status;
}
@@ -2022,13 +2021,13 @@ sitd_sched_init(
* when we fit new sitds into the schedule.
*/
for (i = 0; i < urb->number_of_packets; i++) {
- struct ehci_iso_packet *packet = &iso_sched->packet [i];
+ struct ehci_iso_packet *packet = &iso_sched->packet[i];
unsigned length;
dma_addr_t buf;
u32 trans;
- length = urb->iso_frame_desc [i].length & 0x03ff;
- buf = dma + urb->iso_frame_desc [i].offset;
+ length = urb->iso_frame_desc[i].length & 0x03ff;
+ buf = dma + urb->iso_frame_desc[i].offset;
trans = SITD_STS_ACTIVE;
if (((i + 1) == urb->number_of_packets)
@@ -2054,7 +2053,7 @@ sitd_sched_init(
}
static int
-sitd_urb_transaction (
+sitd_urb_transaction(
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
@@ -2067,14 +2066,14 @@ sitd_urb_transaction (
struct ehci_iso_sched *iso_sched;
unsigned long flags;
- iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
+ iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
if (iso_sched == NULL)
return -ENOMEM;
sitd_sched_init(ehci, iso_sched, stream, urb);
/* allocate/init sITDs */
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
for (i = 0; i < urb->number_of_packets; i++) {
/* NOTE: for now, we don't try to handle wraparound cases
@@ -2091,14 +2090,14 @@ sitd_urb_transaction (
struct ehci_sitd, sitd_list);
if (sitd->frame == ehci->now_frame)
goto alloc_sitd;
- list_del (&sitd->sitd_list);
+ list_del(&sitd->sitd_list);
sitd_dma = sitd->sitd_dma;
} else {
alloc_sitd:
- spin_unlock_irqrestore (&ehci->lock, flags);
- sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
&sitd_dma);
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
if (!sitd) {
iso_sched_free(stream, iso_sched);
spin_unlock_irqrestore(&ehci->lock, flags);
@@ -2106,17 +2105,17 @@ sitd_urb_transaction (
}
}
- memset (sitd, 0, sizeof *sitd);
+ memset(sitd, 0, sizeof(*sitd));
sitd->sitd_dma = sitd_dma;
sitd->frame = NO_FRAME;
- list_add (&sitd->sitd_list, &iso_sched->td_list);
+ list_add(&sitd->sitd_list, &iso_sched->td_list);
}
/* temporarily store schedule info in hcpriv */
urb->hcpriv = iso_sched;
urb->error_count = 0;
- spin_unlock_irqrestore (&ehci->lock, flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
return 0;
}
@@ -2131,8 +2130,8 @@ sitd_patch(
unsigned index
)
{
- struct ehci_iso_packet *uf = &iso_sched->packet [index];
- u64 bufp = uf->bufp;
+ struct ehci_iso_packet *uf = &iso_sched->packet[index];
+ u64 bufp;
sitd->hw_next = EHCI_LIST_END(ehci);
sitd->hw_fullspeed_ep = stream->address;
@@ -2152,14 +2151,14 @@ sitd_patch(
}
static inline void
-sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
+sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
- sitd->sitd_next = ehci->pshadow [frame];
- sitd->hw_next = ehci->periodic [frame];
- ehci->pshadow [frame].sitd = sitd;
+ sitd->sitd_next = ehci->pshadow[frame];
+ sitd->hw_next = ehci->periodic[frame];
+ ehci->pshadow[frame].sitd = sitd;
sitd->frame = frame;
- wmb ();
+ wmb();
ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
}
@@ -2196,13 +2195,13 @@ static void sitd_link_urb(
packet++) {
/* ASSERT: we have all necessary sitds */
- BUG_ON (list_empty (&sched->td_list));
+ BUG_ON(list_empty(&sched->td_list));
/* ASSERT: no itds for this endpoint in this frame */
- sitd = list_entry (sched->td_list.next,
+ sitd = list_entry(sched->td_list.next,
struct ehci_sitd, sitd_list);
- list_move_tail (&sitd->sitd_list, &stream->td_list);
+ list_move_tail(&sitd->sitd_list, &stream->td_list);
sitd->stream = stream;
sitd->urb = urb;
@@ -2215,7 +2214,7 @@ static void sitd_link_urb(
stream->next_uframe = next_uframe & (mod - 1);
/* don't need that schedule data any more */
- iso_sched_free (stream, sched);
+ iso_sched_free(stream, sched);
urb->hcpriv = stream;
++ehci->isoc_count;
@@ -2242,20 +2241,20 @@ static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
struct urb *urb = sitd->urb;
struct usb_iso_packet_descriptor *desc;
u32 t;
- int urb_index = -1;
+ int urb_index;
struct ehci_iso_stream *stream = sitd->stream;
struct usb_device *dev;
bool retval = false;
urb_index = sitd->index;
- desc = &urb->iso_frame_desc [urb_index];
+ desc = &urb->iso_frame_desc[urb_index];
t = hc32_to_cpup(ehci, &sitd->hw_results);
/* report transfer status */
if (unlikely(t & SITD_ERRS)) {
urb->error_count++;
if (t & SITD_STS_DBE)
- desc->status = usb_pipein (urb->pipe)
+ desc->status = usb_pipein(urb->pipe)
? -ENOSR /* hc couldn't read */
: -ECOMM; /* hc couldn't write */
else if (t & SITD_STS_BABBLE)
@@ -2275,9 +2274,10 @@ static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
if ((urb_index + 1) != urb->number_of_packets)
goto done;
- /* ASSERT: it's really the last sitd for this urb
- list_for_each_entry (sitd, &stream->td_list, sitd_list)
- BUG_ON (sitd->urb == urb);
+ /*
+ * ASSERT: it's really the last sitd for this urb
+ * list_for_each_entry (sitd, &stream->td_list, sitd_list)
+ * BUG_ON(sitd->urb == urb);
*/
/* give urb back to the driver; completion often (re)submits */
@@ -2316,7 +2316,7 @@ done:
}
-static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
+static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
gfp_t mem_flags)
{
int status = -EINVAL;
@@ -2324,35 +2324,35 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
struct ehci_iso_stream *stream;
/* Get iso_stream head */
- stream = iso_stream_find (ehci, urb);
+ stream = iso_stream_find(ehci, urb);
if (stream == NULL) {
- ehci_dbg (ehci, "can't get iso stream\n");
+ ehci_dbg(ehci, "can't get iso stream\n");
return -ENOMEM;
}
if (urb->interval != stream->ps.period) {
- ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
+ ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
stream->ps.period, urb->interval);
goto done;
}
#ifdef EHCI_URB_TRACE
- ehci_dbg (ehci,
+ ehci_dbg(ehci,
"submit %p dev%s ep%d%s-iso len %d\n",
urb, urb->dev->devpath,
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out",
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
urb->transfer_buffer_length);
#endif
/* allocate SITDs */
- status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
+ status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
if (status < 0) {
- ehci_dbg (ehci, "can't init sitds\n");
+ ehci_dbg(ehci, "can't init sitds\n");
goto done;
}
/* schedule ... need to lock */
- spin_lock_irqsave (&ehci->lock, flags);
+ spin_lock_irqsave(&ehci->lock, flags);
if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
status = -ESHUTDOWN;
goto done_not_linked;
@@ -2362,7 +2362,7 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
goto done_not_linked;
status = iso_stream_schedule(ehci, urb, stream);
if (likely(status == 0)) {
- sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
+ sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
} else if (status > 0) {
status = 0;
ehci_urb_done(ehci, urb, 0);
@@ -2370,7 +2370,7 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
}
done_not_linked:
- spin_unlock_irqrestore (&ehci->lock, flags);
+ spin_unlock_irqrestore(&ehci->lock, flags);
done:
return status;
}
@@ -2379,9 +2379,11 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
static void scan_isoc(struct ehci_hcd *ehci)
{
- unsigned uf, now_frame, frame;
- unsigned fmask = ehci->periodic_size - 1;
- bool modified, live;
+ unsigned uf, now_frame, frame;
+ unsigned fmask = ehci->periodic_size - 1;
+ bool modified, live;
+ union ehci_shadow q, *q_p;
+ __hc32 type, *hw_p;
/*
* When running, scan from last scan point up to "now"
@@ -2399,119 +2401,117 @@ static void scan_isoc(struct ehci_hcd *ehci)
ehci->now_frame = now_frame;
frame = ehci->last_iso_frame;
- for (;;) {
- union ehci_shadow q, *q_p;
- __hc32 type, *hw_p;
restart:
- /* scan each element in frame's queue for completions */
- q_p = &ehci->pshadow [frame];
- hw_p = &ehci->periodic [frame];
- q.ptr = q_p->ptr;
- type = Q_NEXT_TYPE(ehci, *hw_p);
- modified = false;
-
- while (q.ptr != NULL) {
- switch (hc32_to_cpu(ehci, type)) {
- case Q_TYPE_ITD:
- /* If this ITD is still active, leave it for
- * later processing ... check the next entry.
- * No need to check for activity unless the
- * frame is current.
- */
- if (frame == now_frame && live) {
- rmb();
- for (uf = 0; uf < 8; uf++) {
- if (q.itd->hw_transaction[uf] &
- ITD_ACTIVE(ehci))
- break;
- }
- if (uf < 8) {
- q_p = &q.itd->itd_next;
- hw_p = &q.itd->hw_next;
- type = Q_NEXT_TYPE(ehci,
- q.itd->hw_next);
- q = *q_p;
+ /* Scan each element in frame's queue for completions */
+ q_p = &ehci->pshadow[frame];
+ hw_p = &ehci->periodic[frame];
+ q.ptr = q_p->ptr;
+ type = Q_NEXT_TYPE(ehci, *hw_p);
+ modified = false;
+
+ while (q.ptr != NULL) {
+ switch (hc32_to_cpu(ehci, type)) {
+ case Q_TYPE_ITD:
+ /*
+ * If this ITD is still active, leave it for
+ * later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
+ */
+ if (frame == now_frame && live) {
+ rmb();
+ for (uf = 0; uf < 8; uf++) {
+ if (q.itd->hw_transaction[uf] &
+ ITD_ACTIVE(ehci))
break;
- }
}
-
- /* Take finished ITDs out of the schedule
- * and process them: recycle, maybe report
- * URB completion. HC won't cache the
- * pointer for much longer, if at all.
- */
- *q_p = q.itd->itd_next;
- if (!ehci->use_dummy_qh ||
- q.itd->hw_next != EHCI_LIST_END(ehci))
- *hw_p = q.itd->hw_next;
- else
- *hw_p = cpu_to_hc32(ehci,
- ehci->dummy->qh_dma);
- type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
- wmb();
- modified = itd_complete (ehci, q.itd);
- q = *q_p;
- break;
- case Q_TYPE_SITD:
- /* If this SITD is still active, leave it for
- * later processing ... check the next entry.
- * No need to check for activity unless the
- * frame is current.
- */
- if (((frame == now_frame) ||
- (((frame + 1) & fmask) == now_frame))
- && live
- && (q.sitd->hw_results &
- SITD_ACTIVE(ehci))) {
-
- q_p = &q.sitd->sitd_next;
- hw_p = &q.sitd->hw_next;
+ if (uf < 8) {
+ q_p = &q.itd->itd_next;
+ hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE(ehci,
- q.sitd->hw_next);
+ q.itd->hw_next);
q = *q_p;
break;
}
+ }
+
+ /*
+ * Take finished ITDs out of the schedule
+ * and process them: recycle, maybe report
+ * URB completion. HC won't cache the
+ * pointer for much longer, if at all.
+ */
+ *q_p = q.itd->itd_next;
+ if (!ehci->use_dummy_qh ||
+ q.itd->hw_next != EHCI_LIST_END(ehci))
+ *hw_p = q.itd->hw_next;
+ else
+ *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
+ type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
+ wmb();
+ modified = itd_complete(ehci, q.itd);
+ q = *q_p;
+ break;
+ case Q_TYPE_SITD:
+ /*
+ * If this SITD is still active, leave it for
+ * later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
+ */
+ if (((frame == now_frame) ||
+ (((frame + 1) & fmask) == now_frame))
+ && live
+ && (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
- /* Take finished SITDs out of the schedule
- * and process them: recycle, maybe report
- * URB completion.
- */
- *q_p = q.sitd->sitd_next;
- if (!ehci->use_dummy_qh ||
- q.sitd->hw_next != EHCI_LIST_END(ehci))
- *hw_p = q.sitd->hw_next;
- else
- *hw_p = cpu_to_hc32(ehci,
- ehci->dummy->qh_dma);
+ q_p = &q.sitd->sitd_next;
+ hw_p = &q.sitd->hw_next;
type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
- wmb();
- modified = sitd_complete (ehci, q.sitd);
q = *q_p;
break;
- default:
- ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
- type, frame, q.ptr);
- // BUG ();
- /* FALL THROUGH */
- case Q_TYPE_QH:
- case Q_TYPE_FSTN:
- /* End of the iTDs and siTDs */
- q.ptr = NULL;
- break;
}
- /* assume completion callbacks modify the queue */
- if (unlikely(modified && ehci->isoc_count > 0))
- goto restart;
- }
-
- /* Stop when we have reached the current frame */
- if (frame == now_frame)
+ /*
+ * Take finished SITDs out of the schedule
+ * and process them: recycle, maybe report
+ * URB completion.
+ */
+ *q_p = q.sitd->sitd_next;
+ if (!ehci->use_dummy_qh ||
+ q.sitd->hw_next != EHCI_LIST_END(ehci))
+ *hw_p = q.sitd->hw_next;
+ else
+ *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
+ type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
+ wmb();
+ modified = sitd_complete(ehci, q.sitd);
+ q = *q_p;
+ break;
+ default:
+ ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
+ type, frame, q.ptr);
+ /* BUG(); */
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
+ q.ptr = NULL;
break;
+ }
- /* The last frame may still have active siTDs */
- ehci->last_iso_frame = frame;
- frame = (frame + 1) & fmask;
+ /* Assume completion callbacks modify the queue */
+ if (unlikely(modified && ehci->isoc_count > 0))
+ goto restart;
}
+
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
+ return;
+
+ /* The last frame may still have active siTDs */
+ ehci->last_iso_frame = frame;
+ frame = (frame + 1) & fmask;
+
+ goto restart;
}
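
The scan_isoc() rewrite above is mostly mechanical: the body of the old for (;;) moves to the function's top level and iteration becomes a restart label plus goto, buying back one indent level for the deeply nested ITD/SITD cases. The control-flow shape, on a toy scanner:

#include <stdio.h>

static void scan(unsigned int now_frame, unsigned int fmask)
{
	unsigned int frame = 0;

restart:
	printf("scan frame %u\n", frame);	/* per-frame work goes here;
						 * completions "goto restart" */

	if (frame == now_frame)
		return;				/* reached "now": stop */

	frame = (frame + 1) & fmask;		/* wrap within the schedule */
	goto restart;
}

int main(void)
{
	scan(3, 7);				/* frames 0..3 of an 8-frame ring */
	return 0;
}
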
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index b7c5cfa37a83..a94ed677d937 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -287,8 +287,7 @@ static int st_ehci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
- struct platform_device *pdev =
- container_of(dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
int ret;
@@ -308,8 +307,7 @@ static int st_ehci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
- struct platform_device *pdev =
- container_of(dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(dev);
int err;
pinctrl_pm_select_default_state(dev);
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 424ac5d83714..69f50e6533a6 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -72,6 +72,7 @@ static unsigned event_delays_ns[] = {
1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */
1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */
2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */
+ 2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ACTIVE_UNLINK */
5 * NSEC_PER_MSEC, /* EHCI_HRTIMER_START_UNLINK_INTR */
6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */
10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */
@@ -237,6 +238,7 @@ static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
ehci->intr_unlink_wait_cycle))
break;
list_del_init(&qh->unlink_node);
+ qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
start_unlink_intr(ehci, qh);
}
@@ -360,7 +362,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
}
ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
- end_unlink_async(ehci);
+ end_iaa_cycle(ehci);
}
@@ -394,6 +396,7 @@ static void (*event_handlers[])(struct ehci_hcd *) = {
ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */
ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */
end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */
+ end_unlink_async, /* EHCI_HRTIMER_ACTIVE_UNLINK */
ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */
unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */
ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index ec61aedb0067..3f3b74aeca97 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -110,6 +110,7 @@ enum ehci_hrtimer_event {
EHCI_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
EHCI_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
EHCI_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ EHCI_HRTIMER_ACTIVE_UNLINK, /* Wait while unlinking an active QH */
EHCI_HRTIMER_START_UNLINK_INTR, /* Unlink empty interrupt QHs */
EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
EHCI_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
@@ -156,6 +157,8 @@ struct ehci_hcd { /* one per controller */
struct list_head async_idle;
unsigned async_unlink_cycle;
unsigned async_count; /* async activity count */
+ __hc32 old_current; /* Test for QH becoming */
+ __hc32 old_token; /* inactive during unlink */
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
@@ -185,7 +188,7 @@ struct ehci_hcd { /* one per controller */
struct ehci_sitd *last_sitd_to_free;
/* per root hub port */
- unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
+ unsigned long reset_done[EHCI_MAX_ROOT_PORTS];
/* bit vectors (one bit per port) */
unsigned long bus_suspended; /* which ports were
@@ -244,9 +247,9 @@ struct ehci_hcd { /* one per controller */
/* irq statistics */
#ifdef EHCI_STATS
struct ehci_stats stats;
-# define COUNT(x) do { (x)++; } while (0)
+# define COUNT(x) ((x)++)
#else
-# define COUNT(x) do {} while (0)
+# define COUNT(x)
#endif
/* debug files */
@@ -268,13 +271,13 @@ struct ehci_hcd { /* one per controller */
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
-static inline struct ehci_hcd *hcd_to_ehci (struct usb_hcd *hcd)
+static inline struct ehci_hcd *hcd_to_ehci(struct usb_hcd *hcd)
{
return (struct ehci_hcd *) (hcd->hcd_priv);
}
-static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
+static inline struct usb_hcd *ehci_to_hcd(struct ehci_hcd *ehci)
{
- return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
+ return container_of((void *) ehci, struct usb_hcd, hcd_priv);
}
/*-------------------------------------------------------------------------*/
@@ -316,25 +319,25 @@ struct ehci_qtd {
#define HALT_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_HALT)
#define STATUS_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_STS)
- __hc32 hw_buf [5]; /* see EHCI 3.5.4 */
- __hc32 hw_buf_hi [5]; /* Appendix B */
+ __hc32 hw_buf[5]; /* see EHCI 3.5.4 */
+ __hc32 hw_buf_hi[5]; /* Appendix B */
/* the rest is HCD-private */
dma_addr_t qtd_dma; /* qtd address */
struct list_head qtd_list; /* sw qtd list */
struct urb *urb; /* qtd's urb */
size_t length; /* length of buffer */
-} __attribute__ ((aligned (32)));
+} __aligned(32);
/* mask NakCnt+T in qh->hw_alt_next */
-#define QTD_MASK(ehci) cpu_to_hc32 (ehci, ~0x1f)
+#define QTD_MASK(ehci) cpu_to_hc32(ehci, ~0x1f)
-#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
/*-------------------------------------------------------------------------*/
/* type tag from {qh,itd,sitd,fstn}->hw_next */
-#define Q_NEXT_TYPE(ehci,dma) ((dma) & cpu_to_hc32(ehci, 3 << 1))
+#define Q_NEXT_TYPE(ehci, dma) ((dma) & cpu_to_hc32(ehci, 3 << 1))
/*
* Now the following defines are not converted using the
@@ -350,7 +353,8 @@ struct ehci_qtd {
#define Q_TYPE_FSTN (3 << 1)
/* next async queue entry, or pointer to interrupt/periodic QH */
-#define QH_NEXT(ehci,dma) (cpu_to_hc32(ehci, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+#define QH_NEXT(ehci, dma) \
+ (cpu_to_hc32(ehci, (((u32) dma) & ~0x01f) | Q_TYPE_QH))
/* for periodic/async schedules and qtd lists, mark end of list */
#define EHCI_LIST_END(ehci) cpu_to_hc32(ehci, 1) /* "null pointer" to hw */
@@ -405,9 +409,9 @@ struct ehci_qh_hw {
__hc32 hw_qtd_next;
__hc32 hw_alt_next;
__hc32 hw_token;
- __hc32 hw_buf [5];
- __hc32 hw_buf_hi [5];
-} __attribute__ ((aligned(32)));
+ __hc32 hw_buf[5];
+ __hc32 hw_buf_hi[5];
+} __aligned(32);
struct ehci_qh {
struct ehci_qh_hw *hw; /* Must come first */
@@ -432,13 +436,19 @@ struct ehci_qh {
u8 xacterrs; /* XactErr retry counter */
#define QH_XACTERR_MAX 32 /* XactErr retry limit */
+ u8 unlink_reason;
+#define QH_UNLINK_HALTED 0x01 /* Halt flag is set */
+#define QH_UNLINK_SHORT_READ 0x02 /* Recover from a short read */
+#define QH_UNLINK_DUMMY_OVERLAY 0x04 /* QH overlaid the dummy TD */
+#define QH_UNLINK_SHUTDOWN 0x08 /* The HC isn't running */
+#define QH_UNLINK_QUEUE_EMPTY 0x10 /* Reached end of the queue */
+#define QH_UNLINK_REQUESTED 0x20 /* Disable, reset, or dequeue */
+
u8 gap_uf; /* uframes split/csplit gap */
unsigned is_out:1; /* bulk or intr OUT */
unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
unsigned dequeue_during_giveback:1;
- unsigned exception:1; /* got a fault, or an unlink
- was requested */
unsigned should_be_inactive:1;
};
@@ -462,7 +472,7 @@ struct ehci_iso_sched {
struct list_head td_list;
unsigned span;
unsigned first_packet;
- struct ehci_iso_packet packet [0];
+ struct ehci_iso_packet packet[0];
};
/*
@@ -510,7 +520,7 @@ struct ehci_iso_stream {
struct ehci_itd {
/* first part defined by EHCI spec */
__hc32 hw_next; /* see EHCI 3.3.1 */
- __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */
+ __hc32 hw_transaction[8]; /* see EHCI 3.3.2 */
#define EHCI_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
#define EHCI_ISOC_BUF_ERR (1<<30) /* Data buffer error */
#define EHCI_ISOC_BABBLE (1<<29) /* babble detected */
@@ -520,8 +530,8 @@ struct ehci_itd {
#define ITD_ACTIVE(ehci) cpu_to_hc32(ehci, EHCI_ISOC_ACTIVE)
- __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */
- __hc32 hw_bufp_hi [7]; /* Appendix B */
+ __hc32 hw_bufp[7]; /* see EHCI 3.3.3 */
+ __hc32 hw_bufp_hi[7]; /* Appendix B */
/* the rest is HCD-private */
dma_addr_t itd_dma; /* for this itd */
@@ -535,7 +545,7 @@ struct ehci_itd {
unsigned frame; /* where scheduled */
unsigned pg;
unsigned index[8]; /* in urb->iso_frame_desc */
-} __attribute__ ((aligned (32)));
+} __aligned(32);
/*-------------------------------------------------------------------------*/
@@ -554,7 +564,7 @@ struct ehci_sitd {
__hc32 hw_results; /* EHCI table 3-11 */
#define SITD_IOC (1 << 31) /* interrupt on completion */
#define SITD_PAGE (1 << 30) /* buffer 0/1 */
-#define SITD_LENGTH(x) (0x3ff & ((x)>>16))
+#define SITD_LENGTH(x) (((x) >> 16) & 0x3ff)
#define SITD_STS_ACTIVE (1 << 7) /* HC may execute this */
#define SITD_STS_ERR (1 << 6) /* error from TT */
#define SITD_STS_DBE (1 << 5) /* data buffer error (in HC) */
@@ -565,9 +575,9 @@ struct ehci_sitd {
#define SITD_ACTIVE(ehci) cpu_to_hc32(ehci, SITD_STS_ACTIVE)
- __hc32 hw_buf [2]; /* EHCI table 3-12 */
+ __hc32 hw_buf[2]; /* EHCI table 3-12 */
__hc32 hw_backpointer; /* EHCI table 3-13 */
- __hc32 hw_buf_hi [2]; /* Appendix B */
+ __hc32 hw_buf_hi[2]; /* Appendix B */
/* the rest is HCD-private */
dma_addr_t sitd_dma;
@@ -578,7 +588,7 @@ struct ehci_sitd {
struct list_head sitd_list; /* list of stream's sitds */
unsigned frame;
unsigned index;
-} __attribute__ ((aligned (32)));
+} __aligned(32);
/*-------------------------------------------------------------------------*/
@@ -598,7 +608,7 @@ struct ehci_fstn {
/* the rest is HCD-private */
dma_addr_t fstn_dma;
union ehci_shadow fstn_next; /* ptr to periodic q entry */
-} __attribute__ ((aligned (32)));
+} __aligned(32);
/*-------------------------------------------------------------------------*/
@@ -634,10 +644,10 @@ struct ehci_tt {
/* Prepare the PORTSC wakeup flags during controller suspend/resume */
#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup) \
- ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup);
+ ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup)
#define ehci_prepare_ports_for_controller_resume(ehci) \
- ehci_adjust_port_wakeup_flags(ehci, false, false);
+ ehci_adjust_port_wakeup_flags(ehci, false, false)
/*-------------------------------------------------------------------------*/
@@ -731,7 +741,7 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
#endif
static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
- __u32 __iomem * regs)
+ __u32 __iomem *regs)
{
#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
return ehci_big_endian_mmio(ehci) ?
@@ -806,7 +816,7 @@ static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
#define ehci_big_endian_desc(e) ((e)->big_endian_desc)
/* cpu to ehci */
-static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
+static inline __hc32 cpu_to_hc32(const struct ehci_hcd *ehci, const u32 x)
{
return ehci_big_endian_desc(ehci)
? (__force __hc32)cpu_to_be32(x)
@@ -814,14 +824,14 @@ static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
}
/* ehci to cpu */
-static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
+static inline u32 hc32_to_cpu(const struct ehci_hcd *ehci, const __hc32 x)
{
return ehci_big_endian_desc(ehci)
? be32_to_cpu((__force __be32)x)
: le32_to_cpu((__force __le32)x);
}
-static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
+static inline u32 hc32_to_cpup(const struct ehci_hcd *ehci, const __hc32 *x)
{
return ehci_big_endian_desc(ehci)
? be32_to_cpup((__force __be32 *)x)
@@ -831,18 +841,18 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
#else
/* cpu to ehci */
-static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
+static inline __hc32 cpu_to_hc32(const struct ehci_hcd *ehci, const u32 x)
{
return cpu_to_le32(x);
}
/* ehci to cpu */
-static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
+static inline u32 hc32_to_cpu(const struct ehci_hcd *ehci, const __hc32 x)
{
return le32_to_cpu(x);
}
-static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
+static inline u32 hc32_to_cpup(const struct ehci_hcd *ehci, const __hc32 *x)
{
return le32_to_cpup(x);
}
@@ -852,18 +862,13 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
/*-------------------------------------------------------------------------*/
#define ehci_dbg(ehci, fmt, args...) \
- dev_dbg(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+ dev_dbg(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
#define ehci_err(ehci, fmt, args...) \
- dev_err(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+ dev_err(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
#define ehci_info(ehci, fmt, args...) \
- dev_info(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+ dev_info(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
#define ehci_warn(ehci, fmt, args...) \
- dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
-
-
-#ifndef CONFIG_DYNAMIC_DEBUG
-#define STUB_DEBUG_FILES
-#endif
+ dev_warn(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
/*-------------------------------------------------------------------------*/
@@ -883,12 +888,10 @@ extern int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
u32 mask, u32 done, int usec);
extern int ehci_reset(struct ehci_hcd *ehci);
-#ifdef CONFIG_PM
extern int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup);
extern int ehci_resume(struct usb_hcd *hcd, bool force_reset);
extern void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
bool suspending, bool do_wakeup);
-#endif /* CONFIG_PM */
extern int ehci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength);
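
A hedged sketch of what the new unlink_reason bitmask buys over the old single exception:1 flag: each caller ORs in its specific cause, so later code can tell a fault apart from a merely empty queue. qh_must_unlink() below is a hypothetical helper compiled against the new ehci.h fields, not something this patch adds:

static bool qh_must_unlink(const struct ehci_qh *qh)
{
        /* a fault or an explicit request always forces the unlink */
        if (qh->unlink_reason & (QH_UNLINK_HALTED | QH_UNLINK_REQUESTED))
                return true;

        /* an empty queue alone may be handled lazily by the hrtimer */
        return false;
}
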
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 2341af4f3490..360a5e95abca 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -2267,7 +2267,7 @@ static unsigned qh_completions(struct fotg210_hcd *fotg210,
struct fotg210_qh *qh)
{
struct fotg210_qtd *last, *end = qh->dummy;
- struct list_head *entry, *tmp;
+ struct fotg210_qtd *qtd, *tmp;
int last_status;
int stopped;
unsigned count = 0;
@@ -2301,12 +2301,10 @@ rescan:
* then let the queue advance.
* if queue is stopped, handles unlinks.
*/
- list_for_each_safe(entry, tmp, &qh->qtd_list) {
- struct fotg210_qtd *qtd;
+ list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
struct urb *urb;
u32 token = 0;
- qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
urb = qtd->urb;
/* clean up any state from previous QTD ...*/
@@ -2544,14 +2542,11 @@ retry_xacterr:
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free(struct fotg210_hcd *fotg210, struct urb *urb,
- struct list_head *qtd_list)
+ struct list_head *head)
{
- struct list_head *entry, *temp;
-
- list_for_each_safe(entry, temp, qtd_list) {
- struct fotg210_qtd *qtd;
+ struct fotg210_qtd *qtd, *temp;
- qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
+ list_for_each_entry_safe(qtd, temp, head, qtd_list) {
list_del(&qtd->qtd_list);
fotg210_qtd_free(fotg210, qtd);
}
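
The same mechanical conversion recurs below in oxu210hp-hcd.c, max3421-hcd.c and u132-hcd.c: list_for_each_safe() plus a per-iteration list_entry() collapses into list_for_each_entry_safe(), whose lookahead pointer keeps list_del() on the current entry safe. Generic sketch with illustrative types:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head node;
        int payload;
};

static void free_all(struct list_head *head)
{
        struct item *it, *tmp;

        /* tmp caches it->node.next, so list_del(&it->node) is safe */
        list_for_each_entry_safe(it, tmp, head, node) {
                list_del(&it->node);
                kfree(it);
        }
}
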
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 0c382652a399..1044b0f9d656 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
struct fsl_usb2_dev_data {
char *dr_mode; /* controller mode */
@@ -96,7 +97,11 @@ static struct platform_device *fsl_usb2_device_register(
pdev->dev.parent = &ofdev->dev;
pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
- *pdev->dev.dma_mask = *ofdev->dev.dma_mask;
+
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
+ else
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (retval)
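
A hedged reading of the dma_mask hunk above: a freshly created platform device may have no dma_mask storage at all, in which case the parent's coherent-mask storage is borrowed; otherwise the existing mask is narrowed to 32 bits. Simplified sketch, not the exact fsl-mph-dr-of code:

#include <linux/dma-mapping.h>

static int inherit_dma_mask(struct device *child, struct device *parent)
{
        if (!child->dma_mask) {
                /* borrow storage; both masks then move together */
                child->dma_mask = &parent->coherent_dma_mask;
                return 0;
        }
        return dma_set_mask(child, DMA_BIT_MASK(32));
}
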
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index bd98706d1ce9..c369c29e496d 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -797,19 +797,16 @@ max3421_check_unlink(struct usb_hcd *hcd)
{
struct spi_device *spi = to_spi_device(hcd->self.controller);
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
- struct list_head *pos, *upos, *next_upos;
struct max3421_ep *max3421_ep;
struct usb_host_endpoint *ep;
- struct urb *urb;
+ struct urb *urb, *next;
unsigned long flags;
int retval = 0;
spin_lock_irqsave(&max3421_hcd->lock, flags);
- list_for_each(pos, &max3421_hcd->ep_list) {
- max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+ list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
ep = max3421_ep->ep;
- list_for_each_safe(upos, next_upos, &ep->urb_list) {
- urb = container_of(upos, struct urb, urb_list);
+ list_for_each_entry_safe(urb, next, &ep->urb_list, urb_list) {
if (urb->unlinked) {
retval = 1;
dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
@@ -1184,22 +1181,19 @@ dump_eps(struct usb_hcd *hcd)
struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
struct max3421_ep *max3421_ep;
struct usb_host_endpoint *ep;
- struct list_head *pos, *upos;
char ubuf[512], *dp, *end;
unsigned long flags;
struct urb *urb;
int epnum, ret;
spin_lock_irqsave(&max3421_hcd->lock, flags);
- list_for_each(pos, &max3421_hcd->ep_list) {
- max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+ list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
ep = max3421_ep->ep;
dp = ubuf;
end = dp + sizeof(ubuf);
*dp = '\0';
- list_for_each(upos, &ep->urb_list) {
- urb = container_of(upos, struct urb, urb_list);
+ list_for_each_entry(urb, &ep->urb_list, urb_list) {
ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
usb_pipetype(urb->pipe),
usb_urb_dir_in(urb) ? "IN" : "OUT",
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 8c6e15bd6ff0..d177372bb357 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -24,8 +24,6 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
-#include <asm/gpio.h>
-
#include "ohci.h"
#define valid_port(index) ((index) >= 0 && (index) < AT91_MAX_USBH_PORTS)
@@ -583,9 +581,7 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
-static int
+static int __maybe_unused
ohci_hcd_at91_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -630,7 +626,8 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
return ret;
}
-static int ohci_hcd_at91_drv_resume(struct device *dev)
+static int __maybe_unused
+ohci_hcd_at91_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ohci_at91_priv *ohci_at91 = hcd_to_ohci_at91_priv(hcd);
@@ -643,7 +640,6 @@ static int ohci_hcd_at91_drv_resume(struct device *dev)
ohci_resume(hcd, false);
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(ohci_hcd_at91_pm_ops, ohci_hcd_at91_drv_suspend,
ohci_hcd_at91_drv_resume);
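
Dropping the #ifdef CONFIG_PM works because SIMPLE_DEV_PM_OPS() only references its callbacks when CONFIG_PM_SLEEP is enabled; tagging them __maybe_unused then silences the defined-but-unused warning in !PM builds. Minimal sketch with a hypothetical driver name:

#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
        return 0;       /* quiesce the hardware here */
}

static int __maybe_unused demo_resume(struct device *dev)
{
        return 0;       /* re-enable the hardware here */
}

/* expands to an empty dev_pm_ops when CONFIG_PM_SLEEP is off */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);
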
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index cfa94275c52c..b7d4756232ae 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -22,7 +22,6 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/i2c.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -32,25 +31,9 @@
#include "ohci.h"
-
#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/io.h>
-
-#include <mach/platform.h>
-#include <mach/irqs.h>
#define USB_CONFIG_BASE 0x31020000
-#define PWRMAN_BASE 0x40004000
-
-#define USB_CTRL IO_ADDRESS(PWRMAN_BASE + 0x64)
-
-/* USB_CTRL bit defines */
-#define USB_SLAVE_HCLK_EN (1 << 24)
-#define USB_DEV_NEED_CLK_EN (1 << 22)
-#define USB_HOST_NEED_CLK_EN (1 << 21)
-#define PAD_CONTROL_LAST_DRIVEN (1 << 19)
-
#define USB_OTG_STAT_CONTROL IO_ADDRESS(USB_CONFIG_BASE + 0x110)
/* USB_OTG_STAT_CONTROL bit defines */
@@ -75,9 +58,7 @@ static struct i2c_client *isp1301_i2c_client;
extern int usb_disabled(void);
-static struct clk *usb_pll_clk;
-static struct clk *usb_dev_clk;
-static struct clk *usb_otg_clk;
+static struct clk *usb_host_clk;
static void isp1301_configure_lpc32xx(void)
{
@@ -117,9 +98,6 @@ static void isp1301_configure_lpc32xx(void)
i2c_smbus_write_byte_data(isp1301_i2c_client,
ISP1301_I2C_INTERRUPT_RISING | ISP1301_I2C_REG_CLEAR_ADDR, ~0);
- /* Enable usb_need_clk clock after transceiver is initialized */
- __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
-
printk(KERN_INFO "ISP1301 Vendor ID : 0x%04x\n",
i2c_smbus_read_word_data(isp1301_i2c_client, 0x00));
printk(KERN_INFO "ISP1301 Product ID : 0x%04x\n",
@@ -192,59 +170,20 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
goto fail_disable;
}
- /* Enable AHB slave USB clock, needed for further USB clock control */
- __raw_writel(USB_SLAVE_HCLK_EN | PAD_CONTROL_LAST_DRIVEN, USB_CTRL);
-
- /* Enable USB PLL */
- usb_pll_clk = devm_clk_get(&pdev->dev, "ck_pll5");
- if (IS_ERR(usb_pll_clk)) {
- dev_err(&pdev->dev, "failed to acquire USB PLL\n");
- ret = PTR_ERR(usb_pll_clk);
+ /* Enable USB host clock */
+ usb_host_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usb_host_clk)) {
+ dev_err(&pdev->dev, "failed to acquire USB OHCI clock\n");
+ ret = PTR_ERR(usb_host_clk);
goto fail_disable;
}
- ret = clk_prepare_enable(usb_pll_clk);
+ ret = clk_prepare_enable(usb_host_clk);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to start USB PLL\n");
+ dev_err(&pdev->dev, "failed to start USB OHCI clock\n");
goto fail_disable;
}
- ret = clk_set_rate(usb_pll_clk, 48000);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to set USB clock rate\n");
- goto fail_rate;
- }
-
- /* Enable USB device clock */
- usb_dev_clk = devm_clk_get(&pdev->dev, "ck_usbd");
- if (IS_ERR(usb_dev_clk)) {
- dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
- ret = PTR_ERR(usb_dev_clk);
- goto fail_rate;
- }
-
- ret = clk_prepare_enable(usb_dev_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
- goto fail_rate;
- }
-
- /* Enable USB otg clocks */
- usb_otg_clk = devm_clk_get(&pdev->dev, "ck_usb_otg");
- if (IS_ERR(usb_otg_clk)) {
- dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
- ret = PTR_ERR(usb_otg_clk);
- goto fail_otg;
- }
-
- __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
-
- ret = clk_prepare_enable(usb_otg_clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
- goto fail_otg;
- }
-
isp1301_configure();
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
@@ -283,11 +222,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
fail_resource:
usb_put_hcd(hcd);
fail_hcd:
- clk_disable_unprepare(usb_otg_clk);
-fail_otg:
- clk_disable_unprepare(usb_dev_clk);
-fail_rate:
- clk_disable_unprepare(usb_pll_clk);
+ clk_disable_unprepare(usb_host_clk);
fail_disable:
isp1301_i2c_client = NULL;
return ret;
@@ -300,9 +235,7 @@ static int ohci_hcd_nxp_remove(struct platform_device *pdev)
usb_remove_hcd(hcd);
ohci_nxp_stop_hc();
usb_put_hcd(hcd);
- clk_disable_unprepare(usb_otg_clk);
- clk_disable_unprepare(usb_dev_clk);
- clk_disable_unprepare(usb_pll_clk);
+ clk_disable_unprepare(usb_host_clk);
isp1301_i2c_client = NULL;
return 0;
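
With the LPC32xx clock driver now modeling the USB clock tree, the three hand-managed clocks collapse into one. The resulting probe-side idiom, sketched with hypothetical names:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *demo_usb_clk;

static int demo_enable_usb_clock(struct device *dev)
{
        /* NULL ID: the device's (only) clock as wired up in DT */
        demo_usb_clk = devm_clk_get(dev, NULL);
        if (IS_ERR(demo_usb_clk))
                return PTR_ERR(demo_usb_clk);

        return clk_prepare_enable(demo_usb_clk);
}
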
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index c2669f185f65..ae1c988da146 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -310,8 +310,7 @@ static int ohci_platform_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev->platform_data;
- struct platform_device *pdev =
- container_of(dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
int ret;
@@ -329,8 +328,7 @@ static int ohci_platform_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
- struct platform_device *pdev =
- container_of(dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(dev);
if (pdata->power_on) {
int err = pdata->power_on(pdev);
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index e8c006e7a960..a667cf2d5788 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -435,7 +435,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
pr_err("no resource of IORESOURCE_IRQ");
- return -ENXIO;
+ return irq;
}
usb_clk = devm_clk_get(&pdev->dev, NULL);
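
The -ENXIO change is the usual errno-propagation idiom: platform_get_irq() already returns a meaningful negative errno (possibly -EPROBE_DEFER), and replacing it would hide the real cause. Sketch:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0)
                return irq;     /* may be -EPROBE_DEFER; don't replace it */

        /* ... request_irq(irq, ...) and the rest of probe ... */
        return 0;
}
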
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index df9028e0d9b4..acf2eb2a5676 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -270,8 +270,7 @@ static int st_ohci_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev->platform_data;
- struct platform_device *pdev =
- container_of(dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(dev);
bool do_wakeup = device_may_wakeup(dev);
int ret;
@@ -289,8 +288,7 @@ static int st_ohci_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
- struct platform_device *pdev =
- container_of(dev, struct platform_device, dev);
+ struct platform_device *pdev = to_platform_device(dev);
int err;
if (pdata->power_on) {
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index bc462288cfb0..37f1725e7a46 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -735,10 +735,8 @@ extern void ohci_init_driver(struct hc_driver *drv,
const struct ohci_driver_overrides *over);
extern int ohci_restart(struct ohci_hcd *ohci);
extern int ohci_setup(struct usb_hcd *hcd);
-#ifdef CONFIG_PM
extern int ohci_suspend(struct usb_hcd *hcd, bool do_wakeup);
extern int ohci_resume(struct usb_hcd *hcd, bool hibernated);
-#endif
extern int ohci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength);
extern int ohci_hub_status_data(struct usb_hcd *hcd, char *buf);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index bc74aca8a54c..4e4d601af35c 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -981,7 +981,7 @@ static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
struct ehci_qtd *last = NULL, *end = qh->dummy;
- struct list_head *entry, *tmp;
+ struct ehci_qtd *qtd, *tmp;
int stopped;
unsigned count = 0;
int do_status = 0;
@@ -1006,12 +1006,10 @@ static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
* then let the queue advance.
* if queue is stopped, handles unlinks.
*/
- list_for_each_safe(entry, tmp, &qh->qtd_list) {
- struct ehci_qtd *qtd;
+ list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
struct urb *urb;
u32 token = 0;
- qtd = list_entry(entry, struct ehci_qtd, qtd_list);
urb = qtd->urb;
/* Clean up any state from previous QTD ...*/
@@ -1174,14 +1172,11 @@ halt:
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free(struct oxu_hcd *oxu,
- struct urb *urb, struct list_head *qtd_list)
+ struct urb *urb, struct list_head *head)
{
- struct list_head *entry, *temp;
-
- list_for_each_safe(entry, temp, qtd_list) {
- struct ehci_qtd *qtd;
+ struct ehci_qtd *qtd, *temp;
- qtd = list_entry(entry, struct ehci_qtd, qtd_list);
+ list_for_each_entry_safe(qtd, temp, head, qtd_list) {
list_del(&qtd->qtd_list);
oxu_qtd_free(oxu, qtd);
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 26cb8c861e6e..35af36253440 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -992,7 +992,7 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if ((ext_cap_offset + sizeof(val)) > len) {
/* We're reading garbage from the controller */
dev_warn(&pdev->dev, "xHCI controller failing to respond");
- return;
+ goto iounmap;
}
val = readl(base + ext_cap_offset);
@@ -1055,6 +1055,7 @@ hc_init:
XHCI_MAX_HALT_USEC, val);
}
+iounmap:
iounmap(base);
}
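
The pci-quirks change is the classic goto-unwind fix: once ioremap() has succeeded, every exit must funnel through iounmap(), so the early return becomes a goto. Hedged sketch with a hypothetical failure check:

#include <linux/io.h>
#include <linux/types.h>

static void demo_handoff(void __iomem *base, bool wedged)
{
        if (!base)
                return;         /* nothing mapped yet, plain return is fine */

        if (wedged)
                goto unmap;     /* mapped: must not return directly */

        /* ... normal handoff work on the mapped registers ... */

unmap:
        iounmap(base);
}
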
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 4cbd0633c5c2..bfa7fa3d2eea 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2099,16 +2099,13 @@ static void r8a66597_check_detect_child(struct r8a66597 *r8a66597,
memset(now_map, 0, sizeof(now_map));
- list_for_each_entry(bus, &usb_bus_list, bus_list) {
- if (!bus->root_hub)
- continue;
-
- if (bus->busnum != hcd->self.busnum)
- continue;
-
+ mutex_lock(&usb_bus_idr_lock);
+ bus = idr_find(&usb_bus_idr, hcd->self.busnum);
+ if (bus && bus->root_hub) {
collect_usb_address_map(bus->root_hub, now_map);
update_usb_address_map(r8a66597, bus->root_hub, now_map);
}
+ mutex_unlock(&usb_bus_idr_lock);
}
static int r8a66597_hub_status_data(struct usb_hcd *hcd, char *buf)
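
The USB core replaced usb_bus_list with an IDR keyed by bus number, so the old list walk shrinks to one idr_find() under usb_bus_idr_lock. The shape of the new lookup, with visit() as a hypothetical callback:

#include <linux/idr.h>
#include <linux/usb.h>

static void demo_for_busnum(int busnum, void (*visit)(struct usb_device *))
{
        struct usb_bus *bus;

        mutex_lock(&usb_bus_idr_lock);
        bus = idr_find(&usb_bus_idr, busnum);   /* no list walk needed */
        if (bus && bus->root_hub)
                visit(bus->root_hub);
        mutex_unlock(&usb_bus_idr_lock);
}
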
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 05c85c7baf84..43d52931b5bf 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -1309,13 +1309,9 @@ static void u132_hcd_ring_work_scheduler(struct work_struct *work)
u132_ring_put_kref(u132, ring);
return;
} else if (ring->curr_endp) {
- struct u132_endp *last_endp = ring->curr_endp;
- struct list_head *scan;
- struct list_head *head = &last_endp->endp_ring;
+ struct u132_endp *endp, *last_endp = ring->curr_endp;
unsigned long wakeup = 0;
- list_for_each(scan, head) {
- struct u132_endp *endp = list_entry(scan,
- struct u132_endp, endp_ring);
+ list_for_each_entry(endp, &last_endp->endp_ring, endp_ring) {
if (endp->queue_next == endp->queue_last) {
} else if ((endp->delayed == 0)
|| time_after_eq(jiffies, endp->jiffies)) {
@@ -2393,14 +2389,12 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
static int dequeue_from_overflow_chain(struct u132 *u132,
struct u132_endp *endp, struct urb *urb)
{
- struct list_head *scan;
- struct list_head *head = &endp->urb_more;
- list_for_each(scan, head) {
- struct u132_urbq *urbq = list_entry(scan, struct u132_urbq,
- urb_more);
+ struct u132_urbq *urbq;
+
+ list_for_each_entry(urbq, &endp->urb_more, urb_more) {
if (urbq->urb == urb) {
struct usb_hcd *hcd = u132_to_hcd(u132);
- list_del(scan);
+ list_del(&urbq->urb_more);
endp->queue_size -= 1;
urb->error_count = 0;
usb_hcd_giveback_urb(hcd, urb, 0);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index b30b4ce294d3..d61fcc48099e 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -50,14 +50,18 @@ static u8 usb_bos_descriptor [] = {
0x00, /* bU1DevExitLat, set later. */
0x00, 0x00, /* __le16 bU2DevExitLat, set later. */
/* Second device capability, SuperSpeedPlus */
- 0x0c, /* bLength 12, will be adjusted later */
+ 0x1c, /* bLength 28, will be adjusted later */
USB_DT_DEVICE_CAPABILITY, /* Device Capability */
USB_SSP_CAP_TYPE, /* bDevCapabilityType SUPERSPEED_PLUS */
0x00, /* bReserved 0 */
- 0x00, 0x00, 0x00, 0x00, /* bmAttributes, get from xhci psic */
- 0x00, 0x00, /* wFunctionalitySupport */
+ 0x23, 0x00, 0x00, 0x00, /* bmAttributes, SSAC=3 SSIC=1 */
+ 0x01, 0x00, /* wFunctionalitySupport */
0x00, 0x00, /* wReserved 0 */
- /* Sublink Speed Attributes are added in xhci_create_usb3_bos_desc() */
+ /* Default Sublink Speed Attributes, overwrite if custom PSI exists */
+ 0x34, 0x00, 0x05, 0x00, /* 5Gbps, symmetric, rx, ID = 4 */
+ 0xb4, 0x00, 0x05, 0x00, /* 5Gbps, symmetric, tx, ID = 4 */
+ 0x35, 0x40, 0x0a, 0x00, /* 10Gbps, SSP, symmetric, rx, ID = 5 */
+ 0xb5, 0x40, 0x0a, 0x00, /* 10Gbps, SSP, symmetric, tx, ID = 5 */
};
static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
@@ -72,10 +76,14 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
/* does xhci support USB 3.1 Enhanced SuperSpeed */
- if (xhci->usb3_rhub.min_rev >= 0x01 && xhci->usb3_rhub.psi_uid_count) {
- /* two SSA entries for each unique PSI ID, one RX and one TX */
- ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
- ssa_size = ssa_count * sizeof(u32);
+ if (xhci->usb3_rhub.min_rev >= 0x01) {
+ /* does xhci provide a PSI table for SSA speed attributes? */
+ if (xhci->usb3_rhub.psi_count) {
+ /* two SSA entries for each unique PSI ID, RX and TX */
+ ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
+ ssa_size = ssa_count * sizeof(u32);
+ ssp_cap_size -= 16; /* skip copying the default SSA */
+ }
desc_size += ssp_cap_size;
usb3_1 = true;
}
@@ -102,7 +110,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
}
- if (usb3_1) {
+ /* If PSI table exists, add the custom speed attributes from it */
+ if (usb3_1 && xhci->usb3_rhub.psi_count) {
u32 ssp_cap_base, bm_attrib, psi;
int offset;
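
As a cross-check, decoding the first default SSA dword above (bytes 0x34 0x00 0x05 0x00, i.e. 0x00050034) against my reading of the USB 3.1 sublink-speed-attribute layout reproduces the "5Gbps, symmetric, rx, ID = 4" comment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t ssa = 0x00050034;

        printf("SSID     = %u\n", (unsigned)(ssa & 0xf));        /* 4: attribute ID */
        printf("LSE      = %u\n", (unsigned)((ssa >> 4) & 0x3)); /* 3: exponent, Gb/s */
        printf("ST       = %u\n", (unsigned)((ssa >> 6) & 0x3)); /* 0: symmetric, rx */
        printf("LP       = %u\n", (unsigned)((ssa >> 14) & 0x3));/* 0: SS (1 = SSP) */
        printf("mantissa = %u\n", (unsigned)(ssa >> 16));        /* 5 -> 5 Gb/s */
        return 0;
}
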
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 5cd080e0a685..bad0d1f9a41d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1070,7 +1070,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
struct usb_device *top_dev;
struct usb_hcd *hcd;
- if (udev->speed == USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
hcd = xhci->shared_hcd;
else
hcd = xhci->main_hcd;
@@ -1105,6 +1105,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
+ max_packets = MAX_PACKET(512);
+ break;
case USB_SPEED_SUPER:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
max_packets = MAX_PACKET(512);
@@ -1292,6 +1296,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
}
/* Fall through - SS and HS isoc/int have same decoding */
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
@@ -1321,7 +1326,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
default:
BUG();
}
- return EP_INTERVAL(interval);
+ return interval;
}
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
@@ -1332,39 +1337,42 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
- if (udev->speed != USB_SPEED_SUPER ||
+ if (udev->speed < USB_SPEED_SUPER ||
!usb_endpoint_xfer_isoc(&ep->desc))
return 0;
return ep->ss_ep_comp.bmAttributes;
}
+static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ /* Super speed and Plus have max burst in ep companion desc */
+ if (udev->speed >= USB_SPEED_SUPER)
+ return ep->ss_ep_comp.bMaxBurst;
+
+ if (udev->speed == USB_SPEED_HIGH &&
+ (usb_endpoint_xfer_isoc(&ep->desc) ||
+ usb_endpoint_xfer_int(&ep->desc)))
+ return (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
+
+ return 0;
+}
+
static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
int in;
- u32 type;
in = usb_endpoint_dir_in(&ep->desc);
- if (usb_endpoint_xfer_control(&ep->desc)) {
- type = EP_TYPE(CTRL_EP);
- } else if (usb_endpoint_xfer_bulk(&ep->desc)) {
- if (in)
- type = EP_TYPE(BULK_IN_EP);
- else
- type = EP_TYPE(BULK_OUT_EP);
- } else if (usb_endpoint_xfer_isoc(&ep->desc)) {
- if (in)
- type = EP_TYPE(ISOC_IN_EP);
- else
- type = EP_TYPE(ISOC_OUT_EP);
- } else if (usb_endpoint_xfer_int(&ep->desc)) {
- if (in)
- type = EP_TYPE(INT_IN_EP);
- else
- type = EP_TYPE(INT_OUT_EP);
- } else {
- type = 0;
- }
- return type;
+
+ if (usb_endpoint_xfer_control(&ep->desc))
+ return CTRL_EP;
+ if (usb_endpoint_xfer_bulk(&ep->desc))
+ return in ? BULK_IN_EP : BULK_OUT_EP;
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ return in ? ISOC_IN_EP : ISOC_OUT_EP;
+ if (usb_endpoint_xfer_int(&ep->desc))
+ return in ? INT_IN_EP : INT_OUT_EP;
+ return 0;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
@@ -1382,7 +1390,12 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev,
usb_endpoint_xfer_bulk(&ep->desc))
return 0;
- if (udev->speed == USB_SPEED_SUPER)
+ /* SuperSpeedPlus Isoc ep sending over 48k per esit */
+ if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
+ USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
+ return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
+ /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
+ else if (udev->speed >= USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
@@ -1404,10 +1417,14 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx;
struct xhci_ring *ep_ring;
unsigned int max_packet;
- unsigned int max_burst;
- enum xhci_ring_type type;
+ enum xhci_ring_type ring_type;
u32 max_esit_payload;
u32 endpoint_type;
+ unsigned int max_burst;
+ unsigned int interval;
+ unsigned int mult;
+ unsigned int avg_trb_len;
+ unsigned int err_count = 0;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
@@ -1415,12 +1432,11 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
endpoint_type = xhci_get_endpoint_type(ep);
if (!endpoint_type)
return -EINVAL;
- ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);
- type = usb_endpoint_type(&ep->desc);
+ ring_type = usb_endpoint_type(&ep->desc);
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
+ xhci_ring_alloc(xhci, 2, 1, ring_type, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -1430,80 +1446,52 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
- 1, type);
+ 1, ring_type);
}
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
- ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
- ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
- | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
+ /*
+ * Get values to fill the endpoint context, mostly from ep descriptor.
+ * The average TRB buffer length for bulk endpoints is unclear as we
+ * have no clue on scatter gather list entry size. For Isoc and Int,
+ * set it to max available. See xHCI 1.1 spec 4.14.1.1 for details.
+ */
+ max_esit_payload = xhci_get_max_esit_payload(udev, ep);
+ interval = xhci_get_endpoint_interval(udev, ep);
+ mult = xhci_get_endpoint_mult(udev, ep);
+ max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+ max_burst = xhci_get_endpoint_max_burst(udev, ep);
+ avg_trb_len = max_esit_payload;
/* FIXME dig Mult and streams info out of ep companion desc */
- /* Allow 3 retries for everything but isoc;
- * CErr shall be set to 0 for Isoch endpoints.
- */
+ /* Allow 3 retries for everything but isoc, set CErr = 3 */
if (!usb_endpoint_xfer_isoc(&ep->desc))
- ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
- else
- ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));
-
- /* Set the max packet size and max burst */
- max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
- max_burst = 0;
- switch (udev->speed) {
- case USB_SPEED_SUPER:
- /* dig out max burst from ep companion desc */
- max_burst = ep->ss_ep_comp.bMaxBurst;
- break;
- case USB_SPEED_HIGH:
- /* Some devices get this wrong */
- if (usb_endpoint_xfer_bulk(&ep->desc))
- max_packet = 512;
- /* bits 11:12 specify the number of additional transaction
- * opportunities per microframe (USB 2.0, section 9.6.6)
- */
- if (usb_endpoint_xfer_isoc(&ep->desc) ||
- usb_endpoint_xfer_int(&ep->desc)) {
- max_burst = (usb_endpoint_maxp(&ep->desc)
- & 0x1800) >> 11;
- }
- break;
- case USB_SPEED_FULL:
- case USB_SPEED_LOW:
- break;
- default:
- BUG();
- }
- ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
- MAX_BURST(max_burst));
- max_esit_payload = xhci_get_max_esit_payload(udev, ep);
- ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
-
- /*
- * XXX no idea how to calculate the average TRB buffer length for bulk
- * endpoints, as the driver gives us no clue how big each scatter gather
- * list entry (or buffer) is going to be.
- *
- * For isochronous and interrupt endpoints, we set it to the max
- * available, until we have new API in the USB core to allow drivers to
- * declare how much bandwidth they actually need.
- *
- * Normally, it would be calculated by taking the total of the buffer
- * lengths in the TD and then dividing by the number of TRBs in a TD,
- * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
- * use Event Data TRBs, and we don't chain in a link TRB on short
- * transfers, we're basically dividing by 1.
- *
- * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
- * should be set to 8 for control endpoints.
- */
+ err_count = 3;
+ /* Some devices get this wrong */
+ if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
+ max_packet = 512;
+ /* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
- ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
- else
- ep_ctx->tx_info |=
- cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
+ avg_trb_len = 8;
+ /* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
+ if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
+ mult = 0;
+
+ /* Fill the endpoint context */
+ ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
+ EP_INTERVAL(interval) |
+ EP_MULT(mult));
+ ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
+ MAX_PACKET(max_packet) |
+ MAX_BURST(max_burst) |
+ ERROR_COUNT(err_count));
+ ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
+ ep_ring->cycle_state);
+
+ ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
+ EP_AVG_TRB_LENGTH(avg_trb_len));
/* FIXME Debug endpoint context */
return 0;
@@ -1873,6 +1861,12 @@ no_bw:
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
+ xhci->usb2_ports = NULL;
+ xhci->usb3_ports = NULL;
+ xhci->port_array = NULL;
+ xhci->rh_bw = NULL;
+ xhci->ext_caps = NULL;
+
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->bus_state[0].bus_suspended = 0;
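
The pointer-clearing hunk makes xhci_mem_cleanup() idempotent: kfree(NULL) is a no-op, so zeroed fields let the function run again safely after a failed re-initialization. The underlying idiom as a generic helper:

#include <linux/slab.h>

static void demo_free_and_clear(void **pp)
{
        kfree(*pp);
        *pp = NULL;     /* a second cleanup pass becomes a no-op */
}
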
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 9532f5aef71b..79959f17c38c 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -695,7 +695,6 @@ static int xhci_mtk_remove(struct platform_device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
/*
* if IP sleep fails and all clocks are disabled, register access will hang the
* AHB bus, so stop polling the roothubs to avoid register access on bus suspend.
@@ -703,7 +702,7 @@ static int xhci_mtk_remove(struct platform_device *dev)
* to wake up the system immediately after system suspend completes if IP sleep
* fails, which is exactly what we want.
*/
-static int xhci_mtk_suspend(struct device *dev)
+static int __maybe_unused xhci_mtk_suspend(struct device *dev)
{
struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
struct usb_hcd *hcd = mtk->hcd;
@@ -722,7 +721,7 @@ static int xhci_mtk_suspend(struct device *dev)
return 0;
}
-static int xhci_mtk_resume(struct device *dev)
+static int __maybe_unused xhci_mtk_resume(struct device *dev)
{
struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
struct usb_hcd *hcd = mtk->hcd;
@@ -744,10 +743,7 @@ static int xhci_mtk_resume(struct device *dev)
static const struct dev_pm_ops xhci_mtk_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xhci_mtk_suspend, xhci_mtk_resume)
};
-#define DEV_PM_OPS (&xhci_mtk_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM */
+#define DEV_PM_OPS IS_ENABLED(CONFIG_PM) ? &xhci_mtk_pm_ops : NULL
#ifdef CONFIG_OF
static const struct of_device_id mtk_xhci_of_match[] = {
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f0640b7a1c42..48672fac7ff3 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -48,6 +48,7 @@
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
static const char hcd_name[] = "xhci_hcd";
@@ -155,7 +156,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index d39d6bf1d090..474b5fa14900 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -39,12 +39,25 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+
/*
* As of now, platform drivers don't provide MSI support, so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to set up MSI
*/
xhci->quirks |= XHCI_PLAT;
+
+ /*
+ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+ * to 1. However, these SoCs don't support 64-bit memory address
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup().
+ */
+ if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
+ xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
}
/* called during probe() after chip reset completes */
@@ -110,7 +123,13 @@ static const struct of_device_id usb_xhci_of_match[] = {
.compatible = "renesas,xhci-r8a7795",
.data = &xhci_plat_renesas_rcar_gen3,
}, {
+ .compatible = "renesas,rcar-gen2-xhci",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,rcar-gen3-xhci",
+ .data = &xhci_plat_renesas_rcar_gen3,
},
+ {},
};
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
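
The bare "{}," added to usb_xhci_of_match matters: of_device_id tables are scanned until an all-zero entry, so every table needs a terminating sentinel. Illustrative table with a hypothetical compatible string:

#include <linux/of.h>

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-xhci" },   /* hypothetical entry */
        { /* sentinel: stops the match loop */ },
};
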
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 5a2e2e3936c4..529c3c40f901 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -14,7 +14,7 @@
#include "xhci.h" /* for hcd_to_xhci() */
enum xhci_plat_type {
- XHCI_PLAT_TYPE_MARVELL_ARMADA,
+ XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
};
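
Starting the enum at 1 reserves 0 for "no platform type set": driver private data is zero-initialized, so the value 0 must not alias the first real type. Sketch of the resulting check:

enum demo_plat_type {
        DEMO_TYPE_FIRST = 1,    /* 0 now means "type not set" */
        DEMO_TYPE_SECOND,
};

static int demo_type_is(unsigned int type, enum demo_plat_type want)
{
        return type == want;    /* never matches an unset (zero) field */
}
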
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3915657e6078..99b4ff42f7a0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3558,12 +3558,11 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
* zero. Only xHCI 1.0 host controllers support this field.
*/
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
- struct usb_device *udev,
struct urb *urb, unsigned int total_packet_count)
{
unsigned int max_burst;
- if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
+ if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
return 0;
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
@@ -3579,7 +3578,6 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
* contain 1 to (bMaxBurst + 1) packets.
*/
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
- struct usb_device *udev,
struct urb *urb, unsigned int total_packet_count)
{
unsigned int max_burst;
@@ -3588,8 +3586,7 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
if (xhci->hci_version < 0x100)
return 0;
- switch (udev->speed) {
- case USB_SPEED_SUPER:
+ if (urb->dev->speed >= USB_SPEED_SUPER) {
/* bMaxBurst is zero based: 0 means 1 packet per burst */
max_burst = urb->ep->ss_ep_comp.bMaxBurst;
residue = total_packet_count % (max_burst + 1);
@@ -3599,11 +3596,10 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
if (residue == 0)
return max_burst;
return residue - 1;
- default:
- if (total_packet_count == 0)
- return 0;
- return total_packet_count - 1;
}
+ if (total_packet_count == 0)
+ return 0;
+ return total_packet_count - 1;
}
/*
@@ -3714,6 +3710,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
int i, j;
bool more_trbs_coming;
struct xhci_virt_ep *xep;
+ int frame_id;
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
@@ -3723,33 +3720,31 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
xhci_dbg(xhci, "Isoc URB with zero packets?\n");
return -EINVAL;
}
-
start_addr = (u64) urb->transfer_dma;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
urb_priv = urb->hcpriv;
- /* Queue the first TRB, even if it's zero-length */
+ /* Queue the TRBs for each TD, even if they are zero-length */
for (i = 0; i < num_tds; i++) {
- unsigned int total_packet_count;
- unsigned int burst_count;
- unsigned int residue;
+ unsigned int total_pkt_count, max_pkt;
+ unsigned int burst_count, last_burst_pkt_count;
+ u32 sia_frame_id;
first_trb = true;
running_total = 0;
addr = start_addr + urb->iso_frame_desc[i].offset;
td_len = urb->iso_frame_desc[i].length;
td_remain_len = td_len;
- total_packet_count = DIV_ROUND_UP(td_len,
- GET_MAX_PACKET(
- usb_endpoint_maxp(&urb->ep->desc)));
+ max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
+
/* A zero-length transfer still involves at least one packet. */
- if (total_packet_count == 0)
- total_packet_count++;
- burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
- total_packet_count);
- residue = xhci_get_last_burst_packet_count(xhci,
- urb->dev, urb, total_packet_count);
+ if (total_pkt_count == 0)
+ total_pkt_count++;
+ burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
+ last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
+ urb, total_pkt_count);
trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
@@ -3760,68 +3755,57 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return ret;
goto cleanup;
}
-
td = urb_priv->td[i];
+
+ /* Use SIA as the default; overwrite it if a frame ID is used */
+ sia_frame_id = TRB_SIA;
+ if (!(urb->transfer_flags & URB_ISO_ASAP) &&
+ HCC_CFC(xhci->hcc_params)) {
+ frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
+ if (frame_id >= 0)
+ sia_frame_id = TRB_FRAME_ID(frame_id);
+ }
+ /*
+ * Set isoc specific data for the first TRB in a TD.
+ * Prevent HW from getting the TRBs by keeping the cycle state
+ * inverted in the first TDs isoc TRB.
+ */
+ field = TRB_TYPE(TRB_ISOC) |
+ TRB_TLBPC(last_burst_pkt_count) |
+ sia_frame_id |
+ (i ? ep_ring->cycle_state : !start_cycle);
+
+ /* xhci 1.1 with ETE uses TD_Size field for TBC, old is Rsvdz */
+ if (!xep->use_extended_tbc)
+ field |= TRB_TBC(burst_count);
+
+ /* fill the rest of the TRB fields, and remaining normal TRBs */
for (j = 0; j < trbs_per_td; j++) {
- int frame_id = 0;
u32 remainder = 0;
- field = 0;
-
- if (first_trb) {
- field = TRB_TBC(burst_count) |
- TRB_TLBPC(residue);
- /* Queue the isoc TRB */
- field |= TRB_TYPE(TRB_ISOC);
-
- /* Calculate Frame ID and SIA fields */
- if (!(urb->transfer_flags & URB_ISO_ASAP) &&
- HCC_CFC(xhci->hcc_params)) {
- frame_id = xhci_get_isoc_frame_id(xhci,
- urb,
- i);
- if (frame_id >= 0)
- field |= TRB_FRAME_ID(frame_id);
- else
- field |= TRB_SIA;
- } else
- field |= TRB_SIA;
-
- if (i == 0) {
- if (start_cycle == 0)
- field |= 0x1;
- } else
- field |= ep_ring->cycle_state;
- first_trb = false;
- } else {
- /* Queue other normal TRBs */
- field |= TRB_TYPE(TRB_NORMAL);
- field |= ep_ring->cycle_state;
- }
+
+ /* only the first TRB is an isoc TRB; overwrite the field otherwise */
+ if (!first_trb)
+ field = TRB_TYPE(TRB_NORMAL) |
+ ep_ring->cycle_state;
/* Only set interrupt on short packet for IN EPs */
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
- /* Chain all the TRBs together; clear the chain bit in
- * the last TRB to indicate it's the last TRB in the
- * chain.
- */
+ /* Set the chain bit for all except the last TRB */
if (j < trbs_per_td - 1) {
- field |= TRB_CHAIN;
more_trbs_coming = true;
+ field |= TRB_CHAIN;
} else {
+ more_trbs_coming = false;
td->last_trb = ep_ring->enqueue;
field |= TRB_IOC;
- if (xhci->hci_version == 0x100 &&
- !(xhci->quirks &
- XHCI_AVOID_BEI)) {
- /* Set BEI bit except for the last td */
- if (i < num_tds - 1)
- field |= TRB_BEI;
- }
- more_trbs_coming = false;
+ /* set BEI, except for the last TD */
+ if (xhci->hci_version >= 0x100 &&
+ !(xhci->quirks & XHCI_AVOID_BEI) &&
+ i < num_tds - 1)
+ field |= TRB_BEI;
}
-
/* Calculate TRB length */
trb_buff_len = TRB_MAX_BUFF_SIZE -
(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
@@ -3834,9 +3818,15 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
urb, trbs_per_td - j - 1);
length_field = TRB_LEN(trb_buff_len) |
- TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
+ /* xhci 1.1 with ETE uses TD Size field for TBC */
+ if (first_trb && xep->use_extended_tbc)
+ length_field |= TRB_TD_SIZE_TBC(burst_count);
+ else
+ length_field |= TRB_TD_SIZE(remainder);
+ first_trb = false;
+
queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
@@ -4014,7 +4004,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
- if (xhci->xhc_state) {
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 0c8087d3c313..9e71c96ad74a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+ /* Clear the state flags, including dying, halted and removing */
+ xhci->xhc_state = 0;
return ret;
}
@@ -1108,8 +1109,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Resume root hubs only when have pending events. */
status = readl(&xhci->op_regs->status);
if (status & STS_EINT) {
- usb_hcd_resume_root_hub(hcd);
usb_hcd_resume_root_hub(xhci->shared_hcd);
+ usb_hcd_resume_root_hub(hcd);
}
}
@@ -1124,10 +1125,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return retval;
}
@@ -2086,6 +2087,7 @@ static unsigned int xhci_get_block_size(struct usb_device *udev)
case USB_SPEED_HIGH:
return HS_BLOCK;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
return SS_BLOCK;
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
@@ -2211,7 +2213,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
unsigned int packets_remaining = 0;
unsigned int i;
- if (virt_dev->udev->speed == USB_SPEED_SUPER)
+ if (virt_dev->udev->speed >= USB_SPEED_SUPER)
return xhci_check_ss_bw(xhci, virt_dev);
if (virt_dev->udev->speed == USB_SPEED_HIGH) {
@@ -2412,7 +2414,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
if (xhci_is_async_ep(ep_bw->type))
return;
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
if (xhci_is_sync_in_ep(ep_bw->type))
xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
xhci_get_ss_bw_consumed(ep_bw);
@@ -2450,6 +2452,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
break;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
/* Should never happen because only LS/FS/HS endpoints will get
@@ -2509,6 +2512,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
break;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_UNKNOWN:
case USB_SPEED_WIRELESS:
/* Should never happen because only LS/FS/HS endpoints will get
@@ -2770,7 +2774,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_DYING)
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3817,7 +3822,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_lock(&xhci->mutex);
- if (xhci->xhc_state) /* dying or halted */
+ if (xhci->xhc_state) /* dying, removing or halted */
goto out;
if (!udev->slot_id) {
@@ -4897,6 +4902,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->sbrn == 0x31) {
xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
hcd->speed = HCD_USB31;
+ hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
}
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
@@ -4944,6 +4950,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
return retval;
xhci_dbg(xhci, "Reset complete\n");
+ /*
+ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
+ * of HCCPARAMS1 is set to 1. However, these xHCs don't actually support
+ * 64-bit memory address pointers. So, this driver clears the AC64
+ * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
+ * DMA_BIT_MASK(32)) in this xhci_gen_setup().
+ */
+ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
+ xhci->hcc_params &= ~BIT(0);
+
/* Set dma_mask and coherent_dma_mask to 64-bits,
* if xHC supports 64-bit addressing */
if (HCC_64BIT_ADDR(xhci->hcc_params) &&
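/*
 * Editor's sketch (standalone model, not the kernel code): the quirk
 * works because the DMA mask is derived from
 * HCC_64BIT_ADDR(xhci->hcc_params) right below, so clearing bit 0
 * first forces the 32-bit path.
 *
 *   #include <stdint.h>
 *
 *   #define AC64(p)  ((p) & (1u << 0))
 *
 *   static uint64_t pick_dma_mask(uint32_t hcc_params, int no_64bit_quirk)
 *   {
 *           if (no_64bit_quirk)
 *                   hcc_params &= ~(1u << 0);   /* pretend AC64 == 0 */
 *           return AC64(hcc_params) ? ~0ull : 0xffffffffull;
 *   }
 */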
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index cc651383ce5a..6c629c97f8ad 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -232,7 +232,9 @@ struct xhci_op_regs {
* disabled, or powered-off state.
*/
#define CMD_PM_INDEX (1 << 11)
-/* bits 12:31 are reserved (and should be preserved on writes). */
+/* bit 14 Extended TBC Enable, changes Isoc TRB fields to support larger TBC */
+#define CMD_ETE (1 << 14)
+/* bits 15:31 are reserved (and should be preserved on writes). */
/* IMAN - Interrupt Management Register */
#define IMAN_IE (1 << 1)
@@ -343,6 +345,7 @@ struct xhci_op_regs {
#define SLOT_SPEED_LS (XDEV_LS << 10)
#define SLOT_SPEED_HS (XDEV_HS << 10)
#define SLOT_SPEED_SS (XDEV_SS << 10)
+#define SLOT_SPEED_SSP (XDEV_SSP << 10)
/* Port Indicator Control */
#define PORT_LED_OFF (0 << 14)
#define PORT_LED_AMBER (1 << 14)
@@ -748,8 +751,9 @@ struct xhci_ep_ctx {
#define GET_MAX_PACKET(p) ((p) & 0x7ff)
/* tx_info bitmasks */
-#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
-#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
+#define EP_AVG_TRB_LENGTH(p) ((p) & 0xffff)
+#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) & 0xffff) << 16)
+#define EP_MAX_ESIT_PAYLOAD_HI(p) ((((p) >> 16) & 0xff) << 24)
#define CTX_TO_MAX_ESIT_PAYLOAD(p) (((p) >> 16) & 0xffff)
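/*
 * Editor's worked example: xhci 1.1 widens Max ESIT Payload to 24 bits,
 * split across the low 16 bits (in tx_info) and a high byte elsewhere
 * in the endpoint context. For a payload of 0x12345:
 *
 *   EP_MAX_ESIT_PAYLOAD_LO(0x12345) == 0x23450000   // (p & 0xffff) << 16
 *   EP_MAX_ESIT_PAYLOAD_HI(0x12345) == 0x01000000   // ((p >> 16) & 0xff) << 24
 */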
/* deq bitmasks */
@@ -941,6 +945,8 @@ struct xhci_virt_ep {
struct list_head bw_endpoint_list;
/* Isoch Frame ID checking storage */
int next_frame_id;
+ /* Use new Isoch TRB layout needed for extended TBC support */
+ bool use_extended_tbc;
};
enum xhci_overhead_type {
@@ -1182,9 +1188,12 @@ enum xhci_setup_dev {
#define TRB_LEN(p) ((p) & 0x1ffff)
/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */
#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17)
+/* xhci 1.1 uses the TD_SIZE field for TBC if Extended TBC is enabled (ETE) */
+#define TRB_TD_SIZE_TBC(p) (min((p), (u32)31) << 17)
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
+/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */
#define TRB_TBC(p) (((p) & 0x3) << 7)
#define TRB_TLBPC(p) (((p) & 0xf) << 16)
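/*
 * Editor's sketch: with Extended TBC enabled (CMD_ETE), the total burst
 * count no longer fits the legacy 2-bit TRB_TBC field, so the TD-size
 * bits of the first isoc TRB carry it instead. A hedged model of the
 * selection, using the macros defined above:
 *
 *   static u32 encode_burst_count(u32 burst_count, bool ete)
 *   {
 *           if (ete)
 *                   return TRB_TD_SIZE_TBC(burst_count); /* xhci 1.1 path */
 *           return TRB_TBC(burst_count);                 /* legacy field */
 *   }
 */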
@@ -1596,6 +1605,7 @@ struct xhci_hcd {
*/
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
+#define XHCI_STATE_REMOVING (1 << 2)
/* Statistics */
int error_bitmask;
unsigned int quirks;
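/*
 * Editor's note: XHCI_STATE_REMOVING lets command paths such as
 * xhci_check_bandwidth() (earlier hunk) refuse new work during
 * controller removal, not only when the host is dying. The two-line
 * check there is equivalent to the compact form:
 *
 *   if (xhci->xhc_state & (XHCI_STATE_DYING | XHCI_STATE_REMOVING))
 *           return -ENODEV;
 */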
@@ -1632,6 +1642,7 @@ struct xhci_hcd {
#define XHCI_PME_STUCK_QUIRK (1 << 20)
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
+#define XHCI_NO_64BIT_SUPPORT (1 << 23)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 23c794813e6a..76350e4ee807 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -73,6 +73,8 @@ static const struct usb_device_id chaoskey_table[] = {
};
MODULE_DEVICE_TABLE(usb, chaoskey_table);
+static void chaos_read_callback(struct urb *urb);
+
/* Driver-local specific stuff */
struct chaoskey {
struct usb_interface *interface;
@@ -80,7 +82,8 @@ struct chaoskey {
struct mutex lock;
struct mutex rng_lock;
int open; /* open count */
- int present; /* device not disconnected */
+ bool present; /* device not disconnected */
+ bool reading; /* ongoing IO */
int size; /* size of buf */
int valid; /* bytes of buf read */
int used; /* bytes of buf consumed */
@@ -88,15 +91,19 @@ struct chaoskey {
struct hwrng hwrng; /* Embedded struct for hwrng */
int hwrng_registered; /* registered with hwrng API */
wait_queue_head_t wait_q; /* for timeouts */
+ struct urb *urb; /* for performing IO */
char *buf;
};
static void chaoskey_free(struct chaoskey *dev)
{
- usb_dbg(dev->interface, "free");
- kfree(dev->name);
- kfree(dev->buf);
- kfree(dev);
+ if (dev) {
+ usb_dbg(dev->interface, "free");
+ usb_free_urb(dev->urb);
+ kfree(dev->name);
+ kfree(dev->buf);
+ kfree(dev);
+ }
}
static int chaoskey_probe(struct usb_interface *interface,
@@ -107,7 +114,7 @@ static int chaoskey_probe(struct usb_interface *interface,
int i;
int in_ep = -1;
struct chaoskey *dev;
- int result;
+ int result = -ENOMEM;
int size;
usb_dbg(interface, "probe %s-%s", udev->product, udev->serial);
@@ -142,14 +149,25 @@ static int chaoskey_probe(struct usb_interface *interface,
dev = kzalloc(sizeof(struct chaoskey), GFP_KERNEL);
if (dev == NULL)
- return -ENOMEM;
+ goto out;
dev->buf = kmalloc(size, GFP_KERNEL);
- if (dev->buf == NULL) {
- kfree(dev);
- return -ENOMEM;
- }
+ if (dev->buf == NULL)
+ goto out;
+
+ dev->urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!dev->urb)
+ goto out;
+
+ usb_fill_bulk_urb(dev->urb,
+ udev,
+ usb_rcvbulkpipe(udev, in_ep),
+ dev->buf,
+ size,
+ chaos_read_callback,
+ dev);
/* Construct a name using the product and serial values. Each
* device needs a unique name for the hwrng code
@@ -158,11 +176,8 @@ static int chaoskey_probe(struct usb_interface *interface,
if (udev->product && udev->serial) {
dev->name = kmalloc(strlen(udev->product) + 1 +
strlen(udev->serial) + 1, GFP_KERNEL);
- if (dev->name == NULL) {
- kfree(dev->buf);
- kfree(dev);
- return -ENOMEM;
- }
+ if (dev->name == NULL)
+ goto out;
strcpy(dev->name, udev->product);
strcat(dev->name, "-");
@@ -186,9 +201,7 @@ static int chaoskey_probe(struct usb_interface *interface,
result = usb_register_dev(interface, &chaoskey_class);
if (result) {
usb_err(interface, "Unable to allocate minor number.");
- usb_set_intfdata(interface, NULL);
- chaoskey_free(dev);
- return result;
+ goto out;
}
dev->hwrng.name = dev->name ? dev->name : chaoskey_driver.name;
@@ -215,6 +228,11 @@ static int chaoskey_probe(struct usb_interface *interface,
usb_dbg(interface, "chaoskey probe success, size %d", dev->size);
return 0;
+
+out:
+ usb_set_intfdata(interface, NULL);
+ chaoskey_free(dev);
+ return result;
}
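/*
 * Editor's note: with result preinitialized to -ENOMEM and
 * chaoskey_free() made NULL-safe above, every allocation failure in
 * probe can share the single "out" exit. The pattern, in miniature:
 *
 *   int result = -ENOMEM;
 *
 *   dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *   if (!dev)
 *           goto out;
 *   ...
 *   return 0;
 * out:
 *   chaoskey_free(dev);   /* tolerates dev == NULL */
 *   return result;
 */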
static void chaoskey_disconnect(struct usb_interface *interface)
@@ -237,6 +255,7 @@ static void chaoskey_disconnect(struct usb_interface *interface)
mutex_lock(&dev->lock);
dev->present = 0;
+ usb_poison_urb(dev->urb);
if (!dev->open) {
mutex_unlock(&dev->lock);
@@ -311,14 +330,33 @@ static int chaoskey_release(struct inode *inode, struct file *file)
return 0;
}
+static void chaos_read_callback(struct urb *urb)
+{
+ struct chaoskey *dev = urb->context;
+ int status = urb->status;
+
+ usb_dbg(dev->interface, "callback status (%d)", status);
+
+ if (status == 0)
+ dev->valid = urb->actual_length;
+ else
+ dev->valid = 0;
+
+ dev->used = 0;
+
+ /* counters must be visible before the reading flag is cleared */
+ smp_wmb();
+
+ dev->reading = false;
+ wake_up(&dev->wait_q);
+}
+
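/*
 * Editor's note (assumption about the intended ordering): the smp_wmb()
 * publishes dev->valid and dev->used before dev->reading is cleared, so
 * a waiter that sees reading == false also sees the fresh counters; the
 * wake_up()/wait_event pair supplies the matching reader-side ordering:
 *
 *   wait_event_interruptible_timeout(dev->wait_q, !dev->reading, t);
 *   // here dev->valid and dev->used are safe to consume
 */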
/* Fill the buffer. Called with dev->lock held
*/
static int _chaoskey_fill(struct chaoskey *dev)
{
DEFINE_WAIT(wait);
int result;
- int this_read;
- struct usb_device *udev = interface_to_usbdev(dev->interface);
usb_dbg(dev->interface, "fill");
@@ -343,21 +381,31 @@ static int _chaoskey_fill(struct chaoskey *dev)
return result;
}
- result = usb_bulk_msg(udev,
- usb_rcvbulkpipe(udev, dev->in_ep),
- dev->buf, dev->size, &this_read,
- NAK_TIMEOUT);
+ dev->reading = true;
+ result = usb_submit_urb(dev->urb, GFP_KERNEL);
+ if (result < 0) {
+ result = usb_translate_errors(result);
+ dev->reading = false;
+ goto out;
+ }
+
+ result = wait_event_interruptible_timeout(
+ dev->wait_q,
+ !dev->reading,
+ NAK_TIMEOUT);
+
+ if (result < 0)
+ goto out;
+ if (result == 0)
+ result = -ETIMEDOUT;
+ else
+ result = dev->valid;
+out:
/* Let the device go back to sleep eventually */
usb_autopm_put_interface(dev->interface);
- if (result == 0) {
- dev->valid = this_read;
- dev->used = 0;
- }
-
- usb_dbg(dev->interface, "bulk_msg result %d this_read %d",
- result, this_read);
+ usb_dbg(dev->interface, "read %d bytes", dev->valid);
return result;
}
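/*
 * Editor's note: the result mapping above follows the
 * wait_event_interruptible_timeout() convention -- negative
 * (-ERESTARTSYS) if interrupted by a signal, 0 if the timeout elapsed,
 * and the remaining jiffies (>= 1) if the condition became true:
 *
 *   long r = wait_event_interruptible_timeout(q, cond, NAK_TIMEOUT);
 *   if (r < 0)
 *           return r;          // signal
 *   if (r == 0)
 *           return -ETIMEDOUT; // timeout, mapped to -EAGAIN by read()
 *   return dev->valid;         // condition met
 */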
@@ -395,13 +443,7 @@ static ssize_t chaoskey_read(struct file *file,
goto bail;
if (dev->valid == dev->used) {
result = _chaoskey_fill(dev);
- if (result) {
- mutex_unlock(&dev->lock);
- goto bail;
- }
-
- /* Read returned zero bytes */
- if (dev->used == dev->valid) {
+ if (result < 0) {
mutex_unlock(&dev->lock);
goto bail;
}
@@ -435,6 +477,8 @@ bail:
return read_count;
}
usb_dbg(dev->interface, "empty read, result %d", result);
+ if (result == -ETIMEDOUT)
+ result = -EAGAIN;
return result;
}
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 4e38683c653c..5105397e62fc 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -257,9 +257,9 @@ static int idmouse_open(struct inode *inode, struct file *file)
if (result)
goto error;
result = idmouse_create_image (dev);
+ usb_autopm_put_interface(interface);
if (result)
goto error;
- usb_autopm_put_interface(interface);
/* increment our usage count for the driver */
++dev->open;
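/*
 * Editor's note: the move above balances usb_autopm_get_interface() on
 * every path; previously a failing idmouse_create_image() jumped to
 * "error" with the PM reference still held. General shape:
 *
 *   result = usb_autopm_get_interface(interface);
 *   if (result)
 *           goto error;
 *   result = idmouse_create_image(dev);
 *   usb_autopm_put_interface(interface);  /* drop ref on both outcomes */
 *   if (result)
 *           goto error;
 */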
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c6bfd13f6c92..1950e87b4219 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ if (iface_desc->desc.bNumEndpoints < 1) {
+ dev_err(&interface->dev, "Invalid number of endpoints\n");
+ retval = -EINVAL;
+ goto error;
+ }
+
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
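/*
 * Editor's sketch (generic hardening pattern): descriptors come from
 * the device and cannot be trusted, so validate bNumEndpoints before
 * indexing iface_desc->endpoint[], exactly as the hunk above does:
 *
 *   if (iface_desc->desc.bNumEndpoints < 1)
 *           return -EINVAL;                  /* reject before indexing */
 *   endpoint = &iface_desc->endpoint[0].desc;
 */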
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 8efbabacc84e..a22de52cb083 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -61,8 +61,8 @@
/* Forward declarations / clean-up routines */
#ifdef INCL_SISUSB_CON
-static int sisusb_first_vc = 0;
-static int sisusb_last_vc = 0;
+static int sisusb_first_vc;
+static int sisusb_last_vc;
module_param_named(first, sisusb_first_vc, int, 0);
module_param_named(last, sisusb_last_vc, int, 0);
MODULE_PARM_DESC(first, "Number of first console to take over (1 - MAX_NR_CONSOLES)");
@@ -71,25 +71,19 @@ MODULE_PARM_DESC(last, "Number of last console to take over (1 - MAX_NR_CONSOLES
static struct usb_driver sisusb_driver;
-static void
-sisusb_free_buffers(struct sisusb_usb_data *sisusb)
+static void sisusb_free_buffers(struct sisusb_usb_data *sisusb)
{
int i;
for (i = 0; i < NUMOBUFS; i++) {
- if (sisusb->obuf[i]) {
- kfree(sisusb->obuf[i]);
- sisusb->obuf[i] = NULL;
- }
- }
- if (sisusb->ibuf) {
- kfree(sisusb->ibuf);
- sisusb->ibuf = NULL;
+ kfree(sisusb->obuf[i]);
+ sisusb->obuf[i] = NULL;
}
+ kfree(sisusb->ibuf);
+ sisusb->ibuf = NULL;
}
-static void
-sisusb_free_urbs(struct sisusb_usb_data *sisusb)
+static void sisusb_free_urbs(struct sisusb_usb_data *sisusb)
{
int i;
@@ -108,8 +102,7 @@ sisusb_free_urbs(struct sisusb_usb_data *sisusb)
/* out-urb management */
/* Return 1 if all free, 0 otherwise */
-static int
-sisusb_all_free(struct sisusb_usb_data *sisusb)
+static int sisusb_all_free(struct sisusb_usb_data *sisusb)
{
int i;
@@ -124,8 +117,7 @@ sisusb_all_free(struct sisusb_usb_data *sisusb)
}
/* Kill all busy URBs */
-static void
-sisusb_kill_all_busy(struct sisusb_usb_data *sisusb)
+static void sisusb_kill_all_busy(struct sisusb_usb_data *sisusb)
{
int i;
@@ -141,20 +133,17 @@ sisusb_kill_all_busy(struct sisusb_usb_data *sisusb)
}
/* Return 1 if ok, 0 if error (not all complete within timeout) */
-static int
-sisusb_wait_all_out_complete(struct sisusb_usb_data *sisusb)
+static int sisusb_wait_all_out_complete(struct sisusb_usb_data *sisusb)
{
int timeout = 5 * HZ, i = 1;
- wait_event_timeout(sisusb->wait_q,
- (i = sisusb_all_free(sisusb)),
- timeout);
+ wait_event_timeout(sisusb->wait_q, (i = sisusb_all_free(sisusb)),
+ timeout);
return i;
}
-static int
-sisusb_outurb_available(struct sisusb_usb_data *sisusb)
+static int sisusb_outurb_available(struct sisusb_usb_data *sisusb)
{
int i;
@@ -168,20 +157,17 @@ sisusb_outurb_available(struct sisusb_usb_data *sisusb)
return -1;
}
-static int
-sisusb_get_free_outbuf(struct sisusb_usb_data *sisusb)
+static int sisusb_get_free_outbuf(struct sisusb_usb_data *sisusb)
{
int i, timeout = 5 * HZ;
wait_event_timeout(sisusb->wait_q,
- ((i = sisusb_outurb_available(sisusb)) >= 0),
- timeout);
+ ((i = sisusb_outurb_available(sisusb)) >= 0), timeout);
return i;
}
-static int
-sisusb_alloc_outbuf(struct sisusb_usb_data *sisusb)
+static int sisusb_alloc_outbuf(struct sisusb_usb_data *sisusb)
{
int i;
@@ -193,8 +179,7 @@ sisusb_alloc_outbuf(struct sisusb_usb_data *sisusb)
return i;
}
-static void
-sisusb_free_outbuf(struct sisusb_usb_data *sisusb, int index)
+static void sisusb_free_outbuf(struct sisusb_usb_data *sisusb, int index)
{
if ((index >= 0) && (index < sisusb->numobufs))
sisusb->urbstatus[index] &= ~SU_URB_ALLOC;
@@ -202,8 +187,7 @@ sisusb_free_outbuf(struct sisusb_usb_data *sisusb, int index)
/* completion callback */
-static void
-sisusb_bulk_completeout(struct urb *urb)
+static void sisusb_bulk_completeout(struct urb *urb)
{
struct sisusb_urb_context *context = urb->context;
struct sisusb_usb_data *sisusb;
@@ -225,9 +209,9 @@ sisusb_bulk_completeout(struct urb *urb)
wake_up(&sisusb->wait_q);
}
-static int
-sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe, void *data,
- int len, int *actual_length, int timeout, unsigned int tflags)
+static int sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index,
+ unsigned int pipe, void *data, int len, int *actual_length,
+ int timeout, unsigned int tflags)
{
struct urb *urb = sisusb->sisurbout[index];
int retval, byteswritten = 0;
@@ -236,14 +220,15 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
urb->transfer_flags = 0;
usb_fill_bulk_urb(urb, sisusb->sisusb_dev, pipe, data, len,
- sisusb_bulk_completeout, &sisusb->urbout_context[index]);
+ sisusb_bulk_completeout,
+ &sisusb->urbout_context[index]);
urb->transfer_flags |= tflags;
urb->actual_length = 0;
/* Set up context */
sisusb->urbout_context[index].actual_length = (timeout) ?
- NULL : actual_length;
+ NULL : actual_length;
/* Declare this urb/buffer in use */
sisusb->urbstatus[index] |= SU_URB_BUSY;
@@ -254,8 +239,8 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
/* If OK, and if timeout > 0, wait for completion */
if ((retval == 0) && timeout) {
wait_event_timeout(sisusb->wait_q,
- (!(sisusb->urbstatus[index] & SU_URB_BUSY)),
- timeout);
+ (!(sisusb->urbstatus[index] & SU_URB_BUSY)),
+ timeout);
if (sisusb->urbstatus[index] & SU_URB_BUSY) {
/* URB timed out... kill it and report error */
usb_kill_urb(urb);
@@ -277,8 +262,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
/* completion callback */
-static void
-sisusb_bulk_completein(struct urb *urb)
+static void sisusb_bulk_completein(struct urb *urb)
{
struct sisusb_usb_data *sisusb = urb->context;
@@ -289,9 +273,9 @@ sisusb_bulk_completein(struct urb *urb)
wake_up(&sisusb->wait_q);
}
-static int
-sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
- int len, int *actual_length, int timeout, unsigned int tflags)
+static int sisusb_bulkin_msg(struct sisusb_usb_data *sisusb,
+ unsigned int pipe, void *data, int len,
+ int *actual_length, int timeout, unsigned int tflags)
{
struct urb *urb = sisusb->sisurbin;
int retval, readbytes = 0;
@@ -375,7 +359,7 @@ static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
do {
passsize = thispass = (sisusb->obufsize < count) ?
- sisusb->obufsize : count;
+ sisusb->obufsize : count;
if (index < 0)
index = sisusb_get_free_outbuf(sisusb);
@@ -405,14 +389,9 @@ static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
if (!sisusb->sisusb_dev)
return -ENODEV;
- result = sisusb_bulkout_msg(sisusb,
- index,
- pipe,
- buffer,
- thispass,
- &transferred_len,
- async ? 0 : 5 * HZ,
- tflags);
+ result = sisusb_bulkout_msg(sisusb, index, pipe,
+ buffer, thispass, &transferred_len,
+ async ? 0 : 5 * HZ, tflags);
if (result == -ETIMEDOUT) {
@@ -500,13 +479,8 @@ static int sisusb_recv_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
thispass = (bufsize < count) ? bufsize : count;
- result = sisusb_bulkin_msg(sisusb,
- pipe,
- buffer,
- thispass,
- &transferred_len,
- 5 * HZ,
- tflags);
+ result = sisusb_bulkin_msg(sisusb, pipe, buffer, thispass,
+ &transferred_len, 5 * HZ, tflags);
if (transferred_len)
thispass = transferred_len;
@@ -549,7 +523,7 @@ static int sisusb_recv_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
}
static int sisusb_send_packet(struct sisusb_usb_data *sisusb, int len,
- struct sisusb_packet *packet)
+ struct sisusb_packet *packet)
{
int ret;
ssize_t bytes_transferred = 0;
@@ -585,8 +559,7 @@ static int sisusb_send_packet(struct sisusb_usb_data *sisusb, int len,
}
static int sisusb_send_bridge_packet(struct sisusb_usb_data *sisusb, int len,
- struct sisusb_packet *packet,
- unsigned int tflags)
+ struct sisusb_packet *packet, unsigned int tflags)
{
int ret;
ssize_t bytes_transferred = 0;
@@ -634,7 +607,7 @@ static int sisusb_send_bridge_packet(struct sisusb_usb_data *sisusb, int len,
*/
static int sisusb_write_memio_byte(struct sisusb_usb_data *sisusb, int type,
- u32 addr, u8 data)
+ u32 addr, u8 data)
{
struct sisusb_packet packet;
int ret;
@@ -647,7 +620,7 @@ static int sisusb_write_memio_byte(struct sisusb_usb_data *sisusb, int type,
}
static int sisusb_write_memio_word(struct sisusb_usb_data *sisusb, int type,
- u32 addr, u16 data)
+ u32 addr, u16 data)
{
struct sisusb_packet packet;
int ret = 0;
@@ -655,36 +628,36 @@ static int sisusb_write_memio_word(struct sisusb_usb_data *sisusb, int type,
packet.address = addr & ~3;
switch (addr & 3) {
- case 0:
- packet.header = (type << 6) | 0x0003;
- packet.data = (u32)data;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 1:
- packet.header = (type << 6) | 0x0006;
- packet.data = (u32)data << 8;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 2:
- packet.header = (type << 6) | 0x000c;
- packet.data = (u32)data << 16;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 3:
- packet.header = (type << 6) | 0x0008;
- packet.data = (u32)data << 24;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- packet.header = (type << 6) | 0x0001;
- packet.address = (addr & ~3) + 4;
- packet.data = (u32)data >> 8;
- ret |= sisusb_send_packet(sisusb, 10, &packet);
+ case 0:
+ packet.header = (type << 6) | 0x0003;
+ packet.data = (u32)data;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 1:
+ packet.header = (type << 6) | 0x0006;
+ packet.data = (u32)data << 8;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 2:
+ packet.header = (type << 6) | 0x000c;
+ packet.data = (u32)data << 16;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 3:
+ packet.header = (type << 6) | 0x0008;
+ packet.data = (u32)data << 24;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ packet.header = (type << 6) | 0x0001;
+ packet.address = (addr & ~3) + 4;
+ packet.data = (u32)data >> 8;
+ ret |= sisusb_send_packet(sisusb, 10, &packet);
}
return ret;
}
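/*
 * Editor's worked example (the low header nibble appears to be a
 * byte-enable mask): a 16-bit write at addr = 7 (addr & 3 == 3)
 * straddles a dword boundary, so two packets are sent. With
 * data = 0xBEEF:
 *
 *   packet 1: address = 4, header enables 0x0008, data = 0xEF000000
 *   packet 2: address = 8, header enables 0x0001, data = 0x000000BE
 *
 * Aligned cases (addr & 3 <= 2) fit in a single packet, with the enable
 * bits and the data shift chosen to match the byte lane.
 */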
static int sisusb_write_memio_24bit(struct sisusb_usb_data *sisusb, int type,
- u32 addr, u32 data)
+ u32 addr, u32 data)
{
struct sisusb_packet packet;
int ret = 0;
@@ -692,40 +665,40 @@ static int sisusb_write_memio_24bit(struct sisusb_usb_data *sisusb, int type,
packet.address = addr & ~3;
switch (addr & 3) {
- case 0:
- packet.header = (type << 6) | 0x0007;
- packet.data = data & 0x00ffffff;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 1:
- packet.header = (type << 6) | 0x000e;
- packet.data = data << 8;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 2:
- packet.header = (type << 6) | 0x000c;
- packet.data = data << 16;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- packet.header = (type << 6) | 0x0001;
- packet.address = (addr & ~3) + 4;
- packet.data = (data >> 16) & 0x00ff;
- ret |= sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 3:
- packet.header = (type << 6) | 0x0008;
- packet.data = data << 24;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- packet.header = (type << 6) | 0x0003;
- packet.address = (addr & ~3) + 4;
- packet.data = (data >> 8) & 0xffff;
- ret |= sisusb_send_packet(sisusb, 10, &packet);
+ case 0:
+ packet.header = (type << 6) | 0x0007;
+ packet.data = data & 0x00ffffff;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 1:
+ packet.header = (type << 6) | 0x000e;
+ packet.data = data << 8;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 2:
+ packet.header = (type << 6) | 0x000c;
+ packet.data = data << 16;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ packet.header = (type << 6) | 0x0001;
+ packet.address = (addr & ~3) + 4;
+ packet.data = (data >> 16) & 0x00ff;
+ ret |= sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 3:
+ packet.header = (type << 6) | 0x0008;
+ packet.data = data << 24;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ packet.header = (type << 6) | 0x0003;
+ packet.address = (addr & ~3) + 4;
+ packet.data = (data >> 8) & 0xffff;
+ ret |= sisusb_send_packet(sisusb, 10, &packet);
}
return ret;
}
static int sisusb_write_memio_long(struct sisusb_usb_data *sisusb, int type,
- u32 addr, u32 data)
+ u32 addr, u32 data)
{
struct sisusb_packet packet;
int ret = 0;
@@ -733,37 +706,37 @@ static int sisusb_write_memio_long(struct sisusb_usb_data *sisusb, int type,
packet.address = addr & ~3;
switch (addr & 3) {
- case 0:
- packet.header = (type << 6) | 0x000f;
- packet.data = data;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 1:
- packet.header = (type << 6) | 0x000e;
- packet.data = data << 8;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- packet.header = (type << 6) | 0x0001;
- packet.address = (addr & ~3) + 4;
- packet.data = data >> 24;
- ret |= sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 2:
- packet.header = (type << 6) | 0x000c;
- packet.data = data << 16;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- packet.header = (type << 6) | 0x0003;
- packet.address = (addr & ~3) + 4;
- packet.data = data >> 16;
- ret |= sisusb_send_packet(sisusb, 10, &packet);
- break;
- case 3:
- packet.header = (type << 6) | 0x0008;
- packet.data = data << 24;
- ret = sisusb_send_packet(sisusb, 10, &packet);
- packet.header = (type << 6) | 0x0007;
- packet.address = (addr & ~3) + 4;
- packet.data = data >> 8;
- ret |= sisusb_send_packet(sisusb, 10, &packet);
+ case 0:
+ packet.header = (type << 6) | 0x000f;
+ packet.data = data;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 1:
+ packet.header = (type << 6) | 0x000e;
+ packet.data = data << 8;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ packet.header = (type << 6) | 0x0001;
+ packet.address = (addr & ~3) + 4;
+ packet.data = data >> 24;
+ ret |= sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 2:
+ packet.header = (type << 6) | 0x000c;
+ packet.data = data << 16;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ packet.header = (type << 6) | 0x0003;
+ packet.address = (addr & ~3) + 4;
+ packet.data = data >> 16;
+ ret |= sisusb_send_packet(sisusb, 10, &packet);
+ break;
+ case 3:
+ packet.header = (type << 6) | 0x0008;
+ packet.data = data << 24;
+ ret = sisusb_send_packet(sisusb, 10, &packet);
+ packet.header = (type << 6) | 0x0007;
+ packet.address = (addr & ~3) + 4;
+ packet.data = data >> 8;
+ ret |= sisusb_send_packet(sisusb, 10, &packet);
}
return ret;
@@ -780,13 +753,12 @@ static int sisusb_write_memio_long(struct sisusb_usb_data *sisusb, int type,
*/
static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
- char *kernbuffer, int length,
- const char __user *userbuffer, int index,
- ssize_t *bytes_written)
+ char *kernbuffer, int length, const char __user *userbuffer,
+ int index, ssize_t *bytes_written)
{
struct sisusb_packet packet;
int ret = 0;
- static int msgcount = 0;
+ static int msgcount;
u8 swap8, fromkern = kernbuffer ? 1 : 0;
u16 swap16;
u32 swap32, flag = (length >> 28) & 1;
@@ -803,9 +775,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
length &= 0x00ffffff;
while (length) {
-
- switch (length) {
-
+ switch (length) {
case 1:
if (userbuffer) {
if (get_user(swap8, (u8 __user *)userbuffer))
@@ -813,9 +783,8 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
} else
swap8 = kernbuffer[0];
- ret = sisusb_write_memio_byte(sisusb,
- SISUSB_TYPE_MEM,
- addr, swap8);
+ ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM,
+ addr, swap8);
if (!ret)
(*bytes_written)++;
@@ -829,10 +798,8 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
} else
swap16 = *((u16 *)kernbuffer);
- ret = sisusb_write_memio_word(sisusb,
- SISUSB_TYPE_MEM,
- addr,
- swap16);
+ ret = sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
+ addr, swap16);
if (!ret)
(*bytes_written) += 2;
@@ -863,10 +830,8 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
kernbuffer[0];
#endif
- ret = sisusb_write_memio_24bit(sisusb,
- SISUSB_TYPE_MEM,
- addr,
- swap32);
+ ret = sisusb_write_memio_24bit(sisusb, SISUSB_TYPE_MEM,
+ addr, swap32);
if (!ret)
(*bytes_written) += 3;
@@ -880,10 +845,8 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
} else
swap32 = *((u32 *)kernbuffer);
- ret = sisusb_write_memio_long(sisusb,
- SISUSB_TYPE_MEM,
- addr,
- swap32);
+ ret = sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM,
+ addr, swap32);
if (!ret)
(*bytes_written) += 4;
@@ -892,103 +855,106 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
default:
if ((length & ~3) > 0x10000) {
- packet.header = 0x001f;
- packet.address = 0x000001d4;
- packet.data = addr;
- ret = sisusb_send_bridge_packet(sisusb, 10,
- &packet, 0);
- packet.header = 0x001f;
- packet.address = 0x000001d0;
- packet.data = (length & ~3);
- ret |= sisusb_send_bridge_packet(sisusb, 10,
- &packet, 0);
- packet.header = 0x001f;
- packet.address = 0x000001c0;
- packet.data = flag | 0x16;
- ret |= sisusb_send_bridge_packet(sisusb, 10,
- &packet, 0);
- if (userbuffer) {
- ret |= sisusb_send_bulk_msg(sisusb,
+ packet.header = 0x001f;
+ packet.address = 0x000001d4;
+ packet.data = addr;
+ ret = sisusb_send_bridge_packet(sisusb, 10,
+ &packet, 0);
+ packet.header = 0x001f;
+ packet.address = 0x000001d0;
+ packet.data = (length & ~3);
+ ret |= sisusb_send_bridge_packet(sisusb, 10,
+ &packet, 0);
+ packet.header = 0x001f;
+ packet.address = 0x000001c0;
+ packet.data = flag | 0x16;
+ ret |= sisusb_send_bridge_packet(sisusb, 10,
+ &packet, 0);
+ if (userbuffer) {
+ ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_LBULK_OUT,
(length & ~3),
NULL, userbuffer, 0,
bytes_written, 0, 1);
- userbuffer += (*bytes_written);
- } else if (fromkern) {
- ret |= sisusb_send_bulk_msg(sisusb,
+ userbuffer += (*bytes_written);
+ } else if (fromkern) {
+ ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_LBULK_OUT,
(length & ~3),
kernbuffer, NULL, 0,
bytes_written, 0, 1);
- kernbuffer += (*bytes_written);
- } else {
- ret |= sisusb_send_bulk_msg(sisusb,
+ kernbuffer += (*bytes_written);
+ } else {
+ ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_LBULK_OUT,
(length & ~3),
NULL, NULL, index,
bytes_written, 0, 1);
- kernbuffer += ((*bytes_written) &
- (sisusb->obufsize-1));
- }
+ kernbuffer += ((*bytes_written) &
+ (sisusb->obufsize-1));
+ }
} else {
- packet.header = 0x001f;
- packet.address = 0x00000194;
- packet.data = addr;
- ret = sisusb_send_bridge_packet(sisusb, 10,
- &packet, 0);
- packet.header = 0x001f;
- packet.address = 0x00000190;
- packet.data = (length & ~3);
- ret |= sisusb_send_bridge_packet(sisusb, 10,
- &packet, 0);
- if (sisusb->flagb0 != 0x16) {
packet.header = 0x001f;
- packet.address = 0x00000180;
- packet.data = flag | 0x16;
+ packet.address = 0x00000194;
+ packet.data = addr;
+ ret = sisusb_send_bridge_packet(sisusb, 10,
+ &packet, 0);
+ packet.header = 0x001f;
+ packet.address = 0x00000190;
+ packet.data = (length & ~3);
ret |= sisusb_send_bridge_packet(sisusb, 10,
- &packet, 0);
- sisusb->flagb0 = 0x16;
- }
- if (userbuffer) {
- ret |= sisusb_send_bulk_msg(sisusb,
+ &packet, 0);
+ if (sisusb->flagb0 != 0x16) {
+ packet.header = 0x001f;
+ packet.address = 0x00000180;
+ packet.data = flag | 0x16;
+ ret |= sisusb_send_bridge_packet(sisusb,
+ 10, &packet, 0);
+ sisusb->flagb0 = 0x16;
+ }
+ if (userbuffer) {
+ ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_BULK_OUT,
(length & ~3),
NULL, userbuffer, 0,
bytes_written, 0, 1);
- userbuffer += (*bytes_written);
- } else if (fromkern) {
- ret |= sisusb_send_bulk_msg(sisusb,
+ userbuffer += (*bytes_written);
+ } else if (fromkern) {
+ ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_BULK_OUT,
(length & ~3),
kernbuffer, NULL, 0,
bytes_written, 0, 1);
- kernbuffer += (*bytes_written);
- } else {
- ret |= sisusb_send_bulk_msg(sisusb,
+ kernbuffer += (*bytes_written);
+ } else {
+ ret |= sisusb_send_bulk_msg(sisusb,
SISUSB_EP_GFX_BULK_OUT,
(length & ~3),
NULL, NULL, index,
bytes_written, 0, 1);
- kernbuffer += ((*bytes_written) &
- (sisusb->obufsize-1));
- }
+ kernbuffer += ((*bytes_written) &
+ (sisusb->obufsize-1));
+ }
}
if (ret) {
msgcount++;
if (msgcount < 500)
- dev_err(&sisusb->sisusb_dev->dev, "Wrote %zd of %d bytes, error %d\n",
- *bytes_written, length, ret);
+ dev_err(&sisusb->sisusb_dev->dev,
+ "Wrote %zd of %d bytes, error %d\n",
+ *bytes_written, length,
+ ret);
else if (msgcount == 500)
- dev_err(&sisusb->sisusb_dev->dev, "Too many errors, logging stopped\n");
+ dev_err(&sisusb->sisusb_dev->dev,
+ "Too many errors, logging stopped\n");
}
addr += (*bytes_written);
length -= (*bytes_written);
- }
+ }
- if (ret)
- break;
+ if (ret)
+ break;
}
@@ -1000,7 +966,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
*/
static int sisusb_read_memio_byte(struct sisusb_usb_data *sisusb, int type,
- u32 addr, u8 *data)
+ u32 addr, u8 *data)
{
struct sisusb_packet packet;
int ret;
@@ -1014,7 +980,7 @@ static int sisusb_read_memio_byte(struct sisusb_usb_data *sisusb, int type,
}
static int sisusb_read_memio_word(struct sisusb_usb_data *sisusb, int type,
- u32 addr, u16 *data)
+ u32 addr, u16 *data)
{
struct sisusb_packet packet;
int ret = 0;
@@ -1024,36 +990,36 @@ static int sisusb_read_memio_word(struct sisusb_usb_data *sisusb, int type,
packet.address = addr & ~3;
switch (addr & 3) {
- case 0:
- packet.header = (type << 6) | 0x0003;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = (u16)(packet.data);
- break;
- case 1:
- packet.header = (type << 6) | 0x0006;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = (u16)(packet.data >> 8);
- break;
- case 2:
- packet.header = (type << 6) | 0x000c;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = (u16)(packet.data >> 16);
- break;
- case 3:
- packet.header = (type << 6) | 0x0008;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = (u16)(packet.data >> 24);
- packet.header = (type << 6) | 0x0001;
- packet.address = (addr & ~3) + 4;
- ret |= sisusb_send_packet(sisusb, 6, &packet);
- *data |= (u16)(packet.data << 8);
+ case 0:
+ packet.header = (type << 6) | 0x0003;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = (u16)(packet.data);
+ break;
+ case 1:
+ packet.header = (type << 6) | 0x0006;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = (u16)(packet.data >> 8);
+ break;
+ case 2:
+ packet.header = (type << 6) | 0x000c;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = (u16)(packet.data >> 16);
+ break;
+ case 3:
+ packet.header = (type << 6) | 0x0008;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = (u16)(packet.data >> 24);
+ packet.header = (type << 6) | 0x0001;
+ packet.address = (addr & ~3) + 4;
+ ret |= sisusb_send_packet(sisusb, 6, &packet);
+ *data |= (u16)(packet.data << 8);
}
return ret;
}
static int sisusb_read_memio_24bit(struct sisusb_usb_data *sisusb, int type,
- u32 addr, u32 *data)
+ u32 addr, u32 *data)
{
struct sisusb_packet packet;
int ret = 0;
@@ -1061,40 +1027,40 @@ static int sisusb_read_memio_24bit(struct sisusb_usb_data *sisusb, int type,
packet.address = addr & ~3;
switch (addr & 3) {
- case 0:
- packet.header = (type << 6) | 0x0007;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = packet.data & 0x00ffffff;
- break;
- case 1:
- packet.header = (type << 6) | 0x000e;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = packet.data >> 8;
- break;
- case 2:
- packet.header = (type << 6) | 0x000c;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = packet.data >> 16;
- packet.header = (type << 6) | 0x0001;
- packet.address = (addr & ~3) + 4;
- ret |= sisusb_send_packet(sisusb, 6, &packet);
- *data |= ((packet.data & 0xff) << 16);
- break;
- case 3:
- packet.header = (type << 6) | 0x0008;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = packet.data >> 24;
- packet.header = (type << 6) | 0x0003;
- packet.address = (addr & ~3) + 4;
- ret |= sisusb_send_packet(sisusb, 6, &packet);
- *data |= ((packet.data & 0xffff) << 8);
+ case 0:
+ packet.header = (type << 6) | 0x0007;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = packet.data & 0x00ffffff;
+ break;
+ case 1:
+ packet.header = (type << 6) | 0x000e;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = packet.data >> 8;
+ break;
+ case 2:
+ packet.header = (type << 6) | 0x000c;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = packet.data >> 16;
+ packet.header = (type << 6) | 0x0001;
+ packet.address = (addr & ~3) + 4;
+ ret |= sisusb_send_packet(sisusb, 6, &packet);
+ *data |= ((packet.data & 0xff) << 16);
+ break;
+ case 3:
+ packet.header = (type << 6) | 0x0008;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = packet.data >> 24;
+ packet.header = (type << 6) | 0x0003;
+ packet.address = (addr & ~3) + 4;
+ ret |= sisusb_send_packet(sisusb, 6, &packet);
+ *data |= ((packet.data & 0xffff) << 8);
}
return ret;
}
static int sisusb_read_memio_long(struct sisusb_usb_data *sisusb, int type,
- u32 addr, u32 *data)
+ u32 addr, u32 *data)
{
struct sisusb_packet packet;
int ret = 0;
@@ -1102,45 +1068,45 @@ static int sisusb_read_memio_long(struct sisusb_usb_data *sisusb, int type,
packet.address = addr & ~3;
switch (addr & 3) {
- case 0:
- packet.header = (type << 6) | 0x000f;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = packet.data;
- break;
- case 1:
- packet.header = (type << 6) | 0x000e;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = packet.data >> 8;
- packet.header = (type << 6) | 0x0001;
- packet.address = (addr & ~3) + 4;
- ret |= sisusb_send_packet(sisusb, 6, &packet);
- *data |= (packet.data << 24);
- break;
- case 2:
- packet.header = (type << 6) | 0x000c;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = packet.data >> 16;
- packet.header = (type << 6) | 0x0003;
- packet.address = (addr & ~3) + 4;
- ret |= sisusb_send_packet(sisusb, 6, &packet);
- *data |= (packet.data << 16);
- break;
- case 3:
- packet.header = (type << 6) | 0x0008;
- ret = sisusb_send_packet(sisusb, 6, &packet);
- *data = packet.data >> 24;
- packet.header = (type << 6) | 0x0007;
- packet.address = (addr & ~3) + 4;
- ret |= sisusb_send_packet(sisusb, 6, &packet);
- *data |= (packet.data << 8);
+ case 0:
+ packet.header = (type << 6) | 0x000f;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = packet.data;
+ break;
+ case 1:
+ packet.header = (type << 6) | 0x000e;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = packet.data >> 8;
+ packet.header = (type << 6) | 0x0001;
+ packet.address = (addr & ~3) + 4;
+ ret |= sisusb_send_packet(sisusb, 6, &packet);
+ *data |= (packet.data << 24);
+ break;
+ case 2:
+ packet.header = (type << 6) | 0x000c;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = packet.data >> 16;
+ packet.header = (type << 6) | 0x0003;
+ packet.address = (addr & ~3) + 4;
+ ret |= sisusb_send_packet(sisusb, 6, &packet);
+ *data |= (packet.data << 16);
+ break;
+ case 3:
+ packet.header = (type << 6) | 0x0008;
+ ret = sisusb_send_packet(sisusb, 6, &packet);
+ *data = packet.data >> 24;
+ packet.header = (type << 6) | 0x0007;
+ packet.address = (addr & ~3) + 4;
+ ret |= sisusb_send_packet(sisusb, 6, &packet);
+ *data |= (packet.data << 8);
}
return ret;
}
static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
- char *kernbuffer, int length,
- char __user *userbuffer, ssize_t *bytes_read)
+ char *kernbuffer, int length, char __user *userbuffer,
+ ssize_t *bytes_read)
{
int ret = 0;
char buf[4];
@@ -1152,34 +1118,27 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
length &= 0x00ffffff;
while (length) {
-
- switch (length) {
-
+ switch (length) {
case 1:
-
ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM,
- addr, &buf[0]);
+ addr, &buf[0]);
if (!ret) {
(*bytes_read)++;
if (userbuffer) {
- if (put_user(buf[0],
- (u8 __user *)userbuffer)) {
+ if (put_user(buf[0], (u8 __user *)userbuffer))
return -EFAULT;
- }
- } else {
+ } else
kernbuffer[0] = buf[0];
- }
}
return ret;
case 2:
ret |= sisusb_read_memio_word(sisusb, SISUSB_TYPE_MEM,
- addr, &swap16);
+ addr, &swap16);
if (!ret) {
(*bytes_read) += 2;
if (userbuffer) {
- if (put_user(swap16,
- (u16 __user *)userbuffer))
+ if (put_user(swap16, (u16 __user *)userbuffer))
return -EFAULT;
} else {
*((u16 *)kernbuffer) = swap16;
@@ -1189,7 +1148,7 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
case 3:
ret |= sisusb_read_memio_24bit(sisusb, SISUSB_TYPE_MEM,
- addr, &swap32);
+ addr, &swap32);
if (!ret) {
(*bytes_read) += 3;
#ifdef __BIG_ENDIAN
@@ -1202,7 +1161,8 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
buf[0] = swap32 & 0xff;
#endif
if (userbuffer) {
- if (copy_to_user(userbuffer, &buf[0], 3))
+ if (copy_to_user(userbuffer,
+ &buf[0], 3))
return -EFAULT;
} else {
kernbuffer[0] = buf[0];
@@ -1214,12 +1174,11 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
default:
ret |= sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM,
- addr, &swap32);
+ addr, &swap32);
if (!ret) {
(*bytes_read) += 4;
if (userbuffer) {
- if (put_user(swap32,
- (u32 __user *)userbuffer))
+ if (put_user(swap32, (u32 __user *)userbuffer))
return -EFAULT;
userbuffer += 4;
@@ -1230,10 +1189,9 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
addr += 4;
length -= 4;
}
- }
-
- if (ret)
- break;
+ }
+ if (ret)
+ break;
}
return ret;
@@ -1242,40 +1200,39 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
/* High level: Gfx (indexed) register access */
#ifdef INCL_SISUSB_CON
-int
-sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
+int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
{
return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
}
-int
-sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
+int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
{
return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
}
#endif
-int
-sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, u8 index, u8 data)
+int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+ u8 index, u8 data)
{
int ret;
+
ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, index);
ret |= sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, data);
return ret;
}
-int
-sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, u8 index, u8 *data)
+int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+ u8 index, u8 *data)
{
int ret;
+
ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, index);
ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, data);
return ret;
}
-int
-sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
- u8 myand, u8 myor)
+int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
+ u8 myand, u8 myor)
{
int ret;
u8 tmp;
@@ -1288,12 +1245,12 @@ sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
return ret;
}
-static int
-sisusb_setidxregmask(struct sisusb_usb_data *sisusb, int port, u8 idx,
- u8 data, u8 mask)
+static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
+ int port, u8 idx, u8 data, u8 mask)
{
int ret;
u8 tmp;
+
ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, idx);
ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, &tmp);
tmp &= ~(mask);
@@ -1302,75 +1259,76 @@ sisusb_setidxregmask(struct sisusb_usb_data *sisusb, int port, u8 idx,
return ret;
}
-int
-sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, u8 index, u8 myor)
+int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
+ u8 index, u8 myor)
{
- return(sisusb_setidxregandor(sisusb, port, index, 0xff, myor));
+ return sisusb_setidxregandor(sisusb, port, index, 0xff, myor);
}
-int
-sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, u8 idx, u8 myand)
+int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
+ u8 idx, u8 myand)
{
- return(sisusb_setidxregandor(sisusb, port, idx, myand, 0x00));
+ return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00);
}
/* Write/read video ram */
#ifdef INCL_SISUSB_CON
-int
-sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data)
+int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data)
{
- return(sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data));
+ return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data);
}
-int
-sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data)
+int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data)
{
- return(sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data));
+ return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data);
}
-int
-sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
- u32 dest, int length, size_t *bytes_written)
+int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
+ u32 dest, int length, size_t *bytes_written)
{
- return(sisusb_write_mem_bulk(sisusb, dest, src, length, NULL, 0, bytes_written));
+ return sisusb_write_mem_bulk(sisusb, dest, src, length,
+ NULL, 0, bytes_written);
}
#ifdef SISUSBENDIANTEST
-int
-sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
- u32 src, int length, size_t *bytes_written)
+int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
+ u32 src, int length, size_t *bytes_written)
{
- return(sisusb_read_mem_bulk(sisusb, src, dest, length, NULL, bytes_written));
+ return sisusb_read_mem_bulk(sisusb, src, dest, length,
+ NULL, bytes_written);
}
#endif
#endif
#ifdef SISUSBENDIANTEST
-static void
-sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
-{
- static char srcbuffer[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
- char destbuffer[10];
- size_t dummy;
- int i,j;
-
- sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy);
-
- for(i = 1; i <= 7; i++) {
- dev_dbg(&sisusb->sisusb_dev->dev, "sisusb: rwtest %d bytes\n", i);
- sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase, i, &dummy);
- for(j = 0; j < i; j++) {
- dev_dbg(&sisusb->sisusb_dev->dev, "rwtest read[%d] = %x\n", j, destbuffer[j]);
+static void sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
+{
+ static char srcbuffer[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
+ char destbuffer[10];
+ size_t dummy;
+ int i, j;
+
+ sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy);
+
+ for (i = 1; i <= 7; i++) {
+ dev_dbg(&sisusb->sisusb_dev->dev,
+ "sisusb: rwtest %d bytes\n", i);
+ sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase,
+ i, &dummy);
+ for (j = 0; j < i; j++) {
+ dev_dbg(&sisusb->sisusb_dev->dev,
+ "rwtest read[%d] = %x\n",
+ j, destbuffer[j]);
+ }
}
- }
}
#endif
/* access pci config registers (reg numbers 0, 4, 8, etc) */
-static int
-sisusb_write_pci_config(struct sisusb_usb_data *sisusb, int regnum, u32 data)
+static int sisusb_write_pci_config(struct sisusb_usb_data *sisusb,
+ int regnum, u32 data)
{
struct sisusb_packet packet;
int ret;
@@ -1382,8 +1340,8 @@ sisusb_write_pci_config(struct sisusb_usb_data *sisusb, int regnum, u32 data)
return ret;
}
-static int
-sisusb_read_pci_config(struct sisusb_usb_data *sisusb, int regnum, u32 *data)
+static int sisusb_read_pci_config(struct sisusb_usb_data *sisusb,
+ int regnum, u32 *data)
{
struct sisusb_packet packet;
int ret;
@@ -1397,8 +1355,8 @@ sisusb_read_pci_config(struct sisusb_usb_data *sisusb, int regnum, u32 *data)
/* Clear video RAM */
-static int
-sisusb_clear_vram(struct sisusb_usb_data *sisusb, u32 address, int length)
+static int sisusb_clear_vram(struct sisusb_usb_data *sisusb,
+ u32 address, int length)
{
int ret, i;
ssize_t j;
@@ -1416,7 +1374,8 @@ sisusb_clear_vram(struct sisusb_usb_data *sisusb, u32 address, int length)
return 0;
/* allocate free buffer/urb and clear the buffer */
- if ((i = sisusb_alloc_outbuf(sisusb)) < 0)
+ i = sisusb_alloc_outbuf(sisusb);
+ if (i < 0)
return -EBUSY;
memset(sisusb->obuf[i], 0, sisusb->obufsize);
@@ -1437,20 +1396,19 @@ sisusb_clear_vram(struct sisusb_usb_data *sisusb, u32 address, int length)
* a defined mode (640x480@60Hz)
*/
-#define GETREG(r,d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
-#define SETREG(r,d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
-#define SETIREG(r,i,d) sisusb_setidxreg(sisusb, r, i, d)
-#define GETIREG(r,i,d) sisusb_getidxreg(sisusb, r, i, d)
-#define SETIREGOR(r,i,o) sisusb_setidxregor(sisusb, r, i, o)
-#define SETIREGAND(r,i,a) sisusb_setidxregand(sisusb, r, i, a)
-#define SETIREGANDOR(r,i,a,o) sisusb_setidxregandor(sisusb, r, i, a, o)
-#define READL(a,d) sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
-#define WRITEL(a,d) sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
-#define READB(a,d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
-#define WRITEB(a,d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
-
-static int
-sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype)
+#define GETREG(r, d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
+#define SETREG(r, d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
+#define SETIREG(r, i, d) sisusb_setidxreg(sisusb, r, i, d)
+#define GETIREG(r, i, d) sisusb_getidxreg(sisusb, r, i, d)
+#define SETIREGOR(r, i, o) sisusb_setidxregor(sisusb, r, i, o)
+#define SETIREGAND(r, i, a) sisusb_setidxregand(sisusb, r, i, a)
+#define SETIREGANDOR(r, i, a, o) sisusb_setidxregandor(sisusb, r, i, a, o)
+#define READL(a, d) sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
+#define WRITEL(a, d) sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
+#define READB(a, d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
+#define WRITEB(a, d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
+
+static int sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype)
{
int ret;
u8 tmp8;
@@ -1480,8 +1438,8 @@ sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype)
return ret;
}
-static int
-sisusb_getbuswidth(struct sisusb_usb_data *sisusb, int *bw, int *chab)
+static int sisusb_getbuswidth(struct sisusb_usb_data *sisusb,
+ int *bw, int *chab)
{
int ret;
u8 ramtype, done = 0;
@@ -1526,7 +1484,7 @@ sisusb_getbuswidth(struct sisusb_usb_data *sisusb, int *bw, int *chab)
}
if ((t1 != 0x456789ab) || (t0 != 0x01234567)) {
*chab = 1; *bw = 64;
- ret |= SETIREGANDOR(SISSR, 0x14, 0xfc,0x01);
+ ret |= SETIREGANDOR(SISSR, 0x14, 0xfc, 0x01);
ret |= sisusb_triggersr16(sisusb, ramtype);
ret |= WRITEL(ramptr + 0, 0x89abcdef);
@@ -1593,8 +1551,7 @@ sisusb_getbuswidth(struct sisusb_usb_data *sisusb, int *bw, int *chab)
return ret;
}
-static int
-sisusb_verify_mclk(struct sisusb_usb_data *sisusb)
+static int sisusb_verify_mclk(struct sisusb_usb_data *sisusb)
{
int ret = 0;
u32 ramptr = SISUSB_PCI_MEMBASE;
@@ -1622,10 +1579,8 @@ sisusb_verify_mclk(struct sisusb_usb_data *sisusb)
return ret;
}
-static int
-sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret, int index,
- u8 rankno, u8 chab, const u8 dramtype[][5],
- int bw)
+static int sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret,
+ int index, u8 rankno, u8 chab, const u8 dramtype[][5], int bw)
{
int ret = 0, ranksize;
u8 tmp;
@@ -1641,7 +1596,9 @@ sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret, int index,
return ret;
tmp = 0;
- while ((ranksize >>= 1) > 0) tmp += 0x10;
+ while ((ranksize >>= 1) > 0)
+ tmp += 0x10;
+
tmp |= ((rankno - 1) << 2);
tmp |= ((bw / 64) & 0x02);
tmp |= (chab & 0x01);
@@ -1654,8 +1611,8 @@ sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret, int index,
return ret;
}
-static int
-sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret, u32 inc, int testn)
+static int sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret,
+ u32 inc, int testn)
{
int ret = 0, i;
u32 j, tmp;
@@ -1669,7 +1626,9 @@ sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret, u32 inc, int testn)
for (i = 0, j = 0; i < testn; i++) {
ret |= READL(sisusb->vrambase + j, &tmp);
- if (tmp != j) return ret;
+ if (tmp != j)
+ return ret;
+
j += inc;
}
@@ -1677,9 +1636,8 @@ sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret, u32 inc, int testn)
return ret;
}
-static int
-sisusb_check_ranks(struct sisusb_usb_data *sisusb, int *iret, int rankno,
- int idx, int bw, const u8 rtype[][5])
+static int sisusb_check_ranks(struct sisusb_usb_data *sisusb,
+ int *iret, int rankno, int idx, int bw, const u8 rtype[][5])
{
int ret = 0, i, i2ret;
u32 inc;
@@ -1687,10 +1645,8 @@ sisusb_check_ranks(struct sisusb_usb_data *sisusb, int *iret, int rankno,
*iret = 0;
for (i = rankno; i >= 1; i--) {
- inc = 1 << (rtype[idx][2] +
- rtype[idx][1] +
- rtype[idx][0] +
- bw / 64 + i);
+ inc = 1 << (rtype[idx][2] + rtype[idx][1] + rtype[idx][0] +
+ bw / 64 + i);
ret |= sisusb_check_rbc(sisusb, &i2ret, inc, 2);
if (!i2ret)
return ret;
@@ -1710,9 +1666,8 @@ sisusb_check_ranks(struct sisusb_usb_data *sisusb, int *iret, int rankno,
return ret;
}
-static int
-sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret, int bw,
- int chab)
+static int sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret,
+ int bw, int chab)
{
int ret = 0, i2ret = 0, i, j;
static const u8 sdramtype[13][5] = {
@@ -1736,13 +1691,13 @@ sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret, int bw,
for (i = 0; i < 13; i++) {
ret |= SETIREGANDOR(SISSR, 0x13, 0x80, sdramtype[i][4]);
for (j = 2; j > 0; j--) {
- ret |= sisusb_set_rank(sisusb, &i2ret, i, j,
- chab, sdramtype, bw);
+ ret |= sisusb_set_rank(sisusb, &i2ret, i, j, chab,
+ sdramtype, bw);
if (!i2ret)
continue;
- ret |= sisusb_check_ranks(sisusb, &i2ret, j, i,
- bw, sdramtype);
+ ret |= sisusb_check_ranks(sisusb, &i2ret, j, i, bw,
+ sdramtype);
if (i2ret) {
*iret = 0; /* ram size found */
return ret;
@@ -1753,8 +1708,8 @@ sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret, int bw,
return ret;
}
-static int
-sisusb_setup_screen(struct sisusb_usb_data *sisusb, int clrall, int drwfr)
+static int sisusb_setup_screen(struct sisusb_usb_data *sisusb,
+ int clrall, int drwfr)
{
int ret = 0;
u32 address;
@@ -1775,47 +1730,47 @@ sisusb_setup_screen(struct sisusb_usb_data *sisusb, int clrall, int drwfr)
for (i = 0; i < modex; i++) {
address = sisusb->vrambase + (i * bpp);
ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
- address, 0xf100);
+ address, 0xf100);
address += (modex * (modey-1) * bpp);
ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
- address, 0xf100);
+ address, 0xf100);
}
for (i = 0; i < modey; i++) {
address = sisusb->vrambase + ((i * modex) * bpp);
ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
- address, 0xf100);
+ address, 0xf100);
address += ((modex - 1) * bpp);
ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
- address, 0xf100);
+ address, 0xf100);
}
}
return ret;
}
-static int
-sisusb_set_default_mode(struct sisusb_usb_data *sisusb, int touchengines)
+static int sisusb_set_default_mode(struct sisusb_usb_data *sisusb,
+ int touchengines)
{
int ret = 0, i, j, modex, modey, bpp, du;
u8 sr31, cr63, tmp8;
static const char attrdata[] = {
- 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
- 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
- 0x01,0x00,0x00,0x00
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x01, 0x00, 0x00, 0x00
};
static const char crtcrdata[] = {
- 0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
- 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
- 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xea, 0x8c, 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3,
0xff
};
static const char grcdata[] = {
- 0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0f,
0xff
};
static const char crtcdata[] = {
- 0x5f,0x4f,0x4f,0x83,0x55,0x81,0x0b,0x3e,
- 0xe9,0x8b,0xdf,0xe8,0x0c,0x00,0x00,0x05,
+ 0x5f, 0x4f, 0x4f, 0x83, 0x55, 0x81, 0x0b, 0x3e,
+ 0xe9, 0x8b, 0xdf, 0xe8, 0x0c, 0x00, 0x00, 0x05,
0x00
};
@@ -1858,28 +1813,32 @@ sisusb_set_default_mode(struct sisusb_usb_data *sisusb, int touchengines)
SETIREGAND(SISSR, 0x37, 0xfe);
SETREG(SISMISCW, 0xef); /* sync */
SETIREG(SISCR, 0x11, 0x00); /* crtc */
- for (j = 0x00, i = 0; i <= 7; i++, j++) {
+ for (j = 0x00, i = 0; i <= 7; i++, j++)
SETIREG(SISCR, j, crtcdata[i]);
- }
- for (j = 0x10; i <= 10; i++, j++) {
+
+ for (j = 0x10; i <= 10; i++, j++)
SETIREG(SISCR, j, crtcdata[i]);
- }
- for (j = 0x15; i <= 12; i++, j++) {
+
+ for (j = 0x15; i <= 12; i++, j++)
SETIREG(SISCR, j, crtcdata[i]);
- }
- for (j = 0x0A; i <= 15; i++, j++) {
+
+ for (j = 0x0A; i <= 15; i++, j++)
SETIREG(SISSR, j, crtcdata[i]);
- }
+
SETIREG(SISSR, 0x0E, (crtcdata[16] & 0xE0));
SETIREGANDOR(SISCR, 0x09, 0x5f, ((crtcdata[16] & 0x01) << 5));
SETIREG(SISCR, 0x14, 0x4f);
du = (modex / 16) * (bpp * 2); /* offset/pitch */
- if (modex % 16) du += bpp;
+ if (modex % 16)
+ du += bpp;
+
SETIREGANDOR(SISSR, 0x0e, 0xf0, ((du >> 8) & 0x0f));
SETIREG(SISCR, 0x13, (du & 0xff));
du <<= 5;
tmp8 = du >> 8;
- if (du & 0xff) tmp8++;
+ if (du & 0xff)
+ tmp8++;
+
SETIREG(SISSR, 0x10, tmp8);
SETIREG(SISSR, 0x31, 0x00); /* VCLK */
SETIREG(SISSR, 0x2b, 0x1b);
@@ -1925,8 +1884,7 @@ sisusb_set_default_mode(struct sisusb_usb_data *sisusb, int touchengines)
return ret;
}
-static int
-sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
+static int sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
{
int ret = 0, i, j, bw, chab, iret, retry = 3;
u8 tmp8, ramtype;
@@ -1970,7 +1928,8 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
ret |= GETREG(SISMISCR, &tmp8);
ret |= SETREG(SISMISCW, (tmp8 | 0x01));
- if (ret) continue;
+ if (ret)
+ continue;
/* Reset registers */
ret |= SETIREGAND(SISCR, 0x5b, 0xdf);
@@ -1979,23 +1938,23 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
ret |= SETREG(SISMISCW, 0x67);
- for (i = 0x06; i <= 0x1f; i++) {
+ for (i = 0x06; i <= 0x1f; i++)
ret |= SETIREG(SISSR, i, 0x00);
- }
- for (i = 0x21; i <= 0x27; i++) {
+
+ for (i = 0x21; i <= 0x27; i++)
ret |= SETIREG(SISSR, i, 0x00);
- }
- for (i = 0x31; i <= 0x3d; i++) {
+
+ for (i = 0x31; i <= 0x3d; i++)
ret |= SETIREG(SISSR, i, 0x00);
- }
- for (i = 0x12; i <= 0x1b; i++) {
+
+ for (i = 0x12; i <= 0x1b; i++)
ret |= SETIREG(SISSR, i, 0x00);
- }
- for (i = 0x79; i <= 0x7c; i++) {
+
+ for (i = 0x79; i <= 0x7c; i++)
ret |= SETIREG(SISCR, i, 0x00);
- }
- if (ret) continue;
+ if (ret)
+ continue;
ret |= SETIREG(SISCR, 0x63, 0x80);
@@ -2013,13 +1972,16 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
ret |= SETIREG(SISSR, 0x07, 0x18);
ret |= SETIREG(SISSR, 0x11, 0x0f);
- if (ret) continue;
+ if (ret)
+ continue;
for (i = 0x15, j = 0; i <= 0x1b; i++, j++) {
- ret |= SETIREG(SISSR, i, ramtypetable1[(j*4) + ramtype]);
+ ret |= SETIREG(SISSR, i,
+ ramtypetable1[(j*4) + ramtype]);
}
for (i = 0x40, j = 0; i <= 0x44; i++, j++) {
- ret |= SETIREG(SISCR, i, ramtypetable2[(j*4) + ramtype]);
+ ret |= SETIREG(SISCR, i,
+ ramtypetable2[(j*4) + ramtype]);
}
ret |= SETIREG(SISCR, 0x49, 0xaa);
@@ -2036,7 +1998,8 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
ret |= SETIREGAND(SISCAP, 0x3f, 0xef);
- if (ret) continue;
+ if (ret)
+ continue;
ret |= SETIREG(SISPART1, 0x00, 0x00);
@@ -2058,7 +2021,8 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
ret |= SETIREG(SISSR, 0x32, 0x11);
ret |= SETIREG(SISSR, 0x33, 0x00);
- if (ret) continue;
+ if (ret)
+ continue;
ret |= SETIREG(SISCR, 0x83, 0x00);
@@ -2080,13 +2044,15 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
if (ramtype <= 1) {
ret |= sisusb_get_sdram_size(sisusb, &iret, bw, chab);
if (iret) {
- dev_err(&sisusb->sisusb_dev->dev,"RAM size detection failed, assuming 8MB video RAM\n");
- ret |= SETIREG(SISSR,0x14,0x31);
+ dev_err(&sisusb->sisusb_dev->dev,
+ "RAM size detection failed, assuming 8MB video RAM\n");
+ ret |= SETIREG(SISSR, 0x14, 0x31);
/* TODO */
}
} else {
- dev_err(&sisusb->sisusb_dev->dev, "DDR RAM device found, assuming 8MB video RAM\n");
- ret |= SETIREG(SISSR,0x14,0x31);
+ dev_err(&sisusb->sisusb_dev->dev,
+ "DDR RAM device found, assuming 8MB video RAM\n");
+ ret |= SETIREG(SISSR, 0x14, 0x31);
/* *** TODO *** */
}
@@ -2117,8 +2083,7 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
#undef READL
#undef WRITEL
-static void
-sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
+static void sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
{
u8 tmp8, tmp82, ramtype;
int bw = 0;
@@ -2127,7 +2092,7 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
static const char ram_dynamictype[4] = {'D', 'G', 'D', 'G'};
static const int busSDR[4] = {64, 64, 128, 128};
static const int busDDR[4] = {32, 32, 64, 64};
- static const int busDDRA[4] = {64+32, 64+32 , (64+32)*2, (64+32)*2};
+ static const int busDDRA[4] = {64+32, 64+32, (64+32)*2, (64+32)*2};
sisusb_getidxreg(sisusb, SISSR, 0x14, &tmp8);
sisusb_getidxreg(sisusb, SISSR, 0x15, &tmp82);
@@ -2135,35 +2100,38 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
sisusb->vramsize = (1 << ((tmp8 & 0xf0) >> 4)) * 1024 * 1024;
ramtype &= 0x03;
switch ((tmp8 >> 2) & 0x03) {
- case 0: ramtypetext1 = "1 ch/1 r";
- if (tmp82 & 0x10) {
+ case 0:
+ ramtypetext1 = "1 ch/1 r";
+ if (tmp82 & 0x10)
bw = 32;
- } else {
+ else
bw = busSDR[(tmp8 & 0x03)];
- }
+
break;
- case 1: ramtypetext1 = "1 ch/2 r";
+ case 1:
+ ramtypetext1 = "1 ch/2 r";
sisusb->vramsize <<= 1;
bw = busSDR[(tmp8 & 0x03)];
break;
- case 2: ramtypetext1 = "asymmeric";
+ case 2:
+ ramtypetext1 = "asymmetric";
sisusb->vramsize += sisusb->vramsize/2;
bw = busDDRA[(tmp8 & 0x03)];
break;
- case 3: ramtypetext1 = "2 channel";
+ case 3:
+ ramtypetext1 = "2 channel";
sisusb->vramsize <<= 1;
bw = busDDR[(tmp8 & 0x03)];
break;
}
-
- dev_info(&sisusb->sisusb_dev->dev, "%dMB %s %cDR S%cRAM, bus width %d\n",
- sisusb->vramsize >> 20, ramtypetext1,
- ram_datarate[ramtype], ram_dynamictype[ramtype], bw);
+ dev_info(&sisusb->sisusb_dev->dev,
+ "%dMB %s %cDR S%cRAM, bus width %d\n",
+ sisusb->vramsize >> 20, ramtypetext1,
+ ram_datarate[ramtype], ram_dynamictype[ramtype], bw);
}
-static int
-sisusb_do_init_gfxdevice(struct sisusb_usb_data *sisusb)
+static int sisusb_do_init_gfxdevice(struct sisusb_usb_data *sisusb)
{
struct sisusb_packet packet;
int ret;
@@ -2241,8 +2209,7 @@ sisusb_do_init_gfxdevice(struct sisusb_usb_data *sisusb)
* of the graphics board.
*/
-static int
-sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
+static int sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
{
int ret = 0, test = 0;
u32 tmp32;
@@ -2250,16 +2217,25 @@ sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
if (sisusb->devinit == 1) {
/* Read PCI BARs and see if they have been set up */
ret |= sisusb_read_pci_config(sisusb, 0x10, &tmp32);
- if (ret) return ret;
- if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MEMBASE) test++;
+ if (ret)
+ return ret;
+
+ if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MEMBASE)
+ test++;
ret |= sisusb_read_pci_config(sisusb, 0x14, &tmp32);
- if (ret) return ret;
- if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MMIOBASE) test++;
+ if (ret)
+ return ret;
+
+ if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MMIOBASE)
+ test++;
ret |= sisusb_read_pci_config(sisusb, 0x18, &tmp32);
- if (ret) return ret;
- if ((tmp32 & 0xfffffff0) == SISUSB_PCI_IOPORTBASE) test++;
+ if (ret)
+ return ret;
+
+ if ((tmp32 & 0xfffffff0) == SISUSB_PCI_IOPORTBASE)
+ test++;
}
/* No? So reset the device */
@@ -2289,20 +2265,20 @@ sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
#ifdef INCL_SISUSB_CON
/* Set up default text mode:
- - Set text mode (0x03)
- - Upload default font
- - Upload user font (if available)
-*/
+ * - Set text mode (0x03)
+ * - Upload default font
+ * - Upload user font (if available)
+ */
-int
-sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
+int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
{
int ret = 0, slot = sisusb->font_slot, i;
const struct font_desc *myfont;
u8 *tempbuf;
u16 *tempbufb;
size_t written;
- static const char bootstring[] = "SiSUSB VGA text console, (C) 2005 Thomas Winischhofer.";
+ static const char bootstring[] =
+ "SiSUSB VGA text console, (C) 2005 Thomas Winischhofer.";
static const char bootlogo[] = "(o_ //\\ V_/_";
/* sisusb->lock is down */
@@ -2328,7 +2304,8 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
memcpy(tempbuf + (i * 32), myfont->data + (i * 16), 16);
/* Upload default font */
- ret = sisusbcon_do_font_op(sisusb, 1, 0, tempbuf, 8192, 0, 1, NULL, 16, 0);
+ ret = sisusbcon_do_font_op(sisusb, 1, 0, tempbuf, 8192,
+ 0, 1, NULL, 16, 0);
vfree(tempbuf);
@@ -2366,7 +2343,7 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
*(tempbufb++) = 0x0700 | bootstring[i++];
ret |= sisusb_copy_memory(sisusb, tempbuf,
- sisusb->vrambase, 8192, &written);
+ sisusb->vrambase, 8192, &written);
vfree(tempbuf);
@@ -2375,12 +2352,13 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
} else if (sisusb->scrbuf) {
ret |= sisusb_copy_memory(sisusb, (char *)sisusb->scrbuf,
- sisusb->vrambase, sisusb->scrbuf_size, &written);
+ sisusb->vrambase, sisusb->scrbuf_size,
+ &written);
}
if (sisusb->sisusb_cursor_size_from >= 0 &&
- sisusb->sisusb_cursor_size_to >= 0) {
+ sisusb->sisusb_cursor_size_to >= 0) {
sisusb_setidxreg(sisusb, SISCR, 0x0a,
sisusb->sisusb_cursor_size_from);
sisusb_setidxregandor(sisusb, SISCR, 0x0b, 0xe0,
@@ -2392,7 +2370,8 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
}
slot = sisusb->sisusb_cursor_loc;
- if(slot < 0) slot = 0;
+ if (slot < 0)
+ slot = 0;
sisusb->sisusb_cursor_loc = -1;
sisusb->bad_cursor_pos = 1;
@@ -2413,22 +2392,19 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
/* fops */
-static int
-sisusb_open(struct inode *inode, struct file *file)
+static int sisusb_open(struct inode *inode, struct file *file)
{
struct sisusb_usb_data *sisusb;
struct usb_interface *interface;
int subminor = iminor(inode);
interface = usb_find_interface(&sisusb_driver, subminor);
- if (!interface) {
+ if (!interface)
return -ENODEV;
- }
sisusb = usb_get_intfdata(interface);
- if (!sisusb) {
+ if (!sisusb)
return -ENODEV;
- }
mutex_lock(&sisusb->lock);
@@ -2444,15 +2420,17 @@ sisusb_open(struct inode *inode, struct file *file)
if (!sisusb->devinit) {
if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
- sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
+ sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
- dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
+ dev_err(&sisusb->sisusb_dev->dev,
+ "Failed to initialize device\n");
return -EIO;
}
} else {
mutex_unlock(&sisusb->lock);
- dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
+ dev_err(&sisusb->sisusb_dev->dev,
+ "Device not attached to USB 2.0 hub\n");
return -EIO;
}
}
@@ -2469,8 +2447,7 @@ sisusb_open(struct inode *inode, struct file *file)
return 0;
}
-void
-sisusb_delete(struct kref *kref)
+void sisusb_delete(struct kref *kref)
{
struct sisusb_usb_data *sisusb = to_sisusb_dev(kref);
@@ -2488,8 +2465,7 @@ sisusb_delete(struct kref *kref)
kfree(sisusb);
}
-static int
-sisusb_release(struct inode *inode, struct file *file)
+static int sisusb_release(struct inode *inode, struct file *file)
{
struct sisusb_usb_data *sisusb;
@@ -2516,8 +2492,8 @@ sisusb_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t
-sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t sisusb_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
{
struct sisusb_usb_data *sisusb;
ssize_t bytes_read = 0;
@@ -2539,11 +2515,10 @@ sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
}
if ((*ppos) >= SISUSB_PCI_PSEUDO_IOPORTBASE &&
- (*ppos) < SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
+ (*ppos) < SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
- address = (*ppos) -
- SISUSB_PCI_PSEUDO_IOPORTBASE +
- SISUSB_PCI_IOPORTBASE;
+ address = (*ppos) - SISUSB_PCI_PSEUDO_IOPORTBASE +
+ SISUSB_PCI_IOPORTBASE;
/* Read i/o ports
* Byte, word and long(32) can be read. As this
@@ -2551,82 +2526,77 @@ sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
* in machine-endianness.
*/
switch (count) {
+ case 1:
+ if (sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO,
+ address, &buf8))
+ errno = -EIO;
+ else if (put_user(buf8, (u8 __user *)buffer))
+ errno = -EFAULT;
+ else
+ bytes_read = 1;
- case 1:
- if (sisusb_read_memio_byte(sisusb,
- SISUSB_TYPE_IO,
- address, &buf8))
- errno = -EIO;
- else if (put_user(buf8, (u8 __user *)buffer))
- errno = -EFAULT;
- else
- bytes_read = 1;
-
- break;
+ break;
- case 2:
- if (sisusb_read_memio_word(sisusb,
- SISUSB_TYPE_IO,
- address, &buf16))
- errno = -EIO;
- else if (put_user(buf16, (u16 __user *)buffer))
- errno = -EFAULT;
- else
- bytes_read = 2;
+ case 2:
+ if (sisusb_read_memio_word(sisusb, SISUSB_TYPE_IO,
+ address, &buf16))
+ errno = -EIO;
+ else if (put_user(buf16, (u16 __user *)buffer))
+ errno = -EFAULT;
+ else
+ bytes_read = 2;
- break;
+ break;
- case 4:
- if (sisusb_read_memio_long(sisusb,
- SISUSB_TYPE_IO,
- address, &buf32))
- errno = -EIO;
- else if (put_user(buf32, (u32 __user *)buffer))
- errno = -EFAULT;
- else
- bytes_read = 4;
+ case 4:
+ if (sisusb_read_memio_long(sisusb, SISUSB_TYPE_IO,
+ address, &buf32))
+ errno = -EIO;
+ else if (put_user(buf32, (u32 __user *)buffer))
+ errno = -EFAULT;
+ else
+ bytes_read = 4;
- break;
+ break;
- default:
- errno = -EIO;
+ default:
+ errno = -EIO;
}
- } else if ((*ppos) >= SISUSB_PCI_PSEUDO_MEMBASE &&
- (*ppos) < SISUSB_PCI_PSEUDO_MEMBASE + sisusb->vramsize) {
+ } else if ((*ppos) >= SISUSB_PCI_PSEUDO_MEMBASE && (*ppos) <
+ SISUSB_PCI_PSEUDO_MEMBASE + sisusb->vramsize) {
- address = (*ppos) -
- SISUSB_PCI_PSEUDO_MEMBASE +
- SISUSB_PCI_MEMBASE;
+ address = (*ppos) - SISUSB_PCI_PSEUDO_MEMBASE +
+ SISUSB_PCI_MEMBASE;
/* Read video ram
* Remember: Data delivered is never endian-corrected
*/
errno = sisusb_read_mem_bulk(sisusb, address,
- NULL, count, buffer, &bytes_read);
+ NULL, count, buffer, &bytes_read);
if (bytes_read)
errno = bytes_read;
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_MMIOBASE &&
- (*ppos) < SISUSB_PCI_PSEUDO_MMIOBASE + SISUSB_PCI_MMIOSIZE) {
+ (*ppos) < SISUSB_PCI_PSEUDO_MMIOBASE +
+ SISUSB_PCI_MMIOSIZE) {
- address = (*ppos) -
- SISUSB_PCI_PSEUDO_MMIOBASE +
- SISUSB_PCI_MMIOBASE;
+ address = (*ppos) - SISUSB_PCI_PSEUDO_MMIOBASE +
+ SISUSB_PCI_MMIOBASE;
/* Read MMIO
* Remember: Data delivered is never endian-corrected
*/
errno = sisusb_read_mem_bulk(sisusb, address,
- NULL, count, buffer, &bytes_read);
+ NULL, count, buffer, &bytes_read);
if (bytes_read)
errno = bytes_read;
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_PCIBASE &&
- (*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE + 0x5c) {
+ (*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE + 0x5c) {
if (count != 4) {
mutex_unlock(&sisusb->lock);
@@ -2658,9 +2628,8 @@ sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
return errno ? errno : bytes_read;
}
-static ssize_t
-sisusb_write(struct file *file, const char __user *buffer, size_t count,
- loff_t *ppos)
+static ssize_t sisusb_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
{
struct sisusb_usb_data *sisusb;
int errno = 0;
@@ -2682,11 +2651,10 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
}
if ((*ppos) >= SISUSB_PCI_PSEUDO_IOPORTBASE &&
- (*ppos) < SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
+ (*ppos) < SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
- address = (*ppos) -
- SISUSB_PCI_PSEUDO_IOPORTBASE +
- SISUSB_PCI_IOPORTBASE;
+ address = (*ppos) - SISUSB_PCI_PSEUDO_IOPORTBASE +
+ SISUSB_PCI_IOPORTBASE;
/* Write i/o ports
* Byte, word and long(32) can be written. As this
@@ -2694,53 +2662,49 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
* in machine-endianness.
*/
switch (count) {
+ case 1:
+ if (get_user(buf8, (u8 __user *)buffer))
+ errno = -EFAULT;
+ else if (sisusb_write_memio_byte(sisusb,
+ SISUSB_TYPE_IO, address, buf8))
+ errno = -EIO;
+ else
+ bytes_written = 1;
- case 1:
- if (get_user(buf8, (u8 __user *)buffer))
- errno = -EFAULT;
- else if (sisusb_write_memio_byte(sisusb,
- SISUSB_TYPE_IO,
- address, buf8))
- errno = -EIO;
- else
- bytes_written = 1;
-
- break;
+ break;
- case 2:
- if (get_user(buf16, (u16 __user *)buffer))
- errno = -EFAULT;
- else if (sisusb_write_memio_word(sisusb,
- SISUSB_TYPE_IO,
- address, buf16))
- errno = -EIO;
- else
- bytes_written = 2;
+ case 2:
+ if (get_user(buf16, (u16 __user *)buffer))
+ errno = -EFAULT;
+ else if (sisusb_write_memio_word(sisusb,
+ SISUSB_TYPE_IO, address, buf16))
+ errno = -EIO;
+ else
+ bytes_written = 2;
- break;
+ break;
- case 4:
- if (get_user(buf32, (u32 __user *)buffer))
- errno = -EFAULT;
- else if (sisusb_write_memio_long(sisusb,
- SISUSB_TYPE_IO,
- address, buf32))
- errno = -EIO;
- else
- bytes_written = 4;
+ case 4:
+ if (get_user(buf32, (u32 __user *)buffer))
+ errno = -EFAULT;
+ else if (sisusb_write_memio_long(sisusb,
+ SISUSB_TYPE_IO, address, buf32))
+ errno = -EIO;
+ else
+ bytes_written = 4;
- break;
+ break;
- default:
- errno = -EIO;
+ default:
+ errno = -EIO;
}
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_MEMBASE &&
- (*ppos) < SISUSB_PCI_PSEUDO_MEMBASE + sisusb->vramsize) {
+ (*ppos) < SISUSB_PCI_PSEUDO_MEMBASE +
+ sisusb->vramsize) {
- address = (*ppos) -
- SISUSB_PCI_PSEUDO_MEMBASE +
- SISUSB_PCI_MEMBASE;
+ address = (*ppos) - SISUSB_PCI_PSEUDO_MEMBASE +
+ SISUSB_PCI_MEMBASE;
/* Write video ram.
* Buffer is copied 1:1, therefore, on big-endian
@@ -2749,17 +2713,17 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
* mode or if YUV data is being transferred).
*/
errno = sisusb_write_mem_bulk(sisusb, address, NULL,
- count, buffer, 0, &bytes_written);
+ count, buffer, 0, &bytes_written);
if (bytes_written)
errno = bytes_written;
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_MMIOBASE &&
- (*ppos) < SISUSB_PCI_PSEUDO_MMIOBASE + SISUSB_PCI_MMIOSIZE) {
+ (*ppos) < SISUSB_PCI_PSEUDO_MMIOBASE +
+ SISUSB_PCI_MMIOSIZE) {
- address = (*ppos) -
- SISUSB_PCI_PSEUDO_MMIOBASE +
- SISUSB_PCI_MMIOBASE;
+ address = (*ppos) - SISUSB_PCI_PSEUDO_MMIOBASE +
+ SISUSB_PCI_MMIOBASE;
/* Write MMIO.
* Buffer is copied 1:1, therefore, on big-endian
@@ -2767,13 +2731,14 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
* in advance.
*/
errno = sisusb_write_mem_bulk(sisusb, address, NULL,
- count, buffer, 0, &bytes_written);
+ count, buffer, 0, &bytes_written);
if (bytes_written)
errno = bytes_written;
} else if ((*ppos) >= SISUSB_PCI_PSEUDO_PCIBASE &&
- (*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE + SISUSB_PCI_PCONFSIZE) {
+ (*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE +
+ SISUSB_PCI_PCONFSIZE) {
if (count != 4) {
mutex_unlock(&sisusb->lock);
@@ -2807,8 +2772,7 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
return errno ? errno : bytes_written;
}
-static loff_t
-sisusb_lseek(struct file *file, loff_t offset, int orig)
+static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig)
{
struct sisusb_usb_data *sisusb;
loff_t ret;
@@ -2831,9 +2795,8 @@ sisusb_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
-static int
-sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y,
- unsigned long arg)
+static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
+ struct sisusb_command *y, unsigned long arg)
{
int retval, port, length;
u32 address;
@@ -2849,105 +2812,99 @@ sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y,
SISUSB_PCI_IOPORTBASE;
switch (y->operation) {
- case SUCMD_GET:
- retval = sisusb_getidxreg(sisusb, port,
- y->data0, &y->data1);
- if (!retval) {
- if (copy_to_user((void __user *)arg, y,
- sizeof(*y)))
- retval = -EFAULT;
- }
- break;
+ case SUCMD_GET:
+ retval = sisusb_getidxreg(sisusb, port, y->data0, &y->data1);
+ if (!retval) {
+ if (copy_to_user((void __user *)arg, y, sizeof(*y)))
+ retval = -EFAULT;
+ }
+ break;
- case SUCMD_SET:
- retval = sisusb_setidxreg(sisusb, port,
- y->data0, y->data1);
- break;
+ case SUCMD_SET:
+ retval = sisusb_setidxreg(sisusb, port, y->data0, y->data1);
+ break;
- case SUCMD_SETOR:
- retval = sisusb_setidxregor(sisusb, port,
- y->data0, y->data1);
- break;
+ case SUCMD_SETOR:
+ retval = sisusb_setidxregor(sisusb, port, y->data0, y->data1);
+ break;
- case SUCMD_SETAND:
- retval = sisusb_setidxregand(sisusb, port,
- y->data0, y->data1);
- break;
+ case SUCMD_SETAND:
+ retval = sisusb_setidxregand(sisusb, port, y->data0, y->data1);
+ break;
- case SUCMD_SETANDOR:
- retval = sisusb_setidxregandor(sisusb, port,
- y->data0, y->data1, y->data2);
- break;
+ case SUCMD_SETANDOR:
+ retval = sisusb_setidxregandor(sisusb, port, y->data0,
+ y->data1, y->data2);
+ break;
- case SUCMD_SETMASK:
- retval = sisusb_setidxregmask(sisusb, port,
- y->data0, y->data1, y->data2);
- break;
+ case SUCMD_SETMASK:
+ retval = sisusb_setidxregmask(sisusb, port, y->data0,
+ y->data1, y->data2);
+ break;
- case SUCMD_CLRSCR:
- /* Gfx core must be initialized */
- if (!sisusb->gfxinit)
- return -ENODEV;
+ case SUCMD_CLRSCR:
+ /* Gfx core must be initialized */
+ if (!sisusb->gfxinit)
+ return -ENODEV;
- length = (y->data0 << 16) | (y->data1 << 8) | y->data2;
- address = y->data3 -
- SISUSB_PCI_PSEUDO_MEMBASE +
+ length = (y->data0 << 16) | (y->data1 << 8) | y->data2;
+ address = y->data3 - SISUSB_PCI_PSEUDO_MEMBASE +
SISUSB_PCI_MEMBASE;
- retval = sisusb_clear_vram(sisusb, address, length);
- break;
+ retval = sisusb_clear_vram(sisusb, address, length);
+ break;
- case SUCMD_HANDLETEXTMODE:
- retval = 0;
+ case SUCMD_HANDLETEXTMODE:
+ retval = 0;
#ifdef INCL_SISUSB_CON
- /* Gfx core must be initialized, SiS_Pr must exist */
- if (!sisusb->gfxinit || !sisusb->SiS_Pr)
- return -ENODEV;
+ /* Gfx core must be initialized, SiS_Pr must exist */
+ if (!sisusb->gfxinit || !sisusb->SiS_Pr)
+ return -ENODEV;
- switch (y->data0) {
- case 0:
- retval = sisusb_reset_text_mode(sisusb, 0);
- break;
- case 1:
- sisusb->textmodedestroyed = 1;
- break;
- }
-#endif
+ switch (y->data0) {
+ case 0:
+ retval = sisusb_reset_text_mode(sisusb, 0);
+ break;
+ case 1:
+ sisusb->textmodedestroyed = 1;
break;
+ }
+#endif
+ break;
#ifdef INCL_SISUSB_CON
- case SUCMD_SETMODE:
- /* Gfx core must be initialized, SiS_Pr must exist */
- if (!sisusb->gfxinit || !sisusb->SiS_Pr)
- return -ENODEV;
+ case SUCMD_SETMODE:
+ /* Gfx core must be initialized, SiS_Pr must exist */
+ if (!sisusb->gfxinit || !sisusb->SiS_Pr)
+ return -ENODEV;
- retval = 0;
+ retval = 0;
- sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30;
- sisusb->SiS_Pr->sisusb = (void *)sisusb;
+ sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30;
+ sisusb->SiS_Pr->sisusb = (void *)sisusb;
- if (SiSUSBSetMode(sisusb->SiS_Pr, y->data3))
- retval = -EINVAL;
+ if (SiSUSBSetMode(sisusb->SiS_Pr, y->data3))
+ retval = -EINVAL;
- break;
+ break;
- case SUCMD_SETVESAMODE:
- /* Gfx core must be initialized, SiS_Pr must exist */
- if (!sisusb->gfxinit || !sisusb->SiS_Pr)
- return -ENODEV;
+ case SUCMD_SETVESAMODE:
+ /* Gfx core must be initialized, SiS_Pr must exist */
+ if (!sisusb->gfxinit || !sisusb->SiS_Pr)
+ return -ENODEV;
- retval = 0;
+ retval = 0;
- sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30;
- sisusb->SiS_Pr->sisusb = (void *)sisusb;
+ sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30;
+ sisusb->SiS_Pr->sisusb = (void *)sisusb;
- if (SiSUSBSetVESAMode(sisusb->SiS_Pr, y->data3))
- retval = -EINVAL;
+ if (SiSUSBSetVESAMode(sisusb->SiS_Pr, y->data3))
+ retval = -EINVAL;
- break;
+ break;
#endif
- default:
- retval = -EINVAL;
+ default:
+ retval = -EINVAL;
}
if (retval > 0)
@@ -2956,8 +2913,7 @@ sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y,
return retval;
}
-static long
-sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct sisusb_usb_data *sisusb;
struct sisusb_info x;
@@ -2978,52 +2934,51 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
switch (cmd) {
+ case SISUSB_GET_CONFIG_SIZE:
- case SISUSB_GET_CONFIG_SIZE:
-
- if (put_user(sizeof(x), argp))
- retval = -EFAULT;
+ if (put_user(sizeof(x), argp))
+ retval = -EFAULT;
- break;
+ break;
- case SISUSB_GET_CONFIG:
-
- x.sisusb_id = SISUSB_ID;
- x.sisusb_version = SISUSB_VERSION;
- x.sisusb_revision = SISUSB_REVISION;
- x.sisusb_patchlevel = SISUSB_PATCHLEVEL;
- x.sisusb_gfxinit = sisusb->gfxinit;
- x.sisusb_vrambase = SISUSB_PCI_PSEUDO_MEMBASE;
- x.sisusb_mmiobase = SISUSB_PCI_PSEUDO_MMIOBASE;
- x.sisusb_iobase = SISUSB_PCI_PSEUDO_IOPORTBASE;
- x.sisusb_pcibase = SISUSB_PCI_PSEUDO_PCIBASE;
- x.sisusb_vramsize = sisusb->vramsize;
- x.sisusb_minor = sisusb->minor;
- x.sisusb_fbdevactive= 0;
+ case SISUSB_GET_CONFIG:
+
+ x.sisusb_id = SISUSB_ID;
+ x.sisusb_version = SISUSB_VERSION;
+ x.sisusb_revision = SISUSB_REVISION;
+ x.sisusb_patchlevel = SISUSB_PATCHLEVEL;
+ x.sisusb_gfxinit = sisusb->gfxinit;
+ x.sisusb_vrambase = SISUSB_PCI_PSEUDO_MEMBASE;
+ x.sisusb_mmiobase = SISUSB_PCI_PSEUDO_MMIOBASE;
+ x.sisusb_iobase = SISUSB_PCI_PSEUDO_IOPORTBASE;
+ x.sisusb_pcibase = SISUSB_PCI_PSEUDO_PCIBASE;
+ x.sisusb_vramsize = sisusb->vramsize;
+ x.sisusb_minor = sisusb->minor;
+ x.sisusb_fbdevactive = 0;
#ifdef INCL_SISUSB_CON
- x.sisusb_conactive = sisusb->haveconsole ? 1 : 0;
+ x.sisusb_conactive = sisusb->haveconsole ? 1 : 0;
#else
- x.sisusb_conactive = 0;
+ x.sisusb_conactive = 0;
#endif
- memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
+ memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
- if (copy_to_user((void __user *)arg, &x, sizeof(x)))
- retval = -EFAULT;
+ if (copy_to_user((void __user *)arg, &x, sizeof(x)))
+ retval = -EFAULT;
- break;
+ break;
- case SISUSB_COMMAND:
+ case SISUSB_COMMAND:
- if (copy_from_user(&y, (void __user *)arg, sizeof(y)))
- retval = -EFAULT;
- else
- retval = sisusb_handle_command(sisusb, &y, arg);
+ if (copy_from_user(&y, (void __user *)arg, sizeof(y)))
+ retval = -EFAULT;
+ else
+ retval = sisusb_handle_command(sisusb, &y, arg);
- break;
+ break;
- default:
- retval = -ENOTTY;
- break;
+ default:
+ retval = -ENOTTY;
+ break;
}
err_out:
@@ -3032,20 +2987,20 @@ err_out:
}
#ifdef SISUSB_NEW_CONFIG_COMPAT
-static long
-sisusb_compat_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+static long sisusb_compat_ioctl(struct file *f, unsigned int cmd,
+ unsigned long arg)
{
long retval;
switch (cmd) {
- case SISUSB_GET_CONFIG_SIZE:
- case SISUSB_GET_CONFIG:
- case SISUSB_COMMAND:
- retval = sisusb_ioctl(f, cmd, arg);
- return retval;
+ case SISUSB_GET_CONFIG_SIZE:
+ case SISUSB_GET_CONFIG:
+ case SISUSB_COMMAND:
+ retval = sisusb_ioctl(f, cmd, arg);
+ return retval;
- default:
- return -ENOIOCTLCMD;
+ default:
+ return -ENOIOCTLCMD;
}
}
#endif
@@ -3070,21 +3025,20 @@ static struct usb_class_driver usb_sisusb_class = {
};
static int sisusb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct sisusb_usb_data *sisusb;
int retval = 0, i;
dev_info(&dev->dev, "USB2VGA dongle found at address %d\n",
- dev->devnum);
+ dev->devnum);
/* Allocate memory for our private */
sisusb = kzalloc(sizeof(*sisusb), GFP_KERNEL);
- if (!sisusb) {
- dev_err(&dev->dev, "Failed to allocate memory for private data\n");
+ if (!sisusb)
return -ENOMEM;
- }
+
kref_init(&sisusb->kref);
mutex_init(&(sisusb->lock));
@@ -3092,8 +3046,9 @@ static int sisusb_probe(struct usb_interface *intf,
/* Register device */
retval = usb_register_dev(intf, &usb_sisusb_class);
if (retval) {
- dev_err(&sisusb->sisusb_dev->dev, "Failed to get a minor for device %d\n",
- dev->devnum);
+ dev_err(&sisusb->sisusb_dev->dev,
+ "Failed to get a minor for device %d\n",
+ dev->devnum);
retval = -ENODEV;
goto error_1;
}
@@ -3108,8 +3063,8 @@ static int sisusb_probe(struct usb_interface *intf,
/* Allocate buffers */
sisusb->ibufsize = SISUSB_IBUF_SIZE;
- if (!(sisusb->ibuf = kmalloc(SISUSB_IBUF_SIZE, GFP_KERNEL))) {
- dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for input buffer");
+ sisusb->ibuf = kmalloc(SISUSB_IBUF_SIZE, GFP_KERNEL);
+ if (!sisusb->ibuf) {
retval = -ENOMEM;
goto error_2;
}
@@ -3117,20 +3072,20 @@ static int sisusb_probe(struct usb_interface *intf,
sisusb->numobufs = 0;
sisusb->obufsize = SISUSB_OBUF_SIZE;
for (i = 0; i < NUMOBUFS; i++) {
- if (!(sisusb->obuf[i] = kmalloc(SISUSB_OBUF_SIZE, GFP_KERNEL))) {
+ sisusb->obuf[i] = kmalloc(SISUSB_OBUF_SIZE, GFP_KERNEL);
+ if (!sisusb->obuf[i]) {
if (i == 0) {
- dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for output buffer\n");
retval = -ENOMEM;
goto error_3;
}
break;
- } else
- sisusb->numobufs++;
-
+ }
+ sisusb->numobufs++;
}
/* Allocate URBs */
- if (!(sisusb->sisurbin = usb_alloc_urb(0, GFP_KERNEL))) {
+ sisusb->sisurbin = usb_alloc_urb(0, GFP_KERNEL);
+ if (!sisusb->sisurbin) {
dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate URBs\n");
retval = -ENOMEM;
goto error_3;
@@ -3138,8 +3093,10 @@ static int sisusb_probe(struct usb_interface *intf,
sisusb->completein = 1;
for (i = 0; i < sisusb->numobufs; i++) {
- if (!(sisusb->sisurbout[i] = usb_alloc_urb(0, GFP_KERNEL))) {
- dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate URBs\n");
+ sisusb->sisurbout[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!sisusb->sisurbout[i]) {
+ dev_err(&sisusb->sisusb_dev->dev,
+ "Failed to allocate URBs\n");
retval = -ENOMEM;
goto error_4;
}
@@ -3148,12 +3105,15 @@ static int sisusb_probe(struct usb_interface *intf,
sisusb->urbstatus[i] = 0;
}
- dev_info(&sisusb->sisusb_dev->dev, "Allocated %d output buffers\n", sisusb->numobufs);
+ dev_info(&sisusb->sisusb_dev->dev, "Allocated %d output buffers\n",
+ sisusb->numobufs);
#ifdef INCL_SISUSB_CON
/* Allocate our SiS_Pr */
- if (!(sisusb->SiS_Pr = kmalloc(sizeof(struct SiS_Private), GFP_KERNEL))) {
- dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate SiS_Pr\n");
+ sisusb->SiS_Pr = kmalloc(sizeof(struct SiS_Private), GFP_KERNEL);
+ if (!sisusb->SiS_Pr) {
+ retval = -ENOMEM;
+ goto error_4;
}
#endif
@@ -3170,17 +3130,18 @@ static int sisusb_probe(struct usb_interface *intf,
if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
int initscreen = 1;
#ifdef INCL_SISUSB_CON
- if (sisusb_first_vc > 0 &&
- sisusb_last_vc > 0 &&
- sisusb_first_vc <= sisusb_last_vc &&
- sisusb_last_vc <= MAX_NR_CONSOLES)
+ if (sisusb_first_vc > 0 && sisusb_last_vc > 0 &&
+ sisusb_first_vc <= sisusb_last_vc &&
+ sisusb_last_vc <= MAX_NR_CONSOLES)
initscreen = 0;
#endif
if (sisusb_init_gfxdevice(sisusb, initscreen))
- dev_err(&sisusb->sisusb_dev->dev, "Failed to early initialize device\n");
+ dev_err(&sisusb->sisusb_dev->dev,
+ "Failed to early initialize device\n");
} else
- dev_info(&sisusb->sisusb_dev->dev, "Not attached to USB 2.0 hub, deferring init\n");
+ dev_info(&sisusb->sisusb_dev->dev,
+ "Not attached to USB 2.0 hub, deferring init\n");
sisusb->ready = 1;
@@ -3254,7 +3215,7 @@ static const struct usb_device_id sisusb_table[] = {
{ }
};
-MODULE_DEVICE_TABLE (usb, sisusb_table);
+MODULE_DEVICE_TABLE(usb, sisusb_table);
static struct usb_driver sisusb_driver = {
.name = "sisusb",
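/*
 * Illustrative sketch (not part of the patch): the lseek/read/write paths
 * above all translate a file offset into a device address with the same
 * fixed-window arithmetic.  The helper below is hypothetical and merely
 * restates that pattern; SISUSB_PCI_PSEUDO_* and SISUSB_PCI_* are the
 * driver's own defines.
 */
static u32 sisusb_offset_to_address(loff_t pos, u32 pseudo_base, u32 real_base)
{
	/* e.g. pos in [PSEUDO_IOPORTBASE, PSEUDO_IOPORTBASE + 128) */
	return (u32)(pos - pseudo_base) + real_base;
}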
diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
index fec3f1128fdc..33ff49c4cea4 100644
--- a/drivers/usb/mon/mon_main.c
+++ b/drivers/usb/mon/mon_main.c
@@ -349,7 +349,7 @@ struct mon_bus *mon_bus_lookup(unsigned int num)
static int __init mon_init(void)
{
struct usb_bus *ubus;
- int rc;
+ int rc, id;
if ((rc = mon_text_init()) != 0)
goto err_text;
@@ -365,12 +365,11 @@ static int __init mon_init(void)
}
// MOD_INC_USE_COUNT(which_module?);
- mutex_lock(&usb_bus_list_lock);
- list_for_each_entry (ubus, &usb_bus_list, bus_list) {
+ mutex_lock(&usb_bus_idr_lock);
+ idr_for_each_entry(&usb_bus_idr, ubus, id)
mon_bus_init(ubus);
- }
usb_register_notify(&mon_nb);
- mutex_unlock(&usb_bus_list_lock);
+ mutex_unlock(&usb_bus_idr_lock);
return 0;
err_reg:
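/*
 * Background for the hunk above: usbcore replaced its global bus list with
 * an IDR (usb_bus_idr, guarded by usb_bus_idr_lock), so usbmon now walks it
 * with idr_for_each_entry().  A minimal sketch of that iteration pattern,
 * where my_idr is a hypothetical, already-populated IDR of struct usb_bus *:
 */
static void demo_walk_buses(struct idr *my_idr)
{
	struct usb_bus *ubus;
	int id;

	idr_for_each_entry(my_idr, ubus, id)	/* id receives each key */
		pr_info("found bus #%d\n", ubus->busnum);
}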
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 45c83baf675d..886526b5fcdd 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -7,6 +7,7 @@
config USB_MUSB_HDRC
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, AW, ...)'
depends on (USB || USB_GADGET)
+ depends on HAS_IOMEM
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
@@ -85,6 +86,7 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
+ depends on HAS_IOMEM
depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c3791a01ab31..39fd95833eb8 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1901,7 +1901,7 @@ static void musb_recover_from_babble(struct musb *musb)
*/
static struct musb *allocate_instance(struct device *dev,
- struct musb_hdrc_config *config, void __iomem *mbase)
+ const struct musb_hdrc_config *config, void __iomem *mbase)
{
struct musb *musb;
struct musb_hw_ep *ep;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index fd215fb45fd4..b6afe9e43305 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -438,7 +438,7 @@ struct musb {
*/
unsigned double_buffer_not_ok:1;
- struct musb_hdrc_config *config;
+ const struct musb_hdrc_config *config;
int xceiv_old_state;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 7539c3188ffc..8abfe4ec62fb 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -117,8 +117,8 @@ static void configure_channel(struct dma_channel *channel,
u8 bchannel = musb_channel->idx;
u16 csr = 0;
- dev_dbg(musb->controller, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
- channel, packet_sz, dma_addr, len, mode);
+ dev_dbg(musb->controller, "%p, pkt_sz %d, addr %pad, len %d, mode %d\n",
+ channel, packet_sz, &dma_addr, len, mode);
if (mode) {
csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
@@ -152,10 +152,10 @@ static int dma_channel_program(struct dma_channel *channel,
struct musb_dma_controller *controller = musb_channel->controller;
struct musb *musb = controller->private_data;
- dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+ dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d\n",
musb_channel->epnum,
musb_channel->transmit ? "Tx" : "Rx",
- packet_sz, dma_addr, len, mode);
+ packet_sz, &dma_addr, len, mode);
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
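/*
 * Note on the %pad conversions above: dma_addr_t can be wider than
 * unsigned int (CONFIG_ARCH_DMA_ADDR_T_64BIT), so plain 0x%x truncates
 * the value and triggers format warnings.  %pad prints a dma_addr_t
 * portably but takes a *pointer* to it.  Minimal sketch:
 */
static void demo_show_dma(struct device *dev, dma_addr_t addr)
{
	dev_dbg(dev, "buffer mapped at %pad\n", &addr);	/* note the & */
}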
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index d9b0dc461439..fdab4232cfbf 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -752,6 +752,7 @@ static const struct of_device_id sunxi_musb_match[] = {
{ .compatible = "allwinner,sun8i-a33-musb", },
{}
};
+MODULE_DEVICE_TABLE(of, sunxi_musb_match);
static struct platform_driver sunxi_musb_driver = {
.probe = sunxi_musb_probe,
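/*
 * Context for the one-line addition above: MODULE_DEVICE_TABLE(of, ...)
 * exports the OF match table as module aliases so modprobe can autoload
 * the driver when a matching device-tree node is probed.  Generic shape
 * (the names below are placeholders, not from this driver):
 */
static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-ip" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);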
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 4c82077da475..e6959ccb4453 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -310,9 +310,9 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */
- dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
+ dev_dbg(musb->controller, "ep%i %s dma ch%i dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
chdat->epnum, chdat->tx ? "tx" : "rx",
- ch, dma_addr, chdat->transfer_len, len,
+ ch, &dma_addr, chdat->transfer_len, len,
chdat->transfer_packet_sz, packet_sz);
/*
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index d0b6a1cd7f62..c92a295049ad 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -207,9 +207,6 @@ static int ux500_dma_channel_program(struct dma_channel *channel,
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
- if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
- return false;
-
channel->status = MUSB_DMA_STATUS_BUSY;
channel->actual_len = 0;
ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 39b424f7f629..a262a4343f29 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -5,7 +5,6 @@
#include <linux/usb/usb_phy_generic.h>
#include <linux/slab.h>
#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/usb/of.h>
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 5320cb8642cb..980c9dee09eb 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -118,7 +118,8 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
status = USB_EVENT_VBUS;
otg->state = OTG_STATE_B_PERIPHERAL;
nop->phy.last_event = status;
- usb_gadget_vbus_connect(otg->gadget);
+ if (otg->gadget)
+ usb_gadget_vbus_connect(otg->gadget);
/* drawing a "unit load" is *always* OK, except for OTG */
nop_set_vbus_draw(nop, 100);
@@ -128,7 +129,8 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
} else {
nop_set_vbus_draw(nop, 0);
- usb_gadget_vbus_disconnect(otg->gadget);
+ if (otg->gadget)
+ usb_gadget_vbus_disconnect(otg->gadget);
status = USB_EVENT_NONE;
otg->state = OTG_STATE_B_IDLE;
nop->phy.last_event = status;
@@ -184,7 +186,10 @@ static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
}
otg->gadget = gadget;
- otg->state = OTG_STATE_B_IDLE;
+ if (otg->state == OTG_STATE_B_PERIPHERAL)
+ usb_gadget_vbus_connect(gadget);
+ else
+ otg->state = OTG_STATE_B_IDLE;
return 0;
}
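/*
 * The two fixes above close a boot-order race: the VBUS interrupt thread
 * can run before any gadget is bound (otg->gadget == NULL), and a gadget
 * bound after VBUS was already reported must still get its connect
 * callback.  Condensed restatement of the resulting logic, using the same
 * names as the driver:
 */
static int demo_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
{
	otg->gadget = gadget;
	if (otg->state == OTG_STATE_B_PERIPHERAL)
		usb_gadget_vbus_connect(gadget);	/* VBUS arrived first */
	else
		otg->state = OTG_STATE_B_IDLE;
	return 0;
}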
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 3af263cc0caa..8d111ec653e4 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -258,7 +258,7 @@ static void power_down(struct isp1301 *isp)
isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
}
-static void power_up(struct isp1301 *isp)
+static void __maybe_unused power_up(struct isp1301 *isp)
{
// isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND);
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
index 579587d97217..3d7af85aecb9 100644
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -65,9 +65,7 @@ struct phy_8x16 {
void __iomem *regs;
struct clk *core_clk;
struct clk *iface_clk;
- struct regulator *v3p3;
- struct regulator *v1p8;
- struct regulator *vdd;
+ struct regulator_bulk_data regulator[3];
struct reset_control *phy_reset;
@@ -78,51 +76,6 @@ struct phy_8x16 {
struct notifier_block reboot_notify;
};
-static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
-{
- int ret;
-
- ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
- if (ret)
- return ret;
-
- ret = regulator_enable(qphy->vdd);
- if (ret)
- return ret;
-
- ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
- if (ret)
- goto off_vdd;
-
- ret = regulator_enable(qphy->v3p3);
- if (ret)
- goto off_vdd;
-
- ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
- if (ret)
- goto off_3p3;
-
- ret = regulator_enable(qphy->v1p8);
- if (ret)
- goto off_3p3;
-
- return 0;
-
-off_3p3:
- regulator_disable(qphy->v3p3);
-off_vdd:
- regulator_disable(qphy->vdd);
-
- return ret;
-}
-
-static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
-{
- regulator_disable(qphy->v1p8);
- regulator_disable(qphy->v3p3);
- regulator_disable(qphy->vdd);
-}
-
static int phy_8x16_notify_connect(struct usb_phy *phy,
enum usb_device_speed speed)
{
@@ -261,7 +214,6 @@ static void phy_8x16_shutdown(struct usb_phy *phy)
static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
{
- struct regulator_bulk_data regs[3];
struct device *dev = qphy->phy.dev;
int ret;
@@ -273,18 +225,15 @@ static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
if (IS_ERR(qphy->iface_clk))
return PTR_ERR(qphy->iface_clk);
- regs[0].supply = "v3p3";
- regs[1].supply = "v1p8";
- regs[2].supply = "vddcx";
+ qphy->regulator[0].supply = "v3p3";
+ qphy->regulator[1].supply = "v1p8";
+ qphy->regulator[2].supply = "vddcx";
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs);
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator),
+ qphy->regulator);
if (ret)
return ret;
- qphy->v3p3 = regs[0].consumer;
- qphy->v1p8 = regs[1].consumer;
- qphy->vdd = regs[2].consumer;
-
qphy->phy_reset = devm_reset_control_get(dev, "phy");
if (IS_ERR(qphy->phy_reset))
return PTR_ERR(qphy->phy_reset);
@@ -364,8 +313,9 @@ static int phy_8x16_probe(struct platform_device *pdev)
if (ret < 0)
goto off_core;
- ret = phy_8x16_regulators_enable(qphy);
- if (0 && ret)
+ ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator),
+ qphy->regulator);
+ if (WARN_ON(ret))
goto off_clks;
qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
@@ -387,7 +337,7 @@ off_extcon:
extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB,
&qphy->vbus_notify);
off_power:
- phy_8x16_regulators_disable(qphy);
+ regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
off_clks:
clk_disable_unprepare(qphy->iface_clk);
off_core:
@@ -413,7 +363,7 @@ static int phy_8x16_remove(struct platform_device *pdev)
clk_disable_unprepare(qphy->iface_clk);
clk_disable_unprepare(qphy->core_clk);
- phy_8x16_regulators_disable(qphy);
+ regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator);
return 0;
}
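/*
 * The conversion above leans on the regulator_bulk_* helpers; a minimal
 * sketch of that pattern (the supply names here are placeholders, and the
 * voltage-range setup the old code did is omitted):
 */
static int demo_enable_supplies(struct device *dev,
				struct regulator_bulk_data *regs, int num)
{
	int ret;

	regs[0].supply = "vdda";	/* hypothetical supply names */
	regs[1].supply = "vddio";

	ret = devm_regulator_bulk_get(dev, num, regs);
	if (ret)
		return ret;

	/* paired with regulator_bulk_disable() on teardown */
	return regulator_bulk_enable(num, regs);
}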
diff --git a/drivers/usb/renesas_usbhs/Kconfig b/drivers/usb/renesas_usbhs/Kconfig
index ebc99ee076ce..b26d7c339c05 100644
--- a/drivers/usb/renesas_usbhs/Kconfig
+++ b/drivers/usb/renesas_usbhs/Kconfig
@@ -5,7 +5,7 @@
config USB_RENESAS_USBHS
tristate 'Renesas USBHS controller'
depends on USB_GADGET
- depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
+ depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
depends on EXTCON || !EXTCON # if EXTCON=m, USBHS cannot be built-in
default n
help
diff --git a/drivers/usb/renesas_usbhs/Makefile b/drivers/usb/renesas_usbhs/Makefile
index 9e47f477b6d2..d787d05f6546 100644
--- a/drivers/usb/renesas_usbhs/Makefile
+++ b/drivers/usb/renesas_usbhs/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs.o
-renesas_usbhs-y := common.o mod.o pipe.o fifo.o rcar2.o
+renesas_usbhs-y := common.o mod.o pipe.o fifo.o rcar2.o rcar3.o
ifneq ($(CONFIG_USB_RENESAS_USBHS_HCD),)
renesas_usbhs-y += mod_host.o
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 5af9ca5d54ab..baeb7d23bf24 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -25,6 +25,7 @@
#include <linux/sysfs.h>
#include "common.h"
#include "rcar2.h"
+#include "rcar3.h"
/*
* image of renesas_usbhs
@@ -477,18 +478,16 @@ static const struct of_device_id usbhs_of_match[] = {
.data = (void *)USBHS_TYPE_RCAR_GEN2,
},
{
- /* Gen3 is compatible with Gen2 */
.compatible = "renesas,usbhs-r8a7795",
- .data = (void *)USBHS_TYPE_RCAR_GEN2,
+ .data = (void *)USBHS_TYPE_RCAR_GEN3,
},
{
.compatible = "renesas,rcar-gen2-usbhs",
.data = (void *)USBHS_TYPE_RCAR_GEN2,
},
{
- /* Gen3 is compatible with Gen2 */
.compatible = "renesas,rcar-gen3-usbhs",
- .data = (void *)USBHS_TYPE_RCAR_GEN2,
+ .data = (void *)USBHS_TYPE_RCAR_GEN3,
},
{ },
};
@@ -578,6 +577,13 @@ static int usbhs_probe(struct platform_device *pdev)
priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
}
break;
+ case USBHS_TYPE_RCAR_GEN3:
+ priv->pfunc = usbhs_rcar3_ops;
+ if (!priv->dparam.pipe_configs) {
+ priv->dparam.pipe_configs = usbhsc_new_pipe;
+ priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_new_pipe);
+ }
+ break;
default:
if (!info->platform_callback.get_id) {
dev_err(&pdev->dev, "no platform callbacks");
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index c0f5c652d272..000f9750149f 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -46,7 +46,7 @@ static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
return -EINVAL;
}
-static struct usbhs_pkt_handle usbhsf_null_handler = {
+static const struct usbhs_pkt_handle usbhsf_null_handler = {
.prepare = usbhsf_null_handle,
.try_run = usbhsf_null_handle,
};
@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
goto __usbhs_pkt_handler_end;
}
- ret = func(pkt, &is_done);
+ if (likely(func))
+ ret = func(pkt, &is_done);
if (is_done)
__usbhsf_pkt_del(pkt);
@@ -422,12 +423,12 @@ static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
return 0;
}
-struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
+const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
.prepare = usbhs_dcp_dir_switch_to_write,
.try_run = usbhs_dcp_dir_switch_done,
};
-struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
+const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
.prepare = usbhs_dcp_dir_switch_to_read,
.try_run = usbhs_dcp_dir_switch_done,
};
@@ -449,7 +450,7 @@ static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
return pkt->handler->prepare(pkt, is_done);
}
-struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
+const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
.prepare = usbhsf_dcp_data_stage_try_push,
};
@@ -488,7 +489,7 @@ static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
return pkt->handler->prepare(pkt, is_done);
}
-struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
+const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
.prepare = usbhsf_dcp_data_stage_prepare_pop,
};
@@ -600,7 +601,7 @@ static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
return usbhsf_pio_try_push(pkt, is_done);
}
-struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
+const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
.prepare = usbhsf_pio_prepare_push,
.try_run = usbhsf_pio_try_push,
};
@@ -730,7 +731,7 @@ usbhs_fifo_read_busy:
return ret;
}
-struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
+const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
.prepare = usbhsf_prepare_pop,
.try_run = usbhsf_pio_try_pop,
};
@@ -747,7 +748,7 @@ static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
return 0;
}
-struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
+const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
.prepare = usbhsf_ctrl_stage_end,
.try_run = usbhsf_ctrl_stage_end,
};
@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
pkt->trans = len;
+ usbhsf_tx_irq_ctrl(pipe, 0);
INIT_WORK(&pkt->work, xfer_work);
schedule_work(&pkt->work);
@@ -934,7 +936,7 @@ static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
return 0;
}
-struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
+const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
.prepare = usbhsf_dma_prepare_push,
.dma_done = usbhsf_dma_push_done,
};
@@ -1182,7 +1184,7 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
}
-struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
+const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
.prepare = usbhsf_dma_prepare_pop,
.try_run = usbhsf_dma_try_pop,
.dma_done = usbhsf_dma_pop_done
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index c7d9b86d51bf..8b98507d7abc 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -54,7 +54,7 @@ struct usbhs_pkt_handle;
struct usbhs_pkt {
struct list_head node;
struct usbhs_pipe *pipe;
- struct usbhs_pkt_handle *handler;
+ const struct usbhs_pkt_handle *handler;
void (*done)(struct usbhs_priv *priv,
struct usbhs_pkt *pkt);
struct work_struct work;
@@ -86,18 +86,18 @@ void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe);
/*
* packet info
*/
-extern struct usbhs_pkt_handle usbhs_fifo_pio_push_handler;
-extern struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler;
-extern struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler;
+extern const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler;
+extern const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler;
+extern const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler;
-extern struct usbhs_pkt_handle usbhs_fifo_dma_push_handler;
-extern struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler;
+extern const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler;
+extern const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler;
-extern struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler;
-extern struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler;
+extern const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler;
+extern const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler;
-extern struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler;
-extern struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler;
+extern const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler;
+extern const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler;
void usbhs_pkt_init(struct usbhs_pkt *pkt);
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 657f9672ceba..53d104b56ef1 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
+ unsigned long flags;
ureq->req.actual = pkt->actual;
- usbhsg_queue_pop(uep, ureq, 0);
+ usbhs_lock(priv, flags);
+ if (uep)
+ __usbhsg_queue_pop(uep, ureq, 0);
+ usbhs_unlock(priv, flags);
}
static void usbhsg_queue_push(struct usbhsg_uep *uep,
@@ -561,7 +565,7 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
if (!pkt)
break;
- usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET);
+ usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ESHUTDOWN);
}
usbhs_pipe_disable(pipe);
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 0e95d2925dc5..78e9dba701c4 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -241,7 +241,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
int timeout = 1024;
- u16 val;
+ u16 mask = usbhs_mod_is_host(priv) ? (CSSTS | PID_MASK) : PID_MASK;
/*
* make sure....
@@ -265,9 +265,7 @@ static int usbhsp_pipe_barrier(struct usbhs_pipe *pipe)
usbhs_pipe_disable(pipe);
do {
- val = usbhsp_pipectrl_get(pipe);
- val &= CSSTS | PID_MASK;
- if (!val)
+ if (!(usbhsp_pipectrl_get(pipe) & mask))
return 0;
udelay(10);
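/*
 * Note on the barrier change above: CSSTS is a host-mode status bit, so
 * the poll mask is now chosen once up front instead of being masked inside
 * the loop.  The wait itself is the standard bounded-poll idiom, roughly
 * (demo helper, not driver code):
 */
static int demo_poll_bits_clear(u16 (*read_reg)(void *ctx), void *ctx, u16 mask)
{
	int timeout = 1024;

	while (timeout--) {
		if (!(read_reg(ctx) & mask))
			return 0;
		udelay(10);	/* <linux/delay.h> */
	}

	return -EBUSY;
}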
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 3212ab51e844..7835747f9803 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -38,7 +38,7 @@ struct usbhs_pipe {
#define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2)
#define USBHS_PIPE_FLAGS_IS_RUNNING (1 << 3)
- struct usbhs_pkt_handle *handler;
+ const struct usbhs_pkt_handle *handler;
void *mod_private;
};
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
new file mode 100644
index 000000000000..38b01f2aeeb0
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -0,0 +1,54 @@
+/*
+ * Renesas USB driver R-Car Gen. 3 initialization and power control
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include "common.h"
+#include "rcar3.h"
+
+#define LPSTS 0x102
+#define UGCTRL2 0x184 /* 32-bit register */
+
+/* Low Power Status register (LPSTS) */
+#define LPSTS_SUSPM 0x4000
+
+/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
+#define UGCTRL2_USB0SEL_OTG 0x00000030
+
+void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
+ void __iomem *base, int enable)
+{
+ struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
+
+ usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+
+ if (enable)
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
+ else
+ usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+
+ return 0;
+}
+
+static int usbhs_rcar3_get_id(struct platform_device *pdev)
+{
+ return USBHS_GADGET;
+}
+
+const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
+ .power_ctrl = usbhs_rcar3_power_ctrl,
+ .get_id = usbhs_rcar3_get_id,
+};
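/*
 * usbhs_bset(priv, reg, mask, data) used above is the driver's 16-bit
 * read-modify-write helper from common.c; its effect is roughly the sketch
 * below, under the usual mask/data convention:
 */
static u16 demo_bset(u16 old, u16 mask, u16 data)
{
	return (old & ~mask) | (data & mask);	/* replace only masked bits */
}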
diff --git a/drivers/usb/renesas_usbhs/rcar3.h b/drivers/usb/renesas_usbhs/rcar3.h
new file mode 100644
index 000000000000..5f850b23ff18
--- /dev/null
+++ b/drivers/usb/renesas_usbhs/rcar3.h
@@ -0,0 +1,3 @@
+#include "common.h"
+
+extern const struct renesas_usbhs_platform_callback usbhs_rcar3_ops;
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index c73808f095bb..f139488d0816 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -370,7 +370,7 @@ static void ch341_set_termios(struct tty_struct *tty,
static void ch341_break_ctl(struct tty_struct *tty, int break_state)
{
const uint16_t ch341_break_reg =
- CH341_REG_BREAK1 | ((uint16_t) CH341_REG_BREAK2 << 8);
+ ((uint16_t) CH341_REG_BREAK2 << 8) | CH341_REG_BREAK1;
struct usb_serial_port *port = tty->driver_data;
int r;
uint16_t reg_contents;
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 3806e7014199..a66b01bb1fa1 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -147,10 +147,7 @@ static int usb_console_setup(struct console *co, char *options)
kref_get(&tty->driver->kref);
__module_get(tty->driver->owner);
tty->ops = &usb_console_fake_tty_ops;
- if (tty_init_termios(tty)) {
- retval = -ENOMEM;
- goto put_tty;
- }
+ tty_init_termios(tty);
tty_port_tty_set(&port->port, tty);
}
@@ -185,7 +182,6 @@ static int usb_console_setup(struct console *co, char *options)
fail:
tty_port_tty_set(&port->port, NULL);
- put_tty:
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 73a366de5102..dd47823bb014 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
@@ -327,113 +328,169 @@ struct cp210x_comm_status {
#define PURGE_ALL 0x000f
/*
- * cp210x_get_config
- * Reads from the CP210x configuration registers
- * 'size' is specified in bytes.
- * 'data' is a pointer to a pre-allocated array of integers large
- * enough to hold 'size' bytes (with 4 bytes to each integer)
+ * Reads a variable-sized block of CP210X_ registers, identified by req.
+ * Returns data into buf in native USB byte order.
*/
-static int cp210x_get_config(struct usb_serial_port *port, u8 request,
- unsigned int *data, int size)
+static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req,
+ void *buf, int bufsize)
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
- __le32 *buf;
- int result, i, length;
-
- /* Number of integers required to contain the array */
- length = (((size - 1) | 3) + 1) / 4;
+ void *dmabuf;
+ int result;
- buf = kcalloc(length, sizeof(__le32), GFP_KERNEL);
- if (!buf)
+ dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!dmabuf) {
+ /*
+ * FIXME Some callers don't bother to check for error,
+ * at least give them consistent junk until they are fixed
+ */
+ memset(buf, 0, bufsize);
return -ENOMEM;
+ }
- /* Issue the request, attempting to read 'size' bytes */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- request, REQTYPE_INTERFACE_TO_HOST, 0x0000,
- port_priv->bInterfaceNumber, buf, size,
- USB_CTRL_GET_TIMEOUT);
+ req, REQTYPE_INTERFACE_TO_HOST, 0,
+ port_priv->bInterfaceNumber, dmabuf, bufsize,
+ USB_CTRL_SET_TIMEOUT);
+ if (result == bufsize) {
+ memcpy(buf, dmabuf, bufsize);
+ result = 0;
+ } else {
+ dev_err(&port->dev, "failed get req 0x%x size %d status: %d\n",
+ req, bufsize, result);
+ if (result >= 0)
+ result = -EPROTO;
- /* Convert data into an array of integers */
- for (i = 0; i < length; i++)
- data[i] = le32_to_cpu(buf[i]);
+ /*
+ * FIXME Some callers don't bother to check for error,
+ * at least give them consistent junk until they are fixed
+ */
+ memset(buf, 0, bufsize);
+ }
- kfree(buf);
+ kfree(dmabuf);
- if (result != size) {
- dev_dbg(&port->dev, "%s - Unable to send config request, request=0x%x size=%d result=%d\n",
- __func__, request, size, result);
- if (result > 0)
- result = -EPROTO;
+ return result;
+}
- return result;
+/*
+ * Reads any 32-bit CP210X_ register identified by req.
+ */
+static int cp210x_read_u32_reg(struct usb_serial_port *port, u8 req, u32 *val)
+{
+ __le32 le32_val;
+ int err;
+
+ err = cp210x_read_reg_block(port, req, &le32_val, sizeof(le32_val));
+ if (err) {
+ /*
+ * FIXME Some callers don't bother to check for error,
+ * at least give them consistent junk until they are fixed
+ */
+ *val = 0;
+ return err;
}
+ *val = le32_to_cpu(le32_val);
+
+ return 0;
+}
+
+/*
+ * Reads any 16-bit CP210X_ register identified by req.
+ */
+static int cp210x_read_u16_reg(struct usb_serial_port *port, u8 req, u16 *val)
+{
+ __le16 le16_val;
+ int err;
+
+ err = cp210x_read_reg_block(port, req, &le16_val, sizeof(le16_val));
+ if (err)
+ return err;
+
+ *val = le16_to_cpu(le16_val);
+
return 0;
}
/*
- * cp210x_set_config
- * Writes to the CP210x configuration registers
- * Values less than 16 bits wide are sent directly
- * 'size' is specified in bytes.
+ * Reads any 8-bit CP210X_ register identified by req.
+ */
+static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val)
+{
+ return cp210x_read_reg_block(port, req, val, sizeof(*val));
+}
+
+/*
+ * Writes any 16-bit CP210X_ register (req) whose value is passed
+ * entirely in the wValue field of the USB request.
*/
-static int cp210x_set_config(struct usb_serial_port *port, u8 request,
- unsigned int *data, int size)
+static int cp210x_write_u16_reg(struct usb_serial_port *port, u8 req, u16 val)
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
- __le32 *buf;
- int result, i, length;
+ int result;
+
+ result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ req, REQTYPE_HOST_TO_INTERFACE, val,
+ port_priv->bInterfaceNumber, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (result < 0) {
+ dev_err(&port->dev, "failed set request 0x%x status: %d\n",
+ req, result);
+ }
+
+ return result;
+}
- /* Number of integers required to contain the array */
- length = (((size - 1) | 3) + 1) / 4;
+/*
+ * Writes a variable-sized block of CP210X_ registers, identified by req.
+ * Data in buf must be in native USB byte order.
+ */
+static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
+ void *buf, int bufsize)
+{
+ struct usb_serial *serial = port->serial;
+ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ void *dmabuf;
+ int result;
- buf = kmalloc(length * sizeof(__le32), GFP_KERNEL);
- if (!buf)
+ dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!dmabuf)
return -ENOMEM;
- /* Array of integers into bytes */
- for (i = 0; i < length; i++)
- buf[i] = cpu_to_le32(data[i]);
+ memcpy(dmabuf, buf, bufsize);
- if (size > 2) {
- result = usb_control_msg(serial->dev,
- usb_sndctrlpipe(serial->dev, 0),
- request, REQTYPE_HOST_TO_INTERFACE, 0x0000,
- port_priv->bInterfaceNumber, buf, size,
- USB_CTRL_SET_TIMEOUT);
- } else {
- result = usb_control_msg(serial->dev,
- usb_sndctrlpipe(serial->dev, 0),
- request, REQTYPE_HOST_TO_INTERFACE, data[0],
- port_priv->bInterfaceNumber, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- }
+ result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ req, REQTYPE_HOST_TO_INTERFACE, 0,
+ port_priv->bInterfaceNumber, dmabuf, bufsize,
+ USB_CTRL_SET_TIMEOUT);
- kfree(buf);
+ kfree(dmabuf);
- if ((size > 2 && result != size) || result < 0) {
- dev_dbg(&port->dev, "%s - Unable to send request, request=0x%x size=%d result=%d\n",
- __func__, request, size, result);
- if (result > 0)
+ if (result == bufsize) {
+ result = 0;
+ } else {
+ dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n",
+ req, bufsize, result);
+ if (result >= 0)
result = -EPROTO;
-
- return result;
}
- return 0;
+ return result;
}
/*
- * cp210x_set_config_single
- * Convenience function for calling cp210x_set_config on single data values
- * without requiring an integer pointer
+ * Writes any 32-bit CP210X_ register identified by req.
*/
-static inline int cp210x_set_config_single(struct usb_serial_port *port,
- u8 request, unsigned int data)
+static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val)
{
- return cp210x_set_config(port, request, &data, 2);
+ __le32 le32_val;
+
+ le32_val = cpu_to_le32(val);
+
+ return cp210x_write_reg_block(port, req, &le32_val, sizeof(le32_val));
}
/*
@@ -445,47 +502,46 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port,
static int cp210x_detect_swapped_line_ctl(struct usb_serial_port *port)
{
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
- unsigned int line_ctl_save;
- unsigned int line_ctl_test;
+ u16 line_ctl_save;
+ u16 line_ctl_test;
int err;
- err = cp210x_get_config(port, CP210X_GET_LINE_CTL, &line_ctl_save, 2);
+ err = cp210x_read_u16_reg(port, CP210X_GET_LINE_CTL, &line_ctl_save);
if (err)
return err;
- line_ctl_test = 0x800;
- err = cp210x_set_config(port, CP210X_SET_LINE_CTL, &line_ctl_test, 2);
+ err = cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, 0x800);
if (err)
return err;
- err = cp210x_get_config(port, CP210X_GET_LINE_CTL, &line_ctl_test, 2);
+ err = cp210x_read_u16_reg(port, CP210X_GET_LINE_CTL, &line_ctl_test);
if (err)
return err;
if (line_ctl_test == 8) {
port_priv->has_swapped_line_ctl = true;
- line_ctl_save = swab16((u16)line_ctl_save);
+ line_ctl_save = swab16(line_ctl_save);
}
- return cp210x_set_config(port, CP210X_SET_LINE_CTL, &line_ctl_save, 2);
+ return cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, line_ctl_save);
}
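
The probe above is easiest to follow with concrete values: it writes the
16-bit word 0x0800 and reads the register back. A healthy device returns
0x0800; a cp2108 with the swapped-byte bug returns 0x0008, and from then on
one swab16() per access undoes the damage. A worked illustration, not driver
code:

        u16 wrote = 0x0800;             /* written by the probe */
        u16 got   = 0x0008;             /* returned by a buggy cp2108 */

        /* swab16() exchanges the two bytes: swab16(0x0008) == 0x0800 */
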
/*
- * Must always be called instead of cp210x_get_config(CP210X_GET_LINE_CTL)
+ * Must always be called instead of cp210x_read_u16_reg(CP210X_GET_LINE_CTL)
* to work around the cp2108 bug and get the correct value.
*/
-static int cp210x_get_line_ctl(struct usb_serial_port *port, unsigned int *ctl)
+static int cp210x_get_line_ctl(struct usb_serial_port *port, u16 *ctl)
{
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int err;
- err = cp210x_get_config(port, CP210X_GET_LINE_CTL, ctl, 2);
+ err = cp210x_read_u16_reg(port, CP210X_GET_LINE_CTL, ctl);
if (err)
return err;
/* Workaround swapped bytes in 16-bit value from CP210X_GET_LINE_CTL */
if (port_priv->has_swapped_line_ctl)
- *ctl = swab16((u16)(*ctl));
+ *ctl = swab16(*ctl);
return 0;
}
@@ -536,8 +592,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
{
int result;
- result = cp210x_set_config_single(port, CP210X_IFC_ENABLE,
- UART_ENABLE);
+ result = cp210x_write_u16_reg(port, CP210X_IFC_ENABLE, UART_ENABLE);
if (result) {
dev_err(&port->dev, "%s - Unable to enable UART\n", __func__);
return result;
@@ -555,15 +610,12 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
static void cp210x_close(struct usb_serial_port *port)
{
- unsigned int purge_ctl;
-
usb_serial_generic_close(port);
/* Clear both queues; cp2108 needs this to avoid an occasional hang */
- purge_ctl = PURGE_ALL;
- cp210x_set_config(port, CP210X_PURGE, &purge_ctl, 2);
+ cp210x_write_u16_reg(port, CP210X_PURGE, PURGE_ALL);
- cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_DISABLE);
+ cp210x_write_u16_reg(port, CP210X_IFC_ENABLE, UART_DISABLE);
}
/*
@@ -641,11 +693,12 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
unsigned int *cflagp, unsigned int *baudp)
{
struct device *dev = &port->dev;
- unsigned int cflag, modem_ctl[4];
- unsigned int baud;
- unsigned int bits;
+ unsigned int cflag;
+ u8 modem_ctl[16];
+ u32 baud;
+ u16 bits;
- cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4);
+ cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud);
dev_dbg(dev, "%s - baud rate = %d\n", __func__, baud);
*baudp = baud;
@@ -676,14 +729,14 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
cflag |= CS8;
bits &= ~BITS_DATA_MASK;
bits |= BITS_DATA_8;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits);
break;
default:
dev_dbg(dev, "%s - Unknown number of data bits, using 8\n", __func__);
cflag |= CS8;
bits &= ~BITS_DATA_MASK;
bits |= BITS_DATA_8;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits);
break;
}
@@ -714,7 +767,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
dev_dbg(dev, "%s - Unknown parity mode, disabling parity\n", __func__);
cflag &= ~PARENB;
bits &= ~BITS_PARITY_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits);
break;
}
@@ -726,7 +779,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
case BITS_STOP_1_5:
dev_dbg(dev, "%s - stop bits = 1.5 (not supported, using 1 stop bit)\n", __func__);
bits &= ~BITS_STOP_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits);
break;
case BITS_STOP_2:
dev_dbg(dev, "%s - stop bits = 2\n", __func__);
@@ -735,12 +788,13 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
default:
dev_dbg(dev, "%s - Unknown number of stop bits, using 1 stop bit\n", __func__);
bits &= ~BITS_STOP_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits);
break;
}
- cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16);
- if (modem_ctl[0] & 0x0008) {
+ cp210x_read_reg_block(port, CP210X_GET_FLOW, modem_ctl,
+ sizeof(modem_ctl));
+ if (modem_ctl[0] & 0x08) {
dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__);
cflag |= CRTSCTS;
} else {
@@ -792,8 +846,7 @@ static void cp210x_change_speed(struct tty_struct *tty,
baud = cp210x_quantise_baudrate(baud);
dev_dbg(&port->dev, "%s - setting baud rate to %u\n", __func__, baud);
- if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud,
- sizeof(baud))) {
+ if (cp210x_write_u32_reg(port, CP210X_SET_BAUDRATE, baud)) {
dev_warn(&port->dev, "failed to set baud rate to %u\n", baud);
if (old_termios)
baud = old_termios->c_ospeed;
@@ -809,8 +862,8 @@ static void cp210x_set_termios(struct tty_struct *tty,
{
struct device *dev = &port->dev;
unsigned int cflag, old_cflag;
- unsigned int bits;
- unsigned int modem_ctl[4];
+ u16 bits;
+ u8 modem_ctl[16];
cflag = tty->termios.c_cflag;
old_cflag = old_termios->c_cflag;
@@ -848,7 +901,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
bits |= BITS_DATA_8;
break;
}
- if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
+ if (cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits))
dev_dbg(dev, "Number of data bits requested not supported by device\n");
}
@@ -875,7 +928,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
}
}
}
- if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
+ if (cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits))
dev_dbg(dev, "Parity mode not supported by device\n");
}
@@ -889,32 +942,40 @@ static void cp210x_set_termios(struct tty_struct *tty,
bits |= BITS_STOP_1;
dev_dbg(dev, "%s - stop bits = 1\n", __func__);
}
- if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
+ if (cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits))
dev_dbg(dev, "Number of stop bits requested not supported by device\n");
}
if ((cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
- cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16);
- dev_dbg(dev, "%s - read modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x\n",
- __func__, modem_ctl[0], modem_ctl[1],
- modem_ctl[2], modem_ctl[3]);
+
+ /* Only bytes 0, 4 and 7 out of first 8 have functional bits */
+
+ cp210x_read_reg_block(port, CP210X_GET_FLOW, modem_ctl,
+ sizeof(modem_ctl));
+ dev_dbg(dev, "%s - read modem controls = %02x .. .. .. %02x .. .. %02x\n",
+ __func__, modem_ctl[0], modem_ctl[4], modem_ctl[7]);
if (cflag & CRTSCTS) {
modem_ctl[0] &= ~0x7B;
modem_ctl[0] |= 0x09;
- modem_ctl[1] = 0x80;
+ modem_ctl[4] = 0x80;
+ /* FIXME - why clear reserved bits that were just read? */
+ modem_ctl[5] = 0;
+ modem_ctl[6] = 0;
+ modem_ctl[7] = 0;
dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__);
} else {
modem_ctl[0] &= ~0x7B;
modem_ctl[0] |= 0x01;
- modem_ctl[1] |= 0x40;
+ /* FIXME - OR here instead of assignment looks wrong */
+ modem_ctl[4] |= 0x40;
dev_dbg(dev, "%s - flow control = NONE\n", __func__);
}
- dev_dbg(dev, "%s - write modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x\n",
- __func__, modem_ctl[0], modem_ctl[1],
- modem_ctl[2], modem_ctl[3]);
- cp210x_set_config(port, CP210X_SET_FLOW, modem_ctl, 16);
+ dev_dbg(dev, "%s - write modem controls = %02x .. .. .. %02x .. .. %02x\n",
+ __func__, modem_ctl[0], modem_ctl[4], modem_ctl[7]);
+ cp210x_write_reg_block(port, CP210X_SET_FLOW, modem_ctl,
+ sizeof(modem_ctl));
}
}
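
For context on the magic offsets: per Silicon Labs' AN571 application note,
the 16-byte CP210X_GET_FLOW/SET_FLOW block mirrored by modem_ctl[] is four
little-endian 32-bit words, which is why only bytes 0, 4 and 7 carry the
bits manipulated above. A sketch of that layout (field names follow the app
note, not this driver):

        struct cp210x_flow_block_sketch {
                __le32  ulControlHandshake;     /* byte 0: DTR/CTS handshake bits */
                __le32  ulFlowReplace;          /* bytes 4-7: RTS, XON/XOFF bits  */
                __le32  ulXonLimit;
                __le32  ulXoffLimit;
        };
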
@@ -929,7 +990,7 @@ static int cp210x_tiocmset(struct tty_struct *tty,
static int cp210x_tiocmset_port(struct usb_serial_port *port,
unsigned int set, unsigned int clear)
{
- unsigned int control = 0;
+ u16 control = 0;
if (set & TIOCM_RTS) {
control |= CONTROL_RTS;
@@ -950,7 +1011,7 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port,
dev_dbg(&port->dev, "%s - control = 0x%.4x\n", __func__, control);
- return cp210x_set_config(port, CP210X_SET_MHS, &control, 2);
+ return cp210x_write_u16_reg(port, CP210X_SET_MHS, control);
}
static void cp210x_dtr_rts(struct usb_serial_port *p, int on)
@@ -964,10 +1025,10 @@ static void cp210x_dtr_rts(struct usb_serial_port *p, int on)
static int cp210x_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- unsigned int control;
+ u8 control;
int result;
- cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
+ cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
@@ -984,7 +1045,7 @@ static int cp210x_tiocmget(struct tty_struct *tty)
static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
- unsigned int state;
+ u16 state;
if (break_state == 0)
state = BREAK_OFF;
@@ -992,7 +1053,7 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
state = BREAK_ON;
dev_dbg(&port->dev, "%s - turning break %s\n", __func__,
state == BREAK_OFF ? "off" : "on");
- cp210x_set_config(port, CP210X_SET_BREAK, &state, 2);
+ cp210x_write_u16_reg(port, CP210X_SET_BREAK, state);
}
static int cp210x_port_probe(struct usb_serial_port *port)
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index 2916dea3ede8..5f17a3b9916d 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -140,7 +140,6 @@ static int cyberjack_open(struct tty_struct *tty,
{
struct cyberjack_private *priv;
unsigned long flags;
- int result = 0;
dev_dbg(&port->dev, "%s - usb_clear_halt\n", __func__);
usb_clear_halt(port->serial->dev, port->write_urb->pipe);
@@ -152,7 +151,7 @@ static int cyberjack_open(struct tty_struct *tty,
priv->wrsent = 0;
spin_unlock_irqrestore(&priv->lock, flags);
- return result;
+ return 0;
}
static void cyberjack_close(struct usb_serial_port *port)
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 01bf53392819..bbeeb2bd55a8 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
struct cypress_private *priv;
+ if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
+ dev_err(&port->dev, "required endpoint is missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
cypress_set_termios(tty, port, &priv->tmp_termios);
/* setup the port and start reading from the device */
- if (!port->interrupt_in_urb) {
- dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
- __func__);
- return -1;
- }
-
usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
port->interrupt_in_urb->transfer_buffer,
@@ -1165,8 +1164,7 @@ static void cypress_read_int_callback(struct urb *urb)
/* hangup, as defined in acm.c... this might be a bad place for it
* though */
- if (tty && !(tty->termios.c_cflag & CLOCAL) &&
- !(priv->current_status & UART_CD)) {
+ if (tty && !C_CLOCAL(tty) && !(priv->current_status & UART_CD)) {
dev_dbg(dev, "%s - calling hangup\n", __func__);
tty_hangup(tty);
goto continue_read;
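
The C_CLOCAL()/C_CRTSCTS() conversions in this and the following drivers are
purely mechanical: the helpers, defined in include/linux/tty.h, simply test
the named c_cflag bit. For reference:

        #define _C_FLAG(tty, flag)      ((tty)->termios.c_cflag & (flag))
        #define C_CLOCAL(tty)           _C_FLAG((tty), CLOCAL)
        #define C_CRTSCTS(tty)          _C_FLAG((tty), CRTSCTS)
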
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 12b0e67473ba..16e8e37b3b36 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -695,11 +695,11 @@ static void digi_set_termios(struct tty_struct *tty,
arg = -1;
/* reassert DTR and (maybe) RTS on transition from B0 */
- if ((old_cflag&CBAUD) == B0) {
+ if ((old_cflag & CBAUD) == B0) {
/* don't set RTS if using hardware flow control */
/* and throttling input */
modem_signals = TIOCM_DTR;
- if (!(tty->termios.c_cflag & CRTSCTS) ||
+ if (!C_CRTSCTS(tty) ||
!test_bit(TTY_THROTTLED, &tty->flags))
modem_signals |= TIOCM_RTS;
digi_set_modem_signals(port, modem_signals, 1);
@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
static int digi_startup(struct usb_serial *serial)
{
+ struct device *dev = &serial->interface->dev;
struct digi_serial *serial_priv;
int ret;
+ int i;
+
+ /* check whether the device has the expected number of endpoints */
+ if (serial->num_port_pointers < serial->type->num_ports + 1) {
+ dev_err(dev, "OOB endpoints missing\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < serial->type->num_ports + 1; i++) {
+ if (!serial->port[i]->read_urb) {
+ dev_err(dev, "bulk-in endpoint missing\n");
+ return -ENODEV;
+ }
+ if (!serial->port[i]->write_urb) {
+ dev_err(dev, "bulk-out endpoint missing\n");
+ return -ENODEV;
+ }
+ }
serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
if (!serial_priv)
@@ -1491,8 +1510,8 @@ static int digi_read_oob_callback(struct urb *urb)
rts = 0;
if (tty)
- rts = tty->termios.c_cflag & CRTSCTS;
-
+ rts = C_CRTSCTS(tty);
+
if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
spin_lock(&priv->dp_port_lock);
/* convert from digi flags to termiox flags */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c660ae401d8..3a814e802dee 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
+ /* ICP DAS I-756xU devices */
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+ { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ } /* Terminating entry */
};
@@ -1320,11 +1324,11 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
if (baud <= 3000000) {
__u16 product_id = le16_to_cpu(
port->serial->dev->descriptor.idProduct);
- if (((FTDI_NDI_HUC_PID == product_id) ||
- (FTDI_NDI_SPECTRA_SCU_PID == product_id) ||
- (FTDI_NDI_FUTURE_2_PID == product_id) ||
- (FTDI_NDI_FUTURE_3_PID == product_id) ||
- (FTDI_NDI_AURORA_SCU_PID == product_id)) &&
+ if (((product_id == FTDI_NDI_HUC_PID) ||
+ (product_id == FTDI_NDI_SPECTRA_SCU_PID) ||
+ (product_id == FTDI_NDI_FUTURE_2_PID) ||
+ (product_id == FTDI_NDI_FUTURE_3_PID) ||
+ (product_id == FTDI_NDI_AURORA_SCU_PID)) &&
(baud == 19200)) {
baud = 1200000;
}
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index ed58c6fa8dbe..bbcc13df11ac 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -239,11 +239,11 @@ enum ftdi_sio_baudrate {
*/
#define FTDI_SIO_SET_DTR_MASK 0x1
-#define FTDI_SIO_SET_DTR_HIGH (1 | (FTDI_SIO_SET_DTR_MASK << 8))
-#define FTDI_SIO_SET_DTR_LOW (0 | (FTDI_SIO_SET_DTR_MASK << 8))
+#define FTDI_SIO_SET_DTR_HIGH ((FTDI_SIO_SET_DTR_MASK << 8) | 1)
+#define FTDI_SIO_SET_DTR_LOW ((FTDI_SIO_SET_DTR_MASK << 8) | 0)
#define FTDI_SIO_SET_RTS_MASK 0x2
-#define FTDI_SIO_SET_RTS_HIGH (2 | (FTDI_SIO_SET_RTS_MASK << 8))
-#define FTDI_SIO_SET_RTS_LOW (0 | (FTDI_SIO_SET_RTS_MASK << 8))
+#define FTDI_SIO_SET_RTS_HIGH ((FTDI_SIO_SET_RTS_MASK << 8) | 2)
+#define FTDI_SIO_SET_RTS_LOW ((FTDI_SIO_SET_RTS_MASK << 8) | 0)
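
Swapping the operand order changes nothing at runtime; it only puts the mask
byte first so each macro reads like the wValue it produces. The constants
are identical before and after:

        FTDI_SIO_SET_DTR_HIGH == 0x0101         /* mask 0x01 high, value 1 */
        FTDI_SIO_SET_DTR_LOW  == 0x0100
        FTDI_SIO_SET_RTS_HIGH == 0x0202         /* mask 0x02 high, value 2 */
        FTDI_SIO_SET_RTS_LOW  == 0x0200
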
/*
* ControlValue
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a84df2513994..c5d6c1e73e8e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -872,6 +872,14 @@
#define NOVITUS_BONO_E_PID 0x6010
/*
+ * ICPDAS I-756*U devices
+ */
+#define ICPDAS_VID 0x1b5c
+#define ICPDAS_I7560U_PID 0x0103
+#define ICPDAS_I7561U_PID 0x0104
+#define ICPDAS_I7563U_PID 0x0105
+
+/*
* RT Systems programming cables for various ham radios
*/
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index db591d19d416..97cabf803c2f 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -237,10 +237,10 @@ static inline int getDataLength(const __u8 *usbPacket)
*/
static inline int isAbortTrfCmnd(const unsigned char *buf)
{
- if (0 == memcmp(buf, GARMIN_STOP_TRANSFER_REQ,
- sizeof(GARMIN_STOP_TRANSFER_REQ)) ||
- 0 == memcmp(buf, GARMIN_STOP_TRANSFER_REQ_V2,
- sizeof(GARMIN_STOP_TRANSFER_REQ_V2)))
+ if (memcmp(buf, GARMIN_STOP_TRANSFER_REQ,
+ sizeof(GARMIN_STOP_TRANSFER_REQ)) == 0 ||
+ memcmp(buf, GARMIN_STOP_TRANSFER_REQ_V2,
+ sizeof(GARMIN_STOP_TRANSFER_REQ_V2)) == 0)
return 1;
else
return 0;
@@ -350,7 +350,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
unsigned l = 0;
dev_dbg(&garmin_data_p->port->dev, "%s - pkt-id: 0x%X.\n", __func__,
- 0xFF & pkt_id);
+ pkt_id);
*ptr++ = DLE;
*ptr++ = ACK;
@@ -366,7 +366,7 @@ static int gsp_send_ack(struct garmin_data *garmin_data_p, __u8 pkt_id)
*ptr++ = DLE;
*ptr++ = 0;
- *ptr++ = 0xFF & (-cksum);
+ *ptr++ = (-cksum) & 0xFF;
*ptr++ = DLE;
*ptr++ = ETX;
@@ -423,9 +423,9 @@ static int gsp_rec_packet(struct garmin_data *garmin_data_p, int count)
n++;
}
- if ((0xff & (cksum + *recpkt)) != 0) {
+ if (((cksum + *recpkt) & 0xff) != 0) {
dev_dbg(dev, "%s - invalid checksum, expected %02x, got %02x\n",
- __func__, 0xff & -cksum, 0xff & *recpkt);
+ __func__, -cksum & 0xff, *recpkt);
return -EINVPKT;
}
@@ -528,7 +528,7 @@ static int gsp_receive(struct garmin_data *garmin_data_p,
dev_dbg(dev, "NAK packet complete.\n");
} else {
dev_dbg(dev, "packet complete - id=0x%X.\n",
- 0xFF & data);
+ data);
gsp_rec_packet(garmin_data_p, size);
}
@@ -636,7 +636,7 @@ static int gsp_send(struct garmin_data *garmin_data_p,
garmin_data_p->outsize = 0;
- if (GARMIN_LAYERID_APPL != getLayerId(garmin_data_p->outbuffer)) {
+ if (getLayerId(garmin_data_p->outbuffer) != GARMIN_LAYERID_APPL) {
dev_dbg(dev, "not an application packet (%d)\n",
getLayerId(garmin_data_p->outbuffer));
return -1;
@@ -688,7 +688,7 @@ static int gsp_send(struct garmin_data *garmin_data_p,
*dst++ = DLE;
}
- cksum = 0xFF & -cksum;
+ cksum = -cksum & 0xFF;
*dst++ = cksum;
if (cksum == DLE)
*dst++ = DLE;
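
The checksum being rewritten here is an 8-bit two's complement: the byte
appended to the packet makes the sum of all checksummed bytes equal zero,
which is exactly what gsp_rec_packet() verifies above. A worked example with
a hypothetical payload:

        /* payload bytes 0x10, 0x20, 0x30: sum = 0x60        */
        /* checksum = -0x60 & 0xFF = 0xA0                    */
        /* receiver: (0x60 + 0xA0) & 0xFF == 0 -> packet OK  */
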
@@ -860,7 +860,6 @@ static int process_resetdev_request(struct usb_serial_port *port)
static int garmin_clear(struct garmin_data *garmin_data_p)
{
unsigned long flags;
- int status = 0;
/* flush all queued data */
pkt_clear(garmin_data_p);
@@ -870,7 +869,7 @@ static int garmin_clear(struct garmin_data *garmin_data_p)
garmin_data_p->outsize = 0;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
- return status;
+ return 0;
}
@@ -970,7 +969,7 @@ static void garmin_write_bulk_callback(struct urb *urb)
struct garmin_data *garmin_data_p =
usb_get_serial_port_data(port);
- if (GARMIN_LAYERID_APPL == getLayerId(urb->transfer_buffer)) {
+ if (getLayerId(urb->transfer_buffer) == GARMIN_LAYERID_APPL) {
if (garmin_data_p->mode == MODE_GARMIN_SERIAL) {
gsp_send_ack(garmin_data_p,
@@ -1025,7 +1024,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
dismiss_ack ? NULL : port);
urb->transfer_flags |= URB_ZERO_PACKET;
- if (GARMIN_LAYERID_APPL == getLayerId(buffer)) {
+ if (getLayerId(buffer) == GARMIN_LAYERID_APPL) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= APP_REQ_SEEN;
@@ -1077,9 +1076,9 @@ static int garmin_write(struct tty_struct *tty, struct usb_serial_port *port,
pktsiz = getDataLength(garmin_data_p->privpkt);
pktid = getPacketId(garmin_data_p->privpkt);
- if (count == (GARMIN_PKTHDR_LENGTH+pktsiz)
- && GARMIN_LAYERID_PRIVATE ==
- getLayerId(garmin_data_p->privpkt)) {
+ if (count == (GARMIN_PKTHDR_LENGTH + pktsiz) &&
+ getLayerId(garmin_data_p->privpkt) ==
+ GARMIN_LAYERID_PRIVATE) {
dev_dbg(dev, "%s - processing private request %d\n",
__func__, pktid);
@@ -1192,7 +1191,7 @@ static void garmin_read_bulk_callback(struct urb *urb)
garmin_read_process(garmin_data_p, data, urb->actual_length, 1);
if (urb->actual_length == 0 &&
- 0 != (garmin_data_p->flags & FLAGS_BULK_IN_RESTART)) {
+ (garmin_data_p->flags & FLAGS_BULK_IN_RESTART) != 0) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags &= ~FLAGS_BULK_IN_RESTART;
spin_unlock_irqrestore(&garmin_data_p->lock, flags);
@@ -1203,7 +1202,7 @@ static void garmin_read_bulk_callback(struct urb *urb)
__func__, retval);
} else if (urb->actual_length > 0) {
/* Continue trying to read until nothing more is received */
- if (0 == (garmin_data_p->flags & FLAGS_THROTTLED)) {
+ if ((garmin_data_p->flags & FLAGS_THROTTLED) == 0) {
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dev_err(&port->dev,
@@ -1249,12 +1248,12 @@ static void garmin_read_int_callback(struct urb *urb)
urb->transfer_buffer);
if (urb->actual_length == sizeof(GARMIN_BULK_IN_AVAIL_REPLY) &&
- 0 == memcmp(data, GARMIN_BULK_IN_AVAIL_REPLY,
- sizeof(GARMIN_BULK_IN_AVAIL_REPLY))) {
+ memcmp(data, GARMIN_BULK_IN_AVAIL_REPLY,
+ sizeof(GARMIN_BULK_IN_AVAIL_REPLY)) == 0) {
dev_dbg(&port->dev, "%s - bulk data available.\n", __func__);
- if (0 == (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
+ if ((garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE) == 0) {
/* bulk data available */
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
@@ -1276,8 +1275,8 @@ static void garmin_read_int_callback(struct urb *urb)
}
} else if (urb->actual_length == (4+sizeof(GARMIN_START_SESSION_REPLY))
- && 0 == memcmp(data, GARMIN_START_SESSION_REPLY,
- sizeof(GARMIN_START_SESSION_REPLY))) {
+ && memcmp(data, GARMIN_START_SESSION_REPLY,
+ sizeof(GARMIN_START_SESSION_REPLY)) == 0) {
spin_lock_irqsave(&garmin_data_p->lock, flags);
garmin_data_p->flags |= FLAGS_SESSION_REPLY1_SEEN;
@@ -1356,7 +1355,7 @@ static void garmin_unthrottle(struct tty_struct *tty)
if (garmin_data_p->mode == MODE_NATIVE)
garmin_flush_queue(garmin_data_p);
- if (0 != (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
+ if ((garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE) != 0) {
status = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (status)
dev_err(&port->dev,
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index f49327d20ee8..f3007ecdd1b4 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1398,7 +1398,7 @@ static void edge_throttle(struct tty_struct *tty)
}
/* if we are implementing RTS/CTS, toggle that line */
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
edge_port->shadowMCR &= ~MCR_RTS;
status = send_cmd_write_uart_register(edge_port, MCR,
edge_port->shadowMCR);
@@ -1435,7 +1435,7 @@ static void edge_unthrottle(struct tty_struct *tty)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
edge_port->shadowMCR |= MCR_RTS;
send_cmd_write_uart_register(edge_port, MCR,
edge_port->shadowMCR);
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 5ad4a0fb4b26..344b4eea4bd5 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -360,7 +360,7 @@ static void iuu_led_activity_on(struct urb *urb)
int result;
char *buf_ptr = port->write_urb->transfer_buffer;
*buf_ptr++ = IUU_SET_LED;
- if (xmas == 1) {
+ if (xmas) {
get_random_bytes(buf_ptr, 6);
*(buf_ptr+7) = 1;
} else {
@@ -380,7 +380,7 @@ static void iuu_led_activity_off(struct urb *urb)
struct usb_serial_port *port = urb->context;
int result;
char *buf_ptr = port->write_urb->transfer_buffer;
- if (xmas == 1) {
+ if (xmas) {
iuu_rxcmd(urb);
return;
} else {
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index e07b15ed5814..b6bd8e4a6486 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1963,7 +1963,7 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
if (d_details->product_id == keyspan_usa49wg_product_id) {
dr = (void *)(s_priv->ctrl_buf);
dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_OUT;
- dr->bRequest = 0xB0; /* 49wg control message */;
+ dr->bRequest = 0xB0; /* 49wg control message */
dr->wValue = 0;
dr->wIndex = 0;
dr->wLength = cpu_to_le16(sizeof(msg));
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index e020ad28a00c..fc5d3a791e08 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -472,7 +472,6 @@ static void klsi_105_set_termios(struct tty_struct *tty,
/* maybe this should be simulated by sending read
* disable and read enable messages?
*/
- ;
#if 0
priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
mct_u232_set_modem_ctrl(serial, priv->control_state);
@@ -527,7 +526,6 @@ static void klsi_105_set_termios(struct tty_struct *tty,
mct_u232_set_line_ctrl(serial, priv->last_lcr);
#endif
- ;
}
/*
* Set flow control: well, I do not really know how to handle DTR/RTS.
@@ -546,7 +544,6 @@ static void klsi_105_set_termios(struct tty_struct *tty,
priv->control_state &= ~(TIOCM_DTR | TIOCM_RTS);
mct_u232_set_modem_ctrl(serial, priv->control_state);
#endif
- ;
}
memcpy(cfg, &priv->cfg, sizeof(*cfg));
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index fd707d6a10e2..885655315de1 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
static int mct_u232_port_probe(struct usb_serial_port *port)
{
+ struct usb_serial *serial = port->serial;
struct mct_u232_private *priv;
+ /* check first to simplify error handling */
+ if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
+ dev_err(&port->dev, "expected endpoint missing\n");
+ return -ENODEV;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
/* Use second interrupt-in endpoint for reading. */
- priv->read_urb = port->serial->port[1]->interrupt_in_urb;
+ priv->read_urb = serial->port[1]->interrupt_in_urb;
priv->read_urb->context = port;
spin_lock_init(&priv->lock);
@@ -428,7 +435,7 @@ static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
* either.
*/
spin_lock_irqsave(&priv->lock, flags);
- if (tty && (tty->termios.c_cflag & CBAUD))
+ if (tty && C_BAUD(tty))
priv->control_state = TIOCM_DTR | TIOCM_RTS;
else
priv->control_state = 0;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 78b4f64c6b00..2eddbe538cda 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1308,7 +1308,7 @@ static void mos7720_throttle(struct tty_struct *tty)
}
/* if we are implementing RTS/CTS, toggle that line */
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
mos7720_port->shadowMCR &= ~UART_MCR_RTS;
write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
@@ -1338,7 +1338,7 @@ static void mos7720_unthrottle(struct tty_struct *tty)
}
/* if we are implementing RTS/CTS, toggle that line */
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
mos7720_port->shadowMCR |= UART_MCR_RTS;
write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
mos7720_port->shadowMCR);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2c69bfcdacc6..ed378fb232e7 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1425,7 +1425,7 @@ static void mos7840_throttle(struct tty_struct *tty)
return;
}
/* if we are implementing RTS/CTS, toggle that line */
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
mos7840_port->shadowMCR &= ~MCR_RTS;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
mos7840_port->shadowMCR);
@@ -1466,7 +1466,7 @@ static void mos7840_unthrottle(struct tty_struct *tty)
}
/* if we are implementing RTS/CTS, toggle that line */
- if (tty->termios.c_cflag & CRTSCTS) {
+ if (C_CRTSCTS(tty)) {
mos7840_port->shadowMCR |= MCR_RTS;
status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
mos7840_port->shadowMCR);
@@ -1842,7 +1842,7 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
Data = 0x0c;
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
- if (mos7840_port->read_urb_busy == false) {
+ if (!mos7840_port->read_urb_busy) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
if (status) {
@@ -1906,7 +1906,7 @@ static void mos7840_set_termios(struct tty_struct *tty,
return;
}
- if (mos7840_port->read_urb_busy == false) {
+ if (!mos7840_port->read_urb_busy) {
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_KERNEL);
if (status) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 348e19834b83..c6f497f16526 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 504f5bff79c0..2df8ad5ede89 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -973,7 +973,7 @@ static int qt2_write(struct tty_struct *tty,
data = write_urb->transfer_buffer;
spin_lock_irqsave(&port_priv->urb_lock, flags);
- if (port_priv->urb_in_use == true) {
+ if (port_priv->urb_in_use) {
dev_err(&port->dev, "qt2_write - urb is in use\n");
goto write_out;
}
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index b2dff0f14743..93c6c9b08daa 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -76,13 +76,8 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-
-#ifndef CONFIG_USB_SERIAL_SAFE_PADDED
-#define CONFIG_USB_SERIAL_SAFE_PADDED 0
-#endif
-
-static bool safe = 1;
-static bool padded = CONFIG_USB_SERIAL_SAFE_PADDED;
+static bool safe = true;
+static bool padded = IS_ENABLED(CONFIG_USB_SERIAL_SAFE_PADDED);
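
IS_ENABLED() evaluates to 1 when the Kconfig option is enabled and 0
otherwise, so the #ifndef fallback that kept the bare macro defined can go
while the module default stays the same:

        /* CONFIG_USB_SERIAL_SAFE_PADDED=y: padded = 1 (true)  */
        /* option unset:                    padded = 0 (false) */
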
#define DRIVER_AUTHOR "sl@lineo.com, tbr@lineo.com, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB Safe Encapsulated Serial"
@@ -278,7 +273,7 @@ static int safe_startup(struct usb_serial *serial)
case LINEO_SAFESERIAL_CRC:
break;
case LINEO_SAFESERIAL_CRC_PADDED:
- padded = 1;
+ padded = true;
break;
default:
return -EINVAL;
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index 57bf3ad41fb6..5a12c03138f8 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -57,7 +57,6 @@
void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb)
{
char *what = NULL;
- int i;
switch (srb->cmnd[0]) {
case TEST_UNIT_READY: what = "TEST_UNIT_READY"; break;
@@ -153,10 +152,8 @@ void usb_stor_show_command(const struct us_data *us, struct scsi_cmnd *srb)
default: what = "(unknown command)"; break;
}
usb_stor_dbg(us, "Command %s (%d bytes)\n", what, srb->cmd_len);
- usb_stor_dbg(us, "bytes: ");
- for (i = 0; i < srb->cmd_len && i < 16; i++)
- US_DEBUGPX(" %02x", srb->cmnd[i]);
- US_DEBUGPX("\n");
+ usb_stor_dbg(us, "bytes: %*ph\n", min_t(int, srb->cmd_len, 16),
+ (const unsigned char *)srb->cmnd);
}
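
%*ph is the printk() extension that dumps a small buffer (at most 64 bytes)
as space-separated hex, replacing the open-coded loop and the US_DEBUGPX()
continuation macro. For a six-byte TEST_UNIT_READY CDB the output now looks
something like:

        usb-storage 1-1:1.0: Command TEST_UNIT_READY (6 bytes)
        usb-storage 1-1:1.0: bytes: 00 00 00 00 00 00
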
void usb_stor_show_sense(const struct us_data *us,
@@ -174,11 +171,10 @@ void usb_stor_show_sense(const struct us_data *us,
if (what == NULL)
what = "(unknown ASC/ASCQ)";
- usb_stor_dbg(us, "%s: ", keystr);
if (fmt)
- US_DEBUGPX("%s (%s%x)\n", what, fmt, ascq);
+ usb_stor_dbg(us, "%s: %s (%s%x)\n", keystr, what, fmt, ascq);
else
- US_DEBUGPX("%s\n", what);
+ usb_stor_dbg(us, "%s: %s\n", keystr, what);
}
void usb_stor_dbg(const struct us_data *us, const char *fmt, ...)
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index f52520306e1a..6b365ce4e610 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -53,7 +53,6 @@ void usb_stor_show_sense(const struct us_data *us, unsigned char key,
__printf(2, 3) void usb_stor_dbg(const struct us_data *us,
const char *fmt, ...);
-#define US_DEBUGPX(fmt, ...) printk(fmt, ##__VA_ARGS__)
#define US_DEBUG(x) x
#else
__printf(2, 3)
@@ -63,8 +62,6 @@ static inline void _usb_stor_dbg(const struct us_data *us,
}
#define usb_stor_dbg(us, fmt, ...) \
do { if (0) _usb_stor_dbg(us, fmt, ##__VA_ARGS__); } while (0)
-#define US_DEBUGPX(fmt, ...) \
- do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
#define US_DEBUG(x)
#endif
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index f3cf4cecd2b7..d3a17c65a702 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1067,12 +1067,12 @@ static void ms_lib_free_writebuf(struct us_data *us)
ms_lib_clear_pagemap(info); /* (pdx)->MS_Lib.pagemap memset 0 in ms.h */
if (info->MS_Lib.blkpag) {
- kfree((u8 *)(info->MS_Lib.blkpag)); /* Arnold test ... */
+ kfree(info->MS_Lib.blkpag); /* Arnold test ... */
info->MS_Lib.blkpag = NULL;
}
if (info->MS_Lib.blkext) {
- kfree((u8 *)(info->MS_Lib.blkext)); /* Arnold test ... */
+ kfree(info->MS_Lib.blkext); /* Arnold test ... */
info->MS_Lib.blkext = NULL;
}
}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dba51362d2e2..90901861bfc0 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev)
unsigned int max_sectors = 64;
if (us->fflags & US_FL_MAX_SECTORS_MIN)
- max_sectors = PAGE_CACHE_SIZE >> 9;
+ max_sectors = PAGE_SIZE >> 9;
if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_hw_sectors(sdev->request_queue,
max_sectors);
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index b74603689b9e..79224fcf9b59 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -1102,24 +1102,24 @@ static int
sddr09_get_wp(struct us_data *us, struct sddr09_card_info *info) {
int result;
unsigned char status;
+ const char *wp_fmt;
result = sddr09_read_status(us, &status);
if (result) {
usb_stor_dbg(us, "read_status fails\n");
return result;
}
- usb_stor_dbg(us, "status 0x%02X", status);
if ((status & 0x80) == 0) {
info->flags |= SDDR09_WP; /* write protected */
- US_DEBUGPX(" WP");
+ wp_fmt = " WP";
+ } else {
+ wp_fmt = "";
}
- if (status & 0x40)
- US_DEBUGPX(" Ready");
- if (status & LUNBITS)
- US_DEBUGPX(" Suspended");
- if (status & 0x1)
- US_DEBUGPX(" Error");
- US_DEBUGPX("\n");
+ usb_stor_dbg(us, "status 0x%02X%s%s%s%s\n", status, wp_fmt,
+ status & 0x40 ? " Ready" : "",
+ status & LUNBITS ? " Suspended" : "",
+ status & 0x01 ? " Error" : "");
+
return 0;
}
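
Besides dropping US_DEBUGPX(), this makes the status decode atomic: the old
per-flag continuations could interleave with other printk output. With the
single format string, a hypothetical status byte of 0x01 (write-protect bit
clear, error bit set) logs as one line ending in:

        status 0x01 WP Error
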
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9ff9404f99d7..16bc679dc2fc 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -2,7 +2,7 @@
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
+ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
@@ -246,6 +246,29 @@ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
}
}
+static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *cmnd)
+{
+ u8 response_code = riu->response_code;
+
+ switch (response_code) {
+ case RC_INCORRECT_LUN:
+ cmnd->result = DID_BAD_TARGET << 16;
+ break;
+ case RC_TMF_SUCCEEDED:
+ cmnd->result = DID_OK << 16;
+ break;
+ case RC_TMF_NOT_SUPPORTED:
+ cmnd->result = DID_TARGET_FAILURE << 16;
+ break;
+ default:
+ uas_log_cmd_state(cmnd, "response iu", response_code);
+ cmnd->result = DID_ERROR << 16;
+ break;
+ }
+
+ return response_code == RC_TMF_SUCCEEDED;
+}
+
static void uas_stat_cmplt(struct urb *urb)
{
struct iu *iu = urb->transfer_buffer;
@@ -258,6 +281,7 @@ static void uas_stat_cmplt(struct urb *urb)
unsigned long flags;
unsigned int idx;
int status = urb->status;
+ bool success;
spin_lock_irqsave(&devinfo->lock, flags);
@@ -313,13 +337,13 @@ static void uas_stat_cmplt(struct urb *urb)
uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
break;
case IU_ID_RESPONSE:
- uas_log_cmd_state(cmnd, "unexpected response iu",
- ((struct response_iu *)iu)->response_code);
- /* Error, cancel data transfers */
- data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
- data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
cmdinfo->state &= ~COMMAND_INFLIGHT;
- cmnd->result = DID_ERROR << 16;
+ success = uas_evaluate_response_iu((struct response_iu *)iu, cmnd);
+ if (!success) {
+ /* Error, cancel data transfers */
+ data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
+ data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
+ }
uas_try_complete(cmnd, __func__);
break;
default:
@@ -757,6 +781,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
return SUCCESS;
}
+static int uas_target_alloc(struct scsi_target *starget)
+{
+ struct uas_dev_info *devinfo = (struct uas_dev_info *)
+ dev_to_shost(starget->dev.parent)->hostdata;
+
+ if (devinfo->flags & US_FL_NO_REPORT_LUNS)
+ starget->no_report_luns = 1;
+
+ return 0;
+}
+
static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
@@ -800,7 +835,6 @@ static int uas_slave_configure(struct scsi_device *sdev)
if (devinfo->flags & US_FL_BROKEN_FUA)
sdev->broken_fua = 1;
- scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
@@ -808,11 +842,12 @@ static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
+ .target_alloc = uas_target_alloc,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
- .can_queue = 65536, /* Is there a limit on the _host_ ? */
+ .can_queue = MAX_CMNDS,
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
@@ -932,6 +967,12 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto set_alt0;
+ /*
+ * 1 tag is reserved for untagged commands +
+ * 1 tag to avoid off by one errors in some bridge firmwares
+ */
+ shost->can_queue = devinfo->qdepth - 2;
+
usb_set_intfdata(intf, shost);
result = scsi_add_host(shost, &intf->dev);
if (result)
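
The queue accounting here is simple arithmetic: a bridge that advertises a
queue depth of, say, 32 tags (a hypothetical device) ends up with

        shost->can_queue = 32 - 2 = 30

usable tagged slots: one tag reserved for untagged commands and one kept
free to dodge the off-by-one firmware bug named in the comment.
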
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index ccc113e83d88..53341a77d89f 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: David Webb <djw@noc.ac.uk> */
+UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
+ "Seagate",
+ "Expansion Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_LUNS),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 43576ed31ccd..9de988a0f856 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -482,7 +482,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
- US_FL_MAX_SECTORS_240);
+ US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
p = quirks;
while (*p) {
@@ -532,6 +532,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'i':
f |= US_FL_IGNORE_DEVICE;
break;
+ case 'j':
+ f |= US_FL_NO_REPORT_LUNS;
+ break;
case 'l':
f |= US_FL_NOT_LOCKABLE;
break;
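
The new 'j' letter exposes US_FL_NO_REPORT_LUNS through the existing quirks
interface, so the workaround can be tested without a rebuilt kernel. On an
affected Seagate Expansion Desk (the VID:PID added to unusual_uas.h above)
that would be, for example:

        usb-storage.quirks=0bc2:331a:j
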
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index facaaf003f19..e40da7759a0e 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
+ if (size > urb->transfer_buffer_length) {
+ /* should not happen, probably malicious packet */
+ if (ud->side == USBIP_STUB) {
+ usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
+ return 0;
+ } else {
+ usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ return -EPIPE;
+ }
+ }
+
ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 64933b993d7a..2580a32bcdff 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -117,11 +117,12 @@ EXPORT_SYMBOL_GPL(usbip_event_add);
int usbip_event_happened(struct usbip_device *ud)
{
int happened = 0;
+ unsigned long flags;
- spin_lock(&ud->lock);
+ spin_lock_irqsave(&ud->lock, flags);
if (ud->event != 0)
happened = 1;
- spin_unlock(&ud->lock);
+ spin_unlock_irqrestore(&ud->lock, flags);
return happened;
}
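
The protocol document deleted below is long, but the wire format it
describes is compact; as its own OP_REQ_DEVLIST table shows, the complete
device-list request is just eight big-endian bytes:

        01 00 80 05 00 00 00 00
        \___/ \___/ \_________/
        0x0100 0x8005  status 0
        version  code  (unused)
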
diff --git a/drivers/usb/usbip/usbip_protocol.txt b/drivers/usb/usbip/usbip_protocol.txt
deleted file mode 100644
index 16b6fe27284c..000000000000
--- a/drivers/usb/usbip/usbip_protocol.txt
+++ /dev/null
@@ -1,358 +0,0 @@
-PRELIMINARY DRAFT, MAY CONTAIN MISTAKES!
-28 Jun 2011
-
-The USB/IP protocol follows a server/client architecture. The server exports the
-USB devices and the clients imports them. The device driver for the exported
-USB device runs on the client machine.
-
-The client may ask for the list of the exported USB devices. To get the list, the
-client opens a TCP/IP connection towards the server, and sends an OP_REQ_DEVLIST
-packet on top of the TCP/IP connection (so the actual OP_REQ_DEVLIST may be sent
-in one or more pieces at the low level transport layer). The server sends back
-the OP_REP_DEVLIST packet which lists the exported USB devices. Finally the
-TCP/IP connection is closed.
-
- virtual host controller usb host
- "client" "server"
- (imports USB devices) (exports USB devices)
- | |
- | OP_REQ_DEVLIST |
- | ----------------------------------------------> |
- | |
- | OP_REP_DEVLIST |
- | <---------------------------------------------- |
- | |
-
-Once the client knows the list of exported USB devices it may decide to use one
-of them. First the client opens a TCP/IP connection towards the server and
-sends an OP_REQ_IMPORT packet. The server replies with OP_REP_IMPORT. If the
-import was successful the TCP/IP connection remains open and will be used
-to transfer the URB traffic between the client and the server. The client may
-send two types of packets: the USBIP_CMD_SUBMIT to submit an URB, and
-USBIP_CMD_UNLINK to unlink a previously submitted URB. The answers of the
-server may be USBIP_RET_SUBMIT and USBIP_RET_UNLINK respectively.
-
- virtual host controller usb host
- "client" "server"
- (imports USB devices) (exports USB devices)
- | |
- | OP_REQ_IMPORT |
- | ----------------------------------------------> |
- | |
- | OP_REP_IMPORT |
- | <---------------------------------------------- |
- | |
- | |
- | USBIP_CMD_SUBMIT(seqnum = n) |
- | ----------------------------------------------> |
- | |
- | USBIP_RET_SUBMIT(seqnum = n) |
- | <---------------------------------------------- |
- | . |
- | : |
- | |
- | USBIP_CMD_SUBMIT(seqnum = m) |
- | ----------------------------------------------> |
- | |
- | USBIP_CMD_SUBMIT(seqnum = m+1) |
- | ----------------------------------------------> |
- | |
- | USBIP_CMD_SUBMIT(seqnum = m+2) |
- | ----------------------------------------------> |
- | |
- | USBIP_RET_SUBMIT(seqnum = m) |
- | <---------------------------------------------- |
- | |
- | USBIP_CMD_SUBMIT(seqnum = m+3) |
- | ----------------------------------------------> |
- | |
- | USBIP_RET_SUBMIT(seqnum = m+1) |
- | <---------------------------------------------- |
- | |
- | USBIP_CMD_SUBMIT(seqnum = m+4) |
- | ----------------------------------------------> |
- | |
- | USBIP_RET_SUBMIT(seqnum = m+2) |
- | <---------------------------------------------- |
- | . |
- | : |
- | |
- | USBIP_CMD_UNLINK |
- | ----------------------------------------------> |
- | |
- | USBIP_RET_UNLINK |
- | <---------------------------------------------- |
- | |
-
-The fields are in network (big endian) byte order, meaning that the most significant
-byte (MSB) is stored at the lowest address.
-
-
-OP_REQ_DEVLIST: Retrieve the list of exported USB devices.
-
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
------------+--------+------------+---------------------------------------------------
- 2 | 2 | 0x8005 | Command code: Retrieve the list of exported USB
- | | | devices.
------------+--------+------------+---------------------------------------------------
- 4 | 4 | 0x00000000 | Status: unused, shall be set to 0
-
-OP_REP_DEVLIST: Reply with the list of exported USB devices.
-
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0.
------------+--------+------------+---------------------------------------------------
- 2 | 2 | 0x0005 | Reply code: The list of exported USB devices.
------------+--------+------------+---------------------------------------------------
- 4 | 4 | 0x00000000 | Status: 0 for OK
------------+--------+------------+---------------------------------------------------
- 8 | 4 | n | Number of exported devices: 0 means no exported
- | | | devices.
------------+--------+------------+---------------------------------------------------
- 0x0C | | | From now on the exported n devices are described,
- | | | if any. If no devices are exported the message
- | | | ends with the previous "number of exported
- | | | devices" field.
------------+--------+------------+---------------------------------------------------
- | 256 | | path: Path of the device on the host exporting the
- | | | USB device, string closed with zero byte, e.g.
- | | | "/sys/devices/pci0000:00/0000:00:1d.1/usb3/3-2"
- | | | The unused bytes shall be filled with zero
- | | | bytes.
------------+--------+------------+---------------------------------------------------
- 0x10C | 32 | | busid: Bus ID of the exported device, string
- | | | closed with zero byte, e.g. "3-2". The unused
- | | | bytes shall be filled with zero bytes.
------------+--------+------------+---------------------------------------------------
- 0x12C | 4 | | busnum
------------+--------+------------+---------------------------------------------------
- 0x130 | 4 | | devnum
------------+--------+------------+---------------------------------------------------
- 0x134 | 4 | | speed
------------+--------+------------+---------------------------------------------------
- 0x138 | 2 | | idVendor
------------+--------+------------+---------------------------------------------------
- 0x13A | 2 | | idProduct
------------+--------+------------+---------------------------------------------------
- 0x13C | 2 | | bcdDevice
------------+--------+------------+---------------------------------------------------
- 0x13E | 1 | | bDeviceClass
------------+--------+------------+---------------------------------------------------
- 0x13F | 1 | | bDeviceSubClass
------------+--------+------------+---------------------------------------------------
- 0x140 | 1 | | bDeviceProtocol
------------+--------+------------+---------------------------------------------------
- 0x141 | 1 | | bConfigurationValue
------------+--------+------------+---------------------------------------------------
- 0x142 | 1 | | bNumConfigurations
------------+--------+------------+---------------------------------------------------
- 0x143 | 1 | | bNumInterfaces
------------+--------+------------+---------------------------------------------------
- 0x144 | | m_0 | From now on each interface is described, all
- | | | together bNumInterfaces times, with the
- | | | following 4 fields:
------------+--------+------------+---------------------------------------------------
- | 1 | | bInterfaceClass
------------+--------+------------+---------------------------------------------------
- 0x145 | 1 | | bInterfaceSubClass
------------+--------+------------+---------------------------------------------------
- 0x146 | 1 | | bInterfaceProtocol
------------+--------+------------+---------------------------------------------------
- 0x147 | 1 | | padding byte for alignment, shall be set to zero
------------+--------+------------+---------------------------------------------------
- 0xC + | | | The second exported USB device starts at i=1
- i*0x138 + | | | with the busid field.
- m_(i-1)*4 | | |
-
-OP_REQ_IMPORT: Request to import (attach) a remote USB device.
-
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
------------+--------+------------+---------------------------------------------------
- 2 | 2 | 0x8003 | Command code: import a remote USB device.
------------+--------+------------+---------------------------------------------------
- 4 | 4 | 0x00000000 | Status: unused, shall be set to 0
------------+--------+------------+---------------------------------------------------
- 8 | 32 | | busid: the busid of the exported device on the
- | | | remote host. The possible values are taken
- | | | from the message field OP_REP_DEVLIST.busid.
- | | | A string closed with zero, the unused bytes
- | | | shall be filled with zeros.
------------+--------+------------+---------------------------------------------------
-
-OP_REP_IMPORT: Reply to import (attach) a remote USB device.
-
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
------------+--------+------------+---------------------------------------------------
- 2 | 2 | 0x0003 | Reply code: Reply to import.
------------+--------+------------+---------------------------------------------------
- 4 | 4 | 0x00000000 | Status: 0 for OK
- | | | 1 for error
------------+--------+------------+---------------------------------------------------
- 8 | | | From now on come the details of the imported
- | | | device, if the previous status field was OK (0),
- | | | otherwise the reply ends with the status field.
------------+--------+------------+---------------------------------------------------
- | 256 | | path: Path of the device on the host exporting the
- | | | USB device, string closed with zero byte, e.g.
- | | | "/sys/devices/pci0000:00/0000:00:1d.1/usb3/3-2"
- | | | The unused bytes shall be filled with zero
- | | | bytes.
------------+--------+------------+---------------------------------------------------
- 0x108 | 32 | | busid: Bus ID of the exported device, string
- | | | closed with zero byte, e.g. "3-2". The unused
- | | | bytes shall be filled with zero bytes.
------------+--------+------------+---------------------------------------------------
- 0x128 | 4 | | busnum
------------+--------+------------+---------------------------------------------------
- 0x12C | 4 | | devnum
------------+--------+------------+---------------------------------------------------
- 0x130 | 4 | | speed
------------+--------+------------+---------------------------------------------------
- 0x134 | 2 | | idVendor
------------+--------+------------+---------------------------------------------------
- 0x136 | 2 | | idProduct
------------+--------+------------+---------------------------------------------------
- 0x138 | 2 | | bcdDevice
------------+--------+------------+---------------------------------------------------
- 0x139 | 1 | | bDeviceClass
------------+--------+------------+---------------------------------------------------
- 0x13A | 1 | | bDeviceSubClass
------------+--------+------------+---------------------------------------------------
- 0x13B | 1 | | bDeviceProtocol
------------+--------+------------+---------------------------------------------------
- 0x13C | 1 | | bConfigurationValue
------------+--------+------------+---------------------------------------------------
- 0x13D | 1 | | bNumConfigurations
------------+--------+------------+---------------------------------------------------
- 0x13E | 1 | | bNumInterfaces
-
-USBIP_CMD_SUBMIT: Submit an URB
-
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 4 | 0x00000001 | command: Submit an URB
------------+--------+------------+---------------------------------------------------
- 4 | 4 | | seqnum: the sequence number of the URB to submit
------------+--------+------------+---------------------------------------------------
- 8 | 4 | | devid
------------+--------+------------+---------------------------------------------------
- 0xC | 4 | | direction: 0: USBIP_DIR_OUT
- | | | 1: USBIP_DIR_IN
------------+--------+------------+---------------------------------------------------
- 0x10 | 4 | | ep: endpoint number, possible values are: 0...15
------------+--------+------------+---------------------------------------------------
- 0x14 | 4 | | transfer_flags: possible values depend on the
- | | | URB transfer type, see below
------------+--------+------------+---------------------------------------------------
- 0x18 | 4 | | transfer_buffer_length
------------+--------+------------+---------------------------------------------------
- 0x1C | 4 | | start_frame: specify the selected frame to
- | | | transmit an ISO frame, ignored if URB_ISO_ASAP
- | | | is specified at transfer_flags
------------+--------+------------+---------------------------------------------------
- 0x20 | 4 | | number_of_packets: number of ISO packets
------------+--------+------------+---------------------------------------------------
- 0x24 | 4 | | interval: maximum time for the request on the
- | | | server-side host controller
------------+--------+------------+---------------------------------------------------
- 0x28 | 8 | | setup: data bytes for USB setup, filled with
- | | | zeros if not used
------------+--------+------------+---------------------------------------------------
- 0x30 | | | URB data. For ISO transfers, the padding between
- | | | ISO packets is not transmitted.
-
-
- Allowed transfer_flags | value | control | interrupt | bulk | isochronous
- -------------------------+------------+---------+-----------+----------+-------------
- URB_SHORT_NOT_OK | 0x00000001 | only in | only in | only in | no
- URB_ISO_ASAP | 0x00000002 | no | no | no | yes
- URB_NO_TRANSFER_DMA_MAP | 0x00000004 | yes | yes | yes | yes
- URB_NO_FSBR | 0x00000020 | yes | no | no | no
- URB_ZERO_PACKET | 0x00000040 | no | no | only out | no
- URB_NO_INTERRUPT | 0x00000080 | yes | yes | yes | yes
- URB_FREE_BUFFER | 0x00000100 | yes | yes | yes | yes
- URB_DIR_MASK | 0x00000200 | yes | yes | yes | yes
-
-
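- As a minimal sketch of the framing described above, a client might pack
- the 48-byte USBIP_CMD_SUBMIT header like this (helper and variable
- names are illustrative, not taken from the kernel sources; all fields
- are written in network byte order):
-
-   #include <arpa/inet.h>
-   #include <stdint.h>
-   #include <string.h>
-
-   static void put_u32(uint8_t *p, uint32_t v)
-   {
-           v = htonl(v);
-           memcpy(p, &v, 4);                /* serialize big-endian */
-   }
-
-   static void pack_cmd_submit(uint8_t buf[48], uint32_t seqnum,
-                               uint32_t devid, uint32_t dir_in,
-                               uint32_t ep, uint32_t flags,
-                               uint32_t buflen, const uint8_t setup[8])
-   {
-           memset(buf, 0, 48);
-           put_u32(buf + 0x00, 0x00000001); /* USBIP_CMD_SUBMIT */
-           put_u32(buf + 0x04, seqnum);
-           put_u32(buf + 0x08, devid);
-           put_u32(buf + 0x0C, dir_in);     /* 0 = OUT, 1 = IN */
-           put_u32(buf + 0x10, ep);
-           put_u32(buf + 0x14, flags);      /* transfer_flags */
-           put_u32(buf + 0x18, buflen);     /* transfer_buffer_length */
-           /* start_frame, number_of_packets, interval stay zero here */
-           memcpy(buf + 0x28, setup, 8);    /* setup packet, or zeros */
-   }
-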
-USBIP_RET_SUBMIT: Reply for submitting an URB
-
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 4 | 0x00000003 | command: reply for submitting an URB
------------+--------+------------+---------------------------------------------------
- 4 | 4 | | seqnum: URB sequence number
------------+--------+------------+---------------------------------------------------
- 8 | 4 | | devid
------------+--------+------------+---------------------------------------------------
- 0xC | 4 | | direction: 0: USBIP_DIR_OUT
- | | | 1: USBIP_DIR_IN
------------+--------+------------+---------------------------------------------------
- 0x10 | 4 | | ep: endpoint number
------------+--------+------------+---------------------------------------------------
- 0x14 | 4 | | status: zero for a successful URB transaction,
- | | | otherwise a negative error code.
------------+--------+------------+---------------------------------------------------
- 0x18 | 4 | n | actual_length: number of URB data bytes
------------+--------+------------+---------------------------------------------------
- 0x1C | 4 | | start_frame: for an ISO transfer, the frame that
- | | | was actually selected for transmission.
------------+--------+------------+---------------------------------------------------
- 0x20 | 4 | | number_of_packets
------------+--------+------------+---------------------------------------------------
- 0x24 | 4 | | error_count
------------+--------+------------+---------------------------------------------------
- 0x28 | 8 | | setup: data bytes for USB setup, filled with
- | | | zeros if not used
------------+--------+------------+---------------------------------------------------
- 0x30 | n | | URB data bytes. For ISO transfers, the padding
- | | | between ISO packets is not transmitted.
-
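- A sketch of the receiving side, pulling status and actual_length out of
- a 48-byte USBIP_RET_SUBMIT header (get_u32 mirrors the put_u32 helper
- above; names are illustrative):
-
-   static uint32_t get_u32(const uint8_t *p)
-   {
-           uint32_t v;
-
-           memcpy(&v, p, 4);
-           return ntohl(v);
-   }
-
-   /* Returns the URB status; *actual_length tells how many URB data
-    * bytes follow the header. */
-   static int check_ret_submit(const uint8_t hdr[48],
-                               uint32_t *actual_length)
-   {
-           if (get_u32(hdr + 0x00) != 0x00000003)
-                   return -1;                    /* not a RET_SUBMIT */
-           *actual_length = get_u32(hdr + 0x18);
-           return (int)get_u32(hdr + 0x14);      /* 0 on success */
-   }
-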
-USBIP_CMD_UNLINK: Unlink an URB
-
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 4 | 0x00000002 | command: URB unlink command
------------+--------+------------+---------------------------------------------------
- 4 | 4 | | seqnum: sequence number of this unlink request
- | | | itself, not of the URB to be unlinked (see 0x14)
------------+--------+------------+---------------------------------------------------
- 8 | 4 | | devid
------------+--------+------------+---------------------------------------------------
- 0xC | 4 | | direction: 0: USBIP_DIR_OUT
- | | | 1: USBIP_DIR_IN
------------+--------+------------+---------------------------------------------------
- 0x10 | 4 | | ep: endpoint number: zero
------------+--------+------------+---------------------------------------------------
- 0x14 | 4 | | seqnum: the sequence number of the URB to unlink,
- | | | i.e. the value previously given in the
- | | | USBIP_CMD_SUBMIT seqnum field
------------+--------+------------+---------------------------------------------------
- 0x30 | n | | URB data bytes. For ISO transfers, the padding
- | | | between ISO packets is not transmitted.
-
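- Note that an unlink involves two sequence numbers: the header seqnum at
- offset 4 numbers the unlink request itself, while the field at 0x14
- names the earlier CMD_SUBMIT to cancel. A sketch, reusing put_u32 from
- above:
-
-   static void pack_cmd_unlink(uint8_t buf[48], uint32_t unlink_seqnum,
-                               uint32_t devid, uint32_t victim_seqnum)
-   {
-           memset(buf, 0, 48);
-           put_u32(buf + 0x00, 0x00000002);  /* USBIP_CMD_UNLINK */
-           put_u32(buf + 0x04, unlink_seqnum);
-           put_u32(buf + 0x08, devid);
-           put_u32(buf + 0x14, victim_seqnum);
-   }
-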
-USBIP_RET_UNLINK: Reply for URB unlink
-
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 4 | 0x00000004 | command: reply for the URB unlink command
------------+--------+------------+---------------------------------------------------
- 4 | 4 | | seqnum: the unlinked URB sequence number
------------+--------+------------+---------------------------------------------------
- 8 | 4 | | devid
------------+--------+------------+---------------------------------------------------
- 0xC | 4 | | direction: 0: USBIP_DIR_OUT
- | | | 1: USBIP_DIR_IN
------------+--------+------------+---------------------------------------------------
- 0x10 | 4 | | ep: endpoint number
------------+--------+------------+---------------------------------------------------
- 0x14 | 4 | | status: the value of urb->status as seen in the
- | | | URB completion handler: zero on success,
- | | | otherwise a negative error code.
------------+--------+------------+---------------------------------------------------
- 0x30 | n | | URB data bytes. For ISO transfers, the padding
- | | | between ISO packets is not transmitted.
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 7fbe19d5279e..fca51105974e 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -121,9 +121,11 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
void rh_port_connect(int rhport, enum usb_device_speed speed)
{
+ unsigned long flags;
+
usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
| (1 << USB_PORT_FEAT_C_CONNECTION);
@@ -139,22 +141,24 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
break;
}
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
}
static void rh_port_disconnect(int rhport)
{
+ unsigned long flags;
+
usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
the_controller->port_status[rhport] |=
(1 << USB_PORT_FEAT_C_CONNECTION);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
}
@@ -182,13 +186,14 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
int retval;
int rhport;
int changed = 0;
+ unsigned long flags;
retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8);
memset(buf, 0, retval);
vhci = hcd_to_vhci(hcd);
- spin_lock(&vhci->lock);
+ spin_lock_irqsave(&vhci->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd)) {
usbip_dbg_vhci_rh("hw accessible flag not on?\n");
goto done;
@@ -209,7 +214,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
usb_hcd_resume_root_hub(hcd);
done:
- spin_unlock(&vhci->lock);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return changed ? retval : 0;
}
@@ -231,6 +236,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
struct vhci_hcd *dum;
int retval = 0;
int rhport;
+ unsigned long flags;
u32 prev_port_status[VHCI_NPORTS];
@@ -249,7 +255,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
dum = hcd_to_vhci(hcd);
- spin_lock(&dum->lock);
+ spin_lock_irqsave(&dum->lock, flags);
/* store old status and compare now and old later */
if (usbip_dbg_flag_vhci_rh) {
@@ -403,7 +409,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
usbip_dbg_vhci_rh(" bye\n");
- spin_unlock(&dum->lock);
+ spin_unlock_irqrestore(&dum->lock, flags);
return retval;
}
@@ -426,6 +432,7 @@ static void vhci_tx_urb(struct urb *urb)
{
struct vhci_device *vdev = get_vdev(urb->dev);
struct vhci_priv *priv;
+ unsigned long flags;
if (!vdev) {
pr_err("could not get virtual device");
@@ -438,7 +445,7 @@ static void vhci_tx_urb(struct urb *urb)
return;
}
- spin_lock(&vdev->priv_lock);
+ spin_lock_irqsave(&vdev->priv_lock, flags);
priv->seqnum = atomic_inc_return(&the_controller->seqnum);
if (priv->seqnum == 0xffff)
@@ -452,7 +459,7 @@ static void vhci_tx_urb(struct urb *urb)
list_add_tail(&priv->list, &vdev->priv_tx);
wake_up(&vdev->waitq_tx);
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
}
static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
@@ -461,6 +468,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
struct device *dev = &urb->dev->dev;
int ret = 0;
struct vhci_device *vdev;
+ unsigned long flags;
usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
hcd, urb, mem_flags);
@@ -468,11 +476,11 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
/* patch to usb_sg_init() is in 2.5.60 */
BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
if (urb->status != -EINPROGRESS) {
dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
return urb->status;
}
@@ -484,7 +492,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
vdev->ud.status == VDEV_ST_ERROR) {
dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
spin_unlock(&vdev->ud.lock);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
return -ENODEV;
}
spin_unlock(&vdev->ud.lock);
@@ -557,14 +565,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
out:
vhci_tx_urb(urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
return 0;
no_need_xmit:
usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
if (!ret)
usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
urb, urb->status);
@@ -621,16 +629,17 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct vhci_priv *priv;
struct vhci_device *vdev;
+ unsigned long flags;
pr_info("dequeue a urb %p\n", urb);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
priv = urb->hcpriv;
if (!priv) {
/* URB was never linked! or will be soon given back by
* vhci_rx. */
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
return -EIDRM;
}
@@ -639,7 +648,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
ret = usb_hcd_check_unlink_urb(hcd, urb, status);
if (ret) {
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
return ret;
}
}
@@ -667,10 +676,10 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
} else {
/* tcp connection is alive */
@@ -682,7 +691,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
if (!unlink) {
spin_unlock(&vdev->priv_lock);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
return -ENOMEM;
}
@@ -703,7 +712,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
spin_unlock(&vdev->priv_lock);
}
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usbip_dbg_vhci_hc("leave\n");
return 0;
@@ -712,8 +721,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
{
struct vhci_unlink *unlink, *tmp;
+ unsigned long flags;
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
spin_lock(&vdev->priv_lock);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
@@ -747,19 +757,19 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
list_del(&unlink->list);
spin_unlock(&vdev->priv_lock);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
spin_lock(&vdev->priv_lock);
kfree(unlink);
}
spin_unlock(&vdev->priv_lock);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
}
/*
@@ -826,8 +836,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
static void vhci_device_reset(struct usbip_device *ud)
{
struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
+ unsigned long flags;
- spin_lock(&ud->lock);
+ spin_lock_irqsave(&ud->lock, flags);
vdev->speed = 0;
vdev->devid = 0;
@@ -841,14 +852,16 @@ static void vhci_device_reset(struct usbip_device *ud)
}
ud->status = VDEV_ST_NULL;
- spin_unlock(&ud->lock);
+ spin_unlock_irqrestore(&ud->lock, flags);
}
static void vhci_device_unusable(struct usbip_device *ud)
{
- spin_lock(&ud->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ud->lock, flags);
ud->status = VDEV_ST_ERROR;
- spin_unlock(&ud->lock);
+ spin_unlock_irqrestore(&ud->lock, flags);
}
static void vhci_device_init(struct vhci_device *vdev)
@@ -938,12 +951,13 @@ static int vhci_get_frame_number(struct usb_hcd *hcd)
static int vhci_bus_suspend(struct usb_hcd *hcd)
{
struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+ unsigned long flags;
dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
- spin_lock(&vhci->lock);
+ spin_lock_irqsave(&vhci->lock, flags);
hcd->state = HC_STATE_SUSPENDED;
- spin_unlock(&vhci->lock);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return 0;
}
@@ -952,15 +966,16 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
{
struct vhci_hcd *vhci = hcd_to_vhci(hcd);
int rc = 0;
+ unsigned long flags;
dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
- spin_lock(&vhci->lock);
+ spin_lock_irqsave(&vhci->lock, flags);
if (!HCD_HW_ACCESSIBLE(hcd))
rc = -ESHUTDOWN;
else
hcd->state = HC_STATE_RUNNING;
- spin_unlock(&vhci->lock);
+ spin_unlock_irqrestore(&vhci->lock, flags);
return rc;
}
@@ -1058,17 +1073,18 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
int rhport = 0;
int connected = 0;
int ret = 0;
+ unsigned long flags;
hcd = platform_get_drvdata(pdev);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
for (rhport = 0; rhport < VHCI_NPORTS; rhport++)
if (the_controller->port_status[rhport] &
USB_PORT_STAT_CONNECTION)
connected += 1;
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
if (connected > 0) {
dev_info(&pdev->dev,
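
The pattern applied throughout this vhci_hcd.c change is the standard
irqsave conversion: the plain spin_lock() calls are replaced so the
locks remain safe when taken with interrupts disabled or from completion
paths. A minimal sketch of the idiom (illustrative function, not from
the patch):

	static void example_locked_update(spinlock_t *lock, u32 *status,
					  u32 bits)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);	/* save irq state */
		*status |= bits;
		spin_unlock_irqrestore(lock, flags);
	}
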
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 00e4a54308e4..d656e0edc3d5 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -72,10 +72,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ unsigned long flags;
- spin_lock(&vdev->priv_lock);
+ spin_lock_irqsave(&vdev->priv_lock, flags);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
@@ -104,9 +105,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
@@ -117,8 +118,9 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
struct usbip_header *pdu)
{
struct vhci_unlink *unlink, *tmp;
+ unsigned long flags;
- spin_lock(&vdev->priv_lock);
+ spin_lock_irqsave(&vdev->priv_lock, flags);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
pr_info("unlink->seqnum %lu\n", unlink->seqnum);
@@ -127,12 +129,12 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
unlink->seqnum);
list_del(&unlink->list);
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
return unlink;
}
}
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
return NULL;
}
@@ -142,6 +144,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
{
struct vhci_unlink *unlink;
struct urb *urb;
+ unsigned long flags;
usbip_dump_header(pdu);
@@ -152,9 +155,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
return;
}
- spin_lock(&vdev->priv_lock);
+ spin_lock_irqsave(&vdev->priv_lock, flags);
urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
if (!urb) {
/*
@@ -171,9 +174,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
@@ -185,10 +188,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
static int vhci_priv_tx_empty(struct vhci_device *vdev)
{
int empty = 0;
+ unsigned long flags;
- spin_lock(&vdev->priv_lock);
+ spin_lock_irqsave(&vdev->priv_lock, flags);
empty = list_empty(&vdev->priv_rx);
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
return empty;
}
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index 211f43f67ea2..5b5462eb1665 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -32,10 +32,11 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
{
char *s = out;
int i = 0;
+ unsigned long flags;
BUG_ON(!the_controller || !out);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
/*
* output example:
@@ -70,7 +71,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
spin_unlock(&vdev->ud.lock);
}
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
return out - s;
}
@@ -80,11 +81,12 @@ static DEVICE_ATTR_RO(status);
static int vhci_port_disconnect(__u32 rhport)
{
struct vhci_device *vdev;
+ unsigned long flags;
usbip_dbg_vhci_sysfs("enter\n");
/* lock */
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
vdev = port_to_vdev(rhport);
@@ -94,14 +96,14 @@ static int vhci_port_disconnect(__u32 rhport)
/* unlock */
spin_unlock(&vdev->ud.lock);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
return -EINVAL;
}
/* unlock */
spin_unlock(&vdev->ud.lock);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
@@ -177,6 +179,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
int sockfd = 0;
__u32 rhport = 0, devid = 0, speed = 0;
int err;
+ unsigned long flags;
/*
* @rhport: port number of vhci_hcd
@@ -202,14 +205,14 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
/* now need lock until setting vdev status as used */
/* begin a lock */
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
vdev = port_to_vdev(rhport);
spin_lock(&vdev->ud.lock);
if (vdev->ud.status != VDEV_ST_NULL) {
/* end of the lock */
spin_unlock(&vdev->ud.lock);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
sockfd_put(socket);
@@ -227,7 +230,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
vdev->ud.status = VDEV_ST_NOTASSIGNED;
spin_unlock(&vdev->ud.lock);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
/* end the lock */
vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index 409fd99f3257..3e7878fe2fd4 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -47,16 +47,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb)
static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
{
struct vhci_priv *priv, *tmp;
+ unsigned long flags;
- spin_lock(&vdev->priv_lock);
+ spin_lock_irqsave(&vdev->priv_lock, flags);
list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
list_move_tail(&priv->list, &vdev->priv_rx);
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
return priv;
}
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
return NULL;
}
@@ -136,16 +137,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
{
struct vhci_unlink *unlink, *tmp;
+ unsigned long flags;
- spin_lock(&vdev->priv_lock);
+ spin_lock_irqsave(&vdev->priv_lock, flags);
list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
list_move_tail(&unlink->list, &vdev->unlink_rx);
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
return unlink;
}
- spin_unlock(&vdev->priv_lock);
+ spin_unlock_irqrestore(&vdev->priv_lock, flags);
return NULL;
}
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 50ce80d604f3..8ed8e34c3492 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -45,6 +45,7 @@
* funneled through AES are...16 bytes in size!
*/
+#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/err.h>
@@ -195,21 +196,22 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
* NOTE: blen is not aligned to a block size, we'll pad zeros, that's
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
-static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
+static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
struct crypto_cipher *tfm_aes, void *mic,
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a, const void *b,
size_t blen)
{
int result = 0;
- struct blkcipher_desc desc;
+ SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
struct aes_ccm_b0 b0;
struct aes_ccm_b1 b1;
struct aes_ccm_a ax;
struct scatterlist sg[4], sg_dst;
- void *iv, *dst_buf;
- size_t ivsize, dst_size;
+ void *dst_buf;
+ size_t dst_size;
const u8 bzero[16] = { 0 };
+ u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
size_t zero_padding;
/*
@@ -232,9 +234,7 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
goto error_dst_buf;
}
- iv = crypto_blkcipher_crt(tfm_cbc)->iv;
- ivsize = crypto_blkcipher_ivsize(tfm_cbc);
- memset(iv, 0, ivsize);
+ memset(iv, 0, sizeof(iv));
/* Setup B0 */
b0.flags = 0x59; /* Format B0 */
@@ -259,9 +259,11 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
sg_set_buf(&sg[3], bzero, zero_padding);
sg_init_one(&sg_dst, dst_buf, dst_size);
- desc.tfm = tfm_cbc;
- desc.flags = 0;
- result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
+ skcipher_request_set_tfm(req, tfm_cbc);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
+ result = crypto_skcipher_encrypt(req);
+ skcipher_request_zero(req);
if (result < 0) {
printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
result);
@@ -301,18 +303,18 @@ ssize_t wusb_prf(void *out, size_t out_size,
{
ssize_t result, bytes = 0, bitr;
struct aes_ccm_nonce n = *_n;
- struct crypto_blkcipher *tfm_cbc;
+ struct crypto_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
u64 sfn = 0;
__le64 sfn_le;
- tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm_cbc)) {
result = PTR_ERR(tfm_cbc);
printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
goto error_alloc_cbc;
}
- result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
+ result = crypto_skcipher_setkey(tfm_cbc, key, 16);
if (result < 0) {
printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
goto error_setkey_cbc;
@@ -345,7 +347,7 @@ error_setkey_aes:
crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
- crypto_free_blkcipher(tfm_cbc);
+ crypto_free_skcipher(tfm_cbc);
error_alloc_cbc:
return result;
}
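
The crypto.c change above moves from the removed blkcipher interface to
the skcipher API. Condensed to its essentials, the new synchronous call
sequence looks like this (a sketch assuming cbc(aes) with a zero IV and
a 16-byte IV size; error handling trimmed):

	static int cbc_encrypt_once(struct crypto_skcipher *tfm,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int len)
	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);
		u8 iv[16] = { 0 };	/* assumes ivsize == 16 */
		int ret;

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe request state */
		return ret;
	}
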
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 41838db7f85c..8c5bd000739b 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -336,7 +336,7 @@ static inline
struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
{
struct usb_hcd *usb_hcd;
- usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
+ usb_hcd = bus_to_hcd(usb_dev->bus);
return usb_get_hcd(usb_hcd);
}
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 02912f180c6d..24ee2605b9f0 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -26,3 +26,7 @@ config VFIO_PCI_MMAP
config VFIO_PCI_INTX
depends on VFIO_PCI
def_bool y if !S390
+
+config VFIO_PCI_IGD
+ depends on VFIO_PCI
+ def_bool y if X86
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 131079255fd9..76d8ec058edd 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -1,4 +1,5 @@
vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
+vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 8c80a48e3233..712a84978e97 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -111,6 +111,7 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
}
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
+static void vfio_pci_disable(struct vfio_pci_device *vdev);
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
@@ -169,13 +170,26 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
vdev->has_vga = true;
+
+ if (vfio_pci_is_vga(pdev) &&
+ pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
+ ret = vfio_pci_igd_init(vdev);
+ if (ret) {
+ dev_warn(&vdev->pdev->dev,
+ "Failed to setup Intel IGD regions\n");
+ vfio_pci_disable(vdev);
+ return ret;
+ }
+ }
+
return 0;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
- int bar;
+ int i, bar;
/* Stop the device from further DMA */
pci_clear_master(pdev);
@@ -186,6 +200,13 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
vdev->virq_disabled = false;
+ for (i = 0; i < vdev->num_regions; i++)
+ vdev->region[i].ops->release(vdev, &vdev->region[i]);
+
+ vdev->num_regions = 0;
+ kfree(vdev->region);
+ vdev->region = NULL; /* don't krealloc a freed pointer */
+
vfio_config_free(vdev);
for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
@@ -421,6 +442,93 @@ static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
return walk.ret;
}
+static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_info_cap_header *header;
+ struct vfio_region_info_cap_sparse_mmap *sparse;
+ size_t end, size;
+ int nr_areas = 2, i = 0;
+
+ end = pci_resource_len(vdev->pdev, vdev->msix_bar);
+
+ /* If MSI-X table is aligned to the start or end, only one area */
+ if (((vdev->msix_offset & PAGE_MASK) == 0) ||
+ (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
+ nr_areas = 1;
+
+ size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));
+
+ header = vfio_info_cap_add(caps, size,
+ VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
+ if (IS_ERR(header))
+ return PTR_ERR(header);
+
+ sparse = container_of(header,
+ struct vfio_region_info_cap_sparse_mmap, header);
+ sparse->nr_areas = nr_areas;
+
+ if (vdev->msix_offset & PAGE_MASK) {
+ sparse->areas[i].offset = 0;
+ sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
+ i++;
+ }
+
+ if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
+ sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
+ vdev->msix_size);
+ sparse->areas[i].size = end - sparse->areas[i].offset;
+ i++;
+ }
+
+ return 0;
+}
+
+static int region_type_cap(struct vfio_pci_device *vdev,
+ struct vfio_info_cap *caps,
+ unsigned int type, unsigned int subtype)
+{
+ struct vfio_info_cap_header *header;
+ struct vfio_region_info_cap_type *cap;
+
+ header = vfio_info_cap_add(caps, sizeof(*cap),
+ VFIO_REGION_INFO_CAP_TYPE, 1);
+ if (IS_ERR(header))
+ return PTR_ERR(header);
+
+ cap = container_of(header, struct vfio_region_info_cap_type, header);
+ cap->type = type;
+ cap->subtype = subtype;
+
+ return 0;
+}
+
+int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_pci_region *region;
+
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ vdev->region = region;
+ vdev->region[vdev->num_regions].type = type;
+ vdev->region[vdev->num_regions].subtype = subtype;
+ vdev->region[vdev->num_regions].ops = ops;
+ vdev->region[vdev->num_regions].size = size;
+ vdev->region[vdev->num_regions].flags = flags;
+ vdev->region[vdev->num_regions].data = data;
+
+ vdev->num_regions++;
+
+ return 0;
+}
+
static long vfio_pci_ioctl(void *device_data,
unsigned int cmd, unsigned long arg)
{
@@ -443,7 +551,7 @@ static long vfio_pci_ioctl(void *device_data,
if (vdev->reset_works)
info.flags |= VFIO_DEVICE_FLAGS_RESET;
- info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -452,6 +560,8 @@ static long vfio_pci_ioctl(void *device_data,
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct pci_dev *pdev = vdev->pdev;
struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+ int i, ret;
minsz = offsetofend(struct vfio_region_info, offset);
@@ -480,8 +590,15 @@ static long vfio_pci_ioctl(void *device_data,
VFIO_REGION_INFO_FLAG_WRITE;
if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) &&
pci_resource_flags(pdev, info.index) &
- IORESOURCE_MEM && info.size >= PAGE_SIZE)
+ IORESOURCE_MEM && info.size >= PAGE_SIZE) {
info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
+ if (info.index == vdev->msix_bar) {
+ ret = msix_sparse_mmap_cap(vdev, &caps);
+ if (ret)
+ return ret;
+ }
+ }
+
break;
case VFIO_PCI_ROM_REGION_INDEX:
{
@@ -493,8 +610,14 @@ static long vfio_pci_ioctl(void *device_data,
/* Report the BAR size, not the ROM size */
info.size = pci_resource_len(pdev, info.index);
- if (!info.size)
- break;
+ if (!info.size) {
+ /* Shadow ROMs appear as PCI option ROMs */
+ if (pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW)
+ info.size = 0x20000;
+ else
+ break;
+ }
/* Is it really there? */
io = pci_map_rom(pdev, &size);
@@ -518,7 +641,40 @@ static long vfio_pci_ioctl(void *device_data,
break;
default:
- return -EINVAL;
+ if (info.index >=
+ VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ return -EINVAL;
+
+ i = info.index - VFIO_PCI_NUM_REGIONS;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
+
+ ret = region_type_cap(vdev, &caps,
+ vdev->region[i].type,
+ vdev->region[i].subtype);
+ if (ret)
+ return ret;
+ }
+
+ if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
+ if (info.argsz < sizeof(info) + caps.size) {
+ info.argsz = sizeof(info) + caps.size;
+ info.cap_offset = 0;
+ } else {
+ vfio_info_cap_shift(&caps, sizeof(info));
+ if (copy_to_user((void __user *)arg +
+ sizeof(info), caps.buf,
+ caps.size)) {
+ kfree(caps.buf);
+ return -EFAULT;
+ }
+ info.cap_offset = sizeof(info);
+ }
+
+ kfree(caps.buf);
}
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -798,7 +954,7 @@ static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct vfio_pci_device *vdev = device_data;
- if (index >= VFIO_PCI_NUM_REGIONS)
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
switch (index) {
@@ -815,6 +971,10 @@ static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
case VFIO_PCI_VGA_REGION_INDEX:
return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
+ default:
+ index -= VFIO_PCI_NUM_REGIONS;
+ return vdev->region[index].ops->rw(vdev, buf,
+ count, ppos, iswrite);
}
return -EINVAL;
@@ -997,6 +1157,7 @@ static void vfio_pci_remove(struct pci_dev *pdev)
return;
vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
+ kfree(vdev->region);
kfree(vdev);
if (vfio_pci_is_vga(pdev)) {
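
For the new capability chain exposed by VFIO_DEVICE_GET_REGION_INFO, a
userspace consumer walks cap_offset/next until next is zero. A sketch
(assumes info was re-fetched with argsz large enough to hold the chain):

	#include <linux/vfio.h>

	static struct vfio_info_cap_header *
	find_region_cap(struct vfio_region_info *info, __u16 id)
	{
		struct vfio_info_cap_header *hdr;
		__u32 off = info->cap_offset;

		if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS))
			return NULL;

		while (off) {
			hdr = (void *)((char *)info + off);
			if (hdr->id == id)
				return hdr;
			off = hdr->next;	/* 0 terminates the chain */
		}
		return NULL;
	}
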
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index fe2b470d7ec6..142c533efec7 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -33,9 +33,8 @@
#define PCI_CFG_SPACE_SIZE 256
-/* Useful "pseudo" capabilities */
+/* Fake capability ID for standard config space */
#define PCI_CAP_ID_BASIC 0
-#define PCI_CAP_ID_INVALID 0xFF
#define is_bar(offset) \
((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \
@@ -301,6 +300,23 @@ static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
return count;
}
+/* Virt access uses only virtualization */
+static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ memcpy(vdev->vconfig + pos, &val, count);
+ return count;
+}
+
+static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ memcpy(val, vdev->vconfig + pos, count);
+ return count;
+}
+
/* Default capability regions to read-only, no-virtualization */
static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
[0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
@@ -319,6 +335,11 @@ static struct perm_bits unassigned_perms = {
.writefn = vfio_raw_config_write
};
+static struct perm_bits virt_perms = {
+ .readfn = vfio_virt_config_read,
+ .writefn = vfio_virt_config_write
+};
+
static void free_perm_bits(struct perm_bits *perm)
{
kfree(perm->virt);
@@ -454,14 +475,19 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
/*
- * NB. we expose the actual BAR size here, regardless of whether
- * we can read it. When we report the REGION_INFO for the ROM
- * we report what PCI tells us is the actual ROM size.
+ * NB. REGION_INFO will have reported zero size if we weren't able
+ * to read the ROM, but we still return the actual BAR size here if
+ * it exists (or the shadow ROM space).
*/
if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
mask |= PCI_ROM_ADDRESS_ENABLE;
*bar &= cpu_to_le32((u32)mask);
+ } else if (pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW) {
+ mask = ~(0x20000 - 1);
+ mask |= PCI_ROM_ADDRESS_ENABLE;
+ *bar &= cpu_to_le32((u32)mask);
} else
*bar = 0;
@@ -1332,6 +1358,8 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
pos + i, map[pos + i], cap);
}
+ BUILD_BUG_ON(PCI_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
+
memset(map + pos, cap, len);
ret = vfio_fill_vconfig_bytes(vdev, pos, len);
if (ret)
@@ -1419,9 +1447,9 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
/*
* Even though ecap is 2 bytes, we're currently a long way
* from exceeding 1 byte capabilities. If we ever make it
- * up to 0xFF we'll need to up this to a two-byte, byte map.
+ * up to 0xFE we'll need to up this to a two-byte, byte map.
*/
- BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID);
+ BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID_VIRT);
memset(map + epos, ecap, len);
ret = vfio_fill_vconfig_bytes(vdev, epos, len);
@@ -1597,6 +1625,9 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
if (cap_id == PCI_CAP_ID_INVALID) {
perm = &unassigned_perms;
cap_start = *ppos;
+ } else if (cap_id == PCI_CAP_ID_INVALID_VIRT) {
+ perm = &virt_perms;
+ cap_start = *ppos;
} else {
if (*ppos >= PCI_CFG_SPACE_SIZE) {
WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
new file mode 100644
index 000000000000..6394b168ef29
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -0,0 +1,280 @@
+/*
+ * VFIO PCI Intel Graphics support
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Register a device specific region through which to provide read-only
+ * access to the Intel IGD opregion. The register defining the opregion
+ * address is also virtualized to prevent user modification.
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+
+#include "vfio_pci_private.h"
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define OPREGION_SIZE (8 * 1024)
+#define OPREGION_PCI_ADDR 0xfc
+
+static size_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+ void *base = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos >= vdev->region[i].size || iswrite)
+ return -EINVAL;
+
+ count = min(count, (size_t)(vdev->region[i].size - pos));
+
+ if (copy_to_user(buf, base + pos, count))
+ return -EFAULT;
+
+ *ppos += count;
+
+ return count;
+}
+
+static void vfio_pci_igd_release(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region)
+{
+ memunmap(region->data);
+}
+
+static const struct vfio_pci_regops vfio_pci_igd_regops = {
+ .rw = vfio_pci_igd_rw,
+ .release = vfio_pci_igd_release,
+};
+
+static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
+{
+ __le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
+ u32 addr, size;
+ void *base;
+ int ret;
+
+ ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
+ if (ret)
+ return ret;
+
+ if (!addr || !(~addr))
+ return -ENODEV;
+
+ base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memunmap(base);
+ return -EINVAL;
+ }
+
+ size = le32_to_cpu(*(__le32 *)(base + 16));
+ if (!size) {
+ memunmap(base);
+ return -EINVAL;
+ }
+
+ size *= 1024; /* In KB */
+
+ if (size != OPREGION_SIZE) {
+ memunmap(base);
+ base = memremap(addr, size, MEMREMAP_WB);
+ if (!base)
+ return -ENOMEM;
+ }
+
+ ret = vfio_pci_register_dev_region(vdev,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
+ &vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base);
+ if (ret) {
+ memunmap(base);
+ return ret;
+ }
+
+ /* Fill vconfig with the hw value and virtualize register */
+ *dwordp = cpu_to_le32(addr);
+ memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
+ PCI_CAP_ID_INVALID_VIRT, 4);
+
+ return ret;
+}
+
+static size_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos,
+ bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+ struct pci_dev *pdev = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ size_t size;
+ int ret;
+
+ if (pos >= vdev->region[i].size || iswrite)
+ return -EINVAL;
+
+ size = count = min(count, (size_t)(vdev->region[i].size - pos));
+
+ if ((pos & 1) && size) {
+ u8 val;
+
+ ret = pci_user_read_config_byte(pdev, pos, &val);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (copy_to_user(buf + count - size, &val, 1))
+ return -EFAULT;
+
+ pos++;
+ size--;
+ }
+
+ if ((pos & 3) && size > 2) {
+ u16 val;
+
+ ret = pci_user_read_config_word(pdev, pos, &val);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ val = cpu_to_le16(val);
+ if (copy_to_user(buf + count - size, &val, 2))
+ return -EFAULT;
+
+ pos += 2;
+ size -= 2;
+ }
+
+ while (size > 3) {
+ u32 val;
+
+ ret = pci_user_read_config_dword(pdev, pos, &val);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ val = cpu_to_le32(val);
+ if (copy_to_user(buf + count - size, &val, 4))
+ return -EFAULT;
+
+ pos += 4;
+ size -= 4;
+ }
+
+ while (size >= 2) {
+ u16 val;
+
+ ret = pci_user_read_config_word(pdev, pos, &val);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ val = cpu_to_le16(val);
+ if (copy_to_user(buf + count - size, &val, 2))
+ return -EFAULT;
+
+ pos += 2;
+ size -= 2;
+ }
+
+ while (size) {
+ u8 val;
+
+ ret = pci_user_read_config_byte(pdev, pos, &val);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (copy_to_user(buf + count - size, &val, 1))
+ return -EFAULT;
+
+ pos++;
+ size--;
+ }
+
+ *ppos += count;
+
+ return count;
+}
+
+static void vfio_pci_igd_cfg_release(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region)
+{
+ struct pci_dev *pdev = region->data;
+
+ pci_dev_put(pdev);
+}
+
+static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
+ .rw = vfio_pci_igd_cfg_rw,
+ .release = vfio_pci_igd_cfg_release,
+};
+
+static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *host_bridge, *lpc_bridge;
+ int ret;
+
+ host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!host_bridge)
+ return -ENODEV;
+
+ if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
+ host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
+ pci_dev_put(host_bridge);
+ return -EINVAL;
+ }
+
+ ret = vfio_pci_register_dev_region(vdev,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
+ &vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
+ VFIO_REGION_INFO_FLAG_READ, host_bridge);
+ if (ret) {
+ pci_dev_put(host_bridge);
+ return ret;
+ }
+
+ lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
+ if (!lpc_bridge)
+ return -ENODEV;
+
+ if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
+ lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
+ pci_dev_put(lpc_bridge);
+ return -EINVAL;
+ }
+
+ ret = vfio_pci_register_dev_region(vdev,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
+ &vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
+ VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
+ if (ret) {
+ pci_dev_put(lpc_bridge);
+ return ret;
+ }
+
+ return 0;
+}
+
+int vfio_pci_igd_init(struct vfio_pci_device *vdev)
+{
+ int ret;
+
+ ret = vfio_pci_igd_opregion_init(vdev);
+ if (ret)
+ return ret;
+
+ ret = vfio_pci_igd_cfg_init(vdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
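
Once vfio_pci_igd_init() has registered these regions, userspace finds
them past the fixed PCI indexes and reads through the usual region
offset. A hypothetical helper (the region is located beforehand via
GET_REGION_INFO plus the VFIO_REGION_INFO_CAP_TYPE capability):

	#include <unistd.h>
	#include <linux/vfio.h>

	static ssize_t read_igd_opregion(int device_fd,
					 struct vfio_region_info *info,
					 void *buf, size_t len)
	{
		if (len > info->size)
			len = info->size;	/* clamp to the region size */
		return pread(device_fd, buf, len, info->offset);
	}
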
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 3b3ba15558b7..e9ea3fef144a 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -309,14 +309,14 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
int vector, int fd, bool msix)
{
struct pci_dev *pdev = vdev->pdev;
- int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
- char *name = msix ? "vfio-msix" : "vfio-msi";
struct eventfd_ctx *trigger;
- int ret;
+ int irq, ret;
- if (vector >= vdev->num_ctx)
+ if (vector < 0 || vector >= vdev->num_ctx)
return -EINVAL;
+ irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
+
if (vdev->ctx[vector].trigger) {
free_irq(irq, vdev->ctx[vector].trigger);
irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
@@ -328,8 +328,9 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
if (fd < 0)
return 0;
- vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
- name, vector, pci_name(pdev));
+ vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
+ msix ? "x" : "", vector,
+ pci_name(pdev));
if (!vdev->ctx[vector].name)
return -ENOMEM;
@@ -379,7 +380,7 @@ static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
{
int i, j, ret = 0;
- if (start + count > vdev->num_ctx)
+ if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
return -EINVAL;
for (i = 0, j = start; i < count && !ret; i++, j++) {
@@ -388,7 +389,7 @@ static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
}
if (ret) {
- for (--j; j >= start; j--)
+ for (--j; j >= (int)start; j--)
vfio_msi_set_vector_signal(vdev, j, -1, msix);
}
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 0e7394f8f69b..8a7d546d18a0 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/irqbypass.h>
+#include <linux/types.h>
#ifndef VFIO_PCI_PRIVATE_H
#define VFIO_PCI_PRIVATE_H
@@ -24,6 +25,10 @@
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+/* Special capability IDs predefined access */
+#define PCI_CAP_ID_INVALID 0xFF /* default raw access */
+#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */
+
struct vfio_pci_irq_ctx {
struct eventfd_ctx *trigger;
struct virqfd *unmask;
@@ -33,6 +38,25 @@ struct vfio_pci_irq_ctx {
struct irq_bypass_producer producer;
};
+struct vfio_pci_device;
+struct vfio_pci_region;
+
+struct vfio_pci_regops {
+ size_t (*rw)(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+ void (*release)(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region);
+};
+
+struct vfio_pci_region {
+ u32 type;
+ u32 subtype;
+ const struct vfio_pci_regops *ops;
+ void *data;
+ size_t size;
+ u32 flags;
+};
+
struct vfio_pci_device {
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
@@ -45,6 +69,8 @@ struct vfio_pci_device {
struct vfio_pci_irq_ctx *ctx;
int num_ctx;
int irq_type;
+ int num_regions;
+ struct vfio_pci_region *region;
u8 msi_qmax;
u8 msix_bar;
u16 msix_size;
@@ -91,4 +117,17 @@ extern void vfio_pci_uninit_perm_bits(void);
extern int vfio_config_init(struct vfio_pci_device *vdev);
extern void vfio_config_free(struct vfio_pci_device *vdev);
+
+extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
+ unsigned int type, unsigned int subtype,
+ const struct vfio_pci_regops *ops,
+ size_t size, u32 flags, void *data);
+#ifdef CONFIG_VFIO_PCI_IGD
+extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
+#else
+static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev)
+{
+ return -ENODEV;
+}
+#endif
#endif /* VFIO_PCI_PRIVATE_H */
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 210db24d2204..5ffd1d9ad4bd 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -124,11 +124,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
void __iomem *io;
ssize_t done;
- if (!pci_resource_start(pdev, bar))
+ if (pci_resource_start(pdev, bar))
+ end = pci_resource_len(pdev, bar);
+ else if (bar == PCI_ROM_RESOURCE &&
+ pdev->resource[bar].flags & IORESOURCE_ROM_SHADOW)
+ end = 0x20000;
+ else
return -EINVAL;
- end = pci_resource_len(pdev, bar);
-
if (pos >= end)
return -EINVAL;
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index ecca316386f5..6fd6fa5469de 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1080,30 +1080,26 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
continue;
}
- /* module reference holds the driver we're working on */
- mutex_unlock(&vfio.iommu_drivers_lock);
-
data = driver->ops->open(arg);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
module_put(driver->ops->owner);
- goto skip_drivers_unlock;
+ continue;
}
ret = __vfio_container_attach_groups(container, driver, data);
- if (!ret) {
- container->iommu_driver = driver;
- container->iommu_data = data;
- } else {
+ if (ret) {
driver->ops->release(data);
module_put(driver->ops->owner);
+ continue;
}
- goto skip_drivers_unlock;
+ container->iommu_driver = driver;
+ container->iommu_data = data;
+ break;
}
mutex_unlock(&vfio.iommu_drivers_lock);
-skip_drivers_unlock:
up_write(&container->group_lock);
return ret;
@@ -1733,6 +1729,60 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
/**
+ * Sub-module support
+ */
+/*
+ * Helper for managing a buffer of info chain capabilities, allocate or
+ * reallocate a buffer with additional @size, filling in @id and @version
+ * of the capability. A pointer to the new capability is returned.
+ *
+ * NB. The chain is based at the head of the buffer, so new entries are
+ * added to the tail, vfio_info_cap_shift() should be called to fixup the
+ * next offsets prior to copying to the user buffer.
+ */
+struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
+ size_t size, u16 id, u16 version)
+{
+ void *buf;
+ struct vfio_info_cap_header *header, *tmp;
+
+ buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
+ if (!buf) {
+ kfree(caps->buf);
+ caps->size = 0;
+ return ERR_PTR(-ENOMEM);
+ }
+
+ caps->buf = buf;
+ header = buf + caps->size;
+
+ /* Eventually copied to user buffer, zero */
+ memset(header, 0, size);
+
+ header->id = id;
+ header->version = version;
+
+ /* Add to the end of the capability chain */
+ for (tmp = caps->buf; tmp->next; tmp = (void *)tmp + tmp->next)
+ ; /* nothing */
+
+ tmp->next = caps->size;
+ caps->size += size;
+
+ return header;
+}
+EXPORT_SYMBOL_GPL(vfio_info_cap_add);
+
+void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
+{
+ struct vfio_info_cap_header *tmp;
+
+ for (tmp = caps->buf; tmp->next; tmp = (void *)tmp + tmp->next - offset)
+ tmp->next += offset;
+}
+EXPORT_SYMBOL_GPL(vfio_info_cap_shift);
+
+/**
* Module/class support
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9eda69e40678..f744eeb3e2b4 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -287,6 +287,43 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
rcu_read_unlock_bh();
}
+static inline unsigned long busy_clock(void)
+{
+ return local_clock() >> 10;
+}
+
+static bool vhost_can_busy_poll(struct vhost_dev *dev,
+ unsigned long endtime)
+{
+ return likely(!need_resched()) &&
+ likely(!time_after(busy_clock(), endtime)) &&
+ likely(!signal_pending(current)) &&
+ !vhost_has_work(dev);
+}
+
+static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
+ struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num)
+{
+ unsigned long uninitialized_var(endtime);
+ int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ out_num, in_num, NULL, NULL);
+
+ if (r == vq->num && vq->busyloop_timeout) {
+ preempt_disable();
+ endtime = busy_clock() + vq->busyloop_timeout;
+ while (vhost_can_busy_poll(vq->dev, endtime) &&
+ vhost_vq_avail_empty(vq->dev, vq))
+ cpu_relax_lowlatency();
+ preempt_enable();
+ r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
+ out_num, in_num, NULL, NULL);
+ }
+
+ return r;
+}
+
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
@@ -331,10 +368,9 @@ static void handle_tx(struct vhost_net *net)
% UIO_MAXIOV == nvq->done_idx))
break;
- head = vhost_get_vq_desc(vq, vq->iov,
- ARRAY_SIZE(vq->iov),
- &out, &in,
- NULL, NULL);
+ head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
+ ARRAY_SIZE(vq->iov),
+ &out, &in);
/* On error, stop handling until the next kick. */
if (unlikely(head < 0))
break;
@@ -435,6 +471,38 @@ static int peek_head_len(struct sock *sk)
return len;
}
+static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+ unsigned long uninitialized_var(endtime);
+ int len = peek_head_len(sk);
+
+ if (!len && vq->busyloop_timeout) {
+ /* Both tx vq and rx socket were polled here */
+ mutex_lock(&vq->mutex);
+ vhost_disable_notify(&net->dev, vq);
+
+ preempt_disable();
+ endtime = busy_clock() + vq->busyloop_timeout;
+
+ while (vhost_can_busy_poll(&net->dev, endtime) &&
+ skb_queue_empty(&sk->sk_receive_queue) &&
+ vhost_vq_avail_empty(&net->dev, vq))
+ cpu_relax_lowlatency();
+
+ preempt_enable();
+
+ if (vhost_enable_notify(&net->dev, vq))
+ vhost_poll_queue(&vq->poll);
+ mutex_unlock(&vq->mutex);
+
+ len = peek_head_len(sk);
+ }
+
+ return len;
+}
+
/* This is a multi-buffer version of vhost_get_desc, that works if
* vq has read descriptors only.
* @vq - the relevant virtqueue
@@ -553,7 +621,7 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = peek_head_len(sock->sk))) {
+ while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads, vhost_len,
@@ -917,7 +985,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
vhost_net_disable_vq(n, vq);
vq->private_data = sock;
- r = vhost_init_used(vq);
+ r = vhost_vq_init_access(vq);
if (r)
goto err_used;
r = vhost_net_enable_vq(n, vq);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 29cfc57d496e..0e6fd556c982 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1274,7 +1274,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
vq = &vs->vqs[i].vq;
mutex_lock(&vq->mutex);
vq->private_data = vs_tpg;
- vhost_init_used(vq);
+ vhost_vq_init_access(vq);
mutex_unlock(&vq->mutex);
}
ret = 0;
@@ -1664,8 +1664,7 @@ static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
mutex_unlock(&vhost_scsi_mutex);
}
-static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
- struct se_session *se_sess)
+static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
{
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;
@@ -1721,98 +1720,82 @@ static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
NULL,
};
-static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
- const char *name)
+static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *p)
{
- struct se_portal_group *se_tpg;
- struct se_session *se_sess;
- struct vhost_scsi_nexus *tv_nexus;
struct vhost_scsi_cmd *tv_cmd;
unsigned int i;
- mutex_lock(&tpg->tv_tpg_mutex);
- if (tpg->tpg_nexus) {
- mutex_unlock(&tpg->tv_tpg_mutex);
- pr_debug("tpg->tpg_nexus already exists\n");
- return -EEXIST;
- }
- se_tpg = &tpg->se_tpg;
-
- tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
- if (!tv_nexus) {
- mutex_unlock(&tpg->tv_tpg_mutex);
- pr_err("Unable to allocate struct vhost_scsi_nexus\n");
- return -ENOMEM;
- }
- /*
- * Initialize the struct se_session pointer and setup tagpool
- * for struct vhost_scsi_cmd descriptors
- */
- tv_nexus->tvn_se_sess = transport_init_session_tags(
- VHOST_SCSI_DEFAULT_TAGS,
- sizeof(struct vhost_scsi_cmd),
- TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
- if (IS_ERR(tv_nexus->tvn_se_sess)) {
- mutex_unlock(&tpg->tv_tpg_mutex);
- kfree(tv_nexus);
- return -ENOMEM;
- }
- se_sess = tv_nexus->tvn_se_sess;
for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
if (!tv_cmd->tvc_sgl) {
- mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
goto out;
}
tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
- VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
+ VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
if (!tv_cmd->tvc_upages) {
- mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_upages\n");
goto out;
}
tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
- VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
+ VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
if (!tv_cmd->tvc_prot_sgl) {
- mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
goto out;
}
}
+ return 0;
+out:
+ vhost_scsi_free_cmd_map_res(se_sess);
+ return -ENOMEM;
+}
+
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
+ const char *name)
+{
+ struct se_portal_group *se_tpg;
+ struct vhost_scsi_nexus *tv_nexus;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ if (tpg->tpg_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_debug("tpg->tpg_nexus already exists\n");
+ return -EEXIST;
+ }
+ se_tpg = &tpg->se_tpg;
+
+ tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to allocate struct vhost_scsi_nexus\n");
+ return -ENOMEM;
+ }
/*
* Since we are running in 'demo mode' this call will generate a
* struct se_node_acl for the vhost_scsi struct se_portal_group with
* the SCSI Initiator port name of the passed configfs group 'name'.
*/
- tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
- se_tpg, (unsigned char *)name);
- if (!tv_nexus->tvn_se_sess->se_node_acl) {
+ tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ VHOST_SCSI_DEFAULT_TAGS,
+ sizeof(struct vhost_scsi_cmd),
+ TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+ (unsigned char *)name, tv_nexus,
+ vhost_scsi_nexus_cb);
+ if (IS_ERR(tv_nexus->tvn_se_sess)) {
mutex_unlock(&tpg->tv_tpg_mutex);
- pr_debug("core_tpg_check_initiator_node_acl() failed"
- " for %s\n", name);
- goto out;
+ kfree(tv_nexus);
+ return -ENOMEM;
}
- /*
- * Now register the TCM vhost virtual I_T Nexus as active.
- */
- transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
- tv_nexus->tvn_se_sess, tv_nexus);
tpg->tpg_nexus = tv_nexus;
mutex_unlock(&tpg->tv_tpg_mutex);
return 0;
-
-out:
- vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
- transport_free_session(se_sess);
- kfree(tv_nexus);
- return -ENOMEM;
}
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
@@ -1853,7 +1836,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
- vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
+ vhost_scsi_free_cmd_map_res(se_sess);
/*
* Release the SCSI I_T Nexus to the emulated vhost Target Port
*/
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index f2882ac98726..388eec4e1a90 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -196,7 +196,7 @@ static long vhost_test_run(struct vhost_test *n, int test)
oldpriv = vq->private_data;
vq->private_data = priv;
- r = vhost_init_used(&n->vqs[index]);
+ r = vhost_vq_init_access(&n->vqs[index]);
mutex_unlock(&vq->mutex);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 236553e81027..669fef1e2bb6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -43,11 +43,21 @@ enum {
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
-static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
vq->user_be = !virtio_legacy_is_little_endian();
}
+static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
+{
+ vq->user_be = true;
+}
+
+static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
+{
+ vq->user_be = false;
+}
+
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
struct vhost_vring_state s;
@@ -62,7 +72,10 @@ static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
s.num != VHOST_VRING_BIG_ENDIAN)
return -EINVAL;
- vq->user_be = s.num;
+ if (s.num == VHOST_VRING_BIG_ENDIAN)
+ vhost_enable_cross_endian_big(vq);
+ else
+ vhost_enable_cross_endian_little(vq);
return 0;
}
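For the legacy cross-endian path this rewrites, userspace picks the ring endianness before the backend starts. A minimal sketch of the call (the VHOST_* constant names come from the existing <linux/vhost.h> uapi and are assumed unchanged by this patch; vhost_fd and vq_idx are placeholders):

	struct vhost_vring_state s = {
		.index = vq_idx,
		.num = VHOST_VRING_BIG_ENDIAN,	/* or VHOST_VRING_LITTLE_ENDIAN */
	};

	/* Rejected with -EBUSY once the backend is running, per the
	 * check preserved in vhost_set_vring_endian() above. */
	if (ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s) < 0)
		return -1;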
@@ -91,7 +104,7 @@ static void vhost_init_is_le(struct vhost_virtqueue *vq)
vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
-static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}
@@ -113,6 +126,11 @@ static void vhost_init_is_le(struct vhost_virtqueue *vq)
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
+static void vhost_reset_is_le(struct vhost_virtqueue *vq)
+{
+ vq->is_le = virtio_legacy_is_little_endian();
+}
+
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
@@ -245,6 +263,13 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
+/* A lockless hint for busy polling code to exit the loop */
+bool vhost_has_work(struct vhost_dev *dev)
+{
+ return !list_empty(&dev->work_list);
+}
+EXPORT_SYMBOL_GPL(vhost_has_work);
+
void vhost_poll_queue(struct vhost_poll *poll)
{
vhost_work_queue(poll->dev, &poll->work);
@@ -276,8 +301,9 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->call = NULL;
vq->log_ctx = NULL;
vq->memory = NULL;
- vq->is_le = virtio_legacy_is_little_endian();
- vhost_vq_reset_user_be(vq);
+ vhost_reset_is_le(vq);
+ vhost_disable_cross_endian(vq);
+ vq->busyloop_timeout = 0;
}
static int vhost_worker(void *data)
@@ -912,6 +938,19 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
case VHOST_GET_VRING_ENDIAN:
r = vhost_get_vring_endian(vq, idx, argp);
break;
+ case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
+ if (copy_from_user(&s, argp, sizeof(s))) {
+ r = -EFAULT;
+ break;
+ }
+ vq->busyloop_timeout = s.num;
+ break;
+ case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
+ s.index = idx;
+ s.num = vq->busyloop_timeout;
+ if (copy_to_user(argp, &s, sizeof(s)))
+ r = -EFAULT;
+ break;
default:
r = -ENOIOCTLCMD;
}
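The two new ioctl cases above only move a u32 in and out of the vring state. A minimal userspace sketch of driving them follows; the fd setup is elided and the VHOST_SET_VRING_BUSYLOOP_TIMEOUT constant is assumed to come from the matching <linux/vhost.h> uapi update, which is not part of this hunk:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Sketch: enable RX busy polling on one virtqueue of an already
 * configured vhost fd. A num of 0 (the reset default set in
 * vhost_vq_reset()) disables polling again. */
static int set_busyloop_timeout(int vhost_fd, unsigned int vq_idx,
				unsigned int timeout)
{
	struct vhost_vring_state s = {
		.index = vq_idx,
		.num = timeout,
	};

	return ioctl(vhost_fd, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, &s);
}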
@@ -1152,14 +1191,14 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
return 0;
}
-int vhost_init_used(struct vhost_virtqueue *vq)
+int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
__virtio16 last_used_idx;
int r;
bool is_le = vq->is_le;
if (!vq->private_data) {
- vq->is_le = virtio_legacy_is_little_endian();
+ vhost_reset_is_le(vq);
return 0;
}
@@ -1182,7 +1221,7 @@ err:
vq->is_le = is_le;
return r;
}
-EXPORT_SYMBOL_GPL(vhost_init_used);
+EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size)
@@ -1633,6 +1672,20 @@ void vhost_add_used_and_signal_n(struct vhost_dev *dev,
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
+/* return true if we're sure that the available ring is empty */
+bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ __virtio16 avail_idx;
+ int r;
+
+ r = __get_user(avail_idx, &vq->avail->idx);
+ if (r)
+ return false;
+
+ return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
+}
+EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
+
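Note the failure direction in the helper above: if the avail index cannot be read, it claims the ring is non-empty, so a busy-poll loop built on it exits toward the notification slow path instead of spinning blind. A sketch of the intended caller shape (this mirrors the net.c loop added earlier in this patch; it is illustration, not an additional hunk):

	preempt_disable();
	/* vhost_can_busy_poll() is assumed to bound the spin by the
	 * deadline and by pending vhost work. */
	while (vhost_can_busy_poll(&net->dev, endtime) &&
	       vhost_vq_avail_empty(&net->dev, vq))
		cpu_relax_lowlatency();
	preempt_enable();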
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d3f767448a72..d36d8beb3351 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -37,6 +37,7 @@ struct vhost_poll {
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
+bool vhost_has_work(struct vhost_dev *dev);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev);
@@ -114,6 +115,7 @@ struct vhost_virtqueue {
/* Ring endianness requested by userspace for cross-endian support. */
bool user_be;
#endif
+ u32 busyloop_timeout;
};
struct vhost_dev {
@@ -148,7 +150,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *,
struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);
-int vhost_init_used(struct vhost_virtqueue *);
+int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
unsigned count);
@@ -158,6 +160,7 @@ void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
+bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 8ea45a5cd806..983280e8d93f 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1808,8 +1808,8 @@ config FB_HIT
frame buffer card.
config FB_PMAG_AA
- bool "PMAG-AA TURBOchannel framebuffer support"
- depends on (FB = y) && TC
+ tristate "PMAG-AA TURBOchannel framebuffer support"
+ depends on FB && TC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1985,7 +1985,7 @@ config FB_W100
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
- depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
+ depends on FB && (SUPERH || ARCH_RENESAS) && HAVE_CLK
depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
@@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
- select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
select XEN_XENBUS_FRONTEND
default y
help
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index a305caea58ee..fb75b7e5a19a 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -1040,8 +1040,8 @@ static int acornfb_probe(struct platform_device *dev)
* for the framebuffer if we are not using
* VRAM.
*/
- base = dma_alloc_writecombine(current_par.dev, size, &handle,
- GFP_KERNEL);
+ base = dma_alloc_wc(current_par.dev, size, &handle,
+ GFP_KERNEL);
if (base == NULL) {
printk(KERN_ERR "acornfb: unable to allocate screen "
"memory\n");
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
index 7a8afcd4573e..a8a22daa3f9d 100644
--- a/drivers/video/fbdev/amba-clcd-versatile.c
+++ b/drivers/video/fbdev/amba-clcd-versatile.c
@@ -154,8 +154,8 @@ int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
{
dma_addr_t dma;
- fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
- &dma, GFP_KERNEL);
+ fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, framesize, &dma,
+ GFP_KERNEL);
if (!fb->fb.screen_base) {
pr_err("CLCD: unable to map framebuffer\n");
return -ENOMEM;
@@ -169,14 +169,12 @@ int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
{
- return dma_mmap_writecombine(&fb->dev->dev, vma,
- fb->fb.screen_base,
- fb->fb.fix.smem_start,
- fb->fb.fix.smem_len);
+ return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
void versatile_clcd_remove_dma(struct clcd_fb *fb)
{
- dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
- fb->fb.screen_base, fb->fb.fix.smem_start);
+ dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
+ fb->fb.fix.smem_start);
}
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 9362424c2340..93e66a9148b9 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
fb->off_ienb = CLCD_PL111_IENB;
fb->off_cntl = CLCD_PL111_CNTL;
} else {
-#ifdef CONFIG_ARCH_VERSATILE
- fb->off_ienb = CLCD_PL111_IENB;
- fb->off_cntl = CLCD_PL111_CNTL;
-#else
- fb->off_ienb = CLCD_PL110_IENB;
- fb->off_cntl = CLCD_PL110_CNTL;
-#endif
+ if (of_machine_is_compatible("arm,versatile-ab") ||
+ of_machine_is_compatible("arm,versatile-pb")) {
+ fb->off_ienb = CLCD_PL111_IENB;
+ fb->off_cntl = CLCD_PL111_CNTL;
+ } else {
+ fb->off_ienb = CLCD_PL110_IENB;
+ fb->off_cntl = CLCD_PL110_CNTL;
+ }
}
fb->clk = clk_get(&fb->dev->dev, NULL);
@@ -774,8 +775,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
{
- return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base,
- fb->fb.fix.smem_start, fb->fb.fix.smem_len);
+ return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
}
static void clcdfb_of_dma_remove(struct clcd_fb *fb)
diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c
index d6ce613e12ad..fcd2dd670a65 100644
--- a/drivers/video/fbdev/atafb.c
+++ b/drivers/video/fbdev/atafb.c
@@ -313,9 +313,6 @@ extern unsigned char fontdata_8x16[];
* * Draws cursor *
* int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);
*
- * * Rotates the display *
- * void (*fb_rotate)(struct fb_info *info, int angle);
- *
* * wait for blit idle, optional *
* int (*fb_sync)(struct fb_info *info);
*
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 19eb42b57d87..669ecc755fa9 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -26,8 +26,6 @@
#include <linux/regulator/consumer.h>
#include <video/videomode.h>
-#include <asm/gpio.h>
-
#include <video/atmel_lcdc.h>
struct atmel_lcdfb_config {
@@ -414,8 +412,8 @@ static inline void atmel_lcdfb_free_video_memory(struct atmel_lcdfb_info *sinfo)
{
struct fb_info *info = sinfo->info;
- dma_free_writecombine(info->device, info->fix.smem_len,
- info->screen_base, info->fix.smem_start);
+ dma_free_wc(info->device, info->fix.smem_len, info->screen_base,
+ info->fix.smem_start);
}
/**
@@ -435,8 +433,9 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
* ((var->bits_per_pixel + 7) / 8));
info->fix.smem_len = max(smem_len, sinfo->smem_len);
- info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
- (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(info->device, info->fix.smem_len,
+ (dma_addr_t *)&info->fix.smem_start,
+ GFP_KERNEL);
if (!info->screen_base) {
return -ENOMEM;
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index c42ce2fdfd44..0a4626886b00 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -68,7 +68,6 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#include "../macmodes.h"
#endif
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index ce0b1d05a388..218339a4edaa 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -76,7 +76,6 @@
#ifdef CONFIG_PPC
-#include <asm/pci-bridge.h>
#include "../macmodes.h"
#ifdef CONFIG_BOOTX_TEXT
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 59560189b24a..35df2c1a8a63 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -334,27 +334,6 @@ int au1100fb_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi)
return 0;
}
-/* fb_rotate
- * Rotate the display of this angle. This doesn't seems to be used by the core,
- * but as our hardware supports it, so why not implementing it...
- */
-void au1100fb_fb_rotate(struct fb_info *fbi, int angle)
-{
- struct au1100fb_device *fbdev = to_au1100fb_device(fbi);
-
- print_dbg("fb_rotate %p %d", fbi, angle);
-
- if (fbdev && (angle > 0) && !(angle % 90)) {
-
- fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
-
- fbdev->regs->lcd_control &= ~(LCD_CONTROL_SM_MASK);
- fbdev->regs->lcd_control |= ((angle/90) << LCD_CONTROL_SM_BIT);
-
- fbdev->regs->lcd_control |= LCD_CONTROL_GO;
- }
-}
-
/* fb_mmap
* Map video memory in user space. We don't use the generic fb_mmap method mainly
* to allow the use of the TLB streaming flag (CCA=6)
@@ -380,7 +359,6 @@ static struct fb_ops au1100fb_ops =
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
- .fb_rotate = au1100fb_fb_rotate,
.fb_mmap = au1100fb_fb_mmap,
};
diff --git a/drivers/video/fbdev/bf537-lq035.c b/drivers/video/fbdev/bf537-lq035.c
index 7db3052b471d..ef29fb425122 100644
--- a/drivers/video/fbdev/bf537-lq035.c
+++ b/drivers/video/fbdev/bf537-lq035.c
@@ -554,28 +554,6 @@ static int bfin_lq035_fb_check_var(struct fb_var_screeninfo *var,
return 0;
}
-/* fb_rotate
- * Rotate the display of this angle. This doesn't seems to be used by the core,
- * but as our hardware supports it, so why not implementing it...
- */
-static void bfin_lq035_fb_rotate(struct fb_info *fbi, int angle)
-{
- pr_debug("%s: %p %d", __func__, fbi, angle);
-#if (defined(UD) && defined(LBR))
- switch (angle) {
-
- case 180:
- gpio_set_value(LBR, 0);
- gpio_set_value(UD, 1);
- break;
- default:
- gpio_set_value(LBR, 1);
- gpio_set_value(UD, 0);
- break;
- }
-#endif
-}
-
static int bfin_lq035_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
if (nocursor)
@@ -623,7 +601,6 @@ static struct fb_ops bfin_lq035_fb_ops = {
.fb_open = bfin_lq035_fb_open,
.fb_release = bfin_lq035_fb_release,
.fb_check_var = bfin_lq035_fb_check_var,
- .fb_rotate = bfin_lq035_fb_rotate,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
diff --git a/drivers/video/fbdev/bt431.h b/drivers/video/fbdev/bt431.h
index 04e0cfbba538..3929602f5867 100644
--- a/drivers/video/fbdev/bt431.h
+++ b/drivers/video/fbdev/bt431.h
@@ -2,6 +2,7 @@
* linux/drivers/video/bt431.h
*
* Copyright 2003 Thiemo Seufer <seufer@csv.ica.uni-stuttgart.de>
+ * Copyright 2016 Maciej W. Rozycki <macro@linux-mips.org>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
@@ -9,6 +10,8 @@
*/
#include <linux/types.h>
+#define BT431_CURSOR_SIZE 64
+
/*
* Bt431 cursor generator registers, 32-bit aligned.
* Two twin Bt431 are used on the DECstation's PMAG-AA.
@@ -60,7 +63,7 @@ static inline u8 bt431_get_value(u16 val)
#define BT431_CMD_CURS_ENABLE 0x40
#define BT431_CMD_XHAIR_ENABLE 0x20
#define BT431_CMD_OR_CURSORS 0x10
-#define BT431_CMD_AND_CURSORS 0x00
+#define BT431_CMD_XOR_CURSORS 0x00
#define BT431_CMD_1_1_MUX 0x00
#define BT431_CMD_4_1_MUX 0x04
#define BT431_CMD_5_1_MUX 0x08
@@ -196,28 +199,30 @@ static inline void bt431_position_cursor(struct bt431_regs *regs, u16 x, u16 y)
bt431_write_reg_inc(regs, (y >> 8) & 0x0f); /* BT431_REG_CYHI */
}
-static inline void bt431_set_font(struct bt431_regs *regs, u8 fgc,
- u16 width, u16 height)
+static inline void bt431_set_cursor(struct bt431_regs *regs,
+ const char *data, const char *mask,
+ u16 rop, u16 width, u16 height)
{
+ u16 x, y;
int i;
- u16 fgp = fgc ? 0xffff : 0x0000;
- u16 bgp = fgc ? 0x0000 : 0xffff;
+ i = 0;
+ width = DIV_ROUND_UP(width, 8);
bt431_select_reg(regs, BT431_REG_CRAM_BASE);
- for (i = BT431_REG_CRAM_BASE; i <= BT431_REG_CRAM_END; i++) {
- u16 value;
-
- if (height << 6 <= i << 3)
- value = bgp;
- else if (width <= i % 8 << 3)
- value = bgp;
- else if (((width >> 3) & 0xffff) > i % 8)
- value = fgp;
- else
- value = fgp & ~(bgp << (width % 8 << 1));
-
- bt431_write_cmap_inc(regs, value);
- }
+ for (y = 0; y < BT431_CURSOR_SIZE; y++)
+ for (x = 0; x < BT431_CURSOR_SIZE / 8; x++) {
+ u16 val = 0;
+
+ if (y < height && x < width) {
+ val = mask[i];
+ if (rop == ROP_XOR)
+ val = (val << 8) | (val ^ data[i]);
+ else
+ val = (val << 8) | (val & data[i]);
+ i++;
+ }
+ bt431_write_cmap_inc(regs, val);
+ }
}
static inline void bt431_init_cursor(struct bt431_regs *regs)
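In the new bt431_set_cursor() each 16-bit cursor-RAM word carries the mask byte in the high half and the combined data byte in the low half; ROP_XOR selects mask^data for the low byte, anything else masks with AND. A standalone model of the packing (hypothetical helper, for illustration only):

#include <stdint.h>

/* Per-byte packing as done in bt431_set_cursor():
 * e.g. mask 0xff, data 0x0f, XOR -> 0xfff0. */
static uint16_t bt431_pack(uint8_t data, uint8_t mask, int is_xor)
{
	uint8_t lo = is_xor ? (uint8_t)(mask ^ data)
			    : (uint8_t)(mask & data);

	return ((uint16_t)mask << 8) | lo;
}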
diff --git a/drivers/video/fbdev/bt455.h b/drivers/video/fbdev/bt455.h
index 80f61b03e9ae..dd1404b40611 100644
--- a/drivers/video/fbdev/bt455.h
+++ b/drivers/video/fbdev/bt455.h
@@ -2,6 +2,7 @@
* linux/drivers/video/bt455.h
*
* Copyright 2003 Thiemo Seufer <seufer@csv.ica.uni-stuttgart.de>
+ * Copyright 2016 Maciej W. Rozycki <macro@linux-mips.org>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
@@ -29,66 +30,61 @@ static inline void bt455_select_reg(struct bt455_regs *regs, int ir)
regs->addr_cmap = ir & 0x0f;
}
+static inline void bt455_reset_reg(struct bt455_regs *regs)
+{
+ mb();
+ regs->addr_clr = 0;
+}
+
/*
* Read/write to a Bt455 color map register.
*/
-static inline void bt455_read_cmap_entry(struct bt455_regs *regs, int cr,
- u8* red, u8* green, u8* blue)
+static inline void bt455_read_cmap_next(struct bt455_regs *regs, u8 *grey)
{
- bt455_select_reg(regs, cr);
mb();
- *red = regs->addr_cmap_data & 0x0f;
+ regs->addr_cmap_data;
rmb();
- *green = regs->addr_cmap_data & 0x0f;
+ *grey = regs->addr_cmap_data & 0xf;
rmb();
- *blue = regs->addr_cmap_data & 0x0f;
+ regs->addr_cmap_data;
}
-static inline void bt455_write_cmap_entry(struct bt455_regs *regs, int cr,
- u8 red, u8 green, u8 blue)
+static inline void bt455_write_cmap_next(struct bt455_regs *regs, u8 grey)
{
- bt455_select_reg(regs, cr);
wmb();
- regs->addr_cmap_data = red & 0x0f;
+ regs->addr_cmap_data = 0x0;
wmb();
- regs->addr_cmap_data = green & 0x0f;
+ regs->addr_cmap_data = grey & 0xf;
wmb();
- regs->addr_cmap_data = blue & 0x0f;
+ regs->addr_cmap_data = 0x0;
}
-static inline void bt455_write_ovly_entry(struct bt455_regs *regs, int cr,
- u8 red, u8 green, u8 blue)
+static inline void bt455_write_ovly_next(struct bt455_regs *regs, u8 grey)
{
- bt455_select_reg(regs, cr);
wmb();
- regs->addr_ovly = red & 0x0f;
+ regs->addr_ovly = 0x0;
wmb();
- regs->addr_ovly = green & 0x0f;
+ regs->addr_ovly = grey & 0xf;
wmb();
- regs->addr_ovly = blue & 0x0f;
+ regs->addr_ovly = 0x0;
}
-static inline void bt455_set_cursor(struct bt455_regs *regs)
+static inline void bt455_read_cmap_entry(struct bt455_regs *regs,
+ int cr, u8 *grey)
{
- mb();
- regs->addr_ovly = 0x0f;
- wmb();
- regs->addr_ovly = 0x0f;
- wmb();
- regs->addr_ovly = 0x0f;
+ bt455_select_reg(regs, cr);
+ bt455_read_cmap_next(regs, grey);
}
-static inline void bt455_erase_cursor(struct bt455_regs *regs)
+static inline void bt455_write_cmap_entry(struct bt455_regs *regs,
+ int cr, u8 grey)
{
- /* bt455_write_cmap_entry(regs, 8, 0x00, 0x00, 0x00); */
- /* bt455_write_cmap_entry(regs, 9, 0x00, 0x00, 0x00); */
- bt455_write_ovly_entry(regs, 8, 0x03, 0x03, 0x03);
- bt455_write_ovly_entry(regs, 9, 0x07, 0x07, 0x07);
+ bt455_select_reg(regs, cr);
+ bt455_write_cmap_next(regs, grey);
+}
- wmb();
- regs->addr_ovly = 0x09;
- wmb();
- regs->addr_ovly = 0x09;
- wmb();
- regs->addr_ovly = 0x09;
+static inline void bt455_write_ovly_entry(struct bt455_regs *regs, u8 grey)
+{
+ bt455_reset_reg(regs);
+ bt455_write_ovly_next(regs, grey);
}
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 6b2a06d09f2b..d8d583d32a37 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 2,
.hsync_len = 0,
.vsync_len = 0,
- .sync = FB_SYNC_CLK_INVERT |
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .sync = FB_SYNC_CLK_INVERT,
},
/* Sharp LK043T1DG01 */
[1] = {
@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 2,
.hsync_len = 41,
.vsync_len = 10,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .sync = 0,
.flag = 0,
},
[2] = {
@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
.lower_margin = 10,
.hsync_len = 10,
.vsync_len = 10,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .sync = 0,
.flag = 0,
},
[3] = {
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index 5b1081030cbb..75f0db25d19f 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -316,9 +316,8 @@ static int ep93xxfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
unsigned int offset = vma->vm_pgoff << PAGE_SHIFT;
if (offset < info->fix.smem_len) {
- return dma_mmap_writecombine(info->dev, vma, info->screen_base,
- info->fix.smem_start,
- info->fix.smem_len);
+ return dma_mmap_wc(info->dev, vma, info->screen_base,
+ info->fix.smem_start, info->fix.smem_len);
}
return -EINVAL;
@@ -428,8 +427,7 @@ static int ep93xxfb_alloc_videomem(struct fb_info *info)
/* Maximum 16bpp -> used memory is maximum x*y*2 bytes */
fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES * 2;
- virt_addr = dma_alloc_writecombine(info->dev, fb_size,
- &phys_addr, GFP_KERNEL);
+ virt_addr = dma_alloc_wc(info->dev, fb_size, &phys_addr, GFP_KERNEL);
if (!virt_addr)
return -ENOMEM;
diff --git a/drivers/video/fbdev/exynos/Kconfig b/drivers/video/fbdev/exynos/Kconfig
index 1f16b4678c71..d916bef94f25 100644
--- a/drivers/video/fbdev/exynos/Kconfig
+++ b/drivers/video/fbdev/exynos/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig EXYNOS_VIDEO
- bool "Exynos Video driver support"
+ tristate "Exynos Video driver support"
depends on ARCH_S5PV210 || ARCH_EXYNOS
help
This enables support for EXYNOS Video device.
@@ -15,13 +15,13 @@ if EXYNOS_VIDEO
#
config EXYNOS_MIPI_DSI
- bool "EXYNOS MIPI DSI driver support."
+ tristate "EXYNOS MIPI DSI driver support."
select GENERIC_PHY
help
This enables support for MIPI-DSI device.
config EXYNOS_LCD_S6E8AX0
- bool "S6E8AX0 MIPI AMOLED LCD Driver"
+ tristate "S6E8AX0 MIPI AMOLED LCD Driver"
depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
depends on (LCD_CLASS_DEVICE = y)
default n
diff --git a/drivers/video/fbdev/exynos/Makefile b/drivers/video/fbdev/exynos/Makefile
index b5b1bd228abb..02d8dc522fea 100644
--- a/drivers/video/fbdev/exynos/Makefile
+++ b/drivers/video/fbdev/exynos/Makefile
@@ -2,6 +2,8 @@
# Makefile for the exynos video drivers.
#
-obj-$(CONFIG_EXYNOS_MIPI_DSI) += exynos_mipi_dsi.o exynos_mipi_dsi_common.o \
- exynos_mipi_dsi_lowlevel.o
+obj-$(CONFIG_EXYNOS_MIPI_DSI) += exynos-mipi-dsi-mod.o
+
+exynos-mipi-dsi-mod-objs += exynos_mipi_dsi.o exynos_mipi_dsi_common.o \
+ exynos_mipi_dsi_lowlevel.o
obj-$(CONFIG_EXYNOS_LCD_S6E8AX0) += s6e8ax0.o
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
index b527fe464628..92e4af3caaf8 100644
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
@@ -263,6 +263,7 @@ int exynos_mipi_dsi_register_lcd_driver(struct mipi_dsim_lcd_driver *lcd_drv)
return 0;
}
+EXPORT_SYMBOL_GPL(exynos_mipi_dsi_register_lcd_driver);
static struct mipi_dsim_ddi *exynos_mipi_dsi_bind_lcd_ddi(
struct mipi_dsim_device *dsim,
@@ -402,12 +403,12 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
goto error;
}
- dsim->irq = platform_get_irq(pdev, 0);
- if (IS_ERR_VALUE(dsim->irq)) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
dev_err(&pdev->dev, "failed to request dsim irq resource\n");
- ret = -EINVAL;
goto error;
}
+ dsim->irq = ret;
init_completion(&dsim_wr_comp);
init_completion(&dsim_rd_comp);
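The hunk above matters because platform_get_irq() reports errors as a negative int; storing it first and testing the (possibly unsigned) irq field with IS_ERR_VALUE() could miss the error and also discarded the real errno. The general pattern the fix adopts, as a sketch (dev->irq stands in for the driver's own field):

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;	/* propagate the real errno */
	dev->irq = ret;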
diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c
index b63d55f481fa..1a242b1338e9 100644
--- a/drivers/video/fbdev/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
@@ -1185,8 +1185,8 @@ static int gbefb_probe(struct platform_device *p_dev)
} else {
/* try to allocate memory with the classical allocator
* this has high chance to fail on low memory machines */
- gbe_mem = dma_alloc_writecombine(NULL, gbe_mem_size,
- &gbe_dma_addr, GFP_KERNEL);
+ gbe_mem = dma_alloc_wc(NULL, gbe_mem_size, &gbe_dma_addr,
+ GFP_KERNEL);
if (!gbe_mem) {
printk(KERN_ERR "gbefb: couldn't allocate framebuffer memory\n");
ret = -ENOMEM;
@@ -1238,7 +1238,7 @@ static int gbefb_probe(struct platform_device *p_dev)
out_gbe_unmap:
arch_phys_wc_del(par->wc_cookie);
if (gbe_dma_addr)
- dma_free_writecombine(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
+ dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
out_tiles_free:
dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
(void *)gbe_tiles.cpu, gbe_tiles.dma);
@@ -1259,7 +1259,7 @@ static int gbefb_remove(struct platform_device* p_dev)
gbe_turn_off();
arch_phys_wc_del(par->wc_cookie);
if (gbe_dma_addr)
- dma_free_writecombine(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
+ dma_free_wc(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
(void *)gbe_tiles.cpu, gbe_tiles.dma);
release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 9b167f7ef6c6..4363c64d74e8 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -33,7 +33,6 @@
#if defined(CONFIG_PPC)
#include <linux/nvram.h>
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#include "macmodes.h"
#endif
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index bb2f1e866020..76b6a7784b06 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -937,8 +937,8 @@ static int imxfb_probe(struct platform_device *pdev)
}
fbi->map_size = PAGE_ALIGN(info->fix.smem_len);
- info->screen_base = dma_alloc_writecombine(&pdev->dev, fbi->map_size,
- &fbi->map_dma, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(&pdev->dev, fbi->map_size,
+ &fbi->map_dma, GFP_KERNEL);
if (!info->screen_base) {
dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
@@ -1005,8 +1005,8 @@ failed_cmap:
if (pdata && pdata->exit)
pdata->exit(fbi->pdev);
failed_platform_init:
- dma_free_writecombine(&pdev->dev, fbi->map_size, info->screen_base,
- fbi->map_dma);
+ dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
+ fbi->map_dma);
failed_map:
iounmap(fbi->regs);
failed_ioremap:
@@ -1041,8 +1041,8 @@ static int imxfb_remove(struct platform_device *pdev)
kfree(info->pseudo_palette);
framebuffer_release(info);
- dma_free_writecombine(&pdev->dev, fbi->map_size, info->screen_base,
- fbi->map_dma);
+ dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
+ fbi->map_dma);
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index bbec737eef30..bf207444ba0c 100644
--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
@@ -302,7 +302,7 @@ static __inline__ int get_opt_int(const char *this_opt, const char *name,
}
static __inline__ int get_opt_bool(const char *this_opt, const char *name,
- int *ret)
+ bool *ret)
{
if (!ret)
return 0;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.h b/drivers/video/fbdev/matrox/matroxfb_base.h
index 09b02cd1eb0e..7a90ea2c4613 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.h
+++ b/drivers/video/fbdev/matrox/matroxfb_base.h
@@ -47,7 +47,6 @@
#if defined(CONFIG_PPC_PMAC)
#include <asm/prom.h>
-#include <asm/pci-bridge.h>
#include "../macmodes.h"
#endif
diff --git a/drivers/video/fbdev/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index ad04a01e2761..abb6bbf226d5 100644
--- a/drivers/video/fbdev/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
@@ -354,7 +354,8 @@ static int metronome_powerup_cmd(struct metronomefb_par *par)
}
/* the rest are 0 */
- memset((u8 *) (par->metromem_cmd->args + i), 0, (32-i)*2);
+ memset(&par->metromem_cmd->args[i], 0,
+ (ARRAY_SIZE(par->metromem_cmd->args) - i) * 2);
par->metromem_cmd->csum = cs;
@@ -376,7 +377,8 @@ static int metronome_config_cmd(struct metronomefb_par *par)
memcpy(par->metromem_cmd->args, epd_frame_table[par->dt].config,
sizeof(epd_frame_table[par->dt].config));
/* the rest are 0 */
- memset((u8 *) (par->metromem_cmd->args + 4), 0, (32-4)*2);
+ memset(&par->metromem_cmd->args[4], 0,
+ (ARRAY_SIZE(par->metromem_cmd->args) - 4) * 2);
par->metromem_cmd->csum = 0xCC10;
par->metromem_cmd->csum += calc_img_cksum(par->metromem_cmd->args, 4);
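The metronomefb hunks replace the magic 32 with the array's real bound. The generic form of the idiom is below (a sketch; metronomefb multiplies by 2 because args[] is u16, which sizeof() covers here):

	/* Zero everything past the first `used` elements of a fixed array
	 * without hard-coding its length. */
	memset(&arr[used], 0, (ARRAY_SIZE(arr) - used) * sizeof(arr[0]));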
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 7947634ee6b0..f91b1db262b0 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -1336,9 +1336,8 @@ static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
int retval = 0;
dma_addr_t addr;
- fbi->screen_base = dma_alloc_writecombine(fbi->device,
- mem_len,
- &addr, GFP_DMA | GFP_KERNEL);
+ fbi->screen_base = dma_alloc_wc(fbi->device, mem_len, &addr,
+ GFP_DMA | GFP_KERNEL);
if (!fbi->screen_base) {
dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
@@ -1378,8 +1377,8 @@ err0:
*/
static int mx3fb_unmap_video_memory(struct fb_info *fbi)
{
- dma_free_writecombine(fbi->device, fbi->fix.smem_len,
- fbi->screen_base, fbi->fix.smem_start);
+ dma_free_wc(fbi->device, fbi->fix.smem_len, fbi->screen_base,
+ fbi->fix.smem_start);
fbi->screen_base = NULL;
mutex_lock(&fbi->mm_lock);
diff --git a/drivers/video/fbdev/n411.c b/drivers/video/fbdev/n411.c
index 935830fea7b6..053deacad7cc 100644
--- a/drivers/video/fbdev/n411.c
+++ b/drivers/video/fbdev/n411.c
@@ -165,16 +165,22 @@ static int __init n411_init(void)
if (!n411_device)
return -ENOMEM;
- platform_device_add_data(n411_device, &n411_board, sizeof(n411_board));
+ ret = platform_device_add_data(n411_device, &n411_board,
+ sizeof(n411_board));
+ if (ret)
+ goto put_plat_device;
/* this _add binds hecubafb to n411. hecubafb refcounts n411 */
ret = platform_device_add(n411_device);
if (ret)
- platform_device_put(n411_device);
+ goto put_plat_device;
- return ret;
+ return 0;
+put_plat_device:
+ platform_device_put(n411_device);
+ return ret;
}
static void __exit n411_exit(void)
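The n411_init() reshuffle above converts the partial-failure handling to the usual single-exit unwind, so every failure releases the device reference exactly once. Skeleton of the pattern with hypothetical step names:

	ret = step_a();		/* e.g. platform_device_add_data() */
	if (ret)
		goto err;
	ret = step_b();		/* e.g. platform_device_add() */
	if (ret)
		goto err;
	return 0;

err:
	undo_alloc();		/* e.g. platform_device_put() */
	return ret;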
diff --git a/drivers/video/fbdev/nuc900fb.c b/drivers/video/fbdev/nuc900fb.c
index 389fa2cbb713..6680edae4696 100644
--- a/drivers/video/fbdev/nuc900fb.c
+++ b/drivers/video/fbdev/nuc900fb.c
@@ -396,8 +396,8 @@ static int nuc900fb_map_video_memory(struct fb_info *info)
dev_dbg(fbi->dev, "nuc900fb_map_video_memory(fbi=%p) map_size %lu\n",
fbi, map_size);
- info->screen_base = dma_alloc_writecombine(fbi->dev, map_size,
- &map_dma, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma,
+ GFP_KERNEL);
if (!info->screen_base)
return -ENOMEM;
@@ -411,8 +411,8 @@ static int nuc900fb_map_video_memory(struct fb_info *info)
static inline void nuc900fb_unmap_video_memory(struct fb_info *info)
{
struct nuc900fb_info *fbi = info->par;
- dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
- info->screen_base, info->fix.smem_start);
+ dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ info->screen_base, info->fix.smem_start);
}
static irqreturn_t nuc900fb_irqhandler(int irq, void *dev_id)
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 43a0a52fc527..fb60a8f0cc94 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -28,10 +28,6 @@
#include <linux/pci.h>
#include <asm/io.h>
-#ifdef CONFIG_PPC64
-#include <asm/pci-bridge.h>
-#endif
-
#ifdef CONFIG_PPC32
#include <asm/bootx.h>
#endif
diff --git a/drivers/video/fbdev/omap/lcd_h3.c b/drivers/video/fbdev/omap/lcd_h3.c
index a0729d0200d0..21512b027ff7 100644
--- a/drivers/video/fbdev/omap/lcd_h3.c
+++ b/drivers/video/fbdev/omap/lcd_h3.c
@@ -22,8 +22,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c/tps65010.h>
+#include <linux/gpio.h>
-#include <asm/gpio.h>
#include "omapfb.h"
#define MODULE_NAME "omapfb-lcd_h3"
diff --git a/drivers/video/fbdev/omap/lcd_osk.c b/drivers/video/fbdev/omap/lcd_osk.c
index c3ddebf934b2..b56886c7055e 100644
--- a/drivers/video/fbdev/omap/lcd_osk.c
+++ b/drivers/video/fbdev/omap/lcd_osk.c
@@ -22,8 +22,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#include <mach/hardware.h>
#include <mach/mux.h>
diff --git a/drivers/video/fbdev/omap/lcd_palmtt.c b/drivers/video/fbdev/omap/lcd_palmtt.c
index 3d0ea04ec248..1a936d5c7b6f 100644
--- a/drivers/video/fbdev/omap/lcd_palmtt.c
+++ b/drivers/video/fbdev/omap/lcd_palmtt.c
@@ -28,8 +28,8 @@ GPIO13 - screen blanking
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/gpio.h>
-#include <asm/gpio.h>
#include "omapfb.h"
static int palmtt_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c
index 6efa2591eaa8..e3d9b9ea5498 100644
--- a/drivers/video/fbdev/omap/lcdc.c
+++ b/drivers/video/fbdev/omap/lcdc.c
@@ -612,8 +612,8 @@ static void lcdc_dma_handler(u16 status, void *data)
static int alloc_palette_ram(void)
{
- lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
- MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
+ lcdc.palette_virt = dma_alloc_wc(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
+ &lcdc.palette_phys, GFP_KERNEL);
if (lcdc.palette_virt == NULL) {
dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
return -ENOMEM;
@@ -625,8 +625,8 @@ static int alloc_palette_ram(void)
static void free_palette_ram(void)
{
- dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
- lcdc.palette_virt, lcdc.palette_phys);
+ dma_free_wc(lcdc.fbdev->dev, MAX_PALETTE_SIZE, lcdc.palette_virt,
+ lcdc.palette_phys);
}
static int alloc_fbmem(struct omapfb_mem_region *region)
@@ -642,8 +642,8 @@ static int alloc_fbmem(struct omapfb_mem_region *region)
if (region->size > frame_size)
frame_size = region->size;
lcdc.vram_size = frame_size;
- lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
- lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
+ lcdc.vram_virt = dma_alloc_wc(lcdc.fbdev->dev, lcdc.vram_size,
+ &lcdc.vram_phys, GFP_KERNEL);
if (lcdc.vram_virt == NULL) {
dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
return -ENOMEM;
@@ -660,8 +660,8 @@ static int alloc_fbmem(struct omapfb_mem_region *region)
static void free_fbmem(void)
{
- dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
- lcdc.vram_virt, lcdc.vram_phys);
+ dma_free_wc(lcdc.fbdev->dev, lcdc.vram_size, lcdc.vram_virt,
+ lcdc.vram_phys);
}
static int setup_fbmem(struct omapfb_mem_desc *req_md)
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 393ae1bc07e8..6429f33167f5 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -594,27 +594,6 @@ static int set_fb_var(struct fb_info *fbi,
}
-/* Set rotation (0, 90, 180, 270 degree), and switch to the new mode. */
-static void omapfb_rotate(struct fb_info *fbi, int rotate)
-{
- struct omapfb_plane_struct *plane = fbi->par;
- struct omapfb_device *fbdev = plane->fbdev;
-
- omapfb_rqueue_lock(fbdev);
- if (rotate != fbi->var.rotate) {
- struct fb_var_screeninfo *new_var = &fbdev->new_var;
-
- memcpy(new_var, &fbi->var, sizeof(*new_var));
- new_var->rotate = rotate;
- if (set_fb_var(fbi, new_var) == 0 &&
- memcmp(new_var, &fbi->var, sizeof(*new_var))) {
- memcpy(&fbi->var, new_var, sizeof(*new_var));
- ctrl_change_mode(fbi);
- }
- }
- omapfb_rqueue_unlock(fbdev);
-}
-
/*
* Set new x,y offsets in the virtual display for the visible area and switch
* to the new mode.
@@ -1256,7 +1235,6 @@ static struct fb_ops omapfb_ops = {
.fb_ioctl = omapfb_ioctl,
.fb_check_var = omapfb_check_var,
.fb_set_par = omapfb_set_par,
- .fb_rotate = omapfb_rotate,
.fb_pan_display = omapfb_pan_display,
};
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index abfd1f6e3327..1954ec913ce5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
char *desc, struct gpio_desc **gpiod)
{
- struct gpio_desc *gd;
int r;
- *gpiod = NULL;
-
r = devm_gpio_request_one(dev, gpio, flags, desc);
- if (r)
+ if (r) {
+ *gpiod = NULL;
return r == -ENOENT ? 0 : r;
+ }
- gd = gpio_to_desc(gpio);
- if (IS_ERR(gd))
- return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
+ *gpiod = gpio_to_desc(gpio);
- *gpiod = gd;
return 0;
}
diff --git a/drivers/video/fbdev/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c
index 838424817de2..ffe2dd482f84 100644
--- a/drivers/video/fbdev/pmag-aa-fb.c
+++ b/drivers/video/fbdev/pmag-aa-fb.c
@@ -8,6 +8,7 @@
* and Harald Koerfgen <hkoerfg@web.de>, which itself is derived from
* "HP300 Topcat framebuffer support (derived from macfb of all things)
* Phil Blundell <philb@gnu.org> 1998"
+ * Copyright (c) 2016 Maciej W. Rozycki
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
@@ -21,37 +22,29 @@
*
* 2003-09-21 Thiemo Seufer <seufer@csv.ica.uni-stuttgart.de>
* Hardware cursor support.
+ *
+ * 2016-02-21 Maciej W. Rozycki <macro@linux-mips.org>
+ * Version 0.03: Rewritten for the new FB and TC APIs.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+
+#include <linux/compiler.h>
#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/fb.h>
-#include <linux/console.h>
-
-#include <asm/bootinfo.h>
-#include <asm/dec/machtype.h>
-#include <asm/dec/tc.h>
-
-#include <video/fbcon.h>
-#include <video/fbcon-cfb8.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tc.h>
+#include <linux/timer.h>
#include "bt455.h"
#include "bt431.h"
/* Version information */
-#define DRIVER_VERSION "0.02"
+#define DRIVER_VERSION "0.03"
#define DRIVER_AUTHOR "Karsten Merker <merker@linuxtag.org>"
#define DRIVER_DESCRIPTION "PMAG-AA Framebuffer Driver"
-/* Prototypes */
-static int aafb_set_var(struct fb_var_screeninfo *var, int con,
- struct fb_info *info);
-
/*
* Bt455 RAM DAC register base offset (rel. to TC slot base address).
*/
@@ -68,443 +61,246 @@ static int aafb_set_var(struct fb_var_screeninfo *var, int con,
*/
#define PMAG_AA_ONBOARD_FBMEM_OFFSET 0x200000
-struct aafb_cursor {
- struct timer_list timer;
- int enable;
- int on;
- int vbl_cnt;
- int blink_rate;
- u16 x, y, width, height;
+struct aafb_par {
+ void __iomem *mmio;
+ struct bt455_regs __iomem *bt455;
+ struct bt431_regs __iomem *bt431;
};
-#define CURSOR_TIMER_FREQ (HZ / 50)
-#define CURSOR_BLINK_RATE (20)
-#define CURSOR_DRAW_DELAY (2)
-
-struct aafb_info {
- struct fb_info info;
- struct display disp;
- struct aafb_cursor cursor;
- struct bt455_regs *bt455;
- struct bt431_regs *bt431;
- unsigned long fb_start;
- unsigned long fb_size;
- unsigned long fb_line_length;
+static struct fb_var_screeninfo aafb_defined = {
+ .xres = 1280,
+ .yres = 1024,
+ .xres_virtual = 2048,
+ .yres_virtual = 1024,
+ .bits_per_pixel = 8,
+ .grayscale = 1,
+ .red.length = 0,
+ .green.length = 1,
+ .blue.length = 0,
+ .activate = FB_ACTIVATE_NOW,
+ .accel_flags = FB_ACCEL_NONE,
+ .pixclock = 7645,
+ .left_margin = 224,
+ .right_margin = 32,
+ .upper_margin = 33,
+ .lower_margin = 3,
+ .hsync_len = 160,
+ .vsync_len = 3,
+ .sync = FB_SYNC_ON_GREEN,
+ .vmode = FB_VMODE_NONINTERLACED,
};
-/*
- * Max 3 TURBOchannel slots -> max 3 PMAG-AA.
- */
-static struct aafb_info my_fb_info[3];
-
-static struct aafb_par {
-} current_par;
-
-static int currcon = -1;
-
-static void aafb_set_cursor(struct aafb_info *info, int on)
-{
- struct aafb_cursor *c = &info->cursor;
-
- if (on) {
- bt431_position_cursor(info->bt431, c->x, c->y);
- bt431_enable_cursor(info->bt431);
- } else
- bt431_erase_cursor(info->bt431);
-}
-
-static void aafbcon_cursor(struct display *disp, int mode, int x, int y)
-{
- struct aafb_info *info = (struct aafb_info *)disp->fb_info;
- struct aafb_cursor *c = &info->cursor;
-
- x *= fontwidth(disp);
- y *= fontheight(disp);
-
- if (c->x == x && c->y == y && (mode == CM_ERASE) == !c->enable)
- return;
-
- c->enable = 0;
- if (c->on)
- aafb_set_cursor(info, 0);
- c->x = x - disp->var.xoffset;
- c->y = y - disp->var.yoffset;
-
- switch (mode) {
- case CM_ERASE:
- c->on = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- if (c->on)
- aafb_set_cursor(info, c->on);
- else
- c->vbl_cnt = CURSOR_DRAW_DELAY;
- c->enable = 1;
- break;
- }
-}
+static struct fb_fix_screeninfo aafb_fix = {
+ .id = "PMAG-AA",
+ .smem_len = (2048 * 1024),
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO10,
+ .ypanstep = 1,
+ .ywrapstep = 1,
+ .line_length = 2048,
+ .mmio_len = PMAG_AA_ONBOARD_FBMEM_OFFSET - PMAG_AA_BT455_OFFSET,
+};
-static int aafbcon_set_font(struct display *disp, int width, int height)
+static int aafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
- struct aafb_info *info = (struct aafb_info *)disp->fb_info;
- struct aafb_cursor *c = &info->cursor;
- u8 fgc = ~attr_bgcol_ec(disp, disp->conp, &info->info);
+ struct aafb_par *par = info->par;
- if (width > 64 || height > 64 || width < 0 || height < 0)
+ if (cursor->image.height > BT431_CURSOR_SIZE ||
+ cursor->image.width > BT431_CURSOR_SIZE) {
+ bt431_erase_cursor(par->bt431);
return -EINVAL;
-
- c->height = height;
- c->width = width;
-
- bt431_set_font(info->bt431, fgc, width, height);
-
- return 1;
-}
-
-static void aafb_cursor_timer_handler(unsigned long data)
-{
- struct aafb_info *info = (struct aafb_info *)data;
- struct aafb_cursor *c = &info->cursor;
-
- if (!c->enable)
- goto out;
-
- if (c->vbl_cnt && --c->vbl_cnt == 0) {
- c->on ^= 1;
- aafb_set_cursor(info, c->on);
- c->vbl_cnt = c->blink_rate;
}
-out:
- c->timer.expires = jiffies + CURSOR_TIMER_FREQ;
- add_timer(&c->timer);
-}
-
-static void __init aafb_cursor_init(struct aafb_info *info)
-{
- struct aafb_cursor *c = &info->cursor;
-
- c->enable = 1;
- c->on = 1;
- c->x = c->y = 0;
- c->width = c->height = 0;
- c->vbl_cnt = CURSOR_DRAW_DELAY;
- c->blink_rate = CURSOR_BLINK_RATE;
-
- init_timer(&c->timer);
- c->timer.data = (unsigned long)info;
- c->timer.function = aafb_cursor_timer_handler;
- mod_timer(&c->timer, jiffies + CURSOR_TIMER_FREQ);
-}
-
-static void __exit aafb_cursor_exit(struct aafb_info *info)
-{
- struct aafb_cursor *c = &info->cursor;
-
- del_timer_sync(&c->timer);
-}
-
-static struct display_switch aafb_switch8 = {
- .setup = fbcon_cfb8_setup,
- .bmove = fbcon_cfb8_bmove,
- .clear = fbcon_cfb8_clear,
- .putc = fbcon_cfb8_putc,
- .putcs = fbcon_cfb8_putcs,
- .revc = fbcon_cfb8_revc,
- .cursor = aafbcon_cursor,
- .set_font = aafbcon_set_font,
- .clear_margins = fbcon_cfb8_clear_margins,
- .fontwidthmask = FONTWIDTH(4)|FONTWIDTH(8)|FONTWIDTH(12)|FONTWIDTH(16)
-};
-
-static void aafb_get_par(struct aafb_par *par)
-{
- *par = current_par;
-}
-
-static int aafb_get_fix(struct fb_fix_screeninfo *fix, int con,
- struct fb_info *info)
-{
- struct aafb_info *ip = (struct aafb_info *)info;
-
- memset(fix, 0, sizeof(struct fb_fix_screeninfo));
- strcpy(fix->id, "PMAG-AA");
- fix->smem_start = ip->fb_start;
- fix->smem_len = ip->fb_size;
- fix->type = FB_TYPE_PACKED_PIXELS;
- fix->ypanstep = 1;
- fix->ywrapstep = 1;
- fix->visual = FB_VISUAL_MONO10;
- fix->line_length = 1280;
- fix->accel = FB_ACCEL_NONE;
+ if (!cursor->enable)
+ bt431_erase_cursor(par->bt431);
- return 0;
-}
+ if (cursor->set & FB_CUR_SETPOS)
+ bt431_position_cursor(par->bt431,
+ cursor->image.dx, cursor->image.dy);
+ if (cursor->set & FB_CUR_SETCMAP) {
+ u8 fg = cursor->image.fg_color ? 0xf : 0x0;
+ u8 bg = cursor->image.bg_color ? 0xf : 0x0;
-static void aafb_set_disp(struct display *disp, int con,
- struct aafb_info *info)
-{
- struct fb_fix_screeninfo fix;
-
- disp->fb_info = &info->info;
- aafb_set_var(&disp->var, con, &info->info);
- if (disp->conp && disp->conp->vc_sw && disp->conp->vc_sw->con_cursor)
- disp->conp->vc_sw->con_cursor(disp->conp, CM_ERASE);
- disp->dispsw = &aafb_switch8;
- disp->dispsw_data = 0;
-
- aafb_get_fix(&fix, con, &info->info);
- disp->screen_base = (u8 *) fix.smem_start;
- disp->visual = fix.visual;
- disp->type = fix.type;
- disp->type_aux = fix.type_aux;
- disp->ypanstep = fix.ypanstep;
- disp->ywrapstep = fix.ywrapstep;
- disp->line_length = fix.line_length;
- disp->next_line = 2048;
- disp->can_soft_blank = 1;
- disp->inverse = 0;
- disp->scrollmode = SCROLL_YREDRAW;
-
- aafbcon_set_font(disp, fontwidth(disp), fontheight(disp));
-}
+ bt455_write_cmap_entry(par->bt455, 8, bg);
+ bt455_write_cmap_next(par->bt455, bg);
+ bt455_write_ovly_next(par->bt455, fg);
+ }
+ if (cursor->set & (FB_CUR_SETSIZE | FB_CUR_SETSHAPE | FB_CUR_SETIMAGE))
+ bt431_set_cursor(par->bt431,
+ cursor->image.data, cursor->mask, cursor->rop,
+ cursor->image.width, cursor->image.height);
-static int aafb_get_cmap(struct fb_cmap *cmap, int kspc, int con,
- struct fb_info *info)
-{
- static u16 color[2] = {0x0000, 0x000f};
- static struct fb_cmap aafb_cmap = {0, 2, color, color, color, NULL};
+ if (cursor->enable)
+ bt431_enable_cursor(par->bt431);
- fb_copy_cmap(&aafb_cmap, cmap, kspc ? 0 : 2);
return 0;
}
-static int aafb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
- struct fb_info *info)
-{
- u16 color[2] = {0x0000, 0x000f};
-
- if (cmap->start == 0
- && cmap->len == 2
- && memcmp(cmap->red, color, sizeof(color)) == 0
- && memcmp(cmap->green, color, sizeof(color)) == 0
- && memcmp(cmap->blue, color, sizeof(color)) == 0
- && cmap->transp == NULL)
- return 0;
- else
- return -EINVAL;
-}
-
-static int aafb_ioctl(struct fb_info *info, u32 cmd, unsigned long arg)
-{
- /* TODO: Not yet implemented */
- return -ENOIOCTLCMD;
-}
+/* 0 unblanks, any other value blanks. */
-static int aafb_switch(int con, struct fb_info *info)
+static int aafb_blank(int blank, struct fb_info *info)
{
- struct aafb_info *ip = (struct aafb_info *)info;
- struct display *old = (currcon < 0) ? &ip->disp : (fb_display + currcon);
- struct display *new = (con < 0) ? &ip->disp : (fb_display + con);
-
- if (old->conp && old->conp->vc_sw && old->conp->vc_sw->con_cursor)
- old->conp->vc_sw->con_cursor(old->conp, CM_ERASE);
-
- /* Set the current console. */
- currcon = con;
- aafb_set_disp(new, con, ip);
+ struct aafb_par *par = info->par;
+ u8 val = blank ? 0x00 : 0x0f;
+ bt455_write_cmap_entry(par->bt455, 1, val);
return 0;
}
-static void aafb_encode_var(struct fb_var_screeninfo *var,
- struct aafb_par *par)
-{
- var->xres = 1280;
- var->yres = 1024;
- var->xres_virtual = 2048;
- var->yres_virtual = 1024;
- var->xoffset = 0;
- var->yoffset = 0;
- var->bits_per_pixel = 8;
- var->grayscale = 1;
- var->red.offset = 0;
- var->red.length = 0;
- var->red.msb_right = 0;
- var->green.offset = 0;
- var->green.length = 1;
- var->green.msb_right = 0;
- var->blue.offset = 0;
- var->blue.length = 0;
- var->blue.msb_right = 0;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->transp.msb_right = 0;
- var->nonstd = 0;
- var->activate &= ~FB_ACTIVATE_MASK & FB_ACTIVATE_NOW;
- var->accel_flags = 0;
- var->sync = FB_SYNC_ON_GREEN;
- var->vmode &= ~FB_VMODE_MASK & FB_VMODE_NONINTERLACED;
-}
+static struct fb_ops aafb_ops = {
+ .owner = THIS_MODULE,
+ .fb_blank = aafb_blank,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_cursor = aafb_cursor,
+};
-static int aafb_get_var(struct fb_var_screeninfo *var, int con,
- struct fb_info *info)
+static int pmagaafb_probe(struct device *dev)
{
- if (con < 0) {
- struct aafb_par par;
-
- memset(var, 0, sizeof(struct fb_var_screeninfo));
- aafb_get_par(&par);
- aafb_encode_var(var, &par);
- } else
- *var = info->var;
+ struct tc_dev *tdev = to_tc_dev(dev);
+ resource_size_t start, len;
+ struct fb_info *info;
+ struct aafb_par *par;
+ int err;
+
+ info = framebuffer_alloc(sizeof(struct aafb_par), dev);
+ if (!info) {
+ printk(KERN_ERR "%s: Cannot allocate memory\n", dev_name(dev));
+ return -ENOMEM;
+ }
- return 0;
-}
+ par = info->par;
+ dev_set_drvdata(dev, info);
+
+ info->fbops = &aafb_ops;
+ info->fix = aafb_fix;
+ info->var = aafb_defined;
+ info->flags = FBINFO_DEFAULT;
+
+ /* Request the I/O MEM resource. */
+ start = tdev->resource.start;
+ len = tdev->resource.end - start + 1;
+ if (!request_mem_region(start, len, dev_name(dev))) {
+ printk(KERN_ERR "%s: Cannot reserve FB region\n",
+ dev_name(dev));
+ err = -EBUSY;
+ goto err_alloc;
+ }
-static int aafb_set_var(struct fb_var_screeninfo *var, int con,
- struct fb_info *info)
-{
- struct aafb_par par;
+ /* MMIO mapping setup. */
+ info->fix.mmio_start = start + PMAG_AA_BT455_OFFSET;
+ par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len);
+ if (!par->mmio) {
+ printk(KERN_ERR "%s: Cannot map MMIO\n", dev_name(dev));
+ err = -ENOMEM;
+ goto err_resource;
+ }
+ par->bt455 = par->mmio - PMAG_AA_BT455_OFFSET + PMAG_AA_BT455_OFFSET;
+ par->bt431 = par->mmio - PMAG_AA_BT455_OFFSET + PMAG_AA_BT431_OFFSET;
+
+ /* Frame buffer mapping setup. */
+ info->fix.smem_start = start + PMAG_AA_ONBOARD_FBMEM_OFFSET;
+ info->screen_base = ioremap_nocache(info->fix.smem_start,
+ info->fix.smem_len);
+ if (!info->screen_base) {
+ printk(KERN_ERR "%s: Cannot map FB\n", dev_name(dev));
+ err = -ENOMEM;
+ goto err_mmio_map;
+ }
+ info->screen_size = info->fix.smem_len;
- aafb_get_par(&par);
- aafb_encode_var(var, &par);
- info->var = *var;
+ /* Init colormap. */
+ bt455_write_cmap_entry(par->bt455, 0, 0x0);
+ bt455_write_cmap_next(par->bt455, 0xf);
- return 0;
-}
+ /* Init hardware cursor. */
+ bt431_erase_cursor(par->bt431);
+ bt431_init_cursor(par->bt431);
+
+ err = register_framebuffer(info);
+ if (err < 0) {
+ printk(KERN_ERR "%s: Cannot register framebuffer\n",
+ dev_name(dev));
+ goto err_smem_map;
+ }
-static int aafb_update_var(int con, struct fb_info *info)
-{
- struct aafb_info *ip = (struct aafb_info *)info;
- struct display *disp = (con < 0) ? &ip->disp : (fb_display + con);
+ get_device(dev);
- if (con == currcon)
- aafbcon_cursor(disp, CM_ERASE, ip->cursor.x, ip->cursor.y);
+ pr_info("fb%d: %s frame buffer device at %s\n",
+ info->node, info->fix.id, dev_name(dev));
return 0;
-}
-/* 0 unblanks, any other blanks. */
-static void aafb_blank(int blank, struct fb_info *info)
-{
- struct aafb_info *ip = (struct aafb_info *)info;
- u8 val = blank ? 0x00 : 0x0f;
+err_smem_map:
+ iounmap(info->screen_base);
- bt455_write_cmap_entry(ip->bt455, 1, val, val, val);
- aafbcon_cursor(&ip->disp, CM_ERASE, ip->cursor.x, ip->cursor.y);
-}
+err_mmio_map:
+ iounmap(par->mmio);
-static struct fb_ops aafb_ops = {
- .owner = THIS_MODULE,
- .fb_get_fix = aafb_get_fix,
- .fb_get_var = aafb_get_var,
- .fb_set_var = aafb_set_var,
- .fb_get_cmap = aafb_get_cmap,
- .fb_set_cmap = aafb_set_cmap,
- .fb_ioctl = aafb_ioctl
-};
+err_resource:
+ release_mem_region(start, len);
-static int __init init_one(int slot)
-{
- unsigned long base_addr = CKSEG1ADDR(get_tc_base_addr(slot));
- struct aafb_info *ip = &my_fb_info[slot];
-
- memset(ip, 0, sizeof(struct aafb_info));
-
- /*
- * Framebuffer display memory base address and friends.
- */
- ip->bt455 = (struct bt455_regs *) (base_addr + PMAG_AA_BT455_OFFSET);
- ip->bt431 = (struct bt431_regs *) (base_addr + PMAG_AA_BT431_OFFSET);
- ip->fb_start = base_addr + PMAG_AA_ONBOARD_FBMEM_OFFSET;
- ip->fb_size = 2048 * 1024; /* fb_fix_screeninfo.smem_length
- seems to be physical */
- ip->fb_line_length = 2048;
-
- /*
- * Let there be consoles..
- */
- strcpy(ip->info.modename, "PMAG-AA");
- ip->info.node = -1;
- ip->info.flags = FBINFO_FLAG_DEFAULT;
- ip->info.fbops = &aafb_ops;
- ip->info.disp = &ip->disp;
- ip->info.changevar = NULL;
- ip->info.switch_con = &aafb_switch;
- ip->info.updatevar = &aafb_update_var;
- ip->info.blank = &aafb_blank;
-
- aafb_set_disp(&ip->disp, currcon, ip);
-
- /*
- * Configure the RAM DACs.
- */
- bt455_erase_cursor(ip->bt455);
-
- /* Init colormap. */
- bt455_write_cmap_entry(ip->bt455, 0, 0x00, 0x00, 0x00);
- bt455_write_cmap_entry(ip->bt455, 1, 0x0f, 0x0f, 0x0f);
-
- /* Init hardware cursor. */
- bt431_init_cursor(ip->bt431);
- aafb_cursor_init(ip);
-
- /* Clear the screen. */
- memset ((void *)ip->fb_start, 0, ip->fb_size);
-
- if (register_framebuffer(&ip->info) < 0)
- return -EINVAL;
-
- printk(KERN_INFO "fb%d: %s frame buffer in TC slot %d\n",
- GET_FB_IDX(ip->info.node), ip->info.modename, slot);
-
- return 0;
+err_alloc:
+ framebuffer_release(info);
+ return err;
}
-static int __exit exit_one(int slot)
+static int __exit pmagaafb_remove(struct device *dev)
{
- struct aafb_info *ip = &my_fb_info[slot];
-
- if (unregister_framebuffer(&ip->info) < 0)
- return -EINVAL;
-
+ struct tc_dev *tdev = to_tc_dev(dev);
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct aafb_par *par = info->par;
+ resource_size_t start, len;
+
+ put_device(dev);
+ unregister_framebuffer(info);
+ iounmap(info->screen_base);
+ iounmap(par->mmio);
+ start = tdev->resource.start;
+ len = tdev->resource.end - start + 1;
+ release_mem_region(start, len);
+ framebuffer_release(info);
return 0;
}
/*
* Initialise the framebuffer.
*/
-int __init pmagaafb_init(void)
-{
- int sid;
- int found = 0;
-
- while ((sid = search_tc_card("PMAG-AA")) >= 0) {
- found = 1;
- claim_tc_card(sid);
- init_one(sid);
- }
+static const struct tc_device_id pmagaafb_tc_table[] = {
+ { "DEC ", "PMAG-AA " },
+ { }
+};
+MODULE_DEVICE_TABLE(tc, pmagaafb_tc_table);
+
+static struct tc_driver pmagaafb_driver = {
+ .id_table = pmagaafb_tc_table,
+ .driver = {
+ .name = "pmagaafb",
+ .bus = &tc_bus_type,
+ .probe = pmagaafb_probe,
+ .remove = __exit_p(pmagaafb_remove),
+ },
+};
- return found ? 0 : -ENXIO;
+static int __init pmagaafb_init(void)
+{
+#ifndef MODULE
+ if (fb_get_options("pmagaafb", NULL))
+ return -ENXIO;
+#endif
+ return tc_register_driver(&pmagaafb_driver);
}
static void __exit pmagaafb_exit(void)
{
- int sid;
-
- while ((sid = search_tc_card("PMAG-AA")) >= 0) {
- exit_one(sid);
- release_tc_card(sid);
- }
+ tc_unregister_driver(&pmagaafb_driver);
}
+module_init(pmagaafb_init);
+module_exit(pmagaafb_exit);
+
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
-#ifdef MODULE
-module_init(pmagaafb_init);
-module_exit(pmagaafb_exit);
-#endif
diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 914a52ba8477..5872bc4af3ce 100644
--- a/drivers/video/fbdev/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
@@ -60,7 +60,7 @@ static struct fb_var_screeninfo pmagbafb_defined = {
.left_margin = 116,
.right_margin = 12,
.upper_margin = 34,
- .lower_margin = 12,
+ .lower_margin = 0,
.hsync_len = 128,
.vsync_len = 3,
.sync = FB_SYNC_ON_GREEN,
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 0e24eb9c219c..3b1ca4411073 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf,
- nr_pages, WRITE, 0, pages);
+ ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
+ 0, pages);
if (ret < nr_pages) {
nr_pages = ret;
@@ -735,7 +735,7 @@ out:
out_unmap:
for (i = 0; i < nr_pages; i++)
- page_cache_release(pages[i]);
+ put_page(pages[i]);
kfree(pages);
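
The two pvr2fb hunks above track mm API changes made elsewhere in this release: get_user_pages_unlocked() dropped its task/mm arguments (it now always operates on current->mm), and page_cache_release() was folded into put_page(). A minimal sketch of the resulting pin/release pattern, assuming the 4.6-era five-argument signature; pin_user_buffer() and release_user_buffer() are illustrative names, not kernel API:

#include <linux/mm.h>

/* Pin nr_pages of a user buffer for writing; current->mm is implied. */
static long pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
			    struct page **pages)
{
	return get_user_pages_unlocked(uaddr, nr_pages, 1 /* write */,
				       0 /* force */, pages);
}

/* Drop the references taken above. */
static void release_user_buffer(struct page **pages, long nr)
{
	long i;

	for (i = 0; i < nr; i++)
		put_page(pages[i]);	/* was page_cache_release() */
}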
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index efb57c059997..def3a501acd6 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -680,8 +680,8 @@ static int pxa168fb_probe(struct platform_device *pdev)
*/
info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE);
- info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len,
- &fbi->fb_start_dma, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(fbi->dev, info->fix.smem_len,
+ &fbi->fb_start_dma, GFP_KERNEL);
if (info->screen_base == NULL) {
ret = -ENOMEM;
goto failed_free_info;
@@ -804,8 +804,8 @@ static int pxa168fb_remove(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
- dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
- info->screen_base, info->fix.smem_start);
+ dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ info->screen_base, info->fix.smem_start);
clk_disable(fbi->clk);
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 33b2bb315a2a..2c0487f4f805 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2446,8 +2446,8 @@ static int pxafb_remove(struct platform_device *dev)
free_pages_exact(fbi->video_mem, fbi->video_mem_size);
- dma_free_writecombine(&dev->dev, fbi->dma_buff_size,
- fbi->dma_buff, fbi->dma_buff_phys);
+ dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff,
+ fbi->dma_buff_phys);
iounmap(fbi->mmio_base);
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index f72dd12456f9..5f4f696c2ecf 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1105,8 +1105,7 @@ static int s3c_fb_alloc_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
dev_dbg(sfb->dev, "want %u bytes for window\n", size);
- fbi->screen_base = dma_alloc_writecombine(sfb->dev, size,
- &map_dma, GFP_KERNEL);
+ fbi->screen_base = dma_alloc_wc(sfb->dev, size, &map_dma, GFP_KERNEL);
if (!fbi->screen_base)
return -ENOMEM;
@@ -1131,8 +1130,8 @@ static void s3c_fb_free_memory(struct s3c_fb *sfb, struct s3c_fb_win *win)
struct fb_info *fbi = win->fbinfo;
if (fbi->screen_base)
- dma_free_writecombine(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len),
- fbi->screen_base, fbi->fix.smem_start);
+ dma_free_wc(sfb->dev, PAGE_ALIGN(fbi->fix.smem_len),
+ fbi->screen_base, fbi->fix.smem_start);
}
/**
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index d6704add1601..0dd86be36afb 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -645,8 +645,8 @@ static int s3c2410fb_map_video_memory(struct fb_info *info)
dprintk("map_video_memory(fbi=%p) map_size %u\n", fbi, map_size);
- info->screen_base = dma_alloc_writecombine(fbi->dev, map_size,
- &map_dma, GFP_KERNEL);
+ info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma,
+ GFP_KERNEL);
if (info->screen_base) {
/* prevent initial garbage on screen */
@@ -667,8 +667,8 @@ static inline void s3c2410fb_unmap_video_memory(struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
- dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
- info->screen_base, info->fix.smem_start);
+ dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
+ info->screen_base, info->fix.smem_start);
}
static inline void modify_gpio(void __iomem *reg,
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index dcf774c15889..fc2aaa5aca23 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -567,8 +567,8 @@ static int sa1100fb_mmap(struct fb_info *info,
if (off < info->fix.smem_len) {
vma->vm_pgoff += 1; /* skip over the palette */
- return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu,
- fbi->map_dma, fbi->map_size);
+ return dma_mmap_wc(fbi->dev, vma, fbi->map_cpu, fbi->map_dma,
+ fbi->map_size);
}
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1099,8 +1099,8 @@ static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
* of the framebuffer.
*/
fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE);
- fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size,
- &fbi->map_dma, GFP_KERNEL);
+ fbi->map_cpu = dma_alloc_wc(fbi->dev, fbi->map_size, &fbi->map_dma,
+ GFP_KERNEL);
if (fbi->map_cpu) {
fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE;
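
The dma_*_writecombine() conversions in the hunks above and below are a pure rename to dma_*_wc(); the helpers are assumed to still funnel into dma_alloc_attrs() with DMA_ATTR_WRITE_COMBINE set, as in the 4.6-era struct dma_attrs interface. A sketch under that assumption; my_alloc_wc() is an illustrative name:

#include <linux/dma-mapping.h>

/* Hedged sketch of what dma_alloc_wc() is assumed to expand to. */
static inline void *my_alloc_wc(struct device *dev, size_t size,
				dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}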
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 48ccf6db62a2..e9cf19977285 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -174,7 +174,7 @@ static int simplefb_parse_pd(struct platform_device *pdev,
struct simplefb_par {
u32 palette[PSEUDO_PALETTE_SIZE];
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
- int clk_count;
+ unsigned int clk_count;
struct clk **clks;
#endif
#if defined CONFIG_OF && defined CONFIG_REGULATOR
@@ -213,7 +213,7 @@ static int simplefb_clocks_init(struct simplefb_par *par,
return 0;
par->clk_count = of_clk_get_parent_count(np);
- if (par->clk_count <= 0)
+ if (!par->clk_count)
return 0;
par->clks = kcalloc(par->clk_count, sizeof(struct clk *), GFP_KERNEL);
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index 295e0dedaf1f..20f7234e809e 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -2151,17 +2151,15 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
unsigned short RefreshRateTableIndex)
{
unsigned short CRT2Index, VCLKIndex = 0, VCLKIndexGEN = 0, VCLKIndexGENCRT = 0;
- unsigned short modeflag, resinfo, tempbx;
+ unsigned short resinfo, tempbx;
const unsigned char *CHTVVCLKPtr = NULL;
if(ModeNo <= 0x13) {
- modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo;
CRT2Index = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC;
VCLKIndexGEN = (SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)) >> 2) & 0x03;
VCLKIndexGENCRT = VCLKIndexGEN;
} else {
- modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
CRT2Index = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
VCLKIndexGEN = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
@@ -7270,7 +7268,7 @@ SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift)
static void
SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex)
{
- unsigned short temp, temp1, resinfo = 0;
+ unsigned short temp, temp1;
unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
if(!(SiS_Pr->SiS_VBType & VB_SIS30xCLV)) return;
@@ -7282,10 +7280,6 @@ SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
if(!(ROMAddr[0x61] & 0x04)) return;
}
- if(ModeNo > 0x13) {
- resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
- }
-
SiS_SetRegOR(SiS_Pr->SiS_Part4Port,0x3a,0x08);
temp = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x3a);
if(!(temp & 0x01)) {
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index fefde7c6add7..f948baa16d82 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -614,22 +614,6 @@ int xxxfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
}
/**
- * xxxfb_rotate - NOT a required function. If your hardware
- * supports rotation the whole screen then
- * you would provide a hook for this.
- *
- * @info: frame buffer structure that represents a single frame buffer
- * @angle: The angle we rotate the screen.
- *
- * This operation is used to set or alter the properities of the
- * cursor.
- */
-void xxxfb_rotate(struct fb_info *info, int angle)
-{
-/* Will be deprecated */
-}
-
-/**
* xxxfb_sync - NOT a required function. Normally the accel engine
* for a graphics card take a specific amount of time.
* Often we have to wait for the accelerator to finish
@@ -665,7 +649,6 @@ static struct fb_ops xxxfb_ops = {
.fb_copyarea = xxxfb_copyarea, /* Needed !!! */
.fb_imageblit = xxxfb_imageblit, /* Needed !!! */
.fb_cursor = xxxfb_cursor, /* Optional !!! */
- .fb_rotate = xxxfb_rotate,
.fb_sync = xxxfb_sync,
.fb_ioctl = xxxfb_ioctl,
.fb_mmap = xxxfb_mmap,
diff --git a/drivers/video/fbdev/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 08879bdfad35..fb37f6e05391 100644
--- a/drivers/video/fbdev/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
@@ -1,9 +1,10 @@
-/* sunxvr1000.c: Sun XVR-1000 driver for sparc64 systems
+/* sunxvr1000.c: Sun XVR-1000 fb driver for sparc64 systems
+ *
+ * License: GPL
*
* Copyright (C) 2010 David S. Miller (davem@davemloft.net)
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/init.h>
@@ -173,36 +174,19 @@ err_out:
return err;
}
-static int gfb_remove(struct platform_device *op)
-{
- struct fb_info *info = dev_get_drvdata(&op->dev);
- struct gfb_info *gp = info->par;
-
- unregister_framebuffer(info);
-
- iounmap(gp->fb_base);
-
- of_iounmap(&op->resource[6], gp->fb_base, gp->fb_size);
-
- framebuffer_release(info);
-
- return 0;
-}
-
static const struct of_device_id gfb_match[] = {
{
.name = "SUNW,gfb",
},
{},
};
-MODULE_DEVICE_TABLE(of, ffb_match);
static struct platform_driver gfb_driver = {
.probe = gfb_probe,
- .remove = gfb_remove,
.driver = {
- .name = "gfb",
- .of_match_table = gfb_match,
+ .name = "gfb",
+ .of_match_table = gfb_match,
+ .suppress_bind_attrs = true,
},
};
@@ -213,16 +197,4 @@ static int __init gfb_init(void)
return platform_driver_register(&gfb_driver);
}
-
-static void __exit gfb_exit(void)
-{
- platform_driver_unregister(&gfb_driver);
-}
-
-module_init(gfb_init);
-module_exit(gfb_exit);
-
-MODULE_DESCRIPTION("framebuffer driver for Sun XVR-1000 graphics");
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_VERSION("1.0");
-MODULE_LICENSE("GPL");
+device_initcall(gfb_init);
diff --git a/drivers/video/fbdev/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 843b6bab0483..1a053292f2eb 100644
--- a/drivers/video/fbdev/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
@@ -1,9 +1,10 @@
-/* s3d.c: Sun 3DLABS XVR-2500 et al. driver for sparc64 systems
+/* sunxvr2500.c: Sun 3DLABS XVR-2500 et al. fb driver for sparc64 systems
+ *
+ * License: GPL
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/pci.h>
@@ -219,22 +220,6 @@ err_out:
return err;
}
-static void s3d_pci_unregister(struct pci_dev *pdev)
-{
- struct fb_info *info = pci_get_drvdata(pdev);
- struct s3d_info *sp = info->par;
-
- unregister_framebuffer(info);
-
- iounmap(sp->fb_base);
-
- pci_release_region(pdev, 1);
-
- framebuffer_release(info);
-
- pci_disable_device(pdev);
-}
-
static struct pci_device_id s3d_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002c), },
{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x002d), },
@@ -248,10 +233,12 @@ static struct pci_device_id s3d_pci_table[] = {
};
static struct pci_driver s3d_driver = {
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
.name = "s3d",
.id_table = s3d_pci_table,
.probe = s3d_pci_register,
- .remove = s3d_pci_unregister,
};
static int __init s3d_init(void)
@@ -261,16 +248,4 @@ static int __init s3d_init(void)
return pci_register_driver(&s3d_driver);
}
-
-static void __exit s3d_exit(void)
-{
- pci_unregister_driver(&s3d_driver);
-}
-
-module_init(s3d_init);
-module_exit(s3d_exit);
-
-MODULE_DESCRIPTION("framebuffer driver for Sun XVR-2500 graphics");
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_VERSION("1.0");
-MODULE_LICENSE("GPL");
+device_initcall(s3d_init);
diff --git a/drivers/video/fbdev/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index 387350d004df..dc0d886e4e7e 100644
--- a/drivers/video/fbdev/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
@@ -1,9 +1,10 @@
-/* sunxvr500.c: Sun 3DLABS XVR-500 Expert3D driver for sparc64 systems
+/* sunxvr500.c: Sun 3DLABS XVR-500 Expert3D fb driver for sparc64 systems
+ *
+ * License: GPL
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/pci.h>
@@ -392,25 +393,6 @@ err_out:
return err;
}
-static void e3d_pci_unregister(struct pci_dev *pdev)
-{
- struct fb_info *info = pci_get_drvdata(pdev);
- struct e3d_info *ep = info->par;
-
- unregister_framebuffer(info);
-
- iounmap(ep->ramdac);
- iounmap(ep->fb_base);
-
- pci_release_region(pdev, 0);
- pci_release_region(pdev, 1);
-
- fb_dealloc_cmap(&info->cmap);
- framebuffer_release(info);
-
- pci_disable_device(pdev);
-}
-
static struct pci_device_id e3d_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a0), },
{ PCI_DEVICE(0x1091, 0x7a0), },
@@ -434,10 +416,12 @@ static struct pci_device_id e3d_pci_table[] = {
};
static struct pci_driver e3d_driver = {
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
.name = "e3d",
.id_table = e3d_pci_table,
.probe = e3d_pci_register,
- .remove = e3d_pci_unregister,
};
static int __init e3d_init(void)
@@ -447,16 +431,4 @@ static int __init e3d_init(void)
return pci_register_driver(&e3d_driver);
}
-
-static void __exit e3d_exit(void)
-{
- pci_unregister_driver(&e3d_driver);
-}
-
-module_init(e3d_init);
-module_exit(e3d_exit);
-
-MODULE_DESCRIPTION("framebuffer driver for Sun XVR-500 graphics");
-MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
-MODULE_VERSION("1.0");
-MODULE_LICENSE("GPL");
+device_initcall(e3d_init);
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 32c8fc5f7a5c..60bdad3a689b 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -244,9 +244,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/* Get the physical addresses of the source buffer */
down_read(&current->mm->mmap_sem);
- num_pinned = get_user_pages(current, current->mm,
- param.local_vaddr - lb_offset, num_pages,
- (param.source == -1) ? READ : WRITE,
+ num_pinned = get_user_pages(param.local_vaddr - lb_offset,
+ num_pages, (param.source == -1) ? READ : WRITE,
0, pages, NULL);
up_read(&current->mm->mmap_sem);
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index cab9f3f63a38..77590320d44c 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -60,7 +60,7 @@ config VIRTIO_INPUT
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
select VIRTIO
---help---
This driver provides support for memory mapped virtio
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 0c3691f46575..7b6d74f0c72f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -22,14 +22,14 @@
#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
+#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
+#include <linux/mm.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -49,11 +49,13 @@ struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
- /* Where the ballooning thread waits for config to change. */
- wait_queue_head_t config_change;
+ /* The balloon servicing is delegated to a freezable workqueue. */
+ struct work_struct update_balloon_stats_work;
+ struct work_struct update_balloon_size_work;
- /* The thread servicing the balloon. */
- struct task_struct *thread;
+ /* Prevent updating balloon when it is being canceled. */
+ spinlock_t stop_update_lock;
+ bool stop_update;
/* Waiting for host to ack the pages we released. */
wait_queue_head_t acked;
@@ -76,7 +78,6 @@ struct virtio_balloon {
u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
/* Memory statistics */
- int need_stats_update;
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
/* To register callback in oom notifier call chain */
@@ -123,6 +124,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
/* When host has read buffer, this completes via balloon_ack */
wait_event(vb->acked, virtqueue_get_buf(vq, &len));
+
}
static void set_page_pfns(u32 pfns[], struct page *page)
@@ -135,9 +137,10 @@ static void set_page_pfns(u32 pfns[], struct page *page)
pfns[i] = page_to_balloon_pfn(page) + i;
}
-static void fill_balloon(struct virtio_balloon *vb, size_t num)
+static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
{
struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
+ unsigned num_allocated_pages;
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
@@ -162,10 +165,13 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
adjust_managed_page_count(page, -1);
}
+ num_allocated_pages = vb->num_pfns;
/* Did we get any? */
if (vb->num_pfns != 0)
tell_host(vb, vb->inflate_vq);
mutex_unlock(&vb->balloon_lock);
+
+ return num_allocated_pages;
}
static void release_pages_balloon(struct virtio_balloon *vb)
@@ -229,10 +235,13 @@ static void update_balloon_stats(struct virtio_balloon *vb)
unsigned long events[NR_VM_EVENT_ITEMS];
struct sysinfo i;
int idx = 0;
+ long available;
all_vm_events(events);
si_meminfo(&i);
+ available = si_mem_available();
+
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
@@ -243,6 +252,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
pages_to_bytes(i.freeram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
pages_to_bytes(i.totalram));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
+ pages_to_bytes(available));
}
/*
@@ -251,14 +262,17 @@ static void update_balloon_stats(struct virtio_balloon *vb)
* with a single buffer. From that point forward, all conversations consist of
* a hypervisor request (a call to this function) which directs us to refill
* the virtqueue with a fresh stats buffer. Since stats collection can sleep,
- * we notify our kthread which does the actual work via stats_handle_request().
+ * we delegate the job to a freezable workqueue that will do the actual work via
+ * stats_handle_request().
*/
static void stats_request(struct virtqueue *vq)
{
struct virtio_balloon *vb = vq->vdev->priv;
- vb->need_stats_update = 1;
- wake_up(&vb->config_change);
+ spin_lock(&vb->stop_update_lock);
+ if (!vb->stop_update)
+ queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
+ spin_unlock(&vb->stop_update_lock);
}
static void stats_handle_request(struct virtio_balloon *vb)
@@ -267,7 +281,6 @@ static void stats_handle_request(struct virtio_balloon *vb)
struct scatterlist sg;
unsigned int len;
- vb->need_stats_update = 0;
update_balloon_stats(vb);
vq = vb->stats_vq;
@@ -281,8 +294,12 @@ static void stats_handle_request(struct virtio_balloon *vb)
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
+ unsigned long flags;
- wake_up(&vb->config_change);
+ spin_lock_irqsave(&vb->stop_update_lock, flags);
+ if (!vb->stop_update)
+ queue_work(system_freezable_wq, &vb->update_balloon_size_work);
+ spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}
static inline s64 towards_target(struct virtio_balloon *vb)
@@ -345,43 +362,32 @@ static int virtballoon_oom_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static int balloon(void *_vballoon)
+static void update_balloon_stats_func(struct work_struct *work)
{
- struct virtio_balloon *vb = _vballoon;
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
-
- set_freezable();
- while (!kthread_should_stop()) {
- s64 diff;
-
- try_to_freeze();
-
- add_wait_queue(&vb->config_change, &wait);
- for (;;) {
- if ((diff = towards_target(vb)) != 0 ||
- vb->need_stats_update ||
- kthread_should_stop() ||
- freezing(current))
- break;
- wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
- }
- remove_wait_queue(&vb->config_change, &wait);
+ struct virtio_balloon *vb;
- if (vb->need_stats_update)
- stats_handle_request(vb);
- if (diff > 0)
- fill_balloon(vb, diff);
- else if (diff < 0)
- leak_balloon(vb, -diff);
- update_balloon_size(vb);
+ vb = container_of(work, struct virtio_balloon,
+ update_balloon_stats_work);
+ stats_handle_request(vb);
+}
- /*
- * For large balloon changes, we could spend a lot of time
- * and always have work to do. Be nice if preempt disabled.
- */
- cond_resched();
- }
- return 0;
+static void update_balloon_size_func(struct work_struct *work)
+{
+ struct virtio_balloon *vb;
+ s64 diff;
+
+ vb = container_of(work, struct virtio_balloon,
+ update_balloon_size_work);
+ diff = towards_target(vb);
+
+ if (diff > 0)
+ diff -= fill_balloon(vb, diff);
+ else if (diff < 0)
+ diff += leak_balloon(vb, -diff);
+ update_balloon_size(vb);
+
+ if (diff)
+ queue_work(system_freezable_wq, work);
}
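
The balloon conversion above replaces the kthread with two work items on system_freezable_wq, using stop_update_lock/stop_update to close the race between a late config interrupt and device removal. A minimal sketch of that queue-then-quiesce pattern; struct demo, demo_kick() and demo_teardown() are illustrative names:

#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct demo {
	spinlock_t lock;
	bool stopping;
	struct work_struct work;
};

static void demo_kick(struct demo *d)
{
	spin_lock(&d->lock);
	if (!d->stopping)		/* never queue once teardown began */
		queue_work(system_freezable_wq, &d->work);
	spin_unlock(&d->lock);
}

static void demo_teardown(struct demo *d)
{
	spin_lock(&d->lock);
	d->stopping = true;		/* block further queueing */
	spin_unlock(&d->lock);
	cancel_work_sync(&d->work);	/* wait out anything in flight */
}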
static int init_vqs(struct virtio_balloon *vb)
@@ -499,12 +505,14 @@ static int virtballoon_probe(struct virtio_device *vdev)
goto out;
}
+ INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
+ INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
+ spin_lock_init(&vb->stop_update_lock);
+ vb->stop_update = false;
vb->num_pages = 0;
mutex_init(&vb->balloon_lock);
- init_waitqueue_head(&vb->config_change);
init_waitqueue_head(&vb->acked);
vb->vdev = vdev;
- vb->need_stats_update = 0;
balloon_devinfo_init(&vb->vb_dev_info);
#ifdef CONFIG_BALLOON_COMPACTION
@@ -523,16 +531,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- vb->thread = kthread_run(balloon, vb, "vballoon");
- if (IS_ERR(vb->thread)) {
- err = PTR_ERR(vb->thread);
- goto out_del_vqs;
- }
-
return 0;
-out_del_vqs:
- unregister_oom_notifier(&vb->nb);
out_oom_notify:
vdev->config->del_vqs(vdev);
out_free_vb:
@@ -559,7 +559,13 @@ static void virtballoon_remove(struct virtio_device *vdev)
struct virtio_balloon *vb = vdev->priv;
unregister_oom_notifier(&vb->nb);
- kthread_stop(vb->thread);
+
+ spin_lock_irq(&vb->stop_update_lock);
+ vb->stop_update = true;
+ spin_unlock_irq(&vb->stop_update_lock);
+ cancel_work_sync(&vb->update_balloon_size_work);
+ cancel_work_sync(&vb->update_balloon_stats_work);
+
remove_common(vb);
kfree(vb);
}
@@ -570,10 +576,9 @@ static int virtballoon_freeze(struct virtio_device *vdev)
struct virtio_balloon *vb = vdev->priv;
/*
- * The kthread is already frozen by the PM core before this
+ * The workqueue is already frozen by the PM core before this
* function is called.
*/
-
remove_common(vb);
return 0;
}
@@ -589,7 +594,8 @@ static int virtballoon_restore(struct virtio_device *vdev)
virtio_device_ready(vdev);
- fill_balloon(vb, towards_target(vb));
+ if (towards_target(vb))
+ virtballoon_changed(vdev);
update_balloon_size(vb);
return 0;
}
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 745c6ee1bb3e..48bfea91dbca 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -99,12 +99,6 @@ struct virtio_mmio_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
- /* the number of entries in the queue */
- unsigned int num;
-
- /* the virtual address of the ring queue */
- void *queue;
-
/* the list node for the virtqueues list */
struct list_head node;
};
@@ -322,15 +316,13 @@ static void vm_del_vq(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
struct virtio_mmio_vq_info *info = vq->priv;
- unsigned long flags, size;
+ unsigned long flags;
unsigned int index = vq->index;
spin_lock_irqsave(&vm_dev->lock, flags);
list_del(&info->node);
spin_unlock_irqrestore(&vm_dev->lock, flags);
- vring_del_virtqueue(vq);
-
/* Select and deactivate the queue */
writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
if (vm_dev->version == 1) {
@@ -340,8 +332,8 @@ static void vm_del_vq(struct virtqueue *vq)
WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
}
- size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
- free_pages_exact(info->queue, size);
+ vring_del_virtqueue(vq);
+
kfree(info);
}
@@ -356,8 +348,6 @@ static void vm_del_vqs(struct virtio_device *vdev)
free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
-
-
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
void (*callback)(struct virtqueue *vq),
const char *name)
@@ -365,7 +355,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
struct virtio_mmio_vq_info *info;
struct virtqueue *vq;
- unsigned long flags, size;
+ unsigned long flags;
+ unsigned int num;
int err;
if (!name)
@@ -388,66 +379,40 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
goto error_kmalloc;
}
- /* Allocate pages for the queue - start with a queue as big as
- * possible (limited by maximum size allowed by device), drop down
- * to a minimal size, just big enough to fit descriptor table
- * and two rings (which makes it "alignment_size * 2")
- */
- info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
-
- /* If the device reports a 0 entry queue, we won't be able to
- * use it to perform I/O, and vring_new_virtqueue() can't create
- * empty queues anyway, so don't bother to set up the device.
- */
- if (info->num == 0) {
+ num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+ if (num == 0) {
err = -ENOENT;
- goto error_alloc_pages;
- }
-
- while (1) {
- size = PAGE_ALIGN(vring_size(info->num,
- VIRTIO_MMIO_VRING_ALIGN));
- /* Did the last iter shrink the queue below minimum size? */
- if (size < VIRTIO_MMIO_VRING_ALIGN * 2) {
- err = -ENOMEM;
- goto error_alloc_pages;
- }
-
- info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
- if (info->queue)
- break;
-
- info->num /= 2;
+ goto error_new_virtqueue;
}
/* Create the vring */
- vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
- true, info->queue, vm_notify, callback, name);
+ vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+ true, true, vm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
}
/* Activate the queue */
- writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
+ writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
if (vm_dev->version == 1) {
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
- writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
+ writel(virtqueue_get_desc_addr(vq) >> PAGE_SHIFT,
vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
} else {
u64 addr;
- addr = virt_to_phys(info->queue);
+ addr = virtqueue_get_desc_addr(vq);
writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
writel((u32)(addr >> 32),
vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
- addr = virt_to_phys(virtqueue_get_avail(vq));
+ addr = virtqueue_get_avail_addr(vq);
writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
writel((u32)(addr >> 32),
vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
- addr = virt_to_phys(virtqueue_get_used(vq));
+ addr = virtqueue_get_used_addr(vq);
writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
writel((u32)(addr >> 32),
vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
@@ -471,8 +436,6 @@ error_new_virtqueue:
writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
}
- free_pages_exact(info->queue, size);
-error_alloc_pages:
kfree(info);
error_kmalloc:
error_available:
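
With vring_create_virtqueue() owning the ring allocation, the transport no longer touches ring memory directly; it reads the geometry back through virtqueue_get_vring_size() and the virtqueue_get_*_addr() accessors. A condensed sketch of the version-2 register programming from the hunk above; demo_program_vq() is an illustrative name, and the register offsets are assumed to come from linux/virtio_mmio.h:

#include <linux/io.h>
#include <linux/virtio.h>
#include <linux/virtio_mmio.h>

static void demo_program_vq(void __iomem *base, struct virtqueue *vq)
{
	u64 addr;

	writel(virtqueue_get_vring_size(vq), base + VIRTIO_MMIO_QUEUE_NUM);

	addr = virtqueue_get_desc_addr(vq);
	writel((u32)addr, base + VIRTIO_MMIO_QUEUE_DESC_LOW);
	writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

	addr = virtqueue_get_avail_addr(vq);
	writel((u32)addr, base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
	writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

	addr = virtqueue_get_used_addr(vq);
	writel((u32)addr, base + VIRTIO_MMIO_QUEUE_USED_LOW);
	writel((u32)(addr >> 32), base + VIRTIO_MMIO_QUEUE_USED_HIGH);
}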
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index f6bed86c17f9..d9a905827967 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -467,7 +467,7 @@ static const struct dev_pm_ops virtio_pci_pm_ops = {
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
- { PCI_DEVICE(0x1af4, PCI_ANY_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
{ 0 }
};
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 2cc252270b2d..28263200ed42 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -35,12 +35,6 @@ struct virtio_pci_vq_info {
/* the actual virtqueue */
struct virtqueue *vq;
- /* the number of entries in the queue */
- int num;
-
- /* the virtual address of the ring queue */
- void *queue;
-
/* the list node for the virtqueues list */
struct list_head node;
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 48bc9797e530..8c4e61783441 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -119,7 +119,6 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
u16 msix_vec)
{
struct virtqueue *vq;
- unsigned long size;
u16 num;
int err;
@@ -131,27 +130,19 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
return ERR_PTR(-ENOENT);
- info->num = num;
info->msix_vector = msix_vec;
- size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
- info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
- if (info->queue == NULL)
+ /* create the vring */
+ vq = vring_create_virtqueue(index, num,
+ VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
+ true, false, vp_notify, callback, name);
+ if (!vq)
return ERR_PTR(-ENOMEM);
/* activate the queue */
- iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
+ iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
- /* create the vring */
- vq = vring_new_virtqueue(index, info->num,
- VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
- true, info->queue, vp_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto out_activate_queue;
- }
-
vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
@@ -159,17 +150,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
- goto out_assign;
+ goto out_deactivate;
}
}
return vq;
-out_assign:
- vring_del_virtqueue(vq);
-out_activate_queue:
+out_deactivate:
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
- free_pages_exact(info->queue, size);
+ vring_del_virtqueue(vq);
return ERR_PTR(err);
}
@@ -177,7 +166,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
{
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- unsigned long size;
iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
@@ -188,13 +176,10 @@ static void del_vq(struct virtio_pci_vq_info *info)
ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
}
- vring_del_virtqueue(vq);
-
/* Select and deactivate the queue */
iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
- size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
- free_pages_exact(info->queue, size);
+ vring_del_virtqueue(vq);
}
static const struct virtio_config_ops virtio_pci_config_ops = {
@@ -227,6 +212,13 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
return -ENODEV;
}
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(32));
+ if (rc)
+ dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
if (rc)
return rc;
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 7760fc1a2218..e76bd91a29da 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -17,6 +17,7 @@
*
*/
+#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"
@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
vp_iowrite8(0, &vp_dev->common->device_status);
- /* Flush out the status write, and flush in device writes,
- * including MSI-X interrupts, if any. */
- vp_ioread8(&vp_dev->common->device_status);
+ /* After writing 0 to device_status, the driver MUST wait for a read of
+ * device_status to return 0 before reinitializing the device.
+ * This will flush out the status write, and flush in device writes,
+ * including MSI-X interrupts, if any.
+ */
+ while (vp_ioread8(&vp_dev->common->device_status))
+ msleep(1);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -287,31 +292,6 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
return vp_ioread16(&vp_dev->common->msix_config);
}
-static size_t vring_pci_size(u16 num)
-{
- /* We only need a cacheline separation. */
- return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES));
-}
-
-static void *alloc_virtqueue_pages(int *num)
-{
- void *pages;
-
- /* TODO: allocate each queue chunk individually */
- for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) {
- pages = alloc_pages_exact(vring_pci_size(*num),
- GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
- if (pages)
- return pages;
- }
-
- if (!*num)
- return NULL;
-
- /* Try to get a single page. You are my only hope! */
- return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO);
-}
-
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
struct virtio_pci_vq_info *info,
unsigned index,
@@ -343,29 +323,22 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
/* get offset of notification word for this vq */
off = vp_ioread16(&cfg->queue_notify_off);
- info->num = num;
info->msix_vector = msix_vec;
- info->queue = alloc_virtqueue_pages(&info->num);
- if (info->queue == NULL)
- return ERR_PTR(-ENOMEM);
-
/* create the vring */
- vq = vring_new_virtqueue(index, info->num,
- SMP_CACHE_BYTES, &vp_dev->vdev,
- true, info->queue, vp_notify, callback, name);
- if (!vq) {
- err = -ENOMEM;
- goto err_new_queue;
- }
+ vq = vring_create_virtqueue(index, num,
+ SMP_CACHE_BYTES, &vp_dev->vdev,
+ true, true, vp_notify, callback, name);
+ if (!vq)
+ return ERR_PTR(-ENOMEM);
/* activate the queue */
- vp_iowrite16(num, &cfg->queue_size);
- vp_iowrite64_twopart(virt_to_phys(info->queue),
+ vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
+ vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
&cfg->queue_desc_lo, &cfg->queue_desc_hi);
- vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
+ vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
&cfg->queue_avail_lo, &cfg->queue_avail_hi);
- vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
+ vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
&cfg->queue_used_lo, &cfg->queue_used_hi);
if (vp_dev->notify_base) {
@@ -410,8 +383,6 @@ err_assign_vector:
pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
vring_del_virtqueue(vq);
-err_new_queue:
- free_pages_exact(info->queue, vring_pci_size(info->num));
return ERR_PTR(err);
}
@@ -456,8 +427,6 @@ static void del_vq(struct virtio_pci_vq_info *info)
pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);
vring_del_virtqueue(vq);
-
- free_pages_exact(info->queue, vring_pci_size(info->num));
}
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
@@ -641,6 +610,13 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
return -EINVAL;
}
+ err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (err)
+ err = dma_set_mask_and_coherent(&pci_dev->dev,
+ DMA_BIT_MASK(32));
+ if (err)
+ dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
/* Device capability is only mandatory for devices that have
* device-specific configuration.
*/
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index e12e385f7ac3..5c802d47892c 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
+#include <linux/dma-mapping.h>
+#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
@@ -54,6 +56,11 @@
#define END_USE(vq)
#endif
+struct vring_desc_state {
+ void *data; /* Data for callback. */
+ struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
+};
+
struct vring_virtqueue {
struct virtqueue vq;
@@ -89,6 +96,11 @@ struct vring_virtqueue {
/* How to notify other side. FIXME: commonalize hcalls! */
bool (*notify)(struct virtqueue *vq);
+ /* DMA, allocation, and size information */
+ bool we_own_ring;
+ size_t queue_size_in_bytes;
+ dma_addr_t queue_dma_addr;
+
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
@@ -98,12 +110,120 @@ struct vring_virtqueue {
ktime_t last_add_time;
#endif
- /* Tokens for callbacks. */
- void *data[];
+ /* Per-descriptor state. */
+ struct vring_desc_state desc_state[];
};
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+/*
+ * The interaction between virtio and a possible IOMMU is a mess.
+ *
+ * On most systems with virtio, physical addresses match bus addresses,
+ * and it doesn't particularly matter whether we use the DMA API.
+ *
+ * On some systems, including Xen and any system with a physical device
+ * that speaks virtio behind a physical IOMMU, we must use the DMA API
+ * for virtio DMA to work at all.
+ *
+ * On other systems, including SPARC and PPC64, virtio-pci devices are
+ * enumerated as though they are behind an IOMMU, but the virtio host
+ * ignores the IOMMU, so we must either pretend that the IOMMU isn't
+ * there or somehow map everything as the identity.
+ *
+ * For the time being, we preserve historic behavior and bypass the DMA
+ * API.
+ */
+
+static bool vring_use_dma_api(struct virtio_device *vdev)
+{
+ /*
+ * In theory, it's possible to have a buggy QEMU-supplied
+ * emulated Q35 IOMMU and Xen enabled at the same time. On
+ * such a configuration, virtio has never worked and will
+ * not work without an even larger kludge. Instead, enable
+ * the DMA API if we're a Xen guest, which at least allows
+ * all of the sensible Xen configurations to work correctly.
+ */
+ if (xen_domain())
+ return true;
+
+ return false;
+}
+
+/*
+ * The DMA ops on various arches are rather gnarly right now, and
+ * making all of the arch DMA ops work on the vring device itself
+ * is a mess. For now, we use the parent device for DMA ops.
+ */
+struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+{
+ return vq->vq.vdev->dev.parent;
+}
+
+/* Map one sg entry. */
+static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
+ struct scatterlist *sg,
+ enum dma_data_direction direction)
+{
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return (dma_addr_t)sg_phys(sg);
+
+ /*
+ * We can't use dma_map_sg, because we don't use scatterlists in
+ * the way it expects (we don't guarantee that the scatterlist
+ * will exist for the lifetime of the mapping).
+ */
+ return dma_map_page(vring_dma_dev(vq),
+ sg_page(sg), sg->offset, sg->length,
+ direction);
+}
+
+static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
+ void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return (dma_addr_t)virt_to_phys(cpu_addr);
+
+ return dma_map_single(vring_dma_dev(vq),
+ cpu_addr, size, direction);
+}
+
+static void vring_unmap_one(const struct vring_virtqueue *vq,
+ struct vring_desc *desc)
+{
+ u16 flags;
+
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return;
+
+ flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
+
+ if (flags & VRING_DESC_F_INDIRECT) {
+ dma_unmap_single(vring_dma_dev(vq),
+ virtio64_to_cpu(vq->vq.vdev, desc->addr),
+ virtio32_to_cpu(vq->vq.vdev, desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(vring_dma_dev(vq),
+ virtio64_to_cpu(vq->vq.vdev, desc->addr),
+ virtio32_to_cpu(vq->vq.vdev, desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+ dma_addr_t addr)
+{
+ if (!vring_use_dma_api(vq->vq.vdev))
+ return 0;
+
+ return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
unsigned int total_sg, gfp_t gfp)
{
@@ -137,7 +257,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
struct vring_virtqueue *vq = to_vvq(_vq);
struct scatterlist *sg;
struct vring_desc *desc;
- unsigned int i, n, avail, descs_used, uninitialized_var(prev);
+ unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
int head;
bool indirect;
@@ -177,21 +297,15 @@ static inline int virtqueue_add(struct virtqueue *_vq,
if (desc) {
/* Use a single buffer which doesn't continue */
- vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
- vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
- /* avoid kmemleak false positive (hidden by virt_to_phys) */
- kmemleak_ignore(desc);
- vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
-
+ indirect = true;
/* Set up rest to use this indirect table. */
i = 0;
descs_used = 1;
- indirect = true;
} else {
+ indirect = false;
desc = vq->vring.desc;
i = head;
descs_used = total_sg;
- indirect = false;
}
if (vq->vq.num_free < descs_used) {
@@ -206,13 +320,14 @@ static inline int virtqueue_add(struct virtqueue *_vq,
return -ENOSPC;
}
- /* We're about to use some buffers from the free list. */
- vq->vq.num_free -= descs_used;
-
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
- desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+ desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
@@ -220,8 +335,12 @@ static inline int virtqueue_add(struct virtqueue *_vq,
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
- desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
+ desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
prev = i;
i = virtio16_to_cpu(_vq->vdev, desc[i].next);
@@ -230,14 +349,33 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
+ if (indirect) {
+ /* Now that the indirect table is filled in, map it. */
+ dma_addr_t addr = vring_map_single(
+ vq, desc, total_sg * sizeof(struct vring_desc),
+ DMA_TO_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
+ vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
+ vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
+
+ vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
+ }
+
+ /* We're using some buffers from the free list. */
+ vq->vq.num_free -= descs_used;
+
/* Update free pointer */
if (indirect)
vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
else
vq->free_head = i;
- /* Set token. */
- vq->data[head] = data;
+ /* Store token and indirect buffer state. */
+ vq->desc_state[head].data = data;
+ if (indirect)
+ vq->desc_state[head].indir_desc = desc;
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
@@ -260,6 +398,24 @@ static inline int virtqueue_add(struct virtqueue *_vq,
virtqueue_kick(_vq);
return 0;
+
+unmap_release:
+ err_idx = i;
+ i = head;
+
+ for (n = 0; n < total_sg; n++) {
+ if (i == err_idx)
+ break;
+ vring_unmap_one(vq, &desc[i]);
+ i = vq->vring.desc[i].next;
+ }
+
+ vq->vq.num_free += total_sg;
+
+ if (indirect)
+ kfree(desc);
+
+ return -EIO;
}
/**
@@ -430,27 +586,43 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
- unsigned int i;
+ unsigned int i, j;
+ u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
/* Clear data ptr. */
- vq->data[head] = NULL;
+ vq->desc_state[head].data = NULL;
- /* Put back on free list: find end */
+ /* Put back on free list: unmap first-level descriptors and find end */
i = head;
- /* Free the indirect table */
- if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
- kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));
-
- while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
+ while (vq->vring.desc[i].flags & nextflag) {
+ vring_unmap_one(vq, &vq->vring.desc[i]);
i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
vq->vq.num_free++;
}
+ vring_unmap_one(vq, &vq->vring.desc[i]);
vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
vq->free_head = head;
+
/* Plus final descriptor */
vq->vq.num_free++;
+
+ /* Free the indirect table, if any, now that it's unmapped. */
+ if (vq->desc_state[head].indir_desc) {
+ struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
+ u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);
+
+ BUG_ON(!(vq->vring.desc[head].flags &
+ cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
+ BUG_ON(len == 0 || len % sizeof(struct vring_desc));
+
+ for (j = 0; j < len / sizeof(struct vring_desc); j++)
+ vring_unmap_one(vq, &indir_desc[j]);
+
+ kfree(vq->desc_state[head].indir_desc);
+ vq->desc_state[head].indir_desc = NULL;
+ }
}
static inline bool more_used(const struct vring_virtqueue *vq)
@@ -505,13 +677,13 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
BAD_RING(vq, "id %u out of range\n", i);
return NULL;
}
- if (unlikely(!vq->data[i])) {
+ if (unlikely(!vq->desc_state[i].data)) {
BAD_RING(vq, "id %u is not a head!\n", i);
return NULL;
}
/* detach_buf clears data, so grab it now. */
- ret = vq->data[i];
+ ret = vq->desc_state[i].data;
detach_buf(vq, i);
vq->last_used_idx++;
/* If we expect an interrupt for the next entry, tell host
@@ -685,10 +857,10 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
START_USE(vq);
for (i = 0; i < vq->vring.num; i++) {
- if (!vq->data[i])
+ if (!vq->desc_state[i].data)
continue;
/* detach_buf clears data, so grab it now. */
- buf = vq->data[i];
+ buf = vq->desc_state[i].data;
detach_buf(vq, i);
vq->avail_idx_shadow--;
vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
@@ -723,35 +895,31 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
EXPORT_SYMBOL_GPL(vring_interrupt);
-struct virtqueue *vring_new_virtqueue(unsigned int index,
- unsigned int num,
- unsigned int vring_align,
- struct virtio_device *vdev,
- bool weak_barriers,
- void *pages,
- bool (*notify)(struct virtqueue *),
- void (*callback)(struct virtqueue *),
- const char *name)
+struct virtqueue *__vring_new_virtqueue(unsigned int index,
+ struct vring vring,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
{
- struct vring_virtqueue *vq;
unsigned int i;
+ struct vring_virtqueue *vq;
- /* We assume num is a power of 2. */
- if (num & (num - 1)) {
- dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
- return NULL;
- }
-
- vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
+ vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
+ GFP_KERNEL);
if (!vq)
return NULL;
- vring_init(&vq->vring, num, pages, vring_align);
+ vq->vring = vring;
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
- vq->vq.num_free = num;
+ vq->vq.num_free = vring.num;
vq->vq.index = index;
+ vq->we_own_ring = false;
+ vq->queue_dma_addr = 0;
+ vq->queue_size_in_bytes = 0;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
@@ -776,20 +944,145 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
/* Put everything in free lists. */
vq->free_head = 0;
- for (i = 0; i < num-1; i++) {
+ for (i = 0; i < vring.num-1; i++)
vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
- vq->data[i] = NULL;
- }
- vq->data[i] = NULL;
+ memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));
return &vq->vq;
}
+EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
+
+static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ if (vring_use_dma_api(vdev)) {
+ return dma_alloc_coherent(vdev->dev.parent, size,
+ dma_handle, flag);
+ } else {
+ void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
+ if (queue) {
+ phys_addr_t phys_addr = virt_to_phys(queue);
+ *dma_handle = (dma_addr_t)phys_addr;
+
+ /*
+ * Sanity check: make sure we didn't truncate
+ * the address. The only arches I can find that
+ * have 64-bit phys_addr_t but 32-bit dma_addr_t
+ * are certain non-highmem MIPS and x86
+ * configurations, but these configurations
+ * should never allocate physical pages above 32
+ * bits, so this is fine. Just in case, throw a
+ * warning and abort if we end up with an
+ * unrepresentable address.
+ */
+ if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
+ free_pages_exact(queue, PAGE_ALIGN(size));
+ return NULL;
+ }
+ }
+ return queue;
+ }
+}
+
+static void vring_free_queue(struct virtio_device *vdev, size_t size,
+ void *queue, dma_addr_t dma_handle)
+{
+ if (vring_use_dma_api(vdev)) {
+ dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
+ } else {
+ free_pages_exact(queue, PAGE_ALIGN(size));
+ }
+}
+
+struct virtqueue *vring_create_virtqueue(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct virtqueue *vq;
+ void *queue;
+ dma_addr_t dma_addr;
+ size_t queue_size_in_bytes;
+ struct vring vring;
+
+ /* We assume num is a power of 2. */
+ if (num & (num - 1)) {
+ dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
+ return NULL;
+ }
+
+ /* TODO: allocate each queue chunk individually */
+ for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
+ queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
+ &dma_addr,
+ GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ if (queue)
+ break;
+ }
+
+ if (!num)
+ return NULL;
+
+ if (!queue) {
+ /* Try to get a single page. You are my only hope! */
+ queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
+ &dma_addr, GFP_KERNEL|__GFP_ZERO);
+ }
+ if (!queue)
+ return NULL;
+
+ queue_size_in_bytes = vring_size(num, vring_align);
+ vring_init(&vring, num, queue, vring_align);
+
+ vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
+ notify, callback, name);
+ if (!vq) {
+ vring_free_queue(vdev, queue_size_in_bytes, queue,
+ dma_addr);
+ return NULL;
+ }
+
+ to_vvq(vq)->queue_dma_addr = dma_addr;
+ to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
+ to_vvq(vq)->we_own_ring = true;
+
+ return vq;
+}
+EXPORT_SYMBOL_GPL(vring_create_virtqueue);
+
+struct virtqueue *vring_new_virtqueue(unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ void *pages,
+ bool (*notify)(struct virtqueue *vq),
+ void (*callback)(struct virtqueue *vq),
+ const char *name)
+{
+ struct vring vring;
+ vring_init(&vring, num, pages, vring_align);
+ return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
+ notify, callback, name);
+}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
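
The refactor leaves two public constructors: vring_create_virtqueue(), which allocates the ring itself (through the DMA API when vring_use_dma_api() says so), and the compatibility vring_new_virtqueue() above for callers that still pass in their own pages. A sketch of a transport taking the modern path, mirroring the PCI usage earlier in this series; my_notify/my_callback and demo_create() are placeholders:

static bool my_notify(struct virtqueue *vq);
static void my_callback(struct virtqueue *vq);

static struct virtqueue *demo_create(struct virtio_device *vdev,
				     unsigned int index, unsigned int num)
{
	return vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
				      true,	/* weak_barriers */
				      true,	/* may_reduce_num */
				      my_notify, my_callback, "demo");
}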
-void vring_del_virtqueue(struct virtqueue *vq)
+void vring_del_virtqueue(struct virtqueue *_vq)
{
- list_del(&vq->list);
- kfree(to_vvq(vq));
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (vq->we_own_ring) {
+ vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
+ vq->vring.desc, vq->queue_dma_addr);
+ }
+ list_del(&_vq->list);
+ kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -853,20 +1146,42 @@ void virtio_break_device(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_break_device);
-void *virtqueue_get_avail(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->vring.avail;
+ BUG_ON(!vq->we_own_ring);
+
+ return vq->queue_dma_addr;
}
-EXPORT_SYMBOL_GPL(virtqueue_get_avail);
+EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
-void *virtqueue_get_used(struct virtqueue *_vq)
+dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->vring.used;
+ BUG_ON(!vq->we_own_ring);
+
+ return vq->queue_dma_addr +
+ ((char *)vq->vring.avail - (char *)vq->vring.desc);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
+
+dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ BUG_ON(!vq->we_own_ring);
+
+ return vq->queue_dma_addr +
+ ((char *)vq->vring.used - (char *)vq->vring.desc);
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
+
+const struct vring *virtqueue_get_vring(struct virtqueue *vq)
+{
+ return &to_vvq(vq)->vring;
}
-EXPORT_SYMBOL_GPL(virtqueue_get_used);
+EXPORT_SYMBOL_GPL(virtqueue_get_vring);
MODULE_LICENSE("GPL");
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index b79a74a98a23..5fbeab38889e 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -202,7 +202,7 @@ static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
bridge = ca91cx42_bridge->driver_priv;
/* Need pdev */
- pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
+ pdev = to_pci_dev(ca91cx42_bridge->parent);
INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers);
@@ -293,8 +293,7 @@ static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
iowrite32(tmp, bridge->base + LINT_EN);
if ((state == 0) && (sync != 0)) {
- pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
- dev);
+ pdev = to_pci_dev(ca91cx42_bridge->parent);
synchronize_irq(pdev->irq);
}
@@ -518,7 +517,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
return -EINVAL;
}
- pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
+ pdev = to_pci_dev(ca91cx42_bridge->parent);
existing_size = (unsigned long long)(image->bus_resource.end -
image->bus_resource.start);
@@ -1519,7 +1518,7 @@ static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
struct pci_dev *pdev;
/* Find pci_dev container of dev */
- pdev = container_of(parent, struct pci_dev, dev);
+ pdev = to_pci_dev(parent);
return pci_alloc_consistent(pdev, size, dma);
}
@@ -1530,7 +1529,7 @@ static void ca91cx42_free_consistent(struct device *parent, size_t size,
struct pci_dev *pdev;
/* Find pci_dev container of dev */
- pdev = container_of(parent, struct pci_dev, dev);
+ pdev = to_pci_dev(parent);
pci_free_consistent(pdev, size, vaddr, dma);
}
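The to_pci_dev() conversions here are purely mechanical; the helper in <linux/pci.h> is the same container_of() idiom the old code open-coded:

#define to_pci_dev(n) container_of(n, struct pci_dev, dev)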
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 0e2f43bccf1f..a2eec97d5064 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -618,7 +618,6 @@ static u8 omap_w1_read_byte(void *_hdq)
hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
- hdq_data->hdq_usecount = 0;
/* Write followed by a read, release the module */
if (hdq_data->init_trans) {
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index c9a7ff67d395..89a784751738 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -1147,7 +1147,6 @@ int w1_process(void *data)
jremain = 1;
}
- try_to_freeze();
__set_current_state(TASK_INTERRUPTIBLE);
 /* hold list_mutex until after interruptible to prevent losing
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 80825a7e8e48..fb947655badd 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -202,6 +202,26 @@ config ARM_SP805_WATCHDOG
ARM Primecell SP805 Watchdog timer. This will reboot your system when
the timeout is reached.
+config ARM_SBSA_WATCHDOG
+ tristate "ARM SBSA Generic Watchdog"
+ depends on ARM64
+ depends on ARM_ARCH_TIMER
+ select WATCHDOG_CORE
+ help
+	  The ARM SBSA Generic Watchdog has a two-stage timeout:
+	  the first signal (WS0) alerts the system by interrupt,
+	  the second one (WS1) is a real hardware reset.
+	  More details: ARM DEN0029B - Server Base System Architecture (SBSA)
+
+	  This driver can operate the ARM SBSA Generic Watchdog as either a
+	  single-stage or a two-stage watchdog, depending on the module
+	  parameter "action".
+
+	  Note: the maximum timeout in two-stage mode is half of that in
+	  single-stage mode.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sbsa_gwdt.
+
config ASM9260_WATCHDOG
tristate "Alphascale ASM9260 watchdog"
depends on MACH_ASM9260
@@ -330,6 +350,7 @@ config SA1100_WATCHDOG
config DW_WATCHDOG
tristate "Synopsys DesignWare watchdog"
depends on HAS_IOMEM
+ select WATCHDOG_CORE
help
 Say Y here to include support for the Synopsys DesignWare
watchdog timer found in many chips.
@@ -399,6 +420,7 @@ config DAVINCI_WATCHDOG
config ORION_WATCHDOG
tristate "Orion watchdog"
depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU
+ depends on ARM
select WATCHDOG_CORE
help
 Say Y here to include support for the watchdog timer
@@ -468,6 +490,7 @@ config NUC900_WATCHDOG
config TS4800_WATCHDOG
tristate "TS-4800 Watchdog"
depends on HAS_IOMEM && OF
+ depends on SOC_IMX51 || COMPILE_TEST
select WATCHDOG_CORE
select MFD_SYSCON
help
@@ -713,6 +736,15 @@ config ALIM7101_WDT
Most people will say N.
+config EBC_C384_WDT
+ tristate "WinSystems EBC-C384 Watchdog Timer"
+ depends on X86
+ select WATCHDOG_CORE
+ help
+	  Enables support for the watchdog timer on the WinSystems
+	  EBC-C384 motherboard. The timeout may be configured via
+	  the timeout module parameter.
+
config F71808E_WDT
tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
depends on X86
@@ -1142,6 +1174,7 @@ config W83627HF_WDT
NCT6779
NCT6791
NCT6792
+ NCT6102D/04D/06D
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
@@ -1214,6 +1247,32 @@ config SBC_EPX_C3_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called sbc_epx_c3.
+config INTEL_MEI_WDT
+ tristate "Intel MEI iAMT Watchdog"
+ depends on INTEL_MEI && X86
+ select WATCHDOG_CORE
+ ---help---
+ A device driver for the Intel MEI iAMT watchdog.
+
+	  The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
+	  Whenever the OS hangs or crashes, iAMT will send an event
+	  to any subscriber of this event. The watchdog doesn't reset
+	  the platform.
+
+ To compile this driver as a module, choose M here:
+ the module will be called mei_wdt.
+
+config NI903X_WDT
+ tristate "NI 903x/913x Watchdog"
+ depends on X86 && ACPI
+ select WATCHDOG_CORE
+ ---help---
+ This is the driver for the watchdog timer on the National Instruments
+ 903x/913x real-time controllers.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ni903x_wdt.
+
# M32R Architecture
# M68K Architecture
@@ -1377,10 +1436,12 @@ config BCM7038_WDT
tristate "BCM7038 Watchdog"
select WATCHDOG_CORE
depends on HAS_IOMEM
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
help
- Watchdog driver for the built-in hardware in Broadcom 7038 SoCs.
-
- Say 'Y or 'M' here to enable the driver.
+	  Watchdog driver for the built-in hardware in Broadcom 7038 and
+	  later SoCs used in set-top boxes. BCM7038 was introduced at the
+	  2004 CES, and since then many Broadcom chips have used this
+	  watchdog block, including some cable modem chips.
config IMGPDC_WDT
tristate "Imagination Technologies PDC Watchdog Timer"
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index f6a6a387c6c7..feb6270fdbde 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
# ARM Architecture
obj-$(CONFIG_ARM_SP805_WATCHDOG) += sp805_wdt.o
+obj-$(CONFIG_ARM_SBSA_WATCHDOG) += sbsa_gwdt.o
obj-$(CONFIG_ASM9260_WATCHDOG) += asm9260_wdt.o
obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o
obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o
@@ -88,6 +89,7 @@ obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
+obj-$(CONFIG_EBC_C384_WDT) += ebc-c384_wdt.o
obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o
obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o
obj-$(CONFIG_GEODE_WDT) += geodewdt.o
@@ -126,6 +128,8 @@ obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
obj-$(CONFIG_INTEL_MID_WATCHDOG) += intel-mid_wdt.o
+obj-$(CONFIG_INTEL_MEI_WDT) += mei_wdt.o
+obj-$(CONFIG_NI903X_WDT) += ni903x_wdt.o
# M32R Architecture
diff --git a/drivers/watchdog/atlas7_wdt.c b/drivers/watchdog/atlas7_wdt.c
index df6d9242a319..ed80734befae 100644
--- a/drivers/watchdog/atlas7_wdt.c
+++ b/drivers/watchdog/atlas7_wdt.c
@@ -154,6 +154,11 @@ static int atlas7_wdt_probe(struct platform_device *pdev)
writel(0, wdt->base + ATLAS7_WDT_CNT_CTRL);
wdt->tick_rate = clk_get_rate(clk);
+ if (!wdt->tick_rate) {
+ ret = -EINVAL;
+ goto err1;
+ }
+
wdt->clk = clk;
atlas7_wdd.min_timeout = 1;
atlas7_wdd.max_timeout = UINT_MAX / wdt->tick_rate;
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index df1c2a4b0165..a1900b9ab6c4 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -87,7 +87,8 @@ static int bcm47xx_wdt_hard_set_timeout(struct watchdog_device *wdd,
return 0;
}
-static int bcm47xx_wdt_restart(struct watchdog_device *wdd)
+static int bcm47xx_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
{
struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
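This is the first of several mechanical updates in this section to the watchdog core's new restart prototype, which forwards the reboot mode and an optional command from the restart chain. A conforming callback looks roughly like this (names illustrative):

static int example_restart(struct watchdog_device *wdd,
			   unsigned long action, void *data)
{
	/* 'action' carries the reboot mode and 'data' an optional
	 * command string; most drivers ignore both and simply force
	 * an immediate reset */
	return 0;
}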
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index 11e887572649..a100f648880d 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -119,7 +119,8 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd,
return ret;
}
-static int da9063_wdt_restart(struct watchdog_device *wdd)
+static int da9063_wdt_restart(struct watchdog_device *wdd, unsigned long action,
+ void *data)
{
struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
int ret;
diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c
index 1ccb0b239348..77df772406b0 100644
--- a/drivers/watchdog/digicolor_wdt.c
+++ b/drivers/watchdog/digicolor_wdt.c
@@ -48,7 +48,8 @@ static void dc_wdt_set(struct dc_wdt *wdt, u32 ticks)
spin_unlock_irqrestore(&wdt->lock, flags);
}
-static int dc_wdt_restart(struct watchdog_device *wdog)
+static int dc_wdt_restart(struct watchdog_device *wdog, unsigned long action,
+ void *data)
{
struct dc_wdt *wdt = watchdog_get_drvdata(wdog);
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 8fefa4ad46d4..2acb51cf5504 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -12,9 +12,8 @@
* and these are a function of the input clock frequency.
*
* The DesignWare watchdog cannot be stopped once it has been started so we
- * use a software timer to implement a ping that will keep the watchdog alive.
- * If we receive an expected close for the watchdog then we keep the timer
- * running, otherwise the timer is stopped and the watchdog will expire.
+ * do not implement a stop function. The watchdog core will continue to send
+ * heartbeat requests after the watchdog device has been closed.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -22,12 +21,9 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/device.h>
#include <linux/err.h>
-#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
@@ -35,8 +31,6 @@
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
-#include <linux/timer.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
#define WDOG_CONTROL_REG_OFFSET 0x00
@@ -57,53 +51,50 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-#define WDT_TIMEOUT (HZ / 2)
-
-static struct {
+struct dw_wdt {
void __iomem *regs;
struct clk *clk;
- unsigned long in_use;
- unsigned long next_heartbeat;
- struct timer_list timer;
- int expect_close;
struct notifier_block restart_handler;
-} dw_wdt;
+ struct watchdog_device wdd;
+};
+
+#define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd)
-static inline int dw_wdt_is_enabled(void)
+static inline int dw_wdt_is_enabled(struct dw_wdt *dw_wdt)
{
- return readl(dw_wdt.regs + WDOG_CONTROL_REG_OFFSET) &
+ return readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET) &
WDOG_CONTROL_REG_WDT_EN_MASK;
}
-static inline int dw_wdt_top_in_seconds(unsigned top)
+static inline int dw_wdt_top_in_seconds(struct dw_wdt *dw_wdt, unsigned top)
{
/*
* There are 16 possible timeout values in 0..15 where the number of
* cycles is 2 ^ (16 + i) and the watchdog counts down.
*/
- return (1U << (16 + top)) / clk_get_rate(dw_wdt.clk);
+ return (1U << (16 + top)) / clk_get_rate(dw_wdt->clk);
}
-static int dw_wdt_get_top(void)
+static int dw_wdt_get_top(struct dw_wdt *dw_wdt)
{
- int top = readl(dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
+ int top = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF;
- return dw_wdt_top_in_seconds(top);
+ return dw_wdt_top_in_seconds(dw_wdt, top);
}
-static inline void dw_wdt_set_next_heartbeat(void)
+static int dw_wdt_ping(struct watchdog_device *wdd)
{
- dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ;
-}
+ struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
-static void dw_wdt_keepalive(void)
-{
- writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
+ writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt->regs +
WDOG_COUNTER_RESTART_REG_OFFSET);
+
+ return 0;
}
-static int dw_wdt_set_top(unsigned top_s)
+static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
{
+ struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
int i, top_val = DW_WDT_MAX_TOP;
/*
@@ -111,7 +102,7 @@ static int dw_wdt_set_top(unsigned top_s)
* always look for >=.
*/
for (i = 0; i <= DW_WDT_MAX_TOP; ++i)
- if (dw_wdt_top_in_seconds(i) >= top_s) {
+ if (dw_wdt_top_in_seconds(dw_wdt, i) >= top_s) {
top_val = i;
break;
}
@@ -123,33 +114,43 @@ static int dw_wdt_set_top(unsigned top_s)
* effectively get a pat of the watchdog right here.
*/
writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
- dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
- /*
- * Add an explicit pat to handle versions of the watchdog that
- * don't have TOPINIT. This won't hurt on versions that have
- * it.
- */
- dw_wdt_keepalive();
+ wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val);
- dw_wdt_set_next_heartbeat();
+ return 0;
+}
+
+static int dw_wdt_start(struct watchdog_device *wdd)
+{
+ struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+
+ dw_wdt_set_timeout(wdd, wdd->timeout);
- return dw_wdt_top_in_seconds(top_val);
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+ writel(WDOG_CONTROL_REG_WDT_EN_MASK,
+ dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+
+ return 0;
}
static int dw_wdt_restart_handle(struct notifier_block *this,
- unsigned long mode, void *cmd)
+ unsigned long mode, void *cmd)
{
+ struct dw_wdt *dw_wdt;
u32 val;
- writel(0, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
- val = readl(dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
+ dw_wdt = container_of(this, struct dw_wdt, restart_handler);
+
+ writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+ val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
if (val & WDOG_CONTROL_REG_WDT_EN_MASK)
- writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
- WDOG_COUNTER_RESTART_REG_OFFSET);
+ writel(WDOG_COUNTER_RESTART_KICK_VALUE,
+ dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
else
writel(WDOG_CONTROL_REG_WDT_EN_MASK,
- dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
+ dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
/* wait for reset to assert... */
mdelay(500);
@@ -157,74 +158,12 @@ static int dw_wdt_restart_handle(struct notifier_block *this,
return NOTIFY_DONE;
}
-static void dw_wdt_ping(unsigned long data)
+static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd)
{
- if (time_before(jiffies, dw_wdt.next_heartbeat) ||
- (!nowayout && !dw_wdt.in_use)) {
- dw_wdt_keepalive();
- mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
- } else
- pr_crit("keepalive missed, machine will reset\n");
-}
-
-static int dw_wdt_open(struct inode *inode, struct file *filp)
-{
- if (test_and_set_bit(0, &dw_wdt.in_use))
- return -EBUSY;
-
- /* Make sure we don't get unloaded. */
- __module_get(THIS_MODULE);
-
- if (!dw_wdt_is_enabled()) {
- /*
- * The watchdog is not currently enabled. Set the timeout to
- * something reasonable and then start it.
- */
- dw_wdt_set_top(DW_WDT_DEFAULT_SECONDS);
- writel(WDOG_CONTROL_REG_WDT_EN_MASK,
- dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
- }
-
- dw_wdt_set_next_heartbeat();
-
- return nonseekable_open(inode, filp);
-}
-
-static ssize_t dw_wdt_write(struct file *filp, const char __user *buf,
- size_t len, loff_t *offset)
-{
- if (!len)
- return 0;
-
- if (!nowayout) {
- size_t i;
-
- dw_wdt.expect_close = 0;
-
- for (i = 0; i < len; ++i) {
- char c;
-
- if (get_user(c, buf + i))
- return -EFAULT;
-
- if (c == 'V') {
- dw_wdt.expect_close = 1;
- break;
- }
- }
- }
+ struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
- dw_wdt_set_next_heartbeat();
- dw_wdt_keepalive();
- mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
-
- return len;
-}
-
-static u32 dw_wdt_time_left(void)
-{
- return readl(dw_wdt.regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
- clk_get_rate(dw_wdt.clk);
+ return readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET) /
+ clk_get_rate(dw_wdt->clk);
}
static const struct watchdog_info dw_wdt_ident = {
@@ -233,78 +172,33 @@ static const struct watchdog_info dw_wdt_ident = {
.identity = "Synopsys DesignWare Watchdog",
};
-static long dw_wdt_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned long val;
- int timeout;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user((void __user *)arg, &dw_wdt_ident,
- sizeof(dw_wdt_ident)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int __user *)arg);
-
- case WDIOC_KEEPALIVE:
- dw_wdt_set_next_heartbeat();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- timeout = dw_wdt_set_top(val);
- return put_user(timeout , (int __user *)arg);
-
- case WDIOC_GETTIMEOUT:
- return put_user(dw_wdt_get_top(), (int __user *)arg);
-
- case WDIOC_GETTIMELEFT:
- /* Get the time left until expiry. */
- if (get_user(val, (int __user *)arg))
- return -EFAULT;
- return put_user(dw_wdt_time_left(), (int __user *)arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static int dw_wdt_release(struct inode *inode, struct file *filp)
-{
- clear_bit(0, &dw_wdt.in_use);
-
- if (!dw_wdt.expect_close) {
- del_timer(&dw_wdt.timer);
-
- if (!nowayout)
- pr_crit("unexpected close, system will reboot soon\n");
- else
- pr_crit("watchdog cannot be disabled, system will reboot soon\n");
- }
-
- dw_wdt.expect_close = 0;
-
- return 0;
-}
+static const struct watchdog_ops dw_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = dw_wdt_start,
+ .ping = dw_wdt_ping,
+ .set_timeout = dw_wdt_set_timeout,
+ .get_timeleft = dw_wdt_get_timeleft,
+};
#ifdef CONFIG_PM_SLEEP
static int dw_wdt_suspend(struct device *dev)
{
- clk_disable_unprepare(dw_wdt.clk);
+ struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dw_wdt->clk);
return 0;
}
static int dw_wdt_resume(struct device *dev)
{
- int err = clk_prepare_enable(dw_wdt.clk);
+ struct dw_wdt *dw_wdt = dev_get_drvdata(dev);
+ int err = clk_prepare_enable(dw_wdt->clk);
if (err)
return err;
- dw_wdt_keepalive();
+ dw_wdt_ping(&dw_wdt->wdd);
return 0;
}
@@ -312,67 +206,82 @@ static int dw_wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume);
-static const struct file_operations wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = dw_wdt_open,
- .write = dw_wdt_write,
- .unlocked_ioctl = dw_wdt_ioctl,
- .release = dw_wdt_release
-};
-
-static struct miscdevice dw_wdt_miscdev = {
- .fops = &wdt_fops,
- .name = "watchdog",
- .minor = WATCHDOG_MINOR,
-};
-
static int dw_wdt_drv_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdd;
+ struct dw_wdt *dw_wdt;
+ struct resource *mem;
int ret;
- struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dw_wdt.regs = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(dw_wdt.regs))
- return PTR_ERR(dw_wdt.regs);
+ dw_wdt = devm_kzalloc(dev, sizeof(*dw_wdt), GFP_KERNEL);
+ if (!dw_wdt)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dw_wdt->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(dw_wdt->regs))
+ return PTR_ERR(dw_wdt->regs);
- dw_wdt.clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dw_wdt.clk))
- return PTR_ERR(dw_wdt.clk);
+ dw_wdt->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(dw_wdt->clk))
+ return PTR_ERR(dw_wdt->clk);
- ret = clk_prepare_enable(dw_wdt.clk);
+ ret = clk_prepare_enable(dw_wdt->clk);
if (ret)
return ret;
- ret = misc_register(&dw_wdt_miscdev);
+ wdd = &dw_wdt->wdd;
+ wdd->info = &dw_wdt_ident;
+ wdd->ops = &dw_wdt_ops;
+ wdd->min_timeout = 1;
+ wdd->max_hw_heartbeat_ms =
+ dw_wdt_top_in_seconds(dw_wdt, DW_WDT_MAX_TOP) * 1000;
+ wdd->parent = dev;
+
+ watchdog_set_drvdata(wdd, dw_wdt);
+ watchdog_set_nowayout(wdd, nowayout);
+ watchdog_init_timeout(wdd, 0, dev);
+
+ /*
+ * If the watchdog is already running, use its already configured
+ * timeout. Otherwise use the default or the value provided through
+ * devicetree.
+ */
+ if (dw_wdt_is_enabled(dw_wdt)) {
+ wdd->timeout = dw_wdt_get_top(dw_wdt);
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ } else {
+ wdd->timeout = DW_WDT_DEFAULT_SECONDS;
+ watchdog_init_timeout(wdd, 0, dev);
+ }
+
+ platform_set_drvdata(pdev, dw_wdt);
+
+ ret = watchdog_register_device(wdd);
if (ret)
goto out_disable_clk;
- dw_wdt.restart_handler.notifier_call = dw_wdt_restart_handle;
- dw_wdt.restart_handler.priority = 128;
- ret = register_restart_handler(&dw_wdt.restart_handler);
+ dw_wdt->restart_handler.notifier_call = dw_wdt_restart_handle;
+ dw_wdt->restart_handler.priority = 128;
+ ret = register_restart_handler(&dw_wdt->restart_handler);
if (ret)
pr_warn("cannot register restart handler\n");
- dw_wdt_set_next_heartbeat();
- setup_timer(&dw_wdt.timer, dw_wdt_ping, 0);
- mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
-
return 0;
out_disable_clk:
- clk_disable_unprepare(dw_wdt.clk);
-
+ clk_disable_unprepare(dw_wdt->clk);
return ret;
}
static int dw_wdt_drv_remove(struct platform_device *pdev)
{
- unregister_restart_handler(&dw_wdt.restart_handler);
-
- misc_deregister(&dw_wdt_miscdev);
+ struct dw_wdt *dw_wdt = platform_get_drvdata(pdev);
- clk_disable_unprepare(dw_wdt.clk);
+ unregister_restart_handler(&dw_wdt->restart_handler);
+ watchdog_unregister_device(&dw_wdt->wdd);
+ clk_disable_unprepare(dw_wdt->clk);
return 0;
}
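The dw_wdt conversion drops the driver's private ping timer by leaning on the core's WDOG_HW_RUNNING handling: a watchdog that cannot be stopped sets the bit, and the core then pings it whenever userspace is not. A minimal sketch of that contract (illustrative, not part of the patch):

static int example_start(struct watchdog_device *wdd)
{
	/* hardware cannot be stopped: tell the core it keeps running,
	 * so the core invokes ops->ping itself while the device node
	 * is closed, replacing the deleted kernel timer */
	set_bit(WDOG_HW_RUNNING, &wdd->status);
	return 0;
}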
diff --git a/drivers/watchdog/ebc-c384_wdt.c b/drivers/watchdog/ebc-c384_wdt.c
new file mode 100644
index 000000000000..77fda0b4b90e
--- /dev/null
+++ b/drivers/watchdog/ebc-c384_wdt.c
@@ -0,0 +1,188 @@
+/*
+ * Watchdog timer driver for the WinSystems EBC-C384
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define MODULE_NAME "ebc-c384_wdt"
+#define WATCHDOG_TIMEOUT 60
+/*
+ * The timeout value in minutes must fit in a single byte when sent to the
+ * watchdog timer; the maximum timeout possible is 15300 (255 * 60) seconds.
+ */
+#define WATCHDOG_MAX_TIMEOUT 15300
+#define BASE_ADDR 0x564
+#define ADDR_EXTENT 5
+#define CFG_ADDR (BASE_ADDR + 1)
+#define PET_ADDR (BASE_ADDR + 2)
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static unsigned timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+static int ebc_c384_wdt_start(struct watchdog_device *wdev)
+{
+ unsigned t = wdev->timeout;
+
+ /* resolution is in minutes for timeouts greater than 255 seconds */
+ if (t > 255)
+ t = DIV_ROUND_UP(t, 60);
+
+ outb(t, PET_ADDR);
+
+ return 0;
+}
+
+static int ebc_c384_wdt_stop(struct watchdog_device *wdev)
+{
+ outb(0x00, PET_ADDR);
+
+ return 0;
+}
+
+static int ebc_c384_wdt_set_timeout(struct watchdog_device *wdev, unsigned t)
+{
+ /* resolution is in minutes for timeouts greater than 255 seconds */
+ if (t > 255) {
+ /* round second resolution up to minute granularity */
+ wdev->timeout = roundup(t, 60);
+
+ /* set watchdog timer for minutes */
+ outb(0x00, CFG_ADDR);
+ } else {
+ wdev->timeout = t;
+
+ /* set watchdog timer for seconds */
+ outb(0x80, CFG_ADDR);
+ }
+
+ return 0;
+}
+
+static const struct watchdog_ops ebc_c384_wdt_ops = {
+ .start = ebc_c384_wdt_start,
+ .stop = ebc_c384_wdt_stop,
+ .set_timeout = ebc_c384_wdt_set_timeout
+};
+
+static const struct watchdog_info ebc_c384_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT,
+ .identity = MODULE_NAME
+};
+
+static int __init ebc_c384_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdd;
+
+ if (!devm_request_region(dev, BASE_ADDR, ADDR_EXTENT, dev_name(dev))) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ BASE_ADDR, BASE_ADDR + ADDR_EXTENT);
+ return -EBUSY;
+ }
+
+ wdd = devm_kzalloc(dev, sizeof(*wdd), GFP_KERNEL);
+ if (!wdd)
+ return -ENOMEM;
+
+ wdd->info = &ebc_c384_wdt_info;
+ wdd->ops = &ebc_c384_wdt_ops;
+ wdd->timeout = WATCHDOG_TIMEOUT;
+ wdd->min_timeout = 1;
+ wdd->max_timeout = WATCHDOG_MAX_TIMEOUT;
+
+ watchdog_set_nowayout(wdd, nowayout);
+
+ if (watchdog_init_timeout(wdd, timeout, dev))
+ dev_warn(dev, "Invalid timeout (%u seconds), using default (%u seconds)\n",
+ timeout, WATCHDOG_TIMEOUT);
+
+ platform_set_drvdata(pdev, wdd);
+
+ return watchdog_register_device(wdd);
+}
+
+static int ebc_c384_wdt_remove(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(wdd);
+
+ return 0;
+}
+
+static struct platform_driver ebc_c384_wdt_driver = {
+ .driver = {
+ .name = MODULE_NAME
+ },
+ .remove = ebc_c384_wdt_remove
+};
+
+static struct platform_device *ebc_c384_wdt_device;
+
+static int __init ebc_c384_wdt_init(void)
+{
+ int err;
+
+ if (!dmi_match(DMI_BOARD_NAME, "EBC-C384 SBC"))
+ return -ENODEV;
+
+ ebc_c384_wdt_device = platform_device_alloc(MODULE_NAME, -1);
+ if (!ebc_c384_wdt_device)
+ return -ENOMEM;
+
+ err = platform_device_add(ebc_c384_wdt_device);
+ if (err)
+ goto err_platform_device;
+
+ err = platform_driver_probe(&ebc_c384_wdt_driver, ebc_c384_wdt_probe);
+ if (err)
+ goto err_platform_driver;
+
+ return 0;
+
+err_platform_driver:
+ platform_device_del(ebc_c384_wdt_device);
+err_platform_device:
+ platform_device_put(ebc_c384_wdt_device);
+ return err;
+}
+
+static void __exit ebc_c384_wdt_exit(void)
+{
+ platform_device_unregister(ebc_c384_wdt_device);
+ platform_driver_unregister(&ebc_c384_wdt_driver);
+}
+
+module_init(ebc_c384_wdt_init);
+module_exit(ebc_c384_wdt_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("WinSystems EBC-C384 watchdog timer driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" MODULE_NAME);
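A worked example of the CFG/PET encoding implemented above:

/*
 * t = 300 s exceeds 255, so set_timeout() writes 0x00 to CFG_ADDR
 * (minute mode) and rounds the timeout up to roundup(300, 60) = 300 s;
 * start() then writes DIV_ROUND_UP(300, 60) = 5 to PET_ADDR.
 * t = 90 s stays in second mode: 0x80 to CFG_ADDR, 90 to PET_ADDR.
 */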
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 92443c319e59..8f89bd8a826a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -37,6 +37,7 @@
#include <asm/cacheflush.h>
#endif /* CONFIG_HPWDT_NMI_DECODING */
#include <asm/nmi.h>
+#include <asm/frame.h>
#define HPWDT_VERSION "1.3.3"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
@@ -353,10 +354,10 @@ static int detect_cru_service(void)
asm(".text \n\t"
".align 4 \n\t"
- ".globl asminline_call \n"
+ ".globl asminline_call \n\t"
+ ".type asminline_call, @function \n\t"
"asminline_call: \n\t"
- "pushq %rbp \n\t"
- "movq %rsp, %rbp \n\t"
+ FRAME_BEGIN
"pushq %rax \n\t"
"pushq %rbx \n\t"
"pushq %rdx \n\t"
@@ -386,7 +387,7 @@ asm(".text \n\t"
"popq %rdx \n\t"
"popq %rbx \n\t"
"popq %rax \n\t"
- "leave \n\t"
+ FRAME_END
"ret \n\t"
".previous");
@@ -483,7 +484,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
static int die_nmi_called;
if (!hpwdt_nmi_decoding)
- goto out;
+ return NMI_DONE;
spin_lock_irqsave(&rom_lock, rom_pl);
if (!die_nmi_called && !is_icru && !is_uefi)
@@ -496,11 +497,11 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
if (!is_icru && !is_uefi) {
if (cmn_regs.u1.ral == 0) {
- panic("An NMI occurred, "
- "but unable to determine source.\n");
+ nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
+ return NMI_HANDLED;
}
}
- panic("An NMI occurred. Depending on your system the reason "
+ nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
"for the NMI is logged in any one of the following "
"resources:\n"
"1. Integrated Management Log (IML)\n"
@@ -508,8 +509,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
"3. OA Forward Progress Log\n"
"4. iLO Event Log");
-out:
- return NMI_DONE;
+ return NMI_HANDLED;
}
#endif /* CONFIG_HPWDT_NMI_DECODING */
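Two independent hardening changes land in hpwdt: FRAME_BEGIN/FRAME_END keep the hand-written asm valid under frame-pointer validation, and nmi_panic() copes with concurrent NMIs. A descriptive note on the latter (not code from the patch):

/*
 * panic() from NMI context can deadlock if another CPU is already
 * panicking; nmi_panic(regs, msg) only panics when no panic is in
 * progress and otherwise returns, which is why the handler now
 * reports NMI_HANDLED instead of falling through to NMI_DONE.
 */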
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
index 3679f2e1922f..516fbef00856 100644
--- a/drivers/watchdog/imgpdc_wdt.c
+++ b/drivers/watchdog/imgpdc_wdt.c
@@ -150,7 +150,8 @@ static int pdc_wdt_start(struct watchdog_device *wdt_dev)
return 0;
}
-static int pdc_wdt_restart(struct watchdog_device *wdt_dev)
+static int pdc_wdt_restart(struct watchdog_device *wdt_dev,
+ unsigned long action, void *data)
{
struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index e47966aa2db0..331aed831dac 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -25,14 +25,12 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/timer.h>
#include <linux/watchdog.h>
#define DRIVER_NAME "imx2-wdt"
@@ -60,7 +58,6 @@
struct imx2_wdt_device {
struct clk *clk;
struct regmap *regmap;
- struct timer_list timer; /* Pings the watchdog when closed */
struct watchdog_device wdog;
};
@@ -80,7 +77,8 @@ static const struct watchdog_info imx2_wdt_info = {
.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
};
-static int imx2_wdt_restart(struct watchdog_device *wdog)
+static int imx2_wdt_restart(struct watchdog_device *wdog, unsigned long action,
+ void *data)
{
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
unsigned int wcr_enable = IMX2_WDT_WCR_WDE;
@@ -146,16 +144,6 @@ static int imx2_wdt_ping(struct watchdog_device *wdog)
return 0;
}
-static void imx2_wdt_timer_ping(unsigned long arg)
-{
- struct watchdog_device *wdog = (struct watchdog_device *)arg;
- struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
-
- /* ping it every wdog->timeout / 2 seconds to prevent reboot */
- imx2_wdt_ping(wdog);
- mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
-}
-
static int imx2_wdt_set_timeout(struct watchdog_device *wdog,
unsigned int new_timeout)
{
@@ -172,40 +160,19 @@ static int imx2_wdt_start(struct watchdog_device *wdog)
{
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- if (imx2_wdt_is_running(wdev)) {
- /* delete the timer that pings the watchdog after close */
- del_timer_sync(&wdev->timer);
+ if (imx2_wdt_is_running(wdev))
imx2_wdt_set_timeout(wdog, wdog->timeout);
- } else
+ else
imx2_wdt_setup(wdog);
- return imx2_wdt_ping(wdog);
-}
-
-static int imx2_wdt_stop(struct watchdog_device *wdog)
-{
- /*
- * We don't need a clk_disable, it cannot be disabled once started.
- * We use a timer to ping the watchdog while /dev/watchdog is closed
- */
- imx2_wdt_timer_ping((unsigned long)wdog);
- return 0;
-}
-
-static inline void imx2_wdt_ping_if_active(struct watchdog_device *wdog)
-{
- struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
+ set_bit(WDOG_HW_RUNNING, &wdog->status);
- if (imx2_wdt_is_running(wdev)) {
- imx2_wdt_set_timeout(wdog, wdog->timeout);
- imx2_wdt_timer_ping((unsigned long)wdog);
- }
+ return imx2_wdt_ping(wdog);
}
static const struct watchdog_ops imx2_wdt_ops = {
.owner = THIS_MODULE,
.start = imx2_wdt_start,
- .stop = imx2_wdt_stop,
.ping = imx2_wdt_ping,
.set_timeout = imx2_wdt_set_timeout,
.restart = imx2_wdt_restart,
@@ -253,7 +220,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
wdog->info = &imx2_wdt_info;
wdog->ops = &imx2_wdt_ops;
wdog->min_timeout = 1;
- wdog->max_timeout = IMX2_WDT_MAX_TIME;
+ wdog->max_hw_heartbeat_ms = IMX2_WDT_MAX_TIME * 1000;
wdog->parent = &pdev->dev;
ret = clk_prepare_enable(wdev->clk);
@@ -274,9 +241,10 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
watchdog_set_restart_priority(wdog, 128);
watchdog_init_timeout(wdog, timeout, &pdev->dev);
- setup_timer(&wdev->timer, imx2_wdt_timer_ping, (unsigned long)wdog);
-
- imx2_wdt_ping_if_active(wdog);
+ if (imx2_wdt_is_running(wdev)) {
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
+ set_bit(WDOG_HW_RUNNING, &wdog->status);
+ }
/*
* Disable the watchdog power down counter at boot. Otherwise the power
@@ -309,7 +277,6 @@ static int __exit imx2_wdt_remove(struct platform_device *pdev)
watchdog_unregister_device(wdog);
if (imx2_wdt_is_running(wdev)) {
- del_timer_sync(&wdev->timer);
imx2_wdt_ping(wdog);
dev_crit(&pdev->dev, "Device removed: Expect reboot!\n");
}
@@ -323,10 +290,9 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
if (imx2_wdt_is_running(wdev)) {
/*
- * We are running, we need to delete the timer but will
- * give max timeout before reboot will take place
+		 * We are running, so configure the max timeout before
+		 * the reboot takes place.
*/
- del_timer_sync(&wdev->timer);
imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
imx2_wdt_ping(wdog);
dev_crit(&pdev->dev, "Device shutdown: Expect reboot!\n");
@@ -344,10 +310,6 @@ static int imx2_wdt_suspend(struct device *dev)
if (imx2_wdt_is_running(wdev)) {
imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
imx2_wdt_ping(wdog);
-
- /* The watchdog is not active */
- if (!watchdog_active(wdog))
- del_timer_sync(&wdev->timer);
}
clk_disable_unprepare(wdev->clk);
@@ -373,19 +335,10 @@ static int imx2_wdt_resume(struct device *dev)
* watchdog again.
*/
imx2_wdt_setup(wdog);
+ }
+ if (imx2_wdt_is_running(wdev)) {
imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
- } else if (imx2_wdt_is_running(wdev)) {
- /* Resuming from non-deep sleep state. */
- imx2_wdt_set_timeout(wdog, wdog->timeout);
- imx2_wdt_ping(wdog);
- /*
- * But the watchdog is not active, then start
- * the timer again.
- */
- if (!watchdog_active(wdog))
- mod_timer(&wdev->timer,
- jiffies + wdog->timeout * HZ / 2);
}
return 0;
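Replacing max_timeout with max_hw_heartbeat_ms in imx2_wdt means userspace may request timeouts beyond the IMX2_WDT_MAX_TIME hardware limit, with the core filling the gap by pinging on the user's behalf. A hedged sketch of the condition involved:

static bool example_core_assist_needed(struct watchdog_device *wdd)
{
	/* the core schedules its own pings when the user's timeout
	 * exceeds what the hardware can count in one period */
	return (u64)wdd->timeout * 1000 > wdd->max_hw_heartbeat_ms;
}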
diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c
index 6914c83aa6d9..fd171e6caa16 100644
--- a/drivers/watchdog/lpc18xx_wdt.c
+++ b/drivers/watchdog/lpc18xx_wdt.c
@@ -153,7 +153,8 @@ static int lpc18xx_wdt_start(struct watchdog_device *wdt_dev)
return 0;
}
-static int lpc18xx_wdt_restart(struct watchdog_device *wdt_dev)
+static int lpc18xx_wdt_restart(struct watchdog_device *wdt_dev,
+ unsigned long action, void *data)
{
struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev);
unsigned long flags;
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
new file mode 100644
index 000000000000..630bd189f167
--- /dev/null
+++ b/drivers/watchdog/mei_wdt.c
@@ -0,0 +1,724 @@
+/*
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/watchdog.h>
+
+#include <linux/uuid.h>
+#include <linux/mei_cl_bus.h>
+
+/*
+ * iAMT Watchdog Device
+ */
+#define INTEL_AMT_WATCHDOG_ID "iamt_wdt"
+
+#define MEI_WDT_DEFAULT_TIMEOUT 120 /* seconds */
+#define MEI_WDT_MIN_TIMEOUT 120 /* seconds */
+#define MEI_WDT_MAX_TIMEOUT 65535 /* seconds */
+
+/* Commands */
+#define MEI_MANAGEMENT_CONTROL 0x02
+
+/* MEI Management Control version number */
+#define MEI_MC_VERSION_NUMBER 0x10
+
+/* Sub Commands */
+#define MEI_MC_START_WD_TIMER_REQ 0x13
+#define MEI_MC_START_WD_TIMER_RES 0x83
+#define MEI_WDT_STATUS_SUCCESS 0
+#define MEI_WDT_WDSTATE_NOT_REQUIRED 0x1
+#define MEI_MC_STOP_WD_TIMER_REQ 0x14
+
+/**
+ * enum mei_wdt_state - internal watchdog state
+ *
+ * @MEI_WDT_PROBE: wd in probing stage
+ * @MEI_WDT_IDLE: wd is idle and not opened
+ * @MEI_WDT_START: wd was opened, start was called
+ * @MEI_WDT_RUNNING: wd is expecting keep alive pings
+ * @MEI_WDT_STOPPING: wd is stopping and will move to IDLE
+ * @MEI_WDT_NOT_REQUIRED: wd device is not required
+ */
+enum mei_wdt_state {
+ MEI_WDT_PROBE,
+ MEI_WDT_IDLE,
+ MEI_WDT_START,
+ MEI_WDT_RUNNING,
+ MEI_WDT_STOPPING,
+ MEI_WDT_NOT_REQUIRED,
+};
+
+static const char *mei_wdt_state_str(enum mei_wdt_state state)
+{
+ switch (state) {
+ case MEI_WDT_PROBE:
+ return "PROBE";
+ case MEI_WDT_IDLE:
+ return "IDLE";
+ case MEI_WDT_START:
+ return "START";
+ case MEI_WDT_RUNNING:
+ return "RUNNING";
+ case MEI_WDT_STOPPING:
+ return "STOPPING";
+ case MEI_WDT_NOT_REQUIRED:
+ return "NOT_REQUIRED";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * struct mei_wdt - mei watchdog driver
+ * @wdd: watchdog device
+ *
+ * @cldev: mei watchdog client device
+ * @state: watchdog internal state
+ * @resp_required: ping required response
+ * @response: ping response completion
+ * @unregister: unregister worker
+ * @reg_lock: watchdog device registration lock
+ * @timeout: watchdog current timeout
+ *
+ * @dbgfs_dir: debugfs dir entry
+ */
+struct mei_wdt {
+ struct watchdog_device wdd;
+
+ struct mei_cl_device *cldev;
+ enum mei_wdt_state state;
+ bool resp_required;
+ struct completion response;
+ struct work_struct unregister;
+ struct mutex reg_lock;
+ u16 timeout;
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+};
+
+/*
+ * struct mei_mc_hdr - Management Control Command Header
+ *
+ * @command: Management Control (0x2)
+ * @bytecount: Number of bytes in the message beyond this byte
+ * @subcommand: Management Control Subcommand
+ * @versionnumber: Management Control Version (0x10)
+ */
+struct mei_mc_hdr {
+ u8 command;
+ u8 bytecount;
+ u8 subcommand;
+ u8 versionnumber;
+};
+
+/**
+ * struct mei_wdt_start_request - watchdog start/ping
+ *
+ * @hdr: Management Control Command Header
+ * @timeout: timeout value
+ * @reserved: reserved (legacy)
+ */
+struct mei_wdt_start_request {
+ struct mei_mc_hdr hdr;
+ u16 timeout;
+ u8 reserved[17];
+} __packed;
+
+/**
+ * struct mei_wdt_start_response - watchdog start/ping response
+ *
+ * @hdr: Management Control Command Header
+ * @status: operation status
+ * @wdstate: watchdog status bit mask
+ */
+struct mei_wdt_start_response {
+ struct mei_mc_hdr hdr;
+ u8 status;
+ u8 wdstate;
+} __packed;
+
+/**
+ * struct mei_wdt_stop_request - watchdog stop
+ *
+ * @hdr: Management Control Command Header
+ */
+struct mei_wdt_stop_request {
+ struct mei_mc_hdr hdr;
+} __packed;
+
+/**
+ * mei_wdt_ping - send wd start/ping command
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 on success,
+ * negative errno code on failure
+ */
+static int mei_wdt_ping(struct mei_wdt *wdt)
+{
+ struct mei_wdt_start_request req;
+ const size_t req_len = sizeof(req);
+ int ret;
+
+ memset(&req, 0, req_len);
+ req.hdr.command = MEI_MANAGEMENT_CONTROL;
+ req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
+ req.hdr.subcommand = MEI_MC_START_WD_TIMER_REQ;
+ req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
+ req.timeout = wdt->timeout;
+
+ ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * mei_wdt_stop - send wd stop command
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 on success,
+ * negative errno code on failure
+ */
+static int mei_wdt_stop(struct mei_wdt *wdt)
+{
+ struct mei_wdt_stop_request req;
+ const size_t req_len = sizeof(req);
+ int ret;
+
+ memset(&req, 0, req_len);
+ req.hdr.command = MEI_MANAGEMENT_CONTROL;
+ req.hdr.bytecount = req_len - offsetof(struct mei_mc_hdr, subcommand);
+ req.hdr.subcommand = MEI_MC_STOP_WD_TIMER_REQ;
+ req.hdr.versionnumber = MEI_MC_VERSION_NUMBER;
+
+ ret = mei_cldev_send(wdt->cldev, (u8 *)&req, req_len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * mei_wdt_ops_start - wd start command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: always 0
+ */
+static int mei_wdt_ops_start(struct watchdog_device *wdd)
+{
+ struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ wdt->state = MEI_WDT_START;
+ wdd->timeout = wdt->timeout;
+ return 0;
+}
+
+/**
+ * mei_wdt_ops_stop - wd stop command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: 0 on success, negative errno code on failure
+ */
+static int mei_wdt_ops_stop(struct watchdog_device *wdd)
+{
+ struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+ int ret;
+
+ if (wdt->state != MEI_WDT_RUNNING)
+ return 0;
+
+ wdt->state = MEI_WDT_STOPPING;
+
+ ret = mei_wdt_stop(wdt);
+ if (ret)
+ return ret;
+
+ wdt->state = MEI_WDT_IDLE;
+
+ return 0;
+}
+
+/**
+ * mei_wdt_ops_ping - wd ping command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ *
+ * Return: 0 on success, negative errno code on failure
+ */
+static int mei_wdt_ops_ping(struct watchdog_device *wdd)
+{
+ struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+ int ret;
+
+ if (wdt->state != MEI_WDT_START && wdt->state != MEI_WDT_RUNNING)
+ return 0;
+
+ if (wdt->resp_required)
+ init_completion(&wdt->response);
+
+ wdt->state = MEI_WDT_RUNNING;
+ ret = mei_wdt_ping(wdt);
+ if (ret)
+ return ret;
+
+ if (wdt->resp_required)
+ ret = wait_for_completion_killable(&wdt->response);
+
+ return ret;
+}
+
+/**
+ * mei_wdt_ops_set_timeout - wd set timeout command from the watchdog core.
+ *
+ * @wdd: watchdog device
+ * @timeout: timeout value to set
+ *
+ * Return: 0 on success, negative errno code on failure
+ */
+static int mei_wdt_ops_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+
+ struct mei_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ /* valid value is already checked by the caller */
+ wdt->timeout = timeout;
+ wdd->timeout = timeout;
+
+ return 0;
+}
+
+static const struct watchdog_ops wd_ops = {
+ .owner = THIS_MODULE,
+ .start = mei_wdt_ops_start,
+ .stop = mei_wdt_ops_stop,
+ .ping = mei_wdt_ops_ping,
+ .set_timeout = mei_wdt_ops_set_timeout,
+};
+
+/* not const as the firmware_version field needs to be set at runtime */
+static struct watchdog_info wd_info = {
+ .identity = INTEL_AMT_WATCHDOG_ID,
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT |
+ WDIOF_ALARMONLY,
+};
+
+/**
+ * __mei_wdt_is_registered - check if wdt is registered
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: true if the wdt is registered with the watchdog subsystem
+ * Locking: should be called under wdt->reg_lock
+ */
+static inline bool __mei_wdt_is_registered(struct mei_wdt *wdt)
+{
+ return !!watchdog_get_drvdata(&wdt->wdd);
+}
+
+/**
+ * mei_wdt_unregister - unregister from the watchdog subsystem
+ *
+ * @wdt: mei watchdog device
+ */
+static void mei_wdt_unregister(struct mei_wdt *wdt)
+{
+ mutex_lock(&wdt->reg_lock);
+
+ if (__mei_wdt_is_registered(wdt)) {
+ watchdog_unregister_device(&wdt->wdd);
+ watchdog_set_drvdata(&wdt->wdd, NULL);
+ memset(&wdt->wdd, 0, sizeof(wdt->wdd));
+ }
+
+ mutex_unlock(&wdt->reg_lock);
+}
+
+/**
+ * mei_wdt_register - register with the watchdog subsystem
+ *
+ * @wdt: mei watchdog device
+ *
+ * Return: 0 on success, negative errno code on failure
+ */
+static int mei_wdt_register(struct mei_wdt *wdt)
+{
+ struct device *dev;
+ int ret;
+
+ if (!wdt || !wdt->cldev)
+ return -EINVAL;
+
+ dev = &wdt->cldev->dev;
+
+ mutex_lock(&wdt->reg_lock);
+
+ if (__mei_wdt_is_registered(wdt)) {
+ ret = 0;
+ goto out;
+ }
+
+ wdt->wdd.info = &wd_info;
+ wdt->wdd.ops = &wd_ops;
+ wdt->wdd.parent = dev;
+ wdt->wdd.timeout = MEI_WDT_DEFAULT_TIMEOUT;
+ wdt->wdd.min_timeout = MEI_WDT_MIN_TIMEOUT;
+ wdt->wdd.max_timeout = MEI_WDT_MAX_TIMEOUT;
+
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret) {
+ dev_err(dev, "unable to register watchdog device = %d.\n", ret);
+ watchdog_set_drvdata(&wdt->wdd, NULL);
+ }
+
+ wdt->state = MEI_WDT_IDLE;
+
+out:
+ mutex_unlock(&wdt->reg_lock);
+ return ret;
+}
+
+static void mei_wdt_unregister_work(struct work_struct *work)
+{
+ struct mei_wdt *wdt = container_of(work, struct mei_wdt, unregister);
+
+ mei_wdt_unregister(wdt);
+}
+
+/**
+ * mei_wdt_event_rx - callback for data receive
+ *
+ * @cldev: bus device
+ */
+static void mei_wdt_event_rx(struct mei_cl_device *cldev)
+{
+ struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+ struct mei_wdt_start_response res;
+ const size_t res_len = sizeof(res);
+ int ret;
+
+ ret = mei_cldev_recv(wdt->cldev, (u8 *)&res, res_len);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "failure in recv %d\n", ret);
+ return;
+ }
+
+ /* Empty response can be sent on stop */
+ if (ret == 0)
+ return;
+
+ if (ret < sizeof(struct mei_mc_hdr)) {
+ dev_err(&cldev->dev, "recv small data %d\n", ret);
+ return;
+ }
+
+ if (res.hdr.command != MEI_MANAGEMENT_CONTROL ||
+ res.hdr.versionnumber != MEI_MC_VERSION_NUMBER) {
+ dev_err(&cldev->dev, "wrong command received\n");
+ return;
+ }
+
+ if (res.hdr.subcommand != MEI_MC_START_WD_TIMER_RES) {
+ dev_warn(&cldev->dev, "unsupported command %d :%s[%d]\n",
+ res.hdr.subcommand,
+ mei_wdt_state_str(wdt->state),
+ wdt->state);
+ return;
+ }
+
+	/* Run the unregistration in a worker: it may only run after the
+	 * ping completes, otherwise the flow would deadlock on the
+	 * watchdog core mutex.
+	 */
+ if (wdt->state == MEI_WDT_RUNNING) {
+ if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
+ wdt->state = MEI_WDT_NOT_REQUIRED;
+ schedule_work(&wdt->unregister);
+ }
+ goto out;
+ }
+
+ if (wdt->state == MEI_WDT_PROBE) {
+ if (res.wdstate & MEI_WDT_WDSTATE_NOT_REQUIRED) {
+ wdt->state = MEI_WDT_NOT_REQUIRED;
+ } else {
+ /* stop the watchdog and register watchdog device */
+ mei_wdt_stop(wdt);
+ mei_wdt_register(wdt);
+ }
+ return;
+ }
+
+ dev_warn(&cldev->dev, "not in correct state %s[%d]\n",
+ mei_wdt_state_str(wdt->state), wdt->state);
+
+out:
+ if (!completion_done(&wdt->response))
+ complete(&wdt->response);
+}
+
+/*
+ * mei_wdt_notify_event - callback for event notification
+ *
+ * @cldev: bus device
+ */
+static void mei_wdt_notify_event(struct mei_cl_device *cldev)
+{
+ struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+
+ if (wdt->state != MEI_WDT_NOT_REQUIRED)
+ return;
+
+ mei_wdt_register(wdt);
+}
+
+/**
+ * mei_wdt_event - callback for event receive
+ *
+ * @cldev: bus device
+ * @events: event mask
+ * @context: callback context
+ */
+static void mei_wdt_event(struct mei_cl_device *cldev,
+ u32 events, void *context)
+{
+ if (events & BIT(MEI_CL_EVENT_RX))
+ mei_wdt_event_rx(cldev);
+
+ if (events & BIT(MEI_CL_EVENT_NOTIF))
+ mei_wdt_notify_event(cldev);
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mei_wdt *wdt = file->private_data;
+ const size_t bufsz = 32;
+ char buf[32];
+ ssize_t pos;
+
+ mutex_lock(&wdt->reg_lock);
+ pos = scnprintf(buf, bufsz, "%s\n",
+ __mei_wdt_is_registered(wdt) ? "activated" : "deactivated");
+ mutex_unlock(&wdt->reg_lock);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+}
+
+static const struct file_operations dbgfs_fops_activation = {
+ .open = simple_open,
+ .read = mei_dbgfs_read_activation,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t mei_dbgfs_read_state(struct file *file, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mei_wdt *wdt = file->private_data;
+ const size_t bufsz = 32;
+ char buf[bufsz];
+ ssize_t pos;
+
+ pos = scnprintf(buf, bufsz, "state: %s\n",
+ mei_wdt_state_str(wdt->state));
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, pos);
+}
+
+static const struct file_operations dbgfs_fops_state = {
+ .open = simple_open,
+ .read = mei_dbgfs_read_state,
+ .llseek = generic_file_llseek,
+};
+
+static void dbgfs_unregister(struct mei_wdt *wdt)
+{
+ debugfs_remove_recursive(wdt->dbgfs_dir);
+ wdt->dbgfs_dir = NULL;
+}
+
+static int dbgfs_register(struct mei_wdt *wdt)
+{
+ struct dentry *dir, *f;
+
+ dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ wdt->dbgfs_dir = dir;
+ f = debugfs_create_file("state", S_IRUSR, dir, wdt, &dbgfs_fops_state);
+ if (!f)
+ goto err;
+
+ f = debugfs_create_file("activation", S_IRUSR,
+ dir, wdt, &dbgfs_fops_activation);
+ if (!f)
+ goto err;
+
+ return 0;
+err:
+ dbgfs_unregister(wdt);
+ return -ENODEV;
+}
+
+#else
+
+static inline void dbgfs_unregister(struct mei_wdt *wdt) {}
+
+static inline int dbgfs_register(struct mei_wdt *wdt)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int mei_wdt_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct mei_wdt *wdt;
+ int ret;
+
+ wdt = kzalloc(sizeof(struct mei_wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->timeout = MEI_WDT_DEFAULT_TIMEOUT;
+ wdt->state = MEI_WDT_PROBE;
+ wdt->cldev = cldev;
+ wdt->resp_required = mei_cldev_ver(cldev) > 0x1;
+ mutex_init(&wdt->reg_lock);
+ init_completion(&wdt->response);
+ INIT_WORK(&wdt->unregister, mei_wdt_unregister_work);
+
+ mei_cldev_set_drvdata(cldev, wdt);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Could not enable cl device\n");
+ goto err_out;
+ }
+
+ ret = mei_cldev_register_event_cb(wdt->cldev,
+ BIT(MEI_CL_EVENT_RX) |
+ BIT(MEI_CL_EVENT_NOTIF),
+ mei_wdt_event, NULL);
+
+	/* On legacy devices notification is not supported;
+	 * this does not fail the registration for the RX event.
+	 */
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(&cldev->dev, "Could not register event ret=%d\n", ret);
+ goto err_disable;
+ }
+
+ wd_info.firmware_version = mei_cldev_ver(cldev);
+
+ if (wdt->resp_required)
+ ret = mei_wdt_ping(wdt);
+ else
+ ret = mei_wdt_register(wdt);
+
+ if (ret)
+ goto err_disable;
+
+ if (dbgfs_register(wdt))
+ dev_warn(&cldev->dev, "cannot register debugfs\n");
+
+ return 0;
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+err_out:
+ kfree(wdt);
+
+ return ret;
+}
+
+static int mei_wdt_remove(struct mei_cl_device *cldev)
+{
+ struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
+
+	/* Unblock a waiter on the completion in case of a FW-initiated or unexpected reset */
+ if (!completion_done(&wdt->response))
+ complete(&wdt->response);
+
+ cancel_work_sync(&wdt->unregister);
+
+ mei_wdt_unregister(wdt);
+
+ mei_cldev_disable(cldev);
+
+ dbgfs_unregister(wdt);
+
+ kfree(wdt);
+
+ return 0;
+}
+
+#define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
+ 0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
+
+static struct mei_cl_device_id mei_wdt_tbl[] = {
+ { .uuid = MEI_UUID_WD, .version = MEI_CL_VERSION_ANY },
+ /* required last entry */
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_wdt_tbl);
+
+static struct mei_cl_driver mei_wdt_driver = {
+ .id_table = mei_wdt_tbl,
+ .name = KBUILD_MODNAME,
+
+ .probe = mei_wdt_probe,
+ .remove = mei_wdt_remove,
+};
+
+static int __init mei_wdt_init(void)
+{
+ int ret;
+
+ ret = mei_cldev_driver_register(&mei_wdt_driver);
+ if (ret) {
+ pr_err(KBUILD_MODNAME ": module registration failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void __exit mei_wdt_exit(void)
+{
+ mei_cldev_driver_unregister(&mei_wdt_driver);
+}
+
+module_init(mei_wdt_init);
+module_exit(mei_wdt_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Device driver for Intel MEI iAMT watchdog");
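For reference, the bytecount arithmetic in mei_wdt_ping()/mei_wdt_stop() works out as follows (sizes follow from the __packed structures above):

/*
 * sizeof(struct mei_wdt_start_request) = 4 (hdr) + 2 (timeout)
 *                                      + 17 (reserved) = 23 bytes;
 * offsetof(struct mei_mc_hdr, subcommand) = 2, so
 * hdr.bytecount = 23 - 2 = 21: exactly the bytes that follow the
 * bytecount field itself, as its kernel-doc requires.
 */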
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index aea5d2f44ad7..56ea1caf71c3 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -62,7 +62,8 @@ struct meson_wdt_dev {
const struct meson_wdt_data *data;
};
-static int meson_wdt_restart(struct watchdog_device *wdt_dev)
+static int meson_wdt_restart(struct watchdog_device *wdt_dev,
+ unsigned long action, void *data)
{
struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
u32 tc_reboot = MESON_WDT_DC_RESET;
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 885c81bc4210..2c4a73d1e214 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -31,7 +31,8 @@ struct moxart_wdt_dev {
static int heartbeat;
-static int moxart_wdt_restart(struct watchdog_device *wdt_dev)
+static int moxart_wdt_restart(struct watchdog_device *wdt_dev,
+ unsigned long action, void *data)
{
struct moxart_wdt_dev *moxart_wdt = watchdog_get_drvdata(wdt_dev);
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index b78776c05554..7ed417a765c7 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -64,7 +64,8 @@ struct mtk_wdt_dev {
void __iomem *wdt_base;
};
-static int mtk_wdt_restart(struct watchdog_device *wdt_dev)
+static int mtk_wdt_restart(struct watchdog_device *wdt_dev,
+ unsigned long action, void *data)
{
struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
void __iomem *wdt_base;
diff --git a/drivers/watchdog/ni903x_wdt.c b/drivers/watchdog/ni903x_wdt.c
new file mode 100644
index 000000000000..dc67742e9018
--- /dev/null
+++ b/drivers/watchdog/ni903x_wdt.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2016 National Instruments Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/watchdog.h>
+
+#define NIWD_CONTROL 0x01
+#define NIWD_COUNTER2 0x02
+#define NIWD_COUNTER1 0x03
+#define NIWD_COUNTER0 0x04
+#define NIWD_SEED2 0x05
+#define NIWD_SEED1 0x06
+#define NIWD_SEED0 0x07
+
+#define NIWD_IO_SIZE 0x08
+
+#define NIWD_CONTROL_MODE 0x80
+#define NIWD_CONTROL_PROC_RESET 0x20
+#define NIWD_CONTROL_PET 0x10
+#define NIWD_CONTROL_RUNNING 0x08
+#define NIWD_CONTROL_CAPTURECOUNTER 0x04
+#define NIWD_CONTROL_RESET 0x02
+#define NIWD_CONTROL_ALARM 0x01
+
+#define NIWD_PERIOD_NS 30720
+#define NIWD_MIN_TIMEOUT 1
+#define NIWD_MAX_TIMEOUT 515
+#define NIWD_DEFAULT_TIMEOUT 60
+
+#define NIWD_NAME "ni903x_wdt"
+
+struct ni903x_wdt {
+ struct device *dev;
+ u16 io_base;
+ struct watchdog_device wdd;
+};
+
+static unsigned int timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. (default="
+ __MODULE_STRING(NIWD_DEFAULT_TIMEOUT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, S_IRUGO);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static void ni903x_start(struct ni903x_wdt *wdt)
+{
+ u8 control = inb(wdt->io_base + NIWD_CONTROL);
+
+ outb(control | NIWD_CONTROL_RESET, wdt->io_base + NIWD_CONTROL);
+ outb(control | NIWD_CONTROL_PET, wdt->io_base + NIWD_CONTROL);
+}
+
+static int ni903x_wdd_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct ni903x_wdt *wdt = watchdog_get_drvdata(wdd);
+ u32 counter = timeout * (1000000000 / NIWD_PERIOD_NS);
+
+ outb(((0x00FF0000 & counter) >> 16), wdt->io_base + NIWD_SEED2);
+ outb(((0x0000FF00 & counter) >> 8), wdt->io_base + NIWD_SEED1);
+ outb((0x000000FF & counter), wdt->io_base + NIWD_SEED0);
+
+ wdd->timeout = timeout;
+
+ return 0;
+}
+
+static unsigned int ni903x_wdd_get_timeleft(struct watchdog_device *wdd)
+{
+ struct ni903x_wdt *wdt = watchdog_get_drvdata(wdd);
+ u8 control, counter0, counter1, counter2;
+ u32 counter;
+
+ control = inb(wdt->io_base + NIWD_CONTROL);
+ control |= NIWD_CONTROL_CAPTURECOUNTER;
+ outb(control, wdt->io_base + NIWD_CONTROL);
+
+ counter2 = inb(wdt->io_base + NIWD_COUNTER2);
+ counter1 = inb(wdt->io_base + NIWD_COUNTER1);
+ counter0 = inb(wdt->io_base + NIWD_COUNTER0);
+
+ counter = (counter2 << 16) | (counter1 << 8) | counter0;
+
+ return counter / (1000000000 / NIWD_PERIOD_NS);
+}
+
+static int ni903x_wdd_ping(struct watchdog_device *wdd)
+{
+ struct ni903x_wdt *wdt = watchdog_get_drvdata(wdd);
+ u8 control;
+
+ control = inb(wdt->io_base + NIWD_CONTROL);
+ outb(control | NIWD_CONTROL_PET, wdt->io_base + NIWD_CONTROL);
+
+ return 0;
+}
+
+static int ni903x_wdd_start(struct watchdog_device *wdd)
+{
+ struct ni903x_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ outb(NIWD_CONTROL_RESET | NIWD_CONTROL_PROC_RESET,
+ wdt->io_base + NIWD_CONTROL);
+
+ ni903x_wdd_set_timeout(wdd, wdd->timeout);
+ ni903x_start(wdt);
+
+ return 0;
+}
+
+static int ni903x_wdd_stop(struct watchdog_device *wdd)
+{
+ struct ni903x_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ outb(NIWD_CONTROL_RESET, wdt->io_base + NIWD_CONTROL);
+
+ return 0;
+}
+
+static acpi_status ni903x_resources(struct acpi_resource *res, void *data)
+{
+ struct ni903x_wdt *wdt = data;
+ u16 io_size;
+
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_IO:
+ if (wdt->io_base != 0) {
+ dev_err(wdt->dev, "too many IO resources\n");
+ return AE_ERROR;
+ }
+
+ wdt->io_base = res->data.io.minimum;
+ io_size = res->data.io.address_length;
+
+ if (io_size < NIWD_IO_SIZE) {
+ dev_err(wdt->dev, "memory region too small\n");
+ return AE_ERROR;
+ }
+
+ if (!devm_request_region(wdt->dev, wdt->io_base, io_size,
+ NIWD_NAME)) {
+ dev_err(wdt->dev, "failed to get memory region\n");
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ default:
+ /* Ignore unsupported resources, e.g. IRQ */
+ return AE_OK;
+ }
+}
+
+static const struct watchdog_info ni903x_wdd_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "NI Watchdog",
+};
+
+static const struct watchdog_ops ni903x_wdd_ops = {
+ .owner = THIS_MODULE,
+ .start = ni903x_wdd_start,
+ .stop = ni903x_wdd_stop,
+ .ping = ni903x_wdd_ping,
+ .set_timeout = ni903x_wdd_set_timeout,
+ .get_timeleft = ni903x_wdd_get_timeleft,
+};
+
+static int ni903x_acpi_add(struct acpi_device *device)
+{
+ struct device *dev = &device->dev;
+ struct watchdog_device *wdd;
+ struct ni903x_wdt *wdt;
+ acpi_status status;
+ int ret;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ device->driver_data = wdt;
+ wdt->dev = dev;
+
+ status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+ ni903x_resources, wdt);
+ if (ACPI_FAILURE(status) || wdt->io_base == 0) {
+ dev_err(dev, "failed to get resources\n");
+ return -ENODEV;
+ }
+
+ wdd = &wdt->wdd;
+ wdd->info = &ni903x_wdd_info;
+ wdd->ops = &ni903x_wdd_ops;
+ wdd->min_timeout = NIWD_MIN_TIMEOUT;
+ wdd->max_timeout = NIWD_MAX_TIMEOUT;
+ wdd->timeout = NIWD_DEFAULT_TIMEOUT;
+ wdd->parent = dev;
+ watchdog_set_drvdata(wdd, wdt);
+ watchdog_set_nowayout(wdd, nowayout);
+ ret = watchdog_init_timeout(wdd, timeout, dev);
+ if (ret)
+ dev_err(dev, "unable to set timeout value, using default\n");
+
+ ret = watchdog_register_device(wdd);
+ if (ret) {
+ dev_err(dev, "failed to register watchdog\n");
+ return ret;
+ }
+
+ /* Switch from boot mode to user mode */
+ outb(NIWD_CONTROL_RESET | NIWD_CONTROL_MODE,
+ wdt->io_base + NIWD_CONTROL);
+
+ dev_dbg(dev, "io_base=0x%04X, timeout=%d, nowayout=%d\n",
+ wdt->io_base, timeout, nowayout);
+
+ return 0;
+}
+
+static int ni903x_acpi_remove(struct acpi_device *device)
+{
+ struct ni903x_wdt *wdt = acpi_driver_data(device);
+
+ ni903x_wdd_stop(&wdt->wdd);
+ watchdog_unregister_device(&wdt->wdd);
+
+ return 0;
+}
+
+static const struct acpi_device_id ni903x_device_ids[] = {
+ {"NIC775C", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ni903x_device_ids);
+
+static struct acpi_driver ni903x_acpi_driver = {
+ .name = NIWD_NAME,
+ .ids = ni903x_device_ids,
+ .ops = {
+ .add = ni903x_acpi_add,
+ .remove = ni903x_acpi_remove,
+ },
+};
+
+module_acpi_driver(ni903x_acpi_driver);
+
+MODULE_DESCRIPTION("NI 903x Watchdog");
+MODULE_AUTHOR("Jeff Westfahl <jeff.westfahl@ni.com>");
+MODULE_AUTHOR("Kyle Roeschley <kyle.roeschley@ni.com>");
+MODULE_LICENSE("GPL");
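A note on the seed arithmetic in ni903x_wdd_set_timeout() above: one watchdog tick is NIWD_PERIOD_NS (30720 ns) and the counter is 24 bits wide, which is what caps NIWD_MAX_TIMEOUT at 515 seconds. A minimal standalone sketch (plain C, not driver code) of how a timeout decomposes into the three SEED bytes:

#include <stdio.h>

#define NIWD_PERIOD_NS 30720	/* one watchdog tick, from the driver above */

int main(void)
{
	unsigned int timeout = 515;	/* NIWD_MAX_TIMEOUT, seconds */
	unsigned int counter = timeout * (1000000000 / NIWD_PERIOD_NS);

	printf("ticks = %u (24-bit max = %u)\n", counter, (1u << 24) - 1);
	printf("SEED2=0x%02x SEED1=0x%02x SEED0=0x%02x\n",
	       (counter >> 16) & 0xff, (counter >> 8) & 0xff, counter & 0xff);
	return 0;
}

At 515 s the tick count is 16,764,280, just under the 24-bit limit of 16,777,215, which is why 515 is the maximum.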
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 313cd1c6fda0..0529aed158a4 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -31,6 +31,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
#include <mach/hardware.h>
/* WatchDog Timer - Chapter 23 Page 207 */
@@ -124,6 +126,41 @@ static int pnx4008_wdt_set_timeout(struct watchdog_device *wdd,
return 0;
}
+static int pnx4008_restart_handler(struct watchdog_device *wdd,
+ unsigned long mode, void *cmd)
+{
+ const char *boot_cmd = cmd;
+
+ /*
+	 * Check whether a "cmd" string was passed from the userspace
+	 * program rebooting the system; if so, handle it.
+ * - For details, see the 'reboot' syscall in kernel/reboot.c
+ * - If the received "cmd" is not supported, use the default mode.
+ */
+ if (boot_cmd) {
+ if (boot_cmd[0] == 'h')
+ mode = REBOOT_HARD;
+ else if (boot_cmd[0] == 's')
+ mode = REBOOT_SOFT;
+ }
+
+ if (mode == REBOOT_SOFT) {
+ /* Force match output active */
+ writel(EXT_MATCH0, WDTIM_EMR(wdt_base));
+ /* Internal reset on match output (RESOUT_N not asserted) */
+ writel(M_RES1, WDTIM_MCTRL(wdt_base));
+ } else {
+		/* Instant assert of RESETOUT_N with a 1 ms pulse length */
+ writel(13000, WDTIM_PULSE(wdt_base));
+ writel(M_RES2 | RESFRC1 | RESFRC2, WDTIM_MCTRL(wdt_base));
+ }
+
+ /* Wait for watchdog to reset system */
+ mdelay(1000);
+
+ return NOTIFY_DONE;
+}
+
static const struct watchdog_info pnx4008_wdt_ident = {
.options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE |
WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
@@ -135,6 +172,7 @@ static const struct watchdog_ops pnx4008_wdt_ops = {
.start = pnx4008_wdt_start,
.stop = pnx4008_wdt_stop,
.set_timeout = pnx4008_wdt_set_timeout,
+ .restart = pnx4008_restart_handler,
};
static struct watchdog_device pnx4008_wdd = {
@@ -169,6 +207,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
WDIOF_CARDRESET : 0;
pnx4008_wdd.parent = &pdev->dev;
watchdog_set_nowayout(&pnx4008_wdd, nowayout);
+ watchdog_set_restart_priority(&pnx4008_wdd, 128);
pnx4008_wdt_stop(&pnx4008_wdd); /* disable for now */
@@ -178,8 +217,7 @@ static int pnx4008_wdt_probe(struct platform_device *pdev)
goto disable_clk;
}
- dev_info(&pdev->dev, "PNX4008 Watchdog Timer: heartbeat %d sec\n",
- pnx4008_wdd.timeout);
+ dev_info(&pdev->dev, "heartbeat %d sec\n", pnx4008_wdd.timeout);
return 0;
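For context on the restart handler above: the "cmd" string originates from the reboot(2) syscall's RESTART2 argument. A hedged userspace sketch (assuming root and a kernel with this handler wired up; running it really reboots the machine):

#include <linux/reboot.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* "soft" reaches pnx4008_restart_handler() as *cmd; its first
	 * character 's' selects REBOOT_SOFT (internal reset on match). */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_RESTART2, "soft");
}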
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 424f9a952fee..20563ccb7be0 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -70,7 +70,8 @@ static int qcom_wdt_set_timeout(struct watchdog_device *wdd,
return qcom_wdt_start(wdd);
}
-static int qcom_wdt_restart(struct watchdog_device *wdd)
+static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
+ void *data)
{
struct qcom_wdt *wdt = to_qcom_wdt(wdd);
u32 timeout;
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index 71e78ef4b736..3a75f3b53452 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
/* Fall through */
case WDIOC_GETTIMEOUT:
- return copy_to_user(argp, &timeout, sizeof(int));
+ return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
default:
return -ENOTTY;
}
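The one-line fix above reflects a general pattern: copy_to_user() returns the number of bytes left uncopied (0 on success), not an errno, so its result must be translated before an ioctl handler returns it. A hypothetical kernel-style helper illustrating the idiom:

#include <linux/uaccess.h>

/* Hypothetical helper, not part of the driver. */
static long example_get_timeout(void __user *argp, int timeout)
{
	if (copy_to_user(argp, &timeout, sizeof(timeout)))
		return -EFAULT;	/* partial or failed copy */
	return 0;		/* all bytes copied */
}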
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 0093450441fe..59e95762a6de 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -47,6 +47,8 @@
#define S3C2410_WTDAT 0x04
#define S3C2410_WTCNT 0x08
+#define S3C2410_WTCNT_MAXCNT 0xffff
+
#define S3C2410_WTCON_RSTEN (1 << 0)
#define S3C2410_WTCON_INTEN (1 << 2)
#define S3C2410_WTCON_ENABLE (1 << 5)
@@ -56,8 +58,11 @@
#define S3C2410_WTCON_DIV64 (2 << 3)
#define S3C2410_WTCON_DIV128 (3 << 3)
+#define S3C2410_WTCON_MAXDIV 0x80
+
#define S3C2410_WTCON_PRESCALE(x) ((x) << 8)
#define S3C2410_WTCON_PRESCALE_MASK (0xff << 8)
+#define S3C2410_WTCON_PRESCALE_MAX 0xff
#define CONFIG_S3C2410_WATCHDOG_ATBOOT (0)
#define CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME (15)
@@ -198,6 +203,14 @@ do { \
/* functions */
+static inline unsigned int s3c2410wdt_max_timeout(struct clk *clock)
+{
+ unsigned long freq = clk_get_rate(clock);
+
+ return S3C2410_WTCNT_MAXCNT / (freq / (S3C2410_WTCON_PRESCALE_MAX + 1)
+ / S3C2410_WTCON_MAXDIV);
+}
+
static inline struct s3c2410_wdt *freq_to_wdt(struct notifier_block *nb)
{
return container_of(nb, struct s3c2410_wdt, freq_transition);
@@ -349,7 +362,8 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeou
return 0;
}
-static int s3c2410wdt_restart(struct watchdog_device *wdd)
+static int s3c2410wdt_restart(struct watchdog_device *wdd, unsigned long action,
+ void *data)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
void __iomem *wdt_base = wdt->reg_base;
@@ -567,6 +581,9 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return ret;
}
+ wdt->wdt_device.min_timeout = 1;
+ wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt->clock);
+
ret = s3c2410wdt_cpufreq_register(wdt);
if (ret < 0) {
dev_err(dev, "failed to register cpufreq\n");
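To make the new s3c2410wdt_max_timeout() concrete: the 16-bit counter is clocked at freq / (prescaler + 1) / divider, so the slowest configuration uses the maximum prescaler (0xff) and divider (128). A standalone sketch with an assumed 66 MHz PCLK (the actual clock rate is board-specific):

#include <stdio.h>

int main(void)
{
	unsigned long freq = 66000000;	/* assumed PCLK, Hz */
	unsigned int maxcnt = 0xffff;	/* S3C2410_WTCNT_MAXCNT */
	unsigned int max_s = maxcnt / (freq / (0xff + 1) / 0x80);

	printf("max timeout = %u s\n", max_s);	/* 32 s at 66 MHz */
	return 0;
}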
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
new file mode 100644
index 000000000000..ad383f6f15fc
--- /dev/null
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -0,0 +1,408 @@
+/*
+ * SBSA(Server Base System Architecture) Generic Watchdog driver
+ *
+ * Copyright (c) 2015, Linaro Ltd.
+ * Author: Fu Wei <fu.wei@linaro.org>
+ * Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
+ * Al Stone <al.stone@linaro.org>
+ * Timur Tabi <timur@codeaurora.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The ARM SBSA Generic Watchdog has a two-stage timeout:
+ * the first signal (WS0) alerts the system via an interrupt,
+ * the second one (WS1) is a real hardware reset.
+ * More details about the hardware specification of this device:
+ * ARM DEN0029B - Server Base System Architecture (SBSA)
+ *
+ * This driver can operate the ARM SBSA Generic Watchdog as either a single
+ * stage or a two-stage watchdog, selected by the module parameter "action".
+ * In single stage mode, when the timeout is reached, the system is reset by
+ * WS1 and the first signal (WS0) is ignored.
+ * In two-stage mode, when the timeout is reached, the first signal (WS0)
+ * triggers a panic. If the system cannot be reset by that panic or restarted
+ * properly by the kdump kernel (if supported), the second stage (the same
+ * length as the first) is reached and the system is reset by WS1. This lets
+ * an administrator capture the system context via panic console output or
+ * kdump.
+ *
+ * SBSA GWDT:
+ * if action is 1 (the two stages mode):
+ * |--------WOR-------WS0--------WOR-------WS1
+ * |----timeout-----(panic)----timeout-----reset
+ *
+ * if action is 0 (the single stage mode):
+ * |------WOR-----WS0(ignored)-----WOR------WS1
+ * |--------------timeout-------------------reset
+ *
+ * Note: since each stage is timed by WOR, the timeout in single stage mode
+ * is (WOR * 2) while in two-stage mode it is WOR. The maximum timeout in
+ * two-stage mode is therefore half of that in single stage mode.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <asm/arch_timer.h>
+
+#define DRV_NAME "sbsa-gwdt"
+#define WATCHDOG_NAME "SBSA Generic Watchdog"
+
+/* SBSA Generic Watchdog register definitions */
+/* refresh frame */
+#define SBSA_GWDT_WRR 0x000
+
+/* control frame */
+#define SBSA_GWDT_WCS 0x000
+#define SBSA_GWDT_WOR 0x008
+#define SBSA_GWDT_WCV 0x010
+
+/* refresh/control frame */
+#define SBSA_GWDT_W_IIDR 0xfcc
+#define SBSA_GWDT_IDR 0xfd0
+
+/* Watchdog Control and Status Register */
+#define SBSA_GWDT_WCS_EN BIT(0)
+#define SBSA_GWDT_WCS_WS0 BIT(1)
+#define SBSA_GWDT_WCS_WS1 BIT(2)
+
+/**
+ * struct sbsa_gwdt - Internal representation of the SBSA GWDT
+ * @wdd: kernel watchdog_device structure
+ * @clk: store the System Counter clock frequency, in Hz.
+ * @refresh_base: Virtual address of the watchdog refresh frame
+ * @control_base: Virtual address of the watchdog control frame
+ */
+struct sbsa_gwdt {
+ struct watchdog_device wdd;
+ u32 clk;
+ void __iomem *refresh_base;
+ void __iomem *control_base;
+};
+
+#define DEFAULT_TIMEOUT 10 /* seconds */
+
+static unsigned int timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in seconds. (>=0, default="
+ __MODULE_STRING(DEFAULT_TIMEOUT) ")");
+
+/*
+ * action refers to action taken when watchdog gets WS0
+ * 0 = skip
+ * 1 = panic
+ * defaults to skip (0)
+ */
+static int action;
+module_param(action, int, 0);
+MODULE_PARM_DESC(action, "after watchdog gets WS0 interrupt, do: "
+ "0 = skip(*) 1 = panic");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * watchdog operation functions
+ */
+static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+
+ wdd->timeout = timeout;
+
+ if (action)
+ writel(gwdt->clk * timeout,
+ gwdt->control_base + SBSA_GWDT_WOR);
+ else
+ /*
+		 * In single stage mode, the first signal (WS0) is ignored
+		 * and the timeout is (WOR * 2), so WOR must be programmed
+		 * to half of the timeout value.
+ */
+ writel(gwdt->clk / 2 * timeout,
+ gwdt->control_base + SBSA_GWDT_WOR);
+
+ return 0;
+}
+
+static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+ u64 timeleft = 0;
+
+ /*
+ * In the single stage mode, if WS0 is deasserted
+ * (watchdog is in the first stage),
+ * timeleft = WOR + (WCV - system counter)
+ */
+ if (!action &&
+ !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
+ timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
+
+ timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
+ arch_counter_get_cntvct();
+
+ do_div(timeleft, gwdt->clk);
+
+ return timeleft;
+}
+
+static int sbsa_gwdt_keepalive(struct watchdog_device *wdd)
+{
+ struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+
+ /*
+	 * Writing to WRR triggers an explicit watchdog refresh.
+	 * Any value (such as 0) will do.
+ */
+ writel(0, gwdt->refresh_base + SBSA_GWDT_WRR);
+
+ return 0;
+}
+
+static unsigned int sbsa_gwdt_status(struct watchdog_device *wdd)
+{
+ struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+ u32 status = readl(gwdt->control_base + SBSA_GWDT_WCS);
+
+ /* is the watchdog timer running? */
+ return (status & SBSA_GWDT_WCS_EN) << WDOG_ACTIVE;
+}
+
+static int sbsa_gwdt_start(struct watchdog_device *wdd)
+{
+ struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+
+ /* writing WCS will cause an explicit watchdog refresh */
+ writel(SBSA_GWDT_WCS_EN, gwdt->control_base + SBSA_GWDT_WCS);
+
+ return 0;
+}
+
+static int sbsa_gwdt_stop(struct watchdog_device *wdd)
+{
+ struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+
+	/* Simply write 0 to WCS to clear the WCS_EN bit */
+ writel(0, gwdt->control_base + SBSA_GWDT_WCS);
+
+ return 0;
+}
+
+static irqreturn_t sbsa_gwdt_interrupt(int irq, void *dev_id)
+{
+ panic(WATCHDOG_NAME " timeout");
+
+ return IRQ_HANDLED;
+}
+
+static struct watchdog_info sbsa_gwdt_info = {
+ .identity = WATCHDOG_NAME,
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE |
+ WDIOF_CARDRESET,
+};
+
+static struct watchdog_ops sbsa_gwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = sbsa_gwdt_start,
+ .stop = sbsa_gwdt_stop,
+ .status = sbsa_gwdt_status,
+ .ping = sbsa_gwdt_keepalive,
+ .set_timeout = sbsa_gwdt_set_timeout,
+ .get_timeleft = sbsa_gwdt_get_timeleft,
+};
+
+static int sbsa_gwdt_probe(struct platform_device *pdev)
+{
+ void __iomem *rf_base, *cf_base;
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdd;
+ struct sbsa_gwdt *gwdt;
+ struct resource *res;
+ int ret, irq;
+ u32 status;
+
+ gwdt = devm_kzalloc(dev, sizeof(*gwdt), GFP_KERNEL);
+ if (!gwdt)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, gwdt);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cf_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cf_base))
+ return PTR_ERR(cf_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ rf_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(rf_base))
+ return PTR_ERR(rf_base);
+
+ /*
+	 * Get the frequency of the system counter from the CP15 interface of
+	 * the ARM generic timer. We don't need to check it, because if it
+	 * returned "0" the system would already have panicked very early on.
+ */
+ gwdt->clk = arch_timer_get_cntfrq();
+ gwdt->refresh_base = rf_base;
+ gwdt->control_base = cf_base;
+
+ wdd = &gwdt->wdd;
+ wdd->parent = dev;
+ wdd->info = &sbsa_gwdt_info;
+ wdd->ops = &sbsa_gwdt_ops;
+ wdd->min_timeout = 1;
+ wdd->max_timeout = U32_MAX / gwdt->clk;
+ wdd->timeout = DEFAULT_TIMEOUT;
+ watchdog_set_drvdata(wdd, gwdt);
+ watchdog_set_nowayout(wdd, nowayout);
+
+ status = readl(cf_base + SBSA_GWDT_WCS);
+ if (status & SBSA_GWDT_WCS_WS1) {
+ dev_warn(dev, "System reset by WDT.\n");
+ wdd->bootstatus |= WDIOF_CARDRESET;
+ }
+
+ if (action) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ action = 0;
+ dev_warn(dev, "unable to get ws0 interrupt.\n");
+ } else {
+ /*
+ * In case there is a pending ws0 interrupt, just ping
+ * the watchdog before registering the interrupt routine
+ */
+ writel(0, rf_base + SBSA_GWDT_WRR);
+ if (devm_request_irq(dev, irq, sbsa_gwdt_interrupt, 0,
+ pdev->name, gwdt)) {
+ action = 0;
+ dev_warn(dev, "unable to request IRQ %d.\n",
+ irq);
+ }
+ }
+ if (!action)
+ dev_warn(dev, "falling back to single stage mode.\n");
+ }
+ /*
+	 * In single stage mode, the first signal (WS0) is ignored and
+ * the timeout is (WOR * 2), so the maximum timeout should be doubled.
+ */
+ if (!action)
+ wdd->max_timeout *= 2;
+
+ watchdog_init_timeout(wdd, timeout, dev);
+ /*
+	 * Write the timeout to WOR. Because of the explicit watchdog
+	 * refresh mechanism, this also acts as a ping if the watchdog
+	 * is already enabled.
+ */
+ sbsa_gwdt_set_timeout(wdd, wdd->timeout);
+
+ ret = watchdog_register_device(wdd);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "Initialized with %ds timeout @ %u Hz, action=%d.%s\n",
+ wdd->timeout, gwdt->clk, action,
+ status & SBSA_GWDT_WCS_EN ? " [enabled]" : "");
+
+ return 0;
+}
+
+static void sbsa_gwdt_shutdown(struct platform_device *pdev)
+{
+ struct sbsa_gwdt *gwdt = platform_get_drvdata(pdev);
+
+ sbsa_gwdt_stop(&gwdt->wdd);
+}
+
+static int sbsa_gwdt_remove(struct platform_device *pdev)
+{
+ struct sbsa_gwdt *gwdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&gwdt->wdd);
+
+ return 0;
+}
+
+/* Disable watchdog if it is active during suspend */
+static int __maybe_unused sbsa_gwdt_suspend(struct device *dev)
+{
+ struct sbsa_gwdt *gwdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&gwdt->wdd))
+ sbsa_gwdt_stop(&gwdt->wdd);
+
+ return 0;
+}
+
+/* Enable watchdog if necessary */
+static int __maybe_unused sbsa_gwdt_resume(struct device *dev)
+{
+ struct sbsa_gwdt *gwdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&gwdt->wdd))
+ sbsa_gwdt_start(&gwdt->wdd);
+
+ return 0;
+}
+
+static const struct dev_pm_ops sbsa_gwdt_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sbsa_gwdt_suspend, sbsa_gwdt_resume)
+};
+
+static const struct of_device_id sbsa_gwdt_of_match[] = {
+ { .compatible = "arm,sbsa-gwdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sbsa_gwdt_of_match);
+
+static const struct platform_device_id sbsa_gwdt_pdev_match[] = {
+ { .name = DRV_NAME, },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, sbsa_gwdt_pdev_match);
+
+static struct platform_driver sbsa_gwdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &sbsa_gwdt_pm_ops,
+ .of_match_table = sbsa_gwdt_of_match,
+ },
+ .probe = sbsa_gwdt_probe,
+ .remove = sbsa_gwdt_remove,
+ .shutdown = sbsa_gwdt_shutdown,
+ .id_table = sbsa_gwdt_pdev_match,
+};
+
+module_platform_driver(sbsa_gwdt_driver);
+
+MODULE_DESCRIPTION("SBSA Generic Watchdog Driver");
+MODULE_AUTHOR("Fu Wei <fu.wei@linaro.org>");
+MODULE_AUTHOR("Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>");
+MODULE_AUTHOR("Al Stone <al.stone@linaro.org>");
+MODULE_AUTHOR("Timur Tabi <timur@codeaurora.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
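To illustrate the WOR programming rules in sbsa_gwdt_set_timeout() above, here is a standalone sketch assuming a 50 MHz system counter (CNTFRQ is platform-specific): in two-stage mode (action=1) WOR holds the full timeout in ticks, while in single stage mode each of the two identical stages gets half, and the usable maximum timeout doubles.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clk = 50000000;	/* assumed CNTFRQ, Hz */
	unsigned int timeout = 10;	/* DEFAULT_TIMEOUT, seconds */

	printf("two-stage WOR    = %u ticks\n", clk * timeout);
	printf("single stage WOR = %u ticks\n", clk / 2 * timeout);
	printf("max timeout: %u s (two-stage), %u s (single)\n",
	       UINT32_MAX / clk, UINT32_MAX / clk * 2);
	return 0;
}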
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index e027deb54740..953bb7b7446f 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -83,7 +83,8 @@ static const int wdt_timeout_map[] = {
};
-static int sunxi_wdt_restart(struct watchdog_device *wdt_dev)
+static int sunxi_wdt_restart(struct watchdog_device *wdt_dev,
+ unsigned long action, void *data)
{
struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
void __iomem *wdt_base = sunxi_wdt->wdt_base;
diff --git a/drivers/watchdog/tangox_wdt.c b/drivers/watchdog/tangox_wdt.c
index 709c1ed6fd79..cfbed7e051b6 100644
--- a/drivers/watchdog/tangox_wdt.c
+++ b/drivers/watchdog/tangox_wdt.c
@@ -139,6 +139,10 @@ static int tangox_wdt_probe(struct platform_device *pdev)
return err;
dev->clk_rate = clk_get_rate(dev->clk);
+ if (!dev->clk_rate) {
+ err = -EINVAL;
+ goto err;
+ }
dev->wdt.parent = &pdev->dev;
dev->wdt.info = &tangox_wdt_info;
@@ -171,10 +175,8 @@ static int tangox_wdt_probe(struct platform_device *pdev)
}
err = watchdog_register_device(&dev->wdt);
- if (err) {
- clk_disable_unprepare(dev->clk);
- return err;
- }
+ if (err)
+ goto err;
platform_set_drvdata(pdev, dev);
@@ -187,6 +189,10 @@ static int tangox_wdt_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "SMP86xx/SMP87xx watchdog registered\n");
return 0;
+
+ err:
+ clk_disable_unprepare(dev->clk);
+ return err;
}
static int tangox_wdt_remove(struct platform_device *pdev)
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index cab14bc9106c..09e8003039dc 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -45,10 +45,11 @@
static int wdt_io;
static int cr_wdt_timeout; /* WDT timeout register */
static int cr_wdt_control; /* WDT control register */
+static int cr_wdt_csr; /* WDT control & status register */
enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
- w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792 };
+ w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792, nct6102 };
static int timeout; /* in seconds */
module_param(timeout, int, 0);
@@ -92,15 +93,21 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
#define W83667HG_B_ID 0xb3
#define NCT6775_ID 0xb4
#define NCT6776_ID 0xc3
+#define NCT6102_ID 0xc4
#define NCT6779_ID 0xc5
#define NCT6791_ID 0xc8
#define NCT6792_ID 0xc9
#define W83627HF_WDT_TIMEOUT 0xf6
#define W83697HF_WDT_TIMEOUT 0xf4
+#define NCT6102D_WDT_TIMEOUT 0xf1
#define W83627HF_WDT_CONTROL 0xf5
#define W83697HF_WDT_CONTROL 0xf3
+#define NCT6102D_WDT_CONTROL 0xf0
+
+#define W836X7HF_WDT_CSR 0xf7
+#define NCT6102D_WDT_CSR 0xf2
static void superio_outb(int reg, int val)
{
@@ -197,6 +204,7 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
case nct6779:
case nct6791:
case nct6792:
+ case nct6102:
/*
* These chips have a fixed WDTO# output pin (W83627UHG),
* or support more than one WDTO# output pin.
@@ -229,8 +237,8 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
superio_outb(cr_wdt_control, t);
/* reset trigger, disable keyboard & mouse turning off watchdog */
- t = superio_inb(0xF7) & ~0xD0;
- superio_outb(0xF7, t);
+ t = superio_inb(cr_wdt_csr) & ~0xD0;
+ superio_outb(cr_wdt_csr, t);
superio_exit();
@@ -322,6 +330,7 @@ static int wdt_find(int addr)
cr_wdt_timeout = W83627HF_WDT_TIMEOUT;
cr_wdt_control = W83627HF_WDT_CONTROL;
+ cr_wdt_csr = W836X7HF_WDT_CSR;
ret = superio_enter();
if (ret)
@@ -387,6 +396,12 @@ static int wdt_find(int addr)
case NCT6792_ID:
ret = nct6792;
break;
+ case NCT6102_ID:
+ ret = nct6102;
+ cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
+ cr_wdt_control = NCT6102D_WDT_CONTROL;
+ cr_wdt_csr = NCT6102D_WDT_CSR;
+ break;
case 0xff:
ret = -ENODEV;
break;
@@ -422,6 +437,7 @@ static int __init wdt_init(void)
"NCT6779",
"NCT6791",
"NCT6792",
+ "NCT6102",
};
wdt_io = 0x2e;
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index e600fd93b7de..c1658fe73d58 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -164,7 +164,7 @@ static int watchdog_restart_notifier(struct notifier_block *nb,
int ret;
- ret = wdd->ops->restart(wdd);
+ ret = wdd->ops->restart(wdd, action, data);
if (ret)
return NOTIFY_BAD;
@@ -199,7 +199,7 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
return -EINVAL;
/* Mandatory operations need to be supported */
- if (wdd->ops->start == NULL || wdd->ops->stop == NULL)
+ if (!wdd->ops->start || (!wdd->ops->stop && !wdd->max_hw_heartbeat_ms))
return -EINVAL;
watchdog_check_min_max_timeout(wdd);
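The core change above widens the ->restart() prototype so handlers receive the reboot mode and the optional command string. A hedged sketch of a driver-side handler using the new signature (the foo_* names are hypothetical):

#include <linux/module.h>
#include <linux/watchdog.h>

static int foo_wdt_restart(struct watchdog_device *wdd,
			   unsigned long action, void *data)
{
	/* A real handler would program the shortest possible timeout
	 * here so the hardware fires immediately; register writes are
	 * omitted in this sketch. "data" may carry the reboot command
	 * string, as in the pnx4008 handler earlier in this series. */
	return 0;
}

static const struct watchdog_ops foo_wdt_ops = {
	.owner	 = THIS_MODULE,
	.restart = foo_wdt_restart,
	/* .start, .stop, etc. omitted for brevity */
};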
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index ba2ecce4aae6..e2c5abbb45ff 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -36,6 +36,7 @@
#include <linux/errno.h> /* For the -ENODEV/... values */
#include <linux/fs.h> /* For file operations */
#include <linux/init.h> /* For __init/__exit/... */
+#include <linux/jiffies.h> /* For timeout functions */
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/kref.h> /* For data references */
#include <linux/miscdevice.h> /* For handling misc devices */
@@ -44,6 +45,7 @@
#include <linux/slab.h> /* For memory functions */
#include <linux/types.h> /* For standard types (like size_t) */
#include <linux/watchdog.h> /* For watchdog specific items */
+#include <linux/workqueue.h> /* For workqueue */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include "watchdog_core.h"
@@ -61,6 +63,9 @@ struct watchdog_core_data {
struct cdev cdev;
struct watchdog_device *wdd;
struct mutex lock;
+ unsigned long last_keepalive;
+ unsigned long last_hw_keepalive;
+ struct delayed_work work;
unsigned long status; /* Internal status bits */
#define _WDOG_DEV_OPEN 0 /* Opened ? */
#define _WDOG_ALLOW_RELEASE 1 /* Did we receive the magic char ? */
@@ -71,6 +76,91 @@ static dev_t watchdog_devt;
/* Reference to watchdog device behind /dev/watchdog */
static struct watchdog_core_data *old_wd_data;
+static struct workqueue_struct *watchdog_wq;
+
+static inline bool watchdog_need_worker(struct watchdog_device *wdd)
+{
+ /* All variables in milli-seconds */
+ unsigned int hm = wdd->max_hw_heartbeat_ms;
+ unsigned int t = wdd->timeout * 1000;
+
+ /*
+ * A worker to generate heartbeat requests is needed if all of the
+ * following conditions are true.
+ * - Userspace activated the watchdog.
+ * - The driver provided a value for the maximum hardware timeout, and
+ * thus is aware that the framework supports generating heartbeat
+ * requests.
+ * - Userspace requests a longer timeout than the hardware can handle.
+ */
+ return hm && ((watchdog_active(wdd) && t > hm) ||
+ (t && !watchdog_active(wdd) && watchdog_hw_running(wdd)));
+}
+
+static long watchdog_next_keepalive(struct watchdog_device *wdd)
+{
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+ unsigned int timeout_ms = wdd->timeout * 1000;
+ unsigned long keepalive_interval;
+ unsigned long last_heartbeat;
+ unsigned long virt_timeout;
+ unsigned int hw_heartbeat_ms;
+
+ virt_timeout = wd_data->last_keepalive + msecs_to_jiffies(timeout_ms);
+ hw_heartbeat_ms = min(timeout_ms, wdd->max_hw_heartbeat_ms);
+ keepalive_interval = msecs_to_jiffies(hw_heartbeat_ms / 2);
+
+ if (!watchdog_active(wdd))
+ return keepalive_interval;
+
+ /*
+ * To ensure that the watchdog times out wdd->timeout seconds
+ * after the most recent ping from userspace, the last
+	 * worker ping has to come at least hw_heartbeat_ms before this timeout.
+ */
+ last_heartbeat = virt_timeout - msecs_to_jiffies(hw_heartbeat_ms);
+ return min_t(long, last_heartbeat - jiffies, keepalive_interval);
+}
+
+static inline void watchdog_update_worker(struct watchdog_device *wdd)
+{
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+
+ if (watchdog_need_worker(wdd)) {
+ long t = watchdog_next_keepalive(wdd);
+
+ if (t > 0)
+ mod_delayed_work(watchdog_wq, &wd_data->work, t);
+ } else {
+ cancel_delayed_work(&wd_data->work);
+ }
+}
+
+static int __watchdog_ping(struct watchdog_device *wdd)
+{
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+ unsigned long earliest_keepalive = wd_data->last_hw_keepalive +
+ msecs_to_jiffies(wdd->min_hw_heartbeat_ms);
+ int err;
+
+ if (time_is_after_jiffies(earliest_keepalive)) {
+ mod_delayed_work(watchdog_wq, &wd_data->work,
+ earliest_keepalive - jiffies);
+ return 0;
+ }
+
+ wd_data->last_hw_keepalive = jiffies;
+
+ if (wdd->ops->ping)
+ err = wdd->ops->ping(wdd); /* ping the watchdog */
+ else
+ err = wdd->ops->start(wdd); /* restart watchdog */
+
+ watchdog_update_worker(wdd);
+
+ return err;
+}
+
/*
* watchdog_ping: ping the watchdog.
* @wdd: the watchdog device to ping
@@ -85,17 +175,28 @@ static struct watchdog_core_data *old_wd_data;
static int watchdog_ping(struct watchdog_device *wdd)
{
- int err;
+ struct watchdog_core_data *wd_data = wdd->wd_data;
- if (!watchdog_active(wdd))
+ if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
return 0;
- if (wdd->ops->ping)
- err = wdd->ops->ping(wdd); /* ping the watchdog */
- else
- err = wdd->ops->start(wdd); /* restart watchdog */
+ wd_data->last_keepalive = jiffies;
+ return __watchdog_ping(wdd);
+}
- return err;
+static void watchdog_ping_work(struct work_struct *work)
+{
+ struct watchdog_core_data *wd_data;
+ struct watchdog_device *wdd;
+
+ wd_data = container_of(to_delayed_work(work), struct watchdog_core_data,
+ work);
+
+ mutex_lock(&wd_data->lock);
+ wdd = wd_data->wdd;
+ if (wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd)))
+ __watchdog_ping(wdd);
+ mutex_unlock(&wd_data->lock);
}
/*
@@ -111,14 +212,23 @@ static int watchdog_ping(struct watchdog_device *wdd)
static int watchdog_start(struct watchdog_device *wdd)
{
+ struct watchdog_core_data *wd_data = wdd->wd_data;
+ unsigned long started_at;
int err;
if (watchdog_active(wdd))
return 0;
- err = wdd->ops->start(wdd);
- if (err == 0)
+ started_at = jiffies;
+ if (watchdog_hw_running(wdd) && wdd->ops->ping)
+ err = wdd->ops->ping(wdd);
+ else
+ err = wdd->ops->start(wdd);
+ if (err == 0) {
set_bit(WDOG_ACTIVE, &wdd->status);
+ wd_data->last_keepalive = started_at;
+ watchdog_update_worker(wdd);
+ }
return err;
}
@@ -137,7 +247,7 @@ static int watchdog_start(struct watchdog_device *wdd)
static int watchdog_stop(struct watchdog_device *wdd)
{
- int err;
+ int err = 0;
if (!watchdog_active(wdd))
return 0;
@@ -148,9 +258,15 @@ static int watchdog_stop(struct watchdog_device *wdd)
return -EBUSY;
}
- err = wdd->ops->stop(wdd);
- if (err == 0)
+ if (wdd->ops->stop)
+ err = wdd->ops->stop(wdd);
+ else
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+ if (err == 0) {
clear_bit(WDOG_ACTIVE, &wdd->status);
+ watchdog_update_worker(wdd);
+ }
return err;
}
@@ -183,13 +299,22 @@ static unsigned int watchdog_get_status(struct watchdog_device *wdd)
static int watchdog_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
- if (!wdd->ops->set_timeout || !(wdd->info->options & WDIOF_SETTIMEOUT))
+ int err = 0;
+
+ if (!(wdd->info->options & WDIOF_SETTIMEOUT))
return -EOPNOTSUPP;
if (watchdog_timeout_invalid(wdd, timeout))
return -EINVAL;
- return wdd->ops->set_timeout(wdd, timeout);
+ if (wdd->ops->set_timeout)
+ err = wdd->ops->set_timeout(wdd, timeout);
+ else
+ wdd->timeout = timeout;
+
+ watchdog_update_worker(wdd);
+
+ return err;
}
/*
@@ -538,7 +663,7 @@ static int watchdog_open(struct inode *inode, struct file *file)
* If the /dev/watchdog device is open, we don't want the module
* to be unloaded.
*/
- if (!try_module_get(wdd->ops->owner)) {
+ if (!watchdog_hw_running(wdd) && !try_module_get(wdd->ops->owner)) {
err = -EBUSY;
goto out_clear;
}
@@ -549,7 +674,8 @@ static int watchdog_open(struct inode *inode, struct file *file)
file->private_data = wd_data;
- kref_get(&wd_data->kref);
+ if (!watchdog_hw_running(wdd))
+ kref_get(&wd_data->kref);
/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
return nonseekable_open(inode, file);
@@ -585,6 +711,7 @@ static int watchdog_release(struct inode *inode, struct file *file)
struct watchdog_core_data *wd_data = file->private_data;
struct watchdog_device *wdd;
int err = -EBUSY;
+ bool running;
mutex_lock(&wd_data->lock);
@@ -609,14 +736,24 @@ static int watchdog_release(struct inode *inode, struct file *file)
watchdog_ping(wdd);
}
+ cancel_delayed_work_sync(&wd_data->work);
+ watchdog_update_worker(wdd);
+
/* make sure that /dev/watchdog can be re-opened */
clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
done:
+ running = wdd && watchdog_hw_running(wdd);
mutex_unlock(&wd_data->lock);
- /* Allow the owner module to be unloaded again */
- module_put(wd_data->cdev.owner);
- kref_put(&wd_data->kref, watchdog_core_data_release);
+ /*
+ * Allow the owner module to be unloaded again unless the watchdog
+	 * is still running. If the watchdog is still running, it cannot
+ * be stopped, and its driver must not be unloaded.
+ */
+ if (!running) {
+ module_put(wd_data->cdev.owner);
+ kref_put(&wd_data->kref, watchdog_core_data_release);
+ }
return 0;
}
@@ -658,6 +795,11 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
wd_data->wdd = wdd;
wdd->wd_data = wd_data;
+ if (!watchdog_wq)
+ return -ENODEV;
+
+ INIT_DELAYED_WORK(&wd_data->work, watchdog_ping_work);
+
if (wdd->id == 0) {
old_wd_data = wd_data;
watchdog_miscdev.parent = wdd->parent;
@@ -688,8 +830,23 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
old_wd_data = NULL;
kref_put(&wd_data->kref, watchdog_core_data_release);
}
+ return err;
}
- return err;
+
+ /* Record time of most recent heartbeat as 'just before now'. */
+ wd_data->last_hw_keepalive = jiffies - 1;
+
+ /*
+ * If the watchdog is running, prevent its driver from being unloaded,
+ * and schedule an immediate ping.
+ */
+ if (watchdog_hw_running(wdd)) {
+ __module_get(wdd->ops->owner);
+ kref_get(&wd_data->kref);
+ queue_delayed_work(watchdog_wq, &wd_data->work, 0);
+ }
+
+ return 0;
}
/*
@@ -715,6 +872,8 @@ static void watchdog_cdev_unregister(struct watchdog_device *wdd)
wdd->wd_data = NULL;
mutex_unlock(&wd_data->lock);
+ cancel_delayed_work_sync(&wd_data->work);
+
kref_put(&wd_data->kref, watchdog_core_data_release);
}
@@ -780,6 +939,13 @@ int __init watchdog_dev_init(void)
{
int err;
+ watchdog_wq = alloc_workqueue("watchdogd",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+ if (!watchdog_wq) {
+ pr_err("Failed to create watchdog workqueue\n");
+ return -ENOMEM;
+ }
+
err = class_register(&watchdog_class);
if (err < 0) {
pr_err("couldn't register class\n");
@@ -806,4 +972,5 @@ void __exit watchdog_dev_exit(void)
{
unregister_chrdev_region(watchdog_devt, MAX_DOGS);
class_unregister(&watchdog_class);
+ destroy_workqueue(watchdog_wq);
}
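The worker logic above can be summarized numerically. Suppose userspace asks for a 60 s timeout but the hardware supports at most 8 s (max_hw_heartbeat_ms = 8000): the worker then pings every 4 s, and the last worker ping must land no later than 52 s after the most recent userspace ping so one full hardware heartbeat still covers the deadline. A standalone sketch of that arithmetic, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int timeout_ms = 60000;	/* userspace timeout */
	unsigned int max_hw_ms = 8000;		/* hardware limit */
	unsigned int hw_ms = timeout_ms < max_hw_ms ? timeout_ms : max_hw_ms;

	printf("worker interval     = %u ms\n", hw_ms / 2);
	printf("last worker ping at = %u ms after userspace ping\n",
	       timeout_ms - hw_ms);
	return 0;
}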
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index 0c7cb7302cf0..cbe373de3659 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -36,7 +36,7 @@
#define ZIIRAVE_STATE_OFF 0x1
#define ZIIRAVE_STATE_ON 0x2
-static char *ziirave_reasons[] = {"power cycle", "triggered", NULL, NULL,
+static char *ziirave_reasons[] = {"power cycle", "hw watchdog", NULL, NULL,
"host request", NULL, "illegal configuration",
"illegal instruction", "illegal trap",
"unknown"};
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 73708acce3ca..979a8317204f 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -37,23 +37,30 @@ config XEN_BALLOON_MEMORY_HOTPLUG
Memory could be hotplugged in following steps:
- 1) dom0: xl mem-max <domU> <maxmem>
+	  1) target domain: ensure that the memory auto-online policy is in
+	     effect by checking the /sys/devices/system/memory/auto_online_blocks
+	     file (it should read 'online').
+
+ 2) control domain: xl mem-max <target-domain> <maxmem>
where <maxmem> is >= requested memory size,
- 2) dom0: xl mem-set <domU> <memory>
+ 3) control domain: xl mem-set <target-domain> <memory>
where <memory> is requested memory size; alternatively memory
could be added by writing proper value to
/sys/devices/system/xen_memory/xen_memory0/target or
- /sys/devices/system/xen_memory/xen_memory0/target_kb on dumU,
+ /sys/devices/system/xen_memory/xen_memory0/target_kb on the
+ target domain.
- 3) domU: for i in /sys/devices/system/memory/memory*/state; do \
- [ "`cat "$i"`" = offline ] && echo online > "$i"; done
+	  Alternatively, if memory auto-onlining was not requested at step 1,
+	  the newly added memory can be manually onlined in the target domain
+ by doing the following:
- Memory could be onlined automatically on domU by adding following line to udev rules:
+ for i in /sys/devices/system/memory/memory*/state; do \
+ [ "`cat "$i"`" = offline ] && echo online > "$i"; done
- SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"
+ or by adding the following line to udev rules:
- In that case step 3 should be omitted.
+ SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"
config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
int "Hotplugged memory limit (in GiB) for a PV guest"
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 12eab503efd1..9781e0dd59d6 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -42,7 +42,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
@@ -257,7 +256,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
return NULL;
res->name = "System RAM";
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
ret = allocate_resource(&iomem_resource, res,
size, 0, -1,
@@ -338,7 +337,16 @@ static enum bp_state reserve_additional_memory(void)
}
#endif
- rc = add_memory_resource(nid, resource);
+ /*
+ * add_memory_resource() will call online_pages() which in its turn
+ * will call xen_online_page() callback causing deadlock if we don't
+ * release balloon_mutex here. Unlocking here is safe because the
+ * callers drop the mutex before trying again.
+ */
+ mutex_unlock(&balloon_mutex);
+ rc = add_memory_resource(nid, resource, memhp_auto_online);
+ mutex_lock(&balloon_mutex);
+
if (rc) {
pr_warn("Cannot add additional memory (%i)\n", rc);
goto err;
@@ -751,7 +759,4 @@ static int __init balloon_init(void)
return 0;
}
-
subsys_initcall(balloon_init);
-
-MODULE_LICENSE("GPL");
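The unlock/relock around add_memory_resource() above is an instance of a general pattern: temporarily dropping a mutex around a call that re-enters a callback taking the same lock, with callers revalidating state afterwards. A hedged kernel-style sketch (names hypothetical):

#include <linux/mutex.h>

static DEFINE_MUTEX(foo_mutex);

/* Called with foo_mutex held; reenters_callback() also takes foo_mutex,
 * so holding it across the call would deadlock. Safe only because every
 * caller revalidates its state after this returns. */
static void foo_add_resource(void (*reenters_callback)(void))
{
	mutex_unlock(&foo_mutex);
	reenters_callback();
	mutex_lock(&foo_mutex);
}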
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 7dd46312c180..bdff01095f54 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -9,7 +9,6 @@
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/module.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
@@ -38,8 +37,9 @@
/* Find the first set bit in a evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)
-static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD],
- cpu_evtchn_mask);
+#define EVTCHN_MASK_SIZE (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)
+
+static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_MASK_SIZE], cpu_evtchn_mask);
static unsigned evtchn_2l_max_channels(void)
{
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 524c22146429..cb7138c97c69 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -26,7 +26,7 @@
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
{
int evtchn = evtchn_from_irq(data->irq);
- irq_move_irq(data);
+ if (!VALID_EVTCHN(evtchn))
+ return;
- if (VALID_EVTCHN(evtchn))
+ if (unlikely(irqd_is_setaffinity_pending(data))) {
+ int masked = test_and_set_mask(evtchn);
+
+ clear_evtchn(evtchn);
+
+ irq_move_masked_irq(data);
+
+ if (!masked)
+ unmask_evtchn(evtchn);
+ } else
clear_evtchn(evtchn);
}
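Both hunks above replace irq_move_irq() with the same mask-ack-move-unmask sequence; masking the event channel first guarantees no new event can fire while the IRQ migrates to another CPU. A consolidated, commented restatement of the pattern (it assumes the evtchn helpers local to events_base.c, so it is illustrative rather than standalone):

static void example_ack_and_maybe_move(struct irq_data *data, int evtchn)
{
	if (unlikely(irqd_is_setaffinity_pending(data))) {
		int masked = test_and_set_mask(evtchn);	/* remember old state */

		clear_evtchn(evtchn);		/* ack while masked */
		irq_move_masked_irq(data);	/* migrate with no races */
		if (!masked)
			unmask_evtchn(evtchn);	/* restore original mask */
	} else {
		clear_evtchn(evtchn);		/* plain ack, no migration */
	}
}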
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index eff2b88003d9..9289a17712e2 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -36,7 +36,6 @@
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/module.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
diff --git a/drivers/xen/features.c b/drivers/xen/features.c
index 99eda169c779..d7d34fdfc993 100644
--- a/drivers/xen/features.c
+++ b/drivers/xen/features.c
@@ -7,7 +7,7 @@
*/
#include <linux/types.h>
#include <linux/cache.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/xen/hypercall.h>
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index effbaf91791f..bb36b1e1dbcc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -33,7 +33,6 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 3454973dc3bb..cf9666680c8c 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -2,6 +2,9 @@
* platform-pci.c
*
* Xen platform PCI device driver
+ *
+ * Authors: ssmith@xensource.com and stefano.stabellini@eu.citrix.com
+ *
* Copyright (c) 2005, Intel Corporation.
* Copyright (c) 2007, XenSource Inc.
* Copyright (c) 2010, Citrix
@@ -24,7 +27,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/pci.h>
#include <xen/platform_pci.h>
@@ -36,10 +39,6 @@
#define DRV_NAME "xen-platform-pci"
-MODULE_AUTHOR("ssmith@xensource.com and stefano.stabellini@eu.citrix.com");
-MODULE_DESCRIPTION("Xen platform PCI device");
-MODULE_LICENSE("GPL");
-
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
@@ -101,8 +100,8 @@ static int platform_pci_resume(struct pci_dev *pdev)
return 0;
}
-static int platform_pci_init(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int platform_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int i, ret;
long ioaddr;
@@ -181,20 +180,17 @@ static struct pci_device_id platform_pci_tbl[] = {
{0,}
};
-MODULE_DEVICE_TABLE(pci, platform_pci_tbl);
-
static struct pci_driver platform_driver = {
.name = DRV_NAME,
- .probe = platform_pci_init,
+ .probe = platform_pci_probe,
.id_table = platform_pci_tbl,
#ifdef CONFIG_PM
.resume_early = platform_pci_resume,
#endif
};
-static int __init platform_pci_module_init(void)
+static int __init platform_pci_init(void)
{
return pci_register_driver(&platform_driver);
}
-
-module_init(platform_pci_module_init);
+device_initcall(platform_pci_init);
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index b5a7342e0ba5..6881b3ceb675 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/err.h>
@@ -50,11 +50,6 @@ static int __init xen_sysfs_type_init(void)
return sysfs_create_file(hypervisor_kobj, &type_attr.attr);
}
-static void xen_sysfs_type_destroy(void)
-{
- sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
-}
-
/* xen version attributes */
static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
{
@@ -111,11 +106,6 @@ static int __init xen_sysfs_version_init(void)
return sysfs_create_group(hypervisor_kobj, &version_group);
}
-static void xen_sysfs_version_destroy(void)
-{
- sysfs_remove_group(hypervisor_kobj, &version_group);
-}
-
/* UUID */
static ssize_t uuid_show_fallback(struct hyp_sysfs_attr *attr, char *buffer)
@@ -157,11 +147,6 @@ static int __init xen_sysfs_uuid_init(void)
return sysfs_create_file(hypervisor_kobj, &uuid_attr.attr);
}
-static void xen_sysfs_uuid_destroy(void)
-{
- sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr);
-}
-
/* xen compilation attributes */
static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
@@ -235,11 +220,6 @@ static int __init xen_compilation_init(void)
return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
}
-static void xen_compilation_destroy(void)
-{
- sysfs_remove_group(hypervisor_kobj, &xen_compilation_group);
-}
-
/* xen properties info */
static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
@@ -366,11 +346,6 @@ static int __init xen_properties_init(void)
return sysfs_create_group(hypervisor_kobj, &xen_properties_group);
}
-static void xen_properties_destroy(void)
-{
- sysfs_remove_group(hypervisor_kobj, &xen_properties_group);
-}
-
#ifdef CONFIG_XEN_HAVE_VPMU
struct pmu_mode {
const char *name;
@@ -484,11 +459,6 @@ static int __init xen_pmu_init(void)
{
return sysfs_create_group(hypervisor_kobj, &xen_pmu_group);
}
-
-static void xen_pmu_destroy(void)
-{
- sysfs_remove_group(hypervisor_kobj, &xen_pmu_group);
-}
#endif
static int __init hyper_sysfs_init(void)
@@ -517,7 +487,8 @@ static int __init hyper_sysfs_init(void)
if (xen_initial_domain()) {
ret = xen_pmu_init();
if (ret) {
- xen_properties_destroy();
+ sysfs_remove_group(hypervisor_kobj,
+ &xen_properties_group);
goto prop_out;
}
}
@@ -525,31 +496,17 @@ static int __init hyper_sysfs_init(void)
goto out;
prop_out:
- xen_sysfs_uuid_destroy();
+ sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr);
uuid_out:
- xen_compilation_destroy();
+ sysfs_remove_group(hypervisor_kobj, &xen_compilation_group);
comp_out:
- xen_sysfs_version_destroy();
+ sysfs_remove_group(hypervisor_kobj, &version_group);
version_out:
- xen_sysfs_type_destroy();
+ sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
out:
return ret;
}
-
-static void __exit hyper_sysfs_exit(void)
-{
-#ifdef CONFIG_XEN_HAVE_VPMU
- xen_pmu_destroy();
-#endif
- xen_properties_destroy();
- xen_compilation_destroy();
- xen_sysfs_uuid_destroy();
- xen_sysfs_version_destroy();
- xen_sysfs_type_destroy();
-
-}
-module_init(hyper_sysfs_init);
-module_exit(hyper_sysfs_exit);
+device_initcall(hyper_sysfs_init);
static ssize_t hyp_sysfs_show(struct kobject *kobj,
struct attribute *attr,
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 39e7ef8d3957..79865b8901ba 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -33,7 +33,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm_types.h>
+#include <linux/init.h>
#include <linux/capability.h>
#include <xen/xen.h>
@@ -109,14 +111,6 @@ static int __init balloon_init(void)
}
subsys_initcall(balloon_init);
-static void balloon_exit(void)
-{
- /* XXX - release balloon here */
- return;
-}
-
-module_exit(balloon_exit);
-
#define BALLOON_SHOW(name, format, args...) \
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, \
@@ -250,5 +244,3 @@ static int register_balloon(struct device *dev)
return 0;
}
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 9c234209d8b5..8e67336f8ddd 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -10,7 +10,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index fb0221434f81..2f19dd7553e6 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -6,7 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <xen/events.h>
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 4843741e703a..c252eb3f0176 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -6,7 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index c46ee189466f..ff932624eaad 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -141,6 +141,8 @@ struct scsiback_tmr {
wait_queue_head_t tmr_wait;
};
+#define VSCSI_DEFAULT_SESSION_TAGS 128
+
struct scsiback_nexus {
/* Pointer to TCM session for I_T Nexus */
struct se_session *tvn_se_sess;
@@ -190,7 +192,6 @@ module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in backend buffer");
-static struct kmem_cache *scsiback_cachep;
static DEFINE_SPINLOCK(free_pages_lock);
static int free_pages_num;
static LIST_HEAD(scsiback_free_pages);
@@ -321,11 +322,11 @@ static void scsiback_free_translation_entry(struct kref *kref)
kfree(entry);
}
-static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
- uint32_t resid, struct vscsibk_pend *pending_req)
+static void scsiback_send_response(struct vscsibk_info *info,
+ char *sense_buffer, int32_t result, uint32_t resid,
+ uint16_t rqid)
{
struct vscsiif_response *ring_res;
- struct vscsibk_info *info = pending_req->info;
int notify;
struct scsi_sense_hdr sshdr;
unsigned long flags;
@@ -337,7 +338,7 @@ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
info->ring.rsp_prod_pvt++;
ring_res->rslt = result;
- ring_res->rqid = pending_req->rqid;
+ ring_res->rqid = rqid;
if (sense_buffer != NULL &&
scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
@@ -357,6 +358,13 @@ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
if (notify)
notify_remote_via_irq(info->irq);
+}
+
+static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
+ uint32_t resid, struct vscsibk_pend *pending_req)
+{
+ scsiback_send_response(pending_req->info, sense_buffer, result,
+ resid, pending_req->rqid);
if (pending_req->v2p)
kref_put(&pending_req->v2p->kref,
@@ -380,6 +388,12 @@ static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
scsiback_fast_flush_area(pending_req);
scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
scsiback_put(info);
+ /*
+	 * Drop the extra TARGET_SCF_ACK_KREF reference taken by
+	 * target_submit_cmd_map_sgls() ahead of the final se_cmd->cmd_kref
+	 * put in scsiback_check_stop_free() -> transport_generic_free_cmd().
+ */
+ target_put_sess_cmd(&pending_req->se_cmd);
}
static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
@@ -388,16 +402,12 @@ static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
int rc;
- memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
-
- memset(se_cmd, 0, sizeof(*se_cmd));
-
scsiback_get(pending_req->info);
se_cmd->tag = pending_req->rqid;
rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
pending_req->sense_buffer, pending_req->v2p->lun,
pending_req->data_len, 0,
- pending_req->sc_data_direction, 0,
+ pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
pending_req->sgl, pending_req->n_sg,
NULL, 0, NULL, 0);
if (rc < 0) {
@@ -586,45 +596,40 @@ static void scsiback_disconnect(struct vscsibk_info *info)
static void scsiback_device_action(struct vscsibk_pend *pending_req,
enum tcm_tmreq_table act, int tag)
{
- int rc, err = FAILED;
struct scsiback_tpg *tpg = pending_req->v2p->tpg;
+ struct scsiback_nexus *nexus = tpg->tpg_nexus;
struct se_cmd *se_cmd = &pending_req->se_cmd;
struct scsiback_tmr *tmr;
+ u64 unpacked_lun = pending_req->v2p->lun;
+ int rc, err = FAILED;
tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
- if (!tmr)
- goto out;
+ if (!tmr) {
+ target_put_sess_cmd(se_cmd);
+ goto err;
+ }
init_waitqueue_head(&tmr->tmr_wait);
- transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
- tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
- &pending_req->sense_buffer[0]);
-
- rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
- if (rc < 0)
- goto out;
-
- se_cmd->se_tmr_req->ref_task_tag = tag;
-
- if (transport_lookup_tmr_lun(se_cmd, pending_req->v2p->lun) < 0)
- goto out;
+ rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
+ &pending_req->sense_buffer[0],
+ unpacked_lun, tmr, act, GFP_KERNEL,
+ tag, TARGET_SCF_ACK_KREF);
+ if (rc)
+ goto err;
- transport_generic_handle_tmr(se_cmd);
wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
SUCCESS : FAILED;
-out:
- if (tmr) {
- transport_generic_free_cmd(&pending_req->se_cmd, 1);
+ scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
+ transport_generic_free_cmd(&pending_req->se_cmd, 1);
+ return;
+err:
+ if (tmr)
kfree(tmr);
- }
-
scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
-
- kmem_cache_free(scsiback_cachep, pending_req);
}
/*
@@ -653,15 +658,53 @@ out:
return entry;
}
-static int prepare_pending_reqs(struct vscsibk_info *info,
- struct vscsiif_request *ring_req,
- struct vscsibk_pend *pending_req)
+static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
+ struct v2p_entry *v2p)
+{
+ struct scsiback_tpg *tpg = v2p->tpg;
+ struct scsiback_nexus *nexus = tpg->tpg_nexus;
+ struct se_session *se_sess = nexus->tvn_se_sess;
+ struct vscsibk_pend *req;
+ int tag, i;
+
+ tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+ if (tag < 0) {
+ pr_err("Unable to obtain tag for vscsiif_request\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
+ memset(req, 0, sizeof(*req));
+ req->se_cmd.map_tag = tag;
+
+ for (i = 0; i < VSCSI_MAX_GRANTS; i++)
+ req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+
+ return req;
+}
+
+static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
+ struct vscsiif_back_ring *ring,
+ struct vscsiif_request *ring_req)
{
+ struct vscsibk_pend *pending_req;
struct v2p_entry *v2p;
struct ids_tuple vir;
- pending_req->rqid = ring_req->rqid;
- pending_req->info = info;
+ /* request range check from frontend */
+ if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
+ (ring_req->sc_data_direction != DMA_TO_DEVICE) &&
+ (ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
+ (ring_req->sc_data_direction != DMA_NONE)) {
+ pr_debug("invalid parameter data_dir = %d\n",
+ ring_req->sc_data_direction);
+ return ERR_PTR(-EINVAL);
+ }
+ if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
+ pr_debug("invalid parameter cmd_len = %d\n",
+ ring_req->cmd_len);
+ return ERR_PTR(-EINVAL);
+ }
vir.chn = ring_req->channel;
vir.tgt = ring_req->id;
@@ -669,33 +712,24 @@ static int prepare_pending_reqs(struct vscsibk_info *info,
v2p = scsiback_do_translation(info, &vir);
if (!v2p) {
- pending_req->v2p = NULL;
pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
- vir.chn, vir.tgt, vir.lun);
- return -ENODEV;
+ vir.chn, vir.tgt, vir.lun);
+ return ERR_PTR(-ENODEV);
}
- pending_req->v2p = v2p;
- /* request range check from frontend */
- pending_req->sc_data_direction = ring_req->sc_data_direction;
- if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
- (pending_req->sc_data_direction != DMA_TO_DEVICE) &&
- (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
- (pending_req->sc_data_direction != DMA_NONE)) {
- pr_debug("invalid parameter data_dir = %d\n",
- pending_req->sc_data_direction);
- return -EINVAL;
+ pending_req = scsiback_get_pend_req(ring, v2p);
+ if (IS_ERR(pending_req)) {
+ kref_put(&v2p->kref, scsiback_free_translation_entry);
+ return ERR_PTR(-ENOMEM);
}
-
+ pending_req->rqid = ring_req->rqid;
+ pending_req->info = info;
+ pending_req->v2p = v2p;
+ pending_req->sc_data_direction = ring_req->sc_data_direction;
pending_req->cmd_len = ring_req->cmd_len;
- if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
- pr_debug("invalid parameter cmd_len = %d\n",
- pending_req->cmd_len);
- return -EINVAL;
- }
memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
- return 0;
+ return pending_req;
}
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
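Two idioms meet in the following hunks. First, prepare_pending_reqs() now reports failure in-band using the <linux/err.h> encoding, where ERR_PTR() folds a small negative errno into the pointer value and IS_ERR()/PTR_ERR() recover it, so the separate int err disappears. Second, a failure now means no pending_req was ever allocated, so the error reply can no longer go through scsiback_do_resp_with_sense(); scsiback_send_response(), which this patch appears to split out of the response path earlier (not shown here), builds the reply from the copied ring_req.rqid alone. A minimal sketch of the pointer-encoding idiom, with example_lookup and example_caller as hypothetical names:

	#include <linux/err.h>

	static struct vscsibk_pend *example_lookup(int found)
	{
		if (!found)
			return ERR_PTR(-ENODEV); /* errno folded into pointer */
		return NULL; /* stand-in for a real object */
	}

	static long example_caller(void)
	{
		struct vscsibk_pend *req = example_lookup(0);

		if (IS_ERR(req))
			return PTR_ERR(req);	/* recovers -ENODEV */
		return 0;
	}
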
@@ -704,7 +738,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
struct vscsiif_request ring_req;
struct vscsibk_pend *pending_req;
RING_IDX rc, rp;
- int err, more_to_do;
+ int more_to_do;
uint32_t result;
rc = ring->req_cons;
@@ -722,16 +756,13 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
while ((rc != rp)) {
if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
break;
- pending_req = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
- if (!pending_req)
- return 1;
RING_COPY_REQUEST(ring, rc, &ring_req);
ring->req_cons = ++rc;
- err = prepare_pending_reqs(info, &ring_req, pending_req);
- if (err) {
- switch (err) {
+ pending_req = prepare_pending_reqs(info, ring, &ring_req);
+ if (IS_ERR(pending_req)) {
+ switch (PTR_ERR(pending_req)) {
case -ENODEV:
result = DID_NO_CONNECT;
break;
@@ -739,9 +770,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
result = DRIVER_ERROR;
break;
}
- scsiback_do_resp_with_sense(NULL, result << 24, 0,
- pending_req);
- kmem_cache_free(scsiback_cachep, pending_req);
+ scsiback_send_response(info, NULL, result << 24, 0,
+ ring_req.rqid);
return 1;
}
@@ -750,8 +780,8 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
scsiback_fast_flush_area(pending_req);
scsiback_do_resp_with_sense(NULL,
- DRIVER_ERROR << 24, 0, pending_req);
- kmem_cache_free(scsiback_cachep, pending_req);
+ DRIVER_ERROR << 24, 0, pending_req);
+ transport_generic_free_cmd(&pending_req->se_cmd, 0);
} else {
scsiback_cmd_exec(pending_req);
}
@@ -765,9 +795,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
break;
default:
pr_err_ratelimited("invalid request\n");
- scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
- 0, pending_req);
- kmem_cache_free(scsiback_cachep, pending_req);
+ scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
+ pending_req);
+ transport_generic_free_cmd(&pending_req->se_cmd, 0);
break;
}
@@ -1353,24 +1383,20 @@ static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
static int scsiback_check_stop_free(struct se_cmd *se_cmd)
{
- /*
- * Do not release struct se_cmd's containing a valid TMR pointer.
- * These will be released directly in scsiback_device_action()
- * with transport_generic_free_cmd().
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- return 0;
-
- transport_generic_free_cmd(se_cmd, 0);
- return 1;
+ return transport_generic_free_cmd(se_cmd, 0);
}
static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
- struct vscsibk_pend *pending_req = container_of(se_cmd,
- struct vscsibk_pend, se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+
+ if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+ kfree(tmr);
+ }
- kmem_cache_free(scsiback_cachep, pending_req);
+ percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
static int scsiback_shutdown_session(struct se_session *se_sess)
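With command memory owned by the session, the teardown above becomes symmetric with scsiback_get_pend_req(): release_cmd() returns the slot index to the tag pool instead of freeing to a slab, and the scsiback_tmr kfree() moves here so that the final reference put, wherever it happens, also reclaims the TMR bookkeeping. check_stop_free() shrinks to forwarding transport_generic_free_cmd()'s return value, which in the target core of this era is nonzero when the last kref was dropped. The pairing, as a comment-sized sketch:

	/*
	 * alloc:  tag = percpu_ida_alloc(&se_sess->sess_tag_pool,
	 *                                TASK_RUNNING);
	 *         cmd = &sess_cmd_map[tag];  cmd->se_cmd.map_tag = tag;
	 * free:   percpu_ida_free(&se_sess->sess_tag_pool,
	 *                         se_cmd->map_tag);
	 */
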
@@ -1494,61 +1520,49 @@ static struct configfs_attribute *scsiback_param_attrs[] = {
NULL,
};
+static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
+ struct se_session *se_sess, void *p)
+{
+ struct scsiback_tpg *tpg = container_of(se_tpg,
+ struct scsiback_tpg, se_tpg);
+
+ tpg->tpg_nexus = p;
+ return 0;
+}
+
static int scsiback_make_nexus(struct scsiback_tpg *tpg,
const char *name)
{
- struct se_portal_group *se_tpg;
- struct se_session *se_sess;
struct scsiback_nexus *tv_nexus;
+ int ret = 0;
mutex_lock(&tpg->tv_tpg_mutex);
if (tpg->tpg_nexus) {
- mutex_unlock(&tpg->tv_tpg_mutex);
pr_debug("tpg->tpg_nexus already exists\n");
- return -EEXIST;
+ ret = -EEXIST;
+ goto out_unlock;
}
- se_tpg = &tpg->se_tpg;
tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
if (!tv_nexus) {
- mutex_unlock(&tpg->tv_tpg_mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_unlock;
}
- /*
- * Initialize the struct se_session pointer
- */
- tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
+
+ tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+ VSCSI_DEFAULT_SESSION_TAGS,
+ sizeof(struct vscsibk_pend),
+ TARGET_PROT_NORMAL, name,
+ tv_nexus, scsiback_alloc_sess_cb);
if (IS_ERR(tv_nexus->tvn_se_sess)) {
- mutex_unlock(&tpg->tv_tpg_mutex);
kfree(tv_nexus);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_unlock;
}
- se_sess = tv_nexus->tvn_se_sess;
- /*
- * Since we are running in 'demo mode' this call with generate a
- * struct se_node_acl for the scsiback struct se_portal_group with
- * the SCSI Initiator port name of the passed configfs group 'name'.
- */
- tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
- se_tpg, (unsigned char *)name);
- if (!tv_nexus->tvn_se_sess->se_node_acl) {
- mutex_unlock(&tpg->tv_tpg_mutex);
- pr_debug("core_tpg_check_initiator_node_acl() failed for %s\n",
- name);
- goto out;
- }
- /* Now register the TCM pvscsi virtual I_T Nexus as active. */
- transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
- tv_nexus->tvn_se_sess, tv_nexus);
- tpg->tpg_nexus = tv_nexus;
+out_unlock:
mutex_unlock(&tpg->tv_tpg_mutex);
- return 0;
-
-out:
- transport_free_session(se_sess);
- kfree(tv_nexus);
- return -ENOMEM;
+ return ret;
}
static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
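target_alloc_session(), new in this kernel, subsumes the whole open-coded sequence removed above: transport_init_session(), the demo-mode core_tpg_check_initiator_node_acl() lookup, transport_register_session() and the error unwinding, and it additionally sizes the per-session tag pool used earlier. Its shape in this era, for reference (the callback runs before the session is registered, and the helper frees the session itself on any failure, hence the much shorter unwind path):

	struct se_session *
	target_alloc_session(struct se_portal_group *tpg,
			     unsigned int tag_num,	/* sess_cmd_map slots */
			     unsigned int tag_size,	/* bytes per slot */
			     enum target_prot_op prot_op,
			     const char *initiatorname,
			     void *private,
			     int (*callback)(struct se_portal_group *,
					     struct se_session *, void *));

One wrinkle worth noting: the helper returns ERR_PTR() encodings, yet the error path above still flattens every failure to -ENOMEM rather than using PTR_ERR(tv_nexus->tvn_se_sess); that matches the hunk as posted.
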
@@ -1866,16 +1880,6 @@ static struct xenbus_driver scsiback_driver = {
.otherend_changed = scsiback_frontend_changed
};
-static void scsiback_init_pend(void *p)
-{
- struct vscsibk_pend *pend = p;
- int i;
-
- memset(pend, 0, sizeof(*pend));
- for (i = 0; i < VSCSI_MAX_GRANTS; i++)
- pend->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
-}
-
static int __init scsiback_init(void)
{
int ret;
@@ -1886,14 +1890,9 @@ static int __init scsiback_init(void)
pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
VSCSI_VERSION, utsname()->sysname, utsname()->machine);
- scsiback_cachep = kmem_cache_create("vscsiif_cache",
- sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
- if (!scsiback_cachep)
- return -ENOMEM;
-
ret = xenbus_register_backend(&scsiback_driver);
if (ret)
- goto out_cache_destroy;
+ goto out;
ret = target_register_template(&scsiback_ops);
if (ret)
@@ -1903,8 +1902,7 @@ static int __init scsiback_init(void)
out_unregister_xenbus:
xenbus_unregister_driver(&scsiback_driver);
-out_cache_destroy:
- kmem_cache_destroy(scsiback_cachep);
+out:
pr_err("%s: error %d\n", __func__, ret);
return ret;
}
@@ -1920,7 +1918,6 @@ static void __exit scsiback_exit(void)
}
target_unregister_template(&scsiback_ops);
xenbus_unregister_driver(&scsiback_driver);
- kmem_cache_destroy(scsiback_cachep);
}
module_init(scsiback_init);
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 3b2bffde534f..53a085fca00c 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -71,7 +71,6 @@
#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <xen/balloon.h>
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index ee6d9efd7b76..4a41ac9af966 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -5,7 +5,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/capability.h>
#include <xen/xen.h>
@@ -18,8 +18,6 @@
#include "xenbus_comms.h"
-MODULE_LICENSE("GPL");
-
static int xenbus_backend_open(struct inode *inode, struct file *filp)
{
if (!capable(CAP_SYS_ADMIN))
@@ -132,11 +130,4 @@ static int __init xenbus_backend_init(void)
pr_err("Could not register xenbus backend device\n");
return err;
}
-
-static void __exit xenbus_backend_exit(void)
-{
- misc_deregister(&xenbus_backend_dev);
-}
-
-module_init(xenbus_backend_init);
-module_exit(xenbus_backend_exit);
+device_initcall(xenbus_backend_init);
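Since this xenbus code is only ever built in, the module exit path is dead code and module_init() is just a spelling of device_initcall(). From <linux/init.h> in the !MODULE case:

	#define module_init(x)	__initcall(x);
	#define __initcall(fn)	device_initcall(fn)

so switching to device_initcall() states the intent directly, lets the __exit teardown and MODULE_LICENSE() boilerplate go, and trades the <linux/module.h> include for <linux/init.h>. The same conversion is applied to xenbus_dev_frontend.c below.
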
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 912b64edb42b..cacf30d14747 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,7 +55,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include "xenbus_comms.h"
@@ -63,8 +63,6 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-MODULE_LICENSE("GPL");
-
/*
* An element of a list of outstanding transactions, for which we're
* still waiting a reply.
@@ -626,11 +624,4 @@ static int __init xenbus_init(void)
pr_err("Could not register xenbus frontend device\n");
return err;
}
-
-static void __exit xenbus_exit(void)
-{
- misc_deregister(&xenbus_dev);
-}
-
-module_init(xenbus_init);
-module_exit(xenbus_exit);
+device_initcall(xenbus_init);
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index ba804f3d8278..374b12af8812 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -44,7 +44,6 @@
#include <linux/fcntl.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <asm/xen/hypervisor.h>
#include <xen/xenbus.h>
diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c
index a03f261b12d8..c6e2b4a542ea 100644
--- a/drivers/xen/xenfs/xensyms.c
+++ b/drivers/xen/xenfs/xensyms.c
@@ -1,4 +1,3 @@
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 36b210f9b6b2..9282dbf5abdb 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -65,8 +65,7 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct zorro_dev *z = to_zorro_dev(container_of(kobj, struct device,
- kobj));
+ struct zorro_dev *z = to_zorro_dev(kobj_to_dev(kobj));
struct ConfigDev cd;
/* Construct a ConfigDev */
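
The zorro-sysfs change, finally, is purely cosmetic: kobj_to_dev() is the <linux/device.h> helper

	static inline struct device *kobj_to_dev(struct kobject *kobj)
	{
		return container_of(kobj, struct device, kobj);
	}

so the nested container_of() collapses into a named accessor with no behavioural change.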